Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig4
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/accessibility/braille/braille_console.c2
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/Makefile4
-rw-r--r--drivers/acpi/acpica/Makefile158
-rw-r--r--drivers/acpi/acpica/accommon.h2
-rw-r--r--drivers/acpi/acpica/acconfig.h9
-rw-r--r--drivers/acpi/acpica/acdebug.h2
-rw-r--r--drivers/acpi/acpica/acdispat.h2
-rw-r--r--drivers/acpi/acpica/acevents.h3
-rw-r--r--drivers/acpi/acpica/acglobal.h19
-rw-r--r--drivers/acpi/acpica/achware.h2
-rw-r--r--drivers/acpi/acpica/acinterp.h4
-rw-r--r--drivers/acpi/acpica/aclocal.h26
-rw-r--r--drivers/acpi/acpica/acmacros.h2
-rw-r--r--drivers/acpi/acpica/acnamesp.h2
-rw-r--r--drivers/acpi/acpica/acobject.h8
-rw-r--r--drivers/acpi/acpica/acopcode.h6
-rw-r--r--drivers/acpi/acpica/acparser.h2
-rw-r--r--drivers/acpi/acpica/acpredef.h41
-rw-r--r--drivers/acpi/acpica/acresrc.h115
-rw-r--r--drivers/acpi/acpica/acstruct.h2
-rw-r--r--drivers/acpi/acpica/actables.h2
-rw-r--r--drivers/acpi/acpica/acutils.h21
-rw-r--r--drivers/acpi/acpica/amlcode.h29
-rw-r--r--drivers/acpi/acpica/amlresrc.h138
-rw-r--r--drivers/acpi/acpica/dsargs.c18
-rw-r--r--drivers/acpi/acpica/dscontrol.c2
-rw-r--r--drivers/acpi/acpica/dsfield.c83
-rw-r--r--drivers/acpi/acpica/dsinit.c2
-rw-r--r--drivers/acpi/acpica/dsmethod.c2
-rw-r--r--drivers/acpi/acpica/dsmthdat.c2
-rw-r--r--drivers/acpi/acpica/dsobject.c2
-rw-r--r--drivers/acpi/acpica/dsopcode.c2
-rw-r--r--drivers/acpi/acpica/dsutils.c2
-rw-r--r--drivers/acpi/acpica/dswexec.c2
-rw-r--r--drivers/acpi/acpica/dswload.c2
-rw-r--r--drivers/acpi/acpica/dswload2.c2
-rw-r--r--drivers/acpi/acpica/dswscope.c2
-rw-r--r--drivers/acpi/acpica/dswstate.c2
-rw-r--r--drivers/acpi/acpica/evevent.c14
-rw-r--r--drivers/acpi/acpica/evglock.c8
-rw-r--r--drivers/acpi/acpica/evgpe.c2
-rw-r--r--drivers/acpi/acpica/evgpeblk.c2
-rw-r--r--drivers/acpi/acpica/evgpeinit.c2
-rw-r--r--drivers/acpi/acpica/evgpeutil.c2
-rw-r--r--drivers/acpi/acpica/evmisc.c2
-rw-r--r--drivers/acpi/acpica/evregion.c31
-rw-r--r--drivers/acpi/acpica/evrgnini.c2
-rw-r--r--drivers/acpi/acpica/evsci.c2
-rw-r--r--drivers/acpi/acpica/evxface.c2
-rw-r--r--drivers/acpi/acpica/evxfevnt.c2
-rw-r--r--drivers/acpi/acpica/evxfgpe.c2
-rw-r--r--drivers/acpi/acpica/evxfregn.c2
-rw-r--r--drivers/acpi/acpica/exconfig.c8
-rw-r--r--drivers/acpi/acpica/exconvrt.c2
-rw-r--r--drivers/acpi/acpica/excreate.c31
-rw-r--r--drivers/acpi/acpica/exdebug.c2
-rw-r--r--drivers/acpi/acpica/exdump.c9
-rw-r--r--drivers/acpi/acpica/exfield.c30
-rw-r--r--drivers/acpi/acpica/exfldio.c38
-rw-r--r--drivers/acpi/acpica/exmisc.c2
-rw-r--r--drivers/acpi/acpica/exmutex.c2
-rw-r--r--drivers/acpi/acpica/exnames.c2
-rw-r--r--drivers/acpi/acpica/exoparg1.c2
-rw-r--r--drivers/acpi/acpica/exoparg2.c2
-rw-r--r--drivers/acpi/acpica/exoparg3.c2
-rw-r--r--drivers/acpi/acpica/exoparg6.c2
-rw-r--r--drivers/acpi/acpica/exprep.c27
-rw-r--r--drivers/acpi/acpica/exregion.c2
-rw-r--r--drivers/acpi/acpica/exresnte.c2
-rw-r--r--drivers/acpi/acpica/exresolv.c2
-rw-r--r--drivers/acpi/acpica/exresop.c2
-rw-r--r--drivers/acpi/acpica/exstore.c2
-rw-r--r--drivers/acpi/acpica/exstoren.c2
-rw-r--r--drivers/acpi/acpica/exstorob.c2
-rw-r--r--drivers/acpi/acpica/exsystem.c2
-rw-r--r--drivers/acpi/acpica/exutils.c27
-rw-r--r--drivers/acpi/acpica/hwacpi.c2
-rw-r--r--drivers/acpi/acpica/hwgpe.c2
-rw-r--r--drivers/acpi/acpica/hwpci.c2
-rw-r--r--drivers/acpi/acpica/hwregs.c2
-rw-r--r--drivers/acpi/acpica/hwsleep.c2
-rw-r--r--drivers/acpi/acpica/hwtimer.c2
-rw-r--r--drivers/acpi/acpica/hwvalid.c4
-rw-r--r--drivers/acpi/acpica/hwxface.c4
-rw-r--r--drivers/acpi/acpica/nsaccess.c2
-rw-r--r--drivers/acpi/acpica/nsalloc.c2
-rw-r--r--drivers/acpi/acpica/nsdump.c2
-rw-r--r--drivers/acpi/acpica/nsdumpdv.c2
-rw-r--r--drivers/acpi/acpica/nseval.c2
-rw-r--r--drivers/acpi/acpica/nsinit.c2
-rw-r--r--drivers/acpi/acpica/nsload.c2
-rw-r--r--drivers/acpi/acpica/nsnames.c2
-rw-r--r--drivers/acpi/acpica/nsobject.c2
-rw-r--r--drivers/acpi/acpica/nsparse.c2
-rw-r--r--drivers/acpi/acpica/nspredef.c31
-rw-r--r--drivers/acpi/acpica/nsrepair.c3
-rw-r--r--drivers/acpi/acpica/nsrepair2.c7
-rw-r--r--drivers/acpi/acpica/nssearch.c2
-rw-r--r--drivers/acpi/acpica/nsutils.c2
-rw-r--r--drivers/acpi/acpica/nswalk.c2
-rw-r--r--drivers/acpi/acpica/nsxfeval.c2
-rw-r--r--drivers/acpi/acpica/nsxfname.c2
-rw-r--r--drivers/acpi/acpica/nsxfobj.c2
-rw-r--r--drivers/acpi/acpica/psargs.c143
-rw-r--r--drivers/acpi/acpica/psloop.c2
-rw-r--r--drivers/acpi/acpica/psopcode.c15
-rw-r--r--drivers/acpi/acpica/psparse.c2
-rw-r--r--drivers/acpi/acpica/psscope.c2
-rw-r--r--drivers/acpi/acpica/pstree.c8
-rw-r--r--drivers/acpi/acpica/psutils.c2
-rw-r--r--drivers/acpi/acpica/pswalk.c2
-rw-r--r--drivers/acpi/acpica/psxface.c2
-rw-r--r--drivers/acpi/acpica/rsaddr.c2
-rw-r--r--drivers/acpi/acpica/rscalc.c89
-rw-r--r--drivers/acpi/acpica/rscreate.c69
-rw-r--r--drivers/acpi/acpica/rsdump.c196
-rw-r--r--drivers/acpi/acpica/rsinfo.c58
-rw-r--r--drivers/acpi/acpica/rsio.c2
-rw-r--r--drivers/acpi/acpica/rsirq.c33
-rw-r--r--drivers/acpi/acpica/rslist.c77
-rw-r--r--drivers/acpi/acpica/rsmemory.c2
-rw-r--r--drivers/acpi/acpica/rsmisc.c269
-rw-r--r--drivers/acpi/acpica/rsserial.c441
-rw-r--r--drivers/acpi/acpica/rsutils.c56
-rw-r--r--drivers/acpi/acpica/rsxface.c52
-rw-r--r--drivers/acpi/acpica/tbfadt.c41
-rw-r--r--drivers/acpi/acpica/tbfind.c2
-rw-r--r--drivers/acpi/acpica/tbinstal.c2
-rw-r--r--drivers/acpi/acpica/tbutils.c9
-rw-r--r--drivers/acpi/acpica/tbxface.c2
-rw-r--r--drivers/acpi/acpica/tbxfroot.c2
-rw-r--r--drivers/acpi/acpica/utaddress.c294
-rw-r--r--drivers/acpi/acpica/utalloc.c2
-rw-r--r--drivers/acpi/acpica/utcopy.c2
-rw-r--r--drivers/acpi/acpica/utdebug.c2
-rw-r--r--drivers/acpi/acpica/utdecode.c6
-rw-r--r--drivers/acpi/acpica/utdelete.c15
-rw-r--r--drivers/acpi/acpica/uteval.c2
-rw-r--r--drivers/acpi/acpica/utglobal.c8
-rw-r--r--drivers/acpi/acpica/utids.c2
-rw-r--r--drivers/acpi/acpica/utinit.c3
-rw-r--r--drivers/acpi/acpica/utlock.c2
-rw-r--r--drivers/acpi/acpica/utmath.c2
-rw-r--r--drivers/acpi/acpica/utmisc.c2
-rw-r--r--drivers/acpi/acpica/utmutex.c11
-rw-r--r--drivers/acpi/acpica/utobject.c2
-rw-r--r--drivers/acpi/acpica/utosi.c2
-rw-r--r--drivers/acpi/acpica/utresrc.c278
-rw-r--r--drivers/acpi/acpica/utstate.c2
-rw-r--r--drivers/acpi/acpica/utxface.c40
-rw-r--r--drivers/acpi/acpica/utxferror.c2
-rw-r--r--drivers/acpi/acpica/utxfmutex.c187
-rw-r--r--drivers/acpi/apei/apei-base.c123
-rw-r--r--drivers/acpi/apei/apei-internal.h6
-rw-r--r--drivers/acpi/apei/einj.c315
-rw-r--r--drivers/acpi/apei/erst.c11
-rw-r--r--drivers/acpi/apei/ghes.c104
-rw-r--r--drivers/acpi/apei/hest.c7
-rw-r--r--drivers/acpi/atomicio.c365
-rw-r--r--drivers/acpi/battery.c2
-rw-r--r--drivers/acpi/dock.c2
-rw-r--r--drivers/acpi/ec_sys.c2
-rw-r--r--drivers/acpi/numa.c6
-rw-r--r--drivers/acpi/nvs.c53
-rw-r--r--drivers/acpi/osl.c394
-rw-r--r--drivers/acpi/pci_irq.c10
-rw-r--r--drivers/acpi/pci_root.c7
-rw-r--r--drivers/acpi/pci_slot.c2
-rw-r--r--drivers/acpi/processor_core.c26
-rw-r--r--drivers/acpi/processor_driver.c175
-rw-r--r--drivers/acpi/processor_thermal.c1
-rw-r--r--drivers/acpi/sleep.c24
-rw-r--r--drivers/acpi/video.c6
-rw-r--r--drivers/amba/bus.c140
-rw-r--r--drivers/ata/Kconfig2
-rw-r--r--drivers/ata/ahci.c26
-rw-r--r--drivers/ata/ahci_platform.c68
-rw-r--r--drivers/ata/ata_piix.c7
-rw-r--r--drivers/ata/libahci.c5
-rw-r--r--drivers/ata/libata-core.c190
-rw-r--r--drivers/ata/libata-scsi.c3
-rw-r--r--drivers/ata/libata-sff.c4
-rw-r--r--drivers/ata/libata-transport.c6
-rw-r--r--drivers/ata/libata.h1
-rw-r--r--drivers/ata/pata_arasan_cf.c12
-rw-r--r--drivers/ata/pata_at91.c69
-rw-r--r--drivers/ata/pata_bf54x.c187
-rw-r--r--drivers/ata/pata_cs5536.c99
-rw-r--r--drivers/ata/pata_imx.c12
-rw-r--r--drivers/ata/pata_ixp4xx_cf.c13
-rw-r--r--drivers/ata/pata_mpc52xx.c21
-rw-r--r--drivers/ata/pata_of_platform.c27
-rw-r--r--drivers/ata/pata_palmld.c13
-rw-r--r--drivers/ata/pata_platform.c12
-rw-r--r--drivers/ata/pata_pxa.c13
-rw-r--r--drivers/ata/pata_rb532_cf.c21
-rw-r--r--drivers/ata/sata_dwc_460ex.c13
-rw-r--r--drivers/ata/sata_fsl.c25
-rw-r--r--drivers/ata/sata_mv.c19
-rw-r--r--drivers/ata/sata_nv.c6
-rw-r--r--drivers/ata/sata_sil24.c2
-rw-r--r--drivers/atm/he.c6
-rw-r--r--drivers/atm/iphase.c4
-rw-r--r--drivers/atm/solos-pci.c4
-rw-r--r--drivers/base/Kconfig15
-rw-r--r--drivers/base/Makefile7
-rw-r--r--drivers/base/base.h14
-rw-r--r--drivers/base/bus.c298
-rw-r--r--drivers/base/class.c14
-rw-r--r--drivers/base/core.c106
-rw-r--r--drivers/base/cpu.c192
-rw-r--r--drivers/base/devres.c2
-rw-r--r--drivers/base/devtmpfs.c11
-rw-r--r--drivers/base/dma-buf.c291
-rw-r--r--drivers/base/firmware_class.c21
-rw-r--r--drivers/base/init.c1
-rw-r--r--drivers/base/memory.c208
-rw-r--r--drivers/base/node.c162
-rw-r--r--drivers/base/platform.c117
-rw-r--r--drivers/base/power/Makefile2
-rw-r--r--drivers/base/power/domain.c554
-rw-r--r--drivers/base/power/domain_governor.c170
-rw-r--r--drivers/base/power/generic_ops.c91
-rw-r--r--drivers/base/power/main.c375
-rw-r--r--drivers/base/power/qos.c49
-rw-r--r--drivers/base/power/runtime.c157
-rw-r--r--drivers/base/regmap/Kconfig3
-rw-r--r--drivers/base/regmap/Makefile4
-rw-r--r--drivers/base/regmap/internal.h6
-rw-r--r--drivers/base/regmap/regcache-indexed.c64
-rw-r--r--drivers/base/regmap/regcache-lzo.c21
-rw-r--r--drivers/base/regmap/regcache-rbtree.c61
-rw-r--r--drivers/base/regmap/regcache.c91
-rw-r--r--drivers/base/regmap/regmap-irq.c302
-rw-r--r--drivers/base/regmap/regmap.c182
-rw-r--r--drivers/base/sys.c391
-rw-r--r--drivers/base/topology.c51
-rw-r--r--drivers/bcma/bcma_private.h4
-rw-r--r--drivers/bcma/host_pci.c62
-rw-r--r--drivers/bcma/main.c44
-rw-r--r--drivers/bcma/scan.c19
-rw-r--r--drivers/bcma/sprom.c61
-rw-r--r--drivers/block/DAC960.c18
-rw-r--r--drivers/block/Kconfig13
-rw-r--r--drivers/block/Makefile2
-rw-r--r--drivers/block/amiflop.c2
-rw-r--r--drivers/block/aoe/aoechr.c2
-rw-r--r--drivers/block/brd.c9
-rw-r--r--drivers/block/cciss.c6
-rw-r--r--drivers/block/drbd/drbd_int.h4
-rw-r--r--drivers/block/drbd/drbd_main.c4
-rw-r--r--drivers/block/floppy.c20
-rw-r--r--drivers/block/loop.c25
-rw-r--r--drivers/block/mtip32xx/Kconfig9
-rw-r--r--drivers/block/mtip32xx/Makefile5
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c3650
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h418
-rw-r--r--drivers/block/nvme.c1741
-rw-r--r--drivers/block/paride/bpck6.c5
-rw-r--r--drivers/block/paride/pcd.c2
-rw-r--r--drivers/block/paride/pd.c3
-rw-r--r--drivers/block/paride/pf.c4
-rw-r--r--drivers/block/paride/pg.c3
-rw-r--r--drivers/block/paride/pt.c4
-rw-r--r--drivers/block/pktcdvd.c2
-rw-r--r--drivers/block/rbd.c9
-rw-r--r--drivers/block/swim.c1
-rw-r--r--drivers/block/sx8.c14
-rw-r--r--drivers/block/ub.c3
-rw-r--r--drivers/block/virtio_blk.c91
-rw-r--r--drivers/block/xd.c2
-rw-r--r--drivers/block/xen-blkback/blkback.c84
-rw-r--r--drivers/block/xen-blkback/common.h67
-rw-r--r--drivers/block/xen-blkback/xenbus.c23
-rw-r--r--drivers/block/xen-blkfront.c90
-rw-r--r--drivers/block/xsysace.c10
-rw-r--r--drivers/bluetooth/ath3k.c29
-rw-r--r--drivers/bluetooth/bcm203x.c21
-rw-r--r--drivers/bluetooth/bfusb.c25
-rw-r--r--drivers/bluetooth/bluecard_cs.c4
-rw-r--r--drivers/bluetooth/bpa10x.c15
-rw-r--r--drivers/bluetooth/bt3c_cs.c4
-rw-r--r--drivers/bluetooth/btmrvl_main.c2
-rw-r--r--drivers/bluetooth/btuart_cs.c4
-rw-r--r--drivers/bluetooth/btusb.c51
-rw-r--r--drivers/bluetooth/dtl1_cs.c4
-rw-r--r--drivers/bluetooth/hci_bcsp.c4
-rw-r--r--drivers/bluetooth/hci_ldisc.c2
-rw-r--r--drivers/bluetooth/hci_vhci.c13
-rw-r--r--drivers/cdrom/cdrom.c36
-rw-r--r--drivers/char/agp/amd64-agp.c2
-rw-r--r--drivers/char/agp/backend.c12
-rw-r--r--drivers/char/agp/generic.c8
-rw-r--r--drivers/char/agp/sis-agp.c2
-rw-r--r--drivers/char/hw_random/atmel-rng.c12
-rw-r--r--drivers/char/hw_random/n2-drv.c13
-rw-r--r--drivers/char/hw_random/nomadik-rng.c2
-rw-r--r--drivers/char/hw_random/octeon-rng.c13
-rw-r--r--drivers/char/hw_random/pasemi-rng.c12
-rw-r--r--drivers/char/hw_random/picoxcell-rng.c12
-rw-r--r--drivers/char/hw_random/ppc4xx-rng.c12
-rw-r--r--drivers/char/hw_random/timeriomem-rng.c13
-rw-r--r--drivers/char/hw_random/virtio-rng.c2
-rw-r--r--drivers/char/i8k.c8
-rw-r--r--drivers/char/ipmi/ipmi_bt_sm.c2
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c2
-rw-r--r--drivers/char/lp.c2
-rw-r--r--drivers/char/mem.c4
-rw-r--r--drivers/char/misc.c2
-rw-r--r--drivers/char/nwflash.c2
-rw-r--r--drivers/char/pcmcia/synclink_cs.c2
-rw-r--r--drivers/char/ramoops.c24
-rw-r--r--drivers/char/random.c16
-rw-r--r--drivers/char/raw.c2
-rw-r--r--drivers/char/tile-srom.c2
-rw-r--r--drivers/char/tpm/Kconfig2
-rw-r--r--drivers/char/tpm/tpm.c146
-rw-r--r--drivers/char/tpm/tpm.h12
-rw-r--r--drivers/char/tpm/tpm_tis.c90
-rw-r--r--drivers/char/virtio_console.c140
-rw-r--r--drivers/clk/Kconfig3
-rw-r--r--drivers/clocksource/acpi_pm.c2
-rw-r--r--drivers/clocksource/clksrc-dbx500-prcmu.c16
-rw-r--r--drivers/clocksource/i8253.c6
-rw-r--r--drivers/clocksource/tcb_clksrc.c4
-rw-r--r--drivers/cpufreq/Kconfig.arm15
-rw-r--r--drivers/cpufreq/Makefile2
-rw-r--r--drivers/cpufreq/cpufreq.c82
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c50
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c57
-rw-r--r--drivers/cpufreq/cpufreq_stats.c6
-rw-r--r--drivers/cpufreq/cpufreq_userspace.c8
-rw-r--r--drivers/cpufreq/exynos-cpufreq.c290
-rw-r--r--drivers/cpufreq/exynos4210-cpufreq.c643
-rw-r--r--drivers/cpufreq/omap-cpufreq.c274
-rw-r--r--drivers/cpufreq/powernow-k8.c47
-rw-r--r--drivers/cpufreq/s3c64xx-cpufreq.c35
-rw-r--r--drivers/cpuidle/Kconfig2
-rw-r--r--drivers/cpuidle/cpuidle.c12
-rw-r--r--drivers/cpuidle/cpuidle.h10
-rw-r--r--drivers/cpuidle/sysfs.c74
-rw-r--r--drivers/crypto/amcc/crypto4xx_core.c13
-rw-r--r--drivers/crypto/caam/caamalg.c67
-rw-r--r--drivers/crypto/caam/compat.h1
-rw-r--r--drivers/crypto/caam/ctrl.c26
-rw-r--r--drivers/crypto/caam/desc.h2265
-rw-r--r--drivers/crypto/caam/desc_constr.h7
-rw-r--r--drivers/crypto/caam/regs.h1
-rw-r--r--drivers/crypto/mv_cesa.c13
-rw-r--r--drivers/crypto/picoxcell_crypto.c16
-rw-r--r--drivers/crypto/s5p-sss.c13
-rw-r--r--drivers/crypto/talitos.c493
-rw-r--r--drivers/crypto/talitos.h45
-rw-r--r--drivers/devfreq/Kconfig13
-rw-r--r--drivers/devfreq/Makefile3
-rw-r--r--drivers/devfreq/devfreq.c15
-rw-r--r--drivers/devfreq/exynos4_bus.c1135
-rw-r--r--drivers/dma/Kconfig27
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/amba-pl08x.c43
-rw-r--r--drivers/dma/at_hdmac.c107
-rw-r--r--drivers/dma/at_hdmac_regs.h18
-rw-r--r--drivers/dma/coh901318.c12
-rw-r--r--drivers/dma/coh901318_lli.c23
-rw-r--r--drivers/dma/coh901318_lli.h4
-rw-r--r--drivers/dma/dmaengine.c4
-rw-r--r--drivers/dma/dmatest.c48
-rw-r--r--drivers/dma/dw_dmac.c83
-rw-r--r--drivers/dma/dw_dmac_regs.h1
-rw-r--r--drivers/dma/ep93xx_dma.c90
-rw-r--r--drivers/dma/fsldma.c4
-rw-r--r--drivers/dma/imx-dma.c10
-rw-r--r--drivers/dma/imx-sdma.c33
-rw-r--r--drivers/dma/intel_mid_dma.c39
-rw-r--r--drivers/dma/intel_mid_dma_regs.h4
-rw-r--r--drivers/dma/iop-adma.c16
-rw-r--r--drivers/dma/ipu/ipu_idmac.c29
-rw-r--r--drivers/dma/mpc512x_dma.c12
-rw-r--r--drivers/dma/mv_xor.c11
-rw-r--r--drivers/dma/mxs-dma.c59
-rw-r--r--drivers/dma/pch_dma.c20
-rw-r--r--drivers/dma/pl330.c132
-rw-r--r--drivers/dma/shdma.c75
-rw-r--r--drivers/dma/sirf-dma.c707
-rw-r--r--drivers/dma/ste_dma40.c441
-rw-r--r--drivers/dma/ste_dma40_ll.h11
-rw-r--r--drivers/dma/timb_dma.c30
-rw-r--r--drivers/dma/txx9dmac.c12
-rw-r--r--drivers/edac/edac_core.h7
-rw-r--r--drivers/edac/edac_device.c1
-rw-r--r--drivers/edac/edac_device_sysfs.c20
-rw-r--r--drivers/edac/edac_mc.c1
-rw-r--r--drivers/edac/edac_mc_sysfs.c16
-rw-r--r--drivers/edac/edac_module.h2
-rw-r--r--drivers/edac/edac_pci.c1
-rw-r--r--drivers/edac/edac_pci_sysfs.c16
-rw-r--r--drivers/edac/edac_stub.c27
-rw-r--r--drivers/edac/i3200_edac.c15
-rw-r--r--drivers/edac/i7core_edac.c4
-rw-r--r--drivers/edac/i82975x_edac.c30
-rw-r--r--drivers/edac/mce_amd.c4
-rw-r--r--drivers/edac/mce_amd_inj.c13
-rw-r--r--drivers/edac/ppc4xx_edac.c2
-rw-r--r--drivers/edac/r82600_edac.c2
-rw-r--r--drivers/edac/sb_edac.c8
-rw-r--r--drivers/firewire/ohci.c6
-rw-r--r--drivers/firewire/sbp2.c2
-rw-r--r--drivers/firmware/Kconfig12
-rw-r--r--drivers/firmware/Makefile1
-rw-r--r--drivers/firmware/efivars.c8
-rw-r--r--drivers/firmware/google/gsmi.c3
-rw-r--r--drivers/firmware/iscsi_ibft.c12
-rw-r--r--drivers/firmware/sigma.c153
-rw-r--r--drivers/gpio/Kconfig32
-rw-r--r--drivers/gpio/Makefile5
-rw-r--r--drivers/gpio/devres.c90
-rw-r--r--drivers/gpio/gpio-adp5520.c12
-rw-r--r--drivers/gpio/gpio-adp5588.c5
-rw-r--r--drivers/gpio/gpio-bt8xx.c3
-rw-r--r--drivers/gpio/gpio-cs5535.c14
-rw-r--r--drivers/gpio/gpio-da9052.c12
-rw-r--r--drivers/gpio/gpio-generic.c12
-rw-r--r--drivers/gpio/gpio-janz-ttl.c15
-rw-r--r--drivers/gpio/gpio-lpc32xx.c2
-rw-r--r--drivers/gpio/gpio-ml-ioh.c9
-rw-r--r--drivers/gpio/gpio-nomadik.c4
-rw-r--r--drivers/gpio/gpio-pcf857x.c5
-rw-r--r--drivers/gpio/gpio-pch.c12
-rw-r--r--drivers/gpio/gpio-pl061.c202
-rw-r--r--drivers/gpio/gpio-pxa.c377
-rw-r--r--drivers/gpio/gpio-rdc321x.c13
-rw-r--r--drivers/gpio/gpio-sa1100.c6
-rw-r--r--drivers/gpio/gpio-samsung.c108
-rw-r--r--drivers/gpio/gpio-sch.c13
-rw-r--r--drivers/gpio/gpio-stmpe.c25
-rw-r--r--drivers/gpio/gpio-tegra.c9
-rw-r--r--drivers/gpio/gpio-timberdale.c13
-rw-r--r--drivers/gpio/gpio-tps65910.c2
-rw-r--r--drivers/gpio/gpio-ucb1400.c13
-rw-r--r--drivers/gpio/gpio-vr41xx.c13
-rw-r--r--drivers/gpio/gpio-vx855.c12
-rw-r--r--drivers/gpio/gpio-wm8994.c79
-rw-r--r--drivers/gpio/gpio-xilinx.c1
-rw-r--r--drivers/gpio/gpiolib.c6
-rw-r--r--drivers/gpu/drm/Kconfig3
-rw-r--r--drivers/gpu/drm/Makefile3
-rw-r--r--drivers/gpu/drm/drm_auth.c6
-rw-r--r--drivers/gpu/drm/drm_context.c5
-rw-r--r--drivers/gpu/drm/drm_crtc.c608
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c50
-rw-r--r--drivers/gpu/drm/drm_drv.c14
-rw-r--r--drivers/gpu/drm/drm_edid.c103
-rw-r--r--drivers/gpu/drm/drm_edid_modes.h284
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c7
-rw-r--r--drivers/gpu/drm/drm_fops.c7
-rw-r--r--drivers/gpu/drm/drm_gem.c2
-rw-r--r--drivers/gpu/drm/drm_ioc32.c3
-rw-r--r--drivers/gpu/drm/drm_ioctl.c15
-rw-r--r--drivers/gpu/drm/drm_lock.c3
-rw-r--r--drivers/gpu/drm/drm_sman.c351
-rw-r--r--drivers/gpu/drm/drm_sysfs.c2
-rw-r--r--drivers/gpu/drm/exynos/Kconfig9
-rw-r--r--drivers/gpu/drm/exynos/Makefile5
-rw-r--r--drivers/gpu/drm/exynos/exynos_ddc.c58
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.c5
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_buf.h3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_connector.c16
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_core.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_crtc.c95
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c61
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h33
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.c169
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.h6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c166
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.h24
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fbdev.c110
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c335
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c227
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.h53
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.c439
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_hdmi.h73
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c163
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.h14
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c1176
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.h87
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmiphy.c58
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c1075
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.h92
-rw-r--r--drivers/gpu/drm/exynos/regs-hdmi.h147
-rw-r--r--drivers/gpu/drm/exynos/regs-mixer.h141
-rw-r--r--drivers/gpu/drm/exynos/regs-vp.h91
-rw-r--r--drivers/gpu/drm/gma500/Kconfig (renamed from drivers/staging/gma500/Kconfig)24
-rw-r--r--drivers/gpu/drm/gma500/Makefile40
-rw-r--r--drivers/gpu/drm/gma500/accel_2d.c (renamed from drivers/staging/gma500/accel_2d.c)52
-rw-r--r--drivers/gpu/drm/gma500/backlight.c (renamed from drivers/staging/gma500/backlight.c)0
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.c (renamed from drivers/staging/gma500/cdv_device.c)7
-rw-r--r--drivers/gpu/drm/gma500/cdv_device.h (renamed from drivers/staging/gma500/cdv_device.h)0
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_crt.c (renamed from drivers/staging/gma500/cdv_intel_crt.c)53
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_display.c (renamed from drivers/staging/gma500/cdv_intel_display.c)18
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_hdmi.c (renamed from drivers/staging/gma500/cdv_intel_hdmi.c)126
-rw-r--r--drivers/gpu/drm/gma500/cdv_intel_lvds.c (renamed from drivers/staging/gma500/cdv_intel_lvds.c)117
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.c (renamed from drivers/staging/gma500/framebuffer.c)181
-rw-r--r--drivers/gpu/drm/gma500/framebuffer.h (renamed from drivers/staging/gma500/framebuffer.h)1
-rw-r--r--drivers/gpu/drm/gma500/gem.c (renamed from drivers/staging/gma500/gem.c)6
-rw-r--r--drivers/gpu/drm/gma500/gem_glue.c (renamed from drivers/staging/gma500/gem_glue.c)0
-rw-r--r--drivers/gpu/drm/gma500/gem_glue.h (renamed from drivers/staging/gma500/gem_glue.h)0
-rw-r--r--drivers/gpu/drm/gma500/gtt.c (renamed from drivers/staging/gma500/gtt.c)14
-rw-r--r--drivers/gpu/drm/gma500/gtt.h (renamed from drivers/staging/gma500/gtt.h)0
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.c (renamed from drivers/staging/gma500/intel_bios.c)2
-rw-r--r--drivers/gpu/drm/gma500/intel_bios.h (renamed from drivers/staging/gma500/intel_bios.h)0
-rw-r--r--drivers/gpu/drm/gma500/intel_gmbus.c493
-rw-r--r--drivers/gpu/drm/gma500/intel_i2c.c (renamed from drivers/staging/gma500/intel_i2c.c)3
-rw-r--r--drivers/gpu/drm/gma500/intel_opregion.c (renamed from drivers/staging/gma500/intel_opregion.c)0
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.c (renamed from drivers/staging/gma500/mid_bios.c)97
-rw-r--r--drivers/gpu/drm/gma500/mid_bios.h (renamed from drivers/staging/gma500/mid_bios.h)0
-rw-r--r--drivers/gpu/drm/gma500/mmu.c (renamed from drivers/staging/gma500/mmu.c)0
-rw-r--r--drivers/gpu/drm/gma500/oaktrail.h (renamed from drivers/staging/gma500/mrst.h)48
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_crtc.c (renamed from drivers/staging/gma500/mrst_crtc.c)96
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_device.c (renamed from drivers/staging/gma500/mrst_device.c)250
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi.c (renamed from drivers/staging/gma500/mrst_hdmi.c)167
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c (renamed from drivers/staging/gma500/mrst_hdmi_i2c.c)58
-rw-r--r--drivers/gpu/drm/gma500/oaktrail_lvds.c (renamed from drivers/staging/gma500/mrst_lvds.c)154
-rw-r--r--drivers/gpu/drm/gma500/power.c (renamed from drivers/staging/gma500/power.c)6
-rw-r--r--drivers/gpu/drm/gma500/power.h (renamed from drivers/staging/gma500/power.h)0
-rw-r--r--drivers/gpu/drm/gma500/psb_device.c (renamed from drivers/staging/gma500/psb_device.c)9
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.c703
-rw-r--r--drivers/gpu/drm/gma500/psb_drv.h (renamed from drivers/staging/gma500/psb_drv.h)80
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.c (renamed from drivers/staging/gma500/psb_intel_display.c)49
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_display.h (renamed from drivers/staging/gma500/psb_intel_display.h)0
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_drv.h (renamed from drivers/staging/gma500/psb_intel_drv.h)105
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_lvds.c (renamed from drivers/staging/gma500/psb_intel_lvds.c)152
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_modes.c (renamed from drivers/staging/gma500/psb_intel_modes.c)16
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_reg.h (renamed from drivers/staging/gma500/psb_intel_reg.h)74
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo.c2623
-rw-r--r--drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h723
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.c (renamed from drivers/staging/gma500/psb_irq.c)63
-rw-r--r--drivers/gpu/drm/gma500/psb_irq.h (renamed from drivers/staging/gma500/psb_irq.h)0
-rw-r--r--drivers/gpu/drm/gma500/psb_lid.c (renamed from drivers/staging/gma500/psb_lid.c)0
-rw-r--r--drivers/gpu/drm/gma500/psb_reg.h (renamed from drivers/staging/gma500/psb_reg.h)0
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c2
-rw-r--r--drivers/gpu/drm/i810/i810_drv.c23
-rw-r--r--drivers/gpu/drm/i915/Makefile1
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c109
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c6
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c94
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h18
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c10
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c63
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c25
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h201
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c11
-rw-r--r--drivers/gpu/drm/i915/intel_bios.h6
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c23
-rw-r--r--drivers/gpu/drm/i915/intel_display.c411
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c21
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h51
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c19
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c8
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c16
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c57
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c8
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo_regs.h2
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c664
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c138
-rw-r--r--drivers/gpu/drm/mga/mga_drv.c29
-rw-r--r--drivers/gpu/drm/nouveau/Makefile9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c79
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c904
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h72
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c123
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c403
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h36
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_crtc.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c208
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dp.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c80
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h135
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fb.h4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c22
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c23
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gpio.c400
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gpio.h71
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hdmi.c258
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hwsq.h115
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.c556
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.h21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mxm.c686
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_object.c25
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_perf.c18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.c382
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.h24
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c179
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c198
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_temp.c29
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.h2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_volt.c15
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c14
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dac.c14
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c16
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv04_pm.c109
-rw-r--r--drivers/gpu/drm/nouveau/nv04_timer.c3
-rw-r--r--drivers/gpu/drm/nouveau/nv10_gpio.c117
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c20
-rw-r--r--drivers/gpu/drm/nouveau/nv40_pm.c50
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c347
-rw-r--r--drivers/gpu/drm/nouveau/nv50_dac.c7
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c140
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.h4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_evo.c12
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fifo.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv50_gpio.c272
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_pm.c783
-rw-r--r--drivers/gpu/drm/nouveau/nv50_sor.c28
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv84_bsp.c83
-rw-r--r--drivers/gpu/drm/nouveau/nv84_vp.c83
-rw-r--r--drivers/gpu/drm/nouveau/nv98_crypt.c78
-rw-r--r--drivers/gpu/drm/nouveau/nv98_ppp.c78
-rw-r--r--drivers/gpu/drm/nouveau/nva3_copy.fuc262
-rw-r--r--drivers/gpu/drm/nouveau/nva3_copy.fuc.h2
-rw-r--r--drivers/gpu/drm/nouveau/nva3_pm.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_copy.fuc.h2
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.c6
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.fuc56
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_graph.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grctx.c127
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grgpc.fuc217
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h80
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grhub.fuc311
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h96
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_pm.c237
-rw-r--r--drivers/gpu/drm/nouveau/nvd0_display.c833
-rw-r--r--drivers/gpu/drm/r128/r128_drv.c30
-rw-r--r--drivers/gpu/drm/radeon/Makefile5
-rw-r--r--drivers/gpu/drm/radeon/atom.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c70
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c40
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c118
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c244
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_kms.c242
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c247
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h13
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h66
-rw-r--r--drivers/gpu/drm/radeon/ni.c397
-rw-r--r--drivers/gpu/drm/radeon/nid.h36
-rw-r--r--drivers/gpu/drm/radeon/r100.c242
-rw-r--r--drivers/gpu/drm/radeon/r200.c21
-rw-r--r--drivers/gpu/drm/radeon/r300.c168
-rw-r--r--drivers/gpu/drm/radeon/r420.c57
-rw-r--r--drivers/gpu/drm/radeon/r500_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/r520.c33
-rw-r--r--drivers/gpu/drm/radeon/r600.c277
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c57
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c270
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_shaders.c8
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c9
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c65
-rw-r--r--drivers/gpu/drm/radeon/r600d.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon.h449
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c197
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h48
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_benchmark.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c306
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c88
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c34
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c71
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c75
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c35
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c309
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c425
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c147
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c30
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c47
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h12
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c38
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.h32
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c34
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c483
-rw-r--r--drivers/gpu/drm/radeon/radeon_sa.c189
-rw-r--r--drivers/gpu/drm/radeon/radeon_semaphore.c178
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c269
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c355
-rw-r--r--drivers/gpu/drm/radeon/rs400.c35
-rw-r--r--drivers/gpu/drm/radeon/rs600.c59
-rw-r--r--drivers/gpu/drm/radeon/rs690.c38
-rw-r--r--drivers/gpu/drm/radeon/rv515.c114
-rw-r--r--drivers/gpu/drm/radeon/rv770.c64
-rw-r--r--drivers/gpu/drm/savage/savage_drv.c23
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c59
-rw-r--r--drivers/gpu/drm/sis/sis_drv.h7
-rw-r--r--drivers/gpu/drm/sis/sis_mm.c199
-rw-r--r--drivers/gpu/drm/tdfx/tdfx_drv.c23
-rw-r--r--drivers/gpu/drm/ttm/Makefile4
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c105
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c95
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c32
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c9
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc.c184
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c1142
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c324
-rw-r--r--drivers/gpu/drm/via/via_drv.c48
-rw-r--r--drivers/gpu/drm/via/via_drv.h7
-rw-r--r--drivers/gpu/drm/via/via_map.c10
-rw-r--r--drivers/gpu/drm/via/via_mm.c135
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c71
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c30
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c36
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.h1
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c35
-rw-r--r--drivers/hid/Kconfig38
-rw-r--r--drivers/hid/Makefile11
-rw-r--r--drivers/hid/hid-core.c82
-rw-r--r--drivers/hid/hid-debug.c8
-rw-r--r--drivers/hid/hid-emsff.c2
-rw-r--r--drivers/hid/hid-hyperv.c (renamed from drivers/staging/hv/hv_mouse.c)304
-rw-r--r--drivers/hid/hid-ids.h46
-rw-r--r--drivers/hid/hid-input.c233
-rw-r--r--drivers/hid/hid-lg4ff.c2
-rw-r--r--drivers/hid/hid-multitouch.c213
-rw-r--r--drivers/hid/hid-picolcd.c4
-rw-r--r--drivers/hid/hid-pl.c4
-rw-r--r--drivers/hid/hid-prodikeys.c2
-rw-r--r--drivers/hid/hid-quanta.c261
-rw-r--r--drivers/hid/hid-roccat-common.c4
-rw-r--r--drivers/hid/hid-roccat-isku.c487
-rw-r--r--drivers/hid/hid-roccat-isku.h147
-rw-r--r--drivers/hid/hid-roccat-kone.c4
-rw-r--r--drivers/hid/hid-twinhan.c2
-rw-r--r--drivers/hid/hid-wacom.c196
-rw-r--r--drivers/hid/hid-wiimote-core.c (renamed from drivers/hid/hid-wiimote.c)256
-rw-r--r--drivers/hid/hid-wiimote-debug.c227
-rw-r--r--drivers/hid/hid-wiimote-ext.c752
-rw-r--r--drivers/hid/hid-wiimote.h208
-rw-r--r--drivers/hid/usbhid/hid-core.c241
-rw-r--r--drivers/hid/usbhid/hid-quirks.c5
-rw-r--r--drivers/hid/usbhid/hiddev.c6
-rw-r--r--drivers/hid/usbhid/usbhid.h3
-rw-r--r--drivers/hid/usbhid/usbkbd.c81
-rw-r--r--drivers/hid/usbhid/usbmouse.c17
-rw-r--r--drivers/hv/Kconfig4
-rw-r--r--drivers/hv/channel_mgmt.c12
-rw-r--r--drivers/hv/hv.c8
-rw-r--r--drivers/hv/hv_kvp.c10
-rw-r--r--drivers/hv/hyperv_vmbus.h1
-rw-r--r--drivers/hv/vmbus_drv.c30
-rw-r--r--drivers/hwmon/Kconfig17
-rw-r--r--drivers/hwmon/abituguru.c2
-rw-r--r--drivers/hwmon/abituguru3.c4
-rw-r--r--drivers/hwmon/acpi_power_meter.c8
-rw-r--r--drivers/hwmon/adcxx.c2
-rw-r--r--drivers/hwmon/adm1021.c2
-rw-r--r--drivers/hwmon/adm1031.c155
-rw-r--r--drivers/hwmon/adm9240.c2
-rw-r--r--drivers/hwmon/ads1015.c3
-rw-r--r--drivers/hwmon/ads7828.c4
-rw-r--r--drivers/hwmon/adt7411.c2
-rw-r--r--drivers/hwmon/adt7462.c28
-rw-r--r--drivers/hwmon/adt7470.c26
-rw-r--r--drivers/hwmon/adt7475.c20
-rw-r--r--drivers/hwmon/amc6821.c14
-rw-r--r--drivers/hwmon/applesmc.c6
-rw-r--r--drivers/hwmon/asc7621.c24
-rw-r--r--drivers/hwmon/coretemp.c33
-rw-r--r--drivers/hwmon/dme1737.c10
-rw-r--r--drivers/hwmon/ds620.c2
-rw-r--r--drivers/hwmon/emc1403.c6
-rw-r--r--drivers/hwmon/emc2103.c12
-rw-r--r--drivers/hwmon/emc6w201.c6
-rw-r--r--drivers/hwmon/f71805f.c10
-rw-r--r--drivers/hwmon/f71882fg.c32
-rw-r--r--drivers/hwmon/f75375s.c382
-rw-r--r--drivers/hwmon/g760a.c2
-rw-r--r--drivers/hwmon/gpio-fan.c6
-rw-r--r--drivers/hwmon/ibmaem.c2
-rw-r--r--drivers/hwmon/it87.c99
-rw-r--r--drivers/hwmon/jc42.c36
-rw-r--r--drivers/hwmon/lm63.c592
-rw-r--r--drivers/hwmon/lm73.c2
-rw-r--r--drivers/hwmon/lm75.c25
-rw-r--r--drivers/hwmon/lm75.h5
-rw-r--r--drivers/hwmon/lm80.c70
-rw-r--r--drivers/hwmon/lm90.c12
-rw-r--r--drivers/hwmon/lm93.c4
-rw-r--r--drivers/hwmon/lm95241.c8
-rw-r--r--drivers/hwmon/lm95245.c8
-rw-r--r--drivers/hwmon/ltc4261.c1
-rw-r--r--drivers/hwmon/max1111.c17
-rw-r--r--drivers/hwmon/max16065.c2
-rw-r--r--drivers/hwmon/max1668.c6
-rw-r--r--drivers/hwmon/max6639.c30
-rw-r--r--drivers/hwmon/max6642.c2
-rw-r--r--drivers/hwmon/max6650.c2
-rw-r--r--drivers/hwmon/pc87427.c6
-rw-r--r--drivers/hwmon/pmbus/Kconfig9
-rw-r--r--drivers/hwmon/pmbus/adm1275.c71
-rw-r--r--drivers/hwmon/pmbus/max34440.c2
-rw-r--r--drivers/hwmon/pmbus/pmbus.c2
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c3
-rw-r--r--drivers/hwmon/pmbus/zl6100.c53
-rw-r--r--drivers/hwmon/sht15.c5
-rw-r--r--drivers/hwmon/tmp102.c2
-rw-r--r--drivers/hwmon/tmp401.c10
-rw-r--r--drivers/hwmon/tmp421.c2
-rw-r--r--drivers/hwmon/w83627ehf.c56
-rw-r--r--drivers/hwmon/w83627hf.c2
-rw-r--r--drivers/hwmon/w83781d.c4
-rw-r--r--drivers/hwmon/w83791d.c12
-rw-r--r--drivers/hwmon/w83792d.c4
-rw-r--r--drivers/hwmon/w83793.c4
-rw-r--r--drivers/hwmon/w83795.c34
-rw-r--r--drivers/hwmon/w83l786ng.c2
-rw-r--r--drivers/i2c/busses/Kconfig18
-rw-r--r--drivers/i2c/busses/i2c-ali1535.c38
-rw-r--r--drivers/i2c/busses/i2c-ali1563.c2
-rw-r--r--drivers/i2c/busses/i2c-ali15x3.c2
-rw-r--r--drivers/i2c/busses/i2c-amd756.c2
-rw-r--r--drivers/i2c/busses/i2c-amd8111.c2
-rw-r--r--drivers/i2c/busses/i2c-at91.c17
-rw-r--r--drivers/i2c/busses/i2c-au1550.c13
-rw-r--r--drivers/i2c/busses/i2c-cpm.c13
-rw-r--r--drivers/i2c/busses/i2c-designware-pcidrv.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c12
-rw-r--r--drivers/i2c/busses/i2c-diolan-u2c.c15
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c21
-rw-r--r--drivers/i2c/busses/i2c-highlander.c15
-rw-r--r--drivers/i2c/busses/i2c-hydra.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c2
-rw-r--r--drivers/i2c/busses/i2c-ibm_iic.c17
-rw-r--r--drivers/i2c/busses/i2c-intel-mid.c2
-rw-r--r--drivers/i2c/busses/i2c-iop3xx.c16
-rw-r--r--drivers/i2c/busses/i2c-isch.c13
-rw-r--r--drivers/i2c/busses/i2c-ixp2000.c13
-rw-r--r--drivers/i2c/busses/i2c-mpc.c13
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c15
-rw-r--r--drivers/i2c/busses/i2c-mxs.c13
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c4
-rw-r--r--drivers/i2c/busses/i2c-ocores.c17
-rw-r--r--drivers/i2c/busses/i2c-octeon.c16
-rw-r--r--drivers/i2c/busses/i2c-omap.c110
-rw-r--r--drivers/i2c/busses/i2c-pasemi.c2
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c14
-rw-r--r--drivers/i2c/busses/i2c-piix4.c2
-rw-r--r--drivers/i2c/busses/i2c-pmcmsp.c17
-rw-r--r--drivers/i2c/busses/i2c-powermac.c19
-rw-r--r--drivers/i2c/busses/i2c-puv3.c16
-rw-r--r--drivers/i2c/busses/i2c-pxa-pci.c2
-rw-r--r--drivers/i2c/busses/i2c-sh7760.c13
-rw-r--r--drivers/i2c/busses/i2c-simtec.c18
-rw-r--r--drivers/i2c/busses/i2c-sis5595.c6
-rw-r--r--drivers/i2c/busses/i2c-sis630.c12
-rw-r--r--drivers/i2c/busses/i2c-sis96x.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra.c12
-rw-r--r--drivers/i2c/busses/i2c-tiny-usb.c15
-rw-r--r--drivers/i2c/busses/i2c-via.c2
-rw-r--r--drivers/i2c/busses/i2c-viapro.c11
-rw-r--r--drivers/i2c/busses/i2c-xiic.c20
-rw-r--r--drivers/i2c/busses/scx200_acb.c2
-rw-r--r--drivers/i2c/i2c-dev.c13
-rw-r--r--drivers/i2c/muxes/gpio-i2cmux.c13
-rw-r--r--drivers/ide/Makefile1
-rw-r--r--drivers/ide/ali14xx.c2
-rw-r--r--drivers/ide/at91_ide.c366
-rw-r--r--drivers/ide/cmd640.c2
-rw-r--r--drivers/ide/dtc2278.c2
-rw-r--r--drivers/ide/gayle.c2
-rw-r--r--drivers/ide/ht6560b.c2
-rw-r--r--drivers/ide/ide-4drives.c2
-rw-r--r--drivers/ide/ide-acpi.c6
-rw-r--r--drivers/ide/ide-floppy_ioctl.c3
-rw-r--r--drivers/ide/ide-pci-generic.c2
-rw-r--r--drivers/ide/qd65xx.c2
-rw-r--r--drivers/ide/umc8672.c2
-rw-r--r--drivers/idle/intel_idle.c98
-rw-r--r--drivers/ieee802154/fakehard.c2
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/Makefile1
-rw-r--r--drivers/infiniband/core/addr.c53
-rw-r--r--drivers/infiniband/core/cm.c2
-rw-r--r--drivers/infiniband/core/cm_msgs.h1
-rw-r--r--drivers/infiniband/core/cma.c14
-rw-r--r--drivers/infiniband/core/ucm.c3
-rw-r--r--drivers/infiniband/core/ucma.c5
-rw-r--r--drivers/infiniband/core/user_mad.c2
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c28
-rw-r--r--drivers/infiniband/core/uverbs_main.c2
-rw-r--r--drivers/infiniband/core/verbs.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c15
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c220
-rw-r--r--drivers/infiniband/hw/ehca/ehca_classes.h4
-rw-r--r--drivers/infiniband/hw/ehca/ehca_main.c10
-rw-r--r--drivers/infiniband/hw/ipath/ipath_fs.c8
-rw-r--r--drivers/infiniband/hw/mlx4/ah.c2
-rw-r--r--drivers/infiniband/hw/mlx4/cq.c6
-rw-r--r--drivers/infiniband/hw/mlx4/mad.c13
-rw-r--r--drivers/infiniband/hw/mlx4/main.c14
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c4
-rw-r--r--drivers/infiniband/hw/nes/nes.c4
-rw-r--r--drivers/infiniband/hw/nes/nes.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c23
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_context.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c8
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_mgt.c2
-rw-r--r--drivers/infiniband/hw/nes/nes_mgt.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c8
-rw-r--r--drivers/infiniband/hw/nes/nes_user.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_utils.c4
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c6
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_7220.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_driver.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_fs.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c6
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c10
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c40
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_pcie.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_qsfp.h2
-rw-r--r--drivers/infiniband/hw/qib/qib_sd7220.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_sysfs.c2
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c43
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h6
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c85
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c12
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c2
-rw-r--r--drivers/infiniband/ulp/srpt/Kconfig12
-rw-r--r--drivers/infiniband/ulp/srpt/Makefile2
-rw-r--r--drivers/infiniband/ulp/srpt/ib_dm_mad.h139
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c4070
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.h443
-rw-r--r--drivers/input/evdev.c24
-rw-r--r--drivers/input/input-polldev.c8
-rw-r--r--drivers/input/input.c2
-rw-r--r--drivers/input/joystick/xpad.c19
-rw-r--r--drivers/input/keyboard/Kconfig21
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/adp5520-keys.c13
-rw-r--r--drivers/input/keyboard/atkbd.c40
-rw-r--r--drivers/input/keyboard/bf54x-keys.c16
-rw-r--r--drivers/input/keyboard/ep93xx_keypad.c14
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c14
-rw-r--r--drivers/input/keyboard/imx_keypad.c14
-rw-r--r--drivers/input/keyboard/jornada680_kbd.c14
-rw-r--r--drivers/input/keyboard/jornada720_kbd.c14
-rw-r--r--drivers/input/keyboard/lm8323.c11
-rw-r--r--drivers/input/keyboard/matrix_keypad.c14
-rw-r--r--drivers/input/keyboard/nomadik-ske-keypad.c2
-rw-r--r--drivers/input/keyboard/omap-keypad.c15
-rw-r--r--drivers/input/keyboard/omap4-keypad.c13
-rw-r--r--drivers/input/keyboard/opencores-kbd.c13
-rw-r--r--drivers/input/keyboard/pmic8xxx-keypad.c13
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c14
-rw-r--r--drivers/input/keyboard/pxa930_rotary.c13
-rw-r--r--drivers/input/keyboard/samsung-keypad.c278
-rw-r--r--drivers/input/keyboard/sh_keysc.c14
-rw-r--r--drivers/input/keyboard/spear-keyboard.c13
-rw-r--r--drivers/input/keyboard/stmpe-keypad.c13
-rw-r--r--drivers/input/keyboard/tc3589x-keypad.c15
-rw-r--r--drivers/input/keyboard/tca8418_keypad.c430
-rw-r--r--drivers/input/keyboard/tegra-kbc.c132
-rw-r--r--drivers/input/keyboard/tnetv107x-keypad.c14
-rw-r--r--drivers/input/keyboard/twl4030_keypad.c17
-rw-r--r--drivers/input/keyboard/w90p910_keypad.c14
-rw-r--r--drivers/input/misc/88pm860x_onkey.c13
-rw-r--r--drivers/input/misc/Kconfig25
-rw-r--r--drivers/input/misc/Makefile2
-rw-r--r--drivers/input/misc/ab8500-ponkey.c15
-rw-r--r--drivers/input/misc/adxl34x-spi.c1
-rw-r--r--drivers/input/misc/adxl34x.c16
-rw-r--r--drivers/input/misc/ati_remote2.c40
-rw-r--r--drivers/input/misc/bfin_rotary.c13
-rw-r--r--drivers/input/misc/cobalt_btns.c14
-rw-r--r--drivers/input/misc/dm355evm_keys.c13
-rw-r--r--drivers/input/misc/gp2ap002a00f.c299
-rw-r--r--drivers/input/misc/gpio_tilt_polled.c213
-rw-r--r--drivers/input/misc/ixp4xx-beeper.c13
-rw-r--r--drivers/input/misc/keyspan_remote.c21
-rw-r--r--drivers/input/misc/max8925_onkey.c13
-rw-r--r--drivers/input/misc/mc13783-pwrbutton.c14
-rw-r--r--drivers/input/misc/mpu3050.c128
-rw-r--r--drivers/input/misc/pcap_keys.c14
-rw-r--r--drivers/input/misc/pcf50633-input.c13
-rw-r--r--drivers/input/misc/pcspkr.c14
-rw-r--r--drivers/input/misc/pm8xxx-vibrator.c13
-rw-r--r--drivers/input/misc/pmic8xxx-pwrkey.c13
-rw-r--r--drivers/input/misc/powermate.c13
-rw-r--r--drivers/input/misc/pwm-beeper.c13
-rw-r--r--drivers/input/misc/rb532_button.c14
-rw-r--r--drivers/input/misc/rotary_encoder.c14
-rw-r--r--drivers/input/misc/sgi_btns.c13
-rw-r--r--drivers/input/misc/twl4030-vibra.c20
-rw-r--r--drivers/input/misc/twl6040-vibra.c13
-rw-r--r--drivers/input/misc/wistron_btns.c2
-rw-r--r--drivers/input/misc/wm831x-on.c13
-rw-r--r--drivers/input/misc/xen-kbdfront.c7
-rw-r--r--drivers/input/misc/yealink.c17
-rw-r--r--drivers/input/mouse/alps.c1043
-rw-r--r--drivers/input/mouse/alps.h19
-rw-r--r--drivers/input/mouse/appletouch.c13
-rw-r--r--drivers/input/mouse/bcm5974.c17
-rw-r--r--drivers/input/mouse/elantech.c80
-rw-r--r--drivers/input/mouse/elantech.h2
-rw-r--r--drivers/input/mouse/gpio_mouse.c13
-rw-r--r--drivers/input/mouse/hgpk.c18
-rw-r--r--drivers/input/mouse/logips2pp.c9
-rw-r--r--drivers/input/mouse/psmouse-base.c231
-rw-r--r--drivers/input/mouse/psmouse.h3
-rw-r--r--drivers/input/mouse/pxa930_trkball.c14
-rw-r--r--drivers/input/mouse/sentelic.c43
-rw-r--r--drivers/input/mouse/synaptics.c197
-rw-r--r--drivers/input/mouse/synaptics.h5
-rw-r--r--drivers/input/mouse/synaptics_i2c.c6
-rw-r--r--drivers/input/mouse/trackpoint.c17
-rw-r--r--drivers/input/serio/altera_ps2.c13
-rw-r--r--drivers/input/serio/ambakmi.c2
-rw-r--r--drivers/input/serio/hp_sdc.c2
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h7
-rw-r--r--drivers/input/serio/i8042.c23
-rw-r--r--drivers/input/serio/rpckbd.c14
-rw-r--r--drivers/input/serio/serio_raw.c23
-rw-r--r--drivers/input/serio/xilinx_ps2.c16
-rw-r--r--drivers/input/tablet/Kconfig2
-rw-r--r--drivers/input/tablet/acecad.c17
-rw-r--r--drivers/input/tablet/aiptek.c53
-rw-r--r--drivers/input/tablet/gtco.c28
-rw-r--r--drivers/input/tablet/hanwang.c13
-rw-r--r--drivers/input/tablet/kbtab.c20
-rw-r--r--drivers/input/tablet/wacom_sys.c120
-rw-r--r--drivers/input/tablet/wacom_wac.c187
-rw-r--r--drivers/input/tablet/wacom_wac.h5
-rw-r--r--drivers/input/touchscreen/88pm860x-ts.c13
-rw-r--r--drivers/input/touchscreen/Kconfig41
-rw-r--r--drivers/input/touchscreen/Makefile3
-rw-r--r--drivers/input/touchscreen/ad7877.c21
-rw-r--r--drivers/input/touchscreen/ad7879-i2c.c31
-rw-r--r--drivers/input/touchscreen/ad7879-spi.c27
-rw-r--r--drivers/input/touchscreen/ad7879.c23
-rw-r--r--drivers/input/touchscreen/ad7879.h4
-rw-r--r--drivers/input/touchscreen/ads7846.c9
-rw-r--r--drivers/input/touchscreen/atmel_tsadcc.c15
-rw-r--r--drivers/input/touchscreen/auo-pixcir-ts.c652
-rw-r--r--drivers/input/touchscreen/da9034-ts.c13
-rw-r--r--drivers/input/touchscreen/eeti_ts.c4
-rw-r--r--drivers/input/touchscreen/egalax_ts.c303
-rw-r--r--drivers/input/touchscreen/htcpen.c11
-rw-r--r--drivers/input/touchscreen/intel-mid-touch.c13
-rw-r--r--drivers/input/touchscreen/jornada720_ts.c14
-rw-r--r--drivers/input/touchscreen/lpc32xx_ts.c13
-rw-r--r--drivers/input/touchscreen/mainstone-wm97xx.c14
-rw-r--r--drivers/input/touchscreen/migor_ts.c117
-rw-r--r--drivers/input/touchscreen/pcap_ts.c14
-rw-r--r--drivers/input/touchscreen/pixcir_i2c_ts.c239
-rw-r--r--drivers/input/touchscreen/s3c2410_ts.c14
-rw-r--r--drivers/input/touchscreen/st1232.c13
-rw-r--r--drivers/input/touchscreen/stmpe-ts.c15
-rw-r--r--drivers/input/touchscreen/tnetv107x-ts.c14
-rw-r--r--drivers/input/touchscreen/tps6507x-ts.c13
-rw-r--r--drivers/input/touchscreen/tsc2005.c4
-rw-r--r--drivers/input/touchscreen/ucb1400_ts.c289
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c53
-rw-r--r--drivers/input/touchscreen/w90p910_ts.c14
-rw-r--r--drivers/input/touchscreen/wm831x-ts.c13
-rw-r--r--drivers/input/touchscreen/zylonite-wm97xx.c19
-rw-r--r--drivers/iommu/Kconfig13
-rw-r--r--drivers/iommu/Makefile1
-rw-r--r--drivers/iommu/amd_iommu.c886
-rw-r--r--drivers/iommu/amd_iommu_init.c135
-rw-r--r--drivers/iommu/amd_iommu_proto.h24
-rw-r--r--drivers/iommu/amd_iommu_types.h118
-rw-r--r--drivers/iommu/amd_iommu_v2.c994
-rw-r--r--drivers/iommu/intel-iommu.c104
-rw-r--r--drivers/iommu/iommu.c177
-rw-r--r--drivers/iommu/msm_iommu.c32
-rw-r--r--drivers/iommu/omap-iommu-debug.c59
-rw-r--r--drivers/iommu/omap-iommu.c83
-rw-r--r--drivers/iommu/omap-iovmm.c48
-rw-r--r--drivers/isdn/gigaset/i4l.c3
-rw-r--r--drivers/isdn/hardware/avm/b1dma.c2
-rw-r--r--drivers/isdn/hardware/avm/c4.c2
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c28
-rw-r--r--drivers/isdn/hisax/enternow_pci.c2
-rw-r--r--drivers/isdn/i4l/Kconfig2
-rw-r--r--drivers/isdn/i4l/isdn_net.c2
-rw-r--r--drivers/isdn/sc/init.c2
-rw-r--r--drivers/leds/Kconfig25
-rw-r--r--drivers/leds/Makefile3
-rw-r--r--drivers/leds/led-class.c1
-rw-r--r--drivers/leds/led-triggers.c1
-rw-r--r--drivers/leds/leds-88pm860x.c12
-rw-r--r--drivers/leds/leds-adp5520.c12
-rw-r--r--drivers/leds/leds-ams-delta.c13
-rw-r--r--drivers/leds/leds-asic3.c16
-rw-r--r--drivers/leds/leds-atmel-pwm.c17
-rw-r--r--drivers/leds/leds-bd2802.c15
-rw-r--r--drivers/leds/leds-clevo-mail.c2
-rw-r--r--drivers/leds/leds-cobalt-qube.c17
-rw-r--r--drivers/leds/leds-da903x.c12
-rw-r--r--drivers/leds/leds-dac124s085.c13
-rw-r--r--drivers/leds/leds-fsg.c15
-rw-r--r--drivers/leds/leds-gpio.c16
-rw-r--r--drivers/leds/leds-hp6xx.c17
-rw-r--r--drivers/leds/leds-lm3530.c17
-rw-r--r--drivers/leds/leds-lp3944.c13
-rw-r--r--drivers/leds/leds-lp5521.c20
-rw-r--r--drivers/leds/leds-lp5523.c22
-rw-r--r--drivers/leds/leds-lt3593.c16
-rw-r--r--drivers/leds/leds-max8997.c372
-rw-r--r--drivers/leds/leds-mc13783.c14
-rw-r--r--drivers/leds/leds-netxbig.c39
-rw-r--r--drivers/leds/leds-ns2.c15
-rw-r--r--drivers/leds/leds-ot200.c171
-rw-r--r--drivers/leds/leds-pca9532.c14
-rw-r--r--drivers/leds/leds-pca955x.c13
-rw-r--r--drivers/leds/leds-pwm.c13
-rw-r--r--drivers/leds/leds-rb532.c16
-rw-r--r--drivers/leds/leds-regulator.c12
-rw-r--r--drivers/leds/leds-renesas-tpu.c13
-rw-r--r--drivers/leds/leds-s3c24xx.c13
-rw-r--r--drivers/leds/leds-ss4200.c2
-rw-r--r--drivers/leds/leds-tca6507.c779
-rw-r--r--drivers/leds/leds-wm831x-status.c17
-rw-r--r--drivers/leds/leds-wm8350.c19
-rw-r--r--drivers/lguest/Makefile2
-rw-r--r--drivers/lguest/lguest_device.c24
-rw-r--r--drivers/lguest/segments.c28
-rw-r--r--drivers/lguest/x86/core.c2
-rw-r--r--drivers/macintosh/adb.c4
-rw-r--r--drivers/macintosh/ams/ams-core.c2
-rw-r--r--drivers/macintosh/ams/ams-input.c4
-rw-r--r--drivers/macintosh/rack-meter.c14
-rw-r--r--drivers/macintosh/smu.c4
-rw-r--r--drivers/macintosh/therm_adt746x.c2
-rw-r--r--drivers/md/bitmap.c12
-rw-r--r--drivers/md/dm-flakey.c13
-rw-r--r--drivers/md/dm-io.c23
-rw-r--r--drivers/md/dm-ioctl.c2
-rw-r--r--drivers/md/dm-linear.c12
-rw-r--r--drivers/md/dm-mpath.c6
-rw-r--r--drivers/md/dm-raid.c29
-rw-r--r--drivers/md/dm-table.c6
-rw-r--r--drivers/md/dm-thin-metadata.c25
-rw-r--r--drivers/md/dm.c1
-rw-r--r--drivers/md/md.c122
-rw-r--r--drivers/md/md.h82
-rw-r--r--drivers/md/multipath.c7
-rw-r--r--drivers/md/raid1.c185
-rw-r--r--drivers/md/raid1.h7
-rw-r--r--drivers/md/raid10.c620
-rw-r--r--drivers/md/raid10.h61
-rw-r--r--drivers/md/raid5.c557
-rw-r--r--drivers/md/raid5.h98
-rw-r--r--drivers/media/dvb/b2c2/flexcop-usb.c20
-rw-r--r--drivers/media/dvb/ddbridge/ddbridge-core.c2
-rw-r--r--drivers/media/dvb/dvb-core/dvbdev.c2
-rw-r--r--drivers/media/dvb/dvb-usb/a800.c21
-rw-r--r--drivers/media/dvb/dvb-usb/af9005.c2
-rw-r--r--drivers/media/dvb/dvb-usb/af9005.h2
-rw-r--r--drivers/media/dvb/dvb-usb/af9015.c20
-rw-r--r--drivers/media/dvb/dvb-usb/anysee.c21
-rw-r--r--drivers/media/dvb/dvb-usb/au6610.c21
-rw-r--r--drivers/media/dvb/dvb-usb/az6027.c23
-rw-r--r--drivers/media/dvb/dvb-usb/ce6230.c22
-rw-r--r--drivers/media/dvb/dvb-usb/cinergyT2-core.c20
-rw-r--r--drivers/media/dvb/dvb-usb/cxusb.c21
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_core.c22
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-mb.c21
-rw-r--r--drivers/media/dvb/dvb-usb/dibusb-mc.c21
-rw-r--r--drivers/media/dvb/dvb-usb/digitv.c21
-rw-r--r--drivers/media/dvb/dvb-usb/dtt200u.c21
-rw-r--r--drivers/media/dvb/dvb-usb/dtv5100.c21
-rw-r--r--drivers/media/dvb/dvb-usb/dw2102.c17
-rw-r--r--drivers/media/dvb/dvb-usb/ec168.c22
-rw-r--r--drivers/media/dvb/dvb-usb/friio.c23
-rw-r--r--drivers/media/dvb/dvb-usb/gl861.c21
-rw-r--r--drivers/media/dvb/dvb-usb/gp8psk.c21
-rw-r--r--drivers/media/dvb/dvb-usb/it913x.c21
-rw-r--r--drivers/media/dvb/dvb-usb/lmedm04.c21
-rw-r--r--drivers/media/dvb/dvb-usb/m920x.c22
-rw-r--r--drivers/media/dvb/dvb-usb/mxl111sf.c19
-rw-r--r--drivers/media/dvb/dvb-usb/nova-t-usb2.c21
-rw-r--r--drivers/media/dvb/dvb-usb/opera1.c17
-rw-r--r--drivers/media/dvb/dvb-usb/pctv452e.c17
-rw-r--r--drivers/media/dvb/dvb-usb/technisat-usb2.c20
-rw-r--r--drivers/media/dvb/dvb-usb/ttusb2.c21
-rw-r--r--drivers/media/dvb/dvb-usb/umt-010.c21
-rw-r--r--drivers/media/dvb/dvb-usb/vp702x.c21
-rw-r--r--drivers/media/dvb/dvb-usb/vp7045.c21
-rw-r--r--drivers/media/dvb/siano/smsusb.c21
-rw-r--r--drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c21
-rw-r--r--drivers/media/dvb/ttusb-dec/ttusb_dec.c21
-rw-r--r--drivers/media/radio/dsbr100.c16
-rw-r--r--drivers/media/radio/radio-gemtek.c10
-rw-r--r--drivers/media/radio/radio-miropcm20.c2
-rw-r--r--drivers/media/radio/radio-mr800.c23
-rw-r--r--drivers/media/radio/si470x/radio-si470x-usb.c28
-rw-r--r--drivers/media/rc/ati_remote.c33
-rw-r--r--drivers/media/rc/ene_ir.c2
-rw-r--r--drivers/media/rc/ene_ir.h2
-rw-r--r--drivers/media/rc/imon.c21
-rw-r--r--drivers/media/rc/lirc_dev.c2
-rw-r--r--drivers/media/rc/mceusb.c24
-rw-r--r--drivers/media/rc/rc-main.c2
-rw-r--r--drivers/media/rc/redrat3.c20
-rw-r--r--drivers/media/rc/streamzap.c32
-rw-r--r--drivers/media/rc/winbond-cir.c6
-rw-r--r--drivers/media/video/c-qcam.c2
-rw-r--r--drivers/media/video/cs5345.c2
-rw-r--r--drivers/media/video/cs53l32a.c2
-rw-r--r--drivers/media/video/cx18/cx18-driver.c2
-rw-r--r--drivers/media/video/cx231xx/cx231xx-cards.c24
-rw-r--r--drivers/media/video/cx25821/cx25821-alsa.c2
-rw-r--r--drivers/media/video/cx88/cx88-alsa.c2
-rw-r--r--drivers/media/video/davinci/vpif.h1
-rw-r--r--drivers/media/video/davinci/vpif_capture.h2
-rw-r--r--drivers/media/video/davinci/vpif_display.h1
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c24
-rw-r--r--drivers/media/video/et61x251/et61x251_core.c29
-rw-r--r--drivers/media/video/gspca/benq.c13
-rw-r--r--drivers/media/video/gspca/conex.c13
-rw-r--r--drivers/media/video/gspca/cpia1.c13
-rw-r--r--drivers/media/video/gspca/etoms.c14
-rw-r--r--drivers/media/video/gspca/finepix.c14
-rw-r--r--drivers/media/video/gspca/gl860/gl860.c17
-rw-r--r--drivers/media/video/gspca/jeilinj.c14
-rw-r--r--drivers/media/video/gspca/kinect.c14
-rw-r--r--drivers/media/video/gspca/konica.c13
-rw-r--r--drivers/media/video/gspca/m5602/m5602_core.c18
-rw-r--r--drivers/media/video/gspca/m5602/m5602_mt9m111.h2
-rw-r--r--drivers/media/video/gspca/m5602/m5602_ov7660.h2
-rw-r--r--drivers/media/video/gspca/m5602/m5602_ov9650.h2
-rw-r--r--drivers/media/video/gspca/m5602/m5602_po1030.h2
-rw-r--r--drivers/media/video/gspca/m5602/m5602_s5k4aa.h2
-rw-r--r--drivers/media/video/gspca/m5602/m5602_s5k83a.h2
-rw-r--r--drivers/media/video/gspca/mars.c13
-rw-r--r--drivers/media/video/gspca/mr97310a.c13
-rw-r--r--drivers/media/video/gspca/nw80x.c13
-rw-r--r--drivers/media/video/gspca/ov519.c13
-rw-r--r--drivers/media/video/gspca/ov534.c14
-rw-r--r--drivers/media/video/gspca/ov534_9.c14
-rw-r--r--drivers/media/video/gspca/pac207.c13
-rw-r--r--drivers/media/video/gspca/pac7302.c13
-rw-r--r--drivers/media/video/gspca/pac7311.c13
-rw-r--r--drivers/media/video/gspca/se401.c13
-rw-r--r--drivers/media/video/gspca/sn9c2028.c14
-rw-r--r--drivers/media/video/gspca/sn9c20x.c13
-rw-r--r--drivers/media/video/gspca/sonixb.c13
-rw-r--r--drivers/media/video/gspca/sonixj.c13
-rw-r--r--drivers/media/video/gspca/spca1528.c13
-rw-r--r--drivers/media/video/gspca/spca500.c13
-rw-r--r--drivers/media/video/gspca/spca501.c13
-rw-r--r--drivers/media/video/gspca/spca505.c13
-rw-r--r--drivers/media/video/gspca/spca506.c19
-rw-r--r--drivers/media/video/gspca/spca508.c13
-rw-r--r--drivers/media/video/gspca/spca561.c13
-rw-r--r--drivers/media/video/gspca/sq905.c14
-rw-r--r--drivers/media/video/gspca/sq905c.c14
-rw-r--r--drivers/media/video/gspca/sq930x.c13
-rw-r--r--drivers/media/video/gspca/stk014.c13
-rw-r--r--drivers/media/video/gspca/stv0680.c13
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx.c17
-rw-r--r--drivers/media/video/gspca/sunplus.c13
-rw-r--r--drivers/media/video/gspca/t613.c13
-rw-r--r--drivers/media/video/gspca/topro.c13
-rw-r--r--drivers/media/video/gspca/tv8532.c14
-rw-r--r--drivers/media/video/gspca/vc032x.c13
-rw-r--r--drivers/media/video/gspca/vicam.c14
-rw-r--r--drivers/media/video/gspca/xirlink_cit.c13
-rw-r--r--drivers/media/video/gspca/zc3xx.c13
-rw-r--r--drivers/media/video/hdpvr/hdpvr-core.c23
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.c2
-rw-r--r--drivers/media/video/ivtv/ivtvfb.c2
-rw-r--r--drivers/media/video/marvell-ccic/mcam-core.c6
-rw-r--r--drivers/media/video/msp3400-driver.c6
-rw-r--r--drivers/media/video/msp3400-driver.h6
-rw-r--r--drivers/media/video/mx3_camera.c2
-rw-r--r--drivers/media/video/omap/omap_vout.c39
-rw-r--r--drivers/media/video/omap/omap_vout_vrfb.c2
-rw-r--r--drivers/media/video/omap3isp/isp.c30
-rw-r--r--drivers/media/video/omap3isp/isp.h2
-rw-r--r--drivers/media/video/omap3isp/ispccdc.c18
-rw-r--r--drivers/media/video/omap3isp/ispstat.c8
-rw-r--r--drivers/media/video/omap3isp/ispvideo.c4
-rw-r--r--drivers/media/video/ov7670.c2
-rw-r--r--drivers/media/video/s2255drv.c20
-rw-r--r--drivers/media/video/saa7115.c2
-rw-r--r--drivers/media/video/sn9c102/sn9c102_core.c29
-rw-r--r--drivers/media/video/stk-webcam.c27
-rw-r--r--drivers/media/video/timblogiw.c2
-rw-r--r--drivers/media/video/tm6000/tm6000-alsa.c2
-rw-r--r--drivers/media/video/tm6000/tm6000-cards.c26
-rw-r--r--drivers/media/video/tvp514x.c2
-rw-r--r--drivers/media/video/tvp7002.c2
-rw-r--r--drivers/media/video/upd64083.c2
-rw-r--r--drivers/media/video/via-camera.c4
-rw-r--r--drivers/media/video/zoran/zoran_device.c2
-rw-r--r--drivers/media/video/zoran/zoran_driver.c2
-rw-r--r--drivers/media/video/zoran/zr36060.c2
-rw-r--r--drivers/media/video/zr364xx.c23
-rw-r--r--drivers/memstick/host/jmb38x_ms.c2
-rw-r--r--drivers/memstick/host/r592.c2
-rw-r--r--drivers/memstick/host/tifm_ms.c2
-rw-r--r--drivers/message/fusion/lsi/mpi_cnfg.h1
-rw-r--r--drivers/message/fusion/lsi/mpi_ioc.h2
-rw-r--r--drivers/message/fusion/mptbase.c7
-rw-r--r--drivers/message/fusion/mptbase.h1
-rw-r--r--drivers/message/fusion/mptsas.c2
-rw-r--r--drivers/message/i2o/i2o_proc.c2
-rw-r--r--drivers/mfd/88pm860x-i2c.c241
-rw-r--r--drivers/mfd/Kconfig62
-rw-r--r--drivers/mfd/Makefile10
-rw-r--r--drivers/mfd/aat2870-core.c25
-rw-r--r--drivers/mfd/ab5500-core.c2
-rw-r--r--drivers/mfd/ab5500-debugfs.c2
-rw-r--r--drivers/mfd/ab8500-core.c7
-rw-r--r--drivers/mfd/ab8500-debugfs.c2
-rw-r--r--drivers/mfd/ab8500-gpadc.c4
-rw-r--r--drivers/mfd/ab8500-i2c.c2
-rw-r--r--drivers/mfd/ab8500-sysctrl.c4
-rw-r--r--drivers/mfd/cs5535-mfd.c8
-rw-r--r--drivers/mfd/da9052-core.c694
-rw-r--r--drivers/mfd/da9052-i2c.c140
-rw-r--r--drivers/mfd/da9052-spi.c115
-rw-r--r--drivers/mfd/db8500-prcmu.c7
-rw-r--r--drivers/mfd/dm355evm_msp.c3
-rw-r--r--drivers/mfd/intel_msic.c12
-rw-r--r--drivers/mfd/janz-cmodio.c2
-rw-r--r--drivers/mfd/jz4740-adc.c14
-rw-r--r--drivers/mfd/lpc_sch.c2
-rw-r--r--drivers/mfd/max8925-core.c15
-rw-r--r--drivers/mfd/max8925-i2c.c27
-rw-r--r--drivers/mfd/max8997.c3
-rw-r--r--drivers/mfd/max8998.c6
-rw-r--r--drivers/mfd/mc13xxx-core.c111
-rw-r--r--drivers/mfd/mcp-core.c17
-rw-r--r--drivers/mfd/mcp-sa11x0.c13
-rw-r--r--drivers/mfd/mfd-core.c2
-rw-r--r--drivers/mfd/omap-usb-host.c763
-rw-r--r--drivers/mfd/pcf50633-adc.c12
-rw-r--r--drivers/mfd/s5m-core.c176
-rw-r--r--drivers/mfd/s5m-irq.c487
-rw-r--r--drivers/mfd/sm501.c2
-rw-r--r--drivers/mfd/stmpe-i2c.c109
-rw-r--r--drivers/mfd/stmpe-spi.c150
-rw-r--r--drivers/mfd/stmpe.c277
-rw-r--r--drivers/mfd/stmpe.h53
-rw-r--r--drivers/mfd/t7l66xb.c16
-rw-r--r--drivers/mfd/tc6387xb.c14
-rw-r--r--drivers/mfd/ti-ssp.c12
-rw-r--r--drivers/mfd/timberdale.c2
-rw-r--r--drivers/mfd/tps65910-irq.c3
-rw-r--r--drivers/mfd/tps65910.c9
-rw-r--r--drivers/mfd/tps65912-core.c2
-rw-r--r--drivers/mfd/tps65912-spi.c1
-rw-r--r--drivers/mfd/twl-core.c53
-rw-r--r--drivers/mfd/twl4030-audio.c12
-rw-r--r--drivers/mfd/twl4030-irq.c3
-rw-r--r--drivers/mfd/twl4030-madc.c14
-rw-r--r--drivers/mfd/twl4030-power.c62
-rw-r--r--drivers/mfd/twl6030-irq.c2
-rw-r--r--drivers/mfd/twl6040-core.c148
-rw-r--r--drivers/mfd/ucb1x00-core.c19
-rw-r--r--drivers/mfd/ucb1x00-ts.c32
-rw-r--r--drivers/mfd/vx855.c2
-rw-r--r--drivers/mfd/wm831x-core.c4
-rw-r--r--drivers/mfd/wm831x-i2c.c3
-rw-r--r--drivers/mfd/wm831x-irq.c8
-rw-r--r--drivers/mfd/wm831x-spi.c4
-rw-r--r--drivers/mfd/wm8350-core.c2
-rw-r--r--drivers/mfd/wm8350-i2c.c4
-rw-r--r--drivers/mfd/wm8350-irq.c1
-rw-r--r--drivers/mfd/wm8400-core.c7
-rw-r--r--drivers/mfd/wm8994-core.c172
-rw-r--r--drivers/mfd/wm8994-irq.c206
-rw-r--r--drivers/mfd/wm8994-regmap.c1239
-rw-r--r--drivers/mfd/wm8994.h25
-rw-r--r--drivers/misc/Kconfig25
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/ab8500-pwm.c2
-rw-r--r--drivers/misc/ad525x_dpot-i2c.c10
-rw-r--r--drivers/misc/ad525x_dpot-spi.c97
-rw-r--r--drivers/misc/ad525x_dpot.c24
-rw-r--r--drivers/misc/ad525x_dpot.h8
-rw-r--r--drivers/misc/bmp085.c2
-rw-r--r--drivers/misc/c2port/c2port-duramar2150.c1
-rw-r--r--drivers/misc/c2port/core.c4
-rw-r--r--drivers/misc/carma/carma-fpga-program.c2
-rw-r--r--drivers/misc/cb710/core.c1
-rw-r--r--drivers/misc/cs5535-mfgpt.c2
-rw-r--r--drivers/misc/eeprom/eeprom_93cx6.c88
-rw-r--r--drivers/misc/ibmasm/command.c2
-rw-r--r--drivers/misc/ibmasm/dot_command.c2
-rw-r--r--drivers/misc/ibmasm/dot_command.h2
-rw-r--r--drivers/misc/ibmasm/event.c2
-rw-r--r--drivers/misc/ibmasm/heartbeat.c2
-rw-r--r--drivers/misc/ibmasm/i2o.h2
-rw-r--r--drivers/misc/ibmasm/ibmasm.h2
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c2
-rw-r--r--drivers/misc/ibmasm/lowlevel.c2
-rw-r--r--drivers/misc/ibmasm/lowlevel.h2
-rw-r--r--drivers/misc/ibmasm/module.c2
-rw-r--r--drivers/misc/ibmasm/r_heartbeat.c2
-rw-r--r--drivers/misc/ibmasm/remote.h2
-rw-r--r--drivers/misc/ibmasm/uart.c2
-rw-r--r--drivers/misc/isl29020.c2
-rw-r--r--drivers/misc/iwmc3200top/main.c12
-rw-r--r--drivers/misc/lis3lv02d/lis3lv02d.c2
-rw-r--r--drivers/misc/lkdtm.c6
-rw-r--r--drivers/misc/max8997-muic.c505
-rw-r--r--drivers/misc/sgi-gru/gruprocfs.c2
-rw-r--r--drivers/misc/sgi-xp/xpnet.c2
-rw-r--r--drivers/misc/ti-st/st_core.c18
-rw-r--r--drivers/misc/ti-st/st_kim.c84
-rw-r--r--drivers/misc/vmw_balloon.c14
-rw-r--r--drivers/mmc/Makefile3
-rw-r--r--drivers/mmc/card/block.c248
-rw-r--r--drivers/mmc/card/mmc_test.c3
-rw-r--r--drivers/mmc/card/queue.c5
-rw-r--r--drivers/mmc/core/Makefile2
-rw-r--r--drivers/mmc/core/bus.c5
-rw-r--r--drivers/mmc/core/cd-gpio.c74
-rw-r--r--drivers/mmc/core/core.c153
-rw-r--r--drivers/mmc/core/core.h5
-rw-r--r--drivers/mmc/core/debugfs.c5
-rw-r--r--drivers/mmc/core/host.c53
-rw-r--r--drivers/mmc/core/host.h21
-rw-r--r--drivers/mmc/core/mmc.c228
-rw-r--r--drivers/mmc/core/sd.c51
-rw-r--r--drivers/mmc/core/sdio.c355
-rw-r--r--drivers/mmc/core/sdio_io.c8
-rw-r--r--drivers/mmc/core/sdio_irq.c10
-rw-r--r--drivers/mmc/core/sdio_ops.c14
-rw-r--r--drivers/mmc/host/Kconfig1
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/at91_mci.c38
-rw-r--r--drivers/mmc/host/atmel-mci.c34
-rw-r--r--drivers/mmc/host/au1xmmc.c45
-rw-r--r--drivers/mmc/host/bfin_sdh.c12
-rw-r--r--drivers/mmc/host/cb710-mmc.c13
-rw-r--r--drivers/mmc/host/dw_mmc.c215
-rw-r--r--drivers/mmc/host/dw_mmc.h2
-rw-r--r--drivers/mmc/host/jz4740_mmc.c12
-rw-r--r--drivers/mmc/host/mmc_spi.c1
-rw-r--r--drivers/mmc/host/mmci.c21
-rw-r--r--drivers/mmc/host/msm_sdcc.c19
-rw-r--r--drivers/mmc/host/mvsdio.c13
-rw-r--r--drivers/mmc/host/mxcmmc.c23
-rw-r--r--drivers/mmc/host/mxs-mmc.c33
-rw-r--r--drivers/mmc/host/of_mmc_spi.c4
-rw-r--r--drivers/mmc/host/omap.c4
-rw-r--r--drivers/mmc/host/omap_hsmmc.c69
-rw-r--r--drivers/mmc/host/pxamci.c13
-rw-r--r--drivers/mmc/host/s3cmci.c13
-rw-r--r--drivers/mmc/host/sdhci-cns3xxx.c12
-rw-r--r--drivers/mmc/host/sdhci-dove.c12
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c17
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h2
-rw-r--r--drivers/mmc/host/sdhci-of-esdhc.c44
-rw-r--r--drivers/mmc/host/sdhci-of-hlwd.c12
-rw-r--r--drivers/mmc/host/sdhci-pci-data.c5
-rw-r--r--drivers/mmc/host/sdhci-pci.c181
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c10
-rw-r--r--drivers/mmc/host/sdhci-pxav2.c12
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c12
-rw-r--r--drivers/mmc/host/sdhci-s3c.c25
-rw-r--r--drivers/mmc/host/sdhci-spear.c51
-rw-r--r--drivers/mmc/host/sdhci-tegra.c12
-rw-r--r--drivers/mmc/host/sdhci.c150
-rw-r--r--drivers/mmc/host/sdhci.h1
-rw-r--r--drivers/mmc/host/sh_mmcif.c740
-rw-r--r--drivers/mmc/host/sh_mobile_sdhi.c13
-rw-r--r--drivers/mmc/host/tifm_sd.c20
-rw-r--r--drivers/mmc/host/tmio_mmc.c14
-rw-r--r--drivers/mmc/host/tmio_mmc.h11
-rw-r--r--drivers/mmc/host/tmio_mmc_dma.c16
-rw-r--r--drivers/mmc/host/tmio_mmc_pio.c36
-rw-r--r--drivers/mmc/host/ushc.c12
-rw-r--r--drivers/mmc/host/vub300.c10
-rw-r--r--drivers/mtd/Kconfig8
-rw-r--r--drivers/mtd/Makefile1
-rw-r--r--drivers/mtd/afs.c4
-rw-r--r--drivers/mtd/ar7part.c15
-rw-r--r--drivers/mtd/bcm63xxpart.c222
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0020.c13
-rw-r--r--drivers/mtd/devices/Kconfig12
-rw-r--r--drivers/mtd/devices/block2mtd.c3
-rw-r--r--drivers/mtd/devices/doc2000.c9
-rw-r--r--drivers/mtd/devices/doc2001.c8
-rw-r--r--drivers/mtd/devices/doc2001plus.c9
-rw-r--r--drivers/mtd/devices/docg3.c1441
-rw-r--r--drivers/mtd/devices/docg3.h65
-rw-r--r--drivers/mtd/devices/docprobe.c7
-rw-r--r--drivers/mtd/devices/m25p80.c1
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c1
-rw-r--r--drivers/mtd/devices/sst25l.c3
-rw-r--r--drivers/mtd/ftl.c81
-rw-r--r--drivers/mtd/inftlcore.c25
-rw-r--r--drivers/mtd/inftlmount.c19
-rw-r--r--drivers/mtd/lpddr/lpddr_cmds.c7
-rw-r--r--drivers/mtd/maps/Kconfig9
-rw-r--r--drivers/mtd/maps/Makefile1
-rw-r--r--drivers/mtd/maps/bcm963xx-flash.c277
-rw-r--r--drivers/mtd/maps/bfin-async-flash.c12
-rw-r--r--drivers/mtd/maps/gpio-addr-flash.c12
-rw-r--r--drivers/mtd/maps/ixp2000.c12
-rw-r--r--drivers/mtd/maps/ixp4xx.c14
-rw-r--r--drivers/mtd/maps/lantiq-flash.c6
-rw-r--r--drivers/mtd/maps/latch-addr-flash.c12
-rw-r--r--drivers/mtd/maps/physmap.c10
-rw-r--r--drivers/mtd/maps/physmap_of.c13
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c17
-rw-r--r--drivers/mtd/maps/rbtx4939-flash.c18
-rw-r--r--drivers/mtd/maps/sa1100-flash.c17
-rw-r--r--drivers/mtd/maps/scb2_flash.c3
-rw-r--r--drivers/mtd/maps/sun_uflash.c13
-rw-r--r--drivers/mtd/mtd_blkdevs.c3
-rw-r--r--drivers/mtd/mtdblock.c21
-rw-r--r--drivers/mtd/mtdblock_ro.c4
-rw-r--r--drivers/mtd/mtdchar.c203
-rw-r--r--drivers/mtd/mtdconcat.c53
-rw-r--r--drivers/mtd/mtdcore.c126
-rw-r--r--drivers/mtd/mtdoops.c47
-rw-r--r--drivers/mtd/mtdpart.c63
-rw-r--r--drivers/mtd/mtdswap.c29
-rw-r--r--drivers/mtd/nand/Kconfig3
-rw-r--r--drivers/mtd/nand/alauda.c13
-rw-r--r--drivers/mtd/nand/ams-delta.c12
-rw-r--r--drivers/mtd/nand/atmel_nand.c53
-rw-r--r--drivers/mtd/nand/au1550nd.c298
-rw-r--r--drivers/mtd/nand/bcm_umi_nand.c13
-rw-r--r--drivers/mtd/nand/davinci_nand.c4
-rw-r--r--drivers/mtd/nand/diskonchip.c4
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c75
-rw-r--r--drivers/mtd/nand/fsl_upm.c12
-rw-r--r--drivers/mtd/nand/gpio.c115
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c52
-rw-r--r--drivers/mtd/nand/jz4740_nand.c12
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c14
-rw-r--r--drivers/mtd/nand/nand_base.c9
-rw-r--r--drivers/mtd/nand/nand_bbt.c14
-rw-r--r--drivers/mtd/nand/nand_ids.c4
-rw-r--r--drivers/mtd/nand/nandsim.c2
-rw-r--r--drivers/mtd/nand/ndfc.c13
-rw-r--r--drivers/mtd/nand/nomadik_nand.c18
-rw-r--r--drivers/mtd/nand/nuc900_nand.c13
-rw-r--r--drivers/mtd/nand/omap2.c15
-rw-r--r--drivers/mtd/nand/pasemi_nand.c12
-rw-r--r--drivers/mtd/nand/plat_nand.c13
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c18
-rw-r--r--drivers/mtd/nand/r852.c2
-rw-r--r--drivers/mtd/nand/sharpsl.c12
-rw-r--r--drivers/mtd/nand/sm_common.c2
-rw-r--r--drivers/mtd/nand/socrates_nand.c13
-rw-r--r--drivers/mtd/nand/tmio_nand.c13
-rw-r--r--drivers/mtd/nand/txx9ndfmc.c6
-rw-r--r--drivers/mtd/nftlcore.c25
-rw-r--r--drivers/mtd/nftlmount.c13
-rw-r--r--drivers/mtd/onenand/generic.c16
-rw-r--r--drivers/mtd/onenand/onenand_base.c3
-rw-r--r--drivers/mtd/onenand/samsung.c13
-rw-r--r--drivers/mtd/redboot.c12
-rw-r--r--drivers/mtd/rfd_ftl.c46
-rw-r--r--drivers/mtd/sm_ftl.c12
-rw-r--r--drivers/mtd/ssfdc.c12
-rw-r--r--drivers/mtd/tests/mtd_oobtest.c28
-rw-r--r--drivers/mtd/tests/mtd_pagetest.c57
-rw-r--r--drivers/mtd/tests/mtd_readtest.c11
-rw-r--r--drivers/mtd/tests/mtd_speedtest.c37
-rw-r--r--drivers/mtd/tests/mtd_stresstest.c22
-rw-r--r--drivers/mtd/tests/mtd_subpagetest.c32
-rw-r--r--drivers/mtd/tests/mtd_torturetest.c15
-rw-r--r--drivers/mtd/ubi/build.c2
-rw-r--r--drivers/mtd/ubi/cdev.c3
-rw-r--r--drivers/mtd/ubi/debug.c2
-rw-r--r--drivers/mtd/ubi/debug.h5
-rw-r--r--drivers/mtd/ubi/eba.c6
-rw-r--r--drivers/mtd/ubi/io.c19
-rw-r--r--drivers/mtd/ubi/kapi.c4
-rw-r--r--drivers/mtd/ubi/ubi.h2
-rw-r--r--drivers/mtd/ubi/vtbl.c6
-rw-r--r--drivers/mtd/ubi/wl.c12
-rw-r--r--drivers/net/Kconfig6
-rw-r--r--drivers/net/Makefile4
-rw-r--r--drivers/net/bonding/bond_alb.c139
-rw-r--r--drivers/net/bonding/bond_ipv6.c225
-rw-r--r--drivers/net/bonding/bond_main.c83
-rw-r--r--drivers/net/bonding/bond_sysfs.c1
-rw-r--r--drivers/net/caif/caif_hsi.c15
-rw-r--r--drivers/net/caif/caif_serial.c10
-rw-r--r--drivers/net/caif/caif_shmcore.c27
-rw-r--r--drivers/net/caif/caif_spi.c178
-rw-r--r--drivers/net/can/Kconfig2
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/at91_can.c13
-rw-r--r--drivers/net/can/bfin_can.c12
-rw-r--r--drivers/net/can/c_can/c_can_platform.c12
-rw-r--r--drivers/net/can/cc770/Kconfig21
-rw-r--r--drivers/net/can/cc770/Makefile9
-rw-r--r--drivers/net/can/cc770/cc770.c883
-rw-r--r--drivers/net/can/cc770/cc770.h203
-rw-r--r--drivers/net/can/cc770/cc770_isa.c381
-rw-r--r--drivers/net/can/cc770/cc770_platform.c272
-rw-r--r--drivers/net/can/dev.c2
-rw-r--r--drivers/net/can/flexcan.c32
-rw-r--r--drivers/net/can/janz-ican3.c13
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c12
-rw-r--r--drivers/net/can/mscan/mscan.c8
-rw-r--r--drivers/net/can/pch_can.c3
-rw-r--r--drivers/net/can/sja1000/Kconfig1
-rw-r--r--drivers/net/can/sja1000/peak_pci.c23
-rw-r--r--drivers/net/can/sja1000/sja1000.c13
-rw-r--r--drivers/net/can/sja1000/sja1000_isa.c118
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c12
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c13
-rw-r--r--drivers/net/can/slcan.c2
-rw-r--r--drivers/net/can/softing/softing_main.c16
-rw-r--r--drivers/net/can/ti_hecc.c18
-rw-r--r--drivers/net/can/usb/ems_usb.c35
-rw-r--r--drivers/net/can/usb/esd_usb2.c23
-rw-r--r--drivers/net/can/vcan.c2
-rw-r--r--drivers/net/dsa/Kconfig36
-rw-r--r--drivers/net/dsa/Makefile9
-rw-r--r--drivers/net/dsa/mv88e6060.c294
-rw-r--r--drivers/net/dsa/mv88e6123_61_65.c452
-rw-r--r--drivers/net/dsa/mv88e6131.c436
-rw-r--r--drivers/net/dsa/mv88e6xxx.c550
-rw-r--r--drivers/net/dsa/mv88e6xxx.h98
-rw-r--r--drivers/net/dummy.c2
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c7
-rw-r--r--drivers/net/ethernet/3com/3c59x.c14
-rw-r--r--drivers/net/ethernet/3com/typhoon.c16
-rw-r--r--drivers/net/ethernet/8390/8390.h2
-rw-r--r--drivers/net/ethernet/8390/apne.c2
-rw-r--r--drivers/net/ethernet/8390/ax88796.c21
-rw-r--r--drivers/net/ethernet/8390/es3210.c2
-rw-r--r--drivers/net/ethernet/8390/hp-plus.c2
-rw-r--r--drivers/net/ethernet/8390/hp.c2
-rw-r--r--drivers/net/ethernet/8390/hydra.c2
-rw-r--r--drivers/net/ethernet/8390/lne390.c4
-rw-r--r--drivers/net/ethernet/8390/ne-h8300.c2
-rw-r--r--drivers/net/ethernet/8390/ne.c4
-rw-r--r--drivers/net/ethernet/8390/ne2.c2
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c6
-rw-r--r--drivers/net/ethernet/8390/ne3210.c2
-rw-r--r--drivers/net/ethernet/8390/stnic.c2
-rw-r--r--drivers/net/ethernet/8390/zorro8390.c2
-rw-r--r--drivers/net/ethernet/Kconfig1
-rw-r--r--drivers/net/ethernet/Makefile3
-rw-r--r--drivers/net/ethernet/adaptec/starfire.c14
-rw-r--r--drivers/net/ethernet/adi/bfin_mac.c3
-rw-r--r--drivers/net/ethernet/aeroflex/greth.c13
-rw-r--r--drivers/net/ethernet/amd/amd8111e.c15
-rw-r--r--drivers/net/ethernet/amd/amd8111e.h5
-rw-r--r--drivers/net/ethernet/amd/au1000_eth.c18
-rw-r--r--drivers/net/ethernet/amd/nmclan_cs.c19
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c10
-rw-r--r--drivers/net/ethernet/amd/sunlance.c15
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c1
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c19
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c10
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c13
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl1.c1
-rw-r--r--drivers/net/ethernet/atheros/atlx/atl2.c23
-rw-r--r--drivers/net/ethernet/atheros/atlx/atlx.c13
-rw-r--r--drivers/net/ethernet/broadcom/b44.c2
-rw-r--r--drivers/net/ethernet/broadcom/bcm63xx_enet.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c196
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.h17
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h70
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c369
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h128
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c69
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c194
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h217
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c1248
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c506
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h70
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c81
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h24
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c108
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h6
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c136
-rw-r--r--drivers/net/ethernet/broadcom/cnic_defs.h1
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h16
-rw-r--r--drivers/net/ethernet/broadcom/sb1250-mac.c19
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c774
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h44
-rw-r--r--drivers/net/ethernet/brocade/bna/Makefile2
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cee.c35
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_cee.h4
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_defs.h98
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c493
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.h54
-rw-r--r--drivers/net/ethernet/brocade/bna/bfi.h97
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_enet.c13
-rw-r--r--drivers/net/ethernet/brocade/bna/bna_types.h7
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c70
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.h29
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_debugfs.c623
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_ethtool.c155
-rw-r--r--drivers/net/ethernet/brocade/bna/cna.h3
-rw-r--r--drivers/net/ethernet/brocade/bna/cna_fwimg.c1
-rw-r--r--drivers/net/ethernet/cadence/Kconfig16
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.c26
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.h4
-rw-r--r--drivers/net/ethernet/cadence/macb.c419
-rw-r--r--drivers/net/ethernet/cadence/macb.h152
-rw-r--r--drivers/net/ethernet/calxeda/Kconfig7
-rw-r--r--drivers/net/ethernet/calxeda/Makefile1
-rw-r--r--drivers/net/ethernet/calxeda/xgmac.c1928
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/cxgb2.c15
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb/sge.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c21
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c26
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.c27
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/l2t.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c58
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c6
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c21
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c7
-rw-r--r--drivers/net/ethernet/cisco/enic/cq_enet_desc.h2
-rw-r--r--drivers/net/ethernet/cisco/enic/enic.h4
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_dev.c14
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_dev.h4
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c50
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_pp.c2
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c5
-rw-r--r--drivers/net/ethernet/dec/tulip/de2104x.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/de4x5.c13
-rw-r--r--drivers/net/ethernet/dec/tulip/dmfe.c7
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c6
-rw-r--r--drivers/net/ethernet/dec/tulip/uli526x.c7
-rw-r--r--drivers/net/ethernet/dec/tulip/winbond-840.c6
-rw-r--r--drivers/net/ethernet/dlink/de600.c2
-rw-r--r--drivers/net/ethernet/dlink/sundance.c6
-rw-r--r--drivers/net/ethernet/dnet.c22
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h39
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c237
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h84
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c126
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c587
-rw-r--r--drivers/net/ethernet/ethoc.c13
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c1
-rw-r--r--drivers/net/ethernet/faraday/ftmac100.c1
-rw-r--r--drivers/net/ethernet/fealnx.c6
-rw-r--r--drivers/net/ethernet/freescale/Kconfig2
-rw-r--r--drivers/net/ethernet/freescale/fec.c94
-rw-r--r--drivers/net/ethernet/freescale/fec.h4
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c15
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c13
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mii-fec.c13
-rw-r--r--drivers/net/ethernet/freescale/fsl_pq_mdio.c16
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c62
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h10
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c12
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c16
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c2
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.h6
-rw-r--r--drivers/net/ethernet/fujitsu/fmvj18x_cs.c7
-rw-r--r--drivers/net/ethernet/i825xx/eepro.c7
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_ethtool.c2
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c57
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_qmr.c14
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c3
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c6
-rw-r--r--drivers/net/ethernet/icplus/ipg.c13
-rw-r--r--drivers/net/ethernet/intel/e100.c8
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_ethtool.c12
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_hw.h5
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c71
-rw-r--r--drivers/net/ethernet/intel/e1000e/e1000.h1
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c18
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c465
-rw-r--r--drivers/net/ethernet/intel/igb/Makefile2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c7
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.h2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.h2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.h2
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h2
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h7
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c39
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c372
-rw-r--r--drivers/net/ethernet/intel/igbvf/Makefile2
-rw-r--r--drivers/net/ethernet/intel/igbvf/defines.h2
-rw-r--r--drivers/net/ethernet/intel/igbvf/ethtool.c15
-rw-r--r--drivers/net/ethernet/intel/igbvf/igbvf.h2
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.c2
-rw-r--r--drivers/net/ethernet/intel/igbvf/mbx.h2
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c46
-rw-r--r--drivers/net/ethernet/intel/igbvf/regs.h2
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.c2
-rw-r--r--drivers/net/ethernet/intel/igbvf/vf.h2
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c10
-rw-r--r--drivers/net/ethernet/intel/ixgb/ixgb_main.c22
-rw-r--r--drivers/net/ethernet/intel/ixgbe/Makefile2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h4
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c109
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c123
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c85
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c71
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c15
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c10
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h44
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c20
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/Makefile2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/defines.h3
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ethtool.c23
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf.h10
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c87
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.c5
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/mbx.h6
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/regs.h44
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.c32
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/vf.h4
-rw-r--r--drivers/net/ethernet/jme.c26
-rw-r--r--drivers/net/ethernet/jme.h2
-rw-r--r--drivers/net/ethernet/korina.c13
-rw-r--r--drivers/net/ethernet/lantiq_etop.c5
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c41
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c16
-rw-r--r--drivers/net/ethernet/marvell/skge.c50
-rw-r--r--drivers/net/ethernet/marvell/sky2.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Makefile2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/catas.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c1337
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c141
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c190
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c101
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c84
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.h43
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_resources.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c49
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c96
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c440
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c408
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.h27
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/icm.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/intf.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c913
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mcg.c232
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h674
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h23
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c486
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/pd.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c639
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/profile.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/qp.c233
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c3073
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/sense.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/srq.c132
-rw-r--r--drivers/net/ethernet/micrel/Kconfig1
-rw-r--r--drivers/net/ethernet/micrel/ks8842.c17
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c515
-rw-r--r--drivers/net/ethernet/micrel/ks8851.h15
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c30
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c79
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c5
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h2
-rw-r--r--drivers/net/ethernet/natsemi/jazzsonic.c13
-rw-r--r--drivers/net/ethernet/natsemi/macsonic.c20
-rw-r--r--drivers/net/ethernet/natsemi/natsemi.c6
-rw-r--r--drivers/net/ethernet/natsemi/ns83820.c6
-rw-r--r--drivers/net/ethernet/natsemi/xtsonic.c13
-rw-r--r--drivers/net/ethernet/neterion/s2io.c11
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c15
-rw-r--r--drivers/net/ethernet/nuvoton/w90p910_ether.c13
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c503
-rw-r--r--drivers/net/ethernet/octeon/octeon_mgmt.c4
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c8
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c22
-rw-r--r--drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c15
-rw-r--r--drivers/net/ethernet/packetengines/Kconfig1
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c12
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c6
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c14
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c13
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c12
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c11
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c48
-rw-r--r--drivers/net/ethernet/rdc/r6040.c42
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c20
-rw-r--r--drivers/net/ethernet/realtek/8139too.c8
-rw-r--r--drivers/net/ethernet/realtek/r8169.c35
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c26
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h3
-rw-r--r--drivers/net/ethernet/s6gmac.c2
-rw-r--r--drivers/net/ethernet/seeq/sgiseeq.c18
-rw-r--r--drivers/net/ethernet/sfc/efx.c9
-rw-r--r--drivers/net/ethernet/sfc/efx.h20
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c221
-rw-r--r--drivers/net/ethernet/sfc/falcon.c2
-rw-r--r--drivers/net/ethernet/sfc/filter.c310
-rw-r--r--drivers/net/ethernet/sfc/filter.h12
-rw-r--r--drivers/net/ethernet/sfc/mtd.c6
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h2
-rw-r--r--drivers/net/ethernet/sfc/rx.c13
-rw-r--r--drivers/net/ethernet/sfc/selftest.c4
-rw-r--r--drivers/net/ethernet/sfc/siena.c2
-rw-r--r--drivers/net/ethernet/sfc/tx.c29
-rw-r--r--drivers/net/ethernet/sgi/meth.c67
-rw-r--r--drivers/net/ethernet/sis/sis190.c15
-rw-r--r--drivers/net/ethernet/sis/sis900.c7
-rw-r--r--drivers/net/ethernet/sis/sis900.h2
-rw-r--r--drivers/net/ethernet/smsc/epic100.c6
-rw-r--r--drivers/net/ethernet/smsc/smc911x.c13
-rw-r--r--drivers/net/ethernet/smsc/smc91c92_cs.c4
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c13
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c206
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.h2
-rw-r--r--drivers/net/ethernet/smsc/smsc9420.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig27
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c13
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/mmc_core.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/norm_desc.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c14
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c502
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c219
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c195
-rw-r--r--drivers/net/ethernet/sun/cassini.c7
-rw-r--r--drivers/net/ethernet/sun/niu.c42
-rw-r--r--drivers/net/ethernet/sun/sunbmac.c13
-rw-r--r--drivers/net/ethernet/sun/sungem.c6
-rw-r--r--drivers/net/ethernet/sun/sunhme.c18
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c6
-rw-r--r--drivers/net/ethernet/ti/cpmac.c6
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c17
-rw-r--r--drivers/net/ethernet/ti/davinci_mdio.c5
-rw-r--r--drivers/net/ethernet/tile/tilepro.c7
-rw-r--r--drivers/net/ethernet/toshiba/Kconfig2
-rw-r--r--drivers/net/ethernet/toshiba/ps3_gelic_net.c4
-rw-r--r--drivers/net/ethernet/tundra/tsi108_eth.c26
-rw-r--r--drivers/net/ethernet/via/via-rhine.c689
-rw-r--r--drivers/net/ethernet/via/via-velocity.c15
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c30
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c30
-rw-r--r--drivers/net/ethernet/xircom/xirc2ps_cs.c2
-rw-r--r--drivers/net/ethernet/xscale/ixp4xx_eth.c5
-rw-r--r--drivers/net/hyperv/Kconfig5
-rw-r--r--drivers/net/hyperv/Makefile3
-rw-r--r--drivers/net/hyperv/hyperv_net.h (renamed from drivers/staging/hv/hyperv_net.h)139
-rw-r--r--drivers/net/hyperv/netvsc.c (renamed from drivers/staging/hv/netvsc.c)164
-rw-r--r--drivers/net/hyperv/netvsc_drv.c (renamed from drivers/staging/hv/netvsc_drv.c)179
-rw-r--r--drivers/net/hyperv/rndis_filter.c (renamed from drivers/staging/hv/rndis_filter.c)74
-rw-r--r--drivers/net/ifb.c2
-rw-r--r--drivers/net/irda/Kconfig6
-rw-r--r--drivers/net/irda/au1000_ircc.h125
-rw-r--r--drivers/net/irda/au1k_ir.c1229
-rw-r--r--drivers/net/irda/bfin_sir.c13
-rw-r--r--drivers/net/irda/donauboe.c2
-rw-r--r--drivers/net/irda/irda-usb.c35
-rw-r--r--drivers/net/irda/kingsun-sir.c19
-rw-r--r--drivers/net/irda/ks959-sir.c21
-rw-r--r--drivers/net/irda/ksdazzle-sir.c21
-rw-r--r--drivers/net/irda/mcs7780.c23
-rw-r--r--drivers/net/irda/nsc-ircc.c2
-rw-r--r--drivers/net/irda/pxaficp_ir.c13
-rw-r--r--drivers/net/irda/sh_irda.c13
-rw-r--r--drivers/net/irda/sh_sir.c13
-rw-r--r--drivers/net/irda/smsc-ircc2.c2
-rw-r--r--drivers/net/irda/stir4200.c21
-rw-r--r--drivers/net/irda/via-ircc.c4
-rw-r--r--drivers/net/irda/w83977af_ir.c2
-rw-r--r--drivers/net/loopback.c2
-rw-r--r--drivers/net/macvlan.c15
-rw-r--r--drivers/net/macvtap.c24
-rw-r--r--drivers/net/mii.c53
-rw-r--r--drivers/net/phy/Kconfig4
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/dp83640.c2
-rw-r--r--drivers/net/phy/fixed.c2
-rw-r--r--drivers/net/phy/icplus.c55
-rw-r--r--drivers/net/phy/mdio-bitbang.c9
-rw-r--r--drivers/net/phy/mdio-gpio.c3
-rw-r--r--drivers/net/phy/mdio-octeon.c3
-rw-r--r--drivers/net/phy/mdio_bus.c23
-rw-r--r--drivers/net/phy/phy_device.c20
-rw-r--r--drivers/net/phy/smsc.c21
-rw-r--r--drivers/net/phy/spi_ks8995.c375
-rw-r--r--drivers/net/ppp/ppp_generic.c23
-rw-r--r--drivers/net/ppp/pptp.c6
-rw-r--r--drivers/net/team/Kconfig43
-rw-r--r--drivers/net/team/Makefile7
-rw-r--r--drivers/net/team/team.c1728
-rw-r--r--drivers/net/team/team_mode_activebackup.c136
-rw-r--r--drivers/net/team/team_mode_roundrobin.c107
-rw-r--r--drivers/net/tokenring/Kconfig5
-rw-r--r--drivers/net/tun.c31
-rw-r--r--drivers/net/usb/asix.c41
-rw-r--r--drivers/net/usb/catc.c17
-rw-r--r--drivers/net/usb/cdc-phonet.c23
-rw-r--r--drivers/net/usb/cdc_eem.c13
-rw-r--r--drivers/net/usb/cdc_ether.c26
-rw-r--r--drivers/net/usb/cdc_ncm.c23
-rw-r--r--drivers/net/usb/cdc_subset.c12
-rw-r--r--drivers/net/usb/cx82310_eth.c12
-rw-r--r--drivers/net/usb/dm9601.c13
-rw-r--r--drivers/net/usb/gl620a.c12
-rw-r--r--drivers/net/usb/hso.c2
-rw-r--r--drivers/net/usb/int51x1.c12
-rw-r--r--drivers/net/usb/ipheth.c27
-rw-r--r--drivers/net/usb/kalmia.c12
-rw-r--r--drivers/net/usb/kaweth.c30
-rw-r--r--drivers/net/usb/lg-vl600.c12
-rw-r--r--drivers/net/usb/mcs7830.c12
-rw-r--r--drivers/net/usb/net1080.c12
-rw-r--r--drivers/net/usb/pegasus.c8
-rw-r--r--drivers/net/usb/plusb.c12
-rw-r--r--drivers/net/usb/rndis_host.c12
-rw-r--r--drivers/net/usb/rtl8150.c15
-rw-r--r--drivers/net/usb/sierra_net.c21
-rw-r--r--drivers/net/usb/smsc75xx.c17
-rw-r--r--drivers/net/usb/smsc95xx.c17
-rw-r--r--drivers/net/usb/usbnet.c2
-rw-r--r--drivers/net/usb/zaurus.c24
-rw-r--r--drivers/net/veth.c13
-rw-r--r--drivers/net/virtio_net.c173
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c22
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c46
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h6
-rw-r--r--drivers/net/wan/sbni.c2
-rw-r--r--drivers/net/wan/sealevel.c2
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h2
-rw-r--r--drivers/net/wimax/i2400m/tx.c8
-rw-r--r--drivers/net/wimax/i2400m/usb-tx.c7
-rw-r--r--drivers/net/wireless/Makefile8
-rw-r--r--drivers/net/wireless/airo.c4
-rw-r--r--drivers/net/wireless/ath/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath.h14
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c91
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.h32
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h571
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c16
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c293
-rw-r--r--drivers/net/wireless/ath/ath5k/caps.c27
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.c217
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.h124
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c370
-rw-r--r--drivers/net/wireless/ath/ath5k/gpio.c81
-rw-r--r--drivers/net/wireless/ath/ath5k/initvals.c75
-rw-r--r--drivers/net/wireless/ath/ath5k/pci.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c222
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c853
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c143
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h27
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c234
-rw-r--r--drivers/net/wireless/ath/ath5k/rfbuffer.h59
-rw-r--r--drivers/net/wireless/ath/ath5k/rfgain.h22
-rw-r--r--drivers/net/wireless/ath/ath5k/trace.h5
-rw-r--r--drivers/net/wireless/ath/ath6kl/Makefile2
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.c244
-rw-r--r--drivers/net/wireless/ath/ath6kl/bmi.h8
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c1695
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.h32
-rw-r--r--drivers/net/wireless/ath/ath6kl/common.h17
-rw-r--r--drivers/net/wireless/ath/ath6kl/core.h309
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.c849
-rw-r--r--drivers/net/wireless/ath/ath6kl/debug.h34
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif-ops.h79
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.c (renamed from drivers/net/wireless/ath/ath6kl/htc_hif.c)150
-rw-r--r--drivers/net/wireless/ath/ath6kl/hif.h66
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc.c725
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc.h18
-rw-r--r--drivers/net/wireless/ath/ath6kl/htc_hif.h92
-rw-r--r--drivers/net/wireless/ath/ath6kl/init.c1013
-rw-r--r--drivers/net/wireless/ath/ath6kl/main.c771
-rw-r--r--drivers/net/wireless/ath/ath6kl/sdio.c660
-rw-r--r--drivers/net/wireless/ath/ath6kl/target.h19
-rw-r--r--drivers/net/wireless/ath/ath6kl/txrx.c219
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.c834
-rw-r--r--drivers/net/wireless/ath/ath6kl/wmi.h386
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig31
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile6
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c67
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c136
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c19
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c155
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c223
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c50
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.c1493
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mci.h102
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c50
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_rtt.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h60
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h20
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c46
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c135
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h41
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c21
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.c215
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs.h43
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.c81
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.h57
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c36
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c28
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c51
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c20
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c93
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c289
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h244
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c75
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c58
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c341
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.c668
-rw-r--r--drivers/net/wireless/ath/ath9k/mci.h134
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c11
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c60
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h321
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c359
-rw-r--r--drivers/net/wireless/ath/carl9170/debug.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/fw.c97
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c14
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c13
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c13
-rw-r--r--drivers/net/wireless/ath/key.c8
-rw-r--r--drivers/net/wireless/ath/regd.c77
-rw-r--r--drivers/net/wireless/b43/Kconfig6
-rw-r--r--drivers/net/wireless/b43/b43.h20
-rw-r--r--drivers/net/wireless/b43/dma.c27
-rw-r--r--drivers/net/wireless/b43/leds.c16
-rw-r--r--drivers/net/wireless/b43/lo.c8
-rw-r--r--drivers/net/wireless/b43/main.c162
-rw-r--r--drivers/net/wireless/b43/phy_common.c8
-rw-r--r--drivers/net/wireless/b43/phy_g.c34
-rw-r--r--drivers/net/wireless/b43/phy_lp.c8
-rw-r--r--drivers/net/wireless/b43/phy_n.c4112
-rw-r--r--drivers/net/wireless/b43/phy_n.h14
-rw-r--r--drivers/net/wireless/b43/pio.c6
-rw-r--r--drivers/net/wireless/b43/radio_2056.c25
-rw-r--r--drivers/net/wireless/b43/radio_2056.h1
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c183
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h31
-rw-r--r--drivers/net/wireless/b43/xmit.c4
-rw-r--r--drivers/net/wireless/b43legacy/b43legacy.h20
-rw-r--r--drivers/net/wireless/b43legacy/dma.c81
-rw-r--r--drivers/net/wireless/b43legacy/dma.h5
-rw-r--r--drivers/net/wireless/b43legacy/leds.c4
-rw-r--r--drivers/net/wireless/b43legacy/main.c150
-rw-r--r--drivers/net/wireless/b43legacy/radio.c20
-rw-r--r--drivers/net/wireless/brcm80211/Kconfig21
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile21
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h32
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c153
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c218
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd.h140
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h96
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c50
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c38
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h15
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c711
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h7
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c1296
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c607
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h136
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h41
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c52
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h9
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.c1251
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/aiutils.h224
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/ampdu.c25
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/channel.c118
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/d11.h3
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.c469
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/dma.h12
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c336
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.c1418
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/main.h29
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/nicpci.c241
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/nicpci.h11
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/otp.c76
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c241
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h4
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h11
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c101
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c122
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pmu.c271
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pmu.h5
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/pub.h44
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/rate.h5
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/srom.c508
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/srom.h7
-rw-r--r--drivers/net/wireless/brcm80211/brcmsmac/types.h54
-rw-r--r--drivers/net/wireless/brcm80211/brcmutil/utils.c218
-rw-r--r--drivers/net/wireless/brcm80211/include/brcmu_utils.h30
-rw-r--r--drivers/net/wireless/brcm80211/include/chipcommon.h2
-rw-r--r--drivers/net/wireless/brcm80211/include/defs.h1
-rw-r--r--drivers/net/wireless/brcm80211/include/soc.h12
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c3
-rw-r--r--drivers/net/wireless/hostap/hostap_ioctl.c4
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c7
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c19
-rw-r--r--drivers/net/wireless/ipw2x00/libipw.h12
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_wx.c25
-rw-r--r--drivers/net/wireless/iwlegacy/3945-debug.c505
-rw-r--r--drivers/net/wireless/iwlegacy/3945-mac.c3976
-rw-r--r--drivers/net/wireless/iwlegacy/3945-rs.c986
-rw-r--r--drivers/net/wireless/iwlegacy/3945.c2744
-rw-r--r--drivers/net/wireless/iwlegacy/3945.h607
-rw-r--r--drivers/net/wireless/iwlegacy/4965-calib.c (renamed from drivers/net/wireless/iwlegacy/iwl-4965-calib.c)613
-rw-r--r--drivers/net/wireless/iwlegacy/4965-debug.c746
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c6515
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c2860
-rw-r--r--drivers/net/wireless/iwlegacy/4965.c2402
-rw-r--r--drivers/net/wireless/iwlegacy/4965.h1301
-rw-r--r--drivers/net/wireless/iwlegacy/Kconfig43
-rw-r--r--drivers/net/wireless/iwlegacy/Makefile24
-rw-r--r--drivers/net/wireless/iwlegacy/commands.h (renamed from drivers/net/wireless/iwlegacy/iwl-commands.h)1134
-rw-r--r--drivers/net/wireless/iwlegacy/common.c5867
-rw-r--r--drivers/net/wireless/iwlegacy/common.h3246
-rw-r--r--drivers/net/wireless/iwlegacy/csr.h (renamed from drivers/net/wireless/iwlegacy/iwl-csr.h)93
-rw-r--r--drivers/net/wireless/iwlegacy/debug.c1411
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c523
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h60
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-fh.h187
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-hw.h291
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-led.c63
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-led.h32
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945-rs.c996
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945.c2741
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-3945.h308
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c774
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h59
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c154
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-hw.h811
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-led.c73
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-led.h33
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-lib.c1194
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-rs.c2871
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-rx.c215
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-sta.c721
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-tx.c1378
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965-ucode.c166
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.c2183
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.h282
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.c2661
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.h636
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-debug.h198
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-debugfs.c1314
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-dev.h1364
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-devtrace.c42
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-devtrace.h210
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-eeprom.c553
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-eeprom.h344
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-fh.h513
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-hcmd.c271
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-helpers.h196
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-io.h545
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-led.c205
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-led.h56
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-legacy-rs.h456
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-power.c165
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-power.h55
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-rx.c282
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-scan.c550
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-spectrum.h4
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-sta.c817
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-sta.h148
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-tx.c659
-rw-r--r--drivers/net/wireless/iwlegacy/iwl3945-base.c4016
-rw-r--r--drivers/net/wireless/iwlegacy/iwl4965-base.c3281
-rw-r--r--drivers/net/wireless/iwlegacy/prph.h (renamed from drivers/net/wireless/iwlegacy/iwl-prph.h)133
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig30
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c23
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c62
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c48
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c58
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.c84
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c436
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c37
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rx.c78
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rxon.c27
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-sta.c108
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tt.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c366
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c1685
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h62
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-bus.h16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-cfg.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h112
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c381
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h92
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h102
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c49
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h190
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h52
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c252
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c21
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.h14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-mac80211.c1601
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-pci.c30
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c68
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-shared.h237
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.c (renamed from drivers/net/wireless/iwlwifi/iwl-sv-open.c)265
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-testmode.h66
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h53
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c144
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c188
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie.c182
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h94
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-ucode.c (renamed from drivers/net/wireless/iwlwifi/iwl-agn-ucode.c)418
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-wifi.h (renamed from drivers/net/wireless/iwlegacy/iwl-4965-calib.h)21
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c15
-rw-r--r--drivers/net/wireless/iwmc3200wifi/main.c6
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c12
-rw-r--r--drivers/net/wireless/libertas/cfg.c35
-rw-r--r--drivers/net/wireless/libertas/debugfs.c2
-rw-r--r--drivers/net/wireless/libertas/ethtool.c7
-rw-r--r--drivers/net/wireless/libertas/if_cs.c4
-rw-r--r--drivers/net/wireless/libertas/if_spi.c1
-rw-r--r--drivers/net/wireless/libertas/if_usb.c24
-rw-r--r--drivers/net/wireless/libertas_tf/if_usb.c22
-rw-r--r--drivers/net/wireless/libertas_tf/main.c4
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c27
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c18
-rw-r--r--drivers/net/wireless/mwifiex/Kconfig4
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c332
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.h1
-rw-r--r--drivers/net/wireless/mwifiex/cfp.c38
-rw-r--r--drivers/net/wireless/mwifiex/fw.h5
-rw-r--r--drivers/net/wireless/mwifiex/init.c49
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h11
-rw-r--r--drivers/net/wireless/mwifiex/join.c106
-rw-r--r--drivers/net/wireless/mwifiex/main.c25
-rw-r--r--drivers/net/wireless/mwifiex/main.h26
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c27
-rw-r--r--drivers/net/wireless/mwifiex/scan.c28
-rw-r--r--drivers/net/wireless/mwifiex/sdio.c43
-rw-r--r--drivers/net/wireless/mwifiex/sdio.h1
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c2
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c2
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c23
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c77
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c12
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c5
-rw-r--r--drivers/net/wireless/mwl8k.c166
-rw-r--r--drivers/net/wireless/orinoco/main.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_usb.c27
-rw-r--r--drivers/net/wireless/orinoco/scan.c16
-rw-r--r--drivers/net/wireless/p54/main.c2
-rw-r--r--drivers/net/wireless/p54/p54spi.c7
-rw-r--r--drivers/net/wireless/p54/p54usb.c13
-rw-r--r--drivers/net/wireless/p54/txrx.c4
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.c333
-rw-r--r--drivers/net/wireless/prism54/isl_ioctl.h2
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c5
-rw-r--r--drivers/net/wireless/ray_cs.c4
-rw-r--r--drivers/net/wireless/rayctl.h4
-rw-r--r--drivers/net/wireless/rndis_wlan.c121
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c15
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c61
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c30
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c82
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c13
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c11
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c16
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c15
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c13
-rw-r--r--drivers/net/wireless/rtlwifi/base.c10
-rw-r--r--drivers/net/wireless/rtlwifi/base.h2
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c46
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c22
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c62
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h23
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/hw.c11
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/mac.c1
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c53
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/hw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192de/sw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/fw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/sw.c4
-rw-r--r--drivers/net/wireless/rtlwifi/usb.c55
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h14
-rw-r--r--drivers/net/wireless/wl1251/spi.c1
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig10
-rw-r--r--drivers/net/wireless/wl12xx/Makefile3
-rw-r--r--drivers/net/wireless/wl12xx/acx.c172
-rw-r--r--drivers/net/wireless/wl12xx/acx.h91
-rw-r--r--drivers/net/wireless/wl12xx/boot.c15
-rw-r--r--drivers/net/wireless/wl12xx/cmd.c371
-rw-r--r--drivers/net/wireless/wl12xx/cmd.h50
-rw-r--r--drivers/net/wireless/wl12xx/conf.h4
-rw-r--r--drivers/net/wireless/wl12xx/debug.h101
-rw-r--r--drivers/net/wireless/wl12xx/debugfs.c157
-rw-r--r--drivers/net/wireless/wl12xx/event.c216
-rw-r--r--drivers/net/wireless/wl12xx/event.h3
-rw-r--r--drivers/net/wireless/wl12xx/init.c491
-rw-r--r--drivers/net/wireless/wl12xx/init.h8
-rw-r--r--drivers/net/wireless/wl12xx/io.c12
-rw-r--r--drivers/net/wireless/wl12xx/io.h23
-rw-r--r--drivers/net/wireless/wl12xx/main.c2058
-rw-r--r--drivers/net/wireless/wl12xx/ps.c62
-rw-r--r--drivers/net/wireless/wl12xx/ps.h9
-rw-r--r--drivers/net/wireless/wl12xx/reg.h2
-rw-r--r--drivers/net/wireless/wl12xx/rx.c38
-rw-r--r--drivers/net/wireless/wl12xx/scan.c120
-rw-r--r--drivers/net/wireless/wl12xx/scan.h8
-rw-r--r--drivers/net/wireless/wl12xx/sdio.c259
-rw-r--r--drivers/net/wireless/wl12xx/sdio_test.c543
-rw-r--r--drivers/net/wireless/wl12xx/spi.c215
-rw-r--r--drivers/net/wireless/wl12xx/testmode.c77
-rw-r--r--drivers/net/wireless/wl12xx/tx.c382
-rw-r--r--drivers/net/wireless/wl12xx/tx.h11
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx.h386
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx_80211.h5
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx_platform_data.c25
-rw-r--r--drivers/net/wireless/zd1201.c13
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c8
-rw-r--r--drivers/net/xen-netback/interface.c7
-rw-r--r--drivers/net/xen-netback/netback.c8
-rw-r--r--drivers/net/xen-netback/xenbus.c9
-rw-r--r--drivers/net/xen-netfront.c23
-rw-r--r--drivers/nfc/pn533.c189
-rw-r--r--drivers/of/Kconfig9
-rw-r--r--drivers/of/Makefile1
-rw-r--r--drivers/of/address.c16
-rw-r--r--drivers/of/base.c156
-rw-r--r--drivers/of/fdt.c5
-rw-r--r--drivers/of/gpio.c45
-rw-r--r--drivers/of/irq.c11
-rw-r--r--drivers/of/of_mdio.c2
-rw-r--r--drivers/of/pdt.c2
-rw-r--r--drivers/of/selftest.c139
-rw-r--r--drivers/oprofile/nmi_timer_int.c173
-rw-r--r--drivers/oprofile/oprof.c30
-rw-r--r--drivers/oprofile/oprof.h10
-rw-r--r--drivers/oprofile/timer_int.c30
-rw-r--r--drivers/parisc/Kconfig7
-rw-r--r--drivers/parisc/dino.c47
-rw-r--r--drivers/parisc/iommu-helpers.h2
-rw-r--r--drivers/parisc/lba_pci.c72
-rw-r--r--drivers/parport/parport_ax88796.c13
-rw-r--r--drivers/parport/parport_ip32.c2
-rw-r--r--drivers/parport/parport_mfc3.c2
-rw-r--r--drivers/parport/parport_pc.c4
-rw-r--r--drivers/parport/parport_sunbpp.c13
-rw-r--r--drivers/pci/Kconfig4
-rw-r--r--drivers/pci/access.c76
-rw-r--r--drivers/pci/ats.c107
-rw-r--r--drivers/pci/bus.c32
-rw-r--r--drivers/pci/hotplug/acpi_pcihp.c2
-rw-r--r--drivers/pci/hotplug/acpiphp_core.c2
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c2
-rw-r--r--drivers/pci/hotplug/cpcihp_zt5550.c4
-rw-r--r--drivers/pci/hotplug/cpqphp_core.c4
-rw-r--r--drivers/pci/hotplug/ibmphp_core.c2
-rw-r--r--drivers/pci/hotplug/pci_hotplug_core.c2
-rw-r--r--drivers/pci/hotplug/pciehp.h7
-rw-r--r--drivers/pci/hotplug/pciehp_core.c17
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c4
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c1
-rw-r--r--drivers/pci/hotplug/pcihp_skeleton.c2
-rw-r--r--drivers/pci/hotplug/rpaphp.h2
-rw-r--r--drivers/pci/hotplug/rpaphp_core.c2
-rw-r--r--drivers/pci/hotplug/shpchp.h4
-rw-r--r--drivers/pci/hotplug/shpchp_core.c4
-rw-r--r--drivers/pci/ioapic.c15
-rw-r--r--drivers/pci/iov.c13
-rw-r--r--drivers/pci/msi.c160
-rw-r--r--drivers/pci/pci-acpi.c13
-rw-r--r--drivers/pci/pci-driver.c3
-rw-r--r--drivers/pci/pci-label.c4
-rw-r--r--drivers/pci/pci-sysfs.c2
-rw-r--r--drivers/pci/pci.c153
-rw-r--r--drivers/pci/pci.h10
-rw-r--r--drivers/pci/pcie/Kconfig2
-rw-r--r--drivers/pci/pcie/aer/aer_inject.c2
-rw-r--r--drivers/pci/pcie/aer/aerdrv_core.c4
-rw-r--r--drivers/pci/pcie/aspm.c59
-rw-r--r--drivers/pci/probe.c73
-rw-r--r--drivers/pci/remove.c36
-rw-r--r--drivers/pci/setup-res.c6
-rw-r--r--drivers/pci/xen-pcifront.c21
-rw-r--r--drivers/pcmcia/Kconfig8
-rw-r--r--drivers/pcmcia/Makefile4
-rw-r--r--drivers/pcmcia/au1000_generic.c545
-rw-r--r--drivers/pcmcia/au1000_generic.h135
-rw-r--r--drivers/pcmcia/au1000_pb1x00.c294
-rw-r--r--drivers/pcmcia/db1xxx_ss.c26
-rw-r--r--drivers/pcmcia/ds.c4
-rw-r--r--drivers/pcmcia/pxa2xx_base.c12
-rw-r--r--drivers/pcmcia/pxa2xx_cm_x255.c16
-rw-r--r--drivers/pcmcia/pxa2xx_cm_x270.c9
-rw-r--r--drivers/pcmcia/pxa2xx_e740.c11
-rw-r--r--drivers/pcmcia/pxa2xx_palmld.c2
-rw-r--r--drivers/pcmcia/pxa2xx_palmtc.c2
-rw-r--r--drivers/pcmcia/pxa2xx_stargate2.c6
-rw-r--r--drivers/pcmcia/pxa2xx_trizeps4.c5
-rw-r--r--drivers/pcmcia/pxa2xx_vpac270.c4
-rw-r--r--drivers/pcmcia/sa1111_generic.c3
-rw-r--r--drivers/pcmcia/yenta_socket.c6
-rw-r--r--drivers/pinctrl/Kconfig22
-rw-r--r--drivers/pinctrl/Makefile8
-rw-r--r--drivers/pinctrl/core.c173
-rw-r--r--drivers/pinctrl/core.h16
-rw-r--r--drivers/pinctrl/pinconf.c328
-rw-r--r--drivers/pinctrl/pinconf.h36
-rw-r--r--drivers/pinctrl/pinctrl-coh901.c (renamed from drivers/gpio/gpio-u300.c)21
-rw-r--r--drivers/pinctrl/pinctrl-sirf.c (renamed from drivers/pinctrl/pinmux-sirf.c)9
-rw-r--r--drivers/pinctrl/pinctrl-u300.c (renamed from drivers/pinctrl/pinmux-u300.c)47
-rw-r--r--drivers/pinctrl/pinmux.c340
-rw-r--r--drivers/pinctrl/pinmux.h4
-rw-r--r--drivers/platform/x86/Kconfig26
-rw-r--r--drivers/platform/x86/Makefile2
-rw-r--r--drivers/platform/x86/acer-wmi.c30
-rw-r--r--drivers/platform/x86/amilo-rfkill.c173
-rw-r--r--drivers/platform/x86/asus-laptop.c2
-rw-r--r--drivers/platform/x86/asus-wmi.c4
-rw-r--r--drivers/platform/x86/asus_acpi.c4
-rw-r--r--drivers/platform/x86/compal-laptop.c2
-rw-r--r--drivers/platform/x86/fujitsu-tablet.c478
-rw-r--r--drivers/platform/x86/ibm_rtl.c49
-rw-r--r--drivers/platform/x86/ideapad-laptop.c2
-rw-r--r--drivers/platform/x86/intel_ips.c15
-rw-r--r--drivers/platform/x86/intel_menlow.c2
-rw-r--r--drivers/platform/x86/intel_oaktrail.c2
-rw-r--r--drivers/platform/x86/intel_scu_ipc.c2
-rw-r--r--drivers/platform/x86/msi-laptop.c2
-rw-r--r--drivers/platform/x86/panasonic-laptop.c4
-rw-r--r--drivers/platform/x86/samsung-laptop.c4
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c35
-rw-r--r--drivers/platform/x86/wmi.c4
-rw-r--r--drivers/pnp/quirks.c42
-rw-r--r--drivers/power/Kconfig32
-rw-r--r--drivers/power/Makefile5
-rw-r--r--drivers/power/bq27x00_battery.c199
-rw-r--r--drivers/power/charger-manager.c1072
-rw-r--r--drivers/power/collie_battery.c55
-rw-r--r--drivers/power/da9030_battery.c13
-rw-r--r--drivers/power/da9052-battery.c664
-rw-r--r--drivers/power/ds2760_battery.c21
-rw-r--r--drivers/power/ds2780_battery.c18
-rw-r--r--drivers/power/gpio-charger.c12
-rw-r--r--drivers/power/intel_mid_battery.c13
-rw-r--r--drivers/power/isp1704_charger.c14
-rw-r--r--drivers/power/jz4740-battery.c14
-rw-r--r--drivers/power/lp8727_charger.c495
-rw-r--r--drivers/power/max17042_battery.c94
-rw-r--r--drivers/power/max8903_charger.c14
-rw-r--r--drivers/power/max8925_power.c75
-rw-r--r--drivers/power/max8997_charger.c4
-rw-r--r--drivers/power/max8998_charger.c14
-rw-r--r--drivers/power/olpc_battery.c75
-rw-r--r--drivers/power/pcf50633-charger.c12
-rw-r--r--drivers/power/pda_power.c89
-rw-r--r--drivers/power/power_supply_core.c19
-rw-r--r--drivers/power/power_supply_sysfs.c16
-rw-r--r--drivers/power/s3c_adc_battery.c37
-rw-r--r--drivers/power/sbs-battery.c (renamed from drivers/power/bq20z75.c)481
-rw-r--r--drivers/power/tosa_battery.c79
-rw-r--r--drivers/power/wm831x_backup.c12
-rw-r--r--drivers/power/wm831x_power.c56
-rw-r--r--drivers/power/wm8350_power.c12
-rw-r--r--drivers/power/wm97xx_battery.c20
-rw-r--r--drivers/power/z2_battery.c4
-rw-r--r--drivers/pps/pps.c4
-rw-r--r--drivers/rapidio/devices/tsi721.c5
-rw-r--r--drivers/regulator/88pm8607.c8
-rw-r--r--drivers/regulator/Kconfig8
-rw-r--r--drivers/regulator/Makefile2
-rw-r--r--drivers/regulator/aat2870-regulator.c4
-rw-r--r--drivers/regulator/ab3100.c2
-rw-r--r--drivers/regulator/ab8500.c4
-rw-r--r--drivers/regulator/ad5398.c2
-rw-r--r--drivers/regulator/bq24022.c2
-rw-r--r--drivers/regulator/core.c180
-rw-r--r--drivers/regulator/da903x.c2
-rw-r--r--drivers/regulator/da9052-regulator.c610
-rw-r--r--drivers/regulator/db8500-prcmu.c2
-rw-r--r--drivers/regulator/dummy.c2
-rw-r--r--drivers/regulator/fixed.c85
-rw-r--r--drivers/regulator/gpio-regulator.c2
-rw-r--r--drivers/regulator/isl6271a-regulator.c2
-rw-r--r--drivers/regulator/lp3971.c2
-rw-r--r--drivers/regulator/lp3972.c2
-rw-r--r--drivers/regulator/max1586.c2
-rw-r--r--drivers/regulator/max8649.c159
-rw-r--r--drivers/regulator/max8660.c2
-rw-r--r--drivers/regulator/max8925-regulator.c35
-rw-r--r--drivers/regulator/max8952.c2
-rw-r--r--drivers/regulator/max8997.c2
-rw-r--r--drivers/regulator/max8998.c2
-rw-r--r--drivers/regulator/mc13783-regulator.c7
-rw-r--r--drivers/regulator/mc13892-regulator.c47
-rw-r--r--drivers/regulator/mc13xxx-regulator-core.c59
-rw-r--r--drivers/regulator/mc13xxx.h26
-rw-r--r--drivers/regulator/of_regulator.c87
-rw-r--r--drivers/regulator/pcap-regulator.c2
-rw-r--r--drivers/regulator/pcf50633-regulator.c2
-rw-r--r--drivers/regulator/tps6105x-regulator.c3
-rw-r--r--drivers/regulator/tps65023-regulator.c89
-rw-r--r--drivers/regulator/tps6507x-regulator.c2
-rw-r--r--drivers/regulator/tps6524x-regulator.c4
-rw-r--r--drivers/regulator/tps6586x-regulator.c2
-rw-r--r--drivers/regulator/tps65910-regulator.c41
-rw-r--r--drivers/regulator/tps65912-regulator.c2
-rw-r--r--drivers/regulator/twl-regulator.c2
-rw-r--r--drivers/regulator/userspace-consumer.c13
-rw-r--r--drivers/regulator/virtual.c12
-rw-r--r--drivers/regulator/wm831x-dcdc.c18
-rw-r--r--drivers/regulator/wm831x-isink.c7
-rw-r--r--drivers/regulator/wm831x-ldo.c18
-rw-r--r--drivers/regulator/wm8350-regulator.c2
-rw-r--r--drivers/regulator/wm8400-regulator.c2
-rw-r--r--drivers/regulator/wm8994-regulator.c2
-rw-r--r--drivers/rtc/Kconfig6
-rw-r--r--drivers/rtc/interface.c4
-rw-r--r--drivers/rtc/rtc-88pm860x.c12
-rw-r--r--drivers/rtc/rtc-ab8500.c138
-rw-r--r--drivers/rtc/rtc-at91rm9200.c101
-rw-r--r--drivers/rtc/rtc-at91sam9.c13
-rw-r--r--drivers/rtc/rtc-bfin.c13
-rw-r--r--drivers/rtc/rtc-bq4802.c13
-rw-r--r--drivers/rtc/rtc-cmos.c2
-rw-r--r--drivers/rtc/rtc-dm355evm.c12
-rw-r--r--drivers/rtc/rtc-ds1286.c13
-rw-r--r--drivers/rtc/rtc-ds1511.c15
-rw-r--r--drivers/rtc/rtc-ds1553.c13
-rw-r--r--drivers/rtc/rtc-ds1742.c13
-rw-r--r--drivers/rtc/rtc-jz4740.c14
-rw-r--r--drivers/rtc/rtc-lpc32xx.c12
-rw-r--r--drivers/rtc/rtc-m41t93.c1
-rw-r--r--drivers/rtc/rtc-m41t94.c1
-rw-r--r--drivers/rtc/rtc-m48t35.c13
-rw-r--r--drivers/rtc/rtc-m48t59.c13
-rw-r--r--drivers/rtc/rtc-m48t86.c13
-rw-r--r--drivers/rtc/rtc-max6902.c1
-rw-r--r--drivers/rtc/rtc-max8925.c38
-rw-r--r--drivers/rtc/rtc-max8998.c12
-rw-r--r--drivers/rtc/rtc-mc13xxx.c2
-rw-r--r--drivers/rtc/rtc-mpc5121.c12
-rw-r--r--drivers/rtc/rtc-mrst.c13
-rw-r--r--drivers/rtc/rtc-mxc.c123
-rw-r--r--drivers/rtc/rtc-pcf2123.c1
-rw-r--r--drivers/rtc/rtc-pcf50633.c12
-rw-r--r--drivers/rtc/rtc-pl030.c2
-rw-r--r--drivers/rtc/rtc-pl031.c2
-rw-r--r--drivers/rtc/rtc-pm8xxx.c12
-rw-r--r--drivers/rtc/rtc-puv3.c22
-rw-r--r--drivers/rtc/rtc-r9701.c14
-rw-r--r--drivers/rtc/rtc-rs5c348.c1
-rw-r--r--drivers/rtc/rtc-s3c.c37
-rw-r--r--drivers/rtc/rtc-sa1100.c16
-rw-r--r--drivers/rtc/rtc-spear.c12
-rw-r--r--drivers/rtc/rtc-stk17ta8.c13
-rw-r--r--drivers/rtc/rtc-stmp3xxx.c13
-rw-r--r--drivers/rtc/rtc-twl.c10
-rw-r--r--drivers/rtc/rtc-v3020.c13
-rw-r--r--drivers/rtc/rtc-vr41xx.c13
-rw-r--r--drivers/rtc/rtc-vt8500.c12
-rw-r--r--drivers/rtc/rtc-wm831x.c36
-rw-r--r--drivers/rtc/rtc-wm8350.c12
-rw-r--r--drivers/s390/block/dasd.c11
-rw-r--r--drivers/s390/block/dasd_3990_erp.c4
-rw-r--r--drivers/s390/block/dasd_alias.c74
-rw-r--r--drivers/s390/block/dasd_eckd.c476
-rw-r--r--drivers/s390/block/dasd_int.h2
-rw-r--r--drivers/s390/block/dasd_ioctl.c1
-rw-r--r--drivers/s390/block/xpram.c2
-rw-r--r--drivers/s390/char/con3215.c22
-rw-r--r--drivers/s390/char/fs3270.c1
-rw-r--r--drivers/s390/char/raw3270.c2
-rw-r--r--drivers/s390/char/sclp_config.c8
-rw-r--r--drivers/s390/char/tape_class.h1
-rw-r--r--drivers/s390/char/vmcp.c1
-rw-r--r--drivers/s390/char/vmwatchdog.c4
-rw-r--r--drivers/s390/cio/chsc_sch.c1
-rw-r--r--drivers/s390/cio/cmf.c2
-rw-r--r--drivers/s390/cio/qdio_main.c4
-rw-r--r--drivers/s390/cio/qdio_setup.c10
-rw-r--r--drivers/s390/crypto/zcrypt_pcixcc.c32
-rw-r--r--drivers/s390/kvm/kvm_virtio.c8
-rw-r--r--drivers/s390/net/netiucv.c217
-rw-r--r--drivers/s390/net/qeth_core_main.c47
-rw-r--r--drivers/s390/net/qeth_l2_main.c19
-rw-r--r--drivers/s390/net/qeth_l3_main.c32
-rw-r--r--drivers/s390/scsi/zfcp_cfdc.c1
-rw-r--r--drivers/scsi/Kconfig5
-rw-r--r--drivers/scsi/aacraid/commctrl.c1
-rw-r--r--drivers/scsi/aha1542.c2
-rw-r--r--drivers/scsi/aic7xxx/aicasm/aicasm.c2
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.c2
-rw-r--r--drivers/scsi/be2iscsi/be_iscsi.h2
-rw-r--r--drivers/scsi/be2iscsi/be_main.c17
-rw-r--r--drivers/scsi/bfa/bfa_defs.h4
-rw-r--r--drivers/scsi/bfa/bfa_defs_svc.h444
-rw-r--r--drivers/scsi/bfa/bfa_fc.h155
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.c416
-rw-r--r--drivers/scsi/bfa/bfa_fcpim.h7
-rw-r--r--drivers/scsi/bfa/bfa_ioc.c6
-rw-r--r--drivers/scsi/bfa/bfa_svc.h5
-rw-r--r--drivers/scsi/bfa/bfad.c2
-rw-r--r--drivers/scsi/bfa/bfad_attr.c2
-rw-r--r--drivers/scsi/bfa/bfad_bsg.c27
-rw-r--r--drivers/scsi/bfa/bfad_debugfs.c5
-rw-r--r--drivers/scsi/bfa/bfad_drv.h2
-rw-r--r--drivers/scsi/bfa/bfad_im.c56
-rw-r--r--drivers/scsi/bfa/bfad_im.h27
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c2
-rw-r--r--drivers/scsi/cxgbi/cxgb3i/cxgb3i.c2
-rw-r--r--drivers/scsi/cxgbi/cxgb4i/cxgb4i.c8
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c17
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.h2
-rw-r--r--drivers/scsi/dc395x.c2
-rw-r--r--drivers/scsi/device_handler/scsi_dh.c58
-rw-r--r--drivers/scsi/device_handler/scsi_dh_alua.c5
-rw-r--r--drivers/scsi/device_handler/scsi_dh_emc.c19
-rw-r--r--drivers/scsi/device_handler/scsi_dh_hp_sw.c19
-rw-r--r--drivers/scsi/device_handler/scsi_dh_rdac.c46
-rw-r--r--drivers/scsi/fcoe/fcoe.c46
-rw-r--r--drivers/scsi/fcoe/fcoe.h4
-rw-r--r--drivers/scsi/gdth.h2
-rw-r--r--drivers/scsi/hpsa.c8
-rw-r--r--drivers/scsi/ipr.c91
-rw-r--r--drivers/scsi/ipr.h1
-rw-r--r--drivers/scsi/ips.c2
-rw-r--r--drivers/scsi/isci/firmware/Makefile19
-rw-r--r--drivers/scsi/isci/firmware/README36
-rw-r--r--drivers/scsi/isci/firmware/create_fw.c99
-rw-r--r--drivers/scsi/isci/firmware/create_fw.h77
-rw-r--r--drivers/scsi/isci/host.c342
-rw-r--r--drivers/scsi/isci/host.h27
-rw-r--r--drivers/scsi/isci/init.c25
-rw-r--r--drivers/scsi/isci/isci.h1
-rw-r--r--drivers/scsi/isci/phy.c172
-rw-r--r--drivers/scsi/isci/port.c104
-rw-r--r--drivers/scsi/isci/port.h10
-rw-r--r--drivers/scsi/isci/port_config.c35
-rw-r--r--drivers/scsi/isci/probe_roms.c2
-rw-r--r--drivers/scsi/isci/probe_roms.h89
-rw-r--r--drivers/scsi/isci/remote_device.c10
-rw-r--r--drivers/scsi/isci/task.c2
-rw-r--r--drivers/scsi/isci/task.h7
-rw-r--r--drivers/scsi/iscsi_boot_sysfs.c14
-rw-r--r--drivers/scsi/iscsi_tcp.c2
-rw-r--r--drivers/scsi/jazz_esp.c2
-rw-r--r--drivers/scsi/libfc/fc_disc.c6
-rw-r--r--drivers/scsi/libfc/fc_elsct.c1
-rw-r--r--drivers/scsi/libfc/fc_exch.c2
-rw-r--r--drivers/scsi/libfc/fc_fcp.c4
-rw-r--r--drivers/scsi/libfc/fc_lport.c5
-rw-r--r--drivers/scsi/libfc/fc_rport.c10
-rw-r--r--drivers/scsi/lpfc/lpfc.h14
-rw-r--r--drivers/scsi/lpfc/lpfc_attr.c436
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.c432
-rw-r--r--drivers/scsi/lpfc/lpfc_bsg.h3
-rw-r--r--drivers/scsi/lpfc/lpfc_compat.h5
-rw-r--r--drivers/scsi/lpfc/lpfc_crtn.h13
-rw-r--r--drivers/scsi/lpfc/lpfc_debugfs.c172
-rw-r--r--drivers/scsi/lpfc/lpfc_els.c214
-rw-r--r--drivers/scsi/lpfc/lpfc_hbadisc.c157
-rw-r--r--drivers/scsi/lpfc/lpfc_hw.h21
-rw-r--r--drivers/scsi/lpfc/lpfc_hw4.h11
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c338
-rw-r--r--drivers/scsi/lpfc/lpfc_mbox.c25
-rw-r--r--drivers/scsi/lpfc/lpfc_mem.c4
-rw-r--r--drivers/scsi/lpfc/lpfc_nportdisc.c10
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.c17
-rw-r--r--drivers/scsi/lpfc/lpfc_scsi.h5
-rw-r--r--drivers/scsi/lpfc/lpfc_sli.c704
-rw-r--r--drivers/scsi/lpfc/lpfc_sli4.h11
-rw-r--r--drivers/scsi/lpfc/lpfc_version.h2
-rw-r--r--drivers/scsi/lpfc/lpfc_vport.c6
-rw-r--r--drivers/scsi/mac_esp.c6
-rw-r--r--drivers/scsi/mac_scsi.c9
-rw-r--r--drivers/scsi/megaraid.c13
-rw-r--r--drivers/scsi/megaraid/megaraid_sas.h8
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_base.c145
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fp.c4
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2.h10
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h28
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_ioc.h49
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_raid.h67
-rw-r--r--drivers/scsi/mpt2sas/mpi/mpi2_tool.h9
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c208
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.h26
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_ctl.c8
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_scsih.c168
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_transport.c9
-rw-r--r--drivers/scsi/nsp32.c4
-rw-r--r--drivers/scsi/osd/osd_uld.c4
-rw-r--r--drivers/scsi/pcmcia/nsp_cs.c2
-rw-r--r--drivers/scsi/pmcraid.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c23
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c70
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c311
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h19
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h7
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c95
-rw-r--r--drivers/scsi/qla2xxx/qla_inline.h13
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c641
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c254
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c9
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c527
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c382
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_version.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_dbg.c6
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h4
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h2
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c3
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c30
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c7
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c51
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.h22
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c648
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi_error.c5
-rw-r--r--drivers/scsi/scsi_lib.c7
-rw-r--r--drivers/scsi/scsi_netlink.c2
-rw-r--r--drivers/scsi/scsi_pm.c43
-rw-r--r--drivers/scsi/scsi_priv.h2
-rw-r--r--drivers/scsi/scsi_scan.c6
-rw-r--r--drivers/scsi/scsi_transport_fc.c3
-rw-r--r--drivers/scsi/scsi_transport_iscsi.c29
-rw-r--r--drivers/scsi/scsi_transport_spi.c2
-rw-r--r--drivers/scsi/scsicam.c1
-rw-r--r--drivers/scsi/sd.c18
-rw-r--r--drivers/scsi/sd_dif.c2
-rw-r--r--drivers/scsi/sg.c32
-rw-r--r--drivers/scsi/sni_53c710.c2
-rw-r--r--drivers/scsi/sym53c8xx_2/sym_glue.c4
-rw-r--r--drivers/scsi/vmw_pvscsi.c2
-rw-r--r--drivers/sh/Makefile9
-rw-r--r--drivers/sh/clk/core.c9
-rw-r--r--drivers/sh/clk/cpg.c79
-rw-r--r--drivers/sh/intc/core.c37
-rw-r--r--drivers/sh/intc/internals.h7
-rw-r--r--drivers/sh/intc/userimask.c16
-rw-r--r--drivers/sh/pfc.c273
-rw-r--r--drivers/spi/Kconfig18
-rw-r--r--drivers/spi/spi-dw-mid.c8
-rw-r--r--drivers/spi/spi-ep93xx.c9
-rw-r--r--drivers/spi/spi-omap2-mcspi.c51
-rw-r--r--drivers/spi/spi-pl022.c153
-rw-r--r--drivers/spi/spi-s3c64xx.c14
-rw-r--r--drivers/spi/spi-topcliff-pch.c23
-rw-r--r--drivers/spi/spi.c2
-rw-r--r--drivers/ssb/driver_pcicore.c2
-rw-r--r--drivers/ssb/pci.c23
-rw-r--r--drivers/staging/Kconfig12
-rw-r--r--drivers/staging/Makefile6
-rw-r--r--drivers/staging/android/Kconfig107
-rw-r--r--drivers/staging/android/Makefile8
-rw-r--r--drivers/staging/android/TODO10
-rw-r--r--drivers/staging/android/ashmem.c752
-rw-r--r--drivers/staging/android/ashmem.h48
-rw-r--r--drivers/staging/android/binder.c3609
-rw-r--r--drivers/staging/android/binder.h330
-rw-r--r--drivers/staging/android/logger.c616
-rw-r--r--drivers/staging/android/logger.h49
-rw-r--r--drivers/staging/android/lowmemorykiller.c222
-rw-r--r--drivers/staging/android/ram_console.c443
-rw-r--r--drivers/staging/android/ram_console.h22
-rw-r--r--drivers/staging/android/switch/Kconfig11
-rw-r--r--drivers/staging/android/switch/Makefile4
-rw-r--r--drivers/staging/android/switch/switch.h53
-rw-r--r--drivers/staging/android/switch/switch_class.c174
-rw-r--r--drivers/staging/android/switch/switch_gpio.c172
-rw-r--r--drivers/staging/android/timed_gpio.c176
-rw-r--r--drivers/staging/android/timed_gpio.h33
-rw-r--r--drivers/staging/android/timed_output.c123
-rw-r--r--drivers/staging/android/timed_output.h37
-rw-r--r--drivers/staging/asus_oled/asus_oled.c17
-rw-r--r--drivers/staging/bcm/Bcmchar.c376
-rw-r--r--drivers/staging/bcm/HandleControlPacket.c323
-rw-r--r--drivers/staging/bcm/InterfaceDld.c12
-rw-r--r--drivers/staging/bcm/InterfaceIdleMode.c33
-rw-r--r--drivers/staging/bcm/InterfaceInit.c12
-rw-r--r--drivers/staging/bcm/InterfaceMisc.c24
-rw-r--r--drivers/staging/bcm/Misc.c32
-rw-r--r--drivers/staging/bcm/hostmibs.c178
-rw-r--r--drivers/staging/bcm/led_control.c1131
-rw-r--r--drivers/staging/bcm/nvm.c95
-rw-r--r--drivers/staging/bcm/target_params.h4
-rw-r--r--drivers/staging/comedi/comedi_fops.c88
-rw-r--r--drivers/staging/comedi/comedi_fops.h3
-rw-r--r--drivers/staging/comedi/drivers/addi-data/addi_common.c137
-rw-r--r--drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c2
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7230.c10
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7296.c6
-rw-r--r--drivers/staging/comedi/drivers/adl_pci7432.c6
-rw-r--r--drivers/staging/comedi/drivers/adl_pci8164.c6
-rw-r--r--drivers/staging/comedi/drivers/adl_pci9111.c6
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c32
-rw-r--r--drivers/staging/comedi/drivers/adv_pci_dio.c23
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200.c9
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc236.c6
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc263.c6
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci224.c9
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c9
-rw-r--r--drivers/staging/comedi/drivers/cb_das16_cs.c20
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas.c42
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidas64.c26
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidda.c18
-rw-r--r--drivers/staging/comedi/drivers/cb_pcidio.c23
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdas.c50
-rw-r--r--drivers/staging/comedi/drivers/cb_pcimdda.c30
-rw-r--r--drivers/staging/comedi/drivers/contec_pci_dio.c19
-rw-r--r--drivers/staging/comedi/drivers/daqboard2000.c73
-rw-r--r--drivers/staging/comedi/drivers/das08.c6
-rw-r--r--drivers/staging/comedi/drivers/das08_cs.c8
-rw-r--r--drivers/staging/comedi/drivers/das16m1.c37
-rw-r--r--drivers/staging/comedi/drivers/das1800.c65
-rw-r--r--drivers/staging/comedi/drivers/das6402.c23
-rw-r--r--drivers/staging/comedi/drivers/das800.c43
-rw-r--r--drivers/staging/comedi/drivers/dt3000.c38
-rw-r--r--drivers/staging/comedi/drivers/jr3_pci.c81
-rw-r--r--drivers/staging/comedi/drivers/ke_counter.c6
-rw-r--r--drivers/staging/comedi/drivers/me_daq.c9
-rw-r--r--drivers/staging/comedi/drivers/ni_at_a2150.c8
-rw-r--r--drivers/staging/comedi/drivers/ni_daq_dio24.c19
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_cs.c2
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c29
-rw-r--r--drivers/staging/comedi/drivers/pcl816.c4
-rw-r--r--drivers/staging/comedi/drivers/pcl818.c108
-rw-r--r--drivers/staging/comedi/drivers/pcmmio.c26
-rw-r--r--drivers/staging/comedi/drivers/pcmuio.c40
-rw-r--r--drivers/staging/comedi/drivers/serial2002.c7
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c16
-rw-r--r--drivers/staging/crystalhd/bc_dts_defs.h262
-rw-r--r--drivers/staging/cxt1e1/comet.h22
-rw-r--r--drivers/staging/cxt1e1/comet_tables.c24
-rw-r--r--drivers/staging/cxt1e1/comet_tables.h24
-rw-r--r--drivers/staging/cxt1e1/libsbew.h34
-rw-r--r--drivers/staging/cxt1e1/musycc.c49
-rw-r--r--drivers/staging/cxt1e1/musycc.h31
-rw-r--r--drivers/staging/cxt1e1/ossiRelease.c10
-rw-r--r--drivers/staging/cxt1e1/pmc93x6_eeprom.h21
-rw-r--r--drivers/staging/cxt1e1/pmcc4.h42
-rw-r--r--drivers/staging/cxt1e1/pmcc4_cpld.h33
-rw-r--r--drivers/staging/cxt1e1/pmcc4_defs.h14
-rw-r--r--drivers/staging/cxt1e1/pmcc4_drv.c70
-rw-r--r--drivers/staging/cxt1e1/pmcc4_ioctls.h16
-rw-r--r--drivers/staging/cxt1e1/sbe_bid.h14
-rw-r--r--drivers/staging/cxt1e1/sbe_promformat.h27
-rw-r--r--drivers/staging/cxt1e1/sbecom_inline_linux.h20
-rw-r--r--drivers/staging/cxt1e1/sbeproc.h20
-rw-r--r--drivers/staging/cxt1e1/sbew_ioc.h55
-rw-r--r--drivers/staging/et131x/et131x.c370
-rw-r--r--drivers/staging/frontier/alphatrack.c28
-rw-r--r--drivers/staging/frontier/tranzport.c29
-rw-r--r--drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c4
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_hw.c2
-rw-r--r--drivers/staging/ft1000/ft1000-usb/ft1000_usb.c22
-rw-r--r--drivers/staging/gma500/Makefile52
-rw-r--r--drivers/staging/gma500/TODO15
-rw-r--r--drivers/staging/gma500/displays/hdmi.h33
-rw-r--r--drivers/staging/gma500/displays/pyr_cmd.h34
-rw-r--r--drivers/staging/gma500/displays/pyr_vid.h34
-rw-r--r--drivers/staging/gma500/displays/tmd_cmd.h34
-rw-r--r--drivers/staging/gma500/displays/tmd_vid.h34
-rw-r--r--drivers/staging/gma500/displays/tpo_cmd.h35
-rw-r--r--drivers/staging/gma500/displays/tpo_vid.h33
-rw-r--r--drivers/staging/gma500/mdfld_device.c714
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dbi.c761
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dbi.h173
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dbi_dpu.c778
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dbi_dpu.h154
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dpi.c805
-rw-r--r--drivers/staging/gma500/mdfld_dsi_dpi.h78
-rw-r--r--drivers/staging/gma500/mdfld_dsi_output.c1014
-rw-r--r--drivers/staging/gma500/mdfld_dsi_output.h138
-rw-r--r--drivers/staging/gma500/mdfld_dsi_pkg_sender.c1484
-rw-r--r--drivers/staging/gma500/mdfld_dsi_pkg_sender.h184
-rw-r--r--drivers/staging/gma500/mdfld_intel_display.c1404
-rw-r--r--drivers/staging/gma500/mdfld_msic.h31
-rw-r--r--drivers/staging/gma500/mdfld_output.c171
-rw-r--r--drivers/staging/gma500/mdfld_output.h41
-rw-r--r--drivers/staging/gma500/mdfld_pyr_cmd.c558
-rw-r--r--drivers/staging/gma500/mdfld_tmd_vid.c206
-rw-r--r--drivers/staging/gma500/mdfld_tpo_cmd.c509
-rw-r--r--drivers/staging/gma500/mdfld_tpo_vid.c140
-rw-r--r--drivers/staging/gma500/medfield.h268
-rw-r--r--drivers/staging/gma500/psb_drm.h219
-rw-r--r--drivers/staging/gma500/psb_drv.c1229
-rw-r--r--drivers/staging/gma500/psb_intel_sdvo.c1293
-rw-r--r--drivers/staging/gma500/psb_intel_sdvo_regs.h338
-rw-r--r--drivers/staging/hv/Kconfig12
-rw-r--r--drivers/staging/hv/Makefile3
-rw-r--r--drivers/staging/hv/TODO4
-rw-r--r--drivers/staging/hv/storvsc_drv.c310
-rw-r--r--drivers/staging/iio/Documentation/generic_buffer.c8
-rw-r--r--drivers/staging/iio/Documentation/iio_utils.h26
-rw-r--r--drivers/staging/iio/Documentation/ring.txt10
-rw-r--r--drivers/staging/iio/Documentation/sysfs-bus-iio10
-rw-r--r--drivers/staging/iio/Kconfig1
-rw-r--r--drivers/staging/iio/accel/adis16201_core.c49
-rw-r--r--drivers/staging/iio/accel/adis16201_ring.c14
-rw-r--r--drivers/staging/iio/accel/adis16203_core.c37
-rw-r--r--drivers/staging/iio/accel/adis16203_ring.c14
-rw-r--r--drivers/staging/iio/accel/adis16204_core.c48
-rw-r--r--drivers/staging/iio/accel/adis16204_ring.c14
-rw-r--r--drivers/staging/iio/accel/adis16209_core.c45
-rw-r--r--drivers/staging/iio/accel/adis16209_ring.c9
-rw-r--r--drivers/staging/iio/accel/adis16220_core.c40
-rw-r--r--drivers/staging/iio/accel/adis16240_core.c45
-rw-r--r--drivers/staging/iio/accel/adis16240_ring.c9
-rw-r--r--drivers/staging/iio/accel/kxsd9.c23
-rw-r--r--drivers/staging/iio/accel/lis3l02dq.h12
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_core.c48
-rw-r--r--drivers/staging/iio/accel/lis3l02dq_ring.c61
-rw-r--r--drivers/staging/iio/accel/sca3000_core.c37
-rw-r--r--drivers/staging/iio/accel/sca3000_ring.c17
-rw-r--r--drivers/staging/iio/adc/ad7192.c97
-rw-r--r--drivers/staging/iio/adc/ad7280a.c41
-rw-r--r--drivers/staging/iio/adc/ad7291.c53
-rw-r--r--drivers/staging/iio/adc/ad7298.h5
-rw-r--r--drivers/staging/iio/adc/ad7298_core.c80
-rw-r--r--drivers/staging/iio/adc/ad7298_ring.c45
-rw-r--r--drivers/staging/iio/adc/ad7476.h5
-rw-r--r--drivers/staging/iio/adc/ad7476_core.c38
-rw-r--r--drivers/staging/iio/adc/ad7476_ring.c31
-rw-r--r--drivers/staging/iio/adc/ad7606.h1
-rw-r--r--drivers/staging/iio/adc/ad7606_core.c10
-rw-r--r--drivers/staging/iio/adc/ad7606_par.c1
-rw-r--r--drivers/staging/iio/adc/ad7606_ring.c30
-rw-r--r--drivers/staging/iio/adc/ad7606_spi.c16
-rw-r--r--drivers/staging/iio/adc/ad7780.c21
-rw-r--r--drivers/staging/iio/adc/ad7793.c108
-rw-r--r--drivers/staging/iio/adc/ad7816.c16
-rw-r--r--drivers/staging/iio/adc/ad7887.h5
-rw-r--r--drivers/staging/iio/adc/ad7887_core.c26
-rw-r--r--drivers/staging/iio/adc/ad7887_ring.c46
-rw-r--r--drivers/staging/iio/adc/ad799x.h6
-rw-r--r--drivers/staging/iio/adc/ad799x_core.c23
-rw-r--r--drivers/staging/iio/adc/ad799x_ring.c47
-rw-r--r--drivers/staging/iio/adc/adt7310.c17
-rw-r--r--drivers/staging/iio/adc/adt7410.c15
-rw-r--r--drivers/staging/iio/adc/max1363.h14
-rw-r--r--drivers/staging/iio/adc/max1363_core.c92
-rw-r--r--drivers/staging/iio/adc/max1363_ring.c92
-rw-r--r--drivers/staging/iio/addac/adt7316-i2c.c14
-rw-r--r--drivers/staging/iio/addac/adt7316-spi.c15
-rw-r--r--drivers/staging/iio/addac/adt7316.c1
-rw-r--r--drivers/staging/iio/buffer.h (renamed from drivers/staging/iio/buffer_generic.h)87
-rw-r--r--drivers/staging/iio/cdc/ad7150.c22
-rw-r--r--drivers/staging/iio/cdc/ad7152.c52
-rw-r--r--drivers/staging/iio/cdc/ad7746.c64
-rw-r--r--drivers/staging/iio/chrdev.h25
-rw-r--r--drivers/staging/iio/dac/Kconfig39
-rw-r--r--drivers/staging/iio/dac/Makefile3
-rw-r--r--drivers/staging/iio/dac/ad5064.c19
-rw-r--r--drivers/staging/iio/dac/ad5360.c37
-rw-r--r--drivers/staging/iio/dac/ad5380.c676
-rw-r--r--drivers/staging/iio/dac/ad5421.c555
-rw-r--r--drivers/staging/iio/dac/ad5421.h32
-rw-r--r--drivers/staging/iio/dac/ad5446.c201
-rw-r--r--drivers/staging/iio/dac/ad5446.h10
-rw-r--r--drivers/staging/iio/dac/ad5504.c145
-rw-r--r--drivers/staging/iio/dac/ad5504.h5
-rw-r--r--drivers/staging/iio/dac/ad5624r.h4
-rw-r--r--drivers/staging/iio/dac/ad5624r_spi.c140
-rw-r--r--drivers/staging/iio/dac/ad5686.c18
-rw-r--r--drivers/staging/iio/dac/ad5764.c393
-rw-r--r--drivers/staging/iio/dac/ad5791.c28
-rw-r--r--drivers/staging/iio/dac/max517.c14
-rw-r--r--drivers/staging/iio/dds/ad5930.c14
-rw-r--r--drivers/staging/iio/dds/ad9832.c18
-rw-r--r--drivers/staging/iio/dds/ad9834.c24
-rw-r--r--drivers/staging/iio/dds/ad9850.c14
-rw-r--r--drivers/staging/iio/dds/ad9852.c14
-rw-r--r--drivers/staging/iio/dds/ad9910.c14
-rw-r--r--drivers/staging/iio/dds/ad9951.c14
-rw-r--r--drivers/staging/iio/events.h103
-rw-r--r--drivers/staging/iio/gyro/Kconfig6
-rw-r--r--drivers/staging/iio/gyro/adis16060_core.c8
-rw-r--r--drivers/staging/iio/gyro/adis16080_core.c14
-rw-r--r--drivers/staging/iio/gyro/adis16130_core.c14
-rw-r--r--drivers/staging/iio/gyro/adis16260_core.c58
-rw-r--r--drivers/staging/iio/gyro/adis16260_ring.c9
-rw-r--r--drivers/staging/iio/gyro/adxrs450.h5
-rw-r--r--drivers/staging/iio/gyro/adxrs450_core.c101
-rw-r--r--drivers/staging/iio/iio.h144
-rw-r--r--drivers/staging/iio/iio_core.h11
-rw-r--r--drivers/staging/iio/iio_core_trigger.h3
-rw-r--r--drivers/staging/iio/iio_dummy_evgen.c4
-rw-r--r--drivers/staging/iio/iio_simple_dummy.c49
-rw-r--r--drivers/staging/iio/iio_simple_dummy_buffer.c12
-rw-r--r--drivers/staging/iio/iio_simple_dummy_events.c1
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c43
-rw-r--r--drivers/staging/iio/imu/adis16400.h2
-rw-r--r--drivers/staging/iio/imu/adis16400_core.c282
-rw-r--r--drivers/staging/iio/imu/adis16400_ring.c23
-rw-r--r--drivers/staging/iio/industrialio-buffer.c346
-rw-r--r--drivers/staging/iio/industrialio-core.c70
-rw-r--r--drivers/staging/iio/industrialio-trigger.c36
-rw-r--r--drivers/staging/iio/kfifo_buf.c72
-rw-r--r--drivers/staging/iio/kfifo_buf.h2
-rw-r--r--drivers/staging/iio/light/isl29018.c21
-rw-r--r--drivers/staging/iio/light/tsl2563.c25
-rw-r--r--drivers/staging/iio/light/tsl2583.c31
-rw-r--r--drivers/staging/iio/magnetometer/ak8975.c18
-rw-r--r--drivers/staging/iio/magnetometer/hmc5843.c19
-rw-r--r--drivers/staging/iio/meter/ade7753.c14
-rw-r--r--drivers/staging/iio/meter/ade7754.c14
-rw-r--r--drivers/staging/iio/meter/ade7758_core.c50
-rw-r--r--drivers/staging/iio/meter/ade7758_ring.c10
-rw-r--r--drivers/staging/iio/meter/ade7759.c14
-rw-r--r--drivers/staging/iio/meter/ade7854-i2c.c14
-rw-r--r--drivers/staging/iio/meter/ade7854-spi.c14
-rw-r--r--drivers/staging/iio/resolver/ad2s1200.c14
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.c14
-rw-r--r--drivers/staging/iio/resolver/ad2s90.c14
-rw-r--r--drivers/staging/iio/ring_sw.c151
-rw-r--r--drivers/staging/iio/ring_sw.h2
-rw-r--r--drivers/staging/iio/sysfs.h43
-rw-r--r--drivers/staging/iio/trigger.h2
-rw-r--r--drivers/staging/iio/trigger/iio-trig-periodic-rtc.c1
-rw-r--r--drivers/staging/iio/types.h49
-rw-r--r--drivers/staging/intel_sst/Kconfig19
-rw-r--r--drivers/staging/intel_sst/Makefile7
-rw-r--r--drivers/staging/intel_sst/TODO13
-rw-r--r--drivers/staging/intel_sst/intel_sst.c649
-rw-r--r--drivers/staging/intel_sst/intel_sst.h162
-rw-r--r--drivers/staging/intel_sst/intel_sst_app_interface.c1460
-rw-r--r--drivers/staging/intel_sst/intel_sst_common.h623
-rw-r--r--drivers/staging/intel_sst/intel_sst_drv_interface.c564
-rw-r--r--drivers/staging/intel_sst/intel_sst_dsp.c496
-rw-r--r--drivers/staging/intel_sst/intel_sst_fw_ipc.h416
-rw-r--r--drivers/staging/intel_sst/intel_sst_ioctl.h440
-rw-r--r--drivers/staging/intel_sst/intel_sst_ipc.c774
-rw-r--r--drivers/staging/intel_sst/intel_sst_pvt.c313
-rw-r--r--drivers/staging/intel_sst/intel_sst_stream.c583
-rw-r--r--drivers/staging/intel_sst/intel_sst_stream_encoded.c1273
-rw-r--r--drivers/staging/intel_sst/intelmid.c1022
-rw-r--r--drivers/staging/intel_sst/intelmid.h209
-rw-r--r--drivers/staging/intel_sst/intelmid_adc_control.h193
-rw-r--r--drivers/staging/intel_sst/intelmid_ctrl.c921
-rw-r--r--drivers/staging/intel_sst/intelmid_msic_control.c1047
-rw-r--r--drivers/staging/intel_sst/intelmid_pvt.c173
-rw-r--r--drivers/staging/intel_sst/intelmid_snd_control.h123
-rw-r--r--drivers/staging/intel_sst/intelmid_v0_control.c866
-rw-r--r--drivers/staging/intel_sst/intelmid_v1_control.c978
-rw-r--r--drivers/staging/intel_sst/intelmid_v2_control.c1156
-rw-r--r--drivers/staging/keucr/smilmain.c4
-rw-r--r--drivers/staging/keucr/usb.c24
-rw-r--r--drivers/staging/line6/Makefile3
-rw-r--r--drivers/staging/line6/capture.c44
-rw-r--r--drivers/staging/line6/capture.h2
-rw-r--r--drivers/staging/line6/driver.c74
-rw-r--r--drivers/staging/line6/driver.h10
-rw-r--r--drivers/staging/line6/midi.c37
-rw-r--r--drivers/staging/line6/midi.h4
-rw-r--r--drivers/staging/line6/pcm.c115
-rw-r--r--drivers/staging/line6/pcm.h8
-rw-r--r--drivers/staging/line6/playback.c53
-rw-r--r--drivers/staging/line6/playback.h2
-rw-r--r--drivers/staging/line6/pod.c6
-rw-r--r--drivers/staging/line6/podhd.c154
-rw-r--r--drivers/staging/line6/podhd.h30
-rw-r--r--drivers/staging/line6/revision.h2
-rw-r--r--drivers/staging/line6/toneport.c6
-rw-r--r--drivers/staging/line6/usbdefs.h91
-rw-r--r--drivers/staging/line6/variax.c6
-rw-r--r--drivers/staging/media/go7007/go7007-usb.c14
-rw-r--r--drivers/staging/media/go7007/snd-go7007.c2
-rw-r--r--drivers/staging/media/lirc/lirc_bt829.c2
-rw-r--r--drivers/staging/media/lirc/lirc_igorplugusb.c25
-rw-r--r--drivers/staging/media/lirc/lirc_imon.c24
-rw-r--r--drivers/staging/media/lirc/lirc_parallel.c6
-rw-r--r--drivers/staging/media/lirc/lirc_sasem.c25
-rw-r--r--drivers/staging/media/lirc/lirc_serial.c10
-rw-r--r--drivers/staging/media/lirc/lirc_sir.c2
-rw-r--r--drivers/staging/media/lirc/lirc_ttusbir.c22
-rw-r--r--drivers/staging/media/lirc/lirc_zilog.c4
-rw-r--r--drivers/staging/mei/init.c49
-rw-r--r--drivers/staging/mei/interface.c8
-rw-r--r--drivers/staging/mei/interface.h11
-rw-r--r--drivers/staging/mei/interrupt.c322
-rw-r--r--drivers/staging/mei/iorw.c77
-rw-r--r--drivers/staging/mei/main.c576
-rw-r--r--drivers/staging/mei/mei.txt226
-rw-r--r--drivers/staging/mei/mei_dev.h13
-rw-r--r--drivers/staging/mei/wd.c32
-rw-r--r--drivers/staging/nvec/nvec.c30
-rw-r--r--drivers/staging/octeon/Makefile5
-rw-r--r--drivers/staging/octeon/cvmx-address.h274
-rw-r--r--drivers/staging/octeon/cvmx-asxx-defs.h475
-rw-r--r--drivers/staging/octeon/cvmx-cmd-queue.c306
-rw-r--r--drivers/staging/octeon/cvmx-cmd-queue.h617
-rw-r--r--drivers/staging/octeon/cvmx-config.h169
-rw-r--r--drivers/staging/octeon/cvmx-dbg-defs.h72
-rw-r--r--drivers/staging/octeon/cvmx-fau.h597
-rw-r--r--drivers/staging/octeon/cvmx-fpa-defs.h403
-rw-r--r--drivers/staging/octeon/cvmx-fpa.c183
-rw-r--r--drivers/staging/octeon/cvmx-fpa.h299
-rw-r--r--drivers/staging/octeon/cvmx-gmxx-defs.h2529
-rw-r--r--drivers/staging/octeon/cvmx-helper-board.c695
-rw-r--r--drivers/staging/octeon/cvmx-helper-board.h151
-rw-r--r--drivers/staging/octeon/cvmx-helper-fpa.c243
-rw-r--r--drivers/staging/octeon/cvmx-helper-fpa.h64
-rw-r--r--drivers/staging/octeon/cvmx-helper-loop.c85
-rw-r--r--drivers/staging/octeon/cvmx-helper-loop.h59
-rw-r--r--drivers/staging/octeon/cvmx-helper-npi.c113
-rw-r--r--drivers/staging/octeon/cvmx-helper-npi.h60
-rw-r--r--drivers/staging/octeon/cvmx-helper-rgmii.c525
-rw-r--r--drivers/staging/octeon/cvmx-helper-rgmii.h110
-rw-r--r--drivers/staging/octeon/cvmx-helper-sgmii.c550
-rw-r--r--drivers/staging/octeon/cvmx-helper-sgmii.h104
-rw-r--r--drivers/staging/octeon/cvmx-helper-spi.c195
-rw-r--r--drivers/staging/octeon/cvmx-helper-spi.h84
-rw-r--r--drivers/staging/octeon/cvmx-helper-util.c433
-rw-r--r--drivers/staging/octeon/cvmx-helper-util.h215
-rw-r--r--drivers/staging/octeon/cvmx-helper-xaui.c348
-rw-r--r--drivers/staging/octeon/cvmx-helper-xaui.h103
-rw-r--r--drivers/staging/octeon/cvmx-helper.c1058
-rw-r--r--drivers/staging/octeon/cvmx-helper.h227
-rw-r--r--drivers/staging/octeon/cvmx-interrupt-decodes.c371
-rw-r--r--drivers/staging/octeon/cvmx-interrupt-rsl.c140
-rw-r--r--drivers/staging/octeon/cvmx-ipd.h338
-rw-r--r--drivers/staging/octeon/cvmx-mdio.h506
-rw-r--r--drivers/staging/octeon/cvmx-packet.h65
-rw-r--r--drivers/staging/octeon/cvmx-pcsx-defs.h370
-rw-r--r--drivers/staging/octeon/cvmx-pcsxx-defs.h316
-rw-r--r--drivers/staging/octeon/cvmx-pip-defs.h1267
-rw-r--r--drivers/staging/octeon/cvmx-pip.h524
-rw-r--r--drivers/staging/octeon/cvmx-pko-defs.h1133
-rw-r--r--drivers/staging/octeon/cvmx-pko.c506
-rw-r--r--drivers/staging/octeon/cvmx-pko.h610
-rw-r--r--drivers/staging/octeon/cvmx-pow.h1982
-rw-r--r--drivers/staging/octeon/cvmx-scratch.h139
-rw-r--r--drivers/staging/octeon/cvmx-smix-defs.h178
-rw-r--r--drivers/staging/octeon/cvmx-spi.c667
-rw-r--r--drivers/staging/octeon/cvmx-spi.h269
-rw-r--r--drivers/staging/octeon/cvmx-spxx-defs.h347
-rw-r--r--drivers/staging/octeon/cvmx-srxx-defs.h126
-rw-r--r--drivers/staging/octeon/cvmx-stxx-defs.h292
-rw-r--r--drivers/staging/octeon/cvmx-wqe.h397
-rw-r--r--drivers/staging/octeon/ethernet-defines.h2
-rw-r--r--drivers/staging/octeon/ethernet-mdio.c4
-rw-r--r--drivers/staging/octeon/ethernet-mem.c2
-rw-r--r--drivers/staging/octeon/ethernet-rgmii.c4
-rw-r--r--drivers/staging/octeon/ethernet-rx.c16
-rw-r--r--drivers/staging/octeon/ethernet-rx.h2
-rw-r--r--drivers/staging/octeon/ethernet-sgmii.c4
-rw-r--r--drivers/staging/octeon/ethernet-spi.c6
-rw-r--r--drivers/staging/octeon/ethernet-tx.c12
-rw-r--r--drivers/staging/octeon/ethernet-xaui.c4
-rw-r--r--drivers/staging/octeon/ethernet.c16
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.c18
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon.h2
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1.c10
-rw-r--r--drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c10
-rw-r--r--drivers/staging/omapdrm/Kconfig25
-rw-r--r--drivers/staging/omapdrm/Makefile22
-rw-r--r--drivers/staging/omapdrm/TODO38
-rw-r--r--drivers/staging/omapdrm/omap_connector.c371
-rw-r--r--drivers/staging/omapdrm/omap_crtc.c225
-rw-r--r--drivers/staging/omapdrm/omap_debugfs.c42
-rw-r--r--drivers/staging/omapdrm/omap_dmm_priv.h187
-rw-r--r--drivers/staging/omapdrm/omap_dmm_tiler.c830
-rw-r--r--drivers/staging/omapdrm/omap_dmm_tiler.h135
-rw-r--r--drivers/staging/omapdrm/omap_drm.h123
-rw-r--r--drivers/staging/omapdrm/omap_drv.c836
-rw-r--r--drivers/staging/omapdrm/omap_drv.h175
-rw-r--r--drivers/staging/omapdrm/omap_encoder.c171
-rw-r--r--drivers/staging/omapdrm/omap_fb.c353
-rw-r--r--drivers/staging/omapdrm/omap_fbdev.c387
-rw-r--r--drivers/staging/omapdrm/omap_gem.c1246
-rw-r--r--drivers/staging/omapdrm/omap_gem_helpers.c169
-rw-r--r--drivers/staging/omapdrm/omap_plane.c344
-rw-r--r--drivers/staging/omapdrm/omap_priv.h55
-rw-r--r--drivers/staging/omapdrm/tcm-sita.c703
-rw-r--r--drivers/staging/omapdrm/tcm-sita.h95
-rw-r--r--drivers/staging/omapdrm/tcm.h326
-rw-r--r--drivers/staging/phison/phison.c2
-rw-r--r--drivers/staging/pohmelfs/Kconfig20
-rw-r--r--drivers/staging/pohmelfs/Makefile3
-rw-r--r--drivers/staging/pohmelfs/config.c611
-rw-r--r--drivers/staging/pohmelfs/crypto.c878
-rw-r--r--drivers/staging/pohmelfs/dir.c1101
-rw-r--r--drivers/staging/pohmelfs/inode.c2056
-rw-r--r--drivers/staging/pohmelfs/lock.c182
-rw-r--r--drivers/staging/pohmelfs/mcache.c171
-rw-r--r--drivers/staging/pohmelfs/net.c1209
-rw-r--r--drivers/staging/pohmelfs/netfs.h919
-rw-r--r--drivers/staging/pohmelfs/path_entry.c120
-rw-r--r--drivers/staging/pohmelfs/trans.c706
-rw-r--r--drivers/staging/quatech_usb2/quatech_usb2.c2
-rw-r--r--drivers/staging/rtl8192e/Kconfig50
-rw-r--r--drivers/staging/rtl8192e/Makefile46
-rw-r--r--drivers/staging/rtl8192e/dot11d.c4
-rw-r--r--drivers/staging/rtl8192e/dot11d.h2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/Kconfig9
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/Makefile21
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_def.h (renamed from drivers/staging/rtl8192e/r8190P_def.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c (renamed from drivers/staging/rtl8192e/r8190P_rtl8256.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h (renamed from drivers/staging/rtl8192e/r8190P_rtl8256.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c (renamed from drivers/staging/rtl8192e/r8192E_cmdpkt.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h (renamed from drivers/staging/rtl8192e/r8192E_cmdpkt.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c (renamed from drivers/staging/rtl8192e/r8192E_dev.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h (renamed from drivers/staging/rtl8192e/r8192E_dev.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c (renamed from drivers/staging/rtl8192e/r8192E_firmware.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h (renamed from drivers/staging/rtl8192e/r8192E_firmware.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h (renamed from drivers/staging/rtl8192e/r8192E_hw.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.c (renamed from drivers/staging/rtl8192e/r8192E_hwimg.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.h (renamed from drivers/staging/rtl8192e/r8192E_hwimg.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c (renamed from drivers/staging/rtl8192e/r8192E_phy.c)3
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h (renamed from drivers/staging/rtl8192e/r8192E_phy.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h (renamed from drivers/staging/rtl8192e/r8192E_phyreg.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/r819xE_phyreg.h (renamed from drivers/staging/rtl8192e/r819xE_phyreg.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.c (renamed from drivers/staging/rtl8192e/rtl_cam.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_cam.h (renamed from drivers/staging/rtl8192e/rtl_cam.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.c (renamed from drivers/staging/rtl8192e/rtl_core.c)68
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_core.h (renamed from drivers/staging/rtl8192e/rtl_core.h)53
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_crypto.h (renamed from drivers/staging/rtl8192e/rtl_crypto.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_debug.c (renamed from drivers/staging/rtl8192e/rtl_debug.c)79
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.c (renamed from drivers/staging/rtl8192e/rtl_dm.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_dm.h (renamed from drivers/staging/rtl8192e/rtl_dm.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c (renamed from drivers/staging/rtl8192e/rtl_eeprom.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h (renamed from drivers/staging/rtl8192e/rtl_eeprom.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c (renamed from drivers/staging/rtl8192e/rtl_ethtool.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pci.c (renamed from drivers/staging/rtl8192e/rtl_pci.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pci.h (renamed from drivers/staging/rtl8192e/rtl_pci.h)1
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.c (renamed from drivers/staging/rtl8192e/rtl_pm.c)2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_pm.h (renamed from drivers/staging/rtl8192e/rtl_pm.h)4
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.c (renamed from drivers/staging/rtl8192e/rtl_ps.c)0
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_ps.h (renamed from drivers/staging/rtl8192e/rtl_ps.h)2
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.c (renamed from drivers/staging/rtl8192e/rtl_wx.c)3
-rw-r--r--drivers/staging/rtl8192e/rtl8192e/rtl_wx.h (renamed from drivers/staging/rtl8192e/rtl_wx.h)0
-rw-r--r--drivers/staging/rtl8192e/rtl819x_BAProc.c1
-rw-r--r--drivers/staging/rtl8192e/rtl819x_HTProc.c5
-rw-r--r--drivers/staging/rtl8192e/rtl819x_TSProc.c1
-rw-r--r--drivers/staging/rtl8192e/rtl_debug.h299
-rw-r--r--drivers/staging/rtl8192e/rtllib.h113
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt.c80
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt.h64
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_ccmp.c26
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_tkip.c38
-rw-r--r--drivers/staging/rtl8192e/rtllib_crypt_wep.c25
-rw-r--r--drivers/staging/rtl8192e/rtllib_debug.h86
-rw-r--r--drivers/staging/rtl8192e/rtllib_module.c38
-rw-r--r--drivers/staging/rtl8192e/rtllib_rx.c21
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac.c96
-rw-r--r--drivers/staging/rtl8192e/rtllib_softmac_wx.c19
-rw-r--r--drivers/staging/rtl8192e/rtllib_tx.c19
-rw-r--r--drivers/staging/rtl8192e/rtllib_wx.c109
-rw-r--r--drivers/staging/rtl8192u/ieee80211/api.c244
-rw-r--r--drivers/staging/rtl8712/drv_types.h7
-rw-r--r--drivers/staging/rtl8712/hal_init.c62
-rw-r--r--drivers/staging/rtl8712/os_intfs.c14
-rw-r--r--drivers/staging/rtl8712/rtl8712_hal.h1
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c2
-rw-r--r--drivers/staging/rtl8712/rtl871x_sta_mgt.c1
-rw-r--r--drivers/staging/rtl8712/usb_intf.c10
-rw-r--r--drivers/staging/rts5139/rts51x.c32
-rw-r--r--drivers/staging/rts5139/rts51x.h1
-rw-r--r--drivers/staging/rts5139/rts51x_transport.h1
-rw-r--r--drivers/staging/rts_pstor/rtsx.c2
-rw-r--r--drivers/staging/sep/sep_driver.c4
-rw-r--r--drivers/staging/serial/68360serial.c4
-rw-r--r--drivers/staging/serqt_usb2/serqt_usb2.c2
-rw-r--r--drivers/staging/sm7xx/smtcfb.c6
-rw-r--r--drivers/staging/speakup/kobjects.c3
-rw-r--r--drivers/staging/speakup/main.c5
-rw-r--r--drivers/staging/speakup/speakup.h2
-rw-r--r--drivers/staging/speakup/synth.c2
-rw-r--r--drivers/staging/spectra/Kconfig41
-rw-r--r--drivers/staging/spectra/Makefile11
-rw-r--r--drivers/staging/spectra/README29
-rw-r--r--drivers/staging/spectra/ffsdefs.h58
-rw-r--r--drivers/staging/spectra/ffsport.c834
-rw-r--r--drivers/staging/spectra/ffsport.h85
-rw-r--r--drivers/staging/spectra/flash.c4305
-rw-r--r--drivers/staging/spectra/flash.h198
-rw-r--r--drivers/staging/spectra/lld.c339
-rw-r--r--drivers/staging/spectra/lld.h111
-rw-r--r--drivers/staging/spectra/lld_cdma.c910
-rw-r--r--drivers/staging/spectra/lld_cdma.h123
-rw-r--r--drivers/staging/spectra/lld_emu.c776
-rw-r--r--drivers/staging/spectra/lld_emu.h51
-rw-r--r--drivers/staging/spectra/lld_mtd.c683
-rw-r--r--drivers/staging/spectra/lld_mtd.h51
-rw-r--r--drivers/staging/spectra/lld_nand.c2619
-rw-r--r--drivers/staging/spectra/lld_nand.h131
-rw-r--r--drivers/staging/spectra/nand_regs.h619
-rw-r--r--drivers/staging/spectra/spectraswconfig.h82
-rw-r--r--drivers/staging/tidspbridge/Kconfig2
-rw-r--r--drivers/staging/tidspbridge/core/tiomap3430.c2
-rw-r--r--drivers/staging/tidspbridge/rmgr/dbdcd.c2
-rw-r--r--drivers/staging/tidspbridge/rmgr/drv_interface.c58
-rw-r--r--drivers/staging/usbip/stub_dev.c5
-rw-r--r--drivers/staging/usbip/stub_main.c4
-rw-r--r--drivers/staging/usbip/stub_rx.c2
-rw-r--r--drivers/staging/usbip/usbip_common.c61
-rw-r--r--drivers/staging/usbip/usbip_common.h17
-rw-r--r--drivers/staging/usbip/vhci_rx.c2
-rw-r--r--drivers/staging/vme/TODO67
-rw-r--r--drivers/staging/vme/bridges/vme_ca91cx42.c31
-rw-r--r--drivers/staging/vme/bridges/vme_tsi148.c38
-rw-r--r--drivers/staging/vme/devices/Kconfig13
-rw-r--r--drivers/staging/vme/devices/Makefile3
-rw-r--r--drivers/staging/vme/devices/vme_pio2.h249
-rw-r--r--drivers/staging/vme/devices/vme_pio2_cntr.c71
-rw-r--r--drivers/staging/vme/devices/vme_pio2_core.c524
-rw-r--r--drivers/staging/vme/devices/vme_pio2_gpio.c232
-rw-r--r--drivers/staging/vme/devices/vme_user.h10
-rw-r--r--drivers/staging/vme/vme.c69
-rw-r--r--drivers/staging/vme/vme.h38
-rw-r--r--drivers/staging/vme/vme_api.txt61
-rw-r--r--drivers/staging/vme/vme_bridge.h38
-rw-r--r--drivers/staging/vt6655/device_main.c6
-rw-r--r--drivers/staging/vt6655/ioctl.c8
-rw-r--r--drivers/staging/vt6655/iwctl.c12
-rw-r--r--drivers/staging/vt6655/iwctl.h5
-rw-r--r--drivers/staging/vt6656/80211mgr.c35
-rw-r--r--drivers/staging/vt6656/baseband.c61
-rw-r--r--drivers/staging/vt6656/bssdb.c100
-rw-r--r--drivers/staging/vt6656/card.c14
-rw-r--r--drivers/staging/vt6656/card.h4
-rw-r--r--drivers/staging/vt6656/int.c5
-rw-r--r--drivers/staging/vt6656/int.h2
-rw-r--r--drivers/staging/vt6656/ioctl.c8
-rw-r--r--drivers/staging/vt6656/iwctl.c12
-rw-r--r--drivers/staging/vt6656/iwctl.h5
-rw-r--r--drivers/staging/vt6656/mac.c4
-rw-r--r--drivers/staging/vt6656/mac.h2
-rw-r--r--drivers/staging/vt6656/main_usb.c26
-rw-r--r--drivers/staging/winbond/wbusb.c13
-rw-r--r--drivers/staging/wlags49_h2/debug.h2
-rw-r--r--drivers/staging/wlags49_h2/dhfcfg.h2
-rw-r--r--drivers/staging/wlags49_h2/hcf.c6
-rw-r--r--drivers/staging/wlags49_h2/hcf.h6
-rw-r--r--drivers/staging/wlags49_h2/hcfcfg.h6
-rw-r--r--drivers/staging/wlags49_h2/hcfdef.h6
-rw-r--r--drivers/staging/wlags49_h2/mdd.h6
-rw-r--r--drivers/staging/wlags49_h2/mmd.c2
-rw-r--r--drivers/staging/wlags49_h2/mmd.h2
-rw-r--r--drivers/staging/wlags49_h2/wl_cs.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_enc.c4
-rw-r--r--drivers/staging/wlags49_h2/wl_enc.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_if.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_internal.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_main.c4
-rw-r--r--drivers/staging/wlags49_h2/wl_main.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.c4
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_pci.c17
-rw-r--r--drivers/staging/wlags49_h2/wl_pci.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_priv.c4
-rw-r--r--drivers/staging/wlags49_h2/wl_priv.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_profile.c4
-rw-r--r--drivers/staging/wlags49_h2/wl_profile.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_util.c4
-rw-r--r--drivers/staging/wlags49_h2/wl_util.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_version.h4
-rw-r--r--drivers/staging/wlags49_h2/wl_wext.c4
-rw-r--r--drivers/staging/wlags49_h2/wl_wext.h4
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c14
-rw-r--r--drivers/staging/xgifb/Makefile2
-rw-r--r--drivers/staging/xgifb/XGI_main.h13
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c158
-rw-r--r--drivers/staging/xgifb/XGIfb.h3
-rw-r--r--drivers/staging/xgifb/vb_ext.c444
-rw-r--r--drivers/staging/xgifb/vb_ext.h9
-rw-r--r--drivers/staging/xgifb/vb_init.c276
-rw-r--r--drivers/staging/xgifb/vb_setmode.c1003
-rw-r--r--drivers/staging/xgifb/vb_setmode.h52
-rw-r--r--drivers/staging/xgifb/vb_struct.h4
-rw-r--r--drivers/staging/xgifb/vb_table.h27
-rw-r--r--drivers/staging/xgifb/vgatypes.h2
-rw-r--r--drivers/staging/zcache/zcache-main.c31
-rw-r--r--drivers/staging/zram/zram_drv.c3
-rw-r--r--drivers/staging/zram/zram_sysfs.c6
-rw-r--r--drivers/target/iscsi/iscsi_target.c60
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c36
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c12
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h6
-rw-r--r--drivers/target/iscsi/iscsi_target_device.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_erl0.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c62
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_nodeattrib.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_stat.c17
-rw-r--r--drivers/target/iscsi/iscsi_target_tmr.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c6
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c19
-rw-r--r--drivers/target/loopback/tcm_loop.c33
-rw-r--r--drivers/target/loopback/tcm_loop.h11
-rw-r--r--drivers/target/target_core_alua.c15
-rw-r--r--drivers/target/target_core_cdb.c90
-rw-r--r--drivers/target/target_core_cdb.h14
-rw-r--r--drivers/target/target_core_configfs.c42
-rw-r--r--drivers/target/target_core_device.c43
-rw-r--r--drivers/target/target_core_fabric_configfs.c12
-rw-r--r--drivers/target/target_core_fabric_lib.c13
-rw-r--r--drivers/target/target_core_file.c15
-rw-r--r--drivers/target/target_core_hba.c7
-rw-r--r--drivers/target/target_core_hba.h7
-rw-r--r--drivers/target/target_core_iblock.c18
-rw-r--r--drivers/target/target_core_internal.h123
-rw-r--r--drivers/target/target_core_pr.c101
-rw-r--r--drivers/target/target_core_pr.h2
-rw-r--r--drivers/target/target_core_pscsi.c24
-rw-r--r--drivers/target/target_core_rd.c6
-rw-r--r--drivers/target/target_core_stat.c9
-rw-r--r--drivers/target/target_core_stat.h8
-rw-r--r--drivers/target/target_core_tmr.c36
-rw-r--r--drivers/target/target_core_tpg.c12
-rw-r--r--drivers/target/target_core_transport.c446
-rw-r--r--drivers/target/target_core_ua.c6
-rw-r--r--drivers/target/tcm_fc/tfc_cmd.c62
-rw-r--r--drivers/target/tcm_fc/tfc_conf.c6
-rw-r--r--drivers/target/tcm_fc/tfc_io.c5
-rw-r--r--drivers/target/tcm_fc/tfc_sess.c5
-rw-r--r--drivers/thermal/thermal_sys.c4
-rw-r--r--drivers/tty/Kconfig2
-rw-r--r--drivers/tty/n_hdlc.c6
-rw-r--r--drivers/tty/n_tty.c8
-rw-r--r--drivers/tty/pty.c26
-rw-r--r--drivers/tty/rocket.c2
-rw-r--r--drivers/tty/serial/8250/8250.c (renamed from drivers/tty/serial/8250.c)100
-rw-r--r--drivers/tty/serial/8250/8250.h (renamed from drivers/tty/serial/8250.h)26
-rw-r--r--drivers/tty/serial/8250/8250_accent.c (renamed from drivers/tty/serial/8250_accent.c)0
-rw-r--r--drivers/tty/serial/8250/8250_acorn.c (renamed from drivers/tty/serial/8250_acorn.c)0
-rw-r--r--drivers/tty/serial/8250/8250_boca.c (renamed from drivers/tty/serial/8250_boca.c)0
-rw-r--r--drivers/tty/serial/8250/8250_dw.c (renamed from drivers/tty/serial/8250_dw.c)12
-rw-r--r--drivers/tty/serial/8250/8250_early.c (renamed from drivers/tty/serial/8250_early.c)0
-rw-r--r--drivers/tty/serial/8250/8250_exar_st16c554.c (renamed from drivers/tty/serial/8250_exar_st16c554.c)0
-rw-r--r--drivers/tty/serial/8250/8250_fourport.c (renamed from drivers/tty/serial/8250_fourport.c)0
-rw-r--r--drivers/tty/serial/8250/8250_fsl.c63
-rw-r--r--drivers/tty/serial/8250/8250_gsc.c (renamed from drivers/tty/serial/8250_gsc.c)0
-rw-r--r--drivers/tty/serial/8250/8250_hp300.c (renamed from drivers/tty/serial/8250_hp300.c)0
-rw-r--r--drivers/tty/serial/8250/8250_hub6.c (renamed from drivers/tty/serial/8250_hub6.c)0
-rw-r--r--drivers/tty/serial/8250/8250_mca.c (renamed from drivers/tty/serial/8250_mca.c)0
-rw-r--r--drivers/tty/serial/8250/8250_pci.c (renamed from drivers/tty/serial/8250_pci.c)47
-rw-r--r--drivers/tty/serial/8250/8250_pnp.c (renamed from drivers/tty/serial/8250_pnp.c)0
-rw-r--r--drivers/tty/serial/8250/Kconfig280
-rw-r--r--drivers/tty/serial/8250/Makefile20
-rw-r--r--drivers/tty/serial/8250/serial_cs.c (renamed from drivers/tty/serial/serial_cs.c)8
-rw-r--r--drivers/tty/serial/Kconfig379
-rw-r--r--drivers/tty/serial/Makefile26
-rw-r--r--drivers/tty/serial/amba-pl010.c2
-rw-r--r--drivers/tty/serial/amba-pl011.c40
-rw-r--r--drivers/tty/serial/apbuart.c4
-rw-r--r--drivers/tty/serial/ar933x_uart.c688
-rw-r--r--drivers/tty/serial/atmel_serial.c12
-rw-r--r--drivers/tty/serial/bfin_sport_uart.c22
-rw-r--r--drivers/tty/serial/bfin_sport_uart.h5
-rw-r--r--drivers/tty/serial/bfin_uart.c83
-rw-r--r--drivers/tty/serial/ifx6x60.c1
-rw-r--r--drivers/tty/serial/imx.c148
-rw-r--r--drivers/tty/serial/jsm/jsm_driver.c1
-rw-r--r--drivers/tty/serial/m32r_sio.c7
-rw-r--r--drivers/tty/serial/max3100.c1
-rw-r--r--drivers/tty/serial/max3107-aava.c345
-rw-r--r--drivers/tty/serial/max3107.c1
-rw-r--r--drivers/tty/serial/mfd.c18
-rw-r--r--drivers/tty/serial/mrst_max3110.c1
-rw-r--r--drivers/tty/serial/msm_serial_hs.c23
-rw-r--r--drivers/tty/serial/mxs-auart.c13
-rw-r--r--drivers/tty/serial/omap-serial.c454
-rw-r--r--drivers/tty/serial/pch_uart.c164
-rw-r--r--drivers/tty/serial/pmac_zilog.c423
-rw-r--r--drivers/tty/serial/pmac_zilog.h19
-rw-r--r--drivers/tty/serial/s3c2410.c115
-rw-r--r--drivers/tty/serial/s3c2412.c149
-rw-r--r--drivers/tty/serial/s3c2440.c178
-rw-r--r--drivers/tty/serial/s3c6400.c149
-rw-r--r--drivers/tty/serial/s5pv210.c158
-rw-r--r--drivers/tty/serial/samsung.c640
-rw-r--r--drivers/tty/serial/samsung.h32
-rw-r--r--drivers/tty/serial/sc26xx.c14
-rw-r--r--drivers/tty/serial/serial_core.c331
-rw-r--r--drivers/tty/serial/sh-sci.c192
-rw-r--r--drivers/tty/serial/sh-sci.h4
-rw-r--r--drivers/tty/serial/sirfsoc_uart.c783
-rw-r--r--drivers/tty/serial/sirfsoc_uart.h185
-rw-r--r--drivers/tty/serial/timbuart.c15
-rw-r--r--drivers/tty/serial/ucc_uart.c3
-rw-r--r--drivers/tty/serial/vr41xx_siu.c13
-rw-r--r--drivers/tty/synclink.c2
-rw-r--r--drivers/tty/synclinkmp.c2
-rw-r--r--drivers/tty/sysrq.c2
-rw-r--r--drivers/tty/tty_io.c311
-rw-r--r--drivers/tty/tty_ldisc.c22
-rw-r--r--drivers/tty/tty_port.c12
-rw-r--r--drivers/tty/vt/consolemap.c2
-rw-r--r--drivers/tty/vt/vt_ioctl.c1
-rw-r--r--drivers/uio/uio_pci_generic.c78
-rw-r--r--drivers/uio/uio_pdrv.c12
-rw-r--r--drivers/uio/uio_pdrv_genirq.c13
-rw-r--r--drivers/uio/uio_pruss.c14
-rw-r--r--drivers/usb/Kconfig1
-rw-r--r--drivers/usb/Makefile3
-rw-r--r--drivers/usb/atm/cxacru.c13
-rw-r--r--drivers/usb/atm/speedtch.c23
-rw-r--r--drivers/usb/atm/ueagle-atm.c33
-rw-r--r--drivers/usb/c67x00/c67x00-drv.c15
-rw-r--r--drivers/usb/c67x00/c67x00-hcd.c1
-rw-r--r--drivers/usb/class/cdc-acm.c338
-rw-r--r--drivers/usb/class/cdc-acm.h1
-rw-r--r--drivers/usb/class/cdc-wdm.c78
-rw-r--r--drivers/usb/class/usblp.c15
-rw-r--r--drivers/usb/class/usbtmc.c17
-rw-r--r--drivers/usb/core/devio.c191
-rw-r--r--drivers/usb/core/driver.c36
-rw-r--r--drivers/usb/core/file.c2
-rw-r--r--drivers/usb/core/hcd-pci.c9
-rw-r--r--drivers/usb/core/hcd.c37
-rw-r--r--drivers/usb/core/hub.c127
-rw-r--r--drivers/usb/core/inode.c31
-rw-r--r--drivers/usb/core/quirks.c5
-rw-r--r--drivers/usb/core/sysfs.c4
-rw-r--r--drivers/usb/core/usb.c4
-rw-r--r--drivers/usb/core/usb.h14
-rw-r--r--drivers/usb/dwc3/Kconfig5
-rw-r--r--drivers/usb/dwc3/Makefile6
-rw-r--r--drivers/usb/dwc3/core.c209
-rw-r--r--drivers/usb/dwc3/core.h62
-rw-r--r--drivers/usb/dwc3/debugfs.c99
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c43
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c51
-rw-r--r--drivers/usb/dwc3/ep0.c173
-rw-r--r--drivers/usb/dwc3/gadget.c440
-rw-r--r--drivers/usb/dwc3/gadget.h29
-rw-r--r--drivers/usb/dwc3/host.c102
-rw-r--r--drivers/usb/dwc3/io.h2
-rw-r--r--drivers/usb/gadget/Kconfig29
-rw-r--r--drivers/usb/gadget/Makefile2
-rw-r--r--drivers/usb/gadget/amd5536udc.c12
-rw-r--r--drivers/usb/gadget/at91_udc.c16
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c2
-rw-r--r--drivers/usb/gadget/ci13xxx_udc.c36
-rw-r--r--drivers/usb/gadget/ci13xxx_udc.h2
-rw-r--r--drivers/usb/gadget/composite.c15
-rw-r--r--drivers/usb/gadget/dbgp.c2
-rw-r--r--drivers/usb/gadget/dummy_hcd.c15
-rw-r--r--drivers/usb/gadget/epautoconf.c8
-rw-r--r--drivers/usb/gadget/ether.c4
-rw-r--r--drivers/usb/gadget/f_fs.c33
-rw-r--r--drivers/usb/gadget/f_loopback.c2
-rw-r--r--drivers/usb/gadget/f_mass_storage.c70
-rw-r--r--drivers/usb/gadget/f_phonet.c11
-rw-r--r--drivers/usb/gadget/file_storage.c74
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c19
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c8
-rw-r--r--drivers/usb/gadget/fusb300_udc.c4
-rw-r--r--drivers/usb/gadget/goku_udc.c3
-rw-r--r--drivers/usb/gadget/imx_udc.c2
-rw-r--r--drivers/usb/gadget/inode.c32
-rw-r--r--drivers/usb/gadget/langwell_udc.c109
-rw-r--r--drivers/usb/gadget/langwell_udc.h1
-rw-r--r--drivers/usb/gadget/m66592-udc.c4
-rw-r--r--drivers/usb/gadget/mv_udc.h7
-rw-r--r--drivers/usb/gadget/mv_udc_core.c344
-rw-r--r--drivers/usb/gadget/net2272.c6
-rw-r--r--drivers/usb/gadget/net2280.c10
-rw-r--r--drivers/usb/gadget/omap_udc.c5
-rw-r--r--drivers/usb/gadget/pch_udc.c6
-rw-r--r--drivers/usb/gadget/printer.c6
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c2
-rw-r--r--drivers/usb/gadget/pxa27x_udc.c2
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c4
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c17
-rw-r--r--drivers/usb/gadget/s3c-hsudc.c136
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c8
-rw-r--r--drivers/usb/gadget/s3c2410_udc.h2
-rw-r--r--drivers/usb/gadget/serial.c4
-rw-r--r--drivers/usb/gadget/storage_common.c6
-rw-r--r--drivers/usb/gadget/udc-core.c26
-rw-r--r--drivers/usb/gadget/usbstring.c73
-rw-r--r--drivers/usb/gadget/zero.c2
-rw-r--r--drivers/usb/host/Kconfig25
-rw-r--r--drivers/usb/host/alchemy-common.c277
-rw-r--r--drivers/usb/host/ehci-ath79.c4
-rw-r--r--drivers/usb/host/ehci-au1xxx.c1
-rw-r--r--drivers/usb/host/ehci-fsl.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c71
-rw-r--r--drivers/usb/host/ehci-mv.c391
-rw-r--r--drivers/usb/host/ehci-octeon.c2
-rw-r--r--drivers/usb/host/ehci-omap.c19
-rw-r--r--drivers/usb/host/ehci-orion.c10
-rw-r--r--drivers/usb/host/ehci-pci.c6
-rw-r--r--drivers/usb/host/ehci-ps3.c30
-rw-r--r--drivers/usb/host/ehci-pxa168.c2
-rw-r--r--drivers/usb/host/ehci-q.c13
-rw-r--r--drivers/usb/host/ehci-s5p.c4
-rw-r--r--drivers/usb/host/ehci-tegra.c71
-rw-r--r--drivers/usb/host/ehci-vt8500.c2
-rw-r--r--drivers/usb/host/ehci-w90x900.c2
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c2
-rw-r--r--drivers/usb/host/ehci-xls.c2
-rw-r--r--drivers/usb/host/fhci-hcd.c12
-rw-r--r--drivers/usb/host/fsl-mph-dr-of.c12
-rw-r--r--drivers/usb/host/hwa-hc.c16
-rw-r--r--drivers/usb/host/imx21-hcd.c15
-rw-r--r--drivers/usb/host/isp1760-hcd.c74
-rw-r--r--drivers/usb/host/isp1760-if.c19
-rw-r--r--drivers/usb/host/ohci-at91.c36
-rw-r--r--drivers/usb/host/ohci-au1xxx.c18
-rw-r--r--drivers/usb/host/ohci-dbg.c30
-rw-r--r--drivers/usb/host/ohci-ep93xx.c2
-rw-r--r--drivers/usb/host/ohci-exynos.c274
-rw-r--r--drivers/usb/host/ohci-hcd.c37
-rw-r--r--drivers/usb/host/ohci-hub.c7
-rw-r--r--drivers/usb/host/ohci-omap.c1
-rw-r--r--drivers/usb/host/ohci-omap3.c18
-rw-r--r--drivers/usb/host/ohci-pci.c9
-rw-r--r--drivers/usb/host/ohci-pxa27x.c2
-rw-r--r--drivers/usb/host/ohci-q.c8
-rw-r--r--drivers/usb/host/ohci-s3c2410.c55
-rw-r--r--drivers/usb/host/ohci-sh.c1
-rw-r--r--drivers/usb/host/ohci-sm501.c1
-rw-r--r--drivers/usb/host/ohci-spear.c1
-rw-r--r--drivers/usb/host/ohci-tmio.c3
-rw-r--r--drivers/usb/host/ohci-xls.c2
-rw-r--r--drivers/usb/host/ohci.h14
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c21
-rw-r--r--drivers/usb/host/pci-quirks.c17
-rw-r--r--drivers/usb/host/u132-hcd.c2
-rw-r--r--drivers/usb/host/uhci-hcd.c2
-rw-r--r--drivers/usb/host/uhci-q.c2
-rw-r--r--drivers/usb/host/whci/qset.c4
-rw-r--r--drivers/usb/host/xhci-hub.c20
-rw-r--r--drivers/usb/host/xhci-mem.c46
-rw-r--r--drivers/usb/host/xhci-ring.c119
-rw-r--r--drivers/usb/host/xhci.c36
-rw-r--r--drivers/usb/host/xhci.h3
-rw-r--r--drivers/usb/image/microtek.c14
-rw-r--r--drivers/usb/misc/adutux.c35
-rw-r--r--drivers/usb/misc/cypress_cy7c63.c22
-rw-r--r--drivers/usb/misc/cytherm.c26
-rw-r--r--drivers/usb/misc/emi26.c16
-rw-r--r--drivers/usb/misc/emi62.c20
-rw-r--r--drivers/usb/misc/ftdi-elan.c2
-rw-r--r--drivers/usb/misc/idmouse.c24
-rw-r--r--drivers/usb/misc/iowarrior.c17
-rw-r--r--drivers/usb/misc/isight_firmware.c19
-rw-r--r--drivers/usb/misc/ldusb.c27
-rw-r--r--drivers/usb/misc/legousbtower.c48
-rw-r--r--drivers/usb/misc/rio500.c28
-rw-r--r--drivers/usb/misc/trancevibrator.c21
-rw-r--r--drivers/usb/misc/usblcd.c20
-rw-r--r--drivers/usb/misc/usbled.c20
-rw-r--r--drivers/usb/misc/usbsevseg.c20
-rw-r--r--drivers/usb/misc/usbtest.c1
-rw-r--r--drivers/usb/misc/yurex.c22
-rw-r--r--drivers/usb/musb/Kconfig61
-rw-r--r--drivers/usb/musb/Makefile26
-rw-r--r--drivers/usb/musb/cppi_dma.c4
-rw-r--r--drivers/usb/musb/davinci.c3
-rw-r--r--drivers/usb/musb/musb_core.c15
-rw-r--r--drivers/usb/musb/musb_core.h4
-rw-r--r--drivers/usb/musb/musb_debug.h4
-rw-r--r--drivers/usb/musb/musb_debugfs.c8
-rw-r--r--drivers/usb/musb/musb_gadget.c6
-rw-r--r--drivers/usb/musb/musb_gadget_ep0.c1
-rw-r--r--drivers/usb/musb/musb_io.h5
-rw-r--r--drivers/usb/musb/omap2430.c63
-rw-r--r--drivers/usb/musb/tusb6010.c1
-rw-r--r--drivers/usb/musb/ux500_dma.c43
-rw-r--r--drivers/usb/otg/Kconfig48
-rw-r--r--drivers/usb/otg/Makefile2
-rw-r--r--drivers/usb/otg/ab8500-usb.c2
-rw-r--r--drivers/usb/otg/fsl_otg.c15
-rw-r--r--drivers/usb/otg/langwell_otg.c2347
-rw-r--r--drivers/usb/otg/mv_otg.c959
-rw-r--r--drivers/usb/otg/mv_otg.h165
-rw-r--r--drivers/usb/renesas_usbhs/common.c52
-rw-r--r--drivers/usb/renesas_usbhs/common.h9
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c13
-rw-r--r--drivers/usb/renesas_usbhs/fifo.h3
-rw-r--r--drivers/usb/renesas_usbhs/mod.c4
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c197
-rw-r--r--drivers/usb/renesas_usbhs/mod_host.c952
-rw-r--r--drivers/usb/renesas_usbhs/pipe.c31
-rw-r--r--drivers/usb/renesas_usbhs/pipe.h1
-rw-r--r--drivers/usb/serial/ChangeLog.history730
-rw-r--r--drivers/usb/serial/aircable.c2
-rw-r--r--drivers/usb/serial/ark3116.c2
-rw-r--r--drivers/usb/serial/belkin_sa.c45
-rw-r--r--drivers/usb/serial/ch341.c5
-rw-r--r--drivers/usb/serial/cp210x.c168
-rw-r--r--drivers/usb/serial/cyberjack.c35
-rw-r--r--drivers/usb/serial/cypress_m8.c35
-rw-r--r--drivers/usb/serial/digi_acceleport.c229
-rw-r--r--drivers/usb/serial/empeg.c2
-rw-r--r--drivers/usb/serial/ftdi_sio.c21
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h32
-rw-r--r--drivers/usb/serial/funsoft.c2
-rw-r--r--drivers/usb/serial/garmin_gps.c11
-rw-r--r--drivers/usb/serial/generic.c83
-rw-r--r--drivers/usb/serial/io_edgeport.c5
-rw-r--r--drivers/usb/serial/io_ti.c42
-rw-r--r--drivers/usb/serial/ipaq.c36
-rw-r--r--drivers/usb/serial/ipw.c2
-rw-r--r--drivers/usb/serial/ir-usb.c34
-rw-r--r--drivers/usb/serial/iuu_phoenix.c9
-rw-r--r--drivers/usb/serial/keyspan.c92
-rw-r--r--drivers/usb/serial/keyspan_pda.c68
-rw-r--r--drivers/usb/serial/kl5kusb105.c2
-rw-r--r--drivers/usb/serial/kobil_sct.c27
-rw-r--r--drivers/usb/serial/mct_u232.c48
-rw-r--r--drivers/usb/serial/mos7720.c20
-rw-r--r--drivers/usb/serial/mos7840.c6
-rw-r--r--drivers/usb/serial/navman.c2
-rw-r--r--drivers/usb/serial/omninet.c53
-rw-r--r--drivers/usb/serial/opticon.c3
-rw-r--r--drivers/usb/serial/option.c52
-rw-r--r--drivers/usb/serial/oti6858.c25
-rw-r--r--drivers/usb/serial/pl2303.c19
-rw-r--r--drivers/usb/serial/qcaux.c7
-rw-r--r--drivers/usb/serial/qcserial.c18
-rw-r--r--drivers/usb/serial/safe_serial.c6
-rw-r--r--drivers/usb/serial/sierra.c5
-rw-r--r--drivers/usb/serial/spcp8x5.c2
-rw-r--r--drivers/usb/serial/ssu100.c2
-rw-r--r--drivers/usb/serial/symbolserial.c3
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c21
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.h4
-rw-r--r--drivers/usb/serial/usb-serial.c100
-rw-r--r--drivers/usb/serial/usb_debug.c13
-rw-r--r--drivers/usb/serial/usb_wwan.c2
-rw-r--r--drivers/usb/serial/visor.c2
-rw-r--r--drivers/usb/serial/whiteheat.c60
-rw-r--r--drivers/usb/storage/alauda.c15
-rw-r--r--drivers/usb/storage/cypress_atacb.c15
-rw-r--r--drivers/usb/storage/datafab.c15
-rw-r--r--drivers/usb/storage/ene_ub6250.c25
-rw-r--r--drivers/usb/storage/freecom.c15
-rw-r--r--drivers/usb/storage/isd200.c17
-rw-r--r--drivers/usb/storage/jumpshot.c15
-rw-r--r--drivers/usb/storage/karma.c15
-rw-r--r--drivers/usb/storage/onetouch.c15
-rw-r--r--drivers/usb/storage/realtek_cr.c29
-rw-r--r--drivers/usb/storage/sddr09.c15
-rw-r--r--drivers/usb/storage/sddr55.c15
-rw-r--r--drivers/usb/storage/shuttle_usbat.c15
-rw-r--r--drivers/usb/storage/uas.c13
-rw-r--r--drivers/usb/storage/unusual_devs.h2
-rw-r--r--drivers/usb/storage/usb.c90
-rw-r--r--drivers/usb/storage/usb.h7
-rw-r--r--drivers/usb/usb-skeleton.c43
-rw-r--r--drivers/usb/wusbcore/Kconfig3
-rw-r--r--drivers/usb/wusbcore/cbaf.c12
-rw-r--r--drivers/usb/wusbcore/security.c2
-rw-r--r--drivers/uwb/est.c2
-rw-r--r--drivers/uwb/hwa-rc.c12
-rw-r--r--drivers/uwb/i1480/dfu/usb.c22
-rw-r--r--drivers/vhost/net.c8
-rw-r--r--drivers/video/Kconfig9
-rw-r--r--drivers/video/Makefile2
-rw-r--r--drivers/video/amba-clcd.c2
-rw-r--r--drivers/video/amifb.c3732
-rw-r--r--drivers/video/atmel_lcdfb.c39
-rw-r--r--drivers/video/aty/atyfb_base.c4
-rw-r--r--drivers/video/aty/radeon_base.c18
-rw-r--r--drivers/video/au1100fb.c12
-rw-r--r--drivers/video/au1200fb.c273
-rw-r--r--drivers/video/backlight/88pm860x_bl.c12
-rw-r--r--drivers/video/backlight/Kconfig8
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/adp5520_bl.c12
-rw-r--r--drivers/video/backlight/adp8860_bl.c2
-rw-r--r--drivers/video/backlight/adp8870_bl.c2
-rw-r--r--drivers/video/backlight/adx_bl.c182
-rw-r--r--drivers/video/backlight/backlight.c6
-rw-r--r--drivers/video/backlight/da903x_bl.c12
-rw-r--r--drivers/video/backlight/ep93xx_bl.c13
-rw-r--r--drivers/video/backlight/generic_bl.c13
-rw-r--r--drivers/video/backlight/jornada720_bl.c13
-rw-r--r--drivers/video/backlight/jornada720_lcd.c13
-rw-r--r--drivers/video/backlight/l4f00242t03.c2
-rw-r--r--drivers/video/backlight/lcd.c26
-rw-r--r--drivers/video/backlight/ld9040.c71
-rw-r--r--drivers/video/backlight/max8925_bl.c12
-rw-r--r--drivers/video/backlight/omap1_bl.c13
-rw-r--r--drivers/video/backlight/pcf50633-backlight.c12
-rw-r--r--drivers/video/backlight/platform_lcd.c22
-rw-r--r--drivers/video/backlight/pwm_bl.c33
-rw-r--r--drivers/video/backlight/wm831x_bl.c12
-rw-r--r--drivers/video/bf54x-lq043fb.c2
-rw-r--r--drivers/video/bfin-t350mcqb-fb.c2
-rw-r--r--drivers/video/cirrusfb.c270
-rw-r--r--drivers/video/console/Kconfig2
-rw-r--r--drivers/video/console/newport_con.c63
-rw-r--r--drivers/video/controlfb.c2
-rw-r--r--drivers/video/display/Kconfig24
-rw-r--r--drivers/video/display/Makefile6
-rw-r--r--drivers/video/display/display-sysfs.c219
-rw-r--r--drivers/video/fbmem.c14
-rw-r--r--drivers/video/fsl-diu-fb.c587
-rw-r--r--drivers/video/grvga.c4
-rw-r--r--drivers/video/hgafb.c2
-rw-r--r--drivers/video/i810/i810_main.c16
-rw-r--r--drivers/video/intelfb/intelfbdrv.c19
-rw-r--r--drivers/video/logo/logo.c2
-rw-r--r--drivers/video/macfb.c60
-rw-r--r--drivers/video/matrox/matroxfb_base.c1
-rw-r--r--drivers/video/matrox/matroxfb_crtc2.c1
-rw-r--r--drivers/video/mbx/mbxfb.c13
-rw-r--r--drivers/video/mx3fb.c65
-rw-r--r--drivers/video/mxsfb.c21
-rw-r--r--drivers/video/neofb.c10
-rw-r--r--drivers/video/nuc900fb.c13
-rw-r--r--drivers/video/nvidia/nvidia.c6
-rw-r--r--drivers/video/offb.c71
-rw-r--r--drivers/video/omap/lcd_ams_delta.c15
-rw-r--r--drivers/video/omap/lcd_h3.c16
-rw-r--r--drivers/video/omap/lcd_htcherald.c16
-rw-r--r--drivers/video/omap/lcd_inn1510.c16
-rw-r--r--drivers/video/omap/lcd_inn1610.c16
-rw-r--r--drivers/video/omap/lcd_mipid.c1
-rw-r--r--drivers/video/omap/lcd_osk.c16
-rw-r--r--drivers/video/omap/lcd_palmte.c16
-rw-r--r--drivers/video/omap/lcd_palmtt.c15
-rw-r--r--drivers/video/omap/lcd_palmz71.c15
-rw-r--r--drivers/video/omap/omapfb_main.c4
-rw-r--r--drivers/video/omap/rfbi.c2
-rw-r--r--drivers/video/omap/sossi.c2
-rw-r--r--drivers/video/omap2/displays/Kconfig4
-rw-r--r--drivers/video/omap2/displays/panel-acx565akm.c1
-rw-r--r--drivers/video/omap2/displays/panel-generic-dpi.c66
-rw-r--r--drivers/video/omap2/displays/panel-n8x0.c1
-rw-r--r--drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c62
-rw-r--r--drivers/video/omap2/displays/panel-taal.c38
-rw-r--r--drivers/video/omap2/displays/panel-tpo-td043mtea1.c1
-rw-r--r--drivers/video/omap2/dss/Makefile3
-rw-r--r--drivers/video/omap2/dss/apply.c1330
-rw-r--r--drivers/video/omap2/dss/core.c4
-rw-r--r--drivers/video/omap2/dss/dispc.c409
-rw-r--r--drivers/video/omap2/dss/dispc.h11
-rw-r--r--drivers/video/omap2/dss/dispc_coefs.c326
-rw-r--r--drivers/video/omap2/dss/dpi.c12
-rw-r--r--drivers/video/omap2/dss/dsi.c618
-rw-r--r--drivers/video/omap2/dss/dss.c2
-rw-r--r--drivers/video/omap2/dss/dss.h76
-rw-r--r--drivers/video/omap2/dss/dss_features.c11
-rw-r--r--drivers/video/omap2/dss/dss_features.h1
-rw-r--r--drivers/video/omap2/dss/hdmi.c88
-rw-r--r--drivers/video/omap2/dss/manager.c1221
-rw-r--r--drivers/video/omap2/dss/overlay.c435
-rw-r--r--drivers/video/omap2/dss/rfbi.c3
-rw-r--r--drivers/video/omap2/dss/sdi.c8
-rw-r--r--drivers/video/omap2/dss/ti_hdmi.h14
-rw-r--r--drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c114
-rw-r--r--drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h3
-rw-r--r--drivers/video/omap2/dss/venc.c30
-rw-r--r--drivers/video/omap2/omapfb/Kconfig2
-rw-r--r--drivers/video/omap2/omapfb/omapfb-ioctl.c42
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c22
-rw-r--r--drivers/video/omap2/omapfb/omapfb-sysfs.c4
-rw-r--r--drivers/video/omap2/omapfb/omapfb.h13
-rw-r--r--drivers/video/pm2fb.c8
-rw-r--r--drivers/video/pm3fb.c4
-rw-r--r--drivers/video/pnx4008/pnxrgbfb.c13
-rw-r--r--drivers/video/pnx4008/sdum.c13
-rw-r--r--drivers/video/pvr2fb.c2
-rw-r--r--drivers/video/pxa168fb.c12
-rw-r--r--drivers/video/pxa3xx-gcu.c15
-rw-r--r--drivers/video/riva/fbdev.c6
-rw-r--r--drivers/video/s3c-fb.c202
-rw-r--r--drivers/video/s3c2410fb.c29
-rw-r--r--drivers/video/s3fb.c30
-rw-r--r--drivers/video/sbuslib.c2
-rw-r--r--drivers/video/sh7760fb.c13
-rw-r--r--drivers/video/sh_mipi_dsi.c218
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c375
-rw-r--r--drivers/video/sh_mobile_meram.c13
-rw-r--r--drivers/video/sm501fb.c13
-rw-r--r--drivers/video/smscufx.c23
-rw-r--r--drivers/video/sstfb.c6
-rw-r--r--drivers/video/tdfxfb.c2
-rw-r--r--drivers/video/udlfb.c25
-rw-r--r--drivers/video/uvesafb.c6
-rw-r--r--drivers/video/vfb.c2
-rw-r--r--drivers/video/via/hw.c4
-rw-r--r--drivers/video/vt8500lcdfb.c13
-rw-r--r--drivers/video/w100fb.c13
-rw-r--r--drivers/video/wm8505fb.c13
-rw-r--r--drivers/video/wmt_ge_rops.c13
-rw-r--r--drivers/video/xen-fbfront.c9
-rw-r--r--drivers/video/xilinxfb.c20
-rw-r--r--drivers/virtio/virtio_balloon.c119
-rw-r--r--drivers/virtio/virtio_mmio.c10
-rw-r--r--drivers/virtio/virtio_pci.c118
-rw-r--r--drivers/virtio/virtio_ring.c249
-rw-r--r--drivers/w1/masters/ds2490.c21
-rw-r--r--drivers/w1/slaves/w1_therm.c36
-rw-r--r--drivers/w1/w1.c10
-rw-r--r--drivers/watchdog/Kconfig15
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/ar7_wdt.c17
-rw-r--r--drivers/watchdog/at91sam9_wdt.c22
-rw-r--r--drivers/watchdog/at91sam9_wdt.h6
-rw-r--r--drivers/watchdog/ath79_wdt.c6
-rw-r--r--drivers/watchdog/bcm63xx_wdt.c13
-rw-r--r--drivers/watchdog/booke_wdt.c6
-rw-r--r--drivers/watchdog/cpu5wdt.c3
-rw-r--r--drivers/watchdog/cpwd.c13
-rw-r--r--drivers/watchdog/davinci_wdt.c13
-rw-r--r--drivers/watchdog/dw_wdt.c18
-rw-r--r--drivers/watchdog/eurotechwdt.c4
-rw-r--r--drivers/watchdog/f71808e_wdt.c2
-rw-r--r--drivers/watchdog/hpwdt.c5
-rw-r--r--drivers/watchdog/iTCO_wdt.c35
-rw-r--r--drivers/watchdog/ibmasr.c4
-rw-r--r--drivers/watchdog/imx2_wdt.c11
-rw-r--r--drivers/watchdog/indydog.c4
-rw-r--r--drivers/watchdog/iop_wdt.c5
-rw-r--r--drivers/watchdog/ixp2000_wdt.c3
-rw-r--r--drivers/watchdog/ixp4xx_wdt.c1
-rw-r--r--drivers/watchdog/jz4740_wdt.c13
-rw-r--r--drivers/watchdog/ks8695_wdt.c3
-rw-r--r--drivers/watchdog/lantiq_wdt.c3
-rw-r--r--drivers/watchdog/max63xx_wdt.c13
-rw-r--r--drivers/watchdog/mpc8xxx_wdt.c2
-rw-r--r--drivers/watchdog/mtx-1_wdt.c13
-rw-r--r--drivers/watchdog/nuc900_wdt.c18
-rw-r--r--drivers/watchdog/of_xilinx_wdt.c13
-rw-r--r--drivers/watchdog/omap_wdt.c19
-rw-r--r--drivers/watchdog/orion_wdt.c16
-rw-r--r--drivers/watchdog/pcwd_usb.c35
-rw-r--r--drivers/watchdog/pnx4008_wdt.c17
-rw-r--r--drivers/watchdog/rc32434_wdt.c13
-rw-r--r--drivers/watchdog/rdc321x_wdt.c13
-rw-r--r--drivers/watchdog/riowd.c13
-rw-r--r--drivers/watchdog/s3c2410_wdt.c59
-rw-r--r--drivers/watchdog/sp805_wdt.c2
-rw-r--r--drivers/watchdog/stmp3xxx_wdt.c15
-rw-r--r--drivers/watchdog/ts72xx_wdt.c12
-rw-r--r--drivers/watchdog/twl4030_wdt.c12
-rw-r--r--drivers/watchdog/via_wdt.c267
-rw-r--r--drivers/watchdog/w83627hf_wdt.c4
-rw-r--r--drivers/watchdog/wafer5823wdt.c4
-rw-r--r--drivers/watchdog/wm831x_wdt.c23
-rw-r--r--drivers/watchdog/wm8350_wdt.c16
-rw-r--r--drivers/xen/Kconfig7
-rw-r--r--drivers/xen/Makefile2
-rw-r--r--drivers/xen/biomerge.c2
-rw-r--r--drivers/xen/cpu_hotplug.c3
-rw-r--r--drivers/xen/events.c77
-rw-r--r--drivers/xen/evtchn.c2
-rw-r--r--drivers/xen/gntalloc.c121
-rw-r--r--drivers/xen/gntdev.c34
-rw-r--r--drivers/xen/grant-table.c521
-rw-r--r--drivers/xen/privcmd.c (renamed from drivers/xen/xenfs/privcmd.c)41
-rw-r--r--drivers/xen/privcmd.h3
-rw-r--r--drivers/xen/swiotlb-xen.c2
-rw-r--r--drivers/xen/xen-balloon.c86
-rw-r--r--drivers/xen/xen-pciback/conf_space.c2
-rw-r--r--drivers/xen/xen-pciback/pci_stub.c12
-rw-r--r--drivers/xen/xen-pciback/xenbus.c26
-rw-r--r--drivers/xen/xen-selfballoon.c76
-rw-r--r--drivers/xen/xenbus/Makefile2
-rw-r--r--drivers/xen/xenbus/xenbus_client.c193
-rw-r--r--drivers/xen/xenbus/xenbus_comms.h4
-rw-r--r--drivers/xen/xenbus/xenbus_dev_backend.c90
-rw-r--r--drivers/xen/xenbus/xenbus_dev_frontend.c (renamed from drivers/xen/xenfs/xenbus.c)44
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c9
-rw-r--r--drivers/xen/xenbus/xenbus_probe.h6
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c8
-rw-r--r--drivers/xen/xenbus/xenbus_probe_frontend.c8
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c23
-rw-r--r--drivers/xen/xenfs/Makefile2
-rw-r--r--drivers/xen/xenfs/super.c6
-rw-r--r--drivers/xen/xenfs/xenfs.h2
-rw-r--r--drivers/zorro/zorro.ids2
4153 files changed, 223334 insertions, 197381 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index b5e6f24..5afe5d1 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -116,6 +116,8 @@ source "drivers/vlynq/Kconfig"
source "drivers/virtio/Kconfig"
+source "drivers/hv/Kconfig"
+
source "drivers/xen/Kconfig"
source "drivers/staging/Kconfig"
@@ -132,8 +134,6 @@ source "drivers/iommu/Kconfig"
source "drivers/virt/Kconfig"
-source "drivers/hv/Kconfig"
-
source "drivers/devfreq/Kconfig"
endmenu
diff --git a/drivers/Makefile b/drivers/Makefile
index 1b31421..c07be02 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -97,7 +97,7 @@ obj-$(CONFIG_EISA) += eisa/
obj-y += lguest/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_CPU_IDLE) += cpuidle/
-obj-$(CONFIG_MMC) += mmc/
+obj-y += mmc/
obj-$(CONFIG_MEMSTICK) += memstick/
obj-y += leds/
obj-$(CONFIG_INFINIBAND) += infiniband/
diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c
index cb423f5..c339a08 100644
--- a/drivers/accessibility/braille/braille_console.c
+++ b/drivers/accessibility/braille/braille_console.c
@@ -44,7 +44,7 @@ MODULE_LICENSE("GPL");
*/
/* Emit various sounds */
-static int sound;
+static bool sound;
module_param(sound, bool, 0);
MODULE_PARM_DESC(sound, "emit sounds");
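For context, a minimal sketch of the pattern this hunk settles on (illustrative only, not taken from the patch): module_param() with the "bool" type expects the backing variable itself to be declared bool, so the storage and the declared parameter type now agree.

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Backing storage must be bool when the type given to module_param() is "bool". */
static bool sound;
module_param(sound, bool, 0);
MODULE_PARM_DESC(sound, "emit sounds");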
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index de0e3df..7556913 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -374,7 +374,7 @@ config ACPI_CUSTOM_METHOD
depends on DEBUG_FS
default n
help
- This debug facility allows ACPI AML methods to me inserted and/or
+ This debug facility allows ACPI AML methods to be inserted and/or
replaced without rebooting the system. For details refer to:
Documentation/acpi/method-customizing.txt.
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index ecb26b4..1567028 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -19,12 +19,12 @@ obj-y += acpi.o \
# All the builtin files are in the "acpi." module_param namespace.
acpi-y += osl.o utils.o reboot.o
-acpi-y += atomicio.o
+acpi-y += nvs.o
# sleep related files
acpi-y += wakeup.o
acpi-y += sleep.o
-acpi-$(CONFIG_ACPI_SLEEP) += proc.o nvs.o
+acpi-$(CONFIG_ACPI_SLEEP) += proc.o
#
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 301bd2d..0ca208b 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -8,41 +8,151 @@ ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
# use acpi.o to put all files here into acpi.o modparam namespace
obj-y += acpi.o
-acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \
- dsmethod.o dsobject.o dsutils.o dswload.o dswstate.o \
- dsinit.o dsargs.o dscontrol.o dswload2.o
+acpi-y := \
+ dsargs.o \
+ dscontrol.o \
+ dsfield.o \
+ dsinit.o \
+ dsmethod.o \
+ dsmthdat.o \
+ dsobject.o \
+ dsopcode.o \
+ dsutils.o \
+ dswexec.o \
+ dswload.o \
+ dswload2.o \
+ dswscope.o \
+ dswstate.o
-acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \
- evmisc.o evrgnini.o evxface.o evxfregn.o \
- evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o evxfgpe.o evglock.o
+acpi-y += \
+ evevent.o \
+ evgpe.o \
+ evgpeblk.o \
+ evgpeinit.o \
+ evgpeutil.o \
+ evglock.o \
+ evmisc.o \
+ evregion.o \
+ evrgnini.o \
+ evsci.o \
+ evxface.o \
+ evxfevnt.o \
+ evxfgpe.o \
+ evxfregn.o
-acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\
- exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\
- excreate.o exmisc.o exoparg2.o exregion.o exstore.o exutils.o \
- exdump.o exmutex.o exoparg3.o exresnte.o exstoren.o exdebug.o
+acpi-y += \
+ exconfig.o \
+ exconvrt.o \
+ excreate.o \
+ exdebug.o \
+ exdump.o \
+ exfield.o \
+ exfldio.o \
+ exmutex.o \
+ exnames.o \
+ exoparg1.o \
+ exoparg2.o \
+ exoparg3.o \
+ exoparg6.o \
+ exprep.o \
+ exmisc.o \
+ exregion.o \
+ exresnte.o \
+ exresolv.o \
+ exresop.o \
+ exstore.o \
+ exstoren.o \
+ exstorob.o \
+ exsystem.o \
+ exutils.o
-acpi-y += hwacpi.o hwgpe.o hwregs.o hwsleep.o hwxface.o hwvalid.o hwpci.o
+acpi-y += \
+ hwacpi.o \
+ hwgpe.o \
+ hwpci.o \
+ hwregs.o \
+ hwsleep.o \
+ hwvalid.o \
+ hwxface.o
acpi-$(ACPI_FUTURE_USAGE) += hwtimer.o
-acpi-y += nsaccess.o nsload.o nssearch.o nsxfeval.o \
- nsalloc.o nseval.o nsnames.o nsutils.o nsxfname.o \
- nsdump.o nsinit.o nsobject.o nswalk.o nsxfobj.o \
- nsparse.o nspredef.o nsrepair.o nsrepair2.o
+acpi-y += \
+ nsaccess.o \
+ nsalloc.o \
+ nsdump.o \
+ nseval.o \
+ nsinit.o \
+ nsload.o \
+ nsnames.o \
+ nsobject.o \
+ nsparse.o \
+ nspredef.o \
+ nsrepair.o \
+ nsrepair2.o \
+ nssearch.o \
+ nsutils.o \
+ nswalk.o \
+ nsxfeval.o \
+ nsxfname.o \
+ nsxfobj.o
acpi-$(ACPI_FUTURE_USAGE) += nsdumpdv.o
-acpi-y += psargs.o psparse.o psloop.o pstree.o pswalk.o \
- psopcode.o psscope.o psutils.o psxface.o
+acpi-y += \
+ psargs.o \
+ psloop.o \
+ psopcode.o \
+ psparse.o \
+ psscope.o \
+ pstree.o \
+ psutils.o \
+ pswalk.o \
+ psxface.o
-acpi-y += rsaddr.o rscreate.o rsinfo.o rsio.o rslist.o rsmisc.o rsxface.o \
- rscalc.o rsirq.o rsmemory.o rsutils.o
+acpi-y += \
+ rsaddr.o \
+ rscalc.o \
+ rscreate.o \
+ rsinfo.o \
+ rsio.o \
+ rsirq.o \
+ rslist.o \
+ rsmemory.o \
+ rsmisc.o \
+ rsserial.o \
+ rsutils.o \
+ rsxface.o
acpi-$(ACPI_FUTURE_USAGE) += rsdump.o
-acpi-y += tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o
+acpi-y += \
+ tbfadt.o \
+ tbfind.o \
+ tbinstal.o \
+ tbutils.o \
+ tbxface.o \
+ tbxfroot.o
-acpi-y += utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \
- utcopy.o utdelete.o utglobal.o utmath.o utobject.o \
- utstate.o utmutex.o utobject.o utresrc.o utlock.o utids.o \
- utosi.o utxferror.o utdecode.o
+acpi-y += \
+ utaddress.o \
+ utalloc.o \
+ utcopy.o \
+ utdebug.o \
+ utdecode.o \
+ utdelete.o \
+ uteval.o \
+ utglobal.o \
+ utids.o \
+ utinit.o \
+ utlock.o \
+ utmath.o \
+ utmisc.o \
+ utmutex.o \
+ utobject.o \
+ utosi.o \
+ utresrc.o \
+ utstate.o \
+ utxface.o \
+ utxferror.o \
+ utxfmutex.o
diff --git a/drivers/acpi/acpica/accommon.h b/drivers/acpi/acpica/accommon.h
index e0ba17f..a44bd42 100644
--- a/drivers/acpi/acpica/accommon.h
+++ b/drivers/acpi/acpica/accommon.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acconfig.h b/drivers/acpi/acpica/acconfig.h
index f895a24..1f30af6 100644
--- a/drivers/acpi/acpica/acconfig.h
+++ b/drivers/acpi/acpica/acconfig.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -123,6 +123,10 @@
#define ACPI_MAX_SLEEP 2000 /* Two seconds */
+/* Address Range lists are per-space_id (Memory and I/O only) */
+
+#define ACPI_ADDRESS_RANGE_MAX 2
+
/******************************************************************************
*
* ACPI Specification constants (Do not change unless the specification changes)
@@ -202,9 +206,10 @@
#define ACPI_RSDP_CHECKSUM_LENGTH 20
#define ACPI_RSDP_XCHECKSUM_LENGTH 36
-/* SMBus and IPMI bidirectional buffer size */
+/* SMBus, GSBus and IPMI bidirectional buffer size */
#define ACPI_SMBUS_BUFFER_SIZE 34
+#define ACPI_GSBUS_BUFFER_SIZE 34
#define ACPI_IPMI_BUFFER_SIZE 66
/* _sx_d and _sx_w control methods */
diff --git a/drivers/acpi/acpica/acdebug.h b/drivers/acpi/acpica/acdebug.h
index eb0b1f8..deaa819 100644
--- a/drivers/acpi/acpica/acdebug.h
+++ b/drivers/acpi/acpica/acdebug.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 2d1b7ff..5935ba6 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index bea3b48..c53caa5 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -162,6 +162,7 @@ acpi_status acpi_ev_initialize_op_regions(void);
acpi_status
acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ union acpi_operand_object *field_obj,
u32 function,
u32 region_offset, u32 bit_width, u64 *value);
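The prototype above gains a field_obj argument, evidently to carry the new serial/connection field information added elsewhere in this patch; a hypothetical call site that has no such field object would pass NULL there and stay otherwise unchanged (sketch only, helper name invented for illustration):

/* Hypothetical helper: dispatch a read when no connection field object exists. */
static acpi_status
example_region_read(union acpi_operand_object *region_obj,
		    u32 region_offset, u32 bit_width, u64 *value)
{
	return acpi_ev_address_space_dispatch(region_obj, NULL /* field_obj */,
					      ACPI_READ, region_offset,
					      bit_width, value);
}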
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 76dc02f..2853f76 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -108,7 +108,7 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_use_default_register_widths, TRUE);
/*
* Optionally enable output from the AML Debug Object.
*/
-u32 ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE);
+bool ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE);
/*
* Optionally copy the entire DSDT to local memory (instead of simply
@@ -140,8 +140,19 @@ u32 acpi_gbl_trace_flags;
acpi_name acpi_gbl_trace_method_name;
u8 acpi_gbl_system_awake_and_running;
+/*
+ * ACPI 5.0 introduces the concept of a "reduced hardware platform", meaning
+ * that the ACPI hardware is no longer required. A flag in the FADT indicates
+ * a reduced HW machine, and that flag is duplicated here for convenience.
+ */
+u8 acpi_gbl_reduced_hardware;
+
#endif
+/* Do not disassemble buffers to resource descriptors */
+
+ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_no_resource_disassembly, FALSE);
+
/*****************************************************************************
*
* Debug support
@@ -207,7 +218,7 @@ ACPI_EXTERN struct acpi_rw_lock acpi_gbl_namespace_rw_lock;
/*****************************************************************************
*
- * Mutual exlusion within ACPICA subsystem
+ * Mutual exclusion within ACPICA subsystem
*
****************************************************************************/
@@ -295,6 +306,8 @@ ACPI_EXTERN u8 acpi_gbl_acpi_hardware_present;
ACPI_EXTERN u8 acpi_gbl_events_initialized;
ACPI_EXTERN u8 acpi_gbl_osi_data;
ACPI_EXTERN struct acpi_interface_info *acpi_gbl_supported_interfaces;
+ACPI_EXTERN struct acpi_address_range
+ *acpi_gbl_address_range_list[ACPI_ADDRESS_RANGE_MAX];
#ifndef DEFINE_ACPI_GLOBALS
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index e7213be..677793e 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 3731e1c..eb30863 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -468,6 +468,8 @@ void acpi_ex_eisa_id_to_string(char *dest, u64 compressed_id);
void acpi_ex_integer_to_string(char *dest, u64 value);
+u8 acpi_is_valid_space_id(u8 space_id);
+
/*
* exregion - default op_region handlers
*/
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 5552125..3f24068 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -53,7 +53,7 @@ typedef u32 acpi_mutex_handle;
/* Total number of aml opcodes defined */
-#define AML_NUM_OPCODES 0x7F
+#define AML_NUM_OPCODES 0x81
/* Forward declarations */
@@ -249,12 +249,16 @@ struct acpi_create_field_info {
struct acpi_namespace_node *field_node;
struct acpi_namespace_node *register_node;
struct acpi_namespace_node *data_register_node;
+ struct acpi_namespace_node *connection_node;
+ u8 *resource_buffer;
u32 bank_value;
u32 field_bit_position;
u32 field_bit_length;
+ u16 resource_length;
u8 field_flags;
u8 attribute;
u8 field_type;
+ u8 access_length;
};
typedef
@@ -315,7 +319,8 @@ struct acpi_name_info {
/*
* Used for ACPI_PTYPE1_FIXED, ACPI_PTYPE1_VAR, ACPI_PTYPE2,
- * ACPI_PTYPE2_MIN, ACPI_PTYPE2_PKG_COUNT, ACPI_PTYPE2_COUNT
+ * ACPI_PTYPE2_MIN, ACPI_PTYPE2_PKG_COUNT, ACPI_PTYPE2_COUNT,
+ * ACPI_PTYPE2_FIX_VAR
*/
struct acpi_package_info {
u8 type;
@@ -625,6 +630,15 @@ union acpi_generic_state {
typedef acpi_status(*ACPI_EXECUTE_OP) (struct acpi_walk_state * walk_state);
+/* Address Range info block */
+
+struct acpi_address_range {
+ struct acpi_address_range *next;
+ struct acpi_namespace_node *region_node;
+ acpi_physical_address start_address;
+ acpi_physical_address end_address;
+};
+
/*****************************************************************************
*
* Parser typedefs and structs
@@ -951,7 +965,7 @@ struct acpi_port_info {
#define ACPI_RESOURCE_NAME_END_DEPENDENT 0x38
#define ACPI_RESOURCE_NAME_IO 0x40
#define ACPI_RESOURCE_NAME_FIXED_IO 0x48
-#define ACPI_RESOURCE_NAME_RESERVED_S1 0x50
+#define ACPI_RESOURCE_NAME_FIXED_DMA 0x50
#define ACPI_RESOURCE_NAME_RESERVED_S2 0x58
#define ACPI_RESOURCE_NAME_RESERVED_S3 0x60
#define ACPI_RESOURCE_NAME_RESERVED_S4 0x68
@@ -973,7 +987,9 @@ struct acpi_port_info {
#define ACPI_RESOURCE_NAME_EXTENDED_IRQ 0x89
#define ACPI_RESOURCE_NAME_ADDRESS64 0x8A
#define ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 0x8B
-#define ACPI_RESOURCE_NAME_LARGE_MAX 0x8B
+#define ACPI_RESOURCE_NAME_GPIO 0x8C
+#define ACPI_RESOURCE_NAME_SERIAL_BUS 0x8E
+#define ACPI_RESOURCE_NAME_LARGE_MAX 0x8E
/*****************************************************************************
*
diff --git a/drivers/acpi/acpica/acmacros.h b/drivers/acpi/acpica/acmacros.h
index b7491ee..ef338a9 100644
--- a/drivers/acpi/acpica/acmacros.h
+++ b/drivers/acpi/acpica/acmacros.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acnamesp.h b/drivers/acpi/acpica/acnamesp.h
index 79a598c..2c9e0f0 100644
--- a/drivers/acpi/acpica/acnamesp.h
+++ b/drivers/acpi/acpica/acnamesp.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acobject.h b/drivers/acpi/acpica/acobject.h
index 1055769..c065078 100644
--- a/drivers/acpi/acpica/acobject.h
+++ b/drivers/acpi/acpica/acobject.h
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -254,6 +254,7 @@ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
u32 base_byte_offset; /* Byte offset within containing object */\
u32 value; /* Value to store into the Bank or Index register */\
u8 start_field_bit_offset;/* Bit offset within first field datum (0-63) */\
+ u8 access_length; /* For serial regions/fields */
struct acpi_object_field_common { /* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */
@@ -261,7 +262,9 @@ struct acpi_object_field_common { /* COMMON FIELD (for BUFFER, REGION, BANK, and
};
struct acpi_object_region_field {
- ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Containing op_region object */
+ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u16 resource_length;
+ union acpi_operand_object *region_obj; /* Containing op_region object */
+ u8 *resource_buffer; /* resource_template for serial regions/fields */
};
struct acpi_object_bank_field {
@@ -358,6 +361,7 @@ typedef enum {
*/
struct acpi_object_extra {
ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *method_REG; /* _REG method for this region (if any) */
+ struct acpi_namespace_node *scope_node;
void *region_context; /* Region-specific data */
u8 *aml_start;
u32 aml_length;
diff --git a/drivers/acpi/acpica/acopcode.h b/drivers/acpi/acpica/acopcode.h
index bb2ccfa..9440d05 100644
--- a/drivers/acpi/acpica/acopcode.h
+++ b/drivers/acpi/acpica/acopcode.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -93,6 +93,7 @@
#define ARGP_CONCAT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
#define ARGP_CONCAT_RES_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
#define ARGP_COND_REF_OF_OP ARGP_LIST2 (ARGP_SUPERNAME, ARGP_SUPERNAME)
+#define ARGP_CONNECTFIELD_OP ARGP_LIST1 (ARGP_NAMESTRING)
#define ARGP_CONTINUE_OP ARG_NONE
#define ARGP_COPY_OP ARGP_LIST2 (ARGP_TERMARG, ARGP_SIMPLENAME)
#define ARGP_CREATE_BIT_FIELD_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_NAME)
@@ -164,6 +165,7 @@
#define ARGP_RETURN_OP ARGP_LIST1 (ARGP_TERMARG)
#define ARGP_REVISION_OP ARG_NONE
#define ARGP_SCOPE_OP ARGP_LIST3 (ARGP_PKGLENGTH, ARGP_NAME, ARGP_TERMLIST)
+#define ARGP_SERIALFIELD_OP ARGP_LIST1 (ARGP_NAMESTRING)
#define ARGP_SHIFT_LEFT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
#define ARGP_SHIFT_RIGHT_OP ARGP_LIST3 (ARGP_TERMARG, ARGP_TERMARG, ARGP_TARGET)
#define ARGP_SIGNAL_OP ARGP_LIST1 (ARGP_SUPERNAME)
@@ -223,6 +225,7 @@
#define ARGI_CONCAT_OP ARGI_LIST3 (ARGI_COMPUTEDATA,ARGI_COMPUTEDATA, ARGI_TARGETREF)
#define ARGI_CONCAT_RES_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_BUFFER, ARGI_TARGETREF)
#define ARGI_COND_REF_OF_OP ARGI_LIST2 (ARGI_OBJECT_REF, ARGI_TARGETREF)
+#define ARGI_CONNECTFIELD_OP ARGI_INVALID_OPCODE
#define ARGI_CONTINUE_OP ARGI_INVALID_OPCODE
#define ARGI_COPY_OP ARGI_LIST2 (ARGI_ANYTYPE, ARGI_SIMPLE_TARGET)
#define ARGI_CREATE_BIT_FIELD_OP ARGI_LIST3 (ARGI_BUFFER, ARGI_INTEGER, ARGI_REFERENCE)
@@ -294,6 +297,7 @@
#define ARGI_RETURN_OP ARGI_INVALID_OPCODE
#define ARGI_REVISION_OP ARG_NONE
#define ARGI_SCOPE_OP ARGI_INVALID_OPCODE
+#define ARGI_SERIALFIELD_OP ARGI_INVALID_OPCODE
#define ARGI_SHIFT_LEFT_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
#define ARGI_SHIFT_RIGHT_OP ARGI_LIST3 (ARGI_INTEGER, ARGI_INTEGER, ARGI_TARGETREF)
#define ARGI_SIGNAL_OP ARGI_LIST1 (ARGI_EVENT)
diff --git a/drivers/acpi/acpica/acparser.h b/drivers/acpi/acpica/acparser.h
index 5ea1e06..b725d78 100644
--- a/drivers/acpi/acpica/acparser.h
+++ b/drivers/acpi/acpica/acparser.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index c445cca..bbb34c9 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -94,6 +94,14 @@
* ACPI_PTYPE2_REV_FIXED: Revision at start, each subpackage is Fixed-length
* (Used for _ART, _FPS)
*
+ * ACPI_PTYPE2_FIX_VAR: Each subpackage consists of some fixed-length elements
+ * followed by an optional element
+ * object type
+ * count
+ * object type
+ * count = 0 (optional)
+ * (Used for _DLM)
+ *
*****************************************************************************/
enum acpi_return_package_types {
@@ -105,7 +113,8 @@ enum acpi_return_package_types {
ACPI_PTYPE2_PKG_COUNT = 6,
ACPI_PTYPE2_FIXED = 7,
ACPI_PTYPE2_MIN = 8,
- ACPI_PTYPE2_REV_FIXED = 9
+ ACPI_PTYPE2_REV_FIXED = 9,
+ ACPI_PTYPE2_FIX_VAR = 10
};
#ifdef ACPI_CREATE_PREDEFINED_TABLE
@@ -154,6 +163,7 @@ static const union acpi_predefined_info predefined_names[] =
{{"_AC8", 0, ACPI_RTYPE_INTEGER}},
{{"_AC9", 0, ACPI_RTYPE_INTEGER}},
{{"_ADR", 0, ACPI_RTYPE_INTEGER}},
+ {{"_AEI", 0, ACPI_RTYPE_BUFFER}},
{{"_AL0", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
@@ -229,6 +239,13 @@ static const union acpi_predefined_info predefined_names[] =
{{"_CID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints/Strs) */
{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING, 0,0}, 0,0}},
+ {{"_CLS", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (3 Int) */
+ {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}},
+
+ {{"_CPC", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints/Bufs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER, 0, 0}, 0,
+ 0}},
+
{{"_CRS", 0, ACPI_RTYPE_BUFFER}},
{{"_CRT", 0, ACPI_RTYPE_INTEGER}},
{{"_CSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n-1 Int) */
@@ -237,12 +254,21 @@ static const union acpi_predefined_info predefined_names[] =
{{"_CST", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n Pkg (1 Buf/3 Int) */
{{{ACPI_PTYPE2_PKG_COUNT,ACPI_RTYPE_BUFFER, 1, ACPI_RTYPE_INTEGER}, 3,0}},
+ {{"_CWS", 1, ACPI_RTYPE_INTEGER}},
{{"_DCK", 1, ACPI_RTYPE_INTEGER}},
{{"_DCS", 0, ACPI_RTYPE_INTEGER}},
{{"_DDC", 1, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER}},
{{"_DDN", 0, ACPI_RTYPE_STRING}},
+ {{"_DEP", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
+
{{"_DGS", 0, ACPI_RTYPE_INTEGER}},
{{"_DIS", 0, 0}},
+
+ {{"_DLM", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (1 Ref, 0/1 Optional Buf/Ref) */
+ {{{ACPI_PTYPE2_FIX_VAR, ACPI_RTYPE_REFERENCE, 1,
+ ACPI_RTYPE_REFERENCE | ACPI_RTYPE_BUFFER}, 0, 0}},
+
{{"_DMA", 0, ACPI_RTYPE_BUFFER}},
{{"_DOD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints) */
{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0,0}, 0,0}},
@@ -262,6 +288,7 @@ static const union acpi_predefined_info predefined_names[] =
{{"_EJ3", 1, 0}},
{{"_EJ4", 1, 0}},
{{"_EJD", 0, ACPI_RTYPE_STRING}},
+ {{"_EVT", 1, 0}},
{{"_FDE", 0, ACPI_RTYPE_BUFFER}},
{{"_FDI", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (16 Int) */
{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16,0}, 0,0}},
@@ -281,14 +308,17 @@ static const union acpi_predefined_info predefined_names[] =
{{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}},
{{"_GAI", 0, ACPI_RTYPE_INTEGER}},
+ {{"_GCP", 0, ACPI_RTYPE_INTEGER}},
{{"_GHL", 0, ACPI_RTYPE_INTEGER}},
{{"_GLK", 0, ACPI_RTYPE_INTEGER}},
{{"_GPD", 0, ACPI_RTYPE_INTEGER}},
{{"_GPE", 0, ACPI_RTYPE_INTEGER}}, /* _GPE method, not _GPE scope */
+ {{"_GRT", 0, ACPI_RTYPE_BUFFER}},
{{"_GSB", 0, ACPI_RTYPE_INTEGER}},
{{"_GTF", 0, ACPI_RTYPE_BUFFER}},
{{"_GTM", 0, ACPI_RTYPE_BUFFER}},
{{"_GTS", 1, 0}},
+ {{"_GWS", 1, ACPI_RTYPE_INTEGER}},
{{"_HID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING}},
{{"_HOT", 0, ACPI_RTYPE_INTEGER}},
{{"_HPP", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (4 Int) */
@@ -303,6 +333,7 @@ static const union acpi_predefined_info predefined_names[] =
{{"_HPX", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (var Ints) */
{{{ACPI_PTYPE2_MIN, ACPI_RTYPE_INTEGER, 5,0}, 0,0}},
+ {{"_HRV", 0, ACPI_RTYPE_INTEGER}},
{{"_IFT", 0, ACPI_RTYPE_INTEGER}}, /* See IPMI spec */
{{"_INI", 0, 0}},
{{"_IRC", 0, 0}},
@@ -361,6 +392,9 @@ static const union acpi_predefined_info predefined_names[] =
{{"_PR3", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
+ {{"_PRE", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
+ {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
+
{{"_PRL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
@@ -391,6 +425,7 @@ static const union acpi_predefined_info predefined_names[] =
{{"_PSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (5 Int) with count */
{{{ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER,0,0}, 0,0}},
+ {{"_PSE", 1, 0}},
{{"_PSL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
{{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
@@ -457,6 +492,7 @@ static const union acpi_predefined_info predefined_names[] =
{{"_SLI", 0, ACPI_RTYPE_BUFFER}},
{{"_SPD", 1, ACPI_RTYPE_INTEGER}},
{{"_SRS", 1, 0}},
+ {{"_SRT", 1, ACPI_RTYPE_INTEGER}},
{{"_SRV", 0, ACPI_RTYPE_INTEGER}}, /* See IPMI spec */
{{"_SST", 1, 0}},
{{"_STA", 0, ACPI_RTYPE_INTEGER}},
@@ -464,6 +500,7 @@ static const union acpi_predefined_info predefined_names[] =
{{"_STP", 2, ACPI_RTYPE_INTEGER}},
{{"_STR", 0, ACPI_RTYPE_BUFFER}},
{{"_STV", 2, ACPI_RTYPE_INTEGER}},
+ {{"_SUB", 0, ACPI_RTYPE_STRING}},
{{"_SUN", 0, ACPI_RTYPE_INTEGER}},
{{"_SWS", 0, ACPI_RTYPE_INTEGER}},
{{"_TC1", 0, ACPI_RTYPE_INTEGER}},
diff --git a/drivers/acpi/acpica/acresrc.h b/drivers/acpi/acpica/acresrc.h
index f08b55b..0347d09 100644
--- a/drivers/acpi/acpica/acresrc.h
+++ b/drivers/acpi/acpica/acresrc.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -73,28 +73,40 @@ typedef const struct acpi_rsconvert_info {
/* Resource conversion opcodes */
-#define ACPI_RSC_INITGET 0
-#define ACPI_RSC_INITSET 1
-#define ACPI_RSC_FLAGINIT 2
-#define ACPI_RSC_1BITFLAG 3
-#define ACPI_RSC_2BITFLAG 4
-#define ACPI_RSC_COUNT 5
-#define ACPI_RSC_COUNT16 6
-#define ACPI_RSC_LENGTH 7
-#define ACPI_RSC_MOVE8 8
-#define ACPI_RSC_MOVE16 9
-#define ACPI_RSC_MOVE32 10
-#define ACPI_RSC_MOVE64 11
-#define ACPI_RSC_SET8 12
-#define ACPI_RSC_DATA8 13
-#define ACPI_RSC_ADDRESS 14
-#define ACPI_RSC_SOURCE 15
-#define ACPI_RSC_SOURCEX 16
-#define ACPI_RSC_BITMASK 17
-#define ACPI_RSC_BITMASK16 18
-#define ACPI_RSC_EXIT_NE 19
-#define ACPI_RSC_EXIT_LE 20
-#define ACPI_RSC_EXIT_EQ 21
+typedef enum {
+ ACPI_RSC_INITGET = 0,
+ ACPI_RSC_INITSET,
+ ACPI_RSC_FLAGINIT,
+ ACPI_RSC_1BITFLAG,
+ ACPI_RSC_2BITFLAG,
+ ACPI_RSC_3BITFLAG,
+ ACPI_RSC_ADDRESS,
+ ACPI_RSC_BITMASK,
+ ACPI_RSC_BITMASK16,
+ ACPI_RSC_COUNT,
+ ACPI_RSC_COUNT16,
+ ACPI_RSC_COUNT_GPIO_PIN,
+ ACPI_RSC_COUNT_GPIO_RES,
+ ACPI_RSC_COUNT_GPIO_VEN,
+ ACPI_RSC_COUNT_SERIAL_RES,
+ ACPI_RSC_COUNT_SERIAL_VEN,
+ ACPI_RSC_DATA8,
+ ACPI_RSC_EXIT_EQ,
+ ACPI_RSC_EXIT_LE,
+ ACPI_RSC_EXIT_NE,
+ ACPI_RSC_LENGTH,
+ ACPI_RSC_MOVE_GPIO_PIN,
+ ACPI_RSC_MOVE_GPIO_RES,
+ ACPI_RSC_MOVE_SERIAL_RES,
+ ACPI_RSC_MOVE_SERIAL_VEN,
+ ACPI_RSC_MOVE8,
+ ACPI_RSC_MOVE16,
+ ACPI_RSC_MOVE32,
+ ACPI_RSC_MOVE64,
+ ACPI_RSC_SET8,
+ ACPI_RSC_SOURCE,
+ ACPI_RSC_SOURCEX
+} ACPI_RSCONVERT_OPCODES;
/* Resource Conversion sub-opcodes */
@@ -106,6 +118,9 @@ typedef const struct acpi_rsconvert_info {
#define ACPI_RS_OFFSET(f) (u8) ACPI_OFFSET (struct acpi_resource,f)
#define AML_OFFSET(f) (u8) ACPI_OFFSET (union aml_resource,f)
+/*
+ * Individual entry for the resource dump tables
+ */
typedef const struct acpi_rsdump_info {
u8 opcode;
u8 offset;
@@ -116,20 +131,25 @@ typedef const struct acpi_rsdump_info {
/* Values for the Opcode field above */
-#define ACPI_RSD_TITLE 0
-#define ACPI_RSD_LITERAL 1
-#define ACPI_RSD_STRING 2
-#define ACPI_RSD_UINT8 3
-#define ACPI_RSD_UINT16 4
-#define ACPI_RSD_UINT32 5
-#define ACPI_RSD_UINT64 6
-#define ACPI_RSD_1BITFLAG 7
-#define ACPI_RSD_2BITFLAG 8
-#define ACPI_RSD_SHORTLIST 9
-#define ACPI_RSD_LONGLIST 10
-#define ACPI_RSD_DWORDLIST 11
-#define ACPI_RSD_ADDRESS 12
-#define ACPI_RSD_SOURCE 13
+typedef enum {
+ ACPI_RSD_TITLE = 0,
+ ACPI_RSD_1BITFLAG,
+ ACPI_RSD_2BITFLAG,
+ ACPI_RSD_3BITFLAG,
+ ACPI_RSD_ADDRESS,
+ ACPI_RSD_DWORDLIST,
+ ACPI_RSD_LITERAL,
+ ACPI_RSD_LONGLIST,
+ ACPI_RSD_SHORTLIST,
+ ACPI_RSD_SHORTLISTX,
+ ACPI_RSD_SOURCE,
+ ACPI_RSD_STRING,
+ ACPI_RSD_UINT8,
+ ACPI_RSD_UINT16,
+ ACPI_RSD_UINT32,
+ ACPI_RSD_UINT64,
+ ACPI_RSD_WORDLIST
+} ACPI_RSDUMP_OPCODES;
/* restore default alignment */
@@ -138,13 +158,18 @@ typedef const struct acpi_rsdump_info {
/* Resource tables indexed by internal resource type */
extern const u8 acpi_gbl_aml_resource_sizes[];
+extern const u8 acpi_gbl_aml_resource_serial_bus_sizes[];
extern struct acpi_rsconvert_info *acpi_gbl_set_resource_dispatch[];
/* Resource tables indexed by raw AML resource descriptor type */
extern const u8 acpi_gbl_resource_struct_sizes[];
+extern const u8 acpi_gbl_resource_struct_serial_bus_sizes[];
extern struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[];
+extern struct acpi_rsconvert_info
+ *acpi_gbl_convert_resource_serial_bus_dispatch[];
+
struct acpi_vendor_walk_info {
struct acpi_vendor_uuid *uuid;
struct acpi_buffer *buffer;
@@ -190,6 +215,10 @@ acpi_status
acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
struct acpi_buffer *ret_buffer);
+acpi_status
+acpi_rs_get_aei_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *ret_buffer);
+
/*
* rscalc
*/
@@ -293,6 +322,11 @@ extern struct acpi_rsconvert_info acpi_rs_convert_address16[];
extern struct acpi_rsconvert_info acpi_rs_convert_ext_irq[];
extern struct acpi_rsconvert_info acpi_rs_convert_address64[];
extern struct acpi_rsconvert_info acpi_rs_convert_ext_address64[];
+extern struct acpi_rsconvert_info acpi_rs_convert_gpio[];
+extern struct acpi_rsconvert_info acpi_rs_convert_fixed_dma[];
+extern struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[];
+extern struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[];
+extern struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[];
/* These resources require separate get/set tables */
@@ -310,6 +344,7 @@ extern struct acpi_rsconvert_info acpi_rs_set_vendor[];
* rsinfo
*/
extern struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[];
+extern struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[];
/*
* rsdump
@@ -331,6 +366,12 @@ extern struct acpi_rsdump_info acpi_rs_dump_address64[];
extern struct acpi_rsdump_info acpi_rs_dump_ext_address64[];
extern struct acpi_rsdump_info acpi_rs_dump_ext_irq[];
extern struct acpi_rsdump_info acpi_rs_dump_generic_reg[];
+extern struct acpi_rsdump_info acpi_rs_dump_gpio[];
+extern struct acpi_rsdump_info acpi_rs_dump_fixed_dma[];
+extern struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[];
+extern struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[];
+extern struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[];
+extern struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[];
#endif
#endif /* __ACRESRC_H__ */
diff --git a/drivers/acpi/acpica/acstruct.h b/drivers/acpi/acpica/acstruct.h
index 1623b24..0404df6 100644
--- a/drivers/acpi/acpica/acstruct.h
+++ b/drivers/acpi/acpica/acstruct.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 967f081..d5bec30 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/acutils.h b/drivers/acpi/acpica/acutils.h
index 99c140d..925ccf2 100644
--- a/drivers/acpi/acpica/acutils.h
+++ b/drivers/acpi/acpica/acutils.h
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -45,6 +45,7 @@
#define _ACUTILS_H
extern const u8 acpi_gbl_resource_aml_sizes[];
+extern const u8 acpi_gbl_resource_aml_serial_bus_sizes[];
/* Strings used by the disassembler and debugger resource dump routines */
@@ -579,6 +580,24 @@ acpi_ut_create_list(char *list_name,
#endif /* ACPI_DBG_TRACK_ALLOCATIONS */
/*
+ * utaddress - address range check
+ */
+acpi_status
+acpi_ut_add_address_range(acpi_adr_space_type space_id,
+ acpi_physical_address address,
+ u32 length, struct acpi_namespace_node *region_node);
+
+void
+acpi_ut_remove_address_range(acpi_adr_space_type space_id,
+ struct acpi_namespace_node *region_node);
+
+u32
+acpi_ut_check_address_range(acpi_adr_space_type space_id,
+ acpi_physical_address address, u32 length, u8 warn);
+
+void acpi_ut_delete_address_lists(void);
+
+/*
* utxferror - various error/warning output functions
*/
void ACPI_INTERNAL_VAR_XFACE
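The utaddress prototypes added above imply straightforward bookkeeping: each operation region registers its address window, keyed by address-space id, and later callers can ask how many recorded windows a new range overlaps. A minimal sketch of that idea; the list layout, names and fixed-size table are illustrative only, not the ACPICA implementation:

#include <stdint.h>
#include <stdlib.h>

struct range_entry {
    uint64_t start;
    uint64_t end;                    /* inclusive */
    struct range_entry *next;
};

static struct range_entry *range_lists[256];   /* one list per space id */

/* Record a region's window so later installations can detect overlaps */
static int add_address_range(uint8_t space_id, uint64_t address, uint32_t length)
{
    struct range_entry *entry;

    if (length == 0)
        return -1;
    entry = malloc(sizeof(*entry));
    if (!entry)
        return -1;
    entry->start = address;
    entry->end = address + length - 1;
    entry->next = range_lists[space_id];
    range_lists[space_id] = entry;
    return 0;
}

/* Count how many recorded ranges intersect [address, address + length) */
static unsigned int check_address_range(uint8_t space_id, uint64_t address, uint32_t length)
{
    uint64_t end = address + length - 1;
    unsigned int overlaps = 0;
    struct range_entry *entry;

    for (entry = range_lists[space_id]; entry; entry = entry->next)
        if (address <= entry->end && end >= entry->start)
            overlaps++;
    return overlaps;
}

In the patch itself, dsargs.c below calls the real acpi_ut_add_address_range() once a region's Address and Length operands have been evaluated.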
diff --git a/drivers/acpi/acpica/amlcode.h b/drivers/acpi/acpica/amlcode.h
index 1077f17..905280f 100644
--- a/drivers/acpi/acpica/amlcode.h
+++ b/drivers/acpi/acpica/amlcode.h
@@ -7,7 +7,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -189,6 +189,14 @@
#define AML_LNOTEQUAL_OP (u16) 0x9293
/*
+ * Opcodes for "Field" operators
+ */
+#define AML_FIELD_OFFSET_OP (u8) 0x00
+#define AML_FIELD_ACCESS_OP (u8) 0x01
+#define AML_FIELD_CONNECTION_OP (u8) 0x02 /* ACPI 5.0 */
+#define AML_FIELD_EXT_ACCESS_OP (u8) 0x03 /* ACPI 5.0 */
+
+/*
* Internal opcodes
* Use only "Unknown" AML opcodes, don't attempt to use
* any valid ACPI ASCII values (A-Z, 0-9, '-')
@@ -202,6 +210,8 @@
#define AML_INT_METHODCALL_OP (u16) 0x0035
#define AML_INT_RETURN_VALUE_OP (u16) 0x0036
#define AML_INT_EVAL_SUBTREE_OP (u16) 0x0037
+#define AML_INT_CONNECTION_OP (u16) 0x0038
+#define AML_INT_EXTACCESSFIELD_OP (u16) 0x0039
#define ARG_NONE 0x0
@@ -456,13 +466,16 @@ typedef enum {
* access_as keyword
*/
typedef enum {
- AML_FIELD_ATTRIB_SMB_QUICK = 0x02,
- AML_FIELD_ATTRIB_SMB_SEND_RCV = 0x04,
- AML_FIELD_ATTRIB_SMB_BYTE = 0x06,
- AML_FIELD_ATTRIB_SMB_WORD = 0x08,
- AML_FIELD_ATTRIB_SMB_BLOCK = 0x0A,
- AML_FIELD_ATTRIB_SMB_WORD_CALL = 0x0C,
- AML_FIELD_ATTRIB_SMB_BLOCK_CALL = 0x0D
+ AML_FIELD_ATTRIB_QUICK = 0x02,
+ AML_FIELD_ATTRIB_SEND_RCV = 0x04,
+ AML_FIELD_ATTRIB_BYTE = 0x06,
+ AML_FIELD_ATTRIB_WORD = 0x08,
+ AML_FIELD_ATTRIB_BLOCK = 0x0A,
+ AML_FIELD_ATTRIB_MULTIBYTE = 0x0B,
+ AML_FIELD_ATTRIB_WORD_CALL = 0x0C,
+ AML_FIELD_ATTRIB_BLOCK_CALL = 0x0D,
+ AML_FIELD_ATTRIB_RAW_BYTES = 0x0E,
+ AML_FIELD_ATTRIB_RAW_PROCESS = 0x0F
} AML_ACCESS_ATTRIBUTE;
/* Bit fields in the AML method_flags byte */
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index 59122cd..7b2128f 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -58,29 +58,48 @@
#define ACPI_RESTAG_TYPESPECIFICATTRIBUTES "_ATT"
#define ACPI_RESTAG_BASEADDRESS "_BAS"
#define ACPI_RESTAG_BUSMASTER "_BM_" /* Master(1), Slave(0) */
+#define ACPI_RESTAG_DEBOUNCETIME "_DBT"
#define ACPI_RESTAG_DECODE "_DEC"
+#define ACPI_RESTAG_DEVICEPOLARITY "_DPL"
#define ACPI_RESTAG_DMA "_DMA"
#define ACPI_RESTAG_DMATYPE "_TYP" /* Compatible(0), A(1), B(2), F(3) */
+#define ACPI_RESTAG_DRIVESTRENGTH "_DRS"
+#define ACPI_RESTAG_ENDIANNESS "_END"
+#define ACPI_RESTAG_FLOWCONTROL "_FLC"
#define ACPI_RESTAG_GRANULARITY "_GRA"
#define ACPI_RESTAG_INTERRUPT "_INT"
#define ACPI_RESTAG_INTERRUPTLEVEL "_LL_" /* active_lo(1), active_hi(0) */
#define ACPI_RESTAG_INTERRUPTSHARE "_SHR" /* Shareable(1), no_share(0) */
#define ACPI_RESTAG_INTERRUPTTYPE "_HE_" /* Edge(1), Level(0) */
+#define ACPI_RESTAG_IORESTRICTION "_IOR"
#define ACPI_RESTAG_LENGTH "_LEN"
+#define ACPI_RESTAG_LINE "_LIN"
#define ACPI_RESTAG_MEMATTRIBUTES "_MTP" /* Memory(0), Reserved(1), ACPI(2), NVS(3) */
#define ACPI_RESTAG_MEMTYPE "_MEM" /* non_cache(0), Cacheable(1) Cache+combine(2), Cache+prefetch(3) */
#define ACPI_RESTAG_MAXADDR "_MAX"
#define ACPI_RESTAG_MINADDR "_MIN"
#define ACPI_RESTAG_MAXTYPE "_MAF"
#define ACPI_RESTAG_MINTYPE "_MIF"
+#define ACPI_RESTAG_MODE "_MOD"
+#define ACPI_RESTAG_PARITY "_PAR"
+#define ACPI_RESTAG_PHASE "_PHA"
+#define ACPI_RESTAG_PIN "_PIN"
+#define ACPI_RESTAG_PINCONFIG "_PPI"
+#define ACPI_RESTAG_POLARITY "_POL"
#define ACPI_RESTAG_REGISTERBITOFFSET "_RBO"
#define ACPI_RESTAG_REGISTERBITWIDTH "_RBW"
#define ACPI_RESTAG_RANGETYPE "_RNG"
#define ACPI_RESTAG_READWRITETYPE "_RW_" /* read_only(0), Writeable (1) */
+#define ACPI_RESTAG_LENGTH_RX "_RXL"
+#define ACPI_RESTAG_LENGTH_TX "_TXL"
+#define ACPI_RESTAG_SLAVEMODE "_SLV"
+#define ACPI_RESTAG_SPEED "_SPE"
+#define ACPI_RESTAG_STOPBITS "_STB"
#define ACPI_RESTAG_TRANSLATION "_TRA"
#define ACPI_RESTAG_TRANSTYPE "_TRS" /* Sparse(1), Dense(0) */
#define ACPI_RESTAG_TYPE "_TTP" /* Translation(1), Static (0) */
#define ACPI_RESTAG_XFERTYPE "_SIZ" /* 8(0), 8_and16(1), 16(2) */
+#define ACPI_RESTAG_VENDORDATA "_VEN"
/* Default sizes for "small" resource descriptors */
@@ -90,6 +109,7 @@
#define ASL_RDESC_END_DEPEND_SIZE 0x00
#define ASL_RDESC_IO_SIZE 0x07
#define ASL_RDESC_FIXED_IO_SIZE 0x03
+#define ASL_RDESC_FIXED_DMA_SIZE 0x05
#define ASL_RDESC_END_TAG_SIZE 0x01
struct asl_resource_node {
@@ -164,6 +184,12 @@ struct aml_resource_end_tag {
AML_RESOURCE_SMALL_HEADER_COMMON u8 checksum;
};
+struct aml_resource_fixed_dma {
+ AML_RESOURCE_SMALL_HEADER_COMMON u16 request_lines;
+ u16 channels;
+ u8 width;
+};
+
/*
* LARGE descriptors
*/
@@ -263,6 +289,110 @@ struct aml_resource_generic_register {
u64 address;
};
+/* Common descriptor for gpio_int and gpio_io (ACPI 5.0) */
+
+struct aml_resource_gpio {
+ AML_RESOURCE_LARGE_HEADER_COMMON u8 revision_id;
+ u8 connection_type;
+ u16 flags;
+ u16 int_flags;
+ u8 pin_config;
+ u16 drive_strength;
+ u16 debounce_timeout;
+ u16 pin_table_offset;
+ u8 res_source_index;
+ u16 res_source_offset;
+ u16 vendor_offset;
+ u16 vendor_length;
+ /*
+ * Optional fields follow immediately:
+ * 1) PIN list (Words)
+ * 2) Resource Source String
+ * 3) Vendor Data bytes
+ */
+};
+
+#define AML_RESOURCE_GPIO_REVISION 1 /* ACPI 5.0 */
+
+/* Values for connection_type above */
+
+#define AML_RESOURCE_GPIO_TYPE_INT 0
+#define AML_RESOURCE_GPIO_TYPE_IO 1
+#define AML_RESOURCE_MAX_GPIOTYPE 1
+
+/* Common preamble for all serial descriptors (ACPI 5.0) */
+
+#define AML_RESOURCE_SERIAL_COMMON \
+ u8 revision_id; \
+ u8 res_source_index; \
+ u8 type; \
+ u8 flags; \
+ u16 type_specific_flags; \
+ u8 type_revision_id; \
+ u16 type_data_length; \
+
+/* Values for the type field above */
+
+#define AML_RESOURCE_I2C_SERIALBUSTYPE 1
+#define AML_RESOURCE_SPI_SERIALBUSTYPE 2
+#define AML_RESOURCE_UART_SERIALBUSTYPE 3
+#define AML_RESOURCE_MAX_SERIALBUSTYPE 3
+#define AML_RESOURCE_VENDOR_SERIALBUSTYPE 192 /* Vendor defined is 0xC0-0xFF (NOT SUPPORTED) */
+
+struct aml_resource_common_serialbus {
+AML_RESOURCE_LARGE_HEADER_COMMON AML_RESOURCE_SERIAL_COMMON};
+
+struct aml_resource_i2c_serialbus {
+ AML_RESOURCE_LARGE_HEADER_COMMON
+ AML_RESOURCE_SERIAL_COMMON u32 connection_speed;
+ u16 slave_address;
+ /*
+ * Optional fields follow immediately:
+ * 1) Vendor Data bytes
+ * 2) Resource Source String
+ */
+};
+
+#define AML_RESOURCE_I2C_REVISION 1 /* ACPI 5.0 */
+#define AML_RESOURCE_I2C_TYPE_REVISION 1 /* ACPI 5.0 */
+#define AML_RESOURCE_I2C_MIN_DATA_LEN 6
+
+struct aml_resource_spi_serialbus {
+ AML_RESOURCE_LARGE_HEADER_COMMON
+ AML_RESOURCE_SERIAL_COMMON u32 connection_speed;
+ u8 data_bit_length;
+ u8 clock_phase;
+ u8 clock_polarity;
+ u16 device_selection;
+ /*
+ * Optional fields follow immediately:
+ * 1) Vendor Data bytes
+ * 2) Resource Source String
+ */
+};
+
+#define AML_RESOURCE_SPI_REVISION 1 /* ACPI 5.0 */
+#define AML_RESOURCE_SPI_TYPE_REVISION 1 /* ACPI 5.0 */
+#define AML_RESOURCE_SPI_MIN_DATA_LEN 9
+
+struct aml_resource_uart_serialbus {
+ AML_RESOURCE_LARGE_HEADER_COMMON
+ AML_RESOURCE_SERIAL_COMMON u32 default_baud_rate;
+ u16 rx_fifo_size;
+ u16 tx_fifo_size;
+ u8 parity;
+ u8 lines_enabled;
+ /*
+ * Optional fields follow immediately:
+ * 1) Vendor Data bytes
+ * 2) Resource Source String
+ */
+};
+
+#define AML_RESOURCE_UART_REVISION 1 /* ACPI 5.0 */
+#define AML_RESOURCE_UART_TYPE_REVISION 1 /* ACPI 5.0 */
+#define AML_RESOURCE_UART_MIN_DATA_LEN 10
+
/* restore default alignment */
#pragma pack()
@@ -284,6 +414,7 @@ union aml_resource {
struct aml_resource_end_dependent end_dpf;
struct aml_resource_io io;
struct aml_resource_fixed_io fixed_io;
+ struct aml_resource_fixed_dma fixed_dma;
struct aml_resource_vendor_small vendor_small;
struct aml_resource_end_tag end_tag;
@@ -299,6 +430,11 @@ union aml_resource {
struct aml_resource_address64 address64;
struct aml_resource_extended_address64 ext_address64;
struct aml_resource_extended_irq extended_irq;
+ struct aml_resource_gpio gpio;
+ struct aml_resource_i2c_serialbus i2c_serial_bus;
+ struct aml_resource_spi_serialbus spi_serial_bus;
+ struct aml_resource_uart_serialbus uart_serial_bus;
+ struct aml_resource_common_serialbus common_serial_bus;
/* Utility overlays */
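struct aml_resource_gpio above stores only offsets to its variable-length tail. A sketch of how a consumer might locate the trailing pin table, resource-source string and vendor data, assuming (as in the ACPI 5.0 GpioInt/GpioIo definition) that the offsets count from byte 0 of the descriptor; all names here are illustrative, not ACPICA code:

#include <stdint.h>
#include <stddef.h>

struct gpio_desc_view {
    const uint16_t *pin_table;       /* array of pin numbers (words) */
    size_t pin_count;
    const char *resource_source;     /* NUL-terminated source string */
    const uint8_t *vendor_data;
    size_t vendor_length;
};

static void view_gpio_descriptor(const uint8_t *desc,
                                 uint16_t pin_table_offset,
                                 uint16_t res_source_offset,
                                 uint16_t vendor_offset,
                                 uint16_t vendor_length,
                                 struct gpio_desc_view *out)
{
    /* Pins fill the gap between the pin table and the source string */
    out->pin_table = (const uint16_t *)(desc + pin_table_offset);
    out->pin_count = (res_source_offset - pin_table_offset) / sizeof(uint16_t);
    out->resource_source = (const char *)(desc + res_source_offset);
    out->vendor_data = vendor_length ? desc + vendor_offset : NULL;
    out->vendor_length = vendor_length;
}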
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
index 8c7b997..80eb190 100644
--- a/drivers/acpi/acpica/dsargs.c
+++ b/drivers/acpi/acpica/dsargs.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -250,6 +250,13 @@ acpi_ds_get_bank_field_arguments(union acpi_operand_object *obj_desc)
status = acpi_ds_execute_arguments(node, node->parent,
extra_desc->extra.aml_length,
extra_desc->extra.aml_start);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_ut_add_address_range(obj_desc->region.space_id,
+ obj_desc->region.address,
+ obj_desc->region.length, node);
return_ACPI_STATUS(status);
}
@@ -384,8 +391,15 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
/* Execute the argument AML */
- status = acpi_ds_execute_arguments(node, node->parent,
+ status = acpi_ds_execute_arguments(node, extra_desc->extra.scope_node,
extra_desc->extra.aml_length,
extra_desc->extra.aml_start);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_ut_add_address_range(obj_desc->region.space_id,
+ obj_desc->region.address,
+ obj_desc->region.length, node);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
index 26c49ff..effe4ca 100644
--- a/drivers/acpi/acpica/dscontrol.c
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index 34be60c..cd243cf 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -221,6 +221,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
{
acpi_status status;
u64 position;
+ union acpi_parse_object *child;
ACPI_FUNCTION_TRACE_PTR(ds_get_field_names, info);
@@ -232,10 +233,11 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
while (arg) {
/*
- * Three types of field elements are handled:
- * 1) Offset - specifies a bit offset
- * 2) access_as - changes the access mode
- * 3) Name - Enters a new named field into the namespace
+ * Four types of field elements are handled:
+ * 1) Name - Enters a new named field into the namespace
+ * 2) Offset - specifies a bit offset
+ * 3) access_as - changes the access mode/attributes
+ * 4) Connection - Associate a resource template with the field
*/
switch (arg->common.aml_opcode) {
case AML_INT_RESERVEDFIELD_OP:
@@ -253,21 +255,70 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
break;
case AML_INT_ACCESSFIELD_OP:
-
+ case AML_INT_EXTACCESSFIELD_OP:
/*
- * Get a new access_type and access_attribute -- to be used for all
- * field units that follow, until field end or another access_as
- * keyword.
+ * Get new access_type, access_attribute, and access_length fields
+ * -- to be used for all field units that follow, until the
+ * end-of-field or another access_as keyword is encountered.
+ * NOTE. These three bytes are encoded in the integer value
+ * of the parseop for convenience.
*
* In field_flags, preserve the flag bits other than the
- * ACCESS_TYPE bits
+ * ACCESS_TYPE bits.
*/
+
+ /* access_type (byte_acc, word_acc, etc.) */
+
info->field_flags = (u8)
((info->
field_flags & ~(AML_FIELD_ACCESS_TYPE_MASK)) |
- ((u8) ((u32) arg->common.value.integer >> 8)));
+ ((u8)((u32)(arg->common.value.integer & 0x07))));
+
+ /* access_attribute (attrib_quick, attrib_byte, etc.) */
+
+ info->attribute =
+ (u8)((arg->common.value.integer >> 8) & 0xFF);
+
+ /* access_length (for serial/buffer protocols) */
+
+ info->access_length =
+ (u8)((arg->common.value.integer >> 16) & 0xFF);
+ break;
+
+ case AML_INT_CONNECTION_OP:
+ /*
+ * Clear any previous connection. New connection is used for all
+ * fields that follow, similar to access_as
+ */
+ info->resource_buffer = NULL;
+ info->connection_node = NULL;
- info->attribute = (u8) (arg->common.value.integer);
+ /*
+ * A Connection() is either an actual resource descriptor (buffer)
+ * or a named reference to a resource template
+ */
+ child = arg->common.value.arg;
+ if (child->common.aml_opcode == AML_INT_BYTELIST_OP) {
+ info->resource_buffer = child->named.data;
+ info->resource_length =
+ (u16)child->named.value.integer;
+ } else {
+ /* Lookup the Connection() namepath, it should already exist */
+
+ status = acpi_ns_lookup(walk_state->scope_info,
+ child->common.value.
+ name, ACPI_TYPE_ANY,
+ ACPI_IMODE_EXECUTE,
+ ACPI_NS_DONT_OPEN_SCOPE,
+ walk_state,
+ &info->connection_node);
+ if (ACPI_FAILURE(status)) {
+ ACPI_ERROR_NAMESPACE(child->common.
+ value.name,
+ status);
+ return_ACPI_STATUS(status);
+ }
+ }
break;
case AML_INT_NAMEDFIELD_OP:
@@ -374,6 +425,8 @@ acpi_ds_create_field(union acpi_parse_object *op,
}
}
+ ACPI_MEMSET(&info, 0, sizeof(struct acpi_create_field_info));
+
/* Second arg is the field flags */
arg = arg->common.next;
@@ -386,7 +439,6 @@ acpi_ds_create_field(union acpi_parse_object *op,
info.region_node = region_node;
status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
-
return_ACPI_STATUS(status);
}
@@ -474,8 +526,8 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
*/
while (arg) {
/*
- * Ignore OFFSET and ACCESSAS terms here; we are only interested in the
- * field names in order to enter them into the namespace.
+ * Ignore OFFSET/ACCESSAS/CONNECTION terms here; we are only interested
+ * in the field names in order to enter them into the namespace.
*/
if (arg->common.aml_opcode == AML_INT_NAMEDFIELD_OP) {
status = acpi_ns_lookup(walk_state->scope_info,
@@ -651,6 +703,5 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
info.region_node = region_node;
status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
-
return_ACPI_STATUS(status);
}
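The AccessField/ExtAccessField handling above notes that the parser packs access type, attribute and access length into one integer. A small stand-alone decoder that applies the same masks; the struct and function names are hypothetical:

#include <stdint.h>
#include <stdio.h>

struct access_as {
    uint8_t type;        /* byte_acc, word_acc, ... (low 3 bits used) */
    uint8_t attribute;   /* AML_FIELD_ATTRIB_* value */
    uint8_t length;      /* access length for buffer/serial protocols */
};

static struct access_as decode_access_as(uint64_t packed)
{
    struct access_as out;

    out.type = (uint8_t)(packed & 0x07);
    out.attribute = (uint8_t)((packed >> 8) & 0xFF);
    out.length = (uint8_t)((packed >> 16) & 0xFF);
    return out;
}

int main(void)
{
    /* 0x100E05: length 16, attribute 0x0E (AttribRawBytes), type 5 (BufferAcc) */
    struct access_as a = decode_access_as(0x100E05);

    printf("type=%u attribute=0x%02X length=%u\n", a.type, a.attribute, a.length);
    return 0;
}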
diff --git a/drivers/acpi/acpica/dsinit.c b/drivers/acpi/acpica/dsinit.c
index a7718bf..9e5ac7f 100644
--- a/drivers/acpi/acpica/dsinit.c
+++ b/drivers/acpi/acpica/dsinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 5d79775..00f5dab 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index 905ce29..b40bd50 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index f42e17e..d7045ca 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index c627a28..e5eff75 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dsutils.c b/drivers/acpi/acpica/dsutils.c
index 2c477ce..1abcda3 100644
--- a/drivers/acpi/acpica/dsutils.c
+++ b/drivers/acpi/acpica/dsutils.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index fe40e4c..642f3c0 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 324acec..552aa3a 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
index 9763181..ae71477 100644
--- a/drivers/acpi/acpica/dswload2.c
+++ b/drivers/acpi/acpica/dswload2.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswscope.c b/drivers/acpi/acpica/dswscope.c
index 76a661f..9e9490a 100644
--- a/drivers/acpi/acpica/dswscope.c
+++ b/drivers/acpi/acpica/dswscope.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index a6c374e..c9c2ac1 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index d458b04..6729ebe 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -71,6 +71,12 @@ acpi_status acpi_ev_initialize_events(void)
ACPI_FUNCTION_TRACE(ev_initialize_events);
+ /* If Hardware Reduced flag is set, there are no fixed events */
+
+ if (acpi_gbl_reduced_hardware) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/*
* Initialize the Fixed and General Purpose Events. This is done prior to
* enabling SCIs to prevent interrupts from occurring before the handlers
@@ -111,6 +117,12 @@ acpi_status acpi_ev_install_xrupt_handlers(void)
ACPI_FUNCTION_TRACE(ev_install_xrupt_handlers);
+ /* If Hardware Reduced flag is set, there is no ACPI h/w */
+
+ if (acpi_gbl_reduced_hardware) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* Install the SCI handler */
status = acpi_ev_install_sci_handler();
diff --git a/drivers/acpi/acpica/evglock.c b/drivers/acpi/acpica/evglock.c
index 56a562a..5e5683c 100644
--- a/drivers/acpi/acpica/evglock.c
+++ b/drivers/acpi/acpica/evglock.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -70,6 +70,12 @@ acpi_status acpi_ev_init_global_lock_handler(void)
ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
+ /* If Hardware Reduced flag is set, there is no global lock */
+
+ if (acpi_gbl_reduced_hardware) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
/* Attempt installation of the global lock handler */
status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 65c79ad..9e88cb6 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index ca2c41a..be75339 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index ce9aa9f..adf7494 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
index 80a81d0..2507393 100644
--- a/drivers/acpi/acpica/evgpeutil.c
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index d0b3318..84966f4 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index f0edf5c..1b0180a 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -329,6 +329,7 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
* FUNCTION: acpi_ev_address_space_dispatch
*
* PARAMETERS: region_obj - Internal region object
+ * field_obj - Corresponding field. Can be NULL.
* Function - Read or Write operation
* region_offset - Where in the region to read or write
* bit_width - Field width in bits (8, 16, 32, or 64)
@@ -344,6 +345,7 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
acpi_status
acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+ union acpi_operand_object *field_obj,
u32 function,
u32 region_offset, u32 bit_width, u64 *value)
{
@@ -353,6 +355,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
union acpi_operand_object *handler_desc;
union acpi_operand_object *region_obj2;
void *region_context = NULL;
+ struct acpi_connection_info *context;
ACPI_FUNCTION_TRACE(ev_address_space_dispatch);
@@ -375,6 +378,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
return_ACPI_STATUS(AE_NOT_EXIST);
}
+ context = handler_desc->address_space.context;
+
/*
* It may be the case that the region has never been initialized.
* Some types of regions require special init code
@@ -404,8 +409,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
acpi_ex_exit_interpreter();
status = region_setup(region_obj, ACPI_REGION_ACTIVATE,
- handler_desc->address_space.context,
- &region_context);
+ context, &region_context);
/* Re-enter the interpreter */
@@ -455,6 +459,25 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
acpi_ut_get_region_name(region_obj->region.
space_id)));
+ /*
+ * Special handling for generic_serial_bus and general_purpose_io:
+ * There are three extra parameters that must be passed to the
+ * handler via the context:
+ * 1) Connection buffer, a resource template from Connection() op.
+ * 2) Length of the above buffer.
+ * 3) Actual access length from the access_as() op.
+ */
+ if (((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) ||
+ (region_obj->region.space_id == ACPI_ADR_SPACE_GPIO)) &&
+ context && field_obj) {
+
+ /* Get the Connection (resource_template) buffer */
+
+ context->connection = field_obj->field.resource_buffer;
+ context->length = field_obj->field.resource_length;
+ context->access_length = field_obj->field.access_length;
+ }
+
if (!(handler_desc->address_space.handler_flags &
ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
/*
@@ -469,7 +492,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
status = handler(function,
(region_obj->region.address + region_offset),
- bit_width, value, handler_desc->address_space.context,
+ bit_width, value, context,
region_obj2->extra.region_context);
if (ACPI_FAILURE(status)) {
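For GenericSerialBus and GeneralPurposeIo regions the dispatch above forwards the Connection() buffer, its length and the AccessAs access length to the handler through its context. A simplified sketch of what such a handler sees; the struct layout and signature are reduced from the ACPICA ones and the names are illustrative:

#include <stdint.h>
#include <stddef.h>

struct connection_info {
    uint8_t *connection;      /* resource template from Connection() */
    uint16_t length;          /* length of that buffer */
    uint8_t access_length;    /* from AccessAs(BufferAcc, AttribBytes(n)) etc. */
};

static int serial_bus_handler(uint32_t function, uint64_t address,
                              uint32_t bit_width, uint64_t *value,
                              struct connection_info *ctx)
{
    uint32_t op = function & 0xFFFF;        /* read or write */
    uint32_t protocol = function >> 16;     /* access attribute */

    (void)address;
    (void)bit_width;
    if (!ctx || !ctx->connection)
        return -1;                          /* no Connection() was given */

    /*
     * A real handler would decode ctx->connection as an I2C/SPI/UART or
     * GPIO resource, perform 'protocol' (op) against the target device,
     * and move up to ctx->access_length data bytes through the caller's
     * buffer.
     */
    (void)op;
    (void)protocol;
    (void)value;
    return 0;
}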
diff --git a/drivers/acpi/acpica/evrgnini.c b/drivers/acpi/acpica/evrgnini.c
index 55a5d35..819c17f 100644
--- a/drivers/acpi/acpica/evrgnini.c
+++ b/drivers/acpi/acpica/evrgnini.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evsci.c b/drivers/acpi/acpica/evsci.c
index 2ebd40e..26065c6 100644
--- a/drivers/acpi/acpica/evsci.c
+++ b/drivers/acpi/acpica/evsci.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index f4f523b..61944e8 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 20516e5..1768bbe 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index f06a3ee..33388fd 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index aee887e..6019208c 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 745a42b..c86d44e 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -297,9 +297,9 @@ acpi_ex_region_read(union acpi_operand_object *obj_desc, u32 length, u8 *buffer)
/* Bytewise reads */
for (i = 0; i < length; i++) {
- status = acpi_ev_address_space_dispatch(obj_desc, ACPI_READ,
- region_offset, 8,
- &value);
+ status =
+ acpi_ev_address_space_dispatch(obj_desc, NULL, ACPI_READ,
+ region_offset, 8, &value);
if (ACPI_FAILURE(status)) {
return status;
}
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index 74162a1..e385436 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 110711a..3f5bc99 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -267,7 +267,7 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
*
* PARAMETERS: aml_start - Pointer to the region declaration AML
* aml_length - Max length of the declaration AML
- * region_space - space_iD for the region
+ * space_id - Address space ID for the region
* walk_state - Current state
*
* RETURN: Status
@@ -279,7 +279,7 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
acpi_status
acpi_ex_create_region(u8 * aml_start,
u32 aml_length,
- u8 region_space, struct acpi_walk_state *walk_state)
+ u8 space_id, struct acpi_walk_state *walk_state)
{
acpi_status status;
union acpi_operand_object *obj_desc;
@@ -304,16 +304,19 @@ acpi_ex_create_region(u8 * aml_start,
* Space ID must be one of the predefined IDs, or in the user-defined
* range
*/
- if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) &&
- (region_space < ACPI_USER_REGION_BEGIN) &&
- (region_space != ACPI_ADR_SPACE_DATA_TABLE)) {
- ACPI_ERROR((AE_INFO, "Invalid AddressSpace type 0x%X",
- region_space));
- return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
+ if (!acpi_is_valid_space_id(space_id)) {
+ /*
+ * Print an error message, but continue. We don't want to abort
+ * a table load for this exception. Instead, if the region is
+ * actually used at runtime, abort the executing method.
+ */
+ ACPI_ERROR((AE_INFO,
+ "Invalid/unknown Address Space ID: 0x%2.2X",
+ space_id));
}
ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Region Type - %s (0x%X)\n",
- acpi_ut_get_region_name(region_space), region_space));
+ acpi_ut_get_region_name(space_id), space_id));
/* Create the region descriptor */
@@ -330,10 +333,16 @@ acpi_ex_create_region(u8 * aml_start,
region_obj2 = obj_desc->common.next_object;
region_obj2->extra.aml_start = aml_start;
region_obj2->extra.aml_length = aml_length;
+ if (walk_state->scope_info) {
+ region_obj2->extra.scope_node =
+ walk_state->scope_info->scope.node;
+ } else {
+ region_obj2->extra.scope_node = node;
+ }
/* Init the region from the operands */
- obj_desc->region.space_id = region_space;
+ obj_desc->region.space_id = space_id;
obj_desc->region.address = 0;
obj_desc->region.length = 0;
obj_desc->region.node = node;
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
index c7a2f1e..e211e9c 100644
--- a/drivers/acpi/acpica/exdebug.c
+++ b/drivers/acpi/acpica/exdebug.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exdump.c b/drivers/acpi/acpica/exdump.c
index 61b8c0e..2a6ac0a 100644
--- a/drivers/acpi/acpica/exdump.c
+++ b/drivers/acpi/acpica/exdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -192,10 +192,13 @@ static struct acpi_exdump_info acpi_ex_dump_buffer_field[3] = {
"Buffer Object"}
};
-static struct acpi_exdump_info acpi_ex_dump_region_field[3] = {
+static struct acpi_exdump_info acpi_ex_dump_region_field[5] = {
{ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_region_field), NULL},
{ACPI_EXD_FIELD, 0, NULL},
- {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.region_obj), "Region Object"}
+ {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(field.access_length), "AccessLength"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.region_obj), "Region Object"},
+ {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.resource_buffer),
+ "ResourceBuffer"}
};
static struct acpi_exdump_info acpi_ex_dump_bank_field[5] = {
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index 0bde223..dc092f5 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -100,18 +100,25 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
(obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_SMBUS
|| obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_GSBUS
+ || obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_IPMI)) {
/*
- * This is an SMBus or IPMI read. We must create a buffer to hold
+ * This is an SMBus, GSBus or IPMI read. We must create a buffer to hold
* the data and then directly access the region handler.
*
- * Note: Smbus protocol value is passed in upper 16-bits of Function
+ * Note: SMBus and GSBus protocol value is passed in upper 16-bits of Function
*/
if (obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_SMBUS) {
length = ACPI_SMBUS_BUFFER_SIZE;
function =
ACPI_READ | (obj_desc->field.attribute << 16);
+ } else if (obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_GSBUS) {
+ length = ACPI_GSBUS_BUFFER_SIZE;
+ function =
+ ACPI_READ | (obj_desc->field.attribute << 16);
} else { /* IPMI */
length = ACPI_IPMI_BUFFER_SIZE;
@@ -248,21 +255,23 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
(obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_SMBUS
|| obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_GSBUS
+ || obj_desc->field.region_obj->region.space_id ==
ACPI_ADR_SPACE_IPMI)) {
/*
- * This is an SMBus or IPMI write. We will bypass the entire field
+ * This is an SMBus, GSBus or IPMI write. We will bypass the entire field
* mechanism and handoff the buffer directly to the handler. For
* these address spaces, the buffer is bi-directional; on a write,
* return data is returned in the same buffer.
*
* Source must be a buffer of sufficient size:
- * ACPI_SMBUS_BUFFER_SIZE or ACPI_IPMI_BUFFER_SIZE.
+ * ACPI_SMBUS_BUFFER_SIZE, ACPI_GSBUS_BUFFER_SIZE, or ACPI_IPMI_BUFFER_SIZE.
*
- * Note: SMBus protocol type is passed in upper 16-bits of Function
+ * Note: SMBus and GSBus protocol type is passed in upper 16-bits of Function
*/
if (source_desc->common.type != ACPI_TYPE_BUFFER) {
ACPI_ERROR((AE_INFO,
- "SMBus or IPMI write requires Buffer, found type %s",
+ "SMBus/IPMI/GenericSerialBus write requires Buffer, found type %s",
acpi_ut_get_object_type_name(source_desc)));
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
@@ -273,6 +282,11 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
length = ACPI_SMBUS_BUFFER_SIZE;
function =
ACPI_WRITE | (obj_desc->field.attribute << 16);
+ } else if (obj_desc->field.region_obj->region.space_id ==
+ ACPI_ADR_SPACE_GSBUS) {
+ length = ACPI_GSBUS_BUFFER_SIZE;
+ function =
+ ACPI_WRITE | (obj_desc->field.attribute << 16);
} else { /* IPMI */
length = ACPI_IPMI_BUFFER_SIZE;
@@ -281,7 +295,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
if (source_desc->buffer.length < length) {
ACPI_ERROR((AE_INFO,
- "SMBus or IPMI write requires Buffer of length %u, found length %u",
+ "SMBus/IPMI/GenericSerialBus write requires Buffer of length %u, found length %u",
length, source_desc->buffer.length));
return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
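For the buffer protocols above (SMBus, GSBus, IPMI) the field mechanism is bypassed: the access attribute is packed into the upper 16 bits of Function and the data travels in a bidirectional buffer of a fixed per-space size. A caller-side sketch of that packing and length check; the size constant and names are placeholders, not the ACPICA definitions:

#include <stdint.h>
#include <string.h>

#define OP_READ            0u
#define OP_WRITE           1u
#define EXAMPLE_BUF_SIZE   34u    /* placeholder for the per-space buffer size */

struct protocol_request {
    uint32_t function;            /* op | (attribute << 16) */
    uint8_t buffer[EXAMPLE_BUF_SIZE];
};

static int build_write_request(uint8_t attribute,
                               const uint8_t *src, uint32_t src_len,
                               struct protocol_request *req)
{
    /* Source must be a buffer at least as large as the protocol buffer */
    if (src_len < EXAMPLE_BUF_SIZE)
        return -1;

    req->function = OP_WRITE | ((uint32_t)attribute << 16);
    memcpy(req->buffer, src, EXAMPLE_BUF_SIZE);
    return 0;
}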
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index f915a7f..149de45 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -86,6 +86,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
{
acpi_status status = AE_OK;
union acpi_operand_object *rgn_desc;
+ u8 space_id;
ACPI_FUNCTION_TRACE_U32(ex_setup_region, field_datum_byte_offset);
@@ -101,6 +102,17 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
}
+ space_id = rgn_desc->region.space_id;
+
+ /* Validate the Space ID */
+
+ if (!acpi_is_valid_space_id(space_id)) {
+ ACPI_ERROR((AE_INFO,
+ "Invalid/unknown Address Space ID: 0x%2.2X",
+ space_id));
+ return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
+ }
+
/*
* If the Region Address and Length have not been previously evaluated,
* evaluate them now and save the results.
@@ -119,11 +131,12 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
}
/*
- * Exit now for SMBus or IPMI address space, it has a non-linear
+ * Exit now for SMBus, GSBus or IPMI address space, it has a non-linear
* address space and the request cannot be directly validated
*/
- if (rgn_desc->region.space_id == ACPI_ADR_SPACE_SMBUS ||
- rgn_desc->region.space_id == ACPI_ADR_SPACE_IPMI) {
+ if (space_id == ACPI_ADR_SPACE_SMBUS ||
+ space_id == ACPI_ADR_SPACE_GSBUS ||
+ space_id == ACPI_ADR_SPACE_IPMI) {
/* SMBus or IPMI has a non-linear address space */
@@ -271,11 +284,12 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
/* Invoke the appropriate address_space/op_region handler */
- status =
- acpi_ev_address_space_dispatch(rgn_desc, function, region_offset,
- ACPI_MUL_8(obj_desc->common_field.
- access_byte_width),
- value);
+ status = acpi_ev_address_space_dispatch(rgn_desc, obj_desc,
+ function, region_offset,
+ ACPI_MUL_8(obj_desc->
+ common_field.
+ access_byte_width),
+ value);
if (ACPI_FAILURE(status)) {
if (status == AE_NOT_IMPLEMENTED) {
@@ -316,6 +330,7 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
static u8
acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value)
{
+ ACPI_FUNCTION_NAME(ex_register_overflow);
if (obj_desc->common_field.bit_length >= ACPI_INTEGER_BIT_SIZE) {
/*
@@ -330,6 +345,11 @@ acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value)
* The Value is larger than the maximum value that can fit into
* the register.
*/
+ ACPI_ERROR((AE_INFO,
+ "Index value 0x%8.8X%8.8X overflows field width 0x%X",
+ ACPI_FORMAT_UINT64(value),
+ obj_desc->common_field.bit_length));
+
return (TRUE);
}
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index 703d88e..0a08933 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index be1c56e..60933e9 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 49ec049..fcc75fa 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 236ead1..9ba8c73 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 2571b4a..879e8a2 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 1b48d9d..71fcc65 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index f4a2787..0786b86 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index cc95e20..30157f5 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -47,6 +47,7 @@
#include "acinterp.h"
#include "amlcode.h"
#include "acnamesp.h"
+#include "acdispat.h"
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exprep")
@@ -455,6 +456,30 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
obj_desc->field.region_obj =
acpi_ns_get_attached_object(info->region_node);
+ /* Fields specific to generic_serial_bus fields */
+
+ obj_desc->field.access_length = info->access_length;
+
+ if (info->connection_node) {
+ second_desc = info->connection_node->object;
+ if (!(second_desc->common.flags & AOPOBJ_DATA_VALID)) {
+ status =
+ acpi_ds_get_buffer_arguments(second_desc);
+ if (ACPI_FAILURE(status)) {
+ acpi_ut_delete_object_desc(obj_desc);
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ obj_desc->field.resource_buffer =
+ second_desc->buffer.pointer;
+ obj_desc->field.resource_length =
+ (u16)second_desc->buffer.length;
+ } else if (info->resource_buffer) {
+ obj_desc->field.resource_buffer = info->resource_buffer;
+ obj_desc->field.resource_length = info->resource_length;
+ }
+
/* Allow full data read from EC address space */
if ((obj_desc->field.region_obj->region.space_id ==
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index f0d5e14..12d51df 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index 55997e4..fa50e77 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index db502cd..6e335dc 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index e3bb00c..a67b1d9 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index c0c8842..c6cf843 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstoren.c b/drivers/acpi/acpica/exstoren.c
index a979017..b35bed5 100644
--- a/drivers/acpi/acpica/exstoren.c
+++ b/drivers/acpi/acpica/exstoren.c
@@ -7,7 +7,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exstorob.c b/drivers/acpi/acpica/exstorob.c
index dc665cc..65a45d8 100644
--- a/drivers/acpi/acpica/exstorob.c
+++ b/drivers/acpi/acpica/exstorob.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index df66e7b..191a129 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/exutils.c b/drivers/acpi/acpica/exutils.c
index 8ad9314..eb6798b 100644
--- a/drivers/acpi/acpica/exutils.c
+++ b/drivers/acpi/acpica/exutils.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -435,4 +435,29 @@ void acpi_ex_integer_to_string(char *out_string, u64 value)
}
}
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_is_valid_space_id
+ *
+ * PARAMETERS: space_id - ID to be validated
+ *
+ * RETURN: TRUE if valid/supported ID.
+ *
+ * DESCRIPTION: Validate an operation region space_id.
+ *
+ ******************************************************************************/
+
+u8 acpi_is_valid_space_id(u8 space_id)
+{
+
+ if ((space_id >= ACPI_NUM_PREDEFINED_REGIONS) &&
+ (space_id < ACPI_USER_REGION_BEGIN) &&
+ (space_id != ACPI_ADR_SPACE_DATA_TABLE) &&
+ (space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
+ return (FALSE);
+ }
+
+ return (TRUE);
+}
+
#endif
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index fc380d3..d21ec5f 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index f610d88..1a6894a 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwpci.c b/drivers/acpi/acpica/hwpci.c
index 050fd22..1455ddc 100644
--- a/drivers/acpi/acpica/hwpci.c
+++ b/drivers/acpi/acpica/hwpci.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index cc70f3f..4ea4eeb5 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -7,7 +7,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index d52da30..3c4a922 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwtimer.c b/drivers/acpi/acpica/hwtimer.c
index 50d21c4..d4973d9 100644
--- a/drivers/acpi/acpica/hwtimer.c
+++ b/drivers/acpi/acpica/hwtimer.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index 5f16058..6e5c43a 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -134,6 +134,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
/* Supported widths are 8/16/32 */
if ((bit_width != 8) && (bit_width != 16) && (bit_width != 32)) {
+ ACPI_ERROR((AE_INFO,
+ "Bad BitWidth parameter: %8.8X", bit_width));
return AE_BAD_PARAMETER;
}
diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c
index c2793a8..9d38eb6 100644
--- a/drivers/acpi/acpica/hwxface.c
+++ b/drivers/acpi/acpica/hwxface.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -356,7 +356,7 @@ ACPI_EXPORT_SYMBOL(acpi_read_bit_register)
*
* PARAMETERS: register_id - ID of ACPI Bit Register to access
* Value - Value to write to the register, in bit
- * position zero. The bit is automaticallly
+ * position zero. The bit is automatically
* shifted to the correct position.
*
* RETURN: Status
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index d93172f..61623f3 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsalloc.c b/drivers/acpi/acpica/nsalloc.c
index 1d0ef15..7c3d3ce 100644
--- a/drivers/acpi/acpica/nsalloc.c
+++ b/drivers/acpi/acpica/nsalloc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index b683cc2..b7f2b3b 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsdumpdv.c b/drivers/acpi/acpica/nsdumpdv.c
index 2ed294b..30ea5bc 100644
--- a/drivers/acpi/acpica/nsdumpdv.c
+++ b/drivers/acpi/acpica/nsdumpdv.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nseval.c b/drivers/acpi/acpica/nseval.c
index c1bd02b..f375cb8 100644
--- a/drivers/acpi/acpica/nseval.c
+++ b/drivers/acpi/acpica/nseval.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsinit.c b/drivers/acpi/acpica/nsinit.c
index fd7c638..9d84ec2 100644
--- a/drivers/acpi/acpica/nsinit.c
+++ b/drivers/acpi/acpica/nsinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsload.c b/drivers/acpi/acpica/nsload.c
index 5f7dc69..5cbf15f 100644
--- a/drivers/acpi/acpica/nsload.c
+++ b/drivers/acpi/acpica/nsload.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index d5fa520..b20e7c8 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsobject.c b/drivers/acpi/acpica/nsobject.c
index 3bb8bf1..dd77a3c 100644
--- a/drivers/acpi/acpica/nsobject.c
+++ b/drivers/acpi/acpica/nsobject.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsparse.c b/drivers/acpi/acpica/nsparse.c
index b3234fa..ec7ba2d 100644
--- a/drivers/acpi/acpica/nsparse.c
+++ b/drivers/acpi/acpica/nsparse.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nspredef.c b/drivers/acpi/acpica/nspredef.c
index c845c80..bbe46a4 100644
--- a/drivers/acpi/acpica/nspredef.c
+++ b/drivers/acpi/acpica/nspredef.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -620,6 +620,7 @@ acpi_ns_check_package(struct acpi_predefined_data *data,
case ACPI_PTYPE2_FIXED:
case ACPI_PTYPE2_MIN:
case ACPI_PTYPE2_COUNT:
+ case ACPI_PTYPE2_FIX_VAR:
/*
* These types all return a single Package that consists of a
@@ -759,6 +760,34 @@ acpi_ns_check_package_list(struct acpi_predefined_data *data,
}
break;
+ case ACPI_PTYPE2_FIX_VAR:
+ /*
+ * Each subpackage has a fixed number of elements and an
+ * optional element
+ */
+ expected_count =
+ package->ret_info.count1 + package->ret_info.count2;
+ if (sub_package->package.count < expected_count) {
+ goto package_too_small;
+ }
+
+ status =
+ acpi_ns_check_package_elements(data, sub_elements,
+ package->ret_info.
+ object_type1,
+ package->ret_info.
+ count1,
+ package->ret_info.
+ object_type2,
+ sub_package->package.
+ count -
+ package->ret_info.
+ count1, 0);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ break;
+
case ACPI_PTYPE2_FIXED:
/* Each sub-package has a fixed length */
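
The new ACPI_PTYPE2_FIX_VAR handling above requires each sub-package to hold at least count1 + count2 elements, then validates the first count1 elements against object_type1 and everything after them against object_type2. A simplified standalone model of that check, using my own types and names rather than the ACPICA structures:

#include <stddef.h>
#include <stdio.h>

/* Simplified element tags standing in for ACPI object types. */
enum obj_type { OBJ_INT, OBJ_STR, OBJ_BUF };

struct pkg_info {               /* stand-in for the ret_info fields used above */
        enum obj_type object_type1;
        size_t count1;          /* fixed leading elements                      */
        enum obj_type object_type2;
        size_t count2;          /* minimum number of trailing elements         */
};

static int check_fix_var(const enum obj_type *types, size_t count,
                         const struct pkg_info *info)
{
        size_t i;

        if (count < info->count1 + info->count2)
                return 0;       /* sub-package too small */

        for (i = 0; i < count; i++) {
                enum obj_type want = (i < info->count1) ?
                    info->object_type1 : info->object_type2;

                if (types[i] != want)
                        return 0;
        }
        return 1;
}

int main(void)
{
        enum obj_type sub[] = { OBJ_INT, OBJ_INT, OBJ_STR };
        struct pkg_info info = { OBJ_INT, 2, OBJ_STR, 0 };

        printf("valid: %d\n", check_fix_var(sub, 3, &info));
        return 0;
}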
diff --git a/drivers/acpi/acpica/nsrepair.c b/drivers/acpi/acpica/nsrepair.c
index ac7b854..9c35d20 100644
--- a/drivers/acpi/acpica/nsrepair.c
+++ b/drivers/acpi/acpica/nsrepair.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -634,6 +634,7 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
case ACPI_PTYPE2_FIXED:
case ACPI_PTYPE2_MIN:
case ACPI_PTYPE2_REV_FIXED:
+ case ACPI_PTYPE2_FIX_VAR:
break;
default:
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c
index 024c4f2..726bc8e 100644
--- a/drivers/acpi/acpica/nsrepair2.c
+++ b/drivers/acpi/acpica/nsrepair2.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -467,11 +467,12 @@ acpi_ns_repair_HID(struct acpi_predefined_data *data,
}
/*
- * Copy and uppercase the string. From the ACPI specification:
+ * Copy and uppercase the string. From the ACPI 5.0 specification:
*
* A valid PNP ID must be of the form "AAA####" where A is an uppercase
* letter and # is a hex digit. A valid ACPI ID must be of the form
- * "ACPI####" where # is a hex digit.
+ * "NNNN####" where N is an uppercase letter or decimal digit, and
+ * # is a hex digit.
*/
for (dest = new_string->string.pointer; *source; dest++, source++) {
*dest = (char)ACPI_TOUPPER(*source);
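
The reworded comment reflects the ACPI 5.0 _HID grammar (PNP IDs of the form "AAA####", ACPI IDs of the form "NNNN####"), while the repair itself only uppercases the returned string. A small standalone sketch of that uppercase pass plus a loose format check, with invented helper names rather than the ACPICA repair code:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Uppercase a _HID string in place, as the repair above does. */
static void hid_to_upper(char *hid)
{
        for (; *hid; hid++)
                *hid = (char)toupper((unsigned char)*hid);
}

/* Loose check against the "AAA####" (PNP) and "NNNN####" (ACPI) forms. */
static int hid_looks_valid(const char *hid)
{
        size_t len = strlen(hid);
        size_t prefix = (len == 7) ? 3 : (len == 8) ? 4 : 0;
        size_t i;

        if (!prefix)
                return 0;
        for (i = 0; i < prefix; i++)
                if (!isupper((unsigned char)hid[i]) &&
                    !(prefix == 4 && isdigit((unsigned char)hid[i])))
                        return 0;
        for (i = prefix; i < len; i++)
                if (!isxdigit((unsigned char)hid[i]))
                        return 0;
        return 1;
}

int main(void)
{
        char hid[] = "pnp0c0a";

        hid_to_upper(hid);
        printf("%s : %s\n", hid, hid_looks_valid(hid) ? "ok" : "malformed");
        return 0;
}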
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 28b0d7a..507043d 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index cb1b104..a535b7a 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nswalk.c b/drivers/acpi/acpica/nswalk.c
index 345f0c3..f69895a 100644
--- a/drivers/acpi/acpica/nswalk.c
+++ b/drivers/acpi/acpica/nswalk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfeval.c b/drivers/acpi/acpica/nsxfeval.c
index e7f016d..71d15f6 100644
--- a/drivers/acpi/acpica/nsxfeval.c
+++ b/drivers/acpi/acpica/nsxfeval.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfname.c b/drivers/acpi/acpica/nsxfname.c
index 83bf930..af401c9 100644
--- a/drivers/acpi/acpica/nsxfname.c
+++ b/drivers/acpi/acpica/nsxfname.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/nsxfobj.c b/drivers/acpi/acpica/nsxfobj.c
index 57e6d82..880a605 100644
--- a/drivers/acpi/acpica/nsxfobj.c
+++ b/drivers/acpi/acpica/nsxfobj.c
@@ -6,7 +6,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index e1fad0e..5ac36ab 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -484,34 +484,54 @@ acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state,
static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
*parser_state)
{
- u32 aml_offset = (u32)
- ACPI_PTR_DIFF(parser_state->aml,
- parser_state->aml_start);
+ u32 aml_offset;
union acpi_parse_object *field;
+ union acpi_parse_object *arg = NULL;
u16 opcode;
u32 name;
+ u8 access_type;
+ u8 access_attribute;
+ u8 access_length;
+ u32 pkg_length;
+ u8 *pkg_end;
+ u32 buffer_length;
ACPI_FUNCTION_TRACE(ps_get_next_field);
+ aml_offset =
+ (u32)ACPI_PTR_DIFF(parser_state->aml, parser_state->aml_start);
+
/* Determine field type */
switch (ACPI_GET8(parser_state->aml)) {
- default:
+ case AML_FIELD_OFFSET_OP:
- opcode = AML_INT_NAMEDFIELD_OP;
+ opcode = AML_INT_RESERVEDFIELD_OP;
+ parser_state->aml++;
break;
- case 0x00:
+ case AML_FIELD_ACCESS_OP:
- opcode = AML_INT_RESERVEDFIELD_OP;
+ opcode = AML_INT_ACCESSFIELD_OP;
parser_state->aml++;
break;
- case 0x01:
+ case AML_FIELD_CONNECTION_OP:
- opcode = AML_INT_ACCESSFIELD_OP;
+ opcode = AML_INT_CONNECTION_OP;
+ parser_state->aml++;
+ break;
+
+ case AML_FIELD_EXT_ACCESS_OP:
+
+ opcode = AML_INT_EXTACCESSFIELD_OP;
parser_state->aml++;
break;
+
+ default:
+
+ opcode = AML_INT_NAMEDFIELD_OP;
+ break;
}
/* Allocate a new field op */
@@ -549,16 +569,111 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
break;
case AML_INT_ACCESSFIELD_OP:
+ case AML_INT_EXTACCESSFIELD_OP:
/*
* Get access_type and access_attrib and merge into the field Op
- * access_type is first operand, access_attribute is second
+ * access_type is first operand, access_attribute is second. stuff
+ * these bytes into the node integer value for convenience.
*/
- field->common.value.integer =
- (((u32) ACPI_GET8(parser_state->aml) << 8));
+
+ /* Get the two bytes (Type/Attribute) */
+
+ access_type = ACPI_GET8(parser_state->aml);
parser_state->aml++;
- field->common.value.integer |= ACPI_GET8(parser_state->aml);
+ access_attribute = ACPI_GET8(parser_state->aml);
parser_state->aml++;
+
+ field->common.value.integer = (u8)access_type;
+ field->common.value.integer |= (u16)(access_attribute << 8);
+
+ /* This opcode has a third byte, access_length */
+
+ if (opcode == AML_INT_EXTACCESSFIELD_OP) {
+ access_length = ACPI_GET8(parser_state->aml);
+ parser_state->aml++;
+
+ field->common.value.integer |=
+ (u32)(access_length << 16);
+ }
+ break;
+
+ case AML_INT_CONNECTION_OP:
+
+ /*
+ * Argument for Connection operator can be either a Buffer
+ * (resource descriptor), or a name_string.
+ */
+ if (ACPI_GET8(parser_state->aml) == AML_BUFFER_OP) {
+ parser_state->aml++;
+
+ pkg_end = parser_state->aml;
+ pkg_length =
+ acpi_ps_get_next_package_length(parser_state);
+ pkg_end += pkg_length;
+
+ if (parser_state->aml < pkg_end) {
+
+ /* Non-empty list */
+
+ arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP);
+ if (!arg) {
+ return_PTR(NULL);
+ }
+
+ /* Get the actual buffer length argument */
+
+ opcode = ACPI_GET8(parser_state->aml);
+ parser_state->aml++;
+
+ switch (opcode) {
+ case AML_BYTE_OP: /* AML_BYTEDATA_ARG */
+ buffer_length =
+ ACPI_GET8(parser_state->aml);
+ parser_state->aml += 1;
+ break;
+
+ case AML_WORD_OP: /* AML_WORDDATA_ARG */
+ buffer_length =
+ ACPI_GET16(parser_state->aml);
+ parser_state->aml += 2;
+ break;
+
+ case AML_DWORD_OP: /* AML_DWORDATA_ARG */
+ buffer_length =
+ ACPI_GET32(parser_state->aml);
+ parser_state->aml += 4;
+ break;
+
+ default:
+ buffer_length = 0;
+ break;
+ }
+
+ /* Fill in bytelist data */
+
+ arg->named.value.size = buffer_length;
+ arg->named.data = parser_state->aml;
+ }
+
+ /* Skip to End of byte data */
+
+ parser_state->aml = pkg_end;
+ } else {
+ arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
+ if (!arg) {
+ return_PTR(NULL);
+ }
+
+ /* Get the Namestring argument */
+
+ arg->common.value.name =
+ acpi_ps_get_next_namestring(parser_state);
+ }
+
+ /* Link the buffer/namestring to parent (CONNECTION_OP) */
+
+ acpi_ps_append_arg(field, arg);
break;
default:
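
The AccessField/ExtAccessField handling above packs the parsed bytes into the parse node's integer value: AccessType in byte 0, AccessAttribute in byte 1 and, for the extended opcode only, AccessLength in byte 2. A standalone sketch of that packing and how the bytes are recovered; the values in main are arbitrary:

#include <stdint.h>
#include <stdio.h>

/*
 * Sketch of the AccessField encoding: Type in byte 0, Attribute in byte 1,
 * and (extended form only) AccessLength in byte 2 of the node's integer.
 */
static uint32_t pack_access_field(uint8_t type, uint8_t attribute,
                                  uint8_t length, int extended)
{
        uint32_t value = type;

        value |= (uint32_t)attribute << 8;
        if (extended)
                value |= (uint32_t)length << 16;
        return value;
}

int main(void)
{
        uint32_t v = pack_access_field(0x0B, 0x0E, 0x10, 1);

        printf("type=0x%2.2X attrib=0x%2.2X length=0x%2.2X\n",
               (unsigned)(v & 0xFF), (unsigned)((v >> 8) & 0xFF),
               (unsigned)((v >> 16) & 0xFF));
        return 0;
}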
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 01dd70d..9547ad8 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psopcode.c b/drivers/acpi/acpica/psopcode.c
index bed08de..a0226fd 100644
--- a/drivers/acpi/acpica/psopcode.c
+++ b/drivers/acpi/acpica/psopcode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -638,7 +638,16 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
/* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY,
AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R,
- AML_FLAGS_EXEC_0A_0T_1R)
+ AML_FLAGS_EXEC_0A_0T_1R),
+
+/* ACPI 5.0 opcodes */
+
+/* 7F */ ACPI_OP("-ConnectField-", ARGP_CONNECTFIELD_OP,
+ ARGI_CONNECTFIELD_OP, ACPI_TYPE_ANY,
+ AML_CLASS_INTERNAL, AML_TYPE_BOGUS, AML_HAS_ARGS),
+/* 80 */ ACPI_OP("-ExtAccessField-", ARGP_CONNECTFIELD_OP,
+ ARGI_CONNECTFIELD_OP, ACPI_TYPE_ANY,
+ AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0)
/*! [End] no source code translation !*/
};
@@ -657,7 +666,7 @@ static const u8 acpi_gbl_short_op_index[256] = {
/* 0x20 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x28 */ _UNK, _UNK, _UNK, _UNK, _UNK, 0x63, _PFX, _PFX,
/* 0x30 */ 0x67, 0x66, 0x68, 0x65, 0x69, 0x64, 0x6A, 0x7D,
-/* 0x38 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x38 */ 0x7F, 0x80, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
/* 0x40 */ _UNK, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
/* 0x48 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
/* 0x50 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
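
The new entries 0x7F and 0x80 become reachable through the short-opcode index table, which maps a raw one-byte AML opcode (here 0x38 and 0x39) to an index into the op-info table. A toy model of that byte-to-index lookup; the zero-means-unknown convention is my own simplification, not the _UNK scheme above:

#include <stdint.h>
#include <stdio.h>

#define UNKNOWN_OP 0xFF         /* my placeholder for the table's _UNK slots */

/* Toy model: raw one-byte AML opcode -> index into the op-info table. */
static const uint8_t short_op_index[256] = {
        [0x38] = 0x7F,          /* internal -ConnectField- entry   */
        [0x39] = 0x80,          /* internal -ExtAccessField- entry */
};

static uint8_t lookup_op(uint8_t aml_byte)
{
        uint8_t index = short_op_index[aml_byte];

        return index ? index : UNKNOWN_OP;      /* 0 means "not mapped" here */
}

int main(void)
{
        printf("0x38 -> 0x%2.2X, 0x40 -> 0x%2.2X\n",
               (unsigned)lookup_op(0x38), (unsigned)lookup_op(0x40));
        return 0;
}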
diff --git a/drivers/acpi/acpica/psparse.c b/drivers/acpi/acpica/psparse.c
index 9bb0cbd..2ff9c35 100644
--- a/drivers/acpi/acpica/psparse.c
+++ b/drivers/acpi/acpica/psparse.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psscope.c b/drivers/acpi/acpica/psscope.c
index a5faa13..c872aa4 100644
--- a/drivers/acpi/acpica/psscope.c
+++ b/drivers/acpi/acpica/psscope.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pstree.c b/drivers/acpi/acpica/pstree.c
index f1464c0..2b03cdb 100644
--- a/drivers/acpi/acpica/pstree.c
+++ b/drivers/acpi/acpica/pstree.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -74,6 +74,12 @@ union acpi_parse_object *acpi_ps_get_arg(union acpi_parse_object *op, u32 argn)
ACPI_FUNCTION_ENTRY();
+/*
+ if (Op->Common.aml_opcode == AML_INT_CONNECTION_OP)
+ {
+ return (Op->Common.Value.Arg);
+ }
+*/
/* Get the info structure for this opcode */
op_info = acpi_ps_get_opcode_info(op->common.aml_opcode);
diff --git a/drivers/acpi/acpica/psutils.c b/drivers/acpi/acpica/psutils.c
index 7eda785..13bb131 100644
--- a/drivers/acpi/acpica/psutils.c
+++ b/drivers/acpi/acpica/psutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/pswalk.c b/drivers/acpi/acpica/pswalk.c
index 3312d63..ab96cf4 100644
--- a/drivers/acpi/acpica/pswalk.c
+++ b/drivers/acpi/acpica/pswalk.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 8086805..9d98c5f 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsaddr.c b/drivers/acpi/acpica/rsaddr.c
index 9e66f90..a030565 100644
--- a/drivers/acpi/acpica/rsaddr.c
+++ b/drivers/acpi/acpica/rsaddr.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rscalc.c b/drivers/acpi/acpica/rscalc.c
index 3a8a89e..3c6df4b 100644
--- a/drivers/acpi/acpica/rscalc.c
+++ b/drivers/acpi/acpica/rscalc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -313,6 +313,38 @@ acpi_rs_get_aml_length(struct acpi_resource * resource, acpi_size * size_needed)
resource_source));
break;
+ case ACPI_RESOURCE_TYPE_GPIO:
+
+ total_size =
+ (acpi_rs_length) (total_size +
+ (resource->data.gpio.
+ pin_table_length * 2) +
+ resource->data.gpio.
+ resource_source.string_length +
+ resource->data.gpio.
+ vendor_length);
+
+ break;
+
+ case ACPI_RESOURCE_TYPE_SERIAL_BUS:
+
+ total_size =
+ acpi_gbl_aml_resource_serial_bus_sizes[resource->
+ data.
+ common_serial_bus.
+ type];
+
+ total_size = (acpi_rs_length) (total_size +
+ resource->data.
+ i2c_serial_bus.
+ resource_source.
+ string_length +
+ resource->data.
+ i2c_serial_bus.
+ vendor_length);
+
+ break;
+
default:
break;
}
@@ -362,10 +394,11 @@ acpi_rs_get_list_length(u8 * aml_buffer,
u32 extra_struct_bytes;
u8 resource_index;
u8 minimum_aml_resource_length;
+ union aml_resource *aml_resource;
ACPI_FUNCTION_TRACE(rs_get_list_length);
- *size_needed = 0;
+ *size_needed = ACPI_RS_SIZE_MIN; /* Minimum size is one end_tag */
end_aml = aml_buffer + aml_buffer_length;
/* Walk the list of AML resource descriptors */
@@ -376,9 +409,15 @@ acpi_rs_get_list_length(u8 * aml_buffer,
status = acpi_ut_validate_resource(aml_buffer, &resource_index);
if (ACPI_FAILURE(status)) {
+ /*
+ * Exit on failure. Cannot continue because the descriptor length
+ * may be bogus also.
+ */
return_ACPI_STATUS(status);
}
+ aml_resource = (void *)aml_buffer;
+
/* Get the resource length and base (minimum) AML size */
resource_length = acpi_ut_get_resource_length(aml_buffer);
@@ -422,10 +461,8 @@ acpi_rs_get_list_length(u8 * aml_buffer,
case ACPI_RESOURCE_NAME_END_TAG:
/*
- * End Tag:
- * This is the normal exit, add size of end_tag
+ * End Tag: This is the normal exit
*/
- *size_needed += ACPI_RS_SIZE_MIN;
return_ACPI_STATUS(AE_OK);
case ACPI_RESOURCE_NAME_ADDRESS32:
@@ -457,6 +494,33 @@ acpi_rs_get_list_length(u8 * aml_buffer,
minimum_aml_resource_length);
break;
+ case ACPI_RESOURCE_NAME_GPIO:
+
+ /* Vendor data is optional */
+
+ if (aml_resource->gpio.vendor_length) {
+ extra_struct_bytes +=
+ aml_resource->gpio.vendor_offset -
+ aml_resource->gpio.pin_table_offset +
+ aml_resource->gpio.vendor_length;
+ } else {
+ extra_struct_bytes +=
+ aml_resource->large_header.resource_length +
+ sizeof(struct aml_resource_large_header) -
+ aml_resource->gpio.pin_table_offset;
+ }
+ break;
+
+ case ACPI_RESOURCE_NAME_SERIAL_BUS:
+
+ minimum_aml_resource_length =
+ acpi_gbl_resource_aml_serial_bus_sizes
+ [aml_resource->common_serial_bus.type];
+ extra_struct_bytes +=
+ aml_resource->common_serial_bus.resource_length -
+ minimum_aml_resource_length;
+ break;
+
default:
break;
}
@@ -467,9 +531,18 @@ acpi_rs_get_list_length(u8 * aml_buffer,
* Important: Round the size up for the appropriate alignment. This
* is a requirement on IA64.
*/
- buffer_size = acpi_gbl_resource_struct_sizes[resource_index] +
- extra_struct_bytes;
- buffer_size = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(buffer_size);
+ if (acpi_ut_get_resource_type(aml_buffer) ==
+ ACPI_RESOURCE_NAME_SERIAL_BUS) {
+ buffer_size =
+ acpi_gbl_resource_struct_serial_bus_sizes
+ [aml_resource->common_serial_bus.type] +
+ extra_struct_bytes;
+ } else {
+ buffer_size =
+ acpi_gbl_resource_struct_sizes[resource_index] +
+ extra_struct_bytes;
+ }
+ buffer_size = (u32)ACPI_ROUND_UP_TO_NATIVE_WORD(buffer_size);
*size_needed += buffer_size;
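
For GPIO descriptors the internal structure size is the fixed part plus the pin table, the resource-source string and any vendor data; when vendor data is absent, the tail length is derived from the AML resource length instead of the vendor offset, exactly as in the hunk above. A standalone model of that arithmetic; the field values in main are made up, and 3 is the conventional size of a large-descriptor header (tag byte plus 16-bit length):

#include <stdint.h>
#include <stdio.h>

/* Simplified GPIO descriptor header fields used by the length math above. */
struct gpio_aml {
        uint16_t resource_length;       /* AML length after the large header  */
        uint16_t pin_table_offset;      /* start of the pin table             */
        uint16_t vendor_offset;         /* start of vendor data, if any       */
        uint16_t vendor_length;         /* 0 when no vendor data is present   */
};

#define LARGE_HEADER_SIZE 3             /* tag byte + 16-bit resource length  */

/* Bytes that must be appended to the fixed internal structure. */
static uint32_t gpio_extra_bytes(const struct gpio_aml *g)
{
        if (g->vendor_length)
                return (uint32_t)(g->vendor_offset - g->pin_table_offset +
                                  g->vendor_length);

        /* No vendor data: count everything from the pin table to the end. */
        return (uint32_t)(g->resource_length + LARGE_HEADER_SIZE -
                          g->pin_table_offset);
}

int main(void)
{
        struct gpio_aml g = { 0x2E, 0x17, 0x2F, 0x04 };  /* made-up values */

        printf("extra bytes: %u\n", (unsigned)gpio_extra_bytes(&g));
        return 0;
}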
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index 4ce6e11..46d6eb3 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -51,6 +51,70 @@ ACPI_MODULE_NAME("rscreate")
/*******************************************************************************
*
+ * FUNCTION: acpi_buffer_to_resource
+ *
+ * PARAMETERS: aml_buffer - Pointer to the resource byte stream
+ * aml_buffer_length - Length of the aml_buffer
+ * resource_ptr - Where the converted resource is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Convert a raw AML buffer to a resource list
+ *
+ ******************************************************************************/
+acpi_status
+acpi_buffer_to_resource(u8 *aml_buffer,
+ u16 aml_buffer_length,
+ struct acpi_resource **resource_ptr)
+{
+ acpi_status status;
+ acpi_size list_size_needed;
+ void *resource;
+ void *current_resource_ptr;
+
+ /*
+ * Note: we allow AE_AML_NO_RESOURCE_END_TAG, since an end tag
+ * is not required here.
+ */
+
+ /* Get the required length for the converted resource */
+
+ status = acpi_rs_get_list_length(aml_buffer, aml_buffer_length,
+ &list_size_needed);
+ if (status == AE_AML_NO_RESOURCE_END_TAG) {
+ status = AE_OK;
+ }
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Allocate a buffer for the converted resource */
+
+ resource = ACPI_ALLOCATE_ZEROED(list_size_needed);
+ current_resource_ptr = resource;
+ if (!resource) {
+ return (AE_NO_MEMORY);
+ }
+
+ /* Perform the AML-to-Resource conversion */
+
+ status = acpi_ut_walk_aml_resources(aml_buffer, aml_buffer_length,
+ acpi_rs_convert_aml_to_resources,
+ &current_resource_ptr);
+ if (status == AE_AML_NO_RESOURCE_END_TAG) {
+ status = AE_OK;
+ }
+ if (ACPI_FAILURE(status)) {
+ ACPI_FREE(resource);
+ } else {
+ *resource_ptr = resource;
+ }
+
+ return (status);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_rs_create_resource_list
*
* PARAMETERS: aml_buffer - Pointer to the resource byte stream
@@ -66,9 +130,10 @@ ACPI_MODULE_NAME("rscreate")
* of device resources.
*
******************************************************************************/
+
acpi_status
acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
- struct acpi_buffer *output_buffer)
+ struct acpi_buffer * output_buffer)
{
acpi_status status;
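
The new acpi_buffer_to_resource lets a caller turn a raw AML resource template into a struct acpi_resource list without going through a namespace object. A hedged usage sketch only: the AML buffer is assumed to come from elsewhere (for example a buffer returned by a vendor-specific method), and the new function's prototype is assumed to be visible to the caller.

#include <acpi/acpi.h>

acpi_status example_convert(u8 *aml, u16 aml_length)
{
        struct acpi_resource *resources;
        acpi_status status;

        status = acpi_buffer_to_resource(aml, aml_length, &resources);
        if (ACPI_FAILURE(status))
                return status;

        /* ... walk 'resources' element by element using each length field ... */

        ACPI_FREE(resources);
        return AE_OK;
}

Releasing the list with ACPI_FREE mirrors the error path inside the new function, which allocates the buffer with ACPI_ALLOCATE_ZEROED.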
diff --git a/drivers/acpi/acpica/rsdump.c b/drivers/acpi/acpica/rsdump.c
index 33db752..b4c5811 100644
--- a/drivers/acpi/acpica/rsdump.c
+++ b/drivers/acpi/acpica/rsdump.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -61,11 +61,13 @@ static void acpi_rs_out_integer64(char *title, u64 value);
static void acpi_rs_out_title(char *title);
-static void acpi_rs_dump_byte_list(u16 length, u8 * data);
+static void acpi_rs_dump_byte_list(u16 length, u8 *data);
-static void acpi_rs_dump_dword_list(u8 length, u32 * data);
+static void acpi_rs_dump_word_list(u16 length, u16 *data);
-static void acpi_rs_dump_short_byte_list(u8 length, u8 * data);
+static void acpi_rs_dump_dword_list(u8 length, u32 *data);
+
+static void acpi_rs_dump_short_byte_list(u8 length, u8 *data);
static void
acpi_rs_dump_resource_source(struct acpi_resource_source *resource_source);
@@ -309,6 +311,125 @@ struct acpi_rsdump_info acpi_rs_dump_generic_reg[6] = {
{ACPI_RSD_UINT64, ACPI_RSD_OFFSET(generic_reg.address), "Address", NULL}
};
+struct acpi_rsdump_info acpi_rs_dump_gpio[16] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_gpio), "GPIO", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.revision_id), "RevisionId", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.connection_type),
+ "ConnectionType", acpi_gbl_ct_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.producer_consumer),
+ "ProducerConsumer", acpi_gbl_consume_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.pin_config), "PinConfig",
+ acpi_gbl_ppc_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.sharable), "Sharable",
+ acpi_gbl_shr_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.io_restriction),
+ "IoRestriction", acpi_gbl_ior_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.triggering), "Triggering",
+ acpi_gbl_he_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.polarity), "Polarity",
+ acpi_gbl_ll_decode},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.drive_strength), "DriveStrength",
+ NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.debounce_timeout),
+ "DebounceTimeout", NULL},
+ {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(gpio.resource_source),
+ "ResourceSource", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.pin_table_length),
+ "PinTableLength", NULL},
+ {ACPI_RSD_WORDLIST, ACPI_RSD_OFFSET(gpio.pin_table), "PinTable", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.vendor_length), "VendorLength",
+ NULL},
+ {ACPI_RSD_SHORTLISTX, ACPI_RSD_OFFSET(gpio.vendor_data), "VendorData",
+ NULL},
+};
+
+struct acpi_rsdump_info acpi_rs_dump_fixed_dma[4] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_dma),
+ "FixedDma", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.request_lines),
+ "RequestLines", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.channels), "Channels",
+ NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(fixed_dma.width), "TransferWidth",
+ acpi_gbl_dts_decode},
+};
+
+#define ACPI_RS_DUMP_COMMON_SERIAL_BUS \
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.revision_id), "RevisionId", NULL}, \
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type), "Type", acpi_gbl_sbt_decode}, \
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.producer_consumer), "ProducerConsumer", acpi_gbl_consume_decode}, \
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.slave_mode), "SlaveMode", acpi_gbl_sm_decode}, \
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET (common_serial_bus.type_revision_id), "TypeRevisionId", NULL}, \
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.type_data_length), "TypeDataLength", NULL}, \
+ {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET (common_serial_bus.resource_source), "ResourceSource", NULL}, \
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET (common_serial_bus.vendor_length), "VendorLength", NULL}, \
+ {ACPI_RSD_SHORTLISTX,ACPI_RSD_OFFSET (common_serial_bus.vendor_data), "VendorData", NULL},
+
+struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[10] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_common_serial_bus),
+ "Common Serial Bus", NULL},
+ ACPI_RS_DUMP_COMMON_SERIAL_BUS
+};
+
+struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[13] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_i2c_serial_bus),
+ "I2C Serial Bus", NULL},
+ ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
+ ACPI_RSD_OFFSET(i2c_serial_bus.
+ access_mode),
+ "AccessMode", acpi_gbl_am_decode},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(i2c_serial_bus.connection_speed),
+ "ConnectionSpeed", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(i2c_serial_bus.slave_address),
+ "SlaveAddress", NULL},
+};
+
+struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[17] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_spi_serial_bus),
+ "Spi Serial Bus", NULL},
+ ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
+ ACPI_RSD_OFFSET(spi_serial_bus.
+ wire_mode), "WireMode",
+ acpi_gbl_wm_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(spi_serial_bus.device_polarity),
+ "DevicePolarity", acpi_gbl_dp_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.data_bit_length),
+ "DataBitLength", NULL},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_phase),
+ "ClockPhase", acpi_gbl_cph_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_polarity),
+ "ClockPolarity", acpi_gbl_cpo_decode},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(spi_serial_bus.device_selection),
+ "DeviceSelection", NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(spi_serial_bus.connection_speed),
+ "ConnectionSpeed", NULL},
+};
+
+struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[19] = {
+ {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_uart_serial_bus),
+ "Uart Serial Bus", NULL},
+ ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_2BITFLAG,
+ ACPI_RSD_OFFSET(uart_serial_bus.
+ flow_control),
+ "FlowControl", acpi_gbl_fc_decode},
+ {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.stop_bits),
+ "StopBits", acpi_gbl_sb_decode},
+ {ACPI_RSD_3BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.data_bits),
+ "DataBits", acpi_gbl_bpb_decode},
+ {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.endian), "Endian",
+ acpi_gbl_ed_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.parity), "Parity",
+ acpi_gbl_pt_decode},
+ {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.lines_enabled),
+ "LinesEnabled", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.rx_fifo_size),
+ "RxFifoSize", NULL},
+ {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.tx_fifo_size),
+ "TxFifoSize", NULL},
+ {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(uart_serial_bus.default_baud_rate),
+ "ConnectionSpeed", NULL},
+};
+
/*
* Tables used for common address descriptor flag fields
*/
@@ -413,7 +534,14 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
/* Data items, 8/16/32/64 bit */
case ACPI_RSD_UINT8:
- acpi_rs_out_integer8(name, ACPI_GET8(target));
+ if (table->pointer) {
+ acpi_rs_out_string(name, ACPI_CAST_PTR(char,
+ table->
+ pointer
+ [*target]));
+ } else {
+ acpi_rs_out_integer8(name, ACPI_GET8(target));
+ }
break;
case ACPI_RSD_UINT16:
@@ -444,6 +572,13 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
0x03]));
break;
+ case ACPI_RSD_3BITFLAG:
+ acpi_rs_out_string(name, ACPI_CAST_PTR(char,
+ table->
+ pointer[*target &
+ 0x07]));
+ break;
+
case ACPI_RSD_SHORTLIST:
/*
* Short byte list (single line output) for DMA and IRQ resources
@@ -456,6 +591,20 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
}
break;
+ case ACPI_RSD_SHORTLISTX:
+ /*
+ * Short byte list (single line output) for GPIO vendor data
+ * Note: The list length is obtained from the previous table entry
+ */
+ if (previous_target) {
+ acpi_rs_out_title(name);
+ acpi_rs_dump_short_byte_list(*previous_target,
+ *
+ (ACPI_CAST_INDIRECT_PTR
+ (u8, target)));
+ }
+ break;
+
case ACPI_RSD_LONGLIST:
/*
* Long byte list for Vendor resource data
@@ -480,6 +629,18 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
}
break;
+ case ACPI_RSD_WORDLIST:
+ /*
+ * Word list for GPIO Pin Table
+ * Note: The list length is obtained from the previous table entry
+ */
+ if (previous_target) {
+ acpi_rs_dump_word_list(*previous_target,
+ *(ACPI_CAST_INDIRECT_PTR
+ (u16, target)));
+ }
+ break;
+
case ACPI_RSD_ADDRESS:
/*
* Common flags for all Address resources
@@ -627,14 +788,20 @@ void acpi_rs_dump_resource_list(struct acpi_resource *resource_list)
/* Dump the resource descriptor */
- acpi_rs_dump_descriptor(&resource_list->data,
- acpi_gbl_dump_resource_dispatch[type]);
+ if (type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+ acpi_rs_dump_descriptor(&resource_list->data,
+ acpi_gbl_dump_serial_bus_dispatch
+ [resource_list->data.
+ common_serial_bus.type]);
+ } else {
+ acpi_rs_dump_descriptor(&resource_list->data,
+ acpi_gbl_dump_resource_dispatch
+ [type]);
+ }
/* Point to the next resource structure */
- resource_list =
- ACPI_ADD_PTR(struct acpi_resource, resource_list,
- resource_list->length);
+ resource_list = ACPI_NEXT_RESOURCE(resource_list);
/* Exit when END_TAG descriptor is reached */
@@ -768,4 +935,13 @@ static void acpi_rs_dump_dword_list(u8 length, u32 * data)
}
}
+static void acpi_rs_dump_word_list(u16 length, u16 *data)
+{
+ u16 i;
+
+ for (i = 0; i < length; i++) {
+ acpi_os_printf("%25s%2.2X : %4.4X\n", "Word", i, data[i]);
+ }
+}
+
#endif
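
The dump support added above is table driven: each acpi_rsdump_info entry carries an opcode, a field offset and a title, and the list opcodes (WORDLIST, SHORTLISTX) take their element count from the entry processed just before them. A much-reduced standalone model of that scheme; the structures and names are invented, the pin table is inlined for simplicity, and the real code tracks the previous target pointer rather than caching a count:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Miniature dump table: opcode + field offset + title. */
enum dump_op { D_UINT8, D_UINT16, D_WORDLIST };

struct dump_entry {
        enum dump_op op;
        size_t offset;
        const char *name;
};

struct demo_gpio {              /* invented stand-in, not the ACPICA struct */
        uint8_t revision_id;
        uint16_t pin_count;
        uint16_t pins[4];
};

static void dump(const void *res, const struct dump_entry *table, size_t n)
{
        const uint8_t *base = res;
        uint16_t prev_count = 0;  /* list length comes from the previous entry */
        size_t i;

        for (i = 0; i < n; i++) {
                const void *field = base + table[i].offset;

                switch (table[i].op) {
                case D_UINT8:
                        printf("%-15s: %2.2X\n", table[i].name,
                               (unsigned)*(const uint8_t *)field);
                        break;
                case D_UINT16:
                        prev_count = *(const uint16_t *)field;
                        printf("%-15s: %4.4X\n", table[i].name,
                               (unsigned)prev_count);
                        break;
                case D_WORDLIST: {
                        const uint16_t *words = (const uint16_t *)field;
                        uint16_t j;

                        for (j = 0; j < prev_count; j++)
                                printf("%-15s %2.2X: %4.4X\n", table[i].name,
                                       (unsigned)j, (unsigned)words[j]);
                        break;
                }
                }
        }
}

int main(void)
{
        struct demo_gpio g = { 1, 2, { 0x0010, 0x0011 } };
        const struct dump_entry table[] = {
                { D_UINT8, offsetof(struct demo_gpio, revision_id), "RevisionId" },
                { D_UINT16, offsetof(struct demo_gpio, pin_count), "PinTableLength" },
                { D_WORDLIST, offsetof(struct demo_gpio, pins), "PinTable" },
        };

        dump(&g, table, sizeof(table) / sizeof(table[0]));
        return 0;
}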
diff --git a/drivers/acpi/acpica/rsinfo.c b/drivers/acpi/acpica/rsinfo.c
index f9ea608..a9fa515 100644
--- a/drivers/acpi/acpica/rsinfo.c
+++ b/drivers/acpi/acpica/rsinfo.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -76,7 +76,10 @@ struct acpi_rsconvert_info *acpi_gbl_set_resource_dispatch[] = {
acpi_rs_convert_address64, /* 0x0D, ACPI_RESOURCE_TYPE_ADDRESS64 */
acpi_rs_convert_ext_address64, /* 0x0E, ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
acpi_rs_convert_ext_irq, /* 0x0F, ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
- acpi_rs_convert_generic_reg /* 0x10, ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+ acpi_rs_convert_generic_reg, /* 0x10, ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+ acpi_rs_convert_gpio, /* 0x11, ACPI_RESOURCE_TYPE_GPIO */
+ acpi_rs_convert_fixed_dma, /* 0x12, ACPI_RESOURCE_TYPE_FIXED_DMA */
+ NULL, /* 0x13, ACPI_RESOURCE_TYPE_SERIAL_BUS - Use subtype table below */
};
/* Dispatch tables for AML-to-resource (Get Resource) conversion functions */
@@ -94,7 +97,7 @@ struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[] = {
acpi_rs_convert_end_dpf, /* 0x07, ACPI_RESOURCE_NAME_END_DEPENDENT */
acpi_rs_convert_io, /* 0x08, ACPI_RESOURCE_NAME_IO */
acpi_rs_convert_fixed_io, /* 0x09, ACPI_RESOURCE_NAME_FIXED_IO */
- NULL, /* 0x0A, Reserved */
+ acpi_rs_convert_fixed_dma, /* 0x0A, ACPI_RESOURCE_NAME_FIXED_DMA */
NULL, /* 0x0B, Reserved */
NULL, /* 0x0C, Reserved */
NULL, /* 0x0D, Reserved */
@@ -114,7 +117,19 @@ struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[] = {
acpi_rs_convert_address16, /* 0x08, ACPI_RESOURCE_NAME_ADDRESS16 */
acpi_rs_convert_ext_irq, /* 0x09, ACPI_RESOURCE_NAME_EXTENDED_IRQ */
acpi_rs_convert_address64, /* 0x0A, ACPI_RESOURCE_NAME_ADDRESS64 */
- acpi_rs_convert_ext_address64 /* 0x0B, ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 */
+ acpi_rs_convert_ext_address64, /* 0x0B, ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 */
+ acpi_rs_convert_gpio, /* 0x0C, ACPI_RESOURCE_NAME_GPIO */
+ NULL, /* 0x0D, Reserved */
+ NULL, /* 0x0E, ACPI_RESOURCE_NAME_SERIAL_BUS - Use subtype table below */
+};
+
+/* Subtype table for serial_bus -- I2C, SPI, and UART */
+
+struct acpi_rsconvert_info *acpi_gbl_convert_resource_serial_bus_dispatch[] = {
+ NULL,
+ acpi_rs_convert_i2c_serial_bus,
+ acpi_rs_convert_spi_serial_bus,
+ acpi_rs_convert_uart_serial_bus,
};
#ifdef ACPI_FUTURE_USAGE
@@ -140,6 +155,16 @@ struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[] = {
acpi_rs_dump_ext_address64, /* ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
acpi_rs_dump_ext_irq, /* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
acpi_rs_dump_generic_reg, /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+ acpi_rs_dump_gpio, /* ACPI_RESOURCE_TYPE_GPIO */
+ acpi_rs_dump_fixed_dma, /* ACPI_RESOURCE_TYPE_FIXED_DMA */
+ NULL, /* ACPI_RESOURCE_TYPE_SERIAL_BUS */
+};
+
+struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[] = {
+ NULL,
+ acpi_rs_dump_i2c_serial_bus, /* AML_RESOURCE_I2C_BUS_TYPE */
+ acpi_rs_dump_spi_serial_bus, /* AML_RESOURCE_SPI_BUS_TYPE */
+ acpi_rs_dump_uart_serial_bus, /* AML_RESOURCE_UART_BUS_TYPE */
};
#endif
@@ -166,7 +191,10 @@ const u8 acpi_gbl_aml_resource_sizes[] = {
sizeof(struct aml_resource_address64), /* ACPI_RESOURCE_TYPE_ADDRESS64 */
sizeof(struct aml_resource_extended_address64), /*ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
sizeof(struct aml_resource_extended_irq), /* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
- sizeof(struct aml_resource_generic_register) /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+ sizeof(struct aml_resource_generic_register), /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+ sizeof(struct aml_resource_gpio), /* ACPI_RESOURCE_TYPE_GPIO */
+ sizeof(struct aml_resource_fixed_dma), /* ACPI_RESOURCE_TYPE_FIXED_DMA */
+ sizeof(struct aml_resource_common_serialbus), /* ACPI_RESOURCE_TYPE_SERIAL_BUS */
};
const u8 acpi_gbl_resource_struct_sizes[] = {
@@ -182,7 +210,7 @@ const u8 acpi_gbl_resource_struct_sizes[] = {
ACPI_RS_SIZE_MIN,
ACPI_RS_SIZE(struct acpi_resource_io),
ACPI_RS_SIZE(struct acpi_resource_fixed_io),
- 0,
+ ACPI_RS_SIZE(struct acpi_resource_fixed_dma),
0,
0,
0,
@@ -202,5 +230,21 @@ const u8 acpi_gbl_resource_struct_sizes[] = {
ACPI_RS_SIZE(struct acpi_resource_address16),
ACPI_RS_SIZE(struct acpi_resource_extended_irq),
ACPI_RS_SIZE(struct acpi_resource_address64),
- ACPI_RS_SIZE(struct acpi_resource_extended_address64)
+ ACPI_RS_SIZE(struct acpi_resource_extended_address64),
+ ACPI_RS_SIZE(struct acpi_resource_gpio),
+ ACPI_RS_SIZE(struct acpi_resource_common_serialbus)
+};
+
+const u8 acpi_gbl_aml_resource_serial_bus_sizes[] = {
+ 0,
+ sizeof(struct aml_resource_i2c_serialbus),
+ sizeof(struct aml_resource_spi_serialbus),
+ sizeof(struct aml_resource_uart_serialbus),
+};
+
+const u8 acpi_gbl_resource_struct_serial_bus_sizes[] = {
+ 0,
+ ACPI_RS_SIZE(struct acpi_resource_i2c_serialbus),
+ ACPI_RS_SIZE(struct acpi_resource_spi_serialbus),
+ ACPI_RS_SIZE(struct acpi_resource_uart_serialbus),
};
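
The dispatch changes use a two-level scheme: the main table is indexed by resource type, and the ACPI_RESOURCE_TYPE_SERIAL_BUS slot is left NULL so the I2C/SPI/UART subtype selects a handler from a secondary table, with NULL entries treated as unsupported. A compact standalone sketch of that pattern with invented handler names:

#include <stdio.h>

/* Two-level dispatch: a NULL in the main table means "use the subtype table". */
typedef void (*dump_fn)(void);

static void dump_gpio(void) { puts("GPIO"); }
static void dump_i2c(void)  { puts("I2C SerialBus"); }
static void dump_spi(void)  { puts("SPI SerialBus"); }
static void dump_uart(void) { puts("UART SerialBus"); }

enum { TYPE_GPIO, TYPE_SERIAL_BUS, TYPE_MAX };
enum { BUS_NONE, BUS_I2C, BUS_SPI, BUS_UART, BUS_MAX };

static dump_fn main_dispatch[TYPE_MAX]  = { dump_gpio, NULL };
static dump_fn serial_dispatch[BUS_MAX] = { NULL, dump_i2c, dump_spi, dump_uart };

static void dump_resource(int type, int bus_subtype)
{
        dump_fn fn;

        if (type == TYPE_SERIAL_BUS)
                fn = (bus_subtype < BUS_MAX) ? serial_dispatch[bus_subtype] : NULL;
        else
                fn = main_dispatch[type];

        if (fn)
                fn();
        else
                puts("Invalid/unsupported resource descriptor");
}

int main(void)
{
        dump_resource(TYPE_GPIO, BUS_NONE);
        dump_resource(TYPE_SERIAL_BUS, BUS_SPI);
        dump_resource(TYPE_SERIAL_BUS, BUS_MAX);        /* rejected */
        return 0;
}

Keeping the serial-bus handlers in a separate subtype table lets the main table stay densely indexed by resource type while the bus variants grow independently.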
diff --git a/drivers/acpi/acpica/rsio.c b/drivers/acpi/acpica/rsio.c
index 0c7efef..f6a0810 100644
--- a/drivers/acpi/acpica/rsio.c
+++ b/drivers/acpi/acpica/rsio.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsirq.c b/drivers/acpi/acpica/rsirq.c
index 50b8ad2..e23a9ec 100644
--- a/drivers/acpi/acpica/rsirq.c
+++ b/drivers/acpi/acpica/rsirq.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -264,3 +264,34 @@ struct acpi_rsconvert_info acpi_rs_convert_dma[6] = {
AML_OFFSET(dma.dma_channel_mask),
ACPI_RS_OFFSET(data.dma.channel_count)}
};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_fixed_dma
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_fixed_dma[4] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_FIXED_DMA,
+ ACPI_RS_SIZE(struct acpi_resource_fixed_dma),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_fixed_dma)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_FIXED_DMA,
+ sizeof(struct aml_resource_fixed_dma),
+ 0},
+
+ /*
+ * These fields are contiguous in both the source and destination:
+ * request_lines
+ * Channels
+ */
+
+ {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.fixed_dma.request_lines),
+ AML_OFFSET(fixed_dma.request_lines),
+ 2},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.fixed_dma.width),
+ AML_OFFSET(fixed_dma.width),
+ 1},
+
+};
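
The fixed-DMA conversion table relies on request_lines and Channels being adjacent 16-bit fields in both representations, so a single move entry with an item count of 2 copies them together. A trivial standalone illustration of that idea; byte order and packing, which the real move helper handles, are ignored here:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Two adjacent 16-bit fields copied by one "move 2 x 16-bit" table entry. */
struct fixed_dma { uint16_t request_lines; uint16_t channels; };

static void move16(void *dst, const void *src, unsigned count)
{
        memcpy(dst, src, count * sizeof(uint16_t));
}

int main(void)
{
        struct fixed_dma aml = { 0x000A, 0x0003 };      /* made-up values */
        struct fixed_dma res;

        move16(&res.request_lines, &aml.request_lines, 2);
        printf("lines=%u channels=%u\n", (unsigned)res.request_lines,
               (unsigned)res.channels);
        return 0;
}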
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index 1bfcef7..9be129f 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -70,6 +70,8 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
struct acpi_resource **resource_ptr =
ACPI_CAST_INDIRECT_PTR(struct acpi_resource, context);
struct acpi_resource *resource;
+ union aml_resource *aml_resource;
+ struct acpi_rsconvert_info *conversion_table;
acpi_status status;
ACPI_FUNCTION_TRACE(rs_convert_aml_to_resources);
@@ -84,14 +86,37 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
"Misaligned resource pointer %p", resource));
}
+ /* Get the appropriate conversion info table */
+
+ aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
+ if (acpi_ut_get_resource_type(aml) == ACPI_RESOURCE_NAME_SERIAL_BUS) {
+ if (aml_resource->common_serial_bus.type >
+ AML_RESOURCE_MAX_SERIALBUSTYPE) {
+ conversion_table = NULL;
+ } else {
+ /* This is an I2C, SPI, or UART serial_bus descriptor */
+
+ conversion_table =
+ acpi_gbl_convert_resource_serial_bus_dispatch
+ [aml_resource->common_serial_bus.type];
+ }
+ } else {
+ conversion_table =
+ acpi_gbl_get_resource_dispatch[resource_index];
+ }
+
+ if (!conversion_table) {
+ ACPI_ERROR((AE_INFO,
+ "Invalid/unsupported resource descriptor: Type 0x%2.2X",
+ resource_index));
+ return (AE_AML_INVALID_RESOURCE_TYPE);
+ }
+
/* Convert the AML byte stream resource to a local resource struct */
status =
- acpi_rs_convert_aml_to_resource(resource,
- ACPI_CAST_PTR(union aml_resource,
- aml),
- acpi_gbl_get_resource_dispatch
- [resource_index]);
+ acpi_rs_convert_aml_to_resource(resource, aml_resource,
+ conversion_table);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not convert AML resource (Type 0x%X)",
@@ -106,7 +131,7 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
/* Point to the next structure in the output buffer */
- *resource_ptr = ACPI_ADD_PTR(void, resource, resource->length);
+ *resource_ptr = ACPI_NEXT_RESOURCE(resource);
return_ACPI_STATUS(AE_OK);
}
@@ -135,6 +160,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
{
u8 *aml = output_buffer;
u8 *end_aml = output_buffer + aml_size_needed;
+ struct acpi_rsconvert_info *conversion_table;
acpi_status status;
ACPI_FUNCTION_TRACE(rs_convert_resources_to_aml);
@@ -154,11 +180,34 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
/* Perform the conversion */
- status = acpi_rs_convert_resource_to_aml(resource, ACPI_CAST_PTR(union
- aml_resource,
- aml),
- acpi_gbl_set_resource_dispatch
- [resource->type]);
+ if (resource->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+ if (resource->data.common_serial_bus.type >
+ AML_RESOURCE_MAX_SERIALBUSTYPE) {
+ conversion_table = NULL;
+ } else {
+ /* This is an I2C, SPI, or UART serial_bus descriptor */
+
+ conversion_table =
+ acpi_gbl_convert_resource_serial_bus_dispatch
+ [resource->data.common_serial_bus.type];
+ }
+ } else {
+ conversion_table =
+ acpi_gbl_set_resource_dispatch[resource->type];
+ }
+
+ if (!conversion_table) {
+ ACPI_ERROR((AE_INFO,
+ "Invalid/unsupported resource descriptor: Type 0x%2.2X",
+ resource->type));
+ return (AE_AML_INVALID_RESOURCE_TYPE);
+ }
+
+ status = acpi_rs_convert_resource_to_aml(resource,
+ ACPI_CAST_PTR(union
+ aml_resource,
+ aml),
+ conversion_table);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status,
"Could not convert resource (type 0x%X) to AML",
@@ -192,9 +241,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
/* Point to the next input resource descriptor */
- resource =
- ACPI_ADD_PTR(struct acpi_resource, resource,
- resource->length);
+ resource = ACPI_NEXT_RESOURCE(resource);
}
/* Completed buffer, but did not find an end_tag resource descriptor */
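
Both conversion directions above now advance through the internal list with ACPI_NEXT_RESOURCE, i.e. by each element's own length field, and reject a serial-bus descriptor whose subtype has no conversion table. A minimal standalone model of walking such a self-describing, variable-length list; the header layout and end-tag value are invented for the demo:

#include <stdint.h>
#include <stdio.h>

/* Each entry in the converted list is self-describing: type + total length. */
struct res_hdr {
        uint32_t type;
        uint32_t length;        /* total bytes of this entry, already aligned */
};

#define RES_END_TAG 0xFFFFFFFFu

static void walk(const uint8_t *buf)
{
        const struct res_hdr *res = (const struct res_hdr *)buf;

        while (res->type != RES_END_TAG) {
                printf("type %u, %u bytes\n", (unsigned)res->type,
                       (unsigned)res->length);

                /* Same idea as ACPI_NEXT_RESOURCE: step by this entry's length. */
                res = (const struct res_hdr *)((const uint8_t *)res +
                                               res->length);
        }
}

int main(void)
{
        struct res_hdr list[3] = {      /* hand-built demo list */
                { 1, sizeof(struct res_hdr) },
                { 2, sizeof(struct res_hdr) },
                { RES_END_TAG, sizeof(struct res_hdr) },
        };

        walk((const uint8_t *)list);
        return 0;
}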
diff --git a/drivers/acpi/acpica/rsmemory.c b/drivers/acpi/acpica/rsmemory.c
index 7cc6d86..4fd611a 100644
--- a/drivers/acpi/acpica/rsmemory.c
+++ b/drivers/acpi/acpica/rsmemory.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index 410264b..8073b37 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -83,6 +83,10 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
ACPI_FUNCTION_TRACE(rs_convert_aml_to_resource);
+ if (!info) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
if (((acpi_size) resource) & 0x3) {
/* Each internal resource struct is expected to be 32-bit aligned */
@@ -101,7 +105,6 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
* table length (# of table entries)
*/
count = INIT_TABLE_LENGTH(info);
-
while (count) {
/*
* Source is the external AML byte stream buffer,
@@ -145,6 +148,14 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
((ACPI_GET8(source) >> info->value) & 0x03);
break;
+ case ACPI_RSC_3BITFLAG:
+ /*
+ * Mask and shift the flag bits
+ */
+ ACPI_SET8(destination) = (u8)
+ ((ACPI_GET8(source) >> info->value) & 0x07);
+ break;
+
case ACPI_RSC_COUNT:
item_count = ACPI_GET8(source);
@@ -163,6 +174,69 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
(info->value * (item_count - 1));
break;
+ case ACPI_RSC_COUNT_GPIO_PIN:
+
+ target = ACPI_ADD_PTR(void, aml, info->value);
+ item_count = ACPI_GET16(target) - ACPI_GET16(source);
+
+ resource->length = resource->length + item_count;
+ item_count = item_count / 2;
+ ACPI_SET16(destination) = item_count;
+ break;
+
+ case ACPI_RSC_COUNT_GPIO_VEN:
+
+ item_count = ACPI_GET8(source);
+ ACPI_SET8(destination) = (u8)item_count;
+
+ resource->length = resource->length +
+ (info->value * item_count);
+ break;
+
+ case ACPI_RSC_COUNT_GPIO_RES:
+
+ /*
+ * Vendor data is optional (length/offset may both be zero)
+ * Examine vendor data length field first
+ */
+ target = ACPI_ADD_PTR(void, aml, (info->value + 2));
+ if (ACPI_GET16(target)) {
+
+ /* Use vendor offset to get resource source length */
+
+ target = ACPI_ADD_PTR(void, aml, info->value);
+ item_count =
+ ACPI_GET16(target) - ACPI_GET16(source);
+ } else {
+ /* No vendor data to worry about */
+
+ item_count = aml->large_header.resource_length +
+ sizeof(struct aml_resource_large_header) -
+ ACPI_GET16(source);
+ }
+
+ resource->length = resource->length + item_count;
+ ACPI_SET16(destination) = item_count;
+ break;
+
+ case ACPI_RSC_COUNT_SERIAL_VEN:
+
+ item_count = ACPI_GET16(source) - info->value;
+
+ resource->length = resource->length + item_count;
+ ACPI_SET16(destination) = item_count;
+ break;
+
+ case ACPI_RSC_COUNT_SERIAL_RES:
+
+ item_count = (aml_resource_length +
+ sizeof(struct aml_resource_large_header))
+ - ACPI_GET16(source) - info->value;
+
+ resource->length = resource->length + item_count;
+ ACPI_SET16(destination) = item_count;
+ break;
+
case ACPI_RSC_LENGTH:
resource->length = resource->length + info->value;
@@ -183,6 +257,72 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
info->opcode);
break;
+ case ACPI_RSC_MOVE_GPIO_PIN:
+
+ /* Generate and set the PIN data pointer */
+
+ target = (char *)ACPI_ADD_PTR(void, resource,
+ (resource->length -
+ item_count * 2));
+ *(u16 **)destination = ACPI_CAST_PTR(u16, target);
+
+ /* Copy the PIN data */
+
+ source = ACPI_ADD_PTR(void, aml, ACPI_GET16(source));
+ acpi_rs_move_data(target, source, item_count,
+ info->opcode);
+ break;
+
+ case ACPI_RSC_MOVE_GPIO_RES:
+
+ /* Generate and set the resource_source string pointer */
+
+ target = (char *)ACPI_ADD_PTR(void, resource,
+ (resource->length -
+ item_count));
+ *(u8 **)destination = ACPI_CAST_PTR(u8, target);
+
+ /* Copy the resource_source string */
+
+ source = ACPI_ADD_PTR(void, aml, ACPI_GET16(source));
+ acpi_rs_move_data(target, source, item_count,
+ info->opcode);
+ break;
+
+ case ACPI_RSC_MOVE_SERIAL_VEN:
+
+ /* Generate and set the Vendor Data pointer */
+
+ target = (char *)ACPI_ADD_PTR(void, resource,
+ (resource->length -
+ item_count));
+ *(u8 **)destination = ACPI_CAST_PTR(u8, target);
+
+ /* Copy the Vendor Data */
+
+ source = ACPI_ADD_PTR(void, aml, info->value);
+ acpi_rs_move_data(target, source, item_count,
+ info->opcode);
+ break;
+
+ case ACPI_RSC_MOVE_SERIAL_RES:
+
+ /* Generate and set the resource_source string pointer */
+
+ target = (char *)ACPI_ADD_PTR(void, resource,
+ (resource->length -
+ item_count));
+ *(u8 **)destination = ACPI_CAST_PTR(u8, target);
+
+ /* Copy the resource_source string */
+
+ source =
+ ACPI_ADD_PTR(void, aml,
+ (ACPI_GET16(source) + info->value));
+ acpi_rs_move_data(target, source, item_count,
+ info->opcode);
+ break;
+
case ACPI_RSC_SET8:
ACPI_MEMSET(destination, info->aml_offset, info->value);
@@ -219,13 +359,18 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
* Optional resource_source (Index and String). This is the more
* complicated case used by the Interrupt() macro
*/
- target =
- ACPI_ADD_PTR(char, resource,
- info->aml_offset + (item_count * 4));
+ target = ACPI_ADD_PTR(char, resource,
+ info->aml_offset +
+ (item_count * 4));
resource->length +=
acpi_rs_get_resource_source(aml_resource_length,
- (acpi_rs_length) (((item_count - 1) * sizeof(u32)) + info->value), destination, aml, target);
+ (acpi_rs_length)
+ (((item_count -
+ 1) * sizeof(u32)) +
+ info->value),
+ destination, aml,
+ target);
break;
case ACPI_RSC_BITMASK:
@@ -327,6 +472,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
{
void *source = NULL;
void *destination;
+ char *target;
acpi_rsdesc_size aml_length = 0;
u8 count;
u16 temp16 = 0;
@@ -334,6 +480,10 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
ACPI_FUNCTION_TRACE(rs_convert_resource_to_aml);
+ if (!info) {
+ return_ACPI_STATUS(AE_BAD_PARAMETER);
+ }
+
/*
* First table entry must be ACPI_RSC_INITxxx and must contain the
* table length (# of table entries)
@@ -383,6 +533,14 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
((ACPI_GET8(source) & 0x03) << info->value);
break;
+ case ACPI_RSC_3BITFLAG:
+ /*
+ * Mask and shift the flag bits
+ */
+ ACPI_SET8(destination) |= (u8)
+ ((ACPI_GET8(source) & 0x07) << info->value);
+ break;
+
case ACPI_RSC_COUNT:
item_count = ACPI_GET8(source);
@@ -400,6 +558,63 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
acpi_rs_set_resource_length(aml_length, aml);
break;
+ case ACPI_RSC_COUNT_GPIO_PIN:
+
+ item_count = ACPI_GET16(source);
+ ACPI_SET16(destination) = (u16)aml_length;
+
+ aml_length = (u16)(aml_length + item_count * 2);
+ target = ACPI_ADD_PTR(void, aml, info->value);
+ ACPI_SET16(target) = (u16)aml_length;
+ acpi_rs_set_resource_length(aml_length, aml);
+ break;
+
+ case ACPI_RSC_COUNT_GPIO_VEN:
+
+ item_count = ACPI_GET16(source);
+ ACPI_SET16(destination) = (u16)item_count;
+
+ aml_length =
+ (u16)(aml_length + (info->value * item_count));
+ acpi_rs_set_resource_length(aml_length, aml);
+ break;
+
+ case ACPI_RSC_COUNT_GPIO_RES:
+
+ /* Set resource source string length */
+
+ item_count = ACPI_GET16(source);
+ ACPI_SET16(destination) = (u16)aml_length;
+
+ /* Compute offset for the Vendor Data */
+
+ aml_length = (u16)(aml_length + item_count);
+ target = ACPI_ADD_PTR(void, aml, info->value);
+
+ /* Set vendor offset only if there is vendor data */
+
+ if (resource->data.gpio.vendor_length) {
+ ACPI_SET16(target) = (u16)aml_length;
+ }
+
+ acpi_rs_set_resource_length(aml_length, aml);
+ break;
+
+ case ACPI_RSC_COUNT_SERIAL_VEN:
+
+ item_count = ACPI_GET16(source);
+ ACPI_SET16(destination) = item_count + info->value;
+ aml_length = (u16)(aml_length + item_count);
+ acpi_rs_set_resource_length(aml_length, aml);
+ break;
+
+ case ACPI_RSC_COUNT_SERIAL_RES:
+
+ item_count = ACPI_GET16(source);
+ aml_length = (u16)(aml_length + item_count);
+ acpi_rs_set_resource_length(aml_length, aml);
+ break;
+
case ACPI_RSC_LENGTH:
acpi_rs_set_resource_length(info->value, aml);
@@ -417,6 +632,48 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
info->opcode);
break;
+ case ACPI_RSC_MOVE_GPIO_PIN:
+
+ destination = (char *)ACPI_ADD_PTR(void, aml,
+ ACPI_GET16
+ (destination));
+ source = *(u16 **)source;
+ acpi_rs_move_data(destination, source, item_count,
+ info->opcode);
+ break;
+
+ case ACPI_RSC_MOVE_GPIO_RES:
+
+ /* Used for both resource_source string and vendor_data */
+
+ destination = (char *)ACPI_ADD_PTR(void, aml,
+ ACPI_GET16
+ (destination));
+ source = *(u8 **)source;
+ acpi_rs_move_data(destination, source, item_count,
+ info->opcode);
+ break;
+
+ case ACPI_RSC_MOVE_SERIAL_VEN:
+
+ destination = (char *)ACPI_ADD_PTR(void, aml,
+ (aml_length -
+ item_count));
+ source = *(u8 **)source;
+ acpi_rs_move_data(destination, source, item_count,
+ info->opcode);
+ break;
+
+ case ACPI_RSC_MOVE_SERIAL_RES:
+
+ destination = (char *)ACPI_ADD_PTR(void, aml,
+ (aml_length -
+ item_count));
+ source = *(u8 **)source;
+ acpi_rs_move_data(destination, source, item_count,
+ info->opcode);
+ break;
+
case ACPI_RSC_ADDRESS:
/* Set the Resource Type, General Flags, and Type-Specific Flags */
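
The new ACPI_RSC_3BITFLAG cases mirror the existing 1- and 2-bit handlers: extraction shifts the flags byte right by the table's bit position and masks with 0x07, while the set direction masks the value and ORs it back in at the same position. A standalone sketch of both directions; the bit position used in main is arbitrary, not a real descriptor layout:

#include <stdint.h>
#include <stdio.h>

/* Extraction: shift right by the bit position, mask with 0x07. */
static uint8_t get3(uint8_t flags, unsigned pos)
{
        return (uint8_t)((flags >> pos) & 0x07);
}

/* Set direction: mask the value and OR it in at the same position. */
static uint8_t set3(uint8_t flags, uint8_t value, unsigned pos)
{
        return (uint8_t)(flags | ((value & 0x07) << pos));
}

int main(void)
{
        uint8_t aml_flags = 0;

        aml_flags = set3(aml_flags, 5, 4);      /* position 4 chosen arbitrarily */
        printf("flags=0x%2.2X decoded=%u\n", (unsigned)aml_flags,
               (unsigned)get3(aml_flags, 4));
        return 0;
}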
diff --git a/drivers/acpi/acpica/rsserial.c b/drivers/acpi/acpica/rsserial.c
new file mode 100644
index 0000000..9aa5e68
--- /dev/null
+++ b/drivers/acpi/acpica/rsserial.c
@@ -0,0 +1,441 @@
+/*******************************************************************************
+ *
+ * Module Name: rsserial - GPIO/serial_bus resource descriptors
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acresrc.h"
+
+#define _COMPONENT ACPI_RESOURCES
+ACPI_MODULE_NAME("rsserial")
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_gpio
+ *
+ ******************************************************************************/
+struct acpi_rsconvert_info acpi_rs_convert_gpio[17] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_GPIO,
+ ACPI_RS_SIZE(struct acpi_resource_gpio),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_gpio)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_GPIO,
+ sizeof(struct aml_resource_gpio),
+ 0},
+
+ /*
+ * These fields are contiguous in both the source and destination:
+ * revision_id
+ * connection_type
+ */
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.revision_id),
+ AML_OFFSET(gpio.revision_id),
+ 2},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.producer_consumer),
+ AML_OFFSET(gpio.flags),
+ 0},
+
+ {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.sharable),
+ AML_OFFSET(gpio.int_flags),
+ 3},
+
+ {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.io_restriction),
+ AML_OFFSET(gpio.int_flags),
+ 0},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.triggering),
+ AML_OFFSET(gpio.int_flags),
+ 0},
+
+ {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.polarity),
+ AML_OFFSET(gpio.int_flags),
+ 1},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.pin_config),
+ AML_OFFSET(gpio.pin_config),
+ 1},
+
+ /*
+ * These fields are contiguous in both the source and destination:
+ * drive_strength
+ * debounce_timeout
+ */
+ {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.gpio.drive_strength),
+ AML_OFFSET(gpio.drive_strength),
+ 2},
+
+ /* Pin Table */
+
+ {ACPI_RSC_COUNT_GPIO_PIN, ACPI_RS_OFFSET(data.gpio.pin_table_length),
+ AML_OFFSET(gpio.pin_table_offset),
+ AML_OFFSET(gpio.res_source_offset)},
+
+ {ACPI_RSC_MOVE_GPIO_PIN, ACPI_RS_OFFSET(data.gpio.pin_table),
+ AML_OFFSET(gpio.pin_table_offset),
+ 0},
+
+ /* Resource Source */
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.resource_source.index),
+ AML_OFFSET(gpio.res_source_index),
+ 1},
+
+ {ACPI_RSC_COUNT_GPIO_RES,
+ ACPI_RS_OFFSET(data.gpio.resource_source.string_length),
+ AML_OFFSET(gpio.res_source_offset),
+ AML_OFFSET(gpio.vendor_offset)},
+
+ {ACPI_RSC_MOVE_GPIO_RES,
+ ACPI_RS_OFFSET(data.gpio.resource_source.string_ptr),
+ AML_OFFSET(gpio.res_source_offset),
+ 0},
+
+ /* Vendor Data */
+
+ {ACPI_RSC_COUNT_GPIO_VEN, ACPI_RS_OFFSET(data.gpio.vendor_length),
+ AML_OFFSET(gpio.vendor_length),
+ 1},
+
+ {ACPI_RSC_MOVE_GPIO_RES, ACPI_RS_OFFSET(data.gpio.vendor_data),
+ AML_OFFSET(gpio.vendor_offset),
+ 0},
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_i2c_serial_bus
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[16] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
+ ACPI_RS_SIZE(struct acpi_resource_i2c_serialbus),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_i2c_serial_bus)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS,
+ sizeof(struct aml_resource_i2c_serialbus),
+ 0},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id),
+ AML_OFFSET(common_serial_bus.revision_id),
+ 1},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type),
+ AML_OFFSET(common_serial_bus.type),
+ 1},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode),
+ AML_OFFSET(common_serial_bus.flags),
+ 0},
+
+ {ACPI_RSC_1BITFLAG,
+ ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer),
+ AML_OFFSET(common_serial_bus.flags),
+ 1},
+
+ {ACPI_RSC_MOVE8,
+ ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
+ AML_OFFSET(common_serial_bus.type_revision_id),
+ 1},
+
+ {ACPI_RSC_MOVE16,
+ ACPI_RS_OFFSET(data.common_serial_bus.type_data_length),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ 1},
+
+ /* Vendor data */
+
+ {ACPI_RSC_COUNT_SERIAL_VEN,
+ ACPI_RS_OFFSET(data.common_serial_bus.vendor_length),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ AML_RESOURCE_I2C_MIN_DATA_LEN},
+
+ {ACPI_RSC_MOVE_SERIAL_VEN,
+ ACPI_RS_OFFSET(data.common_serial_bus.vendor_data),
+ 0,
+ sizeof(struct aml_resource_i2c_serialbus)},
+
+ /* Resource Source */
+
+ {ACPI_RSC_MOVE8,
+ ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index),
+ AML_OFFSET(common_serial_bus.res_source_index),
+ 1},
+
+ {ACPI_RSC_COUNT_SERIAL_RES,
+ ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ sizeof(struct aml_resource_common_serialbus)},
+
+ {ACPI_RSC_MOVE_SERIAL_RES,
+ ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ sizeof(struct aml_resource_common_serialbus)},
+
+ /* I2C bus type specific */
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.i2c_serial_bus.access_mode),
+ AML_OFFSET(i2c_serial_bus.type_specific_flags),
+ 0},
+
+ {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.i2c_serial_bus.connection_speed),
+ AML_OFFSET(i2c_serial_bus.connection_speed),
+ 1},
+
+ {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.i2c_serial_bus.slave_address),
+ AML_OFFSET(i2c_serial_bus.slave_address),
+ 1},
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_spi_serial_bus
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[20] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
+ ACPI_RS_SIZE(struct acpi_resource_spi_serialbus),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_spi_serial_bus)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS,
+ sizeof(struct aml_resource_spi_serialbus),
+ 0},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id),
+ AML_OFFSET(common_serial_bus.revision_id),
+ 1},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type),
+ AML_OFFSET(common_serial_bus.type),
+ 1},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode),
+ AML_OFFSET(common_serial_bus.flags),
+ 0},
+
+ {ACPI_RSC_1BITFLAG,
+ ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer),
+ AML_OFFSET(common_serial_bus.flags),
+ 1},
+
+ {ACPI_RSC_MOVE8,
+ ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
+ AML_OFFSET(common_serial_bus.type_revision_id),
+ 1},
+
+ {ACPI_RSC_MOVE16,
+ ACPI_RS_OFFSET(data.common_serial_bus.type_data_length),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ 1},
+
+ /* Vendor data */
+
+ {ACPI_RSC_COUNT_SERIAL_VEN,
+ ACPI_RS_OFFSET(data.common_serial_bus.vendor_length),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ AML_RESOURCE_SPI_MIN_DATA_LEN},
+
+ {ACPI_RSC_MOVE_SERIAL_VEN,
+ ACPI_RS_OFFSET(data.common_serial_bus.vendor_data),
+ 0,
+ sizeof(struct aml_resource_spi_serialbus)},
+
+ /* Resource Source */
+
+ {ACPI_RSC_MOVE8,
+ ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index),
+ AML_OFFSET(common_serial_bus.res_source_index),
+ 1},
+
+ {ACPI_RSC_COUNT_SERIAL_RES,
+ ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ sizeof(struct aml_resource_common_serialbus)},
+
+ {ACPI_RSC_MOVE_SERIAL_RES,
+ ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ sizeof(struct aml_resource_common_serialbus)},
+
+ /* Spi bus type specific */
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.spi_serial_bus.wire_mode),
+ AML_OFFSET(spi_serial_bus.type_specific_flags),
+ 0},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.spi_serial_bus.device_polarity),
+ AML_OFFSET(spi_serial_bus.type_specific_flags),
+ 1},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.data_bit_length),
+ AML_OFFSET(spi_serial_bus.data_bit_length),
+ 1},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.clock_phase),
+ AML_OFFSET(spi_serial_bus.clock_phase),
+ 1},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.clock_polarity),
+ AML_OFFSET(spi_serial_bus.clock_polarity),
+ 1},
+
+ {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.spi_serial_bus.device_selection),
+ AML_OFFSET(spi_serial_bus.device_selection),
+ 1},
+
+ {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.spi_serial_bus.connection_speed),
+ AML_OFFSET(spi_serial_bus.connection_speed),
+ 1},
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_uart_serial_bus
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[22] = {
+ {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
+ ACPI_RS_SIZE(struct acpi_resource_uart_serialbus),
+ ACPI_RSC_TABLE_SIZE(acpi_rs_convert_uart_serial_bus)},
+
+ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS,
+ sizeof(struct aml_resource_uart_serialbus),
+ 0},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id),
+ AML_OFFSET(common_serial_bus.revision_id),
+ 1},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type),
+ AML_OFFSET(common_serial_bus.type),
+ 1},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode),
+ AML_OFFSET(common_serial_bus.flags),
+ 0},
+
+ {ACPI_RSC_1BITFLAG,
+ ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer),
+ AML_OFFSET(common_serial_bus.flags),
+ 1},
+
+ {ACPI_RSC_MOVE8,
+ ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
+ AML_OFFSET(common_serial_bus.type_revision_id),
+ 1},
+
+ {ACPI_RSC_MOVE16,
+ ACPI_RS_OFFSET(data.common_serial_bus.type_data_length),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ 1},
+
+ /* Vendor data */
+
+ {ACPI_RSC_COUNT_SERIAL_VEN,
+ ACPI_RS_OFFSET(data.common_serial_bus.vendor_length),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ AML_RESOURCE_UART_MIN_DATA_LEN},
+
+ {ACPI_RSC_MOVE_SERIAL_VEN,
+ ACPI_RS_OFFSET(data.common_serial_bus.vendor_data),
+ 0,
+ sizeof(struct aml_resource_uart_serialbus)},
+
+ /* Resource Source */
+
+ {ACPI_RSC_MOVE8,
+ ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index),
+ AML_OFFSET(common_serial_bus.res_source_index),
+ 1},
+
+ {ACPI_RSC_COUNT_SERIAL_RES,
+ ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ sizeof(struct aml_resource_common_serialbus)},
+
+ {ACPI_RSC_MOVE_SERIAL_RES,
+ ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr),
+ AML_OFFSET(common_serial_bus.type_data_length),
+ sizeof(struct aml_resource_common_serialbus)},
+
+ /* Uart bus type specific */
+
+ {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.flow_control),
+ AML_OFFSET(uart_serial_bus.type_specific_flags),
+ 0},
+
+ {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.stop_bits),
+ AML_OFFSET(uart_serial_bus.type_specific_flags),
+ 2},
+
+ {ACPI_RSC_3BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.data_bits),
+ AML_OFFSET(uart_serial_bus.type_specific_flags),
+ 4},
+
+ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.endian),
+ AML_OFFSET(uart_serial_bus.type_specific_flags),
+ 7},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.uart_serial_bus.parity),
+ AML_OFFSET(uart_serial_bus.parity),
+ 1},
+
+ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.uart_serial_bus.lines_enabled),
+ AML_OFFSET(uart_serial_bus.lines_enabled),
+ 1},
+
+ {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.uart_serial_bus.rx_fifo_size),
+ AML_OFFSET(uart_serial_bus.rx_fifo_size),
+ 1},
+
+ {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.uart_serial_bus.tx_fifo_size),
+ AML_OFFSET(uart_serial_bus.tx_fifo_size),
+ 1},
+
+ {ACPI_RSC_MOVE32,
+ ACPI_RS_OFFSET(data.uart_serial_bus.default_baud_rate),
+ AML_OFFSET(uart_serial_bus.default_baud_rate),
+ 1},
+};
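Once these conversion tables are installed, callers see the decoded form through struct acpi_resource rather than raw AML. A minimal sketch of a resource-walk callback that inspects the new GPIO descriptor type might look like the following (illustrative only; count_gpio_pins is an invented name and error handling is left to the calling driver):

#include <acpi/acpi.h>

/* Count the pins described by any GpioInt/GpioIo descriptors in a
 * resource list decoded through acpi_rs_convert_gpio above. */
static acpi_status count_gpio_pins(struct acpi_resource *resource, void *context)
{
	u32 *pin_count = context;

	if (resource->type == ACPI_RESOURCE_TYPE_GPIO)
		*pin_count += resource->data.gpio.pin_table_length;

	return AE_OK;
}

Such a callback would typically be handed to acpi_walk_resources(); see the _AEI sketch further below.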
diff --git a/drivers/acpi/acpica/rsutils.c b/drivers/acpi/acpica/rsutils.c
index 231811e..433a375 100644
--- a/drivers/acpi/acpica/rsutils.c
+++ b/drivers/acpi/acpica/rsutils.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -144,6 +144,9 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
* since there are no alignment or endian issues
*/
case ACPI_RSC_MOVE8:
+ case ACPI_RSC_MOVE_GPIO_RES:
+ case ACPI_RSC_MOVE_SERIAL_VEN:
+ case ACPI_RSC_MOVE_SERIAL_RES:
ACPI_MEMCPY(destination, source, item_count);
return;
@@ -153,6 +156,7 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
* misaligned memory transfers
*/
case ACPI_RSC_MOVE16:
+ case ACPI_RSC_MOVE_GPIO_PIN:
ACPI_MOVE_16_TO_16(&ACPI_CAST_PTR(u16, destination)[i],
&ACPI_CAST_PTR(u16, source)[i]);
break;
@@ -590,6 +594,56 @@ acpi_rs_get_prs_method_data(struct acpi_namespace_node *node,
/*******************************************************************************
*
+ * FUNCTION: acpi_rs_get_aei_method_data
+ *
+ * PARAMETERS: Node - Device node
+ * ret_buffer - Pointer to a buffer structure for the
+ * results
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This function is called to get the _AEI resource list for
+ * the device specified by the handle passed in
+ *
+ * If the function fails, an appropriate status is returned and
+ * the contents of the caller's buffer are undefined.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_rs_get_aei_method_data(struct acpi_namespace_node *node,
+ struct acpi_buffer *ret_buffer)
+{
+ union acpi_operand_object *obj_desc;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(rs_get_aei_method_data);
+
+ /* Parameters guaranteed valid by caller */
+
+ /* Execute the method, no parameters */
+
+ status = acpi_ut_evaluate_object(node, METHOD_NAME__AEI,
+ ACPI_BTYPE_BUFFER, &obj_desc);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ /*
+ * Make the call to create a resource linked list from the
+ * byte stream buffer that comes back from the _AEI method
+ * execution.
+ */
+ status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
+
+ /* On exit, we must delete the object returned by evaluate_object */
+
+ acpi_ut_remove_reference(obj_desc);
+ return_ACPI_STATUS(status);
+}
+
+/*******************************************************************************
+ *
* FUNCTION: acpi_rs_get_method_data
*
* PARAMETERS: Handle - Handle to the containing object
diff --git a/drivers/acpi/acpica/rsxface.c b/drivers/acpi/acpica/rsxface.c
index fe86b37..f58c098 100644
--- a/drivers/acpi/acpica/rsxface.c
+++ b/drivers/acpi/acpica/rsxface.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -307,6 +307,46 @@ acpi_set_current_resources(acpi_handle device_handle,
ACPI_EXPORT_SYMBOL(acpi_set_current_resources)
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_get_event_resources
+ *
+ * PARAMETERS: device_handle - Handle to the device object for the
+ * device we are getting event resources for
+ * ret_buffer - Pointer to a buffer that will receive the
+ * event resources for the device
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: This function is called to get the event resources for a
+ * specific device. The caller must first acquire a handle for
+ * the desired device. The resource data is returned in the
+ * buffer pointed to by the ret_buffer parameter. Uses the
+ * _AEI method.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_event_resources(acpi_handle device_handle,
+ struct acpi_buffer *ret_buffer)
+{
+ acpi_status status;
+ struct acpi_namespace_node *node;
+
+ ACPI_FUNCTION_TRACE(acpi_get_event_resources);
+
+ /* Validate parameters then dispatch to internal routine */
+
+ status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+
+ status = acpi_rs_get_aei_method_data(node, ret_buffer);
+ return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_event_resources)
+
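A hedged usage sketch for the new entry point (the helper name is invented; buffer management follows the usual ACPICA convention of letting the subsystem allocate the return buffer):

#include <acpi/acpi.h>

/* Illustrative caller: fetch the raw _AEI resource list for a device. */
static acpi_status get_aei_list(acpi_handle device, struct acpi_buffer *out)
{
	acpi_status status;

	/* Let ACPICA size and allocate the return buffer */
	out->length = ACPI_ALLOCATE_BUFFER;
	out->pointer = NULL;

	status = acpi_get_event_resources(device, out);
	if (ACPI_FAILURE(status))
		return status;

	/*
	 * out->pointer now holds a struct acpi_resource list terminated by an
	 * ACPI_RESOURCE_TYPE_END_TAG entry; the caller owns it and must free it.
	 */
	return AE_OK;
}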
/******************************************************************************
*
* FUNCTION: acpi_resource_to_address64
@@ -486,8 +526,9 @@ acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context)
*
* PARAMETERS: device_handle - Handle to the device object for the
* device we are querying
- * Name - Method name of the resources we want
- * (METHOD_NAME__CRS or METHOD_NAME__PRS)
+ * Name - Method name of the resources we want.
+ * (METHOD_NAME__CRS, METHOD_NAME__PRS, or
+ * METHOD_NAME__AEI)
* user_function - Called for each resource
* Context - Passed to user_function
*
@@ -514,11 +555,12 @@ acpi_walk_resources(acpi_handle device_handle,
if (!device_handle || !user_function || !name ||
(!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) &&
- !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS))) {
+ !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS) &&
+ !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI))) {
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
- /* Get the _CRS or _PRS resource list */
+ /* Get the _CRS/_PRS/_AEI resource list */
buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
status = acpi_rs_get_method_data(device_handle, name, &buffer);
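With the parameter check above relaxed to accept _AEI, the same walk interface can enumerate GPIO-signaled event resources. A sketch, assuming a callback like count_gpio_pins from the earlier GPIO example:

#include <acpi/acpi.h>

/* Illustrative helper: total pins declared by a device's _AEI object. */
static u32 count_aei_pins(acpi_handle device_handle)
{
	u32 pin_count = 0;

	/* _AEI is now an accepted method name for the resource walk */
	if (ACPI_FAILURE(acpi_walk_resources(device_handle, METHOD_NAME__AEI,
					     count_gpio_pins, &pin_count)))
		return 0;

	return pin_count;
}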
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 6f5588e..c5d8704 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -63,14 +63,15 @@ static void acpi_tb_setup_fadt_registers(void);
typedef struct acpi_fadt_info {
char *name;
- u8 address64;
- u8 address32;
- u8 length;
+ u16 address64;
+ u16 address32;
+ u16 length;
u8 default_length;
u8 type;
} acpi_fadt_info;
+#define ACPI_FADT_OPTIONAL 0
#define ACPI_FADT_REQUIRED 1
#define ACPI_FADT_SEPARATE_LENGTH 2
@@ -87,7 +88,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
ACPI_FADT_OFFSET(pm1b_event_block),
ACPI_FADT_OFFSET(pm1_event_length),
ACPI_PM1_REGISTER_WIDTH * 2, /* Enable + Status register */
- 0},
+ ACPI_FADT_OPTIONAL},
{"Pm1aControlBlock",
ACPI_FADT_OFFSET(xpm1a_control_block),
@@ -101,7 +102,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
ACPI_FADT_OFFSET(pm1b_control_block),
ACPI_FADT_OFFSET(pm1_control_length),
ACPI_PM1_REGISTER_WIDTH,
- 0},
+ ACPI_FADT_OPTIONAL},
{"Pm2ControlBlock",
ACPI_FADT_OFFSET(xpm2_control_block),
@@ -139,7 +140,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
typedef struct acpi_fadt_pm_info {
struct acpi_generic_address *target;
- u8 source;
+ u16 source;
u8 register_num;
} acpi_fadt_pm_info;
@@ -253,8 +254,13 @@ void acpi_tb_parse_fadt(u32 table_index)
acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt,
ACPI_SIG_DSDT, ACPI_TABLE_INDEX_DSDT);
- acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xfacs,
- ACPI_SIG_FACS, ACPI_TABLE_INDEX_FACS);
+ /* If Hardware Reduced flag is set, there is no FACS */
+
+ if (!acpi_gbl_reduced_hardware) {
+ acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.
+ Xfacs, ACPI_SIG_FACS,
+ ACPI_TABLE_INDEX_FACS);
+ }
}
/*******************************************************************************
@@ -277,12 +283,12 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
{
/*
* Check if the FADT is larger than the largest table that we expect
- * (the ACPI 2.0/3.0 version). If so, truncate the table, and issue
+ * (the ACPI 5.0 version). If so, truncate the table, and issue
* a warning.
*/
if (length > sizeof(struct acpi_table_fadt)) {
ACPI_WARNING((AE_INFO,
- "FADT (revision %u) is longer than ACPI 2.0 version, "
+ "FADT (revision %u) is longer than ACPI 5.0 version, "
"truncating length %u to %u",
table->revision, length,
(u32)sizeof(struct acpi_table_fadt)));
@@ -297,6 +303,13 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
ACPI_MEMCPY(&acpi_gbl_FADT, table,
ACPI_MIN(length, sizeof(struct acpi_table_fadt)));
+ /* Take a copy of the Hardware Reduced flag */
+
+ acpi_gbl_reduced_hardware = FALSE;
+ if (acpi_gbl_FADT.flags & ACPI_FADT_HW_REDUCED) {
+ acpi_gbl_reduced_hardware = TRUE;
+ }
+
/* Convert the local copy of the FADT to the common internal format */
acpi_tb_convert_fadt();
@@ -502,6 +515,12 @@ static void acpi_tb_validate_fadt(void)
acpi_gbl_FADT.Xdsdt = (u64) acpi_gbl_FADT.dsdt;
}
+ /* If Hardware Reduced flag is set, we are all done */
+
+ if (acpi_gbl_reduced_hardware) {
+ return;
+ }
+
/* Examine all of the 64-bit extended address fields (X fields) */
for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
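The hardware-reduced state is derived once from the FADT flags (as in the acpi_tb_create_local_fadt hunk above) and then consulted before any FACS or fixed-register handling. A minimal sketch of the flag test, using the ACPI_FADT_HW_REDUCED bit referenced in the patch (the helper name is invented):

#include <acpi/acpi.h>

/* Returns TRUE when the platform declares itself hardware-reduced (ACPI 5.0). */
static u8 fadt_is_hardware_reduced(const struct acpi_table_fadt *fadt)
{
	return (fadt->flags & ACPI_FADT_HW_REDUCED) ? TRUE : FALSE;
}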
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index a55cb2b..4903e36 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 62365f6..1aecf7b 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 0f2d395..09ca39e 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -135,6 +135,13 @@ acpi_status acpi_tb_initialize_facs(void)
{
acpi_status status;
+ /* If Hardware Reduced flag is set, there is no FACS */
+
+ if (acpi_gbl_reduced_hardware) {
+ acpi_gbl_FACS = NULL;
+ return (AE_OK);
+ }
+
status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
ACPI_CAST_INDIRECT_PTR(struct
acpi_table_header,
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index e7d13f5..abcc641 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -6,7 +6,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 7eb6c6c..4258f64 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
new file mode 100644
index 0000000..67932ae
--- /dev/null
+++ b/drivers/acpi/acpica/utaddress.c
@@ -0,0 +1,294 @@
+/******************************************************************************
+ *
+ * Module Name: utaddress - op_region address range check
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utaddress")
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_add_address_range
+ *
+ * PARAMETERS: space_id - Address space ID
+ * Address - op_region start address
+ * Length - op_region length
+ * region_node - op_region namespace node
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Add the Operation Region address range to the global list.
+ * The only supported Space IDs are Memory and I/O. Called when
+ * the op_region address/length operands are fully evaluated.
+ *
+ * MUTEX: Locks the namespace
+ *
+ * NOTE: Because this interface is only called when an op_region argument
+ * list is evaluated, there cannot be any duplicate region_nodes.
+ * Duplicate Address/Length values are allowed, however, so that multiple
+ * address conflicts can be detected.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ut_add_address_range(acpi_adr_space_type space_id,
+ acpi_physical_address address,
+ u32 length, struct acpi_namespace_node *region_node)
+{
+ struct acpi_address_range *range_info;
+ acpi_status status;
+
+ ACPI_FUNCTION_TRACE(ut_add_address_range);
+
+ if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
+ (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
+ return_ACPI_STATUS(AE_OK);
+ }
+
+ /* Allocate/init a new info block, add it to the appropriate list */
+
+ range_info = ACPI_ALLOCATE(sizeof(struct acpi_address_range));
+ if (!range_info) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
+ range_info->start_address = address;
+ range_info->end_address = (address + length - 1);
+ range_info->region_node = region_node;
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ ACPI_FREE(range_info);
+ return_ACPI_STATUS(status);
+ }
+
+ range_info->next = acpi_gbl_address_range_list[space_id];
+ acpi_gbl_address_range_list[space_id] = range_info;
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "\nAdded [%4.4s] address range: 0x%p-0x%p\n",
+ acpi_ut_get_node_name(range_info->region_node),
+ ACPI_CAST_PTR(void, address),
+ ACPI_CAST_PTR(void, range_info->end_address)));
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_remove_address_range
+ *
+ * PARAMETERS: space_id - Address space ID
+ * region_node - op_region namespace node
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Remove the Operation Region from the global list. The only
+ * supported Space IDs are Memory and I/O. Called when an
+ * op_region is deleted.
+ *
+ * MUTEX: Assumes the namespace is locked
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_remove_address_range(acpi_adr_space_type space_id,
+ struct acpi_namespace_node *region_node)
+{
+ struct acpi_address_range *range_info;
+ struct acpi_address_range *prev;
+
+ ACPI_FUNCTION_TRACE(ut_remove_address_range);
+
+ if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
+ (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
+ return_VOID;
+ }
+
+ /* Get the appropriate list head and check the list */
+
+ range_info = prev = acpi_gbl_address_range_list[space_id];
+ while (range_info) {
+ if (range_info->region_node == region_node) {
+ if (range_info == prev) { /* Found at list head */
+ acpi_gbl_address_range_list[space_id] =
+ range_info->next;
+ } else {
+ prev->next = range_info->next;
+ }
+
+ ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+ "\nRemoved [%4.4s] address range: 0x%p-0x%p\n",
+ acpi_ut_get_node_name(range_info->
+ region_node),
+ ACPI_CAST_PTR(void,
+ range_info->
+ start_address),
+ ACPI_CAST_PTR(void,
+ range_info->
+ end_address)));
+
+ ACPI_FREE(range_info);
+ return_VOID;
+ }
+
+ prev = range_info;
+ range_info = range_info->next;
+ }
+
+ return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_check_address_range
+ *
+ * PARAMETERS: space_id - Address space ID
+ * Address - Start address
+ * Length - Length of address range
+ * Warn - TRUE if warning on overlap desired
+ *
+ * RETURN: Count of the number of conflicts detected. Zero is always
+ * returned for Space IDs other than Memory or I/O.
+ *
+ * DESCRIPTION: Check if the input address range overlaps any of the
+ * ASL operation region address ranges. The only supported
+ * Space IDs are Memory and I/O.
+ *
+ * MUTEX: Assumes the namespace is locked.
+ *
+ ******************************************************************************/
+
+u32
+acpi_ut_check_address_range(acpi_adr_space_type space_id,
+ acpi_physical_address address, u32 length, u8 warn)
+{
+ struct acpi_address_range *range_info;
+ acpi_physical_address end_address;
+ char *pathname;
+ u32 overlap_count = 0;
+
+ ACPI_FUNCTION_TRACE(ut_check_address_range);
+
+ if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
+ (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
+ return_UINT32(0);
+ }
+
+ range_info = acpi_gbl_address_range_list[space_id];
+ end_address = address + length - 1;
+
+ /* Check entire list for all possible conflicts */
+
+ while (range_info) {
+ /*
+ * Check if the requested Address/Length overlaps this address_range.
+ * Four cases to consider:
+ *
+ * 1) Input address/length is contained completely in the address range
+ * 2) Input address/length overlaps range at the range start
+ * 3) Input address/length overlaps range at the range end
+ * 4) Input address/length completely encompasses the range
+ */
+ if ((address <= range_info->end_address) &&
+ (end_address >= range_info->start_address)) {
+
+ /* Found an address range overlap */
+
+ overlap_count++;
+ if (warn) { /* Optional warning message */
+ pathname =
+ acpi_ns_get_external_pathname(range_info->
+ region_node);
+
+ ACPI_WARNING((AE_INFO,
+ "0x%p-0x%p %s conflicts with Region %s %d",
+ ACPI_CAST_PTR(void, address),
+ ACPI_CAST_PTR(void, end_address),
+ acpi_ut_get_region_name(space_id),
+ pathname, overlap_count));
+ ACPI_FREE(pathname);
+ }
+ }
+
+ range_info = range_info->next;
+ }
+
+ return_UINT32(overlap_count);
+}
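The four overlap cases enumerated in the comment above all reduce to a single pair of comparisons, which is why one if test suffices. A standalone sketch of the same interval check (plain C, invented names, not ACPICA types):

#include <stdbool.h>
#include <stdint.h>

/* Inclusive ranges [start1, end1] and [start2, end2] overlap exactly when
 * each range starts no later than the other one ends. */
static bool ranges_overlap(uint64_t start1, uint64_t end1,
			   uint64_t start2, uint64_t end2)
{
	return (start1 <= end2) && (end1 >= start2);
}

For example, an op_region covering 0x400-0x4FF conflicts with a request for 0x480, length 0x10 (0x480-0x48F): 0x480 <= 0x4FF and 0x48F >= 0x400, which is case 1 in the comment (the request is contained entirely within the region).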
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_delete_address_lists
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: None
+ *
+ * DESCRIPTION: Delete all global address range lists (called during
+ * subsystem shutdown).
+ *
+ ******************************************************************************/
+
+void acpi_ut_delete_address_lists(void)
+{
+ struct acpi_address_range *next;
+ struct acpi_address_range *range_info;
+ int i;
+
+ /* Delete all elements in all address range lists */
+
+ for (i = 0; i < ACPI_ADDRESS_RANGE_MAX; i++) {
+ next = acpi_gbl_address_range_list[i];
+
+ while (next) {
+ range_info = next;
+ next = range_info->next;
+ ACPI_FREE(range_info);
+ }
+
+ acpi_gbl_address_range_list[i] = NULL;
+ }
+}
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 0a69735..9982d2ea 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index aded299..3317c0a 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdebug.c b/drivers/acpi/acpica/utdebug.c
index a1f8d75..a0998a8 100644
--- a/drivers/acpi/acpica/utdebug.c
+++ b/drivers/acpi/acpica/utdebug.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
index 8b087e2..d42ede52 100644
--- a/drivers/acpi/acpica/utdecode.c
+++ b/drivers/acpi/acpica/utdecode.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -171,7 +171,9 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
"SMBus",
"SystemCMOS",
"PCIBARTarget",
- "IPMI"
+ "IPMI",
+ "GeneralPurposeIo",
+ "GenericSerialBus"
};
char *acpi_ut_get_region_name(u8 space_id)
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 31f5a78..2a6c3e1 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -215,11 +215,14 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
"***** Region %p\n", object));
- /* Invalidate the region address/length via the host OS */
-
- acpi_os_invalidate_address(object->region.space_id,
- object->region.address,
- (acpi_size) object->region.length);
+ /*
+ * Update address_range list. However, only permanent regions
+ * are installed in this list. (Not created within a method)
+ */
+ if (!(object->region.node->flags & ANOBJ_TEMPORARY)) {
+ acpi_ut_remove_address_range(object->region.space_id,
+ object->region.node);
+ }
second_desc = acpi_ns_get_secondary_object(object);
if (second_desc) {
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 18f73c9..479f32b 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index ffba0a3..4153584 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -264,6 +264,12 @@ acpi_status acpi_ut_init_globals(void)
return_ACPI_STATUS(status);
}
+ /* Address Range lists */
+
+ for (i = 0; i < ACPI_ADDRESS_RANGE_MAX; i++) {
+ acpi_gbl_address_range_list[i] = NULL;
+ }
+
/* Mutex locked flags */
for (i = 0; i < ACPI_NUM_MUTEX; i++) {
diff --git a/drivers/acpi/acpica/utids.c b/drivers/acpi/acpica/utids.c
index b679ea6..c92eb1d 100644
--- a/drivers/acpi/acpica/utids.c
+++ b/drivers/acpi/acpica/utids.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utinit.c b/drivers/acpi/acpica/utinit.c
index 191b682..8359c0c 100644
--- a/drivers/acpi/acpica/utinit.c
+++ b/drivers/acpi/acpica/utinit.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -92,6 +92,7 @@ static void acpi_ut_terminate(void)
gpe_xrupt_info = next_gpe_xrupt_info;
}
+ acpi_ut_delete_address_lists();
return_VOID;
}
diff --git a/drivers/acpi/acpica/utlock.c b/drivers/acpi/acpica/utlock.c
index f6bb75c..155fd78 100644
--- a/drivers/acpi/acpica/utlock.c
+++ b/drivers/acpi/acpica/utlock.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmath.c b/drivers/acpi/acpica/utmath.c
index ce481da..2491a55 100644
--- a/drivers/acpi/acpica/utmath.c
+++ b/drivers/acpi/acpica/utmath.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index c33a852..86f19db 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 7d797e2..43174df 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -293,14 +293,10 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
{
- acpi_thread_id this_thread_id;
-
ACPI_FUNCTION_NAME(ut_release_mutex);
- this_thread_id = acpi_os_get_thread_id();
-
ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Thread %u releasing Mutex [%s]\n",
- (u32)this_thread_id,
+ (u32)acpi_os_get_thread_id(),
acpi_ut_get_mutex_name(mutex_id)));
if (mutex_id > ACPI_MAX_MUTEX) {
@@ -329,7 +325,8 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
* the ACPI subsystem code.
*/
for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) {
- if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
+ if (acpi_gbl_mutex_info[i].thread_id ==
+ acpi_os_get_thread_id()) {
if (i == mutex_id) {
continue;
}
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index 188340a..b112744 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utosi.c b/drivers/acpi/acpica/utosi.c
index 1fb10cb..2360cf7 100644
--- a/drivers/acpi/acpica/utosi.c
+++ b/drivers/acpi/acpica/utosi.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utresrc.c b/drivers/acpi/acpica/utresrc.c
index 6ffd3a8..9d441ea 100644
--- a/drivers/acpi/acpica/utresrc.c
+++ b/drivers/acpi/acpica/utresrc.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -43,7 +43,7 @@
#include <acpi/acpi.h>
#include "accommon.h"
-#include "amlresrc.h"
+#include "acresrc.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utresrc")
@@ -154,6 +154,138 @@ const char *acpi_gbl_typ_decode[] = {
"TypeF"
};
+const char *acpi_gbl_ppc_decode[] = {
+ "PullDefault",
+ "PullUp",
+ "PullDown",
+ "PullNone"
+};
+
+const char *acpi_gbl_ior_decode[] = {
+ "IoRestrictionNone",
+ "IoRestrictionInputOnly",
+ "IoRestrictionOutputOnly",
+ "IoRestrictionNoneAndPreserve"
+};
+
+const char *acpi_gbl_dts_decode[] = {
+ "Width8bit",
+ "Width16bit",
+ "Width32bit",
+ "Width64bit",
+ "Width128bit",
+ "Width256bit",
+};
+
+/* GPIO connection type */
+
+const char *acpi_gbl_ct_decode[] = {
+ "Interrupt",
+ "I/O"
+};
+
+/* Serial bus type */
+
+const char *acpi_gbl_sbt_decode[] = {
+ "/* UNKNOWN serial bus type */",
+ "I2C",
+ "SPI",
+ "UART"
+};
+
+/* I2C serial bus access mode */
+
+const char *acpi_gbl_am_decode[] = {
+ "AddressingMode7Bit",
+ "AddressingMode10Bit"
+};
+
+/* I2C serial bus slave mode */
+
+const char *acpi_gbl_sm_decode[] = {
+ "ControllerInitiated",
+ "DeviceInitiated"
+};
+
+/* SPI serial bus wire mode */
+
+const char *acpi_gbl_wm_decode[] = {
+ "FourWireMode",
+ "ThreeWireMode"
+};
+
+/* SPI serial clock phase */
+
+const char *acpi_gbl_cph_decode[] = {
+ "ClockPhaseFirst",
+ "ClockPhaseSecond"
+};
+
+/* SPI serial bus clock polarity */
+
+const char *acpi_gbl_cpo_decode[] = {
+ "ClockPolarityLow",
+ "ClockPolarityHigh"
+};
+
+/* SPI serial bus device polarity */
+
+const char *acpi_gbl_dp_decode[] = {
+ "PolarityLow",
+ "PolarityHigh"
+};
+
+/* UART serial bus endian */
+
+const char *acpi_gbl_ed_decode[] = {
+ "LittleEndian",
+ "BigEndian"
+};
+
+/* UART serial bus bits per byte */
+
+const char *acpi_gbl_bpb_decode[] = {
+ "DataBitsFive",
+ "DataBitsSix",
+ "DataBitsSeven",
+ "DataBitsEight",
+ "DataBitsNine",
+ "/* UNKNOWN Bits per byte */",
+ "/* UNKNOWN Bits per byte */",
+ "/* UNKNOWN Bits per byte */"
+};
+
+/* UART serial bus stop bits */
+
+const char *acpi_gbl_sb_decode[] = {
+ "StopBitsNone",
+ "StopBitsOne",
+ "StopBitsOnePlusHalf",
+ "StopBitsTwo"
+};
+
+/* UART serial bus flow control */
+
+const char *acpi_gbl_fc_decode[] = {
+ "FlowControlNone",
+ "FlowControlHardware",
+ "FlowControlXON",
+ "/* UNKNOWN flow control keyword */"
+};
+
+/* UART serial bus parity type */
+
+const char *acpi_gbl_pt_decode[] = {
+ "ParityTypeNone",
+ "ParityTypeEven",
+ "ParityTypeOdd",
+ "ParityTypeMark",
+ "ParityTypeSpace",
+ "/* UNKNOWN parity keyword */",
+ "/* UNKNOWN parity keyword */",
+ "/* UNKNOWN parity keyword */"
+};
+
#endif
/*
@@ -173,7 +305,7 @@ const u8 acpi_gbl_resource_aml_sizes[] = {
ACPI_AML_SIZE_SMALL(struct aml_resource_end_dependent),
ACPI_AML_SIZE_SMALL(struct aml_resource_io),
ACPI_AML_SIZE_SMALL(struct aml_resource_fixed_io),
- 0,
+ ACPI_AML_SIZE_SMALL(struct aml_resource_fixed_dma),
0,
0,
0,
@@ -193,7 +325,17 @@ const u8 acpi_gbl_resource_aml_sizes[] = {
ACPI_AML_SIZE_LARGE(struct aml_resource_address16),
ACPI_AML_SIZE_LARGE(struct aml_resource_extended_irq),
ACPI_AML_SIZE_LARGE(struct aml_resource_address64),
- ACPI_AML_SIZE_LARGE(struct aml_resource_extended_address64)
+ ACPI_AML_SIZE_LARGE(struct aml_resource_extended_address64),
+ ACPI_AML_SIZE_LARGE(struct aml_resource_gpio),
+ 0,
+ ACPI_AML_SIZE_LARGE(struct aml_resource_common_serialbus),
+};
+
+const u8 acpi_gbl_resource_aml_serial_bus_sizes[] = {
+ 0,
+ ACPI_AML_SIZE_LARGE(struct aml_resource_i2c_serialbus),
+ ACPI_AML_SIZE_LARGE(struct aml_resource_spi_serialbus),
+ ACPI_AML_SIZE_LARGE(struct aml_resource_uart_serialbus),
};
/*
@@ -209,35 +351,49 @@ static const u8 acpi_gbl_resource_types[] = {
0,
0,
0,
- ACPI_SMALL_VARIABLE_LENGTH,
- ACPI_FIXED_LENGTH,
- ACPI_SMALL_VARIABLE_LENGTH,
- ACPI_FIXED_LENGTH,
- ACPI_FIXED_LENGTH,
- ACPI_FIXED_LENGTH,
- 0,
+ ACPI_SMALL_VARIABLE_LENGTH, /* 04 IRQ */
+ ACPI_FIXED_LENGTH, /* 05 DMA */
+ ACPI_SMALL_VARIABLE_LENGTH, /* 06 start_dependent_functions */
+ ACPI_FIXED_LENGTH, /* 07 end_dependent_functions */
+ ACPI_FIXED_LENGTH, /* 08 IO */
+ ACPI_FIXED_LENGTH, /* 09 fixed_iO */
+ ACPI_FIXED_LENGTH, /* 0_a fixed_dMA */
0,
0,
0,
- ACPI_VARIABLE_LENGTH,
- ACPI_FIXED_LENGTH,
+ ACPI_VARIABLE_LENGTH, /* 0_e vendor_short */
+ ACPI_FIXED_LENGTH, /* 0_f end_tag */
/* Large descriptors */
0,
- ACPI_FIXED_LENGTH,
- ACPI_FIXED_LENGTH,
+ ACPI_FIXED_LENGTH, /* 01 Memory24 */
+ ACPI_FIXED_LENGTH, /* 02 generic_register */
0,
- ACPI_VARIABLE_LENGTH,
- ACPI_FIXED_LENGTH,
- ACPI_FIXED_LENGTH,
- ACPI_VARIABLE_LENGTH,
- ACPI_VARIABLE_LENGTH,
- ACPI_VARIABLE_LENGTH,
- ACPI_VARIABLE_LENGTH,
- ACPI_FIXED_LENGTH
+ ACPI_VARIABLE_LENGTH, /* 04 vendor_long */
+ ACPI_FIXED_LENGTH, /* 05 Memory32 */
+ ACPI_FIXED_LENGTH, /* 06 memory32_fixed */
+ ACPI_VARIABLE_LENGTH, /* 07 Dword* address */
+ ACPI_VARIABLE_LENGTH, /* 08 Word* address */
+ ACPI_VARIABLE_LENGTH, /* 09 extended_iRQ */
+ ACPI_VARIABLE_LENGTH, /* 0_a Qword* address */
+ ACPI_FIXED_LENGTH, /* 0_b Extended* address */
+ ACPI_VARIABLE_LENGTH, /* 0_c Gpio* */
+ 0,
+ ACPI_VARIABLE_LENGTH /* 0_e *serial_bus */
};
+/*
+ * For the i_aSL compiler/disassembler, we don't want any error messages
+ * because the disassembler uses the resource validation code to determine
+ * if Buffer objects are actually Resource Templates.
+ */
+#ifdef ACPI_ASL_COMPILER
+#define ACPI_RESOURCE_ERROR(plist)
+#else
+#define ACPI_RESOURCE_ERROR(plist) ACPI_ERROR(plist)
+#endif
+
/*******************************************************************************
*
* FUNCTION: acpi_ut_walk_aml_resources
@@ -265,6 +421,7 @@ acpi_ut_walk_aml_resources(u8 * aml,
u8 resource_index;
u32 length;
u32 offset = 0;
+ u8 end_tag[2] = { 0x79, 0x00 };
ACPI_FUNCTION_TRACE(ut_walk_aml_resources);
@@ -286,6 +443,10 @@ acpi_ut_walk_aml_resources(u8 * aml,
status = acpi_ut_validate_resource(aml, &resource_index);
if (ACPI_FAILURE(status)) {
+ /*
+ * Exit on failure. Cannot continue because the descriptor length
+ * may be bogus also.
+ */
return_ACPI_STATUS(status);
}
@@ -300,7 +461,7 @@ acpi_ut_walk_aml_resources(u8 * aml,
user_function(aml, length, offset, resource_index,
context);
if (ACPI_FAILURE(status)) {
- return (status);
+ return_ACPI_STATUS(status);
}
}
@@ -333,7 +494,19 @@ acpi_ut_walk_aml_resources(u8 * aml,
/* Did not find an end_tag descriptor */
- return (AE_AML_NO_RESOURCE_END_TAG);
+ if (user_function) {
+
+ /* Insert an end_tag anyway. acpi_rs_get_list_length always leaves room */
+
+ (void)acpi_ut_validate_resource(end_tag, &resource_index);
+ status =
+ user_function(end_tag, 2, offset, resource_index, context);
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
+
+ return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
}
/*******************************************************************************
@@ -354,6 +527,7 @@ acpi_ut_walk_aml_resources(u8 * aml,
acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
{
+ union aml_resource *aml_resource;
u8 resource_type;
u8 resource_index;
acpi_rs_length resource_length;
@@ -375,7 +549,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
/* Verify the large resource type (name) against the max */
if (resource_type > ACPI_RESOURCE_NAME_LARGE_MAX) {
- return (AE_AML_INVALID_RESOURCE_TYPE);
+ goto invalid_resource;
}
/*
@@ -392,15 +566,17 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
((resource_type & ACPI_RESOURCE_NAME_SMALL_MASK) >> 3);
}
- /* Check validity of the resource type, zero indicates name is invalid */
-
+ /*
+ * Check validity of the resource type, via acpi_gbl_resource_types. Zero
+ * indicates an invalid resource.
+ */
if (!acpi_gbl_resource_types[resource_index]) {
- return (AE_AML_INVALID_RESOURCE_TYPE);
+ goto invalid_resource;
}
/*
- * 2) Validate the resource_length field. This ensures that the length
- * is at least reasonable, and guarantees that it is non-zero.
+ * Validate the resource_length field. This ensures that the length
+ * is at least reasonable, and guarantees that it is non-zero.
*/
resource_length = acpi_ut_get_resource_length(aml);
minimum_resource_length = acpi_gbl_resource_aml_sizes[resource_index];
@@ -413,7 +589,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
/* Fixed length resource, length must match exactly */
if (resource_length != minimum_resource_length) {
- return (AE_AML_BAD_RESOURCE_LENGTH);
+ goto bad_resource_length;
}
break;
@@ -422,7 +598,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
/* Variable length resource, length must be at least the minimum */
if (resource_length < minimum_resource_length) {
- return (AE_AML_BAD_RESOURCE_LENGTH);
+ goto bad_resource_length;
}
break;
@@ -432,7 +608,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
if ((resource_length > minimum_resource_length) ||
(resource_length < (minimum_resource_length - 1))) {
- return (AE_AML_BAD_RESOURCE_LENGTH);
+ goto bad_resource_length;
}
break;
@@ -440,7 +616,23 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
/* Shouldn't happen (because of validation earlier), but be sure */
- return (AE_AML_INVALID_RESOURCE_TYPE);
+ goto invalid_resource;
+ }
+
+ aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
+ if (resource_type == ACPI_RESOURCE_NAME_SERIAL_BUS) {
+
+ /* Validate the bus_type field */
+
+ if ((aml_resource->common_serial_bus.type == 0) ||
+ (aml_resource->common_serial_bus.type >
+ AML_RESOURCE_MAX_SERIALBUSTYPE)) {
+ ACPI_RESOURCE_ERROR((AE_INFO,
+ "Invalid/unsupported SerialBus resource descriptor: BusType 0x%2.2X",
+ aml_resource->common_serial_bus.
+ type));
+ return (AE_AML_INVALID_RESOURCE_TYPE);
+ }
}
/* Optionally return the resource table index */
@@ -450,6 +642,22 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
}
return (AE_OK);
+
+ invalid_resource:
+
+ ACPI_RESOURCE_ERROR((AE_INFO,
+ "Invalid/unsupported resource descriptor: Type 0x%2.2X",
+ resource_type));
+ return (AE_AML_INVALID_RESOURCE_TYPE);
+
+ bad_resource_length:
+
+ ACPI_RESOURCE_ERROR((AE_INFO,
+ "Invalid resource descriptor length: Type "
+ "0x%2.2X, Length 0x%4.4X, MinLength 0x%4.4X",
+ resource_type, resource_length,
+ minimum_resource_length));
+ return (AE_AML_BAD_RESOURCE_LENGTH);
}
/*******************************************************************************
diff --git a/drivers/acpi/acpica/utstate.c b/drivers/acpi/acpica/utstate.c
index 30c21e1..4267477 100644
--- a/drivers/acpi/acpica/utstate.c
+++ b/drivers/acpi/acpica/utstate.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxface.c b/drivers/acpi/acpica/utxface.c
index 420ebfe..644e8c8 100644
--- a/drivers/acpi/acpica/utxface.c
+++ b/drivers/acpi/acpica/utxface.c
@@ -5,7 +5,7 @@
*****************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -48,6 +48,7 @@
#include "acnamesp.h"
#include "acdebug.h"
#include "actables.h"
+#include "acinterp.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utxface")
@@ -640,4 +641,41 @@ acpi_status acpi_install_interface_handler(acpi_interface_handler handler)
}
ACPI_EXPORT_SYMBOL(acpi_install_interface_handler)
+
+/*****************************************************************************
+ *
+ * FUNCTION: acpi_check_address_range
+ *
+ * PARAMETERS: space_id - Address space ID
+ * Address - Start address
+ * Length - Length
+ * Warn - TRUE if warning on overlap desired
+ *
+ * RETURN: Count of the number of conflicts detected.
+ *
+ * DESCRIPTION: Check if the input address range overlaps any of the
+ * ASL operation region address ranges.
+ *
+ ****************************************************************************/
+u32
+acpi_check_address_range(acpi_adr_space_type space_id,
+ acpi_physical_address address,
+ acpi_size length, u8 warn)
+{
+ u32 overlaps;
+ acpi_status status;
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+ if (ACPI_FAILURE(status)) {
+ return (0);
+ }
+
+ overlaps = acpi_ut_check_address_range(space_id, address,
+ (u32)length, warn);
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+ return (overlaps);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_check_address_range)
#endif /* !ACPI_ASL_COMPILER */
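A minimal usage sketch for the new acpi_check_address_range() export; the wrapper function and its -EBUSY policy below are illustrative, not part of this patch:

	#include <linux/errno.h>
	#include <linux/acpi.h>

	/* Warn (and bail out) if an MMIO window overlaps any AML operation region. */
	static int example_claim_mmio(acpi_physical_address start, acpi_size len)
	{
		u32 conflicts;

		conflicts = acpi_check_address_range(ACPI_ADR_SPACE_SYSTEM_MEMORY,
						     start, len, 1 /* warn */);
		return conflicts ? -EBUSY : 0;
	}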
diff --git a/drivers/acpi/acpica/utxferror.c b/drivers/acpi/acpica/utxferror.c
index 8d0245e..52b568a 100644
--- a/drivers/acpi/acpica/utxferror.c
+++ b/drivers/acpi/acpica/utxferror.c
@@ -5,7 +5,7 @@
******************************************************************************/
/*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxfmutex.c b/drivers/acpi/acpica/utxfmutex.c
new file mode 100644
index 0000000..1427d19
--- /dev/null
+++ b/drivers/acpi/acpica/utxfmutex.c
@@ -0,0 +1,187 @@
+/*******************************************************************************
+ *
+ * Module Name: utxfmutex - external AML mutex access functions
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ * substantially similar to the "NO WARRANTY" disclaimer below
+ * ("Disclaimer") and any redistribution must be conditioned upon
+ * including a substantially similar Disclaimer requirement for further
+ * binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ * of any contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT ACPI_UTILITIES
+ACPI_MODULE_NAME("utxfmutex")
+
+/* Local prototypes */
+static acpi_status
+acpi_ut_get_mutex_object(acpi_handle handle,
+ acpi_string pathname,
+ union acpi_operand_object **ret_obj);
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_ut_get_mutex_object
+ *
+ * PARAMETERS: Handle - Mutex or prefix handle (optional)
+ * Pathname - Mutex pathname (optional)
+ * ret_obj - Where the mutex object is returned
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Get an AML mutex object. The mutex node is pointed to by
+ * Handle:Pathname. Either Handle or Pathname can be NULL, but
+ * not both.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_get_mutex_object(acpi_handle handle,
+ acpi_string pathname,
+ union acpi_operand_object **ret_obj)
+{
+ struct acpi_namespace_node *mutex_node;
+ union acpi_operand_object *mutex_obj;
+ acpi_status status;
+
+ /* Parameter validation */
+
+ if (!ret_obj || (!handle && !pathname)) {
+ return (AE_BAD_PARAMETER);
+ }
+
+ /* Get the namespace node for the mutex */
+
+ mutex_node = handle;
+ if (pathname != NULL) {
+ status = acpi_get_handle(handle, pathname,
+ ACPI_CAST_PTR(acpi_handle,
+ &mutex_node));
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+ }
+
+ /* Ensure that we actually have a Mutex object */
+
+ if (!mutex_node || (mutex_node->type != ACPI_TYPE_MUTEX)) {
+ return (AE_TYPE);
+ }
+
+ /* Get the low-level mutex object */
+
+ mutex_obj = acpi_ns_get_attached_object(mutex_node);
+ if (!mutex_obj) {
+ return (AE_NULL_OBJECT);
+ }
+
+ *ret_obj = mutex_obj;
+ return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_acquire_mutex
+ *
+ * PARAMETERS: Handle - Mutex or prefix handle (optional)
+ * Pathname - Mutex pathname (optional)
+ * Timeout - Max time to wait for the lock (millisec)
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Acquire an AML mutex. This is a device driver interface to
+ * AML mutex objects, and allows for transaction locking between
+ * drivers and AML code. The mutex node is pointed to by
+ * Handle:Pathname. Either Handle or Pathname can be NULL, but
+ * not both.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_acquire_mutex(acpi_handle handle, acpi_string pathname, u16 timeout)
+{
+ acpi_status status;
+ union acpi_operand_object *mutex_obj;
+
+ /* Get the low-level mutex associated with Handle:Pathname */
+
+ status = acpi_ut_get_mutex_object(handle, pathname, &mutex_obj);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Acquire the OS mutex */
+
+ status = acpi_os_acquire_mutex(mutex_obj->mutex.os_mutex, timeout);
+ return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_release_mutex
+ *
+ * PARAMETERS: Handle - Mutex or prefix handle (optional)
+ * Pathname - Mutex pathname (optional)
+ *
+ * RETURN: Status
+ *
+ * DESCRIPTION: Release an AML mutex. This is a device driver interface to
+ * AML mutex objects, and allows for transaction locking between
+ * drivers and AML code. The mutex node is pointed to by
+ * Handle:Pathname. Either Handle or Pathname can be NULL, but
+ * not both.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_release_mutex(acpi_handle handle, acpi_string pathname)
+{
+ acpi_status status;
+ union acpi_operand_object *mutex_obj;
+
+ /* Get the low-level mutex associated with Handle:Pathname */
+
+ status = acpi_ut_get_mutex_object(handle, pathname, &mutex_obj);
+ if (ACPI_FAILURE(status)) {
+ return (status);
+ }
+
+ /* Release the OS mutex */
+
+ acpi_os_release_mutex(mutex_obj->mutex.os_mutex);
+ return (AE_OK);
+}
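A usage sketch for the new driver-facing mutex interface; the mutex pathname below is hypothetical, and the 0xFFFF timeout means wait indefinitely:

	/* Serialize a driver transaction against AML using a firmware mutex. */
	static acpi_status example_locked_transaction(void)
	{
		acpi_status status;

		status = acpi_acquire_mutex(NULL, "\\_SB.PCI0.MUT0", 0xFFFF);
		if (ACPI_FAILURE(status))
			return status;

		/* ... touch hardware that AML also drives ... */

		return acpi_release_mutex(NULL, "\\_SB.PCI0.MUT0");
	}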
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 6154036..e5d53b7 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -34,13 +34,13 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
+#include <linux/acpi_io.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/kref.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
-#include <acpi/atomicio.h>
#include "apei-internal.h"
@@ -70,7 +70,7 @@ int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
{
int rc;
- rc = acpi_atomic_read(val, &entry->register_region);
+ rc = apei_read(val, &entry->register_region);
if (rc)
return rc;
*val >>= entry->register_region.bit_offset;
@@ -116,13 +116,13 @@ int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
val <<= entry->register_region.bit_offset;
if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
u64 valr = 0;
- rc = acpi_atomic_read(&valr, &entry->register_region);
+ rc = apei_read(&valr, &entry->register_region);
if (rc)
return rc;
valr &= ~(entry->mask << entry->register_region.bit_offset);
val |= valr;
}
- rc = acpi_atomic_write(val, &entry->register_region);
+ rc = apei_write(val, &entry->register_region);
return rc;
}
@@ -243,7 +243,7 @@ static int pre_map_gar_callback(struct apei_exec_context *ctx,
u8 ins = entry->instruction;
if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
- return acpi_pre_map_gar(&entry->register_region);
+ return acpi_os_map_generic_address(&entry->register_region);
return 0;
}
@@ -276,7 +276,7 @@ static int post_unmap_gar_callback(struct apei_exec_context *ctx,
u8 ins = entry->instruction;
if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
- acpi_post_unmap_gar(&entry->register_region);
+ acpi_os_unmap_generic_address(&entry->register_region);
return 0;
}
@@ -421,6 +421,17 @@ static int apei_resources_merge(struct apei_resources *resources1,
return 0;
}
+int apei_resources_add(struct apei_resources *resources,
+ unsigned long start, unsigned long size,
+ bool iomem)
+{
+ if (iomem)
+ return apei_res_add(&resources->iomem, start, size);
+ else
+ return apei_res_add(&resources->ioport, start, size);
+}
+EXPORT_SYMBOL_GPL(apei_resources_add);
+
/*
* EINJ has two groups of GARs (EINJ table entry and trigger table
* entry), so common resources are subtracted from the trigger table
@@ -438,8 +449,19 @@ int apei_resources_sub(struct apei_resources *resources1,
}
EXPORT_SYMBOL_GPL(apei_resources_sub);
+static int apei_get_nvs_callback(__u64 start, __u64 size, void *data)
+{
+ struct apei_resources *resources = data;
+ return apei_res_add(&resources->iomem, start, size);
+}
+
+static int apei_get_nvs_resources(struct apei_resources *resources)
+{
+ return acpi_nvs_for_each_region(apei_get_nvs_callback, resources);
+}
+
/*
- * IO memory/port rersource management mechanism is used to check
+ * IO memory/port resource management mechanism is used to check
* whether memory/port area used by GARs conflicts with normal memory
* or IO memory/port of devices.
*/
@@ -448,21 +470,35 @@ int apei_resources_request(struct apei_resources *resources,
{
struct apei_res *res, *res_bak = NULL;
struct resource *r;
+ struct apei_resources nvs_resources;
int rc;
rc = apei_resources_sub(resources, &apei_resources_all);
if (rc)
return rc;
+ /*
+ * Some firmware uses the ACPI NVS region, which has been marked
+ * as busy, so exclude it from APEI resources to avoid a false
+ * conflict.
+ */
+ apei_resources_init(&nvs_resources);
+ rc = apei_get_nvs_resources(&nvs_resources);
+ if (rc)
+ goto res_fini;
+ rc = apei_resources_sub(resources, &nvs_resources);
+ if (rc)
+ goto res_fini;
+
rc = -EINVAL;
list_for_each_entry(res, &resources->iomem, list) {
r = request_mem_region(res->start, res->end - res->start,
desc);
if (!r) {
pr_err(APEI_PFX
- "Can not request iomem region <%016llx-%016llx> for GARs.\n",
+ "Can not request [mem %#010llx-%#010llx] for %s registers\n",
(unsigned long long)res->start,
- (unsigned long long)res->end);
+ (unsigned long long)res->end - 1, desc);
res_bak = res;
goto err_unmap_iomem;
}
@@ -472,9 +508,9 @@ int apei_resources_request(struct apei_resources *resources,
r = request_region(res->start, res->end - res->start, desc);
if (!r) {
pr_err(APEI_PFX
- "Can not request ioport region <%016llx-%016llx> for GARs.\n",
+ "Can not request [io %#06llx-%#06llx] for %s registers\n",
(unsigned long long)res->start,
- (unsigned long long)res->end);
+ (unsigned long long)res->end - 1, desc);
res_bak = res;
goto err_unmap_ioport;
}
@@ -500,6 +536,8 @@ err_unmap_iomem:
break;
release_mem_region(res->start, res->end - res->start);
}
+res_fini:
+ apei_resources_fini(&nvs_resources);
return rc;
}
EXPORT_SYMBOL_GPL(apei_resources_request);
@@ -553,6 +591,69 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr)
return 0;
}
+/* read GAR in interrupt (including NMI) or process context */
+int apei_read(u64 *val, struct acpi_generic_address *reg)
+{
+ int rc;
+ u64 address;
+ acpi_status status;
+
+ rc = apei_check_gar(reg, &address);
+ if (rc)
+ return rc;
+
+ *val = 0;
+ switch(reg->space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ status = acpi_os_read_memory64((acpi_physical_address)
+ address, val, reg->bit_width);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ break;
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ status = acpi_os_read_port(address, (u32 *)val, reg->bit_width);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_read);
+
+/* write GAR in interrupt (including NMI) or process context */
+int apei_write(u64 val, struct acpi_generic_address *reg)
+{
+ int rc;
+ u64 address;
+ acpi_status status;
+
+ rc = apei_check_gar(reg, &address);
+ if (rc)
+ return rc;
+
+ switch (reg->space_id) {
+ case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+ status = acpi_os_write_memory64((acpi_physical_address)
+ address, val, reg->bit_width);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ break;
+ case ACPI_ADR_SPACE_SYSTEM_IO:
+ status = acpi_os_write_port(address, val, reg->bit_width);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(apei_write);
+
static int collect_res_callback(struct apei_exec_context *ctx,
struct acpi_whea_header *entry,
void *data)
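For reference, a small sketch of how the new NMI-safe accessor replaces acpi_atomic_read() at a call site; the wrapper itself is illustrative, and 'reg' would come from a WHEA table entry:

	static int example_read_status(struct acpi_generic_address *reg, u32 *out)
	{
		u64 val;
		int rc;

		rc = apei_read(&val, reg);	/* usable in NMI or process context */
		if (rc)
			return rc;
		*out = (u32)val;
		return 0;
	}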
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
index f57050e..cca240a 100644
--- a/drivers/acpi/apei/apei-internal.h
+++ b/drivers/acpi/apei/apei-internal.h
@@ -68,6 +68,9 @@ static inline int apei_exec_run_optional(struct apei_exec_context *ctx, u8 actio
/* IP has been set in instruction function */
#define APEI_EXEC_SET_IP 1
+int apei_read(u64 *val, struct acpi_generic_address *reg);
+int apei_write(u64 val, struct acpi_generic_address *reg);
+
int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val);
int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val);
int apei_exec_read_register(struct apei_exec_context *ctx,
@@ -95,6 +98,9 @@ static inline void apei_resources_init(struct apei_resources *resources)
}
void apei_resources_fini(struct apei_resources *resources);
+int apei_resources_add(struct apei_resources *resources,
+ unsigned long start, unsigned long size,
+ bool iomem);
int apei_resources_sub(struct apei_resources *resources1,
struct apei_resources *resources2);
int apei_resources_request(struct apei_resources *resources,
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
index 589b96c..4ca087d 100644
--- a/drivers/acpi/apei/einj.c
+++ b/drivers/acpi/apei/einj.c
@@ -43,6 +43,42 @@
#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC)
/*
+ * ACPI version 5 provides a SET_ERROR_TYPE_WITH_ADDRESS action.
+ */
+static int acpi5;
+
+struct set_error_type_with_address {
+ u32 type;
+ u32 vendor_extension;
+ u32 flags;
+ u32 apicid;
+ u64 memory_address;
+ u64 memory_address_range;
+ u32 pcie_sbdf;
+};
+enum {
+ SETWA_FLAGS_APICID = 1,
+ SETWA_FLAGS_MEM = 2,
+ SETWA_FLAGS_PCIE_SBDF = 4,
+};
+
+/*
+ * Vendor extensions for platform specific operations
+ */
+struct vendor_error_type_extension {
+ u32 length;
+ u32 pcie_sbdf;
+ u16 vendor_id;
+ u16 device_id;
+ u8 rev_id;
+ u8 reserved[3];
+};
+
+static u32 vendor_flags;
+static struct debugfs_blob_wrapper vendor_blob;
+static char vendor_dev[64];
+
+/*
* Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
* EINJ table through an unpublished extension. Use with caution as
* most will ignore the parameter and make their own choice of address
@@ -103,15 +139,7 @@ static struct apei_exec_ins_type einj_ins_type[] = {
*/
static DEFINE_MUTEX(einj_mutex);
-static struct einj_parameter *einj_param;
-
-#ifndef writeq
-static inline void writeq(__u64 val, volatile void __iomem *addr)
-{
- writel(val, addr);
- writel(val >> 32, addr+4);
-}
-#endif
+static void *einj_param;
static void einj_exec_ctx_init(struct apei_exec_context *ctx)
{
@@ -158,10 +186,30 @@ static int einj_timedout(u64 *t)
return 0;
}
-static u64 einj_get_parameter_address(void)
+static void check_vendor_extension(u64 paddr,
+ struct set_error_type_with_address *v5param)
+{
+ int offset = v5param->vendor_extension;
+ struct vendor_error_type_extension *v;
+ u32 sbdf;
+
+ if (!offset)
+ return;
+ v = acpi_os_map_memory(paddr + offset, sizeof(*v));
+ if (!v)
+ return;
+ sbdf = v->pcie_sbdf;
+ sprintf(vendor_dev, "%x:%x:%x.%x vendor_id=%x device_id=%x rev_id=%x\n",
+ sbdf >> 24, (sbdf >> 16) & 0xff,
+ (sbdf >> 11) & 0x1f, (sbdf >> 8) & 0x7,
+ v->vendor_id, v->device_id, v->rev_id);
+ acpi_os_unmap_memory(v, sizeof(*v));
+}
+
+static void *einj_get_parameter_address(void)
{
int i;
- u64 paddr = 0;
+ u64 paddrv4 = 0, paddrv5 = 0;
struct acpi_whea_header *entry;
entry = EINJ_TAB_ENTRY(einj_tab);
@@ -170,12 +218,40 @@ static u64 einj_get_parameter_address(void)
entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
entry->register_region.space_id ==
ACPI_ADR_SPACE_SYSTEM_MEMORY)
- memcpy(&paddr, &entry->register_region.address,
- sizeof(paddr));
+ memcpy(&paddrv4, &entry->register_region.address,
+ sizeof(paddrv4));
+ if (entry->action == ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS &&
+ entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
+ entry->register_region.space_id ==
+ ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ memcpy(&paddrv5, &entry->register_region.address,
+ sizeof(paddrv5));
entry++;
}
+ if (paddrv5) {
+ struct set_error_type_with_address *v5param;
+
+ v5param = acpi_os_map_memory(paddrv5, sizeof(*v5param));
+ if (v5param) {
+ acpi5 = 1;
+ check_vendor_extension(paddrv5, v5param);
+ return v5param;
+ }
+ }
+ if (paddrv4) {
+ struct einj_parameter *v4param;
+
+ v4param = acpi_os_map_memory(paddrv4, sizeof(*v4param));
+ if (!v4param)
+ return NULL;
+ if (v4param->reserved1 || v4param->reserved2) {
+ acpi_os_unmap_memory(v4param, sizeof(*v4param));
+ return NULL;
+ }
+ return v4param;
+ }
- return paddr;
+ return NULL;
}
/* do sanity check to trigger table */
@@ -184,7 +260,7 @@ static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab)
if (trigger_tab->header_size != sizeof(struct acpi_einj_trigger))
return -EINVAL;
if (trigger_tab->table_size > PAGE_SIZE ||
- trigger_tab->table_size <= trigger_tab->header_size)
+ trigger_tab->table_size < trigger_tab->header_size)
return -EINVAL;
if (trigger_tab->entry_count !=
(trigger_tab->table_size - trigger_tab->header_size) /
@@ -194,8 +270,29 @@ static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab)
return 0;
}
+static struct acpi_generic_address *einj_get_trigger_parameter_region(
+ struct acpi_einj_trigger *trigger_tab, u64 param1, u64 param2)
+{
+ int i;
+ struct acpi_whea_header *entry;
+
+ entry = (struct acpi_whea_header *)
+ ((char *)trigger_tab + sizeof(struct acpi_einj_trigger));
+ for (i = 0; i < trigger_tab->entry_count; i++) {
+ if (entry->action == ACPI_EINJ_TRIGGER_ERROR &&
+ entry->instruction == ACPI_EINJ_WRITE_REGISTER_VALUE &&
+ entry->register_region.space_id ==
+ ACPI_ADR_SPACE_SYSTEM_MEMORY &&
+ (entry->register_region.address & param2) == (param1 & param2))
+ return &entry->register_region;
+ entry++;
+ }
+
+ return NULL;
+}
/* Execute instructions in trigger error action table */
-static int __einj_error_trigger(u64 trigger_paddr)
+static int __einj_error_trigger(u64 trigger_paddr, u32 type,
+ u64 param1, u64 param2)
{
struct acpi_einj_trigger *trigger_tab = NULL;
struct apei_exec_context trigger_ctx;
@@ -204,14 +301,16 @@ static int __einj_error_trigger(u64 trigger_paddr)
struct resource *r;
u32 table_size;
int rc = -EIO;
+ struct acpi_generic_address *trigger_param_region = NULL;
r = request_mem_region(trigger_paddr, sizeof(*trigger_tab),
"APEI EINJ Trigger Table");
if (!r) {
pr_err(EINJ_PFX
- "Can not request iomem region <%016llx-%016llx> for Trigger table.\n",
+ "Can not request [mem %#010llx-%#010llx] for Trigger table\n",
(unsigned long long)trigger_paddr,
- (unsigned long long)trigger_paddr+sizeof(*trigger_tab));
+ (unsigned long long)trigger_paddr +
+ sizeof(*trigger_tab) - 1);
goto out;
}
trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab));
@@ -225,6 +324,11 @@ static int __einj_error_trigger(u64 trigger_paddr)
"The trigger error action table is invalid\n");
goto out_rel_header;
}
+
+ /* No action structures in the TRIGGER_ERROR table, nothing to do */
+ if (!trigger_tab->entry_count)
+ goto out_rel_header;
+
rc = -EIO;
table_size = trigger_tab->table_size;
r = request_mem_region(trigger_paddr + sizeof(*trigger_tab),
@@ -232,9 +336,9 @@ static int __einj_error_trigger(u64 trigger_paddr)
"APEI EINJ Trigger Table");
if (!r) {
pr_err(EINJ_PFX
-"Can not request iomem region <%016llx-%016llx> for Trigger Table Entry.\n",
- (unsigned long long)trigger_paddr+sizeof(*trigger_tab),
- (unsigned long long)trigger_paddr + table_size);
+"Can not request [mem %#010llx-%#010llx] for Trigger Table Entry\n",
+ (unsigned long long)trigger_paddr + sizeof(*trigger_tab),
+ (unsigned long long)trigger_paddr + table_size - 1);
goto out_rel_header;
}
iounmap(trigger_tab);
@@ -255,6 +359,30 @@ static int __einj_error_trigger(u64 trigger_paddr)
rc = apei_resources_sub(&trigger_resources, &einj_resources);
if (rc)
goto out_fini;
+ /*
+ * Some firmware will access the target address specified in
+ * param1 to trigger the error when injecting a memory error.
+ * This would conflict with regular memory, so remove the
+ * address range from the trigger table resources.
+ */
+ if (param_extension && (type & 0x0038) && param2) {
+ struct apei_resources addr_resources;
+ apei_resources_init(&addr_resources);
+ trigger_param_region = einj_get_trigger_parameter_region(
+ trigger_tab, param1, param2);
+ if (trigger_param_region) {
+ rc = apei_resources_add(&addr_resources,
+ trigger_param_region->address,
+ trigger_param_region->bit_width/8, true);
+ if (rc)
+ goto out_fini;
+ rc = apei_resources_sub(&trigger_resources,
+ &addr_resources);
+ }
+ apei_resources_fini(&addr_resources);
+ if (rc)
+ goto out_fini;
+ }
rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger");
if (rc)
goto out_fini;
@@ -293,12 +421,56 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
if (rc)
return rc;
apei_exec_ctx_set_input(&ctx, type);
- rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
- if (rc)
- return rc;
- if (einj_param) {
- writeq(param1, &einj_param->param1);
- writeq(param2, &einj_param->param2);
+ if (acpi5) {
+ struct set_error_type_with_address *v5param = einj_param;
+
+ v5param->type = type;
+ if (type & 0x80000000) {
+ switch (vendor_flags) {
+ case SETWA_FLAGS_APICID:
+ v5param->apicid = param1;
+ break;
+ case SETWA_FLAGS_MEM:
+ v5param->memory_address = param1;
+ v5param->memory_address_range = param2;
+ break;
+ case SETWA_FLAGS_PCIE_SBDF:
+ v5param->pcie_sbdf = param1;
+ break;
+ }
+ v5param->flags = vendor_flags;
+ } else {
+ switch (type) {
+ case ACPI_EINJ_PROCESSOR_CORRECTABLE:
+ case ACPI_EINJ_PROCESSOR_UNCORRECTABLE:
+ case ACPI_EINJ_PROCESSOR_FATAL:
+ v5param->apicid = param1;
+ v5param->flags = SETWA_FLAGS_APICID;
+ break;
+ case ACPI_EINJ_MEMORY_CORRECTABLE:
+ case ACPI_EINJ_MEMORY_UNCORRECTABLE:
+ case ACPI_EINJ_MEMORY_FATAL:
+ v5param->memory_address = param1;
+ v5param->memory_address_range = param2;
+ v5param->flags = SETWA_FLAGS_MEM;
+ break;
+ case ACPI_EINJ_PCIX_CORRECTABLE:
+ case ACPI_EINJ_PCIX_UNCORRECTABLE:
+ case ACPI_EINJ_PCIX_FATAL:
+ v5param->pcie_sbdf = param1;
+ v5param->flags = SETWA_FLAGS_PCIE_SBDF;
+ break;
+ }
+ }
+ } else {
+ rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
+ if (rc)
+ return rc;
+ if (einj_param) {
+ struct einj_parameter *v4param = einj_param;
+ v4param->param1 = param1;
+ v4param->param2 = param2;
+ }
}
rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION);
if (rc)
@@ -324,7 +496,7 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
if (rc)
return rc;
trigger_paddr = apei_exec_ctx_get_output(&ctx);
- rc = __einj_error_trigger(trigger_paddr);
+ rc = __einj_error_trigger(trigger_paddr, type, param1, param2);
if (rc)
return rc;
rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION);
@@ -408,15 +580,25 @@ static int error_type_set(void *data, u64 val)
{
int rc;
u32 available_error_type = 0;
+ u32 tval, vendor;
+
+ /*
+ * Vendor-defined types have the 0x80000000 bit set and
+ * are not enumerated by ACPI_EINJ_GET_ERROR_TYPE.
+ */
+ vendor = val & 0x80000000;
+ tval = val & 0x7fffffff;
/* Only one error type can be specified */
- if (val & (val - 1))
- return -EINVAL;
- rc = einj_get_available_error_type(&available_error_type);
- if (rc)
- return rc;
- if (!(val & available_error_type))
+ if (tval & (tval - 1))
return -EINVAL;
+ if (!vendor) {
+ rc = einj_get_available_error_type(&available_error_type);
+ if (rc)
+ return rc;
+ if (!(val & available_error_type))
+ return -EINVAL;
+ }
error_type = val;
return 0;
@@ -455,7 +637,6 @@ static int einj_check_table(struct acpi_table_einj *einj_tab)
static int __init einj_init(void)
{
int rc;
- u64 param_paddr;
acpi_status status;
struct dentry *fentry;
struct apei_exec_context ctx;
@@ -465,10 +646,9 @@ static int __init einj_init(void)
status = acpi_get_table(ACPI_SIG_EINJ, 0,
(struct acpi_table_header **)&einj_tab);
- if (status == AE_NOT_FOUND) {
- pr_info(EINJ_PFX "Table is not found!\n");
+ if (status == AE_NOT_FOUND)
return -ENODEV;
- } else if (ACPI_FAILURE(status)) {
+ else if (ACPI_FAILURE(status)) {
const char *msg = acpi_format_exception(status);
pr_err(EINJ_PFX "Failed to get table, %s\n", msg);
return -EINVAL;
@@ -509,23 +689,30 @@ static int __init einj_init(void)
rc = apei_exec_pre_map_gars(&ctx);
if (rc)
goto err_release;
- if (param_extension) {
- param_paddr = einj_get_parameter_address();
- if (param_paddr) {
- einj_param = ioremap(param_paddr, sizeof(*einj_param));
- rc = -ENOMEM;
- if (!einj_param)
- goto err_unmap;
- fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
- einj_debug_dir, &error_param1);
- if (!fentry)
- goto err_unmap;
- fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
- einj_debug_dir, &error_param2);
- if (!fentry)
- goto err_unmap;
- } else
- pr_warn(EINJ_PFX "Parameter extension is not supported.\n");
+
+ einj_param = einj_get_parameter_address();
+ if ((param_extension || acpi5) && einj_param) {
+ fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_param1);
+ if (!fentry)
+ goto err_unmap;
+ fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &error_param2);
+ if (!fentry)
+ goto err_unmap;
+ }
+
+ if (vendor_dev[0]) {
+ vendor_blob.data = vendor_dev;
+ vendor_blob.size = strlen(vendor_dev);
+ fentry = debugfs_create_blob("vendor", S_IRUSR,
+ einj_debug_dir, &vendor_blob);
+ if (!fentry)
+ goto err_unmap;
+ fentry = debugfs_create_x32("vendor_flags", S_IRUSR | S_IWUSR,
+ einj_debug_dir, &vendor_flags);
+ if (!fentry)
+ goto err_unmap;
}
pr_info(EINJ_PFX "Error INJection is initialized.\n");
@@ -533,8 +720,13 @@ static int __init einj_init(void)
return 0;
err_unmap:
- if (einj_param)
- iounmap(einj_param);
+ if (einj_param) {
+ acpi_size size = (acpi5) ?
+ sizeof(struct set_error_type_with_address) :
+ sizeof(struct einj_parameter);
+
+ acpi_os_unmap_memory(einj_param, size);
+ }
apei_exec_post_unmap_gars(&ctx);
err_release:
apei_resources_release(&einj_resources);
@@ -550,8 +742,13 @@ static void __exit einj_exit(void)
{
struct apei_exec_context ctx;
- if (einj_param)
- iounmap(einj_param);
+ if (einj_param) {
+ acpi_size size = (acpi5) ?
+ sizeof(struct set_error_type_with_address) :
+ sizeof(struct einj_parameter);
+
+ acpi_os_unmap_memory(einj_param, size);
+ }
einj_exec_ctx_init(&ctx);
apei_exec_post_unmap_gars(&ctx);
apei_resources_release(&einj_resources);
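To make the vendor-extension plumbing easier to follow, this sketch decodes the pcie_sbdf packing used by check_vendor_extension() above; the helper itself is illustrative:

	static void example_decode_sbdf(u32 sbdf, u32 *seg, u32 *bus,
					u32 *dev, u32 *fn)
	{
		*seg = sbdf >> 24;		/* PCI segment */
		*bus = (sbdf >> 16) & 0xff;	/* bus number  */
		*dev = (sbdf >> 11) & 0x1f;	/* device      */
		*fn  = (sbdf >> 8) & 0x7;	/* function    */
	}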
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 631b947..eb9fab5 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -934,7 +934,8 @@ static int erst_close_pstore(struct pstore_info *psi);
static ssize_t erst_reader(u64 *id, enum pstore_type_id *type,
struct timespec *time, char **buf,
struct pstore_info *psi);
-static int erst_writer(enum pstore_type_id type, u64 *id, unsigned int part,
+static int erst_writer(enum pstore_type_id type, enum kmsg_dump_reason reason,
+ u64 *id, unsigned int part,
size_t size, struct pstore_info *psi);
static int erst_clearer(enum pstore_type_id type, u64 id,
struct pstore_info *psi);
@@ -1053,7 +1054,8 @@ out:
return (rc < 0) ? rc : (len - sizeof(*rcd));
}
-static int erst_writer(enum pstore_type_id type, u64 *id, unsigned int part,
+static int erst_writer(enum pstore_type_id type, enum kmsg_dump_reason reason,
+ u64 *id, unsigned int part,
size_t size, struct pstore_info *psi)
{
struct cper_pstore_record *rcd = (struct cper_pstore_record *)
@@ -1125,10 +1127,9 @@ static int __init erst_init(void)
status = acpi_get_table(ACPI_SIG_ERST, 0,
(struct acpi_table_header **)&erst_tab);
- if (status == AE_NOT_FOUND) {
- pr_info(ERST_PFX "Table is not found!\n");
+ if (status == AE_NOT_FOUND)
goto err;
- } else if (ACPI_FAILURE(status)) {
+ else if (ACPI_FAILURE(status)) {
const char *msg = acpi_format_exception(status);
pr_err(ERST_PFX "Failed to get table, %s\n", msg);
rc = -EINVAL;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index b8e08cb..9b3cac0 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -33,6 +33,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
+#include <linux/acpi_io.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
@@ -45,8 +46,9 @@
#include <linux/irq_work.h>
#include <linux/llist.h>
#include <linux/genalloc.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
#include <acpi/apei.h>
-#include <acpi/atomicio.h>
#include <acpi/hed.h>
#include <asm/mce.h>
#include <asm/tlbflush.h>
@@ -118,7 +120,7 @@ struct ghes_estatus_cache {
struct rcu_head rcu;
};
-int ghes_disable;
+bool ghes_disable;
module_param_named(disable, ghes_disable, bool, 0);
static int ghes_panic_timeout __read_mostly = 30;
@@ -299,7 +301,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
if (!ghes)
return ERR_PTR(-ENOMEM);
ghes->generic = generic;
- rc = acpi_pre_map_gar(&generic->error_status_address);
+ rc = acpi_os_map_generic_address(&generic->error_status_address);
if (rc)
goto err_free;
error_block_length = generic->error_block_length;
@@ -319,7 +321,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
return ghes;
err_unmap:
- acpi_post_unmap_gar(&generic->error_status_address);
+ acpi_os_unmap_generic_address(&generic->error_status_address);
err_free:
kfree(ghes);
return ERR_PTR(rc);
@@ -328,7 +330,7 @@ err_free:
static void ghes_fini(struct ghes *ghes)
{
kfree(ghes->estatus);
- acpi_post_unmap_gar(&ghes->generic->error_status_address);
+ acpi_os_unmap_generic_address(&ghes->generic->error_status_address);
}
enum {
@@ -399,7 +401,7 @@ static int ghes_read_estatus(struct ghes *ghes, int silent)
u32 len;
int rc;
- rc = acpi_atomic_read(&buf_paddr, &g->error_status_address);
+ rc = apei_read(&buf_paddr, &g->error_status_address);
if (rc) {
if (!silent && printk_ratelimit())
pr_warning(FW_WARN GHES_PFX
@@ -476,6 +478,27 @@ static void ghes_do_proc(const struct acpi_hest_generic_status *estatus)
}
#endif
}
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+ else if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
+ CPER_SEC_PCIE)) {
+ struct cper_sec_pcie *pcie_err;
+ pcie_err = (struct cper_sec_pcie *)(gdata+1);
+ if (sev == GHES_SEV_RECOVERABLE &&
+ sec_sev == GHES_SEV_RECOVERABLE &&
+ pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
+ pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
+ unsigned int devfn;
+ int aer_severity;
+ devfn = PCI_DEVFN(pcie_err->device_id.device,
+ pcie_err->device_id.function);
+ aer_severity = cper_severity_to_aer(sev);
+ aer_recover_queue(pcie_err->device_id.segment,
+ pcie_err->device_id.bus,
+ devfn, aer_severity);
+ }
+
+ }
+#endif
}
}
@@ -483,16 +506,22 @@ static void __ghes_print_estatus(const char *pfx,
const struct acpi_hest_generic *generic,
const struct acpi_hest_generic_status *estatus)
{
+ static atomic_t seqno;
+ unsigned int curr_seqno;
+ char pfx_seq[64];
+
if (pfx == NULL) {
if (ghes_severity(estatus->error_severity) <=
GHES_SEV_CORRECTED)
- pfx = KERN_WARNING HW_ERR;
+ pfx = KERN_WARNING;
else
- pfx = KERN_ERR HW_ERR;
+ pfx = KERN_ERR;
}
+ curr_seqno = atomic_inc_return(&seqno);
+ snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
- pfx, generic->header.source_id);
- apei_estatus_print(pfx, estatus);
+ pfx_seq, generic->header.source_id);
+ apei_estatus_print(pfx_seq, estatus);
}
static int ghes_print_estatus(const char *pfx,
@@ -711,26 +740,34 @@ static int ghes_notify_sci(struct notifier_block *this,
return ret;
}
+static struct llist_node *llist_nodes_reverse(struct llist_node *llnode)
+{
+ struct llist_node *next, *tail = NULL;
+
+ while (llnode) {
+ next = llnode->next;
+ llnode->next = tail;
+ tail = llnode;
+ llnode = next;
+ }
+
+ return tail;
+}
+
static void ghes_proc_in_irq(struct irq_work *irq_work)
{
- struct llist_node *llnode, *next, *tail = NULL;
+ struct llist_node *llnode, *next;
struct ghes_estatus_node *estatus_node;
struct acpi_hest_generic *generic;
struct acpi_hest_generic_status *estatus;
u32 len, node_len;
+ llnode = llist_del_all(&ghes_estatus_llist);
/*
* Because the time order of estatus in list is reversed,
* revert it back to proper order.
*/
- llnode = llist_del_all(&ghes_estatus_llist);
- while (llnode) {
- next = llnode->next;
- llnode->next = tail;
- tail = llnode;
- llnode = next;
- }
- llnode = tail;
+ llnode = llist_nodes_reverse(llnode);
while (llnode) {
next = llnode->next;
estatus_node = llist_entry(llnode, struct ghes_estatus_node,
@@ -750,6 +787,32 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
}
}
+static void ghes_print_queued_estatus(void)
+{
+ struct llist_node *llnode;
+ struct ghes_estatus_node *estatus_node;
+ struct acpi_hest_generic *generic;
+ struct acpi_hest_generic_status *estatus;
+ u32 len, node_len;
+
+ llnode = llist_del_all(&ghes_estatus_llist);
+ /*
+ * Because the time order of estatus in list is reversed,
+ * revert it back to proper order.
+ */
+ llnode = llist_nodes_reverse(llnode);
+ while (llnode) {
+ estatus_node = llist_entry(llnode, struct ghes_estatus_node,
+ llnode);
+ estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
+ len = apei_estatus_len(estatus);
+ node_len = GHES_ESTATUS_NODE_LEN(len);
+ generic = estatus_node->generic;
+ ghes_print_estatus(NULL, generic, estatus);
+ llnode = llnode->next;
+ }
+}
+
static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
{
struct ghes *ghes, *ghes_global = NULL;
@@ -775,7 +838,8 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
if (sev_global >= GHES_SEV_PANIC) {
oops_begin();
- __ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global->generic,
+ ghes_print_queued_estatus();
+ __ghes_print_estatus(KERN_EMERG, ghes_global->generic,
ghes_global->estatus);
/* reboot to log the error! */
if (panic_timeout == 0)
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index 05fee06..7f00cf3 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -41,7 +41,7 @@
#define HEST_PFX "HEST: "
-int hest_disable;
+bool hest_disable;
EXPORT_SYMBOL_GPL(hest_disable);
/* HEST table parsing */
@@ -221,10 +221,9 @@ void __init acpi_hest_init(void)
status = acpi_get_table(ACPI_SIG_HEST, 0,
(struct acpi_table_header **)&hest_tab);
- if (status == AE_NOT_FOUND) {
- pr_info(HEST_PFX "Table not found.\n");
+ if (status == AE_NOT_FOUND)
goto err;
- } else if (ACPI_FAILURE(status)) {
+ else if (ACPI_FAILURE(status)) {
const char *msg = acpi_format_exception(status);
pr_err(HEST_PFX "Failed to get table, %s\n", msg);
rc = -EINVAL;
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
deleted file mode 100644
index cfc0cc1..0000000
--- a/drivers/acpi/atomicio.c
+++ /dev/null
@@ -1,365 +0,0 @@
-/*
- * atomicio.c - ACPI IO memory pre-mapping/post-unmapping, then
- * accessing in atomic context.
- *
- * This is used for NMI handler to access IO memory area, because
- * ioremap/iounmap can not be used in NMI handler. The IO memory area
- * is pre-mapped in process context and accessed in NMI handler.
- *
- * Copyright (C) 2009-2010, Intel Corp.
- * Author: Huang Ying <ying.huang@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/acpi.h>
-#include <linux/io.h>
-#include <linux/kref.h>
-#include <linux/rculist.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <acpi/atomicio.h>
-
-#define ACPI_PFX "ACPI: "
-
-static LIST_HEAD(acpi_iomaps);
-/*
- * Used for mutual exclusion between writers of acpi_iomaps list, for
- * synchronization between readers and writer, RCU is used.
- */
-static DEFINE_SPINLOCK(acpi_iomaps_lock);
-
-struct acpi_iomap {
- struct list_head list;
- void __iomem *vaddr;
- unsigned long size;
- phys_addr_t paddr;
- struct kref ref;
-};
-
-/* acpi_iomaps_lock or RCU read lock must be held before calling */
-static struct acpi_iomap *__acpi_find_iomap(phys_addr_t paddr,
- unsigned long size)
-{
- struct acpi_iomap *map;
-
- list_for_each_entry_rcu(map, &acpi_iomaps, list) {
- if (map->paddr + map->size >= paddr + size &&
- map->paddr <= paddr)
- return map;
- }
- return NULL;
-}
-
-/*
- * Atomic "ioremap" used by NMI handler, if the specified IO memory
- * area is not pre-mapped, NULL will be returned.
- *
- * acpi_iomaps_lock or RCU read lock must be held before calling
- */
-static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr,
- unsigned long size)
-{
- struct acpi_iomap *map;
-
- map = __acpi_find_iomap(paddr, size/8);
- if (map)
- return map->vaddr + (paddr - map->paddr);
- else
- return NULL;
-}
-
-/* acpi_iomaps_lock must be held before calling */
-static void __iomem *__acpi_try_ioremap(phys_addr_t paddr,
- unsigned long size)
-{
- struct acpi_iomap *map;
-
- map = __acpi_find_iomap(paddr, size);
- if (map) {
- kref_get(&map->ref);
- return map->vaddr + (paddr - map->paddr);
- } else
- return NULL;
-}
-
-/*
- * Used to pre-map the specified IO memory area. First try to find
- * whether the area is already pre-mapped, if it is, increase the
- * reference count (in __acpi_try_ioremap) and return; otherwise, do
- * the real ioremap, and add the mapping into acpi_iomaps list.
- */
-static void __iomem *acpi_pre_map(phys_addr_t paddr,
- unsigned long size)
-{
- void __iomem *vaddr;
- struct acpi_iomap *map;
- unsigned long pg_sz, flags;
- phys_addr_t pg_off;
-
- spin_lock_irqsave(&acpi_iomaps_lock, flags);
- vaddr = __acpi_try_ioremap(paddr, size);
- spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
- if (vaddr)
- return vaddr;
-
- pg_off = paddr & PAGE_MASK;
- pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
- vaddr = ioremap(pg_off, pg_sz);
- if (!vaddr)
- return NULL;
- map = kmalloc(sizeof(*map), GFP_KERNEL);
- if (!map)
- goto err_unmap;
- INIT_LIST_HEAD(&map->list);
- map->paddr = pg_off;
- map->size = pg_sz;
- map->vaddr = vaddr;
- kref_init(&map->ref);
-
- spin_lock_irqsave(&acpi_iomaps_lock, flags);
- vaddr = __acpi_try_ioremap(paddr, size);
- if (vaddr) {
- spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
- iounmap(map->vaddr);
- kfree(map);
- return vaddr;
- }
- list_add_tail_rcu(&map->list, &acpi_iomaps);
- spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
-
- return map->vaddr + (paddr - map->paddr);
-err_unmap:
- iounmap(vaddr);
- return NULL;
-}
-
-/* acpi_iomaps_lock must be held before calling */
-static void __acpi_kref_del_iomap(struct kref *ref)
-{
- struct acpi_iomap *map;
-
- map = container_of(ref, struct acpi_iomap, ref);
- list_del_rcu(&map->list);
-}
-
-/*
- * Used to post-unmap the specified IO memory area. The iounmap is
- * done only if the reference count goes zero.
- */
-static void acpi_post_unmap(phys_addr_t paddr, unsigned long size)
-{
- struct acpi_iomap *map;
- unsigned long flags;
- int del;
-
- spin_lock_irqsave(&acpi_iomaps_lock, flags);
- map = __acpi_find_iomap(paddr, size);
- BUG_ON(!map);
- del = kref_put(&map->ref, __acpi_kref_del_iomap);
- spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
-
- if (!del)
- return;
-
- synchronize_rcu();
- iounmap(map->vaddr);
- kfree(map);
-}
-
-/* In NMI handler, should set silent = 1 */
-static int acpi_check_gar(struct acpi_generic_address *reg,
- u64 *paddr, int silent)
-{
- u32 width, space_id;
-
- width = reg->bit_width;
- space_id = reg->space_id;
- /* Handle possible alignment issues */
- memcpy(paddr, &reg->address, sizeof(*paddr));
- if (!*paddr) {
- if (!silent)
- pr_warning(FW_BUG ACPI_PFX
- "Invalid physical address in GAR [0x%llx/%u/%u]\n",
- *paddr, width, space_id);
- return -EINVAL;
- }
-
- if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
- if (!silent)
- pr_warning(FW_BUG ACPI_PFX
- "Invalid bit width in GAR [0x%llx/%u/%u]\n",
- *paddr, width, space_id);
- return -EINVAL;
- }
-
- if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
- space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
- if (!silent)
- pr_warning(FW_BUG ACPI_PFX
- "Invalid address space type in GAR [0x%llx/%u/%u]\n",
- *paddr, width, space_id);
- return -EINVAL;
- }
-
- return 0;
-}
-
-/* Pre-map, working on GAR */
-int acpi_pre_map_gar(struct acpi_generic_address *reg)
-{
- u64 paddr;
- void __iomem *vaddr;
- int rc;
-
- if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
- return 0;
-
- rc = acpi_check_gar(reg, &paddr, 0);
- if (rc)
- return rc;
-
- vaddr = acpi_pre_map(paddr, reg->bit_width / 8);
- if (!vaddr)
- return -EIO;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(acpi_pre_map_gar);
-
-/* Post-unmap, working on GAR */
-int acpi_post_unmap_gar(struct acpi_generic_address *reg)
-{
- u64 paddr;
- int rc;
-
- if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
- return 0;
-
- rc = acpi_check_gar(reg, &paddr, 0);
- if (rc)
- return rc;
-
- acpi_post_unmap(paddr, reg->bit_width / 8);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(acpi_post_unmap_gar);
-
-/*
- * Can be used in atomic (including NMI) or process context. RCU read
- * lock can only be released after the IO memory area accessing.
- */
-static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
-{
- void __iomem *addr;
-
- rcu_read_lock();
- addr = __acpi_ioremap_fast(paddr, width);
- switch (width) {
- case 8:
- *val = readb(addr);
- break;
- case 16:
- *val = readw(addr);
- break;
- case 32:
- *val = readl(addr);
- break;
-#ifdef readq
- case 64:
- *val = readq(addr);
- break;
-#endif
- default:
- return -EINVAL;
- }
- rcu_read_unlock();
-
- return 0;
-}
-
-static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
-{
- void __iomem *addr;
-
- rcu_read_lock();
- addr = __acpi_ioremap_fast(paddr, width);
- switch (width) {
- case 8:
- writeb(val, addr);
- break;
- case 16:
- writew(val, addr);
- break;
- case 32:
- writel(val, addr);
- break;
-#ifdef writeq
- case 64:
- writeq(val, addr);
- break;
-#endif
- default:
- return -EINVAL;
- }
- rcu_read_unlock();
-
- return 0;
-}
-
-/* GAR accessing in atomic (including NMI) or process context */
-int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg)
-{
- u64 paddr;
- int rc;
-
- rc = acpi_check_gar(reg, &paddr, 1);
- if (rc)
- return rc;
-
- *val = 0;
- switch (reg->space_id) {
- case ACPI_ADR_SPACE_SYSTEM_MEMORY:
- return acpi_atomic_read_mem(paddr, val, reg->bit_width);
- case ACPI_ADR_SPACE_SYSTEM_IO:
- return acpi_os_read_port(paddr, (u32 *)val, reg->bit_width);
- default:
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL_GPL(acpi_atomic_read);
-
-int acpi_atomic_write(u64 val, struct acpi_generic_address *reg)
-{
- u64 paddr;
- int rc;
-
- rc = acpi_check_gar(reg, &paddr, 1);
- if (rc)
- return rc;
-
- switch (reg->space_id) {
- case ACPI_ADR_SPACE_SYSTEM_MEMORY:
- return acpi_atomic_write_mem(paddr, val, reg->bit_width);
- case ACPI_ADR_SPACE_SYSTEM_IO:
- return acpi_os_write_port(paddr, val, reg->bit_width);
- default:
- return -EINVAL;
- }
-}
-EXPORT_SYMBOL_GPL(acpi_atomic_write);
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 7711d94..86933ca 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -873,7 +873,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
static const struct battery_file {
struct file_operations ops;
- mode_t mode;
+ umode_t mode;
const char *name;
} acpi_battery_file[] = {
FILE_DESCRIPTION_RO(info),
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 19a6113..88eb143 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -43,7 +43,7 @@ MODULE_AUTHOR("Kristen Carlson Accardi");
MODULE_DESCRIPTION(ACPI_DOCK_DRIVER_DESCRIPTION);
MODULE_LICENSE("GPL");
-static int immediate_undock = 1;
+static bool immediate_undock = 1;
module_param(immediate_undock, bool, 0644);
MODULE_PARM_DESC(immediate_undock, "1 (default) will cause the driver to "
"undock immediately when the undock button is pressed, 0 will cause"
diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
index 6c47ae9..b258cab 100644
--- a/drivers/acpi/ec_sys.c
+++ b/drivers/acpi/ec_sys.c
@@ -105,7 +105,7 @@ int acpi_ec_add_debugfs(struct acpi_ec *ec, unsigned int ec_device_count)
{
struct dentry *dev_dir;
char name[64];
- mode_t mode = 0400;
+ umode_t mode = 0400;
if (ec_device_count == 0) {
acpi_ec_debugfs_dir = debugfs_create_dir("ec", NULL);
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 3b5c318..e56f3be 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -45,6 +45,8 @@ static int pxm_to_node_map[MAX_PXM_DOMAINS]
static int node_to_pxm_map[MAX_NUMNODES]
= { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
+unsigned char acpi_srat_revision __initdata;
+
int pxm_to_node(int pxm)
{
if (pxm < 0)
@@ -255,9 +257,13 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
static int __init acpi_parse_srat(struct acpi_table_header *table)
{
+ struct acpi_table_srat *srat;
if (!table)
return -EINVAL;
+ srat = (struct acpi_table_srat *)table;
+ acpi_srat_revision = srat->header.revision;
+
/* Real work done in acpi_table_parse_srat below. */
return 0;
diff --git a/drivers/acpi/nvs.c b/drivers/acpi/nvs.c
index 096787b..7a2035f 100644
--- a/drivers/acpi/nvs.c
+++ b/drivers/acpi/nvs.c
@@ -15,6 +15,56 @@
#include <linux/acpi_io.h>
#include <acpi/acpiosxf.h>
+/* ACPI NVS regions that APEI may use */
+
+struct nvs_region {
+ __u64 phys_start;
+ __u64 size;
+ struct list_head node;
+};
+
+static LIST_HEAD(nvs_region_list);
+
+#ifdef CONFIG_ACPI_SLEEP
+static int suspend_nvs_register(unsigned long start, unsigned long size);
+#else
+static inline int suspend_nvs_register(unsigned long a, unsigned long b)
+{
+ return 0;
+}
+#endif
+
+int acpi_nvs_register(__u64 start, __u64 size)
+{
+ struct nvs_region *region;
+
+ region = kmalloc(sizeof(*region), GFP_KERNEL);
+ if (!region)
+ return -ENOMEM;
+ region->phys_start = start;
+ region->size = size;
+ list_add_tail(&region->node, &nvs_region_list);
+
+ return suspend_nvs_register(start, size);
+}
+
+int acpi_nvs_for_each_region(int (*func)(__u64 start, __u64 size, void *data),
+ void *data)
+{
+ int rc;
+ struct nvs_region *region;
+
+ list_for_each_entry(region, &nvs_region_list, node) {
+ rc = func(region->phys_start, region->size, data);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+
+#ifdef CONFIG_ACPI_SLEEP
/*
* Platforms, like ACPI, may want us to save some memory used by them during
* suspend and to restore the contents of this memory during the subsequent
@@ -41,7 +91,7 @@ static LIST_HEAD(nvs_list);
* things so that the data from page-aligned addresses in this region will
* be copied into separate RAM pages.
*/
-int suspend_nvs_register(unsigned long start, unsigned long size)
+static int suspend_nvs_register(unsigned long start, unsigned long size)
{
struct nvs_page *entry, *next;
@@ -159,3 +209,4 @@ void suspend_nvs_restore(void)
if (entry->data)
memcpy(entry->kaddr, entry->data, entry->size);
}
+#endif
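An illustrative consumer of the new iterator (the APEI NVS callback earlier in this patch is the real user); the byte-counting callback here is hypothetical:

	static int example_count_nvs(__u64 start, __u64 size, void *data)
	{
		*(__u64 *)data += size;
		return 0;	/* returning non-zero stops the walk */
	}

	/* __u64 total = 0; acpi_nvs_for_each_region(example_count_nvs, &total); */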
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index f31c5c5..412a1e0 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -31,6 +31,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
+#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
@@ -83,19 +84,6 @@ static struct workqueue_struct *kacpi_notify_wq;
struct workqueue_struct *kacpi_hotplug_wq;
EXPORT_SYMBOL(kacpi_hotplug_wq);
-struct acpi_res_list {
- resource_size_t start;
- resource_size_t end;
- acpi_adr_space_type resource_type; /* IO port, System memory, ...*/
- char name[5]; /* only can have a length of 4 chars, make use of this
- one instead of res->name, no need to kalloc then */
- struct list_head resource_list;
- int count;
-};
-
-static LIST_HEAD(resource_list_head);
-static DEFINE_SPINLOCK(acpi_res_lock);
-
/*
* This list of permanent mappings is for memory that may be accessed from
* interrupt context, where we can't do the ioremap().
@@ -166,17 +154,21 @@ static u32 acpi_osi_handler(acpi_string interface, u32 supported)
return supported;
}
-static void __init acpi_request_region (struct acpi_generic_address *addr,
+static void __init acpi_request_region (struct acpi_generic_address *gas,
unsigned int length, char *desc)
{
- if (!addr->address || !length)
+ u64 addr;
+
+ /* Handle possible alignment issues */
+ memcpy(&addr, &gas->address, sizeof(addr));
+ if (!addr || !length)
return;
/* Resources are never freed */
- if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
- request_region(addr->address, length, desc);
- else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
- request_mem_region(addr->address, length, desc);
+ if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
+ request_region(addr, length, desc);
+ else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ request_mem_region(addr, length, desc);
}
static int __init acpi_reserve_resources(void)
@@ -330,6 +322,37 @@ acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
return NULL;
}
+#ifndef CONFIG_IA64
+#define should_use_kmap(pfn) page_is_ram(pfn)
+#else
+/* ioremap will take care of cache attributes */
+#define should_use_kmap(pfn) 0
+#endif
+
+static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
+{
+ unsigned long pfn;
+
+ pfn = pg_off >> PAGE_SHIFT;
+ if (should_use_kmap(pfn)) {
+ if (pg_sz > PAGE_SIZE)
+ return NULL;
+ return (void __iomem __force *)kmap(pfn_to_page(pfn));
+ } else
+ return acpi_os_ioremap(pg_off, pg_sz);
+}
+
+static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
+{
+ unsigned long pfn;
+
+ pfn = pg_off >> PAGE_SHIFT;
+ if (page_is_ram(pfn))
+ kunmap(pfn_to_page(pfn));
+ else
+ iounmap(vaddr);
+}
+
void __iomem *__init_refok
acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
@@ -362,7 +385,7 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
pg_off = round_down(phys, PAGE_SIZE);
pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
- virt = acpi_os_ioremap(pg_off, pg_sz);
+ virt = acpi_map(pg_off, pg_sz);
if (!virt) {
mutex_unlock(&acpi_ioremap_lock);
kfree(map);
@@ -393,7 +416,7 @@ static void acpi_os_map_cleanup(struct acpi_ioremap *map)
{
if (!map->refcount) {
synchronize_rcu();
- iounmap(map->virt);
+ acpi_unmap(map->phys, map->virt);
kfree(map);
}
}
@@ -427,35 +450,42 @@ void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
__acpi_unmap_table(virt, size);
}
-static int acpi_os_map_generic_address(struct acpi_generic_address *addr)
+int acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
+ u64 addr;
void __iomem *virt;
- if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
return 0;
- if (!addr->address || !addr->bit_width)
+ /* Handle possible alignment issues */
+ memcpy(&addr, &gas->address, sizeof(addr));
+ if (!addr || !gas->bit_width)
return -EINVAL;
- virt = acpi_os_map_memory(addr->address, addr->bit_width / 8);
+ virt = acpi_os_map_memory(addr, gas->bit_width / 8);
if (!virt)
return -EIO;
return 0;
}
+EXPORT_SYMBOL(acpi_os_map_generic_address);
-static void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
+void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
+ u64 addr;
struct acpi_ioremap *map;
- if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
return;
- if (!addr->address || !addr->bit_width)
+ /* Handle possible alignment issues */
+ memcpy(&addr, &gas->address, sizeof(addr));
+ if (!addr || !gas->bit_width)
return;
mutex_lock(&acpi_ioremap_lock);
- map = acpi_map_lookup(addr->address, addr->bit_width / 8);
+ map = acpi_map_lookup(addr, gas->bit_width / 8);
if (!map) {
mutex_unlock(&acpi_ioremap_lock);
return;
@@ -465,6 +495,7 @@ static void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
acpi_os_map_cleanup(map);
}
+EXPORT_SYMBOL(acpi_os_unmap_generic_address);
#ifdef ACPI_FUTURE_USAGE
acpi_status
@@ -711,6 +742,67 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
return AE_OK;
}
+#ifdef readq
+static inline u64 read64(const volatile void __iomem *addr)
+{
+ return readq(addr);
+}
+#else
+static inline u64 read64(const volatile void __iomem *addr)
+{
+ u64 l, h;
+ l = readl(addr);
+ h = readl(addr+4);
+ return l | (h << 32);
+}
+#endif
+
+acpi_status
+acpi_os_read_memory64(acpi_physical_address phys_addr, u64 *value, u32 width)
+{
+ void __iomem *virt_addr;
+ unsigned int size = width / 8;
+ bool unmap = false;
+ u64 dummy;
+
+ rcu_read_lock();
+ virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
+ if (!virt_addr) {
+ rcu_read_unlock();
+ virt_addr = acpi_os_ioremap(phys_addr, size);
+ if (!virt_addr)
+ return AE_BAD_ADDRESS;
+ unmap = true;
+ }
+
+ if (!value)
+ value = &dummy;
+
+ switch (width) {
+ case 8:
+ *(u8 *) value = readb(virt_addr);
+ break;
+ case 16:
+ *(u16 *) value = readw(virt_addr);
+ break;
+ case 32:
+ *(u32 *) value = readl(virt_addr);
+ break;
+ case 64:
+ *(u64 *) value = read64(virt_addr);
+ break;
+ default:
+ BUG();
+ }
+
+ if (unmap)
+ iounmap(virt_addr);
+ else
+ rcu_read_unlock();
+
+ return AE_OK;
+}
+
acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
{
@@ -750,6 +842,61 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
return AE_OK;
}
+#ifdef writeq
+static inline void write64(u64 val, volatile void __iomem *addr)
+{
+ writeq(val, addr);
+}
+#else
+static inline void write64(u64 val, volatile void __iomem *addr)
+{
+ writel(val, addr);
+ writel(val>>32, addr+4);
+}
+#endif
+
+acpi_status
+acpi_os_write_memory64(acpi_physical_address phys_addr, u64 value, u32 width)
+{
+ void __iomem *virt_addr;
+ unsigned int size = width / 8;
+ bool unmap = false;
+
+ rcu_read_lock();
+ virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
+ if (!virt_addr) {
+ rcu_read_unlock();
+ virt_addr = acpi_os_ioremap(phys_addr, size);
+ if (!virt_addr)
+ return AE_BAD_ADDRESS;
+ unmap = true;
+ }
+
+ switch (width) {
+ case 8:
+ writeb(value, virt_addr);
+ break;
+ case 16:
+ writew(value, virt_addr);
+ break;
+ case 32:
+ writel(value, virt_addr);
+ break;
+ case 64:
+ write64(value, virt_addr);
+ break;
+ default:
+ BUG();
+ }
+
+ if (unmap)
+ iounmap(virt_addr);
+ else
+ rcu_read_unlock();
+
+ return AE_OK;
+}
+
acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
u64 *value, u32 width)
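A short usage sketch for the new 64-bit OSL accessors; the physical address is a placeholder. Note that the non-readq/writeq fallback splits the access into two 32-bit operations, so it is not atomic:

	static u64 example_read_counter(acpi_physical_address phys)
	{
		u64 val = 0;

		/* Width is in bits; falls back to two 32-bit reads without readq. */
		if (ACPI_FAILURE(acpi_os_read_memory64(phys, &val, 64)))
			return 0;
		return val;
	}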
@@ -1278,44 +1425,28 @@ __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
* drivers */
int acpi_check_resource_conflict(const struct resource *res)
{
- struct acpi_res_list *res_list_elem;
- int ioport = 0, clash = 0;
+ acpi_adr_space_type space_id;
+ acpi_size length;
+ u8 warn = 0;
+ int clash = 0;
if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
return 0;
if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
return 0;
- ioport = res->flags & IORESOURCE_IO;
-
- spin_lock(&acpi_res_lock);
- list_for_each_entry(res_list_elem, &resource_list_head,
- resource_list) {
- if (ioport && (res_list_elem->resource_type
- != ACPI_ADR_SPACE_SYSTEM_IO))
- continue;
- if (!ioport && (res_list_elem->resource_type
- != ACPI_ADR_SPACE_SYSTEM_MEMORY))
- continue;
+ if (res->flags & IORESOURCE_IO)
+ space_id = ACPI_ADR_SPACE_SYSTEM_IO;
+ else
+ space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
- if (res->end < res_list_elem->start
- || res_list_elem->end < res->start)
- continue;
- clash = 1;
- break;
- }
- spin_unlock(&acpi_res_lock);
+ length = res->end - res->start + 1;
+ if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
+ warn = 1;
+ clash = acpi_check_address_range(space_id, res->start, length, warn);
if (clash) {
if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
- printk(KERN_WARNING "ACPI: resource %s %pR"
- " conflicts with ACPI region %s "
- "[%s 0x%zx-0x%zx]\n",
- res->name, res, res_list_elem->name,
- (res_list_elem->resource_type ==
- ACPI_ADR_SPACE_SYSTEM_IO) ? "io" : "mem",
- (size_t) res_list_elem->start,
- (size_t) res_list_elem->end);
if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
printk(KERN_NOTICE "ACPI: This conflict may"
" cause random problems and system"
@@ -1467,155 +1598,6 @@ acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
kmem_cache_free(cache, object);
return (AE_OK);
}
-
-static inline int acpi_res_list_add(struct acpi_res_list *res)
-{
- struct acpi_res_list *res_list_elem;
-
- list_for_each_entry(res_list_elem, &resource_list_head,
- resource_list) {
-
- if (res->resource_type == res_list_elem->resource_type &&
- res->start == res_list_elem->start &&
- res->end == res_list_elem->end) {
-
- /*
- * The Region(addr,len) already exist in the list,
- * just increase the count
- */
-
- res_list_elem->count++;
- return 0;
- }
- }
-
- res->count = 1;
- list_add(&res->resource_list, &resource_list_head);
- return 1;
-}
-
-static inline void acpi_res_list_del(struct acpi_res_list *res)
-{
- struct acpi_res_list *res_list_elem;
-
- list_for_each_entry(res_list_elem, &resource_list_head,
- resource_list) {
-
- if (res->resource_type == res_list_elem->resource_type &&
- res->start == res_list_elem->start &&
- res->end == res_list_elem->end) {
-
- /*
- * If the res count is decreased to 0,
- * remove and free it
- */
-
- if (--res_list_elem->count == 0) {
- list_del(&res_list_elem->resource_list);
- kfree(res_list_elem);
- }
- return;
- }
- }
-}
-
-acpi_status
-acpi_os_invalidate_address(
- u8 space_id,
- acpi_physical_address address,
- acpi_size length)
-{
- struct acpi_res_list res;
-
- switch (space_id) {
- case ACPI_ADR_SPACE_SYSTEM_IO:
- case ACPI_ADR_SPACE_SYSTEM_MEMORY:
- /* Only interference checks against SystemIO and SystemMemory
- are needed */
- res.start = address;
- res.end = address + length - 1;
- res.resource_type = space_id;
- spin_lock(&acpi_res_lock);
- acpi_res_list_del(&res);
- spin_unlock(&acpi_res_lock);
- break;
- case ACPI_ADR_SPACE_PCI_CONFIG:
- case ACPI_ADR_SPACE_EC:
- case ACPI_ADR_SPACE_SMBUS:
- case ACPI_ADR_SPACE_CMOS:
- case ACPI_ADR_SPACE_PCI_BAR_TARGET:
- case ACPI_ADR_SPACE_DATA_TABLE:
- case ACPI_ADR_SPACE_FIXED_HARDWARE:
- break;
- }
- return AE_OK;
-}
-
-/******************************************************************************
- *
- * FUNCTION: acpi_os_validate_address
- *
- * PARAMETERS: space_id - ACPI space ID
- * address - Physical address
- * length - Address length
- *
- * RETURN: AE_OK if address/length is valid for the space_id. Otherwise,
- * should return AE_AML_ILLEGAL_ADDRESS.
- *
- * DESCRIPTION: Validate a system address via the host OS. Used to validate
- * the addresses accessed by AML operation regions.
- *
- *****************************************************************************/
-
-acpi_status
-acpi_os_validate_address (
- u8 space_id,
- acpi_physical_address address,
- acpi_size length,
- char *name)
-{
- struct acpi_res_list *res;
- int added;
- if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
- return AE_OK;
-
- switch (space_id) {
- case ACPI_ADR_SPACE_SYSTEM_IO:
- case ACPI_ADR_SPACE_SYSTEM_MEMORY:
- /* Only interference checks against SystemIO and SystemMemory
- are needed */
- res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
- if (!res)
- return AE_OK;
- /* ACPI names are fixed to 4 bytes, still better use strlcpy */
- strlcpy(res->name, name, 5);
- res->start = address;
- res->end = address + length - 1;
- res->resource_type = space_id;
- spin_lock(&acpi_res_lock);
- added = acpi_res_list_add(res);
- spin_unlock(&acpi_res_lock);
- pr_debug("%s %s resource: start: 0x%llx, end: 0x%llx, "
- "name: %s\n", added ? "Added" : "Already exist",
- (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
- ? "SystemIO" : "System Memory",
- (unsigned long long)res->start,
- (unsigned long long)res->end,
- res->name);
- if (!added)
- kfree(res);
- break;
- case ACPI_ADR_SPACE_PCI_CONFIG:
- case ACPI_ADR_SPACE_EC:
- case ACPI_ADR_SPACE_SMBUS:
- case ACPI_ADR_SPACE_CMOS:
- case ACPI_ADR_SPACE_PCI_BAR_TARGET:
- case ACPI_ADR_SPACE_DATA_TABLE:
- case ACPI_ADR_SPACE_FIXED_HARDWARE:
- break;
- }
- return AE_OK;
-}
#endif
acpi_status __init acpi_os_initialize(void)
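
The read64()/write64() helpers introduced in the hunks above exist because not every architecture provides readq()/writeq(); where they are missing, the 64-bit register is accessed as two 32-bit halves (low word at the base address, high word at base + 4), which is not atomic. Note also that acpi_os_read_memory64() first looks for an existing mapping under rcu_read_lock() and only falls back to a transient acpi_os_ioremap()/iounmap() pair when none is found. A minimal standalone sketch of the 64-bit accessor fallback follows; example_read64()/example_write64() are illustrative names, not part of the kernel API.

#include <linux/io.h>
#include <linux/types.h>

#ifdef readq
static inline u64 example_read64(const volatile void __iomem *addr)
{
	return readq(addr);
}

static inline void example_write64(u64 val, volatile void __iomem *addr)
{
	writeq(val, addr);
}
#else
static inline u64 example_read64(const volatile void __iomem *addr)
{
	u64 lo = readl(addr);		/* low 32 bits at addr */
	u64 hi = readl(addr + 4);	/* high 32 bits at addr + 4 */

	return lo | (hi << 32);
}

static inline void example_write64(u64 val, volatile void __iomem *addr)
{
	writel((u32)val, addr);		/* low word first */
	writel((u32)(val >> 32), addr + 4);
}
#endif
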
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 7f9eba9..0eefa12 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -487,10 +487,10 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
else
link_desc[0] = '\0';
- dev_info(&dev->dev, "PCI INT %c%s -> GSI %u (%s, %s) -> IRQ %d\n",
- pin_name(pin), link_desc, gsi,
- (triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge",
- (polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq);
+ dev_dbg(&dev->dev, "PCI INT %c%s -> GSI %u (%s, %s) -> IRQ %d\n",
+ pin_name(pin), link_desc, gsi,
+ (triggering == ACPI_LEVEL_SENSITIVE) ? "level" : "edge",
+ (polarity == ACPI_ACTIVE_LOW) ? "low" : "high", dev->irq);
return 0;
}
@@ -524,6 +524,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
* (e.g. PCI_UNDEFINED_IRQ).
*/
- dev_info(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
+ dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
acpi_unregister_gsi(gsi);
}
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 2672c79..7aff631 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -596,6 +596,13 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
if (ACPI_SUCCESS(status)) {
dev_info(root->bus->bridge,
"ACPI _OSC control (0x%02x) granted\n", flags);
+ if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
+ /*
+ * We have ASPM control, but the FADT indicates
+ * that it's unsupported. Clear it.
+ */
+ pcie_clear_aspm(root->bus);
+ }
} else {
dev_info(root->bus->bridge,
"ACPI _OSC request failed (%s), "
diff --git a/drivers/acpi/pci_slot.c b/drivers/acpi/pci_slot.c
index 07f7fea..e50e31a 100644
--- a/drivers/acpi/pci_slot.c
+++ b/drivers/acpi/pci_slot.c
@@ -34,7 +34,7 @@
#include <acpi/acpi_drivers.h>
#include <linux/dmi.h>
-static int debug;
+static bool debug;
static int check_sta_before_sun;
#define DRIVER_VERSION "0.1"
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 3a0428e..c850de4 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -173,8 +173,30 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
apic_id = map_mat_entry(handle, type, acpi_id);
if (apic_id == -1)
apic_id = map_madt_entry(type, acpi_id);
- if (apic_id == -1)
- return apic_id;
+ if (apic_id == -1) {
+ /*
+ * On a UP processor there is no _MAT entry or MADT table,
+ * so apic_id above is always set to -1.
+ *
+ * The BIOS may define multiple CPU handles even for a UP processor.
+ * For example,
+ *
+ * Scope (_PR)
+ * {
+ * Processor (CPU0, 0x00, 0x00000410, 0x06) {}
+ * Processor (CPU1, 0x01, 0x00000410, 0x06) {}
+ * Processor (CPU2, 0x02, 0x00000410, 0x06) {}
+ * Processor (CPU3, 0x03, 0x00000410, 0x06) {}
+ * }
+ *
+ * Ignore apic_id and always return 0 for CPU0's handle.
+ * Return -1 for the other CPUs' handles.
+ */
+ if (acpi_id == 0)
+ return acpi_id;
+ else
+ return apic_id;
+ }
#ifdef CONFIG_SMP
for_each_possible_cpu(i) {
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 9d7bc9f..8ae05ce 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -82,9 +82,9 @@ MODULE_LICENSE("GPL");
static int acpi_processor_add(struct acpi_device *device);
static int acpi_processor_remove(struct acpi_device *device, int type);
static void acpi_processor_notify(struct acpi_device *device, u32 event);
-static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
+static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr);
static int acpi_processor_handle_eject(struct acpi_processor *pr);
-
+static int acpi_processor_start(struct acpi_processor *pr);
static const struct acpi_device_id processor_device_ids[] = {
{ACPI_PROCESSOR_OBJECT_HID, 0},
@@ -324,10 +324,8 @@ static int acpi_processor_get_info(struct acpi_device *device)
* they are physically not present.
*/
if (pr->id == -1) {
- if (ACPI_FAILURE
- (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
+ if (ACPI_FAILURE(acpi_processor_hotadd_init(pr)))
return -ENODEV;
- }
}
/*
* On some boxes several processors use the same processor bus id.
@@ -425,10 +423,29 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
struct acpi_processor *pr = per_cpu(processors, cpu);
if (action == CPU_ONLINE && pr) {
- acpi_processor_ppc_has_changed(pr, 0);
- acpi_processor_hotplug(pr);
- acpi_processor_reevaluate_tstate(pr, action);
- acpi_processor_tstate_has_changed(pr);
+ /* CPU got physically hotplugged and onlined for the first time:
+ * initialize the missing pieces
+ */
+ if (pr->flags.need_hotplug_init) {
+ struct cpuidle_driver *idle_driver =
+ cpuidle_get_driver();
+
+ printk(KERN_INFO "Will online and init hotplugged "
+ "CPU: %d\n", pr->id);
+ WARN(acpi_processor_start(pr), "Failed to start CPU:"
+ " %d\n", pr->id);
+ pr->flags.need_hotplug_init = 0;
+ if (idle_driver && !strcmp(idle_driver->name,
+ "intel_idle")) {
+ intel_idle_cpu_init(pr->id);
+ }
+ /* Normal CPU soft online event */
+ } else {
+ acpi_processor_ppc_has_changed(pr, 0);
+ acpi_processor_cst_has_changed(pr);
+ acpi_processor_reevaluate_tstate(pr, action);
+ acpi_processor_tstate_has_changed(pr);
+ }
}
if (action == CPU_DEAD && pr) {
/* invalidate the flag.throttling after one CPU is offline */
@@ -442,11 +459,76 @@ static struct notifier_block acpi_cpu_notifier =
.notifier_call = acpi_cpu_soft_notify,
};
+/*
+ * acpi_processor_start() is called by the cpu_hotplug_notifier func:
+ * acpi_cpu_soft_notify(). Making it __cpuinit{data} is difficult; the
+ * root cause seems to be that acpi_processor_uninstall_hotplug_notify()
+ * is in the module_exit (__exit) func. Allowing acpi_processor_start()
+ * to not be in the __cpuinit section, but to be called from __cpuinit
+ * funcs via __ref, looks like the right thing to do here.
+ */
+static __ref int acpi_processor_start(struct acpi_processor *pr)
+{
+ struct acpi_device *device = per_cpu(processor_device_array, pr->id);
+ int result = 0;
+
+#ifdef CONFIG_CPU_FREQ
+ acpi_processor_ppc_has_changed(pr, 0);
+#endif
+ acpi_processor_get_throttling_info(pr);
+ acpi_processor_get_limit_info(pr);
+
+ if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
+ acpi_processor_power_init(pr, device);
+
+ pr->cdev = thermal_cooling_device_register("Processor", device,
+ &processor_cooling_ops);
+ if (IS_ERR(pr->cdev)) {
+ result = PTR_ERR(pr->cdev);
+ goto err_power_exit;
+ }
+
+ dev_dbg(&device->dev, "registered as cooling_device%d\n",
+ pr->cdev->id);
+
+ result = sysfs_create_link(&device->dev.kobj,
+ &pr->cdev->device.kobj,
+ "thermal_cooling");
+ if (result) {
+ printk(KERN_ERR PREFIX "Create sysfs link\n");
+ goto err_thermal_unregister;
+ }
+ result = sysfs_create_link(&pr->cdev->device.kobj,
+ &device->dev.kobj,
+ "device");
+ if (result) {
+ printk(KERN_ERR PREFIX "Create sysfs link\n");
+ goto err_remove_sysfs_thermal;
+ }
+
+ return 0;
+
+err_remove_sysfs_thermal:
+ sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
+err_thermal_unregister:
+ thermal_cooling_device_unregister(pr->cdev);
+err_power_exit:
+ acpi_processor_power_exit(pr, device);
+
+ return result;
+}
+
+/*
+ * Do not put anything in here which needs the core to be online.
+ * For example MSR access or setting up things which check for cpuinfo_x86
+ * (cpu_data(cpu)) values, like CPU feature flags, family, model, etc.
+ * Such things have to be set up in acpi_processor_start() above.
+ */
static int __cpuinit acpi_processor_add(struct acpi_device *device)
{
struct acpi_processor *pr = NULL;
int result = 0;
- struct sys_device *sysdev;
+ struct device *dev;
pr = kzalloc(sizeof(struct acpi_processor), GFP_KERNEL);
if (!pr)
@@ -491,54 +573,27 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
per_cpu(processors, pr->id) = pr;
- sysdev = get_cpu_sysdev(pr->id);
- if (sysfs_create_link(&device->dev.kobj, &sysdev->kobj, "sysdev")) {
+ dev = get_cpu_device(pr->id);
+ if (sysfs_create_link(&device->dev.kobj, &dev->kobj, "sysdev")) {
result = -EFAULT;
goto err_free_cpumask;
}
-#ifdef CONFIG_CPU_FREQ
- acpi_processor_ppc_has_changed(pr, 0);
-#endif
- acpi_processor_get_throttling_info(pr);
- acpi_processor_get_limit_info(pr);
-
- if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
- acpi_processor_power_init(pr, device);
-
- pr->cdev = thermal_cooling_device_register("Processor", device,
- &processor_cooling_ops);
- if (IS_ERR(pr->cdev)) {
- result = PTR_ERR(pr->cdev);
- goto err_power_exit;
- }
-
- dev_dbg(&device->dev, "registered as cooling_device%d\n",
- pr->cdev->id);
+ /*
+ * Do not start hotplugged CPUs now, but when they
+ * are onlined the first time
+ */
+ if (pr->flags.need_hotplug_init)
+ return 0;
- result = sysfs_create_link(&device->dev.kobj,
- &pr->cdev->device.kobj,
- "thermal_cooling");
- if (result) {
- printk(KERN_ERR PREFIX "Create sysfs link\n");
- goto err_thermal_unregister;
- }
- result = sysfs_create_link(&pr->cdev->device.kobj,
- &device->dev.kobj,
- "device");
- if (result) {
- printk(KERN_ERR PREFIX "Create sysfs link\n");
+ result = acpi_processor_start(pr);
+ if (result)
goto err_remove_sysfs;
- }
return 0;
err_remove_sysfs:
- sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
-err_thermal_unregister:
- thermal_cooling_device_unregister(pr->cdev);
-err_power_exit:
- acpi_processor_power_exit(pr, device);
+ sysfs_remove_link(&device->dev.kobj, "sysdev");
err_free_cpumask:
free_cpumask_var(pr->throttling.shared_cpu_map);
@@ -720,21 +775,33 @@ processor_walk_namespace_cb(acpi_handle handle,
return (AE_OK);
}
-static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
+static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
{
+ acpi_handle handle = pr->handle;
if (!is_processor_present(handle)) {
return AE_ERROR;
}
- if (acpi_map_lsapic(handle, p_cpu))
+ if (acpi_map_lsapic(handle, &pr->id))
return AE_ERROR;
- if (arch_register_cpu(*p_cpu)) {
- acpi_unmap_lsapic(*p_cpu);
+ if (arch_register_cpu(pr->id)) {
+ acpi_unmap_lsapic(pr->id);
return AE_ERROR;
}
+ /* CPU got hot-plugged, but cpu_data is not initialized yet
+ * Set flag to delay cpu_idle/throttling initialization
+ * in:
+ * acpi_processor_add()
+ * acpi_processor_get_info()
+ * and do it when the CPU comes online for the first time.
+ * TBD: Clean up the above functions and try to do this more elegantly.
+ */
+ printk(KERN_INFO "CPU %d got hotplugged\n", pr->id);
+ pr->flags.need_hotplug_init = 1;
+
return AE_OK;
}
@@ -748,7 +815,7 @@ static int acpi_processor_handle_eject(struct acpi_processor *pr)
return (0);
}
#else
-static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
+static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
{
return AE_ERROR;
}
@@ -827,8 +894,6 @@ static void __exit acpi_processor_exit(void)
acpi_bus_unregister_driver(&acpi_processor_driver);
- cpuidle_unregister_driver(&acpi_idle_driver);
-
return;
}
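
The need_hotplug_init flag added above implements a deferred-initialization scheme: a physically hot-added CPU is only registered at hot-add time, and the per-CPU setup that requires cpu_data(cpu) to be valid runs when the CPU first comes online, inside the CPU notifier. A schematic sketch of that pattern follows; it is deliberately simplified and the shape shown is an assumption, the real logic lives in acpi_cpu_soft_notify() and acpi_processor_start() above.

static int example_cpu_soft_notify(struct notifier_block *nfb,
				   unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct acpi_processor *pr = per_cpu(processors, cpu);

	if (action == CPU_ONLINE && pr) {
		if (pr->flags.need_hotplug_init) {
			/* first online after a physical hot-add:
			 * cpu_data(cpu) is valid now, so the full
			 * per-CPU initialization can run here.
			 */
			pr->flags.need_hotplug_init = 0;
		} else {
			/* normal soft online: only re-evaluate the
			 * performance, C-state and throttling limits.
			 */
		}
	}
	return NOTIFY_OK;
}
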
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 870550d..3b599ab 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -30,7 +30,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
-#include <linux/sysdev.h>
#include <asm/uaccess.h>
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 6d9a3ab..ca191ff 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -438,6 +438,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
},
{
.callback = init_nvs_nosave,
+ .ident = "Sony Vaio VPCCW29FX",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
.ident = "Averatec AV1020-ED2",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
@@ -476,6 +484,22 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW520F"),
},
},
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Asus K54C",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K54C"),
+ },
+ },
+ {
+ .callback = init_nvs_nosave,
+ .ident = "Asus K54HR",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "K54HR"),
+ },
+ },
{},
};
#endif /* CONFIG_SUSPEND */
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 08a44b5..eaef02a 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -69,21 +69,21 @@ MODULE_AUTHOR("Bruno Ducrot");
MODULE_DESCRIPTION("ACPI Video Driver");
MODULE_LICENSE("GPL");
-static int brightness_switch_enabled = 1;
+static bool brightness_switch_enabled = 1;
module_param(brightness_switch_enabled, bool, 0644);
/*
* By default, we don't allow duplicate ACPI video bus devices
* under the same VGA controller
*/
-static int allow_duplicates;
+static bool allow_duplicates;
module_param(allow_duplicates, bool, 0644);
/*
* Some BIOSes claim they use minimum backlight at boot,
* and this may bring dimming screen after boot
*/
-static int use_bios_initial_backlight = 1;
+static bool use_bios_initial_backlight = 1;
module_param(use_bios_initial_backlight, bool, 0644);
static int register_count = 0;
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index bd230e8..54eaf96 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -52,6 +52,10 @@ static int amba_uevent(struct device *dev, struct kobj_uevent_env *env)
int retval = 0;
retval = add_uevent_var(env, "AMBA_ID=%08x", pcdev->periphid);
+ if (retval)
+ return retval;
+
+ retval = add_uevent_var(env, "MODALIAS=amba:d%08X", pcdev->periphid);
return retval;
}
#else
@@ -109,31 +113,7 @@ static int amba_legacy_resume(struct device *dev)
return ret;
}
-static int amba_pm_prepare(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (drv && drv->pm && drv->pm->prepare)
- ret = drv->pm->prepare(dev);
-
- return ret;
-}
-
-static void amba_pm_complete(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
-
- if (drv && drv->pm && drv->pm->complete)
- drv->pm->complete(dev);
-}
-
-#else /* !CONFIG_PM_SLEEP */
-
-#define amba_pm_prepare NULL
-#define amba_pm_complete NULL
-
-#endif /* !CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_SUSPEND
@@ -155,22 +135,6 @@ static int amba_pm_suspend(struct device *dev)
return ret;
}
-static int amba_pm_suspend_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->suspend_noirq)
- ret = drv->pm->suspend_noirq(dev);
- }
-
- return ret;
-}
-
static int amba_pm_resume(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -189,28 +153,10 @@ static int amba_pm_resume(struct device *dev)
return ret;
}
-static int amba_pm_resume_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->resume_noirq)
- ret = drv->pm->resume_noirq(dev);
- }
-
- return ret;
-}
-
#else /* !CONFIG_SUSPEND */
#define amba_pm_suspend NULL
#define amba_pm_resume NULL
-#define amba_pm_suspend_noirq NULL
-#define amba_pm_resume_noirq NULL
#endif /* !CONFIG_SUSPEND */
@@ -234,22 +180,6 @@ static int amba_pm_freeze(struct device *dev)
return ret;
}
-static int amba_pm_freeze_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->freeze_noirq)
- ret = drv->pm->freeze_noirq(dev);
- }
-
- return ret;
-}
-
static int amba_pm_thaw(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -268,22 +198,6 @@ static int amba_pm_thaw(struct device *dev)
return ret;
}
-static int amba_pm_thaw_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->thaw_noirq)
- ret = drv->pm->thaw_noirq(dev);
- }
-
- return ret;
-}
-
static int amba_pm_poweroff(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -302,22 +216,6 @@ static int amba_pm_poweroff(struct device *dev)
return ret;
}
-static int amba_pm_poweroff_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->poweroff_noirq)
- ret = drv->pm->poweroff_noirq(dev);
- }
-
- return ret;
-}
-
static int amba_pm_restore(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -336,32 +234,12 @@ static int amba_pm_restore(struct device *dev)
return ret;
}
-static int amba_pm_restore_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->restore_noirq)
- ret = drv->pm->restore_noirq(dev);
- }
-
- return ret;
-}
-
#else /* !CONFIG_HIBERNATE_CALLBACKS */
#define amba_pm_freeze NULL
#define amba_pm_thaw NULL
#define amba_pm_poweroff NULL
#define amba_pm_restore NULL
-#define amba_pm_freeze_noirq NULL
-#define amba_pm_thaw_noirq NULL
-#define amba_pm_poweroff_noirq NULL
-#define amba_pm_restore_noirq NULL
#endif /* !CONFIG_HIBERNATE_CALLBACKS */
@@ -402,20 +280,12 @@ static int amba_pm_runtime_resume(struct device *dev)
#ifdef CONFIG_PM
static const struct dev_pm_ops amba_pm = {
- .prepare = amba_pm_prepare,
- .complete = amba_pm_complete,
.suspend = amba_pm_suspend,
.resume = amba_pm_resume,
.freeze = amba_pm_freeze,
.thaw = amba_pm_thaw,
.poweroff = amba_pm_poweroff,
.restore = amba_pm_restore,
- .suspend_noirq = amba_pm_suspend_noirq,
- .resume_noirq = amba_pm_resume_noirq,
- .freeze_noirq = amba_pm_freeze_noirq,
- .thaw_noirq = amba_pm_thaw_noirq,
- .poweroff_noirq = amba_pm_poweroff_noirq,
- .restore_noirq = amba_pm_restore_noirq,
SET_RUNTIME_PM_OPS(
amba_pm_runtime_suspend,
amba_pm_runtime_resume,
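
The extra MODALIAS uevent variable added to amba_uevent() above is what lets udev autoload AMBA drivers: a driver that carries an amba_id table exports matching "amba:d<ID>" aliases, assuming the usual MODULE_DEVICE_TABLE() mechanism. A hypothetical example (the peripheral ID below is made up):

#include <linux/amba/bus.h>
#include <linux/module.h>

static struct amba_id example_amba_ids[] = {
	{
		.id	= 0x00041011,	/* hypothetical peripheral ID */
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};
MODULE_DEVICE_TABLE(amba, example_amba_ids);
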
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index cf047c4..6bdedd7 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -820,7 +820,7 @@ config PATA_PLATFORM
config PATA_OF_PLATFORM
tristate "OpenFirmware platform device PATA support"
- depends on PATA_PLATFORM && OF && OF_IRQ
+ depends on PATA_PLATFORM && OF
help
This option enables support for generic directly connected ATA
devices commonly found on embedded systems with OpenFirmware
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index cf26222..d07bf03 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -52,7 +52,8 @@
#define DRV_VERSION "3.0"
enum {
- AHCI_PCI_BAR = 5,
+ AHCI_PCI_BAR_STA2X11 = 0,
+ AHCI_PCI_BAR_STANDARD = 5,
};
enum board_ids {
@@ -375,6 +376,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 968 */
{ PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
+ /* ST Microelectronics */
+ { PCI_VDEVICE(STMICRO, 0xCC06), board_ahci }, /* ST ConneXt */
+
/* Marvell */
{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
{ PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */
@@ -622,6 +626,13 @@ static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
{
int rc;
+ /*
+ * If the device fixup already set the dma_mask to some non-standard
+ * value, don't extend it here. This happens on STA2X11, for example.
+ */
+ if (pdev->dma_mask && pdev->dma_mask < DMA_BIT_MASK(32))
+ return 0;
+
if (using_dac &&
!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -1026,6 +1037,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ahci_host_priv *hpriv;
struct ata_host *host;
int n_ports, i, rc;
+ int ahci_pci_bar = AHCI_PCI_BAR_STANDARD;
VPRINTK("ENTER\n");
@@ -1057,6 +1069,10 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev,
"PDC42819 can only drive SATA devices with this driver\n");
+ /* The ConneXt uses a non-standard BAR */
+ if (pdev->vendor == PCI_VENDOR_ID_STMICRO && pdev->device == 0xCC06)
+ ahci_pci_bar = AHCI_PCI_BAR_STA2X11;
+
/* acquire resources */
rc = pcim_enable_device(pdev);
if (rc)
@@ -1065,7 +1081,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* AHCI controllers often implement SFF compatible interface.
* Grab all PCI BARs just in case.
*/
- rc = pcim_iomap_regions_request_all(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
+ rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
@@ -1108,7 +1124,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
pci_intx(pdev, 1);
- hpriv->mmio = pcim_iomap_table(pdev)[AHCI_PCI_BAR];
+ hpriv->mmio = pcim_iomap_table(pdev)[ahci_pci_bar];
/* save initial config */
ahci_pci_save_initial_config(pdev, hpriv);
@@ -1172,8 +1188,8 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < host->n_ports; i++) {
struct ata_port *ap = host->ports[i];
- ata_port_pbar_desc(ap, AHCI_PCI_BAR, -1, "abar");
- ata_port_pbar_desc(ap, AHCI_PCI_BAR,
+ ata_port_pbar_desc(ap, ahci_pci_bar, -1, "abar");
+ ata_port_pbar_desc(ap, ahci_pci_bar,
0x100 + ap->port_no * 0x80, "port");
/* set enclosure management message type */
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index 43b8758..48be4e1 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -202,6 +202,71 @@ static int __devexit ahci_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int ahci_suspend(struct device *dev)
+{
+ struct ahci_platform_data *pdata = dev_get_platdata(dev);
+ struct ata_host *host = dev_get_drvdata(dev);
+ struct ahci_host_priv *hpriv = host->private_data;
+ void __iomem *mmio = hpriv->mmio;
+ u32 ctl;
+ int rc;
+
+ if (hpriv->flags & AHCI_HFLAG_NO_SUSPEND) {
+ dev_err(dev, "firmware update required for suspend/resume\n");
+ return -EIO;
+ }
+
+ /*
+ * AHCI spec rev1.1 section 8.3.3:
+ * Software must disable interrupts prior to requesting a
+ * transition of the HBA to D3 state.
+ */
+ ctl = readl(mmio + HOST_CTL);
+ ctl &= ~HOST_IRQ_EN;
+ writel(ctl, mmio + HOST_CTL);
+ readl(mmio + HOST_CTL); /* flush */
+
+ rc = ata_host_suspend(host, PMSG_SUSPEND);
+ if (rc)
+ return rc;
+
+ if (pdata && pdata->suspend)
+ return pdata->suspend(dev);
+ return 0;
+}
+
+static int ahci_resume(struct device *dev)
+{
+ struct ahci_platform_data *pdata = dev_get_platdata(dev);
+ struct ata_host *host = dev_get_drvdata(dev);
+ int rc;
+
+ if (pdata && pdata->resume) {
+ rc = pdata->resume(dev);
+ if (rc)
+ return rc;
+ }
+
+ if (dev->power.power_state.event == PM_EVENT_SUSPEND) {
+ rc = ahci_reset_controller(host);
+ if (rc)
+ return rc;
+
+ ahci_init_controller(host);
+ }
+
+ ata_host_resume(host);
+
+ return 0;
+}
+
+static struct dev_pm_ops ahci_pm_ops = {
+ .suspend = &ahci_suspend,
+ .resume = &ahci_resume,
+};
+#endif
+
static const struct of_device_id ahci_of_match[] = {
{ .compatible = "calxeda,hb-ahci", },
{},
@@ -214,6 +279,9 @@ static struct platform_driver ahci_driver = {
.name = "ahci",
.owner = THIS_MODULE,
.of_match_table = ahci_of_match,
+#ifdef CONFIG_PM
+ .pm = &ahci_pm_ops,
+#endif
},
.id_table = ahci_devtype,
};
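
The suspend/resume support added to ahci_platform above wires up its struct dev_pm_ops by hand. As a side note, an equivalent table can be generated with the SIMPLE_DEV_PM_OPS() helper from <linux/pm.h>; the sketch below only illustrates that helper, is not the form this patch uses, and assumes the same ahci_suspend/ahci_resume callbacks.

#include <linux/pm.h>

static SIMPLE_DEV_PM_OPS(example_ahci_pm_ops, ahci_suspend, ahci_resume);

/* ... and in the platform_driver declaration:
 *
 *	.driver = {
 *		.name		= "ahci",
 *		.owner		= THIS_MODULE,
 *		.of_match_table	= ahci_of_match,
 *		.pm		= &example_ahci_pm_ops,
 *	},
 */
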
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index 69ac373..fdf27b9 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -1117,6 +1117,13 @@ static int piix_broken_suspend(void)
},
},
{
+ .ident = "Satellite Pro A120",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Satellite Pro A120"),
+ },
+ },
+ {
.ident = "Portege M500",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 3c92dbd..a72bfd0 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -746,9 +746,6 @@ static void ahci_start_port(struct ata_port *ap)
/* enable FIS reception */
ahci_start_fis_rx(ap);
- /* enable DMA */
- ahci_start_engine(ap);
-
/* turn on LEDs */
if (ap->flags & ATA_FLAG_EM) {
ata_for_each_link(link, ap, EDGE) {
@@ -2022,7 +2019,7 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
ahci_power_down(ap);
else {
ata_port_err(ap, "%s (%d)\n", emsg, rc);
- ahci_start_port(ap);
+ ata_port_freeze(ap);
}
return rc;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c04ad68..c06e0ec 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -66,6 +66,7 @@
#include <asm/byteorder.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
+#include <linux/pm_runtime.h>
#include "libata.h"
#include "libata-transport.h"
@@ -3248,10 +3249,10 @@ int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
ata_force_xfermask(dev);
pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
- dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
if (libata_dma_mask & mode_mask)
- dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
+ dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
+ dev->udma_mask);
else
dma_mask = 0;
@@ -4124,6 +4125,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
* device and controller are SATA.
*/
{ "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER },
+ { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER },
+ { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
@@ -5234,73 +5237,55 @@ bool ata_link_offline(struct ata_link *link)
}
#ifdef CONFIG_PM
-static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
+static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg,
unsigned int action, unsigned int ehi_flags,
int wait)
{
+ struct ata_link *link;
unsigned long flags;
- int i, rc;
-
- for (i = 0; i < host->n_ports; i++) {
- struct ata_port *ap = host->ports[i];
- struct ata_link *link;
+ int rc;
- /* Previous resume operation might still be in
- * progress. Wait for PM_PENDING to clear.
- */
- if (ap->pflags & ATA_PFLAG_PM_PENDING) {
- ata_port_wait_eh(ap);
- WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
- }
+ /* Previous resume operation might still be in
+ * progress. Wait for PM_PENDING to clear.
+ */
+ if (ap->pflags & ATA_PFLAG_PM_PENDING) {
+ ata_port_wait_eh(ap);
+ WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
+ }
- /* request PM ops to EH */
- spin_lock_irqsave(ap->lock, flags);
+ /* request PM ops to EH */
+ spin_lock_irqsave(ap->lock, flags);
- ap->pm_mesg = mesg;
- if (wait) {
- rc = 0;
- ap->pm_result = &rc;
- }
+ ap->pm_mesg = mesg;
+ if (wait) {
+ rc = 0;
+ ap->pm_result = &rc;
+ }
- ap->pflags |= ATA_PFLAG_PM_PENDING;
- ata_for_each_link(link, ap, HOST_FIRST) {
- link->eh_info.action |= action;
- link->eh_info.flags |= ehi_flags;
- }
+ ap->pflags |= ATA_PFLAG_PM_PENDING;
+ ata_for_each_link(link, ap, HOST_FIRST) {
+ link->eh_info.action |= action;
+ link->eh_info.flags |= ehi_flags;
+ }
- ata_port_schedule_eh(ap);
+ ata_port_schedule_eh(ap);
- spin_unlock_irqrestore(ap->lock, flags);
+ spin_unlock_irqrestore(ap->lock, flags);
- /* wait and check result */
- if (wait) {
- ata_port_wait_eh(ap);
- WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
- if (rc)
- return rc;
- }
+ /* wait and check result */
+ if (wait) {
+ ata_port_wait_eh(ap);
+ WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
}
- return 0;
+ return rc;
}
-/**
- * ata_host_suspend - suspend host
- * @host: host to suspend
- * @mesg: PM message
- *
- * Suspend @host. Actual operation is performed by EH. This
- * function requests EH to perform PM operations and waits for EH
- * to finish.
- *
- * LOCKING:
- * Kernel thread context (may sleep).
- *
- * RETURNS:
- * 0 on success, -errno on failure.
- */
-int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
+#define to_ata_port(d) container_of(d, struct ata_port, tdev)
+
+static int ata_port_suspend_common(struct device *dev, pm_message_t mesg)
{
+ struct ata_port *ap = to_ata_port(dev);
unsigned int ehi_flags = ATA_EHI_QUIET;
int rc;
@@ -5315,31 +5300,108 @@ int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
if (mesg.event == PM_EVENT_SUSPEND)
ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY;
- rc = ata_host_request_pm(host, mesg, 0, ehi_flags, 1);
- if (rc == 0)
- host->dev->power.power_state = mesg;
+ rc = ata_port_request_pm(ap, mesg, 0, ehi_flags, 1);
return rc;
}
+static int ata_port_suspend(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return ata_port_suspend_common(dev, PMSG_SUSPEND);
+}
+
+static int ata_port_do_freeze(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ pm_runtime_resume(dev);
+
+ return ata_port_suspend_common(dev, PMSG_FREEZE);
+}
+
+static int ata_port_poweroff(struct device *dev)
+{
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ return ata_port_suspend_common(dev, PMSG_HIBERNATE);
+}
+
+static int ata_port_resume_common(struct device *dev)
+{
+ struct ata_port *ap = to_ata_port(dev);
+ int rc;
+
+ rc = ata_port_request_pm(ap, PMSG_ON, ATA_EH_RESET,
+ ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 1);
+ return rc;
+}
+
+static int ata_port_resume(struct device *dev)
+{
+ int rc;
+
+ rc = ata_port_resume_common(dev);
+ if (!rc) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ }
+
+ return rc;
+}
+
+static int ata_port_runtime_idle(struct device *dev)
+{
+ return pm_runtime_suspend(dev);
+}
+
+static const struct dev_pm_ops ata_port_pm_ops = {
+ .suspend = ata_port_suspend,
+ .resume = ata_port_resume,
+ .freeze = ata_port_do_freeze,
+ .thaw = ata_port_resume,
+ .poweroff = ata_port_poweroff,
+ .restore = ata_port_resume,
+
+ .runtime_suspend = ata_port_suspend,
+ .runtime_resume = ata_port_resume_common,
+ .runtime_idle = ata_port_runtime_idle,
+};
+
+/**
+ * ata_host_suspend - suspend host
+ * @host: host to suspend
+ * @mesg: PM message
+ *
+ * Suspend @host. Actual operation is performed by port suspend.
+ */
+int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
+{
+ host->dev->power.power_state = mesg;
+ return 0;
+}
+
/**
* ata_host_resume - resume host
* @host: host to resume
*
- * Resume @host. Actual operation is performed by EH. This
- * function requests EH to perform PM operations and returns.
- * Note that all resume operations are performed parallelly.
- *
- * LOCKING:
- * Kernel thread context (may sleep).
+ * Resume @host. Actual operation is performed by port resume.
*/
void ata_host_resume(struct ata_host *host)
{
- ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
- ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
host->dev->power.power_state = PMSG_ON;
}
#endif
+struct device_type ata_port_type = {
+ .name = "ata_port",
+#ifdef CONFIG_PM
+ .pm = &ata_port_pm_ops,
+#endif
+};
+
/**
* ata_dev_init - Initialize an ata_device structure
* @dev: Device structure to initialize
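
The new to_ata_port() macro above relies on container_of() to recover the struct ata_port from the embedded transport device (ap->tdev) handed to the dev_pm_ops callbacks. A self-contained illustration of that idiom, with a deliberately simplified and hypothetical structure (example_port is not a kernel type):

#include <linux/device.h>
#include <linux/kernel.h>

struct example_port {
	int		id;
	struct device	tdev;	/* embedded transport device */
};

#define to_example_port(d) container_of(d, struct example_port, tdev)

static int example_port_suspend(struct device *dev)
{
	struct example_port *port = to_example_port(dev);

	/* 'port' now points at the structure that embeds 'dev' */
	dev_dbg(dev, "suspending port %d\n", port->id);
	return 0;
}
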
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 2a5412e..508a60b 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3381,6 +3381,7 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
if (!shost)
goto err_alloc;
+ shost->eh_noresume = 1;
*(struct ata_port **)&shost->hostdata[0] = ap;
ap->scsi_host = shost;
@@ -3398,7 +3399,7 @@ int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht)
*/
shost->max_host_blocked = 1;
- rc = scsi_add_host(ap->scsi_host, ap->host->dev);
+ rc = scsi_add_host(ap->scsi_host, &ap->tdev);
if (rc)
goto err_add;
}
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 4cadfa2..9691dd0 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -929,11 +929,11 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
bytes = (bc_hi << 8) | bc_lo;
/* shall be cleared to zero, indicating xfer of data */
- if (unlikely(ireason & (1 << 0)))
+ if (unlikely(ireason & ATAPI_COD))
goto atapi_check;
/* make sure transfer direction matches expected */
- i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
+ i_write = ((ireason & ATAPI_IO) == 0) ? 1 : 0;
if (unlikely(do_write != i_write))
goto atapi_check;
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index ce9dc62..74aaee3 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -32,6 +32,7 @@
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
#include "libata.h"
#include "libata-transport.h"
@@ -279,6 +280,7 @@ int ata_tport_add(struct device *parent,
struct device *dev = &ap->tdev;
device_initialize(dev);
+ dev->type = &ata_port_type;
dev->parent = get_device(parent);
dev->release = ata_tport_release;
@@ -289,6 +291,10 @@ int ata_tport_add(struct device *parent,
goto tport_err;
}
+ device_enable_async_suspend(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
transport_add_device(dev);
transport_configure_device(dev);
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 773de97..814486d 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -58,6 +58,7 @@ extern int atapi_passthru16;
extern int libata_fua;
extern int libata_noacpi;
extern int libata_allow_tpm;
+extern struct device_type ata_port_type;
extern struct ata_link *ata_dev_phys_link(struct ata_device *dev);
extern void ata_force_cbl(struct ata_port *ap);
extern u64 ata_tf_to_lba(const struct ata_taskfile *tf);
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index e8574bb..048589f 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -963,17 +963,7 @@ static struct platform_driver arasan_cf_driver = {
},
};
-static int __init arasan_cf_init(void)
-{
- return platform_driver_register(&arasan_cf_driver);
-}
-module_init(arasan_cf_init);
-
-static void __exit arasan_cf_exit(void)
-{
- platform_driver_unregister(&arasan_cf_driver);
-}
-module_exit(arasan_cf_exit);
+module_platform_driver(arasan_cf_driver);
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
index a76f24a..53d3770 100644
--- a/drivers/ata/pata_at91.c
+++ b/drivers/ata/pata_at91.c
@@ -207,11 +207,11 @@ static void set_smc_timing(struct device *dev, struct ata_device *adev,
{
int ret = 0;
int use_iordy;
+ struct sam9_smc_config smc;
unsigned int t6z; /* data tristate time in ns */
unsigned int cycle; /* SMC Cycle width in MCK ticks */
unsigned int setup; /* SMC Setup width in MCK ticks */
unsigned int pulse; /* CFIOR and CFIOW pulse width in MCK ticks */
- unsigned int cs_setup = 0;/* CS4 or CS5 setup width in MCK ticks */
unsigned int cs_pulse; /* CS4 or CS5 pulse width in MCK ticks*/
unsigned int tdf_cycles; /* SMC TDF MCK ticks */
unsigned long mck_hz; /* MCK frequency in Hz */
@@ -244,26 +244,20 @@ static void set_smc_timing(struct device *dev, struct ata_device *adev,
}
dev_dbg(dev, "Use IORDY=%u, TDF Cycles=%u\n", use_iordy, tdf_cycles);
- info->mode |= AT91_SMC_TDF_(tdf_cycles);
-
- /* write SMC Setup Register */
- at91_sys_write(AT91_SMC_SETUP(info->cs),
- AT91_SMC_NWESETUP_(setup) |
- AT91_SMC_NRDSETUP_(setup) |
- AT91_SMC_NCS_WRSETUP_(cs_setup) |
- AT91_SMC_NCS_RDSETUP_(cs_setup));
- /* write SMC Pulse Register */
- at91_sys_write(AT91_SMC_PULSE(info->cs),
- AT91_SMC_NWEPULSE_(pulse) |
- AT91_SMC_NRDPULSE_(pulse) |
- AT91_SMC_NCS_WRPULSE_(cs_pulse) |
- AT91_SMC_NCS_RDPULSE_(cs_pulse));
- /* write SMC Cycle Register */
- at91_sys_write(AT91_SMC_CYCLE(info->cs),
- AT91_SMC_NWECYCLE_(cycle) |
- AT91_SMC_NRDCYCLE_(cycle));
- /* write SMC Mode Register*/
- at91_sys_write(AT91_SMC_MODE(info->cs), info->mode);
+
+ /* SMC Setup Register */
+ smc.nwe_setup = smc.nrd_setup = setup;
+ smc.ncs_write_setup = smc.ncs_read_setup = 0;
+ /* SMC Pulse Register */
+ smc.nwe_pulse = smc.nrd_pulse = pulse;
+ smc.ncs_write_pulse = smc.ncs_read_pulse = cs_pulse;
+ /* SMC Cycle Register */
+ smc.write_cycle = smc.read_cycle = cycle;
+ /* SMC Mode Register */
+ smc.tdf_cycles = tdf_cycles;
+ smc.mode = info->mode;
+
+ sam9_smc_configure(0, info->cs, &smc);
}
static void pata_at91_set_piomode(struct ata_port *ap, struct ata_device *adev)
@@ -288,20 +282,20 @@ static unsigned int pata_at91_data_xfer_noirq(struct ata_device *dev,
struct at91_ide_info *info = dev->link->ap->host->private_data;
unsigned int consumed;
unsigned long flags;
- unsigned int mode;
+ struct sam9_smc_config smc;
local_irq_save(flags);
- mode = at91_sys_read(AT91_SMC_MODE(info->cs));
+ sam9_smc_read_mode(0, info->cs, &smc);
/* set 16bit mode before writing data */
- at91_sys_write(AT91_SMC_MODE(info->cs),
- (mode & ~AT91_SMC_DBW) | AT91_SMC_DBW_16);
+ smc.mode = (smc.mode & ~AT91_SMC_DBW) | AT91_SMC_DBW_16;
+ sam9_smc_write_mode(0, info->cs, &smc);
consumed = ata_sff_data_xfer(dev, buf, buflen, rw);
/* restore 8bit mode after data is written */
- at91_sys_write(AT91_SMC_MODE(info->cs),
- (mode & ~AT91_SMC_DBW) | AT91_SMC_DBW_8);
+ smc.mode = (smc.mode & ~AT91_SMC_DBW) | AT91_SMC_DBW_8;
+ sam9_smc_write_mode(0, info->cs, &smc);
local_irq_restore(flags);
return consumed;
@@ -360,7 +354,7 @@ static int __devinit pata_at91_probe(struct platform_device *pdev)
ap->flags |= ATA_FLAG_SLAVE_POSS;
ap->pio_mask = ATA_PIO4;
- if (!irq) {
+ if (!gpio_is_valid(irq)) {
ap->flags |= ATA_FLAG_PIO_POLLING;
ata_port_desc(ap, "no IRQ, using PIO polling");
}
@@ -414,8 +408,8 @@ static int __devinit pata_at91_probe(struct platform_device *pdev)
host->private_data = info;
- ret = ata_host_activate(host, irq ? gpio_to_irq(irq) : 0,
- irq ? ata_sff_interrupt : NULL,
+ return ata_host_activate(host, gpio_is_valid(irq) ? gpio_to_irq(irq) : 0,
+ gpio_is_valid(irq) ? ata_sff_interrupt : NULL,
irq_flags, &pata_at91_sht);
if (!ret)
@@ -454,20 +448,7 @@ static struct platform_driver pata_at91_driver = {
},
};
-static int __init pata_at91_init(void)
-{
- return platform_driver_register(&pata_at91_driver);
-}
-
-static void __exit pata_at91_exit(void)
-{
- platform_driver_unregister(&pata_at91_driver);
-}
-
-
-module_init(pata_at91_init);
-module_exit(pata_at91_exit);
-
+module_platform_driver(pata_at91_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Driver for CF in True IDE mode on AT91SAM9260 SoC");
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
index bd987bb..1e65842 100644
--- a/drivers/ata/pata_bf54x.c
+++ b/drivers/ata/pata_bf54x.c
@@ -251,6 +251,8 @@ static const u32 udma_tenvmin = 20;
static const u32 udma_tackmin = 20;
static const u32 udma_tssmin = 50;
+#define BFIN_MAX_SG_SEGMENTS 4
+
/**
*
* Function: num_clocks_min
@@ -418,14 +420,6 @@ static void bfin_set_dmamode(struct ata_port *ap, struct ata_device *adev)
(tcyc_tdvs<<8 | tdvs));
ATAPI_SET_ULTRA_TIM_2(base, (tmli<<8 | tss));
ATAPI_SET_ULTRA_TIM_3(base, (trp<<8 | tzah));
-
- /* Enable host ATAPI Untra DMA interrupts */
- ATAPI_SET_INT_MASK(base,
- ATAPI_GET_INT_MASK(base)
- | UDMAIN_DONE_MASK
- | UDMAOUT_DONE_MASK
- | UDMAIN_TERM_MASK
- | UDMAOUT_TERM_MASK);
}
}
}
@@ -470,10 +464,6 @@ static void bfin_set_dmamode(struct ata_port *ap, struct ata_device *adev)
ATAPI_SET_MULTI_TIM_0(base, (tm<<8 | td));
ATAPI_SET_MULTI_TIM_1(base, (tkr<<8 | tkw));
ATAPI_SET_MULTI_TIM_2(base, (teoc<<8 | th));
-
- /* Enable host ATAPI Multi DMA interrupts */
- ATAPI_SET_INT_MASK(base, ATAPI_GET_INT_MASK(base)
- | MULTI_DONE_MASK | MULTI_TERM_MASK);
SSYNC();
}
}
@@ -841,79 +831,61 @@ static void bfin_set_devctl(struct ata_port *ap, u8 ctl)
static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
{
- unsigned short config = WDSIZE_16;
+ struct ata_port *ap = qc->ap;
+ struct dma_desc_array *dma_desc_cpu = (struct dma_desc_array *)ap->bmdma_prd;
+ void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+ unsigned short config = DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_16 | DMAEN;
struct scatterlist *sg;
unsigned int si;
+ unsigned int channel;
+ unsigned int dir;
+ unsigned int size = 0;
dev_dbg(qc->ap->dev, "in atapi dma setup\n");
/* Program the ATA_CTRL register with dir */
if (qc->tf.flags & ATA_TFLAG_WRITE) {
- /* fill the ATAPI DMA controller */
- set_dma_config(CH_ATAPI_TX, config);
- set_dma_x_modify(CH_ATAPI_TX, 2);
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- set_dma_start_addr(CH_ATAPI_TX, sg_dma_address(sg));
- set_dma_x_count(CH_ATAPI_TX, sg_dma_len(sg) >> 1);
- }
+ channel = CH_ATAPI_TX;
+ dir = DMA_TO_DEVICE;
} else {
+ channel = CH_ATAPI_RX;
+ dir = DMA_FROM_DEVICE;
config |= WNR;
- /* fill the ATAPI DMA controller */
- set_dma_config(CH_ATAPI_RX, config);
- set_dma_x_modify(CH_ATAPI_RX, 2);
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- set_dma_start_addr(CH_ATAPI_RX, sg_dma_address(sg));
- set_dma_x_count(CH_ATAPI_RX, sg_dma_len(sg) >> 1);
- }
}
-}
-/**
- * bfin_bmdma_start - Start an IDE DMA transaction
- * @qc: Info associated with this ATA transaction.
- *
- * Note: Original code is ata_bmdma_start().
- */
+ dma_map_sg(ap->dev, qc->sg, qc->n_elem, dir);
-static void bfin_bmdma_start(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
- struct scatterlist *sg;
- unsigned int si;
+ /* fill the ATAPI DMA controller */
+ for_each_sg(qc->sg, sg, qc->n_elem, si) {
+ dma_desc_cpu[si].start_addr = sg_dma_address(sg);
+ dma_desc_cpu[si].cfg = config;
+ dma_desc_cpu[si].x_count = sg_dma_len(sg) >> 1;
+ dma_desc_cpu[si].x_modify = 2;
+ size += sg_dma_len(sg);
+ }
- dev_dbg(qc->ap->dev, "in atapi dma start\n");
- if (!(ap->udma_mask || ap->mwdma_mask))
- return;
+ /* Set the last descriptor to stop mode */
+ dma_desc_cpu[qc->n_elem - 1].cfg &= ~(DMAFLOW | NDSIZE);
- /* start ATAPI DMA controller*/
- if (qc->tf.flags & ATA_TFLAG_WRITE) {
- /*
- * On blackfin arch, uncacheable memory is not
- * allocated with flag GFP_DMA. DMA buffer from
- * common kenel code should be flushed if WB
- * data cache is enabled. Otherwise, this loop
- * is an empty loop and optimized out.
- */
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- flush_dcache_range(sg_dma_address(sg),
- sg_dma_address(sg) + sg_dma_len(sg));
- }
- enable_dma(CH_ATAPI_TX);
- dev_dbg(qc->ap->dev, "enable udma write\n");
+ flush_dcache_range((unsigned int)dma_desc_cpu,
+ (unsigned int)dma_desc_cpu +
+ qc->n_elem * sizeof(struct dma_desc_array));
+
+ /* Enable ATA DMA operation */
+ set_dma_curr_desc_addr(channel, (unsigned long *)ap->bmdma_prd_dma);
+ set_dma_x_count(channel, 0);
+ set_dma_x_modify(channel, 0);
+ set_dma_config(channel, config);
+
+ SSYNC();
- /* Send ATA DMA write command */
- bfin_exec_command(ap, &qc->tf);
+ /* Send ATA DMA command */
+ bfin_exec_command(ap, &qc->tf);
+ if (qc->tf.flags & ATA_TFLAG_WRITE) {
/* set ATA DMA write direction */
ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
| XFER_DIR));
} else {
- enable_dma(CH_ATAPI_RX);
- dev_dbg(qc->ap->dev, "enable udma read\n");
-
- /* Send ATA DMA read command */
- bfin_exec_command(ap, &qc->tf);
-
/* set ATA DMA read direction */
ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
& ~XFER_DIR));
@@ -925,12 +897,28 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
/* Set ATAPI state machine contorl in terminate sequence */
ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | END_ON_TERM);
- /* Set transfer length to buffer len */
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- ATAPI_SET_XFER_LEN(base, (sg_dma_len(sg) >> 1));
- }
+ /* Set transfer length to the total size of sg buffers */
+ ATAPI_SET_XFER_LEN(base, size >> 1);
+}
- /* Enable ATA DMA operation*/
+/**
+ * bfin_bmdma_start - Start an IDE DMA transaction
+ * @qc: Info associated with this ATA transaction.
+ *
+ * Note: Original code is ata_bmdma_start().
+ */
+
+static void bfin_bmdma_start(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+
+ dev_dbg(qc->ap->dev, "in atapi dma start\n");
+
+ if (!(ap->udma_mask || ap->mwdma_mask))
+ return;
+
+ /* start ATAPI transfer */
if (ap->udma_mask)
ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base)
| ULTRA_START);
@@ -947,34 +935,23 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
static void bfin_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
- struct scatterlist *sg;
- unsigned int si;
+ unsigned int dir;
dev_dbg(qc->ap->dev, "in atapi dma stop\n");
+
if (!(ap->udma_mask || ap->mwdma_mask))
return;
/* stop ATAPI DMA controller*/
- if (qc->tf.flags & ATA_TFLAG_WRITE)
+ if (qc->tf.flags & ATA_TFLAG_WRITE) {
+ dir = DMA_TO_DEVICE;
disable_dma(CH_ATAPI_TX);
- else {
+ } else {
+ dir = DMA_FROM_DEVICE;
disable_dma(CH_ATAPI_RX);
- if (ap->hsm_task_state & HSM_ST_LAST) {
- /*
- * On blackfin arch, uncacheable memory is not
- * allocated with flag GFP_DMA. DMA buffer from
- * common kenel code should be invalidated if
- * data cache is enabled. Otherwise, this loop
- * is an empty loop and optimized out.
- */
- for_each_sg(qc->sg, sg, qc->n_elem, si) {
- invalidate_dcache_range(
- sg_dma_address(sg),
- sg_dma_address(sg)
- + sg_dma_len(sg));
- }
- }
}
+
+ dma_unmap_sg(ap->dev, qc->sg, qc->n_elem, dir);
}
/**
@@ -1153,15 +1130,11 @@ static unsigned char bfin_bmdma_status(struct ata_port *ap)
{
unsigned char host_stat = 0;
void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
- unsigned short int_status = ATAPI_GET_INT_STATUS(base);
- if (ATAPI_GET_STATUS(base) & (MULTI_XFER_ON|ULTRA_XFER_ON))
+ if (ATAPI_GET_STATUS(base) & (MULTI_XFER_ON | ULTRA_XFER_ON))
host_stat |= ATA_DMA_ACTIVE;
- if (int_status & (MULTI_DONE_INT|UDMAIN_DONE_INT|UDMAOUT_DONE_INT|
- ATAPI_DEV_INT))
+ if (ATAPI_GET_INT_STATUS(base) & ATAPI_DEV_INT)
host_stat |= ATA_DMA_INTR;
- if (int_status & (MULTI_TERM_INT|UDMAIN_TERM_INT|UDMAOUT_TERM_INT))
- host_stat |= ATA_DMA_ERR|ATA_DMA_INTR;
dev_dbg(ap->dev, "ATAPI: host_stat=0x%x\n", host_stat);
@@ -1276,6 +1249,11 @@ static void bfin_port_stop(struct ata_port *ap)
{
dev_dbg(ap->dev, "in atapi port stop\n");
if (ap->udma_mask != 0 || ap->mwdma_mask != 0) {
+ dma_free_coherent(ap->dev,
+ BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
+ ap->bmdma_prd,
+ ap->bmdma_prd_dma);
+
free_dma(CH_ATAPI_RX);
free_dma(CH_ATAPI_TX);
}
@@ -1287,14 +1265,29 @@ static int bfin_port_start(struct ata_port *ap)
if (!(ap->udma_mask || ap->mwdma_mask))
return 0;
+ ap->bmdma_prd = dma_alloc_coherent(ap->dev,
+ BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
+ &ap->bmdma_prd_dma,
+ GFP_KERNEL);
+
+ if (ap->bmdma_prd == NULL) {
+ dev_info(ap->dev, "Unable to allocate DMA descriptor array.\n");
+ goto out;
+ }
+
if (request_dma(CH_ATAPI_RX, "BFIN ATAPI RX DMA") >= 0) {
if (request_dma(CH_ATAPI_TX,
"BFIN ATAPI TX DMA") >= 0)
return 0;
free_dma(CH_ATAPI_RX);
+ dma_free_coherent(ap->dev,
+ BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
+ ap->bmdma_prd,
+ ap->bmdma_prd_dma);
}
+out:
ap->udma_mask = 0;
ap->mwdma_mask = 0;
dev_err(ap->dev, "Unable to request ATAPI DMA!"
@@ -1416,7 +1409,7 @@ static irqreturn_t bfin_ata_interrupt(int irq, void *dev_instance)
static struct scsi_host_template bfin_sht = {
ATA_BASE_SHT(DRV_NAME),
- .sg_tablesize = SG_NONE,
+ .sg_tablesize = BFIN_MAX_SG_SEGMENTS,
.dma_boundary = ATA_DMA_BOUNDARY,
};
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index 628c8fa..7a402c7 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -1,6 +1,7 @@
/*
* pata_cs5536.c - CS5536 PATA for new ATA layer
* (C) 2007 Martin K. Petersen <mkp@mkp.net>
+ * (C) 2011 Bartlomiej Zolnierkiewicz
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -55,24 +56,16 @@ MODULE_PARM_DESC(msr, "Force using MSR to configure IDE function (Default: 0)");
#define DRV_VERSION "0.0.8"
enum {
- CFG = 0,
- DTC = 1,
- CAST = 2,
- ETC = 3,
-
- MSR_IDE_BASE = 0x51300000,
- MSR_IDE_CFG = (MSR_IDE_BASE + 0x10),
- MSR_IDE_DTC = (MSR_IDE_BASE + 0x12),
- MSR_IDE_CAST = (MSR_IDE_BASE + 0x13),
- MSR_IDE_ETC = (MSR_IDE_BASE + 0x14),
-
+ MSR_IDE_CFG = 0x51300010,
PCI_IDE_CFG = 0x40,
- PCI_IDE_DTC = 0x48,
- PCI_IDE_CAST = 0x4c,
- PCI_IDE_ETC = 0x50,
- IDE_CFG_CHANEN = 0x2,
- IDE_CFG_CABLE = 0x10000,
+ CFG = 0,
+ DTC = 2,
+ CAST = 3,
+ ETC = 4,
+
+ IDE_CFG_CHANEN = (1 << 1),
+ IDE_CFG_CABLE = (1 << 17) | (1 << 16),
IDE_D0_SHIFT = 24,
IDE_D1_SHIFT = 16,
@@ -84,45 +77,50 @@ enum {
IDE_CAST_CMD_MASK = 0xff,
IDE_CAST_CMD_SHIFT = 24,
- IDE_ETC_NODMA = 0x03,
-};
-
-static const u32 msr_reg[4] = {
- MSR_IDE_CFG, MSR_IDE_DTC, MSR_IDE_CAST, MSR_IDE_ETC,
-};
-
-static const u8 pci_reg[4] = {
- PCI_IDE_CFG, PCI_IDE_DTC, PCI_IDE_CAST, PCI_IDE_ETC,
+ IDE_ETC_UDMA_MASK = 0xc0,
};
-static inline int cs5536_read(struct pci_dev *pdev, int reg, u32 *val)
+static int cs5536_read(struct pci_dev *pdev, int reg, u32 *val)
{
if (unlikely(use_msr)) {
u32 dummy __maybe_unused;
- rdmsr(msr_reg[reg], *val, dummy);
+ rdmsr(MSR_IDE_CFG + reg, *val, dummy);
return 0;
}
- return pci_read_config_dword(pdev, pci_reg[reg], val);
+ return pci_read_config_dword(pdev, PCI_IDE_CFG + reg * 4, val);
}
-static inline int cs5536_write(struct pci_dev *pdev, int reg, int val)
+static int cs5536_write(struct pci_dev *pdev, int reg, int val)
{
if (unlikely(use_msr)) {
- wrmsr(msr_reg[reg], val, 0);
+ wrmsr(MSR_IDE_CFG + reg, val, 0);
return 0;
}
- return pci_write_config_dword(pdev, pci_reg[reg], val);
+ return pci_write_config_dword(pdev, PCI_IDE_CFG + reg * 4, val);
+}
+
+static void cs5536_program_dtc(struct ata_device *adev, u8 tim)
+{
+ struct pci_dev *pdev = to_pci_dev(adev->link->ap->host->dev);
+ int dshift = adev->devno ? IDE_D1_SHIFT : IDE_D0_SHIFT;
+ u32 dtc;
+
+ cs5536_read(pdev, DTC, &dtc);
+ dtc &= ~(IDE_DRV_MASK << dshift);
+ dtc |= tim << dshift;
+ cs5536_write(pdev, DTC, dtc);
}
/**
* cs5536_cable_detect - detect cable type
* @ap: Port to detect on
*
- * Perform cable detection for ATA66 capable cable. Return a libata
- * cable type.
+ * Perform cable detection for ATA66 capable cable.
+ *
+ * Returns a cable type.
*/
static int cs5536_cable_detect(struct ata_port *ap)
@@ -132,7 +130,7 @@ static int cs5536_cable_detect(struct ata_port *ap)
cs5536_read(pdev, CFG, &cfg);
- if (cfg & (IDE_CFG_CABLE << ap->port_no))
+ if (cfg & IDE_CFG_CABLE)
return ATA_CBL_PATA80;
else
return ATA_CBL_PATA40;
@@ -162,19 +160,15 @@ static void cs5536_set_piomode(struct ata_port *ap, struct ata_device *adev)
struct ata_device *pair = ata_dev_pair(adev);
int mode = adev->pio_mode - XFER_PIO_0;
int cmdmode = mode;
- int dshift = adev->devno ? IDE_D1_SHIFT : IDE_D0_SHIFT;
int cshift = adev->devno ? IDE_CAST_D1_SHIFT : IDE_CAST_D0_SHIFT;
- u32 dtc, cast, etc;
+ u32 cast;
if (pair)
cmdmode = min(mode, pair->pio_mode - XFER_PIO_0);
- cs5536_read(pdev, DTC, &dtc);
- cs5536_read(pdev, CAST, &cast);
- cs5536_read(pdev, ETC, &etc);
+ cs5536_program_dtc(adev, drv_timings[mode]);
- dtc &= ~(IDE_DRV_MASK << dshift);
- dtc |= drv_timings[mode] << dshift;
+ cs5536_read(pdev, CAST, &cast);
cast &= ~(IDE_CAST_DRV_MASK << cshift);
cast |= addr_timings[mode] << cshift;
@@ -182,12 +176,7 @@ static void cs5536_set_piomode(struct ata_port *ap, struct ata_device *adev)
cast &= ~(IDE_CAST_CMD_MASK << IDE_CAST_CMD_SHIFT);
cast |= cmd_timings[cmdmode] << IDE_CAST_CMD_SHIFT;
- etc &= ~(IDE_DRV_MASK << dshift);
- etc |= IDE_ETC_NODMA << dshift;
-
- cs5536_write(pdev, DTC, dtc);
cs5536_write(pdev, CAST, cast);
- cs5536_write(pdev, ETC, etc);
}
/**
@@ -208,25 +197,21 @@ static void cs5536_set_dmamode(struct ata_port *ap, struct ata_device *adev)
};
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- u32 dtc, etc;
+ u32 etc;
int mode = adev->dma_mode;
int dshift = adev->devno ? IDE_D1_SHIFT : IDE_D0_SHIFT;
- if (mode >= XFER_UDMA_0) {
- cs5536_read(pdev, ETC, &etc);
+ cs5536_read(pdev, ETC, &etc);
+ if (mode >= XFER_UDMA_0) {
etc &= ~(IDE_DRV_MASK << dshift);
etc |= udma_timings[mode - XFER_UDMA_0] << dshift;
-
- cs5536_write(pdev, ETC, etc);
} else { /* MWDMA */
- cs5536_read(pdev, DTC, &dtc);
-
- dtc &= ~(IDE_DRV_MASK << dshift);
- dtc |= mwdma_timings[mode - XFER_MW_DMA_0] << dshift;
-
- cs5536_write(pdev, DTC, dtc);
+ etc &= ~(IDE_ETC_UDMA_MASK << dshift);
+ cs5536_program_dtc(adev, mwdma_timings[mode - XFER_MW_DMA_0]);
}
+
+ cs5536_write(pdev, ETC, etc);
}
static struct scsi_host_template cs5536_sht = {
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index ca9d9ca..c5af97f 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -235,17 +235,7 @@ static struct platform_driver pata_imx_driver = {
},
};
-static int __init pata_imx_init(void)
-{
- return platform_driver_register(&pata_imx_driver);
-}
-
-static void __exit pata_imx_exit(void)
-{
- platform_driver_unregister(&pata_imx_driver);
-}
-module_init(pata_imx_init);
-module_exit(pata_imx_exit);
+module_platform_driver(pata_imx_driver);
MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
MODULE_DESCRIPTION("low-level driver for iMX PATA");
diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
index 15b6431..badb178 100644
--- a/drivers/ata/pata_ixp4xx_cf.c
+++ b/drivers/ata/pata_ixp4xx_cf.c
@@ -205,21 +205,10 @@ static struct platform_driver ixp4xx_pata_platform_driver = {
.remove = __devexit_p(ixp4xx_pata_remove),
};
-static int __init ixp4xx_pata_init(void)
-{
- return platform_driver_register(&ixp4xx_pata_platform_driver);
-}
-
-static void __exit ixp4xx_pata_exit(void)
-{
- platform_driver_unregister(&ixp4xx_pata_platform_driver);
-}
+module_platform_driver(ixp4xx_pata_platform_driver);
MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("low-level driver for ixp4xx Compact Flash PATA");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);
-
-module_init(ixp4xx_pata_init);
-module_exit(ixp4xx_pata_exit);
diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
index 3e17463..00748ae 100644
--- a/drivers/ata/pata_mpc52xx.c
+++ b/drivers/ata/pata_mpc52xx.c
@@ -897,26 +897,7 @@ static struct platform_driver mpc52xx_ata_of_platform_driver = {
},
};
-
-/* ======================================================================== */
-/* Module */
-/* ======================================================================== */
-
-static int __init
-mpc52xx_ata_init(void)
-{
- printk(KERN_INFO "ata: MPC52xx IDE/ATA libata driver\n");
- return platform_driver_register(&mpc52xx_ata_of_platform_driver);
-}
-
-static void __exit
-mpc52xx_ata_exit(void)
-{
- platform_driver_unregister(&mpc52xx_ata_of_platform_driver);
-}
-
-module_init(mpc52xx_ata_init);
-module_exit(mpc52xx_ata_exit);
+module_platform_driver(mpc52xx_ata_of_platform_driver);
MODULE_AUTHOR("Sylvain Munaut <tnt@246tNt.com>");
MODULE_DESCRIPTION("Freescale MPC52xx IDE/ATA libata driver");
diff --git a/drivers/ata/pata_of_platform.c b/drivers/ata/pata_of_platform.c
index 2a472c5..1654dc2 100644
--- a/drivers/ata/pata_of_platform.c
+++ b/drivers/ata/pata_of_platform.c
@@ -12,8 +12,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
-#include <linux/of_irq.h>
-#include <linux/of_platform.h>
+#include <linux/platform_device.h>
#include <linux/ata_platform.h>
static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
@@ -22,7 +21,7 @@ static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
struct device_node *dn = ofdev->dev.of_node;
struct resource io_res;
struct resource ctl_res;
- struct resource irq_res;
+ struct resource *irq_res;
unsigned int reg_shift = 0;
int pio_mode = 0;
int pio_mask;
@@ -51,11 +50,9 @@ static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
}
}
- ret = of_irq_to_resource(dn, 0, &irq_res);
- if (!ret)
- irq_res.start = irq_res.end = 0;
- else
- irq_res.flags = 0;
+ irq_res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0);
+ if (irq_res)
+ irq_res->flags = 0;
prop = of_get_property(dn, "reg-shift", NULL);
if (prop)
@@ -75,7 +72,7 @@ static int __devinit pata_of_platform_probe(struct platform_device *ofdev)
pio_mask = 1 << pio_mode;
pio_mask |= (1 << pio_mode) - 1;
- return __pata_platform_probe(&ofdev->dev, &io_res, &ctl_res, &irq_res,
+ return __pata_platform_probe(&ofdev->dev, &io_res, &ctl_res, irq_res,
reg_shift, pio_mask);
}
@@ -101,17 +98,7 @@ static struct platform_driver pata_of_platform_driver = {
.remove = __devexit_p(pata_of_platform_remove),
};
-static int __init pata_of_platform_init(void)
-{
- return platform_driver_register(&pata_of_platform_driver);
-}
-module_init(pata_of_platform_init);
-
-static void __exit pata_of_platform_exit(void)
-{
- platform_driver_unregister(&pata_of_platform_driver);
-}
-module_exit(pata_of_platform_exit);
+module_platform_driver(pata_of_platform_driver);
MODULE_DESCRIPTION("OF-platform PATA driver");
MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
index b86d7e2..5ff31b6 100644
--- a/drivers/ata/pata_palmld.c
+++ b/drivers/ata/pata_palmld.c
@@ -132,20 +132,9 @@ static struct platform_driver palmld_pata_platform_driver = {
.remove = __devexit_p(palmld_pata_remove),
};
-static int __init palmld_pata_init(void)
-{
- return platform_driver_register(&palmld_pata_platform_driver);
-}
-
-static void __exit palmld_pata_exit(void)
-{
- platform_driver_unregister(&palmld_pata_platform_driver);
-}
+module_platform_driver(palmld_pata_platform_driver);
MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
MODULE_DESCRIPTION("PalmLD PATA driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRV_NAME);
-
-module_init(palmld_pata_init);
-module_exit(palmld_pata_exit);
diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
index 2067308..f1848ae 100644
--- a/drivers/ata/pata_platform.c
+++ b/drivers/ata/pata_platform.c
@@ -256,17 +256,7 @@ static struct platform_driver pata_platform_driver = {
},
};
-static int __init pata_platform_init(void)
-{
- return platform_driver_register(&pata_platform_driver);
-}
-
-static void __exit pata_platform_exit(void)
-{
- platform_driver_unregister(&pata_platform_driver);
-}
-module_init(pata_platform_init);
-module_exit(pata_platform_exit);
+module_platform_driver(pata_platform_driver);
module_param(pio_mask, int, 0);
diff --git a/drivers/ata/pata_pxa.c b/drivers/ata/pata_pxa.c
index b4ede40..0bb0fb7 100644
--- a/drivers/ata/pata_pxa.c
+++ b/drivers/ata/pata_pxa.c
@@ -390,18 +390,7 @@ static struct platform_driver pxa_ata_driver = {
},
};
-static int __init pxa_ata_init(void)
-{
- return platform_driver_register(&pxa_ata_driver);
-}
-
-static void __exit pxa_ata_exit(void)
-{
- platform_driver_unregister(&pxa_ata_driver);
-}
-
-module_init(pxa_ata_init);
-module_exit(pxa_ata_exit);
+module_platform_driver(pxa_ata_driver);
MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
MODULE_DESCRIPTION("DMA-capable driver for PATA on PXA CPU");
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index 1b9d10d..9417101 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -188,9 +188,6 @@ static __devexit int rb532_pata_driver_remove(struct platform_device *pdev)
return 0;
}
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:" DRV_NAME);
-
static struct platform_driver rb532_pata_platform_driver = {
.probe = rb532_pata_driver_probe,
.remove = __devexit_p(rb532_pata_driver_remove),
@@ -200,27 +197,13 @@ static struct platform_driver rb532_pata_platform_driver = {
},
};
-/* ------------------------------------------------------------------------ */
-
#define DRV_INFO DRV_DESC " version " DRV_VERSION
-static int __init rb532_pata_module_init(void)
-{
- printk(KERN_INFO DRV_INFO "\n");
-
- return platform_driver_register(&rb532_pata_platform_driver);
-}
-
-static void __exit rb532_pata_module_exit(void)
-{
- platform_driver_unregister(&rb532_pata_platform_driver);
-}
+module_platform_driver(rb532_pata_platform_driver);
MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>");
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");
-
-module_init(rb532_pata_module_init);
-module_exit(rb532_pata_module_exit);
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index 5c42374..69f7cde 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -1777,18 +1777,7 @@ static struct platform_driver sata_dwc_driver = {
.remove = sata_dwc_remove,
};
-static int __init sata_dwc_init(void)
-{
- return platform_driver_register(&sata_dwc_driver);
-}
-
-static void __exit sata_dwc_exit(void)
-{
- platform_driver_unregister(&sata_dwc_driver);
-}
-
-module_init(sata_dwc_init);
-module_exit(sata_dwc_exit);
+module_platform_driver(sata_dwc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mark Miesfeld <mmiesfeld@amcc.com>");
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 78ae7b6..0120b0d 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -140,6 +140,7 @@ enum {
*/
HCONTROL_ONLINE_PHY_RST = (1 << 31),
HCONTROL_FORCE_OFFLINE = (1 << 30),
+ HCONTROL_LEGACY = (1 << 28),
HCONTROL_PARITY_PROT_MOD = (1 << 14),
HCONTROL_DPATH_PARITY = (1 << 12),
HCONTROL_SNOOP_ENABLE = (1 << 10),
@@ -1223,6 +1224,10 @@ static int sata_fsl_init_controller(struct ata_host *host)
* part of the port_start() callback
*/
+ /* set the SATA controller to operate in enterprise (non-legacy) mode */
+ temp = ioread32(hcr_base + HCONTROL);
+ iowrite32(temp & ~HCONTROL_LEGACY, hcr_base + HCONTROL);
+
/* ack. any pending IRQs for this controller/port */
temp = ioread32(hcr_base + HSTATUS);
if (temp & 0x3F)
@@ -1421,6 +1426,12 @@ static int sata_fsl_resume(struct platform_device *op)
/* Recover the CHBA register in the host controller cmd register set */
iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA);
+ iowrite32((ioread32(hcr_base + HCONTROL)
+ | HCONTROL_ONLINE_PHY_RST
+ | HCONTROL_SNOOP_ENABLE
+ | HCONTROL_PMP_ATTACHED),
+ hcr_base + HCONTROL);
+
ata_host_resume(host);
return 0;
}
@@ -1452,21 +1463,9 @@ static struct platform_driver fsl_sata_driver = {
#endif
};
-static int __init sata_fsl_init(void)
-{
- platform_driver_register(&fsl_sata_driver);
- return 0;
-}
-
-static void __exit sata_fsl_exit(void)
-{
- platform_driver_unregister(&fsl_sata_driver);
-}
+module_platform_driver(fsl_sata_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ashish Kalra, Freescale Semiconductor");
MODULE_DESCRIPTION("Freescale 3.0Gbps SATA controller low level driver");
MODULE_VERSION("1.10");
-
-module_init(sata_fsl_init);
-module_exit(sata_fsl_exit);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 0b8b8b4..38950ea 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -3988,7 +3988,7 @@ static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
}
static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
- struct mbus_dram_target_info *dram)
+ const struct mbus_dram_target_info *dram)
{
int i;
@@ -3998,7 +3998,7 @@ static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
}
for (i = 0; i < dram->num_cs; i++) {
- struct mbus_dram_window *cs = dram->cs + i;
+ const struct mbus_dram_window *cs = dram->cs + i;
writel(((cs->size - 1) & 0xffff0000) |
(cs->mbus_attr << 8) |
@@ -4019,6 +4019,7 @@ static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
static int mv_platform_probe(struct platform_device *pdev)
{
const struct mv_sata_platform_data *mv_platform_data;
+ const struct mbus_dram_target_info *dram;
const struct ata_port_info *ppi[] =
{ &mv_port_info[chip_soc], NULL };
struct ata_host *host;
@@ -4072,8 +4073,9 @@ static int mv_platform_probe(struct platform_device *pdev)
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
- if (mv_platform_data->dram != NULL)
- mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
+ dram = mv_mbus_dram_info();
+ if (dram)
+ mv_conf_mbus_windows(hpriv, dram);
rc = mv_create_dma_pools(hpriv, &pdev->dev);
if (rc)
@@ -4141,17 +4143,18 @@ static int mv_platform_suspend(struct platform_device *pdev, pm_message_t state)
static int mv_platform_resume(struct platform_device *pdev)
{
struct ata_host *host = platform_get_drvdata(pdev);
+ const struct mbus_dram_target_info *dram;
int ret;
if (host) {
struct mv_host_priv *hpriv = host->private_data;
- const struct mv_sata_platform_data *mv_platform_data = \
- pdev->dev.platform_data;
+
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
- if (mv_platform_data->dram != NULL)
- mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
+ dram = mv_mbus_dram_info();
+ if (dram)
+ mv_conf_mbus_windows(hpriv, dram);
/* initialize adapter */
ret = mv_init_host(host);
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index e0bc964..55d6179 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -599,9 +599,9 @@ MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
-static int adma_enabled;
-static int swncq_enabled = 1;
-static int msi_enabled;
+static bool adma_enabled;
+static bool swncq_enabled = 1;
+static bool msi_enabled;
static void nv_adma_register_mode(struct ata_port *ap)
{
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index 1e91406..e7e610a 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -417,7 +417,7 @@ static struct ata_port_operations sil24_ops = {
#endif
};
-static int sata_sil24_msi; /* Disable MSI */
+static bool sata_sil24_msi; /* Disable MSI */
module_param_named(msi, sata_sil24_msi, bool, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable MSI (Default: false)");
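
The int-to-bool switches in sata_nv above, sata_sil24 here and the ATM drivers below follow from module_param()/module_param_named() with the "bool" type now expecting a variable of C type bool. A minimal sketch with a hypothetical parameter:

#include <linux/module.h>
#include <linux/stat.h>

static bool foo_msi;	/* defaults to false; enabled with foo.msi=1 */
module_param_named(msi, foo_msi, bool, S_IRUGO);
MODULE_PARM_DESC(msi, "Enable MSI (Default: false)");
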
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 9a51df4..b182c2f 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -112,12 +112,12 @@ static u8 read_prom_byte(struct he_dev *he_dev, int addr);
/* globals */
static struct he_dev *he_devs;
-static int disable64;
+static bool disable64;
static short nvpibits = -1;
static short nvcibits = -1;
static short rx_skb_reserve = 16;
-static int irq_coalesce = 1;
-static int sdh = 0;
+static bool irq_coalesce = 1;
+static bool sdh = 0;
/* Read from EEPROM = 0000 0011b */
static unsigned int readtab[] = {
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index 3d0c2b0..9e373ba 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -1320,8 +1320,8 @@ static void rx_dle_intr(struct atm_dev *dev)
if (ia_vcc == NULL)
{
atomic_inc(&vcc->stats->rx_err);
+ atm_return(vcc, skb->truesize);
dev_kfree_skb_any(skb);
- atm_return(vcc, atm_guess_pdu2truesize(len));
goto INCR_DLE;
}
// get real pkt length pwang_test
@@ -1334,8 +1334,8 @@ static void rx_dle_intr(struct atm_dev *dev)
atomic_inc(&vcc->stats->rx_err);
IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
length, skb->len);)
+ atm_return(vcc, skb->truesize);
dev_kfree_skb_any(skb);
- atm_return(vcc, atm_guess_pdu2truesize(len));
goto INCR_DLE;
}
skb_trim(skb, length);
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 5d1d076..e8cd652 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -1206,9 +1206,9 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
out_unmap_both:
pci_set_drvdata(dev, NULL);
- pci_iounmap(dev, card->config_regs);
- out_unmap_config:
pci_iounmap(dev, card->buffers);
+ out_unmap_config:
+ pci_iounmap(dev, card->config_regs);
out_release_regions:
pci_release_regions(dev);
out:
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 21cf46f..7be9f79 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -172,6 +172,21 @@ config SYS_HYPERVISOR
bool
default n
+config GENERIC_CPU_DEVICES
+ bool
+ default n
+
source "drivers/base/regmap/Kconfig"
+config DMA_SHARED_BUFFER
+ bool
+ default n
+ select ANON_INODES
+ depends on EXPERIMENTAL
+ help
+ This option enables the framework for buffer-sharing between
+ multiple drivers. A buffer is associated with a file using driver
+ APIs; the file's descriptor can then be passed on to other
+ drivers.
+
endmenu
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 99a375a..610f999 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -1,18 +1,19 @@
# Makefile for the Linux device tree
-obj-y := core.o sys.o bus.o dd.o syscore.o \
+obj-y := core.o bus.o dd.o syscore.o \
driver.o class.o platform.o \
cpu.o firmware.o init.o map.o devres.o \
- attribute_container.o transport_class.o
+ attribute_container.o transport_class.o \
+ topology.o
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
obj-y += power/
obj-$(CONFIG_HAS_DMA) += dma-mapping.o
obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
+obj-$(CONFIG_DMA_SHARED_BUFFER) += dma-buf.o
obj-$(CONFIG_ISA) += isa.o
obj-$(CONFIG_FW_LOADER) += firmware_class.o
obj-$(CONFIG_NUMA) += node.o
obj-$(CONFIG_MEMORY_HOTPLUG_SPARSE) += memory.o
-obj-$(CONFIG_SMP) += topology.o
ifeq ($(CONFIG_SYSFS),y)
obj-$(CONFIG_MODULES) += module.o
endif
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 21c1b96..b858dfd 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -4,7 +4,9 @@
* struct subsys_private - structure to hold the private to the driver core portions of the bus_type/class structure.
*
* @subsys - the struct kset that defines this subsystem
- * @devices_kset - the list of devices associated
+ * @devices_kset - the subsystem's 'devices' directory
+ * @interfaces - list of subsystem interfaces associated
+ * @mutex - protect the devices and interfaces lists.
*
* @drivers_kset - the list of drivers associated
* @klist_devices - the klist to iterate over the @devices_kset
@@ -14,10 +16,8 @@
* @bus - pointer back to the struct bus_type that this structure is associated
* with.
*
- * @class_interfaces - list of class_interfaces associated
* @glue_dirs - "glue" directory to put in-between the parent device to
* avoid namespace conflicts
- * @class_mutex - mutex to protect the children, devices, and interfaces lists.
* @class - pointer back to the struct class that this structure is associated
* with.
*
@@ -28,6 +28,8 @@
struct subsys_private {
struct kset subsys;
struct kset *devices_kset;
+ struct list_head interfaces;
+ struct mutex mutex;
struct kset *drivers_kset;
struct klist klist_devices;
@@ -36,9 +38,7 @@ struct subsys_private {
unsigned int drivers_autoprobe:1;
struct bus_type *bus;
- struct list_head class_interfaces;
struct kset glue_dirs;
- struct mutex class_mutex;
struct class *class;
};
#define to_subsys_private(obj) container_of(obj, struct subsys_private, subsys.kobj)
@@ -94,8 +94,7 @@ extern int hypervisor_init(void);
static inline int hypervisor_init(void) { return 0; }
#endif
extern int platform_bus_init(void);
-extern int system_bus_init(void);
-extern int cpu_dev_init(void);
+extern void cpu_dev_init(void);
extern int bus_add_device(struct device *dev);
extern void bus_probe_device(struct device *dev);
@@ -116,6 +115,7 @@ extern char *make_class_name(const char *name, struct kobject *kobj);
extern int devres_release_all(struct device *dev);
+/* /sys/devices directory */
extern struct kset *devices_kset;
#if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 000e7b2..40fb122 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -16,9 +16,14 @@
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/string.h>
+#include <linux/mutex.h>
#include "base.h"
#include "power/power.h"
+/* /sys/devices/system */
+/* FIXME: make static after drivers/base/sys.c is deleted */
+struct kset *system_kset;
+
#define to_bus_attr(_attr) container_of(_attr, struct bus_attribute, attr)
/*
@@ -360,6 +365,47 @@ struct device *bus_find_device_by_name(struct bus_type *bus,
}
EXPORT_SYMBOL_GPL(bus_find_device_by_name);
+/**
+ * subsys_find_device_by_id - find a device with a specific enumeration number
+ * @subsys: subsystem
+ * @id: index 'id' in struct device
+ * @hint: device to check first
+ *
+ * Check the hint's next object and if it is a match return it directly,
+ * otherwise, fall back to a full list search. Either way a reference for
+ * the returned object is taken.
+ */
+struct device *subsys_find_device_by_id(struct bus_type *subsys, unsigned int id,
+ struct device *hint)
+{
+ struct klist_iter i;
+ struct device *dev;
+
+ if (!subsys)
+ return NULL;
+
+ if (hint) {
+ klist_iter_init_node(&subsys->p->klist_devices, &i, &hint->p->knode_bus);
+ dev = next_device(&i);
+ if (dev && dev->id == id && get_device(dev)) {
+ klist_iter_exit(&i);
+ return dev;
+ }
+ klist_iter_exit(&i);
+ }
+
+ klist_iter_init_node(&subsys->p->klist_devices, &i, NULL);
+ while ((dev = next_device(&i))) {
+ if (dev->id == id && get_device(dev)) {
+ klist_iter_exit(&i);
+ return dev;
+ }
+ }
+ klist_iter_exit(&i);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(subsys_find_device_by_id);
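
A possible caller of the new lookup helper, sketched against cpu_subsys from the cpu.c conversion further down (its extern declaration comes from the matching linux/cpu.h change, and the hint may be NULL):

#include <linux/cpu.h>
#include <linux/device.h>

static struct device *foo_get_cpu1(struct device *hint)
{
	struct device *dev;

	/* a reference is taken on a match; drop it with put_device() when done */
	dev = subsys_find_device_by_id(&cpu_subsys, 1, hint);
	if (dev)
		dev_info(dev, "found device with id 1\n");
	return dev;
}
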
+
static struct device_driver *next_driver(struct klist_iter *i)
{
struct klist_node *n = klist_next(i);
@@ -487,38 +533,59 @@ out_put:
void bus_probe_device(struct device *dev)
{
struct bus_type *bus = dev->bus;
+ struct subsys_interface *sif;
int ret;
- if (bus && bus->p->drivers_autoprobe) {
+ if (!bus)
+ return;
+
+ if (bus->p->drivers_autoprobe) {
ret = device_attach(dev);
WARN_ON(ret < 0);
}
+
+ mutex_lock(&bus->p->mutex);
+ list_for_each_entry(sif, &bus->p->interfaces, node)
+ if (sif->add_dev)
+ sif->add_dev(dev, sif);
+ mutex_unlock(&bus->p->mutex);
}
/**
* bus_remove_device - remove device from bus
* @dev: device to be removed
*
- * - Remove symlink from bus's directory.
+ * - Remove device from all interfaces.
+ * - Remove symlink from bus' directory.
* - Delete device from bus's list.
* - Detach from its driver.
* - Drop reference taken in bus_add_device().
*/
void bus_remove_device(struct device *dev)
{
- if (dev->bus) {
- sysfs_remove_link(&dev->kobj, "subsystem");
- sysfs_remove_link(&dev->bus->p->devices_kset->kobj,
- dev_name(dev));
- device_remove_attrs(dev->bus, dev);
- if (klist_node_attached(&dev->p->knode_bus))
- klist_del(&dev->p->knode_bus);
-
- pr_debug("bus: '%s': remove device %s\n",
- dev->bus->name, dev_name(dev));
- device_release_driver(dev);
- bus_put(dev->bus);
- }
+ struct bus_type *bus = dev->bus;
+ struct subsys_interface *sif;
+
+ if (!bus)
+ return;
+
+ mutex_lock(&bus->p->mutex);
+ list_for_each_entry(sif, &bus->p->interfaces, node)
+ if (sif->remove_dev)
+ sif->remove_dev(dev, sif);
+ mutex_unlock(&bus->p->mutex);
+
+ sysfs_remove_link(&dev->kobj, "subsystem");
+ sysfs_remove_link(&dev->bus->p->devices_kset->kobj,
+ dev_name(dev));
+ device_remove_attrs(dev->bus, dev);
+ if (klist_node_attached(&dev->p->knode_bus))
+ klist_del(&dev->p->knode_bus);
+
+ pr_debug("bus: '%s': remove device %s\n",
+ dev->bus->name, dev_name(dev));
+ device_release_driver(dev);
+ bus_put(dev->bus);
}
static int driver_add_attrs(struct bus_type *bus, struct device_driver *drv)
@@ -847,14 +914,15 @@ static ssize_t bus_uevent_store(struct bus_type *bus,
static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store);
/**
- * bus_register - register a bus with the system.
- * @bus: bus.
+ * __bus_register - register a driver-core subsystem
+ * @bus: bus to register
+ * @key: lockdep class key
*
- * Once we have that, we registered the bus with the kobject
+ * Once we have that, we register the bus with the kobject
* infrastructure, then register the children subsystems it has:
- * the devices and drivers that belong to the bus.
+ * the devices and drivers that belong to the subsystem.
*/
-int bus_register(struct bus_type *bus)
+int __bus_register(struct bus_type *bus, struct lock_class_key *key)
{
int retval;
struct subsys_private *priv;
@@ -898,6 +966,8 @@ int bus_register(struct bus_type *bus)
goto bus_drivers_fail;
}
+ INIT_LIST_HEAD(&priv->interfaces);
+ __mutex_init(&priv->mutex, "subsys mutex", key);
klist_init(&priv->klist_devices, klist_devices_get, klist_devices_put);
klist_init(&priv->klist_drivers, NULL, NULL);
@@ -927,7 +997,7 @@ out:
bus->p = NULL;
return retval;
}
-EXPORT_SYMBOL_GPL(bus_register);
+EXPORT_SYMBOL_GPL(__bus_register);
/**
* bus_unregister - remove a bus from the system
@@ -939,6 +1009,8 @@ EXPORT_SYMBOL_GPL(bus_register);
void bus_unregister(struct bus_type *bus)
{
pr_debug("bus: '%s': unregistering\n", bus->name);
+ if (bus->dev_root)
+ device_unregister(bus->dev_root);
bus_remove_attrs(bus);
remove_probe_files(bus);
kset_unregister(bus->p->drivers_kset);
@@ -1028,10 +1100,194 @@ void bus_sort_breadthfirst(struct bus_type *bus,
}
EXPORT_SYMBOL_GPL(bus_sort_breadthfirst);
+/**
+ * subsys_dev_iter_init - initialize subsys device iterator
+ * @iter: subsys iterator to initialize
+ * @subsys: the subsystem we want to iterate over
+ * @start: the device to start iterating from, if any
+ * @type: device_type of the devices to iterate over, NULL for all
+ *
+ * Initialize subsys iterator @iter such that it iterates over devices
+ * of @subsys. If @start is set, the list iteration will start there,
+ * otherwise if it is NULL, the iteration starts at the beginning of
+ * the list.
+ */
+void subsys_dev_iter_init(struct subsys_dev_iter *iter, struct bus_type *subsys,
+ struct device *start, const struct device_type *type)
+{
+ struct klist_node *start_knode = NULL;
+
+ if (start)
+ start_knode = &start->p->knode_bus;
+ klist_iter_init_node(&subsys->p->klist_devices, &iter->ki, start_knode);
+ iter->type = type;
+}
+EXPORT_SYMBOL_GPL(subsys_dev_iter_init);
+
+/**
+ * subsys_dev_iter_next - iterate to the next device
+ * @iter: subsys iterator to proceed
+ *
+ * Proceed @iter to the next device and return it. Returns NULL if
+ * iteration is complete.
+ *
+ * The returned device is referenced and won't be released until the
+ * iterator proceeds to the next device or is exited. The caller is
+ * free to do whatever it wants to do with the device including
+ * calling back into subsys code.
+ */
+struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter)
+{
+ struct klist_node *knode;
+ struct device *dev;
+
+ for (;;) {
+ knode = klist_next(&iter->ki);
+ if (!knode)
+ return NULL;
+ dev = container_of(knode, struct device_private, knode_bus)->device;
+ if (!iter->type || iter->type == dev->type)
+ return dev;
+ }
+}
+EXPORT_SYMBOL_GPL(subsys_dev_iter_next);
+
+/**
+ * subsys_dev_iter_exit - finish iteration
+ * @iter: subsys iterator to finish
+ *
+ * Finish an iteration. Always call this function after iteration is
+ * complete whether the iteration ran till the end or not.
+ */
+void subsys_dev_iter_exit(struct subsys_dev_iter *iter)
+{
+ klist_iter_exit(&iter->ki);
+}
+EXPORT_SYMBOL_GPL(subsys_dev_iter_exit);
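
The iterator is used the same way subsys_interface_register() below uses it; struct subsys_dev_iter itself is declared in the matching include/linux/device.h change. A minimal sketch that walks every device of a subsystem:

#include <linux/device.h>

static void foo_walk_subsys(struct bus_type *subsys)
{
	struct subsys_dev_iter iter;
	struct device *dev;

	subsys_dev_iter_init(&iter, subsys, NULL, NULL);
	while ((dev = subsys_dev_iter_next(&iter)))
		dev_info(dev, "id %u\n", dev->id);
	subsys_dev_iter_exit(&iter);	/* always exit, even after an early break */
}
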
+
+int subsys_interface_register(struct subsys_interface *sif)
+{
+ struct bus_type *subsys;
+ struct subsys_dev_iter iter;
+ struct device *dev;
+
+ if (!sif || !sif->subsys)
+ return -ENODEV;
+
+ subsys = bus_get(sif->subsys);
+ if (!subsys)
+ return -EINVAL;
+
+ mutex_lock(&subsys->p->mutex);
+ list_add_tail(&sif->node, &subsys->p->interfaces);
+ if (sif->add_dev) {
+ subsys_dev_iter_init(&iter, subsys, NULL, NULL);
+ while ((dev = subsys_dev_iter_next(&iter)))
+ sif->add_dev(dev, sif);
+ subsys_dev_iter_exit(&iter);
+ }
+ mutex_unlock(&subsys->p->mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(subsys_interface_register);
+
+void subsys_interface_unregister(struct subsys_interface *sif)
+{
+ struct bus_type *subsys = sif->subsys;
+ struct subsys_dev_iter iter;
+ struct device *dev;
+
+ if (!sif)
+ return;
+
+ mutex_lock(&subsys->p->mutex);
+ list_del_init(&sif->node);
+ if (sif->remove_dev) {
+ subsys_dev_iter_init(&iter, subsys, NULL, NULL);
+ while ((dev = subsys_dev_iter_next(&iter)))
+ sif->remove_dev(dev, sif);
+ subsys_dev_iter_exit(&iter);
+ }
+ mutex_unlock(&subsys->p->mutex);
+
+ bus_put(subsys);
+}
+EXPORT_SYMBOL_GPL(subsys_interface_unregister);
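
A hedged sketch of a consumer of the new interface hooks; the struct subsys_interface field names come from the matching include/linux/device.h change (not shown here), and the subsystem picked is purely an example:

#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>

static int foo_add_dev(struct device *dev, struct subsys_interface *sif)
{
	dev_info(dev, "device appeared in subsystem\n");
	return 0;
}

static int foo_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	dev_info(dev, "device left subsystem\n");
	return 0;
}

static struct subsys_interface foo_interface = {
	.name		= "foo",
	.subsys		= &cpu_subsys,	/* illustrative; any registered subsystem */
	.add_dev	= foo_add_dev,
	.remove_dev	= foo_remove_dev,
};

/*
 * add_dev() runs for devices already registered and for devices added later;
 * remove_dev() runs on device removal and again for the remaining devices
 * when the interface is unregistered.
 */
static int __init foo_init(void)
{
	return subsys_interface_register(&foo_interface);
}
module_init(foo_init);

static void __exit foo_exit(void)
{
	subsys_interface_unregister(&foo_interface);
}
module_exit(foo_exit);
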
+
+static void system_root_device_release(struct device *dev)
+{
+ kfree(dev);
+}
+/**
+ * subsys_system_register - register a subsystem at /sys/devices/system/
+ * @subsys: system subsystem
+ * @groups: default attributes for the root device
+ *
+ * All 'system' subsystems have a /sys/devices/system/<name> root device
+ * with the name of the subsystem. The root device can carry subsystem-
+ * wide attributes. All registered devices are below this single root
+ * device and are named after the subsystem with a simple enumeration
+ * number appended. The registered devices are not explicitly named;
+ * only 'id' in the device needs to be set.
+ *
+ * Do not use this interface for anything new; it exists only for
+ * compatibility with bad ideas. New subsystems should use plain subsystems,
+ * and subsystem-wide attributes should be added to the subsystem directory
+ * itself rather than to a fake root device placed in
+ * /sys/devices/system/<name>.
+ */
+int subsys_system_register(struct bus_type *subsys,
+ const struct attribute_group **groups)
+{
+ struct device *dev;
+ int err;
+
+ err = bus_register(subsys);
+ if (err < 0)
+ return err;
+
+ dev = kzalloc(sizeof(struct device), GFP_KERNEL);
+ if (!dev) {
+ err = -ENOMEM;
+ goto err_dev;
+ }
+
+ err = dev_set_name(dev, "%s", subsys->name);
+ if (err < 0)
+ goto err_name;
+
+ dev->kobj.parent = &system_kset->kobj;
+ dev->groups = groups;
+ dev->release = system_root_device_release;
+
+ err = device_register(dev);
+ if (err < 0)
+ goto err_dev_reg;
+
+ subsys->dev_root = dev;
+ return 0;
+
+err_dev_reg:
+ put_device(dev);
+ dev = NULL;
+err_name:
+ kfree(dev);
+err_dev:
+ bus_unregister(subsys);
+ return err;
+}
+EXPORT_SYMBOL_GPL(subsys_system_register);
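
A sketch of how a 'system' subsystem would use the new helper, modelled on the cpu.c conversion further down; the "foo" names are hypothetical:

#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>

static struct bus_type foo_subsys = {
	.name		= "foo",	/* appears as /sys/devices/system/foo */
	.dev_name	= "foo",	/* member devices become foo0, foo1, ... from dev->id */
};

static ssize_t foo_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR(version, 0444, foo_version_show, NULL);

static struct attribute *foo_root_attrs[] = {
	&dev_attr_version.attr,
	NULL
};

static struct attribute_group foo_root_attr_group = {
	.attrs = foo_root_attrs,
};

static const struct attribute_group *foo_root_attr_groups[] = {
	&foo_root_attr_group,
	NULL,
};

/* creates the root device and hangs the attribute groups off it */
static int __init foo_subsys_init(void)
{
	return subsys_system_register(&foo_subsys, foo_root_attr_groups);
}
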
+
int __init buses_init(void)
{
bus_kset = kset_create_and_add("bus", &bus_uevent_ops, NULL);
if (!bus_kset)
return -ENOMEM;
+
+ system_kset = kset_create_and_add("system", NULL, &devices_kset->kobj);
+ if (!system_kset)
+ return -ENOMEM;
+
return 0;
}
diff --git a/drivers/base/class.c b/drivers/base/class.c
index b80d91c..03243d4 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -184,9 +184,9 @@ int __class_register(struct class *cls, struct lock_class_key *key)
if (!cp)
return -ENOMEM;
klist_init(&cp->klist_devices, klist_class_dev_get, klist_class_dev_put);
- INIT_LIST_HEAD(&cp->class_interfaces);
+ INIT_LIST_HEAD(&cp->interfaces);
kset_init(&cp->glue_dirs);
- __mutex_init(&cp->class_mutex, "struct class mutex", key);
+ __mutex_init(&cp->mutex, "subsys mutex", key);
error = kobject_set_name(&cp->subsys.kobj, "%s", cls->name);
if (error) {
kfree(cp);
@@ -460,15 +460,15 @@ int class_interface_register(struct class_interface *class_intf)
if (!parent)
return -EINVAL;
- mutex_lock(&parent->p->class_mutex);
- list_add_tail(&class_intf->node, &parent->p->class_interfaces);
+ mutex_lock(&parent->p->mutex);
+ list_add_tail(&class_intf->node, &parent->p->interfaces);
if (class_intf->add_dev) {
class_dev_iter_init(&iter, parent, NULL, NULL);
while ((dev = class_dev_iter_next(&iter)))
class_intf->add_dev(dev, class_intf);
class_dev_iter_exit(&iter);
}
- mutex_unlock(&parent->p->class_mutex);
+ mutex_unlock(&parent->p->mutex);
return 0;
}
@@ -482,7 +482,7 @@ void class_interface_unregister(struct class_interface *class_intf)
if (!parent)
return;
- mutex_lock(&parent->p->class_mutex);
+ mutex_lock(&parent->p->mutex);
list_del_init(&class_intf->node);
if (class_intf->remove_dev) {
class_dev_iter_init(&iter, parent, NULL, NULL);
@@ -490,7 +490,7 @@ void class_interface_unregister(struct class_interface *class_intf)
class_intf->remove_dev(dev, class_intf);
class_dev_iter_exit(&iter);
}
- mutex_unlock(&parent->p->class_mutex);
+ mutex_unlock(&parent->p->mutex);
class_put(parent);
}
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 919daa7..74dda4f 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -118,6 +118,56 @@ static const struct sysfs_ops dev_sysfs_ops = {
.store = dev_attr_store,
};
+#define to_ext_attr(x) container_of(x, struct dev_ext_attribute, attr)
+
+ssize_t device_store_ulong(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+ char *end;
+ unsigned long new = simple_strtoul(buf, &end, 0);
+ if (end == buf)
+ return -EINVAL;
+ *(unsigned long *)(ea->var) = new;
+ /* Always return full write size even if we didn't consume all */
+ return size;
+}
+EXPORT_SYMBOL_GPL(device_store_ulong);
+
+ssize_t device_show_ulong(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+ return snprintf(buf, PAGE_SIZE, "%lx\n", *(unsigned long *)(ea->var));
+}
+EXPORT_SYMBOL_GPL(device_show_ulong);
+
+ssize_t device_store_int(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+ char *end;
+ long new = simple_strtol(buf, &end, 0);
+ if (end == buf || new > INT_MAX || new < INT_MIN)
+ return -EINVAL;
+ *(int *)(ea->var) = new;
+ /* Always return full write size even if we didn't consume all */
+ return size;
+}
+EXPORT_SYMBOL_GPL(device_store_int);
+
+ssize_t device_show_int(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct dev_ext_attribute *ea = to_ext_attr(attr);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", *(int *)(ea->var));
+}
+EXPORT_SYMBOL_GPL(device_show_int);
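
These helpers operate on a struct dev_ext_attribute whose ->var member points at the variable being exposed; the structure itself (and any convenience macros around it) are declared in the corresponding include/linux/device.h change, not shown here. A hand-built sketch with a hypothetical tunable:

#include <linux/device.h>

static unsigned long foo_timeout_ms = 500;

static struct dev_ext_attribute dev_attr_foo_timeout_ms = {
	.attr	= __ATTR(timeout_ms, 0644, device_show_ulong, device_store_ulong),
	.var	= &foo_timeout_ms,
};

/*
 * Typically wired up from probe():
 *	err = device_create_file(dev, &dev_attr_foo_timeout_ms.attr);
 * Writes accept decimal, octal or hex input; reads print the value in hex.
 */
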
/**
* device_release - free device structure.
@@ -198,7 +248,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
if (MAJOR(dev->devt)) {
const char *tmp;
const char *name;
- mode_t mode = 0;
+ umode_t mode = 0;
add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
@@ -464,7 +514,7 @@ static ssize_t show_dev(struct device *dev, struct device_attribute *attr,
static struct device_attribute devt_attr =
__ATTR(dev, S_IRUGO, show_dev, NULL);
-/* kset to create /sys/devices/ */
+/* /sys/devices/ */
struct kset *devices_kset;
/**
@@ -582,6 +632,11 @@ static void klist_children_put(struct klist_node *n)
* may be used for reference counting of @dev after calling this
* function.
*
+ * All fields in @dev must be initialized by the caller to 0, except
+ * for those explicitly set to some other value. The simplest
+ * approach is to use kzalloc() to allocate the structure containing
+ * @dev.
+ *
* NOTE: Use put_device() to give up your reference instead of freeing
* @dev directly once you have called this function.
*/
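
A minimal sketch of the allocation pattern recommended above, with a hypothetical wrapper structure:

#include <linux/device.h>
#include <linux/slab.h>

struct foo_device {
	struct device dev;
	int instance;
};

static void foo_release(struct device *dev)
{
	kfree(container_of(dev, struct foo_device, dev));
}

static struct foo_device *foo_alloc(void)
{
	/* kzalloc() leaves every field of foo->dev zeroed, as required */
	struct foo_device *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return NULL;

	foo->dev.release = foo_release;
	device_initialize(&foo->dev);
	/*
	 * From here on, give up the reference with put_device(&foo->dev);
	 * never kfree(foo) directly.
	 */
	return foo;
}
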
@@ -711,6 +766,10 @@ static struct kobject *get_device_parent(struct device *dev,
return k;
}
+ /* subsystems can specify a default root directory for their devices */
+ if (!parent && dev->bus && dev->bus->dev_root)
+ return &dev->bus->dev_root->kobj;
+
if (parent)
return &parent->kobj;
return NULL;
@@ -731,14 +790,6 @@ static void cleanup_device_parent(struct device *dev)
cleanup_glue_dir(dev, dev->kobj.parent);
}
-static void setup_parent(struct device *dev, struct device *parent)
-{
- struct kobject *kobj;
- kobj = get_device_parent(dev, parent);
- if (kobj)
- dev->kobj.parent = kobj;
-}
-
static int device_add_class_symlinks(struct device *dev)
{
int error;
@@ -884,6 +935,13 @@ int device_private_init(struct device *dev)
* to the global and sibling lists for the device, then
* adds it to the other relevant subsystems of the driver model.
*
+ * Do not call this routine or device_register() more than once for
+ * any device structure. The driver model core is not designed to work
+ * with devices that get unregistered and then spring back to life.
+ * (Among other things, it's very hard to guarantee that all references
+ * to the previous incarnation of @dev have been dropped.) Allocate
+ * and register a fresh new struct device instead.
+ *
* NOTE: _Never_ directly free @dev after calling this function, even
* if it returned an error! Always use put_device() to give up your
* reference instead.
@@ -891,6 +949,7 @@ int device_private_init(struct device *dev)
int device_add(struct device *dev)
{
struct device *parent = NULL;
+ struct kobject *kobj;
struct class_interface *class_intf;
int error = -EINVAL;
@@ -914,6 +973,10 @@ int device_add(struct device *dev)
dev->init_name = NULL;
}
+ /* subsystems can specify simple device enumeration */
+ if (!dev_name(dev) && dev->bus && dev->bus->dev_name)
+ dev_set_name(dev, "%s%u", dev->bus->dev_name, dev->id);
+
if (!dev_name(dev)) {
error = -EINVAL;
goto name_error;
@@ -922,7 +985,9 @@ int device_add(struct device *dev)
pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
parent = get_device(dev->parent);
- setup_parent(dev, parent);
+ kobj = get_device_parent(dev, parent);
+ if (kobj)
+ dev->kobj.parent = kobj;
/* use parent numa_node */
if (parent)
@@ -969,7 +1034,7 @@ int device_add(struct device *dev)
device_pm_add(dev);
/* Notify clients of device addition. This call must come
- * after dpm_sysf_add() and before kobject_uevent().
+ * after dpm_sysfs_add() and before kobject_uevent().
*/
if (dev->bus)
blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
@@ -982,17 +1047,17 @@ int device_add(struct device *dev)
&parent->p->klist_children);
if (dev->class) {
- mutex_lock(&dev->class->p->class_mutex);
+ mutex_lock(&dev->class->p->mutex);
/* tie the class to the device */
klist_add_tail(&dev->knode_class,
&dev->class->p->klist_devices);
/* notify any interfaces that the device is here */
list_for_each_entry(class_intf,
- &dev->class->p->class_interfaces, node)
+ &dev->class->p->interfaces, node)
if (class_intf->add_dev)
class_intf->add_dev(dev, class_intf);
- mutex_unlock(&dev->class->p->class_mutex);
+ mutex_unlock(&dev->class->p->mutex);
}
done:
put_device(dev);
@@ -1037,6 +1102,9 @@ name_error:
* have a clearly defined need to use and refcount the device
* before it is added to the hierarchy.
*
+ * For more information, see the kerneldoc for device_initialize()
+ * and device_add().
+ *
* NOTE: _Never_ directly free @dev after calling this function, even
* if it returned an error! Always use put_device() to give up the
* reference initialized in this function instead.
@@ -1107,15 +1175,15 @@ void device_del(struct device *dev)
if (dev->class) {
device_remove_class_symlinks(dev);
- mutex_lock(&dev->class->p->class_mutex);
+ mutex_lock(&dev->class->p->mutex);
/* notify any interfaces that the device is now gone */
list_for_each_entry(class_intf,
- &dev->class->p->class_interfaces, node)
+ &dev->class->p->interfaces, node)
if (class_intf->remove_dev)
class_intf->remove_dev(dev, class_intf);
/* remove the device from the class list */
klist_del(&dev->knode_class);
- mutex_unlock(&dev->class->p->class_mutex);
+ mutex_unlock(&dev->class->p->mutex);
}
device_remove_file(dev, &uevent_attr);
device_remove_attrs(dev);
@@ -1182,7 +1250,7 @@ static struct device *next_device(struct klist_iter *i)
* freed by the caller.
*/
const char *device_get_devnode(struct device *dev,
- mode_t *mode, const char **tmp)
+ umode_t *mode, const char **tmp)
{
char *s;
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 251acea..4dabf50 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -1,8 +1,8 @@
/*
- * drivers/base/cpu.c - basic CPU class support
+ * CPU subsystem support
*/
-#include <linux/sysdev.h>
+#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
@@ -11,43 +11,44 @@
#include <linux/device.h>
#include <linux/node.h>
#include <linux/gfp.h>
+#include <linux/percpu.h>
#include "base.h"
-static struct sysdev_class_attribute *cpu_sysdev_class_attrs[];
-
-struct sysdev_class cpu_sysdev_class = {
+struct bus_type cpu_subsys = {
.name = "cpu",
- .attrs = cpu_sysdev_class_attrs,
+ .dev_name = "cpu",
};
-EXPORT_SYMBOL(cpu_sysdev_class);
+EXPORT_SYMBOL_GPL(cpu_subsys);
-static DEFINE_PER_CPU(struct sys_device *, cpu_sys_devices);
+static DEFINE_PER_CPU(struct device *, cpu_sys_devices);
#ifdef CONFIG_HOTPLUG_CPU
-static ssize_t show_online(struct sys_device *dev, struct sysdev_attribute *attr,
+static ssize_t show_online(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
- struct cpu *cpu = container_of(dev, struct cpu, sysdev);
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
- return sprintf(buf, "%u\n", !!cpu_online(cpu->sysdev.id));
+ return sprintf(buf, "%u\n", !!cpu_online(cpu->dev.id));
}
-static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribute *attr,
- const char *buf, size_t count)
+static ssize_t __ref store_online(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
{
- struct cpu *cpu = container_of(dev, struct cpu, sysdev);
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
ssize_t ret;
cpu_hotplug_driver_lock();
switch (buf[0]) {
case '0':
- ret = cpu_down(cpu->sysdev.id);
+ ret = cpu_down(cpu->dev.id);
if (!ret)
kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
break;
case '1':
- ret = cpu_up(cpu->sysdev.id);
+ ret = cpu_up(cpu->dev.id);
if (!ret)
kobject_uevent(&dev->kobj, KOBJ_ONLINE);
break;
@@ -60,44 +61,44 @@ static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribut
ret = count;
return ret;
}
-static SYSDEV_ATTR(online, 0644, show_online, store_online);
+static DEVICE_ATTR(online, 0644, show_online, store_online);
static void __cpuinit register_cpu_control(struct cpu *cpu)
{
- sysdev_create_file(&cpu->sysdev, &attr_online);
+ device_create_file(&cpu->dev, &dev_attr_online);
}
void unregister_cpu(struct cpu *cpu)
{
- int logical_cpu = cpu->sysdev.id;
+ int logical_cpu = cpu->dev.id;
unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));
- sysdev_remove_file(&cpu->sysdev, &attr_online);
+ device_remove_file(&cpu->dev, &dev_attr_online);
- sysdev_unregister(&cpu->sysdev);
+ device_unregister(&cpu->dev);
per_cpu(cpu_sys_devices, logical_cpu) = NULL;
return;
}
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
-static ssize_t cpu_probe_store(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t cpu_probe_store(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
return arch_cpu_probe(buf, count);
}
-static ssize_t cpu_release_store(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t cpu_release_store(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
return arch_cpu_release(buf, count);
}
-static SYSDEV_CLASS_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
-static SYSDEV_CLASS_ATTR(release, S_IWUSR, NULL, cpu_release_store);
+static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
+static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
#else /* ... !CONFIG_HOTPLUG_CPU */
@@ -109,15 +110,15 @@ static inline void register_cpu_control(struct cpu *cpu)
#ifdef CONFIG_KEXEC
#include <linux/kexec.h>
-static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute *attr,
+static ssize_t show_crash_notes(struct device *dev, struct device_attribute *attr,
char *buf)
{
- struct cpu *cpu = container_of(dev, struct cpu, sysdev);
+ struct cpu *cpu = container_of(dev, struct cpu, dev);
ssize_t rc;
unsigned long long addr;
int cpunum;
- cpunum = cpu->sysdev.id;
+ cpunum = cpu->dev.id;
/*
* Might be reading other cpu's data based on which cpu read thread
@@ -129,7 +130,7 @@ static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute
rc = sprintf(buf, "%Lx\n", addr);
return rc;
}
-static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL);
+static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL);
#endif
/*
@@ -137,12 +138,12 @@ static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL);
*/
struct cpu_attr {
- struct sysdev_class_attribute attr;
+ struct device_attribute attr;
const struct cpumask *const * const map;
};
-static ssize_t show_cpus_attr(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t show_cpus_attr(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
@@ -153,10 +154,10 @@ static ssize_t show_cpus_attr(struct sysdev_class *class,
return n;
}
-#define _CPU_ATTR(name, map) \
- { _SYSDEV_CLASS_ATTR(name, 0444, show_cpus_attr, NULL), map }
+#define _CPU_ATTR(name, map) \
+ { __ATTR(name, 0444, show_cpus_attr, NULL), map }
-/* Keep in sync with cpu_sysdev_class_attrs */
+/* Keep in sync with cpu_subsys_attrs */
static struct cpu_attr cpu_attrs[] = {
_CPU_ATTR(online, &cpu_online_mask),
_CPU_ATTR(possible, &cpu_possible_mask),
@@ -166,19 +167,19 @@ static struct cpu_attr cpu_attrs[] = {
/*
* Print values for NR_CPUS and offlined cpus
*/
-static ssize_t print_cpus_kernel_max(struct sysdev_class *class,
- struct sysdev_class_attribute *attr, char *buf)
+static ssize_t print_cpus_kernel_max(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
return n;
}
-static SYSDEV_CLASS_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);
+static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);
/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
unsigned int total_cpus;
-static ssize_t print_cpus_offline(struct sysdev_class *class,
- struct sysdev_class_attribute *attr, char *buf)
+static ssize_t print_cpus_offline(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int n = 0, len = PAGE_SIZE-2;
cpumask_var_t offline;
@@ -205,7 +206,26 @@ static ssize_t print_cpus_offline(struct sysdev_class *class,
n += snprintf(&buf[n], len - n, "\n");
return n;
}
-static SYSDEV_CLASS_ATTR(offline, 0444, print_cpus_offline, NULL);
+static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
+
+static void cpu_device_release(struct device *dev)
+{
+ /*
+ * This is an empty function to prevent the driver core from spitting a
+ * warning at us. Yes, I know this is directly opposite of what the
+ * documentation for the driver core and kobjects say, and the author
+ * of this code has already been publicly ridiculed for doing
+ * something as foolish as this. However, at this point in time, it is
+ * the only way to handle the issue of statically allocated cpu
+ * devices. The different architectures will have their cpu device
+ * code reworked to properly handle this in the near future, so this
+ * function will then be changed to correctly free up the memory held
+ * by the cpu device.
+ *
+ * Never copy this way of doing things, or you too will be made fun of
+ * on the linux-kernel list, you have been warned.
+ */
+}
/*
* register_cpu - Setup a sysfs device for a CPU.
@@ -218,57 +238,89 @@ static SYSDEV_CLASS_ATTR(offline, 0444, print_cpus_offline, NULL);
int __cpuinit register_cpu(struct cpu *cpu, int num)
{
int error;
- cpu->node_id = cpu_to_node(num);
- cpu->sysdev.id = num;
- cpu->sysdev.cls = &cpu_sysdev_class;
-
- error = sysdev_register(&cpu->sysdev);
+ cpu->node_id = cpu_to_node(num);
+ memset(&cpu->dev, 0x00, sizeof(struct device));
+ cpu->dev.id = num;
+ cpu->dev.bus = &cpu_subsys;
+ cpu->dev.release = cpu_device_release;
+ error = device_register(&cpu->dev);
if (!error && cpu->hotpluggable)
register_cpu_control(cpu);
if (!error)
- per_cpu(cpu_sys_devices, num) = &cpu->sysdev;
+ per_cpu(cpu_sys_devices, num) = &cpu->dev;
if (!error)
register_cpu_under_node(num, cpu_to_node(num));
#ifdef CONFIG_KEXEC
if (!error)
- error = sysdev_create_file(&cpu->sysdev, &attr_crash_notes);
+ error = device_create_file(&cpu->dev, &dev_attr_crash_notes);
#endif
return error;
}
-struct sys_device *get_cpu_sysdev(unsigned cpu)
+struct device *get_cpu_device(unsigned cpu)
{
if (cpu < nr_cpu_ids && cpu_possible(cpu))
return per_cpu(cpu_sys_devices, cpu);
else
return NULL;
}
-EXPORT_SYMBOL_GPL(get_cpu_sysdev);
+EXPORT_SYMBOL_GPL(get_cpu_device);
+
+static struct attribute *cpu_root_attrs[] = {
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
+ &dev_attr_probe.attr,
+ &dev_attr_release.attr,
+#endif
+ &cpu_attrs[0].attr.attr,
+ &cpu_attrs[1].attr.attr,
+ &cpu_attrs[2].attr.attr,
+ &dev_attr_kernel_max.attr,
+ &dev_attr_offline.attr,
+ NULL
+};
+
+static struct attribute_group cpu_root_attr_group = {
+ .attrs = cpu_root_attrs,
+};
-int __init cpu_dev_init(void)
+static const struct attribute_group *cpu_root_attr_groups[] = {
+ &cpu_root_attr_group,
+ NULL,
+};
+
+bool cpu_is_hotpluggable(unsigned cpu)
{
- int err;
+ struct device *dev = get_cpu_device(cpu);
+ return dev && container_of(dev, struct cpu, dev)->hotpluggable;
+}
+EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);
- err = sysdev_class_register(&cpu_sysdev_class);
-#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
- if (!err)
- err = sched_create_sysfs_power_savings_entries(&cpu_sysdev_class);
+#ifdef CONFIG_GENERIC_CPU_DEVICES
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
#endif
- return err;
+static void __init cpu_dev_register_generic(void)
+{
+#ifdef CONFIG_GENERIC_CPU_DEVICES
+ int i;
+
+ for_each_possible_cpu(i) {
+ if (register_cpu(&per_cpu(cpu_devices, i), i))
+ panic("Failed to register CPU device");
+ }
+#endif
}
-static struct sysdev_class_attribute *cpu_sysdev_class_attrs[] = {
-#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
- &attr_probe,
- &attr_release,
+void __init cpu_dev_init(void)
+{
+ if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
+ panic("Failed to register CPU subsystem");
+
+ cpu_dev_register_generic();
+
+#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
+ sched_create_sysfs_power_savings_entries(cpu_subsys.dev_root);
#endif
- &cpu_attrs[0].attr,
- &cpu_attrs[1].attr,
- &cpu_attrs[2].attr,
- &attr_kernel_max,
- &attr_offline,
- NULL
-};
+}
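
Callers that used get_cpu_sysdev() now deal in a plain struct device; a short sketch (the declarations come from the matching linux/cpu.h update, not shown here):

#include <linux/cpu.h>
#include <linux/device.h>

static void foo_report_cpu(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);	/* NULL unless cpu is possible */

	if (!dev)
		return;

	dev_info(dev, "cpu%u is %shotpluggable\n",
		 cpu, cpu_is_hotpluggable(cpu) ? "" : "not ");
}
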
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 65cd748..524bf96 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -639,7 +639,7 @@ EXPORT_SYMBOL_GPL(devm_kzalloc);
* @dev: Device this memory belongs to
* @p: Memory to free
*
- * Free memory allocated with dev_kzalloc().
+ * Free memory allocated with devm_kzalloc().
*/
void devm_kfree(struct device *dev, void *p)
{
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index a4760e0..8493536 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -40,7 +40,7 @@ static struct req {
struct completion done;
int err;
const char *name;
- mode_t mode; /* 0 => delete */
+ umode_t mode; /* 0 => delete */
struct device *dev;
} *requests;
@@ -142,7 +142,7 @@ int devtmpfs_delete_node(struct device *dev)
return req.err;
}
-static int dev_mkdir(const char *name, mode_t mode)
+static int dev_mkdir(const char *name, umode_t mode)
{
struct dentry *dentry;
struct path path;
@@ -189,7 +189,7 @@ static int create_path(const char *nodepath)
return err;
}
-static int handle_create(const char *nodename, mode_t mode, struct device *dev)
+static int handle_create(const char *nodename, umode_t mode, struct device *dev)
{
struct dentry *dentry;
struct path path;
@@ -378,7 +378,7 @@ int devtmpfs_mount(const char *mntdir)
static DECLARE_COMPLETION(setup_done);
-static int handle(const char *name, mode_t mode, struct device *dev)
+static int handle(const char *name, umode_t mode, struct device *dev)
{
if (mode)
return handle_create(name, mode, dev);
@@ -413,10 +413,9 @@ static int devtmpfsd(void *p)
}
spin_lock(&req_lock);
}
- set_current_state(TASK_INTERRUPTIBLE);
+ __set_current_state(TASK_INTERRUPTIBLE);
spin_unlock(&req_lock);
schedule();
- __set_current_state(TASK_RUNNING);
}
return 0;
out:
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
new file mode 100644
index 0000000..e38ad24
--- /dev/null
+++ b/drivers/base/dma-buf.c
@@ -0,0 +1,291 @@
+/*
+ * Framework for buffer objects that can be shared across devices/subsystems.
+ *
+ * Copyright(C) 2011 Linaro Limited. All rights reserved.
+ * Author: Sumit Semwal <sumit.semwal@ti.com>
+ *
+ * Many thanks to linaro-mm-sig list, and specially
+ * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
+ * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
+ * refining of this idea.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/dma-buf.h>
+#include <linux/anon_inodes.h>
+#include <linux/export.h>
+
+static inline int is_dma_buf_file(struct file *);
+
+static int dma_buf_release(struct inode *inode, struct file *file)
+{
+ struct dma_buf *dmabuf;
+
+ if (!is_dma_buf_file(file))
+ return -EINVAL;
+
+ dmabuf = file->private_data;
+
+ dmabuf->ops->release(dmabuf);
+ kfree(dmabuf);
+ return 0;
+}
+
+static const struct file_operations dma_buf_fops = {
+ .release = dma_buf_release,
+};
+
+/*
+ * is_dma_buf_file - Check if struct file* is associated with dma_buf
+ */
+static inline int is_dma_buf_file(struct file *file)
+{
+ return file->f_op == &dma_buf_fops;
+}
+
+/**
+ * dma_buf_export - Creates a new dma_buf, and associates an anon file
+ * with this buffer, so it can be exported.
+ * Also connect the allocator specific data and ops to the buffer.
+ *
+ * @priv: [in] Attach private data of allocator to this buffer
+ * @ops: [in] Attach allocator-defined dma buf ops to the new buffer.
+ * @size: [in] Size of the buffer
+ * @flags: [in] mode flags for the file.
+ *
+ * Returns, on success, a newly created dma_buf object, which wraps the
+ * supplied private data and operations for dma_buf_ops. If the ops are
+ * missing or allocating struct dma_buf fails, an ERR_PTR() encoded negative
+ * error is returned.
+ *
+ */
+struct dma_buf *dma_buf_export(void *priv, struct dma_buf_ops *ops,
+ size_t size, int flags)
+{
+ struct dma_buf *dmabuf;
+ struct file *file;
+
+ if (WARN_ON(!priv || !ops
+ || !ops->map_dma_buf
+ || !ops->unmap_dma_buf
+ || !ops->release)) {
+ return ERR_PTR(-EINVAL);
+ }
+
+ dmabuf = kzalloc(sizeof(struct dma_buf), GFP_KERNEL);
+ if (dmabuf == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ dmabuf->priv = priv;
+ dmabuf->ops = ops;
+ dmabuf->size = size;
+
+ file = anon_inode_getfile("dmabuf", &dma_buf_fops, dmabuf, flags);
+
+ dmabuf->file = file;
+
+ mutex_init(&dmabuf->lock);
+ INIT_LIST_HEAD(&dmabuf->attachments);
+
+ return dmabuf;
+}
+EXPORT_SYMBOL_GPL(dma_buf_export);
+
+
+/**
+ * dma_buf_fd - returns a file descriptor for the given dma_buf
+ * @dmabuf: [in] pointer to dma_buf for which fd is required.
+ *
+ * On success, returns an associated 'fd'. Else, returns error.
+ */
+int dma_buf_fd(struct dma_buf *dmabuf)
+{
+ int error, fd;
+
+ if (!dmabuf || !dmabuf->file)
+ return -EINVAL;
+
+ error = get_unused_fd();
+ if (error < 0)
+ return error;
+ fd = error;
+
+ fd_install(fd, dmabuf->file);
+
+ return fd;
+}
+EXPORT_SYMBOL_GPL(dma_buf_fd);
+
+/**
+ * dma_buf_get - returns the dma_buf structure related to an fd
+ * @fd: [in] fd associated with the dma_buf to be returned
+ *
+ * On success, returns the dma_buf structure associated with an fd; uses
+ * the file's refcounting done by fget() to increase the refcount. Returns
+ * ERR_PTR otherwise.
+ */
+struct dma_buf *dma_buf_get(int fd)
+{
+ struct file *file;
+
+ file = fget(fd);
+
+ if (!file)
+ return ERR_PTR(-EBADF);
+
+ if (!is_dma_buf_file(file)) {
+ fput(file);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return file->private_data;
+}
+EXPORT_SYMBOL_GPL(dma_buf_get);
+
+/**
+ * dma_buf_put - decreases refcount of the buffer
+ * @dmabuf: [in] buffer to reduce refcount of
+ *
+ * Uses file's refcounting done implicitly by fput()
+ */
+void dma_buf_put(struct dma_buf *dmabuf)
+{
+ if (WARN_ON(!dmabuf || !dmabuf->file))
+ return;
+
+ fput(dmabuf->file);
+}
+EXPORT_SYMBOL_GPL(dma_buf_put);
+
+/**
+ * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
+ * calls attach() of dma_buf_ops to allow device-specific attach functionality
+ * @dmabuf: [in] buffer to attach device to.
+ * @dev: [in] device to be attached.
+ *
+ * Returns struct dma_buf_attachment * for this attachment; may return negative
+ * error codes.
+ *
+ */
+struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
+ struct device *dev)
+{
+ struct dma_buf_attachment *attach;
+ int ret;
+
+ if (WARN_ON(!dmabuf || !dev || !dmabuf->ops))
+ return ERR_PTR(-EINVAL);
+
+ attach = kzalloc(sizeof(struct dma_buf_attachment), GFP_KERNEL);
+ if (attach == NULL)
+ goto err_alloc;
+
+ mutex_lock(&dmabuf->lock);
+
+ attach->dev = dev;
+ attach->dmabuf = dmabuf;
+ if (dmabuf->ops->attach) {
+ ret = dmabuf->ops->attach(dmabuf, dev, attach);
+ if (ret)
+ goto err_attach;
+ }
+ list_add(&attach->node, &dmabuf->attachments);
+
+ mutex_unlock(&dmabuf->lock);
+ return attach;
+
+err_alloc:
+ return ERR_PTR(-ENOMEM);
+err_attach:
+ kfree(attach);
+ mutex_unlock(&dmabuf->lock);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(dma_buf_attach);
+
+/**
+ * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
+ * optionally calls detach() of dma_buf_ops for device-specific detach
+ * @dmabuf: [in] buffer to detach from.
+ * @attach: [in] attachment to be detached; it is freed by this call.
+ *
+ */
+void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
+{
+ if (WARN_ON(!dmabuf || !attach || !dmabuf->ops))
+ return;
+
+ mutex_lock(&dmabuf->lock);
+ list_del(&attach->node);
+ if (dmabuf->ops->detach)
+ dmabuf->ops->detach(dmabuf, attach);
+
+ mutex_unlock(&dmabuf->lock);
+ kfree(attach);
+}
+EXPORT_SYMBOL_GPL(dma_buf_detach);
+
+/**
+ * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
+ * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
+ * dma_buf_ops.
+ * @attach: [in] attachment whose scatterlist is to be returned
+ * @direction: [in] direction of DMA transfer
+ *
+ * Returns the sg_table containing the mapped scatterlist; may return NULL
+ * or an ERR_PTR on failure.
+ *
+ */
+struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
+ enum dma_data_direction direction)
+{
+ struct sg_table *sg_table = ERR_PTR(-EINVAL);
+
+ might_sleep();
+
+ if (WARN_ON(!attach || !attach->dmabuf || !attach->dmabuf->ops))
+ return ERR_PTR(-EINVAL);
+
+ mutex_lock(&attach->dmabuf->lock);
+ if (attach->dmabuf->ops->map_dma_buf)
+ sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+ mutex_unlock(&attach->dmabuf->lock);
+
+ return sg_table;
+}
+EXPORT_SYMBOL_GPL(dma_buf_map_attachment);
+
+/**
+ * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
+ * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
+ * dma_buf_ops.
+ * @attach: [in] attachment to unmap buffer from
+ * @sg_table: [in] scatterlist info of the buffer to unmap
+ *
+ */
+void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
+ struct sg_table *sg_table)
+{
+ if (WARN_ON(!attach || !attach->dmabuf || !sg_table
+ || !attach->dmabuf->ops))
+ return;
+
+ mutex_lock(&attach->dmabuf->lock);
+ if (attach->dmabuf->ops->unmap_dma_buf)
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table);
+ mutex_unlock(&attach->dmabuf->lock);
+
+}
+EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
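The importing side, again only as a sketch with hypothetical names: the importer resolves the fd it received, attaches its device, and asks for a device-address scatterlist when it actually needs to access the buffer.

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical importer; "fd" was received from the exporting process. */
static int my_import(struct device *dev, int fd)
{
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	dmabuf = dma_buf_get(fd);		/* takes a file reference */
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attach)) {
		dma_buf_put(dmabuf);
		return PTR_ERR(attach);
	}

	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR_OR_NULL(sgt)) {
		dma_buf_detach(dmabuf, attach);
		dma_buf_put(dmabuf);
		return sgt ? PTR_ERR(sgt) : -ENOMEM;
	}

	/* ... program the device with the sg_table ... */

	dma_buf_unmap_attachment(attach, sgt);
	dma_buf_detach(dmabuf, attach);
	dma_buf_put(dmabuf);
	return 0;
}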
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 06ed6b4..6c9387d 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -226,13 +226,13 @@ static ssize_t firmware_loading_store(struct device *dev,
int loading = simple_strtol(buf, NULL, 10);
int i;
+ mutex_lock(&fw_lock);
+
+ if (!fw_priv->fw)
+ goto out;
+
switch (loading) {
case 1:
- mutex_lock(&fw_lock);
- if (!fw_priv->fw) {
- mutex_unlock(&fw_lock);
- break;
- }
firmware_free_data(fw_priv->fw);
memset(fw_priv->fw, 0, sizeof(struct firmware));
/* If the pages are not owned by 'struct firmware' */
@@ -243,7 +243,6 @@ static ssize_t firmware_loading_store(struct device *dev,
fw_priv->page_array_size = 0;
fw_priv->nr_pages = 0;
set_bit(FW_STATUS_LOADING, &fw_priv->status);
- mutex_unlock(&fw_lock);
break;
case 0:
if (test_bit(FW_STATUS_LOADING, &fw_priv->status)) {
@@ -274,7 +273,8 @@ static ssize_t firmware_loading_store(struct device *dev,
fw_load_abort(fw_priv);
break;
}
-
+out:
+ mutex_unlock(&fw_lock);
return count;
}
@@ -525,8 +525,7 @@ static int _request_firmware(const struct firmware **firmware_p,
if (!firmware) {
dev_err(device, "%s: kmalloc(struct firmware) failed\n",
__func__);
- retval = -ENOMEM;
- goto out;
+ return -ENOMEM;
}
if (fw_get_builtin_firmware(firmware, name)) {
@@ -534,6 +533,8 @@ static int _request_firmware(const struct firmware **firmware_p,
return 0;
}
+ read_lock_usermodehelper();
+
if (WARN_ON(usermodehelper_is_disabled())) {
dev_err(device, "firmware: %s will not be loaded\n", name);
retval = -EBUSY;
@@ -572,6 +573,8 @@ static int _request_firmware(const struct firmware **firmware_p,
fw_destroy_instance(fw_priv);
out:
+ read_unlock_usermodehelper();
+
if (retval) {
release_firmware(firmware);
*firmware_p = NULL;
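The reshuffled hunks above take fw_lock once at the top of the store handler and leave through a single out: label instead of locking and unlocking inside each case. Schematically (all names below are illustrative, not from firmware_class.c):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);
static void *example_state;

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	int request = simple_strtol(buf, NULL, 10);

	mutex_lock(&example_lock);

	if (!example_state)
		goto out;	/* nothing to operate on; fall through to unlock */

	switch (request) {
	case 1:
		/* mutate example_state under the single lock */
		break;
	case 0:
		break;
	default:
		break;
	}
out:
	mutex_unlock(&example_lock);
	return count;
}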
diff --git a/drivers/base/init.c b/drivers/base/init.c
index c8a934e..c16f0b8 100644
--- a/drivers/base/init.c
+++ b/drivers/base/init.c
@@ -31,7 +31,6 @@ void __init driver_init(void)
* core core pieces.
*/
platform_bus_init();
- system_bus_init();
cpu_dev_init();
memory_dev_init();
}
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 8272d92..9e60dbe 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -1,5 +1,5 @@
/*
- * drivers/base/memory.c - basic Memory class support
+ * Memory subsystem support
*
* Written by Matt Tolentino <matthew.e.tolentino@intel.com>
* Dave Hansen <haveblue@us.ibm.com>
@@ -10,7 +10,6 @@
* SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
*/
-#include <linux/sysdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/topology.h>
@@ -38,26 +37,9 @@ static inline int base_memory_block_id(int section_nr)
return section_nr / sections_per_block;
}
-static struct sysdev_class memory_sysdev_class = {
+static struct bus_type memory_subsys = {
.name = MEMORY_CLASS_NAME,
-};
-
-static const char *memory_uevent_name(struct kset *kset, struct kobject *kobj)
-{
- return MEMORY_CLASS_NAME;
-}
-
-static int memory_uevent(struct kset *kset, struct kobject *obj,
- struct kobj_uevent_env *env)
-{
- int retval = 0;
-
- return retval;
-}
-
-static const struct kset_uevent_ops memory_uevent_ops = {
- .name = memory_uevent_name,
- .uevent = memory_uevent,
+ .dev_name = MEMORY_CLASS_NAME,
};
static BLOCKING_NOTIFIER_HEAD(memory_chain);
@@ -96,21 +78,21 @@ int register_memory(struct memory_block *memory)
{
int error;
- memory->sysdev.cls = &memory_sysdev_class;
- memory->sysdev.id = memory->start_section_nr / sections_per_block;
+ memory->dev.bus = &memory_subsys;
+ memory->dev.id = memory->start_section_nr / sections_per_block;
- error = sysdev_register(&memory->sysdev);
+ error = device_register(&memory->dev);
return error;
}
static void
unregister_memory(struct memory_block *memory)
{
- BUG_ON(memory->sysdev.cls != &memory_sysdev_class);
+ BUG_ON(memory->dev.bus != &memory_subsys);
/* drop the ref. we got in remove_memory_block() */
- kobject_put(&memory->sysdev.kobj);
- sysdev_unregister(&memory->sysdev);
+ kobject_put(&memory->dev.kobj);
+ device_unregister(&memory->dev);
}
unsigned long __weak memory_block_size_bytes(void)
@@ -138,22 +120,22 @@ static unsigned long get_memory_block_size(void)
* uses.
*/
-static ssize_t show_mem_start_phys_index(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_mem_start_phys_index(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct memory_block *mem =
- container_of(dev, struct memory_block, sysdev);
+ container_of(dev, struct memory_block, dev);
unsigned long phys_index;
phys_index = mem->start_section_nr / sections_per_block;
return sprintf(buf, "%08lx\n", phys_index);
}
-static ssize_t show_mem_end_phys_index(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_mem_end_phys_index(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct memory_block *mem =
- container_of(dev, struct memory_block, sysdev);
+ container_of(dev, struct memory_block, dev);
unsigned long phys_index;
phys_index = mem->end_section_nr / sections_per_block;
@@ -163,13 +145,13 @@ static ssize_t show_mem_end_phys_index(struct sys_device *dev,
/*
* Show whether the section of memory is likely to be hot-removable
*/
-static ssize_t show_mem_removable(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_mem_removable(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
unsigned long i, pfn;
int ret = 1;
struct memory_block *mem =
- container_of(dev, struct memory_block, sysdev);
+ container_of(dev, struct memory_block, dev);
for (i = 0; i < sections_per_block; i++) {
pfn = section_nr_to_pfn(mem->start_section_nr + i);
@@ -182,11 +164,11 @@ static ssize_t show_mem_removable(struct sys_device *dev,
/*
* online, offline, going offline, etc.
*/
-static ssize_t show_mem_state(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_mem_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct memory_block *mem =
- container_of(dev, struct memory_block, sysdev);
+ container_of(dev, struct memory_block, dev);
ssize_t len = 0;
/*
@@ -313,24 +295,35 @@ static int memory_block_change_state(struct memory_block *mem,
ret = memory_block_action(mem->start_section_nr, to_state);
- if (ret)
+ if (ret) {
mem->state = from_state_req;
- else
- mem->state = to_state;
+ goto out;
+ }
+ mem->state = to_state;
+ switch (mem->state) {
+ case MEM_OFFLINE:
+ kobject_uevent(&mem->dev.kobj, KOBJ_OFFLINE);
+ break;
+ case MEM_ONLINE:
+ kobject_uevent(&mem->dev.kobj, KOBJ_ONLINE);
+ break;
+ default:
+ break;
+ }
out:
mutex_unlock(&mem->state_mutex);
return ret;
}
static ssize_t
-store_mem_state(struct sys_device *dev,
- struct sysdev_attribute *attr, const char *buf, size_t count)
+store_mem_state(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
{
struct memory_block *mem;
int ret = -EINVAL;
- mem = container_of(dev, struct memory_block, sysdev);
+ mem = container_of(dev, struct memory_block, dev);
if (!strncmp(buf, "online", min((int)count, 6)))
ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
@@ -351,41 +344,41 @@ store_mem_state(struct sys_device *dev,
* s.t. if I offline all of these sections I can then
* remove the physical device?
*/
-static ssize_t show_phys_device(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t show_phys_device(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct memory_block *mem =
- container_of(dev, struct memory_block, sysdev);
+ container_of(dev, struct memory_block, dev);
return sprintf(buf, "%d\n", mem->phys_device);
}
-static SYSDEV_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL);
-static SYSDEV_ATTR(end_phys_index, 0444, show_mem_end_phys_index, NULL);
-static SYSDEV_ATTR(state, 0644, show_mem_state, store_mem_state);
-static SYSDEV_ATTR(phys_device, 0444, show_phys_device, NULL);
-static SYSDEV_ATTR(removable, 0444, show_mem_removable, NULL);
+static DEVICE_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL);
+static DEVICE_ATTR(end_phys_index, 0444, show_mem_end_phys_index, NULL);
+static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state);
+static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL);
+static DEVICE_ATTR(removable, 0444, show_mem_removable, NULL);
#define mem_create_simple_file(mem, attr_name) \
- sysdev_create_file(&mem->sysdev, &attr_##attr_name)
+ device_create_file(&mem->dev, &dev_attr_##attr_name)
#define mem_remove_simple_file(mem, attr_name) \
- sysdev_remove_file(&mem->sysdev, &attr_##attr_name)
+ device_remove_file(&mem->dev, &dev_attr_##attr_name)
/*
* Block size attribute stuff
*/
static ssize_t
-print_block_size(struct sysdev_class *class, struct sysdev_class_attribute *attr,
+print_block_size(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%lx\n", get_memory_block_size());
}
-static SYSDEV_CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL);
+static DEVICE_ATTR(block_size_bytes, 0444, print_block_size, NULL);
static int block_size_init(void)
{
- return sysfs_create_file(&memory_sysdev_class.kset.kobj,
- &attr_block_size_bytes.attr);
+ return device_create_file(memory_subsys.dev_root,
+ &dev_attr_block_size_bytes);
}
/*
@@ -396,7 +389,7 @@ static int block_size_init(void)
*/
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t
-memory_probe_store(struct class *class, struct class_attribute *attr,
+memory_probe_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
u64 phys_addr;
@@ -423,12 +416,11 @@ memory_probe_store(struct class *class, struct class_attribute *attr,
out:
return ret;
}
-static CLASS_ATTR(probe, S_IWUSR, NULL, memory_probe_store);
+static DEVICE_ATTR(probe, S_IWUSR, NULL, memory_probe_store);
static int memory_probe_init(void)
{
- return sysfs_create_file(&memory_sysdev_class.kset.kobj,
- &class_attr_probe.attr);
+ return device_create_file(memory_subsys.dev_root, &dev_attr_probe);
}
#else
static inline int memory_probe_init(void)
@@ -444,8 +436,8 @@ static inline int memory_probe_init(void)
/* Soft offline a page */
static ssize_t
-store_soft_offline_page(struct class *class,
- struct class_attribute *attr,
+store_soft_offline_page(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
@@ -463,8 +455,8 @@ store_soft_offline_page(struct class *class,
/* Forcibly offline a page, including killing processes. */
static ssize_t
-store_hard_offline_page(struct class *class,
- struct class_attribute *attr,
+store_hard_offline_page(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
int ret;
@@ -478,18 +470,18 @@ store_hard_offline_page(struct class *class,
return ret ? ret : count;
}
-static CLASS_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page);
-static CLASS_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page);
+static DEVICE_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page);
+static DEVICE_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page);
static __init int memory_fail_init(void)
{
int err;
- err = sysfs_create_file(&memory_sysdev_class.kset.kobj,
- &class_attr_soft_offline_page.attr);
+ err = device_create_file(memory_subsys.dev_root,
+ &dev_attr_soft_offline_page);
if (!err)
- err = sysfs_create_file(&memory_sysdev_class.kset.kobj,
- &class_attr_hard_offline_page.attr);
+ err = device_create_file(memory_subsys.dev_root,
+ &dev_attr_hard_offline_page);
return err;
}
#else
@@ -509,31 +501,23 @@ int __weak arch_get_memory_phys_device(unsigned long start_pfn)
return 0;
}
+/*
+ * A reference for the returned object is held and the reference for the
+ * hinted object is released.
+ */
struct memory_block *find_memory_block_hinted(struct mem_section *section,
struct memory_block *hint)
{
- struct kobject *kobj;
- struct sys_device *sysdev;
- struct memory_block *mem;
- char name[sizeof(MEMORY_CLASS_NAME) + 9 + 1];
int block_id = base_memory_block_id(__section_nr(section));
+ struct device *hintdev = hint ? &hint->dev : NULL;
+ struct device *dev;
- kobj = hint ? &hint->sysdev.kobj : NULL;
-
- /*
- * This only works because we know that section == sysdev->id
- * slightly redundant with sysdev_register()
- */
- sprintf(&name[0], "%s%d", MEMORY_CLASS_NAME, block_id);
-
- kobj = kset_find_obj_hinted(&memory_sysdev_class.kset, name, kobj);
- if (!kobj)
+ dev = subsys_find_device_by_id(&memory_subsys, block_id, hintdev);
+ if (hint)
+ put_device(&hint->dev);
+ if (!dev)
return NULL;
-
- sysdev = container_of(kobj, struct sys_device, kobj);
- mem = container_of(sysdev, struct memory_block, sysdev);
-
- return mem;
+ return container_of(dev, struct memory_block, dev);
}
/*
@@ -542,7 +526,7 @@ struct memory_block *find_memory_block_hinted(struct mem_section *section,
* this gets to be a real problem, we can always use a radix
* tree or something here.
*
- * This could be made generic for all sysdev classes.
+ * This could be made generic for all device subsystems.
*/
struct memory_block *find_memory_block(struct mem_section *section)
{
@@ -588,19 +572,36 @@ static int init_memory_block(struct memory_block **memory,
}
static int add_memory_section(int nid, struct mem_section *section,
+ struct memory_block **mem_p,
unsigned long state, enum mem_add_context context)
{
- struct memory_block *mem;
+ struct memory_block *mem = NULL;
+ int scn_nr = __section_nr(section);
int ret = 0;
mutex_lock(&mem_sysfs_mutex);
- mem = find_memory_block(section);
+ if (context == BOOT) {
+ /* same memory block ? */
+ if (mem_p && *mem_p)
+ if (scn_nr >= (*mem_p)->start_section_nr &&
+ scn_nr <= (*mem_p)->end_section_nr) {
+ mem = *mem_p;
+ kobject_get(&mem->dev.kobj);
+ }
+ } else
+ mem = find_memory_block(section);
+
if (mem) {
mem->section_count++;
- kobject_put(&mem->sysdev.kobj);
- } else
+ kobject_put(&mem->dev.kobj);
+ } else {
ret = init_memory_block(&mem, section, state);
+ /* store memory_block pointer for next loop */
+ if (!ret && context == BOOT)
+ if (mem_p)
+ *mem_p = mem;
+ }
if (!ret) {
if (context == HOTPLUG &&
@@ -631,7 +632,7 @@ int remove_memory_block(unsigned long node_id, struct mem_section *section,
unregister_memory(mem);
kfree(mem);
} else
- kobject_put(&mem->sysdev.kobj);
+ kobject_put(&mem->dev.kobj);
mutex_unlock(&mem_sysfs_mutex);
return 0;
@@ -643,7 +644,7 @@ int remove_memory_block(unsigned long node_id, struct mem_section *section,
*/
int register_new_memory(int nid, struct mem_section *section)
{
- return add_memory_section(nid, section, MEM_OFFLINE, HOTPLUG);
+ return add_memory_section(nid, section, NULL, MEM_OFFLINE, HOTPLUG);
}
int unregister_memory_section(struct mem_section *section)
@@ -663,9 +664,9 @@ int __init memory_dev_init(void)
int ret;
int err;
unsigned long block_sz;
+ struct memory_block *mem = NULL;
- memory_sysdev_class.kset.uevent_ops = &memory_uevent_ops;
- ret = sysdev_class_register(&memory_sysdev_class);
+ ret = subsys_system_register(&memory_subsys, NULL);
if (ret)
goto out;
@@ -679,7 +680,10 @@ int __init memory_dev_init(void)
for (i = 0; i < NR_MEM_SECTIONS; i++) {
if (!present_section_nr(i))
continue;
- err = add_memory_section(0, __nr_to_section(i), MEM_ONLINE,
+ /* don't need to reuse memory_block if only one per block */
+ err = add_memory_section(0, __nr_to_section(i),
+ (sections_per_block == 1) ? NULL : &mem,
+ MEM_ONLINE,
BOOT);
if (!ret)
ret = err;
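The memory.c conversion above follows one pattern throughout: the sysdev class becomes a struct bus_type registered with subsys_system_register(), per-block SYSDEV_ATTRs become DEVICE_ATTRs, and class-wide attributes move onto the subsystem's root device. A condensed sketch of that pattern with made-up names:

#include <linux/device.h>
#include <linux/kernel.h>

static struct bus_type example_subsys = {
	.name		= "example",
	.dev_name	= "example",
};

static ssize_t show_example_value(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", dev->id);
}
static DEVICE_ATTR(value, 0444, show_example_value, NULL);

static int __init example_subsys_init(void)
{
	int ret;

	/* replaces sysdev_class_register(&..._sysdev_class) */
	ret = subsys_system_register(&example_subsys, NULL);
	if (ret)
		return ret;

	/* subsystem-wide files now hang off the subsystem root device */
	return device_create_file(example_subsys.dev_root, &dev_attr_value);
}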
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 5693ece..90aa2a1 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -1,8 +1,7 @@
/*
- * drivers/base/node.c - basic Node class support
+ * Basic Node interface support
*/
-#include <linux/sysdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
@@ -19,18 +18,16 @@
#include <linux/swap.h>
#include <linux/slab.h>
-static struct sysdev_class_attribute *node_state_attrs[];
-
-static struct sysdev_class node_class = {
+static struct bus_type node_subsys = {
.name = "node",
- .attrs = node_state_attrs,
+ .dev_name = "node",
};
-static ssize_t node_read_cpumap(struct sys_device *dev, int type, char *buf)
+static ssize_t node_read_cpumap(struct device *dev, int type, char *buf)
{
struct node *node_dev = to_node(dev);
- const struct cpumask *mask = cpumask_of_node(node_dev->sysdev.id);
+ const struct cpumask *mask = cpumask_of_node(node_dev->dev.id);
int len;
/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
@@ -44,23 +41,23 @@ static ssize_t node_read_cpumap(struct sys_device *dev, int type, char *buf)
return len;
}
-static inline ssize_t node_read_cpumask(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static inline ssize_t node_read_cpumask(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return node_read_cpumap(dev, 0, buf);
}
-static inline ssize_t node_read_cpulist(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static inline ssize_t node_read_cpulist(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return node_read_cpumap(dev, 1, buf);
}
-static SYSDEV_ATTR(cpumap, S_IRUGO, node_read_cpumask, NULL);
-static SYSDEV_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL);
+static DEVICE_ATTR(cpumap, S_IRUGO, node_read_cpumask, NULL);
+static DEVICE_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL);
#define K(x) ((x) << (PAGE_SHIFT - 10))
-static ssize_t node_read_meminfo(struct sys_device * dev,
- struct sysdev_attribute *attr, char * buf)
+static ssize_t node_read_meminfo(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int n;
int nid = dev->id;
@@ -157,10 +154,10 @@ static ssize_t node_read_meminfo(struct sys_device * dev,
}
#undef K
-static SYSDEV_ATTR(meminfo, S_IRUGO, node_read_meminfo, NULL);
+static DEVICE_ATTR(meminfo, S_IRUGO, node_read_meminfo, NULL);
-static ssize_t node_read_numastat(struct sys_device * dev,
- struct sysdev_attribute *attr, char * buf)
+static ssize_t node_read_numastat(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf,
"numa_hit %lu\n"
@@ -176,10 +173,10 @@ static ssize_t node_read_numastat(struct sys_device * dev,
node_page_state(dev->id, NUMA_LOCAL),
node_page_state(dev->id, NUMA_OTHER));
}
-static SYSDEV_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
+static DEVICE_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
-static ssize_t node_read_vmstat(struct sys_device *dev,
- struct sysdev_attribute *attr, char *buf)
+static ssize_t node_read_vmstat(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
int nid = dev->id;
int i;
@@ -191,10 +188,10 @@ static ssize_t node_read_vmstat(struct sys_device *dev,
return n;
}
-static SYSDEV_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);
+static DEVICE_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);
-static ssize_t node_read_distance(struct sys_device * dev,
- struct sysdev_attribute *attr, char * buf)
+static ssize_t node_read_distance(struct device *dev,
+ struct device_attribute *attr, char * buf)
{
int nid = dev->id;
int len = 0;
@@ -212,7 +209,7 @@ static ssize_t node_read_distance(struct sys_device * dev,
len += sprintf(buf + len, "\n");
return len;
}
-static SYSDEV_ATTR(distance, S_IRUGO, node_read_distance, NULL);
+static DEVICE_ATTR(distance, S_IRUGO, node_read_distance, NULL);
#ifdef CONFIG_HUGETLBFS
/*
@@ -230,7 +227,7 @@ static node_registration_func_t __hugetlb_unregister_node;
static inline bool hugetlb_register_node(struct node *node)
{
if (__hugetlb_register_node &&
- node_state(node->sysdev.id, N_HIGH_MEMORY)) {
+ node_state(node->dev.id, N_HIGH_MEMORY)) {
__hugetlb_register_node(node);
return true;
}
@@ -266,17 +263,17 @@ int register_node(struct node *node, int num, struct node *parent)
{
int error;
- node->sysdev.id = num;
- node->sysdev.cls = &node_class;
- error = sysdev_register(&node->sysdev);
+ node->dev.id = num;
+ node->dev.bus = &node_subsys;
+ error = device_register(&node->dev);
if (!error){
- sysdev_create_file(&node->sysdev, &attr_cpumap);
- sysdev_create_file(&node->sysdev, &attr_cpulist);
- sysdev_create_file(&node->sysdev, &attr_meminfo);
- sysdev_create_file(&node->sysdev, &attr_numastat);
- sysdev_create_file(&node->sysdev, &attr_distance);
- sysdev_create_file(&node->sysdev, &attr_vmstat);
+ device_create_file(&node->dev, &dev_attr_cpumap);
+ device_create_file(&node->dev, &dev_attr_cpulist);
+ device_create_file(&node->dev, &dev_attr_meminfo);
+ device_create_file(&node->dev, &dev_attr_numastat);
+ device_create_file(&node->dev, &dev_attr_distance);
+ device_create_file(&node->dev, &dev_attr_vmstat);
scan_unevictable_register_node(node);
@@ -296,17 +293,17 @@ int register_node(struct node *node, int num, struct node *parent)
*/
void unregister_node(struct node *node)
{
- sysdev_remove_file(&node->sysdev, &attr_cpumap);
- sysdev_remove_file(&node->sysdev, &attr_cpulist);
- sysdev_remove_file(&node->sysdev, &attr_meminfo);
- sysdev_remove_file(&node->sysdev, &attr_numastat);
- sysdev_remove_file(&node->sysdev, &attr_distance);
- sysdev_remove_file(&node->sysdev, &attr_vmstat);
+ device_remove_file(&node->dev, &dev_attr_cpumap);
+ device_remove_file(&node->dev, &dev_attr_cpulist);
+ device_remove_file(&node->dev, &dev_attr_meminfo);
+ device_remove_file(&node->dev, &dev_attr_numastat);
+ device_remove_file(&node->dev, &dev_attr_distance);
+ device_remove_file(&node->dev, &dev_attr_vmstat);
scan_unevictable_unregister_node(node);
hugetlb_unregister_node(node); /* no-op, if memoryless node */
- sysdev_unregister(&node->sysdev);
+ device_unregister(&node->dev);
}
struct node node_devices[MAX_NUMNODES];
@@ -317,41 +314,41 @@ struct node node_devices[MAX_NUMNODES];
int register_cpu_under_node(unsigned int cpu, unsigned int nid)
{
int ret;
- struct sys_device *obj;
+ struct device *obj;
if (!node_online(nid))
return 0;
- obj = get_cpu_sysdev(cpu);
+ obj = get_cpu_device(cpu);
if (!obj)
return 0;
- ret = sysfs_create_link(&node_devices[nid].sysdev.kobj,
+ ret = sysfs_create_link(&node_devices[nid].dev.kobj,
&obj->kobj,
kobject_name(&obj->kobj));
if (ret)
return ret;
return sysfs_create_link(&obj->kobj,
- &node_devices[nid].sysdev.kobj,
- kobject_name(&node_devices[nid].sysdev.kobj));
+ &node_devices[nid].dev.kobj,
+ kobject_name(&node_devices[nid].dev.kobj));
}
int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
{
- struct sys_device *obj;
+ struct device *obj;
if (!node_online(nid))
return 0;
- obj = get_cpu_sysdev(cpu);
+ obj = get_cpu_device(cpu);
if (!obj)
return 0;
- sysfs_remove_link(&node_devices[nid].sysdev.kobj,
+ sysfs_remove_link(&node_devices[nid].dev.kobj,
kobject_name(&obj->kobj));
sysfs_remove_link(&obj->kobj,
- kobject_name(&node_devices[nid].sysdev.kobj));
+ kobject_name(&node_devices[nid].dev.kobj));
return 0;
}
@@ -393,15 +390,15 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
continue;
if (page_nid != nid)
continue;
- ret = sysfs_create_link_nowarn(&node_devices[nid].sysdev.kobj,
- &mem_blk->sysdev.kobj,
- kobject_name(&mem_blk->sysdev.kobj));
+ ret = sysfs_create_link_nowarn(&node_devices[nid].dev.kobj,
+ &mem_blk->dev.kobj,
+ kobject_name(&mem_blk->dev.kobj));
if (ret)
return ret;
- return sysfs_create_link_nowarn(&mem_blk->sysdev.kobj,
- &node_devices[nid].sysdev.kobj,
- kobject_name(&node_devices[nid].sysdev.kobj));
+ return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
+ &node_devices[nid].dev.kobj,
+ kobject_name(&node_devices[nid].dev.kobj));
}
/* mem section does not span the specified node */
return 0;
@@ -434,10 +431,10 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
continue;
if (node_test_and_set(nid, *unlinked_nodes))
continue;
- sysfs_remove_link(&node_devices[nid].sysdev.kobj,
- kobject_name(&mem_blk->sysdev.kobj));
- sysfs_remove_link(&mem_blk->sysdev.kobj,
- kobject_name(&node_devices[nid].sysdev.kobj));
+ sysfs_remove_link(&node_devices[nid].dev.kobj,
+ kobject_name(&mem_blk->dev.kobj));
+ sysfs_remove_link(&mem_blk->dev.kobj,
+ kobject_name(&node_devices[nid].dev.kobj));
}
NODEMASK_FREE(unlinked_nodes);
return 0;
@@ -459,7 +456,15 @@ static int link_mem_sections(int nid)
if (!present_section_nr(section_nr))
continue;
mem_sect = __nr_to_section(section_nr);
+
+ /* same memblock ? */
+ if (mem_blk)
+ if ((section_nr >= mem_blk->start_section_nr) &&
+ (section_nr <= mem_blk->end_section_nr))
+ continue;
+
mem_blk = find_memory_block_hinted(mem_sect, mem_blk);
+
ret = register_mem_sect_under_node(mem_blk, nid);
if (!err)
err = ret;
@@ -468,7 +473,7 @@ static int link_mem_sections(int nid)
}
if (mem_blk)
- kobject_put(&mem_blk->sysdev.kobj);
+ kobject_put(&mem_blk->dev.kobj);
return err;
}
@@ -596,19 +601,19 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
}
struct node_attr {
- struct sysdev_class_attribute attr;
+ struct device_attribute attr;
enum node_states state;
};
-static ssize_t show_node_state(struct sysdev_class *class,
- struct sysdev_class_attribute *attr, char *buf)
+static ssize_t show_node_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct node_attr *na = container_of(attr, struct node_attr, attr);
return print_nodes_state(na->state, buf);
}
#define _NODE_ATTR(name, state) \
- { _SYSDEV_CLASS_ATTR(name, 0444, show_node_state, NULL), state }
+ { __ATTR(name, 0444, show_node_state, NULL), state }
static struct node_attr node_state_attr[] = {
_NODE_ATTR(possible, N_POSSIBLE),
@@ -620,17 +625,26 @@ static struct node_attr node_state_attr[] = {
#endif
};
-static struct sysdev_class_attribute *node_state_attrs[] = {
- &node_state_attr[0].attr,
- &node_state_attr[1].attr,
- &node_state_attr[2].attr,
- &node_state_attr[3].attr,
+static struct attribute *node_state_attrs[] = {
+ &node_state_attr[0].attr.attr,
+ &node_state_attr[1].attr.attr,
+ &node_state_attr[2].attr.attr,
+ &node_state_attr[3].attr.attr,
#ifdef CONFIG_HIGHMEM
- &node_state_attr[4].attr,
+ &node_state_attr[4].attr.attr,
#endif
NULL
};
+static struct attribute_group memory_root_attr_group = {
+ .attrs = node_state_attrs,
+};
+
+static const struct attribute_group *cpu_root_attr_groups[] = {
+ &memory_root_attr_group,
+ NULL,
+};
+
#define NODE_CALLBACK_PRI 2 /* lower than SLAB */
static int __init register_node_type(void)
{
@@ -639,7 +653,7 @@ static int __init register_node_type(void)
BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES);
- ret = sysdev_class_register(&node_class);
+ ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
if (!ret) {
hotplug_memory_notifier(node_memory_callback,
NODE_CALLBACK_PRI);
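node.c uses the other half of the same interface: instead of creating root files one by one, it hands an attribute_group array to subsys_system_register() so the state files appear on the subsystem root at registration time. Reduced to its skeleton (hypothetical names only):

#include <linux/device.h>
#include <linux/kernel.h>

static struct bus_type example_node_subsys = {
	.name		= "example_node",
	.dev_name	= "example_node",
};

static ssize_t show_example_state(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "online\n");
}
static DEVICE_ATTR(state, 0444, show_example_state, NULL);

static struct attribute *example_root_attrs[] = {
	&dev_attr_state.attr,
	NULL
};

static struct attribute_group example_root_attr_group = {
	.attrs = example_root_attrs,
};

static const struct attribute_group *example_root_attr_groups[] = {
	&example_root_attr_group,
	NULL,
};

static int __init example_node_init(void)
{
	/* the group is created on the subsystem root device during registration */
	return subsys_system_register(&example_node_subsys,
				      example_root_attr_groups);
}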
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 7a24895..f0c605e 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -383,7 +383,7 @@ EXPORT_SYMBOL_GPL(platform_device_unregister);
* Returns &struct platform_device pointer on success, or ERR_PTR() on error.
*/
struct platform_device *platform_device_register_full(
- struct platform_device_info *pdevinfo)
+ const struct platform_device_info *pdevinfo)
{
int ret = -ENOMEM;
struct platform_device *pdev;
@@ -700,25 +700,6 @@ static int platform_legacy_resume(struct device *dev)
return ret;
}
-int platform_pm_prepare(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (drv && drv->pm && drv->pm->prepare)
- ret = drv->pm->prepare(dev);
-
- return ret;
-}
-
-void platform_pm_complete(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
-
- if (drv && drv->pm && drv->pm->complete)
- drv->pm->complete(dev);
-}
-
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_SUSPEND
@@ -741,22 +722,6 @@ int platform_pm_suspend(struct device *dev)
return ret;
}
-int platform_pm_suspend_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->suspend_noirq)
- ret = drv->pm->suspend_noirq(dev);
- }
-
- return ret;
-}
-
int platform_pm_resume(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -775,22 +740,6 @@ int platform_pm_resume(struct device *dev)
return ret;
}
-int platform_pm_resume_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->resume_noirq)
- ret = drv->pm->resume_noirq(dev);
- }
-
- return ret;
-}
-
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
@@ -813,22 +762,6 @@ int platform_pm_freeze(struct device *dev)
return ret;
}
-int platform_pm_freeze_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->freeze_noirq)
- ret = drv->pm->freeze_noirq(dev);
- }
-
- return ret;
-}
-
int platform_pm_thaw(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -847,22 +780,6 @@ int platform_pm_thaw(struct device *dev)
return ret;
}
-int platform_pm_thaw_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->thaw_noirq)
- ret = drv->pm->thaw_noirq(dev);
- }
-
- return ret;
-}
-
int platform_pm_poweroff(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -881,22 +798,6 @@ int platform_pm_poweroff(struct device *dev)
return ret;
}
-int platform_pm_poweroff_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->poweroff_noirq)
- ret = drv->pm->poweroff_noirq(dev);
- }
-
- return ret;
-}
-
int platform_pm_restore(struct device *dev)
{
struct device_driver *drv = dev->driver;
@@ -915,22 +816,6 @@ int platform_pm_restore(struct device *dev)
return ret;
}
-int platform_pm_restore_noirq(struct device *dev)
-{
- struct device_driver *drv = dev->driver;
- int ret = 0;
-
- if (!drv)
- return 0;
-
- if (drv->pm) {
- if (drv->pm->restore_noirq)
- ret = drv->pm->restore_noirq(dev);
- }
-
- return ret;
-}
-
#endif /* CONFIG_HIBERNATE_CALLBACKS */
static const struct dev_pm_ops platform_dev_pm_ops = {
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 81676dd..2e58ebb 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
obj-$(CONFIG_PM_RUNTIME) += runtime.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
obj-$(CONFIG_PM_OPP) += opp.o
-obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o
+obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o
obj-$(CONFIG_HAVE_CLK) += clock_ops.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 6790cf7..978bbf7 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -15,13 +15,44 @@
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/suspend.h>
+#include <linux/export.h>
+
+#define GENPD_DEV_CALLBACK(genpd, type, callback, dev) \
+({ \
+ type (*__routine)(struct device *__d); \
+ type __ret = (type)0; \
+ \
+ __routine = genpd->dev_ops.callback; \
+ if (__routine) { \
+ __ret = __routine(dev); \
+ } else { \
+ __routine = dev_gpd_data(dev)->ops.callback; \
+ if (__routine) \
+ __ret = __routine(dev); \
+ } \
+ __ret; \
+})
+
+#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name) \
+({ \
+ ktime_t __start = ktime_get(); \
+ type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev); \
+ s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start)); \
+ struct generic_pm_domain_data *__gpd_data = dev_gpd_data(dev); \
+ if (__elapsed > __gpd_data->td.field) { \
+ __gpd_data->td.field = __elapsed; \
+ dev_warn(dev, name " latency exceeded, new value %lld ns\n", \
+ __elapsed); \
+ } \
+ __retval; \
+})
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);
#ifdef CONFIG_PM
-static struct generic_pm_domain *dev_to_genpd(struct device *dev)
+struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
if (IS_ERR_OR_NULL(dev->pm_domain))
return ERR_PTR(-EINVAL);
@@ -29,6 +60,31 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev)
return pd_to_genpd(dev->pm_domain);
}
+static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
+ stop_latency_ns, "stop");
+}
+
+static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
+ start_latency_ns, "start");
+}
+
+static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
+ save_state_latency_ns, "state save");
+}
+
+static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
+ restore_state_latency_ns,
+ "state restore");
+}
+
static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
{
bool ret = false;
@@ -145,9 +201,21 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
}
if (genpd->power_on) {
+ ktime_t time_start = ktime_get();
+ s64 elapsed_ns;
+
ret = genpd->power_on(genpd);
if (ret)
goto err;
+
+ elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+ if (elapsed_ns > genpd->power_on_latency_ns) {
+ genpd->power_on_latency_ns = elapsed_ns;
+ if (genpd->name)
+ pr_warning("%s: Power-on latency exceeded, "
+ "new value %lld ns\n", genpd->name,
+ elapsed_ns);
+ }
}
genpd_set_active(genpd);
@@ -190,7 +258,6 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
{
struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
struct device *dev = pdd->dev;
- struct device_driver *drv = dev->driver;
int ret = 0;
if (gpd_data->need_restore)
@@ -198,15 +265,9 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
mutex_unlock(&genpd->lock);
- if (drv && drv->pm && drv->pm->runtime_suspend) {
- if (genpd->start_device)
- genpd->start_device(dev);
-
- ret = drv->pm->runtime_suspend(dev);
-
- if (genpd->stop_device)
- genpd->stop_device(dev);
- }
+ genpd_start_dev(genpd, dev);
+ ret = genpd_save_dev(genpd, dev);
+ genpd_stop_dev(genpd, dev);
mutex_lock(&genpd->lock);
@@ -227,22 +288,15 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
{
struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
struct device *dev = pdd->dev;
- struct device_driver *drv = dev->driver;
if (!gpd_data->need_restore)
return;
mutex_unlock(&genpd->lock);
- if (drv && drv->pm && drv->pm->runtime_resume) {
- if (genpd->start_device)
- genpd->start_device(dev);
-
- drv->pm->runtime_resume(dev);
-
- if (genpd->stop_device)
- genpd->stop_device(dev);
- }
+ genpd_start_dev(genpd, dev);
+ genpd_restore_dev(genpd, dev);
+ genpd_stop_dev(genpd, dev);
mutex_lock(&genpd->lock);
@@ -354,11 +408,16 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
}
if (genpd->power_off) {
+ ktime_t time_start;
+ s64 elapsed_ns;
+
if (atomic_read(&genpd->sd_count) > 0) {
ret = -EBUSY;
goto out;
}
+ time_start = ktime_get();
+
/*
* If sd_count > 0 at this point, one of the subdomains hasn't
* managed to call pm_genpd_poweron() for the master yet after
@@ -372,9 +431,29 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
genpd_set_active(genpd);
goto out;
}
+
+ elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+ if (elapsed_ns > genpd->power_off_latency_ns) {
+ genpd->power_off_latency_ns = elapsed_ns;
+ if (genpd->name)
+ pr_warning("%s: Power-off latency exceeded, "
+ "new value %lld ns\n", genpd->name,
+ elapsed_ns);
+ }
}
genpd->status = GPD_STATE_POWER_OFF;
+ genpd->power_off_time = ktime_get();
+
+ /* Update PM QoS information for devices in the domain. */
+ list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) {
+ struct gpd_timing_data *td = &to_gpd_data(pdd)->td;
+
+ pm_runtime_update_max_time_suspended(pdd->dev,
+ td->start_latency_ns +
+ td->restore_state_latency_ns +
+ genpd->power_on_latency_ns);
+ }
list_for_each_entry(link, &genpd->slave_links, slave_node) {
genpd_sd_counter_dec(link->master);
@@ -413,6 +492,8 @@ static void genpd_power_off_work_fn(struct work_struct *work)
static int pm_genpd_runtime_suspend(struct device *dev)
{
struct generic_pm_domain *genpd;
+ bool (*stop_ok)(struct device *__dev);
+ int ret;
dev_dbg(dev, "%s()\n", __func__);
@@ -422,11 +503,16 @@ static int pm_genpd_runtime_suspend(struct device *dev)
might_sleep_if(!genpd->dev_irq_safe);
- if (genpd->stop_device) {
- int ret = genpd->stop_device(dev);
- if (ret)
- return ret;
- }
+ stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
+ if (stop_ok && !stop_ok(dev))
+ return -EBUSY;
+
+ ret = genpd_stop_dev(genpd, dev);
+ if (ret)
+ return ret;
+
+ pm_runtime_update_max_time_suspended(dev,
+ dev_gpd_data(dev)->td.start_latency_ns);
/*
* If power.irq_safe is set, this routine will be run with interrupts
@@ -502,8 +588,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
mutex_unlock(&genpd->lock);
out:
- if (genpd->start_device)
- genpd->start_device(dev);
+ genpd_start_dev(genpd, dev);
return 0;
}
@@ -534,6 +619,52 @@ static inline void genpd_power_off_work_fn(struct work_struct *work) {}
#ifdef CONFIG_PM_SLEEP
+static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
+ struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
+}
+
+static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
+}
+
+static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
+}
+
+static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
+}
+
+static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
+}
+
+static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
+}
+
+static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
+}
+
+static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
+}
+
+static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
+{
+ return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
+}
+
/**
* pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
* @genpd: PM domain to power off, if possible.
@@ -590,7 +721,7 @@ static bool resume_needed(struct device *dev, struct generic_pm_domain *genpd)
if (!device_can_wakeup(dev))
return false;
- active_wakeup = genpd->active_wakeup && genpd->active_wakeup(dev);
+ active_wakeup = genpd_dev_active_wakeup(genpd, dev);
return device_may_wakeup(dev) ? active_wakeup : !active_wakeup;
}
@@ -646,7 +777,7 @@ static int pm_genpd_prepare(struct device *dev)
/*
* The PM domain must be in the GPD_STATE_ACTIVE state at this point,
* so pm_genpd_poweron() will return immediately, but if the device
- * is suspended (e.g. it's been stopped by .stop_device()), we need
+ * is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
* to make it operational.
*/
pm_runtime_resume(dev);
@@ -685,7 +816,7 @@ static int pm_genpd_suspend(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
+ return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
}
/**
@@ -710,16 +841,14 @@ static int pm_genpd_suspend_noirq(struct device *dev)
if (genpd->suspend_power_off)
return 0;
- ret = pm_generic_suspend_noirq(dev);
+ ret = genpd_suspend_late(genpd, dev);
if (ret)
return ret;
- if (dev->power.wakeup_path
- && genpd->active_wakeup && genpd->active_wakeup(dev))
+ if (dev->power.wakeup_path && genpd_dev_active_wakeup(genpd, dev))
return 0;
- if (genpd->stop_device)
- genpd->stop_device(dev);
+ genpd_stop_dev(genpd, dev);
/*
* Since all of the "noirq" callbacks are executed sequentially, it is
@@ -761,10 +890,9 @@ static int pm_genpd_resume_noirq(struct device *dev)
*/
pm_genpd_poweron(genpd);
genpd->suspended_count--;
- if (genpd->start_device)
- genpd->start_device(dev);
+ genpd_start_dev(genpd, dev);
- return pm_generic_resume_noirq(dev);
+ return genpd_resume_early(genpd, dev);
}
/**
@@ -785,7 +913,7 @@ static int pm_genpd_resume(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
+ return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
}
/**
@@ -806,7 +934,7 @@ static int pm_genpd_freeze(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
+ return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
}
/**
@@ -832,12 +960,11 @@ static int pm_genpd_freeze_noirq(struct device *dev)
if (genpd->suspend_power_off)
return 0;
- ret = pm_generic_freeze_noirq(dev);
+ ret = genpd_freeze_late(genpd, dev);
if (ret)
return ret;
- if (genpd->stop_device)
- genpd->stop_device(dev);
+ genpd_stop_dev(genpd, dev);
return 0;
}
@@ -864,10 +991,9 @@ static int pm_genpd_thaw_noirq(struct device *dev)
if (genpd->suspend_power_off)
return 0;
- if (genpd->start_device)
- genpd->start_device(dev);
+ genpd_start_dev(genpd, dev);
- return pm_generic_thaw_noirq(dev);
+ return genpd_thaw_early(genpd, dev);
}
/**
@@ -888,72 +1014,7 @@ static int pm_genpd_thaw(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
-}
-
-/**
- * pm_genpd_dev_poweroff - Power off a device belonging to an I/O PM domain.
- * @dev: Device to suspend.
- *
- * Power off a device under the assumption that its pm_domain field points to
- * the domain member of an object of type struct generic_pm_domain representing
- * a PM domain consisting of I/O devices.
- */
-static int pm_genpd_dev_poweroff(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : pm_generic_poweroff(dev);
-}
-
-/**
- * pm_genpd_dev_poweroff_noirq - Late power off of a device from a PM domain.
- * @dev: Device to suspend.
- *
- * Carry out a late powering off of a device under the assumption that its
- * pm_domain field points to the domain member of an object of type
- * struct generic_pm_domain representing a PM domain consisting of I/O devices.
- */
-static int pm_genpd_dev_poweroff_noirq(struct device *dev)
-{
- struct generic_pm_domain *genpd;
- int ret;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- if (genpd->suspend_power_off)
- return 0;
-
- ret = pm_generic_poweroff_noirq(dev);
- if (ret)
- return ret;
-
- if (dev->power.wakeup_path
- && genpd->active_wakeup && genpd->active_wakeup(dev))
- return 0;
-
- if (genpd->stop_device)
- genpd->stop_device(dev);
-
- /*
- * Since all of the "noirq" callbacks are executed sequentially, it is
- * guaranteed that this function will never run twice in parallel for
- * the same PM domain, so it is not necessary to use locking here.
- */
- genpd->suspended_count++;
- pm_genpd_sync_poweroff(genpd);
-
- return 0;
+ return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
}
/**
@@ -993,31 +1054,9 @@ static int pm_genpd_restore_noirq(struct device *dev)
pm_genpd_poweron(genpd);
genpd->suspended_count--;
- if (genpd->start_device)
- genpd->start_device(dev);
+ genpd_start_dev(genpd, dev);
- return pm_generic_restore_noirq(dev);
-}
-
-/**
- * pm_genpd_restore - Restore a device belonging to an I/O power domain.
- * @dev: Device to resume.
- *
- * Restore a device under the assumption that its pm_domain field points to the
- * domain member of an object of type struct generic_pm_domain representing
- * a power domain consisting of I/O devices.
- */
-static int pm_genpd_restore(struct device *dev)
-{
- struct generic_pm_domain *genpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- genpd = dev_to_genpd(dev);
- if (IS_ERR(genpd))
- return -EINVAL;
-
- return genpd->suspend_power_off ? 0 : pm_generic_restore(dev);
+ return genpd_resume_early(genpd, dev);
}
/**
@@ -1067,20 +1106,19 @@ static void pm_genpd_complete(struct device *dev)
#define pm_genpd_freeze_noirq NULL
#define pm_genpd_thaw_noirq NULL
#define pm_genpd_thaw NULL
-#define pm_genpd_dev_poweroff_noirq NULL
-#define pm_genpd_dev_poweroff NULL
#define pm_genpd_restore_noirq NULL
-#define pm_genpd_restore NULL
#define pm_genpd_complete NULL
#endif /* CONFIG_PM_SLEEP */
/**
- * pm_genpd_add_device - Add a device to an I/O PM domain.
+ * __pm_genpd_add_device - Add a device to an I/O PM domain.
* @genpd: PM domain to add the device to.
* @dev: Device to be added.
+ * @td: Set of PM QoS timing parameters to attach to the device.
*/
-int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
+int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
+ struct gpd_timing_data *td)
{
struct generic_pm_domain_data *gpd_data;
struct pm_domain_data *pdd;
@@ -1123,6 +1161,8 @@ int pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev)
gpd_data->base.dev = dev;
gpd_data->need_restore = false;
list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
+ if (td)
+ gpd_data->td = *td;
out:
genpd_release_lock(genpd);
@@ -1280,6 +1320,219 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
}
/**
+ * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
+ * @dev: Device to add the callbacks to.
+ * @ops: Set of callbacks to add.
+ * @td: Timing data to add to the device along with the callbacks (optional).
+ */
+int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
+ struct gpd_timing_data *td)
+{
+ struct pm_domain_data *pdd;
+ int ret = 0;
+
+ if (!(dev && dev->power.subsys_data && ops))
+ return -EINVAL;
+
+ pm_runtime_disable(dev);
+ device_pm_lock();
+
+ pdd = dev->power.subsys_data->domain_data;
+ if (pdd) {
+ struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+
+ gpd_data->ops = *ops;
+ if (td)
+ gpd_data->td = *td;
+ } else {
+ ret = -EINVAL;
+ }
+
+ device_pm_unlock();
+ pm_runtime_enable(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
+
+/**
+ * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
+ * @dev: Device to remove the callbacks from.
+ * @clear_td: If set, clear the device's timing data too.
+ */
+int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
+{
+ struct pm_domain_data *pdd;
+ int ret = 0;
+
+ if (!(dev && dev->power.subsys_data))
+ return -EINVAL;
+
+ pm_runtime_disable(dev);
+ device_pm_lock();
+
+ pdd = dev->power.subsys_data->domain_data;
+ if (pdd) {
+ struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
+
+ gpd_data->ops = (struct gpd_dev_ops){ 0 };
+ if (clear_td)
+ gpd_data->td = (struct gpd_timing_data){ 0 };
+ } else {
+ ret = -EINVAL;
+ }
+
+ device_pm_unlock();
+ pm_runtime_enable(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
+
+/* Default device callbacks for generic PM domains. */
+
+/**
+ * pm_genpd_default_save_state - Default "save device state" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_save_state(struct device *dev)
+{
+ int (*cb)(struct device *__dev);
+ struct device_driver *drv = dev->driver;
+
+ cb = dev_gpd_data(dev)->ops.save_state;
+ if (cb)
+ return cb(dev);
+
+ if (drv && drv->pm && drv->pm->runtime_suspend)
+ return drv->pm->runtime_suspend(dev);
+
+ return 0;
+}
+
+/**
+ * pm_genpd_default_restore_state - Default "restore device state" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_restore_state(struct device *dev)
+{
+ int (*cb)(struct device *__dev);
+ struct device_driver *drv = dev->driver;
+
+ cb = dev_gpd_data(dev)->ops.restore_state;
+ if (cb)
+ return cb(dev);
+
+ if (drv && drv->pm && drv->pm->runtime_resume)
+ return drv->pm->runtime_resume(dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+/**
+ * pm_genpd_default_suspend - Default "device suspend" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_suspend(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
+
+ return cb ? cb(dev) : pm_generic_suspend(dev);
+}
+
+/**
+ * pm_genpd_default_suspend_late - Default "late device suspend" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_suspend_late(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
+
+ return cb ? cb(dev) : pm_generic_suspend_noirq(dev);
+}
+
+/**
+ * pm_genpd_default_resume_early - Default "early device resume" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_resume_early(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
+
+ return cb ? cb(dev) : pm_generic_resume_noirq(dev);
+}
+
+/**
+ * pm_genpd_default_resume - Default "device resume" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_resume(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
+
+ return cb ? cb(dev) : pm_generic_resume(dev);
+}
+
+/**
+ * pm_genpd_default_freeze - Default "device freeze" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_freeze(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
+
+ return cb ? cb(dev) : pm_generic_freeze(dev);
+}
+
+/**
+ * pm_genpd_default_freeze_late - Default "late device freeze" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_freeze_late(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
+
+ return cb ? cb(dev) : pm_generic_freeze_noirq(dev);
+}
+
+/**
+ * pm_genpd_default_thaw_early - Default "early device thaw" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_thaw_early(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
+
+ return cb ? cb(dev) : pm_generic_thaw_noirq(dev);
+}
+
+/**
+ * pm_genpd_default_thaw - Default "device thaw" for PM domains.
+ * @dev: Device to handle.
+ */
+static int pm_genpd_default_thaw(struct device *dev)
+{
+ int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
+
+ return cb ? cb(dev) : pm_generic_thaw(dev);
+}
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define pm_genpd_default_suspend NULL
+#define pm_genpd_default_suspend_late NULL
+#define pm_genpd_default_resume_early NULL
+#define pm_genpd_default_resume NULL
+#define pm_genpd_default_freeze NULL
+#define pm_genpd_default_freeze_late NULL
+#define pm_genpd_default_thaw_early NULL
+#define pm_genpd_default_thaw NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
+/**
* pm_genpd_init - Initialize a generic I/O PM domain object.
* @genpd: PM domain object to initialize.
* @gov: PM domain governor to associate with the domain (may be NULL).
@@ -1305,6 +1558,7 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->resume_count = 0;
genpd->device_count = 0;
genpd->suspended_count = 0;
+ genpd->max_off_time_ns = -1;
genpd->domain.ops.runtime_suspend = pm_genpd_runtime_suspend;
genpd->domain.ops.runtime_resume = pm_genpd_runtime_resume;
genpd->domain.ops.runtime_idle = pm_generic_runtime_idle;
@@ -1317,11 +1571,21 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->domain.ops.freeze_noirq = pm_genpd_freeze_noirq;
genpd->domain.ops.thaw_noirq = pm_genpd_thaw_noirq;
genpd->domain.ops.thaw = pm_genpd_thaw;
- genpd->domain.ops.poweroff = pm_genpd_dev_poweroff;
- genpd->domain.ops.poweroff_noirq = pm_genpd_dev_poweroff_noirq;
+ genpd->domain.ops.poweroff = pm_genpd_suspend;
+ genpd->domain.ops.poweroff_noirq = pm_genpd_suspend_noirq;
genpd->domain.ops.restore_noirq = pm_genpd_restore_noirq;
- genpd->domain.ops.restore = pm_genpd_restore;
+ genpd->domain.ops.restore = pm_genpd_resume;
genpd->domain.ops.complete = pm_genpd_complete;
+ genpd->dev_ops.save_state = pm_genpd_default_save_state;
+ genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
+ genpd->dev_ops.suspend = pm_genpd_default_suspend;
+ genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
+ genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
+ genpd->dev_ops.resume = pm_genpd_default_resume;
+ genpd->dev_ops.freeze = pm_genpd_default_freeze;
+ genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
+ genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
+ genpd->dev_ops.thaw = pm_genpd_default_thaw;
mutex_lock(&gpd_list_lock);
list_add(&genpd->gpd_list_node, &gpd_list);
mutex_unlock(&gpd_list_lock);
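With the new per-device ops and timing data in place, a platform could register a device roughly as follows. This is an editorial sketch only: my_dev_stop(), my_dev_start() and every latency number are invented; __pm_genpd_add_device() and pm_genpd_add_callbacks() are used as introduced above.

#include <linux/device.h>
#include <linux/pm_domain.h>

static int my_dev_stop(struct device *dev)
{
	/* e.g. gate the device's clock */
	return 0;
}

static int my_dev_start(struct device *dev)
{
	return 0;
}

static struct gpd_dev_ops my_dev_ops = {
	.stop	= my_dev_stop,
	.start	= my_dev_start,
};

static struct gpd_timing_data my_td = {
	.stop_latency_ns	  =  20000,
	.start_latency_ns	  =  50000,
	.save_state_latency_ns	  = 100000,
	.restore_state_latency_ns = 200000,
	.break_even_ns		  = 500000,
};

static int my_add_device(struct generic_pm_domain *my_pd, struct device *dev)
{
	int ret;

	/* attach the device together with its QoS timing data */
	ret = __pm_genpd_add_device(my_pd, dev, &my_td);
	if (ret)
		return ret;

	/* override the default stop/start callbacks for this device */
	return pm_genpd_add_callbacks(dev, &my_dev_ops, NULL);
}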
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
new file mode 100644
index 0000000..66a265b
--- /dev/null
+++ b/drivers/base/power/domain_governor.c
@@ -0,0 +1,170 @@
+/*
+ * drivers/base/power/domain_governor.c - Governors for device PM domains.
+ *
+ * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Renesas Electronics Corp.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_qos.h>
+#include <linux/hrtimer.h>
+
+#ifdef CONFIG_PM_RUNTIME
+
+/**
+ * default_stop_ok - Default PM domain governor routine for stopping devices.
+ * @dev: Device to check.
+ */
+bool default_stop_ok(struct device *dev)
+{
+ struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+
+ dev_dbg(dev, "%s()\n", __func__);
+
+ if (dev->power.max_time_suspended_ns < 0 || td->break_even_ns == 0)
+ return true;
+
+ return td->stop_latency_ns + td->start_latency_ns < td->break_even_ns
+ && td->break_even_ns < dev->power.max_time_suspended_ns;
+}
+
+/**
+ * default_power_down_ok - Default generic PM domain power off governor routine.
+ * @pd: PM domain to check.
+ *
+ * This routine must be executed under the PM domain's lock.
+ */
+static bool default_power_down_ok(struct dev_pm_domain *pd)
+{
+ struct generic_pm_domain *genpd = pd_to_genpd(pd);
+ struct gpd_link *link;
+ struct pm_domain_data *pdd;
+ s64 min_dev_off_time_ns;
+ s64 off_on_time_ns;
+ ktime_t time_now = ktime_get();
+
+ off_on_time_ns = genpd->power_off_latency_ns +
+ genpd->power_on_latency_ns;
+ /*
+ * It doesn't make sense to remove power from the domain if saving
+ * the state of all devices in it and the power off/power on operations
+ * take too much time.
+ *
+ * All devices in this domain have been stopped already at this point.
+ */
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ if (pdd->dev->driver)
+ off_on_time_ns +=
+ to_gpd_data(pdd)->td.save_state_latency_ns;
+ }
+
+ /*
+ * Check if subdomains can be off for enough time.
+ *
+ * All subdomains have been powered off already at this point.
+ */
+ list_for_each_entry(link, &genpd->master_links, master_node) {
+ struct generic_pm_domain *sd = link->slave;
+ s64 sd_max_off_ns = sd->max_off_time_ns;
+
+ if (sd_max_off_ns < 0)
+ continue;
+
+ sd_max_off_ns -= ktime_to_ns(ktime_sub(time_now,
+ sd->power_off_time));
+ /*
+ * Check if the subdomain is allowed to be off long enough for
+ * the current domain to turn off and on (that's how much time
+ * it will have to wait in the worst case).
+ */
+ if (sd_max_off_ns <= off_on_time_ns)
+ return false;
+ }
+
+ /*
+ * Check if the devices in the domain can be off for enough time.
+ */
+ min_dev_off_time_ns = -1;
+ list_for_each_entry(pdd, &genpd->dev_list, list_node) {
+ struct gpd_timing_data *td;
+ struct device *dev = pdd->dev;
+ s64 dev_off_time_ns;
+
+ if (!dev->driver || dev->power.max_time_suspended_ns < 0)
+ continue;
+
+ td = &to_gpd_data(pdd)->td;
+ dev_off_time_ns = dev->power.max_time_suspended_ns -
+ (td->start_latency_ns + td->restore_state_latency_ns +
+ ktime_to_ns(ktime_sub(time_now,
+ dev->power.suspend_time)));
+ if (dev_off_time_ns <= off_on_time_ns)
+ return false;
+
+ if (min_dev_off_time_ns > dev_off_time_ns
+ || min_dev_off_time_ns < 0)
+ min_dev_off_time_ns = dev_off_time_ns;
+ }
+
+ if (min_dev_off_time_ns < 0) {
+ /*
+ * There are no latency constraints, so the domain can spend
+ * arbitrary time in the "off" state.
+ */
+ genpd->max_off_time_ns = -1;
+ return true;
+ }
+
+ /*
+ * The difference between the computed minimum delta and the time needed
+ * to turn the domain on is the maximum theoretical time this domain can
+ * spend in the "off" state.
+ */
+ min_dev_off_time_ns -= genpd->power_on_latency_ns;
+
+ /*
+ * If the difference between the computed minimum delta and the time
+ * needed to turn the domain off and back on is smaller than the
+ * domain's power break-even time, removing power from the domain is not
+ * worth it.
+ */
+ if (genpd->break_even_ns >
+ min_dev_off_time_ns - genpd->power_off_latency_ns)
+ return false;
+
+ genpd->max_off_time_ns = min_dev_off_time_ns;
+ return true;
+}
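Again with invented numbers: a domain with power_off_latency_ns = 200000 and power_on_latency_ns = 300000 has off_on_time_ns = 500000 (plus any save_state latencies). If its only active device reports max_time_suspended_ns = 2000000, start_latency_ns = 50000, restore_state_latency_ns = 100000 and has already been suspended for 250000 ns, then dev_off_time_ns = 2000000 - (50000 + 100000 + 250000) = 1600000 > 500000, so the device does not veto the power-off. max_off_time_ns becomes 1600000 - 300000 = 1300000, and the domain is actually powered off only if break_even_ns <= 1300000 - 200000 = 1100000.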
+
+static bool always_on_power_down_ok(struct dev_pm_domain *domain)
+{
+ return false;
+}
+
+#else /* !CONFIG_PM_RUNTIME */
+
+bool default_stop_ok(struct device *dev)
+{
+ return false;
+}
+
+#define default_power_down_ok NULL
+#define always_on_power_down_ok NULL
+
+#endif /* !CONFIG_PM_RUNTIME */
+
+struct dev_power_governor simple_qos_governor = {
+ .stop_ok = default_stop_ok,
+ .power_down_ok = default_power_down_ok,
+};
+
+/**
+ * pm_domain_always_on_gov - A governor implementing an always-on policy
+ */
+struct dev_power_governor pm_domain_always_on_gov = {
+ .power_down_ok = always_on_power_down_ok,
+ .stop_ok = default_stop_ok,
+};
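As a rough usage sketch of how a platform would plug a domain into these governors (the power-switch callbacks and every latency figure below are hypothetical; pm_genpd_init(), simple_qos_governor and the generic_pm_domain latency fields are the ones introduced or used by this series):

#include <linux/pm_domain.h>

static int my_pd_power_on(struct generic_pm_domain *domain)
{
	/* Hypothetical: switch the domain's power rail on. */
	return 0;
}

static int my_pd_power_off(struct generic_pm_domain *domain)
{
	/* Hypothetical: switch the power rail off. */
	return 0;
}

static struct generic_pm_domain my_pd = {
	.power_on = my_pd_power_on,
	.power_off = my_pd_power_off,
	.power_on_latency_ns = 300000,
	.power_off_latency_ns = 200000,
	.break_even_ns = 1000000,
};

static int __init my_pd_setup(void)
{
	/* Governed by the QoS-aware default governor; starts powered off. */
	pm_genpd_init(&my_pd, &simple_qos_governor, true);
	return 0;
}

A domain that must never be powered down would pass &pm_domain_always_on_gov instead, while its devices still get default_stop_ok().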
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 265a0ee..10bdd79 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -97,16 +97,16 @@ int pm_generic_prepare(struct device *dev)
* @event: PM transition of the system under way.
* @bool: Whether or not this is the "noirq" stage.
*
- * If the device has not been suspended at run time, execute the
- * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and
- * return its error code. Otherwise, return zero.
+ * Execute the PM callback corresponding to @event provided by the driver of
+ * @dev, if defined, and return its error code. Return 0 if the callback is
+ * not present.
*/
static int __pm_generic_call(struct device *dev, int event, bool noirq)
{
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
int (*callback)(struct device *);
- if (!pm || pm_runtime_suspended(dev))
+ if (!pm)
return 0;
switch (event) {
@@ -119,9 +119,15 @@ static int __pm_generic_call(struct device *dev, int event, bool noirq)
case PM_EVENT_HIBERNATE:
callback = noirq ? pm->poweroff_noirq : pm->poweroff;
break;
+ case PM_EVENT_RESUME:
+ callback = noirq ? pm->resume_noirq : pm->resume;
+ break;
case PM_EVENT_THAW:
callback = noirq ? pm->thaw_noirq : pm->thaw;
break;
+ case PM_EVENT_RESTORE:
+ callback = noirq ? pm->restore_noirq : pm->restore;
+ break;
default:
callback = NULL;
break;
@@ -211,56 +217,12 @@ int pm_generic_thaw(struct device *dev)
EXPORT_SYMBOL_GPL(pm_generic_thaw);
/**
- * __pm_generic_resume - Generic resume/restore callback for subsystems.
- * @dev: Device to handle.
- * @event: PM transition of the system under way.
- * @bool: Whether or not this is the "noirq" stage.
- *
- * Execute the resume/resotre callback provided by the @dev's driver, if
- * defined. If it returns 0, change the device's runtime PM status to 'active'.
- * Return the callback's error code.
- */
-static int __pm_generic_resume(struct device *dev, int event, bool noirq)
-{
- const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int (*callback)(struct device *);
- int ret;
-
- if (!pm)
- return 0;
-
- switch (event) {
- case PM_EVENT_RESUME:
- callback = noirq ? pm->resume_noirq : pm->resume;
- break;
- case PM_EVENT_RESTORE:
- callback = noirq ? pm->restore_noirq : pm->restore;
- break;
- default:
- callback = NULL;
- break;
- }
-
- if (!callback)
- return 0;
-
- ret = callback(dev);
- if (!ret && !noirq && pm_runtime_enabled(dev)) {
- pm_runtime_disable(dev);
- pm_runtime_set_active(dev);
- pm_runtime_enable(dev);
- }
-
- return ret;
-}
-
-/**
* pm_generic_resume_noirq - Generic resume_noirq callback for subsystems.
* @dev: Device to resume.
*/
int pm_generic_resume_noirq(struct device *dev)
{
- return __pm_generic_resume(dev, PM_EVENT_RESUME, true);
+ return __pm_generic_call(dev, PM_EVENT_RESUME, true);
}
EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
@@ -270,7 +232,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume_noirq);
*/
int pm_generic_resume(struct device *dev)
{
- return __pm_generic_resume(dev, PM_EVENT_RESUME, false);
+ return __pm_generic_call(dev, PM_EVENT_RESUME, false);
}
EXPORT_SYMBOL_GPL(pm_generic_resume);
@@ -280,7 +242,7 @@ EXPORT_SYMBOL_GPL(pm_generic_resume);
*/
int pm_generic_restore_noirq(struct device *dev)
{
- return __pm_generic_resume(dev, PM_EVENT_RESTORE, true);
+ return __pm_generic_call(dev, PM_EVENT_RESTORE, true);
}
EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
@@ -290,7 +252,7 @@ EXPORT_SYMBOL_GPL(pm_generic_restore_noirq);
*/
int pm_generic_restore(struct device *dev)
{
- return __pm_generic_resume(dev, PM_EVENT_RESTORE, false);
+ return __pm_generic_call(dev, PM_EVENT_RESTORE, false);
}
EXPORT_SYMBOL_GPL(pm_generic_restore);
@@ -314,28 +276,3 @@ void pm_generic_complete(struct device *dev)
pm_runtime_idle(dev);
}
#endif /* CONFIG_PM_SLEEP */
-
-struct dev_pm_ops generic_subsys_pm_ops = {
-#ifdef CONFIG_PM_SLEEP
- .prepare = pm_generic_prepare,
- .suspend = pm_generic_suspend,
- .suspend_noirq = pm_generic_suspend_noirq,
- .resume = pm_generic_resume,
- .resume_noirq = pm_generic_resume_noirq,
- .freeze = pm_generic_freeze,
- .freeze_noirq = pm_generic_freeze_noirq,
- .thaw = pm_generic_thaw,
- .thaw_noirq = pm_generic_thaw_noirq,
- .poweroff = pm_generic_poweroff,
- .poweroff_noirq = pm_generic_poweroff_noirq,
- .restore = pm_generic_restore,
- .restore_noirq = pm_generic_restore_noirq,
- .complete = pm_generic_complete,
-#endif
-#ifdef CONFIG_PM_RUNTIME
- .runtime_suspend = pm_generic_runtime_suspend,
- .runtime_resume = pm_generic_runtime_resume,
- .runtime_idle = pm_generic_runtime_idle,
-#endif
-};
-EXPORT_SYMBOL_GPL(generic_subsys_pm_ops);
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index c3d2dfc..e2cc3d2 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -32,6 +32,8 @@
#include "../base.h"
#include "power.h"
+typedef int (*pm_callback_t)(struct device *);
+
/*
* The entries in the dpm_list list are in a depth first order, simply
* because children are guaranteed to be discovered after parents, and
@@ -164,8 +166,9 @@ static ktime_t initcall_debug_start(struct device *dev)
ktime_t calltime = ktime_set(0, 0);
if (initcall_debug) {
- pr_info("calling %s+ @ %i\n",
- dev_name(dev), task_pid_nr(current));
+ pr_info("calling %s+ @ %i, parent: %s\n",
+ dev_name(dev), task_pid_nr(current),
+ dev->parent ? dev_name(dev->parent) : "none");
calltime = ktime_get();
}
@@ -211,151 +214,69 @@ static void dpm_wait_for_children(struct device *dev, bool async)
}
/**
- * pm_op - Execute the PM operation appropriate for given PM event.
- * @dev: Device to handle.
+ * pm_op - Return the PM operation appropriate for given PM event.
* @ops: PM operations to choose from.
* @state: PM transition of the system being carried out.
*/
-static int pm_op(struct device *dev,
- const struct dev_pm_ops *ops,
- pm_message_t state)
+static pm_callback_t pm_op(const struct dev_pm_ops *ops, pm_message_t state)
{
- int error = 0;
- ktime_t calltime;
-
- calltime = initcall_debug_start(dev);
-
switch (state.event) {
#ifdef CONFIG_SUSPEND
case PM_EVENT_SUSPEND:
- if (ops->suspend) {
- error = ops->suspend(dev);
- suspend_report_result(ops->suspend, error);
- }
- break;
+ return ops->suspend;
case PM_EVENT_RESUME:
- if (ops->resume) {
- error = ops->resume(dev);
- suspend_report_result(ops->resume, error);
- }
- break;
+ return ops->resume;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
- if (ops->freeze) {
- error = ops->freeze(dev);
- suspend_report_result(ops->freeze, error);
- }
- break;
+ return ops->freeze;
case PM_EVENT_HIBERNATE:
- if (ops->poweroff) {
- error = ops->poweroff(dev);
- suspend_report_result(ops->poweroff, error);
- }
- break;
+ return ops->poweroff;
case PM_EVENT_THAW:
case PM_EVENT_RECOVER:
- if (ops->thaw) {
- error = ops->thaw(dev);
- suspend_report_result(ops->thaw, error);
- }
+ return ops->thaw;
break;
case PM_EVENT_RESTORE:
- if (ops->restore) {
- error = ops->restore(dev);
- suspend_report_result(ops->restore, error);
- }
- break;
+ return ops->restore;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
- default:
- error = -EINVAL;
}
- initcall_debug_report(dev, calltime, error);
-
- return error;
+ return NULL;
}
/**
- * pm_noirq_op - Execute the PM operation appropriate for given PM event.
- * @dev: Device to handle.
+ * pm_noirq_op - Return the PM operation appropriate for given PM event.
* @ops: PM operations to choose from.
* @state: PM transition of the system being carried out.
*
* The driver of @dev will not receive interrupts while this function is being
* executed.
*/
-static int pm_noirq_op(struct device *dev,
- const struct dev_pm_ops *ops,
- pm_message_t state)
+static pm_callback_t pm_noirq_op(const struct dev_pm_ops *ops, pm_message_t state)
{
- int error = 0;
- ktime_t calltime = ktime_set(0, 0), delta, rettime;
-
- if (initcall_debug) {
- pr_info("calling %s+ @ %i, parent: %s\n",
- dev_name(dev), task_pid_nr(current),
- dev->parent ? dev_name(dev->parent) : "none");
- calltime = ktime_get();
- }
-
switch (state.event) {
#ifdef CONFIG_SUSPEND
case PM_EVENT_SUSPEND:
- if (ops->suspend_noirq) {
- error = ops->suspend_noirq(dev);
- suspend_report_result(ops->suspend_noirq, error);
- }
- break;
+ return ops->suspend_noirq;
case PM_EVENT_RESUME:
- if (ops->resume_noirq) {
- error = ops->resume_noirq(dev);
- suspend_report_result(ops->resume_noirq, error);
- }
- break;
+ return ops->resume_noirq;
#endif /* CONFIG_SUSPEND */
#ifdef CONFIG_HIBERNATE_CALLBACKS
case PM_EVENT_FREEZE:
case PM_EVENT_QUIESCE:
- if (ops->freeze_noirq) {
- error = ops->freeze_noirq(dev);
- suspend_report_result(ops->freeze_noirq, error);
- }
- break;
+ return ops->freeze_noirq;
case PM_EVENT_HIBERNATE:
- if (ops->poweroff_noirq) {
- error = ops->poweroff_noirq(dev);
- suspend_report_result(ops->poweroff_noirq, error);
- }
- break;
+ return ops->poweroff_noirq;
case PM_EVENT_THAW:
case PM_EVENT_RECOVER:
- if (ops->thaw_noirq) {
- error = ops->thaw_noirq(dev);
- suspend_report_result(ops->thaw_noirq, error);
- }
- break;
+ return ops->thaw_noirq;
case PM_EVENT_RESTORE:
- if (ops->restore_noirq) {
- error = ops->restore_noirq(dev);
- suspend_report_result(ops->restore_noirq, error);
- }
- break;
+ return ops->restore_noirq;
#endif /* CONFIG_HIBERNATE_CALLBACKS */
- default:
- error = -EINVAL;
- }
-
- if (initcall_debug) {
- rettime = ktime_get();
- delta = ktime_sub(rettime, calltime);
- printk("initcall %s_i+ returned %d after %Ld usecs\n",
- dev_name(dev), error,
- (unsigned long long)ktime_to_ns(delta) >> 10);
}
- return error;
+ return NULL;
}
static char *pm_verb(int event)
@@ -413,6 +334,26 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
}
+static int dpm_run_callback(pm_callback_t cb, struct device *dev,
+ pm_message_t state, char *info)
+{
+ ktime_t calltime;
+ int error;
+
+ if (!cb)
+ return 0;
+
+ calltime = initcall_debug_start(dev);
+
+ pm_dev_dbg(dev, state, info);
+ error = cb(dev);
+ suspend_report_result(cb, error);
+
+ initcall_debug_report(dev, calltime, error);
+
+ return error;
+}
+
/*------------------------- Resume routines -------------------------*/
/**
@@ -425,25 +366,34 @@ static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
*/
static int device_resume_noirq(struct device *dev, pm_message_t state)
{
+ pm_callback_t callback = NULL;
+ char *info = NULL;
int error = 0;
TRACE_DEVICE(dev);
TRACE_RESUME(0);
if (dev->pm_domain) {
- pm_dev_dbg(dev, state, "EARLY power domain ");
- error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
+ info = "EARLY power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
} else if (dev->type && dev->type->pm) {
- pm_dev_dbg(dev, state, "EARLY type ");
- error = pm_noirq_op(dev, dev->type->pm, state);
+ info = "EARLY type ";
+ callback = pm_noirq_op(dev->type->pm, state);
} else if (dev->class && dev->class->pm) {
- pm_dev_dbg(dev, state, "EARLY class ");
- error = pm_noirq_op(dev, dev->class->pm, state);
+ info = "EARLY class ";
+ callback = pm_noirq_op(dev->class->pm, state);
} else if (dev->bus && dev->bus->pm) {
- pm_dev_dbg(dev, state, "EARLY ");
- error = pm_noirq_op(dev, dev->bus->pm, state);
+ info = "EARLY bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
}
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "EARLY driver ";
+ callback = pm_noirq_op(dev->driver->pm, state);
+ }
+
+ error = dpm_run_callback(callback, dev, state, info);
+
TRACE_RESUME(error);
return error;
}
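The net effect, repeated at every phase below, is a fixed lookup order. Paraphrasing it as a helper for illustration only (pm_op() and the pm_callback_t typedef are the ones added by this patch; the legacy class/bus paths and locking are left out):

static pm_callback_t pick_callback(struct device *dev, pm_message_t state)
{
	pm_callback_t cb = NULL;

	if (dev->pm_domain)				/* 1. PM domain */
		cb = pm_op(&dev->pm_domain->ops, state);
	else if (dev->type && dev->type->pm)		/* 2. device type */
		cb = pm_op(dev->type->pm, state);
	else if (dev->class && dev->class->pm)		/* 3. class */
		cb = pm_op(dev->class->pm, state);
	else if (dev->bus && dev->bus->pm)		/* 4. bus */
		cb = pm_op(dev->bus->pm, state);

	/* New in this patch: fall back to the driver's own dev_pm_ops. */
	if (!cb && dev->driver && dev->driver->pm)
		cb = pm_op(dev->driver->pm, state);

	return cb;
}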
@@ -486,26 +436,6 @@ void dpm_resume_noirq(pm_message_t state)
EXPORT_SYMBOL_GPL(dpm_resume_noirq);
/**
- * legacy_resume - Execute a legacy (bus or class) resume callback for device.
- * @dev: Device to resume.
- * @cb: Resume callback to execute.
- */
-static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
-{
- int error;
- ktime_t calltime;
-
- calltime = initcall_debug_start(dev);
-
- error = cb(dev);
- suspend_report_result(cb, error);
-
- initcall_debug_report(dev, calltime, error);
-
- return error;
-}
-
-/**
* device_resume - Execute "resume" callbacks for given device.
* @dev: Device to handle.
* @state: PM transition of the system being carried out.
@@ -513,6 +443,8 @@ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
*/
static int device_resume(struct device *dev, pm_message_t state, bool async)
{
+ pm_callback_t callback = NULL;
+ char *info = NULL;
int error = 0;
bool put = false;
@@ -535,40 +467,48 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
put = true;
if (dev->pm_domain) {
- pm_dev_dbg(dev, state, "power domain ");
- error = pm_op(dev, &dev->pm_domain->ops, state);
- goto End;
+ info = "power domain ";
+ callback = pm_op(&dev->pm_domain->ops, state);
+ goto Driver;
}
if (dev->type && dev->type->pm) {
- pm_dev_dbg(dev, state, "type ");
- error = pm_op(dev, dev->type->pm, state);
- goto End;
+ info = "type ";
+ callback = pm_op(dev->type->pm, state);
+ goto Driver;
}
if (dev->class) {
if (dev->class->pm) {
- pm_dev_dbg(dev, state, "class ");
- error = pm_op(dev, dev->class->pm, state);
- goto End;
+ info = "class ";
+ callback = pm_op(dev->class->pm, state);
+ goto Driver;
} else if (dev->class->resume) {
- pm_dev_dbg(dev, state, "legacy class ");
- error = legacy_resume(dev, dev->class->resume);
+ info = "legacy class ";
+ callback = dev->class->resume;
goto End;
}
}
if (dev->bus) {
if (dev->bus->pm) {
- pm_dev_dbg(dev, state, "");
- error = pm_op(dev, dev->bus->pm, state);
+ info = "bus ";
+ callback = pm_op(dev->bus->pm, state);
} else if (dev->bus->resume) {
- pm_dev_dbg(dev, state, "legacy ");
- error = legacy_resume(dev, dev->bus->resume);
+ info = "legacy bus ";
+ callback = dev->bus->resume;
+ goto End;
}
}
+ Driver:
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "driver ";
+ callback = pm_op(dev->driver->pm, state);
+ }
+
End:
+ error = dpm_run_callback(callback, dev, state, info);
dev->power.is_suspended = false;
Unlock:
@@ -660,24 +600,33 @@ void dpm_resume(pm_message_t state)
*/
static void device_complete(struct device *dev, pm_message_t state)
{
+ void (*callback)(struct device *) = NULL;
+ char *info = NULL;
+
device_lock(dev);
if (dev->pm_domain) {
- pm_dev_dbg(dev, state, "completing power domain ");
- if (dev->pm_domain->ops.complete)
- dev->pm_domain->ops.complete(dev);
+ info = "completing power domain ";
+ callback = dev->pm_domain->ops.complete;
} else if (dev->type && dev->type->pm) {
- pm_dev_dbg(dev, state, "completing type ");
- if (dev->type->pm->complete)
- dev->type->pm->complete(dev);
+ info = "completing type ";
+ callback = dev->type->pm->complete;
} else if (dev->class && dev->class->pm) {
- pm_dev_dbg(dev, state, "completing class ");
- if (dev->class->pm->complete)
- dev->class->pm->complete(dev);
+ info = "completing class ";
+ callback = dev->class->pm->complete;
} else if (dev->bus && dev->bus->pm) {
- pm_dev_dbg(dev, state, "completing ");
- if (dev->bus->pm->complete)
- dev->bus->pm->complete(dev);
+ info = "completing bus ";
+ callback = dev->bus->pm->complete;
+ }
+
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "completing driver ";
+ callback = dev->driver->pm->complete;
+ }
+
+ if (callback) {
+ pm_dev_dbg(dev, state, info);
+ callback(dev);
}
device_unlock(dev);
@@ -763,31 +712,29 @@ static pm_message_t resume_event(pm_message_t sleep_state)
*/
static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
- int error;
+ pm_callback_t callback = NULL;
+ char *info = NULL;
if (dev->pm_domain) {
- pm_dev_dbg(dev, state, "LATE power domain ");
- error = pm_noirq_op(dev, &dev->pm_domain->ops, state);
- if (error)
- return error;
+ info = "LATE power domain ";
+ callback = pm_noirq_op(&dev->pm_domain->ops, state);
} else if (dev->type && dev->type->pm) {
- pm_dev_dbg(dev, state, "LATE type ");
- error = pm_noirq_op(dev, dev->type->pm, state);
- if (error)
- return error;
+ info = "LATE type ";
+ callback = pm_noirq_op(dev->type->pm, state);
} else if (dev->class && dev->class->pm) {
- pm_dev_dbg(dev, state, "LATE class ");
- error = pm_noirq_op(dev, dev->class->pm, state);
- if (error)
- return error;
+ info = "LATE class ";
+ callback = pm_noirq_op(dev->class->pm, state);
} else if (dev->bus && dev->bus->pm) {
- pm_dev_dbg(dev, state, "LATE ");
- error = pm_noirq_op(dev, dev->bus->pm, state);
- if (error)
- return error;
+ info = "LATE bus ";
+ callback = pm_noirq_op(dev->bus->pm, state);
}
- return 0;
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "LATE driver ";
+ callback = pm_noirq_op(dev->driver->pm, state);
+ }
+
+ return dpm_run_callback(callback, dev, state, info);
}
/**
@@ -864,6 +811,8 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
*/
static int __device_suspend(struct device *dev, pm_message_t state, bool async)
{
+ pm_callback_t callback = NULL;
+ char *info = NULL;
int error = 0;
dpm_wait_for_children(dev, async);
@@ -884,22 +833,22 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
device_lock(dev);
if (dev->pm_domain) {
- pm_dev_dbg(dev, state, "power domain ");
- error = pm_op(dev, &dev->pm_domain->ops, state);
- goto End;
+ info = "power domain ";
+ callback = pm_op(&dev->pm_domain->ops, state);
+ goto Run;
}
if (dev->type && dev->type->pm) {
- pm_dev_dbg(dev, state, "type ");
- error = pm_op(dev, dev->type->pm, state);
- goto End;
+ info = "type ";
+ callback = pm_op(dev->type->pm, state);
+ goto Run;
}
if (dev->class) {
if (dev->class->pm) {
- pm_dev_dbg(dev, state, "class ");
- error = pm_op(dev, dev->class->pm, state);
- goto End;
+ info = "class ";
+ callback = pm_op(dev->class->pm, state);
+ goto Run;
} else if (dev->class->suspend) {
pm_dev_dbg(dev, state, "legacy class ");
error = legacy_suspend(dev, state, dev->class->suspend);
@@ -909,14 +858,23 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
if (dev->bus) {
if (dev->bus->pm) {
- pm_dev_dbg(dev, state, "");
- error = pm_op(dev, dev->bus->pm, state);
+ info = "bus ";
+ callback = pm_op(dev->bus->pm, state);
} else if (dev->bus->suspend) {
- pm_dev_dbg(dev, state, "legacy ");
+ pm_dev_dbg(dev, state, "legacy bus ");
error = legacy_suspend(dev, state, dev->bus->suspend);
+ goto End;
}
}
+ Run:
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "driver ";
+ callback = pm_op(dev->driver->pm, state);
+ }
+
+ error = dpm_run_callback(callback, dev, state, info);
+
End:
if (!error) {
dev->power.is_suspended = true;
@@ -1022,6 +980,8 @@ int dpm_suspend(pm_message_t state)
*/
static int device_prepare(struct device *dev, pm_message_t state)
{
+ int (*callback)(struct device *) = NULL;
+ char *info = NULL;
int error = 0;
device_lock(dev);
@@ -1029,34 +989,29 @@ static int device_prepare(struct device *dev, pm_message_t state)
dev->power.wakeup_path = device_may_wakeup(dev);
if (dev->pm_domain) {
- pm_dev_dbg(dev, state, "preparing power domain ");
- if (dev->pm_domain->ops.prepare)
- error = dev->pm_domain->ops.prepare(dev);
- suspend_report_result(dev->pm_domain->ops.prepare, error);
- if (error)
- goto End;
+ info = "preparing power domain ";
+ callback = dev->pm_domain->ops.prepare;
} else if (dev->type && dev->type->pm) {
- pm_dev_dbg(dev, state, "preparing type ");
- if (dev->type->pm->prepare)
- error = dev->type->pm->prepare(dev);
- suspend_report_result(dev->type->pm->prepare, error);
- if (error)
- goto End;
+ info = "preparing type ";
+ callback = dev->type->pm->prepare;
} else if (dev->class && dev->class->pm) {
- pm_dev_dbg(dev, state, "preparing class ");
- if (dev->class->pm->prepare)
- error = dev->class->pm->prepare(dev);
- suspend_report_result(dev->class->pm->prepare, error);
- if (error)
- goto End;
+ info = "preparing class ";
+ callback = dev->class->pm->prepare;
} else if (dev->bus && dev->bus->pm) {
- pm_dev_dbg(dev, state, "preparing ");
- if (dev->bus->pm->prepare)
- error = dev->bus->pm->prepare(dev);
- suspend_report_result(dev->bus->pm->prepare, error);
+ info = "preparing bus ";
+ callback = dev->bus->pm->prepare;
+ }
+
+ if (!callback && dev->driver && dev->driver->pm) {
+ info = "preparing driver ";
+ callback = dev->driver->pm->prepare;
+ }
+
+ if (callback) {
+ error = callback(dev);
+ suspend_report_result(callback, error);
}
- End:
device_unlock(dev);
return error;
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index 86de6c5..c5d3588 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -47,21 +47,29 @@ static DEFINE_MUTEX(dev_pm_qos_mtx);
static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
/**
- * dev_pm_qos_read_value - Get PM QoS constraint for a given device.
+ * __dev_pm_qos_read_value - Get PM QoS constraint for a given device.
+ * @dev: Device to get the PM QoS constraint value for.
+ *
+ * This routine must be called with dev->power.lock held.
+ */
+s32 __dev_pm_qos_read_value(struct device *dev)
+{
+ struct pm_qos_constraints *c = dev->power.constraints;
+
+ return c ? pm_qos_read_value(c) : 0;
+}
+
+/**
+ * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
* @dev: Device to get the PM QoS constraint value for.
*/
s32 dev_pm_qos_read_value(struct device *dev)
{
- struct pm_qos_constraints *c;
unsigned long flags;
- s32 ret = 0;
+ s32 ret;
spin_lock_irqsave(&dev->power.lock, flags);
-
- c = dev->power.constraints;
- if (c)
- ret = pm_qos_read_value(c);
-
+ ret = __dev_pm_qos_read_value(dev);
spin_unlock_irqrestore(&dev->power.lock, flags);
return ret;
@@ -412,3 +420,28 @@ int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier)
return blocking_notifier_chain_unregister(&dev_pm_notifiers, notifier);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_global_notifier);
+
+/**
+ * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
+ * @dev: Device whose ancestor to add the request for.
+ * @req: Pointer to the preallocated handle.
+ * @value: Constraint latency value.
+ */
+int dev_pm_qos_add_ancestor_request(struct device *dev,
+ struct dev_pm_qos_request *req, s32 value)
+{
+ struct device *ancestor = dev->parent;
+ int error = -ENODEV;
+
+ while (ancestor && !ancestor->power.ignore_children)
+ ancestor = ancestor->parent;
+
+ if (ancestor)
+ error = dev_pm_qos_add_request(ancestor, req, value);
+
+ if (error)
+ req->dev = NULL;
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
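A minimal driver-side sketch, assuming the existing dev_pm_qos_remove_request() as the removal counterpart and treating the 2000 value as microseconds (matching the NSEC_PER_USEC conversion done in rpm_suspend() below); the my_child_* names are invented:

#include <linux/pm_qos.h>

static struct dev_pm_qos_request my_ancestor_req;

static int my_child_probe(struct device *dev)
{
	/* Constrain the closest ancestor that has power.ignore_children set,
	 * i.e. the one that runtime-suspends independently of this device. */
	int ret = dev_pm_qos_add_ancestor_request(dev, &my_ancestor_req, 2000);

	return ret < 0 ? ret : 0;
}

static int my_child_remove(struct device *dev)
{
	/* req->dev is cleared on failure, so only remove an active request. */
	if (my_ancestor_req.dev)
		dev_pm_qos_remove_request(&my_ancestor_req);
	return 0;
}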
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 8c78443..541f821 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -250,6 +250,9 @@ static int rpm_idle(struct device *dev, int rpmflags)
else
callback = NULL;
+ if (!callback && dev->driver && dev->driver->pm)
+ callback = dev->driver->pm->runtime_idle;
+
if (callback)
__rpm_callback(callback, dev);
@@ -279,6 +282,47 @@ static int rpm_callback(int (*cb)(struct device *), struct device *dev)
return retval != -EACCES ? retval : -EIO;
}
+struct rpm_qos_data {
+ ktime_t time_now;
+ s64 constraint_ns;
+};
+
+/**
+ * rpm_update_qos_constraint - Update a given PM QoS constraint data.
+ * @dev: Device whose timing data to use.
+ * @data: PM QoS constraint data to update.
+ *
+ * Use the suspend timing data of @dev to update PM QoS constraint data pointed
+ * to by @data.
+ */
+static int rpm_update_qos_constraint(struct device *dev, void *data)
+{
+ struct rpm_qos_data *qos = data;
+ unsigned long flags;
+ s64 delta_ns;
+ int ret = 0;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ if (dev->power.max_time_suspended_ns < 0)
+ goto out;
+
+ delta_ns = dev->power.max_time_suspended_ns -
+ ktime_to_ns(ktime_sub(qos->time_now, dev->power.suspend_time));
+ if (delta_ns <= 0) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (qos->constraint_ns > delta_ns || qos->constraint_ns == 0)
+ qos->constraint_ns = delta_ns;
+
+ out:
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+
+ return ret;
+}
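For example (invented figures): if a parent's own per-device QoS constraint reads 5000 us (5000000 ns) and its two already-suspended children have 3000000 ns and 4500000 ns left of their max_time_suspended_ns budgets, the device_for_each_child() walk in rpm_suspend() tightens qos.constraint_ns to 3000000; if any child's budget has already expired, rpm_update_qos_constraint() returns -EBUSY and the suspend backs out through the new fail path.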
+
/**
* rpm_suspend - Carry out runtime suspend of given device.
* @dev: Device to suspend.
@@ -305,6 +349,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
{
int (*callback)(struct device *);
struct device *parent = NULL;
+ struct rpm_qos_data qos;
int retval;
trace_rpm_suspend(dev, rpmflags);
@@ -400,8 +445,38 @@ static int rpm_suspend(struct device *dev, int rpmflags)
goto out;
}
+ qos.constraint_ns = __dev_pm_qos_read_value(dev);
+ if (qos.constraint_ns < 0) {
+ /* Negative constraint means "never suspend". */
+ retval = -EPERM;
+ goto out;
+ }
+ qos.constraint_ns *= NSEC_PER_USEC;
+ qos.time_now = ktime_get();
+
__update_runtime_status(dev, RPM_SUSPENDING);
+ if (!dev->power.ignore_children) {
+ if (dev->power.irq_safe)
+ spin_unlock(&dev->power.lock);
+ else
+ spin_unlock_irq(&dev->power.lock);
+
+ retval = device_for_each_child(dev, &qos,
+ rpm_update_qos_constraint);
+
+ if (dev->power.irq_safe)
+ spin_lock(&dev->power.lock);
+ else
+ spin_lock_irq(&dev->power.lock);
+
+ if (retval)
+ goto fail;
+ }
+
+ dev->power.suspend_time = qos.time_now;
+ dev->power.max_time_suspended_ns = qos.constraint_ns ? : -1;
+
if (dev->pm_domain)
callback = dev->pm_domain->ops.runtime_suspend;
else if (dev->type && dev->type->pm)
@@ -413,28 +488,13 @@ static int rpm_suspend(struct device *dev, int rpmflags)
else
callback = NULL;
+ if (!callback && dev->driver && dev->driver->pm)
+ callback = dev->driver->pm->runtime_suspend;
+
retval = rpm_callback(callback, dev);
- if (retval) {
- __update_runtime_status(dev, RPM_ACTIVE);
- dev->power.deferred_resume = false;
- if (retval == -EAGAIN || retval == -EBUSY) {
- dev->power.runtime_error = 0;
+ if (retval)
+ goto fail;
- /*
- * If the callback routine failed an autosuspend, and
- * if the last_busy time has been updated so that there
- * is a new autosuspend expiration time, automatically
- * reschedule another autosuspend.
- */
- if ((rpmflags & RPM_AUTO) &&
- pm_runtime_autosuspend_expiration(dev) != 0)
- goto repeat;
- } else {
- pm_runtime_cancel_pending(dev);
- }
- wake_up_all(&dev->power.wait_queue);
- goto out;
- }
no_callback:
__update_runtime_status(dev, RPM_SUSPENDED);
pm_runtime_deactivate_timer(dev);
@@ -466,6 +526,29 @@ static int rpm_suspend(struct device *dev, int rpmflags)
trace_rpm_return_int(dev, _THIS_IP_, retval);
return retval;
+
+ fail:
+ __update_runtime_status(dev, RPM_ACTIVE);
+ dev->power.suspend_time = ktime_set(0, 0);
+ dev->power.max_time_suspended_ns = -1;
+ dev->power.deferred_resume = false;
+ if (retval == -EAGAIN || retval == -EBUSY) {
+ dev->power.runtime_error = 0;
+
+ /*
+ * If the callback routine failed an autosuspend, and
+ * if the last_busy time has been updated so that there
+ * is a new autosuspend expiration time, automatically
+ * reschedule another autosuspend.
+ */
+ if ((rpmflags & RPM_AUTO) &&
+ pm_runtime_autosuspend_expiration(dev) != 0)
+ goto repeat;
+ } else {
+ pm_runtime_cancel_pending(dev);
+ }
+ wake_up_all(&dev->power.wait_queue);
+ goto out;
}
/**
@@ -620,6 +703,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
if (dev->power.no_callbacks)
goto no_callback; /* Assume success. */
+ dev->power.suspend_time = ktime_set(0, 0);
+ dev->power.max_time_suspended_ns = -1;
+
__update_runtime_status(dev, RPM_RESUMING);
if (dev->pm_domain)
@@ -633,6 +719,9 @@ static int rpm_resume(struct device *dev, int rpmflags)
else
callback = NULL;
+ if (!callback && dev->driver && dev->driver->pm)
+ callback = dev->driver->pm->runtime_resume;
+
retval = rpm_callback(callback, dev);
if (retval) {
__update_runtime_status(dev, RPM_SUSPENDED);
@@ -1279,6 +1368,9 @@ void pm_runtime_init(struct device *dev)
setup_timer(&dev->power.suspend_timer, pm_suspend_timer_fn,
(unsigned long)dev);
+ dev->power.suspend_time = ktime_set(0, 0);
+ dev->power.max_time_suspended_ns = -1;
+
init_waitqueue_head(&dev->power.wait_queue);
}
@@ -1296,3 +1388,28 @@ void pm_runtime_remove(struct device *dev)
if (dev->power.irq_safe && dev->parent)
pm_runtime_put_sync(dev->parent);
}
+
+/**
+ * pm_runtime_update_max_time_suspended - Update device's suspend time data.
+ * @dev: Device to handle.
+ * @delta_ns: Value to subtract from the device's max_time_suspended_ns field.
+ *
+ * Update the device's power.max_time_suspended_ns field by subtracting
+ * @delta_ns from it. The resulting value of power.max_time_suspended_ns is
+ * never negative.
+ */
+void pm_runtime_update_max_time_suspended(struct device *dev, s64 delta_ns)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->power.lock, flags);
+
+ if (delta_ns > 0 && dev->power.max_time_suspended_ns > 0) {
+ if (dev->power.max_time_suspended_ns > delta_ns)
+ dev->power.max_time_suspended_ns -= delta_ns;
+ else
+ dev->power.max_time_suspended_ns = 0;
+ }
+
+ spin_unlock_irqrestore(&dev->power.lock, flags);
+}
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 2fc6a66..0f6c7fb 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -13,3 +13,6 @@ config REGMAP_I2C
config REGMAP_SPI
tristate
+
+config REGMAP_IRQ
+ bool
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index 0573c8a..defd579 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -1,4 +1,6 @@
-obj-$(CONFIG_REGMAP) += regmap.o regcache.o regcache-indexed.o regcache-rbtree.o regcache-lzo.o
+obj-$(CONFIG_REGMAP) += regmap.o regcache.o
+obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o
obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
+obj-$(CONFIG_REGMAP_IRQ) += regmap-irq.o
diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h
index 348ff02..1a02b75 100644
--- a/drivers/base/regmap/internal.h
+++ b/drivers/base/regmap/internal.h
@@ -74,6 +74,7 @@ struct regmap {
struct reg_default *reg_defaults;
const void *reg_defaults_raw;
void *cache;
+ bool cache_dirty;
};
struct regcache_ops {
@@ -105,7 +106,7 @@ static inline void regmap_debugfs_exit(struct regmap *map) { }
#endif
/* regcache core declarations */
-int regcache_init(struct regmap *map);
+int regcache_init(struct regmap *map, const struct regmap_config *config);
void regcache_exit(struct regmap *map);
int regcache_read(struct regmap *map,
unsigned int reg, unsigned int *value);
@@ -118,10 +119,7 @@ unsigned int regcache_get_val(const void *base, unsigned int idx,
bool regcache_set_val(void *base, unsigned int idx,
unsigned int val, unsigned int word_size);
int regcache_lookup_reg(struct regmap *map, unsigned int reg);
-int regcache_insert_reg(struct regmap *map, unsigned int reg,
- unsigned int val);
-extern struct regcache_ops regcache_indexed_ops;
extern struct regcache_ops regcache_rbtree_ops;
extern struct regcache_ops regcache_lzo_ops;
diff --git a/drivers/base/regmap/regcache-indexed.c b/drivers/base/regmap/regcache-indexed.c
deleted file mode 100644
index 507731a..0000000
--- a/drivers/base/regmap/regcache-indexed.c
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Register cache access API - indexed caching support
- *
- * Copyright 2011 Wolfson Microelectronics plc
- *
- * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/slab.h>
-
-#include "internal.h"
-
-static int regcache_indexed_read(struct regmap *map, unsigned int reg,
- unsigned int *value)
-{
- int ret;
-
- ret = regcache_lookup_reg(map, reg);
- if (ret >= 0)
- *value = map->reg_defaults[ret].def;
-
- return ret;
-}
-
-static int regcache_indexed_write(struct regmap *map, unsigned int reg,
- unsigned int value)
-{
- int ret;
-
- ret = regcache_lookup_reg(map, reg);
- if (ret < 0)
- return regcache_insert_reg(map, reg, value);
- map->reg_defaults[ret].def = value;
- return 0;
-}
-
-static int regcache_indexed_sync(struct regmap *map)
-{
- unsigned int i;
- int ret;
-
- for (i = 0; i < map->num_reg_defaults; i++) {
- ret = _regmap_write(map, map->reg_defaults[i].reg,
- map->reg_defaults[i].def);
- if (ret < 0)
- return ret;
- dev_dbg(map->dev, "Synced register %#x, value %#x\n",
- map->reg_defaults[i].reg,
- map->reg_defaults[i].def);
- }
- return 0;
-}
-
-struct regcache_ops regcache_indexed_ops = {
- .type = REGCACHE_INDEXED,
- .name = "indexed",
- .read = regcache_indexed_read,
- .write = regcache_indexed_write,
- .sync = regcache_indexed_sync
-};
diff --git a/drivers/base/regmap/regcache-lzo.c b/drivers/base/regmap/regcache-lzo.c
index 066aeec..b7d1614 100644
--- a/drivers/base/regmap/regcache-lzo.c
+++ b/drivers/base/regmap/regcache-lzo.c
@@ -15,6 +15,8 @@
#include "internal.h"
+static int regcache_lzo_exit(struct regmap *map);
+
struct regcache_lzo_ctx {
void *wmem;
void *dst;
@@ -27,7 +29,7 @@ struct regcache_lzo_ctx {
};
#define LZO_BLOCK_NUM 8
-static int regcache_lzo_block_count(void)
+static int regcache_lzo_block_count(struct regmap *map)
{
return LZO_BLOCK_NUM;
}
@@ -106,19 +108,22 @@ static inline int regcache_lzo_get_blkindex(struct regmap *map,
unsigned int reg)
{
return (reg * map->cache_word_size) /
- DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count());
+ DIV_ROUND_UP(map->cache_size_raw,
+ regcache_lzo_block_count(map));
}
static inline int regcache_lzo_get_blkpos(struct regmap *map,
unsigned int reg)
{
- return reg % (DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count()) /
+ return reg % (DIV_ROUND_UP(map->cache_size_raw,
+ regcache_lzo_block_count(map)) /
map->cache_word_size);
}
static inline int regcache_lzo_get_blksize(struct regmap *map)
{
- return DIV_ROUND_UP(map->cache_size_raw, regcache_lzo_block_count());
+ return DIV_ROUND_UP(map->cache_size_raw,
+ regcache_lzo_block_count(map));
}
static int regcache_lzo_init(struct regmap *map)
@@ -131,7 +136,7 @@ static int regcache_lzo_init(struct regmap *map)
ret = 0;
- blkcount = regcache_lzo_block_count();
+ blkcount = regcache_lzo_block_count(map);
map->cache = kzalloc(blkcount * sizeof *lzo_blocks,
GFP_KERNEL);
if (!map->cache)
@@ -190,7 +195,7 @@ static int regcache_lzo_init(struct regmap *map)
return 0;
err:
- regcache_exit(map);
+ regcache_lzo_exit(map);
return ret;
}
@@ -203,7 +208,7 @@ static int regcache_lzo_exit(struct regmap *map)
if (!lzo_blocks)
return 0;
- blkcount = regcache_lzo_block_count();
+ blkcount = regcache_lzo_block_count(map);
/*
* the pointer to the bitmap used for syncing the cache
* is shared amongst all lzo_blocks. Ensure it is freed
@@ -351,7 +356,7 @@ static int regcache_lzo_sync(struct regmap *map)
}
struct regcache_ops regcache_lzo_ops = {
- .type = REGCACHE_LZO,
+ .type = REGCACHE_COMPRESSED,
.name = "lzo",
.init = regcache_lzo_init,
.exit = regcache_lzo_exit,
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index e314984..32620c4 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -11,12 +11,15 @@
*/
#include <linux/slab.h>
+#include <linux/debugfs.h>
#include <linux/rbtree.h>
+#include <linux/seq_file.h>
#include "internal.h"
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
unsigned int value);
+static int regcache_rbtree_exit(struct regmap *map);
struct regcache_rbtree_node {
/* the actual rbtree node holding this block */
@@ -124,6 +127,60 @@ static int regcache_rbtree_insert(struct rb_root *root,
return 1;
}
+#ifdef CONFIG_DEBUG_FS
+static int rbtree_show(struct seq_file *s, void *ignored)
+{
+ struct regmap *map = s->private;
+ struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
+ struct regcache_rbtree_node *n;
+ struct rb_node *node;
+ unsigned int base, top;
+ int nodes = 0;
+ int registers = 0;
+
+ mutex_lock(&map->lock);
+
+ for (node = rb_first(&rbtree_ctx->root); node != NULL;
+ node = rb_next(node)) {
+ n = container_of(node, struct regcache_rbtree_node, node);
+
+ regcache_rbtree_get_base_top_reg(n, &base, &top);
+ seq_printf(s, "%x-%x (%d)\n", base, top, top - base + 1);
+
+ nodes++;
+ registers += top - base + 1;
+ }
+
+ seq_printf(s, "%d nodes, %d registers, average %d registers\n",
+ nodes, registers, registers / nodes);
+
+ mutex_unlock(&map->lock);
+
+ return 0;
+}
+
+static int rbtree_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, rbtree_show, inode->i_private);
+}
+
+static const struct file_operations rbtree_fops = {
+ .open = rbtree_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void rbtree_debugfs_init(struct regmap *map)
+{
+ debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
+}
+#else
+static void rbtree_debugfs_init(struct regmap *map)
+{
+}
+#endif
+
static int regcache_rbtree_init(struct regmap *map)
{
struct regcache_rbtree_ctx *rbtree_ctx;
@@ -146,10 +203,12 @@ static int regcache_rbtree_init(struct regmap *map)
goto err;
}
+ rbtree_debugfs_init(map);
+
return 0;
err:
- regcache_exit(map);
+ regcache_rbtree_exit(map);
return ret;
}
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c
index 666f6f5..d1daa5e 100644
--- a/drivers/base/regmap/regcache.c
+++ b/drivers/base/regmap/regcache.c
@@ -19,7 +19,6 @@
#include "internal.h"
static const struct regcache_ops *cache_types[] = {
- &regcache_indexed_ops,
&regcache_rbtree_ops,
&regcache_lzo_ops,
};
@@ -54,22 +53,24 @@ static int regcache_hw_init(struct regmap *map)
for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
val = regcache_get_val(map->reg_defaults_raw,
i, map->cache_word_size);
- if (!val)
+ if (regmap_volatile(map, i))
continue;
count++;
}
map->reg_defaults = kmalloc(count * sizeof(struct reg_default),
GFP_KERNEL);
- if (!map->reg_defaults)
- return -ENOMEM;
+ if (!map->reg_defaults) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
/* fill the reg_defaults */
map->num_reg_defaults = count;
for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
val = regcache_get_val(map->reg_defaults_raw,
i, map->cache_word_size);
- if (!val)
+ if (regmap_volatile(map, i))
continue;
map->reg_defaults[j].reg = i;
map->reg_defaults[j].def = val;
@@ -77,9 +78,15 @@ static int regcache_hw_init(struct regmap *map)
}
return 0;
+
+err_free:
+ if (map->cache_free)
+ kfree(map->reg_defaults_raw);
+
+ return ret;
}
-int regcache_init(struct regmap *map)
+int regcache_init(struct regmap *map, const struct regmap_config *config)
{
int ret;
int i;
@@ -100,6 +107,12 @@ int regcache_init(struct regmap *map)
return -EINVAL;
}
+ map->num_reg_defaults = config->num_reg_defaults;
+ map->num_reg_defaults_raw = config->num_reg_defaults_raw;
+ map->reg_defaults_raw = config->reg_defaults_raw;
+ map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
+ map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
+
map->cache = NULL;
map->cache_ops = cache_types[i];
@@ -112,10 +125,10 @@ int regcache_init(struct regmap *map)
* won't vanish from under us. We'll need to make
* a copy of it.
*/
- if (map->reg_defaults) {
+ if (config->reg_defaults) {
if (!map->num_reg_defaults)
return -EINVAL;
- tmp_buf = kmemdup(map->reg_defaults, map->num_reg_defaults *
+ tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
sizeof(struct reg_default), GFP_KERNEL);
if (!tmp_buf)
return -ENOMEM;
@@ -136,9 +149,18 @@ int regcache_init(struct regmap *map)
if (map->cache_ops->init) {
dev_dbg(map->dev, "Initializing %s cache\n",
map->cache_ops->name);
- return map->cache_ops->init(map);
+ ret = map->cache_ops->init(map);
+ if (ret)
+ goto err_free;
}
return 0;
+
+err_free:
+ kfree(map->reg_defaults);
+ if (map->cache_free)
+ kfree(map->reg_defaults_raw);
+
+ return ret;
}
void regcache_exit(struct regmap *map)
@@ -171,16 +193,21 @@ void regcache_exit(struct regmap *map)
int regcache_read(struct regmap *map,
unsigned int reg, unsigned int *value)
{
+ int ret;
+
if (map->cache_type == REGCACHE_NONE)
return -ENOSYS;
BUG_ON(!map->cache_ops);
- if (!regmap_readable(map, reg))
- return -EIO;
+ if (!regmap_volatile(map, reg)) {
+ ret = map->cache_ops->read(map, reg, value);
- if (!regmap_volatile(map, reg))
- return map->cache_ops->read(map, reg, value);
+ if (ret == 0)
+ trace_regmap_reg_read_cache(map->dev, reg, *value);
+
+ return ret;
+ }
return -EINVAL;
}
@@ -241,6 +268,8 @@ int regcache_sync(struct regmap *map)
map->cache_ops->name);
name = map->cache_ops->name;
trace_regcache_sync(map->dev, name, "start");
+ if (!map->cache_dirty)
+ goto out;
if (map->cache_ops->sync) {
ret = map->cache_ops->sync(map);
} else {
@@ -291,6 +320,23 @@ void regcache_cache_only(struct regmap *map, bool enable)
EXPORT_SYMBOL_GPL(regcache_cache_only);
/**
+ * regcache_mark_dirty: Mark the register cache as dirty
+ *
+ * @map: map to mark
+ *
+ * Mark the register cache as dirty, for example due to the device
+ * having been powered down for suspend. If the cache is not marked
+ * as dirty then the cache sync will be suppressed.
+ */
+void regcache_mark_dirty(struct regmap *map)
+{
+ mutex_lock(&map->lock);
+ map->cache_dirty = true;
+ mutex_unlock(&map->lock);
+}
+EXPORT_SYMBOL_GPL(regcache_mark_dirty);
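A minimal sketch of the intended pairing with regcache_cache_only() and regcache_sync(), both of which already exist in this file; struct my_codec and the drvdata layout are hypothetical:

#include <linux/device.h>
#include <linux/regmap.h>

struct my_codec {
	struct regmap *regmap;
};

static int my_codec_suspend(struct device *dev)
{
	struct my_codec *codec = dev_get_drvdata(dev);

	/* The part is about to lose power: keep writes in the cache and
	 * remember that the hardware will need a full resync. */
	regcache_cache_only(codec->regmap, true);
	regcache_mark_dirty(codec->regmap);
	return 0;
}

static int my_codec_resume(struct device *dev)
{
	struct my_codec *codec = dev_get_drvdata(dev);

	regcache_cache_only(codec->regmap, false);
	/* regcache_sync() is now a no-op unless the cache was marked dirty. */
	return regcache_sync(codec->regmap);
}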
+
+/**
* regcache_cache_bypass: Put a register map into cache bypass mode
*
* @map: map to configure
@@ -381,22 +427,3 @@ int regcache_lookup_reg(struct regmap *map, unsigned int reg)
else
return -ENOENT;
}
-
-int regcache_insert_reg(struct regmap *map, unsigned int reg,
- unsigned int val)
-{
- void *tmp;
-
- tmp = krealloc(map->reg_defaults,
- (map->num_reg_defaults + 1) * sizeof(struct reg_default),
- GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
- map->reg_defaults = tmp;
- map->num_reg_defaults++;
- map->reg_defaults[map->num_reg_defaults - 1].reg = reg;
- map->reg_defaults[map->num_reg_defaults - 1].def = val;
- sort(map->reg_defaults, map->num_reg_defaults,
- sizeof(struct reg_default), regcache_default_cmp, NULL);
- return 0;
-}
diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c
new file mode 100644
index 0000000..428836f
--- /dev/null
+++ b/drivers/base/regmap/regmap-irq.c
@@ -0,0 +1,302 @@
+/*
+ * regmap based irq_chip
+ *
+ * Copyright 2011 Wolfson Microelectronics plc
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/export.h>
+#include <linux/regmap.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+
+#include "internal.h"
+
+struct regmap_irq_chip_data {
+ struct mutex lock;
+
+ struct regmap *map;
+ struct regmap_irq_chip *chip;
+
+ int irq_base;
+
+ void *status_reg_buf;
+ unsigned int *status_buf;
+ unsigned int *mask_buf;
+ unsigned int *mask_buf_def;
+};
+
+static inline const
+struct regmap_irq *irq_to_regmap_irq(struct regmap_irq_chip_data *data,
+ int irq)
+{
+ return &data->chip->irqs[irq - data->irq_base];
+}
+
+static void regmap_irq_lock(struct irq_data *data)
+{
+ struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&d->lock);
+}
+
+static void regmap_irq_sync_unlock(struct irq_data *data)
+{
+ struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+ int i, ret;
+
+ /*
+ * If there's been a change in the mask write it back to the
+ * hardware. We rely on the use of the regmap core cache to
+ * suppress pointless writes.
+ */
+ for (i = 0; i < d->chip->num_regs; i++) {
+ ret = regmap_update_bits(d->map, d->chip->mask_base + i,
+ d->mask_buf_def[i], d->mask_buf[i]);
+ if (ret != 0)
+ dev_err(d->map->dev, "Failed to sync masks in %x\n",
+ d->chip->mask_base + i);
+ }
+
+ mutex_unlock(&d->lock);
+}
+
+static void regmap_irq_enable(struct irq_data *data)
+{
+ struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+ const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->irq);
+
+ d->mask_buf[irq_data->reg_offset] &= ~irq_data->mask;
+}
+
+static void regmap_irq_disable(struct irq_data *data)
+{
+ struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data);
+ const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->irq);
+
+ d->mask_buf[irq_data->reg_offset] |= irq_data->mask;
+}
+
+static struct irq_chip regmap_irq_chip = {
+ .name = "regmap",
+ .irq_bus_lock = regmap_irq_lock,
+ .irq_bus_sync_unlock = regmap_irq_sync_unlock,
+ .irq_disable = regmap_irq_disable,
+ .irq_enable = regmap_irq_enable,
+};
+
+static irqreturn_t regmap_irq_thread(int irq, void *d)
+{
+ struct regmap_irq_chip_data *data = d;
+ struct regmap_irq_chip *chip = data->chip;
+ struct regmap *map = data->map;
+ int ret, i;
+ u8 *buf8 = data->status_reg_buf;
+ u16 *buf16 = data->status_reg_buf;
+ u32 *buf32 = data->status_reg_buf;
+ bool handled = false;
+
+ ret = regmap_bulk_read(map, chip->status_base, data->status_reg_buf,
+ chip->num_regs);
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to read IRQ status: %d\n", ret);
+ return IRQ_NONE;
+ }
+
+ /*
+ * Ignore masked IRQs and ack if we need to; we ack early so
+ * there is no race between handling and acknowledging the
+ * interrupt. We assume that typically few of the interrupts
+ * will fire simultaneously so don't worry about overhead from
+ * doing a write per register.
+ */
+ for (i = 0; i < data->chip->num_regs; i++) {
+ switch (map->format.val_bytes) {
+ case 1:
+ data->status_buf[i] = buf8[i];
+ break;
+ case 2:
+ data->status_buf[i] = buf16[i];
+ break;
+ case 4:
+ data->status_buf[i] = buf32[i];
+ break;
+ default:
+ BUG();
+ return IRQ_NONE;
+ }
+
+ data->status_buf[i] &= ~data->mask_buf[i];
+
+ if (data->status_buf[i] && chip->ack_base) {
+ ret = regmap_write(map, chip->ack_base + i,
+ data->status_buf[i]);
+ if (ret != 0)
+ dev_err(map->dev, "Failed to ack 0x%x: %d\n",
+ chip->ack_base + i, ret);
+ }
+ }
+
+ for (i = 0; i < chip->num_irqs; i++) {
+ if (data->status_buf[chip->irqs[i].reg_offset] &
+ chip->irqs[i].mask) {
+ handle_nested_irq(data->irq_base + i);
+ handled = true;
+ }
+ }
+
+ if (handled)
+ return IRQ_HANDLED;
+ else
+ return IRQ_NONE;
+}
+
+/**
+ * regmap_add_irq_chip(): Use standard regmap IRQ controller handling
+ *
+ * @map: The regmap for the device.
+ * @irq: The IRQ the device uses to signal interrupts.
+ * @irq_flags: The IRQF_ flags to use for the primary interrupt.
+ * @irq_base: Base IRQ number for the allocated descriptors, passed to irq_alloc_descs().
+ * @chip: Configuration for the interrupt controller.
+ * @data: Runtime data structure for the controller, allocated on success.
+ *
+ * Returns 0 on success or an errno on failure.
+ *
+ * In order for this to be efficient the chip really should use a
+ * register cache. The chip driver is responsible for restoring the
+ * register values used by the IRQ controller over suspend and resume.
+ */
+int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags,
+ int irq_base, struct regmap_irq_chip *chip,
+ struct regmap_irq_chip_data **data)
+{
+ struct regmap_irq_chip_data *d;
+ int cur_irq, i;
+ int ret = -ENOMEM;
+
+ irq_base = irq_alloc_descs(irq_base, 0, chip->num_irqs, 0);
+ if (irq_base < 0) {
+ dev_warn(map->dev, "Failed to allocate IRQs: %d\n",
+ irq_base);
+ return irq_base;
+ }
+
+ d = kzalloc(sizeof(*d), GFP_KERNEL);
+ if (!d)
+ return -ENOMEM;
+
+ d->status_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
+ GFP_KERNEL);
+ if (!d->status_buf)
+ goto err_alloc;
+
+ d->status_reg_buf = kzalloc(map->format.val_bytes * chip->num_regs,
+ GFP_KERNEL);
+ if (!d->status_reg_buf)
+ goto err_alloc;
+
+ d->mask_buf = kzalloc(sizeof(unsigned int) * chip->num_regs,
+ GFP_KERNEL);
+ if (!d->mask_buf)
+ goto err_alloc;
+
+ d->mask_buf_def = kzalloc(sizeof(unsigned int) * chip->num_regs,
+ GFP_KERNEL);
+ if (!d->mask_buf_def)
+ goto err_alloc;
+
+ d->map = map;
+ d->chip = chip;
+ d->irq_base = irq_base;
+ mutex_init(&d->lock);
+
+ for (i = 0; i < chip->num_irqs; i++)
+ d->mask_buf_def[chip->irqs[i].reg_offset]
+ |= chip->irqs[i].mask;
+
+ /* Mask all the interrupts by default */
+ for (i = 0; i < chip->num_regs; i++) {
+ d->mask_buf[i] = d->mask_buf_def[i];
+ ret = regmap_write(map, chip->mask_base + i, d->mask_buf[i]);
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to set masks in 0x%x: %d\n",
+ chip->mask_base + i, ret);
+ goto err_alloc;
+ }
+ }
+
+ /* Register them with genirq */
+ for (cur_irq = irq_base;
+ cur_irq < chip->num_irqs + irq_base;
+ cur_irq++) {
+ irq_set_chip_data(cur_irq, d);
+ irq_set_chip_and_handler(cur_irq, &regmap_irq_chip,
+ handle_edge_irq);
+ irq_set_nested_thread(cur_irq, 1);
+
+ /* ARM needs us to explicitly flag the IRQ as valid
+ * and will set it noprobe when we do so. */
+#ifdef CONFIG_ARM
+ set_irq_flags(cur_irq, IRQF_VALID);
+#else
+ irq_set_noprobe(cur_irq);
+#endif
+ }
+
+ ret = request_threaded_irq(irq, NULL, regmap_irq_thread, irq_flags,
+ chip->name, d);
+ if (ret != 0) {
+ dev_err(map->dev, "Failed to request IRQ %d: %d\n", irq, ret);
+ goto err_alloc;
+ }
+
+ return 0;
+
+err_alloc:
+ kfree(d->mask_buf_def);
+ kfree(d->mask_buf);
+ kfree(d->status_reg_buf);
+ kfree(d->status_buf);
+ kfree(d);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regmap_add_irq_chip);
+
+/**
+ * regmap_del_irq_chip(): Stop interrupt handling for a regmap IRQ chip
+ *
+ * @irq: Primary IRQ for the device
+ * @d: regmap_irq_chip_data allocated by regmap_add_irq_chip()
+ */
+void regmap_del_irq_chip(int irq, struct regmap_irq_chip_data *d)
+{
+ if (!d)
+ return;
+
+ free_irq(irq, d);
+ kfree(d->mask_buf_def);
+ kfree(d->mask_buf);
+ kfree(d->status_reg_buf);
+ kfree(d->status_buf);
+ kfree(d);
+}
+EXPORT_SYMBOL_GPL(regmap_del_irq_chip);
+
+/**
+ * regmap_irq_chip_get_base(): Retrieve interrupt base for a regmap IRQ chip
+ *
+ * Useful for drivers to request their own IRQs.
+ *
+ * @data: regmap_irq controller to operate on.
+ */
+int regmap_irq_chip_get_base(struct regmap_irq_chip_data *data)
+{
+ return data->irq_base;
+}
+EXPORT_SYMBOL_GPL(regmap_irq_chip_get_base);
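A condensed usage sketch; every register address, the IRQ names and all my_* identifiers are invented, and only the regmap_irq_chip fields actually referenced by the code above are filled in. Passing -1 as irq_base to ask irq_alloc_descs() for a dynamic range is an assumption on my part:

#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/regmap.h>

enum { MY_IRQ_GPIO, MY_IRQ_RTC, MY_NUM_IRQS };

static struct regmap_irq my_irqs[MY_NUM_IRQS] = {
	[MY_IRQ_GPIO] = { .reg_offset = 0, .mask = BIT(0) },
	[MY_IRQ_RTC]  = { .reg_offset = 0, .mask = BIT(1) },
};

static struct regmap_irq_chip my_irq_chip = {
	.name		= "my_chip",
	.status_base	= 0x10,	/* MY_REG_IRQ_STATUS */
	.mask_base	= 0x11,	/* MY_REG_IRQ_MASK */
	.ack_base	= 0x12,	/* MY_REG_IRQ_ACK */
	.num_regs	= 1,
	.irqs		= my_irqs,
	.num_irqs	= ARRAY_SIZE(my_irqs),
};

static struct regmap_irq_chip_data *my_irq_data;

static int my_chip_add_irqs(struct regmap *map, int irq)
{
	int ret;

	ret = regmap_add_irq_chip(map, irq, IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				  -1, &my_irq_chip, &my_irq_data);
	if (ret)
		return ret;

	/* Individual lines are irq_base + index, e.g. for the RTC line: */
	return regmap_irq_chip_get_base(my_irq_data) + MY_IRQ_RTC;
}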
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index bf441db..6555803 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -64,6 +64,18 @@ bool regmap_precious(struct regmap *map, unsigned int reg)
return false;
}
+static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
+ unsigned int num)
+{
+ unsigned int i;
+
+ for (i = 0; i < num; i++)
+ if (!regmap_volatile(map, reg + i))
+ return false;
+
+ return true;
+}
+
static void regmap_format_4_12_write(struct regmap *map,
unsigned int reg, unsigned int val)
{
@@ -78,6 +90,16 @@ static void regmap_format_7_9_write(struct regmap *map,
*out = cpu_to_be16((reg << 9) | val);
}
+static void regmap_format_10_14_write(struct regmap *map,
+ unsigned int reg, unsigned int val)
+{
+ u8 *out = map->work_buf;
+
+ out[2] = val;
+ out[1] = (val >> 8) | (reg << 6);
+ out[0] = reg >> 2;
+}
+
static void regmap_format_8(void *buf, unsigned int val)
{
u8 *b = buf;
@@ -127,7 +149,7 @@ struct regmap *regmap_init(struct device *dev,
int ret = -EINVAL;
if (!bus || !config)
- return NULL;
+ goto err;
map = kzalloc(sizeof(*map), GFP_KERNEL);
if (map == NULL) {
@@ -147,12 +169,6 @@ struct regmap *regmap_init(struct device *dev,
map->volatile_reg = config->volatile_reg;
map->precious_reg = config->precious_reg;
map->cache_type = config->cache_type;
- map->reg_defaults = config->reg_defaults;
- map->num_reg_defaults = config->num_reg_defaults;
- map->num_reg_defaults_raw = config->num_reg_defaults_raw;
- map->reg_defaults_raw = config->reg_defaults_raw;
- map->cache_size_raw = (config->val_bits / 8) * config->num_reg_defaults_raw;
- map->cache_word_size = config->val_bits / 8;
if (config->read_flag_mask || config->write_flag_mask) {
map->read_flag_mask = config->read_flag_mask;
@@ -182,6 +198,16 @@ struct regmap *regmap_init(struct device *dev,
}
break;
+ case 10:
+ switch (config->val_bits) {
+ case 14:
+ map->format.format_write = regmap_format_10_14_write;
+ break;
+ default:
+ goto err_map;
+ }
+ break;
+
case 8:
map->format.format_reg = regmap_format_8;
break;
@@ -215,14 +241,16 @@ struct regmap *regmap_init(struct device *dev,
goto err_map;
}
- ret = regcache_init(map);
- if (ret < 0)
- goto err_map;
-
regmap_debugfs_init(map);
+ ret = regcache_init(map, config);
+ if (ret < 0)
+ goto err_free_workbuf;
+
return map;
+err_free_workbuf:
+ kfree(map->work_buf);
err_map:
kfree(map);
err:
@@ -231,6 +259,42 @@ err:
EXPORT_SYMBOL_GPL(regmap_init);
/**
+ * regmap_reinit_cache(): Reinitialise the current register cache
+ *
+ * @map: Register map to operate on.
+ * @config: New configuration. Only the cache data will be used.
+ *
+ * Discard any existing register cache for the map and initialize a
+ * new cache. This can be used to restore the cache to defaults or to
+ * update the cache configuration to reflect runtime discovery of the
+ * hardware.
+ */
+int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
+{
+ int ret;
+
+ mutex_lock(&map->lock);
+
+ regcache_exit(map);
+
+ map->max_register = config->max_register;
+ map->writeable_reg = config->writeable_reg;
+ map->readable_reg = config->readable_reg;
+ map->volatile_reg = config->volatile_reg;
+ map->precious_reg = config->precious_reg;
+ map->cache_type = config->cache_type;
+
+ map->cache_bypass = false;
+ map->cache_only = false;
+
+ ret = regcache_init(map, config);
+
+ mutex_unlock(&map->lock);
+
+ return ret;
+}
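A brief sketch of the intended use, for a hypothetical driver that only learns the exact silicon revision after talking to the part (MY_REG_REVISION and my_rev2_config are invented; the latter would carry the rev-2 register defaults):

#include <linux/regmap.h>

#define MY_REG_REVISION	0x00	/* hypothetical ID register */

static const struct regmap_config my_rev2_config; /* rev-2 reg_defaults etc. */

static int my_setup_cache(struct regmap *map)
{
	unsigned int rev;
	int ret;

	ret = regmap_read(map, MY_REG_REVISION, &rev);
	if (ret)
		return ret;

	/* Rev 2 parts ship different defaults: rebuild the cache for them. */
	return rev == 2 ? regmap_reinit_cache(map, &my_rev2_config) : 0;
}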
+
+/**
* regmap_exit(): Free a previously allocated register map
*/
void regmap_exit(struct regmap *map)
@@ -306,8 +370,10 @@ int _regmap_write(struct regmap *map, unsigned int reg,
ret = regcache_write(map, reg, val);
if (ret != 0)
return ret;
- if (map->cache_only)
+ if (map->cache_only) {
+ map->cache_dirty = true;
return 0;
+ }
}
trace_regmap_reg_write(map->dev, reg, val);
@@ -375,9 +441,11 @@ EXPORT_SYMBOL_GPL(regmap_write);
int regmap_raw_write(struct regmap *map, unsigned int reg,
const void *val, size_t val_len)
{
+ size_t val_count = val_len / map->format.val_bytes;
int ret;
- WARN_ON(map->cache_type != REGCACHE_NONE);
+ WARN_ON(!regmap_volatile_range(map, reg, val_count) &&
+ map->cache_type != REGCACHE_NONE);
mutex_lock(&map->lock);
@@ -422,15 +490,15 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
{
int ret;
- if (!map->format.parse_val)
- return -EINVAL;
-
if (!map->cache_bypass) {
ret = regcache_read(map, reg, val);
if (ret == 0)
return 0;
}
+ if (!map->format.parse_val)
+ return -EINVAL;
+
if (map->cache_only)
return -EBUSY;
@@ -481,15 +549,11 @@ EXPORT_SYMBOL_GPL(regmap_read);
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
size_t val_len)
{
+ size_t val_count = val_len / map->format.val_bytes;
int ret;
- int i;
- bool vol = true;
-
- for (i = 0; i < val_len / map->format.val_bytes; i++)
- if (!regmap_volatile(map, reg + i))
- vol = false;
- WARN_ON(!vol && map->cache_type != REGCACHE_NONE);
+ WARN_ON(!regmap_volatile_range(map, reg, val_count) &&
+ map->cache_type != REGCACHE_NONE);
mutex_lock(&map->lock);
@@ -517,16 +581,11 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
{
int ret, i;
size_t val_bytes = map->format.val_bytes;
- bool vol = true;
+ bool vol = regmap_volatile_range(map, reg, val_count);
if (!map->format.parse_val)
return -EINVAL;
- /* Is this a block of volatile registers? */
- for (i = 0; i < val_count; i++)
- if (!regmap_volatile(map, reg + i))
- vol = false;
-
if (vol || map->cache_type == REGCACHE_NONE) {
ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
if (ret != 0)
@@ -546,40 +605,73 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
-/**
- * remap_update_bits: Perform a read/modify/write cycle on the register map
- *
- * @map: Register map to update
- * @reg: Register to update
- * @mask: Bitmask to change
- * @val: New value for bitmask
- *
- * Returns zero for success, a negative number on error.
- */
-int regmap_update_bits(struct regmap *map, unsigned int reg,
- unsigned int mask, unsigned int val)
+static int _regmap_update_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
{
int ret;
- unsigned int tmp;
+ unsigned int tmp, orig;
mutex_lock(&map->lock);
- ret = _regmap_read(map, reg, &tmp);
+ ret = _regmap_read(map, reg, &orig);
if (ret != 0)
goto out;
- tmp &= ~mask;
+ tmp = orig & ~mask;
tmp |= val & mask;
- ret = _regmap_write(map, reg, tmp);
+ if (tmp != orig) {
+ ret = _regmap_write(map, reg, tmp);
+ *change = true;
+ } else {
+ *change = false;
+ }
out:
mutex_unlock(&map->lock);
return ret;
}
+
+/**
+ * regmap_update_bits: Perform a read/modify/write cycle on the register map
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_update_bits(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val)
+{
+ bool change;
+ return _regmap_update_bits(map, reg, mask, val, &change);
+}
EXPORT_SYMBOL_GPL(regmap_update_bits);
+/**
+ * regmap_update_bits_check: Perform a read/modify/write cycle on the
+ * register map and report if updated
+ *
+ * @map: Register map to update
+ * @reg: Register to update
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ * @change: Boolean indicating if a write was done
+ *
+ * Returns zero for success, a negative number on error.
+ */
+int regmap_update_bits_check(struct regmap *map, unsigned int reg,
+ unsigned int mask, unsigned int val,
+ bool *change)
+{
+ return _regmap_update_bits(map, reg, mask, val, change);
+}
+EXPORT_SYMBOL_GPL(regmap_update_bits_check);
+
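Because regmap_update_bits_check() reports whether the read/modify/write actually changed the register, callers can skip work that is only needed when the value really moved. A minimal sketch with a hypothetical register and enable bit:

#include <linux/delay.h>
#include <linux/regmap.h>

#define CHIP_PWR_CTRL	0x12	/* hypothetical register address */
#define CHIP_PWR_EN	0x01	/* hypothetical enable bit */

static int chip_power_on(struct regmap *map)
{
	bool change;
	int ret;

	/* Set the enable bit; 'change' reports whether a write was issued. */
	ret = regmap_update_bits_check(map, CHIP_PWR_CTRL,
				       CHIP_PWR_EN, CHIP_PWR_EN, &change);
	if (ret != 0)
		return ret;

	/* Only wait for the supply to ramp if the bit actually flipped. */
	if (change)
		usleep_range(1000, 2000);

	return 0;
}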
static int __init regmap_initcall(void)
{
regmap_debugfs_initcall();
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
deleted file mode 100644
index 9dff77b..0000000
--- a/drivers/base/sys.c
+++ /dev/null
@@ -1,391 +0,0 @@
-/*
- * sys.c - pseudo-bus for system 'devices' (cpus, PICs, timers, etc)
- *
- * Copyright (c) 2002-3 Patrick Mochel
- * 2002-3 Open Source Development Lab
- *
- * This file is released under the GPLv2
- *
- * This exports a 'system' bus type.
- * By default, a 'sys' bus gets added to the root of the system. There will
- * always be core system devices. Devices can use sysdev_register() to
- * add themselves as children of the system bus.
- */
-
-#include <linux/sysdev.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/pm.h>
-#include <linux/device.h>
-#include <linux/mutex.h>
-#include <linux/interrupt.h>
-
-#include "base.h"
-
-#define to_sysdev(k) container_of(k, struct sys_device, kobj)
-#define to_sysdev_attr(a) container_of(a, struct sysdev_attribute, attr)
-
-
-static ssize_t
-sysdev_show(struct kobject *kobj, struct attribute *attr, char *buffer)
-{
- struct sys_device *sysdev = to_sysdev(kobj);
- struct sysdev_attribute *sysdev_attr = to_sysdev_attr(attr);
-
- if (sysdev_attr->show)
- return sysdev_attr->show(sysdev, sysdev_attr, buffer);
- return -EIO;
-}
-
-
-static ssize_t
-sysdev_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t count)
-{
- struct sys_device *sysdev = to_sysdev(kobj);
- struct sysdev_attribute *sysdev_attr = to_sysdev_attr(attr);
-
- if (sysdev_attr->store)
- return sysdev_attr->store(sysdev, sysdev_attr, buffer, count);
- return -EIO;
-}
-
-static const struct sysfs_ops sysfs_ops = {
- .show = sysdev_show,
- .store = sysdev_store,
-};
-
-static struct kobj_type ktype_sysdev = {
- .sysfs_ops = &sysfs_ops,
-};
-
-
-int sysdev_create_file(struct sys_device *s, struct sysdev_attribute *a)
-{
- return sysfs_create_file(&s->kobj, &a->attr);
-}
-
-
-void sysdev_remove_file(struct sys_device *s, struct sysdev_attribute *a)
-{
- sysfs_remove_file(&s->kobj, &a->attr);
-}
-
-EXPORT_SYMBOL_GPL(sysdev_create_file);
-EXPORT_SYMBOL_GPL(sysdev_remove_file);
-
-#define to_sysdev_class(k) container_of(k, struct sysdev_class, kset.kobj)
-#define to_sysdev_class_attr(a) container_of(a, \
- struct sysdev_class_attribute, attr)
-
-static ssize_t sysdev_class_show(struct kobject *kobj, struct attribute *attr,
- char *buffer)
-{
- struct sysdev_class *class = to_sysdev_class(kobj);
- struct sysdev_class_attribute *class_attr = to_sysdev_class_attr(attr);
-
- if (class_attr->show)
- return class_attr->show(class, class_attr, buffer);
- return -EIO;
-}
-
-static ssize_t sysdev_class_store(struct kobject *kobj, struct attribute *attr,
- const char *buffer, size_t count)
-{
- struct sysdev_class *class = to_sysdev_class(kobj);
- struct sysdev_class_attribute *class_attr = to_sysdev_class_attr(attr);
-
- if (class_attr->store)
- return class_attr->store(class, class_attr, buffer, count);
- return -EIO;
-}
-
-static const struct sysfs_ops sysfs_class_ops = {
- .show = sysdev_class_show,
- .store = sysdev_class_store,
-};
-
-static struct kobj_type ktype_sysdev_class = {
- .sysfs_ops = &sysfs_class_ops,
-};
-
-int sysdev_class_create_file(struct sysdev_class *c,
- struct sysdev_class_attribute *a)
-{
- return sysfs_create_file(&c->kset.kobj, &a->attr);
-}
-EXPORT_SYMBOL_GPL(sysdev_class_create_file);
-
-void sysdev_class_remove_file(struct sysdev_class *c,
- struct sysdev_class_attribute *a)
-{
- sysfs_remove_file(&c->kset.kobj, &a->attr);
-}
-EXPORT_SYMBOL_GPL(sysdev_class_remove_file);
-
-static struct kset *system_kset;
-
-int sysdev_class_register(struct sysdev_class *cls)
-{
- int retval;
-
- pr_debug("Registering sysdev class '%s'\n", cls->name);
-
- INIT_LIST_HEAD(&cls->drivers);
- memset(&cls->kset.kobj, 0x00, sizeof(struct kobject));
- cls->kset.kobj.parent = &system_kset->kobj;
- cls->kset.kobj.ktype = &ktype_sysdev_class;
- cls->kset.kobj.kset = system_kset;
-
- retval = kobject_set_name(&cls->kset.kobj, "%s", cls->name);
- if (retval)
- return retval;
-
- retval = kset_register(&cls->kset);
- if (!retval && cls->attrs)
- retval = sysfs_create_files(&cls->kset.kobj,
- (const struct attribute **)cls->attrs);
- return retval;
-}
-
-void sysdev_class_unregister(struct sysdev_class *cls)
-{
- pr_debug("Unregistering sysdev class '%s'\n",
- kobject_name(&cls->kset.kobj));
- if (cls->attrs)
- sysfs_remove_files(&cls->kset.kobj,
- (const struct attribute **)cls->attrs);
- kset_unregister(&cls->kset);
-}
-
-EXPORT_SYMBOL_GPL(sysdev_class_register);
-EXPORT_SYMBOL_GPL(sysdev_class_unregister);
-
-static DEFINE_MUTEX(sysdev_drivers_lock);
-
-/*
- * @dev != NULL means that we're unwinding because some drv->add()
- * failed for some reason. You need to grab sysdev_drivers_lock before
- * calling this.
- */
-static void __sysdev_driver_remove(struct sysdev_class *cls,
- struct sysdev_driver *drv,
- struct sys_device *from_dev)
-{
- struct sys_device *dev = from_dev;
-
- list_del_init(&drv->entry);
- if (!cls)
- return;
-
- if (!drv->remove)
- goto kset_put;
-
- if (dev)
- list_for_each_entry_continue_reverse(dev, &cls->kset.list,
- kobj.entry)
- drv->remove(dev);
- else
- list_for_each_entry(dev, &cls->kset.list, kobj.entry)
- drv->remove(dev);
-
-kset_put:
- kset_put(&cls->kset);
-}
-
-/**
- * sysdev_driver_register - Register auxiliary driver
- * @cls: Device class driver belongs to.
- * @drv: Driver.
- *
- * @drv is inserted into @cls->drivers to be
- * called on each operation on devices of that class. The refcount
- * of @cls is incremented.
- */
-int sysdev_driver_register(struct sysdev_class *cls, struct sysdev_driver *drv)
-{
- struct sys_device *dev = NULL;
- int err = 0;
-
- if (!cls) {
- WARN(1, KERN_WARNING "sysdev: invalid class passed to %s!\n",
- __func__);
- return -EINVAL;
- }
-
- /* Check whether this driver has already been added to a class. */
- if (drv->entry.next && !list_empty(&drv->entry))
- WARN(1, KERN_WARNING "sysdev: class %s: driver (%p) has already"
- " been registered to a class, something is wrong, but "
- "will forge on!\n", cls->name, drv);
-
- mutex_lock(&sysdev_drivers_lock);
- if (cls && kset_get(&cls->kset)) {
- list_add_tail(&drv->entry, &cls->drivers);
-
- /* If devices of this class already exist, tell the driver */
- if (drv->add) {
- list_for_each_entry(dev, &cls->kset.list, kobj.entry) {
- err = drv->add(dev);
- if (err)
- goto unwind;
- }
- }
- } else {
- err = -EINVAL;
- WARN(1, KERN_ERR "%s: invalid device class\n", __func__);
- }
-
- goto unlock;
-
-unwind:
- __sysdev_driver_remove(cls, drv, dev);
-
-unlock:
- mutex_unlock(&sysdev_drivers_lock);
- return err;
-}
-
-/**
- * sysdev_driver_unregister - Remove an auxiliary driver.
- * @cls: Class driver belongs to.
- * @drv: Driver.
- */
-void sysdev_driver_unregister(struct sysdev_class *cls,
- struct sysdev_driver *drv)
-{
- mutex_lock(&sysdev_drivers_lock);
- __sysdev_driver_remove(cls, drv, NULL);
- mutex_unlock(&sysdev_drivers_lock);
-}
-EXPORT_SYMBOL_GPL(sysdev_driver_register);
-EXPORT_SYMBOL_GPL(sysdev_driver_unregister);
-
-/**
- * sysdev_register - add a system device to the tree
- * @sysdev: device in question
- *
- */
-int sysdev_register(struct sys_device *sysdev)
-{
- int error;
- struct sysdev_class *cls = sysdev->cls;
-
- if (!cls)
- return -EINVAL;
-
- pr_debug("Registering sys device of class '%s'\n",
- kobject_name(&cls->kset.kobj));
-
- /* initialize the kobject to 0, in case it had previously been used */
- memset(&sysdev->kobj, 0x00, sizeof(struct kobject));
-
- /* Make sure the kset is set */
- sysdev->kobj.kset = &cls->kset;
-
- /* Register the object */
- error = kobject_init_and_add(&sysdev->kobj, &ktype_sysdev, NULL,
- "%s%d", kobject_name(&cls->kset.kobj),
- sysdev->id);
-
- if (!error) {
- struct sysdev_driver *drv;
-
- pr_debug("Registering sys device '%s'\n",
- kobject_name(&sysdev->kobj));
-
- mutex_lock(&sysdev_drivers_lock);
- /* Generic notification is implicit, because it's that
- * code that should have called us.
- */
-
- /* Notify class auxiliary drivers */
- list_for_each_entry(drv, &cls->drivers, entry) {
- if (drv->add)
- drv->add(sysdev);
- }
- mutex_unlock(&sysdev_drivers_lock);
- kobject_uevent(&sysdev->kobj, KOBJ_ADD);
- }
-
- return error;
-}
-
-void sysdev_unregister(struct sys_device *sysdev)
-{
- struct sysdev_driver *drv;
-
- mutex_lock(&sysdev_drivers_lock);
- list_for_each_entry(drv, &sysdev->cls->drivers, entry) {
- if (drv->remove)
- drv->remove(sysdev);
- }
- mutex_unlock(&sysdev_drivers_lock);
-
- kobject_put(&sysdev->kobj);
-}
-
-EXPORT_SYMBOL_GPL(sysdev_register);
-EXPORT_SYMBOL_GPL(sysdev_unregister);
-
-int __init system_bus_init(void)
-{
- system_kset = kset_create_and_add("system", NULL, &devices_kset->kobj);
- if (!system_kset)
- return -ENOMEM;
- return 0;
-}
-
-#define to_ext_attr(x) container_of(x, struct sysdev_ext_attribute, attr)
-
-ssize_t sysdev_store_ulong(struct sys_device *sysdev,
- struct sysdev_attribute *attr,
- const char *buf, size_t size)
-{
- struct sysdev_ext_attribute *ea = to_ext_attr(attr);
- char *end;
- unsigned long new = simple_strtoul(buf, &end, 0);
- if (end == buf)
- return -EINVAL;
- *(unsigned long *)(ea->var) = new;
- /* Always return full write size even if we didn't consume all */
- return size;
-}
-EXPORT_SYMBOL_GPL(sysdev_store_ulong);
-
-ssize_t sysdev_show_ulong(struct sys_device *sysdev,
- struct sysdev_attribute *attr,
- char *buf)
-{
- struct sysdev_ext_attribute *ea = to_ext_attr(attr);
- return snprintf(buf, PAGE_SIZE, "%lx\n", *(unsigned long *)(ea->var));
-}
-EXPORT_SYMBOL_GPL(sysdev_show_ulong);
-
-ssize_t sysdev_store_int(struct sys_device *sysdev,
- struct sysdev_attribute *attr,
- const char *buf, size_t size)
-{
- struct sysdev_ext_attribute *ea = to_ext_attr(attr);
- char *end;
- long new = simple_strtol(buf, &end, 0);
- if (end == buf || new > INT_MAX || new < INT_MIN)
- return -EINVAL;
- *(int *)(ea->var) = new;
- /* Always return full write size even if we didn't consume all */
- return size;
-}
-EXPORT_SYMBOL_GPL(sysdev_store_int);
-
-ssize_t sysdev_show_int(struct sys_device *sysdev,
- struct sysdev_attribute *attr,
- char *buf)
-{
- struct sysdev_ext_attribute *ea = to_ext_attr(attr);
- return snprintf(buf, PAGE_SIZE, "%d\n", *(int *)(ea->var));
-}
-EXPORT_SYMBOL_GPL(sysdev_show_int);
-
diff --git a/drivers/base/topology.c b/drivers/base/topology.c
index f6f37a0..ae989c5 100644
--- a/drivers/base/topology.c
+++ b/drivers/base/topology.c
@@ -23,7 +23,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
-#include <linux/sysdev.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/cpu.h>
@@ -32,14 +31,14 @@
#include <linux/topology.h>
#define define_one_ro_named(_name, _func) \
-static SYSDEV_ATTR(_name, 0444, _func, NULL)
+ static DEVICE_ATTR(_name, 0444, _func, NULL)
#define define_one_ro(_name) \
-static SYSDEV_ATTR(_name, 0444, show_##_name, NULL)
+ static DEVICE_ATTR(_name, 0444, show_##_name, NULL)
#define define_id_show_func(name) \
-static ssize_t show_##name(struct sys_device *dev, \
- struct sysdev_attribute *attr, char *buf) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
{ \
unsigned int cpu = dev->id; \
return sprintf(buf, "%d\n", topology_##name(cpu)); \
@@ -65,16 +64,16 @@ static ssize_t show_cpumap(int type, const struct cpumask *mask, char *buf)
#ifdef arch_provides_topology_pointers
#define define_siblings_show_map(name) \
-static ssize_t show_##name(struct sys_device *dev, \
- struct sysdev_attribute *attr, char *buf) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
{ \
unsigned int cpu = dev->id; \
return show_cpumap(0, topology_##name(cpu), buf); \
}
#define define_siblings_show_list(name) \
-static ssize_t show_##name##_list(struct sys_device *dev, \
- struct sysdev_attribute *attr, \
+static ssize_t show_##name##_list(struct device *dev, \
+ struct device_attribute *attr, \
char *buf) \
{ \
unsigned int cpu = dev->id; \
@@ -83,15 +82,15 @@ static ssize_t show_##name##_list(struct sys_device *dev, \
#else
#define define_siblings_show_map(name) \
-static ssize_t show_##name(struct sys_device *dev, \
- struct sysdev_attribute *attr, char *buf) \
+static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
{ \
return show_cpumap(0, topology_##name(dev->id), buf); \
}
#define define_siblings_show_list(name) \
-static ssize_t show_##name##_list(struct sys_device *dev, \
- struct sysdev_attribute *attr, \
+static ssize_t show_##name##_list(struct device *dev, \
+ struct device_attribute *attr, \
char *buf) \
{ \
return show_cpumap(1, topology_##name(dev->id), buf); \
@@ -124,16 +123,16 @@ define_one_ro_named(book_siblings_list, show_book_cpumask_list);
#endif
static struct attribute *default_attrs[] = {
- &attr_physical_package_id.attr,
- &attr_core_id.attr,
- &attr_thread_siblings.attr,
- &attr_thread_siblings_list.attr,
- &attr_core_siblings.attr,
- &attr_core_siblings_list.attr,
+ &dev_attr_physical_package_id.attr,
+ &dev_attr_core_id.attr,
+ &dev_attr_thread_siblings.attr,
+ &dev_attr_thread_siblings_list.attr,
+ &dev_attr_core_siblings.attr,
+ &dev_attr_core_siblings_list.attr,
#ifdef CONFIG_SCHED_BOOK
- &attr_book_id.attr,
- &attr_book_siblings.attr,
- &attr_book_siblings_list.attr,
+ &dev_attr_book_id.attr,
+ &dev_attr_book_siblings.attr,
+ &dev_attr_book_siblings_list.attr,
#endif
NULL
};
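The renames in default_attrs[] follow directly from the macro change above: DEVICE_ATTR(_name, ...) declares a struct device_attribute named dev_attr_<name>, whereas SYSDEV_ATTR used to declare attr_<name>. For instance, a define_one_ro(physical_package_id) use now expands roughly to the following (a sketch, not verbatim preprocessor output):

static struct device_attribute dev_attr_physical_package_id =
	__ATTR(physical_package_id, 0444, show_physical_package_id, NULL);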
@@ -146,16 +145,16 @@ static struct attribute_group topology_attr_group = {
/* Add/Remove cpu_topology interface for CPU device */
static int __cpuinit topology_add_dev(unsigned int cpu)
{
- struct sys_device *sys_dev = get_cpu_sysdev(cpu);
+ struct device *dev = get_cpu_device(cpu);
- return sysfs_create_group(&sys_dev->kobj, &topology_attr_group);
+ return sysfs_create_group(&dev->kobj, &topology_attr_group);
}
static void __cpuinit topology_remove_dev(unsigned int cpu)
{
- struct sys_device *sys_dev = get_cpu_sysdev(cpu);
+ struct device *dev = get_cpu_device(cpu);
- sysfs_remove_group(&sys_dev->kobj, &topology_attr_group);
+ sysfs_remove_group(&dev->kobj, &topology_attr_group);
}
static int __cpuinit topology_cpu_callback(struct notifier_block *nfb,
diff --git a/drivers/bcma/bcma_private.h b/drivers/bcma/bcma_private.h
index 30a3085..0def898 100644
--- a/drivers/bcma/bcma_private.h
+++ b/drivers/bcma/bcma_private.h
@@ -18,6 +18,10 @@ void bcma_bus_unregister(struct bcma_bus *bus);
int __init bcma_bus_early_register(struct bcma_bus *bus,
struct bcma_device *core_cc,
struct bcma_device *core_mips);
+#ifdef CONFIG_PM
+int bcma_bus_suspend(struct bcma_bus *bus);
+int bcma_bus_resume(struct bcma_bus *bus);
+#endif
/* scan.c */
int bcma_bus_scan(struct bcma_bus *bus);
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 1b51d8b..f59244e 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -21,48 +21,58 @@ static void bcma_host_pci_switch_core(struct bcma_device *core)
pr_debug("Switched to core: 0x%X\n", core->id.id);
}
-static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset)
+/* Provides access to the requested core. Returns the base offset that has
+ * to be used. It makes use of fixed windows when possible. */
+static u16 bcma_host_pci_provide_access_to_core(struct bcma_device *core)
{
+ switch (core->id.id) {
+ case BCMA_CORE_CHIPCOMMON:
+ return 3 * BCMA_CORE_SIZE;
+ case BCMA_CORE_PCIE:
+ return 2 * BCMA_CORE_SIZE;
+ }
+
if (core->bus->mapped_core != core)
bcma_host_pci_switch_core(core);
+ return 0;
+}
+
+static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset)
+{
+ offset += bcma_host_pci_provide_access_to_core(core);
return ioread8(core->bus->mmio + offset);
}
static u16 bcma_host_pci_read16(struct bcma_device *core, u16 offset)
{
- if (core->bus->mapped_core != core)
- bcma_host_pci_switch_core(core);
+ offset += bcma_host_pci_provide_access_to_core(core);
return ioread16(core->bus->mmio + offset);
}
static u32 bcma_host_pci_read32(struct bcma_device *core, u16 offset)
{
- if (core->bus->mapped_core != core)
- bcma_host_pci_switch_core(core);
+ offset += bcma_host_pci_provide_access_to_core(core);
return ioread32(core->bus->mmio + offset);
}
static void bcma_host_pci_write8(struct bcma_device *core, u16 offset,
u8 value)
{
- if (core->bus->mapped_core != core)
- bcma_host_pci_switch_core(core);
+ offset += bcma_host_pci_provide_access_to_core(core);
iowrite8(value, core->bus->mmio + offset);
}
static void bcma_host_pci_write16(struct bcma_device *core, u16 offset,
u16 value)
{
- if (core->bus->mapped_core != core)
- bcma_host_pci_switch_core(core);
+ offset += bcma_host_pci_provide_access_to_core(core);
iowrite16(value, core->bus->mmio + offset);
}
static void bcma_host_pci_write32(struct bcma_device *core, u16 offset,
u32 value)
{
- if (core->bus->mapped_core != core)
- bcma_host_pci_switch_core(core);
+ offset += bcma_host_pci_provide_access_to_core(core);
iowrite32(value, core->bus->mmio + offset);
}
@@ -224,6 +234,35 @@ static void bcma_host_pci_remove(struct pci_dev *dev)
pci_set_drvdata(dev, NULL);
}
+#ifdef CONFIG_PM
+static int bcma_host_pci_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct bcma_bus *bus = pci_get_drvdata(pdev);
+
+ bus->mapped_core = NULL;
+
+ return bcma_bus_suspend(bus);
+}
+
+static int bcma_host_pci_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct bcma_bus *bus = pci_get_drvdata(pdev);
+
+ return bcma_bus_resume(bus);
+}
+
+static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
+ bcma_host_pci_resume);
+#define BCMA_PM_OPS (&bcma_pm_ops)
+
+#else /* CONFIG_PM */
+
+#define BCMA_PM_OPS NULL
+
+#endif /* CONFIG_PM */
+
static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
@@ -239,6 +278,7 @@ static struct pci_driver bcma_pci_bridge_driver = {
.id_table = bcma_pci_bridge_tbl,
.probe = bcma_host_pci_probe,
.remove = bcma_host_pci_remove,
+ .driver.pm = BCMA_PM_OPS,
};
int __init bcma_host_pci_init(void)
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index 70c84b9..ec31f7d 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -169,10 +169,8 @@ int bcma_bus_register(struct bcma_bus *bus)
err = bcma_sprom_get(bus);
if (err == -ENOENT) {
pr_err("No SPROM available\n");
- } else if (err) {
+ } else if (err)
pr_err("Failed to get SPROM: %d\n", err);
- return -ENOENT;
- }
/* Register found cores */
bcma_register_cores(bus);
@@ -240,6 +238,46 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
return 0;
}
+#ifdef CONFIG_PM
+int bcma_bus_suspend(struct bcma_bus *bus)
+{
+ struct bcma_device *core;
+
+ list_for_each_entry(core, &bus->cores, list) {
+ struct device_driver *drv = core->dev.driver;
+ if (drv) {
+ struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
+ if (adrv->suspend)
+ adrv->suspend(core);
+ }
+ }
+ return 0;
+}
+
+int bcma_bus_resume(struct bcma_bus *bus)
+{
+ struct bcma_device *core;
+
+ /* Init CC core */
+ core = bcma_find_core(bus, BCMA_CORE_CHIPCOMMON);
+ if (core) {
+ bus->drv_cc.setup_done = false;
+ bcma_core_chipcommon_init(&bus->drv_cc);
+ }
+
+ list_for_each_entry(core, &bus->cores, list) {
+ struct device_driver *drv = core->dev.driver;
+ if (drv) {
+ struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
+ if (adrv->resume)
+ adrv->resume(core);
+ }
+ }
+
+ return 0;
+}
+#endif
+
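With bcma_bus_suspend() and bcma_bus_resume() walking the core list, an individual core driver only needs to provide per-core callbacks in its struct bcma_driver. The sketch below shows the idea; the hook prototypes are assumed from how the bus code above invokes them, and the mydrv names are placeholders.

#include <linux/bcma/bcma.h>

static int mydrv_suspend(struct bcma_device *core)
{
	/* Quiesce the core and save any state that suspend will clobber. */
	return 0;
}

static int mydrv_resume(struct bcma_device *core)
{
	/* Reprogram the core; ChipCommon has already been re-initialised. */
	return 0;
}

static struct bcma_driver mydrv = {
	.name		= KBUILD_MODNAME,
	.suspend	= mydrv_suspend,
	.resume		= mydrv_resume,
	/* .id_table, .probe and .remove omitted from this sketch */
};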
int __bcma_driver_register(struct bcma_driver *drv, struct module *owner)
{
drv->drv.name = drv->name;
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index cad9948..3a2f672 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -399,15 +399,18 @@ int bcma_bus_scan(struct bcma_bus *bus)
core->bus = bus;
err = bcma_get_next_core(bus, &eromptr, NULL, core_num, core);
- if (err == -ENODEV) {
- core_num++;
- continue;
- } else if (err == -ENXIO)
- continue;
- else if (err == -ESPIPE)
- break;
- else if (err < 0)
+ if (err < 0) {
+ kfree(core);
+ if (err == -ENODEV) {
+ core_num++;
+ continue;
+ } else if (err == -ENXIO) {
+ continue;
+ } else if (err == -ESPIPE) {
+ break;
+ }
return err;
+ }
core->core_index = core_num++;
bus->nr_cores++;
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index d729239..6f230fb 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -129,6 +129,9 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
u16 v;
int i;
+ bus->sprom.revision = sprom[SSB_SPROMSIZE_WORDS_R4 - 1] &
+ SSB_SPROM_REVISION_REV;
+
for (i = 0; i < 3; i++) {
v = sprom[SPOFF(SSB_SPROM8_IL0MAC) + i];
*(((__be16 *)bus->sprom.il0mac) + i) = cpu_to_be16(v);
@@ -136,12 +139,70 @@ static void bcma_sprom_extract_r8(struct bcma_bus *bus, const u16 *sprom)
bus->sprom.board_rev = sprom[SPOFF(SSB_SPROM8_BOARDREV)];
+ bus->sprom.txpid2g[0] = (sprom[SPOFF(SSB_SPROM4_TXPID2G01)] &
+ SSB_SPROM4_TXPID2G0) >> SSB_SPROM4_TXPID2G0_SHIFT;
+ bus->sprom.txpid2g[1] = (sprom[SPOFF(SSB_SPROM4_TXPID2G01)] &
+ SSB_SPROM4_TXPID2G1) >> SSB_SPROM4_TXPID2G1_SHIFT;
+ bus->sprom.txpid2g[2] = (sprom[SPOFF(SSB_SPROM4_TXPID2G23)] &
+ SSB_SPROM4_TXPID2G2) >> SSB_SPROM4_TXPID2G2_SHIFT;
+ bus->sprom.txpid2g[3] = (sprom[SPOFF(SSB_SPROM4_TXPID2G23)] &
+ SSB_SPROM4_TXPID2G3) >> SSB_SPROM4_TXPID2G3_SHIFT;
+
+ bus->sprom.txpid5gl[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL01)] &
+ SSB_SPROM4_TXPID5GL0) >> SSB_SPROM4_TXPID5GL0_SHIFT;
+ bus->sprom.txpid5gl[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL01)] &
+ SSB_SPROM4_TXPID5GL1) >> SSB_SPROM4_TXPID5GL1_SHIFT;
+ bus->sprom.txpid5gl[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL23)] &
+ SSB_SPROM4_TXPID5GL2) >> SSB_SPROM4_TXPID5GL2_SHIFT;
+ bus->sprom.txpid5gl[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5GL23)] &
+ SSB_SPROM4_TXPID5GL3) >> SSB_SPROM4_TXPID5GL3_SHIFT;
+
+ bus->sprom.txpid5g[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5G01)] &
+ SSB_SPROM4_TXPID5G0) >> SSB_SPROM4_TXPID5G0_SHIFT;
+ bus->sprom.txpid5g[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5G01)] &
+ SSB_SPROM4_TXPID5G1) >> SSB_SPROM4_TXPID5G1_SHIFT;
+ bus->sprom.txpid5g[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5G23)] &
+ SSB_SPROM4_TXPID5G2) >> SSB_SPROM4_TXPID5G2_SHIFT;
+ bus->sprom.txpid5g[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5G23)] &
+ SSB_SPROM4_TXPID5G3) >> SSB_SPROM4_TXPID5G3_SHIFT;
+
+ bus->sprom.txpid5gh[0] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH01)] &
+ SSB_SPROM4_TXPID5GH0) >> SSB_SPROM4_TXPID5GH0_SHIFT;
+ bus->sprom.txpid5gh[1] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH01)] &
+ SSB_SPROM4_TXPID5GH1) >> SSB_SPROM4_TXPID5GH1_SHIFT;
+ bus->sprom.txpid5gh[2] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH23)] &
+ SSB_SPROM4_TXPID5GH2) >> SSB_SPROM4_TXPID5GH2_SHIFT;
+ bus->sprom.txpid5gh[3] = (sprom[SPOFF(SSB_SPROM4_TXPID5GH23)] &
+ SSB_SPROM4_TXPID5GH3) >> SSB_SPROM4_TXPID5GH3_SHIFT;
+
bus->sprom.boardflags_lo = sprom[SPOFF(SSB_SPROM8_BFLLO)];
bus->sprom.boardflags_hi = sprom[SPOFF(SSB_SPROM8_BFLHI)];
bus->sprom.boardflags2_lo = sprom[SPOFF(SSB_SPROM8_BFL2LO)];
bus->sprom.boardflags2_hi = sprom[SPOFF(SSB_SPROM8_BFL2HI)];
bus->sprom.country_code = sprom[SPOFF(SSB_SPROM8_CCODE)];
+
+ bus->sprom.fem.ghz2.tssipos = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+ SSB_SROM8_FEM_TSSIPOS) >> SSB_SROM8_FEM_TSSIPOS_SHIFT;
+ bus->sprom.fem.ghz2.extpa_gain = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+ SSB_SROM8_FEM_EXTPA_GAIN) >> SSB_SROM8_FEM_EXTPA_GAIN_SHIFT;
+ bus->sprom.fem.ghz2.pdet_range = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+ SSB_SROM8_FEM_PDET_RANGE) >> SSB_SROM8_FEM_PDET_RANGE_SHIFT;
+ bus->sprom.fem.ghz2.tr_iso = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+ SSB_SROM8_FEM_TR_ISO) >> SSB_SROM8_FEM_TR_ISO_SHIFT;
+ bus->sprom.fem.ghz2.antswlut = (sprom[SPOFF(SSB_SPROM8_FEM2G)] &
+ SSB_SROM8_FEM_ANTSWLUT) >> SSB_SROM8_FEM_ANTSWLUT_SHIFT;
+
+ bus->sprom.fem.ghz5.tssipos = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+ SSB_SROM8_FEM_TSSIPOS) >> SSB_SROM8_FEM_TSSIPOS_SHIFT;
+ bus->sprom.fem.ghz5.extpa_gain = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+ SSB_SROM8_FEM_EXTPA_GAIN) >> SSB_SROM8_FEM_EXTPA_GAIN_SHIFT;
+ bus->sprom.fem.ghz5.pdet_range = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+ SSB_SROM8_FEM_PDET_RANGE) >> SSB_SROM8_FEM_PDET_RANGE_SHIFT;
+ bus->sprom.fem.ghz5.tr_iso = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+ SSB_SROM8_FEM_TR_ISO) >> SSB_SROM8_FEM_TR_ISO_SHIFT;
+ bus->sprom.fem.ghz5.antswlut = (sprom[SPOFF(SSB_SPROM8_FEM5G)] &
+ SSB_SROM8_FEM_ANTSWLUT) >> SSB_SROM8_FEM_ANTSWLUT_SHIFT;
}
int bcma_sprom_get(struct bcma_bus *bus)
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index e086fbb..8db9089 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -1177,7 +1177,8 @@ static bool DAC960_V1_EnableMemoryMailboxInterface(DAC960_Controller_T
int TimeoutCounter;
int i;
-
+ memset(&CommandMailbox, 0, sizeof(DAC960_V1_CommandMailbox_T));
+
if (pci_set_dma_mask(Controller->PCIDevice, DMA_BIT_MASK(32)))
return DAC960_Failure(Controller, "DMA mask out of range");
Controller->BounceBufferLimit = DMA_BIT_MASK(32);
@@ -4627,7 +4628,8 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command)
DAC960_Controller_T *Controller = Command->Controller;
DAC960_CommandType_T CommandType = Command->CommandType;
DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
- DAC960_V2_IOCTL_Opcode_T CommandOpcode = CommandMailbox->Common.IOCTL_Opcode;
+ DAC960_V2_IOCTL_Opcode_T IOCTLOpcode = CommandMailbox->Common.IOCTL_Opcode;
+ DAC960_V2_CommandOpcode_T CommandOpcode = CommandMailbox->SCSI_10.CommandOpcode;
DAC960_V2_CommandStatus_T CommandStatus = Command->V2.CommandStatus;
if (CommandType == DAC960_ReadCommand ||
@@ -4699,7 +4701,7 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command)
{
if (Controller->ShutdownMonitoringTimer)
return;
- if (CommandOpcode == DAC960_V2_GetControllerInfo)
+ if (IOCTLOpcode == DAC960_V2_GetControllerInfo)
{
DAC960_V2_ControllerInfo_T *NewControllerInfo =
Controller->V2.NewControllerInformation;
@@ -4719,14 +4721,14 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command)
memcpy(ControllerInfo, NewControllerInfo,
sizeof(DAC960_V2_ControllerInfo_T));
}
- else if (CommandOpcode == DAC960_V2_GetEvent)
+ else if (IOCTLOpcode == DAC960_V2_GetEvent)
{
if (CommandStatus == DAC960_V2_NormalCompletion) {
DAC960_V2_ReportEvent(Controller, Controller->V2.Event);
}
Controller->V2.NextEventSequenceNumber++;
}
- else if (CommandOpcode == DAC960_V2_GetPhysicalDeviceInfoValid &&
+ else if (IOCTLOpcode == DAC960_V2_GetPhysicalDeviceInfoValid &&
CommandStatus == DAC960_V2_NormalCompletion)
{
DAC960_V2_PhysicalDeviceInfo_T *NewPhysicalDeviceInfo =
@@ -4915,7 +4917,7 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command)
NewPhysicalDeviceInfo->LogicalUnit++;
Controller->V2.PhysicalDeviceIndex++;
}
- else if (CommandOpcode == DAC960_V2_GetPhysicalDeviceInfoValid)
+ else if (IOCTLOpcode == DAC960_V2_GetPhysicalDeviceInfoValid)
{
unsigned int DeviceIndex;
for (DeviceIndex = Controller->V2.PhysicalDeviceIndex;
@@ -4938,7 +4940,7 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command)
}
Controller->V2.NeedPhysicalDeviceInformation = false;
}
- else if (CommandOpcode == DAC960_V2_GetLogicalDeviceInfoValid &&
+ else if (IOCTLOpcode == DAC960_V2_GetLogicalDeviceInfoValid &&
CommandStatus == DAC960_V2_NormalCompletion)
{
DAC960_V2_LogicalDeviceInfo_T *NewLogicalDeviceInfo =
@@ -5065,7 +5067,7 @@ static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command)
[LogicalDeviceNumber] = true;
NewLogicalDeviceInfo->LogicalDeviceNumber++;
}
- else if (CommandOpcode == DAC960_V2_GetLogicalDeviceInfoValid)
+ else if (IOCTLOpcode == DAC960_V2_GetLogicalDeviceInfoValid)
{
int LogicalDriveNumber;
for (LogicalDriveNumber = 0;
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 6f07ec1..4e4c8a4 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -116,6 +116,8 @@ config PARIDE
source "drivers/block/paride/Kconfig"
+source "drivers/block/mtip32xx/Kconfig"
+
config BLK_CPQ_DA
tristate "Compaq SMART2 support"
depends on PCI && VIRT_TO_BUS
@@ -315,6 +317,17 @@ config BLK_DEV_NBD
If unsure, say N.
+config BLK_DEV_NVME
+ tristate "NVM Express block device"
+ depends on PCI
+ ---help---
+ The NVM Express driver is for solid state drives directly
+ connected to the PCI or PCI Express bus. If you know you
+ don't have one of these, it is safe to answer N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called nvme.
+
config BLK_DEV_OSD
tristate "OSD object-as-blkdev support"
depends on SCSI_OSD_ULD
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 76646e9..5b79505 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_XILINX_SYSACE) += xsysace.o
obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
obj-$(CONFIG_MG_DISK) += mg_disk.o
obj-$(CONFIG_SUNVDC) += sunvdc.o
+obj-$(CONFIG_BLK_DEV_NVME) += nvme.o
obj-$(CONFIG_BLK_DEV_OSD) += osdblk.o
obj-$(CONFIG_BLK_DEV_UMEM) += umem.o
@@ -39,5 +40,6 @@ obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
obj-$(CONFIG_XEN_BLKDEV_BACKEND) += xen-blkback/
obj-$(CONFIG_BLK_DEV_DRBD) += drbd/
obj-$(CONFIG_BLK_DEV_RBD) += rbd.o
+obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
swim_mod-y := swim.o swim_asm.o
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index 8eba86b..386146d 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -63,7 +63,7 @@
#include <linux/mutex.h>
#include <linux/amifdreg.h>
#include <linux/amifd.h>
-#include <linux/buffer_head.h>
+#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/interrupt.h>
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index 5f8e39c..e86d206 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -270,7 +270,7 @@ static const struct file_operations aoe_fops = {
.llseek = noop_llseek,
};
-static char *aoe_devnode(struct device *dev, mode_t *mode)
+static char *aoe_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "etherd/%s", dev_name(dev));
}
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index d22119d..ec24643 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -17,7 +17,7 @@
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/radix-tree.h>
-#include <linux/buffer_head.h> /* invalidate_bh_lrus() */
+#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
@@ -402,14 +402,13 @@ static int brd_ioctl(struct block_device *bdev, fmode_t mode,
error = -EBUSY;
if (bdev->bd_openers <= 1) {
/*
- * Invalidate the cache first, so it isn't written
- * back to the device.
+ * Kill the cache first, so it isn't written back to the
+ * device.
*
* Another thread might instantiate more buffercache here,
* but there is not much we can do to close that race.
*/
- invalidate_bh_lrus();
- truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
+ kill_bdev(bdev);
brd_free_pages(brd);
error = 0;
}
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 587cce5..b0f553b 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -1735,7 +1735,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
case CCISS_BIG_PASSTHRU:
return cciss_bigpassthru(h, argp);
- /* scsi_cmd_ioctl handles these, below, though some are not */
+ /* scsi_cmd_blk_ioctl handles these, below, though some are not */
/* very meaningful for cciss. SG_IO is the main one people want. */
case SG_GET_VERSION_NUM:
@@ -1746,9 +1746,9 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
case SG_EMULATED_HOST:
case SG_IO:
case SCSI_IOCTL_SEND_COMMAND:
- return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
+ return scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
- /* scsi_cmd_ioctl would normally handle these, below, but */
+ /* scsi_cmd_blk_ioctl would normally handle these, below, but */
/* they aren't a good fit for cciss, as CD-ROMs are */
/* not supported, and we don't have any bus/target/lun */
/* which we present to the kernel. */
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 9cf2035..8d68056 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -59,8 +59,8 @@
/* module parameter, defined in drbd_main.c */
extern unsigned int minor_count;
-extern int disable_sendpage;
-extern int allow_oos;
+extern bool disable_sendpage;
+extern bool allow_oos;
extern unsigned int cn_idx;
#ifdef CONFIG_DRBD_FAULT_INJECTION
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 0358e55..211fc44 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -117,8 +117,8 @@ module_param(fault_devs, int, 0644);
/* module parameter, defined */
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
-int disable_sendpage;
-int allow_oos;
+bool disable_sendpage;
+bool allow_oos;
unsigned int cn_idx = CN_IDX_DRBD;
int proc_details; /* Detail level in proc drbd*/
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 9955a53..744f078 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -188,7 +188,6 @@ static int print_unex = 1;
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
-#include <linux/buffer_head.h> /* for invalidate_buffers() */
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/uaccess.h>
@@ -3833,7 +3832,7 @@ static int __floppy_read_block_0(struct block_device *bdev)
bio.bi_size = size;
bio.bi_bdev = bdev;
bio.bi_sector = 0;
- bio.bi_flags = BIO_QUIET;
+ bio.bi_flags = (1 << BIO_QUIET);
init_completion(&complete);
bio.bi_private = &complete;
bio.bi_end_io = floppy_rb0_complete;
@@ -4369,8 +4368,14 @@ out_unreg_blkdev:
out_put_disk:
while (dr--) {
del_timer_sync(&motor_off_timer[dr]);
- if (disks[dr]->queue)
+ if (disks[dr]->queue) {
blk_cleanup_queue(disks[dr]->queue);
+ /*
+ * put_disk() is not paired with add_disk() and
+ * will put queue reference one extra time. fix it.
+ */
+ disks[dr]->queue = NULL;
+ }
put_disk(disks[dr]);
}
return err;
@@ -4580,6 +4585,15 @@ static void __exit floppy_module_exit(void)
platform_device_unregister(&floppy_device[drive]);
}
blk_cleanup_queue(disks[drive]->queue);
+
+ /*
+ * These disks have not called add_disk(). Don't put down
+ * queue reference in put_disk().
+ */
+ if (!(allowed_drive_mask & (1 << drive)) ||
+ fdc_state[FDC(drive)].version == FDC_NONE)
+ disks[drive]->queue = NULL;
+
put_disk(disks[drive]);
}
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 1e888c9..cd50435 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -69,7 +69,6 @@
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/writeback.h>
-#include <linux/buffer_head.h> /* for invalidate_bdev() */
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
@@ -357,14 +356,14 @@ lo_direct_splice_actor(struct pipe_inode_info *pipe, struct splice_desc *sd)
return __splice_from_pipe(pipe, sd, lo_splice_actor);
}
-static int
+static ssize_t
do_lo_receive(struct loop_device *lo,
struct bio_vec *bvec, int bsize, loff_t pos)
{
struct lo_read_data cookie;
struct splice_desc sd;
struct file *file;
- long retval;
+ ssize_t retval;
cookie.lo = lo;
cookie.page = bvec->bv_page;
@@ -380,26 +379,28 @@ do_lo_receive(struct loop_device *lo,
file = lo->lo_backing_file;
retval = splice_direct_to_actor(file, &sd, lo_direct_splice_actor);
- if (retval < 0)
- return retval;
- if (retval != bvec->bv_len)
- return -EIO;
- return 0;
+ return retval;
}
static int
lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
{
struct bio_vec *bvec;
- int i, ret = 0;
+ ssize_t s;
+ int i;
bio_for_each_segment(bvec, bio, i) {
- ret = do_lo_receive(lo, bvec, bsize, pos);
- if (ret < 0)
+ s = do_lo_receive(lo, bvec, bsize, pos);
+ if (s < 0)
+ return s;
+
+ if (s != bvec->bv_len) {
+ zero_fill_bio(bio);
break;
+ }
pos += bvec->bv_len;
}
- return ret;
+ return 0;
}
static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
diff --git a/drivers/block/mtip32xx/Kconfig b/drivers/block/mtip32xx/Kconfig
new file mode 100644
index 0000000..b5dd14e
--- /dev/null
+++ b/drivers/block/mtip32xx/Kconfig
@@ -0,0 +1,9 @@
+#
+# mtip32xx device driver configuration
+#
+
+config BLK_DEV_PCIESSD_MTIP32XX
+ tristate "Block Device Driver for Micron PCIe SSDs"
+ depends on HOTPLUG_PCI_PCIE
+ help
+ This enables the block driver for Micron PCIe SSDs.
diff --git a/drivers/block/mtip32xx/Makefile b/drivers/block/mtip32xx/Makefile
new file mode 100644
index 0000000..4fbef8c
--- /dev/null
+++ b/drivers/block/mtip32xx/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for Block device driver for Micron PCIe SSD
+#
+
+obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx.o
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
new file mode 100644
index 0000000..8eb81c9
--- /dev/null
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -0,0 +1,3650 @@
+/*
+ * Driver for the Micron P320 SSD
+ * Copyright (C) 2011 Micron Technology, Inc.
+ *
+ * Portions of this code were derived from works subjected to the
+ * following copyright:
+ * Copyright (C) 2009 Integrated Device Technology, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/ata.h>
+#include <linux/delay.h>
+#include <linux/hdreg.h>
+#include <linux/uaccess.h>
+#include <linux/random.h>
+#include <linux/smp.h>
+#include <linux/compat.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/genhd.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+#include <linux/dma-mapping.h>
+#include <linux/idr.h>
+#include <linux/kthread.h>
+#include <../drivers/ata/ahci.h>
+#include "mtip32xx.h"
+
+#define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
+#define HW_CMD_TBL_SZ (AHCI_CMD_TBL_HDR_SZ + (MTIP_MAX_SG * 16))
+#define HW_CMD_TBL_AR_SZ (HW_CMD_TBL_SZ * MTIP_MAX_COMMAND_SLOTS)
+#define HW_PORT_PRIV_DMA_SZ \
+ (HW_CMD_SLOT_SZ + HW_CMD_TBL_AR_SZ + AHCI_RX_FIS_SZ)
+
+#define HOST_HSORG 0xFC
+#define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
+#define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
+#define HSORG_HWREV 0xFF00
+#define HSORG_STYLE 0x8
+#define HSORG_SLOTGROUPS 0x7
+
+#define PORT_COMMAND_ISSUE 0x38
+#define PORT_SDBV 0x7C
+
+#define PORT_OFFSET 0x100
+#define PORT_MEM_SIZE 0x80
+
+#define PORT_IRQ_ERR \
+ (PORT_IRQ_HBUS_ERR | PORT_IRQ_IF_ERR | PORT_IRQ_CONNECT | \
+ PORT_IRQ_PHYRDY | PORT_IRQ_UNK_FIS | PORT_IRQ_BAD_PMP | \
+ PORT_IRQ_TF_ERR | PORT_IRQ_HBUS_DATA_ERR | PORT_IRQ_IF_NONFATAL | \
+ PORT_IRQ_OVERFLOW)
+#define PORT_IRQ_LEGACY \
+ (PORT_IRQ_PIOS_FIS | PORT_IRQ_D2H_REG_FIS)
+#define PORT_IRQ_HANDLED \
+ (PORT_IRQ_SDB_FIS | PORT_IRQ_LEGACY | \
+ PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR | \
+ PORT_IRQ_CONNECT | PORT_IRQ_PHYRDY)
+#define DEF_PORT_IRQ \
+ (PORT_IRQ_ERR | PORT_IRQ_LEGACY | PORT_IRQ_SDB_FIS)
+
+/* product numbers */
+#define MTIP_PRODUCT_UNKNOWN 0x00
+#define MTIP_PRODUCT_ASICFPGA 0x11
+
+/* Device instance number, incremented each time a device is probed. */
+static int instance;
+
+/*
+ * Global variable used to hold the major block device number
+ * allocated in mtip_init().
+ */
+static int mtip_major;
+
+static DEFINE_SPINLOCK(rssd_index_lock);
+static DEFINE_IDA(rssd_index_ida);
+
+static int mtip_block_initialize(struct driver_data *dd);
+
+#ifdef CONFIG_COMPAT
+struct mtip_compat_ide_task_request_s {
+ __u8 io_ports[8];
+ __u8 hob_ports[8];
+ ide_reg_valid_t out_flags;
+ ide_reg_valid_t in_flags;
+ int data_phase;
+ int req_cmd;
+ compat_ulong_t out_size;
+ compat_ulong_t in_size;
+};
+#endif
+
+/*
+ * This function is called while the card is being removed
+ * from the system; it reads the vendor ID from the
+ * configuration space to detect the removal.
+ *
+ * @pdev Pointer to the pci_dev structure.
+ *
+ * return value
+ * true if device removed, else false
+ */
+static bool mtip_check_surprise_removal(struct pci_dev *pdev)
+{
+ u16 vendor_id = 0;
+
+ /* Read the vendorID from the configuration space */
+ pci_read_config_word(pdev, 0x00, &vendor_id);
+ if (vendor_id == 0xFFFF)
+ return true; /* device removed */
+
+ return false; /* device present */
+}
+
+/*
+ * This function is called to clean up the pending commands in the
+ * command slots during a surprise removal of the device, returning
+ * an error to the upper layer.
+ *
+ * @dd Pointer to the DRIVER_DATA structure.
+ *
+ * return value
+ * None
+ */
+static void mtip_command_cleanup(struct driver_data *dd)
+{
+ int group = 0, commandslot = 0, commandindex = 0;
+ struct mtip_cmd *command;
+ struct mtip_port *port = dd->port;
+
+ for (group = 0; group < 4; group++) {
+ for (commandslot = 0; commandslot < 32; commandslot++) {
+ if (!(port->allocated[group] & (1 << commandslot)))
+ continue;
+
+ commandindex = group << 5 | commandslot;
+ command = &port->commands[commandindex];
+
+ if (atomic_read(&command->active)
+ && (command->async_callback)) {
+ command->async_callback(command->async_data,
+ -ENODEV);
+ command->async_callback = NULL;
+ command->async_data = NULL;
+ }
+
+ dma_unmap_sg(&port->dd->pdev->dev,
+ command->sg,
+ command->scatter_ents,
+ command->direction);
+ }
+ }
+
+ up(&port->cmd_slot);
+
+ atomic_set(&dd->drv_cleanup_done, true);
+}
+
+/*
+ * Obtain an empty command slot.
+ *
+ * This function needs to be reentrant since it could be called
+ * at the same time on multiple CPUs. The allocation of the
+ * command slot must be atomic.
+ *
+ * @port Pointer to the port data structure.
+ *
+ * return value
+ * >= 0 Index of command slot obtained.
+ * -1 No command slots available.
+ */
+static int get_slot(struct mtip_port *port)
+{
+ int slot, i;
+ unsigned int num_command_slots = port->dd->slot_groups * 32;
+
+ /*
+ * Try 10 times, because there is a small race here.
+ * That's OK, because it's still cheaper than a lock.
+ *
+ * Race: since this section is not protected by a lock, the same
+ * bit could be chosen by different process contexts running on
+ * different processors, so instead of a costly lock we simply
+ * retry in a loop.
+ */
+ for (i = 0; i < 10; i++) {
+ slot = find_next_zero_bit(port->allocated,
+ num_command_slots, 1);
+ if ((slot < num_command_slots) &&
+ (!test_and_set_bit(slot, port->allocated)))
+ return slot;
+ }
+ dev_warn(&port->dd->pdev->dev, "Failed to get a tag.\n");
+
+ if (mtip_check_surprise_removal(port->dd->pdev)) {
+ /* Device not present, clean outstanding commands */
+ mtip_command_cleanup(port->dd);
+ }
+ return -1;
+}
+
+/*
+ * Release a command slot.
+ *
+ * @port Pointer to the port data structure.
+ * @tag Tag of command to release
+ *
+ * return value
+ * None
+ */
+static inline void release_slot(struct mtip_port *port, int tag)
+{
+ smp_mb__before_clear_bit();
+ clear_bit(tag, port->allocated);
+ smp_mb__after_clear_bit();
+}
+
+/*
+ * Reset the HBA (without sleeping)
+ *
+ * Just like hba_reset, except does not call sleep, so can be
+ * run from interrupt/tasklet context.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0 The reset was successful.
+ * -1 The HBA Reset bit did not clear.
+ */
+static int hba_reset_nosleep(struct driver_data *dd)
+{
+ unsigned long timeout;
+
+ /* Chip quirk: quiesce any chip function */
+ mdelay(10);
+
+ /* Set the reset bit */
+ writel(HOST_RESET, dd->mmio + HOST_CTL);
+
+ /* Flush */
+ readl(dd->mmio + HOST_CTL);
+
+ /*
+ * Wait 10ms then spin for up to 1 second
+ * waiting for reset acknowledgement
+ */
+ timeout = jiffies + msecs_to_jiffies(1000);
+ mdelay(10);
+ while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
+ && time_before(jiffies, timeout))
+ mdelay(1);
+
+ if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Issue a command to the hardware.
+ *
+ * Set the appropriate bit in the s_active and Command Issue hardware
+ * registers, causing hardware command processing to begin.
+ *
+ * @port Pointer to the port structure.
+ * @tag The tag of the command to be issued.
+ *
+ * return value
+ * None
+ */
+static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
+{
+ unsigned long flags = 0;
+
+ atomic_set(&port->commands[tag].active, 1);
+
+ spin_lock_irqsave(&port->cmd_issue_lock, flags);
+
+ writel((1 << MTIP_TAG_BIT(tag)),
+ port->s_active[MTIP_TAG_INDEX(tag)]);
+ writel((1 << MTIP_TAG_BIT(tag)),
+ port->cmd_issue[MTIP_TAG_INDEX(tag)]);
+
+ spin_unlock_irqrestore(&port->cmd_issue_lock, flags);
+}
+
+/*
+ * Enable/disable the reception of FIS
+ *
+ * @port Pointer to the port data structure
+ * @enable 1 to enable, 0 to disable
+ *
+ * return value
+ * Previous state: 1 enabled, 0 disabled
+ */
+static int mtip_enable_fis(struct mtip_port *port, int enable)
+{
+ u32 tmp;
+
+ /* enable FIS reception */
+ tmp = readl(port->mmio + PORT_CMD);
+ if (enable)
+ writel(tmp | PORT_CMD_FIS_RX, port->mmio + PORT_CMD);
+ else
+ writel(tmp & ~PORT_CMD_FIS_RX, port->mmio + PORT_CMD);
+
+ /* Flush */
+ readl(port->mmio + PORT_CMD);
+
+ return (((tmp & PORT_CMD_FIS_RX) == PORT_CMD_FIS_RX));
+}
+
+/*
+ * Enable/disable the DMA engine
+ *
+ * @port Pointer to the port data structure
+ * @enable 1 to enable, 0 to disable
+ *
+ * return value
+ * Previous state: 1 enabled, 0 disabled.
+ */
+static int mtip_enable_engine(struct mtip_port *port, int enable)
+{
+ u32 tmp;
+
+ /* start/stop the DMA engine */
+ tmp = readl(port->mmio + PORT_CMD);
+ if (enable)
+ writel(tmp | PORT_CMD_START, port->mmio + PORT_CMD);
+ else
+ writel(tmp & ~PORT_CMD_START, port->mmio + PORT_CMD);
+
+ readl(port->mmio + PORT_CMD);
+ return (((tmp & PORT_CMD_START) == PORT_CMD_START));
+}
+
+/*
+ * Enables the port DMA engine and FIS reception.
+ *
+ * return value
+ * None
+ */
+static inline void mtip_start_port(struct mtip_port *port)
+{
+ /* Enable FIS reception */
+ mtip_enable_fis(port, 1);
+
+ /* Enable the DMA engine */
+ mtip_enable_engine(port, 1);
+}
+
+/*
+ * Deinitialize a port by disabling port interrupts, the DMA engine,
+ * and FIS reception.
+ *
+ * @port Pointer to the port structure
+ *
+ * return value
+ * None
+ */
+static inline void mtip_deinit_port(struct mtip_port *port)
+{
+ /* Disable interrupts on this port */
+ writel(0, port->mmio + PORT_IRQ_MASK);
+
+ /* Disable the DMA engine */
+ mtip_enable_engine(port, 0);
+
+ /* Disable FIS reception */
+ mtip_enable_fis(port, 0);
+}
+
+/*
+ * Initialize a port.
+ *
+ * This function deinitializes the port by calling mtip_deinit_port() and
+ * then initializes it by setting the command header and RX FIS addresses,
+ * clearing the SError register and any pending port interrupts before
+ * re-enabling the default set of port interrupts.
+ *
+ * @port Pointer to the port structure.
+ *
+ * return value
+ * None
+ */
+static void mtip_init_port(struct mtip_port *port)
+{
+ int i;
+ mtip_deinit_port(port);
+
+ /* Program the command list base and FIS base addresses */
+ if (readl(port->dd->mmio + HOST_CAP) & HOST_CAP_64) {
+ writel((port->command_list_dma >> 16) >> 16,
+ port->mmio + PORT_LST_ADDR_HI);
+ writel((port->rxfis_dma >> 16) >> 16,
+ port->mmio + PORT_FIS_ADDR_HI);
+ }
+
+ writel(port->command_list_dma & 0xFFFFFFFF,
+ port->mmio + PORT_LST_ADDR);
+ writel(port->rxfis_dma & 0xFFFFFFFF, port->mmio + PORT_FIS_ADDR);
+
+ /* Clear SError */
+ writel(readl(port->mmio + PORT_SCR_ERR), port->mmio + PORT_SCR_ERR);
+
+ /* reset the completed registers.*/
+ for (i = 0; i < port->dd->slot_groups; i++)
+ writel(0xFFFFFFFF, port->completed[i]);
+
+ /* Clear any pending interrupts for this port */
+ writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);
+
+ /* Enable port interrupts */
+ writel(DEF_PORT_IRQ, port->mmio + PORT_IRQ_MASK);
+}
+
+/*
+ * Restart a port
+ *
+ * @port Pointer to the port data structure.
+ *
+ * return value
+ * None
+ */
+static void mtip_restart_port(struct mtip_port *port)
+{
+ unsigned long timeout;
+
+ /* Disable the DMA engine */
+ mtip_enable_engine(port, 0);
+
+ /* Chip quirk: wait up to 500ms for PxCMD.CR == 0 */
+ timeout = jiffies + msecs_to_jiffies(500);
+ while ((readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON)
+ && time_before(jiffies, timeout))
+ ;
+
+ /*
+ * Chip quirk: escalate to hba reset if
+ * PxCMD.CR not clear after 500 ms
+ */
+ if (readl(port->mmio + PORT_CMD) & PORT_CMD_LIST_ON) {
+ dev_warn(&port->dd->pdev->dev,
+ "PxCMD.CR not clear, escalating reset\n");
+
+ if (hba_reset_nosleep(port->dd))
+ dev_err(&port->dd->pdev->dev,
+ "HBA reset escalation failed.\n");
+
+ /* 30 ms delay before com reset to quiesce chip */
+ mdelay(30);
+ }
+
+ dev_warn(&port->dd->pdev->dev, "Issuing COM reset\n");
+
+ /* Set PxSCTL.DET */
+ writel(readl(port->mmio + PORT_SCR_CTL) |
+ 1, port->mmio + PORT_SCR_CTL);
+ readl(port->mmio + PORT_SCR_CTL);
+
+ /* Wait 1 ms to quiesce chip function */
+ timeout = jiffies + msecs_to_jiffies(1);
+ while (time_before(jiffies, timeout))
+ ;
+
+ /* Clear PxSCTL.DET */
+ writel(readl(port->mmio + PORT_SCR_CTL) & ~1,
+ port->mmio + PORT_SCR_CTL);
+ readl(port->mmio + PORT_SCR_CTL);
+
+ /* Wait 500 ms for bit 0 of PORT_SCR_STS to be set */
+ timeout = jiffies + msecs_to_jiffies(500);
+ while (((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
+ && time_before(jiffies, timeout))
+ ;
+
+ if ((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
+ dev_warn(&port->dd->pdev->dev,
+ "COM reset failed\n");
+
+ /* Clear SError, the PxSERR.DIAG.x should be set so clear it */
+ writel(readl(port->mmio + PORT_SCR_ERR), port->mmio + PORT_SCR_ERR);
+
+ /* Enable the DMA engine */
+ mtip_enable_engine(port, 1);
+}
+
+/*
+ * Called periodically to see if any read/write commands are
+ * taking too long to complete.
+ *
+ * @data Pointer to the PORT data structure.
+ *
+ * return value
+ * None
+ */
+static void mtip_timeout_function(unsigned long int data)
+{
+ struct mtip_port *port = (struct mtip_port *) data;
+ struct host_to_dev_fis *fis;
+ struct mtip_cmd *command;
+ int tag, cmdto_cnt = 0;
+ unsigned int bit, group;
+ unsigned int num_command_slots = port->dd->slot_groups * 32;
+
+ if (unlikely(!port))
+ return;
+
+ if (atomic_read(&port->dd->resumeflag) == true) {
+ mod_timer(&port->cmd_timer,
+ jiffies + msecs_to_jiffies(30000));
+ return;
+ }
+
+ for (tag = 0; tag < num_command_slots; tag++) {
+ /*
+ * Skip internal command slot as it has
+ * its own timeout mechanism
+ */
+ if (tag == MTIP_TAG_INTERNAL)
+ continue;
+
+ if (atomic_read(&port->commands[tag].active) &&
+ (time_after(jiffies, port->commands[tag].comp_time))) {
+ group = tag >> 5;
+ bit = tag & 0x1F;
+
+ command = &port->commands[tag];
+ fis = (struct host_to_dev_fis *) command->command;
+
+ dev_warn(&port->dd->pdev->dev,
+ "Timeout for command tag %d\n", tag);
+
+ cmdto_cnt++;
+ if (cmdto_cnt == 1)
+ set_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags);
+
+ /*
+ * Clear the completed bit. This should prevent
+ * any interrupt handlers from trying to retire
+ * the command.
+ */
+ writel(1 << bit, port->completed[group]);
+
+ /* Call the async completion callback. */
+ if (likely(command->async_callback))
+ command->async_callback(command->async_data,
+ -EIO);
+ command->async_callback = NULL;
+ command->comp_func = NULL;
+
+ /* Unmap the DMA scatter list entries */
+ dma_unmap_sg(&port->dd->pdev->dev,
+ command->sg,
+ command->scatter_ents,
+ command->direction);
+
+ /*
+ * Clear the allocated bit and active tag for the
+ * command.
+ */
+ atomic_set(&port->commands[tag].active, 0);
+ release_slot(port, tag);
+
+ up(&port->cmd_slot);
+ }
+ }
+
+ if (cmdto_cnt) {
+ dev_warn(&port->dd->pdev->dev,
+ "%d commands timed out: restarting port",
+ cmdto_cnt);
+ mtip_restart_port(port);
+ clear_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags);
+ wake_up_interruptible(&port->svc_wait);
+ }
+
+ /* Restart the timer */
+ mod_timer(&port->cmd_timer,
+ jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
+}
+
+/*
+ * IO completion function.
+ *
+ * This completion function is called by the driver ISR when a
+ * command that was issued by the kernel completes. It first calls the
+ * asynchronous completion function which normally calls back into the block
+ * layer passing the asynchronous callback data, then unmaps the
+ * scatter list associated with the completed command, and finally
+ * clears the allocated bit associated with the completed command.
+ *
+ * @port Pointer to the port data structure.
+ * @tag Tag of the command.
+ * @data Pointer to driver_data.
+ * @status Completion status.
+ *
+ * return value
+ * None
+ */
+static void mtip_async_complete(struct mtip_port *port,
+ int tag,
+ void *data,
+ int status)
+{
+ struct mtip_cmd *command;
+ struct driver_data *dd = data;
+ int cb_status = status ? -EIO : 0;
+
+ if (unlikely(!dd) || unlikely(!port))
+ return;
+
+ command = &port->commands[tag];
+
+ if (unlikely(status == PORT_IRQ_TF_ERR)) {
+ dev_warn(&port->dd->pdev->dev,
+ "Command tag %d failed due to TFE\n", tag);
+ }
+
+ /* Upper layer callback */
+ if (likely(command->async_callback))
+ command->async_callback(command->async_data, cb_status);
+
+ command->async_callback = NULL;
+ command->comp_func = NULL;
+
+ /* Unmap the DMA scatter list entries */
+ dma_unmap_sg(&dd->pdev->dev,
+ command->sg,
+ command->scatter_ents,
+ command->direction);
+
+ /* Clear the allocated and active bits for the command */
+ atomic_set(&port->commands[tag].active, 0);
+ release_slot(port, tag);
+
+ up(&port->cmd_slot);
+}
+
+/*
+ * Internal command completion callback function.
+ *
+ * This function is normally called by the driver ISR when an internal
+ * command completed. This function signals the command completion by
+ * calling complete().
+ *
+ * @port Pointer to the port data structure.
+ * @tag Tag of the command that has completed.
+ * @data Pointer to a completion structure.
+ * @status Completion status.
+ *
+ * return value
+ * None
+ */
+static void mtip_completion(struct mtip_port *port,
+ int tag,
+ void *data,
+ int status)
+{
+ struct mtip_cmd *command = &port->commands[tag];
+ struct completion *waiting = data;
+ if (unlikely(status == PORT_IRQ_TF_ERR))
+ dev_warn(&port->dd->pdev->dev,
+ "Internal command %d completed with TFE\n", tag);
+
+ command->async_callback = NULL;
+ command->comp_func = NULL;
+
+ complete(waiting);
+}
+
+/*
+ * Helper function for tag logging
+ */
+static void print_tags(struct driver_data *dd,
+ char *msg,
+ unsigned long *tagbits)
+{
+ unsigned int tag, count = 0;
+
+ for (tag = 0; tag < (dd->slot_groups) * 32; tag++) {
+ if (test_bit(tag, tagbits))
+ count++;
+ }
+ if (count)
+ dev_info(&dd->pdev->dev, "%s [%i tags]\n", msg, count);
+}
+
+/*
+ * Handle a taskfile error (TFE).
+ *
+ * Successfully completed commands are retired, the port is restarted,
+ * and any command that was still outstanding is either re-issued (if
+ * it has retries left) or failed back to the upper layer.
+ *
+ * @dd Pointer to the DRIVER_DATA structure.
+ *
+ * return value
+ * None
+ */
+static void mtip_handle_tfe(struct driver_data *dd)
+{
+ int group, tag, bit, reissue;
+ struct mtip_port *port;
+ struct mtip_cmd *command;
+ u32 completed;
+ struct host_to_dev_fis *fis;
+ unsigned long tagaccum[SLOTBITS_IN_LONGS];
+
+ dev_warn(&dd->pdev->dev, "Taskfile error\n");
+
+ port = dd->port;
+
+ /* Stop the timer to prevent command timeouts. */
+ del_timer(&port->cmd_timer);
+
+ /* Set eh_active */
+ set_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags);
+
+ /* Loop through all the groups */
+ for (group = 0; group < dd->slot_groups; group++) {
+ completed = readl(port->completed[group]);
+
+ /* clear completed status register in the hardware.*/
+ writel(completed, port->completed[group]);
+
+ /* clear the tag accumulator */
+ memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
+
+ /* Process successfully completed commands */
+ for (bit = 0; bit < 32 && completed; bit++) {
+ if (!(completed & (1<<bit)))
+ continue;
+ tag = (group << 5) + bit;
+
+ /* Skip the internal command slot */
+ if (tag == MTIP_TAG_INTERNAL)
+ continue;
+
+ command = &port->commands[tag];
+ if (likely(command->comp_func)) {
+ set_bit(tag, tagaccum);
+ atomic_set(&port->commands[tag].active, 0);
+ command->comp_func(port,
+ tag,
+ command->comp_data,
+ 0);
+ } else {
+ dev_err(&port->dd->pdev->dev,
+ "Missing completion func for tag %d",
+ tag);
+ if (mtip_check_surprise_removal(dd->pdev)) {
+ mtip_command_cleanup(dd);
+ /* don't proceed further */
+ return;
+ }
+ }
+ }
+ }
+ print_tags(dd, "TFE tags completed:", tagaccum);
+
+ /* Restart the port */
+ mdelay(20);
+ mtip_restart_port(port);
+
+ /* clear the tag accumulator */
+ memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
+
+ /* Loop through all the groups */
+ for (group = 0; group < dd->slot_groups; group++) {
+ for (bit = 0; bit < 32; bit++) {
+ reissue = 1;
+ tag = (group << 5) + bit;
+
+ /* If the active bit is set re-issue the command */
+ if (atomic_read(&port->commands[tag].active) == 0)
+ continue;
+
+ fis = (struct host_to_dev_fis *)
+ port->commands[tag].command;
+
+ /* Should re-issue? */
+ if (tag == MTIP_TAG_INTERNAL ||
+ fis->command == ATA_CMD_SET_FEATURES)
+ reissue = 0;
+
+ /*
+ * First check if this command has
+ * exceeded its retries.
+ */
+ if (reissue &&
+ (port->commands[tag].retries-- > 0)) {
+
+ set_bit(tag, tagaccum);
+
+ /* Update the timeout value. */
+ port->commands[tag].comp_time =
+ jiffies + msecs_to_jiffies(
+ MTIP_NCQ_COMMAND_TIMEOUT_MS);
+ /* Re-issue the command. */
+ mtip_issue_ncq_command(port, tag);
+
+ continue;
+ }
+
+ /* Retire a command that will not be reissued */
+ dev_warn(&port->dd->pdev->dev,
+ "retiring tag %d\n", tag);
+ atomic_set(&port->commands[tag].active, 0);
+
+ if (port->commands[tag].comp_func)
+ port->commands[tag].comp_func(
+ port,
+ tag,
+ port->commands[tag].comp_data,
+ PORT_IRQ_TF_ERR);
+ else
+ dev_warn(&port->dd->pdev->dev,
+ "Bad completion for tag %d\n",
+ tag);
+ }
+ }
+ print_tags(dd, "TFE tags reissued:", tagaccum);
+
+ /* clear eh_active */
+ clear_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags);
+ wake_up_interruptible(&port->svc_wait);
+
+ mod_timer(&port->cmd_timer,
+ jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
+}
+
+/*
+ * Handle a set device bits interrupt
+ */
+static inline void mtip_process_sdbf(struct driver_data *dd)
+{
+ struct mtip_port *port = dd->port;
+ int group, tag, bit;
+ u32 completed;
+ struct mtip_cmd *command;
+
+ /* walk all bits in all slot groups */
+ for (group = 0; group < dd->slot_groups; group++) {
+ completed = readl(port->completed[group]);
+
+ /* clear completed status register in the hardware.*/
+ writel(completed, port->completed[group]);
+
+ /* Process completed commands. */
+ for (bit = 0;
+ (bit < 32) && completed;
+ bit++, completed >>= 1) {
+ if (completed & 0x01) {
+ tag = (group << 5) | bit;
+
+ /* skip internal command slot. */
+ if (unlikely(tag == MTIP_TAG_INTERNAL))
+ continue;
+
+ command = &port->commands[tag];
+ /* make internal callback */
+ if (likely(command->comp_func)) {
+ command->comp_func(
+ port,
+ tag,
+ command->comp_data,
+ 0);
+ } else {
+ dev_warn(&dd->pdev->dev,
+ "Null completion "
+ "for tag %d",
+ tag);
+
+ if (mtip_check_surprise_removal(
+ dd->pdev)) {
+ mtip_command_cleanup(dd);
+ return;
+ }
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Process legacy pio and d2h interrupts
+ */
+static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
+{
+ struct mtip_port *port = dd->port;
+ struct mtip_cmd *cmd = &port->commands[MTIP_TAG_INTERNAL];
+
+ if (test_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags) &&
+ (cmd != NULL) && !(readl(port->cmd_issue[MTIP_TAG_INTERNAL])
+ & (1 << MTIP_TAG_INTERNAL))) {
+ if (cmd->comp_func) {
+ cmd->comp_func(port,
+ MTIP_TAG_INTERNAL,
+ cmd->comp_data,
+ 0);
+ return;
+ }
+ }
+
+ dev_warn(&dd->pdev->dev, "IRQ status 0x%x ignored.\n", port_stat);
+
+ return;
+}
+
+/*
+ * Demux and handle errors
+ */
+static inline void mtip_process_errors(struct driver_data *dd, u32 port_stat)
+{
+ if (likely(port_stat & (PORT_IRQ_TF_ERR | PORT_IRQ_IF_ERR)))
+ mtip_handle_tfe(dd);
+
+ if (unlikely(port_stat & PORT_IRQ_CONNECT)) {
+ dev_warn(&dd->pdev->dev,
+ "Clearing PxSERR.DIAG.x\n");
+ writel((1 << 26), dd->port->mmio + PORT_SCR_ERR);
+ }
+
+ if (unlikely(port_stat & PORT_IRQ_PHYRDY)) {
+ dev_warn(&dd->pdev->dev,
+ "Clearing PxSERR.DIAG.n\n");
+ writel((1 << 16), dd->port->mmio + PORT_SCR_ERR);
+ }
+
+ if (unlikely(port_stat & ~PORT_IRQ_HANDLED)) {
+ dev_warn(&dd->pdev->dev,
+ "Port stat errors %x unhandled\n",
+ (port_stat & ~PORT_IRQ_HANDLED));
+ }
+}
+
+static inline irqreturn_t mtip_handle_irq(struct driver_data *dd)
+{
+ struct mtip_port *port = dd->port;
+ u32 hba_stat, port_stat;
+ int rv = IRQ_NONE;
+
+ hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
+ if (hba_stat) {
+ rv = IRQ_HANDLED;
+
+ /* Acknowledge the interrupt status on the port.*/
+ port_stat = readl(port->mmio + PORT_IRQ_STAT);
+ writel(port_stat, port->mmio + PORT_IRQ_STAT);
+
+ /* Demux port status */
+ if (likely(port_stat & PORT_IRQ_SDB_FIS))
+ mtip_process_sdbf(dd);
+
+ if (unlikely(port_stat & PORT_IRQ_ERR)) {
+ if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
+ mtip_command_cleanup(dd);
+ /* don't proceed further */
+ return IRQ_HANDLED;
+ }
+
+ mtip_process_errors(dd, port_stat & PORT_IRQ_ERR);
+ }
+
+ if (unlikely(port_stat & PORT_IRQ_LEGACY))
+ mtip_process_legacy(dd, port_stat & PORT_IRQ_LEGACY);
+ }
+
+ /* acknowledge interrupt */
+ writel(hba_stat, dd->mmio + HOST_IRQ_STAT);
+
+ return rv;
+}
+
+/*
+ * Wrapper for mtip_handle_irq
+ * (ignores return code)
+ */
+static void mtip_tasklet(unsigned long data)
+{
+ mtip_handle_irq((struct driver_data *) data);
+}
+
+/*
+ * HBA interrupt subroutine.
+ *
+ * @irq IRQ number.
+ * @instance Pointer to the driver data structure.
+ *
+ * return value
+ * IRQ_HANDLED An HBA interrupt was pending and handled.
+ * IRQ_NONE This interrupt was not for the HBA.
+ */
+static irqreturn_t mtip_irq_handler(int irq, void *instance)
+{
+ struct driver_data *dd = instance;
+ tasklet_schedule(&dd->tasklet);
+ return IRQ_HANDLED;
+}
+
+static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag)
+{
+ atomic_set(&port->commands[tag].active, 1);
+ writel(1 << MTIP_TAG_BIT(tag),
+ port->cmd_issue[MTIP_TAG_INDEX(tag)]);
+}
+
+/*
+ * Wait for port to quiesce
+ *
+ * @port Pointer to port data structure
+ * @timeout Max duration to wait (ms)
+ *
+ * return value
+ * 0 Success
+ * -EBUSY Commands still active
+ */
+static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
+{
+ unsigned long to;
+ unsigned int n;
+ unsigned int active = 1;
+
+ to = jiffies + msecs_to_jiffies(timeout);
+ do {
+ if (test_bit(MTIP_FLAG_SVC_THD_ACTIVE_BIT, &port->flags) &&
+ test_bit(MTIP_FLAG_ISSUE_CMDS_BIT, &port->flags)) {
+ msleep(20);
+ continue; /* svc thd is actively issuing commands */
+ }
+ /*
+ * Ignore s_active bit 0 of array element 0.
+ * This bit will always be set
+ */
+ active = readl(port->s_active[0]) & 0xFFFFFFFE;
+ for (n = 1; n < port->dd->slot_groups; n++)
+ active |= readl(port->s_active[n]);
+
+ if (!active)
+ break;
+
+ msleep(20);
+ } while (time_before(jiffies, to));
+
+ return active ? -EBUSY : 0;
+}
+
+/*
+ * Execute an internal command and wait for the completion.
+ *
+ * @port Pointer to the port data structure.
+ * @fis Pointer to the FIS that describes the command.
+ * @fis_len Length in WORDS of the FIS.
+ * @buffer DMA address of the buffer for the command data.
+ * @buf_len Length, in bytes, of the data buffer.
+ * @opts Command header options, excluding the FIS length
+ * and the number of PRD entries.
+ * @atomic GFP_KERNEL if the caller may sleep while waiting for
+ * completion, otherwise the command issue register is polled.
+ * @timeout Time in ms to wait for the command to complete.
+ *
+ * return value
+ * 0 Command completed successfully.
+ * -EFAULT The buffer address is not correctly aligned.
+ * -EBUSY Internal command or other IO in progress.
+ * -EAGAIN Time out waiting for command to complete.
+ */
+static int mtip_exec_internal_command(struct mtip_port *port,
+ void *fis,
+ int fis_len,
+ dma_addr_t buffer,
+ int buf_len,
+ u32 opts,
+ gfp_t atomic,
+ unsigned long timeout)
+{
+ struct mtip_cmd_sg *command_sg;
+ DECLARE_COMPLETION_ONSTACK(wait);
+ int rv = 0;
+ struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL];
+
+ /* Make sure the buffer is 8 byte aligned. This is asic specific. */
+ if (buffer & 0x00000007) {
+ dev_err(&port->dd->pdev->dev,
+ "SG buffer is not 8 byte aligned\n");
+ return -EFAULT;
+ }
+
+ /* Only one internal command should be running at a time */
+ if (test_and_set_bit(MTIP_TAG_INTERNAL, port->allocated)) {
+ dev_warn(&port->dd->pdev->dev,
+ "Internal command already active\n");
+ return -EBUSY;
+ }
+ set_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags);
+
+ if (atomic == GFP_KERNEL) {
+ /* wait for io to complete if non atomic */
+ if (mtip_quiesce_io(port, 5000) < 0) {
+ dev_warn(&port->dd->pdev->dev,
+ "Failed to quiesce IO\n");
+ release_slot(port, MTIP_TAG_INTERNAL);
+ clear_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags);
+ wake_up_interruptible(&port->svc_wait);
+ return -EBUSY;
+ }
+
+ /* Set the completion function and data for the command. */
+ int_cmd->comp_data = &wait;
+ int_cmd->comp_func = mtip_completion;
+
+ } else {
+ /* Clear completion - we're going to poll */
+ int_cmd->comp_data = NULL;
+ int_cmd->comp_func = NULL;
+ }
+
+ /* Copy the command to the command table */
+ memcpy(int_cmd->command, fis, fis_len*4);
+
+ /* Populate the SG list */
+ int_cmd->command_header->opts =
+ __force_bit2int cpu_to_le32(opts | fis_len);
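+ /*
+ * Assuming the standard AHCI command header layout, bits 4:0 of
+ * the options word (set above) carry the FIS length in DWORDs and
+ * bits 31:16 carry the PRD table length, so setting bit 16 below
+ * advertises a single PRD entry.
+ */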
+ if (buf_len) {
+ command_sg = int_cmd->command + AHCI_CMD_TBL_HDR_SZ;
+
+ command_sg->info =
+ __force_bit2int cpu_to_le32((buf_len-1) & 0x3FFFFF);
+ command_sg->dba =
+ __force_bit2int cpu_to_le32(buffer & 0xFFFFFFFF);
+ command_sg->dba_upper =
+ __force_bit2int cpu_to_le32((buffer >> 16) >> 16);
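+ /*
+ * The double 16-bit shift above extracts the upper 32 bits of the
+ * DMA address without performing an undefined 64-bit shift when
+ * dma_addr_t is only 32 bits wide.
+ */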
+
+ int_cmd->command_header->opts |=
+ __force_bit2int cpu_to_le32((1 << 16));
+ }
+
+ /* Populate the command header */
+ int_cmd->command_header->byte_count = 0;
+
+ /* Issue the command to the hardware */
+ mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);
+
+ /* Poll if atomic, wait_for_completion otherwise */
+ if (atomic == GFP_KERNEL) {
+ /* Wait for the command to complete or timeout. */
+ if (wait_for_completion_timeout(
+ &wait,
+ msecs_to_jiffies(timeout)) == 0) {
+ dev_err(&port->dd->pdev->dev,
+ "Internal command did not complete [%d] "
+ "within timeout of %lu ms\n",
+ atomic, timeout);
+ rv = -EAGAIN;
+ }
+
+ if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
+ & (1 << MTIP_TAG_INTERNAL)) {
+ dev_warn(&port->dd->pdev->dev,
+ "Retiring internal command but CI is 1.\n");
+ }
+
+ } else {
+ /* Spin for <timeout> checking if command still outstanding */
+ timeout = jiffies + msecs_to_jiffies(timeout);
+
+ while ((readl(
+ port->cmd_issue[MTIP_TAG_INTERNAL])
+ & (1 << MTIP_TAG_INTERNAL))
+ && time_before(jiffies, timeout))
+ ;
+
+ if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
+ & (1 << MTIP_TAG_INTERNAL)) {
+ dev_err(&port->dd->pdev->dev,
+ "Internal command did not complete [%d]\n",
+ atomic);
+ rv = -EAGAIN;
+ }
+ }
+
+ /* Clear the allocated and active bits for the internal command. */
+ atomic_set(&int_cmd->active, 0);
+ release_slot(port, MTIP_TAG_INTERNAL);
+ clear_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags);
+ wake_up_interruptible(&port->svc_wait);
+
+ return rv;
+}
+
+/*
+ * Byte-swap ATA ID strings.
+ *
+ * ATA identify data contains strings in byte-swapped 16-bit words.
+ * They must be swapped (on all architectures) to be usable as C strings.
+ * This function swaps bytes in-place.
+ *
+ * @buf The buffer location of the string
+ * @len The number of bytes to swap
+ *
+ * return value
+ * None
+ */
+static inline void ata_swap_string(u16 *buf, unsigned int len)
+{
+ int i;
+ for (i = 0; i < (len/2); i++)
+ be16_to_cpus(&buf[i]);
+}
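+/*
+ * For example, identify words holding the string "Micron" read back on a
+ * little-endian host as "iMrcno"; swapping each 16-bit word in place
+ * restores "Micron".
+ */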
+
+/*
+ * Request the device identity information.
+ *
+ * If a user space buffer is not specified, i.e. is NULL, the
+ * identify information is still read from the drive and placed
+ * into the identify data buffer (@e port->identify) in the
+ * port data structure.
+ * When the identify buffer contains valid identify information @e
+ * port->identify_valid is non-zero.
+ *
+ * @port Pointer to the port structure.
+ * @user_buffer A user space buffer where the identify data should be
+ * copied.
+ *
+ * return value
+ * 0 Command completed successfully.
+ * -EFAULT An error occurred while copying data to the user buffer.
+ * -1 Command failed.
+ */
+static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
+{
+ int rv = 0;
+ struct host_to_dev_fis fis;
+
+ /* Build the FIS. */
+ memset(&fis, 0, sizeof(struct host_to_dev_fis));
+ fis.type = 0x27;
+ fis.opts = 1 << 7;
+ fis.command = ATA_CMD_ID_ATA;
+
+ /* Set the identify information as invalid. */
+ port->identify_valid = 0;
+
+ /* Clear the identify information. */
+ memset(port->identify, 0, sizeof(u16) * ATA_ID_WORDS);
+
+ /* Execute the command. */
+ if (mtip_exec_internal_command(port,
+ &fis,
+ 5,
+ port->identify_dma,
+ sizeof(u16) * ATA_ID_WORDS,
+ 0,
+ GFP_KERNEL,
+ MTIP_INTERNAL_COMMAND_TIMEOUT_MS)
+ < 0) {
+ rv = -1;
+ goto out;
+ }
+
+ /*
+ * Perform any necessary byte-swapping. Yes, the kernel does in fact
+ * perform field-sensitive swapping on the string fields.
+ * See the kernel use of ata_id_string() for proof of this.
+ */
+#ifdef __LITTLE_ENDIAN
+ ata_swap_string(port->identify + 27, 40); /* model string*/
+ ata_swap_string(port->identify + 23, 8); /* firmware string*/
+ ata_swap_string(port->identify + 10, 20); /* serial# string*/
+#else
+ {
+ int i;
+ for (i = 0; i < ATA_ID_WORDS; i++)
+ port->identify[i] = le16_to_cpu(port->identify[i]);
+ }
+#endif
+
+ /* Set the identify buffer as valid. */
+ port->identify_valid = 1;
+
+ if (user_buffer) {
+ if (copy_to_user(
+ user_buffer,
+ port->identify,
+ ATA_ID_WORDS * sizeof(u16))) {
+ rv = -EFAULT;
+ goto out;
+ }
+ }
+
+out:
+ return rv;
+}
+
+/*
+ * Issue a standby immediate command to the device.
+ *
+ * @port Pointer to the port structure.
+ *
+ * return value
+ * 0 Command was executed successfully.
+ * -1 An error occurred while executing the command.
+ */
+static int mtip_standby_immediate(struct mtip_port *port)
+{
+ int rv;
+ struct host_to_dev_fis fis;
+
+ /* Build the FIS. */
+ memset(&fis, 0, sizeof(struct host_to_dev_fis));
+ fis.type = 0x27;
+ fis.opts = 1 << 7;
+ fis.command = ATA_CMD_STANDBYNOW1;
+
+ /* Execute the command. Use a 15-second timeout for large drives. */
+ rv = mtip_exec_internal_command(port,
+ &fis,
+ 5,
+ 0,
+ 0,
+ 0,
+ GFP_KERNEL,
+ 15000);
+
+ return rv;
+}
+
+/*
+ * Get the drive capacity.
+ *
+ * @dd Pointer to the device data structure.
+ * @sectors Pointer to the variable that will receive the sector count.
+ *
+ * return value
+ * 1 Capacity was returned successfully.
+ * 0 The identify information is invalid.
+ */
+static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors)
+{
+ struct mtip_port *port = dd->port;
+ u64 total, raw0, raw1, raw2, raw3;
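+ /* Identify words 100-103 hold the 48-bit native capacity in sectors. */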
+ raw0 = port->identify[100];
+ raw1 = port->identify[101];
+ raw2 = port->identify[102];
+ raw3 = port->identify[103];
+ total = raw0 | raw1<<16 | raw2<<32 | raw3<<48;
+ *sectors = total;
+ return (bool) !!port->identify_valid;
+}
+
+/*
+ * Reset the HBA.
+ *
+ * Resets the HBA by setting the HBA Reset bit in the Global
+ * HBA Control register. After setting the HBA Reset bit the
+ * function waits for 1 second before reading the HBA Reset
+ * bit to make sure it has cleared. If HBA Reset is not clear
+ * an error is returned. This function sleeps and therefore must
+ * not be called from atomic context.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0 The reset was successful.
+ * -1 The HBA Reset bit did not clear.
+ */
+static int mtip_hba_reset(struct driver_data *dd)
+{
+ mtip_deinit_port(dd->port);
+
+ /* Set the reset bit */
+ writel(HOST_RESET, dd->mmio + HOST_CTL);
+
+ /* Flush */
+ readl(dd->mmio + HOST_CTL);
+
+ /* Wait for reset to clear */
+ ssleep(1);
+
+ /* Check the bit has cleared */
+ if (readl(dd->mmio + HOST_CTL) & HOST_RESET) {
+ dev_err(&dd->pdev->dev,
+ "Reset bit did not clear.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
+ * Display the identify command data.
+ *
+ * @port Pointer to the port data structure.
+ *
+ * return value
+ * None
+ */
+static void mtip_dump_identify(struct mtip_port *port)
+{
+ sector_t sectors;
+ unsigned short revid;
+ char cbuf[42];
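+ /*
+ * ATA identify strings: the serial number is 20 characters (words
+ * 10-19), the firmware revision 8 (words 23-26) and the model
+ * number 40 (words 27-46); the extra byte in each copy below holds
+ * the terminating NUL.
+ */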
+
+ if (!port->identify_valid)
+ return;
+
+ strlcpy(cbuf, (char *)(port->identify+10), 21);
+ dev_info(&port->dd->pdev->dev,
+ "Serial No.: %s\n", cbuf);
+
+ strlcpy(cbuf, (char *)(port->identify+23), 9);
+ dev_info(&port->dd->pdev->dev,
+ "Firmware Ver.: %s\n", cbuf);
+
+ strlcpy(cbuf, (char *)(port->identify+27), 41);
+ dev_info(&port->dd->pdev->dev, "Model: %s\n", cbuf);
+
+ if (mtip_hw_get_capacity(port->dd, &sectors))
+ dev_info(&port->dd->pdev->dev,
+ "Capacity: %llu sectors (%llu MB)\n",
+ (u64)sectors,
+ ((u64)sectors) * ATA_SECT_SIZE >> 20);
+
+ pci_read_config_word(port->dd->pdev, PCI_REVISION_ID, &revid);
+ switch (revid & 0xFF) {
+ case 0x1:
+ strlcpy(cbuf, "A0", 3);
+ break;
+ case 0x3:
+ strlcpy(cbuf, "A2", 3);
+ break;
+ default:
+ strlcpy(cbuf, "?", 2);
+ break;
+ }
+ dev_info(&port->dd->pdev->dev,
+ "Card Type: %s\n", cbuf);
+}
+
+/*
+ * Map the commands scatter list into the command table.
+ *
+ * @command Pointer to the command.
+ * @nents Number of scatter list entries.
+ *
+ * return value
+ * None
+ */
+static inline void fill_command_sg(struct driver_data *dd,
+ struct mtip_cmd *command,
+ int nents)
+{
+ int n;
+ unsigned int dma_len;
+ struct mtip_cmd_sg *command_sg;
+ struct scatterlist *sg = command->sg;
+
+ command_sg = command->command + AHCI_CMD_TBL_HDR_SZ;
+
+ for (n = 0; n < nents; n++) {
+ dma_len = sg_dma_len(sg);
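+ /*
+ * Each PRD entry can describe at most 4 MB; the data byte count
+ * field is 22 bits wide and zero based, hence the check and the
+ * (dma_len - 1) & 0x3FFFFF encoding below.
+ */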
+ if (dma_len > 0x400000)
+ dev_err(&dd->pdev->dev,
+ "DMA segment length truncated\n");
+ command_sg->info = __force_bit2int
+ cpu_to_le32((dma_len-1) & 0x3FFFFF);
+ command_sg->dba = __force_bit2int
+ cpu_to_le32(sg_dma_address(sg));
+ command_sg->dba_upper = __force_bit2int
+ cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
+ command_sg++;
+ sg++;
+ }
+}
+
+/*
+ * Execute a drive command built from caller supplied taskfile registers.
+ *
+ * @port Pointer to the port data structure.
+ * @command Pointer to the taskfile register values; the completion
+ * status and error are written back into this array.
+ *
+ * return value
+ * 0 The command completed successfully.
+ * -1 An error occurred while executing the command.
+ */
+static int exec_drive_task(struct mtip_port *port, u8 *command)
+{
+ struct host_to_dev_fis fis;
+ struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG);
+
+ /* Build the FIS. */
+ memset(&fis, 0, sizeof(struct host_to_dev_fis));
+ fis.type = 0x27;
+ fis.opts = 1 << 7;
+ fis.command = command[0];
+ fis.features = command[1];
+ fis.sect_count = command[2];
+ fis.sector = command[3];
+ fis.cyl_low = command[4];
+ fis.cyl_hi = command[5];
+ fis.device = command[6] & ~0x10; /* Clear the dev bit*/
+
+
+ dbg_printk(MTIP_DRV_NAME "%s: User Command: cmd %x, feat %x, "
+ "nsect %x, sect %x, lcyl %x, "
+ "hcyl %x, sel %x\n",
+ __func__,
+ command[0],
+ command[1],
+ command[2],
+ command[3],
+ command[4],
+ command[5],
+ command[6]);
+
+ /* Execute the command. */
+ if (mtip_exec_internal_command(port,
+ &fis,
+ 5,
+ 0,
+ 0,
+ 0,
+ GFP_KERNEL,
+ MTIP_IOCTL_COMMAND_TIMEOUT_MS) < 0) {
+ return -1;
+ }
+
+ command[0] = reply->command; /* Status*/
+ command[1] = reply->features; /* Error*/
+ command[4] = reply->cyl_low;
+ command[5] = reply->cyl_hi;
+
+ dbg_printk(MTIP_DRV_NAME "%s: Completion Status: stat %x, "
+ "err %x , cyl_lo %x cyl_hi %x\n",
+ __func__,
+ command[0],
+ command[1],
+ command[4],
+ command[5]);
+
+ return 0;
+}
+
+/*
+ * Execute a drive command.
+ *
+ * @port Pointer to the port data structure.
+ * @command Pointer to the user specified command parameters.
+ * @user_buffer Pointer to the user space buffer where read sector
+ * data should be copied.
+ *
+ * return value
+ * 0 The command completed successfully.
+ * -EFAULT An error occurred while copying the completion
+ * data to the user space buffer.
+ * -1 An error occurred while executing the command.
+ */
+static int exec_drive_command(struct mtip_port *port, u8 *command,
+ void __user *user_buffer)
+{
+ struct host_to_dev_fis fis;
+ struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG);
+
+ /* Build the FIS. */
+ memset(&fis, 0, sizeof(struct host_to_dev_fis));
+ fis.type = 0x27;
+ fis.opts = 1 << 7;
+ fis.command = command[0];
+ fis.features = command[2];
+ fis.sect_count = command[3];
+ if (fis.command == ATA_CMD_SMART) {
+ fis.sector = command[1];
+ fis.cyl_low = 0x4F;
+ fis.cyl_hi = 0xC2;
+ }
+
+ dbg_printk(MTIP_DRV_NAME
+ "%s: User Command: cmd %x, sect %x, "
+ "feat %x, sectcnt %x\n",
+ __func__,
+ command[0],
+ command[1],
+ command[2],
+ command[3]);
+
+ memset(port->sector_buffer, 0x00, ATA_SECT_SIZE);
+
+ /* Execute the command. */
+ if (mtip_exec_internal_command(port,
+ &fis,
+ 5,
+ port->sector_buffer_dma,
+ (command[3] != 0) ? ATA_SECT_SIZE : 0,
+ 0,
+ GFP_KERNEL,
+ MTIP_IOCTL_COMMAND_TIMEOUT_MS)
+ < 0) {
+ return -1;
+ }
+
+ /* Collect the completion status. */
+ command[0] = reply->command; /* Status*/
+ command[1] = reply->features; /* Error*/
+ command[2] = command[3];
+
+ dbg_printk(MTIP_DRV_NAME
+ "%s: Completion Status: stat %x, "
+ "err %x, cmd %x\n",
+ __func__,
+ command[0],
+ command[1],
+ command[2]);
+
+ if (user_buffer && command[3]) {
+ if (copy_to_user(user_buffer,
+ port->sector_buffer,
+ ATA_SECT_SIZE * command[3])) {
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Indicates whether a command has a single sector payload.
+ *
+ * @command Command byte that will be sent to the device.
+ * @features Features byte that will be sent to the device.
+ *
+ * return value
+ * 1 command is one that always has a single sector payload,
+ * regardless of the value in the Sector Count field.
+ * 0 otherwise
+ *
+ */
+static unsigned int implicit_sector(unsigned char command,
+ unsigned char features)
+{
+ unsigned int rv = 0;
+
+ /* list of commands that have an implicit sector count of 1 */
+ switch (command) {
+ case ATA_CMD_SEC_SET_PASS:
+ case ATA_CMD_SEC_UNLOCK:
+ case ATA_CMD_SEC_ERASE_PREP:
+ case ATA_CMD_SEC_ERASE_UNIT:
+ case ATA_CMD_SEC_FREEZE_LOCK:
+ case ATA_CMD_SEC_DISABLE_PASS:
+ case ATA_CMD_PMP_READ:
+ case ATA_CMD_PMP_WRITE:
+ rv = 1;
+ break;
+ case ATA_CMD_SET_MAX:
+ if (features == ATA_SET_MAX_UNLOCK)
+ rv = 1;
+ break;
+ case ATA_CMD_SMART:
+ if ((features == ATA_SMART_READ_VALUES) ||
+ (features == ATA_SMART_READ_THRESHOLDS))
+ rv = 1;
+ break;
+ case ATA_CMD_CONF_OVERLAY:
+ if ((features == ATA_DCO_IDENTIFY) ||
+ (features == ATA_DCO_SET))
+ rv = 1;
+ break;
+ }
+ return rv;
+}
+
+/*
+ * Executes a taskfile
+ * See ide_taskfile_ioctl() for derivation
+ */
+static int exec_drive_taskfile(struct driver_data *dd,
+ void __user *buf,
+ ide_task_request_t *req_task,
+ int outtotal)
+{
+ struct host_to_dev_fis fis;
+ struct host_to_dev_fis *reply;
+ u8 *outbuf = NULL;
+ u8 *inbuf = NULL;
+ dma_addr_t outbuf_dma = 0;
+ dma_addr_t inbuf_dma = 0;
+ dma_addr_t dma_buffer = 0;
+ int err = 0;
+ unsigned int taskin = 0;
+ unsigned int taskout = 0;
+ u8 nsect = 0;
+ unsigned int timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
+ unsigned int force_single_sector;
+ unsigned int transfer_size;
+ unsigned long task_file_data;
+ int intotal = outtotal + req_task->out_size;
+
+ taskout = req_task->out_size;
+ taskin = req_task->in_size;
+ /* 130560 = 512 * 0xFF*/
+ if (taskin > 130560 || taskout > 130560) {
+ err = -EINVAL;
+ goto abort;
+ }
+
+ if (taskout) {
+ outbuf = kzalloc(taskout, GFP_KERNEL);
+ if (outbuf == NULL) {
+ err = -ENOMEM;
+ goto abort;
+ }
+ if (copy_from_user(outbuf, buf + outtotal, taskout)) {
+ err = -EFAULT;
+ goto abort;
+ }
+ outbuf_dma = pci_map_single(dd->pdev,
+ outbuf,
+ taskout,
+ DMA_TO_DEVICE);
+ if (outbuf_dma == 0) {
+ err = -ENOMEM;
+ goto abort;
+ }
+ dma_buffer = outbuf_dma;
+ }
+
+ if (taskin) {
+ inbuf = kzalloc(taskin, GFP_KERNEL);
+ if (inbuf == NULL) {
+ err = -ENOMEM;
+ goto abort;
+ }
+
+ if (copy_from_user(inbuf, buf + intotal, taskin)) {
+ err = -EFAULT;
+ goto abort;
+ }
+ inbuf_dma = pci_map_single(dd->pdev,
+ inbuf,
+ taskin, DMA_FROM_DEVICE);
+ if (inbuf_dma == 0) {
+ err = -ENOMEM;
+ goto abort;
+ }
+ dma_buffer = inbuf_dma;
+ }
+
+ /* only supports PIO and non-data commands from this ioctl. */
+ switch (req_task->data_phase) {
+ case TASKFILE_OUT:
+ nsect = taskout / ATA_SECT_SIZE;
+ reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
+ break;
+ case TASKFILE_IN:
+ reply = (dd->port->rxfis + RX_FIS_PIO_SETUP);
+ break;
+ case TASKFILE_NO_DATA:
+ reply = (dd->port->rxfis + RX_FIS_D2H_REG);
+ break;
+ default:
+ err = -EINVAL;
+ goto abort;
+ }
+
+ /* Build the FIS. */
+ memset(&fis, 0, sizeof(struct host_to_dev_fis));
+
+ fis.type = 0x27;
+ fis.opts = 1 << 7;
+ fis.command = req_task->io_ports[7];
+ fis.features = req_task->io_ports[1];
+ fis.sect_count = req_task->io_ports[2];
+ fis.lba_low = req_task->io_ports[3];
+ fis.lba_mid = req_task->io_ports[4];
+ fis.lba_hi = req_task->io_ports[5];
+ /* Clear the dev bit*/
+ fis.device = req_task->io_ports[6] & ~0x10;
+
+ if ((req_task->in_flags.all == 0) && (req_task->out_flags.all & 1)) {
+ req_task->in_flags.all =
+ IDE_TASKFILE_STD_IN_FLAGS |
+ (IDE_HOB_STD_IN_FLAGS << 8);
+ fis.lba_low_ex = req_task->hob_ports[3];
+ fis.lba_mid_ex = req_task->hob_ports[4];
+ fis.lba_hi_ex = req_task->hob_ports[5];
+ fis.features_ex = req_task->hob_ports[1];
+ fis.sect_cnt_ex = req_task->hob_ports[2];
+
+ } else {
+ req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
+ }
+
+ force_single_sector = implicit_sector(fis.command, fis.features);
+
+ if ((taskin || taskout) && (!fis.sect_count)) {
+ if (nsect)
+ fis.sect_count = nsect;
+ else {
+ if (!force_single_sector) {
+ dev_warn(&dd->pdev->dev,
+ "data movement but "
+ "sect_count is 0\n");
+ err = -EINVAL;
+ goto abort;
+ }
+ }
+ }
+
+ dbg_printk(MTIP_DRV_NAME
+ "taskfile: cmd %x, feat %x, nsect %x,"
+ " sect/lbal %x, lcyl/lbam %x, hcyl/lbah %x,"
+ " head/dev %x\n",
+ fis.command,
+ fis.features,
+ fis.sect_count,
+ fis.lba_low,
+ fis.lba_mid,
+ fis.lba_hi,
+ fis.device);
+
+ switch (fis.command) {
+ case ATA_CMD_DOWNLOAD_MICRO:
+ /* Change timeout for Download Microcode to 60 seconds.*/
+ timeout = 60000;
+ break;
+ case ATA_CMD_SEC_ERASE_UNIT:
+ /* Change timeout for Security Erase Unit to 4 minutes.*/
+ timeout = 240000;
+ break;
+ case ATA_CMD_STANDBYNOW1:
+ /* Change timeout for standby immediate to 10 seconds.*/
+ timeout = 10000;
+ break;
+ case 0xF7:
+ case 0xFA:
+ /* Change timeout for vendor unique command to 10 secs */
+ timeout = 10000;
+ break;
+ case ATA_CMD_SMART:
+ /* Change timeout for SMART command to 10 secs */
+ timeout = 10000;
+ break;
+ default:
+ timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
+ break;
+ }
+
+ /* Determine the correct transfer size.*/
+ if (force_single_sector)
+ transfer_size = ATA_SECT_SIZE;
+ else
+ transfer_size = ATA_SECT_SIZE * fis.sect_count;
+
+ /* Execute the command.*/
+ if (mtip_exec_internal_command(dd->port,
+ &fis,
+ 5,
+ dma_buffer,
+ transfer_size,
+ 0,
+ GFP_KERNEL,
+ timeout) < 0) {
+ err = -EIO;
+ goto abort;
+ }
+
+ task_file_data = readl(dd->port->mmio+PORT_TFDATA);
+
+ if ((req_task->data_phase == TASKFILE_IN) && !(task_file_data & 1)) {
+ reply = dd->port->rxfis + RX_FIS_PIO_SETUP;
+ req_task->io_ports[7] = reply->control;
+ } else {
+ reply = dd->port->rxfis + RX_FIS_D2H_REG;
+ req_task->io_ports[7] = reply->command;
+ }
+
+ /* reclaim the DMA buffers.*/
+ if (inbuf_dma)
+ pci_unmap_single(dd->pdev, inbuf_dma,
+ taskin, DMA_FROM_DEVICE);
+ if (outbuf_dma)
+ pci_unmap_single(dd->pdev, outbuf_dma,
+ taskout, DMA_TO_DEVICE);
+ inbuf_dma = 0;
+ outbuf_dma = 0;
+
+ /* return the ATA registers to the caller.*/
+ req_task->io_ports[1] = reply->features;
+ req_task->io_ports[2] = reply->sect_count;
+ req_task->io_ports[3] = reply->lba_low;
+ req_task->io_ports[4] = reply->lba_mid;
+ req_task->io_ports[5] = reply->lba_hi;
+ req_task->io_ports[6] = reply->device;
+
+ if (req_task->out_flags.all & 1) {
+
+ req_task->hob_ports[3] = reply->lba_low_ex;
+ req_task->hob_ports[4] = reply->lba_mid_ex;
+ req_task->hob_ports[5] = reply->lba_hi_ex;
+ req_task->hob_ports[1] = reply->features_ex;
+ req_task->hob_ports[2] = reply->sect_cnt_ex;
+ }
+
+ /* COM reset after secure erase or low-level format */
+ if (((fis.command == ATA_CMD_SEC_ERASE_UNIT) ||
+ ((fis.command == 0xFC) &&
+ (fis.features == 0x27 || fis.features == 0x72 ||
+ fis.features == 0x62 || fis.features == 0x26))) &&
+ !(reply->command & 1)) {
+ mtip_restart_port(dd->port);
+ }
+
+ dbg_printk(MTIP_DRV_NAME
+ "%s: Completion: stat %x,"
+ "err %x, sect_cnt %x, lbalo %x,"
+ "lbamid %x, lbahi %x, dev %x\n",
+ __func__,
+ req_task->io_ports[7],
+ req_task->io_ports[1],
+ req_task->io_ports[2],
+ req_task->io_ports[3],
+ req_task->io_ports[4],
+ req_task->io_ports[5],
+ req_task->io_ports[6]);
+
+ if (taskout) {
+ if (copy_to_user(buf + outtotal, outbuf, taskout)) {
+ err = -EFAULT;
+ goto abort;
+ }
+ }
+ if (taskin) {
+ if (copy_to_user(buf + intotal, inbuf, taskin)) {
+ err = -EFAULT;
+ goto abort;
+ }
+ }
+abort:
+ if (inbuf_dma)
+ pci_unmap_single(dd->pdev, inbuf_dma,
+ taskin, DMA_FROM_DEVICE);
+ if (outbuf_dma)
+ pci_unmap_single(dd->pdev, outbuf_dma,
+ taskout, DMA_TO_DEVICE);
+ kfree(outbuf);
+ kfree(inbuf);
+
+ return err;
+}
+
+/*
+ * Handle IOCTL calls from the Block Layer.
+ *
+ * This function is called by the Block Layer when it receives an IOCTL
+ * command that it does not understand. If the IOCTL command is not
+ * supported, -EINVAL is returned.
+ *
+ * @dd Pointer to the driver data structure.
+ * @cmd IOCTL command passed from the Block Layer.
+ * @arg IOCTL argument passed from the Block Layer.
+ *
+ * return value
+ * 0 The IOCTL completed successfully.
+ * -EINVAL The specified command is not supported.
+ * -EFAULT An error occurred copying data to a user space buffer.
+ * -EIO An error occurred while executing the command.
+ */
+static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case HDIO_GET_IDENTITY:
+ if (mtip_get_identify(dd->port, (void __user *) arg) < 0) {
+ dev_warn(&dd->pdev->dev,
+ "Unable to read identity\n");
+ return -EIO;
+ }
+
+ break;
+ case HDIO_DRIVE_CMD:
+ {
+ u8 drive_command[4];
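+ /*
+ * As used below, the four bytes are: [0] command, [1] sector
+ * number (used by SMART), [2] features and [3] sector count;
+ * on completion [0] and [1] are overwritten with the status and
+ * error registers.
+ */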
+
+ /* Copy the user command info to our buffer. */
+ if (copy_from_user(drive_command,
+ (void __user *) arg,
+ sizeof(drive_command)))
+ return -EFAULT;
+
+ /* Execute the drive command. */
+ if (exec_drive_command(dd->port,
+ drive_command,
+ (void __user *) (arg+4)))
+ return -EIO;
+
+ /* Copy the status back to the users buffer. */
+ if (copy_to_user((void __user *) arg,
+ drive_command,
+ sizeof(drive_command)))
+ return -EFAULT;
+
+ break;
+ }
+ case HDIO_DRIVE_TASK:
+ {
+ u8 drive_command[7];
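+ /*
+ * As used below, the seven bytes are: [0] command, [1] features,
+ * [2] sector count, [3] sector number, [4] cylinder low,
+ * [5] cylinder high and [6] device/head; the completion status is
+ * written back into the same array.
+ */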
+
+ /* Copy the user command info to our buffer. */
+ if (copy_from_user(drive_command,
+ (void __user *) arg,
+ sizeof(drive_command)))
+ return -EFAULT;
+
+ /* Execute the drive command. */
+ if (exec_drive_task(dd->port, drive_command))
+ return -EIO;
+
+ /* Copy the status back to the users buffer. */
+ if (copy_to_user((void __user *) arg,
+ drive_command,
+ sizeof(drive_command)))
+ return -EFAULT;
+
+ break;
+ }
+ case HDIO_DRIVE_TASKFILE: {
+ ide_task_request_t req_task;
+ int ret, outtotal;
+
+ if (copy_from_user(&req_task, (void __user *) arg,
+ sizeof(req_task)))
+ return -EFAULT;
+
+ outtotal = sizeof(req_task);
+
+ ret = exec_drive_taskfile(dd, (void __user *) arg,
+ &req_task, outtotal);
+
+ if (copy_to_user((void __user *) arg, &req_task,
+ sizeof(req_task)))
+ return -EFAULT;
+
+ return ret;
+ }
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Submit an I/O to the hardware.
+ *
+ * This function is called by the block layer to issue an I/O
+ * to the device. Upon completion, the callback function will
+ * be called with the data parameter passed as the callback data.
+ *
+ * @dd Pointer to the driver data structure.
+ * @start First sector of the transfer.
+ * @nsect Number of sectors to transfer.
+ * @nents Number of entries in the scatter list for this command.
+ * @tag The tag of this command.
+ * @callback Pointer to the function that should be called
+ * when the transfer completes.
+ * @data Callback data passed to the callback function
+ * when the transfer completes.
+ * @dir Direction (read or write)
+ *
+ * return value
+ * None
+ */
+static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
+ int nsect, int nents, int tag, void *callback,
+ void *data, int dir)
+{
+ struct host_to_dev_fis *fis;
+ struct mtip_port *port = dd->port;
+ struct mtip_cmd *command = &port->commands[tag];
+
+ /* Map the scatter list for DMA access */
+ if (dir == READ)
+ nents = dma_map_sg(&dd->pdev->dev, command->sg,
+ nents, DMA_FROM_DEVICE);
+ else
+ nents = dma_map_sg(&dd->pdev->dev, command->sg,
+ nents, DMA_TO_DEVICE);
+
+ command->scatter_ents = nents;
+
+ /*
+ * The number of retries for this command before it is
+ * reported as a failure to the upper layers.
+ */
+ command->retries = MTIP_MAX_RETRIES;
+
+ /* Fill out fis */
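+ /*
+ * For FPDMA (NCQ) reads and writes the transfer length travels in
+ * the features/features_ex fields and the tag in bits 7:3 of the
+ * sector count field, which is why nsect and tag land in what look
+ * like unusual places below.
+ */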
+ fis = command->command;
+ fis->type = 0x27;
+ fis->opts = 1 << 7;
+ fis->command =
+ (dir == READ ? ATA_CMD_FPDMA_READ : ATA_CMD_FPDMA_WRITE);
+ *((unsigned int *) &fis->lba_low) = (start & 0xFFFFFF);
+ *((unsigned int *) &fis->lba_low_ex) = ((start >> 24) & 0xFFFFFF);
+ fis->device = 1 << 6;
+ fis->features = nsect & 0xFF;
+ fis->features_ex = (nsect >> 8) & 0xFF;
+ fis->sect_count = ((tag << 3) | (tag >> 5));
+ fis->sect_cnt_ex = 0;
+ fis->control = 0;
+ fis->res2 = 0;
+ fis->res3 = 0;
+ fill_command_sg(dd, command, nents);
+
+ /* Populate the command header */
+ command->command_header->opts =
+ __force_bit2int cpu_to_le32(
+ (nents << 16) | 5 | AHCI_CMD_PREFETCH);
+ command->command_header->byte_count = 0;
+
+ /*
+ * Set the completion function and data for the command
+ * within this layer.
+ */
+ command->comp_data = dd;
+ command->comp_func = mtip_async_complete;
+ command->direction = (dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+
+ /*
+ * Set the completion function and data for the command passed
+ * from the upper layer.
+ */
+ command->async_data = data;
+ command->async_callback = callback;
+
+ /*
+ * Prevent this command from being issued if an internal command
+ * is in progress or error handling is active.
+ */
+ if (unlikely(test_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags) ||
+ test_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags))) {
+ set_bit(tag, port->cmds_to_issue);
+ set_bit(MTIP_FLAG_ISSUE_CMDS_BIT, &port->flags);
+ return;
+ }
+
+ /* Issue the command to the hardware */
+ mtip_issue_ncq_command(port, tag);
+
+ /* Set the command's timeout value.*/
+ port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
+ MTIP_NCQ_COMMAND_TIMEOUT_MS);
+}
+
+/*
+ * Release a command slot.
+ *
+ * @dd Pointer to the driver data structure.
+ * @tag Slot tag
+ *
+ * return value
+ * None
+ */
+static void mtip_hw_release_scatterlist(struct driver_data *dd, int tag)
+{
+ release_slot(dd->port, tag);
+}
+
+/*
+ * Obtain a command slot and return its associated scatter list.
+ *
+ * @dd Pointer to the driver data structure.
+ * @tag Pointer to an int that will receive the allocated command
+ * slot tag.
+ *
+ * return value
+ * Pointer to the scatter list for the allocated command slot
+ * or NULL if no command slots are available.
+ */
+static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
+ int *tag)
+{
+ /*
+ * It is possible that, even with this semaphore, a thread
+ * may think that no command slots are available. Therefore, we
+ * need to make an attempt to get_slot().
+ */
+ down(&dd->port->cmd_slot);
+ *tag = get_slot(dd->port);
+
+ if (unlikely(*tag < 0))
+ return NULL;
+
+ return dd->port->commands[*tag].sg;
+}
+
+/*
+ * Sysfs register/status dump.
+ *
+ * @dev Pointer to the device structure, passed by the kernel.
+ * @attr Pointer to the device_attribute structure passed by the kernel.
+ * @buf Pointer to the char buffer that will receive the stats info.
+ *
+ * return value
+ * The size, in bytes, of the data copied into buf.
+ */
+static ssize_t hw_show_registers(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u32 group_allocated;
+ struct driver_data *dd = dev_to_disk(dev)->private_data;
+ int size = 0;
+ int n;
+
+ size += sprintf(&buf[size], "%s:\ns_active:\n", __func__);
+
+ for (n = 0; n < dd->slot_groups; n++)
+ size += sprintf(&buf[size], "0x%08x\n",
+ readl(dd->port->s_active[n]));
+
+ size += sprintf(&buf[size], "Command Issue:\n");
+
+ for (n = 0; n < dd->slot_groups; n++)
+ size += sprintf(&buf[size], "0x%08x\n",
+ readl(dd->port->cmd_issue[n]));
+
+ size += sprintf(&buf[size], "Allocated:\n");
+
+ for (n = 0; n < dd->slot_groups; n++) {
+ if (sizeof(long) > sizeof(u32))
+ group_allocated =
+ dd->port->allocated[n/2] >> (32*(n&1));
+ else
+ group_allocated = dd->port->allocated[n];
+ size += sprintf(&buf[size], "0x%08x\n",
+ group_allocated);
+ }
+
+ size += sprintf(&buf[size], "completed:\n");
+
+ for (n = 0; n < dd->slot_groups; n++)
+ size += sprintf(&buf[size], "0x%08x\n",
+ readl(dd->port->completed[n]));
+
+ size += sprintf(&buf[size], "PORT_IRQ_STAT 0x%08x\n",
+ readl(dd->port->mmio + PORT_IRQ_STAT));
+ size += sprintf(&buf[size], "HOST_IRQ_STAT 0x%08x\n",
+ readl(dd->mmio + HOST_IRQ_STAT));
+
+ return size;
+}
+static DEVICE_ATTR(registers, S_IRUGO, hw_show_registers, NULL);
+
+/*
+ * Create the sysfs related attributes.
+ *
+ * @dd Pointer to the driver data structure.
+ * @kobj Pointer to the kobj for the block device.
+ *
+ * return value
+ * 0 Operation completed successfully.
+ * -EINVAL Invalid parameter.
+ */
+static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
+{
+ if (!kobj || !dd)
+ return -EINVAL;
+
+ if (sysfs_create_file(kobj, &dev_attr_registers.attr))
+ dev_warn(&dd->pdev->dev,
+ "Error creating registers sysfs entry\n");
+ return 0;
+}
+
+/*
+ * Remove the sysfs related attributes.
+ *
+ * @dd Pointer to the driver data structure.
+ * @kobj Pointer to the kobj for the block device.
+ *
+ * return value
+ * 0 Operation completed successfully.
+ * -EINVAL Invalid parameter.
+ */
+static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
+{
+ if (!kobj || !dd)
+ return -EINVAL;
+
+ sysfs_remove_file(kobj, &dev_attr_registers.attr);
+
+ return 0;
+}
+
+/*
+ * Perform any init/resume time hardware setup
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * None
+ */
+static inline void hba_setup(struct driver_data *dd)
+{
+ u32 hwdata;
+ hwdata = readl(dd->mmio + HOST_HSORG);
+
+ /* interrupt bug workaround: use only 1 IS bit.*/
+ writel(hwdata |
+ HSORG_DISABLE_SLOTGRP_INTR |
+ HSORG_DISABLE_SLOTGRP_PXIS,
+ dd->mmio + HOST_HSORG);
+}
+
+/*
+ * Detect the details of the product, and store anything needed
+ * into the driver data structure. This includes product type and
+ * version and number of slot groups.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * None
+ */
+static void mtip_detect_product(struct driver_data *dd)
+{
+ u32 hwdata;
+ unsigned int rev, slotgroups;
+
+ /*
+ * HBA base + 0xFC [15:0] - vendor-specific hardware interface
+ * info register:
+ * [15:8] hardware/software interface rev#
+ * [ 3] asic-style interface
+ * [ 2:0] number of slot groups, minus 1 (only valid for asic-style).
+ */
+ hwdata = readl(dd->mmio + HOST_HSORG);
+
+ dd->product_type = MTIP_PRODUCT_UNKNOWN;
+ dd->slot_groups = 1;
+
+ if (hwdata & 0x8) {
+ dd->product_type = MTIP_PRODUCT_ASICFPGA;
+ rev = (hwdata & HSORG_HWREV) >> 8;
+ slotgroups = (hwdata & HSORG_SLOTGROUPS) + 1;
+ dev_info(&dd->pdev->dev,
+ "ASIC-FPGA design, HS rev 0x%x, "
+ "%i slot groups [%i slots]\n",
+ rev,
+ slotgroups,
+ slotgroups * 32);
+
+ if (slotgroups > MTIP_MAX_SLOT_GROUPS) {
+ dev_warn(&dd->pdev->dev,
+ "Warning: driver only supports "
+ "%i slot groups.\n", MTIP_MAX_SLOT_GROUPS);
+ slotgroups = MTIP_MAX_SLOT_GROUPS;
+ }
+ dd->slot_groups = slotgroups;
+ return;
+ }
+
+ dev_warn(&dd->pdev->dev, "Unrecognized product id\n");
+}
+
+/*
+ * Blocking wait for FTL rebuild to complete
+ *
+ * @dd Pointer to the DRIVER_DATA structure.
+ *
+ * return value
+ * 0 FTL rebuild completed successfully
+ * -EFAULT FTL rebuild error/timeout/interruption
+ */
+static int mtip_ftl_rebuild_poll(struct driver_data *dd)
+{
+ unsigned long timeout, cnt = 0, start;
+
+ dev_warn(&dd->pdev->dev,
+ "FTL rebuild in progress. Polling for completion.\n");
+
+ start = jiffies;
+ dd->ftlrebuildflag = 1;
+ timeout = jiffies + msecs_to_jiffies(MTIP_FTL_REBUILD_TIMEOUT_MS);
+
+ do {
+ if (mtip_check_surprise_removal(dd->pdev))
+ return -EFAULT;
+
+ if (mtip_get_identify(dd->port, NULL) < 0)
+ return -EFAULT;
+
+ if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
+ MTIP_FTL_REBUILD_MAGIC) {
+ ssleep(1);
+ /* Print a progress message periodically. */
+ if (cnt++ >= 180) {
+ dev_warn(&dd->pdev->dev,
+ "FTL rebuild in progress (%d secs).\n",
+ jiffies_to_msecs(jiffies - start) / 1000);
+ cnt = 0;
+ }
+ } else {
+ dev_warn(&dd->pdev->dev,
+ "FTL rebuild complete (%d secs).\n",
+ jiffies_to_msecs(jiffies - start) / 1000);
+ dd->ftlrebuildflag = 0;
+ mtip_block_initialize(dd);
+ break;
+ }
+ ssleep(10);
+ } while (time_before(jiffies, timeout));
+
+ /* Check for timeout */
+ if (dd->ftlrebuildflag) {
+ dev_err(&dd->pdev->dev,
+ "Timed out waiting for FTL rebuild to complete (%d secs).\n",
+ jiffies_to_msecs(jiffies - start) / 1000);
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+/*
+ * service thread to issue queued commands
+ *
+ * @data Pointer to the driver data structure.
+ *
+ * return value
+ * 0
+ */
+
+static int mtip_service_thread(void *data)
+{
+ struct driver_data *dd = (struct driver_data *)data;
+ unsigned long slot, slot_start, slot_wrap;
+ unsigned int num_cmd_slots = dd->slot_groups * 32;
+ struct mtip_port *port = dd->port;
+
+ while (1) {
+ /*
+ * Sleep until a flag is set and neither an internal
+ * command nor error handling is active.
+ */
+ wait_event_interruptible(port->svc_wait, (port->flags) &&
+ !test_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags) &&
+ !test_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags));
+
+ if (kthread_should_stop())
+ break;
+
+ set_bit(MTIP_FLAG_SVC_THD_ACTIVE_BIT, &port->flags);
+ if (test_bit(MTIP_FLAG_ISSUE_CMDS_BIT, &port->flags)) {
+ slot = 1;
+ /*
+ * slot_start is a sentinel; once the first queued slot is
+ * found it records where the scan began so that the loop
+ * stops after a single pass over the bitmap (slot_wrap
+ * marks the wrap-around).
+ */
+ slot_start = num_cmd_slots;
+ slot_wrap = 0;
+ while (1) {
+ slot = find_next_bit(port->cmds_to_issue,
+ num_cmd_slots, slot);
+ if (slot_wrap == 1) {
+ if ((slot_start >= slot) ||
+ (slot >= num_cmd_slots))
+ break;
+ }
+ if (unlikely(slot_start == num_cmd_slots))
+ slot_start = slot;
+
+ if (unlikely(slot == num_cmd_slots)) {
+ slot = 1;
+ slot_wrap = 1;
+ continue;
+ }
+
+ /* Issue the command to the hardware */
+ mtip_issue_ncq_command(port, slot);
+
+ /* Set the command's timeout value.*/
+ port->commands[slot].comp_time = jiffies +
+ msecs_to_jiffies(MTIP_NCQ_COMMAND_TIMEOUT_MS);
+
+ clear_bit(slot, port->cmds_to_issue);
+ }
+
+ clear_bit(MTIP_FLAG_ISSUE_CMDS_BIT, &port->flags);
+ } else if (test_bit(MTIP_FLAG_REBUILD_BIT, &port->flags)) {
+ mtip_ftl_rebuild_poll(dd);
+ clear_bit(MTIP_FLAG_REBUILD_BIT, &port->flags);
+ }
+ clear_bit(MTIP_FLAG_SVC_THD_ACTIVE_BIT, &port->flags);
+
+ if (test_bit(MTIP_FLAG_SVC_THD_SHOULD_STOP_BIT, &port->flags))
+ break;
+ }
+ return 0;
+}
+
+/*
+ * Called once for each card.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0 on success, else an error code.
+ */
+static int mtip_hw_init(struct driver_data *dd)
+{
+ int i;
+ int rv;
+ unsigned int num_command_slots;
+
+ dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR];
+
+ mtip_detect_product(dd);
+ if (dd->product_type == MTIP_PRODUCT_UNKNOWN) {
+ rv = -EIO;
+ goto out1;
+ }
+ num_command_slots = dd->slot_groups * 32;
+
+ hba_setup(dd);
+
+ tasklet_init(&dd->tasklet, mtip_tasklet, (unsigned long)dd);
+
+ dd->port = kzalloc(sizeof(struct mtip_port), GFP_KERNEL);
+ if (!dd->port) {
+ dev_err(&dd->pdev->dev,
+ "Memory allocation: port structure\n");
+ return -ENOMEM;
+ }
+
+ /* Counting semaphore to track command slot usage */
+ sema_init(&dd->port->cmd_slot, num_command_slots - 1);
+
+ /* Spinlock to prevent concurrent issue */
+ spin_lock_init(&dd->port->cmd_issue_lock);
+
+ /* Set the port mmio base address. */
+ dd->port->mmio = dd->mmio + PORT_OFFSET;
+ dd->port->dd = dd;
+
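+ /*
+ * One DMA-coherent region is carved up below into the command
+ * header list, the received-FIS area, the per-slot command tables,
+ * the identify data buffer and a scratch sector buffer, in that
+ * order.
+ */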
+ /* Allocate memory for the command list. */
+ dd->port->command_list =
+ dmam_alloc_coherent(&dd->pdev->dev,
+ HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
+ &dd->port->command_list_dma,
+ GFP_KERNEL);
+ if (!dd->port->command_list) {
+ dev_err(&dd->pdev->dev,
+ "Memory allocation: command list\n");
+ rv = -ENOMEM;
+ goto out1;
+ }
+
+ /* Clear the memory we have allocated. */
+ memset(dd->port->command_list,
+ 0,
+ HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2));
+
+ /* Set up the address of the RX FIS. */
+ dd->port->rxfis = dd->port->command_list + HW_CMD_SLOT_SZ;
+ dd->port->rxfis_dma = dd->port->command_list_dma + HW_CMD_SLOT_SZ;
+
+ /* Setup the address of the command tables. */
+ dd->port->command_table = dd->port->rxfis + AHCI_RX_FIS_SZ;
+ dd->port->command_tbl_dma = dd->port->rxfis_dma + AHCI_RX_FIS_SZ;
+
+ /* Setup the address of the identify data. */
+ dd->port->identify = dd->port->command_table +
+ HW_CMD_TBL_AR_SZ;
+ dd->port->identify_dma = dd->port->command_tbl_dma +
+ HW_CMD_TBL_AR_SZ;
+
+ /* Setup the address of the sector buffer. */
+ dd->port->sector_buffer = (void *) dd->port->identify + ATA_SECT_SIZE;
+ dd->port->sector_buffer_dma = dd->port->identify_dma + ATA_SECT_SIZE;
+
+ /* Point the command headers at the command tables. */
+ for (i = 0; i < num_command_slots; i++) {
+ dd->port->commands[i].command_header =
+ dd->port->command_list +
+ (sizeof(struct mtip_cmd_hdr) * i);
+ dd->port->commands[i].command_header_dma =
+ dd->port->command_list_dma +
+ (sizeof(struct mtip_cmd_hdr) * i);
+
+ dd->port->commands[i].command =
+ dd->port->command_table + (HW_CMD_TBL_SZ * i);
+ dd->port->commands[i].command_dma =
+ dd->port->command_tbl_dma + (HW_CMD_TBL_SZ * i);
+
+ if (readl(dd->mmio + HOST_CAP) & HOST_CAP_64)
+ dd->port->commands[i].command_header->ctbau =
+ __force_bit2int cpu_to_le32(
+ (dd->port->commands[i].command_dma >> 16) >> 16);
+ dd->port->commands[i].command_header->ctba =
+ __force_bit2int cpu_to_le32(
+ dd->port->commands[i].command_dma & 0xFFFFFFFF);
+
+ /*
+ * If this is not done, a bug is reported by the stock
+ * FC11 i386 kernel, which has lots of kernel debugging
+ * enabled.
+ */
+ sg_init_table(dd->port->commands[i].sg, MTIP_MAX_SG);
+
+ /* Mark all commands as currently inactive.*/
+ atomic_set(&dd->port->commands[i].active, 0);
+ }
+
+ /* Setup the pointers to the extended s_active and CI registers. */
+ for (i = 0; i < dd->slot_groups; i++) {
+ dd->port->s_active[i] =
+ dd->port->mmio + i*0x80 + PORT_SCR_ACT;
+ dd->port->cmd_issue[i] =
+ dd->port->mmio + i*0x80 + PORT_COMMAND_ISSUE;
+ dd->port->completed[i] =
+ dd->port->mmio + i*0x80 + PORT_SDBV;
+ }
+
+ /* Reset the HBA. */
+ if (mtip_hba_reset(dd) < 0) {
+ dev_err(&dd->pdev->dev,
+ "Card did not reset within timeout\n");
+ rv = -EIO;
+ goto out2;
+ }
+
+ mtip_init_port(dd->port);
+ mtip_start_port(dd->port);
+
+ /* Setup the ISR and enable interrupts. */
+ rv = devm_request_irq(&dd->pdev->dev,
+ dd->pdev->irq,
+ mtip_irq_handler,
+ IRQF_SHARED,
+ dev_driver_string(&dd->pdev->dev),
+ dd);
+
+ if (rv) {
+ dev_err(&dd->pdev->dev,
+ "Unable to allocate IRQ %d\n", dd->pdev->irq);
+ goto out2;
+ }
+
+ /* Enable interrupts on the HBA. */
+ writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
+ dd->mmio + HOST_CTL);
+
+ init_timer(&dd->port->cmd_timer);
+ init_waitqueue_head(&dd->port->svc_wait);
+
+ dd->port->cmd_timer.data = (unsigned long int) dd->port;
+ dd->port->cmd_timer.function = mtip_timeout_function;
+ mod_timer(&dd->port->cmd_timer,
+ jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
+
+ if (mtip_get_identify(dd->port, NULL) < 0) {
+ rv = -EFAULT;
+ goto out3;
+ }
+
+ if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
+ MTIP_FTL_REBUILD_MAGIC) {
+ set_bit(MTIP_FLAG_REBUILD_BIT, &dd->port->flags);
+ return MTIP_FTL_REBUILD_MAGIC;
+ }
+ mtip_dump_identify(dd->port);
+ return rv;
+
+out3:
+ del_timer_sync(&dd->port->cmd_timer);
+
+ /* Disable interrupts on the HBA. */
+ writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
+ dd->mmio + HOST_CTL);
+
+ /*Release the IRQ. */
+ devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
+
+out2:
+ mtip_deinit_port(dd->port);
+
+ /* Free the command/command header memory. */
+ dmam_free_coherent(&dd->pdev->dev,
+ HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
+ dd->port->command_list,
+ dd->port->command_list_dma);
+out1:
+ /* Free the memory allocated for the port structure. */
+ kfree(dd->port);
+
+ return rv;
+}
+
+/*
+ * Called to deinitialize an interface.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0
+ */
+static int mtip_hw_exit(struct driver_data *dd)
+{
+ /*
+ * Send standby immediate (E0h) to the drive so that it
+ * saves its state.
+ */
+ if (atomic_read(&dd->drv_cleanup_done) != true) {
+
+ mtip_standby_immediate(dd->port);
+
+ /* de-initialize the port. */
+ mtip_deinit_port(dd->port);
+
+ /* Disable interrupts on the HBA. */
+ writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
+ dd->mmio + HOST_CTL);
+ }
+
+ del_timer_sync(&dd->port->cmd_timer);
+
+ /* Release the IRQ. */
+ devm_free_irq(&dd->pdev->dev, dd->pdev->irq, dd);
+
+ /* Stop the bottom half tasklet. */
+ tasklet_kill(&dd->tasklet);
+
+ /* Free the command/command header memory. */
+ dmam_free_coherent(&dd->pdev->dev,
+ HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
+ dd->port->command_list,
+ dd->port->command_list_dma);
+ /* Free the memory allocated for the port structure. */
+ kfree(dd->port);
+
+ return 0;
+}
+
+/*
+ * Issue a Standby Immediate command to the device.
+ *
+ * This function is called by the Block Layer just before the
+ * system powers off during a shutdown.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0
+ */
+static int mtip_hw_shutdown(struct driver_data *dd)
+{
+ /*
+ * Send standby immediate (E0h) to the drive so that it
+ * saves its state.
+ */
+ mtip_standby_immediate(dd->port);
+
+ return 0;
+}
+
+/*
+ * Suspend function
+ *
+ * This function is called by the Block Layer just before the
+ * system hibernates.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0 Suspend was successful
+ * -EFAULT Suspend was not successful
+ */
+static int mtip_hw_suspend(struct driver_data *dd)
+{
+ /*
+ * Send standby immediate (E0h) to the drive
+ * so that it saves its state.
+ */
+ if (mtip_standby_immediate(dd->port) != 0) {
+ dev_err(&dd->pdev->dev,
+ "Failed standby-immediate command\n");
+ return -EFAULT;
+ }
+
+ /* Disable interrupts on the HBA.*/
+ writel(readl(dd->mmio + HOST_CTL) & ~HOST_IRQ_EN,
+ dd->mmio + HOST_CTL);
+ mtip_deinit_port(dd->port);
+
+ return 0;
+}
+
+/*
+ * Resume function
+ *
+ * This function is called by the Block Layer as the
+ * system resumes.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0 Resume was successful
+ * -EFAULT Resume was not successful
+ */
+static int mtip_hw_resume(struct driver_data *dd)
+{
+ /* Perform any needed hardware setup steps */
+ hba_setup(dd);
+
+ /* Reset the HBA */
+ if (mtip_hba_reset(dd) != 0) {
+ dev_err(&dd->pdev->dev,
+ "Unable to reset the HBA\n");
+ return -EFAULT;
+ }
+
+ /*
+ * Enable the port, DMA engine, and FIS reception specific
+ * h/w in controller.
+ */
+ mtip_init_port(dd->port);
+ mtip_start_port(dd->port);
+
+ /* Enable interrupts on the HBA.*/
+ writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
+ dd->mmio + HOST_CTL);
+
+ return 0;
+}
+
+/*
+ * Helper function for reusing disk name
+ * upon hot insertion.
+ */
+static int rssd_disk_name_format(char *prefix,
+ int index,
+ char *buf,
+ int buflen)
+{
+ const int base = 'z' - 'a' + 1;
+ char *begin = buf + strlen(prefix);
+ char *end = buf + buflen;
+ char *p;
+ int unit;
+
+ p = end - 1;
+ *p = '\0';
+ unit = base;
+ do {
+ if (p == begin)
+ return -EINVAL;
+ *--p = 'a' + (index % unit);
+ index = (index / unit) - 1;
+ } while (index >= 0);
+
+ memmove(begin, p, end - p);
+ memcpy(buf, prefix, strlen(prefix));
+
+ return 0;
+}
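+/*
+ * For example, with a prefix such as "rssd" this produces "rssda" for
+ * index 0, "rssdz" for index 25 and "rssdaa" for index 26, mirroring the
+ * sd/sdaa naming scheme.
+ */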
+
+/*
+ * Block layer IOCTL handler.
+ *
+ * @dev Pointer to the block_device structure.
+ * @mode ignored
+ * @cmd IOCTL command passed from the user application.
+ * @arg Argument passed from the user application.
+ *
+ * return value
+ * 0 IOCTL completed successfully.
+ * -ENOTTY IOCTL not supported or invalid driver data
+ * structure pointer.
+ */
+static int mtip_block_ioctl(struct block_device *dev,
+ fmode_t mode,
+ unsigned cmd,
+ unsigned long arg)
+{
+ struct driver_data *dd = dev->bd_disk->private_data;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (!dd)
+ return -ENOTTY;
+
+ switch (cmd) {
+ case BLKFLSBUF:
+ return -ENOTTY;
+ default:
+ return mtip_hw_ioctl(dd, cmd, arg);
+ }
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * Block layer compat IOCTL handler.
+ *
+ * @dev Pointer to the block_device structure.
+ * @mode ignored
+ * @cmd IOCTL command passed from the user application.
+ * @arg Argument passed from the user application.
+ *
+ * return value
+ * 0 IOCTL completed successfully.
+ * -ENOTTY IOCTL not supported or invalid driver data
+ * structure pointer.
+ */
+static int mtip_block_compat_ioctl(struct block_device *dev,
+ fmode_t mode,
+ unsigned cmd,
+ unsigned long arg)
+{
+ struct driver_data *dd = dev->bd_disk->private_data;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ if (!dd)
+ return -ENOTTY;
+
+ switch (cmd) {
+ case BLKFLSBUF:
+ return -ENOTTY;
+ case HDIO_DRIVE_TASKFILE: {
+ struct mtip_compat_ide_task_request_s __user *compat_req_task;
+ ide_task_request_t req_task;
+ int compat_tasksize, outtotal, ret;
+
+ compat_tasksize =
+ sizeof(struct mtip_compat_ide_task_request_s);
+
+ compat_req_task =
+ (struct mtip_compat_ide_task_request_s __user *) arg;
+
+ if (copy_from_user(&req_task, (void __user *) arg,
+ compat_tasksize - (2 * sizeof(compat_long_t))))
+ return -EFAULT;
+
+ if (get_user(req_task.out_size, &compat_req_task->out_size))
+ return -EFAULT;
+
+ if (get_user(req_task.in_size, &compat_req_task->in_size))
+ return -EFAULT;
+
+ outtotal = sizeof(struct mtip_compat_ide_task_request_s);
+
+ ret = exec_drive_taskfile(dd, (void __user *) arg,
+ &req_task, outtotal);
+
+ if (copy_to_user((void __user *) arg, &req_task,
+ compat_tasksize -
+ (2 * sizeof(compat_long_t))))
+ return -EFAULT;
+
+ if (put_user(req_task.out_size, &compat_req_task->out_size))
+ return -EFAULT;
+
+ if (put_user(req_task.in_size, &compat_req_task->in_size))
+ return -EFAULT;
+
+ return ret;
+ }
+ default:
+ return mtip_hw_ioctl(dd, cmd, arg);
+ }
+}
+#endif
+
+/*
+ * Obtain the geometry of the device.
+ *
+ * You may think that this function is obsolete, but some applications,
+ * fdisk for example, still use CHS values. This function describes the
+ * device as having 224 heads and 56 sectors per cylinder. These values are
+ * chosen so that each cylinder is aligned on a 4KB boundary. Since a
+ * partition is described in terms of a start and end cylinder, this means
+ * that each partition is also 4KB aligned. Non-aligned partitions adversely
+ * affect performance.
+ *
+ * @dev Pointer to the block_device structure.
+ * @geo Pointer to a hd_geometry structure.
+ *
+ * return value
+ * 0 Operation completed successfully.
+ * -ENOTTY An error occurred while reading the drive capacity.
+ */
+static int mtip_block_getgeo(struct block_device *dev,
+ struct hd_geometry *geo)
+{
+ struct driver_data *dd = dev->bd_disk->private_data;
+ sector_t capacity;
+
+ if (!dd)
+ return -ENOTTY;
+
+ if (!(mtip_hw_get_capacity(dd, &capacity))) {
+ dev_warn(&dd->pdev->dev,
+ "Could not get drive capacity.\n");
+ return -ENOTTY;
+ }
+
+ geo->heads = 224;
+ geo->sectors = 56;
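+ /*
+ * 224 heads * 56 sectors = 12544 sectors per cylinder, a multiple of
+ * 8 sectors (4KB), so cylinder boundaries are always 4KB aligned.
+ */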
+ sector_div(capacity, (geo->heads * geo->sectors));
+ geo->cylinders = capacity;
+ return 0;
+}
+
+/*
+ * Block device operations structure.
+ *
+ * This structure contains pointers to the functions required by the block
+ * layer.
+ */
+static const struct block_device_operations mtip_block_ops = {
+ .ioctl = mtip_block_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mtip_block_compat_ioctl,
+#endif
+ .getgeo = mtip_block_getgeo,
+ .owner = THIS_MODULE
+};
+
+/*
+ * Block layer make request function.
+ *
+ * This function is called by the kernel to process a BIO for
+ * the P320 device.
+ *
+ * @queue Pointer to the request queue. Unused other than to obtain
+ * the driver data structure.
+ * @bio Pointer to the BIO.
+ *
+ */
+static void mtip_make_request(struct request_queue *queue, struct bio *bio)
+{
+ struct driver_data *dd = queue->queuedata;
+ struct scatterlist *sg;
+ struct bio_vec *bvec;
+ int nents = 0;
+ int tag = 0;
+
+ if (unlikely(!bio_has_data(bio))) {
+ blk_queue_flush(queue, 0);
+ bio_endio(bio, 0);
+ return;
+ }
+
+ sg = mtip_hw_get_scatterlist(dd, &tag);
+ if (likely(sg != NULL)) {
+ blk_queue_bounce(queue, &bio);
+
+ if (unlikely((bio)->bi_vcnt > MTIP_MAX_SG)) {
+ dev_warn(&dd->pdev->dev,
+ "Maximum number of SGL entries exceeded");
+ bio_io_error(bio);
+ mtip_hw_release_scatterlist(dd, tag);
+ return;
+ }
+
+ /* Create the scatter list for this bio. */
+ bio_for_each_segment(bvec, bio, nents) {
+ sg_set_page(&sg[nents],
+ bvec->bv_page,
+ bvec->bv_len,
+ bvec->bv_offset);
+ }
+
+ /* Issue the read/write. */
+ mtip_hw_submit_io(dd,
+ bio->bi_sector,
+ bio_sectors(bio),
+ nents,
+ tag,
+ bio_endio,
+ bio,
+ bio_data_dir(bio));
+ } else
+ bio_io_error(bio);
+}
+
+/*
+ * Block layer initialization function.
+ *
+ * This function is called once by the PCI layer for each P320
+ * device that is connected to the system.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0 on success else an error code.
+ */
+static int mtip_block_initialize(struct driver_data *dd)
+{
+ int rv = 0, wait_for_rebuild = 0;
+ sector_t capacity;
+ unsigned int index = 0;
+ struct kobject *kobj;
+ unsigned char thd_name[16];
+
+ if (dd->disk)
+ goto skip_create_disk; /* hw init done, before rebuild */
+
+ /* Initialize the protocol layer. */
+ wait_for_rebuild = mtip_hw_init(dd);
+ if (wait_for_rebuild < 0) {
+ dev_err(&dd->pdev->dev,
+ "Protocol layer initialization failed\n");
+ rv = -EINVAL;
+ goto protocol_init_error;
+ }
+
+ dd->disk = alloc_disk(MTIP_MAX_MINORS);
+ if (dd->disk == NULL) {
+ dev_err(&dd->pdev->dev,
+ "Unable to allocate gendisk structure\n");
+ rv = -EINVAL;
+ goto alloc_disk_error;
+ }
+
+ /* Generate the disk name, implemented the same way as in sd.c */
+ do {
+ if (!ida_pre_get(&rssd_index_ida, GFP_KERNEL))
+ goto ida_get_error;
+
+ spin_lock(&rssd_index_lock);
+ rv = ida_get_new(&rssd_index_ida, &index);
+ spin_unlock(&rssd_index_lock);
+ } while (rv == -EAGAIN);
+
+ if (rv)
+ goto ida_get_error;
+
+ rv = rssd_disk_name_format("rssd",
+ index,
+ dd->disk->disk_name,
+ DISK_NAME_LEN);
+ if (rv)
+ goto disk_index_error;
+
+ dd->disk->driverfs_dev = &dd->pdev->dev;
+ dd->disk->major = dd->major;
+ dd->disk->first_minor = dd->instance * MTIP_MAX_MINORS;
+ dd->disk->fops = &mtip_block_ops;
+ dd->disk->private_data = dd;
+ dd->index = index;
+
+ /*
+ * if rebuild pending, start the service thread, and delay the block
+ * queue creation and add_disk()
+ */
+ if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
+ goto start_service_thread;
+
+skip_create_disk:
+ /* Allocate the request queue. */
+ dd->queue = blk_alloc_queue(GFP_KERNEL);
+ if (dd->queue == NULL) {
+ dev_err(&dd->pdev->dev,
+ "Unable to allocate request queue\n");
+ rv = -ENOMEM;
+ goto block_queue_alloc_init_error;
+ }
+
+ /* Attach our request function to the request queue. */
+ blk_queue_make_request(dd->queue, mtip_make_request);
+
+ dd->disk->queue = dd->queue;
+ dd->queue->queuedata = dd;
+
+ /* Set device limits. */
+ set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags);
+ blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
+ blk_queue_physical_block_size(dd->queue, 4096);
+ blk_queue_io_min(dd->queue, 4096);
+ /*
+ * Write-back caching is not supported by the device, and FUA depends
+ * on write-back cache support, hence flush support is set to zero.
+ */
+ blk_queue_flush(dd->queue, 0);
+
+ /* Set the capacity of the device in 512 byte sectors. */
+ if (!(mtip_hw_get_capacity(dd, &capacity))) {
+ dev_warn(&dd->pdev->dev,
+ "Could not read drive capacity\n");
+ rv = -EIO;
+ goto read_capacity_error;
+ }
+ set_capacity(dd->disk, capacity);
+
+ /* Enable the block device and add it to /dev */
+ add_disk(dd->disk);
+
+ /*
+ * Now that the disk is active, initialize any sysfs attributes
+ * managed by the protocol layer.
+ */
+ kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
+ if (kobj) {
+ mtip_hw_sysfs_init(dd, kobj);
+ kobject_put(kobj);
+ }
+
+ if (dd->mtip_svc_handler)
+ return rv; /* service thread created for handling rebuild */
+
+start_service_thread:
+ sprintf(thd_name, "mtip_svc_thd_%02d", index);
+
+ dd->mtip_svc_handler = kthread_run(mtip_service_thread,
+ dd, thd_name);
+
+ if (IS_ERR(dd->mtip_svc_handler)) {
+ printk(KERN_ERR "mtip32xx: service thread failed to start\n");
+ dd->mtip_svc_handler = NULL;
+ rv = -EFAULT;
+ goto kthread_run_error;
+ }
+
+ return rv;
+
+kthread_run_error:
+ /* Delete our gendisk. This also removes the device from /dev */
+ del_gendisk(dd->disk);
+
+read_capacity_error:
+ blk_cleanup_queue(dd->queue);
+
+block_queue_alloc_init_error:
+disk_index_error:
+ spin_lock(&rssd_index_lock);
+ ida_remove(&rssd_index_ida, index);
+ spin_unlock(&rssd_index_lock);
+
+ida_get_error:
+ put_disk(dd->disk);
+
+alloc_disk_error:
+ mtip_hw_exit(dd); /* De-initialize the protocol layer. */
+
+protocol_init_error:
+ return rv;
+}
+
+/*
+ * Block layer deinitialization function.
+ *
+ * Called by the PCI layer as each P320 device is removed.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0
+ */
+static int mtip_block_remove(struct driver_data *dd)
+{
+ struct kobject *kobj;
+
+ if (dd->mtip_svc_handler) {
+ set_bit(MTIP_FLAG_SVC_THD_SHOULD_STOP_BIT, &dd->port->flags);
+ wake_up_interruptible(&dd->port->svc_wait);
+ kthread_stop(dd->mtip_svc_handler);
+ }
+
+ /* Clean up the sysfs attributes managed by the protocol layer. */
+ kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
+ if (kobj) {
+ mtip_hw_sysfs_exit(dd, kobj);
+ kobject_put(kobj);
+ }
+
+ /*
+ * Delete our gendisk structure. This also removes the device
+ * from /dev
+ */
+ del_gendisk(dd->disk);
+ blk_cleanup_queue(dd->queue);
+ dd->disk = NULL;
+ dd->queue = NULL;
+
+ /* De-initialize the protocol layer. */
+ mtip_hw_exit(dd);
+
+ return 0;
+}
+
+/*
+ * Function called by the PCI layer just before the
+ * machine shuts down.
+ *
+ * If a protocol layer shutdown function is present it will be called
+ * by this function.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0
+ */
+static int mtip_block_shutdown(struct driver_data *dd)
+{
+ dev_info(&dd->pdev->dev,
+ "Shutting down %s ...\n", dd->disk->disk_name);
+
+ /* Delete our gendisk structure, and cleanup the blk queue. */
+ del_gendisk(dd->disk);
+ blk_cleanup_queue(dd->queue);
+ dd->disk = NULL;
+ dd->queue = NULL;
+
+ mtip_hw_shutdown(dd);
+ return 0;
+}
+
+static int mtip_block_suspend(struct driver_data *dd)
+{
+ dev_info(&dd->pdev->dev,
+ "Suspending %s ...\n", dd->disk->disk_name);
+ mtip_hw_suspend(dd);
+ return 0;
+}
+
+static int mtip_block_resume(struct driver_data *dd)
+{
+ dev_info(&dd->pdev->dev, "Resuming %s ...\n",
+ dd->disk->disk_name);
+ mtip_hw_resume(dd);
+ return 0;
+}
+
+/*
+ * Called for each supported PCI device detected.
+ *
+ * This function allocates the private data structure, enables the
+ * PCI device and then calls the block layer initialization function.
+ *
+ * return value
+ * 0 on success else an error code.
+ */
+static int mtip_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ int rv = 0;
+ struct driver_data *dd = NULL;
+
+ /* Allocate memory for this device's private data. */
+ dd = kzalloc(sizeof(struct driver_data), GFP_KERNEL);
+ if (dd == NULL) {
+ dev_err(&pdev->dev,
+ "Unable to allocate memory for driver data\n");
+ return -ENOMEM;
+ }
+
+ /* Set the atomic variable as 1 in case of SRSI */
+ atomic_set(&dd->drv_cleanup_done, true);
+
+ atomic_set(&dd->resumeflag, false);
+
+ /* Attach the private data to this PCI device. */
+ pci_set_drvdata(pdev, dd);
+
+ rv = pcim_enable_device(pdev);
+ if (rv < 0) {
+ dev_err(&pdev->dev, "Unable to enable device\n");
+ goto iomap_err;
+ }
+
+ /* Map BAR5 to memory. */
+ rv = pcim_iomap_regions(pdev, 1 << MTIP_ABAR, MTIP_DRV_NAME);
+ if (rv < 0) {
+ dev_err(&pdev->dev, "Unable to map regions\n");
+ goto iomap_err;
+ }
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ rv = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+
+ if (rv) {
+ rv = pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32));
+ if (rv) {
+ dev_warn(&pdev->dev,
+ "64-bit DMA enable failed\n");
+ goto setmask_err;
+ }
+ }
+ }
+
+ pci_set_master(pdev);
+
+ if (pci_enable_msi(pdev)) {
+ dev_warn(&pdev->dev,
+ "Unable to enable MSI interrupt.\n");
+ goto block_initialize_err;
+ }
+
+ /* Copy the info we may need later into the private data structure. */
+ dd->major = mtip_major;
+ dd->instance = instance;
+ dd->pdev = pdev;
+
+ /* Initialize the block layer. */
+ rv = mtip_block_initialize(dd);
+ if (rv < 0) {
+ dev_err(&pdev->dev,
+ "Unable to initialize block layer\n");
+ goto block_initialize_err;
+ }
+
+ /*
+ * Increment the instance count so that each device has a unique
+ * instance number.
+ */
+ instance++;
+
+ goto done;
+
+block_initialize_err:
+ pci_disable_msi(pdev);
+
+setmask_err:
+ pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
+
+iomap_err:
+ kfree(dd);
+ pci_set_drvdata(pdev, NULL);
+ return rv;
+done:
+ /* Set the atomic variable as 0 in case of SRSI */
+ atomic_set(&dd->drv_cleanup_done, true);
+
+ return rv;
+}
+
+/*
+ * Called for each probed device when the device is removed or the
+ * driver is unloaded.
+ *
+ * return value
+ * None
+ */
+static void mtip_pci_remove(struct pci_dev *pdev)
+{
+ struct driver_data *dd = pci_get_drvdata(pdev);
+ int counter = 0;
+
+ if (mtip_check_surprise_removal(pdev)) {
+ while (atomic_read(&dd->drv_cleanup_done) == false) {
+ counter++;
+ msleep(20);
+ if (counter == 10) {
+ /* Cleanup the outstanding commands */
+ mtip_command_cleanup(dd);
+ break;
+ }
+ }
+ }
+ /* Set the atomic variable as 1 in case of SRSI */
+ atomic_set(&dd->drv_cleanup_done, true);
+
+ /* Clean up the block layer. */
+ mtip_block_remove(dd);
+
+ pci_disable_msi(pdev);
+
+ kfree(dd);
+ pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
+}
+
+/*
+ * Called for each probed device when the device is suspended.
+ *
+ * return value
+ * 0 Success
+ * <0 Error
+ */
+static int mtip_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
+{
+ int rv = 0;
+ struct driver_data *dd = pci_get_drvdata(pdev);
+
+ if (!dd) {
+ dev_err(&pdev->dev,
+ "Driver private datastructure is NULL\n");
+ return -EFAULT;
+ }
+
+ atomic_set(&dd->resumeflag, true);
+
+ /* Disable ports & interrupts then send standby immediate */
+ rv = mtip_block_suspend(dd);
+ if (rv < 0) {
+ dev_err(&pdev->dev,
+ "Failed to suspend controller\n");
+ return rv;
+ }
+
+ /*
+ * Save the PCI config space to the pdev structure and
+ * disable the device.
+ */
+ pci_save_state(pdev);
+ pci_disable_device(pdev);
+
+ /* Move to Low power state*/
+ pci_set_power_state(pdev, PCI_D3hot);
+
+ return rv;
+}
+
+/*
+ * Called for each probed device when the device is resumed.
+ *
+ * return value
+ * 0 Success
+ * <0 Error
+ */
+static int mtip_pci_resume(struct pci_dev *pdev)
+{
+ int rv = 0;
+ struct driver_data *dd;
+
+ dd = pci_get_drvdata(pdev);
+ if (!dd) {
+ dev_err(&pdev->dev,
+ "Driver private datastructure is NULL\n");
+ return -EFAULT;
+ }
+
+ /* Move the device to active State */
+ pci_set_power_state(pdev, PCI_D0);
+
+ /* Restore PCI configuration space */
+ pci_restore_state(pdev);
+
+ /* Enable the PCI device*/
+ rv = pcim_enable_device(pdev);
+ if (rv < 0) {
+ dev_err(&pdev->dev,
+ "Failed to enable card during resume\n");
+ goto err;
+ }
+ pci_set_master(pdev);
+
+ /*
+ * Calls mtip_hba_reset(), mtip_init_port() and mtip_start_port(),
+ * then enables interrupts.
+ */
+ rv = mtip_block_resume(dd);
+ if (rv < 0)
+ dev_err(&pdev->dev, "Unable to resume\n");
+
+err:
+ atomic_set(&dd->resumeflag, false);
+
+ return rv;
+}
+
+/*
+ * Shutdown routine
+ *
+ * return value
+ * None
+ */
+static void mtip_pci_shutdown(struct pci_dev *pdev)
+{
+ struct driver_data *dd = pci_get_drvdata(pdev);
+ if (dd)
+ mtip_block_shutdown(dd);
+}
+
+/* Table of device ids supported by this driver. */
+static DEFINE_PCI_DEVICE_TABLE(mtip_pci_tbl) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320_DEVICE_ID) },
+ { 0 }
+};
+
+/* Structure that describes the PCI driver functions. */
+static struct pci_driver mtip_pci_driver = {
+ .name = MTIP_DRV_NAME,
+ .id_table = mtip_pci_tbl,
+ .probe = mtip_pci_probe,
+ .remove = mtip_pci_remove,
+ .suspend = mtip_pci_suspend,
+ .resume = mtip_pci_resume,
+ .shutdown = mtip_pci_shutdown,
+};
+
+MODULE_DEVICE_TABLE(pci, mtip_pci_tbl);
+
+/*
+ * Module initialization function.
+ *
+ * Called once when the module is loaded. This function allocates a major
+ * block device number to the Cyclone devices and registers the PCI layer
+ * of the driver.
+ *
+ * Return value
+ * 0 on success else error code.
+ */
+static int __init mtip_init(void)
+{
+ printk(KERN_INFO MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n");
+
+ /* Allocate a major block device number to use with this driver. */
+ mtip_major = register_blkdev(0, MTIP_DRV_NAME);
+ if (mtip_major < 0) {
+ printk(KERN_ERR "Unable to register block device (%d)\n",
+ mtip_major);
+ return -EBUSY;
+ }
+
+ /* Register our PCI operations. */
+ return pci_register_driver(&mtip_pci_driver);
+}
+
+/*
+ * Module de-initialization function.
+ *
+ * Called once when the module is unloaded. This function deallocates
+ * the major block device number allocated by mtip_init() and
+ * unregisters the PCI layer of the driver.
+ *
+ * Return value
+ * none
+ */
+static void __exit mtip_exit(void)
+{
+ /* Release the allocated major block device number. */
+ unregister_blkdev(mtip_major, MTIP_DRV_NAME);
+
+ /* Unregister the PCI driver. */
+ pci_unregister_driver(&mtip_pci_driver);
+}
+
+MODULE_AUTHOR("Micron Technology, Inc");
+MODULE_DESCRIPTION("Micron RealSSD PCIe Block Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(MTIP_DRV_VERSION);
+
+module_init(mtip_init);
+module_exit(mtip_exit);
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
new file mode 100644
index 0000000..e0554a8
--- /dev/null
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -0,0 +1,418 @@
+/*
+ * mtip32xx.h - Header file for the P320 SSD Block Driver
+ * Copyright (C) 2011 Micron Technology, Inc.
+ *
+ * Portions of this code were derived from works subjected to the
+ * following copyright:
+ * Copyright (C) 2009 Integrated Device Technology, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MTIP32XX_H__
+#define __MTIP32XX_H__
+
+#include <linux/spinlock.h>
+#include <linux/rwsem.h>
+#include <linux/ata.h>
+#include <linux/interrupt.h>
+#include <linux/genhd.h>
+#include <linux/version.h>
+
+/* Offset of Subsystem Device ID in PCI configuration space */
+#define PCI_SUBSYSTEM_DEVICEID 0x2E
+
+/* Offset of Device Control register in PCIe extended capabilities space */
+#define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET 0x48
+
+/* # of times to retry timed out IOs */
+#define MTIP_MAX_RETRIES 5
+
+/* Various timeout values in ms */
+#define MTIP_NCQ_COMMAND_TIMEOUT_MS 5000
+#define MTIP_IOCTL_COMMAND_TIMEOUT_MS 5000
+#define MTIP_INTERNAL_COMMAND_TIMEOUT_MS 5000
+
+/* check for timeouts every 500ms */
+#define MTIP_TIMEOUT_CHECK_PERIOD 500
+
+/* ftl rebuild */
+#define MTIP_FTL_REBUILD_OFFSET 142
+#define MTIP_FTL_REBUILD_MAGIC 0xED51
+#define MTIP_FTL_REBUILD_TIMEOUT_MS 2400000
+
+/* Macro to extract the tag bit number from a tag value. */
+#define MTIP_TAG_BIT(tag) (tag & 0x1F)
+
+/*
+ * Macro to extract the tag index from a tag value. The index
+ * is used to access the correct s_active/Command Issue register based
+ * on the tag value.
+ */
+#define MTIP_TAG_INDEX(tag) (tag >> 5)
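+
+/* For example, tag 37 maps to MTIP_TAG_INDEX(37) == 1, MTIP_TAG_BIT(37) == 5. */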
+
+/*
+ * Maximum number of scatter gather entries
+ * a single command may have.
+ */
+#define MTIP_MAX_SG 128
+
+/*
+ * Maximum number of slot groups (Command Issue & s_active registers)
+ * NOTE: This is the driver maximum; check dd->slot_groups for actual value.
+ */
+#define MTIP_MAX_SLOT_GROUPS 8
+
+/* Internal command tag. */
+#define MTIP_TAG_INTERNAL 0
+
+/* Micron Vendor ID & P320x SSD Device ID */
+#define PCI_VENDOR_ID_MICRON 0x1344
+#define P320_DEVICE_ID 0x5150
+
+/* Driver name and version strings */
+#define MTIP_DRV_NAME "mtip32xx"
+#define MTIP_DRV_VERSION "1.2.6os3"
+
+/* Maximum number of minor device numbers per device. */
+#define MTIP_MAX_MINORS 16
+
+/* Maximum number of supported command slots. */
+#define MTIP_MAX_COMMAND_SLOTS (MTIP_MAX_SLOT_GROUPS * 32)
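+/* With 8 slot groups of 32 slots each, this allows up to 256 commands. */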
+
+/*
+ * Per-tag bitfield size in longs.
+ * Linux bit manipulation functions
+ * (i.e. test_and_set_bit, find_next_zero_bit)
+ * manipulate memory in longs, so we try to make the math work:
+ * take the slot groups and find the number of longs, rounding up.
+ * Careful! i386 and x86_64 use different sized longs!
+ */
+#define U32_PER_LONG (sizeof(long) / sizeof(u32))
+#define SLOTBITS_IN_LONGS ((MTIP_MAX_SLOT_GROUPS + \
+ (U32_PER_LONG-1))/U32_PER_LONG)
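+/* e.g. 8 slot groups -> 256 tag bits -> 4 longs on 64-bit, 8 on 32-bit. */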
+
+/* BAR number used to access the HBA registers. */
+#define MTIP_ABAR 5
+
+#ifdef DEBUG
+ #define dbg_printk(format, arg...) \
+ printk(pr_fmt(format), ##arg);
+#else
+ #define dbg_printk(format, arg...)
+#endif
+
+#define __force_bit2int (unsigned int __force)
+
+/* below are bit numbers in 'flags' defined in mtip_port */
+#define MTIP_FLAG_IC_ACTIVE_BIT 0
+#define MTIP_FLAG_EH_ACTIVE_BIT 1
+#define MTIP_FLAG_SVC_THD_ACTIVE_BIT 2
+#define MTIP_FLAG_ISSUE_CMDS_BIT 4
+#define MTIP_FLAG_REBUILD_BIT 5
+#define MTIP_FLAG_SVC_THD_SHOULD_STOP_BIT 8
+
+/* Register Frame Information Structure (FIS), host to device. */
+struct host_to_dev_fis {
+ /*
+ * FIS type.
+ * - 27h Register FIS, host to device.
+ * - 34h Register FIS, device to host.
+ * - 39h DMA Activate FIS, device to host.
+ * - 41h DMA Setup FIS, bi-directional.
+ * - 46h Data FIS, bi-directional.
+ * - 58h BIST Activate FIS, bi-directional.
+ * - 5Fh PIO Setup FIS, device to host.
+ * - A1h Set Device Bits FIS, device to host.
+ */
+ unsigned char type;
+ unsigned char opts;
+ unsigned char command;
+ unsigned char features;
+
+ union {
+ unsigned char lba_low;
+ unsigned char sector;
+ };
+ union {
+ unsigned char lba_mid;
+ unsigned char cyl_low;
+ };
+ union {
+ unsigned char lba_hi;
+ unsigned char cyl_hi;
+ };
+ union {
+ unsigned char device;
+ unsigned char head;
+ };
+
+ union {
+ unsigned char lba_low_ex;
+ unsigned char sector_ex;
+ };
+ union {
+ unsigned char lba_mid_ex;
+ unsigned char cyl_low_ex;
+ };
+ union {
+ unsigned char lba_hi_ex;
+ unsigned char cyl_hi_ex;
+ };
+ unsigned char features_ex;
+
+ unsigned char sect_count;
+ unsigned char sect_cnt_ex;
+ unsigned char res2;
+ unsigned char control;
+
+ unsigned int res3;
+};
+
+/* Command header structure. */
+struct mtip_cmd_hdr {
+ /*
+ * Command options.
+ * - Bits 31:16 Number of PRD entries.
+ * - Bits 15:8 Unused in this implementation.
+ * - Bit 7 Prefetch bit, informs the drive to prefetch PRD entries.
+ * - Bit 6 Write bit, should be set when writing data to the device.
+ * - Bit 5 Unused in this implementation.
+ * - Bits 4:0 Length of the command FIS in DWords (DWord = 4 bytes).
+ */
+ unsigned int opts;
+ /* This field is unused when using NCQ. */
+ union {
+ unsigned int byte_count;
+ unsigned int status;
+ };
+ /*
+ * Lower 32 bits of the command table address associated with this
+ * header. The command table addresses must be 128 byte aligned.
+ */
+ unsigned int ctba;
+ /*
+ * If 64 bit addressing is used this field is the upper 32 bits
+ * of the command table address associated with this command.
+ */
+ unsigned int ctbau;
+ /* Reserved and unused. */
+ unsigned int res[4];
+};
+
+/* Command scatter gather structure (PRD). */
+struct mtip_cmd_sg {
+ /*
+ * Low 32 bits of the data buffer address. For P320 this
+ * address must be 8 byte aligned signified by bits 2:0 being
+ * set to 0.
+ */
+ unsigned int dba;
+ /*
+ * When 64 bit addressing is used this field is the upper
+ * 32 bits of the data buffer address.
+ */
+ unsigned int dba_upper;
+ /* Unused. */
+ unsigned int reserved;
+ /*
+ * Bit 31: interrupt when this data block has been transferred.
+ * Bits 30..22: reserved
+ * Bits 21..0: byte count (minus 1). For P320 the byte count must be
+ * 8 byte aligned signified by bits 2:0 being set to 1.
+ */
+ unsigned int info;
+};
+struct mtip_port;
+
+/* Structure used to describe a command. */
+struct mtip_cmd {
+
+ struct mtip_cmd_hdr *command_header; /* ptr to command header entry */
+
+ dma_addr_t command_header_dma; /* corresponding physical address */
+
+ void *command; /* ptr to command table entry */
+
+ dma_addr_t command_dma; /* corresponding physical address */
+
+ void *comp_data; /* data passed to completion function comp_func() */
+ /*
+ * Completion function called by the ISR upon completion of
+ * a command.
+ */
+ void (*comp_func)(struct mtip_port *port,
+ int tag,
+ void *data,
+ int status);
+ /* Additional callback function that may be called by comp_func() */
+ void (*async_callback)(void *data, int status);
+
+ void *async_data; /* Addl. data passed to async_callback() */
+
+ int scatter_ents; /* Number of scatter list entries used */
+
+ struct scatterlist sg[MTIP_MAX_SG]; /* Scatter list entries */
+
+ int retries; /* The number of retries left for this command. */
+
+ int direction; /* Data transfer direction */
+
+ unsigned long comp_time; /* command completion time, in jiffies */
+
+ atomic_t active; /* indicates whether this command has been sent to the drive */
+};
+
+/* Structure used to describe a port. */
+struct mtip_port {
+ /* Pointer back to the driver data for this port. */
+ struct driver_data *dd;
+ /*
+ * Used to determine if the data pointed to by the
+ * identify field is valid.
+ */
+ unsigned long identify_valid;
+ /* Base address of the memory mapped IO for the port. */
+ void __iomem *mmio;
+ /* Array of pointers to the memory mapped s_active registers. */
+ void __iomem *s_active[MTIP_MAX_SLOT_GROUPS];
+ /* Array of pointers to the memory mapped completed registers. */
+ void __iomem *completed[MTIP_MAX_SLOT_GROUPS];
+ /* Array of pointers to the memory mapped Command Issue registers. */
+ void __iomem *cmd_issue[MTIP_MAX_SLOT_GROUPS];
+ /*
+ * Pointer to the beginning of the command header memory as used
+ * by the driver.
+ */
+ void *command_list;
+ /*
+ * Pointer to the beginning of the command header memory as used
+ * by the DMA.
+ */
+ dma_addr_t command_list_dma;
+ /*
+ * Pointer to the beginning of the RX FIS memory as used
+ * by the driver.
+ */
+ void *rxfis;
+ /*
+ * Pointer to the beginning of the RX FIS memory as used
+ * by the DMA.
+ */
+ dma_addr_t rxfis_dma;
+ /*
+ * Pointer to the beginning of the command table memory as used
+ * by the driver.
+ */
+ void *command_table;
+ /*
+ * Pointer to the beginning of the command table memory as used
+ * by the DMA.
+ */
+ dma_addr_t command_tbl_dma;
+ /*
+ * Pointer to the beginning of the identify data memory as used
+ * by the driver.
+ */
+ u16 *identify;
+ /*
+ * Pointer to the beginning of the identify data memory as used
+ * by the DMA.
+ */
+ dma_addr_t identify_dma;
+ /*
+ * Pointer to the beginning of a sector buffer that is used
+ * by the driver when issuing internal commands.
+ */
+ u16 *sector_buffer;
+ /*
+ * Pointer to the beginning of a sector buffer that is used
+ * by the DMA when the driver issues internal commands.
+ */
+ dma_addr_t sector_buffer_dma;
+ /*
+ * Bit significant, used to determine if a command slot has
+ * been allocated. i.e. the slot is in use. Bits are cleared
+ * when the command slot and all associated data structures
+ * are no longer needed.
+ */
+ unsigned long allocated[SLOTBITS_IN_LONGS];
+ /*
+ * used to queue commands when an internal command is in progress
+ * or error handling is active
+ */
+ unsigned long cmds_to_issue[SLOTBITS_IN_LONGS];
+ /*
+ * Array of command slots. Structure includes pointers to the
+ * command header and command table, and completion function and data
+ * pointers.
+ */
+ struct mtip_cmd commands[MTIP_MAX_COMMAND_SLOTS];
+ /* Used by mtip_service_thread to wait for an event */
+ wait_queue_head_t svc_wait;
+ /*
+ * Indicates the state of the port. Also helps the service thread
+ * determine its action on wake up.
+ */
+ unsigned long flags;
+ /*
+ * Timer used to complete commands that have been active for too long.
+ */
+ struct timer_list cmd_timer;
+ /*
+ * Semaphore used to block threads if there are no
+ * command slots available.
+ */
+ struct semaphore cmd_slot;
+ /* Spinlock for working around command-issue bug. */
+ spinlock_t cmd_issue_lock;
+};
+
+/*
+ * Driver private data structure.
+ *
+ * One structure is allocated per probed device.
+ */
+struct driver_data {
+ void __iomem *mmio; /* Base address of the HBA registers. */
+
+ int major; /* Major device number. */
+
+ int instance; /* Instance number. First device probed is 0, ... */
+
+ struct gendisk *disk; /* Pointer to our gendisk structure. */
+
+ struct pci_dev *pdev; /* Pointer to the PCI device structure. */
+
+ struct request_queue *queue; /* Our request queue. */
+
+ struct mtip_port *port; /* Pointer to the port data structure. */
+
+ /* Tasklet used to process the bottom half of the ISR. */
+ struct tasklet_struct tasklet;
+
+ unsigned product_type; /* magic value declaring the product type */
+
+ unsigned slot_groups; /* number of slot groups the product supports */
+
+ atomic_t drv_cleanup_done; /* Atomic variable for SRSI */
+
+ unsigned long index; /* Index to determine the disk name */
+
+ unsigned int ftlrebuildflag; /* FTL rebuild flag */
+
+ atomic_t resumeflag; /* Atomic variable to track suspend/resume */
+
+ struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
+};
+
+#endif
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
new file mode 100644
index 0000000..1f3c1a7
--- /dev/null
+++ b/drivers/block/nvme.c
@@ -0,0 +1,1741 @@
+/*
+ * NVM Express device driver
+ * Copyright (c) 2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/nvme.h>
+#include <linux/bio.h>
+#include <linux/bitops.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kdev_t.h>
+#include <linux/kthread.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/poison.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/version.h>
+
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
+
+#define NVME_Q_DEPTH 1024
+#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
+#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
+#define NVME_MINORS 64
+#define NVME_IO_TIMEOUT (5 * HZ)
+#define ADMIN_TIMEOUT (60 * HZ)
+
+static int nvme_major;
+module_param(nvme_major, int, 0);
+
+static int use_threaded_interrupts;
+module_param(use_threaded_interrupts, int, 0);
+
+static DEFINE_SPINLOCK(dev_list_lock);
+static LIST_HEAD(dev_list);
+static struct task_struct *nvme_thread;
+
+/*
+ * Represents an NVM Express device. Each nvme_dev is a PCI function.
+ */
+struct nvme_dev {
+ struct list_head node;
+ struct nvme_queue **queues;
+ u32 __iomem *dbs;
+ struct pci_dev *pci_dev;
+ struct dma_pool *prp_page_pool;
+ struct dma_pool *prp_small_pool;
+ int instance;
+ int queue_count;
+ int db_stride;
+ u32 ctrl_config;
+ struct msix_entry *entry;
+ struct nvme_bar __iomem *bar;
+ struct list_head namespaces;
+ char serial[20];
+ char model[40];
+ char firmware_rev[8];
+};
+
+/*
+ * An NVM Express namespace is equivalent to a SCSI LUN
+ */
+struct nvme_ns {
+ struct list_head list;
+
+ struct nvme_dev *dev;
+ struct request_queue *queue;
+ struct gendisk *disk;
+
+ int ns_id;
+ int lba_shift;
+};
+
+/*
+ * An NVM Express queue. Each device has at least two (one for admin
+ * commands and one for I/O commands).
+ */
+struct nvme_queue {
+ struct device *q_dmadev;
+ struct nvme_dev *dev;
+ spinlock_t q_lock;
+ struct nvme_command *sq_cmds;
+ volatile struct nvme_completion *cqes;
+ dma_addr_t sq_dma_addr;
+ dma_addr_t cq_dma_addr;
+ wait_queue_head_t sq_full;
+ wait_queue_t sq_cong_wait;
+ struct bio_list sq_cong;
+ u32 __iomem *q_db;
+ u16 q_depth;
+ u16 cq_vector;
+ u16 sq_head;
+ u16 sq_tail;
+ u16 cq_head;
+ u16 cq_phase;
+ unsigned long cmdid_data[];
+};
+
+/*
+ * Check we didn't inadvertently grow the command struct
+ */
+static inline void _nvme_check_size(void)
+{
+ BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
+ BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
+ BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
+}
+
+typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
+ struct nvme_completion *);
+
+struct nvme_cmd_info {
+ nvme_completion_fn fn;
+ void *ctx;
+ unsigned long timeout;
+};
+
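+/*
+ * The cmdid_data[] area at the end of struct nvme_queue holds a bitmap of
+ * q_depth bits (one per command ID) followed by an array of nvme_cmd_info,
+ * one entry per command ID. This helper returns the start of that array.
+ */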
+static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
+{
+ return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
+}
+
+/**
+ * alloc_cmdid() - Allocate a Command ID
+ * @nvmeq: The queue that will be used for this command
+ * @ctx: A pointer that will be passed to the handler
+ * @handler: The function to call on completion
+ *
+ * Allocate a Command ID for a queue. The data passed in will
+ * be passed to the completion handler. This is implemented by using
+ * the bottom two bits of the ctx pointer to store the handler ID.
+ * Passing in a pointer that's not 4-byte aligned will cause a BUG.
+ * We can change this if it becomes a problem.
+ *
+ * May be called with local interrupts disabled and the q_lock held,
+ * or with interrupts enabled and no locks held.
+ */
+static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
+ nvme_completion_fn handler, unsigned timeout)
+{
+ int depth = nvmeq->q_depth - 1;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+ int cmdid;
+
+ do {
+ cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
+ if (cmdid >= depth)
+ return -EBUSY;
+ } while (test_and_set_bit(cmdid, nvmeq->cmdid_data));
+
+ info[cmdid].fn = handler;
+ info[cmdid].ctx = ctx;
+ info[cmdid].timeout = jiffies + timeout;
+ return cmdid;
+}
+
+static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
+ nvme_completion_fn handler, unsigned timeout)
+{
+ int cmdid;
+ wait_event_killable(nvmeq->sq_full,
+ (cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
+ return (cmdid < 0) ? -EINTR : cmdid;
+}
+
+/* Special values must be less than 0x1000 */
+#define CMD_CTX_BASE ((void *)POISON_POINTER_DELTA)
+#define CMD_CTX_CANCELLED (0x30C + CMD_CTX_BASE)
+#define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE)
+#define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE)
+#define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE)
+
+static void special_completion(struct nvme_dev *dev, void *ctx,
+ struct nvme_completion *cqe)
+{
+ if (ctx == CMD_CTX_CANCELLED)
+ return;
+ if (ctx == CMD_CTX_FLUSH)
+ return;
+ if (ctx == CMD_CTX_COMPLETED) {
+ dev_warn(&dev->pci_dev->dev,
+ "completed id %d twice on queue %d\n",
+ cqe->command_id, le16_to_cpup(&cqe->sq_id));
+ return;
+ }
+ if (ctx == CMD_CTX_INVALID) {
+ dev_warn(&dev->pci_dev->dev,
+ "invalid id %d completed on queue %d\n",
+ cqe->command_id, le16_to_cpup(&cqe->sq_id));
+ return;
+ }
+
+ dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
+}
+
+/*
+ * Called with local interrupts disabled and the q_lock held. May not sleep.
+ */
+static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
+ nvme_completion_fn *fn)
+{
+ void *ctx;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+
+ if (cmdid >= nvmeq->q_depth) {
+ *fn = special_completion;
+ return CMD_CTX_INVALID;
+ }
+ *fn = info[cmdid].fn;
+ ctx = info[cmdid].ctx;
+ info[cmdid].fn = special_completion;
+ info[cmdid].ctx = CMD_CTX_COMPLETED;
+ clear_bit(cmdid, nvmeq->cmdid_data);
+ wake_up(&nvmeq->sq_full);
+ return ctx;
+}
+
+static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
+ nvme_completion_fn *fn)
+{
+ void *ctx;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+ if (fn)
+ *fn = info[cmdid].fn;
+ ctx = info[cmdid].ctx;
+ info[cmdid].fn = special_completion;
+ info[cmdid].ctx = CMD_CTX_CANCELLED;
+ return ctx;
+}
+
+static struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
+{
+ return dev->queues[get_cpu() + 1];
+}
+
+static void put_nvmeq(struct nvme_queue *nvmeq)
+{
+ put_cpu();
+}
+
+/**
+ * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
+ * @nvmeq: The queue to use
+ * @cmd: The command to send
+ *
+ * Safe to use from interrupt context
+ */
+static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
+{
+ unsigned long flags;
+ u16 tail;
+ spin_lock_irqsave(&nvmeq->q_lock, flags);
+ tail = nvmeq->sq_tail;
+ memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
+ if (++tail == nvmeq->q_depth)
+ tail = 0;
+ writel(tail, nvmeq->q_db);
+ nvmeq->sq_tail = tail;
+ spin_unlock_irqrestore(&nvmeq->q_lock, flags);
+
+ return 0;
+}
+
+/*
+ * The nvme_iod describes the data in an I/O, including the list of PRP
+ * entries. You can't see it in this data structure because C doesn't let
+ * me express that. Use nvme_alloc_iod to ensure there's enough space
+ * allocated to store the PRP list.
+ */
+struct nvme_iod {
+ void *private; /* For the use of the submitter of the I/O */
+ int npages; /* In the PRP list. 0 means small pool in use */
+ int offset; /* Of PRP list */
+ int nents; /* Used in scatterlist */
+ int length; /* Of data, in bytes */
+ dma_addr_t first_dma;
+ struct scatterlist sg[0];
+};
+
+static __le64 **iod_list(struct nvme_iod *iod)
+{
+ return ((void *)iod) + iod->offset;
+}
+
+/*
+ * Will slightly overestimate the number of pages needed. This is OK
+ * as it only leads to a small amount of wasted memory for the lifetime of
+ * the I/O.
+ */
+static int nvme_npages(unsigned size)
+{
+ unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE);
+ return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
+}
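+/* e.g. with 4KB pages, nvme_npages(65536) == 1: a 64KB I/O needs one PRP page. */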
+
+static struct nvme_iod *
+nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
+{
+ struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
+ sizeof(__le64 *) * nvme_npages(nbytes) +
+ sizeof(struct scatterlist) * nseg, gfp);
+
+ if (iod) {
+ iod->offset = offsetof(struct nvme_iod, sg[nseg]);
+ iod->npages = -1;
+ iod->length = nbytes;
+ }
+
+ return iod;
+}
+
+static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
+{
+ const int last_prp = PAGE_SIZE / 8 - 1;
+ int i;
+ __le64 **list = iod_list(iod);
+ dma_addr_t prp_dma = iod->first_dma;
+
+ if (iod->npages == 0)
+ dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
+ for (i = 0; i < iod->npages; i++) {
+ __le64 *prp_list = list[i];
+ dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
+ dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
+ prp_dma = next_prp_dma;
+ }
+ kfree(iod);
+}
+
+static void requeue_bio(struct nvme_dev *dev, struct bio *bio)
+{
+ struct nvme_queue *nvmeq = get_nvmeq(dev);
+ if (bio_list_empty(&nvmeq->sq_cong))
+ add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+ bio_list_add(&nvmeq->sq_cong, bio);
+ put_nvmeq(nvmeq);
+ wake_up_process(nvme_thread);
+}
+
+static void bio_completion(struct nvme_dev *dev, void *ctx,
+ struct nvme_completion *cqe)
+{
+ struct nvme_iod *iod = ctx;
+ struct bio *bio = iod->private;
+ u16 status = le16_to_cpup(&cqe->status) >> 1;
+
+ dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+ bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ nvme_free_iod(dev, iod);
+ if (status) {
+ bio_endio(bio, -EIO);
+ } else if (bio->bi_vcnt > bio->bi_idx) {
+ requeue_bio(dev, bio);
+ } else {
+ bio_endio(bio, 0);
+ }
+}
+
+/* length is in bytes. gfp flags indicates whether we may sleep. */
+static int nvme_setup_prps(struct nvme_dev *dev,
+ struct nvme_common_command *cmd, struct nvme_iod *iod,
+ int total_len, gfp_t gfp)
+{
+ struct dma_pool *pool;
+ int length = total_len;
+ struct scatterlist *sg = iod->sg;
+ int dma_len = sg_dma_len(sg);
+ u64 dma_addr = sg_dma_address(sg);
+ int offset = offset_in_page(dma_addr);
+ __le64 *prp_list;
+ __le64 **list = iod_list(iod);
+ dma_addr_t prp_dma;
+ int nprps, i;
+
+ cmd->prp1 = cpu_to_le64(dma_addr);
+ length -= (PAGE_SIZE - offset);
+ if (length <= 0)
+ return total_len;
+
+ dma_len -= (PAGE_SIZE - offset);
+ if (dma_len) {
+ dma_addr += (PAGE_SIZE - offset);
+ } else {
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+
+ if (length <= PAGE_SIZE) {
+ cmd->prp2 = cpu_to_le64(dma_addr);
+ return total_len;
+ }
+
+ nprps = DIV_ROUND_UP(length, PAGE_SIZE);
+ if (nprps <= (256 / 8)) {
+ pool = dev->prp_small_pool;
+ iod->npages = 0;
+ } else {
+ pool = dev->prp_page_pool;
+ iod->npages = 1;
+ }
+
+ prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
+ if (!prp_list) {
+ cmd->prp2 = cpu_to_le64(dma_addr);
+ iod->npages = -1;
+ return (total_len - length) + PAGE_SIZE;
+ }
+ list[0] = prp_list;
+ iod->first_dma = prp_dma;
+ cmd->prp2 = cpu_to_le64(prp_dma);
+ i = 0;
+ for (;;) {
+ if (i == PAGE_SIZE / 8) {
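+ /*
+ * The current PRP page is full: allocate another one, copy the
+ * last data entry into it, and turn that last slot of the old
+ * page into a pointer to the new page (PRP list chaining).
+ */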
+ __le64 *old_prp_list = prp_list;
+ prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
+ if (!prp_list)
+ return total_len - length;
+ list[iod->npages++] = prp_list;
+ prp_list[0] = old_prp_list[i - 1];
+ old_prp_list[i - 1] = cpu_to_le64(prp_dma);
+ i = 1;
+ }
+ prp_list[i++] = cpu_to_le64(dma_addr);
+ dma_len -= PAGE_SIZE;
+ dma_addr += PAGE_SIZE;
+ length -= PAGE_SIZE;
+ if (length <= 0)
+ break;
+ if (dma_len > 0)
+ continue;
+ BUG_ON(dma_len < 0);
+ sg = sg_next(sg);
+ dma_addr = sg_dma_address(sg);
+ dma_len = sg_dma_len(sg);
+ }
+
+ return total_len;
+}
+
+/* NVMe scatterlists require no holes in the virtual address */
+#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2) ((vec2)->bv_offset || \
+ (((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))
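+/*
+ * i.e. two bio_vecs can only be mapped into the same command if the first
+ * ends on a page boundary and the second starts at offset zero, since PRP
+ * entries after the first must describe whole, page-aligned pages.
+ */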
+
+static int nvme_map_bio(struct device *dev, struct nvme_iod *iod,
+ struct bio *bio, enum dma_data_direction dma_dir, int psegs)
+{
+ struct bio_vec *bvec, *bvprv = NULL;
+ struct scatterlist *sg = NULL;
+ int i, old_idx, length = 0, nsegs = 0;
+
+ sg_init_table(iod->sg, psegs);
+ old_idx = bio->bi_idx;
+ bio_for_each_segment(bvec, bio, i) {
+ if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
+ sg->length += bvec->bv_len;
+ } else {
+ if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
+ break;
+ sg = sg ? sg + 1 : iod->sg;
+ sg_set_page(sg, bvec->bv_page, bvec->bv_len,
+ bvec->bv_offset);
+ nsegs++;
+ }
+ length += bvec->bv_len;
+ bvprv = bvec;
+ }
+ bio->bi_idx = i;
+ iod->nents = nsegs;
+ sg_mark_end(sg);
+ if (dma_map_sg(dev, iod->sg, iod->nents, dma_dir) == 0) {
+ bio->bi_idx = old_idx;
+ return -ENOMEM;
+ }
+ return length;
+}
+
+static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+ int cmdid)
+{
+ struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+
+ memset(cmnd, 0, sizeof(*cmnd));
+ cmnd->common.opcode = nvme_cmd_flush;
+ cmnd->common.command_id = cmdid;
+ cmnd->common.nsid = cpu_to_le32(ns->ns_id);
+
+ if (++nvmeq->sq_tail == nvmeq->q_depth)
+ nvmeq->sq_tail = 0;
+ writel(nvmeq->sq_tail, nvmeq->q_db);
+
+ return 0;
+}
+
+static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
+{
+ int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
+ special_completion, NVME_IO_TIMEOUT);
+ if (unlikely(cmdid < 0))
+ return cmdid;
+
+ return nvme_submit_flush(nvmeq, ns, cmdid);
+}
+
+/*
+ * Called with local interrupts disabled and the q_lock held. May not sleep.
+ */
+static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+ struct bio *bio)
+{
+ struct nvme_command *cmnd;
+ struct nvme_iod *iod;
+ enum dma_data_direction dma_dir;
+ int cmdid, length, result = -ENOMEM;
+ u16 control;
+ u32 dsmgmt;
+ int psegs = bio_phys_segments(ns->queue, bio);
+
+ if ((bio->bi_rw & REQ_FLUSH) && psegs) {
+ result = nvme_submit_flush_data(nvmeq, ns);
+ if (result)
+ return result;
+ }
+
+ iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
+ if (!iod)
+ goto nomem;
+ iod->private = bio;
+
+ result = -EBUSY;
+ cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
+ if (unlikely(cmdid < 0))
+ goto free_iod;
+
+ if ((bio->bi_rw & REQ_FLUSH) && !psegs)
+ return nvme_submit_flush(nvmeq, ns, cmdid);
+
+ control = 0;
+ if (bio->bi_rw & REQ_FUA)
+ control |= NVME_RW_FUA;
+ if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
+ control |= NVME_RW_LR;
+
+ dsmgmt = 0;
+ if (bio->bi_rw & REQ_RAHEAD)
+ dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
+
+ cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+
+ memset(cmnd, 0, sizeof(*cmnd));
+ if (bio_data_dir(bio)) {
+ cmnd->rw.opcode = nvme_cmd_write;
+ dma_dir = DMA_TO_DEVICE;
+ } else {
+ cmnd->rw.opcode = nvme_cmd_read;
+ dma_dir = DMA_FROM_DEVICE;
+ }
+
+ result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs);
+ if (result < 0)
+ goto free_iod;
+ length = result;
+
+ cmnd->rw.command_id = cmdid;
+ cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
+ length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
+ GFP_ATOMIC);
+ cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
+ cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
+ cmnd->rw.control = cpu_to_le16(control);
+ cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+
+ bio->bi_sector += length >> 9;
+
+ if (++nvmeq->sq_tail == nvmeq->q_depth)
+ nvmeq->sq_tail = 0;
+ writel(nvmeq->sq_tail, nvmeq->q_db);
+
+ return 0;
+
+ free_iod:
+ nvme_free_iod(nvmeq->dev, iod);
+ nomem:
+ return result;
+}
+
+static void nvme_make_request(struct request_queue *q, struct bio *bio)
+{
+ struct nvme_ns *ns = q->queuedata;
+ struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
+ int result = -EBUSY;
+
+ spin_lock_irq(&nvmeq->q_lock);
+ if (bio_list_empty(&nvmeq->sq_cong))
+ result = nvme_submit_bio_queue(nvmeq, ns, bio);
+ if (unlikely(result)) {
+ if (bio_list_empty(&nvmeq->sq_cong))
+ add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+ bio_list_add(&nvmeq->sq_cong, bio);
+ }
+
+ spin_unlock_irq(&nvmeq->q_lock);
+ put_nvmeq(nvmeq);
+}
+
+static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
+{
+ u16 head, phase;
+
+ head = nvmeq->cq_head;
+ phase = nvmeq->cq_phase;
+
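+ /*
+ * Bit 0 of the completion status word is the phase tag. The
+ * controller inverts it on each pass through the queue, so an
+ * entry whose phase does not match ours has not been written yet.
+ */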
+ for (;;) {
+ void *ctx;
+ nvme_completion_fn fn;
+ struct nvme_completion cqe = nvmeq->cqes[head];
+ if ((le16_to_cpu(cqe.status) & 1) != phase)
+ break;
+ nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
+ if (++head == nvmeq->q_depth) {
+ head = 0;
+ phase = !phase;
+ }
+
+ ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
+ fn(nvmeq->dev, ctx, &cqe);
+ }
+
+ /* If the controller ignores the cq head doorbell and continuously
+ * writes to the queue, it is theoretically possible to wrap around
+ * the queue twice and mistakenly return IRQ_NONE. Linux only
+ * requires that 0.1% of your interrupts are handled, so this isn't
+ * a big problem.
+ */
+ if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
+ return IRQ_NONE;
+
+ writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
+ nvmeq->cq_head = head;
+ nvmeq->cq_phase = phase;
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nvme_irq(int irq, void *data)
+{
+ irqreturn_t result;
+ struct nvme_queue *nvmeq = data;
+ spin_lock(&nvmeq->q_lock);
+ result = nvme_process_cq(nvmeq);
+ spin_unlock(&nvmeq->q_lock);
+ return result;
+}
+
+static irqreturn_t nvme_irq_check(int irq, void *data)
+{
+ struct nvme_queue *nvmeq = data;
+ struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
+ if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
+ return IRQ_NONE;
+ return IRQ_WAKE_THREAD;
+}
+
+static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
+{
+ spin_lock_irq(&nvmeq->q_lock);
+ cancel_cmdid(nvmeq, cmdid, NULL);
+ spin_unlock_irq(&nvmeq->q_lock);
+}
+
+struct sync_cmd_info {
+ struct task_struct *task;
+ u32 result;
+ int status;
+};
+
+static void sync_completion(struct nvme_dev *dev, void *ctx,
+ struct nvme_completion *cqe)
+{
+ struct sync_cmd_info *cmdinfo = ctx;
+ cmdinfo->result = le32_to_cpup(&cqe->result);
+ cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
+ wake_up_process(cmdinfo->task);
+}
+
+/*
+ * Returns 0 on success. If the result is negative, it's a Linux error code;
+ * if the result is positive, it's an NVM Express status code
+ */
+static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
+ struct nvme_command *cmd, u32 *result, unsigned timeout)
+{
+ int cmdid;
+ struct sync_cmd_info cmdinfo;
+
+ cmdinfo.task = current;
+ cmdinfo.status = -EINTR;
+
+ cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion,
+ timeout);
+ if (cmdid < 0)
+ return cmdid;
+ cmd->common.command_id = cmdid;
+
+ set_current_state(TASK_KILLABLE);
+ nvme_submit_cmd(nvmeq, cmd);
+ schedule();
+
+ if (cmdinfo.status == -EINTR) {
+ nvme_abort_command(nvmeq, cmdid);
+ return -EINTR;
+ }
+
+ if (result)
+ *result = cmdinfo.result;
+
+ return cmdinfo.status;
+}
+
+static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
+ u32 *result)
+{
+ return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
+}
+
+static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
+{
+ int status;
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+ c.delete_queue.opcode = opcode;
+ c.delete_queue.qid = cpu_to_le16(id);
+
+ status = nvme_submit_admin_cmd(dev, &c, NULL);
+ if (status)
+ return -EIO;
+ return 0;
+}
+
+static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
+ struct nvme_queue *nvmeq)
+{
+ int status;
+ struct nvme_command c;
+ int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
+
+ memset(&c, 0, sizeof(c));
+ c.create_cq.opcode = nvme_admin_create_cq;
+ c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
+ c.create_cq.cqid = cpu_to_le16(qid);
+ c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
+ c.create_cq.cq_flags = cpu_to_le16(flags);
+ c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
+
+ status = nvme_submit_admin_cmd(dev, &c, NULL);
+ if (status)
+ return -EIO;
+ return 0;
+}
+
+static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
+ struct nvme_queue *nvmeq)
+{
+ int status;
+ struct nvme_command c;
+ int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;
+
+ memset(&c, 0, sizeof(c));
+ c.create_sq.opcode = nvme_admin_create_sq;
+ c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
+ c.create_sq.sqid = cpu_to_le16(qid);
+ c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
+ c.create_sq.sq_flags = cpu_to_le16(flags);
+ c.create_sq.cqid = cpu_to_le16(qid);
+
+ status = nvme_submit_admin_cmd(dev, &c, NULL);
+ if (status)
+ return -EIO;
+ return 0;
+}
+
+static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
+{
+ return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
+}
+
+static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
+{
+ return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
+}
+
+static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
+ dma_addr_t dma_addr)
+{
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+ c.identify.opcode = nvme_admin_identify;
+ c.identify.nsid = cpu_to_le32(nsid);
+ c.identify.prp1 = cpu_to_le64(dma_addr);
+ c.identify.cns = cpu_to_le32(cns);
+
+ return nvme_submit_admin_cmd(dev, &c, NULL);
+}
+
+static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
+ unsigned dword11, dma_addr_t dma_addr)
+{
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+ c.features.opcode = nvme_admin_get_features;
+ c.features.prp1 = cpu_to_le64(dma_addr);
+ c.features.fid = cpu_to_le32(fid);
+ c.features.dword11 = cpu_to_le32(dword11);
+
+ return nvme_submit_admin_cmd(dev, &c, NULL);
+}
+
+static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
+ unsigned dword11, dma_addr_t dma_addr, u32 *result)
+{
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+ c.features.opcode = nvme_admin_set_features;
+ c.features.prp1 = cpu_to_le64(dma_addr);
+ c.features.fid = cpu_to_le32(fid);
+ c.features.dword11 = cpu_to_le32(dword11);
+
+ return nvme_submit_admin_cmd(dev, &c, result);
+}
+
+static void nvme_free_queue(struct nvme_dev *dev, int qid)
+{
+ struct nvme_queue *nvmeq = dev->queues[qid];
+ int vector = dev->entry[nvmeq->cq_vector].vector;
+
+ irq_set_affinity_hint(vector, NULL);
+ free_irq(vector, nvmeq);
+
+ /* Don't tell the adapter to delete the admin queue */
+ if (qid) {
+ adapter_delete_sq(dev, qid);
+ adapter_delete_cq(dev, qid);
+ }
+
+ dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
+ (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
+ dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+ nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+ kfree(nvmeq);
+}
+
+static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
+ int depth, int vector)
+{
+ struct device *dmadev = &dev->pci_dev->dev;
+ unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
+ struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
+ if (!nvmeq)
+ return NULL;
+
+ nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
+ &nvmeq->cq_dma_addr, GFP_KERNEL);
+ if (!nvmeq->cqes)
+ goto free_nvmeq;
+ memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));
+
+ nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
+ &nvmeq->sq_dma_addr, GFP_KERNEL);
+ if (!nvmeq->sq_cmds)
+ goto free_cqdma;
+
+ nvmeq->q_dmadev = dmadev;
+ nvmeq->dev = dev;
+ spin_lock_init(&nvmeq->q_lock);
+ nvmeq->cq_head = 0;
+ nvmeq->cq_phase = 1;
+ init_waitqueue_head(&nvmeq->sq_full);
+ init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
+ bio_list_init(&nvmeq->sq_cong);
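+ /*
+ * Doorbell registers come in SQ tail/CQ head pairs, spaced
+ * (4 << CAP.DSTRD) bytes apart, so queue qid's SQ doorbell sits at
+ * index qid * 2 * (1 << db_stride) into the 32-bit doorbell array.
+ */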
+ nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
+ nvmeq->q_depth = depth;
+ nvmeq->cq_vector = vector;
+
+ return nvmeq;
+
+ free_cqdma:
+ dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
+ nvmeq->cq_dma_addr);
+ free_nvmeq:
+ kfree(nvmeq);
+ return NULL;
+}
+
+static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
+ const char *name)
+{
+ if (use_threaded_interrupts)
+ return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
+ nvme_irq_check, nvme_irq,
+ IRQF_DISABLED | IRQF_SHARED,
+ name, nvmeq);
+ return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
+ IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
+}
+
+static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
+ int qid, int cq_size, int vector)
+{
+ int result;
+ struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);
+
+ if (!nvmeq)
+ return ERR_PTR(-ENOMEM);
+
+ result = adapter_alloc_cq(dev, qid, nvmeq);
+ if (result < 0)
+ goto free_nvmeq;
+
+ result = adapter_alloc_sq(dev, qid, nvmeq);
+ if (result < 0)
+ goto release_cq;
+
+ result = queue_request_irq(dev, nvmeq, "nvme");
+ if (result < 0)
+ goto release_sq;
+
+ return nvmeq;
+
+ release_sq:
+ adapter_delete_sq(dev, qid);
+ release_cq:
+ adapter_delete_cq(dev, qid);
+ free_nvmeq:
+ dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
+ (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
+ dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+ nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+ kfree(nvmeq);
+ return ERR_PTR(result);
+}
+
+static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
+{
+ int result;
+ u32 aqa;
+ u64 cap;
+ unsigned long timeout;
+ struct nvme_queue *nvmeq;
+
+ dev->dbs = ((void __iomem *)dev->bar) + 4096;
+
+ nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
+ if (!nvmeq)
+ return -ENOMEM;
+
+ aqa = nvmeq->q_depth - 1;
+ aqa |= aqa << 16;
+
+ dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
+ dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
+ dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
+ dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
+
+ writel(0, &dev->bar->cc);
+ writel(aqa, &dev->bar->aqa);
+ writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
+ writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
+ writel(dev->ctrl_config, &dev->bar->cc);
+
+ cap = readq(&dev->bar->cap);
+ timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
+ dev->db_stride = NVME_CAP_STRIDE(cap);
+
+ while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
+ msleep(100);
+ if (fatal_signal_pending(current))
+ return -EINTR;
+ if (time_after(jiffies, timeout)) {
+ dev_err(&dev->pci_dev->dev,
+ "Device not ready; aborting initialisation\n");
+ return -ENODEV;
+ }
+ }
+
+ result = queue_request_irq(dev, nvmeq, "nvme admin");
+ dev->queues[0] = nvmeq;
+ return result;
+}
+
+static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
+ unsigned long addr, unsigned length)
+{
+ int i, err, count, nents, offset;
+ struct scatterlist *sg;
+ struct page **pages;
+ struct nvme_iod *iod;
+
+ if (addr & 3)
+ return ERR_PTR(-EINVAL);
+ if (!length)
+ return ERR_PTR(-EINVAL);
+
+ offset = offset_in_page(addr);
+ count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
+ pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
+ if (!pages)
+ return ERR_PTR(-ENOMEM);
+
+ err = get_user_pages_fast(addr, count, 1, pages);
+ if (err < count) {
+ count = err;
+ err = -EFAULT;
+ goto put_pages;
+ }
+
+ iod = nvme_alloc_iod(count, length, GFP_KERNEL);
+ sg = iod->sg;
+ sg_init_table(sg, count);
+ for (i = 0; i < count; i++) {
+ sg_set_page(&sg[i], pages[i],
+ min_t(int, length, PAGE_SIZE - offset), offset);
+ length -= (PAGE_SIZE - offset);
+ offset = 0;
+ }
+ sg_mark_end(&sg[i - 1]);
+ iod->nents = count;
+
+ err = -ENOMEM;
+ nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
+ write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ if (!nents)
+ goto free_iod;
+
+ kfree(pages);
+ return iod;
+
+ free_iod:
+ kfree(iod);
+ put_pages:
+ for (i = 0; i < count; i++)
+ put_page(pages[i]);
+ kfree(pages);
+ return ERR_PTR(err);
+}
+
+static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
+ struct nvme_iod *iod)
+{
+ int i;
+
+ dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+ write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ for (i = 0; i < iod->nents; i++)
+ put_page(sg_page(&iod->sg[i]));
+}
+
+static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
+{
+ struct nvme_dev *dev = ns->dev;
+ struct nvme_queue *nvmeq;
+ struct nvme_user_io io;
+ struct nvme_command c;
+ unsigned length;
+ int status;
+ struct nvme_iod *iod;
+
+ if (copy_from_user(&io, uio, sizeof(io)))
+ return -EFAULT;
+ length = (io.nblocks + 1) << ns->lba_shift;
+
+ switch (io.opcode) {
+ case nvme_cmd_write:
+ case nvme_cmd_read:
+ case nvme_cmd_compare:
+ iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (IS_ERR(iod))
+ return PTR_ERR(iod);
+
+ memset(&c, 0, sizeof(c));
+ c.rw.opcode = io.opcode;
+ c.rw.flags = io.flags;
+ c.rw.nsid = cpu_to_le32(ns->ns_id);
+ c.rw.slba = cpu_to_le64(io.slba);
+ c.rw.length = cpu_to_le16(io.nblocks);
+ c.rw.control = cpu_to_le16(io.control);
+ c.rw.dsmgmt = cpu_to_le16(io.dsmgmt);
+ c.rw.reftag = io.reftag;
+ c.rw.apptag = io.apptag;
+ c.rw.appmask = io.appmask;
+ /* XXX: metadata */
+ length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);
+
+ nvmeq = get_nvmeq(dev);
+ /*
+ * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
+ * disabled. We may be preempted at any point, and be rescheduled
+ * to a different CPU. That will cause cacheline bouncing, but no
+ * additional races since q_lock already protects against other CPUs.
+ */
+ put_nvmeq(nvmeq);
+ if (length != (io.nblocks + 1) << ns->lba_shift)
+ status = -ENOMEM;
+ else
+ status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+
+ nvme_unmap_user_pages(dev, io.opcode & 1, iod);
+ nvme_free_iod(dev, iod);
+ return status;
+}
+
+static int nvme_user_admin_cmd(struct nvme_ns *ns,
+ struct nvme_admin_cmd __user *ucmd)
+{
+ struct nvme_dev *dev = ns->dev;
+ struct nvme_admin_cmd cmd;
+ struct nvme_command c;
+ int status, length;
+ struct nvme_iod *iod;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
+ return -EFAULT;
+
+ memset(&c, 0, sizeof(c));
+ c.common.opcode = cmd.opcode;
+ c.common.flags = cmd.flags;
+ c.common.nsid = cpu_to_le32(cmd.nsid);
+ c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
+ c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
+ c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
+ c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
+ c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
+ c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
+ c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
+ c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);
+
+ length = cmd.data_len;
+ if (cmd.data_len) {
+ iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr,
+ length);
+ if (IS_ERR(iod))
+ return PTR_ERR(iod);
+ length = nvme_setup_prps(dev, &c.common, iod, length,
+ GFP_KERNEL);
+ }
+
+ if (length != cmd.data_len)
+ status = -ENOMEM;
+ else
+ status = nvme_submit_admin_cmd(dev, &c, NULL);
+
+ if (cmd.data_len) {
+ nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
+ nvme_free_iod(dev, iod);
+ }
+ return status;
+}
+
+static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
+ unsigned long arg)
+{
+ struct nvme_ns *ns = bdev->bd_disk->private_data;
+
+ switch (cmd) {
+ case NVME_IOCTL_ID:
+ return ns->ns_id;
+ case NVME_IOCTL_ADMIN_CMD:
+ return nvme_user_admin_cmd(ns, (void __user *)arg);
+ case NVME_IOCTL_SUBMIT_IO:
+ return nvme_submit_io(ns, (void __user *)arg);
+ default:
+ return -ENOTTY;
+ }
+}
+
+static const struct block_device_operations nvme_fops = {
+ .owner = THIS_MODULE,
+ .ioctl = nvme_ioctl,
+ .compat_ioctl = nvme_ioctl,
+};
+
+static void nvme_timeout_ios(struct nvme_queue *nvmeq)
+{
+ int depth = nvmeq->q_depth - 1;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+ unsigned long now = jiffies;
+ int cmdid;
+
+ for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
+ void *ctx;
+ nvme_completion_fn fn;
+ static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, };
+
+ if (!time_after(now, info[cmdid].timeout))
+ continue;
+ dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
+ ctx = cancel_cmdid(nvmeq, cmdid, &fn);
+ fn(nvmeq->dev, ctx, &cqe);
+ }
+}
+
+static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
+{
+ while (bio_list_peek(&nvmeq->sq_cong)) {
+ struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
+ struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
+ if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
+ bio_list_add_head(&nvmeq->sq_cong, bio);
+ break;
+ }
+ if (bio_list_empty(&nvmeq->sq_cong))
+ remove_wait_queue(&nvmeq->sq_full,
+ &nvmeq->sq_cong_wait);
+ }
+}
+
+static int nvme_kthread(void *data)
+{
+ struct nvme_dev *dev;
+
+ while (!kthread_should_stop()) {
+ __set_current_state(TASK_RUNNING);
+ spin_lock(&dev_list_lock);
+ list_for_each_entry(dev, &dev_list, node) {
+ int i;
+ for (i = 0; i < dev->queue_count; i++) {
+ struct nvme_queue *nvmeq = dev->queues[i];
+ if (!nvmeq)
+ continue;
+ spin_lock_irq(&nvmeq->q_lock);
+ if (nvme_process_cq(nvmeq))
+ printk("process_cq did something\n");
+ nvme_timeout_ios(nvmeq);
+ nvme_resubmit_bios(nvmeq);
+ spin_unlock_irq(&nvmeq->q_lock);
+ }
+ }
+ spin_unlock(&dev_list_lock);
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule_timeout(HZ);
+ }
+ return 0;
+}
+
+static DEFINE_IDA(nvme_index_ida);
+
+static int nvme_get_ns_idx(void)
+{
+ int index, error;
+
+ do {
+ if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
+ return -1;
+
+ spin_lock(&dev_list_lock);
+ error = ida_get_new(&nvme_index_ida, &index);
+ spin_unlock(&dev_list_lock);
+ } while (error == -EAGAIN);
+
+ if (error)
+ index = -1;
+ return index;
+}
+
+static void nvme_put_ns_idx(int index)
+{
+ spin_lock(&dev_list_lock);
+ ida_remove(&nvme_index_ida, index);
+ spin_unlock(&dev_list_lock);
+}
+
+static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
+ struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
+{
+ struct nvme_ns *ns;
+ struct gendisk *disk;
+ int lbaf;
+
+ if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
+ return NULL;
+
+ ns = kzalloc(sizeof(*ns), GFP_KERNEL);
+ if (!ns)
+ return NULL;
+ ns->queue = blk_alloc_queue(GFP_KERNEL);
+ if (!ns->queue)
+ goto out_free_ns;
+ ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
+ queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
+ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
+/* queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); */
+ blk_queue_make_request(ns->queue, nvme_make_request);
+ ns->dev = dev;
+ ns->queue->queuedata = ns;
+
+ disk = alloc_disk(NVME_MINORS);
+ if (!disk)
+ goto out_free_queue;
+ ns->ns_id = nsid;
+ ns->disk = disk;
+ lbaf = id->flbas & 0xf;
+ ns->lba_shift = id->lbaf[lbaf].ds;
+
+ disk->major = nvme_major;
+ disk->minors = NVME_MINORS;
+ disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
+ disk->fops = &nvme_fops;
+ disk->private_data = ns;
+ disk->queue = ns->queue;
+ disk->driverfs_dev = &dev->pci_dev->dev;
+ sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
+ set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
+
+ return ns;
+
+ out_free_queue:
+ blk_cleanup_queue(ns->queue);
+ out_free_ns:
+ kfree(ns);
+ return NULL;
+}
+
+static void nvme_ns_free(struct nvme_ns *ns)
+{
+ int index = ns->disk->first_minor / NVME_MINORS;
+ put_disk(ns->disk);
+ nvme_put_ns_idx(index);
+ blk_cleanup_queue(ns->queue);
+ kfree(ns);
+}
+
+static int set_queue_count(struct nvme_dev *dev, int count)
+{
+ int status;
+ u32 result;
+ u32 q_count = (count - 1) | ((count - 1) << 16);
+
+ status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
+ &result);
+ if (status)
+ return -EIO;
+ return min(result & 0xffff, result >> 16) + 1;
+}
+
+static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
+{
+ int result, cpu, i, nr_io_queues, db_bar_size;
+
+ nr_io_queues = num_online_cpus();
+ result = set_queue_count(dev, nr_io_queues);
+ if (result < 0)
+ return result;
+ if (result < nr_io_queues)
+ nr_io_queues = result;
+
+ /* Deregister the admin queue's interrupt */
+ free_irq(dev->entry[0].vector, dev->queues[0]);
+
+ db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
+ if (db_bar_size > 8192) {
+ iounmap(dev->bar);
+ dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
+ db_bar_size);
+ dev->dbs = ((void __iomem *)dev->bar) + 4096;
+ dev->queues[0]->q_db = dev->dbs;
+ }
+
+ for (i = 0; i < nr_io_queues; i++)
+ dev->entry[i].entry = i;
+ for (;;) {
+ result = pci_enable_msix(dev->pci_dev, dev->entry,
+ nr_io_queues);
+ if (result == 0) {
+ break;
+ } else if (result > 0) {
+ nr_io_queues = result;
+ continue;
+ } else {
+ nr_io_queues = 1;
+ break;
+ }
+ }
+
+ result = queue_request_irq(dev, dev->queues[0], "nvme admin");
+ /* XXX: handle failure here */
+
+ cpu = cpumask_first(cpu_online_mask);
+ for (i = 0; i < nr_io_queues; i++) {
+ irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
+ cpu = cpumask_next(cpu, cpu_online_mask);
+ }
+
+ for (i = 0; i < nr_io_queues; i++) {
+ dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
+ NVME_Q_DEPTH, i);
+ if (IS_ERR(dev->queues[i + 1]))
+ return PTR_ERR(dev->queues[i + 1]);
+ dev->queue_count++;
+ }
+
+ for (; i < num_possible_cpus(); i++) {
+ int target = i % rounddown_pow_of_two(dev->queue_count - 1);
+ dev->queues[i + 1] = dev->queues[target + 1];
+ }
+
+ return 0;
+}
+
+static void nvme_free_queues(struct nvme_dev *dev)
+{
+ int i;
+
+ for (i = dev->queue_count - 1; i >= 0; i--)
+ nvme_free_queue(dev, i);
+}
+
+static int __devinit nvme_dev_add(struct nvme_dev *dev)
+{
+ int res, nn, i;
+ struct nvme_ns *ns, *next;
+ struct nvme_id_ctrl *ctrl;
+ struct nvme_id_ns *id_ns;
+ void *mem;
+ dma_addr_t dma_addr;
+
+ res = nvme_setup_io_queues(dev);
+ if (res)
+ return res;
+
+ mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
+ GFP_KERNEL);
+
+ res = nvme_identify(dev, 0, 1, dma_addr);
+ if (res) {
+ res = -EIO;
+ goto out_free;
+ }
+
+ ctrl = mem;
+ nn = le32_to_cpup(&ctrl->nn);
+ memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
+ memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
+ memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
+
+ id_ns = mem;
+ for (i = 1; i <= nn; i++) {
+ res = nvme_identify(dev, i, 0, dma_addr);
+ if (res)
+ continue;
+
+ if (id_ns->ncap == 0)
+ continue;
+
+ res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
+ dma_addr + 4096);
+ if (res)
+ continue;
+
+ ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
+ if (ns)
+ list_add_tail(&ns->list, &dev->namespaces);
+ }
+ list_for_each_entry(ns, &dev->namespaces, list)
+ add_disk(ns->disk);
+
+ goto out;
+
+ out_free:
+ list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
+ list_del(&ns->list);
+ nvme_ns_free(ns);
+ }
+
+ out:
+ dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
+ return res;
+}
+
+static int nvme_dev_remove(struct nvme_dev *dev)
+{
+ struct nvme_ns *ns, *next;
+
+ spin_lock(&dev_list_lock);
+ list_del(&dev->node);
+ spin_unlock(&dev_list_lock);
+
+ /* TODO: wait for all outstanding I/O to finish, or cancel it */
+
+ list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
+ list_del(&ns->list);
+ del_gendisk(ns->disk);
+ nvme_ns_free(ns);
+ }
+
+ nvme_free_queues(dev);
+
+ return 0;
+}
+
+static int nvme_setup_prp_pools(struct nvme_dev *dev)
+{
+ struct device *dmadev = &dev->pci_dev->dev;
+ dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
+ PAGE_SIZE, PAGE_SIZE, 0);
+ if (!dev->prp_page_pool)
+ return -ENOMEM;
+
+ /* Optimisation for I/Os between 4k and 128k */
+ dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
+ 256, 256, 0);
+ if (!dev->prp_small_pool) {
+ dma_pool_destroy(dev->prp_page_pool);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static void nvme_release_prp_pools(struct nvme_dev *dev)
+{
+ dma_pool_destroy(dev->prp_page_pool);
+ dma_pool_destroy(dev->prp_small_pool);
+}
+
+/* XXX: Use an ida or something to let remove / add work correctly */
+static void nvme_set_instance(struct nvme_dev *dev)
+{
+ static int instance;
+ dev->instance = instance++;
+}
+
+static void nvme_release_instance(struct nvme_dev *dev)
+{
+}
+
+static int __devinit nvme_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int bars, result = -ENOMEM;
+ struct nvme_dev *dev;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+ dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
+ GFP_KERNEL);
+ if (!dev->entry)
+ goto free;
+ dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
+ GFP_KERNEL);
+ if (!dev->queues)
+ goto free;
+
+ if (pci_enable_device_mem(pdev))
+ goto free;
+ pci_set_master(pdev);
+ bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ if (pci_request_selected_regions(pdev, bars, "nvme"))
+ goto disable;
+
+ INIT_LIST_HEAD(&dev->namespaces);
+ dev->pci_dev = pdev;
+ pci_set_drvdata(pdev, dev);
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ nvme_set_instance(dev);
+ dev->entry[0].vector = pdev->irq;
+
+ result = nvme_setup_prp_pools(dev);
+ if (result)
+ goto disable_msix;
+
+ dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
+ if (!dev->bar) {
+ result = -ENOMEM;
+ goto disable_msix;
+ }
+
+ result = nvme_configure_admin_queue(dev);
+ if (result)
+ goto unmap;
+ dev->queue_count++;
+
+ spin_lock(&dev_list_lock);
+ list_add(&dev->node, &dev_list);
+ spin_unlock(&dev_list_lock);
+
+ result = nvme_dev_add(dev);
+ if (result)
+ goto delete;
+
+ return 0;
+
+ delete:
+ spin_lock(&dev_list_lock);
+ list_del(&dev->node);
+ spin_unlock(&dev_list_lock);
+
+ nvme_free_queues(dev);
+ unmap:
+ iounmap(dev->bar);
+ disable_msix:
+ pci_disable_msix(pdev);
+ nvme_release_instance(dev);
+ nvme_release_prp_pools(dev);
+ disable:
+ pci_disable_device(pdev);
+ pci_release_regions(pdev);
+ free:
+ kfree(dev->queues);
+ kfree(dev->entry);
+ kfree(dev);
+ return result;
+}
+
+static void __devexit nvme_remove(struct pci_dev *pdev)
+{
+ struct nvme_dev *dev = pci_get_drvdata(pdev);
+ nvme_dev_remove(dev);
+ pci_disable_msix(pdev);
+ iounmap(dev->bar);
+ nvme_release_instance(dev);
+ nvme_release_prp_pools(dev);
+ pci_disable_device(pdev);
+ pci_release_regions(pdev);
+ kfree(dev->queues);
+ kfree(dev->entry);
+ kfree(dev);
+}
+
+/* These functions are yet to be implemented */
+#define nvme_error_detected NULL
+#define nvme_dump_registers NULL
+#define nvme_link_reset NULL
+#define nvme_slot_reset NULL
+#define nvme_error_resume NULL
+#define nvme_suspend NULL
+#define nvme_resume NULL
+
+static struct pci_error_handlers nvme_err_handler = {
+ .error_detected = nvme_error_detected,
+ .mmio_enabled = nvme_dump_registers,
+ .link_reset = nvme_link_reset,
+ .slot_reset = nvme_slot_reset,
+ .resume = nvme_error_resume,
+};
+
+/* Move to pci_ids.h later */
+#define PCI_CLASS_STORAGE_EXPRESS 0x010802
+
+static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
+ { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
+ { 0, }
+};
+MODULE_DEVICE_TABLE(pci, nvme_id_table);
+
+static struct pci_driver nvme_driver = {
+ .name = "nvme",
+ .id_table = nvme_id_table,
+ .probe = nvme_probe,
+ .remove = __devexit_p(nvme_remove),
+ .suspend = nvme_suspend,
+ .resume = nvme_resume,
+ .err_handler = &nvme_err_handler,
+};
+
+static int __init nvme_init(void)
+{
+ int result = -EBUSY;
+
+ nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
+ if (IS_ERR(nvme_thread))
+ return PTR_ERR(nvme_thread);
+
+ nvme_major = register_blkdev(nvme_major, "nvme");
+ if (nvme_major <= 0)
+ goto kill_kthread;
+
+ result = pci_register_driver(&nvme_driver);
+ if (result)
+ goto unregister_blkdev;
+ return 0;
+
+ unregister_blkdev:
+ unregister_blkdev(nvme_major, "nvme");
+ kill_kthread:
+ kthread_stop(nvme_thread);
+ return result;
+}
+
+static void __exit nvme_exit(void)
+{
+ pci_unregister_driver(&nvme_driver);
+ unregister_blkdev(nvme_major, "nvme");
+ kthread_stop(nvme_thread);
+}
+
+MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.8");
+module_init(nvme_init);
+module_exit(nvme_exit);
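For reference, the ioctl surface added in nvme_ioctl() above can be exercised from user space with nothing more than open() and ioctl(). A minimal sketch follows, assuming the NVME_IOCTL_ID number and the nvme%dn%d node naming from this series are visible to user space (the header name is an assumption):

/* Hypothetical user-space sketch: ask the driver for the namespace ID
 * of an NVMe block node via the NVME_IOCTL_ID path handled above. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/nvme.h>		/* assumed to export NVME_IOCTL_ID */

int main(void)
{
	int nsid;
	int fd = open("/dev/nvme0n1", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* nvme_ioctl() returns ns->ns_id directly for this command */
	nsid = ioctl(fd, NVME_IOCTL_ID);
	if (nsid < 0)
		perror("NVME_IOCTL_ID");
	else
		printf("namespace id: %d\n", nsid);
	close(fd);
	return 0;
}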
diff --git a/drivers/block/paride/bpck6.c b/drivers/block/paride/bpck6.c
index ad12452..ec64e7f 100644
--- a/drivers/block/paride/bpck6.c
+++ b/drivers/block/paride/bpck6.c
@@ -20,9 +20,6 @@
*/
-/* PARAMETERS */
-static int verbose; /* set this to 1 to see debugging messages and whatnot */
-
#define BACKPACK_VERSION "2.0.2"
#include <linux/module.h>
@@ -36,6 +33,8 @@ static int verbose; /* set this to 1 to see debugging messages and whatnot */
#include "ppc6lnx.c"
#include "paride.h"
+/* PARAMETERS */
+static bool verbose; /* set this to 1 to see debugging messages and whatnot */
#define PPCSTRUCT(pi) ((Interface *)(pi->private))
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 46b8136..ba2b6b5 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -144,7 +144,7 @@ enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY};
static DEFINE_MUTEX(pcd_mutex);
static DEFINE_SPINLOCK(pcd_lock);
-module_param(verbose, bool, 0644);
+module_param(verbose, int, 0644);
module_param(major, int, 0);
module_param(name, charp, 0);
module_param(nice, int, 0);
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
index 869e767..831e3ac 100644
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -124,8 +124,9 @@
by default.
*/
+#include <linux/types.h>
-static int verbose = 0;
+static bool verbose = 0;
static int major = PD_MAJOR;
static char *name = PD_NAME;
static int cluster = 64;
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
index f21b520..ec8f9ed 100644
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -118,13 +118,15 @@
#define PF_NAME "pf"
#define PF_UNITS 4
+#include <linux/types.h>
+
/* Here are things one can override from the insmod command.
Most are autoprobed by paride unless set here. Verbose is off
by default.
*/
-static int verbose = 0;
+static bool verbose = 0;
static int major = PF_MAJOR;
static char *name = PF_NAME;
static int cluster = 64;
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
index a79fb4f..4a27b1d 100644
--- a/drivers/block/paride/pg.c
+++ b/drivers/block/paride/pg.c
@@ -130,13 +130,14 @@
#define PI_PG 4
#endif
+#include <linux/types.h>
/* Here are things one can override from the insmod command.
Most are autoprobed by paride unless set here. Verbose is 0
by default.
*/
-static int verbose = 0;
+static bool verbose = 0;
static int major = PG_MAJOR;
static char *name = PG_NAME;
static int disable = 0;
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
index 7179f79..2596042 100644
--- a/drivers/block/paride/pt.c
+++ b/drivers/block/paride/pt.c
@@ -109,13 +109,15 @@
#define PT_NAME "pt"
#define PT_UNITS 4
+#include <linux/types.h>
+
/* Here are things one can override from the insmod command.
Most are autoprobed by paride unless set here. Verbose is on
by default.
*/
-static int verbose = 0;
+static bool verbose = 0;
static int major = PT_MAJOR;
static char *name = PT_NAME;
static int disable = 0;
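The paride hunks above all resolve the same mismatch: the variable handed to module_param() and the type string passed to module_param() must agree, so either the declaration becomes bool (bpck6, pd, pf, pg, pt) or the registration falls back to int (pcd). A minimal sketch of the agreeing pair, with illustrative names:

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>

/* Declaration and registration use the same type; pairing a plain int
 * variable with the "bool" parameter type is what these hunks remove. */
static bool verbose;
module_param(verbose, bool, 0644);
MODULE_PARM_DESC(verbose, "enable verbose debugging output");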
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index a63b0a2..d59edea 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -2817,7 +2817,7 @@ static const struct block_device_operations pktcdvd_ops = {
.check_events = pkt_check_events,
};
-static char *pktcdvd_devnode(struct gendisk *gd, mode_t *mode)
+static char *pktcdvd_devnode(struct gendisk *gd, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "pktcdvd/%s", gd->disk_name);
}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 148ab94..a6278e7 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -380,6 +380,7 @@ static int rbd_get_client(struct rbd_device *rbd_dev, const char *mon_addr,
rbdc = __rbd_client_find(opt);
if (rbdc) {
ceph_destroy_options(opt);
+ kfree(rbd_opts);
/* using an existing client */
kref_get(&rbdc->kref);
@@ -406,15 +407,15 @@ done_err:
/*
* Destroy ceph client
+ *
+ * Caller must hold node_lock.
*/
static void rbd_client_release(struct kref *kref)
{
struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
dout("rbd_release_client %p\n", rbdc);
- spin_lock(&node_lock);
list_del(&rbdc->node);
- spin_unlock(&node_lock);
ceph_destroy_client(rbdc->client);
kfree(rbdc->rbd_opts);
@@ -427,7 +428,9 @@ static void rbd_client_release(struct kref *kref)
*/
static void rbd_put_client(struct rbd_device *rbd_dev)
{
+ spin_lock(&node_lock);
kref_put(&rbd_dev->rbd_client->kref, rbd_client_release);
+ spin_unlock(&node_lock);
rbd_dev->rbd_client = NULL;
rbd_dev->client = NULL;
}
@@ -2184,6 +2187,8 @@ static ssize_t rbd_add(struct bus_type *bus,
INIT_LIST_HEAD(&rbd_dev->node);
INIT_LIST_HEAD(&rbd_dev->snaps);
+ init_rwsem(&rbd_dev->header.snap_rwsem);
+
/* generate unique id: find highest unique id, add one */
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index fd5adcd..6d5a914 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -26,7 +26,6 @@
#include <linux/delay.h>
#include <linux/platform_device.h>
-#include <asm/macintosh.h>
#include <asm/mac_via.h>
#define CARDNAME "swim"
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
index b70f0fc..3fb6ab4 100644
--- a/drivers/block/sx8.c
+++ b/drivers/block/sx8.c
@@ -619,8 +619,10 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx)
host->state == HST_DEV_SCAN);
spin_unlock_irq(&host->lock);
- DPRINTK("blk_insert_request, tag == %u\n", idx);
- blk_insert_request(host->oob_q, crq->rq, 1, crq);
+ DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx);
+ crq->rq->cmd_type = REQ_TYPE_SPECIAL;
+ crq->rq->special = crq;
+ blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL);
return 0;
@@ -658,8 +660,10 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func)
BUG_ON(rc < 0);
crq->msg_bucket = (u32) rc;
- DPRINTK("blk_insert_request, tag == %u\n", idx);
- blk_insert_request(host->oob_q, crq->rq, 1, crq);
+ DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx);
+ crq->rq->cmd_type = REQ_TYPE_SPECIAL;
+ crq->rq->special = crq;
+ blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL);
return 0;
}
@@ -1116,7 +1120,7 @@ static inline void carm_handle_resp(struct carm_host *host,
break;
case MISC_GET_FW_VER: {
struct carm_fw_ver *ver = (struct carm_fw_ver *)
- mem + sizeof(struct carm_msg_get_fw_ver);
+ (mem + sizeof(struct carm_msg_get_fw_ver));
if (!error) {
host->fw_ver = le32_to_cpu(ver->version);
host->flags |= (ver->features & FL_FW_VER_MASK);
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
index 0e376d4..7333b9e 100644
--- a/drivers/block/ub.c
+++ b/drivers/block/ub.c
@@ -1744,12 +1744,11 @@ static int ub_bd_release(struct gendisk *disk, fmode_t mode)
static int ub_bd_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
- struct gendisk *disk = bdev->bd_disk;
void __user *usermem = (void __user *) arg;
int ret;
mutex_lock(&ub_mutex);
- ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem);
+ ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, usermem);
mutex_unlock(&ub_mutex);
return ret;
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 4d0b70a..c4a60ba 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -4,6 +4,7 @@
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
@@ -36,6 +37,12 @@ struct virtio_blk
/* Process context for config space updates */
struct work_struct config_work;
+ /* Lock for config space updates */
+ struct mutex config_lock;
+
+ /* enable config space updates */
+ bool config_enable;
+
/* What host tells us, plus 2 for header & tailer. */
unsigned int sg_elems;
@@ -172,7 +179,7 @@ static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
}
}
- if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
+ if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr, GFP_ATOMIC)<0) {
mempool_free(vbr, vblk->pool);
return false;
}
@@ -243,8 +250,8 @@ static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
return -ENOTTY;
- return scsi_cmd_ioctl(disk->queue, disk, mode, cmd,
- (void __user *)data);
+ return scsi_cmd_blk_ioctl(bdev, mode, cmd,
+ (void __user *)data);
}
/* We provide getgeo only to please some old bootloader/partitioning tools */
@@ -318,6 +325,10 @@ static void virtblk_config_changed_work(struct work_struct *work)
char cap_str_2[10], cap_str_10[10];
u64 capacity, size;
+ mutex_lock(&vblk->config_lock);
+ if (!vblk->config_enable)
+ goto done;
+
/* Host must always specify the capacity. */
vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
&capacity, sizeof(capacity));
@@ -340,6 +351,8 @@ static void virtblk_config_changed_work(struct work_struct *work)
cap_str_10, cap_str_2);
set_capacity(vblk->disk, capacity);
+done:
+ mutex_unlock(&vblk->config_lock);
}
static void virtblk_config_changed(struct virtio_device *vdev)
@@ -349,6 +362,18 @@ static void virtblk_config_changed(struct virtio_device *vdev)
queue_work(virtblk_wq, &vblk->config_work);
}
+static int init_vq(struct virtio_blk *vblk)
+{
+ int err = 0;
+
+ /* We expect one virtqueue, for output. */
+ vblk->vq = virtio_find_single_vq(vblk->vdev, blk_done, "requests");
+ if (IS_ERR(vblk->vq))
+ err = PTR_ERR(vblk->vq);
+
+ return err;
+}
+
static int __devinit virtblk_probe(struct virtio_device *vdev)
{
struct virtio_blk *vblk;
@@ -388,14 +413,13 @@ static int __devinit virtblk_probe(struct virtio_device *vdev)
vblk->vdev = vdev;
vblk->sg_elems = sg_elems;
sg_init_table(vblk->sg, vblk->sg_elems);
+ mutex_init(&vblk->config_lock);
INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
+ vblk->config_enable = true;
- /* We expect one virtqueue, for output. */
- vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests");
- if (IS_ERR(vblk->vq)) {
- err = PTR_ERR(vblk->vq);
+ err = init_vq(vblk);
+ if (err)
goto out_free_vblk;
- }
vblk->pool = mempool_create_kmalloc_pool(1,sizeof(struct virtblk_req));
if (!vblk->pool) {
@@ -542,7 +566,10 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
struct virtio_blk *vblk = vdev->priv;
int index = vblk->index;
- flush_work(&vblk->config_work);
+ /* Prevent config work handler from accessing the device. */
+ mutex_lock(&vblk->config_lock);
+ vblk->config_enable = false;
+ mutex_unlock(&vblk->config_lock);
/* Nothing should be pending. */
BUG_ON(!list_empty(&vblk->reqs));
@@ -550,6 +577,8 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
/* Stop all the virtqueues. */
vdev->config->reset(vdev);
+ flush_work(&vblk->config_work);
+
del_gendisk(vblk->disk);
blk_cleanup_queue(vblk->disk->queue);
put_disk(vblk->disk);
@@ -559,6 +588,46 @@ static void __devexit virtblk_remove(struct virtio_device *vdev)
ida_simple_remove(&vd_index_ida, index);
}
+#ifdef CONFIG_PM
+static int virtblk_freeze(struct virtio_device *vdev)
+{
+ struct virtio_blk *vblk = vdev->priv;
+
+ /* Ensure we don't receive any more interrupts */
+ vdev->config->reset(vdev);
+
+ /* Prevent config work handler from accessing the device. */
+ mutex_lock(&vblk->config_lock);
+ vblk->config_enable = false;
+ mutex_unlock(&vblk->config_lock);
+
+ flush_work(&vblk->config_work);
+
+ spin_lock_irq(vblk->disk->queue->queue_lock);
+ blk_stop_queue(vblk->disk->queue);
+ spin_unlock_irq(vblk->disk->queue->queue_lock);
+ blk_sync_queue(vblk->disk->queue);
+
+ vdev->config->del_vqs(vdev);
+ return 0;
+}
+
+static int virtblk_restore(struct virtio_device *vdev)
+{
+ struct virtio_blk *vblk = vdev->priv;
+ int ret;
+
+ vblk->config_enable = true;
+ ret = init_vq(vdev->priv);
+ if (!ret) {
+ spin_lock_irq(vblk->disk->queue->queue_lock);
+ blk_start_queue(vblk->disk->queue);
+ spin_unlock_irq(vblk->disk->queue->queue_lock);
+ }
+ return ret;
+}
+#endif
+
static const struct virtio_device_id id_table[] = {
{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
{ 0 },
@@ -584,6 +653,10 @@ static struct virtio_driver __refdata virtio_blk = {
.probe = virtblk_probe,
.remove = __devexit_p(virtblk_remove),
.config_changed = virtblk_config_changed,
+#ifdef CONFIG_PM
+ .freeze = virtblk_freeze,
+ .restore = virtblk_restore,
+#endif
};
static int __init init(void)
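The config_lock/config_enable pair threaded through virtio_blk above is a generic teardown guard: the config-changed work body checks the flag under the mutex, while remove and freeze clear the flag under the same mutex before resetting the device, so a late config interrupt can no longer reach a half-torn-down device. In rough generic form (the names below are placeholders, not virtio_blk's):

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

/* Sketch of the guard pattern used above, with placeholder names. */
struct foo_dev {
	struct mutex		config_lock;
	bool			config_enable;
	struct work_struct	config_work;
};

static void foo_config_work(struct work_struct *work)
{
	struct foo_dev *foo = container_of(work, struct foo_dev,
					   config_work);

	mutex_lock(&foo->config_lock);
	if (!foo->config_enable)
		goto done;		/* device is being torn down */
	/* ... safe to touch the device's config space here ... */
done:
	mutex_unlock(&foo->config_lock);
}

static void foo_teardown(struct foo_dev *foo)
{
	mutex_lock(&foo->config_lock);
	foo->config_enable = false;	/* stop new work bodies early */
	mutex_unlock(&foo->config_lock);
	flush_work(&foo->config_work);	/* drain any in-flight handler */
}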
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
index 4abd2bc..51a9727 100644
--- a/drivers/block/xd.c
+++ b/drivers/block/xd.c
@@ -148,7 +148,7 @@ static volatile int xdc_busy;
static struct timer_list xd_watchdog_int;
static volatile u_char xd_error;
-static int nodma = XD_DONT_USE_DMA;
+static bool nodma = XD_DONT_USE_DMA;
static struct request_queue *xd_queue;
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 15ec4db..0088bf6 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -39,9 +39,6 @@
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
-#include <linux/loop.h>
-#include <linux/falloc.h>
-#include <linux/fs.h>
#include <xen/events.h>
#include <xen/page.h>
@@ -362,7 +359,7 @@ static int xen_blkbk_map(struct blkif_request *req,
{
struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
int i;
- int nseg = req->nr_segments;
+ int nseg = req->u.rw.nr_segments;
int ret = 0;
/*
@@ -416,30 +413,25 @@ static int xen_blkbk_map(struct blkif_request *req,
return ret;
}
-static void xen_blk_discard(struct xen_blkif *blkif, struct blkif_request *req)
+static int dispatch_discard_io(struct xen_blkif *blkif,
+ struct blkif_request *req)
{
int err = 0;
int status = BLKIF_RSP_OKAY;
struct block_device *bdev = blkif->vbd.bdev;
- if (blkif->blk_backend_type == BLKIF_BACKEND_PHY)
- /* just forward the discard request */
+ blkif->st_ds_req++;
+
+ xen_blkif_get(blkif);
+ if (blkif->blk_backend_type == BLKIF_BACKEND_PHY ||
+ blkif->blk_backend_type == BLKIF_BACKEND_FILE) {
+ unsigned long secure = (blkif->vbd.discard_secure &&
+ (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
+ BLKDEV_DISCARD_SECURE : 0;
err = blkdev_issue_discard(bdev,
req->u.discard.sector_number,
req->u.discard.nr_sectors,
- GFP_KERNEL, 0);
- else if (blkif->blk_backend_type == BLKIF_BACKEND_FILE) {
- /* punch a hole in the backing file */
- struct loop_device *lo = bdev->bd_disk->private_data;
- struct file *file = lo->lo_backing_file;
-
- if (file->f_op->fallocate)
- err = file->f_op->fallocate(file,
- FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE,
- req->u.discard.sector_number << 9,
- req->u.discard.nr_sectors << 9);
- else
- err = -EOPNOTSUPP;
+ GFP_KERNEL, secure);
} else
err = -EOPNOTSUPP;
@@ -449,7 +441,9 @@ static void xen_blk_discard(struct xen_blkif *blkif, struct blkif_request *req)
} else if (err)
status = BLKIF_RSP_ERROR;
- make_response(blkif, req->id, req->operation, status);
+ make_response(blkif, req->u.discard.id, req->operation, status);
+ xen_blkif_put(blkif);
+ return err;
}
static void xen_blk_drain_io(struct xen_blkif *blkif)
@@ -573,8 +567,11 @@ __do_block_io_op(struct xen_blkif *blkif)
/* Apply all sanity checks to /private copy/ of request. */
barrier();
-
- if (dispatch_rw_block_io(blkif, &req, pending_req))
+ if (unlikely(req.operation == BLKIF_OP_DISCARD)) {
+ free_req(pending_req);
+ if (dispatch_discard_io(blkif, &req))
+ break;
+ } else if (dispatch_rw_block_io(blkif, &req, pending_req))
break;
/* Yield point for this unbounded loop. */
@@ -633,10 +630,6 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
blkif->st_f_req++;
operation = WRITE_FLUSH;
break;
- case BLKIF_OP_DISCARD:
- blkif->st_ds_req++;
- operation = REQ_DISCARD;
- break;
default:
operation = 0; /* make gcc happy */
goto fail_response;
@@ -644,9 +637,9 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
}
/* Check that the number of segments is sane. */
- nseg = req->nr_segments;
- if (unlikely(nseg == 0 && operation != WRITE_FLUSH &&
- operation != REQ_DISCARD) ||
+ nseg = req->u.rw.nr_segments;
+
+ if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
nseg);
@@ -654,12 +647,12 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
goto fail_response;
}
- preq.dev = req->handle;
+ preq.dev = req->u.rw.handle;
preq.sector_number = req->u.rw.sector_number;
preq.nr_sects = 0;
pending_req->blkif = blkif;
- pending_req->id = req->id;
+ pending_req->id = req->u.rw.id;
pending_req->operation = req->operation;
pending_req->status = BLKIF_RSP_OKAY;
pending_req->nr_pages = nseg;
@@ -707,7 +700,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
* the hypercall to unmap the grants - that is all done in
* xen_blkbk_unmap.
*/
- if (operation != REQ_DISCARD && xen_blkbk_map(req, pending_req, seg))
+ if (xen_blkbk_map(req, pending_req, seg))
goto fail_flush;
/*
@@ -739,23 +732,16 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
/* This will be hit if the operation was a flush or discard. */
if (!bio) {
- BUG_ON(operation != WRITE_FLUSH && operation != REQ_DISCARD);
+ BUG_ON(operation != WRITE_FLUSH);
- if (operation == WRITE_FLUSH) {
- bio = bio_alloc(GFP_KERNEL, 0);
- if (unlikely(bio == NULL))
- goto fail_put_bio;
+ bio = bio_alloc(GFP_KERNEL, 0);
+ if (unlikely(bio == NULL))
+ goto fail_put_bio;
- biolist[nbio++] = bio;
- bio->bi_bdev = preq.bdev;
- bio->bi_private = pending_req;
- bio->bi_end_io = end_block_io_op;
- } else if (operation == REQ_DISCARD) {
- xen_blk_discard(blkif, req);
- xen_blkif_put(blkif);
- free_req(pending_req);
- return 0;
- }
+ biolist[nbio++] = bio;
+ bio->bi_bdev = preq.bdev;
+ bio->bi_private = pending_req;
+ bio->bi_end_io = end_block_io_op;
}
/*
@@ -784,7 +770,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
xen_blkbk_unmap(pending_req);
fail_response:
/* Haven't submitted any bio's yet. */
- make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
+ make_response(blkif, req->u.rw.id, req->operation, BLKIF_RSP_ERROR);
free_req(pending_req);
msleep(1); /* back off a bit */
return -EIO;
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index dfb1b3a..d0ee7ed 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -60,58 +60,66 @@ struct blkif_common_response {
char dummy;
};
-/* i386 protocol version */
-#pragma pack(push, 4)
-
struct blkif_x86_32_request_rw {
+ uint8_t nr_segments; /* number of segments */
+ blkif_vdev_t handle; /* only for read/write requests */
+ uint64_t id; /* private guest value, echoed in resp */
blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-};
+} __attribute__((__packed__));
struct blkif_x86_32_request_discard {
+ uint8_t flag; /* BLKIF_DISCARD_SECURE or zero */
+ blkif_vdev_t _pad1; /* was "handle" for read/write requests */
+ uint64_t id; /* private guest value, echoed in resp */
blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
- uint64_t nr_sectors;
-};
+ uint64_t nr_sectors;
+} __attribute__((__packed__));
struct blkif_x86_32_request {
uint8_t operation; /* BLKIF_OP_??? */
- uint8_t nr_segments; /* number of segments */
- blkif_vdev_t handle; /* only for read/write requests */
- uint64_t id; /* private guest value, echoed in resp */
union {
struct blkif_x86_32_request_rw rw;
struct blkif_x86_32_request_discard discard;
} u;
-};
+} __attribute__((__packed__));
+
+/* i386 protocol version */
+#pragma pack(push, 4)
struct blkif_x86_32_response {
uint64_t id; /* copied from request */
uint8_t operation; /* copied from request */
int16_t status; /* BLKIF_RSP_??? */
};
#pragma pack(pop)
-
/* x86_64 protocol version */
struct blkif_x86_64_request_rw {
+ uint8_t nr_segments; /* number of segments */
+ blkif_vdev_t handle; /* only for read/write requests */
+ uint32_t _pad1; /* offsetof(blkif_request..,u.rw.id)==8 */
+ uint64_t id;
blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
-};
+} __attribute__((__packed__));
struct blkif_x86_64_request_discard {
+ uint8_t flag; /* BLKIF_DISCARD_SECURE or zero */
+ blkif_vdev_t _pad1; /* was "handle" for read/write requests */
+ uint32_t _pad2; /* offsetof(blkif_..,u.discard.id)==8 */
+ uint64_t id;
blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
- uint64_t nr_sectors;
-};
+ uint64_t nr_sectors;
+} __attribute__((__packed__));
struct blkif_x86_64_request {
uint8_t operation; /* BLKIF_OP_??? */
- uint8_t nr_segments; /* number of segments */
- blkif_vdev_t handle; /* only for read/write requests */
- uint64_t __attribute__((__aligned__(8))) id;
union {
struct blkif_x86_64_request_rw rw;
struct blkif_x86_64_request_discard discard;
} u;
-};
+} __attribute__((__packed__));
+
struct blkif_x86_64_response {
uint64_t __attribute__((__aligned__(8))) id;
uint8_t operation; /* copied from request */
@@ -156,6 +164,7 @@ struct xen_vbd {
/* Cached size parameter. */
sector_t size;
bool flush_support;
+ bool discard_secure;
};
struct backend_info;
@@ -237,22 +246,23 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
{
int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
dst->operation = src->operation;
- dst->nr_segments = src->nr_segments;
- dst->handle = src->handle;
- dst->id = src->id;
switch (src->operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER:
case BLKIF_OP_FLUSH_DISKCACHE:
+ dst->u.rw.nr_segments = src->u.rw.nr_segments;
+ dst->u.rw.handle = src->u.rw.handle;
+ dst->u.rw.id = src->u.rw.id;
dst->u.rw.sector_number = src->u.rw.sector_number;
barrier();
- if (n > dst->nr_segments)
- n = dst->nr_segments;
+ if (n > dst->u.rw.nr_segments)
+ n = dst->u.rw.nr_segments;
for (i = 0; i < n; i++)
dst->u.rw.seg[i] = src->u.rw.seg[i];
break;
case BLKIF_OP_DISCARD:
+ dst->u.discard.flag = src->u.discard.flag;
dst->u.discard.sector_number = src->u.discard.sector_number;
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break;
@@ -266,22 +276,23 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
{
int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
dst->operation = src->operation;
- dst->nr_segments = src->nr_segments;
- dst->handle = src->handle;
- dst->id = src->id;
switch (src->operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER:
case BLKIF_OP_FLUSH_DISKCACHE:
+ dst->u.rw.nr_segments = src->u.rw.nr_segments;
+ dst->u.rw.handle = src->u.rw.handle;
+ dst->u.rw.id = src->u.rw.id;
dst->u.rw.sector_number = src->u.rw.sector_number;
barrier();
- if (n > dst->nr_segments)
- n = dst->nr_segments;
+ if (n > dst->u.rw.nr_segments)
+ n = dst->u.rw.nr_segments;
for (i = 0; i < n; i++)
dst->u.rw.seg[i] = src->u.rw.seg[i];
break;
case BLKIF_OP_DISCARD:
+ dst->u.discard.flag = src->u.discard.flag;
dst->u.discard.sector_number = src->u.discard.sector_number;
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break;
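The explicit padding and __packed attributes added to common.h exist to pin the guest-visible layout: the comments record that u.rw.id and u.discard.id must both land at byte offset 8 of the 64-bit request. That invariant can also be spelled out as compile-time assertions; a sketch, assuming the structures from this header are in scope (illustrative only, the series adds no such checks):

#include <linux/kernel.h>
#include <linux/stddef.h>

/* Compile-time checks for the ABI offsets the _pad fields preserve. */
static inline void blkif_x86_64_layout_checks(void)
{
	BUILD_BUG_ON(offsetof(struct blkif_x86_64_request, u.rw.id) != 8);
	BUILD_BUG_ON(offsetof(struct blkif_x86_64_request,
			      u.discard.id) != 8);
}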
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index f759ad4..24a2fb5 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -338,6 +338,9 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
if (q && q->flush_flags)
vbd->flush_support = true;
+ if (q && blk_queue_secdiscard(q))
+ vbd->discard_secure = true;
+
DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
handle, blkif->domid);
return 0;
@@ -420,6 +423,15 @@ int xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
state = 1;
blkif->blk_backend_type = BLKIF_BACKEND_PHY;
}
+ /* Optional. */
+ err = xenbus_printf(xbt, dev->nodename,
+ "discard-secure", "%d",
+ blkif->vbd.discard_secure);
+ if (err) {
+ xenbus_dev_fatal(dev, err,
+ "writting discard-secure");
+ goto kfree;
+ }
}
} else {
err = PTR_ERR(type);
@@ -613,7 +625,7 @@ static void frontend_changed(struct xenbus_device *dev,
case XenbusStateConnected:
/*
* Ensure we connect even when two watches fire in
- * close successsion and we miss the intermediate value
+ * close succession and we miss the intermediate value
* of frontend_state.
*/
if (dev->state == XenbusStateConnected)
@@ -787,17 +799,14 @@ static const struct xenbus_device_id xen_blkbk_ids[] = {
};
-static struct xenbus_driver xen_blkbk = {
- .name = "vbd",
- .owner = THIS_MODULE,
- .ids = xen_blkbk_ids,
+static DEFINE_XENBUS_DRIVER(xen_blkbk, ,
.probe = xen_blkbk_probe,
.remove = xen_blkbk_remove,
.otherend_changed = frontend_changed
-};
+);
int xen_blkif_xenbus_init(void)
{
- return xenbus_register_backend(&xen_blkbk);
+ return xenbus_register_backend(&xen_blkbk_driver);
}
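Judging from the call sites in this file and in blkfront below, DEFINE_XENBUS_DRIVER(name, , ...) declares a struct xenbus_driver called name##_driver, wires it to the name##_ids table and supplies the name/owner boilerplate that the removed open-coded initialisers carried, which is why registration now takes &xen_blkbk_driver. A rough equivalent of what the backend conversion leaves behind (the real macro body in <xen/xenbus.h> is not reproduced here and may differ in detail):

/* Approximate effect of DEFINE_XENBUS_DRIVER(xen_blkbk, , ...). */
static struct xenbus_driver xen_blkbk_driver = {
	.ids			= xen_blkbk_ids,
	.probe			= xen_blkbk_probe,
	.remove			= xen_blkbk_remove,
	.otherend_changed	= frontend_changed,
	/* .driver.name and .driver.owner are filled in by the macro */
};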
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 7b2ec59..2f22874 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -98,7 +98,8 @@ struct blkfront_info
unsigned long shadow_free;
unsigned int feature_flush;
unsigned int flush_op;
- unsigned int feature_discard;
+ unsigned int feature_discard:1;
+ unsigned int feature_secdiscard:1;
unsigned int discard_granularity;
unsigned int discard_alignment;
int is_ready;
@@ -135,15 +136,15 @@ static int get_id_from_freelist(struct blkfront_info *info)
{
unsigned long free = info->shadow_free;
BUG_ON(free >= BLK_RING_SIZE);
- info->shadow_free = info->shadow[free].req.id;
- info->shadow[free].req.id = 0x0fffffee; /* debug */
+ info->shadow_free = info->shadow[free].req.u.rw.id;
+ info->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
return free;
}
static void add_id_to_freelist(struct blkfront_info *info,
unsigned long id)
{
- info->shadow[id].req.id = info->shadow_free;
+ info->shadow[id].req.u.rw.id = info->shadow_free;
info->shadow[id].request = NULL;
info->shadow_free = id;
}
@@ -156,7 +157,7 @@ static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
if (end > nr_minors) {
unsigned long *bitmap, *old;
- bitmap = kzalloc(BITS_TO_LONGS(end) * sizeof(*bitmap),
+ bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap),
GFP_KERNEL);
if (bitmap == NULL)
return -ENOMEM;
@@ -287,9 +288,9 @@ static int blkif_queue_request(struct request *req)
id = get_id_from_freelist(info);
info->shadow[id].request = req;
- ring_req->id = id;
+ ring_req->u.rw.id = id;
ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
- ring_req->handle = info->handle;
+ ring_req->u.rw.handle = info->handle;
ring_req->operation = rq_data_dir(req) ?
BLKIF_OP_WRITE : BLKIF_OP_READ;
@@ -305,16 +306,21 @@ static int blkif_queue_request(struct request *req)
ring_req->operation = info->flush_op;
}
- if (unlikely(req->cmd_flags & REQ_DISCARD)) {
+ if (unlikely(req->cmd_flags & (REQ_DISCARD | REQ_SECURE))) {
/* id, sector_number and handle are set above. */
ring_req->operation = BLKIF_OP_DISCARD;
- ring_req->nr_segments = 0;
ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
+ if ((req->cmd_flags & REQ_SECURE) && info->feature_secdiscard)
+ ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
+ else
+ ring_req->u.discard.flag = 0;
} else {
- ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
- BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
+ ring_req->u.rw.nr_segments = blk_rq_map_sg(req->q, req,
+ info->sg);
+ BUG_ON(ring_req->u.rw.nr_segments >
+ BLKIF_MAX_SEGMENTS_PER_REQUEST);
- for_each_sg(info->sg, sg, ring_req->nr_segments, i) {
+ for_each_sg(info->sg, sg, ring_req->u.rw.nr_segments, i) {
buffer_mfn = pfn_to_mfn(page_to_pfn(sg_page(sg)));
fsect = sg->offset >> 9;
lsect = fsect + (sg->length >> 9) - 1;
@@ -424,6 +430,8 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
blk_queue_max_discard_sectors(rq, get_capacity(gd));
rq->limits.discard_granularity = info->discard_granularity;
rq->limits.discard_alignment = info->discard_alignment;
+ if (info->feature_secdiscard)
+ queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, rq);
}
/* Hard sector size and max sectors impersonate the equiv. hardware. */
@@ -705,7 +713,9 @@ static void blkif_free(struct blkfront_info *info, int suspend)
static void blkif_completion(struct blk_shadow *s)
{
int i;
- for (i = 0; i < s->req.nr_segments; i++)
+ /* Do not call this for BLKIF_OP_DISCARD: nr_segments shares its
+ * slot in the union with the discard flag. */
+ for (i = 0; i < s->req.u.rw.nr_segments; i++)
gnttab_end_foreign_access(s->req.u.rw.seg[i].gref, 0, 0UL);
}
@@ -736,7 +746,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
id = bret->id;
req = info->shadow[id].request;
- blkif_completion(&info->shadow[id]);
+ if (bret->operation != BLKIF_OP_DISCARD)
+ blkif_completion(&info->shadow[id]);
add_id_to_freelist(info, id);
@@ -749,7 +760,9 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
info->gd->disk_name);
error = -EOPNOTSUPP;
info->feature_discard = 0;
+ info->feature_secdiscard = 0;
queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
+ queue_flag_clear(QUEUE_FLAG_SECDISCARD, rq);
}
__blk_end_request_all(req, error);
break;
@@ -763,7 +776,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
error = -EOPNOTSUPP;
}
if (unlikely(bret->status == BLKIF_RSP_ERROR &&
- info->shadow[id].req.nr_segments == 0)) {
+ info->shadow[id].req.u.rw.nr_segments == 0)) {
printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n",
info->flush_op == BLKIF_OP_WRITE_BARRIER ?
"barrier" : "flush disk cache",
@@ -984,8 +997,8 @@ static int blkfront_probe(struct xenbus_device *dev,
INIT_WORK(&info->work, blkif_restart_queue);
for (i = 0; i < BLK_RING_SIZE; i++)
- info->shadow[i].req.id = i+1;
- info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
+ info->shadow[i].req.u.rw.id = i+1;
+ info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;
/* Front end dir is a number, which is used as the id. */
info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
@@ -1019,9 +1032,9 @@ static int blkif_recover(struct blkfront_info *info)
/* Stage 2: Set up free list. */
memset(&info->shadow, 0, sizeof(info->shadow));
for (i = 0; i < BLK_RING_SIZE; i++)
- info->shadow[i].req.id = i+1;
+ info->shadow[i].req.u.rw.id = i+1;
info->shadow_free = info->ring.req_prod_pvt;
- info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
+ info->shadow[BLK_RING_SIZE-1].req.u.rw.id = 0x0fffffff;
/* Stage 3: Find pending requests and requeue them. */
for (i = 0; i < BLK_RING_SIZE; i++) {
@@ -1034,17 +1047,19 @@ static int blkif_recover(struct blkfront_info *info)
*req = copy[i].req;
/* We get a new request id, and must reset the shadow state. */
- req->id = get_id_from_freelist(info);
- memcpy(&info->shadow[req->id], &copy[i], sizeof(copy[i]));
+ req->u.rw.id = get_id_from_freelist(info);
+ memcpy(&info->shadow[req->u.rw.id], &copy[i], sizeof(copy[i]));
+ if (req->operation != BLKIF_OP_DISCARD) {
/* Rewrite any grant references invalidated by susp/resume. */
- for (j = 0; j < req->nr_segments; j++)
- gnttab_grant_foreign_access_ref(
- req->u.rw.seg[j].gref,
- info->xbdev->otherend_id,
- pfn_to_mfn(info->shadow[req->id].frame[j]),
- rq_data_dir(info->shadow[req->id].request));
- info->shadow[req->id].req = *req;
+ for (j = 0; j < req->u.rw.nr_segments; j++)
+ gnttab_grant_foreign_access_ref(
+ req->u.rw.seg[j].gref,
+ info->xbdev->otherend_id,
+ pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]),
+ rq_data_dir(info->shadow[req->u.rw.id].request));
+ }
+ info->shadow[req->u.rw.id].req = *req;
info->ring.req_prod_pvt++;
}
@@ -1135,11 +1150,13 @@ static void blkfront_setup_discard(struct blkfront_info *info)
char *type;
unsigned int discard_granularity;
unsigned int discard_alignment;
+ unsigned int discard_secure;
type = xenbus_read(XBT_NIL, info->xbdev->otherend, "type", NULL);
if (IS_ERR(type))
return;
+ info->feature_secdiscard = 0;
if (strncmp(type, "phy", 3) == 0) {
err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
"discard-granularity", "%u", &discard_granularity,
@@ -1150,6 +1167,12 @@ static void blkfront_setup_discard(struct blkfront_info *info)
info->discard_granularity = discard_granularity;
info->discard_alignment = discard_alignment;
}
+ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
+ "discard-secure", "%d", &discard_secure,
+ NULL);
+ if (!err)
+ info->feature_secdiscard = discard_secure;
+
} else if (strncmp(type, "file", 4) == 0)
info->feature_discard = 1;
@@ -1437,16 +1460,13 @@ static const struct xenbus_device_id blkfront_ids[] = {
{ "" }
};
-static struct xenbus_driver blkfront = {
- .name = "vbd",
- .owner = THIS_MODULE,
- .ids = blkfront_ids,
+static DEFINE_XENBUS_DRIVER(blkfront, ,
.probe = blkfront_probe,
.remove = blkfront_remove,
.resume = blkfront_resume,
.otherend_changed = blkback_changed,
.is_ready = blkfront_is_ready,
-};
+);
static int __init xlblk_init(void)
{
@@ -1461,7 +1481,7 @@ static int __init xlblk_init(void)
return -ENODEV;
}
- ret = xenbus_register_frontend(&blkfront);
+ ret = xenbus_register_frontend(&blkfront_driver);
if (ret) {
unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
return ret;
@@ -1474,7 +1494,7 @@ module_init(xlblk_init);
static void __exit xlblk_exit(void)
{
- return xenbus_unregister_driver(&blkfront);
+ return xenbus_unregister_driver(&blkfront_driver);
}
module_exit(xlblk_exit);
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index fb1975d..1a17e33 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -456,7 +456,7 @@ static inline void ace_fsm_yieldirq(struct ace_device *ace)
{
dev_dbg(ace->dev, "ace_fsm_yieldirq()\n");
- if (ace->irq == NO_IRQ)
+ if (!ace->irq)
/* No IRQ assigned, so need to poll */
tasklet_schedule(&ace->fsm_tasklet);
ace->fsm_continue_flag = 0;
@@ -1034,12 +1034,12 @@ static int __devinit ace_setup(struct ace_device *ace)
ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ);
/* Now we can hook up the irq handler */
- if (ace->irq != NO_IRQ) {
+ if (ace->irq) {
rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace);
if (rc) {
/* Failure - fall back to polled mode */
dev_err(ace->dev, "request_irq failed\n");
- ace->irq = NO_IRQ;
+ ace->irq = 0;
}
}
@@ -1086,7 +1086,7 @@ static void __devexit ace_teardown(struct ace_device *ace)
tasklet_kill(&ace->fsm_tasklet);
- if (ace->irq != NO_IRQ)
+ if (ace->irq)
free_irq(ace->irq, ace);
iounmap(ace->baseaddr);
@@ -1156,7 +1156,7 @@ static int __devinit ace_probe(struct platform_device *dev)
resource_size_t physaddr = 0;
int bus_width = ACE_BUS_WIDTH_16; /* FIXME: should not be hard coded */
u32 id = dev->id;
- int irq = NO_IRQ;
+ int irq = 0;
int i;
dev_dbg(&dev->dev, "ace_probe(%p)\n", dev);
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 106beb1..07f14d1 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -30,6 +30,7 @@
#include <net/bluetooth/bluetooth.h>
#define VERSION "1.0"
+#define ATH3K_FIRMWARE "ath3k-1.fw"
#define ATH3K_DNLOAD 0x01
#define ATH3K_GETSTATE 0x05
@@ -400,9 +401,15 @@ static int ath3k_probe(struct usb_interface *intf,
return 0;
}
- if (request_firmware(&firmware, "ath3k-1.fw", &udev->dev) < 0) {
- BT_ERR("Error loading firmware");
- return -EIO;
+ ret = request_firmware(&firmware, ATH3K_FIRMWARE, &udev->dev);
+ if (ret < 0) {
+ if (ret == -ENOENT)
+ BT_ERR("Firmware file \"%s\" not found",
+ ATH3K_FIRMWARE);
+ else
+ BT_ERR("Firmware file \"%s\" request failed (err=%d)",
+ ATH3K_FIRMWARE, ret);
+ return ret;
}
ret = ath3k_load_firmware(udev, firmware);
@@ -423,22 +430,10 @@ static struct usb_driver ath3k_driver = {
.id_table = ath3k_table,
};
-static int __init ath3k_init(void)
-{
- BT_INFO("Atheros AR30xx firmware driver ver %s", VERSION);
- return usb_register(&ath3k_driver);
-}
-
-static void __exit ath3k_exit(void)
-{
- usb_deregister(&ath3k_driver);
-}
-
-module_init(ath3k_init);
-module_exit(ath3k_exit);
+module_usb_driver(ath3k_driver);
MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Atheros AR30xx firmware driver");
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
-MODULE_FIRMWARE("ath3k-1.fw");
+MODULE_FIRMWARE(ATH3K_FIRMWARE);
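This hunk and the Bluetooth drivers below all drop the same hand-written init/exit pair in favour of module_usb_driver(). The helper generates equivalent registration code; a simplified sketch of what it amounts to for ath3k (the real macro in <linux/usb.h> is built on module_driver() and may differ in detail):

/* Roughly what module_usb_driver(ath3k_driver) expands to. */
static int __init ath3k_driver_init(void)
{
	return usb_register(&ath3k_driver);
}
module_init(ath3k_driver_init);

static void __exit ath3k_driver_exit(void)
{
	usb_deregister(&ath3k_driver);
}
module_exit(ath3k_driver_exit);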
diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c
index 54952ab..1e742a5 100644
--- a/drivers/bluetooth/bcm203x.c
+++ b/drivers/bluetooth/bcm203x.c
@@ -281,26 +281,7 @@ static struct usb_driver bcm203x_driver = {
.id_table = bcm203x_table,
};
-static int __init bcm203x_init(void)
-{
- int err;
-
- BT_INFO("Broadcom Blutonium firmware driver ver %s", VERSION);
-
- err = usb_register(&bcm203x_driver);
- if (err < 0)
- BT_ERR("Failed to register USB driver");
-
- return err;
-}
-
-static void __exit bcm203x_exit(void)
-{
- usb_deregister(&bcm203x_driver);
-}
-
-module_init(bcm203x_init);
-module_exit(bcm203x_exit);
+module_usb_driver(bcm203x_driver);
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Broadcom Blutonium firmware driver ver " VERSION);
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index 61b5914..a323bae 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -751,9 +751,7 @@ static void bfusb_disconnect(struct usb_interface *intf)
bfusb_close(hdev);
- if (hci_unregister_dev(hdev) < 0)
- BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+ hci_unregister_dev(hdev);
hci_free_dev(hdev);
}
@@ -764,26 +762,7 @@ static struct usb_driver bfusb_driver = {
.id_table = bfusb_table,
};
-static int __init bfusb_init(void)
-{
- int err;
-
- BT_INFO("BlueFRITZ! USB driver ver %s", VERSION);
-
- err = usb_register(&bfusb_driver);
- if (err < 0)
- BT_ERR("Failed to register BlueFRITZ! USB driver");
-
- return err;
-}
-
-static void __exit bfusb_exit(void)
-{
- usb_deregister(&bfusb_driver);
-}
-
-module_init(bfusb_init);
-module_exit(bfusb_exit);
+module_usb_driver(bfusb_driver);
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("BlueFRITZ! USB driver ver " VERSION);
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index aed1904..c6a0c61 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -844,9 +844,7 @@ static int bluecard_close(bluecard_info_t *info)
/* Turn FPGA off */
outb(0x80, iobase + 0x30);
- if (hci_unregister_dev(hdev) < 0)
- BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+ hci_unregister_dev(hdev);
hci_free_dev(hdev);
return 0;
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index 751b338..6283160 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -521,20 +521,7 @@ static struct usb_driver bpa10x_driver = {
.id_table = bpa10x_table,
};
-static int __init bpa10x_init(void)
-{
- BT_INFO("Digianswer Bluetooth USB driver ver %s", VERSION);
-
- return usb_register(&bpa10x_driver);
-}
-
-static void __exit bpa10x_exit(void)
-{
- usb_deregister(&bpa10x_driver);
-}
-
-module_init(bpa10x_init);
-module_exit(bpa10x_exit);
+module_usb_driver(bpa10x_driver);
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Digianswer Bluetooth USB driver ver " VERSION);
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 4fc0194..0c97e5d 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -636,9 +636,7 @@ static int bt3c_close(bt3c_info_t *info)
bt3c_hci_close(hdev);
- if (hci_unregister_dev(hdev) < 0)
- BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+ hci_unregister_dev(hdev);
hci_free_dev(hdev);
return 0;
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index a88a78c..6c3defa 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -475,8 +475,6 @@ static int btmrvl_service_main_thread(void *data)
init_waitqueue_entry(&wait, current);
- current->flags |= PF_NOFREEZE;
-
for (;;) {
add_wait_queue(&thread->wait_q, &wait);
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index 526b618..200b3a2 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -565,9 +565,7 @@ static int btuart_close(btuart_info_t *info)
spin_unlock_irqrestore(&(info->lock), flags);
- if (hci_unregister_dev(hdev) < 0)
- BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+ hci_unregister_dev(hdev);
hci_free_dev(hdev);
return 0;
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index eabc437..789c9b5 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -37,13 +37,13 @@
#define VERSION "0.6"
-static int ignore_dga;
-static int ignore_csr;
-static int ignore_sniffer;
-static int disable_scofix;
-static int force_scofix;
+static bool ignore_dga;
+static bool ignore_csr;
+static bool ignore_sniffer;
+static bool disable_scofix;
+static bool force_scofix;
-static int reset = 1;
+static bool reset = 1;
static struct usb_driver btusb_driver;
@@ -101,6 +101,8 @@ static struct usb_device_id btusb_table[] = {
{ USB_DEVICE(0x0c10, 0x0000) },
/* Broadcom BCM20702A0 */
+ { USB_DEVICE(0x0a5c, 0x21e3) },
+ { USB_DEVICE(0x0a5c, 0x21f3) },
{ USB_DEVICE(0x413c, 0x8197) },
{ } /* Terminating entry */
@@ -315,7 +317,8 @@ static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags)
err = usb_submit_urb(urb, mem_flags);
if (err < 0) {
- BT_ERR("%s urb %p submission failed (%d)",
+ if (err != -EPERM && err != -ENODEV)
+ BT_ERR("%s urb %p submission failed (%d)",
hdev->name, urb, -err);
usb_unanchor_urb(urb);
}
@@ -400,7 +403,8 @@ static int btusb_submit_bulk_urb(struct hci_dev *hdev, gfp_t mem_flags)
err = usb_submit_urb(urb, mem_flags);
if (err < 0) {
- BT_ERR("%s urb %p submission failed (%d)",
+ if (err != -EPERM && err != -ENODEV)
+ BT_ERR("%s urb %p submission failed (%d)",
hdev->name, urb, -err);
usb_unanchor_urb(urb);
}
@@ -506,15 +510,10 @@ static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags)
pipe = usb_rcvisocpipe(data->udev, data->isoc_rx_ep->bEndpointAddress);
- urb->dev = data->udev;
- urb->pipe = pipe;
- urb->context = hdev;
- urb->complete = btusb_isoc_complete;
- urb->interval = data->isoc_rx_ep->bInterval;
+ usb_fill_int_urb(urb, data->udev, pipe, buf, size, btusb_isoc_complete,
+ hdev, data->isoc_rx_ep->bInterval);
urb->transfer_flags = URB_FREE_BUFFER | URB_ISO_ASAP;
- urb->transfer_buffer = buf;
- urb->transfer_buffer_length = size;
__fill_isoc_descriptor(urb, size,
le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize));
@@ -523,7 +522,8 @@ static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags)
err = usb_submit_urb(urb, mem_flags);
if (err < 0) {
- BT_ERR("%s urb %p submission failed (%d)",
+ if (err != -EPERM && err != -ENODEV)
+ BT_ERR("%s urb %p submission failed (%d)",
hdev->name, urb, -err);
usb_unanchor_urb(urb);
}
@@ -770,7 +770,9 @@ skip_waking:
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err < 0) {
- BT_ERR("%s urb %p submission failed", hdev->name, urb);
+ if (err != -EPERM && err != -ENODEV)
+ BT_ERR("%s urb %p submission failed (%d)",
+ hdev->name, urb, -err);
kfree(urb->setup_packet);
usb_unanchor_urb(urb);
} else {
@@ -1222,20 +1224,7 @@ static struct usb_driver btusb_driver = {
.supports_autosuspend = 1,
};
-static int __init btusb_init(void)
-{
- BT_INFO("Generic Bluetooth USB driver ver %s", VERSION);
-
- return usb_register(&btusb_driver);
-}
-
-static void __exit btusb_exit(void)
-{
- usb_deregister(&btusb_driver);
-}
-
-module_init(btusb_init);
-module_exit(btusb_exit);
+module_usb_driver(btusb_driver);
module_param(ignore_dga, bool, 0644);
MODULE_PARM_DESC(ignore_dga, "Ignore devices with id 08fd:0001");
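
The int-to-bool flips above (and in the other drivers touched by this series) line the variables up with the bool type they are registered as; module_param(..., bool, ...) expects the backing variable to actually be a bool. A small self-contained sketch of the pattern, using a hypothetical parameter that is not part of the patch:

        #include <linux/init.h>
        #include <linux/module.h>
        #include <linux/moduleparam.h>

        /* Hypothetical example parameter, for illustration only. */
        static bool reset = true;               /* was: static int reset = 1; */
        module_param(reset, bool, 0644);
        MODULE_PARM_DESC(reset, "Reset the device on module load");

        MODULE_LICENSE("GPL");
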
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 5e4c2de..969bb22 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -551,9 +551,7 @@ static int dtl1_close(dtl1_info_t *info)
spin_unlock_irqrestore(&(info->lock), flags);
- if (hci_unregister_dev(hdev) < 0)
- BT_ERR("Can't unregister HCI device %s", hdev->name);
-
+ hci_unregister_dev(hdev);
hci_free_dev(hdev);
return 0;
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 9c5b2dc..a767d4de 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -49,8 +49,8 @@
#define VERSION "0.3"
-static int txcrc = 1;
-static int hciextn = 1;
+static bool txcrc = 1;
+static bool hciextn = 1;
#define BCSP_TXWINSIZE 4
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 48ad2a7..0711448 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -48,7 +48,7 @@
#define VERSION "2.2"
-static int reset = 0;
+static bool reset = 0;
static struct hci_uart_proto *hup[HCI_UART_MAX_PROTO];
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 67c180c..2ed6ab1 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -41,6 +41,8 @@
#define VERSION "1.3"
+static bool amp;
+
struct vhci_data {
struct hci_dev *hdev;
@@ -239,6 +241,9 @@ static int vhci_open(struct inode *inode, struct file *file)
hdev->bus = HCI_VIRTUAL;
hdev->driver_data = data;
+ if (amp)
+ hdev->dev_type = HCI_AMP;
+
hdev->open = vhci_open_dev;
hdev->close = vhci_close_dev;
hdev->flush = vhci_flush;
@@ -264,10 +269,7 @@ static int vhci_release(struct inode *inode, struct file *file)
struct vhci_data *data = file->private_data;
struct hci_dev *hdev = data->hdev;
- if (hci_unregister_dev(hdev) < 0) {
- BT_ERR("Can't unregister HCI device %s", hdev->name);
- }
-
+ hci_unregister_dev(hdev);
hci_free_dev(hdev);
file->private_data = NULL;
@@ -306,6 +308,9 @@ static void __exit vhci_exit(void)
module_init(vhci_init);
module_exit(vhci_exit);
+module_param(amp, bool, 0644);
+MODULE_PARM_DESC(amp, "Create AMP controller device");
+
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION);
MODULE_VERSION(VERSION);
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index f997c27..d620b44 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -267,7 +267,6 @@
#include <linux/module.h>
#include <linux/fs.h>
-#include <linux/buffer_head.h>
#include <linux/major.h>
#include <linux/types.h>
#include <linux/errno.h>
@@ -286,17 +285,15 @@
#include <asm/uaccess.h>
/* used to tell the module to turn on full debugging messages */
-static int debug;
-/* used to keep tray locked at all times */
-static int keeplocked;
+static bool debug;
/* default compatibility mode */
-static int autoclose=1;
-static int autoeject;
-static int lockdoor = 1;
+static bool autoclose=1;
+static bool autoeject;
+static bool lockdoor = 1;
/* will we ever get to use this... sigh. */
-static int check_media_type;
+static bool check_media_type;
/* automatically restart mrw format */
-static int mrw_format_restart = 1;
+static bool mrw_format_restart = 1;
module_param(debug, bool, 0);
module_param(autoclose, bool, 0);
module_param(autoeject, bool, 0);
@@ -1205,7 +1202,7 @@ void cdrom_release(struct cdrom_device_info *cdi, fmode_t mode)
cdinfo(CD_CLOSE, "Use count for \"/dev/%s\" now zero\n", cdi->name);
cdrom_dvd_rw_close_write(cdi);
- if ((cdo->capability & CDC_LOCK) && !keeplocked) {
+ if ((cdo->capability & CDC_LOCK) && !cdi->keeplocked) {
cdinfo(CD_CLOSE, "Unlocking door!\n");
cdo->lock_door(cdi, 0);
}
@@ -1372,7 +1369,7 @@ static int cdrom_select_disc(struct cdrom_device_info *cdi, int slot)
curslot = info->hdr.curslot;
kfree(info);
- if (cdi->use_count > 1 || keeplocked) {
+ if (cdi->use_count > 1 || cdi->keeplocked) {
if (slot == CDSL_CURRENT) {
return curslot;
} else {
@@ -2120,11 +2117,6 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
if (!nr)
return -ENOMEM;
- if (!access_ok(VERIFY_WRITE, ubuf, nframes * CD_FRAMESIZE_RAW)) {
- ret = -EFAULT;
- goto out;
- }
-
cgc.data_direction = CGC_DATA_READ;
while (nframes > 0) {
if (nr > nframes)
@@ -2133,7 +2125,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
ret = cdrom_read_block(cdi, &cgc, lba, nr, 1, CD_FRAMESIZE_RAW);
if (ret)
break;
- if (__copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) {
+ if (copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) {
ret = -EFAULT;
break;
}
@@ -2141,7 +2133,6 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
nframes -= nr;
lba += nr;
}
-out:
kfree(cgc.buffer);
return ret;
}
@@ -2296,7 +2287,7 @@ static int cdrom_ioctl_eject(struct cdrom_device_info *cdi)
if (!CDROM_CAN(CDC_OPEN_TRAY))
return -ENOSYS;
- if (cdi->use_count != 1 || keeplocked)
+ if (cdi->use_count != 1 || cdi->keeplocked)
return -EBUSY;
if (CDROM_CAN(CDC_LOCK)) {
int ret = cdi->ops->lock_door(cdi, 0);
@@ -2323,7 +2314,7 @@ static int cdrom_ioctl_eject_sw(struct cdrom_device_info *cdi,
if (!CDROM_CAN(CDC_OPEN_TRAY))
return -ENOSYS;
- if (keeplocked)
+ if (cdi->keeplocked)
return -EBUSY;
cdi->options &= ~(CDO_AUTO_CLOSE | CDO_AUTO_EJECT);
@@ -2454,7 +2445,7 @@ static int cdrom_ioctl_lock_door(struct cdrom_device_info *cdi,
if (!CDROM_CAN(CDC_LOCK))
return -EDRIVE_CANT_DO_THIS;
- keeplocked = arg ? 1 : 0;
+ cdi->keeplocked = arg ? 1 : 0;
/*
* Don't unlock the door on multiple opens by default, but allow
@@ -2747,12 +2738,11 @@ int cdrom_ioctl(struct cdrom_device_info *cdi, struct block_device *bdev,
{
void __user *argp = (void __user *)arg;
int ret;
- struct gendisk *disk = bdev->bd_disk;
/*
* Try the generic SCSI command ioctl's first.
*/
- ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp);
+ ret = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
if (ret != -ENOTTY)
return ret;
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 780498d..444f8b6 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -33,7 +33,7 @@
#define ULI_X86_64_ENU_SCR_REG 0x54
static struct resource *aperture_resource;
-static int __initdata agp_try_unsupported = 1;
+static bool __initdata agp_try_unsupported = 1;
static int agp_bridges_found;
static void amd64_tlbflush(struct agp_memory *temp)
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 4b71647..317c28c 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -194,10 +194,10 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
err_out:
if (bridge->driver->needs_scratch_page) {
- void *va = page_address(bridge->scratch_page_page);
+ struct page *page = bridge->scratch_page_page;
- bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP);
- bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE);
+ bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_UNMAP);
+ bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_FREE);
}
if (got_gatt)
bridge->driver->free_gatt_table(bridge);
@@ -221,10 +221,10 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
if (bridge->driver->agp_destroy_page &&
bridge->driver->needs_scratch_page) {
- void *va = page_address(bridge->scratch_page_page);
+ struct page *page = bridge->scratch_page_page;
- bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP);
- bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE);
+ bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_UNMAP);
+ bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_FREE);
}
}
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index b072648..17e05d1 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -514,12 +514,12 @@ static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_
switch (*bridge_agpstat & 7) {
case 4:
*bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
- printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate"
+ printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
"Fixing up support for x2 & x1\n");
break;
case 2:
*bridge_agpstat |= AGPSTAT2_1X;
- printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate"
+ printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
"Fixing up support for x1\n");
break;
default:
@@ -693,7 +693,7 @@ static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_
*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
} else {
- printk(KERN_INFO PFX "Fell back to AGPx4 mode because");
+ printk(KERN_INFO PFX "Fell back to AGPx4 mode because ");
if (!(*bridge_agpstat & AGPSTAT3_8X)) {
printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
*bridge_agpstat, origbridge);
@@ -956,7 +956,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
bridge->driver->cache_flush();
#ifdef CONFIG_X86
if (set_memory_uc((unsigned long)table, 1 << page_order))
- printk(KERN_WARNING "Could not set GATT table memory to UC!");
+ printk(KERN_WARNING "Could not set GATT table memory to UC!\n");
bridge->gatt_table = (void *)table;
#else
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
index 29aacd8..08704ae 100644
--- a/drivers/char/agp/sis-agp.c
+++ b/drivers/char/agp/sis-agp.c
@@ -17,7 +17,7 @@
#define PCI_DEVICE_ID_SI_662 0x0662
#define PCI_DEVICE_ID_SI_671 0x0671
-static int __devinitdata agp_sis_force_delay = 0;
+static bool __devinitdata agp_sis_force_delay = 0;
static int __devinitdata agp_sis_agp_spec = -1;
static int sis_fetch_size(void)
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index 241df2e..f518b99 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -141,17 +141,7 @@ static struct platform_driver atmel_trng_driver = {
},
};
-static int __init atmel_trng_init(void)
-{
- return platform_driver_register(&atmel_trng_driver);
-}
-module_init(atmel_trng_init);
-
-static void __exit atmel_trng_exit(void)
-{
- platform_driver_unregister(&atmel_trng_driver);
-}
-module_exit(atmel_trng_exit);
+module_platform_driver(atmel_trng_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
diff --git a/drivers/char/hw_random/n2-drv.c b/drivers/char/hw_random/n2-drv.c
index c3de70d..ebd48f0 100644
--- a/drivers/char/hw_random/n2-drv.c
+++ b/drivers/char/hw_random/n2-drv.c
@@ -770,15 +770,4 @@ static struct platform_driver n2rng_driver = {
.remove = __devexit_p(n2rng_remove),
};
-static int __init n2rng_init(void)
-{
- return platform_driver_register(&n2rng_driver);
-}
-
-static void __exit n2rng_exit(void)
-{
- platform_driver_unregister(&n2rng_driver);
-}
-
-module_init(n2rng_init);
-module_exit(n2rng_exit);
+module_platform_driver(n2rng_driver);
diff --git a/drivers/char/hw_random/nomadik-rng.c b/drivers/char/hw_random/nomadik-rng.c
index 52e08ca..3d3c1e6 100644
--- a/drivers/char/hw_random/nomadik-rng.c
+++ b/drivers/char/hw_random/nomadik-rng.c
@@ -95,6 +95,8 @@ static struct amba_id nmk_rng_ids[] = {
{0, 0},
};
+MODULE_DEVICE_TABLE(amba, nmk_rng_ids);
+
static struct amba_driver nmk_rng_driver = {
.drv = {
.owner = THIS_MODULE,
diff --git a/drivers/char/hw_random/octeon-rng.c b/drivers/char/hw_random/octeon-rng.c
index 9cd0fec..0943edc 100644
--- a/drivers/char/hw_random/octeon-rng.c
+++ b/drivers/char/hw_random/octeon-rng.c
@@ -131,18 +131,7 @@ static struct platform_driver octeon_rng_driver = {
.remove = __exit_p(octeon_rng_remove),
};
-static int __init octeon_rng_mod_init(void)
-{
- return platform_driver_register(&octeon_rng_driver);
-}
-
-static void __exit octeon_rng_mod_exit(void)
-{
- platform_driver_unregister(&octeon_rng_driver);
-}
-
-module_init(octeon_rng_mod_init);
-module_exit(octeon_rng_mod_exit);
+module_platform_driver(octeon_rng_driver);
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL");
diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c
index 1d50481..3a63267 100644
--- a/drivers/char/hw_random/pasemi-rng.c
+++ b/drivers/char/hw_random/pasemi-rng.c
@@ -148,17 +148,7 @@ static struct platform_driver rng_driver = {
.remove = rng_remove,
};
-static int __init rng_init(void)
-{
- return platform_driver_register(&rng_driver);
-}
-module_init(rng_init);
-
-static void __exit rng_exit(void)
-{
- platform_driver_unregister(&rng_driver);
-}
-module_exit(rng_exit);
+module_platform_driver(rng_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
diff --git a/drivers/char/hw_random/picoxcell-rng.c b/drivers/char/hw_random/picoxcell-rng.c
index 990d55a..97bd891 100644
--- a/drivers/char/hw_random/picoxcell-rng.c
+++ b/drivers/char/hw_random/picoxcell-rng.c
@@ -191,17 +191,7 @@ static struct platform_driver picoxcell_trng_driver = {
},
};
-static int __init picoxcell_trng_init(void)
-{
- return platform_driver_register(&picoxcell_trng_driver);
-}
-module_init(picoxcell_trng_init);
-
-static void __exit picoxcell_trng_exit(void)
-{
- platform_driver_unregister(&picoxcell_trng_driver);
-}
-module_exit(picoxcell_trng_exit);
+module_platform_driver(picoxcell_trng_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamie Iles");
diff --git a/drivers/char/hw_random/ppc4xx-rng.c b/drivers/char/hw_random/ppc4xx-rng.c
index b8afa6a..c51762c 100644
--- a/drivers/char/hw_random/ppc4xx-rng.c
+++ b/drivers/char/hw_random/ppc4xx-rng.c
@@ -139,17 +139,7 @@ static struct platform_driver ppc4xx_rng_driver = {
.remove = ppc4xx_rng_remove,
};
-static int __init ppc4xx_rng_init(void)
-{
- return platform_driver_register(&ppc4xx_rng_driver);
-}
-module_init(ppc4xx_rng_init);
-
-static void __exit ppc4xx_rng_exit(void)
-{
- platform_driver_unregister(&ppc4xx_rng_driver);
-}
-module_exit(ppc4xx_rng_exit);
+module_platform_driver(ppc4xx_rng_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Josh Boyer <jwboyer@linux.vnet.ibm.com>");
diff --git a/drivers/char/hw_random/timeriomem-rng.c b/drivers/char/hw_random/timeriomem-rng.c
index a8428e6..f1a1618 100644
--- a/drivers/char/hw_random/timeriomem-rng.c
+++ b/drivers/char/hw_random/timeriomem-rng.c
@@ -149,18 +149,7 @@ static struct platform_driver timeriomem_rng_driver = {
.remove = __devexit_p(timeriomem_rng_remove),
};
-static int __init timeriomem_rng_init(void)
-{
- return platform_driver_register(&timeriomem_rng_driver);
-}
-
-static void __exit timeriomem_rng_exit(void)
-{
- platform_driver_unregister(&timeriomem_rng_driver);
-}
-
-module_init(timeriomem_rng_init);
-module_exit(timeriomem_rng_exit);
+module_platform_driver(timeriomem_rng_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index fd699cc..723725b 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -47,7 +47,7 @@ static void register_buffer(u8 *buf, size_t size)
sg_init_one(&sg, buf, size);
/* There should always be room for one buffer. */
- if (virtqueue_add_buf(vq, &sg, 0, 1, buf) < 0)
+ if (virtqueue_add_buf(vq, &sg, 0, 1, buf, GFP_KERNEL) < 0)
BUG();
virtqueue_kick(vq);
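
virtqueue_add_buf() takes an explicit gfp_t in this series, which is why the call above now passes GFP_KERNEL (and why the virtio_console callers below pass GFP_ATOMIC, since they may run with locks held). A small sketch of the call pattern; the helper name is illustrative, and linux/virtio.h remains the authoritative reference for the signature:

        #include <linux/gfp.h>
        #include <linux/kernel.h>
        #include <linux/scatterlist.h>
        #include <linux/virtio.h>

        /* Queue one buffer for the host to fill; may sleep, hence GFP_KERNEL. */
        static void example_queue_inbuf(struct virtqueue *vq, void *buf, unsigned int size)
        {
                struct scatterlist sg;

                sg_init_one(&sg, buf, size);
                /* 0 "out" entries, 1 "in" entry, buf as the token returned on completion. */
                if (virtqueue_add_buf(vq, &sg, 0, 1, buf, GFP_KERNEL) < 0)
                        BUG();
                virtqueue_kick(vq);
        }
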
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index 6e40072..40cc0cf2 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -69,19 +69,19 @@ MODULE_AUTHOR("Massimo Dal Zotto (dz@debian.org)");
MODULE_DESCRIPTION("Driver for accessing SMM BIOS on Dell laptops");
MODULE_LICENSE("GPL");
-static int force;
+static bool force;
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Force loading without checking for supported models");
-static int ignore_dmi;
+static bool ignore_dmi;
module_param(ignore_dmi, bool, 0);
MODULE_PARM_DESC(ignore_dmi, "Continue probing hardware even if DMI data does not match");
-static int restricted;
+static bool restricted;
module_param(restricted, bool, 0);
MODULE_PARM_DESC(restricted, "Allow fan control if SYS_ADMIN capability set");
-static int power_status;
+static bool power_status;
module_param(power_status, bool, 0600);
MODULE_PARM_DESC(power_status, "Report power status in /proc/i8k");
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index 3ed20e8..cdd4c09f 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -560,7 +560,7 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
BT_CONTROL(BT_H_BUSY); /* set */
/*
- * Uncached, ordered writes should just proceeed serially but
+ * Uncached, ordered writes should just proceed serially but
* some BMCs don't clear B2H_ATN with one hit. Fast-path a
* workaround without too much penalty to the general case.
*/
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 9397ab4..50fcf9c 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -1227,7 +1227,7 @@ static int smi_num; /* Used to sequence the SMIs */
#define DEFAULT_REGSPACING 1
#define DEFAULT_REGSIZE 1
-static int si_trydefaults = 1;
+static bool si_trydefaults = 1;
static char *si_type[SI_MAX_PARMS];
#define MAX_SI_TYPE_STR 30
static char si_type_str[MAX_SI_TYPE_STR];
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index 97c3edb..f434856 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -829,7 +829,7 @@ static struct console lpcons = {
static int parport_nr[LP_NO] = { [0 ... LP_NO-1] = LP_PARPORT_UNSPEC };
static char *parport[LP_NO];
-static int reset;
+static bool reset;
module_param_array(parport, charp, NULL, 0);
module_param(reset, bool, 0);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 1451790..d6e9d08 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -847,7 +847,7 @@ static const struct file_operations kmsg_fops = {
static const struct memdev {
const char *name;
- mode_t mode;
+ umode_t mode;
const struct file_operations *fops;
struct backing_dev_info *dev_info;
} devlist[] = {
@@ -901,7 +901,7 @@ static const struct file_operations memory_fops = {
.llseek = noop_llseek,
};
-static char *mem_devnode(struct device *dev, mode_t *mode)
+static char *mem_devnode(struct device *dev, umode_t *mode)
{
if (mode && devlist[MINOR(dev->devt)].mode)
*mode = devlist[MINOR(dev->devt)].mode;
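
The mode_t to umode_t changes here, and in misc.c, raw.c and tile-srom.c below, track the device core, whose devnode() callback now takes umode_t. A hedged sketch of a callback under the new prototype; the example_ names are placeholders, not code from the patch:

        #include <linux/device.h>
        #include <linux/kernel.h>
        #include <linux/slab.h>

        static char *example_devnode(struct device *dev, umode_t *mode)
        {
                if (mode)
                        *mode = 0600;   /* restrict the node to its owner */
                /* Returning NULL instead would keep the default /dev name. */
                return kasprintf(GFP_KERNEL, "example/%s", dev_name(dev));
        }
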
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 778273c..522136d 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -258,7 +258,7 @@ int misc_deregister(struct miscdevice *misc)
EXPORT_SYMBOL(misc_register);
EXPORT_SYMBOL(misc_deregister);
-static char *misc_devnode(struct device *dev, mode_t *mode)
+static char *misc_devnode(struct device *dev, umode_t *mode)
{
struct miscdevice *c = dev_get_drvdata(dev);
diff --git a/drivers/char/nwflash.c b/drivers/char/nwflash.c
index a12f524..bf586ae 100644
--- a/drivers/char/nwflash.c
+++ b/drivers/char/nwflash.c
@@ -51,7 +51,7 @@ static int write_block(unsigned long p, const char __user *buf, int count);
#define KFLASH_ID 0x89A6 //Intel flash
#define KFLASH_ID4 0xB0D4 //Intel flash 4Meg
-static int flashdebug; //if set - we will display progress msgs
+static bool flashdebug; //if set - we will display progress msgs
static int gbWriteEnable;
static int gbWriteBase64Enable;
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index 1578139..07f6a5a 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -439,7 +439,7 @@ static int mgslpc_device_count = 0;
* .text section address and breakpoint on module load.
* This is useful for use with gdb and add-symbol-file command.
*/
-static int break_on_load=0;
+static bool break_on_load=0;
/*
* Driver major number, defaults to zero to get auto
diff --git a/drivers/char/ramoops.c b/drivers/char/ramoops.c
index 7c7f42a1f8..9fec323 100644
--- a/drivers/char/ramoops.c
+++ b/drivers/char/ramoops.c
@@ -83,8 +83,7 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper,
struct timeval timestamp;
if (reason != KMSG_DUMP_OOPS &&
- reason != KMSG_DUMP_PANIC &&
- reason != KMSG_DUMP_KEXEC)
+ reason != KMSG_DUMP_PANIC)
return;
/* Only dump oopses if dump_oops is set */
@@ -126,8 +125,8 @@ static int __init ramoops_probe(struct platform_device *pdev)
goto fail3;
}
- rounddown_pow_of_two(pdata->mem_size);
- rounddown_pow_of_two(pdata->record_size);
+ pdata->mem_size = rounddown_pow_of_two(pdata->mem_size);
+ pdata->record_size = rounddown_pow_of_two(pdata->record_size);
/* Check for the minimum memory size */
if (pdata->mem_size < MIN_MEM_SIZE &&
@@ -148,14 +147,6 @@ static int __init ramoops_probe(struct platform_device *pdev)
cxt->phys_addr = pdata->mem_address;
cxt->record_size = pdata->record_size;
cxt->dump_oops = pdata->dump_oops;
- /*
- * Update the module parameter variables as well so they are visible
- * through /sys/module/ramoops/parameters/
- */
- mem_size = pdata->mem_size;
- mem_address = pdata->mem_address;
- record_size = pdata->record_size;
- dump_oops = pdata->dump_oops;
if (!request_mem_region(cxt->phys_addr, cxt->size, "ramoops")) {
pr_err("request mem region failed\n");
@@ -176,6 +167,15 @@ static int __init ramoops_probe(struct platform_device *pdev)
goto fail1;
}
+ /*
+ * Update the module parameter variables as well so they are visible
+ * through /sys/module/ramoops/parameters/
+ */
+ mem_size = pdata->mem_size;
+ mem_address = pdata->mem_address;
+ record_size = pdata->record_size;
+ dump_oops = pdata->dump_oops;
+
return 0;
fail1:
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 6035ab8..54ca8b2 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -387,7 +387,7 @@ static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static struct fasync_struct *fasync;
#if 0
-static int debug;
+static bool debug;
module_param(debug, bool, 0644);
#define DEBUG_ENT(fmt, arg...) do { \
if (debug) \
@@ -624,8 +624,8 @@ static struct timer_rand_state input_timer_state;
static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
{
struct {
- cycles_t cycles;
long jiffies;
+ unsigned cycles;
unsigned num;
} sample;
long delta, delta2, delta3;
@@ -637,7 +637,11 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
goto out;
sample.jiffies = jiffies;
- sample.cycles = get_cycles();
+
+ /* Use arch random value, fall back to cycles */
+ if (!arch_get_random_int(&sample.cycles))
+ sample.cycles = get_cycles();
+
sample.num = num;
mix_pool_bytes(&input_pool, &sample, sizeof(sample));
@@ -961,6 +965,7 @@ EXPORT_SYMBOL(get_random_bytes);
*/
static void init_std_data(struct entropy_store *r)
{
+ int i;
ktime_t now;
unsigned long flags;
@@ -970,6 +975,11 @@ static void init_std_data(struct entropy_store *r)
now = ktime_get_real();
mix_pool_bytes(r, &now, sizeof(now));
+ for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof flags) {
+ if (!arch_get_random_long(&flags))
+ break;
+ mix_pool_bytes(r, &flags, sizeof(flags));
+ }
mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
}
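
The non-obvious part of the random.c hunks is the return convention: arch_get_random_int() and arch_get_random_long() return nonzero on success and 0 when no hardware RNG (such as RDRAND) is available, so the existing cycle-counter and timestamp sources remain as the fallback. A small illustrative helper showing the same idiom:

        #include <linux/random.h>
        #include <linux/timex.h>

        static unsigned int example_sample_word(void)
        {
                unsigned int v;

                /* Prefer the architecture RNG; 0 means "not provided", so fall back. */
                if (!arch_get_random_int(&v))
                        v = get_cycles();
                return v;
        }
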
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index b6de2c0..54a3a6d 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -308,7 +308,7 @@ static const struct file_operations raw_ctl_fops = {
static struct cdev raw_cdev;
-static char *raw_devnode(struct device *dev, mode_t *mode)
+static char *raw_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "raw/%s", dev_name(dev));
}
diff --git a/drivers/char/tile-srom.c b/drivers/char/tile-srom.c
index cf3ee00..4dc0194 100644
--- a/drivers/char/tile-srom.c
+++ b/drivers/char/tile-srom.c
@@ -329,7 +329,7 @@ static struct device_attribute srom_dev_attrs[] = {
__ATTR_NULL
};
-static char *srom_devnode(struct device *dev, mode_t *mode)
+static char *srom_devnode(struct device *dev, umode_t *mode)
{
*mode = S_IRUGO | S_IWUSR;
return kasprintf(GFP_KERNEL, "srom/%s", dev_name(dev));
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig
index fa567f1..7fc75e4 100644
--- a/drivers/char/tpm/Kconfig
+++ b/drivers/char/tpm/Kconfig
@@ -27,6 +27,7 @@ if TCG_TPM
config TCG_TIS
tristate "TPM Interface Specification 1.2 Interface"
+ depends on X86
---help---
If you have a TPM security chip that is compliant with the
TCG TIS 1.2 TPM specification say Yes and it will be accessible
@@ -35,6 +36,7 @@ config TCG_TIS
config TCG_NSC
tristate "National Semiconductor TPM Interface"
+ depends on X86
---help---
If you have a TPM security chip from National Semiconductor
say Yes and it will be accessible from within Linux. To
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 361a1df..32362cf 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
+#include <linux/freezer.h>
#include "tpm.h"
@@ -440,7 +441,6 @@ out:
}
#define TPM_DIGEST_SIZE 20
-#define TPM_ERROR_SIZE 10
#define TPM_RET_CODE_IDX 6
enum tpm_capabilities {
@@ -469,12 +469,14 @@ static ssize_t transmit_cmd(struct tpm_chip *chip, struct tpm_cmd_t *cmd,
len = tpm_transmit(chip,(u8 *) cmd, len);
if (len < 0)
return len;
- if (len == TPM_ERROR_SIZE) {
- err = be32_to_cpu(cmd->header.out.return_code);
- dev_dbg(chip->dev, "A TPM error (%d) occurred %s\n", err, desc);
- return err;
- }
- return 0;
+ else if (len < TPM_HEADER_SIZE)
+ return -EFAULT;
+
+ err = be32_to_cpu(cmd->header.out.return_code);
+ if (err != 0)
+ dev_err(chip->dev, "A TPM error (%d) occurred %s\n", err, desc);
+
+ return err;
}
#define TPM_INTERNAL_RESULT_SIZE 200
@@ -530,7 +532,7 @@ void tpm_gen_interrupt(struct tpm_chip *chip)
}
EXPORT_SYMBOL_GPL(tpm_gen_interrupt);
-void tpm_get_timeouts(struct tpm_chip *chip)
+int tpm_get_timeouts(struct tpm_chip *chip)
{
struct tpm_cmd_t tpm_cmd;
struct timeout_t *timeout_cap;
@@ -552,7 +554,7 @@ void tpm_get_timeouts(struct tpm_chip *chip)
if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
be32_to_cpu(tpm_cmd.header.out.length)
!= sizeof(tpm_cmd.header.out) + sizeof(u32) + 4 * sizeof(u32))
- return;
+ return -EINVAL;
timeout_cap = &tpm_cmd.params.getcap_out.cap.timeout;
/* Don't overwrite default if value is 0 */
@@ -583,12 +585,12 @@ duration:
rc = transmit_cmd(chip, &tpm_cmd, TPM_INTERNAL_RESULT_SIZE,
"attempting to determine the durations");
if (rc)
- return;
+ return rc;
if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 ||
be32_to_cpu(tpm_cmd.header.out.length)
!= sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32))
- return;
+ return -EINVAL;
duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
chip->vendor.duration[TPM_SHORT] =
@@ -610,20 +612,36 @@ duration:
chip->vendor.duration_adjusted = true;
dev_info(chip->dev, "Adjusting TPM timeout parameters.");
}
+ return 0;
}
EXPORT_SYMBOL_GPL(tpm_get_timeouts);
-void tpm_continue_selftest(struct tpm_chip *chip)
+#define TPM_ORD_CONTINUE_SELFTEST 83
+#define CONTINUE_SELFTEST_RESULT_SIZE 10
+
+static struct tpm_input_header continue_selftest_header = {
+ .tag = TPM_TAG_RQU_COMMAND,
+ .length = cpu_to_be32(10),
+ .ordinal = cpu_to_be32(TPM_ORD_CONTINUE_SELFTEST),
+};
+
+/**
+ * tpm_continue_selftest -- run TPM's selftest
+ * @chip: TPM chip to use
+ *
+ * Returns 0 on success, < 0 in case of fatal error or a value > 0 representing
+ * a TPM error code.
+ */
+static int tpm_continue_selftest(struct tpm_chip *chip)
{
- u8 data[] = {
- 0, 193, /* TPM_TAG_RQU_COMMAND */
- 0, 0, 0, 10, /* length */
- 0, 0, 0, 83, /* TPM_ORD_ContinueSelfTest */
- };
+ int rc;
+ struct tpm_cmd_t cmd;
- tpm_transmit(chip, data, sizeof(data));
+ cmd.header.in = continue_selftest_header;
+ rc = transmit_cmd(chip, &cmd, CONTINUE_SELFTEST_RESULT_SIZE,
+ "continue selftest");
+ return rc;
}
-EXPORT_SYMBOL_GPL(tpm_continue_selftest);
ssize_t tpm_show_enabled(struct device * dev, struct device_attribute * attr,
char *buf)
@@ -718,7 +736,7 @@ static struct tpm_input_header pcrread_header = {
.ordinal = TPM_ORDINAL_PCRREAD
};
-int __tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
+static int __tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf)
{
int rc;
struct tpm_cmd_t cmd;
@@ -798,6 +816,54 @@ int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash)
}
EXPORT_SYMBOL_GPL(tpm_pcr_extend);
+/**
+ * tpm_do_selftest - have the TPM continue its selftest and wait until it
+ * can receive further commands
+ * @chip: TPM chip to use
+ *
+ * Returns 0 on success, < 0 in case of fatal error or a value > 0 representing
+ * a TPM error code.
+ */
+int tpm_do_selftest(struct tpm_chip *chip)
+{
+ int rc;
+ u8 digest[TPM_DIGEST_SIZE];
+ unsigned int loops;
+ unsigned int delay_msec = 1000;
+ unsigned long duration;
+
+ duration = tpm_calc_ordinal_duration(chip,
+ TPM_ORD_CONTINUE_SELFTEST);
+
+ loops = jiffies_to_msecs(duration) / delay_msec;
+
+ rc = tpm_continue_selftest(chip);
+ /* This may fail if there was no TPM driver during a suspend/resume
+ * cycle; some may return 10 (BAD_ORDINAL), others 28 (FAILEDSELFTEST)
+ */
+ if (rc)
+ return rc;
+
+ do {
+ rc = __tpm_pcr_read(chip, 0, digest);
+ if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
+ dev_info(chip->dev,
+ "TPM is disabled/deactivated (0x%X)\n", rc);
+ /* TPM is disabled and/or deactivated; driver can
+ * proceed and TPM does handle commands for
+ * suspend/resume correctly
+ */
+ return 0;
+ }
+ if (rc != TPM_WARN_DOING_SELFTEST)
+ return rc;
+ msleep(delay_msec);
+ } while (--loops > 0);
+
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_do_selftest);
+
int tpm_send(u32 chip_num, void *cmd, size_t buflen)
{
struct tpm_chip *chip;
@@ -1005,6 +1071,46 @@ ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
}
EXPORT_SYMBOL_GPL(tpm_store_cancel);
+int wait_for_tpm_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
+ wait_queue_head_t *queue)
+{
+ unsigned long stop;
+ long rc;
+ u8 status;
+
+ /* check current status */
+ status = chip->vendor.status(chip);
+ if ((status & mask) == mask)
+ return 0;
+
+ stop = jiffies + timeout;
+
+ if (chip->vendor.irq) {
+again:
+ timeout = stop - jiffies;
+ if ((long)timeout <= 0)
+ return -ETIME;
+ rc = wait_event_interruptible_timeout(*queue,
+ ((chip->vendor.status(chip)
+ & mask) == mask),
+ timeout);
+ if (rc > 0)
+ return 0;
+ if (rc == -ERESTARTSYS && freezing(current)) {
+ clear_thread_flag(TIF_SIGPENDING);
+ goto again;
+ }
+ } else {
+ do {
+ msleep(TPM_TIMEOUT);
+ status = chip->vendor.status(chip);
+ if ((status & mask) == mask)
+ return 0;
+ } while (time_before(jiffies, stop));
+ }
+ return -ETIME;
+}
+EXPORT_SYMBOL_GPL(wait_for_tpm_stat);
/*
* Device file system interface to the TPM
*
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 9c4163c..0105471 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -38,6 +38,11 @@ enum tpm_addr {
TPM_ADDR = 0x4E,
};
+#define TPM_WARN_DOING_SELFTEST 0x802
+#define TPM_ERR_DEACTIVATED 0x6
+#define TPM_ERR_DISABLED 0x7
+
+#define TPM_HEADER_SIZE 10
extern ssize_t tpm_show_pubek(struct device *, struct device_attribute *attr,
char *);
extern ssize_t tpm_show_pcrs(struct device *, struct device_attribute *attr,
@@ -279,9 +284,9 @@ struct tpm_cmd_t {
ssize_t tpm_getcap(struct device *, __be32, cap_t *, const char *);
-extern void tpm_get_timeouts(struct tpm_chip *);
+extern int tpm_get_timeouts(struct tpm_chip *);
extern void tpm_gen_interrupt(struct tpm_chip *);
-extern void tpm_continue_selftest(struct tpm_chip *);
+extern int tpm_do_selftest(struct tpm_chip *);
extern unsigned long tpm_calc_ordinal_duration(struct tpm_chip *, u32);
extern struct tpm_chip* tpm_register_hardware(struct device *,
const struct tpm_vendor_specific *);
@@ -294,7 +299,8 @@ extern ssize_t tpm_read(struct file *, char __user *, size_t, loff_t *);
extern void tpm_remove_hardware(struct device *);
extern int tpm_pm_suspend(struct device *, pm_message_t);
extern int tpm_pm_resume(struct device *);
-
+extern int wait_for_tpm_stat(struct tpm_chip *, u8, unsigned long,
+ wait_queue_head_t *);
#ifdef CONFIG_ACPI
extern struct dentry ** tpm_bios_log_setup(char *);
extern void tpm_bios_log_teardown(struct dentry **);
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 3f4051a..a174862 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -29,8 +29,6 @@
#include <linux/freezer.h>
#include "tpm.h"
-#define TPM_HEADER_SIZE 10
-
enum tis_access {
TPM_ACCESS_VALID = 0x80,
TPM_ACCESS_ACTIVE_LOCALITY = 0x20,
@@ -193,54 +191,14 @@ static int get_burstcount(struct tpm_chip *chip)
return -EBUSY;
}
-static int wait_for_stat(struct tpm_chip *chip, u8 mask, unsigned long timeout,
- wait_queue_head_t *queue)
-{
- unsigned long stop;
- long rc;
- u8 status;
-
- /* check current status */
- status = tpm_tis_status(chip);
- if ((status & mask) == mask)
- return 0;
-
- stop = jiffies + timeout;
-
- if (chip->vendor.irq) {
-again:
- timeout = stop - jiffies;
- if ((long)timeout <= 0)
- return -ETIME;
- rc = wait_event_interruptible_timeout(*queue,
- ((tpm_tis_status
- (chip) & mask) ==
- mask), timeout);
- if (rc > 0)
- return 0;
- if (rc == -ERESTARTSYS && freezing(current)) {
- clear_thread_flag(TIF_SIGPENDING);
- goto again;
- }
- } else {
- do {
- msleep(TPM_TIMEOUT);
- status = tpm_tis_status(chip);
- if ((status & mask) == mask)
- return 0;
- } while (time_before(jiffies, stop));
- }
- return -ETIME;
-}
-
static int recv_data(struct tpm_chip *chip, u8 *buf, size_t count)
{
int size = 0, burstcnt;
while (size < count &&
- wait_for_stat(chip,
- TPM_STS_DATA_AVAIL | TPM_STS_VALID,
- chip->vendor.timeout_c,
- &chip->vendor.read_queue)
+ wait_for_tpm_stat(chip,
+ TPM_STS_DATA_AVAIL | TPM_STS_VALID,
+ chip->vendor.timeout_c,
+ &chip->vendor.read_queue)
== 0) {
burstcnt = get_burstcount(chip);
for (; burstcnt > 0 && size < count; burstcnt--)
@@ -282,8 +240,8 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count)
goto out;
}
- wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
- &chip->vendor.int_queue);
+ wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
+ &chip->vendor.int_queue);
status = tpm_tis_status(chip);
if (status & TPM_STS_DATA_AVAIL) { /* retry? */
dev_err(chip->dev, "Error left over data\n");
@@ -297,7 +255,7 @@ out:
return size;
}
-static int itpm;
+static bool itpm;
module_param(itpm, bool, 0444);
MODULE_PARM_DESC(itpm, "Force iTPM workarounds (found on some Lenovo laptops)");
@@ -317,7 +275,7 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
status = tpm_tis_status(chip);
if ((status & TPM_STS_COMMAND_READY) == 0) {
tpm_tis_ready(chip);
- if (wait_for_stat
+ if (wait_for_tpm_stat
(chip, TPM_STS_COMMAND_READY, chip->vendor.timeout_b,
&chip->vendor.int_queue) < 0) {
rc = -ETIME;
@@ -333,8 +291,8 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
count++;
}
- wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
- &chip->vendor.int_queue);
+ wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
+ &chip->vendor.int_queue);
status = tpm_tis_status(chip);
if (!itpm && (status & TPM_STS_DATA_EXPECT) == 0) {
rc = -EIO;
@@ -345,8 +303,8 @@ static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len)
/* write last byte */
iowrite8(buf[count],
chip->vendor.iobase + TPM_DATA_FIFO(chip->vendor.locality));
- wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
- &chip->vendor.int_queue);
+ wait_for_tpm_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c,
+ &chip->vendor.int_queue);
status = tpm_tis_status(chip);
if ((status & TPM_STS_DATA_EXPECT) != 0) {
rc = -EIO;
@@ -381,7 +339,7 @@ static int tpm_tis_send(struct tpm_chip *chip, u8 *buf, size_t len)
if (chip->vendor.irq) {
ordinal = be32_to_cpu(*((__be32 *) (buf + 6)));
- if (wait_for_stat
+ if (wait_for_tpm_stat
(chip, TPM_STS_DATA_AVAIL | TPM_STS_VALID,
tpm_calc_ordinal_duration(chip, ordinal),
&chip->vendor.read_queue) < 0) {
@@ -432,6 +390,9 @@ static int probe_itpm(struct tpm_chip *chip)
out:
itpm = rem_itpm;
tpm_tis_ready(chip);
+ /* some TPMs need a break here otherwise they will not work
+ * correctly on the immediately subsequent command */
+ msleep(chip->vendor.timeout_b);
release_locality(chip, chip->vendor.locality, 0);
return rc;
@@ -539,7 +500,7 @@ static irqreturn_t tis_int_handler(int dummy, void *dev_id)
return IRQ_HANDLED;
}
-static int interrupts = 1;
+static bool interrupts = 1;
module_param(interrupts, bool, 0444);
MODULE_PARM_DESC(interrupts, "Enable interrupts");
@@ -614,7 +575,17 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
dev_dbg(dev, "\tData Avail Int Support\n");
/* get the timeouts before testing for irqs */
- tpm_get_timeouts(chip);
+ if (tpm_get_timeouts(chip)) {
+ dev_err(dev, "Could not get TPM timeouts and durations\n");
+ rc = -ENODEV;
+ goto out_err;
+ }
+
+ if (tpm_do_selftest(chip)) {
+ dev_err(dev, "TPM self test failed\n");
+ rc = -ENODEV;
+ goto out_err;
+ }
/* INTERRUPT Setup */
init_waitqueue_head(&chip->vendor.read_queue);
@@ -722,7 +693,6 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
list_add(&chip->vendor.list, &tis_chips);
spin_unlock(&tis_lock);
- tpm_continue_selftest(chip);
return 0;
out_err:
@@ -790,7 +760,7 @@ static int tpm_tis_pnp_resume(struct pnp_dev *dev)
ret = tpm_pm_resume(&dev->dev);
if (!ret)
- tpm_continue_selftest(chip);
+ tpm_do_selftest(chip);
return ret;
}
@@ -858,7 +828,7 @@ static struct platform_driver tis_drv = {
static struct platform_device *pdev;
-static int force;
+static bool force;
module_param(force, bool, 0444);
MODULE_PARM_DESC(force, "Force device probe rather than using ACPI entry");
static int __init init_tis(void)
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 8e3c46d..b58b561 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -392,7 +392,7 @@ static int add_inbuf(struct virtqueue *vq, struct port_buffer *buf)
sg_init_one(sg, buf->buf, buf->size);
- ret = virtqueue_add_buf(vq, sg, 0, 1, buf);
+ ret = virtqueue_add_buf(vq, sg, 0, 1, buf, GFP_ATOMIC);
virtqueue_kick(vq);
return ret;
}
@@ -457,7 +457,7 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
vq = portdev->c_ovq;
sg_init_one(sg, &cpkt, sizeof(cpkt));
- if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt) >= 0) {
+ if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) >= 0) {
virtqueue_kick(vq);
while (!virtqueue_get_buf(vq, &len))
cpu_relax();
@@ -506,7 +506,7 @@ static ssize_t send_buf(struct port *port, void *in_buf, size_t in_count,
reclaim_consumed_buffers(port);
sg_init_one(sg, in_buf, in_count);
- ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf);
+ ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf, GFP_ATOMIC);
/* Tell Host to go! */
virtqueue_kick(out_vq);
@@ -1271,6 +1271,20 @@ static void remove_port(struct kref *kref)
kfree(port);
}
+static void remove_port_data(struct port *port)
+{
+ struct port_buffer *buf;
+
+ /* Remove unused data this port might have received. */
+ discard_port_data(port);
+
+ reclaim_consumed_buffers(port);
+
+ /* Remove buffers we queued up for the Host to send us data in. */
+ while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
+ free_buf(buf);
+}
+
/*
* Port got unplugged. Remove port from portdev's list and drop the
* kref reference. If no userspace has this port opened, it will
@@ -1278,8 +1292,6 @@ static void remove_port(struct kref *kref)
*/
static void unplug_port(struct port *port)
{
- struct port_buffer *buf;
-
spin_lock_irq(&port->portdev->ports_lock);
list_del(&port->list);
spin_unlock_irq(&port->portdev->ports_lock);
@@ -1300,14 +1312,7 @@ static void unplug_port(struct port *port)
hvc_remove(port->cons.hvc);
}
- /* Remove unused data this port might have received. */
- discard_port_data(port);
-
- reclaim_consumed_buffers(port);
-
- /* Remove buffers we queued up for the Host to send us data in. */
- while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
- free_buf(buf);
+ remove_port_data(port);
/*
* We should just assume the device itself has gone off --
@@ -1659,6 +1664,28 @@ static const struct file_operations portdev_fops = {
.owner = THIS_MODULE,
};
+static void remove_vqs(struct ports_device *portdev)
+{
+ portdev->vdev->config->del_vqs(portdev->vdev);
+ kfree(portdev->in_vqs);
+ kfree(portdev->out_vqs);
+}
+
+static void remove_controlq_data(struct ports_device *portdev)
+{
+ struct port_buffer *buf;
+ unsigned int len;
+
+ if (!use_multiport(portdev))
+ return;
+
+ while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
+ free_buf(buf);
+
+ while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
+ free_buf(buf);
+}
+
/*
* Once we're further in boot, we get probed like any other virtio
* device.
@@ -1764,9 +1791,7 @@ free_vqs:
/* The host might want to notify mgmt sw about device add failure */
__send_control_msg(portdev, VIRTIO_CONSOLE_BAD_ID,
VIRTIO_CONSOLE_DEVICE_READY, 0);
- vdev->config->del_vqs(vdev);
- kfree(portdev->in_vqs);
- kfree(portdev->out_vqs);
+ remove_vqs(portdev);
free_chrdev:
unregister_chrdev(portdev->chr_major, "virtio-portsdev");
free:
@@ -1804,21 +1829,8 @@ static void virtcons_remove(struct virtio_device *vdev)
* have to just stop using the port, as the vqs are going
* away.
*/
- if (use_multiport(portdev)) {
- struct port_buffer *buf;
- unsigned int len;
-
- while ((buf = virtqueue_get_buf(portdev->c_ivq, &len)))
- free_buf(buf);
-
- while ((buf = virtqueue_detach_unused_buf(portdev->c_ivq)))
- free_buf(buf);
- }
-
- vdev->config->del_vqs(vdev);
- kfree(portdev->in_vqs);
- kfree(portdev->out_vqs);
-
+ remove_controlq_data(portdev);
+ remove_vqs(portdev);
kfree(portdev);
}
@@ -1832,6 +1844,68 @@ static unsigned int features[] = {
VIRTIO_CONSOLE_F_MULTIPORT,
};
+#ifdef CONFIG_PM
+static int virtcons_freeze(struct virtio_device *vdev)
+{
+ struct ports_device *portdev;
+ struct port *port;
+
+ portdev = vdev->priv;
+
+ vdev->config->reset(vdev);
+
+ virtqueue_disable_cb(portdev->c_ivq);
+ cancel_work_sync(&portdev->control_work);
+ /*
+ * Once more: if control_work_handler() was running, it would
+ * enable the cb as the last step.
+ */
+ virtqueue_disable_cb(portdev->c_ivq);
+ remove_controlq_data(portdev);
+
+ list_for_each_entry(port, &portdev->ports, list) {
+ virtqueue_disable_cb(port->in_vq);
+ virtqueue_disable_cb(port->out_vq);
+ /*
+ * We'll ask the host later if the new invocation has
+ * the port opened or closed.
+ */
+ port->host_connected = false;
+ remove_port_data(port);
+ }
+ remove_vqs(portdev);
+
+ return 0;
+}
+
+static int virtcons_restore(struct virtio_device *vdev)
+{
+ struct ports_device *portdev;
+ struct port *port;
+ int ret;
+
+ portdev = vdev->priv;
+
+ ret = init_vqs(portdev);
+ if (ret)
+ return ret;
+
+ if (use_multiport(portdev))
+ fill_queue(portdev->c_ivq, &portdev->cvq_lock);
+
+ list_for_each_entry(port, &portdev->ports, list) {
+ port->in_vq = portdev->in_vqs[port->id];
+ port->out_vq = portdev->out_vqs[port->id];
+
+ fill_queue(port->in_vq, &port->inbuf_lock);
+
+ /* Get port open/close status on the host */
+ send_control_msg(port, VIRTIO_CONSOLE_PORT_READY, 1);
+ }
+ return 0;
+}
+#endif
+
static struct virtio_driver virtio_console = {
.feature_table = features,
.feature_table_size = ARRAY_SIZE(features),
@@ -1841,6 +1915,10 @@ static struct virtio_driver virtio_console = {
.probe = virtcons_probe,
.remove = virtcons_remove,
.config_changed = config_intr,
+#ifdef CONFIG_PM
+ .freeze = virtcons_freeze,
+ .restore = virtcons_restore,
+#endif
};
static int __init init(void)
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 3530927..9b3cd08 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -3,5 +3,8 @@ config CLKDEV_LOOKUP
bool
select HAVE_CLK
+config HAVE_CLK_PREPARE
+ bool
+
config HAVE_MACH_CLKDEV
bool
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index effe797..6b5cf02 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -143,7 +143,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_LE,
#ifndef CONFIG_X86_64
#include <asm/mach_timer.h>
#define PMTMR_EXPECTED_RATE \
- ((CALIBRATE_LATCH * (PMTMR_TICKS_PER_SEC >> 10)) / (CLOCK_TICK_RATE>>10))
+ ((CALIBRATE_LATCH * (PMTMR_TICKS_PER_SEC >> 10)) / (PIT_TICK_RATE>>10))
/*
* Some boards have the PMTMR running way too fast. We check
* the PMTMR rate against PIT channel 2 to catch these cases.
diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c
index 59feefe..fb6b6d2 100644
--- a/drivers/clocksource/clksrc-dbx500-prcmu.c
+++ b/drivers/clocksource/clksrc-dbx500-prcmu.c
@@ -58,25 +58,15 @@ static struct clocksource clocksource_dbx500_prcmu = {
};
#ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
-static DEFINE_CLOCK_DATA(cd);
-unsigned long long notrace sched_clock(void)
+static u32 notrace dbx500_prcmu_sched_clock_read(void)
{
- u32 cyc;
-
if (unlikely(!clksrc_dbx500_timer_base))
return 0;
- cyc = clksrc_dbx500_prcmu_read(&clocksource_dbx500_prcmu);
-
- return cyc_to_sched_clock(&cd, cyc, (u32)~0);
+ return clksrc_dbx500_prcmu_read(&clocksource_dbx500_prcmu);
}
-static void notrace clksrc_dbx500_prcmu_update_sched_clock(void)
-{
- u32 cyc = clksrc_dbx500_prcmu_read(&clocksource_dbx500_prcmu);
- update_sched_clock(&cd, cyc, (u32)~0);
-}
#endif
void __init clksrc_dbx500_prcmu_init(void __iomem *base)
@@ -97,7 +87,7 @@ void __init clksrc_dbx500_prcmu_init(void __iomem *base)
clksrc_dbx500_timer_base + PRCMU_TIMER_REF);
}
#ifdef CONFIG_CLKSRC_DBX500_PRCMU_SCHED_CLOCK
- init_sched_clock(&cd, clksrc_dbx500_prcmu_update_sched_clock,
+ setup_sched_clock(dbx500_prcmu_sched_clock_read,
32, RATE_32K);
#endif
clocksource_calc_mult_shift(&clocksource_dbx500_prcmu,
diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c
index 27c49e6..e7cab2d 100644
--- a/drivers/clocksource/i8253.c
+++ b/drivers/clocksource/i8253.c
@@ -53,7 +53,7 @@ static cycle_t i8253_read(struct clocksource *cs)
count |= inb_p(PIT_CH0) << 8;
/* VIA686a test code... reset the latch if count > max + 1 */
- if (count > LATCH) {
+ if (count > PIT_LATCH) {
outb_p(0x34, PIT_MODE);
outb_p(PIT_LATCH & 0xff, PIT_CH0);
outb_p(PIT_LATCH >> 8, PIT_CH0);
@@ -114,8 +114,8 @@ static void init_pit_timer(enum clock_event_mode mode,
case CLOCK_EVT_MODE_PERIODIC:
/* binary, mode 2, LSB/MSB, ch 0 */
outb_p(0x34, PIT_MODE);
- outb_p(LATCH & 0xff , PIT_CH0); /* LSB */
- outb_p(LATCH >> 8 , PIT_CH0); /* MSB */
+ outb_p(PIT_LATCH & 0xff , PIT_CH0); /* LSB */
+ outb_p(PIT_LATCH >> 8 , PIT_CH0); /* MSB */
break;
case CLOCK_EVT_MODE_SHUTDOWN:
diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c
index 79c47e8..55d0f95 100644
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
@@ -59,7 +59,6 @@ static struct clocksource clksrc = {
.rating = 200,
.read = tc_get_cycles,
.mask = CLOCKSOURCE_MASK(32),
- .shift = 18,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
@@ -256,7 +255,6 @@ static int __init tcb_clksrc_init(void)
best_divisor_idx = i;
}
- clksrc.mult = clocksource_hz2mult(divided_rate, clksrc.shift);
printk(bootinfo, clksrc.name, CONFIG_ATMEL_TCB_CLKSRC_BLOCK,
divided_rate / 1000000,
@@ -292,7 +290,7 @@ static int __init tcb_clksrc_init(void)
__raw_writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
/* and away we go! */
- clocksource_register(&clksrc);
+ clocksource_register_hz(&clksrc, divided_rate);
/* channel 2: periodic and oneshot timer support */
setup_clkevents(tc, clk32k_divisor_idx);
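
The tcb_clksrc conversion drops the hand-computed .mult/.shift pair; clocksource_register_hz() derives both from the rate supplied by the caller. A minimal sketch of that registration pattern with placeholder names:

        #include <linux/clocksource.h>
        #include <linux/init.h>

        static cycle_t example_read(struct clocksource *cs)
        {
                return 0;       /* would read the free-running hardware counter here */
        }

        static struct clocksource example_clksrc = {
                .name   = "example",
                .rating = 200,
                .read   = example_read,
                .mask   = CLOCKSOURCE_MASK(32),
                .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
        };

        /* rate_hz: the counter's input frequency; the core computes mult/shift. */
        static int __init example_clksrc_init(unsigned long rate_hz)
        {
                return clocksource_register_hz(&example_clksrc, rate_hz);
        }
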
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 72a0044..e0664fe 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -21,12 +21,19 @@ config ARM_S5PV210_CPUFREQ
If in doubt, say N.
+config ARM_EXYNOS_CPUFREQ
+ bool "SAMSUNG EXYNOS SoCs"
+ depends on ARCH_EXYNOS
+ select ARM_EXYNOS4210_CPUFREQ if CPU_EXYNOS4210
+ default y
+ help
+ This adds the CPUFreq driver common part for Samsung
+ EXYNOS SoCs.
+
+ If in doubt, say N.
+
config ARM_EXYNOS4210_CPUFREQ
bool "Samsung EXYNOS4210"
- depends on CPU_EXYNOS4210
- default y
help
This adds the CPUFreq driver for Samsung EXYNOS4210
SoC (S5PV310 or S5PC210).
-
- If in doubt, say N.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index a48bc02..ac000fa 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -42,7 +42,9 @@ obj-$(CONFIG_X86_CPUFREQ_NFORCE2) += cpufreq-nforce2.o
obj-$(CONFIG_UX500_SOC_DB8500) += db8500-cpufreq.o
obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o
obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o
+obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o
obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o
+obj-$(CONFIG_ARCH_OMAP2PLUS) += omap-cpufreq.o
##################################################################################
# PowerPC platform drivers
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 987a165..622013f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -204,8 +204,7 @@ static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
pr_debug("saving %lu as reference value for loops_per_jiffy; "
"freq is %u kHz\n", l_p_j_ref, l_p_j_ref_freq);
}
- if ((val == CPUFREQ_PRECHANGE && ci->old < ci->new) ||
- (val == CPUFREQ_POSTCHANGE && ci->old > ci->new) ||
+ if ((val == CPUFREQ_POSTCHANGE && ci->old != ci->new) ||
(val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
ci->new);
@@ -679,7 +678,7 @@ static struct kobj_type ktype_cpufreq = {
*/
static int cpufreq_add_dev_policy(unsigned int cpu,
struct cpufreq_policy *policy,
- struct sys_device *sys_dev)
+ struct device *dev)
{
int ret = 0;
#ifdef CONFIG_SMP
@@ -728,7 +727,7 @@ static int cpufreq_add_dev_policy(unsigned int cpu,
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
pr_debug("CPU already managed, adding link\n");
- ret = sysfs_create_link(&sys_dev->kobj,
+ ret = sysfs_create_link(&dev->kobj,
&managed_policy->kobj,
"cpufreq");
if (ret)
@@ -761,7 +760,7 @@ static int cpufreq_add_dev_symlink(unsigned int cpu,
for_each_cpu(j, policy->cpus) {
struct cpufreq_policy *managed_policy;
- struct sys_device *cpu_sys_dev;
+ struct device *cpu_dev;
if (j == cpu)
continue;
@@ -770,8 +769,8 @@ static int cpufreq_add_dev_symlink(unsigned int cpu,
pr_debug("CPU %u already managed, adding link\n", j);
managed_policy = cpufreq_cpu_get(cpu);
- cpu_sys_dev = get_cpu_sysdev(j);
- ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
+ cpu_dev = get_cpu_device(j);
+ ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
"cpufreq");
if (ret) {
cpufreq_cpu_put(managed_policy);
@@ -783,7 +782,7 @@ static int cpufreq_add_dev_symlink(unsigned int cpu,
static int cpufreq_add_dev_interface(unsigned int cpu,
struct cpufreq_policy *policy,
- struct sys_device *sys_dev)
+ struct device *dev)
{
struct cpufreq_policy new_policy;
struct freq_attr **drv_attr;
@@ -793,7 +792,7 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
/* prepare interface data */
ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
- &sys_dev->kobj, "cpufreq");
+ &dev->kobj, "cpufreq");
if (ret)
return ret;
@@ -866,9 +865,9 @@ err_out_kobj_put:
* with with cpu hotplugging and all hell will break loose. Tried to clean this
* mess up, but more thorough testing is needed. - Mathieu
*/
-static int cpufreq_add_dev(struct sys_device *sys_dev)
+static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
{
- unsigned int cpu = sys_dev->id;
+ unsigned int cpu = dev->id;
int ret = 0, found = 0;
struct cpufreq_policy *policy;
unsigned long flags;
@@ -947,7 +946,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_START, policy);
- ret = cpufreq_add_dev_policy(cpu, policy, sys_dev);
+ ret = cpufreq_add_dev_policy(cpu, policy, dev);
if (ret) {
if (ret > 0)
/* This is a managed cpu, symlink created,
@@ -956,7 +955,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
goto err_unlock_policy;
}
- ret = cpufreq_add_dev_interface(cpu, policy, sys_dev);
+ ret = cpufreq_add_dev_interface(cpu, policy, dev);
if (ret)
goto err_out_unregister;
@@ -999,15 +998,15 @@ module_out:
* Caller should already have policy_rwsem in write mode for this CPU.
* This routine frees the rwsem before returning.
*/
-static int __cpufreq_remove_dev(struct sys_device *sys_dev)
+static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
- unsigned int cpu = sys_dev->id;
+ unsigned int cpu = dev->id;
unsigned long flags;
struct cpufreq_policy *data;
struct kobject *kobj;
struct completion *cmp;
#ifdef CONFIG_SMP
- struct sys_device *cpu_sys_dev;
+ struct device *cpu_dev;
unsigned int j;
#endif
@@ -1032,7 +1031,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
pr_debug("removing link\n");
cpumask_clear_cpu(cpu, data->cpus);
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- kobj = &sys_dev->kobj;
+ kobj = &dev->kobj;
cpufreq_cpu_put(data);
unlock_policy_rwsem_write(cpu);
sysfs_remove_link(kobj, "cpufreq");
@@ -1071,8 +1070,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
strncpy(per_cpu(cpufreq_cpu_governor, j),
data->governor->name, CPUFREQ_NAME_LEN);
#endif
- cpu_sys_dev = get_cpu_sysdev(j);
- kobj = &cpu_sys_dev->kobj;
+ cpu_dev = get_cpu_device(j);
+ kobj = &cpu_dev->kobj;
unlock_policy_rwsem_write(cpu);
sysfs_remove_link(kobj, "cpufreq");
lock_policy_rwsem_write(cpu);
@@ -1112,11 +1111,11 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
if (unlikely(cpumask_weight(data->cpus) > 1)) {
/* first sibling now owns the new sysfs dir */
cpumask_clear_cpu(cpu, data->cpus);
- cpufreq_add_dev(get_cpu_sysdev(cpumask_first(data->cpus)));
+ cpufreq_add_dev(get_cpu_device(cpumask_first(data->cpus)), NULL);
/* finally remove our own symlink */
lock_policy_rwsem_write(cpu);
- __cpufreq_remove_dev(sys_dev);
+ __cpufreq_remove_dev(dev, sif);
}
#endif
@@ -1128,9 +1127,9 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
}
-static int cpufreq_remove_dev(struct sys_device *sys_dev)
+static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
{
- unsigned int cpu = sys_dev->id;
+ unsigned int cpu = dev->id;
int retval;
if (cpu_is_offline(cpu))
@@ -1139,7 +1138,7 @@ static int cpufreq_remove_dev(struct sys_device *sys_dev)
if (unlikely(lock_policy_rwsem_write(cpu)))
BUG();
- retval = __cpufreq_remove_dev(sys_dev);
+ retval = __cpufreq_remove_dev(dev, sif);
return retval;
}
@@ -1271,9 +1270,11 @@ out:
}
EXPORT_SYMBOL(cpufreq_get);
-static struct sysdev_driver cpufreq_sysdev_driver = {
- .add = cpufreq_add_dev,
- .remove = cpufreq_remove_dev,
+static struct subsys_interface cpufreq_interface = {
+ .name = "cpufreq",
+ .subsys = &cpu_subsys,
+ .add_dev = cpufreq_add_dev,
+ .remove_dev = cpufreq_remove_dev,
};
@@ -1765,25 +1766,25 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
- struct sys_device *sys_dev;
+ struct device *dev;
- sys_dev = get_cpu_sysdev(cpu);
- if (sys_dev) {
+ dev = get_cpu_device(cpu);
+ if (dev) {
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
- cpufreq_add_dev(sys_dev);
+ cpufreq_add_dev(dev, NULL);
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
if (unlikely(lock_policy_rwsem_write(cpu)))
BUG();
- __cpufreq_remove_dev(sys_dev);
+ __cpufreq_remove_dev(dev, NULL);
break;
case CPU_DOWN_FAILED:
case CPU_DOWN_FAILED_FROZEN:
- cpufreq_add_dev(sys_dev);
+ cpufreq_add_dev(dev, NULL);
break;
}
}
@@ -1830,8 +1831,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
cpufreq_driver = driver_data;
spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
- ret = sysdev_driver_register(&cpu_sysdev_class,
- &cpufreq_sysdev_driver);
+ ret = subsys_interface_register(&cpufreq_interface);
if (ret)
goto err_null_driver;
@@ -1850,7 +1850,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
if (ret) {
pr_debug("no CPU initialized for driver %s\n",
driver_data->name);
- goto err_sysdev_unreg;
+ goto err_if_unreg;
}
}
@@ -1858,9 +1858,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
pr_debug("driver %s up and running\n", driver_data->name);
return 0;
-err_sysdev_unreg:
- sysdev_driver_unregister(&cpu_sysdev_class,
- &cpufreq_sysdev_driver);
+err_if_unreg:
+ subsys_interface_unregister(&cpufreq_interface);
err_null_driver:
spin_lock_irqsave(&cpufreq_driver_lock, flags);
cpufreq_driver = NULL;
@@ -1887,7 +1886,7 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
pr_debug("unregistering driver %s\n", driver->name);
- sysdev_driver_unregister(&cpu_sysdev_class, &cpufreq_sysdev_driver);
+ subsys_interface_unregister(&cpufreq_interface);
unregister_hotcpu_notifier(&cpufreq_cpu_notifier);
spin_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -1907,8 +1906,7 @@ static int __init cpufreq_core_init(void)
init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
}
- cpufreq_global_kobject = kobject_create_and_add("cpufreq",
- &cpu_sysdev_class.kset.kobj);
+ cpufreq_global_kobject = kobject_create_and_add("cpufreq", &cpu_subsys.dev_root->kobj);
BUG_ON(!cpufreq_global_kobject);
register_syscore_ops(&cpufreq_syscore_ops);
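
The hunks above replace the old sysdev_driver add/remove hooks with a subsys_interface registered against cpu_subsys, so cpufreq is called back once per CPU device. A minimal sketch of that registration pattern follows; the example_* names are illustrative and not part of the patch.

#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/device.h>

/* Called for every device already in the cpu subsystem and for devices
 * added later, mirroring cpufreq_add_dev()/cpufreq_remove_dev(). */
static int example_add_dev(struct device *dev, struct subsys_interface *sif)
{
	pr_info("example: cpu%u added\n", dev->id);
	return 0;
}

static int example_remove_dev(struct device *dev, struct subsys_interface *sif)
{
	pr_info("example: cpu%u removed\n", dev->id);
	return 0;
}

static struct subsys_interface example_interface = {
	.name		= "example",
	.subsys		= &cpu_subsys,
	.add_dev	= example_add_dev,
	.remove_dev	= example_remove_dev,
};

/* Registered with subsys_interface_register(&example_interface) and torn
 * down with subsys_interface_unregister(&example_interface). */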
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index c97b468..235a340 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -95,27 +95,26 @@ static struct dbs_tuners {
.freq_step = 5,
};
-static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
- cputime64_t *wall)
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
- cputime64_t idle_time;
- cputime64_t cur_wall_time;
- cputime64_t busy_time;
+ u64 idle_time;
+ u64 cur_wall_time;
+ u64 busy_time;
cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
- busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
- kstat_cpu(cpu).cpustat.system);
- busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
- busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
- busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
- busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
+ busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
- idle_time = cputime64_sub(cur_wall_time, busy_time);
+ idle_time = cur_wall_time - busy_time;
if (wall)
- *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
+ *wall = jiffies_to_usecs(cur_wall_time);
- return (cputime64_t)jiffies_to_usecs(idle_time);
+ return jiffies_to_usecs(idle_time);
}
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
@@ -272,7 +271,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
&dbs_info->prev_cpu_wall);
if (dbs_tuners_ins.ignore_nice)
- dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+ dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
}
return count;
}
@@ -353,20 +352,20 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
- wall_time = (unsigned int) cputime64_sub(cur_wall_time,
- j_dbs_info->prev_cpu_wall);
+ wall_time = (unsigned int)
+ (cur_wall_time - j_dbs_info->prev_cpu_wall);
j_dbs_info->prev_cpu_wall = cur_wall_time;
- idle_time = (unsigned int) cputime64_sub(cur_idle_time,
- j_dbs_info->prev_cpu_idle);
+ idle_time = (unsigned int)
+ (cur_idle_time - j_dbs_info->prev_cpu_idle);
j_dbs_info->prev_cpu_idle = cur_idle_time;
if (dbs_tuners_ins.ignore_nice) {
- cputime64_t cur_nice;
+ u64 cur_nice;
unsigned long cur_nice_jiffies;
- cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
- j_dbs_info->prev_cpu_nice);
+ cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
+ j_dbs_info->prev_cpu_nice;
/*
* Assumption: nice time between sampling periods will
* be less than 2^32 jiffies for 32 bit sys
@@ -374,7 +373,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
cur_nice_jiffies = (unsigned long)
cputime64_to_jiffies64(cur_nice);
- j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+ j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
idle_time += jiffies_to_usecs(cur_nice_jiffies);
}
@@ -501,10 +500,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
&j_dbs_info->prev_cpu_wall);
- if (dbs_tuners_ins.ignore_nice) {
+ if (dbs_tuners_ins.ignore_nice)
j_dbs_info->prev_cpu_nice =
- kstat_cpu(j).cpustat.nice;
- }
+ kcpustat_cpu(j).cpustat[CPUTIME_NICE];
}
this_dbs_info->down_skip = 0;
this_dbs_info->requested_freq = policy->cur;
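
With the cputime64_t helpers gone, per-CPU statistics are read through kcpustat_cpu() as plain u64 values indexed by the CPUTIME_* constants, and busy/idle time becomes ordinary integer arithmetic. A minimal sketch of the busy-time sum used by the governors above, under that assumption (the helper name is illustrative):

#include <linux/kernel_stat.h>

/* Sum of all non-idle accounting buckets for one CPU, in cputime units. */
static u64 example_busy_time(unsigned int cpu)
{
	u64 busy;

	busy  = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	return busy;
}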
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index fa8af4e..c3e0652 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -119,27 +119,26 @@ static struct dbs_tuners {
.powersave_bias = 0,
};
-static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
- cputime64_t *wall)
+static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
- cputime64_t idle_time;
- cputime64_t cur_wall_time;
- cputime64_t busy_time;
+ u64 idle_time;
+ u64 cur_wall_time;
+ u64 busy_time;
cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
- busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
- kstat_cpu(cpu).cpustat.system);
- busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
- busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
- busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
- busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);
+ busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+ busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
- idle_time = cputime64_sub(cur_wall_time, busy_time);
+ idle_time = cur_wall_time - busy_time;
if (wall)
- *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
+ *wall = jiffies_to_usecs(cur_wall_time);
- return (cputime64_t)jiffies_to_usecs(idle_time);
+ return jiffies_to_usecs(idle_time);
}
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
@@ -345,7 +344,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
&dbs_info->prev_cpu_wall);
if (dbs_tuners_ins.ignore_nice)
- dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+ dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
}
return count;
@@ -442,24 +441,24 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
cur_iowait_time = get_cpu_iowait_time(j, &cur_wall_time);
- wall_time = (unsigned int) cputime64_sub(cur_wall_time,
- j_dbs_info->prev_cpu_wall);
+ wall_time = (unsigned int)
+ (cur_wall_time - j_dbs_info->prev_cpu_wall);
j_dbs_info->prev_cpu_wall = cur_wall_time;
- idle_time = (unsigned int) cputime64_sub(cur_idle_time,
- j_dbs_info->prev_cpu_idle);
+ idle_time = (unsigned int)
+ (cur_idle_time - j_dbs_info->prev_cpu_idle);
j_dbs_info->prev_cpu_idle = cur_idle_time;
- iowait_time = (unsigned int) cputime64_sub(cur_iowait_time,
- j_dbs_info->prev_cpu_iowait);
+ iowait_time = (unsigned int)
+ (cur_iowait_time - j_dbs_info->prev_cpu_iowait);
j_dbs_info->prev_cpu_iowait = cur_iowait_time;
if (dbs_tuners_ins.ignore_nice) {
- cputime64_t cur_nice;
+ u64 cur_nice;
unsigned long cur_nice_jiffies;
- cur_nice = cputime64_sub(kstat_cpu(j).cpustat.nice,
- j_dbs_info->prev_cpu_nice);
+ cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
+ j_dbs_info->prev_cpu_nice;
/*
* Assumption: nice time between sampling periods will
* be less than 2^32 jiffies for 32 bit sys
@@ -467,7 +466,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
cur_nice_jiffies = (unsigned long)
cputime64_to_jiffies64(cur_nice);
- j_dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+ j_dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
idle_time += jiffies_to_usecs(cur_nice_jiffies);
}
@@ -646,10 +645,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
&j_dbs_info->prev_cpu_wall);
- if (dbs_tuners_ins.ignore_nice) {
+ if (dbs_tuners_ins.ignore_nice)
j_dbs_info->prev_cpu_nice =
- kstat_cpu(j).cpustat.nice;
- }
+ kcpustat_cpu(j).cpustat[CPUTIME_NICE];
}
this_dbs_info->cpu = cpu;
this_dbs_info->rate_mult = 1;
@@ -715,11 +713,10 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
static int __init cpufreq_gov_dbs_init(void)
{
- cputime64_t wall;
u64 idle_time;
int cpu = get_cpu();
- idle_time = get_cpu_idle_time_us(cpu, &wall);
+ idle_time = get_cpu_idle_time_us(cpu, NULL);
put_cpu();
if (idle_time != -1ULL) {
/* Idle micro accounting is supported. Use finer thresholds */
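
The governor init above now passes a NULL wall-time pointer to get_cpu_idle_time_us(), since only the return value is needed to detect whether idle micro-accounting is available (-1ULL means it is not). A small sketch of that probe, with an illustrative helper name:

#include <linux/types.h>
#include <linux/tick.h>

/* True when NO_HZ idle micro-accounting can be used for this CPU. */
static bool example_has_idle_microaccounting(int cpu)
{
	return get_cpu_idle_time_us(cpu, NULL) != -1ULL;
}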
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index c5072a9..b40ee14 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -11,7 +11,6 @@
#include <linux/kernel.h>
#include <linux/slab.h>
-#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/sysfs.h>
#include <linux/cpufreq.h>
@@ -61,9 +60,8 @@ static int cpufreq_stats_update(unsigned int cpu)
spin_lock(&cpufreq_stats_lock);
stat = per_cpu(cpufreq_stats_table, cpu);
if (stat->time_in_state)
- stat->time_in_state[stat->last_index] =
- cputime64_add(stat->time_in_state[stat->last_index],
- cputime_sub(cur_time, stat->last_time));
+ stat->time_in_state[stat->last_index] +=
+ cur_time - stat->last_time;
stat->last_time = cur_time;
spin_unlock(&cpufreq_stats_lock);
return 0;
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index f231015..bedac1a 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -47,9 +47,11 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
if (!per_cpu(cpu_is_managed, freq->cpu))
return 0;
- pr_debug("saving cpu_cur_freq of cpu %u to be %u kHz\n",
- freq->cpu, freq->new);
- per_cpu(cpu_cur_freq, freq->cpu) = freq->new;
+ if (val == CPUFREQ_POSTCHANGE) {
+ pr_debug("saving cpu_cur_freq of cpu %u to be %u kHz\n",
+ freq->cpu, freq->new);
+ per_cpu(cpu_cur_freq, freq->cpu) = freq->new;
+ }
return 0;
}
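
The fix above makes the userspace governor act only on the POSTCHANGE phase of a frequency transition; the notifier's val argument distinguishes the PRECHANGE and POSTCHANGE calls of a single transition. A hedged sketch of the same pattern in a standalone transition notifier (example_* names are illustrative):

#include <linux/kernel.h>
#include <linux/cpufreq.h>
#include <linux/notifier.h>

static int example_transition(struct notifier_block *nb, unsigned long val,
			      void *data)
{
	struct cpufreq_freqs *freq = data;

	/* Ignore PRECHANGE; record the new frequency once it is in effect. */
	if (val == CPUFREQ_POSTCHANGE)
		pr_debug("cpu%u now runs at %u kHz\n", freq->cpu, freq->new);

	return 0;
}

static struct notifier_block example_nb = {
	.notifier_call = example_transition,
};

/* Hooked up with cpufreq_register_notifier(&example_nb,
 * CPUFREQ_TRANSITION_NOTIFIER). */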
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c
new file mode 100644
index 0000000..5467879
--- /dev/null
+++ b/drivers/cpufreq/exynos-cpufreq.c
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com
+ *
+ * EXYNOS - CPU frequency scaling support for EXYNOS series
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/regulator/consumer.h>
+#include <linux/cpufreq.h>
+#include <linux/suspend.h>
+
+#include <mach/cpufreq.h>
+
+#include <plat/cpu.h>
+
+static struct exynos_dvfs_info *exynos_info;
+
+static struct regulator *arm_regulator;
+static struct cpufreq_freqs freqs;
+
+static unsigned int locking_frequency;
+static bool frequency_locked;
+static DEFINE_MUTEX(cpufreq_lock);
+
+int exynos_verify_speed(struct cpufreq_policy *policy)
+{
+ return cpufreq_frequency_table_verify(policy,
+ exynos_info->freq_table);
+}
+
+unsigned int exynos_getspeed(unsigned int cpu)
+{
+ return clk_get_rate(exynos_info->cpu_clk) / 1000;
+}
+
+static int exynos_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int index, old_index;
+ unsigned int arm_volt, safe_arm_volt = 0;
+ int ret = 0;
+ struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
+ unsigned int *volt_table = exynos_info->volt_table;
+ unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz;
+
+ mutex_lock(&cpufreq_lock);
+
+ freqs.old = policy->cur;
+
+ if (frequency_locked && target_freq != locking_frequency) {
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ if (cpufreq_frequency_table_target(policy, freq_table,
+ freqs.old, relation, &old_index)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (cpufreq_frequency_table_target(policy, freq_table,
+ target_freq, relation, &index)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ freqs.new = freq_table[index].frequency;
+ freqs.cpu = policy->cpu;
+
+ /*
+ * The ARM clock source will temporarily be switched from APLL to
+ * MPLL. To support that level, the regulator must first be set to
+ * the required voltage.
+ */
+ if (exynos_info->need_apll_change != NULL) {
+ if (exynos_info->need_apll_change(old_index, index) &&
+ (freq_table[index].frequency < mpll_freq_khz) &&
+ (freq_table[old_index].frequency < mpll_freq_khz))
+ safe_arm_volt = volt_table[exynos_info->pll_safe_idx];
+ }
+ arm_volt = volt_table[index];
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+
+ /* When the new frequency is higher than current frequency */
+ if ((freqs.new > freqs.old) && !safe_arm_volt) {
+ /* Firstly, voltage up to increase frequency */
+ regulator_set_voltage(arm_regulator, arm_volt,
+ arm_volt);
+ }
+
+ if (safe_arm_volt)
+ regulator_set_voltage(arm_regulator, safe_arm_volt,
+ safe_arm_volt);
+ if (freqs.new != freqs.old)
+ exynos_info->set_freq(old_index, index);
+
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+ /* When the new frequency is lower than current frequency */
+ if ((freqs.new < freqs.old) ||
+ ((freqs.new > freqs.old) && safe_arm_volt)) {
+ /* down the voltage after frequency change */
+ regulator_set_voltage(arm_regulator, arm_volt,
+ arm_volt);
+ }
+
+out:
+ mutex_unlock(&cpufreq_lock);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int exynos_cpufreq_suspend(struct cpufreq_policy *policy)
+{
+ return 0;
+}
+
+static int exynos_cpufreq_resume(struct cpufreq_policy *policy)
+{
+ return 0;
+}
+#endif
+
+/**
+ * exynos_cpufreq_pm_notifier - block CPUFREQ's activities in suspend-resume
+ * context
+ * @notifier: notifier block
+ * @pm_event: PM event (PM_SUSPEND_PREPARE or PM_POST_SUSPEND)
+ * @v: unused
+ *
+ * While frequency_locked == true, target() ignores every frequency but
+ * locking_frequency. The locking_frequency value is the initial frequency,
+ * which is set by the bootloader. In order to eliminate possible
+ * inconsistency in clock values, we save and restore frequencies during
+ * suspend and resume and block CPUFREQ activities. Note that the standard
+ * suspend/resume cannot be used as they are too deep (syscore_ops) for
+ * regulator actions.
+ */
+static int exynos_cpufreq_pm_notifier(struct notifier_block *notifier,
+ unsigned long pm_event, void *v)
+{
+ struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
+ static unsigned int saved_frequency;
+ unsigned int temp;
+
+ mutex_lock(&cpufreq_lock);
+ switch (pm_event) {
+ case PM_SUSPEND_PREPARE:
+ if (frequency_locked)
+ goto out;
+
+ frequency_locked = true;
+
+ if (locking_frequency) {
+ saved_frequency = exynos_getspeed(0);
+
+ mutex_unlock(&cpufreq_lock);
+ exynos_target(policy, locking_frequency,
+ CPUFREQ_RELATION_H);
+ mutex_lock(&cpufreq_lock);
+ }
+ break;
+
+ case PM_POST_SUSPEND:
+ if (saved_frequency) {
+ /*
+ * While frequency_locked, only locking_frequency
+ * is valid for target(). In order to use
+ * saved_frequency while keeping frequency_locked,
+ * we temporarily overwrite locking_frequency.
+ */
+ temp = locking_frequency;
+ locking_frequency = saved_frequency;
+
+ mutex_unlock(&cpufreq_lock);
+ exynos_target(policy, locking_frequency,
+ CPUFREQ_RELATION_H);
+ mutex_lock(&cpufreq_lock);
+
+ locking_frequency = temp;
+ }
+ frequency_locked = false;
+ break;
+ }
+out:
+ mutex_unlock(&cpufreq_lock);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block exynos_cpufreq_nb = {
+ .notifier_call = exynos_cpufreq_pm_notifier,
+};
+
+static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ policy->cur = policy->min = policy->max = exynos_getspeed(policy->cpu);
+
+ cpufreq_frequency_table_get_attr(exynos_info->freq_table, policy->cpu);
+
+ /* set the transition latency value */
+ policy->cpuinfo.transition_latency = 100000;
+
+ /*
+ * EXYNOS4 multi-core processors have two cores whose
+ * frequencies cannot be set independently. Each cpu is
+ * bound to the same speed, so all of the cpus are affected.
+ */
+ if (num_online_cpus() == 1) {
+ cpumask_copy(policy->related_cpus, cpu_possible_mask);
+ cpumask_copy(policy->cpus, cpu_online_mask);
+ } else {
+ cpumask_setall(policy->cpus);
+ }
+
+ return cpufreq_frequency_table_cpuinfo(policy, exynos_info->freq_table);
+}
+
+static struct cpufreq_driver exynos_driver = {
+ .flags = CPUFREQ_STICKY,
+ .verify = exynos_verify_speed,
+ .target = exynos_target,
+ .get = exynos_getspeed,
+ .init = exynos_cpufreq_cpu_init,
+ .name = "exynos_cpufreq",
+#ifdef CONFIG_PM
+ .suspend = exynos_cpufreq_suspend,
+ .resume = exynos_cpufreq_resume,
+#endif
+};
+
+static int __init exynos_cpufreq_init(void)
+{
+ int ret = -EINVAL;
+
+ exynos_info = kzalloc(sizeof(struct exynos_dvfs_info), GFP_KERNEL);
+ if (!exynos_info)
+ return -ENOMEM;
+
+ if (soc_is_exynos4210())
+ ret = exynos4210_cpufreq_init(exynos_info);
+ else
+ pr_err("%s: CPU type not found\n", __func__);
+
+ if (ret)
+ goto err_vdd_arm;
+
+ if (exynos_info->set_freq == NULL) {
+ pr_err("%s: No set_freq function (ERR)\n", __func__);
+ goto err_vdd_arm;
+ }
+
+ arm_regulator = regulator_get(NULL, "vdd_arm");
+ if (IS_ERR(arm_regulator)) {
+ pr_err("%s: failed to get resource vdd_arm\n", __func__);
+ goto err_vdd_arm;
+ }
+
+ register_pm_notifier(&exynos_cpufreq_nb);
+
+ if (cpufreq_register_driver(&exynos_driver)) {
+ pr_err("%s: failed to register cpufreq driver\n", __func__);
+ goto err_cpufreq;
+ }
+
+ return 0;
+err_cpufreq:
+ unregister_pm_notifier(&exynos_cpufreq_nb);
+
+ if (!IS_ERR(arm_regulator))
+ regulator_put(arm_regulator);
+err_vdd_arm:
+ kfree(exynos_info);
+ pr_debug("%s: failed initialization\n", __func__);
+ return -EINVAL;
+}
+late_initcall(exynos_cpufreq_init);
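
exynos_target() above orders regulator and clock operations so the supply is always sufficient: voltage is raised before switching to a faster frequency and lowered only after switching to a slower one. A minimal sketch of that DVFS ordering, with illustrative names and error handling omitted for brevity:

#include <linux/clk.h>
#include <linux/regulator/consumer.h>

static void example_scale(struct regulator *reg, struct clk *clk,
			  unsigned long old_khz, unsigned long new_khz,
			  int new_uv)
{
	/* Going faster: raise the voltage first. */
	if (new_khz > old_khz)
		regulator_set_voltage(reg, new_uv, new_uv);

	clk_set_rate(clk, new_khz * 1000);

	/* Going slower: drop the voltage only after the clock change. */
	if (new_khz < old_khz)
		regulator_set_voltage(reg, new_uv, new_uv);
}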
diff --git a/drivers/cpufreq/exynos4210-cpufreq.c b/drivers/cpufreq/exynos4210-cpufreq.c
index ab9741f..065da5b 100644
--- a/drivers/cpufreq/exynos4210-cpufreq.c
+++ b/drivers/cpufreq/exynos4210-cpufreq.c
@@ -2,61 +2,52 @@
* Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * EXYNOS4 - CPU frequency scaling support
+ * EXYNOS4210 - CPU frequency scaling support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <linux/types.h>
+#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/slab.h>
-#include <linux/regulator/consumer.h>
#include <linux/cpufreq.h>
-#include <linux/notifier.h>
-#include <linux/suspend.h>
-#include <mach/map.h>
#include <mach/regs-clock.h>
-#include <mach/regs-mem.h>
+#include <mach/cpufreq.h>
-#include <plat/clock.h>
-#include <plat/pm.h>
+#define CPUFREQ_LEVEL_END L5
+
+static int max_support_idx = L0;
+static int min_support_idx = (CPUFREQ_LEVEL_END - 1);
static struct clk *cpu_clk;
static struct clk *moutcore;
static struct clk *mout_mpll;
static struct clk *mout_apll;
-static struct regulator *arm_regulator;
-static struct regulator *int_regulator;
-
-static struct cpufreq_freqs freqs;
-static unsigned int memtype;
-
-static unsigned int locking_frequency;
-static bool frequency_locked;
-static DEFINE_MUTEX(cpufreq_lock);
-
-enum exynos4_memory_type {
- DDR2 = 4,
- LPDDR2,
- DDR3,
+struct cpufreq_clkdiv {
+ unsigned int index;
+ unsigned int clkdiv;
};
-enum cpufreq_level_index {
- L0, L1, L2, L3, CPUFREQ_LEVEL_END,
+static unsigned int exynos4210_volt_table[CPUFREQ_LEVEL_END] = {
+ 1250000, 1150000, 1050000, 975000, 950000,
};
-static struct cpufreq_frequency_table exynos4_freq_table[] = {
- {L0, 1000*1000},
- {L1, 800*1000},
- {L2, 400*1000},
- {L3, 100*1000},
+
+static struct cpufreq_clkdiv exynos4210_clkdiv_table[CPUFREQ_LEVEL_END];
+
+static struct cpufreq_frequency_table exynos4210_freq_table[] = {
+ {L0, 1200*1000},
+ {L1, 1000*1000},
+ {L2, 800*1000},
+ {L3, 500*1000},
+ {L4, 200*1000},
{0, CPUFREQ_TABLE_END},
};
@@ -67,17 +58,20 @@ static unsigned int clkdiv_cpu0[CPUFREQ_LEVEL_END][7] = {
* DIVATB, DIVPCLK_DBG, DIVAPLL }
*/
- /* ARM L0: 1000MHz */
- { 0, 3, 7, 3, 3, 0, 1 },
+ /* ARM L0: 1200MHz */
+ { 0, 3, 7, 3, 4, 1, 7 },
- /* ARM L1: 800MHz */
- { 0, 3, 7, 3, 3, 0, 1 },
+ /* ARM L1: 1000MHz */
+ { 0, 3, 7, 3, 4, 1, 7 },
- /* ARM L2: 400MHz */
- { 0, 1, 3, 1, 3, 0, 1 },
+ /* ARM L2: 800MHz */
+ { 0, 3, 7, 3, 3, 1, 7 },
- /* ARM L3: 100MHz */
- { 0, 0, 1, 0, 3, 1, 1 },
+ /* ARM L3: 500MHz */
+ { 0, 3, 7, 3, 3, 1, 7 },
+
+ /* ARM L4: 200MHz */
+ { 0, 1, 3, 1, 3, 1, 0 },
};
static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = {
@@ -86,147 +80,46 @@ static unsigned int clkdiv_cpu1[CPUFREQ_LEVEL_END][2] = {
* { DIVCOPY, DIVHPM }
*/
- /* ARM L0: 1000MHz */
- { 3, 0 },
+ /* ARM L0: 1200MHz */
+ { 5, 0 },
- /* ARM L1: 800MHz */
- { 3, 0 },
+ /* ARM L1: 1000MHz */
+ { 4, 0 },
- /* ARM L2: 400MHz */
+ /* ARM L2: 800MHz */
{ 3, 0 },
- /* ARM L3: 100MHz */
+ /* ARM L3: 500MHz */
{ 3, 0 },
-};
-
-static unsigned int clkdiv_dmc0[CPUFREQ_LEVEL_END][8] = {
- /*
- * Clock divider value for following
- * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
- * DIVDMCP, DIVCOPY2, DIVCORE_TIMERS }
- */
-
- /* DMC L0: 400MHz */
- { 3, 1, 1, 1, 1, 1, 3, 1 },
-
- /* DMC L1: 400MHz */
- { 3, 1, 1, 1, 1, 1, 3, 1 },
-
- /* DMC L2: 266.7MHz */
- { 7, 1, 1, 2, 1, 1, 3, 1 },
-
- /* DMC L3: 200MHz */
- { 7, 1, 1, 3, 1, 1, 3, 1 },
-};
-
-static unsigned int clkdiv_top[CPUFREQ_LEVEL_END][5] = {
- /*
- * Clock divider value for following
- * { DIVACLK200, DIVACLK100, DIVACLK160, DIVACLK133, DIVONENAND }
- */
- /* ACLK200 L0: 200MHz */
- { 3, 7, 4, 5, 1 },
-
- /* ACLK200 L1: 200MHz */
- { 3, 7, 4, 5, 1 },
-
- /* ACLK200 L2: 160MHz */
- { 4, 7, 5, 7, 1 },
-
- /* ACLK200 L3: 133.3MHz */
- { 5, 7, 7, 7, 1 },
-};
-
-static unsigned int clkdiv_lr_bus[CPUFREQ_LEVEL_END][2] = {
- /*
- * Clock divider value for following
- * { DIVGDL/R, DIVGPL/R }
- */
-
- /* ACLK_GDL/R L0: 200MHz */
- { 3, 1 },
-
- /* ACLK_GDL/R L1: 200MHz */
- { 3, 1 },
-
- /* ACLK_GDL/R L2: 160MHz */
- { 4, 1 },
-
- /* ACLK_GDL/R L3: 133.3MHz */
- { 5, 1 },
-};
-
-struct cpufreq_voltage_table {
- unsigned int index; /* any */
- unsigned int arm_volt; /* uV */
- unsigned int int_volt;
+ /* ARM L4: 200MHz */
+ { 3, 0 },
};
-static struct cpufreq_voltage_table exynos4_volt_table[CPUFREQ_LEVEL_END] = {
- {
- .index = L0,
- .arm_volt = 1200000,
- .int_volt = 1100000,
- }, {
- .index = L1,
- .arm_volt = 1100000,
- .int_volt = 1100000,
- }, {
- .index = L2,
- .arm_volt = 1000000,
- .int_volt = 1000000,
- }, {
- .index = L3,
- .arm_volt = 900000,
- .int_volt = 1000000,
- },
-};
+static unsigned int exynos4210_apll_pms_table[CPUFREQ_LEVEL_END] = {
+ /* APLL FOUT L0: 1200MHz */
+ ((150 << 16) | (3 << 8) | 1),
-static unsigned int exynos4_apll_pms_table[CPUFREQ_LEVEL_END] = {
- /* APLL FOUT L0: 1000MHz */
+ /* APLL FOUT L1: 1000MHz */
((250 << 16) | (6 << 8) | 1),
- /* APLL FOUT L1: 800MHz */
+ /* APLL FOUT L2: 800MHz */
((200 << 16) | (6 << 8) | 1),
- /* APLL FOUT L2 : 400MHz */
- ((200 << 16) | (6 << 8) | 2),
+ /* APLL FOUT L3: 500MHz */
+ ((250 << 16) | (6 << 8) | 2),
- /* APLL FOUT L3: 100MHz */
- ((200 << 16) | (6 << 8) | 4),
+ /* APLL FOUT L4: 200MHz */
+ ((200 << 16) | (6 << 8) | 3),
};
-static int exynos4_verify_speed(struct cpufreq_policy *policy)
-{
- return cpufreq_frequency_table_verify(policy, exynos4_freq_table);
-}
-
-static unsigned int exynos4_getspeed(unsigned int cpu)
-{
- return clk_get_rate(cpu_clk) / 1000;
-}
-
-static void exynos4_set_clkdiv(unsigned int div_index)
+static void exynos4210_set_clkdiv(unsigned int div_index)
{
unsigned int tmp;
/* Change Divider - CPU0 */
- tmp = __raw_readl(S5P_CLKDIV_CPU);
-
- tmp &= ~(S5P_CLKDIV_CPU0_CORE_MASK | S5P_CLKDIV_CPU0_COREM0_MASK |
- S5P_CLKDIV_CPU0_COREM1_MASK | S5P_CLKDIV_CPU0_PERIPH_MASK |
- S5P_CLKDIV_CPU0_ATB_MASK | S5P_CLKDIV_CPU0_PCLKDBG_MASK |
- S5P_CLKDIV_CPU0_APLL_MASK);
-
- tmp |= ((clkdiv_cpu0[div_index][0] << S5P_CLKDIV_CPU0_CORE_SHIFT) |
- (clkdiv_cpu0[div_index][1] << S5P_CLKDIV_CPU0_COREM0_SHIFT) |
- (clkdiv_cpu0[div_index][2] << S5P_CLKDIV_CPU0_COREM1_SHIFT) |
- (clkdiv_cpu0[div_index][3] << S5P_CLKDIV_CPU0_PERIPH_SHIFT) |
- (clkdiv_cpu0[div_index][4] << S5P_CLKDIV_CPU0_ATB_SHIFT) |
- (clkdiv_cpu0[div_index][5] << S5P_CLKDIV_CPU0_PCLKDBG_SHIFT) |
- (clkdiv_cpu0[div_index][6] << S5P_CLKDIV_CPU0_APLL_SHIFT));
+ tmp = exynos4210_clkdiv_table[div_index].clkdiv;
__raw_writel(tmp, S5P_CLKDIV_CPU);
@@ -248,83 +141,9 @@ static void exynos4_set_clkdiv(unsigned int div_index)
do {
tmp = __raw_readl(S5P_CLKDIV_STATCPU1);
} while (tmp & 0x11);
-
- /* Change Divider - DMC0 */
-
- tmp = __raw_readl(S5P_CLKDIV_DMC0);
-
- tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK | S5P_CLKDIV_DMC0_ACPPCLK_MASK |
- S5P_CLKDIV_DMC0_DPHY_MASK | S5P_CLKDIV_DMC0_DMC_MASK |
- S5P_CLKDIV_DMC0_DMCD_MASK | S5P_CLKDIV_DMC0_DMCP_MASK |
- S5P_CLKDIV_DMC0_COPY2_MASK | S5P_CLKDIV_DMC0_CORETI_MASK);
-
- tmp |= ((clkdiv_dmc0[div_index][0] << S5P_CLKDIV_DMC0_ACP_SHIFT) |
- (clkdiv_dmc0[div_index][1] << S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
- (clkdiv_dmc0[div_index][2] << S5P_CLKDIV_DMC0_DPHY_SHIFT) |
- (clkdiv_dmc0[div_index][3] << S5P_CLKDIV_DMC0_DMC_SHIFT) |
- (clkdiv_dmc0[div_index][4] << S5P_CLKDIV_DMC0_DMCD_SHIFT) |
- (clkdiv_dmc0[div_index][5] << S5P_CLKDIV_DMC0_DMCP_SHIFT) |
- (clkdiv_dmc0[div_index][6] << S5P_CLKDIV_DMC0_COPY2_SHIFT) |
- (clkdiv_dmc0[div_index][7] << S5P_CLKDIV_DMC0_CORETI_SHIFT));
-
- __raw_writel(tmp, S5P_CLKDIV_DMC0);
-
- do {
- tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
- } while (tmp & 0x11111111);
-
- /* Change Divider - TOP */
-
- tmp = __raw_readl(S5P_CLKDIV_TOP);
-
- tmp &= ~(S5P_CLKDIV_TOP_ACLK200_MASK | S5P_CLKDIV_TOP_ACLK100_MASK |
- S5P_CLKDIV_TOP_ACLK160_MASK | S5P_CLKDIV_TOP_ACLK133_MASK |
- S5P_CLKDIV_TOP_ONENAND_MASK);
-
- tmp |= ((clkdiv_top[div_index][0] << S5P_CLKDIV_TOP_ACLK200_SHIFT) |
- (clkdiv_top[div_index][1] << S5P_CLKDIV_TOP_ACLK100_SHIFT) |
- (clkdiv_top[div_index][2] << S5P_CLKDIV_TOP_ACLK160_SHIFT) |
- (clkdiv_top[div_index][3] << S5P_CLKDIV_TOP_ACLK133_SHIFT) |
- (clkdiv_top[div_index][4] << S5P_CLKDIV_TOP_ONENAND_SHIFT));
-
- __raw_writel(tmp, S5P_CLKDIV_TOP);
-
- do {
- tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
- } while (tmp & 0x11111);
-
- /* Change Divider - LEFTBUS */
-
- tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);
-
- tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
-
- tmp |= ((clkdiv_lr_bus[div_index][0] << S5P_CLKDIV_BUS_GDLR_SHIFT) |
- (clkdiv_lr_bus[div_index][1] << S5P_CLKDIV_BUS_GPLR_SHIFT));
-
- __raw_writel(tmp, S5P_CLKDIV_LEFTBUS);
-
- do {
- tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
- } while (tmp & 0x11);
-
- /* Change Divider - RIGHTBUS */
-
- tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);
-
- tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
-
- tmp |= ((clkdiv_lr_bus[div_index][0] << S5P_CLKDIV_BUS_GDLR_SHIFT) |
- (clkdiv_lr_bus[div_index][1] << S5P_CLKDIV_BUS_GPLR_SHIFT));
-
- __raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);
-
- do {
- tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
- } while (tmp & 0x11);
}
-static void exynos4_set_apll(unsigned int index)
+static void exynos4210_set_apll(unsigned int index)
{
unsigned int tmp;
@@ -343,7 +162,7 @@ static void exynos4_set_apll(unsigned int index)
/* 3. Change PLL PMS values */
tmp = __raw_readl(S5P_APLL_CON0);
tmp &= ~((0x3ff << 16) | (0x3f << 8) | (0x7 << 0));
- tmp |= exynos4_apll_pms_table[index];
+ tmp |= exynos4210_apll_pms_table[index];
__raw_writel(tmp, S5P_APLL_CON0);
/* 4. wait_lock_time */
@@ -360,328 +179,126 @@ static void exynos4_set_apll(unsigned int index)
} while (tmp != (0x1 << S5P_CLKSRC_CPU_MUXCORE_SHIFT));
}
-static void exynos4_set_frequency(unsigned int old_index, unsigned int new_index)
+bool exynos4210_pms_change(unsigned int old_index, unsigned int new_index)
+{
+ unsigned int old_pm = (exynos4210_apll_pms_table[old_index] >> 8);
+ unsigned int new_pm = (exynos4210_apll_pms_table[new_index] >> 8);
+
+ return (old_pm == new_pm) ? 0 : 1;
+}
+
+static void exynos4210_set_frequency(unsigned int old_index,
+ unsigned int new_index)
{
unsigned int tmp;
if (old_index > new_index) {
- /* The frequency changing to L0 needs to change apll */
- if (freqs.new == exynos4_freq_table[L0].frequency) {
- /* 1. Change the system clock divider values */
- exynos4_set_clkdiv(new_index);
-
- /* 2. Change the apll m,p,s value */
- exynos4_set_apll(new_index);
- } else {
+ if (!exynos4210_pms_change(old_index, new_index)) {
/* 1. Change the system clock divider values */
- exynos4_set_clkdiv(new_index);
+ exynos4210_set_clkdiv(new_index);
/* 2. Change just s value in apll m,p,s value */
tmp = __raw_readl(S5P_APLL_CON0);
tmp &= ~(0x7 << 0);
- tmp |= (exynos4_apll_pms_table[new_index] & 0x7);
+ tmp |= (exynos4210_apll_pms_table[new_index] & 0x7);
__raw_writel(tmp, S5P_APLL_CON0);
- }
- }
-
- else if (old_index < new_index) {
- /* The frequency changing from L0 needs to change apll */
- if (freqs.old == exynos4_freq_table[L0].frequency) {
- /* 1. Change the apll m,p,s value */
- exynos4_set_apll(new_index);
-
- /* 2. Change the system clock divider values */
- exynos4_set_clkdiv(new_index);
} else {
+ /* Clock Configuration Procedure */
+ /* 1. Change the system clock divider values */
+ exynos4210_set_clkdiv(new_index);
+ /* 2. Change the apll m,p,s value */
+ exynos4210_set_apll(new_index);
+ }
+ } else if (old_index < new_index) {
+ if (!exynos4210_pms_change(old_index, new_index)) {
/* 1. Change just s value in apll m,p,s value */
tmp = __raw_readl(S5P_APLL_CON0);
tmp &= ~(0x7 << 0);
- tmp |= (exynos4_apll_pms_table[new_index] & 0x7);
+ tmp |= (exynos4210_apll_pms_table[new_index] & 0x7);
__raw_writel(tmp, S5P_APLL_CON0);
/* 2. Change the system clock divider values */
- exynos4_set_clkdiv(new_index);
+ exynos4210_set_clkdiv(new_index);
+ } else {
+ /* Clock Configuration Procedure */
+ /* 1. Change the apll m,p,s value */
+ exynos4210_set_apll(new_index);
+ /* 2. Change the system clock divider values */
+ exynos4210_set_clkdiv(new_index);
}
}
}
-static int exynos4_target(struct cpufreq_policy *policy,
- unsigned int target_freq,
- unsigned int relation)
-{
- unsigned int index, old_index;
- unsigned int arm_volt, int_volt;
- int err = -EINVAL;
-
- freqs.old = exynos4_getspeed(policy->cpu);
-
- mutex_lock(&cpufreq_lock);
-
- if (frequency_locked && target_freq != locking_frequency) {
- err = -EAGAIN;
- goto out;
- }
-
- if (cpufreq_frequency_table_target(policy, exynos4_freq_table,
- freqs.old, relation, &old_index))
- goto out;
-
- if (cpufreq_frequency_table_target(policy, exynos4_freq_table,
- target_freq, relation, &index))
- goto out;
-
- err = 0;
-
- freqs.new = exynos4_freq_table[index].frequency;
- freqs.cpu = policy->cpu;
-
- if (freqs.new == freqs.old)
- goto out;
-
- /* get the voltage value */
- arm_volt = exynos4_volt_table[index].arm_volt;
- int_volt = exynos4_volt_table[index].int_volt;
-
- cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-
- /* control regulator */
- if (freqs.new > freqs.old) {
- /* Voltage up */
- regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
- regulator_set_voltage(int_regulator, int_volt, int_volt);
- }
-
- /* Clock Configuration Procedure */
- exynos4_set_frequency(old_index, index);
-
- /* control regulator */
- if (freqs.new < freqs.old) {
- /* Voltage down */
- regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
- regulator_set_voltage(int_regulator, int_volt, int_volt);
- }
-
- cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-
-out:
- mutex_unlock(&cpufreq_lock);
- return err;
-}
-
-#ifdef CONFIG_PM
-/*
- * These suspend/resume are used as syscore_ops, it is already too
- * late to set regulator voltages at this stage.
- */
-static int exynos4_cpufreq_suspend(struct cpufreq_policy *policy)
-{
- return 0;
-}
-
-static int exynos4_cpufreq_resume(struct cpufreq_policy *policy)
+int exynos4210_cpufreq_init(struct exynos_dvfs_info *info)
{
- return 0;
-}
-#endif
-
-/**
- * exynos4_cpufreq_pm_notifier - block CPUFREQ's activities in suspend-resume
- * context
- * @notifier
- * @pm_event
- * @v
- *
- * While frequency_locked == true, target() ignores every frequency but
- * locking_frequency. The locking_frequency value is the initial frequency,
- * which is set by the bootloader. In order to eliminate possible
- * inconsistency in clock values, we save and restore frequencies during
- * suspend and resume and block CPUFREQ activities. Note that the standard
- * suspend/resume cannot be used as they are too deep (syscore_ops) for
- * regulator actions.
- */
-static int exynos4_cpufreq_pm_notifier(struct notifier_block *notifier,
- unsigned long pm_event, void *v)
-{
- struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
- static unsigned int saved_frequency;
- unsigned int temp;
-
- mutex_lock(&cpufreq_lock);
- switch (pm_event) {
- case PM_SUSPEND_PREPARE:
- if (frequency_locked)
- goto out;
- frequency_locked = true;
-
- if (locking_frequency) {
- saved_frequency = exynos4_getspeed(0);
-
- mutex_unlock(&cpufreq_lock);
- exynos4_target(policy, locking_frequency,
- CPUFREQ_RELATION_H);
- mutex_lock(&cpufreq_lock);
- }
-
- break;
- case PM_POST_SUSPEND:
-
- if (saved_frequency) {
- /*
- * While frequency_locked, only locking_frequency
- * is valid for target(). In order to use
- * saved_frequency while keeping frequency_locked,
- * we temporarly overwrite locking_frequency.
- */
- temp = locking_frequency;
- locking_frequency = saved_frequency;
-
- mutex_unlock(&cpufreq_lock);
- exynos4_target(policy, locking_frequency,
- CPUFREQ_RELATION_H);
- mutex_lock(&cpufreq_lock);
-
- locking_frequency = temp;
- }
-
- frequency_locked = false;
- break;
- }
-out:
- mutex_unlock(&cpufreq_lock);
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block exynos4_cpufreq_nb = {
- .notifier_call = exynos4_cpufreq_pm_notifier,
-};
-
-static int exynos4_cpufreq_cpu_init(struct cpufreq_policy *policy)
-{
- int ret;
-
- policy->cur = policy->min = policy->max = exynos4_getspeed(policy->cpu);
-
- cpufreq_frequency_table_get_attr(exynos4_freq_table, policy->cpu);
-
- /* set the transition latency value */
- policy->cpuinfo.transition_latency = 100000;
-
- /*
- * EXYNOS4 multi-core processors has 2 cores
- * that the frequency cannot be set independently.
- * Each cpu is bound to the same speed.
- * So the affected cpu is all of the cpus.
- */
- cpumask_setall(policy->cpus);
-
- ret = cpufreq_frequency_table_cpuinfo(policy, exynos4_freq_table);
- if (ret)
- return ret;
-
- cpufreq_frequency_table_get_attr(exynos4_freq_table, policy->cpu);
-
- return 0;
-}
-
-static int exynos4_cpufreq_cpu_exit(struct cpufreq_policy *policy)
-{
- cpufreq_frequency_table_put_attr(policy->cpu);
- return 0;
-}
-
-static struct freq_attr *exynos4_cpufreq_attr[] = {
- &cpufreq_freq_attr_scaling_available_freqs,
- NULL,
-};
-
-static struct cpufreq_driver exynos4_driver = {
- .flags = CPUFREQ_STICKY,
- .verify = exynos4_verify_speed,
- .target = exynos4_target,
- .get = exynos4_getspeed,
- .init = exynos4_cpufreq_cpu_init,
- .exit = exynos4_cpufreq_cpu_exit,
- .name = "exynos4_cpufreq",
- .attr = exynos4_cpufreq_attr,
-#ifdef CONFIG_PM
- .suspend = exynos4_cpufreq_suspend,
- .resume = exynos4_cpufreq_resume,
-#endif
-};
+ int i;
+ unsigned int tmp;
+ unsigned long rate;
-static int __init exynos4_cpufreq_init(void)
-{
cpu_clk = clk_get(NULL, "armclk");
if (IS_ERR(cpu_clk))
return PTR_ERR(cpu_clk);
- locking_frequency = exynos4_getspeed(0);
-
moutcore = clk_get(NULL, "moutcore");
if (IS_ERR(moutcore))
- goto out;
+ goto err_moutcore;
mout_mpll = clk_get(NULL, "mout_mpll");
if (IS_ERR(mout_mpll))
- goto out;
+ goto err_mout_mpll;
+
+ rate = clk_get_rate(mout_mpll) / 1000;
mout_apll = clk_get(NULL, "mout_apll");
if (IS_ERR(mout_apll))
- goto out;
+ goto err_mout_apll;
- arm_regulator = regulator_get(NULL, "vdd_arm");
- if (IS_ERR(arm_regulator)) {
- printk(KERN_ERR "failed to get resource %s\n", "vdd_arm");
- goto out;
- }
+ tmp = __raw_readl(S5P_CLKDIV_CPU);
- int_regulator = regulator_get(NULL, "vdd_int");
- if (IS_ERR(int_regulator)) {
- printk(KERN_ERR "failed to get resource %s\n", "vdd_int");
- goto out;
+ for (i = L0; i < CPUFREQ_LEVEL_END; i++) {
+ tmp &= ~(S5P_CLKDIV_CPU0_CORE_MASK |
+ S5P_CLKDIV_CPU0_COREM0_MASK |
+ S5P_CLKDIV_CPU0_COREM1_MASK |
+ S5P_CLKDIV_CPU0_PERIPH_MASK |
+ S5P_CLKDIV_CPU0_ATB_MASK |
+ S5P_CLKDIV_CPU0_PCLKDBG_MASK |
+ S5P_CLKDIV_CPU0_APLL_MASK);
+
+ tmp |= ((clkdiv_cpu0[i][0] << S5P_CLKDIV_CPU0_CORE_SHIFT) |
+ (clkdiv_cpu0[i][1] << S5P_CLKDIV_CPU0_COREM0_SHIFT) |
+ (clkdiv_cpu0[i][2] << S5P_CLKDIV_CPU0_COREM1_SHIFT) |
+ (clkdiv_cpu0[i][3] << S5P_CLKDIV_CPU0_PERIPH_SHIFT) |
+ (clkdiv_cpu0[i][4] << S5P_CLKDIV_CPU0_ATB_SHIFT) |
+ (clkdiv_cpu0[i][5] << S5P_CLKDIV_CPU0_PCLKDBG_SHIFT) |
+ (clkdiv_cpu0[i][6] << S5P_CLKDIV_CPU0_APLL_SHIFT));
+
+ exynos4210_clkdiv_table[i].clkdiv = tmp;
}
- /*
- * Check DRAM type.
- * Because DVFS level is different according to DRAM type.
- */
- memtype = __raw_readl(S5P_VA_DMC0 + S5P_DMC0_MEMCON_OFFSET);
- memtype = (memtype >> S5P_DMC0_MEMTYPE_SHIFT);
- memtype &= S5P_DMC0_MEMTYPE_MASK;
-
- if ((memtype < DDR2) && (memtype > DDR3)) {
- printk(KERN_ERR "%s: wrong memtype= 0x%x\n", __func__, memtype);
- goto out;
- } else {
- printk(KERN_DEBUG "%s: memtype= 0x%x\n", __func__, memtype);
- }
-
- register_pm_notifier(&exynos4_cpufreq_nb);
-
- return cpufreq_register_driver(&exynos4_driver);
-
-out:
- if (!IS_ERR(cpu_clk))
- clk_put(cpu_clk);
+ info->mpll_freq_khz = rate;
+ info->pm_lock_idx = L2;
+ info->pll_safe_idx = L2;
+ info->max_support_idx = max_support_idx;
+ info->min_support_idx = min_support_idx;
+ info->cpu_clk = cpu_clk;
+ info->volt_table = exynos4210_volt_table;
+ info->freq_table = exynos4210_freq_table;
+ info->set_freq = exynos4210_set_frequency;
+ info->need_apll_change = exynos4210_pms_change;
- if (!IS_ERR(moutcore))
- clk_put(moutcore);
+ return 0;
+err_mout_apll:
if (!IS_ERR(mout_mpll))
clk_put(mout_mpll);
+err_mout_mpll:
+ if (!IS_ERR(moutcore))
+ clk_put(moutcore);
+err_moutcore:
+ if (!IS_ERR(cpu_clk))
+ clk_put(cpu_clk);
- if (!IS_ERR(mout_apll))
- clk_put(mout_apll);
-
- if (!IS_ERR(arm_regulator))
- regulator_put(arm_regulator);
-
- if (!IS_ERR(int_regulator))
- regulator_put(int_regulator);
-
- printk(KERN_ERR "%s: failed initialization\n", __func__);
-
+ pr_debug("%s: failed initialization\n", __func__);
return -EINVAL;
}
-late_initcall(exynos4_cpufreq_init);
+EXPORT_SYMBOL(exynos4210_cpufreq_init);
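
exynos4210_pms_change() above compares only the fields above S in two APLL settings: when they match, the PLL keeps its lock and just the S (post) divider is rewritten, which is the cheaper path taken by exynos4210_set_frequency(). A small sketch of that comparison on the packed table value (helper name is illustrative):

#include <linux/types.h>

/*
 * The table packs an APLL setting as (M << 16) | (P << 8) | S.
 * Only when M or P differ does the PLL need to be relocked via the
 * full set_apll() sequence; an S-only change is applied in place.
 */
static bool example_needs_apll_relock(unsigned int old_pms, unsigned int new_pms)
{
	return (old_pms >> 8) != (new_pms >> 8);
}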
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
new file mode 100644
index 0000000..5d04c57
--- /dev/null
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -0,0 +1,274 @@
+/*
+ * CPU frequency scaling for OMAP using OPP information
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Written by Tony Lindgren <tony@atomide.com>
+ *
+ * Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
+ *
+ * Copyright (C) 2007-2011 Texas Instruments, Inc.
+ * - OMAP3/4 support by Rajendra Nayak, Santosh Shilimkar
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/opp.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
+
+#include <asm/system.h>
+#include <asm/smp_plat.h>
+#include <asm/cpu.h>
+
+#include <plat/clock.h>
+#include <plat/omap-pm.h>
+#include <plat/common.h>
+#include <plat/omap_device.h>
+
+#include <mach/hardware.h>
+
+#ifdef CONFIG_SMP
+struct lpj_info {
+ unsigned long ref;
+ unsigned int freq;
+};
+
+static DEFINE_PER_CPU(struct lpj_info, lpj_ref);
+static struct lpj_info global_lpj_ref;
+#endif
+
+static struct cpufreq_frequency_table *freq_table;
+static atomic_t freq_table_users = ATOMIC_INIT(0);
+static struct clk *mpu_clk;
+static char *mpu_clk_name;
+static struct device *mpu_dev;
+
+static int omap_verify_speed(struct cpufreq_policy *policy)
+{
+ if (!freq_table)
+ return -EINVAL;
+ return cpufreq_frequency_table_verify(policy, freq_table);
+}
+
+static unsigned int omap_getspeed(unsigned int cpu)
+{
+ unsigned long rate;
+
+ if (cpu >= NR_CPUS)
+ return 0;
+
+ rate = clk_get_rate(mpu_clk) / 1000;
+ return rate;
+}
+
+static int omap_target(struct cpufreq_policy *policy,
+ unsigned int target_freq,
+ unsigned int relation)
+{
+ unsigned int i;
+ int ret = 0;
+ struct cpufreq_freqs freqs;
+
+ if (!freq_table) {
+ dev_err(mpu_dev, "%s: cpu%d: no freq table!\n", __func__,
+ policy->cpu);
+ return -EINVAL;
+ }
+
+ ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
+ relation, &i);
+ if (ret) {
+ dev_dbg(mpu_dev, "%s: cpu%d: no freq match for %d(ret=%d)\n",
+ __func__, policy->cpu, target_freq, ret);
+ return ret;
+ }
+ freqs.new = freq_table[i].frequency;
+ if (!freqs.new) {
+ dev_err(mpu_dev, "%s: cpu%d: no match for freq %d\n", __func__,
+ policy->cpu, target_freq);
+ return -EINVAL;
+ }
+
+ freqs.old = omap_getspeed(policy->cpu);
+ freqs.cpu = policy->cpu;
+
+ if (freqs.old == freqs.new && policy->cur == freqs.new)
+ return ret;
+
+ /* notifiers */
+ for_each_cpu(i, policy->cpus) {
+ freqs.cpu = i;
+ cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+ }
+
+#ifdef CONFIG_CPU_FREQ_DEBUG
+ pr_info("cpufreq-omap: transition: %u --> %u\n", freqs.old, freqs.new);
+#endif
+
+ ret = clk_set_rate(mpu_clk, freqs.new * 1000);
+ freqs.new = omap_getspeed(policy->cpu);
+
+#ifdef CONFIG_SMP
+ /*
+ * Note that loops_per_jiffy is not updated on SMP systems in
+ * cpufreq driver. So, update the per-CPU loops_per_jiffy value
+ * on frequency transition. We need to update all dependent CPUs.
+ */
+ for_each_cpu(i, policy->cpus) {
+ struct lpj_info *lpj = &per_cpu(lpj_ref, i);
+ if (!lpj->freq) {
+ lpj->ref = per_cpu(cpu_data, i).loops_per_jiffy;
+ lpj->freq = freqs.old;
+ }
+
+ per_cpu(cpu_data, i).loops_per_jiffy =
+ cpufreq_scale(lpj->ref, lpj->freq, freqs.new);
+ }
+
+ /* And don't forget to adjust the global one */
+ if (!global_lpj_ref.freq) {
+ global_lpj_ref.ref = loops_per_jiffy;
+ global_lpj_ref.freq = freqs.old;
+ }
+ loops_per_jiffy = cpufreq_scale(global_lpj_ref.ref, global_lpj_ref.freq,
+ freqs.new);
+#endif
+
+ /* notifiers */
+ for_each_cpu(i, policy->cpus) {
+ freqs.cpu = i;
+ cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+ }
+
+ return ret;
+}
+
+static inline void freq_table_free(void)
+{
+ if (atomic_dec_and_test(&freq_table_users))
+ opp_free_cpufreq_table(mpu_dev, &freq_table);
+}
+
+static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
+{
+ int result = 0;
+
+ mpu_clk = clk_get(NULL, mpu_clk_name);
+ if (IS_ERR(mpu_clk))
+ return PTR_ERR(mpu_clk);
+
+ if (policy->cpu >= NR_CPUS) {
+ result = -EINVAL;
+ goto fail_ck;
+ }
+
+ policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu);
+
+ if (atomic_inc_return(&freq_table_users) == 1)
+ result = opp_init_cpufreq_table(mpu_dev, &freq_table);
+
+ if (result) {
+ dev_err(mpu_dev, "%s: cpu%d: failed creating freq table[%d]\n",
+ __func__, policy->cpu, result);
+ goto fail_ck;
+ }
+
+ result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+ if (result)
+ goto fail_table;
+
+ cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
+
+ policy->min = policy->cpuinfo.min_freq;
+ policy->max = policy->cpuinfo.max_freq;
+ policy->cur = omap_getspeed(policy->cpu);
+
+ /*
+ * On an OMAP SMP configuration, both processors share the voltage
+ * and clock, so both CPUs need to be scaled together and hence
+ * need software co-ordination. Use the cpufreq affected_cpus
+ * interface to handle this scenario. The additional is_smp() check
+ * is to keep the SMP_ON_UP build working.
+ */
+ if (is_smp()) {
+ policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
+ cpumask_setall(policy->cpus);
+ }
+
+ /* FIXME: what's the actual transition time? */
+ policy->cpuinfo.transition_latency = 300 * 1000;
+
+ return 0;
+
+fail_table:
+ freq_table_free();
+fail_ck:
+ clk_put(mpu_clk);
+ return result;
+}
+
+static int omap_cpu_exit(struct cpufreq_policy *policy)
+{
+ freq_table_free();
+ clk_put(mpu_clk);
+ return 0;
+}
+
+static struct freq_attr *omap_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver omap_driver = {
+ .flags = CPUFREQ_STICKY,
+ .verify = omap_verify_speed,
+ .target = omap_target,
+ .get = omap_getspeed,
+ .init = omap_cpu_init,
+ .exit = omap_cpu_exit,
+ .name = "omap",
+ .attr = omap_cpufreq_attr,
+};
+
+static int __init omap_cpufreq_init(void)
+{
+ if (cpu_is_omap24xx())
+ mpu_clk_name = "virt_prcm_set";
+ else if (cpu_is_omap34xx())
+ mpu_clk_name = "dpll1_ck";
+ else if (cpu_is_omap44xx())
+ mpu_clk_name = "dpll_mpu_ck";
+
+ if (!mpu_clk_name) {
+ pr_err("%s: unsupported Silicon?\n", __func__);
+ return -EINVAL;
+ }
+
+ mpu_dev = omap_device_get_by_hwmod_name("mpu");
+ if (!mpu_dev) {
+ pr_warning("%s: unable to get the mpu device\n", __func__);
+ return -EINVAL;
+ }
+
+ return cpufreq_register_driver(&omap_driver);
+}
+
+static void __exit omap_cpufreq_exit(void)
+{
+ cpufreq_unregister_driver(&omap_driver);
+}
+
+MODULE_DESCRIPTION("cpufreq driver for OMAP SoCs");
+MODULE_LICENSE("GPL");
+module_init(omap_cpufreq_init);
+module_exit(omap_cpufreq_exit);
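
Because the generic cpufreq core does not touch loops_per_jiffy on SMP, omap_target() above rescales both the per-CPU and the global value with cpufreq_scale(), which scales a reference lpj taken at a reference frequency to the frequency just set. A one-line sketch of that helper's use (names are illustrative):

#include <linux/cpufreq.h>

/* lpj that keeps udelay() calibrated after moving from ref_khz to new_khz. */
static unsigned long example_rescale_lpj(unsigned long ref_lpj,
					 unsigned int ref_khz,
					 unsigned int new_khz)
{
	return cpufreq_scale(ref_lpj, ref_khz, new_khz);
}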
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index bce576d..8f9b2cee 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1,10 +1,11 @@
/*
- * (c) 2003-2010 Advanced Micro Devices, Inc.
+ * (c) 2003-2012 Advanced Micro Devices, Inc.
* Your use of this code is subject to the terms and conditions of the
* GNU general public license version 2. See "COPYING" or
* http://www.gnu.org/licenses/gpl.html
*
- * Support : mark.langsdorf@amd.com
+ * Maintainer:
+ * Andreas Herrmann <andreas.herrmann3@amd.com>
*
* Based on the powernow-k7.c module written by Dave Jones.
* (C) 2003 Dave Jones on behalf of SuSE Labs
@@ -16,12 +17,14 @@
* Valuable input gratefully received from Dave Jones, Pavel Machek,
* Dominik Brodowski, Jacob Shin, and others.
* Originally developed by Paul Devriendt.
- * Processor information obtained from Chapter 9 (Power and Thermal Management)
- * of the "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
- * Opteron Processors" available for download from www.amd.com
*
- * Tables for specific CPUs can be inferred from
- * http://www.amd.com/us-en/assets/content_type/white_papers_and_tech_docs/30430.pdf
+ * Processor information obtained from Chapter 9 (Power and Thermal
+ * Management) of the "BIOS and Kernel Developer's Guide (BKDG) for
+ * the AMD Athlon 64 and AMD Opteron Processors" and section "2.x
+ * Power Management" in BKDGs for newer AMD CPU families.
+ *
+ * Tables for specific CPUs can be inferred from AMD's processor
+ * power and thermal data sheets, (e.g. 30417.pdf, 30430.pdf, 43375.pdf)
*/
#include <linux/kernel.h>
@@ -54,6 +57,9 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
static int cpu_family = CPU_OPTERON;
+/* array to map SW pstate number to acpi state */
+static u32 ps_to_as[8];
+
/* core performance boost */
static bool cpb_capable, cpb_enabled;
static struct msr __percpu *msrs;
@@ -80,9 +86,9 @@ static u32 find_khz_freq_from_fid(u32 fid)
}
static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data,
- u32 pstate)
+ u32 pstate)
{
- return data[pstate].frequency;
+ return data[ps_to_as[pstate]].frequency;
}
/* Return the vco fid for an input fid
@@ -926,23 +932,27 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data,
invalidate_entry(powernow_table, i);
continue;
}
- rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
- if (!(hi & HW_PSTATE_VALID_MASK)) {
- pr_debug("invalid pstate %d, ignoring\n", index);
- invalidate_entry(powernow_table, i);
- continue;
- }
- powernow_table[i].index = index;
+ ps_to_as[index] = i;
/* Frequency may be rounded for these */
if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
|| boot_cpu_data.x86 == 0x11) {
+
+ rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
+ if (!(hi & HW_PSTATE_VALID_MASK)) {
+ pr_debug("invalid pstate %d, ignoring\n", index);
+ invalidate_entry(powernow_table, i);
+ continue;
+ }
+
powernow_table[i].frequency =
freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
} else
powernow_table[i].frequency =
data->acpi_data.states[i].core_frequency * 1000;
+
+ powernow_table[i].index = index;
}
return 0;
}
@@ -1189,7 +1199,8 @@ static int powernowk8_target(struct cpufreq_policy *pol,
powernow_k8_acpi_pst_values(data, newstate);
if (cpu_family == CPU_HW_PSTATE)
- ret = transition_frequency_pstate(data, newstate);
+ ret = transition_frequency_pstate(data,
+ data->powernow_table[newstate].index);
else
ret = transition_frequency_fidvid(data, newstate);
if (ret) {
@@ -1202,7 +1213,7 @@ static int powernowk8_target(struct cpufreq_policy *pol,
if (cpu_family == CPU_HW_PSTATE)
pol->cur = find_khz_freq_from_pstate(data->powernow_table,
- newstate);
+ data->powernow_table[newstate].index);
else
pol->cur = find_khz_freq_from_fid(data->currfid);
ret = 0;
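
The powernow-k8 change above stops assuming that ACPI _PSS entries and hardware P-state numbers share the same ordering: ps_to_as[] now maps a hardware P-state back to the cpufreq table slot that describes it, and frequency lookups go through that map. A minimal sketch of the lookup, with illustrative names:

#include <linux/cpufreq.h>

static u32 example_ps_to_as[8];	/* filled while building the table */

static u32 example_freq_for_pstate(struct cpufreq_frequency_table *table,
				   u32 pstate)
{
	return table[example_ps_to_as[pstate]].frequency;
}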
diff --git a/drivers/cpufreq/s3c64xx-cpufreq.c b/drivers/cpufreq/s3c64xx-cpufreq.c
index 3475f65..a5e72cb 100644
--- a/drivers/cpufreq/s3c64xx-cpufreq.c
+++ b/drivers/cpufreq/s3c64xx-cpufreq.c
@@ -8,6 +8,8 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) "cpufreq: " fmt
+
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -91,7 +93,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
if (freqs.old == freqs.new)
return 0;
- pr_debug("cpufreq: Transition %d-%dkHz\n", freqs.old, freqs.new);
+ pr_debug("Transition %d-%dkHz\n", freqs.old, freqs.new);
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
@@ -101,7 +103,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
dvfs->vddarm_min,
dvfs->vddarm_max);
if (ret != 0) {
- pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n",
+ pr_err("Failed to set VDDARM for %dkHz: %d\n",
freqs.new, ret);
goto err;
}
@@ -110,7 +112,7 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
ret = clk_set_rate(armclk, freqs.new * 1000);
if (ret < 0) {
- pr_err("cpufreq: Failed to set rate %dkHz: %d\n",
+ pr_err("Failed to set rate %dkHz: %d\n",
freqs.new, ret);
goto err;
}
@@ -123,14 +125,14 @@ static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
dvfs->vddarm_min,
dvfs->vddarm_max);
if (ret != 0) {
- pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n",
+ pr_err("Failed to set VDDARM for %dkHz: %d\n",
freqs.new, ret);
goto err_clk;
}
}
#endif
- pr_debug("cpufreq: Set actual frequency %lukHz\n",
+ pr_debug("Set actual frequency %lukHz\n",
clk_get_rate(armclk) / 1000);
return 0;
@@ -153,7 +155,7 @@ static void __init s3c64xx_cpufreq_config_regulator(void)
count = regulator_count_voltages(vddarm);
if (count < 0) {
- pr_err("cpufreq: Unable to check supported voltages\n");
+ pr_err("Unable to check supported voltages\n");
}
freq = s3c64xx_freq_table;
@@ -171,7 +173,7 @@ static void __init s3c64xx_cpufreq_config_regulator(void)
}
if (!found) {
- pr_debug("cpufreq: %dkHz unsupported by regulator\n",
+ pr_debug("%dkHz unsupported by regulator\n",
freq->frequency);
freq->frequency = CPUFREQ_ENTRY_INVALID;
}
@@ -194,13 +196,13 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
return -EINVAL;
if (s3c64xx_freq_table == NULL) {
- pr_err("cpufreq: No frequency information for this CPU\n");
+ pr_err("No frequency information for this CPU\n");
return -ENODEV;
}
armclk = clk_get(NULL, "armclk");
if (IS_ERR(armclk)) {
- pr_err("cpufreq: Unable to obtain ARMCLK: %ld\n",
+ pr_err("Unable to obtain ARMCLK: %ld\n",
PTR_ERR(armclk));
return PTR_ERR(armclk);
}
@@ -209,12 +211,19 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
vddarm = regulator_get(NULL, "vddarm");
if (IS_ERR(vddarm)) {
ret = PTR_ERR(vddarm);
- pr_err("cpufreq: Failed to obtain VDDARM: %d\n", ret);
- pr_err("cpufreq: Only frequency scaling available\n");
+ pr_err("Failed to obtain VDDARM: %d\n", ret);
+ pr_err("Only frequency scaling available\n");
vddarm = NULL;
} else {
s3c64xx_cpufreq_config_regulator();
}
+
+ vddint = regulator_get(NULL, "vddint");
+ if (IS_ERR(vddint)) {
+ ret = PTR_ERR(vddint);
+ pr_err("Failed to obtain VDDINT: %d\n", ret);
+ vddint = NULL;
+ }
#endif
freq = s3c64xx_freq_table;
@@ -225,7 +234,7 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
r = clk_round_rate(armclk, freq->frequency * 1000);
r /= 1000;
if (r != freq->frequency) {
- pr_debug("cpufreq: %dkHz unsupported by clock\n",
+ pr_debug("%dkHz unsupported by clock\n",
freq->frequency);
freq->frequency = CPUFREQ_ENTRY_INVALID;
}
@@ -248,7 +257,7 @@ static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table);
if (ret != 0) {
- pr_err("cpufreq: Failed to configure frequency table: %d\n",
+ pr_err("Failed to configure frequency table: %d\n",
ret);
regulator_put(vddarm);
clk_put(armclk);
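
The s3c64xx changes above rely on the pr_fmt() convention: defining pr_fmt before the first include gives every pr_err()/pr_debug() in the file a common "cpufreq: " prefix, so the literal prefix can be dropped from each format string. A tiny sketch:

#define pr_fmt(fmt) "cpufreq: " fmt

#include <linux/kernel.h>

static void example_log(unsigned int khz)
{
	/* Prints "cpufreq: Transition to <khz>kHz" when dynamic debug is enabled. */
	pr_debug("Transition to %ukHz\n", khz);
}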
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig
index 7dbc4a8..78a666d 100644
--- a/drivers/cpuidle/Kconfig
+++ b/drivers/cpuidle/Kconfig
@@ -1,7 +1,7 @@
config CPU_IDLE
bool "CPU idle PM support"
- default ACPI
+ default y if ACPI || PPC_PSERIES
help
CPU idle is a generic framework for supporting software-controlled
idle processor power management. It includes modular cross-platform
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 06ce268..59f4261 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -291,10 +291,10 @@ EXPORT_SYMBOL_GPL(cpuidle_disable_device);
static int __cpuidle_register_device(struct cpuidle_device *dev)
{
int ret;
- struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
+ struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
- if (!sys_dev)
+ if (!cpu_dev)
return -EINVAL;
if (!try_module_get(cpuidle_driver->owner))
return -EINVAL;
@@ -303,7 +303,7 @@ static int __cpuidle_register_device(struct cpuidle_device *dev)
per_cpu(cpuidle_devices, dev->cpu) = dev;
list_add(&dev->device_list, &cpuidle_detected_devices);
- if ((ret = cpuidle_add_sysfs(sys_dev))) {
+ if ((ret = cpuidle_add_sysfs(cpu_dev))) {
module_put(cpuidle_driver->owner);
return ret;
}
@@ -344,7 +344,7 @@ EXPORT_SYMBOL_GPL(cpuidle_register_device);
*/
void cpuidle_unregister_device(struct cpuidle_device *dev)
{
- struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
+ struct device *cpu_dev = get_cpu_device((unsigned long)dev->cpu);
struct cpuidle_driver *cpuidle_driver = cpuidle_get_driver();
if (dev->registered == 0)
@@ -354,7 +354,7 @@ void cpuidle_unregister_device(struct cpuidle_device *dev)
cpuidle_disable_device(dev);
- cpuidle_remove_sysfs(sys_dev);
+ cpuidle_remove_sysfs(cpu_dev);
list_del(&dev->device_list);
wait_for_completion(&dev->kobj_unregister);
per_cpu(cpuidle_devices, dev->cpu) = NULL;
@@ -411,7 +411,7 @@ static int __init cpuidle_init(void)
if (cpuidle_disabled())
return -ENODEV;
- ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
+ ret = cpuidle_add_interface(cpu_subsys.dev_root);
if (ret)
return ret;
diff --git a/drivers/cpuidle/cpuidle.h b/drivers/cpuidle/cpuidle.h
index 38c3fd8..7db1866 100644
--- a/drivers/cpuidle/cpuidle.h
+++ b/drivers/cpuidle/cpuidle.h
@@ -5,7 +5,7 @@
#ifndef __DRIVER_CPUIDLE_H
#define __DRIVER_CPUIDLE_H
-#include <linux/sysdev.h>
+#include <linux/device.h>
/* For internal use only */
extern struct cpuidle_governor *cpuidle_curr_governor;
@@ -23,11 +23,11 @@ extern void cpuidle_uninstall_idle_handler(void);
extern int cpuidle_switch_governor(struct cpuidle_governor *gov);
/* sysfs */
-extern int cpuidle_add_class_sysfs(struct sysdev_class *cls);
-extern void cpuidle_remove_class_sysfs(struct sysdev_class *cls);
+extern int cpuidle_add_interface(struct device *dev);
+extern void cpuidle_remove_interface(struct device *dev);
extern int cpuidle_add_state_sysfs(struct cpuidle_device *device);
extern void cpuidle_remove_state_sysfs(struct cpuidle_device *device);
-extern int cpuidle_add_sysfs(struct sys_device *sysdev);
-extern void cpuidle_remove_sysfs(struct sys_device *sysdev);
+extern int cpuidle_add_sysfs(struct device *dev);
+extern void cpuidle_remove_sysfs(struct device *dev);
#endif /* __DRIVER_CPUIDLE_H */
diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
index 1e756e1..3fe41fe 100644
--- a/drivers/cpuidle/sysfs.c
+++ b/drivers/cpuidle/sysfs.c
@@ -22,8 +22,8 @@ static int __init cpuidle_sysfs_setup(char *unused)
}
__setup("cpuidle_sysfs_switch", cpuidle_sysfs_setup);
-static ssize_t show_available_governors(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t show_available_governors(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
ssize_t i = 0;
@@ -42,8 +42,8 @@ out:
return i;
}
-static ssize_t show_current_driver(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t show_current_driver(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
ssize_t ret;
@@ -59,8 +59,8 @@ static ssize_t show_current_driver(struct sysdev_class *class,
return ret;
}
-static ssize_t show_current_governor(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t show_current_governor(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
ssize_t ret;
@@ -75,8 +75,8 @@ static ssize_t show_current_governor(struct sysdev_class *class,
return ret;
}
-static ssize_t store_current_governor(struct sysdev_class *class,
- struct sysdev_class_attribute *attr,
+static ssize_t store_current_governor(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
char gov_name[CPUIDLE_NAME_LEN];
@@ -109,50 +109,48 @@ static ssize_t store_current_governor(struct sysdev_class *class,
return count;
}
-static SYSDEV_CLASS_ATTR(current_driver, 0444, show_current_driver, NULL);
-static SYSDEV_CLASS_ATTR(current_governor_ro, 0444, show_current_governor,
- NULL);
+static DEVICE_ATTR(current_driver, 0444, show_current_driver, NULL);
+static DEVICE_ATTR(current_governor_ro, 0444, show_current_governor, NULL);
-static struct attribute *cpuclass_default_attrs[] = {
- &attr_current_driver.attr,
- &attr_current_governor_ro.attr,
+static struct attribute *cpuidle_default_attrs[] = {
+ &dev_attr_current_driver.attr,
+ &dev_attr_current_governor_ro.attr,
NULL
};
-static SYSDEV_CLASS_ATTR(available_governors, 0444, show_available_governors,
- NULL);
-static SYSDEV_CLASS_ATTR(current_governor, 0644, show_current_governor,
- store_current_governor);
+static DEVICE_ATTR(available_governors, 0444, show_available_governors, NULL);
+static DEVICE_ATTR(current_governor, 0644, show_current_governor,
+ store_current_governor);
-static struct attribute *cpuclass_switch_attrs[] = {
- &attr_available_governors.attr,
- &attr_current_driver.attr,
- &attr_current_governor.attr,
+static struct attribute *cpuidle_switch_attrs[] = {
+ &dev_attr_available_governors.attr,
+ &dev_attr_current_driver.attr,
+ &dev_attr_current_governor.attr,
NULL
};
-static struct attribute_group cpuclass_attr_group = {
- .attrs = cpuclass_default_attrs,
+static struct attribute_group cpuidle_attr_group = {
+ .attrs = cpuidle_default_attrs,
.name = "cpuidle",
};
/**
- * cpuidle_add_class_sysfs - add CPU global sysfs attributes
+ * cpuidle_add_interface - add CPU global sysfs attributes
*/
-int cpuidle_add_class_sysfs(struct sysdev_class *cls)
+int cpuidle_add_interface(struct device *dev)
{
if (sysfs_switch)
- cpuclass_attr_group.attrs = cpuclass_switch_attrs;
+ cpuidle_attr_group.attrs = cpuidle_switch_attrs;
- return sysfs_create_group(&cls->kset.kobj, &cpuclass_attr_group);
+ return sysfs_create_group(&dev->kobj, &cpuidle_attr_group);
}
/**
- * cpuidle_remove_class_sysfs - remove CPU global sysfs attributes
+ * cpuidle_remove_interface - remove CPU global sysfs attributes
*/
-void cpuidle_remove_class_sysfs(struct sysdev_class *cls)
+void cpuidle_remove_interface(struct device *dev)
{
- sysfs_remove_group(&cls->kset.kobj, &cpuclass_attr_group);
+ sysfs_remove_group(&dev->kobj, &cpuidle_attr_group);
}
struct cpuidle_attr {
@@ -365,16 +363,16 @@ void cpuidle_remove_state_sysfs(struct cpuidle_device *device)
/**
* cpuidle_add_sysfs - creates a sysfs instance for the target device
- * @sysdev: the target device
+ * @dev: the target device
*/
-int cpuidle_add_sysfs(struct sys_device *sysdev)
+int cpuidle_add_sysfs(struct device *cpu_dev)
{
- int cpu = sysdev->id;
+ int cpu = cpu_dev->id;
struct cpuidle_device *dev;
int error;
dev = per_cpu(cpuidle_devices, cpu);
- error = kobject_init_and_add(&dev->kobj, &ktype_cpuidle, &sysdev->kobj,
+ error = kobject_init_and_add(&dev->kobj, &ktype_cpuidle, &cpu_dev->kobj,
"cpuidle");
if (!error)
kobject_uevent(&dev->kobj, KOBJ_ADD);
@@ -383,11 +381,11 @@ int cpuidle_add_sysfs(struct sys_device *sysdev)
/**
* cpuidle_remove_sysfs - deletes a sysfs instance on the target device
- * @sysdev: the target device
+ * @dev: the target device
*/
-void cpuidle_remove_sysfs(struct sys_device *sysdev)
+void cpuidle_remove_sysfs(struct device *cpu_dev)
{
- int cpu = sysdev->id;
+ int cpu = cpu_dev->id;
struct cpuidle_device *dev;
dev = per_cpu(cpuidle_devices, cpu);
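The sysfs conversion above follows the generic sysdev-to-driver-core pattern:
SYSDEV_CLASS_ATTR declarations become DEVICE_ATTR (which emits dev_attr_<name>
objects), and the attribute group is created on the kobject of the cpu
subsystem's root device rather than on a sysdev_class kset. A self-contained
sketch of that pattern with hypothetical attribute names, assuming nothing
beyond the standard driver-core API:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

/* show() callbacks now take a struct device instead of a sysdev_class */
static ssize_t show_example(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "example\n");
}
static DEVICE_ATTR(example, 0444, show_example, NULL); /* -> dev_attr_example */

static struct attribute *example_attrs[] = {
	&dev_attr_example.attr,
	NULL
};

static struct attribute_group example_attr_group = {
	.attrs = example_attrs,
	.name  = "cpuidle",	/* creates a "cpuidle" subdirectory */
};

/* Called with something like cpu_subsys.dev_root */
static int example_add_interface(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &example_attr_group);
}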
diff --git a/drivers/crypto/amcc/crypto4xx_core.c b/drivers/crypto/amcc/crypto4xx_core.c
index 1d103f9..13f8e1a 100644
--- a/drivers/crypto/amcc/crypto4xx_core.c
+++ b/drivers/crypto/amcc/crypto4xx_core.c
@@ -1292,18 +1292,7 @@ static struct platform_driver crypto4xx_driver = {
.remove = crypto4xx_remove,
};
-static int __init crypto4xx_init(void)
-{
- return platform_driver_register(&crypto4xx_driver);
-}
-
-static void __exit crypto4xx_exit(void)
-{
- platform_driver_unregister(&crypto4xx_driver);
-}
-
-module_init(crypto4xx_init);
-module_exit(crypto4xx_exit);
+module_platform_driver(crypto4xx_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
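module_platform_driver() stands in for the init/exit boilerplate deleted above;
roughly speaking (a sketch of the effect, not the literal macro expansion), it
generates:

static int __init crypto4xx_driver_init(void)
{
	return platform_driver_register(&crypto4xx_driver);
}
module_init(crypto4xx_driver_init);

static void __exit crypto4xx_driver_exit(void)
{
	platform_driver_unregister(&crypto4xx_driver);
}
module_exit(crypto4xx_driver_exit);

The same macro replaces the equivalent boilerplate removed from
drivers/crypto/caam/ctrl.c further down.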
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 4159265..e73cf2e 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -113,7 +113,7 @@ static inline void append_dec_shr_done(u32 *desc)
jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TEST_ALL);
set_jump_tgt_here(desc, jump_cmd);
- append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
/*
@@ -213,7 +213,7 @@ static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx,
set_jump_tgt_here(desc, key_jump_cmd);
/* Propagate errors from shared to job descriptor */
- append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
}
static int aead_set_sh_desc(struct crypto_aead *aead)
@@ -310,7 +310,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
/* Only propagate error immediately if shared */
jump_cmd = append_jump(desc, JUMP_TEST_ALL);
set_jump_tgt_here(desc, key_jump_cmd);
- append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
set_jump_tgt_here(desc, jump_cmd);
/* Class 2 operation */
@@ -683,7 +683,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
set_jump_tgt_here(desc, key_jump_cmd);
/* Propagate errors from shared to job descriptor */
- append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
/* Load iv */
append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
@@ -724,7 +724,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
/* For aead, only propagate error immediately if shared */
jump_cmd = append_jump(desc, JUMP_TEST_ALL);
set_jump_tgt_here(desc, key_jump_cmd);
- append_cmd(desc, SET_OK_PROP_ERRORS | CMD_LOAD);
+ append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);
set_jump_tgt_here(desc, jump_cmd);
/* load IV */
@@ -1806,6 +1806,25 @@ struct caam_alg_template {
static struct caam_alg_template driver_algs[] = {
/* single-pass ipsec_esp descriptor */
{
+ .name = "authenc(hmac(md5),cbc(aes))",
+ .driver_name = "authenc-hmac-md5-cbc-aes-caam",
+ .blocksize = AES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = AES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ {
.name = "authenc(hmac(sha1),cbc(aes))",
.driver_name = "authenc-hmac-sha1-cbc-aes-caam",
.blocksize = AES_BLOCK_SIZE,
@@ -1865,6 +1884,25 @@ static struct caam_alg_template driver_algs[] = {
.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
{
+ .name = "authenc(hmac(md5),cbc(des3_ede))",
+ .driver_name = "authenc-hmac-md5-cbc-des3_ede-caam",
+ .blocksize = DES3_EDE_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES3_EDE_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_3DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ {
.name = "authenc(hmac(sha1),cbc(des3_ede))",
.driver_name = "authenc-hmac-sha1-cbc-des3_ede-caam",
.blocksize = DES3_EDE_BLOCK_SIZE,
@@ -1924,6 +1962,25 @@ static struct caam_alg_template driver_algs[] = {
.alg_op = OP_ALG_ALGSEL_SHA512 | OP_ALG_AAI_HMAC,
},
{
+ .name = "authenc(hmac(md5),cbc(des))",
+ .driver_name = "authenc-hmac-md5-cbc-des-caam",
+ .blocksize = DES_BLOCK_SIZE,
+ .type = CRYPTO_ALG_TYPE_AEAD,
+ .template_aead = {
+ .setkey = aead_setkey,
+ .setauthsize = aead_setauthsize,
+ .encrypt = aead_encrypt,
+ .decrypt = aead_decrypt,
+ .givencrypt = aead_givencrypt,
+ .geniv = "<built-in>",
+ .ivsize = DES_BLOCK_SIZE,
+ .maxauthsize = MD5_DIGEST_SIZE,
+ },
+ .class1_alg_type = OP_ALG_ALGSEL_DES | OP_ALG_AAI_CBC,
+ .class2_alg_type = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC_PRECOMP,
+ .alg_op = OP_ALG_ALGSEL_MD5 | OP_ALG_AAI_HMAC,
+ },
+ {
.name = "authenc(hmac(sha1),cbc(des))",
.driver_name = "authenc-hmac-sha1-cbc-des-caam",
.blocksize = DES_BLOCK_SIZE,
diff --git a/drivers/crypto/caam/compat.h b/drivers/crypto/caam/compat.h
index d38f2af..a63bc65 100644
--- a/drivers/crypto/caam/compat.h
+++ b/drivers/crypto/caam/compat.h
@@ -28,6 +28,7 @@
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
+#include <crypto/md5.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 73988bb..8ae3ba2 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -52,8 +52,6 @@ static int caam_probe(struct platform_device *pdev)
struct caam_ctrl __iomem *ctrl;
struct caam_full __iomem *topregs;
struct caam_drv_private *ctrlpriv;
- struct caam_deco **deco;
- u32 deconum;
#ifdef CONFIG_DEBUG_FS
struct caam_perfmon *perfmon;
#endif
@@ -92,17 +90,6 @@ static int caam_probe(struct platform_device *pdev)
if (sizeof(dma_addr_t) == sizeof(u64))
dma_set_mask(dev, DMA_BIT_MASK(36));
- /* Find out how many DECOs are present */
- deconum = (rd_reg64(&topregs->ctrl.perfmon.cha_num) &
- CHA_NUM_DECONUM_MASK) >> CHA_NUM_DECONUM_SHIFT;
-
- ctrlpriv->deco = kmalloc(deconum * sizeof(struct caam_deco *),
- GFP_KERNEL);
-
- deco = (struct caam_deco __force **)&topregs->deco;
- for (d = 0; d < deconum; d++)
- ctrlpriv->deco[d] = deco[d];
-
/*
* Detect and enable JobRs
* First, find out how many ring spec'ed, allocate references
@@ -253,18 +240,7 @@ static struct platform_driver caam_driver = {
.remove = __devexit_p(caam_remove),
};
-static int __init caam_base_init(void)
-{
- return platform_driver_register(&caam_driver);
-}
-
-static void __exit caam_base_exit(void)
-{
- return platform_driver_unregister(&caam_driver);
-}
-
-module_init(caam_base_init);
-module_exit(caam_base_exit);
+module_platform_driver(caam_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM request backend");
diff --git a/drivers/crypto/caam/desc.h b/drivers/crypto/caam/desc.h
index 974a758..a17c295 100644
--- a/drivers/crypto/caam/desc.h
+++ b/drivers/crypto/caam/desc.h
@@ -9,7 +9,7 @@
#define DESC_H
/* Max size of any CAAM descriptor in 32-bit words, inclusive of header */
-#define MAX_CAAM_DESCSIZE 64
+#define MAX_CAAM_DESCSIZE 64
/* Block size of any entity covered/uncovered with a KEK/TKEK */
#define KEK_BLOCKSIZE 16
@@ -18,38 +18,38 @@
* Supported descriptor command types as they show up
* inside a descriptor command word.
*/
-#define CMD_SHIFT 27
-#define CMD_MASK 0xf8000000
-
-#define CMD_KEY (0x00 << CMD_SHIFT)
-#define CMD_SEQ_KEY (0x01 << CMD_SHIFT)
-#define CMD_LOAD (0x02 << CMD_SHIFT)
-#define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
-#define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
-#define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
-#define CMD_STORE (0x0a << CMD_SHIFT)
-#define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
-#define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
-#define CMD_SEQ_FIFO_STORE (0x0d << CMD_SHIFT)
-#define CMD_MOVE_LEN (0x0e << CMD_SHIFT)
-#define CMD_MOVE (0x0f << CMD_SHIFT)
-#define CMD_OPERATION (0x10 << CMD_SHIFT)
-#define CMD_SIGNATURE (0x12 << CMD_SHIFT)
-#define CMD_JUMP (0x14 << CMD_SHIFT)
-#define CMD_MATH (0x15 << CMD_SHIFT)
-#define CMD_DESC_HDR (0x16 << CMD_SHIFT)
-#define CMD_SHARED_DESC_HDR (0x17 << CMD_SHIFT)
-#define CMD_SEQ_IN_PTR (0x1e << CMD_SHIFT)
-#define CMD_SEQ_OUT_PTR (0x1f << CMD_SHIFT)
+#define CMD_SHIFT 27
+#define CMD_MASK 0xf8000000
+
+#define CMD_KEY (0x00 << CMD_SHIFT)
+#define CMD_SEQ_KEY (0x01 << CMD_SHIFT)
+#define CMD_LOAD (0x02 << CMD_SHIFT)
+#define CMD_SEQ_LOAD (0x03 << CMD_SHIFT)
+#define CMD_FIFO_LOAD (0x04 << CMD_SHIFT)
+#define CMD_SEQ_FIFO_LOAD (0x05 << CMD_SHIFT)
+#define CMD_STORE (0x0a << CMD_SHIFT)
+#define CMD_SEQ_STORE (0x0b << CMD_SHIFT)
+#define CMD_FIFO_STORE (0x0c << CMD_SHIFT)
+#define CMD_SEQ_FIFO_STORE (0x0d << CMD_SHIFT)
+#define CMD_MOVE_LEN (0x0e << CMD_SHIFT)
+#define CMD_MOVE (0x0f << CMD_SHIFT)
+#define CMD_OPERATION (0x10 << CMD_SHIFT)
+#define CMD_SIGNATURE (0x12 << CMD_SHIFT)
+#define CMD_JUMP (0x14 << CMD_SHIFT)
+#define CMD_MATH (0x15 << CMD_SHIFT)
+#define CMD_DESC_HDR (0x16 << CMD_SHIFT)
+#define CMD_SHARED_DESC_HDR (0x17 << CMD_SHIFT)
+#define CMD_SEQ_IN_PTR (0x1e << CMD_SHIFT)
+#define CMD_SEQ_OUT_PTR (0x1f << CMD_SHIFT)
/* General-purpose class selector for all commands */
-#define CLASS_SHIFT 25
-#define CLASS_MASK (0x03 << CLASS_SHIFT)
+#define CLASS_SHIFT 25
+#define CLASS_MASK (0x03 << CLASS_SHIFT)
-#define CLASS_NONE (0x00 << CLASS_SHIFT)
-#define CLASS_1 (0x01 << CLASS_SHIFT)
-#define CLASS_2 (0x02 << CLASS_SHIFT)
-#define CLASS_BOTH (0x03 << CLASS_SHIFT)
+#define CLASS_NONE (0x00 << CLASS_SHIFT)
+#define CLASS_1 (0x01 << CLASS_SHIFT)
+#define CLASS_2 (0x02 << CLASS_SHIFT)
+#define CLASS_BOTH (0x03 << CLASS_SHIFT)
/*
* Descriptor header command constructs
@@ -60,82 +60,82 @@
* Do Not Run - marks a descriptor inexecutable if there was
* a preceding error somewhere
*/
-#define HDR_DNR 0x01000000
+#define HDR_DNR 0x01000000
/*
* ONE - should always be set. Combination of ONE (always
* set) and ZRO (always clear) forms an endianness sanity check
*/
-#define HDR_ONE 0x00800000
-#define HDR_ZRO 0x00008000
+#define HDR_ONE 0x00800000
+#define HDR_ZRO 0x00008000
/* Start Index or SharedDesc Length */
-#define HDR_START_IDX_MASK 0x3f
-#define HDR_START_IDX_SHIFT 16
+#define HDR_START_IDX_MASK 0x3f
+#define HDR_START_IDX_SHIFT 16
/* If shared descriptor header, 6-bit length */
-#define HDR_DESCLEN_SHR_MASK 0x3f
+#define HDR_DESCLEN_SHR_MASK 0x3f
/* If non-shared header, 7-bit length */
-#define HDR_DESCLEN_MASK 0x7f
+#define HDR_DESCLEN_MASK 0x7f
/* This is a TrustedDesc (if not SharedDesc) */
-#define HDR_TRUSTED 0x00004000
+#define HDR_TRUSTED 0x00004000
/* Make into TrustedDesc (if not SharedDesc) */
-#define HDR_MAKE_TRUSTED 0x00002000
+#define HDR_MAKE_TRUSTED 0x00002000
/* Save context if self-shared (if SharedDesc) */
-#define HDR_SAVECTX 0x00001000
+#define HDR_SAVECTX 0x00001000
/* Next item points to SharedDesc */
-#define HDR_SHARED 0x00001000
+#define HDR_SHARED 0x00001000
/*
* Reverse Execution Order - execute JobDesc first, then
* execute SharedDesc (normally SharedDesc goes first).
*/
-#define HDR_REVERSE 0x00000800
+#define HDR_REVERSE 0x00000800
/* Propagate DNR property to SharedDesc */
-#define HDR_PROP_DNR 0x00000800
+#define HDR_PROP_DNR 0x00000800
/* JobDesc/SharedDesc share property */
-#define HDR_SD_SHARE_MASK 0x03
-#define HDR_SD_SHARE_SHIFT 8
-#define HDR_JD_SHARE_MASK 0x07
-#define HDR_JD_SHARE_SHIFT 8
+#define HDR_SD_SHARE_MASK 0x03
+#define HDR_SD_SHARE_SHIFT 8
+#define HDR_JD_SHARE_MASK 0x07
+#define HDR_JD_SHARE_SHIFT 8
-#define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
-#define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
-#define HDR_SHARE_SERIAL (0x02 << HDR_SD_SHARE_SHIFT)
-#define HDR_SHARE_ALWAYS (0x03 << HDR_SD_SHARE_SHIFT)
-#define HDR_SHARE_DEFER (0x04 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_NEVER (0x00 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_WAIT (0x01 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_SERIAL (0x02 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_ALWAYS (0x03 << HDR_SD_SHARE_SHIFT)
+#define HDR_SHARE_DEFER (0x04 << HDR_SD_SHARE_SHIFT)
/* JobDesc/SharedDesc descriptor length */
-#define HDR_JD_LENGTH_MASK 0x7f
-#define HDR_SD_LENGTH_MASK 0x3f
+#define HDR_JD_LENGTH_MASK 0x7f
+#define HDR_SD_LENGTH_MASK 0x3f
/*
* KEY/SEQ_KEY Command Constructs
*/
-/* Key Destination Class: 01 = Class 1, 02 - Class 2 */
-#define KEY_DEST_CLASS_SHIFT 25 /* use CLASS_1 or CLASS_2 */
-#define KEY_DEST_CLASS_MASK (0x03 << KEY_DEST_CLASS_SHIFT)
+/* Key Destination Class: 01 = Class 1, 02 - Class 2 */
+#define KEY_DEST_CLASS_SHIFT 25 /* use CLASS_1 or CLASS_2 */
+#define KEY_DEST_CLASS_MASK (0x03 << KEY_DEST_CLASS_SHIFT)
/* Scatter-Gather Table/Variable Length Field */
-#define KEY_SGF 0x01000000
-#define KEY_VLF 0x01000000
+#define KEY_SGF 0x01000000
+#define KEY_VLF 0x01000000
/* Immediate - Key follows command in the descriptor */
-#define KEY_IMM 0x00800000
+#define KEY_IMM 0x00800000
/*
* Encrypted - Key is encrypted either with the KEK, or
* with the TDKEK if TK is set
*/
-#define KEY_ENC 0x00400000
+#define KEY_ENC 0x00400000
/*
* No Write Back - Do not allow key to be FIFO STOREd
@@ -156,16 +156,16 @@
* KDEST - Key Destination: 0 - class key register,
* 1 - PKHA 'e', 2 - AFHA Sbox, 3 - MDHA split-key
*/
-#define KEY_DEST_SHIFT 16
-#define KEY_DEST_MASK (0x03 << KEY_DEST_SHIFT)
+#define KEY_DEST_SHIFT 16
+#define KEY_DEST_MASK (0x03 << KEY_DEST_SHIFT)
-#define KEY_DEST_CLASS_REG (0x00 << KEY_DEST_SHIFT)
-#define KEY_DEST_PKHA_E (0x01 << KEY_DEST_SHIFT)
-#define KEY_DEST_AFHA_SBOX (0x02 << KEY_DEST_SHIFT)
-#define KEY_DEST_MDHA_SPLIT (0x03 << KEY_DEST_SHIFT)
+#define KEY_DEST_CLASS_REG (0x00 << KEY_DEST_SHIFT)
+#define KEY_DEST_PKHA_E (0x01 << KEY_DEST_SHIFT)
+#define KEY_DEST_AFHA_SBOX (0x02 << KEY_DEST_SHIFT)
+#define KEY_DEST_MDHA_SPLIT (0x03 << KEY_DEST_SHIFT)
/* Length in bytes */
-#define KEY_LENGTH_MASK 0x000003ff
+#define KEY_LENGTH_MASK 0x000003ff
/*
* LOAD/SEQ_LOAD/STORE/SEQ_STORE Command Constructs
@@ -175,25 +175,25 @@
* Load/Store Destination: 0 = class independent CCB,
* 1 = class 1 CCB, 2 = class 2 CCB, 3 = DECO
*/
-#define LDST_CLASS_SHIFT 25
-#define LDST_CLASS_MASK (0x03 << LDST_CLASS_SHIFT)
-#define LDST_CLASS_IND_CCB (0x00 << LDST_CLASS_SHIFT)
-#define LDST_CLASS_1_CCB (0x01 << LDST_CLASS_SHIFT)
-#define LDST_CLASS_2_CCB (0x02 << LDST_CLASS_SHIFT)
-#define LDST_CLASS_DECO (0x03 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_SHIFT 25
+#define LDST_CLASS_MASK (0x03 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_IND_CCB (0x00 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_1_CCB (0x01 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_2_CCB (0x02 << LDST_CLASS_SHIFT)
+#define LDST_CLASS_DECO (0x03 << LDST_CLASS_SHIFT)
/* Scatter-Gather Table/Variable Length Field */
-#define LDST_SGF 0x01000000
+#define LDST_SGF 0x01000000
#define LDST_VLF LDST_SGF
-/* Immediate - Key follows this command in descriptor */
-#define LDST_IMM_MASK 1
-#define LDST_IMM_SHIFT 23
-#define LDST_IMM (LDST_IMM_MASK << LDST_IMM_SHIFT)
+/* Immediate - Key follows this command in descriptor */
+#define LDST_IMM_MASK 1
+#define LDST_IMM_SHIFT 23
+#define LDST_IMM (LDST_IMM_MASK << LDST_IMM_SHIFT)
-/* SRC/DST - Destination for LOAD, Source for STORE */
-#define LDST_SRCDST_SHIFT 16
-#define LDST_SRCDST_MASK (0x7f << LDST_SRCDST_SHIFT)
+/* SRC/DST - Destination for LOAD, Source for STORE */
+#define LDST_SRCDST_SHIFT 16
+#define LDST_SRCDST_MASK (0x7f << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_BYTE_CONTEXT (0x20 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_BYTE_KEY (0x40 << LDST_SRCDST_SHIFT)
@@ -205,64 +205,64 @@
#define LDST_SRCDST_WORD_DATASZ_REG (0x02 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_ICVSZ_REG (0x03 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_CHACTRL (0x06 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DECOCTRL (0x06 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECOCTRL (0x06 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_IRQCTRL (0x07 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DECO_PCLOVRD (0x07 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_PCLOVRD (0x07 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_CLRW (0x08 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DECO_MATH0 (0x08 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH0 (0x08 << LDST_SRCDST_SHIFT)
#define LDST_SRCDST_WORD_STAT (0x09 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DECO_MATH1 (0x09 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_PKHA_N_SZ (0x12 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_PKHA_E_SZ (0x13 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_DESCBUF (0x40 << LDST_SRCDST_SHIFT)
-#define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT)
-
-/* Offset in source/destination */
-#define LDST_OFFSET_SHIFT 8
-#define LDST_OFFSET_MASK (0xff << LDST_OFFSET_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH1 (0x09 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH2 (0x0a << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_AAD_SZ (0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DECO_MATH3 (0x0b << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_CLASS1_ICV_SZ (0x0c << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_ALTDS_CLASS1 (0x0f << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_A_SZ (0x10 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_B_SZ (0x11 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_N_SZ (0x12 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_PKHA_E_SZ (0x13 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_DESCBUF (0x40 << LDST_SRCDST_SHIFT)
+#define LDST_SRCDST_WORD_INFO_FIFO (0x7a << LDST_SRCDST_SHIFT)
+
+/* Offset in source/destination */
+#define LDST_OFFSET_SHIFT 8
+#define LDST_OFFSET_MASK (0xff << LDST_OFFSET_SHIFT)
/* LDOFF definitions used when DST = LDST_SRCDST_WORD_DECOCTRL */
/* These could also be shifted by LDST_OFFSET_SHIFT - this reads better */
-#define LDOFF_CHG_SHARE_SHIFT 0
-#define LDOFF_CHG_SHARE_MASK (0x3 << LDOFF_CHG_SHARE_SHIFT)
-#define LDOFF_CHG_SHARE_NEVER (0x1 << LDOFF_CHG_SHARE_SHIFT)
-#define LDOFF_CHG_SHARE_OK_NO_PROP (0x2 << LDOFF_CHG_SHARE_SHIFT)
-#define LDOFF_CHG_SHARE_OK_PROP (0x3 << LDOFF_CHG_SHARE_SHIFT)
-
-#define LDOFF_ENABLE_AUTO_NFIFO (1 << 2)
-#define LDOFF_DISABLE_AUTO_NFIFO (1 << 3)
-
-#define LDOFF_CHG_NONSEQLIODN_SHIFT 4
-#define LDOFF_CHG_NONSEQLIODN_MASK (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
-#define LDOFF_CHG_NONSEQLIODN_SEQ (0x1 << LDOFF_CHG_NONSEQLIODN_SHIFT)
-#define LDOFF_CHG_NONSEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_NONSEQLIODN_SHIFT)
-#define LDOFF_CHG_NONSEQLIODN_TRUSTED (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
-
-#define LDOFF_CHG_SEQLIODN_SHIFT 6
-#define LDOFF_CHG_SEQLIODN_MASK (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
-#define LDOFF_CHG_SEQLIODN_SEQ (0x1 << LDOFF_CHG_SEQLIODN_SHIFT)
-#define LDOFF_CHG_SEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_SEQLIODN_SHIFT)
-#define LDOFF_CHG_SEQLIODN_TRUSTED (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
-
-/* Data length in bytes */
-#define LDST_LEN_SHIFT 0
-#define LDST_LEN_MASK (0xff << LDST_LEN_SHIFT)
+#define LDOFF_CHG_SHARE_SHIFT 0
+#define LDOFF_CHG_SHARE_MASK (0x3 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_NEVER (0x1 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_PROP (0x2 << LDOFF_CHG_SHARE_SHIFT)
+#define LDOFF_CHG_SHARE_OK_NO_PROP (0x3 << LDOFF_CHG_SHARE_SHIFT)
+
+#define LDOFF_ENABLE_AUTO_NFIFO (1 << 2)
+#define LDOFF_DISABLE_AUTO_NFIFO (1 << 3)
+
+#define LDOFF_CHG_NONSEQLIODN_SHIFT 4
+#define LDOFF_CHG_NONSEQLIODN_MASK (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_SEQ (0x1 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+#define LDOFF_CHG_NONSEQLIODN_TRUSTED (0x3 << LDOFF_CHG_NONSEQLIODN_SHIFT)
+
+#define LDOFF_CHG_SEQLIODN_SHIFT 6
+#define LDOFF_CHG_SEQLIODN_MASK (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_SEQ (0x1 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_NON_SEQ (0x2 << LDOFF_CHG_SEQLIODN_SHIFT)
+#define LDOFF_CHG_SEQLIODN_TRUSTED (0x3 << LDOFF_CHG_SEQLIODN_SHIFT)
+
+/* Data length in bytes */
+#define LDST_LEN_SHIFT 0
+#define LDST_LEN_MASK (0xff << LDST_LEN_SHIFT)
/* Special Length definitions when dst=deco-ctrl */
-#define LDLEN_ENABLE_OSL_COUNT (1 << 7)
-#define LDLEN_RST_CHA_OFIFO_PTR (1 << 6)
-#define LDLEN_RST_OFIFO (1 << 5)
-#define LDLEN_SET_OFIFO_OFF_VALID (1 << 4)
-#define LDLEN_SET_OFIFO_OFF_RSVD (1 << 3)
-#define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
-#define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
+#define LDLEN_ENABLE_OSL_COUNT (1 << 7)
+#define LDLEN_RST_CHA_OFIFO_PTR (1 << 6)
+#define LDLEN_RST_OFIFO (1 << 5)
+#define LDLEN_SET_OFIFO_OFF_VALID (1 << 4)
+#define LDLEN_SET_OFIFO_OFF_RSVD (1 << 3)
+#define LDLEN_SET_OFIFO_OFFSET_SHIFT 0
+#define LDLEN_SET_OFIFO_OFFSET_MASK (3 << LDLEN_SET_OFIFO_OFFSET_SHIFT)
/*
* FIFO_LOAD/FIFO_STORE/SEQ_FIFO_LOAD/SEQ_FIFO_STORE
@@ -274,808 +274,808 @@
* 1 = Load for Class1, 2 = Load for Class2, 3 = Load both
* Store Source: 0 = normal, 1 = Class1key, 2 = Class2key
*/
-#define FIFOLD_CLASS_SHIFT 25
-#define FIFOLD_CLASS_MASK (0x03 << FIFOLD_CLASS_SHIFT)
-#define FIFOLD_CLASS_SKIP (0x00 << FIFOLD_CLASS_SHIFT)
-#define FIFOLD_CLASS_CLASS1 (0x01 << FIFOLD_CLASS_SHIFT)
-#define FIFOLD_CLASS_CLASS2 (0x02 << FIFOLD_CLASS_SHIFT)
-#define FIFOLD_CLASS_BOTH (0x03 << FIFOLD_CLASS_SHIFT)
-
-#define FIFOST_CLASS_SHIFT 25
-#define FIFOST_CLASS_MASK (0x03 << FIFOST_CLASS_SHIFT)
-#define FIFOST_CLASS_NORMAL (0x00 << FIFOST_CLASS_SHIFT)
-#define FIFOST_CLASS_CLASS1KEY (0x01 << FIFOST_CLASS_SHIFT)
-#define FIFOST_CLASS_CLASS2KEY (0x02 << FIFOST_CLASS_SHIFT)
+#define FIFOLD_CLASS_SHIFT 25
+#define FIFOLD_CLASS_MASK (0x03 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_SKIP (0x00 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS1 (0x01 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_CLASS2 (0x02 << FIFOLD_CLASS_SHIFT)
+#define FIFOLD_CLASS_BOTH (0x03 << FIFOLD_CLASS_SHIFT)
+
+#define FIFOST_CLASS_SHIFT 25
+#define FIFOST_CLASS_MASK (0x03 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_NORMAL (0x00 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS1KEY (0x01 << FIFOST_CLASS_SHIFT)
+#define FIFOST_CLASS_CLASS2KEY (0x02 << FIFOST_CLASS_SHIFT)
/*
* Scatter-Gather Table/Variable Length Field
* If set for FIFO_LOAD, refers to a SG table. Within
* SEQ_FIFO_LOAD, is variable input sequence
*/
-#define FIFOLDST_SGF_SHIFT 24
-#define FIFOLDST_SGF_MASK (1 << FIFOLDST_SGF_SHIFT)
-#define FIFOLDST_VLF_MASK (1 << FIFOLDST_SGF_SHIFT)
-#define FIFOLDST_SGF (1 << FIFOLDST_SGF_SHIFT)
-#define FIFOLDST_VLF (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_SGF_SHIFT 24
+#define FIFOLDST_SGF_MASK (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_VLF_MASK (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_SGF (1 << FIFOLDST_SGF_SHIFT)
+#define FIFOLDST_VLF (1 << FIFOLDST_SGF_SHIFT)
/* Immediate - Data follows command in descriptor */
-#define FIFOLD_IMM_SHIFT 23
-#define FIFOLD_IMM_MASK (1 << FIFOLD_IMM_SHIFT)
-#define FIFOLD_IMM (1 << FIFOLD_IMM_SHIFT)
+#define FIFOLD_IMM_SHIFT 23
+#define FIFOLD_IMM_MASK (1 << FIFOLD_IMM_SHIFT)
+#define FIFOLD_IMM (1 << FIFOLD_IMM_SHIFT)
/* Continue - Not the last FIFO store to come */
-#define FIFOST_CONT_SHIFT 23
-#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT)
-#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT)
+#define FIFOST_CONT_SHIFT 23
+#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT)
+#define FIFOST_CONT_MASK (1 << FIFOST_CONT_SHIFT)
/*
* Extended Length - use 32-bit extended length that
* follows the pointer field. Illegal with IMM set
*/
-#define FIFOLDST_EXT_SHIFT 22
-#define FIFOLDST_EXT_MASK (1 << FIFOLDST_EXT_SHIFT)
-#define FIFOLDST_EXT (1 << FIFOLDST_EXT_SHIFT)
+#define FIFOLDST_EXT_SHIFT 22
+#define FIFOLDST_EXT_MASK (1 << FIFOLDST_EXT_SHIFT)
+#define FIFOLDST_EXT (1 << FIFOLDST_EXT_SHIFT)
/* Input data type.*/
-#define FIFOLD_TYPE_SHIFT 16
-#define FIFOLD_CONT_TYPE_SHIFT 19 /* shift past last-flush bits */
-#define FIFOLD_TYPE_MASK (0x3f << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_SHIFT 16
+#define FIFOLD_CONT_TYPE_SHIFT 19 /* shift past last-flush bits */
+#define FIFOLD_TYPE_MASK (0x3f << FIFOLD_TYPE_SHIFT)
/* PK types */
-#define FIFOLD_TYPE_PK (0x00 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_MASK (0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_MASK (0x30 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_PK_TYPEMASK (0x0f << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_A0 (0x00 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_A1 (0x01 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_A2 (0x02 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_A3 (0x03 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_B0 (0x04 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_B1 (0x05 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_B2 (0x06 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_B3 (0x07 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A0 (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A1 (0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A2 (0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A3 (0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B0 (0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B1 (0x05 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B2 (0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B3 (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_N (0x08 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_A (0x0c << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_PK_B (0x0d << FIFOLD_TYPE_SHIFT)
/* Other types. Need to OR in last/flush bits as desired */
-#define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_MSG (0x10 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_MSG1OUT2 (0x18 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_IV (0x20 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_BITDATA (0x28 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_AAD (0x30 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_ICV (0x38 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG_MASK (0x38 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG (0x10 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_MSG1OUT2 (0x18 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_IV (0x20 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_BITDATA (0x28 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_AAD (0x30 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_ICV (0x38 << FIFOLD_TYPE_SHIFT)
/* Last/Flush bits for use with "other" types above */
-#define FIFOLD_TYPE_ACT_MASK (0x07 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_NOACTION (0x00 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_FLUSH1 (0x01 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_LAST1 (0x02 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_LAST2FLUSH (0x03 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_LAST2 (0x04 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_ACT_MASK (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_NOACTION (0x00 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_FLUSH1 (0x01 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST1 (0x02 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2FLUSH (0x03 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LAST2 (0x04 << FIFOLD_TYPE_SHIFT)
#define FIFOLD_TYPE_LAST2FLUSH1 (0x05 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_LASTBOTH (0x06 << FIFOLD_TYPE_SHIFT)
-#define FIFOLD_TYPE_LASTBOTHFL (0x07 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTH (0x06 << FIFOLD_TYPE_SHIFT)
+#define FIFOLD_TYPE_LASTBOTHFL (0x07 << FIFOLD_TYPE_SHIFT)
-#define FIFOLDST_LEN_MASK 0xffff
-#define FIFOLDST_EXT_LEN_MASK 0xffffffff
+#define FIFOLDST_LEN_MASK 0xffff
+#define FIFOLDST_EXT_LEN_MASK 0xffffffff
/* Output data types */
-#define FIFOST_TYPE_SHIFT 16
-#define FIFOST_TYPE_MASK (0x3f << FIFOST_TYPE_SHIFT)
-
-#define FIFOST_TYPE_PKHA_A0 (0x00 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_A1 (0x01 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_A2 (0x02 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_A3 (0x03 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_B0 (0x04 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_B1 (0x05 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_B2 (0x06 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_B3 (0x07 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SHIFT 16
+#define FIFOST_TYPE_MASK (0x3f << FIFOST_TYPE_SHIFT)
+
+#define FIFOST_TYPE_PKHA_A0 (0x00 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A1 (0x01 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A2 (0x02 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A3 (0x03 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B0 (0x04 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B1 (0x05 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B2 (0x06 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B3 (0x07 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_N (0x08 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_A (0x0c << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_B (0x0d << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_AF_SBOX_JKEK (0x10 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_AF_SBOX_TKEK (0x21 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_KEY_KEK (0x24 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_KEY_TKEK (0x25 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_SPLIT_KEK (0x26 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_SPLIT_TKEK (0x27 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_OUTFIFO_KEK (0x28 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_JKEK (0x22 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_PKHA_E_TKEK (0x23 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_KEK (0x24 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_KEY_TKEK (0x25 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_KEK (0x26 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SPLIT_TKEK (0x27 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_OUTFIFO_KEK (0x28 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_OUTFIFO_TKEK (0x29 << FIFOST_TYPE_SHIFT)
#define FIFOST_TYPE_MESSAGE_DATA (0x30 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
-#define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGSTORE (0x34 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_RNGFIFO (0x35 << FIFOST_TYPE_SHIFT)
+#define FIFOST_TYPE_SKIP (0x3f << FIFOST_TYPE_SHIFT)
/*
* OPERATION Command Constructs
*/
/* Operation type selectors - OP TYPE */
-#define OP_TYPE_SHIFT 24
-#define OP_TYPE_MASK (0x07 << OP_TYPE_SHIFT)
+#define OP_TYPE_SHIFT 24
+#define OP_TYPE_MASK (0x07 << OP_TYPE_SHIFT)
-#define OP_TYPE_UNI_PROTOCOL (0x00 << OP_TYPE_SHIFT)
-#define OP_TYPE_PK (0x01 << OP_TYPE_SHIFT)
-#define OP_TYPE_CLASS1_ALG (0x02 << OP_TYPE_SHIFT)
-#define OP_TYPE_CLASS2_ALG (0x04 << OP_TYPE_SHIFT)
-#define OP_TYPE_DECAP_PROTOCOL (0x06 << OP_TYPE_SHIFT)
-#define OP_TYPE_ENCAP_PROTOCOL (0x07 << OP_TYPE_SHIFT)
+#define OP_TYPE_UNI_PROTOCOL (0x00 << OP_TYPE_SHIFT)
+#define OP_TYPE_PK (0x01 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS1_ALG (0x02 << OP_TYPE_SHIFT)
+#define OP_TYPE_CLASS2_ALG (0x04 << OP_TYPE_SHIFT)
+#define OP_TYPE_DECAP_PROTOCOL (0x06 << OP_TYPE_SHIFT)
+#define OP_TYPE_ENCAP_PROTOCOL (0x07 << OP_TYPE_SHIFT)
/* ProtocolID selectors - PROTID */
-#define OP_PCLID_SHIFT 16
-#define OP_PCLID_MASK (0xff << 16)
+#define OP_PCLID_SHIFT 16
+#define OP_PCLID_MASK (0xff << 16)
/* Assuming OP_TYPE = OP_TYPE_UNI_PROTOCOL */
-#define OP_PCLID_IKEV1_PRF (0x01 << OP_PCLID_SHIFT)
-#define OP_PCLID_IKEV2_PRF (0x02 << OP_PCLID_SHIFT)
-#define OP_PCLID_SSL30_PRF (0x08 << OP_PCLID_SHIFT)
-#define OP_PCLID_TLS10_PRF (0x09 << OP_PCLID_SHIFT)
-#define OP_PCLID_TLS11_PRF (0x0a << OP_PCLID_SHIFT)
-#define OP_PCLID_DTLS10_PRF (0x0c << OP_PCLID_SHIFT)
-#define OP_PCLID_PRF (0x06 << OP_PCLID_SHIFT)
-#define OP_PCLID_BLOB (0x0d << OP_PCLID_SHIFT)
-#define OP_PCLID_SECRETKEY (0x11 << OP_PCLID_SHIFT)
-#define OP_PCLID_PUBLICKEYPAIR (0x14 << OP_PCLID_SHIFT)
-#define OP_PCLID_DSASIGN (0x15 << OP_PCLID_SHIFT)
-#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
+#define OP_PCLID_IKEV1_PRF (0x01 << OP_PCLID_SHIFT)
+#define OP_PCLID_IKEV2_PRF (0x02 << OP_PCLID_SHIFT)
+#define OP_PCLID_SSL30_PRF (0x08 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS10_PRF (0x09 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS11_PRF (0x0a << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS10_PRF (0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_PRF (0x06 << OP_PCLID_SHIFT)
+#define OP_PCLID_BLOB (0x0d << OP_PCLID_SHIFT)
+#define OP_PCLID_SECRETKEY (0x11 << OP_PCLID_SHIFT)
+#define OP_PCLID_PUBLICKEYPAIR (0x14 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSASIGN (0x15 << OP_PCLID_SHIFT)
+#define OP_PCLID_DSAVERIFY (0x16 << OP_PCLID_SHIFT)
/* Assuming OP_TYPE = OP_TYPE_DECAP_PROTOCOL/ENCAP_PROTOCOL */
-#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
-#define OP_PCLID_SRTP (0x02 << OP_PCLID_SHIFT)
-#define OP_PCLID_MACSEC (0x03 << OP_PCLID_SHIFT)
-#define OP_PCLID_WIFI (0x04 << OP_PCLID_SHIFT)
-#define OP_PCLID_WIMAX (0x05 << OP_PCLID_SHIFT)
-#define OP_PCLID_SSL30 (0x08 << OP_PCLID_SHIFT)
-#define OP_PCLID_TLS10 (0x09 << OP_PCLID_SHIFT)
-#define OP_PCLID_TLS11 (0x0a << OP_PCLID_SHIFT)
-#define OP_PCLID_TLS12 (0x0b << OP_PCLID_SHIFT)
-#define OP_PCLID_DTLS (0x0c << OP_PCLID_SHIFT)
+#define OP_PCLID_IPSEC (0x01 << OP_PCLID_SHIFT)
+#define OP_PCLID_SRTP (0x02 << OP_PCLID_SHIFT)
+#define OP_PCLID_MACSEC (0x03 << OP_PCLID_SHIFT)
+#define OP_PCLID_WIFI (0x04 << OP_PCLID_SHIFT)
+#define OP_PCLID_WIMAX (0x05 << OP_PCLID_SHIFT)
+#define OP_PCLID_SSL30 (0x08 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS10 (0x09 << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS11 (0x0a << OP_PCLID_SHIFT)
+#define OP_PCLID_TLS12 (0x0b << OP_PCLID_SHIFT)
+#define OP_PCLID_DTLS (0x0c << OP_PCLID_SHIFT)
/*
* ProtocolInfo selectors
*/
-#define OP_PCLINFO_MASK 0xffff
+#define OP_PCLINFO_MASK 0xffff
/* for OP_PCLID_IPSEC */
-#define OP_PCL_IPSEC_CIPHER_MASK 0xff00
-#define OP_PCL_IPSEC_AUTH_MASK 0x00ff
-
-#define OP_PCL_IPSEC_DES_IV64 0x0100
-#define OP_PCL_IPSEC_DES 0x0200
-#define OP_PCL_IPSEC_3DES 0x0300
-#define OP_PCL_IPSEC_AES_CBC 0x0c00
-#define OP_PCL_IPSEC_AES_CTR 0x0d00
-#define OP_PCL_IPSEC_AES_XTS 0x1600
-#define OP_PCL_IPSEC_AES_CCM8 0x0e00
-#define OP_PCL_IPSEC_AES_CCM12 0x0f00
-#define OP_PCL_IPSEC_AES_CCM16 0x1000
-#define OP_PCL_IPSEC_AES_GCM8 0x1200
-#define OP_PCL_IPSEC_AES_GCM12 0x1300
-#define OP_PCL_IPSEC_AES_GCM16 0x1400
-
-#define OP_PCL_IPSEC_HMAC_NULL 0x0000
-#define OP_PCL_IPSEC_HMAC_MD5_96 0x0001
-#define OP_PCL_IPSEC_HMAC_SHA1_96 0x0002
-#define OP_PCL_IPSEC_AES_XCBC_MAC_96 0x0005
-#define OP_PCL_IPSEC_HMAC_MD5_128 0x0006
-#define OP_PCL_IPSEC_HMAC_SHA1_160 0x0007
-#define OP_PCL_IPSEC_HMAC_SHA2_256_128 0x000c
-#define OP_PCL_IPSEC_HMAC_SHA2_384_192 0x000d
-#define OP_PCL_IPSEC_HMAC_SHA2_512_256 0x000e
+#define OP_PCL_IPSEC_CIPHER_MASK 0xff00
+#define OP_PCL_IPSEC_AUTH_MASK 0x00ff
+
+#define OP_PCL_IPSEC_DES_IV64 0x0100
+#define OP_PCL_IPSEC_DES 0x0200
+#define OP_PCL_IPSEC_3DES 0x0300
+#define OP_PCL_IPSEC_AES_CBC 0x0c00
+#define OP_PCL_IPSEC_AES_CTR 0x0d00
+#define OP_PCL_IPSEC_AES_XTS 0x1600
+#define OP_PCL_IPSEC_AES_CCM8 0x0e00
+#define OP_PCL_IPSEC_AES_CCM12 0x0f00
+#define OP_PCL_IPSEC_AES_CCM16 0x1000
+#define OP_PCL_IPSEC_AES_GCM8 0x1200
+#define OP_PCL_IPSEC_AES_GCM12 0x1300
+#define OP_PCL_IPSEC_AES_GCM16 0x1400
+
+#define OP_PCL_IPSEC_HMAC_NULL 0x0000
+#define OP_PCL_IPSEC_HMAC_MD5_96 0x0001
+#define OP_PCL_IPSEC_HMAC_SHA1_96 0x0002
+#define OP_PCL_IPSEC_AES_XCBC_MAC_96 0x0005
+#define OP_PCL_IPSEC_HMAC_MD5_128 0x0006
+#define OP_PCL_IPSEC_HMAC_SHA1_160 0x0007
+#define OP_PCL_IPSEC_HMAC_SHA2_256_128 0x000c
+#define OP_PCL_IPSEC_HMAC_SHA2_384_192 0x000d
+#define OP_PCL_IPSEC_HMAC_SHA2_512_256 0x000e
/* For SRTP - OP_PCLID_SRTP */
-#define OP_PCL_SRTP_CIPHER_MASK 0xff00
-#define OP_PCL_SRTP_AUTH_MASK 0x00ff
+#define OP_PCL_SRTP_CIPHER_MASK 0xff00
+#define OP_PCL_SRTP_AUTH_MASK 0x00ff
-#define OP_PCL_SRTP_AES_CTR 0x0d00
+#define OP_PCL_SRTP_AES_CTR 0x0d00
-#define OP_PCL_SRTP_HMAC_SHA1_160 0x0007
+#define OP_PCL_SRTP_HMAC_SHA1_160 0x0007
/* For SSL 3.0 - OP_PCLID_SSL30 */
-#define OP_PCL_SSL30_AES_128_CBC_SHA 0x002f
-#define OP_PCL_SSL30_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_SSL30_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_SSL30_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_SSL30_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_SSL30_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_SSL30_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_SSL30_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_SSL30_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_SSL30_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_SSL30_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_SSL30_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_SSL30_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_SSL30_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_SSL30_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_SSL30_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_SSL30_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_SSL30_AES_256_CBC_SHA 0x0035
-#define OP_PCL_SSL30_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_SSL30_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_SSL30_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_SSL30_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_SSL30_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_SSL30_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_SSL30_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_SSL30_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_SSL30_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_SSL30_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_SSL30_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_SSL30_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_SSL30_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_SSL30_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_SSL30_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_SSL30_AES_256_CBC_SHA_17 0xc022
-
-#define OP_PCL_SSL30_3DES_EDE_CBC_MD5 0x0023
-
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_SSL30_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_SSL30_DES_CBC_MD5 0x0022
-
-#define OP_PCL_SSL30_DES40_CBC_SHA 0x0008
-#define OP_PCL_SSL30_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_SSL30_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_SSL30_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_SSL30_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_SSL30_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_SSL30_DES40_CBC_SHA_7 0x0026
-
-#define OP_PCL_SSL30_DES_CBC_SHA 0x001e
-#define OP_PCL_SSL30_DES_CBC_SHA_2 0x0009
-#define OP_PCL_SSL30_DES_CBC_SHA_3 0x000c
-#define OP_PCL_SSL30_DES_CBC_SHA_4 0x000f
-#define OP_PCL_SSL30_DES_CBC_SHA_5 0x0012
-#define OP_PCL_SSL30_DES_CBC_SHA_6 0x0015
-#define OP_PCL_SSL30_DES_CBC_SHA_7 0x001a
-
-#define OP_PCL_SSL30_RC4_128_MD5 0x0024
-#define OP_PCL_SSL30_RC4_128_MD5_2 0x0004
-#define OP_PCL_SSL30_RC4_128_MD5_3 0x0018
-
-#define OP_PCL_SSL30_RC4_40_MD5 0x002b
-#define OP_PCL_SSL30_RC4_40_MD5_2 0x0003
-#define OP_PCL_SSL30_RC4_40_MD5_3 0x0017
-
-#define OP_PCL_SSL30_RC4_128_SHA 0x0020
-#define OP_PCL_SSL30_RC4_128_SHA_2 0x008a
-#define OP_PCL_SSL30_RC4_128_SHA_3 0x008e
-#define OP_PCL_SSL30_RC4_128_SHA_4 0x0092
-#define OP_PCL_SSL30_RC4_128_SHA_5 0x0005
-#define OP_PCL_SSL30_RC4_128_SHA_6 0xc002
-#define OP_PCL_SSL30_RC4_128_SHA_7 0xc007
-#define OP_PCL_SSL30_RC4_128_SHA_8 0xc00c
-#define OP_PCL_SSL30_RC4_128_SHA_9 0xc011
-#define OP_PCL_SSL30_RC4_128_SHA_10 0xc016
-
-#define OP_PCL_SSL30_RC4_40_SHA 0x0028
+#define OP_PCL_SSL30_AES_128_CBC_SHA 0x002f
+#define OP_PCL_SSL30_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_SSL30_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_SSL30_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_SSL30_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_SSL30_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_SSL30_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_SSL30_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_SSL30_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_SSL30_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_SSL30_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_SSL30_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_SSL30_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_SSL30_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_SSL30_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_SSL30_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_SSL30_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_SSL30_AES_256_CBC_SHA 0x0035
+#define OP_PCL_SSL30_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_SSL30_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_SSL30_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_SSL30_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_SSL30_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_SSL30_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_SSL30_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_SSL30_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_SSL30_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_SSL30_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_SSL30_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_SSL30_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_SSL30_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_SSL30_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_SSL30_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_SSL30_AES_256_CBC_SHA_17 0xc022
+
+#define OP_PCL_SSL30_3DES_EDE_CBC_MD5 0x0023
+
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_SSL30_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_SSL30_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_SSL30_DES_CBC_MD5 0x0022
+
+#define OP_PCL_SSL30_DES40_CBC_SHA 0x0008
+#define OP_PCL_SSL30_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_SSL30_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_SSL30_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_SSL30_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_SSL30_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_SSL30_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_SSL30_DES_CBC_SHA 0x001e
+#define OP_PCL_SSL30_DES_CBC_SHA_2 0x0009
+#define OP_PCL_SSL30_DES_CBC_SHA_3 0x000c
+#define OP_PCL_SSL30_DES_CBC_SHA_4 0x000f
+#define OP_PCL_SSL30_DES_CBC_SHA_5 0x0012
+#define OP_PCL_SSL30_DES_CBC_SHA_6 0x0015
+#define OP_PCL_SSL30_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_SSL30_RC4_128_MD5 0x0024
+#define OP_PCL_SSL30_RC4_128_MD5_2 0x0004
+#define OP_PCL_SSL30_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_SSL30_RC4_40_MD5 0x002b
+#define OP_PCL_SSL30_RC4_40_MD5_2 0x0003
+#define OP_PCL_SSL30_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_SSL30_RC4_128_SHA 0x0020
+#define OP_PCL_SSL30_RC4_128_SHA_2 0x008a
+#define OP_PCL_SSL30_RC4_128_SHA_3 0x008e
+#define OP_PCL_SSL30_RC4_128_SHA_4 0x0092
+#define OP_PCL_SSL30_RC4_128_SHA_5 0x0005
+#define OP_PCL_SSL30_RC4_128_SHA_6 0xc002
+#define OP_PCL_SSL30_RC4_128_SHA_7 0xc007
+#define OP_PCL_SSL30_RC4_128_SHA_8 0xc00c
+#define OP_PCL_SSL30_RC4_128_SHA_9 0xc011
+#define OP_PCL_SSL30_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_SSL30_RC4_40_SHA 0x0028
/* For TLS 1.0 - OP_PCLID_TLS10 */
-#define OP_PCL_TLS10_AES_128_CBC_SHA 0x002f
-#define OP_PCL_TLS10_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_TLS10_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_TLS10_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_TLS10_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_TLS10_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_TLS10_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_TLS10_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_TLS10_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_TLS10_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_TLS10_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_TLS10_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_TLS10_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_TLS10_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_TLS10_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_TLS10_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_TLS10_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_TLS10_AES_256_CBC_SHA 0x0035
-#define OP_PCL_TLS10_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_TLS10_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_TLS10_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_TLS10_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_TLS10_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_TLS10_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_TLS10_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_TLS10_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_TLS10_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_TLS10_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_TLS10_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_TLS10_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_TLS10_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_TLS10_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_TLS10_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_TLS10_AES_256_CBC_SHA_17 0xc022
-
-/* #define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0x0023 */
-
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_TLS10_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_TLS10_DES_CBC_MD5 0x0022
-
-#define OP_PCL_TLS10_DES40_CBC_SHA 0x0008
-#define OP_PCL_TLS10_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_TLS10_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_TLS10_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_TLS10_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_TLS10_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_TLS10_DES40_CBC_SHA_7 0x0026
-
-
-#define OP_PCL_TLS10_DES_CBC_SHA 0x001e
-#define OP_PCL_TLS10_DES_CBC_SHA_2 0x0009
-#define OP_PCL_TLS10_DES_CBC_SHA_3 0x000c
-#define OP_PCL_TLS10_DES_CBC_SHA_4 0x000f
-#define OP_PCL_TLS10_DES_CBC_SHA_5 0x0012
-#define OP_PCL_TLS10_DES_CBC_SHA_6 0x0015
-#define OP_PCL_TLS10_DES_CBC_SHA_7 0x001a
-
-#define OP_PCL_TLS10_RC4_128_MD5 0x0024
-#define OP_PCL_TLS10_RC4_128_MD5_2 0x0004
-#define OP_PCL_TLS10_RC4_128_MD5_3 0x0018
-
-#define OP_PCL_TLS10_RC4_40_MD5 0x002b
-#define OP_PCL_TLS10_RC4_40_MD5_2 0x0003
-#define OP_PCL_TLS10_RC4_40_MD5_3 0x0017
-
-#define OP_PCL_TLS10_RC4_128_SHA 0x0020
-#define OP_PCL_TLS10_RC4_128_SHA_2 0x008a
-#define OP_PCL_TLS10_RC4_128_SHA_3 0x008e
-#define OP_PCL_TLS10_RC4_128_SHA_4 0x0092
-#define OP_PCL_TLS10_RC4_128_SHA_5 0x0005
-#define OP_PCL_TLS10_RC4_128_SHA_6 0xc002
-#define OP_PCL_TLS10_RC4_128_SHA_7 0xc007
-#define OP_PCL_TLS10_RC4_128_SHA_8 0xc00c
-#define OP_PCL_TLS10_RC4_128_SHA_9 0xc011
-#define OP_PCL_TLS10_RC4_128_SHA_10 0xc016
-
-#define OP_PCL_TLS10_RC4_40_SHA 0x0028
-
-#define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0xff23
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA160 0xff30
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA224 0xff34
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA256 0xff36
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA384 0xff33
-#define OP_PCL_TLS10_3DES_EDE_CBC_SHA512 0xff35
-#define OP_PCL_TLS10_AES_128_CBC_SHA160 0xff80
-#define OP_PCL_TLS10_AES_128_CBC_SHA224 0xff84
-#define OP_PCL_TLS10_AES_128_CBC_SHA256 0xff86
-#define OP_PCL_TLS10_AES_128_CBC_SHA384 0xff83
-#define OP_PCL_TLS10_AES_128_CBC_SHA512 0xff85
-#define OP_PCL_TLS10_AES_192_CBC_SHA160 0xff20
-#define OP_PCL_TLS10_AES_192_CBC_SHA224 0xff24
-#define OP_PCL_TLS10_AES_192_CBC_SHA256 0xff26
-#define OP_PCL_TLS10_AES_192_CBC_SHA384 0xff23
-#define OP_PCL_TLS10_AES_192_CBC_SHA512 0xff25
-#define OP_PCL_TLS10_AES_256_CBC_SHA160 0xff60
-#define OP_PCL_TLS10_AES_256_CBC_SHA224 0xff64
-#define OP_PCL_TLS10_AES_256_CBC_SHA256 0xff66
-#define OP_PCL_TLS10_AES_256_CBC_SHA384 0xff63
-#define OP_PCL_TLS10_AES_256_CBC_SHA512 0xff65
+#define OP_PCL_TLS10_AES_128_CBC_SHA 0x002f
+#define OP_PCL_TLS10_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_TLS10_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_TLS10_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_TLS10_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_TLS10_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_TLS10_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_TLS10_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_TLS10_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_TLS10_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_TLS10_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_TLS10_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_TLS10_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_TLS10_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_TLS10_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_TLS10_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_TLS10_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_TLS10_AES_256_CBC_SHA 0x0035
+#define OP_PCL_TLS10_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_TLS10_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_TLS10_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_TLS10_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_TLS10_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_TLS10_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_TLS10_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_TLS10_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_TLS10_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_TLS10_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_TLS10_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_TLS10_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_TLS10_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_TLS10_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_TLS10_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_TLS10_AES_256_CBC_SHA_17 0xc022
+
+/* #define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_TLS10_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_TLS10_DES_CBC_MD5 0x0022
+
+#define OP_PCL_TLS10_DES40_CBC_SHA 0x0008
+#define OP_PCL_TLS10_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_TLS10_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_TLS10_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_TLS10_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_TLS10_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_TLS10_DES40_CBC_SHA_7 0x0026
+
+
+#define OP_PCL_TLS10_DES_CBC_SHA 0x001e
+#define OP_PCL_TLS10_DES_CBC_SHA_2 0x0009
+#define OP_PCL_TLS10_DES_CBC_SHA_3 0x000c
+#define OP_PCL_TLS10_DES_CBC_SHA_4 0x000f
+#define OP_PCL_TLS10_DES_CBC_SHA_5 0x0012
+#define OP_PCL_TLS10_DES_CBC_SHA_6 0x0015
+#define OP_PCL_TLS10_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_TLS10_RC4_128_MD5 0x0024
+#define OP_PCL_TLS10_RC4_128_MD5_2 0x0004
+#define OP_PCL_TLS10_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_TLS10_RC4_40_MD5 0x002b
+#define OP_PCL_TLS10_RC4_40_MD5_2 0x0003
+#define OP_PCL_TLS10_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_TLS10_RC4_128_SHA 0x0020
+#define OP_PCL_TLS10_RC4_128_SHA_2 0x008a
+#define OP_PCL_TLS10_RC4_128_SHA_3 0x008e
+#define OP_PCL_TLS10_RC4_128_SHA_4 0x0092
+#define OP_PCL_TLS10_RC4_128_SHA_5 0x0005
+#define OP_PCL_TLS10_RC4_128_SHA_6 0xc002
+#define OP_PCL_TLS10_RC4_128_SHA_7 0xc007
+#define OP_PCL_TLS10_RC4_128_SHA_8 0xc00c
+#define OP_PCL_TLS10_RC4_128_SHA_9 0xc011
+#define OP_PCL_TLS10_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_TLS10_RC4_40_SHA 0x0028
+
+#define OP_PCL_TLS10_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_TLS10_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_TLS10_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_TLS10_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_TLS10_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_TLS10_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_TLS10_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_TLS10_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_TLS10_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_TLS10_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_TLS10_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_TLS10_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_TLS10_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_TLS10_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_TLS10_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_TLS10_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_TLS10_AES_256_CBC_SHA512 0xff65
/* For TLS 1.1 - OP_PCLID_TLS11 */
-#define OP_PCL_TLS11_AES_128_CBC_SHA 0x002f
-#define OP_PCL_TLS11_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_TLS11_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_TLS11_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_TLS11_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_TLS11_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_TLS11_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_TLS11_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_TLS11_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_TLS11_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_TLS11_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_TLS11_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_TLS11_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_TLS11_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_TLS11_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_TLS11_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_TLS11_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_TLS11_AES_256_CBC_SHA 0x0035
-#define OP_PCL_TLS11_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_TLS11_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_TLS11_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_TLS11_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_TLS11_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_TLS11_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_TLS11_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_TLS11_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_TLS11_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_TLS11_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_TLS11_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_TLS11_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_TLS11_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_TLS11_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_TLS11_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_TLS11_AES_256_CBC_SHA_17 0xc022
-
-/* #define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0x0023 */
-
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_TLS11_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_TLS11_DES_CBC_MD5 0x0022
-
-#define OP_PCL_TLS11_DES40_CBC_SHA 0x0008
-#define OP_PCL_TLS11_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_TLS11_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_TLS11_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_TLS11_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_TLS11_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_TLS11_DES40_CBC_SHA_7 0x0026
-
-#define OP_PCL_TLS11_DES_CBC_SHA 0x001e
-#define OP_PCL_TLS11_DES_CBC_SHA_2 0x0009
-#define OP_PCL_TLS11_DES_CBC_SHA_3 0x000c
-#define OP_PCL_TLS11_DES_CBC_SHA_4 0x000f
-#define OP_PCL_TLS11_DES_CBC_SHA_5 0x0012
-#define OP_PCL_TLS11_DES_CBC_SHA_6 0x0015
-#define OP_PCL_TLS11_DES_CBC_SHA_7 0x001a
-
-#define OP_PCL_TLS11_RC4_128_MD5 0x0024
-#define OP_PCL_TLS11_RC4_128_MD5_2 0x0004
-#define OP_PCL_TLS11_RC4_128_MD5_3 0x0018
-
-#define OP_PCL_TLS11_RC4_40_MD5 0x002b
-#define OP_PCL_TLS11_RC4_40_MD5_2 0x0003
-#define OP_PCL_TLS11_RC4_40_MD5_3 0x0017
-
-#define OP_PCL_TLS11_RC4_128_SHA 0x0020
-#define OP_PCL_TLS11_RC4_128_SHA_2 0x008a
-#define OP_PCL_TLS11_RC4_128_SHA_3 0x008e
-#define OP_PCL_TLS11_RC4_128_SHA_4 0x0092
-#define OP_PCL_TLS11_RC4_128_SHA_5 0x0005
-#define OP_PCL_TLS11_RC4_128_SHA_6 0xc002
-#define OP_PCL_TLS11_RC4_128_SHA_7 0xc007
-#define OP_PCL_TLS11_RC4_128_SHA_8 0xc00c
-#define OP_PCL_TLS11_RC4_128_SHA_9 0xc011
-#define OP_PCL_TLS11_RC4_128_SHA_10 0xc016
-
-#define OP_PCL_TLS11_RC4_40_SHA 0x0028
-
-#define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0xff23
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA160 0xff30
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA224 0xff34
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA256 0xff36
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA384 0xff33
-#define OP_PCL_TLS11_3DES_EDE_CBC_SHA512 0xff35
-#define OP_PCL_TLS11_AES_128_CBC_SHA160 0xff80
-#define OP_PCL_TLS11_AES_128_CBC_SHA224 0xff84
-#define OP_PCL_TLS11_AES_128_CBC_SHA256 0xff86
-#define OP_PCL_TLS11_AES_128_CBC_SHA384 0xff83
-#define OP_PCL_TLS11_AES_128_CBC_SHA512 0xff85
-#define OP_PCL_TLS11_AES_192_CBC_SHA160 0xff20
-#define OP_PCL_TLS11_AES_192_CBC_SHA224 0xff24
-#define OP_PCL_TLS11_AES_192_CBC_SHA256 0xff26
-#define OP_PCL_TLS11_AES_192_CBC_SHA384 0xff23
-#define OP_PCL_TLS11_AES_192_CBC_SHA512 0xff25
-#define OP_PCL_TLS11_AES_256_CBC_SHA160 0xff60
-#define OP_PCL_TLS11_AES_256_CBC_SHA224 0xff64
-#define OP_PCL_TLS11_AES_256_CBC_SHA256 0xff66
-#define OP_PCL_TLS11_AES_256_CBC_SHA384 0xff63
-#define OP_PCL_TLS11_AES_256_CBC_SHA512 0xff65
+#define OP_PCL_TLS11_AES_128_CBC_SHA 0x002f
+#define OP_PCL_TLS11_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_TLS11_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_TLS11_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_TLS11_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_TLS11_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_TLS11_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_TLS11_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_TLS11_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_TLS11_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_TLS11_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_TLS11_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_TLS11_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_TLS11_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_TLS11_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_TLS11_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_TLS11_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_TLS11_AES_256_CBC_SHA 0x0035
+#define OP_PCL_TLS11_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_TLS11_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_TLS11_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_TLS11_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_TLS11_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_TLS11_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_TLS11_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_TLS11_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_TLS11_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_TLS11_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_TLS11_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_TLS11_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_TLS11_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_TLS11_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_TLS11_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_TLS11_AES_256_CBC_SHA_17 0xc022
+
+/* #define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_TLS11_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_TLS11_DES_CBC_MD5 0x0022
+
+#define OP_PCL_TLS11_DES40_CBC_SHA 0x0008
+#define OP_PCL_TLS11_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_TLS11_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_TLS11_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_TLS11_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_TLS11_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_TLS11_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_TLS11_DES_CBC_SHA 0x001e
+#define OP_PCL_TLS11_DES_CBC_SHA_2 0x0009
+#define OP_PCL_TLS11_DES_CBC_SHA_3 0x000c
+#define OP_PCL_TLS11_DES_CBC_SHA_4 0x000f
+#define OP_PCL_TLS11_DES_CBC_SHA_5 0x0012
+#define OP_PCL_TLS11_DES_CBC_SHA_6 0x0015
+#define OP_PCL_TLS11_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_TLS11_RC4_128_MD5 0x0024
+#define OP_PCL_TLS11_RC4_128_MD5_2 0x0004
+#define OP_PCL_TLS11_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_TLS11_RC4_40_MD5 0x002b
+#define OP_PCL_TLS11_RC4_40_MD5_2 0x0003
+#define OP_PCL_TLS11_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_TLS11_RC4_128_SHA 0x0020
+#define OP_PCL_TLS11_RC4_128_SHA_2 0x008a
+#define OP_PCL_TLS11_RC4_128_SHA_3 0x008e
+#define OP_PCL_TLS11_RC4_128_SHA_4 0x0092
+#define OP_PCL_TLS11_RC4_128_SHA_5 0x0005
+#define OP_PCL_TLS11_RC4_128_SHA_6 0xc002
+#define OP_PCL_TLS11_RC4_128_SHA_7 0xc007
+#define OP_PCL_TLS11_RC4_128_SHA_8 0xc00c
+#define OP_PCL_TLS11_RC4_128_SHA_9 0xc011
+#define OP_PCL_TLS11_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_TLS11_RC4_40_SHA 0x0028
+
+#define OP_PCL_TLS11_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_TLS11_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_TLS11_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_TLS11_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_TLS11_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_TLS11_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_TLS11_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_TLS11_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_TLS11_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_TLS11_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_TLS11_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_TLS11_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_TLS11_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_TLS11_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_TLS11_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_TLS11_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_TLS11_AES_256_CBC_SHA512 0xff65
/* For TLS 1.2 - OP_PCLID_TLS12 */
-#define OP_PCL_TLS12_AES_128_CBC_SHA 0x002f
-#define OP_PCL_TLS12_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_TLS12_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_TLS12_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_TLS12_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_TLS12_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_TLS12_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_TLS12_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_TLS12_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_TLS12_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_TLS12_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_TLS12_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_TLS12_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_TLS12_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_TLS12_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_TLS12_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_TLS12_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_TLS12_AES_256_CBC_SHA 0x0035
-#define OP_PCL_TLS12_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_TLS12_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_TLS12_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_TLS12_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_TLS12_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_TLS12_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_TLS12_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_TLS12_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_TLS12_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_TLS12_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_TLS12_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_TLS12_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_TLS12_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_TLS12_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_TLS12_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_TLS12_AES_256_CBC_SHA_17 0xc022
-
-/* #define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0x0023 */
-
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_TLS12_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_TLS12_DES_CBC_MD5 0x0022
-
-#define OP_PCL_TLS12_DES40_CBC_SHA 0x0008
-#define OP_PCL_TLS12_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_TLS12_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_TLS12_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_TLS12_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_TLS12_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_TLS12_DES40_CBC_SHA_7 0x0026
-
-#define OP_PCL_TLS12_DES_CBC_SHA 0x001e
-#define OP_PCL_TLS12_DES_CBC_SHA_2 0x0009
-#define OP_PCL_TLS12_DES_CBC_SHA_3 0x000c
-#define OP_PCL_TLS12_DES_CBC_SHA_4 0x000f
-#define OP_PCL_TLS12_DES_CBC_SHA_5 0x0012
-#define OP_PCL_TLS12_DES_CBC_SHA_6 0x0015
-#define OP_PCL_TLS12_DES_CBC_SHA_7 0x001a
-
-#define OP_PCL_TLS12_RC4_128_MD5 0x0024
-#define OP_PCL_TLS12_RC4_128_MD5_2 0x0004
-#define OP_PCL_TLS12_RC4_128_MD5_3 0x0018
-
-#define OP_PCL_TLS12_RC4_40_MD5 0x002b
-#define OP_PCL_TLS12_RC4_40_MD5_2 0x0003
-#define OP_PCL_TLS12_RC4_40_MD5_3 0x0017
-
-#define OP_PCL_TLS12_RC4_128_SHA 0x0020
-#define OP_PCL_TLS12_RC4_128_SHA_2 0x008a
-#define OP_PCL_TLS12_RC4_128_SHA_3 0x008e
-#define OP_PCL_TLS12_RC4_128_SHA_4 0x0092
-#define OP_PCL_TLS12_RC4_128_SHA_5 0x0005
-#define OP_PCL_TLS12_RC4_128_SHA_6 0xc002
-#define OP_PCL_TLS12_RC4_128_SHA_7 0xc007
-#define OP_PCL_TLS12_RC4_128_SHA_8 0xc00c
-#define OP_PCL_TLS12_RC4_128_SHA_9 0xc011
-#define OP_PCL_TLS12_RC4_128_SHA_10 0xc016
-
-#define OP_PCL_TLS12_RC4_40_SHA 0x0028
-
-/* #define OP_PCL_TLS12_AES_128_CBC_SHA256 0x003c */
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_2 0x003e
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_3 0x003f
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_4 0x0040
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_5 0x0067
-#define OP_PCL_TLS12_AES_128_CBC_SHA256_6 0x006c
-
-/* #define OP_PCL_TLS12_AES_256_CBC_SHA256 0x003d */
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_2 0x0068
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_3 0x0069
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_4 0x006a
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_5 0x006b
-#define OP_PCL_TLS12_AES_256_CBC_SHA256_6 0x006d
+#define OP_PCL_TLS12_AES_128_CBC_SHA 0x002f
+#define OP_PCL_TLS12_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_TLS12_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_TLS12_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_TLS12_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_TLS12_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_TLS12_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_TLS12_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_TLS12_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_TLS12_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_TLS12_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_TLS12_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_TLS12_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_TLS12_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_TLS12_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_TLS12_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_TLS12_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_TLS12_AES_256_CBC_SHA 0x0035
+#define OP_PCL_TLS12_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_TLS12_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_TLS12_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_TLS12_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_TLS12_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_TLS12_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_TLS12_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_TLS12_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_TLS12_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_TLS12_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_TLS12_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_TLS12_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_TLS12_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_TLS12_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_TLS12_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_TLS12_AES_256_CBC_SHA_17 0xc022
+
+/* #define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_TLS12_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_TLS12_DES_CBC_MD5 0x0022
+
+#define OP_PCL_TLS12_DES40_CBC_SHA 0x0008
+#define OP_PCL_TLS12_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_TLS12_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_TLS12_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_TLS12_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_TLS12_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_TLS12_DES40_CBC_SHA_7 0x0026
+
+#define OP_PCL_TLS12_DES_CBC_SHA 0x001e
+#define OP_PCL_TLS12_DES_CBC_SHA_2 0x0009
+#define OP_PCL_TLS12_DES_CBC_SHA_3 0x000c
+#define OP_PCL_TLS12_DES_CBC_SHA_4 0x000f
+#define OP_PCL_TLS12_DES_CBC_SHA_5 0x0012
+#define OP_PCL_TLS12_DES_CBC_SHA_6 0x0015
+#define OP_PCL_TLS12_DES_CBC_SHA_7 0x001a
+
+#define OP_PCL_TLS12_RC4_128_MD5 0x0024
+#define OP_PCL_TLS12_RC4_128_MD5_2 0x0004
+#define OP_PCL_TLS12_RC4_128_MD5_3 0x0018
+
+#define OP_PCL_TLS12_RC4_40_MD5 0x002b
+#define OP_PCL_TLS12_RC4_40_MD5_2 0x0003
+#define OP_PCL_TLS12_RC4_40_MD5_3 0x0017
+
+#define OP_PCL_TLS12_RC4_128_SHA 0x0020
+#define OP_PCL_TLS12_RC4_128_SHA_2 0x008a
+#define OP_PCL_TLS12_RC4_128_SHA_3 0x008e
+#define OP_PCL_TLS12_RC4_128_SHA_4 0x0092
+#define OP_PCL_TLS12_RC4_128_SHA_5 0x0005
+#define OP_PCL_TLS12_RC4_128_SHA_6 0xc002
+#define OP_PCL_TLS12_RC4_128_SHA_7 0xc007
+#define OP_PCL_TLS12_RC4_128_SHA_8 0xc00c
+#define OP_PCL_TLS12_RC4_128_SHA_9 0xc011
+#define OP_PCL_TLS12_RC4_128_SHA_10 0xc016
+
+#define OP_PCL_TLS12_RC4_40_SHA 0x0028
+
+/* #define OP_PCL_TLS12_AES_128_CBC_SHA256 0x003c */
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_2 0x003e
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_3 0x003f
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_4 0x0040
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_5 0x0067
+#define OP_PCL_TLS12_AES_128_CBC_SHA256_6 0x006c
+
+/* #define OP_PCL_TLS12_AES_256_CBC_SHA256 0x003d */
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_2 0x0068
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_3 0x0069
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_4 0x006a
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_5 0x006b
+#define OP_PCL_TLS12_AES_256_CBC_SHA256_6 0x006d
/* AEAD_AES_xxx_CCM/GCM remain to be defined... */
-#define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0xff23
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA160 0xff30
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA224 0xff34
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA256 0xff36
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA384 0xff33
-#define OP_PCL_TLS12_3DES_EDE_CBC_SHA512 0xff35
-#define OP_PCL_TLS12_AES_128_CBC_SHA160 0xff80
-#define OP_PCL_TLS12_AES_128_CBC_SHA224 0xff84
-#define OP_PCL_TLS12_AES_128_CBC_SHA256 0xff86
-#define OP_PCL_TLS12_AES_128_CBC_SHA384 0xff83
-#define OP_PCL_TLS12_AES_128_CBC_SHA512 0xff85
-#define OP_PCL_TLS12_AES_192_CBC_SHA160 0xff20
-#define OP_PCL_TLS12_AES_192_CBC_SHA224 0xff24
-#define OP_PCL_TLS12_AES_192_CBC_SHA256 0xff26
-#define OP_PCL_TLS12_AES_192_CBC_SHA384 0xff23
-#define OP_PCL_TLS12_AES_192_CBC_SHA512 0xff25
-#define OP_PCL_TLS12_AES_256_CBC_SHA160 0xff60
-#define OP_PCL_TLS12_AES_256_CBC_SHA224 0xff64
-#define OP_PCL_TLS12_AES_256_CBC_SHA256 0xff66
-#define OP_PCL_TLS12_AES_256_CBC_SHA384 0xff63
-#define OP_PCL_TLS12_AES_256_CBC_SHA512 0xff65
+#define OP_PCL_TLS12_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_TLS12_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_TLS12_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_TLS12_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_TLS12_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_TLS12_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_TLS12_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_TLS12_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_TLS12_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_TLS12_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_TLS12_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_TLS12_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_TLS12_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_TLS12_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_TLS12_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_TLS12_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_TLS12_AES_256_CBC_SHA512 0xff65
/* For DTLS - OP_PCLID_DTLS */
-#define OP_PCL_DTLS_AES_128_CBC_SHA 0x002f
-#define OP_PCL_DTLS_AES_128_CBC_SHA_2 0x0030
-#define OP_PCL_DTLS_AES_128_CBC_SHA_3 0x0031
-#define OP_PCL_DTLS_AES_128_CBC_SHA_4 0x0032
-#define OP_PCL_DTLS_AES_128_CBC_SHA_5 0x0033
-#define OP_PCL_DTLS_AES_128_CBC_SHA_6 0x0034
-#define OP_PCL_DTLS_AES_128_CBC_SHA_7 0x008c
-#define OP_PCL_DTLS_AES_128_CBC_SHA_8 0x0090
-#define OP_PCL_DTLS_AES_128_CBC_SHA_9 0x0094
-#define OP_PCL_DTLS_AES_128_CBC_SHA_10 0xc004
-#define OP_PCL_DTLS_AES_128_CBC_SHA_11 0xc009
-#define OP_PCL_DTLS_AES_128_CBC_SHA_12 0xc00e
-#define OP_PCL_DTLS_AES_128_CBC_SHA_13 0xc013
-#define OP_PCL_DTLS_AES_128_CBC_SHA_14 0xc018
-#define OP_PCL_DTLS_AES_128_CBC_SHA_15 0xc01d
-#define OP_PCL_DTLS_AES_128_CBC_SHA_16 0xc01e
-#define OP_PCL_DTLS_AES_128_CBC_SHA_17 0xc01f
-
-#define OP_PCL_DTLS_AES_256_CBC_SHA 0x0035
-#define OP_PCL_DTLS_AES_256_CBC_SHA_2 0x0036
-#define OP_PCL_DTLS_AES_256_CBC_SHA_3 0x0037
-#define OP_PCL_DTLS_AES_256_CBC_SHA_4 0x0038
-#define OP_PCL_DTLS_AES_256_CBC_SHA_5 0x0039
-#define OP_PCL_DTLS_AES_256_CBC_SHA_6 0x003a
-#define OP_PCL_DTLS_AES_256_CBC_SHA_7 0x008d
-#define OP_PCL_DTLS_AES_256_CBC_SHA_8 0x0091
-#define OP_PCL_DTLS_AES_256_CBC_SHA_9 0x0095
-#define OP_PCL_DTLS_AES_256_CBC_SHA_10 0xc005
-#define OP_PCL_DTLS_AES_256_CBC_SHA_11 0xc00a
-#define OP_PCL_DTLS_AES_256_CBC_SHA_12 0xc00f
-#define OP_PCL_DTLS_AES_256_CBC_SHA_13 0xc014
-#define OP_PCL_DTLS_AES_256_CBC_SHA_14 0xc019
-#define OP_PCL_DTLS_AES_256_CBC_SHA_15 0xc020
-#define OP_PCL_DTLS_AES_256_CBC_SHA_16 0xc021
-#define OP_PCL_DTLS_AES_256_CBC_SHA_17 0xc022
-
-/* #define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0x0023 */
-
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA 0x001f
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_2 0x008b
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_3 0x008f
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_4 0x0093
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_5 0x000a
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_6 0x000d
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_7 0x0010
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_8 0x0013
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_9 0x0016
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_10 0x001b
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_11 0xc003
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_12 0xc008
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_13 0xc00d
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_14 0xc012
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_15 0xc017
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_16 0xc01a
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_17 0xc01b
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_18 0xc01c
-
-#define OP_PCL_DTLS_DES40_CBC_MD5 0x0029
-
-#define OP_PCL_DTLS_DES_CBC_MD5 0x0022
-
-#define OP_PCL_DTLS_DES40_CBC_SHA 0x0008
-#define OP_PCL_DTLS_DES40_CBC_SHA_2 0x000b
-#define OP_PCL_DTLS_DES40_CBC_SHA_3 0x000e
-#define OP_PCL_DTLS_DES40_CBC_SHA_4 0x0011
-#define OP_PCL_DTLS_DES40_CBC_SHA_5 0x0014
-#define OP_PCL_DTLS_DES40_CBC_SHA_6 0x0019
-#define OP_PCL_DTLS_DES40_CBC_SHA_7 0x0026
-
-
-#define OP_PCL_DTLS_DES_CBC_SHA 0x001e
-#define OP_PCL_DTLS_DES_CBC_SHA_2 0x0009
-#define OP_PCL_DTLS_DES_CBC_SHA_3 0x000c
-#define OP_PCL_DTLS_DES_CBC_SHA_4 0x000f
-#define OP_PCL_DTLS_DES_CBC_SHA_5 0x0012
-#define OP_PCL_DTLS_DES_CBC_SHA_6 0x0015
-#define OP_PCL_DTLS_DES_CBC_SHA_7 0x001a
-
-
-#define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0xff23
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA160 0xff30
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA224 0xff34
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA256 0xff36
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA384 0xff33
-#define OP_PCL_DTLS_3DES_EDE_CBC_SHA512 0xff35
-#define OP_PCL_DTLS_AES_128_CBC_SHA160 0xff80
-#define OP_PCL_DTLS_AES_128_CBC_SHA224 0xff84
-#define OP_PCL_DTLS_AES_128_CBC_SHA256 0xff86
-#define OP_PCL_DTLS_AES_128_CBC_SHA384 0xff83
-#define OP_PCL_DTLS_AES_128_CBC_SHA512 0xff85
-#define OP_PCL_DTLS_AES_192_CBC_SHA160 0xff20
-#define OP_PCL_DTLS_AES_192_CBC_SHA224 0xff24
-#define OP_PCL_DTLS_AES_192_CBC_SHA256 0xff26
-#define OP_PCL_DTLS_AES_192_CBC_SHA384 0xff23
-#define OP_PCL_DTLS_AES_192_CBC_SHA512 0xff25
-#define OP_PCL_DTLS_AES_256_CBC_SHA160 0xff60
-#define OP_PCL_DTLS_AES_256_CBC_SHA224 0xff64
-#define OP_PCL_DTLS_AES_256_CBC_SHA256 0xff66
-#define OP_PCL_DTLS_AES_256_CBC_SHA384 0xff63
-#define OP_PCL_DTLS_AES_256_CBC_SHA512 0xff65
+#define OP_PCL_DTLS_AES_128_CBC_SHA 0x002f
+#define OP_PCL_DTLS_AES_128_CBC_SHA_2 0x0030
+#define OP_PCL_DTLS_AES_128_CBC_SHA_3 0x0031
+#define OP_PCL_DTLS_AES_128_CBC_SHA_4 0x0032
+#define OP_PCL_DTLS_AES_128_CBC_SHA_5 0x0033
+#define OP_PCL_DTLS_AES_128_CBC_SHA_6 0x0034
+#define OP_PCL_DTLS_AES_128_CBC_SHA_7 0x008c
+#define OP_PCL_DTLS_AES_128_CBC_SHA_8 0x0090
+#define OP_PCL_DTLS_AES_128_CBC_SHA_9 0x0094
+#define OP_PCL_DTLS_AES_128_CBC_SHA_10 0xc004
+#define OP_PCL_DTLS_AES_128_CBC_SHA_11 0xc009
+#define OP_PCL_DTLS_AES_128_CBC_SHA_12 0xc00e
+#define OP_PCL_DTLS_AES_128_CBC_SHA_13 0xc013
+#define OP_PCL_DTLS_AES_128_CBC_SHA_14 0xc018
+#define OP_PCL_DTLS_AES_128_CBC_SHA_15 0xc01d
+#define OP_PCL_DTLS_AES_128_CBC_SHA_16 0xc01e
+#define OP_PCL_DTLS_AES_128_CBC_SHA_17 0xc01f
+
+#define OP_PCL_DTLS_AES_256_CBC_SHA 0x0035
+#define OP_PCL_DTLS_AES_256_CBC_SHA_2 0x0036
+#define OP_PCL_DTLS_AES_256_CBC_SHA_3 0x0037
+#define OP_PCL_DTLS_AES_256_CBC_SHA_4 0x0038
+#define OP_PCL_DTLS_AES_256_CBC_SHA_5 0x0039
+#define OP_PCL_DTLS_AES_256_CBC_SHA_6 0x003a
+#define OP_PCL_DTLS_AES_256_CBC_SHA_7 0x008d
+#define OP_PCL_DTLS_AES_256_CBC_SHA_8 0x0091
+#define OP_PCL_DTLS_AES_256_CBC_SHA_9 0x0095
+#define OP_PCL_DTLS_AES_256_CBC_SHA_10 0xc005
+#define OP_PCL_DTLS_AES_256_CBC_SHA_11 0xc00a
+#define OP_PCL_DTLS_AES_256_CBC_SHA_12 0xc00f
+#define OP_PCL_DTLS_AES_256_CBC_SHA_13 0xc014
+#define OP_PCL_DTLS_AES_256_CBC_SHA_14 0xc019
+#define OP_PCL_DTLS_AES_256_CBC_SHA_15 0xc020
+#define OP_PCL_DTLS_AES_256_CBC_SHA_16 0xc021
+#define OP_PCL_DTLS_AES_256_CBC_SHA_17 0xc022
+
+/* #define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0x0023 */
+
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA 0x001f
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_2 0x008b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_3 0x008f
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_4 0x0093
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_5 0x000a
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_6 0x000d
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_7 0x0010
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_8 0x0013
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_9 0x0016
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_10 0x001b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_11 0xc003
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_12 0xc008
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_13 0xc00d
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_14 0xc012
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_15 0xc017
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_16 0xc01a
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_17 0xc01b
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA_18 0xc01c
+
+#define OP_PCL_DTLS_DES40_CBC_MD5 0x0029
+
+#define OP_PCL_DTLS_DES_CBC_MD5 0x0022
+
+#define OP_PCL_DTLS_DES40_CBC_SHA 0x0008
+#define OP_PCL_DTLS_DES40_CBC_SHA_2 0x000b
+#define OP_PCL_DTLS_DES40_CBC_SHA_3 0x000e
+#define OP_PCL_DTLS_DES40_CBC_SHA_4 0x0011
+#define OP_PCL_DTLS_DES40_CBC_SHA_5 0x0014
+#define OP_PCL_DTLS_DES40_CBC_SHA_6 0x0019
+#define OP_PCL_DTLS_DES40_CBC_SHA_7 0x0026
+
+
+#define OP_PCL_DTLS_DES_CBC_SHA 0x001e
+#define OP_PCL_DTLS_DES_CBC_SHA_2 0x0009
+#define OP_PCL_DTLS_DES_CBC_SHA_3 0x000c
+#define OP_PCL_DTLS_DES_CBC_SHA_4 0x000f
+#define OP_PCL_DTLS_DES_CBC_SHA_5 0x0012
+#define OP_PCL_DTLS_DES_CBC_SHA_6 0x0015
+#define OP_PCL_DTLS_DES_CBC_SHA_7 0x001a
+
+
+#define OP_PCL_DTLS_3DES_EDE_CBC_MD5 0xff23
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA160 0xff30
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA224 0xff34
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA256 0xff36
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA384 0xff33
+#define OP_PCL_DTLS_3DES_EDE_CBC_SHA512 0xff35
+#define OP_PCL_DTLS_AES_128_CBC_SHA160 0xff80
+#define OP_PCL_DTLS_AES_128_CBC_SHA224 0xff84
+#define OP_PCL_DTLS_AES_128_CBC_SHA256 0xff86
+#define OP_PCL_DTLS_AES_128_CBC_SHA384 0xff83
+#define OP_PCL_DTLS_AES_128_CBC_SHA512 0xff85
+#define OP_PCL_DTLS_AES_192_CBC_SHA160 0xff20
+#define OP_PCL_DTLS_AES_192_CBC_SHA224 0xff24
+#define OP_PCL_DTLS_AES_192_CBC_SHA256 0xff26
+#define OP_PCL_DTLS_AES_192_CBC_SHA384 0xff23
+#define OP_PCL_DTLS_AES_192_CBC_SHA512 0xff25
+#define OP_PCL_DTLS_AES_256_CBC_SHA160 0xff60
+#define OP_PCL_DTLS_AES_256_CBC_SHA224 0xff64
+#define OP_PCL_DTLS_AES_256_CBC_SHA256 0xff66
+#define OP_PCL_DTLS_AES_256_CBC_SHA384 0xff63
+#define OP_PCL_DTLS_AES_256_CBC_SHA512 0xff65
/* 802.16 WiMAX protinfos */
-#define OP_PCL_WIMAX_OFDM 0x0201
-#define OP_PCL_WIMAX_OFDMA 0x0231
+#define OP_PCL_WIMAX_OFDM 0x0201
+#define OP_PCL_WIMAX_OFDMA 0x0231
/* 802.11 WiFi protinfos */
-#define OP_PCL_WIFI 0xac04
+#define OP_PCL_WIFI 0xac04
/* MacSec protinfos */
-#define OP_PCL_MACSEC 0x0001
+#define OP_PCL_MACSEC 0x0001
/* PKI unidirectional protocol protinfo bits */
-#define OP_PCL_PKPROT_TEST 0x0008
-#define OP_PCL_PKPROT_DECRYPT 0x0004
-#define OP_PCL_PKPROT_ECC 0x0002
-#define OP_PCL_PKPROT_F2M 0x0001
+#define OP_PCL_PKPROT_TEST 0x0008
+#define OP_PCL_PKPROT_DECRYPT 0x0004
+#define OP_PCL_PKPROT_ECC 0x0002
+#define OP_PCL_PKPROT_F2M 0x0001
/* For non-protocol/alg-only op commands */
#define OP_ALG_TYPE_SHIFT 24
@@ -1181,114 +1181,114 @@
#define OP_ALG_ENCRYPT 1
/* PKHA algorithm type set */
-#define OP_ALG_PK 0x00800000
-#define OP_ALG_PK_FUN_MASK 0x3f /* clrmem, modmath, or cpymem */
+#define OP_ALG_PK 0x00800000
+#define OP_ALG_PK_FUN_MASK 0x3f /* clrmem, modmath, or cpymem */
/* PKHA mode clear memory functions */
-#define OP_ALG_PKMODE_A_RAM 0x80000
-#define OP_ALG_PKMODE_B_RAM 0x40000
-#define OP_ALG_PKMODE_E_RAM 0x20000
-#define OP_ALG_PKMODE_N_RAM 0x10000
-#define OP_ALG_PKMODE_CLEARMEM 0x00001
+#define OP_ALG_PKMODE_A_RAM 0x80000
+#define OP_ALG_PKMODE_B_RAM 0x40000
+#define OP_ALG_PKMODE_E_RAM 0x20000
+#define OP_ALG_PKMODE_N_RAM 0x10000
+#define OP_ALG_PKMODE_CLEARMEM 0x00001
/* PKHA mode modular-arithmetic functions */
-#define OP_ALG_PKMODE_MOD_IN_MONTY 0x80000
-#define OP_ALG_PKMODE_MOD_OUT_MONTY 0x40000
-#define OP_ALG_PKMODE_MOD_F2M 0x20000
-#define OP_ALG_PKMODE_MOD_R2_IN 0x10000
-#define OP_ALG_PKMODE_PRJECTV 0x00800
-#define OP_ALG_PKMODE_TIME_EQ 0x400
-#define OP_ALG_PKMODE_OUT_B 0x000
-#define OP_ALG_PKMODE_OUT_A 0x100
-#define OP_ALG_PKMODE_MOD_ADD 0x002
-#define OP_ALG_PKMODE_MOD_SUB_AB 0x003
-#define OP_ALG_PKMODE_MOD_SUB_BA 0x004
-#define OP_ALG_PKMODE_MOD_MULT 0x005
-#define OP_ALG_PKMODE_MOD_EXPO 0x006
-#define OP_ALG_PKMODE_MOD_REDUCT 0x007
-#define OP_ALG_PKMODE_MOD_INV 0x008
-#define OP_ALG_PKMODE_MOD_ECC_ADD 0x009
-#define OP_ALG_PKMODE_MOD_ECC_DBL 0x00a
-#define OP_ALG_PKMODE_MOD_ECC_MULT 0x00b
-#define OP_ALG_PKMODE_MOD_MONT_CNST 0x00c
-#define OP_ALG_PKMODE_MOD_CRT_CNST 0x00d
-#define OP_ALG_PKMODE_MOD_GCD 0x00e
-#define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
+#define OP_ALG_PKMODE_MOD_IN_MONTY 0x80000
+#define OP_ALG_PKMODE_MOD_OUT_MONTY 0x40000
+#define OP_ALG_PKMODE_MOD_F2M 0x20000
+#define OP_ALG_PKMODE_MOD_R2_IN 0x10000
+#define OP_ALG_PKMODE_PRJECTV 0x00800
+#define OP_ALG_PKMODE_TIME_EQ 0x400
+#define OP_ALG_PKMODE_OUT_B 0x000
+#define OP_ALG_PKMODE_OUT_A 0x100
+#define OP_ALG_PKMODE_MOD_ADD 0x002
+#define OP_ALG_PKMODE_MOD_SUB_AB 0x003
+#define OP_ALG_PKMODE_MOD_SUB_BA 0x004
+#define OP_ALG_PKMODE_MOD_MULT 0x005
+#define OP_ALG_PKMODE_MOD_EXPO 0x006
+#define OP_ALG_PKMODE_MOD_REDUCT 0x007
+#define OP_ALG_PKMODE_MOD_INV 0x008
+#define OP_ALG_PKMODE_MOD_ECC_ADD 0x009
+#define OP_ALG_PKMODE_MOD_ECC_DBL 0x00a
+#define OP_ALG_PKMODE_MOD_ECC_MULT 0x00b
+#define OP_ALG_PKMODE_MOD_MONT_CNST 0x00c
+#define OP_ALG_PKMODE_MOD_CRT_CNST 0x00d
+#define OP_ALG_PKMODE_MOD_GCD 0x00e
+#define OP_ALG_PKMODE_MOD_PRIMALITY 0x00f
/* PKHA mode copy-memory functions */
-#define OP_ALG_PKMODE_SRC_REG_SHIFT 13
-#define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
-#define OP_ALG_PKMODE_DST_REG_SHIFT 10
-#define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
-#define OP_ALG_PKMODE_SRC_SEG_SHIFT 8
-#define OP_ALG_PKMODE_SRC_SEG_MASK (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
-#define OP_ALG_PKMODE_DST_SEG_SHIFT 6
-#define OP_ALG_PKMODE_DST_SEG_MASK (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
-
-#define OP_ALG_PKMODE_SRC_REG_A (0 << OP_ALG_PKMODE_SRC_REG_SHIFT)
-#define OP_ALG_PKMODE_SRC_REG_B (1 << OP_ALG_PKMODE_SRC_REG_SHIFT)
-#define OP_ALG_PKMODE_SRC_REG_N (3 << OP_ALG_PKMODE_SRC_REG_SHIFT)
-#define OP_ALG_PKMODE_DST_REG_A (0 << OP_ALG_PKMODE_DST_REG_SHIFT)
-#define OP_ALG_PKMODE_DST_REG_B (1 << OP_ALG_PKMODE_DST_REG_SHIFT)
-#define OP_ALG_PKMODE_DST_REG_E (2 << OP_ALG_PKMODE_DST_REG_SHIFT)
-#define OP_ALG_PKMODE_DST_REG_N (3 << OP_ALG_PKMODE_DST_REG_SHIFT)
-#define OP_ALG_PKMODE_SRC_SEG_0 (0 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
-#define OP_ALG_PKMODE_SRC_SEG_1 (1 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
-#define OP_ALG_PKMODE_SRC_SEG_2 (2 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
-#define OP_ALG_PKMODE_SRC_SEG_3 (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
-#define OP_ALG_PKMODE_DST_SEG_0 (0 << OP_ALG_PKMODE_DST_SEG_SHIFT)
-#define OP_ALG_PKMODE_DST_SEG_1 (1 << OP_ALG_PKMODE_DST_SEG_SHIFT)
-#define OP_ALG_PKMODE_DST_SEG_2 (2 << OP_ALG_PKMODE_DST_SEG_SHIFT)
-#define OP_ALG_PKMODE_DST_SEG_3 (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
-#define OP_ALG_PKMODE_CPYMEM_N_SZ 0x80
-#define OP_ALG_PKMODE_CPYMEM_SRC_SZ 0x81
+#define OP_ALG_PKMODE_SRC_REG_SHIFT 13
+#define OP_ALG_PKMODE_SRC_REG_MASK (7 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_SHIFT 10
+#define OP_ALG_PKMODE_DST_REG_MASK (7 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_SHIFT 8
+#define OP_ALG_PKMODE_SRC_SEG_MASK (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_SHIFT 6
+#define OP_ALG_PKMODE_DST_SEG_MASK (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+
+#define OP_ALG_PKMODE_SRC_REG_A (0 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_B (1 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_REG_N (3 << OP_ALG_PKMODE_SRC_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_A (0 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_B (1 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_E (2 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_DST_REG_N (3 << OP_ALG_PKMODE_DST_REG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_0 (0 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_1 (1 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_2 (2 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_SRC_SEG_3 (3 << OP_ALG_PKMODE_SRC_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_0 (0 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_1 (1 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_2 (2 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_DST_SEG_3 (3 << OP_ALG_PKMODE_DST_SEG_SHIFT)
+#define OP_ALG_PKMODE_CPYMEM_N_SZ 0x80
+#define OP_ALG_PKMODE_CPYMEM_SRC_SZ 0x81
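For reference, a minimal sketch (not part of this header or of this change) of how the PKHA copy-memory selectors above might be combined into a single mode word. The helper name is illustrative and the definitions above are assumed to be in scope:

    #include <stdint.h>

    /*
     * Sketch only: compose the copy-memory mode bits for copying segment 1
     * of PKHA register B into register N, with the copy sized by N. The
     * OP_ALG_* type/opcode bits are defined elsewhere in this header and
     * are omitted here.
     */
    static uint32_t pkha_copymem_b1_to_n(void)
    {
    	return OP_ALG_PKMODE_SRC_REG_B |
    	       OP_ALG_PKMODE_SRC_SEG_1 |
    	       OP_ALG_PKMODE_DST_REG_N |
    	       OP_ALG_PKMODE_DST_SEG_0 |
    	       OP_ALG_PKMODE_CPYMEM_N_SZ;
    }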
/*
* SEQ_IN_PTR Command Constructs
*/
/* Release Buffers */
-#define SQIN_RBS 0x04000000
+#define SQIN_RBS 0x04000000
/* Sequence pointer is really a descriptor */
-#define SQIN_INL 0x02000000
+#define SQIN_INL 0x02000000
/* Sequence pointer is a scatter-gather table */
-#define SQIN_SGF 0x01000000
+#define SQIN_SGF 0x01000000
/* Appends to a previous pointer */
-#define SQIN_PRE 0x00800000
+#define SQIN_PRE 0x00800000
/* Use extended length following pointer */
-#define SQIN_EXT 0x00400000
+#define SQIN_EXT 0x00400000
/* Restore sequence with pointer/length */
-#define SQIN_RTO 0x00200000
+#define SQIN_RTO 0x00200000
/* Replace job descriptor */
-#define SQIN_RJD 0x00100000
+#define SQIN_RJD 0x00100000
-#define SQIN_LEN_SHIFT 0
-#define SQIN_LEN_MASK (0xffff << SQIN_LEN_SHIFT)
+#define SQIN_LEN_SHIFT 0
+#define SQIN_LEN_MASK (0xffff << SQIN_LEN_SHIFT)
/*
* SEQ_OUT_PTR Command Constructs
*/
/* Sequence pointer is a scatter-gather table */
-#define SQOUT_SGF 0x01000000
+#define SQOUT_SGF 0x01000000
/* Appends to a previous pointer */
-#define SQOUT_PRE 0x00800000
+#define SQOUT_PRE 0x00800000
/* Restore sequence with pointer/length */
-#define SQOUT_RTO 0x00200000
+#define SQOUT_RTO 0x00200000
/* Use extended length following pointer */
-#define SQOUT_EXT 0x00400000
+#define SQOUT_EXT 0x00400000
-#define SQOUT_LEN_SHIFT 0
-#define SQOUT_LEN_MASK (0xffff << SQOUT_LEN_SHIFT)
+#define SQOUT_LEN_SHIFT 0
+#define SQOUT_LEN_MASK (0xffff << SQOUT_LEN_SHIFT)
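Likewise, a short illustrative sketch (again not part of the change) of how the SEQ_IN_PTR flag and length fields above might be packed; the helper name and its scatter-gather parameter are assumptions for the example only:

    #include <stdint.h>

    /*
     * Sketch only: pack a 16-bit sequence length and the optional
     * scatter-gather flag into the SEQ_IN_PTR flag/length word. The
     * command opcode bits are defined elsewhere in this header.
     */
    static uint32_t seq_in_ptr_flags(unsigned int len, int sg_table)
    {
    	uint32_t w = (len << SQIN_LEN_SHIFT) & SQIN_LEN_MASK;

    	if (sg_table)
    		w |= SQIN_SGF;	/* pointer refers to a scatter-gather table */
    	return w;
    }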
/*
@@ -1296,196 +1296,196 @@
*/
/* TYPE field is all that's relevant */
-#define SIGN_TYPE_SHIFT 16
-#define SIGN_TYPE_MASK (0x0f << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_SHIFT 16
+#define SIGN_TYPE_MASK (0x0f << SIGN_TYPE_SHIFT)
-#define SIGN_TYPE_FINAL (0x00 << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_FINAL (0x00 << SIGN_TYPE_SHIFT)
#define SIGN_TYPE_FINAL_RESTORE (0x01 << SIGN_TYPE_SHIFT)
#define SIGN_TYPE_FINAL_NONZERO (0x02 << SIGN_TYPE_SHIFT)
-#define SIGN_TYPE_IMM_2 (0x0a << SIGN_TYPE_SHIFT)
-#define SIGN_TYPE_IMM_3 (0x0b << SIGN_TYPE_SHIFT)
-#define SIGN_TYPE_IMM_4 (0x0c << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_2 (0x0a << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_3 (0x0b << SIGN_TYPE_SHIFT)
+#define SIGN_TYPE_IMM_4 (0x0c << SIGN_TYPE_SHIFT)
/*
* MOVE Command Constructs
*/
-#define MOVE_AUX_SHIFT 25
-#define MOVE_AUX_MASK (3 << MOVE_AUX_SHIFT)
-#define MOVE_AUX_MS (2 << MOVE_AUX_SHIFT)
-#define MOVE_AUX_LS (1 << MOVE_AUX_SHIFT)
-
-#define MOVE_WAITCOMP_SHIFT 24
-#define MOVE_WAITCOMP_MASK (1 << MOVE_WAITCOMP_SHIFT)
-#define MOVE_WAITCOMP (1 << MOVE_WAITCOMP_SHIFT)
-
-#define MOVE_SRC_SHIFT 20
-#define MOVE_SRC_MASK (0x0f << MOVE_SRC_SHIFT)
-#define MOVE_SRC_CLASS1CTX (0x00 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_CLASS2CTX (0x01 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_OUTFIFO (0x02 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_DESCBUF (0x03 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_MATH0 (0x04 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_MATH1 (0x05 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_MATH2 (0x06 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT)
-#define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT)
-
-#define MOVE_DEST_SHIFT 16
-#define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT)
-#define MOVE_DEST_CLASS1CTX (0x00 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_CLASS2CTX (0x01 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_OUTFIFO (0x02 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_DESCBUF (0x03 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_MATH0 (0x04 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_MATH1 (0x05 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_MATH2 (0x06 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_MATH3 (0x07 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_CLASS1INFIFO (0x08 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_CLASS2INFIFO (0x09 << MOVE_DEST_SHIFT)
-#define MOVE_DEST_PK_A (0x0c << MOVE_DEST_SHIFT)
-#define MOVE_DEST_CLASS1KEY (0x0d << MOVE_DEST_SHIFT)
-#define MOVE_DEST_CLASS2KEY (0x0e << MOVE_DEST_SHIFT)
-
-#define MOVE_OFFSET_SHIFT 8
-#define MOVE_OFFSET_MASK (0xff << MOVE_OFFSET_SHIFT)
-
-#define MOVE_LEN_SHIFT 0
-#define MOVE_LEN_MASK (0xff << MOVE_LEN_SHIFT)
-
-#define MOVELEN_MRSEL_SHIFT 0
-#define MOVELEN_MRSEL_MASK (0x3 << MOVE_LEN_SHIFT)
+#define MOVE_AUX_SHIFT 25
+#define MOVE_AUX_MASK (3 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_MS (2 << MOVE_AUX_SHIFT)
+#define MOVE_AUX_LS (1 << MOVE_AUX_SHIFT)
+
+#define MOVE_WAITCOMP_SHIFT 24
+#define MOVE_WAITCOMP_MASK (1 << MOVE_WAITCOMP_SHIFT)
+#define MOVE_WAITCOMP (1 << MOVE_WAITCOMP_SHIFT)
+
+#define MOVE_SRC_SHIFT 20
+#define MOVE_SRC_MASK (0x0f << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS1CTX (0x00 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_CLASS2CTX (0x01 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_OUTFIFO (0x02 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_DESCBUF (0x03 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH0 (0x04 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH1 (0x05 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH2 (0x06 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_MATH3 (0x07 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO (0x08 << MOVE_SRC_SHIFT)
+#define MOVE_SRC_INFIFO_CL (0x09 << MOVE_SRC_SHIFT)
+
+#define MOVE_DEST_SHIFT 16
+#define MOVE_DEST_MASK (0x0f << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1CTX (0x00 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2CTX (0x01 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_OUTFIFO (0x02 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_DESCBUF (0x03 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH0 (0x04 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH1 (0x05 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH2 (0x06 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_MATH3 (0x07 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1INFIFO (0x08 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2INFIFO (0x09 << MOVE_DEST_SHIFT)
+#define MOVE_DEST_PK_A (0x0c << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS1KEY (0x0d << MOVE_DEST_SHIFT)
+#define MOVE_DEST_CLASS2KEY (0x0e << MOVE_DEST_SHIFT)
+
+#define MOVE_OFFSET_SHIFT 8
+#define MOVE_OFFSET_MASK (0xff << MOVE_OFFSET_SHIFT)
+
+#define MOVE_LEN_SHIFT 0
+#define MOVE_LEN_MASK (0xff << MOVE_LEN_SHIFT)
+
+#define MOVELEN_MRSEL_SHIFT 0
+#define MOVELEN_MRSEL_MASK (0x3 << MOVE_LEN_SHIFT)
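A similar sketch for the MOVE constructs above, again with an illustrative helper name and the opcode bits left out:

    #include <stdint.h>

    /*
     * Sketch only: select MATH register 0 as the source, the descriptor
     * buffer as the destination, and fold in the offset and length fields.
     */
    static uint32_t move_math0_to_descbuf(unsigned int offset, unsigned int len)
    {
    	return MOVE_SRC_MATH0 |
    	       MOVE_DEST_DESCBUF |
    	       ((offset << MOVE_OFFSET_SHIFT) & MOVE_OFFSET_MASK) |
    	       ((len << MOVE_LEN_SHIFT) & MOVE_LEN_MASK);
    }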
/*
* MATH Command Constructs
*/
-#define MATH_IFB_SHIFT 26
-#define MATH_IFB_MASK (1 << MATH_IFB_SHIFT)
-#define MATH_IFB (1 << MATH_IFB_SHIFT)
+#define MATH_IFB_SHIFT 26
+#define MATH_IFB_MASK (1 << MATH_IFB_SHIFT)
+#define MATH_IFB (1 << MATH_IFB_SHIFT)
-#define MATH_NFU_SHIFT 25
-#define MATH_NFU_MASK (1 << MATH_NFU_SHIFT)
-#define MATH_NFU (1 << MATH_NFU_SHIFT)
+#define MATH_NFU_SHIFT 25
+#define MATH_NFU_MASK (1 << MATH_NFU_SHIFT)
+#define MATH_NFU (1 << MATH_NFU_SHIFT)
-#define MATH_STL_SHIFT 24
-#define MATH_STL_MASK (1 << MATH_STL_SHIFT)
-#define MATH_STL (1 << MATH_STL_SHIFT)
+#define MATH_STL_SHIFT 24
+#define MATH_STL_MASK (1 << MATH_STL_SHIFT)
+#define MATH_STL (1 << MATH_STL_SHIFT)
/* Function selectors */
-#define MATH_FUN_SHIFT 20
-#define MATH_FUN_MASK (0x0f << MATH_FUN_SHIFT)
-#define MATH_FUN_ADD (0x00 << MATH_FUN_SHIFT)
-#define MATH_FUN_ADDC (0x01 << MATH_FUN_SHIFT)
-#define MATH_FUN_SUB (0x02 << MATH_FUN_SHIFT)
-#define MATH_FUN_SUBB (0x03 << MATH_FUN_SHIFT)
-#define MATH_FUN_OR (0x04 << MATH_FUN_SHIFT)
-#define MATH_FUN_AND (0x05 << MATH_FUN_SHIFT)
-#define MATH_FUN_XOR (0x06 << MATH_FUN_SHIFT)
-#define MATH_FUN_LSHIFT (0x07 << MATH_FUN_SHIFT)
-#define MATH_FUN_RSHIFT (0x08 << MATH_FUN_SHIFT)
-#define MATH_FUN_SHLD (0x09 << MATH_FUN_SHIFT)
-#define MATH_FUN_ZBYT (0x0a << MATH_FUN_SHIFT)
+#define MATH_FUN_SHIFT 20
+#define MATH_FUN_MASK (0x0f << MATH_FUN_SHIFT)
+#define MATH_FUN_ADD (0x00 << MATH_FUN_SHIFT)
+#define MATH_FUN_ADDC (0x01 << MATH_FUN_SHIFT)
+#define MATH_FUN_SUB (0x02 << MATH_FUN_SHIFT)
+#define MATH_FUN_SUBB (0x03 << MATH_FUN_SHIFT)
+#define MATH_FUN_OR (0x04 << MATH_FUN_SHIFT)
+#define MATH_FUN_AND (0x05 << MATH_FUN_SHIFT)
+#define MATH_FUN_XOR (0x06 << MATH_FUN_SHIFT)
+#define MATH_FUN_LSHIFT (0x07 << MATH_FUN_SHIFT)
+#define MATH_FUN_RSHIFT (0x08 << MATH_FUN_SHIFT)
+#define MATH_FUN_SHLD (0x09 << MATH_FUN_SHIFT)
+#define MATH_FUN_ZBYT (0x0a << MATH_FUN_SHIFT)
/* Source 0 selectors */
-#define MATH_SRC0_SHIFT 16
-#define MATH_SRC0_MASK (0x0f << MATH_SRC0_SHIFT)
-#define MATH_SRC0_REG0 (0x00 << MATH_SRC0_SHIFT)
-#define MATH_SRC0_REG1 (0x01 << MATH_SRC0_SHIFT)
-#define MATH_SRC0_REG2 (0x02 << MATH_SRC0_SHIFT)
-#define MATH_SRC0_REG3 (0x03 << MATH_SRC0_SHIFT)
-#define MATH_SRC0_IMM (0x04 << MATH_SRC0_SHIFT)
-#define MATH_SRC0_SEQINLEN (0x08 << MATH_SRC0_SHIFT)
-#define MATH_SRC0_SEQOUTLEN (0x09 << MATH_SRC0_SHIFT)
-#define MATH_SRC0_VARSEQINLEN (0x0a << MATH_SRC0_SHIFT)
-#define MATH_SRC0_VARSEQOUTLEN (0x0b << MATH_SRC0_SHIFT)
-#define MATH_SRC0_ZERO (0x0c << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SHIFT 16
+#define MATH_SRC0_MASK (0x0f << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG0 (0x00 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG1 (0x01 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG2 (0x02 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_REG3 (0x03 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_IMM (0x04 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SEQINLEN (0x08 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_SEQOUTLEN (0x09 << MATH_SRC0_SHIFT)
+#define MATH_SRC0_VARSEQINLEN (0x0a << MATH_SRC0_SHIFT)
+#define MATH_SRC0_VARSEQOUTLEN (0x0b << MATH_SRC0_SHIFT)
+#define MATH_SRC0_ZERO (0x0c << MATH_SRC0_SHIFT)
/* Source 1 selectors */
-#define MATH_SRC1_SHIFT 12
-#define MATH_SRC1_MASK (0x0f << MATH_SRC1_SHIFT)
-#define MATH_SRC1_REG0 (0x00 << MATH_SRC1_SHIFT)
-#define MATH_SRC1_REG1 (0x01 << MATH_SRC1_SHIFT)
-#define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
-#define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
-#define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
-#define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
-#define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
-#define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
+#define MATH_SRC1_SHIFT 12
+#define MATH_SRC1_MASK (0x0f << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG0 (0x00 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG1 (0x01 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG2 (0x02 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_REG3 (0x03 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_IMM (0x04 << MATH_SRC1_SHIFT)
+#define MATH_SRC1_INFIFO (0x0a << MATH_SRC1_SHIFT)
+#define MATH_SRC1_OUTFIFO (0x0b << MATH_SRC1_SHIFT)
+#define MATH_SRC1_ONE (0x0c << MATH_SRC1_SHIFT)
/* Destination selectors */
-#define MATH_DEST_SHIFT 8
-#define MATH_DEST_MASK (0x0f << MATH_DEST_SHIFT)
-#define MATH_DEST_REG0 (0x00 << MATH_DEST_SHIFT)
-#define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
-#define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
-#define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
-#define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
-#define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
-#define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
-#define MATH_DEST_VARSEQOUTLEN (0x0b << MATH_DEST_SHIFT)
-#define MATH_DEST_NONE (0x0f << MATH_DEST_SHIFT)
+#define MATH_DEST_SHIFT 8
+#define MATH_DEST_MASK (0x0f << MATH_DEST_SHIFT)
+#define MATH_DEST_REG0 (0x00 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG1 (0x01 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG2 (0x02 << MATH_DEST_SHIFT)
+#define MATH_DEST_REG3 (0x03 << MATH_DEST_SHIFT)
+#define MATH_DEST_SEQINLEN (0x08 << MATH_DEST_SHIFT)
+#define MATH_DEST_SEQOUTLEN (0x09 << MATH_DEST_SHIFT)
+#define MATH_DEST_VARSEQINLEN (0x0a << MATH_DEST_SHIFT)
+#define MATH_DEST_VARSEQOUTLEN (0x0b << MATH_DEST_SHIFT)
+#define MATH_DEST_NONE (0x0f << MATH_DEST_SHIFT)
/* Length selectors */
-#define MATH_LEN_SHIFT 0
-#define MATH_LEN_MASK (0x0f << MATH_LEN_SHIFT)
-#define MATH_LEN_1BYTE 0x01
-#define MATH_LEN_2BYTE 0x02
-#define MATH_LEN_4BYTE 0x04
-#define MATH_LEN_8BYTE 0x08
+#define MATH_LEN_SHIFT 0
+#define MATH_LEN_MASK (0x0f << MATH_LEN_SHIFT)
+#define MATH_LEN_1BYTE 0x01
+#define MATH_LEN_2BYTE 0x02
+#define MATH_LEN_4BYTE 0x04
+#define MATH_LEN_8BYTE 0x08
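And for the MATH constructs, an illustrative composition (helper name assumed, opcode bits omitted):

    #include <stdint.h>

    /*
     * Sketch only: add an immediate operand to SEQINLEN and store the
     * 4-byte result in MATH register 0. The immediate value itself is
     * carried separately from this command word and is not shown here.
     */
    static uint32_t math_add_imm_to_seqinlen(void)
    {
    	return MATH_FUN_ADD |
    	       MATH_SRC0_SEQINLEN |
    	       MATH_SRC1_IMM |
    	       MATH_DEST_REG0 |
    	       MATH_LEN_4BYTE;
    }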
/*
* JUMP Command Constructs
*/
-#define JUMP_CLASS_SHIFT 25
+#define JUMP_CLASS_SHIFT 25
#define JUMP_CLASS_MASK (3 << JUMP_CLASS_SHIFT)
#define JUMP_CLASS_NONE 0
#define JUMP_CLASS_CLASS1 (1 << JUMP_CLASS_SHIFT)
#define JUMP_CLASS_CLASS2 (2 << JUMP_CLASS_SHIFT)
#define JUMP_CLASS_BOTH (3 << JUMP_CLASS_SHIFT)
-#define JUMP_JSL_SHIFT 24
-#define JUMP_JSL_MASK (1 << JUMP_JSL_SHIFT)
-#define JUMP_JSL (1 << JUMP_JSL_SHIFT)
+#define JUMP_JSL_SHIFT 24
+#define JUMP_JSL_MASK (1 << JUMP_JSL_SHIFT)
+#define JUMP_JSL (1 << JUMP_JSL_SHIFT)
-#define JUMP_TYPE_SHIFT 22
-#define JUMP_TYPE_MASK (0x03 << JUMP_TYPE_SHIFT)
-#define JUMP_TYPE_LOCAL (0x00 << JUMP_TYPE_SHIFT)
-#define JUMP_TYPE_NONLOCAL (0x01 << JUMP_TYPE_SHIFT)
-#define JUMP_TYPE_HALT (0x02 << JUMP_TYPE_SHIFT)
-#define JUMP_TYPE_HALT_USER (0x03 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_SHIFT 22
+#define JUMP_TYPE_MASK (0x03 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_LOCAL (0x00 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_NONLOCAL (0x01 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT (0x02 << JUMP_TYPE_SHIFT)
+#define JUMP_TYPE_HALT_USER (0x03 << JUMP_TYPE_SHIFT)
-#define JUMP_TEST_SHIFT 16
-#define JUMP_TEST_MASK (0x03 << JUMP_TEST_SHIFT)
-#define JUMP_TEST_ALL (0x00 << JUMP_TEST_SHIFT)
-#define JUMP_TEST_INVALL (0x01 << JUMP_TEST_SHIFT)
-#define JUMP_TEST_ANY (0x02 << JUMP_TEST_SHIFT)
-#define JUMP_TEST_INVANY (0x03 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_SHIFT 16
+#define JUMP_TEST_MASK (0x03 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ALL (0x00 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVALL (0x01 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_ANY (0x02 << JUMP_TEST_SHIFT)
+#define JUMP_TEST_INVANY (0x03 << JUMP_TEST_SHIFT)
/* Condition codes. JSL bit is factored in */
-#define JUMP_COND_SHIFT 8
-#define JUMP_COND_MASK (0x100ff << JUMP_COND_SHIFT)
-#define JUMP_COND_PK_0 (0x80 << JUMP_COND_SHIFT)
-#define JUMP_COND_PK_GCD_1 (0x40 << JUMP_COND_SHIFT)
-#define JUMP_COND_PK_PRIME (0x20 << JUMP_COND_SHIFT)
-#define JUMP_COND_MATH_N (0x08 << JUMP_COND_SHIFT)
-#define JUMP_COND_MATH_Z (0x04 << JUMP_COND_SHIFT)
-#define JUMP_COND_MATH_C (0x02 << JUMP_COND_SHIFT)
-#define JUMP_COND_MATH_NV (0x01 << JUMP_COND_SHIFT)
-
-#define JUMP_COND_JRP ((0x80 << JUMP_COND_SHIFT) | JUMP_JSL)
-#define JUMP_COND_SHRD ((0x40 << JUMP_COND_SHIFT) | JUMP_JSL)
-#define JUMP_COND_SELF ((0x20 << JUMP_COND_SHIFT) | JUMP_JSL)
-#define JUMP_COND_CALM ((0x10 << JUMP_COND_SHIFT) | JUMP_JSL)
-#define JUMP_COND_NIP ((0x08 << JUMP_COND_SHIFT) | JUMP_JSL)
-#define JUMP_COND_NIFP ((0x04 << JUMP_COND_SHIFT) | JUMP_JSL)
-#define JUMP_COND_NOP ((0x02 << JUMP_COND_SHIFT) | JUMP_JSL)
-#define JUMP_COND_NCP ((0x01 << JUMP_COND_SHIFT) | JUMP_JSL)
-
-#define JUMP_OFFSET_SHIFT 0
-#define JUMP_OFFSET_MASK (0xff << JUMP_OFFSET_SHIFT)
+#define JUMP_COND_SHIFT 8
+#define JUMP_COND_MASK (0x100ff << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_0 (0x80 << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_GCD_1 (0x40 << JUMP_COND_SHIFT)
+#define JUMP_COND_PK_PRIME (0x20 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_N (0x08 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_Z (0x04 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_C (0x02 << JUMP_COND_SHIFT)
+#define JUMP_COND_MATH_NV (0x01 << JUMP_COND_SHIFT)
+
+#define JUMP_COND_JRP ((0x80 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_SHRD ((0x40 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_SELF ((0x20 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_CALM ((0x10 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NIP ((0x08 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NIFP ((0x04 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NOP ((0x02 << JUMP_COND_SHIFT) | JUMP_JSL)
+#define JUMP_COND_NCP ((0x01 << JUMP_COND_SHIFT) | JUMP_JSL)
+
+#define JUMP_OFFSET_SHIFT 0
+#define JUMP_OFFSET_MASK (0xff << JUMP_OFFSET_SHIFT)
/*
* NFIFO ENTRY
@@ -1500,20 +1500,20 @@
#define NFIFOENTRY_DEST_BOTH (3 << NFIFOENTRY_DEST_SHIFT)
#define NFIFOENTRY_LC2_SHIFT 29
-#define NFIFOENTRY_LC2_MASK (1 << NFIFOENTRY_LC2_SHIFT)
-#define NFIFOENTRY_LC2 (1 << NFIFOENTRY_LC2_SHIFT)
+#define NFIFOENTRY_LC2_MASK (1 << NFIFOENTRY_LC2_SHIFT)
+#define NFIFOENTRY_LC2 (1 << NFIFOENTRY_LC2_SHIFT)
#define NFIFOENTRY_LC1_SHIFT 28
-#define NFIFOENTRY_LC1_MASK (1 << NFIFOENTRY_LC1_SHIFT)
-#define NFIFOENTRY_LC1 (1 << NFIFOENTRY_LC1_SHIFT)
+#define NFIFOENTRY_LC1_MASK (1 << NFIFOENTRY_LC1_SHIFT)
+#define NFIFOENTRY_LC1 (1 << NFIFOENTRY_LC1_SHIFT)
#define NFIFOENTRY_FC2_SHIFT 27
-#define NFIFOENTRY_FC2_MASK (1 << NFIFOENTRY_FC2_SHIFT)
-#define NFIFOENTRY_FC2 (1 << NFIFOENTRY_FC2_SHIFT)
+#define NFIFOENTRY_FC2_MASK (1 << NFIFOENTRY_FC2_SHIFT)
+#define NFIFOENTRY_FC2 (1 << NFIFOENTRY_FC2_SHIFT)
#define NFIFOENTRY_FC1_SHIFT 26
-#define NFIFOENTRY_FC1_MASK (1 << NFIFOENTRY_FC1_SHIFT)
-#define NFIFOENTRY_FC1 (1 << NFIFOENTRY_FC1_SHIFT)
+#define NFIFOENTRY_FC1_MASK (1 << NFIFOENTRY_FC1_SHIFT)
+#define NFIFOENTRY_FC1 (1 << NFIFOENTRY_FC1_SHIFT)
#define NFIFOENTRY_STYPE_SHIFT 24
#define NFIFOENTRY_STYPE_MASK (3 << NFIFOENTRY_STYPE_SHIFT)
@@ -1525,60 +1525,59 @@
#define NFIFOENTRY_DTYPE_SHIFT 20
#define NFIFOENTRY_DTYPE_MASK (0xF << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_SBOX (0x0 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_AAD (0x1 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT)
-
-#define NFIFOENTRY_DTYPE_PK_A0 (0x0 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_A1 (0x1 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_A2 (0x2 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_A3 (0x3 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_B0 (0x4 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_B1 (0x5 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_B2 (0x6 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_B3 (0x7 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_N (0x8 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_E (0x9 << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_A (0xC << NFIFOENTRY_DTYPE_SHIFT)
-#define NFIFOENTRY_DTYPE_PK_B (0xD << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SBOX (0x0 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_AAD (0x1 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_IV (0x2 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SAD (0x3 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_ICV (0xA << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_SKIP (0xE << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_MSG (0xF << NFIFOENTRY_DTYPE_SHIFT)
+
+#define NFIFOENTRY_DTYPE_PK_A0 (0x0 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A1 (0x1 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A2 (0x2 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A3 (0x3 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B0 (0x4 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B1 (0x5 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B2 (0x6 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B3 (0x7 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_N (0x8 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_E (0x9 << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_A (0xC << NFIFOENTRY_DTYPE_SHIFT)
+#define NFIFOENTRY_DTYPE_PK_B (0xD << NFIFOENTRY_DTYPE_SHIFT)
#define NFIFOENTRY_BND_SHIFT 19
-#define NFIFOENTRY_BND_MASK (1 << NFIFOENTRY_BND_SHIFT)
-#define NFIFOENTRY_BND (1 << NFIFOENTRY_BND_SHIFT)
+#define NFIFOENTRY_BND_MASK (1 << NFIFOENTRY_BND_SHIFT)
+#define NFIFOENTRY_BND (1 << NFIFOENTRY_BND_SHIFT)
#define NFIFOENTRY_PTYPE_SHIFT 16
#define NFIFOENTRY_PTYPE_MASK (0x7 << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_ZEROS (0x0 << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_RND_NOZEROS (0x1 << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_INCREMENT (0x2 << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_RND (0x3 << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_ZEROS_NZ (0x4 << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_RND_NZ_LZ (0x5 << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_N (0x6 << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_PTYPE_RND_NZ_N (0x7 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_ZEROS (0x0 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NOZEROS (0x1 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_INCREMENT (0x2 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND (0x3 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_ZEROS_NZ (0x4 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NZ_LZ (0x5 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_N (0x6 << NFIFOENTRY_PTYPE_SHIFT)
+#define NFIFOENTRY_PTYPE_RND_NZ_N (0x7 << NFIFOENTRY_PTYPE_SHIFT)
-#define NFIFOENTRY_OC_SHIFT 15
-#define NFIFOENTRY_OC_MASK (1 << NFIFOENTRY_OC_SHIFT)
-#define NFIFOENTRY_OC (1 << NFIFOENTRY_OC_SHIFT)
+#define NFIFOENTRY_OC_SHIFT 15
+#define NFIFOENTRY_OC_MASK (1 << NFIFOENTRY_OC_SHIFT)
+#define NFIFOENTRY_OC (1 << NFIFOENTRY_OC_SHIFT)
#define NFIFOENTRY_AST_SHIFT 14
-#define NFIFOENTRY_AST_MASK (1 << NFIFOENTRY_OC_SHIFT)
-#define NFIFOENTRY_AST (1 << NFIFOENTRY_OC_SHIFT)
+#define NFIFOENTRY_AST_MASK (1 << NFIFOENTRY_OC_SHIFT)
+#define NFIFOENTRY_AST (1 << NFIFOENTRY_OC_SHIFT)
-#define NFIFOENTRY_BM_SHIFT 11
-#define NFIFOENTRY_BM_MASK (1 << NFIFOENTRY_BM_SHIFT)
-#define NFIFOENTRY_BM (1 << NFIFOENTRY_BM_SHIFT)
-
-#define NFIFOENTRY_PS_SHIFT 10
-#define NFIFOENTRY_PS_MASK (1 << NFIFOENTRY_PS_SHIFT)
-#define NFIFOENTRY_PS (1 << NFIFOENTRY_PS_SHIFT)
+#define NFIFOENTRY_BM_SHIFT 11
+#define NFIFOENTRY_BM_MASK (1 << NFIFOENTRY_BM_SHIFT)
+#define NFIFOENTRY_BM (1 << NFIFOENTRY_BM_SHIFT)
+#define NFIFOENTRY_PS_SHIFT 10
+#define NFIFOENTRY_PS_MASK (1 << NFIFOENTRY_PS_SHIFT)
+#define NFIFOENTRY_PS (1 << NFIFOENTRY_PS_SHIFT)
#define NFIFOENTRY_DLEN_SHIFT 0
#define NFIFOENTRY_DLEN_MASK (0xFFF << NFIFOENTRY_DLEN_SHIFT)
@@ -1591,15 +1590,15 @@
*/
/* IPSec ESP CBC Encap/Decap Options */
-#define PDBOPTS_ESPCBC_ARSNONE 0x00 /* no antireplay window */
-#define PDBOPTS_ESPCBC_ARS32 0x40 /* 32-entry antireplay window */
-#define PDBOPTS_ESPCBC_ARS64 0xc0 /* 64-entry antireplay window */
-#define PDBOPTS_ESPCBC_IVSRC 0x20 /* IV comes from internal random gen */
-#define PDBOPTS_ESPCBC_ESN 0x10 /* extended sequence included */
-#define PDBOPTS_ESPCBC_OUTFMT 0x08 /* output only decapsulation (decap) */
-#define PDBOPTS_ESPCBC_IPHDRSRC 0x08 /* IP header comes from PDB (encap) */
-#define PDBOPTS_ESPCBC_INCIPHDR 0x04 /* Prepend IP header to output frame */
-#define PDBOPTS_ESPCBC_IPVSN 0x02 /* process IPv6 header */
-#define PDBOPTS_ESPCBC_TUNNEL 0x01 /* tunnel mode next-header byte */
+#define PDBOPTS_ESPCBC_ARSNONE 0x00 /* no antireplay window */
+#define PDBOPTS_ESPCBC_ARS32 0x40 /* 32-entry antireplay window */
+#define PDBOPTS_ESPCBC_ARS64 0xc0 /* 64-entry antireplay window */
+#define PDBOPTS_ESPCBC_IVSRC 0x20 /* IV comes from internal random gen */
+#define PDBOPTS_ESPCBC_ESN 0x10 /* extended sequence included */
+#define PDBOPTS_ESPCBC_OUTFMT 0x08 /* output only decapsulation (decap) */
+#define PDBOPTS_ESPCBC_IPHDRSRC 0x08 /* IP header comes from PDB (encap) */
+#define PDBOPTS_ESPCBC_INCIPHDR 0x04 /* Prepend IP header to output frame */
+#define PDBOPTS_ESPCBC_IPVSN 0x02 /* process IPv6 header */
+#define PDBOPTS_ESPCBC_TUNNEL 0x01 /* tunnel mode next-header byte */
#endif /* DESC_H */
diff --git a/drivers/crypto/caam/desc_constr.h b/drivers/crypto/caam/desc_constr.h
index 0991323..348b882 100644
--- a/drivers/crypto/caam/desc_constr.h
+++ b/drivers/crypto/caam/desc_constr.h
@@ -18,9 +18,10 @@
#define PRINT_POS
#endif
-#define SET_OK_PROP_ERRORS (IMMEDIATE | LDST_CLASS_DECO | \
- LDST_SRCDST_WORD_DECOCTRL | \
- (LDOFF_CHG_SHARE_OK_PROP << LDST_OFFSET_SHIFT))
+#define SET_OK_NO_PROP_ERRORS (IMMEDIATE | LDST_CLASS_DECO | \
+ LDST_SRCDST_WORD_DECOCTRL | \
+ (LDOFF_CHG_SHARE_OK_NO_PROP << \
+ LDST_OFFSET_SHIFT))
#define DISABLE_AUTO_INFO_FIFO (IMMEDIATE | LDST_CLASS_DECO | \
LDST_SRCDST_WORD_DECOCTRL | \
(LDOFF_DISABLE_AUTO_NFIFO << LDST_OFFSET_SHIFT))
diff --git a/drivers/crypto/caam/regs.h b/drivers/crypto/caam/regs.h
index aee394e..e9f7a70 100644
--- a/drivers/crypto/caam/regs.h
+++ b/drivers/crypto/caam/regs.h
@@ -657,7 +657,6 @@ struct caam_full {
u64 rsvd[512];
struct caam_assurance assure;
struct caam_queue_if qi;
- struct caam_deco *deco;
};
#endif /* REGS_H */
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index dcd8bab..0d40cf6 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -714,6 +714,7 @@ static int mv_hash_final(struct ahash_request *req)
{
struct mv_req_hash_ctx *ctx = ahash_request_ctx(req);
+ ahash_request_set_crypt(req, NULL, req->result, 0);
mv_update_hash_req_ctx(ctx, 1, 0);
return mv_handle_req(&req->base);
}
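
For context, ahash_request_set_crypt() is the generic crypto API helper that records the source scatterlist, result buffer and byte count on an ahash request; the added call above therefore amounts to the following (sketch, based on the generic ahash API rather than anything mv_cesa-specific):

    ahash_request_set_crypt(req, NULL, req->result, 0);
    /* roughly: req->src = NULL; req->nbytes = 0; req->result unchanged.
     * final() carries no new data, so stale src/nbytes left over from an
     * earlier update must not be fed to the engine again.
     */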
@@ -1128,17 +1129,7 @@ static struct platform_driver marvell_crypto = {
};
MODULE_ALIAS("platform:mv_crypto");
-static int __init mv_crypto_init(void)
-{
- return platform_driver_register(&marvell_crypto);
-}
-module_init(mv_crypto_init);
-
-static void __exit mv_crypto_exit(void)
-{
- platform_driver_unregister(&marvell_crypto);
-}
-module_exit(mv_crypto_exit);
+module_platform_driver(marvell_crypto);
MODULE_AUTHOR("Sebastian Andrzej Siewior <sebastian@breakpoint.cc>");
MODULE_DESCRIPTION("Support for Marvell's cryptographic engine");
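
module_platform_driver() is the kernel convenience macro that generates the init/exit boilerplate removed above; a minimal sketch of the roughly equivalent expansion (the generated symbol names are illustrative, the register/unregister calls are unchanged):

    static int __init marvell_crypto_init(void)
    {
            return platform_driver_register(&marvell_crypto);
    }
    module_init(marvell_crypto_init);

    static void __exit marvell_crypto_exit(void)
    {
            platform_driver_unregister(&marvell_crypto);
    }
    module_exit(marvell_crypto_exit);

The same conversion is applied to the picoxcell, s5p-sss and talitos drivers further down.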
diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c
index a2b553e..58480d0 100644
--- a/drivers/crypto/picoxcell_crypto.c
+++ b/drivers/crypto/picoxcell_crypto.c
@@ -873,7 +873,7 @@ static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
* request for any other size (192 bits) then we need to do a software
* fallback.
*/
- if ((len != AES_KEYSIZE_128 || len != AES_KEYSIZE_256) &&
+ if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 &&
ctx->sw_cipher) {
/*
* Set the fallback transform to use the same request flags as
@@ -886,7 +886,7 @@ static int spacc_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
err = crypto_ablkcipher_setkey(ctx->sw_cipher, key, len);
if (err)
goto sw_setkey_failed;
- } else if ((len != AES_KEYSIZE_128 || len != AES_KEYSIZE_256) &&
+ } else if (len != AES_KEYSIZE_128 && len != AES_KEYSIZE_256 &&
!ctx->sw_cipher)
err = -EINVAL;
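
The change in this hunk and the previous one corrects a boolean slip: no key length can differ from both sizes being false at once, so (len != AES_KEYSIZE_128 || len != AES_KEYSIZE_256) is true for every len. As a result the fallback path was entered even for valid 128/256-bit keys, and, with no fallback cipher present, every key was rejected. By De Morgan, the intended "neither 128 nor 256 bits" test is the && form. A standalone sanity check (16 and 32 are simply the byte values of AES_KEYSIZE_128/_256):

    #include <assert.h>

    int main(void)
    {
            const unsigned int k128 = 16, k256 = 32;
            unsigned int len;

            for (len = 0; len < 64; len++) {
                    int buggy = (len != k128 || len != k256);
                    int fixed = (len != k128 && len != k256);

                    assert(buggy == 1);                          /* always true */
                    assert(fixed == !(len == k128 || len == k256));
            }
            return 0;
    }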
@@ -1854,17 +1854,7 @@ static struct platform_driver spacc_driver = {
.id_table = spacc_id_table,
};
-static int __init spacc_init(void)
-{
- return platform_driver_register(&spacc_driver);
-}
-module_init(spacc_init);
-
-static void __exit spacc_exit(void)
-{
- platform_driver_unregister(&spacc_driver);
-}
-module_exit(spacc_exit);
+module_platform_driver(spacc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamie Iles");
diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
index 8115417..3376bca 100644
--- a/drivers/crypto/s5p-sss.c
+++ b/drivers/crypto/s5p-sss.c
@@ -683,18 +683,7 @@ static struct platform_driver s5p_aes_crypto = {
},
};
-static int __init s5p_aes_mod_init(void)
-{
- return platform_driver_register(&s5p_aes_crypto);
-}
-
-static void __exit s5p_aes_mod_exit(void)
-{
- platform_driver_unregister(&s5p_aes_crypto);
-}
-
-module_init(s5p_aes_mod_init);
-module_exit(s5p_aes_mod_exit);
+module_platform_driver(s5p_aes_crypto);
MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index dbe76b5..2d8c789 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -99,6 +99,8 @@ struct talitos_request {
/* per-channel fifo management */
struct talitos_channel {
+ void __iomem *reg;
+
/* request fifo */
struct talitos_request *fifo;
@@ -120,7 +122,7 @@ struct talitos_private {
struct device *dev;
struct platform_device *ofdev;
void __iomem *reg;
- int irq;
+ int irq[2];
/* SEC version geometry (from device tree node) */
unsigned int num_channels;
@@ -144,7 +146,7 @@ struct talitos_private {
atomic_t last_chan ____cacheline_aligned;
/* request callback tasklet */
- struct tasklet_struct done_task;
+ struct tasklet_struct done_task[2];
/* list of registered algorithms */
struct list_head alg_list;
@@ -157,6 +159,7 @@ struct talitos_private {
#define TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT 0x00000001
#define TALITOS_FTR_HW_AUTH_CHECK 0x00000002
#define TALITOS_FTR_SHA224_HWINIT 0x00000004
+#define TALITOS_FTR_HMAC_OK 0x00000008
static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
{
@@ -196,9 +199,9 @@ static int reset_channel(struct device *dev, int ch)
struct talitos_private *priv = dev_get_drvdata(dev);
unsigned int timeout = TALITOS_TIMEOUT;
- setbits32(priv->reg + TALITOS_CCCR(ch), TALITOS_CCCR_RESET);
+ setbits32(priv->chan[ch].reg + TALITOS_CCCR, TALITOS_CCCR_RESET);
- while ((in_be32(priv->reg + TALITOS_CCCR(ch)) & TALITOS_CCCR_RESET)
+ while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & TALITOS_CCCR_RESET)
&& --timeout)
cpu_relax();
@@ -208,12 +211,12 @@ static int reset_channel(struct device *dev, int ch)
}
/* set 36-bit addressing, done writeback enable and done IRQ enable */
- setbits32(priv->reg + TALITOS_CCCR_LO(ch), TALITOS_CCCR_LO_EAE |
+ setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
/* and ICCR writeback, if available */
if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
- setbits32(priv->reg + TALITOS_CCCR_LO(ch),
+ setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
TALITOS_CCCR_LO_IWSE);
return 0;
@@ -223,13 +226,19 @@ static int reset_device(struct device *dev)
{
struct talitos_private *priv = dev_get_drvdata(dev);
unsigned int timeout = TALITOS_TIMEOUT;
+ u32 mcr = TALITOS_MCR_SWR;
- setbits32(priv->reg + TALITOS_MCR, TALITOS_MCR_SWR);
+ setbits32(priv->reg + TALITOS_MCR, mcr);
while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
&& --timeout)
cpu_relax();
+ if (priv->irq[1]) {
+ mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
+ setbits32(priv->reg + TALITOS_MCR, mcr);
+ }
+
if (timeout == 0) {
dev_err(dev, "failed to reset device\n");
return -EIO;
@@ -327,8 +336,9 @@ static int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
/* GO! */
wmb();
- out_be32(priv->reg + TALITOS_FF(ch), upper_32_bits(request->dma_desc));
- out_be32(priv->reg + TALITOS_FF_LO(ch),
+ out_be32(priv->chan[ch].reg + TALITOS_FF,
+ upper_32_bits(request->dma_desc));
+ out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
lower_32_bits(request->dma_desc));
spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
@@ -397,21 +407,32 @@ static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
/*
* process completed requests for channels that have done status
*/
-static void talitos_done(unsigned long data)
-{
- struct device *dev = (struct device *)data;
- struct talitos_private *priv = dev_get_drvdata(dev);
- int ch;
-
- for (ch = 0; ch < priv->num_channels; ch++)
- flush_channel(dev, ch, 0, 0);
-
- /* At this point, all completed channels have been processed.
- * Unmask done interrupts for channels completed later on.
- */
- setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
- setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
+#define DEF_TALITOS_DONE(name, ch_done_mask) \
+static void talitos_done_##name(unsigned long data) \
+{ \
+ struct device *dev = (struct device *)data; \
+ struct talitos_private *priv = dev_get_drvdata(dev); \
+ \
+ if (ch_done_mask & 1) \
+ flush_channel(dev, 0, 0, 0); \
+ if (priv->num_channels == 1) \
+ goto out; \
+ if (ch_done_mask & (1 << 2)) \
+ flush_channel(dev, 1, 0, 0); \
+ if (ch_done_mask & (1 << 4)) \
+ flush_channel(dev, 2, 0, 0); \
+ if (ch_done_mask & (1 << 6)) \
+ flush_channel(dev, 3, 0, 0); \
+ \
+out: \
+ /* At this point, all completed channels have been processed */ \
+ /* Unmask done interrupts for channels completed later on. */ \
+ setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
+ setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT); \
}
+DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE)
+DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE)
+DEF_TALITOS_DONE(ch1_3, TALITOS_ISR_CH_1_3_DONE)
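
For readability, this is roughly what DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE) expands to after preprocessing (sketch only, mirroring the macro body above):

    static void talitos_done_4ch(unsigned long data)
    {
            struct device *dev = (struct device *)data;
            struct talitos_private *priv = dev_get_drvdata(dev);

            if (TALITOS_ISR_4CHDONE & 1)
                    flush_channel(dev, 0, 0, 0);
            if (priv->num_channels == 1)
                    goto out;
            if (TALITOS_ISR_4CHDONE & (1 << 2))
                    flush_channel(dev, 1, 0, 0);
            if (TALITOS_ISR_4CHDONE & (1 << 4))
                    flush_channel(dev, 2, 0, 0);
            if (TALITOS_ISR_4CHDONE & (1 << 6))
                    flush_channel(dev, 3, 0, 0);
    out:
            /* unmask done interrupts for channels completed later on */
            setbits32(priv->reg + TALITOS_IMR, TALITOS_ISR_4CHDONE);
            setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);
    }

Since TALITOS_ISR_4CHDONE is 0x55, all four channel tests fold to true at compile time; the ch0_2 and ch1_3 variants keep only their own channels.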
/*
* locate current (offending) descriptor
@@ -422,7 +443,7 @@ static u32 current_desc_hdr(struct device *dev, int ch)
int tail = priv->chan[ch].tail;
dma_addr_t cur_desc;
- cur_desc = in_be32(priv->reg + TALITOS_CDPR_LO(ch));
+ cur_desc = in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);
while (priv->chan[ch].fifo[tail].dma_desc != cur_desc) {
tail = (tail + 1) & (priv->fifo_len - 1);
@@ -444,7 +465,7 @@ static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
int i;
if (!desc_hdr)
- desc_hdr = in_be32(priv->reg + TALITOS_DESCBUF(ch));
+ desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);
switch (desc_hdr & DESC_HDR_SEL0_MASK) {
case DESC_HDR_SEL0_AFEU:
@@ -506,16 +527,15 @@ static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
for (i = 0; i < 8; i++)
dev_err(dev, "DESCBUF 0x%08x_%08x\n",
- in_be32(priv->reg + TALITOS_DESCBUF(ch) + 8*i),
- in_be32(priv->reg + TALITOS_DESCBUF_LO(ch) + 8*i));
+ in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
+ in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}
/*
* recover from error interrupts
*/
-static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
+static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
- struct device *dev = (struct device *)data;
struct talitos_private *priv = dev_get_drvdata(dev);
unsigned int timeout = TALITOS_TIMEOUT;
int ch, error, reset_dev = 0, reset_ch = 0;
@@ -528,8 +548,8 @@ static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
error = -EINVAL;
- v = in_be32(priv->reg + TALITOS_CCPSR(ch));
- v_lo = in_be32(priv->reg + TALITOS_CCPSR_LO(ch));
+ v = in_be32(priv->chan[ch].reg + TALITOS_CCPSR);
+ v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);
if (v_lo & TALITOS_CCPSR_LO_DOF) {
dev_err(dev, "double fetch fifo overflow error\n");
@@ -567,10 +587,10 @@ static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
if (reset_ch) {
reset_channel(dev, ch);
} else {
- setbits32(priv->reg + TALITOS_CCCR(ch),
+ setbits32(priv->chan[ch].reg + TALITOS_CCCR,
TALITOS_CCCR_CONT);
- setbits32(priv->reg + TALITOS_CCCR_LO(ch), 0);
- while ((in_be32(priv->reg + TALITOS_CCCR(ch)) &
+ setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
+ while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
TALITOS_CCCR_CONT) && --timeout)
cpu_relax();
if (timeout == 0) {
@@ -580,7 +600,7 @@ static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
}
}
}
- if (reset_dev || isr & ~TALITOS_ISR_CHERR || isr_lo) {
+ if (reset_dev || isr & ~TALITOS_ISR_4CHERR || isr_lo) {
dev_err(dev, "done overflow, internal time out, or rngu error: "
"ISR 0x%08x_%08x\n", isr, isr_lo);
@@ -593,30 +613,35 @@ static void talitos_error(unsigned long data, u32 isr, u32 isr_lo)
}
}
-static irqreturn_t talitos_interrupt(int irq, void *data)
-{
- struct device *dev = data;
- struct talitos_private *priv = dev_get_drvdata(dev);
- u32 isr, isr_lo;
-
- isr = in_be32(priv->reg + TALITOS_ISR);
- isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);
- /* Acknowledge interrupt */
- out_be32(priv->reg + TALITOS_ICR, isr);
- out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);
-
- if (unlikely((isr & ~TALITOS_ISR_CHDONE) || isr_lo))
- talitos_error((unsigned long)data, isr, isr_lo);
- else
- if (likely(isr & TALITOS_ISR_CHDONE)) {
- /* mask further done interrupts. */
- clrbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_DONE);
- /* done_task will unmask done interrupts at exit */
- tasklet_schedule(&priv->done_task);
- }
-
- return (isr || isr_lo) ? IRQ_HANDLED : IRQ_NONE;
+#define DEF_TALITOS_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
+static irqreturn_t talitos_interrupt_##name(int irq, void *data) \
+{ \
+ struct device *dev = data; \
+ struct talitos_private *priv = dev_get_drvdata(dev); \
+ u32 isr, isr_lo; \
+ \
+ isr = in_be32(priv->reg + TALITOS_ISR); \
+ isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
+ /* Acknowledge interrupt */ \
+ out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
+ out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
+ \
+ if (unlikely((isr & ~TALITOS_ISR_4CHDONE) & ch_err_mask || isr_lo)) \
+ talitos_error(dev, isr, isr_lo); \
+ else \
+ if (likely(isr & ch_done_mask)) { \
+ /* mask further done interrupts. */ \
+ clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
+ /* done_task will unmask done interrupts at exit */ \
+ tasklet_schedule(&priv->done_task[tlet]); \
+ } \
+ \
+ return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
+ IRQ_NONE; \
}
+DEF_TALITOS_INTERRUPT(4ch, TALITOS_ISR_4CHDONE, TALITOS_ISR_4CHERR, 0)
+DEF_TALITOS_INTERRUPT(ch0_2, TALITOS_ISR_CH_0_2_DONE, TALITOS_ISR_CH_0_2_ERR, 0)
+DEF_TALITOS_INTERRUPT(ch1_3, TALITOS_ISR_CH_1_3_DONE, TALITOS_ISR_CH_1_3_ERR, 1)
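
The done/error masks fed to these two macro families come from the TALITOS_ISR mask values added to talitos.h later in this patch; they are consistent with channel n reporting done on bit 2n and error on bit 2n+1:

    TALITOS_ISR_4CHDONE      0x55  (binary 01010101)  done bits, channels 0-3
    TALITOS_ISR_4CHERR       0xaa  (binary 10101010)  error bits, channels 0-3
    TALITOS_ISR_CH_0_2_DONE  0x11  (binary 00010001)  channels 0 and 2 (primary IRQ)
    TALITOS_ISR_CH_1_3_DONE  0x44  (binary 01000100)  channels 1 and 3 (secondary IRQ)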
/*
* hwrng
@@ -1874,6 +1899,97 @@ static int ahash_digest(struct ahash_request *areq)
return ahash_process_req(areq, areq->nbytes);
}
+struct keyhash_result {
+ struct completion completion;
+ int err;
+};
+
+static void keyhash_complete(struct crypto_async_request *req, int err)
+{
+ struct keyhash_result *res = req->data;
+
+ if (err == -EINPROGRESS)
+ return;
+
+ res->err = err;
+ complete(&res->completion);
+}
+
+static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
+ u8 *hash)
+{
+ struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+
+ struct scatterlist sg[1];
+ struct ahash_request *req;
+ struct keyhash_result hresult;
+ int ret;
+
+ init_completion(&hresult.completion);
+
+ req = ahash_request_alloc(tfm, GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ /* Keep tfm keylen == 0 during hash of the long key */
+ ctx->keylen = 0;
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ keyhash_complete, &hresult);
+
+ sg_init_one(&sg[0], key, keylen);
+
+ ahash_request_set_crypt(req, sg, hash, keylen);
+ ret = crypto_ahash_digest(req);
+ switch (ret) {
+ case 0:
+ break;
+ case -EINPROGRESS:
+ case -EBUSY:
+ ret = wait_for_completion_interruptible(
+ &hresult.completion);
+ if (!ret)
+ ret = hresult.err;
+ break;
+ default:
+ break;
+ }
+ ahash_request_free(req);
+
+ return ret;
+}
+
+static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
+ unsigned int keylen)
+{
+ struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
+ unsigned int blocksize =
+ crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
+ unsigned int digestsize = crypto_ahash_digestsize(tfm);
+ unsigned int keysize = keylen;
+ u8 hash[SHA512_DIGEST_SIZE];
+ int ret;
+
+ if (keylen <= blocksize)
+ memcpy(ctx->key, key, keysize);
+ else {
+ /* Must get the hash of the long key */
+ ret = keyhash(tfm, key, keylen, hash);
+
+ if (ret) {
+ crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
+ keysize = digestsize;
+ memcpy(ctx->key, hash, digestsize);
+ }
+
+ ctx->keylen = keysize;
+
+ return 0;
+}
+
+
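
ahash_setkey() above follows the standard HMAC key rule (RFC 2104): a key that fits within the hash block size is used as-is, while a longer key is first replaced by its digest. In short:

    K' = K       if len(K) <= blocksize
    K' = H(K)    if len(K) >  blocksize   (so len(K') == digestsize)

The driver stores K' in ctx->key and its length in ctx->keylen; keylen is held at 0 while keyhash() runs, presumably so that the digest of the long key is computed as a plain hash rather than as an HMAC over it.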
struct talitos_alg_template {
u32 type;
union {
@@ -2217,6 +2333,138 @@ static struct talitos_alg_template driver_algs[] = {
DESC_HDR_SEL0_MDEUB |
DESC_HDR_MODE0_MDEUB_SHA512,
},
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .setkey = ahash_setkey,
+ .halg.digestsize = MD5_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(md5)",
+ .cra_driver_name = "hmac-md5-talitos",
+ .cra_blocksize = MD5_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUA |
+ DESC_HDR_MODE0_MDEU_MD5,
+ },
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .setkey = ahash_setkey,
+ .halg.digestsize = SHA1_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha1)",
+ .cra_driver_name = "hmac-sha1-talitos",
+ .cra_blocksize = SHA1_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUA |
+ DESC_HDR_MODE0_MDEU_SHA1,
+ },
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .setkey = ahash_setkey,
+ .halg.digestsize = SHA224_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha224)",
+ .cra_driver_name = "hmac-sha224-talitos",
+ .cra_blocksize = SHA224_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUA |
+ DESC_HDR_MODE0_MDEU_SHA224,
+ },
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .setkey = ahash_setkey,
+ .halg.digestsize = SHA256_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha256)",
+ .cra_driver_name = "hmac-sha256-talitos",
+ .cra_blocksize = SHA256_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUA |
+ DESC_HDR_MODE0_MDEU_SHA256,
+ },
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .setkey = ahash_setkey,
+ .halg.digestsize = SHA384_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha384)",
+ .cra_driver_name = "hmac-sha384-talitos",
+ .cra_blocksize = SHA384_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUB |
+ DESC_HDR_MODE0_MDEUB_SHA384,
+ },
+ { .type = CRYPTO_ALG_TYPE_AHASH,
+ .alg.hash = {
+ .init = ahash_init,
+ .update = ahash_update,
+ .final = ahash_final,
+ .finup = ahash_finup,
+ .digest = ahash_digest,
+ .setkey = ahash_setkey,
+ .halg.digestsize = SHA512_DIGEST_SIZE,
+ .halg.base = {
+ .cra_name = "hmac(sha512)",
+ .cra_driver_name = "hmac-sha512-talitos",
+ .cra_blocksize = SHA512_BLOCK_SIZE,
+ .cra_flags = CRYPTO_ALG_TYPE_AHASH |
+ CRYPTO_ALG_ASYNC,
+ .cra_type = &crypto_ahash_type
+ }
+ },
+ .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
+ DESC_HDR_SEL0_MDEUB |
+ DESC_HDR_MODE0_MDEUB_SHA512,
+ }
};
struct talitos_crypto_alg {
@@ -2331,12 +2579,15 @@ static int talitos_remove(struct platform_device *ofdev)
kfree(priv->chan);
- if (priv->irq != NO_IRQ) {
- free_irq(priv->irq, dev);
- irq_dispose_mapping(priv->irq);
- }
+ for (i = 0; i < 2; i++)
+ if (priv->irq[i]) {
+ free_irq(priv->irq[i], dev);
+ irq_dispose_mapping(priv->irq[i]);
+ }
- tasklet_kill(&priv->done_task);
+ tasklet_kill(&priv->done_task[0]);
+ if (priv->irq[1])
+ tasklet_kill(&priv->done_task[1]);
iounmap(priv->reg);
@@ -2373,8 +2624,14 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
case CRYPTO_ALG_TYPE_AHASH:
alg = &t_alg->algt.alg.hash.halg.base;
alg->cra_init = talitos_cra_init_ahash;
+ if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
+ !strncmp(alg->cra_name, "hmac", 4)) {
+ kfree(t_alg);
+ return ERR_PTR(-ENOTSUPP);
+ }
if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
- !strcmp(alg->cra_name, "sha224")) {
+ (!strcmp(alg->cra_name, "sha224") ||
+ !strcmp(alg->cra_name, "hmac(sha224)"))) {
t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
t_alg->algt.desc_hdr_template =
DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
@@ -2397,6 +2654,54 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
return t_alg;
}
+static int talitos_probe_irq(struct platform_device *ofdev)
+{
+ struct device *dev = &ofdev->dev;
+ struct device_node *np = ofdev->dev.of_node;
+ struct talitos_private *priv = dev_get_drvdata(dev);
+ int err;
+
+ priv->irq[0] = irq_of_parse_and_map(np, 0);
+ if (!priv->irq[0]) {
+ dev_err(dev, "failed to map irq\n");
+ return -EINVAL;
+ }
+
+ priv->irq[1] = irq_of_parse_and_map(np, 1);
+
+ /* get the primary irq line */
+ if (!priv->irq[1]) {
+ err = request_irq(priv->irq[0], talitos_interrupt_4ch, 0,
+ dev_driver_string(dev), dev);
+ goto primary_out;
+ }
+
+ err = request_irq(priv->irq[0], talitos_interrupt_ch0_2, 0,
+ dev_driver_string(dev), dev);
+ if (err)
+ goto primary_out;
+
+ /* get the secondary irq line */
+ err = request_irq(priv->irq[1], talitos_interrupt_ch1_3, 0,
+ dev_driver_string(dev), dev);
+ if (err) {
+ dev_err(dev, "failed to request secondary irq\n");
+ irq_dispose_mapping(priv->irq[1]);
+ priv->irq[1] = 0;
+ }
+
+ return err;
+
+primary_out:
+ if (err) {
+ dev_err(dev, "failed to request primary irq\n");
+ irq_dispose_mapping(priv->irq[0]);
+ priv->irq[0] = 0;
+ }
+
+ return err;
+}
+
static int talitos_probe(struct platform_device *ofdev)
{
struct device *dev = &ofdev->dev;
@@ -2413,28 +2718,22 @@ static int talitos_probe(struct platform_device *ofdev)
priv->ofdev = ofdev;
- tasklet_init(&priv->done_task, talitos_done, (unsigned long)dev);
-
- INIT_LIST_HEAD(&priv->alg_list);
-
- priv->irq = irq_of_parse_and_map(np, 0);
-
- if (priv->irq == NO_IRQ) {
- dev_err(dev, "failed to map irq\n");
- err = -EINVAL;
+ err = talitos_probe_irq(ofdev);
+ if (err)
goto err_out;
- }
- /* get the irq line */
- err = request_irq(priv->irq, talitos_interrupt, 0,
- dev_driver_string(dev), dev);
- if (err) {
- dev_err(dev, "failed to request irq %d\n", priv->irq);
- irq_dispose_mapping(priv->irq);
- priv->irq = NO_IRQ;
- goto err_out;
+ if (!priv->irq[1]) {
+ tasklet_init(&priv->done_task[0], talitos_done_4ch,
+ (unsigned long)dev);
+ } else {
+ tasklet_init(&priv->done_task[0], talitos_done_ch0_2,
+ (unsigned long)dev);
+ tasklet_init(&priv->done_task[1], talitos_done_ch1_3,
+ (unsigned long)dev);
}
+ INIT_LIST_HEAD(&priv->alg_list);
+
priv->reg = of_iomap(np, 0);
if (!priv->reg) {
dev_err(dev, "failed to of_iomap\n");
@@ -2471,7 +2770,8 @@ static int talitos_probe(struct platform_device *ofdev)
if (of_device_is_compatible(np, "fsl,sec2.1"))
priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
- TALITOS_FTR_SHA224_HWINIT;
+ TALITOS_FTR_SHA224_HWINIT |
+ TALITOS_FTR_HMAC_OK;
priv->chan = kzalloc(sizeof(struct talitos_channel) *
priv->num_channels, GFP_KERNEL);
@@ -2482,6 +2782,12 @@ static int talitos_probe(struct platform_device *ofdev)
}
for (i = 0; i < priv->num_channels; i++) {
+ priv->chan[i].reg = priv->reg + TALITOS_CH_STRIDE * (i + 1);
+ if (!priv->irq[1] || !(i & 1))
+ priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
+ }
+
+ for (i = 0; i < priv->num_channels; i++) {
spin_lock_init(&priv->chan[i].head_lock);
spin_lock_init(&priv->chan[i].tail_lock);
}
@@ -2530,6 +2836,8 @@ static int talitos_probe(struct platform_device *ofdev)
t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
+ if (err == -ENOTSUPP)
+ continue;
goto err_out;
}
@@ -2551,12 +2859,13 @@ static int talitos_probe(struct platform_device *ofdev)
dev_err(dev, "%s alg registration failed\n",
name);
kfree(t_alg);
- } else {
+ } else
list_add_tail(&t_alg->entry, &priv->alg_list);
- dev_info(dev, "%s\n", name);
- }
}
}
+ if (!list_empty(&priv->alg_list))
+ dev_info(dev, "%s algorithms registered in /proc/crypto\n",
+ (char *)of_get_property(np, "compatible", NULL));
return 0;
@@ -2584,17 +2893,7 @@ static struct platform_driver talitos_driver = {
.remove = talitos_remove,
};
-static int __init talitos_init(void)
-{
- return platform_driver_register(&talitos_driver);
-}
-module_init(talitos_init);
-
-static void __exit talitos_exit(void)
-{
- platform_driver_unregister(&talitos_driver);
-}
-module_exit(talitos_exit);
+module_platform_driver(talitos_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
diff --git a/drivers/crypto/talitos.h b/drivers/crypto/talitos.h
index 0b746ac..3c17395 100644
--- a/drivers/crypto/talitos.h
+++ b/drivers/crypto/talitos.h
@@ -1,7 +1,7 @@
/*
* Freescale SEC (talitos) device register and descriptor header defines
*
- * Copyright (c) 2006-2010 Freescale Semiconductor, Inc.
+ * Copyright (c) 2006-2011 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -34,28 +34,37 @@
/* global register offset addresses */
#define TALITOS_MCR 0x1030 /* master control register */
-#define TALITOS_MCR_LO 0x1038
+#define TALITOS_MCR_RCA0 (1 << 15) /* remap channel 0 */
+#define TALITOS_MCR_RCA1 (1 << 14) /* remap channel 1 */
+#define TALITOS_MCR_RCA2 (1 << 13) /* remap channel 2 */
+#define TALITOS_MCR_RCA3 (1 << 12) /* remap channel 3 */
#define TALITOS_MCR_SWR 0x1 /* s/w reset */
+#define TALITOS_MCR_LO 0x1034
#define TALITOS_IMR 0x1008 /* interrupt mask register */
#define TALITOS_IMR_INIT 0x100ff /* enable channel IRQs */
#define TALITOS_IMR_DONE 0x00055 /* done IRQs */
#define TALITOS_IMR_LO 0x100C
#define TALITOS_IMR_LO_INIT 0x20000 /* allow RNGU error IRQs */
#define TALITOS_ISR 0x1010 /* interrupt status register */
-#define TALITOS_ISR_CHERR 0xaa /* channel errors mask */
-#define TALITOS_ISR_CHDONE 0x55 /* channel done mask */
+#define TALITOS_ISR_4CHERR 0xaa /* 4 channel errors mask */
+#define TALITOS_ISR_4CHDONE 0x55 /* 4 channel done mask */
+#define TALITOS_ISR_CH_0_2_ERR 0x22 /* channels 0, 2 errors mask */
+#define TALITOS_ISR_CH_0_2_DONE 0x11 /* channels 0, 2 done mask */
+#define TALITOS_ISR_CH_1_3_ERR 0x88 /* channels 1, 3 errors mask */
+#define TALITOS_ISR_CH_1_3_DONE 0x44 /* channels 1, 3 done mask */
#define TALITOS_ISR_LO 0x1014
#define TALITOS_ICR 0x1018 /* interrupt clear register */
#define TALITOS_ICR_LO 0x101C
/* channel register address stride */
+#define TALITOS_CH_BASE_OFFSET 0x1000 /* default channel map base */
#define TALITOS_CH_STRIDE 0x100
/* channel configuration register */
-#define TALITOS_CCCR(ch) (ch * TALITOS_CH_STRIDE + 0x1108)
+#define TALITOS_CCCR 0x8
#define TALITOS_CCCR_CONT 0x2 /* channel continue */
#define TALITOS_CCCR_RESET 0x1 /* channel reset */
-#define TALITOS_CCCR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x110c)
+#define TALITOS_CCCR_LO 0xc
#define TALITOS_CCCR_LO_IWSE 0x80 /* chan. ICCR writeback enab. */
#define TALITOS_CCCR_LO_EAE 0x20 /* extended address enable */
#define TALITOS_CCCR_LO_CDWE 0x10 /* chan. done writeback enab. */
@@ -63,8 +72,8 @@
#define TALITOS_CCCR_LO_CDIE 0x2 /* channel done IRQ enable */
/* CCPSR: channel pointer status register */
-#define TALITOS_CCPSR(ch) (ch * TALITOS_CH_STRIDE + 0x1110)
-#define TALITOS_CCPSR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x1114)
+#define TALITOS_CCPSR 0x10
+#define TALITOS_CCPSR_LO 0x14
#define TALITOS_CCPSR_LO_DOF 0x8000 /* double FF write oflow error */
#define TALITOS_CCPSR_LO_SOF 0x4000 /* single FF write oflow error */
#define TALITOS_CCPSR_LO_MDTE 0x2000 /* master data transfer error */
@@ -79,24 +88,24 @@
#define TALITOS_CCPSR_LO_SRL 0x0010 /* scatter return/length error */
/* channel fetch fifo register */
-#define TALITOS_FF(ch) (ch * TALITOS_CH_STRIDE + 0x1148)
-#define TALITOS_FF_LO(ch) (ch * TALITOS_CH_STRIDE + 0x114c)
+#define TALITOS_FF 0x48
+#define TALITOS_FF_LO 0x4c
/* current descriptor pointer register */
-#define TALITOS_CDPR(ch) (ch * TALITOS_CH_STRIDE + 0x1140)
-#define TALITOS_CDPR_LO(ch) (ch * TALITOS_CH_STRIDE + 0x1144)
+#define TALITOS_CDPR 0x40
+#define TALITOS_CDPR_LO 0x44
/* descriptor buffer register */
-#define TALITOS_DESCBUF(ch) (ch * TALITOS_CH_STRIDE + 0x1180)
-#define TALITOS_DESCBUF_LO(ch) (ch * TALITOS_CH_STRIDE + 0x1184)
+#define TALITOS_DESCBUF 0x80
+#define TALITOS_DESCBUF_LO 0x84
/* gather link table */
-#define TALITOS_GATHER(ch) (ch * TALITOS_CH_STRIDE + 0x11c0)
-#define TALITOS_GATHER_LO(ch) (ch * TALITOS_CH_STRIDE + 0x11c4)
+#define TALITOS_GATHER 0xc0
+#define TALITOS_GATHER_LO 0xc4
/* scatter link table */
-#define TALITOS_SCATTER(ch) (ch * TALITOS_CH_STRIDE + 0x11e0)
-#define TALITOS_SCATTER_LO(ch) (ch * TALITOS_CH_STRIDE + 0x11e4)
+#define TALITOS_SCATTER 0xe0
+#define TALITOS_SCATTER_LO 0xe4
/* execution unit interrupt status registers */
#define TALITOS_DEUISR 0x2030 /* DES unit */
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 8f04910..464fa21 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -65,4 +65,17 @@ config DEVFREQ_GOV_USERSPACE
comment "DEVFREQ Drivers"
+config ARM_EXYNOS4_BUS_DEVFREQ
+ bool "ARM Exynos4210/4212/4412 Memory Bus DEVFREQ Driver"
+ depends on CPU_EXYNOS4210 || CPU_EXYNOS4212 || CPU_EXYNOS4412
+ select ARCH_HAS_OPP
+ select DEVFREQ_GOV_SIMPLE_ONDEMAND
+ help
+ This adds the DEVFREQ driver for Exynos4210 memory bus (vdd_int)
+ and Exynos4212/4412 memory interface and bus (vdd_mif + vdd_int).
+ It reads PPMU counters of memory controllers and adjusts
+ the operating frequencies and voltages with OPP support.
+ To operate with optimal voltages, ASV support is required
+ (CONFIG_EXYNOS_ASV).
+
endif # PM_DEVFREQ
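
A minimal configuration fragment that enables the new driver on a CPU_EXYNOS4210/4212/4412 platform (the governor line is already implied by the select above and is shown only for clarity):

    CONFIG_PM_DEVFREQ=y
    CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
    CONFIG_ARM_EXYNOS4_BUS_DEVFREQ=y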
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index 4564a89..8c46423 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -3,3 +3,6 @@ obj-$(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND) += governor_simpleondemand.o
obj-$(CONFIG_DEVFREQ_GOV_PERFORMANCE) += governor_performance.o
obj-$(CONFIG_DEVFREQ_GOV_POWERSAVE) += governor_powersave.o
obj-$(CONFIG_DEVFREQ_GOV_USERSPACE) += governor_userspace.o
+
+# DEVFREQ Drivers
+obj-$(CONFIG_ARM_EXYNOS4_BUS_DEVFREQ) += exynos4_bus.o
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 59d24e9..c189b82 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -347,7 +347,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
if (!IS_ERR(devfreq)) {
dev_err(dev, "%s: Unable to create devfreq for the device. It already has one.\n", __func__);
err = -EINVAL;
- goto out;
+ goto err_out;
}
}
@@ -356,7 +356,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
dev_err(dev, "%s: Unable to create devfreq for the device\n",
__func__);
err = -ENOMEM;
- goto out;
+ goto err_out;
}
mutex_init(&devfreq->lock);
@@ -399,17 +399,16 @@ struct devfreq *devfreq_add_device(struct device *dev,
devfreq->next_polling);
}
mutex_unlock(&devfreq_list_lock);
- goto out;
+out:
+ return devfreq;
+
err_init:
device_unregister(&devfreq->dev);
err_dev:
mutex_unlock(&devfreq->lock);
kfree(devfreq);
-out:
- if (err)
- return ERR_PTR(err);
- else
- return devfreq;
+err_out:
+ return ERR_PTR(err);
}
/**
diff --git a/drivers/devfreq/exynos4_bus.c b/drivers/devfreq/exynos4_bus.c
new file mode 100644
index 0000000..6460577
--- /dev/null
+++ b/drivers/devfreq/exynos4_bus.c
@@ -0,0 +1,1135 @@
+/* drivers/devfreq/exynos4210_memorybus.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * EXYNOS4 - Memory/Bus clock frequency scaling support in DEVFREQ framework
+ * This version supports EXYNOS4210 only. This changes bus frequencies
+ * and vddint voltages. Exynos4412/4212 should be able to be supported
+ * with minor modifications.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/suspend.h>
+#include <linux/opp.h>
+#include <linux/devfreq.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/module.h>
+
+/* Exynos4 ASV has been in the mailing list, but not upstreamed, yet. */
+#ifdef CONFIG_EXYNOS_ASV
+extern unsigned int exynos_result_of_asv;
+#endif
+
+#include <mach/regs-clock.h>
+
+#include <plat/map-s5p.h>
+
+#define MAX_SAFEVOLT 1200000 /* 1.2V */
+
+enum exynos4_busf_type {
+ TYPE_BUSF_EXYNOS4210,
+ TYPE_BUSF_EXYNOS4x12,
+};
+
+/* Assume that the bus is saturated if the utilization is 40% */
+#define BUS_SATURATION_RATIO 40
+
+enum ppmu_counter {
+ PPMU_PMNCNT0 = 0,
+ PPMU_PMCCNT1,
+ PPMU_PMNCNT2,
+ PPMU_PMNCNT3,
+ PPMU_PMNCNT_MAX,
+};
+struct exynos4_ppmu {
+ void __iomem *hw_base;
+ unsigned int ccnt;
+ unsigned int event;
+ unsigned int count[PPMU_PMNCNT_MAX];
+ bool ccnt_overflow;
+ bool count_overflow[PPMU_PMNCNT_MAX];
+};
+
+enum busclk_level_idx {
+ LV_0 = 0,
+ LV_1,
+ LV_2,
+ LV_3,
+ LV_4,
+ _LV_END
+};
+#define EX4210_LV_MAX LV_2
+#define EX4x12_LV_MAX LV_4
+#define EX4210_LV_NUM (LV_2 + 1)
+#define EX4x12_LV_NUM (LV_4 + 1)
+
+struct busfreq_data {
+ enum exynos4_busf_type type;
+ struct device *dev;
+ struct devfreq *devfreq;
+ bool disabled;
+ struct regulator *vdd_int;
+ struct regulator *vdd_mif; /* Exynos4412/4212 only */
+ struct opp *curr_opp;
+ struct exynos4_ppmu dmc[2];
+
+ struct notifier_block pm_notifier;
+ struct mutex lock;
+
+ /* Dividers calculated at boot/probe-time */
+ unsigned int dmc_divtable[_LV_END]; /* DMC0 */
+ unsigned int top_divtable[_LV_END];
+};
+
+struct bus_opp_table {
+ unsigned int idx;
+ unsigned long clk;
+ unsigned long volt;
+};
+
+/* 4210 controls clock of mif and voltage of int */
+static struct bus_opp_table exynos4210_busclk_table[] = {
+ {LV_0, 400000, 1150000},
+ {LV_1, 267000, 1050000},
+ {LV_2, 133000, 1025000},
+ {0, 0, 0},
+};
+
+/*
+ * MIF is the main control knob clock for exynos4x12 MIF/INT;
+ * the clock and voltage of both mif and int are controlled.
+ */
+static struct bus_opp_table exynos4x12_mifclk_table[] = {
+ {LV_0, 400000, 1100000},
+ {LV_1, 267000, 1000000},
+ {LV_2, 160000, 950000},
+ {LV_3, 133000, 950000},
+ {LV_4, 100000, 950000},
+ {0, 0, 0},
+};
+
+/*
+ * INT is not the control knob of 4x12; its LV_x levels are not meant
+ * to represent the current performance (MIF's are).
+ */
+static struct bus_opp_table exynos4x12_intclk_table[] = {
+ {LV_0, 200000, 1000000},
+ {LV_1, 160000, 950000},
+ {LV_2, 133000, 925000},
+ {LV_3, 100000, 900000},
+ {0, 0, 0},
+};
+
+/* TODO: asv volt definitions are "__initdata"? */
+/* Some chips have different operating voltages */
+static unsigned int exynos4210_asv_volt[][EX4210_LV_NUM] = {
+ {1150000, 1050000, 1050000},
+ {1125000, 1025000, 1025000},
+ {1100000, 1000000, 1000000},
+ {1075000, 975000, 975000},
+ {1050000, 950000, 950000},
+};
+
+static unsigned int exynos4x12_mif_step_50[][EX4x12_LV_NUM] = {
+ /* 400 267 160 133 100 */
+ {1050000, 950000, 900000, 900000, 900000}, /* ASV0 */
+ {1050000, 950000, 900000, 900000, 900000}, /* ASV1 */
+ {1050000, 950000, 900000, 900000, 900000}, /* ASV2 */
+ {1050000, 900000, 900000, 900000, 900000}, /* ASV3 */
+ {1050000, 900000, 900000, 900000, 850000}, /* ASV4 */
+ {1050000, 900000, 900000, 850000, 850000}, /* ASV5 */
+ {1050000, 900000, 850000, 850000, 850000}, /* ASV6 */
+ {1050000, 900000, 850000, 850000, 850000}, /* ASV7 */
+ {1050000, 900000, 850000, 850000, 850000}, /* ASV8 */
+};
+
+static unsigned int exynos4x12_int_volt[][EX4x12_LV_NUM] = {
+ /* 200 160 133 100 */
+ {1000000, 950000, 925000, 900000}, /* ASV0 */
+ {975000, 925000, 925000, 900000}, /* ASV1 */
+ {950000, 925000, 900000, 875000}, /* ASV2 */
+ {950000, 900000, 900000, 875000}, /* ASV3 */
+ {925000, 875000, 875000, 875000}, /* ASV4 */
+ {900000, 850000, 850000, 850000}, /* ASV5 */
+ {900000, 850000, 850000, 850000}, /* ASV6 */
+ {900000, 850000, 850000, 850000}, /* ASV7 */
+ {900000, 850000, 850000, 850000}, /* ASV8 */
+};
+
+/*** Clock Divider Data for Exynos4210 ***/
+static unsigned int exynos4210_clkdiv_dmc0[][8] = {
+ /*
+ * Clock divider value for following
+ * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
+ * DIVDMCP, DIVCOPY2, DIVCORE_TIMERS }
+ */
+
+ /* DMC L0: 400MHz */
+ { 3, 1, 1, 1, 1, 1, 3, 1 },
+ /* DMC L1: 266.7MHz */
+ { 4, 1, 1, 2, 1, 1, 3, 1 },
+ /* DMC L2: 133MHz */
+ { 5, 1, 1, 5, 1, 1, 3, 1 },
+};
+static unsigned int exynos4210_clkdiv_top[][5] = {
+ /*
+ * Clock divider value for following
+ * { DIVACLK200, DIVACLK100, DIVACLK160, DIVACLK133, DIVONENAND }
+ */
+ /* ACLK200 L0: 200MHz */
+ { 3, 7, 4, 5, 1 },
+ /* ACLK200 L1: 160MHz */
+ { 4, 7, 5, 6, 1 },
+ /* ACLK200 L2: 133MHz */
+ { 5, 7, 7, 7, 1 },
+};
+static unsigned int exynos4210_clkdiv_lr_bus[][2] = {
+ /*
+ * Clock divider value for following
+ * { DIVGDL/R, DIVGPL/R }
+ */
+ /* ACLK_GDL/R L1: 200MHz */
+ { 3, 1 },
+ /* ACLK_GDL/R L2: 160MHz */
+ { 4, 1 },
+ /* ACLK_GDL/R L3: 133MHz */
+ { 5, 1 },
+};
+
+/*** Clock Divider Data for Exynos4212/4412 ***/
+static unsigned int exynos4x12_clkdiv_dmc0[][6] = {
+ /*
+ * Clock divider value for following
+ * { DIVACP, DIVACP_PCLK, DIVDPHY, DIVDMC, DIVDMCD
+ * DIVDMCP}
+ */
+
+ /* DMC L0: 400MHz */
+ {3, 1, 1, 1, 1, 1},
+ /* DMC L1: 266.7MHz */
+ {4, 1, 1, 2, 1, 1},
+ /* DMC L2: 160MHz */
+ {5, 1, 1, 4, 1, 1},
+ /* DMC L3: 133MHz */
+ {5, 1, 1, 5, 1, 1},
+ /* DMC L4: 100MHz */
+ {7, 1, 1, 7, 1, 1},
+};
+static unsigned int exynos4x12_clkdiv_dmc1[][6] = {
+ /*
+ * Clock divider value for following
+ * { G2DACP, DIVC2C, DIVC2C_ACLK }
+ */
+
+ /* DMC L0: 400MHz */
+ {3, 1, 1},
+ /* DMC L1: 266.7MHz */
+ {4, 2, 1},
+ /* DMC L2: 160MHz */
+ {5, 4, 1},
+ /* DMC L3: 133MHz */
+ {5, 5, 1},
+ /* DMC L4: 100MHz */
+ {7, 7, 1},
+};
+static unsigned int exynos4x12_clkdiv_top[][5] = {
+ /*
+ * Clock divider value for following
+ * { DIVACLK266_GPS, DIVACLK100, DIVACLK160,
+ DIVACLK133, DIVONENAND }
+ */
+
+ /* ACLK_GDL/R L0: 200MHz */
+ {2, 7, 4, 5, 1},
+ /* ACLK_GDL/R L1: 200MHz */
+ {2, 7, 4, 5, 1},
+ /* ACLK_GDL/R L2: 160MHz */
+ {4, 7, 5, 7, 1},
+ /* ACLK_GDL/R L3: 133MHz */
+ {4, 7, 5, 7, 1},
+ /* ACLK_GDL/R L4: 100MHz */
+ {7, 7, 7, 7, 1},
+};
+static unsigned int exynos4x12_clkdiv_lr_bus[][2] = {
+ /*
+ * Clock divider value for following
+ * { DIVGDL/R, DIVGPL/R }
+ */
+
+ /* ACLK_GDL/R L0: 200MHz */
+ {3, 1},
+ /* ACLK_GDL/R L1: 200MHz */
+ {3, 1},
+ /* ACLK_GDL/R L2: 160MHz */
+ {4, 1},
+ /* ACLK_GDL/R L3: 133MHz */
+ {5, 1},
+ /* ACLK_GDL/R L4: 100MHz */
+ {7, 1},
+};
+static unsigned int exynos4x12_clkdiv_sclkip[][3] = {
+ /*
+ * Clock divider value for following
+ * { DIVMFC, DIVJPEG, DIVFIMC0~3}
+ */
+
+ /* SCLK_MFC: 200MHz */
+ {3, 3, 4},
+ /* SCLK_MFC: 200MHz */
+ {3, 3, 4},
+ /* SCLK_MFC: 160MHz */
+ {4, 4, 5},
+ /* SCLK_MFC: 133MHz */
+ {5, 5, 5},
+ /* SCLK_MFC: 100MHz */
+ {7, 7, 7},
+};
+
+
+static int exynos4210_set_busclk(struct busfreq_data *data, struct opp *opp)
+{
+ unsigned int index;
+ unsigned int tmp;
+
+ for (index = LV_0; index < EX4210_LV_NUM; index++)
+ if (opp_get_freq(opp) == exynos4210_busclk_table[index].clk)
+ break;
+
+ if (index == EX4210_LV_NUM)
+ return -EINVAL;
+
+ /* Change Divider - DMC0 */
+ tmp = data->dmc_divtable[index];
+
+ __raw_writel(tmp, S5P_CLKDIV_DMC0);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
+ } while (tmp & 0x11111111);
+
+ /* Change Divider - TOP */
+ tmp = data->top_divtable[index];
+
+ __raw_writel(tmp, S5P_CLKDIV_TOP);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
+ } while (tmp & 0x11111);
+
+ /* Change Divider - LEFTBUS */
+ tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);
+
+ tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+ tmp |= ((exynos4210_clkdiv_lr_bus[index][0] <<
+ S5P_CLKDIV_BUS_GDLR_SHIFT) |
+ (exynos4210_clkdiv_lr_bus[index][1] <<
+ S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_LEFTBUS);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
+ } while (tmp & 0x11);
+
+ /* Change Divider - RIGHTBUS */
+ tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);
+
+ tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+ tmp |= ((exynos4210_clkdiv_lr_bus[index][0] <<
+ S5P_CLKDIV_BUS_GDLR_SHIFT) |
+ (exynos4210_clkdiv_lr_bus[index][1] <<
+ S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
+ } while (tmp & 0x11);
+
+ return 0;
+}
+
+static int exynos4x12_set_busclk(struct busfreq_data *data, struct opp *opp)
+{
+ unsigned int index;
+ unsigned int tmp;
+
+ for (index = LV_0; index < EX4x12_LV_NUM; index++)
+ if (opp_get_freq(opp) == exynos4x12_mifclk_table[index].clk)
+ break;
+
+ if (index == EX4x12_LV_NUM)
+ return -EINVAL;
+
+ /* Change Divider - DMC0 */
+ tmp = data->dmc_divtable[index];
+
+ __raw_writel(tmp, S5P_CLKDIV_DMC0);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_DMC0);
+ } while (tmp & 0x11111111);
+
+ /* Change Divider - DMC1 */
+ tmp = __raw_readl(S5P_CLKDIV_DMC1);
+
+ tmp &= ~(S5P_CLKDIV_DMC1_G2D_ACP_MASK |
+ S5P_CLKDIV_DMC1_C2C_MASK |
+ S5P_CLKDIV_DMC1_C2CACLK_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_dmc1[index][0] <<
+ S5P_CLKDIV_DMC1_G2D_ACP_SHIFT) |
+ (exynos4x12_clkdiv_dmc1[index][1] <<
+ S5P_CLKDIV_DMC1_C2C_SHIFT) |
+ (exynos4x12_clkdiv_dmc1[index][2] <<
+ S5P_CLKDIV_DMC1_C2CACLK_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_DMC1);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_DMC1);
+ } while (tmp & 0x111111);
+
+ /* Change Divider - TOP */
+ tmp = __raw_readl(S5P_CLKDIV_TOP);
+
+ tmp &= ~(S5P_CLKDIV_TOP_ACLK266_GPS_MASK |
+ S5P_CLKDIV_TOP_ACLK100_MASK |
+ S5P_CLKDIV_TOP_ACLK160_MASK |
+ S5P_CLKDIV_TOP_ACLK133_MASK |
+ S5P_CLKDIV_TOP_ONENAND_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_top[index][0] <<
+ S5P_CLKDIV_TOP_ACLK266_GPS_SHIFT) |
+ (exynos4x12_clkdiv_top[index][1] <<
+ S5P_CLKDIV_TOP_ACLK100_SHIFT) |
+ (exynos4x12_clkdiv_top[index][2] <<
+ S5P_CLKDIV_TOP_ACLK160_SHIFT) |
+ (exynos4x12_clkdiv_top[index][3] <<
+ S5P_CLKDIV_TOP_ACLK133_SHIFT) |
+ (exynos4x12_clkdiv_top[index][4] <<
+ S5P_CLKDIV_TOP_ONENAND_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_TOP);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_TOP);
+ } while (tmp & 0x11111);
+
+ /* Change Divider - LEFTBUS */
+ tmp = __raw_readl(S5P_CLKDIV_LEFTBUS);
+
+ tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_lr_bus[index][0] <<
+ S5P_CLKDIV_BUS_GDLR_SHIFT) |
+ (exynos4x12_clkdiv_lr_bus[index][1] <<
+ S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_LEFTBUS);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_LEFTBUS);
+ } while (tmp & 0x11);
+
+ /* Change Divider - RIGHTBUS */
+ tmp = __raw_readl(S5P_CLKDIV_RIGHTBUS);
+
+ tmp &= ~(S5P_CLKDIV_BUS_GDLR_MASK | S5P_CLKDIV_BUS_GPLR_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_lr_bus[index][0] <<
+ S5P_CLKDIV_BUS_GDLR_SHIFT) |
+ (exynos4x12_clkdiv_lr_bus[index][1] <<
+ S5P_CLKDIV_BUS_GPLR_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_RIGHTBUS);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_RIGHTBUS);
+ } while (tmp & 0x11);
+
+ /* Change Divider - MFC */
+ tmp = __raw_readl(S5P_CLKDIV_MFC);
+
+ tmp &= ~(S5P_CLKDIV_MFC_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_sclkip[index][0] <<
+ S5P_CLKDIV_MFC_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_MFC);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_MFC);
+ } while (tmp & 0x1);
+
+ /* Change Divider - JPEG */
+ tmp = __raw_readl(S5P_CLKDIV_CAM1);
+
+ tmp &= ~(S5P_CLKDIV_CAM1_JPEG_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_sclkip[index][1] <<
+ S5P_CLKDIV_CAM1_JPEG_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_CAM1);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_CAM1);
+ } while (tmp & 0x1);
+
+ /* Change Divider - FIMC0~3 */
+ tmp = __raw_readl(S5P_CLKDIV_CAM);
+
+ tmp &= ~(S5P_CLKDIV_CAM_FIMC0_MASK | S5P_CLKDIV_CAM_FIMC1_MASK |
+ S5P_CLKDIV_CAM_FIMC2_MASK | S5P_CLKDIV_CAM_FIMC3_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_sclkip[index][2] <<
+ S5P_CLKDIV_CAM_FIMC0_SHIFT) |
+ (exynos4x12_clkdiv_sclkip[index][2] <<
+ S5P_CLKDIV_CAM_FIMC1_SHIFT) |
+ (exynos4x12_clkdiv_sclkip[index][2] <<
+ S5P_CLKDIV_CAM_FIMC2_SHIFT) |
+ (exynos4x12_clkdiv_sclkip[index][2] <<
+ S5P_CLKDIV_CAM_FIMC3_SHIFT));
+
+ __raw_writel(tmp, S5P_CLKDIV_CAM);
+
+ do {
+ tmp = __raw_readl(S5P_CLKDIV_STAT_CAM1);
+ } while (tmp & 0x1111);
+
+ return 0;
+}
+
+
+static void busfreq_mon_reset(struct busfreq_data *data)
+{
+ unsigned int i;
+
+ for (i = 0; i < 2; i++) {
+ void __iomem *ppmu_base = data->dmc[i].hw_base;
+
+ /* Reset PPMU */
+ __raw_writel(0x8000000f, ppmu_base + 0xf010);
+ __raw_writel(0x8000000f, ppmu_base + 0xf050);
+ __raw_writel(0x6, ppmu_base + 0xf000);
+ __raw_writel(0x0, ppmu_base + 0xf100);
+
+ /* Set PPMU Event */
+ data->dmc[i].event = 0x6;
+ __raw_writel(((data->dmc[i].event << 12) | 0x1),
+ ppmu_base + 0xfc);
+
+ /* Start PPMU */
+ __raw_writel(0x1, ppmu_base + 0xf000);
+ }
+}
+
+static void exynos4_read_ppmu(struct busfreq_data *data)
+{
+ int i, j;
+
+ for (i = 0; i < 2; i++) {
+ void __iomem *ppmu_base = data->dmc[i].hw_base;
+ u32 overflow;
+
+ /* Stop PPMU */
+ __raw_writel(0x0, ppmu_base + 0xf000);
+
+ /* Update local data from PPMU */
+ overflow = __raw_readl(ppmu_base + 0xf050);
+
+ data->dmc[i].ccnt = __raw_readl(ppmu_base + 0xf100);
+ data->dmc[i].ccnt_overflow = overflow & (1 << 31);
+
+ for (j = 0; j < PPMU_PMNCNT_MAX; j++) {
+ data->dmc[i].count[j] = __raw_readl(
+ ppmu_base + (0xf110 + (0x10 * j)));
+ data->dmc[i].count_overflow[j] = overflow & (1 << j);
+ }
+ }
+
+ busfreq_mon_reset(data);
+}
+
+static int exynos4x12_get_intspec(unsigned long mifclk)
+{
+ int i = 0;
+
+ while (exynos4x12_intclk_table[i].clk) {
+ if (exynos4x12_intclk_table[i].clk <= mifclk)
+ return i;
+ i++;
+ }
+
+ return -EINVAL;
+}
+
+static int exynos4_bus_setvolt(struct busfreq_data *data, struct opp *opp,
+ struct opp *oldopp)
+{
+ int err = 0, tmp;
+ unsigned long volt = opp_get_voltage(opp);
+
+ switch (data->type) {
+ case TYPE_BUSF_EXYNOS4210:
+ /* OPP represents DMC clock + INT voltage */
+ err = regulator_set_voltage(data->vdd_int, volt,
+ MAX_SAFEVOLT);
+ break;
+ case TYPE_BUSF_EXYNOS4x12:
+ /* OPP represents MIF clock + MIF voltage */
+ err = regulator_set_voltage(data->vdd_mif, volt,
+ MAX_SAFEVOLT);
+ if (err)
+ break;
+
+ tmp = exynos4x12_get_intspec(opp_get_freq(opp));
+ if (tmp < 0) {
+ err = tmp;
+ regulator_set_voltage(data->vdd_mif,
+ opp_get_voltage(oldopp),
+ MAX_SAFEVOLT);
+ break;
+ }
+ err = regulator_set_voltage(data->vdd_int,
+ exynos4x12_intclk_table[tmp].volt,
+ MAX_SAFEVOLT);
+ /* Try to recover */
+ if (err)
+ regulator_set_voltage(data->vdd_mif,
+ opp_get_voltage(oldopp),
+ MAX_SAFEVOLT);
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int exynos4_bus_target(struct device *dev, unsigned long *_freq)
+{
+ int err = 0;
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+ struct busfreq_data *data = platform_get_drvdata(pdev);
+ struct opp *opp = devfreq_recommended_opp(dev, _freq);
+ unsigned long old_freq = opp_get_freq(data->curr_opp);
+ unsigned long freq = opp_get_freq(opp);
+
+ if (old_freq == freq)
+ return 0;
+
+ dev_dbg(dev, "targeting %lukHz %luuV\n", freq, opp_get_voltage(opp));
+
+ mutex_lock(&data->lock);
+
+ if (data->disabled)
+ goto out;
+
+ if (old_freq < freq)
+ err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+ if (err)
+ goto out;
+
+ if (old_freq != freq) {
+ switch (data->type) {
+ case TYPE_BUSF_EXYNOS4210:
+ err = exynos4210_set_busclk(data, opp);
+ break;
+ case TYPE_BUSF_EXYNOS4x12:
+ err = exynos4x12_set_busclk(data, opp);
+ break;
+ default:
+ err = -EINVAL;
+ }
+ }
+ if (err)
+ goto out;
+
+ if (old_freq > freq)
+ err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+ if (err)
+ goto out;
+
+ data->curr_opp = opp;
+out:
+ mutex_unlock(&data->lock);
+ return err;
+}
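+
+The ordering in exynos4_bus_target() is the usual DVFS rule: the rail must never sit below what the currently programmed frequency requires, so the voltage is raised before a frequency increase and lowered only after a frequency decrease. Schematically (sketch of the calls above):
+
+    scaling up:    exynos4_bus_setvolt(new);   *_set_busclk(new);
+    scaling down:  *_set_busclk(new);          exynos4_bus_setvolt(new);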
+
+static int exynos4_get_busier_dmc(struct busfreq_data *data)
+{
+ u64 p0 = data->dmc[0].count[0];
+ u64 p1 = data->dmc[1].count[0];
+
+ p0 *= data->dmc[1].ccnt;
+ p1 *= data->dmc[0].ccnt;
+
+ if (data->dmc[1].ccnt == 0)
+ return 0;
+
+ if (p0 > p1)
+ return 0;
+ return 1;
+}
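+
+exynos4_get_busier_dmc() compares the two memory controllers' utilization ratios without any division, by cross-multiplying: for ccnt0, ccnt1 > 0,
+
+    count0 / ccnt0 > count1 / ccnt1   <=>   count0 * ccnt1 > count1 * ccnt0
+
+which is also why p0 and p1 are widened to u64 before the multiply (a 32-bit product of two counters could overflow).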
+
+static int exynos4_bus_get_dev_status(struct device *dev,
+ struct devfreq_dev_status *stat)
+{
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+ struct busfreq_data *data = platform_get_drvdata(pdev);
+ int busier_dmc;
+ int cycles_x2 = 2; /* 2 x cycles */
+ void __iomem *addr;
+ u32 timing;
+ u32 memctrl;
+
+ exynos4_read_ppmu(data);
+ busier_dmc = exynos4_get_busier_dmc(data);
+ stat->current_frequency = opp_get_freq(data->curr_opp);
+
+ if (busier_dmc)
+ addr = S5P_VA_DMC1;
+ else
+ addr = S5P_VA_DMC0;
+
+ memctrl = __raw_readl(addr + 0x04); /* one of DDR2/3/LPDDR2 */
+ timing = __raw_readl(addr + 0x38); /* CL or WL/RL values */
+
+ switch ((memctrl >> 8) & 0xf) {
+ case 0x4: /* DDR2 */
+ cycles_x2 = ((timing >> 16) & 0xf) * 2;
+ break;
+ case 0x5: /* LPDDR2 */
+ case 0x6: /* DDR3 */
+ cycles_x2 = ((timing >> 8) & 0xf) + ((timing >> 0) & 0xf);
+ break;
+ default:
+ pr_err("%s: Unknown Memory Type(%d).\n", __func__,
+ (memctrl >> 8) & 0xf);
+ return -EINVAL;
+ }
+
+ /* Number of cycles spent on memory access */
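+ /*
+ * Each counted event is assumed to occupy (cycles_x2 / 2 + 1) cycles;
+ * the result is then scaled by 100 / BUS_SATURATION_RATIO before being
+ * reported as busy_time.
+ */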
+ stat->busy_time = data->dmc[busier_dmc].count[0] / 2 * (cycles_x2 + 2);
+ stat->busy_time *= 100 / BUS_SATURATION_RATIO;
+ stat->total_time = data->dmc[busier_dmc].ccnt;
+
+ /* If the counters have overflown, retry */
+ if (data->dmc[busier_dmc].ccnt_overflow ||
+ data->dmc[busier_dmc].count_overflow[0])
+ return -EAGAIN;
+
+ return 0;
+}
+
+static void exynos4_bus_exit(struct device *dev)
+{
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+ struct busfreq_data *data = platform_get_drvdata(pdev);
+
+ devfreq_unregister_opp_notifier(dev, data->devfreq);
+}
+
+static struct devfreq_dev_profile exynos4_devfreq_profile = {
+ .initial_freq = 400000,
+ .polling_ms = 50,
+ .target = exynos4_bus_target,
+ .get_dev_status = exynos4_bus_get_dev_status,
+ .exit = exynos4_bus_exit,
+};
+
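+/*
+ * Precompute the CLKDIV_DMC0 and CLKDIV_TOP register values for every
+ * Exynos4210 bus level, pick the voltage set for the chip's ASV group and
+ * register the resulting frequency/voltage pairs as OPPs.
+ */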
+static int exynos4210_init_tables(struct busfreq_data *data)
+{
+ u32 tmp;
+ int mgrp;
+ int i, err = 0;
+
+ tmp = __raw_readl(S5P_CLKDIV_DMC0);
+ for (i = LV_0; i < EX4210_LV_NUM; i++) {
+ tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK |
+ S5P_CLKDIV_DMC0_ACPPCLK_MASK |
+ S5P_CLKDIV_DMC0_DPHY_MASK |
+ S5P_CLKDIV_DMC0_DMC_MASK |
+ S5P_CLKDIV_DMC0_DMCD_MASK |
+ S5P_CLKDIV_DMC0_DMCP_MASK |
+ S5P_CLKDIV_DMC0_COPY2_MASK |
+ S5P_CLKDIV_DMC0_CORETI_MASK);
+
+ tmp |= ((exynos4210_clkdiv_dmc0[i][0] <<
+ S5P_CLKDIV_DMC0_ACP_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][1] <<
+ S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][2] <<
+ S5P_CLKDIV_DMC0_DPHY_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][3] <<
+ S5P_CLKDIV_DMC0_DMC_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][4] <<
+ S5P_CLKDIV_DMC0_DMCD_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][5] <<
+ S5P_CLKDIV_DMC0_DMCP_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][6] <<
+ S5P_CLKDIV_DMC0_COPY2_SHIFT) |
+ (exynos4210_clkdiv_dmc0[i][7] <<
+ S5P_CLKDIV_DMC0_CORETI_SHIFT));
+
+ data->dmc_divtable[i] = tmp;
+ }
+
+ tmp = __raw_readl(S5P_CLKDIV_TOP);
+ for (i = LV_0; i < EX4210_LV_NUM; i++) {
+ tmp &= ~(S5P_CLKDIV_TOP_ACLK200_MASK |
+ S5P_CLKDIV_TOP_ACLK100_MASK |
+ S5P_CLKDIV_TOP_ACLK160_MASK |
+ S5P_CLKDIV_TOP_ACLK133_MASK |
+ S5P_CLKDIV_TOP_ONENAND_MASK);
+
+ tmp |= ((exynos4210_clkdiv_top[i][0] <<
+ S5P_CLKDIV_TOP_ACLK200_SHIFT) |
+ (exynos4210_clkdiv_top[i][1] <<
+ S5P_CLKDIV_TOP_ACLK100_SHIFT) |
+ (exynos4210_clkdiv_top[i][2] <<
+ S5P_CLKDIV_TOP_ACLK160_SHIFT) |
+ (exynos4210_clkdiv_top[i][3] <<
+ S5P_CLKDIV_TOP_ACLK133_SHIFT) |
+ (exynos4210_clkdiv_top[i][4] <<
+ S5P_CLKDIV_TOP_ONENAND_SHIFT));
+
+ data->top_divtable[i] = tmp;
+ }
+
+#ifdef CONFIG_EXYNOS_ASV
+ tmp = exynos4_result_of_asv;
+#else
+ tmp = 0; /* Use the max voltage set when the ASV group is unknown */
+#endif
+
+ pr_debug("ASV Group of Exynos4 is %d\n", tmp);
+ /* Use merged grouping for voltage */
+ switch (tmp) {
+ case 0:
+ mgrp = 0;
+ break;
+ case 1:
+ case 2:
+ mgrp = 1;
+ break;
+ case 3:
+ case 4:
+ mgrp = 2;
+ break;
+ case 5:
+ case 6:
+ mgrp = 3;
+ break;
+ case 7:
+ mgrp = 4;
+ break;
+ default:
+ pr_warn("Unknown ASV Group. Use max voltage.\n");
+ mgrp = 0;
+ }
+
+ for (i = LV_0; i < EX4210_LV_NUM; i++)
+ exynos4210_busclk_table[i].volt = exynos4210_asv_volt[mgrp][i];
+
+ for (i = LV_0; i < EX4210_LV_NUM; i++) {
+ err = opp_add(data->dev, exynos4210_busclk_table[i].clk,
+ exynos4210_busclk_table[i].volt);
+ if (err) {
+ dev_err(data->dev, "Cannot add opp entries.\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
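+/*
+ * Exynos4x12 variant: enable the DREX2 pause feature, precompute the
+ * CLKDIV_DMC0 values for every level, apply the ASV-group voltages to the
+ * MIF and INT tables and register the MIF OPPs.
+ */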
+static int exynos4x12_init_tables(struct busfreq_data *data)
+{
+ unsigned int i;
+ unsigned int tmp;
+ int ret;
+
+ /* Enable pause function for DREX2 DVFS */
+ tmp = __raw_readl(S5P_DMC_PAUSE_CTRL);
+ tmp |= DMC_PAUSE_ENABLE;
+ __raw_writel(tmp, S5P_DMC_PAUSE_CTRL);
+
+ tmp = __raw_readl(S5P_CLKDIV_DMC0);
+
+ for (i = 0; i < EX4x12_LV_NUM; i++) {
+ tmp &= ~(S5P_CLKDIV_DMC0_ACP_MASK |
+ S5P_CLKDIV_DMC0_ACPPCLK_MASK |
+ S5P_CLKDIV_DMC0_DPHY_MASK |
+ S5P_CLKDIV_DMC0_DMC_MASK |
+ S5P_CLKDIV_DMC0_DMCD_MASK |
+ S5P_CLKDIV_DMC0_DMCP_MASK);
+
+ tmp |= ((exynos4x12_clkdiv_dmc0[i][0] <<
+ S5P_CLKDIV_DMC0_ACP_SHIFT) |
+ (exynos4x12_clkdiv_dmc0[i][1] <<
+ S5P_CLKDIV_DMC0_ACPPCLK_SHIFT) |
+ (exynos4x12_clkdiv_dmc0[i][2] <<
+ S5P_CLKDIV_DMC0_DPHY_SHIFT) |
+ (exynos4x12_clkdiv_dmc0[i][3] <<
+ S5P_CLKDIV_DMC0_DMC_SHIFT) |
+ (exynos4x12_clkdiv_dmc0[i][4] <<
+ S5P_CLKDIV_DMC0_DMCD_SHIFT) |
+ (exynos4x12_clkdiv_dmc0[i][5] <<
+ S5P_CLKDIV_DMC0_DMCP_SHIFT));
+
+ data->dmc_divtable[i] = tmp;
+ }
+
+#ifdef CONFIG_EXYNOS_ASV
+ tmp = exynos4_result_of_asv;
+#else
+ tmp = 0; /* Use the max voltage set when the ASV group is unknown */
+#endif
+
+ if (tmp > 8)
+ tmp = 0;
+ pr_debug("ASV Group of Exynos4x12 is %d\n", tmp);
+
+ for (i = 0; i < EX4x12_LV_NUM; i++) {
+ exynos4x12_mifclk_table[i].volt =
+ exynos4x12_mif_step_50[tmp][i];
+ exynos4x12_intclk_table[i].volt =
+ exynos4x12_int_volt[tmp][i];
+ }
+
+ for (i = 0; i < EX4x12_LV_NUM; i++) {
+ ret = opp_add(data->dev, exynos4x12_mifclk_table[i].clk,
+ exynos4x12_mifclk_table[i].volt);
+ if (ret) {
+ dev_err(data->dev, "Failed to add opp entries.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
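+/*
+ * PM notifier: before suspend, switch to the highest available OPP and
+ * disable further DVFS transitions; re-enable them after resume or restore.
+ */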
+static int exynos4_busfreq_pm_notifier_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct busfreq_data *data = container_of(this, struct busfreq_data,
+ pm_notifier);
+ struct opp *opp;
+ unsigned long maxfreq = ULONG_MAX;
+ int err = 0;
+
+ switch (event) {
+ case PM_SUSPEND_PREPARE:
+ /* Set Fastest and Deactivate DVFS */
+ mutex_lock(&data->lock);
+
+ data->disabled = true;
+
+ opp = opp_find_freq_floor(data->dev, &maxfreq);
+
+ err = exynos4_bus_setvolt(data, opp, data->curr_opp);
+ if (err)
+ goto unlock;
+
+ switch (data->type) {
+ case TYPE_BUSF_EXYNOS4210:
+ err = exynos4210_set_busclk(data, opp);
+ break;
+ case TYPE_BUSF_EXYNOS4x12:
+ err = exynos4x12_set_busclk(data, opp);
+ break;
+ default:
+ err = -EINVAL;
+ }
+ if (err)
+ goto unlock;
+
+ data->curr_opp = opp;
+unlock:
+ mutex_unlock(&data->lock);
+ if (err)
+ return err;
+ return NOTIFY_OK;
+ case PM_POST_RESTORE:
+ case PM_POST_SUSPEND:
+ /* Reactivate */
+ mutex_lock(&data->lock);
+ data->disabled = false;
+ mutex_unlock(&data->lock);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static __devinit int exynos4_busfreq_probe(struct platform_device *pdev)
+{
+ struct busfreq_data *data;
+ struct opp *opp;
+ struct device *dev = &pdev->dev;
+ int err = 0;
+
+ data = kzalloc(sizeof(struct busfreq_data), GFP_KERNEL);
+ if (data == NULL) {
+ dev_err(dev, "Cannot allocate memory.\n");
+ return -ENOMEM;
+ }
+
+ data->type = pdev->id_entry->driver_data;
+ data->dmc[0].hw_base = S5P_VA_DMC0;
+ data->dmc[1].hw_base = S5P_VA_DMC1;
+ data->pm_notifier.notifier_call = exynos4_busfreq_pm_notifier_event;
+ data->dev = dev;
+ mutex_init(&data->lock);
+
+ switch (data->type) {
+ case TYPE_BUSF_EXYNOS4210:
+ err = exynos4210_init_tables(data);
+ break;
+ case TYPE_BUSF_EXYNOS4x12:
+ err = exynos4x12_init_tables(data);
+ break;
+ default:
+ dev_err(dev, "Cannot determine the device id %d\n", data->type);
+ err = -EINVAL;
+ }
+ if (err)
+ goto err_regulator;
+
+ data->vdd_int = regulator_get(dev, "vdd_int");
+ if (IS_ERR(data->vdd_int)) {
+ dev_err(dev, "Cannot get the regulator \"vdd_int\"\n");
+ err = PTR_ERR(data->vdd_int);
+ goto err_regulator;
+ }
+ if (data->type == TYPE_BUSF_EXYNOS4x12) {
+ data->vdd_mif = regulator_get(dev, "vdd_mif");
+ if (IS_ERR(data->vdd_mif)) {
+ dev_err(dev, "Cannot get the regulator \"vdd_mif\"\n");
+ err = PTR_ERR(data->vdd_mif);
+ regulator_put(data->vdd_int);
+ goto err_regulator;
+ }
+ }
+
+ opp = opp_find_freq_floor(dev, &exynos4_devfreq_profile.initial_freq);
+ if (IS_ERR(opp)) {
+ dev_err(dev, "Invalid initial frequency %lu kHz.\n",
+ exynos4_devfreq_profile.initial_freq);
+ err = PTR_ERR(opp);
+ goto err_opp_add;
+ }
+ data->curr_opp = opp;
+
+ platform_set_drvdata(pdev, data);
+
+ busfreq_mon_reset(data);
+
+ data->devfreq = devfreq_add_device(dev, &exynos4_devfreq_profile,
+ &devfreq_simple_ondemand, NULL);
+ if (IS_ERR(data->devfreq)) {
+ err = PTR_ERR(data->devfreq);
+ goto err_opp_add;
+ }
+
+ devfreq_register_opp_notifier(dev, data->devfreq);
+
+ err = register_pm_notifier(&data->pm_notifier);
+ if (err) {
+ dev_err(dev, "Failed to set up pm notifier\n");
+ goto err_devfreq_add;
+ }
+
+ return 0;
+err_devfreq_add:
+ devfreq_remove_device(data->devfreq);
+err_opp_add:
+ if (data->vdd_mif)
+ regulator_put(data->vdd_mif);
+ regulator_put(data->vdd_int);
+err_regulator:
+ kfree(data);
+ return err;
+}
+
+static __devexit int exynos4_busfreq_remove(struct platform_device *pdev)
+{
+ struct busfreq_data *data = platform_get_drvdata(pdev);
+
+ unregister_pm_notifier(&data->pm_notifier);
+ devfreq_remove_device(data->devfreq);
+ regulator_put(data->vdd_int);
+ if (data->vdd_mif)
+ regulator_put(data->vdd_mif);
+ kfree(data);
+
+ return 0;
+}
+
+static int exynos4_busfreq_resume(struct device *dev)
+{
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+ struct busfreq_data *data = platform_get_drvdata(pdev);
+
+ busfreq_mon_reset(data);
+ return 0;
+}
+
+static const struct dev_pm_ops exynos4_busfreq_pm = {
+ .resume = exynos4_busfreq_resume,
+};
+
+static const struct platform_device_id exynos4_busfreq_id[] = {
+ { "exynos4210-busfreq", TYPE_BUSF_EXYNOS4210 },
+ { "exynos4412-busfreq", TYPE_BUSF_EXYNOS4x12 },
+ { "exynos4212-busfreq", TYPE_BUSF_EXYNOS4x12 },
+ { },
+};
+
+static struct platform_driver exynos4_busfreq_driver = {
+ .probe = exynos4_busfreq_probe,
+ .remove = __devexit_p(exynos4_busfreq_remove),
+ .id_table = exynos4_busfreq_id,
+ .driver = {
+ .name = "exynos4-busfreq",
+ .owner = THIS_MODULE,
+ .pm = &exynos4_busfreq_pm,
+ },
+};
+
+static int __init exynos4_busfreq_init(void)
+{
+ return platform_driver_register(&exynos4_busfreq_driver);
+}
+late_initcall(exynos4_busfreq_init);
+
+static void __exit exynos4_busfreq_exit(void)
+{
+ platform_driver_unregister(&exynos4_busfreq_driver);
+}
+module_exit(exynos4_busfreq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("EXYNOS4 busfreq driver with devfreq framework");
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_ALIAS("exynos4-busfreq");
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 5a99bb3..f1a2749 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -124,7 +124,7 @@ config MV_XOR
config MX3_IPU
bool "MX3x Image Processing Unit support"
- depends on SOC_IMX31 || SOC_IMX35
+ depends on ARCH_MXC
select DMA_ENGINE
default y
help
@@ -187,6 +187,13 @@ config TIMB_DMA
help
Enable support for the Timberdale FPGA DMA engine.
+config SIRF_DMA
+ tristate "CSR SiRFprimaII DMA support"
+ depends on ARCH_PRIMA2
+ select DMA_ENGINE
+ help
+ Enable support for the CSR SiRFprimaII DMA engine.
+
config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
bool
@@ -201,26 +208,26 @@ config PL330_DMA
platform_data for a dma-pl330 device.
config PCH_DMA
- tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223) DMA support"
+ tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA"
depends on PCI && X86
select DMA_ENGINE
help
Enable support for Intel EG20T PCH DMA engine.
- This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
- Output Hub), ML7213 and ML7223.
- ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
- for MP(Media Phone) use.
- ML7213/ML7223 is companion chip for Intel Atom E6xx series.
- ML7213/ML7223 is completely compatible for Intel EG20T PCH.
+ This driver can also be used for the LAPIS Semiconductor IOH (Input/
+ Output Hub) devices ML7213, ML7223 and ML7831.
+ The ML7213 IOH is for IVI (In-Vehicle Infotainment) use, the ML7223
+ IOH is for MP (Media Phone) use, and the ML7831 IOH is for general
+ purpose use.
+ ML7213/ML7223/ML7831 are companion chips for the Intel Atom E6xx
+ series and are fully compatible with the Intel EG20T PCH.
config IMX_SDMA
tristate "i.MX SDMA support"
- depends on ARCH_MX25 || SOC_IMX31 || SOC_IMX35 || ARCH_MX5
+ depends on ARCH_MXC
select DMA_ENGINE
help
Support the i.MX SDMA engine. This engine is integrated into
- Freescale i.MX25/31/35/51 chips.
+ Freescale i.MX25/31/35/51/53 chips.
config IMX_DMA
tristate "i.MX DMA support"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 30cf3b1..009a222 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
obj-$(CONFIG_IMX_DMA) += imx-dma.o
obj-$(CONFIG_MXS_DMA) += mxs-dma.o
obj-$(CONFIG_TIMB_DMA) += timb_dma.o
+obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
obj-$(CONFIG_PL330_DMA) += pl330.o
obj-$(CONFIG_PCH_DMA) += pch_dma.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index b7cbd1a..8a28158 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -854,8 +854,10 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
int ret;
/* Check if we already have a channel */
- if (plchan->phychan)
- return 0;
+ if (plchan->phychan) {
+ ch = plchan->phychan;
+ goto got_channel;
+ }
ch = pl08x_get_phy_channel(pl08x, plchan);
if (!ch) {
@@ -880,21 +882,22 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
return -EBUSY;
}
ch->signal = ret;
-
- /* Assign the flow control signal to this channel */
- if (txd->direction == DMA_TO_DEVICE)
- txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
- else if (txd->direction == DMA_FROM_DEVICE)
- txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
}
+ plchan->phychan = ch;
dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
ch->id,
ch->signal,
plchan->name);
+got_channel:
+ /* Assign the flow control signal to this channel */
+ if (txd->direction == DMA_MEM_TO_DEV)
+ txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
+ else if (txd->direction == DMA_DEV_TO_MEM)
+ txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
+
plchan->phychan_hold++;
- plchan->phychan = ch;
return 0;
}
@@ -1102,10 +1105,10 @@ static int dma_set_runtime_config(struct dma_chan *chan,
/* Transfer direction */
plchan->runtime_direction = config->direction;
- if (config->direction == DMA_TO_DEVICE) {
+ if (config->direction == DMA_MEM_TO_DEV) {
addr_width = config->dst_addr_width;
maxburst = config->dst_maxburst;
- } else if (config->direction == DMA_FROM_DEVICE) {
+ } else if (config->direction == DMA_DEV_TO_MEM) {
addr_width = config->src_addr_width;
maxburst = config->src_maxburst;
} else {
@@ -1136,7 +1139,7 @@ static int dma_set_runtime_config(struct dma_chan *chan,
cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
- if (plchan->runtime_direction == DMA_FROM_DEVICE) {
+ if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
plchan->src_addr = config->src_addr;
plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
pl08x_select_bus(plchan->cd->periph_buses,
@@ -1152,7 +1155,7 @@ static int dma_set_runtime_config(struct dma_chan *chan,
"configured channel %s (%s) for %s, data width %d, "
"maxburst %d words, LE, CCTL=0x%08x\n",
dma_chan_name(chan), plchan->name,
- (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
+ (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
addr_width,
maxburst,
cctl);
@@ -1322,7 +1325,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
@@ -1354,10 +1357,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
*/
txd->direction = direction;
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
txd->cctl = plchan->dst_cctl;
slave_addr = plchan->dst_addr;
- } else if (direction == DMA_FROM_DEVICE) {
+ } else if (direction == DMA_DEV_TO_MEM) {
txd->cctl = plchan->src_cctl;
slave_addr = plchan->src_addr;
} else {
@@ -1368,10 +1371,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
}
if (plchan->cd->device_fc)
- tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER :
+ tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
PL080_FLOW_PER2MEM_PER;
else
- tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER :
+ tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
PL080_FLOW_PER2MEM;
txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
@@ -1387,7 +1390,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
list_add_tail(&dsg->node, &txd->dsg_list);
dsg->len = sg_dma_len(sg);
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
dsg->src_addr = sg_phys(sg);
dsg->dst_addr = slave_addr;
} else {
@@ -2054,6 +2057,8 @@ static struct amba_id pl08x_ids[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, pl08x_ids);
+
static struct amba_driver pl08x_amba_driver = {
.drv.name = DRIVER_NAME,
.id_table = pl08x_ids,
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index fcfa0a8..f4aed5f 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -23,6 +23,8 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include "at_hdmac_regs.h"
@@ -660,7 +662,7 @@ err_desc_get:
*/
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
@@ -678,7 +680,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
sg_len,
- direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
+ direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
flags);
if (unlikely(!atslave || !sg_len)) {
@@ -692,7 +694,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
ctrlb = ATC_IEN;
switch (direction) {
- case DMA_TO_DEVICE:
+ case DMA_MEM_TO_DEV:
ctrla |= ATC_DST_WIDTH(reg_width);
ctrlb |= ATC_DST_ADDR_MODE_FIXED
| ATC_SRC_ADDR_MODE_INCR
@@ -725,7 +727,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
total_len += len;
}
break;
- case DMA_FROM_DEVICE:
+ case DMA_DEV_TO_MEM:
ctrla |= ATC_SRC_WIDTH(reg_width);
ctrlb |= ATC_DST_ADDR_MODE_INCR
| ATC_SRC_ADDR_MODE_FIXED
@@ -787,7 +789,7 @@ err_desc_get:
*/
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
- size_t period_len, enum dma_data_direction direction)
+ size_t period_len, enum dma_transfer_direction direction)
{
if (period_len > (ATC_BTSIZE_MAX << reg_width))
goto err_out;
@@ -795,7 +797,7 @@ atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
goto err_out;
if (unlikely(buf_addr & ((1 << reg_width) - 1)))
goto err_out;
- if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+ if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV))))
goto err_out;
return 0;
@@ -810,7 +812,7 @@ err_out:
static int
atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
unsigned int period_index, dma_addr_t buf_addr,
- size_t period_len, enum dma_data_direction direction)
+ size_t period_len, enum dma_transfer_direction direction)
{
u32 ctrla;
unsigned int reg_width = atslave->reg_width;
@@ -822,7 +824,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
| period_len >> reg_width;
switch (direction) {
- case DMA_TO_DEVICE:
+ case DMA_MEM_TO_DEV:
desc->lli.saddr = buf_addr + (period_len * period_index);
desc->lli.daddr = atslave->tx_reg;
desc->lli.ctrla = ctrla;
@@ -833,7 +835,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
| ATC_DIF(AT_DMA_PER_IF);
break;
- case DMA_FROM_DEVICE:
+ case DMA_DEV_TO_MEM:
desc->lli.saddr = atslave->rx_reg;
desc->lli.daddr = buf_addr + (period_len * period_index);
desc->lli.ctrla = ctrla;
@@ -861,7 +863,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
*/
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
- size_t period_len, enum dma_data_direction direction)
+ size_t period_len, enum dma_transfer_direction direction)
{
struct at_dma_chan *atchan = to_at_dma_chan(chan);
struct at_dma_slave *atslave = chan->private;
@@ -872,7 +874,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
unsigned int i;
dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
- direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
+ direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
buf_addr,
periods, buf_len, period_len);
@@ -1175,6 +1177,56 @@ static void atc_free_chan_resources(struct dma_chan *chan)
/*-- Module Management -----------------------------------------------*/
+/*
+ * cap_mask is a multi-u32 bitfield, so it cannot be initialized
+ * statically; it is filled in at probe time instead.
+ */
+static struct at_dma_platform_data at91sam9rl_config = {
+ .nr_channels = 2,
+};
+static struct at_dma_platform_data at91sam9g45_config = {
+ .nr_channels = 8,
+};
+
+#if defined(CONFIG_OF)
+static const struct of_device_id atmel_dma_dt_ids[] = {
+ {
+ .compatible = "atmel,at91sam9rl-dma",
+ .data = &at91sam9rl_config,
+ }, {
+ .compatible = "atmel,at91sam9g45-dma",
+ .data = &at91sam9g45_config,
+ }, {
+ /* sentinel */
+ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
+#endif
+
+static const struct platform_device_id atdma_devtypes[] = {
+ {
+ .name = "at91sam9rl_dma",
+ .driver_data = (unsigned long) &at91sam9rl_config,
+ }, {
+ .name = "at91sam9g45_dma",
+ .driver_data = (unsigned long) &at91sam9g45_config,
+ }, {
+ /* sentinel */
+ }
+};
+
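+/*
+ * Resolve the per-SoC configuration, either from the DT match table or
+ * from the platform_device_id entry selected by the device name.
+ */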
+static inline struct at_dma_platform_data * __init at_dma_get_driver_data(
+ struct platform_device *pdev)
+{
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
+ if (match == NULL)
+ return NULL;
+ return match->data;
+ }
+ return (struct at_dma_platform_data *)
+ platform_get_device_id(pdev)->driver_data;
+}
+
/**
* at_dma_off - disable DMA controller
* @atdma: the Atmel HDAMC device
@@ -1193,18 +1245,23 @@ static void at_dma_off(struct at_dma *atdma)
static int __init at_dma_probe(struct platform_device *pdev)
{
- struct at_dma_platform_data *pdata;
struct resource *io;
struct at_dma *atdma;
size_t size;
int irq;
int err;
int i;
+ struct at_dma_platform_data *plat_dat;
- /* get DMA Controller parameters from platform */
- pdata = pdev->dev.platform_data;
- if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS)
- return -EINVAL;
+ /* setup platform data for each SoC */
+ dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
+ dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
+ dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
+
+ /* get DMA parameters from controller type */
+ plat_dat = at_dma_get_driver_data(pdev);
+ if (!plat_dat)
+ return -ENODEV;
io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!io)
@@ -1215,14 +1272,14 @@ static int __init at_dma_probe(struct platform_device *pdev)
return irq;
size = sizeof(struct at_dma);
- size += pdata->nr_channels * sizeof(struct at_dma_chan);
+ size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
atdma = kzalloc(size, GFP_KERNEL);
if (!atdma)
return -ENOMEM;
- /* discover transaction capabilites from the platform data */
- atdma->dma_common.cap_mask = pdata->cap_mask;
- atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;
+ /* discover transaction capabilities */
+ atdma->dma_common.cap_mask = plat_dat->cap_mask;
+ atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;
size = resource_size(io);
if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
@@ -1268,7 +1325,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
/* initialize channels related values */
INIT_LIST_HEAD(&atdma->dma_common.channels);
- for (i = 0; i < pdata->nr_channels; i++) {
+ for (i = 0; i < plat_dat->nr_channels; i++) {
struct at_dma_chan *atchan = &atdma->chan[i];
atchan->chan_common.device = &atdma->dma_common;
@@ -1286,7 +1343,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
tasklet_init(&atchan->tasklet, atc_tasklet,
(unsigned long)atchan);
- atc_enable_irq(atchan);
+ atc_enable_chan_irq(atdma, i);
}
/* set base routines */
@@ -1313,7 +1370,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
- pdata->nr_channels);
+ plat_dat->nr_channels);
dma_async_device_register(&atdma->dma_common);
@@ -1353,7 +1410,7 @@ static int __exit at_dma_remove(struct platform_device *pdev)
struct at_dma_chan *atchan = to_at_dma_chan(chan);
/* Disable interrupts */
- atc_disable_irq(atchan);
+ atc_disable_chan_irq(atdma, chan->chan_id);
tasklet_disable(&atchan->tasklet);
tasklet_kill(&atchan->tasklet);
@@ -1495,9 +1552,11 @@ static const struct dev_pm_ops at_dma_dev_pm_ops = {
static struct platform_driver at_dma_driver = {
.remove = __exit_p(at_dma_remove),
.shutdown = at_dma_shutdown,
+ .id_table = atdma_devtypes,
.driver = {
.name = "at_hdmac",
.pm = &at_dma_dev_pm_ops,
+ .of_match_table = of_match_ptr(atmel_dma_dt_ids),
},
};
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index aa4c9ae..a8d3277 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -251,6 +251,7 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
/**
* struct at_dma - internal representation of an Atmel HDMA Controller
* @chan_common: common dmaengine dma_device object members
+ * @atdma_devtype: identifier of DMA controller compatibility
* @ch_regs: memory mapped register base
* @clk: dma controller clock
* @save_imr: interrupt mask register that is saved on suspend/resume cycle
@@ -326,28 +327,27 @@ static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
}
-static void atc_setup_irq(struct at_dma_chan *atchan, int on)
+static void atc_setup_irq(struct at_dma *atdma, int chan_id, int on)
{
- struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
- u32 ebci;
+ u32 ebci;
/* enable interrupts on buffer transfer completion & error */
- ebci = AT_DMA_BTC(atchan->chan_common.chan_id)
- | AT_DMA_ERR(atchan->chan_common.chan_id);
+ ebci = AT_DMA_BTC(chan_id)
+ | AT_DMA_ERR(chan_id);
if (on)
dma_writel(atdma, EBCIER, ebci);
else
dma_writel(atdma, EBCIDR, ebci);
}
-static inline void atc_enable_irq(struct at_dma_chan *atchan)
+static void atc_enable_chan_irq(struct at_dma *atdma, int chan_id)
{
- atc_setup_irq(atchan, 1);
+ atc_setup_irq(atdma, chan_id, 1);
}
-static inline void atc_disable_irq(struct at_dma_chan *atchan)
+static void atc_disable_chan_irq(struct at_dma *atdma, int chan_id)
{
- atc_setup_irq(atchan, 0);
+ atc_setup_irq(atdma, chan_id, 0);
}
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 4234f41..d65a718 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -39,7 +39,7 @@ struct coh901318_desc {
struct scatterlist *sg;
unsigned int sg_len;
struct coh901318_lli *lli;
- enum dma_data_direction dir;
+ enum dma_transfer_direction dir;
unsigned long flags;
u32 head_config;
u32 head_ctrl;
@@ -1034,7 +1034,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
static struct dma_async_tx_descriptor *
coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct coh901318_chan *cohc = to_coh901318_chan(chan);
@@ -1077,7 +1077,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
ctrl_last |= cohc->runtime_ctrl;
ctrl |= cohc->runtime_ctrl;
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;
@@ -1085,7 +1085,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
ctrl_chained |= tx_flags;
ctrl_last |= tx_flags;
ctrl |= tx_flags;
- } else if (direction == DMA_FROM_DEVICE) {
+ } else if (direction == DMA_DEV_TO_MEM) {
u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;
@@ -1274,11 +1274,11 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
int i = 0;
/* We only support mem to per or per to mem transfers */
- if (config->direction == DMA_FROM_DEVICE) {
+ if (config->direction == DMA_DEV_TO_MEM) {
addr = config->src_addr;
addr_width = config->src_addr_width;
maxburst = config->src_maxburst;
- } else if (config->direction == DMA_TO_DEVICE) {
+ } else if (config->direction == DMA_MEM_TO_DEV) {
addr = config->dst_addr;
addr_width = config->dst_addr_width;
maxburst = config->dst_maxburst;
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index 9f7e0e6a..6c0e2d4 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -7,11 +7,10 @@
* Author: Per Friden <per.friden@stericsson.com>
*/
-#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
-#include <linux/dmapool.h>
#include <linux/memory.h>
#include <linux/gfp.h>
+#include <linux/dmapool.h>
#include <mach/coh901318.h>
#include "coh901318_lli.h"
@@ -177,18 +176,18 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
struct coh901318_lli *lli,
dma_addr_t buf, unsigned int size,
dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom,
- enum dma_data_direction dir)
+ enum dma_transfer_direction dir)
{
int s = size;
dma_addr_t src;
dma_addr_t dst;
- if (dir == DMA_TO_DEVICE) {
+ if (dir == DMA_MEM_TO_DEV) {
src = buf;
dst = dev_addr;
- } else if (dir == DMA_FROM_DEVICE) {
+ } else if (dir == DMA_DEV_TO_MEM) {
src = dev_addr;
dst = buf;
@@ -215,9 +214,9 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
lli = coh901318_lli_next(lli);
- if (dir == DMA_TO_DEVICE)
+ if (dir == DMA_MEM_TO_DEV)
src += block_size;
- else if (dir == DMA_FROM_DEVICE)
+ else if (dir == DMA_DEV_TO_MEM)
dst += block_size;
}
@@ -234,7 +233,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
struct scatterlist *sgl, unsigned int nents,
dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl,
u32 ctrl_last,
- enum dma_data_direction dir, u32 ctrl_irq_mask)
+ enum dma_transfer_direction dir, u32 ctrl_irq_mask)
{
int i;
struct scatterlist *sg;
@@ -249,9 +248,9 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
spin_lock(&pool->lock);
- if (dir == DMA_TO_DEVICE)
+ if (dir == DMA_MEM_TO_DEV)
dst = dev_addr;
- else if (dir == DMA_FROM_DEVICE)
+ else if (dir == DMA_DEV_TO_MEM)
src = dev_addr;
else
goto err;
@@ -269,7 +268,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
ctrl_sg = ctrl ? ctrl : ctrl_last;
- if (dir == DMA_TO_DEVICE)
+ if (dir == DMA_MEM_TO_DEV)
/* increment source address */
src = sg_phys(sg);
else
@@ -293,7 +292,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
lli->src_addr = src;
lli->dst_addr = dst;
- if (dir == DMA_FROM_DEVICE)
+ if (dir == DMA_DEV_TO_MEM)
dst += elem_size;
else
src += elem_size;
diff --git a/drivers/dma/coh901318_lli.h b/drivers/dma/coh901318_lli.h
index 7a5c809..abff371 100644
--- a/drivers/dma/coh901318_lli.h
+++ b/drivers/dma/coh901318_lli.h
@@ -97,7 +97,7 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
struct coh901318_lli *lli,
dma_addr_t buf, unsigned int size,
dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_last,
- enum dma_data_direction dir);
+ enum dma_transfer_direction dir);
/**
* coh901318_lli_fill_single() - Prepares the lli:s for dma scatter list transfer
@@ -119,6 +119,6 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
struct scatterlist *sg, unsigned int nents,
dma_addr_t dev_addr, u32 ctrl_chained,
u32 ctrl, u32 ctrl_last,
- enum dma_data_direction dir, u32 ctrl_irq_mask);
+ enum dma_transfer_direction dir, u32 ctrl_irq_mask);
#endif /* COH901318_LLI_H */
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index b48967b..a6c6051 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -693,12 +693,12 @@ int dma_async_device_register(struct dma_device *device)
!device->device_prep_dma_interrupt);
BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
!device->device_prep_dma_sg);
- BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
- !device->device_prep_slave_sg);
BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
!device->device_prep_dma_cyclic);
BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
!device->device_control);
+ BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
+ !device->device_prep_interleaved_dma);
BUG_ON(!device->device_alloc_chan_resources);
BUG_ON(!device->device_free_chan_resources);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index eb1d864..24225f0 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -214,9 +214,18 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start,
return error_count;
}
-static void dmatest_callback(void *completion)
+/* poor man's completion - we want to use wait_event_freezable() on it */
+struct dmatest_done {
+ bool done;
+ wait_queue_head_t *wait;
+};
+
+static void dmatest_callback(void *arg)
{
- complete(completion);
+ struct dmatest_done *done = arg;
+
+ done->done = true;
+ wake_up_all(done->wait);
}
/*
@@ -235,7 +244,9 @@ static void dmatest_callback(void *completion)
*/
static int dmatest_func(void *data)
{
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait);
struct dmatest_thread *thread = data;
+ struct dmatest_done done = { .wait = &done_wait };
struct dma_chan *chan;
const char *thread_name;
unsigned int src_off, dst_off, len;
@@ -252,7 +263,7 @@ static int dmatest_func(void *data)
int i;
thread_name = current->comm;
- set_freezable_with_signal();
+ set_freezable();
ret = -ENOMEM;
@@ -306,9 +317,6 @@ static int dmatest_func(void *data)
struct dma_async_tx_descriptor *tx = NULL;
dma_addr_t dma_srcs[src_cnt];
dma_addr_t dma_dsts[dst_cnt];
- struct completion cmp;
- unsigned long start, tmo, end = 0 /* compiler... */;
- bool reload = true;
u8 align = 0;
total_tests++;
@@ -391,9 +399,9 @@ static int dmatest_func(void *data)
continue;
}
- init_completion(&cmp);
+ done.done = false;
tx->callback = dmatest_callback;
- tx->callback_param = &cmp;
+ tx->callback_param = &done;
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
@@ -407,20 +415,20 @@ static int dmatest_func(void *data)
}
dma_async_issue_pending(chan);
- do {
- start = jiffies;
- if (reload)
- end = start + msecs_to_jiffies(timeout);
- else if (end <= start)
- end = start + 1;
- tmo = wait_for_completion_interruptible_timeout(&cmp,
- end - start);
- reload = try_to_freeze();
- } while (tmo == -ERESTARTSYS);
+ wait_event_freezable_timeout(done_wait, done.done,
+ msecs_to_jiffies(timeout));
status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
- if (tmo == 0) {
+ if (!done.done) {
+ /*
+ * We're leaving the timed out dma operation with
+ * dangling pointer to done_wait. To make this
+ * correct, we'll need to allocate wait_done for
+ * each test iteration and perform "who's gonna
+ * free it this time?" dancing. For now, just
+ * leave it dangling.
+ */
pr_warning("%s: #%u: test timed out\n",
thread_name, total_tests - 1);
failed_tests++;
@@ -591,7 +599,7 @@ static int dmatest_add_channel(struct dma_chan *chan)
}
if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
cnt = dmatest_add_threads(dtc, DMA_PQ);
- thread_count += cnt > 0 ?: 0;
+ thread_count += cnt > 0 ? cnt : 0;
}
pr_info("dmatest: Started %u threads using %s\n",
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 9bfd6d3..9b592b0 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -166,6 +166,38 @@ dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
return cookie;
}
+static void dwc_initialize(struct dw_dma_chan *dwc)
+{
+ struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+ struct dw_dma_slave *dws = dwc->chan.private;
+ u32 cfghi = DWC_CFGH_FIFO_MODE;
+ u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
+
+ if (dwc->initialized == true)
+ return;
+
+ if (dws) {
+ /*
+ * We need controller-specific data to set up slave
+ * transfers.
+ */
+ BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
+
+ cfghi = dws->cfg_hi;
+ cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
+ }
+
+ channel_writel(dwc, CFG_LO, cfglo);
+ channel_writel(dwc, CFG_HI, cfghi);
+
+ /* Enable interrupts */
+ channel_set_bit(dw, MASK.XFER, dwc->mask);
+ channel_set_bit(dw, MASK.BLOCK, dwc->mask);
+ channel_set_bit(dw, MASK.ERROR, dwc->mask);
+
+ dwc->initialized = true;
+}
+
/*----------------------------------------------------------------------*/
/* Called with dwc->lock held and bh disabled */
@@ -189,6 +221,8 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
return;
}
+ dwc_initialize(dwc);
+
channel_writel(dwc, LLP, first->txd.phys);
channel_writel(dwc, CTL_LO,
DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
@@ -696,7 +730,7 @@ err_desc_get:
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
@@ -720,7 +754,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
prev = first = NULL;
switch (direction) {
- case DMA_TO_DEVICE:
+ case DMA_MEM_TO_DEV:
ctllo = (DWC_DEFAULT_CTLLO(chan->private)
| DWC_CTLL_DST_WIDTH(reg_width)
| DWC_CTLL_DST_FIX
@@ -777,7 +811,7 @@ slave_sg_todev_fill_desc:
goto slave_sg_todev_fill_desc;
}
break;
- case DMA_FROM_DEVICE:
+ case DMA_DEV_TO_MEM:
ctllo = (DWC_DEFAULT_CTLLO(chan->private)
| DWC_CTLL_SRC_WIDTH(reg_width)
| DWC_CTLL_DST_INC
@@ -959,10 +993,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
struct dw_desc *desc;
- struct dw_dma_slave *dws;
int i;
- u32 cfghi;
- u32 cfglo;
unsigned long flags;
dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
@@ -975,26 +1006,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
dwc->completed = chan->cookie = 1;
- cfghi = DWC_CFGH_FIFO_MODE;
- cfglo = 0;
-
- dws = chan->private;
- if (dws) {
- /*
- * We need controller-specific data to set up slave
- * transfers.
- */
- BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
-
- cfghi = dws->cfg_hi;
- cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
- }
-
- cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority);
-
- channel_writel(dwc, CFG_LO, cfglo);
- channel_writel(dwc, CFG_HI, cfghi);
-
/*
* NOTE: some controllers may have additional features that we
* need to initialize here, like "scatter-gather" (which
@@ -1026,11 +1037,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
i = ++dwc->descs_allocated;
}
- /* Enable interrupts */
- channel_set_bit(dw, MASK.XFER, dwc->mask);
- channel_set_bit(dw, MASK.BLOCK, dwc->mask);
- channel_set_bit(dw, MASK.ERROR, dwc->mask);
-
spin_unlock_irqrestore(&dwc->lock, flags);
dev_dbg(chan2dev(chan),
@@ -1058,6 +1064,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
spin_lock_irqsave(&dwc->lock, flags);
list_splice_init(&dwc->free_list, &list);
dwc->descs_allocated = 0;
+ dwc->initialized = false;
/* Disable interrupts */
channel_clear_bit(dw, MASK.XFER, dwc->mask);
@@ -1165,7 +1172,7 @@ EXPORT_SYMBOL(dw_dma_cyclic_stop);
*/
struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
dma_addr_t buf_addr, size_t buf_len, size_t period_len,
- enum dma_data_direction direction)
+ enum dma_transfer_direction direction)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_cyclic_desc *cdesc;
@@ -1206,7 +1213,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
goto out_err;
if (unlikely(buf_addr & ((1 << reg_width) - 1)))
goto out_err;
- if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+ if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
goto out_err;
retval = ERR_PTR(-ENOMEM);
@@ -1228,7 +1235,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
goto out_err_desc_get;
switch (direction) {
- case DMA_TO_DEVICE:
+ case DMA_MEM_TO_DEV:
desc->lli.dar = dws->tx_reg;
desc->lli.sar = buf_addr + (period_len * i);
desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
@@ -1239,7 +1246,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
| DWC_CTLL_FC(dws->fc)
| DWC_CTLL_INT_EN);
break;
- case DMA_FROM_DEVICE:
+ case DMA_DEV_TO_MEM:
desc->lli.dar = buf_addr + (period_len * i);
desc->lli.sar = dws->rx_reg;
desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
@@ -1335,6 +1342,8 @@ EXPORT_SYMBOL(dw_dma_cyclic_free);
static void dw_dma_off(struct dw_dma *dw)
{
+ int i;
+
dma_writel(dw, CFG, 0);
channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
@@ -1345,6 +1354,9 @@ static void dw_dma_off(struct dw_dma *dw)
while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
cpu_relax();
+
+ for (i = 0; i < dw->dma.chancnt; i++)
+ dw->chan[i].initialized = false;
}
static int __init dw_probe(struct platform_device *pdev)
@@ -1533,6 +1545,7 @@ static int dw_suspend_noirq(struct device *dev)
dw_dma_off(platform_get_drvdata(pdev));
clk_disable(dw->clk);
+
return 0;
}
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index c341951..5eef694 100644
--- a/drivers/dma/dw_dmac_regs.h
+++ b/drivers/dma/dw_dmac_regs.h
@@ -140,6 +140,7 @@ struct dw_dma_chan {
u8 mask;
u8 priority;
bool paused;
+ bool initialized;
spinlock_t lock;
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index b47e2b8..59e7a96 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -246,6 +246,9 @@ static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
+ if (list_empty(&edmac->active))
+ return NULL;
+
return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
}
@@ -263,16 +266,22 @@ ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
*/
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
+ struct ep93xx_dma_desc *desc;
+
list_rotate_left(&edmac->active);
if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
return true;
+ desc = ep93xx_dma_get_active(edmac);
+ if (!desc)
+ return false;
+
/*
* If txd.cookie is set it means that we are back in the first
* descriptor in the chain and hence done with it.
*/
- return !ep93xx_dma_get_active(edmac)->txd.cookie;
+ return !desc->txd.cookie;
}
/*
@@ -327,10 +336,16 @@ static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
- struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+ struct ep93xx_dma_desc *desc;
u32 bus_addr;
- if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_TO_DEVICE)
+ desc = ep93xx_dma_get_active(edmac);
+ if (!desc) {
+ dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
+ return;
+ }
+
+ if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
bus_addr = desc->src_addr;
else
bus_addr = desc->dst_addr;
@@ -443,7 +458,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
control = (5 << M2M_CONTROL_PWSC_SHIFT);
control |= M2M_CONTROL_NO_HDSK;
- if (data->direction == DMA_TO_DEVICE) {
+ if (data->direction == DMA_MEM_TO_DEV) {
control |= M2M_CONTROL_DAH;
control |= M2M_CONTROL_TM_TX;
control |= M2M_CONTROL_RSS_SSPTX;
@@ -459,11 +474,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
* This IDE part is totally untested. Values below are taken
* from the EP93xx Users's Guide and might not be correct.
*/
- control |= M2M_CONTROL_NO_HDSK;
- control |= M2M_CONTROL_RSS_IDE;
- control |= M2M_CONTROL_PW_16;
-
- if (data->direction == DMA_TO_DEVICE) {
+ if (data->direction == DMA_MEM_TO_DEV) {
/* Worst case from the UG */
control = (3 << M2M_CONTROL_PWSC_SHIFT);
control |= M2M_CONTROL_DAH;
@@ -473,6 +484,10 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
control |= M2M_CONTROL_SAH;
control |= M2M_CONTROL_TM_RX;
}
+
+ control |= M2M_CONTROL_NO_HDSK;
+ control |= M2M_CONTROL_RSS_IDE;
+ control |= M2M_CONTROL_PW_16;
break;
default:
@@ -491,7 +506,13 @@ static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
- struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+ struct ep93xx_dma_desc *desc;
+
+ desc = ep93xx_dma_get_active(edmac);
+ if (!desc) {
+ dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
+ return;
+ }
if (edmac->buffer == 0) {
writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
@@ -669,24 +690,30 @@ static void ep93xx_dma_tasklet(unsigned long data)
{
struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
struct ep93xx_dma_desc *desc, *d;
- dma_async_tx_callback callback;
- void *callback_param;
+ dma_async_tx_callback callback = NULL;
+ void *callback_param = NULL;
LIST_HEAD(list);
spin_lock_irq(&edmac->lock);
+ /*
+ * If dma_terminate_all() was called before we get to run, the active
+ * list has become empty. If that happens we aren't supposed to do
+ * anything more than call ep93xx_dma_advance_work().
+ */
desc = ep93xx_dma_get_active(edmac);
- if (desc->complete) {
- edmac->last_completed = desc->txd.cookie;
- list_splice_init(&edmac->active, &list);
+ if (desc) {
+ if (desc->complete) {
+ edmac->last_completed = desc->txd.cookie;
+ list_splice_init(&edmac->active, &list);
+ }
+ callback = desc->txd.callback;
+ callback_param = desc->txd.callback_param;
}
spin_unlock_irq(&edmac->lock);
/* Pick up the next descriptor from the queue */
ep93xx_dma_advance_work(edmac);
- callback = desc->txd.callback;
- callback_param = desc->txd.callback_param;
-
/* Now we can release all the chained descriptors */
list_for_each_entry_safe(desc, d, &list, node) {
/*
@@ -706,13 +733,22 @@ static void ep93xx_dma_tasklet(unsigned long data)
static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
struct ep93xx_dma_chan *edmac = dev_id;
+ struct ep93xx_dma_desc *desc;
irqreturn_t ret = IRQ_HANDLED;
spin_lock(&edmac->lock);
+ desc = ep93xx_dma_get_active(edmac);
+ if (!desc) {
+ dev_warn(chan2dev(edmac),
+ "got interrupt while active list is empty\n");
+ spin_unlock(&edmac->lock);
+ return IRQ_NONE;
+ }
+
switch (edmac->edma->hw_interrupt(edmac)) {
case INTERRUPT_DONE:
- ep93xx_dma_get_active(edmac)->complete = true;
+ desc->complete = true;
tasklet_schedule(&edmac->tasklet);
break;
@@ -803,8 +839,8 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
switch (data->port) {
case EP93XX_DMA_SSP:
case EP93XX_DMA_IDE:
- if (data->direction != DMA_TO_DEVICE &&
- data->direction != DMA_FROM_DEVICE)
+ if (data->direction != DMA_MEM_TO_DEV &&
+ data->direction != DMA_DEV_TO_MEM)
return -EINVAL;
break;
default:
@@ -952,7 +988,7 @@ fail:
*/
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction dir,
+ unsigned int sg_len, enum dma_transfer_direction dir,
unsigned long flags)
{
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
@@ -988,7 +1024,7 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
goto fail;
}
- if (dir == DMA_TO_DEVICE) {
+ if (dir == DMA_MEM_TO_DEV) {
desc->src_addr = sg_dma_address(sg);
desc->dst_addr = edmac->runtime_addr;
} else {
@@ -1032,7 +1068,7 @@ fail:
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
size_t buf_len, size_t period_len,
- enum dma_data_direction dir)
+ enum dma_transfer_direction dir)
{
struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
struct ep93xx_dma_desc *desc, *first;
@@ -1065,7 +1101,7 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
goto fail;
}
- if (dir == DMA_TO_DEVICE) {
+ if (dir == DMA_MEM_TO_DEV) {
desc->src_addr = dma_addr + offset;
desc->dst_addr = edmac->runtime_addr;
} else {
@@ -1133,12 +1169,12 @@ static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
return -EINVAL;
switch (config->direction) {
- case DMA_FROM_DEVICE:
+ case DMA_DEV_TO_MEM:
width = config->src_addr_width;
addr = config->src_addr;
break;
- case DMA_TO_DEVICE:
+ case DMA_MEM_TO_DEV:
width = config->dst_addr_width;
addr = config->dst_addr;
break;
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 8a78154..b98070c 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -772,7 +772,7 @@ fail:
*/
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
- enum dma_data_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags)
{
/*
* This operation is not supported on the Freescale DMA controller
@@ -819,7 +819,7 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
return -ENXIO;
/* we set the controller burst size depending on direction */
- if (config->direction == DMA_TO_DEVICE)
+ if (config->direction == DMA_MEM_TO_DEV)
size = config->dst_addr_width * config->dst_maxburst;
else
size = config->src_addr_width * config->src_maxburst;
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 4be55f9..e4383ee 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -107,7 +107,7 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
imx_dma_disable(imxdmac->imxdma_channel);
return 0;
case DMA_SLAVE_CONFIG:
- if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
+ if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
imxdmac->per_address = dmaengine_cfg->src_addr;
imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
imxdmac->word_size = dmaengine_cfg->src_addr_width;
@@ -224,7 +224,7 @@ static void imxdma_free_chan_resources(struct dma_chan *chan)
static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
@@ -241,7 +241,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
dma_length += sg->length;
}
- if (direction == DMA_FROM_DEVICE)
+ if (direction == DMA_DEV_TO_MEM)
dmamode = DMA_MODE_READ;
else
dmamode = DMA_MODE_WRITE;
@@ -271,7 +271,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
- size_t period_len, enum dma_data_direction direction)
+ size_t period_len, enum dma_transfer_direction direction)
{
struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
struct imxdma_engine *imxdma = imxdmac->imxdma;
@@ -317,7 +317,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
imxdmac->sg_list[periods].page_link =
((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
- if (direction == DMA_FROM_DEVICE)
+ if (direction == DMA_DEV_TO_MEM)
dmamode = DMA_MODE_READ;
else
dmamode = DMA_MODE_WRITE;
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index f993955..8bc5acf 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -247,7 +247,7 @@ struct sdma_engine;
struct sdma_channel {
struct sdma_engine *sdma;
unsigned int channel;
- enum dma_data_direction direction;
+ enum dma_transfer_direction direction;
enum sdma_peripheral_type peripheral_type;
unsigned int event_id0;
unsigned int event_id1;
@@ -268,6 +268,8 @@ struct sdma_channel {
struct dma_async_tx_descriptor desc;
dma_cookie_t last_completed;
enum dma_status status;
+ unsigned int chn_count;
+ unsigned int chn_real_count;
};
#define IMX_DMA_SG_LOOP (1 << 0)
@@ -503,6 +505,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
struct sdma_buffer_descriptor *bd;
int i, error = 0;
+ sdmac->chn_real_count = 0;
/*
* non loop mode. Iterate over all descriptors, collect
* errors and call callback function
@@ -512,6 +515,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
if (bd->mode.status & (BD_DONE | BD_RROR))
error = -EIO;
+ sdmac->chn_real_count += bd->mode.count;
}
if (error)
@@ -519,9 +523,9 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
else
sdmac->status = DMA_SUCCESS;
+ sdmac->last_completed = sdmac->desc.cookie;
if (sdmac->desc.callback)
sdmac->desc.callback(sdmac->desc.callback_param);
- sdmac->last_completed = sdmac->desc.cookie;
}
static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
@@ -650,7 +654,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
int ret;
- if (sdmac->direction == DMA_FROM_DEVICE) {
+ if (sdmac->direction == DMA_DEV_TO_MEM) {
load_address = sdmac->pc_from_device;
} else {
load_address = sdmac->pc_to_device;
@@ -832,17 +836,18 @@ static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
+ unsigned long flags;
struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
struct sdma_engine *sdma = sdmac->sdma;
dma_cookie_t cookie;
- spin_lock_irq(&sdmac->lock);
+ spin_lock_irqsave(&sdmac->lock, flags);
cookie = sdma_assign_cookie(sdmac);
sdma_enable_channel(sdma, sdmac->channel);
- spin_unlock_irq(&sdmac->lock);
+ spin_unlock_irqrestore(&sdmac->lock, flags);
return cookie;
}
@@ -911,7 +916,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
@@ -941,6 +946,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
goto err_out;
}
+ sdmac->chn_count = 0;
for_each_sg(sgl, sg, sg_len, i) {
struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
int param;
@@ -957,6 +963,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
}
bd->mode.count = count;
+ sdmac->chn_count += count;
if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
ret = -EINVAL;
@@ -1008,7 +1015,7 @@ err_out:
static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
- size_t period_len, enum dma_data_direction direction)
+ size_t period_len, enum dma_transfer_direction direction)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
@@ -1093,15 +1100,18 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
sdma_disable_channel(sdmac);
return 0;
case DMA_SLAVE_CONFIG:
- if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
+ if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
sdmac->per_address = dmaengine_cfg->src_addr;
- sdmac->watermark_level = dmaengine_cfg->src_maxburst;
+ sdmac->watermark_level = dmaengine_cfg->src_maxburst *
+ dmaengine_cfg->src_addr_width;
sdmac->word_size = dmaengine_cfg->src_addr_width;
} else {
sdmac->per_address = dmaengine_cfg->dst_addr;
- sdmac->watermark_level = dmaengine_cfg->dst_maxburst;
+ sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
+ dmaengine_cfg->dst_addr_width;
sdmac->word_size = dmaengine_cfg->dst_addr_width;
}
+ sdmac->direction = dmaengine_cfg->direction;
return sdma_config_channel(sdmac);
default:
return -ENOSYS;
@@ -1119,7 +1129,8 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
last_used = chan->cookie;
- dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
+ dma_set_tx_state(txstate, sdmac->last_completed, last_used,
+ sdmac->chn_count - sdmac->chn_real_count);
return sdmac->status;
}
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 19a0c64..74f70aa 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -280,7 +280,8 @@ static void midc_dostart(struct intel_mid_dma_chan *midc,
* callbacks but must be called with the lock held.
*/
static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
- struct intel_mid_dma_desc *desc)
+ struct intel_mid_dma_desc *desc)
+ __releases(&midc->lock) __acquires(&midc->lock)
{
struct dma_async_tx_descriptor *txd = &desc->txd;
dma_async_tx_callback callback_txd = NULL;
@@ -311,6 +312,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
pci_pool_free(desc->lli_pool, desc->lli,
desc->lli_phys);
pci_pool_destroy(desc->lli_pool);
+ desc->lli = NULL;
}
list_move(&desc->desc_node, &midc->free_list);
midc->busy = false;
@@ -395,10 +397,10 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
midc->dma->block_size);
/*Populate SAR and DAR values*/
sg_phy_addr = sg_phys(sg);
- if (desc->dirn == DMA_TO_DEVICE) {
+ if (desc->dirn == DMA_MEM_TO_DEV) {
lli_bloc_desc->sar = sg_phy_addr;
lli_bloc_desc->dar = mids->dma_slave.dst_addr;
- } else if (desc->dirn == DMA_FROM_DEVICE) {
+ } else if (desc->dirn == DMA_DEV_TO_MEM) {
lli_bloc_desc->sar = mids->dma_slave.src_addr;
lli_bloc_desc->dar = sg_phy_addr;
}
@@ -490,7 +492,9 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
ret = dma_async_is_complete(cookie, last_complete, last_used);
if (ret != DMA_SUCCESS) {
+ spin_lock_bh(&midc->lock);
midc_scan_descriptors(to_middma_device(chan->device), midc);
+ spin_unlock_bh(&midc->lock);
last_complete = midc->completed;
last_used = chan->cookie;
@@ -566,6 +570,7 @@ static int intel_mid_dma_device_control(struct dma_chan *chan,
pci_pool_free(desc->lli_pool, desc->lli,
desc->lli_phys);
pci_pool_destroy(desc->lli_pool);
+ desc->lli = NULL;
}
list_move(&desc->desc_node, &midc->free_list);
}
@@ -632,13 +637,13 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
if (midc->dma->pimr_mask) {
cfg_hi.cfgx.protctl = 0x0; /*default value*/
cfg_hi.cfgx.fifo_mode = 1;
- if (mids->dma_slave.direction == DMA_TO_DEVICE) {
+ if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
cfg_hi.cfgx.src_per = 0;
if (mids->device_instance == 0)
cfg_hi.cfgx.dst_per = 3;
if (mids->device_instance == 1)
cfg_hi.cfgx.dst_per = 1;
- } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
+ } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
if (mids->device_instance == 0)
cfg_hi.cfgx.src_per = 2;
if (mids->device_instance == 1)
@@ -682,11 +687,11 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
ctl_lo.ctlx.sinc = 0;
ctl_lo.ctlx.dinc = 0;
} else {
- if (mids->dma_slave.direction == DMA_TO_DEVICE) {
+ if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
ctl_lo.ctlx.sinc = 0;
ctl_lo.ctlx.dinc = 2;
ctl_lo.ctlx.tt_fc = 1;
- } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
+ } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
ctl_lo.ctlx.sinc = 2;
ctl_lo.ctlx.dinc = 0;
ctl_lo.ctlx.tt_fc = 2;
@@ -732,7 +737,7 @@ err_desc_get:
*/
static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct intel_mid_dma_chan *midc = NULL;
@@ -868,7 +873,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
pm_runtime_get_sync(&mid->pdev->dev);
if (mid->state == SUSPENDED) {
- if (dma_resume(mid->pdev)) {
+ if (dma_resume(&mid->pdev->dev)) {
pr_err("ERR_MDMA: resume failed");
return -EFAULT;
}
@@ -1099,7 +1104,8 @@ static int mid_setup_dma(struct pci_dev *pdev)
LNW_PERIPHRAL_MASK_SIZE);
if (dma->mask_reg == NULL) {
pr_err("ERR_MDMA:Can't map periphral intr space !!\n");
- return -ENOMEM;
+ err = -ENOMEM;
+ goto err_ioremap;
}
} else
dma->mask_reg = NULL;
@@ -1196,6 +1202,9 @@ static int mid_setup_dma(struct pci_dev *pdev)
err_engine:
free_irq(pdev->irq, dma);
err_irq:
+ if (dma->mask_reg)
+ iounmap(dma->mask_reg);
+err_ioremap:
pci_pool_destroy(dma->dma_pool);
err_dma_pool:
pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
@@ -1337,8 +1346,9 @@ static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
*
* This function is called by OS when a power event occurs
*/
-int dma_suspend(struct pci_dev *pci, pm_message_t state)
+static int dma_suspend(struct device *dev)
{
+ struct pci_dev *pci = to_pci_dev(dev);
int i;
struct middma_device *device = pci_get_drvdata(pci);
pr_debug("MDMA: dma_suspend called\n");
@@ -1362,8 +1372,9 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state)
*
* This function is called by OS when a power event occurs
*/
-int dma_resume(struct pci_dev *pci)
+int dma_resume(struct device *dev)
{
+ struct pci_dev *pci = to_pci_dev(dev);
int ret;
struct middma_device *device = pci_get_drvdata(pci);
@@ -1429,6 +1440,8 @@ static const struct dev_pm_ops intel_mid_dma_pm = {
.runtime_suspend = dma_runtime_suspend,
.runtime_resume = dma_runtime_resume,
.runtime_idle = dma_runtime_idle,
+ .suspend = dma_suspend,
+ .resume = dma_resume,
};
static struct pci_driver intel_mid_dma_pci_driver = {
@@ -1437,8 +1450,6 @@ static struct pci_driver intel_mid_dma_pci_driver = {
.probe = intel_mid_dma_probe,
.remove = __devexit_p(intel_mid_dma_remove),
#ifdef CONFIG_PM
- .suspend = dma_suspend,
- .resume = dma_resume,
.driver = {
.pm = &intel_mid_dma_pm,
},
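
The first intel_mid_dma.c hunk annotates midc_descriptor_complete() with __releases()/__acquires() so that sparse knows the function drops and re-takes midc->lock around the client callback. A generic sketch of the pattern, using a hypothetical struct foo, looks roughly like this:

#include <linux/spinlock.h>

struct foo {				/* hypothetical example structure */
	spinlock_t lock;
	void (*callback)(struct foo *f);
};

/*
 * Entered with f->lock held; the annotations keep sparse's lock-balance
 * checking quiet about the unlock/lock pair below.
 */
static void foo_complete_one(struct foo *f)
	__releases(&f->lock) __acquires(&f->lock)
{
	spin_unlock(&f->lock);		/* callbacks must not run under the lock */
	if (f->callback)
		f->callback(f);
	spin_lock(&f->lock);
}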
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h
index aea5ee8..c83d35b 100644
--- a/drivers/dma/intel_mid_dma_regs.h
+++ b/drivers/dma/intel_mid_dma_regs.h
@@ -262,7 +262,7 @@ struct intel_mid_dma_desc {
unsigned int lli_length;
unsigned int current_lli;
dma_addr_t next;
- enum dma_data_direction dirn;
+ enum dma_transfer_direction dirn;
enum dma_status status;
enum dma_slave_buswidth width; /*width of DMA txn*/
enum intel_mid_dma_mode cfg_mode; /*mode configuration*/
@@ -296,6 +296,6 @@ static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave
}
-int dma_resume(struct pci_dev *pci);
+int dma_resume(struct device *dev);
#endif /*__INTEL_MID_DMAC_REGS_H__*/
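
Most of the churn across these DMA drivers is the switch from enum dma_data_direction (a DMA-mapping concept) to the dmaengine-specific enum dma_transfer_direction in the slave and cyclic prep callbacks. For reference, the new enum of this era looks approximately like this, with the mappings used by the hunks above noted alongside:

/* Approximate excerpt, for reference only; see include/linux/dmaengine.h. */
enum dma_transfer_direction {
	DMA_MEM_TO_MEM,		/* memcpy; replaces DMA_BIDIRECTIONAL here */
	DMA_MEM_TO_DEV,		/* replaces DMA_TO_DEVICE */
	DMA_DEV_TO_MEM,		/* replaces DMA_FROM_DEVICE */
	DMA_DEV_TO_DEV,
	DMA_TRANS_NONE,		/* replaces DMA_NONE */
};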
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index e03f811..04be90b 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1735,8 +1735,6 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
spin_unlock_bh(&iop_chan->lock);
}
-MODULE_ALIAS("platform:iop-adma");
-
static struct platform_driver iop_adma_driver = {
.probe = iop_adma_probe,
.remove = __devexit_p(iop_adma_remove),
@@ -1746,19 +1744,9 @@ static struct platform_driver iop_adma_driver = {
},
};
-static int __init iop_adma_init (void)
-{
- return platform_driver_register(&iop_adma_driver);
-}
-
-static void __exit iop_adma_exit (void)
-{
- platform_driver_unregister(&iop_adma_driver);
- return;
-}
-module_exit(iop_adma_exit);
-module_init(iop_adma_init);
+module_platform_driver(iop_adma_driver);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("IOP ADMA Engine Driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:iop-adma");
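
The iop-adma.c hunk above (and the mpc512x_dma.c one below) replaces hand-written module init/exit boilerplate with module_platform_driver(). In this era the macro expands to roughly the code it removes:

/* Approximate expansion of module_platform_driver(iop_adma_driver). */
static int __init iop_adma_driver_init(void)
{
	return platform_driver_register(&iop_adma_driver);
}
module_init(iop_adma_driver_init);

static void __exit iop_adma_driver_exit(void)
{
	platform_driver_unregister(&iop_adma_driver);
}
module_exit(iop_adma_driver_exit);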
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 0e5ef33..6212b16 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -312,7 +312,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
case IPU_PIX_FMT_RGB565:
params->ip.bpp = 2;
params->ip.pfs = 4;
- params->ip.npb = 7;
+ params->ip.npb = 15;
params->ip.sat = 2; /* SAT = 32-bit access */
params->ip.ofs0 = 0; /* Red bit offset */
params->ip.ofs1 = 5; /* Green bit offset */
@@ -422,12 +422,6 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
params->pp.nsb = 1;
}
-static void ipu_ch_param_set_burst_size(union chan_param_mem *params,
- uint16_t burst_pixels)
-{
- params->pp.npb = burst_pixels - 1;
-}
-
static void ipu_ch_param_set_buffer(union chan_param_mem *params,
dma_addr_t buf0, dma_addr_t buf1)
{
@@ -690,23 +684,6 @@ static int ipu_init_channel_buffer(struct idmac_channel *ichan,
ipu_ch_param_set_size(&params, pixel_fmt, width, height, stride_bytes);
ipu_ch_param_set_buffer(&params, phyaddr_0, phyaddr_1);
ipu_ch_param_set_rotation(&params, rot_mode);
- /* Some channels (rotation) have restriction on burst length */
- switch (channel) {
- case IDMAC_IC_7: /* Hangs with burst 8, 16, other values
- invalid - Table 44-30 */
-/*
- ipu_ch_param_set_burst_size(&params, 8);
- */
- break;
- case IDMAC_SDC_0:
- case IDMAC_SDC_1:
- /* In original code only IPU_PIX_FMT_RGB565 was setting burst */
- ipu_ch_param_set_burst_size(&params, 16);
- break;
- case IDMAC_IC_0:
- default:
- break;
- }
spin_lock_irqsave(&ipu->lock, flags);
@@ -1364,7 +1341,7 @@ static void ipu_gc_tasklet(unsigned long arg)
/* Allocate and initialise a transfer descriptor. */
static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl, unsigned int sg_len,
- enum dma_data_direction direction, unsigned long tx_flags)
+ enum dma_transfer_direction direction, unsigned long tx_flags)
{
struct idmac_channel *ichan = to_idmac_chan(chan);
struct idmac_tx_desc *desc = NULL;
@@ -1376,7 +1353,7 @@ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan
chan->chan_id != IDMAC_IC_7)
return NULL;
- if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) {
+ if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) {
dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction);
return NULL;
}
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 8ba4edc..4d6d4cf 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -835,17 +835,7 @@ static struct platform_driver mpc_dma_driver = {
},
};
-static int __init mpc_dma_init(void)
-{
- return platform_driver_register(&mpc_dma_driver);
-}
-module_init(mpc_dma_init);
-
-static void __exit mpc_dma_exit(void)
-{
- platform_driver_unregister(&mpc_dma_driver);
-}
-module_exit(mpc_dma_exit);
+module_platform_driver(mpc_dma_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 9a353c2..e779b43 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -1250,7 +1250,7 @@ static int __devinit mv_xor_probe(struct platform_device *pdev)
static void
mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
- struct mbus_dram_target_info *dram)
+ const struct mbus_dram_target_info *dram)
{
void __iomem *base = msp->xor_base;
u32 win_enable = 0;
@@ -1264,7 +1264,7 @@ mv_xor_conf_mbus_windows(struct mv_xor_shared_private *msp,
}
for (i = 0; i < dram->num_cs; i++) {
- struct mbus_dram_window *cs = dram->cs + i;
+ const struct mbus_dram_window *cs = dram->cs + i;
writel((cs->base & 0xffff0000) |
(cs->mbus_attr << 8) |
@@ -1290,7 +1290,7 @@ static struct platform_driver mv_xor_driver = {
static int mv_xor_shared_probe(struct platform_device *pdev)
{
- struct mv_xor_platform_shared_data *msd = pdev->dev.platform_data;
+ const struct mbus_dram_target_info *dram;
struct mv_xor_shared_private *msp;
struct resource *res;
@@ -1323,8 +1323,9 @@ static int mv_xor_shared_probe(struct platform_device *pdev)
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
- if (msd != NULL && msd->dram != NULL)
- mv_xor_conf_mbus_windows(msp, msd->dram);
+ dram = mv_mbus_dram_info();
+ if (dram)
+ mv_xor_conf_mbus_windows(msp, dram);
return 0;
}
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index b4588bd..b06cd4c 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -44,7 +44,6 @@
#define HW_APBHX_CTRL0 0x000
#define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29)
#define BM_APBH_CTRL0_APB_BURST_EN (1 << 28)
-#define BP_APBH_CTRL0_CLKGATE_CHANNEL 8
#define BP_APBH_CTRL0_RESET_CHANNEL 16
#define HW_APBHX_CTRL1 0x010
#define HW_APBHX_CTRL2 0x020
@@ -111,6 +110,7 @@ struct mxs_dma_chan {
int chan_irq;
struct mxs_dma_ccw *ccw;
dma_addr_t ccw_phys;
+ int desc_count;
dma_cookie_t last_completed;
enum dma_status status;
unsigned int flags;
@@ -130,23 +130,6 @@ struct mxs_dma_engine {
struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS];
};
-static inline void mxs_dma_clkgate(struct mxs_dma_chan *mxs_chan, int enable)
-{
- struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
- int chan_id = mxs_chan->chan.chan_id;
- int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR;
-
- /* enable apbh channel clock */
- if (dma_is_apbh()) {
- if (apbh_is_old())
- writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
- mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
- else
- writel(1 << chan_id,
- mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
- }
-}
-
static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
{
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -165,9 +148,6 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int chan_id = mxs_chan->chan.chan_id;
- /* clkgate needs to be enabled before writing other registers */
- mxs_dma_clkgate(mxs_chan, 1);
-
/* set cmd_addr up */
writel(mxs_chan->ccw_phys,
mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));
@@ -178,9 +158,6 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
{
- /* disable apbh channel clock */
- mxs_dma_clkgate(mxs_chan, 0);
-
mxs_chan->status = DMA_SUCCESS;
}
@@ -268,7 +245,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
/*
* When both completion and error of termination bits set at the
* same time, we do not take it as an error. IOW, it only becomes
- * an error we need to handler here in case of ether it's (1) an bus
+ * an error we need to handle here in case of either it's (1) a bus
* error or (2) a termination error with no completion.
*/
stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */
@@ -334,14 +311,11 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
goto err_irq;
}
- ret = clk_enable(mxs_dma->clk);
+ ret = clk_prepare_enable(mxs_dma->clk);
if (ret)
goto err_clk;
- /* clkgate needs to be enabled for reset to finish */
- mxs_dma_clkgate(mxs_chan, 1);
mxs_dma_reset_chan(mxs_chan);
- mxs_dma_clkgate(mxs_chan, 0);
dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
@@ -372,12 +346,12 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE,
mxs_chan->ccw, mxs_chan->ccw_phys);
- clk_disable(mxs_dma->clk);
+ clk_disable_unprepare(mxs_dma->clk);
}
static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long append)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
@@ -386,7 +360,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
struct scatterlist *sg;
int i, j;
u32 *pio;
- static int idx;
+ int idx = append ? mxs_chan->desc_count : 0;
if (mxs_chan->status == DMA_IN_PROGRESS && !append)
return NULL;
@@ -417,7 +391,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
idx = 0;
}
- if (direction == DMA_NONE) {
+ if (direction == DMA_TRANS_NONE) {
ccw = &mxs_chan->ccw[idx++];
pio = (u32 *) sgl;
@@ -450,7 +424,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
ccw->bits |= CCW_CHAIN;
ccw->bits |= CCW_HALT_ON_TERM;
ccw->bits |= CCW_TERM_FLUSH;
- ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
+ ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
COMMAND);
@@ -462,6 +436,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
}
}
}
+ mxs_chan->desc_count = idx;
return &mxs_chan->desc;
@@ -472,7 +447,7 @@ err_out:
static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
- size_t period_len, enum dma_data_direction direction)
+ size_t period_len, enum dma_transfer_direction direction)
{
struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -515,7 +490,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
ccw->bits |= CCW_IRQ;
ccw->bits |= CCW_HALT_ON_TERM;
ccw->bits |= CCW_TERM_FLUSH;
- ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
+ ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);
dma_addr += period_len;
@@ -523,6 +498,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
i++;
}
+ mxs_chan->desc_count = i;
return &mxs_chan->desc;
@@ -539,8 +515,8 @@ static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
switch (cmd) {
case DMA_TERMINATE_ALL:
- mxs_dma_disable_chan(mxs_chan);
mxs_dma_reset_chan(mxs_chan);
+ mxs_dma_disable_chan(mxs_chan);
break;
case DMA_PAUSE:
mxs_dma_pause_chan(mxs_chan);
@@ -578,9 +554,9 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
{
int ret;
- ret = clk_enable(mxs_dma->clk);
+ ret = clk_prepare_enable(mxs_dma->clk);
if (ret)
- goto err_out;
+ return ret;
ret = mxs_reset_block(mxs_dma->base);
if (ret)
@@ -604,11 +580,8 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR);
- clk_disable(mxs_dma->clk);
-
- return 0;
-
err_out:
+ clk_disable_unprepare(mxs_dma->clk);
return ret;
}
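
The mxs-dma.c hunks replace bare clk_enable()/clk_disable() with the prepare/unprepare variants required by the common clock framework. The helpers behave roughly as sketched below; clk_prepare() may sleep, so the combined call is only valid in sleepable context:

/* Approximate sketch of the helpers; the real ones live in <linux/clk.h>. */
static inline int clk_prepare_enable(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);		/* may sleep */
	if (ret)
		return ret;
	ret = clk_enable(clk);		/* atomic-safe */
	if (ret)
		clk_unprepare(clk);

	return ret;
}

static inline void clk_disable_unprepare(struct clk *clk)
{
	clk_disable(clk);
	clk_unprepare(clk);
}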
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index a6d0e3d..823f581 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -1,7 +1,7 @@
/*
* Topcliff PCH DMA controller driver
* Copyright (c) 2010 Intel Corporation
- * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD.
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -99,7 +99,7 @@ struct pch_dma_desc {
struct pch_dma_chan {
struct dma_chan chan;
void __iomem *membase;
- enum dma_data_direction dir;
+ enum dma_transfer_direction dir;
struct tasklet_struct tasklet;
unsigned long err_status;
@@ -224,7 +224,7 @@ static void pdc_set_dir(struct dma_chan *chan)
mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
(DMA_CTL0_BITS_PER_CH * chan->chan_id));
val &= mask_mode;
- if (pd_chan->dir == DMA_TO_DEVICE)
+ if (pd_chan->dir == DMA_MEM_TO_DEV)
val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
DMA_CTL0_DIR_SHIFT_BITS);
else
@@ -242,7 +242,7 @@ static void pdc_set_dir(struct dma_chan *chan)
mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
(DMA_CTL0_BITS_PER_CH * ch));
val &= mask_mode;
- if (pd_chan->dir == DMA_TO_DEVICE)
+ if (pd_chan->dir == DMA_MEM_TO_DEV)
val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
DMA_CTL0_DIR_SHIFT_BITS);
else
@@ -607,7 +607,7 @@ static void pd_issue_pending(struct dma_chan *chan)
static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl, unsigned int sg_len,
- enum dma_data_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags)
{
struct pch_dma_chan *pd_chan = to_pd_chan(chan);
struct pch_dma_slave *pd_slave = chan->private;
@@ -623,9 +623,9 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
return NULL;
}
- if (direction == DMA_FROM_DEVICE)
+ if (direction == DMA_DEV_TO_MEM)
reg = pd_slave->rx_reg;
- else if (direction == DMA_TO_DEVICE)
+ else if (direction == DMA_MEM_TO_DEV)
reg = pd_slave->tx_reg;
else
return NULL;
@@ -1018,6 +1018,8 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev)
#define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E
#define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017
#define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B
+#define PCI_DEVICE_ID_ML7831_DMA1_8CH 0x8810
+#define PCI_DEVICE_ID_ML7831_DMA2_4CH 0x8815
DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
@@ -1030,6 +1032,8 @@ DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8}, /* UART */
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4}, /* SPI */
{ 0, },
};
@@ -1057,7 +1061,7 @@ static void __exit pch_dma_exit(void)
module_init(pch_dma_init);
module_exit(pch_dma_exit);
-MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH "
+MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
"DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 57104147..b8ec03e 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -19,6 +19,7 @@
#include <linux/amba/pl330.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
+#include <linux/of.h>
#define NR_DEFAULT_DESC 16
@@ -116,6 +117,9 @@ struct dma_pl330_desc {
struct dma_pl330_chan *pchan;
};
+/* forward declaration */
+static struct amba_driver pl330_driver;
+
static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)
{
@@ -267,6 +271,32 @@ static void dma_pl330_rqcb(void *token, enum pl330_op_err err)
tasklet_schedule(&pch->task);
}
+bool pl330_filter(struct dma_chan *chan, void *param)
+{
+ u8 *peri_id;
+
+ if (chan->device->dev->driver != &pl330_driver.drv)
+ return false;
+
+#ifdef CONFIG_OF
+ if (chan->device->dev->of_node) {
+ const __be32 *prop_value;
+ phandle phandle;
+ struct device_node *node;
+
+ prop_value = ((struct property *)param)->value;
+ phandle = be32_to_cpup(prop_value++);
+ node = of_find_node_by_phandle(phandle);
+ return ((chan->private == node) &&
+ (chan->chan_id == be32_to_cpup(prop_value)));
+ }
+#endif
+
+ peri_id = chan->private;
+ return *peri_id == (unsigned)param;
+}
+EXPORT_SYMBOL(pl330_filter);
+
static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
struct dma_pl330_chan *pch = to_pchan(chan);
@@ -320,14 +350,14 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
case DMA_SLAVE_CONFIG:
slave_config = (struct dma_slave_config *)arg;
- if (slave_config->direction == DMA_TO_DEVICE) {
+ if (slave_config->direction == DMA_MEM_TO_DEV) {
if (slave_config->dst_addr)
pch->fifo_addr = slave_config->dst_addr;
if (slave_config->dst_addr_width)
pch->burst_sz = __ffs(slave_config->dst_addr_width);
if (slave_config->dst_maxburst)
pch->burst_len = slave_config->dst_maxburst;
- } else if (slave_config->direction == DMA_FROM_DEVICE) {
+ } else if (slave_config->direction == DMA_DEV_TO_MEM) {
if (slave_config->src_addr)
pch->fifo_addr = slave_config->src_addr;
if (slave_config->src_addr_width)
@@ -497,7 +527,7 @@ pluck_desc(struct dma_pl330_dmac *pdmac)
static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
struct dma_pl330_dmac *pdmac = pch->dmac;
- struct dma_pl330_peri *peri = pch->chan.private;
+ u8 *peri_id = pch->chan.private;
struct dma_pl330_desc *desc;
/* Pluck one desc from the pool of DMAC */
@@ -522,13 +552,7 @@ static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
desc->txd.cookie = 0;
async_tx_ack(&desc->txd);
- if (peri) {
- desc->req.rqtype = peri->rqtype;
- desc->req.peri = pch->chan.chan_id;
- } else {
- desc->req.rqtype = MEMTOMEM;
- desc->req.peri = 0;
- }
+ desc->req.peri = peri_id ? pch->chan.chan_id : 0;
dma_async_tx_descriptor_init(&desc->txd, &pch->chan);
@@ -597,7 +621,7 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
- size_t period_len, enum dma_data_direction direction)
+ size_t period_len, enum dma_transfer_direction direction)
{
struct dma_pl330_desc *desc;
struct dma_pl330_chan *pch = to_pchan(chan);
@@ -612,15 +636,17 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
}
switch (direction) {
- case DMA_TO_DEVICE:
+ case DMA_MEM_TO_DEV:
desc->rqcfg.src_inc = 1;
desc->rqcfg.dst_inc = 0;
+ desc->req.rqtype = MEMTODEV;
src = dma_addr;
dst = pch->fifo_addr;
break;
- case DMA_FROM_DEVICE:
+ case DMA_DEV_TO_MEM:
desc->rqcfg.src_inc = 0;
desc->rqcfg.dst_inc = 1;
+ desc->req.rqtype = DEVTOMEM;
src = pch->fifo_addr;
dst = dma_addr;
break;
@@ -646,16 +672,12 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
{
struct dma_pl330_desc *desc;
struct dma_pl330_chan *pch = to_pchan(chan);
- struct dma_pl330_peri *peri = chan->private;
struct pl330_info *pi;
int burst;
if (unlikely(!pch || !len))
return NULL;
- if (peri && peri->rqtype != MEMTOMEM)
- return NULL;
-
pi = &pch->dmac->pif;
desc = __pl330_prep_dma_memcpy(pch, dst, src, len);
@@ -664,6 +686,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
desc->rqcfg.src_inc = 1;
desc->rqcfg.dst_inc = 1;
+ desc->req.rqtype = MEMTOMEM;
/* Select max possible burst size */
burst = pi->pcfg.data_bus_width / 8;
@@ -687,30 +710,19 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flg)
{
struct dma_pl330_desc *first, *desc = NULL;
struct dma_pl330_chan *pch = to_pchan(chan);
- struct dma_pl330_peri *peri = chan->private;
struct scatterlist *sg;
unsigned long flags;
int i;
dma_addr_t addr;
- if (unlikely(!pch || !sgl || !sg_len || !peri))
+ if (unlikely(!pch || !sgl || !sg_len))
return NULL;
- /* Make sure the direction is consistent */
- if ((direction == DMA_TO_DEVICE &&
- peri->rqtype != MEMTODEV) ||
- (direction == DMA_FROM_DEVICE &&
- peri->rqtype != DEVTOMEM)) {
- dev_err(pch->dmac->pif.dev, "%s:%d Invalid Direction\n",
- __func__, __LINE__);
- return NULL;
- }
-
addr = pch->fifo_addr;
first = NULL;
@@ -747,14 +759,16 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
else
list_add_tail(&desc->node, &first->node);
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
desc->rqcfg.src_inc = 1;
desc->rqcfg.dst_inc = 0;
+ desc->req.rqtype = MEMTODEV;
fill_px(&desc->px,
addr, sg_dma_address(sg), sg_dma_len(sg));
} else {
desc->rqcfg.src_inc = 0;
desc->rqcfg.dst_inc = 1;
+ desc->req.rqtype = DEVTOMEM;
fill_px(&desc->px,
sg_dma_address(sg), addr, sg_dma_len(sg));
}
@@ -820,17 +834,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
amba_set_drvdata(adev, pdmac);
-#ifdef CONFIG_PM_RUNTIME
- /* to use the runtime PM helper functions */
- pm_runtime_enable(&adev->dev);
-
- /* enable the power domain */
- if (pm_runtime_get_sync(&adev->dev)) {
- dev_err(&adev->dev, "failed to get runtime pm\n");
- ret = -ENODEV;
- goto probe_err1;
- }
-#else
+#ifndef CONFIG_PM_RUNTIME
/* enable dma clk */
clk_enable(pdmac->clk);
#endif
@@ -856,32 +860,16 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
INIT_LIST_HEAD(&pd->channels);
/* Initialize channel parameters */
- num_chan = max(pdat ? pdat->nr_valid_peri : 0, (u8)pi->pcfg.num_chan);
+ num_chan = max(pdat ? pdat->nr_valid_peri : (u8)pi->pcfg.num_peri,
+ (u8)pi->pcfg.num_chan);
pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL);
for (i = 0; i < num_chan; i++) {
pch = &pdmac->peripherals[i];
- if (pdat) {
- struct dma_pl330_peri *peri = &pdat->peri[i];
-
- switch (peri->rqtype) {
- case MEMTOMEM:
- dma_cap_set(DMA_MEMCPY, pd->cap_mask);
- break;
- case MEMTODEV:
- case DEVTOMEM:
- dma_cap_set(DMA_SLAVE, pd->cap_mask);
- dma_cap_set(DMA_CYCLIC, pd->cap_mask);
- break;
- default:
- dev_err(&adev->dev, "DEVTODEV Not Supported\n");
- continue;
- }
- pch->chan.private = peri;
- } else {
- dma_cap_set(DMA_MEMCPY, pd->cap_mask);
- pch->chan.private = NULL;
- }
+ if (!adev->dev.of_node)
+ pch->chan.private = pdat ? &pdat->peri_id[i] : NULL;
+ else
+ pch->chan.private = adev->dev.of_node;
INIT_LIST_HEAD(&pch->work_list);
spin_lock_init(&pch->lock);
@@ -894,6 +882,15 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
}
pd->dev = &adev->dev;
+ if (pdat) {
+ pd->cap_mask = pdat->cap_mask;
+ } else {
+ dma_cap_set(DMA_MEMCPY, pd->cap_mask);
+ if (pi->pcfg.num_peri) {
+ dma_cap_set(DMA_SLAVE, pd->cap_mask);
+ dma_cap_set(DMA_CYCLIC, pd->cap_mask);
+ }
+ }
pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
pd->device_free_chan_resources = pl330_free_chan_resources;
@@ -970,10 +967,7 @@ static int __devexit pl330_remove(struct amba_device *adev)
res = &adev->res;
release_mem_region(res->start, resource_size(res));
-#ifdef CONFIG_PM_RUNTIME
- pm_runtime_put(&adev->dev);
- pm_runtime_disable(&adev->dev);
-#else
+#ifndef CONFIG_PM_RUNTIME
clk_disable(pdmac->clk);
#endif
@@ -990,6 +984,8 @@ static struct amba_id pl330_ids[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, pl330_ids);
+
#ifdef CONFIG_PM_RUNTIME
static int pl330_runtime_suspend(struct device *dev)
{
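
pl330.c now exports pl330_filter() so that clients (or platform glue code) can pick a PL330 channel through dma_request_channel(). A hypothetical non-DT client, assuming the prototype is available from <linux/amba/pl330.h> and that peri_id matches the id stored in the channel's private data, might look like this:

#include <linux/dmaengine.h>
#include <linux/amba/pl330.h>

static struct dma_chan *example_get_pl330_chan(unsigned long peri_id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* pl330_filter() compares this parameter against the channel's peri id */
	return dma_request_channel(mask, pl330_filter, (void *)peri_id);
}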
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 81809c2..812fd76 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -23,7 +23,6 @@
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
-#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
@@ -57,6 +56,15 @@ static LIST_HEAD(sh_dmae_devices);
static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
+static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
+
+static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
+{
+ struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+
+ __raw_writel(data, shdev->chan_reg +
+ shdev->pdata->channel[sh_dc->id].chclr_offset);
+}
static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
{
@@ -129,6 +137,15 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
+ if (shdev->pdata->chclr_present) {
+ int i;
+ for (i = 0; i < shdev->pdata->channel_num; i++) {
+ struct sh_dmae_chan *sh_chan = shdev->chan[i];
+ if (sh_chan)
+ chclr_write(sh_chan, 0);
+ }
+ }
+
dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
dmaor = dmaor_read(shdev);
@@ -139,6 +156,10 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
return -EIO;
}
+ if (shdev->pdata->dmaor_init & ~dmaor)
+ dev_warn(shdev->common.dev,
+ "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
+ dmaor, shdev->pdata->dmaor_init);
return 0;
}
@@ -259,8 +280,6 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
return 0;
}
-static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
-
static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
{
struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
@@ -340,6 +359,8 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
sh_chan_xfer_ld_queue(sh_chan);
sh_chan->pm_state = DMAE_PM_ESTABLISHED;
}
+ } else {
+ sh_chan->pm_state = DMAE_PM_PENDING;
}
spin_unlock_irq(&sh_chan->desc_lock);
@@ -479,19 +500,19 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
* @sh_chan: DMA channel
* @flags: DMA transfer flags
* @dest: destination DMA address, incremented when direction equals
- * DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
+ * DMA_DEV_TO_MEM
* @src: source DMA address, incremented when direction equals
- * DMA_TO_DEVICE or DMA_BIDIRECTIONAL
+ * DMA_MEM_TO_DEV
* @len: DMA transfer length
* @first: if NULL, set to the current descriptor and cookie set to -EBUSY
* @direction: needed for slave DMA to decide which address to keep constant,
- * equals DMA_BIDIRECTIONAL for MEMCPY
+ * equals DMA_MEM_TO_MEM for MEMCPY
* Returns 0 or an error
* Locks: called with desc_lock held
*/
static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
- struct sh_desc **first, enum dma_data_direction direction)
+ struct sh_desc **first, enum dma_transfer_direction direction)
{
struct sh_desc *new;
size_t copy_size;
@@ -531,9 +552,9 @@ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
new->direction = direction;
*len -= copy_size;
- if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
+ if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
*src += copy_size;
- if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
+ if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
*dest += copy_size;
return new;
@@ -546,12 +567,12 @@ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
* converted to scatter-gather to guarantee consistent locking and a correct
* list manipulation. For slave DMA direction carries the usual meaning, and,
* logically, the SG list is RAM and the addr variable contains slave address,
- * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL
+ * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM
* and the SG list contains only one element and points at the source buffer.
*/
static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
- enum dma_data_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags)
{
struct scatterlist *sg;
struct sh_desc *first = NULL, *new = NULL /* compiler... */;
@@ -592,7 +613,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
i, sg, len, (unsigned long long)sg_addr);
- if (direction == DMA_FROM_DEVICE)
+ if (direction == DMA_DEV_TO_MEM)
new = sh_dmae_add_desc(sh_chan, flags,
&sg_addr, addr, &len, &first,
direction);
@@ -646,13 +667,13 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
sg_dma_address(&sg) = dma_src;
sg_dma_len(&sg) = len;
- return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
+ return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
flags);
}
static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
- enum dma_data_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags)
{
struct sh_dmae_slave *param;
struct sh_dmae_chan *sh_chan;
@@ -996,7 +1017,7 @@ static void dmae_do_tasklet(unsigned long data)
spin_lock_irq(&sh_chan->desc_lock);
list_for_each_entry(desc, &sh_chan->ld_queue, node) {
if (desc->mark == DESC_SUBMITTED &&
- ((desc->direction == DMA_FROM_DEVICE &&
+ ((desc->direction == DMA_DEV_TO_MEM &&
(desc->hw.dar + desc->hw.tcr) == dar_buf) ||
(desc->hw.sar + desc->hw.tcr) == sar_buf)) {
dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
@@ -1225,6 +1246,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, shdev);
+ shdev->common.dev = &pdev->dev;
+
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
@@ -1239,7 +1262,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&shdev->common.channels);
- dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
+ if (!pdata->slave_only)
+ dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
if (pdata->slave && pdata->slave_num)
dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
@@ -1254,7 +1278,6 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
shdev->common.device_control = sh_dmae_control;
- shdev->common.dev = &pdev->dev;
/* Default transfer size of 32 bytes requires 32-byte alignment */
shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;
@@ -1435,22 +1458,17 @@ static int sh_dmae_runtime_resume(struct device *dev)
#ifdef CONFIG_PM
static int sh_dmae_suspend(struct device *dev)
{
- struct sh_dmae_device *shdev = dev_get_drvdata(dev);
- int i;
-
- for (i = 0; i < shdev->pdata->channel_num; i++) {
- struct sh_dmae_chan *sh_chan = shdev->chan[i];
- if (sh_chan->descs_allocated)
- sh_chan->pm_error = pm_runtime_put_sync(dev);
- }
-
return 0;
}
static int sh_dmae_resume(struct device *dev)
{
struct sh_dmae_device *shdev = dev_get_drvdata(dev);
- int i;
+ int i, ret;
+
+ ret = sh_dmae_rst(shdev);
+ if (ret < 0)
+ dev_err(dev, "Failed to reset!\n");
for (i = 0; i < shdev->pdata->channel_num; i++) {
struct sh_dmae_chan *sh_chan = shdev->chan[i];
@@ -1459,9 +1477,6 @@ static int sh_dmae_resume(struct device *dev)
if (!sh_chan->descs_allocated)
continue;
- if (!sh_chan->pm_error)
- pm_runtime_get_sync(dev);
-
if (param) {
const struct sh_dmae_slave_config *cfg = param->config;
dmae_set_dmars(sh_chan, cfg->mid_rid);
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
new file mode 100644
index 0000000..2333810
--- /dev/null
+++ b/drivers/dma/sirf-dma.c
@@ -0,0 +1,707 @@
+/*
+ * DMA controller driver for CSR SiRFprimaII
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/sirfsoc_dma.h>
+
+#define SIRFSOC_DMA_DESCRIPTORS 16
+#define SIRFSOC_DMA_CHANNELS 16
+
+#define SIRFSOC_DMA_CH_ADDR 0x00
+#define SIRFSOC_DMA_CH_XLEN 0x04
+#define SIRFSOC_DMA_CH_YLEN 0x08
+#define SIRFSOC_DMA_CH_CTRL 0x0C
+
+#define SIRFSOC_DMA_WIDTH_0 0x100
+#define SIRFSOC_DMA_CH_VALID 0x140
+#define SIRFSOC_DMA_CH_INT 0x144
+#define SIRFSOC_DMA_INT_EN 0x148
+#define SIRFSOC_DMA_CH_LOOP_CTRL 0x150
+
+#define SIRFSOC_DMA_MODE_CTRL_BIT 4
+#define SIRFSOC_DMA_DIR_CTRL_BIT 5
+
+/* xlen and dma_width registers are on a 4-byte boundary */
+#define SIRFSOC_DMA_WORD_LEN 4
+
+struct sirfsoc_dma_desc {
+ struct dma_async_tx_descriptor desc;
+ struct list_head node;
+
+ /* SiRFprimaII 2D-DMA parameters */
+
+ int xlen; /* DMA xlen */
+ int ylen; /* DMA ylen */
+ int width; /* DMA width */
+ int dir;
+ bool cyclic; /* is loop DMA? */
+ u32 addr; /* DMA buffer address */
+};
+
+struct sirfsoc_dma_chan {
+ struct dma_chan chan;
+ struct list_head free;
+ struct list_head prepared;
+ struct list_head queued;
+ struct list_head active;
+ struct list_head completed;
+ dma_cookie_t completed_cookie;
+ unsigned long happened_cyclic;
+ unsigned long completed_cyclic;
+
+ /* Lock for this structure */
+ spinlock_t lock;
+
+ int mode;
+};
+
+struct sirfsoc_dma {
+ struct dma_device dma;
+ struct tasklet_struct tasklet;
+ struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS];
+ void __iomem *base;
+ int irq;
+};
+
+#define DRV_NAME "sirfsoc_dma"
+
+/* Convert struct dma_chan to struct sirfsoc_dma_chan */
+static inline
+struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct sirfsoc_dma_chan, chan);
+}
+
+/* Convert struct dma_chan to struct sirfsoc_dma */
+static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
+{
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
+ return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
+}
+
+/* Execute all queued DMA descriptors */
+static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
+ int cid = schan->chan.chan_id;
+ struct sirfsoc_dma_desc *sdesc = NULL;
+
+ /*
+ * The lock is already held by the callers of this function, so we
+ * don't take it again.
+ */
+
+ sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
+ node);
+ /* Move the first queued descriptor to active list */
+ list_move_tail(&schan->queued, &schan->active);
+
+ /* Start the DMA transfer */
+ writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
+ cid * 4);
+ writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
+ (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
+ sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
+ writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
+ SIRFSOC_DMA_CH_XLEN);
+ writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
+ SIRFSOC_DMA_CH_YLEN);
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
+ (1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
+
+ /*
+ * writel has an implicit memory write barrier to make sure data is
+ * flushed into memory before starting DMA
+ */
+ writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
+
+ if (sdesc->cyclic) {
+ writel((1 << cid) | 1 << (cid + 16) |
+ readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+ schan->happened_cyclic = schan->completed_cyclic = 0;
+ }
+}
+
+/* Interrupt handler */
+static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
+{
+ struct sirfsoc_dma *sdma = data;
+ struct sirfsoc_dma_chan *schan;
+ struct sirfsoc_dma_desc *sdesc = NULL;
+ u32 is;
+ int ch;
+
+ is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
+ while ((ch = fls(is) - 1) >= 0) {
+ is &= ~(1 << ch);
+ writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
+ schan = &sdma->channels[ch];
+
+ spin_lock(&schan->lock);
+
+ sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
+ node);
+ if (!sdesc->cyclic) {
+ /* Execute queued descriptors */
+ list_splice_tail_init(&schan->active, &schan->completed);
+ if (!list_empty(&schan->queued))
+ sirfsoc_dma_execute(schan);
+ } else
+ schan->happened_cyclic++;
+
+ spin_unlock(&schan->lock);
+ }
+
+ /* Schedule tasklet */
+ tasklet_schedule(&sdma->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/* process completed descriptors */
+static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
+{
+ dma_cookie_t last_cookie = 0;
+ struct sirfsoc_dma_chan *schan;
+ struct sirfsoc_dma_desc *sdesc;
+ struct dma_async_tx_descriptor *desc;
+ unsigned long flags;
+ unsigned long happened_cyclic;
+ LIST_HEAD(list);
+ int i;
+
+ for (i = 0; i < sdma->dma.chancnt; i++) {
+ schan = &sdma->channels[i];
+
+ /* Get all completed descriptors */
+ spin_lock_irqsave(&schan->lock, flags);
+ if (!list_empty(&schan->completed)) {
+ list_splice_tail_init(&schan->completed, &list);
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ /* Execute callbacks and run dependencies */
+ list_for_each_entry(sdesc, &list, node) {
+ desc = &sdesc->desc;
+
+ if (desc->callback)
+ desc->callback(desc->callback_param);
+
+ last_cookie = desc->cookie;
+ dma_run_dependencies(desc);
+ }
+
+ /* Free descriptors */
+ spin_lock_irqsave(&schan->lock, flags);
+ list_splice_tail_init(&list, &schan->free);
+ schan->completed_cookie = last_cookie;
+ spin_unlock_irqrestore(&schan->lock, flags);
+ } else {
+ /* for cyclic channel, desc is always in active list */
+ sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
+ node);
+
+ if (!sdesc || (sdesc && !sdesc->cyclic)) {
+ /* without active cyclic DMA */
+ spin_unlock_irqrestore(&schan->lock, flags);
+ continue;
+ }
+
+ /* cyclic DMA */
+ happened_cyclic = schan->happened_cyclic;
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ desc = &sdesc->desc;
+ while (happened_cyclic != schan->completed_cyclic) {
+ if (desc->callback)
+ desc->callback(desc->callback_param);
+ schan->completed_cyclic++;
+ }
+ }
+ }
+}
+
+/* DMA Tasklet */
+static void sirfsoc_dma_tasklet(unsigned long data)
+{
+ struct sirfsoc_dma *sdma = (void *)data;
+
+ sirfsoc_dma_process_completed(sdma);
+}
+
+/* Submit descriptor to hardware */
+static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
+ struct sirfsoc_dma_desc *sdesc;
+ unsigned long flags;
+ dma_cookie_t cookie;
+
+ sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ /* Move descriptor to queue */
+ list_move_tail(&sdesc->node, &schan->queued);
+
+ /* Update cookie */
+ cookie = schan->chan.cookie + 1;
+ if (cookie <= 0)
+ cookie = 1;
+
+ schan->chan.cookie = cookie;
+ sdesc->desc.cookie = cookie;
+
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return cookie;
+}
+
+static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
+ struct dma_slave_config *config)
+{
+ unsigned long flags;
+
+ if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
+ (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
+ return -EINVAL;
+
+ spin_lock_irqsave(&schan->lock, flags);
+ schan->mode = (config->src_maxburst == 4 ? 1 : 0);
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return 0;
+}
+
+static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
+ int cid = schan->chan.chan_id;
+ unsigned long flags;
+
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
+ ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
+ writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
+
+ writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
+ & ~((1 << cid) | 1 << (cid + 16)),
+ sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+
+ spin_lock_irqsave(&schan->lock, flags);
+ list_splice_tail_init(&schan->active, &schan->free);
+ list_splice_tail_init(&schan->queued, &schan->free);
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return 0;
+}
+
+static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+ unsigned long arg)
+{
+ struct dma_slave_config *config;
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+
+ switch (cmd) {
+ case DMA_TERMINATE_ALL:
+ return sirfsoc_dma_terminate_all(schan);
+ case DMA_SLAVE_CONFIG:
+ config = (struct dma_slave_config *)arg;
+ return sirfsoc_dma_slave_config(schan, config);
+
+ default:
+ break;
+ }
+
+ return -ENOSYS;
+}
+
+/* Alloc channel resources */
+static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ struct sirfsoc_dma_desc *sdesc;
+ unsigned long flags;
+ LIST_HEAD(descs);
+ int i;
+
+ /* Alloc descriptors for this channel */
+ for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
+ sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
+ if (!sdesc) {
+ dev_notice(sdma->dma.dev, "Memory allocation error. "
+ "Allocated only %u descriptors\n", i);
+ break;
+ }
+
+ dma_async_tx_descriptor_init(&sdesc->desc, chan);
+ sdesc->desc.flags = DMA_CTRL_ACK;
+ sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;
+
+ list_add_tail(&sdesc->node, &descs);
+ }
+
+ /* Return error only if no descriptors were allocated */
+ if (i == 0)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ list_splice_tail_init(&descs, &schan->free);
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ return i;
+}
+
+/* Free channel resources */
+static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ struct sirfsoc_dma_desc *sdesc, *tmp;
+ unsigned long flags;
+ LIST_HEAD(descs);
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ /* Channel must be idle */
+ BUG_ON(!list_empty(&schan->prepared));
+ BUG_ON(!list_empty(&schan->queued));
+ BUG_ON(!list_empty(&schan->active));
+ BUG_ON(!list_empty(&schan->completed));
+
+ /* Move data */
+ list_splice_tail_init(&schan->free, &descs);
+
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ /* Free descriptors */
+ list_for_each_entry_safe(sdesc, tmp, &descs, node)
+ kfree(sdesc);
+}
+
+/* Send pending descriptor to hardware */
+static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
+{
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ unsigned long flags;
+
+ spin_lock_irqsave(&schan->lock, flags);
+
+ if (list_empty(&schan->active) && !list_empty(&schan->queued))
+ sirfsoc_dma_execute(schan);
+
+ spin_unlock_irqrestore(&schan->lock, flags);
+}
+
+/* Check request completion status */
+static enum dma_status
+sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+ struct dma_tx_state *txstate)
+{
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ unsigned long flags;
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+
+ spin_lock_irqsave(&schan->lock, flags);
+ last_used = schan->chan.cookie;
+ last_complete = schan->completed_cookie;
+ spin_unlock_irqrestore(&schan->lock, flags);
+
+ dma_set_tx_state(txstate, last_complete, last_used, 0);
+ return dma_async_is_complete(cookie, last_complete, last_used);
+}
+
+static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
+ struct dma_chan *chan, struct dma_interleaved_template *xt,
+ unsigned long flags)
+{
+ struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ struct sirfsoc_dma_desc *sdesc = NULL;
+ unsigned long iflags;
+ int ret;
+
+ if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
+ ret = -EINVAL;
+ goto err_dir;
+ }
+
+ /* Get free descriptor */
+ spin_lock_irqsave(&schan->lock, iflags);
+ if (!list_empty(&schan->free)) {
+ sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
+ node);
+ list_del(&sdesc->node);
+ }
+ spin_unlock_irqrestore(&schan->lock, iflags);
+
+ if (!sdesc) {
+ /* try to free completed descriptors */
+ sirfsoc_dma_process_completed(sdma);
+ ret = 0;
+ goto no_desc;
+ }
+
+ /* Place descriptor in prepared list */
+ spin_lock_irqsave(&schan->lock, iflags);
+
+ /*
+ * The number of chunks in a frame can only be 1 for prima2,
+ * and ylen (number of frames - 1) must be at least 0
+ */
+ if ((xt->frame_size == 1) && (xt->numf > 0)) {
+ sdesc->cyclic = 0;
+ sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
+ sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
+ SIRFSOC_DMA_WORD_LEN;
+ sdesc->ylen = xt->numf - 1;
+ if (xt->dir == DMA_MEM_TO_DEV) {
+ sdesc->addr = xt->src_start;
+ sdesc->dir = 1;
+ } else {
+ sdesc->addr = xt->dst_start;
+ sdesc->dir = 0;
+ }
+
+ list_add_tail(&sdesc->node, &schan->prepared);
+ } else {
+ pr_err("sirfsoc DMA Invalid xfer\n");
+ ret = -EINVAL;
+ goto err_xfer;
+ }
+ spin_unlock_irqrestore(&schan->lock, iflags);
+
+ return &sdesc->desc;
+err_xfer:
+ spin_unlock_irqrestore(&schan->lock, iflags);
+no_desc:
+err_dir:
+ return ERR_PTR(ret);
+}
+
+static struct dma_async_tx_descriptor *
+sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
+ size_t buf_len, size_t period_len,
+ enum dma_transfer_direction direction)
+{
+ struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+ struct sirfsoc_dma_desc *sdesc = NULL;
+ unsigned long iflags;
+
+ /*
+ * We only support cyclic transfers with two periods.
+ * If the X-length is set to 0, the channel runs in loop mode:
+ * the DMA address keeps increasing until it reaches the end of a loop
+ * area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)), then
+ * the DMA address wraps back to the beginning of this area.
+ * In loop mode, the DMA data region is divided into two parts, BUFA
+ * and BUFB. The DMA controller generates interrupts twice in each loop:
+ * when the DMA address reaches the end of BUFA and when it reaches
+ * the end of BUFB.
+ */
+ if (buf_len != 2 * period_len)
+ return ERR_PTR(-EINVAL);
+
+ /* Get free descriptor */
+ spin_lock_irqsave(&schan->lock, iflags);
+ if (!list_empty(&schan->free)) {
+ sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
+ node);
+ list_del(&sdesc->node);
+ }
+ spin_unlock_irqrestore(&schan->lock, iflags);
+
+ if (!sdesc)
+ return 0;
+
+ /* Place descriptor in prepared list */
+ spin_lock_irqsave(&schan->lock, iflags);
+ sdesc->addr = addr;
+ sdesc->cyclic = 1;
+ sdesc->xlen = 0;
+ sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
+ sdesc->width = 1;
+ list_add_tail(&sdesc->node, &schan->prepared);
+ spin_unlock_irqrestore(&schan->lock, iflags);
+
+ return &sdesc->desc;
+}
+
+/*
+ * The DMA controller consists of 16 independent DMA channels.
+ * Each channel is allocated to a different function
+ */
+bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
+{
+ unsigned int ch_nr = (unsigned int) chan_id;
+
+ if (ch_nr == chan->chan_id +
+ chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
+ return true;
+
+ return false;
+}
+EXPORT_SYMBOL(sirfsoc_dma_filter_id);
+
+static int __devinit sirfsoc_dma_probe(struct platform_device *op)
+{
+ struct device_node *dn = op->dev.of_node;
+ struct device *dev = &op->dev;
+ struct dma_device *dma;
+ struct sirfsoc_dma *sdma;
+ struct sirfsoc_dma_chan *schan;
+ struct resource res;
+ ulong regs_start, regs_size;
+ u32 id;
+ int ret, i;
+
+ sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
+ if (!sdma) {
+ dev_err(dev, "Memory exhausted!\n");
+ return -ENOMEM;
+ }
+
+ if (of_property_read_u32(dn, "cell-index", &id)) {
+ dev_err(dev, "Fail to get DMAC index\n");
+ ret = -ENODEV;
+ goto free_mem;
+ }
+
+ sdma->irq = irq_of_parse_and_map(dn, 0);
+ if (sdma->irq == NO_IRQ) {
+ dev_err(dev, "Error mapping IRQ!\n");
+ ret = -EINVAL;
+ goto free_mem;
+ }
+
+ ret = of_address_to_resource(dn, 0, &res);
+ if (ret) {
+ dev_err(dev, "Error parsing memory region!\n");
+ goto free_mem;
+ }
+
+ regs_start = res.start;
+ regs_size = resource_size(&res);
+
+ sdma->base = devm_ioremap(dev, regs_start, regs_size);
+ if (!sdma->base) {
+ dev_err(dev, "Error mapping memory region!\n");
+ ret = -ENOMEM;
+ goto irq_dispose;
+ }
+
+ ret = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME,
+ sdma);
+ if (ret) {
+ dev_err(dev, "Error requesting IRQ!\n");
+ ret = -EINVAL;
+ goto unmap_mem;
+ }
+
+ dma = &sdma->dma;
+ dma->dev = dev;
+ dma->chancnt = SIRFSOC_DMA_CHANNELS;
+
+ dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
+ dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
+ dma->device_issue_pending = sirfsoc_dma_issue_pending;
+ dma->device_control = sirfsoc_dma_control;
+ dma->device_tx_status = sirfsoc_dma_tx_status;
+ dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
+ dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
+
+ INIT_LIST_HEAD(&dma->channels);
+ dma_cap_set(DMA_SLAVE, dma->cap_mask);
+ dma_cap_set(DMA_CYCLIC, dma->cap_mask);
+ dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
+ dma_cap_set(DMA_PRIVATE, dma->cap_mask);
+
+ for (i = 0; i < dma->chancnt; i++) {
+ schan = &sdma->channels[i];
+
+ schan->chan.device = dma;
+ schan->chan.cookie = 1;
+ schan->completed_cookie = schan->chan.cookie;
+
+ INIT_LIST_HEAD(&schan->free);
+ INIT_LIST_HEAD(&schan->prepared);
+ INIT_LIST_HEAD(&schan->queued);
+ INIT_LIST_HEAD(&schan->active);
+ INIT_LIST_HEAD(&schan->completed);
+
+ spin_lock_init(&schan->lock);
+ list_add_tail(&schan->chan.device_node, &dma->channels);
+ }
+
+ tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
+
+ /* Register DMA engine */
+ dev_set_drvdata(dev, sdma);
+ ret = dma_async_device_register(dma);
+ if (ret)
+ goto free_irq;
+
+ dev_info(dev, "initialized SIRFSOC DMAC driver\n");
+
+ return 0;
+
+free_irq:
+ devm_free_irq(dev, sdma->irq, sdma);
+irq_dispose:
+ irq_dispose_mapping(sdma->irq);
+unmap_mem:
+ iounmap(sdma->base);
+free_mem:
+ devm_kfree(dev, sdma);
+ return ret;
+}
+
+static int __devexit sirfsoc_dma_remove(struct platform_device *op)
+{
+ struct device *dev = &op->dev;
+ struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+
+ dma_async_device_unregister(&sdma->dma);
+ devm_free_irq(dev, sdma->irq, sdma);
+ irq_dispose_mapping(sdma->irq);
+ iounmap(sdma->base);
+ devm_kfree(dev, sdma);
+ return 0;
+}
+
+static struct of_device_id sirfsoc_dma_match[] = {
+ { .compatible = "sirf,prima2-dmac", },
+ {},
+};
+
+static struct platform_driver sirfsoc_dma_driver = {
+ .probe = sirfsoc_dma_probe,
+ .remove = __devexit_p(sirfsoc_dma_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = sirfsoc_dma_match,
+ },
+};
+
+module_platform_driver(sirfsoc_dma_driver);
+
+MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
+ "Barry Song <baohua.song@csr.com>");
+MODULE_DESCRIPTION("SIRFSOC DMA control driver");
+MODULE_LICENSE("GPL v2");
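
sirfsoc_dma_filter_id() identifies a channel by chan_id + dev_id * 16, i.e. the controller's cell-index times the per-controller channel count plus the channel number. A hypothetical client requesting channel 4 of the controller with cell-index 1, assuming the filter prototype comes from <linux/sirfsoc_dma.h>, could do:

#include <linux/dmaengine.h>
#include <linux/sirfsoc_dma.h>

static struct dma_chan *example_get_sirf_chan(void)
{
	dma_cap_mask_t mask;
	unsigned long id = 1 * 16 + 4;	/* dev_id * 16 channels + chan_id */

	dma_cap_zero(mask);
	dma_cap_set(DMA_INTERLEAVE, mask);

	return dma_request_channel(mask, sirfsoc_dma_filter_id, (void *)id);
}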
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 13259ca..cc5ecbc 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -14,6 +14,8 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/amba/bus.h>
@@ -32,6 +34,9 @@
/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500
+/* Milliseconds */
+#define DMA40_AUTOSUSPEND_DELAY 100
+
/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000
@@ -62,6 +67,55 @@ enum d40_command {
D40_DMA_SUSPENDED = 3
};
+/*
+ * These are the registers that have to be saved and later restored
+ * when the DMA hw is powered off.
+ * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
+ */
+static u32 d40_backup_regs[] = {
+ D40_DREG_LCPA,
+ D40_DREG_LCLA,
+ D40_DREG_PRMSE,
+ D40_DREG_PRMSO,
+ D40_DREG_PRMOE,
+ D40_DREG_PRMOO,
+};
+
+#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
+
+/* TODO: Check if all these registers have to be saved/restored on dma40 v3 */
+static u32 d40_backup_regs_v3[] = {
+ D40_DREG_PSEG1,
+ D40_DREG_PSEG2,
+ D40_DREG_PSEG3,
+ D40_DREG_PSEG4,
+ D40_DREG_PCEG1,
+ D40_DREG_PCEG2,
+ D40_DREG_PCEG3,
+ D40_DREG_PCEG4,
+ D40_DREG_RSEG1,
+ D40_DREG_RSEG2,
+ D40_DREG_RSEG3,
+ D40_DREG_RSEG4,
+ D40_DREG_RCEG1,
+ D40_DREG_RCEG2,
+ D40_DREG_RCEG3,
+ D40_DREG_RCEG4,
+};
+
+#define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3)
+
+static u32 d40_backup_regs_chan[] = {
+ D40_CHAN_REG_SSCFG,
+ D40_CHAN_REG_SSELT,
+ D40_CHAN_REG_SSPTR,
+ D40_CHAN_REG_SSLNK,
+ D40_CHAN_REG_SDCFG,
+ D40_CHAN_REG_SDELT,
+ D40_CHAN_REG_SDPTR,
+ D40_CHAN_REG_SDLNK,
+};
+
/**
* struct d40_lli_pool - Structure for keeping LLIs in memory
*
@@ -96,7 +150,7 @@ struct d40_lli_pool {
* during a transfer.
* @node: List entry.
* @is_in_client_list: true if the client owns this descriptor.
- * the previous one.
+ * @cyclic: true if this is a cyclic job
*
* This descriptor is used for both logical and physical transfers.
*/
@@ -143,6 +197,7 @@ struct d40_lcla_pool {
* channels.
*
* @lock: A lock protection this entity.
+ * @reserved: True if the channel is reserved, e.g. used by the secure world.
* @num: The physical channel number of this entity.
* @allocated_src: Bit mapped to show which src event line's are mapped to
* this physical channel. Can also be free or physically allocated.
@@ -152,6 +207,7 @@ struct d40_lcla_pool {
*/
struct d40_phy_res {
spinlock_t lock;
+ bool reserved;
int num;
u32 allocated_src;
u32 allocated_dst;
@@ -185,7 +241,6 @@ struct d40_base;
* @src_def_cfg: Default cfg register setting for src.
* @dst_def_cfg: Default cfg register setting for dst.
* @log_def: Default logical channel settings.
- * @lcla: Space for one dst src pair for logical channel transfers.
* @lcpa: Pointer to dst and src lcpa settings.
* @runtime_addr: runtime configured address.
* @runtime_direction: runtime configured direction.
@@ -217,7 +272,7 @@ struct d40_chan {
struct d40_log_lli_full *lcpa;
/* Runtime reconfiguration */
dma_addr_t runtime_addr;
- enum dma_data_direction runtime_direction;
+ enum dma_transfer_direction runtime_direction;
};
/**
@@ -241,6 +296,7 @@ struct d40_chan {
* @dma_both: dma_device channels that can do both memcpy and slave transfers.
* @dma_slave: dma_device channels that can do only do slave transfers.
* @dma_memcpy: dma_device channels that can do only do memcpy transfers.
+ * @phy_chans: Room for all possible physical channels in system.
* @log_chans: Room for all possible logical channels in system.
* @lookup_log_chans: Used to map interrupt number to logical channel. Points
* to log_chans entries.
@@ -248,12 +304,20 @@ struct d40_chan {
* to phy_chans entries.
* @plat_data: Pointer to provided platform_data which is the driver
* configuration.
+ * @lcpa_regulator: Pointer to the regulator for the ESRAM bank used for lcla.
* @phy_res: Vector containing all physical channels.
* @lcla_pool: lcla pool settings and data.
* @lcpa_base: The virtual mapped address of LCPA.
* @phy_lcpa: The physical address of the LCPA.
* @lcpa_size: The size of the LCPA area.
* @desc_slab: cache for descriptors.
+ * @reg_val_backup: Here the values of some hardware registers are stored
+ * before the DMA is powered off. They are restored when the power is back on.
+ * @reg_val_backup_v3: Backup of registers that only exist on dma40 v3 and
+ * later.
+ * @reg_val_backup_chan: Backup data for standard channel parameter registers.
+ * @gcc_pwr_off_mask: Mask of clock-gate bits that must stay enabled (for the
+ * reserved channels) when the DMA is powered off.
+ * @initialized: true if the dma has been initialized
*/
struct d40_base {
spinlock_t interrupt_lock;
@@ -275,6 +339,7 @@ struct d40_base {
struct d40_chan **lookup_log_chans;
struct d40_chan **lookup_phy_chans;
struct stedma40_platform_data *plat_data;
+ struct regulator *lcpa_regulator;
/* Physical half channels */
struct d40_phy_res *phy_res;
struct d40_lcla_pool lcla_pool;
@@ -282,6 +347,11 @@ struct d40_base {
dma_addr_t phy_lcpa;
resource_size_t lcpa_size;
struct kmem_cache *desc_slab;
+ u32 reg_val_backup[BACKUP_REGS_SZ];
+ u32 reg_val_backup_v3[BACKUP_REGS_SZ_V3];
+ u32 *reg_val_backup_chan;
+ u16 gcc_pwr_off_mask;
+ bool initialized;
};
/**
@@ -479,13 +549,14 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
struct d40_desc *d;
struct d40_desc *_d;
- list_for_each_entry_safe(d, _d, &d40c->client, node)
+ list_for_each_entry_safe(d, _d, &d40c->client, node) {
if (async_tx_test_ack(&d->txd)) {
d40_desc_remove(d);
desc = d;
memset(desc, 0, sizeof(*desc));
break;
}
+ }
}
if (!desc)
@@ -536,6 +607,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
bool cyclic = desc->cyclic;
int curr_lcla = -EINVAL;
int first_lcla = 0;
+ bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
bool linkback;
/*
@@ -608,11 +680,16 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
&lli->src[lli_current],
next_lcla, flags);
- dma_sync_single_range_for_device(chan->base->dev,
- pool->dma_addr, lcla_offset,
- 2 * sizeof(struct d40_log_lli),
- DMA_TO_DEVICE);
-
+ /*
+ * Cache maintenance is not needed if lcla is
+ * mapped in esram
+ */
+ if (!use_esram_lcla) {
+ dma_sync_single_range_for_device(chan->base->dev,
+ pool->dma_addr, lcla_offset,
+ 2 * sizeof(struct d40_log_lli),
+ DMA_TO_DEVICE);
+ }
curr_lcla = next_lcla;
if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
@@ -740,7 +817,61 @@ static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
return len;
}
-/* Support functions for logical channels */
+
+#ifdef CONFIG_PM
+static void dma40_backup(void __iomem *baseaddr, u32 *backup,
+ u32 *regaddr, int num, bool save)
+{
+ int i;
+
+ for (i = 0; i < num; i++) {
+ void __iomem *addr = baseaddr + regaddr[i];
+
+ if (save)
+ backup[i] = readl_relaxed(addr);
+ else
+ writel_relaxed(backup[i], addr);
+ }
+}
+
+static void d40_save_restore_registers(struct d40_base *base, bool save)
+{
+ int i;
+
+ /* Save/Restore channel specific registers */
+ for (i = 0; i < base->num_phy_chans; i++) {
+ void __iomem *addr;
+ int idx;
+
+ if (base->phy_res[i].reserved)
+ continue;
+
+ addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
+ idx = i * ARRAY_SIZE(d40_backup_regs_chan);
+
+ dma40_backup(addr, &base->reg_val_backup_chan[idx],
+ d40_backup_regs_chan,
+ ARRAY_SIZE(d40_backup_regs_chan),
+ save);
+ }
+
+ /* Save/Restore global registers */
+ dma40_backup(base->virtbase, base->reg_val_backup,
+ d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
+ save);
+
+ /* Save/Restore registers only existing on dma40 v3 and later */
+ if (base->rev >= 3)
+ dma40_backup(base->virtbase, base->reg_val_backup_v3,
+ d40_backup_regs_v3,
+ ARRAY_SIZE(d40_backup_regs_v3),
+ save);
+}
+#else
+static void d40_save_restore_registers(struct d40_base *base, bool save)
+{
+}
+#endif
static int d40_channel_execute_command(struct d40_chan *d40c,
enum d40_command command)
@@ -973,6 +1104,10 @@ static void d40_config_write(struct d40_chan *d40c)
/* Set LIDX for lcla */
writel(lidx, chanbase + D40_CHAN_REG_SSELT);
writel(lidx, chanbase + D40_CHAN_REG_SDELT);
+
+ /* Clear LNK which will be used by d40_chan_has_events() */
+ writel(0, chanbase + D40_CHAN_REG_SSLNK);
+ writel(0, chanbase + D40_CHAN_REG_SDLNK);
}
}
@@ -1013,6 +1148,7 @@ static int d40_pause(struct d40_chan *d40c)
if (!d40c->busy)
return 0;
+ pm_runtime_get_sync(d40c->base->dev);
spin_lock_irqsave(&d40c->lock, flags);
res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
@@ -1025,7 +1161,8 @@ static int d40_pause(struct d40_chan *d40c)
D40_DMA_RUN);
}
}
-
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
spin_unlock_irqrestore(&d40c->lock, flags);
return res;
}
@@ -1039,7 +1176,7 @@ static int d40_resume(struct d40_chan *d40c)
return 0;
spin_lock_irqsave(&d40c->lock, flags);
-
+ pm_runtime_get_sync(d40c->base->dev);
if (d40c->base->rev == 0)
if (chan_is_logical(d40c)) {
res = d40_channel_execute_command(d40c,
@@ -1057,6 +1194,8 @@ static int d40_resume(struct d40_chan *d40c)
}
no_suspend:
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
spin_unlock_irqrestore(&d40c->lock, flags);
return res;
}
@@ -1129,7 +1268,10 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
d40d = d40_first_queued(d40c);
if (d40d != NULL) {
- d40c->busy = true;
+ if (!d40c->busy)
+ d40c->busy = true;
+
+ pm_runtime_get_sync(d40c->base->dev);
/* Remove from queue */
d40_desc_remove(d40d);
@@ -1190,6 +1332,8 @@ static void dma_tc_handle(struct d40_chan *d40c)
if (d40_queue_start(d40c) == NULL)
d40c->busy = false;
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
}
d40c->pending_tx++;
@@ -1405,11 +1549,16 @@ static int d40_validate_conf(struct d40_chan *d40c,
return res;
}
-static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
- int log_event_line, bool is_log)
+static bool d40_alloc_mask_set(struct d40_phy_res *phy,
+ bool is_src, int log_event_line, bool is_log,
+ bool *first_user)
{
unsigned long flags;
spin_lock_irqsave(&phy->lock, flags);
+
+ *first_user = ((phy->allocated_src | phy->allocated_dst)
+ == D40_ALLOC_FREE);
+
if (!is_log) {
/* Physical interrupts are masked per physical full channel */
if (phy->allocated_src == D40_ALLOC_FREE &&
@@ -1490,7 +1639,7 @@ out:
return is_free;
}
-static int d40_allocate_channel(struct d40_chan *d40c)
+static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
{
int dev_type;
int event_group;
@@ -1526,7 +1675,8 @@ static int d40_allocate_channel(struct d40_chan *d40c)
for (i = 0; i < d40c->base->num_phy_chans; i++) {
if (d40_alloc_mask_set(&phys[i], is_src,
- 0, is_log))
+ 0, is_log,
+ first_phy_user))
goto found_phy;
}
} else
@@ -1536,7 +1686,8 @@ static int d40_allocate_channel(struct d40_chan *d40c)
if (d40_alloc_mask_set(&phys[i],
is_src,
0,
- is_log))
+ is_log,
+ first_phy_user))
goto found_phy;
}
}
@@ -1552,6 +1703,25 @@ found_phy:
/* Find logical channel */
for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
int phy_num = j + event_group * 2;
+
+ if (d40c->dma_cfg.use_fixed_channel) {
+ i = d40c->dma_cfg.phy_channel;
+
+ if ((i != phy_num) && (i != phy_num + 1)) {
+ dev_err(chan2dev(d40c),
+ "invalid fixed phy channel %d\n", i);
+ return -EINVAL;
+ }
+
+ if (d40_alloc_mask_set(&phys[i], is_src, event_line,
+ is_log, first_phy_user))
+ goto found_log;
+
+ dev_err(chan2dev(d40c),
+ "could not allocate fixed phy channel %d\n", i);
+ return -EINVAL;
+ }
+
/*
* Spread logical channels across all available physical rather
* than pack every logical channel at the first available phy
@@ -1560,13 +1730,15 @@ found_phy:
if (is_src) {
for (i = phy_num; i < phy_num + 2; i++) {
if (d40_alloc_mask_set(&phys[i], is_src,
- event_line, is_log))
+ event_line, is_log,
+ first_phy_user))
goto found_log;
}
} else {
for (i = phy_num + 1; i >= phy_num; i--) {
if (d40_alloc_mask_set(&phys[i], is_src,
- event_line, is_log))
+ event_line, is_log,
+ first_phy_user))
goto found_log;
}
}
@@ -1643,10 +1815,11 @@ static int d40_free_dma(struct d40_chan *d40c)
return -EINVAL;
}
+ pm_runtime_get_sync(d40c->base->dev);
res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
if (res) {
chan_err(d40c, "suspend failed\n");
- return res;
+ goto out;
}
if (chan_is_logical(d40c)) {
@@ -1664,13 +1837,11 @@ static int d40_free_dma(struct d40_chan *d40c)
if (d40_chan_has_events(d40c)) {
res = d40_channel_execute_command(d40c,
D40_DMA_RUN);
- if (res) {
+ if (res)
chan_err(d40c,
"Executing RUN command\n");
- return res;
- }
}
- return 0;
+ goto out;
}
} else {
(void) d40_alloc_mask_free(phy, is_src, 0);
@@ -1680,13 +1851,23 @@ static int d40_free_dma(struct d40_chan *d40c)
res = d40_channel_execute_command(d40c, D40_DMA_STOP);
if (res) {
chan_err(d40c, "Failed to stop channel\n");
- return res;
+ goto out;
}
+
+ if (d40c->busy) {
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
+ }
+
+ d40c->busy = false;
d40c->phy_chan = NULL;
d40c->configured = false;
d40c->base->lookup_phy_chans[phy->num] = NULL;
+out:
- return 0;
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
+ return res;
}
static bool d40_is_paused(struct d40_chan *d40c)
@@ -1855,7 +2036,7 @@ err:
}
static dma_addr_t
-d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
+d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
{
struct stedma40_platform_data *plat = chan->base->plat_data;
struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
@@ -1864,9 +2045,9 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
if (chan->runtime_addr)
return chan->runtime_addr;
- if (direction == DMA_FROM_DEVICE)
+ if (direction == DMA_DEV_TO_MEM)
addr = plat->dev_rx[cfg->src_dev_type];
- else if (direction == DMA_TO_DEVICE)
+ else if (direction == DMA_MEM_TO_DEV)
addr = plat->dev_tx[cfg->dst_dev_type];
return addr;
@@ -1875,7 +2056,7 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
static struct dma_async_tx_descriptor *
d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
struct scatterlist *sg_dst, unsigned int sg_len,
- enum dma_data_direction direction, unsigned long dma_flags)
+ enum dma_transfer_direction direction, unsigned long dma_flags)
{
struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
dma_addr_t src_dev_addr = 0;
@@ -1902,9 +2083,9 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
if (direction != DMA_NONE) {
dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
- if (direction == DMA_FROM_DEVICE)
+ if (direction == DMA_DEV_TO_MEM)
src_dev_addr = dev_addr;
- else if (direction == DMA_TO_DEVICE)
+ else if (direction == DMA_MEM_TO_DEV)
dst_dev_addr = dev_addr;
}
@@ -2011,14 +2192,15 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
goto fail;
}
}
- is_free_phy = (d40c->phy_chan == NULL);
- err = d40_allocate_channel(d40c);
+ err = d40_allocate_channel(d40c, &is_free_phy);
if (err) {
chan_err(d40c, "Failed to allocate channel\n");
+ d40c->configured = false;
goto fail;
}
+ pm_runtime_get_sync(d40c->base->dev);
/* Fill in basic CFG register values */
d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
&d40c->dst_def_cfg, chan_is_logical(d40c));
@@ -2038,6 +2220,12 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
}
+ dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
+ chan_is_logical(d40c) ? "logical" : "physical",
+ d40c->phy_chan->num,
+ d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
+
+
/*
* Only write channel configuration to the DMA if the physical
* resource is free. In case of multiple logical channels
@@ -2046,6 +2234,8 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
if (is_free_phy)
d40_config_write(d40c);
fail:
+ pm_runtime_mark_last_busy(d40c->base->dev);
+ pm_runtime_put_autosuspend(d40c->base->dev);
spin_unlock_irqrestore(&d40c->lock, flags);
return err;
}
@@ -2108,10 +2298,10 @@ d40_prep_memcpy_sg(struct dma_chan *chan,
static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl,
unsigned int sg_len,
- enum dma_data_direction direction,
+ enum dma_transfer_direction direction,
unsigned long dma_flags)
{
- if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE)
+ if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV)
return NULL;
return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
@@ -2120,7 +2310,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
static struct dma_async_tx_descriptor *
dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
size_t buf_len, size_t period_len,
- enum dma_data_direction direction)
+ enum dma_transfer_direction direction)
{
unsigned int periods = buf_len / period_len;
struct dma_async_tx_descriptor *txd;
@@ -2269,7 +2459,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
dst_addr_width = config->dst_addr_width;
dst_maxburst = config->dst_maxburst;
- if (config->direction == DMA_FROM_DEVICE) {
+ if (config->direction == DMA_DEV_TO_MEM) {
dma_addr_t dev_addr_rx =
d40c->base->plat_data->dev_rx[cfg->src_dev_type];
@@ -2292,7 +2482,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
if (dst_maxburst == 0)
dst_maxburst = src_maxburst;
- } else if (config->direction == DMA_TO_DEVICE) {
+ } else if (config->direction == DMA_MEM_TO_DEV) {
dma_addr_t dev_addr_tx =
d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
@@ -2357,7 +2547,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
"configured channel %s for %s, data width %d/%d, "
"maxburst %d/%d elements, LE, no flow control\n",
dma_chan_name(chan),
- (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
+ (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
src_addr_width, dst_addr_width,
src_maxburst, dst_maxburst);
@@ -2519,6 +2709,72 @@ failure1:
return err;
}
+/* Suspend/resume functionality */
+#ifdef CONFIG_PM
+static int dma40_pm_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct d40_base *base = platform_get_drvdata(pdev);
+ int ret = 0;
+ if (!pm_runtime_suspended(dev))
+ return -EBUSY;
+
+ if (base->lcpa_regulator)
+ ret = regulator_disable(base->lcpa_regulator);
+ return ret;
+}
+
+static int dma40_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct d40_base *base = platform_get_drvdata(pdev);
+
+ d40_save_restore_registers(base, true);
+
+ /* Don't disable/enable clocks for v1 due to HW bugs */
+ if (base->rev != 1)
+ writel_relaxed(base->gcc_pwr_off_mask,
+ base->virtbase + D40_DREG_GCC);
+
+ return 0;
+}
+
+static int dma40_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct d40_base *base = platform_get_drvdata(pdev);
+
+ if (base->initialized)
+ d40_save_restore_registers(base, false);
+
+ writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
+ base->virtbase + D40_DREG_GCC);
+ return 0;
+}
+
+static int dma40_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct d40_base *base = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ if (base->lcpa_regulator)
+ ret = regulator_enable(base->lcpa_regulator);
+
+ return ret;
+}
+
+static const struct dev_pm_ops dma40_pm_ops = {
+ .suspend = dma40_pm_suspend,
+ .runtime_suspend = dma40_runtime_suspend,
+ .runtime_resume = dma40_runtime_resume,
+ .resume = dma40_resume,
+};
+#define DMA40_PM_OPS (&dma40_pm_ops)
+#else
+#define DMA40_PM_OPS NULL
+#endif
+
/* Initialization functions. */
static int __init d40_phy_res_init(struct d40_base *base)
@@ -2527,6 +2783,7 @@ static int __init d40_phy_res_init(struct d40_base *base)
int num_phy_chans_avail = 0;
u32 val[2];
int odd_even_bit = -2;
+ int gcc = D40_DREG_GCC_ENA;
val[0] = readl(base->virtbase + D40_DREG_PRSME);
val[1] = readl(base->virtbase + D40_DREG_PRSMO);
@@ -2538,9 +2795,17 @@ static int __init d40_phy_res_init(struct d40_base *base)
/* Mark security only channels as occupied */
base->phy_res[i].allocated_src = D40_ALLOC_PHY;
base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
+ base->phy_res[i].reserved = true;
+ gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
+ D40_DREG_GCC_SRC);
+ gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
+ D40_DREG_GCC_DST);
+
+
} else {
base->phy_res[i].allocated_src = D40_ALLOC_FREE;
base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
+ base->phy_res[i].reserved = false;
num_phy_chans_avail++;
}
spin_lock_init(&base->phy_res[i].lock);
@@ -2552,6 +2817,11 @@ static int __init d40_phy_res_init(struct d40_base *base)
base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
+ base->phy_res[chan].reserved = true;
+ gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
+ D40_DREG_GCC_SRC);
+ gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
+ D40_DREG_GCC_DST);
num_phy_chans_avail--;
}
@@ -2572,6 +2842,15 @@ static int __init d40_phy_res_init(struct d40_base *base)
val[0] = val[0] >> 2;
}
+ /*
+ * To keep things simple, enable all clocks initially.
+ * The clocks will get managed later, after channel allocation.
+ * The clocks for the event lines on which reserved channels exist
+ * are not managed here.
+ */
+ writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
+ base->gcc_pwr_off_mask = gcc;
+
return num_phy_chans_avail;
}
@@ -2699,10 +2978,15 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
goto failure;
}
- base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
- sizeof(struct d40_desc *) *
- D40_LCLA_LINK_PER_EVENT_GRP,
+ base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
+ sizeof(d40_backup_regs_chan),
GFP_KERNEL);
+ if (!base->reg_val_backup_chan)
+ goto failure;
+
+ base->lcla_pool.alloc_map =
+ kzalloc(num_phy_chans * sizeof(struct d40_desc *)
+ * D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL);
if (!base->lcla_pool.alloc_map)
goto failure;
@@ -2741,9 +3025,9 @@ failure:
static void __init d40_hw_init(struct d40_base *base)
{
- static const struct d40_reg_val dma_init_reg[] = {
+ static struct d40_reg_val dma_init_reg[] = {
/* Clock every part of the DMA block from start */
- { .reg = D40_DREG_GCC, .val = 0x0000ff01},
+ { .reg = D40_DREG_GCC, .val = D40_DREG_GCC_ENABLE_ALL},
/* Interrupts on all logical channels */
{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
@@ -2943,11 +3227,31 @@ static int __init d40_probe(struct platform_device *pdev)
d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
goto failure;
}
+ /* If lcla has to be located in ESRAM we don't need to allocate */
+ if (base->plat_data->use_esram_lcla) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "lcla_esram");
+ if (!res) {
+ ret = -ENOENT;
+ d40_err(&pdev->dev,
+ "No \"lcla_esram\" memory resource\n");
+ goto failure;
+ }
+ base->lcla_pool.base = ioremap(res->start,
+ resource_size(res));
+ if (!base->lcla_pool.base) {
+ ret = -ENOMEM;
+ d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
+ goto failure;
+ }
+ writel(res->start, base->virtbase + D40_DREG_LCLA);
- ret = d40_lcla_allocate(base);
- if (ret) {
- d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
- goto failure;
+ } else {
+ ret = d40_lcla_allocate(base);
+ if (ret) {
+ d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
+ goto failure;
+ }
}
spin_lock_init(&base->lcla_pool.lock);
@@ -2960,6 +3264,32 @@ static int __init d40_probe(struct platform_device *pdev)
goto failure;
}
+ pm_runtime_irq_safe(base->dev);
+ pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
+ pm_runtime_use_autosuspend(base->dev);
+ pm_runtime_enable(base->dev);
+ pm_runtime_resume(base->dev);
+
+ if (base->plat_data->use_esram_lcla) {
+
+ base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
+ if (IS_ERR(base->lcpa_regulator)) {
+ d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
+ base->lcpa_regulator = NULL;
+ goto failure;
+ }
+
+ ret = regulator_enable(base->lcpa_regulator);
+ if (ret) {
+ d40_err(&pdev->dev,
+ "Failed to enable lcpa_regulator\n");
+ regulator_put(base->lcpa_regulator);
+ base->lcpa_regulator = NULL;
+ goto failure;
+ }
+ }
+
+ base->initialized = true;
err = d40_dmaengine_init(base, num_reserved_chans);
if (err)
goto failure;
@@ -2976,6 +3306,11 @@ failure:
if (base->virtbase)
iounmap(base->virtbase);
+ if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
+ iounmap(base->lcla_pool.base);
+ base->lcla_pool.base = NULL;
+ }
+
if (base->lcla_pool.dma_addr)
dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
SZ_1K * base->num_phy_chans,
@@ -2998,6 +3333,11 @@ failure:
clk_put(base->clk);
}
+ if (base->lcpa_regulator) {
+ regulator_disable(base->lcpa_regulator);
+ regulator_put(base->lcpa_regulator);
+ }
+
kfree(base->lcla_pool.alloc_map);
kfree(base->lookup_log_chans);
kfree(base->lookup_phy_chans);
@@ -3013,6 +3353,7 @@ static struct platform_driver d40_driver = {
.driver = {
.owner = THIS_MODULE,
.name = D40_NAME,
+ .pm = DMA40_PM_OPS,
},
};
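For context (illustration only, not taken from this patch): the pm_runtime_get_sync()/pm_runtime_put_autosuspend() calls added throughout the driver follow the usual autosuspend bracket around each hardware access. A minimal sketch, assuming a device on which pm_runtime_use_autosuspend() and pm_runtime_enable() have already been called, as d40_probe() now does:

	#include <linux/pm_runtime.h>

	static void dma40_example_hw_access(struct device *dev)
	{
		/* Wake the block; registers are restored by the runtime_resume hook. */
		pm_runtime_get_sync(dev);

		/* ... program channel registers here ... */

		/* Let it power down again once DMA40_AUTOSUSPEND_DELAY ms have passed. */
		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);
	}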
diff --git a/drivers/dma/ste_dma40_ll.h b/drivers/dma/ste_dma40_ll.h
index b44c4551..8d3d490 100644
--- a/drivers/dma/ste_dma40_ll.h
+++ b/drivers/dma/ste_dma40_ll.h
@@ -16,6 +16,8 @@
#define D40_TYPE_TO_GROUP(type) (type / 16)
#define D40_TYPE_TO_EVENT(type) (type % 16)
+#define D40_GROUP_SIZE 8
+#define D40_PHYS_TO_GROUP(phys) ((phys & (D40_GROUP_SIZE - 1)) / 2)
/* Most bits of the CFG register are the same in log as in phy mode */
#define D40_SREG_CFG_MST_POS 15
@@ -123,6 +125,15 @@
/* DMA Register Offsets */
#define D40_DREG_GCC 0x000
+#define D40_DREG_GCC_ENA 0x1
+/* This assumes that there are only 4 event groups */
+#define D40_DREG_GCC_ENABLE_ALL 0xff01
+#define D40_DREG_GCC_EVTGRP_POS 8
+#define D40_DREG_GCC_SRC 0
+#define D40_DREG_GCC_DST 1
+#define D40_DREG_GCC_EVTGRP_ENA(x, y) \
+ (1 << (D40_DREG_GCC_EVTGRP_POS + 2 * x + y))
+
#define D40_DREG_PRTYP 0x004
#define D40_DREG_PRSME 0x008
#define D40_DREG_PRSMO 0x00C
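A quick worked example of the new macros (not part of the patch), taking physical channel 5 and its destination half:

	D40_PHYS_TO_GROUP(5)                         = (5 & (8 - 1)) / 2      = 2
	D40_DREG_GCC_EVTGRP_ENA(2, D40_DREG_GCC_DST) = 1 << (8 + 2 * 2 + 1)   = 1 << 13 = 0x2000

So reserving that channel sets bit 13 in gcc_pwr_off_mask, and the value written to D40_DREG_GCC at runtime suspend keeps the corresponding event-group clock running.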
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index a4a398f..a6f9c16 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -90,7 +90,7 @@ struct timb_dma_chan {
struct list_head queue;
struct list_head free_list;
unsigned int bytes_per_line;
- enum dma_data_direction direction;
+ enum dma_transfer_direction direction;
unsigned int descs; /* Descriptors to allocate */
unsigned int desc_elems; /* number of elems per descriptor */
};
@@ -166,10 +166,10 @@ static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
if (single)
dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
- td_chan->direction);
+ DMA_TO_DEVICE);
else
dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
- td_chan->direction);
+ DMA_TO_DEVICE);
}
static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
@@ -235,7 +235,7 @@ static void __td_start_dma(struct timb_dma_chan *td_chan)
"td_chan: %p, chan: %d, membase: %p\n",
td_chan, td_chan->chan.chan_id, td_chan->membase);
- if (td_chan->direction == DMA_FROM_DEVICE) {
+ if (td_chan->direction == DMA_DEV_TO_MEM) {
/* descriptor address */
iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
@@ -278,7 +278,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
txd->cookie);
/* make sure to stop the transfer */
- if (td_chan->direction == DMA_FROM_DEVICE)
+ if (td_chan->direction == DMA_DEV_TO_MEM)
iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
/* Currently no support for stopping DMA transfers
else
@@ -558,7 +558,7 @@ static void td_issue_pending(struct dma_chan *chan)
static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
struct scatterlist *sgl, unsigned int sg_len,
- enum dma_data_direction direction, unsigned long flags)
+ enum dma_transfer_direction direction, unsigned long flags)
{
struct timb_dma_chan *td_chan =
container_of(chan, struct timb_dma_chan, chan);
@@ -606,7 +606,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
}
dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
- td_desc->desc_list_len, DMA_TO_DEVICE);
+ td_desc->desc_list_len, DMA_MEM_TO_DEV);
return &td_desc->txd;
}
@@ -775,8 +775,8 @@ static int __devinit td_probe(struct platform_device *pdev)
td_chan->descs = pchan->descriptors;
td_chan->desc_elems = pchan->descriptor_elements;
td_chan->bytes_per_line = pchan->bytes_per_line;
- td_chan->direction = pchan->rx ? DMA_FROM_DEVICE :
- DMA_TO_DEVICE;
+ td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM :
+ DMA_MEM_TO_DEV;
td_chan->membase = td->membase +
(i / 2) * TIMBDMA_INSTANCE_OFFSET +
@@ -841,17 +841,7 @@ static struct platform_driver td_driver = {
.remove = __exit_p(td_remove),
};
-static int __init td_init(void)
-{
- return platform_driver_register(&td_driver);
-}
-module_init(td_init);
-
-static void __exit td_exit(void)
-{
- platform_driver_unregister(&td_driver);
-}
-module_exit(td_exit);
+module_platform_driver(td_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Timberdale DMA controller driver");
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index cbd83e36..6122c36 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -845,7 +845,7 @@ txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
static struct dma_async_tx_descriptor *
txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
+ unsigned int sg_len, enum dma_transfer_direction direction,
unsigned long flags)
{
struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
@@ -860,9 +860,9 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
BUG_ON(!ds || !ds->reg_width);
if (ds->tx_reg)
- BUG_ON(direction != DMA_TO_DEVICE);
+ BUG_ON(direction != DMA_MEM_TO_DEV);
else
- BUG_ON(direction != DMA_FROM_DEVICE);
+ BUG_ON(direction != DMA_DEV_TO_MEM);
if (unlikely(!sg_len))
return NULL;
@@ -882,7 +882,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
mem = sg_dma_address(sg);
if (__is_dmac64(ddev)) {
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
desc->hwdesc.SAR = mem;
desc->hwdesc.DAR = ds->tx_reg;
} else {
@@ -891,7 +891,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
desc->hwdesc.CNTR = sg_dma_len(sg);
} else {
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
desc->hwdesc32.SAR = mem;
desc->hwdesc32.DAR = ds->tx_reg;
} else {
@@ -900,7 +900,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
}
desc->hwdesc32.CNTR = sg_dma_len(sg);
}
- if (direction == DMA_TO_DEVICE) {
+ if (direction == DMA_MEM_TO_DEV) {
sai = ds->reg_width;
dai = 0;
} else {
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index fe90cd4..e48ab31 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -32,7 +32,6 @@
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/platform_device.h>
-#include <linux/sysdev.h>
#include <linux/workqueue.h>
#include <linux/edac.h>
@@ -243,8 +242,8 @@ struct edac_device_ctl_info {
*/
struct edac_dev_sysfs_attribute *sysfs_attributes;
- /* pointer to main 'edac' class in sysfs */
- struct sysdev_class *edac_class;
+ /* pointer to main 'edac' subsys in sysfs */
+ struct bus_type *edac_subsys;
/* the internal state of this controller instance */
int op_state;
@@ -342,7 +341,7 @@ struct edac_pci_ctl_info {
int pci_idx;
- struct sysdev_class *edac_class; /* pointer to class */
+ struct bus_type *edac_subsys; /* pointer to subsystem */
/* the internal state of this controller instance */
int op_state;
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index c3f6743..4b15459 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -23,7 +23,6 @@
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
-#include <linux/sysdev.h>
#include <linux/ctype.h>
#include <linux/workqueue.h>
#include <asm/uaccess.h>
diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
index 86649df..b4ea185 100644
--- a/drivers/edac/edac_device_sysfs.c
+++ b/drivers/edac/edac_device_sysfs.c
@@ -1,5 +1,5 @@
/*
- * file for managing the edac_device class of devices for EDAC
+ * file for managing the edac_device subsystem of devices for EDAC
*
* (C) 2007 SoftwareBitMaker
*
@@ -230,21 +230,21 @@ static struct kobj_type ktype_device_ctrl = {
*/
int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
{
- struct sysdev_class *edac_class;
+ struct bus_type *edac_subsys;
int err;
debugf1("%s()\n", __func__);
/* get the /sys/devices/system/edac reference */
- edac_class = edac_get_sysfs_class();
- if (edac_class == NULL) {
- debugf1("%s() no edac_class error\n", __func__);
+ edac_subsys = edac_get_sysfs_subsys();
+ if (edac_subsys == NULL) {
+ debugf1("%s() no edac_subsys error\n", __func__);
err = -ENODEV;
goto err_out;
}
- /* Point to the 'edac_class' this instance 'reports' to */
- edac_dev->edac_class = edac_class;
+ /* Point to the 'edac_subsys' this instance 'reports' to */
+ edac_dev->edac_subsys = edac_subsys;
/* Init the devices's kobject */
memset(&edac_dev->kobj, 0, sizeof(struct kobject));
@@ -261,7 +261,7 @@ int edac_device_register_sysfs_main_kobj(struct edac_device_ctl_info *edac_dev)
/* register */
err = kobject_init_and_add(&edac_dev->kobj, &ktype_device_ctrl,
- &edac_class->kset.kobj,
+ &edac_subsys->dev_root->kobj,
"%s", edac_dev->name);
if (err) {
debugf1("%s()Failed to register '.../edac/%s'\n",
@@ -284,7 +284,7 @@ err_kobj_reg:
module_put(edac_dev->owner);
err_mod_get:
- edac_put_sysfs_class();
+ edac_put_sysfs_subsys();
err_out:
return err;
@@ -308,7 +308,7 @@ void edac_device_unregister_sysfs_main_kobj(struct edac_device_ctl_info *dev)
* b) 'kfree' the memory
*/
kobject_put(&dev->kobj);
- edac_put_sysfs_class();
+ edac_put_sysfs_subsys();
}
/* edac_dev -> instance information */
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index d69144a..ca6c04d 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -25,7 +25,6 @@
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
-#include <linux/sysdev.h>
#include <linux/ctype.h>
#include <linux/edac.h>
#include <asm/uaccess.h>
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 29ffa35..d56e634 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -1021,19 +1021,19 @@ void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
int edac_sysfs_setup_mc_kset(void)
{
int err = -EINVAL;
- struct sysdev_class *edac_class;
+ struct bus_type *edac_subsys;
debugf1("%s()\n", __func__);
- /* get the /sys/devices/system/edac class reference */
- edac_class = edac_get_sysfs_class();
- if (edac_class == NULL) {
- debugf1("%s() no edac_class error=%d\n", __func__, err);
+ /* get the /sys/devices/system/edac subsys reference */
+ edac_subsys = edac_get_sysfs_subsys();
+ if (edac_subsys == NULL) {
+ debugf1("%s() no edac_subsys error=%d\n", __func__, err);
goto fail_out;
}
/* Init the MC's kobject */
- mc_kset = kset_create_and_add("mc", NULL, &edac_class->kset.kobj);
+ mc_kset = kset_create_and_add("mc", NULL, &edac_subsys->dev_root->kobj);
if (!mc_kset) {
err = -ENOMEM;
debugf1("%s() Failed to register '.../edac/mc'\n", __func__);
@@ -1045,7 +1045,7 @@ int edac_sysfs_setup_mc_kset(void)
return 0;
fail_kset:
- edac_put_sysfs_class();
+ edac_put_sysfs_subsys();
fail_out:
return err;
@@ -1059,6 +1059,6 @@ fail_out:
void edac_sysfs_teardown_mc_kset(void)
{
kset_unregister(mc_kset);
- edac_put_sysfs_class();
+ edac_put_sysfs_subsys();
}
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 17aabb7..00f81b4 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -10,8 +10,6 @@
#ifndef __EDAC_MODULE_H__
#define __EDAC_MODULE_H__
-#include <linux/sysdev.h>
-
#include "edac_core.h"
/*
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index 2b378207..63af1c5 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -19,7 +19,6 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
-#include <linux/sysdev.h>
#include <linux/ctype.h>
#include <linux/workqueue.h>
#include <asm/uaccess.h>
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
index 495198a..97f5064 100644
--- a/drivers/edac/edac_pci_sysfs.c
+++ b/drivers/edac/edac_pci_sysfs.c
@@ -338,12 +338,12 @@ static struct kobj_type ktype_edac_pci_main_kobj = {
* edac_pci_main_kobj_setup()
*
* setup the sysfs for EDAC PCI attributes
- * assumes edac_class has already been initialized
+ * assumes edac_subsys has already been initialized
*/
static int edac_pci_main_kobj_setup(void)
{
int err;
- struct sysdev_class *edac_class;
+ struct bus_type *edac_subsys;
debugf0("%s()\n", __func__);
@@ -354,9 +354,9 @@ static int edac_pci_main_kobj_setup(void)
/* First time, so create the main kobject and its
* controls and attributes
*/
- edac_class = edac_get_sysfs_class();
- if (edac_class == NULL) {
- debugf1("%s() no edac_class\n", __func__);
+ edac_subsys = edac_get_sysfs_subsys();
+ if (edac_subsys == NULL) {
+ debugf1("%s() no edac_subsys\n", __func__);
err = -ENODEV;
goto decrement_count_fail;
}
@@ -381,7 +381,7 @@ static int edac_pci_main_kobj_setup(void)
/* Instanstiate the pci object */
err = kobject_init_and_add(edac_pci_top_main_kobj,
&ktype_edac_pci_main_kobj,
- &edac_class->kset.kobj, "pci");
+ &edac_subsys->dev_root->kobj, "pci");
if (err) {
debugf1("Failed to register '.../edac/pci'\n");
goto kobject_init_and_add_fail;
@@ -404,7 +404,7 @@ kzalloc_fail:
module_put(THIS_MODULE);
mod_get_fail:
- edac_put_sysfs_class();
+ edac_put_sysfs_subsys();
decrement_count_fail:
/* if are on this error exit, nothing to tear down */
@@ -432,7 +432,7 @@ static void edac_pci_main_kobj_teardown(void)
__func__);
kobject_put(edac_pci_top_main_kobj);
}
- edac_put_sysfs_class();
+ edac_put_sysfs_subsys();
}
/*
diff --git a/drivers/edac/edac_stub.c b/drivers/edac/edac_stub.c
index 86ad2ee..670c448 100644
--- a/drivers/edac/edac_stub.c
+++ b/drivers/edac/edac_stub.c
@@ -26,7 +26,7 @@ EXPORT_SYMBOL_GPL(edac_handlers);
int edac_err_assert = 0;
EXPORT_SYMBOL_GPL(edac_err_assert);
-static atomic_t edac_class_valid = ATOMIC_INIT(0);
+static atomic_t edac_subsys_valid = ATOMIC_INIT(0);
/*
* called to determine if there is an EDAC driver interested in
@@ -54,36 +54,37 @@ EXPORT_SYMBOL_GPL(edac_atomic_assert_error);
* sysfs object: /sys/devices/system/edac
* need to export to other files
*/
-struct sysdev_class edac_class = {
+struct bus_type edac_subsys = {
.name = "edac",
+ .dev_name = "edac",
};
-EXPORT_SYMBOL_GPL(edac_class);
+EXPORT_SYMBOL_GPL(edac_subsys);
/* return pointer to the 'edac' node in sysfs */
-struct sysdev_class *edac_get_sysfs_class(void)
+struct bus_type *edac_get_sysfs_subsys(void)
{
int err = 0;
- if (atomic_read(&edac_class_valid))
+ if (atomic_read(&edac_subsys_valid))
goto out;
/* create the /sys/devices/system/edac directory */
- err = sysdev_class_register(&edac_class);
+ err = subsys_system_register(&edac_subsys, NULL);
if (err) {
printk(KERN_ERR "Error registering toplevel EDAC sysfs dir\n");
return NULL;
}
out:
- atomic_inc(&edac_class_valid);
- return &edac_class;
+ atomic_inc(&edac_subsys_valid);
+ return &edac_subsys;
}
-EXPORT_SYMBOL_GPL(edac_get_sysfs_class);
+EXPORT_SYMBOL_GPL(edac_get_sysfs_subsys);
-void edac_put_sysfs_class(void)
+void edac_put_sysfs_subsys(void)
{
/* last user unregisters it */
- if (atomic_dec_and_test(&edac_class_valid))
- sysdev_class_unregister(&edac_class);
+ if (atomic_dec_and_test(&edac_subsys_valid))
+ bus_unregister(&edac_subsys);
}
-EXPORT_SYMBOL_GPL(edac_put_sysfs_class);
+EXPORT_SYMBOL_GPL(edac_put_sysfs_subsys);
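For illustration only (this mirrors what the converted callers above do; the "example" name and function are hypothetical): a consumer of the new interface takes a reference on the subsystem, hangs its kobject off dev_root, and drops the reference on teardown:

	static int example_register(void)
	{
		struct bus_type *edac_subsys;
		struct kobject *kobj;

		edac_subsys = edac_get_sysfs_subsys();
		if (!edac_subsys)
			return -ENODEV;

		/* Creates /sys/devices/system/edac/example */
		kobj = kobject_create_and_add("example", &edac_subsys->dev_root->kobj);
		if (!kobj) {
			edac_put_sysfs_subsys();
			return -ENOMEM;
		}

		/* On teardown: kobject_put(kobj); edac_put_sysfs_subsys(); */
		return 0;
	}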
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index aa08497..73f55e200 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -15,6 +15,8 @@
#include <linux/io.h>
#include "edac_core.h"
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
+
#define I3200_REVISION "1.1"
#define EDAC_MOD_STR "i3200_edac"
@@ -101,19 +103,6 @@ struct i3200_priv {
static int nr_channels;
-#ifndef readq
-static inline __u64 readq(const volatile void __iomem *addr)
-{
- const volatile u32 __iomem *p = addr;
- u32 low, high;
-
- low = readl(p);
- high = readl(p + 1);
-
- return low + ((u64)high << 32);
-}
-#endif
-
static int how_many_channels(struct pci_dev *pdev)
{
unsigned char capid0_8b; /* 8th byte of CAPID0 */
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 70ad892..8568d9b 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -2234,7 +2234,7 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
if (pvt->enable_scrub)
disable_sdram_scrub_setting(mci);
- atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &i7_mce_dec);
+ mce_unregister_decode_chain(&i7_mce_dec);
/* Disable EDAC polling */
i7core_pci_ctl_release(pvt);
@@ -2336,7 +2336,7 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
/* DCLK for scrub rate setting */
pvt->dclk_freq = get_dclk_freq();
- atomic_notifier_chain_register(&x86_mce_decoder_chain, &i7_mce_dec);
+ mce_register_decode_chain(&i7_mce_dec);
return 0;
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index a5da732..4184e01 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -277,11 +277,9 @@ static void i82975x_get_error_info(struct mem_ctl_info *mci,
static int i82975x_process_error_info(struct mem_ctl_info *mci,
struct i82975x_error_info *info, int handle_errors)
{
- int row, multi_chan, chan;
+ int row, chan;
unsigned long offst, page;
- multi_chan = mci->csrows[0].nr_channels - 1;
-
if (!(info->errsts2 & 0x0003))
return 0;
@@ -294,20 +292,30 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
}
page = (unsigned long) info->eap;
- if (info->xeap & 1)
- page |= 0x100000000ul;
- chan = page & 1;
page >>= 1;
- offst = page & ((1 << PAGE_SHIFT) - 1);
- page >>= PAGE_SHIFT;
+ if (info->xeap & 1)
+ page |= 0x80000000;
+ page >>= (PAGE_SHIFT - 1);
row = edac_mc_find_csrow_by_page(mci, page);
+ if (row == -1) {
+ i82975x_mc_printk(mci, KERN_ERR, "error processing EAP:\n"
+ "\tXEAP=%u\n"
+ "\t EAP=0x%08x\n"
+ "\tPAGE=0x%08x\n",
+ (info->xeap & 1) ? 1 : 0, info->eap, (unsigned int) page);
+ return 0;
+ }
+ chan = (mci->csrows[row].nr_channels == 1) ? 0 : info->eap & 1;
+ offst = info->eap
+ & ((1 << PAGE_SHIFT) -
+ (1 << mci->csrows[row].grain));
+
if (info->errsts & 0x0002)
edac_mc_handle_ue(mci, page, offst , row, "i82975x UE");
else
edac_mc_handle_ce(mci, page, offst, info->derrsyn, row,
- multi_chan ? chan : 0,
- "i82975x CE");
+ chan, "i82975x CE");
return 1;
}
@@ -410,7 +418,7 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
csrow->last_page = cumul_size - 1;
csrow->nr_pages = cumul_size - last_cumul_size;
last_cumul_size = cumul_size;
- csrow->grain = 1 << 6; /* I82975X_EAP has 64B resolution */
+ csrow->grain = 1 << 7; /* 128Byte cache-line resolution */
csrow->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
csrow->dtype = i82975x_dram_type(mch_window, index);
csrow->edac_mode = EDAC_SECDED; /* only supported */
diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c
index d0864d9..bd926ea 100644
--- a/drivers/edac/mce_amd.c
+++ b/drivers/edac/mce_amd.c
@@ -884,7 +884,7 @@ static int __init mce_amd_init(void)
pr_info("MCE: In-kernel MCE decoding enabled.\n");
- atomic_notifier_chain_register(&x86_mce_decoder_chain, &amd_mce_dec_nb);
+ mce_register_decode_chain(&amd_mce_dec_nb);
return 0;
}
@@ -893,7 +893,7 @@ early_initcall(mce_amd_init);
#ifdef MODULE
static void __exit mce_amd_exit(void)
{
- atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &amd_mce_dec_nb);
+ mce_unregister_decode_chain(&amd_mce_dec_nb);
kfree(fam_ops);
}
diff --git a/drivers/edac/mce_amd_inj.c b/drivers/edac/mce_amd_inj.c
index 73c3e26..885e8ad 100644
--- a/drivers/edac/mce_amd_inj.c
+++ b/drivers/edac/mce_amd_inj.c
@@ -11,7 +11,6 @@
*/
#include <linux/kobject.h>
-#include <linux/sysdev.h>
#include <linux/edac.h>
#include <linux/module.h>
#include <asm/mce.h>
@@ -116,14 +115,14 @@ static struct edac_mce_attr *sysfs_attrs[] = { &mce_attr_status, &mce_attr_misc,
static int __init edac_init_mce_inject(void)
{
- struct sysdev_class *edac_class = NULL;
+ struct bus_type *edac_subsys = NULL;
int i, err = 0;
- edac_class = edac_get_sysfs_class();
- if (!edac_class)
+ edac_subsys = edac_get_sysfs_subsys();
+ if (!edac_subsys)
return -EINVAL;
- mce_kobj = kobject_create_and_add("mce", &edac_class->kset.kobj);
+ mce_kobj = kobject_create_and_add("mce", &edac_subsys->dev_root->kobj);
if (!mce_kobj) {
printk(KERN_ERR "Error creating a mce kset.\n");
err = -ENOMEM;
@@ -147,7 +146,7 @@ err_sysfs_create:
kobject_del(mce_kobj);
err_mce_kobj:
- edac_put_sysfs_class();
+ edac_put_sysfs_subsys();
return err;
}
@@ -161,7 +160,7 @@ static void __exit edac_exit_mce_inject(void)
kobject_del(mce_kobj);
- edac_put_sysfs_class();
+ edac_put_sysfs_subsys();
}
module_init(edac_init_mce_inject);
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 3840096..fc75706 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -142,7 +142,7 @@
/*
* The ibm,sdram-4xx-ddr2 Device Control Registers (DCRs) are
- * indirectly acccessed and have a base and length defined by the
+ * indirectly accessed and have a base and length defined by the
* device tree. The base can be anything; however, we expect the
* length to be precisely two registers, the first for the address
* window and the second for the data window.
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index b153674..e294e1b 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -131,7 +131,7 @@ struct r82600_error_info {
u32 eapr;
};
-static unsigned int disable_hardware_scrub;
+static bool disable_hardware_scrub;
static struct edac_pci_ctl_info *r82600_pci;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 7a402bf..1dc118d 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -1609,11 +1609,9 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
mce->cpuvendor, mce->cpuid, mce->time,
mce->socketid, mce->apicid);
-#ifdef CONFIG_SMP
/* Only handle if it is the right mc controller */
if (cpu_data(mce->cpu).phys_proc_id != pvt->sbridge_dev->mc)
return NOTIFY_DONE;
-#endif
smp_rmb();
if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
@@ -1661,8 +1659,7 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
__func__, mci, &sbridge_dev->pdev[0]->dev);
- atomic_notifier_chain_unregister(&x86_mce_decoder_chain,
- &sbridge_mce_dec);
+ mce_unregister_decode_chain(&sbridge_mce_dec);
/* Remove MC sysfs nodes */
edac_mc_del_mc(mci->dev);
@@ -1731,8 +1728,7 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
goto fail0;
}
- atomic_notifier_chain_register(&x86_mce_decoder_chain,
- &sbridge_mce_dec);
+ mce_register_decode_chain(&sbridge_mce_dec);
return 0;
fail0:
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 6628fea..7f5f0da 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -263,6 +263,7 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
static char ohci_driver_name[] = KBUILD_MODNAME;
#define PCI_DEVICE_ID_AGERE_FW643 0x5901
+#define PCI_DEVICE_ID_CREATIVE_SB1394 0x4001
#define PCI_DEVICE_ID_JMICRON_JMB38X_FW 0x2380
#define PCI_DEVICE_ID_TI_TSB12LV22 0x8009
#define PCI_DEVICE_ID_TI_TSB12LV26 0x8020
@@ -289,6 +290,9 @@ static const struct {
{PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
QUIRK_NO_MSI},
+ {PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
+ QUIRK_RESET_PACKET},
+
{PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID,
QUIRK_NO_MSI},
@@ -299,7 +303,7 @@ static const struct {
QUIRK_NO_MSI},
{PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
- QUIRK_CYCLE_TIMER},
+ QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 68375bc..80e95aa 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -66,7 +66,7 @@
*
* Concurrent logins are useful together with cluster filesystems.
*/
-static int sbp2_param_exclusive_login = 1;
+static bool sbp2_param_exclusive_login = 1;
module_param_named(exclusive_login, sbp2_param_exclusive_login, bool, 0644);
MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device "
"(default = Y, use N for concurrent initiators)");
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index efba163..9b00072 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -145,18 +145,6 @@ config ISCSI_IBFT
detect iSCSI boot parameters dynamically during system boot, say Y.
Otherwise, say N.
-config SIGMA
- tristate "SigmaStudio firmware loader"
- depends on I2C
- select CRC32
- default n
- help
- Enable helper functions for working with Analog Devices SigmaDSP
- parts and binary firmwares produced by Analog Devices SigmaStudio.
-
- If unsure, say N here. Drivers that need these helpers will select
- this option automatically.
-
source "drivers/firmware/google/Kconfig"
endmenu
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 47338c9..5a7e273 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -12,6 +12,5 @@ obj-$(CONFIG_DMIID) += dmi-id.o
obj-$(CONFIG_ISCSI_IBFT_FIND) += iscsi_ibft_find.o
obj-$(CONFIG_ISCSI_IBFT) += iscsi_ibft.o
obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
-obj-$(CONFIG_SIGMA) += sigma.o
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
index b0a8117..d25599f 100644
--- a/drivers/firmware/efivars.c
+++ b/drivers/firmware/efivars.c
@@ -495,7 +495,8 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
return 0;
}
-static int efi_pstore_write(enum pstore_type_id type, u64 *id,
+static int efi_pstore_write(enum pstore_type_id type,
+ enum kmsg_dump_reason reason, u64 *id,
unsigned int part, size_t size, struct pstore_info *psi)
{
char name[DUMP_NAME_LEN];
@@ -565,7 +566,7 @@ static int efi_pstore_write(enum pstore_type_id type, u64 *id,
static int efi_pstore_erase(enum pstore_type_id type, u64 id,
struct pstore_info *psi)
{
- efi_pstore_write(type, &id, (unsigned int)id, 0, psi);
+ efi_pstore_write(type, 0, &id, (unsigned int)id, 0, psi);
return 0;
}
@@ -587,7 +588,8 @@ static ssize_t efi_pstore_read(u64 *id, enum pstore_type_id *type,
return -1;
}
-static int efi_pstore_write(enum pstore_type_id type, u64 *id,
+static int efi_pstore_write(enum pstore_type_id type,
+ enum kmsg_dump_reason reason, u64 *id,
unsigned int part, size_t size, struct pstore_info *psi)
{
return 0;
diff --git a/drivers/firmware/google/gsmi.c b/drivers/firmware/google/gsmi.c
index c4e7c59..91ddf0f 100644
--- a/drivers/firmware/google/gsmi.c
+++ b/drivers/firmware/google/gsmi.c
@@ -345,7 +345,8 @@ static efi_status_t gsmi_get_variable(efi_char16_t *name,
memcpy(&param, gsmi_dev.param_buf->start, sizeof(param));
/* The size reported is the min of all of our buffers */
- *data_size = min(*data_size, gsmi_dev.data_buf->length);
+ *data_size = min_t(unsigned long, *data_size,
+ gsmi_dev.data_buf->length);
*data_size = min_t(unsigned long, *data_size, param.data_len);
/* Copy data back to return buffer. */
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 2cce44a..3ee852c 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -433,11 +433,11 @@ static int __init ibft_check_device(void)
* Helper routiners to check to determine if the entry is valid
* in the proper iBFT structure.
*/
-static mode_t ibft_check_nic_for(void *data, int type)
+static umode_t ibft_check_nic_for(void *data, int type)
{
struct ibft_kobject *entry = data;
struct ibft_nic *nic = entry->nic;
- mode_t rc = 0;
+ umode_t rc = 0;
switch (type) {
case ISCSI_BOOT_ETH_INDEX:
@@ -488,11 +488,11 @@ static mode_t ibft_check_nic_for(void *data, int type)
return rc;
}
-static mode_t __init ibft_check_tgt_for(void *data, int type)
+static umode_t __init ibft_check_tgt_for(void *data, int type)
{
struct ibft_kobject *entry = data;
struct ibft_tgt *tgt = entry->tgt;
- mode_t rc = 0;
+ umode_t rc = 0;
switch (type) {
case ISCSI_BOOT_TGT_INDEX:
@@ -524,11 +524,11 @@ static mode_t __init ibft_check_tgt_for(void *data, int type)
return rc;
}
-static mode_t __init ibft_check_initiator_for(void *data, int type)
+static umode_t __init ibft_check_initiator_for(void *data, int type)
{
struct ibft_kobject *entry = data;
struct ibft_initiator *init = entry->initiator;
- mode_t rc = 0;
+ umode_t rc = 0;
switch (type) {
case ISCSI_BOOT_INI_INDEX:
diff --git a/drivers/firmware/sigma.c b/drivers/firmware/sigma.c
deleted file mode 100644
index 1eedb6f..0000000
--- a/drivers/firmware/sigma.c
+++ /dev/null
@@ -1,153 +0,0 @@
-/*
- * Load Analog Devices SigmaStudio firmware files
- *
- * Copyright 2009-2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/crc32.h>
-#include <linux/delay.h>
-#include <linux/firmware.h>
-#include <linux/kernel.h>
-#include <linux/i2c.h>
-#include <linux/module.h>
-#include <linux/sigma.h>
-
-static size_t sigma_action_size(struct sigma_action *sa)
-{
- size_t payload = 0;
-
- switch (sa->instr) {
- case SIGMA_ACTION_WRITEXBYTES:
- case SIGMA_ACTION_WRITESINGLE:
- case SIGMA_ACTION_WRITESAFELOAD:
- payload = sigma_action_len(sa);
- break;
- default:
- break;
- }
-
- payload = ALIGN(payload, 2);
-
- return payload + sizeof(struct sigma_action);
-}
-
-/*
- * Returns a negative error value in case of an error, 0 if processing of
- * the firmware should be stopped after this action, 1 otherwise.
- */
-static int
-process_sigma_action(struct i2c_client *client, struct sigma_action *sa)
-{
- size_t len = sigma_action_len(sa);
- int ret;
-
- pr_debug("%s: instr:%i addr:%#x len:%zu\n", __func__,
- sa->instr, sa->addr, len);
-
- switch (sa->instr) {
- case SIGMA_ACTION_WRITEXBYTES:
- case SIGMA_ACTION_WRITESINGLE:
- case SIGMA_ACTION_WRITESAFELOAD:
- ret = i2c_master_send(client, (void *)&sa->addr, len);
- if (ret < 0)
- return -EINVAL;
- break;
- case SIGMA_ACTION_DELAY:
- udelay(len);
- len = 0;
- break;
- case SIGMA_ACTION_END:
- return 0;
- default:
- return -EINVAL;
- }
-
- return 1;
-}
-
-static int
-process_sigma_actions(struct i2c_client *client, struct sigma_firmware *ssfw)
-{
- struct sigma_action *sa;
- size_t size;
- int ret;
-
- while (ssfw->pos + sizeof(*sa) <= ssfw->fw->size) {
- sa = (struct sigma_action *)(ssfw->fw->data + ssfw->pos);
-
- size = sigma_action_size(sa);
- ssfw->pos += size;
- if (ssfw->pos > ssfw->fw->size || size == 0)
- break;
-
- ret = process_sigma_action(client, sa);
-
- pr_debug("%s: action returned %i\n", __func__, ret);
-
- if (ret <= 0)
- return ret;
- }
-
- if (ssfw->pos != ssfw->fw->size)
- return -EINVAL;
-
- return 0;
-}
-
-int process_sigma_firmware(struct i2c_client *client, const char *name)
-{
- int ret;
- struct sigma_firmware_header *ssfw_head;
- struct sigma_firmware ssfw;
- const struct firmware *fw;
- u32 crc;
-
- pr_debug("%s: loading firmware %s\n", __func__, name);
-
- /* first load the blob */
- ret = request_firmware(&fw, name, &client->dev);
- if (ret) {
- pr_debug("%s: request_firmware() failed with %i\n", __func__, ret);
- return ret;
- }
- ssfw.fw = fw;
-
- /* then verify the header */
- ret = -EINVAL;
-
- /*
- * Reject too small or unreasonable large files. The upper limit has been
- * chosen a bit arbitrarily, but it should be enough for all practical
- * purposes and having the limit makes it easier to avoid integer
- * overflows later in the loading process.
- */
- if (fw->size < sizeof(*ssfw_head) || fw->size >= 0x4000000)
- goto done;
-
- ssfw_head = (void *)fw->data;
- if (memcmp(ssfw_head->magic, SIGMA_MAGIC, ARRAY_SIZE(ssfw_head->magic)))
- goto done;
-
- crc = crc32(0, fw->data + sizeof(*ssfw_head),
- fw->size - sizeof(*ssfw_head));
- pr_debug("%s: crc=%x\n", __func__, crc);
- if (crc != le32_to_cpu(ssfw_head->crc))
- goto done;
-
- ssfw.pos = sizeof(*ssfw_head);
-
- /* finally process all of the actions */
- ret = process_sigma_actions(client, &ssfw);
-
- done:
- release_firmware(fw);
-
- pr_debug("%s: loaded %s\n", __func__, name);
-
- return ret;
-}
-EXPORT_SYMBOL(process_sigma_firmware);
-
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 8482a23..d0c4118 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -70,7 +70,7 @@ config GPIO_GENERIC
config GPIO_DA9052
tristate "Dialog DA9052 GPIO"
- depends on PMIC_DA9052
+ depends on PMIC_DA9052 && BROKEN
help
Say yes here to enable the GPIO driver for the DA9052 chip.
@@ -87,6 +87,7 @@ config GPIO_GENERIC_PLATFORM
config GPIO_IT8761E
tristate "IT8761E GPIO support"
+ depends on X86 # unconditional access to IO space.
help
Say yes here to support GPIO functionality of IT8761E super I/O chip.
@@ -138,9 +139,16 @@ config GPIO_MXS
config GPIO_PL061
bool "PrimeCell PL061 GPIO support"
depends on ARM_AMBA
+ select GENERIC_IRQ_CHIP
help
Say yes here to support the PrimeCell PL061 GPIO device
+config GPIO_PXA
+ bool "PXA GPIO support"
+ depends on ARCH_PXA || ARCH_MMP
+ help
+ Say yes here to support the PXA GPIO device
+
config GPIO_XILINX
bool "Xilinx GPIO support"
depends on PPC_OF || MICROBLAZE
@@ -170,15 +178,6 @@ config GPIO_SCH
The Intel Tunnel Creek processor has 5 GPIOs powered by the
core power rail and 9 from suspend power supply.
-config GPIO_U300
- bool "ST-Ericsson U300 COH 901 335/571 GPIO"
- depends on GPIOLIB && ARCH_U300
- help
- Say yes here to support GPIO interface on ST-Ericsson U300.
- The names of the two IP block variants supported are
- COH 901 335 and COH 901 571/3. They contain 3, 5 or 7
- ports of 8 GPIO pins each.
-
config GPIO_VX855
tristate "VIA VX855/VX875 GPIO"
depends on PCI
@@ -356,7 +355,7 @@ comment "PCI GPIO expanders:"
config GPIO_CS5535
tristate "AMD CS5535/CS5536 GPIO support"
- depends on PCI && X86 && !CS5535_GPIO && MFD_CS5535
+ depends on PCI && X86 && MFD_CS5535
help
The AMD CS5535 and CS5536 southbridges support 28 GPIO pins that
can be used for quite a number of things. The CS5535/6 is found on
@@ -387,7 +386,7 @@ config GPIO_LANGWELL
Say Y here to support Intel Langwell/Penwell GPIO.
config GPIO_PCH
- tristate "Intel EG20T PCH / OKI SEMICONDUCTOR ML7223 IOH GPIO"
+ tristate "Intel EG20T PCH/LAPIS Semiconductor IOH(ML7223/ML7831) GPIO"
depends on PCI && X86
select GENERIC_IRQ_CHIP
help
@@ -395,11 +394,12 @@ config GPIO_PCH
which is an IOH(Input/Output Hub) for x86 embedded processor.
This driver can access PCH GPIO device.
- This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
- Output Hub), ML7223.
+ This driver can also be used for the LAPIS Semiconductor IOH (Input/
+ Output Hub), ML7223 and ML7831.
ML7223 IOH is for MP(Media Phone) use.
- ML7223 is companion chip for Intel Atom E6xx series.
- ML7223 is completely compatible for Intel EG20T PCH.
+ ML7831 IOH is for general purpose use.
+ ML7223/ML7831 are companion chips for the Intel Atom E6xx series.
+ ML7223/ML7831 are completely compatible with the Intel EG20T PCH.
config GPIO_ML_IOH
tristate "OKI SEMICONDUCTOR ML7213 IOH GPIO support"
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 4e018d6..fa10df6 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -2,7 +2,7 @@
ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG
-obj-$(CONFIG_GPIOLIB) += gpiolib.o
+obj-$(CONFIG_GPIOLIB) += gpiolib.o devres.o
# Device drivers. Generally keep list sorted alphabetically
obj-$(CONFIG_GPIO_GENERIC) += gpio-generic.o
@@ -40,7 +40,7 @@ obj-$(CONFIG_GPIO_PCA953X) += gpio-pca953x.o
obj-$(CONFIG_GPIO_PCF857X) += gpio-pcf857x.o
obj-$(CONFIG_GPIO_PCH) += gpio-pch.o
obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
-obj-$(CONFIG_PLAT_PXA) += gpio-pxa.o
+obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
obj-$(CONFIG_GPIO_RDC321X) += gpio-rdc321x.o
obj-$(CONFIG_PLAT_SAMSUNG) += gpio-samsung.o
obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o
@@ -54,7 +54,6 @@ obj-$(CONFIG_ARCH_DAVINCI_TNETV107X) += gpio-tnetv107x.o
obj-$(CONFIG_GPIO_TPS65910) += gpio-tps65910.o
obj-$(CONFIG_GPIO_TPS65912) += gpio-tps65912.o
obj-$(CONFIG_GPIO_TWL4030) += gpio-twl4030.o
-obj-$(CONFIG_MACH_U300) += gpio-u300.o
obj-$(CONFIG_GPIO_UCB1400) += gpio-ucb1400.o
obj-$(CONFIG_GPIO_VR41XX) += gpio-vr41xx.o
obj-$(CONFIG_GPIO_VX855) += gpio-vx855.o
diff --git a/drivers/gpio/devres.c b/drivers/gpio/devres.c
new file mode 100644
index 0000000..3dd2939
--- /dev/null
+++ b/drivers/gpio/devres.c
@@ -0,0 +1,90 @@
+/*
+ * drivers/gpio/devres.c - managed gpio resources
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This file is based on kernel/irq/devres.c
+ *
+ * Copyright (c) 2011 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/device.h>
+#include <linux/gfp.h>
+
+static void devm_gpio_release(struct device *dev, void *res)
+{
+ unsigned *gpio = res;
+
+ gpio_free(*gpio);
+}
+
+static int devm_gpio_match(struct device *dev, void *res, void *data)
+{
+ unsigned *this = res, *gpio = data;
+
+ return *this == *gpio;
+}
+
+/**
+ * devm_gpio_request - request a gpio for a managed device
+ * @dev: device to request the gpio for
+ * @gpio: gpio to allocate
+ * @label: the name of the requested gpio
+ *
+ * Except for the extra @dev argument, this function takes the
+ * same arguments and performs the same function as
+ * gpio_request(). GPIOs requested with this function will be
+ * automatically freed on driver detach.
+ *
+ * If a GPIO allocated with this function needs to be freed
+ * separately, devm_gpio_free() must be used.
+ */
+
+int devm_gpio_request(struct device *dev, unsigned gpio, const char *label)
+{
+ unsigned *dr;
+ int rc;
+
+ dr = devres_alloc(devm_gpio_release, sizeof(unsigned), GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ rc = gpio_request(gpio, label);
+ if (rc) {
+ devres_free(dr);
+ return rc;
+ }
+
+ *dr = gpio;
+ devres_add(dev, dr);
+
+ return 0;
+}
+EXPORT_SYMBOL(devm_gpio_request);
+
+/**
+ * devm_gpio_free - free a GPIO
+ * @dev: device to free gpio for
+ * @gpio: gpio to free
+ *
+ * Except for the extra @dev argument, this function takes the
+ * same arguments and performs the same function as gpio_free().
+ * This function, instead of gpio_free(), should be used to manually
+ * free GPIOs allocated with devm_gpio_request().
+ */
+void devm_gpio_free(struct device *dev, unsigned int gpio)
+{
+ WARN_ON(devres_destroy(dev, devm_gpio_release, devm_gpio_match,
+ &gpio));
+ gpio_free(gpio);
+}
+EXPORT_SYMBOL(devm_gpio_free);
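For reference, a minimal sketch of how a consumer driver might use the managed helpers above; the foo_* callbacks, GPIO number 42 and the "foo-reset" label are illustrative and not part of this patch:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>

static int __devinit foo_probe(struct platform_device *pdev)
{
	int ret;

	/* The GPIO is released automatically when the driver detaches. */
	ret = devm_gpio_request(&pdev->dev, 42, "foo-reset");
	if (ret)
		return ret;

	/* Drive the (illustrative) reset line high. */
	return gpio_direction_output(42, 1);
}

static int __devexit foo_remove(struct platform_device *pdev)
{
	/* No explicit gpio_free() here -- devres releases the GPIO. */
	return 0;
}

devm_gpio_free() is only needed when the GPIO has to be released before the driver is unbound.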
diff --git a/drivers/gpio/gpio-adp5520.c b/drivers/gpio/gpio-adp5520.c
index 9f27815..2f263cc 100644
--- a/drivers/gpio/gpio-adp5520.c
+++ b/drivers/gpio/gpio-adp5520.c
@@ -193,17 +193,7 @@ static struct platform_driver adp5520_gpio_driver = {
.remove = __devexit_p(adp5520_gpio_remove),
};
-static int __init adp5520_gpio_init(void)
-{
- return platform_driver_register(&adp5520_gpio_driver);
-}
-module_init(adp5520_gpio_init);
-
-static void __exit adp5520_gpio_exit(void)
-{
- platform_driver_unregister(&adp5520_gpio_driver);
-}
-module_exit(adp5520_gpio_exit);
+module_platform_driver(adp5520_gpio_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("GPIO ADP5520 Driver");
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index 3525ad9..9ad1703 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -418,9 +418,8 @@ static int __devinit adp5588_gpio_probe(struct i2c_client *client,
if (ret)
goto err_irq;
- dev_info(&client->dev, "gpios %d..%d (IRQ Base %d) on a %s Rev. %d\n",
- gc->base, gc->base + gc->ngpio - 1,
- pdata->irq_base, client->name, revid);
+ dev_info(&client->dev, "IRQ Base: %d Rev.: %d\n",
+ pdata->irq_base, revid);
if (pdata->setup) {
ret = pdata->setup(client, gc->base, gc->ngpio, pdata->context);
diff --git a/drivers/gpio/gpio-bt8xx.c b/drivers/gpio/gpio-bt8xx.c
index ec57936..5ca4098 100644
--- a/drivers/gpio/gpio-bt8xx.c
+++ b/drivers/gpio/gpio-bt8xx.c
@@ -223,9 +223,6 @@ static int bt8xxgpio_probe(struct pci_dev *dev,
goto err_release_mem;
}
- printk(KERN_INFO "bt8xxgpio: Abusing BT8xx card for GPIOs %d to %d\n",
- bg->gpio.base, bg->gpio.base + BT8XXGPIO_NR_GPIOS - 1);
-
return 0;
err_release_mem:
diff --git a/drivers/gpio/gpio-cs5535.c b/drivers/gpio/gpio-cs5535.c
index 6e16cba..19eda1b 100644
--- a/drivers/gpio/gpio-cs5535.c
+++ b/drivers/gpio/gpio-cs5535.c
@@ -347,7 +347,6 @@ static int __devinit cs5535_gpio_probe(struct platform_device *pdev)
if (err)
goto release_region;
- dev_info(&pdev->dev, "GPIO support successfully loaded.\n");
return 0;
release_region:
@@ -382,18 +381,7 @@ static struct platform_driver cs5535_gpio_driver = {
.remove = __devexit_p(cs5535_gpio_remove),
};
-static int __init cs5535_gpio_init(void)
-{
- return platform_driver_register(&cs5535_gpio_driver);
-}
-
-static void __exit cs5535_gpio_exit(void)
-{
- platform_driver_unregister(&cs5535_gpio_driver);
-}
-
-module_init(cs5535_gpio_init);
-module_exit(cs5535_gpio_exit);
+module_platform_driver(cs5535_gpio_driver);
MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
MODULE_DESCRIPTION("AMD CS5535/CS5536 GPIO driver");
diff --git a/drivers/gpio/gpio-da9052.c b/drivers/gpio/gpio-da9052.c
index f8ce29e..56dd047 100644
--- a/drivers/gpio/gpio-da9052.c
+++ b/drivers/gpio/gpio-da9052.c
@@ -254,17 +254,7 @@ static struct platform_driver da9052_gpio_driver = {
},
};
-static int __init da9052_gpio_init(void)
-{
- return platform_driver_register(&da9052_gpio_driver);
-}
-module_init(da9052_gpio_init);
-
-static void __exit da9052_gpio_exit(void)
-{
- return platform_driver_unregister(&da9052_gpio_driver);
-}
-module_exit(da9052_gpio_exit);
+module_platform_driver(da9052_gpio_driver);
MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
MODULE_DESCRIPTION("DA9052 GPIO Device Driver");
diff --git a/drivers/gpio/gpio-generic.c b/drivers/gpio/gpio-generic.c
index 4e24436..e38dd0c 100644
--- a/drivers/gpio/gpio-generic.c
+++ b/drivers/gpio/gpio-generic.c
@@ -524,17 +524,7 @@ static struct platform_driver bgpio_driver = {
.remove = __devexit_p(bgpio_pdev_remove),
};
-static int __init bgpio_platform_init(void)
-{
- return platform_driver_register(&bgpio_driver);
-}
-module_init(bgpio_platform_init);
-
-static void __exit bgpio_platform_exit(void)
-{
- platform_driver_unregister(&bgpio_driver);
-}
-module_exit(bgpio_platform_exit);
+module_platform_driver(bgpio_driver);
#endif /* CONFIG_GPIO_GENERIC_PLATFORM */
diff --git a/drivers/gpio/gpio-janz-ttl.c b/drivers/gpio/gpio-janz-ttl.c
index 813ac07..f2f000d 100644
--- a/drivers/gpio/gpio-janz-ttl.c
+++ b/drivers/gpio/gpio-janz-ttl.c
@@ -201,8 +201,6 @@ static int __devinit ttl_probe(struct platform_device *pdev)
goto out_iounmap_regs;
}
- dev_info(&pdev->dev, "module %d: registered GPIO device\n",
- pdata->modno);
return 0;
out_iounmap_regs:
@@ -239,20 +237,9 @@ static struct platform_driver ttl_driver = {
.remove = __devexit_p(ttl_remove),
};
-static int __init ttl_init(void)
-{
- return platform_driver_register(&ttl_driver);
-}
-
-static void __exit ttl_exit(void)
-{
- platform_driver_unregister(&ttl_driver);
-}
+module_platform_driver(ttl_driver);
MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
MODULE_DESCRIPTION("Janz MODULbus VMOD-TTL Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:janz-ttl");
-
-module_init(ttl_init);
-module_exit(ttl_exit);
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index 5b69480..ddfacc5 100644
--- a/drivers/gpio/gpio-lpc32xx.c
+++ b/drivers/gpio/gpio-lpc32xx.c
@@ -96,7 +96,7 @@ static const char *gpio_p2_names[LPC32XX_GPIO_P2_MAX] = {
};
static const char *gpio_p3_names[LPC32XX_GPIO_P3_MAX] = {
- "gpi000", "gpio01", "gpio02", "gpio03",
+ "gpio00", "gpio01", "gpio02", "gpio03",
"gpio04", "gpio05"
};
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index 461958f..f0febe5 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -248,7 +248,7 @@ static void ioh_gpio_setup(struct ioh_gpio *chip, int num_port)
static int ioh_irq_type(struct irq_data *d, unsigned int type)
{
u32 im;
- u32 *im_reg;
+ void __iomem *im_reg;
u32 ien;
u32 im_pos;
int ch;
@@ -412,7 +412,7 @@ static int __devinit ioh_gpio_probe(struct pci_dev *pdev,
int i, j;
struct ioh_gpio *chip;
void __iomem *base;
- void __iomem *chip_save;
+ void *chip_save;
int irq_base;
ret = pci_enable_device(pdev);
@@ -428,7 +428,7 @@ static int __devinit ioh_gpio_probe(struct pci_dev *pdev,
}
base = pci_iomap(pdev, 1, 0);
- if (base == 0) {
+ if (!base) {
dev_err(&pdev->dev, "%s : pci_iomap failed", __func__);
ret = -ENOMEM;
goto err_iomap;
@@ -448,6 +448,7 @@ static int __devinit ioh_gpio_probe(struct pci_dev *pdev,
chip->reg = chip->base;
chip->ch = i;
mutex_init(&chip->lock);
+ spin_lock_init(&chip->spinlock);
ioh_gpio_setup(chip, num_ports[i]);
ret = gpiochip_add(&chip->gpio);
if (ret) {
@@ -521,7 +522,7 @@ static void __devexit ioh_gpio_remove(struct pci_dev *pdev)
int err;
int i;
struct ioh_gpio *chip = pci_get_drvdata(pdev);
- void __iomem *chip_save;
+ void *chip_save;
chip_save = chip;
diff --git a/drivers/gpio/gpio-nomadik.c b/drivers/gpio/gpio-nomadik.c
index 1ebedfb..839624f 100644
--- a/drivers/gpio/gpio-nomadik.c
+++ b/drivers/gpio/gpio-nomadik.c
@@ -1150,8 +1150,8 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
nmk_gpio_init_irq(nmk_chip);
- dev_info(&dev->dev, "Bits %i-%i at address %p\n",
- nmk_chip->chip.base, nmk_chip->chip.base+31, nmk_chip->addr);
+ dev_info(&dev->dev, "at address %p\n",
+ nmk_chip->addr);
return 0;
out_free:
diff --git a/drivers/gpio/gpio-pcf857x.c b/drivers/gpio/gpio-pcf857x.c
index 3e1f1ec..2d1de9e 100644
--- a/drivers/gpio/gpio-pcf857x.c
+++ b/drivers/gpio/gpio-pcf857x.c
@@ -290,10 +290,7 @@ static int pcf857x_probe(struct i2c_client *client,
* methods can't be called from sleeping contexts.
*/
- dev_info(&client->dev, "gpios %d..%d on a %s%s\n",
- gpio->chip.base,
- gpio->chip.base + gpio->chip.ngpio - 1,
- client->name,
+ dev_info(&client->dev, "%s\n",
client->irq ? " (irq ignored)" : "");
/* Let platform code set up the GPIOs and their users.
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index a6008e1..e8729cc 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -49,8 +49,8 @@ struct pch_regs {
enum pch_type_t {
INTEL_EG20T_PCH,
- OKISEMI_ML7223m_IOH, /* OKISEMI ML7223 IOH PCIe Bus-m */
- OKISEMI_ML7223n_IOH /* OKISEMI ML7223 IOH PCIe Bus-n */
+ OKISEMI_ML7223m_IOH, /* LAPIS Semiconductor ML7223 IOH PCIe Bus-m */
+ OKISEMI_ML7223n_IOH /* LAPIS Semiconductor ML7223 IOH PCIe Bus-n */
};
/* Specifies number of GPIO PINS */
@@ -231,7 +231,7 @@ static void pch_gpio_setup(struct pch_gpio *chip)
static int pch_irq_type(struct irq_data *d, unsigned int type)
{
u32 im;
- u32 *im_reg;
+ u32 __iomem *im_reg;
u32 ien;
u32 im_pos;
int ch;
@@ -376,7 +376,7 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev,
}
chip->base = pci_iomap(pdev, 1, 0);
- if (chip->base == 0) {
+ if (!chip->base) {
dev_err(&pdev->dev, "%s : pci_iomap FAILED", __func__);
ret = -ENOMEM;
goto err_iomap;
@@ -392,6 +392,7 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev,
chip->reg = chip->base;
pci_set_drvdata(pdev, chip);
mutex_init(&chip->lock);
+ spin_lock_init(&chip->spinlock);
pch_gpio_setup(chip);
ret = gpiochip_add(&chip->gpio);
if (ret) {
@@ -524,6 +525,7 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gpio_pcidev_id) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) },
{ PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8014) },
{ PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8043) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x8803) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, pch_gpio_pcidev_id);
diff --git a/drivers/gpio/gpio-pl061.c b/drivers/gpio/gpio-pl061.c
index 4102f63..77c9cc7 100644
--- a/drivers/gpio/gpio-pl061.c
+++ b/drivers/gpio/gpio-pl061.c
@@ -12,7 +12,6 @@
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/module.h>
-#include <linux/list.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
@@ -23,6 +22,8 @@
#include <linux/amba/bus.h>
#include <linux/amba/pl061.h>
#include <linux/slab.h>
+#include <linux/pm.h>
+#include <asm/mach/irq.h>
#define GPIODIR 0x400
#define GPIOIS 0x404
@@ -35,25 +36,33 @@
#define PL061_GPIO_NR 8
-struct pl061_gpio {
- /* We use a list of pl061_gpio structs for each trigger IRQ in the main
- * interrupts controller of the system. We need this to support systems
- * in which more that one PL061s are connected to the same IRQ. The ISR
- * interates through this list to find the source of the interrupt.
- */
- struct list_head list;
+#ifdef CONFIG_PM
+struct pl061_context_save_regs {
+ u8 gpio_data;
+ u8 gpio_dir;
+ u8 gpio_is;
+ u8 gpio_ibe;
+ u8 gpio_iev;
+ u8 gpio_ie;
+};
+#endif
+struct pl061_gpio {
/* Each of the two spinlocks protects a different set of hardware
 * registers and data structures. This decouples the code of the IRQ from
* the GPIO code. This also makes the case of a GPIO routine call from
* the IRQ code simpler.
*/
spinlock_t lock; /* GPIO registers */
- spinlock_t irq_lock; /* IRQ registers */
void __iomem *base;
- unsigned irq_base;
+ int irq_base;
+ struct irq_chip_generic *irq_gc;
struct gpio_chip gc;
+
+#ifdef CONFIG_PM
+ struct pl061_context_save_regs csave_regs;
+#endif
};
static int pl061_direction_input(struct gpio_chip *gc, unsigned offset)
@@ -118,46 +127,16 @@ static int pl061_to_irq(struct gpio_chip *gc, unsigned offset)
{
struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
- if (chip->irq_base == NO_IRQ)
+ if (chip->irq_base <= 0)
return -EINVAL;
return chip->irq_base + offset;
}
-/*
- * PL061 GPIO IRQ
- */
-static void pl061_irq_disable(struct irq_data *d)
-{
- struct pl061_gpio *chip = irq_data_get_irq_chip_data(d);
- int offset = d->irq - chip->irq_base;
- unsigned long flags;
- u8 gpioie;
-
- spin_lock_irqsave(&chip->irq_lock, flags);
- gpioie = readb(chip->base + GPIOIE);
- gpioie &= ~(1 << offset);
- writeb(gpioie, chip->base + GPIOIE);
- spin_unlock_irqrestore(&chip->irq_lock, flags);
-}
-
-static void pl061_irq_enable(struct irq_data *d)
-{
- struct pl061_gpio *chip = irq_data_get_irq_chip_data(d);
- int offset = d->irq - chip->irq_base;
- unsigned long flags;
- u8 gpioie;
-
- spin_lock_irqsave(&chip->irq_lock, flags);
- gpioie = readb(chip->base + GPIOIE);
- gpioie |= 1 << offset;
- writeb(gpioie, chip->base + GPIOIE);
- spin_unlock_irqrestore(&chip->irq_lock, flags);
-}
-
static int pl061_irq_type(struct irq_data *d, unsigned trigger)
{
- struct pl061_gpio *chip = irq_data_get_irq_chip_data(d);
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct pl061_gpio *chip = gc->private;
int offset = d->irq - chip->irq_base;
unsigned long flags;
u8 gpiois, gpioibe, gpioiev;
@@ -165,7 +144,7 @@ static int pl061_irq_type(struct irq_data *d, unsigned trigger)
if (offset < 0 || offset >= PL061_GPIO_NR)
return -EINVAL;
- spin_lock_irqsave(&chip->irq_lock, flags);
+ raw_spin_lock_irqsave(&gc->lock, flags);
gpioiev = readb(chip->base + GPIOIEV);
@@ -194,49 +173,54 @@ static int pl061_irq_type(struct irq_data *d, unsigned trigger)
writeb(gpioiev, chip->base + GPIOIEV);
- spin_unlock_irqrestore(&chip->irq_lock, flags);
+ raw_spin_unlock_irqrestore(&gc->lock, flags);
return 0;
}
-static struct irq_chip pl061_irqchip = {
- .name = "GPIO",
- .irq_enable = pl061_irq_enable,
- .irq_disable = pl061_irq_disable,
- .irq_set_type = pl061_irq_type,
-};
-
static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
{
- struct list_head *chip_list = irq_get_handler_data(irq);
- struct list_head *ptr;
- struct pl061_gpio *chip;
-
- desc->irq_data.chip->irq_ack(&desc->irq_data);
- list_for_each(ptr, chip_list) {
- unsigned long pending;
- int offset;
+ unsigned long pending;
+ int offset;
+ struct pl061_gpio *chip = irq_desc_get_handler_data(desc);
+ struct irq_chip *irqchip = irq_desc_get_chip(desc);
- chip = list_entry(ptr, struct pl061_gpio, list);
- pending = readb(chip->base + GPIOMIS);
- writeb(pending, chip->base + GPIOIC);
-
- if (pending == 0)
- continue;
+ chained_irq_enter(irqchip, desc);
+ pending = readb(chip->base + GPIOMIS);
+ writeb(pending, chip->base + GPIOIC);
+ if (pending) {
for_each_set_bit(offset, &pending, PL061_GPIO_NR)
generic_handle_irq(pl061_to_irq(&chip->gc, offset));
}
- desc->irq_data.chip->irq_unmask(&desc->irq_data);
+
+ chained_irq_exit(irqchip, desc);
+}
+
+static void __init pl061_init_gc(struct pl061_gpio *chip, int irq_base)
+{
+ struct irq_chip_type *ct;
+
+ chip->irq_gc = irq_alloc_generic_chip("gpio-pl061", 1, irq_base,
+ chip->base, handle_simple_irq);
+ chip->irq_gc->private = chip;
+
+ ct = chip->irq_gc->chip_types;
+ ct->chip.irq_mask = irq_gc_mask_clr_bit;
+ ct->chip.irq_unmask = irq_gc_mask_set_bit;
+ ct->chip.irq_set_type = pl061_irq_type;
+ ct->chip.irq_set_wake = irq_gc_set_wake;
+ ct->regs.mask = GPIOIE;
+
+ irq_setup_generic_chip(chip->irq_gc, IRQ_MSK(PL061_GPIO_NR),
+ IRQ_GC_INIT_NESTED_LOCK, IRQ_NOREQUEST, 0);
}
static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
{
struct pl061_platform_data *pdata;
struct pl061_gpio *chip;
- struct list_head *chip_list;
int ret, irq, i;
- static DECLARE_BITMAP(init_irq, NR_IRQS);
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
@@ -248,7 +232,7 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
chip->irq_base = pdata->irq_base;
} else if (dev->dev.of_node) {
chip->gc.base = -1;
- chip->irq_base = NO_IRQ;
+ chip->irq_base = 0;
} else {
ret = -ENODEV;
goto free_mem;
@@ -267,8 +251,6 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
}
spin_lock_init(&chip->lock);
- spin_lock_init(&chip->irq_lock);
- INIT_LIST_HEAD(&chip->list);
chip->gc.direction_input = pl061_direction_input;
chip->gc.direction_output = pl061_direction_output;
@@ -288,9 +270,11 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
* irq_chip support
*/
- if (chip->irq_base == NO_IRQ)
+ if (chip->irq_base <= 0)
return 0;
+ pl061_init_gc(chip, chip->irq_base);
+
writeb(0, chip->base + GPIOIE); /* disable irqs */
irq = dev->irq[0];
if (irq < 0) {
@@ -298,18 +282,7 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
goto iounmap;
}
irq_set_chained_handler(irq, pl061_irq_handler);
- if (!test_and_set_bit(irq, init_irq)) { /* list initialized? */
- chip_list = kmalloc(sizeof(*chip_list), GFP_KERNEL);
- if (chip_list == NULL) {
- clear_bit(irq, init_irq);
- ret = -ENOMEM;
- goto iounmap;
- }
- INIT_LIST_HEAD(chip_list);
- irq_set_handler_data(irq, chip_list);
- } else
- chip_list = irq_get_handler_data(irq);
- list_add(&chip->list, chip_list);
+ irq_set_handler_data(irq, chip);
for (i = 0; i < PL061_GPIO_NR; i++) {
if (pdata) {
@@ -319,13 +292,10 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
else
pl061_direction_input(&chip->gc, i);
}
-
- irq_set_chip_and_handler(i + chip->irq_base, &pl061_irqchip,
- handle_simple_irq);
- set_irq_flags(i+chip->irq_base, IRQF_VALID);
- irq_set_chip_data(i + chip->irq_base, chip);
}
+ amba_set_drvdata(dev, chip);
+
return 0;
iounmap:
@@ -338,6 +308,53 @@ free_mem:
return ret;
}
+#ifdef CONFIG_PM
+static int pl061_suspend(struct device *dev)
+{
+ struct pl061_gpio *chip = dev_get_drvdata(dev);
+ int offset;
+
+ chip->csave_regs.gpio_data = 0;
+ chip->csave_regs.gpio_dir = readb(chip->base + GPIODIR);
+ chip->csave_regs.gpio_is = readb(chip->base + GPIOIS);
+ chip->csave_regs.gpio_ibe = readb(chip->base + GPIOIBE);
+ chip->csave_regs.gpio_iev = readb(chip->base + GPIOIEV);
+ chip->csave_regs.gpio_ie = readb(chip->base + GPIOIE);
+
+ for (offset = 0; offset < PL061_GPIO_NR; offset++) {
+ if (chip->csave_regs.gpio_dir & (1 << offset))
+ chip->csave_regs.gpio_data |=
+ pl061_get_value(&chip->gc, offset) << offset;
+ }
+
+ return 0;
+}
+
+static int pl061_resume(struct device *dev)
+{
+ struct pl061_gpio *chip = dev_get_drvdata(dev);
+ int offset;
+
+ for (offset = 0; offset < PL061_GPIO_NR; offset++) {
+ if (chip->csave_regs.gpio_dir & (1 << offset))
+ pl061_direction_output(&chip->gc, offset,
+ chip->csave_regs.gpio_data &
+ (1 << offset));
+ else
+ pl061_direction_input(&chip->gc, offset);
+ }
+
+ writeb(chip->csave_regs.gpio_is, chip->base + GPIOIS);
+ writeb(chip->csave_regs.gpio_ibe, chip->base + GPIOIBE);
+ writeb(chip->csave_regs.gpio_iev, chip->base + GPIOIEV);
+ writeb(chip->csave_regs.gpio_ie, chip->base + GPIOIE);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(pl061_dev_pm_ops, pl061_suspend, pl061_resume);
+#endif
+
static struct amba_id pl061_ids[] = {
{
.id = 0x00041061,
@@ -346,9 +363,14 @@ static struct amba_id pl061_ids[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, pl061_ids);
+
static struct amba_driver pl061_gpio_driver = {
.drv = {
.name = "pl061_gpio",
+#ifdef CONFIG_PM
+ .pm = &pl061_dev_pm_ops,
+#endif
},
.id_table = pl061_ids,
.probe = pl061_probe,
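Per-GPIO interrupts are still only wired up when the board supplies a positive irq_base; a hedged example of the platform data a board file might pass, where the IRQ base and the initial directions/values are illustrative:

#include <linux/amba/pl061.h>

static struct pl061_platform_data board_pl061_pdata = {
	.gpio_base	= 0,			/* first GPIO number of this block */
	.irq_base	= BOARD_PL061_IRQ_BASE,	/* hypothetical macro; 0 disables IRQ support */
	.directions	= 0x03,			/* GPIO0/1 start as outputs */
	.values		= 0x01,			/* GPIO0 initially driven high */
};

With irq_base left at 0 (the device-tree case above), pl061_to_irq() returns -EINVAL and the generic irq_chip is never set up.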
diff --git a/drivers/gpio/gpio-pxa.c b/drivers/gpio/gpio-pxa.c
index ee13771..b2d3ee1 100644
--- a/drivers/gpio/gpio-pxa.c
+++ b/drivers/gpio/gpio-pxa.c
@@ -11,14 +11,46 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/clk.h>
+#include <linux/err.h>
#include <linux/gpio.h>
+#include <linux/gpio-pxa.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/slab.h>
-#include <mach/gpio-pxa.h>
+/*
+ * We handle the GPIOs by banks, each bank covers up to 32 GPIOs with
+ * one set of registers. The register offsets are organized below:
+ *
+ * GPLR GPDR GPSR GPCR GRER GFER GEDR
+ * BANK 0 - 0x0000 0x000C 0x0018 0x0024 0x0030 0x003C 0x0048
+ * BANK 1 - 0x0004 0x0010 0x001C 0x0028 0x0034 0x0040 0x004C
+ * BANK 2 - 0x0008 0x0014 0x0020 0x002C 0x0038 0x0044 0x0050
+ *
+ * BANK 3 - 0x0100 0x010C 0x0118 0x0124 0x0130 0x013C 0x0148
+ * BANK 4 - 0x0104 0x0110 0x011C 0x0128 0x0134 0x0140 0x014C
+ * BANK 5 - 0x0108 0x0114 0x0120 0x012C 0x0138 0x0144 0x0150
+ *
+ * NOTE:
+ * BANK 3 is only available on PXA27x and later processors.
+ * BANK 4 and 5 are only available on PXA935.
+ */
+
+#define GPLR_OFFSET 0x00
+#define GPDR_OFFSET 0x0C
+#define GPSR_OFFSET 0x18
+#define GPCR_OFFSET 0x24
+#define GRER_OFFSET 0x30
+#define GFER_OFFSET 0x3C
+#define GEDR_OFFSET 0x48
+#define GAFR_OFFSET 0x54
+#define ED_MASK_OFFSET 0x9C /* GPIO edge detection for AP side */
+
+#define BANK_OFF(n) (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
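+
+/*
+ * Illustrative check against the table above: BANK_OFF(1) == 0x004,
+ * BANK_OFF(2) == 0x008, BANK_OFF(3) == 0x100, BANK_OFF(4) == 0x104.
+ */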
int pxa_last_gpio;
@@ -39,8 +71,20 @@ struct pxa_gpio_chip {
#endif
};
+enum {
+ PXA25X_GPIO = 0,
+ PXA26X_GPIO,
+ PXA27X_GPIO,
+ PXA3XX_GPIO,
+ PXA93X_GPIO,
+ MMP_GPIO = 0x10,
+ MMP2_GPIO,
+};
+
static DEFINE_SPINLOCK(gpio_lock);
static struct pxa_gpio_chip *pxa_gpio_chips;
+static int gpio_type;
+static void __iomem *gpio_reg_base;
#define for_each_gpio_chip(i, c) \
for (i = 0, c = &pxa_gpio_chips[0]; i <= pxa_last_gpio; i += 32, c++)
@@ -55,6 +99,122 @@ static inline struct pxa_gpio_chip *gpio_to_pxachip(unsigned gpio)
return &pxa_gpio_chips[gpio_to_bank(gpio)];
}
+static inline int gpio_is_pxa_type(int type)
+{
+ return (type & MMP_GPIO) == 0;
+}
+
+static inline int gpio_is_mmp_type(int type)
+{
+ return (type & MMP_GPIO) != 0;
+}
+
+/* GPIO86/87/88/89 on PXA26x have their direction bits in PXA_GPDR(2) inverted,
+ * as well as their Alternate Function value being '1' for GPIO in GAFRx.
+ */
+static inline int __gpio_is_inverted(int gpio)
+{
+ if ((gpio_type == PXA26X_GPIO) && (gpio > 85))
+ return 1;
+ return 0;
+}
+
+/*
+ * On PXA25x and PXA27x, GAFRx and GPDRx together decide the alternate
+ * function of a GPIO, and GPDRx cannot be altered once configured. It
+ * is attributed as "occupied" here (I know this terminology isn't
+ * accurate, you are welcome to propose a better one :-)
+ */
+static inline int __gpio_is_occupied(unsigned gpio)
+{
+ struct pxa_gpio_chip *pxachip;
+ void __iomem *base;
+ unsigned long gafr = 0, gpdr = 0;
+ int ret, af = 0, dir = 0;
+
+ pxachip = gpio_to_pxachip(gpio);
+ base = gpio_chip_base(&pxachip->chip);
+ gpdr = readl_relaxed(base + GPDR_OFFSET);
+
+ switch (gpio_type) {
+ case PXA25X_GPIO:
+ case PXA26X_GPIO:
+ case PXA27X_GPIO:
+ gafr = readl_relaxed(base + GAFR_OFFSET);
+ af = (gafr >> ((gpio & 0xf) * 2)) & 0x3;
+ dir = gpdr & GPIO_bit(gpio);
+
+ if (__gpio_is_inverted(gpio))
+ ret = (af != 1) || (dir == 0);
+ else
+ ret = (af != 0) || (dir != 0);
+ break;
+ default:
+ ret = gpdr & GPIO_bit(gpio);
+ break;
+ }
+ return ret;
+}
+
+#ifdef CONFIG_ARCH_PXA
+static inline int __pxa_gpio_to_irq(int gpio)
+{
+ if (gpio_is_pxa_type(gpio_type))
+ return PXA_GPIO_TO_IRQ(gpio);
+ return -1;
+}
+
+static inline int __pxa_irq_to_gpio(int irq)
+{
+ if (gpio_is_pxa_type(gpio_type))
+ return irq - PXA_GPIO_TO_IRQ(0);
+ return -1;
+}
+#else
+static inline int __pxa_gpio_to_irq(int gpio) { return -1; }
+static inline int __pxa_irq_to_gpio(int irq) { return -1; }
+#endif
+
+#ifdef CONFIG_ARCH_MMP
+static inline int __mmp_gpio_to_irq(int gpio)
+{
+ if (gpio_is_mmp_type(gpio_type))
+ return MMP_GPIO_TO_IRQ(gpio);
+ return -1;
+}
+
+static inline int __mmp_irq_to_gpio(int irq)
+{
+ if (gpio_is_mmp_type(gpio_type))
+ return irq - MMP_GPIO_TO_IRQ(0);
+ return -1;
+}
+#else
+static inline int __mmp_gpio_to_irq(int gpio) { return -1; }
+static inline int __mmp_irq_to_gpio(int irq) { return -1; }
+#endif
+
+static int pxa_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ int gpio, ret;
+
+ gpio = chip->base + offset;
+ ret = __pxa_gpio_to_irq(gpio);
+ if (ret >= 0)
+ return ret;
+ return __mmp_gpio_to_irq(gpio);
+}
+
+int pxa_irq_to_gpio(int irq)
+{
+ int ret;
+
+ ret = __pxa_irq_to_gpio(irq);
+ if (ret >= 0)
+ return ret;
+ return __mmp_irq_to_gpio(irq);
+}
+
static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
void __iomem *base = gpio_chip_base(chip);
@@ -63,12 +223,12 @@ static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
spin_lock_irqsave(&gpio_lock, flags);
- value = __raw_readl(base + GPDR_OFFSET);
+ value = readl_relaxed(base + GPDR_OFFSET);
if (__gpio_is_inverted(chip->base + offset))
value |= mask;
else
value &= ~mask;
- __raw_writel(value, base + GPDR_OFFSET);
+ writel_relaxed(value, base + GPDR_OFFSET);
spin_unlock_irqrestore(&gpio_lock, flags);
return 0;
@@ -81,16 +241,16 @@ static int pxa_gpio_direction_output(struct gpio_chip *chip,
uint32_t tmp, mask = 1 << offset;
unsigned long flags;
- __raw_writel(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));
+ writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));
spin_lock_irqsave(&gpio_lock, flags);
- tmp = __raw_readl(base + GPDR_OFFSET);
+ tmp = readl_relaxed(base + GPDR_OFFSET);
if (__gpio_is_inverted(chip->base + offset))
tmp &= ~mask;
else
tmp |= mask;
- __raw_writel(tmp, base + GPDR_OFFSET);
+ writel_relaxed(tmp, base + GPDR_OFFSET);
spin_unlock_irqrestore(&gpio_lock, flags);
return 0;
@@ -98,16 +258,16 @@ static int pxa_gpio_direction_output(struct gpio_chip *chip,
static int pxa_gpio_get(struct gpio_chip *chip, unsigned offset)
{
- return __raw_readl(gpio_chip_base(chip) + GPLR_OFFSET) & (1 << offset);
+ return readl_relaxed(gpio_chip_base(chip) + GPLR_OFFSET) & (1 << offset);
}
static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
- __raw_writel(1 << offset, gpio_chip_base(chip) +
+ writel_relaxed(1 << offset, gpio_chip_base(chip) +
(value ? GPSR_OFFSET : GPCR_OFFSET));
}
-static int __init pxa_init_gpio_chip(int gpio_end)
+static int __devinit pxa_init_gpio_chip(int gpio_end)
{
int i, gpio, nbanks = gpio_to_bank(gpio_end) + 1;
struct pxa_gpio_chip *chips;
@@ -122,7 +282,7 @@ static int __init pxa_init_gpio_chip(int gpio_end)
struct gpio_chip *c = &chips[i].chip;
sprintf(chips[i].label, "gpio-%d", i);
- chips[i].regbase = GPIO_BANK(i);
+ chips[i].regbase = gpio_reg_base + BANK_OFF(i);
c->base = gpio;
c->label = chips[i].label;
@@ -131,6 +291,7 @@ static int __init pxa_init_gpio_chip(int gpio_end)
c->direction_output = pxa_gpio_direction_output;
c->get = pxa_gpio_get;
c->set = pxa_gpio_set;
+ c->to_irq = pxa_gpio_to_irq;
/* number of GPIOs on last bank may be less than 32 */
c->ngpio = (gpio + 31 > gpio_end) ? (gpio_end - gpio + 1) : 32;
@@ -147,18 +308,18 @@ static inline void update_edge_detect(struct pxa_gpio_chip *c)
{
uint32_t grer, gfer;
- grer = __raw_readl(c->regbase + GRER_OFFSET) & ~c->irq_mask;
- gfer = __raw_readl(c->regbase + GFER_OFFSET) & ~c->irq_mask;
+ grer = readl_relaxed(c->regbase + GRER_OFFSET) & ~c->irq_mask;
+ gfer = readl_relaxed(c->regbase + GFER_OFFSET) & ~c->irq_mask;
grer |= c->irq_edge_rise & c->irq_mask;
gfer |= c->irq_edge_fall & c->irq_mask;
- __raw_writel(grer, c->regbase + GRER_OFFSET);
- __raw_writel(gfer, c->regbase + GFER_OFFSET);
+ writel_relaxed(grer, c->regbase + GRER_OFFSET);
+ writel_relaxed(gfer, c->regbase + GFER_OFFSET);
}
static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type)
{
struct pxa_gpio_chip *c;
- int gpio = irq_to_gpio(d->irq);
+ int gpio = pxa_irq_to_gpio(d->irq);
unsigned long gpdr, mask = GPIO_bit(gpio);
c = gpio_to_pxachip(gpio);
@@ -176,12 +337,12 @@ static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type)
type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
}
- gpdr = __raw_readl(c->regbase + GPDR_OFFSET);
+ gpdr = readl_relaxed(c->regbase + GPDR_OFFSET);
if (__gpio_is_inverted(gpio))
- __raw_writel(gpdr | mask, c->regbase + GPDR_OFFSET);
+ writel_relaxed(gpdr | mask, c->regbase + GPDR_OFFSET);
else
- __raw_writel(gpdr & ~mask, c->regbase + GPDR_OFFSET);
+ writel_relaxed(gpdr & ~mask, c->regbase + GPDR_OFFSET);
if (type & IRQ_TYPE_EDGE_RISING)
c->irq_edge_rise |= mask;
@@ -212,9 +373,9 @@ static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
for_each_gpio_chip(gpio, c) {
gpio_base = c->chip.base;
- gedr = __raw_readl(c->regbase + GEDR_OFFSET);
+ gedr = readl_relaxed(c->regbase + GEDR_OFFSET);
gedr = gedr & c->irq_mask;
- __raw_writel(gedr, c->regbase + GEDR_OFFSET);
+ writel_relaxed(gedr, c->regbase + GEDR_OFFSET);
n = find_first_bit(&gedr, BITS_PER_LONG);
while (n < BITS_PER_LONG) {
@@ -229,29 +390,29 @@ static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
static void pxa_ack_muxed_gpio(struct irq_data *d)
{
- int gpio = irq_to_gpio(d->irq);
+ int gpio = pxa_irq_to_gpio(d->irq);
struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
- __raw_writel(GPIO_bit(gpio), c->regbase + GEDR_OFFSET);
+ writel_relaxed(GPIO_bit(gpio), c->regbase + GEDR_OFFSET);
}
static void pxa_mask_muxed_gpio(struct irq_data *d)
{
- int gpio = irq_to_gpio(d->irq);
+ int gpio = pxa_irq_to_gpio(d->irq);
struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
uint32_t grer, gfer;
c->irq_mask &= ~GPIO_bit(gpio);
- grer = __raw_readl(c->regbase + GRER_OFFSET) & ~GPIO_bit(gpio);
- gfer = __raw_readl(c->regbase + GFER_OFFSET) & ~GPIO_bit(gpio);
- __raw_writel(grer, c->regbase + GRER_OFFSET);
- __raw_writel(gfer, c->regbase + GFER_OFFSET);
+ grer = readl_relaxed(c->regbase + GRER_OFFSET) & ~GPIO_bit(gpio);
+ gfer = readl_relaxed(c->regbase + GFER_OFFSET) & ~GPIO_bit(gpio);
+ writel_relaxed(grer, c->regbase + GRER_OFFSET);
+ writel_relaxed(gfer, c->regbase + GFER_OFFSET);
}
static void pxa_unmask_muxed_gpio(struct irq_data *d)
{
- int gpio = irq_to_gpio(d->irq);
+ int gpio = pxa_irq_to_gpio(d->irq);
struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
c->irq_mask |= GPIO_bit(gpio);
@@ -266,34 +427,143 @@ static struct irq_chip pxa_muxed_gpio_chip = {
.irq_set_type = pxa_gpio_irq_type,
};
-void __init pxa_init_gpio(int mux_irq, int start, int end, set_wake_t fn)
+static int pxa_gpio_nums(void)
{
- struct pxa_gpio_chip *c;
- int gpio, irq;
+ int count = 0;
+
+#ifdef CONFIG_ARCH_PXA
+ if (cpu_is_pxa25x()) {
+#ifdef CONFIG_CPU_PXA26x
+ count = 89;
+ gpio_type = PXA26X_GPIO;
+#elif defined(CONFIG_PXA25x)
+ count = 84;
+ gpio_type = PXA26X_GPIO;
+#endif /* CONFIG_CPU_PXA26x */
+ } else if (cpu_is_pxa27x()) {
+ count = 120;
+ gpio_type = PXA27X_GPIO;
+ } else if (cpu_is_pxa93x() || cpu_is_pxa95x()) {
+ count = 191;
+ gpio_type = PXA93X_GPIO;
+ } else if (cpu_is_pxa3xx()) {
+ count = 127;
+ gpio_type = PXA3XX_GPIO;
+ }
+#endif /* CONFIG_ARCH_PXA */
+
+#ifdef CONFIG_ARCH_MMP
+ if (cpu_is_pxa168() || cpu_is_pxa910()) {
+ count = 127;
+ gpio_type = MMP_GPIO;
+ } else if (cpu_is_mmp2()) {
+ count = 191;
+ gpio_type = MMP2_GPIO;
+ }
+#endif /* CONFIG_ARCH_MMP */
+ return count;
+}
- pxa_last_gpio = end;
+static int __devinit pxa_gpio_probe(struct platform_device *pdev)
+{
+ struct pxa_gpio_chip *c;
+ struct resource *res;
+ struct clk *clk;
+ int gpio, irq, ret;
+ int irq0 = 0, irq1 = 0, irq_mux, gpio_offset = 0;
+
+ pxa_last_gpio = pxa_gpio_nums();
+ if (!pxa_last_gpio)
+ return -EINVAL;
+
+ irq0 = platform_get_irq_byname(pdev, "gpio0");
+ irq1 = platform_get_irq_byname(pdev, "gpio1");
+ irq_mux = platform_get_irq_byname(pdev, "gpio_mux");
+ if ((irq0 > 0 && irq1 <= 0) || (irq0 <= 0 && irq1 > 0)
+ || (irq_mux <= 0))
+ return -EINVAL;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+ gpio_reg_base = ioremap(res->start, resource_size(res));
+ if (!gpio_reg_base)
+ return -EINVAL;
+
+ if (irq0 > 0)
+ gpio_offset = 2;
+
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "Error %ld getting gpio clock\n",
+ PTR_ERR(clk));
+ iounmap(gpio_reg_base);
+ return PTR_ERR(clk);
+ }
+ ret = clk_prepare(clk);
+ if (ret) {
+ clk_put(clk);
+ iounmap(gpio_reg_base);
+ return ret;
+ }
+ ret = clk_enable(clk);
+ if (ret) {
+ clk_unprepare(clk);
+ clk_put(clk);
+ iounmap(gpio_reg_base);
+ return ret;
+ }
/* Initialize GPIO chips */
- pxa_init_gpio_chip(end);
+ pxa_init_gpio_chip(pxa_last_gpio);
/* clear all GPIO edge detects */
for_each_gpio_chip(gpio, c) {
- __raw_writel(0, c->regbase + GFER_OFFSET);
- __raw_writel(0, c->regbase + GRER_OFFSET);
- __raw_writel(~0,c->regbase + GEDR_OFFSET);
+ writel_relaxed(0, c->regbase + GFER_OFFSET);
+ writel_relaxed(0, c->regbase + GRER_OFFSET);
+ writel_relaxed(~0, c->regbase + GEDR_OFFSET);
+ /* unmask GPIO edge detect for AP side */
+ if (gpio_is_mmp_type(gpio_type))
+ writel_relaxed(~0, c->regbase + ED_MASK_OFFSET);
}
- for (irq = gpio_to_irq(start); irq <= gpio_to_irq(end); irq++) {
+#ifdef CONFIG_ARCH_PXA
+ irq = gpio_to_irq(0);
+ irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
+ handle_edge_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ irq_set_chained_handler(IRQ_GPIO0, pxa_gpio_demux_handler);
+
+ irq = gpio_to_irq(1);
+ irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
+ handle_edge_irq);
+ set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
+ irq_set_chained_handler(IRQ_GPIO1, pxa_gpio_demux_handler);
+#endif
+
+ for (irq = gpio_to_irq(gpio_offset);
+ irq <= gpio_to_irq(pxa_last_gpio); irq++) {
irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
handle_edge_irq);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
- /* Install handler for GPIO>=2 edge detect interrupts */
- irq_set_chained_handler(mux_irq, pxa_gpio_demux_handler);
- pxa_muxed_gpio_chip.irq_set_wake = fn;
+ irq_set_chained_handler(irq_mux, pxa_gpio_demux_handler);
+ return 0;
}
+static struct platform_driver pxa_gpio_driver = {
+ .probe = pxa_gpio_probe,
+ .driver = {
+ .name = "pxa-gpio",
+ },
+};
+
+static int __init pxa_gpio_init(void)
+{
+ return platform_driver_register(&pxa_gpio_driver);
+}
+postcore_initcall(pxa_gpio_init);
+
#ifdef CONFIG_PM
static int pxa_gpio_suspend(void)
{
@@ -301,13 +571,13 @@ static int pxa_gpio_suspend(void)
int gpio;
for_each_gpio_chip(gpio, c) {
- c->saved_gplr = __raw_readl(c->regbase + GPLR_OFFSET);
- c->saved_gpdr = __raw_readl(c->regbase + GPDR_OFFSET);
- c->saved_grer = __raw_readl(c->regbase + GRER_OFFSET);
- c->saved_gfer = __raw_readl(c->regbase + GFER_OFFSET);
+ c->saved_gplr = readl_relaxed(c->regbase + GPLR_OFFSET);
+ c->saved_gpdr = readl_relaxed(c->regbase + GPDR_OFFSET);
+ c->saved_grer = readl_relaxed(c->regbase + GRER_OFFSET);
+ c->saved_gfer = readl_relaxed(c->regbase + GFER_OFFSET);
/* Clear GPIO transition detect bits */
- __raw_writel(0xffffffff, c->regbase + GEDR_OFFSET);
+ writel_relaxed(0xffffffff, c->regbase + GEDR_OFFSET);
}
return 0;
}
@@ -319,12 +589,12 @@ static void pxa_gpio_resume(void)
for_each_gpio_chip(gpio, c) {
/* restore level with set/clear */
- __raw_writel( c->saved_gplr, c->regbase + GPSR_OFFSET);
- __raw_writel(~c->saved_gplr, c->regbase + GPCR_OFFSET);
+ writel_relaxed( c->saved_gplr, c->regbase + GPSR_OFFSET);
+ writel_relaxed(~c->saved_gplr, c->regbase + GPCR_OFFSET);
- __raw_writel(c->saved_grer, c->regbase + GRER_OFFSET);
- __raw_writel(c->saved_gfer, c->regbase + GFER_OFFSET);
- __raw_writel(c->saved_gpdr, c->regbase + GPDR_OFFSET);
+ writel_relaxed(c->saved_grer, c->regbase + GRER_OFFSET);
+ writel_relaxed(c->saved_gfer, c->regbase + GFER_OFFSET);
+ writel_relaxed(c->saved_gpdr, c->regbase + GPDR_OFFSET);
}
}
#else
@@ -336,3 +606,10 @@ struct syscore_ops pxa_gpio_syscore_ops = {
.suspend = pxa_gpio_suspend,
.resume = pxa_gpio_resume,
};
+
+static int __init pxa_gpio_sysinit(void)
+{
+ register_syscore_ops(&pxa_gpio_syscore_ops);
+ return 0;
+}
+postcore_initcall(pxa_gpio_sysinit);
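Probing now relies on machine code registering a "pxa-gpio" platform device with one MEM resource and the named IRQs used above; a rough sketch, where the register base and IRQ numbers are placeholders for the real SoC values:

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource pxa_gpio_resources[] = {
	{
		.start	= 0x40e00000,		/* GPIO register base, placeholder */
		.end	= 0x40e00000 + 0x1ff,
		.flags	= IORESOURCE_MEM,
	}, {
		.name	= "gpio0",		/* edge IRQ for GPIO0, placeholder number */
		.start	= 8,
		.end	= 8,
		.flags	= IORESOURCE_IRQ,
	}, {
		.name	= "gpio1",		/* edge IRQ for GPIO1, placeholder number */
		.start	= 9,
		.end	= 9,
		.flags	= IORESOURCE_IRQ,
	}, {
		.name	= "gpio_mux",		/* muxed IRQ for GPIO >= 2, placeholder number */
		.start	= 10,
		.end	= 10,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device pxa_gpio_device = {
	.name		= "pxa-gpio",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(pxa_gpio_resources),
	.resource	= pxa_gpio_resources,
};

The probe also calls clk_get(&pdev->dev, NULL), so a matching clock lookup must exist; "gpio0" and "gpio1" must be supplied together or not at all, while "gpio_mux" is mandatory.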
diff --git a/drivers/gpio/gpio-rdc321x.c b/drivers/gpio/gpio-rdc321x.c
index 2762698..e97016a 100644
--- a/drivers/gpio/gpio-rdc321x.c
+++ b/drivers/gpio/gpio-rdc321x.c
@@ -227,18 +227,7 @@ static struct platform_driver rdc321x_gpio_driver = {
.remove = __devexit_p(rdc321x_gpio_remove),
};
-static int __init rdc321x_gpio_init(void)
-{
- return platform_driver_register(&rdc321x_gpio_driver);
-}
-
-static void __exit rdc321x_gpio_exit(void)
-{
- platform_driver_unregister(&rdc321x_gpio_driver);
-}
-
-module_init(rdc321x_gpio_init);
-module_exit(rdc321x_gpio_exit);
+module_platform_driver(rdc321x_gpio_driver);
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
MODULE_DESCRIPTION("RDC321x GPIO driver");
diff --git a/drivers/gpio/gpio-sa1100.c b/drivers/gpio/gpio-sa1100.c
index b6c1f6d..7eecf69 100644
--- a/drivers/gpio/gpio-sa1100.c
+++ b/drivers/gpio/gpio-sa1100.c
@@ -47,12 +47,18 @@ static int sa1100_direction_output(struct gpio_chip *chip, unsigned offset, int
return 0;
}
+static int sa1100_to_irq(struct gpio_chip *chip, unsigned offset)
+{
+ return offset < 11 ? (IRQ_GPIO0 + offset) : (IRQ_GPIO11 - 11 + offset);
+}
+
static struct gpio_chip sa1100_gpio_chip = {
.label = "gpio",
.direction_input = sa1100_direction_input,
.direction_output = sa1100_direction_output,
.set = sa1100_gpio_set,
.get = sa1100_gpio_get,
+ .to_irq = sa1100_to_irq,
.base = 0,
.ngpio = GPIO_MAX + 1,
};
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 8662518..0a79a11 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -22,8 +22,11 @@
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/interrupt.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/of_address.h>
#include <asm/irq.h>
@@ -230,7 +233,7 @@ static int samsung_gpio_setcfg_2bit(struct samsung_gpio_chip *chip,
* @chip: The gpio chip that is being configured.
* @off: The offset for the GPIO being configured.
*
- * The reverse of samsung_gpio_setcfg_2bit(). Will return a value whicg
+ * The reverse of samsung_gpio_setcfg_2bit(). Will return a value which
* could be directly passed back to samsung_gpio_setcfg_2bit(), from the
* S3C_GPIO_SPECIAL() macro.
*/
@@ -467,33 +470,42 @@ static struct samsung_gpio_cfg s5p64x0_gpio_cfg_rbank = {
#endif
static struct samsung_gpio_cfg samsung_gpio_cfgs[] = {
- {
+ [0] = {
.cfg_eint = 0x0,
- }, {
+ },
+ [1] = {
.cfg_eint = 0x3,
- }, {
+ },
+ [2] = {
.cfg_eint = 0x7,
- }, {
+ },
+ [3] = {
.cfg_eint = 0xF,
- }, {
+ },
+ [4] = {
.cfg_eint = 0x0,
.set_config = samsung_gpio_setcfg_2bit,
.get_config = samsung_gpio_getcfg_2bit,
- }, {
+ },
+ [5] = {
.cfg_eint = 0x2,
.set_config = samsung_gpio_setcfg_2bit,
.get_config = samsung_gpio_getcfg_2bit,
- }, {
+ },
+ [6] = {
.cfg_eint = 0x3,
.set_config = samsung_gpio_setcfg_2bit,
.get_config = samsung_gpio_getcfg_2bit,
- }, {
+ },
+ [7] = {
.set_config = samsung_gpio_setcfg_2bit,
.get_config = samsung_gpio_getcfg_2bit,
- }, {
+ },
+ [8] = {
.set_pull = exynos4_gpio_setpull,
.get_pull = exynos4_gpio_getpull,
- }, {
+ },
+ [9] = {
.cfg_eint = 0x3,
.set_pull = exynos4_gpio_setpull,
.get_pull = exynos4_gpio_getpull,
@@ -2374,6 +2386,66 @@ static struct samsung_gpio_chip exynos4_gpios_3[] = {
#endif
};
+#if defined(CONFIG_ARCH_EXYNOS4) && defined(CONFIG_OF)
+static int exynos4_gpio_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec, u32 *flags)
+{
+ unsigned int pin;
+
+ if (WARN_ON(gc->of_gpio_n_cells < 4))
+ return -EINVAL;
+
+ if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells))
+ return -EINVAL;
+
+ if (gpiospec->args[0] > gc->ngpio)
+ return -EINVAL;
+
+ pin = gc->base + gpiospec->args[0];
+
+ if (s3c_gpio_cfgpin(pin, S3C_GPIO_SFN(gpiospec->args[1])))
+ pr_warn("gpio_xlate: failed to set pin function\n");
+ if (s3c_gpio_setpull(pin, gpiospec->args[2]))
+ pr_warn("gpio_xlate: failed to set pin pull up/down\n");
+ if (s5p_gpio_set_drvstr(pin, gpiospec->args[3]))
+ pr_warn("gpio_xlate: failed to set pin drive strength\n");
+
+ return gpiospec->args[0];
+}
+
+static const struct of_device_id exynos4_gpio_dt_match[] __initdata = {
+ { .compatible = "samsung,exynos4-gpio", },
+ {}
+};
+
+static __init void exynos4_gpiolib_attach_ofnode(struct samsung_gpio_chip *chip,
+ u64 base, u64 offset)
+{
+ struct gpio_chip *gc = &chip->chip;
+ u64 address;
+
+ if (!of_have_populated_dt())
+ return;
+
+ address = chip->base ? base + ((u32)chip->base & 0xfff) : base + offset;
+ gc->of_node = of_find_matching_node_by_address(NULL,
+ exynos4_gpio_dt_match, address);
+ if (!gc->of_node) {
+ pr_info("gpio: device tree node not found for gpio controller"
+ " with base address %08llx\n", address);
+ return;
+ }
+ gc->of_gpio_n_cells = 4;
+ gc->of_xlate = exynos4_gpio_xlate;
+}
+#elif defined(CONFIG_ARCH_EXYNOS4)
+static __init void exynos4_gpiolib_attach_ofnode(struct samsung_gpio_chip *chip,
+ u64 base, u64 offset)
+{
+ return;
+}
+#endif /* defined(CONFIG_ARCH_EXYNOS4) && defined(CONFIG_OF) */
+
/* TODO: cleanup soc_is_* */
static __init int samsung_gpiolib_init(void)
{
@@ -2455,6 +2527,10 @@ static __init int samsung_gpiolib_init(void)
chip->config = &exynos4_gpio_cfg;
chip->group = group++;
}
+#ifdef CONFIG_CPU_EXYNOS4210
+ exynos4_gpiolib_attach_ofnode(chip,
+ EXYNOS4_PA_GPIO1, i * 0x20);
+#endif
}
samsung_gpiolib_add_4bit_chips(exynos4_gpios_1, nr_chips, S5P_VA_GPIO1);
@@ -2467,6 +2543,10 @@ static __init int samsung_gpiolib_init(void)
chip->config = &exynos4_gpio_cfg;
chip->group = group++;
}
+#ifdef CONFIG_CPU_EXYNOS4210
+ exynos4_gpiolib_attach_ofnode(chip,
+ EXYNOS4_PA_GPIO2, i * 0x20);
+#endif
}
samsung_gpiolib_add_4bit_chips(exynos4_gpios_2, nr_chips, S5P_VA_GPIO2);
@@ -2479,6 +2559,10 @@ static __init int samsung_gpiolib_init(void)
chip->config = &exynos4_gpio_cfg;
chip->group = group++;
}
+#ifdef CONFIG_CPU_EXYNOS4210
+ exynos4_gpiolib_attach_ofnode(chip,
+ EXYNOS4_PA_GPIO3, i * 0x20);
+#endif
}
samsung_gpiolib_add_4bit_chips(exynos4_gpios_3, nr_chips, S5P_VA_GPIO3);
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 1635158..8cadf4d 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -297,18 +297,7 @@ static struct platform_driver sch_gpio_driver = {
.remove = __devexit_p(sch_gpio_remove),
};
-static int __init sch_gpio_init(void)
-{
- return platform_driver_register(&sch_gpio_driver);
-}
-
-static void __exit sch_gpio_exit(void)
-{
- platform_driver_unregister(&sch_gpio_driver);
-}
-
-module_init(sch_gpio_init);
-module_exit(sch_gpio_exit);
+module_platform_driver(sch_gpio_driver);
MODULE_AUTHOR("Denis Turischev <denis@compulab.co.il>");
MODULE_DESCRIPTION("GPIO interface for Intel Poulsbo SCH");
diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
index 4c980b5..87a68a8 100644
--- a/drivers/gpio/gpio-stmpe.c
+++ b/drivers/gpio/gpio-stmpe.c
@@ -65,7 +65,14 @@ static void stmpe_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
u8 reg = stmpe->regs[which] - (offset / 8);
u8 mask = 1 << (offset % 8);
- stmpe_reg_write(stmpe, reg, mask);
+ /*
+ * Some variants have a single register for gpio set/clear functionality.
+ * For them we need to write 0 to clear and 1 to set.
+ */
+ if (stmpe->regs[STMPE_IDX_GPSR_LSB] == stmpe->regs[STMPE_IDX_GPCR_LSB])
+ stmpe_set_bits(stmpe, reg, mask, val ? mask : 0);
+ else
+ stmpe_reg_write(stmpe, reg, mask);
}
static int stmpe_gpio_direction_output(struct gpio_chip *chip,
@@ -132,6 +139,10 @@ static int stmpe_gpio_irq_set_type(struct irq_data *d, unsigned int type)
if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH)
return -EINVAL;
+ /* STMPE801 doesn't have RE and FE registers */
+ if (stmpe_gpio->stmpe->partnum == STMPE801)
+ return 0;
+
if (type == IRQ_TYPE_EDGE_RISING)
stmpe_gpio->regs[REG_RE][regoffset] |= mask;
else
@@ -165,6 +176,11 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
int i, j;
for (i = 0; i < CACHE_NR_REGS; i++) {
+ /* STMPE801 doesn't have RE and FE registers */
+ if ((stmpe->partnum == STMPE801) &&
+ (i != REG_IE))
+ continue;
+
for (j = 0; j < num_banks; j++) {
u8 old = stmpe_gpio->oldregs[i][j];
u8 new = stmpe_gpio->regs[i][j];
@@ -241,8 +257,11 @@ static irqreturn_t stmpe_gpio_irq(int irq, void *dev)
}
stmpe_reg_write(stmpe, statmsbreg + i, status[i]);
- stmpe_reg_write(stmpe, stmpe->regs[STMPE_IDX_GPEDR_MSB] + i,
- status[i]);
+
+ /* Edge detect register is not present on 801 */
+ if (stmpe->partnum != STMPE801)
+ stmpe_reg_write(stmpe, stmpe->regs[STMPE_IDX_GPEDR_MSB]
+ + i, status[i]);
}
return IRQ_HANDLED;
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index 61044c8..bdc2937 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -361,14 +361,7 @@ static int __devinit tegra_gpio_probe(struct platform_device *pdev)
return -ENODEV;
}
- if (!devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res),
- dev_name(&pdev->dev))) {
- dev_err(&pdev->dev, "Couldn't request MEM resource\n");
- return -ENODEV;
- }
-
- regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ regs = devm_request_and_ioremap(&pdev->dev, res);
if (!regs) {
dev_err(&pdev->dev, "Couldn't ioremap regs\n");
return -ENODEV;
diff --git a/drivers/gpio/gpio-timberdale.c b/drivers/gpio/gpio-timberdale.c
index c593bd4..031c6ad 100644
--- a/drivers/gpio/gpio-timberdale.c
+++ b/drivers/gpio/gpio-timberdale.c
@@ -359,18 +359,7 @@ static struct platform_driver timbgpio_platform_driver = {
/*--------------------------------------------------------------------------*/
-static int __init timbgpio_init(void)
-{
- return platform_driver_register(&timbgpio_platform_driver);
-}
-
-static void __exit timbgpio_exit(void)
-{
- platform_driver_unregister(&timbgpio_platform_driver);
-}
-
-module_init(timbgpio_init);
-module_exit(timbgpio_exit);
+module_platform_driver(timbgpio_platform_driver);
MODULE_DESCRIPTION("Timberdale GPIO driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index b9c1c29..91f45b9 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -52,7 +52,7 @@ static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset,
struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
/* Set the initial value */
- tps65910_gpio_set(gc, 0, value);
+ tps65910_gpio_set(gc, offset, value);
return tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset,
GPIO_CFG_MASK);
diff --git a/drivers/gpio/gpio-ucb1400.c b/drivers/gpio/gpio-ucb1400.c
index 50e6bd1..26405ef 100644
--- a/drivers/gpio/gpio-ucb1400.c
+++ b/drivers/gpio/gpio-ucb1400.c
@@ -103,23 +103,12 @@ static struct platform_driver ucb1400_gpio_driver = {
},
};
-static int __init ucb1400_gpio_init(void)
-{
- return platform_driver_register(&ucb1400_gpio_driver);
-}
-
-static void __exit ucb1400_gpio_exit(void)
-{
- platform_driver_unregister(&ucb1400_gpio_driver);
-}
-
void __init ucb1400_gpio_set_data(struct ucb1400_gpio_data *data)
{
ucbdata = data;
}
-module_init(ucb1400_gpio_init);
-module_exit(ucb1400_gpio_exit);
+module_platform_driver(ucb1400_gpio_driver);
MODULE_DESCRIPTION("Philips UCB1400 GPIO driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
index 98723cb..82d5c20 100644
--- a/drivers/gpio/gpio-vr41xx.c
+++ b/drivers/gpio/gpio-vr41xx.c
@@ -571,15 +571,4 @@ static struct platform_driver giu_device_driver = {
},
};
-static int __init vr41xx_giu_init(void)
-{
- return platform_driver_register(&giu_device_driver);
-}
-
-static void __exit vr41xx_giu_exit(void)
-{
- platform_driver_unregister(&giu_device_driver);
-}
-
-module_init(vr41xx_giu_init);
-module_exit(vr41xx_giu_exit);
+module_platform_driver(giu_device_driver);
diff --git a/drivers/gpio/gpio-vx855.c b/drivers/gpio/gpio-vx855.c
index ef5aabd..76ebfe5 100644
--- a/drivers/gpio/gpio-vx855.c
+++ b/drivers/gpio/gpio-vx855.c
@@ -315,17 +315,7 @@ static struct platform_driver vx855gpio_driver = {
.remove = __devexit_p(vx855gpio_remove),
};
-static int vx855gpio_init(void)
-{
- return platform_driver_register(&vx855gpio_driver);
-}
-module_init(vx855gpio_init);
-
-static void vx855gpio_exit(void)
-{
- platform_driver_unregister(&vx855gpio_driver);
-}
-module_exit(vx855gpio_exit);
+module_platform_driver(vx855gpio_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <HaraldWelte@viatech.com>");
diff --git a/drivers/gpio/gpio-wm8994.c b/drivers/gpio/gpio-wm8994.c
index 96198f3..92ea535 100644
--- a/drivers/gpio/gpio-wm8994.c
+++ b/drivers/gpio/gpio-wm8994.c
@@ -117,6 +117,60 @@ static int wm8994_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
#ifdef CONFIG_DEBUG_FS
+static const char *wm8994_gpio_fn(u16 fn)
+{
+ switch (fn) {
+ case WM8994_GP_FN_PIN_SPECIFIC:
+ return "pin-specific";
+ case WM8994_GP_FN_GPIO:
+ return "GPIO";
+ case WM8994_GP_FN_SDOUT:
+ return "SDOUT";
+ case WM8994_GP_FN_IRQ:
+ return "IRQ";
+ case WM8994_GP_FN_TEMPERATURE:
+ return "Temperature";
+ case WM8994_GP_FN_MICBIAS1_DET:
+ return "MICBIAS1 detect";
+ case WM8994_GP_FN_MICBIAS1_SHORT:
+ return "MICBIAS1 short";
+ case WM8994_GP_FN_MICBIAS2_DET:
+ return "MICBIAS2 detect";
+ case WM8994_GP_FN_MICBIAS2_SHORT:
+ return "MICBIAS2 short";
+ case WM8994_GP_FN_FLL1_LOCK:
+ return "FLL1 lock";
+ case WM8994_GP_FN_FLL2_LOCK:
+ return "FLL2 lock";
+ case WM8994_GP_FN_SRC1_LOCK:
+ return "SRC1 lock";
+ case WM8994_GP_FN_SRC2_LOCK:
+ return "SRC2 lock";
+ case WM8994_GP_FN_DRC1_ACT:
+ return "DRC1 activity";
+ case WM8994_GP_FN_DRC2_ACT:
+ return "DRC2 activity";
+ case WM8994_GP_FN_DRC3_ACT:
+ return "DRC3 activity";
+ case WM8994_GP_FN_WSEQ_STATUS:
+ return "Write sequencer";
+ case WM8994_GP_FN_FIFO_ERROR:
+ return "FIFO error";
+ case WM8994_GP_FN_OPCLK:
+ return "OPCLK";
+ case WM8994_GP_FN_THW:
+ return "Thermal warning";
+ case WM8994_GP_FN_DCS_DONE:
+ return "DC servo";
+ case WM8994_GP_FN_FLL1_OUT:
+ return "FLL1 output";
+ case WM8994_GP_FN_FLL2_OUT:
+ return "FLL1 output";
+ default:
+ return "Unknown";
+ }
+}
+
static void wm8994_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
struct wm8994_gpio *wm8994_gpio = to_wm8994_gpio(chip);
@@ -148,8 +202,29 @@ static void wm8994_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
continue;
}
- /* No decode yet; note that GPIO2 is special */
- seq_printf(s, "(%x)\n", reg);
+ if (reg & WM8994_GPN_DIR)
+ seq_printf(s, "in ");
+ else
+ seq_printf(s, "out ");
+
+ if (reg & WM8994_GPN_PU)
+ seq_printf(s, "pull up ");
+
+ if (reg & WM8994_GPN_PD)
+ seq_printf(s, "pull down ");
+
+ if (reg & WM8994_GPN_POL)
+ seq_printf(s, "inverted ");
+ else
+ seq_printf(s, "noninverted ");
+
+ if (reg & WM8994_GPN_OP_CFG)
+ seq_printf(s, "open drain ");
+ else
+ seq_printf(s, "CMOS ");
+
+ seq_printf(s, "%s (%x)\n",
+ wm8994_gpio_fn(reg & WM8994_GPN_FN_MASK), reg);
}
}
#else
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index 0ce6ac9..79b0fe8 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -206,7 +206,6 @@ static int __devinit xgpio_of_probe(struct device_node *np)
np->full_name, status);
return status;
}
- pr_info("XGpio: %s: registered\n", np->full_name);
return 0;
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index a971e3d..17fdf4b 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -114,7 +114,7 @@ static int gpio_ensure_requested(struct gpio_desc *desc, unsigned offset)
}
/* caller holds gpio_lock *OR* gpio is marked as requested */
-static inline struct gpio_chip *gpio_to_chip(unsigned gpio)
+struct gpio_chip *gpio_to_chip(unsigned gpio)
{
return gpio_desc[gpio].chip;
}
@@ -1089,6 +1089,10 @@ unlock:
if (status)
goto fail;
+ pr_info("gpiochip_add: registered GPIOs %d to %d on device: %s\n",
+ chip->base, chip->base + chip->ngpio - 1,
+ chip->label ? : "generic");
+
return 0;
fail:
/* failures here can mean systems won't boot... */
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 1368826..2418429 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -162,3 +162,6 @@ config DRM_SAVAGE
source "drivers/gpu/drm/exynos/Kconfig"
source "drivers/gpu/drm/vmwgfx/Kconfig"
+
+source "drivers/gpu/drm/gma500/Kconfig"
+
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index c0496f6..0cde1b8 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -9,7 +9,7 @@ drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
- drm_platform.o drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
+ drm_platform.o drm_sysfs.o drm_hashtab.o drm_mm.o \
drm_crtc.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o drm_encoder_slave.o \
drm_trace_points.o drm_global.o drm_usb.o
@@ -36,4 +36,5 @@ obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
obj-$(CONFIG_DRM_VIA) +=via/
obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
obj-$(CONFIG_DRM_EXYNOS) +=exynos/
+obj-$(CONFIG_DRM_GMA500) += gma500/
obj-y += i2c/
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index 3f46772..ba23790 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -101,7 +101,7 @@ static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
* Searches and unlinks the entry in drm_device::magiclist with the magic
* number hash key, while holding the drm_device::struct_mutex lock.
*/
-static int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
+int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
{
struct drm_magic_entry *pt;
struct drm_hash_item *hash;
@@ -136,6 +136,8 @@ static int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
* If there is a magic number in drm_file::magic then use it, otherwise
* search for a unique non-zero magic number and add it, associating it with
* \p file_priv.
+ * This ioctl needs protection by the drm_global_mutex, which protects
+ * struct drm_file::magic and struct drm_magic_entry::priv.
*/
int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
@@ -173,6 +175,8 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
* \return zero if authentication succeeded, or a negative number otherwise.
*
* Checks if \p file_priv is associated with the magic number passed in \arg.
+ * This ioctl needs protection by the drm_global_mutex, which protects
+ * struct drm_file::magic and struct drm_magic_entry::priv.
*/
int drm_authmagic(struct drm_device *dev, void *data,
struct drm_file *file_priv)
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 6d440fb..325365f 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -154,8 +154,6 @@ int drm_getsareactx(struct drm_device *dev, void *data,
return -EINVAL;
}
- mutex_unlock(&dev->struct_mutex);
-
request->handle = NULL;
list_for_each_entry(_entry, &dev->maplist, head) {
if (_entry->map == map) {
@@ -164,6 +162,9 @@ int drm_getsareactx(struct drm_device *dev, void *data,
break;
}
}
+
+ mutex_unlock(&dev->struct_mutex);
+
if (request->handle == NULL)
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 8323fc3..5e818a8 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -36,6 +36,7 @@
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_edid.h"
+#include "drm_fourcc.h"
struct drm_prop_enum_list {
int type;
@@ -324,6 +325,7 @@ void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
{
struct drm_device *dev = fb->dev;
struct drm_crtc *crtc;
+ struct drm_plane *plane;
struct drm_mode_set set;
int ret;
@@ -340,6 +342,18 @@ void drm_framebuffer_cleanup(struct drm_framebuffer *fb)
}
}
+ list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
+ if (plane->fb == fb) {
+ /* should turn off the crtc */
+ ret = plane->funcs->disable_plane(plane);
+ if (ret)
+ DRM_ERROR("failed to disable plane with busy fb\n");
+ /* disconnect the plane from the fb and crtc: */
+ plane->fb = NULL;
+ plane->crtc = NULL;
+ }
+ }
+
drm_mode_object_put(dev, &fb->base);
list_del(&fb->head);
dev->mode_config.num_fb--;
@@ -540,6 +554,63 @@ void drm_encoder_cleanup(struct drm_encoder *encoder)
}
EXPORT_SYMBOL(drm_encoder_cleanup);
+int drm_plane_init(struct drm_device *dev, struct drm_plane *plane,
+ unsigned long possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, uint32_t format_count,
+ bool priv)
+{
+ mutex_lock(&dev->mode_config.mutex);
+
+ plane->dev = dev;
+ drm_mode_object_get(dev, &plane->base, DRM_MODE_OBJECT_PLANE);
+ plane->funcs = funcs;
+ plane->format_types = kmalloc(sizeof(uint32_t) * format_count,
+ GFP_KERNEL);
+ if (!plane->format_types) {
+ DRM_DEBUG_KMS("out of memory when allocating plane\n");
+ drm_mode_object_put(dev, &plane->base);
+ mutex_unlock(&dev->mode_config.mutex);
+ return -ENOMEM;
+ }
+
+ memcpy(plane->format_types, formats, format_count * sizeof(uint32_t));
+ plane->format_count = format_count;
+ plane->possible_crtcs = possible_crtcs;
+
+ /* private planes are not exposed to userspace, but depending on
+ * display hardware, it might be convenient to allow sharing programming
+ * for the scanout engine with the crtc implementation.
+ */
+ if (!priv) {
+ list_add_tail(&plane->head, &dev->mode_config.plane_list);
+ dev->mode_config.num_plane++;
+ } else {
+ INIT_LIST_HEAD(&plane->head);
+ }
+
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_plane_init);
+
+void drm_plane_cleanup(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+
+ mutex_lock(&dev->mode_config.mutex);
+ kfree(plane->format_types);
+ drm_mode_object_put(dev, &plane->base);
+ /* if not added to a list, it must be a private plane */
+ if (!list_empty(&plane->head)) {
+ list_del(&plane->head);
+ dev->mode_config.num_plane--;
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+}
+EXPORT_SYMBOL(drm_plane_cleanup);
+
/**
* drm_mode_create - create a new display mode
* @dev: DRM device
@@ -871,6 +942,7 @@ void drm_mode_config_init(struct drm_device *dev)
INIT_LIST_HEAD(&dev->mode_config.encoder_list);
INIT_LIST_HEAD(&dev->mode_config.property_list);
INIT_LIST_HEAD(&dev->mode_config.property_blob_list);
+ INIT_LIST_HEAD(&dev->mode_config.plane_list);
idr_init(&dev->mode_config.crtc_idr);
mutex_lock(&dev->mode_config.mutex);
@@ -947,6 +1019,7 @@ void drm_mode_config_cleanup(struct drm_device *dev)
struct drm_encoder *encoder, *enct;
struct drm_framebuffer *fb, *fbt;
struct drm_property *property, *pt;
+ struct drm_plane *plane, *plt;
list_for_each_entry_safe(encoder, enct, &dev->mode_config.encoder_list,
head) {
@@ -971,6 +1044,10 @@ void drm_mode_config_cleanup(struct drm_device *dev)
crtc->funcs->destroy(crtc);
}
+ list_for_each_entry_safe(plane, plt, &dev->mode_config.plane_list,
+ head) {
+ plane->funcs->destroy(plane);
+ }
}
EXPORT_SYMBOL(drm_mode_config_cleanup);
@@ -1379,7 +1456,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
*/
if ((out_resp->count_modes >= mode_count) && mode_count) {
copied = 0;
- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
list_for_each_entry(mode, &connector->modes, head) {
drm_crtc_convert_to_umode(&u_mode, mode);
if (copy_to_user(mode_ptr + copied,
@@ -1394,8 +1471,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
if ((out_resp->count_props >= props_count) && props_count) {
copied = 0;
- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
if (connector->property_ids[i] != 0) {
if (put_user(connector->property_ids[i],
@@ -1417,7 +1494,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
copied = 0;
- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
if (connector->encoder_ids[i] != 0) {
if (put_user(connector->encoder_ids[i],
@@ -1471,6 +1548,245 @@ out:
}
/**
+ * drm_mode_getplane_res - get plane resources
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Return a plane count and set of IDs.
+ */
+int drm_mode_getplane_res(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_get_plane_res *plane_resp = data;
+ struct drm_mode_config *config;
+ struct drm_plane *plane;
+ uint32_t __user *plane_ptr;
+ int copied = 0, ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ mutex_lock(&dev->mode_config.mutex);
+ config = &dev->mode_config;
+
+ /*
+ * This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it.
+ */
+ if (config->num_plane &&
+ (plane_resp->count_planes >= config->num_plane)) {
+ plane_ptr = (uint32_t __user *)(unsigned long)plane_resp->plane_id_ptr;
+
+ list_for_each_entry(plane, &config->plane_list, head) {
+ if (put_user(plane->base.id, plane_ptr + copied)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ copied++;
+ }
+ }
+ plane_resp->count_planes = config->num_plane;
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+/**
+ * drm_mode_getplane - get plane info
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Return plane info, including formats supported, gamma size, any
+ * current fb, etc.
+ */
+int drm_mode_getplane(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_get_plane *plane_resp = data;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ uint32_t __user *format_ptr;
+ int ret = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ mutex_lock(&dev->mode_config.mutex);
+ obj = drm_mode_object_find(dev, plane_resp->plane_id,
+ DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ ret = -ENOENT;
+ goto out;
+ }
+ plane = obj_to_plane(obj);
+
+ if (plane->crtc)
+ plane_resp->crtc_id = plane->crtc->base.id;
+ else
+ plane_resp->crtc_id = 0;
+
+ if (plane->fb)
+ plane_resp->fb_id = plane->fb->base.id;
+ else
+ plane_resp->fb_id = 0;
+
+ plane_resp->plane_id = plane->base.id;
+ plane_resp->possible_crtcs = plane->possible_crtcs;
+ plane_resp->gamma_size = plane->gamma_size;
+
+ /*
+ * This ioctl is called twice, once to determine how much space is
+ * needed, and the 2nd time to fill it.
+ */
+ if (plane->format_count &&
+ (plane_resp->count_format_types >= plane->format_count)) {
+ format_ptr = (uint32_t __user *)(unsigned long)plane_resp->format_type_ptr;
+ if (copy_to_user(format_ptr,
+ plane->format_types,
+ sizeof(uint32_t) * plane->format_count)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ }
+ plane_resp->count_format_types = plane->format_count;
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+/**
+ * drm_mode_setplane - set up or tear down a plane
+ * @dev: DRM device
+ * @data: ioctl data
+ * @file_priv: DRM file info
+ *
+ * Set plane info, including placement, fb, scaling, and other factors.
+ * Or pass a NULL fb to disable.
+ */
+int drm_mode_setplane(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_mode_set_plane *plane_req = data;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ int ret = 0;
+ unsigned int fb_width, fb_height;
+ int i;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ /*
+ * First, find the plane, crtc, and fb objects. If not available,
+ * we don't bother to call the driver.
+ */
+ obj = drm_mode_object_find(dev, plane_req->plane_id,
+ DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown plane ID %d\n",
+ plane_req->plane_id);
+ ret = -ENOENT;
+ goto out;
+ }
+ plane = obj_to_plane(obj);
+
+ /* No fb means shut it down */
+ if (!plane_req->fb_id) {
+ plane->funcs->disable_plane(plane);
+ plane->crtc = NULL;
+ plane->fb = NULL;
+ goto out;
+ }
+
+ obj = drm_mode_object_find(dev, plane_req->crtc_id,
+ DRM_MODE_OBJECT_CRTC);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown crtc ID %d\n",
+ plane_req->crtc_id);
+ ret = -ENOENT;
+ goto out;
+ }
+ crtc = obj_to_crtc(obj);
+
+ obj = drm_mode_object_find(dev, plane_req->fb_id,
+ DRM_MODE_OBJECT_FB);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown framebuffer ID %d\n",
+ plane_req->fb_id);
+ ret = -ENOENT;
+ goto out;
+ }
+ fb = obj_to_fb(obj);
+
+ /* Check whether this plane supports the fb pixel format. */
+ for (i = 0; i < plane->format_count; i++)
+ if (fb->pixel_format == plane->format_types[i])
+ break;
+ if (i == plane->format_count) {
+ DRM_DEBUG_KMS("Invalid pixel format 0x%08x\n", fb->pixel_format);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ fb_width = fb->width << 16;
+ fb_height = fb->height << 16;
+
+ /* Make sure source coordinates are inside the fb. */
+ if (plane_req->src_w > fb_width ||
+ plane_req->src_x > fb_width - plane_req->src_w ||
+ plane_req->src_h > fb_height ||
+ plane_req->src_y > fb_height - plane_req->src_h) {
+ DRM_DEBUG_KMS("Invalid source coordinates "
+ "%u.%06ux%u.%06u+%u.%06u+%u.%06u\n",
+ plane_req->src_w >> 16,
+ ((plane_req->src_w & 0xffff) * 15625) >> 10,
+ plane_req->src_h >> 16,
+ ((plane_req->src_h & 0xffff) * 15625) >> 10,
+ plane_req->src_x >> 16,
+ ((plane_req->src_x & 0xffff) * 15625) >> 10,
+ plane_req->src_y >> 16,
+ ((plane_req->src_y & 0xffff) * 15625) >> 10);
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ /* Give drivers some help against integer overflows */
+ if (plane_req->crtc_w > INT_MAX ||
+ plane_req->crtc_x > INT_MAX - (int32_t) plane_req->crtc_w ||
+ plane_req->crtc_h > INT_MAX ||
+ plane_req->crtc_y > INT_MAX - (int32_t) plane_req->crtc_h) {
+ DRM_DEBUG_KMS("Invalid CRTC coordinates %ux%u+%d+%d\n",
+ plane_req->crtc_w, plane_req->crtc_h,
+ plane_req->crtc_x, plane_req->crtc_y);
+ ret = -ERANGE;
+ goto out;
+ }
+
+ ret = plane->funcs->update_plane(plane, crtc, fb,
+ plane_req->crtc_x, plane_req->crtc_y,
+ plane_req->crtc_w, plane_req->crtc_h,
+ plane_req->src_x, plane_req->src_y,
+ plane_req->src_w, plane_req->src_h);
+ if (!ret) {
+ plane->crtc = crtc;
+ plane->fb = fb;
+ }
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return ret;
+}
+
+/**
* drm_mode_setcrtc - set CRTC configuration
* @inode: inode from the ioctl
* @filp: file * from the ioctl
@@ -1576,7 +1892,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
}
for (i = 0; i < crtc_req->count_connectors; i++) {
- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
if (get_user(out_id, &set_connectors_ptr[i])) {
ret = -EFAULT;
goto out;
@@ -1625,10 +1941,8 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
- if (!req->flags) {
- DRM_ERROR("no operation set\n");
+ if (!req->flags)
return -EINVAL;
- }
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, req->crtc_id, DRM_MODE_OBJECT_CRTC);
@@ -1641,7 +1955,6 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
if (req->flags & DRM_MODE_CURSOR_BO) {
if (!crtc->funcs->cursor_set) {
- DRM_ERROR("crtc does not support cursor\n");
ret = -ENXIO;
goto out;
}
@@ -1654,7 +1967,6 @@ int drm_mode_cursor_ioctl(struct drm_device *dev,
if (crtc->funcs->cursor_move) {
ret = crtc->funcs->cursor_move(crtc, req->x, req->y);
} else {
- DRM_ERROR("crtc does not support cursor\n");
ret = -EFAULT;
goto out;
}
@@ -1664,6 +1976,42 @@ out:
return ret;
}
+/* Original addfb only supported RGB formats, so figure out which one */
+uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth)
+{
+ uint32_t fmt;
+
+ switch (bpp) {
+ case 8:
+ fmt = DRM_FORMAT_RGB332;
+ break;
+ case 16:
+ if (depth == 15)
+ fmt = DRM_FORMAT_XRGB1555;
+ else
+ fmt = DRM_FORMAT_RGB565;
+ break;
+ case 24:
+ fmt = DRM_FORMAT_RGB888;
+ break;
+ case 32:
+ if (depth == 24)
+ fmt = DRM_FORMAT_XRGB8888;
+ else if (depth == 30)
+ fmt = DRM_FORMAT_XRGB2101010;
+ else
+ fmt = DRM_FORMAT_ARGB8888;
+ break;
+ default:
+ DRM_ERROR("bad bpp, assuming x8r8g8b8 pixel format\n");
+ fmt = DRM_FORMAT_XRGB8888;
+ break;
+ }
+
+ return fmt;
+}
+EXPORT_SYMBOL(drm_mode_legacy_fb_format);
+
/**
* drm_mode_addfb - add an FB to the graphics configuration
* @inode: inode from the ioctl
@@ -1684,7 +2032,140 @@ out:
int drm_mode_addfb(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
- struct drm_mode_fb_cmd *r = data;
+ struct drm_mode_fb_cmd *or = data;
+ struct drm_mode_fb_cmd2 r = {};
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_framebuffer *fb;
+ int ret = 0;
+
+ /* Use new struct with format internally */
+ r.fb_id = or->fb_id;
+ r.width = or->width;
+ r.height = or->height;
+ r.pitches[0] = or->pitch;
+ r.pixel_format = drm_mode_legacy_fb_format(or->bpp, or->depth);
+ r.handles[0] = or->handle;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ if ((config->min_width > r.width) || (r.width > config->max_width))
+ return -EINVAL;
+
+ if ((config->min_height > r.height) || (r.height > config->max_height))
+ return -EINVAL;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ /* TODO check buffer is sufficiently large */
+ /* TODO setup destructor callback */
+
+ fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
+ if (IS_ERR(fb)) {
+ DRM_ERROR("could not create framebuffer\n");
+ ret = PTR_ERR(fb);
+ goto out;
+ }
+
+ or->fb_id = fb->base.id;
+ list_add(&fb->filp_head, &file_priv->fbs);
+ DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id);
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+static int format_check(struct drm_mode_fb_cmd2 *r)
+{
+ uint32_t format = r->pixel_format & ~DRM_FORMAT_BIG_ENDIAN;
+
+ switch (format) {
+ case DRM_FORMAT_C8:
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
+ case DRM_FORMAT_XRGB4444:
+ case DRM_FORMAT_XBGR4444:
+ case DRM_FORMAT_RGBX4444:
+ case DRM_FORMAT_BGRX4444:
+ case DRM_FORMAT_ARGB4444:
+ case DRM_FORMAT_ABGR4444:
+ case DRM_FORMAT_RGBA4444:
+ case DRM_FORMAT_BGRA4444:
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_RGBX5551:
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_RGBX1010102:
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_VYUY:
+ case DRM_FORMAT_AYUV:
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * drm_mode_addfb2 - add an FB to the graphics configuration
+ * @inode: inode from the ioctl
+ * @filp: file * from the ioctl
+ * @cmd: cmd from ioctl
+ * @arg: arg from ioctl
+ *
+ * LOCKING:
+ * Takes mode config lock.
+ *
+ * Add a new FB to the specified CRTC, given a user request with format.
+ *
+ * Called by the user via ioctl.
+ *
+ * RETURNS:
+ * Zero on success, errno on failure.
+ */
+int drm_mode_addfb2(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_fb_cmd2 *r = data;
struct drm_mode_config *config = &dev->mode_config;
struct drm_framebuffer *fb;
int ret = 0;
@@ -1693,18 +2174,23 @@ int drm_mode_addfb(struct drm_device *dev,
return -EINVAL;
if ((config->min_width > r->width) || (r->width > config->max_width)) {
- DRM_ERROR("mode new framebuffer width not within limits\n");
+ DRM_ERROR("bad framebuffer width %d, should be >= %d && <= %d\n",
+ r->width, config->min_width, config->max_width);
return -EINVAL;
}
if ((config->min_height > r->height) || (r->height > config->max_height)) {
- DRM_ERROR("mode new framebuffer height not within limits\n");
+ DRM_ERROR("bad framebuffer height %d, should be >= %d && <= %d\n",
+ r->height, config->min_height, config->max_height);
return -EINVAL;
}
- mutex_lock(&dev->mode_config.mutex);
+ ret = format_check(r);
+ if (ret) {
+ DRM_ERROR("bad framebuffer format 0x%08x\n", r->pixel_format);
+ return ret;
+ }
- /* TODO check buffer is sufficiently large */
- /* TODO setup destructor callback */
+ mutex_lock(&dev->mode_config.mutex);
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
if (IS_ERR(fb)) {
@@ -1756,7 +2242,6 @@ int drm_mode_rmfb(struct drm_device *dev,
obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
/* TODO check that we really get a framebuffer back. */
if (!obj) {
- DRM_ERROR("mode invalid framebuffer id\n");
ret = -EINVAL;
goto out;
}
@@ -1767,7 +2252,6 @@ int drm_mode_rmfb(struct drm_device *dev,
found = 1;
if (!found) {
- DRM_ERROR("tried to remove a fb that we didn't own\n");
ret = -EINVAL;
goto out;
}
@@ -1814,7 +2298,6 @@ int drm_mode_getfb(struct drm_device *dev,
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
if (!obj) {
- DRM_ERROR("invalid framebuffer id\n");
ret = -EINVAL;
goto out;
}
@@ -1824,7 +2307,7 @@ int drm_mode_getfb(struct drm_device *dev,
r->width = fb->width;
r->depth = fb->depth;
r->bpp = fb->bits_per_pixel;
- r->pitch = fb->pitch;
+ r->pitch = fb->pitches[0];
fb->funcs->create_handle(fb, file_priv, &r->handle);
out:
@@ -1850,14 +2333,13 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
mutex_lock(&dev->mode_config.mutex);
obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
if (!obj) {
- DRM_ERROR("invalid framebuffer id\n");
ret = -EINVAL;
goto out_err1;
}
fb = obj_to_fb(obj);
num_clips = r->num_clips;
- clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
+ clips_ptr = (struct drm_clip_rect __user *)(unsigned long)r->clips_ptr;
if (!num_clips != !clips_ptr) {
ret = -EINVAL;
@@ -2253,7 +2735,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
struct drm_property_enum *prop_enum;
struct drm_mode_property_enum __user *enum_ptr;
struct drm_property_blob *prop_blob;
- uint32_t *blob_id_ptr;
+ uint32_t __user *blob_id_ptr;
uint64_t __user *values_ptr;
uint32_t __user *blob_length_ptr;
@@ -2283,7 +2765,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
out_resp->flags = property->flags;
if ((out_resp->count_values >= value_count) && value_count) {
- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
for (i = 0; i < value_count; i++) {
if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
ret = -EFAULT;
@@ -2296,7 +2778,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
if (property->flags & DRM_MODE_PROP_ENUM) {
if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
copied = 0;
- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
@@ -2318,8 +2800,8 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
if (property->flags & DRM_MODE_PROP_BLOB) {
if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
copied = 0;
- blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
+ blob_id_ptr = (uint32_t __user *)(unsigned long)out_resp->enum_blob_ptr;
+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
@@ -2380,7 +2862,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
struct drm_mode_get_blob *out_resp = data;
struct drm_property_blob *blob;
int ret = 0;
- void *blob_ptr;
+ void __user *blob_ptr;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
@@ -2394,7 +2876,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
blob = obj_to_blob(obj);
if (out_resp->length == blob->length) {
- blob_ptr = (void *)(unsigned long)out_resp->data;
+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
if (copy_to_user(blob_ptr, blob->data, blob->length)){
ret = -EFAULT;
goto done;
@@ -2788,3 +3270,71 @@ int drm_mode_destroy_dumb_ioctl(struct drm_device *dev,
return dev->driver->dumb_destroy(file_priv, dev, args->handle);
}
+
+/*
+ * Just need to support RGB formats here for compat with code that doesn't
+ * use pixel formats directly yet.
+ */
+void drm_fb_get_bpp_depth(uint32_t format, unsigned int *depth,
+ int *bpp)
+{
+ switch (format) {
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_BGR233:
+ *depth = 8;
+ *bpp = 8;
+ break;
+ case DRM_FORMAT_XRGB1555:
+ case DRM_FORMAT_XBGR1555:
+ case DRM_FORMAT_RGBX5551:
+ case DRM_FORMAT_BGRX5551:
+ case DRM_FORMAT_ARGB1555:
+ case DRM_FORMAT_ABGR1555:
+ case DRM_FORMAT_RGBA5551:
+ case DRM_FORMAT_BGRA5551:
+ *depth = 15;
+ *bpp = 16;
+ break;
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_BGR565:
+ *depth = 16;
+ *bpp = 16;
+ break;
+ case DRM_FORMAT_RGB888:
+ case DRM_FORMAT_BGR888:
+ *depth = 24;
+ *bpp = 24;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_BGRX8888:
+ *depth = 24;
+ *bpp = 32;
+ break;
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_XBGR2101010:
+ case DRM_FORMAT_RGBX1010102:
+ case DRM_FORMAT_BGRX1010102:
+ case DRM_FORMAT_ARGB2101010:
+ case DRM_FORMAT_ABGR2101010:
+ case DRM_FORMAT_RGBA1010102:
+ case DRM_FORMAT_BGRA1010102:
+ *depth = 30;
+ *bpp = 32;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_ABGR8888:
+ case DRM_FORMAT_RGBA8888:
+ case DRM_FORMAT_BGRA8888:
+ *depth = 32;
+ *bpp = 32;
+ break;
+ default:
+ DRM_DEBUG_KMS("unsupported pixel format\n");
+ *depth = 0;
+ *bpp = 0;
+ break;
+ }
+}
+EXPORT_SYMBOL(drm_fb_get_bpp_depth);
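The plane ioctls added above follow the usual two-pass DRM convention spelled out in their comments: a first call with a zero count asks the kernel how many entries exist, a second call with a large-enough buffer fetches them. A minimal userspace sketch of that pattern for DRM_IOCTL_MODE_GETPLANERESOURCES (hypothetical code, using raw ioctl() rather than libdrm wrappers, and assuming fd is an already-open DRM device node with master rights):

/* Hypothetical sketch: enumerate plane IDs with the two-pass pattern. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static int dump_plane_ids(int fd)
{
	struct drm_mode_get_plane_res res = { 0 };
	uint32_t *ids;
	uint32_t i;

	/* Pass 1: count_planes == 0, the kernel only reports the count. */
	if (ioctl(fd, DRM_IOCTL_MODE_GETPLANERESOURCES, &res))
		return -1;
	if (!res.count_planes)
		return 0;

	ids = calloc(res.count_planes, sizeof(*ids));
	if (!ids)
		return -1;
	res.plane_id_ptr = (uint64_t)(uintptr_t)ids;

	/* Pass 2: the buffer is large enough, the kernel copies the IDs. */
	if (ioctl(fd, DRM_IOCTL_MODE_GETPLANERESOURCES, &res)) {
		free(ids);
		return -1;
	}

	for (i = 0; i < res.count_planes; i++)
		printf("plane object id %u\n", ids[i]);

	free(ids);
	return 0;
}

The per-plane format list returned by DRM_IOCTL_MODE_GETPLANE (count_format_types / format_type_ptr) is queried the same way.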
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index d2619d7..84a4a80 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -34,6 +34,7 @@
#include "drmP.h"
#include "drm_crtc.h"
+#include "drm_fourcc.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
@@ -710,7 +711,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
for (i = 0; i < set->num_connectors; i++) {
DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
drm_get_connector_name(set->connectors[i]));
- set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
+ set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
}
}
drm_helper_disable_unused_functions(dev);
@@ -847,13 +848,19 @@ void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
EXPORT_SYMBOL(drm_helper_connector_dpms);
int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
- struct drm_mode_fb_cmd *mode_cmd)
+ struct drm_mode_fb_cmd2 *mode_cmd)
{
+ int i;
+
fb->width = mode_cmd->width;
fb->height = mode_cmd->height;
- fb->pitch = mode_cmd->pitch;
- fb->bits_per_pixel = mode_cmd->bpp;
- fb->depth = mode_cmd->depth;
+ for (i = 0; i < 4; i++) {
+ fb->pitches[i] = mode_cmd->pitches[i];
+ fb->offsets[i] = mode_cmd->offsets[i];
+ }
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &fb->depth,
+ &fb->bits_per_pixel);
+ fb->pixel_format = mode_cmd->pixel_format;
return 0;
}
@@ -1008,3 +1015,36 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
queue_delayed_work(system_nrt_wq, &dev->mode_config.output_poll_work, 0);
}
EXPORT_SYMBOL(drm_helper_hpd_irq_event);
+
+
+/**
+ * drm_format_num_planes - get the number of planes for format
+ * @format: pixel format (DRM_FORMAT_*)
+ *
+ * RETURNS:
+ * The number of planes used by the specified pixel format.
+ */
+int drm_format_num_planes(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_YUV410:
+ case DRM_FORMAT_YVU410:
+ case DRM_FORMAT_YUV411:
+ case DRM_FORMAT_YVU411:
+ case DRM_FORMAT_YUV420:
+ case DRM_FORMAT_YVU420:
+ case DRM_FORMAT_YUV422:
+ case DRM_FORMAT_YVU422:
+ case DRM_FORMAT_YUV444:
+ case DRM_FORMAT_YVU444:
+ return 3;
+ case DRM_FORMAT_NV12:
+ case DRM_FORMAT_NV21:
+ case DRM_FORMAT_NV16:
+ case DRM_FORMAT_NV61:
+ return 2;
+ default:
+ return 1;
+ }
+}
+EXPORT_SYMBOL(drm_format_num_planes);
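Taken together, drm_fb_get_bpp_depth() and drm_format_num_planes() give a driver both the legacy bpp/depth view of a fourcc code and its plane count. A small illustrative sketch (the wrapper function, the header choices for the prototypes, and the example formats are assumptions; only the helper signatures come from the hunks above):

/* Illustrative only: report what the new format helpers say about a fourcc. */
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_fourcc.h"

static void describe_format(uint32_t format)
{
	unsigned int depth;
	int bpp;

	drm_fb_get_bpp_depth(format, &depth, &bpp);
	DRM_DEBUG_KMS("0x%08x: depth %u, %d bpp, %d plane(s)\n",
		      format, depth, bpp, drm_format_num_planes(format));
}

/* describe_format(DRM_FORMAT_XRGB8888) -> depth 24, 32 bpp, 1 plane
 * describe_format(DRM_FORMAT_NV12)     -> depth 0, 0 bpp (the legacy view
 * does not apply to YUV), 2 planes */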
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 40c187c..ebf7d3f 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -61,14 +61,14 @@ static int drm_version(struct drm_device *dev, void *data,
/** Ioctl table */
static struct drm_ioctl_desc drm_ioctls[] = {
- DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CAP, drm_getcap, DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
@@ -136,8 +136,11 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANERESOURCES, drm_mode_getplane_res, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPLANE, drm_mode_getplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPLANE, drm_mode_setplane, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
@@ -150,6 +153,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 3e927ce..ece03fc 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -508,25 +508,10 @@ static void
cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
{
int i, n = 0;
- u8 rev = ext[0x01], d = ext[0x02];
+ u8 d = ext[0x02];
u8 *det_base = ext + d;
- switch (rev) {
- case 0:
- /* can't happen */
- return;
- case 1:
- /* have to infer how many blocks we have, check pixel clock */
- for (i = 0; i < 6; i++)
- if (det_base[18*i] || det_base[18*i+1])
- n++;
- break;
- default:
- /* explicit count */
- n = min(ext[0x03] & 0x0f, 6);
- break;
- }
-
+ n = (127 - d) / 18;
for (i = 0; i < n; i++)
cb((struct detailed_timing *)(det_base + 18 * i), closure);
}
@@ -1319,6 +1304,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
#define HDMI_IDENTIFIER 0x000C03
#define AUDIO_BLOCK 0x01
+#define VIDEO_BLOCK 0x02
#define VENDOR_BLOCK 0x03
#define SPEAKER_BLOCK 0x04
#define EDID_BASIC_AUDIO (1 << 6)
@@ -1349,6 +1335,47 @@ u8 *drm_find_cea_extension(struct edid *edid)
}
EXPORT_SYMBOL(drm_find_cea_extension);
+static int
+do_cea_modes (struct drm_connector *connector, u8 *db, u8 len)
+{
+ struct drm_device *dev = connector->dev;
+ u8 * mode, cea_mode;
+ int modes = 0;
+
+ for (mode = db; mode < db + len; mode++) {
+ cea_mode = (*mode & 127) - 1; /* CEA modes are numbered 1..127 */
+ if (cea_mode < drm_num_cea_modes) {
+ struct drm_display_mode *newmode;
+ newmode = drm_mode_duplicate(dev,
+ &edid_cea_modes[cea_mode]);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ modes++;
+ }
+ }
+ }
+
+ return modes;
+}
+
+static int
+add_cea_modes(struct drm_connector *connector, struct edid *edid)
+{
+ u8 * cea = drm_find_cea_extension(edid);
+ u8 * db, dbl;
+ int modes = 0;
+
+ if (cea && cea[1] >= 3) {
+ for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
+ dbl = db[0] & 0x1f;
+ if (((db[0] & 0xe0) >> 5) == VIDEO_BLOCK)
+ modes += do_cea_modes (connector, db+1, dbl);
+ }
+ }
+
+ return modes;
+}
+
static void
parse_hdmi_vsdb(struct drm_connector *connector, uint8_t *db)
{
@@ -1432,26 +1459,29 @@ void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
eld[18] = edid->prod_code[0];
eld[19] = edid->prod_code[1];
- for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
- dbl = db[0] & 0x1f;
-
- switch ((db[0] & 0xe0) >> 5) {
- case AUDIO_BLOCK: /* Audio Data Block, contains SADs */
- sad_count = dbl / 3;
- memcpy(eld + 20 + mnl, &db[1], dbl);
- break;
- case SPEAKER_BLOCK: /* Speaker Allocation Data Block */
- eld[7] = db[1];
- break;
- case VENDOR_BLOCK:
- /* HDMI Vendor-Specific Data Block */
- if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0)
- parse_hdmi_vsdb(connector, db);
- break;
- default:
- break;
+ if (cea[1] >= 3)
+ for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
+ dbl = db[0] & 0x1f;
+
+ switch ((db[0] & 0xe0) >> 5) {
+ case AUDIO_BLOCK:
+ /* Audio Data Block, contains SADs */
+ sad_count = dbl / 3;
+ memcpy(eld + 20 + mnl, &db[1], dbl);
+ break;
+ case SPEAKER_BLOCK:
+ /* Speaker Allocation Data Block */
+ eld[7] = db[1];
+ break;
+ case VENDOR_BLOCK:
+ /* HDMI Vendor-Specific Data Block */
+ if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0)
+ parse_hdmi_vsdb(connector, db);
+ break;
+ default:
+ break;
+ }
}
- }
eld[5] |= sad_count << 4;
eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
@@ -1722,6 +1752,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
num_modes += add_standard_modes(connector, edid);
num_modes += add_established_modes(connector, edid);
num_modes += add_inferred_modes(connector, edid);
+ num_modes += add_cea_modes(connector, edid);
if (quirks & (EDID_QUIRK_PREFER_LARGE_60 | EDID_QUIRK_PREFER_LARGE_75))
edid_fixup_preferred(connector, quirks);
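The CEA extension walk above leans on the fixed layout of a CEA-861 extension block: byte 1 is the revision (data blocks only exist from revision 3 on, hence the cea[1] >= 3 guards), byte 2 is the offset of the first Detailed Timing Descriptor, and bytes 4 up to that offset hold the data block collection, each block led by a tag/length byte. A standalone sketch of just that header decode, reusing the masks from do_cea_modes() and drm_edid_to_eld() (the helper name is hypothetical):

/* Hypothetical decode of the CEA-861 data block collection walked above. */
#include <linux/kernel.h>

static void cea_walk_data_blocks(const u8 *cea)
{
	u8 dtd_off = cea[2];		/* offset of the first DTD */
	const u8 *db;

	if (cea[1] < 3)			/* no data block collection */
		return;

	for (db = cea + 4; db < cea + dtd_off; db += (db[0] & 0x1f) + 1) {
		u8 tag = (db[0] & 0xe0) >> 5;	/* AUDIO/VIDEO/VENDOR/SPEAKER */
		u8 len = db[0] & 0x1f;		/* payload bytes that follow */

		pr_debug("CEA data block tag %u, %u byte payload\n", tag, len);
	}
}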
diff --git a/drivers/gpu/drm/drm_edid_modes.h b/drivers/gpu/drm/drm_edid_modes.h
index 5f206448..a91ffb1 100644
--- a/drivers/gpu/drm/drm_edid_modes.h
+++ b/drivers/gpu/drm/drm_edid_modes.h
@@ -378,3 +378,287 @@ static const struct {
{ 1920, 1440, 75, 0 },
};
static const int num_est3_modes = sizeof(est3_modes) / sizeof(est3_modes[0]);
+
+/*
+ * Probably taken from CEA-861 spec.
+ * This table is converted from xorg's hw/xfree86/modes/xf86EdidModes.c.
+ */
+static const struct drm_display_mode edid_cea_modes[] = {
+ /* 640x480@60Hz */
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+ 752, 800, 0, 480, 490, 492, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 720x480@60Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 720x480@60Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 27000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x720@60Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1080i@60Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1440x480i@60Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1440x480i@60Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1440x240@60Hz */
+ { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x240@60Hz */
+ { DRM_MODE("1440x240", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1478,
+ 1602, 1716, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2880x480i@60Hz */
+ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 2880x480i@60Hz */
+ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 2880x240@60Hz */
+ { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2880x240@60Hz */
+ { DRM_MODE("2880x240", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2956,
+ 3204, 3432, 0, 240, 244, 247, 262, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x480@60Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+ 1596, 1716, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x480@60Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1472,
+ 1596, 1716, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1080@60Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 720x576@50Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 720x576@50Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 27000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1280x720@50Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1080i@50Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1440x576i@50Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1440x576i@50Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1440x288@50Hz */
+ { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x288@50Hz */
+ { DRM_MODE("1440x288", DRM_MODE_TYPE_DRIVER, 27000, 1440, 1464,
+ 1590, 1728, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2880x576i@50Hz */
+ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 2880x576i@50Hz */
+ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 2880x288@50Hz */
+ { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2880x288@50Hz */
+ { DRM_MODE("2880x288", DRM_MODE_TYPE_DRIVER, 54000, 2880, 2928,
+ 3180, 3456, 0, 288, 290, 293, 312, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x576@50Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1592, 1728, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x576@50Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1592, 1728, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1080@50Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1080@24Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2558,
+ 2602, 2750, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1080@25Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1080@30Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 74250, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 2880x480@60Hz */
+ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+ 3192, 3432, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2880x480@60Hz */
+ { DRM_MODE("2880x480", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2944,
+ 3192, 3432, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2880x576@50Hz */
+ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+ 3184, 3456, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 2880x576@50Hz */
+ { DRM_MODE("2880x576", DRM_MODE_TYPE_DRIVER, 108000, 2880, 2928,
+ 3184, 3456, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1080i@50Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 72000, 1920, 1952,
+ 2120, 2304, 0, 1080, 1126, 1136, 1250, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1920x1080i@100Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1280x720@100Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1720,
+ 1760, 1980, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 720x576@100Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 720x576@100Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 54000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x576i@100Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x576i@100Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1920x1080i@120Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1280x720@120Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 148500, 1280, 1390,
+ 1430, 1650, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 720x480@120Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 720x480@120Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 54000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x480i@120Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1440x480i@120Hz */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 54000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 720x576@200Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 720x576@200Hz */
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 108000, 720, 732,
+ 796, 864, 0, 576, 581, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x576i@200Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1440x576i@200Hz */
+ { DRM_MODE("1440x576", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1464,
+ 1590, 1728, 0, 576, 580, 586, 625, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 720x480@240Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 720x480@240Hz */
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 108000, 720, 736,
+ 798, 858, 0, 480, 489, 495, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+ /* 1440x480i@240 */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1440x480i@240 */
+ { DRM_MODE("1440x480", DRM_MODE_TYPE_DRIVER, 108000, 1440, 1478,
+ 1602, 1716, 0, 480, 488, 494, 525, 0,
+ DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC |
+ DRM_MODE_FLAG_INTERLACE) },
+ /* 1280x720@24Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 59400, 1280, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x720@25Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3700,
+ 3740, 3960, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1280x720@30Hz */
+ { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 74250, 1280, 3040,
+ 3080, 3300, 0, 720, 725, 730, 750, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1080@120Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2008,
+ 2052, 2200, 0, 1080, 1084, 1089, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ /* 1920x1080@100Hz */
+ { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 297000, 1920, 2448,
+ 2492, 2640, 0, 1080, 1084, 1094, 1125, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+static const int drm_num_cea_modes =
+ sizeof (edid_cea_modes) / sizeof (edid_cea_modes[0]);
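The table is indexed by CEA Video Identification Code: a Short Video Descriptor byte carries a 1-based VIC in its low seven bits (bit 7 marks a native format), and do_cea_modes() above simply subtracts one to index edid_cea_modes. A hypothetical lookup helper that makes the mapping explicit:

/* Illustrative only: map a Short Video Descriptor byte to a CEA mode. */
static const struct drm_display_mode *cea_mode_for_svd(u8 svd)
{
	u8 vic = svd & 127;		/* bit 7 flags a "native" format */

	if (!vic || vic > drm_num_cea_modes)
		return NULL;
	/* e.g. VIC 16 is the 1920x1080@60Hz entry above */
	return &edid_cea_modes[vic - 1];
}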
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 80fe39d..aada26f 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -255,6 +255,13 @@ bool drm_fb_helper_force_kernel_mode(void)
int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
void *panic_str)
{
+ /*
+ * It's a waste of time and effort to switch back to text console
+ * if the kernel should reboot before panic messages can be seen.
+ */
+ if (panic_timeout < 0)
+ return 0;
+
printk(KERN_ERR "panic occurred, switching back to text console\n");
return drm_fb_helper_force_kernel_mode();
}
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 4911e1d..6263b01 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -182,7 +182,7 @@ int drm_stub_open(struct inode *inode, struct file *filp)
goto out;
old_fops = filp->f_op;
- filp->f_op = fops_get(&dev->driver->fops);
+ filp->f_op = fops_get(dev->driver->fops);
if (filp->f_op == NULL) {
filp->f_op = old_fops;
goto out;
@@ -487,6 +487,11 @@ int drm_release(struct inode *inode, struct file *filp)
(long)old_encode_dev(file_priv->minor->device),
dev->open_count);
+ /* Release any auth tokens that might point to this file_priv,
+ (do that under the drm_global_mutex) */
+ if (file_priv->magic)
+ (void) drm_remove_magic(file_priv->master, file_priv->magic);
+
/* if the master has gone away we can't do anything with the lock */
if (file_priv->minor->master)
drm_master_release(dev, filp);
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 396e60c..f8625e2 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -140,7 +140,7 @@ int drm_gem_object_init(struct drm_device *dev,
obj->dev = dev;
obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
if (IS_ERR(obj->filp))
- return -ENOMEM;
+ return PTR_ERR(obj->filp);
kref_init(&obj->refcount);
atomic_set(&obj->handle_count, 0);
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index ddd70db..637fcc3 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -315,7 +315,8 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
if (err)
return err;
- if (__get_user(c32.auth, &client->auth)
+ if (__get_user(c32.idx, &client->idx)
+ || __get_user(c32.auth, &client->auth)
|| __get_user(c32.pid, &client->pid)
|| __get_user(c32.uid, &client->uid)
|| __get_user(c32.magic, &client->magic)
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 904d7e9..956fd38 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -158,14 +158,11 @@ int drm_getmap(struct drm_device *dev, void *data,
int i;
idx = map->offset;
-
- mutex_lock(&dev->struct_mutex);
- if (idx < 0) {
- mutex_unlock(&dev->struct_mutex);
+ if (idx < 0)
return -EINVAL;
- }
i = 0;
+ mutex_lock(&dev->struct_mutex);
list_for_each(list, &dev->maplist) {
if (i == idx) {
r_list = list_entry(list, struct drm_map_list, head);
@@ -211,9 +208,9 @@ int drm_getclient(struct drm_device *dev, void *data,
int i;
idx = client->idx;
- mutex_lock(&dev->struct_mutex);
-
i = 0;
+
+ mutex_lock(&dev->struct_mutex);
list_for_each_entry(pt, &dev->filelist, lhead) {
if (i++ >= idx) {
client->auth = pt->authenticated;
@@ -249,8 +246,6 @@ int drm_getstats(struct drm_device *dev, void *data,
memset(stats, 0, sizeof(*stats));
- mutex_lock(&dev->struct_mutex);
-
for (i = 0; i < dev->counters; i++) {
if (dev->types[i] == _DRM_STAT_LOCK)
stats->data[i].value =
@@ -262,8 +257,6 @@ int drm_getstats(struct drm_device *dev, void *data,
stats->count = dev->counters;
- mutex_unlock(&dev->struct_mutex);
-
return 0;
}
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 632ae24..c79c713 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -33,6 +33,7 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/export.h>
#include "drmP.h"
static int drm_notifier(void *priv);
@@ -345,6 +346,7 @@ void drm_idlelock_take(struct drm_lock_data *lock_data)
}
spin_unlock_bh(&lock_data->spinlock);
}
+EXPORT_SYMBOL(drm_idlelock_take);
void drm_idlelock_release(struct drm_lock_data *lock_data)
{
@@ -364,6 +366,7 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
}
spin_unlock_bh(&lock_data->spinlock);
}
+EXPORT_SYMBOL(drm_idlelock_release);
int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
{
diff --git a/drivers/gpu/drm/drm_sman.c b/drivers/gpu/drm/drm_sman.c
deleted file mode 100644
index cebce45..0000000
--- a/drivers/gpu/drm/drm_sman.c
+++ /dev/null
@@ -1,351 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Simple memory manager interface that keeps track on allocate regions on a
- * per "owner" basis. All regions associated with an "owner" can be released
- * with a simple call. Typically if the "owner" exists. The owner is any
- * "unsigned long" identifier. Can typically be a pointer to a file private
- * struct or a context identifier.
- *
- * Authors:
- * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include <linux/export.h>
-#include "drm_sman.h"
-
-struct drm_owner_item {
- struct drm_hash_item owner_hash;
- struct list_head sman_list;
- struct list_head mem_blocks;
-};
-
-void drm_sman_takedown(struct drm_sman * sman)
-{
- drm_ht_remove(&sman->user_hash_tab);
- drm_ht_remove(&sman->owner_hash_tab);
- kfree(sman->mm);
-}
-
-EXPORT_SYMBOL(drm_sman_takedown);
-
-int
-drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
- unsigned int user_order, unsigned int owner_order)
-{
- int ret = 0;
-
- sman->mm = kcalloc(num_managers, sizeof(*sman->mm), GFP_KERNEL);
- if (!sman->mm) {
- ret = -ENOMEM;
- goto out;
- }
- sman->num_managers = num_managers;
- INIT_LIST_HEAD(&sman->owner_items);
- ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
- if (ret)
- goto out1;
- ret = drm_ht_create(&sman->user_hash_tab, user_order);
- if (!ret)
- goto out;
-
- drm_ht_remove(&sman->owner_hash_tab);
-out1:
- kfree(sman->mm);
-out:
- return ret;
-}
-
-EXPORT_SYMBOL(drm_sman_init);
-
-static void *drm_sman_mm_allocate(void *private, unsigned long size,
- unsigned alignment)
-{
- struct drm_mm *mm = (struct drm_mm *) private;
- struct drm_mm_node *tmp;
-
- tmp = drm_mm_search_free(mm, size, alignment, 1);
- if (!tmp) {
- return NULL;
- }
- tmp = drm_mm_get_block(tmp, size, alignment);
- return tmp;
-}
-
-static void drm_sman_mm_free(void *private, void *ref)
-{
- struct drm_mm_node *node = (struct drm_mm_node *) ref;
-
- drm_mm_put_block(node);
-}
-
-static void drm_sman_mm_destroy(void *private)
-{
- struct drm_mm *mm = (struct drm_mm *) private;
- drm_mm_takedown(mm);
- kfree(mm);
-}
-
-static unsigned long drm_sman_mm_offset(void *private, void *ref)
-{
- struct drm_mm_node *node = (struct drm_mm_node *) ref;
- return node->start;
-}
-
-int
-drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
- unsigned long start, unsigned long size)
-{
- struct drm_sman_mm *sman_mm;
- struct drm_mm *mm;
- int ret;
-
- BUG_ON(manager >= sman->num_managers);
-
- sman_mm = &sman->mm[manager];
- mm = kzalloc(sizeof(*mm), GFP_KERNEL);
- if (!mm) {
- return -ENOMEM;
- }
- sman_mm->private = mm;
- ret = drm_mm_init(mm, start, size);
-
- if (ret) {
- kfree(mm);
- return ret;
- }
-
- sman_mm->allocate = drm_sman_mm_allocate;
- sman_mm->free = drm_sman_mm_free;
- sman_mm->destroy = drm_sman_mm_destroy;
- sman_mm->offset = drm_sman_mm_offset;
-
- return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_set_range);
-
-int
-drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
- struct drm_sman_mm * allocator)
-{
- BUG_ON(manager >= sman->num_managers);
- sman->mm[manager] = *allocator;
-
- return 0;
-}
-EXPORT_SYMBOL(drm_sman_set_manager);
-
-static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
- unsigned long owner)
-{
- int ret;
- struct drm_hash_item *owner_hash_item;
- struct drm_owner_item *owner_item;
-
- ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
- if (!ret) {
- return drm_hash_entry(owner_hash_item, struct drm_owner_item,
- owner_hash);
- }
-
- owner_item = kzalloc(sizeof(*owner_item), GFP_KERNEL);
- if (!owner_item)
- goto out;
-
- INIT_LIST_HEAD(&owner_item->mem_blocks);
- owner_item->owner_hash.key = owner;
- if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash))
- goto out1;
-
- list_add_tail(&owner_item->sman_list, &sman->owner_items);
- return owner_item;
-
-out1:
- kfree(owner_item);
-out:
- return NULL;
-}
-
-struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager,
- unsigned long size, unsigned alignment,
- unsigned long owner)
-{
- void *tmp;
- struct drm_sman_mm *sman_mm;
- struct drm_owner_item *owner_item;
- struct drm_memblock_item *memblock;
-
- BUG_ON(manager >= sman->num_managers);
-
- sman_mm = &sman->mm[manager];
- tmp = sman_mm->allocate(sman_mm->private, size, alignment);
-
- if (!tmp) {
- return NULL;
- }
-
- memblock = kzalloc(sizeof(*memblock), GFP_KERNEL);
-
- if (!memblock)
- goto out;
-
- memblock->mm_info = tmp;
- memblock->mm = sman_mm;
- memblock->sman = sman;
-
- if (drm_ht_just_insert_please
- (&sman->user_hash_tab, &memblock->user_hash,
- (unsigned long)memblock, 32, 0, 0))
- goto out1;
-
- owner_item = drm_sman_get_owner_item(sman, owner);
- if (!owner_item)
- goto out2;
-
- list_add_tail(&memblock->owner_list, &owner_item->mem_blocks);
-
- return memblock;
-
-out2:
- drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
-out1:
- kfree(memblock);
-out:
- sman_mm->free(sman_mm->private, tmp);
-
- return NULL;
-}
-
-EXPORT_SYMBOL(drm_sman_alloc);
-
-static void drm_sman_free(struct drm_memblock_item *item)
-{
- struct drm_sman *sman = item->sman;
-
- list_del(&item->owner_list);
- drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
- item->mm->free(item->mm->private, item->mm_info);
- kfree(item);
-}
-
-int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
-{
- struct drm_hash_item *hash_item;
- struct drm_memblock_item *memblock_item;
-
- if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
- return -EINVAL;
-
- memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item,
- user_hash);
- drm_sman_free(memblock_item);
- return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_free_key);
-
-static void drm_sman_remove_owner(struct drm_sman *sman,
- struct drm_owner_item *owner_item)
-{
- list_del(&owner_item->sman_list);
- drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
- kfree(owner_item);
-}
-
-int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
-{
-
- struct drm_hash_item *hash_item;
- struct drm_owner_item *owner_item;
-
- if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
- return -1;
- }
-
- owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
- if (owner_item->mem_blocks.next == &owner_item->mem_blocks) {
- drm_sman_remove_owner(sman, owner_item);
- return -1;
- }
-
- return 0;
-}
-
-EXPORT_SYMBOL(drm_sman_owner_clean);
-
-static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
- struct drm_owner_item *owner_item)
-{
- struct drm_memblock_item *entry, *next;
-
- list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
- owner_list) {
- drm_sman_free(entry);
- }
- drm_sman_remove_owner(sman, owner_item);
-}
-
-void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner)
-{
-
- struct drm_hash_item *hash_item;
- struct drm_owner_item *owner_item;
-
- if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
-
- return;
- }
-
- owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
- drm_sman_do_owner_cleanup(sman, owner_item);
-}
-
-EXPORT_SYMBOL(drm_sman_owner_cleanup);
-
-void drm_sman_cleanup(struct drm_sman *sman)
-{
- struct drm_owner_item *entry, *next;
- unsigned int i;
- struct drm_sman_mm *sman_mm;
-
- list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
- drm_sman_do_owner_cleanup(sman, entry);
- }
- if (sman->mm) {
- for (i = 0; i < sman->num_managers; ++i) {
- sman_mm = &sman->mm[i];
- if (sman_mm->private) {
- sman_mm->destroy(sman_mm->private);
- sman_mm->private = NULL;
- }
- }
- }
-}
-
-EXPORT_SYMBOL(drm_sman_cleanup);
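For context on the interface removed above: drm_sman tracked every allocation under an unsigned long "owner" key so that all of an owner's blocks could be released in one call. A hypothetical driver-side sketch of that pattern, built only from the functions visible above (single manager assumed, error handling trimmed; names prefixed example_ are invented for illustration):

	static struct drm_sman sman;

	static int example_vram_setup(unsigned long start, unsigned long size)
	{
		int ret;

		/* one manager; 12/8-bit hash table orders for user/owner keys */
		ret = drm_sman_init(&sman, 1, 12, 8);
		if (ret)
			return ret;
		return drm_sman_set_range(&sman, 0, start, size);
	}

	static unsigned long example_vram_alloc(unsigned long size, unsigned long owner)
	{
		struct drm_memblock_item *item;

		item = drm_sman_alloc(&sman, 0, size, 0, owner);
		/* the hash key is what is later passed to drm_sman_free_key() */
		return item ? item->user_hash.key : 0;
	}

	static void example_owner_release(unsigned long owner)
	{
		/* frees every block still held by this owner in one call */
		drm_sman_owner_cleanup(&sman, owner);
	}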
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 0f9ef9b..62c3675 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -72,7 +72,7 @@ static int drm_class_resume(struct device *dev)
return 0;
}
-static char *drm_devnode(struct device *dev, mode_t *mode)
+static char *drm_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "dri/%s", dev_name(dev));
}
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 847466a..b9e5266 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -13,8 +13,15 @@ config DRM_EXYNOS
config DRM_EXYNOS_FIMD
tristate "Exynos DRM FIMD"
- depends on DRM_EXYNOS
+ depends on DRM_EXYNOS && !FB_S3C
default n
help
Choose this option if you want to use Exynos FIMD for DRM.
If M is selected, the module will be called exynos_drm_fimd
+
+config DRM_EXYNOS_HDMI
+ tristate "Exynos DRM HDMI"
+ depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_TV
+ help
+ Choose this option if you want to use Exynos HDMI for DRM.
+ If M is selected, the module will be called exynos_drm_hdmi
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 0496d3f..395e69c 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -5,7 +5,10 @@
ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos
exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \
- exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o
+ exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o \
+ exynos_drm_plane.o
obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
obj-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o
+obj-$(CONFIG_DRM_EXYNOS_HDMI) += exynos_hdmi.o exynos_mixer.o exynos_ddc.o \
+ exynos_hdmiphy.o exynos_drm_hdmi.o
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
new file mode 100644
index 0000000..84b614f
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ * Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+
+#include "exynos_drm_drv.h"
+#include "exynos_hdmi.h"
+
+static int s5p_ddc_probe(struct i2c_client *client,
+ const struct i2c_device_id *dev_id)
+{
+ hdmi_attach_ddc_client(client);
+
+ dev_info(&client->adapter->dev, "attached s5p_ddc "
+ "into i2c adapter successfully\n");
+
+ return 0;
+}
+
+static int s5p_ddc_remove(struct i2c_client *client)
+{
+ dev_info(&client->adapter->dev, "detached s5p_ddc "
+ "from i2c adapter successfully\n");
+
+ return 0;
+}
+
+static struct i2c_device_id ddc_idtable[] = {
+ {"s5p_ddc", 0},
+ { },
+};
+
+struct i2c_driver ddc_driver = {
+ .driver = {
+ .name = "s5p_ddc",
+ .owner = THIS_MODULE,
+ },
+ .id_table = ddc_idtable,
+ .probe = s5p_ddc_probe,
+ .remove = __devexit_p(s5p_ddc_remove),
+ .command = NULL,
+};
+EXPORT_SYMBOL(ddc_driver);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 2bb07bc..3cf785c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -73,7 +73,7 @@ struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer) {
DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
- return ERR_PTR(-ENOMEM);
+ return NULL;
}
buffer->size = size;
@@ -84,8 +84,7 @@ struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
*/
if (lowlevel_buffer_allocate(dev, buffer) < 0) {
kfree(buffer);
- buffer = NULL;
- return ERR_PTR(-ENOMEM);
+ return NULL;
}
return buffer;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
index 6e91f9c..c913f2b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -30,9 +30,6 @@
struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
unsigned int size);
-/* get memory information of a drm framebuffer. */
-struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb);
-
/* remove allocated physical memory. */
void exynos_drm_buf_destroy(struct drm_device *dev,
struct exynos_drm_gem_buf *buffer);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index d620b07..618bd4d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -28,6 +28,7 @@
#include "drmP.h"
#include "drm_crtc_helper.h"
+#include <drm/exynos_drm.h>
#include "exynos_drm_drv.h"
#include "exynos_drm_encoder.h"
@@ -44,8 +45,9 @@ struct exynos_drm_connector {
/* convert exynos_video_timings to drm_display_mode */
static inline void
convert_to_display_mode(struct drm_display_mode *mode,
- struct fb_videomode *timing)
+ struct exynos_drm_panel_info *panel)
{
+ struct fb_videomode *timing = &panel->timing;
DRM_DEBUG_KMS("%s\n", __FILE__);
mode->clock = timing->pixclock / 1000;
@@ -60,6 +62,8 @@ convert_to_display_mode(struct drm_display_mode *mode,
mode->vsync_start = mode->vdisplay + timing->upper_margin;
mode->vsync_end = mode->vsync_start + timing->vsync_len;
mode->vtotal = mode->vsync_end + timing->lower_margin;
+ mode->width_mm = panel->width_mm;
+ mode->height_mm = panel->height_mm;
if (timing->vmode & FB_VMODE_INTERLACED)
mode->flags |= DRM_MODE_FLAG_INTERLACE;
@@ -148,16 +152,18 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
connector->display_info.raw_edid = edid;
} else {
struct drm_display_mode *mode = drm_mode_create(connector->dev);
- struct fb_videomode *timing;
+ struct exynos_drm_panel_info *panel;
- if (display_ops->get_timing)
- timing = display_ops->get_timing(manager->dev);
+ if (display_ops->get_panel)
+ panel = display_ops->get_panel(manager->dev);
else {
drm_mode_destroy(connector->dev, mode);
return 0;
}
- convert_to_display_mode(mode, timing);
+ convert_to_display_mode(mode, panel);
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
drm_mode_set_name(mode);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 661a035..d08a558 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -193,6 +193,9 @@ int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
return err;
}
+ /* setup possible_clones. */
+ exynos_drm_encoder_setup(drm_dev);
+
/*
* if any specific driver such as fimd or hdmi driver called
* exynos_drm_subdrv_register() later than drm_load(),
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index ee43cc2..de81883 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -34,7 +34,6 @@
#include "exynos_drm_fb.h"
#include "exynos_drm_encoder.h"
#include "exynos_drm_gem.h"
-#include "exynos_drm_buf.h"
#define to_exynos_crtc(x) container_of(x, struct exynos_drm_crtc,\
drm_crtc)
@@ -52,11 +51,13 @@
* drm framework doesn't support multiple irq yet.
* we can refer to the crtc to current hardware interrupt occured through
* this pipe value.
+ * @dpms: store the crtc dpms value
*/
struct exynos_drm_crtc {
struct drm_crtc drm_crtc;
struct exynos_drm_overlay overlay;
unsigned int pipe;
+ unsigned int dpms;
};
static void exynos_drm_crtc_apply(struct drm_crtc *crtc)
@@ -78,19 +79,23 @@ int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
struct exynos_drm_gem_buf *buffer;
unsigned int actual_w;
unsigned int actual_h;
+ int nr = exynos_drm_format_num_buffers(fb->pixel_format);
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ buffer = exynos_drm_fb_buffer(fb, i);
+ if (!buffer) {
+ DRM_LOG_KMS("buffer is null\n");
+ return -EFAULT;
+ }
- buffer = exynos_drm_fb_get_buf(fb);
- if (!buffer) {
- DRM_LOG_KMS("buffer is null.\n");
- return -EFAULT;
- }
-
- overlay->dma_addr = buffer->dma_addr;
- overlay->vaddr = buffer->kvaddr;
+ overlay->dma_addr[i] = buffer->dma_addr;
+ overlay->vaddr[i] = buffer->kvaddr;
- DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
- (unsigned long)overlay->vaddr,
- (unsigned long)overlay->dma_addr);
+ DRM_DEBUG_KMS("buffer: %d, vaddr = 0x%lx, dma_addr = 0x%lx\n",
+ i, (unsigned long)overlay->vaddr[i],
+ (unsigned long)overlay->dma_addr[i]);
+ }
actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w);
actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h);
@@ -101,7 +106,8 @@ int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
overlay->fb_width = fb->width;
overlay->fb_height = fb->height;
overlay->bpp = fb->bits_per_pixel;
- overlay->pitch = fb->pitch;
+ overlay->pitch = fb->pitches[0];
+ overlay->pixel_format = fb->pixel_format;
/* set overlay range to be displayed. */
overlay->crtc_x = pos->crtc_x;
@@ -153,26 +159,37 @@ static int exynos_drm_crtc_update(struct drm_crtc *crtc)
static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
{
+ struct drm_device *dev = crtc->dev;
struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
+ if (exynos_crtc->dpms == mode) {
+ DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
+ return;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
switch (mode) {
case DRM_MODE_DPMS_ON:
- exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
- exynos_drm_encoder_crtc_commit);
+ exynos_drm_fn_encoder(crtc, &mode,
+ exynos_drm_encoder_crtc_dpms);
+ exynos_crtc->dpms = mode;
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- /* TODO */
- exynos_drm_fn_encoder(crtc, NULL,
- exynos_drm_encoder_crtc_disable);
+ exynos_drm_fn_encoder(crtc, &mode,
+ exynos_drm_encoder_crtc_dpms);
+ exynos_crtc->dpms = mode;
break;
default:
- DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+ DRM_ERROR("unspecified mode %d\n", mode);
break;
}
+
+ mutex_unlock(&dev->struct_mutex);
}
static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
@@ -188,6 +205,28 @@ static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
DRM_DEBUG_KMS("%s\n", __FILE__);
+ /*
+ * when set_crtc is requested from user space or at boot time,
+ * crtc->commit can be called without a preceding dpms call, so
+ * if the hardware is not yet powered on, crtc->dpms must be
+ * called with DRM_MODE_DPMS_ON first to power it on.
+ */
+ if (exynos_crtc->dpms != DRM_MODE_DPMS_ON) {
+ int mode = DRM_MODE_DPMS_ON;
+
+ /*
+ * enable hardware (power on) for all encoders, e.g. hdmi,
+ * connected to the current crtc.
+ */
+ exynos_drm_crtc_dpms(crtc, mode);
+ /*
+ * enable dma for all encoders connected to the current crtc and
+ * to the lcd panel.
+ */
+ exynos_drm_fn_encoder(crtc, &mode,
+ exynos_drm_encoder_dpms_from_crtc);
+ }
+
exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
exynos_drm_encoder_crtc_commit);
}
@@ -268,9 +307,6 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
*/
event->pipe = exynos_crtc->pipe;
- list_add_tail(&event->base.link,
- &dev_priv->pageflip_event_list);
-
ret = drm_vblank_get(dev, exynos_crtc->pipe);
if (ret) {
DRM_DEBUG("failed to acquire vblank counter\n");
@@ -279,6 +315,9 @@ static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
goto out;
}
+ list_add_tail(&event->base.link,
+ &dev_priv->pageflip_event_list);
+
crtc->fb = fb;
ret = exynos_drm_crtc_update(crtc);
if (ret) {
@@ -344,6 +383,8 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
}
exynos_crtc->pipe = nr;
+ exynos_crtc->dpms = DRM_MODE_DPMS_OFF;
+ exynos_crtc->overlay.zpos = DEFAULT_ZPOS;
crtc = &exynos_crtc->drm_crtc;
private->crtc[nr] = crtc;
@@ -357,9 +398,14 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
{
struct exynos_drm_private *private = dev->dev_private;
+ struct exynos_drm_crtc *exynos_crtc =
+ to_exynos_crtc(private->crtc[crtc]);
DRM_DEBUG_KMS("%s\n", __FILE__);
+ if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
+ return -EPERM;
+
exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
exynos_drm_enable_vblank);
@@ -369,9 +415,14 @@ int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
{
struct exynos_drm_private *private = dev->dev_private;
+ struct exynos_drm_crtc *exynos_crtc =
+ to_exynos_crtc(private->crtc[crtc]);
DRM_DEBUG_KMS("%s\n", __FILE__);
+ if (exynos_crtc->dpms != DRM_MODE_DPMS_ON)
+ return;
+
exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
exynos_drm_disable_vblank);
}
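Both the crtc above and the encoder further below adopt the same guard: cache the last dpms value and bail out early when the requested mode matches, so repeated DPMS requests from the helper layer do not reprogram the hardware. A minimal sketch of the pattern (type and helper names are illustrative only):

	static void example_dpms(struct example_output *out, int mode)
	{
		/* nothing to do if we are already in the requested state */
		if (out->dpms == mode)
			return;

		mutex_lock(&out->dev->struct_mutex);
		example_hw_set_power(out, mode == DRM_MODE_DPMS_ON);
		out->dpms = mode;	/* remember the new state for the next request */
		mutex_unlock(&out->dev->struct_mutex);
	}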
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 53e2216..58820eb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -33,16 +33,20 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_crtc.h"
+#include "exynos_drm_encoder.h"
#include "exynos_drm_fbdev.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
+#include "exynos_drm_plane.h"
-#define DRIVER_NAME "exynos-drm"
+#define DRIVER_NAME "exynos"
#define DRIVER_DESC "Samsung SoC DRM"
#define DRIVER_DATE "20110530"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
+#define VBLANK_OFF_DELAY 50000
+
static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
{
struct exynos_drm_private *private;
@@ -77,6 +81,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
goto err_crtc;
}
+ for (nr = 0; nr < MAX_PLANE; nr++) {
+ ret = exynos_plane_init(dev, nr);
+ if (ret)
+ goto err_crtc;
+ }
+
ret = drm_vblank_init(dev, MAX_CRTC);
if (ret)
goto err_crtc;
@@ -90,6 +100,9 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto err_vblank;
+ /* setup possible_clones. */
+ exynos_drm_encoder_setup(dev);
+
/*
* create and configure fb helper and also exynos specific
* fbdev object.
@@ -100,6 +113,8 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
goto err_drm_device;
}
+ drm_vblank_offdelay = VBLANK_OFF_DELAY;
+
return 0;
err_drm_device:
@@ -130,16 +145,21 @@ static int exynos_drm_unload(struct drm_device *dev)
}
static void exynos_drm_preclose(struct drm_device *dev,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct exynos_drm_private *dev_priv = dev->dev_private;
+ DRM_DEBUG_DRIVER("%s\n", __FILE__);
- /*
- * drm framework frees all events at release time,
- * so private event list should be cleared.
- */
- if (!list_empty(&dev_priv->pageflip_event_list))
- INIT_LIST_HEAD(&dev_priv->pageflip_event_list);
+}
+
+static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+ if (!file->driver_priv)
+ return;
+
+ kfree(file->driver_priv);
+ file->driver_priv = NULL;
}
static void exynos_drm_lastclose(struct drm_device *dev)
@@ -163,6 +183,18 @@ static struct drm_ioctl_desc exynos_ioctls[] = {
DRM_AUTH),
DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MMAP,
exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(EXYNOS_PLANE_SET_ZPOS, exynos_plane_set_zpos_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
+};
+
+static const struct file_operations exynos_drm_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .mmap = exynos_drm_gem_mmap,
+ .poll = drm_poll,
+ .read = drm_read,
+ .unlocked_ioctl = drm_ioctl,
+ .release = drm_release,
};
static struct drm_driver exynos_drm_driver = {
@@ -172,6 +204,7 @@ static struct drm_driver exynos_drm_driver = {
.unload = exynos_drm_unload,
.preclose = exynos_drm_preclose,
.lastclose = exynos_drm_lastclose,
+ .postclose = exynos_drm_postclose,
.get_vblank_counter = drm_vblank_count,
.enable_vblank = exynos_drm_crtc_enable_vblank,
.disable_vblank = exynos_drm_crtc_disable_vblank,
@@ -182,15 +215,7 @@ static struct drm_driver exynos_drm_driver = {
.dumb_map_offset = exynos_drm_gem_dumb_map_offset,
.dumb_destroy = exynos_drm_gem_dumb_destroy,
.ioctls = exynos_ioctls,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .mmap = exynos_drm_gem_mmap,
- .poll = drm_poll,
- .read = drm_read,
- .unlocked_ioctl = drm_ioctl,
- .release = drm_release,
- },
+ .fops = &exynos_drm_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 5e02e6e..13540de 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -33,11 +33,16 @@
#include "drm.h"
#define MAX_CRTC 2
+#define MAX_PLANE 5
+#define MAX_FB_BUFFER 3
+#define DEFAULT_ZPOS -1
struct drm_device;
struct exynos_drm_overlay;
struct drm_connector;
+extern unsigned int drm_vblank_offdelay;
+
/* this enumerates display type. */
enum exynos_drm_output_type {
EXYNOS_DISPLAY_TYPE_NONE,
@@ -57,8 +62,8 @@ enum exynos_drm_output_type {
struct exynos_drm_overlay_ops {
void (*mode_set)(struct device *subdrv_dev,
struct exynos_drm_overlay *overlay);
- void (*commit)(struct device *subdrv_dev);
- void (*disable)(struct device *subdrv_dev);
+ void (*commit)(struct device *subdrv_dev, int zpos);
+ void (*disable)(struct device *subdrv_dev, int zpos);
};
/*
@@ -80,9 +85,11 @@ struct exynos_drm_overlay_ops {
* @scan_flag: interlace or progressive way.
* (it could be DRM_MODE_FLAG_*)
* @bpp: pixel size.(in bit)
- * @dma_addr: bus(accessed by dma) address to the memory region allocated
- * for a overlay.
- * @vaddr: virtual memory addresss to this overlay.
+ * @pixel_format: fourcc pixel format of this overlay
+ * @dma_addr: array of bus (dma-accessible) addresses of the memory regions
+ *	allocated for an overlay.
+ * @vaddr: array of virtual memory addresses of this overlay.
+ * @zpos: order of the overlay layer (z position).
* @default_win: a window to be enabled.
* @color_key: color key on or off.
* @index_color: if using color key feature then this value would be used
@@ -109,8 +116,10 @@ struct exynos_drm_overlay {
unsigned int scan_flag;
unsigned int bpp;
unsigned int pitch;
- dma_addr_t dma_addr;
- void __iomem *vaddr;
+ uint32_t pixel_format;
+ dma_addr_t dma_addr[MAX_FB_BUFFER];
+ void __iomem *vaddr[MAX_FB_BUFFER];
+ int zpos;
bool default_win;
bool color_key;
@@ -127,7 +136,7 @@ struct exynos_drm_overlay {
* @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI.
* @is_connected: check for that display is connected or not.
* @get_edid: get edid modes from display driver.
- * @get_timing: get timing object from display driver.
+ * @get_panel: get panel object from display driver.
* @check_timing: check if timing is valid or not.
* @power_on: display device on or off.
*/
@@ -136,7 +145,7 @@ struct exynos_drm_display_ops {
bool (*is_connected)(struct device *dev);
int (*get_edid)(struct device *dev, struct drm_connector *connector,
u8 *edid, int len);
- void *(*get_timing)(struct device *dev);
+ void *(*get_panel)(struct device *dev);
int (*check_timing)(struct device *dev, void *timing);
int (*power_on)(struct device *dev, int mode);
};
@@ -144,17 +153,19 @@ struct exynos_drm_display_ops {
/*
* Exynos drm manager ops
*
+ * @dpms: control device power.
+ * @apply: set timing, vblank and overlay data to registers.
* @mode_set: convert drm_display_mode to hw specific display mode and
* would be called by encoder->mode_set().
* @commit: set current hw specific display mode to hw.
- * @disable: disable hardware specific display mode.
* @enable_vblank: specific driver callback for enabling vblank interrupt.
* @disable_vblank: specific driver callback for disabling vblank interrupt.
*/
struct exynos_drm_manager_ops {
+ void (*dpms)(struct device *subdrv_dev, int mode);
+ void (*apply)(struct device *subdrv_dev);
void (*mode_set)(struct device *subdrv_dev, void *mode);
void (*commit)(struct device *subdrv_dev);
- void (*disable)(struct device *subdrv_dev);
int (*enable_vblank)(struct device *subdrv_dev);
void (*disable_vblank)(struct device *subdrv_dev);
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 1530614..ef4754f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -42,49 +42,68 @@
* @drm_encoder: encoder object.
* @manager: specific encoder has its own manager to control a hardware
* appropriately and we can access a hardware drawing on this manager.
+ * @dpms: store the encoder dpms value.
*/
struct exynos_drm_encoder {
struct drm_encoder drm_encoder;
struct exynos_drm_manager *manager;
+ int dpms;
};
-static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
+static void exynos_drm_display_power(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
struct drm_connector *connector;
struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder == encoder) {
+ struct exynos_drm_display_ops *display_ops =
+ manager->display_ops;
+
+ DRM_DEBUG_KMS("connector[%d] dpms[%d]\n",
+ connector->base.id, mode);
+ if (display_ops && display_ops->power_on)
+ display_ops->power_on(manager->dev, mode);
+ }
+ }
+}
+
+static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
struct exynos_drm_manager_ops *manager_ops = manager->ops;
+ struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode);
+ if (exynos_encoder->dpms == mode) {
+ DRM_DEBUG_KMS("desired dpms mode is same as previous one.\n");
+ return;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
switch (mode) {
case DRM_MODE_DPMS_ON:
- if (manager_ops && manager_ops->commit)
- manager_ops->commit(manager->dev);
+ if (manager_ops && manager_ops->apply)
+ manager_ops->apply(manager->dev);
+ exynos_drm_display_power(encoder, mode);
+ exynos_encoder->dpms = mode;
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- /* TODO */
- if (manager_ops && manager_ops->disable)
- manager_ops->disable(manager->dev);
+ exynos_drm_display_power(encoder, mode);
+ exynos_encoder->dpms = mode;
break;
default:
DRM_ERROR("unspecified mode %d\n", mode);
break;
}
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder == encoder) {
- struct exynos_drm_display_ops *display_ops =
- manager->display_ops;
-
- DRM_DEBUG_KMS("connector[%d] dpms[%d]\n",
- connector->base.id, mode);
- if (display_ops && display_ops->power_on)
- display_ops->power_on(manager->dev, mode);
- }
- }
+ mutex_unlock(&dev->struct_mutex);
}
static bool
@@ -169,7 +188,6 @@ static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
exynos_encoder->manager->pipe = -1;
drm_encoder_cleanup(encoder);
- encoder->dev->mode_config.num_encoder--;
kfree(exynos_encoder);
}
@@ -177,6 +195,40 @@ static struct drm_encoder_funcs exynos_encoder_funcs = {
.destroy = exynos_drm_encoder_destroy,
};
+static unsigned int exynos_drm_encoder_clones(struct drm_encoder *encoder)
+{
+ struct drm_encoder *clone;
+ struct drm_device *dev = encoder->dev;
+ struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+ struct exynos_drm_display_ops *display_ops =
+ exynos_encoder->manager->display_ops;
+ unsigned int clone_mask = 0;
+ int cnt = 0;
+
+ list_for_each_entry(clone, &dev->mode_config.encoder_list, head) {
+ switch (display_ops->type) {
+ case EXYNOS_DISPLAY_TYPE_LCD:
+ case EXYNOS_DISPLAY_TYPE_HDMI:
+ clone_mask |= (1 << (cnt++));
+ break;
+ default:
+ continue;
+ }
+ }
+
+ return clone_mask;
+}
+
+void exynos_drm_encoder_setup(struct drm_device *dev)
+{
+ struct drm_encoder *encoder;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ encoder->possible_clones = exynos_drm_encoder_clones(encoder);
+}
+
struct drm_encoder *
exynos_drm_encoder_create(struct drm_device *dev,
struct exynos_drm_manager *manager,
@@ -199,6 +251,7 @@ exynos_drm_encoder_create(struct drm_device *dev,
return NULL;
}
+ exynos_encoder->dpms = DRM_MODE_DPMS_OFF;
exynos_encoder->manager = manager;
encoder = &exynos_encoder->drm_encoder;
encoder->possible_crtcs = possible_crtcs;
@@ -275,12 +328,27 @@ void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data)
manager_ops->disable_vblank(manager->dev);
}
-void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
+void exynos_drm_encoder_crtc_plane_commit(struct drm_encoder *encoder,
+ void *data)
{
struct exynos_drm_manager *manager =
to_exynos_encoder(encoder)->manager;
struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+ int zpos = DEFAULT_ZPOS;
+
+ if (data)
+ zpos = *(int *)data;
+
+ if (overlay_ops && overlay_ops->commit)
+ overlay_ops->commit(manager->dev, zpos);
+}
+
+void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
+{
+ struct exynos_drm_manager *manager =
+ to_exynos_encoder(encoder)->manager;
int crtc = *(int *)data;
+ int zpos = DEFAULT_ZPOS;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -290,8 +358,53 @@ void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
*/
manager->pipe = crtc;
- if (overlay_ops && overlay_ops->commit)
- overlay_ops->commit(manager->dev);
+ exynos_drm_encoder_crtc_plane_commit(encoder, &zpos);
+}
+
+void exynos_drm_encoder_dpms_from_crtc(struct drm_encoder *encoder, void *data)
+{
+ struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+ int mode = *(int *)data;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ exynos_drm_encoder_dpms(encoder, mode);
+
+ exynos_encoder->dpms = mode;
+}
+
+void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data)
+{
+ struct drm_device *dev = encoder->dev;
+ struct exynos_drm_encoder *exynos_encoder = to_exynos_encoder(encoder);
+ struct exynos_drm_manager *manager = exynos_encoder->manager;
+ struct exynos_drm_manager_ops *manager_ops = manager->ops;
+ struct drm_connector *connector;
+ int mode = *(int *)data;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (manager_ops && manager_ops->dpms)
+ manager_ops->dpms(manager->dev, mode);
+
+ /*
+ * set current dpms mode to the connector connected to
+ * current encoder. connector->dpms would be checked
+ * at drm_helper_connector_dpms()
+ */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+ if (connector->encoder == encoder)
+ connector->dpms = mode;
+
+ /*
+ * if this condition holds, the crtc has already been detached from
+ * the encoder and the last detach step has completed, so clear the
+ * pipe in the manager to prevent a repeated call.
+ */
+ if (mode > DRM_MODE_DPMS_ON) {
+ if (!encoder->crtc)
+ manager->pipe = -1;
+ }
}
void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
@@ -310,19 +423,15 @@ void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data)
struct exynos_drm_manager *manager =
to_exynos_encoder(encoder)->manager;
struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+ int zpos = DEFAULT_ZPOS;
DRM_DEBUG_KMS("\n");
- if (overlay_ops && overlay_ops->disable)
- overlay_ops->disable(manager->dev);
+ if (data)
+ zpos = *(int *)data;
- /*
- * crtc is already detached from encoder and last
- * function for detaching is properly done, so
- * clear pipe from manager to prevent repeated call
- */
- if (!encoder->crtc)
- manager->pipe = -1;
+ if (overlay_ops && overlay_ops->disable)
+ overlay_ops->disable(manager->dev, zpos);
}
MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index a22acfb..eb7d231 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -30,6 +30,7 @@
struct exynos_drm_manager;
+void exynos_drm_encoder_setup(struct drm_device *dev);
struct drm_encoder *exynos_drm_encoder_create(struct drm_device *dev,
struct exynos_drm_manager *mgr,
unsigned int possible_crtcs);
@@ -39,7 +40,12 @@ void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
void (*fn)(struct drm_encoder *, void *));
void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data);
void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_crtc_plane_commit(struct drm_encoder *encoder,
+ void *data);
void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_dpms_from_crtc(struct drm_encoder *encoder,
+ void *data);
+void exynos_drm_encoder_crtc_dpms(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data);
void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 5bf4a1a..3733fe6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -33,7 +33,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
-#include "exynos_drm_buf.h"
#include "exynos_drm_gem.h"
#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb)
@@ -42,15 +41,11 @@
* exynos specific framebuffer structure.
*
* @fb: drm framebuffer obejct.
- * @exynos_gem_obj: exynos specific gem object containing a gem object.
- * @buffer: pointer to exynos_drm_gem_buffer object.
- * - contain the memory information to memory region allocated
- * at default framebuffer creation.
+ * @exynos_gem_obj: array of exynos specific gem objects backing this framebuffer.
*/
struct exynos_drm_fb {
struct drm_framebuffer fb;
- struct exynos_drm_gem_obj *exynos_gem_obj;
- struct exynos_drm_gem_buf *buffer;
+ struct exynos_drm_gem_obj *exynos_gem_obj[MAX_FB_BUFFER];
};
static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
@@ -61,13 +56,6 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
drm_framebuffer_cleanup(fb);
- /*
- * default framebuffer has no gem object so
- * a buffer of the default framebuffer should be released at here.
- */
- if (!exynos_fb->exynos_gem_obj && exynos_fb->buffer)
- exynos_drm_buf_destroy(fb->dev, exynos_fb->buffer);
-
kfree(exynos_fb);
exynos_fb = NULL;
}
@@ -81,7 +69,7 @@ static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
DRM_DEBUG_KMS("%s\n", __FILE__);
return drm_gem_handle_create(file_priv,
- &exynos_fb->exynos_gem_obj->base, handle);
+ &exynos_fb->exynos_gem_obj[0]->base, handle);
}
static int exynos_drm_fb_dirty(struct drm_framebuffer *fb,
@@ -102,134 +90,88 @@ static struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
.dirty = exynos_drm_fb_dirty,
};
-static struct drm_framebuffer *
-exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd)
+struct drm_framebuffer *
+exynos_drm_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj)
{
struct exynos_drm_fb *exynos_fb;
- struct drm_framebuffer *fb;
- struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
- struct drm_gem_object *obj;
- unsigned int size;
int ret;
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- mode_cmd->pitch = max(mode_cmd->pitch,
- mode_cmd->width * (mode_cmd->bpp >> 3));
-
- DRM_LOG_KMS("drm fb create(%dx%d)\n",
- mode_cmd->width, mode_cmd->height);
-
exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
if (!exynos_fb) {
- DRM_ERROR("failed to allocate exynos drm framebuffer.\n");
+ DRM_ERROR("failed to allocate exynos drm framebuffer\n");
return ERR_PTR(-ENOMEM);
}
- fb = &exynos_fb->fb;
- ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs);
+ ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs);
if (ret) {
- DRM_ERROR("failed to initialize framebuffer.\n");
- goto err_init;
+ DRM_ERROR("failed to initialize framebuffer\n");
+ return ERR_PTR(ret);
}
- DRM_LOG_KMS("create: fb id: %d\n", fb->base.id);
+ drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
+ exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
- size = mode_cmd->pitch * mode_cmd->height;
+ return &exynos_fb->fb;
+}
- /*
- * mode_cmd->handle could be NULL at booting time or
- * with user request. if NULL, a new buffer or a gem object
- * would be allocated.
- */
- if (!mode_cmd->handle) {
- if (!file_priv) {
- struct exynos_drm_gem_buf *buffer;
-
- /*
- * in case that file_priv is NULL, it allocates
- * only buffer and this buffer would be used
- * for default framebuffer.
- */
- buffer = exynos_drm_buf_create(dev, size);
- if (IS_ERR(buffer)) {
- ret = PTR_ERR(buffer);
- goto err_buffer;
- }
-
- exynos_fb->buffer = buffer;
-
- DRM_LOG_KMS("default: dma_addr = 0x%lx, size = 0x%x\n",
- (unsigned long)buffer->dma_addr, size);
-
- goto out;
- } else {
- exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
- &mode_cmd->handle,
- size);
- if (IS_ERR(exynos_gem_obj)) {
- ret = PTR_ERR(exynos_gem_obj);
- goto err_buffer;
- }
- }
- } else {
- obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object.\n");
- goto err_buffer;
- }
+static struct drm_framebuffer *
+exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *obj;
+ struct drm_framebuffer *fb;
+ struct exynos_drm_fb *exynos_fb;
+ int nr;
+ int i;
- exynos_gem_obj = to_exynos_gem_obj(obj);
+ DRM_DEBUG_KMS("%s\n", __FILE__);
- drm_gem_object_unreference_unlocked(obj);
+ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object\n");
+ return ERR_PTR(-ENOENT);
}
- /*
- * if got a exynos_gem_obj from either a handle or
- * a new creation then exynos_fb->exynos_gem_obj is NULL
- * so that default framebuffer has no its own gem object,
- * only its own buffer object.
- */
- exynos_fb->buffer = exynos_gem_obj->buffer;
-
- DRM_LOG_KMS("dma_addr = 0x%lx, size = 0x%x, gem object = 0x%x\n",
- (unsigned long)exynos_fb->buffer->dma_addr, size,
- (unsigned int)&exynos_gem_obj->base);
+ drm_gem_object_unreference_unlocked(obj);
-out:
- exynos_fb->exynos_gem_obj = exynos_gem_obj;
+ fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
+ if (IS_ERR(fb))
+ return fb;
- drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+ exynos_fb = to_exynos_fb(fb);
+ nr = exynos_drm_format_num_buffers(fb->pixel_format);
- return fb;
-
-err_buffer:
- drm_framebuffer_cleanup(fb);
-
-err_init:
- kfree(exynos_fb);
+ for (i = 1; i < nr; i++) {
+ obj = drm_gem_object_lookup(dev, file_priv,
+ mode_cmd->handles[i]);
+ if (!obj) {
+ DRM_ERROR("failed to lookup gem object\n");
+ exynos_drm_fb_destroy(fb);
+ return ERR_PTR(-ENOENT);
+ }
- return ERR_PTR(ret);
-}
+ drm_gem_object_unreference_unlocked(obj);
-struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev,
- struct drm_file *file_priv,
- struct drm_mode_fb_cmd *mode_cmd)
-{
- DRM_DEBUG_KMS("%s\n", __FILE__);
+ exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
+ }
- return exynos_drm_fb_init(file_priv, dev, mode_cmd);
+ return fb;
}
-struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb)
+struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
+ int index)
{
struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
struct exynos_drm_gem_buf *buffer;
DRM_DEBUG_KMS("%s\n", __FILE__);
- buffer = exynos_fb->buffer;
+ if (index >= MAX_FB_BUFFER)
+ return NULL;
+
+ buffer = exynos_fb->exynos_gem_obj[index]->buffer;
if (!buffer)
return NULL;
@@ -250,7 +192,7 @@ static void exynos_drm_output_poll_changed(struct drm_device *dev)
}
static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
- .fb_create = exynos_drm_fb_create,
+ .fb_create = exynos_user_fb_create,
.output_poll_changed = exynos_drm_output_poll_changed,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index eb35931..3ecb30d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -28,9 +28,27 @@
#ifndef _EXYNOS_DRM_FB_H_
#define _EXYNOS_DRM_FB_H
-struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev,
- struct drm_file *filp,
- struct drm_mode_fb_cmd *mode_cmd);
+static inline int exynos_drm_format_num_buffers(uint32_t format)
+{
+ switch (format) {
+ case DRM_FORMAT_NV12M:
+ case DRM_FORMAT_NV12MT:
+ return 2;
+ case DRM_FORMAT_YUV420M:
+ return 3;
+ default:
+ return 1;
+ }
+}
+
+struct drm_framebuffer *
+exynos_drm_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct drm_gem_object *obj);
+
+/* get memory information of a drm framebuffer */
+struct exynos_drm_gem_buf *exynos_drm_fb_buffer(struct drm_framebuffer *fb,
+ int index);
void exynos_drm_mode_config_init(struct drm_device *dev);
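With drm_mode_fb_cmd2 the fb layer treats multi-planar formats as multiple gem buffers: exynos_drm_format_num_buffers() reports how many handles a fourcc needs (NV12M/NV12MT need two, YUV420M three) and exynos_drm_fb_buffer() hands back each one. A minimal consumer sketch, mirroring the overlay update loop in exynos_drm_crtc.c (program_plane_address() is a placeholder for the hardware-specific write):

	int i, nr = exynos_drm_format_num_buffers(fb->pixel_format);

	for (i = 0; i < nr; i++) {
		struct exynos_drm_gem_buf *buf = exynos_drm_fb_buffer(fb, i);

		if (!buf)
			return -EFAULT;		/* a plane handle was missing */

		/* e.g. for NV12M: i == 0 is the luma plane, i == 1 the chroma plane */
		program_plane_address(i, buf->dma_addr);
	}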
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 836f4100..3508700 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -34,7 +34,6 @@
#include "exynos_drm_drv.h"
#include "exynos_drm_fb.h"
#include "exynos_drm_gem.h"
-#include "exynos_drm_buf.h"
#define MAX_CONNECTOR 4
#define PREFERRED_BPP 32
@@ -43,8 +42,8 @@
drm_fb_helper)
struct exynos_drm_fbdev {
- struct drm_fb_helper drm_fb_helper;
- struct drm_framebuffer *fb;
+ struct drm_fb_helper drm_fb_helper;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
};
static int exynos_drm_fbdev_set_par(struct fb_info *info)
@@ -90,26 +89,24 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
{
struct fb_info *fbi = helper->fbdev;
struct drm_device *dev = helper->dev;
- struct exynos_drm_fbdev *exynos_fb = to_exynos_fbdev(helper);
struct exynos_drm_gem_buf *buffer;
unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
unsigned long offset;
DRM_DEBUG_KMS("%s\n", __FILE__);
- exynos_fb->fb = fb;
-
- drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth);
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
- buffer = exynos_drm_fb_get_buf(fb);
+ /* RGB formats use only one buffer */
+ buffer = exynos_drm_fb_buffer(fb, 0);
if (!buffer) {
DRM_LOG_KMS("buffer is null.\n");
return -EFAULT;
}
offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
- offset += fbi->var.yoffset * fb->pitch;
+ offset += fbi->var.yoffset * fb->pitches[0];
dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
fbi->screen_base = buffer->kvaddr + offset;
@@ -124,10 +121,12 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
+ struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_device *dev = helper->dev;
struct fb_info *fbi;
- struct drm_mode_fb_cmd mode_cmd = { 0 };
+ struct drm_mode_fb_cmd2 mode_cmd = { 0 };
struct platform_device *pdev = dev->platformdev;
+ unsigned long size;
int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -138,8 +137,9 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
- mode_cmd.bpp = sizes->surface_bpp;
- mode_cmd.depth = sizes->surface_depth;
+ mode_cmd.pitches[0] = sizes->surface_width * (sizes->surface_bpp >> 3);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
mutex_lock(&dev->struct_mutex);
@@ -150,14 +150,23 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
goto out;
}
- exynos_fbdev->fb = exynos_drm_fb_create(dev, NULL, &mode_cmd);
- if (IS_ERR_OR_NULL(exynos_fbdev->fb)) {
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ exynos_gem_obj = exynos_drm_gem_create(dev, size);
+ if (IS_ERR(exynos_gem_obj)) {
+ ret = PTR_ERR(exynos_gem_obj);
+ goto out;
+ }
+
+ exynos_fbdev->exynos_gem_obj = exynos_gem_obj;
+
+ helper->fb = exynos_drm_framebuffer_init(dev, &mode_cmd,
+ &exynos_gem_obj->base);
+ if (IS_ERR_OR_NULL(helper->fb)) {
DRM_ERROR("failed to create drm framebuffer.\n");
- ret = PTR_ERR(exynos_fbdev->fb);
+ ret = PTR_ERR(helper->fb);
goto out;
}
- helper->fb = exynos_fbdev->fb;
helper->fbdev = fbi;
fbi->par = helper;
@@ -171,8 +180,10 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
}
ret = exynos_drm_fbdev_update(helper, helper->fb);
- if (ret < 0)
+ if (ret < 0) {
fb_dealloc_cmap(&fbi->cmap);
+ goto out;
+ }
/*
* if failed, all resources allocated above would be released by
@@ -184,58 +195,6 @@ out:
return ret;
}
-static bool
-exynos_drm_fbdev_is_samefb(struct drm_framebuffer *fb,
- struct drm_fb_helper_surface_size *sizes)
-{
- if (fb->width != sizes->surface_width)
- return false;
- if (fb->height != sizes->surface_height)
- return false;
- if (fb->bits_per_pixel != sizes->surface_bpp)
- return false;
- if (fb->depth != sizes->surface_depth)
- return false;
-
- return true;
-}
-
-static int exynos_drm_fbdev_recreate(struct drm_fb_helper *helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- struct drm_device *dev = helper->dev;
- struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
- struct drm_framebuffer *fb = exynos_fbdev->fb;
- struct drm_mode_fb_cmd mode_cmd = { 0 };
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- if (helper->fb != fb) {
- DRM_ERROR("drm framebuffer is different\n");
- return -EINVAL;
- }
-
- if (exynos_drm_fbdev_is_samefb(fb, sizes))
- return 0;
-
- mode_cmd.width = sizes->surface_width;
- mode_cmd.height = sizes->surface_height;
- mode_cmd.bpp = sizes->surface_bpp;
- mode_cmd.depth = sizes->surface_depth;
-
- if (fb->funcs->destroy)
- fb->funcs->destroy(fb);
-
- exynos_fbdev->fb = exynos_drm_fb_create(dev, NULL, &mode_cmd);
- if (IS_ERR(exynos_fbdev->fb)) {
- DRM_ERROR("failed to allocate fb.\n");
- return PTR_ERR(exynos_fbdev->fb);
- }
-
- helper->fb = exynos_fbdev->fb;
- return exynos_drm_fbdev_update(helper, helper->fb);
-}
-
static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper,
struct drm_fb_helper_surface_size *sizes)
{
@@ -243,6 +202,10 @@ static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper,
DRM_DEBUG_KMS("%s\n", __FILE__);
+ /*
+ * with !helper->fb, it means that this funcion is called first time
+ * and after that, the helper->fb would be used as clone mode.
+ */
if (!helper->fb) {
ret = exynos_drm_fbdev_create(helper, sizes);
if (ret < 0) {
@@ -255,12 +218,6 @@ static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper,
* because register_framebuffer() should be called.
*/
ret = 1;
- } else {
- ret = exynos_drm_fbdev_recreate(helper, sizes);
- if (ret < 0) {
- DRM_ERROR("failed to reconfigure fbdev\n");
- return ret;
- }
}
return ret;
@@ -366,6 +323,9 @@ void exynos_drm_fbdev_fini(struct drm_device *dev)
fbdev = to_exynos_fbdev(private->fb_helper);
+ if (fbdev->exynos_gem_obj)
+ exynos_drm_gem_destroy(fbdev->exynos_gem_obj);
+
exynos_drm_fbdev_destroy(dev, private->fb_helper);
kfree(fbdev);
private->fb_helper = NULL;
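The fbdev path now describes its console surface with drm_mode_fb_cmd2, so pitch and fourcc must be filled in explicitly instead of bpp/depth. For a 1920x1080, 32 bpp / depth 24 surface the fill works out roughly to (illustrative numbers):

	struct drm_mode_fb_cmd2 mode_cmd = { 0 };

	mode_cmd.width = 1920;
	mode_cmd.height = 1080;
	mode_cmd.pitches[0] = 1920 * (32 >> 3);	/* bytes per scanline */
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(32, 24);
						/* resolves to DRM_FORMAT_XRGB8888 */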
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index db3b3d9..360adf2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
+#include <linux/pm_runtime.h>
#include <drm/exynos_drm.h>
#include <plat/regs-fb-v4.h>
@@ -68,6 +69,7 @@ struct fimd_win_data {
void __iomem *vaddr;
unsigned int buf_offsize;
unsigned int line_size; /* bytes */
+ bool enabled;
};
struct fimd_context {
@@ -84,8 +86,10 @@ struct fimd_context {
unsigned long irq_flags;
u32 vidcon0;
u32 vidcon1;
+ bool suspended;
+ struct mutex lock;
- struct fb_videomode *timing;
+ struct exynos_drm_panel_info *panel;
};
static bool fimd_display_is_connected(struct device *dev)
@@ -97,13 +101,13 @@ static bool fimd_display_is_connected(struct device *dev)
return true;
}
-static void *fimd_get_timing(struct device *dev)
+static void *fimd_get_panel(struct device *dev)
{
struct fimd_context *ctx = get_fimd_context(dev);
DRM_DEBUG_KMS("%s\n", __FILE__);
- return ctx->timing;
+ return ctx->panel;
}
static int fimd_check_timing(struct device *dev, void *timing)
@@ -119,7 +123,7 @@ static int fimd_display_power_on(struct device *dev, int mode)
{
DRM_DEBUG_KMS("%s\n", __FILE__);
- /* TODO. */
+ /* TODO */
return 0;
}
@@ -127,17 +131,75 @@ static int fimd_display_power_on(struct device *dev, int mode)
static struct exynos_drm_display_ops fimd_display_ops = {
.type = EXYNOS_DISPLAY_TYPE_LCD,
.is_connected = fimd_display_is_connected,
- .get_timing = fimd_get_timing,
+ .get_panel = fimd_get_panel,
.check_timing = fimd_check_timing,
.power_on = fimd_display_power_on,
};
+static void fimd_dpms(struct device *subdrv_dev, int mode)
+{
+ struct fimd_context *ctx = get_fimd_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s, %d\n", __FILE__, mode);
+
+ mutex_lock(&ctx->lock);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ /*
+ * enable fimd hardware only if it is in the suspended state.
+ *
+ * note: fimd_dpms can also be called at boot time, so without this
+ * check clk_enable could end up being called twice.
+ */
+ if (ctx->suspended)
+ pm_runtime_get_sync(subdrv_dev);
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ if (!ctx->suspended)
+ pm_runtime_put_sync(subdrv_dev);
+ break;
+ default:
+ DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+ break;
+ }
+
+ mutex_unlock(&ctx->lock);
+}
+
+static void fimd_apply(struct device *subdrv_dev)
+{
+ struct fimd_context *ctx = get_fimd_context(subdrv_dev);
+ struct exynos_drm_manager *mgr = &ctx->subdrv.manager;
+ struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
+ struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops;
+ struct fimd_win_data *win_data;
+ int i;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ for (i = 0; i < WINDOWS_NR; i++) {
+ win_data = &ctx->win_data[i];
+ if (win_data->enabled && (ovl_ops && ovl_ops->commit))
+ ovl_ops->commit(subdrv_dev, i);
+ }
+
+ if (mgr_ops && mgr_ops->commit)
+ mgr_ops->commit(subdrv_dev);
+}
+
static void fimd_commit(struct device *dev)
{
struct fimd_context *ctx = get_fimd_context(dev);
- struct fb_videomode *timing = ctx->timing;
+ struct exynos_drm_panel_info *panel = ctx->panel;
+ struct fb_videomode *timing = &panel->timing;
u32 val;
+ if (ctx->suspended)
+ return;
+
DRM_DEBUG_KMS("%s\n", __FILE__);
/* setup polarity values from machine code. */
@@ -177,40 +239,6 @@ static void fimd_commit(struct device *dev)
writel(val, ctx->regs + VIDCON0);
}
-static void fimd_disable(struct device *dev)
-{
- struct fimd_context *ctx = get_fimd_context(dev);
- struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
- struct drm_device *drm_dev = subdrv->drm_dev;
- struct exynos_drm_manager *manager = &subdrv->manager;
- u32 val;
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- /* fimd dma off */
- val = readl(ctx->regs + VIDCON0);
- val &= ~(VIDCON0_ENVID | VIDCON0_ENVID_F);
- writel(val, ctx->regs + VIDCON0);
-
- /*
- * if vblank is enabled status with dma off then
- * it disables vsync interrupt.
- */
- if (drm_dev->vblank_enabled[manager->pipe] &&
- atomic_read(&drm_dev->vblank_refcount[manager->pipe])) {
- drm_vblank_put(drm_dev, manager->pipe);
-
- /*
- * if vblank_disable_allowed is 0 then disable
- * vsync interrupt right now else the vsync interrupt
- * would be disabled by drm timer once a current process
- * gives up ownershop of vblank event.
- */
- if (!drm_dev->vblank_disable_allowed)
- drm_vblank_off(drm_dev, manager->pipe);
- }
-}
-
static int fimd_enable_vblank(struct device *dev)
{
struct fimd_context *ctx = get_fimd_context(dev);
@@ -218,6 +246,9 @@ static int fimd_enable_vblank(struct device *dev)
DRM_DEBUG_KMS("%s\n", __FILE__);
+ if (ctx->suspended)
+ return -EPERM;
+
if (!test_and_set_bit(0, &ctx->irq_flags)) {
val = readl(ctx->regs + VIDINTCON0);
@@ -242,6 +273,9 @@ static void fimd_disable_vblank(struct device *dev)
DRM_DEBUG_KMS("%s\n", __FILE__);
+ if (ctx->suspended)
+ return;
+
if (test_and_clear_bit(0, &ctx->irq_flags)) {
val = readl(ctx->regs + VIDINTCON0);
@@ -253,8 +287,9 @@ static void fimd_disable_vblank(struct device *dev)
}
static struct exynos_drm_manager_ops fimd_manager_ops = {
+ .dpms = fimd_dpms,
+ .apply = fimd_apply,
.commit = fimd_commit,
- .disable = fimd_disable,
.enable_vblank = fimd_enable_vblank,
.disable_vblank = fimd_disable_vblank,
};
@@ -264,6 +299,7 @@ static void fimd_win_mode_set(struct device *dev,
{
struct fimd_context *ctx = get_fimd_context(dev);
struct fimd_win_data *win_data;
+ int win;
unsigned long offset;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -273,12 +309,19 @@ static void fimd_win_mode_set(struct device *dev,
return;
}
+ win = overlay->zpos;
+ if (win == DEFAULT_ZPOS)
+ win = ctx->default_win;
+
+ if (win < 0 || win > WINDOWS_NR)
+ return;
+
offset = overlay->fb_x * (overlay->bpp >> 3);
offset += overlay->fb_y * overlay->pitch;
DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, overlay->pitch);
- win_data = &ctx->win_data[ctx->default_win];
+ win_data = &ctx->win_data[win];
win_data->offset_x = overlay->crtc_x;
win_data->offset_y = overlay->crtc_y;
@@ -286,8 +329,8 @@ static void fimd_win_mode_set(struct device *dev,
win_data->ovl_height = overlay->crtc_height;
win_data->fb_width = overlay->fb_width;
win_data->fb_height = overlay->fb_height;
- win_data->dma_addr = overlay->dma_addr + offset;
- win_data->vaddr = overlay->vaddr + offset;
+ win_data->dma_addr = overlay->dma_addr[0] + offset;
+ win_data->vaddr = overlay->vaddr[0] + offset;
win_data->bpp = overlay->bpp;
win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
(overlay->bpp >> 3);
@@ -381,15 +424,21 @@ static void fimd_win_set_colkey(struct device *dev, unsigned int win)
writel(keycon1, ctx->regs + WKEYCON1_BASE(win));
}
-static void fimd_win_commit(struct device *dev)
+static void fimd_win_commit(struct device *dev, int zpos)
{
struct fimd_context *ctx = get_fimd_context(dev);
struct fimd_win_data *win_data;
- int win = ctx->default_win;
+ int win = zpos;
unsigned long val, alpha, size;
DRM_DEBUG_KMS("%s\n", __FILE__);
+ if (ctx->suspended)
+ return;
+
+ if (win == DEFAULT_ZPOS)
+ win = ctx->default_win;
+
if (win < 0 || win > WINDOWS_NR)
return;
@@ -472,24 +521,37 @@ static void fimd_win_commit(struct device *dev)
if (win != 0)
fimd_win_set_colkey(dev, win);
+ /* wincon */
+ val = readl(ctx->regs + WINCON(win));
+ val |= WINCONx_ENWIN;
+ writel(val, ctx->regs + WINCON(win));
+
/* Enable DMA channel and unprotect windows */
val = readl(ctx->regs + SHADOWCON);
val |= SHADOWCON_CHx_ENABLE(win);
val &= ~SHADOWCON_WINx_PROTECT(win);
writel(val, ctx->regs + SHADOWCON);
+
+ win_data->enabled = true;
}
-static void fimd_win_disable(struct device *dev)
+static void fimd_win_disable(struct device *dev, int zpos)
{
struct fimd_context *ctx = get_fimd_context(dev);
- int win = ctx->default_win;
+ struct fimd_win_data *win_data;
+ int win = zpos;
u32 val;
DRM_DEBUG_KMS("%s\n", __FILE__);
+ if (win == DEFAULT_ZPOS)
+ win = ctx->default_win;
+
if (win < 0 || win > WINDOWS_NR)
return;
+ win_data = &ctx->win_data[win];
+
/* protect windows */
val = readl(ctx->regs + SHADOWCON);
val |= SHADOWCON_WINx_PROTECT(win);
@@ -505,6 +567,8 @@ static void fimd_win_disable(struct device *dev)
val &= ~SHADOWCON_CHx_ENABLE(win);
val &= ~SHADOWCON_WINx_PROTECT(win);
writel(val, ctx->regs + SHADOWCON);
+
+ win_data->enabled = false;
}
static struct exynos_drm_overlay_ops fimd_overlay_ops = {
@@ -540,8 +604,21 @@ static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
wake_up_interruptible(&e->base.file_priv->event_wait);
}
- if (is_checked)
- drm_vblank_put(drm_dev, crtc);
+ if (is_checked) {
+ /*
+ * call drm_vblank_put() only if drm_vblank_get() was
+ * called for this crtc.
+ */
+ if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
+ drm_vblank_put(drm_dev, crtc);
+
+ /*
+ * don't call drm_vblank_off() if vblank_disable_allowed is 1;
+ * in that case vblank will be turned off by the timer handler.
+ */
+ if (!drm_dev->vblank_disable_allowed)
+ drm_vblank_off(drm_dev, crtc);
+ }
spin_unlock_irqrestore(&drm_dev->event_lock, flags);
}
@@ -560,19 +637,14 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
/* VSYNC interrupt */
writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
- /*
- * in case that vblank_disable_allowed is 1, it could induce
- * the problem that manager->pipe could be -1 because with
- * disable callback, vsync interrupt isn't disabled and at this moment,
- * vsync interrupt could occur. the vsync interrupt would be disabled
- * by timer handler later.
- */
- if (manager->pipe == -1)
- return IRQ_HANDLED;
+ /* check the crtc is detached already from encoder */
+ if (manager->pipe < 0)
+ goto out;
drm_handle_vblank(drm_dev, manager->pipe);
fimd_finish_pageflip(drm_dev, manager->pipe);
+out:
return IRQ_HANDLED;
}
@@ -590,6 +662,13 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
*/
drm_dev->irq_enabled = 1;
+ /*
+ * with vblank_disable_allowed = 1, the vblank interrupt will be
+ * disabled by the drm timer once the current process gives up
+ * ownership of the vblank event (i.e. after drm_vblank_put()
+ * is called).
+ */
+ drm_dev->vblank_disable_allowed = 1;
+
return 0;
}
@@ -662,13 +741,53 @@ static void fimd_clear_win(struct fimd_context *ctx, int win)
writel(val, ctx->regs + SHADOWCON);
}
+static int fimd_power_on(struct fimd_context *ctx, bool enable)
+{
+ struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
+ struct device *dev = subdrv->manager.dev;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (enable != false && enable != true)
+ return -EINVAL;
+
+ if (enable) {
+ int ret;
+
+ ret = clk_enable(ctx->bus_clk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_enable(ctx->lcd_clk);
+ if (ret < 0) {
+ clk_disable(ctx->bus_clk);
+ return ret;
+ }
+
+ ctx->suspended = false;
+
+ /* if vblank was enabled before the power-off, enable it again. */
+ if (test_and_clear_bit(0, &ctx->irq_flags))
+ fimd_enable_vblank(dev);
+
+ fimd_apply(dev);
+ } else {
+ clk_disable(ctx->lcd_clk);
+ clk_disable(ctx->bus_clk);
+
+ ctx->suspended = true;
+ }
+
+ return 0;
+}
+
static int __devinit fimd_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct fimd_context *ctx;
struct exynos_drm_subdrv *subdrv;
struct exynos_drm_fimd_pdata *pdata;
- struct fb_videomode *timing;
+ struct exynos_drm_panel_info *panel;
struct resource *res;
int win;
int ret = -EINVAL;
@@ -681,9 +800,9 @@ static int __devinit fimd_probe(struct platform_device *pdev)
return -EINVAL;
}
- timing = &pdata->timing;
- if (!timing) {
- dev_err(dev, "timing is null.\n");
+ panel = &pdata->panel;
+ if (!panel) {
+ dev_err(dev, "panel is null.\n");
return -EINVAL;
}
@@ -739,25 +858,22 @@ static int __devinit fimd_probe(struct platform_device *pdev)
ctx->irq = res->start;
- for (win = 0; win < WINDOWS_NR; win++)
- fimd_clear_win(ctx, win);
-
ret = request_irq(ctx->irq, fimd_irq_handler, 0, "drm_fimd", ctx);
if (ret < 0) {
dev_err(dev, "irq request failed.\n");
goto err_req_irq;
}
- ctx->clkdiv = fimd_calc_clkdiv(ctx, timing);
+ ctx->clkdiv = fimd_calc_clkdiv(ctx, &panel->timing);
ctx->vidcon0 = pdata->vidcon0;
ctx->vidcon1 = pdata->vidcon1;
ctx->default_win = pdata->default_win;
- ctx->timing = timing;
+ ctx->panel = panel;
- timing->pixclock = clk_get_rate(ctx->lcd_clk) / ctx->clkdiv;
+ panel->timing.pixclock = clk_get_rate(ctx->lcd_clk) / ctx->clkdiv;
DRM_DEBUG_KMS("pixel clock = %d, clkdiv = %d\n",
- timing->pixclock, ctx->clkdiv);
+ panel->timing.pixclock, ctx->clkdiv);
subdrv = &ctx->subdrv;
@@ -769,7 +885,17 @@ static int __devinit fimd_probe(struct platform_device *pdev)
subdrv->manager.display_ops = &fimd_display_ops;
subdrv->manager.dev = dev;
+ mutex_init(&ctx->lock);
+
platform_set_drvdata(pdev, ctx);
+
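+ /* hold a runtime pm reference while the windows are cleared below */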
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
+
+ for (win = 0; win < WINDOWS_NR; win++)
+ fimd_clear_win(ctx, win);
+
exynos_drm_subdrv_register(subdrv);
return 0;
@@ -797,14 +923,25 @@ err_clk_get:
static int __devexit fimd_remove(struct platform_device *pdev)
{
+ struct device *dev = &pdev->dev;
struct fimd_context *ctx = platform_get_drvdata(pdev);
DRM_DEBUG_KMS("%s\n", __FILE__);
exynos_drm_subdrv_unregister(&ctx->subdrv);
+ if (ctx->suspended)
+ goto out;
+
clk_disable(ctx->lcd_clk);
clk_disable(ctx->bus_clk);
+
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_sync(dev);
+
+out:
+ pm_runtime_disable(dev);
+
clk_put(ctx->lcd_clk);
clk_put(ctx->bus_clk);
@@ -818,12 +955,70 @@ static int __devexit fimd_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int fimd_suspend(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+
+ if (pm_runtime_suspended(dev))
+ return 0;
+
+ /*
+ * do not use pm_runtime_suspend() here. it would return an
+ * error because the runtime PM usage_count is still greater
+ * than 1 at this point.
+ */
+ return fimd_power_on(ctx, false);
+}
+
+static int fimd_resume(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+
+ /*
+ * if the system entered sleep while the lcd panel was on, the
+ * runtime PM usage_count is still 1, so in this case the fimd
+ * driver should be powered on directly instead of going through
+ * the runtime PM interface.
+ */
+ if (!pm_runtime_suspended(dev))
+ return fimd_power_on(ctx, true);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_RUNTIME
+static int fimd_runtime_suspend(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ return fimd_power_on(ctx, false);
+}
+
+static int fimd_runtime_resume(struct device *dev)
+{
+ struct fimd_context *ctx = get_fimd_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ return fimd_power_on(ctx, true);
+}
+#endif
+
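+/*
+ * SET_SYSTEM_SLEEP_PM_OPS() and SET_RUNTIME_PM_OPS() expand to nothing
+ * when CONFIG_PM_SLEEP / CONFIG_PM_RUNTIME are disabled, matching the
+ * #ifdef guards around the callbacks above.
+ */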
+static const struct dev_pm_ops fimd_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume)
+ SET_RUNTIME_PM_OPS(fimd_runtime_suspend, fimd_runtime_resume, NULL)
+};
+
static struct platform_driver fimd_driver = {
.probe = fimd_probe,
.remove = __devexit_p(fimd_remove),
.driver = {
.name = "exynos4-fb",
.owner = THIS_MODULE,
+ .pm = &fimd_pm_ops,
},
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index aba0fe4..025abb3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -55,17 +55,54 @@ static unsigned int convert_to_vm_err_msg(int msg)
return out_msg;
}
-static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
+static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
+ struct drm_file *file_priv,
+ unsigned int *handle)
{
+ int ret;
+
+ /*
+ * allocate an id from the idr table where the obj is registered;
+ * the handle holds the id that userspace sees.
+ */
+ ret = drm_gem_handle_create(file_priv, obj, handle);
+ if (ret)
+ return ret;
+
+ DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
+
+ /* drop reference from allocate - handle holds it now. */
+ drm_gem_object_unreference_unlocked(obj);
+
+ return 0;
+}
+
+void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
+{
+ struct drm_gem_object *obj;
+
DRM_DEBUG_KMS("%s\n", __FILE__);
- return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
+ if (!exynos_gem_obj)
+ return;
+
+ obj = &exynos_gem_obj->base;
+
+ DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));
+
+ exynos_drm_buf_destroy(obj->dev, exynos_gem_obj->buffer);
+
+ if (obj->map_list.map)
+ drm_gem_free_mmap_offset(obj);
+
+ /* release file pointer to gem object. */
+ drm_gem_object_release(obj);
+
+ kfree(exynos_gem_obj);
}
-static struct exynos_drm_gem_obj
- *exynos_drm_gem_init(struct drm_device *drm_dev,
- struct drm_file *file_priv, unsigned int *handle,
- unsigned int size)
+static struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
+ unsigned long size)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
@@ -73,75 +110,41 @@ static struct exynos_drm_gem_obj
exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
if (!exynos_gem_obj) {
- DRM_ERROR("failed to allocate exynos gem object.\n");
- return ERR_PTR(-ENOMEM);
+ DRM_ERROR("failed to allocate exynos gem object\n");
+ return NULL;
}
obj = &exynos_gem_obj->base;
- ret = drm_gem_object_init(drm_dev, obj, size);
+ ret = drm_gem_object_init(dev, obj, size);
if (ret < 0) {
- DRM_ERROR("failed to initialize gem object.\n");
- ret = -EINVAL;
- goto err_object_init;
+ DRM_ERROR("failed to initialize gem object\n");
+ kfree(exynos_gem_obj);
+ return NULL;
}
DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
- ret = drm_gem_create_mmap_offset(obj);
- if (ret < 0) {
- DRM_ERROR("failed to allocate mmap offset.\n");
- goto err_create_mmap_offset;
- }
-
- /*
- * allocate a id of idr table where the obj is registered
- * and handle has the id what user can see.
- */
- ret = drm_gem_handle_create(file_priv, obj, handle);
- if (ret)
- goto err_handle_create;
-
- DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
-
- /* drop reference from allocate - handle holds it now. */
- drm_gem_object_unreference_unlocked(obj);
-
return exynos_gem_obj;
-
-err_handle_create:
- drm_gem_free_mmap_offset(obj);
-
-err_create_mmap_offset:
- drm_gem_object_release(obj);
-
-err_object_init:
- kfree(exynos_gem_obj);
-
- return ERR_PTR(ret);
}
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
- struct drm_file *file_priv,
- unsigned int *handle, unsigned long size)
+ unsigned long size)
{
-
- struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
struct exynos_drm_gem_buf *buffer;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
size = roundup(size, PAGE_SIZE);
-
DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size);
buffer = exynos_drm_buf_create(dev, size);
- if (IS_ERR(buffer)) {
- return ERR_CAST(buffer);
- }
+ if (!buffer)
+ return ERR_PTR(-ENOMEM);
- exynos_gem_obj = exynos_drm_gem_init(dev, file_priv, handle, size);
- if (IS_ERR(exynos_gem_obj)) {
+ exynos_gem_obj = exynos_drm_gem_init(dev, size);
+ if (!exynos_gem_obj) {
exynos_drm_buf_destroy(dev, buffer);
- return exynos_gem_obj;
+ return ERR_PTR(-ENOMEM);
}
exynos_gem_obj->buffer = buffer;
@@ -150,23 +153,30 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
}
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file_priv)
{
struct drm_exynos_gem_create *args = data;
- struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
+ struct exynos_drm_gem_obj *exynos_gem_obj;
+ int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
- exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
- &args->handle, args->size);
+ exynos_gem_obj = exynos_drm_gem_create(dev, args->size);
if (IS_ERR(exynos_gem_obj))
return PTR_ERR(exynos_gem_obj);
+ ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
+ &args->handle);
+ if (ret) {
+ exynos_drm_gem_destroy(exynos_gem_obj);
+ return ret;
+ }
+
return 0;
}
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file_priv)
{
struct drm_exynos_gem_map_off *args = data;
@@ -185,7 +195,7 @@ int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
}
static int exynos_drm_gem_mmap_buffer(struct file *filp,
- struct vm_area_struct *vma)
+ struct vm_area_struct *vma)
{
struct drm_gem_object *obj = filp->private_data;
struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
@@ -196,6 +206,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
vma->vm_flags |= (VM_IO | VM_RESERVED);
+ /* in case of direct mapping, always use a non-cacheable attribute */
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
vma->vm_file = filp;
@@ -232,7 +243,7 @@ static const struct file_operations exynos_drm_gem_fops = {
};
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file_priv)
{
struct drm_exynos_gem_mmap *args = data;
struct drm_gem_object *obj;
@@ -278,32 +289,19 @@ int exynos_drm_gem_init_object(struct drm_gem_object *obj)
return 0;
}
-void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj)
+void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
- struct exynos_drm_gem_obj *exynos_gem_obj;
-
DRM_DEBUG_KMS("%s\n", __FILE__);
- DRM_DEBUG_KMS("handle count = %d\n",
- atomic_read(&gem_obj->handle_count));
-
- if (gem_obj->map_list.map)
- drm_gem_free_mmap_offset(gem_obj);
-
- /* release file pointer to gem object. */
- drm_gem_object_release(gem_obj);
-
- exynos_gem_obj = to_exynos_gem_obj(gem_obj);
-
- exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->buffer);
-
- kfree(exynos_gem_obj);
+ exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev, struct drm_mode_create_dumb *args)
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
+ int ret;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -316,19 +314,27 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
args->pitch = args->width * args->bpp >> 3;
args->size = args->pitch * args->height;
- exynos_gem_obj = exynos_drm_gem_create(dev, file_priv, &args->handle,
- args->size);
+ exynos_gem_obj = exynos_drm_gem_create(dev, args->size);
if (IS_ERR(exynos_gem_obj))
return PTR_ERR(exynos_gem_obj);
+ ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
+ &args->handle);
+ if (ret) {
+ exynos_drm_gem_destroy(exynos_gem_obj);
+ return ret;
+ }
+
return 0;
}
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle, uint64_t *offset)
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset)
{
struct exynos_drm_gem_obj *exynos_gem_obj;
struct drm_gem_object *obj;
+ int ret = 0;
DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -343,19 +349,46 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
obj = drm_gem_object_lookup(dev, file_priv, handle);
if (!obj) {
DRM_ERROR("failed to lookup gem object.\n");
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
+ ret = -EINVAL;
+ goto unlock;
}
exynos_gem_obj = to_exynos_gem_obj(obj);
- *offset = get_gem_mmap_offset(&exynos_gem_obj->base);
-
- drm_gem_object_unreference(obj);
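+ /*
+ * create the fake mmap offset lazily on first use instead of
+ * unconditionally at gem object creation time.
+ */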
+ if (!exynos_gem_obj->base.map_list.map) {
+ ret = drm_gem_create_mmap_offset(&exynos_gem_obj->base);
+ if (ret)
+ goto out;
+ }
+ *offset = (u64)exynos_gem_obj->base.map_list.hash.key << PAGE_SHIFT;
DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
+out:
+ drm_gem_object_unreference(obj);
+unlock:
mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ unsigned int handle)
+{
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ /*
+ * obj->refcount and obj->handle_count are decreased and,
+ * if both of them reach 0, exynos_drm_gem_free_object()
+ * is called via callback to release the resources.
+ */
+ ret = drm_gem_handle_delete(file_priv, handle);
+ if (ret < 0) {
+ DRM_ERROR("failed to delete drm_gem_handle.\n");
+ return ret;
+ }
return 0;
}
@@ -403,28 +436,6 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return ret;
}
-
-int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev, unsigned int handle)
-{
- int ret;
-
- DRM_DEBUG_KMS("%s\n", __FILE__);
-
- /*
- * obj->refcount and obj->handle_count are decreased and
- * if both them are 0 then exynos_drm_gem_free_object()
- * would be called by callback to release resources.
- */
- ret = drm_gem_handle_delete(file_priv, handle);
- if (ret < 0) {
- DRM_ERROR("failed to delete drm_gem_handle.\n");
- return ret;
- }
-
- return 0;
-}
-
MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
MODULE_DESCRIPTION("Samsung SoC DRM GEM Module");
MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index ef87973..67cdc91 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -60,14 +60,16 @@ struct exynos_drm_gem_buf {
* user can access the buffer through kms_bo.handle.
*/
struct exynos_drm_gem_obj {
- struct drm_gem_object base;
- struct exynos_drm_gem_buf *buffer;
+ struct drm_gem_object base;
+ struct exynos_drm_gem_buf *buffer;
};
-/* create a new buffer and get a new gem handle. */
+/* destroy a gem object together with its buffer */
+void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj);
+
+/* create a new gem object together with its buffer */
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
- struct drm_file *file_priv,
- unsigned int *handle, unsigned long size);
+ unsigned long size);
/*
* request gem object creation and buffer allocation as the size
@@ -75,15 +77,18 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
* height and bpp.
*/
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+ struct drm_file *file_priv);
/* get buffer offset to map to user space. */
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+ struct drm_file *file_priv);
-/* unmap a buffer from user space. */
-int exynos_drm_gem_munmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
+/*
+ * mmap the physically contiguous memory that a gem object contains
+ * to user space.
+ */
+int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/* initialize gem object. */
int exynos_drm_gem_init_object(struct drm_gem_object *obj);
@@ -93,24 +98,13 @@ void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj);
/* create memory region for drm framebuffer. */
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
- struct drm_device *dev, struct drm_mode_create_dumb *args);
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
/* map memory region for drm framebuffer to user space. */
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle, uint64_t *offset);
-
-/* page fault handler and mmap fault address(virtual) to physical memory. */
-int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-
-/*
- * mmap the physically continuous memory that a gem object contains
- * to user space.
- */
-int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-/* set vm_flags and we can change the vm attribute to other one at here. */
-int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset);
/*
* destroy memory region allocated.
@@ -118,6 +112,13 @@ int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
* would be released by drm_gem_handle_delete().
*/
int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
- struct drm_device *dev, unsigned int handle);
+ struct drm_device *dev,
+ unsigned int handle);
+
+/* page fault handler that maps a faulting (virtual) address to physical memory. */
+int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+
+/* set vm_flags; the vm attributes can be changed to another type here. */
+int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
new file mode 100644
index 0000000..ed8a319e
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -0,0 +1,439 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_hdmi.h"
+
+#define to_context(dev) platform_get_drvdata(to_platform_device(dev))
+#define to_subdrv(dev) to_context(dev)
+#define get_ctx_from_subdrv(subdrv) container_of(subdrv,\
+ struct drm_hdmi_context, subdrv);
+
+/* these callback points should be set by specific drivers. */
+static struct exynos_hdmi_display_ops *hdmi_display_ops;
+static struct exynos_hdmi_manager_ops *hdmi_manager_ops;
+static struct exynos_hdmi_overlay_ops *hdmi_overlay_ops;
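+/*
+ * all users of these pointers below check for NULL before calling
+ * through them, so the common driver keeps working (as a no-op) until
+ * the specific drivers have registered their ops.
+ */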
+
+struct drm_hdmi_context {
+ struct exynos_drm_subdrv subdrv;
+ struct exynos_drm_hdmi_context *hdmi_ctx;
+ struct exynos_drm_hdmi_context *mixer_ctx;
+ struct work_struct work;
+};
+
+void exynos_drm_display_ops_register(struct exynos_hdmi_display_ops
+ *display_ops)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (display_ops)
+ hdmi_display_ops = display_ops;
+}
+EXPORT_SYMBOL(exynos_drm_display_ops_register);
+
+void exynos_drm_manager_ops_register(struct exynos_hdmi_manager_ops
+ *manager_ops)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (manager_ops)
+ hdmi_manager_ops = manager_ops;
+}
+EXPORT_SYMBOL(exynos_drm_manager_ops_register);
+
+void exynos_drm_overlay_ops_register(struct exynos_hdmi_overlay_ops
+ *overlay_ops)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (overlay_ops)
+ hdmi_overlay_ops = overlay_ops;
+}
+EXPORT_SYMBOL(exynos_drm_overlay_ops_register);
+
+static bool drm_hdmi_is_connected(struct device *dev)
+{
+ struct drm_hdmi_context *ctx = to_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_display_ops && hdmi_display_ops->is_connected)
+ return hdmi_display_ops->is_connected(ctx->hdmi_ctx->ctx);
+
+ return false;
+}
+
+static int drm_hdmi_get_edid(struct device *dev,
+ struct drm_connector *connector, u8 *edid, int len)
+{
+ struct drm_hdmi_context *ctx = to_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_display_ops && hdmi_display_ops->get_edid)
+ return hdmi_display_ops->get_edid(ctx->hdmi_ctx->ctx,
+ connector, edid, len);
+
+ return 0;
+}
+
+static int drm_hdmi_check_timing(struct device *dev, void *timing)
+{
+ struct drm_hdmi_context *ctx = to_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_display_ops && hdmi_display_ops->check_timing)
+ return hdmi_display_ops->check_timing(ctx->hdmi_ctx->ctx,
+ timing);
+
+ return 0;
+}
+
+static int drm_hdmi_power_on(struct device *dev, int mode)
+{
+ struct drm_hdmi_context *ctx = to_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_display_ops && hdmi_display_ops->power_on)
+ return hdmi_display_ops->power_on(ctx->hdmi_ctx->ctx, mode);
+
+ return 0;
+}
+
+static struct exynos_drm_display_ops drm_hdmi_display_ops = {
+ .type = EXYNOS_DISPLAY_TYPE_HDMI,
+ .is_connected = drm_hdmi_is_connected,
+ .get_edid = drm_hdmi_get_edid,
+ .check_timing = drm_hdmi_check_timing,
+ .power_on = drm_hdmi_power_on,
+};
+
+static int drm_hdmi_enable_vblank(struct device *subdrv_dev)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+ struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
+ struct exynos_drm_manager *manager = &subdrv->manager;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_overlay_ops && hdmi_overlay_ops->enable_vblank)
+ return hdmi_overlay_ops->enable_vblank(ctx->mixer_ctx->ctx,
+ manager->pipe);
+
+ return 0;
+}
+
+static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_overlay_ops && hdmi_overlay_ops->disable_vblank)
+ return hdmi_overlay_ops->disable_vblank(ctx->mixer_ctx->ctx);
+}
+
+static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_manager_ops && hdmi_manager_ops->mode_set)
+ hdmi_manager_ops->mode_set(ctx->hdmi_ctx->ctx, mode);
+}
+
+static void drm_hdmi_commit(struct device *subdrv_dev)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_manager_ops && hdmi_manager_ops->commit)
+ hdmi_manager_ops->commit(ctx->hdmi_ctx->ctx);
+}
+
+static void drm_hdmi_dpms(struct device *subdrv_dev, int mode)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ case DRM_MODE_DPMS_SUSPEND:
+ case DRM_MODE_DPMS_OFF:
+ if (hdmi_manager_ops && hdmi_manager_ops->disable)
+ hdmi_manager_ops->disable(ctx->hdmi_ctx->ctx);
+ break;
+ default:
+ DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
+ break;
+ }
+}
+
+static struct exynos_drm_manager_ops drm_hdmi_manager_ops = {
+ .dpms = drm_hdmi_dpms,
+ .enable_vblank = drm_hdmi_enable_vblank,
+ .disable_vblank = drm_hdmi_disable_vblank,
+ .mode_set = drm_hdmi_mode_set,
+ .commit = drm_hdmi_commit,
+};
+
+static void drm_mixer_mode_set(struct device *subdrv_dev,
+ struct exynos_drm_overlay *overlay)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_overlay_ops && hdmi_overlay_ops->win_mode_set)
+ hdmi_overlay_ops->win_mode_set(ctx->mixer_ctx->ctx, overlay);
+}
+
+static void drm_mixer_commit(struct device *subdrv_dev, int zpos)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_overlay_ops && hdmi_overlay_ops->win_commit)
+ hdmi_overlay_ops->win_commit(ctx->mixer_ctx->ctx, zpos);
+}
+
+static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
+{
+ struct drm_hdmi_context *ctx = to_context(subdrv_dev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ if (hdmi_overlay_ops && hdmi_overlay_ops->win_disable)
+ hdmi_overlay_ops->win_disable(ctx->mixer_ctx->ctx, zpos);
+}
+
+static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
+ .mode_set = drm_mixer_mode_set,
+ .commit = drm_mixer_commit,
+ .disable = drm_mixer_disable,
+};
+
+
+static int hdmi_subdrv_probe(struct drm_device *drm_dev,
+ struct device *dev)
+{
+ struct exynos_drm_subdrv *subdrv = to_subdrv(dev);
+ struct drm_hdmi_context *ctx;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct exynos_drm_common_hdmi_pd *pd;
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ pd = pdev->dev.platform_data;
+
+ if (!pd) {
+ DRM_DEBUG_KMS("platform data is null.\n");
+ return -EFAULT;
+ }
+
+ if (!pd->hdmi_dev) {
+ DRM_DEBUG_KMS("hdmi device is null.\n");
+ return -EFAULT;
+ }
+
+ if (!pd->mixer_dev) {
+ DRM_DEBUG_KMS("mixer device is null.\n");
+ return -EFAULT;
+ }
+
+ ret = platform_driver_register(&hdmi_driver);
+ if (ret) {
+ DRM_DEBUG_KMS("failed to register hdmi driver.\n");
+ return ret;
+ }
+
+ ret = platform_driver_register(&mixer_driver);
+ if (ret) {
+ DRM_DEBUG_KMS("failed to register mixer driver.\n");
+ goto err_hdmidrv;
+ }
+
+ ctx = get_ctx_from_subdrv(subdrv);
+
+ ctx->hdmi_ctx = (struct exynos_drm_hdmi_context *)
+ to_context(pd->hdmi_dev);
+ if (!ctx->hdmi_ctx) {
+ DRM_DEBUG_KMS("hdmi context is null.\n");
+ ret = -EFAULT;
+ goto err_mixerdrv;
+ }
+
+ ctx->hdmi_ctx->drm_dev = drm_dev;
+
+ ctx->mixer_ctx = (struct exynos_drm_hdmi_context *)
+ to_context(pd->mixer_dev);
+ if (!ctx->mixer_ctx) {
+ DRM_DEBUG_KMS("mixer context is null.\n");
+ ret = -EFAULT;
+ goto err_mixerdrv;
+ }
+
+ ctx->mixer_ctx->drm_dev = drm_dev;
+
+ return 0;
+
+err_mixerdrv:
+ platform_driver_unregister(&mixer_driver);
+err_hdmidrv:
+ platform_driver_unregister(&hdmi_driver);
+ return ret;
+}
+
+static void hdmi_subdrv_remove(struct drm_device *drm_dev)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ platform_driver_unregister(&hdmi_driver);
+ platform_driver_unregister(&mixer_driver);
+}
+
+static void exynos_drm_hdmi_late_probe(struct work_struct *work)
+{
+ struct drm_hdmi_context *ctx = container_of(work,
+ struct drm_hdmi_context, work);
+
+ /*
+ * this function calls subdrv->probe(), so it must run after
+ * this driver's own probe has completed.
+ *
+ * PS. subdrv->probe() will call platform_driver_register() to probe
+ * the hdmi and mixer drivers.
+ */
+ exynos_drm_subdrv_register(&ctx->subdrv);
+}
+
+static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct exynos_drm_subdrv *subdrv;
+ struct drm_hdmi_context *ctx;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ DRM_LOG_KMS("failed to alloc common hdmi context.\n");
+ return -ENOMEM;
+ }
+
+ subdrv = &ctx->subdrv;
+
+ subdrv->probe = hdmi_subdrv_probe;
+ subdrv->remove = hdmi_subdrv_remove;
+ subdrv->manager.pipe = -1;
+ subdrv->manager.ops = &drm_hdmi_manager_ops;
+ subdrv->manager.overlay_ops = &drm_hdmi_overlay_ops;
+ subdrv->manager.display_ops = &drm_hdmi_display_ops;
+ subdrv->manager.dev = dev;
+
+ platform_set_drvdata(pdev, subdrv);
+
+ INIT_WORK(&ctx->work, exynos_drm_hdmi_late_probe);
+
+ schedule_work(&ctx->work);
+
+ return 0;
+}
+
+static int hdmi_runtime_suspend(struct device *dev)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ return 0;
+}
+
+static int hdmi_runtime_resume(struct device *dev)
+{
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ return 0;
+}
+
+static const struct dev_pm_ops hdmi_pm_ops = {
+ .runtime_suspend = hdmi_runtime_suspend,
+ .runtime_resume = hdmi_runtime_resume,
+};
+
+static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev)
+{
+ struct drm_hdmi_context *ctx = platform_get_drvdata(pdev);
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ exynos_drm_subdrv_unregister(&ctx->subdrv);
+ kfree(ctx);
+
+ return 0;
+}
+
+static struct platform_driver exynos_drm_common_hdmi_driver = {
+ .probe = exynos_drm_hdmi_probe,
+ .remove = __devexit_p(exynos_drm_hdmi_remove),
+ .driver = {
+ .name = "exynos-drm-hdmi",
+ .owner = THIS_MODULE,
+ .pm = &hdmi_pm_ops,
+ },
+};
+
+static int __init exynos_drm_hdmi_init(void)
+{
+ int ret;
+
+ DRM_DEBUG_KMS("%s\n", __FILE__);
+
+ ret = platform_driver_register(&exynos_drm_common_hdmi_driver);
+ if (ret) {
+ DRM_DEBUG_KMS("failed to register hdmi common driver.\n");
+ return ret;
+ }
+
+ return ret;
+}
+
+static void __exit exynos_drm_hdmi_exit(void)
+{
+ platform_driver_unregister(&exynos_drm_common_hdmi_driver);
+}
+
+module_init(exynos_drm_hdmi_init);
+module_exit(exynos_drm_hdmi_exit);
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM HDMI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
new file mode 100644
index 0000000..3c29f79
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -0,0 +1,73 @@
+/* exynos_drm_hdmi.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_HDMI_H_
+#define _EXYNOS_DRM_HDMI_H_
+
+/*
+ * exynos hdmi common context structure.
+ *
+ * @drm_dev: pointer to drm_device.
+ * @ctx: pointer to the context of specific device driver.
+ * this context should be hdmi_context or mixer_context.
+ */
+struct exynos_drm_hdmi_context {
+ struct drm_device *drm_dev;
+ void *ctx;
+};
+
+struct exynos_hdmi_display_ops {
+ bool (*is_connected)(void *ctx);
+ int (*get_edid)(void *ctx, struct drm_connector *connector,
+ u8 *edid, int len);
+ int (*check_timing)(void *ctx, void *timing);
+ int (*power_on)(void *ctx, int mode);
+};
+
+struct exynos_hdmi_manager_ops {
+ void (*mode_set)(void *ctx, void *mode);
+ void (*commit)(void *ctx);
+ void (*disable)(void *ctx);
+};
+
+struct exynos_hdmi_overlay_ops {
+ int (*enable_vblank)(void *ctx, int pipe);
+ void (*disable_vblank)(void *ctx);
+ void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
+ void (*win_commit)(void *ctx, int zpos);
+ void (*win_disable)(void *ctx, int zpos);
+};
+
+extern struct platform_driver hdmi_driver;
+extern struct platform_driver mixer_driver;
+
+void exynos_drm_display_ops_register(struct exynos_hdmi_display_ops
+ *display_ops);
+void exynos_drm_manager_ops_register(struct exynos_hdmi_manager_ops
+ *manager_ops);
+void exynos_drm_overlay_ops_register(struct exynos_hdmi_overlay_ops
+ *overlay_ops);
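+
+/*
+ * example (illustrative): a specific hdmi driver fills in its ops and
+ * registers them once, e.g.
+ *
+ *	static struct exynos_hdmi_display_ops display_ops = {
+ *		.is_connected	= hdmi_is_connected,
+ *		.get_edid	= hdmi_get_edid,
+ *		.check_timing	= hdmi_check_timing,
+ *		.power_on	= hdmi_display_power_on,
+ *	};
+ *
+ *	exynos_drm_display_ops_register(&display_ops);
+ */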
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
new file mode 100644
index 0000000..bdcf770
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+
+#include "exynos_drm.h"
+#include "exynos_drm_crtc.h"
+#include "exynos_drm_drv.h"
+#include "exynos_drm_encoder.h"
+
+struct exynos_plane {
+ struct drm_plane base;
+ struct exynos_drm_overlay overlay;
+ bool enabled;
+};
+
+static int
+exynos_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct exynos_plane *exynos_plane =
+ container_of(plane, struct exynos_plane, base);
+ struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
+ struct exynos_drm_crtc_pos pos;
+ unsigned int x = src_x >> 16;
+ unsigned int y = src_y >> 16;
+ int ret;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ memset(&pos, 0, sizeof(struct exynos_drm_crtc_pos));
+ pos.crtc_x = crtc_x;
+ pos.crtc_y = crtc_y;
+ pos.crtc_w = crtc_w;
+ pos.crtc_h = crtc_h;
+
+ pos.fb_x = x;
+ pos.fb_y = y;
+
+ /* TODO: scale feature */
+ ret = exynos_drm_overlay_update(overlay, fb, &crtc->mode, &pos);
+ if (ret < 0)
+ return ret;
+
+ exynos_drm_fn_encoder(crtc, overlay,
+ exynos_drm_encoder_crtc_mode_set);
+ exynos_drm_fn_encoder(crtc, &overlay->zpos,
+ exynos_drm_encoder_crtc_plane_commit);
+
+ exynos_plane->enabled = true;
+
+ return 0;
+}
+
+static int exynos_disable_plane(struct drm_plane *plane)
+{
+ struct exynos_plane *exynos_plane =
+ container_of(plane, struct exynos_plane, base);
+ struct exynos_drm_overlay *overlay = &exynos_plane->overlay;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (!exynos_plane->enabled)
+ return 0;
+
+ exynos_drm_fn_encoder(plane->crtc, &overlay->zpos,
+ exynos_drm_encoder_crtc_disable);
+
+ exynos_plane->enabled = false;
+ exynos_plane->overlay.zpos = DEFAULT_ZPOS;
+
+ return 0;
+}
+
+static void exynos_plane_destroy(struct drm_plane *plane)
+{
+ struct exynos_plane *exynos_plane =
+ container_of(plane, struct exynos_plane, base);
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ exynos_disable_plane(plane);
+ drm_plane_cleanup(plane);
+ kfree(exynos_plane);
+}
+
+static struct drm_plane_funcs exynos_plane_funcs = {
+ .update_plane = exynos_update_plane,
+ .disable_plane = exynos_disable_plane,
+ .destroy = exynos_plane_destroy,
+};
+
+int exynos_plane_init(struct drm_device *dev, unsigned int nr)
+{
+ struct exynos_plane *exynos_plane;
+ uint32_t possible_crtcs;
+
+ exynos_plane = kzalloc(sizeof(struct exynos_plane), GFP_KERNEL);
+ if (!exynos_plane)
+ return -ENOMEM;
+
+ /* all CRTCs are available */
+ possible_crtcs = (1 << MAX_CRTC) - 1;
+
+ exynos_plane->overlay.zpos = DEFAULT_ZPOS;
+
+ /* TODO: format */
+ return drm_plane_init(dev, &exynos_plane->base, possible_crtcs,
+ &exynos_plane_funcs, NULL, 0, false);
+}
+
+int exynos_plane_set_zpos_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_exynos_plane_set_zpos *zpos_req = data;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ struct exynos_plane *exynos_plane;
+ int ret = 0;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
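+ /*
+ * valid zpos values are 0 .. MAX_PLANE - 1, plus DEFAULT_ZPOS which
+ * selects the crtc's default window.
+ */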
+ if (zpos_req->zpos < 0 || zpos_req->zpos >= MAX_PLANE) {
+ if (zpos_req->zpos != DEFAULT_ZPOS) {
+ DRM_ERROR("zpos not within limits\n");
+ return -EINVAL;
+ }
+ }
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ obj = drm_mode_object_find(dev, zpos_req->plane_id,
+ DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ DRM_DEBUG_KMS("Unknown plane ID %d\n",
+ zpos_req->plane_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ plane = obj_to_plane(obj);
+ exynos_plane = container_of(plane, struct exynos_plane, base);
+
+ exynos_plane->overlay.zpos = zpos_req->zpos;
+
+out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.h b/drivers/gpu/drm/exynos/exynos_drm_plane.h
new file mode 100644
index 0000000..16b71f8
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.h
@@ -0,0 +1,14 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+int exynos_plane_init(struct drm_device *dev, unsigned int nr);
+int exynos_plane_set_zpos_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
new file mode 100644
index 0000000..3429d3f
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -0,0 +1,1176 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * Based on drivers/media/video/s5p-tv/hdmi_drv.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+#include "drm_edid.h"
+#include "drm_crtc_helper.h"
+
+#include "regs-hdmi.h"
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_hdmi.h"
+
+#include "exynos_hdmi.h"
+
+#define HDMI_OVERLAY_NUMBER 3
+#define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev))
+
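+/*
+ * hdmi phy configuration blobs, one per pixel clock, sent to the phy
+ * over i2c in hdmiphy_conf_apply(); the suffix encodes the pixel clock
+ * in MHz (e.g. conf27_027 is 27.027 MHz).
+ */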
+static const u8 hdmiphy_conf27[32] = {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x1C, 0x30, 0x40,
+ 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 hdmiphy_conf27_027[32] = {
+ 0x01, 0x05, 0x00, 0xD4, 0x10, 0x9C, 0x09, 0x64,
+ 0x6B, 0x10, 0x02, 0x51, 0xDF, 0xF2, 0x54, 0x87,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xE3, 0x26, 0x00, 0x00, 0x00, 0x00,
+};
+
+static const u8 hdmiphy_conf74_175[32] = {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xef, 0x5B,
+ 0x6D, 0x10, 0x01, 0x51, 0xef, 0xF3, 0x54, 0xb9,
+ 0x84, 0x00, 0x30, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xa5, 0x26, 0x01, 0x00, 0x00, 0x00,
+};
+
+static const u8 hdmiphy_conf74_25[32] = {
+ 0x01, 0x05, 0x00, 0xd8, 0x10, 0x9c, 0xf8, 0x40,
+ 0x6a, 0x10, 0x01, 0x51, 0xff, 0xf1, 0x54, 0xba,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xe0,
+ 0x22, 0x40, 0xa4, 0x26, 0x01, 0x00, 0x00, 0x00,
+};
+
+static const u8 hdmiphy_conf148_5[32] = {
+ 0x01, 0x05, 0x00, 0xD8, 0x10, 0x9C, 0xf8, 0x40,
+ 0x6A, 0x18, 0x00, 0x51, 0xff, 0xF1, 0x54, 0xba,
+ 0x84, 0x00, 0x10, 0x38, 0x00, 0x08, 0x10, 0xE0,
+ 0x22, 0x40, 0xa4, 0x26, 0x02, 0x00, 0x00, 0x00,
+};
+
+struct hdmi_tg_regs {
+ u8 cmd;
+ u8 h_fsz_l;
+ u8 h_fsz_h;
+ u8 hact_st_l;
+ u8 hact_st_h;
+ u8 hact_sz_l;
+ u8 hact_sz_h;
+ u8 v_fsz_l;
+ u8 v_fsz_h;
+ u8 vsync_l;
+ u8 vsync_h;
+ u8 vsync2_l;
+ u8 vsync2_h;
+ u8 vact_st_l;
+ u8 vact_st_h;
+ u8 vact_sz_l;
+ u8 vact_sz_h;
+ u8 field_chg_l;
+ u8 field_chg_h;
+ u8 vact_st2_l;
+ u8 vact_st2_h;
+ u8 vsync_top_hdmi_l;
+ u8 vsync_top_hdmi_h;
+ u8 vsync_bot_hdmi_l;
+ u8 vsync_bot_hdmi_h;
+ u8 field_top_hdmi_l;
+ u8 field_top_hdmi_h;
+ u8 field_bot_hdmi_l;
+ u8 field_bot_hdmi_h;
+};
+
+struct hdmi_core_regs {
+ u8 h_blank[2];
+ u8 v_blank[3];
+ u8 h_v_line[3];
+ u8 vsync_pol[1];
+ u8 int_pro_mode[1];
+ u8 v_blank_f[3];
+ u8 h_sync_gen[3];
+ u8 v_sync_gen1[3];
+ u8 v_sync_gen2[3];
+ u8 v_sync_gen3[3];
+};
+
+struct hdmi_preset_conf {
+ struct hdmi_core_regs core;
+ struct hdmi_tg_regs tg;
+};
+
+static const struct hdmi_preset_conf hdmi_conf_480p = {
+ .core = {
+ .h_blank = {0x8a, 0x00},
+ .v_blank = {0x0d, 0x6a, 0x01},
+ .h_v_line = {0x0d, 0xa2, 0x35},
+ .vsync_pol = {0x01},
+ .int_pro_mode = {0x00},
+ .v_blank_f = {0x00, 0x00, 0x00},
+ .h_sync_gen = {0x0e, 0x30, 0x11},
+ .v_sync_gen1 = {0x0f, 0x90, 0x00},
+ /* other don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x5a, 0x03, /* h_fsz */
+ 0x8a, 0x00, 0xd0, 0x02, /* hact */
+ 0x0d, 0x02, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x2d, 0x00, 0xe0, 0x01, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x49, 0x02, /* vact_st2 */
+ 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_720p60 = {
+ .core = {
+ .h_blank = {0x72, 0x01},
+ .v_blank = {0xee, 0xf2, 0x00},
+ .h_v_line = {0xee, 0x22, 0x67},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x00},
+ .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
+ .h_sync_gen = {0x6c, 0x50, 0x02},
+ .v_sync_gen1 = {0x0a, 0x50, 0x00},
+ .v_sync_gen2 = {0x01, 0x10, 0x00},
+ .v_sync_gen3 = {0x01, 0x10, 0x00},
+ /* other don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x72, 0x06, /* h_fsz */
+ 0x71, 0x01, 0x01, 0x05, /* hact */
+ 0xee, 0x02, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x1e, 0x00, 0xd0, 0x02, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x49, 0x02, /* vact_st2 */
+ 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_1080i50 = {
+ .core = {
+ .h_blank = {0xd0, 0x02},
+ .v_blank = {0x32, 0xB2, 0x00},
+ .h_v_line = {0x65, 0x04, 0xa5},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x01},
+ .v_blank_f = {0x49, 0x2A, 0x23},
+ .h_sync_gen = {0x0E, 0xEA, 0x08},
+ .v_sync_gen1 = {0x07, 0x20, 0x00},
+ .v_sync_gen2 = {0x39, 0x42, 0x23},
+ .v_sync_gen3 = {0x38, 0x87, 0x73},
+ /* other don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x50, 0x0A, /* h_fsz */
+ 0xCF, 0x02, 0x81, 0x07, /* hact */
+ 0x65, 0x04, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x16, 0x00, 0x1c, 0x02, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x49, 0x02, /* vact_st2 */
+ 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_1080p50 = {
+ .core = {
+ .h_blank = {0xd0, 0x02},
+ .v_blank = {0x65, 0x6c, 0x01},
+ .h_v_line = {0x65, 0x04, 0xa5},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x00},
+ .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
+ .h_sync_gen = {0x0e, 0xea, 0x08},
+ .v_sync_gen1 = {0x09, 0x40, 0x00},
+ .v_sync_gen2 = {0x01, 0x10, 0x00},
+ .v_sync_gen3 = {0x01, 0x10, 0x00},
+ /* other don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x50, 0x0A, /* h_fsz */
+ 0xCF, 0x02, 0x81, 0x07, /* hact */
+ 0x65, 0x04, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x2d, 0x00, 0x38, 0x04, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x48, 0x02, /* vact_st2 */
+ 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_1080i60 = {
+ .core = {
+ .h_blank = {0x18, 0x01},
+ .v_blank = {0x32, 0xB2, 0x00},
+ .h_v_line = {0x65, 0x84, 0x89},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x01},
+ .v_blank_f = {0x49, 0x2A, 0x23},
+ .h_sync_gen = {0x56, 0x08, 0x02},
+ .v_sync_gen1 = {0x07, 0x20, 0x00},
+ .v_sync_gen2 = {0x39, 0x42, 0x23},
+ .v_sync_gen3 = {0xa4, 0x44, 0x4a},
+ /* other don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x98, 0x08, /* h_fsz */
+ 0x17, 0x01, 0x81, 0x07, /* hact */
+ 0x65, 0x04, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x16, 0x00, 0x1c, 0x02, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x49, 0x02, /* vact_st2 */
+ 0x01, 0x00, 0x33, 0x02, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ },
+};
+
+static const struct hdmi_preset_conf hdmi_conf_1080p60 = {
+ .core = {
+ .h_blank = {0x18, 0x01},
+ .v_blank = {0x65, 0x6c, 0x01},
+ .h_v_line = {0x65, 0x84, 0x89},
+ .vsync_pol = {0x00},
+ .int_pro_mode = {0x00},
+ .v_blank_f = {0x00, 0x00, 0x00}, /* don't care */
+ .h_sync_gen = {0x56, 0x08, 0x02},
+ .v_sync_gen1 = {0x09, 0x40, 0x00},
+ .v_sync_gen2 = {0x01, 0x10, 0x00},
+ .v_sync_gen3 = {0x01, 0x10, 0x00},
+ /* other don't care */
+ },
+ .tg = {
+ 0x00, /* cmd */
+ 0x98, 0x08, /* h_fsz */
+ 0x17, 0x01, 0x81, 0x07, /* hact */
+ 0x65, 0x04, /* v_fsz */
+ 0x01, 0x00, 0x33, 0x02, /* vsync */
+ 0x2d, 0x00, 0x38, 0x04, /* vact */
+ 0x33, 0x02, /* field_chg */
+ 0x48, 0x02, /* vact_st2 */
+ 0x01, 0x00, 0x01, 0x00, /* vsync top/bot */
+ 0x01, 0x00, 0x33, 0x02, /* field top/bot */
+ },
+};
+
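+/* supported modes: width, height, vrefresh, interlaced, phy config, timing preset */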
+static const struct hdmi_conf hdmi_confs[] = {
+ { 1280, 720, 60, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
+ { 1280, 720, 50, false, hdmiphy_conf74_25, &hdmi_conf_720p60 },
+ { 720, 480, 60, false, hdmiphy_conf27_027, &hdmi_conf_480p },
+ { 1920, 1080, 50, true, hdmiphy_conf74_25, &hdmi_conf_1080i50 },
+ { 1920, 1080, 50, false, hdmiphy_conf148_5, &hdmi_conf_1080p50 },
+ { 1920, 1080, 60, true, hdmiphy_conf74_25, &hdmi_conf_1080i60 },
+ { 1920, 1080, 60, false, hdmiphy_conf148_5, &hdmi_conf_1080p60 },
+};
+
+
+static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
+{
+ return readl(hdata->regs + reg_id);
+}
+
+static inline void hdmi_reg_writeb(struct hdmi_context *hdata,
+ u32 reg_id, u8 value)
+{
+ writeb(value, hdata->regs + reg_id);
+}
+
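+/* read-modify-write helper: updates only the bits selected by @mask */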
+static inline void hdmi_reg_writemask(struct hdmi_context *hdata,
+ u32 reg_id, u32 value, u32 mask)
+{
+ u32 old = readl(hdata->regs + reg_id);
+ value = (value & mask) | (old & ~mask);
+ writel(value, hdata->regs + reg_id);
+}
+
+static void hdmi_regs_dump(struct hdmi_context *hdata, char *prefix)
+{
+#define DUMPREG(reg_id) \
+ DRM_DEBUG_KMS("%s:" #reg_id " = %08x\n", prefix, \
+ readl(hdata->regs + reg_id))
+ DRM_DEBUG_KMS("%s: ---- CONTROL REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_INTC_FLAG);
+ DUMPREG(HDMI_INTC_CON);
+ DUMPREG(HDMI_HPD_STATUS);
+ DUMPREG(HDMI_PHY_RSTOUT);
+ DUMPREG(HDMI_PHY_VPLL);
+ DUMPREG(HDMI_PHY_CMU);
+ DUMPREG(HDMI_CORE_RSTOUT);
+
+ DRM_DEBUG_KMS("%s: ---- CORE REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_CON_0);
+ DUMPREG(HDMI_CON_1);
+ DUMPREG(HDMI_CON_2);
+ DUMPREG(HDMI_SYS_STATUS);
+ DUMPREG(HDMI_PHY_STATUS);
+ DUMPREG(HDMI_STATUS_EN);
+ DUMPREG(HDMI_HPD);
+ DUMPREG(HDMI_MODE_SEL);
+ DUMPREG(HDMI_HPD_GEN);
+ DUMPREG(HDMI_DC_CONTROL);
+ DUMPREG(HDMI_VIDEO_PATTERN_GEN);
+
+ DRM_DEBUG_KMS("%s: ---- CORE SYNC REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_H_BLANK_0);
+ DUMPREG(HDMI_H_BLANK_1);
+ DUMPREG(HDMI_V_BLANK_0);
+ DUMPREG(HDMI_V_BLANK_1);
+ DUMPREG(HDMI_V_BLANK_2);
+ DUMPREG(HDMI_H_V_LINE_0);
+ DUMPREG(HDMI_H_V_LINE_1);
+ DUMPREG(HDMI_H_V_LINE_2);
+ DUMPREG(HDMI_VSYNC_POL);
+ DUMPREG(HDMI_INT_PRO_MODE);
+ DUMPREG(HDMI_V_BLANK_F_0);
+ DUMPREG(HDMI_V_BLANK_F_1);
+ DUMPREG(HDMI_V_BLANK_F_2);
+ DUMPREG(HDMI_H_SYNC_GEN_0);
+ DUMPREG(HDMI_H_SYNC_GEN_1);
+ DUMPREG(HDMI_H_SYNC_GEN_2);
+ DUMPREG(HDMI_V_SYNC_GEN_1_0);
+ DUMPREG(HDMI_V_SYNC_GEN_1_1);
+ DUMPREG(HDMI_V_SYNC_GEN_1_2);
+ DUMPREG(HDMI_V_SYNC_GEN_2_0);
+ DUMPREG(HDMI_V_SYNC_GEN_2_1);
+ DUMPREG(HDMI_V_SYNC_GEN_2_2);
+ DUMPREG(HDMI_V_SYNC_GEN_3_0);
+ DUMPREG(HDMI_V_SYNC_GEN_3_1);
+ DUMPREG(HDMI_V_SYNC_GEN_3_2);
+
+ DRM_DEBUG_KMS("%s: ---- TG REGISTERS ----\n", prefix);
+ DUMPREG(HDMI_TG_CMD);
+ DUMPREG(HDMI_TG_H_FSZ_L);
+ DUMPREG(HDMI_TG_H_FSZ_H);
+ DUMPREG(HDMI_TG_HACT_ST_L);
+ DUMPREG(HDMI_TG_HACT_ST_H);
+ DUMPREG(HDMI_TG_HACT_SZ_L);
+ DUMPREG(HDMI_TG_HACT_SZ_H);
+ DUMPREG(HDMI_TG_V_FSZ_L);
+ DUMPREG(HDMI_TG_V_FSZ_H);
+ DUMPREG(HDMI_TG_VSYNC_L);
+ DUMPREG(HDMI_TG_VSYNC_H);
+ DUMPREG(HDMI_TG_VSYNC2_L);
+ DUMPREG(HDMI_TG_VSYNC2_H);
+ DUMPREG(HDMI_TG_VACT_ST_L);
+ DUMPREG(HDMI_TG_VACT_ST_H);
+ DUMPREG(HDMI_TG_VACT_SZ_L);
+ DUMPREG(HDMI_TG_VACT_SZ_H);
+ DUMPREG(HDMI_TG_FIELD_CHG_L);
+ DUMPREG(HDMI_TG_FIELD_CHG_H);
+ DUMPREG(HDMI_TG_VACT_ST2_L);
+ DUMPREG(HDMI_TG_VACT_ST2_H);
+ DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_L);
+ DUMPREG(HDMI_TG_VSYNC_TOP_HDMI_H);
+ DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_L);
+ DUMPREG(HDMI_TG_VSYNC_BOT_HDMI_H);
+ DUMPREG(HDMI_TG_FIELD_TOP_HDMI_L);
+ DUMPREG(HDMI_TG_FIELD_TOP_HDMI_H);
+ DUMPREG(HDMI_TG_FIELD_BOT_HDMI_L);
+ DUMPREG(HDMI_TG_FIELD_BOT_HDMI_H);
+#undef DUMPREG
+}
+
+static int hdmi_conf_index(struct drm_display_mode *mode)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hdmi_confs); ++i)
+ if (hdmi_confs[i].width == mode->hdisplay &&
+ hdmi_confs[i].height == mode->vdisplay &&
+ hdmi_confs[i].vrefresh == mode->vrefresh &&
+ hdmi_confs[i].interlace ==
+ ((mode->flags & DRM_MODE_FLAG_INTERLACE) ?
+ true : false))
+ return i;
+
+ return -1;
+}
+
+static bool hdmi_is_connected(void *ctx)
+{
+ struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+ u32 val = hdmi_reg_read(hdata, HDMI_HPD_STATUS);
+
+ if (val)
+ return true;
+
+ return false;
+}
+
+static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
+ u8 *edid, int len)
+{
+ struct edid *raw_edid;
+ struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (!hdata->ddc_port)
+ return -ENODEV;
+
+ raw_edid = drm_get_edid(connector, hdata->ddc_port->adapter);
+ if (raw_edid) {
+ memcpy(edid, raw_edid, min((1 + raw_edid->extensions)
+ * EDID_LENGTH, len));
+ DRM_DEBUG_KMS("width[%d] x height[%d]\n",
+ raw_edid->width_cm, raw_edid->height_cm);
+ } else {
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int hdmi_check_timing(void *ctx, void *timing)
+{
+ struct fb_videomode *check_timing = timing;
+ int i;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ DRM_DEBUG_KMS("[%d]x[%d] [%d]Hz [%x]\n", check_timing->xres,
+ check_timing->yres, check_timing->refresh,
+ check_timing->vmode);
+
+ for (i = 0; i < ARRAY_SIZE(hdmi_confs); ++i)
+ if (hdmi_confs[i].width == check_timing->xres &&
+ hdmi_confs[i].height == check_timing->yres &&
+ hdmi_confs[i].vrefresh == check_timing->refresh &&
+ hdmi_confs[i].interlace ==
+ ((check_timing->vmode & FB_VMODE_INTERLACED) ?
+ true : false))
+ return 0;
+
+ return -EINVAL;
+}
+
+static int hdmi_display_power_on(void *ctx, int mode)
+{
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ DRM_DEBUG_KMS("hdmi [on]\n");
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ break;
+ case DRM_MODE_DPMS_OFF:
+ DRM_DEBUG_KMS("hdmi [off]\n");
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static struct exynos_hdmi_display_ops display_ops = {
+ .is_connected = hdmi_is_connected,
+ .get_edid = hdmi_get_edid,
+ .check_timing = hdmi_check_timing,
+ .power_on = hdmi_display_power_on,
+};
+
+static void hdmi_conf_reset(struct hdmi_context *hdata)
+{
+ /* disable hpd handle for drm */
+ hdata->hpd_handle = false;
+
+ /* resetting HDMI core */
+ hdmi_reg_writemask(hdata, HDMI_CORE_RSTOUT, 0, HDMI_CORE_SW_RSTOUT);
+ mdelay(10);
+ hdmi_reg_writemask(hdata, HDMI_CORE_RSTOUT, ~0, HDMI_CORE_SW_RSTOUT);
+ mdelay(10);
+
+ /* enable hpd handle for drm */
+ hdata->hpd_handle = true;
+}
+
+static void hdmi_conf_init(struct hdmi_context *hdata)
+{
+ /* disable hpd handle for drm */
+ hdata->hpd_handle = false;
+
+ /* enable HPD interrupts */
+ hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
+ HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
+ mdelay(10);
+ hdmi_reg_writemask(hdata, HDMI_INTC_CON, ~0, HDMI_INTC_EN_GLOBAL |
+ HDMI_INTC_EN_HPD_PLUG | HDMI_INTC_EN_HPD_UNPLUG);
+
+ /* choose HDMI mode */
+ hdmi_reg_writemask(hdata, HDMI_MODE_SEL,
+ HDMI_MODE_HDMI_EN, HDMI_MODE_MASK);
+ /* disable bluescreen */
+ hdmi_reg_writemask(hdata, HDMI_CON_0, 0, HDMI_BLUE_SCR_EN);
+ /* choose a bluescreen color */
+ hdmi_reg_writeb(hdata, HDMI_BLUE_SCREEN_0, 0x12);
+ hdmi_reg_writeb(hdata, HDMI_BLUE_SCREEN_1, 0x34);
+ hdmi_reg_writeb(hdata, HDMI_BLUE_SCREEN_2, 0x56);
+ /* enable AVI packet every vsync, fixes purple line problem */
+ hdmi_reg_writeb(hdata, HDMI_AVI_CON, 0x02);
+ /* force RGB, look to CEA-861-D, table 7 for more detail */
+ hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(0), 0 << 5);
+ hdmi_reg_writemask(hdata, HDMI_CON_1, 0x10 << 5, 0x11 << 5);
+
+ hdmi_reg_writeb(hdata, HDMI_SPD_CON, 0x02);
+ hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
+ hdmi_reg_writeb(hdata, HDMI_ACR_CON, 0x04);
+
+ /* enable hpd handle for drm */
+ hdata->hpd_handle = true;
+}
+
+static void hdmi_timing_apply(struct hdmi_context *hdata,
+ const struct hdmi_preset_conf *conf)
+{
+ const struct hdmi_core_regs *core = &conf->core;
+ const struct hdmi_tg_regs *tg = &conf->tg;
+ int tries;
+
+ /* setting core registers */
+ hdmi_reg_writeb(hdata, HDMI_H_BLANK_0, core->h_blank[0]);
+ hdmi_reg_writeb(hdata, HDMI_H_BLANK_1, core->h_blank[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_0, core->v_blank[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_1, core->v_blank[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_2, core->v_blank[2]);
+ hdmi_reg_writeb(hdata, HDMI_H_V_LINE_0, core->h_v_line[0]);
+ hdmi_reg_writeb(hdata, HDMI_H_V_LINE_1, core->h_v_line[1]);
+ hdmi_reg_writeb(hdata, HDMI_H_V_LINE_2, core->h_v_line[2]);
+ hdmi_reg_writeb(hdata, HDMI_VSYNC_POL, core->vsync_pol[0]);
+ hdmi_reg_writeb(hdata, HDMI_INT_PRO_MODE, core->int_pro_mode[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F_0, core->v_blank_f[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F_1, core->v_blank_f[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_BLANK_F_2, core->v_blank_f[2]);
+ hdmi_reg_writeb(hdata, HDMI_H_SYNC_GEN_0, core->h_sync_gen[0]);
+ hdmi_reg_writeb(hdata, HDMI_H_SYNC_GEN_1, core->h_sync_gen[1]);
+ hdmi_reg_writeb(hdata, HDMI_H_SYNC_GEN_2, core->h_sync_gen[2]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_1_0, core->v_sync_gen1[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_1_1, core->v_sync_gen1[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_1_2, core->v_sync_gen1[2]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_2_0, core->v_sync_gen2[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_2_1, core->v_sync_gen2[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_2_2, core->v_sync_gen2[2]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_3_0, core->v_sync_gen3[0]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_3_1, core->v_sync_gen3[1]);
+ hdmi_reg_writeb(hdata, HDMI_V_SYNC_GEN_3_2, core->v_sync_gen3[2]);
+ /* Timing generator registers */
+ hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_L, tg->h_fsz_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_H_FSZ_H, tg->h_fsz_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_L, tg->hact_st_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_ST_H, tg->hact_st_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_L, tg->hact_sz_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_HACT_SZ_H, tg->hact_sz_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_L, tg->v_fsz_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_V_FSZ_H, tg->v_fsz_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_L, tg->vsync_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_H, tg->vsync_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_L, tg->vsync2_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC2_H, tg->vsync2_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_L, tg->vact_st_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST_H, tg->vact_st_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_L, tg->vact_sz_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_SZ_H, tg->vact_sz_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_L, tg->field_chg_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_CHG_H, tg->field_chg_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_L, tg->vact_st2_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VACT_ST2_H, tg->vact_st2_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_L, tg->vsync_top_hdmi_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_TOP_HDMI_H, tg->vsync_top_hdmi_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_L, tg->vsync_bot_hdmi_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_VSYNC_BOT_HDMI_H, tg->vsync_bot_hdmi_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_L, tg->field_top_hdmi_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_TOP_HDMI_H, tg->field_top_hdmi_h);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_L, tg->field_bot_hdmi_l);
+ hdmi_reg_writeb(hdata, HDMI_TG_FIELD_BOT_HDMI_H, tg->field_bot_hdmi_h);
+
+ /* waiting for HDMIPHY's PLL to get to steady state */
+ for (tries = 100; tries; --tries) {
+ u32 val = hdmi_reg_read(hdata, HDMI_PHY_STATUS);
+ if (val & HDMI_PHY_STATUS_READY)
+ break;
+ mdelay(1);
+ }
+ /* steady state not achieved */
+ if (tries == 0) {
+ DRM_ERROR("hdmiphy's pll could not reach steady state.\n");
+ hdmi_regs_dump(hdata, "timing apply");
+ }
+
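+ /* with the PHY PLL locked, switch sclk_hdmi over to the PHY output clock */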
+ clk_disable(hdata->res.sclk_hdmi);
+ clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_hdmiphy);
+ clk_enable(hdata->res.sclk_hdmi);
+
+ /* enable HDMI and timing generator */
+ hdmi_reg_writemask(hdata, HDMI_CON_0, ~0, HDMI_EN);
+ if (core->int_pro_mode[0])
+ hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN |
+ HDMI_FIELD_EN);
+ else
+ hdmi_reg_writemask(hdata, HDMI_TG_CMD, ~0, HDMI_TG_EN);
+}
+
+static void hdmiphy_conf_reset(struct hdmi_context *hdata)
+{
+ u8 buffer[2];
+
+ clk_disable(hdata->res.sclk_hdmi);
+ clk_set_parent(hdata->res.sclk_hdmi, hdata->res.sclk_pixel);
+ clk_enable(hdata->res.sclk_hdmi);
+
+ /* operation mode */
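+ /* counterpart of the 0x1f/0x80 enable sequence in hdmiphy_conf_apply() */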
+ buffer[0] = 0x1f;
+ buffer[1] = 0x00;
+
+ if (hdata->hdmiphy_port)
+ i2c_master_send(hdata->hdmiphy_port, buffer, 2);
+
+ /* reset hdmiphy */
+ hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT, ~0, HDMI_PHY_SW_RSTOUT);
+ mdelay(10);
+ hdmi_reg_writemask(hdata, HDMI_PHY_RSTOUT, 0, HDMI_PHY_SW_RSTOUT);
+ mdelay(10);
+}
+
+static void hdmiphy_conf_apply(struct hdmi_context *hdata)
+{
+ u8 buffer[32];
+ u8 operation[2];
+ u8 read_buffer[32] = {0, };
+ int ret;
+ int i;
+
+ if (!hdata->hdmiphy_port) {
+ DRM_ERROR("hdmiphy is not attached\n");
+ return;
+ }
+
+ /* pixel clock */
+ memcpy(buffer, hdmi_confs[hdata->cur_conf].hdmiphy_data, 32);
+ ret = i2c_master_send(hdata->hdmiphy_port, buffer, 32);
+ if (ret != 32) {
+ DRM_ERROR("failed to configure HDMIPHY via I2C\n");
+ return;
+ }
+
+ mdelay(10);
+
+ /* operation mode */
+ operation[0] = 0x1f;
+ operation[1] = 0x80;
+
+ ret = i2c_master_send(hdata->hdmiphy_port, operation, 2);
+ if (ret != 2) {
+ DRM_ERROR("failed to enable hdmiphy\n");
+ return;
+ }
+
+ ret = i2c_master_recv(hdata->hdmiphy_port, read_buffer, 32);
+ if (ret < 0) {
+ DRM_ERROR("failed to read hdmiphy config\n");
+ return;
+ }
+
+ for (i = 0; i < ret; i++)
+ DRM_DEBUG_KMS("hdmiphy[0x%02x] write[0x%02x] - "
+ "recv [0x%02x]\n", i, buffer[i], read_buffer[i]);
+}
+
+static void hdmi_conf_apply(struct hdmi_context *hdata)
+{
+ const struct hdmi_preset_conf *conf =
+ hdmi_confs[hdata->cur_conf].conf;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ hdmiphy_conf_reset(hdata);
+ hdmiphy_conf_apply(hdata);
+
+ hdmi_conf_reset(hdata);
+ hdmi_conf_init(hdata);
+
+ /* setting core registers */
+ hdmi_timing_apply(hdata, conf);
+
+ hdmi_regs_dump(hdata, "start");
+}
+
+static void hdmi_mode_set(void *ctx, void *mode)
+{
+ struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+ int conf_idx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ conf_idx = hdmi_conf_index(mode);
+ if (conf_idx >= 0 && conf_idx < ARRAY_SIZE(hdmi_confs))
+ hdata->cur_conf = conf_idx;
+ else
+ DRM_DEBUG_KMS("not supported mode\n");
+}
+
+static void hdmi_commit(void *ctx)
+{
+ struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ hdmi_conf_apply(hdata);
+
+ hdata->enabled = true;
+}
+
+static void hdmi_disable(void *ctx)
+{
+ struct hdmi_context *hdata = (struct hdmi_context *)ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (hdata->enabled) {
+ hdmiphy_conf_reset(hdata);
+ hdmi_conf_reset(hdata);
+ }
+}
+
+static struct exynos_hdmi_manager_ops manager_ops = {
+ .mode_set = hdmi_mode_set,
+ .commit = hdmi_commit,
+ .disable = hdmi_disable,
+};
+
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+static void hdmi_hotplug_func(struct work_struct *work)
+{
+ struct hdmi_context *hdata =
+ container_of(work, struct hdmi_context, hotplug_work);
+ struct exynos_drm_hdmi_context *ctx =
+ (struct exynos_drm_hdmi_context *)hdata->parent_ctx;
+
+ drm_helper_hpd_irq_event(ctx->drm_dev);
+}
+
+static irqreturn_t hdmi_irq_handler(int irq, void *arg)
+{
+ struct exynos_drm_hdmi_context *ctx = arg;
+ struct hdmi_context *hdata = (struct hdmi_context *)ctx->ctx;
+ u32 intc_flag;
+
+ intc_flag = hdmi_reg_read(hdata, HDMI_INTC_FLAG);
+ /* clearing flags for HPD plug/unplug */
+ if (intc_flag & HDMI_INTC_FLAG_HPD_UNPLUG) {
+ DRM_DEBUG_KMS("unplugged, handling:%d\n", hdata->hpd_handle);
+ hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
+ HDMI_INTC_FLAG_HPD_UNPLUG);
+ }
+ if (intc_flag & HDMI_INTC_FLAG_HPD_PLUG) {
+ DRM_DEBUG_KMS("plugged, handling:%d\n", hdata->hpd_handle);
+ hdmi_reg_writemask(hdata, HDMI_INTC_FLAG, ~0,
+ HDMI_INTC_FLAG_HPD_PLUG);
+ }
+
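+ /* hotplug is handled in process context; queue work only while hpd handling is enabled */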
+ if (ctx->drm_dev && hdata->hpd_handle)
+ queue_work(hdata->wq, &hdata->hotplug_work);
+
+ return IRQ_HANDLED;
+}
+
+static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
+{
+ struct device *dev = hdata->dev;
+ struct hdmi_resources *res = &hdata->res;
+ static char *supply[] = {
+ "hdmi-en",
+ "vdd",
+ "vdd_osc",
+ "vdd_pll",
+ };
+ int i, ret;
+
+ DRM_DEBUG_KMS("HDMI resource init\n");
+
+ memset(res, 0, sizeof *res);
+
+ /* get clocks, power */
+ res->hdmi = clk_get(dev, "hdmi");
+ if (IS_ERR_OR_NULL(res->hdmi)) {
+ DRM_ERROR("failed to get clock 'hdmi'\n");
+ goto fail;
+ }
+ res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+ if (IS_ERR_OR_NULL(res->sclk_hdmi)) {
+ DRM_ERROR("failed to get clock 'sclk_hdmi'\n");
+ goto fail;
+ }
+ res->sclk_pixel = clk_get(dev, "sclk_pixel");
+ if (IS_ERR_OR_NULL(res->sclk_pixel)) {
+ DRM_ERROR("failed to get clock 'sclk_pixel'\n");
+ goto fail;
+ }
+ res->sclk_hdmiphy = clk_get(dev, "sclk_hdmiphy");
+ if (IS_ERR_OR_NULL(res->sclk_hdmiphy)) {
+ DRM_ERROR("failed to get clock 'sclk_hdmiphy'\n");
+ goto fail;
+ }
+ res->hdmiphy = clk_get(dev, "hdmiphy");
+ if (IS_ERR_OR_NULL(res->hdmiphy)) {
+ DRM_ERROR("failed to get clock 'hdmiphy'\n");
+ goto fail;
+ }
+
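+ /* use the pixel clock as sclk_hdmi source until the PHY PLL is configured */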
+ clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
+
+ res->regul_bulk = kzalloc(ARRAY_SIZE(supply) *
+ sizeof res->regul_bulk[0], GFP_KERNEL);
+ if (!res->regul_bulk) {
+ DRM_ERROR("failed to get memory for regulators\n");
+ goto fail;
+ }
+ for (i = 0; i < ARRAY_SIZE(supply); ++i) {
+ res->regul_bulk[i].supply = supply[i];
+ res->regul_bulk[i].consumer = NULL;
+ }
+ ret = regulator_bulk_get(dev, ARRAY_SIZE(supply), res->regul_bulk);
+ if (ret) {
+ DRM_ERROR("failed to get regulators\n");
+ goto fail;
+ }
+ res->regul_count = ARRAY_SIZE(supply);
+
+ return 0;
+fail:
+ DRM_ERROR("HDMI resource init - failed\n");
+ return -ENODEV;
+}
+
+static int hdmi_resources_cleanup(struct hdmi_context *hdata)
+{
+ struct hdmi_resources *res = &hdata->res;
+
+ regulator_bulk_free(res->regul_count, res->regul_bulk);
+ /* kfree is NULL-safe */
+ kfree(res->regul_bulk);
+ if (!IS_ERR_OR_NULL(res->hdmiphy))
+ clk_put(res->hdmiphy);
+ if (!IS_ERR_OR_NULL(res->sclk_hdmiphy))
+ clk_put(res->sclk_hdmiphy);
+ if (!IS_ERR_OR_NULL(res->sclk_pixel))
+ clk_put(res->sclk_pixel);
+ if (!IS_ERR_OR_NULL(res->sclk_hdmi))
+ clk_put(res->sclk_hdmi);
+ if (!IS_ERR_OR_NULL(res->hdmi))
+ clk_put(res->hdmi);
+ memset(res, 0, sizeof *res);
+
+ return 0;
+}
+
+static void hdmi_resource_poweron(struct hdmi_context *hdata)
+{
+ struct hdmi_resources *res = &hdata->res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ /* turn HDMI power on */
+ regulator_bulk_enable(res->regul_count, res->regul_bulk);
+ /* power-on hdmi physical interface */
+ clk_enable(res->hdmiphy);
+ /* turn clocks on */
+ clk_enable(res->hdmi);
+ clk_enable(res->sclk_hdmi);
+
+ hdmiphy_conf_reset(hdata);
+ hdmi_conf_reset(hdata);
+ hdmi_conf_init(hdata);
+
+}
+
+static void hdmi_resource_poweroff(struct hdmi_context *hdata)
+{
+ struct hdmi_resources *res = &hdata->res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ /* turn clocks off */
+ clk_disable(res->sclk_hdmi);
+ clk_disable(res->hdmi);
+ /* power-off hdmiphy */
+ clk_disable(res->hdmiphy);
+ /* turn HDMI power off */
+ regulator_bulk_disable(res->regul_count, res->regul_bulk);
+}
+
+static int hdmi_runtime_suspend(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ hdmi_resource_poweroff((struct hdmi_context *)ctx->ctx);
+
+ return 0;
+}
+
+static int hdmi_runtime_resume(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *ctx = get_hdmi_context(dev);
+
+ DRM_DEBUG_KMS("%s\n", __func__);
+
+ hdmi_resource_poweron((struct hdmi_context *)ctx->ctx);
+
+ return 0;
+}
+
+static const struct dev_pm_ops hdmi_pm_ops = {
+ .runtime_suspend = hdmi_runtime_suspend,
+ .runtime_resume = hdmi_runtime_resume,
+};
+
+static struct i2c_client *hdmi_ddc, *hdmi_hdmiphy;
+
+void hdmi_attach_ddc_client(struct i2c_client *ddc)
+{
+ if (ddc)
+ hdmi_ddc = ddc;
+}
+EXPORT_SYMBOL(hdmi_attach_ddc_client);
+
+void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy)
+{
+ if (hdmiphy)
+ hdmi_hdmiphy = hdmiphy;
+}
+EXPORT_SYMBOL(hdmi_attach_hdmiphy_client);
+
+static int __devinit hdmi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx;
+ struct hdmi_context *hdata;
+ struct exynos_drm_hdmi_pdata *pdata;
+ struct resource *res;
+ int ret;
+
+ DRM_DEBUG_KMS("[%d]\n", __LINE__);
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ DRM_ERROR("no platform data specified\n");
+ return -EINVAL;
+ }
+
+ drm_hdmi_ctx = kzalloc(sizeof(*drm_hdmi_ctx), GFP_KERNEL);
+ if (!drm_hdmi_ctx) {
+ DRM_ERROR("failed to allocate common hdmi context.\n");
+ return -ENOMEM;
+ }
+
+ hdata = kzalloc(sizeof(struct hdmi_context), GFP_KERNEL);
+ if (!hdata) {
+ DRM_ERROR("out of memory\n");
+ kfree(drm_hdmi_ctx);
+ return -ENOMEM;
+ }
+
+ drm_hdmi_ctx->ctx = (void *)hdata;
+ hdata->parent_ctx = (void *)drm_hdmi_ctx;
+
+ platform_set_drvdata(pdev, drm_hdmi_ctx);
+
+ hdata->default_win = pdata->default_win;
+ hdata->default_timing = &pdata->timing;
+ hdata->default_bpp = pdata->bpp;
+ hdata->dev = dev;
+
+ ret = hdmi_resources_init(hdata);
+ if (ret) {
+ ret = -EINVAL;
+ goto err_data;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ DRM_ERROR("failed to find registers\n");
+ ret = -ENOENT;
+ goto err_resource;
+ }
+
+ hdata->regs_res = request_mem_region(res->start, resource_size(res),
+ dev_name(dev));
+ if (!hdata->regs_res) {
+ DRM_ERROR("failed to claim register region\n");
+ ret = -ENOENT;
+ goto err_resource;
+ }
+
+ hdata->regs = ioremap(res->start, resource_size(res));
+ if (!hdata->regs) {
+ DRM_ERROR("failed to map registers\n");
+ ret = -ENXIO;
+ goto err_req_region;
+ }
+
+ /* DDC i2c driver */
+ if (i2c_add_driver(&ddc_driver)) {
+ DRM_ERROR("failed to register ddc i2c driver\n");
+ ret = -ENOENT;
+ goto err_iomap;
+ }
+
+ hdata->ddc_port = hdmi_ddc;
+
+ /* hdmiphy i2c driver */
+ if (i2c_add_driver(&hdmiphy_driver)) {
+ DRM_ERROR("failed to register hdmiphy i2c driver\n");
+ ret = -ENOENT;
+ goto err_ddc;
+ }
+
+ hdata->hdmiphy_port = hdmi_hdmiphy;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ DRM_ERROR("get interrupt resource failed.\n");
+ ret = -ENXIO;
+ goto err_hdmiphy;
+ }
+
+ /* create workqueue and hotplug work */
+ hdata->wq = alloc_workqueue("exynos-drm-hdmi",
+ WQ_UNBOUND | WQ_NON_REENTRANT, 1);
+ if (hdata->wq == NULL) {
+ DRM_ERROR("Failed to create workqueue.\n");
+ ret = -ENOMEM;
+ goto err_hdmiphy;
+ }
+ INIT_WORK(&hdata->hotplug_work, hdmi_hotplug_func);
+
+ /* register hpd interrupt */
+ ret = request_irq(res->start, hdmi_irq_handler, 0, "drm_hdmi",
+ drm_hdmi_ctx);
+ if (ret) {
+ DRM_ERROR("request interrupt failed.\n");
+ goto err_workqueue;
+ }
+ hdata->irq = res->start;
+
+ /* register specific callbacks to common hdmi. */
+ exynos_drm_display_ops_register(&display_ops);
+ exynos_drm_manager_ops_register(&manager_ops);
+
+ hdmi_resource_poweron(hdata);
+
+ return 0;
+
+err_workqueue:
+ destroy_workqueue(hdata->wq);
+err_hdmiphy:
+ i2c_del_driver(&hdmiphy_driver);
+err_ddc:
+ i2c_del_driver(&ddc_driver);
+err_iomap:
+ iounmap(hdata->regs);
+err_req_region:
+ release_mem_region(hdata->regs_res->start,
+ resource_size(hdata->regs_res));
+err_resource:
+ hdmi_resources_cleanup(hdata);
+err_data:
+ kfree(hdata);
+ kfree(drm_hdmi_ctx);
+ return ret;
+}
+
+static int __devexit hdmi_remove(struct platform_device *pdev)
+{
+ struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev);
+ struct hdmi_context *hdata = (struct hdmi_context *)ctx->ctx;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ hdmi_resource_poweroff(hdata);
+
+ disable_irq(hdata->irq);
+ free_irq(hdata->irq, ctx);
+
+ cancel_work_sync(&hdata->hotplug_work);
+ destroy_workqueue(hdata->wq);
+
+ hdmi_resources_cleanup(hdata);
+
+ iounmap(hdata->regs);
+
+ release_mem_region(hdata->regs_res->start,
+ resource_size(hdata->regs_res));
+
+ /* hdmiphy i2c driver */
+ i2c_del_driver(&hdmiphy_driver);
+ /* DDC i2c driver */
+ i2c_del_driver(&ddc_driver);
+
+ kfree(hdata);
+ kfree(ctx);
+
+ return 0;
+}
+
+struct platform_driver hdmi_driver = {
+ .probe = hdmi_probe,
+ .remove = __devexit_p(hdmi_remove),
+ .driver = {
+ .name = "exynos4-hdmi",
+ .owner = THIS_MODULE,
+ .pm = &hdmi_pm_ops,
+ },
+};
+EXPORT_SYMBOL(hdmi_driver);
+
+MODULE_AUTHOR("Seung-Woo Kim, <sw0312.kim@samsung.com>");
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_DESCRIPTION("Samsung DRM HDMI core Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.h b/drivers/gpu/drm/exynos/exynos_hdmi.h
new file mode 100644
index 0000000..31d6cf8
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.h
@@ -0,0 +1,87 @@
+/*
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Inki Dae <inki.dae@samsung.com>
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_HDMI_H_
+#define _EXYNOS_HDMI_H_
+
+struct hdmi_conf {
+ int width;
+ int height;
+ int vrefresh;
+ bool interlace;
+ const u8 *hdmiphy_data;
+ const struct hdmi_preset_conf *conf;
+};
+
+struct hdmi_resources {
+ struct clk *hdmi;
+ struct clk *sclk_hdmi;
+ struct clk *sclk_pixel;
+ struct clk *sclk_hdmiphy;
+ struct clk *hdmiphy;
+ struct regulator_bulk_data *regul_bulk;
+ int regul_count;
+};
+
+struct hdmi_context {
+ struct device *dev;
+ struct drm_device *drm_dev;
+ struct fb_videomode *default_timing;
+ unsigned int default_win;
+ unsigned int default_bpp;
+ bool hpd_handle;
+ bool enabled;
+
+ struct resource *regs_res;
+ /** base address of HDMI registers */
+ void __iomem *regs;
+ /** HDMI hotplug interrupt */
+ unsigned int irq;
+ /** workqueue for delayed work */
+ struct workqueue_struct *wq;
+ /** hotplug handling work */
+ struct work_struct hotplug_work;
+
+ struct i2c_client *ddc_port;
+ struct i2c_client *hdmiphy_port;
+
+ /** current hdmiphy conf index */
+ int cur_conf;
+ /** other resources */
+ struct hdmi_resources res;
+
+ void *parent_ctx;
+};
+
+void hdmi_attach_ddc_client(struct i2c_client *ddc);
+void hdmi_attach_hdmiphy_client(struct i2c_client *hdmiphy);
+
+extern struct i2c_driver hdmiphy_driver;
+extern struct i2c_driver ddc_driver;
+
+#endif
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
new file mode 100644
index 0000000..9fe2995
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ * Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+
+#include <linux/kernel.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_hdmi.h"
+
+
+static int hdmiphy_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ hdmi_attach_hdmiphy_client(client);
+
+ dev_info(&client->adapter->dev, "attached s5p_hdmiphy "
+ "into i2c adapter successfully\n");
+
+ return 0;
+}
+
+static int hdmiphy_remove(struct i2c_client *client)
+{
+ dev_info(&client->adapter->dev, "detached s5p_hdmiphy "
+ "from i2c adapter successfully\n");
+
+ return 0;
+}
+
+static const struct i2c_device_id hdmiphy_id[] = {
+ { "s5p_hdmiphy", 0 },
+ { },
+};
+
+struct i2c_driver hdmiphy_driver = {
+ .driver = {
+ .name = "s5p-hdmiphy",
+ .owner = THIS_MODULE,
+ },
+ .id_table = hdmiphy_id,
+ .probe = hdmiphy_probe,
+ .remove = __devexit_p(hdmiphy_remove),
+ .command = NULL,
+};
+EXPORT_SYMBOL(hdmiphy_driver);
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
new file mode 100644
index 0000000..93846e8
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -0,0 +1,1075 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ * Inki Dae <inki.dae@samsung.com>
+ * Joonyoung Shim <jy0922.shim@samsung.com>
+ *
+ * Based on drivers/media/video/s5p-tv/mixer_reg.c
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include "drmP.h"
+
+#include "regs-mixer.h"
+#include "regs-vp.h"
+
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_hdmi.h"
+#include "exynos_hdmi.h"
+#include "exynos_mixer.h"
+
+#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev))
+
+static const u8 filter_y_horiz_tap8[] = {
+ 0, -1, -1, -1, -1, -1, -1, -1,
+ -1, -1, -1, -1, -1, 0, 0, 0,
+ 0, 2, 4, 5, 6, 6, 6, 6,
+ 6, 5, 5, 4, 3, 2, 1, 1,
+ 0, -6, -12, -16, -18, -20, -21, -20,
+ -20, -18, -16, -13, -10, -8, -5, -2,
+ 127, 126, 125, 121, 114, 107, 99, 89,
+ 79, 68, 57, 46, 35, 25, 16, 8,
+};
+
+static const u8 filter_y_vert_tap4[] = {
+ 0, -3, -6, -8, -8, -8, -8, -7,
+ -6, -5, -4, -3, -2, -1, -1, 0,
+ 127, 126, 124, 118, 111, 102, 92, 81,
+ 70, 59, 48, 37, 27, 19, 11, 5,
+ 0, 5, 11, 19, 27, 37, 48, 59,
+ 70, 81, 92, 102, 111, 118, 124, 126,
+ 0, 0, -1, -1, -2, -3, -4, -5,
+ -6, -7, -8, -8, -8, -8, -6, -3,
+};
+
+static const u8 filter_cr_horiz_tap4[] = {
+ 0, -3, -6, -8, -8, -8, -8, -7,
+ -6, -5, -4, -3, -2, -1, -1, 0,
+ 127, 126, 124, 118, 111, 102, 92, 81,
+ 70, 59, 48, 37, 27, 19, 11, 5,
+};
+
+static inline u32 vp_reg_read(struct mixer_resources *res, u32 reg_id)
+{
+ return readl(res->vp_regs + reg_id);
+}
+
+static inline void vp_reg_write(struct mixer_resources *res, u32 reg_id,
+ u32 val)
+{
+ writel(val, res->vp_regs + reg_id);
+}
+
+static inline void vp_reg_writemask(struct mixer_resources *res, u32 reg_id,
+ u32 val, u32 mask)
+{
+ u32 old = vp_reg_read(res, reg_id);
+
+ val = (val & mask) | (old & ~mask);
+ writel(val, res->vp_regs + reg_id);
+}
+
+static inline u32 mixer_reg_read(struct mixer_resources *res, u32 reg_id)
+{
+ return readl(res->mixer_regs + reg_id);
+}
+
+static inline void mixer_reg_write(struct mixer_resources *res, u32 reg_id,
+ u32 val)
+{
+ writel(val, res->mixer_regs + reg_id);
+}
+
+static inline void mixer_reg_writemask(struct mixer_resources *res,
+ u32 reg_id, u32 val, u32 mask)
+{
+ u32 old = mixer_reg_read(res, reg_id);
+
+ val = (val & mask) | (old & ~mask);
+ writel(val, res->mixer_regs + reg_id);
+}
+
+static void mixer_regs_dump(struct mixer_context *ctx)
+{
+#define DUMPREG(reg_id) \
+do { \
+ DRM_DEBUG_KMS(#reg_id " = %08x\n", \
+ (u32)readl(ctx->mixer_res.mixer_regs + reg_id)); \
+} while (0)
+
+ DUMPREG(MXR_STATUS);
+ DUMPREG(MXR_CFG);
+ DUMPREG(MXR_INT_EN);
+ DUMPREG(MXR_INT_STATUS);
+
+ DUMPREG(MXR_LAYER_CFG);
+ DUMPREG(MXR_VIDEO_CFG);
+
+ DUMPREG(MXR_GRAPHIC0_CFG);
+ DUMPREG(MXR_GRAPHIC0_BASE);
+ DUMPREG(MXR_GRAPHIC0_SPAN);
+ DUMPREG(MXR_GRAPHIC0_WH);
+ DUMPREG(MXR_GRAPHIC0_SXY);
+ DUMPREG(MXR_GRAPHIC0_DXY);
+
+ DUMPREG(MXR_GRAPHIC1_CFG);
+ DUMPREG(MXR_GRAPHIC1_BASE);
+ DUMPREG(MXR_GRAPHIC1_SPAN);
+ DUMPREG(MXR_GRAPHIC1_WH);
+ DUMPREG(MXR_GRAPHIC1_SXY);
+ DUMPREG(MXR_GRAPHIC1_DXY);
+#undef DUMPREG
+}
+
+static void vp_regs_dump(struct mixer_context *ctx)
+{
+#define DUMPREG(reg_id) \
+do { \
+ DRM_DEBUG_KMS(#reg_id " = %08x\n", \
+ (u32) readl(ctx->mixer_res.vp_regs + reg_id)); \
+} while (0)
+
+ DUMPREG(VP_ENABLE);
+ DUMPREG(VP_SRESET);
+ DUMPREG(VP_SHADOW_UPDATE);
+ DUMPREG(VP_FIELD_ID);
+ DUMPREG(VP_MODE);
+ DUMPREG(VP_IMG_SIZE_Y);
+ DUMPREG(VP_IMG_SIZE_C);
+ DUMPREG(VP_PER_RATE_CTRL);
+ DUMPREG(VP_TOP_Y_PTR);
+ DUMPREG(VP_BOT_Y_PTR);
+ DUMPREG(VP_TOP_C_PTR);
+ DUMPREG(VP_BOT_C_PTR);
+ DUMPREG(VP_ENDIAN_MODE);
+ DUMPREG(VP_SRC_H_POSITION);
+ DUMPREG(VP_SRC_V_POSITION);
+ DUMPREG(VP_SRC_WIDTH);
+ DUMPREG(VP_SRC_HEIGHT);
+ DUMPREG(VP_DST_H_POSITION);
+ DUMPREG(VP_DST_V_POSITION);
+ DUMPREG(VP_DST_WIDTH);
+ DUMPREG(VP_DST_HEIGHT);
+ DUMPREG(VP_H_RATIO);
+ DUMPREG(VP_V_RATIO);
+
+#undef DUMPREG
+}
+
+static inline void vp_filter_set(struct mixer_resources *res,
+ int reg_id, const u8 *data, unsigned int size)
+{
+ /* the coefficient table size must be a multiple of 4 bytes */
+ BUG_ON(size & 3);
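+ /* pack four coefficients per 32-bit register word, MSB first */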
+ for (; size; size -= 4, reg_id += 4, data += 4) {
+ u32 val = (data[0] << 24) | (data[1] << 16) |
+ (data[2] << 8) | data[3];
+ vp_reg_write(res, reg_id, val);
+ }
+}
+
+static void vp_default_filter(struct mixer_resources *res)
+{
+ vp_filter_set(res, VP_POLY8_Y0_LL,
+ filter_y_horiz_tap8, sizeof filter_y_horiz_tap8);
+ vp_filter_set(res, VP_POLY4_Y0_LL,
+ filter_y_vert_tap4, sizeof filter_y_vert_tap4);
+ vp_filter_set(res, VP_POLY4_C0_LL,
+ filter_cr_horiz_tap4, sizeof filter_cr_horiz_tap4);
+}
+
+static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ /* block update on vsync */
+ mixer_reg_writemask(res, MXR_STATUS, enable ?
+ MXR_STATUS_SYNC_ENABLE : 0, MXR_STATUS_SYNC_ENABLE);
+
+ vp_reg_write(res, VP_SHADOW_UPDATE, enable ?
+ VP_SHADOW_UPDATE_ENABLE : 0);
+}
+
+static void mixer_cfg_scan(struct mixer_context *ctx, unsigned int height)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ u32 val;
+
+ /* choosing between interlace and progressive mode */
+ val = (ctx->interlace ? MXR_CFG_SCAN_INTERLACE :
+ MXR_CFG_SCAN_PROGRASSIVE);
+
+ /* choose between HD and SD mode */
+ if (height == 480)
+ val |= MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD;
+ else if (height == 576)
+ val |= MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD;
+ else if (height == 720)
+ val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
+ else if (height == 1080)
+ val |= MXR_CFG_SCAN_HD_1080 | MXR_CFG_SCAN_HD;
+ else
+ val |= MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD;
+
+ mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_SCAN_MASK);
+}
+
+static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ u32 val;
+
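+ /* SD modes keep full-range BT.601; HD modes use limited-range BT.709 with an explicit CSC matrix */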
+ if (height == 480) {
+ val = MXR_CFG_RGB601_0_255;
+ } else if (height == 576) {
+ val = MXR_CFG_RGB601_0_255;
+ } else if (height == 720) {
+ val = MXR_CFG_RGB709_16_235;
+ mixer_reg_write(res, MXR_CM_COEFF_Y,
+ (1 << 30) | (94 << 20) | (314 << 10) |
+ (32 << 0));
+ mixer_reg_write(res, MXR_CM_COEFF_CB,
+ (972 << 20) | (851 << 10) | (225 << 0));
+ mixer_reg_write(res, MXR_CM_COEFF_CR,
+ (225 << 20) | (820 << 10) | (1004 << 0));
+ } else if (height == 1080) {
+ val = MXR_CFG_RGB709_16_235;
+ mixer_reg_write(res, MXR_CM_COEFF_Y,
+ (1 << 30) | (94 << 20) | (314 << 10) |
+ (32 << 0));
+ mixer_reg_write(res, MXR_CM_COEFF_CB,
+ (972 << 20) | (851 << 10) | (225 << 0));
+ mixer_reg_write(res, MXR_CM_COEFF_CR,
+ (225 << 20) | (820 << 10) | (1004 << 0));
+ } else {
+ val = MXR_CFG_RGB709_16_235;
+ mixer_reg_write(res, MXR_CM_COEFF_Y,
+ (1 << 30) | (94 << 20) | (314 << 10) |
+ (32 << 0));
+ mixer_reg_write(res, MXR_CM_COEFF_CB,
+ (972 << 20) | (851 << 10) | (225 << 0));
+ mixer_reg_write(res, MXR_CM_COEFF_CR,
+ (225 << 20) | (820 << 10) | (1004 << 0));
+ }
+
+ mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
+}
+
+static void mixer_cfg_layer(struct mixer_context *ctx, int win, bool enable)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ u32 val = enable ? ~0 : 0;
+
+ switch (win) {
+ case 0:
+ mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP0_ENABLE);
+ break;
+ case 1:
+ mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_GRP1_ENABLE);
+ break;
+ case 2:
+ vp_reg_writemask(res, VP_ENABLE, val, VP_ENABLE_ON);
+ mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_VP_ENABLE);
+ break;
+ }
+}
+
+static void mixer_run(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
+
+ mixer_regs_dump(ctx);
+}
+
+static void vp_video_buffer(struct mixer_context *ctx, int win)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ unsigned long flags;
+ struct hdmi_win_data *win_data;
+ unsigned int full_width, full_height, width, height;
+ unsigned int x_ratio, y_ratio;
+ unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
+ unsigned int mode_width, mode_height;
+ unsigned int buf_num;
+ dma_addr_t luma_addr[2], chroma_addr[2];
+ bool tiled_mode = false;
+ bool crcb_mode = false;
+ u32 val;
+
+ win_data = &ctx->win_data[win];
+
+ switch (win_data->pixel_format) {
+ case DRM_FORMAT_NV12MT:
+ tiled_mode = true;
+ case DRM_FORMAT_NV12M:
+ crcb_mode = false;
+ buf_num = 2;
+ break;
+ /* TODO: single buffer format NV12, NV21 */
+ default:
+ /* ignore pixel format at disable time */
+ if (!win_data->dma_addr)
+ break;
+
+ DRM_ERROR("pixel format for vp is wrong [%d].\n",
+ win_data->pixel_format);
+ return;
+ }
+
+ full_width = win_data->fb_width;
+ full_height = win_data->fb_height;
+ width = win_data->crtc_width;
+ height = win_data->crtc_height;
+ mode_width = win_data->mode_width;
+ mode_height = win_data->mode_height;
+
+ /* scaling feature: (src << 16) / dst */
+ x_ratio = (width << 16) / width;
+ y_ratio = (height << 16) / height;
+
+ src_x_offset = win_data->fb_x;
+ src_y_offset = win_data->fb_y;
+ dst_x_offset = win_data->crtc_x;
+ dst_y_offset = win_data->crtc_y;
+
+ if (buf_num == 2) {
+ luma_addr[0] = win_data->dma_addr;
+ chroma_addr[0] = win_data->chroma_dma_addr;
+ } else {
+ luma_addr[0] = win_data->dma_addr;
+ chroma_addr[0] = win_data->dma_addr
+ + (full_width * full_height);
+ }
+
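+ /* interlaced scan: derive the bottom-field addresses from the top field (0x40 byte step in tiled mode, one line otherwise) */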
+ if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE) {
+ ctx->interlace = true;
+ if (tiled_mode) {
+ luma_addr[1] = luma_addr[0] + 0x40;
+ chroma_addr[1] = chroma_addr[0] + 0x40;
+ } else {
+ luma_addr[1] = luma_addr[0] + full_width;
+ chroma_addr[1] = chroma_addr[0] + full_width;
+ }
+ } else {
+ ctx->interlace = false;
+ luma_addr[1] = 0;
+ chroma_addr[1] = 0;
+ }
+
+ spin_lock_irqsave(&res->reg_slock, flags);
+ mixer_vsync_set_update(ctx, false);
+
+ /* interlace or progressive scan mode */
+ val = (ctx->interlace ? ~0 : 0);
+ vp_reg_writemask(res, VP_MODE, val, VP_MODE_LINE_SKIP);
+
+ /* setup format */
+ val = (crcb_mode ? VP_MODE_NV21 : VP_MODE_NV12);
+ val |= (tiled_mode ? VP_MODE_MEM_TILED : VP_MODE_MEM_LINEAR);
+ vp_reg_writemask(res, VP_MODE, val, VP_MODE_FMT_MASK);
+
+ /* setting size of input image */
+ vp_reg_write(res, VP_IMG_SIZE_Y, VP_IMG_HSIZE(full_width) |
+ VP_IMG_VSIZE(full_height));
+ /* chroma height has to be halved to avoid chroma distortions */
+ vp_reg_write(res, VP_IMG_SIZE_C, VP_IMG_HSIZE(full_width) |
+ VP_IMG_VSIZE(full_height / 2));
+
+ vp_reg_write(res, VP_SRC_WIDTH, width);
+ vp_reg_write(res, VP_SRC_HEIGHT, height);
+ vp_reg_write(res, VP_SRC_H_POSITION,
+ VP_SRC_H_POSITION_VAL(src_x_offset));
+ vp_reg_write(res, VP_SRC_V_POSITION, src_y_offset);
+
+ vp_reg_write(res, VP_DST_WIDTH, width);
+ vp_reg_write(res, VP_DST_H_POSITION, dst_x_offset);
+ if (ctx->interlace) {
+ vp_reg_write(res, VP_DST_HEIGHT, height / 2);
+ vp_reg_write(res, VP_DST_V_POSITION, dst_y_offset / 2);
+ } else {
+ vp_reg_write(res, VP_DST_HEIGHT, height);
+ vp_reg_write(res, VP_DST_V_POSITION, dst_y_offset);
+ }
+
+ vp_reg_write(res, VP_H_RATIO, x_ratio);
+ vp_reg_write(res, VP_V_RATIO, y_ratio);
+
+ vp_reg_write(res, VP_ENDIAN_MODE, VP_ENDIAN_MODE_LITTLE);
+
+ /* set buffer address to vp */
+ vp_reg_write(res, VP_TOP_Y_PTR, luma_addr[0]);
+ vp_reg_write(res, VP_BOT_Y_PTR, luma_addr[1]);
+ vp_reg_write(res, VP_TOP_C_PTR, chroma_addr[0]);
+ vp_reg_write(res, VP_BOT_C_PTR, chroma_addr[1]);
+
+ mixer_cfg_scan(ctx, mode_height);
+ mixer_cfg_rgb_fmt(ctx, mode_height);
+ mixer_cfg_layer(ctx, win, true);
+ mixer_run(ctx);
+
+ mixer_vsync_set_update(ctx, true);
+ spin_unlock_irqrestore(&res->reg_slock, flags);
+
+ vp_regs_dump(ctx);
+}
+
+static void mixer_graph_buffer(struct mixer_context *ctx, int win)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ unsigned long flags;
+ struct hdmi_win_data *win_data;
+ unsigned int full_width, width, height;
+ unsigned int x_ratio, y_ratio;
+ unsigned int src_x_offset, src_y_offset, dst_x_offset, dst_y_offset;
+ unsigned int mode_width, mode_height;
+ dma_addr_t dma_addr;
+ unsigned int fmt;
+ u32 val;
+
+ win_data = &ctx->win_data[win];
+
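+ /* mixer graphic layer pixel format codes, fed to MXR_GRP_CFG_FORMAT_VAL() */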
+ #define RGB565 4
+ #define ARGB1555 5
+ #define ARGB4444 6
+ #define ARGB8888 7
+
+ switch (win_data->bpp) {
+ case 16:
+ fmt = ARGB4444;
+ break;
+ case 32:
+ fmt = ARGB8888;
+ break;
+ default:
+ fmt = ARGB8888;
+ }
+
+ dma_addr = win_data->dma_addr;
+ full_width = win_data->fb_width;
+ width = win_data->crtc_width;
+ height = win_data->crtc_height;
+ mode_width = win_data->mode_width;
+ mode_height = win_data->mode_height;
+
+ /* 2x scaling feature */
+ x_ratio = 0;
+ y_ratio = 0;
+
+ src_x_offset = win_data->fb_x;
+ src_y_offset = win_data->fb_y;
+ dst_x_offset = win_data->crtc_x;
+ dst_y_offset = win_data->crtc_y;
+
+ /* fold the source x/y offset into the dma base address */
+ dma_addr = dma_addr
+ + (src_x_offset * win_data->bpp >> 3)
+ + (src_y_offset * full_width * win_data->bpp >> 3);
+ src_x_offset = 0;
+ src_y_offset = 0;
+
+ if (win_data->scan_flags & DRM_MODE_FLAG_INTERLACE)
+ ctx->interlace = true;
+ else
+ ctx->interlace = false;
+
+ spin_lock_irqsave(&res->reg_slock, flags);
+ mixer_vsync_set_update(ctx, false);
+
+ /* setup format */
+ mixer_reg_writemask(res, MXR_GRAPHIC_CFG(win),
+ MXR_GRP_CFG_FORMAT_VAL(fmt), MXR_GRP_CFG_FORMAT_MASK);
+
+ /* setup geometry */
+ mixer_reg_write(res, MXR_GRAPHIC_SPAN(win), full_width);
+
+ val = MXR_GRP_WH_WIDTH(width);
+ val |= MXR_GRP_WH_HEIGHT(height);
+ val |= MXR_GRP_WH_H_SCALE(x_ratio);
+ val |= MXR_GRP_WH_V_SCALE(y_ratio);
+ mixer_reg_write(res, MXR_GRAPHIC_WH(win), val);
+
+ /* setup offsets in source image */
+ val = MXR_GRP_SXY_SX(src_x_offset);
+ val |= MXR_GRP_SXY_SY(src_y_offset);
+ mixer_reg_write(res, MXR_GRAPHIC_SXY(win), val);
+
+ /* setup offsets in display image */
+ val = MXR_GRP_DXY_DX(dst_x_offset);
+ val |= MXR_GRP_DXY_DY(dst_y_offset);
+ mixer_reg_write(res, MXR_GRAPHIC_DXY(win), val);
+
+ /* set buffer address to mixer */
+ mixer_reg_write(res, MXR_GRAPHIC_BASE(win), dma_addr);
+
+ mixer_cfg_scan(ctx, mode_height);
+ mixer_cfg_rgb_fmt(ctx, mode_height);
+ mixer_cfg_layer(ctx, win, true);
+ mixer_run(ctx);
+
+ mixer_vsync_set_update(ctx, true);
+ spin_unlock_irqrestore(&res->reg_slock, flags);
+}
+
+static void vp_win_reset(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ int tries = 100;
+
+ vp_reg_write(res, VP_SRESET, VP_SRESET_PROCESSING);
+ for (tries = 100; tries; --tries) {
+ /* waiting until VP_SRESET_PROCESSING is 0 */
+ if (~vp_reg_read(res, VP_SRESET) & VP_SRESET_PROCESSING)
+ break;
+ mdelay(10);
+ }
+ WARN(tries == 0, "failed to reset Video Processor\n");
+}
+
+static int mixer_enable_vblank(void *ctx, int pipe)
+{
+ struct mixer_context *mixer_ctx = ctx;
+ struct mixer_resources *res = &mixer_ctx->mixer_res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ mixer_ctx->pipe = pipe;
+
+ /* enable vsync interrupt */
+ mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC,
+ MXR_INT_EN_VSYNC);
+
+ return 0;
+}
+
+static void mixer_disable_vblank(void *ctx)
+{
+ struct mixer_context *mixer_ctx = ctx;
+ struct mixer_resources *res = &mixer_ctx->mixer_res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ /* disable vsync interrupt */
+ mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
+}
+
+static void mixer_win_mode_set(void *ctx,
+ struct exynos_drm_overlay *overlay)
+{
+ struct mixer_context *mixer_ctx = ctx;
+ struct hdmi_win_data *win_data;
+ int win;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ if (!overlay) {
+ DRM_ERROR("overlay is NULL\n");
+ return;
+ }
+
+ DRM_DEBUG_KMS("set [%d]x[%d] at (%d,%d) to [%d]x[%d] at (%d,%d)\n",
+ overlay->fb_width, overlay->fb_height,
+ overlay->fb_x, overlay->fb_y,
+ overlay->crtc_width, overlay->crtc_height,
+ overlay->crtc_x, overlay->crtc_y);
+
+ win = overlay->zpos;
+ if (win == DEFAULT_ZPOS)
+ win = mixer_ctx->default_win;
+
+ if (win < 0 || win >= HDMI_OVERLAY_NUMBER) {
+ DRM_ERROR("overlay plane[%d] is wrong\n", win);
+ return;
+ }
+
+ win_data = &mixer_ctx->win_data[win];
+
+ win_data->dma_addr = overlay->dma_addr[0];
+ win_data->vaddr = overlay->vaddr[0];
+ win_data->chroma_dma_addr = overlay->dma_addr[1];
+ win_data->chroma_vaddr = overlay->vaddr[1];
+ win_data->pixel_format = overlay->pixel_format;
+ win_data->bpp = overlay->bpp;
+
+ win_data->crtc_x = overlay->crtc_x;
+ win_data->crtc_y = overlay->crtc_y;
+ win_data->crtc_width = overlay->crtc_width;
+ win_data->crtc_height = overlay->crtc_height;
+
+ win_data->fb_x = overlay->fb_x;
+ win_data->fb_y = overlay->fb_y;
+ win_data->fb_width = overlay->fb_width;
+ win_data->fb_height = overlay->fb_height;
+
+ win_data->mode_width = overlay->mode_width;
+ win_data->mode_height = overlay->mode_height;
+
+ win_data->scan_flags = overlay->scan_flag;
+}
+
+static void mixer_win_commit(void *ctx, int zpos)
+{
+ struct mixer_context *mixer_ctx = ctx;
+ int win = zpos;
+
+ DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
+
+ if (win == DEFAULT_ZPOS)
+ win = mixer_ctx->default_win;
+
+ if (win < 0 || win >= HDMI_OVERLAY_NUMBER) {
+ DRM_ERROR("overlay plane[%d] is wrong\n", win);
+ return;
+ }
+
+ if (win > 1)
+ vp_video_buffer(mixer_ctx, win);
+ else
+ mixer_graph_buffer(mixer_ctx, win);
+}
+
+static void mixer_win_disable(void *ctx, int zpos)
+{
+ struct mixer_context *mixer_ctx = ctx;
+ struct mixer_resources *res = &mixer_ctx->mixer_res;
+ unsigned long flags;
+ int win = zpos;
+
+ DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
+
+ if (win == DEFAULT_ZPOS)
+ win = mixer_ctx->default_win;
+
+ if (win < 0 || win >= HDMI_OVERLAY_NUMBER) {
+ DRM_ERROR("overlay plane[%d] is wrong\n", win);
+ return;
+ }
+
+ spin_lock_irqsave(&res->reg_slock, flags);
+ mixer_vsync_set_update(mixer_ctx, false);
+
+ mixer_cfg_layer(mixer_ctx, win, false);
+
+ mixer_vsync_set_update(mixer_ctx, true);
+ spin_unlock_irqrestore(&res->reg_slock, flags);
+}
+
+static struct exynos_hdmi_overlay_ops overlay_ops = {
+ .enable_vblank = mixer_enable_vblank,
+ .disable_vblank = mixer_disable_vblank,
+ .win_mode_set = mixer_win_mode_set,
+ .win_commit = mixer_win_commit,
+ .win_disable = mixer_win_disable,
+};
+
+/* for pageflip event */
+static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
+{
+ struct exynos_drm_private *dev_priv = drm_dev->dev_private;
+ struct drm_pending_vblank_event *e, *t;
+ struct timeval now;
+ unsigned long flags;
+ bool is_checked = false;
+
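+ /* hand the queued pageflip completion events for this crtc back to userspace */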
+ spin_lock_irqsave(&drm_dev->event_lock, flags);
+
+ list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
+ base.link) {
+ /* ignore events that belong to a different crtc */
+ if (crtc != e->pipe)
+ continue;
+
+ is_checked = true;
+ do_gettimeofday(&now);
+ e->event.sequence = 0;
+ e->event.tv_sec = now.tv_sec;
+ e->event.tv_usec = now.tv_usec;
+
+ list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+ wake_up_interruptible(&e->base.file_priv->event_wait);
+ }
+
+ if (is_checked) {
+ /*
+ * call drm_vblank_put only if drm_vblank_get was called
+ * before.
+ */
+ if (atomic_read(&drm_dev->vblank_refcount[crtc]) > 0)
+ drm_vblank_put(drm_dev, crtc);
+ }
+
+ spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+}
+
+static irqreturn_t mixer_irq_handler(int irq, void *arg)
+{
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx = arg;
+ struct mixer_context *ctx =
+ (struct mixer_context *)drm_hdmi_ctx->ctx;
+ struct mixer_resources *res = &ctx->mixer_res;
+ u32 val, val_base;
+
+ spin_lock(&res->reg_slock);
+
+ /* read interrupt status for handling and clearing flags for VSYNC */
+ val = mixer_reg_read(res, MXR_INT_STATUS);
+
+ /* handling VSYNC */
+ if (val & MXR_INT_STATUS_VSYNC) {
+ /* interlaced scan: check that the shadow base registers have latched the new addresses before completing the flip */
+ if (ctx->interlace) {
+ val_base = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(0));
+ if (ctx->win_data[0].dma_addr != val_base)
+ goto out;
+
+ val_base = mixer_reg_read(res, MXR_GRAPHIC_BASE_S(1));
+ if (ctx->win_data[1].dma_addr != val_base)
+ goto out;
+ }
+
+ drm_handle_vblank(drm_hdmi_ctx->drm_dev, ctx->pipe);
+ mixer_finish_pageflip(drm_hdmi_ctx->drm_dev, ctx->pipe);
+ }
+
+out:
+ /* clear interrupts */
+ if (~val & MXR_INT_EN_VSYNC) {
+ /* the vsync interrupt uses different bits for status and clear */
+ val &= ~MXR_INT_EN_VSYNC;
+ val |= MXR_INT_CLEAR_VSYNC;
+ }
+ mixer_reg_write(res, MXR_INT_STATUS, val);
+
+ spin_unlock(&res->reg_slock);
+
+ return IRQ_HANDLED;
+}
+
+static void mixer_win_reset(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+ unsigned long flags;
+ u32 val; /* value stored to register */
+
+ spin_lock_irqsave(&res->reg_slock, flags);
+ mixer_vsync_set_update(ctx, false);
+
+ mixer_reg_writemask(res, MXR_CFG, MXR_CFG_DST_HDMI, MXR_CFG_DST_MASK);
+
+ /* set output in RGB888 mode */
+ mixer_reg_writemask(res, MXR_CFG, MXR_CFG_OUT_RGB888, MXR_CFG_OUT_MASK);
+
+ /* 16 beat burst in DMA */
+ mixer_reg_writemask(res, MXR_STATUS, MXR_STATUS_16_BURST,
+ MXR_STATUS_BURST_MASK);
+
+ /* setting default layer priority: layer1 > layer0 > video
+ * because typical usage scenario would be
+ * layer1 - OSD
+ * layer0 - framebuffer
+ * video - video overlay
+ */
+ val = MXR_LAYER_CFG_GRP1_VAL(3);
+ val |= MXR_LAYER_CFG_GRP0_VAL(2);
+ val |= MXR_LAYER_CFG_VP_VAL(1);
+ mixer_reg_write(res, MXR_LAYER_CFG, val);
+
+ /* set background color (0x008080, i.e. YCbCr black) */
+ mixer_reg_write(res, MXR_BG_COLOR0, 0x008080);
+ mixer_reg_write(res, MXR_BG_COLOR1, 0x008080);
+ mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
+
+ /* setting graphical layers */
+
+ val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no color key */
+ val |= MXR_GRP_CFG_WIN_BLEND_EN;
+ val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
+
+ /* the same configuration for both layers */
+ mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
+
+ val |= MXR_GRP_CFG_BLEND_PRE_MUL;
+ val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
+ mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
+
+ /* configuration of Video Processor Registers */
+ vp_win_reset(ctx);
+ vp_default_filter(res);
+
+ /* disable all layers */
+ mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP0_ENABLE);
+ mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_GRP1_ENABLE);
+ mixer_reg_writemask(res, MXR_CFG, 0, MXR_CFG_VP_ENABLE);
+
+ mixer_vsync_set_update(ctx, true);
+ spin_unlock_irqrestore(&res->reg_slock, flags);
+}
+
+static void mixer_resource_poweron(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ clk_enable(res->mixer);
+ clk_enable(res->vp);
+ clk_enable(res->sclk_mixer);
+
+ mixer_win_reset(ctx);
+}
+
+static void mixer_resource_poweroff(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
+
+ clk_disable(res->mixer);
+ clk_disable(res->vp);
+ clk_disable(res->sclk_mixer);
+}
+
+static int mixer_runtime_resume(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *ctx = get_mixer_context(dev);
+
+ DRM_DEBUG_KMS("resume - start\n");
+
+ mixer_resource_poweron((struct mixer_context *)ctx->ctx);
+
+ return 0;
+}
+
+static int mixer_runtime_suspend(struct device *dev)
+{
+ struct exynos_drm_hdmi_context *ctx = get_mixer_context(dev);
+
+ DRM_DEBUG_KMS("suspend - start\n");
+
+ mixer_resource_poweroff((struct mixer_context *)ctx->ctx);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mixer_pm_ops = {
+ .runtime_suspend = mixer_runtime_suspend,
+ .runtime_resume = mixer_runtime_resume,
+};
+
+static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
+ struct platform_device *pdev)
+{
+ struct mixer_context *mixer_ctx =
+ (struct mixer_context *)ctx->ctx;
+ struct device *dev = &pdev->dev;
+ struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
+ struct resource *res;
+ int ret;
+
+ mixer_res->dev = dev;
+ spin_lock_init(&mixer_res->reg_slock);
+
+ mixer_res->mixer = clk_get(dev, "mixer");
+ if (IS_ERR_OR_NULL(mixer_res->mixer)) {
+ dev_err(dev, "failed to get clock 'mixer'\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+ mixer_res->vp = clk_get(dev, "vp");
+ if (IS_ERR_OR_NULL(mixer_res->vp)) {
+ dev_err(dev, "failed to get clock 'vp'\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+ mixer_res->sclk_mixer = clk_get(dev, "sclk_mixer");
+ if (IS_ERR_OR_NULL(mixer_res->sclk_mixer)) {
+ dev_err(dev, "failed to get clock 'sclk_mixer'\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+ mixer_res->sclk_hdmi = clk_get(dev, "sclk_hdmi");
+ if (IS_ERR_OR_NULL(mixer_res->sclk_hdmi)) {
+ dev_err(dev, "failed to get clock 'sclk_hdmi'\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+ mixer_res->sclk_dac = clk_get(dev, "sclk_dac");
+ if (IS_ERR_OR_NULL(mixer_res->sclk_dac)) {
+ dev_err(dev, "failed to get clock 'sclk_dac'\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mxr");
+ if (res == NULL) {
+ dev_err(dev, "get memory resource failed.\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ clk_set_parent(mixer_res->sclk_mixer, mixer_res->sclk_hdmi);
+
+ mixer_res->mixer_regs = ioremap(res->start, resource_size(res));
+ if (mixer_res->mixer_regs == NULL) {
+ dev_err(dev, "register mapping failed.\n");
+ ret = -ENXIO;
+ goto fail;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vp");
+ if (res == NULL) {
+ dev_err(dev, "get memory resource failed.\n");
+ ret = -ENXIO;
+ goto fail_mixer_regs;
+ }
+
+ mixer_res->vp_regs = ioremap(res->start, resource_size(res));
+ if (mixer_res->vp_regs == NULL) {
+ dev_err(dev, "register mapping failed.\n");
+ ret = -ENXIO;
+ goto fail_mixer_regs;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq");
+ if (res == NULL) {
+ dev_err(dev, "get interrupt resource failed.\n");
+ ret = -ENXIO;
+ goto fail_vp_regs;
+ }
+
+ ret = request_irq(res->start, mixer_irq_handler, 0, "drm_mixer", ctx);
+ if (ret) {
+ dev_err(dev, "request interrupt failed.\n");
+ goto fail_vp_regs;
+ }
+ mixer_res->irq = res->start;
+
+ return 0;
+
+fail_vp_regs:
+ iounmap(mixer_res->vp_regs);
+
+fail_mixer_regs:
+ iounmap(mixer_res->mixer_regs);
+
+fail:
+ if (!IS_ERR_OR_NULL(mixer_res->sclk_dac))
+ clk_put(mixer_res->sclk_dac);
+ if (!IS_ERR_OR_NULL(mixer_res->sclk_hdmi))
+ clk_put(mixer_res->sclk_hdmi);
+ if (!IS_ERR_OR_NULL(mixer_res->sclk_mixer))
+ clk_put(mixer_res->sclk_mixer);
+ if (!IS_ERR_OR_NULL(mixer_res->vp))
+ clk_put(mixer_res->vp);
+ if (!IS_ERR_OR_NULL(mixer_res->mixer))
+ clk_put(mixer_res->mixer);
+ mixer_res->dev = NULL;
+ return ret;
+}
+
+static void mixer_resources_cleanup(struct mixer_context *ctx)
+{
+ struct mixer_resources *res = &ctx->mixer_res;
+
+ disable_irq(res->irq);
+ free_irq(res->irq, ctx);
+
+ iounmap(res->vp_regs);
+ iounmap(res->mixer_regs);
+}
+
+static int __devinit mixer_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx;
+ struct mixer_context *ctx;
+ int ret;
+
+ dev_info(dev, "probe start\n");
+
+ drm_hdmi_ctx = kzalloc(sizeof(*drm_hdmi_ctx), GFP_KERNEL);
+ if (!drm_hdmi_ctx) {
+ DRM_ERROR("failed to allocate common hdmi context.\n");
+ return -ENOMEM;
+ }
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ DRM_ERROR("failed to alloc mixer context.\n");
+ kfree(drm_hdmi_ctx);
+ return -ENOMEM;
+ }
+
+ drm_hdmi_ctx->ctx = (void *)ctx;
+
+ platform_set_drvdata(pdev, drm_hdmi_ctx);
+
+ /* acquire resources: regs, irqs, clocks */
+ ret = mixer_resources_init(drm_hdmi_ctx, pdev);
+ if (ret)
+ goto fail;
+
+ /* register mixer-specific callbacks with the common hdmi driver. */
+ exynos_drm_overlay_ops_register(&overlay_ops);
+
+ mixer_resource_poweron(ctx);
+
+ return 0;
+
+
+fail:
+ dev_info(dev, "probe failed\n");
+ kfree(ctx);
+ kfree(drm_hdmi_ctx);
+ return ret;
+}
+
+static int mixer_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct exynos_drm_hdmi_context *drm_hdmi_ctx =
+ platform_get_drvdata(pdev);
+ struct mixer_context *ctx = (struct mixer_context *)drm_hdmi_ctx->ctx;
+
+ dev_info(dev, "remove successful\n");
+
+ mixer_resource_poweroff(ctx);
+ mixer_resources_cleanup(ctx);
+
+ return 0;
+}
+
+struct platform_driver mixer_driver = {
+ .driver = {
+ .name = "s5p-mixer",
+ .owner = THIS_MODULE,
+ .pm = &mixer_pm_ops,
+ },
+ .probe = mixer_probe,
+ .remove = __devexit_p(mixer_remove),
+};
+EXPORT_SYMBOL(mixer_driver);
+
+MODULE_AUTHOR("Seung-Woo Kim, <sw0312.kim@samsung.com>");
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_DESCRIPTION("Samsung DRM HDMI mixer Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.h b/drivers/gpu/drm/exynos/exynos_mixer.h
new file mode 100644
index 0000000..cebacfe
--- /dev/null
+++ b/drivers/gpu/drm/exynos/exynos_mixer.h
@@ -0,0 +1,92 @@
+/*
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ * Seung-Woo Kim <sw0312.kim@samsung.com>
+ * Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_MIXER_H_
+#define _EXYNOS_MIXER_H_
+
+#define HDMI_OVERLAY_NUMBER 3
+
+struct hdmi_win_data {
+ dma_addr_t dma_addr;
+ void __iomem *vaddr;
+ dma_addr_t chroma_dma_addr;
+ void __iomem *chroma_vaddr;
+ uint32_t pixel_format;
+ unsigned int bpp;
+ unsigned int crtc_x;
+ unsigned int crtc_y;
+ unsigned int crtc_width;
+ unsigned int crtc_height;
+ unsigned int fb_x;
+ unsigned int fb_y;
+ unsigned int fb_width;
+ unsigned int fb_height;
+ unsigned int mode_width;
+ unsigned int mode_height;
+ unsigned int scan_flags;
+};
+
+struct mixer_resources {
+ struct device *dev;
+ /** interrupt index */
+ int irq;
+ /** pointer to Mixer registers */
+ void __iomem *mixer_regs;
+ /** pointer to Video Processor registers */
+ void __iomem *vp_regs;
+ /** spinlock for protection of registers */
+ spinlock_t reg_slock;
+ /** other resources */
+ struct clk *mixer;
+ struct clk *vp;
+ struct clk *sclk_mixer;
+ struct clk *sclk_hdmi;
+ struct clk *sclk_dac;
+};
+
+struct mixer_context {
+ unsigned int default_win;
+ struct fb_videomode *default_timing;
+ unsigned int default_bpp;
+
+ /** mixer interrupt */
+ unsigned int irq;
+ /** current crtc pipe for vblank */
+ int pipe;
+ /** interlace scan mode */
+ bool interlace;
+ /** vp enabled status */
+ bool vp_enabled;
+
+ /** mixer and vp resources */
+ struct mixer_resources mixer_res;
+
+ /** overlay window data */
+ struct hdmi_win_data win_data[HDMI_OVERLAY_NUMBER];
+};
+
+#endif
diff --git a/drivers/gpu/drm/exynos/regs-hdmi.h b/drivers/gpu/drm/exynos/regs-hdmi.h
new file mode 100644
index 0000000..72e6b52
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-hdmi.h
@@ -0,0 +1,147 @@
+/*
+ *
+ * Cloned from drivers/media/video/s5p-tv/regs-hdmi.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * HDMI register header file for Samsung TVOUT driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef SAMSUNG_REGS_HDMI_H
+#define SAMSUNG_REGS_HDMI_H
+
+/*
+ * Register part
+*/
+
+#define HDMI_CTRL_BASE(x) ((x) + 0x00000000)
+#define HDMI_CORE_BASE(x) ((x) + 0x00010000)
+#define HDMI_TG_BASE(x) ((x) + 0x00050000)
+
+/* Control registers */
+#define HDMI_INTC_CON HDMI_CTRL_BASE(0x0000)
+#define HDMI_INTC_FLAG HDMI_CTRL_BASE(0x0004)
+#define HDMI_HPD_STATUS HDMI_CTRL_BASE(0x000C)
+#define HDMI_PHY_RSTOUT HDMI_CTRL_BASE(0x0014)
+#define HDMI_PHY_VPLL HDMI_CTRL_BASE(0x0018)
+#define HDMI_PHY_CMU HDMI_CTRL_BASE(0x001C)
+#define HDMI_CORE_RSTOUT HDMI_CTRL_BASE(0x0020)
+
+/* Core registers */
+#define HDMI_CON_0 HDMI_CORE_BASE(0x0000)
+#define HDMI_CON_1 HDMI_CORE_BASE(0x0004)
+#define HDMI_CON_2 HDMI_CORE_BASE(0x0008)
+#define HDMI_SYS_STATUS HDMI_CORE_BASE(0x0010)
+#define HDMI_PHY_STATUS HDMI_CORE_BASE(0x0014)
+#define HDMI_STATUS_EN HDMI_CORE_BASE(0x0020)
+#define HDMI_HPD HDMI_CORE_BASE(0x0030)
+#define HDMI_MODE_SEL HDMI_CORE_BASE(0x0040)
+#define HDMI_BLUE_SCREEN_0 HDMI_CORE_BASE(0x0050)
+#define HDMI_BLUE_SCREEN_1 HDMI_CORE_BASE(0x0054)
+#define HDMI_BLUE_SCREEN_2 HDMI_CORE_BASE(0x0058)
+#define HDMI_H_BLANK_0 HDMI_CORE_BASE(0x00A0)
+#define HDMI_H_BLANK_1 HDMI_CORE_BASE(0x00A4)
+#define HDMI_V_BLANK_0 HDMI_CORE_BASE(0x00B0)
+#define HDMI_V_BLANK_1 HDMI_CORE_BASE(0x00B4)
+#define HDMI_V_BLANK_2 HDMI_CORE_BASE(0x00B8)
+#define HDMI_H_V_LINE_0 HDMI_CORE_BASE(0x00C0)
+#define HDMI_H_V_LINE_1 HDMI_CORE_BASE(0x00C4)
+#define HDMI_H_V_LINE_2 HDMI_CORE_BASE(0x00C8)
+#define HDMI_VSYNC_POL HDMI_CORE_BASE(0x00E4)
+#define HDMI_INT_PRO_MODE HDMI_CORE_BASE(0x00E8)
+#define HDMI_V_BLANK_F_0 HDMI_CORE_BASE(0x0110)
+#define HDMI_V_BLANK_F_1 HDMI_CORE_BASE(0x0114)
+#define HDMI_V_BLANK_F_2 HDMI_CORE_BASE(0x0118)
+#define HDMI_H_SYNC_GEN_0 HDMI_CORE_BASE(0x0120)
+#define HDMI_H_SYNC_GEN_1 HDMI_CORE_BASE(0x0124)
+#define HDMI_H_SYNC_GEN_2 HDMI_CORE_BASE(0x0128)
+#define HDMI_V_SYNC_GEN_1_0 HDMI_CORE_BASE(0x0130)
+#define HDMI_V_SYNC_GEN_1_1 HDMI_CORE_BASE(0x0134)
+#define HDMI_V_SYNC_GEN_1_2 HDMI_CORE_BASE(0x0138)
+#define HDMI_V_SYNC_GEN_2_0 HDMI_CORE_BASE(0x0140)
+#define HDMI_V_SYNC_GEN_2_1 HDMI_CORE_BASE(0x0144)
+#define HDMI_V_SYNC_GEN_2_2 HDMI_CORE_BASE(0x0148)
+#define HDMI_V_SYNC_GEN_3_0 HDMI_CORE_BASE(0x0150)
+#define HDMI_V_SYNC_GEN_3_1 HDMI_CORE_BASE(0x0154)
+#define HDMI_V_SYNC_GEN_3_2 HDMI_CORE_BASE(0x0158)
+#define HDMI_ACR_CON HDMI_CORE_BASE(0x0180)
+#define HDMI_AVI_CON HDMI_CORE_BASE(0x0300)
+#define HDMI_AVI_BYTE(n) HDMI_CORE_BASE(0x0320 + 4 * (n))
+#define HDMI_DC_CONTROL HDMI_CORE_BASE(0x05C0)
+#define HDMI_VIDEO_PATTERN_GEN HDMI_CORE_BASE(0x05C4)
+#define HDMI_HPD_GEN HDMI_CORE_BASE(0x05C8)
+#define HDMI_AUI_CON HDMI_CORE_BASE(0x0360)
+#define HDMI_SPD_CON HDMI_CORE_BASE(0x0400)
+
+/* Timing generator registers */
+#define HDMI_TG_CMD HDMI_TG_BASE(0x0000)
+#define HDMI_TG_H_FSZ_L HDMI_TG_BASE(0x0018)
+#define HDMI_TG_H_FSZ_H HDMI_TG_BASE(0x001C)
+#define HDMI_TG_HACT_ST_L HDMI_TG_BASE(0x0020)
+#define HDMI_TG_HACT_ST_H HDMI_TG_BASE(0x0024)
+#define HDMI_TG_HACT_SZ_L HDMI_TG_BASE(0x0028)
+#define HDMI_TG_HACT_SZ_H HDMI_TG_BASE(0x002C)
+#define HDMI_TG_V_FSZ_L HDMI_TG_BASE(0x0030)
+#define HDMI_TG_V_FSZ_H HDMI_TG_BASE(0x0034)
+#define HDMI_TG_VSYNC_L HDMI_TG_BASE(0x0038)
+#define HDMI_TG_VSYNC_H HDMI_TG_BASE(0x003C)
+#define HDMI_TG_VSYNC2_L HDMI_TG_BASE(0x0040)
+#define HDMI_TG_VSYNC2_H HDMI_TG_BASE(0x0044)
+#define HDMI_TG_VACT_ST_L HDMI_TG_BASE(0x0048)
+#define HDMI_TG_VACT_ST_H HDMI_TG_BASE(0x004C)
+#define HDMI_TG_VACT_SZ_L HDMI_TG_BASE(0x0050)
+#define HDMI_TG_VACT_SZ_H HDMI_TG_BASE(0x0054)
+#define HDMI_TG_FIELD_CHG_L HDMI_TG_BASE(0x0058)
+#define HDMI_TG_FIELD_CHG_H HDMI_TG_BASE(0x005C)
+#define HDMI_TG_VACT_ST2_L HDMI_TG_BASE(0x0060)
+#define HDMI_TG_VACT_ST2_H HDMI_TG_BASE(0x0064)
+#define HDMI_TG_VSYNC_TOP_HDMI_L HDMI_TG_BASE(0x0078)
+#define HDMI_TG_VSYNC_TOP_HDMI_H HDMI_TG_BASE(0x007C)
+#define HDMI_TG_VSYNC_BOT_HDMI_L HDMI_TG_BASE(0x0080)
+#define HDMI_TG_VSYNC_BOT_HDMI_H HDMI_TG_BASE(0x0084)
+#define HDMI_TG_FIELD_TOP_HDMI_L HDMI_TG_BASE(0x0088)
+#define HDMI_TG_FIELD_TOP_HDMI_H HDMI_TG_BASE(0x008C)
+#define HDMI_TG_FIELD_BOT_HDMI_L HDMI_TG_BASE(0x0090)
+#define HDMI_TG_FIELD_BOT_HDMI_H HDMI_TG_BASE(0x0094)
+
+/*
+ * Bit definition part
+ */
+
+/* HDMI_INTC_CON */
+#define HDMI_INTC_EN_GLOBAL (1 << 6)
+#define HDMI_INTC_EN_HPD_PLUG (1 << 3)
+#define HDMI_INTC_EN_HPD_UNPLUG (1 << 2)
+
+/* HDMI_INTC_FLAG */
+#define HDMI_INTC_FLAG_HPD_PLUG (1 << 3)
+#define HDMI_INTC_FLAG_HPD_UNPLUG (1 << 2)
+
+/* HDMI_PHY_RSTOUT */
+#define HDMI_PHY_SW_RSTOUT (1 << 0)
+
+/* HDMI_CORE_RSTOUT */
+#define HDMI_CORE_SW_RSTOUT (1 << 0)
+
+/* HDMI_CON_0 */
+#define HDMI_BLUE_SCR_EN (1 << 5)
+#define HDMI_EN (1 << 0)
+
+/* HDMI_PHY_STATUS */
+#define HDMI_PHY_STATUS_READY (1 << 0)
+
+/* HDMI_MODE_SEL */
+#define HDMI_MODE_HDMI_EN (1 << 1)
+#define HDMI_MODE_DVI_EN (1 << 0)
+#define HDMI_MODE_MASK (3 << 0)
+
+/* HDMI_TG_CMD */
+#define HDMI_TG_EN (1 << 0)
+#define HDMI_FIELD_EN (1 << 1)
+
+#endif /* SAMSUNG_REGS_HDMI_H */
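
The timing generator registers above are split into _L/_H byte pairs (for
example HDMI_TG_H_FSZ_L and HDMI_TG_H_FSZ_H), so a 16-bit timing value is
presumably programmed as two 8-bit writes. A minimal sketch, assuming a
hypothetical hdmi_reg_writeb() accessor and hdmi_context structure standing
in for the driver's real register helpers (the actual Exynos HDMI driver
code lives elsewhere in this series):

	/* hdmi_tg_write16 - split a 16-bit timing value across an _L/_H pair */
	static void hdmi_tg_write16(struct hdmi_context *hdata,
				    u32 reg_l, u32 reg_h, u16 value)
	{
		hdmi_reg_writeb(hdata, reg_l, value & 0xff);	/* low byte */
		hdmi_reg_writeb(hdata, reg_h, value >> 8);	/* high byte */
	}

	/* e.g. hdmi_tg_write16(hdata, HDMI_TG_H_FSZ_L, HDMI_TG_H_FSZ_H, htotal); */
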
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
new file mode 100644
index 0000000..fd2f4d1
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-mixer.h
@@ -0,0 +1,141 @@
+/*
+ *
+ * Cloned from drivers/media/video/s5p-tv/regs-mixer.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Mixer register header file for Samsung Mixer driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+#ifndef SAMSUNG_REGS_MIXER_H
+#define SAMSUNG_REGS_MIXER_H
+
+/*
+ * Register part
+ */
+#define MXR_STATUS 0x0000
+#define MXR_CFG 0x0004
+#define MXR_INT_EN 0x0008
+#define MXR_INT_STATUS 0x000C
+#define MXR_LAYER_CFG 0x0010
+#define MXR_VIDEO_CFG 0x0014
+#define MXR_GRAPHIC0_CFG 0x0020
+#define MXR_GRAPHIC0_BASE 0x0024
+#define MXR_GRAPHIC0_SPAN 0x0028
+#define MXR_GRAPHIC0_SXY 0x002C
+#define MXR_GRAPHIC0_WH 0x0030
+#define MXR_GRAPHIC0_DXY 0x0034
+#define MXR_GRAPHIC0_BLANK 0x0038
+#define MXR_GRAPHIC1_CFG 0x0040
+#define MXR_GRAPHIC1_BASE 0x0044
+#define MXR_GRAPHIC1_SPAN 0x0048
+#define MXR_GRAPHIC1_SXY 0x004C
+#define MXR_GRAPHIC1_WH 0x0050
+#define MXR_GRAPHIC1_DXY 0x0054
+#define MXR_GRAPHIC1_BLANK 0x0058
+#define MXR_BG_CFG 0x0060
+#define MXR_BG_COLOR0 0x0064
+#define MXR_BG_COLOR1 0x0068
+#define MXR_BG_COLOR2 0x006C
+#define MXR_CM_COEFF_Y 0x0080
+#define MXR_CM_COEFF_CB 0x0084
+#define MXR_CM_COEFF_CR 0x0088
+#define MXR_GRAPHIC0_BASE_S 0x2024
+#define MXR_GRAPHIC1_BASE_S 0x2044
+
+/* for parametrized access to layer registers */
+#define MXR_GRAPHIC_CFG(i) (0x0020 + (i) * 0x20)
+#define MXR_GRAPHIC_BASE(i) (0x0024 + (i) * 0x20)
+#define MXR_GRAPHIC_SPAN(i) (0x0028 + (i) * 0x20)
+#define MXR_GRAPHIC_SXY(i) (0x002C + (i) * 0x20)
+#define MXR_GRAPHIC_WH(i) (0x0030 + (i) * 0x20)
+#define MXR_GRAPHIC_DXY(i) (0x0034 + (i) * 0x20)
+#define MXR_GRAPHIC_BLANK(i) (0x0038 + (i) * 0x20)
+#define MXR_GRAPHIC_BASE_S(i) (0x2024 + (i) * 0x20)
+
+/*
+ * Bit definition part
+ */
+
+/* generates mask for range of bits */
+#define MXR_MASK(high_bit, low_bit) \
+ (((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
+
+#define MXR_MASK_VAL(val, high_bit, low_bit) \
+ (((val) << (low_bit)) & MXR_MASK(high_bit, low_bit))
+
+/* bits for MXR_STATUS */
+#define MXR_STATUS_16_BURST (1 << 7)
+#define MXR_STATUS_BURST_MASK (1 << 7)
+#define MXR_STATUS_BIG_ENDIAN (1 << 3)
+#define MXR_STATUS_ENDIAN_MASK (1 << 3)
+#define MXR_STATUS_SYNC_ENABLE (1 << 2)
+#define MXR_STATUS_REG_RUN (1 << 0)
+
+/* bits for MXR_CFG */
+#define MXR_CFG_RGB601_0_255 (0 << 9)
+#define MXR_CFG_RGB601_16_235 (1 << 9)
+#define MXR_CFG_RGB709_0_255 (2 << 9)
+#define MXR_CFG_RGB709_16_235 (3 << 9)
+#define MXR_CFG_RGB_FMT_MASK 0x600
+#define MXR_CFG_OUT_YUV444 (0 << 8)
+#define MXR_CFG_OUT_RGB888 (1 << 8)
+#define MXR_CFG_OUT_MASK (1 << 8)
+#define MXR_CFG_DST_SDO (0 << 7)
+#define MXR_CFG_DST_HDMI (1 << 7)
+#define MXR_CFG_DST_MASK (1 << 7)
+#define MXR_CFG_SCAN_HD_720 (0 << 6)
+#define MXR_CFG_SCAN_HD_1080 (1 << 6)
+#define MXR_CFG_GRP1_ENABLE (1 << 5)
+#define MXR_CFG_GRP0_ENABLE (1 << 4)
+#define MXR_CFG_VP_ENABLE (1 << 3)
+#define MXR_CFG_SCAN_INTERLACE (0 << 2)
+#define MXR_CFG_SCAN_PROGRASSIVE (1 << 2)
+#define MXR_CFG_SCAN_NTSC (0 << 1)
+#define MXR_CFG_SCAN_PAL (1 << 1)
+#define MXR_CFG_SCAN_SD (0 << 0)
+#define MXR_CFG_SCAN_HD (1 << 0)
+#define MXR_CFG_SCAN_MASK 0x47
+
+/* bits for MXR_GRAPHICn_CFG */
+#define MXR_GRP_CFG_COLOR_KEY_DISABLE (1 << 21)
+#define MXR_GRP_CFG_BLEND_PRE_MUL (1 << 20)
+#define MXR_GRP_CFG_WIN_BLEND_EN (1 << 17)
+#define MXR_GRP_CFG_PIXEL_BLEND_EN (1 << 16)
+#define MXR_GRP_CFG_FORMAT_VAL(x) MXR_MASK_VAL(x, 11, 8)
+#define MXR_GRP_CFG_FORMAT_MASK MXR_GRP_CFG_FORMAT_VAL(~0)
+#define MXR_GRP_CFG_ALPHA_VAL(x) MXR_MASK_VAL(x, 7, 0)
+
+/* bits for MXR_GRAPHICn_WH */
+#define MXR_GRP_WH_H_SCALE(x) MXR_MASK_VAL(x, 28, 28)
+#define MXR_GRP_WH_V_SCALE(x) MXR_MASK_VAL(x, 12, 12)
+#define MXR_GRP_WH_WIDTH(x) MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_WH_HEIGHT(x) MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_GRAPHICn_SXY */
+#define MXR_GRP_SXY_SX(x) MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_SXY_SY(x) MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_GRAPHICn_DXY */
+#define MXR_GRP_DXY_DX(x) MXR_MASK_VAL(x, 26, 16)
+#define MXR_GRP_DXY_DY(x) MXR_MASK_VAL(x, 10, 0)
+
+/* bits for MXR_INT_EN */
+#define MXR_INT_EN_VSYNC (1 << 11)
+#define MXR_INT_EN_ALL (0x0f << 8)
+
+/* bit for MXR_INT_STATUS */
+#define MXR_INT_CLEAR_VSYNC (1 << 11)
+#define MXR_INT_STATUS_VSYNC (1 << 0)
+
+/* bit for MXR_LAYER_CFG */
+#define MXR_LAYER_CFG_GRP1_VAL(x) MXR_MASK_VAL(x, 11, 8)
+#define MXR_LAYER_CFG_GRP0_VAL(x) MXR_MASK_VAL(x, 7, 4)
+#define MXR_LAYER_CFG_VP_VAL(x) MXR_MASK_VAL(x, 3, 0)
+
+#endif /* SAMSUNG_REGS_MIXER_H */
+
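
MXR_MASK() builds a mask covering bits high_bit..low_bit and MXR_MASK_VAL()
shifts a value into that field and clips it to the mask; for instance
MXR_MASK(11, 8) expands to ((2 << 3) - 1) << 8 == 0x0f00, so
MXR_GRP_CFG_FORMAT_VAL(6) evaluates to 0x0600. A minimal sketch of
programming the graphics window 0 format field (mixer_regs is a hypothetical
ioremap'd register base; the real driver keeps it in its context structure):

	static void mixer_set_grp0_format(void __iomem *mixer_regs, u32 fmt)
	{
		u32 val = readl(mixer_regs + MXR_GRAPHIC_CFG(0));

		val &= ~MXR_GRP_CFG_FORMAT_MASK;	/* clear bits 11:8 */
		val |= MXR_GRP_CFG_FORMAT_VAL(fmt);	/* insert the format code */
		writel(val, mixer_regs + MXR_GRAPHIC_CFG(0));
	}
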
diff --git a/drivers/gpu/drm/exynos/regs-vp.h b/drivers/gpu/drm/exynos/regs-vp.h
new file mode 100644
index 0000000..10b737a
--- /dev/null
+++ b/drivers/gpu/drm/exynos/regs-vp.h
@@ -0,0 +1,91 @@
+/*
+ *
+ * Cloned from drivers/media/video/s5p-tv/regs-vp.h
+ *
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
+ * http://www.samsung.com/
+ *
+ * Video processor register header file for Samsung Mixer driver
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SAMSUNG_REGS_VP_H
+#define SAMSUNG_REGS_VP_H
+
+/*
+ * Register part
+ */
+
+#define VP_ENABLE 0x0000
+#define VP_SRESET 0x0004
+#define VP_SHADOW_UPDATE 0x0008
+#define VP_FIELD_ID 0x000C
+#define VP_MODE 0x0010
+#define VP_IMG_SIZE_Y 0x0014
+#define VP_IMG_SIZE_C 0x0018
+#define VP_PER_RATE_CTRL 0x001C
+#define VP_TOP_Y_PTR 0x0028
+#define VP_BOT_Y_PTR 0x002C
+#define VP_TOP_C_PTR 0x0030
+#define VP_BOT_C_PTR 0x0034
+#define VP_ENDIAN_MODE 0x03CC
+#define VP_SRC_H_POSITION 0x0044
+#define VP_SRC_V_POSITION 0x0048
+#define VP_SRC_WIDTH 0x004C
+#define VP_SRC_HEIGHT 0x0050
+#define VP_DST_H_POSITION 0x0054
+#define VP_DST_V_POSITION 0x0058
+#define VP_DST_WIDTH 0x005C
+#define VP_DST_HEIGHT 0x0060
+#define VP_H_RATIO 0x0064
+#define VP_V_RATIO 0x0068
+#define VP_POLY8_Y0_LL 0x006C
+#define VP_POLY4_Y0_LL 0x00EC
+#define VP_POLY4_C0_LL 0x012C
+
+/*
+ * Bit definition part
+ */
+
+/* generates mask for range of bits */
+
+#define VP_MASK(high_bit, low_bit) \
+ (((2 << ((high_bit) - (low_bit))) - 1) << (low_bit))
+
+#define VP_MASK_VAL(val, high_bit, low_bit) \
+ (((val) << (low_bit)) & VP_MASK(high_bit, low_bit))
+
+ /* VP_ENABLE */
+#define VP_ENABLE_ON (1 << 0)
+
+/* VP_SRESET */
+#define VP_SRESET_PROCESSING (1 << 0)
+
+/* VP_SHADOW_UPDATE */
+#define VP_SHADOW_UPDATE_ENABLE (1 << 0)
+
+/* VP_MODE */
+#define VP_MODE_NV12 (0 << 6)
+#define VP_MODE_NV21 (1 << 6)
+#define VP_MODE_LINE_SKIP (1 << 5)
+#define VP_MODE_MEM_LINEAR (0 << 4)
+#define VP_MODE_MEM_TILED (1 << 4)
+#define VP_MODE_FMT_MASK (5 << 4)
+#define VP_MODE_FIELD_ID_AUTO_TOGGLING (1 << 2)
+#define VP_MODE_2D_IPC (1 << 1)
+
+/* VP_IMG_SIZE_Y */
+/* VP_IMG_SIZE_C */
+#define VP_IMG_HSIZE(x) VP_MASK_VAL(x, 29, 16)
+#define VP_IMG_VSIZE(x) VP_MASK_VAL(x, 13, 0)
+
+/* VP_SRC_H_POSITION */
+#define VP_SRC_H_POSITION_VAL(x) VP_MASK_VAL(x, 14, 4)
+
+/* VP_ENDIAN_MODE */
+#define VP_ENDIAN_MODE_LITTLE (1 << 0)
+
+#endif /* SAMSUNG_REGS_VP_H */
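
VP_MASK()/VP_MASK_VAL() follow the same pattern as the mixer macros above;
VP_IMG_SIZE_Y, for example, carries the luma plane width in bits 29:16 and
its height in bits 13:0. A short sketch (vp_regs is again a hypothetical
register base):

	static void vp_set_luma_size(void __iomem *vp_regs, u32 width, u32 height)
	{
		/* e.g. 1920x1080: (1920 << 16) | 1080 == 0x07800438 */
		writel(VP_IMG_HSIZE(width) | VP_IMG_VSIZE(height),
		       vp_regs + VP_IMG_SIZE_Y);
	}
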
diff --git a/drivers/staging/gma500/Kconfig b/drivers/gpu/drm/gma500/Kconfig
index bfe2166..754e14b 100644
--- a/drivers/staging/gma500/Kconfig
+++ b/drivers/gpu/drm/gma500/Kconfig
@@ -1,6 +1,6 @@
-config DRM_PSB
+config DRM_GMA500
tristate "Intel GMA5/600 KMS Framebuffer"
- depends on DRM && PCI && X86
+ depends on DRM && PCI && X86 && EXPERIMENTAL
select FB_CFB_COPYAREA
select FB_CFB_FILLRECT
select FB_CFB_IMAGEBLIT
@@ -11,23 +11,17 @@ config DRM_PSB
Intel GMA500 ('Poulsbo') and other Intel IMG based graphics
devices.
-config DRM_PSB_MRST
+config DRM_GMA600
bool "Intel GMA600 support (Experimental)"
- depends on DRM_PSB
+ depends on DRM_GMA500
help
Say yes to include support for GMA600 (Intel Moorestown/Oaktrail)
platforms with LVDS ports. HDMI and MIPI are not currently
supported.
-config DRM_PSB_MFLD
- bool "Intel Medfield support (Experimental)"
- depends on DRM_PSB
+config DRM_GMA3600
+ bool "Intel GMA3600/3650 support (Experimental)"
+ depends on DRM_GMA500
help
- Say yes to include support for Intel Medfield platforms with MIPI
- interfaces.
-
-config DRM_PSB_CDV
- bool "Intel Cedarview support (Experimental)"
- depends on DRM_PSB
- help
- Say yes to include support for Intel Cedarview platforms
+ Say yes to include basic support for Intel GMA3600/3650 (Intel
+ Cedar Trail) platforms.
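
With the renamed symbols, a configuration that builds the driver as a module
and enables both platform variants would look like this hypothetical .config
fragment:

	CONFIG_DRM_GMA500=m
	CONFIG_DRM_GMA600=y
	CONFIG_DRM_GMA3600=y
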
diff --git a/drivers/gpu/drm/gma500/Makefile b/drivers/gpu/drm/gma500/Makefile
new file mode 100644
index 0000000..81c103b
--- /dev/null
+++ b/drivers/gpu/drm/gma500/Makefile
@@ -0,0 +1,40 @@
+#
+# KMS driver for the GMA500
+#
+ccflags-y += -Iinclude/drm
+
+gma500_gfx-y += gem_glue.o \
+ accel_2d.o \
+ backlight.o \
+ framebuffer.o \
+ gem.o \
+ gtt.o \
+ intel_bios.o \
+ intel_i2c.o \
+ intel_gmbus.o \
+ intel_opregion.o \
+ mmu.o \
+ power.o \
+ psb_drv.o \
+ psb_intel_display.o \
+ psb_intel_lvds.o \
+ psb_intel_modes.o \
+ psb_intel_sdvo.o \
+ psb_lid.o \
+ psb_irq.o \
+ psb_device.o \
+ mid_bios.o
+
+gma500_gfx-$(CONFIG_DRM_GMA3600) += cdv_device.o \
+ cdv_intel_crt.o \
+ cdv_intel_display.o \
+ cdv_intel_hdmi.o \
+ cdv_intel_lvds.o
+
+gma500_gfx-$(CONFIG_DRM_GMA600) += oaktrail_device.o \
+ oaktrail_crtc.o \
+ oaktrail_lvds.o \
+ oaktrail_hdmi.o \
+ oaktrail_hdmi_i2c.o
+
+obj-$(CONFIG_DRM_GMA500) += gma500_gfx.o
diff --git a/drivers/staging/gma500/accel_2d.c b/drivers/gpu/drm/gma500/accel_2d.c
index 114b99a..d5ef1a5 100644
--- a/drivers/staging/gma500/accel_2d.c
+++ b/drivers/gpu/drm/gma500/accel_2d.c
@@ -253,7 +253,7 @@ static void psbfb_copyarea_accel(struct fb_info *info,
return;
offset = psbfb->gtt->offset;
- stride = fb->pitch;
+ stride = fb->pitches[0];
switch (fb->depth) {
case 8:
@@ -362,53 +362,3 @@ out:
spin_unlock_irqrestore(&dev_priv->lock_2d, flags);
return (busy) ? -EBUSY : 0;
}
-
-int psb_accel_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct drm_psb_2d_op *op = data;
- u32 *op_ptr = &op->cmd[0];
- int i;
- struct drm_gem_object *obj;
- struct gtt_range *gtt;
- int err = -EINVAL;
-
- if (!dev_priv->ops->accel_2d)
- return -EOPNOTSUPP;
- if (op->size > PSB_2D_OP_BUFLEN)
- return -EINVAL;
-
- /* The GEM object being used. We need to support separate src/dst/etc
- in the end but for now keep them all the same */
- obj = drm_gem_object_lookup(dev, file, op->src);
- if (obj == NULL)
- return -ENOENT;
- gtt = container_of(obj, struct gtt_range, gem);
-
- if (psb_gtt_pin(gtt) < 0)
- goto bad_2;
- for (i = 0; i < op->size; i++, op_ptr++) {
- u32 r = *op_ptr & 0xF0000000;
- /* Fill in the GTT offsets for the command buffer */
- if (r == PSB_2D_SRC_SURF_BH ||
- r == PSB_2D_DST_SURF_BH ||
- r == PSB_2D_MASK_SURF_BH ||
- r == PSB_2D_PAT_SURF_BH) {
- i++;
- op_ptr++;
- if (i == op->size)
- goto bad;
- if (*op_ptr)
- goto bad;
- *op_ptr = gtt->offset;
- continue;
- }
- }
- psbfb_2d_submit(dev_priv, op->cmd, op->size);
- err = 0;
-bad:
- psb_gtt_unpin(gtt);
-bad_2:
- drm_gem_object_unreference(obj);
- return err;
-}
diff --git a/drivers/staging/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c
index 2079395..2079395 100644
--- a/drivers/staging/gma500/backlight.c
+++ b/drivers/gpu/drm/gma500/backlight.c
diff --git a/drivers/staging/gma500/cdv_device.c b/drivers/gpu/drm/gma500/cdv_device.c
index 8ec10ca..53404af 100644
--- a/drivers/staging/gma500/cdv_device.c
+++ b/drivers/gpu/drm/gma500/cdv_device.c
@@ -20,7 +20,7 @@
#include <linux/backlight.h>
#include <drm/drmP.h>
#include <drm/drm.h>
-#include "psb_drm.h"
+#include "gma_drm.h"
#include "psb_drv.h"
#include "psb_reg.h"
#include "psb_intel_reg.h"
@@ -321,15 +321,18 @@ static int cdv_chip_setup(struct drm_device *dev)
cdv_get_core_freq(dev);
gma_intel_opregion_init(dev);
psb_intel_init_bios(dev);
+ REG_WRITE(PORT_HOTPLUG_EN, 0);
+ REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
return 0;
}
/* CDV is much like Poulsbo but has MID like SGX offsets and PM */
const struct psb_ops cdv_chip_ops = {
- .name = "Cedartrail",
+ .name = "GMA3600/3650",
.accel_2d = 0,
.pipes = 2,
+ .crtcs = 2,
.sgx_offset = MRST_SGX_OFFSET,
.chip_setup = cdv_chip_setup,
diff --git a/drivers/staging/gma500/cdv_device.h b/drivers/gpu/drm/gma500/cdv_device.h
index 2a88b7b..2a88b7b 100644
--- a/drivers/staging/gma500/cdv_device.h
+++ b/drivers/gpu/drm/gma500/cdv_device.h
diff --git a/drivers/staging/gma500/cdv_intel_crt.c b/drivers/gpu/drm/gma500/cdv_intel_crt.c
index efda63b..c100f3e9 100644
--- a/drivers/staging/gma500/cdv_intel_crt.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_crt.c
@@ -66,6 +66,7 @@ static void cdv_intel_crt_dpms(struct drm_encoder *encoder, int mode)
static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
int max_clock = 0;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
@@ -82,6 +83,11 @@ static int cdv_intel_crt_mode_valid(struct drm_connector *connector,
if (mode->hdisplay > 1680 || mode->vdisplay > 1050)
return MODE_PANEL;
+ /* We assume worst case scenario of 32 bpp here, since we don't know */
+ if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) >
+ dev_priv->vram_stolen_size)
+ return MODE_MEM;
+
return MODE_OK;
}
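
The added check rejects any mode whose worst-case (32 bpp) framebuffer would
not fit in stolen memory; for a 1680x1050 mode, for example:

	ALIGN(1680 * 4, 64) * 1050 = 6720 * 1050 = 7,056,000 bytes (~6.7 MiB)

so such a mode is only reported as usable when vram_stolen_size is at least
that large. The same test is added to the CDV HDMI mode_valid further below.
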
@@ -204,9 +210,10 @@ static enum drm_connector_status cdv_intel_crt_detect(
static void cdv_intel_crt_destroy(struct drm_connector *connector)
{
- struct psb_intel_output *intel_output = to_psb_intel_output(connector);
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
- psb_intel_i2c_destroy(intel_output->ddc_bus);
+ psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -214,9 +221,9 @@ static void cdv_intel_crt_destroy(struct drm_connector *connector)
static int cdv_intel_crt_get_modes(struct drm_connector *connector)
{
- struct psb_intel_output *intel_output =
- to_psb_intel_output(connector);
- return psb_intel_ddc_get_modes(intel_output);
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ return psb_intel_ddc_get_modes(connector, &psb_intel_encoder->ddc_bus->adapter);
}
static int cdv_intel_crt_set_property(struct drm_connector *connector,
@@ -266,27 +273,31 @@ void cdv_intel_crt_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev)
{
- struct psb_intel_output *psb_intel_output;
+ struct psb_intel_connector *psb_intel_connector;
+ struct psb_intel_encoder *psb_intel_encoder;
struct drm_connector *connector;
struct drm_encoder *encoder;
u32 i2c_reg;
- psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
- if (!psb_intel_output)
+ psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+ if (!psb_intel_encoder)
return;
- psb_intel_output->mode_dev = mode_dev;
- connector = &psb_intel_output->base;
+ psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+ if (!psb_intel_connector)
+ goto failed_connector;
+
+ connector = &psb_intel_connector->base;
drm_connector_init(dev, connector,
&cdv_intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
- encoder = &psb_intel_output->enc;
+ encoder = &psb_intel_encoder->base;
drm_encoder_init(dev, encoder,
&cdv_intel_crt_enc_funcs, DRM_MODE_ENCODER_DAC);
- drm_mode_connector_attach_encoder(&psb_intel_output->base,
- &psb_intel_output->enc);
+ psb_intel_connector_attach_encoder(psb_intel_connector,
+ psb_intel_encoder);
/* Set up the DDC bus. */
i2c_reg = GPIOA;
@@ -295,15 +306,15 @@ void cdv_intel_crt_init(struct drm_device *dev,
if (dev_priv->crt_ddc_bus != 0)
i2c_reg = dev_priv->crt_ddc_bus;
}*/
- psb_intel_output->ddc_bus = psb_intel_i2c_create(dev,
- i2c_reg, "CRTDDC_A");
- if (!psb_intel_output->ddc_bus) {
+ psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev,
+ i2c_reg, "CRTDDC_A");
+ if (!psb_intel_encoder->ddc_bus) {
dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
"failed.\n");
goto failed_ddc;
}
- psb_intel_output->type = INTEL_OUTPUT_ANALOG;
+ psb_intel_encoder->type = INTEL_OUTPUT_ANALOG;
/*
psb_intel_output->clone_mask = (1 << INTEL_ANALOG_CLONE_BIT);
psb_intel_output->crtc_mask = (1 << 0) | (1 << 1);
@@ -319,8 +330,10 @@ void cdv_intel_crt_init(struct drm_device *dev,
return;
failed_ddc:
- drm_encoder_cleanup(&psb_intel_output->enc);
- drm_connector_cleanup(&psb_intel_output->base);
- kfree(psb_intel_output);
+ drm_encoder_cleanup(&psb_intel_encoder->base);
+ drm_connector_cleanup(&psb_intel_connector->base);
+ kfree(psb_intel_connector);
+failed_connector:
+ kfree(psb_intel_encoder);
return;
}
diff --git a/drivers/staging/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 7b97c60..18d1152 100644
--- a/drivers/staging/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -342,7 +342,7 @@ cdv_dpll_set_clock_cdv(struct drm_device *dev, struct drm_crtc *crtc,
}
/*
- * Returns whether any output on the specified pipe is of the specified type
+ * Returns whether any encoder on the specified pipe is of the specified type
*/
bool cdv_intel_pipe_has_type(struct drm_crtc *crtc, int type)
{
@@ -352,9 +352,9 @@ bool cdv_intel_pipe_has_type(struct drm_crtc *crtc, int type)
list_for_each_entry(l_entry, &mode_config->connector_list, head) {
if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
- struct psb_intel_output *psb_intel_output =
- to_psb_intel_output(l_entry);
- if (psb_intel_output->type == type)
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(l_entry);
+ if (psb_intel_encoder->type == type)
return true;
}
}
@@ -507,9 +507,9 @@ int cdv_intel_pipe_set_base(struct drm_crtc *crtc,
if (ret < 0)
goto psb_intel_pipe_set_base_exit;
start = psbfb->gtt->offset;
- offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+ offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
- REG_WRITE(dspstride, crtc->fb->pitch);
+ REG_WRITE(dspstride, crtc->fb->pitches[0]);
dspcntr = REG_READ(dspcntr_reg);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
@@ -752,14 +752,14 @@ static int cdv_intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_connector *connector;
list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct psb_intel_output *psb_intel_output =
- to_psb_intel_output(connector);
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
if (!connector->encoder
|| connector->encoder->crtc != crtc)
continue;
- switch (psb_intel_output->type) {
+ switch (psb_intel_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
diff --git a/drivers/staging/gma500/cdv_intel_hdmi.c b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
index cbca2b0..de25560 100644
--- a/drivers/staging/gma500/cdv_intel_hdmi.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_hdmi.c
@@ -63,8 +63,8 @@ static void cdv_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
- struct mid_intel_hdmi_priv *hdmi_priv = output->dev_priv;
+ struct psb_intel_encoder *psb_intel_encoder = to_psb_intel_encoder(encoder);
+ struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
u32 hdmib;
struct drm_crtc *crtc = encoder->crtc;
struct psb_intel_crtc *intel_crtc = to_psb_intel_crtc(crtc);
@@ -98,8 +98,9 @@ static bool cdv_hdmi_mode_fixup(struct drm_encoder *encoder,
static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
- struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
- struct mid_intel_hdmi_priv *hdmi_priv = output->dev_priv;
+ struct psb_intel_encoder *psb_intel_encoder =
+ to_psb_intel_encoder(encoder);
+ struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
u32 hdmib;
hdmib = REG_READ(hdmi_priv->hdmi_reg);
@@ -114,8 +115,9 @@ static void cdv_hdmi_dpms(struct drm_encoder *encoder, int mode)
static void cdv_hdmi_save(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct psb_intel_output *output = to_psb_intel_output(connector);
- struct mid_intel_hdmi_priv *hdmi_priv = output->dev_priv;
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
hdmi_priv->save_HDMIB = REG_READ(hdmi_priv->hdmi_reg);
}
@@ -123,8 +125,9 @@ static void cdv_hdmi_save(struct drm_connector *connector)
static void cdv_hdmi_restore(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct psb_intel_output *output = to_psb_intel_output(connector);
- struct mid_intel_hdmi_priv *hdmi_priv = output->dev_priv;
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
REG_WRITE(hdmi_priv->hdmi_reg, hdmi_priv->save_HDMIB);
REG_READ(hdmi_priv->hdmi_reg);
@@ -133,14 +136,15 @@ static void cdv_hdmi_restore(struct drm_connector *connector)
static enum drm_connector_status cdv_hdmi_detect(
struct drm_connector *connector, bool force)
{
- struct psb_intel_output *psb_intel_output =
- to_psb_intel_output(connector);
- struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_output->dev_priv;
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct psb_intel_connector *psb_intel_connector =
+ to_psb_intel_connector(connector);
+ struct mid_intel_hdmi_priv *hdmi_priv = psb_intel_encoder->dev_priv;
struct edid *edid = NULL;
enum drm_connector_status status = connector_status_disconnected;
- edid = drm_get_edid(&psb_intel_output->base,
- psb_intel_output->hdmi_i2c_adapter);
+ edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter);
hdmi_priv->has_hdmi_sink = false;
hdmi_priv->has_hdmi_audio = false;
@@ -153,7 +157,7 @@ static enum drm_connector_status cdv_hdmi_detect(
drm_detect_monitor_audio(edid);
}
- psb_intel_output->base.display_info.raw_edid = NULL;
+ psb_intel_connector->base.display_info.raw_edid = NULL;
kfree(edid);
}
return status;
@@ -220,17 +224,15 @@ static int cdv_hdmi_set_property(struct drm_connector *connector,
*/
static int cdv_hdmi_get_modes(struct drm_connector *connector)
{
- struct psb_intel_output *psb_intel_output =
- to_psb_intel_output(connector);
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
struct edid *edid = NULL;
int ret = 0;
- edid = drm_get_edid(&psb_intel_output->base,
- psb_intel_output->hdmi_i2c_adapter);
+ edid = drm_get_edid(connector, &psb_intel_encoder->i2c_bus->adapter);
if (edid) {
- drm_mode_connector_update_edid_property(&psb_intel_output->
- base, edid);
- ret = drm_add_edid_modes(&psb_intel_output->base, edid);
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
kfree(edid);
}
return ret;
@@ -239,6 +241,7 @@ static int cdv_hdmi_get_modes(struct drm_connector *connector)
static int cdv_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
@@ -253,24 +256,21 @@ static int cdv_hdmi_mode_valid(struct drm_connector *connector,
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
return MODE_NO_INTERLACE;
- /*
- * FIXME: for now we limit the size to 1680x1050 on CDV, otherwise it
- * will go beyond the stolen memory size allocated to the framebuffer
- */
- if (mode->hdisplay > 1680)
- return MODE_PANEL;
- if (mode->vdisplay > 1050)
- return MODE_PANEL;
+ /* We assume worst case scenario of 32 bpp here, since we don't know */
+ if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) >
+ dev_priv->vram_stolen_size)
+ return MODE_MEM;
+
return MODE_OK;
}
static void cdv_hdmi_destroy(struct drm_connector *connector)
{
- struct psb_intel_output *psb_intel_output =
- to_psb_intel_output(connector);
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
- if (psb_intel_output->ddc_bus)
- psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
+ if (psb_intel_encoder->i2c_bus)
+ psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -304,34 +304,45 @@ static const struct drm_connector_funcs cdv_hdmi_connector_funcs = {
void cdv_hdmi_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev, int reg)
{
- struct psb_intel_output *psb_intel_output;
+ struct psb_intel_encoder *psb_intel_encoder;
+ struct psb_intel_connector *psb_intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
struct mid_intel_hdmi_priv *hdmi_priv;
int ddc_bus;
- psb_intel_output = kzalloc(sizeof(struct psb_intel_output) +
- sizeof(struct mid_intel_hdmi_priv), GFP_KERNEL);
- if (!psb_intel_output)
+ psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder),
+ GFP_KERNEL);
+
+ if (!psb_intel_encoder)
return;
- hdmi_priv = (struct mid_intel_hdmi_priv *)(psb_intel_output + 1);
- psb_intel_output->mode_dev = mode_dev;
- connector = &psb_intel_output->base;
- encoder = &psb_intel_output->enc;
- drm_connector_init(dev, &psb_intel_output->base,
+ psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector),
+ GFP_KERNEL);
+
+ if (!psb_intel_connector)
+ goto err_connector;
+
+ hdmi_priv = kzalloc(sizeof(struct mid_intel_hdmi_priv), GFP_KERNEL);
+
+ if (!hdmi_priv)
+ goto err_priv;
+
+ connector = &psb_intel_connector->base;
+ encoder = &psb_intel_encoder->base;
+ drm_connector_init(dev, connector,
&cdv_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_DVID);
- drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
+ drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
DRM_MODE_ENCODER_TMDS);
- drm_mode_connector_attach_encoder(&psb_intel_output->base,
- &psb_intel_output->enc);
- psb_intel_output->type = INTEL_OUTPUT_HDMI;
+ psb_intel_connector_attach_encoder(psb_intel_connector,
+ psb_intel_encoder);
+ psb_intel_encoder->type = INTEL_OUTPUT_HDMI;
hdmi_priv->hdmi_reg = reg;
hdmi_priv->has_hdmi_sink = false;
- psb_intel_output->dev_priv = hdmi_priv;
+ psb_intel_encoder->dev_priv = hdmi_priv;
drm_encoder_helper_add(encoder, &cdv_hdmi_helper_funcs);
drm_connector_helper_add(connector,
@@ -341,7 +352,8 @@ void cdv_hdmi_init(struct drm_device *dev,
connector->doublescan_allowed = false;
drm_connector_attach_property(connector,
- dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN);
+ dev->mode_config.scaling_mode_property,
+ DRM_MODE_SCALE_FULLSCREEN);
switch (reg) {
case SDVOB:
@@ -356,21 +368,25 @@ void cdv_hdmi_init(struct drm_device *dev,
break;
}
- psb_intel_output->ddc_bus = psb_intel_i2c_create(dev,
+ psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev,
ddc_bus, (reg == SDVOB) ? "HDMIB" : "HDMIC");
- if (!psb_intel_output->ddc_bus) {
+ if (!psb_intel_encoder->i2c_bus) {
dev_err(dev->dev, "No ddc adapter available!\n");
goto failed_ddc;
}
- psb_intel_output->hdmi_i2c_adapter =
- &(psb_intel_output->ddc_bus->adapter);
+
+ hdmi_priv->hdmi_i2c_adapter =
+ &(psb_intel_encoder->i2c_bus->adapter);
hdmi_priv->dev = dev;
drm_sysfs_connector_add(connector);
return;
failed_ddc:
- drm_encoder_cleanup(&psb_intel_output->enc);
- drm_connector_cleanup(&psb_intel_output->base);
- kfree(psb_intel_output);
+ drm_encoder_cleanup(encoder);
+ drm_connector_cleanup(connector);
+err_priv:
+ kfree(psb_intel_connector);
+err_connector:
+ kfree(psb_intel_encoder);
}
diff --git a/drivers/staging/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 988b2d0..50e744b 100644
--- a/drivers/staging/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -195,8 +195,9 @@ static void cdv_intel_lvds_set_backlight(struct drm_device *dev, int level)
* Sets the power state for the panel.
*/
static void cdv_intel_lvds_set_power(struct drm_device *dev,
- struct psb_intel_output *output, bool on)
+ struct drm_encoder *encoder, bool on)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
u32 pp_status;
if (!gma_power_begin(dev, true))
@@ -210,8 +211,7 @@ static void cdv_intel_lvds_set_power(struct drm_device *dev,
} while ((pp_status & PP_ON) == 0);
cdv_intel_lvds_set_backlight(dev,
- output->
- mode_dev->backlight_duty_cycle);
+ dev_priv->mode_dev.backlight_duty_cycle);
} else {
cdv_intel_lvds_set_backlight(dev, 0);
@@ -227,11 +227,10 @@ static void cdv_intel_lvds_set_power(struct drm_device *dev,
static void cdv_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
- struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
if (mode == DRM_MODE_DPMS_ON)
- cdv_intel_lvds_set_power(dev, output, true);
+ cdv_intel_lvds_set_power(dev, encoder, true);
else
- cdv_intel_lvds_set_power(dev, output, false);
+ cdv_intel_lvds_set_power(dev, encoder, false);
/* XXX: We never power down the LVDS pairs. */
}
@@ -244,12 +243,12 @@ static void cdv_intel_lvds_restore(struct drm_connector *connector)
}
int cdv_intel_lvds_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
+ struct drm_display_mode *mode)
{
- struct psb_intel_output *psb_intel_output =
- to_psb_intel_output(connector);
+ struct drm_device *dev = connector->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct drm_display_mode *fixed_mode =
- psb_intel_output->mode_dev->panel_fixed_mode;
+ dev_priv->mode_dev.panel_fixed_mode;
/* just in case */
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -272,9 +271,9 @@ bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct psb_intel_mode_device *mode_dev =
- enc_to_psb_intel_output(encoder)->mode_dev;
struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
struct drm_encoder *tmp_encoder;
struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
@@ -321,8 +320,8 @@ bool cdv_intel_lvds_mode_fixup(struct drm_encoder *encoder,
static void cdv_intel_lvds_prepare(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
- struct psb_intel_mode_device *mode_dev = output->mode_dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
if (!gma_power_begin(dev, true))
return;
@@ -331,7 +330,7 @@ static void cdv_intel_lvds_prepare(struct drm_encoder *encoder)
mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
BACKLIGHT_DUTY_CYCLE_MASK);
- cdv_intel_lvds_set_power(dev, output, false);
+ cdv_intel_lvds_set_power(dev, encoder, false);
gma_power_end(dev);
}
@@ -339,14 +338,14 @@ static void cdv_intel_lvds_prepare(struct drm_encoder *encoder)
static void cdv_intel_lvds_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
- struct psb_intel_mode_device *mode_dev = output->mode_dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
if (mode_dev->backlight_duty_cycle == 0)
mode_dev->backlight_duty_cycle =
cdv_intel_lvds_get_max_backlight(dev);
- cdv_intel_lvds_set_power(dev, output, true);
+ cdv_intel_lvds_set_power(dev, encoder, true);
}
static void cdv_intel_lvds_mode_set(struct drm_encoder *encoder,
@@ -401,13 +400,13 @@ static enum drm_connector_status cdv_intel_lvds_detect(
static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct psb_intel_output *psb_intel_output =
- to_psb_intel_output(connector);
- struct psb_intel_mode_device *mode_dev =
- psb_intel_output->mode_dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
int ret;
- ret = psb_intel_ddc_get_modes(psb_intel_output);
+ ret = psb_intel_ddc_get_modes(connector, &psb_intel_encoder->i2c_bus->adapter);
if (ret)
return ret;
@@ -439,11 +438,11 @@ static int cdv_intel_lvds_get_modes(struct drm_connector *connector)
*/
void cdv_intel_lvds_destroy(struct drm_connector *connector)
{
- struct psb_intel_output *psb_intel_output =
- to_psb_intel_output(connector);
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
- if (psb_intel_output->ddc_bus)
- psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
+ if (psb_intel_encoder->i2c_bus)
+ psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -565,7 +564,8 @@ const struct drm_encoder_funcs cdv_intel_lvds_enc_funcs = {
void cdv_intel_lvds_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev)
{
- struct psb_intel_output *psb_intel_output;
+ struct psb_intel_encoder *psb_intel_encoder;
+ struct psb_intel_connector *psb_intel_connector;
struct cdv_intel_lvds_priv *lvds_priv;
struct drm_connector *connector;
struct drm_encoder *encoder;
@@ -575,32 +575,38 @@ void cdv_intel_lvds_init(struct drm_device *dev,
u32 lvds;
int pipe;
- psb_intel_output = kzalloc(sizeof(struct psb_intel_output) +
- sizeof(struct cdv_intel_lvds_priv), GFP_KERNEL);
- if (!psb_intel_output)
+ psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder),
+ GFP_KERNEL);
+ if (!psb_intel_encoder)
return;
- lvds_priv = (struct cdv_intel_lvds_priv *)(psb_intel_output + 1);
+ psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector),
+ GFP_KERNEL);
+ if (!psb_intel_connector)
+ goto failed_connector;
- psb_intel_output->dev_priv = lvds_priv;
+ lvds_priv = kzalloc(sizeof(struct cdv_intel_lvds_priv), GFP_KERNEL);
+ if (!lvds_priv)
+ goto failed_lvds_priv;
- psb_intel_output->mode_dev = mode_dev;
- connector = &psb_intel_output->base;
- encoder = &psb_intel_output->enc;
+ psb_intel_encoder->dev_priv = lvds_priv;
+ connector = &psb_intel_connector->base;
+ encoder = &psb_intel_encoder->base;
- drm_connector_init(dev, &psb_intel_output->base,
+
+ drm_connector_init(dev, connector,
&cdv_intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- drm_encoder_init(dev, &psb_intel_output->enc,
+ drm_encoder_init(dev, encoder,
&cdv_intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS);
- drm_mode_connector_attach_encoder(&psb_intel_output->base,
- &psb_intel_output->enc);
- psb_intel_output->type = INTEL_OUTPUT_LVDS;
+ psb_intel_connector_attach_encoder(psb_intel_connector,
+ psb_intel_encoder);
+ psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
drm_encoder_helper_add(encoder, &cdv_intel_lvds_helper_funcs);
drm_connector_helper_add(connector,
@@ -621,16 +627,16 @@ void cdv_intel_lvds_init(struct drm_device *dev,
* Set up I2C bus
	 * FIXME: destroy i2c_bus on exit
*/
- psb_intel_output->i2c_bus = psb_intel_i2c_create(dev,
+ psb_intel_encoder->i2c_bus = psb_intel_i2c_create(dev,
GPIOB,
"LVDSBLC_B");
- if (!psb_intel_output->i2c_bus) {
+ if (!psb_intel_encoder->i2c_bus) {
dev_printk(KERN_ERR,
&dev->pdev->dev, "I2C bus registration failed.\n");
goto failed_blc_i2c;
}
- psb_intel_output->i2c_bus->slave_addr = 0x2C;
- dev_priv->lvds_i2c_bus = psb_intel_output->i2c_bus;
+ psb_intel_encoder->i2c_bus->slave_addr = 0x2C;
+ dev_priv->lvds_i2c_bus = psb_intel_encoder->i2c_bus;
/*
* LVDS discovery:
@@ -643,10 +649,10 @@ void cdv_intel_lvds_init(struct drm_device *dev,
*/
/* Set up the DDC bus. */
- psb_intel_output->ddc_bus = psb_intel_i2c_create(dev,
+ psb_intel_encoder->ddc_bus = psb_intel_i2c_create(dev,
GPIOC,
"LVDSDDC_C");
- if (!psb_intel_output->ddc_bus) {
+ if (!psb_intel_encoder->ddc_bus) {
dev_printk(KERN_ERR, &dev->pdev->dev,
"DDC bus registration " "failed.\n");
goto failed_ddc;
@@ -656,7 +662,8 @@ void cdv_intel_lvds_init(struct drm_device *dev,
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
- psb_intel_ddc_get_modes(psb_intel_output);
+ psb_intel_ddc_get_modes(connector,
+ &psb_intel_encoder->ddc_bus->adapter);
list_for_each_entry(scan, &connector->probed_modes, head) {
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
mode_dev->panel_fixed_mode =
@@ -707,15 +714,19 @@ out:
failed_find:
printk(KERN_ERR "Failed find\n");
- if (psb_intel_output->ddc_bus)
- psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
+ if (psb_intel_encoder->ddc_bus)
+ psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
failed_ddc:
printk(KERN_ERR "Failed DDC\n");
- if (psb_intel_output->i2c_bus)
- psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
+ if (psb_intel_encoder->i2c_bus)
+ psb_intel_i2c_destroy(psb_intel_encoder->i2c_bus);
failed_blc_i2c:
printk(KERN_ERR "Failed BLC\n");
drm_encoder_cleanup(encoder);
drm_connector_cleanup(connector);
- kfree(connector);
+ kfree(lvds_priv);
+failed_lvds_priv:
+ kfree(psb_intel_connector);
+failed_connector:
+ kfree(psb_intel_encoder);
}
diff --git a/drivers/staging/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 3f39a37..be61673 100644
--- a/drivers/staging/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -32,6 +32,7 @@
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/drm_crtc.h>
+#include <drm/drm_fb_helper.h>
#include "psb_drv.h"
#include "psb_intel_reg.h"
@@ -39,8 +40,6 @@
#include "framebuffer.h"
#include "gtt.h"
-#include "mdfld_output.h"
-
static void psb_user_framebuffer_destroy(struct drm_framebuffer *fb);
static int psb_user_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv,
@@ -103,19 +102,23 @@ static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
* panning is part of the hardware that can be invoked before
* the actual fb is mapped. In our case that isn't quite true.
*/
- if (psbfb->gtt->npage)
- psb_gtt_roll(dev, psbfb->gtt, var->yoffset);
- return 0;
+ if (psbfb->gtt->npage) {
+        	/* GTT rolling shifts in 4K pages, so we need to shift by the
+		   right number of pages */
+ int pages = info->fix.line_length >> 12;
+ psb_gtt_roll(dev, psbfb->gtt, var->yoffset * pages);
+ }
+ return 0;
}
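
The pan fix converts the caller's line offset into 4K GTT pages:
info->fix.line_length is the byte pitch of the framebuffer, so
line_length >> 12 gives the number of GTT pages occupied by one scanline.
For example, with an 8192-byte pitch (2048 pixels at 32 bpp) each line is
two pages, and panning down 300 lines rolls the GTT by 300 * 2 = 600 pages.
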
void psbfb_suspend(struct drm_device *dev)
{
- struct drm_framebuffer *fb = 0;
- struct psb_framebuffer *psbfb = to_psb_fb(fb);
+ struct drm_framebuffer *fb;
console_lock();
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+ struct psb_framebuffer *psbfb = to_psb_fb(fb);
struct fb_info *info = psbfb->fbdev;
fb_set_suspend(info, 1);
drm_fb_helper_blank(FB_BLANK_POWERDOWN, info);
@@ -126,12 +129,12 @@ void psbfb_suspend(struct drm_device *dev)
void psbfb_resume(struct drm_device *dev)
{
- struct drm_framebuffer *fb = 0;
- struct psb_framebuffer *psbfb = to_psb_fb(fb);
+ struct drm_framebuffer *fb;
console_lock();
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+ struct psb_framebuffer *psbfb = to_psb_fb(fb);
struct fb_info *info = psbfb->fbdev;
fb_set_suspend(info, 0);
drm_fb_helper_blank(FB_BLANK_UNBLANK, info);
@@ -244,7 +247,6 @@ static struct fb_ops psbfb_roll_ops = {
.fb_imageblit = cfb_imageblit,
.fb_pan_display = psbfb_pan,
.fb_mmap = psbfb_mmap,
- .fb_sync = psbfb_sync,
.fb_ioctl = psbfb_ioctl,
};
@@ -273,14 +275,17 @@ static struct fb_ops psbfb_unaccel_ops = {
*/
static int psb_framebuffer_init(struct drm_device *dev,
struct psb_framebuffer *fb,
- struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_mode_fb_cmd2 *mode_cmd,
struct gtt_range *gt)
{
+ u32 bpp, depth;
int ret;
- if (mode_cmd->pitch & 63)
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
+
+ if (mode_cmd->pitches[0] & 63)
return -EINVAL;
- switch (mode_cmd->bpp) {
+ switch (bpp) {
case 8:
case 16:
case 24:
@@ -313,7 +318,7 @@ static int psb_framebuffer_init(struct drm_device *dev,
static struct drm_framebuffer *psb_framebuffer_create
(struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_mode_fb_cmd2 *mode_cmd,
struct gtt_range *gt)
{
struct psb_framebuffer *fb;
@@ -339,13 +344,12 @@ static struct drm_framebuffer *psb_framebuffer_create
*
* Allocate the frame buffer. In the usual case we get a GTT range that
* is stolen memory backed and life is simple. If there isn't sufficient
- * stolen memory or the system has no stolen memory we allocate a range
- * and back it with a GEM object.
+ * stolen memory we fail, since we don't have the virtual mapping space to
+ * really vmap it and the kernel console code can't handle non-linear
+ * framebuffers.
 *
- * In this case the GEM object has no handle.
+ * Re-address this if and when the framebuffer layer grows this ability.
*/
-static struct gtt_range *psbfb_alloc(struct drm_device *dev,
- int aligned_size, int force)
+static struct gtt_range *psbfb_alloc(struct drm_device *dev, int aligned_size)
{
struct gtt_range *backing;
/* Begin by trying to use stolen memory backing */
@@ -356,20 +360,7 @@ static struct gtt_range *psbfb_alloc(struct drm_device *dev,
return backing;
psb_gtt_free_range(dev, backing);
}
- if (!force)
- return NULL;
-
- /* Next try using GEM host memory */
- backing = psb_gtt_alloc_range(dev, aligned_size, "fb(gem)", 0);
- if (backing == NULL)
- return NULL;
-
- /* Now back it with an object */
- if (drm_gem_object_init(dev, &backing->gem, aligned_size) != 0) {
- psb_gtt_free_range(dev, backing);
- return NULL;
- }
- return backing;
+ return NULL;
}
/**
@@ -387,31 +378,48 @@ static int psbfb_create(struct psb_fbdev *fbdev,
struct fb_info *info;
struct drm_framebuffer *fb;
struct psb_framebuffer *psbfb = &fbdev->pfb;
- struct drm_mode_fb_cmd mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd;
struct device *device = &dev->pdev->dev;
int size;
int ret;
struct gtt_range *backing;
- int gtt_roll = 1;
+ u32 bpp, depth;
+ int gtt_roll = 0;
+ int pitch_lines = 0;
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
- mode_cmd.bpp = sizes->surface_bpp;
+ bpp = sizes->surface_bpp;
/* No 24bit packed */
- if (mode_cmd.bpp == 24)
- mode_cmd.bpp = 32;
+ if (bpp == 24)
+ bpp = 32;
+
+ do {
+ /*
+ * Acceleration via the GTT requires pitch to be
+		 * Acceleration via the GTT requires the pitch to be
+		 * power-of-two aligned, preferably to a page, but a
+		 * smaller alignment is OK with some fonts
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 4096 >> pitch_lines);
+ depth = sizes->surface_depth;
- /* Acceleration via the GTT requires pitch to be 4096 byte aligned
- (ie 1024 or 2048 pixels in normal use) */
- mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 4096);
- mode_cmd.depth = sizes->surface_depth;
+ size = mode_cmd.pitches[0] * mode_cmd.height;
+ size = ALIGN(size, PAGE_SIZE);
- size = mode_cmd.pitch * mode_cmd.height;
- size = ALIGN(size, PAGE_SIZE);
+ /* Allocate the fb in the GTT with stolen page backing */
+ backing = psbfb_alloc(dev, size);
+
+ if (pitch_lines)
+ pitch_lines *= 2;
+ else
+ pitch_lines = 1;
+ gtt_roll++;
+ } while (backing == NULL && pitch_lines <= 16);
+
+ /* The final pitch we accepted if we succeeded */
+ pitch_lines /= 2;
- /* Allocate the framebuffer in the GTT with stolen page backing */
- backing = psbfb_alloc(dev, size, 0);
if (backing == NULL) {
/*
* We couldn't get the space we wanted, fall back to the
@@ -420,16 +428,15 @@ static int psbfb_create(struct psb_fbdev *fbdev,
*/
gtt_roll = 0; /* Don't use GTT accelerated scrolling */
+ pitch_lines = 64;
- mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
- mode_cmd.depth = sizes->surface_depth;
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 7) / 8), 64);
- size = mode_cmd.pitch * mode_cmd.height;
+ size = mode_cmd.pitches[0] * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
- /* Allocate the framebuffer in the GTT with stolen page
- backing when there is room */
- backing = psbfb_alloc(dev, size, 1);
+ /* Allocate the framebuffer in the GTT with stolen page backing */
+ backing = psbfb_alloc(dev, size);
if (backing == NULL)
return -ENOMEM;
}
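
The new allocation loop in psbfb_create() steps the pitch alignment
(4096 >> pitch_lines) down through 4096, 2048, 1024, 256 and 16 bytes until
a stolen-memory allocation succeeds. For a 1366-pixel-wide, 32 bpp surface
(5464 bytes per line) the candidate pitches work out to:

	ALIGN(5464, 4096) = 8192 bytes
	ALIGN(5464, 2048) = 6144 bytes
	ALIGN(5464, 1024) = 6144 bytes
	ALIGN(5464,  256) = 5632 bytes
	ALIGN(5464,   16) = 5472 bytes

If none of these fits in stolen memory, the fallback path just above retries
with a plain 64-byte-aligned pitch and GTT-roll acceleration disabled.
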
@@ -443,6 +450,8 @@ static int psbfb_create(struct psb_fbdev *fbdev,
}
info->par = fbdev;
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
+
ret = psb_framebuffer_init(dev, psbfb, &mode_cmd, backing);
if (ret)
goto out_unref;
@@ -456,13 +465,12 @@ static int psbfb_create(struct psb_fbdev *fbdev,
strcpy(info->fix.id, "psbfb");
info->flags = FBINFO_DEFAULT;
- if (gtt_roll) { /* GTT rolling seems best */
+ if (dev_priv->ops->accel_2d && pitch_lines > 8) /* 2D engine */
+ info->fbops = &psbfb_ops;
+ else if (gtt_roll) { /* GTT rolling seems best */
info->fbops = &psbfb_roll_ops;
info->flags |= FBINFO_HWACCEL_YPAN;
- }
- else if (dev_priv->ops->accel_2d) /* 2D engine */
- info->fbops = &psbfb_ops;
- else /* Software */
+ } else /* Software */
info->fbops = &psbfb_unaccel_ops;
ret = fb_alloc_cmap(&info->cmap, 256, 0);
@@ -474,24 +482,11 @@ static int psbfb_create(struct psb_fbdev *fbdev,
info->fix.smem_start = dev->mode_config.fb_base;
info->fix.smem_len = size;
info->fix.ywrapstep = gtt_roll;
- info->fix.ypanstep = gtt_roll;
+ info->fix.ypanstep = 0;
- if (backing->stolen) {
- /* Accessed stolen memory directly */
- info->screen_base = (char *)dev_priv->vram_addr +
+ /* Accessed stolen memory directly */
+ info->screen_base = (char *)dev_priv->vram_addr +
backing->offset;
- } else {
- /* Pin the pages into the GTT and create a mapping to them */
- psb_gtt_pin(backing);
- info->screen_base = vm_map_ram(backing->pages, backing->npage,
- -1, PAGE_KERNEL);
- if (info->screen_base == NULL) {
- psb_gtt_unpin(backing);
- ret = -ENOMEM;
- goto out_unref;
- }
- psbfb->vm_map = 1;
- }
info->screen_size = size;
if (dev_priv->gtt.stolen_size) {
@@ -504,7 +499,7 @@ static int psbfb_create(struct psb_fbdev *fbdev,
info->apertures->ranges[0].size = dev_priv->gtt.stolen_size;
}
- drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
sizes->fb_width, sizes->fb_height);
@@ -525,11 +520,8 @@ static int psbfb_create(struct psb_fbdev *fbdev,
out_unref:
if (backing->stolen)
psb_gtt_free_range(dev, backing);
- else {
- if (psbfb->vm_map)
- vm_unmap_ram(info->screen_base, backing->npage);
+ else
drm_gem_object_unreference(&backing->gem);
- }
out_err1:
mutex_unlock(&dev->struct_mutex);
psb_gtt_free_range(dev, backing);
@@ -546,7 +538,7 @@ out_err1:
*/
static struct drm_framebuffer *psb_user_framebuffer_create
(struct drm_device *dev, struct drm_file *filp,
- struct drm_mode_fb_cmd *cmd)
+ struct drm_mode_fb_cmd2 *cmd)
{
struct gtt_range *r;
struct drm_gem_object *obj;
@@ -555,7 +547,7 @@ static struct drm_framebuffer *psb_user_framebuffer_create
* Find the GEM object and thus the gtt range object that is
* to back this space
*/
- obj = drm_gem_object_lookup(dev, filp, cmd->handle);
+ obj = drm_gem_object_lookup(dev, filp, cmd->handles[0]);
if (obj == NULL)
return ERR_PTR(-ENOENT);
@@ -603,13 +595,6 @@ int psb_fbdev_destroy(struct drm_device *dev, struct psb_fbdev *fbdev)
if (fbdev->psb_fb_helper.fbdev) {
info = fbdev->psb_fb_helper.fbdev;
-
- /* If this is our base framebuffer then kill any virtual map
- for the framebuffer layer and unpin it */
- if (psbfb->vm_map) {
- vm_unmap_ram(info->screen_base, psbfb->gtt->npage);
- psb_gtt_unpin(psbfb->gtt);
- }
unregister_framebuffer(info);
if (info->cmap.len)
fb_dealloc_cmap(&info->cmap);
@@ -761,13 +746,13 @@ static void psb_setup_outputs(struct drm_device *dev)
list_for_each_entry(connector, &dev->mode_config.connector_list,
head) {
- struct psb_intel_output *psb_intel_output =
- to_psb_intel_output(connector);
- struct drm_encoder *encoder = &psb_intel_output->enc;
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct drm_encoder *encoder = &psb_intel_encoder->base;
int crtc_mask = 0, clone_mask = 0;
/* valid crtcs */
- switch (psb_intel_output->type) {
+ switch (psb_intel_encoder->type) {
case INTEL_OUTPUT_ANALOG:
crtc_mask = (1 << 0);
clone_mask = (1 << INTEL_OUTPUT_ANALOG);
@@ -792,13 +777,10 @@ static void psb_setup_outputs(struct drm_device *dev)
clone_mask = (1 << INTEL_OUTPUT_MIPI2);
break;
case INTEL_OUTPUT_HDMI:
- /* HDMI on crtc 1 for SoC devices and crtc 0 for
- Cedarview. HDMI on Poulsbo is only via external
- logic */
- if (IS_MFLD(dev) || IS_MRST(dev))
+ if (IS_MFLD(dev))
crtc_mask = (1 << 1);
- else
- crtc_mask = (1 << 0); /* Cedarview */
+ else
+ crtc_mask = (1 << 0);
clone_mask = (1 << INTEL_OUTPUT_HDMI);
break;
}
@@ -810,8 +792,7 @@ static void psb_setup_outputs(struct drm_device *dev)
void psb_modeset_init(struct drm_device *dev)
{
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *) dev->dev_private;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
int i;
@@ -823,7 +804,7 @@ void psb_modeset_init(struct drm_device *dev)
dev->mode_config.funcs = (void *) &psb_mode_funcs;
/* set memory base */
- /* MRST and PSB should use BAR 2*/
+ /* Oaktrail and Poulsbo should use BAR 2*/
pci_read_config_dword(dev->pdev, PSB_BSM, (u32 *)
&(dev->mode_config.fb_base));
diff --git a/drivers/staging/gma500/framebuffer.h b/drivers/gpu/drm/gma500/framebuffer.h
index d1b2289..989558a 100644
--- a/drivers/staging/gma500/framebuffer.h
+++ b/drivers/gpu/drm/gma500/framebuffer.h
@@ -32,7 +32,6 @@ struct psb_framebuffer {
struct address_space *addr_space;
struct fb_info *fbdev;
struct gtt_range *gtt;
- bool vm_map; /* True if we must undo a vm_map_ram */
};
struct psb_fbdev {
diff --git a/drivers/staging/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index f6433c0..9fbb868 100644
--- a/drivers/staging/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -25,7 +25,7 @@
#include <drm/drmP.h>
#include <drm/drm.h>
-#include "psb_drm.h"
+#include "gma_drm.h"
#include "psb_drv.h"
int psb_gem_init_object(struct drm_gem_object *obj)
@@ -271,13 +271,13 @@ int psb_gem_create_ioctl(struct drm_device *dev, void *data,
{
struct drm_psb_gem_create *args = data;
int ret;
- if (args->flags & PSB_GEM_CREATE_STOLEN) {
+ if (args->flags & GMA_GEM_CREATE_STOLEN) {
ret = psb_gem_create_stolen(file, dev, args->size,
&args->handle);
if (ret == 0)
return 0;
		/* Fall through */
- args->flags &= ~PSB_GEM_CREATE_STOLEN;
+ args->flags &= ~GMA_GEM_CREATE_STOLEN;
}
return psb_gem_create(file, dev, args->size, &args->handle);
}
diff --git a/drivers/staging/gma500/gem_glue.c b/drivers/gpu/drm/gma500/gem_glue.c
index daac121..daac121 100644
--- a/drivers/staging/gma500/gem_glue.c
+++ b/drivers/gpu/drm/gma500/gem_glue.c
diff --git a/drivers/staging/gma500/gem_glue.h b/drivers/gpu/drm/gma500/gem_glue.h
index ce5ce30..ce5ce30 100644
--- a/drivers/staging/gma500/gem_glue.h
+++ b/drivers/gpu/drm/gma500/gem_glue.h
diff --git a/drivers/staging/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index e770bd1..aff194f 100644
--- a/drivers/staging/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -20,6 +20,7 @@
*/
#include <drm/drmP.h>
+#include <linux/shmem_fs.h>
#include "psb_drv.h"
@@ -203,9 +204,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
gt->npage = pages;
for (i = 0; i < pages; i++) {
- /* FIXME: needs updating as per mail from Hugh Dickins */
- p = read_cache_page_gfp(mapping, i,
- __GFP_COLD | GFP_KERNEL);
+ p = shmem_read_mapping_page(mapping, i);
if (IS_ERR(p))
goto err;
gt->pages[i] = p;
@@ -447,10 +446,9 @@ int psb_gtt_init(struct drm_device *dev, int resume)
pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
>> PAGE_SHIFT;
- /* Some CDV firmware doesn't report this currently. In which case the
- system has 64 gtt pages */
+	/* CDV doesn't report this, in which case the system has 64 GTT pages */
if (pg->gtt_start == 0 || gtt_pages == 0) {
- dev_err(dev->dev, "GTT PCI BAR not initialized.\n");
+ dev_dbg(dev->dev, "GTT PCI BAR not initialized.\n");
gtt_pages = 64;
pg->gtt_start = dev_priv->pge_ctl;
}
@@ -462,10 +460,10 @@ int psb_gtt_init(struct drm_device *dev, int resume)
if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
static struct resource fudge; /* Preferably peppermint */
- /* This can occur on CDV SDV systems. Fudge it in this case.
+ /* This can occur on CDV systems. Fudge it in this case.
We really don't care what imaginary space is being allocated
at this point */
- dev_err(dev->dev, "GATT PCI BAR not initialized.\n");
+ dev_dbg(dev->dev, "GATT PCI BAR not initialized.\n");
pg->gatt_start = 0x40000000;
pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
/* This is a little confusing but in fact the GTT is providing
diff --git a/drivers/staging/gma500/gtt.h b/drivers/gpu/drm/gma500/gtt.h
index aa17423..aa17423 100644
--- a/drivers/staging/gma500/gtt.h
+++ b/drivers/gpu/drm/gma500/gtt.h
diff --git a/drivers/staging/gma500/intel_bios.c b/drivers/gpu/drm/gma500/intel_bios.c
index 096757f..d4d0c5b 100644
--- a/drivers/staging/gma500/intel_bios.c
+++ b/drivers/gpu/drm/gma500/intel_bios.c
@@ -20,7 +20,7 @@
*/
#include <drm/drmP.h>
#include <drm/drm.h>
-#include "psb_drm.h"
+#include "gma_drm.h"
#include "psb_drv.h"
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
diff --git a/drivers/staging/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index 70f1bf0..70f1bf0 100644
--- a/drivers/staging/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
new file mode 100644
index 0000000..147584a
--- /dev/null
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -0,0 +1,493 @@
+/*
+ * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2008,2010 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ */
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include "drmP.h"
+#include "drm.h"
+#include "psb_intel_drv.h"
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "psb_intel_reg.h"
+
+#define _wait_for(COND, MS, W) ({ \
+ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \
+ int ret__ = 0; \
+ while (! (COND)) { \
+ if (time_after(jiffies, timeout__)) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ if (W && !(in_atomic() || in_dbg_master())) msleep(W); \
+ } \
+ ret__; \
+})
+
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
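
_wait_for() polls COND until it is true or MS milliseconds have passed,
returning 0 on success and -ETIMEDOUT otherwise; wait_for() sleeps 1 ms
between polls while wait_for_atomic() busy-waits. A hypothetical usage
sketch (MY_STATUS_REG and MY_READY_BIT stand in for whatever ready flag a
caller actually polls):

	static int example_wait_ready(struct drm_device *dev)
	{
		/* Give the hardware up to 50 ms to raise its ready bit */
		return wait_for(REG_READ(MY_STATUS_REG) & MY_READY_BIT, 50);
	}
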
+
+/* Intel GPIO access functions */
+
+#define I2C_RISEFALL_TIME 20
+
+static inline struct intel_gmbus *
+to_intel_gmbus(struct i2c_adapter *i2c)
+{
+ return container_of(i2c, struct intel_gmbus, adapter);
+}
+
+struct intel_gpio {
+ struct i2c_adapter adapter;
+ struct i2c_algo_bit_data algo;
+ struct drm_psb_private *dev_priv;
+ u32 reg;
+};
+
+void
+gma_intel_i2c_reset(struct drm_device *dev)
+{
+ REG_WRITE(GMBUS0, 0);
+}
+
+static void intel_i2c_quirk_set(struct drm_psb_private *dev_priv, bool enable)
+{
+ /* When using bit bashing for I2C, this bit needs to be set to 1 */
+ /* FIXME: We are never Pineview, right?
+
+ u32 val;
+
+ if (!IS_PINEVIEW(dev_priv->dev))
+ return;
+
+ val = REG_READ(DSPCLK_GATE_D);
+ if (enable)
+ val |= DPCUNIT_CLOCK_GATE_DISABLE;
+ else
+ val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
+ REG_WRITE(DSPCLK_GATE_D, val);
+
+ return;
+ */
+}
+
+static u32 get_reserved(struct intel_gpio *gpio)
+{
+ struct drm_psb_private *dev_priv = gpio->dev_priv;
+ struct drm_device *dev = dev_priv->dev;
+ u32 reserved = 0;
+
+ /* On most chips, these bits must be preserved in software. */
+ reserved = REG_READ(gpio->reg) &
+ (GPIO_DATA_PULLUP_DISABLE |
+ GPIO_CLOCK_PULLUP_DISABLE);
+
+ return reserved;
+}
+
+static int get_clock(void *data)
+{
+ struct intel_gpio *gpio = data;
+ struct drm_psb_private *dev_priv = gpio->dev_priv;
+ struct drm_device *dev = dev_priv->dev;
+ u32 reserved = get_reserved(gpio);
+ REG_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
+ REG_WRITE(gpio->reg, reserved);
+ return (REG_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
+}
+
+static int get_data(void *data)
+{
+ struct intel_gpio *gpio = data;
+ struct drm_psb_private *dev_priv = gpio->dev_priv;
+ struct drm_device *dev = dev_priv->dev;
+ u32 reserved = get_reserved(gpio);
+ REG_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
+ REG_WRITE(gpio->reg, reserved);
+ return (REG_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
+}
+
+static void set_clock(void *data, int state_high)
+{
+ struct intel_gpio *gpio = data;
+ struct drm_psb_private *dev_priv = gpio->dev_priv;
+ struct drm_device *dev = dev_priv->dev;
+ u32 reserved = get_reserved(gpio);
+ u32 clock_bits;
+
+ if (state_high)
+ clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
+ else
+ clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
+ GPIO_CLOCK_VAL_MASK;
+
+ REG_WRITE(gpio->reg, reserved | clock_bits);
+ REG_READ(gpio->reg); /* Posting */
+}
+
+static void set_data(void *data, int state_high)
+{
+ struct intel_gpio *gpio = data;
+ struct drm_psb_private *dev_priv = gpio->dev_priv;
+ struct drm_device *dev = dev_priv->dev;
+ u32 reserved = get_reserved(gpio);
+ u32 data_bits;
+
+ if (state_high)
+ data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
+ else
+ data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
+ GPIO_DATA_VAL_MASK;
+
+ REG_WRITE(gpio->reg, reserved | data_bits);
+ REG_READ(gpio->reg);
+}
+
+static struct i2c_adapter *
+intel_gpio_create(struct drm_psb_private *dev_priv, u32 pin)
+{
+ static const int map_pin_to_reg[] = {
+ 0,
+ GPIOB,
+ GPIOA,
+ GPIOC,
+ GPIOD,
+ GPIOE,
+ 0,
+ GPIOF,
+ };
+ struct intel_gpio *gpio;
+
+ if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin])
+ return NULL;
+
+ gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
+ if (gpio == NULL)
+ return NULL;
+
+ gpio->reg = map_pin_to_reg[pin];
+ gpio->dev_priv = dev_priv;
+
+ snprintf(gpio->adapter.name, sizeof(gpio->adapter.name),
+ "gma500 GPIO%c", "?BACDE?F"[pin]);
+ gpio->adapter.owner = THIS_MODULE;
+ gpio->adapter.algo_data = &gpio->algo;
+ gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
+ gpio->algo.setsda = set_data;
+ gpio->algo.setscl = set_clock;
+ gpio->algo.getsda = get_data;
+ gpio->algo.getscl = get_clock;
+ gpio->algo.udelay = I2C_RISEFALL_TIME;
+ gpio->algo.timeout = usecs_to_jiffies(2200);
+ gpio->algo.data = gpio;
+
+ if (i2c_bit_add_bus(&gpio->adapter))
+ goto out_free;
+
+ return &gpio->adapter;
+
+out_free:
+ kfree(gpio);
+ return NULL;
+}
+
+static int
+intel_i2c_quirk_xfer(struct drm_psb_private *dev_priv,
+ struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct intel_gpio *gpio = container_of(adapter,
+ struct intel_gpio,
+ adapter);
+ int ret;
+
+ gma_intel_i2c_reset(dev_priv->dev);
+
+ intel_i2c_quirk_set(dev_priv, true);
+ set_data(gpio, 1);
+ set_clock(gpio, 1);
+ udelay(I2C_RISEFALL_TIME);
+
+ ret = adapter->algo->master_xfer(adapter, msgs, num);
+
+ set_data(gpio, 1);
+ set_clock(gpio, 1);
+ intel_i2c_quirk_set(dev_priv, false);
+
+ return ret;
+}
+
+static int
+gmbus_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+ struct drm_psb_private *dev_priv = adapter->algo_data;
+ struct drm_device *dev = dev_priv->dev;
+ int i, reg_offset;
+
+ if (bus->force_bit)
+ return intel_i2c_quirk_xfer(dev_priv,
+ bus->force_bit, msgs, num);
+
+ reg_offset = 0;
+
+ REG_WRITE(GMBUS0 + reg_offset, bus->reg0);
+
+ for (i = 0; i < num; i++) {
+ u16 len = msgs[i].len;
+ u8 *buf = msgs[i].buf;
+
+ if (msgs[i].flags & I2C_M_RD) {
+ REG_WRITE(GMBUS1 + reg_offset,
+ GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
+ (len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+ REG_READ(GMBUS2+reg_offset);
+ do {
+ u32 val, loop = 0;
+
+ if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+ goto timeout;
+ if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+ goto clear_err;
+
+ val = REG_READ(GMBUS3 + reg_offset);
+ do {
+ *buf++ = val & 0xff;
+ val >>= 8;
+ } while (--len && ++loop < 4);
+ } while (len);
+ } else {
+ u32 val, loop;
+
+ val = loop = 0;
+ do {
+ val |= *buf++ << (8 * loop);
+ } while (--len && ++loop < 4);
+
+ REG_WRITE(GMBUS3 + reg_offset, val);
+ REG_WRITE(GMBUS1 + reg_offset,
+ (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
+ (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
+ (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+ GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+ REG_READ(GMBUS2+reg_offset);
+
+ while (len) {
+ if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+ goto timeout;
+ if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+ goto clear_err;
+
+ val = loop = 0;
+ do {
+ val |= *buf++ << (8 * loop);
+ } while (--len && ++loop < 4);
+
+ REG_WRITE(GMBUS3 + reg_offset, val);
+ REG_READ(GMBUS2+reg_offset);
+ }
+ }
+
+ if (i + 1 < num && wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
+ goto timeout;
+ if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+ goto clear_err;
+ }
+
+ goto done;
+
+clear_err:
+ /* Toggle the Software Clear Interrupt bit. This has the effect
+ * of resetting the GMBUS controller and so clearing the
+ * BUS_ERROR raised by the slave's NAK.
+ */
+ REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
+ REG_WRITE(GMBUS1 + reg_offset, 0);
+
+done:
+ /* Mark the GMBUS interface as disabled. We will re-enable it at the
+ * start of the next xfer; until then, let it sleep.
+ */
+ REG_WRITE(GMBUS0 + reg_offset, 0);
+ return i;
+
+timeout:
+ DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
+ bus->reg0 & 0xff, bus->adapter.name);
+ REG_WRITE(GMBUS0 + reg_offset, 0);
+
+ /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
+ bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
+ if (!bus->force_bit)
+ return -ENOMEM;
+
+ return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num);
+}
+
+static u32 gmbus_func(struct i2c_adapter *adapter)
+{
+ struct intel_gmbus *bus = container_of(adapter,
+ struct intel_gmbus,
+ adapter);
+
+ if (bus->force_bit)
+ bus->force_bit->algo->functionality(bus->force_bit);
+
+ return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+ /* I2C_FUNC_10BIT_ADDR | */
+ I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+ I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
+}
+
+static const struct i2c_algorithm gmbus_algorithm = {
+ .master_xfer = gmbus_xfer,
+ .functionality = gmbus_func
+};
+
+/**
+ * intel_gmbus_setup - instantiate all Intel i2c GMBuses
+ * @dev: DRM device
+ */
+int gma_intel_setup_gmbus(struct drm_device *dev)
+{
+ static const char *names[GMBUS_NUM_PORTS] = {
+ "disabled",
+ "ssc",
+ "vga",
+ "panel",
+ "dpc",
+ "dpb",
+ "reserved",
+ "dpd",
+ };
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int ret, i;
+
+ dev_priv->gmbus = kcalloc(sizeof(struct intel_gmbus), GMBUS_NUM_PORTS,
+ GFP_KERNEL);
+ if (dev_priv->gmbus == NULL)
+ return -ENOMEM;
+
+ for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
+
+ bus->adapter.owner = THIS_MODULE;
+ bus->adapter.class = I2C_CLASS_DDC;
+ snprintf(bus->adapter.name,
+ sizeof(bus->adapter.name),
+ "gma500 gmbus %s",
+ names[i]);
+
+ bus->adapter.dev.parent = &dev->pdev->dev;
+ bus->adapter.algo_data = dev_priv;
+
+ bus->adapter.algo = &gmbus_algorithm;
+ ret = i2c_add_adapter(&bus->adapter);
+ if (ret)
+ goto err;
+
+ /* By default use a conservative clock rate */
+ bus->reg0 = i | GMBUS_RATE_100KHZ;
+
+ /* XXX force bit banging until GMBUS is fully debugged */
+ bus->force_bit = intel_gpio_create(dev_priv, i);
+ }
+
+ gma_intel_i2c_reset(dev_priv->dev);
+
+ return 0;
+
+err:
+ while (--i) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ i2c_del_adapter(&bus->adapter);
+ }
+ kfree(dev_priv->gmbus);
+ dev_priv->gmbus = NULL;
+ return ret;
+}
+
+void gma_intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+ /* speed:
+ * 0x0 = 100 KHz
+ * 0x1 = 50 KHz
+ * 0x2 = 400 KHz
+ * 0x3 = 1000 KHz
+ */
+ bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8);
+}
+
+void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
+{
+ struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+ if (force_bit) {
+ if (bus->force_bit == NULL) {
+ struct drm_psb_private *dev_priv = adapter->algo_data;
+ bus->force_bit = intel_gpio_create(dev_priv,
+ bus->reg0 & 0xff);
+ }
+ } else {
+ if (bus->force_bit) {
+ i2c_del_adapter(bus->force_bit);
+ kfree(bus->force_bit);
+ bus->force_bit = NULL;
+ }
+ }
+}
+
+void gma_intel_teardown_gmbus(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ int i;
+
+ if (dev_priv->gmbus == NULL)
+ return;
+
+ for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+ struct intel_gmbus *bus = &dev_priv->gmbus[i];
+ if (bus->force_bit) {
+ i2c_del_adapter(bus->force_bit);
+ kfree(bus->force_bit);
+ }
+ i2c_del_adapter(&bus->adapter);
+ }
+
+ kfree(dev_priv->gmbus);
+ dev_priv->gmbus = NULL;
+}
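
The gmbus_xfer() routine above moves message data through the 32-bit GMBUS3 register, packing and unpacking up to four bytes per access in least-significant-byte-first order. Below is a minimal standalone C sketch of just that packing scheme, outside the driver; the helper names pack_gmbus_word/unpack_gmbus_word are invented for illustration and do not exist in the patch.

/*
 * Standalone illustration of the GMBUS3 byte packing used by gmbus_xfer():
 * up to four message bytes travel per 32-bit register access, LSB first.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Pack up to four bytes from buf (len >= 1) into one little-endian word. */
static uint32_t pack_gmbus_word(const uint8_t *buf, unsigned int len)
{
    uint32_t val = 0;
    unsigned int loop = 0;

    do {
        val |= (uint32_t)*buf++ << (8 * loop);
    } while (--len && ++loop < 4);

    return val;
}

/* Unpack a 32-bit word back into at most four bytes of buf (len >= 1). */
static void unpack_gmbus_word(uint32_t val, uint8_t *buf, unsigned int len)
{
    unsigned int loop = 0;

    do {
        *buf++ = val & 0xff;
        val >>= 8;
    } while (--len && ++loop < 4);
}

int main(void)
{
    const uint8_t msg[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
    uint8_t out[6] = { 0 };
    unsigned int done = 0;

    /* A 6-byte message needs two GMBUS3 accesses: 4 bytes, then 2. */
    while (done < sizeof(msg)) {
        unsigned int chunk = sizeof(msg) - done;
        uint32_t val = pack_gmbus_word(msg + done, chunk);

        unpack_gmbus_word(val, out + done, chunk);
        done += chunk > 4 ? 4 : chunk;
    }

    printf("round trip ok: %d\n", memcmp(msg, out, sizeof(msg)) == 0);
    return 0;
}

Compiled on its own, the 6-byte round trip exercises the same 4-byte chunking that the read and write loops in gmbus_xfer() perform against GMBUS3.
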
diff --git a/drivers/staging/gma500/intel_i2c.c b/drivers/gpu/drm/gma500/intel_i2c.c
index 51cbf65..98a28c2 100644
--- a/drivers/staging/gma500/intel_i2c.c
+++ b/drivers/gpu/drm/gma500/intel_i2c.c
@@ -17,10 +17,9 @@
* Authors:
* Eric Anholt <eric@anholt.net>
*/
-
+#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
-#include <linux/export.h>
#include "psb_drv.h"
#include "psb_intel_reg.h"
diff --git a/drivers/staging/gma500/intel_opregion.c b/drivers/gpu/drm/gma500/intel_opregion.c
index d946bc1..d946bc1 100644
--- a/drivers/staging/gma500/intel_opregion.c
+++ b/drivers/gpu/drm/gma500/intel_opregion.c
diff --git a/drivers/staging/gma500/mid_bios.c b/drivers/gpu/drm/gma500/mid_bios.c
index ee3c036..5eee9ad 100644
--- a/drivers/staging/gma500/mid_bios.c
+++ b/drivers/gpu/drm/gma500/mid_bios.c
@@ -23,18 +23,11 @@
* - Check ioremap failures
*/
-#include <linux/moduleparam.h>
#include <drm/drmP.h>
#include <drm/drm.h>
-#include "psb_drm.h"
+#include "gma_drm.h"
#include "psb_drv.h"
#include "mid_bios.h"
-#include "mdfld_output.h"
-
-static int panel_id = GCT_DETECT;
-module_param_named(panel_id, panel_id, int, 0600);
-MODULE_PARM_DESC(panel_id, "Panel Identifier");
-
static void mid_get_fuse_settings(struct drm_device *dev)
{
@@ -52,6 +45,12 @@ static void mid_get_fuse_settings(struct drm_device *dev)
#define FB_SKU_100 0
#define FB_SKU_100L 1
#define FB_SKU_83 2
+ if (pci_root == NULL) {
+ WARN_ON(1);
+ return;
+ }
+
+
pci_write_config_dword(pci_root, 0xD0, FB_REG06);
pci_read_config_dword(pci_root, 0xD4, &fuse_value);
@@ -108,6 +107,10 @@ static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
uint32_t platform_rev_id = 0;
struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
+ if (pci_gfx_root == NULL) {
+ WARN_ON(1);
+ return;
+ }
pci_read_config_dword(pci_gfx_root, 0x08, &platform_rev_id);
dev_priv->platform_rev_id = (uint8_t) platform_rev_id;
pci_dev_put(pci_gfx_root);
@@ -118,13 +121,13 @@ static void mid_get_pci_revID(struct drm_psb_private *dev_priv)
static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;
- struct mrst_vbt *vbt = &dev_priv->vbt_data;
+ struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
u32 addr;
u16 new_size;
u8 *vbt_virtual;
u8 bpi;
u8 number_desc = 0;
- struct mrst_timing_info *dp_ti = &dev_priv->gct_data.DTD;
+ struct oaktrail_timing_info *dp_ti = &dev_priv->gct_data.DTD;
struct gct_r10_timing_info ti;
void *pGCT;
struct pci_dev *pci_gfx_root = pci_get_bus_and_slot(0, PCI_DEVFN(2, 0));
@@ -145,51 +148,61 @@ static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
/* get the virtual address of the vbt */
vbt_virtual = ioremap(addr, sizeof(*vbt));
+ if (vbt_virtual == NULL) {
+ vbt->size = 0;
+ return;
+ }
memcpy(vbt, vbt_virtual, sizeof(*vbt));
iounmap(vbt_virtual); /* Free virtual address space */
+ /* No matching signature, don't process the data */
+ if (memcmp(vbt->signature, "$GCT", 4)) {
+ vbt->size = 0;
+ return;
+ }
+
dev_dbg(dev->dev, "GCT revision is %x\n", vbt->revision);
switch (vbt->revision) {
case 0:
- vbt->mrst_gct = ioremap(addr + sizeof(*vbt) - 4,
+ vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
vbt->size - sizeof(*vbt) + 4);
- pGCT = vbt->mrst_gct;
- bpi = ((struct mrst_gct_v1 *)pGCT)->PD.BootPanelIndex;
+ pGCT = vbt->oaktrail_gct;
+ bpi = ((struct oaktrail_gct_v1 *)pGCT)->PD.BootPanelIndex;
dev_priv->gct_data.bpi = bpi;
dev_priv->gct_data.pt =
- ((struct mrst_gct_v1 *)pGCT)->PD.PanelType;
+ ((struct oaktrail_gct_v1 *)pGCT)->PD.PanelType;
memcpy(&dev_priv->gct_data.DTD,
- &((struct mrst_gct_v1 *)pGCT)->panel[bpi].DTD,
- sizeof(struct mrst_timing_info));
+ &((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].DTD,
+ sizeof(struct oaktrail_timing_info));
dev_priv->gct_data.Panel_Port_Control =
- ((struct mrst_gct_v1 *)pGCT)->panel[bpi].Panel_Port_Control;
+ ((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_Port_Control;
dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
- ((struct mrst_gct_v1 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
+ ((struct oaktrail_gct_v1 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
break;
case 1:
- vbt->mrst_gct = ioremap(addr + sizeof(*vbt) - 4,
+ vbt->oaktrail_gct = ioremap(addr + sizeof(*vbt) - 4,
vbt->size - sizeof(*vbt) + 4);
- pGCT = vbt->mrst_gct;
- bpi = ((struct mrst_gct_v2 *)pGCT)->PD.BootPanelIndex;
+ pGCT = vbt->oaktrail_gct;
+ bpi = ((struct oaktrail_gct_v2 *)pGCT)->PD.BootPanelIndex;
dev_priv->gct_data.bpi = bpi;
dev_priv->gct_data.pt =
- ((struct mrst_gct_v2 *)pGCT)->PD.PanelType;
+ ((struct oaktrail_gct_v2 *)pGCT)->PD.PanelType;
memcpy(&dev_priv->gct_data.DTD,
- &((struct mrst_gct_v2 *)pGCT)->panel[bpi].DTD,
- sizeof(struct mrst_timing_info));
+ &((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].DTD,
+ sizeof(struct oaktrail_timing_info));
dev_priv->gct_data.Panel_Port_Control =
- ((struct mrst_gct_v2 *)pGCT)->panel[bpi].Panel_Port_Control;
+ ((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_Port_Control;
dev_priv->gct_data.Panel_MIPI_Display_Descriptor =
- ((struct mrst_gct_v2 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
+ ((struct oaktrail_gct_v2 *)pGCT)->panel[bpi].Panel_MIPI_Display_Descriptor;
break;
case 0x10:
/*header definition changed from rev 01 (v2) to rev 10h. */
/*so, some values have changed location*/
new_size = vbt->checksum; /*checksum contains lo size byte*/
- /*LSB of mrst_gct contains hi size byte*/
- new_size |= ((0xff & (unsigned int)vbt->mrst_gct)) << 8;
+ /*LSB of oaktrail_gct contains hi size byte*/
+ new_size |= ((0xff & (unsigned int)(long)vbt->oaktrail_gct)) << 8;
vbt->checksum = vbt->size; /*size contains the checksum*/
if (new_size > 0xff)
@@ -198,11 +211,11 @@ static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
vbt->size = new_size;
/* number of descriptors defined in the GCT */
- number_desc = ((0xff00 & (unsigned int)vbt->mrst_gct)) >> 8;
- bpi = ((0xff0000 & (unsigned int)vbt->mrst_gct)) >> 16;
- vbt->mrst_gct = ioremap(addr + GCT_R10_HEADER_SIZE,
+ number_desc = ((0xff00 & (unsigned int)(long)vbt->oaktrail_gct)) >> 8;
+ bpi = ((0xff0000 & (unsigned int)(long)vbt->oaktrail_gct)) >> 16;
+ vbt->oaktrail_gct = ioremap(addr + GCT_R10_HEADER_SIZE,
GCT_R10_DISPLAY_DESC_SIZE * number_desc);
- pGCT = vbt->mrst_gct;
+ pGCT = vbt->oaktrail_gct;
pGCT = (u8 *)pGCT + (bpi*GCT_R10_DISPLAY_DESC_SIZE);
dev_priv->gct_data.bpi = bpi; /*save boot panel id*/
@@ -238,26 +251,6 @@ static void mid_get_vbt_data(struct drm_psb_private *dev_priv)
dev_err(dev->dev, "Unknown revision of GCT!\n");
vbt->size = 0;
}
- if (IS_MFLD(dev_priv->dev)) {
- if (panel_id == GCT_DETECT) {
- if (dev_priv->gct_data.bpi == 2) {
- dev_info(dev->dev, "[GFX] PYR Panel Detected\n");
- dev_priv->panel_id = PYR_CMD;
- panel_id = PYR_CMD;
- } else if (dev_priv->gct_data.bpi == 0) {
- dev_info(dev->dev, "[GFX] TMD Panel Detected.\n");
- dev_priv->panel_id = TMD_VID;
- panel_id = TMD_VID;
- } else {
- dev_info(dev->dev, "[GFX] Default Panel (TPO)\n");
- dev_priv->panel_id = TPO_CMD;
- panel_id = TPO_CMD;
- }
- } else {
- dev_info(dev->dev, "[GFX] Panel Parameter Passed in through cmd line\n");
- dev_priv->panel_id = panel_id;
- }
- }
}
int mid_chip_setup(struct drm_device *dev)
diff --git a/drivers/staging/gma500/mid_bios.h b/drivers/gpu/drm/gma500/mid_bios.h
index 00e7d56..00e7d56 100644
--- a/drivers/staging/gma500/mid_bios.h
+++ b/drivers/gpu/drm/gma500/mid_bios.h
diff --git a/drivers/staging/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index c904d73..c904d73 100644
--- a/drivers/staging/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
diff --git a/drivers/staging/gma500/mrst.h b/drivers/gpu/drm/gma500/oaktrail.h
index b563dbc..2da1f36 100644
--- a/drivers/staging/gma500/mrst.h
+++ b/drivers/gpu/drm/gma500/oaktrail.h
@@ -19,15 +19,15 @@
/* MID device specific descriptors */
-struct mrst_vbt {
+struct oaktrail_vbt {
s8 signature[4]; /*4 bytes,"$GCT" */
u8 revision;
u8 size;
u8 checksum;
- void *mrst_gct;
+ void *oaktrail_gct;
} __packed;
-struct mrst_timing_info {
+struct oaktrail_timing_info {
u16 pixel_clock;
u8 hactive_lo;
u8 hblank_lo;
@@ -84,14 +84,14 @@ struct gct_r10_timing_info {
u16 rsvd_2:3;
} __packed;
-struct mrst_panel_descriptor_v1 {
+struct oaktrail_panel_descriptor_v1 {
u32 Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
/* 0x61190 if MIPI */
u32 Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
u32 Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
u32 Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 dword */
/* Register 0x61210 */
- struct mrst_timing_info DTD;/*18 bytes, Standard definition */
+ struct oaktrail_timing_info DTD;/*18 bytes, Standard definition */
u16 Panel_Backlight_Inverter_Descriptor;/* 16 bits, as follows */
/* Bit 0, Frequency, 15 bits,0 - 32767Hz */
/* Bit 15, Polarity, 1 bit, 0: Normal, 1: Inverted */
@@ -113,14 +113,14 @@ struct mrst_panel_descriptor_v1 {
/* Bit 14, Reserved, 2 bits, 00b */
} __packed;
-struct mrst_panel_descriptor_v2 {
+struct oaktrail_panel_descriptor_v2 {
u32 Panel_Port_Control; /* 1 dword, Register 0x61180 if LVDS */
/* 0x61190 if MIPI */
u32 Panel_Power_On_Sequencing;/*1 dword,Register 0x61208,*/
u32 Panel_Power_Off_Sequencing;/*1 dword,Register 0x6120C,*/
u8 Panel_Power_Cycle_Delay_and_Reference_Divisor;/* 1 byte */
/* Register 0x61210 */
- struct mrst_timing_info DTD;/*18 bytes, Standard definition */
+ struct oaktrail_timing_info DTD;/*18 bytes, Standard definition */
u16 Panel_Backlight_Inverter_Descriptor;/*16 bits, as follows*/
/*Bit 0, Frequency, 16 bits, 0 - 32767Hz*/
u8 Panel_Initial_Brightness;/* [7:0] 0 - 100% */
@@ -143,7 +143,7 @@ struct mrst_panel_descriptor_v2 {
/* Bit 14, Reserved, 2 bits, 00b */
} __packed;
-union mrst_panel_rx {
+union oaktrail_panel_rx {
struct {
u16 NumberOfLanes:2; /*Num of Lanes, 2 bits,0 = 1 lane,*/
/* 1 = 2 lanes, 2 = 3 lanes, 3 = 4 lanes. */
@@ -161,7 +161,7 @@ union mrst_panel_rx {
u16 panel_receiver;
} __packed;
-struct mrst_gct_v1 {
+struct oaktrail_gct_v1 {
union { /*8 bits,Defined as follows: */
struct {
u8 PanelType:4; /*4 bits, Bit field for panels*/
@@ -174,11 +174,11 @@ struct mrst_gct_v1 {
} PD;
u8 PanelDescriptor;
};
- struct mrst_panel_descriptor_v1 panel[4];/*panel descrs,38 bytes each*/
- union mrst_panel_rx panelrx[4]; /* panel receivers*/
+ struct oaktrail_panel_descriptor_v1 panel[4];/*panel descrs,38 bytes each*/
+ union oaktrail_panel_rx panelrx[4]; /* panel receivers*/
} __packed;
-struct mrst_gct_v2 {
+struct oaktrail_gct_v2 {
union { /*8 bits,Defined as follows: */
struct {
u8 PanelType:4; /*4 bits, Bit field for panels*/
@@ -191,14 +191,14 @@ struct mrst_gct_v2 {
} PD;
u8 PanelDescriptor;
};
- struct mrst_panel_descriptor_v2 panel[4];/*panel descrs,38 bytes each*/
- union mrst_panel_rx panelrx[4]; /* panel receivers*/
+ struct oaktrail_panel_descriptor_v2 panel[4];/*panel descrs,38 bytes each*/
+ union oaktrail_panel_rx panelrx[4]; /* panel receivers*/
} __packed;
-struct mrst_gct_data {
+struct oaktrail_gct_data {
u8 bpi; /* boot panel index, number of panel used during boot */
u8 pt; /* panel type, 4 bit field, 0=lvds, 1=mipi */
- struct mrst_timing_info DTD; /* timing info for the selected panel */
+ struct oaktrail_timing_info DTD; /* timing info for the selected panel */
u32 Panel_Port_Control;
u32 PP_On_Sequencing;/*1 dword,Register 0x61208,*/
u32 PP_Off_Sequencing;/*1 dword,Register 0x6120C,*/
@@ -220,7 +220,7 @@ struct mrst_gct_data {
* Moorestown HDMI interfaces
*/
-struct mrst_hdmi_dev {
+struct oaktrail_hdmi_dev {
struct pci_dev *dev;
void __iomem *regs;
unsigned int mmio, mmio_len;
@@ -243,10 +243,10 @@ struct mrst_hdmi_dev {
u32 savePCH_PIPEBSRC;
};
-extern void mrst_hdmi_setup(struct drm_device *dev);
-extern void mrst_hdmi_teardown(struct drm_device *dev);
-extern int mrst_hdmi_i2c_init(struct pci_dev *dev);
-extern void mrst_hdmi_i2c_exit(struct pci_dev *dev);
-extern void mrst_hdmi_save(struct drm_device *dev);
-extern void mrst_hdmi_restore(struct drm_device *dev);
-extern void mrst_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev);
+extern void oaktrail_hdmi_setup(struct drm_device *dev);
+extern void oaktrail_hdmi_teardown(struct drm_device *dev);
+extern int oaktrail_hdmi_i2c_init(struct pci_dev *dev);
+extern void oaktrail_hdmi_i2c_exit(struct pci_dev *dev);
+extern void oaktrail_hdmi_save(struct drm_device *dev);
+extern void oaktrail_hdmi_restore(struct drm_device *dev);
+extern void oaktrail_hdmi_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev);
diff --git a/drivers/staging/gma500/mrst_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c
index c9311a5..9d12a3e 100644
--- a/drivers/staging/gma500/mrst_crtc.c
+++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c
@@ -30,11 +30,11 @@ struct psb_intel_range_t {
int min, max;
};
-struct mrst_limit_t {
+struct oaktrail_limit_t {
struct psb_intel_range_t dot, m, p1;
};
-struct mrst_clock_t {
+struct oaktrail_clock_t {
/* derived values */
int dot;
int m;
@@ -57,7 +57,7 @@ struct mrst_clock_t {
#define MRST_P1_MAX_0 7
#define MRST_P1_MAX_1 8
-static const struct mrst_limit_t mrst_limits[] = {
+static const struct oaktrail_limit_t oaktrail_limits[] = {
{ /* MRST_LIMIT_LVDS_100L */
.dot = {.min = MRST_DOT_MIN, .max = MRST_DOT_MAX},
.m = {.min = MRST_M_MIN_100L, .max = MRST_M_MAX_100L},
@@ -76,15 +76,15 @@ static const struct mrst_limit_t mrst_limits[] = {
};
#define MRST_M_MIN 10
-static const u32 mrst_m_converts[] = {
+static const u32 oaktrail_m_converts[] = {
0x2B, 0x15, 0x2A, 0x35, 0x1A, 0x0D, 0x26, 0x33, 0x19, 0x2C,
0x36, 0x3B, 0x1D, 0x2E, 0x37, 0x1B, 0x2D, 0x16, 0x0B, 0x25,
0x12, 0x09, 0x24, 0x32, 0x39, 0x1c,
};
-static const struct mrst_limit_t *mrst_limit(struct drm_crtc *crtc)
+static const struct oaktrail_limit_t *oaktrail_limit(struct drm_crtc *crtc)
{
- const struct mrst_limit_t *limit = NULL;
+ const struct oaktrail_limit_t *limit = NULL;
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -92,30 +92,30 @@ static const struct mrst_limit_t *mrst_limit(struct drm_crtc *crtc)
|| psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)) {
switch (dev_priv->core_freq) {
case 100:
- limit = &mrst_limits[MRST_LIMIT_LVDS_100L];
+ limit = &oaktrail_limits[MRST_LIMIT_LVDS_100L];
break;
case 166:
- limit = &mrst_limits[MRST_LIMIT_LVDS_83];
+ limit = &oaktrail_limits[MRST_LIMIT_LVDS_83];
break;
case 200:
- limit = &mrst_limits[MRST_LIMIT_LVDS_100];
+ limit = &oaktrail_limits[MRST_LIMIT_LVDS_100];
break;
}
} else {
limit = NULL;
- dev_err(dev->dev, "mrst_limit Wrong display type.\n");
+ dev_err(dev->dev, "oaktrail_limit Wrong display type.\n");
}
return limit;
}
/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */
-static void mrst_clock(int refclk, struct mrst_clock_t *clock)
+static void oaktrail_clock(int refclk, struct oaktrail_clock_t *clock)
{
clock->dot = (refclk * clock->m) / (14 * clock->p1);
}
-void mrstPrintPll(char *prefix, struct mrst_clock_t *clock)
+void mrstPrintPll(char *prefix, struct oaktrail_clock_t *clock)
{
pr_debug("%s: dotclock = %d, m = %d, p1 = %d.\n",
prefix, clock->dot, clock->m, clock->p1);
@@ -127,10 +127,10 @@ void mrstPrintPll(char *prefix, struct mrst_clock_t *clock)
*/
static bool
mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
- struct mrst_clock_t *best_clock)
+ struct oaktrail_clock_t *best_clock)
{
- struct mrst_clock_t clock;
- const struct mrst_limit_t *limit = mrst_limit(crtc);
+ struct oaktrail_clock_t clock;
+ const struct oaktrail_limit_t *limit = oaktrail_limit(crtc);
int err = target;
memset(best_clock, 0, sizeof(*best_clock));
@@ -140,7 +140,7 @@ mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
clock.p1++) {
int this_err;
- mrst_clock(refclk, &clock);
+ oaktrail_clock(refclk, &clock);
this_err = abs(clock.dot - target);
if (this_err < err) {
@@ -159,7 +159,7 @@ mrstFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
* This code should probably grow support for turning the cursor off and back
* on appropriately at the same time as we're turning the pipe off/on.
*/
-static void mrst_crtc_dpms(struct drm_crtc *crtc, int mode)
+static void oaktrail_crtc_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
@@ -273,7 +273,7 @@ static void mrst_crtc_dpms(struct drm_crtc *crtc, int mode)
* Return the pipe currently connected to the panel fitter,
* or -1 if the panel fitter is not present or not in use
*/
-static int mrst_panel_fitter_pipe(struct drm_device *dev)
+static int oaktrail_panel_fitter_pipe(struct drm_device *dev)
{
u32 pfit_control;
@@ -285,7 +285,7 @@ static int mrst_panel_fitter_pipe(struct drm_device *dev)
return (pfit_control >> 29) & 3;
}
-static int mrst_crtc_mode_set(struct drm_crtc *crtc,
+static int oaktrail_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
@@ -307,15 +307,15 @@ static int mrst_crtc_mode_set(struct drm_crtc *crtc,
int vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
int refclk = 0;
- struct mrst_clock_t clock;
+ struct oaktrail_clock_t clock;
u32 dpll = 0, fp = 0, dspcntr, pipeconf;
bool ok, is_sdvo = false;
bool is_crt = false, is_lvds = false, is_tv = false;
bool is_mipi = false;
struct drm_mode_config *mode_config = &dev->mode_config;
- struct psb_intel_output *psb_intel_output = NULL;
+ struct psb_intel_encoder *psb_intel_encoder = NULL;
uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
- struct drm_encoder *encoder;
+ struct drm_connector *connector;
if (!gma_power_begin(dev, true))
return 0;
@@ -327,13 +327,13 @@ static int mrst_crtc_mode_set(struct drm_crtc *crtc,
adjusted_mode,
sizeof(struct drm_display_mode));
- list_for_each_entry(encoder, &mode_config->encoder_list, head) {
-
- if (encoder->crtc != crtc)
+ list_for_each_entry(connector, &mode_config->connector_list, head) {
+ if (!connector->encoder || connector->encoder->crtc != crtc)
continue;
- psb_intel_output = enc_to_psb_intel_output(encoder);
- switch (psb_intel_output->type) {
+ psb_intel_encoder = psb_intel_attached_encoder(connector);
+
+ switch (psb_intel_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
@@ -356,15 +356,15 @@ static int mrst_crtc_mode_set(struct drm_crtc *crtc,
REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
/* Disable the panel fitter if it was on our pipe */
- if (mrst_panel_fitter_pipe(dev) == pipe)
+ if (oaktrail_panel_fitter_pipe(dev) == pipe)
REG_WRITE(PFIT_CONTROL, 0);
REG_WRITE(pipesrc_reg,
((mode->crtc_hdisplay - 1) << 16) |
(mode->crtc_vdisplay - 1));
- if (psb_intel_output)
- drm_connector_property_get_value(&psb_intel_output->base,
+ if (psb_intel_encoder)
+ drm_connector_property_get_value(connector,
dev->mode_config.scaling_mode_property, &scalingType);
if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
@@ -432,7 +432,7 @@ static int mrst_crtc_mode_set(struct drm_crtc *crtc,
dev_priv->pipeconf = pipeconf |= PIPEACONF_ENABLE;
if (is_mipi)
- goto mrst_crtc_mode_set_exit;
+ goto oaktrail_crtc_mode_set_exit;
refclk = dev_priv->core_freq * 1000;
@@ -441,14 +441,14 @@ static int mrst_crtc_mode_set(struct drm_crtc *crtc,
ok = mrstFindBestPLL(crtc, adjusted_mode->clock, refclk, &clock);
if (!ok) {
- dev_dbg(dev->dev, "mrstFindBestPLL fail in mrst_crtc_mode_set.\n");
+ dev_dbg(dev->dev, "mrstFindBestPLL fail in oaktrail_crtc_mode_set.\n");
} else {
- dev_dbg(dev->dev, "mrst_crtc_mode_set pixel clock = %d,"
+ dev_dbg(dev->dev, "oaktrail_crtc_mode_set pixel clock = %d,"
"m = %x, p1 = %x.\n", clock.dot, clock.m,
clock.p1);
}
- fp = mrst_m_converts[(clock.m - MRST_M_MIN)] << 8;
+ fp = oaktrail_m_converts[(clock.m - MRST_M_MIN)] << 8;
dpll |= DPLL_VGA_MODE_DIS;
@@ -505,19 +505,19 @@ static int mrst_crtc_mode_set(struct drm_crtc *crtc,
REG_WRITE(dspcntr_reg, dspcntr);
psb_intel_wait_for_vblank(dev);
-mrst_crtc_mode_set_exit:
+oaktrail_crtc_mode_set_exit:
gma_power_end(dev);
return 0;
}
-static bool mrst_crtc_mode_fixup(struct drm_crtc *crtc,
+static bool oaktrail_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
-int mrst_pipe_set_base(struct drm_crtc *crtc,
+int oaktrail_pipe_set_base(struct drm_crtc *crtc,
int x, int y, struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
@@ -543,9 +543,9 @@ int mrst_pipe_set_base(struct drm_crtc *crtc,
return 0;
start = psbfb->gtt->offset;
- offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+ offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
- REG_WRITE(dspstride, crtc->fb->pitch);
+ REG_WRITE(dspstride, crtc->fb->pitches[0]);
dspcntr = REG_READ(dspcntr_reg);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
@@ -581,24 +581,24 @@ pipe_set_base_exit:
return ret;
}
-static void mrst_crtc_prepare(struct drm_crtc *crtc)
+static void oaktrail_crtc_prepare(struct drm_crtc *crtc)
{
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
}
-static void mrst_crtc_commit(struct drm_crtc *crtc)
+static void oaktrail_crtc_commit(struct drm_crtc *crtc)
{
struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
}
-const struct drm_crtc_helper_funcs mrst_helper_funcs = {
- .dpms = mrst_crtc_dpms,
- .mode_fixup = mrst_crtc_mode_fixup,
- .mode_set = mrst_crtc_mode_set,
- .mode_set_base = mrst_pipe_set_base,
- .prepare = mrst_crtc_prepare,
- .commit = mrst_crtc_commit,
+const struct drm_crtc_helper_funcs oaktrail_helper_funcs = {
+ .dpms = oaktrail_crtc_dpms,
+ .mode_fixup = oaktrail_crtc_mode_fixup,
+ .mode_set = oaktrail_crtc_mode_set,
+ .mode_set_base = oaktrail_pipe_set_base,
+ .prepare = oaktrail_crtc_prepare,
+ .commit = oaktrail_crtc_commit,
};
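
The oaktrail_clock()/mrstFindBestPLL() pair above derives the pixel clock as dot = (refclk * m) / (14 * p1) and brute-forces m and p1 to minimize the distance to the target clock (the chosen m is then translated through oaktrail_m_converts[] before being written to the FP register). A minimal standalone sketch of that search follows, with placeholder divisor ranges rather than the driver's MRST_* limits.

/*
 * Standalone illustration of the Oaktrail PLL search:
 * dot = (refclk * m) / (14 * p1), scanned over m/p1, keeping the pair
 * whose dot clock lands closest to the target. Range values are
 * placeholders for this example only.
 */
#include <stdio.h>
#include <stdlib.h>

struct clock { int dot, m, p1; };

static int find_best_pll(int target, int refclk,
                         int m_min, int m_max, int p1_min, int p1_max,
                         struct clock *best)
{
    int err = target;   /* worst acceptable error, as in the driver */
    int m, p1;

    best->dot = best->m = best->p1 = 0;

    for (m = m_min; m <= m_max; m++) {
        for (p1 = p1_min; p1 <= p1_max; p1++) {
            int dot = (refclk * m) / (14 * p1);
            int this_err = abs(dot - target);

            if (this_err < err) {
                *best = (struct clock){ dot, m, p1 };
                err = this_err;
            }
        }
    }
    return err != target;   /* true if any candidate improved on target */
}

int main(void)
{
    struct clock c;

    /* e.g. 200 MHz core clock -> refclk 200000 kHz, 65000 kHz pixel target */
    if (find_best_pll(65000, 200000, 10, 35, 2, 7, &c))
        printf("dot=%d kHz m=%d p1=%d\n", c.dot, c.m, c.p1);
    return 0;
}

With a 200 MHz core clock and a 65000 kHz target the exhaustive scan is only a few hundred iterations, which is why the driver can afford to rerun it on every mode set.
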
diff --git a/drivers/staging/gma500/mrst_device.c b/drivers/gpu/drm/gma500/oaktrail_device.c
index 6707faf..63aea2f 100644
--- a/drivers/staging/gma500/mrst_device.c
+++ b/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -22,56 +22,24 @@
#include <linux/dmi.h>
#include <drm/drmP.h>
#include <drm/drm.h>
-#include "psb_drm.h"
+#include "gma_drm.h"
#include "psb_drv.h"
#include "psb_reg.h"
#include "psb_intel_reg.h"
#include <asm/mrst.h>
#include <asm/intel_scu_ipc.h>
#include "mid_bios.h"
+#include "intel_bios.h"
-static int devtype;
-
-module_param_named(type, devtype, int, 0600);
-MODULE_PARM_DESC(type, "Moorestown/Oaktrail device type");
-
-#define DEVICE_MOORESTOWN 1
-#define DEVICE_OAKTRAIL 2
-#define DEVICE_MOORESTOWN_MM 3
-
-static int mrst_device_ident(struct drm_device *dev)
-{
- /* User forced */
- if (devtype)
- return devtype;
- if (dmi_match(DMI_PRODUCT_NAME, "OakTrail") ||
- dmi_match(DMI_PRODUCT_NAME, "OakTrail platform"))
- return DEVICE_OAKTRAIL;
-#if defined(CONFIG_X86_MRST)
- if (dmi_match(DMI_PRODUCT_NAME, "MM") ||
- dmi_match(DMI_PRODUCT_NAME, "MM 10"))
- return DEVICE_MOORESTOWN_MM;
- if (mrst_identify_cpu())
- return DEVICE_MOORESTOWN;
-#endif
- return DEVICE_OAKTRAIL;
-}
-
-
-/* IPC message and command defines used to enable/disable mipi panel voltages */
-#define IPC_MSG_PANEL_ON_OFF 0xE9
-#define IPC_CMD_PANEL_ON 1
-#define IPC_CMD_PANEL_OFF 0
-
-static int mrst_output_init(struct drm_device *dev)
+static int oaktrail_output_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
if (dev_priv->iLVDS_enable)
- mrst_lvds_init(dev, &dev_priv->mode_dev);
+ oaktrail_lvds_init(dev, &dev_priv->mode_dev);
else
dev_err(dev->dev, "DSI is not supported\n");
if (dev_priv->hdmi_priv)
- mrst_hdmi_init(dev, &dev_priv->mode_dev);
+ oaktrail_hdmi_init(dev, &dev_priv->mode_dev);
return 0;
}
@@ -87,12 +55,12 @@ static int mrst_output_init(struct drm_device *dev)
#define MHz 1000000
#define BLC_ADJUSTMENT_MAX 100
-static struct backlight_device *mrst_backlight_device;
-static int mrst_brightness;
+static struct backlight_device *oaktrail_backlight_device;
+static int oaktrail_brightness;
-static int mrst_set_brightness(struct backlight_device *bd)
+static int oaktrail_set_brightness(struct backlight_device *bd)
{
- struct drm_device *dev = bl_get_data(mrst_backlight_device);
+ struct drm_device *dev = bl_get_data(oaktrail_backlight_device);
struct drm_psb_private *dev_priv = dev->dev_private;
int level = bd->props.brightness;
u32 blc_pwm_ctl;
@@ -124,16 +92,16 @@ static int mrst_set_brightness(struct backlight_device *bd)
REG_WRITE(BLC_PWM_CTL, (max_pwm_blc << 16) | blc_pwm_ctl);
gma_power_end(dev);
}
- mrst_brightness = level;
+ oaktrail_brightness = level;
return 0;
}
-static int mrst_get_brightness(struct backlight_device *bd)
+static int oaktrail_get_brightness(struct backlight_device *bd)
{
/* return locally cached var instead of HW read (due to DPST etc.) */
/* FIXME: ideally return actual value in case firmware fiddled with
it */
- return mrst_brightness;
+ return oaktrail_brightness;
}
static int device_backlight_init(struct drm_device *dev)
@@ -168,12 +136,12 @@ static int device_backlight_init(struct drm_device *dev)
return 0;
}
-static const struct backlight_ops mrst_ops = {
- .get_brightness = mrst_get_brightness,
- .update_status = mrst_set_brightness,
+static const struct backlight_ops oaktrail_ops = {
+ .get_brightness = oaktrail_get_brightness,
+ .update_status = oaktrail_set_brightness,
};
-int mrst_backlight_init(struct drm_device *dev)
+int oaktrail_backlight_init(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
int ret;
@@ -183,21 +151,21 @@ int mrst_backlight_init(struct drm_device *dev)
props.max_brightness = 100;
props.type = BACKLIGHT_PLATFORM;
- mrst_backlight_device = backlight_device_register("mrst-bl",
- NULL, (void *)dev, &mrst_ops, &props);
+ oaktrail_backlight_device = backlight_device_register("oaktrail-bl",
+ NULL, (void *)dev, &oaktrail_ops, &props);
- if (IS_ERR(mrst_backlight_device))
- return PTR_ERR(mrst_backlight_device);
+ if (IS_ERR(oaktrail_backlight_device))
+ return PTR_ERR(oaktrail_backlight_device);
ret = device_backlight_init(dev);
if (ret < 0) {
- backlight_device_unregister(mrst_backlight_device);
+ backlight_device_unregister(oaktrail_backlight_device);
return ret;
}
- mrst_backlight_device->props.brightness = 100;
- mrst_backlight_device->props.max_brightness = 100;
- backlight_update_status(mrst_backlight_device);
- dev_priv->backlight_device = mrst_backlight_device;
+ oaktrail_backlight_device->props.brightness = 100;
+ oaktrail_backlight_device->props.max_brightness = 100;
+ backlight_update_status(oaktrail_backlight_device);
+ dev_priv->backlight_device = oaktrail_backlight_device;
return 0;
}
@@ -208,18 +176,18 @@ int mrst_backlight_init(struct drm_device *dev)
* for power management
*/
-static void mrst_init_pm(struct drm_device *dev)
+static void oaktrail_init_pm(struct drm_device *dev)
{
}
/**
- * mrst_save_display_registers - save registers lost on suspend
+ * oaktrail_save_display_registers - save registers lost on suspend
* @dev: our DRM device
*
* Save the state we need in order to be able to restore the interface
* upon resume from suspend
*/
-static int mrst_save_display_registers(struct drm_device *dev)
+static int oaktrail_save_display_registers(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
int i;
@@ -265,7 +233,7 @@ static int mrst_save_display_registers(struct drm_device *dev)
dev_priv->save_palette_a[i] = PSB_RVDC32(PALETTE_A + (i << 2));
if (dev_priv->hdmi_priv)
- mrst_hdmi_save(dev);
+ oaktrail_hdmi_save(dev);
/* Save performance state */
dev_priv->savePERF_MODE = PSB_RVDC32(MRST_PERF_MODE);
@@ -326,12 +294,12 @@ static int mrst_save_display_registers(struct drm_device *dev)
}
/**
- * mrst_restore_display_registers - restore lost register state
+ * oaktrail_restore_display_registers - restore lost register state
* @dev: our DRM device
*
* Restore register state that was lost during suspend and resume.
*/
-static int mrst_restore_display_registers(struct drm_device *dev)
+static int oaktrail_restore_display_registers(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
u32 pp_stat;
@@ -394,7 +362,7 @@ static int mrst_restore_display_registers(struct drm_device *dev)
PSB_WVDC32(dev_priv->save_palette_a[i], PALETTE_A + (i << 2));
if (dev_priv->hdmi_priv)
- mrst_hdmi_restore(dev);
+ oaktrail_hdmi_restore(dev);
if (dev_priv->iLVDS_enable) {
PSB_WVDC32(dev_priv->saveBLC_PWM_CTL2, BLC_PWM_CTL2);
@@ -439,12 +407,12 @@ static int mrst_restore_display_registers(struct drm_device *dev)
}
/**
- * mrst_power_down - power down the display island
+ * oaktrail_power_down - power down the display island
* @dev: our DRM device
*
* Power down the display interface of our device
*/
-static int mrst_power_down(struct drm_device *dev)
+static int oaktrail_power_down(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
u32 pwr_mask ;
@@ -464,11 +432,11 @@ static int mrst_power_down(struct drm_device *dev)
}
/*
- * mrst_power_up
+ * oaktrail_power_up
*
* Restore power to the specified island(s) (powergating)
*/
-static int mrst_power_up(struct drm_device *dev)
+static int oaktrail_power_up(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
u32 pwr_mask = PSB_PWRGT_DISPLAY_MASK;
@@ -488,147 +456,57 @@ static int mrst_power_up(struct drm_device *dev)
return 0;
}
-#if defined(CONFIG_X86_MRST)
-static void mrst_lvds_cache_bl(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- intel_scu_ipc_ioread8(0x28, &(dev_priv->saveBKLTCNT));
- intel_scu_ipc_ioread8(0x29, &(dev_priv->saveBKLTREQ));
- intel_scu_ipc_ioread8(0x2A, &(dev_priv->saveBKLTBRTL));
-}
-
-static void mrst_mm_bl_power(struct drm_device *dev, bool on)
+static int oaktrail_chip_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
-
- if (on) {
- intel_scu_ipc_iowrite8(0x2A, dev_priv->saveBKLTBRTL);
- intel_scu_ipc_iowrite8(0x28, dev_priv->saveBKLTCNT);
- intel_scu_ipc_iowrite8(0x29, dev_priv->saveBKLTREQ);
- } else {
- intel_scu_ipc_iowrite8(0x2A, 0);
- intel_scu_ipc_iowrite8(0x28, 0);
- intel_scu_ipc_iowrite8(0x29, 0);
+ struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
+ int ret;
+
+ ret = mid_chip_setup(dev);
+ if (ret < 0)
+ return ret;
+ if (vbt->size == 0) {
+ /* Now pull the BIOS data */
+ gma_intel_opregion_init(dev);
+ psb_intel_init_bios(dev);
}
+ return 0;
}
-static const struct psb_ops mrst_mm_chip_ops = {
- .name = "Moorestown MM ",
- .accel_2d = 1,
- .pipes = 1,
- .crtcs = 1,
- .sgx_offset = MRST_SGX_OFFSET,
-
- .crtc_helper = &mrst_helper_funcs,
- .crtc_funcs = &psb_intel_crtc_funcs,
-
- .output_init = mrst_output_init,
-
- .lvds_bl_power = mrst_mm_bl_power,
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- .backlight_init = mrst_backlight_init,
-#endif
-
- .init_pm = mrst_init_pm,
- .save_regs = mrst_save_display_registers,
- .restore_regs = mrst_restore_display_registers,
- .power_down = mrst_power_down,
- .power_up = mrst_power_up,
-
- .i2c_bus = 0,
-};
-
-#endif
-
static void oaktrail_teardown(struct drm_device *dev)
{
- mrst_hdmi_teardown(dev);
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct oaktrail_vbt *vbt = &dev_priv->vbt_data;
+
+ oaktrail_hdmi_teardown(dev);
+ if (vbt->size == 0)
+ psb_intel_destroy_bios(dev);
}
-static const struct psb_ops oaktrail_chip_ops = {
+const struct psb_ops oaktrail_chip_ops = {
.name = "Oaktrail",
.accel_2d = 1,
.pipes = 2,
.crtcs = 2,
.sgx_offset = MRST_SGX_OFFSET,
- .chip_setup = mid_chip_setup,
+ .chip_setup = oaktrail_chip_setup,
.chip_teardown = oaktrail_teardown,
- .crtc_helper = &mrst_helper_funcs,
+ .crtc_helper = &oaktrail_helper_funcs,
.crtc_funcs = &psb_intel_crtc_funcs,
- .output_init = mrst_output_init,
+ .output_init = oaktrail_output_init,
#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- .backlight_init = mrst_backlight_init,
+ .backlight_init = oaktrail_backlight_init,
#endif
- .init_pm = mrst_init_pm,
- .save_regs = mrst_save_display_registers,
- .restore_regs = mrst_restore_display_registers,
- .power_down = mrst_power_down,
- .power_up = mrst_power_up,
+ .init_pm = oaktrail_init_pm,
+ .save_regs = oaktrail_save_display_registers,
+ .restore_regs = oaktrail_restore_display_registers,
+ .power_down = oaktrail_power_down,
+ .power_up = oaktrail_power_up,
.i2c_bus = 1,
};
-
-/**
- * mrst_chip_setup - perform the initial chip init
- * @dev: Our drm_device
- *
- * Figure out which incarnation we are and then scan the firmware for
- * tables and information.
- */
-static int mrst_chip_setup(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- switch (mrst_device_ident(dev)) {
- case DEVICE_OAKTRAIL:
- /* Dual CRTC, PC compatible, HDMI, I2C #2 */
- dev_priv->ops = &oaktrail_chip_ops;
- mrst_hdmi_setup(dev);
- return mid_chip_setup(dev);
-#if defined(CONFIG_X86_MRST)
- case DEVICE_MOORESTOWN_MM:
- /* Single CRTC, No HDMI, I2C #0, BL control */
- mrst_lvds_cache_bl(dev);
- dev_priv->ops = &mrst_mm_chip_ops;
- return mid_chip_setup(dev);
- case DEVICE_MOORESTOWN:
- /* Dual CRTC, No HDMI(?), I2C #1 */
- return mid_chip_setup(dev);
-#endif
- default:
- dev_err(dev->dev, "unsupported device type.\n");
- return -ENODEV;
- }
-}
-
-const struct psb_ops mrst_chip_ops = {
- .name = "Moorestown",
- .accel_2d = 1,
- .pipes = 2,
- .crtcs = 2,
- .sgx_offset = MRST_SGX_OFFSET,
-
- .chip_setup = mrst_chip_setup,
- .crtc_helper = &mrst_helper_funcs,
- .crtc_funcs = &psb_intel_crtc_funcs,
-
- .output_init = mrst_output_init,
-
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- .backlight_init = mrst_backlight_init,
-#endif
-
- .init_pm = mrst_init_pm,
- .save_regs = mrst_save_display_registers,
- .restore_regs = mrst_restore_display_registers,
- .power_down = mrst_power_down,
- .power_up = mrst_power_up,
-
- .i2c_bus = 2,
-};
-
diff --git a/drivers/staging/gma500/mrst_hdmi.c b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
index e66607e..025d309 100644
--- a/drivers/staging/gma500/mrst_hdmi.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi.c
@@ -98,11 +98,11 @@ struct intel_range {
int min, max;
};
-struct mrst_hdmi_limit {
+struct oaktrail_hdmi_limit {
struct intel_range vco, np, nr, nf;
};
-struct mrst_hdmi_clock {
+struct oaktrail_hdmi_clock {
int np;
int nr;
int nf;
@@ -118,7 +118,7 @@ struct mrst_hdmi_clock {
#define NF_MIN 2
#define NF_MAX 4095
-static const struct mrst_hdmi_limit mrst_hdmi_limit = {
+static const struct oaktrail_hdmi_limit oaktrail_hdmi_limit = {
.vco = { .min = VCO_MIN, .max = VCO_MAX },
.np = { .min = NP_MIN, .max = NP_MAX },
.nr = { .min = NR_MIN, .max = NR_MAX },
@@ -150,7 +150,7 @@ static void scu_busy_loop(void *scu_base)
}
}
-static void mrst_hdmi_reset(struct drm_device *dev)
+static void oaktrail_hdmi_reset(struct drm_device *dev)
{
void *base;
/* FIXME: at least make these defines */
@@ -178,10 +178,10 @@ static void mrst_hdmi_reset(struct drm_device *dev)
iounmap(base);
}
-static void mrst_hdmi_audio_enable(struct drm_device *dev)
+static void oaktrail_hdmi_audio_enable(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
HDMI_WRITE(HDMI_HCR, 0x67);
HDMI_READ(HDMI_HCR);
@@ -193,10 +193,10 @@ static void mrst_hdmi_audio_enable(struct drm_device *dev)
HDMI_READ(HDMI_AUDIO_CTRL);
}
-static void mrst_hdmi_audio_disable(struct drm_device *dev)
+static void oaktrail_hdmi_audio_disable(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
HDMI_WRITE(0x51a8, 0x0);
HDMI_READ(0x51a8);
@@ -208,7 +208,7 @@ static void mrst_hdmi_audio_disable(struct drm_device *dev)
HDMI_READ(HDMI_HCR);
}
-void mrst_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
+void oaktrail_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
{
struct drm_device *dev = crtc->dev;
u32 temp;
@@ -306,13 +306,13 @@ void mrst_crtc_hdmi_dpms(struct drm_crtc *crtc, int mode)
}
-static void mrst_hdmi_dpms(struct drm_encoder *encoder, int mode)
+static void oaktrail_hdmi_dpms(struct drm_encoder *encoder, int mode)
{
static int dpms_mode = -1;
struct drm_device *dev = encoder->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
u32 temp;
if (dpms_mode == mode)
@@ -342,25 +342,25 @@ static unsigned int htotal_calculate(struct drm_display_mode *mode)
return (mode->crtc_hdisplay - 1) | (new_crtc_htotal << 16);
}
-static void mrst_hdmi_find_dpll(struct drm_crtc *crtc, int target,
- int refclk, struct mrst_hdmi_clock *best_clock)
+static void oaktrail_hdmi_find_dpll(struct drm_crtc *crtc, int target,
+ int refclk, struct oaktrail_hdmi_clock *best_clock)
{
int np_min, np_max, nr_min, nr_max;
int np, nr, nf;
- np_min = DIV_ROUND_UP(mrst_hdmi_limit.vco.min, target * 10);
- np_max = mrst_hdmi_limit.vco.max / (target * 10);
- if (np_min < mrst_hdmi_limit.np.min)
- np_min = mrst_hdmi_limit.np.min;
- if (np_max > mrst_hdmi_limit.np.max)
- np_max = mrst_hdmi_limit.np.max;
+ np_min = DIV_ROUND_UP(oaktrail_hdmi_limit.vco.min, target * 10);
+ np_max = oaktrail_hdmi_limit.vco.max / (target * 10);
+ if (np_min < oaktrail_hdmi_limit.np.min)
+ np_min = oaktrail_hdmi_limit.np.min;
+ if (np_max > oaktrail_hdmi_limit.np.max)
+ np_max = oaktrail_hdmi_limit.np.max;
nr_min = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_max));
nr_max = DIV_ROUND_UP((refclk * 1000), (target * 10 * np_min));
- if (nr_min < mrst_hdmi_limit.nr.min)
- nr_min = mrst_hdmi_limit.nr.min;
- if (nr_max > mrst_hdmi_limit.nr.max)
- nr_max = mrst_hdmi_limit.nr.max;
+ if (nr_min < oaktrail_hdmi_limit.nr.min)
+ nr_min = oaktrail_hdmi_limit.nr.min;
+ if (nr_max > oaktrail_hdmi_limit.nr.max)
+ nr_max = oaktrail_hdmi_limit.nr.max;
np = DIV_ROUND_UP((refclk * 1000), (target * 10 * nr_max));
nr = DIV_ROUND_UP((refclk * 1000), (target * 10 * np));
@@ -376,7 +376,7 @@ static void mrst_hdmi_find_dpll(struct drm_crtc *crtc, int target,
best_clock->nf = (nf << 14);
}
-int mrst_crtc_hdmi_mode_set(struct drm_crtc *crtc,
+int oaktrail_crtc_hdmi_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
int x, int y,
@@ -384,7 +384,7 @@ int mrst_crtc_hdmi_mode_set(struct drm_crtc *crtc,
{
struct drm_device *dev = crtc->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
int pipe = 1;
int htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
int hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
@@ -397,7 +397,7 @@ int mrst_crtc_hdmi_mode_set(struct drm_crtc *crtc,
int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
int refclk;
- struct mrst_hdmi_clock clock;
+ struct oaktrail_hdmi_clock clock;
u32 dspcntr, pipeconf, dpll, temp;
int dspcntr_reg = DSPBCNTR;
@@ -417,11 +417,11 @@ int mrst_crtc_hdmi_mode_set(struct drm_crtc *crtc,
/* reset controller: FIXME - can we sort out the ioremap mess ? */
iounmap(hdmi_dev->regs);
- mrst_hdmi_reset(dev);
+ oaktrail_hdmi_reset(dev);
/* program and enable dpll */
refclk = 25000;
- mrst_hdmi_find_dpll(crtc, adjusted_mode->clock, refclk, &clock);
+ oaktrail_hdmi_find_dpll(crtc, adjusted_mode->clock, refclk, &clock);
/* Setting DPLL */
dpll = REG_READ(DPLL_CTRL);
@@ -503,9 +503,10 @@ int mrst_crtc_hdmi_mode_set(struct drm_crtc *crtc,
return 0;
}
-static int mrst_hdmi_mode_valid(struct drm_connector *connector,
+static int oaktrail_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
if (mode->clock > 165000)
return MODE_CLOCK_HIGH;
if (mode->clock < 20000)
@@ -514,10 +515,15 @@ static int mrst_hdmi_mode_valid(struct drm_connector *connector,
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
+ /* We assume worst case scenario of 32 bpp here, since we don't know */
+ if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) >
+ dev_priv->vram_stolen_size)
+ return MODE_MEM;
+
return MODE_OK;
}
-static bool mrst_hdmi_mode_fixup(struct drm_encoder *encoder,
+static bool oaktrail_hdmi_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
@@ -525,12 +531,12 @@ static bool mrst_hdmi_mode_fixup(struct drm_encoder *encoder,
}
static enum drm_connector_status
-mrst_hdmi_detect(struct drm_connector *connector, bool force)
+oaktrail_hdmi_detect(struct drm_connector *connector, bool force)
{
enum drm_connector_status status;
struct drm_device *dev = connector->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
- struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
u32 temp;
temp = HDMI_READ(HDMI_HSR);
@@ -558,7 +564,7 @@ static const unsigned char raw_edid[] = {
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, 0x8d
};
-static int mrst_hdmi_get_modes(struct drm_connector *connector)
+static int oaktrail_hdmi_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -594,80 +600,84 @@ static int mrst_hdmi_get_modes(struct drm_connector *connector)
return ret - i;
}
-static void mrst_hdmi_mode_set(struct drm_encoder *encoder,
+static void oaktrail_hdmi_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
- mrst_hdmi_audio_enable(dev);
+ oaktrail_hdmi_audio_enable(dev);
return;
}
-static void mrst_hdmi_destroy(struct drm_connector *connector)
+static void oaktrail_hdmi_destroy(struct drm_connector *connector)
{
return;
}
-static const struct drm_encoder_helper_funcs mrst_hdmi_helper_funcs = {
- .dpms = mrst_hdmi_dpms,
- .mode_fixup = mrst_hdmi_mode_fixup,
+static const struct drm_encoder_helper_funcs oaktrail_hdmi_helper_funcs = {
+ .dpms = oaktrail_hdmi_dpms,
+ .mode_fixup = oaktrail_hdmi_mode_fixup,
.prepare = psb_intel_encoder_prepare,
- .mode_set = mrst_hdmi_mode_set,
+ .mode_set = oaktrail_hdmi_mode_set,
.commit = psb_intel_encoder_commit,
};
static const struct drm_connector_helper_funcs
- mrst_hdmi_connector_helper_funcs = {
- .get_modes = mrst_hdmi_get_modes,
- .mode_valid = mrst_hdmi_mode_valid,
+ oaktrail_hdmi_connector_helper_funcs = {
+ .get_modes = oaktrail_hdmi_get_modes,
+ .mode_valid = oaktrail_hdmi_mode_valid,
.best_encoder = psb_intel_best_encoder,
};
-static const struct drm_connector_funcs mrst_hdmi_connector_funcs = {
+static const struct drm_connector_funcs oaktrail_hdmi_connector_funcs = {
.dpms = drm_helper_connector_dpms,
- .detect = mrst_hdmi_detect,
+ .detect = oaktrail_hdmi_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = mrst_hdmi_destroy,
+ .destroy = oaktrail_hdmi_destroy,
};
-static void mrst_hdmi_enc_destroy(struct drm_encoder *encoder)
+static void oaktrail_hdmi_enc_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
}
-static const struct drm_encoder_funcs mrst_hdmi_enc_funcs = {
- .destroy = mrst_hdmi_enc_destroy,
+static const struct drm_encoder_funcs oaktrail_hdmi_enc_funcs = {
+ .destroy = oaktrail_hdmi_enc_destroy,
};
-void mrst_hdmi_init(struct drm_device *dev,
+void oaktrail_hdmi_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev)
{
- struct psb_intel_output *psb_intel_output;
+ struct psb_intel_encoder *psb_intel_encoder;
+ struct psb_intel_connector *psb_intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
- psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
- if (!psb_intel_output)
+ psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+ if (!psb_intel_encoder)
return;
- psb_intel_output->mode_dev = mode_dev;
- connector = &psb_intel_output->base;
- encoder = &psb_intel_output->enc;
- drm_connector_init(dev, &psb_intel_output->base,
- &mrst_hdmi_connector_funcs,
+ psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+ if (!psb_intel_connector)
+ goto failed_connector;
+
+ connector = &psb_intel_connector->base;
+ encoder = &psb_intel_encoder->base;
+ drm_connector_init(dev, connector,
+ &oaktrail_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_DVID);
- drm_encoder_init(dev, &psb_intel_output->enc,
- &mrst_hdmi_enc_funcs,
+ drm_encoder_init(dev, encoder,
+ &oaktrail_hdmi_enc_funcs,
DRM_MODE_ENCODER_TMDS);
- drm_mode_connector_attach_encoder(&psb_intel_output->base,
- &psb_intel_output->enc);
+ psb_intel_connector_attach_encoder(psb_intel_connector,
+ psb_intel_encoder);
- psb_intel_output->type = INTEL_OUTPUT_HDMI;
- drm_encoder_helper_add(encoder, &mrst_hdmi_helper_funcs);
- drm_connector_helper_add(connector, &mrst_hdmi_connector_helper_funcs);
+ psb_intel_encoder->type = INTEL_OUTPUT_HDMI;
+ drm_encoder_helper_add(encoder, &oaktrail_hdmi_helper_funcs);
+ drm_connector_helper_add(connector, &oaktrail_hdmi_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
@@ -675,6 +685,9 @@ void mrst_hdmi_init(struct drm_device *dev,
drm_sysfs_connector_add(connector);
return;
+
+failed_connector:
+ kfree(psb_intel_encoder);
}
static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = {
@@ -682,18 +695,18 @@ static DEFINE_PCI_DEVICE_TABLE(hdmi_ids) = {
{}
};
-void mrst_hdmi_setup(struct drm_device *dev)
+void oaktrail_hdmi_setup(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
struct pci_dev *pdev;
- struct mrst_hdmi_dev *hdmi_dev;
+ struct oaktrail_hdmi_dev *hdmi_dev;
int ret;
pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x080d, NULL);
if (!pdev)
return;
- hdmi_dev = kzalloc(sizeof(struct mrst_hdmi_dev), GFP_KERNEL);
+ hdmi_dev = kzalloc(sizeof(struct oaktrail_hdmi_dev), GFP_KERNEL);
if (!hdmi_dev) {
dev_err(dev->dev, "failed to allocate memory\n");
goto out;
@@ -718,12 +731,12 @@ void mrst_hdmi_setup(struct drm_device *dev)
pci_set_drvdata(pdev, hdmi_dev);
/* Initialize i2c controller */
- ret = mrst_hdmi_i2c_init(hdmi_dev->dev);
+ ret = oaktrail_hdmi_i2c_init(hdmi_dev->dev);
if (ret)
dev_err(dev->dev, "HDMI I2C initialization failed\n");
dev_priv->hdmi_priv = hdmi_dev;
- mrst_hdmi_audio_disable(dev);
+ oaktrail_hdmi_audio_disable(dev);
return;
free:
@@ -732,16 +745,16 @@ out:
return;
}
-void mrst_hdmi_teardown(struct drm_device *dev)
+void oaktrail_hdmi_teardown(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
struct pci_dev *pdev;
if (hdmi_dev) {
pdev = hdmi_dev->dev;
pci_set_drvdata(pdev, NULL);
- mrst_hdmi_i2c_exit(pdev);
+ oaktrail_hdmi_i2c_exit(pdev);
iounmap(hdmi_dev->regs);
kfree(hdmi_dev);
pci_dev_put(pdev);
@@ -749,10 +762,10 @@ void mrst_hdmi_teardown(struct drm_device *dev)
}
/* save HDMI register state */
-void mrst_hdmi_save(struct drm_device *dev)
+void oaktrail_hdmi_save(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
int i;
/* dpll */
@@ -800,10 +813,10 @@ void mrst_hdmi_save(struct drm_device *dev)
}
/* restore HDMI register state */
-void mrst_hdmi_restore(struct drm_device *dev)
+void oaktrail_hdmi_restore(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- struct mrst_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev_priv->hdmi_priv;
int i;
/* dpll */
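
The oaktrail_hdmi_init() hunk above replaces the old single psb_intel_output allocation with separate encoder and connector objects, unwinding through a goto label if the second allocation fails. A minimal sketch of that allocate-attach-unwind pattern, with invented demo_* names standing in for the driver's structures:

#include <linux/errno.h>
#include <linux/slab.h>

struct demo_encoder   { int type; };
struct demo_connector { struct demo_encoder *encoder; };

/* Allocate both halves of an output; free the encoder if the connector fails. */
static int demo_output_init(struct demo_connector **out)
{
	struct demo_encoder *enc;
	struct demo_connector *con;

	enc = kzalloc(sizeof(*enc), GFP_KERNEL);
	if (!enc)
		return -ENOMEM;

	con = kzalloc(sizeof(*con), GFP_KERNEL);
	if (!con)
		goto failed_connector;

	con->encoder = enc;	/* attach, as the driver's attach helper does */
	*out = con;
	return 0;

failed_connector:
	kfree(enc);
	return -ENOMEM;
}

The single-label unwind mirrors the failed_connector: paths added to the HDMI and LVDS init functions in this patch.
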
diff --git a/drivers/staging/gma500/mrst_hdmi_i2c.c b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
index 36e7edc..7054408 100644
--- a/drivers/staging/gma500/mrst_hdmi_i2c.c
+++ b/drivers/gpu/drm/gma500/oaktrail_hdmi_i2c.c
@@ -24,12 +24,12 @@
* Li Peng <peng.li@intel.com>
*/
+#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
-#include <linux/export.h>
#include "psb_drv.h"
#define HDMI_READ(reg) readl(hdmi_dev->regs + (reg))
@@ -74,7 +74,7 @@ struct hdmi_i2c_dev {
int buf_offset;
};
-static void hdmi_i2c_irq_enable(struct mrst_hdmi_dev *hdmi_dev)
+static void hdmi_i2c_irq_enable(struct oaktrail_hdmi_dev *hdmi_dev)
{
u32 temp;
@@ -84,7 +84,7 @@ static void hdmi_i2c_irq_enable(struct mrst_hdmi_dev *hdmi_dev)
HDMI_READ(HDMI_HICR);
}
-static void hdmi_i2c_irq_disable(struct mrst_hdmi_dev *hdmi_dev)
+static void hdmi_i2c_irq_disable(struct oaktrail_hdmi_dev *hdmi_dev)
{
HDMI_WRITE(HDMI_HICR, 0x0);
HDMI_READ(HDMI_HICR);
@@ -92,7 +92,7 @@ static void hdmi_i2c_irq_disable(struct mrst_hdmi_dev *hdmi_dev)
static int xfer_read(struct i2c_adapter *adap, struct i2c_msg *pmsg)
{
- struct mrst_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
+ struct oaktrail_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
u32 temp;
@@ -121,11 +121,11 @@ static int xfer_write(struct i2c_adapter *adap, struct i2c_msg *pmsg)
return 0;
}
-static int mrst_hdmi_i2c_access(struct i2c_adapter *adap,
+static int oaktrail_hdmi_i2c_access(struct i2c_adapter *adap,
struct i2c_msg *pmsg,
int num)
{
- struct mrst_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
+ struct oaktrail_hdmi_dev *hdmi_dev = i2c_get_adapdata(adap);
struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
int i, err = 0;
@@ -154,25 +154,25 @@ static int mrst_hdmi_i2c_access(struct i2c_adapter *adap,
return i;
}
-static u32 mrst_hdmi_i2c_func(struct i2c_adapter *adapter)
+static u32 oaktrail_hdmi_i2c_func(struct i2c_adapter *adapter)
{
return I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR;
}
-static const struct i2c_algorithm mrst_hdmi_i2c_algorithm = {
- .master_xfer = mrst_hdmi_i2c_access,
- .functionality = mrst_hdmi_i2c_func,
+static const struct i2c_algorithm oaktrail_hdmi_i2c_algorithm = {
+ .master_xfer = oaktrail_hdmi_i2c_access,
+ .functionality = oaktrail_hdmi_i2c_func,
};
-static struct i2c_adapter mrst_hdmi_i2c_adapter = {
- .name = "mrst_hdmi_i2c",
+static struct i2c_adapter oaktrail_hdmi_i2c_adapter = {
+ .name = "oaktrail_hdmi_i2c",
.nr = 3,
.owner = THIS_MODULE,
.class = I2C_CLASS_DDC,
- .algo = &mrst_hdmi_i2c_algorithm,
+ .algo = &oaktrail_hdmi_i2c_algorithm,
};
-static void hdmi_i2c_read(struct mrst_hdmi_dev *hdmi_dev)
+static void hdmi_i2c_read(struct oaktrail_hdmi_dev *hdmi_dev)
{
struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
struct i2c_msg *msg = i2c_dev->msg;
@@ -201,7 +201,7 @@ static void hdmi_i2c_read(struct mrst_hdmi_dev *hdmi_dev)
return;
}
-static void hdmi_i2c_transaction_done(struct mrst_hdmi_dev *hdmi_dev)
+static void hdmi_i2c_transaction_done(struct oaktrail_hdmi_dev *hdmi_dev)
{
struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
u32 temp;
@@ -220,9 +220,9 @@ static void hdmi_i2c_transaction_done(struct mrst_hdmi_dev *hdmi_dev)
return;
}
-static irqreturn_t mrst_hdmi_i2c_handler(int this_irq, void *dev)
+static irqreturn_t oaktrail_hdmi_i2c_handler(int this_irq, void *dev)
{
- struct mrst_hdmi_dev *hdmi_dev = dev;
+ struct oaktrail_hdmi_dev *hdmi_dev = dev;
struct hdmi_i2c_dev *i2c_dev = hdmi_dev->i2c_dev;
u32 stat;
@@ -248,7 +248,7 @@ static irqreturn_t mrst_hdmi_i2c_handler(int this_irq, void *dev)
* choose alternate function 2 of GPIO pin 52, 53,
* which is used by HDMI I2C logic
*/
-static void mrst_hdmi_i2c_gpio_fix(void)
+static void oaktrail_hdmi_i2c_gpio_fix(void)
{
void *base;
unsigned int gpio_base = 0xff12c000;
@@ -270,9 +270,9 @@ static void mrst_hdmi_i2c_gpio_fix(void)
iounmap(base);
}
-int mrst_hdmi_i2c_init(struct pci_dev *dev)
+int oaktrail_hdmi_i2c_init(struct pci_dev *dev)
{
- struct mrst_hdmi_dev *hdmi_dev;
+ struct oaktrail_hdmi_dev *hdmi_dev;
struct hdmi_i2c_dev *i2c_dev;
int ret;
@@ -285,26 +285,26 @@ int mrst_hdmi_i2c_init(struct pci_dev *dev)
goto exit;
}
- i2c_dev->adap = &mrst_hdmi_i2c_adapter;
+ i2c_dev->adap = &oaktrail_hdmi_i2c_adapter;
i2c_dev->status = I2C_STAT_INIT;
init_completion(&i2c_dev->complete);
mutex_init(&i2c_dev->i2c_lock);
- i2c_set_adapdata(&mrst_hdmi_i2c_adapter, hdmi_dev);
+ i2c_set_adapdata(&oaktrail_hdmi_i2c_adapter, hdmi_dev);
hdmi_dev->i2c_dev = i2c_dev;
/* Enable HDMI I2C function on gpio */
- mrst_hdmi_i2c_gpio_fix();
+ oaktrail_hdmi_i2c_gpio_fix();
/* request irq */
- ret = request_irq(dev->irq, mrst_hdmi_i2c_handler, IRQF_SHARED,
- mrst_hdmi_i2c_adapter.name, hdmi_dev);
+ ret = request_irq(dev->irq, oaktrail_hdmi_i2c_handler, IRQF_SHARED,
+ oaktrail_hdmi_i2c_adapter.name, hdmi_dev);
if (ret) {
DRM_ERROR("Failed to request IRQ for I2C controller\n");
goto err;
}
/* Adapter registration */
- ret = i2c_add_numbered_adapter(&mrst_hdmi_i2c_adapter);
+ ret = i2c_add_numbered_adapter(&oaktrail_hdmi_i2c_adapter);
return ret;
err:
@@ -313,13 +313,13 @@ exit:
return ret;
}
-void mrst_hdmi_i2c_exit(struct pci_dev *dev)
+void oaktrail_hdmi_i2c_exit(struct pci_dev *dev)
{
- struct mrst_hdmi_dev *hdmi_dev;
+ struct oaktrail_hdmi_dev *hdmi_dev;
struct hdmi_i2c_dev *i2c_dev;
hdmi_dev = pci_get_drvdata(dev);
- if (i2c_del_adapter(&mrst_hdmi_i2c_adapter))
+ if (i2c_del_adapter(&oaktrail_hdmi_i2c_adapter))
DRM_DEBUG_DRIVER("Failed to delete hdmi-i2c adapter\n");
i2c_dev = hdmi_dev->i2c_dev;
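
The I2C hunks above keep a fixed-numbered adapter and hang the HDMI device off it with i2c_set_adapdata(), so the transfer callbacks can recover it with i2c_get_adapdata(). A hedged sketch of that registration round trip; the demo_* names and the bus number are illustrative only:

#include <linux/module.h>
#include <linux/i2c.h>

struct demo_hdmi_dev { void __iomem *regs; };

static int demo_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	/* Recover the private device attached by the init code. */
	struct demo_hdmi_dev *hdmi = i2c_get_adapdata(adap);

	(void)hdmi;	/* a real driver programs hdmi->regs here */
	return num;
}

static u32 demo_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C;
}

static const struct i2c_algorithm demo_algo = {
	.master_xfer	= demo_xfer,
	.functionality	= demo_func,
};

static struct i2c_adapter demo_adapter = {
	.owner	= THIS_MODULE,
	.class	= I2C_CLASS_DDC,
	.algo	= &demo_algo,
	.nr	= 3,			/* fixed bus number, as in the driver */
	.name	= "demo_hdmi_i2c",
};

static int demo_i2c_init(struct demo_hdmi_dev *hdmi)
{
	i2c_set_adapdata(&demo_adapter, hdmi);
	return i2c_add_numbered_adapter(&demo_adapter);
}

Because the adapter carries a fixed .nr, the DDC bus shows up at the same index regardless of probe order.
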
diff --git a/drivers/staging/gma500/mrst_lvds.c b/drivers/gpu/drm/gma500/oaktrail_lvds.c
index e7999a2..238bbe1 100644
--- a/drivers/staging/gma500/mrst_lvds.c
+++ b/drivers/gpu/drm/gma500/oaktrail_lvds.c
@@ -42,8 +42,9 @@
/**
* Sets the power state for the panel.
*/
-static void mrst_lvds_set_power(struct drm_device *dev,
- struct psb_intel_output *output, bool on)
+static void oaktrail_lvds_set_power(struct drm_device *dev,
+ struct psb_intel_encoder *psb_intel_encoder,
+ bool on)
{
u32 pp_status;
struct drm_psb_private *dev_priv = dev->dev_private;
@@ -74,27 +75,30 @@ static void mrst_lvds_set_power(struct drm_device *dev,
gma_power_end(dev);
}
-static void mrst_lvds_dpms(struct drm_encoder *encoder, int mode)
+static void oaktrail_lvds_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
- struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
+ struct psb_intel_encoder *psb_intel_encoder =
+ to_psb_intel_encoder(encoder);
if (mode == DRM_MODE_DPMS_ON)
- mrst_lvds_set_power(dev, output, true);
+ oaktrail_lvds_set_power(dev, psb_intel_encoder, true);
else
- mrst_lvds_set_power(dev, output, false);
+ oaktrail_lvds_set_power(dev, psb_intel_encoder, false);
/* XXX: We never power down the LVDS pairs. */
}
-static void mrst_lvds_mode_set(struct drm_encoder *encoder,
+static void oaktrail_lvds_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct psb_intel_mode_device *mode_dev =
- enc_to_psb_intel_output(encoder)->mode_dev;
struct drm_device *dev = encoder->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+ struct drm_mode_config *mode_config = &dev->mode_config;
+ struct drm_connector *connector = NULL;
+ struct drm_crtc *crtc = encoder->crtc;
u32 lvds_port;
uint64_t v = DRM_MODE_SCALE_FULLSCREEN;
@@ -118,8 +122,19 @@ static void mrst_lvds_mode_set(struct drm_encoder *encoder,
REG_WRITE(LVDS, lvds_port);
+	/* Find the connector we're trying to set up */
+	list_for_each_entry(connector, &mode_config->connector_list, head) {
+		if (connector->encoder && connector->encoder->crtc == crtc)
+			break;
+	}
+
+	if (&connector->head == &mode_config->connector_list) {
+		DRM_ERROR("Couldn't find connector when setting mode\n");
+		return;
+	}
+
drm_connector_property_get_value(
- &enc_to_psb_intel_output(encoder)->base,
+ connector,
dev->mode_config.scaling_mode_property,
&v);
@@ -147,11 +162,13 @@ static void mrst_lvds_mode_set(struct drm_encoder *encoder,
gma_power_end(dev);
}
-static void mrst_lvds_prepare(struct drm_encoder *encoder)
+static void oaktrail_lvds_prepare(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
- struct psb_intel_mode_device *mode_dev = output->mode_dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_encoder *psb_intel_encoder =
+ to_psb_intel_encoder(encoder);
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
if (!gma_power_begin(dev, true))
return;
@@ -159,11 +176,11 @@ static void mrst_lvds_prepare(struct drm_encoder *encoder)
mode_dev->saveBLC_PWM_CTL = REG_READ(BLC_PWM_CTL);
mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
BACKLIGHT_DUTY_CYCLE_MASK);
- mrst_lvds_set_power(dev, output, false);
+ oaktrail_lvds_set_power(dev, psb_intel_encoder, false);
gma_power_end(dev);
}
-static u32 mrst_lvds_get_max_backlight(struct drm_device *dev)
+static u32 oaktrail_lvds_get_max_backlight(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
u32 ret;
@@ -182,24 +199,26 @@ static u32 mrst_lvds_get_max_backlight(struct drm_device *dev)
return ret;
}
-static void mrst_lvds_commit(struct drm_encoder *encoder)
+static void oaktrail_lvds_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
- struct psb_intel_mode_device *mode_dev = output->mode_dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_encoder *psb_intel_encoder =
+ to_psb_intel_encoder(encoder);
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
if (mode_dev->backlight_duty_cycle == 0)
mode_dev->backlight_duty_cycle =
- mrst_lvds_get_max_backlight(dev);
- mrst_lvds_set_power(dev, output, true);
+ oaktrail_lvds_get_max_backlight(dev);
+ oaktrail_lvds_set_power(dev, psb_intel_encoder, true);
}
-static const struct drm_encoder_helper_funcs mrst_lvds_helper_funcs = {
- .dpms = mrst_lvds_dpms,
+static const struct drm_encoder_helper_funcs oaktrail_lvds_helper_funcs = {
+ .dpms = oaktrail_lvds_dpms,
.mode_fixup = psb_intel_lvds_mode_fixup,
- .prepare = mrst_lvds_prepare,
- .mode_set = mrst_lvds_mode_set,
- .commit = mrst_lvds_commit,
+ .prepare = oaktrail_lvds_prepare,
+ .mode_set = oaktrail_lvds_mode_set,
+ .commit = oaktrail_lvds_commit,
};
static struct drm_display_mode lvds_configuration_modes[] = {
@@ -228,17 +247,20 @@ static struct drm_display_mode lvds_configuration_modes[] = {
/* Returns the panel fixed mode from configuration. */
-static struct drm_display_mode *
-mrst_lvds_get_configuration_mode(struct drm_device *dev)
+static void oaktrail_lvds_get_configuration_mode(struct drm_device *dev,
+ struct psb_intel_mode_device *mode_dev)
{
struct drm_display_mode *mode = NULL;
struct drm_psb_private *dev_priv = dev->dev_private;
- struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
+ struct oaktrail_timing_info *ti = &dev_priv->gct_data.DTD;
+ mode_dev->panel_fixed_mode = NULL;
+
+ /* Use the firmware provided data on Moorestown */
if (dev_priv->vbt_data.size != 0x00) { /*if non-zero, then use vbt*/
mode = kzalloc(sizeof(*mode), GFP_KERNEL);
if (!mode)
- return NULL;
+ return;
mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
@@ -270,55 +292,72 @@ mrst_lvds_get_configuration_mode(struct drm_device *dev)
printk(KERN_INFO "vtotal is %d\n", mode->vtotal);
printk(KERN_INFO "clock is %d\n", mode->clock);
#endif
- } else
- mode = drm_mode_duplicate(dev, &lvds_configuration_modes[2]);
-
- drm_mode_set_name(mode);
- drm_mode_set_crtcinfo(mode, 0);
+ mode_dev->panel_fixed_mode = mode;
+ }
- return mode;
+ /* Use the BIOS VBT mode if available */
+ if (mode_dev->panel_fixed_mode == NULL && mode_dev->vbt_mode)
+ mode_dev->panel_fixed_mode = drm_mode_duplicate(dev,
+ mode_dev->vbt_mode);
+
+ /* Then try the LVDS VBT mode */
+ if (mode_dev->panel_fixed_mode == NULL)
+ if (dev_priv->lfp_lvds_vbt_mode)
+ mode_dev->panel_fixed_mode =
+ drm_mode_duplicate(dev,
+ dev_priv->lfp_lvds_vbt_mode);
+ /* Then guess */
+ if (mode_dev->panel_fixed_mode == NULL)
+ mode_dev->panel_fixed_mode
+ = drm_mode_duplicate(dev, &lvds_configuration_modes[2]);
+
+ drm_mode_set_name(mode_dev->panel_fixed_mode);
+ drm_mode_set_crtcinfo(mode_dev->panel_fixed_mode, 0);
}
/**
- * mrst_lvds_init - setup LVDS connectors on this device
+ * oaktrail_lvds_init - setup LVDS connectors on this device
* @dev: drm device
*
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
*/
-void mrst_lvds_init(struct drm_device *dev,
+void oaktrail_lvds_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev)
{
- struct psb_intel_output *psb_intel_output;
+ struct psb_intel_encoder *psb_intel_encoder;
+ struct psb_intel_connector *psb_intel_connector;
struct drm_connector *connector;
struct drm_encoder *encoder;
- struct drm_psb_private *dev_priv =
- (struct drm_psb_private *) dev->dev_private;
+ struct drm_psb_private *dev_priv = dev->dev_private;
struct edid *edid;
int ret = 0;
struct i2c_adapter *i2c_adap;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
- psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
- if (!psb_intel_output)
+ psb_intel_encoder = kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+ if (!psb_intel_encoder)
return;
- psb_intel_output->mode_dev = mode_dev;
- connector = &psb_intel_output->base;
- encoder = &psb_intel_output->enc;
+ psb_intel_connector = kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+ if (!psb_intel_connector)
+ goto failed_connector;
+
+ connector = &psb_intel_connector->base;
+ encoder = &psb_intel_encoder->base;
dev_priv->is_lvds_on = true;
- drm_connector_init(dev, &psb_intel_output->base,
+ drm_connector_init(dev, connector,
&psb_intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_lvds_enc_funcs,
+ drm_encoder_init(dev, encoder, &psb_intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS);
- drm_mode_connector_attach_encoder(&psb_intel_output->base,
- &psb_intel_output->enc);
- psb_intel_output->type = INTEL_OUTPUT_LVDS;
+ psb_intel_connector_attach_encoder(psb_intel_connector,
+ psb_intel_encoder);
+ psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
- drm_encoder_helper_add(encoder, &mrst_lvds_helper_funcs);
+ drm_encoder_helper_add(encoder, &oaktrail_lvds_helper_funcs);
drm_connector_helper_add(connector,
&psb_intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
@@ -336,6 +375,8 @@ void mrst_lvds_init(struct drm_device *dev,
if (dev_priv->vbt_data.size != 0x00)
mode_dev->panel_wants_dither = (dev_priv->gct_data.
Panel_Port_Control & MRST_PANEL_8TO6_DITHER_ENABLE);
+ if (dev_priv->lvds_dither)
+ mode_dev->panel_wants_dither = 1;
/*
* LVDS discovery:
@@ -348,7 +389,6 @@ void mrst_lvds_init(struct drm_device *dev,
*/
i2c_adap = i2c_get_adapter(dev_priv->ops->i2c_bus);
-
if (i2c_adap == NULL)
dev_err(dev->dev, "No ddc adapter available!\n");
/*
@@ -376,7 +416,7 @@ void mrst_lvds_init(struct drm_device *dev,
* If we didn't get EDID, try getting panel timing
* from configuration data
*/
- mode_dev->panel_fixed_mode = mrst_lvds_get_configuration_mode(dev);
+ oaktrail_lvds_get_configuration_mode(dev, mode_dev);
if (mode_dev->panel_fixed_mode) {
mode_dev->panel_fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
@@ -395,13 +435,15 @@ out:
failed_find:
dev_dbg(dev->dev, "No LVDS modes found, disabling.\n");
- if (psb_intel_output->ddc_bus)
- psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
+ if (psb_intel_encoder->ddc_bus)
+ psb_intel_i2c_destroy(psb_intel_encoder->ddc_bus);
/* failed_ddc: */
drm_encoder_cleanup(encoder);
drm_connector_cleanup(connector);
- kfree(connector);
+ kfree(psb_intel_connector);
+failed_connector:
+ kfree(psb_intel_encoder);
}
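
With mode_dev no longer embedded in a per-output structure, oaktrail_lvds_mode_set() now has to walk dev->mode_config.connector_list to locate the connector whose encoder drives the CRTC being programmed. A hedged sketch of the usual find-or-bail idiom for that walk, returning NULL on a miss so the caller's error path is unambiguous (demo_* types are placeholders):

#include <linux/list.h>

struct demo_crtc      { int id; };
struct demo_encoder   { struct demo_crtc *crtc; };
struct demo_connector {
	struct list_head head;
	struct demo_encoder *encoder;
};

/* Return the first connector whose encoder is bound to @crtc, or NULL. */
static struct demo_connector *
demo_find_connector(struct list_head *connector_list, struct demo_crtc *crtc)
{
	struct demo_connector *connector;

	list_for_each_entry(connector, connector_list, head) {
		if (connector->encoder && connector->encoder->crtc == crtc)
			return connector;	/* stop at the first match */
	}
	return NULL;	/* no connector drives this CRTC */
}
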
diff --git a/drivers/staging/gma500/power.c b/drivers/gpu/drm/gma500/power.c
index 436fe97..9402569 100644
--- a/drivers/staging/gma500/power.c
+++ b/drivers/gpu/drm/gma500/power.c
@@ -83,7 +83,7 @@ static void gma_suspend_display(struct drm_device *dev)
{
struct drm_psb_private *dev_priv = dev->dev_private;
- if (!dev_priv->display_power)
+ if (dev_priv->suspended)
return;
dev_priv->ops->save_regs(dev);
dev_priv->ops->power_down(dev);
@@ -101,7 +101,7 @@ static void gma_resume_display(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct drm_psb_private *dev_priv = dev->dev_private;
- if (dev_priv->display_power)
+	if (!dev_priv->suspended)
return;
/* turn on the display power island */
@@ -265,8 +265,6 @@ bool gma_power_begin(struct drm_device *dev, bool force_on)
/* Ok power up needed */
ret = gma_resume_pci(dev->pdev);
if (ret == 0) {
- /* FIXME: we want to defer this for Medfield/Oaktrail */
- gma_resume_display(dev);
psb_irq_preinstall(dev);
psb_irq_postinstall(dev);
pm_runtime_get(&dev->pdev->dev);
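
The power.c hunks switch the suspend bookkeeping to a single suspended flag and drop the eager display resume from gma_power_begin(). A hedged sketch of the general shape such a helper takes — resume on demand, then pin the device with a runtime-PM reference; the demo_* names and fields are illustrative, not the driver's actual implementation:

#include <linux/pm_runtime.h>
#include <linux/pci.h>

struct demo_priv {
	bool suspended;			/* mirrors dev_priv->suspended */
	struct pci_dev *pdev;
};

/* Ensure the device is powered before register access; pair with demo_power_end(). */
static bool demo_power_begin(struct demo_priv *priv)
{
	if (!priv->suspended) {
		pm_runtime_get(&priv->pdev->dev);
		return true;
	}

	if (pm_runtime_get_sync(&priv->pdev->dev) < 0)
		return false;

	priv->suspended = false;
	return true;
}

static void demo_power_end(struct demo_priv *priv)
{
	pm_runtime_put(&priv->pdev->dev);
}
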
diff --git a/drivers/staging/gma500/power.h b/drivers/gpu/drm/gma500/power.h
index 1969d2e..1969d2e 100644
--- a/drivers/staging/gma500/power.h
+++ b/drivers/gpu/drm/gma500/power.h
diff --git a/drivers/staging/gma500/psb_device.c b/drivers/gpu/drm/gma500/psb_device.c
index b97aa78..e5f5906 100644
--- a/drivers/staging/gma500/psb_device.c
+++ b/drivers/gpu/drm/gma500/psb_device.c
@@ -20,7 +20,7 @@
#include <linux/backlight.h>
#include <drm/drmP.h>
#include <drm/drm.h>
-#include "psb_drm.h"
+#include "gma_drm.h"
#include "psb_drv.h"
#include "psb_reg.h"
#include "psb_intel_reg.h"
@@ -290,11 +290,17 @@ static void psb_get_core_freq(struct drm_device *dev)
static int psb_chip_setup(struct drm_device *dev)
{
psb_get_core_freq(dev);
+ gma_intel_setup_gmbus(dev);
gma_intel_opregion_init(dev);
psb_intel_init_bios(dev);
return 0;
}
+static void psb_chip_teardown(struct drm_device *dev)
+{
+ gma_intel_teardown_gmbus(dev);
+}
+
const struct psb_ops psb_chip_ops = {
.name = "Poulsbo",
.accel_2d = 1,
@@ -302,6 +308,7 @@ const struct psb_ops psb_chip_ops = {
.crtcs = 2,
.sgx_offset = PSB_SGX_OFFSET,
.chip_setup = psb_chip_setup,
+ .chip_teardown = psb_chip_teardown,
.crtc_helper = &psb_intel_helper_funcs,
.crtc_funcs = &psb_intel_crtc_funcs,
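
The psb_device.c hunk adds an optional chip_teardown hook next to chip_setup in the per-chip ops table; the unload path only calls it when the variant provides one. A minimal sketch of that optional-callback ops-table pattern (invented demo_* names):

struct demo_dev;

struct demo_chip_ops {
	const char *name;
	int  (*chip_setup)(struct demo_dev *dev);
	void (*chip_teardown)(struct demo_dev *dev);	/* optional */
};

static int demo_load(struct demo_dev *dev, const struct demo_chip_ops *ops)
{
	return ops->chip_setup ? ops->chip_setup(dev) : 0;
}

static void demo_unload(struct demo_dev *dev, const struct demo_chip_ops *ops)
{
	/* Variants without a teardown hook simply skip this step. */
	if (ops->chip_teardown)
		ops->chip_teardown(dev);
}
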
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
new file mode 100644
index 0000000..f14768f
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -0,0 +1,703 @@
+/**************************************************************************
+ * Copyright (c) 2007-2011, Intel Corporation.
+ * All Rights Reserved.
+ * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX., USA.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ **************************************************************************/
+
+#include <drm/drmP.h>
+#include <drm/drm.h>
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "framebuffer.h"
+#include "psb_reg.h"
+#include "psb_intel_reg.h"
+#include "intel_bios.h"
+#include "mid_bios.h"
+#include <drm/drm_pciids.h>
+#include "power.h"
+#include <linux/cpu.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/pm_runtime.h>
+#include <acpi/video.h>
+#include <linux/module.h>
+
+static int drm_psb_trap_pagefaults;
+
+static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+
+MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
+module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
+
+
+static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
+ { 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
+ { 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
+#if defined(CONFIG_DRM_GMA600)
+ { 0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+ { 0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+ { 0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+ { 0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+ { 0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+ { 0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+ { 0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+ { 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+ /* Atom E620 */
+ { 0x8086, 0x4108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &oaktrail_chip_ops},
+#endif
+#if defined(CONFIG_DRM_GMA3600)
+ { 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+ { 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
+#endif
+ { 0, 0, 0}
+};
+MODULE_DEVICE_TABLE(pci, pciidlist);
+
+/*
+ * Standard IOCTLs.
+ */
+
+#define DRM_IOCTL_PSB_ADB \
+ DRM_IOWR(DRM_GMA_ADB + DRM_COMMAND_BASE, uint32_t)
+#define DRM_IOCTL_PSB_MODE_OPERATION \
+ DRM_IOWR(DRM_GMA_MODE_OPERATION + DRM_COMMAND_BASE, \
+ struct drm_psb_mode_operation_arg)
+#define DRM_IOCTL_PSB_STOLEN_MEMORY \
+ DRM_IOWR(DRM_GMA_STOLEN_MEMORY + DRM_COMMAND_BASE, \
+ struct drm_psb_stolen_memory_arg)
+#define DRM_IOCTL_PSB_GAMMA \
+ DRM_IOWR(DRM_GMA_GAMMA + DRM_COMMAND_BASE, \
+ struct drm_psb_dpst_lut_arg)
+#define DRM_IOCTL_PSB_DPST_BL \
+ DRM_IOWR(DRM_GMA_DPST_BL + DRM_COMMAND_BASE, \
+ uint32_t)
+#define DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID \
+ DRM_IOWR(DRM_GMA_GET_PIPE_FROM_CRTC_ID + DRM_COMMAND_BASE, \
+ struct drm_psb_get_pipe_from_crtc_id_arg)
+#define DRM_IOCTL_PSB_GEM_CREATE \
+ DRM_IOWR(DRM_GMA_GEM_CREATE + DRM_COMMAND_BASE, \
+ struct drm_psb_gem_create)
+#define DRM_IOCTL_PSB_GEM_MMAP \
+ DRM_IOWR(DRM_GMA_GEM_MMAP + DRM_COMMAND_BASE, \
+ struct drm_psb_gem_mmap)
+
+static int psb_adb_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+static int psb_gamma_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+#define PSB_IOCTL_DEF(ioctl, func, flags) \
+ [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
+
+static struct drm_ioctl_desc psb_ioctls[] = {
+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_ADB, psb_adb_ioctl, DRM_AUTH),
+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_MODE_OPERATION, psb_mode_operation_ioctl,
+ DRM_AUTH),
+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_STOLEN_MEMORY, psb_stolen_memory_ioctl,
+ DRM_AUTH),
+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_GAMMA, psb_gamma_ioctl, DRM_AUTH),
+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_DPST_BL, psb_dpst_bl_ioctl, DRM_AUTH),
+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID,
+ psb_intel_get_pipe_from_crtc_id, 0),
+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_GEM_CREATE, psb_gem_create_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
+ PSB_IOCTL_DEF(DRM_IOCTL_PSB_GEM_MMAP, psb_gem_mmap_ioctl,
+ DRM_UNLOCKED | DRM_AUTH),
+};
+
+static void psb_lastclose(struct drm_device *dev)
+{
+ return;
+}
+
+static void psb_do_takedown(struct drm_device *dev)
+{
+}
+
+static int psb_do_init(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_gtt *pg = &dev_priv->gtt;
+
+ uint32_t stolen_gtt;
+
+ int ret = -ENOMEM;
+
+ if (pg->mmu_gatt_start & 0x0FFFFFFF) {
+ dev_err(dev->dev, "Gatt must be 256M aligned. This is a bug.\n");
+ ret = -EINVAL;
+ goto out_err;
+ }
+
+
+ stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
+ stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ stolen_gtt =
+ (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
+
+ dev_priv->gatt_free_offset = pg->mmu_gatt_start +
+ (stolen_gtt << PAGE_SHIFT) * 1024;
+
+ if (1 || drm_debug) {
+ uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
+ uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
+ DRM_INFO("SGX core id = 0x%08x\n", core_id);
+ DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
+ (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
+ _PSB_CC_REVISION_MAJOR_SHIFT,
+ (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
+ _PSB_CC_REVISION_MINOR_SHIFT);
+ DRM_INFO
+ ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
+ (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
+ _PSB_CC_REVISION_MAINTENANCE_SHIFT,
+ (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
+ _PSB_CC_REVISION_DESIGNER_SHIFT);
+ }
+
+
+ spin_lock_init(&dev_priv->irqmask_lock);
+ spin_lock_init(&dev_priv->lock_2d);
+
+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
+ PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
+ PSB_RSGX32(PSB_CR_BIF_BANK1);
+ PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_MMU_ER_MASK,
+ PSB_CR_BIF_CTRL);
+ psb_spank(dev_priv);
+
+ /* mmu_gatt ?? */
+ PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
+ return 0;
+out_err:
+ psb_do_takedown(dev);
+ return ret;
+}
+
+static int psb_driver_unload(struct drm_device *dev)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ /* Kill vblank etc here */
+
+ gma_backlight_exit(dev);
+
+ psb_modeset_cleanup(dev);
+
+ if (dev_priv) {
+ psb_lid_timer_takedown(dev_priv);
+ gma_intel_opregion_exit(dev);
+
+ if (dev_priv->ops->chip_teardown)
+ dev_priv->ops->chip_teardown(dev);
+ psb_do_takedown(dev);
+
+
+ if (dev_priv->pf_pd) {
+ psb_mmu_free_pagedir(dev_priv->pf_pd);
+ dev_priv->pf_pd = NULL;
+ }
+ if (dev_priv->mmu) {
+ struct psb_gtt *pg = &dev_priv->gtt;
+
+ down_read(&pg->sem);
+ psb_mmu_remove_pfn_sequence(
+ psb_mmu_get_default_pd
+ (dev_priv->mmu),
+ pg->mmu_gatt_start,
+ dev_priv->vram_stolen_size >> PAGE_SHIFT);
+ up_read(&pg->sem);
+ psb_mmu_driver_takedown(dev_priv->mmu);
+ dev_priv->mmu = NULL;
+ }
+ psb_gtt_takedown(dev);
+ if (dev_priv->scratch_page) {
+ __free_page(dev_priv->scratch_page);
+ dev_priv->scratch_page = NULL;
+ }
+ if (dev_priv->vdc_reg) {
+ iounmap(dev_priv->vdc_reg);
+ dev_priv->vdc_reg = NULL;
+ }
+ if (dev_priv->sgx_reg) {
+ iounmap(dev_priv->sgx_reg);
+ dev_priv->sgx_reg = NULL;
+ }
+
+ kfree(dev_priv);
+ dev->dev_private = NULL;
+
+ /*destroy VBT data*/
+ psb_intel_destroy_bios(dev);
+ }
+
+ gma_power_uninit(dev);
+
+ return 0;
+}
+
+
+static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
+{
+ struct drm_psb_private *dev_priv;
+ unsigned long resource_start;
+ struct psb_gtt *pg;
+ unsigned long irqflags;
+ int ret = -ENOMEM;
+ uint32_t tt_pages;
+ struct drm_connector *connector;
+ struct psb_intel_encoder *psb_intel_encoder;
+
+ dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
+ if (dev_priv == NULL)
+ return -ENOMEM;
+
+ dev_priv->ops = (struct psb_ops *)chipset;
+ dev_priv->dev = dev;
+ dev->dev_private = (void *) dev_priv;
+
+ if (!IS_PSB(dev)) {
+ if (pci_enable_msi(dev->pdev))
+ dev_warn(dev->dev, "Enabling MSI failed!\n");
+ }
+
+ dev_priv->num_pipe = dev_priv->ops->pipes;
+
+ resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
+
+ dev_priv->vdc_reg =
+ ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
+ if (!dev_priv->vdc_reg)
+ goto out_err;
+
+ dev_priv->sgx_reg = ioremap(resource_start + dev_priv->ops->sgx_offset,
+ PSB_SGX_SIZE);
+ if (!dev_priv->sgx_reg)
+ goto out_err;
+
+ ret = dev_priv->ops->chip_setup(dev);
+ if (ret)
+ goto out_err;
+
+ /* Init OSPM support */
+ gma_power_init(dev);
+
+ ret = -ENOMEM;
+
+ dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
+ if (!dev_priv->scratch_page)
+ goto out_err;
+
+ set_pages_uc(dev_priv->scratch_page, 1);
+
+ ret = psb_gtt_init(dev, 0);
+ if (ret)
+ goto out_err;
+
+ dev_priv->mmu = psb_mmu_driver_init((void *)0,
+ drm_psb_trap_pagefaults, 0,
+ dev_priv);
+ if (!dev_priv->mmu)
+ goto out_err;
+
+ pg = &dev_priv->gtt;
+
+ tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
+ (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
+
+
+ dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
+ if (!dev_priv->pf_pd)
+ goto out_err;
+
+ psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
+ psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
+
+ ret = psb_do_init(dev);
+ if (ret)
+ return ret;
+
+ PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE);
+ PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE);
+
+/* igd_opregion_init(&dev_priv->opregion_dev); */
+ acpi_video_register();
+ if (dev_priv->lid_state)
+ psb_lid_timer_init(dev_priv);
+
+ ret = drm_vblank_init(dev, dev_priv->num_pipe);
+ if (ret)
+ goto out_err;
+
+ /*
+ * Install interrupt handlers prior to powering off SGX or else we will
+ * crash.
+ */
+ dev_priv->vdc_irq_mask = 0;
+ dev_priv->pipestat[0] = 0;
+ dev_priv->pipestat[1] = 0;
+ dev_priv->pipestat[2] = 0;
+ spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
+ PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
+ PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
+ PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
+ spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
+ if (IS_PSB(dev) && drm_core_check_feature(dev, DRIVER_MODESET))
+ drm_irq_install(dev);
+
+ dev->vblank_disable_allowed = 1;
+
+ dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+
+ dev->driver->get_vblank_counter = psb_get_vblank_counter;
+
+ psb_modeset_init(dev);
+ psb_fbdev_init(dev);
+ drm_kms_helper_poll_init(dev);
+
+ /* Only add backlight support if we have LVDS output */
+ list_for_each_entry(connector, &dev->mode_config.connector_list,
+ head) {
+ psb_intel_encoder = psb_intel_attached_encoder(connector);
+
+ switch (psb_intel_encoder->type) {
+ case INTEL_OUTPUT_LVDS:
+ case INTEL_OUTPUT_MIPI:
+ ret = gma_backlight_init(dev);
+ break;
+ }
+ }
+
+ if (ret)
+ return ret;
+#if 0
+ /*enable runtime pm at last*/
+ pm_runtime_enable(&dev->pdev->dev);
+ pm_runtime_set_active(&dev->pdev->dev);
+#endif
+ /*Intel drm driver load is done, continue doing pvr load*/
+ return 0;
+out_err:
+ psb_driver_unload(dev);
+ return ret;
+}
+
+int psb_driver_device_is_agp(struct drm_device *dev)
+{
+ return 0;
+}
+
+static inline void get_brightness(struct backlight_device *bd)
+{
+#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
+ if (bd) {
+ bd->props.brightness = bd->ops->get_brightness(bd);
+ backlight_update_status(bd);
+ }
+#endif
+}
+
+static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_psb_private *dev_priv = psb_priv(dev);
+ uint32_t *arg = data;
+
+ dev_priv->blc_adj2 = *arg;
+ get_brightness(dev_priv->backlight_device);
+ return 0;
+}
+
+static int psb_adb_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_psb_private *dev_priv = psb_priv(dev);
+ uint32_t *arg = data;
+
+ dev_priv->blc_adj1 = *arg;
+ get_brightness(dev_priv->backlight_device);
+ return 0;
+}
+
+static int psb_gamma_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_psb_dpst_lut_arg *lut_arg = data;
+ struct drm_mode_object *obj;
+ struct drm_crtc *crtc;
+ struct drm_connector *connector;
+ struct psb_intel_crtc *psb_intel_crtc;
+ int i = 0;
+ int32_t obj_id;
+
+ obj_id = lut_arg->output_id;
+ obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
+ if (!obj) {
+ dev_dbg(dev->dev, "Invalid Connector object.\n");
+ return -EINVAL;
+ }
+
+ connector = obj_to_connector(obj);
+ crtc = connector->encoder->crtc;
+ psb_intel_crtc = to_psb_intel_crtc(crtc);
+
+ for (i = 0; i < 256; i++)
+ psb_intel_crtc->lut_adj[i] = lut_arg->lut[i];
+
+ psb_intel_crtc_load_lut(crtc);
+
+ return 0;
+}
+
+static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ uint32_t obj_id;
+ uint16_t op;
+ struct drm_mode_modeinfo *umode;
+ struct drm_display_mode *mode = NULL;
+ struct drm_psb_mode_operation_arg *arg;
+ struct drm_mode_object *obj;
+ struct drm_connector *connector;
+ struct drm_connector_helper_funcs *connector_funcs;
+ int ret = 0;
+ int resp = MODE_OK;
+
+ arg = (struct drm_psb_mode_operation_arg *)data;
+ obj_id = arg->obj_id;
+ op = arg->operation;
+
+ switch (op) {
+ case PSB_MODE_OPERATION_MODE_VALID:
+ umode = &arg->mode;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ obj = drm_mode_object_find(dev, obj_id,
+ DRM_MODE_OBJECT_CONNECTOR);
+ if (!obj) {
+ ret = -EINVAL;
+ goto mode_op_out;
+ }
+
+ connector = obj_to_connector(obj);
+
+ mode = drm_mode_create(dev);
+ if (!mode) {
+ ret = -ENOMEM;
+ goto mode_op_out;
+ }
+
+ /* drm_crtc_convert_umode(mode, umode); */
+ {
+ mode->clock = umode->clock;
+ mode->hdisplay = umode->hdisplay;
+ mode->hsync_start = umode->hsync_start;
+ mode->hsync_end = umode->hsync_end;
+ mode->htotal = umode->htotal;
+ mode->hskew = umode->hskew;
+ mode->vdisplay = umode->vdisplay;
+ mode->vsync_start = umode->vsync_start;
+ mode->vsync_end = umode->vsync_end;
+ mode->vtotal = umode->vtotal;
+ mode->vscan = umode->vscan;
+ mode->vrefresh = umode->vrefresh;
+ mode->flags = umode->flags;
+ mode->type = umode->type;
+ strncpy(mode->name, umode->name, DRM_DISPLAY_MODE_LEN);
+ mode->name[DRM_DISPLAY_MODE_LEN-1] = 0;
+ }
+
+ connector_funcs = (struct drm_connector_helper_funcs *)
+ connector->helper_private;
+
+ if (connector_funcs->mode_valid) {
+ resp = connector_funcs->mode_valid(connector, mode);
+ arg->data = resp;
+ }
+
+ /*do some clean up work*/
+ if (mode)
+ drm_mode_destroy(dev, mode);
+mode_op_out:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+
+ default:
+ dev_dbg(dev->dev, "Unsupported psb mode operation\n");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_psb_private *dev_priv = psb_priv(dev);
+ struct drm_psb_stolen_memory_arg *arg = data;
+
+ arg->base = dev_priv->stolen_base;
+ arg->size = dev_priv->vram_stolen_size;
+
+ return 0;
+}
+
+static int psb_driver_open(struct drm_device *dev, struct drm_file *priv)
+{
+ return 0;
+}
+
+static void psb_driver_close(struct drm_device *dev, struct drm_file *priv)
+{
+}
+
+static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_device *dev = file_priv->minor->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ static unsigned int runtime_allowed;
+
+ if (runtime_allowed == 1 && dev_priv->is_lvds_on) {
+ runtime_allowed++;
+ pm_runtime_allow(&dev->pdev->dev);
+ dev_priv->rpm_enabled = 1;
+ }
+ return drm_ioctl(filp, cmd, arg);
+ /* FIXME: do we need to wrap the other side of this */
+}
+
+
+/* When a client dies:
+ * - Check for and clean up flipped page state
+ */
+void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
+{
+}
+
+static void psb_remove(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ drm_put_dev(dev);
+}
+
+static const struct dev_pm_ops psb_pm_ops = {
+ .resume = gma_power_resume,
+ .suspend = gma_power_suspend,
+ .runtime_suspend = psb_runtime_suspend,
+ .runtime_resume = psb_runtime_resume,
+ .runtime_idle = psb_runtime_idle,
+};
+
+static struct vm_operations_struct psb_gem_vm_ops = {
+ .fault = psb_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct file_operations psb_gem_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = psb_unlocked_ioctl,
+ .mmap = drm_gem_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+};
+
+static struct drm_driver driver = {
+ .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | \
+ DRIVER_IRQ_VBL | DRIVER_MODESET | DRIVER_GEM ,
+ .load = psb_driver_load,
+ .unload = psb_driver_unload,
+
+ .ioctls = psb_ioctls,
+ .num_ioctls = DRM_ARRAY_SIZE(psb_ioctls),
+ .device_is_agp = psb_driver_device_is_agp,
+ .irq_preinstall = psb_irq_preinstall,
+ .irq_postinstall = psb_irq_postinstall,
+ .irq_uninstall = psb_irq_uninstall,
+ .irq_handler = psb_irq_handler,
+ .enable_vblank = psb_enable_vblank,
+ .disable_vblank = psb_disable_vblank,
+ .get_vblank_counter = psb_get_vblank_counter,
+ .lastclose = psb_lastclose,
+ .open = psb_driver_open,
+ .preclose = psb_driver_preclose,
+ .postclose = psb_driver_close,
+ .reclaim_buffers = drm_core_reclaim_buffers,
+
+ .gem_init_object = psb_gem_init_object,
+ .gem_free_object = psb_gem_free_object,
+ .gem_vm_ops = &psb_gem_vm_ops,
+ .dumb_create = psb_gem_dumb_create,
+ .dumb_map_offset = psb_gem_dumb_map_gtt,
+ .dumb_destroy = psb_gem_dumb_destroy,
+ .fops = &psb_gem_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = PSB_DRM_DRIVER_DATE,
+ .major = PSB_DRM_DRIVER_MAJOR,
+ .minor = PSB_DRM_DRIVER_MINOR,
+ .patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
+};
+
+static struct pci_driver psb_pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = psb_probe,
+ .remove = psb_remove,
+ .driver.pm = &psb_pm_ops,
+};
+
+static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_pci_dev(pdev, ent, &driver);
+}
+
+static int __init psb_init(void)
+{
+ return drm_pci_init(&driver, &psb_pci_driver);
+}
+
+static void __exit psb_exit(void)
+{
+ drm_pci_exit(&driver, &psb_pci_driver);
+}
+
+late_initcall(psb_init);
+module_exit(psb_exit);
+
+MODULE_AUTHOR("Alan Cox <alan@linux.intel.com> and others");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
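
The PCI ID table in psb_drv.c carries a pointer to each variant's ops structure in the entry's driver_data field, and psb_driver_load() casts the chipset argument back to the ops table. A hedged sketch of that round trip at probe time (demo_* names are placeholders):

#include <linux/module.h>
#include <linux/pci.h>

struct demo_chip_ops { const char *name; };

static const struct demo_chip_ops demo_psb_ops = { .name = "Poulsbo" };

static const struct pci_device_id demo_ids[] = {
	/* driver_data smuggles a pointer to the matching ops table */
	{ PCI_DEVICE(0x8086, 0x8108),
	  .driver_data = (kernel_ulong_t)&demo_psb_ops },
	{ }
};
MODULE_DEVICE_TABLE(pci, demo_ids);

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	/* Recover the ops pointer stashed in the matching ID entry. */
	const struct demo_chip_ops *ops =
			(const struct demo_chip_ops *)ent->driver_data;

	dev_info(&pdev->dev, "probing %s variant\n", ops->name);
	return 0;
}
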
diff --git a/drivers/staging/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 11d963a..eb1568a 100644
--- a/drivers/staging/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -25,13 +25,12 @@
#include <drm/drmP.h>
#include "drm_global.h"
#include "gem_glue.h"
-#include "psb_drm.h"
+#include "gma_drm.h"
#include "psb_reg.h"
#include "psb_intel_drv.h"
#include "gtt.h"
#include "power.h"
-#include "mrst.h"
-#include "medfield.h"
+#include "oaktrail.h"
/* Append new drm mode definition here, align with libdrm definition */
#define DRM_MODE_SCALE_NO_SCALE 2
@@ -261,8 +260,26 @@ struct psb_intel_opregion {
int enabled;
};
+struct sdvo_device_mapping {
+ u8 initialized;
+ u8 dvo_port;
+ u8 slave_addr;
+ u8 dvo_wiring;
+ u8 i2c_pin;
+ u8 i2c_speed;
+ u8 ddc_pin;
+};
+
+struct intel_gmbus {
+ struct i2c_adapter adapter;
+ struct i2c_adapter *force_bit;
+ u32 reg0;
+};
+
struct psb_ops;
+#define PSB_NUM_PIPE 3
+
struct drm_psb_private {
struct drm_device *dev;
const struct psb_ops *ops;
@@ -329,14 +346,24 @@ struct drm_psb_private {
* Sizes info
*/
- struct drm_psb_sizes_arg sizes;
-
u32 fuse_reg_value;
u32 video_device_fuse;
/* PCI revision ID for B0:D2:F0 */
uint8_t platform_rev_id;
+ /* gmbus */
+ struct intel_gmbus *gmbus;
+
+ /* Used by SDVO */
+ int crt_ddc_pin;
+ /* FIXME: The mappings should be parsed from bios but for now we can
+ pretend there are no mappings available */
+ struct sdvo_device_mapping sdvo_mappings[2];
+ u32 hotplug_supported_mask;
+ struct drm_property *broadcast_rgb_property;
+ struct drm_property *force_audio_property;
+
/*
* LVDS info
*/
@@ -347,7 +374,7 @@ struct drm_psb_private {
struct drm_display_mode *sdvo_lvds_vbt_mode;
struct bdb_lvds_backlight *lvds_bl; /* LVDS backlight info from VBT */
- struct psb_intel_i2c_chan *lvds_i2c_bus;
+ struct psb_intel_i2c_chan *lvds_i2c_bus; /* FIXME: Remove this? */
/* Feature bits from the VBIOS */
unsigned int int_tv_support:1;
@@ -367,8 +394,8 @@ struct drm_psb_private {
int rpm_enabled;
/* MID specific */
- struct mrst_vbt vbt_data;
- struct mrst_gct_data gct_data;
+ struct oaktrail_vbt vbt_data;
+ struct oaktrail_gct_data gct_data;
/* MIPI Panel type etc */
int panel_id;
@@ -380,7 +407,7 @@ struct drm_psb_private {
u32 dsr_fb_update; /* DSR FB update counter */
/* Moorestown HDMI state */
- struct mrst_hdmi_dev *hdmi_priv;
+ struct oaktrail_hdmi_dev *hdmi_priv;
/* Moorestown pipe config register value cache */
uint32_t pipeconf;
@@ -605,24 +632,9 @@ struct drm_psb_private {
uint32_t blc_adj2;
void *fbdev;
- /* DPST state */
- uint32_t dsr_idle_count;
- bool is_in_idle;
- bool dsr_enable;
- void (*exit_idle)(struct drm_device *dev, u32 update_src);
/* 2D acceleration */
spinlock_t lock_2d;
-
- /* FIXME: Arrays anyone ? */
- struct mdfld_dsi_encoder *encoder0;
- struct mdfld_dsi_encoder *encoder2;
- struct mdfld_dsi_dbi_output * dbi_output;
- struct mdfld_dsi_dbi_output * dbi_output2;
- u32 bpp;
- u32 bpp2;
-
- bool dispstatus;
};
@@ -742,9 +754,6 @@ psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
extern u32 psb_get_vblank_counter(struct drm_device *dev, int crtc);
-extern int mdfld_enable_te(struct drm_device *dev, int pipe);
-extern void mdfld_disable_te(struct drm_device *dev, int pipe);
-
/*
* intel_opregion.c
*/
@@ -764,8 +773,6 @@ extern void psbfb_copyarea(struct fb_info *info,
const struct fb_copyarea *region);
extern int psbfb_sync(struct fb_info *info);
extern void psb_spank(struct drm_psb_private *dev_priv);
-extern int psb_accel_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file);
/*
* psb_reset.c
@@ -784,11 +791,11 @@ extern int psb_fbdev_init(struct drm_device *dev);
int gma_backlight_init(struct drm_device *dev);
void gma_backlight_exit(struct drm_device *dev);
-/* mrst_crtc.c */
-extern const struct drm_crtc_helper_funcs mrst_helper_funcs;
+/* oaktrail_crtc.c */
+extern const struct drm_crtc_helper_funcs oaktrail_helper_funcs;
-/* mrst_lvds.c */
-extern void mrst_lvds_init(struct drm_device *dev,
+/* oaktrail_lvds.c */
+extern void oaktrail_lvds_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev);
/* psb_intel_display.c */
@@ -820,11 +827,8 @@ extern int psb_gem_mmap_ioctl(struct drm_device *dev, void *data,
/* psb_device.c */
extern const struct psb_ops psb_chip_ops;
-/* mrst_device.c */
-extern const struct psb_ops mrst_chip_ops;
-
-/* mdfld_device.c */
-extern const struct psb_ops mdfld_chip_ops;
+/* oaktrail_device.c */
+extern const struct psb_ops oaktrail_chip_ops;
/* cdv_device.c */
extern const struct psb_ops cdv_chip_ops;
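
The new struct intel_gmbus in psb_drv.h embeds a struct i2c_adapter directly, which lets the GMBUS code recover its wrapper from the adapter pointer an algorithm callback receives. A hedged sketch of that container_of() recovery (demo_* names are illustrative):

#include <linux/kernel.h>
#include <linux/i2c.h>

struct demo_gmbus {
	struct i2c_adapter adapter;	/* embedded, not a pointer */
	u32 reg0;
};

static struct demo_gmbus *to_demo_gmbus(struct i2c_adapter *adapter)
{
	/* Walk back from the embedded member to its containing struct. */
	return container_of(adapter, struct demo_gmbus, adapter);
}

static int demo_gmbus_xfer(struct i2c_adapter *adapter,
			   struct i2c_msg *msgs, int num)
{
	struct demo_gmbus *bus = to_demo_gmbus(adapter);

	(void)bus->reg0;	/* a real transfer would program the bus registers */
	return num;
}
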
diff --git a/drivers/staging/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index caa9d86..49e9835 100644
--- a/drivers/staging/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -29,8 +29,6 @@
#include "psb_intel_display.h"
#include "power.h"
-#include "mdfld_output.h"
-
struct psb_intel_clock_t {
/* given values */
int n;
@@ -216,9 +214,9 @@ bool psb_intel_pipe_has_type(struct drm_crtc *crtc, int type)
list_for_each_entry(l_entry, &mode_config->connector_list, head) {
if (l_entry->encoder && l_entry->encoder->crtc == crtc) {
- struct psb_intel_output *psb_intel_output =
- to_psb_intel_output(l_entry);
- if (psb_intel_output->type == type)
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(l_entry);
+ if (psb_intel_encoder->type == type)
return true;
}
}
@@ -367,9 +365,9 @@ int psb_intel_pipe_set_base(struct drm_crtc *crtc,
goto psb_intel_pipe_set_base_exit;
start = psbfb->gtt->offset;
- offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
+ offset = y * crtc->fb->pitches[0] + x * (crtc->fb->bits_per_pixel / 8);
- REG_WRITE(dspstride, crtc->fb->pitch);
+ REG_WRITE(dspstride, crtc->fb->pitches[0]);
dspcntr = REG_READ(dspcntr_reg);
dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
@@ -554,6 +552,14 @@ void psb_intel_encoder_commit(struct drm_encoder *encoder)
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}
+void psb_intel_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct psb_intel_encoder *intel_encoder = to_psb_intel_encoder(encoder);
+
+ drm_encoder_cleanup(encoder);
+ kfree(intel_encoder);
+}
+
static bool psb_intel_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -617,14 +623,14 @@ static int psb_intel_crtc_mode_set(struct drm_crtc *crtc,
}
list_for_each_entry(connector, &mode_config->connector_list, head) {
- struct psb_intel_output *psb_intel_output =
- to_psb_intel_output(connector);
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
if (!connector->encoder
|| connector->encoder->crtc != crtc)
continue;
- switch (psb_intel_output->type) {
+ switch (psb_intel_encoder->type) {
case INTEL_OUTPUT_LVDS:
is_lvds = true;
break;
@@ -1102,6 +1108,10 @@ static int psb_crtc_set_config(struct drm_mode_set *set)
{
int ret;
struct drm_device *dev = set->crtc->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+
+ if (!dev_priv->rpm_enabled)
+ return drm_crtc_helper_set_config(set);
pm_runtime_forbid(&dev->pdev->dev);
ret = drm_crtc_helper_set_config(set);
@@ -1400,9 +1410,9 @@ int psb_intel_connector_clones(struct drm_device *dev, int type_mask)
list_for_each_entry(connector, &dev->mode_config.connector_list,
head) {
- struct psb_intel_output *psb_intel_output =
- to_psb_intel_output(connector);
- if (type_mask & (1 << psb_intel_output->type))
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ if (type_mask & (1 << psb_intel_encoder->type))
index_mask |= (1 << entry);
entry++;
}
@@ -1421,9 +1431,16 @@ void psb_intel_modeset_cleanup(struct drm_device *dev)
*/
struct drm_encoder *psb_intel_best_encoder(struct drm_connector *connector)
{
- struct psb_intel_output *psb_intel_output =
- to_psb_intel_output(connector);
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
- return &psb_intel_output->enc;
+ return &psb_intel_encoder->base;
}
+void psb_intel_connector_attach_encoder(struct psb_intel_connector *connector,
+ struct psb_intel_encoder *encoder)
+{
+ connector->encoder = encoder;
+ drm_mode_connector_attach_encoder(&connector->base,
+ &encoder->base);
+}
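
psb_intel_connector_attach_encoder() stores a back-pointer in the connector wrapper before doing the DRM-level attach, so psb_intel_attached_encoder() can map a drm_connector straight to its gma500 encoder without the old psb_intel_output container. A minimal sketch of that wrapper/back-pointer pairing (demo_* structs stand in for the drm and gma500 types):

#include <linux/kernel.h>

struct demo_drm_connector { int id; };
struct demo_drm_encoder   { int id; };

struct demo_encoder {
	struct demo_drm_encoder base;
	int type;
};

struct demo_connector {
	struct demo_drm_connector base;
	struct demo_encoder *encoder;	/* filled in at attach time */
};

static void demo_connector_attach_encoder(struct demo_connector *connector,
					   struct demo_encoder *encoder)
{
	connector->encoder = encoder;
	/* the real helper then calls drm_mode_connector_attach_encoder() */
}

static struct demo_encoder *
demo_attached_encoder(struct demo_drm_connector *base)
{
	return container_of(base, struct demo_connector, base)->encoder;
}
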
diff --git a/drivers/staging/gma500/psb_intel_display.h b/drivers/gpu/drm/gma500/psb_intel_display.h
index 535b49a..535b49a 100644
--- a/drivers/staging/gma500/psb_intel_display.h
+++ b/drivers/gpu/drm/gma500/psb_intel_display.h
diff --git a/drivers/staging/gma500/psb_intel_drv.h b/drivers/gpu/drm/gma500/psb_intel_drv.h
index 36b554b..f40535e 100644
--- a/drivers/staging/gma500/psb_intel_drv.h
+++ b/drivers/gpu/drm/gma500/psb_intel_drv.h
@@ -39,6 +39,25 @@
#define INTEL_I2C_BUS_DVO 1
#define INTEL_I2C_BUS_SDVO 2
+/* Intel Pipe Clone Bit */
+#define INTEL_HDMIB_CLONE_BIT 1
+#define INTEL_HDMIC_CLONE_BIT 2
+#define INTEL_HDMID_CLONE_BIT 3
+#define INTEL_HDMIE_CLONE_BIT 4
+#define INTEL_HDMIF_CLONE_BIT 5
+#define INTEL_SDVO_NON_TV_CLONE_BIT 6
+#define INTEL_SDVO_TV_CLONE_BIT 7
+#define INTEL_SDVO_LVDS_CLONE_BIT 8
+#define INTEL_ANALOG_CLONE_BIT 9
+#define INTEL_TV_CLONE_BIT 10
+#define INTEL_DP_B_CLONE_BIT 11
+#define INTEL_DP_C_CLONE_BIT 12
+#define INTEL_DP_D_CLONE_BIT 13
+#define INTEL_LVDS_CLONE_BIT 14
+#define INTEL_DVO_TMDS_CLONE_BIT 15
+#define INTEL_DVO_LVDS_CLONE_BIT 16
+#define INTEL_EDP_CLONE_BIT 17
+
/* these are outputs from the chip - integrated only
* external chips are via DVO or SDVO output */
#define INTEL_OUTPUT_UNUSED 0
@@ -56,6 +75,25 @@
#define INTEL_DVO_CHIP_TMDS 2
#define INTEL_DVO_CHIP_TVOUT 4
+#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
+#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
+
+static inline void
+psb_intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
+ int multiplier)
+{
+ mode->clock *= multiplier;
+ mode->private_flags |= multiplier;
+}
+
+static inline int
+psb_intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
+{
+ return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK)
+ >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT;
+}
+
+
/*
* Hold information usually put in the device driver's private data here,
* since it needs to be shared across multiple device drivers' private data.
@@ -93,19 +131,24 @@ struct psb_intel_i2c_chan {
u8 slave_addr;
};
-struct psb_intel_output {
- struct drm_connector base;
-
- struct drm_encoder enc;
+struct psb_intel_encoder {
+ struct drm_encoder base;
int type;
+ bool needs_tv_clock;
+ void (*hot_plug)(struct psb_intel_encoder *);
+ int crtc_mask;
+ int clone_mask;
+ void *dev_priv; /* For sdvo_priv, lvds_priv, etc... */
+
+	/* FIXME: Either make SDVO and LVDS store its i2c here or give CDV its
+	   own set of output privates */
+ struct psb_intel_i2c_chan *i2c_bus;
+ struct psb_intel_i2c_chan *ddc_bus;
+};
- struct psb_intel_i2c_chan *i2c_bus; /* for control functions */
- struct psb_intel_i2c_chan *ddc_bus; /* for DDC only stuff */
- bool load_detect_temp;
- void *dev_priv;
-
- struct psb_intel_mode_device *mode_dev;
- struct i2c_adapter *hdmi_i2c_adapter; /* for control functions */
+struct psb_intel_connector {
+ struct drm_connector base;
+ struct psb_intel_encoder *encoder;
};
struct psb_intel_crtc_state {
@@ -156,32 +199,33 @@ struct psb_intel_crtc {
#define to_psb_intel_crtc(x) \
container_of(x, struct psb_intel_crtc, base)
-#define to_psb_intel_output(x) \
- container_of(x, struct psb_intel_output, base)
-#define enc_to_psb_intel_output(x) \
- container_of(x, struct psb_intel_output, enc)
+#define to_psb_intel_connector(x) \
+ container_of(x, struct psb_intel_connector, base)
+#define to_psb_intel_encoder(x) \
+ container_of(x, struct psb_intel_encoder, base)
#define to_psb_intel_framebuffer(x) \
container_of(x, struct psb_intel_framebuffer, base)
struct psb_intel_i2c_chan *psb_intel_i2c_create(struct drm_device *dev,
const u32 reg, const char *name);
void psb_intel_i2c_destroy(struct psb_intel_i2c_chan *chan);
-int psb_intel_ddc_get_modes(struct psb_intel_output *psb_intel_output);
-extern bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output);
+int psb_intel_ddc_get_modes(struct drm_connector *connector,
+ struct i2c_adapter *adapter);
+extern bool psb_intel_ddc_probe(struct i2c_adapter *adapter);
extern void psb_intel_crtc_init(struct drm_device *dev, int pipe,
struct psb_intel_mode_device *mode_dev);
extern void psb_intel_crt_init(struct drm_device *dev);
-extern void psb_intel_sdvo_init(struct drm_device *dev, int output_device);
+extern bool psb_intel_sdvo_init(struct drm_device *dev, int output_device);
extern void psb_intel_dvo_init(struct drm_device *dev);
extern void psb_intel_tv_init(struct drm_device *dev);
extern void psb_intel_lvds_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev);
extern void psb_intel_lvds_set_brightness(struct drm_device *dev, int level);
-extern void mrst_lvds_init(struct drm_device *dev,
+extern void oaktrail_lvds_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev);
-extern void mrst_wait_for_INTR_PKT_SENT(struct drm_device *dev);
-extern void mrst_dsi_init(struct drm_device *dev,
+extern void oaktrail_wait_for_INTR_PKT_SENT(struct drm_device *dev);
+extern void oaktrail_dsi_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev);
extern void mid_dsi_init(struct drm_device *dev,
struct psb_intel_mode_device *mode_dev, int dsi_num);
@@ -189,6 +233,17 @@ extern void mid_dsi_init(struct drm_device *dev,
extern void psb_intel_crtc_load_lut(struct drm_crtc *crtc);
extern void psb_intel_encoder_prepare(struct drm_encoder *encoder);
extern void psb_intel_encoder_commit(struct drm_encoder *encoder);
+extern void psb_intel_encoder_destroy(struct drm_encoder *encoder);
+
+static inline struct psb_intel_encoder *psb_intel_attached_encoder(
+ struct drm_connector *connector)
+{
+ return to_psb_intel_connector(connector)->encoder;
+}
+
+extern void psb_intel_connector_attach_encoder(
+ struct psb_intel_connector *connector,
+ struct psb_intel_encoder *encoder);
extern struct drm_encoder *psb_intel_best_encoder(struct drm_connector
*connector);
@@ -224,7 +279,11 @@ extern int psb_intel_lvds_set_property(struct drm_connector *connector,
extern void psb_intel_lvds_destroy(struct drm_connector *connector);
extern const struct drm_encoder_funcs psb_intel_lvds_enc_funcs;
-extern void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe);
-extern void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe);
+/* intel_gmbus.c */
+extern void gma_intel_i2c_reset(struct drm_device *dev);
+extern int gma_intel_setup_gmbus(struct drm_device *dev);
+extern void gma_intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
+extern void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
+extern void gma_intel_teardown_gmbus(struct drm_device *dev);
#endif /* __INTEL_DRV_H__ */
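
The pixel-multiplier helpers added to psb_intel_drv.h pack the multiplier into mode->private_flags behind a mask/shift pair and scale the mode clock at store time. A small worked sketch of that store/retrieve arithmetic reduced to plain integers (the constants simply mirror the ones defined above; this is not a separate API):

#include <stdio.h>

#define PIXEL_MULTIPLIER_SHIFT	0
#define PIXEL_MULTIPLIER_MASK	(0xf << PIXEL_MULTIPLIER_SHIFT)

int main(void)
{
	int private_flags = 0;
	int clock = 25200;		/* kHz, e.g. roughly a 640x480 dot clock */
	int multiplier = 2;

	/* store: scale the clock and record the multiplier in the low nibble */
	clock *= multiplier;
	private_flags |= multiplier << PIXEL_MULTIPLIER_SHIFT;

	/* retrieve: mask and shift the nibble back out */
	printf("clock=%d kHz, multiplier=%d\n", clock,
	       (private_flags & PIXEL_MULTIPLIER_MASK) >> PIXEL_MULTIPLIER_SHIFT);
	return 0;
}
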
diff --git a/drivers/staging/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index 21022e1..a25e4ca 100644
--- a/drivers/staging/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -59,6 +59,9 @@ struct psb_intel_lvds_priv {
uint32_t savePFIT_CONTROL;
uint32_t savePFIT_PGM_RATIOS;
uint32_t saveBLC_PWM_CTL;
+
+ struct psb_intel_i2c_chan *i2c_bus;
+ struct psb_intel_i2c_chan *ddc_bus;
};
@@ -214,9 +217,10 @@ static void psb_intel_lvds_set_backlight(struct drm_device *dev, int level)
/*
* Sets the power state for the panel.
*/
-static void psb_intel_lvds_set_power(struct drm_device *dev,
- struct psb_intel_output *output, bool on)
+static void psb_intel_lvds_set_power(struct drm_device *dev, bool on)
{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
u32 pp_status;
if (!gma_power_begin(dev, true)) {
@@ -232,8 +236,7 @@ static void psb_intel_lvds_set_power(struct drm_device *dev,
} while ((pp_status & PP_ON) == 0);
psb_intel_lvds_set_backlight(dev,
- output->
- mode_dev->backlight_duty_cycle);
+ mode_dev->backlight_duty_cycle);
} else {
psb_intel_lvds_set_backlight(dev, 0);
@@ -250,12 +253,11 @@ static void psb_intel_lvds_set_power(struct drm_device *dev,
static void psb_intel_lvds_encoder_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
- struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
if (mode == DRM_MODE_DPMS_ON)
- psb_intel_lvds_set_power(dev, output, true);
+ psb_intel_lvds_set_power(dev, true);
else
- psb_intel_lvds_set_power(dev, output, false);
+ psb_intel_lvds_set_power(dev, false);
/* XXX: We never power down the LVDS pairs. */
}
@@ -265,10 +267,10 @@ static void psb_intel_lvds_save(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_psb_private *dev_priv =
(struct drm_psb_private *)dev->dev_private;
- struct psb_intel_output *psb_intel_output =
- to_psb_intel_output(connector);
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
struct psb_intel_lvds_priv *lvds_priv =
- (struct psb_intel_lvds_priv *)psb_intel_output->dev_priv;
+ (struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv;
lvds_priv->savePP_ON = REG_READ(LVDSPP_ON);
lvds_priv->savePP_OFF = REG_READ(LVDSPP_OFF);
@@ -305,10 +307,10 @@ static void psb_intel_lvds_restore(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
u32 pp_status;
- struct psb_intel_output *psb_intel_output =
- to_psb_intel_output(connector);
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
struct psb_intel_lvds_priv *lvds_priv =
- (struct psb_intel_lvds_priv *)psb_intel_output->dev_priv;
+ (struct psb_intel_lvds_priv *)psb_intel_encoder->dev_priv;
dev_dbg(dev->dev, "(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
lvds_priv->savePP_ON,
@@ -346,13 +348,14 @@ static void psb_intel_lvds_restore(struct drm_connector *connector)
int psb_intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- struct psb_intel_output *psb_intel_output =
- to_psb_intel_output(connector);
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
struct drm_display_mode *fixed_mode =
- psb_intel_output->mode_dev->panel_fixed_mode;
+ dev_priv->mode_dev.panel_fixed_mode;
- if (psb_intel_output->type == INTEL_OUTPUT_MIPI2)
- fixed_mode = psb_intel_output->mode_dev->panel_fixed_mode2;
+ if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2)
+ fixed_mode = dev_priv->mode_dev.panel_fixed_mode2;
/* just in case */
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -375,17 +378,17 @@ bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct psb_intel_mode_device *mode_dev =
- enc_to_psb_intel_output(encoder)->mode_dev;
struct drm_device *dev = encoder->dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
struct psb_intel_crtc *psb_intel_crtc =
to_psb_intel_crtc(encoder->crtc);
struct drm_encoder *tmp_encoder;
struct drm_display_mode *panel_fixed_mode = mode_dev->panel_fixed_mode;
- struct psb_intel_output *psb_intel_output =
- enc_to_psb_intel_output(encoder);
+ struct psb_intel_encoder *psb_intel_encoder =
+ to_psb_intel_encoder(encoder);
- if (psb_intel_output->type == INTEL_OUTPUT_MIPI2)
+ if (psb_intel_encoder->type == INTEL_OUTPUT_MIPI2)
panel_fixed_mode = mode_dev->panel_fixed_mode2;
/* PSB requires the LVDS is on pipe B, MRST has only one pipe anyway */
@@ -440,8 +443,8 @@ bool psb_intel_lvds_mode_fixup(struct drm_encoder *encoder,
static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
- struct psb_intel_mode_device *mode_dev = output->mode_dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
if (!gma_power_begin(dev, true))
return;
@@ -450,7 +453,7 @@ static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
mode_dev->backlight_duty_cycle = (mode_dev->saveBLC_PWM_CTL &
BACKLIGHT_DUTY_CYCLE_MASK);
- psb_intel_lvds_set_power(dev, output, false);
+ psb_intel_lvds_set_power(dev, false);
gma_power_end(dev);
}
@@ -458,14 +461,14 @@ static void psb_intel_lvds_prepare(struct drm_encoder *encoder)
static void psb_intel_lvds_commit(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
- struct psb_intel_output *output = enc_to_psb_intel_output(encoder);
- struct psb_intel_mode_device *mode_dev = output->mode_dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
if (mode_dev->backlight_duty_cycle == 0)
mode_dev->backlight_duty_cycle =
psb_intel_lvds_get_max_backlight(dev);
- psb_intel_lvds_set_power(dev, output, true);
+ psb_intel_lvds_set_power(dev, true);
}
static void psb_intel_lvds_mode_set(struct drm_encoder *encoder,
@@ -520,14 +523,15 @@ static enum drm_connector_status psb_intel_lvds_detect(struct drm_connector
static int psb_intel_lvds_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
- struct psb_intel_output *psb_intel_output =
- to_psb_intel_output(connector);
- struct psb_intel_mode_device *mode_dev =
- psb_intel_output->mode_dev;
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_mode_device *mode_dev = &dev_priv->mode_dev;
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv;
int ret = 0;
if (!IS_MRST(dev))
- ret = psb_intel_ddc_get_modes(psb_intel_output);
+ ret = psb_intel_ddc_get_modes(connector, &lvds_priv->i2c_bus->adapter);
if (ret)
return ret;
@@ -560,11 +564,12 @@ static int psb_intel_lvds_get_modes(struct drm_connector *connector)
*/
void psb_intel_lvds_destroy(struct drm_connector *connector)
{
- struct psb_intel_output *psb_intel_output =
- to_psb_intel_output(connector);
+ struct psb_intel_encoder *psb_intel_encoder =
+ psb_intel_attached_encoder(connector);
+ struct psb_intel_lvds_priv *lvds_priv = psb_intel_encoder->dev_priv;
- if (psb_intel_output->ddc_bus)
- psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
+ if (lvds_priv->ddc_bus)
+ psb_intel_i2c_destroy(lvds_priv->ddc_bus);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
@@ -693,9 +698,10 @@ const struct drm_encoder_funcs psb_intel_lvds_enc_funcs = {
* modes we can display on the LVDS panel (if present).
*/
void psb_intel_lvds_init(struct drm_device *dev,
- struct psb_intel_mode_device *mode_dev)
+ struct psb_intel_mode_device *mode_dev)
{
- struct psb_intel_output *psb_intel_output;
+ struct psb_intel_encoder *psb_intel_encoder;
+ struct psb_intel_connector *psb_intel_connector;
struct psb_intel_lvds_priv *lvds_priv;
struct drm_connector *connector;
struct drm_encoder *encoder;
@@ -705,33 +711,43 @@ void psb_intel_lvds_init(struct drm_device *dev,
u32 lvds;
int pipe;
- psb_intel_output = kzalloc(sizeof(struct psb_intel_output), GFP_KERNEL);
- if (!psb_intel_output)
+ psb_intel_encoder =
+ kzalloc(sizeof(struct psb_intel_encoder), GFP_KERNEL);
+
+ if (!psb_intel_encoder) {
+ dev_err(dev->dev, "psb_intel_encoder allocation error\n");
return;
+ }
+
+ psb_intel_connector =
+ kzalloc(sizeof(struct psb_intel_connector), GFP_KERNEL);
+
+ if (!psb_intel_connector) {
+ kfree(psb_intel_encoder);
+ dev_err(dev->dev, "psb_intel_connector allocation error\n");
+ return;
+ }
lvds_priv = kzalloc(sizeof(struct psb_intel_lvds_priv), GFP_KERNEL);
if (!lvds_priv) {
- kfree(psb_intel_output);
dev_err(dev->dev, "LVDS private allocation error\n");
- return;
+ goto failed_connector;
}
- psb_intel_output->dev_priv = lvds_priv;
- psb_intel_output->mode_dev = mode_dev;
+ psb_intel_encoder->dev_priv = lvds_priv;
- connector = &psb_intel_output->base;
- encoder = &psb_intel_output->enc;
- drm_connector_init(dev, &psb_intel_output->base,
+ connector = &psb_intel_connector->base;
+ encoder = &psb_intel_encoder->base;
+ drm_connector_init(dev, connector,
&psb_intel_lvds_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
- drm_encoder_init(dev, &psb_intel_output->enc,
+ drm_encoder_init(dev, encoder,
&psb_intel_lvds_enc_funcs,
DRM_MODE_ENCODER_LVDS);
- drm_mode_connector_attach_encoder(&psb_intel_output->base,
- &psb_intel_output->enc);
- psb_intel_output->type = INTEL_OUTPUT_LVDS;
+ psb_intel_connector_attach_encoder(psb_intel_connector,
+ psb_intel_encoder);
+ psb_intel_encoder->type = INTEL_OUTPUT_LVDS;
drm_encoder_helper_add(encoder, &psb_intel_lvds_helper_funcs);
drm_connector_helper_add(connector,
@@ -752,16 +768,14 @@ void psb_intel_lvds_init(struct drm_device *dev,
* Set up I2C bus
* FIXME: destroy i2c_bus on exit
*/
- psb_intel_output->i2c_bus = psb_intel_i2c_create(dev,
- GPIOB,
- "LVDSBLC_B");
- if (!psb_intel_output->i2c_bus) {
+ lvds_priv->i2c_bus = psb_intel_i2c_create(dev, GPIOB, "LVDSBLC_B");
+ if (!lvds_priv->i2c_bus) {
dev_printk(KERN_ERR,
&dev->pdev->dev, "I2C bus registration failed.\n");
goto failed_blc_i2c;
}
- psb_intel_output->i2c_bus->slave_addr = 0x2C;
- dev_priv->lvds_i2c_bus = psb_intel_output->i2c_bus;
+ lvds_priv->i2c_bus->slave_addr = 0x2C;
+ dev_priv->lvds_i2c_bus = lvds_priv->i2c_bus;
/*
* LVDS discovery:
@@ -774,10 +788,8 @@ void psb_intel_lvds_init(struct drm_device *dev,
*/
/* Set up the DDC bus. */
- psb_intel_output->ddc_bus = psb_intel_i2c_create(dev,
- GPIOC,
- "LVDSDDC_C");
- if (!psb_intel_output->ddc_bus) {
+ lvds_priv->ddc_bus = psb_intel_i2c_create(dev, GPIOC, "LVDSDDC_C");
+ if (!lvds_priv->ddc_bus) {
dev_printk(KERN_ERR, &dev->pdev->dev,
"DDC bus registration " "failed.\n");
goto failed_ddc;
@@ -787,7 +799,7 @@ void psb_intel_lvds_init(struct drm_device *dev,
* Attempt to get the fixed panel mode from DDC. Assume that the
* preferred mode is the right one.
*/
- psb_intel_ddc_get_modes(psb_intel_output);
+ psb_intel_ddc_get_modes(connector, &lvds_priv->ddc_bus->adapter);
list_for_each_entry(scan, &connector->probed_modes, head) {
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
mode_dev->panel_fixed_mode =
@@ -841,14 +853,16 @@ out:
return;
failed_find:
- if (psb_intel_output->ddc_bus)
- psb_intel_i2c_destroy(psb_intel_output->ddc_bus);
+ if (lvds_priv->ddc_bus)
+ psb_intel_i2c_destroy(lvds_priv->ddc_bus);
failed_ddc:
- if (psb_intel_output->i2c_bus)
- psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
+ if (lvds_priv->i2c_bus)
+ psb_intel_i2c_destroy(lvds_priv->i2c_bus);
failed_blc_i2c:
drm_encoder_cleanup(encoder);
drm_connector_cleanup(connector);
- kfree(connector);
+failed_connector:
+ if (psb_intel_connector)
+ kfree(psb_intel_connector);
}
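
With the connector and encoder now allocated separately, psb_intel_lvds_init unwinds through a label ladder (failed_find → failed_ddc → failed_blc_i2c → failed_connector) so each failure point releases only what has already been acquired. A self-contained sketch of the same goto-unwind pattern, outside the driver (all names invented for the example):

#include <stdio.h>
#include <stdlib.h>

struct chan { int dummy; };

static struct chan *chan_create(void) { return calloc(1, sizeof(struct chan)); }
static void chan_destroy(struct chan *c) { free(c); }

/* Acquire three resources; on failure, fall through the labels in reverse
 * order so only what was actually acquired gets released. */
static int example_init(struct chan **a, struct chan **b, struct chan **c)
{
	struct chan *enc, *blc, *ddc;

	enc = chan_create();
	if (!enc)
		return -1;
	blc = chan_create();
	if (!blc)
		goto failed_blc;
	ddc = chan_create();
	if (!ddc)
		goto failed_ddc;

	*a = enc; *b = blc; *c = ddc;
	return 0;

failed_ddc:
	chan_destroy(blc);
failed_blc:
	chan_destroy(enc);
	return -1;
}

int main(void)
{
	struct chan *a, *b, *c;

	if (example_init(&a, &b, &c) == 0) {
		puts("all three acquired");
		chan_destroy(c); chan_destroy(b); chan_destroy(a);
	}
	return 0;
}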
diff --git a/drivers/staging/gma500/psb_intel_modes.c b/drivers/gpu/drm/gma500/psb_intel_modes.c
index bde1aff..4fca0d6 100644
--- a/drivers/staging/gma500/psb_intel_modes.c
+++ b/drivers/gpu/drm/gma500/psb_intel_modes.c
@@ -26,7 +26,7 @@
* psb_intel_ddc_probe
*
*/
-bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output)
+bool psb_intel_ddc_probe(struct i2c_adapter *adapter)
{
u8 out_buf[] = { 0x0, 0x0 };
u8 buf[2];
@@ -46,7 +46,7 @@ bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output)
}
};
- ret = i2c_transfer(&psb_intel_output->ddc_bus->adapter, msgs, 2);
+ ret = i2c_transfer(adapter, msgs, 2);
if (ret == 2)
return true;
@@ -59,18 +59,16 @@ bool psb_intel_ddc_probe(struct psb_intel_output *psb_intel_output)
*
* Fetch the EDID information from @connector using the DDC bus.
*/
-int psb_intel_ddc_get_modes(struct psb_intel_output *psb_intel_output)
+int psb_intel_ddc_get_modes(struct drm_connector *connector,
+ struct i2c_adapter *adapter)
{
struct edid *edid;
int ret = 0;
- edid =
- drm_get_edid(&psb_intel_output->base,
- &psb_intel_output->ddc_bus->adapter);
+ edid = drm_get_edid(connector, adapter);
if (edid) {
- drm_mode_connector_update_edid_property(&psb_intel_output->
- base, edid);
- ret = drm_add_edid_modes(&psb_intel_output->base, edid);
+ drm_mode_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
kfree(edid);
}
return ret;
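
Passing the connector and an explicit i2c_adapter means these two helpers no longer care whether the bus behind them is a GPIO bit-bang channel or a GMBUS port. A hypothetical caller, reusing the ddc_bus field added to the LVDS private structure earlier in this patch:

/* Hypothetical caller: probe the DDC link first, then pull EDID modes.
 * 'lvds_priv' mirrors the private struct added in psb_intel_lvds.c. */
static int example_probe_panel(struct drm_connector *connector,
			       struct psb_intel_lvds_priv *lvds_priv)
{
	struct i2c_adapter *ddc = &lvds_priv->ddc_bus->adapter;

	if (!psb_intel_ddc_probe(ddc))
		return 0;	/* nothing answered on the bus */

	return psb_intel_ddc_get_modes(connector, ddc);
}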
diff --git a/drivers/staging/gma500/psb_intel_reg.h b/drivers/gpu/drm/gma500/psb_intel_reg.h
index 1ac16aa..fcc0af0 100644
--- a/drivers/staging/gma500/psb_intel_reg.h
+++ b/drivers/gpu/drm/gma500/psb_intel_reg.h
@@ -17,6 +17,78 @@
#ifndef __PSB_INTEL_REG_H__
#define __PSB_INTEL_REG_H__
+/*
+ * GPIO regs
+ */
+#define GPIOA 0x5010
+#define GPIOB 0x5014
+#define GPIOC 0x5018
+#define GPIOD 0x501c
+#define GPIOE 0x5020
+#define GPIOF 0x5024
+#define GPIOG 0x5028
+#define GPIOH 0x502c
+# define GPIO_CLOCK_DIR_MASK (1 << 0)
+# define GPIO_CLOCK_DIR_IN (0 << 1)
+# define GPIO_CLOCK_DIR_OUT (1 << 1)
+# define GPIO_CLOCK_VAL_MASK (1 << 2)
+# define GPIO_CLOCK_VAL_OUT (1 << 3)
+# define GPIO_CLOCK_VAL_IN (1 << 4)
+# define GPIO_CLOCK_PULLUP_DISABLE (1 << 5)
+# define GPIO_DATA_DIR_MASK (1 << 8)
+# define GPIO_DATA_DIR_IN (0 << 9)
+# define GPIO_DATA_DIR_OUT (1 << 9)
+# define GPIO_DATA_VAL_MASK (1 << 10)
+# define GPIO_DATA_VAL_OUT (1 << 11)
+# define GPIO_DATA_VAL_IN (1 << 12)
+# define GPIO_DATA_PULLUP_DISABLE (1 << 13)
+
+#define GMBUS0 0x5100 /* clock/port select */
+#define GMBUS_RATE_100KHZ (0<<8)
+#define GMBUS_RATE_50KHZ (1<<8)
+#define GMBUS_RATE_400KHZ (2<<8) /* reserved on Pineview */
+#define GMBUS_RATE_1MHZ (3<<8) /* reserved on Pineview */
+#define GMBUS_HOLD_EXT (1<<7) /* 300ns hold time, rsvd on Pineview */
+#define GMBUS_PORT_DISABLED 0
+#define GMBUS_PORT_SSC 1
+#define GMBUS_PORT_VGADDC 2
+#define GMBUS_PORT_PANEL 3
+#define GMBUS_PORT_DPC 4 /* HDMIC */
+#define GMBUS_PORT_DPB 5 /* SDVO, HDMIB */
+ /* 6 reserved */
+#define GMBUS_PORT_DPD 7 /* HDMID */
+#define GMBUS_NUM_PORTS 8
+#define GMBUS1 0x5104 /* command/status */
+#define GMBUS_SW_CLR_INT (1<<31)
+#define GMBUS_SW_RDY (1<<30)
+#define GMBUS_ENT (1<<29) /* enable timeout */
+#define GMBUS_CYCLE_NONE (0<<25)
+#define GMBUS_CYCLE_WAIT (1<<25)
+#define GMBUS_CYCLE_INDEX (2<<25)
+#define GMBUS_CYCLE_STOP (4<<25)
+#define GMBUS_BYTE_COUNT_SHIFT 16
+#define GMBUS_SLAVE_INDEX_SHIFT 8
+#define GMBUS_SLAVE_ADDR_SHIFT 1
+#define GMBUS_SLAVE_READ (1<<0)
+#define GMBUS_SLAVE_WRITE (0<<0)
+#define GMBUS2 0x5108 /* status */
+#define GMBUS_INUSE (1<<15)
+#define GMBUS_HW_WAIT_PHASE (1<<14)
+#define GMBUS_STALL_TIMEOUT (1<<13)
+#define GMBUS_INT (1<<12)
+#define GMBUS_HW_RDY (1<<11)
+#define GMBUS_SATOER (1<<10)
+#define GMBUS_ACTIVE (1<<9)
+#define GMBUS3 0x510c /* data buffer bytes 3-0 */
+#define GMBUS4 0x5110 /* interrupt mask (Pineview+) */
+#define GMBUS_SLAVE_TIMEOUT_EN (1<<4)
+#define GMBUS_NAK_EN (1<<3)
+#define GMBUS_IDLE_EN (1<<2)
+#define GMBUS_HW_WAIT_EN (1<<1)
+#define GMBUS_HW_RDY_EN (1<<0)
+#define GMBUS5 0x5120 /* byte index */
+#define GMBUS_2BYTE_INDEX_EN (1<<31)
+
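
GMBUS1 packs an entire transaction into a single register write — cycle type, transfer length, 7-bit slave address and the read/write bit — while GMBUS2 is polled for HW_RDY/SATOER. The actual programming sequence lives in intel_gmbus.c and is not shown here; the sketch below only illustrates how a read command word could be composed from the macros above, and should be treated as an assumption rather than the driver's code.

/* Illustration only: a GMBUS1 command word for a 'len'-byte read from
 * 7-bit address 'addr'. The real driver sequence is in intel_gmbus.c. */
static inline u32 example_gmbus1_read_cmd(u8 addr, u16 len)
{
	return GMBUS_SW_RDY | GMBUS_CYCLE_WAIT |
	       ((u32)len << GMBUS_BYTE_COUNT_SHIFT) |
	       ((u32)addr << GMBUS_SLAVE_ADDR_SHIFT) |
	       GMBUS_SLAVE_READ;
}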
#define BLC_PWM_CTL 0x61254
#define BLC_PWM_CTL2 0x61250
#define BLC_PWM_CTL_C 0x62254
@@ -304,6 +376,8 @@
#define SDVO_PIPE_B_SELECT (1 << 30)
#define SDVO_STALL_SELECT (1 << 29)
#define SDVO_INTERRUPT_ENABLE (1 << 26)
+#define SDVO_COLOR_RANGE_16_235 (1 << 8)
+#define SDVO_AUDIO_ENABLE (1 << 6)
/**
* 915G/GM SDVO pixel multiplier.
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
new file mode 100644
index 0000000..88b4297
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -0,0 +1,2623 @@
+/*
+ * Copyright 2006 Dave Airlie <airlied@linux.ie>
+ * Copyright © 2006-2007 Intel Corporation
+ * Jesse Barnes <jesse.barnes@intel.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include "drmP.h"
+#include "drm.h"
+#include "drm_crtc.h"
+#include "drm_edid.h"
+#include "psb_intel_drv.h"
+#include "gma_drm.h"
+#include "psb_drv.h"
+#include "psb_intel_sdvo_regs.h"
+#include "psb_intel_reg.h"
+
+#define SDVO_TMDS_MASK (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)
+#define SDVO_RGB_MASK (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1)
+#define SDVO_LVDS_MASK (SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1)
+#define SDVO_TV_MASK (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
+
+#define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
+ SDVO_TV_MASK)
+
+#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
+#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
+#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
+#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
+
+
+static const char *tv_format_names[] = {
+ "NTSC_M" , "NTSC_J" , "NTSC_443",
+ "PAL_B" , "PAL_D" , "PAL_G" ,
+ "PAL_H" , "PAL_I" , "PAL_M" ,
+ "PAL_N" , "PAL_NC" , "PAL_60" ,
+ "SECAM_B" , "SECAM_D" , "SECAM_G" ,
+ "SECAM_K" , "SECAM_K1", "SECAM_L" ,
+ "SECAM_60"
+};
+
+#define TV_FORMAT_NUM (sizeof(tv_format_names) / sizeof(*tv_format_names))
+
+struct psb_intel_sdvo {
+ struct psb_intel_encoder base;
+
+ struct i2c_adapter *i2c;
+ u8 slave_addr;
+
+ struct i2c_adapter ddc;
+
+ /* Register for the SDVO device: SDVOB or SDVOC */
+ int sdvo_reg;
+
+ /* Active outputs controlled by this SDVO output */
+ uint16_t controlled_output;
+
+ /*
+ * Capabilities of the SDVO device returned by
+ * i830_sdvo_get_capabilities()
+ */
+ struct psb_intel_sdvo_caps caps;
+
+ /* Pixel clock limitations reported by the SDVO device, in kHz */
+ int pixel_clock_min, pixel_clock_max;
+
+ /*
+ * For multiple function SDVO device,
+ * this is for current attached outputs.
+ */
+ uint16_t attached_output;
+
+ /**
+ * This is used to select the color range of RGB outputs in HDMI mode.
+ * It is only valid when using TMDS encoding and 8 bit per color mode.
+ */
+ uint32_t color_range;
+
+ /**
+ * This is set if we're going to treat the device as TV-out.
+ *
+ * While we have these nice friendly flags for output types that ought
+ * to decide this for us, the S-Video output on our HDMI+S-Video card
+ * shows up as RGB1 (VGA).
+ */
+ bool is_tv;
+
+ /* This is for current tv format name */
+ int tv_format_index;
+
+ /**
+ * This is set if we treat the device as HDMI, instead of DVI.
+ */
+ bool is_hdmi;
+ bool has_hdmi_monitor;
+ bool has_hdmi_audio;
+
+ /**
+ * This is set if we detect output of sdvo device as LVDS and
+ * have a valid fixed mode to use with the panel.
+ */
+ bool is_lvds;
+
+ /**
+ * This is the SDVO fixed panel mode pointer.
+ */
+ struct drm_display_mode *sdvo_lvds_fixed_mode;
+
+ /* DDC bus used by this SDVO encoder */
+ uint8_t ddc_bus;
+
+ /* Input timings for adjusted_mode */
+ struct psb_intel_sdvo_dtd input_dtd;
+};
+
+struct psb_intel_sdvo_connector {
+ struct psb_intel_connector base;
+
+ /* Mark the type of connector */
+ uint16_t output_flag;
+
+ int force_audio;
+
+ /* This contains all current supported TV format */
+ u8 tv_format_supported[TV_FORMAT_NUM];
+ int format_supported_num;
+ struct drm_property *tv_format;
+
+ /* add the property for the SDVO-TV */
+ struct drm_property *left;
+ struct drm_property *right;
+ struct drm_property *top;
+ struct drm_property *bottom;
+ struct drm_property *hpos;
+ struct drm_property *vpos;
+ struct drm_property *contrast;
+ struct drm_property *saturation;
+ struct drm_property *hue;
+ struct drm_property *sharpness;
+ struct drm_property *flicker_filter;
+ struct drm_property *flicker_filter_adaptive;
+ struct drm_property *flicker_filter_2d;
+ struct drm_property *tv_chroma_filter;
+ struct drm_property *tv_luma_filter;
+ struct drm_property *dot_crawl;
+
+ /* add the property for the SDVO-TV/LVDS */
+ struct drm_property *brightness;
+
+ /* Add variable to record current setting for the above property */
+ u32 left_margin, right_margin, top_margin, bottom_margin;
+
+ /* this is to get the range of margin.*/
+ u32 max_hscan, max_vscan;
+ u32 max_hpos, cur_hpos;
+ u32 max_vpos, cur_vpos;
+ u32 cur_brightness, max_brightness;
+ u32 cur_contrast, max_contrast;
+ u32 cur_saturation, max_saturation;
+ u32 cur_hue, max_hue;
+ u32 cur_sharpness, max_sharpness;
+ u32 cur_flicker_filter, max_flicker_filter;
+ u32 cur_flicker_filter_adaptive, max_flicker_filter_adaptive;
+ u32 cur_flicker_filter_2d, max_flicker_filter_2d;
+ u32 cur_tv_chroma_filter, max_tv_chroma_filter;
+ u32 cur_tv_luma_filter, max_tv_luma_filter;
+ u32 cur_dot_crawl, max_dot_crawl;
+};
+
+static struct psb_intel_sdvo *to_psb_intel_sdvo(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct psb_intel_sdvo, base.base);
+}
+
+static struct psb_intel_sdvo *intel_attached_sdvo(struct drm_connector *connector)
+{
+ return container_of(psb_intel_attached_encoder(connector),
+ struct psb_intel_sdvo, base);
+}
+
+static struct psb_intel_sdvo_connector *to_psb_intel_sdvo_connector(struct drm_connector *connector)
+{
+ return container_of(to_psb_intel_connector(connector), struct psb_intel_sdvo_connector, base);
+}
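
All three helpers above recover the driver-private wrapper from the embedded DRM object with container_of(), i.e. pure pointer arithmetic over the struct layout: psb_intel_sdvo embeds a psb_intel_encoder as 'base', which in turn embeds the drm_encoder handed to the DRM core. A tiny standalone illustration of the idiom (the struct names here are made up for the example):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_encoder { int id; };

struct wrapped_encoder {
	struct base_encoder base;	/* embedded, like psb_intel_encoder.base */
	int extra_state;
};

int main(void)
{
	struct wrapped_encoder w = { .base = { .id = 7 }, .extra_state = 42 };
	struct base_encoder *b = &w.base;	/* what the core hands back */

	/* Recover the wrapper exactly as to_psb_intel_sdvo() does. */
	struct wrapped_encoder *again =
		container_of(b, struct wrapped_encoder, base);

	printf("%d %d\n", again->base.id, again->extra_state);	/* prints: 7 42 */
	return 0;
}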
+
+static bool
+psb_intel_sdvo_output_setup(struct psb_intel_sdvo *psb_intel_sdvo, uint16_t flags);
+static bool
+psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
+ int type);
+static bool
+psb_intel_sdvo_create_enhance_property(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector);
+
+/**
+ * Writes the SDVOB or SDVOC with the given value, but always writes both
+ * SDVOB and SDVOC to work around apparent hardware issues (according to
+ * comments in the BIOS).
+ */
+static void psb_intel_sdvo_write_sdvox(struct psb_intel_sdvo *psb_intel_sdvo, u32 val)
+{
+ struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+ u32 bval = val, cval = val;
+ int i;
+
+ if (psb_intel_sdvo->sdvo_reg == SDVOB) {
+ cval = REG_READ(SDVOC);
+ } else {
+ bval = REG_READ(SDVOB);
+ }
+ /*
+ * Write the registers twice for luck. Sometimes,
+ * writing them only once doesn't appear to 'stick'.
+ * The BIOS does this too. Yay, magic
+ */
+ for (i = 0; i < 2; i++)
+ {
+ REG_WRITE(SDVOB, bval);
+ REG_READ(SDVOB);
+ REG_WRITE(SDVOC, cval);
+ REG_READ(SDVOC);
+ }
+}
+
+static bool psb_intel_sdvo_read_byte(struct psb_intel_sdvo *psb_intel_sdvo, u8 addr, u8 *ch)
+{
+ struct i2c_msg msgs[] = {
+ {
+ .addr = psb_intel_sdvo->slave_addr,
+ .flags = 0,
+ .len = 1,
+ .buf = &addr,
+ },
+ {
+ .addr = psb_intel_sdvo->slave_addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = ch,
+ }
+ };
+ int ret;
+
+ if ((ret = i2c_transfer(psb_intel_sdvo->i2c, msgs, 2)) == 2)
+ return true;
+
+ DRM_DEBUG_KMS("i2c transfer returned %d\n", ret);
+ return false;
+}
+
+#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
+/** Mapping of command numbers to names, for debug output */
+static const struct _sdvo_cmd_name {
+ u8 cmd;
+ const char *name;
+} sdvo_cmd_names[] = {
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
+
+ /* Add the op code for SDVO enhancements */
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
+
+ /* HDMI op code */
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
+ SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
+};
+
+#define IS_SDVOB(reg) (reg == SDVOB)
+#define SDVO_NAME(svdo) (IS_SDVOB((svdo)->sdvo_reg) ? "SDVOB" : "SDVOC")
+
+static void psb_intel_sdvo_debug_write(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
+ const void *args, int args_len)
+{
+ int i;
+
+ DRM_DEBUG_KMS("%s: W: %02X ",
+ SDVO_NAME(psb_intel_sdvo), cmd);
+ for (i = 0; i < args_len; i++)
+ DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
+ for (; i < 8; i++)
+ DRM_LOG_KMS(" ");
+ for (i = 0; i < ARRAY_SIZE(sdvo_cmd_names); i++) {
+ if (cmd == sdvo_cmd_names[i].cmd) {
+ DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name);
+ break;
+ }
+ }
+ if (i == ARRAY_SIZE(sdvo_cmd_names))
+ DRM_LOG_KMS("(%02X)", cmd);
+ DRM_LOG_KMS("\n");
+}
+
+static const char *cmd_status_names[] = {
+ "Power on",
+ "Success",
+ "Not supported",
+ "Invalid arg",
+ "Pending",
+ "Target not specified",
+ "Scaling not supported"
+};
+
+static bool psb_intel_sdvo_write_cmd(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
+ const void *args, int args_len)
+{
+ u8 buf[args_len*2 + 2], status;
+ struct i2c_msg msgs[args_len + 3];
+ int i, ret;
+
+ psb_intel_sdvo_debug_write(psb_intel_sdvo, cmd, args, args_len);
+
+ for (i = 0; i < args_len; i++) {
+ msgs[i].addr = psb_intel_sdvo->slave_addr;
+ msgs[i].flags = 0;
+ msgs[i].len = 2;
+ msgs[i].buf = buf + 2 *i;
+ buf[2*i + 0] = SDVO_I2C_ARG_0 - i;
+ buf[2*i + 1] = ((u8*)args)[i];
+ }
+ msgs[i].addr = psb_intel_sdvo->slave_addr;
+ msgs[i].flags = 0;
+ msgs[i].len = 2;
+ msgs[i].buf = buf + 2*i;
+ buf[2*i + 0] = SDVO_I2C_OPCODE;
+ buf[2*i + 1] = cmd;
+
+ /* the following two are to read the response */
+ status = SDVO_I2C_CMD_STATUS;
+ msgs[i+1].addr = psb_intel_sdvo->slave_addr;
+ msgs[i+1].flags = 0;
+ msgs[i+1].len = 1;
+ msgs[i+1].buf = &status;
+
+ msgs[i+2].addr = psb_intel_sdvo->slave_addr;
+ msgs[i+2].flags = I2C_M_RD;
+ msgs[i+2].len = 1;
+ msgs[i+2].buf = &status;
+
+ ret = i2c_transfer(psb_intel_sdvo->i2c, msgs, i+3);
+ if (ret < 0) {
+ DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
+ return false;
+ }
+ if (ret != i+3) {
+ /* failure in I2C transfer */
+ DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
+ return false;
+ }
+
+ return true;
+}
+
+static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
+ void *response, int response_len)
+{
+ u8 retry = 5;
+ u8 status;
+ int i;
+
+ DRM_DEBUG_KMS("%s: R: ", SDVO_NAME(psb_intel_sdvo));
+
+ /*
+ * The documentation states that all commands will be
+ * processed within 15µs, and that we need only poll
+ * the status byte a maximum of 3 times in order for the
+ * command to be complete.
+ *
+ * Check 5 times in case the hardware failed to read the docs.
+ */
+ if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
+ SDVO_I2C_CMD_STATUS,
+ &status))
+ goto log_fail;
+
+ while (status == SDVO_CMD_STATUS_PENDING && retry--) {
+ udelay(15);
+ if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
+ SDVO_I2C_CMD_STATUS,
+ &status))
+ goto log_fail;
+ }
+
+ if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
+ DRM_LOG_KMS("(%s)", cmd_status_names[status]);
+ else
+ DRM_LOG_KMS("(??? %d)", status);
+
+ if (status != SDVO_CMD_STATUS_SUCCESS)
+ goto log_fail;
+
+ /* Read the command response */
+ for (i = 0; i < response_len; i++) {
+ if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
+ SDVO_I2C_RETURN_0 + i,
+ &((u8 *)response)[i]))
+ goto log_fail;
+ DRM_LOG_KMS(" %02X", ((u8 *)response)[i]);
+ }
+ DRM_LOG_KMS("\n");
+ return true;
+
+log_fail:
+ DRM_LOG_KMS("... failed\n");
+ return false;
+}
+
+static int psb_intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
+{
+ if (mode->clock >= 100000)
+ return 1;
+ else if (mode->clock >= 50000)
+ return 2;
+ else
+ return 4;
+}
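
The multiplier exists because an SDVO device expects its input pixel clock in roughly the 100–200 MHz band (an assumption from the SDVO spec, not stated by this patch), so slower dot clocks are sent at 2x or 4x and divided back out by the encoder; mode_set later programs the matching value via SDVO_CMD_SET_CLOCK_RATE_MULT. A worked example of the thresholds:

#include <stdio.h>

/* Worked example of the thresholds above (clocks in kHz, as in drm_display_mode). */
int main(void)
{
	const int clocks[] = { 25175, 65000, 148500 };	/* VGA, XGA, 1080p */
	int i;

	for (i = 0; i < 3; i++) {
		int mult = clocks[i] >= 100000 ? 1 : clocks[i] >= 50000 ? 2 : 4;
		printf("%d kHz -> x%d -> %d kHz on the SDVO link\n",
		       clocks[i], mult, clocks[i] * mult);
	}
	return 0;
}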
+
+static bool psb_intel_sdvo_set_control_bus_switch(struct psb_intel_sdvo *psb_intel_sdvo,
+ u8 ddc_bus)
+{
+ /* This must be the immediately preceding write before the i2c xfer */
+ return psb_intel_sdvo_write_cmd(psb_intel_sdvo,
+ SDVO_CMD_SET_CONTROL_BUS_SWITCH,
+ &ddc_bus, 1);
+}
+
+static bool psb_intel_sdvo_set_value(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd, const void *data, int len)
+{
+ if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo, cmd, data, len))
+ return false;
+
+ return psb_intel_sdvo_read_response(psb_intel_sdvo, NULL, 0);
+}
+
+static bool
+psb_intel_sdvo_get_value(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd, void *value, int len)
+{
+ if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo, cmd, NULL, 0))
+ return false;
+
+ return psb_intel_sdvo_read_response(psb_intel_sdvo, value, len);
+}
+
+static bool psb_intel_sdvo_set_target_input(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+ struct psb_intel_sdvo_set_target_input_args targets = {0};
+ return psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_TARGET_INPUT,
+ &targets, sizeof(targets));
+}
+
+/**
+ * Return whether each input is trained.
+ *
+ * This function is making an assumption about the layout of the response,
+ * which should be checked against the docs.
+ */
+static bool psb_intel_sdvo_get_trained_inputs(struct psb_intel_sdvo *psb_intel_sdvo, bool *input_1, bool *input_2)
+{
+ struct psb_intel_sdvo_get_trained_inputs_response response;
+
+ BUILD_BUG_ON(sizeof(response) != 1);
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_TRAINED_INPUTS,
+ &response, sizeof(response)))
+ return false;
+
+ *input_1 = response.input0_trained;
+ *input_2 = response.input1_trained;
+ return true;
+}
+
+static bool psb_intel_sdvo_set_active_outputs(struct psb_intel_sdvo *psb_intel_sdvo,
+ u16 outputs)
+{
+ return psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_ACTIVE_OUTPUTS,
+ &outputs, sizeof(outputs));
+}
+
+static bool psb_intel_sdvo_set_encoder_power_state(struct psb_intel_sdvo *psb_intel_sdvo,
+ int mode)
+{
+ u8 state = SDVO_ENCODER_STATE_ON;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ state = SDVO_ENCODER_STATE_ON;
+ break;
+ case DRM_MODE_DPMS_STANDBY:
+ state = SDVO_ENCODER_STATE_STANDBY;
+ break;
+ case DRM_MODE_DPMS_SUSPEND:
+ state = SDVO_ENCODER_STATE_SUSPEND;
+ break;
+ case DRM_MODE_DPMS_OFF:
+ state = SDVO_ENCODER_STATE_OFF;
+ break;
+ }
+
+ return psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_ENCODER_POWER_STATE, &state, sizeof(state));
+}
+
+static bool psb_intel_sdvo_get_input_pixel_clock_range(struct psb_intel_sdvo *psb_intel_sdvo,
+ int *clock_min,
+ int *clock_max)
+{
+ struct psb_intel_sdvo_pixel_clock_range clocks;
+
+ BUILD_BUG_ON(sizeof(clocks) != 4);
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE,
+ &clocks, sizeof(clocks)))
+ return false;
+
+ /* Convert the values from units of 10 kHz to kHz. */
+ *clock_min = clocks.min * 10;
+ *clock_max = clocks.max * 10;
+ return true;
+}
+
+static bool psb_intel_sdvo_set_target_output(struct psb_intel_sdvo *psb_intel_sdvo,
+ u16 outputs)
+{
+ return psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_TARGET_OUTPUT,
+ &outputs, sizeof(outputs));
+}
+
+static bool psb_intel_sdvo_set_timing(struct psb_intel_sdvo *psb_intel_sdvo, u8 cmd,
+ struct psb_intel_sdvo_dtd *dtd)
+{
+ return psb_intel_sdvo_set_value(psb_intel_sdvo, cmd, &dtd->part1, sizeof(dtd->part1)) &&
+ psb_intel_sdvo_set_value(psb_intel_sdvo, cmd + 1, &dtd->part2, sizeof(dtd->part2));
+}
+
+static bool psb_intel_sdvo_set_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_dtd *dtd)
+{
+ return psb_intel_sdvo_set_timing(psb_intel_sdvo,
+ SDVO_CMD_SET_INPUT_TIMINGS_PART1, dtd);
+}
+
+static bool psb_intel_sdvo_set_output_timing(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_dtd *dtd)
+{
+ return psb_intel_sdvo_set_timing(psb_intel_sdvo,
+ SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
+}
+
+static bool
+psb_intel_sdvo_create_preferred_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
+ uint16_t clock,
+ uint16_t width,
+ uint16_t height)
+{
+ struct psb_intel_sdvo_preferred_input_timing_args args;
+
+ memset(&args, 0, sizeof(args));
+ args.clock = clock;
+ args.width = width;
+ args.height = height;
+ args.interlace = 0;
+
+ if (psb_intel_sdvo->is_lvds &&
+ (psb_intel_sdvo->sdvo_lvds_fixed_mode->hdisplay != width ||
+ psb_intel_sdvo->sdvo_lvds_fixed_mode->vdisplay != height))
+ args.scaled = 1;
+
+ return psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
+ &args, sizeof(args));
+}
+
+static bool psb_intel_sdvo_get_preferred_input_timing(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_dtd *dtd)
+{
+ BUILD_BUG_ON(sizeof(dtd->part1) != 8);
+ BUILD_BUG_ON(sizeof(dtd->part2) != 8);
+ return psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
+ &dtd->part1, sizeof(dtd->part1)) &&
+ psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
+ &dtd->part2, sizeof(dtd->part2));
+}
+
+static bool psb_intel_sdvo_set_clock_rate_mult(struct psb_intel_sdvo *psb_intel_sdvo, u8 val)
+{
+ return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_CLOCK_RATE_MULT, &val, 1);
+}
+
+static void psb_intel_sdvo_get_dtd_from_mode(struct psb_intel_sdvo_dtd *dtd,
+ const struct drm_display_mode *mode)
+{
+ uint16_t width, height;
+ uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
+ uint16_t h_sync_offset, v_sync_offset;
+
+ width = mode->crtc_hdisplay;
+ height = mode->crtc_vdisplay;
+
+ /* do some mode translations */
+ h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
+ h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
+
+ v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
+ v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
+
+ h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
+ v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
+
+ dtd->part1.clock = mode->clock / 10;
+ dtd->part1.h_active = width & 0xff;
+ dtd->part1.h_blank = h_blank_len & 0xff;
+ dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
+ ((h_blank_len >> 8) & 0xf);
+ dtd->part1.v_active = height & 0xff;
+ dtd->part1.v_blank = v_blank_len & 0xff;
+ dtd->part1.v_high = (((height >> 8) & 0xf) << 4) |
+ ((v_blank_len >> 8) & 0xf);
+
+ dtd->part2.h_sync_off = h_sync_offset & 0xff;
+ dtd->part2.h_sync_width = h_sync_len & 0xff;
+ dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
+ (v_sync_len & 0xf);
+ dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
+ ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
+ ((v_sync_len & 0x30) >> 4);
+
+ dtd->part2.dtd_flags = 0x18;
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ dtd->part2.dtd_flags |= 0x2;
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ dtd->part2.dtd_flags |= 0x4;
+
+ dtd->part2.sdvo_flags = 0;
+ dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
+ dtd->part2.reserved = 0;
+}
+
+static void psb_intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
+ const struct psb_intel_sdvo_dtd *dtd)
+{
+ mode->hdisplay = dtd->part1.h_active;
+ mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
+ mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off;
+ mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
+ mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width;
+ mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
+ mode->htotal = mode->hdisplay + dtd->part1.h_blank;
+ mode->htotal += (dtd->part1.h_high & 0xf) << 8;
+
+ mode->vdisplay = dtd->part1.v_active;
+ mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
+ mode->vsync_start = mode->vdisplay;
+ mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
+ mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
+ mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0;
+ mode->vsync_end = mode->vsync_start +
+ (dtd->part2.v_sync_off_width & 0xf);
+ mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
+ mode->vtotal = mode->vdisplay + dtd->part1.v_blank;
+ mode->vtotal += (dtd->part1.v_high & 0xf) << 8;
+
+ mode->clock = dtd->part1.clock * 10;
+
+ mode->flags &= ~(DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+ if (dtd->part2.dtd_flags & 0x2)
+ mode->flags |= DRM_MODE_FLAG_PHSYNC;
+ if (dtd->part2.dtd_flags & 0x4)
+ mode->flags |= DRM_MODE_FLAG_PVSYNC;
+}
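
The two helpers above split each 12-bit timing value into a low byte plus a high nibble (and the sync offsets/widths into 4- and 2-bit pieces). A self-contained check that the h_active packing round-trips, using only the bit layout visible above; the field names are shortened, so treat this as an illustration rather than the driver structs:

#include <assert.h>
#include <stdint.h>

/* Pack/unpack of a 12-bit horizontal active count, mirroring
 * part1.h_active (low 8 bits) and the high nibble of part1.h_high. */
int main(void)
{
	uint16_t width = 1366;				/* 0x556 */

	uint8_t h_active = width & 0xff;		/* 0x56 */
	uint8_t h_high   = ((width >> 8) & 0xf) << 4;	/* 0x50, blank nibble left 0 */

	uint16_t again = h_active | (((h_high >> 4) & 0x0f) << 8);
	assert(again == width);
	return 0;
}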
+
+static bool psb_intel_sdvo_check_supp_encode(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+ struct psb_intel_sdvo_encode encode;
+
+ BUILD_BUG_ON(sizeof(encode) != 2);
+ return psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_SUPP_ENCODE,
+ &encode, sizeof(encode));
+}
+
+static bool psb_intel_sdvo_set_encode(struct psb_intel_sdvo *psb_intel_sdvo,
+ uint8_t mode)
+{
+ return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_ENCODE, &mode, 1);
+}
+
+static bool psb_intel_sdvo_set_colorimetry(struct psb_intel_sdvo *psb_intel_sdvo,
+ uint8_t mode)
+{
+ return psb_intel_sdvo_set_value(psb_intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
+}
+
+#if 0
+static void psb_intel_sdvo_dump_hdmi_buf(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+ int i, j;
+ uint8_t set_buf_index[2];
+ uint8_t av_split;
+ uint8_t buf_size;
+ uint8_t buf[48];
+ uint8_t *pos;
+
+ psb_intel_sdvo_get_value(encoder, SDVO_CMD_GET_HBUF_AV_SPLIT, &av_split, 1);
+
+ for (i = 0; i <= av_split; i++) {
+ set_buf_index[0] = i; set_buf_index[1] = 0;
+ psb_intel_sdvo_write_cmd(encoder, SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2);
+ psb_intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
+ psb_intel_sdvo_read_response(encoder, &buf_size, 1);
+
+ pos = buf;
+ for (j = 0; j <= buf_size; j += 8) {
+ psb_intel_sdvo_write_cmd(encoder, SDVO_CMD_GET_HBUF_DATA,
+ NULL, 0);
+ psb_intel_sdvo_read_response(encoder, pos, 8);
+ pos += 8;
+ }
+ }
+}
+#endif
+
+static bool psb_intel_sdvo_set_avi_infoframe(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+ DRM_INFO("HDMI is not supported yet");
+
+ return false;
+#if 0
+ struct dip_infoframe avi_if = {
+ .type = DIP_TYPE_AVI,
+ .ver = DIP_VERSION_AVI,
+ .len = DIP_LEN_AVI,
+ };
+ uint8_t tx_rate = SDVO_HBUF_TX_VSYNC;
+ uint8_t set_buf_index[2] = { 1, 0 };
+ uint64_t *data = (uint64_t *)&avi_if;
+ unsigned i;
+
+ intel_dip_infoframe_csum(&avi_if);
+
+ if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_HBUF_INDEX,
+ set_buf_index, 2))
+ return false;
+
+ for (i = 0; i < sizeof(avi_if); i += 8) {
+ if (!psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_HBUF_DATA,
+ data, 8))
+ return false;
+ data++;
+ }
+
+ return psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_HBUF_TXRATE,
+ &tx_rate, 1);
+#endif
+}
+
+static bool psb_intel_sdvo_set_tv_format(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+ struct psb_intel_sdvo_tv_format format;
+ uint32_t format_map;
+
+ format_map = 1 << psb_intel_sdvo->tv_format_index;
+ memset(&format, 0, sizeof(format));
+ memcpy(&format, &format_map, min(sizeof(format), sizeof(format_map)));
+
+ BUILD_BUG_ON(sizeof(format) != 6);
+ return psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_TV_FORMAT,
+ &format, sizeof(format));
+}
+
+static bool
+psb_intel_sdvo_set_output_timings_from_mode(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct drm_display_mode *mode)
+{
+ struct psb_intel_sdvo_dtd output_dtd;
+
+ if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
+ psb_intel_sdvo->attached_output))
+ return false;
+
+ psb_intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
+ if (!psb_intel_sdvo_set_output_timing(psb_intel_sdvo, &output_dtd))
+ return false;
+
+ return true;
+}
+
+static bool
+psb_intel_sdvo_set_input_timings_for_mode(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ /* Reset the input timing to the screen. Assume always input 0. */
+ if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
+ return false;
+
+ if (!psb_intel_sdvo_create_preferred_input_timing(psb_intel_sdvo,
+ mode->clock / 10,
+ mode->hdisplay,
+ mode->vdisplay))
+ return false;
+
+ if (!psb_intel_sdvo_get_preferred_input_timing(psb_intel_sdvo,
+ &psb_intel_sdvo->input_dtd))
+ return false;
+
+ psb_intel_sdvo_get_mode_from_dtd(adjusted_mode, &psb_intel_sdvo->input_dtd);
+
+ drm_mode_set_crtcinfo(adjusted_mode, 0);
+ return true;
+}
+
+static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
+ int multiplier;
+
+ /* We need to construct preferred input timings based on our
+ * output timings. To do that, we have to set the output
+ * timings, even though this isn't really the right place in
+ * the sequence to do it. Oh well.
+ */
+ if (psb_intel_sdvo->is_tv) {
+ if (!psb_intel_sdvo_set_output_timings_from_mode(psb_intel_sdvo, mode))
+ return false;
+
+ (void) psb_intel_sdvo_set_input_timings_for_mode(psb_intel_sdvo,
+ mode,
+ adjusted_mode);
+ } else if (psb_intel_sdvo->is_lvds) {
+ if (!psb_intel_sdvo_set_output_timings_from_mode(psb_intel_sdvo,
+ psb_intel_sdvo->sdvo_lvds_fixed_mode))
+ return false;
+
+ (void) psb_intel_sdvo_set_input_timings_for_mode(psb_intel_sdvo,
+ mode,
+ adjusted_mode);
+ }
+
+ /* Make the CRTC code factor in the SDVO pixel multiplier. The
+ * SDVO device will factor out the multiplier during mode_set.
+ */
+ multiplier = psb_intel_sdvo_get_pixel_multiplier(adjusted_mode);
+ psb_intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
+
+ return true;
+}
+
+static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct drm_crtc *crtc = encoder->crtc;
+ struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
+ struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
+ u32 sdvox;
+ struct psb_intel_sdvo_in_out_map in_out;
+ struct psb_intel_sdvo_dtd input_dtd;
+ int pixel_multiplier = psb_intel_mode_get_pixel_multiplier(adjusted_mode);
+ int rate;
+
+ if (!mode)
+ return;
+
+ /* First, set the input mapping for the first input to our controlled
+ * output. This is only correct if we're a single-input device, in
+ * which case the first input is the output from the appropriate SDVO
+ * channel on the motherboard. In a two-input device, the first input
+ * will be SDVOB and the second SDVOC.
+ */
+ in_out.in0 = psb_intel_sdvo->attached_output;
+ in_out.in1 = 0;
+
+ psb_intel_sdvo_set_value(psb_intel_sdvo,
+ SDVO_CMD_SET_IN_OUT_MAP,
+ &in_out, sizeof(in_out));
+
+ /* Set the output timings to the screen */
+ if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
+ psb_intel_sdvo->attached_output))
+ return;
+
+ /* We have tried to get input timing in mode_fixup, and filled into
+ * adjusted_mode.
+ */
+ if (psb_intel_sdvo->is_tv || psb_intel_sdvo->is_lvds) {
+ input_dtd = psb_intel_sdvo->input_dtd;
+ } else {
+ /* Set the output timing to the screen */
+ if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo,
+ psb_intel_sdvo->attached_output))
+ return;
+
+ psb_intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode);
+ (void) psb_intel_sdvo_set_output_timing(psb_intel_sdvo, &input_dtd);
+ }
+
+ /* Set the input timing to the screen. Assume always input 0. */
+ if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
+ return;
+
+ if (psb_intel_sdvo->has_hdmi_monitor) {
+ psb_intel_sdvo_set_encode(psb_intel_sdvo, SDVO_ENCODE_HDMI);
+ psb_intel_sdvo_set_colorimetry(psb_intel_sdvo,
+ SDVO_COLORIMETRY_RGB256);
+ psb_intel_sdvo_set_avi_infoframe(psb_intel_sdvo);
+ } else
+ psb_intel_sdvo_set_encode(psb_intel_sdvo, SDVO_ENCODE_DVI);
+
+ if (psb_intel_sdvo->is_tv &&
+ !psb_intel_sdvo_set_tv_format(psb_intel_sdvo))
+ return;
+
+ (void) psb_intel_sdvo_set_input_timing(psb_intel_sdvo, &input_dtd);
+
+ switch (pixel_multiplier) {
+ default:
+ case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
+ case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
+ case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break;
+ }
+ if (!psb_intel_sdvo_set_clock_rate_mult(psb_intel_sdvo, rate))
+ return;
+
+ /* Set the SDVO control regs. */
+ sdvox = REG_READ(psb_intel_sdvo->sdvo_reg);
+ switch (psb_intel_sdvo->sdvo_reg) {
+ case SDVOB:
+ sdvox &= SDVOB_PRESERVE_MASK;
+ break;
+ case SDVOC:
+ sdvox &= SDVOC_PRESERVE_MASK;
+ break;
+ }
+ sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
+
+ if (psb_intel_crtc->pipe == 1)
+ sdvox |= SDVO_PIPE_B_SELECT;
+ if (psb_intel_sdvo->has_hdmi_audio)
+ sdvox |= SDVO_AUDIO_ENABLE;
+
+ /* FIXME: Check if this is needed for PSB
+ sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
+ */
+
+ if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL)
+ sdvox |= SDVO_STALL_SELECT;
+ psb_intel_sdvo_write_sdvox(psb_intel_sdvo, sdvox);
+}
+
+static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct drm_device *dev = encoder->dev;
+ struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
+ u32 temp;
+
+ switch (mode) {
+ case DRM_MODE_DPMS_ON:
+ DRM_DEBUG("DPMS_ON");
+ break;
+ case DRM_MODE_DPMS_OFF:
+ DRM_DEBUG("DPMS_OFF");
+ break;
+ default:
+ DRM_DEBUG("DPMS: %d", mode);
+ }
+
+ if (mode != DRM_MODE_DPMS_ON) {
+ psb_intel_sdvo_set_active_outputs(psb_intel_sdvo, 0);
+ if (0)
+ psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);
+
+ if (mode == DRM_MODE_DPMS_OFF) {
+ temp = REG_READ(psb_intel_sdvo->sdvo_reg);
+ if ((temp & SDVO_ENABLE) != 0) {
+ psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp & ~SDVO_ENABLE);
+ }
+ }
+ } else {
+ bool input1, input2;
+ int i;
+ u8 status;
+
+ temp = REG_READ(psb_intel_sdvo->sdvo_reg);
+ if ((temp & SDVO_ENABLE) == 0)
+ psb_intel_sdvo_write_sdvox(psb_intel_sdvo, temp | SDVO_ENABLE);
+ for (i = 0; i < 2; i++)
+ psb_intel_wait_for_vblank(dev);
+
+ status = psb_intel_sdvo_get_trained_inputs(psb_intel_sdvo, &input1, &input2);
+ /* Warn if the device reported failure to sync.
+ * A lot of SDVO devices fail to notify of sync, but it's a given
+ * that if the status is a success, we succeeded.
+ */
+ if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
+ DRM_DEBUG_KMS("First %s output reported failure to "
+ "sync\n", SDVO_NAME(psb_intel_sdvo));
+ }
+
+ if (0)
+ psb_intel_sdvo_set_encoder_power_state(psb_intel_sdvo, mode);
+ psb_intel_sdvo_set_active_outputs(psb_intel_sdvo, psb_intel_sdvo->attached_output);
+ }
+ return;
+}
+
+static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
+ if (psb_intel_sdvo->pixel_clock_min > mode->clock)
+ return MODE_CLOCK_LOW;
+
+ if (psb_intel_sdvo->pixel_clock_max < mode->clock)
+ return MODE_CLOCK_HIGH;
+
+ if (psb_intel_sdvo->is_lvds) {
+ if (mode->hdisplay > psb_intel_sdvo->sdvo_lvds_fixed_mode->hdisplay)
+ return MODE_PANEL;
+
+ if (mode->vdisplay > psb_intel_sdvo->sdvo_lvds_fixed_mode->vdisplay)
+ return MODE_PANEL;
+ }
+
+ /* We assume worst case scenario of 32 bpp here, since we don't know */
+ if ((ALIGN(mode->hdisplay * 4, 64) * mode->vdisplay) >
+ dev_priv->vram_stolen_size)
+ return MODE_MEM;
+
+ return MODE_OK;
+}
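
The MODE_MEM rejection sizes a worst-case 32 bpp framebuffer with the stride rounded up to 64 bytes and compares it against stolen memory; a 1920x1080 mode, for instance, needs ALIGN(1920 * 4, 64) * 1080 = 7680 * 1080 = 8294400 bytes, just under 8 MiB. The same check in isolation (the helper name is invented for the example):

/* Worst-case sizing used by the MODE_MEM check above, in isolation. */
static bool example_fits_in_stolen(unsigned int hdisplay, unsigned int vdisplay,
				   unsigned long vram_stolen_size)
{
	unsigned long stride = ALIGN(hdisplay * 4, 64);	/* 1920*4 = 7680, already 64-aligned */

	return stride * vdisplay <= vram_stolen_size;
}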
+
+static bool psb_intel_sdvo_get_capabilities(struct psb_intel_sdvo *psb_intel_sdvo, struct psb_intel_sdvo_caps *caps)
+{
+ BUILD_BUG_ON(sizeof(*caps) != 8);
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_DEVICE_CAPS,
+ caps, sizeof(*caps)))
+ return false;
+
+ DRM_DEBUG_KMS("SDVO capabilities:\n"
+ " vendor_id: %d\n"
+ " device_id: %d\n"
+ " device_rev_id: %d\n"
+ " sdvo_version_major: %d\n"
+ " sdvo_version_minor: %d\n"
+ " sdvo_inputs_mask: %d\n"
+ " smooth_scaling: %d\n"
+ " sharp_scaling: %d\n"
+ " up_scaling: %d\n"
+ " down_scaling: %d\n"
+ " stall_support: %d\n"
+ " output_flags: %d\n",
+ caps->vendor_id,
+ caps->device_id,
+ caps->device_rev_id,
+ caps->sdvo_version_major,
+ caps->sdvo_version_minor,
+ caps->sdvo_inputs_mask,
+ caps->smooth_scaling,
+ caps->sharp_scaling,
+ caps->up_scaling,
+ caps->down_scaling,
+ caps->stall_support,
+ caps->output_flags);
+
+ return true;
+}
+
+/* No use! */
+#if 0
+struct drm_connector* psb_intel_sdvo_find(struct drm_device *dev, int sdvoB)
+{
+ struct drm_connector *connector = NULL;
+ struct psb_intel_sdvo *iout = NULL;
+ struct psb_intel_sdvo *sdvo;
+
+ /* find the sdvo connector */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ iout = to_psb_intel_sdvo(connector);
+
+ if (iout->type != INTEL_OUTPUT_SDVO)
+ continue;
+
+ sdvo = iout->dev_priv;
+
+ if (sdvo->sdvo_reg == SDVOB && sdvoB)
+ return connector;
+
+ if (sdvo->sdvo_reg == SDVOC && !sdvoB)
+ return connector;
+
+ }
+
+ return NULL;
+}
+
+int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector)
+{
+ u8 response[2];
+ u8 status;
+ struct psb_intel_sdvo *psb_intel_sdvo;
+ DRM_DEBUG_KMS("\n");
+
+ if (!connector)
+ return 0;
+
+ psb_intel_sdvo = to_psb_intel_sdvo(connector);
+
+ return psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT,
+ &response, 2) && response[0];
+}
+
+void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
+{
+ u8 response[2];
+ u8 status;
+ struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(connector);
+
+ psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+ psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
+
+ if (on) {
+ psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0);
+ status = psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
+
+ psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+ } else {
+ response[0] = 0;
+ response[1] = 0;
+ psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2);
+ }
+
+ psb_intel_sdvo_write_cmd(psb_intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0);
+ psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2);
+}
+#endif
+
+static bool
+psb_intel_sdvo_multifunc_encoder(struct psb_intel_sdvo *psb_intel_sdvo)
+{
+ /* Is there more than one type of output? */
+ int caps = psb_intel_sdvo->caps.output_flags & 0xf;
+ return caps & -caps;
+}
+
+static struct edid *
+psb_intel_sdvo_get_edid(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo *sdvo = intel_attached_sdvo(connector);
+ return drm_get_edid(connector, &sdvo->ddc);
+}
+
+/* Mac mini hack -- use the same DDC as the analog connector */
+static struct edid *
+psb_intel_sdvo_get_analog_edid(struct drm_connector *connector)
+{
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
+
+ return drm_get_edid(connector,
+ &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+}
+
+enum drm_connector_status
+psb_intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+ enum drm_connector_status status;
+ struct edid *edid;
+
+ edid = psb_intel_sdvo_get_edid(connector);
+
+ if (edid == NULL && psb_intel_sdvo_multifunc_encoder(psb_intel_sdvo)) {
+ u8 ddc, saved_ddc = psb_intel_sdvo->ddc_bus;
+
+ /*
+ * Don't use the 1 as the argument of DDC bus switch to get
+ * the EDID. It is used for SDVO SPD ROM.
+ */
+ for (ddc = psb_intel_sdvo->ddc_bus >> 1; ddc > 1; ddc >>= 1) {
+ psb_intel_sdvo->ddc_bus = ddc;
+ edid = psb_intel_sdvo_get_edid(connector);
+ if (edid)
+ break;
+ }
+ /*
+ * If we found the EDID on the other bus,
+ * assume that is the correct DDC bus.
+ */
+ if (edid == NULL)
+ psb_intel_sdvo->ddc_bus = saved_ddc;
+ }
+
+ /*
+ * When there is no edid and no monitor is connected with VGA
+ * port, try to use the CRT ddc to read the EDID for DVI-connector.
+ */
+ if (edid == NULL)
+ edid = psb_intel_sdvo_get_analog_edid(connector);
+
+ status = connector_status_unknown;
+ if (edid != NULL) {
+ /* DDC bus is shared, match EDID to connector type */
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ status = connector_status_connected;
+ if (psb_intel_sdvo->is_hdmi) {
+ psb_intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
+ psb_intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
+ }
+ } else
+ status = connector_status_disconnected;
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+ }
+
+ if (status == connector_status_connected) {
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+ if (psb_intel_sdvo_connector->force_audio)
+ psb_intel_sdvo->has_hdmi_audio = psb_intel_sdvo_connector->force_audio > 0;
+ }
+
+ return status;
+}
+
+static enum drm_connector_status
+psb_intel_sdvo_detect(struct drm_connector *connector, bool force)
+{
+ uint16_t response;
+ struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+ enum drm_connector_status ret;
+
+ if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo,
+ SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
+ return connector_status_unknown;
+
+ /* add 30ms delay when the output type might be TV */
+ if (psb_intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
+ mdelay(30);
+
+ if (!psb_intel_sdvo_read_response(psb_intel_sdvo, &response, 2))
+ return connector_status_unknown;
+
+ DRM_DEBUG_KMS("SDVO response %d %d [%x]\n",
+ response & 0xff, response >> 8,
+ psb_intel_sdvo_connector->output_flag);
+
+ if (response == 0)
+ return connector_status_disconnected;
+
+ psb_intel_sdvo->attached_output = response;
+
+ psb_intel_sdvo->has_hdmi_monitor = false;
+ psb_intel_sdvo->has_hdmi_audio = false;
+
+ if ((psb_intel_sdvo_connector->output_flag & response) == 0)
+ ret = connector_status_disconnected;
+ else if (IS_TMDS(psb_intel_sdvo_connector))
+ ret = psb_intel_sdvo_hdmi_sink_detect(connector);
+ else {
+ struct edid *edid;
+
+ /* if we have an edid check it matches the connection */
+ edid = psb_intel_sdvo_get_edid(connector);
+ if (edid == NULL)
+ edid = psb_intel_sdvo_get_analog_edid(connector);
+ if (edid != NULL) {
+ if (edid->input & DRM_EDID_INPUT_DIGITAL)
+ ret = connector_status_disconnected;
+ else
+ ret = connector_status_connected;
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+ } else
+ ret = connector_status_connected;
+ }
+
+ /* May need to update encoder flags, e.g. the TV clock flag for SDVO TV */
+ if (ret == connector_status_connected) {
+ psb_intel_sdvo->is_tv = false;
+ psb_intel_sdvo->is_lvds = false;
+ psb_intel_sdvo->base.needs_tv_clock = false;
+
+ if (response & SDVO_TV_MASK) {
+ psb_intel_sdvo->is_tv = true;
+ psb_intel_sdvo->base.needs_tv_clock = true;
+ }
+ if (response & SDVO_LVDS_MASK)
+ psb_intel_sdvo->is_lvds = psb_intel_sdvo->sdvo_lvds_fixed_mode != NULL;
+ }
+
+ return ret;
+}
+
+static void psb_intel_sdvo_get_ddc_modes(struct drm_connector *connector)
+{
+ struct edid *edid;
+
+ /* set the bus switch and get the modes */
+ edid = psb_intel_sdvo_get_edid(connector);
+
+ /*
+ * Mac mini hack. On this device, the DVI-I connector shares one DDC
+ * link between analog and digital outputs. So, if the regular SDVO
+ * DDC fails, check to see if the analog output is disconnected, in
+ * which case we'll look there for the digital DDC data.
+ */
+ if (edid == NULL)
+ edid = psb_intel_sdvo_get_analog_edid(connector);
+
+ if (edid != NULL) {
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+ bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+ bool connector_is_digital = !!IS_TMDS(psb_intel_sdvo_connector);
+
+ if (connector_is_digital == monitor_is_digital) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ drm_add_edid_modes(connector, edid);
+ }
+
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+ }
+}
+
+/*
+ * Set of SDVO TV modes.
+ * Note! This is in reply order (see loop in get_tv_modes).
+ * XXX: all 60Hz refresh?
+ */
+static const struct drm_display_mode sdvo_tv_modes[] = {
+ { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815, 320, 321, 384,
+ 416, 0, 200, 201, 232, 233, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814, 320, 321, 384,
+ 416, 0, 240, 241, 272, 273, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910, 400, 401, 464,
+ 496, 0, 300, 301, 332, 333, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913, 640, 641, 704,
+ 736, 0, 350, 351, 382, 383, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121, 640, 641, 704,
+ 736, 0, 400, 401, 432, 433, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 22654, 640, 641, 704,
+ 736, 0, 480, 481, 512, 513, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624, 704, 705, 768,
+ 800, 0, 480, 481, 512, 513, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232, 704, 705, 768,
+ 800, 0, 576, 577, 608, 609, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751, 720, 721, 784,
+ 816, 0, 350, 351, 382, 383, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199, 720, 721, 784,
+ 816, 0, 400, 401, 432, 433, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116, 720, 721, 784,
+ 816, 0, 480, 481, 512, 513, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054, 720, 721, 784,
+ 816, 0, 540, 541, 572, 573, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816, 720, 721, 784,
+ 816, 0, 576, 577, 608, 609, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570, 768, 769, 832,
+ 864, 0, 576, 577, 608, 609, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030, 800, 801, 864,
+ 896, 0, 600, 601, 632, 633, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581, 832, 833, 896,
+ 928, 0, 624, 625, 656, 657, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707, 920, 921, 984,
+ 1016, 0, 766, 767, 798, 799, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827, 1024, 1025, 1088,
+ 1120, 0, 768, 769, 800, 801, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+ { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265, 1280, 1281, 1344,
+ 1376, 0, 1024, 1025, 1056, 1057, 0,
+ DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+};
+
+static void psb_intel_sdvo_get_tv_modes(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+ struct psb_intel_sdvo_sdtv_resolution_request tv_res;
+ uint32_t reply = 0, format_map = 0;
+ int i;
+
+ /* Read the list of supported input resolutions for the selected TV
+ * format.
+ */
+ format_map = 1 << psb_intel_sdvo->tv_format_index;
+ memcpy(&tv_res, &format_map,
+ min(sizeof(format_map), sizeof(struct psb_intel_sdvo_sdtv_resolution_request)));
+
+ if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo, psb_intel_sdvo->attached_output))
+ return;
+
+ BUILD_BUG_ON(sizeof(tv_res) != 3);
+ if (!psb_intel_sdvo_write_cmd(psb_intel_sdvo,
+ SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
+ &tv_res, sizeof(tv_res)))
+ return;
+ if (!psb_intel_sdvo_read_response(psb_intel_sdvo, &reply, 3))
+ return;
+
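+ /* Each set bit in the 3-byte reply selects the matching entry in sdvo_tv_modes */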
+ for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
+ if (reply & (1 << i)) {
+ struct drm_display_mode *nmode;
+ nmode = drm_mode_duplicate(connector->dev,
+ &sdvo_tv_modes[i]);
+ if (nmode)
+ drm_mode_probed_add(connector, nmode);
+ }
+}
+
+static void psb_intel_sdvo_get_lvds_modes(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ struct drm_display_mode *newmode;
+
+ /*
+ * Attempt to get the mode list from DDC.
+ * Assume that the preferred modes are
+ * arranged in priority order.
+ */
+ psb_intel_ddc_get_modes(connector, psb_intel_sdvo->i2c);
+ if (!list_empty(&connector->probed_modes))
+ goto end;
+
+ /* Fetch modes from VBT */
+ if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
+ newmode = drm_mode_duplicate(connector->dev,
+ dev_priv->sdvo_lvds_vbt_mode);
+ if (newmode != NULL) {
+ /* Guarantee the mode is preferred */
+ newmode->type = (DRM_MODE_TYPE_PREFERRED |
+ DRM_MODE_TYPE_DRIVER);
+ drm_mode_probed_add(connector, newmode);
+ }
+ }
+
+end:
+ list_for_each_entry(newmode, &connector->probed_modes, head) {
+ if (newmode->type & DRM_MODE_TYPE_PREFERRED) {
+ psb_intel_sdvo->sdvo_lvds_fixed_mode =
+ drm_mode_duplicate(connector->dev, newmode);
+
+ drm_mode_set_crtcinfo(psb_intel_sdvo->sdvo_lvds_fixed_mode,
+ 0);
+
+ psb_intel_sdvo->is_lvds = true;
+ break;
+ }
+ }
+
+}
+
+static int psb_intel_sdvo_get_modes(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+
+ if (IS_TV(psb_intel_sdvo_connector))
+ psb_intel_sdvo_get_tv_modes(connector);
+ else if (IS_LVDS(psb_intel_sdvo_connector))
+ psb_intel_sdvo_get_lvds_modes(connector);
+ else
+ psb_intel_sdvo_get_ddc_modes(connector);
+
+ return !list_empty(&connector->probed_modes);
+}
+
+static void
+psb_intel_sdvo_destroy_enhance_property(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+ struct drm_device *dev = connector->dev;
+
+ if (psb_intel_sdvo_connector->left)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->left);
+ if (psb_intel_sdvo_connector->right)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->right);
+ if (psb_intel_sdvo_connector->top)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->top);
+ if (psb_intel_sdvo_connector->bottom)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->bottom);
+ if (psb_intel_sdvo_connector->hpos)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->hpos);
+ if (psb_intel_sdvo_connector->vpos)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->vpos);
+ if (psb_intel_sdvo_connector->saturation)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->saturation);
+ if (psb_intel_sdvo_connector->contrast)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->contrast);
+ if (psb_intel_sdvo_connector->hue)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->hue);
+ if (psb_intel_sdvo_connector->sharpness)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->sharpness);
+ if (psb_intel_sdvo_connector->flicker_filter)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter);
+ if (psb_intel_sdvo_connector->flicker_filter_2d)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter_2d);
+ if (psb_intel_sdvo_connector->flicker_filter_adaptive)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->flicker_filter_adaptive);
+ if (psb_intel_sdvo_connector->tv_luma_filter)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->tv_luma_filter);
+ if (psb_intel_sdvo_connector->tv_chroma_filter)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->tv_chroma_filter);
+ if (psb_intel_sdvo_connector->dot_crawl)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->dot_crawl);
+ if (psb_intel_sdvo_connector->brightness)
+ drm_property_destroy(dev, psb_intel_sdvo_connector->brightness);
+}
+
+static void psb_intel_sdvo_destroy(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+
+ if (psb_intel_sdvo_connector->tv_format)
+ drm_property_destroy(connector->dev,
+ psb_intel_sdvo_connector->tv_format);
+
+ psb_intel_sdvo_destroy_enhance_property(connector);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(connector);
+}
+
+static bool psb_intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+ struct edid *edid;
+ bool has_audio = false;
+
+ if (!psb_intel_sdvo->is_hdmi)
+ return false;
+
+ edid = psb_intel_sdvo_get_edid(connector);
+ if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
+ has_audio = drm_detect_monitor_audio(edid);
+
+ return has_audio;
+}
+
+static int
+psb_intel_sdvo_set_property(struct drm_connector *connector,
+ struct drm_property *property,
+ uint64_t val)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = intel_attached_sdvo(connector);
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector = to_psb_intel_sdvo_connector(connector);
+ struct drm_psb_private *dev_priv = connector->dev->dev_private;
+ uint16_t temp_value;
+ uint8_t cmd;
+ int ret;
+
+ ret = drm_connector_property_set_value(connector, property, val);
+ if (ret)
+ return ret;
+
+ if (property == dev_priv->force_audio_property) {
+ int i = val;
+ bool has_audio;
+
+ if (i == psb_intel_sdvo_connector->force_audio)
+ return 0;
+
+ psb_intel_sdvo_connector->force_audio = i;
+
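+ /* 0 means auto-detect; a positive value forces audio on, negative forces it off */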
+ if (i == 0)
+ has_audio = psb_intel_sdvo_detect_hdmi_audio(connector);
+ else
+ has_audio = i > 0;
+
+ if (has_audio == psb_intel_sdvo->has_hdmi_audio)
+ return 0;
+
+ psb_intel_sdvo->has_hdmi_audio = has_audio;
+ goto done;
+ }
+
+ if (property == dev_priv->broadcast_rgb_property) {
+ if (val == !!psb_intel_sdvo->color_range)
+ return 0;
+
+ psb_intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
+ goto done;
+ }
+
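+/*
+ * CHECK_PROPERTY: if the changed DRM property matches the named SDVO
+ * enhancement, validate the new value against the cached maximum, select
+ * the corresponding SDVO_CMD_SET_* opcode and jump to set_value.
+ */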
+#define CHECK_PROPERTY(name, NAME) \
+ if (psb_intel_sdvo_connector->name == property) { \
+ if (psb_intel_sdvo_connector->cur_##name == temp_value) return 0; \
+ if (psb_intel_sdvo_connector->max_##name < temp_value) return -EINVAL; \
+ cmd = SDVO_CMD_SET_##NAME; \
+ psb_intel_sdvo_connector->cur_##name = temp_value; \
+ goto set_value; \
+ }
+
+ if (property == psb_intel_sdvo_connector->tv_format) {
+ if (val >= TV_FORMAT_NUM)
+ return -EINVAL;
+
+ if (psb_intel_sdvo->tv_format_index ==
+ psb_intel_sdvo_connector->tv_format_supported[val])
+ return 0;
+
+ psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[val];
+ goto done;
+ } else if (IS_TV_OR_LVDS(psb_intel_sdvo_connector)) {
+ temp_value = val;
+ if (psb_intel_sdvo_connector->left == property) {
+ drm_connector_property_set_value(connector,
+ psb_intel_sdvo_connector->right, val);
+ if (psb_intel_sdvo_connector->left_margin == temp_value)
+ return 0;
+
+ psb_intel_sdvo_connector->left_margin = temp_value;
+ psb_intel_sdvo_connector->right_margin = temp_value;
+ temp_value = psb_intel_sdvo_connector->max_hscan -
+ psb_intel_sdvo_connector->left_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_H;
+ goto set_value;
+ } else if (psb_intel_sdvo_connector->right == property) {
+ drm_connector_property_set_value(connector,
+ psb_intel_sdvo_connector->left, val);
+ if (psb_intel_sdvo_connector->right_margin == temp_value)
+ return 0;
+
+ psb_intel_sdvo_connector->left_margin = temp_value;
+ psb_intel_sdvo_connector->right_margin = temp_value;
+ temp_value = psb_intel_sdvo_connector->max_hscan -
+ psb_intel_sdvo_connector->left_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_H;
+ goto set_value;
+ } else if (psb_intel_sdvo_connector->top == property) {
+ drm_connector_property_set_value(connector,
+ psb_intel_sdvo_connector->bottom, val);
+ if (psb_intel_sdvo_connector->top_margin == temp_value)
+ return 0;
+
+ psb_intel_sdvo_connector->top_margin = temp_value;
+ psb_intel_sdvo_connector->bottom_margin = temp_value;
+ temp_value = psb_intel_sdvo_connector->max_vscan -
+ psb_intel_sdvo_connector->top_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_V;
+ goto set_value;
+ } else if (psb_intel_sdvo_connector->bottom == property) {
+ drm_connector_property_set_value(connector,
+ psb_intel_sdvo_connector->top, val);
+ if (psb_intel_sdvo_connector->bottom_margin == temp_value)
+ return 0;
+
+ psb_intel_sdvo_connector->top_margin = temp_value;
+ psb_intel_sdvo_connector->bottom_margin = temp_value;
+ temp_value = psb_intel_sdvo_connector->max_vscan -
+ psb_intel_sdvo_connector->top_margin;
+ cmd = SDVO_CMD_SET_OVERSCAN_V;
+ goto set_value;
+ }
+ CHECK_PROPERTY(hpos, HPOS)
+ CHECK_PROPERTY(vpos, VPOS)
+ CHECK_PROPERTY(saturation, SATURATION)
+ CHECK_PROPERTY(contrast, CONTRAST)
+ CHECK_PROPERTY(hue, HUE)
+ CHECK_PROPERTY(brightness, BRIGHTNESS)
+ CHECK_PROPERTY(sharpness, SHARPNESS)
+ CHECK_PROPERTY(flicker_filter, FLICKER_FILTER)
+ CHECK_PROPERTY(flicker_filter_2d, FLICKER_FILTER_2D)
+ CHECK_PROPERTY(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE)
+ CHECK_PROPERTY(tv_chroma_filter, TV_CHROMA_FILTER)
+ CHECK_PROPERTY(tv_luma_filter, TV_LUMA_FILTER)
+ CHECK_PROPERTY(dot_crawl, DOT_CRAWL)
+ }
+
+ return -EINVAL; /* unknown property */
+
+set_value:
+ if (!psb_intel_sdvo_set_value(psb_intel_sdvo, cmd, &temp_value, 2))
+ return -EIO;
+
+done:
+ if (psb_intel_sdvo->base.base.crtc) {
+ struct drm_crtc *crtc = psb_intel_sdvo->base.base.crtc;
+ drm_crtc_helper_set_mode(crtc, &crtc->mode, crtc->x,
+ crtc->y, crtc->fb);
+ }
+
+ return 0;
+#undef CHECK_PROPERTY
+}
+
+static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
+ .dpms = psb_intel_sdvo_dpms,
+ .mode_fixup = psb_intel_sdvo_mode_fixup,
+ .prepare = psb_intel_encoder_prepare,
+ .mode_set = psb_intel_sdvo_mode_set,
+ .commit = psb_intel_encoder_commit,
+};
+
+static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
+ .dpms = drm_helper_connector_dpms,
+ .detect = psb_intel_sdvo_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .set_property = psb_intel_sdvo_set_property,
+ .destroy = psb_intel_sdvo_destroy,
+};
+
+static const struct drm_connector_helper_funcs psb_intel_sdvo_connector_helper_funcs = {
+ .get_modes = psb_intel_sdvo_get_modes,
+ .mode_valid = psb_intel_sdvo_mode_valid,
+ .best_encoder = psb_intel_best_encoder,
+};
+
+static void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
+{
+ struct psb_intel_sdvo *psb_intel_sdvo = to_psb_intel_sdvo(encoder);
+
+ if (psb_intel_sdvo->sdvo_lvds_fixed_mode != NULL)
+ drm_mode_destroy(encoder->dev,
+ psb_intel_sdvo->sdvo_lvds_fixed_mode);
+
+ i2c_del_adapter(&psb_intel_sdvo->ddc);
+ psb_intel_encoder_destroy(encoder);
+}
+
+static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
+ .destroy = psb_intel_sdvo_enc_destroy,
+};
+
+static void
+psb_intel_sdvo_guess_ddc_bus(struct psb_intel_sdvo *sdvo)
+{
+ /* FIXME: At the moment, ddc_bus = 2 is the only thing that works.
+ * We need to figure out whether this is true for all available Poulsbo
+ * hardware, or whether we need to fiddle with the disabled guessing
+ * code below. The problem might go away if we can parse SDVO mappings
+ * from the BIOS. */
+ sdvo->ddc_bus = 2;
+
+#if 0
+ uint16_t mask = 0;
+ unsigned int num_bits;
+
+ /* Make a mask of outputs less than or equal to our own priority in the
+ * list.
+ */
+ switch (sdvo->controlled_output) {
+ case SDVO_OUTPUT_LVDS1:
+ mask |= SDVO_OUTPUT_LVDS1;
+ case SDVO_OUTPUT_LVDS0:
+ mask |= SDVO_OUTPUT_LVDS0;
+ case SDVO_OUTPUT_TMDS1:
+ mask |= SDVO_OUTPUT_TMDS1;
+ case SDVO_OUTPUT_TMDS0:
+ mask |= SDVO_OUTPUT_TMDS0;
+ case SDVO_OUTPUT_RGB1:
+ mask |= SDVO_OUTPUT_RGB1;
+ case SDVO_OUTPUT_RGB0:
+ mask |= SDVO_OUTPUT_RGB0;
+ break;
+ }
+
+ /* Count bits to find what number we are in the priority list. */
+ mask &= sdvo->caps.output_flags;
+ num_bits = hweight16(mask);
+ /* If more than 3 outputs, default to DDC bus 3 for now. */
+ if (num_bits > 3)
+ num_bits = 3;
+
+ /* Corresponds to SDVO_CONTROL_BUS_DDCx */
+ sdvo->ddc_bus = 1 << num_bits;
+#endif
+}
+
+/**
+ * Choose the appropriate DDC bus for the control bus switch command for this
+ * SDVO output based on the controlled output.
+ *
+ * DDC bus number assignment is in a priority order of RGB outputs, then TMDS
+ * outputs, then LVDS outputs.
+ */
+static void
+psb_intel_sdvo_select_ddc_bus(struct drm_psb_private *dev_priv,
+ struct psb_intel_sdvo *sdvo, u32 reg)
+{
+ struct sdvo_device_mapping *mapping;
+
+ if (IS_SDVOB(reg))
+ mapping = &(dev_priv->sdvo_mappings[0]);
+ else
+ mapping = &(dev_priv->sdvo_mappings[1]);
+
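+ /* The high nibble of the BIOS ddc_pin selects the SDVO DDC bus bit */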
+ if (mapping->initialized)
+ sdvo->ddc_bus = 1 << ((mapping->ddc_pin & 0xf0) >> 4);
+ else
+ psb_intel_sdvo_guess_ddc_bus(sdvo);
+}
+
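+/*
+ * Pick the GMBUS pin and speed used to talk to the SDVO device, preferring
+ * the BIOS-provided SDVO mapping and falling back to port DPB at 1MHz.
+ */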
+static void
+psb_intel_sdvo_select_i2c_bus(struct drm_psb_private *dev_priv,
+ struct psb_intel_sdvo *sdvo, u32 reg)
+{
+ struct sdvo_device_mapping *mapping;
+ u8 pin, speed;
+
+ if (IS_SDVOB(reg))
+ mapping = &dev_priv->sdvo_mappings[0];
+ else
+ mapping = &dev_priv->sdvo_mappings[1];
+
+ pin = GMBUS_PORT_DPB;
+ speed = GMBUS_RATE_1MHZ >> 8;
+ if (mapping->initialized) {
+ pin = mapping->i2c_pin;
+ speed = mapping->i2c_speed;
+ }
+
+ if (pin < GMBUS_NUM_PORTS) {
+ sdvo->i2c = &dev_priv->gmbus[pin].adapter;
+ gma_intel_gmbus_set_speed(sdvo->i2c, speed);
+ gma_intel_gmbus_force_bit(sdvo->i2c, true);
+ } else
+ sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
+}
+
+static bool
+psb_intel_sdvo_is_hdmi_connector(struct psb_intel_sdvo *psb_intel_sdvo, int device)
+{
+ return psb_intel_sdvo_check_supp_encode(psb_intel_sdvo);
+}
+
+static u8
+psb_intel_sdvo_get_slave_addr(struct drm_device *dev, int sdvo_reg)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct sdvo_device_mapping *my_mapping, *other_mapping;
+
+ if (IS_SDVOB(sdvo_reg)) {
+ my_mapping = &dev_priv->sdvo_mappings[0];
+ other_mapping = &dev_priv->sdvo_mappings[1];
+ } else {
+ my_mapping = &dev_priv->sdvo_mappings[1];
+ other_mapping = &dev_priv->sdvo_mappings[0];
+ }
+
+ /* If the BIOS described our SDVO device, take advantage of it. */
+ if (my_mapping->slave_addr)
+ return my_mapping->slave_addr;
+
+ /* If the BIOS only described a different SDVO device, use the
+ * address that it isn't using.
+ */
+ if (other_mapping->slave_addr) {
+ if (other_mapping->slave_addr == 0x70)
+ return 0x72;
+ else
+ return 0x70;
+ }
+
+ /* No SDVO device info was found for either DVO port, so fall back
+ * to the mapping assumption we had before BIOS parsing.
+ */
+ if (IS_SDVOB(sdvo_reg))
+ return 0x70;
+ else
+ return 0x72;
+}
+
+static void
+psb_intel_sdvo_connector_init(struct psb_intel_sdvo_connector *connector,
+ struct psb_intel_sdvo *encoder)
+{
+ drm_connector_init(encoder->base.base.dev,
+ &connector->base.base,
+ &psb_intel_sdvo_connector_funcs,
+ connector->base.base.connector_type);
+
+ drm_connector_helper_add(&connector->base.base,
+ &psb_intel_sdvo_connector_helper_funcs);
+
+ connector->base.base.interlace_allowed = 0;
+ connector->base.base.doublescan_allowed = 0;
+ connector->base.base.display_info.subpixel_order = SubPixelHorizontalRGB;
+
+ psb_intel_connector_attach_encoder(&connector->base, &encoder->base);
+ drm_sysfs_connector_add(&connector->base.base);
+}
+
+static void
+psb_intel_sdvo_add_hdmi_properties(struct psb_intel_sdvo_connector *connector)
+{
+ /* FIXME: We don't support HDMI at the moment
+ struct drm_device *dev = connector->base.base.dev;
+
+ intel_attach_force_audio_property(&connector->base.base);
+ if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
+ intel_attach_broadcast_rgb_property(&connector->base.base);
+ */
+}
+
+static bool
+psb_intel_sdvo_dvi_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
+{
+ struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct psb_intel_connector *intel_connector;
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
+
+ psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
+ if (!psb_intel_sdvo_connector)
+ return false;
+
+ if (device == 0) {
+ psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0;
+ psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0;
+ } else if (device == 1) {
+ psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1;
+ psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1;
+ }
+
+ intel_connector = &psb_intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ // connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_DVID;
+
+ if (psb_intel_sdvo_is_hdmi_connector(psb_intel_sdvo, device)) {
+ connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+ psb_intel_sdvo->is_hdmi = true;
+ }
+ psb_intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT));
+
+ psb_intel_sdvo_connector_init(psb_intel_sdvo_connector, psb_intel_sdvo);
+ if (psb_intel_sdvo->is_hdmi)
+ psb_intel_sdvo_add_hdmi_properties(psb_intel_sdvo_connector);
+
+ return true;
+}
+
+static bool
+psb_intel_sdvo_tv_init(struct psb_intel_sdvo *psb_intel_sdvo, int type)
+{
+ struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct psb_intel_connector *intel_connector;
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
+
+ psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
+ if (!psb_intel_sdvo_connector)
+ return false;
+
+ intel_connector = &psb_intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO;
+
+ psb_intel_sdvo->controlled_output |= type;
+ psb_intel_sdvo_connector->output_flag = type;
+
+ psb_intel_sdvo->is_tv = true;
+ psb_intel_sdvo->base.needs_tv_clock = true;
+ psb_intel_sdvo->base.clone_mask = 1 << INTEL_SDVO_TV_CLONE_BIT;
+
+ psb_intel_sdvo_connector_init(psb_intel_sdvo_connector, psb_intel_sdvo);
+
+ if (!psb_intel_sdvo_tv_create_property(psb_intel_sdvo, psb_intel_sdvo_connector, type))
+ goto err;
+
+ if (!psb_intel_sdvo_create_enhance_property(psb_intel_sdvo, psb_intel_sdvo_connector))
+ goto err;
+
+ return true;
+
+err:
+ psb_intel_sdvo_destroy(connector);
+ return false;
+}
+
+static bool
+psb_intel_sdvo_analog_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
+{
+ struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct psb_intel_connector *intel_connector;
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
+
+ psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
+ if (!psb_intel_sdvo_connector)
+ return false;
+
+ intel_connector = &psb_intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ encoder->encoder_type = DRM_MODE_ENCODER_DAC;
+ connector->connector_type = DRM_MODE_CONNECTOR_VGA;
+
+ if (device == 0) {
+ psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0;
+ psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0;
+ } else if (device == 1) {
+ psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1;
+ psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1;
+ }
+
+ psb_intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
+ (1 << INTEL_ANALOG_CLONE_BIT));
+
+ psb_intel_sdvo_connector_init(psb_intel_sdvo_connector,
+ psb_intel_sdvo);
+ return true;
+}
+
+static bool
+psb_intel_sdvo_lvds_init(struct psb_intel_sdvo *psb_intel_sdvo, int device)
+{
+ struct drm_encoder *encoder = &psb_intel_sdvo->base.base;
+ struct drm_connector *connector;
+ struct psb_intel_connector *intel_connector;
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector;
+
+ psb_intel_sdvo_connector = kzalloc(sizeof(struct psb_intel_sdvo_connector), GFP_KERNEL);
+ if (!psb_intel_sdvo_connector)
+ return false;
+
+ intel_connector = &psb_intel_sdvo_connector->base;
+ connector = &intel_connector->base;
+ encoder->encoder_type = DRM_MODE_ENCODER_LVDS;
+ connector->connector_type = DRM_MODE_CONNECTOR_LVDS;
+
+ if (device == 0) {
+ psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0;
+ psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0;
+ } else if (device == 1) {
+ psb_intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1;
+ psb_intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1;
+ }
+
+ psb_intel_sdvo->base.clone_mask = ((1 << INTEL_ANALOG_CLONE_BIT) |
+ (1 << INTEL_SDVO_LVDS_CLONE_BIT));
+
+ psb_intel_sdvo_connector_init(psb_intel_sdvo_connector, psb_intel_sdvo);
+ if (!psb_intel_sdvo_create_enhance_property(psb_intel_sdvo, psb_intel_sdvo_connector))
+ goto err;
+
+ return true;
+
+err:
+ psb_intel_sdvo_destroy(connector);
+ return false;
+}
+
+static bool
+psb_intel_sdvo_output_setup(struct psb_intel_sdvo *psb_intel_sdvo, uint16_t flags)
+{
+ psb_intel_sdvo->is_tv = false;
+ psb_intel_sdvo->base.needs_tv_clock = false;
+ psb_intel_sdvo->is_lvds = false;
+
+ /* Per the SDVO spec, an XXX1 function block may not exist unless the XXX0 function does. */
+
+ if (flags & SDVO_OUTPUT_TMDS0)
+ if (!psb_intel_sdvo_dvi_init(psb_intel_sdvo, 0))
+ return false;
+
+ if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK)
+ if (!psb_intel_sdvo_dvi_init(psb_intel_sdvo, 1))
+ return false;
+
+ /* TV has no XXX1 function block */
+ if (flags & SDVO_OUTPUT_SVID0)
+ if (!psb_intel_sdvo_tv_init(psb_intel_sdvo, SDVO_OUTPUT_SVID0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_CVBS0)
+ if (!psb_intel_sdvo_tv_init(psb_intel_sdvo, SDVO_OUTPUT_CVBS0))
+ return false;
+
+ if (flags & SDVO_OUTPUT_RGB0)
+ if (!psb_intel_sdvo_analog_init(psb_intel_sdvo, 0))
+ return false;
+
+ if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK)
+ if (!psb_intel_sdvo_analog_init(psb_intel_sdvo, 1))
+ return false;
+
+ if (flags & SDVO_OUTPUT_LVDS0)
+ if (!psb_intel_sdvo_lvds_init(psb_intel_sdvo, 0))
+ return false;
+
+ if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK)
+ if (!psb_intel_sdvo_lvds_init(psb_intel_sdvo, 1))
+ return false;
+
+ if ((flags & SDVO_OUTPUT_MASK) == 0) {
+ unsigned char bytes[2];
+
+ psb_intel_sdvo->controlled_output = 0;
+ memcpy(bytes, &psb_intel_sdvo->caps.output_flags, 2);
+ DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n",
+ SDVO_NAME(psb_intel_sdvo),
+ bytes[0], bytes[1]);
+ return false;
+ }
+ psb_intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1);
+
+ return true;
+}
+
+static bool psb_intel_sdvo_tv_create_property(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
+ int type)
+{
+ struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+ struct psb_intel_sdvo_tv_format format;
+ uint32_t format_map, i;
+
+ if (!psb_intel_sdvo_set_target_output(psb_intel_sdvo, type))
+ return false;
+
+ BUILD_BUG_ON(sizeof(format) != 6);
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_SUPPORTED_TV_FORMATS,
+ &format, sizeof(format)))
+ return false;
+
+ memcpy(&format_map, &format, min(sizeof(format_map), sizeof(format)));
+
+ if (format_map == 0)
+ return false;
+
+ psb_intel_sdvo_connector->format_supported_num = 0;
+ for (i = 0; i < TV_FORMAT_NUM; i++)
+ if (format_map & (1 << i))
+ psb_intel_sdvo_connector->tv_format_supported[psb_intel_sdvo_connector->format_supported_num++] = i;
+
+
+ psb_intel_sdvo_connector->tv_format =
+ drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "mode", psb_intel_sdvo_connector->format_supported_num);
+ if (!psb_intel_sdvo_connector->tv_format)
+ return false;
+
+ for (i = 0; i < psb_intel_sdvo_connector->format_supported_num; i++)
+ drm_property_add_enum(
+ psb_intel_sdvo_connector->tv_format, i,
+ i, tv_format_names[psb_intel_sdvo_connector->tv_format_supported[i]]);
+
+ psb_intel_sdvo->tv_format_index = psb_intel_sdvo_connector->tv_format_supported[0];
+ drm_connector_attach_property(&psb_intel_sdvo_connector->base.base,
+ psb_intel_sdvo_connector->tv_format, 0);
+ return true;
+
+}
+
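+/*
+ * ENHANCEMENT: for each supported SDVO enhancement, query its maximum and
+ * current values, create a DRM range property spanning [0, max] and attach
+ * it to the connector with the current value.
+ */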
+#define ENHANCEMENT(name, NAME) do { \
+ if (enhancements.name) { \
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_MAX_##NAME, &data_value, 4) || \
+ !psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_##NAME, &response, 2)) \
+ return false; \
+ psb_intel_sdvo_connector->max_##name = data_value[0]; \
+ psb_intel_sdvo_connector->cur_##name = response; \
+ psb_intel_sdvo_connector->name = \
+ drm_property_create(dev, DRM_MODE_PROP_RANGE, #name, 2); \
+ if (!psb_intel_sdvo_connector->name) return false; \
+ psb_intel_sdvo_connector->name->values[0] = 0; \
+ psb_intel_sdvo_connector->name->values[1] = data_value[0]; \
+ drm_connector_attach_property(connector, \
+ psb_intel_sdvo_connector->name, \
+ psb_intel_sdvo_connector->cur_##name); \
+ DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
+ data_value[0], data_value[1], response); \
+ } \
+} while (0)
+
+static bool
+psb_intel_sdvo_create_enhance_property_tv(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
+ struct psb_intel_sdvo_enhancements_reply enhancements)
+{
+ struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+ struct drm_connector *connector = &psb_intel_sdvo_connector->base.base;
+ uint16_t response, data_value[2];
+
+ /* When horizontal overscan is supported, add the left/right margin properties */
+ if (enhancements.overscan_h) {
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_MAX_OVERSCAN_H,
+ &data_value, 4))
+ return false;
+
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_OVERSCAN_H,
+ &response, 2))
+ return false;
+
+ psb_intel_sdvo_connector->max_hscan = data_value[0];
+ psb_intel_sdvo_connector->left_margin = data_value[0] - response;
+ psb_intel_sdvo_connector->right_margin = psb_intel_sdvo_connector->left_margin;
+ psb_intel_sdvo_connector->left =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "left_margin", 2);
+ if (!psb_intel_sdvo_connector->left)
+ return false;
+
+ psb_intel_sdvo_connector->left->values[0] = 0;
+ psb_intel_sdvo_connector->left->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ psb_intel_sdvo_connector->left,
+ psb_intel_sdvo_connector->left_margin);
+
+ psb_intel_sdvo_connector->right =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "right_margin", 2);
+ if (!psb_intel_sdvo_connector->right)
+ return false;
+
+ psb_intel_sdvo_connector->right->values[0] = 0;
+ psb_intel_sdvo_connector->right->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ psb_intel_sdvo_connector->right,
+ psb_intel_sdvo_connector->right_margin);
+ DRM_DEBUG_KMS("h_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+
+ if (enhancements.overscan_v) {
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_MAX_OVERSCAN_V,
+ &data_value, 4))
+ return false;
+
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_OVERSCAN_V,
+ &response, 2))
+ return false;
+
+ psb_intel_sdvo_connector->max_vscan = data_value[0];
+ psb_intel_sdvo_connector->top_margin = data_value[0] - response;
+ psb_intel_sdvo_connector->bottom_margin = psb_intel_sdvo_connector->top_margin;
+ psb_intel_sdvo_connector->top =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "top_margin", 2);
+ if (!psb_intel_sdvo_connector->top)
+ return false;
+
+ psb_intel_sdvo_connector->top->values[0] = 0;
+ psb_intel_sdvo_connector->top->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ psb_intel_sdvo_connector->top,
+ psb_intel_sdvo_connector->top_margin);
+
+ psb_intel_sdvo_connector->bottom =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "bottom_margin", 2);
+ if (!psb_intel_sdvo_connector->bottom)
+ return false;
+
+ psb_intel_sdvo_connector->bottom->values[0] = 0;
+ psb_intel_sdvo_connector->bottom->values[1] = data_value[0];
+ drm_connector_attach_property(connector,
+ psb_intel_sdvo_connector->bottom,
+ psb_intel_sdvo_connector->bottom_margin);
+ DRM_DEBUG_KMS("v_overscan: max %d, "
+ "default %d, current %d\n",
+ data_value[0], data_value[1], response);
+ }
+
+ ENHANCEMENT(hpos, HPOS);
+ ENHANCEMENT(vpos, VPOS);
+ ENHANCEMENT(saturation, SATURATION);
+ ENHANCEMENT(contrast, CONTRAST);
+ ENHANCEMENT(hue, HUE);
+ ENHANCEMENT(sharpness, SHARPNESS);
+ ENHANCEMENT(brightness, BRIGHTNESS);
+ ENHANCEMENT(flicker_filter, FLICKER_FILTER);
+ ENHANCEMENT(flicker_filter_adaptive, FLICKER_FILTER_ADAPTIVE);
+ ENHANCEMENT(flicker_filter_2d, FLICKER_FILTER_2D);
+ ENHANCEMENT(tv_chroma_filter, TV_CHROMA_FILTER);
+ ENHANCEMENT(tv_luma_filter, TV_LUMA_FILTER);
+
+ if (enhancements.dot_crawl) {
+ if (!psb_intel_sdvo_get_value(psb_intel_sdvo, SDVO_CMD_GET_DOT_CRAWL, &response, 2))
+ return false;
+
+ psb_intel_sdvo_connector->max_dot_crawl = 1;
+ psb_intel_sdvo_connector->cur_dot_crawl = response & 0x1;
+ psb_intel_sdvo_connector->dot_crawl =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE, "dot_crawl", 2);
+ if (!psb_intel_sdvo_connector->dot_crawl)
+ return false;
+
+ psb_intel_sdvo_connector->dot_crawl->values[0] = 0;
+ psb_intel_sdvo_connector->dot_crawl->values[1] = 1;
+ drm_connector_attach_property(connector,
+ psb_intel_sdvo_connector->dot_crawl,
+ psb_intel_sdvo_connector->cur_dot_crawl);
+ DRM_DEBUG_KMS("dot crawl: current %d\n", response);
+ }
+
+ return true;
+}
+
+static bool
+psb_intel_sdvo_create_enhance_property_lvds(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector,
+ struct psb_intel_sdvo_enhancements_reply enhancements)
+{
+ struct drm_device *dev = psb_intel_sdvo->base.base.dev;
+ struct drm_connector *connector = &psb_intel_sdvo_connector->base.base;
+ uint16_t response, data_value[2];
+
+ ENHANCEMENT(brightness, BRIGHTNESS);
+
+ return true;
+}
+#undef ENHANCEMENT
+
+static bool psb_intel_sdvo_create_enhance_property(struct psb_intel_sdvo *psb_intel_sdvo,
+ struct psb_intel_sdvo_connector *psb_intel_sdvo_connector)
+{
+ union {
+ struct psb_intel_sdvo_enhancements_reply reply;
+ uint16_t response;
+ } enhancements;
+
+ BUILD_BUG_ON(sizeof(enhancements) != 2);
+
+ enhancements.response = 0;
+ psb_intel_sdvo_get_value(psb_intel_sdvo,
+ SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS,
+ &enhancements, sizeof(enhancements));
+ if (enhancements.response == 0) {
+ DRM_DEBUG_KMS("No enhancement is supported\n");
+ return true;
+ }
+
+ if (IS_TV(psb_intel_sdvo_connector))
+ return psb_intel_sdvo_create_enhance_property_tv(psb_intel_sdvo, psb_intel_sdvo_connector, enhancements.reply);
+ else if (IS_LVDS(psb_intel_sdvo_connector))
+ return psb_intel_sdvo_create_enhance_property_lvds(psb_intel_sdvo, psb_intel_sdvo_connector, enhancements.reply);
+ else
+ return true;
+}
+
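+/*
+ * DDC proxy: an i2c algorithm that switches the SDVO control bus to the
+ * selected DDC bus and then forwards the transfer to the underlying GMBUS
+ * adapter.
+ */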
+static int psb_intel_sdvo_ddc_proxy_xfer(struct i2c_adapter *adapter,
+ struct i2c_msg *msgs,
+ int num)
+{
+ struct psb_intel_sdvo *sdvo = adapter->algo_data;
+
+ if (!psb_intel_sdvo_set_control_bus_switch(sdvo, sdvo->ddc_bus))
+ return -EIO;
+
+ return sdvo->i2c->algo->master_xfer(sdvo->i2c, msgs, num);
+}
+
+static u32 psb_intel_sdvo_ddc_proxy_func(struct i2c_adapter *adapter)
+{
+ struct psb_intel_sdvo *sdvo = adapter->algo_data;
+ return sdvo->i2c->algo->functionality(sdvo->i2c);
+}
+
+static const struct i2c_algorithm psb_intel_sdvo_ddc_proxy = {
+ .master_xfer = psb_intel_sdvo_ddc_proxy_xfer,
+ .functionality = psb_intel_sdvo_ddc_proxy_func
+};
+
+static bool
+psb_intel_sdvo_init_ddc_proxy(struct psb_intel_sdvo *sdvo,
+ struct drm_device *dev)
+{
+ sdvo->ddc.owner = THIS_MODULE;
+ sdvo->ddc.class = I2C_CLASS_DDC;
+ snprintf(sdvo->ddc.name, I2C_NAME_SIZE, "SDVO DDC proxy");
+ sdvo->ddc.dev.parent = &dev->pdev->dev;
+ sdvo->ddc.algo_data = sdvo;
+ sdvo->ddc.algo = &psb_intel_sdvo_ddc_proxy;
+
+ return i2c_add_adapter(&sdvo->ddc) == 0;
+}
+
+bool psb_intel_sdvo_init(struct drm_device *dev, int sdvo_reg)
+{
+ struct drm_psb_private *dev_priv = dev->dev_private;
+ struct psb_intel_encoder *psb_intel_encoder;
+ struct psb_intel_sdvo *psb_intel_sdvo;
+ int i;
+
+ psb_intel_sdvo = kzalloc(sizeof(struct psb_intel_sdvo), GFP_KERNEL);
+ if (!psb_intel_sdvo)
+ return false;
+
+ psb_intel_sdvo->sdvo_reg = sdvo_reg;
+ psb_intel_sdvo->slave_addr = psb_intel_sdvo_get_slave_addr(dev, sdvo_reg) >> 1;
+ psb_intel_sdvo_select_i2c_bus(dev_priv, psb_intel_sdvo, sdvo_reg);
+ if (!psb_intel_sdvo_init_ddc_proxy(psb_intel_sdvo, dev)) {
+ kfree(psb_intel_sdvo);
+ return false;
+ }
+
+ /* encoder type will be decided later */
+ psb_intel_encoder = &psb_intel_sdvo->base;
+ psb_intel_encoder->type = INTEL_OUTPUT_SDVO;
+ drm_encoder_init(dev, &psb_intel_encoder->base, &psb_intel_sdvo_enc_funcs, 0);
+
+ /* Read the regs to test if we can talk to the device */
+ for (i = 0; i < 0x40; i++) {
+ u8 byte;
+
+ if (!psb_intel_sdvo_read_byte(psb_intel_sdvo, i, &byte)) {
+ DRM_DEBUG_KMS("No SDVO device found on SDVO%c\n",
+ IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+ goto err;
+ }
+ }
+
+ if (IS_SDVOB(sdvo_reg))
+ dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS;
+ else
+ dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS;
+
+ drm_encoder_helper_add(&psb_intel_encoder->base, &psb_intel_sdvo_helper_funcs);
+
+ /* By default, SDVO LVDS is false */
+ if (!psb_intel_sdvo_get_capabilities(psb_intel_sdvo, &psb_intel_sdvo->caps))
+ goto err;
+
+ if (!psb_intel_sdvo_output_setup(psb_intel_sdvo,
+ psb_intel_sdvo->caps.output_flags)) {
+ DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n",
+ IS_SDVOB(sdvo_reg) ? 'B' : 'C');
+ goto err;
+ }
+
+ psb_intel_sdvo_select_ddc_bus(dev_priv, psb_intel_sdvo, sdvo_reg);
+
+ /* Set the input timing to the screen. Assume input 0 is always used. */
+ if (!psb_intel_sdvo_set_target_input(psb_intel_sdvo))
+ goto err;
+
+ if (!psb_intel_sdvo_get_input_pixel_clock_range(psb_intel_sdvo,
+ &psb_intel_sdvo->pixel_clock_min,
+ &psb_intel_sdvo->pixel_clock_max))
+ goto err;
+
+ DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
+ "clock range %dMHz - %dMHz, "
+ "input 1: %c, input 2: %c, "
+ "output 1: %c, output 2: %c\n",
+ SDVO_NAME(psb_intel_sdvo),
+ psb_intel_sdvo->caps.vendor_id, psb_intel_sdvo->caps.device_id,
+ psb_intel_sdvo->caps.device_rev_id,
+ psb_intel_sdvo->pixel_clock_min / 1000,
+ psb_intel_sdvo->pixel_clock_max / 1000,
+ (psb_intel_sdvo->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
+ (psb_intel_sdvo->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
+ /* check currently supported outputs */
+ psb_intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
+ psb_intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
+ return true;
+
+err:
+ drm_encoder_cleanup(&psb_intel_encoder->base);
+ i2c_del_adapter(&psb_intel_sdvo->ddc);
+ kfree(psb_intel_sdvo);
+
+ return false;
+}
diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h b/drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h
new file mode 100644
index 0000000..600e797
--- /dev/null
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo_regs.h
@@ -0,0 +1,723 @@
+/*
+ * Copyright © 2006-2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ */
+
+/**
+ * @file SDVO command definitions and structures.
+ */
+
+#define SDVO_OUTPUT_FIRST (0)
+#define SDVO_OUTPUT_TMDS0 (1 << 0)
+#define SDVO_OUTPUT_RGB0 (1 << 1)
+#define SDVO_OUTPUT_CVBS0 (1 << 2)
+#define SDVO_OUTPUT_SVID0 (1 << 3)
+#define SDVO_OUTPUT_YPRPB0 (1 << 4)
+#define SDVO_OUTPUT_SCART0 (1 << 5)
+#define SDVO_OUTPUT_LVDS0 (1 << 6)
+#define SDVO_OUTPUT_TMDS1 (1 << 8)
+#define SDVO_OUTPUT_RGB1 (1 << 9)
+#define SDVO_OUTPUT_CVBS1 (1 << 10)
+#define SDVO_OUTPUT_SVID1 (1 << 11)
+#define SDVO_OUTPUT_YPRPB1 (1 << 12)
+#define SDVO_OUTPUT_SCART1 (1 << 13)
+#define SDVO_OUTPUT_LVDS1 (1 << 14)
+#define SDVO_OUTPUT_LAST (14)
+
+struct psb_intel_sdvo_caps {
+ u8 vendor_id;
+ u8 device_id;
+ u8 device_rev_id;
+ u8 sdvo_version_major;
+ u8 sdvo_version_minor;
+ unsigned int sdvo_inputs_mask:2;
+ unsigned int smooth_scaling:1;
+ unsigned int sharp_scaling:1;
+ unsigned int up_scaling:1;
+ unsigned int down_scaling:1;
+ unsigned int stall_support:1;
+ unsigned int pad:1;
+ u16 output_flags;
+} __attribute__((packed));
+
+/** This matches the EDID DTD structure, more or less */
+struct psb_intel_sdvo_dtd {
+ struct {
+ u16 clock; /**< pixel clock, in 10kHz units */
+ u8 h_active; /**< lower 8 bits (pixels) */
+ u8 h_blank; /**< lower 8 bits (pixels) */
+ u8 h_high; /**< upper 4 bits each h_active, h_blank */
+ u8 v_active; /**< lower 8 bits (lines) */
+ u8 v_blank; /**< lower 8 bits (lines) */
+ u8 v_high; /**< upper 4 bits each v_active, v_blank */
+ } part1;
+
+ struct {
+ u8 h_sync_off; /**< lower 8 bits, from hblank start */
+ u8 h_sync_width; /**< lower 8 bits (pixels) */
+ /** lower 4 bits each vsync offset, vsync width */
+ u8 v_sync_off_width;
+ /**
+ * 2 high bits of hsync offset, 2 high bits of hsync width,
+ * bits 4-5 of vsync offset, and 2 high bits of vsync width.
+ */
+ u8 sync_off_width_high;
+ u8 dtd_flags;
+ u8 sdvo_flags;
+ /** bits 6-7 of vsync offset at bits 6-7 */
+ u8 v_sync_off_high;
+ u8 reserved;
+ } part2;
+} __attribute__((packed));
+
+struct psb_intel_sdvo_pixel_clock_range {
+ u16 min; /**< pixel clock, in 10kHz units */
+ u16 max; /**< pixel clock, in 10kHz units */
+} __attribute__((packed));
+
+struct psb_intel_sdvo_preferred_input_timing_args {
+ u16 clock;
+ u16 width;
+ u16 height;
+ u8 interlace:1;
+ u8 scaled:1;
+ u8 pad:6;
+} __attribute__((packed));
+
+/* I2C registers for SDVO */
+#define SDVO_I2C_ARG_0 0x07
+#define SDVO_I2C_ARG_1 0x06
+#define SDVO_I2C_ARG_2 0x05
+#define SDVO_I2C_ARG_3 0x04
+#define SDVO_I2C_ARG_4 0x03
+#define SDVO_I2C_ARG_5 0x02
+#define SDVO_I2C_ARG_6 0x01
+#define SDVO_I2C_ARG_7 0x00
+#define SDVO_I2C_OPCODE 0x08
+#define SDVO_I2C_CMD_STATUS 0x09
+#define SDVO_I2C_RETURN_0 0x0a
+#define SDVO_I2C_RETURN_1 0x0b
+#define SDVO_I2C_RETURN_2 0x0c
+#define SDVO_I2C_RETURN_3 0x0d
+#define SDVO_I2C_RETURN_4 0x0e
+#define SDVO_I2C_RETURN_5 0x0f
+#define SDVO_I2C_RETURN_6 0x10
+#define SDVO_I2C_RETURN_7 0x11
+#define SDVO_I2C_VENDOR_BEGIN 0x20
+
+/* Status results */
+#define SDVO_CMD_STATUS_POWER_ON 0x0
+#define SDVO_CMD_STATUS_SUCCESS 0x1
+#define SDVO_CMD_STATUS_NOTSUPP 0x2
+#define SDVO_CMD_STATUS_INVALID_ARG 0x3
+#define SDVO_CMD_STATUS_PENDING 0x4
+#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5
+#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6
+
+/* SDVO commands, argument/result registers */
+
+#define SDVO_CMD_RESET 0x01
+
+/** Returns a struct intel_sdvo_caps */
+#define SDVO_CMD_GET_DEVICE_CAPS 0x02
+
+#define SDVO_CMD_GET_FIRMWARE_REV 0x86
+# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0
+# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
+# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
+
+/**
+ * Reports which inputs are trained (managed to sync).
+ *
+ * Devices must have trained within 2 vsyncs of a mode change.
+ */
+#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
+struct psb_intel_sdvo_get_trained_inputs_response {
+ unsigned int input0_trained:1;
+ unsigned int input1_trained:1;
+ unsigned int pad:6;
+} __attribute__((packed));
+
+/** Returns a struct intel_sdvo_output_flags of active outputs. */
+#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
+
+/**
+ * Sets the current set of active outputs.
+ *
+ * Takes a struct intel_sdvo_output_flags. Must be preceded by a SET_IN_OUT_MAP
+ * on multi-output devices.
+ */
+#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
+
+/**
+ * Returns the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Returns two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_GET_IN_OUT_MAP 0x06
+struct psb_intel_sdvo_in_out_map {
+ u16 in0, in1;
+};
+
+/**
+ * Sets the current mapping of SDVO inputs to outputs on the device.
+ *
+ * Takes two struct intel_sdvo_output_flags structures.
+ */
+#define SDVO_CMD_SET_IN_OUT_MAP 0x07
+
+/**
+ * Returns a struct intel_sdvo_output_flags of attached displays.
+ */
+#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays supporting hot plugging.
+ */
+#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
+
+/**
+ * Takes a struct intel_sdvo_output_flags.
+ */
+#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
+
+/**
+ * Returns a struct intel_sdvo_output_flags of displays with hot plug
+ * interrupts enabled.
+ */
+#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e
+
+#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
+struct intel_sdvo_get_interrupt_event_source_response {
+ u16 interrupt_status;
+ unsigned int ambient_light_interrupt:1;
+ unsigned int hdmi_audio_encrypt_change:1;
+ unsigned int pad:6;
+} __attribute__((packed));
+
+/**
+ * Selects which input is affected by future input commands.
+ *
+ * Commands affected include SET_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
+ * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
+ */
+#define SDVO_CMD_SET_TARGET_INPUT 0x10
+struct psb_intel_sdvo_set_target_input_args {
+ unsigned int target_1:1;
+ unsigned int pad:7;
+} __attribute__((packed));
+
+/**
+ * Takes a struct intel_sdvo_output_flags of which outputs are targeted by
+ * future output commands.
+ *
+ * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
+ * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
+ */
+#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
+
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12
+#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14
+#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16
+#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18
+#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19
+/* Part 1 */
+# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0
+# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1
+# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2
+# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3
+# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4
+# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5
+# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6
+# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7
+/* Part 2 */
+# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0
+# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1
+# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2
+# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3
+# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4
+# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7)
+# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5)
+# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3)
+# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1)
+# define SDVO_DTD_SDVO_FLAS SDVO_I2C_ARG_5
+# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7)
+# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6)
+# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6)
+# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4)
+# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
+# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
+
+/**
+ * Generates a DTD based on the given width, height, and flags.
+ *
+ * This will be supported by any device supporting scaling or interlaced
+ * modes.
+ */
+#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0
+# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2
+# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4
+# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0)
+# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1)
+
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
+#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
+
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
+/** Returns a struct intel_sdvo_pixel_clock_range */
+#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
+
+/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
+#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
+
+/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
+/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
+#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
+# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
+# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
+# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
+/** 6 bytes of bit flags for TV formats shared by all TV format functions */
+struct psb_intel_sdvo_tv_format {
+ unsigned int ntsc_m:1;
+ unsigned int ntsc_j:1;
+ unsigned int ntsc_443:1;
+ unsigned int pal_b:1;
+ unsigned int pal_d:1;
+ unsigned int pal_g:1;
+ unsigned int pal_h:1;
+ unsigned int pal_i:1;
+
+ unsigned int pal_m:1;
+ unsigned int pal_n:1;
+ unsigned int pal_nc:1;
+ unsigned int pal_60:1;
+ unsigned int secam_b:1;
+ unsigned int secam_d:1;
+ unsigned int secam_g:1;
+ unsigned int secam_k:1;
+
+ unsigned int secam_k1:1;
+ unsigned int secam_l:1;
+ unsigned int secam_60:1;
+ unsigned int hdtv_std_smpte_240m_1080i_59:1;
+ unsigned int hdtv_std_smpte_240m_1080i_60:1;
+ unsigned int hdtv_std_smpte_260m_1080i_59:1;
+ unsigned int hdtv_std_smpte_260m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080i_50:1;
+
+ unsigned int hdtv_std_smpte_274m_1080i_59:1;
+ unsigned int hdtv_std_smpte_274m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080p_23:1;
+ unsigned int hdtv_std_smpte_274m_1080p_24:1;
+ unsigned int hdtv_std_smpte_274m_1080p_25:1;
+ unsigned int hdtv_std_smpte_274m_1080p_29:1;
+ unsigned int hdtv_std_smpte_274m_1080p_30:1;
+ unsigned int hdtv_std_smpte_274m_1080p_50:1;
+
+ unsigned int hdtv_std_smpte_274m_1080p_59:1;
+ unsigned int hdtv_std_smpte_274m_1080p_60:1;
+ unsigned int hdtv_std_smpte_295m_1080i_50:1;
+ unsigned int hdtv_std_smpte_295m_1080p_50:1;
+ unsigned int hdtv_std_smpte_296m_720p_59:1;
+ unsigned int hdtv_std_smpte_296m_720p_60:1;
+ unsigned int hdtv_std_smpte_296m_720p_50:1;
+ unsigned int hdtv_std_smpte_293m_480p_59:1;
+
+ unsigned int hdtv_std_smpte_170m_480i_59:1;
+ unsigned int hdtv_std_iturbt601_576i_50:1;
+ unsigned int hdtv_std_iturbt601_576p_50:1;
+ unsigned int hdtv_std_eia_7702a_480i_60:1;
+ unsigned int hdtv_std_eia_7702a_480p_60:1;
+ unsigned int pad:3;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_TV_FORMAT 0x28
+
+#define SDVO_CMD_SET_TV_FORMAT 0x29
+
+/** Returns the resolutions that can be used with the given TV format */
+#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83
+struct psb_intel_sdvo_sdtv_resolution_request {
+ unsigned int ntsc_m:1;
+ unsigned int ntsc_j:1;
+ unsigned int ntsc_443:1;
+ unsigned int pal_b:1;
+ unsigned int pal_d:1;
+ unsigned int pal_g:1;
+ unsigned int pal_h:1;
+ unsigned int pal_i:1;
+
+ unsigned int pal_m:1;
+ unsigned int pal_n:1;
+ unsigned int pal_nc:1;
+ unsigned int pal_60:1;
+ unsigned int secam_b:1;
+ unsigned int secam_d:1;
+ unsigned int secam_g:1;
+ unsigned int secam_k:1;
+
+ unsigned int secam_k1:1;
+ unsigned int secam_l:1;
+ unsigned int secam_60:1;
+ unsigned int pad:5;
+} __attribute__((packed));
+
+struct psb_intel_sdvo_sdtv_resolution_reply {
+ unsigned int res_320x200:1;
+ unsigned int res_320x240:1;
+ unsigned int res_400x300:1;
+ unsigned int res_640x350:1;
+ unsigned int res_640x400:1;
+ unsigned int res_640x480:1;
+ unsigned int res_704x480:1;
+ unsigned int res_704x576:1;
+
+ unsigned int res_720x350:1;
+ unsigned int res_720x400:1;
+ unsigned int res_720x480:1;
+ unsigned int res_720x540:1;
+ unsigned int res_720x576:1;
+ unsigned int res_768x576:1;
+ unsigned int res_800x600:1;
+ unsigned int res_832x624:1;
+
+ unsigned int res_920x766:1;
+ unsigned int res_1024x768:1;
+ unsigned int res_1280x1024:1;
+ unsigned int pad:5;
+} __attribute__((packed));
+
+/* Get the supported resolutions with a square pixel aspect ratio that can be
+ scaled for the requested HDTV format */
+#define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT 0x85
+
+struct psb_intel_sdvo_hdtv_resolution_request {
+ unsigned int hdtv_std_smpte_240m_1080i_59:1;
+ unsigned int hdtv_std_smpte_240m_1080i_60:1;
+ unsigned int hdtv_std_smpte_260m_1080i_59:1;
+ unsigned int hdtv_std_smpte_260m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080i_50:1;
+ unsigned int hdtv_std_smpte_274m_1080i_59:1;
+ unsigned int hdtv_std_smpte_274m_1080i_60:1;
+ unsigned int hdtv_std_smpte_274m_1080p_23:1;
+
+ unsigned int hdtv_std_smpte_274m_1080p_24:1;
+ unsigned int hdtv_std_smpte_274m_1080p_25:1;
+ unsigned int hdtv_std_smpte_274m_1080p_29:1;
+ unsigned int hdtv_std_smpte_274m_1080p_30:1;
+ unsigned int hdtv_std_smpte_274m_1080p_50:1;
+ unsigned int hdtv_std_smpte_274m_1080p_59:1;
+ unsigned int hdtv_std_smpte_274m_1080p_60:1;
+ unsigned int hdtv_std_smpte_295m_1080i_50:1;
+
+ unsigned int hdtv_std_smpte_295m_1080p_50:1;
+ unsigned int hdtv_std_smpte_296m_720p_59:1;
+ unsigned int hdtv_std_smpte_296m_720p_60:1;
+ unsigned int hdtv_std_smpte_296m_720p_50:1;
+ unsigned int hdtv_std_smpte_293m_480p_59:1;
+ unsigned int hdtv_std_smpte_170m_480i_59:1;
+ unsigned int hdtv_std_iturbt601_576i_50:1;
+ unsigned int hdtv_std_iturbt601_576p_50:1;
+
+ unsigned int hdtv_std_eia_7702a_480i_60:1;
+ unsigned int hdtv_std_eia_7702a_480p_60:1;
+ unsigned int pad:6;
+} __attribute__((packed));
+
+struct psb_intel_sdvo_hdtv_resolution_reply {
+ unsigned int res_640x480:1;
+ unsigned int res_800x600:1;
+ unsigned int res_1024x768:1;
+ unsigned int res_1280x960:1;
+ unsigned int res_1400x1050:1;
+ unsigned int res_1600x1200:1;
+ unsigned int res_1920x1440:1;
+ unsigned int res_2048x1536:1;
+
+ unsigned int res_2560x1920:1;
+ unsigned int res_3200x2400:1;
+ unsigned int res_3840x2880:1;
+ unsigned int pad1:5;
+
+ unsigned int res_848x480:1;
+ unsigned int res_1064x600:1;
+ unsigned int res_1280x720:1;
+ unsigned int res_1360x768:1;
+ unsigned int res_1704x960:1;
+ unsigned int res_1864x1050:1;
+ unsigned int res_1920x1080:1;
+ unsigned int res_2128x1200:1;
+
+ unsigned int res_2560x1400:1;
+ unsigned int res_2728x1536:1;
+ unsigned int res_3408x1920:1;
+ unsigned int res_4264x2400:1;
+ unsigned int res_5120x2880:1;
+ unsigned int pad2:3;
+
+ unsigned int res_768x480:1;
+ unsigned int res_960x600:1;
+ unsigned int res_1152x720:1;
+ unsigned int res_1124x768:1;
+ unsigned int res_1536x960:1;
+ unsigned int res_1680x1050:1;
+ unsigned int res_1728x1080:1;
+ unsigned int res_1920x1200:1;
+
+ unsigned int res_2304x1440:1;
+ unsigned int res_2456x1536:1;
+ unsigned int res_3072x1920:1;
+ unsigned int res_3840x2400:1;
+ unsigned int res_4608x2880:1;
+ unsigned int pad3:3;
+
+ unsigned int res_1280x1024:1;
+ unsigned int pad4:7;
+
+ unsigned int res_1280x768:1;
+ unsigned int pad5:7;
+} __attribute__((packed));
+
+/* Get supported power state returns info for the encoder and monitor; it
+ relies on the last SetTargetInput and SetTargetOutput calls */
+#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
+/* Get power state returns info for the encoder and monitor; it relies on
+ the last SetTargetInput and SetTargetOutput calls */
+#define SDVO_CMD_GET_POWER_STATE 0x2b
+#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
+#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
+# define SDVO_ENCODER_STATE_ON (1 << 0)
+# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
+# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
+# define SDVO_ENCODER_STATE_OFF (1 << 3)
+# define SDVO_MONITOR_STATE_ON (1 << 4)
+# define SDVO_MONITOR_STATE_STANDBY (1 << 5)
+# define SDVO_MONITOR_STATE_SUSPEND (1 << 6)
+# define SDVO_MONITOR_STATE_OFF (1 << 7)
+
+#define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING 0x2d
+#define SDVO_CMD_GET_PANEL_POWER_SEQUENCING 0x2e
+#define SDVO_CMD_SET_PANEL_POWER_SEQUENCING 0x2f
+/**
+ * The panel power sequencing parameters are in units of milliseconds.
+ * The high fields are bits 8:9 of the 10-bit values.
+ */
+struct psb_sdvo_panel_power_sequencing {
+ u8 t0;
+ u8 t1;
+ u8 t2;
+ u8 t3;
+ u8 t4;
+
+ unsigned int t0_high:2;
+ unsigned int t1_high:2;
+ unsigned int t2_high:2;
+ unsigned int t3_high:2;
+
+ unsigned int t4_high:2;
+ unsigned int pad:6;
+} __attribute__((packed));
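Editorial aside, not part of the patch: each delay above is a 10-bit millisecond value split between the low byte tN and the 2-bit tN_high field, so a consumer of this reply would reassemble it roughly as in the sketch below (the helper name is hypothetical).

static inline u16 psb_sdvo_pps_t0_ms(const struct psb_sdvo_panel_power_sequencing *pps)
{
	/* bits 8:9 of the value come from t0_high, bits 0:7 from t0 */
	return pps->t0 | ((u16)pps->t0_high << 8);
}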
+
+#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30
+struct sdvo_max_backlight_reply {
+ u8 max_value;
+ u8 default_value;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31
+#define SDVO_CMD_SET_BACKLIGHT_LEVEL 0x32
+
+#define SDVO_CMD_GET_AMBIENT_LIGHT 0x33
+struct sdvo_get_ambient_light_reply {
+ u16 trip_low;
+ u16 trip_high;
+ u16 value;
+} __attribute__((packed));
+#define SDVO_CMD_SET_AMBIENT_LIGHT 0x34
+struct sdvo_set_ambient_light_reply {
+ u16 trip_low;
+ u16 trip_high;
+ unsigned int enable:1;
+ unsigned int pad:7;
+} __attribute__((packed));
+
+/* Set display power state */
+#define SDVO_CMD_SET_DISPLAY_POWER_STATE 0x7d
+# define SDVO_DISPLAY_STATE_ON (1 << 0)
+# define SDVO_DISPLAY_STATE_STANDBY (1 << 1)
+# define SDVO_DISPLAY_STATE_SUSPEND (1 << 2)
+# define SDVO_DISPLAY_STATE_OFF (1 << 3)
+
+#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS 0x84
+struct psb_intel_sdvo_enhancements_reply {
+ unsigned int flicker_filter:1;
+ unsigned int flicker_filter_adaptive:1;
+ unsigned int flicker_filter_2d:1;
+ unsigned int saturation:1;
+ unsigned int hue:1;
+ unsigned int brightness:1;
+ unsigned int contrast:1;
+ unsigned int overscan_h:1;
+
+ unsigned int overscan_v:1;
+ unsigned int hpos:1;
+ unsigned int vpos:1;
+ unsigned int sharpness:1;
+ unsigned int dot_crawl:1;
+ unsigned int dither:1;
+ unsigned int tv_chroma_filter:1;
+ unsigned int tv_luma_filter:1;
+} __attribute__((packed));
+
+/* Picture enhancement limits below are dependent on the current TV format,
+ * and thus need to be queried and set after it.
+ */
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER 0x4d
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE 0x7b
+#define SDVO_CMD_GET_MAX_FLICKER_FILTER_2D 0x52
+#define SDVO_CMD_GET_MAX_SATURATION 0x55
+#define SDVO_CMD_GET_MAX_HUE 0x58
+#define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5b
+#define SDVO_CMD_GET_MAX_CONTRAST 0x5e
+#define SDVO_CMD_GET_MAX_OVERSCAN_H 0x61
+#define SDVO_CMD_GET_MAX_OVERSCAN_V 0x64
+#define SDVO_CMD_GET_MAX_HPOS 0x67
+#define SDVO_CMD_GET_MAX_VPOS 0x6a
+#define SDVO_CMD_GET_MAX_SHARPNESS 0x6d
+#define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER 0x74
+#define SDVO_CMD_GET_MAX_TV_LUMA_FILTER 0x77
+struct psb_intel_sdvo_enhancement_limits_reply {
+ u16 max_value;
+ u16 default_value;
+} __attribute__((packed));
+
+#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f
+#define SDVO_CMD_SET_LVDS_PANEL_INFORMATION 0x80
+# define SDVO_LVDS_COLOR_DEPTH_18 (0 << 0)
+# define SDVO_LVDS_COLOR_DEPTH_24 (1 << 0)
+# define SDVO_LVDS_CONNECTOR_SPWG (0 << 2)
+# define SDVO_LVDS_CONNECTOR_OPENLDI (1 << 2)
+# define SDVO_LVDS_SINGLE_CHANNEL (0 << 4)
+# define SDVO_LVDS_DUAL_CHANNEL (1 << 4)
+
+#define SDVO_CMD_GET_FLICKER_FILTER 0x4e
+#define SDVO_CMD_SET_FLICKER_FILTER 0x4f
+#define SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE 0x50
+#define SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE 0x51
+#define SDVO_CMD_GET_FLICKER_FILTER_2D 0x53
+#define SDVO_CMD_SET_FLICKER_FILTER_2D 0x54
+#define SDVO_CMD_GET_SATURATION 0x56
+#define SDVO_CMD_SET_SATURATION 0x57
+#define SDVO_CMD_GET_HUE 0x59
+#define SDVO_CMD_SET_HUE 0x5a
+#define SDVO_CMD_GET_BRIGHTNESS 0x5c
+#define SDVO_CMD_SET_BRIGHTNESS 0x5d
+#define SDVO_CMD_GET_CONTRAST 0x5f
+#define SDVO_CMD_SET_CONTRAST 0x60
+#define SDVO_CMD_GET_OVERSCAN_H 0x62
+#define SDVO_CMD_SET_OVERSCAN_H 0x63
+#define SDVO_CMD_GET_OVERSCAN_V 0x65
+#define SDVO_CMD_SET_OVERSCAN_V 0x66
+#define SDVO_CMD_GET_HPOS 0x68
+#define SDVO_CMD_SET_HPOS 0x69
+#define SDVO_CMD_GET_VPOS 0x6b
+#define SDVO_CMD_SET_VPOS 0x6c
+#define SDVO_CMD_GET_SHARPNESS 0x6e
+#define SDVO_CMD_SET_SHARPNESS 0x6f
+#define SDVO_CMD_GET_TV_CHROMA_FILTER 0x75
+#define SDVO_CMD_SET_TV_CHROMA_FILTER 0x76
+#define SDVO_CMD_GET_TV_LUMA_FILTER 0x78
+#define SDVO_CMD_SET_TV_LUMA_FILTER 0x79
+struct psb_intel_sdvo_enhancements_arg {
+ u16 value;
+}__attribute__((packed));
+
+#define SDVO_CMD_GET_DOT_CRAWL 0x70
+#define SDVO_CMD_SET_DOT_CRAWL 0x71
+# define SDVO_DOT_CRAWL_ON (1 << 0)
+# define SDVO_DOT_CRAWL_DEFAULT_ON (1 << 1)
+
+#define SDVO_CMD_GET_DITHER 0x72
+#define SDVO_CMD_SET_DITHER 0x73
+# define SDVO_DITHER_ON (1 << 0)
+# define SDVO_DITHER_DEFAULT_ON (1 << 1)
+
+#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
+# define SDVO_CONTROL_BUS_PROM (1 << 0)
+# define SDVO_CONTROL_BUS_DDC1 (1 << 1)
+# define SDVO_CONTROL_BUS_DDC2 (1 << 2)
+# define SDVO_CONTROL_BUS_DDC3 (1 << 3)
+
+/* HDMI op codes */
+#define SDVO_CMD_GET_SUPP_ENCODE 0x9d
+#define SDVO_CMD_GET_ENCODE 0x9e
+#define SDVO_CMD_SET_ENCODE 0x9f
+ #define SDVO_ENCODE_DVI 0x0
+ #define SDVO_ENCODE_HDMI 0x1
+#define SDVO_CMD_SET_PIXEL_REPLI 0x8b
+#define SDVO_CMD_GET_PIXEL_REPLI 0x8c
+#define SDVO_CMD_GET_COLORIMETRY_CAP 0x8d
+#define SDVO_CMD_SET_COLORIMETRY 0x8e
+ #define SDVO_COLORIMETRY_RGB256 0x0
+ #define SDVO_COLORIMETRY_RGB220 0x1
+ #define SDVO_COLORIMETRY_YCrCb422 0x3
+ #define SDVO_COLORIMETRY_YCrCb444 0x4
+#define SDVO_CMD_GET_COLORIMETRY 0x8f
+#define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
+#define SDVO_CMD_SET_AUDIO_STAT 0x91
+#define SDVO_CMD_GET_AUDIO_STAT 0x92
+#define SDVO_CMD_SET_HBUF_INDEX 0x93
+#define SDVO_CMD_GET_HBUF_INDEX 0x94
+#define SDVO_CMD_GET_HBUF_INFO 0x95
+#define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96
+#define SDVO_CMD_GET_HBUF_AV_SPLIT 0x97
+#define SDVO_CMD_SET_HBUF_DATA 0x98
+#define SDVO_CMD_GET_HBUF_DATA 0x99
+#define SDVO_CMD_SET_HBUF_TXRATE 0x9a
+#define SDVO_CMD_GET_HBUF_TXRATE 0x9b
+ #define SDVO_HBUF_TX_DISABLED (0 << 6)
+ #define SDVO_HBUF_TX_ONCE (2 << 6)
+ #define SDVO_HBUF_TX_VSYNC (3 << 6)
+#define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c
+#define SDVO_NEED_TO_STALL (1 << 7)
+
+struct psb_intel_sdvo_encode {
+ u8 dvi_rev;
+ u8 hdmi_rev;
+} __attribute__ ((packed));
diff --git a/drivers/staging/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 36dd630..7be802b 100644
--- a/drivers/staging/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -27,7 +27,6 @@
#include "psb_reg.h"
#include "psb_intel_reg.h"
#include "power.h"
-#include "mdfld_output.h"
/*
* inline functions
@@ -454,12 +453,6 @@ int psb_enable_vblank(struct drm_device *dev, int pipe)
uint32_t reg_val = 0;
uint32_t pipeconf_reg = mid_pipeconf(pipe);
-#if defined(CONFIG_DRM_PSB_MFLD)
- /* Medfield is different - we should perhaps extract out vblank
- and blacklight etc ops */
- if (IS_MFLD(dev) && !mdfld_panel_dpi(dev))
- return mdfld_enable_te(dev, pipe);
-#endif
if (gma_power_begin(dev, false)) {
reg_val = REG_READ(pipeconf_reg);
gma_power_end(dev);
@@ -492,10 +485,6 @@ void psb_disable_vblank(struct drm_device *dev, int pipe)
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long irqflags;
-#if defined(CONFIG_DRM_PSB_MFLD)
- if (IS_MFLD(dev) && !mdfld_panel_dpi(dev))
- mdfld_disable_te(dev, pipe);
-#endif
spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
if (pipe == 0)
@@ -510,58 +499,6 @@ void psb_disable_vblank(struct drm_device *dev, int pipe)
spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
}
-/**
- * mdfld_enable_te - enable TE events
- * @dev: our DRM device
- * @pipe: which pipe to work on
- *
- * Enable TE events on a Medfield display pipe. Medfield specific.
- */
-int mdfld_enable_te(struct drm_device *dev, int pipe)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- unsigned long flags;
- uint32_t reg_val = 0;
- uint32_t pipeconf_reg = mid_pipeconf(pipe);
-
- if (gma_power_begin(dev, false)) {
- reg_val = REG_READ(pipeconf_reg);
- gma_power_end(dev);
- }
-
- if (!(reg_val & PIPEACONF_ENABLE))
- return -EINVAL;
-
- spin_lock_irqsave(&dev_priv->irqmask_lock, flags);
-
- mid_enable_pipe_event(dev_priv, pipe);
- psb_enable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);
-
- spin_unlock_irqrestore(&dev_priv->irqmask_lock, flags);
-
- return 0;
-}
-
-/**
- * mdfld_disable_te - disable TE events
- * @dev: our DRM device
- * @pipe: which pipe to work on
- *
- * Disable TE events on a Medfield display pipe. Medfield specific.
- */
-void mdfld_disable_te(struct drm_device *dev, int pipe)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- unsigned long flags;
-
- spin_lock_irqsave(&dev_priv->irqmask_lock, flags);
-
- mid_disable_pipe_event(dev_priv, pipe);
- psb_disable_pipestat(dev_priv, pipe, PIPE_TE_ENABLE);
-
- spin_unlock_irqrestore(&dev_priv->irqmask_lock, flags);
-}
-
/* Called from drm generic code, passed a 'crtc', which
* we use as a pipe index
*/
diff --git a/drivers/staging/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
index 216fda3..216fda3 100644
--- a/drivers/staging/gma500/psb_irq.h
+++ b/drivers/gpu/drm/gma500/psb_irq.h
diff --git a/drivers/staging/gma500/psb_lid.c b/drivers/gpu/drm/gma500/psb_lid.c
index b867aabe..b867aabe 100644
--- a/drivers/staging/gma500/psb_lid.c
+++ b/drivers/gpu/drm/gma500/psb_lid.c
diff --git a/drivers/staging/gma500/psb_reg.h b/drivers/gpu/drm/gma500/psb_reg.h
index b81c7c1..b81c7c1 100644
--- a/drivers/staging/gma500/psb_reg.h
+++ b/drivers/gpu/drm/gma500/psb_reg.h
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 8f371e8..7f4b4e1 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -222,8 +222,6 @@ static int i810_dma_cleanup(struct drm_device *dev)
pci_free_consistent(dev->pdev, PAGE_SIZE,
dev_priv->hw_status_page,
dev_priv->dma_status_page);
- /* Need to rewrite hardware status page */
- I810_WRITE(0x02080, 0x1ffff000);
}
kfree(dev->dev_private);
dev->dev_private = NULL;
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c
index d4266bd..ec12f7d 100644
--- a/drivers/gpu/drm/i810/i810_drv.c
+++ b/drivers/gpu/drm/i810/i810_drv.c
@@ -43,6 +43,17 @@ static struct pci_device_id pciidlist[] = {
i810_PCI_IDS
};
+static const struct file_operations i810_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR |
@@ -55,17 +66,7 @@ static struct drm_driver driver = {
.reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
.dma_quiescent = i810_driver_dma_quiescent,
.ioctls = i810_ioctls,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .llseek = noop_llseek,
- },
-
+ .fops = &i810_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 0ae6a7c..808b255 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -28,6 +28,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
intel_dvo.o \
intel_ringbuffer.o \
intel_overlay.o \
+ intel_sprite.o \
intel_opregion.o \
dvo_ch7xxx.o \
dvo_ch7017.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 004b048..deaa657 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -121,11 +121,11 @@ static const char *cache_level_str(int type)
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
- seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s",
+ seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s",
&obj->base,
get_pin_flag(obj),
get_tiling_flag(obj),
- obj->base.size,
+ obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain,
obj->last_rendering_seqno,
@@ -653,7 +653,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
seq_printf(m, " Size : %08x\n", ring->size);
seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
- if (IS_GEN6(dev)) {
+ if (IS_GEN6(dev) || IS_GEN7(dev)) {
seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
}
@@ -1001,7 +1001,7 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)
return 0;
}
-static int i915_drpc_info(struct seq_file *m, void *unused)
+static int ironlake_drpc_info(struct seq_file *m)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
@@ -1068,6 +1068,95 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
return 0;
}
+static int gen6_drpc_info(struct seq_file *m)
+{
+
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 rpmodectl1, gt_core_status, rcctl1;
+ unsigned forcewake_count;
+ int count = 0, ret;
+
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ spin_lock_irq(&dev_priv->gt_lock);
+ forcewake_count = dev_priv->forcewake_count;
+ spin_unlock_irq(&dev_priv->gt_lock);
+
+ if (forcewake_count) {
+ seq_printf(m, "RC information inaccurate because somebody "
+ "holds a forcewake reference\n");
+ } else {
+ /* NB: we cannot use forcewake, else we read the wrong values */
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
+ udelay(10);
+ seq_printf(m, "RC information accurate: %s\n", yesno(count < 51));
+ }
+
+ gt_core_status = readl(dev_priv->regs + GEN6_GT_CORE_STATUS);
+ trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4);
+
+ rpmodectl1 = I915_READ(GEN6_RP_CONTROL);
+ rcctl1 = I915_READ(GEN6_RC_CONTROL);
+ mutex_unlock(&dev->struct_mutex);
+
+ seq_printf(m, "Video Turbo Mode: %s\n",
+ yesno(rpmodectl1 & GEN6_RP_MEDIA_TURBO));
+ seq_printf(m, "HW control enabled: %s\n",
+ yesno(rpmodectl1 & GEN6_RP_ENABLE));
+ seq_printf(m, "SW control enabled: %s\n",
+ yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
+ GEN6_RP_MEDIA_SW_MODE));
+ seq_printf(m, "RC1e Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
+ seq_printf(m, "RC6 Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
+ seq_printf(m, "Deep RC6 Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
+ seq_printf(m, "Deepest RC6 Enabled: %s\n",
+ yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
+ seq_printf(m, "Current RC state: ");
+ switch (gt_core_status & GEN6_RCn_MASK) {
+ case GEN6_RC0:
+ if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
+ seq_printf(m, "Core Power Down\n");
+ else
+ seq_printf(m, "on\n");
+ break;
+ case GEN6_RC3:
+ seq_printf(m, "RC3\n");
+ break;
+ case GEN6_RC6:
+ seq_printf(m, "RC6\n");
+ break;
+ case GEN6_RC7:
+ seq_printf(m, "RC7\n");
+ break;
+ default:
+ seq_printf(m, "Unknown\n");
+ break;
+ }
+
+ seq_printf(m, "Core Power Down: %s\n",
+ yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
+ return 0;
+}
+
+static int i915_drpc_info(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+
+ if (IS_GEN6(dev) || IS_GEN7(dev))
+ return gen6_drpc_info(m);
+ else
+ return ironlake_drpc_info(m);
+}
+
static int i915_fbc_status(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1314,9 +1403,13 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned forcewake_count;
+
+ spin_lock_irq(&dev_priv->gt_lock);
+ forcewake_count = dev_priv->forcewake_count;
+ spin_unlock_irq(&dev_priv->gt_lock);
- seq_printf(m, "forcewake count = %d\n",
- atomic_read(&dev_priv->forcewake_count));
+ seq_printf(m, "forcewake count = %u\n", forcewake_count);
return 0;
}
@@ -1581,7 +1674,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- if (!IS_GEN6(dev))
+ if (INTEL_INFO(dev)->gen < 6)
return 0;
ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1598,7 +1691,7 @@ int i915_forcewake_release(struct inode *inode, struct file *file)
struct drm_device *dev = inode->i_private;
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!IS_GEN6(dev))
+ if (INTEL_INFO(dev)->gen < 6)
return 0;
/*
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index a9ae374..ddfe3d9 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -781,6 +781,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_RELAXED_DELTA:
value = 1;
break;
+ case I915_PARAM_HAS_GEN7_SOL_RESET:
+ value = 1;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
@@ -2042,6 +2045,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (!IS_I945G(dev) && !IS_I945GM(dev))
pci_enable_msi(dev->pdev);
+ spin_lock_init(&dev_priv->gt_lock);
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->error_lock);
spin_lock_init(&dev_priv->rps_lock);
@@ -2305,6 +2309,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index a1103fc..308f819 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -368,11 +368,12 @@ void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
*/
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
- WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+ unsigned long irqflags;
- /* Forcewake is atomic in case we get in here without the lock */
- if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+ if (dev_priv->forcewake_count++ == 0)
dev_priv->display.force_wake_get(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}
void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
@@ -392,10 +393,12 @@ void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
*/
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
- WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+ unsigned long irqflags;
- if (atomic_dec_and_test(&dev_priv->forcewake_count))
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+ if (--dev_priv->forcewake_count == 0)
dev_priv->display.force_wake_put(dev_priv);
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}
void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
@@ -597,9 +600,36 @@ static int ironlake_do_reset(struct drm_device *dev, u8 flags)
static int gen6_do_reset(struct drm_device *dev, u8 flags)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+ unsigned long irqflags;
- I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL);
- return wait_for((I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+ /* Hold gt_lock across the reset to prevent any register access
+ * while forcewake is not set up correctly
+ */
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+
+ /* Reset the chip */
+
+ /* GEN6_GDRST is not in the gt power well, no need to check
+ * for fifo space for the write or forcewake the chip for
+ * the read
+ */
+ I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);
+
+ /* Spin waiting for the device to ack the reset request */
+ ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+
+ /* If reset with a user forcewake, try to restore, otherwise turn it off */
+ if (dev_priv->forcewake_count)
+ dev_priv->display.force_wake_get(dev_priv);
+ else
+ dev_priv->display.force_wake_put(dev_priv);
+
+ /* Restore fifo count */
+ dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+ return ret;
}
/**
@@ -643,9 +673,6 @@ int i915_reset(struct drm_device *dev, u8 flags)
case 7:
case 6:
ret = gen6_do_reset(dev, flags);
- /* If reset with a user forcewake, try to restore */
- if (atomic_read(&dev_priv->forcewake_count))
- __gen6_gt_force_wake_get(dev_priv);
break;
case 5:
ret = ironlake_do_reset(dev, flags);
@@ -810,6 +837,21 @@ static struct vm_operations_struct i915_gem_vm_ops = {
.close = drm_gem_vm_close,
};
+static const struct file_operations i915_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_gem_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = i915_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
/* Don't use MTRRs here; the Xserver or userspace app should
* deal with them for Intel hardware.
@@ -843,21 +885,7 @@ static struct drm_driver driver = {
.dumb_map_offset = i915_gem_mmap_gtt,
.dumb_destroy = i915_gem_dumb_destroy,
.ioctls = i915_ioctls,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_gem_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .read = drm_read,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = i915_compat_ioctl,
-#endif
- .llseek = noop_llseek,
- },
-
+ .fops = &i915_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -922,20 +950,18 @@ MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
-/* We give fast paths for the really cool registers */
-#define NEEDS_FORCE_WAKE(dev_priv, reg) \
- (((dev_priv)->info->gen >= 6) && \
- ((reg) < 0x40000) && \
- ((reg) != FORCEWAKE) && \
- ((reg) != ECOBUS))
-
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
u##x val = 0; \
if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
- gen6_gt_force_wake_get(dev_priv); \
+ unsigned long irqflags; \
+ spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
+ if (dev_priv->forcewake_count == 0) \
+ dev_priv->display.force_wake_get(dev_priv); \
val = read##y(dev_priv->regs + reg); \
- gen6_gt_force_wake_put(dev_priv); \
+ if (dev_priv->forcewake_count == 0) \
+ dev_priv->display.force_wake_put(dev_priv); \
+ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
} else { \
val = read##y(dev_priv->regs + reg); \
} \
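Editorial sketch, not part of the patch: after this rework the 32-bit read path for a register that NEEDS_FORCE_WAKE behaves roughly as below -- gt_lock is held across the access, and forcewake is only toggled when nobody else already holds a reference. The function name is made up; the real code stays inside the __i915_read macro.

static u32 example_i915_read32(struct drm_i915_private *dev_priv, u32 reg)
{
	unsigned long irqflags;
	u32 val;

	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
	if (dev_priv->forcewake_count == 0)
		dev_priv->display.force_wake_get(dev_priv);
	val = readl(dev_priv->regs + reg);
	if (dev_priv->forcewake_count == 0)
		dev_priv->display.force_wake_put(dev_priv);
	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);

	return val;
}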
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 554bef7..9689ca3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -207,6 +207,8 @@ struct drm_i915_display_funcs {
int (*get_display_clock_speed)(struct drm_device *dev);
int (*get_fifo_size)(struct drm_device *dev, int plane);
void (*update_wm)(struct drm_device *dev);
+ void (*update_sprite_wm)(struct drm_device *dev, int pipe,
+ uint32_t sprite_width, int pixel_size);
int (*crtc_mode_set)(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -286,7 +288,13 @@ typedef struct drm_i915_private {
int relative_constants_mode;
void __iomem *regs;
- u32 gt_fifo_count;
+ /** gt_fifo_count and the subsequent register write are synchronized
+ * with dev->struct_mutex. */
+ unsigned gt_fifo_count;
+ /** forcewake_count is protected by gt_lock */
+ unsigned forcewake_count;
+ /** gt_lock is also taken in irq contexts. */
+ struct spinlock gt_lock;
struct intel_gmbus {
struct i2c_adapter adapter;
@@ -337,6 +345,8 @@ typedef struct drm_i915_private {
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd;
+ uint32_t last_acthd_bsd;
+ uint32_t last_acthd_blt;
uint32_t last_instdone;
uint32_t last_instdone1;
@@ -350,6 +360,7 @@ typedef struct drm_i915_private {
/* overlay */
struct intel_overlay *overlay;
+ bool sprite_scaling_enabled;
/* LVDS info */
int backlight_level; /* restore backlight to this value */
@@ -736,8 +747,6 @@ typedef struct drm_i915_private {
struct drm_property *broadcast_rgb_property;
struct drm_property *force_audio_property;
-
- atomic_t forcewake_count;
} drm_i915_private_t;
enum i915_cache_level {
@@ -1362,8 +1371,7 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
(((dev_priv)->info->gen >= 6) && \
((reg) < 0x40000) && \
- ((reg) != FORCEWAKE) && \
- ((reg) != ECOBUS))
+ ((reg) != FORCEWAKE))
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8359dc7..e55badb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2006,9 +2006,9 @@ i915_wait_request(struct intel_ring_buffer *ring,
|| atomic_read(&dev_priv->mm.wedged));
ring->irq_put(ring);
- } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
- seqno) ||
- atomic_read(&dev_priv->mm.wedged), 3000))
+ } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
+ seqno) ||
+ atomic_read(&dev_priv->mm.wedged), 3000))
ret = -EBUSY;
ring->waiting_seqno = 0;
@@ -3309,6 +3309,10 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
ret = -EIO;
+ } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
+ seqno) ||
+ atomic_read(&dev_priv->mm.wedged), 3000)) {
+ ret = -EBUSY;
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index b9da890..65e1f00 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -971,6 +971,31 @@ i915_gem_execbuffer_retire_commands(struct drm_device *dev,
}
static int
+i915_reset_gen7_sol_offsets(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int ret, i;
+
+ if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS])
+ return 0;
+
+ ret = intel_ring_begin(ring, 4 * 3);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < 4; i++) {
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
+ intel_ring_emit(ring, 0);
+ }
+
+ intel_ring_advance(ring);
+
+ return 0;
+}
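Editorial note on the ring-space arithmetic above, not part of the patch:

/* Each loop iteration emits three dwords -- the MI_LOAD_REGISTER_IMM(1)
 * header, the GEN7_SO_WRITE_OFFSET(i) register address, and the zero value --
 * repeated for the four stream-output buffers, so intel_ring_begin(ring, 4 * 3)
 * reserves exactly the twelve dwords the loop writes.
 */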
+
+static int
i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_file *file,
struct drm_i915_gem_execbuffer2 *args,
@@ -984,6 +1009,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct intel_ring_buffer *ring;
u32 exec_start, exec_len;
u32 seqno;
+ u32 mask;
int ret, mode, i;
if (!i915_gem_check_execbuffer(args)) {
@@ -1021,6 +1047,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+ mask = I915_EXEC_CONSTANTS_MASK;
switch (mode) {
case I915_EXEC_CONSTANTS_REL_GENERAL:
case I915_EXEC_CONSTANTS_ABSOLUTE:
@@ -1034,18 +1061,9 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
mode == I915_EXEC_CONSTANTS_REL_SURFACE)
return -EINVAL;
- ret = intel_ring_begin(ring, 4);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit(ring, INSTPM);
- intel_ring_emit(ring,
- I915_EXEC_CONSTANTS_MASK << 16 | mode);
- intel_ring_advance(ring);
-
- dev_priv->relative_constants_mode = mode;
+ /* The HW changed the meaning of this bit on gen6 */
+ if (INTEL_INFO(dev)->gen >= 6)
+ mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
}
break;
default:
@@ -1176,6 +1194,27 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
}
+ if (ring == &dev_priv->ring[RCS] &&
+ mode != dev_priv->relative_constants_mode) {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ goto err;
+
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, INSTPM);
+ intel_ring_emit(ring, mask << 16 | mode);
+ intel_ring_advance(ring);
+
+ dev_priv->relative_constants_mode = mode;
+ }
+
+ if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
+ ret = i915_reset_gen7_sol_offsets(dev, ring);
+ if (ret)
+ goto err;
+ }
+
trace_i915_gem_ring_dispatch(ring, seqno);
exec_start = batch_obj->gtt_offset + args->batch_start_offset;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b40004b..5bd4361 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1205,7 +1205,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
} else {
int dspaddr = DSPADDR(intel_crtc->plane);
stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
- crtc->y * crtc->fb->pitch +
+ crtc->y * crtc->fb->pitches[0] +
crtc->x * crtc->fb->bits_per_pixel/8);
}
@@ -1649,13 +1649,6 @@ static bool kick_ring(struct intel_ring_buffer *ring)
I915_WRITE_CTL(ring, tmp);
return true;
}
- if (IS_GEN6(dev) &&
- (tmp & RING_WAIT_SEMAPHORE)) {
- DRM_ERROR("Kicking stuck semaphore on %s\n",
- ring->name);
- I915_WRITE_CTL(ring, tmp);
- return true;
- }
return false;
}
@@ -1669,7 +1662,7 @@ void i915_hangcheck_elapsed(unsigned long data)
{
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t acthd, instdone, instdone1;
+ uint32_t acthd, instdone, instdone1, acthd_bsd, acthd_blt;
bool err = false;
if (!i915_enable_hangcheck)
@@ -1686,16 +1679,21 @@ void i915_hangcheck_elapsed(unsigned long data)
}
if (INTEL_INFO(dev)->gen < 4) {
- acthd = I915_READ(ACTHD);
instdone = I915_READ(INSTDONE);
instdone1 = 0;
} else {
- acthd = I915_READ(ACTHD_I965);
instdone = I915_READ(INSTDONE_I965);
instdone1 = I915_READ(INSTDONE1);
}
+ acthd = intel_ring_get_active_head(&dev_priv->ring[RCS]);
+ acthd_bsd = HAS_BSD(dev) ?
+ intel_ring_get_active_head(&dev_priv->ring[VCS]) : 0;
+ acthd_blt = HAS_BLT(dev) ?
+ intel_ring_get_active_head(&dev_priv->ring[BCS]) : 0;
if (dev_priv->last_acthd == acthd &&
+ dev_priv->last_acthd_bsd == acthd_bsd &&
+ dev_priv->last_acthd_blt == acthd_blt &&
dev_priv->last_instdone == instdone &&
dev_priv->last_instdone1 == instdone1) {
if (dev_priv->hangcheck_count++ > 1) {
@@ -1727,6 +1725,8 @@ void i915_hangcheck_elapsed(unsigned long data)
dev_priv->hangcheck_count = 0;
dev_priv->last_acthd = acthd;
+ dev_priv->last_acthd_bsd = acthd_bsd;
+ dev_priv->last_acthd_blt = acthd_blt;
dev_priv->last_instdone = instdone;
dev_priv->last_instdone1 = instdone1;
}
@@ -1751,7 +1751,8 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
I915_WRITE(HWSTAM, 0xeffe);
- if (IS_GEN6(dev) || IS_GEN7(dev)) {
+
+ if (IS_GEN6(dev)) {
/* Workaround stalls observed on Sandy Bridge GPUs by
* making the blitter command streamer generate a
* write to the Hardware Status Page for
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index a26d5b0..03c53fc 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -442,6 +442,7 @@
#define INSTPM_AGPBUSY_DIS (1<<11) /* gen3: when disabled, pending interrupts
will not assert AGPBUSY# and will only
be delivered when out of C3. */
+#define INSTPM_FORCE_ORDERING (1<<7) /* GEN6+ */
#define ACTHD 0x020c8
#define FW_BLC 0x020d8
#define FW_BLC2 0x020dc
@@ -2321,6 +2322,7 @@
#define PIPECONF_PROGRESSIVE (0 << 21)
#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21)
#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21)
+#define PIPECONF_INTERLACE_MASK (7 << 21)
#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
#define PIPECONF_BPP_MASK (0x000000e0)
#define PIPECONF_BPP_8 (0<<5)
@@ -2459,6 +2461,8 @@
#define WM3_LP_ILK 0x45110
#define WM3_LP_EN (1<<31)
#define WM1S_LP_ILK 0x45120
+#define WM2S_LP_IVB 0x45124
+#define WM3S_LP_IVB 0x45128
#define WM1S_LP_EN (1<<31)
/* Memory latency timer register */
@@ -2675,6 +2679,140 @@
#define _DSPBSURF 0x7119C
#define _DSPBTILEOFF 0x711A4
+/* Sprite A control */
+#define _DVSACNTR 0x72180
+#define DVS_ENABLE (1<<31)
+#define DVS_GAMMA_ENABLE (1<<30)
+#define DVS_PIXFORMAT_MASK (3<<25)
+#define DVS_FORMAT_YUV422 (0<<25)
+#define DVS_FORMAT_RGBX101010 (1<<25)
+#define DVS_FORMAT_RGBX888 (2<<25)
+#define DVS_FORMAT_RGBX161616 (3<<25)
+#define DVS_SOURCE_KEY (1<<22)
+#define DVS_RGB_ORDER_RGBX (1<<20)
+#define DVS_YUV_BYTE_ORDER_MASK (3<<16)
+#define DVS_YUV_ORDER_YUYV (0<<16)
+#define DVS_YUV_ORDER_UYVY (1<<16)
+#define DVS_YUV_ORDER_YVYU (2<<16)
+#define DVS_YUV_ORDER_VYUY (3<<16)
+#define DVS_DEST_KEY (1<<2)
+#define DVS_TRICKLE_FEED_DISABLE (1<<14)
+#define DVS_TILED (1<<10)
+#define _DVSALINOFF 0x72184
+#define _DVSASTRIDE 0x72188
+#define _DVSAPOS 0x7218c
+#define _DVSASIZE 0x72190
+#define _DVSAKEYVAL 0x72194
+#define _DVSAKEYMSK 0x72198
+#define _DVSASURF 0x7219c
+#define _DVSAKEYMAXVAL 0x721a0
+#define _DVSATILEOFF 0x721a4
+#define _DVSASURFLIVE 0x721ac
+#define _DVSASCALE 0x72204
+#define DVS_SCALE_ENABLE (1<<31)
+#define DVS_FILTER_MASK (3<<29)
+#define DVS_FILTER_MEDIUM (0<<29)
+#define DVS_FILTER_ENHANCING (1<<29)
+#define DVS_FILTER_SOFTENING (2<<29)
+#define DVS_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
+#define DVS_VERTICAL_OFFSET_ENABLE (1<<27)
+#define _DVSAGAMC 0x72300
+
+#define _DVSBCNTR 0x73180
+#define _DVSBLINOFF 0x73184
+#define _DVSBSTRIDE 0x73188
+#define _DVSBPOS 0x7318c
+#define _DVSBSIZE 0x73190
+#define _DVSBKEYVAL 0x73194
+#define _DVSBKEYMSK 0x73198
+#define _DVSBSURF 0x7319c
+#define _DVSBKEYMAXVAL 0x731a0
+#define _DVSBTILEOFF 0x731a4
+#define _DVSBSURFLIVE 0x731ac
+#define _DVSBSCALE 0x73204
+#define _DVSBGAMC 0x73300
+
+#define DVSCNTR(pipe) _PIPE(pipe, _DVSACNTR, _DVSBCNTR)
+#define DVSLINOFF(pipe) _PIPE(pipe, _DVSALINOFF, _DVSBLINOFF)
+#define DVSSTRIDE(pipe) _PIPE(pipe, _DVSASTRIDE, _DVSBSTRIDE)
+#define DVSPOS(pipe) _PIPE(pipe, _DVSAPOS, _DVSBPOS)
+#define DVSSURF(pipe) _PIPE(pipe, _DVSASURF, _DVSBSURF)
+#define DVSKEYMAX(pipe) _PIPE(pipe, _DVSAKEYMAXVAL, _DVSBKEYMAXVAL)
+#define DVSSIZE(pipe) _PIPE(pipe, _DVSASIZE, _DVSBSIZE)
+#define DVSSCALE(pipe) _PIPE(pipe, _DVSASCALE, _DVSBSCALE)
+#define DVSTILEOFF(pipe) _PIPE(pipe, _DVSATILEOFF, _DVSBTILEOFF)
+#define DVSKEYVAL(pipe) _PIPE(pipe, _DVSAKEYVAL, _DVSBKEYVAL)
+#define DVSKEYMSK(pipe) _PIPE(pipe, _DVSAKEYMSK, _DVSBKEYMSK)
+
+#define _SPRA_CTL 0x70280
+#define SPRITE_ENABLE (1<<31)
+#define SPRITE_GAMMA_ENABLE (1<<30)
+#define SPRITE_PIXFORMAT_MASK (7<<25)
+#define SPRITE_FORMAT_YUV422 (0<<25)
+#define SPRITE_FORMAT_RGBX101010 (1<<25)
+#define SPRITE_FORMAT_RGBX888 (2<<25)
+#define SPRITE_FORMAT_RGBX161616 (3<<25)
+#define SPRITE_FORMAT_YUV444 (4<<25)
+#define SPRITE_FORMAT_XR_BGR101010 (5<<25) /* Extended range */
+#define SPRITE_CSC_ENABLE (1<<24)
+#define SPRITE_SOURCE_KEY (1<<22)
+#define SPRITE_RGB_ORDER_RGBX (1<<20) /* only for 888 and 161616 */
+#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1<<19)
+#define SPRITE_YUV_CSC_FORMAT_BT709 (1<<18) /* 0 is BT601 */
+#define SPRITE_YUV_BYTE_ORDER_MASK (3<<16)
+#define SPRITE_YUV_ORDER_YUYV (0<<16)
+#define SPRITE_YUV_ORDER_UYVY (1<<16)
+#define SPRITE_YUV_ORDER_YVYU (2<<16)
+#define SPRITE_YUV_ORDER_VYUY (3<<16)
+#define SPRITE_TRICKLE_FEED_DISABLE (1<<14)
+#define SPRITE_INT_GAMMA_ENABLE (1<<13)
+#define SPRITE_TILED (1<<10)
+#define SPRITE_DEST_KEY (1<<2)
+#define _SPRA_LINOFF 0x70284
+#define _SPRA_STRIDE 0x70288
+#define _SPRA_POS 0x7028c
+#define _SPRA_SIZE 0x70290
+#define _SPRA_KEYVAL 0x70294
+#define _SPRA_KEYMSK 0x70298
+#define _SPRA_SURF 0x7029c
+#define _SPRA_KEYMAX 0x702a0
+#define _SPRA_TILEOFF 0x702a4
+#define _SPRA_SCALE 0x70304
+#define SPRITE_SCALE_ENABLE (1<<31)
+#define SPRITE_FILTER_MASK (3<<29)
+#define SPRITE_FILTER_MEDIUM (0<<29)
+#define SPRITE_FILTER_ENHANCING (1<<29)
+#define SPRITE_FILTER_SOFTENING (2<<29)
+#define SPRITE_VERTICAL_OFFSET_HALF (1<<28) /* must be enabled below */
+#define SPRITE_VERTICAL_OFFSET_ENABLE (1<<27)
+#define _SPRA_GAMC 0x70400
+
+#define _SPRB_CTL 0x71280
+#define _SPRB_LINOFF 0x71284
+#define _SPRB_STRIDE 0x71288
+#define _SPRB_POS 0x7128c
+#define _SPRB_SIZE 0x71290
+#define _SPRB_KEYVAL 0x71294
+#define _SPRB_KEYMSK 0x71298
+#define _SPRB_SURF 0x7129c
+#define _SPRB_KEYMAX 0x712a0
+#define _SPRB_TILEOFF 0x712a4
+#define _SPRB_SCALE 0x71304
+#define _SPRB_GAMC 0x71400
+
+#define SPRCTL(pipe) _PIPE(pipe, _SPRA_CTL, _SPRB_CTL)
+#define SPRLINOFF(pipe) _PIPE(pipe, _SPRA_LINOFF, _SPRB_LINOFF)
+#define SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _SPRB_STRIDE)
+#define SPRPOS(pipe) _PIPE(pipe, _SPRA_POS, _SPRB_POS)
+#define SPRSIZE(pipe) _PIPE(pipe, _SPRA_SIZE, _SPRB_SIZE)
+#define SPRKEYVAL(pipe) _PIPE(pipe, _SPRA_KEYVAL, _SPRB_KEYVAL)
+#define SPRKEYMSK(pipe) _PIPE(pipe, _SPRA_KEYMSK, _SPRB_KEYMSK)
+#define SPRSURF(pipe) _PIPE(pipe, _SPRA_SURF, _SPRB_SURF)
+#define SPRKEYMAX(pipe) _PIPE(pipe, _SPRA_KEYMAX, _SPRB_KEYMAX)
+#define SPRTILEOFF(pipe) _PIPE(pipe, _SPRA_TILEOFF, _SPRB_TILEOFF)
+#define SPRSCALE(pipe) _PIPE(pipe, _SPRA_SCALE, _SPRB_SCALE)
+#define SPRGAMC(pipe) _PIPE(pipe, _SPRA_GAMC, _SPRB_GAMC)
+
/* VBIOS regs */
#define VGACNTRL 0x71400
# define VGA_DISP_DISABLE (1 << 31)
@@ -2882,10 +3020,28 @@
#define ILK_DPFC_DIS1 (1<<8)
#define ILK_DPFC_DIS2 (1<<9)
+#define IVB_CHICKEN3 0x4200c
+# define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE (1 << 5)
+# define CHICKEN3_DGMG_DONE_FIX_DISABLE (1 << 2)
+
#define DISP_ARB_CTL 0x45000
#define DISP_TILE_SURFACE_SWIZZLING (1<<13)
#define DISP_FBC_WM_DIS (1<<15)
+/* GEN7 chicken */
+#define GEN7_COMMON_SLICE_CHICKEN1 0x7010
+# define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC ((1<<10) | (1<<26))
+
+#define GEN7_L3CNTLREG1 0xB01C
+#define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C4FFF8C
+
+#define GEN7_L3_CHICKEN_MODE_REGISTER 0xB030
+#define GEN7_WA_L3_CHICKEN_MODE 0x20000000
+
+/* WaCatErrorRejectionIssue */
+#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
+#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
+
/* PCH */
/* south display engine interrupt */
@@ -3476,6 +3632,7 @@
#define GT_FIFO_NUM_RESERVED_ENTRIES 20
#define GEN6_UCGCTL2 0x9404
+# define GEN6_RCZUNIT_CLOCK_GATE_DISABLE (1 << 13)
# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
@@ -3500,7 +3657,11 @@
#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
#define GEN6_RP_CONTROL 0xA024
#define GEN6_RP_MEDIA_TURBO (1<<11)
-#define GEN6_RP_USE_NORMAL_FREQ (1<<9)
+#define GEN6_RP_MEDIA_MODE_MASK (3<<9)
+#define GEN6_RP_MEDIA_HW_TURBO_MODE (3<<9)
+#define GEN6_RP_MEDIA_HW_NORMAL_MODE (2<<9)
+#define GEN6_RP_MEDIA_HW_MODE (1<<9)
+#define GEN6_RP_MEDIA_SW_MODE (0<<9)
#define GEN6_RP_MEDIA_IS_GFX (1<<8)
#define GEN6_RP_ENABLE (1<<7)
#define GEN6_RP_UP_IDLE_MIN (0x1<<3)
@@ -3557,6 +3718,14 @@
#define GEN6_PCODE_DATA 0x138128
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
+#define GEN6_GT_CORE_STATUS 0x138060
+#define GEN6_CORE_CPD_STATE_MASK (7<<4)
+#define GEN6_RCn_MASK 7
+#define GEN6_RC0 0
+#define GEN6_RC3 2
+#define GEN6_RC6 3
+#define GEN6_RC7 4
+
#define G4X_AUD_VID_DID 0x62020
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
@@ -3569,17 +3738,23 @@
#define G4X_ELD_ACK (1 << 4)
#define G4X_HDMIW_HDMIEDID 0x6210C
-#define GEN5_HDMIW_HDMIEDID_A 0xE2050
-#define GEN5_AUD_CNTL_ST_A 0xE20B4
-#define GEN5_ELD_BUFFER_SIZE (0x1f << 10)
-#define GEN5_ELD_ADDRESS (0x1f << 5)
-#define GEN5_ELD_ACK (1 << 4)
-#define GEN5_AUD_CNTL_ST2 0xE20C0
-#define GEN5_ELD_VALIDB (1 << 0)
-#define GEN5_CP_READYB (1 << 1)
-
-#define GEN7_HDMIW_HDMIEDID_A 0xE5050
-#define GEN7_AUD_CNTRL_ST_A 0xE50B4
-#define GEN7_AUD_CNTRL_ST2 0xE50C0
+#define IBX_HDMIW_HDMIEDID_A 0xE2050
+#define IBX_AUD_CNTL_ST_A 0xE20B4
+#define IBX_ELD_BUFFER_SIZE (0x1f << 10)
+#define IBX_ELD_ADDRESS (0x1f << 5)
+#define IBX_ELD_ACK (1 << 4)
+#define IBX_AUD_CNTL_ST2 0xE20C0
+#define IBX_ELD_VALIDB (1 << 0)
+#define IBX_CP_READYB (1 << 1)
+
+#define CPT_HDMIW_HDMIEDID_A 0xE5050
+#define CPT_AUD_CNTL_ST_A 0xE50B4
+#define CPT_AUD_CNTRL_ST2 0xE50C0
+
+/* These are the four 32-bit write offset registers, one per stream
+ * output buffer. Each determines the offset from its
+ * 3DSTATE_SO_BUFFER at which the next streamed vertex output is written.
+ */
+#define GEN7_SO_WRITE_OFFSET(n) (0x5280 + (n) * 4)
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 7886e4f..2b5eb22 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -28,14 +28,19 @@
#include "drm.h"
#include "i915_drm.h"
#include "intel_drv.h"
+#include "i915_reg.h"
static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll_reg;
+ /* On IVB, 3rd pipe shares PLL with another one */
+ if (pipe > 1)
+ return false;
+
if (HAS_PCH_SPLIT(dev))
- dpll_reg = (pipe == PIPE_A) ? _PCH_DPLL_A : _PCH_DPLL_B;
+ dpll_reg = PCH_DPLL(pipe);
else
dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
@@ -822,7 +827,7 @@ int i915_save_state(struct drm_device *dev)
if (IS_IRONLAKE_M(dev))
ironlake_disable_drps(dev);
- if (IS_GEN6(dev))
+ if (INTEL_INFO(dev)->gen >= 6)
gen6_disable_rps(dev);
/* Cache mode state */
@@ -881,7 +886,7 @@ int i915_restore_state(struct drm_device *dev)
intel_init_emon(dev);
}
- if (IS_GEN6(dev)) {
+ if (INTEL_INFO(dev)->gen >= 6) {
gen6_enable_rps(dev_priv);
gen6_update_ring_freq(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index 8af3735..dbda6e3 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -467,8 +467,12 @@ struct edp_link_params {
struct bdb_edp {
struct edp_power_seq power_seqs[16];
u32 color_depth;
- u32 sdrrs_msa_timing_delay;
struct edp_link_params link_params[16];
+ u32 sdrrs_msa_timing_delay;
+
+ /* The ith bit indicates enabled/disabled for the (i+1)th panel */
+ u16 edp_s3d_feature;
+ u16 edp_t3_optimization;
} __attribute__ ((packed));
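Editorial sketch to make the bit-index comment above concrete (the helper name is hypothetical, not part of the patch): for a 1-based panel number p, bit p-1 of the new u16 fields carries the flag.

static inline bool example_edp_s3d_enabled(const struct bdb_edp *edp, int panel)
{
	/* bit (panel - 1) flags the panel numbered 'panel' */
	return (edp->edp_s3d_feature >> (panel - 1)) & 1;
}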
void intel_setup_bios(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index fee0ad0..dd729d4 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -24,6 +24,7 @@
* Eric Anholt <eric@anholt.net>
*/
+#include <linux/dmi.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include "drmP.h"
@@ -540,6 +541,24 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
.destroy = intel_encoder_destroy,
};
+static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id)
+{
+ DRM_DEBUG_KMS("Skipping CRT initialization for %s\n", id->ident);
+ return 1;
+}
+
+static const struct dmi_system_id intel_no_crt[] = {
+ {
+ .callback = intel_no_crt_dmi_callback,
+ .ident = "ACER ZGB",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
+ },
+ },
+ { }
+};
+
void intel_crt_init(struct drm_device *dev)
{
struct drm_connector *connector;
@@ -547,6 +566,10 @@ void intel_crt_init(struct drm_device *dev)
struct intel_connector *intel_connector;
struct drm_i915_private *dev_priv = dev->dev_private;
+ /* Skip machines without VGA that falsely report hotplug events */
+ if (dmi_check_system(intel_no_crt))
+ return;
+
crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
if (!crt)
return;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index daa5743..f851db7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -915,8 +915,8 @@ static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
pipe_name(pipe));
}
-static void assert_pipe(struct drm_i915_private *dev_priv,
- enum pipe pipe, bool state)
+void assert_pipe(struct drm_i915_private *dev_priv,
+ enum pipe pipe, bool state)
{
int reg;
u32 val;
@@ -929,8 +929,6 @@ static void assert_pipe(struct drm_i915_private *dev_priv,
"pipe %c assertion failure (expected %s, current %s)\n",
pipe_name(pipe), state_string(state), state_string(cur_state));
}
-#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
-#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
static void assert_plane_enabled(struct drm_i915_private *dev_priv,
enum plane plane)
@@ -1206,7 +1204,8 @@ static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
int reg;
- u32 val;
+ u32 val, pll_mask = TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL,
+ pll_sel = TRANSC_DPLL_ENABLE;
if (pipe > 1)
return;
@@ -1217,6 +1216,15 @@ static void intel_disable_pch_pll(struct drm_i915_private *dev_priv,
/* Make sure transcoder isn't still depending on us */
assert_transcoder_disabled(dev_priv, pipe);
+ if (pipe == 0)
+ pll_sel |= TRANSC_DPLLA_SEL;
+ else if (pipe == 1)
+ pll_sel |= TRANSC_DPLLB_SEL;
+
+
+ if ((I915_READ(PCH_DPLL_SEL) & pll_mask) == pll_sel)
+ return;
+
reg = PCH_DPLL(pipe);
val = I915_READ(reg);
val &= ~DPLL_VCO_ENABLE;
@@ -1511,8 +1519,8 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
u32 fbc_ctl, fbc_ctl2;
cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
- if (fb->pitch < cfb_pitch)
- cfb_pitch = fb->pitch;
+ if (fb->pitches[0] < cfb_pitch)
+ cfb_pitch = fb->pitches[0];
/* FBC_CTL wants 64B units */
cfb_pitch = (cfb_pitch / 64) - 1;
@@ -1864,7 +1872,7 @@ static void intel_update_fbc(struct drm_device *dev)
if (enable_fbc < 0) {
DRM_DEBUG_KMS("fbc set to per-chip default\n");
enable_fbc = 1;
- if (INTEL_INFO(dev)->gen <= 5)
+ if (INTEL_INFO(dev)->gen <= 6)
enable_fbc = 0;
}
if (!enable_fbc) {
@@ -2073,11 +2081,11 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
I915_WRITE(reg, dspcntr);
Start = obj->gtt_offset;
- Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
+ Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- Start, Offset, x, y, fb->pitch);
- I915_WRITE(DSPSTRIDE(plane), fb->pitch);
+ Start, Offset, x, y, fb->pitches[0]);
+ I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(DSPSURF(plane), Start);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
@@ -2154,11 +2162,11 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
I915_WRITE(reg, dspcntr);
Start = obj->gtt_offset;
- Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
+ Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
- Start, Offset, x, y, fb->pitch);
- I915_WRITE(DSPSTRIDE(plane), fb->pitch);
+ Start, Offset, x, y, fb->pitches[0]);
+ I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
I915_WRITE(DSPSURF(plane), Start);
I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
I915_WRITE(DSPADDR(plane), Offset);
@@ -4509,7 +4517,7 @@ static void ironlake_update_wm(struct drm_device *dev)
*/
}
-static void sandybridge_update_wm(struct drm_device *dev)
+void sandybridge_update_wm(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
@@ -4569,7 +4577,8 @@ static void sandybridge_update_wm(struct drm_device *dev)
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
- if (!single_plane_enabled(enabled))
+ if (!single_plane_enabled(enabled) ||
+ dev_priv->sprite_scaling_enabled)
return;
enabled = ffs(enabled) - 1;
@@ -4619,6 +4628,158 @@ static void sandybridge_update_wm(struct drm_device *dev)
cursor_wm);
}
+static bool
+sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
+ uint32_t sprite_width, int pixel_size,
+ const struct intel_watermark_params *display,
+ int display_latency_ns, int *sprite_wm)
+{
+ struct drm_crtc *crtc;
+ int clock;
+ int entries, tlb_miss;
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ if (crtc->fb == NULL || !crtc->enabled) {
+ *sprite_wm = display->guard_size;
+ return false;
+ }
+
+ clock = crtc->mode.clock;
+
+ /* Use the small buffer method to calculate the sprite watermark */
+ entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
+ tlb_miss = display->fifo_size*display->cacheline_size -
+ sprite_width * 8;
+ if (tlb_miss > 0)
+ entries += tlb_miss;
+ entries = DIV_ROUND_UP(entries, display->cacheline_size);
+ *sprite_wm = entries + display->guard_size;
+ if (*sprite_wm > (int)display->max_wm)
+ *sprite_wm = display->max_wm;
+
+ return true;
+}
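Editorial worked example of the small-buffer method above, using purely hypothetical numbers (they are not taken from sandybridge_display_wm_info):

/* clock = 148500 kHz, pixel_size = 4 bytes, latency = 700 ns,
 * cacheline = 64 bytes, and assume the TLB-miss term does not apply:
 *
 *   entries   = ((148500 * 4 / 1000) * 700) / 1000 = 415
 *   entries   = DIV_ROUND_UP(415, 64)              = 7
 *   sprite_wm = 7 + guard_size
 */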
+
+static bool
+sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
+ uint32_t sprite_width, int pixel_size,
+ const struct intel_watermark_params *display,
+ int latency_ns, int *sprite_wm)
+{
+ struct drm_crtc *crtc;
+ unsigned long line_time_us;
+ int clock;
+ int line_count, line_size;
+ int small, large;
+ int entries;
+
+ if (!latency_ns) {
+ *sprite_wm = 0;
+ return false;
+ }
+
+ crtc = intel_get_crtc_for_plane(dev, plane);
+ clock = crtc->mode.clock;
+ if (!clock) {
+ *sprite_wm = 0;
+ return false;
+ }
+
+ line_time_us = (sprite_width * 1000) / clock;
+ if (!line_time_us) {
+ *sprite_wm = 0;
+ return false;
+ }
+
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = sprite_width * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
+ *sprite_wm = entries + display->guard_size;
+
+ return *sprite_wm > 0x3ff ? false : true;
+}
+
+static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
+ uint32_t sprite_width, int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
+ int sprite_wm, reg;
+ int ret;
+
+ switch (pipe) {
+ case 0:
+ reg = WM0_PIPEA_ILK;
+ break;
+ case 1:
+ reg = WM0_PIPEB_ILK;
+ break;
+ case 2:
+ reg = WM0_PIPEC_IVB;
+ break;
+ default:
+ return; /* bad pipe */
+ }
+
+ ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
+ &sandybridge_display_wm_info,
+ latency, &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
+ pipe);
+ return;
+ }
+
+ I915_WRITE(reg, I915_READ(reg) | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
+ DRM_DEBUG_KMS("sprite watermarks for pipe %d - %d\n", pipe, sprite_wm);
+
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM1_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
+ pipe);
+ return;
+ }
+ I915_WRITE(WM1S_LP_ILK, sprite_wm);
+
+ /* Only IVB has two more LP watermarks for sprite */
+ if (!IS_IVYBRIDGE(dev))
+ return;
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM2_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
+ pipe);
+ return;
+ }
+ I915_WRITE(WM2S_LP_IVB, sprite_wm);
+
+ ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
+ pixel_size,
+ &sandybridge_display_srwm_info,
+ SNB_READ_WM3_LATENCY() * 500,
+ &sprite_wm);
+ if (!ret) {
+ DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
+ pipe);
+ return;
+ }
+ I915_WRITE(WM3S_LP_IVB, sprite_wm);
+}
+
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
*
@@ -4659,6 +4820,16 @@ static void intel_update_watermarks(struct drm_device *dev)
dev_priv->display.update_wm(dev);
}
+void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
+ uint32_t sprite_width, int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->display.update_sprite_wm)
+ dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
+ pixel_size);
+}
+
static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
{
if (i915_panel_use_ssc >= 0)
@@ -5145,6 +5316,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
}
}
+ pipeconf &= ~PIPECONF_INTERLACE_MASK;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
/* the chip adds 2 halflines automatically */
@@ -5155,7 +5327,7 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
adjusted_mode->crtc_vsync_end -= 1;
adjusted_mode->crtc_vsync_start -= 1;
} else
- pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
+ pipeconf |= PIPECONF_PROGRESSIVE;
I915_WRITE(HTOTAL(pipe),
(adjusted_mode->crtc_hdisplay - 1) |
@@ -5646,12 +5818,15 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
if (is_lvds) {
temp = I915_READ(PCH_LVDS);
temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
- if (HAS_PCH_CPT(dev))
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~PORT_TRANS_SEL_MASK;
temp |= PORT_TRANS_SEL_CPT(pipe);
- else if (pipe == 1)
- temp |= LVDS_PIPEB_SELECT;
- else
- temp &= ~LVDS_PIPEB_SELECT;
+ } else {
+ if (pipe == 1)
+ temp |= LVDS_PIPEB_SELECT;
+ else
+ temp &= ~LVDS_PIPEB_SELECT;
+ }
/* set the corresponsding LVDS_BORDER bit */
temp |= dev_priv->lvds_border_bits;
@@ -5737,6 +5912,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
}
}
+ pipeconf &= ~PIPECONF_INTERLACE_MASK;
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
/* the chip adds 2 halflines automatically */
@@ -5747,7 +5923,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
adjusted_mode->crtc_vsync_end -= 1;
adjusted_mode->crtc_vsync_start -= 1;
} else
- pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */
+ pipeconf |= PIPECONF_PROGRESSIVE;
I915_WRITE(HTOTAL(pipe),
(adjusted_mode->crtc_hdisplay - 1) |
@@ -5822,14 +5998,45 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
x, y, old_fb);
-
drm_vblank_post_modeset(dev, pipe);
- intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
+ if (ret)
+ intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
+ else
+ intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
return ret;
}
+static bool intel_eld_uptodate(struct drm_connector *connector,
+ int reg_eldv, uint32_t bits_eldv,
+ int reg_elda, uint32_t bits_elda,
+ int reg_edid)
+{
+ struct drm_i915_private *dev_priv = connector->dev->dev_private;
+ uint8_t *eld = connector->eld;
+ uint32_t i;
+
+ i = I915_READ(reg_eldv);
+ i &= bits_eldv;
+
+ if (!eld[0])
+ return !i;
+
+ if (!i)
+ return false;
+
+ i = I915_READ(reg_elda);
+ i &= ~bits_elda;
+ I915_WRITE(reg_elda, i);
+
+ for (i = 0; i < eld[2]; i++)
+ if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
+ return false;
+
+ return true;
+}
+
static void g4x_write_eld(struct drm_connector *connector,
struct drm_crtc *crtc)
{
@@ -5846,6 +6053,12 @@ static void g4x_write_eld(struct drm_connector *connector,
else
eldv = G4X_ELDV_DEVCTG;
+ if (intel_eld_uptodate(connector,
+ G4X_AUD_CNTL_ST, eldv,
+ G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
+ G4X_HDMIW_HDMIEDID))
+ return;
+
i = I915_READ(G4X_AUD_CNTL_ST);
i &= ~(eldv | G4X_ELD_ADDR);
len = (i >> 9) & 0x1f; /* ELD buffer size */
@@ -5876,14 +6089,14 @@ static void ironlake_write_eld(struct drm_connector *connector,
int aud_cntl_st;
int aud_cntrl_st2;
- if (IS_IVYBRIDGE(connector->dev)) {
- hdmiw_hdmiedid = GEN7_HDMIW_HDMIEDID_A;
- aud_cntl_st = GEN7_AUD_CNTRL_ST_A;
- aud_cntrl_st2 = GEN7_AUD_CNTRL_ST2;
+ if (HAS_PCH_IBX(connector->dev)) {
+ hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
+ aud_cntl_st = IBX_AUD_CNTL_ST_A;
+ aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
} else {
- hdmiw_hdmiedid = GEN5_HDMIW_HDMIEDID_A;
- aud_cntl_st = GEN5_AUD_CNTL_ST_A;
- aud_cntrl_st2 = GEN5_AUD_CNTL_ST2;
+ hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
+ aud_cntl_st = CPT_AUD_CNTL_ST_A;
+ aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
}
i = to_intel_crtc(crtc)->pipe;
@@ -5897,14 +6110,25 @@ static void ironlake_write_eld(struct drm_connector *connector,
if (!i) {
DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
/* operate blindly on all ports */
- eldv = GEN5_ELD_VALIDB;
- eldv |= GEN5_ELD_VALIDB << 4;
- eldv |= GEN5_ELD_VALIDB << 8;
+ eldv = IBX_ELD_VALIDB;
+ eldv |= IBX_ELD_VALIDB << 4;
+ eldv |= IBX_ELD_VALIDB << 8;
} else {
DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
- eldv = GEN5_ELD_VALIDB << ((i - 1) * 4);
+ eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
}
+ if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+ DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
+ eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
+ }
+
+ if (intel_eld_uptodate(connector,
+ aud_cntrl_st2, eldv,
+ aud_cntl_st, IBX_ELD_ADDRESS,
+ hdmiw_hdmiedid))
+ return;
+
i = I915_READ(aud_cntrl_st2);
i &= ~eldv;
I915_WRITE(aud_cntrl_st2, i);
@@ -5912,13 +6136,8 @@ static void ironlake_write_eld(struct drm_connector *connector,
if (!eld[0])
return;
- if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
- DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
- eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
- }
-
i = I915_READ(aud_cntl_st);
- i &= ~GEN5_ELD_ADDRESS;
+ i &= ~IBX_ELD_ADDRESS;
I915_WRITE(aud_cntl_st, i);
len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
@@ -5965,7 +6184,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
int i;
/* The clocks have to be on to load the palette. */
- if (!crtc->enabled)
+ if (!crtc->enabled || !intel_crtc->active)
return;
/* use legacy palette for Ironlake */
@@ -6298,7 +6517,7 @@ static struct drm_display_mode load_detect_mode = {
static struct drm_framebuffer *
intel_framebuffer_create(struct drm_device *dev,
- struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
{
struct intel_framebuffer *intel_fb;
@@ -6340,7 +6559,7 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
int depth, int bpp)
{
struct drm_i915_gem_object *obj;
- struct drm_mode_fb_cmd mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd;
obj = i915_gem_alloc_object(dev,
intel_framebuffer_size_for_mode(mode, bpp));
@@ -6349,9 +6568,9 @@ intel_framebuffer_create_for_mode(struct drm_device *dev,
mode_cmd.width = mode->hdisplay;
mode_cmd.height = mode->vdisplay;
- mode_cmd.depth = depth;
- mode_cmd.bpp = bpp;
- mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp);
+ mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
+ bpp);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
return intel_framebuffer_create(dev, &mode_cmd, obj);
}
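/*
 * Illustrative sketch, not part of the patch: the hunk above drops the legacy
 * depth/bpp fields and derives a FOURCC via drm_mode_legacy_fb_format().
 * This is roughly the mapping that helper is expected to perform; the
 * authoritative table lives in the DRM core (drm_crtc.c), and the function
 * name below is hypothetical.
 */
static uint32_t legacy_fb_format_sketch(uint32_t bpp, uint32_t depth)
{
	switch (bpp) {
	case 8:
		return DRM_FORMAT_RGB332;
	case 16:
		return depth == 15 ? DRM_FORMAT_XRGB1555 : DRM_FORMAT_RGB565;
	case 24:
		return DRM_FORMAT_RGB888;
	case 32:
		if (depth == 30)
			return DRM_FORMAT_XRGB2101010;
		return depth == 24 ? DRM_FORMAT_XRGB8888 : DRM_FORMAT_ARGB8888;
	default:
		return DRM_FORMAT_XRGB8888;	/* conservative fallback */
	}
}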
@@ -6372,11 +6591,11 @@ mode_fits_in_fbdev(struct drm_device *dev,
return NULL;
fb = &dev_priv->fbdev->ifb.base;
- if (fb->pitch < intel_framebuffer_pitch_for_width(mode->hdisplay,
- fb->bits_per_pixel))
+ if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
+ fb->bits_per_pixel))
return NULL;
- if (obj->base.size < mode->vdisplay * fb->pitch)
+ if (obj->base.size < mode->vdisplay * fb->pitches[0])
return NULL;
return fb;
@@ -7009,7 +7228,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
goto out;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
- offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+ offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
ret = BEGIN_LP_RING(6);
if (ret)
@@ -7026,7 +7245,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
OUT_RING(MI_NOOP);
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch);
+ OUT_RING(fb->pitches[0]);
OUT_RING(obj->gtt_offset + offset);
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
@@ -7050,7 +7269,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
goto out;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
- offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
+ offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
ret = BEGIN_LP_RING(6);
if (ret)
@@ -7064,7 +7283,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
OUT_RING(MI_NOOP);
OUT_RING(MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch);
+ OUT_RING(fb->pitches[0]);
OUT_RING(obj->gtt_offset + offset);
OUT_RING(MI_NOOP);
@@ -7097,7 +7316,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
*/
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch);
+ OUT_RING(fb->pitches[0]);
OUT_RING(obj->gtt_offset | obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
@@ -7132,7 +7351,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch | obj->tiling_mode);
+ OUT_RING(fb->pitches[0] | obj->tiling_mode);
OUT_RING(obj->gtt_offset);
pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
@@ -7168,7 +7387,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
goto out;
intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
- intel_ring_emit(ring, (fb->pitch | obj->tiling_mode));
+ intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
intel_ring_emit(ring, (obj->gtt_offset));
intel_ring_emit(ring, (MI_NOOP));
intel_ring_advance(ring);
@@ -7594,7 +7813,7 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *intel_fb,
- struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj)
{
int ret;
@@ -7602,21 +7821,25 @@ int intel_framebuffer_init(struct drm_device *dev,
if (obj->tiling_mode == I915_TILING_Y)
return -EINVAL;
- if (mode_cmd->pitch & 63)
+ if (mode_cmd->pitches[0] & 63)
return -EINVAL;
- switch (mode_cmd->bpp) {
- case 8:
- case 16:
- /* Only pre-ILK can handle 5:5:5 */
- if (mode_cmd->depth == 15 && !HAS_PCH_SPLIT(dev))
- return -EINVAL;
+ switch (mode_cmd->pixel_format) {
+ case DRM_FORMAT_RGB332:
+ case DRM_FORMAT_RGB565:
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ case DRM_FORMAT_XRGB2101010:
+ case DRM_FORMAT_ARGB2101010:
+ /* RGB formats are common across chipsets */
break;
-
- case 24:
- case 32:
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_YVYU:
+ case DRM_FORMAT_VYUY:
break;
default:
+ DRM_ERROR("unsupported pixel format\n");
return -EINVAL;
}
@@ -7634,11 +7857,12 @@ int intel_framebuffer_init(struct drm_device *dev,
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
- struct drm_mode_fb_cmd *mode_cmd)
+ struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_i915_gem_object *obj;
- obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
+ obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
+ mode_cmd->handles[0]));
if (&obj->base == NULL)
return ERR_PTR(-ENOENT);
@@ -7969,8 +8193,8 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
if (intel_enable_rc6(dev_priv->dev))
- rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
- GEN6_RC_CTL_RC6_ENABLE;
+ rc6_mask = GEN6_RC_CTL_RC6_ENABLE |
+ ((IS_GEN7(dev_priv->dev)) ? GEN6_RC_CTL_RC6p_ENABLE : 0);
I915_WRITE(GEN6_RC_CONTROL,
rc6_mask |
@@ -7995,7 +8219,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
I915_WRITE(GEN6_RP_CONTROL,
GEN6_RP_MEDIA_TURBO |
- GEN6_RP_USE_NORMAL_FREQ |
+ GEN6_RP_MEDIA_HW_MODE |
GEN6_RP_MEDIA_IS_GFX |
GEN6_RP_ENABLE |
GEN6_RP_UP_BUSY_AVG |
@@ -8248,8 +8472,32 @@ static void ivybridge_init_clock_gating(struct drm_device *dev)
I915_WRITE(WM2_LP_ILK, 0);
I915_WRITE(WM1_LP_ILK, 0);
+ /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
+ * This implements the WaDisableRCZUnitClockGating workaround.
+ */
+ I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
+
I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
+ I915_WRITE(IVB_CHICKEN3,
+ CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
+ CHICKEN3_DGMG_DONE_FIX_DISABLE);
+
+ /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
+ I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
+ GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
+
+ /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
+ I915_WRITE(GEN7_L3CNTLREG1,
+ GEN7_WA_FOR_GEN7_L3_CONTROL);
+ I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
+ GEN7_WA_L3_CHICKEN_MODE);
+
+ /* This is required by WaCatErrorRejectionIssue */
+ I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
+ I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
+ GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
+
for_each_pipe(pipe) {
I915_WRITE(DSPCNTR(pipe),
I915_READ(DSPCNTR(pipe)) |
@@ -8543,9 +8791,15 @@ static void intel_init_display(struct drm_device *dev)
if (IS_IVYBRIDGE(dev)) {
u32 ecobus;
+ /* A small trick here - if the bios hasn't configured MT forcewake,
+ * and if the device is in RC6, then force_wake_mt_get will not wake
+ * the device and the ECOBUS read will return zero. Which will be
+ * (correctly) interpreted by the test below as MT forcewake being
+ * disabled.
+ */
mutex_lock(&dev->struct_mutex);
__gen6_gt_force_wake_mt_get(dev_priv);
- ecobus = I915_READ(ECOBUS);
+ ecobus = I915_READ_NOTRACE(ECOBUS);
__gen6_gt_force_wake_mt_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
@@ -8577,6 +8831,7 @@ static void intel_init_display(struct drm_device *dev)
} else if (IS_GEN6(dev)) {
if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = sandybridge_update_wm;
+ dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
@@ -8590,6 +8845,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
if (SNB_READ_WM0_LATENCY()) {
dev_priv->display.update_wm = sandybridge_update_wm;
+ dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
} else {
DRM_DEBUG_KMS("Failed to read display plane latency. "
"Disable CxSR\n");
@@ -8773,7 +9029,7 @@ static void i915_disable_vga(struct drm_device *dev)
void intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- int i;
+ int i, ret;
drm_mode_config_init(dev);
@@ -8803,6 +9059,9 @@ void intel_modeset_init(struct drm_device *dev)
for (i = 0; i < dev_priv->num_pipe; i++) {
intel_crtc_init(dev, i);
+ ret = intel_plane_init(dev, i);
+ if (ret)
+ DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
}
/* Just disable it once at startup */
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 92b041b..94f860c 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -208,17 +208,8 @@ intel_dp_link_clock(uint8_t link_bw)
*/
static int
-intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock, int check_bpp)
+intel_dp_link_required(int pixel_clock, int bpp)
{
- struct drm_crtc *crtc = intel_dp->base.base.crtc;
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- int bpp = 24;
-
- if (check_bpp)
- bpp = check_bpp;
- else if (intel_crtc)
- bpp = intel_crtc->bpp;
-
return (pixel_clock * bpp + 9) / 10;
}
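/*
 * Illustrative sketch, not part of the patch: the simplified bandwidth check.
 * intel_dp_link_required() now just charges bpp bits per pixel plus the
 * 8b/10b channel-coding overhead; callers try 24 bpp first and fall back to
 * 18 bpp (6 bpc) only if that does not fit.  The max-rate figure assumes the
 * usual link_clock * lanes * 8 / 10 payload formula; the function name is
 * hypothetical.
 */
static bool dp_mode_fits_example(void)
{
	int pixel_clock = 154000;			/* kHz, ~1920x1200@60 */
	int mode_rate = (pixel_clock * 24 + 9) / 10;	/* 369600 at 24 bpp */
	int max_rate = 270000 * 4 * 8 / 10;		/* 864000: 2.7 GHz, 4 lanes */

	/* 24 bpp fits here; the 18 bpp fallback only matters on slower links */
	return mode_rate <= max_rate;
}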
@@ -245,12 +236,11 @@ intel_dp_mode_valid(struct drm_connector *connector,
return MODE_PANEL;
}
- mode_rate = intel_dp_link_required(intel_dp, mode->clock, 0);
+ mode_rate = intel_dp_link_required(mode->clock, 24);
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
if (mode_rate > max_rate) {
- mode_rate = intel_dp_link_required(intel_dp,
- mode->clock, 18);
+ mode_rate = intel_dp_link_required(mode->clock, 18);
if (mode_rate > max_rate)
return MODE_CLOCK_HIGH;
else
@@ -683,7 +673,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
- int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 0;
+ int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -701,7 +691,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
- if (intel_dp_link_required(intel_dp, mode->clock, bpp)
+ if (intel_dp_link_required(mode->clock, bpp)
<= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
@@ -1926,6 +1916,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
}
+ DP &= ~DP_AUDIO_OUTPUT_ENABLE;
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
POSTING_READ(intel_dp->output_reg);
msleep(intel_dp->panel_power_down_delay);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a1b4343..1348705 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -26,6 +26,7 @@
#define __INTEL_DRV_H__
#include <linux/i2c.h>
+#include "i915_drm.h"
#include "i915_drv.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
@@ -39,7 +40,7 @@
ret__ = -ETIMEDOUT; \
break; \
} \
- if (W && !(in_atomic() || in_dbg_master())) msleep(W); \
+ if (W && drm_can_sleep()) msleep(W); \
} \
ret__; \
})
@@ -47,13 +48,6 @@
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
-#define MSLEEP(x) do { \
- if (in_dbg_master()) \
- mdelay(x); \
- else \
- msleep(x); \
-} while (0)
-
#define KHz(x) (1000*x)
#define MHz(x) KHz(1000*x)
@@ -177,10 +171,32 @@ struct intel_crtc {
bool use_pll_a;
};
+struct intel_plane {
+ struct drm_plane base;
+ enum pipe pipe;
+ struct drm_i915_gem_object *obj;
+ bool primary_disabled;
+ int max_downscale;
+ u32 lut_r[1024], lut_g[1024], lut_b[1024];
+ void (*update_plane)(struct drm_plane *plane,
+ struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t x, uint32_t y,
+ uint32_t src_w, uint32_t src_h);
+ void (*disable_plane)(struct drm_plane *plane);
+ int (*update_colorkey)(struct drm_plane *plane,
+ struct drm_intel_sprite_colorkey *key);
+ void (*get_colorkey)(struct drm_plane *plane,
+ struct drm_intel_sprite_colorkey *key);
+};
+
#define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
#define to_intel_connector(x) container_of(x, struct intel_connector, base)
#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
#define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
+#define to_intel_plane(x) container_of(x, struct intel_plane, base)
#define DIP_HEADER_SIZE 5
@@ -290,6 +306,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
extern bool intel_dpd_is_edp(struct drm_device *dev);
extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
+extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
/* intel_panel.c */
extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
@@ -360,7 +377,7 @@ extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
extern int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
- struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_i915_gem_object *obj);
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
@@ -380,9 +397,25 @@ extern int intel_overlay_attrs(struct drm_device *dev, void *data,
extern void intel_fb_output_poll_changed(struct drm_device *dev);
extern void intel_fb_restore_mode(struct drm_device *dev);
+extern void assert_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
+ bool state);
+#define assert_pipe_enabled(d, p) assert_pipe(d, p, true)
+#define assert_pipe_disabled(d, p) assert_pipe(d, p, false)
+
extern void intel_init_clock_gating(struct drm_device *dev);
extern void intel_write_eld(struct drm_encoder *encoder,
struct drm_display_mode *mode);
extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
+/* For use by IVB LP watermark workaround in intel_sprite.c */
+extern void sandybridge_update_wm(struct drm_device *dev);
+extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
+ uint32_t sprite_width,
+ int pixel_size);
+
+extern int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
#endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index ec49bae..571375a 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -65,7 +65,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
struct drm_i915_private *dev_priv = dev->dev_private;
struct fb_info *info;
struct drm_framebuffer *fb;
- struct drm_mode_fb_cmd mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd;
struct drm_i915_gem_object *obj;
struct device *device = &dev->pdev->dev;
int size, ret;
@@ -77,11 +77,12 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
- mode_cmd.bpp = sizes->surface_bpp;
- mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 7) / 8), 64);
- mode_cmd.depth = sizes->surface_depth;
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((sizes->surface_bpp + 7) /
+ 8), 64);
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
- size = mode_cmd.pitch * mode_cmd.height;
+ size = mode_cmd.pitches[0] * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
obj = i915_gem_alloc_object(dev, size);
if (!obj) {
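/*
 * Illustrative sketch, not part of the patch: the fbdev allocation arithmetic
 * in the hunk above, worked through for a common 1366x768, 32 bpp console.
 * The helper name is hypothetical; ALIGN() and PAGE_SIZE are the usual kernel
 * macros.
 */
static unsigned int fbdev_size_example(void)
{
	unsigned int width = 1366, height = 768, bpp = 32;
	unsigned int cpp = (bpp + 7) / 8;		/* 4 bytes per pixel */
	unsigned int pitch = ALIGN(width * cpp, 64);	/* 5464 -> 5504 */

	return ALIGN(pitch * height, PAGE_SIZE);	/* 5504 * 768 = 4227072 */
}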
@@ -148,7 +149,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
// memset(info->screen_base, 0, size);
- drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &ifbdev->helper, sizes->fb_width, sizes->fb_height);
info->pixmap.size = 64*1024;
@@ -269,8 +270,14 @@ void intel_fb_restore_mode(struct drm_device *dev)
{
int ret;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_mode_config *config = &dev->mode_config;
+ struct drm_plane *plane;
ret = drm_fb_helper_restore_fbdev_mode(&dev_priv->fbdev->helper);
if (ret)
DRM_DEBUG("failed to restore crtc mode\n");
+
+ /* Be sure to shut off any planes that may be active */
+ list_for_each_entry(plane, &config->plane_list, head)
+ plane->funcs->disable_plane(plane);
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index d4f5a0b..64541f7 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -269,6 +269,10 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 temp;
+ u32 enable_bits = SDVO_ENABLE;
+
+ if (intel_hdmi->has_audio)
+ enable_bits |= SDVO_AUDIO_ENABLE;
temp = I915_READ(intel_hdmi->sdvox_reg);
@@ -281,9 +285,9 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
}
if (mode != DRM_MODE_DPMS_ON) {
- temp &= ~SDVO_ENABLE;
+ temp &= ~enable_bits;
} else {
- temp |= SDVO_ENABLE;
+ temp |= enable_bits;
}
I915_WRITE(intel_hdmi->sdvox_reg, temp);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index e441911..aa84832 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -694,6 +694,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
{
.callback = intel_no_lvds_dmi_callback,
+ .ident = "AOpen i45GMx-I",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AOpen"),
+ DMI_MATCH(DMI_BOARD_NAME, "i45GMx-I"),
+ },
+ },
+ {
+ .callback = intel_no_lvds_dmi_callback,
.ident = "Aopen i945GTt-VFA",
.matches = {
DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
@@ -708,6 +716,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
},
},
{
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Clientron E830",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "E830"),
+ },
+ },
+ {
.callback = intel_no_lvds_dmi_callback,
.ident = "Asus EeeBox PC EB1007",
.matches = {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ca70e2f..5361915 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -301,7 +301,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
I915_WRITE_CTL(ring,
((ring->size - PAGE_SIZE) & RING_NR_PAGES)
- | RING_REPORT_64K | RING_VALID);
+ | RING_VALID);
/* If the head is still not zero, the ring is dead */
if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
@@ -414,6 +414,11 @@ static int init_render_ring(struct intel_ring_buffer *ring)
return ret;
}
+ if (INTEL_INFO(dev)->gen >= 6) {
+ I915_WRITE(INSTPM,
+ INSTPM_FORCE_ORDERING << 16 | INSTPM_FORCE_ORDERING);
+ }
+
return ret;
}
@@ -631,6 +636,19 @@ render_ring_add_request(struct intel_ring_buffer *ring,
}
static u32
+gen6_ring_get_seqno(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+
+ /* Workaround to force correct ordering between irq and seqno writes on
+ * ivb (and maybe also on snb) by reading from a CS register (like
+ * ACTHD) before reading the status page. */
+ if (IS_GEN7(dev))
+ intel_ring_get_active_head(ring);
+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
+static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
@@ -795,6 +813,12 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
if (!dev->irq_enabled)
return false;
+ /* It looks like we need to prevent the gt from suspending while waiting
+ * for a notify irq, otherwise irqs seem to get lost on at least the
+ * blt/bsd rings on ivb. */
+ if (IS_GEN7(dev))
+ gen6_gt_force_wake_get(dev_priv);
+
spin_lock(&ring->irq_lock);
if (ring->irq_refcount++ == 0) {
ring->irq_mask &= ~rflag;
@@ -819,6 +843,9 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
ironlake_disable_irq(dev_priv, gflag);
}
spin_unlock(&ring->irq_lock);
+
+ if (IS_GEN7(dev))
+ gen6_gt_force_wake_put(dev_priv);
}
static bool
@@ -1105,21 +1132,18 @@ int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
struct drm_device *dev = ring->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long end;
- u32 head;
-
- /* If the reported head position has wrapped or hasn't advanced,
- * fallback to the slow and accurate path.
- */
- head = intel_read_status_page(ring, 4);
- if (head > ring->head) {
- ring->head = head;
- ring->space = ring_space(ring);
- if (ring->space >= n)
- return 0;
- }
trace_i915_ring_wait_begin(ring);
- end = jiffies + 3 * HZ;
+ if (drm_core_check_feature(dev, DRIVER_GEM))
+ /* With GEM the hangcheck timer should kick us out of the loop,
+ * leaving it early runs the risk of corrupting GEM state (due
+ * to running on almost untested codepaths). But on resume
+ * timers don't work yet, so prevent a complete hang in that
+ * case by choosing an insanely large timeout. */
+ end = jiffies + 60 * HZ;
+ else
+ end = jiffies + 3 * HZ;
+
do {
ring->head = I915_READ_HEAD(ring);
ring->space = ring_space(ring);
@@ -1316,7 +1340,7 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
.write_tail = gen6_bsd_ring_write_tail,
.flush = gen6_ring_flush,
.add_request = gen6_add_request,
- .get_seqno = ring_get_seqno,
+ .get_seqno = gen6_ring_get_seqno,
.irq_get = gen6_bsd_ring_get_irq,
.irq_put = gen6_bsd_ring_put_irq,
.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
@@ -1451,7 +1475,7 @@ static const struct intel_ring_buffer gen6_blt_ring = {
.write_tail = ring_write_tail,
.flush = blt_ring_flush,
.add_request = gen6_add_request,
- .get_seqno = ring_get_seqno,
+ .get_seqno = gen6_ring_get_seqno,
.irq_get = blt_ring_get_irq,
.irq_put = blt_ring_put_irq,
.dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
@@ -1474,6 +1498,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
ring->flush = gen6_render_ring_flush;
ring->irq_get = gen6_render_ring_get_irq;
ring->irq_put = gen6_render_ring_put_irq;
+ ring->get_seqno = gen6_ring_get_seqno;
} else if (IS_GEN5(dev)) {
ring->add_request = pc_render_add_request;
ring->get_seqno = pc_render_get_seqno;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index f7b9268..e334ec3 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1066,15 +1066,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
/* Set the SDVO control regs. */
if (INTEL_INFO(dev)->gen >= 4) {
- sdvox = 0;
+ /* The real mode polarity is set by the SDVO commands, using
+ * struct intel_sdvo_dtd. */
+ sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
if (intel_sdvo->is_hdmi)
sdvox |= intel_sdvo->color_range;
if (INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_BORDER_ENABLE;
- if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
- sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
- if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
- sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
} else {
sdvox = I915_READ(intel_sdvo->sdvo_reg);
switch (intel_sdvo->sdvo_reg) {
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 4aa6f34..6b7b22f 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2006-2007 Intel Corporation
+ * Copyright © 2006-2007 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
new file mode 100644
index 0000000..2288abf
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -0,0 +1,664 @@
+/*
+ * Copyright © 2011 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ * Jesse Barnes <jbarnes@virtuousgeek.org>
+ *
+ * New plane/sprite handling.
+ *
+ * The older chips had a separate interface for programming plane related
+ * registers; newer ones are much simpler and we can use the new DRM plane
+ * support.
+ */
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_fourcc.h"
+#include "intel_drv.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+static void
+ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t x, uint32_t y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int pipe = intel_plane->pipe;
+ u32 sprctl, sprscale = 0;
+ int pixel_size;
+
+ sprctl = I915_READ(SPRCTL(pipe));
+
+ /* Mask out pixel format bits in case we change it */
+ sprctl &= ~SPRITE_PIXFORMAT_MASK;
+ sprctl &= ~SPRITE_RGB_ORDER_RGBX;
+ sprctl &= ~SPRITE_YUV_BYTE_ORDER_MASK;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_XBGR8888:
+ sprctl |= SPRITE_FORMAT_RGBX888;
+ pixel_size = 4;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX;
+ pixel_size = 4;
+ break;
+ case DRM_FORMAT_YUYV:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV;
+ pixel_size = 2;
+ break;
+ case DRM_FORMAT_YVYU:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YVYU;
+ pixel_size = 2;
+ break;
+ case DRM_FORMAT_UYVY:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_UYVY;
+ pixel_size = 2;
+ break;
+ case DRM_FORMAT_VYUY:
+ sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_VYUY;
+ pixel_size = 2;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
+ sprctl |= DVS_FORMAT_RGBX888;
+ pixel_size = 4;
+ break;
+ }
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ sprctl |= SPRITE_TILED;
+
+ /* must disable */
+ sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
+ sprctl |= SPRITE_ENABLE;
+ sprctl |= SPRITE_DEST_KEY;
+
+ /* Sizes are 0 based */
+ src_w--;
+ src_h--;
+ crtc_w--;
+ crtc_h--;
+
+ intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
+
+ /*
+ * IVB workaround: must disable low power watermarks for at least
+ * one frame before enabling scaling. LP watermarks can be re-enabled
+ * when scaling is disabled.
+ */
+ if (crtc_w != src_w || crtc_h != src_h) {
+ dev_priv->sprite_scaling_enabled = true;
+ sandybridge_update_wm(dev);
+ intel_wait_for_vblank(dev, pipe);
+ sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
+ } else {
+ dev_priv->sprite_scaling_enabled = false;
+ /* potentially re-enable LP watermarks */
+ sandybridge_update_wm(dev);
+ }
+
+ I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
+ I915_WRITE(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
+ if (obj->tiling_mode != I915_TILING_NONE) {
+ I915_WRITE(SPRTILEOFF(pipe), (y << 16) | x);
+ } else {
+ unsigned long offset;
+
+ offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ I915_WRITE(SPRLINOFF(pipe), offset);
+ }
+ I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
+ I915_WRITE(SPRSCALE(pipe), sprscale);
+ I915_WRITE(SPRCTL(pipe), sprctl);
+ I915_WRITE(SPRSURF(pipe), obj->gtt_offset);
+ POSTING_READ(SPRSURF(pipe));
+}
+
+static void
+ivb_disable_plane(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int pipe = intel_plane->pipe;
+
+ I915_WRITE(SPRCTL(pipe), I915_READ(SPRCTL(pipe)) & ~SPRITE_ENABLE);
+ /* Can't leave the scaler enabled... */
+ I915_WRITE(SPRSCALE(pipe), 0);
+ /* Activate double buffered register update */
+ I915_WRITE(SPRSURF(pipe), 0);
+ POSTING_READ(SPRSURF(pipe));
+}
+
+static int
+ivb_update_colorkey(struct drm_plane *plane,
+ struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane;
+ u32 sprctl;
+ int ret = 0;
+
+ intel_plane = to_intel_plane(plane);
+
+ I915_WRITE(SPRKEYVAL(intel_plane->pipe), key->min_value);
+ I915_WRITE(SPRKEYMAX(intel_plane->pipe), key->max_value);
+ I915_WRITE(SPRKEYMSK(intel_plane->pipe), key->channel_mask);
+
+ sprctl = I915_READ(SPRCTL(intel_plane->pipe));
+ sprctl &= ~(SPRITE_SOURCE_KEY | SPRITE_DEST_KEY);
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ sprctl |= SPRITE_DEST_KEY;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ sprctl |= SPRITE_SOURCE_KEY;
+ I915_WRITE(SPRCTL(intel_plane->pipe), sprctl);
+
+ POSTING_READ(SPRKEYMSK(intel_plane->pipe));
+
+ return ret;
+}
+
+static void
+ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane;
+ u32 sprctl;
+
+ intel_plane = to_intel_plane(plane);
+
+ key->min_value = I915_READ(SPRKEYVAL(intel_plane->pipe));
+ key->max_value = I915_READ(SPRKEYMAX(intel_plane->pipe));
+ key->channel_mask = I915_READ(SPRKEYMSK(intel_plane->pipe));
+ key->flags = 0;
+
+ sprctl = I915_READ(SPRCTL(intel_plane->pipe));
+
+ if (sprctl & SPRITE_DEST_KEY)
+ key->flags = I915_SET_COLORKEY_DESTINATION;
+ else if (sprctl & SPRITE_SOURCE_KEY)
+ key->flags = I915_SET_COLORKEY_SOURCE;
+ else
+ key->flags = I915_SET_COLORKEY_NONE;
+}
+
+static void
+snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
+ struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t x, uint32_t y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int pipe = intel_plane->pipe, pixel_size;
+ u32 dvscntr, dvsscale = 0;
+
+ dvscntr = I915_READ(DVSCNTR(pipe));
+
+ /* Mask out pixel format bits in case we change it */
+ dvscntr &= ~DVS_PIXFORMAT_MASK;
+ dvscntr &= ~DVS_RGB_ORDER_RGBX;
+ dvscntr &= ~DVS_YUV_BYTE_ORDER_MASK;
+
+ switch (fb->pixel_format) {
+ case DRM_FORMAT_XBGR8888:
+ dvscntr |= DVS_FORMAT_RGBX888;
+ pixel_size = 4;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ dvscntr |= DVS_FORMAT_RGBX888 | DVS_RGB_ORDER_RGBX;
+ pixel_size = 4;
+ break;
+ case DRM_FORMAT_YUYV:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV;
+ pixel_size = 2;
+ break;
+ case DRM_FORMAT_YVYU:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YVYU;
+ pixel_size = 2;
+ break;
+ case DRM_FORMAT_UYVY:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_UYVY;
+ pixel_size = 2;
+ break;
+ case DRM_FORMAT_VYUY:
+ dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_VYUY;
+ pixel_size = 2;
+ break;
+ default:
+ DRM_DEBUG_DRIVER("bad pixel format, assuming RGBX888\n");
+ dvscntr |= DVS_FORMAT_RGBX888;
+ pixel_size = 4;
+ break;
+ }
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ dvscntr |= DVS_TILED;
+
+ /* must disable */
+ dvscntr |= DVS_TRICKLE_FEED_DISABLE;
+ dvscntr |= DVS_ENABLE;
+
+ /* Sizes are 0 based */
+ src_w--;
+ src_h--;
+ crtc_w--;
+ crtc_h--;
+
+ intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
+
+ if (crtc_w != src_w || crtc_h != src_h)
+ dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
+
+ I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
+ I915_WRITE(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
+ if (obj->tiling_mode != I915_TILING_NONE) {
+ I915_WRITE(DVSTILEOFF(pipe), (y << 16) | x);
+ } else {
+ unsigned long offset;
+
+ offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
+ I915_WRITE(DVSLINOFF(pipe), offset);
+ }
+ I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
+ I915_WRITE(DVSSCALE(pipe), dvsscale);
+ I915_WRITE(DVSCNTR(pipe), dvscntr);
+ I915_WRITE(DVSSURF(pipe), obj->gtt_offset);
+ POSTING_READ(DVSSURF(pipe));
+}
+
+static void
+snb_disable_plane(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int pipe = intel_plane->pipe;
+
+ I915_WRITE(DVSCNTR(pipe), I915_READ(DVSCNTR(pipe)) & ~DVS_ENABLE);
+ /* Disable the scaler */
+ I915_WRITE(DVSSCALE(pipe), 0);
+ /* Flush double buffered register updates */
+ I915_WRITE(DVSSURF(pipe), 0);
+ POSTING_READ(DVSSURF(pipe));
+}
+
+static void
+intel_enable_primary(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int reg = DSPCNTR(intel_crtc->plane);
+
+ I915_WRITE(reg, I915_READ(reg) | DISPLAY_PLANE_ENABLE);
+}
+
+static void
+intel_disable_primary(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int reg = DSPCNTR(intel_crtc->plane);
+
+ I915_WRITE(reg, I915_READ(reg) & ~DISPLAY_PLANE_ENABLE);
+}
+
+static int
+snb_update_colorkey(struct drm_plane *plane,
+ struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane;
+ u32 dvscntr;
+ int ret = 0;
+
+ intel_plane = to_intel_plane(plane);
+
+ I915_WRITE(DVSKEYVAL(intel_plane->pipe), key->min_value);
+ I915_WRITE(DVSKEYMAX(intel_plane->pipe), key->max_value);
+ I915_WRITE(DVSKEYMSK(intel_plane->pipe), key->channel_mask);
+
+ dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
+ dvscntr &= ~(DVS_SOURCE_KEY | DVS_DEST_KEY);
+ if (key->flags & I915_SET_COLORKEY_DESTINATION)
+ dvscntr |= DVS_DEST_KEY;
+ else if (key->flags & I915_SET_COLORKEY_SOURCE)
+ dvscntr |= DVS_SOURCE_KEY;
+ I915_WRITE(DVSCNTR(intel_plane->pipe), dvscntr);
+
+ POSTING_READ(DVSKEYMSK(intel_plane->pipe));
+
+ return ret;
+}
+
+static void
+snb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_plane *intel_plane;
+ u32 dvscntr;
+
+ intel_plane = to_intel_plane(plane);
+
+ key->min_value = I915_READ(DVSKEYVAL(intel_plane->pipe));
+ key->max_value = I915_READ(DVSKEYMAX(intel_plane->pipe));
+ key->channel_mask = I915_READ(DVSKEYMSK(intel_plane->pipe));
+ key->flags = 0;
+
+ dvscntr = I915_READ(DVSCNTR(intel_plane->pipe));
+
+ if (dvscntr & DVS_DEST_KEY)
+ key->flags = I915_SET_COLORKEY_DESTINATION;
+ else if (dvscntr & DVS_SOURCE_KEY)
+ key->flags = I915_SET_COLORKEY_SOURCE;
+ else
+ key->flags = I915_SET_COLORKEY_NONE;
+}
+
+static int
+intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
+ struct drm_framebuffer *fb, int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ struct intel_framebuffer *intel_fb;
+ struct drm_i915_gem_object *obj, *old_obj;
+ int pipe = intel_plane->pipe;
+ int ret = 0;
+ int x = src_x >> 16, y = src_y >> 16;
+ int primary_w = crtc->mode.hdisplay, primary_h = crtc->mode.vdisplay;
+ bool disable_primary = false;
+
+ intel_fb = to_intel_framebuffer(fb);
+ obj = intel_fb->obj;
+
+ old_obj = intel_plane->obj;
+
+ /* Pipe must be running... */
+ if (!(I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE))
+ return -EINVAL;
+
+ if (crtc_x >= primary_w || crtc_y >= primary_h)
+ return -EINVAL;
+
+ /* Don't modify another pipe's plane */
+ if (intel_plane->pipe != intel_crtc->pipe)
+ return -EINVAL;
+
+ /*
+ * Clamp the width & height into the visible area. Note we don't
+ * try to scale the source if part of the visible region is offscreen.
+ * The caller must handle that by adjusting source offset and size.
+ */
+ if ((crtc_x < 0) && ((crtc_x + crtc_w) > 0)) {
+ crtc_w += crtc_x;
+ crtc_x = 0;
+ }
+ if ((crtc_x + crtc_w) <= 0) /* Nothing to display */
+ goto out;
+ if ((crtc_x + crtc_w) > primary_w)
+ crtc_w = primary_w - crtc_x;
+
+ if ((crtc_y < 0) && ((crtc_y + crtc_h) > 0)) {
+ crtc_h += crtc_y;
+ crtc_y = 0;
+ }
+ if ((crtc_y + crtc_h) <= 0) /* Nothing to display */
+ goto out;
+ if (crtc_y + crtc_h > primary_h)
+ crtc_h = primary_h - crtc_y;
+
+ if (!crtc_w || !crtc_h) /* Again, nothing to display */
+ goto out;
+
+ /*
+ * We can take a larger source and scale it down, but
+ * only so much... 16x is the max on SNB.
+ */
+ if (((src_w * src_h) / (crtc_w * crtc_h)) > intel_plane->max_downscale)
+ return -EINVAL;
+
+ /*
+ * If the sprite is completely covering the primary plane,
+ * we can disable the primary and save power.
+ */
+ if ((crtc_x == 0) && (crtc_y == 0) &&
+ (crtc_w == primary_w) && (crtc_h == primary_h))
+ disable_primary = true;
+
+ mutex_lock(&dev->struct_mutex);
+
+ ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ if (ret)
+ goto out_unlock;
+
+ intel_plane->obj = obj;
+
+ /*
+ * Be sure to re-enable the primary before the sprite is no longer
+ * covering it fully.
+ */
+ if (!disable_primary && intel_plane->primary_disabled) {
+ intel_enable_primary(crtc);
+ intel_plane->primary_disabled = false;
+ }
+
+ intel_plane->update_plane(plane, fb, obj, crtc_x, crtc_y,
+ crtc_w, crtc_h, x, y, src_w, src_h);
+
+ if (disable_primary) {
+ intel_disable_primary(crtc);
+ intel_plane->primary_disabled = true;
+ }
+
+ /* Unpin old obj after new one is active to avoid ugliness */
+ if (old_obj) {
+ /*
+ * It's fairly common to simply update the position of
+ * an existing object. In that case, we don't need to
+ * wait for vblank to avoid ugliness, we only need to
+ * do the pin & ref bookkeeping.
+ */
+ if (old_obj != obj) {
+ mutex_unlock(&dev->struct_mutex);
+ intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
+ mutex_lock(&dev->struct_mutex);
+ }
+ i915_gem_object_unpin(old_obj);
+ }
+
+out_unlock:
+ mutex_unlock(&dev->struct_mutex);
+out:
+ return ret;
+}
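/*
 * Illustrative sketch, not part of the patch: what the max_downscale check in
 * intel_update_plane() above permits.  A 1920x1080 source shrunk to a 480x270
 * window is a 16x area reduction, which is exactly the SNB limit; IVB caps at
 * 2x.  Assumes a non-zero destination size, as the earlier checks guarantee;
 * the helper name is hypothetical.
 */
static bool downscale_fits(u32 src_w, u32 src_h, u32 crtc_w, u32 crtc_h,
			   int max_downscale)
{
	return (src_w * src_h) / (crtc_w * crtc_h) <= max_downscale;
}
/* downscale_fits(1920, 1080, 480, 270, 16) -> true on SNB, false with 2 */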
+
+static int
+intel_disable_plane(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ int ret = 0;
+
+ if (intel_plane->primary_disabled) {
+ intel_enable_primary(plane->crtc);
+ intel_plane->primary_disabled = false;
+ }
+
+ intel_plane->disable_plane(plane);
+
+ if (!intel_plane->obj)
+ goto out;
+
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_object_unpin(intel_plane->obj);
+ intel_plane->obj = NULL;
+ mutex_unlock(&dev->struct_mutex);
+out:
+
+ return ret;
+}
+
+static void intel_destroy_plane(struct drm_plane *plane)
+{
+ struct intel_plane *intel_plane = to_intel_plane(plane);
+ intel_disable_plane(plane);
+ drm_plane_cleanup(plane);
+ kfree(intel_plane);
+}
+
+int intel_sprite_set_colorkey(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_intel_sprite_colorkey *set = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ struct intel_plane *intel_plane;
+ int ret = 0;
+
+ if (!dev_priv)
+ return -EINVAL;
+
+ /* Make sure we don't try to enable both src & dest simultaneously */
+ if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
+ return -EINVAL;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ obj = drm_mode_object_find(dev, set->plane_id, DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ plane = obj_to_plane(obj);
+ intel_plane = to_intel_plane(plane);
+ ret = intel_plane->update_colorkey(plane, set);
+
+out_unlock:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_intel_sprite_colorkey *get = data;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_mode_object *obj;
+ struct drm_plane *plane;
+ struct intel_plane *intel_plane;
+ int ret = 0;
+
+ if (!dev_priv)
+ return -EINVAL;
+
+ mutex_lock(&dev->mode_config.mutex);
+
+ obj = drm_mode_object_find(dev, get->plane_id, DRM_MODE_OBJECT_PLANE);
+ if (!obj) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ plane = obj_to_plane(obj);
+ intel_plane = to_intel_plane(plane);
+ intel_plane->get_colorkey(plane, get);
+
+out_unlock:
+ mutex_unlock(&dev->mode_config.mutex);
+ return ret;
+}
+
+static const struct drm_plane_funcs intel_plane_funcs = {
+ .update_plane = intel_update_plane,
+ .disable_plane = intel_disable_plane,
+ .destroy = intel_destroy_plane,
+};
+
+static uint32_t snb_plane_formats[] = {
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+};
+
+int
+intel_plane_init(struct drm_device *dev, enum pipe pipe)
+{
+ struct intel_plane *intel_plane;
+ unsigned long possible_crtcs;
+ int ret;
+
+ if (!(IS_GEN6(dev) || IS_GEN7(dev)))
+ return -ENODEV;
+
+ intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
+ if (!intel_plane)
+ return -ENOMEM;
+
+ if (IS_GEN6(dev)) {
+ intel_plane->max_downscale = 16;
+ intel_plane->update_plane = snb_update_plane;
+ intel_plane->disable_plane = snb_disable_plane;
+ intel_plane->update_colorkey = snb_update_colorkey;
+ intel_plane->get_colorkey = snb_get_colorkey;
+ } else if (IS_GEN7(dev)) {
+ intel_plane->max_downscale = 2;
+ intel_plane->update_plane = ivb_update_plane;
+ intel_plane->disable_plane = ivb_disable_plane;
+ intel_plane->update_colorkey = ivb_update_colorkey;
+ intel_plane->get_colorkey = ivb_get_colorkey;
+ }
+
+ intel_plane->pipe = pipe;
+ possible_crtcs = (1 << pipe);
+ ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
+ &intel_plane_funcs, snb_plane_formats,
+ ARRAY_SIZE(snb_plane_formats), false);
+ if (ret)
+ kfree(intel_plane);
+
+ return ret;
+}
+
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index f3c6a9a..1571be3 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -417,7 +417,7 @@ static const struct tv_mode tv_modes[] = {
{
.name = "NTSC-M",
.clock = 108000,
- .refresh = 29970,
+ .refresh = 59940,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
/* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
@@ -460,7 +460,7 @@ static const struct tv_mode tv_modes[] = {
{
.name = "NTSC-443",
.clock = 108000,
- .refresh = 29970,
+ .refresh = 59940,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
/* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 4.43MHz */
@@ -502,7 +502,7 @@ static const struct tv_mode tv_modes[] = {
{
.name = "NTSC-J",
.clock = 108000,
- .refresh = 29970,
+ .refresh = 59940,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
@@ -545,7 +545,7 @@ static const struct tv_mode tv_modes[] = {
{
.name = "PAL-M",
.clock = 108000,
- .refresh = 29970,
+ .refresh = 59940,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
@@ -589,7 +589,7 @@ static const struct tv_mode tv_modes[] = {
/* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
.name = "PAL-N",
.clock = 108000,
- .refresh = 25000,
+ .refresh = 50000,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
@@ -634,7 +634,7 @@ static const struct tv_mode tv_modes[] = {
/* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
.name = "PAL",
.clock = 108000,
- .refresh = 25000,
+ .refresh = 50000,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
@@ -674,78 +674,6 @@ static const struct tv_mode tv_modes[] = {
.filter_table = filter_table,
},
{
- .name = "480p@59.94Hz",
- .clock = 107520,
- .refresh = 59940,
- .oversample = TV_OVERSAMPLE_4X,
- .component_only = 1,
-
- .hsync_end = 64, .hblank_end = 122,
- .hblank_start = 842, .htotal = 857,
-
- .progressive = true, .trilevel_sync = false,
-
- .vsync_start_f1 = 12, .vsync_start_f2 = 12,
- .vsync_len = 12,
-
- .veq_ena = false,
-
- .vi_end_f1 = 44, .vi_end_f2 = 44,
- .nbr_end = 479,
-
- .burst_ena = false,
-
- .filter_table = filter_table,
- },
- {
- .name = "480p@60Hz",
- .clock = 107520,
- .refresh = 60000,
- .oversample = TV_OVERSAMPLE_4X,
- .component_only = 1,
-
- .hsync_end = 64, .hblank_end = 122,
- .hblank_start = 842, .htotal = 856,
-
- .progressive = true, .trilevel_sync = false,
-
- .vsync_start_f1 = 12, .vsync_start_f2 = 12,
- .vsync_len = 12,
-
- .veq_ena = false,
-
- .vi_end_f1 = 44, .vi_end_f2 = 44,
- .nbr_end = 479,
-
- .burst_ena = false,
-
- .filter_table = filter_table,
- },
- {
- .name = "576p",
- .clock = 107520,
- .refresh = 50000,
- .oversample = TV_OVERSAMPLE_4X,
- .component_only = 1,
-
- .hsync_end = 64, .hblank_end = 139,
- .hblank_start = 859, .htotal = 863,
-
- .progressive = true, .trilevel_sync = false,
-
- .vsync_start_f1 = 10, .vsync_start_f2 = 10,
- .vsync_len = 10,
-
- .veq_ena = false,
-
- .vi_end_f1 = 48, .vi_end_f2 = 48,
- .nbr_end = 575,
-
- .burst_ena = false,
-
- .filter_table = filter_table,
- },
- {
.name = "720p@60Hz",
.clock = 148800,
.refresh = 60000,
@@ -770,30 +698,6 @@ static const struct tv_mode tv_modes[] = {
.filter_table = filter_table,
},
{
- .name = "720p@59.94Hz",
- .clock = 148800,
- .refresh = 59940,
- .oversample = TV_OVERSAMPLE_2X,
- .component_only = 1,
-
- .hsync_end = 80, .hblank_end = 300,
- .hblank_start = 1580, .htotal = 1651,
-
- .progressive = true, .trilevel_sync = true,
-
- .vsync_start_f1 = 10, .vsync_start_f2 = 10,
- .vsync_len = 10,
-
- .veq_ena = false,
-
- .vi_end_f1 = 29, .vi_end_f2 = 29,
- .nbr_end = 719,
-
- .burst_ena = false,
-
- .filter_table = filter_table,
- },
- {
.name = "720p@50Hz",
.clock = 148800,
.refresh = 50000,
@@ -821,7 +725,7 @@ static const struct tv_mode tv_modes[] = {
{
.name = "1080i@50Hz",
.clock = 148800,
- .refresh = 25000,
+ .refresh = 50000,
.oversample = TV_OVERSAMPLE_2X,
.component_only = 1,
@@ -847,7 +751,7 @@ static const struct tv_mode tv_modes[] = {
{
.name = "1080i@60Hz",
.clock = 148800,
- .refresh = 30000,
+ .refresh = 60000,
.oversample = TV_OVERSAMPLE_2X,
.component_only = 1,
@@ -870,32 +774,6 @@ static const struct tv_mode tv_modes[] = {
.filter_table = filter_table,
},
- {
- .name = "1080i@59.94Hz",
- .clock = 148800,
- .refresh = 29970,
- .oversample = TV_OVERSAMPLE_2X,
- .component_only = 1,
-
- .hsync_end = 88, .hblank_end = 235,
- .hblank_start = 2155, .htotal = 2201,
-
- .progressive = false, .trilevel_sync = true,
-
- .vsync_start_f1 = 4, .vsync_start_f2 = 5,
- .vsync_len = 10,
-
- .veq_ena = true, .veq_start_f1 = 4,
- .veq_start_f2 = 4, .veq_len = 10,
-
-
- .vi_end_f1 = 21, .vi_end_f2 = 22,
- .nbr_end = 539,
-
- .burst_ena = false,
-
- .filter_table = filter_table,
- },
};
static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c
index 33daa29..f9a925d 100644
--- a/drivers/gpu/drm/mga/mga_drv.c
+++ b/drivers/gpu/drm/mga/mga_drv.c
@@ -44,6 +44,20 @@ static struct pci_device_id pciidlist[] = {
mga_PCI_IDS
};
+static const struct file_operations mga_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mga_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
@@ -64,20 +78,7 @@ static struct drm_driver driver = {
.reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = mga_ioctls,
.dma_ioctl = mga_dma_buffers,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = mga_compat_ioctl,
-#endif
- .llseek = noop_llseek,
- },
-
+ .fops = &mga_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 35ef5b1..9f27e3d 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -9,9 +9,9 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
- nouveau_dp.o nouveau_ramht.o \
+ nouveau_hdmi.o nouveau_dp.o nouveau_ramht.o \
nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
- nouveau_mm.o nouveau_vm.o \
+ nouveau_mm.o nouveau_vm.o nouveau_mxm.o nouveau_gpio.o \
nv04_timer.o \
nv04_mc.o nv40_mc.o nv50_mc.o \
nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
@@ -19,9 +19,12 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nv04_graph.o nv10_graph.o nv20_graph.o \
nv40_graph.o nv50_graph.o nvc0_graph.o \
nv40_grctx.o nv50_grctx.o nvc0_grctx.o \
- nv84_crypt.o \
+ nv84_crypt.o nv98_crypt.o \
nva3_copy.o nvc0_copy.o \
nv31_mpeg.o nv50_mpeg.o \
+ nv84_bsp.o \
+ nv84_vp.o \
+ nv98_ppp.o \
nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
nv04_crtc.o nv04_display.o nv04_cursor.o \
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 525744d..7814a76 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -18,12 +18,6 @@
#include <linux/vga_switcheroo.h>
-#define NOUVEAU_DSM_SUPPORTED 0x00
-#define NOUVEAU_DSM_SUPPORTED_FUNCTIONS 0x00
-
-#define NOUVEAU_DSM_ACTIVE 0x01
-#define NOUVEAU_DSM_ACTIVE_QUERY 0x00
-
#define NOUVEAU_DSM_LED 0x02
#define NOUVEAU_DSM_LED_STATE 0x00
#define NOUVEAU_DSM_LED_OFF 0x10
@@ -35,6 +29,9 @@
#define NOUVEAU_DSM_POWER_SPEED 0x01
#define NOUVEAU_DSM_POWER_STAMINA 0x02
+#define NOUVEAU_DSM_OPTIMUS_FN 0x1A
+#define NOUVEAU_DSM_OPTIMUS_ARGS 0x03000001
+
static struct nouveau_dsm_priv {
bool dsm_detected;
bool optimus_detected;
@@ -61,7 +58,8 @@ static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *
struct acpi_object_list input;
union acpi_object params[4];
union acpi_object *obj;
- int err;
+ int i, err;
+ char args_buff[4];
input.count = 4;
input.pointer = params;
@@ -73,7 +71,11 @@ static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *
params[2].type = ACPI_TYPE_INTEGER;
params[2].integer.value = func;
params[3].type = ACPI_TYPE_BUFFER;
- params[3].buffer.length = 0;
+ params[3].buffer.length = 4;
+ /* ACPI is little endian, AABBCCDD becomes {DD,CC,BB,AA} */
+ for (i = 0; i < 4; i++)
+ args_buff[i] = (arg >> i * 8) & 0xFF;
+ params[3].buffer.pointer = args_buff;
err = acpi_evaluate_object(handle, "_DSM", &input, &output);
if (err) {
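/*
 * Illustrative sketch, not part of the patch: how the hunk above serializes
 * the 32-bit _DSM argument into the ACPI buffer.  0x03000001 becomes the byte
 * sequence {0x01, 0x00, 0x00, 0x03}, since ACPI buffers are little endian.
 * The helper name is hypothetical.
 */
static void pack_dsm_arg(u32 arg, char out[4])
{
	int i;

	for (i = 0; i < 4; i++)
		out[i] = (arg >> (i * 8)) & 0xff;
}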
@@ -148,6 +150,23 @@ static int nouveau_dsm(acpi_handle handle, int func, int arg, uint32_t *result)
return 0;
}
+/* Returns 1 if a DSM function is usable and 0 otherwise */
+static int nouveau_test_dsm(acpi_handle test_handle,
+ int (*dsm_func)(acpi_handle, int, int, uint32_t *),
+ int sfnc)
+{
+ u32 result = 0;
+
+ /* Function 0 returns a Buffer containing available functions. The args
+ * parameter is ignored for function 0, so just put 0 in it */
+ if (dsm_func(test_handle, 0, 0, &result))
+ return 0;
+
+ /* ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported. If
+ * the n-th bit is enabled, function n is supported */
+ return result & 1 && result & (1 << sfnc);
+}
+
static int nouveau_dsm_switch_mux(acpi_handle handle, int mux_id)
{
mxm_wmi_call_mxmx(mux_id == NOUVEAU_DSM_LED_STAMINA ? MXM_MXDS_ADAPTER_IGD : MXM_MXDS_ADAPTER_0);
@@ -168,6 +187,10 @@ static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switchero
static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
{
+ /* perhaps the _DSM functions are mutually exclusive, but prepare for
+ * the future */
+ if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected)
+ return 0;
if (id == VGA_SWITCHEROO_IGD)
return nouveau_dsm_switch_mux(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_LED_STAMINA);
else
@@ -180,6 +203,11 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
if (id == VGA_SWITCHEROO_IGD)
return 0;
+ /* Optimus laptops have the card already disabled in
+ * nouveau_switcheroo_set_state */
+ if (!nouveau_dsm_priv.dsm_detected && nouveau_dsm_priv.optimus_detected)
+ return 0;
+
return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state);
}
@@ -212,8 +240,7 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
{
acpi_handle dhandle, nvidia_handle;
acpi_status status;
- int ret, retval = 0;
- uint32_t result;
+ int retval = 0;
dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
if (!dhandle)
@@ -224,13 +251,11 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
return false;
}
- ret = nouveau_dsm(dhandle, NOUVEAU_DSM_SUPPORTED,
- NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result);
- if (ret == 0)
+ if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
retval |= NOUVEAU_DSM_HAS_MUX;
- ret = nouveau_optimus_dsm(dhandle, 0, 0, &result);
- if (ret == 0)
+ if (nouveau_test_dsm(dhandle, nouveau_optimus_dsm,
+ NOUVEAU_DSM_OPTIMUS_FN))
retval |= NOUVEAU_DSM_HAS_OPT;
if (retval)
@@ -269,15 +294,22 @@ static bool nouveau_dsm_detect(void)
}
if (vga_count == 2 && has_dsm && guid_valid) {
- acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
+ acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
+ &buffer);
printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
- acpi_method_name);
+ acpi_method_name);
nouveau_dsm_priv.dsm_detected = true;
ret = true;
}
- if (has_optimus == 1)
+ if (has_optimus == 1) {
+ acpi_get_name(nouveau_dsm_priv.dhandle, ACPI_FULL_PATHNAME,
+ &buffer);
+ printk(KERN_INFO "VGA switcheroo: detected Optimus DSM method %s handle\n",
+ acpi_method_name);
nouveau_dsm_priv.optimus_detected = true;
+ ret = true;
+ }
return ret;
}
@@ -293,6 +325,17 @@ void nouveau_register_dsm_handler(void)
vga_switcheroo_register_handler(&nouveau_dsm_handler);
}
+/* Must be called for Optimus models before the card can be turned off */
+void nouveau_switcheroo_optimus_dsm(void)
+{
+ u32 result = 0;
+ if (!nouveau_dsm_priv.optimus_detected)
+ return;
+
+ nouveau_optimus_dsm(nouveau_dsm_priv.dhandle, NOUVEAU_DSM_OPTIMUS_FN,
+ NOUVEAU_DSM_OPTIMUS_ARGS, &result);
+}
+
void nouveau_unregister_dsm_handler(void)
{
vga_switcheroo_unregister_handler();
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 5fc201b..e5cbead 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -27,6 +27,7 @@
#include "nouveau_drv.h"
#include "nouveau_hw.h"
#include "nouveau_encoder.h"
+#include "nouveau_gpio.h"
#include <linux/io-mapping.h>
@@ -34,9 +35,6 @@
#define NV_CIO_CRE_44_HEADA 0x0
#define NV_CIO_CRE_44_HEADB 0x3
#define FEATURE_MOBILE 0x10 /* also FEATURE_QUADRO for BMP */
-#define LEGACY_I2C_CRT 0x80
-#define LEGACY_I2C_PANEL 0x81
-#define LEGACY_I2C_TV 0x82
#define EDID1_LEN 128
@@ -723,115 +721,19 @@ static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
return dcb_entry;
}
-static int
-read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, int index, struct dcb_i2c_entry *i2c)
-{
- uint8_t dcb_i2c_ver = dcb_version, headerlen = 0, entry_len = 4;
- int i2c_entries = DCB_MAX_NUM_I2C_ENTRIES;
- int recordoffset = 0, rdofs = 1, wrofs = 0;
- uint8_t port_type = 0;
-
- if (!i2ctable)
- return -EINVAL;
-
- if (dcb_version >= 0x30) {
- if (i2ctable[0] != dcb_version) /* necessary? */
- NV_WARN(dev,
- "DCB I2C table version mismatch (%02X vs %02X)\n",
- i2ctable[0], dcb_version);
- dcb_i2c_ver = i2ctable[0];
- headerlen = i2ctable[1];
- if (i2ctable[2] <= DCB_MAX_NUM_I2C_ENTRIES)
- i2c_entries = i2ctable[2];
- else
- NV_WARN(dev,
- "DCB I2C table has more entries than indexable "
- "(%d entries, max %d)\n", i2ctable[2],
- DCB_MAX_NUM_I2C_ENTRIES);
- entry_len = i2ctable[3];
- /* [4] is i2c_default_indices, read in parse_dcb_table() */
- }
- /*
- * It's your own fault if you call this function on a DCB 1.1 BIOS --
- * the test below is for DCB 1.2
- */
- if (dcb_version < 0x14) {
- recordoffset = 2;
- rdofs = 0;
- wrofs = 1;
- }
-
- if (index == 0xf)
- return 0;
- if (index >= i2c_entries) {
- NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
- index, i2ctable[2]);
- return -ENOENT;
- }
- if (i2ctable[headerlen + entry_len * index + 3] == 0xff) {
- NV_ERROR(dev, "DCB I2C entry invalid\n");
- return -EINVAL;
- }
-
- if (dcb_i2c_ver >= 0x30) {
- port_type = i2ctable[headerlen + recordoffset + 3 + entry_len * index];
-
- /*
- * Fixup for chips using same address offset for read and
- * write.
- */
- if (port_type == 4) /* seen on C51 */
- rdofs = wrofs = 1;
- if (port_type >= 5) /* G80+ */
- rdofs = wrofs = 0;
- }
-
- if (dcb_i2c_ver >= 0x40) {
- if (port_type != 5 && port_type != 6)
- NV_WARN(dev, "DCB I2C table has port type %d\n", port_type);
-
- i2c->entry = ROM32(i2ctable[headerlen + recordoffset + entry_len * index]);
- }
-
- i2c->port_type = port_type;
- i2c->read = i2ctable[headerlen + recordoffset + rdofs + entry_len * index];
- i2c->write = i2ctable[headerlen + recordoffset + wrofs + entry_len * index];
-
- return 0;
-}
-
static struct nouveau_i2c_chan *
init_i2c_device_find(struct drm_device *dev, int i2c_index)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct dcb_table *dcb = &dev_priv->vbios.dcb;
-
if (i2c_index == 0xff) {
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct dcb_table *dcb = &dev_priv->vbios.dcb;
/* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
- int idx = dcb_entry_idx_from_crtchead(dev), shift = 0;
- int default_indices = dcb->i2c_default_indices;
+ int idx = dcb_entry_idx_from_crtchead(dev);
+ i2c_index = NV_I2C_DEFAULT(0);
if (idx != 0x7f && dcb->entry[idx].i2c_upper_default)
- shift = 4;
-
- i2c_index = (default_indices >> shift) & 0xf;
+ i2c_index = NV_I2C_DEFAULT(1);
}
- if (i2c_index == 0x80) /* g80+ */
- i2c_index = dcb->i2c_default_indices & 0xf;
- else
- if (i2c_index == 0x81)
- i2c_index = (dcb->i2c_default_indices & 0xf0) >> 4;
-
- if (i2c_index >= DCB_MAX_NUM_I2C_ENTRIES) {
- NV_ERROR(dev, "invalid i2c_index 0x%x\n", i2c_index);
- return NULL;
- }
-
- /* Make sure i2c table entry has been parsed, it may not
- * have been if this is a bus not referenced by a DCB encoder
- */
- read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
- i2c_index, &dcb->i2c[i2c_index]);
return nouveau_i2c_find(dev, i2c_index);
}
@@ -1199,13 +1101,9 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
switch (cond) {
case 0:
- {
- struct dcb_connector_table_entry *ent =
- &bios->dcb.connector.entry[dcb->connector];
-
- if (ent->type != DCB_CONNECTOR_eDP)
+ entry = dcb_conn(dev, dcb->connector);
+ if (!entry || entry[0] != DCB_CONNECTOR_eDP)
iexec->execute = false;
- }
break;
case 1:
case 2:
@@ -3227,49 +3125,6 @@ init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
return 1;
}
-static void
-init_gpio_unknv50(struct nvbios *bios, struct dcb_gpio_entry *gpio)
-{
- const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
- u32 r, s, v;
-
- /* Not a clue, needs de-magicing */
- r = nv50_gpio_ctl[gpio->line >> 4];
- s = (gpio->line & 0x0f);
- v = bios_rd32(bios, r) & ~(0x00010001 << s);
- switch ((gpio->entry & 0x06000000) >> 25) {
- case 1:
- v |= (0x00000001 << s);
- break;
- case 2:
- v |= (0x00010000 << s);
- break;
- default:
- break;
- }
-
- bios_wr32(bios, r, v);
-}
-
-static void
-init_gpio_unknvd0(struct nvbios *bios, struct dcb_gpio_entry *gpio)
-{
- u32 v, i;
-
- v = bios_rd32(bios, 0x00d610 + (gpio->line * 4));
- v &= 0xffffff00;
- v |= (gpio->entry & 0x00ff0000) >> 16;
- bios_wr32(bios, 0x00d610 + (gpio->line * 4), v);
-
- i = (gpio->entry & 0x1f000000) >> 24;
- if (i) {
- v = bios_rd32(bios, 0x00d640 + ((i - 1) * 4));
- v &= 0xffffff00;
- v |= gpio->line;
- bios_wr32(bios, 0x00d640 + ((i - 1) * 4), v);
- }
-}
-
static int
init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
{
@@ -3282,35 +3137,8 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
* each GPIO according to various values listed in each entry
*/
- struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- int i;
-
- if (dev_priv->card_type < NV_50) {
- NV_ERROR(bios->dev, "INIT_GPIO on unsupported chipset\n");
- return 1;
- }
-
- if (!iexec->execute)
- return 1;
-
- for (i = 0; i < bios->dcb.gpio.entries; i++) {
- struct dcb_gpio_entry *gpio = &bios->dcb.gpio.entry[i];
-
- BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry);
-
- BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n",
- offset, gpio->tag, gpio->state_default);
-
- if (!bios->execute)
- continue;
-
- pgpio->set(bios->dev, gpio->tag, gpio->state_default);
- if (dev_priv->card_type < NV_D0)
- init_gpio_unknv50(bios, gpio);
- else
- init_gpio_unknvd0(bios, gpio);
- }
+ if (iexec->execute && bios->execute)
+ nouveau_gpio_reset(bios->dev);
return 1;
}
@@ -4407,18 +4235,6 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
break;
}
- /* Dell Latitude D620 reports a too-high value for the dual-link
- * transition freq, causing us to program the panel incorrectly.
- *
- * It doesn't appear the VBIOS actually uses its transition freq
- * (90000kHz), instead it uses the "Number of LVDS channels" field
- * out of the panel ID structure (http://www.spwg.org/).
- *
- * For the moment, a quirk will do :)
- */
- if (nv_match_device(dev, 0x01d7, 0x1028, 0x01c2))
- bios->fp.duallink_transition_clk = 80000;
-
/* set dual_link flag for EDID case */
if (pxclk && (chip_version < 0x25 || chip_version > 0x28))
bios->fp.dual_link = (pxclk >= bios->fp.duallink_transition_clk);
@@ -4541,7 +4357,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
NV_DEBUG_KMS(dev, "Searching for output entry for %d %d %d\n",
dcbent->type, dcbent->location, dcbent->or);
for (i = 0; i < table[3]; i++) {
- otable = ROMPTR(bios, table[table[1] + (i * table[2])]);
+ otable = ROMPTR(dev, table[table[1] + (i * table[2])]);
if (otable && bios_encoder_match(dcbent, ROM32(otable[0])))
break;
}
@@ -4719,7 +4535,7 @@ static struct pll_mapping nv84_pll_mapping[] = {
{ PLL_CORE , 0x004028 },
{ PLL_SHADER, 0x004020 },
{ PLL_MEMORY, 0x004008 },
- { PLL_UNK05 , 0x004030 },
+ { PLL_VDEC , 0x004030 },
{ PLL_UNK41 , 0x00e818 },
{ PLL_VPLL0 , 0x614100 },
{ PLL_VPLL1 , 0x614900 },
@@ -5485,6 +5301,9 @@ bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit)
struct nvbios *bios = &dev_priv->vbios;
u8 entries, *entry;
+ if (bios->type != NVBIOS_BIT)
+ return -ENODEV;
+
entries = bios->data[bios->offset + 10];
entry = &bios->data[bios->offset + 12];
while (entries--) {
@@ -5493,7 +5312,7 @@ bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit)
bit->version = entry[1];
bit->length = ROM16(entry[2]);
bit->offset = ROM16(entry[4]);
- bit->data = ROMPTR(bios, entry[4]);
+ bit->data = ROMPTR(dev, entry[4]);
return 0;
}
@@ -5598,10 +5417,6 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
uint16_t legacy_scripts_offset, legacy_i2c_offset;
/* load needed defaults in case we can't parse this info */
- bios->dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX;
- bios->dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX;
- bios->dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX;
- bios->dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX;
bios->digital_min_front_porch = 0x4b;
bios->fmaxvco = 256000;
bios->fminvco = 128000;
@@ -5709,14 +5524,6 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset];
bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1];
bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2];
- if (bios->data[legacy_i2c_offset + 4])
- bios->dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
- if (bios->data[legacy_i2c_offset + 5])
- bios->dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
- if (bios->data[legacy_i2c_offset + 6])
- bios->dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
- if (bios->data[legacy_i2c_offset + 7])
- bios->dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
if (bmplength > 74) {
bios->fmaxvco = ROM32(bmp[67]);
@@ -5767,286 +5574,128 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
return 0;
}
-static struct dcb_gpio_entry *
-new_gpio_entry(struct nvbios *bios)
-{
- struct drm_device *dev = bios->dev;
- struct dcb_gpio_table *gpio = &bios->dcb.gpio;
-
- if (gpio->entries >= DCB_MAX_NUM_GPIO_ENTRIES) {
- NV_ERROR(dev, "exceeded maximum number of gpio entries!!\n");
- return NULL;
- }
-
- return &gpio->entry[gpio->entries++];
-}
-
-struct dcb_gpio_entry *
-nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag)
+void *
+dcb_table(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->vbios;
- int i;
-
- for (i = 0; i < bios->dcb.gpio.entries; i++) {
- if (bios->dcb.gpio.entry[i].tag != tag)
- continue;
+ u8 *dcb = NULL;
- return &bios->dcb.gpio.entry[i];
+ if (dev_priv->card_type > NV_04)
+ dcb = ROMPTR(dev, dev_priv->vbios.data[0x36]);
+ if (!dcb) {
+ NV_WARNONCE(dev, "No DCB data found in VBIOS\n");
+ return NULL;
}
- return NULL;
-}
-
-static void
-parse_dcb_gpio_table(struct nvbios *bios)
-{
- struct drm_device *dev = bios->dev;
- struct dcb_gpio_entry *e;
- u8 headerlen, entries, recordlen;
- u8 *dcb, *gpio = NULL, *entry;
- int i;
-
- dcb = ROMPTR(bios, bios->data[0x36]);
+ if (dcb[0] >= 0x41) {
+ NV_WARNONCE(dev, "DCB version 0x%02x unknown\n", dcb[0]);
+ return NULL;
+ } else
if (dcb[0] >= 0x30) {
- gpio = ROMPTR(bios, dcb[10]);
- if (!gpio)
- goto no_table;
-
- headerlen = gpio[1];
- entries = gpio[2];
- recordlen = gpio[3];
+ if (ROM32(dcb[6]) == 0x4edcbdcb)
+ return dcb;
} else
- if (dcb[0] >= 0x22 && dcb[-1] >= 0x13) {
- gpio = ROMPTR(bios, dcb[-15]);
- if (!gpio)
- goto no_table;
-
- headerlen = 3;
- entries = gpio[2];
- recordlen = gpio[1];
+ if (dcb[0] >= 0x20) {
+ if (ROM32(dcb[4]) == 0x4edcbdcb)
+ return dcb;
} else
- if (dcb[0] >= 0x22) {
- /* No GPIO table present, parse the TVDAC GPIO data. */
- uint8_t *tvdac_gpio = &dcb[-5];
-
- if (tvdac_gpio[0] & 1) {
- e = new_gpio_entry(bios);
- e->tag = DCB_GPIO_TVDAC0;
- e->line = tvdac_gpio[1] >> 4;
- e->invert = tvdac_gpio[0] & 2;
- }
-
- goto no_table;
+ if (dcb[0] >= 0x15) {
+ if (!memcmp(&dcb[-7], "DEV_REC", 7))
+ return dcb;
} else {
- NV_DEBUG(dev, "no/unknown gpio table on DCB 0x%02x\n", dcb[0]);
- goto no_table;
- }
-
- entry = gpio + headerlen;
- for (i = 0; i < entries; i++, entry += recordlen) {
- e = new_gpio_entry(bios);
- if (!e)
- break;
-
- if (gpio[0] < 0x40) {
- e->entry = ROM16(entry[0]);
- e->tag = (e->entry & 0x07e0) >> 5;
- if (e->tag == 0x3f) {
- bios->dcb.gpio.entries--;
- continue;
- }
-
- e->line = (e->entry & 0x001f);
- e->invert = ((e->entry & 0xf800) >> 11) != 4;
- } else {
- e->entry = ROM32(entry[0]);
- e->tag = (e->entry & 0x0000ff00) >> 8;
- if (e->tag == 0xff) {
- bios->dcb.gpio.entries--;
- continue;
- }
-
- e->line = (e->entry & 0x0000001f) >> 0;
- if (gpio[0] == 0x40) {
- e->state_default = (e->entry & 0x01000000) >> 24;
- e->state[0] = (e->entry & 0x18000000) >> 27;
- e->state[1] = (e->entry & 0x60000000) >> 29;
- } else {
- e->state_default = (e->entry & 0x00000080) >> 7;
- e->state[0] = (entry[4] >> 4) & 3;
- e->state[1] = (entry[4] >> 6) & 3;
- }
- }
- }
-
-no_table:
- /* Apple iMac G4 NV18 */
- if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
- e = new_gpio_entry(bios);
- if (e) {
- e->tag = DCB_GPIO_TVDAC0;
- e->line = 4;
- }
- }
-}
-
-struct dcb_connector_table_entry *
-nouveau_bios_connector_entry(struct drm_device *dev, int index)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->vbios;
- struct dcb_connector_table_entry *cte;
-
- if (index >= bios->dcb.connector.entries)
- return NULL;
-
- cte = &bios->dcb.connector.entry[index];
- if (cte->type == 0xff)
+ /*
+ * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but
+ * always has the same single (crt) entry, even when tv-out
+ * present, so the conclusion is this version cannot really
+ * be used.
+ *
+ * v1.2 tables (some NV6/10, and NV15+) normally have the
+ * same 5 entries, which are not specific to the card and so
+ * no use.
+ *
+ * v1.2 does have an I2C table that read_dcb_i2c_table can
+ * handle, but cards exist (nv11 in #14821) with a bad i2c
+ * table pointer, so use the indices parsed in
+ * parse_bmp_structure.
+ *
+ * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
+ */
+ NV_WARNONCE(dev, "No useful DCB data in VBIOS\n");
return NULL;
+ }
- return cte;
+ NV_WARNONCE(dev, "DCB header validation failed\n");
+ return NULL;
}
-static enum dcb_connector_type
-divine_connector_type(struct nvbios *bios, int index)
+void *
+dcb_outp(struct drm_device *dev, u8 idx)
{
- struct dcb_table *dcb = &bios->dcb;
- unsigned encoders = 0, type = DCB_CONNECTOR_NONE;
- int i;
-
- for (i = 0; i < dcb->entries; i++) {
- if (dcb->entry[i].connector == index)
- encoders |= (1 << dcb->entry[i].type);
- }
-
- if (encoders & (1 << OUTPUT_DP)) {
- if (encoders & (1 << OUTPUT_TMDS))
- type = DCB_CONNECTOR_DP;
- else
- type = DCB_CONNECTOR_eDP;
- } else
- if (encoders & (1 << OUTPUT_TMDS)) {
- if (encoders & (1 << OUTPUT_ANALOG))
- type = DCB_CONNECTOR_DVI_I;
- else
- type = DCB_CONNECTOR_DVI_D;
- } else
- if (encoders & (1 << OUTPUT_ANALOG)) {
- type = DCB_CONNECTOR_VGA;
+ u8 *dcb = dcb_table(dev);
+ if (dcb && dcb[0] >= 0x30) {
+ if (idx < dcb[2])
+ return dcb + dcb[1] + (idx * dcb[3]);
} else
- if (encoders & (1 << OUTPUT_LVDS)) {
- type = DCB_CONNECTOR_LVDS;
+ if (dcb && dcb[0] >= 0x20) {
+ u8 *i2c = ROMPTR(dev, dcb[2]);
+ u8 *ent = dcb + 8 + (idx * 8);
+ if (i2c && ent < i2c)
+ return ent;
} else
- if (encoders & (1 << OUTPUT_TV)) {
- type = DCB_CONNECTOR_TV_0;
+ if (dcb && dcb[0] >= 0x15) {
+ u8 *i2c = ROMPTR(dev, dcb[2]);
+ u8 *ent = dcb + 4 + (idx * 10);
+ if (i2c && ent < i2c)
+ return ent;
}
- return type;
+ return NULL;
}
-static void
-apply_dcb_connector_quirks(struct nvbios *bios, int idx)
-{
- struct dcb_connector_table_entry *cte = &bios->dcb.connector.entry[idx];
- struct drm_device *dev = bios->dev;
+int
+dcb_outp_foreach(struct drm_device *dev, void *data,
+ int (*exec)(struct drm_device *, void *, int idx, u8 *outp))
+{
+ int ret, idx = -1;
+ u8 *outp = NULL;
+ while ((outp = dcb_outp(dev, ++idx))) {
+ if (ROM32(outp[0]) == 0x00000000)
+ break; /* seen on an NV11 with DCB v1.5 */
+ if (ROM32(outp[0]) == 0xffffffff)
+ break; /* seen on an NV17 with DCB v2.0 */
+
+ if ((outp[0] & 0x0f) == OUTPUT_UNUSED)
+ continue;
+ if ((outp[0] & 0x0f) == OUTPUT_EOL)
+ break;
- /* Gigabyte NX85T */
- if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
- if (cte->type == DCB_CONNECTOR_HDMI_1)
- cte->type = DCB_CONNECTOR_DVI_I;
+ ret = exec(dev, data, idx, outp);
+ if (ret)
+ return ret;
}
- /* Gigabyte GV-NX86T512H */
- if (nv_match_device(dev, 0x0402, 0x1458, 0x3455)) {
- if (cte->type == DCB_CONNECTOR_HDMI_1)
- cte->type = DCB_CONNECTOR_DVI_I;
- }
+ return 0;
}
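dcb_outp_foreach() introduced above walks every usable DCB output entry and hands it to a callback, stopping early on a non-zero return. A hypothetical usage sketch, assuming the surrounding driver headers (count_outp and dcb_count_outputs are illustrative, not part of the patch):

static int count_outp(struct drm_device *dev, void *data, int idx, u8 *outp)
{
	(*(int *)data)++;
	return 0; /* returning non-zero would stop the walk early */
}

static int dcb_count_outputs(struct drm_device *dev)
{
	int count = 0;
	dcb_outp_foreach(dev, &count, count_outp);
	return count;
}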
-static const u8 hpd_gpio[16] = {
- 0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
- 0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
-};
-
-static void
-parse_dcb_connector_table(struct nvbios *bios)
+u8 *
+dcb_conntab(struct drm_device *dev)
{
- struct drm_device *dev = bios->dev;
- struct dcb_connector_table *ct = &bios->dcb.connector;
- struct dcb_connector_table_entry *cte;
- uint8_t *conntab = &bios->data[bios->dcb.connector_table_ptr];
- uint8_t *entry;
- int i;
-
- if (!bios->dcb.connector_table_ptr) {
- NV_DEBUG_KMS(dev, "No DCB connector table present\n");
- return;
- }
-
- NV_INFO(dev, "DCB connector table: VHER 0x%02x %d %d %d\n",
- conntab[0], conntab[1], conntab[2], conntab[3]);
- if ((conntab[0] != 0x30 && conntab[0] != 0x40) ||
- (conntab[3] != 2 && conntab[3] != 4)) {
- NV_ERROR(dev, " Unknown! Please report.\n");
- return;
+ u8 *dcb = dcb_table(dev);
+ if (dcb && dcb[0] >= 0x30 && dcb[1] >= 0x16) {
+ u8 *conntab = ROMPTR(dev, dcb[0x14]);
+ if (conntab && conntab[0] >= 0x30 && conntab[0] <= 0x40)
+ return conntab;
}
+ return NULL;
+}
- ct->entries = conntab[2];
-
- entry = conntab + conntab[1];
- cte = &ct->entry[0];
- for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) {
- cte->index = i;
- if (conntab[3] == 2)
- cte->entry = ROM16(entry[0]);
- else
- cte->entry = ROM32(entry[0]);
-
- cte->type = (cte->entry & 0x000000ff) >> 0;
- cte->index2 = (cte->entry & 0x00000f00) >> 8;
-
- cte->gpio_tag = ffs((cte->entry & 0x07033000) >> 12);
- cte->gpio_tag = hpd_gpio[cte->gpio_tag];
-
- if (cte->type == 0xff)
- continue;
-
- apply_dcb_connector_quirks(bios, i);
-
- NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
- i, cte->entry, cte->type, cte->index, cte->gpio_tag);
-
- /* check for known types, fallback to guessing the type
- * from attached encoders if we hit an unknown.
- */
- switch (cte->type) {
- case DCB_CONNECTOR_VGA:
- case DCB_CONNECTOR_TV_0:
- case DCB_CONNECTOR_TV_1:
- case DCB_CONNECTOR_TV_3:
- case DCB_CONNECTOR_DVI_I:
- case DCB_CONNECTOR_DVI_D:
- case DCB_CONNECTOR_LVDS:
- case DCB_CONNECTOR_LVDS_SPWG:
- case DCB_CONNECTOR_DP:
- case DCB_CONNECTOR_eDP:
- case DCB_CONNECTOR_HDMI_0:
- case DCB_CONNECTOR_HDMI_1:
- break;
- default:
- cte->type = divine_connector_type(bios, cte->index);
- NV_WARN(dev, "unknown type, using 0x%02x\n", cte->type);
- break;
- }
-
- if (nouveau_override_conntype) {
- int type = divine_connector_type(bios, cte->index);
- if (type != cte->type)
- NV_WARN(dev, " -> type 0x%02x\n", cte->type);
- }
-
- }
+u8 *
+dcb_conn(struct drm_device *dev, u8 idx)
+{
+ u8 *conntab = dcb_conntab(dev);
+ if (conntab && idx < conntab[2])
+ return conntab + conntab[1] + (idx * conntab[3]);
+ return NULL;
}
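dcb_conntab() and dcb_conn() above follow the usual VBIOS table layout: byte 1 is the header length, byte 2 the entry count and byte 3 the per-entry record length. A generic sketch of that walk under those assumptions (vbios_table_entry is an illustrative helper):

#include <stdint.h>
#include <stddef.h>

static uint8_t *vbios_table_entry(uint8_t *table, uint8_t idx)
{
	uint8_t headerlen = table[1];
	uint8_t entries   = table[2];
	uint8_t recordlen = table[3];

	if (idx >= entries)
		return NULL;
	return table + headerlen + idx * recordlen;
}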
static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
@@ -6079,8 +5728,7 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
entry->type = conn & 0xf;
entry->i2c_index = (conn >> 4) & 0xf;
entry->heads = (conn >> 8) & 0xf;
- if (dcb->version >= 0x40)
- entry->connector = (conn >> 12) & 0xf;
+ entry->connector = (conn >> 12) & 0xf;
entry->bus = (conn >> 16) & 0xf;
entry->location = (conn >> 20) & 0x3;
entry->or = (conn >> 24) & 0xf;
@@ -6252,25 +5900,6 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
return true;
}
-static bool parse_dcb_entry(struct drm_device *dev, struct dcb_table *dcb,
- uint32_t conn, uint32_t conf)
-{
- struct dcb_entry *entry = new_dcb_entry(dcb);
- bool ret;
-
- if (dcb->version >= 0x20)
- ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
- else
- ret = parse_dcb15_entry(dev, dcb, conn, conf, entry);
- if (!ret)
- return ret;
-
- read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
- entry->i2c_index, &dcb->i2c[entry->i2c_index]);
-
- return true;
-}
-
static
void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
{
@@ -6431,154 +6060,118 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
#endif
/* Make up some sane defaults */
- fabricate_dcb_output(dcb, OUTPUT_ANALOG, LEGACY_I2C_CRT, 1, 1);
+ fabricate_dcb_output(dcb, OUTPUT_ANALOG,
+ bios->legacy.i2c_indices.crt, 1, 1);
if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
- fabricate_dcb_output(dcb, OUTPUT_TV, LEGACY_I2C_TV,
+ fabricate_dcb_output(dcb, OUTPUT_TV,
+ bios->legacy.i2c_indices.tv,
all_heads, 0);
else if (bios->tmds.output0_script_ptr ||
bios->tmds.output1_script_ptr)
- fabricate_dcb_output(dcb, OUTPUT_TMDS, LEGACY_I2C_PANEL,
+ fabricate_dcb_output(dcb, OUTPUT_TMDS,
+ bios->legacy.i2c_indices.panel,
all_heads, 1);
}
static int
-parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
+parse_dcb_entry(struct drm_device *dev, void *data, int idx, u8 *outp)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct dcb_table *dcb = &bios->dcb;
- uint16_t dcbptr = 0, i2ctabptr = 0;
- uint8_t *dcbtable;
- uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
- bool configblock = true;
- int recordlength = 8, confofs = 4;
- int i;
-
- /* get the offset from 0x36 */
- if (dev_priv->card_type > NV_04) {
- dcbptr = ROM16(bios->data[0x36]);
- if (dcbptr == 0x0000)
- NV_WARN(dev, "No output data (DCB) found in BIOS\n");
- }
-
- /* this situation likely means a really old card, pre DCB */
- if (dcbptr == 0x0) {
- fabricate_dcb_encoder_table(dev, bios);
- return 0;
- }
-
- dcbtable = &bios->data[dcbptr];
-
- /* get DCB version */
- dcb->version = dcbtable[0];
- NV_TRACE(dev, "Found Display Configuration Block version %d.%d\n",
- dcb->version >> 4, dcb->version & 0xf);
-
- if (dcb->version >= 0x20) { /* NV17+ */
- uint32_t sig;
+ struct dcb_table *dcb = &dev_priv->vbios.dcb;
+ u32 conf = (dcb->version >= 0x20) ? ROM32(outp[4]) : ROM32(outp[6]);
+ u32 conn = ROM32(outp[0]);
+ bool ret;
- if (dcb->version >= 0x30) { /* NV40+ */
- headerlen = dcbtable[1];
- entries = dcbtable[2];
- recordlength = dcbtable[3];
- i2ctabptr = ROM16(dcbtable[4]);
- sig = ROM32(dcbtable[6]);
- dcb->gpio_table_ptr = ROM16(dcbtable[10]);
- dcb->connector_table_ptr = ROM16(dcbtable[20]);
- } else {
- i2ctabptr = ROM16(dcbtable[2]);
- sig = ROM32(dcbtable[4]);
- headerlen = 8;
- }
+ if (apply_dcb_encoder_quirks(dev, idx, &conn, &conf)) {
+ struct dcb_entry *entry = new_dcb_entry(dcb);
- if (sig != 0x4edcbdcb) {
- NV_ERROR(dev, "Bad Display Configuration Block "
- "signature (%08X)\n", sig);
- return -EINVAL;
- }
- } else if (dcb->version >= 0x15) { /* some NV11 and NV20 */
- char sig[8] = { 0 };
+ NV_TRACEWARN(dev, "DCB outp %02d: %08x %08x\n", idx, conn, conf);
- strncpy(sig, (char *)&dcbtable[-7], 7);
- i2ctabptr = ROM16(dcbtable[2]);
- recordlength = 10;
- confofs = 6;
+ if (dcb->version >= 0x20)
+ ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
+ else
+ ret = parse_dcb15_entry(dev, dcb, conn, conf, entry);
+ if (!ret)
+ return 1; /* stop parsing */
- if (strcmp(sig, "DEV_REC")) {
- NV_ERROR(dev, "Bad Display Configuration Block "
- "signature (%s)\n", sig);
- return -EINVAL;
- }
- } else {
- /*
- * v1.4 (some NV15/16, NV11+) seems the same as v1.5, but always
- * has the same single (crt) entry, even when tv-out present, so
- * the conclusion is this version cannot really be used.
- * v1.2 tables (some NV6/10, and NV15+) normally have the same
- * 5 entries, which are not specific to the card and so no use.
- * v1.2 does have an I2C table that read_dcb_i2c_table can
- * handle, but cards exist (nv11 in #14821) with a bad i2c table
- * pointer, so use the indices parsed in parse_bmp_structure.
- * v1.1 (NV5+, maybe some NV4) is entirely unhelpful
+ /* Ignore the I2C index for on-chip TV-out, as there
+ * are cards with bogus values (nv31m in bug 23212),
+ * and it's otherwise useless.
*/
- NV_TRACEWARN(dev, "No useful information in BIOS output table; "
- "adding all possible outputs\n");
- fabricate_dcb_encoder_table(dev, bios);
- return 0;
+ if (entry->type == OUTPUT_TV &&
+ entry->location == DCB_LOC_ON_CHIP)
+ entry->i2c_index = 0x0f;
}
- if (!i2ctabptr)
- NV_WARN(dev, "No pointer to DCB I2C port table\n");
- else {
- dcb->i2c_table = &bios->data[i2ctabptr];
- if (dcb->version >= 0x30)
- dcb->i2c_default_indices = dcb->i2c_table[4];
+ return 0;
+}
- /*
- * Parse the "management" I2C bus, used for hardware
- * monitoring and some external TMDS transmitters.
- */
- if (dcb->version >= 0x22) {
- int idx = (dcb->version >= 0x40 ?
- dcb->i2c_default_indices & 0xf :
- 2);
+static void
+dcb_fake_connectors(struct nvbios *bios)
+{
+ struct dcb_table *dcbt = &bios->dcb;
+ u8 map[16] = { };
+ int i, idx = 0;
- read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
- idx, &dcb->i2c[idx]);
- }
+ /* heuristic: if we ever get a non-zero connector field, assume
+ * that all the indices are valid and we don't need to fake them.
+ */
+ for (i = 0; i < dcbt->entries; i++) {
+ if (dcbt->entry[i].connector)
+ return;
}
- if (entries > DCB_MAX_NUM_ENTRIES)
- entries = DCB_MAX_NUM_ENTRIES;
-
- for (i = 0; i < entries; i++) {
- uint32_t connection, config = 0;
-
- connection = ROM32(dcbtable[headerlen + recordlength * i]);
- if (configblock)
- config = ROM32(dcbtable[headerlen + confofs + recordlength * i]);
-
- /* seen on an NV11 with DCB v1.5 */
- if (connection == 0x00000000)
- break;
+ /* no useful connector info available, so we need to make it up
+ * ourselves. The rule here is: anything on the same i2c bus
+ * is considered to be on the same connector. Any output
+ * without an associated i2c bus is assigned its own unique
+ * connector index.
+ */
+ for (i = 0; i < dcbt->entries; i++) {
+ u8 i2c = dcbt->entry[i].i2c_index;
+ if (i2c == 0x0f) {
+ dcbt->entry[i].connector = idx++;
+ } else {
+ if (!map[i2c])
+ map[i2c] = ++idx;
+ dcbt->entry[i].connector = map[i2c] - 1;
+ }
+ }
- /* seen on an NV17 with DCB v2.0 */
- if (connection == 0xffffffff)
- break;
+ /* if we created more than one connector, destroy the connector
+ * table - just in case it has random, rather than stub, entries.
+ */
+ if (i > 1) {
+ u8 *conntab = dcb_conntab(bios->dev);
+ if (conntab)
+ conntab[0] = 0x00;
+ }
+}
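dcb_fake_connectors() above assigns connector indices purely from i2c bus sharing. A standalone sketch of the same rule over plain arrays, with a worked example (fake_connectors and the array names are illustrative):

#include <stdint.h>

/* Example: i2c indices {1, 1, 0x0f, 2} map to connectors {0, 0, 1, 2}:
 * the two bus-1 outputs share a connector, while the bus-less output
 * and the bus-2 output each get their own. */
static void fake_connectors(const uint8_t *i2c_index, uint8_t *connector, int n)
{
	uint8_t map[16] = { 0 };
	int i, idx = 0;

	for (i = 0; i < n; i++) {
		if (i2c_index[i] == 0x0f) {
			connector[i] = idx++;
		} else {
			if (!map[i2c_index[i]])
				map[i2c_index[i]] = ++idx;
			connector[i] = map[i2c_index[i]] - 1;
		}
	}
}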
- if ((connection & 0x0000000f) == 0x0000000f)
- continue;
+static int
+parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
+{
+ struct dcb_table *dcb = &bios->dcb;
+ u8 *dcbt, *conn;
+ int idx;
+
+ dcbt = dcb_table(dev);
+ if (!dcbt) {
+ /* handle pre-DCB boards */
+ if (bios->type == NVBIOS_BMP) {
+ fabricate_dcb_encoder_table(dev, bios);
+ return 0;
+ }
- if (!apply_dcb_encoder_quirks(dev, i, &connection, &config))
- continue;
+ return -EINVAL;
+ }
- NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n",
- dcb->entries, connection, config);
+ NV_TRACE(dev, "DCB version %d.%d\n", dcbt[0] >> 4, dcbt[0] & 0xf);
- if (!parse_dcb_entry(dev, dcb, connection, config))
- break;
- }
+ dcb->version = dcbt[0];
+ dcb_outp_foreach(dev, NULL, parse_dcb_entry);
/*
* apart for v2.1+ not being known for requiring merging, this
@@ -6590,77 +6183,19 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios)
if (!dcb->entries)
return -ENXIO;
- parse_dcb_gpio_table(bios);
- parse_dcb_connector_table(bios);
- return 0;
-}
-
-static void
-fixup_legacy_connector(struct nvbios *bios)
-{
- struct dcb_table *dcb = &bios->dcb;
- int i, i2c, i2c_conn[DCB_MAX_NUM_I2C_ENTRIES] = { };
-
- /*
- * DCB 3.0 also has the table in most cases, but there are some cards
- * where the table is filled with stub entries, and the DCB entriy
- * indices are all 0. We don't need the connector indices on pre-G80
- * chips (yet?) so limit the use to DCB 4.0 and above.
- */
- if (dcb->version >= 0x40)
- return;
-
- dcb->connector.entries = 0;
-
- /*
- * No known connector info before v3.0, so make it up. the rule here
- * is: anything on the same i2c bus is considered to be on the same
- * connector. any output without an associated i2c bus is assigned
- * its own unique connector index.
- */
- for (i = 0; i < dcb->entries; i++) {
- /*
- * Ignore the I2C index for on-chip TV-out, as there
- * are cards with bogus values (nv31m in bug 23212),
- * and it's otherwise useless.
- */
- if (dcb->entry[i].type == OUTPUT_TV &&
- dcb->entry[i].location == DCB_LOC_ON_CHIP)
- dcb->entry[i].i2c_index = 0xf;
- i2c = dcb->entry[i].i2c_index;
-
- if (i2c_conn[i2c]) {
- dcb->entry[i].connector = i2c_conn[i2c] - 1;
- continue;
+ /* dump connector table entries to log, if any exist */
+ idx = -1;
+ while ((conn = dcb_conn(dev, ++idx))) {
+ if (conn[0] != 0xff) {
+ NV_TRACE(dev, "DCB conn %02d: ", idx);
+ if (dcb_conntab(dev)[3] < 4)
+ printk("%04x\n", ROM16(conn[0]));
+ else
+ printk("%08x\n", ROM32(conn[0]));
}
-
- dcb->entry[i].connector = dcb->connector.entries++;
- if (i2c != 0xf)
- i2c_conn[i2c] = dcb->connector.entries;
- }
-
- /* Fake the connector table as well as just connector indices */
- for (i = 0; i < dcb->connector.entries; i++) {
- dcb->connector.entry[i].index = i;
- dcb->connector.entry[i].type = divine_connector_type(bios, i);
- dcb->connector.entry[i].gpio_tag = 0xff;
- }
-}
-
-static void
-fixup_legacy_i2c(struct nvbios *bios)
-{
- struct dcb_table *dcb = &bios->dcb;
- int i;
-
- for (i = 0; i < dcb->entries; i++) {
- if (dcb->entry[i].i2c_index == LEGACY_I2C_CRT)
- dcb->entry[i].i2c_index = bios->legacy.i2c_indices.crt;
- if (dcb->entry[i].i2c_index == LEGACY_I2C_PANEL)
- dcb->entry[i].i2c_index = bios->legacy.i2c_indices.panel;
- if (dcb->entry[i].i2c_index == LEGACY_I2C_TV)
- dcb->entry[i].i2c_index = bios->legacy.i2c_indices.tv;
}
+ dcb_fake_connectors(bios);
+ return 0;
}
static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bios, uint16_t hwsq_offset, int entry)
@@ -6879,19 +6414,6 @@ nouveau_run_vbios_init(struct drm_device *dev)
return ret;
}
-static void
-nouveau_bios_i2c_devices_takedown(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->vbios;
- struct dcb_i2c_entry *entry;
- int i;
-
- entry = &bios->dcb.i2c[0];
- for (i = 0; i < DCB_MAX_NUM_I2C_ENTRIES; i++, entry++)
- nouveau_i2c_fini(dev, entry);
-}
-
static bool
nouveau_bios_posted(struct drm_device *dev)
{
@@ -6928,12 +6450,17 @@ nouveau_bios_init(struct drm_device *dev)
if (ret)
return ret;
- ret = parse_dcb_table(dev, bios);
+ ret = nouveau_i2c_init(dev);
if (ret)
return ret;
- fixup_legacy_i2c(bios);
- fixup_legacy_connector(bios);
+ ret = nouveau_mxm_init(dev);
+ if (ret)
+ return ret;
+
+ ret = parse_dcb_table(dev, bios);
+ if (ret)
+ return ret;
if (!bios->major_version) /* we don't run version 0 bios */
return 0;
@@ -6971,5 +6498,6 @@ nouveau_bios_init(struct drm_device *dev)
void
nouveau_bios_takedown(struct drm_device *dev)
{
- nouveau_bios_i2c_devices_takedown(dev);
+ nouveau_mxm_fini(dev);
+ nouveau_i2c_fini(dev);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 8adb69e..a37c31e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -34,9 +34,14 @@
#define DCB_LOC_ON_CHIP 0
-#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x))
-#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x))
-#define ROMPTR(bios, x) (ROM16(x) ? &(bios)->data[ROM16(x)] : NULL)
+#define ROM16(x) le16_to_cpu(*(u16 *)&(x))
+#define ROM32(x) le32_to_cpu(*(u32 *)&(x))
+#define ROM48(x) ({ u8 *p = &(x); (u64)ROM16(p[4]) << 32 | ROM32(p[0]); })
+#define ROM64(x) le64_to_cpu(*(u64 *)&(x))
+#define ROMPTR(d,x) ({ \
+ struct drm_nouveau_private *dev_priv = (d)->dev_private; \
+ ROM16(x) ? &dev_priv->vbios.data[ROM16(x)] : NULL; \
+})
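The ROM16/ROM32 macros above read little-endian values straight out of the VBIOS image. An endianness- and alignment-safe sketch of the same reads in plain C (rom16/rom32 are illustrative helpers, not the macros themselves):

#include <stdint.h>

static uint16_t rom16(const uint8_t *p)
{
	return (uint16_t)(p[0] | p[1] << 8);
}

static uint32_t rom32(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}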
struct bit_entry {
uint8_t id;
@@ -48,30 +53,13 @@ struct bit_entry {
int bit_table(struct drm_device *, u8 id, struct bit_entry *);
-struct dcb_i2c_entry {
- uint32_t entry;
- uint8_t port_type;
- uint8_t read, write;
- struct nouveau_i2c_chan *chan;
-};
-
enum dcb_gpio_tag {
- DCB_GPIO_TVDAC0 = 0xc,
+ DCB_GPIO_PANEL_POWER = 0x01,
+ DCB_GPIO_TVDAC0 = 0x0c,
DCB_GPIO_TVDAC1 = 0x2d,
-};
-
-struct dcb_gpio_entry {
- enum dcb_gpio_tag tag;
- int line;
- bool invert;
- uint32_t entry;
- uint8_t state_default;
- uint8_t state[2];
-};
-
-struct dcb_gpio_table {
- int entries;
- struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES];
+ DCB_GPIO_PWM_FAN = 0x09,
+ DCB_GPIO_FAN_SENSE = 0x3d,
+ DCB_GPIO_UNUSED = 0xff
};
enum dcb_connector_type {
@@ -90,20 +78,6 @@ enum dcb_connector_type {
DCB_CONNECTOR_NONE = 0xff
};
-struct dcb_connector_table_entry {
- uint8_t index;
- uint32_t entry;
- enum dcb_connector_type type;
- uint8_t index2;
- uint8_t gpio_tag;
- void *drm;
-};
-
-struct dcb_connector_table {
- int entries;
- struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES];
-};
-
enum dcb_type {
OUTPUT_ANALOG = 0,
OUTPUT_TV = 1,
@@ -111,6 +85,7 @@ enum dcb_type {
OUTPUT_LVDS = 3,
OUTPUT_DP = 6,
OUTPUT_EOL = 14, /* DCB 4.0+, appears to be end-of-list */
+ OUTPUT_UNUSED = 15,
OUTPUT_ANY = -1
};
@@ -155,18 +130,8 @@ struct dcb_entry {
struct dcb_table {
uint8_t version;
-
int entries;
struct dcb_entry entry[DCB_MAX_NUM_ENTRIES];
-
- uint8_t *i2c_table;
- uint8_t i2c_default_indices;
- struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES];
-
- uint16_t gpio_table_ptr;
- struct dcb_gpio_table gpio;
- uint16_t connector_table_ptr;
- struct dcb_connector_table connector;
};
enum nouveau_or {
@@ -195,7 +160,7 @@ enum pll_types {
PLL_SHADER = 0x02,
PLL_UNK03 = 0x03,
PLL_MEMORY = 0x04,
- PLL_UNK05 = 0x05,
+ PLL_VDEC = 0x05,
PLL_UNK40 = 0x40,
PLL_UNK41 = 0x41,
PLL_UNK42 = 0x42,
@@ -333,4 +298,11 @@ struct nvbios {
} legacy;
};
+void *dcb_table(struct drm_device *);
+void *dcb_outp(struct drm_device *, u8 idx);
+int dcb_outp_foreach(struct drm_device *, void *data,
+ int (*)(struct drm_device *, void *, int idx, u8 *outp));
+u8 *dcb_conntab(struct drm_device *);
+u8 *dcb_conn(struct drm_device *, u8 idx);
+
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 7cc37e6..ec54364 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -28,6 +28,7 @@
*/
#include "drmP.h"
+#include "ttm/ttm_page_alloc.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
@@ -92,6 +93,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *nvbo;
+ size_t acc_size;
int ret;
nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
@@ -114,9 +116,12 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
nouveau_bo_placement_set(nvbo, flags, 0);
+ acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size,
+ sizeof(struct nouveau_bo));
+
ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
ttm_bo_type_device, &nvbo->placement,
- align >> PAGE_SHIFT, 0, false, NULL, size,
+ align >> PAGE_SHIFT, 0, false, NULL, acc_size,
nouveau_bo_del_ttm);
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
@@ -343,8 +348,10 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
*mem = val;
}
-static struct ttm_backend *
-nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
+static struct ttm_tt *
+nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
{
struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
struct drm_device *dev = dev_priv->dev;
@@ -352,11 +359,13 @@ nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
case NOUVEAU_GART_AGP:
- return ttm_agp_backend_init(bdev, dev->agp->bridge);
+ return ttm_agp_tt_create(bdev, dev->agp->bridge,
+ size, page_flags, dummy_read_page);
#endif
case NOUVEAU_GART_PDMA:
case NOUVEAU_GART_HW:
- return nouveau_sgdma_init_ttm(dev);
+ return nouveau_sgdma_create_ttm(bdev, size, page_flags,
+ dummy_read_page);
default:
NV_ERROR(dev, "Unknown GART type %d\n",
dev_priv->gart_info.type);
@@ -673,8 +682,7 @@ nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
if (mem->mem_type == TTM_PL_VRAM)
nouveau_vm_map(vma, node);
else
- nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT,
- node, node->pages);
+ nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);
return 0;
}
@@ -801,19 +809,22 @@ out:
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
- struct nouveau_mem *node = new_mem->mm_node;
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nouveau_vma *vma;
+ /* ttm can now (stupidly) pass the driver bos it didn't create... */
+ if (bo->destroy != nouveau_bo_del_ttm)
+ return;
+
list_for_each_entry(vma, &nvbo->vma_list, head) {
- if (new_mem->mem_type == TTM_PL_VRAM) {
+ if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
nouveau_vm_map(vma, new_mem->mm_node);
} else
- if (new_mem->mem_type == TTM_PL_TT &&
+ if (new_mem && new_mem->mem_type == TTM_PL_TT &&
nvbo->page_shift == vma->vm->spg_shift) {
nouveau_vm_map_sg(vma, 0, new_mem->
num_pages << PAGE_SHIFT,
- node, node->pages);
+ new_mem->mm_node);
} else {
nouveau_vm_unmap(vma);
}
@@ -1044,8 +1055,94 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
nouveau_fence_unref(&old_fence);
}
+static int
+nouveau_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ struct ttm_dma_tt *ttm_dma = (void *)ttm;
+ struct drm_nouveau_private *dev_priv;
+ struct drm_device *dev;
+ unsigned i;
+ int r;
+
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ dev_priv = nouveau_bdev(ttm->bdev);
+ dev = dev_priv->dev;
+
+#if __OS_HAS_AGP
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+ return ttm_agp_tt_populate(ttm);
+ }
+#endif
+
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb_nr_tbl()) {
+ return ttm_dma_populate((void *)ttm, dev->dev);
+ }
+#endif
+
+ r = ttm_pool_populate(ttm);
+ if (r) {
+ return r;
+ }
+
+ for (i = 0; i < ttm->num_pages; i++) {
+ ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
+ 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
+ while (i--) { /* unwind mappings 0..i-1, not just 1..i-1 */
+ pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ ttm_dma->dma_address[i] = 0;
+ }
+ ttm_pool_unpopulate(ttm);
+ return -EFAULT;
+ }
+ }
+ return 0;
+}
+
+static void
+nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ struct ttm_dma_tt *ttm_dma = (void *)ttm;
+ struct drm_nouveau_private *dev_priv;
+ struct drm_device *dev;
+ unsigned i;
+
+ dev_priv = nouveau_bdev(ttm->bdev);
+ dev = dev_priv->dev;
+
+#if __OS_HAS_AGP
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+ ttm_agp_tt_unpopulate(ttm);
+ return;
+ }
+#endif
+
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb_nr_tbl()) {
+ ttm_dma_unpopulate((void *)ttm, dev->dev);
+ return;
+ }
+#endif
+
+ for (i = 0; i < ttm->num_pages; i++) {
+ if (ttm_dma->dma_address[i]) {
+ pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ }
+ }
+
+ ttm_pool_unpopulate(ttm);
+}
+
struct ttm_bo_driver nouveau_bo_driver = {
- .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
+ .ttm_tt_create = &nouveau_ttm_tt_create,
+ .ttm_tt_populate = &nouveau_ttm_tt_populate,
+ .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
.invalidate_caches = nouveau_bo_invalidate_caches,
.init_mem_type = nouveau_bo_init_mem_type,
.evict_flags = nouveau_bo_evict_flags,
@@ -1091,7 +1188,7 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
else
if (nvbo->bo.mem.mem_type == TTM_PL_TT)
- nouveau_vm_map_sg(vma, 0, size, node, node->pages);
+ nouveau_vm_map_sg(vma, 0, size, node);
list_add_tail(&vma->head, &nvbo->vma_list);
vma->refcount = 1;
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index bb6ec9e..a018def 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -187,6 +187,8 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
nouveau_dma_pre_init(chan);
chan->user_put = 0x40;
chan->user_get = 0x44;
+ if (dev_priv->card_type >= NV_50)
+ chan->user_get_hi = 0x60;
/* disable the fifo caches */
pfifo->reassign(dev, false);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index cea6696..f3ce34b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -35,6 +35,7 @@
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
#include "nouveau_connector.h"
+#include "nouveau_gpio.h"
#include "nouveau_hw.h"
static void nouveau_connector_hotplug(void *, int);
@@ -78,29 +79,11 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
return NULL;
}
-/*TODO: This could use improvement, and learn to handle the fixed
- * BIOS tables etc. It's fine currently, for its only user.
- */
-int
-nouveau_connector_bpp(struct drm_connector *connector)
-{
- struct nouveau_connector *nv_connector = nouveau_connector(connector);
-
- if (nv_connector->edid && nv_connector->edid->revision >= 4) {
- u8 bpc = ((nv_connector->edid->input & 0x70) >> 3) + 4;
- if (bpc > 4)
- return bpc;
- }
-
- return 18;
-}
-
static void
nouveau_connector_destroy(struct drm_connector *connector)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct drm_nouveau_private *dev_priv;
- struct nouveau_gpio_engine *pgpio;
struct drm_device *dev;
if (!nv_connector)
@@ -110,10 +93,9 @@ nouveau_connector_destroy(struct drm_connector *connector)
dev_priv = dev->dev_private;
NV_DEBUG_KMS(dev, "\n");
- pgpio = &dev_priv->engine.gpio;
- if (pgpio->irq_unregister) {
- pgpio->irq_unregister(dev, nv_connector->dcb->gpio_tag,
- nouveau_connector_hotplug, connector);
+ if (nv_connector->hpd != DCB_GPIO_UNUSED) {
+ nouveau_gpio_isr_del(dev, 0, nv_connector->hpd, 0xff,
+ nouveau_connector_hotplug, connector);
}
kfree(nv_connector->edid);
@@ -198,6 +180,10 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
return;
nv_connector->detected_encoder = nv_encoder;
+ if (dev_priv->card_type >= NV_50) {
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
+ } else
if (nv_encoder->dcb->type == OUTPUT_LVDS ||
nv_encoder->dcb->type == OUTPUT_TMDS) {
connector->doublescan_allowed = false;
@@ -214,7 +200,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
connector->interlace_allowed = true;
}
- if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
+ if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
drm_connector_property_set_value(connector,
dev->mode_config.dvi_i_subconnector_property,
nv_encoder->dcb->type == OUTPUT_TMDS ?
@@ -397,7 +383,7 @@ nouveau_connector_force(struct drm_connector *connector)
struct nouveau_encoder *nv_encoder;
int type;
- if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
+ if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
if (connector->force == DRM_FORCE_ON_DIGITAL)
type = OUTPUT_TMDS;
else
@@ -420,15 +406,21 @@ static int
nouveau_connector_set_property(struct drm_connector *connector,
struct drm_property *property, uint64_t value)
{
+ struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
+ struct nouveau_display_engine *disp = &dev_priv->engine.display;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
struct drm_device *dev = connector->dev;
+ struct nouveau_crtc *nv_crtc;
int ret;
+ nv_crtc = NULL;
+ if (connector->encoder && connector->encoder->crtc)
+ nv_crtc = nouveau_crtc(connector->encoder->crtc);
+
/* Scaling mode */
if (property == dev->mode_config.scaling_mode_property) {
- struct nouveau_crtc *nv_crtc = NULL;
bool modeset = false;
switch (value) {
@@ -454,8 +446,6 @@ nouveau_connector_set_property(struct drm_connector *connector,
modeset = true;
nv_connector->scaling_mode = value;
- if (connector->encoder && connector->encoder->crtc)
- nv_crtc = nouveau_crtc(connector->encoder->crtc);
if (!nv_crtc)
return 0;
@@ -467,7 +457,7 @@ nouveau_connector_set_property(struct drm_connector *connector,
if (!ret)
return -EINVAL;
} else {
- ret = nv_crtc->set_scale(nv_crtc, value, true);
+ ret = nv_crtc->set_scale(nv_crtc, true);
if (ret)
return ret;
}
@@ -475,23 +465,58 @@ nouveau_connector_set_property(struct drm_connector *connector,
return 0;
}
- /* Dithering */
- if (property == dev->mode_config.dithering_mode_property) {
- struct nouveau_crtc *nv_crtc = NULL;
+ /* Underscan */
+ if (property == disp->underscan_property) {
+ if (nv_connector->underscan != value) {
+ nv_connector->underscan = value;
+ if (!nv_crtc || !nv_crtc->set_scale)
+ return 0;
- if (value == DRM_MODE_DITHERING_ON)
- nv_connector->use_dithering = true;
- else
- nv_connector->use_dithering = false;
+ return nv_crtc->set_scale(nv_crtc, true);
+ }
+
+ return 0;
+ }
+
+ if (property == disp->underscan_hborder_property) {
+ if (nv_connector->underscan_hborder != value) {
+ nv_connector->underscan_hborder = value;
+ if (!nv_crtc || !nv_crtc->set_scale)
+ return 0;
+
+ return nv_crtc->set_scale(nv_crtc, true);
+ }
+
+ return 0;
+ }
+
+ if (property == disp->underscan_vborder_property) {
+ if (nv_connector->underscan_vborder != value) {
+ nv_connector->underscan_vborder = value;
+ if (!nv_crtc || !nv_crtc->set_scale)
+ return 0;
+
+ return nv_crtc->set_scale(nv_crtc, true);
+ }
+
+ return 0;
+ }
+
+ /* Dithering */
+ if (property == disp->dithering_mode) {
+ nv_connector->dithering_mode = value;
+ if (!nv_crtc || !nv_crtc->set_dither)
+ return 0;
- if (connector->encoder && connector->encoder->crtc)
- nv_crtc = nouveau_crtc(connector->encoder->crtc);
+ return nv_crtc->set_dither(nv_crtc, true);
+ }
+ if (property == disp->dithering_depth) {
+ nv_connector->dithering_depth = value;
if (!nv_crtc || !nv_crtc->set_dither)
return 0;
- return nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering,
- true);
+ return nv_crtc->set_dither(nv_crtc, true);
}
if (nv_encoder && nv_encoder->dcb->type == OUTPUT_TV)
@@ -602,6 +627,46 @@ nouveau_connector_scaler_modes_add(struct drm_connector *connector)
return modes;
}
+static void
+nouveau_connector_detect_depth(struct drm_connector *connector)
+{
+ struct drm_nouveau_private *dev_priv = connector->dev->dev_private;
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
+ struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
+ struct nvbios *bios = &dev_priv->vbios;
+ struct drm_display_mode *mode = nv_connector->native_mode;
+ bool duallink;
+
+ /* if the edid is feeling nice enough to provide this info, use it */
+ if (nv_connector->edid && connector->display_info.bpc)
+ return;
+
+ /* if not, we're out of options unless we're LVDS, default to 6bpc */
+ connector->display_info.bpc = 6;
+ if (nv_encoder->dcb->type != OUTPUT_LVDS)
+ return;
+
+ /* LVDS: panel straps */
+ if (bios->fp_no_ddc) {
+ if (bios->fp.if_is_24bit)
+ connector->display_info.bpc = 8;
+ return;
+ }
+
+ /* LVDS: DDC panel, need to first determine the number of links to
+ * know which if_is_24bit flag to check...
+ */
+ if (nv_connector->edid &&
+ nv_connector->type == DCB_CONNECTOR_LVDS_SPWG)
+ duallink = ((u8 *)nv_connector->edid)[121] == 2;
+ else
+ duallink = mode->clock >= bios->fp.duallink_transition_clk;
+
+ if ((!duallink && (bios->fp.strapless_is_24bit & 1)) ||
+ ( duallink && (bios->fp.strapless_is_24bit & 2)))
+ connector->display_info.bpc = 8;
+}
+
static int
nouveau_connector_get_modes(struct drm_connector *connector)
{
@@ -631,6 +696,12 @@ nouveau_connector_get_modes(struct drm_connector *connector)
nv_connector->native_mode = drm_mode_duplicate(dev, &mode);
}
+ /* Determine display colour depth for everything except LVDS now,
+ * DP requires this before mode_valid() is called.
+ */
+ if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
+ nouveau_connector_detect_depth(connector);
+
/* Find the native mode if this is a digital panel, if we didn't
* find any modes through DDC previously add the native mode to
* the list of modes.
@@ -646,12 +717,19 @@ nouveau_connector_get_modes(struct drm_connector *connector)
ret = 1;
}
+ /* Determine LVDS colour depth, must happen after determining
+ * "native" mode as some VBIOS tables require us to use the
+ * pixel clock as part of the lookup...
+ */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ nouveau_connector_detect_depth(connector);
+
if (nv_encoder->dcb->type == OUTPUT_TV)
ret = get_slave_funcs(encoder)->get_modes(encoder, connector);
- if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS ||
- nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG ||
- nv_connector->dcb->type == DCB_CONNECTOR_eDP)
+ if (nv_connector->type == DCB_CONNECTOR_LVDS ||
+ nv_connector->type == DCB_CONNECTOR_LVDS_SPWG ||
+ nv_connector->type == DCB_CONNECTOR_eDP)
ret += nouveau_connector_scaler_modes_add(connector);
return ret;
@@ -710,7 +788,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
case OUTPUT_DP:
max_clock = nv_encoder->dp.link_nr;
max_clock *= nv_encoder->dp.link_bw;
- clock = clock * nouveau_connector_bpp(connector) / 10;
+ clock = clock * (connector->display_info.bpc * 3) / 10;
break;
default:
BUG_ON(1);
@@ -768,96 +846,175 @@ nouveau_connector_funcs_lvds = {
.force = nouveau_connector_force
};
+static int
+drm_conntype_from_dcb(enum dcb_connector_type dcb)
+{
+ switch (dcb) {
+ case DCB_CONNECTOR_VGA : return DRM_MODE_CONNECTOR_VGA;
+ case DCB_CONNECTOR_TV_0 :
+ case DCB_CONNECTOR_TV_1 :
+ case DCB_CONNECTOR_TV_3 : return DRM_MODE_CONNECTOR_TV;
+ case DCB_CONNECTOR_DVI_I : return DRM_MODE_CONNECTOR_DVII;
+ case DCB_CONNECTOR_DVI_D : return DRM_MODE_CONNECTOR_DVID;
+ case DCB_CONNECTOR_LVDS :
+ case DCB_CONNECTOR_LVDS_SPWG: return DRM_MODE_CONNECTOR_LVDS;
+ case DCB_CONNECTOR_DP : return DRM_MODE_CONNECTOR_DisplayPort;
+ case DCB_CONNECTOR_eDP : return DRM_MODE_CONNECTOR_eDP;
+ case DCB_CONNECTOR_HDMI_0 :
+ case DCB_CONNECTOR_HDMI_1 : return DRM_MODE_CONNECTOR_HDMIA;
+ default:
+ break;
+ }
+
+ return DRM_MODE_CONNECTOR_Unknown;
+}
+
struct drm_connector *
nouveau_connector_create(struct drm_device *dev, int index)
{
const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct nouveau_display_engine *disp = &dev_priv->engine.display;
struct nouveau_connector *nv_connector = NULL;
- struct dcb_connector_table_entry *dcb = NULL;
struct drm_connector *connector;
int type, ret = 0;
+ bool dummy;
NV_DEBUG_KMS(dev, "\n");
- if (index >= dev_priv->vbios.dcb.connector.entries)
- return ERR_PTR(-EINVAL);
-
- dcb = &dev_priv->vbios.dcb.connector.entry[index];
- if (dcb->drm)
- return dcb->drm;
-
- switch (dcb->type) {
- case DCB_CONNECTOR_VGA:
- type = DRM_MODE_CONNECTOR_VGA;
- break;
- case DCB_CONNECTOR_TV_0:
- case DCB_CONNECTOR_TV_1:
- case DCB_CONNECTOR_TV_3:
- type = DRM_MODE_CONNECTOR_TV;
- break;
- case DCB_CONNECTOR_DVI_I:
- type = DRM_MODE_CONNECTOR_DVII;
- break;
- case DCB_CONNECTOR_DVI_D:
- type = DRM_MODE_CONNECTOR_DVID;
- break;
- case DCB_CONNECTOR_HDMI_0:
- case DCB_CONNECTOR_HDMI_1:
- type = DRM_MODE_CONNECTOR_HDMIA;
- break;
- case DCB_CONNECTOR_LVDS:
- case DCB_CONNECTOR_LVDS_SPWG:
- type = DRM_MODE_CONNECTOR_LVDS;
- funcs = &nouveau_connector_funcs_lvds;
- break;
- case DCB_CONNECTOR_DP:
- type = DRM_MODE_CONNECTOR_DisplayPort;
- break;
- case DCB_CONNECTOR_eDP:
- type = DRM_MODE_CONNECTOR_eDP;
- break;
- default:
- NV_ERROR(dev, "unknown connector type: 0x%02x!!\n", dcb->type);
- return ERR_PTR(-EINVAL);
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ nv_connector = nouveau_connector(connector);
+ if (nv_connector->index == index)
+ return connector;
}
nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
if (!nv_connector)
return ERR_PTR(-ENOMEM);
- nv_connector->dcb = dcb;
+
connector = &nv_connector->base;
+ nv_connector->index = index;
+
+ /* attempt to parse vbios connector type and hotplug gpio */
+ nv_connector->dcb = dcb_conn(dev, index);
+ if (nv_connector->dcb) {
+ static const u8 hpd[16] = {
+ 0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
+ };
+
+ u32 entry = ROM16(nv_connector->dcb[0]);
+ if (dcb_conntab(dev)[3] >= 4)
+ entry |= (u32)ROM16(nv_connector->dcb[2]) << 16;
+
+ nv_connector->hpd = ffs((entry & 0x07033000) >> 12);
+ nv_connector->hpd = hpd[nv_connector->hpd];
+
+ nv_connector->type = nv_connector->dcb[0];
+ if (drm_conntype_from_dcb(nv_connector->type) ==
+ DRM_MODE_CONNECTOR_Unknown) {
+ NV_WARN(dev, "unknown connector type %02x\n",
+ nv_connector->type);
+ nv_connector->type = DCB_CONNECTOR_NONE;
+ }
- /* defaults, will get overridden in detect() */
- connector->interlace_allowed = false;
- connector->doublescan_allowed = false;
+ /* Gigabyte NX85T */
+ if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
+ if (nv_connector->type == DCB_CONNECTOR_HDMI_1)
+ nv_connector->type = DCB_CONNECTOR_DVI_I;
+ }
- drm_connector_init(dev, connector, funcs, type);
- drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
+ /* Gigabyte GV-NX86T512H */
+ if (nv_match_device(dev, 0x0402, 0x1458, 0x3455)) {
+ if (nv_connector->type == DCB_CONNECTOR_HDMI_1)
+ nv_connector->type = DCB_CONNECTOR_DVI_I;
+ }
+ } else {
+ nv_connector->type = DCB_CONNECTOR_NONE;
+ nv_connector->hpd = DCB_GPIO_UNUSED;
+ }
+
+ /* no vbios data, or an unknown dcb connector type - attempt to
+ * figure out something suitable ourselves
+ */
+ if (nv_connector->type == DCB_CONNECTOR_NONE) {
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct dcb_table *dcbt = &dev_priv->vbios.dcb;
+ u32 encoders = 0;
+ int i;
+
+ for (i = 0; i < dcbt->entries; i++) {
+ if (dcbt->entry[i].connector == nv_connector->index)
+ encoders |= (1 << dcbt->entry[i].type);
+ }
- /* Check if we need dithering enabled */
- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
- bool dummy, is_24bit = false;
+ if (encoders & (1 << OUTPUT_DP)) {
+ if (encoders & (1 << OUTPUT_TMDS))
+ nv_connector->type = DCB_CONNECTOR_DP;
+ else
+ nv_connector->type = DCB_CONNECTOR_eDP;
+ } else
+ if (encoders & (1 << OUTPUT_TMDS)) {
+ if (encoders & (1 << OUTPUT_ANALOG))
+ nv_connector->type = DCB_CONNECTOR_DVI_I;
+ else
+ nv_connector->type = DCB_CONNECTOR_DVI_D;
+ } else
+ if (encoders & (1 << OUTPUT_ANALOG)) {
+ nv_connector->type = DCB_CONNECTOR_VGA;
+ } else
+ if (encoders & (1 << OUTPUT_LVDS)) {
+ nv_connector->type = DCB_CONNECTOR_LVDS;
+ } else
+ if (encoders & (1 << OUTPUT_TV)) {
+ nv_connector->type = DCB_CONNECTOR_TV_0;
+ }
+ }
- ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &is_24bit);
+ type = drm_conntype_from_dcb(nv_connector->type);
+ if (type == DRM_MODE_CONNECTOR_LVDS) {
+ ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &dummy);
if (ret) {
- NV_ERROR(dev, "Error parsing LVDS table, disabling "
- "LVDS\n");
- goto fail;
+ NV_ERROR(dev, "Error parsing LVDS table, disabling\n");
+ kfree(nv_connector);
+ return ERR_PTR(ret);
}
- nv_connector->use_dithering = !is_24bit;
+ funcs = &nouveau_connector_funcs_lvds;
+ } else {
+ funcs = &nouveau_connector_funcs;
}
+ /* defaults, will get overridden in detect() */
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
+
+ drm_connector_init(dev, connector, funcs, type);
+ drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
+
/* Init DVI-I specific properties */
- if (dcb->type == DCB_CONNECTOR_DVI_I) {
- drm_mode_create_dvi_i_properties(dev);
+ if (nv_connector->type == DCB_CONNECTOR_DVI_I)
drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
- drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0);
+
+ /* Add overscan compensation options to digital outputs */
+ if (disp->underscan_property &&
+ (nv_connector->type == DCB_CONNECTOR_DVI_D ||
+ nv_connector->type == DCB_CONNECTOR_DVI_I ||
+ nv_connector->type == DCB_CONNECTOR_HDMI_0 ||
+ nv_connector->type == DCB_CONNECTOR_HDMI_1 ||
+ nv_connector->type == DCB_CONNECTOR_DP)) {
+ drm_connector_attach_property(connector,
+ disp->underscan_property,
+ UNDERSCAN_OFF);
+ drm_connector_attach_property(connector,
+ disp->underscan_hborder_property,
+ 0);
+ drm_connector_attach_property(connector,
+ disp->underscan_vborder_property,
+ 0);
}
- switch (dcb->type) {
+ switch (nv_connector->type) {
case DCB_CONNECTOR_VGA:
if (dev_priv->card_type >= NV_50) {
drm_connector_attach_property(connector,
@@ -876,32 +1033,32 @@ nouveau_connector_create(struct drm_device *dev, int index)
drm_connector_attach_property(connector,
dev->mode_config.scaling_mode_property,
nv_connector->scaling_mode);
- drm_connector_attach_property(connector,
- dev->mode_config.dithering_mode_property,
- nv_connector->use_dithering ?
- DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF);
+ if (disp->dithering_mode) {
+ nv_connector->dithering_mode = DITHERING_MODE_AUTO;
+ drm_connector_attach_property(connector,
+ disp->dithering_mode,
+ nv_connector->dithering_mode);
+ }
+ if (disp->dithering_depth) {
+ nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
+ drm_connector_attach_property(connector,
+ disp->dithering_depth,
+ nv_connector->dithering_depth);
+ }
break;
}
- if (nv_connector->dcb->gpio_tag != 0xff && pgpio->irq_register) {
- pgpio->irq_register(dev, nv_connector->dcb->gpio_tag,
- nouveau_connector_hotplug, connector);
-
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- } else {
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ if (nv_connector->hpd != DCB_GPIO_UNUSED) {
+ ret = nouveau_gpio_isr_add(dev, 0, nv_connector->hpd, 0xff,
+ nouveau_connector_hotplug,
+ connector);
+ if (ret == 0)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
}
drm_sysfs_connector_add(connector);
-
- dcb->drm = connector;
- return dcb->drm;
-
-fail:
- drm_connector_cleanup(connector);
- kfree(connector);
- return ERR_PTR(ret);
-
+ return connector;
}
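For reference, a minimal standalone sketch of the fallback used above when the DCB carries no usable connector type: the connector is classified from the set of encoder types wired to it, DP taking priority over TMDS, and TMDS plus analog meaning DVI-I. The EX_OUTPUT_* constants and guess_connector() exist only in this example; the driver's own OUTPUT_* values differ numerically and only the distinct bits matter here.

#include <stdio.h>

/* stand-ins for the driver's OUTPUT_* encoder types; values are arbitrary */
enum { EX_OUTPUT_ANALOG, EX_OUTPUT_TV, EX_OUTPUT_TMDS, EX_OUTPUT_LVDS, EX_OUTPUT_DP };

static const char *
guess_connector(unsigned int encoders)
{
	if (encoders & (1 << EX_OUTPUT_DP))
		return (encoders & (1 << EX_OUTPUT_TMDS)) ? "DP" : "eDP";
	if (encoders & (1 << EX_OUTPUT_TMDS))
		return (encoders & (1 << EX_OUTPUT_ANALOG)) ? "DVI-I" : "DVI-D";
	if (encoders & (1 << EX_OUTPUT_ANALOG))
		return "VGA";
	if (encoders & (1 << EX_OUTPUT_LVDS))
		return "LVDS";
	if (encoders & (1 << EX_OUTPUT_TV))
		return "TV";
	return "none";
}

int main(void)
{
	/* a connector wired to both a TMDS and an analog encoder is DVI-I */
	unsigned int encoders = (1 << EX_OUTPUT_TMDS) | (1 << EX_OUTPUT_ANALOG);
	printf("%s\n", guess_connector(encoders));
	return 0;
}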
static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 711b1e9..e485702 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -30,13 +30,43 @@
#include "drm_edid.h"
#include "nouveau_i2c.h"
+enum nouveau_underscan_type {
+ UNDERSCAN_OFF,
+ UNDERSCAN_ON,
+ UNDERSCAN_AUTO,
+};
+
+/* the enum values specifically defined here match nv50/nvd0 hw values, and
+ * the code relies on this
+ */
+enum nouveau_dithering_mode {
+ DITHERING_MODE_OFF = 0x00,
+ DITHERING_MODE_ON = 0x01,
+ DITHERING_MODE_DYNAMIC2X2 = 0x10 | DITHERING_MODE_ON,
+ DITHERING_MODE_STATIC2X2 = 0x18 | DITHERING_MODE_ON,
+ DITHERING_MODE_TEMPORAL = 0x20 | DITHERING_MODE_ON,
+ DITHERING_MODE_AUTO
+};
+
+enum nouveau_dithering_depth {
+ DITHERING_DEPTH_6BPC = 0x00,
+ DITHERING_DEPTH_8BPC = 0x02,
+ DITHERING_DEPTH_AUTO
+};
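As the comment above notes, the dithering mode values mirror the hardware encoding: each "on" variant is a pattern selector OR'd with the ON bit, so the enable bit and the pattern can be split back apart the same way. A small local sketch (the MODE_* names and helpers below are specific to this example, not the driver):

#include <stdio.h>

enum ex_mode {
	MODE_OFF        = 0x00,
	MODE_ON         = 0x01,
	MODE_DYNAMIC2X2 = 0x10 | MODE_ON,
	MODE_STATIC2X2  = 0x18 | MODE_ON,
	MODE_TEMPORAL   = 0x20 | MODE_ON,
};

static int mode_enabled(enum ex_mode m) { return m & MODE_ON; }
static int mode_pattern(enum ex_mode m) { return m & ~MODE_ON; }

int main(void)
{
	printf("static 2x2: enabled=%d pattern=0x%02x\n",
	       mode_enabled(MODE_STATIC2X2), mode_pattern(MODE_STATIC2X2));
	printf("off:        enabled=%d pattern=0x%02x\n",
	       mode_enabled(MODE_OFF), mode_pattern(MODE_OFF));
	return 0;
}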
+
struct nouveau_connector {
struct drm_connector base;
+ enum dcb_connector_type type;
+ u8 index;
+ u8 *dcb;
+ u8 hpd;
- struct dcb_connector_table_entry *dcb;
-
+ int dithering_mode;
+ int dithering_depth;
int scaling_mode;
- bool use_dithering;
+ enum nouveau_underscan_type underscan;
+ u32 underscan_hborder;
+ u32 underscan_vborder;
struct nouveau_encoder *detected_encoder;
struct edid *edid;
diff --git a/drivers/gpu/drm/nouveau/nouveau_crtc.h b/drivers/gpu/drm/nouveau/nouveau_crtc.h
index bf8e128..686f6b4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_crtc.h
+++ b/drivers/gpu/drm/nouveau/nouveau_crtc.h
@@ -32,8 +32,6 @@ struct nouveau_crtc {
int index;
- struct drm_display_mode *mode;
-
uint32_t dpms_saved_fp_control;
uint32_t fp_users;
int saturation;
@@ -67,8 +65,8 @@ struct nouveau_crtc {
int depth;
} lut;
- int (*set_dither)(struct nouveau_crtc *crtc, bool on, bool update);
- int (*set_scale)(struct nouveau_crtc *crtc, int mode, bool update);
+ int (*set_dither)(struct nouveau_crtc *crtc, bool update);
+ int (*set_scale)(struct nouveau_crtc *crtc, bool update);
};
static inline struct nouveau_crtc *nouveau_crtc(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index 8e15923..fa2ec49 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -44,7 +44,7 @@ nouveau_debugfs_channel_info(struct seq_file *m, void *data)
seq_printf(m, "channel id : %d\n", chan->id);
seq_printf(m, "cpu fifo state:\n");
- seq_printf(m, " base: 0x%08x\n", chan->pushbuf_base);
+ seq_printf(m, " base: 0x%10llx\n", chan->pushbuf_base);
seq_printf(m, " max: 0x%08x\n", chan->dma.max << 2);
seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2);
seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2);
@@ -178,6 +178,7 @@ static struct drm_info_list nouveau_debugfs_list[] = {
{ "memory", nouveau_debugfs_memory_info, 0, NULL },
{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
{ "ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL },
+ { "ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL },
};
#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index b12fd2c..795a9e3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -32,6 +32,8 @@
#include "nouveau_hw.h"
#include "nouveau_crtc.h"
#include "nouveau_dma.h"
+#include "nouveau_connector.h"
+#include "nouveau_gpio.h"
#include "nv50_display.h"
static void
@@ -64,7 +66,7 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
int
nouveau_framebuffer_init(struct drm_device *dev,
struct nouveau_framebuffer *nv_fb,
- struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_mode_fb_cmd2 *mode_cmd,
struct nouveau_bo *nvbo)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -107,14 +109,14 @@ nouveau_framebuffer_init(struct drm_device *dev,
if (!tile_flags) {
if (dev_priv->card_type < NV_D0)
- nv_fb->r_pitch = 0x00100000 | fb->pitch;
+ nv_fb->r_pitch = 0x00100000 | fb->pitches[0];
else
- nv_fb->r_pitch = 0x01000000 | fb->pitch;
+ nv_fb->r_pitch = 0x01000000 | fb->pitches[0];
} else {
u32 mode = nvbo->tile_mode;
if (dev_priv->card_type >= NV_C0)
mode >>= 4;
- nv_fb->r_pitch = ((fb->pitch / 4) << 4) | mode;
+ nv_fb->r_pitch = ((fb->pitches[0] / 4) << 4) | mode;
}
}
@@ -124,13 +126,13 @@ nouveau_framebuffer_init(struct drm_device *dev,
static struct drm_framebuffer *
nouveau_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
- struct drm_mode_fb_cmd *mode_cmd)
+ struct drm_mode_fb_cmd2 *mode_cmd)
{
struct nouveau_framebuffer *nouveau_fb;
struct drm_gem_object *gem;
int ret;
- gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
+ gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
if (!gem)
return ERR_PTR(-ENOENT);
@@ -147,11 +149,196 @@ nouveau_user_framebuffer_create(struct drm_device *dev,
return &nouveau_fb->base;
}
-const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
+static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
.fb_create = nouveau_user_framebuffer_create,
.output_poll_changed = nouveau_fbcon_output_poll_changed,
};
+
+struct drm_prop_enum_list {
+ u8 gen_mask;
+ int type;
+ char *name;
+};
+
+static struct drm_prop_enum_list underscan[] = {
+ { 6, UNDERSCAN_AUTO, "auto" },
+ { 6, UNDERSCAN_OFF, "off" },
+ { 6, UNDERSCAN_ON, "on" },
+ {}
+};
+
+static struct drm_prop_enum_list dither_mode[] = {
+ { 7, DITHERING_MODE_AUTO, "auto" },
+ { 7, DITHERING_MODE_OFF, "off" },
+ { 1, DITHERING_MODE_ON, "on" },
+ { 6, DITHERING_MODE_STATIC2X2, "static 2x2" },
+ { 6, DITHERING_MODE_DYNAMIC2X2, "dynamic 2x2" },
+ { 4, DITHERING_MODE_TEMPORAL, "temporal" },
+ {}
+};
+
+static struct drm_prop_enum_list dither_depth[] = {
+ { 6, DITHERING_DEPTH_AUTO, "auto" },
+ { 6, DITHERING_DEPTH_6BPC, "6 bpc" },
+ { 6, DITHERING_DEPTH_8BPC, "8 bpc" },
+ {}
+};
+
+#define PROP_ENUM(p,gen,n,list) do { \
+ struct drm_prop_enum_list *l = (list); \
+ int c = 0; \
+ while (l->gen_mask) { \
+ if (l->gen_mask & (1 << (gen))) \
+ c++; \
+ l++; \
+ } \
+ if (c) { \
+ p = drm_property_create(dev, DRM_MODE_PROP_ENUM, n, c); \
+ l = (list); \
+ c = 0; \
+ while (p && l->gen_mask) { \
+ if (l->gen_mask & (1 << (gen))) { \
+ drm_property_add_enum(p, c, l->type, l->name); \
+ c++; \
+ } \
+ l++; \
+ } \
+ } \
+} while(0)
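The gen_mask bits in the tables above line up with the gen value computed in nouveau_display_create() below: bit 0 for pre-NV50, bit 1 for NV50 up to (but not including) NVD0, bit 2 for NVD0 and later. A mask of 7 therefore exposes an entry everywhere, 6 only on NV50 and newer, and 4 only on NVD0. A standalone sketch of the first (counting) pass of PROP_ENUM; prop_count() and the trimmed table are local to this example:

#include <stdio.h>

struct prop_entry {
	unsigned char gen_mask;
	const char *name;
};

static int
prop_count(const struct prop_entry *l, int gen)
{
	int c = 0;
	for (; l->gen_mask; l++)
		if (l->gen_mask & (1 << gen))
			c++;
	return c;
}

int main(void)
{
	static const struct prop_entry dither_mode[] = {
		{ 7, "auto" }, { 7, "off" }, { 1, "on" },
		{ 6, "static 2x2" }, { 6, "dynamic 2x2" }, { 4, "temporal" },
		{ 0, NULL }
	};
	printf("pre-NV50: %d entries\n", prop_count(dither_mode, 0)); /* 3 */
	printf("NV50:     %d entries\n", prop_count(dither_mode, 1)); /* 4 */
	printf("NVD0:     %d entries\n", prop_count(dither_mode, 2)); /* 5 */
	return 0;
}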
+
+int
+nouveau_display_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_display_engine *disp = &dev_priv->engine.display;
+ struct drm_connector *connector;
+ int ret;
+
+ ret = disp->init(dev);
+ if (ret)
+ return ret;
+
+ /* power on internal panel if it's not already. the init tables of
+ * some vbios default this to off for some reason, causing the
+ * panel to not work after resume
+ */
+ if (nouveau_gpio_func_get(dev, DCB_GPIO_PANEL_POWER) == 0) {
+ nouveau_gpio_func_set(dev, DCB_GPIO_PANEL_POWER, true);
+ msleep(300);
+ }
+
+ /* enable polling for external displays */
+ drm_kms_helper_poll_enable(dev);
+
+ /* enable hotplug interrupts */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct nouveau_connector *conn = nouveau_connector(connector);
+ nouveau_gpio_irq(dev, 0, conn->hpd, 0xff, true);
+ }
+
+ return ret;
+}
+
+void
+nouveau_display_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_display_engine *disp = &dev_priv->engine.display;
+ struct drm_connector *connector;
+
+ /* disable hotplug interrupts */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct nouveau_connector *conn = nouveau_connector(connector);
+ nouveau_gpio_irq(dev, 0, conn->hpd, 0xff, false);
+ }
+
+ drm_kms_helper_poll_disable(dev);
+ disp->fini(dev);
+}
+
+int
+nouveau_display_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_display_engine *disp = &dev_priv->engine.display;
+ int ret, gen;
+
+ drm_mode_config_init(dev);
+ drm_mode_create_scaling_mode_property(dev);
+ drm_mode_create_dvi_i_properties(dev);
+
+ if (dev_priv->card_type < NV_50)
+ gen = 0;
+ else
+ if (dev_priv->card_type < NV_D0)
+ gen = 1;
+ else
+ gen = 2;
+
+ PROP_ENUM(disp->dithering_mode, gen, "dithering mode", dither_mode);
+ PROP_ENUM(disp->dithering_depth, gen, "dithering depth", dither_depth);
+ PROP_ENUM(disp->underscan_property, gen, "underscan", underscan);
+
+ disp->underscan_hborder_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "underscan hborder", 2);
+ disp->underscan_hborder_property->values[0] = 0;
+ disp->underscan_hborder_property->values[1] = 128;
+
+ disp->underscan_vborder_property =
+ drm_property_create(dev, DRM_MODE_PROP_RANGE,
+ "underscan vborder", 2);
+ disp->underscan_vborder_property->values[0] = 0;
+ disp->underscan_vborder_property->values[1] = 128;
+
+ dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
+ dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1);
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ if (dev_priv->card_type < NV_10) {
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+ } else
+ if (dev_priv->card_type < NV_50) {
+ dev->mode_config.max_width = 4096;
+ dev->mode_config.max_height = 4096;
+ } else {
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
+ }
+
+ drm_kms_helper_poll_init(dev);
+ drm_kms_helper_poll_disable(dev);
+
+ ret = disp->create(dev);
+ if (ret)
+ return ret;
+
+ if (dev->mode_config.num_crtc) {
+ ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+void
+nouveau_display_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_display_engine *disp = &dev_priv->engine.display;
+
+ drm_vblank_cleanup(dev);
+
+ disp->destroy(dev);
+
+ drm_kms_helper_poll_fini(dev);
+ drm_mode_config_cleanup(dev);
+}
+
int
nouveau_vblank_enable(struct drm_device *dev, int crtc)
{
@@ -294,7 +481,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Initialize a page flip struct */
*s = (struct nouveau_page_flip_state)
{ { }, event, nouveau_crtc(crtc)->index,
- fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y,
+ fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y,
new_bo->bo.offset };
/* Choose the channel the flip will be handled in */
@@ -305,7 +492,10 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
/* Emit a page flip */
if (dev_priv->card_type >= NV_50) {
- ret = nv50_display_flip_next(crtc, fb, chan);
+ if (dev_priv->card_type >= NV_D0)
+ ret = nvd0_display_flip_next(crtc, fb, chan, 0);
+ else
+ ret = nv50_display_flip_next(crtc, fb, chan);
if (ret) {
nouveau_channel_put(&chan);
goto fail_unreserve;
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 00bc6ea..4c2e4e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -134,11 +134,13 @@ OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
* -EBUSY if timeout exceeded
*/
static inline int
-READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
+READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout)
{
- uint32_t val;
+ uint64_t val;
val = nvchan_rd32(chan, chan->user_get);
+ if (chan->user_get_hi)
+ val |= (uint64_t)nvchan_rd32(chan, chan->user_get_hi) << 32;
/* reset counter as long as GET is still advancing, this is
* to avoid misdetecting a GPU lockup if the GPU happens to
@@ -218,8 +220,8 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count)
static int
nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
{
- uint32_t cnt = 0, prev_get = 0;
- int ret;
+ uint64_t prev_get = 0;
+ int ret, cnt = 0;
ret = nv50_dma_push_wait(chan, slots + 1);
if (unlikely(ret))
@@ -261,8 +263,8 @@ nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
int
nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
{
- uint32_t prev_get = 0, cnt = 0;
- int get;
+ uint64_t prev_get = 0;
+ int cnt = 0, get;
if (chan->dma.ib_max)
return nv50_dma_wait(chan, slots, size);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c
index de5efe7..9b93b70 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dp.c
@@ -29,6 +29,7 @@
#include "nouveau_connector.h"
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
+#include "nouveau_gpio.h"
/******************************************************************************
* aux channel util functions
@@ -273,8 +274,6 @@ nouveau_dp_tu_update(struct drm_device *dev, int or, int link, u32 clk, u32 bpp)
u8 *
nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->vbios;
struct bit_entry d;
u8 *table;
int i;
@@ -289,7 +288,7 @@ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
return NULL;
}
- table = ROMPTR(bios, d.data[0]);
+ table = ROMPTR(dev, d.data[0]);
if (!table) {
NV_ERROR(dev, "displayport table pointer invalid\n");
return NULL;
@@ -306,7 +305,7 @@ nouveau_dp_bios_data(struct drm_device *dev, struct dcb_entry *dcb, u8 **entry)
}
for (i = 0; i < table[3]; i++) {
- *entry = ROMPTR(bios, table[table[1] + (i * table[2])]);
+ *entry = ROMPTR(dev, table[table[1] + (i * table[2])]);
if (*entry && bios_encoder_match(dcb, ROM32((*entry)[0])))
return table;
}
@@ -336,7 +335,6 @@ struct dp_state {
static void
dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
int or = dp->or, link = dp->link;
u8 *entry, sink[2];
u32 dp_ctrl;
@@ -360,7 +358,7 @@ dp_set_link_config(struct drm_device *dev, struct dp_state *dp)
* table, that has (among other things) pointers to more scripts that
* need to be executed, this time depending on link speed.
*/
- entry = ROMPTR(&dev_priv->vbios, dp->entry[10]);
+ entry = ROMPTR(dev, dp->entry[10]);
if (entry) {
if (dp->table[0] < 0x30) {
while (dp->link_bw < (ROM16(entry[0]) * 10))
@@ -559,8 +557,6 @@ dp_link_train_eq(struct drm_device *dev, struct dp_state *dp)
bool
nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
{
- struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nouveau_connector *nv_connector =
@@ -581,7 +577,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
dp.dcb = nv_encoder->dcb;
dp.crtc = nv_crtc->index;
- dp.auxch = auxch->rd;
+ dp.auxch = auxch->drive;
dp.or = nv_encoder->or;
dp.link = !(nv_encoder->dcb->sorconf.link & 1);
dp.dpcd = nv_encoder->dp.dpcd;
@@ -590,7 +586,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
* we take during link training (DP_SET_POWER is one), we need
* to ignore them for the moment to avoid races.
*/
- pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, false);
+ nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, false);
/* enable down-spreading, if possible */
if (dp.table[1] >= 16) {
@@ -639,7 +635,7 @@ nouveau_dp_link_train(struct drm_encoder *encoder, u32 datarate)
nouveau_bios_run_init_table(dev, ROM16(dp.entry[8]), dp.dcb, dp.crtc);
/* re-enable hotplug detect */
- pgpio->irq_enable(dev, nv_connector->dcb->gpio_tag, true);
+ nouveau_gpio_irq(dev, 0, nv_connector->hpd, 0xff, true);
return true;
}
@@ -656,7 +652,7 @@ nouveau_dp_detect(struct drm_encoder *encoder)
if (!auxch)
return false;
- ret = auxch_tx(dev, auxch->rd, 9, DP_DPCD_REV, dpcd, 8);
+ ret = auxch_tx(dev, auxch->drive, 9, DP_DPCD_REV, dpcd, 8);
if (ret)
return false;
@@ -684,7 +680,7 @@ int
nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
uint8_t *data, int data_nr)
{
- return auxch_tx(auxch->dev, auxch->rd, cmd, addr, data, data_nr);
+ return auxch_tx(auxch->dev, auxch->drive, cmd, addr, data, data_nr);
}
static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 9791d13..81d7962 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -124,6 +124,10 @@ MODULE_PARM_DESC(ctxfw, "Use external HUB/GPC ucode (fermi)\n");
int nouveau_ctxfw;
module_param_named(ctxfw, nouveau_ctxfw, int, 0400);
+MODULE_PARM_DESC(mxmdcb, "Sanitise DCB table according to MXM-SIS\n");
+int nouveau_mxmdcb = 1;
+module_param_named(mxmdcb, nouveau_mxmdcb, int, 0400);
+
int nouveau_fbpercrtc;
#if 0
module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
@@ -178,8 +182,11 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- NV_INFO(dev, "Disabling fbcon acceleration...\n");
- nouveau_fbcon_save_disable_accel(dev);
+ NV_INFO(dev, "Disabling display...\n");
+ nouveau_display_fini(dev);
+
+ NV_INFO(dev, "Disabling fbcon...\n");
+ nouveau_fbcon_set_suspend(dev, 1);
NV_INFO(dev, "Unpinning framebuffer(s)...\n");
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -220,7 +227,7 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
ret = dev_priv->eng[e]->fini(dev, e, true);
if (ret) {
- NV_ERROR(dev, "... engine %d failed: %d\n", i, ret);
+ NV_ERROR(dev, "... engine %d failed: %d\n", e, ret);
goto out_abort;
}
}
@@ -246,10 +253,6 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
pci_set_power_state(pdev, PCI_D3hot);
}
- console_lock();
- nouveau_fbcon_set_suspend(dev, 1);
- console_unlock();
- nouveau_fbcon_restore_accel(dev);
return 0;
out_abort:
@@ -275,8 +278,6 @@ nouveau_pci_resume(struct pci_dev *pdev)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- nouveau_fbcon_save_disable_accel(dev);
-
NV_INFO(dev, "We're back, enabling device...\n");
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
@@ -296,8 +297,6 @@ nouveau_pci_resume(struct pci_dev *pdev)
if (ret)
return ret;
- nouveau_pm_resume(dev);
-
if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
ret = nouveau_mem_init_agp(dev);
if (ret) {
@@ -337,6 +336,8 @@ nouveau_pci_resume(struct pci_dev *pdev)
}
}
+ nouveau_pm_resume(dev);
+
NV_INFO(dev, "Restoring mode...\n");
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_framebuffer *nouveau_fb;
@@ -358,16 +359,10 @@ nouveau_pci_resume(struct pci_dev *pdev)
NV_ERROR(dev, "Could not pin/map cursor.\n");
}
- engine->display.init(dev);
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- u32 offset = nv_crtc->cursor.nvbo->bo.offset;
+ nouveau_fbcon_set_suspend(dev, 0);
+ nouveau_fbcon_zfill_all(dev);
- nv_crtc->cursor.set_offset(nv_crtc, offset);
- nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
- nv_crtc->cursor_saved_y);
- }
+ nouveau_display_init(dev);
/* Force CLUT to get re-loaded during modeset */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -376,18 +371,35 @@ nouveau_pci_resume(struct pci_dev *pdev)
nv_crtc->lut.depth = 0;
}
- console_lock();
- nouveau_fbcon_set_suspend(dev, 0);
- console_unlock();
+ drm_helper_resume_force_mode(dev);
- nouveau_fbcon_zfill_all(dev);
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ u32 offset = nv_crtc->cursor.nvbo->bo.offset;
- drm_helper_resume_force_mode(dev);
+ nv_crtc->cursor.set_offset(nv_crtc, offset);
+ nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
+ nv_crtc->cursor_saved_y);
+ }
- nouveau_fbcon_restore_accel(dev);
return 0;
}
+static const struct file_operations nouveau_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = nouveau_ttm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+#if defined(CONFIG_COMPAT)
+ .compat_ioctl = nouveau_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
@@ -413,21 +425,7 @@ static struct drm_driver driver = {
.disable_vblank = nouveau_vblank_disable,
.reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = nouveau_ioctls,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = nouveau_ttm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .read = drm_read,
-#if defined(CONFIG_COMPAT)
- .compat_ioctl = nouveau_compat_ioctl,
-#endif
- .llseek = noop_llseek,
- },
-
+ .fops = &nouveau_driver_fops,
.gem_init_object = nouveau_gem_object_new,
.gem_free_object = nouveau_gem_object_del,
.gem_open_object = nouveau_gem_object_open,
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 4c0be3a..b827098 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -163,6 +163,9 @@ enum nouveau_flags {
#define NVOBJ_ENGINE_COPY0 3
#define NVOBJ_ENGINE_COPY1 4
#define NVOBJ_ENGINE_MPEG 5
+#define NVOBJ_ENGINE_PPP NVOBJ_ENGINE_MPEG
+#define NVOBJ_ENGINE_BSP 6
+#define NVOBJ_ENGINE_VP 7
#define NVOBJ_ENGINE_DISPLAY 15
#define NVOBJ_ENGINE_NR 16
@@ -229,6 +232,7 @@ struct nouveau_channel {
/* mapping of the regs controlling the fifo */
void __iomem *user;
uint32_t user_get;
+ uint32_t user_get_hi;
uint32_t user_put;
/* Fencing */
@@ -246,7 +250,7 @@ struct nouveau_channel {
struct nouveau_gpuobj *pushbuf;
struct nouveau_bo *pushbuf_bo;
struct nouveau_vma pushbuf_vma;
- uint32_t pushbuf_base;
+ uint64_t pushbuf_base;
/* Notifier memory */
struct nouveau_bo *notifier_bo;
@@ -393,24 +397,25 @@ struct nouveau_display_engine {
int (*early_init)(struct drm_device *);
void (*late_takedown)(struct drm_device *);
int (*create)(struct drm_device *);
- int (*init)(struct drm_device *);
void (*destroy)(struct drm_device *);
+ int (*init)(struct drm_device *);
+ void (*fini)(struct drm_device *);
+
+ struct drm_property *dithering_mode;
+ struct drm_property *dithering_depth;
+ struct drm_property *underscan_property;
+ struct drm_property *underscan_hborder_property;
+ struct drm_property *underscan_vborder_property;
};
struct nouveau_gpio_engine {
- void *priv;
-
- int (*init)(struct drm_device *);
- void (*takedown)(struct drm_device *);
-
- int (*get)(struct drm_device *, enum dcb_gpio_tag);
- int (*set)(struct drm_device *, enum dcb_gpio_tag, int state);
-
- int (*irq_register)(struct drm_device *, enum dcb_gpio_tag,
- void (*)(void *, int), void *);
- void (*irq_unregister)(struct drm_device *, enum dcb_gpio_tag,
- void (*)(void *, int), void *);
- bool (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
+ spinlock_t lock;
+ struct list_head isr;
+ int (*init)(struct drm_device *);
+ void (*fini)(struct drm_device *);
+ int (*drive)(struct drm_device *, int line, int dir, int out);
+ int (*sense)(struct drm_device *, int line);
+ void (*irq_enable)(struct drm_device *, int line, bool);
};
struct nouveau_pm_voltage_level {
@@ -484,7 +489,7 @@ struct nouveau_pm_level {
u32 copy;
u32 daemon;
u32 vdec;
- u32 unk05; /* nv50:nva3, roughly.. */
+ u32 dom6;
u32 unka0; /* nva3:nvc0 */
u32 hub01; /* nvc0- */
u32 hub06; /* nvc0- */
@@ -518,6 +523,12 @@ struct nouveau_pm_memtimings {
int nr_timing;
};
+struct nouveau_pm_fan {
+ u32 min_duty;
+ u32 max_duty;
+ u32 pwm_freq;
+};
+
struct nouveau_pm_engine {
struct nouveau_pm_voltage voltage;
struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL];
@@ -525,6 +536,8 @@ struct nouveau_pm_engine {
struct nouveau_pm_memtimings memtimings;
struct nouveau_pm_temp_sensor_constants sensor_constants;
struct nouveau_pm_threshold_temp threshold_temp;
+ struct nouveau_pm_fan fan;
+ u32 pwm_divisor;
struct nouveau_pm_level boot;
struct nouveau_pm_level *cur;
@@ -532,19 +545,14 @@ struct nouveau_pm_engine {
struct device *hwmon;
struct notifier_block acpi_nb;
- int (*clock_get)(struct drm_device *, u32 id);
- void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *,
- u32 id, int khz);
- void (*clock_set)(struct drm_device *, void *);
-
int (*clocks_get)(struct drm_device *, struct nouveau_pm_level *);
void *(*clocks_pre)(struct drm_device *, struct nouveau_pm_level *);
- void (*clocks_set)(struct drm_device *, void *);
+ int (*clocks_set)(struct drm_device *, void *);
int (*voltage_get)(struct drm_device *);
int (*voltage_set)(struct drm_device *, int voltage);
- int (*fanspeed_get)(struct drm_device *);
- int (*fanspeed_set)(struct drm_device *, int fanspeed);
+ int (*pwm_get)(struct drm_device *, int line, u32*, u32*);
+ int (*pwm_set)(struct drm_device *, int line, u32, u32);
int (*temp_get)(struct drm_device *);
};
@@ -780,6 +788,8 @@ struct drm_nouveau_private {
struct nouveau_vm *chan_vm;
struct nvbios vbios;
+ u8 *mxms;
+ struct list_head i2c_ports;
struct nv04_mode_state mode_reg;
struct nv04_mode_state saved_reg;
@@ -850,6 +860,7 @@ extern char *nouveau_perflvl;
extern int nouveau_perflvl_wr;
extern int nouveau_msi;
extern int nouveau_ctxfw;
+extern int nouveau_mxmdcb;
extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
extern int nouveau_pci_resume(struct pci_dev *pdev);
@@ -1000,7 +1011,10 @@ extern int nouveau_sgdma_init(struct drm_device *);
extern void nouveau_sgdma_takedown(struct drm_device *);
extern uint32_t nouveau_sgdma_get_physical(struct drm_device *,
uint32_t offset);
-extern struct ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
+extern struct ttm_tt *nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
+ unsigned long size,
+ uint32_t page_flags,
+ struct page *dummy_read_page);
/* nouveau_debugfs.c */
#if defined(CONFIG_DRM_NOUVEAU_DEBUG)
@@ -1041,12 +1055,14 @@ extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
#if defined(CONFIG_ACPI)
void nouveau_register_dsm_handler(void);
void nouveau_unregister_dsm_handler(void);
+void nouveau_switcheroo_optimus_dsm(void);
int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len);
bool nouveau_acpi_rom_supported(struct pci_dev *pdev);
int nouveau_acpi_edid(struct drm_device *, struct drm_connector *);
#else
static inline void nouveau_register_dsm_handler(void) {}
static inline void nouveau_unregister_dsm_handler(void) {}
+static inline void nouveau_switcheroo_optimus_dsm(void) {}
static inline bool nouveau_acpi_rom_supported(struct pci_dev *pdev) { return false; }
static inline int nouveau_acpi_get_bios_chunk(uint8_t *bios, int offset, int len) { return -EINVAL; }
static inline int nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector) { return -EINVAL; }
@@ -1072,8 +1088,6 @@ extern int nouveau_run_vbios_init(struct drm_device *);
extern void nouveau_bios_run_init_table(struct drm_device *, uint16_t table,
struct dcb_entry *, int crtc);
extern void nouveau_bios_init_exec(struct drm_device *, uint16_t table);
-extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *,
- enum dcb_gpio_tag);
extern struct dcb_connector_table_entry *
nouveau_bios_connector_entry(struct drm_device *, int index);
extern u32 get_pll_register(struct drm_device *, enum pll_types);
@@ -1091,11 +1105,18 @@ extern int call_lvds_script(struct drm_device *, struct dcb_entry *, int head,
enum LVDS_script, int pxclk);
bool bios_encoder_match(struct dcb_entry *, u32 hash);
+/* nouveau_mxm.c */
+int nouveau_mxm_init(struct drm_device *dev);
+void nouveau_mxm_fini(struct drm_device *dev);
+
/* nouveau_ttm.c */
int nouveau_ttm_global_init(struct drm_nouveau_private *);
void nouveau_ttm_global_release(struct drm_nouveau_private *);
int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
+/* nouveau_hdmi.c */
+void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
+
/* nouveau_dp.c */
int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
uint8_t *data, int data_nr);
@@ -1222,6 +1243,9 @@ extern int nvc0_graph_isr_chid(struct drm_device *dev, u64 inst);
/* nv84_crypt.c */
extern int nv84_crypt_create(struct drm_device *);
+/* nv98_crypt.c */
+extern int nv98_crypt_create(struct drm_device *dev);
+
/* nva3_copy.c */
extern int nva3_copy_create(struct drm_device *dev);
@@ -1234,6 +1258,17 @@ extern int nv31_mpeg_create(struct drm_device *dev);
/* nv50_mpeg.c */
extern int nv50_mpeg_create(struct drm_device *dev);
+/* nv84_bsp.c */
+/* nv98_bsp.c */
+extern int nv84_bsp_create(struct drm_device *dev);
+
+/* nv84_vp.c */
+/* nv98_vp.c */
+extern int nv84_vp_create(struct drm_device *dev);
+
+/* nv98_ppp.c */
+extern int nv98_ppp_create(struct drm_device *dev);
+
/* nv04_instmem.c */
extern int nv04_instmem_init(struct drm_device *);
extern void nv04_instmem_takedown(struct drm_device *);
@@ -1311,13 +1346,19 @@ extern int nv17_tv_create(struct drm_connector *, struct dcb_entry *);
extern int nv04_display_early_init(struct drm_device *);
extern void nv04_display_late_takedown(struct drm_device *);
extern int nv04_display_create(struct drm_device *);
-extern int nv04_display_init(struct drm_device *);
extern void nv04_display_destroy(struct drm_device *);
+extern int nv04_display_init(struct drm_device *);
+extern void nv04_display_fini(struct drm_device *);
/* nvd0_display.c */
extern int nvd0_display_create(struct drm_device *);
-extern int nvd0_display_init(struct drm_device *);
extern void nvd0_display_destroy(struct drm_device *);
+extern int nvd0_display_init(struct drm_device *);
+extern void nvd0_display_fini(struct drm_device *);
+struct nouveau_bo *nvd0_display_crtc_sema(struct drm_device *, int crtc);
+void nvd0_display_flip_stop(struct drm_crtc *);
+int nvd0_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
+ struct nouveau_channel *, u32 swap_interval);
/* nv04_crtc.c */
extern int nv04_crtc_create(struct drm_device *, int index);
@@ -1412,6 +1453,10 @@ extern int nouveau_gem_ioctl_info(struct drm_device *, void *,
struct drm_file *);
/* nouveau_display.c */
+int nouveau_display_create(struct drm_device *dev);
+void nouveau_display_destroy(struct drm_device *dev);
+int nouveau_display_init(struct drm_device *dev);
+void nouveau_display_fini(struct drm_device *dev);
int nouveau_vblank_enable(struct drm_device *dev, int crtc);
void nouveau_vblank_disable(struct drm_device *dev, int crtc);
int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
@@ -1426,23 +1471,22 @@ int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
uint32_t handle);
/* nv10_gpio.c */
-int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
-int nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
+int nv10_gpio_init(struct drm_device *dev);
+void nv10_gpio_fini(struct drm_device *dev);
+int nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out);
+int nv10_gpio_sense(struct drm_device *dev, int line);
+void nv10_gpio_irq_enable(struct drm_device *, int line, bool on);
/* nv50_gpio.c */
int nv50_gpio_init(struct drm_device *dev);
void nv50_gpio_fini(struct drm_device *dev);
-int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
-int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
-int nvd0_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
-int nvd0_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
-int nv50_gpio_irq_register(struct drm_device *, enum dcb_gpio_tag,
- void (*)(void *, int), void *);
-void nv50_gpio_irq_unregister(struct drm_device *, enum dcb_gpio_tag,
- void (*)(void *, int), void *);
-bool nv50_gpio_irq_enable(struct drm_device *, enum dcb_gpio_tag, bool on);
-
-/* nv50_calc. */
+int nv50_gpio_drive(struct drm_device *dev, int line, int dir, int out);
+int nv50_gpio_sense(struct drm_device *dev, int line);
+void nv50_gpio_irq_enable(struct drm_device *, int line, bool on);
+int nvd0_gpio_drive(struct drm_device *dev, int line, int dir, int out);
+int nvd0_gpio_sense(struct drm_device *dev, int line);
+
+/* nv50_calc.c */
int nv50_calc_pll(struct drm_device *, struct pll_lims *, int clk,
int *N1, int *M1, int *N2, int *M2, int *P);
int nva3_calc_pll(struct drm_device *, struct pll_lims *,
@@ -1565,6 +1609,13 @@ extern void nv_wo32(struct nouveau_gpuobj *, u32 offset, u32 val);
#define NV_TRACEWARN(d, fmt, arg...) NV_PRINTK(KERN_NOTICE, d, fmt, ##arg)
#define NV_TRACE(d, fmt, arg...) NV_PRINTK(KERN_INFO, d, fmt, ##arg)
#define NV_WARN(d, fmt, arg...) NV_PRINTK(KERN_WARNING, d, fmt, ##arg)
+#define NV_WARNONCE(d, fmt, arg...) do { \
+ static int _warned = 0; \
+ if (!_warned) { \
+ NV_WARN(d, fmt, ##arg); \
+ _warned = 1; \
+ } \
+} while(0)
/* nouveau_reg_debug bitmask */
enum {
diff --git a/drivers/gpu/drm/nouveau/nouveau_fb.h b/drivers/gpu/drm/nouveau/nouveau_fb.h
index 95c843e..f3fb649 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fb.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fb.h
@@ -42,8 +42,6 @@ nouveau_framebuffer(struct drm_framebuffer *fb)
return container_of(fb, struct nouveau_framebuffer, base);
}
-extern const struct drm_mode_config_funcs nouveau_mode_config_funcs;
-
int nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
- struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo);
+ struct drm_mode_fb_cmd2 *mode_cmd, struct nouveau_bo *nvbo);
#endif /* __NOUVEAU_FB_H__ */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 3a4cc32..9892218 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -36,6 +36,7 @@
#include <linux/init.h>
#include <linux/screen_info.h>
#include <linux/vga_switcheroo.h>
+#include <linux/console.h>
#include "drmP.h"
#include "drm.h"
@@ -281,7 +282,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
struct nouveau_framebuffer *nouveau_fb;
struct nouveau_channel *chan;
struct nouveau_bo *nvbo;
- struct drm_mode_fb_cmd mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd;
struct pci_dev *pdev = dev->pdev;
struct device *device = &pdev->dev;
int size, ret;
@@ -289,12 +290,13 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
mode_cmd.width = sizes->surface_width;
mode_cmd.height = sizes->surface_height;
- mode_cmd.bpp = sizes->surface_bpp;
- mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
- mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
- mode_cmd.depth = sizes->surface_depth;
+ mode_cmd.pitches[0] = mode_cmd.width * (sizes->surface_bpp >> 3);
+ mode_cmd.pitches[0] = roundup(mode_cmd.pitches[0], 256);
- size = mode_cmd.pitch * mode_cmd.height;
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ size = mode_cmd.pitches[0] * mode_cmd.height;
size = roundup(size, PAGE_SIZE);
ret = nouveau_gem_new(dev, size, 0, NOUVEAU_GEM_DOMAIN_VRAM,
@@ -369,7 +371,7 @@ nouveau_fbcon_create(struct nouveau_fbdev *nfbdev,
info->screen_base = nvbo_kmap_obj_iovirtual(nouveau_fb->nvbo);
info->screen_size = size;
- drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &nfbdev->helper, sizes->fb_width, sizes->fb_height);
/* Set aperture base/size for vesafb takeover */
@@ -547,7 +549,13 @@ void nouveau_fbcon_restore_accel(struct drm_device *dev)
void nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ console_lock();
+ if (state == 0)
+ nouveau_fbcon_save_disable_accel(dev);
fb_set_suspend(dev_priv->nfbdev->helper.fbdev, state);
+ if (state == 1)
+ nouveau_fbcon_restore_accel(dev);
+ console_unlock();
}
void nouveau_fbcon_zfill_all(struct drm_device *dev)
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 5f0bc57..7ce3fde 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -380,6 +380,25 @@ retry:
}
static int
+validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
+{
+ struct nouveau_fence *fence = NULL;
+ int ret = 0;
+
+ spin_lock(&nvbo->bo.bdev->fence_lock);
+ if (nvbo->bo.sync_obj)
+ fence = nouveau_fence_ref(nvbo->bo.sync_obj);
+ spin_unlock(&nvbo->bo.bdev->fence_lock);
+
+ if (fence) {
+ ret = nouveau_fence_sync(fence, chan);
+ nouveau_fence_unref(&fence);
+ }
+
+ return ret;
+}
+
+static int
validate_list(struct nouveau_channel *chan, struct list_head *list,
struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
{
@@ -393,7 +412,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
list_for_each_entry(nvbo, list, entry) {
struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
- ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
+ ret = validate_sync(chan, nvbo);
if (unlikely(ret)) {
NV_ERROR(dev, "fail pre-validate sync\n");
return ret;
@@ -416,7 +435,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
return ret;
}
- ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
+ ret = validate_sync(chan, nvbo);
if (unlikely(ret)) {
NV_ERROR(dev, "fail post-validate sync\n");
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.c b/drivers/gpu/drm/nouveau/nouveau_gpio.c
new file mode 100644
index 0000000..a580cc6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_gpio.c
@@ -0,0 +1,400 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_i2c.h"
+#include "nouveau_gpio.h"
+
+static u8 *
+dcb_gpio_table(struct drm_device *dev)
+{
+ u8 *dcb = dcb_table(dev);
+ if (dcb) {
+ if (dcb[0] >= 0x30 && dcb[1] >= 0x0c)
+ return ROMPTR(dev, dcb[0x0a]);
+ if (dcb[0] >= 0x22 && dcb[-1] >= 0x13)
+ return ROMPTR(dev, dcb[-15]);
+ }
+ return NULL;
+}
+
+static u8 *
+dcb_gpio_entry(struct drm_device *dev, int idx, int ent, u8 *version)
+{
+ u8 *table = dcb_gpio_table(dev);
+ if (table) {
+ *version = table[0];
+ if (*version < 0x30 && ent < table[2])
+ return table + 3 + (ent * table[1]);
+ else if (ent < table[2])
+ return table + table[1] + (ent * table[3]);
+ }
+ return NULL;
+}
+
+int
+nouveau_gpio_drive(struct drm_device *dev, int idx, int line, int dir, int out)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+ return pgpio->drive ? pgpio->drive(dev, line, dir, out) : -ENODEV;
+}
+
+int
+nouveau_gpio_sense(struct drm_device *dev, int idx, int line)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+ return pgpio->sense ? pgpio->sense(dev, line) : -ENODEV;
+}
+
+int
+nouveau_gpio_find(struct drm_device *dev, int idx, u8 func, u8 line,
+ struct gpio_func *gpio)
+{
+ u8 *table, *entry, version;
+ int i = -1;
+
+ if (line == 0xff && func == 0xff)
+ return -EINVAL;
+
+ while ((entry = dcb_gpio_entry(dev, idx, ++i, &version))) {
+ if (version < 0x40) {
+ u16 data = ROM16(entry[0]);
+ *gpio = (struct gpio_func) {
+ .line = (data & 0x001f) >> 0,
+ .func = (data & 0x07e0) >> 5,
+ .log[0] = (data & 0x1800) >> 11,
+ .log[1] = (data & 0x6000) >> 13,
+ };
+ } else
+ if (version < 0x41) {
+ *gpio = (struct gpio_func) {
+ .line = entry[0] & 0x1f,
+ .func = entry[1],
+ .log[0] = (entry[3] & 0x18) >> 3,
+ .log[1] = (entry[3] & 0x60) >> 5,
+ };
+ } else {
+ *gpio = (struct gpio_func) {
+ .line = entry[0] & 0x3f,
+ .func = entry[1],
+ .log[0] = (entry[4] & 0x30) >> 4,
+ .log[1] = (entry[4] & 0xc0) >> 6,
+ };
+ }
+
+ if ((line == 0xff || line == gpio->line) &&
+ (func == 0xff || func == gpio->func))
+ return 0;
+ }
+
+ /* DCB 2.2, fixed TVDAC GPIO data */
+ if ((table = dcb_table(dev)) && table[0] >= 0x22) {
+ if (func == DCB_GPIO_TVDAC0) {
+ *gpio = (struct gpio_func) {
+ .func = DCB_GPIO_TVDAC0,
+ .line = table[-4] >> 4,
+ .log[0] = !!(table[-5] & 2),
+ .log[1] = !(table[-5] & 2),
+ };
+ return 0;
+ }
+ }
+
+ /* Apple iMac G4 NV18 */
+ if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
+ if (func == DCB_GPIO_TVDAC0) {
+ *gpio = (struct gpio_func) {
+ .func = DCB_GPIO_TVDAC0,
+ .line = 4,
+ .log[0] = 0,
+ .log[1] = 1,
+ };
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
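A standalone sketch of the pre-4.0 entry decode performed by nouveau_gpio_find() above, together with the way nouveau_gpio_set() below turns log[state] into direction/output bits. parse_gpio_v30(), the gpio_func_ex struct and the sample value 0x3141 are all local to this example:

#include <stdio.h>

struct gpio_func_ex {
	unsigned char func;
	unsigned char line;
	unsigned char log[2];
};

/* same bitfield layout as the version < 0x40 branch above */
static struct gpio_func_ex
parse_gpio_v30(unsigned short data)
{
	struct gpio_func_ex gpio = {
		.line   = (data & 0x001f) >> 0,
		.func   = (data & 0x07e0) >> 5,
		.log[0] = (data & 0x1800) >> 11,
		.log[1] = (data & 0x6000) >> 13,
	};
	return gpio;
}

int main(void)
{
	struct gpio_func_ex g = parse_gpio_v30(0x3141);

	printf("line %u func 0x%02x log0 %u log1 %u\n",
	       g.line, g.func, g.log[0], g.log[1]);

	/* driving logic state 1 uses log[1]: bit 1 = direction, bit 0 = level,
	 * mirroring the dir/out split in nouveau_gpio_set() */
	printf("state 1 -> dir %u out %u\n",
	       !!(g.log[1] & 0x02), !!(g.log[1] & 0x01));
	return 0;
}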
+
+int
+nouveau_gpio_set(struct drm_device *dev, int idx, u8 tag, u8 line, int state)
+{
+ struct gpio_func gpio;
+ int ret;
+
+ ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
+ if (ret == 0) {
+ int dir = !!(gpio.log[state] & 0x02);
+ int out = !!(gpio.log[state] & 0x01);
+ ret = nouveau_gpio_drive(dev, idx, gpio.line, dir, out);
+ }
+
+ return ret;
+}
+
+int
+nouveau_gpio_get(struct drm_device *dev, int idx, u8 tag, u8 line)
+{
+ struct gpio_func gpio;
+ int ret;
+
+ ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
+ if (ret == 0) {
+ ret = nouveau_gpio_sense(dev, idx, gpio.line);
+ if (ret >= 0)
+ ret = (ret == (gpio.log[1] & 1));
+ }
+
+ return ret;
+}
+
+int
+nouveau_gpio_irq(struct drm_device *dev, int idx, u8 tag, u8 line, bool on)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct gpio_func gpio;
+ int ret;
+
+ ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
+ if (ret == 0) {
+ if (idx == 0 && pgpio->irq_enable)
+ pgpio->irq_enable(dev, gpio.line, on);
+ else
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+struct gpio_isr {
+ struct drm_device *dev;
+ struct list_head head;
+ struct work_struct work;
+ int idx;
+ struct gpio_func func;
+ void (*handler)(void *, int);
+ void *data;
+ bool inhibit;
+};
+
+static void
+nouveau_gpio_isr_bh(struct work_struct *work)
+{
+ struct gpio_isr *isr = container_of(work, struct gpio_isr, work);
+ struct drm_device *dev = isr->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ unsigned long flags;
+ int state;
+
+ state = nouveau_gpio_get(dev, isr->idx, isr->func.func, isr->func.line);
+ if (state >= 0)
+ isr->handler(isr->data, state);
+
+ spin_lock_irqsave(&pgpio->lock, flags);
+ isr->inhibit = false;
+ spin_unlock_irqrestore(&pgpio->lock, flags);
+}
+
+void
+nouveau_gpio_isr(struct drm_device *dev, int idx, u32 line_mask)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct gpio_isr *isr;
+
+ if (idx != 0)
+ return;
+
+ spin_lock(&pgpio->lock);
+ list_for_each_entry(isr, &pgpio->isr, head) {
+ if (line_mask & (1 << isr->func.line)) {
+ if (isr->inhibit)
+ continue;
+ isr->inhibit = true;
+ schedule_work(&isr->work);
+ }
+ }
+ spin_unlock(&pgpio->lock);
+}
+
+int
+nouveau_gpio_isr_add(struct drm_device *dev, int idx, u8 tag, u8 line,
+ void (*handler)(void *, int), void *data)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct gpio_isr *isr;
+ unsigned long flags;
+ int ret;
+
+ isr = kzalloc(sizeof(*isr), GFP_KERNEL);
+ if (!isr)
+ return -ENOMEM;
+
+ ret = nouveau_gpio_find(dev, idx, tag, line, &isr->func);
+ if (ret) {
+ kfree(isr);
+ return ret;
+ }
+
+ INIT_WORK(&isr->work, nouveau_gpio_isr_bh);
+ isr->dev = dev;
+ isr->handler = handler;
+ isr->data = data;
+ isr->idx = idx;
+
+ spin_lock_irqsave(&pgpio->lock, flags);
+ list_add(&isr->head, &pgpio->isr);
+ spin_unlock_irqrestore(&pgpio->lock, flags);
+ return 0;
+}
+
+void
+nouveau_gpio_isr_del(struct drm_device *dev, int idx, u8 tag, u8 line,
+ void (*handler)(void *, int), void *data)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct gpio_isr *isr, *tmp;
+ struct gpio_func func;
+ unsigned long flags;
+ LIST_HEAD(tofree);
+ int ret;
+
+ ret = nouveau_gpio_find(dev, idx, tag, line, &func);
+ if (ret == 0) {
+ spin_lock_irqsave(&pgpio->lock, flags);
+ list_for_each_entry_safe(isr, tmp, &pgpio->isr, head) {
+ if (memcmp(&isr->func, &func, sizeof(func)) ||
+ isr->idx != idx ||
+ isr->handler != handler || isr->data != data)
+ continue;
+ list_move(&isr->head, &tofree);
+ }
+ spin_unlock_irqrestore(&pgpio->lock, flags);
+
+ list_for_each_entry_safe(isr, tmp, &tofree, head) {
+ flush_work_sync(&isr->work);
+ kfree(isr);
+ }
+ }
+}
+
+int
+nouveau_gpio_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+ INIT_LIST_HEAD(&pgpio->isr);
+ spin_lock_init(&pgpio->lock);
+
+ return nouveau_gpio_init(dev);
+}
+
+void
+nouveau_gpio_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+ nouveau_gpio_fini(dev);
+ BUG_ON(!list_empty(&pgpio->isr));
+}
+
+int
+nouveau_gpio_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ int ret = 0;
+
+ if (pgpio->init)
+ ret = pgpio->init(dev);
+
+ return ret;
+}
+
+void
+nouveau_gpio_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+ if (pgpio->fini)
+ pgpio->fini(dev);
+}
+
+void
+nouveau_gpio_reset(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u8 *entry, version;
+ int ent = -1;
+
+ while ((entry = dcb_gpio_entry(dev, 0, ++ent, &version))) {
+ u8 func = 0xff, line, defs, unk0, unk1;
+ if (version >= 0x41) {
+ defs = !!(entry[0] & 0x80);
+ line = entry[0] & 0x3f;
+ func = entry[1];
+ unk0 = entry[2];
+ unk1 = entry[3] & 0x1f;
+ } else
+ if (version >= 0x40) {
+ line = entry[0] & 0x1f;
+ func = entry[1];
+ defs = !!(entry[3] & 0x01);
+ unk0 = !!(entry[3] & 0x02);
+ unk1 = !!(entry[3] & 0x04);
+ } else {
+ break;
+ }
+
+ if (func == 0xff)
+ continue;
+
+ nouveau_gpio_func_set(dev, func, defs);
+
+ if (dev_priv->card_type >= NV_D0) {
+ nv_mask(dev, 0x00d610 + (line * 4), 0xff, unk0);
+ if (unk1--)
+ nv_mask(dev, 0x00d640 + (unk1 * 4), 0xff, line);
+ } else
+ if (dev_priv->card_type >= NV_50) {
+ static const u32 regs[] = { 0xe100, 0xe28c };
+ u32 val = (unk1 << 16) | unk0;
+ u32 reg = regs[line >> 4]; line &= 0x0f;
+
+ nv_mask(dev, reg, 0x00010001 << line, val << line);
+ }
+ }
+}
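A quick arithmetic check of the NV50-family branch just above: the GPIO line index selects one of the two registers by its high nibble and the bit position by its low nibble. The regs[] table matches the one in the code; line 0x13 is just a sample value for this example.

#include <stdio.h>

int main(void)
{
	static const unsigned int regs[] = { 0xe100, 0xe28c };
	unsigned int line = 0x13;
	unsigned int reg  = regs[line >> 4];	/* second register */
	unsigned int bit  = line & 0x0f;	/* bit 3 within it */

	printf("line 0x%02x -> reg 0x%04x, mask 0x%08x\n",
	       line, reg, 0x00010001u << bit);
	return 0;
}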
diff --git a/drivers/gpu/drm/nouveau/nouveau_gpio.h b/drivers/gpu/drm/nouveau/nouveau_gpio.h
new file mode 100644
index 0000000..64c5cb0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_gpio.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_GPIO_H__
+#define __NOUVEAU_GPIO_H__
+
+struct gpio_func {
+ u8 func;
+ u8 line;
+ u8 log[2];
+};
+
+/* nouveau_gpio.c */
+int nouveau_gpio_create(struct drm_device *);
+void nouveau_gpio_destroy(struct drm_device *);
+int nouveau_gpio_init(struct drm_device *);
+void nouveau_gpio_fini(struct drm_device *);
+void nouveau_gpio_reset(struct drm_device *);
+int nouveau_gpio_drive(struct drm_device *, int idx, int line,
+ int dir, int out);
+int nouveau_gpio_sense(struct drm_device *, int idx, int line);
+int nouveau_gpio_find(struct drm_device *, int idx, u8 tag, u8 line,
+ struct gpio_func *);
+int nouveau_gpio_set(struct drm_device *, int idx, u8 tag, u8 line, int state);
+int nouveau_gpio_get(struct drm_device *, int idx, u8 tag, u8 line);
+int nouveau_gpio_irq(struct drm_device *, int idx, u8 tag, u8 line, bool on);
+void nouveau_gpio_isr(struct drm_device *, int idx, u32 mask);
+int nouveau_gpio_isr_add(struct drm_device *, int idx, u8 tag, u8 line,
+ void (*)(void *, int state), void *data);
+void nouveau_gpio_isr_del(struct drm_device *, int idx, u8 tag, u8 line,
+ void (*)(void *, int state), void *data);
+
+static inline bool
+nouveau_gpio_func_valid(struct drm_device *dev, u8 tag)
+{
+ struct gpio_func func;
+ return (nouveau_gpio_find(dev, 0, tag, 0xff, &func)) == 0;
+}
+
+static inline int
+nouveau_gpio_func_set(struct drm_device *dev, u8 tag, int state)
+{
+ return nouveau_gpio_set(dev, 0, tag, 0xff, state);
+}
+
+static inline int
+nouveau_gpio_func_get(struct drm_device *dev, u8 tag)
+{
+ return nouveau_gpio_get(dev, 0, tag, 0xff);
+}
+
+#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_hdmi.c b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
new file mode 100644
index 0000000..59ea1c1
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_hdmi.c
@@ -0,0 +1,258 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_connector.h"
+#include "nouveau_encoder.h"
+#include "nouveau_crtc.h"
+
+static bool
+hdmi_sor(struct drm_encoder *encoder)
+{
+ struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
+ if (dev_priv->chipset < 0xa3)
+ return false;
+ return true;
+}
+
+static inline u32
+hdmi_base(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
+ if (!hdmi_sor(encoder))
+ return 0x616500 + (nv_crtc->index * 0x800);
+ return 0x61c500 + (nv_encoder->or * 0x800);
+}
+
+static void
+hdmi_wr32(struct drm_encoder *encoder, u32 reg, u32 val)
+{
+ nv_wr32(encoder->dev, hdmi_base(encoder) + reg, val);
+}
+
+static u32
+hdmi_rd32(struct drm_encoder *encoder, u32 reg)
+{
+ return nv_rd32(encoder->dev, hdmi_base(encoder) + reg);
+}
+
+static u32
+hdmi_mask(struct drm_encoder *encoder, u32 reg, u32 mask, u32 val)
+{
+ u32 tmp = hdmi_rd32(encoder, reg);
+ hdmi_wr32(encoder, reg, (tmp & ~mask) | val);
+ return tmp;
+}
+
+static void
+nouveau_audio_disconnect(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ u32 or = nv_encoder->or * 0x800;
+
+ if (hdmi_sor(encoder)) {
+ nv_mask(dev, 0x61c448 + or, 0x00000003, 0x00000000);
+ }
+}
+
+static void
+nouveau_audio_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+ struct drm_device *dev = encoder->dev;
+ u32 or = nv_encoder->or * 0x800;
+ int i;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (!drm_detect_monitor_audio(nv_connector->edid)) {
+ nouveau_audio_disconnect(encoder);
+ return;
+ }
+
+ if (hdmi_sor(encoder)) {
+ nv_mask(dev, 0x61c448 + or, 0x00000001, 0x00000001);
+
+ drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
+ if (nv_connector->base.eld[0]) {
+ u8 *eld = nv_connector->base.eld;
+ for (i = 0; i < eld[2] * 4; i++)
+ nv_wr32(dev, 0x61c440 + or, (i << 8) | eld[i]);
+ for (i = eld[2] * 4; i < 0x60; i++)
+ nv_wr32(dev, 0x61c440 + or, (i << 8) | 0x00);
+ nv_mask(dev, 0x61c448 + or, 0x00000002, 0x00000002);
+ }
+ }
+}
+
+static void
+nouveau_hdmi_infoframe(struct drm_encoder *encoder, u32 ctrl, u8 *frame)
+{
+ /* calculate checksum for the infoframe */
+ u8 sum = 0, i;
+ for (i = 0; i < frame[2]; i++)
+ sum += frame[i];
+ frame[3] = 256 - sum;
+
+ /* disable infoframe, and write header */
+ hdmi_mask(encoder, ctrl + 0x00, 0x00000001, 0x00000000);
+ hdmi_wr32(encoder, ctrl + 0x08, *(u32 *)frame & 0xffffff);
+
+ /* register scans tell me the audio infoframe has only one set of
+ * subpack regs; according to tegra (gee nvidia, it'd be nice if we
+ * could get those docs too!), the hdmi block pads out the rest of
+ * the packet on its own.
+ */
+ if (ctrl == 0x020)
+ frame[2] = 6;
+
+ /* write out checksum and data, weird weird 7 byte register pairs */
+ for (i = 0; i < frame[2] + 1; i += 7) {
+ u32 rsubpack = ctrl + 0x0c + ((i / 7) * 8);
+ u32 *subpack = (u32 *)&frame[3 + i];
+ hdmi_wr32(encoder, rsubpack + 0, subpack[0]);
+ hdmi_wr32(encoder, rsubpack + 4, subpack[1] & 0xffffff);
+ }
+
+ /* enable the infoframe */
+ hdmi_mask(encoder, ctrl, 0x00000001, 0x00000001);
+}
+
+static void
+nouveau_hdmi_video_infoframe(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ const u8 Y = 0, A = 0, B = 0, S = 0, C = 0, M = 0, R = 0;
+ const u8 ITC = 0, EC = 0, Q = 0, SC = 0, VIC = 0, PR = 0;
+ const u8 bar_top = 0, bar_bottom = 0, bar_left = 0, bar_right = 0;
+ u8 frame[20];
+
+ frame[0x00] = 0x82; /* AVI infoframe */
+ frame[0x01] = 0x02; /* version */
+ frame[0x02] = 0x0d; /* length */
+ frame[0x03] = 0x00;
+ frame[0x04] = (Y << 5) | (A << 4) | (B << 2) | S;
+ frame[0x05] = (C << 6) | (M << 4) | R;
+ frame[0x06] = (ITC << 7) | (EC << 4) | (Q << 2) | SC;
+ frame[0x07] = VIC;
+ frame[0x08] = PR;
+ frame[0x09] = bar_top & 0xff;
+ frame[0x0a] = bar_top >> 8;
+ frame[0x0b] = bar_bottom & 0xff;
+ frame[0x0c] = bar_bottom >> 8;
+ frame[0x0d] = bar_left & 0xff;
+ frame[0x0e] = bar_left >> 8;
+ frame[0x0f] = bar_right & 0xff;
+ frame[0x10] = bar_right >> 8;
+ frame[0x11] = 0x00;
+ frame[0x12] = 0x00;
+ frame[0x13] = 0x00;
+
+ nouveau_hdmi_infoframe(encoder, 0x020, frame);
+}
+
+static void
+nouveau_hdmi_audio_infoframe(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ const u8 CT = 0x00, CC = 0x01, ceaSS = 0x00, SF = 0x00, FMT = 0x00;
+ const u8 CA = 0x00, DM_INH = 0, LSV = 0x00;
+ u8 frame[12];
+
+ frame[0x00] = 0x84; /* Audio infoframe */
+ frame[0x01] = 0x01; /* version */
+ frame[0x02] = 0x0a; /* length */
+ frame[0x03] = 0x00;
+ frame[0x04] = (CT << 4) | CC;
+ frame[0x05] = (SF << 2) | ceaSS;
+ frame[0x06] = FMT;
+ frame[0x07] = CA;
+ frame[0x08] = (DM_INH << 7) | (LSV << 3);
+ frame[0x09] = 0x00;
+ frame[0x0a] = 0x00;
+ frame[0x0b] = 0x00;
+
+ nouveau_hdmi_infoframe(encoder, 0x000, frame);
+}
+
+static void
+nouveau_hdmi_disconnect(struct drm_encoder *encoder)
+{
+ nouveau_audio_disconnect(encoder);
+
+ /* disable audio and avi infoframes */
+ hdmi_mask(encoder, 0x000, 0x00000001, 0x00000000);
+ hdmi_mask(encoder, 0x020, 0x00000001, 0x00000000);
+
+ /* disable hdmi */
+ hdmi_mask(encoder, 0x0a4, 0x40000000, 0x00000000);
+}
+
+void
+nouveau_hdmi_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+ struct drm_device *dev = encoder->dev;
+ u32 max_ac_packet, rekey;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (!mode || !nv_connector || !nv_connector->edid ||
+ !drm_detect_hdmi_monitor(nv_connector->edid)) {
+ nouveau_hdmi_disconnect(encoder);
+ return;
+ }
+
+ nouveau_hdmi_video_infoframe(encoder, mode);
+ nouveau_hdmi_audio_infoframe(encoder, mode);
+
+ hdmi_mask(encoder, 0x0d0, 0x00070001, 0x00010001); /* SPARE, HW_CTS */
+ hdmi_mask(encoder, 0x068, 0x00010101, 0x00000000); /* ACR_CTRL, ?? */
+ hdmi_mask(encoder, 0x078, 0x80000000, 0x80000000); /* ACR_0441_ENABLE */
+
+ nv_mask(dev, 0x61733c, 0x00100000, 0x00100000); /* RESETF */
+ nv_mask(dev, 0x61733c, 0x10000000, 0x10000000); /* LOOKUP_EN */
+ nv_mask(dev, 0x61733c, 0x00100000, 0x00000000); /* !RESETF */
+
+ /* value matches nvidia binary driver, and tegra constant */
+ rekey = 56;
+
+ max_ac_packet = mode->htotal - mode->hdisplay;
+ max_ac_packet -= rekey;
+ max_ac_packet -= 18; /* constant from tegra */
+ max_ac_packet /= 32;
+
+ /* enable hdmi */
+ hdmi_mask(encoder, 0x0a4, 0x5f1f003f, 0x40000000 | /* enable */
+ 0x1f000000 | /* unknown */
+ max_ac_packet << 16 |
+ rekey);
+
+ nouveau_audio_mode_set(encoder, mode);
+}
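
nouveau_hdmi_infoframe() above picks the checksum byte so that the packet's header and payload bytes sum to zero modulo 256, per the usual CEA-861 infoframe convention. A small standalone sketch of that rule (illustrative only; the driver's loop indexes its own frame[] layout):

    #include <stdint.h>
    #include <stddef.h>

    /* Return the checksum byte for an infoframe whose other bytes are given;
     * adding it makes the byte sum wrap around to 0x00.
     */
    static uint8_t infoframe_checksum(const uint8_t *bytes, size_t len)
    {
        uint8_t sum = 0;
        size_t i;

        for (i = 0; i < len; i++)
            sum += bytes[i];
        return (uint8_t)(0x100 - sum);
    }

The max_ac_packet value in nouveau_hdmi_mode_set() is likewise plain arithmetic on the mode: for the common CEA 1920x1080 timing (htotal 2200, hdisplay 1920), (2200 - 1920 - 56 - 18) / 32 = 6.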
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwsq.h b/drivers/gpu/drm/nouveau/nouveau_hwsq.h
new file mode 100644
index 0000000..6976875
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_hwsq.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_HWSQ_H__
+#define __NOUVEAU_HWSQ_H__
+
+struct hwsq_ucode {
+ u8 data[0x200];
+ union {
+ u8 *u08;
+ u16 *u16;
+ u32 *u32;
+ } ptr;
+ u16 len;
+
+ u32 reg;
+ u32 val;
+};
+
+static inline void
+hwsq_init(struct hwsq_ucode *hwsq)
+{
+ hwsq->ptr.u08 = hwsq->data;
+ hwsq->reg = 0xffffffff;
+ hwsq->val = 0xffffffff;
+}
+
+static inline void
+hwsq_fini(struct hwsq_ucode *hwsq)
+{
+ do {
+ *hwsq->ptr.u08++ = 0x7f;
+ hwsq->len = hwsq->ptr.u08 - hwsq->data;
+ } while (hwsq->len & 3);
+ hwsq->ptr.u08 = hwsq->data;
+}
+
+static inline void
+hwsq_usec(struct hwsq_ucode *hwsq, u8 usec)
+{
+ u32 shift = 0;
+ while (usec & ~3) {
+ usec >>= 2;
+ shift++;
+ }
+
+ *hwsq->ptr.u08++ = (shift << 2) | usec;
+}
+
+static inline void
+hwsq_setf(struct hwsq_ucode *hwsq, u8 flag, int val)
+{
+ flag += 0x80;
+ if (val >= 0)
+ flag += 0x20;
+ if (val >= 1)
+ flag += 0x20;
+ *hwsq->ptr.u08++ = flag;
+}
+
+static inline void
+hwsq_op5f(struct hwsq_ucode *hwsq, u8 v0, u8 v1)
+{
+ *hwsq->ptr.u08++ = 0x5f;
+ *hwsq->ptr.u08++ = v0;
+ *hwsq->ptr.u08++ = v1;
+}
+
+static inline void
+hwsq_wr32(struct hwsq_ucode *hwsq, u32 reg, u32 val)
+{
+ if (val != hwsq->val) {
+ if ((val & 0xffff0000) == (hwsq->val & 0xffff0000)) {
+ *hwsq->ptr.u08++ = 0x42;
+ *hwsq->ptr.u16++ = (val & 0x0000ffff);
+ } else {
+ *hwsq->ptr.u08++ = 0xe2;
+ *hwsq->ptr.u32++ = val;
+ }
+
+ hwsq->val = val;
+ }
+
+ if ((reg & 0xffff0000) == (hwsq->reg & 0xffff0000)) {
+ *hwsq->ptr.u08++ = 0x40;
+ *hwsq->ptr.u16++ = (reg & 0x0000ffff);
+ } else {
+ *hwsq->ptr.u08++ = 0xe0;
+ *hwsq->ptr.u32++ = reg;
+ }
+ hwsq->reg = reg;
+}
+
+#endif
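
A minimal sketch of how the ucode builder above is driven; the register addresses and values are placeholders, not taken from any real reclocking sequence:

    /* Assumes nouveau_hwsq.h from this patch is in scope. */
    static void example_build_script(struct hwsq_ucode *hwsq)
    {
        hwsq_init(hwsq);
        hwsq_wr32(hwsq, 0x004008, 0x80000000); /* first write: full 32-bit opcodes */
        hwsq_wr32(hwsq, 0x00400c, 0x80000100); /* high halves repeat: 16-bit forms */
        hwsq_usec(hwsq, 8);                    /* delay, packed as value/shift */
        hwsq_fini(hwsq);                       /* pad with 0x7f to a 4-byte multiple */
    }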
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index d39b220..820ae7f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -29,262 +29,465 @@
#include "nouveau_i2c.h"
#include "nouveau_hw.h"
+#define T_TIMEOUT 2200000
+#define T_RISEFALL 1000
+#define T_HOLD 5000
+
static void
-nv04_i2c_setscl(void *data, int state)
+i2c_drive_scl(void *data, int state)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
- uint8_t val;
-
- val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
- NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01);
+ struct nouveau_i2c_chan *port = data;
+ if (port->type == 0) {
+ u8 val = NVReadVgaCrtc(port->dev, 0, port->drive);
+ if (state) val |= 0x20;
+ else val &= 0xdf;
+ NVWriteVgaCrtc(port->dev, 0, port->drive, val | 0x01);
+ } else
+ if (port->type == 4) {
+ nv_mask(port->dev, port->drive, 0x2f, state ? 0x21 : 0x01);
+ } else
+ if (port->type == 5) {
+ if (state) port->state |= 0x01;
+ else port->state &= 0xfe;
+ nv_wr32(port->dev, port->drive, 4 | port->state);
+ }
}
static void
-nv04_i2c_setsda(void *data, int state)
+i2c_drive_sda(void *data, int state)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
- uint8_t val;
-
- val = (NVReadVgaCrtc(dev, 0, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
- NVWriteVgaCrtc(dev, 0, i2c->wr, val | 0x01);
+ struct nouveau_i2c_chan *port = data;
+ if (port->type == 0) {
+ u8 val = NVReadVgaCrtc(port->dev, 0, port->drive);
+ if (state) val |= 0x10;
+ else val &= 0xef;
+ NVWriteVgaCrtc(port->dev, 0, port->drive, val | 0x01);
+ } else
+ if (port->type == 4) {
+ nv_mask(port->dev, port->drive, 0x1f, state ? 0x11 : 0x01);
+ } else
+ if (port->type == 5) {
+ if (state) port->state |= 0x02;
+ else port->state &= 0xfd;
+ nv_wr32(port->dev, port->drive, 4 | port->state);
+ }
}
static int
-nv04_i2c_getscl(void *data)
+i2c_sense_scl(void *data)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
-
- return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 4);
+ struct nouveau_i2c_chan *port = data;
+ struct drm_nouveau_private *dev_priv = port->dev->dev_private;
+ if (port->type == 0) {
+ return !!(NVReadVgaCrtc(port->dev, 0, port->sense) & 0x04);
+ } else
+ if (port->type == 4) {
+ return !!(nv_rd32(port->dev, port->sense) & 0x00040000);
+ } else
+ if (port->type == 5) {
+ if (dev_priv->card_type < NV_D0)
+ return !!(nv_rd32(port->dev, port->sense) & 0x01);
+ else
+ return !!(nv_rd32(port->dev, port->sense) & 0x10);
+ }
+ return 0;
}
static int
-nv04_i2c_getsda(void *data)
+i2c_sense_sda(void *data)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
-
- return !!(NVReadVgaCrtc(dev, 0, i2c->rd) & 8);
+ struct nouveau_i2c_chan *port = data;
+ struct drm_nouveau_private *dev_priv = port->dev->dev_private;
+ if (port->type == 0) {
+ return !!(NVReadVgaCrtc(port->dev, 0, port->sense) & 0x08);
+ } else
+ if (port->type == 4) {
+ return !!(nv_rd32(port->dev, port->sense) & 0x00080000);
+ } else
+ if (port->type == 5) {
+ if (dev_priv->card_type < NV_D0)
+ return !!(nv_rd32(port->dev, port->sense) & 0x02);
+ else
+ return !!(nv_rd32(port->dev, port->sense) & 0x20);
+ }
+ return 0;
}
static void
-nv4e_i2c_setscl(void *data, int state)
+i2c_delay(struct nouveau_i2c_chan *port, u32 nsec)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
- uint8_t val;
-
- val = (nv_rd32(dev, i2c->wr) & 0xd0) | (state ? 0x20 : 0);
- nv_wr32(dev, i2c->wr, val | 0x01);
+ udelay((nsec + 500) / 1000);
}
-static void
-nv4e_i2c_setsda(void *data, int state)
+static bool
+i2c_raise_scl(struct nouveau_i2c_chan *port)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
- uint8_t val;
+ u32 timeout = T_TIMEOUT / T_RISEFALL;
+
+ i2c_drive_scl(port, 1);
+ do {
+ i2c_delay(port, T_RISEFALL);
+ } while (!i2c_sense_scl(port) && --timeout);
- val = (nv_rd32(dev, i2c->wr) & 0xe0) | (state ? 0x10 : 0);
- nv_wr32(dev, i2c->wr, val | 0x01);
+ return timeout != 0;
}
static int
-nv4e_i2c_getscl(void *data)
+i2c_start(struct nouveau_i2c_chan *port)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
+ int ret = 0;
+
+ port->state = i2c_sense_scl(port);
+ port->state |= i2c_sense_sda(port) << 1;
+ if (port->state != 3) {
+ i2c_drive_scl(port, 0);
+ i2c_drive_sda(port, 1);
+ if (!i2c_raise_scl(port))
+ ret = -EBUSY;
+ }
- return !!((nv_rd32(dev, i2c->rd) >> 16) & 4);
+ i2c_drive_sda(port, 0);
+ i2c_delay(port, T_HOLD);
+ i2c_drive_scl(port, 0);
+ i2c_delay(port, T_HOLD);
+ return ret;
}
-static int
-nv4e_i2c_getsda(void *data)
+static void
+i2c_stop(struct nouveau_i2c_chan *port)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
-
- return !!((nv_rd32(dev, i2c->rd) >> 16) & 8);
+ i2c_drive_scl(port, 0);
+ i2c_drive_sda(port, 0);
+ i2c_delay(port, T_RISEFALL);
+
+ i2c_drive_scl(port, 1);
+ i2c_delay(port, T_HOLD);
+ i2c_drive_sda(port, 1);
+ i2c_delay(port, T_HOLD);
}
-static const uint32_t nv50_i2c_port[] = {
- 0x00e138, 0x00e150, 0x00e168, 0x00e180,
- 0x00e254, 0x00e274, 0x00e764, 0x00e780,
- 0x00e79c, 0x00e7b8
-};
-#define NV50_I2C_PORTS ARRAY_SIZE(nv50_i2c_port)
-
static int
-nv50_i2c_getscl(void *data)
+i2c_bitw(struct nouveau_i2c_chan *port, int sda)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
+ i2c_drive_sda(port, sda);
+ i2c_delay(port, T_RISEFALL);
- return !!(nv_rd32(dev, i2c->rd) & 1);
-}
+ if (!i2c_raise_scl(port))
+ return -ETIMEDOUT;
+ i2c_delay(port, T_HOLD);
+ i2c_drive_scl(port, 0);
+ i2c_delay(port, T_HOLD);
+ return 0;
+}
static int
-nv50_i2c_getsda(void *data)
+i2c_bitr(struct nouveau_i2c_chan *port)
{
- struct nouveau_i2c_chan *i2c = data;
- struct drm_device *dev = i2c->dev;
+ int sda;
+
+ i2c_drive_sda(port, 1);
+ i2c_delay(port, T_RISEFALL);
- return !!(nv_rd32(dev, i2c->rd) & 2);
+ if (!i2c_raise_scl(port))
+ return -ETIMEDOUT;
+ i2c_delay(port, T_HOLD);
+
+ sda = i2c_sense_sda(port);
+
+ i2c_drive_scl(port, 0);
+ i2c_delay(port, T_HOLD);
+ return sda;
}
-static void
-nv50_i2c_setscl(void *data, int state)
+static int
+i2c_get_byte(struct nouveau_i2c_chan *port, u8 *byte, bool last)
{
- struct nouveau_i2c_chan *i2c = data;
+ int i, bit;
+
+ *byte = 0;
+ for (i = 7; i >= 0; i--) {
+ bit = i2c_bitr(port);
+ if (bit < 0)
+ return bit;
+ *byte |= bit << i;
+ }
- nv_wr32(i2c->dev, i2c->wr, 4 | (i2c->data ? 2 : 0) | (state ? 1 : 0));
+ return i2c_bitw(port, last ? 1 : 0);
}
-static void
-nv50_i2c_setsda(void *data, int state)
+static int
+i2c_put_byte(struct nouveau_i2c_chan *port, u8 byte)
{
- struct nouveau_i2c_chan *i2c = data;
+ int i, ret;
+ for (i = 7; i >= 0; i--) {
+ ret = i2c_bitw(port, !!(byte & (1 << i)));
+ if (ret < 0)
+ return ret;
+ }
- nv_mask(i2c->dev, i2c->wr, 0x00000006, 4 | (state ? 2 : 0));
- i2c->data = state;
+ ret = i2c_bitr(port);
+ if (ret == 1) /* nack */
+ ret = -EIO;
+ return ret;
}
static int
-nvd0_i2c_getscl(void *data)
+i2c_addr(struct nouveau_i2c_chan *port, struct i2c_msg *msg)
{
- struct nouveau_i2c_chan *i2c = data;
- return !!(nv_rd32(i2c->dev, i2c->rd) & 0x10);
+ u32 addr = msg->addr << 1;
+ if (msg->flags & I2C_M_RD)
+ addr |= 1;
+ return i2c_put_byte(port, addr);
}
static int
-nvd0_i2c_getsda(void *data)
+i2c_bit_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
- struct nouveau_i2c_chan *i2c = data;
- return !!(nv_rd32(i2c->dev, i2c->rd) & 0x20);
+ struct nouveau_i2c_chan *port = (struct nouveau_i2c_chan *)adap;
+ struct i2c_msg *msg = msgs;
+ int ret = 0, mcnt = num;
+
+ while (!ret && mcnt--) {
+ u8 remaining = msg->len;
+ u8 *ptr = msg->buf;
+
+ ret = i2c_start(port);
+ if (ret == 0)
+ ret = i2c_addr(port, msg);
+
+ if (msg->flags & I2C_M_RD) {
+ while (!ret && remaining--)
+ ret = i2c_get_byte(port, ptr++, !remaining);
+ } else {
+ while (!ret && remaining--)
+ ret = i2c_put_byte(port, *ptr++);
+ }
+
+ msg++;
+ }
+
+ i2c_stop(port);
+ return (ret < 0) ? ret : num;
}
-int
-nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
+static u32
+i2c_bit_func(struct i2c_adapter *adap)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_i2c_chan *i2c;
- int ret;
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+const struct i2c_algorithm i2c_bit_algo = {
+ .master_xfer = i2c_bit_xfer,
+ .functionality = i2c_bit_func
+};
+
+static const uint32_t nv50_i2c_port[] = {
+ 0x00e138, 0x00e150, 0x00e168, 0x00e180,
+ 0x00e254, 0x00e274, 0x00e764, 0x00e780,
+ 0x00e79c, 0x00e7b8
+};
- if (entry->chan)
- return -EEXIST;
+static u8 *
+i2c_table(struct drm_device *dev, u8 *version)
+{
+ u8 *dcb = dcb_table(dev), *i2c = NULL;
+ if (dcb) {
+ if (dcb[0] >= 0x15)
+ i2c = ROMPTR(dev, dcb[2]);
+ if (dcb[0] >= 0x30)
+ i2c = ROMPTR(dev, dcb[4]);
+ }
- if (dev_priv->card_type >= NV_50 &&
- dev_priv->card_type <= NV_C0 && entry->read >= NV50_I2C_PORTS) {
- NV_ERROR(dev, "unknown i2c port %d\n", entry->read);
- return -EINVAL;
+ /* early revisions had no version number, use dcb version */
+ if (i2c) {
+ *version = dcb[0];
+ if (*version >= 0x30)
+ *version = i2c[0];
}
- i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
- if (i2c == NULL)
- return -ENOMEM;
-
- switch (entry->port_type) {
- case 0:
- i2c->bit.setsda = nv04_i2c_setsda;
- i2c->bit.setscl = nv04_i2c_setscl;
- i2c->bit.getsda = nv04_i2c_getsda;
- i2c->bit.getscl = nv04_i2c_getscl;
- i2c->rd = entry->read;
- i2c->wr = entry->write;
- break;
- case 4:
- i2c->bit.setsda = nv4e_i2c_setsda;
- i2c->bit.setscl = nv4e_i2c_setscl;
- i2c->bit.getsda = nv4e_i2c_getsda;
- i2c->bit.getscl = nv4e_i2c_getscl;
- i2c->rd = 0x600800 + entry->read;
- i2c->wr = 0x600800 + entry->write;
- break;
- case 5:
- i2c->bit.setsda = nv50_i2c_setsda;
- i2c->bit.setscl = nv50_i2c_setscl;
- if (dev_priv->card_type < NV_D0) {
- i2c->bit.getsda = nv50_i2c_getsda;
- i2c->bit.getscl = nv50_i2c_getscl;
- i2c->rd = nv50_i2c_port[entry->read];
- i2c->wr = i2c->rd;
- } else {
- i2c->bit.getsda = nvd0_i2c_getsda;
- i2c->bit.getscl = nvd0_i2c_getscl;
- i2c->rd = 0x00d014 + (entry->read * 0x20);
- i2c->wr = i2c->rd;
- }
- break;
- case 6:
- i2c->rd = entry->read;
- i2c->wr = entry->write;
- break;
- default:
- NV_ERROR(dev, "DCB I2C port type %d unknown\n",
- entry->port_type);
- kfree(i2c);
- return -EINVAL;
+ return i2c;
+}
+
+int
+nouveau_i2c_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->vbios;
+ struct nouveau_i2c_chan *port;
+ u8 *i2c, *entry, legacy[2][4] = {};
+ u8 version, entries, recordlen;
+ int ret, i;
+
+ INIT_LIST_HEAD(&dev_priv->i2c_ports);
+
+ i2c = i2c_table(dev, &version);
+ if (!i2c) {
+ u8 *bmp = &bios->data[bios->offset];
+ if (bios->type != NVBIOS_BMP)
+ return -ENODEV;
+
+ legacy[0][0] = NV_CIO_CRE_DDC_WR__INDEX;
+ legacy[0][1] = NV_CIO_CRE_DDC_STATUS__INDEX;
+ legacy[1][0] = NV_CIO_CRE_DDC0_WR__INDEX;
+ legacy[1][1] = NV_CIO_CRE_DDC0_STATUS__INDEX;
+
+ /* BMP (from v4.0) has i2c info in the structure; it's in a
+ * fixed location on earlier VBIOS
+ */
+ if (bmp[5] < 4)
+ i2c = &bios->data[0x48];
+ else
+ i2c = &bmp[0x36];
+
+ if (i2c[4]) legacy[0][0] = i2c[4];
+ if (i2c[5]) legacy[0][1] = i2c[5];
+ if (i2c[6]) legacy[1][0] = i2c[6];
+ if (i2c[7]) legacy[1][1] = i2c[7];
}
- snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
- "nouveau-%s-%d", pci_name(dev->pdev), index);
- i2c->adapter.owner = THIS_MODULE;
- i2c->adapter.dev.parent = &dev->pdev->dev;
- i2c->dev = dev;
- i2c_set_adapdata(&i2c->adapter, i2c);
-
- if (entry->port_type < 6) {
- i2c->adapter.algo_data = &i2c->bit;
- i2c->bit.udelay = 40;
- i2c->bit.timeout = usecs_to_jiffies(5000);
- i2c->bit.data = i2c;
- ret = i2c_bit_add_bus(&i2c->adapter);
+ if (i2c && version >= 0x30) {
+ entry = i2c[1] + i2c;
+ entries = i2c[2];
+ recordlen = i2c[3];
+ } else
+ if (i2c) {
+ entry = i2c;
+ entries = 16;
+ recordlen = 4;
} else {
- i2c->adapter.algo = &nouveau_dp_i2c_algo;
- ret = i2c_add_adapter(&i2c->adapter);
+ entry = legacy[0];
+ entries = 2;
+ recordlen = 4;
}
- if (ret) {
- NV_ERROR(dev, "Failed to register i2c %d\n", index);
- kfree(i2c);
- return ret;
+ for (i = 0; i < entries; i++, entry += recordlen) {
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (port == NULL) {
+ nouveau_i2c_fini(dev);
+ return -ENOMEM;
+ }
+
+ port->type = entry[3];
+ if (version < 0x30) {
+ port->type &= 0x07;
+ if (port->type == 0x07)
+ port->type = 0xff;
+ }
+
+ if (port->type == 0xff) {
+ kfree(port);
+ continue;
+ }
+
+ switch (port->type) {
+ case 0: /* NV04:NV50 */
+ port->drive = entry[0];
+ port->sense = entry[1];
+ port->adapter.algo = &i2c_bit_algo;
+ break;
+ case 4: /* NV4E */
+ port->drive = 0x600800 + entry[1];
+ port->sense = port->drive;
+ port->adapter.algo = &i2c_bit_algo;
+ break;
+ case 5: /* NV50- */
+ port->drive = entry[0] & 0x0f;
+ if (dev_priv->card_type < NV_D0) {
+ if (port->drive >= ARRAY_SIZE(nv50_i2c_port))
+ break;
+ port->drive = nv50_i2c_port[port->drive];
+ port->sense = port->drive;
+ } else {
+ port->drive = 0x00d014 + (port->drive * 0x20);
+ port->sense = port->drive;
+ }
+ port->adapter.algo = &i2c_bit_algo;
+ break;
+ case 6: /* NV50- DP AUX */
+ port->drive = entry[0];
+ port->sense = port->drive;
+ port->adapter.algo = &nouveau_dp_i2c_algo;
+ break;
+ default:
+ break;
+ }
+
+ if (!port->adapter.algo) {
+ NV_ERROR(dev, "I2C%d: type %d index %x/%x unknown\n",
+ i, port->type, port->drive, port->sense);
+ kfree(port);
+ continue;
+ }
+
+ snprintf(port->adapter.name, sizeof(port->adapter.name),
+ "nouveau-%s-%d", pci_name(dev->pdev), i);
+ port->adapter.owner = THIS_MODULE;
+ port->adapter.dev.parent = &dev->pdev->dev;
+ port->dev = dev;
+ port->index = i;
+ port->dcb = ROM32(entry[0]);
+ i2c_set_adapdata(&port->adapter, i2c);
+
+ ret = i2c_add_adapter(&port->adapter);
+ if (ret) {
+ NV_ERROR(dev, "I2C%d: failed register: %d\n", i, ret);
+ kfree(port);
+ continue;
+ }
+
+ list_add_tail(&port->head, &dev_priv->i2c_ports);
}
- entry->chan = i2c;
return 0;
}
void
-nouveau_i2c_fini(struct drm_device *dev, struct dcb_i2c_entry *entry)
+nouveau_i2c_fini(struct drm_device *dev)
{
- if (!entry->chan)
- return;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_i2c_chan *port, *tmp;
- i2c_del_adapter(&entry->chan->adapter);
- kfree(entry->chan);
- entry->chan = NULL;
+ list_for_each_entry_safe(port, tmp, &dev_priv->i2c_ports, head) {
+ i2c_del_adapter(&port->adapter);
+ kfree(port);
+ }
}
struct nouveau_i2c_chan *
-nouveau_i2c_find(struct drm_device *dev, int index)
+nouveau_i2c_find(struct drm_device *dev, u8 index)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct dcb_i2c_entry *i2c = &dev_priv->vbios.dcb.i2c[index];
+ struct nouveau_i2c_chan *port;
+
+ if (index == NV_I2C_DEFAULT(0) ||
+ index == NV_I2C_DEFAULT(1)) {
+ u8 version, *i2c = i2c_table(dev, &version);
+ if (i2c && version >= 0x30) {
+ if (index == NV_I2C_DEFAULT(0))
+ index = (i2c[4] & 0x0f);
+ else
+ index = (i2c[4] & 0xf0) >> 4;
+ } else {
+ index = 2;
+ }
+ }
- if (index >= DCB_MAX_NUM_I2C_ENTRIES)
- return NULL;
+ list_for_each_entry(port, &dev_priv->i2c_ports, head) {
+ if (port->index == index)
+ break;
+ }
- if (dev_priv->card_type >= NV_50 && (i2c->entry & 0x00000100)) {
- uint32_t reg = 0xe500, val;
+ if (&port->head == &dev_priv->i2c_ports)
+ return NULL;
- if (i2c->port_type == 6) {
- reg += i2c->read * 0x50;
+ if (dev_priv->card_type >= NV_50 && (port->dcb & 0x00000100)) {
+ u32 reg = 0x00e500, val;
+ if (port->type == 6) {
+ reg += port->drive * 0x50;
val = 0x2002;
} else {
- reg += ((i2c->entry & 0x1e00) >> 9) * 0x50;
+ reg += ((port->dcb & 0x1e00) >> 9) * 0x50;
val = 0xe001;
}
@@ -294,9 +497,7 @@ nouveau_i2c_find(struct drm_device *dev, int index)
nv_mask(dev, reg + 0x00, 0x0000f003, val);
}
- if (!i2c->chan && nouveau_i2c_init(dev, i2c, index))
- return NULL;
- return i2c->chan;
+ return port;
}
bool
@@ -331,9 +532,13 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what,
struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, index);
int i;
- NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index);
+ if (!i2c) {
+ NV_DEBUG(dev, "No bus when probing %s on %d\n", what, index);
+ return -ENODEV;
+ }
- for (i = 0; i2c && info[i].addr; i++) {
+ NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, i2c->index);
+ for (i = 0; info[i].addr; i++) {
if (nouveau_probe_i2c_addr(i2c, info[i].addr) &&
(!match || match(i2c, &info[i]))) {
NV_INFO(dev, "Detected %s: %s\n", what, info[i].type);
@@ -342,6 +547,5 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what,
}
NV_DEBUG(dev, "No devices found.\n");
-
return -ENODEV;
}
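
The bit-banging timing above works out roughly as follows: i2c_delay() rounds T_RISEFALL (1000 ns) and T_HOLD (5000 ns) to about 1 us and 5 us, i2c_raise_scl() polls up to T_TIMEOUT / T_RISEFALL = 2200 times (around 2.2 ms of tolerated clock stretching before -EBUSY/-ETIMEDOUT), and a full bit (rise, two holds, fall) costs on the order of 12 us, i.e. a bus rate somewhat below standard-mode 100 kHz.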
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.h b/drivers/gpu/drm/nouveau/nouveau_i2c.h
index 422b62f..4d2e4e9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.h
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.h
@@ -27,20 +27,25 @@
#include <linux/i2c-algo-bit.h>
#include "drm_dp_helper.h"
-struct dcb_i2c_entry;
+#define NV_I2C_PORT(n) (0x00 + (n))
+#define NV_I2C_PORT_NUM 0x10
+#define NV_I2C_DEFAULT(n) (0x80 + (n))
struct nouveau_i2c_chan {
struct i2c_adapter adapter;
struct drm_device *dev;
- struct i2c_algo_bit_data bit;
- unsigned rd;
- unsigned wr;
- unsigned data;
+ struct list_head head;
+ u8 index;
+ u8 type;
+ u32 dcb;
+ u32 drive;
+ u32 sense;
+ u32 state;
};
-int nouveau_i2c_init(struct drm_device *, struct dcb_i2c_entry *, int index);
-void nouveau_i2c_fini(struct drm_device *, struct dcb_i2c_entry *);
-struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index);
+int nouveau_i2c_init(struct drm_device *);
+void nouveau_i2c_fini(struct drm_device *);
+struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, u8 index);
bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr);
int nouveau_i2c_identify(struct drm_device *dev, const char *what,
struct i2c_board_info *info,
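
A hypothetical lookup using the new index conventions above; NV_I2C_DEFAULT is resolved to the board's default DDC ports inside nouveau_i2c_find():

    /* Sketch only: prefer the board's default port, fall back to an explicit
     * DCB i2c index.  Assumes this header is included.
     */
    static struct nouveau_i2c_chan *
    example_find_ddc(struct drm_device *dev, u8 dcb_i2c_index)
    {
        struct nouveau_i2c_chan *port;

        port = nouveau_i2c_find(dev, NV_I2C_DEFAULT(0));
        if (!port)
            port = nouveau_i2c_find(dev, NV_I2C_PORT(dcb_i2c_index));
        return port;
    }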
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 36bec48..c3a5745 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -407,6 +407,12 @@ nouveau_mem_vram_init(struct drm_device *dev)
ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
if (ret)
return ret;
+ ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
+ if (ret) {
+ /* Reset to default value. */
+ pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
+ }
+
ret = nouveau_ttm_global_init(dev_priv);
if (ret)
@@ -638,10 +644,10 @@ nouveau_mem_timing_init(struct drm_device *dev)
return;
if (P.version == 1)
- hdr = (struct nouveau_pm_tbl_header *) ROMPTR(bios, P.data[4]);
+ hdr = (struct nouveau_pm_tbl_header *) ROMPTR(dev, P.data[4]);
else
if (P.version == 2)
- hdr = (struct nouveau_pm_tbl_header *) ROMPTR(bios, P.data[8]);
+ hdr = (struct nouveau_pm_tbl_header *) ROMPTR(dev, P.data[8]);
else {
NV_WARN(dev, "unknown mem for BIT P %d\n", P.version);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_mxm.c b/drivers/gpu/drm/nouveau/nouveau_mxm.c
new file mode 100644
index 0000000..e5a64f0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nouveau_mxm.c
@@ -0,0 +1,686 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <linux/acpi.h>
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+
+#define MXM_DBG(dev, fmt, args...) NV_DEBUG((dev), "MXM: " fmt, ##args)
+#define MXM_MSG(dev, fmt, args...) NV_INFO((dev), "MXM: " fmt, ##args)
+
+static u8 *
+mxms_data(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ return dev_priv->mxms;
+
+}
+
+static u16
+mxms_version(struct drm_device *dev)
+{
+ u8 *mxms = mxms_data(dev);
+ u16 version = (mxms[4] << 8) | mxms[5];
+ switch (version) {
+ case 0x0200:
+ case 0x0201:
+ case 0x0300:
+ return version;
+ default:
+ break;
+ }
+
+ MXM_DBG(dev, "unknown version %d.%d\n", mxms[4], mxms[5]);
+ return 0x0000;
+}
+
+static u16
+mxms_headerlen(struct drm_device *dev)
+{
+ return 8;
+}
+
+static u16
+mxms_structlen(struct drm_device *dev)
+{
+ return *(u16 *)&mxms_data(dev)[6];
+}
+
+static bool
+mxms_checksum(struct drm_device *dev)
+{
+ u16 size = mxms_headerlen(dev) + mxms_structlen(dev);
+ u8 *mxms = mxms_data(dev), sum = 0;
+ while (size--)
+ sum += *mxms++;
+ if (sum) {
+ MXM_DBG(dev, "checksum invalid\n");
+ return false;
+ }
+ return true;
+}
+
+static bool
+mxms_valid(struct drm_device *dev)
+{
+ u8 *mxms = mxms_data(dev);
+ if (*(u32 *)mxms != 0x5f4d584d) {
+ MXM_DBG(dev, "signature invalid\n");
+ return false;
+ }
+
+ if (!mxms_version(dev) || !mxms_checksum(dev))
+ return false;
+
+ return true;
+}
+
+static bool
+mxms_foreach(struct drm_device *dev, u8 types,
+ bool (*exec)(struct drm_device *, u8 *, void *), void *info)
+{
+ u8 *mxms = mxms_data(dev);
+ u8 *desc = mxms + mxms_headerlen(dev);
+ u8 *fini = desc + mxms_structlen(dev) - 1;
+ while (desc < fini) {
+ u8 type = desc[0] & 0x0f;
+ u8 headerlen = 0;
+ u8 recordlen = 0;
+ u8 entries = 0;
+
+ switch (type) {
+ case 0: /* Output Device Structure */
+ if (mxms_version(dev) >= 0x0300)
+ headerlen = 8;
+ else
+ headerlen = 6;
+ break;
+ case 1: /* System Cooling Capability Structure */
+ case 2: /* Thermal Structure */
+ case 3: /* Input Power Structure */
+ headerlen = 4;
+ break;
+ case 4: /* GPIO Device Structure */
+ headerlen = 4;
+ recordlen = 2;
+ entries = (ROM32(desc[0]) & 0x01f00000) >> 20;
+ break;
+ case 5: /* Vendor Specific Structure */
+ headerlen = 8;
+ break;
+ case 6: /* Backlight Control Structure */
+ if (mxms_version(dev) >= 0x0300) {
+ headerlen = 4;
+ recordlen = 8;
+ entries = (desc[1] & 0xf0) >> 4;
+ } else {
+ headerlen = 8;
+ }
+ break;
+ case 7: /* Fan Control Structure */
+ headerlen = 8;
+ recordlen = 4;
+ entries = desc[1] & 0x07;
+ break;
+ default:
+ MXM_DBG(dev, "unknown descriptor type %d\n", type);
+ return false;
+ }
+
+ if ((drm_debug & DRM_UT_DRIVER) && (exec == NULL)) {
+ static const char * mxms_desc_name[] = {
+ "ODS", "SCCS", "TS", "IPS",
+ "GSD", "VSS", "BCS", "FCS",
+ };
+ u8 *dump = desc;
+ int i, j;
+
+ MXM_DBG(dev, "%4s: ", mxms_desc_name[type]);
+ for (j = headerlen - 1; j >= 0; j--)
+ printk("%02x", dump[j]);
+ printk("\n");
+ dump += headerlen;
+
+ for (i = 0; i < entries; i++, dump += recordlen) {
+ MXM_DBG(dev, " ");
+ for (j = recordlen - 1; j >= 0; j--)
+ printk("%02x", dump[j]);
+ printk("\n");
+ }
+ }
+
+ if (types & (1 << type)) {
+ if (!exec(dev, desc, info))
+ return false;
+ }
+
+ desc += headerlen + (entries * recordlen);
+ }
+
+ return true;
+}
+
+static u8 *
+mxm_table(struct drm_device *dev, u8 *size)
+{
+ struct bit_entry x;
+
+ if (bit_table(dev, 'x', &x)) {
+ MXM_DBG(dev, "BIT 'x' table not present\n");
+ return NULL;
+ }
+
+ if (x.version != 1 || x.length < 3) {
+ MXM_MSG(dev, "BIT x table %d/%d unknown\n",
+ x.version, x.length);
+ return NULL;
+ }
+
+ *size = x.length;
+ return x.data;
+}
+
+/* These map MXM v2.x digital connection values to the appropriate SOR/link,
+ * hopefully they're correct for all boards within the same chipset...
+ *
+ * MXM v3.x VBIOS are nicer and provide pointers to these tables.
+ */
+static u8 nv84_sor_map[16] = {
+ 0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8 nv92_sor_map[16] = {
+ 0x00, 0x12, 0x22, 0x11, 0x32, 0x31, 0x11, 0x31,
+ 0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8 nv94_sor_map[16] = {
+ 0x00, 0x14, 0x24, 0x11, 0x34, 0x31, 0x11, 0x31,
+ 0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8 nv96_sor_map[16] = {
+ 0x00, 0x14, 0x24, 0x00, 0x34, 0x00, 0x11, 0x31,
+ 0x11, 0x31, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8 nv98_sor_map[16] = {
+ 0x00, 0x14, 0x12, 0x11, 0x00, 0x31, 0x11, 0x31,
+ 0x11, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+static u8
+mxm_sor_map(struct drm_device *dev, u8 conn)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u8 len, *mxm = mxm_table(dev, &len);
+ if (mxm && len >= 6) {
+ u8 *map = ROMPTR(dev, mxm[4]);
+ if (map) {
+ if (map[0] == 0x10) {
+ if (conn < map[3])
+ return map[map[1] + conn];
+ return 0x00;
+ }
+
+ MXM_MSG(dev, "unknown sor map 0x%02x\n", map[0]);
+ }
+ }
+
+ if (dev_priv->chipset == 0x84 || dev_priv->chipset == 0x86)
+ return nv84_sor_map[conn];
+ if (dev_priv->chipset == 0x92)
+ return nv92_sor_map[conn];
+ if (dev_priv->chipset == 0x94)
+ return nv94_sor_map[conn];
+ if (dev_priv->chipset == 0x96)
+ return nv96_sor_map[conn];
+ if (dev_priv->chipset == 0x98)
+ return nv98_sor_map[conn];
+
+ MXM_MSG(dev, "missing sor map\n");
+ return 0x00;
+}
+
+static u8
+mxm_ddc_map(struct drm_device *dev, u8 port)
+{
+ u8 len, *mxm = mxm_table(dev, &len);
+ if (mxm && len >= 8) {
+ u8 *map = ROMPTR(dev, mxm[6]);
+ if (map) {
+ if (map[0] == 0x10) {
+ if (port < map[3])
+ return map[map[1] + port];
+ return 0x00;
+ }
+
+ MXM_MSG(dev, "unknown ddc map 0x%02x\n", map[0]);
+ }
+ }
+
+ /* v2.x: directly write port as dcb i2cidx */
+ return (port << 4) | port;
+}
+
+struct mxms_odev {
+ u8 outp_type;
+ u8 conn_type;
+ u8 ddc_port;
+ u8 dig_conn;
+};
+
+static void
+mxms_output_device(struct drm_device *dev, u8 *pdata, struct mxms_odev *desc)
+{
+ u64 data = ROM32(pdata[0]);
+ if (mxms_version(dev) >= 0x0300)
+ data |= (u64)ROM16(pdata[4]) << 32;
+
+ desc->outp_type = (data & 0x00000000000000f0ULL) >> 4;
+ desc->ddc_port = (data & 0x0000000000000f00ULL) >> 8;
+ desc->conn_type = (data & 0x000000000001f000ULL) >> 12;
+ desc->dig_conn = (data & 0x0000000000780000ULL) >> 19;
+}
+
+struct context {
+ u32 *outp;
+ struct mxms_odev desc;
+};
+
+static bool
+mxm_match_tmds_partner(struct drm_device *dev, u8 *data, void *info)
+{
+ struct context *ctx = info;
+ struct mxms_odev desc;
+
+ mxms_output_device(dev, data, &desc);
+ if (desc.outp_type == 2 &&
+ desc.dig_conn == ctx->desc.dig_conn)
+ return false;
+ return true;
+}
+
+static bool
+mxm_match_dcb(struct drm_device *dev, u8 *data, void *info)
+{
+ struct context *ctx = info;
+ u64 desc = *(u64 *)data;
+
+ mxms_output_device(dev, data, &ctx->desc);
+
+ /* match dcb encoder type to mxm-ods device type */
+ if ((ctx->outp[0] & 0x0000000f) != ctx->desc.outp_type)
+ return true;
+
+ /* digital output, have some extra stuff to match here; there's a
+ * table in the vbios that provides a mapping from the mxm digital
+ * connection enum values to SOR/link
+ */
+ if ((desc & 0x00000000000000f0) >= 0x20) {
+ /* check against sor index */
+ u8 link = mxm_sor_map(dev, ctx->desc.dig_conn);
+ if ((ctx->outp[0] & 0x0f000000) != (link & 0x0f) << 24)
+ return true;
+
+ /* check dcb entry has a compatible link field */
+ link = (link & 0x30) >> 4;
+ if ((link & ((ctx->outp[1] & 0x00000030) >> 4)) != link)
+ return true;
+ }
+
+ /* mark this descriptor accounted for by setting invalid device type,
+ * except of course some manufacturers don't follow specs properly and
+ * we need to avoid killing off the TMDS function on DP connectors
+ * if MXM-SIS is missing an entry for it.
+ */
+ data[0] &= ~0xf0;
+ if (ctx->desc.outp_type == 6 && ctx->desc.conn_type == 6 &&
+ mxms_foreach(dev, 0x01, mxm_match_tmds_partner, ctx)) {
+ data[0] |= 0x20; /* modify descriptor to match TMDS now */
+ } else {
+ data[0] |= 0xf0;
+ }
+
+ return false;
+}
+
+static int
+mxm_dcb_sanitise_entry(struct drm_device *dev, void *data, int idx, u8 *dcbe)
+{
+ struct context ctx = { .outp = (u32 *)dcbe };
+ u8 type, i2cidx, link;
+ u8 *conn;
+
+ /* look for an output device structure that matches this dcb entry.
+ * if one isn't found, disable it.
+ */
+ if (mxms_foreach(dev, 0x01, mxm_match_dcb, &ctx)) {
+ MXM_DBG(dev, "disable %d: 0x%08x 0x%08x\n",
+ idx, ctx.outp[0], ctx.outp[1]);
+ ctx.outp[0] |= 0x0000000f;
+ return 0;
+ }
+
+ /* modify the output's ddc/aux port, there's a pointer to a table
+ * with the mapping from mxm ddc/aux port to dcb i2c_index in the
+ * vbios mxm table
+ */
+ i2cidx = mxm_ddc_map(dev, ctx.desc.ddc_port);
+ if ((ctx.outp[0] & 0x0000000f) != OUTPUT_DP)
+ i2cidx = (i2cidx & 0x0f) << 4;
+ else
+ i2cidx = (i2cidx & 0xf0);
+
+ if (i2cidx != 0xf0) {
+ ctx.outp[0] &= ~0x000000f0;
+ ctx.outp[0] |= i2cidx;
+ }
+
+ /* override dcb sorconf.link, based on what mxm data says */
+ switch (ctx.desc.outp_type) {
+ case 0x00: /* Analog CRT */
+ case 0x01: /* Analog TV/HDTV */
+ break;
+ default:
+ link = mxm_sor_map(dev, ctx.desc.dig_conn) & 0x30;
+ ctx.outp[1] &= ~0x00000030;
+ ctx.outp[1] |= link;
+ break;
+ }
+
+ /* we may need to fixup various other vbios tables based on what
+ * the descriptor says the connector type should be.
+ *
+ * in a lot of cases, the vbios tables will claim DVI-I is possible,
+ * and the mxm data says the connector is really HDMI. another
+ * common example is DP->eDP.
+ */
+ conn = dcb_conn(dev, (ctx.outp[0] & 0x0000f000) >> 12);
+ type = conn[0];
+ switch (ctx.desc.conn_type) {
+ case 0x01: /* LVDS */
+ ctx.outp[1] |= 0x00000004; /* use_power_scripts */
+ /* XXX: modify default link width in LVDS table */
+ break;
+ case 0x02: /* HDMI */
+ type = DCB_CONNECTOR_HDMI_1;
+ break;
+ case 0x03: /* DVI-D */
+ type = DCB_CONNECTOR_DVI_D;
+ break;
+ case 0x0e: /* eDP, falls through to DPint */
+ ctx.outp[1] |= 0x00010000;
+ case 0x07: /* DP internal, wtf is this?? HP8670w */
+ ctx.outp[1] |= 0x00000004; /* use_power_scripts? */
+ type = DCB_CONNECTOR_eDP;
+ break;
+ default:
+ break;
+ }
+
+ if (mxms_version(dev) >= 0x0300)
+ conn[0] = type;
+
+ return 0;
+}
+
+static bool
+mxm_show_unmatched(struct drm_device *dev, u8 *data, void *info)
+{
+ u64 desc = *(u64 *)data;
+ if ((desc & 0xf0) != 0xf0)
+ MXM_MSG(dev, "unmatched output device 0x%016llx\n", desc);
+ return true;
+}
+
+static void
+mxm_dcb_sanitise(struct drm_device *dev)
+{
+ u8 *dcb = dcb_table(dev);
+ if (!dcb || dcb[0] != 0x40) {
+ MXM_DBG(dev, "unsupported DCB version\n");
+ return;
+ }
+
+ dcb_outp_foreach(dev, NULL, mxm_dcb_sanitise_entry);
+ mxms_foreach(dev, 0x01, mxm_show_unmatched, NULL);
+}
+
+static bool
+mxm_shadow_rom_fetch(struct nouveau_i2c_chan *i2c, u8 addr,
+ u8 offset, u8 size, u8 *data)
+{
+ struct i2c_msg msgs[] = {
+ { .addr = addr, .flags = 0, .len = 1, .buf = &offset },
+ { .addr = addr, .flags = I2C_M_RD, .len = size, .buf = data, },
+ };
+
+ return i2c_transfer(&i2c->adapter, msgs, 2) == 2;
+}
+
+static bool
+mxm_shadow_rom(struct drm_device *dev, u8 version)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_i2c_chan *i2c = NULL;
+ u8 i2cidx, mxms[6], addr, size;
+
+ i2cidx = mxm_ddc_map(dev, 1 /* LVDS_DDC */) & 0x0f;
+ if (i2cidx < 0x0f)
+ i2c = nouveau_i2c_find(dev, i2cidx);
+ if (!i2c)
+ return false;
+
+ addr = 0x54;
+ if (!mxm_shadow_rom_fetch(i2c, addr, 0, 6, mxms)) {
+ addr = 0x56;
+ if (!mxm_shadow_rom_fetch(i2c, addr, 0, 6, mxms))
+ return false;
+ }
+
+ dev_priv->mxms = mxms;
+ size = mxms_headerlen(dev) + mxms_structlen(dev);
+ dev_priv->mxms = kmalloc(size, GFP_KERNEL);
+
+ if (dev_priv->mxms &&
+ mxm_shadow_rom_fetch(i2c, addr, 0, size, dev_priv->mxms))
+ return true;
+
+ kfree(dev_priv->mxms);
+ dev_priv->mxms = NULL;
+ return false;
+}
+
+#if defined(CONFIG_ACPI)
+static bool
+mxm_shadow_dsm(struct drm_device *dev, u8 version)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ static char muid[] = {
+ 0x00, 0xA4, 0x04, 0x40, 0x7D, 0x91, 0xF2, 0x4C,
+ 0xB8, 0x9C, 0x79, 0xB6, 0x2F, 0xD5, 0x56, 0x65
+ };
+ u32 mxms_args[] = { 0x00000000 };
+ union acpi_object args[4] = {
+ /* _DSM MUID */
+ { .buffer.type = 3,
+ .buffer.length = sizeof(muid),
+ .buffer.pointer = muid,
+ },
+ /* spec says this can be zero to mean "highest revision", but
+ * of course there's at least one bios out there which fails
+ * unless you pass in exactly the version it supports..
+ */
+ { .integer.type = ACPI_TYPE_INTEGER,
+ .integer.value = (version & 0xf0) << 4 | (version & 0x0f),
+ },
+ /* MXMS function */
+ { .integer.type = ACPI_TYPE_INTEGER,
+ .integer.value = 0x00000010,
+ },
+ /* Pointer to MXMS arguments */
+ { .buffer.type = ACPI_TYPE_BUFFER,
+ .buffer.length = sizeof(mxms_args),
+ .buffer.pointer = (char *)mxms_args,
+ },
+ };
+ struct acpi_object_list list = { ARRAY_SIZE(args), args };
+ struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_handle handle;
+ int ret;
+
+ handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
+ if (!handle)
+ return false;
+
+ ret = acpi_evaluate_object(handle, "_DSM", &list, &retn);
+ if (ret) {
+ MXM_DBG(dev, "DSM MXMS failed: %d\n", ret);
+ return false;
+ }
+
+ obj = retn.pointer;
+ if (obj->type == ACPI_TYPE_BUFFER) {
+ dev_priv->mxms = kmemdup(obj->buffer.pointer,
+ obj->buffer.length, GFP_KERNEL);
+ } else
+ if (obj->type == ACPI_TYPE_INTEGER) {
+ MXM_DBG(dev, "DSM MXMS returned 0x%llx\n", obj->integer.value);
+ }
+
+ kfree(obj);
+ return dev_priv->mxms != NULL;
+}
+#endif
+
+#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
+
+#define WMI_WMMX_GUID "F6CB5C3C-9CAE-4EBD-B577-931EA32A2CC0"
+
+static bool
+mxm_shadow_wmi(struct drm_device *dev, u8 version)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 mxms_args[] = { 0x534D584D /* MXMS */, version, 0 };
+ struct acpi_buffer args = { sizeof(mxms_args), mxms_args };
+ struct acpi_buffer retn = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+
+ if (!wmi_has_guid(WMI_WMMX_GUID))
+ return false;
+
+ status = wmi_evaluate_method(WMI_WMMX_GUID, 0, 0, &args, &retn);
+ if (ACPI_FAILURE(status)) {
+ MXM_DBG(dev, "WMMX MXMS returned %d\n", status);
+ return false;
+ }
+
+ obj = retn.pointer;
+ if (obj->type == ACPI_TYPE_BUFFER) {
+ dev_priv->mxms = kmemdup(obj->buffer.pointer,
+ obj->buffer.length, GFP_KERNEL);
+ }
+
+ kfree(obj);
+ return dev_priv->mxms != NULL;
+}
+#endif
+
+struct mxm_shadow_h {
+ const char *name;
+ bool (*exec)(struct drm_device *, u8 version);
+} _mxm_shadow[] = {
+ { "ROM", mxm_shadow_rom },
+#if defined(CONFIG_ACPI)
+ { "DSM", mxm_shadow_dsm },
+#endif
+#if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE)
+ { "WMI", mxm_shadow_wmi },
+#endif
+ {}
+};
+
+static int
+mxm_shadow(struct drm_device *dev, u8 version)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct mxm_shadow_h *shadow = _mxm_shadow;
+ do {
+ MXM_DBG(dev, "checking %s\n", shadow->name);
+ if (shadow->exec(dev, version)) {
+ if (mxms_valid(dev))
+ return 0;
+ kfree(dev_priv->mxms);
+ dev_priv->mxms = NULL;
+ }
+ } while ((++shadow)->name);
+ return -ENOENT;
+}
+
+int
+nouveau_mxm_init(struct drm_device *dev)
+{
+ u8 mxm_size, *mxm = mxm_table(dev, &mxm_size);
+ if (!mxm || !mxm[0]) {
+ MXM_MSG(dev, "no VBIOS data, nothing to do\n");
+ return 0;
+ }
+
+ MXM_MSG(dev, "BIOS version %d.%d\n", mxm[0] >> 4, mxm[0] & 0x0f);
+
+ if (mxm_shadow(dev, mxm[0])) {
+ MXM_MSG(dev, "failed to locate valid SIS\n");
+#if 0
+ /* we should, perhaps, fall back to some kind of limited
+ * mode here if the x86 vbios hasn't already done the
+ * work for us (so we prevent loading with completely
+ * whacked vbios tables).
+ */
+ return -EINVAL;
+#else
+ return 0;
+#endif
+ }
+
+ MXM_MSG(dev, "MXMS Version %d.%d\n",
+ mxms_version(dev) >> 8, mxms_version(dev) & 0xff);
+ mxms_foreach(dev, 0, NULL, NULL);
+
+ if (nouveau_mxmdcb)
+ mxm_dcb_sanitise(dev);
+ return 0;
+}
+
+void
+nouveau_mxm_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ kfree(dev_priv->mxms);
+ dev_priv->mxms = NULL;
+}
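
The revision argument built in mxm_shadow_dsm() spreads the packed version byte 0xXY out to 0x0X0Y before handing it to _DSM; a worked example of that expression:

    u8 version = 0x30;                                  /* MXM 3.0 */
    u32 rev = (version & 0xf0) << 4 | (version & 0x0f); /* == 0x0300 */

(0x25 would become 0x0205 the same way.)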
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 6abdbe6..2ef883c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -115,7 +115,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *nobj = NULL;
struct drm_mm_node *mem;
- uint32_t offset;
+ uint64_t offset;
int target, ret;
mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 960c0ae..cc419fae 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -723,14 +723,14 @@ nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
nv_wo32(chan->ramin, 0x020c, 0x000000ff);
/* map display semaphore buffers into channel's vm */
- if (dev_priv->card_type >= NV_D0)
- return 0;
-
- for (i = 0; i < 2; i++) {
- struct nv50_display_crtc *dispc = &nv50_display(dev)->crtc[i];
-
- ret = nouveau_bo_vma_add(dispc->sem.bo, chan->vm,
- &chan->dispc_vma[i]);
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ struct nouveau_bo *bo;
+ if (dev_priv->card_type >= NV_D0)
+ bo = nvd0_display_crtc_sema(dev, i);
+ else
+ bo = nv50_display(dev)->crtc[i].sem.bo;
+
+ ret = nouveau_bo_vma_add(bo, chan->vm, &chan->dispc_vma[i]);
if (ret)
return ret;
}
@@ -879,9 +879,14 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
NV_DEBUG(dev, "ch%d\n", chan->id);
- if (dev_priv->card_type >= NV_50 && dev_priv->card_type <= NV_C0) {
+ if (dev_priv->card_type >= NV_D0) {
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ struct nouveau_bo *bo = nvd0_display_crtc_sema(dev, i);
+ nouveau_bo_vma_del(bo, &chan->dispc_vma[i]);
+ }
+ } else
+ if (dev_priv->card_type >= NV_50) {
struct nv50_display *disp = nv50_display(dev);
-
for (i = 0; i < dev->mode_config.num_crtc; i++) {
struct nv50_display_crtc *dispc = &disp->crtc[i];
nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]);
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index 33d03fb..58f4973 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -41,7 +41,7 @@ legacy_perf_init(struct drm_device *dev)
return;
}
- perf = ROMPTR(bios, bmp[0x73]);
+ perf = ROMPTR(dev, bmp[0x73]);
if (!perf) {
NV_DEBUG(dev, "No memclock table pointer found.\n");
return;
@@ -87,7 +87,7 @@ nouveau_perf_timing(struct drm_device *dev, struct bit_entry *P,
* ramcfg to select the correct subentry
*/
if (P->version == 2) {
- u8 *tmap = ROMPTR(bios, P->data[4]);
+ u8 *tmap = ROMPTR(dev, P->data[4]);
if (!tmap) {
NV_DEBUG(dev, "no timing map pointer\n");
return NULL;
@@ -140,7 +140,6 @@ nouveau_perf_voltage(struct drm_device *dev, struct bit_entry *P,
struct nouveau_pm_level *perflvl)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->vbios;
u8 *vmap;
int id;
@@ -165,7 +164,7 @@ nouveau_perf_voltage(struct drm_device *dev, struct bit_entry *P,
return;
}
- vmap = ROMPTR(bios, P->data[32]);
+ vmap = ROMPTR(dev, P->data[32]);
if (!vmap) {
NV_DEBUG(dev, "volt map table pointer invalid\n");
return;
@@ -200,12 +199,14 @@ nouveau_perf_init(struct drm_device *dev)
return;
}
- perf = ROMPTR(bios, P.data[0]);
+ perf = ROMPTR(dev, P.data[0]);
version = perf[0];
headerlen = perf[1];
if (version < 0x40) {
recordlen = perf[3] + (perf[4] * perf[5]);
entries = perf[2];
+
+ pm->pwm_divisor = ROM16(perf[6]);
} else {
recordlen = perf[2] + (perf[3] * perf[4]);
entries = perf[5];
@@ -216,7 +217,7 @@ nouveau_perf_init(struct drm_device *dev)
return;
}
- perf = ROMPTR(bios, bios->data[bios->offset + 0x94]);
+ perf = ROMPTR(dev, bios->data[bios->offset + 0x94]);
if (!perf) {
NV_DEBUG(dev, "perf table pointer invalid\n");
return;
@@ -283,7 +284,6 @@ nouveau_perf_init(struct drm_device *dev)
perflvl->memory = ROM16(entry[11]) * 1000;
else
perflvl->memory = ROM16(entry[11]) * 2000;
-
break;
case 0x25:
perflvl->fanspeed = entry[4];
@@ -300,8 +300,8 @@ nouveau_perf_init(struct drm_device *dev)
perflvl->core = ROM16(entry[8]) * 1000;
perflvl->shader = ROM16(entry[10]) * 1000;
perflvl->memory = ROM16(entry[12]) * 1000;
- /*XXX: confirm on 0x35 */
- perflvl->unk05 = ROM16(entry[16]) * 1000;
+ perflvl->vdec = ROM16(entry[16]) * 1000;
+ perflvl->dom6 = ROM16(entry[20]) * 1000;
break;
case 0x40:
#define subent(n) (ROM16(entry[perf[2] + ((n) * perf[3])]) & 0xfff) * 1000
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index a539fd2..9064d7f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -26,6 +26,7 @@
#include "nouveau_drv.h"
#include "nouveau_pm.h"
+#include "nouveau_gpio.h"
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
@@ -35,22 +36,95 @@
#include <linux/hwmon-sysfs.h>
static int
-nouveau_pm_clock_set(struct drm_device *dev, struct nouveau_pm_level *perflvl,
- u8 id, u32 khz)
+nouveau_pwmfan_get(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
- void *pre_state;
+ struct gpio_func gpio;
+ u32 divs, duty;
+ int ret;
- if (khz == 0)
- return 0;
+ if (!pm->pwm_get)
+ return -ENODEV;
+
+ ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio);
+ if (ret == 0) {
+ ret = pm->pwm_get(dev, gpio.line, &divs, &duty);
+ if (ret == 0) {
+ divs = max(divs, duty);
+ if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1))
+ duty = divs - duty;
+ return (duty * 100) / divs;
+ }
+
+ return nouveau_gpio_func_get(dev, gpio.func) * 100;
+ }
+
+ return -ENODEV;
+}
+
+static int
+nouveau_pwmfan_set(struct drm_device *dev, int percent)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct gpio_func gpio;
+ u32 divs, duty;
+ int ret;
+
+ if (!pm->pwm_set)
+ return -ENODEV;
+
+ ret = nouveau_gpio_find(dev, 0, DCB_GPIO_PWM_FAN, 0xff, &gpio);
+ if (ret == 0) {
+ divs = pm->pwm_divisor;
+ if (pm->fan.pwm_freq) {
+ /*XXX: PNVIO clock more than likely... */
+ divs = 135000 / pm->fan.pwm_freq;
+ if (dev_priv->chipset < 0xa3)
+ divs /= 4;
+ }
+
+ duty = ((divs * percent) + 99) / 100;
+ if (dev_priv->card_type <= NV_40 || (gpio.log[0] & 1))
+ duty = divs - duty;
- pre_state = pm->clock_pre(dev, perflvl, id, khz);
- if (IS_ERR(pre_state))
- return PTR_ERR(pre_state);
+ return pm->pwm_set(dev, gpio.line, divs, duty);
+ }
+
+ return -ENODEV;
+}
+
+static int
+nouveau_pm_perflvl_aux(struct drm_device *dev, struct nouveau_pm_level *perflvl,
+ struct nouveau_pm_level *a, struct nouveau_pm_level *b)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ int ret;
+
+ /*XXX: not on all boards, we should control based on temperature
+ * on recent boards.. or maybe on some other factor we don't
+ * know about?
+ */
+ if (a->fanspeed && b->fanspeed && b->fanspeed > a->fanspeed) {
+ ret = nouveau_pwmfan_set(dev, perflvl->fanspeed);
+ if (ret && ret != -ENODEV) {
+ NV_ERROR(dev, "fanspeed set failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (pm->voltage.supported && pm->voltage_set) {
+ if (perflvl->volt_min && b->volt_min > a->volt_min) {
+ ret = pm->voltage_set(dev, perflvl->volt_min);
+ if (ret) {
+ NV_ERROR(dev, "voltage set failed: %d\n", ret);
+ return ret;
+ }
+ }
+ }
- if (pre_state)
- pm->clock_set(dev, pre_state);
return 0;
}
@@ -59,31 +133,24 @@ nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ void *state;
int ret;
if (perflvl == pm->cur)
return 0;
- if (pm->voltage.supported && pm->voltage_set && perflvl->volt_min) {
- ret = pm->voltage_set(dev, perflvl->volt_min);
- if (ret) {
- NV_ERROR(dev, "voltage_set %d failed: %d\n",
- perflvl->volt_min, ret);
- }
- }
+ ret = nouveau_pm_perflvl_aux(dev, perflvl, pm->cur, perflvl);
+ if (ret)
+ return ret;
- if (pm->clocks_pre) {
- void *state = pm->clocks_pre(dev, perflvl);
- if (IS_ERR(state))
- return PTR_ERR(state);
- pm->clocks_set(dev, state);
- } else
- if (pm->clock_set) {
- nouveau_pm_clock_set(dev, perflvl, PLL_CORE, perflvl->core);
- nouveau_pm_clock_set(dev, perflvl, PLL_SHADER, perflvl->shader);
- nouveau_pm_clock_set(dev, perflvl, PLL_MEMORY, perflvl->memory);
- nouveau_pm_clock_set(dev, perflvl, PLL_UNK05, perflvl->unk05);
- }
+ state = pm->clocks_pre(dev, perflvl);
+ if (IS_ERR(state))
+ return PTR_ERR(state);
+ pm->clocks_set(dev, state);
+
+ ret = nouveau_pm_perflvl_aux(dev, perflvl, perflvl, pm->cur);
+ if (ret)
+ return ret;
pm->cur = perflvl;
return 0;
@@ -130,28 +197,9 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
memset(perflvl, 0, sizeof(*perflvl));
- if (pm->clocks_get) {
- ret = pm->clocks_get(dev, perflvl);
- if (ret)
- return ret;
- } else
- if (pm->clock_get) {
- ret = pm->clock_get(dev, PLL_CORE);
- if (ret > 0)
- perflvl->core = ret;
-
- ret = pm->clock_get(dev, PLL_MEMORY);
- if (ret > 0)
- perflvl->memory = ret;
-
- ret = pm->clock_get(dev, PLL_SHADER);
- if (ret > 0)
- perflvl->shader = ret;
-
- ret = pm->clock_get(dev, PLL_UNK05);
- if (ret > 0)
- perflvl->unk05 = ret;
- }
+ ret = pm->clocks_get(dev, perflvl);
+ if (ret)
+ return ret;
if (pm->voltage.supported && pm->voltage_get) {
ret = pm->voltage_get(dev);
@@ -161,6 +209,10 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
}
}
+ ret = nouveau_pwmfan_get(dev);
+ if (ret > 0)
+ perflvl->fanspeed = ret;
+
return 0;
}
@@ -412,6 +464,172 @@ static SENSOR_DEVICE_ATTR(update_rate, S_IRUGO,
nouveau_hwmon_show_update_rate,
NULL, 0);
+static ssize_t
+nouveau_hwmon_show_fan0_input(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ struct gpio_func gpio;
+ u32 cycles, cur, prev;
+ u64 start;
+ int ret;
+
+ ret = nouveau_gpio_find(dev, 0, DCB_GPIO_FAN_SENSE, 0xff, &gpio);
+ if (ret)
+ return ret;
+
+ /* Monitor the GPIO input 0x3b for 250ms.
+ * When the fan spins, it changes the value of GPIO FAN_SENSE.
+ * We get 4 changes (0 -> 1 -> 0 -> 1 -> [...]) per complete rotation.
+ */
+ start = ptimer->read(dev);
+ prev = nouveau_gpio_sense(dev, 0, gpio.line);
+ cycles = 0;
+ do {
+ cur = nouveau_gpio_sense(dev, 0, gpio.line);
+ if (prev != cur) {
+ cycles++;
+ prev = cur;
+ }
+
+ usleep_range(500, 1000); /* supports 0 < rpm < 7500 */
+ } while (ptimer->read(dev) - start < 250000000);
+
+ /* interpolate to get rpm */
+ return sprintf(buf, "%i\n", cycles / 4 * 4 * 60);
+}
+static SENSOR_DEVICE_ATTR(fan0_input, S_IRUGO, nouveau_hwmon_show_fan0_input,
+ NULL, 0);
+
+static ssize_t
+nouveau_hwmon_get_pwm0(struct device *d, struct device_attribute *a, char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ int ret;
+
+ ret = nouveau_pwmfan_get(dev);
+ if (ret < 0)
+ return ret;
+
+ return sprintf(buf, "%i\n", ret);
+}
+
+static ssize_t
+nouveau_hwmon_set_pwm0(struct device *d, struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ int ret = -ENODEV;
+ long value;
+
+ if (nouveau_perflvl_wr != 7777)
+ return -EPERM;
+
+ if (strict_strtol(buf, 10, &value) == -EINVAL)
+ return -EINVAL;
+
+ if (value < pm->fan.min_duty)
+ value = pm->fan.min_duty;
+ if (value > pm->fan.max_duty)
+ value = pm->fan.max_duty;
+
+ ret = nouveau_pwmfan_set(dev, value);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm0, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_get_pwm0,
+ nouveau_hwmon_set_pwm0, 0);
+
+static ssize_t
+nouveau_hwmon_get_pwm0_min(struct device *d,
+ struct device_attribute *a, char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+
+ return sprintf(buf, "%i\n", pm->fan.min_duty);
+}
+
+static ssize_t
+nouveau_hwmon_set_pwm0_min(struct device *d, struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ long value;
+
+ if (strict_strtol(buf, 10, &value) == -EINVAL)
+ return -EINVAL;
+
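+ /* clamp: keep min_duty at or above 10 and at least 10 below max_duty */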
+ if (value < 0)
+ value = 0;
+
+ if (pm->fan.max_duty - value < 10)
+ value = pm->fan.max_duty - 10;
+
+ if (value < 10)
+ pm->fan.min_duty = 10;
+ else
+ pm->fan.min_duty = value;
+
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm0_min, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_get_pwm0_min,
+ nouveau_hwmon_set_pwm0_min, 0);
+
+static ssize_t
+nouveau_hwmon_get_pwm0_max(struct device *d,
+ struct device_attribute *a, char *buf)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+
+ return sprintf(buf, "%i\n", pm->fan.max_duty);
+}
+
+static ssize_t
+nouveau_hwmon_set_pwm0_max(struct device *d, struct device_attribute *a,
+ const char *buf, size_t count)
+{
+ struct drm_device *dev = dev_get_drvdata(d);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ long value;
+
+ if (strict_strtol(buf, 10, &value) == -EINVAL)
+ return -EINVAL;
+
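+ /* clamp: keep max_duty at most 100 and at least 10 above min_duty */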
+ if (value < 0)
+ value = 0;
+
+ if (value - pm->fan.min_duty < 10)
+ value = pm->fan.min_duty + 10;
+
+ if (value > 100)
+ pm->fan.max_duty = 100;
+ else
+ pm->fan.max_duty = value;
+
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(pwm0_max, S_IRUGO | S_IWUSR,
+ nouveau_hwmon_get_pwm0_max,
+ nouveau_hwmon_set_pwm0_max, 0);
+
static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
@@ -420,20 +638,36 @@ static struct attribute *hwmon_attributes[] = {
&sensor_dev_attr_update_rate.dev_attr.attr,
NULL
};
+static struct attribute *hwmon_fan_rpm_attributes[] = {
+ &sensor_dev_attr_fan0_input.dev_attr.attr,
+ NULL
+};
+static struct attribute *hwmon_pwm_fan_attributes[] = {
+ &sensor_dev_attr_pwm0.dev_attr.attr,
+ &sensor_dev_attr_pwm0_min.dev_attr.attr,
+ &sensor_dev_attr_pwm0_max.dev_attr.attr,
+ NULL
+};
static const struct attribute_group hwmon_attrgroup = {
.attrs = hwmon_attributes,
};
+static const struct attribute_group hwmon_fan_rpm_attrgroup = {
+ .attrs = hwmon_fan_rpm_attributes,
+};
+static const struct attribute_group hwmon_pwm_fan_attrgroup = {
+ .attrs = hwmon_pwm_fan_attributes,
+};
#endif
static int
nouveau_hwmon_init(struct drm_device *dev)
{
-#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
struct device *hwmon_dev;
- int ret;
+ int ret = 0;
if (!pm->temp_get)
return -ENODEV;
@@ -446,17 +680,46 @@ nouveau_hwmon_init(struct drm_device *dev)
return ret;
}
dev_set_drvdata(hwmon_dev, dev);
+
+ /* default sysfs entries */
ret = sysfs_create_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
if (ret) {
- NV_ERROR(dev,
- "Unable to create hwmon sysfs file: %d\n", ret);
- hwmon_device_unregister(hwmon_dev);
- return ret;
+ goto error;
+ }
+
+ /* if the card has a pwm fan */
+ /*XXX: incorrect, need better detection for this, some boards have
+ * the gpio entries for pwm fan control even when there's no
+ * actual fan connected to it... therm table? */
+ if (nouveau_pwmfan_get(dev) >= 0) {
+ ret = sysfs_create_group(&dev->pdev->dev.kobj,
+ &hwmon_pwm_fan_attrgroup);
+ if (ret)
+ goto error;
+ }
+
+ /* if the card can read the fan rpm */
+ if (nouveau_gpio_func_valid(dev, DCB_GPIO_FAN_SENSE)) {
+ ret = sysfs_create_group(&dev->pdev->dev.kobj,
+ &hwmon_fan_rpm_attrgroup);
+ if (ret)
+ goto error;
}
pm->hwmon = hwmon_dev;
-#endif
+
+ return 0;
+
+error:
+ NV_ERROR(dev, "Unable to create some hwmon sysfs files: %d\n", ret);
+ hwmon_device_unregister(hwmon_dev);
+ pm->hwmon = NULL;
+ return ret;
+#else
+ pm->hwmon = NULL;
return 0;
+#endif
}
static void
@@ -468,6 +731,9 @@ nouveau_hwmon_fini(struct drm_device *dev)
if (pm->hwmon) {
sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_attrgroup);
+ sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_pwm_fan_attrgroup);
+ sysfs_remove_group(&dev->pdev->dev.kobj, &hwmon_fan_rpm_attrgroup);
+
hwmon_device_unregister(pm->hwmon);
}
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.h b/drivers/gpu/drm/nouveau/nouveau_pm.h
index 8ac02cd..2f8e14f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.h
@@ -47,29 +47,33 @@ void nouveau_mem_timing_init(struct drm_device *);
void nouveau_mem_timing_fini(struct drm_device *);
/* nv04_pm.c */
-int nv04_pm_clock_get(struct drm_device *, u32 id);
-void *nv04_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
- u32 id, int khz);
-void nv04_pm_clock_set(struct drm_device *, void *);
+int nv04_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
+void *nv04_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
+int nv04_pm_clocks_set(struct drm_device *, void *);
/* nv40_pm.c */
int nv40_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
void *nv40_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-void nv40_pm_clocks_set(struct drm_device *, void *);
+int nv40_pm_clocks_set(struct drm_device *, void *);
+int nv40_pm_pwm_get(struct drm_device *, int, u32 *, u32 *);
+int nv40_pm_pwm_set(struct drm_device *, int, u32, u32);
/* nv50_pm.c */
-int nv50_pm_clock_get(struct drm_device *, u32 id);
-void *nv50_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
- u32 id, int khz);
-void nv50_pm_clock_set(struct drm_device *, void *);
+int nv50_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
+void *nv50_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
+int nv50_pm_clocks_set(struct drm_device *, void *);
+int nv50_pm_pwm_get(struct drm_device *, int, u32 *, u32 *);
+int nv50_pm_pwm_set(struct drm_device *, int, u32, u32);
/* nva3_pm.c */
int nva3_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
void *nva3_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
-void nva3_pm_clocks_set(struct drm_device *, void *);
+int nva3_pm_clocks_set(struct drm_device *, void *);
/* nvc0_pm.c */
int nvc0_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
+void *nvc0_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
+int nvc0_pm_clocks_set(struct drm_device *, void *);
/* nouveau_temp.c */
void nouveau_temp_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index c8a463b..47f245e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -8,91 +8,30 @@
#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1)
struct nouveau_sgdma_be {
- struct ttm_backend backend;
+ /* this has to be the first field so that populate/unpopulate in
+ * nouveau_bo.c work properly, otherwise they have to be moved here
+ */
+ struct ttm_dma_tt ttm;
struct drm_device *dev;
-
- dma_addr_t *pages;
- unsigned nr_pages;
- bool unmap_pages;
-
u64 offset;
- bool bound;
};
-static int
-nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
- struct page **pages, struct page *dummy_read_page,
- dma_addr_t *dma_addrs)
-{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
- struct drm_device *dev = nvbe->dev;
- int i;
-
- NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);
-
- nvbe->pages = dma_addrs;
- nvbe->nr_pages = num_pages;
- nvbe->unmap_pages = true;
-
- /* this code path isn't called and is incorrect anyways */
- if (0) { /* dma_addrs[0] != DMA_ERROR_CODE) { */
- nvbe->unmap_pages = false;
- return 0;
- }
-
- for (i = 0; i < num_pages; i++) {
- nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) {
- nvbe->nr_pages = --i;
- be->func->clear(be);
- return -EFAULT;
- }
- }
-
- return 0;
-}
-
static void
-nouveau_sgdma_clear(struct ttm_backend *be)
+nouveau_sgdma_destroy(struct ttm_tt *ttm)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
- struct drm_device *dev = nvbe->dev;
-
- if (nvbe->bound)
- be->func->unbind(be);
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
- if (nvbe->unmap_pages) {
- while (nvbe->nr_pages--) {
- pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- }
- nvbe->unmap_pages = false;
- }
-
- nvbe->pages = NULL;
-}
-
-static void
-nouveau_sgdma_destroy(struct ttm_backend *be)
-{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-
- if (be) {
+ if (ttm) {
NV_DEBUG(nvbe->dev, "\n");
-
- if (nvbe) {
- if (nvbe->pages)
- be->func->clear(be);
- kfree(nvbe);
- }
+ ttm_dma_tt_fini(&nvbe->ttm);
+ kfree(nvbe);
}
}
static int
-nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_device *dev = nvbe->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
@@ -102,8 +41,8 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
nvbe->offset = mem->start << PAGE_SHIFT;
pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
- for (i = 0; i < nvbe->nr_pages; i++) {
- dma_addr_t dma_offset = nvbe->pages[i];
+ for (i = 0; i < ttm->num_pages; i++) {
+ dma_addr_t dma_offset = nvbe->ttm.dma_address[i];
uint32_t offset_l = lower_32_bits(dma_offset);
for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
@@ -112,14 +51,13 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
}
}
- nvbe->bound = true;
return 0;
}
static int
-nv04_sgdma_unbind(struct ttm_backend *be)
+nv04_sgdma_unbind(struct ttm_tt *ttm)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_device *dev = nvbe->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
@@ -127,22 +65,19 @@ nv04_sgdma_unbind(struct ttm_backend *be)
NV_DEBUG(dev, "\n");
- if (!nvbe->bound)
+ if (ttm->state != tt_bound)
return 0;
pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
- for (i = 0; i < nvbe->nr_pages; i++) {
+ for (i = 0; i < ttm->num_pages; i++) {
for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
}
- nvbe->bound = false;
return 0;
}
static struct ttm_backend_func nv04_sgdma_backend = {
- .populate = nouveau_sgdma_populate,
- .clear = nouveau_sgdma_clear,
.bind = nv04_sgdma_bind,
.unbind = nv04_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
@@ -161,14 +96,14 @@ nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
}
static int
-nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
- dma_addr_t *list = nvbe->pages;
+ dma_addr_t *list = nvbe->ttm.dma_address;
u32 pte = mem->start << 2;
- u32 cnt = nvbe->nr_pages;
+ u32 cnt = ttm->num_pages;
nvbe->offset = mem->start << PAGE_SHIFT;
@@ -178,18 +113,17 @@ nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
}
nv41_sgdma_flush(nvbe);
- nvbe->bound = true;
return 0;
}
static int
-nv41_sgdma_unbind(struct ttm_backend *be)
+nv41_sgdma_unbind(struct ttm_tt *ttm)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
u32 pte = (nvbe->offset >> 12) << 2;
- u32 cnt = nvbe->nr_pages;
+ u32 cnt = ttm->num_pages;
while (cnt--) {
nv_wo32(pgt, pte, 0x00000000);
@@ -197,24 +131,22 @@ nv41_sgdma_unbind(struct ttm_backend *be)
}
nv41_sgdma_flush(nvbe);
- nvbe->bound = false;
return 0;
}
static struct ttm_backend_func nv41_sgdma_backend = {
- .populate = nouveau_sgdma_populate,
- .clear = nouveau_sgdma_clear,
.bind = nv41_sgdma_bind,
.unbind = nv41_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
static void
-nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
+nv44_sgdma_flush(struct ttm_tt *ttm)
{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_device *dev = nvbe->dev;
- nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
+ nv_wr32(dev, 0x100814, (ttm->num_pages - 1) << 12);
nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
@@ -273,14 +205,14 @@ nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
}
static int
-nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
- dma_addr_t *list = nvbe->pages;
+ dma_addr_t *list = nvbe->ttm.dma_address;
u32 pte = mem->start << 2, tmp[4];
- u32 cnt = nvbe->nr_pages;
+ u32 cnt = ttm->num_pages;
int i;
nvbe->offset = mem->start << PAGE_SHIFT;
@@ -308,19 +240,18 @@ nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
if (cnt)
nv44_sgdma_fill(pgt, list, pte, cnt);
- nv44_sgdma_flush(nvbe);
- nvbe->bound = true;
+ nv44_sgdma_flush(ttm);
return 0;
}
static int
-nv44_sgdma_unbind(struct ttm_backend *be)
+nv44_sgdma_unbind(struct ttm_tt *ttm)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
u32 pte = (nvbe->offset >> 12) << 2;
- u32 cnt = nvbe->nr_pages;
+ u32 cnt = ttm->num_pages;
if (pte & 0x0000000c) {
u32 max = 4 - ((pte >> 2) & 0x3);
@@ -342,55 +273,47 @@ nv44_sgdma_unbind(struct ttm_backend *be)
if (cnt)
nv44_sgdma_fill(pgt, NULL, pte, cnt);
- nv44_sgdma_flush(nvbe);
- nvbe->bound = false;
+ nv44_sgdma_flush(ttm);
return 0;
}
static struct ttm_backend_func nv44_sgdma_backend = {
- .populate = nouveau_sgdma_populate,
- .clear = nouveau_sgdma_clear,
.bind = nv44_sgdma_bind,
.unbind = nv44_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
static int
-nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct nouveau_mem *node = mem->mm_node;
+
/* noop: bound in move_notify() */
- node->pages = nvbe->pages;
- nvbe->pages = (dma_addr_t *)node;
- nvbe->bound = true;
+ node->pages = nvbe->ttm.dma_address;
return 0;
}
static int
-nv50_sgdma_unbind(struct ttm_backend *be)
+nv50_sgdma_unbind(struct ttm_tt *ttm)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
- struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
/* noop: unbound in move_notify() */
- nvbe->pages = node->pages;
- node->pages = NULL;
- nvbe->bound = false;
return 0;
}
static struct ttm_backend_func nv50_sgdma_backend = {
- .populate = nouveau_sgdma_populate,
- .clear = nouveau_sgdma_clear,
.bind = nv50_sgdma_bind,
.unbind = nv50_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
-struct ttm_backend *
-nouveau_sgdma_init_ttm(struct drm_device *dev)
+struct ttm_tt *
+nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+ struct drm_device *dev = dev_priv->dev;
struct nouveau_sgdma_be *nvbe;
nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
@@ -398,9 +321,13 @@ nouveau_sgdma_init_ttm(struct drm_device *dev)
return NULL;
nvbe->dev = dev;
+ nvbe->ttm.ttm.func = dev_priv->gart_info.func;
- nvbe->backend.func = dev_priv->gart_info.func;
- return &nvbe->backend;
+ if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
+ kfree(nvbe);
+ return NULL;
+ }
+ return &nvbe->ttm.ttm;
}
int
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index d8831ab..f80c5e0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -36,6 +36,7 @@
#include "nouveau_drm.h"
#include "nouveau_fbcon.h"
#include "nouveau_ramht.h"
+#include "nouveau_gpio.h"
#include "nouveau_pm.h"
#include "nv50_display.h"
@@ -80,16 +81,12 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
- engine->display.init = nv04_display_init;
engine->display.destroy = nv04_display_destroy;
- engine->gpio.init = nouveau_stub_init;
- engine->gpio.takedown = nouveau_stub_takedown;
- engine->gpio.get = NULL;
- engine->gpio.set = NULL;
- engine->gpio.irq_enable = NULL;
- engine->pm.clock_get = nv04_pm_clock_get;
- engine->pm.clock_pre = nv04_pm_clock_pre;
- engine->pm.clock_set = nv04_pm_clock_set;
+ engine->display.init = nv04_display_init;
+ engine->display.fini = nv04_display_fini;
+ engine->pm.clocks_get = nv04_pm_clocks_get;
+ engine->pm.clocks_pre = nv04_pm_clocks_pre;
+ engine->pm.clocks_set = nv04_pm_clocks_set;
engine->vram.init = nouveau_mem_detect;
engine->vram.takedown = nouveau_stub_takedown;
engine->vram.flags_valid = nouveau_mem_flags_valid;
@@ -129,16 +126,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
- engine->display.init = nv04_display_init;
engine->display.destroy = nv04_display_destroy;
- engine->gpio.init = nouveau_stub_init;
- engine->gpio.takedown = nouveau_stub_takedown;
- engine->gpio.get = nv10_gpio_get;
- engine->gpio.set = nv10_gpio_set;
- engine->gpio.irq_enable = NULL;
- engine->pm.clock_get = nv04_pm_clock_get;
- engine->pm.clock_pre = nv04_pm_clock_pre;
- engine->pm.clock_set = nv04_pm_clock_set;
+ engine->display.init = nv04_display_init;
+ engine->display.fini = nv04_display_fini;
+ engine->gpio.drive = nv10_gpio_drive;
+ engine->gpio.sense = nv10_gpio_sense;
+ engine->pm.clocks_get = nv04_pm_clocks_get;
+ engine->pm.clocks_pre = nv04_pm_clocks_pre;
+ engine->pm.clocks_set = nv04_pm_clocks_set;
engine->vram.init = nouveau_mem_detect;
engine->vram.takedown = nouveau_stub_takedown;
engine->vram.flags_valid = nouveau_mem_flags_valid;
@@ -178,16 +173,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
- engine->display.init = nv04_display_init;
engine->display.destroy = nv04_display_destroy;
- engine->gpio.init = nouveau_stub_init;
- engine->gpio.takedown = nouveau_stub_takedown;
- engine->gpio.get = nv10_gpio_get;
- engine->gpio.set = nv10_gpio_set;
- engine->gpio.irq_enable = NULL;
- engine->pm.clock_get = nv04_pm_clock_get;
- engine->pm.clock_pre = nv04_pm_clock_pre;
- engine->pm.clock_set = nv04_pm_clock_set;
+ engine->display.init = nv04_display_init;
+ engine->display.fini = nv04_display_fini;
+ engine->gpio.drive = nv10_gpio_drive;
+ engine->gpio.sense = nv10_gpio_sense;
+ engine->pm.clocks_get = nv04_pm_clocks_get;
+ engine->pm.clocks_pre = nv04_pm_clocks_pre;
+ engine->pm.clocks_set = nv04_pm_clocks_set;
engine->vram.init = nouveau_mem_detect;
engine->vram.takedown = nouveau_stub_takedown;
engine->vram.flags_valid = nouveau_mem_flags_valid;
@@ -227,16 +220,14 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
- engine->display.init = nv04_display_init;
engine->display.destroy = nv04_display_destroy;
- engine->gpio.init = nouveau_stub_init;
- engine->gpio.takedown = nouveau_stub_takedown;
- engine->gpio.get = nv10_gpio_get;
- engine->gpio.set = nv10_gpio_set;
- engine->gpio.irq_enable = NULL;
- engine->pm.clock_get = nv04_pm_clock_get;
- engine->pm.clock_pre = nv04_pm_clock_pre;
- engine->pm.clock_set = nv04_pm_clock_set;
+ engine->display.init = nv04_display_init;
+ engine->display.fini = nv04_display_fini;
+ engine->gpio.drive = nv10_gpio_drive;
+ engine->gpio.sense = nv10_gpio_sense;
+ engine->pm.clocks_get = nv04_pm_clocks_get;
+ engine->pm.clocks_pre = nv04_pm_clocks_pre;
+ engine->pm.clocks_set = nv04_pm_clocks_set;
engine->pm.voltage_get = nouveau_voltage_gpio_get;
engine->pm.voltage_set = nouveau_voltage_gpio_set;
engine->vram.init = nouveau_mem_detect;
@@ -279,19 +270,22 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nv04_display_early_init;
engine->display.late_takedown = nv04_display_late_takedown;
engine->display.create = nv04_display_create;
- engine->display.init = nv04_display_init;
engine->display.destroy = nv04_display_destroy;
- engine->gpio.init = nouveau_stub_init;
- engine->gpio.takedown = nouveau_stub_takedown;
- engine->gpio.get = nv10_gpio_get;
- engine->gpio.set = nv10_gpio_set;
- engine->gpio.irq_enable = NULL;
+ engine->display.init = nv04_display_init;
+ engine->display.fini = nv04_display_fini;
+ engine->gpio.init = nv10_gpio_init;
+ engine->gpio.fini = nv10_gpio_fini;
+ engine->gpio.drive = nv10_gpio_drive;
+ engine->gpio.sense = nv10_gpio_sense;
+ engine->gpio.irq_enable = nv10_gpio_irq_enable;
engine->pm.clocks_get = nv40_pm_clocks_get;
engine->pm.clocks_pre = nv40_pm_clocks_pre;
engine->pm.clocks_set = nv40_pm_clocks_set;
engine->pm.voltage_get = nouveau_voltage_gpio_get;
engine->pm.voltage_set = nouveau_voltage_gpio_set;
engine->pm.temp_get = nv40_temp_get;
+ engine->pm.pwm_get = nv40_pm_pwm_get;
+ engine->pm.pwm_set = nv40_pm_pwm_set;
engine->vram.init = nouveau_mem_detect;
engine->vram.takedown = nouveau_stub_takedown;
engine->vram.flags_valid = nouveau_mem_flags_valid;
@@ -334,14 +328,13 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nv50_display_early_init;
engine->display.late_takedown = nv50_display_late_takedown;
engine->display.create = nv50_display_create;
- engine->display.init = nv50_display_init;
engine->display.destroy = nv50_display_destroy;
+ engine->display.init = nv50_display_init;
+ engine->display.fini = nv50_display_fini;
engine->gpio.init = nv50_gpio_init;
- engine->gpio.takedown = nv50_gpio_fini;
- engine->gpio.get = nv50_gpio_get;
- engine->gpio.set = nv50_gpio_set;
- engine->gpio.irq_register = nv50_gpio_irq_register;
- engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
+ engine->gpio.fini = nv50_gpio_fini;
+ engine->gpio.drive = nv50_gpio_drive;
+ engine->gpio.sense = nv50_gpio_sense;
engine->gpio.irq_enable = nv50_gpio_irq_enable;
switch (dev_priv->chipset) {
case 0x84:
@@ -354,9 +347,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
case 0xaa:
case 0xac:
case 0x50:
- engine->pm.clock_get = nv50_pm_clock_get;
- engine->pm.clock_pre = nv50_pm_clock_pre;
- engine->pm.clock_set = nv50_pm_clock_set;
+ engine->pm.clocks_get = nv50_pm_clocks_get;
+ engine->pm.clocks_pre = nv50_pm_clocks_pre;
+ engine->pm.clocks_set = nv50_pm_clocks_set;
break;
default:
engine->pm.clocks_get = nva3_pm_clocks_get;
@@ -370,6 +363,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->pm.temp_get = nv84_temp_get;
else
engine->pm.temp_get = nv40_temp_get;
+ engine->pm.pwm_get = nv50_pm_pwm_get;
+ engine->pm.pwm_set = nv50_pm_pwm_set;
engine->vram.init = nv50_vram_init;
engine->vram.takedown = nv50_vram_fini;
engine->vram.get = nv50_vram_new;
@@ -407,14 +402,13 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nv50_display_early_init;
engine->display.late_takedown = nv50_display_late_takedown;
engine->display.create = nv50_display_create;
- engine->display.init = nv50_display_init;
engine->display.destroy = nv50_display_destroy;
+ engine->display.init = nv50_display_init;
+ engine->display.fini = nv50_display_fini;
engine->gpio.init = nv50_gpio_init;
- engine->gpio.takedown = nouveau_stub_takedown;
- engine->gpio.get = nv50_gpio_get;
- engine->gpio.set = nv50_gpio_set;
- engine->gpio.irq_register = nv50_gpio_irq_register;
- engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
+ engine->gpio.fini = nv50_gpio_fini;
+ engine->gpio.drive = nv50_gpio_drive;
+ engine->gpio.sense = nv50_gpio_sense;
engine->gpio.irq_enable = nv50_gpio_irq_enable;
engine->vram.init = nvc0_vram_init;
engine->vram.takedown = nv50_vram_fini;
@@ -423,8 +417,12 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->vram.flags_valid = nvc0_vram_flags_valid;
engine->pm.temp_get = nv84_temp_get;
engine->pm.clocks_get = nvc0_pm_clocks_get;
+ engine->pm.clocks_pre = nvc0_pm_clocks_pre;
+ engine->pm.clocks_set = nvc0_pm_clocks_set;
engine->pm.voltage_get = nouveau_voltage_gpio_get;
engine->pm.voltage_set = nouveau_voltage_gpio_set;
+ engine->pm.pwm_get = nv50_pm_pwm_get;
+ engine->pm.pwm_set = nv50_pm_pwm_set;
break;
case 0xd0:
engine->instmem.init = nvc0_instmem_init;
@@ -457,21 +455,23 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->display.early_init = nouveau_stub_init;
engine->display.late_takedown = nouveau_stub_takedown;
engine->display.create = nvd0_display_create;
- engine->display.init = nvd0_display_init;
engine->display.destroy = nvd0_display_destroy;
+ engine->display.init = nvd0_display_init;
+ engine->display.fini = nvd0_display_fini;
engine->gpio.init = nv50_gpio_init;
- engine->gpio.takedown = nouveau_stub_takedown;
- engine->gpio.get = nvd0_gpio_get;
- engine->gpio.set = nvd0_gpio_set;
- engine->gpio.irq_register = nv50_gpio_irq_register;
- engine->gpio.irq_unregister = nv50_gpio_irq_unregister;
+ engine->gpio.fini = nv50_gpio_fini;
+ engine->gpio.drive = nvd0_gpio_drive;
+ engine->gpio.sense = nvd0_gpio_sense;
engine->gpio.irq_enable = nv50_gpio_irq_enable;
engine->vram.init = nvc0_vram_init;
engine->vram.takedown = nv50_vram_fini;
engine->vram.get = nvc0_vram_new;
engine->vram.put = nv50_vram_del;
engine->vram.flags_valid = nvc0_vram_flags_valid;
+ engine->pm.temp_get = nv84_temp_get;
engine->pm.clocks_get = nvc0_pm_clocks_get;
+ engine->pm.clocks_pre = nvc0_pm_clocks_pre;
+ engine->pm.clocks_set = nvc0_pm_clocks_set;
engine->pm.voltage_get = nouveau_voltage_gpio_get;
engine->pm.voltage_set = nouveau_voltage_gpio_set;
break;
@@ -525,6 +525,7 @@ static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(dev);
+ nouveau_switcheroo_optimus_dsm();
nouveau_pci_suspend(pdev, pmm);
dev->switch_power_state = DRM_SWITCH_POWER_OFF;
}
@@ -615,7 +616,7 @@ nouveau_card_init(struct drm_device *dev)
goto out_gart;
/* PGPIO */
- ret = engine->gpio.init(dev);
+ ret = nouveau_gpio_create(dev);
if (ret)
goto out_mc;
@@ -648,6 +649,7 @@ nouveau_card_init(struct drm_device *dev)
nv50_graph_create(dev);
break;
case NV_C0:
+ case NV_D0:
nvc0_graph_create(dev);
break;
default:
@@ -663,6 +665,11 @@ nouveau_card_init(struct drm_device *dev)
case 0xa0:
nv84_crypt_create(dev);
break;
+ case 0x98:
+ case 0xaa:
+ case 0xac:
+ nv98_crypt_create(dev);
+ break;
}
switch (dev_priv->card_type) {
@@ -684,15 +691,25 @@ nouveau_card_init(struct drm_device *dev)
break;
}
+ if (dev_priv->chipset >= 0xa3 || dev_priv->chipset == 0x98) {
+ nv84_bsp_create(dev);
+ nv84_vp_create(dev);
+ nv98_ppp_create(dev);
+ } else
+ if (dev_priv->chipset >= 0x84) {
+ nv50_mpeg_create(dev);
+ nv84_bsp_create(dev);
+ nv84_vp_create(dev);
+ } else
+ if (dev_priv->chipset >= 0x50) {
+ nv50_mpeg_create(dev);
+ } else
if (dev_priv->card_type == NV_40 ||
dev_priv->chipset == 0x31 ||
dev_priv->chipset == 0x34 ||
- dev_priv->chipset == 0x36)
+ dev_priv->chipset == 0x36) {
nv31_mpeg_create(dev);
- else
- if (dev_priv->card_type == NV_50 &&
- (dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0))
- nv50_mpeg_create(dev);
+ }
for (e = 0; e < NVOBJ_ENGINE_NR; e++) {
if (dev_priv->eng[e]) {
@@ -712,27 +729,7 @@ nouveau_card_init(struct drm_device *dev)
if (ret)
goto out_fifo;
- /* initialise general modesetting */
- drm_mode_config_init(dev);
- drm_mode_create_scaling_mode_property(dev);
- drm_mode_create_dithering_property(dev);
- dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
- dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1);
- dev->mode_config.min_width = 0;
- dev->mode_config.min_height = 0;
- if (dev_priv->card_type < NV_10) {
- dev->mode_config.max_width = 2048;
- dev->mode_config.max_height = 2048;
- } else
- if (dev_priv->card_type < NV_50) {
- dev->mode_config.max_width = 4096;
- dev->mode_config.max_height = 4096;
- } else {
- dev->mode_config.max_width = 8192;
- dev->mode_config.max_height = 8192;
- }
-
- ret = engine->display.create(dev);
+ ret = nouveau_display_create(dev);
if (ret)
goto out_irq;
@@ -752,12 +749,11 @@ nouveau_card_init(struct drm_device *dev)
}
if (dev->mode_config.num_crtc) {
- ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+ ret = nouveau_display_init(dev);
if (ret)
goto out_chan;
nouveau_fbcon_init(dev);
- drm_kms_helper_poll_init(dev);
}
return 0;
@@ -768,7 +764,7 @@ out_fence:
nouveau_fence_fini(dev);
out_disp:
nouveau_backlight_exit(dev);
- engine->display.destroy(dev);
+ nouveau_display_destroy(dev);
out_irq:
nouveau_irq_fini(dev);
out_fifo:
@@ -788,7 +784,7 @@ out_engine:
out_timer:
engine->timer.takedown(dev);
out_gpio:
- engine->gpio.takedown(dev);
+ nouveau_gpio_destroy(dev);
out_mc:
engine->mc.takedown(dev);
out_gart:
@@ -818,9 +814,8 @@ static void nouveau_card_takedown(struct drm_device *dev)
int e;
if (dev->mode_config.num_crtc) {
- drm_kms_helper_poll_fini(dev);
nouveau_fbcon_fini(dev);
- drm_vblank_cleanup(dev);
+ nouveau_display_fini(dev);
}
if (dev_priv->channel) {
@@ -829,8 +824,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
}
nouveau_backlight_exit(dev);
- engine->display.destroy(dev);
- drm_mode_config_cleanup(dev);
+ nouveau_display_destroy(dev);
if (!dev_priv->noaccel) {
engine->fifo.takedown(dev);
@@ -843,7 +837,7 @@ static void nouveau_card_takedown(struct drm_device *dev)
}
engine->fb.takedown(dev);
engine->timer.takedown(dev);
- engine->gpio.takedown(dev);
+ nouveau_gpio_destroy(dev);
engine->mc.takedown(dev);
engine->display.late_takedown(dev);
@@ -1110,13 +1104,11 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
dev_priv->noaccel = !!nouveau_noaccel;
if (nouveau_noaccel == -1) {
switch (dev_priv->chipset) {
-#if 0
- case 0xXX: /* known broken */
+ case 0xd9: /* known broken */
NV_INFO(dev, "acceleration disabled by default, pass "
"noaccel=0 to force enable\n");
dev_priv->noaccel = true;
break;
-#endif
default:
dev_priv->noaccel = false;
break;
@@ -1238,7 +1230,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
getparam->value = 1;
break;
case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
- getparam->value = dev_priv->card_type < NV_D0;
+ getparam->value = 1;
break;
case NOUVEAU_GETPARAM_GRAPH_UNITS:
/* NV40 and NV50 versions are quite different, but register
diff --git a/drivers/gpu/drm/nouveau/nouveau_temp.c b/drivers/gpu/drm/nouveau/nouveau_temp.c
index 5a46446..0f5a301 100644
--- a/drivers/gpu/drm/nouveau/nouveau_temp.c
+++ b/drivers/gpu/drm/nouveau/nouveau_temp.c
@@ -55,6 +55,10 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
temps->down_clock = 100;
temps->fan_boost = 90;
+ /* Set the default range for the pwm fan */
+ pm->fan.min_duty = 30;
+ pm->fan.max_duty = 100;
+
/* Set the known default values to setup the temperature sensor */
if (dev_priv->card_type >= NV_40) {
switch (dev_priv->chipset) {
@@ -156,11 +160,26 @@ nouveau_temp_vbios_parse(struct drm_device *dev, u8 *temp)
case 0x13:
sensor->slope_div = value;
break;
+ case 0x22:
+ pm->fan.min_duty = value & 0xff;
+ pm->fan.max_duty = (value & 0xff00) >> 8;
+ break;
+ case 0x26:
+ pm->fan.pwm_freq = value;
+ break;
}
temp += recordlen;
}
nouveau_temp_safety_checks(dev);
+
+ /* check the fan min/max settings */
+ if (pm->fan.min_duty < 10)
+ pm->fan.min_duty = 10;
+ if (pm->fan.max_duty > 100)
+ pm->fan.max_duty = 100;
+ if (pm->fan.max_duty < pm->fan.min_duty)
+ pm->fan.max_duty = pm->fan.min_duty;
}
static int
@@ -267,8 +286,6 @@ probe_monitoring_device(struct nouveau_i2c_chan *i2c,
static void
nouveau_temp_probe_i2c(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct dcb_table *dcb = &dev_priv->vbios.dcb;
struct i2c_board_info info[] = {
{ I2C_BOARD_INFO("w83l785ts", 0x2d) },
{ I2C_BOARD_INFO("w83781d", 0x2d) },
@@ -277,11 +294,9 @@ nouveau_temp_probe_i2c(struct drm_device *dev)
{ I2C_BOARD_INFO("lm99", 0x4c) },
{ }
};
- int idx = (dcb->version >= 0x40 ?
- dcb->i2c_default_indices & 0xf : 2);
nouveau_i2c_identify(dev, "monitoring device", info,
- probe_monitoring_device, idx);
+ probe_monitoring_device, NV_I2C_DEFAULT(0));
}
void
@@ -297,9 +312,9 @@ nouveau_temp_init(struct drm_device *dev)
return;
if (P.version == 1)
- temp = ROMPTR(bios, P.data[12]);
+ temp = ROMPTR(dev, P.data[12]);
else if (P.version == 2)
- temp = ROMPTR(bios, P.data[16]);
+ temp = ROMPTR(dev, P.data[16]);
else
NV_WARN(dev, "unknown temp for BIT P %d\n", P.version);
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index ef0832b..2bf6c03 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -78,9 +78,10 @@ nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
- struct nouveau_mem *mem, dma_addr_t *list)
+ struct nouveau_mem *mem)
{
struct nouveau_vm *vm = vma->vm;
+ dma_addr_t *list = mem->pages;
int big = vma->node->type != vm->spg_shift;
u32 offset = vma->node->offset + (delta >> 12);
u32 bits = vma->node->type - 12;
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.h b/drivers/gpu/drm/nouveau/nouveau_vm.h
index 6ce995f..4fb6e72 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.h
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.h
@@ -89,7 +89,7 @@ void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
void nouveau_vm_unmap(struct nouveau_vma *);
void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
- struct nouveau_mem *, dma_addr_t *);
+ struct nouveau_mem *);
/* nv50_vm.c */
void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
diff --git a/drivers/gpu/drm/nouveau/nouveau_volt.c b/drivers/gpu/drm/nouveau/nouveau_volt.c
index 86d03e1..b010cb9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_volt.c
+++ b/drivers/gpu/drm/nouveau/nouveau_volt.c
@@ -26,6 +26,7 @@
#include "nouveau_drv.h"
#include "nouveau_pm.h"
+#include "nouveau_gpio.h"
static const enum dcb_gpio_tag vidtag[] = { 0x04, 0x05, 0x06, 0x1a, 0x73 };
static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]);
@@ -34,7 +35,6 @@ int
nouveau_voltage_gpio_get(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
u8 vid = 0;
int i;
@@ -43,7 +43,7 @@ nouveau_voltage_gpio_get(struct drm_device *dev)
if (!(volt->vid_mask & (1 << i)))
continue;
- vid |= gpio->get(dev, vidtag[i]) << i;
+ vid |= nouveau_gpio_func_get(dev, vidtag[i]) << i;
}
return nouveau_volt_lvl_lookup(dev, vid);
@@ -53,7 +53,6 @@ int
nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
struct nouveau_pm_voltage *volt = &dev_priv->engine.pm.voltage;
int vid, i;
@@ -65,7 +64,7 @@ nouveau_voltage_gpio_set(struct drm_device *dev, int voltage)
if (!(volt->vid_mask & (1 << i)))
continue;
- gpio->set(dev, vidtag[i], !!(vid & (1 << i)));
+ nouveau_gpio_func_set(dev, vidtag[i], !!(vid & (1 << i)));
}
return 0;
@@ -117,10 +116,10 @@ nouveau_volt_init(struct drm_device *dev)
return;
if (P.version == 1)
- volt = ROMPTR(bios, P.data[16]);
+ volt = ROMPTR(dev, P.data[16]);
else
if (P.version == 2)
- volt = ROMPTR(bios, P.data[12]);
+ volt = ROMPTR(dev, P.data[12]);
else {
NV_WARN(dev, "unknown volt for BIT P %d\n", P.version);
}
@@ -130,7 +129,7 @@ nouveau_volt_init(struct drm_device *dev)
return;
}
- volt = ROMPTR(bios, bios->data[bios->offset + 0x98]);
+ volt = ROMPTR(dev, bios->data[bios->offset + 0x98]);
}
if (!volt) {
@@ -194,7 +193,7 @@ nouveau_volt_init(struct drm_device *dev)
return;
}
- if (!nouveau_bios_gpio_entry(dev, vidtag[i])) {
+ if (!nouveau_gpio_func_valid(dev, vidtag[i])) {
NV_DEBUG(dev, "vid bit %d has no gpio tag\n", i);
return;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 5e45398..728d075 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -364,7 +364,7 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
regp->CRTC[NV_CIO_CR_VRE_INDEX] = 1 << 5 | XLATE(vertEnd, 0, NV_CIO_CR_VRE_3_0);
regp->CRTC[NV_CIO_CR_VDE_INDEX] = vertDisplay;
/* framebuffer can be larger than crtc scanout area. */
- regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = fb->pitch / 8;
+ regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = fb->pitches[0] / 8;
regp->CRTC[NV_CIO_CR_ULINE_INDEX] = 0x00;
regp->CRTC[NV_CIO_CR_VBS_INDEX] = vertBlankStart;
regp->CRTC[NV_CIO_CR_VBE_INDEX] = vertBlankEnd;
@@ -377,9 +377,9 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
/* framebuffer can be larger than crtc scanout area. */
regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
- XLATE(fb->pitch / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
+ XLATE(fb->pitches[0] / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
regp->CRTC[NV_CIO_CRE_42] =
- XLATE(fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11);
+ XLATE(fb->pitches[0] / 8, 11, NV_CIO_CRE_42_OFFSET_11);
regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ?
MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00;
regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) |
@@ -835,18 +835,18 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_GENERAL_CONTROL,
regp->ramdac_gen_ctrl);
- regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitch >> 3;
+ regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitches[0] >> 3;
regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
- XLATE(drm_fb->pitch >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
+ XLATE(drm_fb->pitches[0] >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
regp->CRTC[NV_CIO_CRE_42] =
- XLATE(drm_fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11);
+ XLATE(drm_fb->pitches[0] / 8, 11, NV_CIO_CRE_42_OFFSET_11);
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX);
crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX);
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_42);
/* Update the framebuffer location. */
regp->fb_start = nv_crtc->fb.offset & ~3;
- regp->fb_start += (y * drm_fb->pitch) + (x * drm_fb->bits_per_pixel / 8);
+ regp->fb_start += (y * drm_fb->pitches[0]) + (x * drm_fb->bits_per_pixel / 8);
nv_set_crtc_base(dev, nv_crtc->index, regp->fb_start);
/* Update the arbitration parameters. */
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index e000455..8300266 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -32,6 +32,7 @@
#include "nouveau_connector.h"
#include "nouveau_crtc.h"
#include "nouveau_hw.h"
+#include "nouveau_gpio.h"
#include "nvreg.h"
int nv04_dac_output_offset(struct drm_encoder *encoder)
@@ -220,7 +221,6 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
@@ -252,11 +252,11 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4 & 0xffffffcf);
}
- saved_gpio1 = gpio->get(dev, DCB_GPIO_TVDAC1);
- saved_gpio0 = gpio->get(dev, DCB_GPIO_TVDAC0);
+ saved_gpio1 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC1);
+ saved_gpio0 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC0);
- gpio->set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV);
- gpio->set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, dcb->type == OUTPUT_TV);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, dcb->type == OUTPUT_TV);
msleep(4);
@@ -306,8 +306,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
nvWriteMC(dev, NV_PBUS_POWERCTRL_4, saved_powerctrl_4);
nvWriteMC(dev, NV_PBUS_POWERCTRL_2, saved_powerctrl_2);
- gpio->set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
- gpio->set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, saved_gpio0);
return sample;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 12098bf..2258746 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -289,6 +289,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
struct nouveau_connector *nv_connector = nouveau_crtc_connector_get(nv_crtc);
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct drm_display_mode *output_mode = &nv_encoder->mode;
+ struct drm_connector *connector = &nv_connector->base;
uint32_t mode_ratio, panel_ratio;
NV_DEBUG_KMS(dev, "Output mode on CRTC %d:\n", nv_crtc->index);
@@ -340,10 +341,15 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
output_mode->clock > 165000)
regp->fp_control |= (2 << 24);
if (nv_encoder->dcb->type == OUTPUT_LVDS) {
- bool duallink, dummy;
+ bool duallink = false, dummy;
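+ /* SPWG LVDS panels report dual-link (a value of 2) in EDID byte 121 */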
+ if (nv_connector->edid &&
+ nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
+ duallink = (((u8 *)nv_connector->edid)[121] == 2);
+ } else {
+ nouveau_bios_parse_lvds_table(dev, output_mode->clock,
+ &duallink, &dummy);
+ }
- nouveau_bios_parse_lvds_table(dev, output_mode->clock,
- &duallink, &dummy);
if (duallink)
regp->fp_control |= (8 << 28);
} else
@@ -407,7 +413,9 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
}
/* Output property. */
- if (nv_connector->use_dithering) {
+ if ((nv_connector->dithering_mode == DITHERING_MODE_ON) ||
+ (nv_connector->dithering_mode == DITHERING_MODE_AUTO &&
+ encoder->crtc->fb->depth > connector->display_info.bpc * 3)) {
if (dev_priv->chipset == 0x11)
regp->dither = savep->dither | 0x00010000;
else {
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index 6bd8518..7047d37 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -243,6 +243,11 @@ nv04_display_init(struct drm_device *dev)
return 0;
}
+void
+nv04_display_fini(struct drm_device *dev)
+{
+}
+
static void
nv04_vblank_crtc0_isr(struct drm_device *dev)
{
diff --git a/drivers/gpu/drm/nouveau/nv04_pm.c b/drivers/gpu/drm/nouveau/nv04_pm.c
index 9ae92a8..6e75899 100644
--- a/drivers/gpu/drm/nouveau/nv04_pm.c
+++ b/drivers/gpu/drm/nouveau/nv04_pm.c
@@ -27,68 +27,111 @@
#include "nouveau_hw.h"
#include "nouveau_pm.h"
-struct nv04_pm_state {
+int
+nv04_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+ int ret;
+
+ ret = nouveau_hw_get_clock(dev, PLL_CORE);
+ if (ret < 0)
+ return ret;
+ perflvl->core = ret;
+
+ ret = nouveau_hw_get_clock(dev, PLL_MEMORY);
+ if (ret < 0)
+ return ret;
+ perflvl->memory = ret;
+
+ return 0;
+}
+
+struct nv04_pm_clock {
struct pll_lims pll;
struct nouveau_pll_vals calc;
};
-int
-nv04_pm_clock_get(struct drm_device *dev, u32 id)
+struct nv04_pm_state {
+ struct nv04_pm_clock core;
+ struct nv04_pm_clock memory;
+};
+
+static int
+calc_pll(struct drm_device *dev, u32 id, int khz, struct nv04_pm_clock *clk)
{
- return nouveau_hw_get_clock(dev, id);
+ int ret;
+
+ ret = get_pll_limits(dev, id, &clk->pll);
+ if (ret)
+ return ret;
+
+ ret = nouveau_calc_pll_mnp(dev, &clk->pll, khz, &clk->calc);
+ if (!ret)
+ return -EINVAL;
+
+ return 0;
}
void *
-nv04_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
- u32 id, int khz)
+nv04_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
- struct nv04_pm_state *state;
+ struct nv04_pm_state *info;
int ret;
- state = kzalloc(sizeof(*state), GFP_KERNEL);
- if (!state)
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
return ERR_PTR(-ENOMEM);
- ret = get_pll_limits(dev, id, &state->pll);
- if (ret) {
- kfree(state);
- return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
- }
+ ret = calc_pll(dev, PLL_CORE, perflvl->core, &info->core);
+ if (ret)
+ goto error;
- ret = nouveau_calc_pll_mnp(dev, &state->pll, khz, &state->calc);
- if (!ret) {
- kfree(state);
- return ERR_PTR(-EINVAL);
+ if (perflvl->memory) {
+ ret = calc_pll(dev, PLL_MEMORY, perflvl->memory, &info->memory);
+ if (ret)
+ goto error;
}
- return state;
+ return info;
+error:
+ kfree(info);
+ return ERR_PTR(ret);
}
-void
-nv04_pm_clock_set(struct drm_device *dev, void *pre_state)
+static void
+prog_pll(struct drm_device *dev, struct nv04_pm_clock *clk)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
- struct nv04_pm_state *state = pre_state;
- u32 reg = state->pll.reg;
+ u32 reg = clk->pll.reg;
/* thank the insane nouveau_hw_setpll() interface for this */
if (dev_priv->card_type >= NV_40)
reg += 4;
- nouveau_hw_setpll(dev, reg, &state->calc);
+ nouveau_hw_setpll(dev, reg, &clk->calc);
+}
+
+int
+nv04_pm_clocks_set(struct drm_device *dev, void *pre_state)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ struct nv04_pm_state *state = pre_state;
+
+ prog_pll(dev, &state->core);
- if (dev_priv->card_type < NV_30 && reg == NV_PRAMDAC_MPLL_COEFF) {
- if (dev_priv->card_type == NV_20)
- nv_mask(dev, 0x1002c4, 0, 1 << 20);
+ if (state->memory.pll.reg) {
+ prog_pll(dev, &state->memory);
+ if (dev_priv->card_type < NV_30) {
+ if (dev_priv->card_type == NV_20)
+ nv_mask(dev, 0x1002c4, 0, 1 << 20);
- /* Reset the DLLs */
- nv_mask(dev, 0x1002c0, 0, 1 << 8);
+ /* Reset the DLLs */
+ nv_mask(dev, 0x1002c0, 0, 1 << 8);
+ }
}
- if (reg == NV_PRAMDAC_NVPLL_COEFF)
- ptimer->init(dev);
+ ptimer->init(dev);
kfree(state);
+ return 0;
}
-
diff --git a/drivers/gpu/drm/nouveau/nv04_timer.c b/drivers/gpu/drm/nouveau/nv04_timer.c
index 263301b..55c9452 100644
--- a/drivers/gpu/drm/nouveau/nv04_timer.c
+++ b/drivers/gpu/drm/nouveau/nv04_timer.c
@@ -2,6 +2,7 @@
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
+#include "nouveau_hw.h"
int
nv04_timer_init(struct drm_device *dev)
@@ -17,7 +18,7 @@ nv04_timer_init(struct drm_device *dev)
/* determine base clock for timer source */
if (dev_priv->chipset < 0x40) {
- n = dev_priv->engine.pm.clock_get(dev, PLL_CORE);
+ n = nouveau_hw_get_clock(dev, PLL_CORE);
} else
if (dev_priv->chipset == 0x40) {
/*XXX: figure this out */
diff --git a/drivers/gpu/drm/nouveau/nv10_gpio.c b/drivers/gpu/drm/nouveau/nv10_gpio.c
index 007fc29..550ad3f 100644
--- a/drivers/gpu/drm/nouveau/nv10_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv10_gpio.c
@@ -27,66 +27,97 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_hw.h"
+#include "nouveau_gpio.h"
-static bool
-get_gpio_location(struct dcb_gpio_entry *ent, uint32_t *reg, uint32_t *shift,
- uint32_t *mask)
+int
+nv10_gpio_sense(struct drm_device *dev, int line)
{
- if (ent->line < 2) {
- *reg = NV_PCRTC_GPIO;
- *shift = ent->line * 16;
- *mask = 0x11;
-
- } else if (ent->line < 10) {
- *reg = NV_PCRTC_GPIO_EXT;
- *shift = (ent->line - 2) * 4;
- *mask = 0x3;
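+ /* lines 0-1 live in NV_PCRTC_GPIO (16 bits per line), lines 2-9 in
+ * NV_PCRTC_GPIO_EXT and lines 10-13 in NV_PCRTC_850 (4 bits per line)
+ */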
+ if (line < 2) {
+ line = line * 16;
+ line = NVReadCRTC(dev, 0, NV_PCRTC_GPIO) >> line;
+ return !!(line & 0x0100);
+ } else
+ if (line < 10) {
+ line = (line - 2) * 4;
+ line = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT) >> line;
+ return !!(line & 0x04);
+ } else
+ if (line < 14) {
+ line = (line - 10) * 4;
+ line = NVReadCRTC(dev, 0, NV_PCRTC_850) >> line;
+ return !!(line & 0x04);
+ }
- } else if (ent->line < 14) {
- *reg = NV_PCRTC_850;
- *shift = (ent->line - 10) * 4;
- *mask = 0x3;
+ return -EINVAL;
+}
+int
+nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out)
+{
+ u32 reg, mask, data;
+
+ if (line < 2) {
+ line = line * 16;
+ reg = NV_PCRTC_GPIO;
+ mask = 0x00000011;
+ data = (dir << 4) | out;
+ } else
+ if (line < 10) {
+ line = (line - 2) * 4;
+ reg = NV_PCRTC_GPIO_EXT;
+ mask = 0x00000003;
+ data = (dir << 1) | out;
+ } else
+ if (line < 14) {
+ line = (line - 10) * 4;
+ reg = NV_PCRTC_850;
+ mask = 0x00000003;
+ data = (dir << 1) | out;
} else {
- return false;
+ return -EINVAL;
}
- return true;
+ mask = NVReadCRTC(dev, 0, reg) & ~(mask << line);
+ NVWriteCRTC(dev, 0, reg, mask | (data << line));
+ return 0;
}
-int
-nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
+void
+nv10_gpio_irq_enable(struct drm_device *dev, int line, bool on)
{
- struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
- uint32_t reg, shift, mask, value;
+ u32 mask = 0x00010001 << line;
- if (!ent)
- return -ENODEV;
+ nv_wr32(dev, 0x001104, mask);
+ nv_mask(dev, 0x001144, mask, on ? mask : 0);
+}
- if (!get_gpio_location(ent, &reg, &shift, &mask))
- return -ENODEV;
+static void
+nv10_gpio_isr(struct drm_device *dev)
+{
+ u32 intr = nv_rd32(dev, 0x1104);
+ u32 hi = (intr & 0x0000ffff) >> 0;
+ u32 lo = (intr & 0xffff0000) >> 16;
- value = NVReadCRTC(dev, 0, reg) >> shift;
+ nouveau_gpio_isr(dev, 0, hi | lo);
- return (ent->invert ? 1 : 0) ^ (value & 1);
+ nv_wr32(dev, 0x001104, intr);
}
int
-nv10_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
+nv10_gpio_init(struct drm_device *dev)
{
- struct dcb_gpio_entry *ent = nouveau_bios_gpio_entry(dev, tag);
- uint32_t reg, shift, mask, value;
-
- if (!ent)
- return -ENODEV;
-
- if (!get_gpio_location(ent, &reg, &shift, &mask))
- return -ENODEV;
-
- value = ((ent->invert ? 1 : 0) ^ (state ? 1 : 0)) << shift;
- mask = ~(mask << shift);
-
- NVWriteCRTC(dev, 0, reg, value | (NVReadCRTC(dev, 0, reg) & mask));
-
+ nv_wr32(dev, 0x001140, 0x00000000);
+ nv_wr32(dev, 0x001100, 0xffffffff);
+ nv_wr32(dev, 0x001144, 0x00000000);
+ nv_wr32(dev, 0x001104, 0xffffffff);
+ nouveau_irq_register(dev, 28, nv10_gpio_isr); /* PBUS */
return 0;
}
+
+void
+nv10_gpio_fini(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x001140, 0x00000000);
+ nv_wr32(dev, 0x001144, 0x00000000);
+ nouveau_irq_unregister(dev, 28);
+}
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 3900ceb..696d7e7 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -30,6 +30,7 @@
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_crtc.h"
+#include "nouveau_gpio.h"
#include "nouveau_hw.h"
#include "nv17_tv.h"
@@ -37,7 +38,6 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
@@ -53,8 +53,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
head = (dacclk & 0x100) >> 8;
/* Save the previous state. */
- gpio1 = gpio->get(dev, DCB_GPIO_TVDAC1);
- gpio0 = gpio->get(dev, DCB_GPIO_TVDAC0);
+ gpio1 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC1);
+ gpio0 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC0);
fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
@@ -65,8 +65,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);
/* Prepare the DAC for load detection. */
- gpio->set(dev, DCB_GPIO_TVDAC1, true);
- gpio->set(dev, DCB_GPIO_TVDAC0, true);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, true);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, true);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
@@ -111,8 +111,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
- gpio->set(dev, DCB_GPIO_TVDAC1, gpio1);
- gpio->set(dev, DCB_GPIO_TVDAC0, gpio0);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, gpio1);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, gpio0);
return sample;
}
@@ -357,8 +357,6 @@ static bool nv17_tv_mode_fixup(struct drm_encoder *encoder,
static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *gpio = &dev_priv->engine.gpio;
struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
@@ -383,8 +381,8 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
nv_load_ptv(dev, regs, 200);
- gpio->set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
- gpio->set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON);
+ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON);
nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON);
}
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index e676b0d..c761538 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -222,7 +222,7 @@ nv40_pm_gr_idle(void *data)
return true;
}
-void
+int
nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -231,7 +231,7 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
struct bit_entry M;
u32 crtc_mask = 0;
u8 sr1[2];
- int i;
+ int i, ret = -EAGAIN;
/* determine which CRTCs are active, fetch VGA_SR1 for each */
for (i = 0; i < 2; i++) {
@@ -263,6 +263,8 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
if (!nv_wait_cb(dev, nv40_pm_gr_idle, dev))
goto resume;
+ ret = 0;
+
/* set engine clocks */
nv_mask(dev, 0x00c040, 0x00000333, 0x00000000);
nv_wr32(dev, 0x004004, info->npll_coef);
@@ -345,4 +347,48 @@ resume:
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
kfree(info);
+ return ret;
+}
+
+int
+nv40_pm_pwm_get(struct drm_device *dev, int line, u32 *divs, u32 *duty)
+{
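+ /* line 2: 0x0010f0 packs enable (bit 31), duty (30:16) and divider (14:0);
+ * line 9: 0x0015f4 holds enable/duty, 0x0015f8 the divider
+ */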
+ if (line == 2) {
+ u32 reg = nv_rd32(dev, 0x0010f0);
+ if (reg & 0x80000000) {
+ *duty = (reg & 0x7fff0000) >> 16;
+ *divs = (reg & 0x00007fff);
+ return 0;
+ }
+ } else
+ if (line == 9) {
+ u32 reg = nv_rd32(dev, 0x0015f4);
+ if (reg & 0x80000000) {
+ *divs = nv_rd32(dev, 0x0015f8);
+ *duty = (reg & 0x7fffffff);
+ return 0;
+ }
+ } else {
+ NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", line);
+ return -ENODEV;
+ }
+
+ return -EINVAL;
+}
+
+int
+nv40_pm_pwm_set(struct drm_device *dev, int line, u32 divs, u32 duty)
+{
+ if (line == 2) {
+ nv_wr32(dev, 0x0010f0, 0x80000000 | (duty << 16) | divs);
+ } else
+ if (line == 9) {
+ nv_wr32(dev, 0x0015f8, divs);
+ nv_wr32(dev, 0x0015f4, duty | 0x80000000);
+ } else {
+ NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", line);
+ return -ENODEV;
+ }
+
+ return 0;
}
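
The new nv40 PWM helpers pack the enable bit (31), duty (30:16) and divider (14:0) into a single register for line 2, and use a register pair for line 9. A minimal sketch of how a caller could turn the raw fields into a fan-speed percentage; percent_from_pwm() is a hypothetical helper, not part of the patch, and assumes the usual convention that duty is expressed as a fraction of divs:

	/* sketch only: derive a percentage from the fields returned by
	 * nv40_pm_pwm_get(); percent_from_pwm() is hypothetical. */
	static int percent_from_pwm(struct drm_device *dev, int line)
	{
		u32 divs, duty;
		int ret = nv40_pm_pwm_get(dev, line, &divs, &duty);
		if (ret)
			return ret;
		if (!divs)
			return -EINVAL;
		return duty * 100 / divs;
	}
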
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index 882080e..8f6c2ac 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -132,33 +132,42 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
}
static int
-nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
+nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
{
- struct drm_device *dev = nv_crtc->base.dev;
- struct nouveau_channel *evo = nv50_display(dev)->master;
- int ret;
-
- NV_DEBUG_KMS(dev, "\n");
+ struct nouveau_channel *evo = nv50_display(nv_crtc->base.dev)->master;
+ struct nouveau_connector *nv_connector;
+ struct drm_connector *connector;
+ int head = nv_crtc->index, ret;
+ u32 mode = 0x00;
- ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
- if (ret) {
- NV_ERROR(dev, "no space while setting dither\n");
- return ret;
+ nv_connector = nouveau_crtc_connector_get(nv_crtc);
+ connector = &nv_connector->base;
+ if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
+ if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
+ mode = DITHERING_MODE_DYNAMIC2X2;
+ } else {
+ mode = nv_connector->dithering_mode;
}
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DITHER_CTRL), 1);
- if (on)
- OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_ON);
- else
- OUT_RING(evo, NV50_EVO_CRTC_DITHER_CTRL_OFF);
+ if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
+ if (connector->display_info.bpc >= 8)
+ mode |= DITHERING_DEPTH_8BPC;
+ } else {
+ mode |= nv_connector->dithering_depth;
+ }
- if (update) {
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING(evo, 0);
- FIRE_RING(evo);
+ ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
+ if (ret == 0) {
+ BEGIN_RING(evo, 0, NV50_EVO_CRTC(head, DITHER_CTRL), 1);
+ OUT_RING (evo, mode);
+ if (update) {
+ BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
+ OUT_RING (evo, 0);
+ FIRE_RING (evo);
+ }
}
- return 0;
+ return ret;
}
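
As a worked example of the new dither selection: a connector left in DITHERING_MODE_AUTO driving a 6-bpc panel (display_info.bpc == 6) from a depth-24 framebuffer gives 24 > 6 * 3, so the code picks DITHERING_MODE_DYNAMIC2X2; with DITHERING_DEPTH_AUTO and bpc below 8 the DITHERING_DEPTH_8BPC bit stays clear, presumably leaving the 6-bpc dither depth.
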
struct nouveau_connector *
@@ -180,80 +189,103 @@ nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
}
static int
-nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
+nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
{
- struct nouveau_connector *nv_connector =
- nouveau_crtc_connector_get(nv_crtc);
- struct drm_device *dev = nv_crtc->base.dev;
+ struct nouveau_connector *nv_connector;
+ struct drm_crtc *crtc = &nv_crtc->base;
+ struct drm_device *dev = crtc->dev;
struct nouveau_channel *evo = nv50_display(dev)->master;
- struct drm_display_mode *native_mode = NULL;
- struct drm_display_mode *mode = &nv_crtc->base.mode;
- uint32_t outX, outY, horiz, vert;
- int ret;
+ struct drm_display_mode *umode = &crtc->mode;
+ struct drm_display_mode *omode;
+ int scaling_mode, ret;
+ u32 ctrl = 0, oX, oY;
NV_DEBUG_KMS(dev, "\n");
- switch (scaling_mode) {
- case DRM_MODE_SCALE_NONE:
- break;
- default:
- if (!nv_connector || !nv_connector->native_mode) {
- NV_ERROR(dev, "No native mode, forcing panel scaling\n");
- scaling_mode = DRM_MODE_SCALE_NONE;
+ nv_connector = nouveau_crtc_connector_get(nv_crtc);
+ if (!nv_connector || !nv_connector->native_mode) {
+ NV_ERROR(dev, "no native mode, forcing panel scaling\n");
+ scaling_mode = DRM_MODE_SCALE_NONE;
+ } else {
+ scaling_mode = nv_connector->scaling_mode;
+ }
+
+ /* start off at the resolution we programmed the crtc for, this
+ * effectively handles NONE/FULL scaling
+ */
+ if (scaling_mode != DRM_MODE_SCALE_NONE)
+ omode = nv_connector->native_mode;
+ else
+ omode = umode;
+
+ oX = omode->hdisplay;
+ oY = omode->vdisplay;
+ if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
+ oY *= 2;
+
+ /* add overscan compensation if necessary, will keep the aspect
+ * ratio the same as the backend mode unless overridden by the
+ * user setting both hborder and vborder properties.
+ */
+ if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
+ (nv_connector->underscan == UNDERSCAN_AUTO &&
+ nv_connector->edid &&
+ drm_detect_hdmi_monitor(nv_connector->edid)))) {
+ u32 bX = nv_connector->underscan_hborder;
+ u32 bY = nv_connector->underscan_vborder;
+ u32 aspect = (oY << 19) / oX;
+
+ if (bX) {
+ oX -= (bX * 2);
+ if (bY) oY -= (bY * 2);
+ else oY = ((oX * aspect) + (aspect / 2)) >> 19;
} else {
- native_mode = nv_connector->native_mode;
+ oX -= (oX >> 4) + 32;
+ if (bY) oY -= (bY * 2);
+ else oY = ((oX * aspect) + (aspect / 2)) >> 19;
}
- break;
}
+ /* handle CENTER/ASPECT scaling, taking into account the areas
+ * removed already for overscan compensation
+ */
switch (scaling_mode) {
+ case DRM_MODE_SCALE_CENTER:
+ oX = min((u32)umode->hdisplay, oX);
+ oY = min((u32)umode->vdisplay, oY);
+ /* fall-through */
case DRM_MODE_SCALE_ASPECT:
- horiz = (native_mode->hdisplay << 19) / mode->hdisplay;
- vert = (native_mode->vdisplay << 19) / mode->vdisplay;
-
- if (vert > horiz) {
- outX = (mode->hdisplay * horiz) >> 19;
- outY = (mode->vdisplay * horiz) >> 19;
+ if (oY < oX) {
+ u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
+ oX = ((oY * aspect) + (aspect / 2)) >> 19;
} else {
- outX = (mode->hdisplay * vert) >> 19;
- outY = (mode->vdisplay * vert) >> 19;
+ u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
+ oY = ((oX * aspect) + (aspect / 2)) >> 19;
}
break;
- case DRM_MODE_SCALE_FULLSCREEN:
- outX = native_mode->hdisplay;
- outY = native_mode->vdisplay;
- break;
- case DRM_MODE_SCALE_CENTER:
- case DRM_MODE_SCALE_NONE:
default:
- outX = mode->hdisplay;
- outY = mode->vdisplay;
break;
}
- ret = RING_SPACE(evo, update ? 7 : 5);
+ if (umode->hdisplay != oX || umode->vdisplay != oY ||
+ umode->flags & DRM_MODE_FLAG_INTERLACE ||
+ umode->flags & DRM_MODE_FLAG_DBLSCAN)
+ ctrl |= NV50_EVO_CRTC_SCALE_CTRL_ACTIVE;
+
+ ret = RING_SPACE(evo, 5);
if (ret)
return ret;
- /* Got a better name for SCALER_ACTIVE? */
- /* One day i've got to really figure out why this is needed. */
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CTRL), 1);
- if ((mode->flags & DRM_MODE_FLAG_DBLSCAN) ||
- (mode->flags & DRM_MODE_FLAG_INTERLACE) ||
- mode->hdisplay != outX || mode->vdisplay != outY) {
- OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_ACTIVE);
- } else {
- OUT_RING(evo, NV50_EVO_CRTC_SCALE_CTRL_INACTIVE);
- }
-
+ OUT_RING (evo, ctrl);
BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_RES1), 2);
- OUT_RING(evo, outY << 16 | outX);
- OUT_RING(evo, outY << 16 | outX);
+ OUT_RING (evo, oY << 16 | oX);
+ OUT_RING (evo, oY << 16 | oX);
if (update) {
- BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
- OUT_RING(evo, 0);
- FIRE_RING(evo);
+ nv50_display_flip_stop(crtc);
+ nv50_display_sync(dev);
+ nv50_display_flip_next(crtc, crtc->fb, NULL);
}
return 0;
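
A worked example of the ASPECT branch (numbers chosen for illustration, not taken from the patch): scaling a 1280x1024 user mode onto a 1920x1200 native panel with no underscan starts at oX = 1920, oY = 1200; since oY < oX, aspect = (1280 << 19) / 1024 = 655360 and oX becomes ((1200 * 655360) + 327680) >> 19 = 1500, i.e. a 1500x1200 window that preserves the 5:4 ratio. Because 1500x1200 differs from the 1280x1024 user mode, NV50_EVO_CRTC_SCALE_CTRL_ACTIVE is set.
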
@@ -333,7 +365,6 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
nouveau_bo_unmap(nv_crtc->cursor.nvbo);
nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
- kfree(nv_crtc->mode);
kfree(nv_crtc);
}
@@ -441,39 +472,6 @@ nv50_crtc_dpms(struct drm_crtc *crtc, int mode)
{
}
-static int
-nv50_crtc_wait_complete(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
- struct nv50_display *disp = nv50_display(dev);
- struct nouveau_channel *evo = disp->master;
- u64 start;
- int ret;
-
- ret = RING_SPACE(evo, 6);
- if (ret)
- return ret;
- BEGIN_RING(evo, 0, 0x0084, 1);
- OUT_RING (evo, 0x80000000);
- BEGIN_RING(evo, 0, 0x0080, 1);
- OUT_RING (evo, 0);
- BEGIN_RING(evo, 0, 0x0084, 1);
- OUT_RING (evo, 0x00000000);
-
- nv_wo32(disp->ntfy, 0x000, 0x00000000);
- FIRE_RING (evo);
-
- start = ptimer->read(dev);
- do {
- if (nv_ro32(disp->ntfy, 0x000))
- return 0;
- } while (ptimer->read(dev) - start < 2000000000ULL);
-
- return -EBUSY;
-}
-
static void
nv50_crtc_prepare(struct drm_crtc *crtc)
{
@@ -497,7 +495,7 @@ nv50_crtc_commit(struct drm_crtc *crtc)
nv50_crtc_blank(nv_crtc, false);
drm_vblank_post_modeset(dev, nv_crtc->index);
- nv50_crtc_wait_complete(crtc);
+ nv50_display_sync(dev);
nv50_display_flip_next(crtc, crtc->fb, NULL);
}
@@ -593,90 +591,76 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc,
}
static int
-nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode, int x, int y,
+nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
+ struct drm_display_mode *mode, int x, int y,
struct drm_framebuffer *old_fb)
{
struct drm_device *dev = crtc->dev;
struct nouveau_channel *evo = nv50_display(dev)->master;
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- struct nouveau_connector *nv_connector = NULL;
- uint32_t hsync_dur, vsync_dur, hsync_start_to_end, vsync_start_to_end;
- uint32_t hunk1, vunk1, vunk2a, vunk2b;
+ u32 head = nv_crtc->index * 0x400;
+ u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
+ u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
+ u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
+ u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
+ u32 vblan2e = 0, vblan2s = 1;
int ret;
- /* Find the connector attached to this CRTC */
- nv_connector = nouveau_crtc_connector_get(nv_crtc);
-
- *nv_crtc->mode = *adjusted_mode;
-
- NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
+ /* hw timing description looks like this:
+  *
+  * <sync> <back porch> <---------display---------> <front porch>
+  *  ______
+  * |____________|---------------------------|____________|
+  *
+  * ^ synce      ^ blanke                     ^ blanks     ^ active
+  *
+  * interlaced modes also have 2 additional values pointing at the end
+  * and start of the next field's blanking period.
+  */
- hsync_dur = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
- vsync_dur = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
- hsync_start_to_end = adjusted_mode->htotal - adjusted_mode->hsync_start;
- vsync_start_to_end = adjusted_mode->vtotal - adjusted_mode->vsync_start;
- /* I can't give this a proper name, anyone else can? */
- hunk1 = adjusted_mode->htotal -
- adjusted_mode->hsync_start + adjusted_mode->hdisplay;
- vunk1 = adjusted_mode->vtotal -
- adjusted_mode->vsync_start + adjusted_mode->vdisplay;
- /* Another strange value, this time only for interlaced adjusted_modes. */
- vunk2a = 2 * adjusted_mode->vtotal -
- adjusted_mode->vsync_start + adjusted_mode->vdisplay;
- vunk2b = adjusted_mode->vtotal -
- adjusted_mode->vsync_start + adjusted_mode->vtotal;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- vsync_dur /= 2;
- vsync_start_to_end /= 2;
- vunk1 /= 2;
- vunk2a /= 2;
- vunk2b /= 2;
- /* magic */
- if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) {
- vsync_start_to_end -= 1;
- vunk1 -= 1;
- vunk2a -= 1;
- vunk2b -= 1;
- }
+ hactive = mode->htotal;
+ hsynce = mode->hsync_end - mode->hsync_start - 1;
+ hbackp = mode->htotal - mode->hsync_end;
+ hblanke = hsynce + hbackp;
+ hfrontp = mode->hsync_start - mode->hdisplay;
+ hblanks = mode->htotal - hfrontp - 1;
+
+ vactive = mode->vtotal * vscan / ilace;
+ vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
+ vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
+ vblanke = vsynce + vbackp;
+ vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+ vblanks = vactive - vfrontp - 1;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ vblan2e = vactive + vsynce + vbackp;
+ vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
+ vactive = (vactive * 2) + 1;
}
- ret = RING_SPACE(evo, 17);
- if (ret)
- return ret;
-
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, CLOCK), 2);
- OUT_RING(evo, adjusted_mode->clock | 0x800000);
- OUT_RING(evo, (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 0);
-
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, DISPLAY_START), 5);
- OUT_RING(evo, 0);
- OUT_RING(evo, (adjusted_mode->vtotal << 16) | adjusted_mode->htotal);
- OUT_RING(evo, (vsync_dur - 1) << 16 | (hsync_dur - 1));
- OUT_RING(evo, (vsync_start_to_end - 1) << 16 |
- (hsync_start_to_end - 1));
- OUT_RING(evo, (vunk1 - 1) << 16 | (hunk1 - 1));
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK0824), 1);
- OUT_RING(evo, (vunk2b - 1) << 16 | (vunk2a - 1));
- } else {
- OUT_RING(evo, 0);
- OUT_RING(evo, 0);
+ ret = RING_SPACE(evo, 18);
+ if (ret == 0) {
+ BEGIN_RING(evo, 0, 0x0804 + head, 2);
+ OUT_RING (evo, 0x00800000 | mode->clock);
+ OUT_RING (evo, (ilace == 2) ? 2 : 0);
+ BEGIN_RING(evo, 0, 0x0810 + head, 6);
+ OUT_RING (evo, 0x00000000); /* border colour */
+ OUT_RING (evo, (vactive << 16) | hactive);
+ OUT_RING (evo, ( vsynce << 16) | hsynce);
+ OUT_RING (evo, (vblanke << 16) | hblanke);
+ OUT_RING (evo, (vblanks << 16) | hblanks);
+ OUT_RING (evo, (vblan2e << 16) | vblan2s);
+ BEGIN_RING(evo, 0, 0x082c + head, 1);
+ OUT_RING (evo, 0x00000000);
+ BEGIN_RING(evo, 0, 0x0900 + head, 1);
+ OUT_RING (evo, 0x00000311); /* makes sync channel work */
+ BEGIN_RING(evo, 0, 0x08c8 + head, 1);
+ OUT_RING (evo, (umode->vdisplay << 16) | umode->hdisplay);
+ BEGIN_RING(evo, 0, 0x08d4 + head, 1);
+ OUT_RING (evo, 0x00000000); /* screen position */
}
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, UNK082C), 1);
- OUT_RING(evo, 0);
-
- /* This is the actual resolution of the mode. */
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, REAL_RES), 1);
- OUT_RING(evo, (mode->vdisplay << 16) | mode->hdisplay);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(nv_crtc->index, SCALE_CENTER_OFFSET), 1);
- OUT_RING(evo, NV50_EVO_CRTC_SCALE_CENTER_OFFSET_VAL(0, 0));
-
- nv_crtc->set_dither(nv_crtc, nv_connector->use_dithering, false);
- nv_crtc->set_scale(nv_crtc, nv_connector->scaling_mode, false);
+ nv_crtc->set_dither(nv_crtc, false);
+ nv_crtc->set_scale(nv_crtc, false);
return nv50_crtc_do_mode_set_base(crtc, old_fb, x, y, false);
}
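
As a worked example of the new timing maths, assuming standard CEA 1080p60 numbers that are not part of the patch (hdisplay 1920, hsync 2008-2052, htotal 2200; vdisplay 1080, vsync 1084-1089, vtotal 1125, progressive): hactive = 2200, hsynce = 43, hbackp = 148, hblanke = 191, hfrontp = 88, hblanks = 2111; vactive = 1125, vsynce = 4, vbackp = 36, vblanke = 40, vfrontp = 4, vblanks = 1120; vblan2e/vblan2s stay at their 0/1 defaults because the mode is not interlaced.
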
@@ -692,7 +676,7 @@ nv50_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
if (ret)
return ret;
- ret = nv50_crtc_wait_complete(crtc);
+ ret = nv50_display_sync(crtc->dev);
if (ret)
return ret;
@@ -711,7 +695,7 @@ nv50_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
if (ret)
return ret;
- return nv50_crtc_wait_complete(crtc);
+ return nv50_display_sync(crtc->dev);
}
static const struct drm_crtc_helper_funcs nv50_crtc_helper_funcs = {
@@ -737,12 +721,6 @@ nv50_crtc_create(struct drm_device *dev, int index)
if (!nv_crtc)
return -ENOMEM;
- nv_crtc->mode = kzalloc(sizeof(*nv_crtc->mode), GFP_KERNEL);
- if (!nv_crtc->mode) {
- kfree(nv_crtc);
- return -ENOMEM;
- }
-
/* Default CLUT parameters, will be activated on the hw upon
* first mode set.
*/
@@ -764,7 +742,6 @@ nv50_crtc_create(struct drm_device *dev, int index)
}
if (ret) {
- kfree(nv_crtc->mode);
kfree(nv_crtc);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index 808f3ec..a0f2beb 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -200,11 +200,6 @@ nv50_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
static void
-nv50_dac_prepare(struct drm_encoder *encoder)
-{
-}
-
-static void
nv50_dac_commit(struct drm_encoder *encoder)
{
}
@@ -266,7 +261,7 @@ static const struct drm_encoder_helper_funcs nv50_dac_helper_funcs = {
.save = nv50_dac_save,
.restore = nv50_dac_restore,
.mode_fixup = nv50_dac_mode_fixup,
- .prepare = nv50_dac_prepare,
+ .prepare = nv50_dac_disconnect,
.commit = nv50_dac_commit,
.mode_set = nv50_dac_mode_set,
.get_crtc = nv50_dac_crtc_get,
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 06de250..7ba28e0 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -50,9 +50,53 @@ nv50_sor_nr(struct drm_device *dev)
return 4;
}
+static int
+evo_icmd(struct drm_device *dev, int ch, u32 mthd, u32 data)
+{
+ int ret = 0;
+ nv_mask(dev, 0x610300 + (ch * 0x08), 0x00000001, 0x00000001);
+ nv_wr32(dev, 0x610304 + (ch * 0x08), data);
+ nv_wr32(dev, 0x610300 + (ch * 0x08), 0x80000001 | mthd);
+ if (!nv_wait(dev, 0x610300 + (ch * 0x08), 0x80000000, 0x00000000))
+ ret = -EBUSY;
+ if (ret || (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO))
+ NV_INFO(dev, "EvoPIO: %d 0x%04x 0x%08x\n", ch, mthd, data);
+ nv_mask(dev, 0x610300 + (ch * 0x08), 0x00000001, 0x00000000);
+ return ret;
+}
+
int
nv50_display_early_init(struct drm_device *dev)
{
+ u32 ctrl = nv_rd32(dev, 0x610200);
+ int i;
+
+ /* check if master evo channel is already active, as good a sign as any
+ * that the display engine is in a weird state (hibernate/kexec), if
+ * it is, do our best to reset the display engine...
+ */
+ if ((ctrl & 0x00000003) == 0x00000003) {
+ NV_INFO(dev, "PDISP: EVO(0) 0x%08x, resetting...\n", ctrl);
+
+ /* deactivate both heads first, PDISP will disappear forever
+ * (well, until you power cycle) on some boards as soon as
+ * PMC_ENABLE is hit unless they are deactivated first.
+ */
+ for (i = 0; i < 2; i++) {
+ evo_icmd(dev, 0, 0x0880 + (i * 0x400), 0x05000000);
+ evo_icmd(dev, 0, 0x089c + (i * 0x400), 0);
+ evo_icmd(dev, 0, 0x0840 + (i * 0x400), 0);
+ evo_icmd(dev, 0, 0x0844 + (i * 0x400), 0);
+ evo_icmd(dev, 0, 0x085c + (i * 0x400), 0);
+ evo_icmd(dev, 0, 0x0874 + (i * 0x400), 0);
+ }
+ evo_icmd(dev, 0, 0x0080, 0);
+
+ /* reset PDISP */
+ nv_mask(dev, 0x000200, 0x40000000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x40000000, 0x40000000);
+ }
+
return 0;
}
@@ -62,11 +106,40 @@ nv50_display_late_takedown(struct drm_device *dev)
}
int
-nv50_display_init(struct drm_device *dev)
+nv50_display_sync(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct drm_connector *connector;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ struct nv50_display *disp = nv50_display(dev);
+ struct nouveau_channel *evo = disp->master;
+ u64 start;
+ int ret;
+
+ ret = RING_SPACE(evo, 6);
+ if (ret == 0) {
+ BEGIN_RING(evo, 0, 0x0084, 1);
+ OUT_RING (evo, 0x80000000);
+ BEGIN_RING(evo, 0, 0x0080, 1);
+ OUT_RING (evo, 0);
+ BEGIN_RING(evo, 0, 0x0084, 1);
+ OUT_RING (evo, 0x00000000);
+
+ nv_wo32(disp->ntfy, 0x000, 0x00000000);
+ FIRE_RING (evo);
+
+ start = ptimer->read(dev);
+ do {
+ if (nv_ro32(disp->ntfy, 0x000))
+ return 0;
+ } while (ptimer->read(dev) - start < 2000000000ULL);
+ }
+
+ return -EBUSY;
+}
+
+int
+nv50_display_init(struct drm_device *dev)
+{
struct nouveau_channel *evo;
int ret, i;
u32 val;
@@ -161,16 +234,6 @@ nv50_display_init(struct drm_device *dev)
NV50_PDISPLAY_INTR_EN_1_CLK_UNK20 |
NV50_PDISPLAY_INTR_EN_1_CLK_UNK40);
- /* enable hotplug interrupts */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct nouveau_connector *conn = nouveau_connector(connector);
-
- if (conn->dcb->gpio_tag == 0xff)
- continue;
-
- pgpio->irq_enable(dev, conn->dcb->gpio_tag, true);
- }
-
ret = nv50_evo_init(dev);
if (ret)
return ret;
@@ -178,36 +241,19 @@ nv50_display_init(struct drm_device *dev)
nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->vinst >> 8) | 9);
- ret = RING_SPACE(evo, 15);
+ ret = RING_SPACE(evo, 3);
if (ret)
return ret;
BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
- OUT_RING(evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
- OUT_RING(evo, NvEvoSync);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, FB_DMA), 1);
- OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK0800), 1);
- OUT_RING(evo, 0);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, DISPLAY_START), 1);
- OUT_RING(evo, 0);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1);
- OUT_RING(evo, 0);
- /* required to make display sync channels not hate life */
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK900), 1);
- OUT_RING (evo, 0x00000311);
- BEGIN_RING(evo, 0, NV50_EVO_CRTC(1, UNK900), 1);
- OUT_RING (evo, 0x00000311);
- FIRE_RING(evo);
- if (!nv_wait(dev, 0x640004, 0xffffffff, evo->dma.put << 2))
- NV_ERROR(dev, "evo pushbuf stalled\n");
-
+ OUT_RING (evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
+ OUT_RING (evo, NvEvoSync);
- return 0;
+ return nv50_display_sync(dev);
}
-static int nv50_display_disable(struct drm_device *dev)
+void
+nv50_display_fini(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_display *disp = nv50_display(dev);
struct nouveau_channel *evo = disp->master;
struct drm_crtc *drm_crtc;
@@ -270,18 +316,10 @@ static int nv50_display_disable(struct drm_device *dev)
/* disable interrupts. */
nv_wr32(dev, NV50_PDISPLAY_INTR_EN_1, 0x00000000);
-
- /* disable hotplug interrupts */
- nv_wr32(dev, 0xe054, 0xffffffff);
- nv_wr32(dev, 0xe050, 0x00000000);
- if (dev_priv->chipset >= 0x90) {
- nv_wr32(dev, 0xe074, 0xffffffff);
- nv_wr32(dev, 0xe070, 0x00000000);
- }
- return 0;
}
-int nv50_display_create(struct drm_device *dev)
+int
+nv50_display_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct dcb_table *dcb = &dev_priv->vbios.dcb;
@@ -341,7 +379,7 @@ int nv50_display_create(struct drm_device *dev)
tasklet_init(&priv->tasklet, nv50_display_bh, (unsigned long)dev);
nouveau_irq_register(dev, 26, nv50_display_isr);
- ret = nv50_display_init(dev);
+ ret = nv50_evo_create(dev);
if (ret) {
nv50_display_destroy(dev);
return ret;
@@ -357,7 +395,7 @@ nv50_display_destroy(struct drm_device *dev)
NV_DEBUG_KMS(dev, "\n");
- nv50_display_disable(dev);
+ nv50_evo_destroy(dev);
nouveau_irq_unregister(dev, 26);
kfree(disp);
}
@@ -521,7 +559,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcb,
} else {
/* determine number of lvds links */
if (nv_connector && nv_connector->edid &&
- nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG) {
+ nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
/* http://www.spwg.org */
if (((u8 *)nv_connector->edid)[121] == 2)
script |= 0x0100;
@@ -722,8 +760,8 @@ nv50_display_unk20_handler(struct drm_device *dev)
if (crtc >= 0) {
pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(crtc, CLOCK));
pclk &= 0x003fffff;
-
- nv50_crtc_set_clock(dev, crtc, pclk);
+ if (pclk)
+ nv50_crtc_set_clock(dev, crtc, pclk);
tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(crtc));
tmp &= ~0x000000f;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.h b/drivers/gpu/drm/nouveau/nv50_display.h
index c2da503..95874f7 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.h
+++ b/drivers/gpu/drm/nouveau/nv50_display.h
@@ -69,14 +69,18 @@ int nv50_display_early_init(struct drm_device *dev);
void nv50_display_late_takedown(struct drm_device *dev);
int nv50_display_create(struct drm_device *dev);
int nv50_display_init(struct drm_device *dev);
+void nv50_display_fini(struct drm_device *dev);
void nv50_display_destroy(struct drm_device *dev);
int nv50_crtc_blank(struct nouveau_crtc *, bool blank);
int nv50_crtc_set_clock(struct drm_device *, int head, int pclk);
+int nv50_display_sync(struct drm_device *);
int nv50_display_flip_next(struct drm_crtc *, struct drm_framebuffer *,
struct nouveau_channel *chan);
void nv50_display_flip_stop(struct drm_crtc *);
+int nv50_evo_create(struct drm_device *dev);
+void nv50_evo_destroy(struct drm_device *dev);
int nv50_evo_init(struct drm_device *dev);
void nv50_evo_fini(struct drm_device *dev);
void nv50_evo_dmaobj_init(struct nouveau_gpuobj *, u32 memtype, u64 base,
diff --git a/drivers/gpu/drm/nouveau/nv50_evo.c b/drivers/gpu/drm/nouveau/nv50_evo.c
index c99d975..9b962e9 100644
--- a/drivers/gpu/drm/nouveau/nv50_evo.c
+++ b/drivers/gpu/drm/nouveau/nv50_evo.c
@@ -218,7 +218,7 @@ nv50_evo_channel_fini(struct nouveau_channel *evo)
}
}
-static void
+void
nv50_evo_destroy(struct drm_device *dev)
{
struct nv50_display *disp = nv50_display(dev);
@@ -235,7 +235,7 @@ nv50_evo_destroy(struct drm_device *dev)
nv50_evo_channel_del(&disp->master);
}
-static int
+int
nv50_evo_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -388,12 +388,6 @@ nv50_evo_init(struct drm_device *dev)
struct nv50_display *disp = nv50_display(dev);
int ret, i;
- if (!disp->master) {
- ret = nv50_evo_create(dev);
- if (ret)
- return ret;
- }
-
ret = nv50_evo_channel_init(disp->master);
if (ret)
return ret;
@@ -420,6 +414,4 @@ nv50_evo_fini(struct drm_device *dev)
if (disp->master)
nv50_evo_channel_fini(disp->master);
-
- nv50_evo_destroy(dev);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index c34a074..3bc2a56 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -230,6 +230,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *ramfc = NULL;
+ uint64_t ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
unsigned long flags;
int ret;
@@ -280,8 +281,9 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
nv_wo32(ramfc, 0x7c, 0x30000001);
nv_wo32(ramfc, 0x78, 0x00000000);
nv_wo32(ramfc, 0x3c, 0x403f6078);
- nv_wo32(ramfc, 0x50, chan->pushbuf_base + chan->dma.ib_base * 4);
- nv_wo32(ramfc, 0x54, drm_order(chan->dma.ib_max + 1) << 16);
+ nv_wo32(ramfc, 0x50, lower_32_bits(ib_offset));
+ nv_wo32(ramfc, 0x54, upper_32_bits(ib_offset) |
+ drm_order(chan->dma.ib_max + 1) << 16);
if (dev_priv->chipset != 0x50) {
nv_wo32(chan->ramin, 0, chan->id);
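
The ib_offset change appears to let the indirect-buffer ring sit above 4GiB: for a hypothetical offset of 0x100010000 with ib_max = 511 (512 ring entries), RAMFC word 0x50 receives lower_32_bits() = 0x00010000 and word 0x54 receives upper_32_bits() | drm_order(512) << 16 = 0x00000001 | 0x00090000 = 0x00090001.
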
diff --git a/drivers/gpu/drm/nouveau/nv50_gpio.c b/drivers/gpu/drm/nouveau/nv50_gpio.c
index 793a5cc..f429e6a 100644
--- a/drivers/gpu/drm/nouveau/nv50_gpio.c
+++ b/drivers/gpu/drm/nouveau/nv50_gpio.c
@@ -25,229 +25,95 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_hw.h"
+#include "nouveau_gpio.h"
#include "nv50_display.h"
-static void nv50_gpio_isr(struct drm_device *dev);
-static void nv50_gpio_isr_bh(struct work_struct *work);
-
-struct nv50_gpio_priv {
- struct list_head handlers;
- spinlock_t lock;
-};
-
-struct nv50_gpio_handler {
- struct drm_device *dev;
- struct list_head head;
- struct work_struct work;
- bool inhibit;
-
- struct dcb_gpio_entry *gpio;
-
- void (*handler)(void *data, int state);
- void *data;
-};
-
static int
-nv50_gpio_location(struct dcb_gpio_entry *gpio, uint32_t *reg, uint32_t *shift)
+nv50_gpio_location(int line, u32 *reg, u32 *shift)
{
const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
- if (gpio->line >= 32)
+ if (line >= 32)
return -EINVAL;
- *reg = nv50_gpio_reg[gpio->line >> 3];
- *shift = (gpio->line & 7) << 2;
+ *reg = nv50_gpio_reg[line >> 3];
+ *shift = (line & 7) << 2;
return 0;
}
int
-nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
+nv50_gpio_drive(struct drm_device *dev, int line, int dir, int out)
{
- struct dcb_gpio_entry *gpio;
- uint32_t r, s, v;
-
- gpio = nouveau_bios_gpio_entry(dev, tag);
- if (!gpio)
- return -ENOENT;
+ u32 reg, shift;
- if (nv50_gpio_location(gpio, &r, &s))
+ if (nv50_gpio_location(line, &reg, &shift))
return -EINVAL;
- v = nv_rd32(dev, r) >> (s + 2);
- return ((v & 1) == (gpio->state[1] & 1));
+ nv_mask(dev, reg, 7 << shift, (((dir ^ 1) << 1) | out) << shift);
+ return 0;
}
int
-nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
+nv50_gpio_sense(struct drm_device *dev, int line)
{
- struct dcb_gpio_entry *gpio;
- uint32_t r, s, v;
-
- gpio = nouveau_bios_gpio_entry(dev, tag);
- if (!gpio)
- return -ENOENT;
+ u32 reg, shift;
- if (nv50_gpio_location(gpio, &r, &s))
+ if (nv50_gpio_location(line, &reg, &shift))
return -EINVAL;
- v = nv_rd32(dev, r) & ~(0x3 << s);
- v |= (gpio->state[state] ^ 2) << s;
- nv_wr32(dev, r, v);
- return 0;
+ return !!(nv_rd32(dev, reg) & (4 << shift));
}
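
For example, GPIO line 9 maps to reg = nv50_gpio_reg[9 >> 3] = 0xe108 with shift = (9 & 7) << 2 = 4: nv50_gpio_drive() then masks bits 6:4 of 0xe108 and writes ((dir ^ 1) << 1 | out) << 4, while nv50_gpio_sense() reports bit 6 (4 << 4) of the same register.
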
-int
-nvd0_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag)
+void
+nv50_gpio_irq_enable(struct drm_device *dev, int line, bool on)
{
- struct dcb_gpio_entry *gpio;
- u32 v;
-
- gpio = nouveau_bios_gpio_entry(dev, tag);
- if (!gpio)
- return -ENOENT;
+ u32 reg = line < 16 ? 0xe050 : 0xe070;
+ u32 mask = 0x00010001 << (line & 0xf);
- v = nv_rd32(dev, 0x00d610 + (gpio->line * 4));
- v &= 0x00004000;
- return (!!v == (gpio->state[1] & 1));
+ nv_wr32(dev, reg + 4, mask);
+ nv_mask(dev, reg + 0, mask, on ? mask : 0);
}
int
-nvd0_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state)
+nvd0_gpio_drive(struct drm_device *dev, int line, int dir, int out)
{
- struct dcb_gpio_entry *gpio;
- u32 v;
-
- gpio = nouveau_bios_gpio_entry(dev, tag);
- if (!gpio)
- return -ENOENT;
-
- v = gpio->state[state] ^ 2;
-
- nv_mask(dev, 0x00d610 + (gpio->line * 4), 0x00003000, v << 12);
+ u32 data = ((dir ^ 1) << 13) | (out << 12);
+ nv_mask(dev, 0x00d610 + (line * 4), 0x00003000, data);
+ nv_mask(dev, 0x00d604, 0x00000001, 0x00000001); /* update? */
return 0;
}
int
-nv50_gpio_irq_register(struct drm_device *dev, enum dcb_gpio_tag tag,
- void (*handler)(void *, int), void *data)
+nvd0_gpio_sense(struct drm_device *dev, int line)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct nv50_gpio_priv *priv = pgpio->priv;
- struct nv50_gpio_handler *gpioh;
- struct dcb_gpio_entry *gpio;
- unsigned long flags;
-
- gpio = nouveau_bios_gpio_entry(dev, tag);
- if (!gpio)
- return -ENOENT;
-
- gpioh = kzalloc(sizeof(*gpioh), GFP_KERNEL);
- if (!gpioh)
- return -ENOMEM;
-
- INIT_WORK(&gpioh->work, nv50_gpio_isr_bh);
- gpioh->dev = dev;
- gpioh->gpio = gpio;
- gpioh->handler = handler;
- gpioh->data = data;
-
- spin_lock_irqsave(&priv->lock, flags);
- list_add(&gpioh->head, &priv->handlers);
- spin_unlock_irqrestore(&priv->lock, flags);
- return 0;
+ return !!(nv_rd32(dev, 0x00d610 + (line * 4)) & 0x00004000);
}
-void
-nv50_gpio_irq_unregister(struct drm_device *dev, enum dcb_gpio_tag tag,
- void (*handler)(void *, int), void *data)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct nv50_gpio_priv *priv = pgpio->priv;
- struct nv50_gpio_handler *gpioh, *tmp;
- struct dcb_gpio_entry *gpio;
- LIST_HEAD(tofree);
- unsigned long flags;
-
- gpio = nouveau_bios_gpio_entry(dev, tag);
- if (!gpio)
- return;
-
- spin_lock_irqsave(&priv->lock, flags);
- list_for_each_entry_safe(gpioh, tmp, &priv->handlers, head) {
- if (gpioh->gpio != gpio ||
- gpioh->handler != handler ||
- gpioh->data != data)
- continue;
- list_move(&gpioh->head, &tofree);
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-
- list_for_each_entry_safe(gpioh, tmp, &tofree, head) {
- flush_work_sync(&gpioh->work);
- kfree(gpioh);
- }
-}
-
-bool
-nv50_gpio_irq_enable(struct drm_device *dev, enum dcb_gpio_tag tag, bool on)
-{
- struct dcb_gpio_entry *gpio;
- u32 reg, mask;
-
- gpio = nouveau_bios_gpio_entry(dev, tag);
- if (!gpio)
- return false;
-
- reg = gpio->line < 16 ? 0xe050 : 0xe070;
- mask = 0x00010001 << (gpio->line & 0xf);
-
- nv_wr32(dev, reg + 4, mask);
- reg = nv_mask(dev, reg + 0, mask, on ? mask : 0);
- return (reg & mask) == mask;
-}
-
-static int
-nv50_gpio_create(struct drm_device *dev)
+static void
+nv50_gpio_isr(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct nv50_gpio_priv *priv;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
+ u32 intr0, intr1 = 0;
+ u32 hi, lo;
- INIT_LIST_HEAD(&priv->handlers);
- spin_lock_init(&priv->lock);
- pgpio->priv = priv;
- return 0;
-}
+ intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
+ if (dev_priv->chipset >= 0x90)
+ intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
-static void
-nv50_gpio_destroy(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ hi = (intr0 & 0x0000ffff) | (intr1 << 16);
+ lo = (intr0 >> 16) | (intr1 & 0xffff0000);
+ nouveau_gpio_isr(dev, 0, hi | lo);
- kfree(pgpio->priv);
- pgpio->priv = NULL;
+ nv_wr32(dev, 0xe054, intr0);
+ if (dev_priv->chipset >= 0x90)
+ nv_wr32(dev, 0xe074, intr1);
}
int
nv50_gpio_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- int ret;
-
- if (!pgpio->priv) {
- ret = nv50_gpio_create(dev);
- if (ret)
- return ret;
- }
/* disable, and ack any pending gpio interrupts */
nv_wr32(dev, 0xe050, 0x00000000);
@@ -270,64 +136,4 @@ nv50_gpio_fini(struct drm_device *dev)
if (dev_priv->chipset >= 0x90)
nv_wr32(dev, 0xe070, 0x00000000);
nouveau_irq_unregister(dev, 21);
-
- nv50_gpio_destroy(dev);
-}
-
-static void
-nv50_gpio_isr_bh(struct work_struct *work)
-{
- struct nv50_gpio_handler *gpioh =
- container_of(work, struct nv50_gpio_handler, work);
- struct drm_nouveau_private *dev_priv = gpioh->dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct nv50_gpio_priv *priv = pgpio->priv;
- unsigned long flags;
- int state;
-
- state = pgpio->get(gpioh->dev, gpioh->gpio->tag);
- if (state < 0)
- return;
-
- gpioh->handler(gpioh->data, state);
-
- spin_lock_irqsave(&priv->lock, flags);
- gpioh->inhibit = false;
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static void
-nv50_gpio_isr(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct nv50_gpio_priv *priv = pgpio->priv;
- struct nv50_gpio_handler *gpioh;
- u32 intr0, intr1 = 0;
- u32 hi, lo, ch;
-
- intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
- if (dev_priv->chipset >= 0x90)
- intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
-
- hi = (intr0 & 0x0000ffff) | (intr1 << 16);
- lo = (intr0 >> 16) | (intr1 & 0xffff0000);
- ch = hi | lo;
-
- nv_wr32(dev, 0xe054, intr0);
- if (dev_priv->chipset >= 0x90)
- nv_wr32(dev, 0xe074, intr1);
-
- spin_lock(&priv->lock);
- list_for_each_entry(gpioh, &priv->handlers, head) {
- if (!(ch & (1 << gpioh->gpio->line)))
- continue;
-
- if (gpioh->inhibit)
- continue;
- gpioh->inhibit = true;
-
- schedule_work(&gpioh->work);
- }
- spin_unlock(&priv->lock);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index ac601f7..33d5711 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -616,9 +616,9 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
}
break;
case 7: /* MP error */
- if (ustatus & 0x00010000) {
+ if (ustatus & 0x04030000) {
nv50_pgraph_mp_trap(dev, i, display);
- ustatus &= ~0x00010000;
+ ustatus &= ~0x04030000;
}
break;
case 8: /* TPDMA error */
diff --git a/drivers/gpu/drm/nouveau/nv50_pm.c b/drivers/gpu/drm/nouveau/nv50_pm.c
index 3d5a86b..ec5481d 100644
--- a/drivers/gpu/drm/nouveau/nv50_pm.c
+++ b/drivers/gpu/drm/nouveau/nv50_pm.c
@@ -25,122 +25,745 @@
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_bios.h"
+#include "nouveau_hw.h"
#include "nouveau_pm.h"
+#include "nouveau_hwsq.h"
-struct nv50_pm_state {
- struct nouveau_pm_level *perflvl;
- struct pll_lims pll;
- enum pll_types type;
- int N, M, P;
+enum clk_src {
+ clk_src_crystal,
+ clk_src_href,
+ clk_src_hclk,
+ clk_src_hclkm3,
+ clk_src_hclkm3d2,
+ clk_src_host,
+ clk_src_nvclk,
+ clk_src_sclk,
+ clk_src_mclk,
+ clk_src_vdec,
+ clk_src_dom6
};
+static u32 read_clk(struct drm_device *, enum clk_src);
+
+static u32
+read_div(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ switch (dev_priv->chipset) {
+ case 0x50: /* it exists, but only has bit 31, not the dividers.. */
+ case 0x84:
+ case 0x86:
+ case 0x98:
+ case 0xa0:
+ return nv_rd32(dev, 0x004700);
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ return nv_rd32(dev, 0x004800);
+ default:
+ return 0x00000000;
+ }
+}
+
+static u32
+read_pll_src(struct drm_device *dev, u32 base)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 coef, ref = read_clk(dev, clk_src_crystal);
+ u32 rsel = nv_rd32(dev, 0x00e18c);
+ int P, N, M, id;
+
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0xa0:
+ switch (base) {
+ case 0x4020:
+ case 0x4028: id = !!(rsel & 0x00000004); break;
+ case 0x4008: id = !!(rsel & 0x00000008); break;
+ case 0x4030: id = 0; break;
+ default:
+ NV_ERROR(dev, "ref: bad pll 0x%06x\n", base);
+ return 0;
+ }
+
+ coef = nv_rd32(dev, 0x00e81c + (id * 0x0c));
+ ref *= (coef & 0x01000000) ? 2 : 4;
+ P = (coef & 0x00070000) >> 16;
+ N = ((coef & 0x0000ff00) >> 8) + 1;
+ M = ((coef & 0x000000ff) >> 0) + 1;
+ break;
+ case 0x84:
+ case 0x86:
+ case 0x92:
+ coef = nv_rd32(dev, 0x00e81c);
+ P = (coef & 0x00070000) >> 16;
+ N = (coef & 0x0000ff00) >> 8;
+ M = (coef & 0x000000ff) >> 0;
+ break;
+ case 0x94:
+ case 0x96:
+ case 0x98:
+ rsel = nv_rd32(dev, 0x00c050);
+ switch (base) {
+ case 0x4020: rsel = (rsel & 0x00000003) >> 0; break;
+ case 0x4008: rsel = (rsel & 0x0000000c) >> 2; break;
+ case 0x4028: rsel = (rsel & 0x00001800) >> 11; break;
+ case 0x4030: rsel = 3; break;
+ default:
+ NV_ERROR(dev, "ref: bad pll 0x%06x\n", base);
+ return 0;
+ }
+
+ switch (rsel) {
+ case 0: id = 1; break;
+ case 1: return read_clk(dev, clk_src_crystal);
+ case 2: return read_clk(dev, clk_src_href);
+ case 3: id = 0; break;
+ }
+
+ coef = nv_rd32(dev, 0x00e81c + (id * 0x28));
+ P = (nv_rd32(dev, 0x00e824 + (id * 0x28)) >> 16) & 7;
+ P += (coef & 0x00070000) >> 16;
+ N = (coef & 0x0000ff00) >> 8;
+ M = (coef & 0x000000ff) >> 0;
+ break;
+ default:
+ BUG_ON(1);
+ }
+
+ if (M)
+ return (ref * N / M) >> P;
+ return 0;
+}
+
+static u32
+read_pll_ref(struct drm_device *dev, u32 base)
+{
+ u32 src, mast = nv_rd32(dev, 0x00c040);
+
+ switch (base) {
+ case 0x004028:
+ src = !!(mast & 0x00200000);
+ break;
+ case 0x004020:
+ src = !!(mast & 0x00400000);
+ break;
+ case 0x004008:
+ src = !!(mast & 0x00010000);
+ break;
+ case 0x004030:
+ src = !!(mast & 0x02000000);
+ break;
+ case 0x00e810:
+ return read_clk(dev, clk_src_crystal);
+ default:
+ NV_ERROR(dev, "bad pll 0x%06x\n", base);
+ return 0;
+ }
+
+ if (src)
+ return read_clk(dev, clk_src_href);
+ return read_pll_src(dev, base);
+}
+
+static u32
+read_pll(struct drm_device *dev, u32 base)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 mast = nv_rd32(dev, 0x00c040);
+ u32 ctrl = nv_rd32(dev, base + 0);
+ u32 coef = nv_rd32(dev, base + 4);
+ u32 ref = read_pll_ref(dev, base);
+ u32 clk = 0;
+ int N1, N2, M1, M2;
+
+ if (base == 0x004028 && (mast & 0x00100000)) {
+ /* wtf, appears to only disable post-divider on nva0 */
+ if (dev_priv->chipset != 0xa0)
+ return read_clk(dev, clk_src_dom6);
+ }
+
+ N2 = (coef & 0xff000000) >> 24;
+ M2 = (coef & 0x00ff0000) >> 16;
+ N1 = (coef & 0x0000ff00) >> 8;
+ M1 = (coef & 0x000000ff);
+ if ((ctrl & 0x80000000) && M1) {
+ clk = ref * N1 / M1;
+ if ((ctrl & 0x40000100) == 0x40000000) {
+ if (M2)
+ clk = clk * N2 / M2;
+ else
+ clk = 0;
+ }
+ }
+
+ return clk;
+}
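
To illustrate the arithmetic with hypothetical register values (not taken from the patch): with ref = 25000 kHz, ctrl = 0x80000100 and coef = 0x00006004 (N1 = 0x60 = 96, M1 = 4), read_pll() returns 25000 * 96 / 4 = 600000 kHz; only when bit 30 of ctrl is set and bit 8 is clear is that intermediate clock further scaled by N2 / M2.
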
+
+static u32
+read_clk(struct drm_device *dev, enum clk_src src)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 mast = nv_rd32(dev, 0x00c040);
+ u32 P = 0;
+
+ switch (src) {
+ case clk_src_crystal:
+ return dev_priv->crystal;
+ case clk_src_href:
+ return 100000; /* PCIE reference clock */
+ case clk_src_hclk:
+ return read_clk(dev, clk_src_href) * 27778 / 10000;
+ case clk_src_hclkm3:
+ return read_clk(dev, clk_src_hclk) * 3;
+ case clk_src_hclkm3d2:
+ return read_clk(dev, clk_src_hclk) * 3 / 2;
+ case clk_src_host:
+ switch (mast & 0x30000000) {
+ case 0x00000000: return read_clk(dev, clk_src_href);
+ case 0x10000000: break;
+ case 0x20000000: /* !0x50 */
+ case 0x30000000: return read_clk(dev, clk_src_hclk);
+ }
+ break;
+ case clk_src_nvclk:
+ if (!(mast & 0x00100000))
+ P = (nv_rd32(dev, 0x004028) & 0x00070000) >> 16;
+ switch (mast & 0x00000003) {
+ case 0x00000000: return read_clk(dev, clk_src_crystal) >> P;
+ case 0x00000001: return read_clk(dev, clk_src_dom6);
+ case 0x00000002: return read_pll(dev, 0x004020) >> P;
+ case 0x00000003: return read_pll(dev, 0x004028) >> P;
+ }
+ break;
+ case clk_src_sclk:
+ P = (nv_rd32(dev, 0x004020) & 0x00070000) >> 16;
+ switch (mast & 0x00000030) {
+ case 0x00000000:
+ if (mast & 0x00000080)
+ return read_clk(dev, clk_src_host) >> P;
+ return read_clk(dev, clk_src_crystal) >> P;
+ case 0x00000010: break;
+ case 0x00000020: return read_pll(dev, 0x004028) >> P;
+ case 0x00000030: return read_pll(dev, 0x004020) >> P;
+ }
+ break;
+ case clk_src_mclk:
+ P = (nv_rd32(dev, 0x004008) & 0x00070000) >> 16;
+ if (nv_rd32(dev, 0x004008) & 0x00000200) {
+ switch (mast & 0x0000c000) {
+ case 0x00000000:
+ return read_clk(dev, clk_src_crystal) >> P;
+ case 0x00008000:
+ case 0x0000c000:
+ return read_clk(dev, clk_src_href) >> P;
+ }
+ } else {
+ return read_pll(dev, 0x004008) >> P;
+ }
+ break;
+ case clk_src_vdec:
+ P = (read_div(dev) & 0x00000700) >> 8;
+ switch (dev_priv->chipset) {
+ case 0x84:
+ case 0x86:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0xa0:
+ switch (mast & 0x00000c00) {
+ case 0x00000000:
+ if (dev_priv->chipset == 0xa0) /* wtf?? */
+ return read_clk(dev, clk_src_nvclk) >> P;
+ return read_clk(dev, clk_src_crystal) >> P;
+ case 0x00000400:
+ return 0;
+ case 0x00000800:
+ if (mast & 0x01000000)
+ return read_pll(dev, 0x004028) >> P;
+ return read_pll(dev, 0x004030) >> P;
+ case 0x00000c00:
+ return read_clk(dev, clk_src_nvclk) >> P;
+ }
+ break;
+ case 0x98:
+ switch (mast & 0x00000c00) {
+ case 0x00000000:
+ return read_clk(dev, clk_src_nvclk) >> P;
+ case 0x00000400:
+ return 0;
+ case 0x00000800:
+ return read_clk(dev, clk_src_hclkm3d2) >> P;
+ case 0x00000c00:
+ return read_clk(dev, clk_src_mclk) >> P;
+ }
+ break;
+ }
+ break;
+ case clk_src_dom6:
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0xa0:
+ return read_pll(dev, 0x00e810) >> 2;
+ case 0x84:
+ case 0x86:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0x98:
+ P = (read_div(dev) & 0x00000007) >> 0;
+ switch (mast & 0x0c000000) {
+ case 0x00000000: return read_clk(dev, clk_src_href);
+ case 0x04000000: break;
+ case 0x08000000: return read_clk(dev, clk_src_hclk);
+ case 0x0c000000:
+ return read_clk(dev, clk_src_hclkm3) >> P;
+ }
+ break;
+ default:
+ break;
+ }
+ default:
+ break;
+ }
+
+ NV_DEBUG(dev, "unknown clock source %d 0x%08x\n", src, mast);
+ return 0;
+}
+
int
-nv50_pm_clock_get(struct drm_device *dev, u32 id)
+nv50_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
- struct pll_lims pll;
- int P, N, M, ret;
- u32 reg0, reg1;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ if (dev_priv->chipset == 0xaa ||
+ dev_priv->chipset == 0xac)
+ return 0;
- ret = get_pll_limits(dev, id, &pll);
+ perflvl->core = read_clk(dev, clk_src_nvclk);
+ perflvl->shader = read_clk(dev, clk_src_sclk);
+ perflvl->memory = read_clk(dev, clk_src_mclk);
+ if (dev_priv->chipset != 0x50) {
+ perflvl->vdec = read_clk(dev, clk_src_vdec);
+ perflvl->dom6 = read_clk(dev, clk_src_dom6);
+ }
+
+ return 0;
+}
+
+struct nv50_pm_state {
+ struct hwsq_ucode mclk_hwsq;
+ u32 mscript;
+
+ u32 emast;
+ u32 nctrl;
+ u32 ncoef;
+ u32 sctrl;
+ u32 scoef;
+
+ u32 amast;
+ u32 pdivs;
+};
+
+static u32
+calc_pll(struct drm_device *dev, u32 reg, struct pll_lims *pll,
+ u32 clk, int *N1, int *M1, int *log2P)
+{
+ struct nouveau_pll_vals coef;
+ int ret;
+
+ ret = get_pll_limits(dev, reg, pll);
if (ret)
- return ret;
+ return 0;
+
+ pll->vco2.maxfreq = 0;
+ pll->refclk = read_pll_ref(dev, reg);
+ if (!pll->refclk)
+ return 0;
- reg0 = nv_rd32(dev, pll.reg + 0);
- reg1 = nv_rd32(dev, pll.reg + 4);
+ ret = nouveau_calc_pll_mnp(dev, pll, clk, &coef);
+ if (ret == 0)
+ return 0;
- if ((reg0 & 0x80000000) == 0) {
- if (id == PLL_SHADER) {
- NV_DEBUG(dev, "Shader PLL is disabled. "
- "Shader clock is twice the core\n");
- ret = nv50_pm_clock_get(dev, PLL_CORE);
- if (ret > 0)
- return ret << 1;
- } else if (id == PLL_MEMORY) {
- NV_DEBUG(dev, "Memory PLL is disabled. "
- "Memory clock is equal to the ref_clk\n");
- return pll.refclk;
+ *N1 = coef.N1;
+ *M1 = coef.M1;
+ *log2P = coef.log2P;
+ return ret;
+}
+
+static inline u32
+calc_div(u32 src, u32 target, int *div)
+{
+ u32 clk0 = src, clk1 = src;
+ for (*div = 0; *div <= 7; (*div)++) {
+ if (clk0 <= target) {
+ clk1 = clk0 << (*div ? 1 : 0);
+ break;
}
+ clk0 >>= 1;
+ }
+
+ if (target - clk0 <= clk1 - target)
+ return clk0;
+ (*div)--;
+ return clk1;
+}
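
A quick worked example of the divider search (values for illustration only): calc_div(700000, 200000, &div) halves 700000 to 350000 and then 175000, stopping at div = 2; since 200000 - 175000 <= 350000 - 200000 it returns 175000, i.e. the source clock right-shifted by the returned divider.
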
+
+static inline u32
+clk_same(u32 a, u32 b)
+{
+ return ((a / 1000) == (b / 1000));
+}
+
+static int
+calc_mclk(struct drm_device *dev, u32 freq, struct hwsq_ucode *hwsq)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct pll_lims pll;
+ u32 mast = nv_rd32(dev, 0x00c040);
+ u32 ctrl = nv_rd32(dev, 0x004008);
+ u32 coef = nv_rd32(dev, 0x00400c);
+ u32 orig = ctrl;
+ u32 crtc_mask = 0;
+ int N, M, P;
+ int ret, i;
+
+ /* use pcie refclock if possible, otherwise use mpll */
+ ctrl &= ~0x81ff0200;
+ if (clk_same(freq, read_clk(dev, clk_src_href))) {
+ ctrl |= 0x00000200 | (pll.log2p_bias << 19);
+ } else {
+ ret = calc_pll(dev, 0x4008, &pll, freq, &N, &M, &P);
+ if (ret == 0)
+ return -EINVAL;
+
+ ctrl |= 0x80000000 | (P << 22) | (P << 16);
+ ctrl |= pll.log2p_bias << 19;
+ coef = (N << 8) | M;
+ }
+
+ mast &= ~0xc0000000; /* get MCLK_2 from HREF */
+ mast |= 0x0000c000; /* use MCLK_2 as MPLL_BYPASS clock */
+
+ /* determine active crtcs */
+ for (i = 0; i < 2; i++) {
+ if (nv_rd32(dev, NV50_PDISPLAY_CRTC_C(i, CLOCK)))
+ crtc_mask |= (1 << i);
+ }
+
+ /* build the ucode which will reclock the memory for us */
+ hwsq_init(hwsq);
+ if (crtc_mask) {
+ hwsq_op5f(hwsq, crtc_mask, 0x00); /* wait for scanout */
+ hwsq_op5f(hwsq, crtc_mask, 0x01); /* wait for vblank */
}
+ if (dev_priv->chipset >= 0x92)
+ hwsq_wr32(hwsq, 0x611200, 0x00003300); /* disable scanout */
+ hwsq_setf(hwsq, 0x10, 0); /* disable bus access */
+ hwsq_op5f(hwsq, 0x00, 0x01); /* no idea :s */
+
+ /* prepare memory controller */
+ hwsq_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge banks and idle */
+ hwsq_wr32(hwsq, 0x1002d0, 0x00000001); /* force refresh */
+ hwsq_wr32(hwsq, 0x100210, 0x00000000); /* stop the automatic refresh */
+ hwsq_wr32(hwsq, 0x1002dc, 0x00000001); /* start self refresh mode */
- P = (reg0 & 0x00070000) >> 16;
- N = (reg1 & 0x0000ff00) >> 8;
- M = (reg1 & 0x000000ff);
+ /* reclock memory */
+ hwsq_wr32(hwsq, 0xc040, mast);
+ hwsq_wr32(hwsq, 0x4008, orig | 0x00000200); /* bypass MPLL */
+ hwsq_wr32(hwsq, 0x400c, coef);
+ hwsq_wr32(hwsq, 0x4008, ctrl);
- return ((pll.refclk * N / M) >> P);
+ /* restart memory controller */
+ hwsq_wr32(hwsq, 0x1002d4, 0x00000001); /* precharge banks and idle */
+ hwsq_wr32(hwsq, 0x1002dc, 0x00000000); /* stop self refresh mode */
+ hwsq_wr32(hwsq, 0x100210, 0x80000000); /* restart automatic refresh */
+ hwsq_usec(hwsq, 12); /* wait for the PLL to stabilize */
+
+ hwsq_usec(hwsq, 48); /* may be unnecessary: causes flickering */
+ hwsq_setf(hwsq, 0x10, 1); /* enable bus access */
+ hwsq_op5f(hwsq, 0x00, 0x00); /* no idea, reverse of 0x00, 0x01? */
+ if (dev_priv->chipset >= 0x92)
+ hwsq_wr32(hwsq, 0x611200, 0x00003330); /* enable scanout */
+ hwsq_fini(hwsq);
+ return 0;
}
void *
-nv50_pm_clock_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl,
- u32 id, int khz)
+nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
- struct nv50_pm_state *state;
- int dummy, ret;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_pm_state *info;
+ struct pll_lims pll;
+ int clk, ret = -EINVAL;
+ int N, M, P1, P2;
+ u32 out;
- state = kzalloc(sizeof(*state), GFP_KERNEL);
- if (!state)
+ if (dev_priv->chipset == 0xaa ||
+ dev_priv->chipset == 0xac)
+ return ERR_PTR(-ENODEV);
+
+ info = kmalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
return ERR_PTR(-ENOMEM);
- state->type = id;
- state->perflvl = perflvl;
- ret = get_pll_limits(dev, id, &state->pll);
- if (ret < 0) {
- kfree(state);
- return (ret == -ENOENT) ? NULL : ERR_PTR(ret);
+ /* core: for the moment at least, always use nvpll */
+ clk = calc_pll(dev, 0x4028, &pll, perflvl->core, &N, &M, &P1);
+ if (clk == 0)
+ goto error;
+
+ info->emast = 0x00000003;
+ info->nctrl = 0x80000000 | (P1 << 19) | (P1 << 16);
+ info->ncoef = (N << 8) | M;
+
+ /* shader: tie to nvclk if possible, otherwise use spll. Have to be
+ * very careful that the shader clock is at least twice the core, or
+ * some chipsets will be very unhappy. I expect most or all of these
+ * cases will be handled by tying to nvclk, but there may be corner
+ * cases.
+ */
+ if (P1-- && perflvl->shader == (perflvl->core << 1)) {
+ info->emast |= 0x00000020;
+ info->sctrl = 0x00000000 | (P1 << 19) | (P1 << 16);
+ info->scoef = nv_rd32(dev, 0x004024);
+ } else {
+ clk = calc_pll(dev, 0x4020, &pll, perflvl->shader, &N, &M, &P1);
+ if (clk == 0)
+ goto error;
+
+ info->emast |= 0x00000030;
+ info->sctrl = 0x80000000 | (P1 << 19) | (P1 << 16);
+ info->scoef = (N << 8) | M;
+ }
+
+ /* memory: build hwsq ucode which we'll use to reclock memory */
+ info->mclk_hwsq.len = 0;
+ if (perflvl->memory) {
+ clk = calc_mclk(dev, perflvl->memory, &info->mclk_hwsq);
+ if (clk < 0) {
+ ret = clk;
+ goto error;
+ }
+
+ info->mscript = perflvl->memscript;
+ }
+
+ /* vdec: avoid modifying xpll until we know exactly how the other
+ * clock domains work; I suspect at least some of them can also be
+ * tied to xpll...
+ */
+ info->amast = nv_rd32(dev, 0x00c040);
+ info->pdivs = read_div(dev);
+ if (perflvl->vdec) {
+ /* see how close we can get using nvclk as a source */
+ clk = calc_div(perflvl->core, perflvl->vdec, &P1);
+
+ /* see how close we can get using xpll/hclk as a source */
+ if (dev_priv->chipset != 0x98)
+ out = read_pll(dev, 0x004030);
+ else
+ out = read_clk(dev, clk_src_hclkm3d2);
+ out = calc_div(out, perflvl->vdec, &P2);
+
+ /* select whichever gets us closest */
+ info->amast &= ~0x00000c00;
+ info->pdivs &= ~0x00000700;
+ if (abs((int)perflvl->vdec - clk) <=
+ abs((int)perflvl->vdec - out)) {
+ if (dev_priv->chipset != 0x98)
+ info->amast |= 0x00000c00;
+ info->pdivs |= P1 << 8;
+ } else {
+ info->amast |= 0x00000800;
+ info->pdivs |= P2 << 8;
+ }
+ }
+
+ /* dom6: not clear what this clock domain is, but we're limited to various combinations
+ * of the host clock frequency
+ */
+ if (perflvl->dom6) {
+ info->amast &= ~0x0c000000;
+ if (clk_same(perflvl->dom6, read_clk(dev, clk_src_href))) {
+ info->amast |= 0x00000000;
+ } else
+ if (clk_same(perflvl->dom6, read_clk(dev, clk_src_hclk))) {
+ info->amast |= 0x08000000;
+ } else {
+ clk = read_clk(dev, clk_src_hclk) * 3;
+ clk = calc_div(clk, perflvl->dom6, &P1);
+
+ info->amast |= 0x0c000000;
+ info->pdivs = (info->pdivs & ~0x00000007) | P1;
+ }
}
- ret = nv50_calc_pll(dev, &state->pll, khz, &state->N, &state->M,
- &dummy, &dummy, &state->P);
- if (ret < 0) {
- kfree(state);
- return ERR_PTR(ret);
+ return info;
+error:
+ kfree(info);
+ return ERR_PTR(ret);
+}
+
+static int
+prog_mclk(struct drm_device *dev, struct hwsq_ucode *hwsq)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 hwsq_data, hwsq_kick;
+ int i;
+
+ if (dev_priv->chipset < 0x90) {
+ hwsq_data = 0x001400;
+ hwsq_kick = 0x00000003;
+ } else {
+ hwsq_data = 0x080000;
+ hwsq_kick = 0x00000001;
}
- return state;
+ /* upload hwsq ucode */
+ nv_mask(dev, 0x001098, 0x00000008, 0x00000000);
+ nv_wr32(dev, 0x001304, 0x00000000);
+ for (i = 0; i < hwsq->len / 4; i++)
+ nv_wr32(dev, hwsq_data + (i * 4), hwsq->ptr.u32[i]);
+ nv_mask(dev, 0x001098, 0x00000018, 0x00000018);
+
+ /* launch, and wait for completion */
+ nv_wr32(dev, 0x00130c, hwsq_kick);
+ if (!nv_wait(dev, 0x001308, 0x00000100, 0x00000000)) {
+ NV_ERROR(dev, "hwsq ucode exec timed out\n");
+ NV_ERROR(dev, "0x001308: 0x%08x\n", nv_rd32(dev, 0x001308));
+ for (i = 0; i < hwsq->len / 4; i++) {
+ NV_ERROR(dev, "0x%06x: 0x%08x\n", 0x1400 + (i * 4),
+ nv_rd32(dev, 0x001400 + (i * 4)));
+ }
+
+ return -EIO;
+ }
+
+ return 0;
}
-void
-nv50_pm_clock_set(struct drm_device *dev, void *pre_state)
+int
+nv50_pm_clocks_set(struct drm_device *dev, void *data)
{
- struct nv50_pm_state *state = pre_state;
- struct nouveau_pm_level *perflvl = state->perflvl;
- u32 reg = state->pll.reg, tmp;
- struct bit_entry BIT_M;
- u16 script;
- int N = state->N;
- int M = state->M;
- int P = state->P;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_pm_state *info = data;
+ struct bit_entry M;
+ int ret = 0;
- if (state->type == PLL_MEMORY && perflvl->memscript &&
- bit_table(dev, 'M', &BIT_M) == 0 &&
- BIT_M.version == 1 && BIT_M.length >= 0x0b) {
- script = ROM16(BIT_M.data[0x05]);
- if (script)
- nouveau_bios_run_init_table(dev, script, NULL, -1);
- script = ROM16(BIT_M.data[0x07]);
- if (script)
- nouveau_bios_run_init_table(dev, script, NULL, -1);
- script = ROM16(BIT_M.data[0x09]);
- if (script)
- nouveau_bios_run_init_table(dev, script, NULL, -1);
+ /* halt and idle execution engines */
+ nv_mask(dev, 0x002504, 0x00000001, 0x00000001);
+ if (!nv_wait(dev, 0x002504, 0x00000010, 0x00000010))
+ goto error;
- nouveau_bios_run_init_table(dev, perflvl->memscript, NULL, -1);
+ /* memory: it is *very* important we change this first: the ucode
+ * we build in pre() now has hardcoded 0xc040 values, which can't
+ * change before we execute it or the engine clocks may end up
+ * messed up.
+ */
+ if (info->mclk_hwsq.len) {
+ /* execute some scripts that do ??? from the vbios.. */
+ if (!bit_table(dev, 'M', &M) && M.version == 1) {
+ if (M.length >= 6)
+ nouveau_bios_init_exec(dev, ROM16(M.data[5]));
+ if (M.length >= 8)
+ nouveau_bios_init_exec(dev, ROM16(M.data[7]));
+ if (M.length >= 10)
+ nouveau_bios_init_exec(dev, ROM16(M.data[9]));
+ nouveau_bios_init_exec(dev, info->mscript);
+ }
+
+ ret = prog_mclk(dev, &info->mclk_hwsq);
+ if (ret)
+ goto resume;
}
- if (state->type == PLL_MEMORY) {
- nv_wr32(dev, 0x100210, 0);
- nv_wr32(dev, 0x1002dc, 1);
+ /* reclock vdec/dom6 */
+ nv_mask(dev, 0x00c040, 0x00000c00, 0x00000000);
+ switch (dev_priv->chipset) {
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ nv_mask(dev, 0x004800, 0x00000707, info->pdivs);
+ break;
+ default:
+ nv_mask(dev, 0x004700, 0x00000707, info->pdivs);
+ break;
}
+ nv_mask(dev, 0x00c040, 0x0c000c00, info->amast);
- tmp = nv_rd32(dev, reg + 0) & 0xfff8ffff;
- tmp |= 0x80000000 | (P << 16);
- nv_wr32(dev, reg + 0, tmp);
- nv_wr32(dev, reg + 4, (N << 8) | M);
+ /* core/shader: make sure sclk/nvclk are disconnected from their
+ * plls (nvclk to dom6, sclk to hclk), modify the plls, and
+ * reconnect sclk/nvclk to their new clock source
+ */
+ if (dev_priv->chipset < 0x92)
+ nv_mask(dev, 0x00c040, 0x001000b0, 0x00100080); /* grrr! */
+ else
+ nv_mask(dev, 0x00c040, 0x000000b3, 0x00000081);
+ nv_mask(dev, 0x004020, 0xc03f0100, info->sctrl);
+ nv_wr32(dev, 0x004024, info->scoef);
+ nv_mask(dev, 0x004028, 0xc03f0100, info->nctrl);
+ nv_wr32(dev, 0x00402c, info->ncoef);
+ nv_mask(dev, 0x00c040, 0x00100033, info->emast);
+
+ goto resume;
+error:
+ ret = -EBUSY;
+resume:
+ nv_mask(dev, 0x002504, 0x00000001, 0x00000000);
+ kfree(info);
+ return ret;
+}
- if (state->type == PLL_MEMORY) {
- nv_wr32(dev, 0x1002dc, 0);
- nv_wr32(dev, 0x100210, 0x80000000);
+static int
+pwm_info(struct drm_device *dev, int *line, int *ctrl, int *indx)
+{
+ if (*line == 0x04) {
+ *ctrl = 0x00e100;
+ *line = 4;
+ *indx = 0;
+ } else
+ if (*line == 0x09) {
+ *ctrl = 0x00e100;
+ *line = 9;
+ *indx = 1;
+ } else
+ if (*line == 0x10) {
+ *ctrl = 0x00e28c;
+ *line = 0;
+ *indx = 0;
+ } else {
+ NV_ERROR(dev, "unknown pwm ctrl for gpio %d\n", *line);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+int
+nv50_pm_pwm_get(struct drm_device *dev, int line, u32 *divs, u32 *duty)
+{
+ int ctrl, id, ret = pwm_info(dev, &line, &ctrl, &id);
+ if (ret)
+ return ret;
+
+ if (nv_rd32(dev, ctrl) & (1 << line)) {
+ *divs = nv_rd32(dev, 0x00e114 + (id * 8));
+ *duty = nv_rd32(dev, 0x00e118 + (id * 8));
+ return 0;
}
- kfree(state);
+ return -EINVAL;
}
+int
+nv50_pm_pwm_set(struct drm_device *dev, int line, u32 divs, u32 duty)
+{
+ int ctrl, id, ret = pwm_info(dev, &line, &ctrl, &id);
+ if (ret)
+ return ret;
+
+ nv_mask(dev, ctrl, 0x00010001 << line, 0x00000001 << line);
+ nv_wr32(dev, 0x00e114 + (id * 8), divs);
+ nv_wr32(dev, 0x00e118 + (id * 8), duty | 0x80000000);
+ return 0;
+}
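
A minimal usage sketch for the new nv50 PWM interface, keeping whatever divider the VBIOS programmed and only changing the duty cycle; set_fan_duty() is a hypothetical caller, not part of the patch, and assumes duty is expressed as a fraction of divs:

	/* sketch only: program a 40% duty cycle on PWM line 9 */
	static int set_fan_duty(struct drm_device *dev)
	{
		u32 divs, duty;
		int ret = nv50_pm_pwm_get(dev, 9, &divs, &duty);
		if (ret)
			return ret;
		return nv50_pm_pwm_set(dev, 9, divs, divs * 40 / 100);
	}
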
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c
index 2633aa8..c4423ba 100644
--- a/drivers/gpu/drm/nouveau/nv50_sor.c
+++ b/drivers/gpu/drm/nouveau/nv50_sor.c
@@ -60,6 +60,8 @@ nv50_sor_disconnect(struct drm_encoder *encoder)
BEGIN_RING(evo, 0, NV50_EVO_UPDATE, 1);
OUT_RING (evo, 0);
+ nouveau_hdmi_mode_set(encoder, NULL);
+
nv_encoder->crtc = NULL;
nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
}
@@ -172,6 +174,12 @@ nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
static void
nv50_sor_prepare(struct drm_encoder *encoder)
{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ nv50_sor_disconnect(encoder);
+ if (nv_encoder->dcb->type == OUTPUT_DP) {
+ /* avoid race between link training and supervisor intr */
+ nv50_display_sync(encoder->dev);
+ }
}
static void
@@ -180,8 +188,8 @@ nv50_sor_commit(struct drm_encoder *encoder)
}
static void
-nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
+ struct drm_display_mode *mode)
{
struct nouveau_channel *evo = nv50_display(encoder->dev)->master;
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
@@ -193,24 +201,27 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
NV_DEBUG_KMS(dev, "or %d type %d -> crtc %d\n",
nv_encoder->or, nv_encoder->dcb->type, crtc->index);
+ nv_encoder->crtc = encoder->crtc;
switch (nv_encoder->dcb->type) {
case OUTPUT_TMDS:
if (nv_encoder->dcb->sorconf.link & 1) {
- if (adjusted_mode->clock < 165000)
+ if (mode->clock < 165000)
mode_ctl = 0x0100;
else
mode_ctl = 0x0500;
} else
mode_ctl = 0x0200;
+
+ nouveau_hdmi_mode_set(encoder, mode);
break;
case OUTPUT_DP:
nv_connector = nouveau_encoder_connector_get(nv_encoder);
if (nv_connector && nv_connector->base.display_info.bpc == 6) {
- nv_encoder->dp.datarate = crtc->mode->clock * 18 / 8;
+ nv_encoder->dp.datarate = mode->clock * 18 / 8;
mode_ctl |= 0x00020000;
} else {
- nv_encoder->dp.datarate = crtc->mode->clock * 24 / 8;
+ nv_encoder->dp.datarate = mode->clock * 24 / 8;
mode_ctl |= 0x00050000;
}
@@ -228,10 +239,10 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
else
mode_ctl |= NV50_EVO_SOR_MODE_CTRL_CRTC0;
- if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NHSYNC;
- if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
mode_ctl |= NV50_EVO_SOR_MODE_CTRL_NVSYNC;
nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
@@ -239,12 +250,11 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
ret = RING_SPACE(evo, 2);
if (ret) {
NV_ERROR(dev, "no space while connecting SOR\n");
+ nv_encoder->crtc = NULL;
return;
}
BEGIN_RING(evo, 0, NV50_EVO_SOR(nv_encoder->or, MODE_CTRL), 1);
OUT_RING(evo, mode_ctl);
-
- nv_encoder->crtc = encoder->crtc;
}
static struct drm_crtc *
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 40b84f2..6f38cea 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -48,7 +48,7 @@ nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
phys |= 0x60;
else if (coverage <= 64 * 1024 * 1024)
phys |= 0x40;
- else if (coverage < 128 * 1024 * 1024)
+ else if (coverage <= 128 * 1024 * 1024)
phys |= 0x20;
}
diff --git a/drivers/gpu/drm/nouveau/nv84_bsp.c b/drivers/gpu/drm/nouveau/nv84_bsp.c
new file mode 100644
index 0000000..7487573
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv84_bsp.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include "nouveau_vm.h"
+#include "nouveau_ramht.h"
+
+/*XXX: This stub is currently used on NV98+ also; as soon as it becomes
+ * more than just an enable/disable stub, it needs to be split out to
+ * nv98_bsp.c...
+ */
+
+struct nv84_bsp_engine {
+ struct nouveau_exec_engine base;
+};
+
+static int
+nv84_bsp_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ if (!(nv_rd32(dev, 0x000200) & 0x00008000))
+ return 0;
+
+ nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
+ return 0;
+}
+
+static int
+nv84_bsp_init(struct drm_device *dev, int engine)
+{
+ nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00008000, 0x00008000);
+ return 0;
+}
+
+static void
+nv84_bsp_destroy(struct drm_device *dev, int engine)
+{
+ struct nv84_bsp_engine *pbsp = nv_engine(dev, engine);
+
+ NVOBJ_ENGINE_DEL(dev, BSP);
+
+ kfree(pbsp);
+}
+
+int
+nv84_bsp_create(struct drm_device *dev)
+{
+ struct nv84_bsp_engine *pbsp;
+
+ pbsp = kzalloc(sizeof(*pbsp), GFP_KERNEL);
+ if (!pbsp)
+ return -ENOMEM;
+
+ pbsp->base.destroy = nv84_bsp_destroy;
+ pbsp->base.init = nv84_bsp_init;
+ pbsp->base.fini = nv84_bsp_fini;
+
+ NVOBJ_ENGINE_ADD(dev, BSP, &pbsp->base);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv84_vp.c b/drivers/gpu/drm/nouveau/nv84_vp.c
new file mode 100644
index 0000000..6570d30
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv84_vp.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include "nouveau_vm.h"
+#include "nouveau_ramht.h"
+
+/*XXX: This stub is currently used on NV98+ also; as soon as it becomes
+ * more than just an enable/disable stub, it needs to be split out to
+ * nv98_vp.c...
+ */
+
+struct nv84_vp_engine {
+ struct nouveau_exec_engine base;
+};
+
+static int
+nv84_vp_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ if (!(nv_rd32(dev, 0x000200) & 0x00020000))
+ return 0;
+
+ nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
+ return 0;
+}
+
+static int
+nv84_vp_init(struct drm_device *dev, int engine)
+{
+ nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00020000, 0x00020000);
+ return 0;
+}
+
+static void
+nv84_vp_destroy(struct drm_device *dev, int engine)
+{
+ struct nv84_vp_engine *pvp = nv_engine(dev, engine);
+
+ NVOBJ_ENGINE_DEL(dev, VP);
+
+ kfree(pvp);
+}
+
+int
+nv84_vp_create(struct drm_device *dev)
+{
+ struct nv84_vp_engine *pvp;
+
+ pvp = kzalloc(sizeof(*pvp), GFP_KERNEL);
+ if (!pvp)
+ return -ENOMEM;
+
+ pvp->base.destroy = nv84_vp_destroy;
+ pvp->base.init = nv84_vp_init;
+ pvp->base.fini = nv84_vp_fini;
+
+ NVOBJ_ENGINE_ADD(dev, VP, &pvp->base);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv98_crypt.c b/drivers/gpu/drm/nouveau/nv98_crypt.c
new file mode 100644
index 0000000..db94ff0
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv98_crypt.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include "nouveau_vm.h"
+#include "nouveau_ramht.h"
+
+struct nv98_crypt_engine {
+ struct nouveau_exec_engine base;
+};
+
+static int
+nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ if (!(nv_rd32(dev, 0x000200) & 0x00004000))
+ return 0;
+
+ nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
+ return 0;
+}
+
+static int
+nv98_crypt_init(struct drm_device *dev, int engine)
+{
+ nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
+ return 0;
+}
+
+static void
+nv98_crypt_destroy(struct drm_device *dev, int engine)
+{
+ struct nv98_crypt_engine *pcrypt = nv_engine(dev, engine);
+
+ NVOBJ_ENGINE_DEL(dev, CRYPT);
+
+ kfree(pcrypt);
+}
+
+int
+nv98_crypt_create(struct drm_device *dev)
+{
+ struct nv98_crypt_engine *pcrypt;
+
+ pcrypt = kzalloc(sizeof(*pcrypt), GFP_KERNEL);
+ if (!pcrypt)
+ return -ENOMEM;
+
+ pcrypt->base.destroy = nv98_crypt_destroy;
+ pcrypt->base.init = nv98_crypt_init;
+ pcrypt->base.fini = nv98_crypt_fini;
+
+ NVOBJ_ENGINE_ADD(dev, CRYPT, &pcrypt->base);
+ return 0;
+}
diff --git a/drivers/gpu/drm/nouveau/nv98_ppp.c b/drivers/gpu/drm/nouveau/nv98_ppp.c
new file mode 100644
index 0000000..a987dd6
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv98_ppp.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include "nouveau_vm.h"
+#include "nouveau_ramht.h"
+
+struct nv98_ppp_engine {
+ struct nouveau_exec_engine base;
+};
+
+static int
+nv98_ppp_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ if (!(nv_rd32(dev, 0x000200) & 0x00000002))
+ return 0;
+
+ nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
+ return 0;
+}
+
+static int
+nv98_ppp_init(struct drm_device *dev, int engine)
+{
+ nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00000002, 0x00000002);
+ return 0;
+}
+
+static void
+nv98_ppp_destroy(struct drm_device *dev, int engine)
+{
+ struct nv98_ppp_engine *pppp = nv_engine(dev, engine);
+
+ NVOBJ_ENGINE_DEL(dev, PPP);
+
+ kfree(pppp);
+}
+
+int
+nv98_ppp_create(struct drm_device *dev)
+{
+ struct nv98_ppp_engine *pppp;
+
+ pppp = kzalloc(sizeof(*pppp), GFP_KERNEL);
+ if (!pppp)
+ return -ENOMEM;
+
+ pppp->base.destroy = nv98_ppp_destroy;
+ pppp->base.init = nv98_ppp_init;
+ pppp->base.fini = nv98_ppp_fini;
+
+ NVOBJ_ENGINE_ADD(dev, PPP, &pppp->base);
+ return 0;
+}
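
The four engine stubs added above (nv84_bsp, nv84_vp, nv98_crypt, nv98_ppp) are identical apart from the PMC enable bit they toggle in register 0x000200: 0x00008000 for BSP, 0x00020000 for VP, 0x00004000 for CRYPT and 0x00000002 for PPP. A hypothetical shared helper pair, shown only to illustrate the common enable/disable pattern (not part of this patch):

static int
engine_stub_init(struct drm_device *dev, u32 pmc_bit)
{
	nv_mask(dev, 0x000200, pmc_bit, 0x00000000);	/* hold engine in reset */
	nv_mask(dev, 0x000200, pmc_bit, pmc_bit);	/* release it again */
	return 0;
}

static int
engine_stub_fini(struct drm_device *dev, u32 pmc_bit)
{
	/* nothing to do if the engine is already disabled */
	if (!(nv_rd32(dev, 0x000200) & pmc_bit))
		return 0;

	nv_mask(dev, 0x000200, pmc_bit, 0x00000000);
	return 0;
}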
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc b/drivers/gpu/drm/nouveau/nva3_copy.fuc
index eaf35f8..abc3662 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc
+++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc
@@ -31,8 +31,9 @@
*/
ifdef(`NVA3',
-.section nva3_pcopy_data,
-.section nvc0_pcopy_data
+.section #nva3_pcopy_data
+,
+.section #nvc0_pcopy_data
)
ctx_object: .b32 0
@@ -42,7 +43,7 @@ ctx_dma_query: .b32 0
ctx_dma_src: .b32 0
ctx_dma_dst: .b32 0
,)
-.equ ctx_dma_count 3
+.equ #ctx_dma_count 3
ctx_query_address_high: .b32 0
ctx_query_address_low: .b32 0
ctx_query_counter: .b32 0
@@ -78,64 +79,65 @@ ctx_ycnt: .b32 0
dispatch_table:
// mthd 0x0000, NAME
.b16 0x000 1
-.b32 ctx_object ~0xffffffff
+.b32 #ctx_object ~0xffffffff
// mthd 0x0100, NOP
.b16 0x040 1
-.b32 0x00010000 + cmd_nop ~0xffffffff
+.b32 0x00010000 + #cmd_nop ~0xffffffff
// mthd 0x0140, PM_TRIGGER
.b16 0x050 1
-.b32 0x00010000 + cmd_pm_trigger ~0xffffffff
+.b32 0x00010000 + #cmd_pm_trigger ~0xffffffff
ifdef(`NVA3', `
// mthd 0x0180-0x018c, DMA_
-.b16 0x060 ctx_dma_count
+.b16 0x060 #ctx_dma_count
dispatch_dma:
-.b32 0x00010000 + cmd_dma ~0xffffffff
-.b32 0x00010000 + cmd_dma ~0xffffffff
-.b32 0x00010000 + cmd_dma ~0xffffffff
+.b32 0x00010000 + #cmd_dma ~0xffffffff
+.b32 0x00010000 + #cmd_dma ~0xffffffff
+.b32 0x00010000 + #cmd_dma ~0xffffffff
',)
// mthd 0x0200-0x0218, SRC_TILE
.b16 0x80 7
-.b32 ctx_src_tile_mode ~0x00000fff
-.b32 ctx_src_xsize ~0x0007ffff
-.b32 ctx_src_ysize ~0x00001fff
-.b32 ctx_src_zsize ~0x000007ff
-.b32 ctx_src_zoff ~0x00000fff
-.b32 ctx_src_xoff ~0x0007ffff
-.b32 ctx_src_yoff ~0x00001fff
+.b32 #ctx_src_tile_mode ~0x00000fff
+.b32 #ctx_src_xsize ~0x0007ffff
+.b32 #ctx_src_ysize ~0x00001fff
+.b32 #ctx_src_zsize ~0x000007ff
+.b32 #ctx_src_zoff ~0x00000fff
+.b32 #ctx_src_xoff ~0x0007ffff
+.b32 #ctx_src_yoff ~0x00001fff
// mthd 0x0220-0x0238, DST_TILE
.b16 0x88 7
-.b32 ctx_dst_tile_mode ~0x00000fff
-.b32 ctx_dst_xsize ~0x0007ffff
-.b32 ctx_dst_ysize ~0x00001fff
-.b32 ctx_dst_zsize ~0x000007ff
-.b32 ctx_dst_zoff ~0x00000fff
-.b32 ctx_dst_xoff ~0x0007ffff
-.b32 ctx_dst_yoff ~0x00001fff
+.b32 #ctx_dst_tile_mode ~0x00000fff
+.b32 #ctx_dst_xsize ~0x0007ffff
+.b32 #ctx_dst_ysize ~0x00001fff
+.b32 #ctx_dst_zsize ~0x000007ff
+.b32 #ctx_dst_zoff ~0x00000fff
+.b32 #ctx_dst_xoff ~0x0007ffff
+.b32 #ctx_dst_yoff ~0x00001fff
// mthd 0x0300-0x0304, EXEC, WRCACHE_FLUSH
.b16 0xc0 2
-.b32 0x00010000 + cmd_exec ~0xffffffff
-.b32 0x00010000 + cmd_wrcache_flush ~0xffffffff
+.b32 0x00010000 + #cmd_exec ~0xffffffff
+.b32 0x00010000 + #cmd_wrcache_flush ~0xffffffff
// mthd 0x030c-0x0340, various stuff
.b16 0xc3 14
-.b32 ctx_src_address_high ~0x000000ff
-.b32 ctx_src_address_low ~0xfffffff0
-.b32 ctx_dst_address_high ~0x000000ff
-.b32 ctx_dst_address_low ~0xfffffff0
-.b32 ctx_src_pitch ~0x0007ffff
-.b32 ctx_dst_pitch ~0x0007ffff
-.b32 ctx_xcnt ~0x0000ffff
-.b32 ctx_ycnt ~0x00001fff
-.b32 ctx_format ~0x0333ffff
-.b32 ctx_swz_const0 ~0xffffffff
-.b32 ctx_swz_const1 ~0xffffffff
-.b32 ctx_query_address_high ~0x000000ff
-.b32 ctx_query_address_low ~0xffffffff
-.b32 ctx_query_counter ~0xffffffff
+.b32 #ctx_src_address_high ~0x000000ff
+.b32 #ctx_src_address_low ~0xfffffff0
+.b32 #ctx_dst_address_high ~0x000000ff
+.b32 #ctx_dst_address_low ~0xfffffff0
+.b32 #ctx_src_pitch ~0x0007ffff
+.b32 #ctx_dst_pitch ~0x0007ffff
+.b32 #ctx_xcnt ~0x0000ffff
+.b32 #ctx_ycnt ~0x00001fff
+.b32 #ctx_format ~0x0333ffff
+.b32 #ctx_swz_const0 ~0xffffffff
+.b32 #ctx_swz_const1 ~0xffffffff
+.b32 #ctx_query_address_high ~0x000000ff
+.b32 #ctx_query_address_low ~0xffffffff
+.b32 #ctx_query_counter ~0xffffffff
.b16 0x800 0
ifdef(`NVA3',
-.section nva3_pcopy_code,
-.section nvc0_pcopy_code
+.section #nva3_pcopy_code
+,
+.section #nvc0_pcopy_code
)
main:
@@ -143,12 +145,12 @@ main:
mov $sp $r0
// setup i0 handler and route fifo and ctxswitch to it
- mov $r1 ih
+ mov $r1 #ih
mov $iv0 $r1
mov $r1 0x400
movw $r2 0xfff3
sethi $r2 0
- iowr I[$r2 + 0x300] $r2
+ iowr I[$r1 + 0x300] $r2
// enable interrupts
or $r2 0xc
@@ -164,19 +166,19 @@ main:
bset $flags $p0
spin:
sleep $p0
- bra spin
+ bra #spin
// i0 handler
ih:
iord $r1 I[$r0 + 0x200]
and $r2 $r1 0x00000008
- bra e ih_no_chsw
- call chsw
+ bra e #ih_no_chsw
+ call #chsw
ih_no_chsw:
and $r2 $r1 0x00000004
- bra e ih_no_cmd
- call dispatch
+ bra e #ih_no_cmd
+ call #dispatch
ih_no_cmd:
and $r1 $r1 0x0000000c
@@ -235,9 +237,9 @@ ifdef(`NVA3', `
sethi $r4 0x60000
// swap!
- bra $p1 swctx_load
+ bra $p1 #swctx_load
xdst $r0 $r4
- bra swctx_done
+ bra #swctx_done
swctx_load:
xdld $r0 $r4
swctx_done:
@@ -251,9 +253,9 @@ chsw:
// if it's active, unload it and return
xbit $r15 $r3 0x1e
- bra e chsw_no_unload
+ bra e #chsw_no_unload
bclr $flags $p1
- call swctx
+ call #swctx
bclr $r3 0x1e
iowr I[$r2] $r3
mov $r4 1
@@ -266,20 +268,20 @@ chsw:
// is there a channel waiting to be loaded?
xbit $r13 $r3 0x1e
- bra e chsw_finish_load
+ bra e #chsw_finish_load
bset $flags $p1
- call swctx
+ call #swctx
ifdef(`NVA3',
// load dma objects back into TARGET regs
- mov $r5 ctx_dma
- mov $r6 ctx_dma_count
+ mov $r5 #ctx_dma
+ mov $r6 #ctx_dma_count
chsw_load_ctx_dma:
ld b32 $r7 D[$r5 + $r6 * 4]
add b32 $r8 $r6 0x180
shl b32 $r8 8
iowr I[$r8] $r7
sub b32 $r6 1
- bra nc chsw_load_ctx_dma
+ bra nc #chsw_load_ctx_dma
,)
chsw_finish_load:
@@ -297,7 +299,7 @@ dispatch:
shl b32 $r2 0x10
// lookup method in the dispatch table, ILLEGAL_MTHD if not found
- mov $r5 dispatch_table
+ mov $r5 #dispatch_table
clear b32 $r6
clear b32 $r7
dispatch_loop:
@@ -305,14 +307,14 @@ dispatch:
ld b16 $r7 D[$r5 + 2]
add b32 $r5 4
cmpu b32 $r4 $r6
- bra c dispatch_illegal_mthd
+ bra c #dispatch_illegal_mthd
add b32 $r7 $r6
cmpu b32 $r4 $r7
- bra c dispatch_valid_mthd
+ bra c #dispatch_valid_mthd
sub b32 $r7 $r6
shl b32 $r7 3
add b32 $r5 $r7
- bra dispatch_loop
+ bra #dispatch_loop
// ensure no bits set in reserved fields, INVALID_BITFIELD
dispatch_valid_mthd:
@@ -322,20 +324,20 @@ dispatch:
ld b32 $r5 D[$r4 + 4]
and $r5 $r3
cmpu b32 $r5 0
- bra ne dispatch_invalid_bitfield
+ bra ne #dispatch_invalid_bitfield
// depending on dispatch flags: execute method, or save data as state
ld b16 $r5 D[$r4 + 0]
ld b16 $r6 D[$r4 + 2]
cmpu b32 $r6 0
- bra ne dispatch_cmd
+ bra ne #dispatch_cmd
st b32 D[$r5] $r3
- bra dispatch_done
+ bra #dispatch_done
dispatch_cmd:
bclr $flags $p1
call $r5
- bra $p1 dispatch_error
- bra dispatch_done
+ bra $p1 #dispatch_error
+ bra #dispatch_done
dispatch_invalid_bitfield:
or $r2 2
@@ -353,7 +355,7 @@ dispatch:
iord $r2 I[$r0 + 0x200]
and $r2 0x40
cmpu b32 $r2 0
- bra ne hostirq_wait
+ bra ne #hostirq_wait
dispatch_done:
mov $r2 0x1d00
@@ -409,10 +411,10 @@ ifdef(`NVA3',
// $r2: hostirq state
// $r3: data
cmd_dma:
- sub b32 $r4 dispatch_dma
+ sub b32 $r4 #dispatch_dma
shr b32 $r4 1
bset $r3 0x1e
- st b32 D[$r4 + ctx_dma] $r3
+ st b32 D[$r4 + #ctx_dma] $r3
add b32 $r4 0x600
shl b32 $r4 6
iowr I[$r4] $r3
@@ -430,7 +432,7 @@ cmd_exec_set_format:
st b32 D[$sp + 0x0c] $r0
// extract cpp, src_ncomp and dst_ncomp from FORMAT
- ld b32 $r4 D[$r0 + ctx_format]
+ ld b32 $r4 D[$r0 + #ctx_format]
extr $r5 $r4 16:17
add b32 $r5 1
extr $r6 $r4 20:21
@@ -448,22 +450,22 @@ cmd_exec_set_format:
clear b32 $r11
bpc_loop:
cmpu b8 $r10 4
- bra nc cmp_c0
+ bra nc #cmp_c0
mulu $r12 $r10 $r5
add b32 $r12 $r11
bset $flags $p2
- bra bpc_next
+ bra #bpc_next
cmp_c0:
- bra ne cmp_c1
+ bra ne #cmp_c1
mov $r12 0x10
add b32 $r12 $r11
- bra bpc_next
+ bra #bpc_next
cmp_c1:
cmpu b8 $r10 6
- bra nc cmp_zero
+ bra nc #cmp_zero
mov $r12 0x14
add b32 $r12 $r11
- bra bpc_next
+ bra #bpc_next
cmp_zero:
mov $r12 0x80
bpc_next:
@@ -471,22 +473,22 @@ cmd_exec_set_format:
add b32 $r8 1
add b32 $r11 1
cmpu b32 $r11 $r5
- bra c bpc_loop
+ bra c #bpc_loop
add b32 $r9 1
cmpu b32 $r9 $r7
- bra c ncomp_loop
+ bra c #ncomp_loop
// SRC_XCNT = (xcnt * src_cpp), or 0 if no src ref in swz (hw will hang)
mulu $r6 $r5
- st b32 D[$r0 + ctx_src_cpp] $r6
- ld b32 $r8 D[$r0 + ctx_xcnt]
+ st b32 D[$r0 + #ctx_src_cpp] $r6
+ ld b32 $r8 D[$r0 + #ctx_xcnt]
mulu $r6 $r8
- bra $p2 dst_xcnt
+ bra $p2 #dst_xcnt
clear b32 $r6
dst_xcnt:
mulu $r7 $r5
- st b32 D[$r0 + ctx_dst_cpp] $r7
+ st b32 D[$r0 + #ctx_dst_cpp] $r7
mulu $r7 $r8
mov $r5 0x810
@@ -494,10 +496,10 @@ cmd_exec_set_format:
iowr I[$r5 + 0x000] $r6
iowr I[$r5 + 0x100] $r7
add b32 $r5 0x800
- ld b32 $r6 D[$r0 + ctx_dst_cpp]
+ ld b32 $r6 D[$r0 + #ctx_dst_cpp]
sub b32 $r6 1
shl b32 $r6 8
- ld b32 $r7 D[$r0 + ctx_src_cpp]
+ ld b32 $r7 D[$r0 + #ctx_src_cpp]
sub b32 $r7 1
or $r6 $r7
iowr I[$r5 + 0x000] $r6
@@ -511,9 +513,9 @@ cmd_exec_set_format:
ld b32 $r6 D[$sp + 0x0c]
iowr I[$r5 + 0x300] $r6
add b32 $r5 0x400
- ld b32 $r6 D[$r0 + ctx_swz_const0]
+ ld b32 $r6 D[$r0 + #ctx_swz_const0]
iowr I[$r5 + 0x000] $r6
- ld b32 $r6 D[$r0 + ctx_swz_const1]
+ ld b32 $r6 D[$r0 + #ctx_swz_const1]
iowr I[$r5 + 0x100] $r6
add $sp 0x10
ret
@@ -543,7 +545,7 @@ cmd_exec_set_format:
//
cmd_exec_set_surface_tiled:
// translate TILE_MODE into Tp, Th, Td shift values
- ld b32 $r7 D[$r5 + ctx_src_tile_mode]
+ ld b32 $r7 D[$r5 + #ctx_src_tile_mode]
extr $r9 $r7 8:11
extr $r8 $r7 4:7
ifdef(`NVA3',
@@ -553,9 +555,9 @@ ifdef(`NVA3',
)
extr $r7 $r7 0:3
cmp b32 $r7 0xe
- bra ne xtile64
+ bra ne #xtile64
mov $r7 4
- bra xtileok
+ bra #xtileok
xtile64:
xbit $r7 $flags $p2
add b32 $r7 17
@@ -565,8 +567,8 @@ ifdef(`NVA3',
// Op = (x * cpp) & ((1 << Tp) - 1)
// Tx = (x * cpp) >> Tp
- ld b32 $r10 D[$r5 + ctx_src_xoff]
- ld b32 $r11 D[$r5 + ctx_src_cpp]
+ ld b32 $r10 D[$r5 + #ctx_src_xoff]
+ ld b32 $r11 D[$r5 + #ctx_src_cpp]
mulu $r10 $r11
mov $r11 1
shl b32 $r11 $r7
@@ -576,7 +578,7 @@ ifdef(`NVA3',
// Tyo = y & ((1 << Th) - 1)
// Ty = y >> Th
- ld b32 $r13 D[$r5 + ctx_src_yoff]
+ ld b32 $r13 D[$r5 + #ctx_src_yoff]
mov $r14 1
shl b32 $r14 $r8
sub b32 $r14 1
@@ -598,8 +600,8 @@ ifdef(`NVA3',
add b32 $r12 $r11
// nTx = ((w * cpp) + ((1 << Tp) - 1) >> Tp)
- ld b32 $r15 D[$r5 + ctx_src_xsize]
- ld b32 $r11 D[$r5 + ctx_src_cpp]
+ ld b32 $r15 D[$r5 + #ctx_src_xsize]
+ ld b32 $r11 D[$r5 + #ctx_src_cpp]
mulu $r15 $r11
mov $r11 1
shl b32 $r11 $r7
@@ -609,7 +611,7 @@ ifdef(`NVA3',
push $r15
// nTy = (h + ((1 << Th) - 1)) >> Th
- ld b32 $r15 D[$r5 + ctx_src_ysize]
+ ld b32 $r15 D[$r5 + #ctx_src_ysize]
mov $r11 1
shl b32 $r11 $r8
sub b32 $r11 1
@@ -629,7 +631,7 @@ ifdef(`NVA3',
// Tz = z >> Td
// Op += Tzo << Tys
// Ts = Tys + Td
- ld b32 $r8 D[$r5 + ctx_src_zoff]
+ ld b32 $r8 D[$r5 + #ctx_src_zoff]
mov $r14 1
shl b32 $r14 $r9
sub b32 $r14 1
@@ -656,8 +658,8 @@ ifdef(`NVA3',
// SRC_ADDRESS_LOW = (Ot + Op) & 0xffffffff
// CFG_ADDRESS_HIGH |= ((Ot + Op) >> 32) << 16
- ld b32 $r7 D[$r5 + ctx_src_address_low]
- ld b32 $r8 D[$r5 + ctx_src_address_high]
+ ld b32 $r7 D[$r5 + #ctx_src_address_low]
+ ld b32 $r8 D[$r5 + #ctx_src_address_high]
add b32 $r10 $r12
add b32 $r7 $r10
adc b32 $r8 0
@@ -677,14 +679,14 @@ cmd_exec_set_surface_linear:
xbit $r6 $flags $p2
add b32 $r6 0x202
shl b32 $r6 8
- ld b32 $r7 D[$r5 + ctx_src_address_low]
+ ld b32 $r7 D[$r5 + #ctx_src_address_low]
iowr I[$r6 + 0x000] $r7
add b32 $r6 0x400
- ld b32 $r7 D[$r5 + ctx_src_address_high]
+ ld b32 $r7 D[$r5 + #ctx_src_address_high]
shl b32 $r7 16
iowr I[$r6 + 0x000] $r7
add b32 $r6 0x400
- ld b32 $r7 D[$r5 + ctx_src_pitch]
+ ld b32 $r7 D[$r5 + #ctx_src_pitch]
iowr I[$r6 + 0x000] $r7
ret
@@ -697,7 +699,7 @@ cmd_exec_wait:
loop:
iord $r1 I[$r0]
and $r1 1
- bra ne loop
+ bra ne #loop
pop $r1
pop $r0
ret
@@ -705,18 +707,18 @@ cmd_exec_wait:
cmd_exec_query:
// if QUERY_SHORT not set, write out { -, 0, TIME_LO, TIME_HI }
xbit $r4 $r3 13
- bra ne query_counter
- call cmd_exec_wait
+ bra ne #query_counter
+ call #cmd_exec_wait
mov $r4 0x80c
shl b32 $r4 6
- ld b32 $r5 D[$r0 + ctx_query_address_low]
+ ld b32 $r5 D[$r0 + #ctx_query_address_low]
add b32 $r5 4
iowr I[$r4 + 0x000] $r5
iowr I[$r4 + 0x100] $r0
mov $r5 0xc
iowr I[$r4 + 0x200] $r5
add b32 $r4 0x400
- ld b32 $r5 D[$r0 + ctx_query_address_high]
+ ld b32 $r5 D[$r0 + #ctx_query_address_high]
shl b32 $r5 16
iowr I[$r4 + 0x000] $r5
add b32 $r4 0x500
@@ -741,16 +743,16 @@ cmd_exec_query:
// write COUNTER
query_counter:
- call cmd_exec_wait
+ call #cmd_exec_wait
mov $r4 0x80c
shl b32 $r4 6
- ld b32 $r5 D[$r0 + ctx_query_address_low]
+ ld b32 $r5 D[$r0 + #ctx_query_address_low]
iowr I[$r4 + 0x000] $r5
iowr I[$r4 + 0x100] $r0
mov $r5 0x4
iowr I[$r4 + 0x200] $r5
add b32 $r4 0x400
- ld b32 $r5 D[$r0 + ctx_query_address_high]
+ ld b32 $r5 D[$r0 + #ctx_query_address_high]
shl b32 $r5 16
iowr I[$r4 + 0x000] $r5
add b32 $r4 0x500
@@ -759,7 +761,7 @@ cmd_exec_query:
mov $r5 0x00001110
sethi $r5 0x13120000
iowr I[$r4 + 0x100] $r5
- ld b32 $r5 D[$r0 + ctx_query_counter]
+ ld b32 $r5 D[$r0 + #ctx_query_counter]
add b32 $r4 0x500
iowr I[$r4 + 0x000] $r5
mov $r5 0x00002601
@@ -787,22 +789,22 @@ cmd_exec_query:
// $r2: hostirq state
// $r3: data
cmd_exec:
- call cmd_exec_wait
+ call #cmd_exec_wait
// if format requested, call function to calculate it, otherwise
// fill in cpp/xcnt for both surfaces as if (cpp == 1)
xbit $r15 $r3 0
- bra e cmd_exec_no_format
- call cmd_exec_set_format
+ bra e #cmd_exec_no_format
+ call #cmd_exec_set_format
mov $r4 0x200
- bra cmd_exec_init_src_surface
+ bra #cmd_exec_init_src_surface
cmd_exec_no_format:
mov $r6 0x810
shl b32 $r6 6
mov $r7 1
- st b32 D[$r0 + ctx_src_cpp] $r7
- st b32 D[$r0 + ctx_dst_cpp] $r7
- ld b32 $r7 D[$r0 + ctx_xcnt]
+ st b32 D[$r0 + #ctx_src_cpp] $r7
+ st b32 D[$r0 + #ctx_dst_cpp] $r7
+ ld b32 $r7 D[$r0 + #ctx_xcnt]
iowr I[$r6 + 0x000] $r7
iowr I[$r6 + 0x100] $r7
clear b32 $r4
@@ -811,28 +813,28 @@ cmd_exec:
bclr $flags $p2
clear b32 $r5
xbit $r15 $r3 4
- bra e src_tiled
- call cmd_exec_set_surface_linear
- bra cmd_exec_init_dst_surface
+ bra e #src_tiled
+ call #cmd_exec_set_surface_linear
+ bra #cmd_exec_init_dst_surface
src_tiled:
- call cmd_exec_set_surface_tiled
+ call #cmd_exec_set_surface_tiled
bset $r4 7
cmd_exec_init_dst_surface:
bset $flags $p2
- mov $r5 ctx_dst_address_high - ctx_src_address_high
+ mov $r5 #ctx_dst_address_high - #ctx_src_address_high
xbit $r15 $r3 8
- bra e dst_tiled
- call cmd_exec_set_surface_linear
- bra cmd_exec_kick
+ bra e #dst_tiled
+ call #cmd_exec_set_surface_linear
+ bra #cmd_exec_kick
dst_tiled:
- call cmd_exec_set_surface_tiled
+ call #cmd_exec_set_surface_tiled
bset $r4 8
cmd_exec_kick:
mov $r5 0x800
shl b32 $r5 6
- ld b32 $r6 D[$r0 + ctx_ycnt]
+ ld b32 $r6 D[$r0 + #ctx_ycnt]
iowr I[$r5 + 0x100] $r6
mov $r6 0x0041
// SRC_TARGET = 1, DST_TARGET = 2
@@ -842,8 +844,8 @@ cmd_exec:
// if requested, queue up a QUERY write after the copy has completed
xbit $r15 $r3 12
- bra e cmd_exec_done
- call cmd_exec_query
+ bra e #cmd_exec_done
+ call #cmd_exec_query
cmd_exec_done:
ret
diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
index 2731de2..1f33fbd 100644
--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
+++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
@@ -152,7 +152,7 @@ uint32_t nva3_pcopy_code[] = {
0xf10010fe,
0xf1040017,
0xf0fff327,
- 0x22d00023,
+ 0x12d00023,
0x0c25f0c0,
0xf40012d0,
0x17f11031,
diff --git a/drivers/gpu/drm/nouveau/nva3_pm.c b/drivers/gpu/drm/nouveau/nva3_pm.c
index 618c144..9e636e6 100644
--- a/drivers/gpu/drm/nouveau/nva3_pm.c
+++ b/drivers/gpu/drm/nouveau/nva3_pm.c
@@ -287,12 +287,13 @@ nva3_pm_grcp_idle(void *data)
return false;
}
-void
+int
nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nva3_pm_state *info = pre_state;
unsigned long flags;
+ int ret = -EAGAIN;
/* prevent any new grctx switches from starting */
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
@@ -328,6 +329,8 @@ nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
nv_wr32(dev, 0x100210, 0x80000000);
}
+ ret = 0;
+
cleanup:
/* unfreeze PFIFO */
nv_mask(dev, 0x002504, 0x00000001, 0x00000000);
@@ -339,4 +342,5 @@ cleanup:
nv_mask(dev, 0x400824, 0x10000000, 0x10000000);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
kfree(info);
+ return ret;
}
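
With the void-to-int conversion above, nva3_pm_clocks_set() now reports whether the reclock completed: 0 once the new clocks have been programmed, -EAGAIN if it bailed out through the cleanup path first. A hypothetical caller sketch (not part of this patch; the preceding "pre" step is assumed, not shown here):

static int
reclock(struct drm_device *dev, void *pre_state)
{
	/* pre_state is whatever the driver's earlier pre-reclock step
	 * produced; that step is not part of this hunk */
	int ret = nva3_pm_clocks_set(dev, pre_state);
	if (ret)	/* e.g. -EAGAIN when the early-out path was taken */
		NV_ERROR(dev, "reclock failed: %d\n", ret);
	return ret;
}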
diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
index 4199038..a8d1745 100644
--- a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
@@ -145,7 +145,7 @@ uint32_t nvc0_pcopy_code[] = {
0xf10010fe,
0xf1040017,
0xf0fff327,
- 0x22d00023,
+ 0x12d00023,
0x0c25f0c0,
0xf40012d0,
0x17f11031,
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index ecfafd7..8ee3963 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -875,14 +875,16 @@ nvc0_graph_create(struct drm_device *dev)
case 0xcf: /* 4/0/0/0, 3 */
priv->magic_not_rop_nr = 0x03;
break;
+ case 0xd9: /* 1/0/0/0, 1 */
+ priv->magic_not_rop_nr = 0x01;
+ break;
}
if (!priv->magic_not_rop_nr) {
NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2],
priv->tp_nr[3], priv->rop_nr);
- /* use 0xc3's values... */
- priv->magic_not_rop_nr = 0x03;
+ priv->magic_not_rop_nr = 0x00;
}
NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.fuc b/drivers/gpu/drm/nouveau/nvc0_graph.fuc
index 2a4b6dc..e6b2288 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.fuc
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.fuc
@@ -71,9 +71,9 @@ queue_put:
ld b32 $r9 D[$r13 + 0x4] // PUT
xor $r8 8
cmpu b32 $r8 $r9
- bra ne queue_put_next
+ bra ne #queue_put_next
mov $r15 E_CMD_OVERFLOW
- call error
+ call #error
ret
// store cmd/data on queue
@@ -104,7 +104,7 @@ queue_get:
ld b32 $r8 D[$r13 + 0x0] // GET
ld b32 $r9 D[$r13 + 0x4] // PUT
cmpu b32 $r8 $r9
- bra e queue_get_done
+ bra e #queue_get_done
// fetch first cmd/data pair
and $r9 $r8 7
shl b32 $r9 3
@@ -135,9 +135,9 @@ nv_rd32:
nv_rd32_wait:
iord $r12 I[$r11 + 0x000]
xbit $r12 $r12 31
- bra ne nv_rd32_wait
+ bra ne #nv_rd32_wait
mov $r10 6 // DONE_MMIO_RD
- call wait_doneo
+ call #wait_doneo
iord $r15 I[$r11 + 0x100] // MMIO_RDVAL
ret
@@ -157,7 +157,7 @@ nv_wr32:
nv_wr32_wait:
iord $r12 I[$r11 + 0x000]
xbit $r12 $r12 31
- bra ne nv_wr32_wait
+ bra ne #nv_wr32_wait
ret
// (re)set watchdog timer
@@ -193,7 +193,7 @@ $1:
shl b32 $r8 6
iord $r8 I[$r8 + 0x000] // DONE
xbit $r8 $r8 $r10
- bra $2 wait_done_$1
+ bra $2 #wait_done_$1
trace_clr(T_WAIT)
ret
')
@@ -216,7 +216,7 @@ mmctx_size:
add b32 $r9 $r8
add b32 $r14 4
cmpu b32 $r14 $r15
- bra ne nv_mmctx_size_loop
+ bra ne #nv_mmctx_size_loop
mov b32 $r15 $r9
ret
@@ -238,12 +238,12 @@ mmctx_xfer:
shl b32 $r8 6
clear b32 $r9
or $r11 $r11
- bra e mmctx_base_disabled
+ bra e #mmctx_base_disabled
iowr I[$r8 + 0x000] $r11 // MMCTX_BASE
bset $r9 0 // BASE_EN
mmctx_base_disabled:
or $r14 $r14
- bra e mmctx_multi_disabled
+ bra e #mmctx_multi_disabled
iowr I[$r8 + 0x200] $r14 // MMCTX_MULTI_STRIDE
iowr I[$r8 + 0x300] $r15 // MMCTX_MULTI_MASK
bset $r9 1 // MULTI_EN
@@ -264,7 +264,7 @@ mmctx_xfer:
mmctx_wait_free:
iord $r14 I[$r8 + 0x000] // MMCTX_CTRL
and $r14 0x1f
- bra e mmctx_wait_free
+ bra e #mmctx_wait_free
// queue up an entry
ld b32 $r14 D[$r12]
@@ -272,19 +272,19 @@ mmctx_xfer:
iowr I[$r8 + 0x300] $r14
add b32 $r12 4
cmpu b32 $r12 $r13
- bra ne mmctx_exec_loop
+ bra ne #mmctx_exec_loop
xbit $r11 $r10 2
- bra ne mmctx_stop
+ bra ne #mmctx_stop
// wait for queue to empty
mmctx_fini_wait:
iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
and $r11 0x1f
cmpu b32 $r11 0x10
- bra ne mmctx_fini_wait
+ bra ne #mmctx_fini_wait
mov $r10 2 // DONE_MMCTX
- call wait_donez
- bra mmctx_done
+ call #wait_donez
+ bra #mmctx_done
mmctx_stop:
xbit $r11 $r10 0
shl b32 $r11 16 // DIR
@@ -295,7 +295,7 @@ mmctx_xfer:
// wait for STOP_TRIGGER to clear
iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
xbit $r11 $r11 18
- bra ne mmctx_stop_wait
+ bra ne #mmctx_stop_wait
mmctx_done:
trace_clr(T_MMCTX)
ret
@@ -305,7 +305,7 @@ mmctx_xfer:
strand_wait:
push $r10
mov $r10 2
- call wait_donez
+ call #wait_donez
pop $r10
ret
@@ -316,7 +316,7 @@ strand_pre:
sethi $r8 0x20000
mov $r9 0xc
iowr I[$r8] $r9
- call strand_wait
+ call #strand_wait
ret
// unknown - call after issuing strand commands
@@ -326,7 +326,7 @@ strand_post:
sethi $r8 0x20000
mov $r9 0xd
iowr I[$r8] $r9
- call strand_wait
+ call #strand_wait
ret
// Selects strand set?!
@@ -341,11 +341,11 @@ strand_set:
iowr I[$r10 + 0x000] $r12 // 0x93c = 0xf
mov $r12 0xb
iowr I[$r11 + 0x000] $r12 // 0x928 = 0xb
- call strand_wait
+ call #strand_wait
iowr I[$r10 + 0x000] $r14 // 0x93c = <id>
mov $r12 0xa
iowr I[$r11 + 0x000] $r12 // 0x928 = 0xa
- call strand_wait
+ call #strand_wait
ret
// Initialise strand context data
@@ -357,22 +357,22 @@ strand_set:
//
strand_ctx_init:
trace_set(T_STRINIT)
- call strand_pre
+ call #strand_pre
mov $r14 3
- call strand_set
+ call #strand_set
mov $r10 0x46fc
sethi $r10 0x20000
add b32 $r11 $r10 0x400
iowr I[$r10 + 0x100] $r0 // STRAND_FIRST_GENE = 0
mov $r12 1
iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_FIRST_GENE
- call strand_wait
+ call #strand_wait
sub b32 $r12 $r0 1
iowr I[$r10 + 0x000] $r12 // STRAND_GENE_CNT = 0xffffffff
mov $r12 2
iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_GENE_CNT
- call strand_wait
- call strand_post
+ call #strand_wait
+ call #strand_post
// read the size of each strand, poke the context offset of
// each into STRAND_{SAVE,LOAD}_SWBASE now, no need to worry
@@ -391,7 +391,7 @@ strand_ctx_init:
add b32 $r14 $r10
add b32 $r8 4
sub b32 $r9 1
- bra ne ctx_init_strand_loop
+ bra ne #ctx_init_strand_loop
shl b32 $r14 8
sub b32 $r15 $r14 $r15
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.h b/drivers/gpu/drm/nouveau/nvc0_graph.h
index 636fe98..91d44ea 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.h
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.h
@@ -87,6 +87,7 @@ nvc0_graph_class(struct drm_device *dev)
case 0xc1:
return 0x9197;
case 0xc8:
+ case 0xd9:
return 0x9297;
default:
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
index 96b0b93..de77842 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -1268,6 +1268,17 @@ nvc0_grctx_generate_9039(struct drm_device *dev)
static void
nvc0_grctx_generate_90c0(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
+ nv_mthd(dev, 0x90c0, 0x2700 + (i * 0x40), 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x2720 + (i * 0x40), 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x2704 + (i * 0x40), 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x2724 + (i * 0x40), 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x2708 + (i * 0x40), 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x2728 + (i * 0x40), 0x00000000);
+ }
nv_mthd(dev, 0x90c0, 0x270c, 0x00000000);
nv_mthd(dev, 0x90c0, 0x272c, 0x00000000);
nv_mthd(dev, 0x90c0, 0x274c, 0x00000000);
@@ -1276,6 +1287,12 @@ nvc0_grctx_generate_90c0(struct drm_device *dev)
nv_mthd(dev, 0x90c0, 0x27ac, 0x00000000);
nv_mthd(dev, 0x90c0, 0x27cc, 0x00000000);
nv_mthd(dev, 0x90c0, 0x27ec, 0x00000000);
+ for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
+ nv_mthd(dev, 0x90c0, 0x2710 + (i * 0x40), 0x00014000);
+ nv_mthd(dev, 0x90c0, 0x2730 + (i * 0x40), 0x00014000);
+ nv_mthd(dev, 0x90c0, 0x2714 + (i * 0x40), 0x00000040);
+ nv_mthd(dev, 0x90c0, 0x2734 + (i * 0x40), 0x00000040);
+ }
nv_mthd(dev, 0x90c0, 0x030c, 0x00000001);
nv_mthd(dev, 0x90c0, 0x1944, 0x00000000);
nv_mthd(dev, 0x90c0, 0x0758, 0x00000100);
@@ -1471,14 +1488,20 @@ nvc0_grctx_generate_shaders(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- if (dev_priv->chipset != 0xc1) {
- nv_wr32(dev, 0x405800, 0x078000bf);
- nv_wr32(dev, 0x405830, 0x02180000);
- } else {
+ if (dev_priv->chipset == 0xd9) {
nv_wr32(dev, 0x405800, 0x0f8000bf);
nv_wr32(dev, 0x405830, 0x02180218);
+ nv_wr32(dev, 0x405834, 0x08000000);
+ } else
+ if (dev_priv->chipset == 0xc1) {
+ nv_wr32(dev, 0x405800, 0x0f8000bf);
+ nv_wr32(dev, 0x405830, 0x02180218);
+ nv_wr32(dev, 0x405834, 0x00000000);
+ } else {
+ nv_wr32(dev, 0x405800, 0x078000bf);
+ nv_wr32(dev, 0x405830, 0x02180000);
+ nv_wr32(dev, 0x405834, 0x00000000);
}
- nv_wr32(dev, 0x405834, 0x00000000);
nv_wr32(dev, 0x405838, 0x00000000);
nv_wr32(dev, 0x405854, 0x00000000);
nv_wr32(dev, 0x405870, 0x00000001);
@@ -1509,7 +1532,10 @@ nvc0_grctx_generate_unk64xx(struct drm_device *dev)
nv_wr32(dev, 0x4064ac, 0x00003fff);
nv_wr32(dev, 0x4064b4, 0x00000000);
nv_wr32(dev, 0x4064b8, 0x00000000);
- if (dev_priv->chipset == 0xc1) {
+ if (dev_priv->chipset == 0xd9)
+ nv_wr32(dev, 0x4064bc, 0x00000000);
+ if (dev_priv->chipset == 0xc1 ||
+ dev_priv->chipset == 0xd9) {
nv_wr32(dev, 0x4064c0, 0x80140078);
nv_wr32(dev, 0x4064c4, 0x0086ffff);
}
@@ -1550,10 +1576,23 @@ nvc0_grctx_generate_rop(struct drm_device *dev)
/* ROPC_BROADCAST */
nv_wr32(dev, 0x408800, 0x02802a3c);
nv_wr32(dev, 0x408804, 0x00000040);
- nv_wr32(dev, 0x408808, chipset != 0xc1 ? 0x0003e00d : 0x1003e005);
- nv_wr32(dev, 0x408900, 0x3080b801);
- nv_wr32(dev, 0x408904, chipset != 0xc1 ? 0x02000001 : 0x62000001);
- nv_wr32(dev, 0x408908, 0x00c80929);
+ if (chipset == 0xd9) {
+ nv_wr32(dev, 0x408808, 0x1043e005);
+ nv_wr32(dev, 0x408900, 0x3080b801);
+ nv_wr32(dev, 0x408904, 0x1043e005);
+ nv_wr32(dev, 0x408908, 0x00c8102f);
+ } else
+ if (chipset == 0xc1) {
+ nv_wr32(dev, 0x408808, 0x1003e005);
+ nv_wr32(dev, 0x408900, 0x3080b801);
+ nv_wr32(dev, 0x408904, 0x62000001);
+ nv_wr32(dev, 0x408908, 0x00c80929);
+ } else {
+ nv_wr32(dev, 0x408808, 0x0003e00d);
+ nv_wr32(dev, 0x408900, 0x3080b801);
+ nv_wr32(dev, 0x408904, 0x02000001);
+ nv_wr32(dev, 0x408908, 0x00c80929);
+ }
nv_wr32(dev, 0x40890c, 0x00000000);
nv_wr32(dev, 0x408980, 0x0000011d);
}
@@ -1572,7 +1611,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x418408, 0x00000000);
nv_wr32(dev, 0x41840c, 0x00001008);
nv_wr32(dev, 0x418410, 0x0fff0fff);
- nv_wr32(dev, 0x418414, 0x00200fff);
+ nv_wr32(dev, 0x418414, chipset != 0xd9 ? 0x00200fff : 0x02200fff);
nv_wr32(dev, 0x418450, 0x00000000);
nv_wr32(dev, 0x418454, 0x00000000);
nv_wr32(dev, 0x418458, 0x00000000);
@@ -1587,14 +1626,17 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x418700, 0x00000002);
nv_wr32(dev, 0x418704, 0x00000080);
nv_wr32(dev, 0x418708, 0x00000000);
- nv_wr32(dev, 0x41870c, 0x07c80000);
+ nv_wr32(dev, 0x41870c, chipset != 0xd9 ? 0x07c80000 : 0x00000000);
nv_wr32(dev, 0x418710, 0x00000000);
- nv_wr32(dev, 0x418800, 0x0006860a);
+ nv_wr32(dev, 0x418800, chipset != 0xd9 ? 0x0006860a : 0x7006860a);
nv_wr32(dev, 0x418808, 0x00000000);
nv_wr32(dev, 0x41880c, 0x00000000);
nv_wr32(dev, 0x418810, 0x00000000);
nv_wr32(dev, 0x418828, 0x00008442);
- nv_wr32(dev, 0x418830, chipset != 0xc1 ? 0x00000001 : 0x10000001);
+ if (chipset == 0xc1 || chipset == 0xd9)
+ nv_wr32(dev, 0x418830, 0x10000001);
+ else
+ nv_wr32(dev, 0x418830, 0x00000001);
nv_wr32(dev, 0x4188d8, 0x00000008);
nv_wr32(dev, 0x4188e0, 0x01000000);
nv_wr32(dev, 0x4188e8, 0x00000000);
@@ -1602,7 +1644,12 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x4188f0, 0x00000000);
nv_wr32(dev, 0x4188f4, 0x00000000);
nv_wr32(dev, 0x4188f8, 0x00000000);
- nv_wr32(dev, 0x4188fc, chipset != 0xc1 ? 0x00100000 : 0x00100018);
+ if (chipset == 0xd9)
+ nv_wr32(dev, 0x4188fc, 0x20100008);
+ else if (chipset == 0xc1)
+ nv_wr32(dev, 0x4188fc, 0x00100018);
+ else
+ nv_wr32(dev, 0x4188fc, 0x00100000);
nv_wr32(dev, 0x41891c, 0x00ff00ff);
nv_wr32(dev, 0x418924, 0x00000000);
nv_wr32(dev, 0x418928, 0x00ffff00);
@@ -1616,7 +1663,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x418a14 + (i * 0x20), 0x00000000);
nv_wr32(dev, 0x418a18 + (i * 0x20), 0x00000000);
}
- nv_wr32(dev, 0x418b00, 0x00000000);
+ nv_wr32(dev, 0x418b00, chipset != 0xd9 ? 0x00000000 : 0x00000006);
nv_wr32(dev, 0x418b08, 0x0a418820);
nv_wr32(dev, 0x418b0c, 0x062080e6);
nv_wr32(dev, 0x418b10, 0x020398a4);
@@ -1633,7 +1680,7 @@ nvc0_grctx_generate_gpc(struct drm_device *dev)
nv_wr32(dev, 0x418c24, 0x00000000);
nv_wr32(dev, 0x418c28, 0x00000000);
nv_wr32(dev, 0x418c2c, 0x00000000);
- if (chipset == 0xc1)
+ if (chipset == 0xc1 || chipset == 0xd9)
nv_wr32(dev, 0x418c6c, 0x00000001);
nv_wr32(dev, 0x418c80, 0x20200004);
nv_wr32(dev, 0x418c8c, 0x00000001);
@@ -1653,7 +1700,10 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
nv_wr32(dev, 0x419818, 0x00000000);
nv_wr32(dev, 0x41983c, 0x00038bc7);
nv_wr32(dev, 0x419848, 0x00000000);
- nv_wr32(dev, 0x419864, chipset != 0xc1 ? 0x0000012a : 0x00000129);
+ if (chipset == 0xc1 || chipset == 0xd9)
+ nv_wr32(dev, 0x419864, 0x00000129);
+ else
+ nv_wr32(dev, 0x419864, 0x0000012a);
nv_wr32(dev, 0x419888, 0x00000000);
nv_wr32(dev, 0x419a00, 0x000001f0);
nv_wr32(dev, 0x419a04, 0x00000001);
@@ -1663,7 +1713,9 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
nv_wr32(dev, 0x419a14, 0x00000200);
nv_wr32(dev, 0x419a1c, 0x00000000);
nv_wr32(dev, 0x419a20, 0x00000800);
- if (chipset != 0xc0 && chipset != 0xc8)
+ if (chipset == 0xd9)
+ nv_wr32(dev, 0x00419ac4, 0x0017f440);
+ else if (chipset != 0xc0 && chipset != 0xc8)
nv_wr32(dev, 0x00419ac4, 0x0007f440);
nv_wr32(dev, 0x419b00, 0x0a418820);
nv_wr32(dev, 0x419b04, 0x062080e6);
@@ -1672,21 +1724,33 @@ nvc0_grctx_generate_tp(struct drm_device *dev)
nv_wr32(dev, 0x419b10, 0x0a418820);
nv_wr32(dev, 0x419b14, 0x000000e6);
nv_wr32(dev, 0x419bd0, 0x00900103);
- nv_wr32(dev, 0x419be0, chipset != 0xc1 ? 0x00000001 : 0x00400001);
+ if (chipset == 0xc1 || chipset == 0xd9)
+ nv_wr32(dev, 0x419be0, 0x00400001);
+ else
+ nv_wr32(dev, 0x419be0, 0x00000001);
nv_wr32(dev, 0x419be4, 0x00000000);
- nv_wr32(dev, 0x419c00, 0x00000002);
+ nv_wr32(dev, 0x419c00, chipset != 0xd9 ? 0x00000002 : 0x0000000a);
nv_wr32(dev, 0x419c04, 0x00000006);
nv_wr32(dev, 0x419c08, 0x00000002);
nv_wr32(dev, 0x419c20, 0x00000000);
- if (chipset == 0xce || chipset == 0xcf)
+ if (dev_priv->chipset == 0xd9) {
+ nv_wr32(dev, 0x419c24, 0x00084210);
+ nv_wr32(dev, 0x419c28, 0x3cf3cf3c);
nv_wr32(dev, 0x419cb0, 0x00020048);
- else
+ } else
+ if (chipset == 0xce || chipset == 0xcf) {
+ nv_wr32(dev, 0x419cb0, 0x00020048);
+ } else {
nv_wr32(dev, 0x419cb0, 0x00060048);
+ }
nv_wr32(dev, 0x419ce8, 0x00000000);
nv_wr32(dev, 0x419cf4, 0x00000183);
- nv_wr32(dev, 0x419d20, chipset != 0xc1 ? 0x02180000 : 0x12180000);
+ if (chipset == 0xc1 || chipset == 0xd9)
+ nv_wr32(dev, 0x419d20, 0x12180000);
+ else
+ nv_wr32(dev, 0x419d20, 0x02180000);
nv_wr32(dev, 0x419d24, 0x00001fff);
- if (chipset == 0xc1)
+ if (chipset == 0xc1 || chipset == 0xd9)
nv_wr32(dev, 0x419d44, 0x02180218);
nv_wr32(dev, 0x419e04, 0x00000000);
nv_wr32(dev, 0x419e08, 0x00000000);
@@ -1986,6 +2050,10 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
nv_icmd(dev, 0x00000215, 0x00000040);
nv_icmd(dev, 0x00000216, 0x00000040);
nv_icmd(dev, 0x00000217, 0x00000040);
+ if (dev_priv->chipset == 0xd9) {
+ for (i = 0x0400; i <= 0x0417; i++)
+ nv_icmd(dev, i, 0x00000040);
+ }
nv_icmd(dev, 0x00000218, 0x0000c080);
nv_icmd(dev, 0x00000219, 0x0000c080);
nv_icmd(dev, 0x0000021a, 0x0000c080);
@@ -1994,6 +2062,10 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
nv_icmd(dev, 0x0000021d, 0x0000c080);
nv_icmd(dev, 0x0000021e, 0x0000c080);
nv_icmd(dev, 0x0000021f, 0x0000c080);
+ if (dev_priv->chipset == 0xd9) {
+ for (i = 0x0440; i <= 0x0457; i++)
+ nv_icmd(dev, i, 0x0000c080);
+ }
nv_icmd(dev, 0x000000ad, 0x0000013e);
nv_icmd(dev, 0x000000e1, 0x00000010);
nv_icmd(dev, 0x00000290, 0x00000000);
@@ -2556,7 +2628,8 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
nv_icmd(dev, 0x0000053f, 0xffff0000);
nv_icmd(dev, 0x00000585, 0x0000003f);
nv_icmd(dev, 0x00000576, 0x00000003);
- if (dev_priv->chipset == 0xc1)
+ if (dev_priv->chipset == 0xc1 ||
+ dev_priv->chipset == 0xd9)
nv_icmd(dev, 0x0000057b, 0x00000059);
nv_icmd(dev, 0x00000586, 0x00000040);
nv_icmd(dev, 0x00000582, 0x00000080);
@@ -2658,6 +2731,8 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
nv_icmd(dev, 0x00000957, 0x00000003);
nv_icmd(dev, 0x0000095e, 0x20164010);
nv_icmd(dev, 0x0000095f, 0x00000020);
+ if (dev_priv->chipset == 0xd9)
+ nv_icmd(dev, 0x0000097d, 0x00000020);
nv_icmd(dev, 0x00000683, 0x00000006);
nv_icmd(dev, 0x00000685, 0x003fffff);
nv_icmd(dev, 0x00000687, 0x00000c48);
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
index 06f5e26..15272be 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
+++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc
@@ -32,7 +32,7 @@
* - watchdog timer around ctx operations
*/
-.section nvc0_grgpc_data
+.section #nvc0_grgpc_data
include(`nvc0_graph.fuc')
gpc_id: .b32 0
gpc_mmio_list_head: .b32 0
@@ -48,40 +48,45 @@ cmd_queue: queue_init
// chipset descriptions
chipsets:
.b8 0xc0 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc0_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc0_tpc_mmio_tail
.b8 0xc1 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc1_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc1_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc1_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc1_tpc_mmio_tail
.b8 0xc3 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc3_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc3_tpc_mmio_tail
.b8 0xc4 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc3_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc3_tpc_mmio_tail
.b8 0xc8 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc0_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc0_tpc_mmio_tail
.b8 0xce 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvc3_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc3_tpc_mmio_tail
.b8 0xcf 0 0 0
-.b16 nvc0_gpc_mmio_head
-.b16 nvc0_gpc_mmio_tail
-.b16 nvc0_tpc_mmio_head
-.b16 nvcf_tpc_mmio_tail
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvcf_tpc_mmio_tail
+.b8 0xd9 0 0 0
+.b16 #nvd9_gpc_mmio_head
+.b16 #nvd9_gpc_mmio_tail
+.b16 #nvd9_tpc_mmio_head
+.b16 #nvd9_tpc_mmio_tail
.b8 0 0 0 0
// GPC mmio lists
@@ -114,6 +119,35 @@ nvc0_gpc_mmio_tail:
mmctx_data(0x000c6c, 1);
nvc1_gpc_mmio_tail:
+nvd9_gpc_mmio_head:
+mmctx_data(0x000380, 1)
+mmctx_data(0x000400, 2)
+mmctx_data(0x00040c, 3)
+mmctx_data(0x000450, 9)
+mmctx_data(0x000600, 1)
+mmctx_data(0x000684, 1)
+mmctx_data(0x000700, 5)
+mmctx_data(0x000800, 1)
+mmctx_data(0x000808, 3)
+mmctx_data(0x000828, 1)
+mmctx_data(0x000830, 1)
+mmctx_data(0x0008d8, 1)
+mmctx_data(0x0008e0, 1)
+mmctx_data(0x0008e8, 6)
+mmctx_data(0x00091c, 1)
+mmctx_data(0x000924, 3)
+mmctx_data(0x000b00, 1)
+mmctx_data(0x000b08, 6)
+mmctx_data(0x000bb8, 1)
+mmctx_data(0x000c08, 1)
+mmctx_data(0x000c10, 8)
+mmctx_data(0x000c6c, 1)
+mmctx_data(0x000c80, 1)
+mmctx_data(0x000c8c, 1)
+mmctx_data(0x001000, 3)
+mmctx_data(0x001014, 1)
+nvd9_gpc_mmio_tail:
+
// TPC mmio lists
nvc0_tpc_mmio_head:
mmctx_data(0x000018, 1)
@@ -146,9 +180,34 @@ nvc3_tpc_mmio_tail:
mmctx_data(0x000544, 1)
nvc1_tpc_mmio_tail:
+nvd9_tpc_mmio_head:
+mmctx_data(0x000018, 1)
+mmctx_data(0x00003c, 1)
+mmctx_data(0x000048, 1)
+mmctx_data(0x000064, 1)
+mmctx_data(0x000088, 1)
+mmctx_data(0x000200, 6)
+mmctx_data(0x00021c, 2)
+mmctx_data(0x0002c4, 1)
+mmctx_data(0x000300, 6)
+mmctx_data(0x0003d0, 1)
+mmctx_data(0x0003e0, 2)
+mmctx_data(0x000400, 3)
+mmctx_data(0x000420, 3)
+mmctx_data(0x0004b0, 1)
+mmctx_data(0x0004e8, 1)
+mmctx_data(0x0004f4, 1)
+mmctx_data(0x000520, 2)
+mmctx_data(0x000544, 1)
+mmctx_data(0x000604, 4)
+mmctx_data(0x000644, 20)
+mmctx_data(0x000698, 1)
+mmctx_data(0x0006e0, 1)
+mmctx_data(0x000750, 3)
+nvd9_tpc_mmio_tail:
-.section nvc0_grgpc_code
-bra init
+.section #nvc0_grgpc_code
+bra #init
define(`include_code')
include(`nvc0_graph.fuc')
@@ -160,10 +219,10 @@ error:
push $r14
mov $r14 -0x67ec // 0x9814
sethi $r14 0x400000
- call nv_wr32 // HUB_CTXCTL_CC_SCRATCH[5] = error code
+ call #nv_wr32 // HUB_CTXCTL_CC_SCRATCH[5] = error code
add b32 $r14 0x41c
mov $r15 1
- call nv_wr32 // HUB_CTXCTL_INTR_UP_SET
+ call #nv_wr32 // HUB_CTXCTL_INTR_UP_SET
pop $r14
ret
@@ -190,7 +249,7 @@ init:
iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
// setup i0 handler, and route all interrupts to it
- mov $r1 ih
+ mov $r1 #ih
mov $iv0 $r1
mov $r1 0x400
iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
@@ -210,24 +269,24 @@ init:
and $r2 0x1f
shl b32 $r3 $r2
sub b32 $r3 1
- st b32 D[$r0 + tpc_count] $r2
- st b32 D[$r0 + tpc_mask] $r3
+ st b32 D[$r0 + #tpc_count] $r2
+ st b32 D[$r0 + #tpc_mask] $r3
add b32 $r1 0x400
iord $r2 I[$r1 + 0x000] // MYINDEX
- st b32 D[$r0 + gpc_id] $r2
+ st b32 D[$r0 + #gpc_id] $r2
// find context data for this chipset
mov $r2 0x800
shl b32 $r2 6
iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
- mov $r1 chipsets - 12
+ mov $r1 #chipsets - 12
init_find_chipset:
add b32 $r1 12
ld b32 $r3 D[$r1 + 0x00]
cmpu b32 $r3 $r2
- bra e init_context
+ bra e #init_context
cmpu b32 $r3 0
- bra ne init_find_chipset
+ bra ne #init_find_chipset
// unknown chipset
ret
@@ -253,19 +312,19 @@ init:
clear b32 $r15
ld b16 $r14 D[$r1 + 4]
ld b16 $r15 D[$r1 + 6]
- st b16 D[$r0 + gpc_mmio_list_head] $r14
- st b16 D[$r0 + gpc_mmio_list_tail] $r15
- call mmctx_size
+ st b16 D[$r0 + #gpc_mmio_list_head] $r14
+ st b16 D[$r0 + #gpc_mmio_list_tail] $r15
+ call #mmctx_size
add b32 $r2 $r15
add b32 $r3 $r15
// calculate per-TPC mmio context size, store the list pointers
ld b16 $r14 D[$r1 + 8]
ld b16 $r15 D[$r1 + 10]
- st b16 D[$r0 + tpc_mmio_list_head] $r14
- st b16 D[$r0 + tpc_mmio_list_tail] $r15
- call mmctx_size
- ld b32 $r14 D[$r0 + tpc_count]
+ st b16 D[$r0 + #tpc_mmio_list_head] $r14
+ st b16 D[$r0 + #tpc_mmio_list_tail] $r15
+ call #mmctx_size
+ ld b32 $r14 D[$r0 + #tpc_count]
mulu $r14 $r15
add b32 $r2 $r14
add b32 $r3 $r14
@@ -283,7 +342,7 @@ init:
// calculate size of strand context data
mov b32 $r15 $r2
- call strand_ctx_init
+ call #strand_ctx_init
add b32 $r3 $r15
// save context size, and tell HUB we're done
@@ -301,13 +360,13 @@ init:
main:
bset $flags $p0
sleep $p0
- mov $r13 cmd_queue
- call queue_get
- bra $p1 main
+ mov $r13 #cmd_queue
+ call #queue_get
+ bra $p1 #main
// 0x0000-0x0003 are all context transfers
cmpu b32 $r14 0x04
- bra nc main_not_ctx_xfer
+ bra nc #main_not_ctx_xfer
// fetch $flags and mask off $p1/$p2
mov $r1 $flags
mov $r2 0x0006
@@ -318,14 +377,14 @@ main:
or $r1 $r14
mov $flags $r1
// transfer context data
- call ctx_xfer
- bra main
+ call #ctx_xfer
+ bra #main
main_not_ctx_xfer:
shl b32 $r15 $r14 16
or $r15 E_BAD_COMMAND
- call error
- bra main
+ call #error
+ bra #main
// interrupt handler
ih:
@@ -342,13 +401,13 @@ ih:
// incoming fifo command?
iord $r10 I[$r0 + 0x200] // INTR
and $r11 $r10 0x00000004
- bra e ih_no_fifo
+ bra e #ih_no_fifo
// queue incoming fifo command for later processing
mov $r11 0x1900
- mov $r13 cmd_queue
+ mov $r13 #cmd_queue
iord $r14 I[$r11 + 0x100] // FIFO_CMD
iord $r15 I[$r11 + 0x000] // FIFO_DATA
- call queue_put
+ call #queue_put
add b32 $r11 0x400
mov $r14 1
iowr I[$r11 + 0x000] $r14 // FIFO_ACK
@@ -374,11 +433,11 @@ ih:
//
hub_barrier_done:
mov $r15 1
- ld b32 $r14 D[$r0 + gpc_id]
+ ld b32 $r14 D[$r0 + #gpc_id]
shl b32 $r15 $r14
mov $r14 -0x6be8 // 0x409418 - HUB_BAR_SET
sethi $r14 0x400000
- call nv_wr32
+ call #nv_wr32
ret
// Disables various things, waits a bit, and re-enables them..
@@ -395,7 +454,7 @@ ctx_redswitch:
mov $r15 8
ctx_redswitch_delay:
sub b32 $r15 1
- bra ne ctx_redswitch_delay
+ bra ne #ctx_redswitch_delay
mov $r15 0xa20
iowr I[$r14] $r15 // GPC_RED_SWITCH = UNK11, ENABLE, POWER
ret
@@ -413,8 +472,8 @@ ctx_xfer:
mov $r1 0xa04
shl b32 $r1 6
iowr I[$r1 + 0x000] $r15// MEM_BASE
- bra not $p1 ctx_xfer_not_load
- call ctx_redswitch
+ bra not $p1 #ctx_xfer_not_load
+ call #ctx_redswitch
ctx_xfer_not_load:
// strands
@@ -422,7 +481,7 @@ ctx_xfer:
sethi $r1 0x20000
mov $r2 0xc
iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
- call strand_wait
+ call #strand_wait
mov $r2 0x47fc
sethi $r2 0x20000
iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
@@ -435,46 +494,46 @@ ctx_xfer:
or $r10 2 // first
mov $r11 0x0000
sethi $r11 0x500000
- ld b32 $r12 D[$r0 + gpc_id]
+ ld b32 $r12 D[$r0 + #gpc_id]
shl b32 $r12 15
add b32 $r11 $r12 // base = NV_PGRAPH_GPCn
- ld b32 $r12 D[$r0 + gpc_mmio_list_head]
- ld b32 $r13 D[$r0 + gpc_mmio_list_tail]
+ ld b32 $r12 D[$r0 + #gpc_mmio_list_head]
+ ld b32 $r13 D[$r0 + #gpc_mmio_list_tail]
mov $r14 0 // not multi
- call mmctx_xfer
+ call #mmctx_xfer
// per-TPC mmio context
xbit $r10 $flags $p1 // direction
or $r10 4 // last
mov $r11 0x4000
sethi $r11 0x500000 // base = NV_PGRAPH_GPC0_TPC0
- ld b32 $r12 D[$r0 + gpc_id]
+ ld b32 $r12 D[$r0 + #gpc_id]
shl b32 $r12 15
add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_TPC0
- ld b32 $r12 D[$r0 + tpc_mmio_list_head]
- ld b32 $r13 D[$r0 + tpc_mmio_list_tail]
- ld b32 $r15 D[$r0 + tpc_mask]
+ ld b32 $r12 D[$r0 + #tpc_mmio_list_head]
+ ld b32 $r13 D[$r0 + #tpc_mmio_list_tail]
+ ld b32 $r15 D[$r0 + #tpc_mask]
mov $r14 0x800 // stride = 0x800
- call mmctx_xfer
+ call #mmctx_xfer
// wait for strands to finish
- call strand_wait
+ call #strand_wait
// if load, or a save without a load following, do some
// unknown stuff that's done after finishing a block of
// strand commands
- bra $p1 ctx_xfer_post
- bra not $p2 ctx_xfer_done
+ bra $p1 #ctx_xfer_post
+ bra not $p2 #ctx_xfer_done
ctx_xfer_post:
mov $r1 0x4afc
sethi $r1 0x20000
mov $r2 0xd
iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0d
- call strand_wait
+ call #strand_wait
// mark completion in HUB's barrier
ctx_xfer_done:
- call hub_barrier_done
+ call #hub_barrier_done
ret
.align 256
diff --git a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
index 6f82032..a988b8a 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvc0_grgpc.fuc.h
@@ -25,26 +25,29 @@ uint32_t nvc0_grgpc_data[] = {
0x00000000,
0x00000000,
0x000000c0,
- 0x011c00bc,
- 0x01700120,
+ 0x012800c8,
+ 0x01e40194,
0x000000c1,
- 0x012000bc,
- 0x01840120,
+ 0x012c00c8,
+ 0x01f80194,
0x000000c3,
- 0x011c00bc,
- 0x01800120,
+ 0x012800c8,
+ 0x01f40194,
0x000000c4,
- 0x011c00bc,
- 0x01800120,
+ 0x012800c8,
+ 0x01f40194,
0x000000c8,
- 0x011c00bc,
- 0x01700120,
+ 0x012800c8,
+ 0x01e40194,
0x000000ce,
- 0x011c00bc,
- 0x01800120,
+ 0x012800c8,
+ 0x01f40194,
0x000000cf,
- 0x011c00bc,
- 0x017c0120,
+ 0x012800c8,
+ 0x01f00194,
+ 0x000000d9,
+ 0x0194012c,
+ 0x025401f8,
0x00000000,
0x00000380,
0x14000400,
@@ -71,6 +74,32 @@ uint32_t nvc0_grgpc_data[] = {
0x08001000,
0x00001014,
0x00000c6c,
+ 0x00000380,
+ 0x04000400,
+ 0x0800040c,
+ 0x20000450,
+ 0x00000600,
+ 0x00000684,
+ 0x10000700,
+ 0x00000800,
+ 0x08000808,
+ 0x00000828,
+ 0x00000830,
+ 0x000008d8,
+ 0x000008e0,
+ 0x140008e8,
+ 0x0000091c,
+ 0x08000924,
+ 0x00000b00,
+ 0x14000b08,
+ 0x00000bb8,
+ 0x00000c08,
+ 0x1c000c10,
+ 0x00000c6c,
+ 0x00000c80,
+ 0x00000c8c,
+ 0x08001000,
+ 0x00001014,
0x00000018,
0x0000003c,
0x00000048,
@@ -96,6 +125,29 @@ uint32_t nvc0_grgpc_data[] = {
0x000006e0,
0x000004bc,
0x00000544,
+ 0x00000018,
+ 0x0000003c,
+ 0x00000048,
+ 0x00000064,
+ 0x00000088,
+ 0x14000200,
+ 0x0400021c,
+ 0x000002c4,
+ 0x14000300,
+ 0x000003d0,
+ 0x040003e0,
+ 0x08000400,
+ 0x08000420,
+ 0x000004b0,
+ 0x000004e8,
+ 0x000004f4,
+ 0x04000520,
+ 0x00000544,
+ 0x0c000604,
+ 0x4c000644,
+ 0x00000698,
+ 0x000006e0,
+ 0x08000750,
};
uint32_t nvc0_grgpc_code[] = {
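
In the regenerated nvc0_grgpc_data[] above, each chipset descriptor is the chipset id followed by 32-bit words that pack two of the 16-bit list pointers from the .fuc source (the .b16 head/tail pairs). Assuming the assembler places the first .b16 in the low halfword, the new 0xd9 entry decodes as gpc head/tail 0x012c/0x0194 and tpc head/tail 0x01f8/0x0254. A minimal sketch of that decode (illustrative only; the helper name is invented):

static void
decode_mmio_list_word(u32 word, u16 *head, u16 *tail)
{
	/* low halfword first, matching the assumed .b16 packing */
	*head = word & 0xffff;	/* e.g. 0x0194012c -> head 0x012c */
	*tail = word >> 16;	/*                    tail 0x0194 */
}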
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
index e4f8c7e..98acddb 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
+++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc
@@ -27,7 +27,7 @@
* m4 nvc0_grhub.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grhub.fuc.h
*/
-.section nvc0_grhub_data
+.section #nvc0_grhub_data
include(`nvc0_graph.fuc')
gpc_count: .b32 0
rop_count: .b32 0
@@ -39,26 +39,29 @@ ctx_current: .b32 0
chipsets:
.b8 0xc0 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
.b8 0xc1 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc1_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc1_hub_mmio_tail
.b8 0xc3 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
.b8 0xc4 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
.b8 0xc8 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
.b8 0xce 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
.b8 0xcf 0 0 0
-.b16 nvc0_hub_mmio_head
-.b16 nvc0_hub_mmio_tail
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
+.b8 0xd9 0 0 0
+.b16 #nvd9_hub_mmio_head
+.b16 #nvd9_hub_mmio_tail
.b8 0 0 0 0
nvc0_hub_mmio_head:
@@ -105,6 +108,48 @@ nvc0_hub_mmio_tail:
mmctx_data(0x4064c0, 2)
nvc1_hub_mmio_tail:
+nvd9_hub_mmio_head:
+mmctx_data(0x17e91c, 2)
+mmctx_data(0x400204, 2)
+mmctx_data(0x404004, 10)
+mmctx_data(0x404044, 1)
+mmctx_data(0x404094, 14)
+mmctx_data(0x4040d0, 7)
+mmctx_data(0x4040f8, 1)
+mmctx_data(0x404130, 3)
+mmctx_data(0x404150, 3)
+mmctx_data(0x404164, 2)
+mmctx_data(0x404178, 2)
+mmctx_data(0x404200, 8)
+mmctx_data(0x404404, 14)
+mmctx_data(0x404460, 4)
+mmctx_data(0x404480, 1)
+mmctx_data(0x404498, 1)
+mmctx_data(0x404604, 4)
+mmctx_data(0x404618, 32)
+mmctx_data(0x404698, 21)
+mmctx_data(0x4046f0, 2)
+mmctx_data(0x404700, 22)
+mmctx_data(0x405800, 1)
+mmctx_data(0x405830, 3)
+mmctx_data(0x405854, 1)
+mmctx_data(0x405870, 4)
+mmctx_data(0x405a00, 2)
+mmctx_data(0x405a18, 1)
+mmctx_data(0x406020, 1)
+mmctx_data(0x406028, 4)
+mmctx_data(0x4064a8, 2)
+mmctx_data(0x4064b4, 5)
+mmctx_data(0x407804, 1)
+mmctx_data(0x40780c, 6)
+mmctx_data(0x4078bc, 1)
+mmctx_data(0x408000, 7)
+mmctx_data(0x408064, 1)
+mmctx_data(0x408800, 3)
+mmctx_data(0x408900, 4)
+mmctx_data(0x408980, 1)
+nvd9_hub_mmio_tail:
+
.align 256
chan_data:
chan_mmio_count: .b32 0
@@ -113,8 +158,8 @@ chan_mmio_address: .b32 0
.align 256
xfer_data: .b32 0
-.section nvc0_grhub_code
-bra init
+.section #nvc0_grhub_code
+bra #init
define(`include_code')
include(`nvc0_graph.fuc')
@@ -157,7 +202,7 @@ init:
iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
// setup i0 handler, and route all interrupts to it
- mov $r1 ih
+ mov $r1 #ih
mov $iv0 $r1
mov $r1 0x400
iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
@@ -201,11 +246,11 @@ init:
// fetch enabled GPC/ROP counts
mov $r14 -0x69fc // 0x409604
sethi $r14 0x400000
- call nv_rd32
+ call #nv_rd32
extr $r1 $r15 16:20
- st b32 D[$r0 + rop_count] $r1
+ st b32 D[$r0 + #rop_count] $r1
and $r15 0x1f
- st b32 D[$r0 + gpc_count] $r15
+ st b32 D[$r0 + #gpc_count] $r15
// set BAR_REQMASK to GPC mask
mov $r1 1
@@ -220,14 +265,14 @@ init:
mov $r2 0x800
shl b32 $r2 6
iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
- mov $r15 chipsets - 8
+ mov $r15 #chipsets - 8
init_find_chipset:
add b32 $r15 8
ld b32 $r3 D[$r15 + 0x00]
cmpu b32 $r3 $r2
- bra e init_context
+ bra e #init_context
cmpu b32 $r3 0
- bra ne init_find_chipset
+ bra ne #init_find_chipset
// unknown chipset
ret
@@ -239,9 +284,9 @@ init:
ld b16 $r14 D[$r15 + 4]
ld b16 $r15 D[$r15 + 6]
sethi $r14 0
- st b32 D[$r0 + hub_mmio_list_head] $r14
- st b32 D[$r0 + hub_mmio_list_tail] $r15
- call mmctx_size
+ st b32 D[$r0 + #hub_mmio_list_head] $r14
+ st b32 D[$r0 + #hub_mmio_list_tail] $r15
+ call #mmctx_size
// set mmctx base addresses now so we don't have to do it later,
// they don't (currently) ever change
@@ -260,7 +305,7 @@ init:
add b32 $r1 1
shl b32 $r1 8
mov b32 $r15 $r1
- call strand_ctx_init
+ call #strand_ctx_init
add b32 $r1 $r15
// initialise each GPC in sequence by passing in the offset of its
@@ -271,40 +316,40 @@ init:
// when it has completed, and return the size of its context data
// in GPCn_CC_SCRATCH[1]
//
- ld b32 $r3 D[$r0 + gpc_count]
+ ld b32 $r3 D[$r0 + #gpc_count]
mov $r4 0x2000
sethi $r4 0x500000
init_gpc:
// setup, and start GPC ucode running
add b32 $r14 $r4 0x804
mov b32 $r15 $r1
- call nv_wr32 // CC_SCRATCH[1] = ctx offset
+ call #nv_wr32 // CC_SCRATCH[1] = ctx offset
add b32 $r14 $r4 0x800
mov b32 $r15 $r2
- call nv_wr32 // CC_SCRATCH[0] = chipset
+ call #nv_wr32 // CC_SCRATCH[0] = chipset
add b32 $r14 $r4 0x10c
clear b32 $r15
- call nv_wr32
+ call #nv_wr32
add b32 $r14 $r4 0x104
- call nv_wr32 // ENTRY
+ call #nv_wr32 // ENTRY
add b32 $r14 $r4 0x100
mov $r15 2 // CTRL_START_TRIGGER
- call nv_wr32 // CTRL
+ call #nv_wr32 // CTRL
// wait for it to complete, and adjust context size
add b32 $r14 $r4 0x800
init_gpc_wait:
- call nv_rd32
+ call #nv_rd32
xbit $r15 $r15 31
- bra e init_gpc_wait
+ bra e #init_gpc_wait
add b32 $r14 $r4 0x804
- call nv_rd32
+ call #nv_rd32
add b32 $r1 $r15
// next!
add b32 $r4 0x8000
sub b32 $r3 1
- bra ne init_gpc
+ bra ne #init_gpc
// save context size, and tell host we're ready
mov $r2 0x800
@@ -322,13 +367,13 @@ main:
// sleep until we have something to do
bset $flags $p0
sleep $p0
- mov $r13 cmd_queue
- call queue_get
- bra $p1 main
+ mov $r13 #cmd_queue
+ call #queue_get
+ bra $p1 #main
// context switch, requested by GPU?
cmpu b32 $r14 0x4001
- bra ne main_not_ctx_switch
+ bra ne #main_not_ctx_switch
trace_set(T_AUTO)
mov $r1 0xb00
shl b32 $r1 6
@@ -336,39 +381,39 @@ main:
iord $r1 I[$r1 + 0x000] // CHAN_CUR
xbit $r3 $r1 31
- bra e chsw_no_prev
+ bra e #chsw_no_prev
xbit $r3 $r2 31
- bra e chsw_prev_no_next
+ bra e #chsw_prev_no_next
push $r2
mov b32 $r2 $r1
trace_set(T_SAVE)
bclr $flags $p1
bset $flags $p2
- call ctx_xfer
+ call #ctx_xfer
trace_clr(T_SAVE);
pop $r2
trace_set(T_LOAD);
bset $flags $p1
- call ctx_xfer
+ call #ctx_xfer
trace_clr(T_LOAD);
- bra chsw_done
+ bra #chsw_done
chsw_prev_no_next:
push $r2
mov b32 $r2 $r1
bclr $flags $p1
bclr $flags $p2
- call ctx_xfer
+ call #ctx_xfer
pop $r2
mov $r1 0xb00
shl b32 $r1 6
iowr I[$r1] $r2
- bra chsw_done
+ bra #chsw_done
chsw_no_prev:
xbit $r3 $r2 31
- bra e chsw_done
+ bra e #chsw_done
bset $flags $p1
bclr $flags $p2
- call ctx_xfer
+ call #ctx_xfer
// ack the context switch request
chsw_done:
@@ -377,32 +422,32 @@ main:
mov $r2 1
iowr I[$r1 + 0x000] $r2 // 0x409b0c
trace_clr(T_AUTO)
- bra main
+ bra #main
// request to set current channel? (*not* a context switch)
main_not_ctx_switch:
cmpu b32 $r14 0x0001
- bra ne main_not_ctx_chan
+ bra ne #main_not_ctx_chan
mov b32 $r2 $r15
- call ctx_chan
- bra main_done
+ call #ctx_chan
+ bra #main_done
// request to store current channel context?
main_not_ctx_chan:
cmpu b32 $r14 0x0002
- bra ne main_not_ctx_save
+ bra ne #main_not_ctx_save
trace_set(T_SAVE)
bclr $flags $p1
bclr $flags $p2
- call ctx_xfer
+ call #ctx_xfer
trace_clr(T_SAVE)
- bra main_done
+ bra #main_done
main_not_ctx_save:
shl b32 $r15 $r14 16
or $r15 E_BAD_COMMAND
- call error
- bra main
+ call #error
+ bra #main
main_done:
mov $r1 0x820
@@ -410,7 +455,7 @@ main:
clear b32 $r2
bset $r2 31
iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000
- bra main
+ bra #main
// interrupt handler
ih:
@@ -427,13 +472,13 @@ ih:
// incoming fifo command?
iord $r10 I[$r0 + 0x200] // INTR
and $r11 $r10 0x00000004
- bra e ih_no_fifo
+ bra e #ih_no_fifo
// queue incoming fifo command for later processing
mov $r11 0x1900
- mov $r13 cmd_queue
+ mov $r13 #cmd_queue
iord $r14 I[$r11 + 0x100] // FIFO_CMD
iord $r15 I[$r11 + 0x000] // FIFO_DATA
- call queue_put
+ call #queue_put
add b32 $r11 0x400
mov $r14 1
iowr I[$r11 + 0x000] $r14 // FIFO_ACK
@@ -441,18 +486,18 @@ ih:
// context switch request?
ih_no_fifo:
and $r11 $r10 0x00000100
- bra e ih_no_ctxsw
+ bra e #ih_no_ctxsw
// enqueue a context switch for later processing
- mov $r13 cmd_queue
+ mov $r13 #cmd_queue
mov $r14 0x4001
- call queue_put
+ call #queue_put
// anything we didn't handle, bring it to the host's attention
ih_no_ctxsw:
mov $r11 0x104
not b32 $r11
and $r11 $r10 $r11
- bra e ih_no_other
+ bra e #ih_no_other
mov $r10 0xc1c
shl b32 $r10 6
iowr I[$r10] $r11 // INTR_UP_SET
@@ -478,11 +523,11 @@ ctx_4160s:
mov $r14 0x4160
sethi $r14 0x400000
mov $r15 1
- call nv_wr32
+ call #nv_wr32
ctx_4160s_wait:
- call nv_rd32
+ call #nv_rd32
xbit $r15 $r15 4
- bra e ctx_4160s_wait
+ bra e #ctx_4160s_wait
ret
// Without clearing again at end of xfer, some things cause PGRAPH
@@ -492,7 +537,7 @@ ctx_4160c:
mov $r14 0x4160
sethi $r14 0x400000
clear b32 $r15
- call nv_wr32
+ call #nv_wr32
ret
// Again, not real sure
@@ -503,7 +548,7 @@ ctx_4170s:
mov $r14 0x4170
sethi $r14 0x400000
or $r15 0x10
- call nv_wr32
+ call #nv_wr32
ret
// Waits for a ctx_4170s() call to complete
@@ -511,9 +556,9 @@ ctx_4170s:
ctx_4170w:
mov $r14 0x4170
sethi $r14 0x400000
- call nv_rd32
+ call #nv_rd32
and $r15 0x10
- bra ne ctx_4170w
+ bra ne #ctx_4170w
ret
// Disables various things, waits a bit, and re-enables them..
@@ -530,7 +575,7 @@ ctx_redswitch:
mov $r15 8
ctx_redswitch_delay:
sub b32 $r15 1
- bra ne ctx_redswitch_delay
+ bra ne #ctx_redswitch_delay
mov $r15 0x770
iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL
ret
@@ -546,10 +591,10 @@ ctx_86c:
iowr I[$r14] $r15 // HUB(0x86c) = val
mov $r14 -0x75ec
sethi $r14 0x400000
- call nv_wr32 // ROP(0xa14) = val
+ call #nv_wr32 // ROP(0xa14) = val
mov $r14 -0x5794
sethi $r14 0x410000
- call nv_wr32 // GPC(0x86c) = val
+ call #nv_wr32 // GPC(0x86c) = val
ret
// ctx_load - loads a channel's ctxctl data, and selects its vm
@@ -561,7 +606,7 @@ ctx_load:
// switch to channel, somewhat magic in parts..
mov $r10 12 // DONE_UNK12
- call wait_donez
+ call #wait_donez
mov $r1 0xa24
shl b32 $r1 6
iowr I[$r1 + 0x000] $r0 // 0x409a24
@@ -576,7 +621,7 @@ ctx_load:
ctx_chan_wait_0:
iord $r4 I[$r1 + 0x100]
and $r4 0x1f
- bra ne ctx_chan_wait_0
+ bra ne #ctx_chan_wait_0
iowr I[$r3 + 0x000] $r2 // CHAN_CUR
// load channel header, fetch PGRAPH context pointer
@@ -595,19 +640,19 @@ ctx_load:
sethi $r2 0x80000000
iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vram
mov $r1 0x10 // chan + 0x0210
- mov $r2 xfer_data
+ mov $r2 #xfer_data
sethi $r2 0x00020000 // 16 bytes
xdld $r1 $r2
xdwait
trace_clr(T_LCHAN)
// update current context
- ld b32 $r1 D[$r0 + xfer_data + 4]
+ ld b32 $r1 D[$r0 + #xfer_data + 4]
shl b32 $r1 24
- ld b32 $r2 D[$r0 + xfer_data + 0]
+ ld b32 $r2 D[$r0 + #xfer_data + 0]
shr b32 $r2 8
or $r1 $r2
- st b32 D[$r0 + ctx_current] $r1
+ st b32 D[$r0 + #ctx_current] $r1
// set transfer base to start of context, and fetch context header
trace_set(T_LCTXH)
@@ -618,7 +663,7 @@ ctx_load:
mov $r1 0xa20
shl b32 $r1 6
iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vm
- mov $r1 chan_data
+ mov $r1 #chan_data
sethi $r1 0x00060000 // 256 bytes
xdld $r0 $r1
xdwait
@@ -635,10 +680,10 @@ ctx_load:
// In: $r2 channel address
//
ctx_chan:
- call ctx_4160s
- call ctx_load
+ call #ctx_4160s
+ call #ctx_load
mov $r10 12 // DONE_UNK12
- call wait_donez
+ call #wait_donez
mov $r1 0xa10
shl b32 $r1 6
mov $r2 5
@@ -646,8 +691,8 @@ ctx_chan:
ctx_chan_wait:
iord $r2 I[$r1 + 0x000]
or $r2 $r2
- bra ne ctx_chan_wait
- call ctx_4160c
+ bra ne #ctx_chan_wait
+ call #ctx_4160c
ret
// Execute per-context state overrides list
@@ -661,7 +706,7 @@ ctx_chan:
//
ctx_mmio_exec:
// set transfer base to be the mmio list
- ld b32 $r3 D[$r0 + chan_mmio_address]
+ ld b32 $r3 D[$r0 + #chan_mmio_address]
mov $r2 0xa04
shl b32 $r2 6
iowr I[$r2 + 0x000] $r3 // MEM_BASE
@@ -670,31 +715,31 @@ ctx_mmio_exec:
ctx_mmio_loop:
// fetch next 256 bytes of mmio list if necessary
and $r4 $r3 0xff
- bra ne ctx_mmio_pull
- mov $r5 xfer_data
+ bra ne #ctx_mmio_pull
+ mov $r5 #xfer_data
sethi $r5 0x00060000 // 256 bytes
xdld $r3 $r5
xdwait
// execute a single list entry
ctx_mmio_pull:
- ld b32 $r14 D[$r4 + xfer_data + 0x00]
- ld b32 $r15 D[$r4 + xfer_data + 0x04]
- call nv_wr32
+ ld b32 $r14 D[$r4 + #xfer_data + 0x00]
+ ld b32 $r15 D[$r4 + #xfer_data + 0x04]
+ call #nv_wr32
// next!
add b32 $r3 8
sub b32 $r1 1
- bra ne ctx_mmio_loop
+ bra ne #ctx_mmio_loop
// set transfer base back to the current context
ctx_mmio_done:
- ld b32 $r3 D[$r0 + ctx_current]
+ ld b32 $r3 D[$r0 + #ctx_current]
iowr I[$r2 + 0x000] $r3 // MEM_BASE
// disable the mmio list now, we don't need/want to execute it again
- st b32 D[$r0 + chan_mmio_count] $r0
- mov $r1 chan_data
+ st b32 D[$r0 + #chan_mmio_count] $r0
+ mov $r1 #chan_data
sethi $r1 0x00060000 // 256 bytes
xdst $r0 $r1
xdwait
@@ -709,46 +754,46 @@ ctx_mmio_exec:
// on load it means: "a save preceded this load"
//
ctx_xfer:
- bra not $p1 ctx_xfer_pre
- bra $p2 ctx_xfer_pre_load
+ bra not $p1 #ctx_xfer_pre
+ bra $p2 #ctx_xfer_pre_load
ctx_xfer_pre:
mov $r15 0x10
- call ctx_86c
- call ctx_4160s
- bra not $p1 ctx_xfer_exec
+ call #ctx_86c
+ call #ctx_4160s
+ bra not $p1 #ctx_xfer_exec
ctx_xfer_pre_load:
mov $r15 2
- call ctx_4170s
- call ctx_4170w
- call ctx_redswitch
+ call #ctx_4170s
+ call #ctx_4170w
+ call #ctx_redswitch
clear b32 $r15
- call ctx_4170s
- call ctx_load
+ call #ctx_4170s
+ call #ctx_load
// fetch context pointer, and initiate xfer on all GPCs
ctx_xfer_exec:
- ld b32 $r1 D[$r0 + ctx_current]
+ ld b32 $r1 D[$r0 + #ctx_current]
mov $r2 0x414
shl b32 $r2 6
iowr I[$r2 + 0x000] $r0 // BAR_STATUS = reset
mov $r14 -0x5b00
sethi $r14 0x410000
mov b32 $r15 $r1
- call nv_wr32 // GPC_BCAST_WRCMD_DATA = ctx pointer
+ call #nv_wr32 // GPC_BCAST_WRCMD_DATA = ctx pointer
add b32 $r14 4
xbit $r15 $flags $p1
xbit $r2 $flags $p2
shl b32 $r2 1
or $r15 $r2
- call nv_wr32 // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
+ call #nv_wr32 // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
// strands
mov $r1 0x4afc
sethi $r1 0x20000
mov $r2 0xc
iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
- call strand_wait
+ call #strand_wait
mov $r2 0x47fc
sethi $r2 0x20000
iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
@@ -760,22 +805,22 @@ ctx_xfer:
xbit $r10 $flags $p1 // direction
or $r10 6 // first, last
mov $r11 0 // base = 0
- ld b32 $r12 D[$r0 + hub_mmio_list_head]
- ld b32 $r13 D[$r0 + hub_mmio_list_tail]
+ ld b32 $r12 D[$r0 + #hub_mmio_list_head]
+ ld b32 $r13 D[$r0 + #hub_mmio_list_tail]
mov $r14 0 // not multi
- call mmctx_xfer
+ call #mmctx_xfer
// wait for GPCs to all complete
mov $r10 8 // DONE_BAR
- call wait_doneo
+ call #wait_doneo
// wait for strand xfer to complete
- call strand_wait
+ call #strand_wait
// post-op
- bra $p1 ctx_xfer_post
+ bra $p1 #ctx_xfer_post
mov $r10 12 // DONE_UNK12
- call wait_donez
+ call #wait_donez
mov $r1 0xa10
shl b32 $r1 6
mov $r2 5
@@ -783,27 +828,27 @@ ctx_xfer:
ctx_xfer_post_save_wait:
iord $r2 I[$r1]
or $r2 $r2
- bra ne ctx_xfer_post_save_wait
+ bra ne #ctx_xfer_post_save_wait
- bra $p2 ctx_xfer_done
+ bra $p2 #ctx_xfer_done
ctx_xfer_post:
mov $r15 2
- call ctx_4170s
+ call #ctx_4170s
clear b32 $r15
- call ctx_86c
- call strand_post
- call ctx_4170w
+ call #ctx_86c
+ call #strand_post
+ call #ctx_4170w
clear b32 $r15
- call ctx_4170s
+ call #ctx_4170s
- bra not $p1 ctx_xfer_no_post_mmio
- ld b32 $r1 D[$r0 + chan_mmio_count]
+ bra not $p1 #ctx_xfer_no_post_mmio
+ ld b32 $r1 D[$r0 + #chan_mmio_count]
or $r1 $r1
- bra e ctx_xfer_no_post_mmio
- call ctx_mmio_exec
+ bra e #ctx_xfer_no_post_mmio
+ call #ctx_mmio_exec
ctx_xfer_no_post_mmio:
- call ctx_4160c
+ call #ctx_4160c
ctx_xfer_done:
ret
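
ctx_mmio_exec above walks a per-channel override list of (register, value) pairs, chan_mmio_count entries of eight bytes each, fetched 256 bytes at a time and written out via nv_wr32. A host-side sketch of the same walk under that layout; the struct/function names, the printf stand-in for the register write, and the demo entries are all mine:

```c
#include <stdio.h>
#include <stdint.h>

/* One entry of the per-channel MMIO override list, as ctx_mmio_exec
 * consumes it: a register address followed by the value to write. */
struct chan_mmio_entry {
        uint32_t addr;
        uint32_t data;
};

/* Stand-in for nv_wr32(); the real code pokes the register through the fuc. */
static void fake_wr32(uint32_t addr, uint32_t data)
{
        printf("wr32 0x%06x <- 0x%08x\n", addr, data);
}

static void exec_mmio_list(const struct chan_mmio_entry *list, uint32_t count)
{
        while (count--) {
                fake_wr32(list->addr, list->data);
                list++;
        }
}

int main(void)
{
        const struct chan_mmio_entry demo[] = {
                { 0x404000, 0x00000001 },       /* made-up example entries */
                { 0x405800, 0x0f8000bf },
        };

        exec_mmio_list(demo, 2);
        return 0;
}
```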
diff --git a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
index 241d326..c5ed307 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
+++ b/drivers/gpu/drm/nouveau/nvc0_grhub.fuc.h
@@ -23,19 +23,21 @@ uint32_t nvc0_grhub_data[] = {
0x00000000,
0x00000000,
0x000000c0,
- 0x01340098,
+ 0x013c00a0,
0x000000c1,
- 0x01380098,
+ 0x014000a0,
0x000000c3,
- 0x01340098,
+ 0x013c00a0,
0x000000c4,
- 0x01340098,
+ 0x013c00a0,
0x000000c8,
- 0x01340098,
+ 0x013c00a0,
0x000000ce,
- 0x01340098,
+ 0x013c00a0,
0x000000cf,
- 0x01340098,
+ 0x013c00a0,
+ 0x000000d9,
+ 0x01dc0140,
0x00000000,
0x0417e91c,
0x04400204,
@@ -77,47 +79,45 @@ uint32_t nvc0_grhub_data[] = {
0x0c408900,
0x00408980,
0x044064c0,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
+ 0x0417e91c,
+ 0x04400204,
+ 0x24404004,
+ 0x00404044,
+ 0x34404094,
+ 0x184040d0,
+ 0x004040f8,
+ 0x08404130,
+ 0x08404150,
+ 0x04404164,
+ 0x04404178,
+ 0x1c404200,
+ 0x34404404,
+ 0x0c404460,
+ 0x00404480,
+ 0x00404498,
+ 0x0c404604,
+ 0x7c404618,
+ 0x50404698,
+ 0x044046f0,
+ 0x54404700,
+ 0x00405800,
+ 0x08405830,
+ 0x00405854,
+ 0x0c405870,
+ 0x04405a00,
+ 0x00405a18,
+ 0x00406020,
+ 0x0c406028,
+ 0x044064a8,
+ 0x104064b4,
+ 0x00407804,
+ 0x1440780c,
+ 0x004078bc,
+ 0x18408000,
+ 0x00408064,
+ 0x08408800,
+ 0x0c408900,
+ 0x00408980,
0x00000000,
0x00000000,
0x00000000,
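
Comparing the mmctx_data(addr, count) lines added to nvc0_grhub.fuc with the words that land in this generated header shows the packing: (count - 1) shifted left by 26 ORed with the register address, so mmctx_data(0x404004, 10) becomes 0x24404004 and mmctx_data(0x404618, 32) becomes 0x7c404618. A hedged sketch that expands such a word back into the register range it describes; the helper name, the 26-bit address mask, and the 4-byte register stride are my assumptions:

```c
#include <stdio.h>
#include <stdint.h>

/*
 * Unpack one mmio-list word from nvc0_grhub_data[], assuming the
 * mmctx_data(addr, count) packing ((count - 1) << 26 | addr) and
 * consecutive 32-bit registers.
 */
static void dump_mmctx_word(uint32_t word)
{
        uint32_t addr  = word & 0x03ffffff;
        uint32_t count = (word >> 26) + 1;

        printf("0x%08x -> 0x%06x..0x%06x (%u regs)\n",
               word, addr, addr + (count - 1) * 4, count);
}

int main(void)
{
        dump_mmctx_word(0x24404004);    /* mmctx_data(0x404004, 10) */
        dump_mmctx_word(0x7c404618);    /* mmctx_data(0x404618, 32) */
        return 0;
}
```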
diff --git a/drivers/gpu/drm/nouveau/nvc0_pm.c b/drivers/gpu/drm/nouveau/nvc0_pm.c
index 929aded..e9992f6 100644
--- a/drivers/gpu/drm/nouveau/nvc0_pm.c
+++ b/drivers/gpu/drm/nouveau/nvc0_pm.c
@@ -153,3 +153,240 @@ nvc0_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
perflvl->vdec = read_clk(dev, 0x0e);
return 0;
}
+
+struct nvc0_pm_clock {
+ u32 freq;
+ u32 ssel;
+ u32 mdiv;
+ u32 dsrc;
+ u32 ddiv;
+ u32 coef;
+};
+
+struct nvc0_pm_state {
+ struct nvc0_pm_clock eng[16];
+};
+
+static u32
+calc_div(struct drm_device *dev, int clk, u32 ref, u32 freq, u32 *ddiv)
+{
+ u32 div = min((ref * 2) / freq, (u32)65);
+ if (div < 2)
+ div = 2;
+
+ *ddiv = div - 2;
+ return (ref * 2) / div;
+}
+
+static u32
+calc_src(struct drm_device *dev, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
+{
+ u32 sclk;
+
+ /* use one of the fixed frequencies if possible */
+ *ddiv = 0x00000000;
+ switch (freq) {
+ case 27000:
+ case 108000:
+ *dsrc = 0x00000000;
+ if (freq == 108000)
+ *dsrc |= 0x00030000;
+ return freq;
+ case 100000:
+ *dsrc = 0x00000002;
+ return freq;
+ default:
+ *dsrc = 0x00000003;
+ break;
+ }
+
+ /* otherwise, calculate the closest divider */
+ sclk = read_vco(dev, clk);
+ if (clk < 7)
+ sclk = calc_div(dev, clk, sclk, freq, ddiv);
+ return sclk;
+}
+
+static u32
+calc_pll(struct drm_device *dev, int clk, u32 freq, u32 *coef)
+{
+ struct pll_lims limits;
+ int N, M, P, ret;
+
+ ret = get_pll_limits(dev, 0x137000 + (clk * 0x20), &limits);
+ if (ret)
+ return 0;
+
+ limits.refclk = read_div(dev, clk, 0x137120, 0x137140);
+ if (!limits.refclk)
+ return 0;
+
+ ret = nva3_calc_pll(dev, &limits, freq, &N, NULL, &M, &P);
+ if (ret <= 0)
+ return 0;
+
+ *coef = (P << 16) | (N << 8) | M;
+ return ret;
+}
+
+/* A (likely rather simplified and incomplete) view of the clock tree
+ *
+ * Key:
+ *
+ * S: source select
+ * D: divider
+ * P: pll
+ * F: switch
+ *
+ * Engine clocks:
+ *
+ * 137250(D) ---- 137100(F0) ---- 137160(S)/1371d0(D) ------------------- ref
+ *                      (F1) ---- 1370X0(P) ---- 137120(S)/137140(D) ---- ref
+ *
+ * Not all registers exist for all clocks. For example: clocks >= 8 don't
+ * have their own PLL (all tied to clock 7's PLL when in PLL mode), nor do
+ * they have the divider at 1371d0, though the source selection at 137160
+ * still exists. You must use the divider at 137250 for these instead.
+ *
+ * Memory clock:
+ *
+ * TBD, read_mem() above is likely very wrong...
+ *
+ */
+
+static int
+calc_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info, u32 freq)
+{
+ u32 src0, div0, div1D, div1P = 0;
+ u32 clk0, clk1 = 0;
+
+ /* invalid clock domain */
+ if (!freq)
+ return 0;
+
+ /* first possible path, using only dividers */
+ clk0 = calc_src(dev, clk, freq, &src0, &div0);
+ clk0 = calc_div(dev, clk, clk0, freq, &div1D);
+
+ /* see if we can get any closer using PLLs */
+ if (clk0 != freq) {
+ if (clk < 7)
+ clk1 = calc_pll(dev, clk, freq, &info->coef);
+ else
+ clk1 = read_pll(dev, 0x1370e0);
+ clk1 = calc_div(dev, clk, clk1, freq, &div1P);
+ }
+
+ /* select the method which gets closest to target freq */
+ if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
+ info->dsrc = src0;
+ if (div0) {
+ info->ddiv |= 0x80000000;
+ info->ddiv |= div0 << 8;
+ info->ddiv |= div0;
+ }
+ if (div1D) {
+ info->mdiv |= 0x80000000;
+ info->mdiv |= div1D;
+ }
+ info->ssel = 0;
+ info->freq = clk0;
+ } else {
+ if (div1P) {
+ info->mdiv |= 0x80000000;
+ info->mdiv |= div1P << 8;
+ }
+ info->ssel = (1 << clk);
+ info->freq = clk1;
+ }
+
+ return 0;
+}
+
+void *
+nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_pm_state *info;
+ int ret;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return ERR_PTR(-ENOMEM);
+
+ /* NFI why this is still in the performance table, the ROPCs appear
+ * to get their clock from clock 2 ("hub07", actually hub05 on this
+ * chip, but, anyway...) as well. nvatiming confirms hub05 and ROP
+ * are always the same freq with the binary driver even when the
+ * performance table says they should differ.
+ */
+ if (dev_priv->chipset == 0xd9)
+ perflvl->rop = 0;
+
+ if ((ret = calc_clk(dev, 0x00, &info->eng[0x00], perflvl->shader)) ||
+ (ret = calc_clk(dev, 0x01, &info->eng[0x01], perflvl->rop)) ||
+ (ret = calc_clk(dev, 0x02, &info->eng[0x02], perflvl->hub07)) ||
+ (ret = calc_clk(dev, 0x07, &info->eng[0x07], perflvl->hub06)) ||
+ (ret = calc_clk(dev, 0x08, &info->eng[0x08], perflvl->hub01)) ||
+ (ret = calc_clk(dev, 0x09, &info->eng[0x09], perflvl->copy)) ||
+ (ret = calc_clk(dev, 0x0c, &info->eng[0x0c], perflvl->daemon)) ||
+ (ret = calc_clk(dev, 0x0e, &info->eng[0x0e], perflvl->vdec))) {
+ kfree(info);
+ return ERR_PTR(ret);
+ }
+
+ return info;
+}
+
+static void
+prog_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info)
+{
+ /* program dividers at 137160/1371d0 first */
+ if (clk < 7 && !info->ssel) {
+ nv_mask(dev, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
+ nv_wr32(dev, 0x137160 + (clk * 0x04), info->dsrc);
+ }
+
+ /* switch clock to non-pll mode */
+ nv_mask(dev, 0x137100, (1 << clk), 0x00000000);
+ nv_wait(dev, 0x137100, (1 << clk), 0x00000000);
+
+ /* reprogram pll */
+ if (clk < 7) {
+ /* make sure it's disabled first... */
+ u32 base = 0x137000 + (clk * 0x20);
+ u32 ctrl = nv_rd32(dev, base + 0x00);
+ if (ctrl & 0x00000001) {
+ nv_mask(dev, base + 0x00, 0x00000004, 0x00000000);
+ nv_mask(dev, base + 0x00, 0x00000001, 0x00000000);
+ }
+ /* program it to new values, if necessary */
+ if (info->ssel) {
+ nv_wr32(dev, base + 0x04, info->coef);
+ nv_mask(dev, base + 0x00, 0x00000001, 0x00000001);
+ nv_wait(dev, base + 0x00, 0x00020000, 0x00020000);
+ nv_mask(dev, base + 0x00, 0x00020004, 0x00000004);
+ }
+ }
+
+ /* select pll/non-pll mode, and program final clock divider */
+ nv_mask(dev, 0x137100, (1 << clk), info->ssel);
+ nv_wait(dev, 0x137100, (1 << clk), info->ssel);
+ nv_mask(dev, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
+}
+
+int
+nvc0_pm_clocks_set(struct drm_device *dev, void *data)
+{
+ struct nvc0_pm_state *info = data;
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ if (!info->eng[i].freq)
+ continue;
+ prog_clk(dev, i, &info->eng[i]);
+ }
+
+ kfree(info);
+ return 0;
+}
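
calc_div() above works on a doubled reference so it can express half-steps: the value programmed is div - 2, the achievable frequency is (ref * 2) / div, and div is clamped to the range 2..65; calc_clk() then keeps whichever of this and the PLL path lands closer to the target. A standalone sketch of the divider half of that choice; the 1620000 kHz reference is a made-up example value, not something the patch states:

```c
#include <stdio.h>
#include <stdint.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Mirrors calc_div(): frequencies in kHz, divider steps of 0.5. */
static uint32_t calc_div_example(uint32_t ref, uint32_t freq, uint32_t *ddiv)
{
        uint32_t div = MIN((ref * 2) / freq, (uint32_t)65);
        if (div < 2)
                div = 2;

        *ddiv = div - 2;                /* register value */
        return (ref * 2) / div;         /* frequency actually achieved */
}

int main(void)
{
        uint32_t ddiv;
        uint32_t got = calc_div_example(1620000, 750000, &ddiv);

        printf("ddiv=%u -> %u kHz (asked for 750000)\n", ddiv, got);
        return 0;
}
```

The 60 MHz miss in this example is exactly the case where calc_clk() would prefer the PLL path, assuming calc_pll() can get closer to 750 MHz.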
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index cb006a7..d2ba2f0 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -35,12 +35,34 @@
#include "nouveau_fb.h"
#include "nv50_display.h"
+#define EVO_DMA_NR 9
+
+#define EVO_MASTER (0x00)
+#define EVO_FLIP(c) (0x01 + (c))
+#define EVO_OVLY(c) (0x05 + (c))
+#define EVO_OIMM(c) (0x09 + (c))
+#define EVO_CURS(c) (0x0d + (c))
+
+/* offsets in shared sync bo of various structures */
+#define EVO_SYNC(c, o) ((c) * 0x0100 + (o))
+#define EVO_MAST_NTFY EVO_SYNC( 0, 0x00)
+#define EVO_FLIP_SEM0(c) EVO_SYNC((c), 0x00)
+#define EVO_FLIP_SEM1(c) EVO_SYNC((c), 0x10)
+
+struct evo {
+ int idx;
+ dma_addr_t handle;
+ u32 *ptr;
+ struct {
+ u32 offset;
+ u16 value;
+ } sem;
+};
+
struct nvd0_display {
struct nouveau_gpuobj *mem;
- struct {
- dma_addr_t handle;
- u32 *ptr;
- } evo[1];
+ struct nouveau_bo *sync;
+ struct evo evo[9];
struct tasklet_struct tasklet;
u32 modeset;
@@ -53,6 +75,15 @@ nvd0_display(struct drm_device *dev)
return dev_priv->engine.display.priv;
}
+static struct drm_crtc *
+nvd0_display_crtc_get(struct drm_encoder *encoder)
+{
+ return nouveau_encoder(encoder)->crtc;
+}
+
+/******************************************************************************
+ * EVO channel helpers
+ *****************************************************************************/
static inline int
evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data)
{
@@ -84,6 +115,9 @@ evo_wait(struct drm_device *dev, int id, int nr)
put = 0;
}
+ if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO)
+ NV_INFO(dev, "Evo%d: %p START\n", id, disp->evo[id].ptr + put);
+
return disp->evo[id].ptr + put;
}
@@ -91,40 +125,264 @@ static void
evo_kick(u32 *push, struct drm_device *dev, int id)
{
struct nvd0_display *disp = nvd0_display(dev);
+
+ if (nouveau_reg_debug & NOUVEAU_REG_DEBUG_EVO) {
+ u32 curp = nv_rd32(dev, 0x640000 + (id * 0x1000)) >> 2;
+ u32 *cur = disp->evo[id].ptr + curp;
+
+ while (cur < push)
+ NV_INFO(dev, "Evo%d: 0x%08x\n", id, *cur++);
+ NV_INFO(dev, "Evo%d: %p KICK!\n", id, push);
+ }
+
nv_wr32(dev, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
}
#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
#define evo_data(p,d) *((p)++) = (d)
-static struct drm_crtc *
-nvd0_display_crtc_get(struct drm_encoder *encoder)
+static int
+evo_init_dma(struct drm_device *dev, int ch)
{
- return nouveau_encoder(encoder)->crtc;
+ struct nvd0_display *disp = nvd0_display(dev);
+ u32 flags;
+
+ flags = 0x00000000;
+ if (ch == EVO_MASTER)
+ flags |= 0x01000000;
+
+ nv_wr32(dev, 0x610494 + (ch * 0x0010), (disp->evo[ch].handle >> 8) | 3);
+ nv_wr32(dev, 0x610498 + (ch * 0x0010), 0x00010000);
+ nv_wr32(dev, 0x61049c + (ch * 0x0010), 0x00000001);
+ nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
+ nv_wr32(dev, 0x640000 + (ch * 0x1000), 0x00000000);
+ nv_wr32(dev, 0x610490 + (ch * 0x0010), 0x00000013 | flags);
+ if (!nv_wait(dev, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000)) {
+ NV_ERROR(dev, "PDISP: ch%d 0x%08x\n", ch,
+ nv_rd32(dev, 0x610490 + (ch * 0x0010)));
+ return -EBUSY;
+ }
+
+ nv_mask(dev, 0x610090, (1 << ch), (1 << ch));
+ nv_mask(dev, 0x6100a0, (1 << ch), (1 << ch));
+ return 0;
+}
+
+static void
+evo_fini_dma(struct drm_device *dev, int ch)
+{
+ if (!(nv_rd32(dev, 0x610490 + (ch * 0x0010)) & 0x00000010))
+ return;
+
+ nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000000);
+ nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000003, 0x00000000);
+ nv_wait(dev, 0x610490 + (ch * 0x0010), 0x80000000, 0x00000000);
+ nv_mask(dev, 0x610090, (1 << ch), 0x00000000);
+ nv_mask(dev, 0x6100a0, (1 << ch), 0x00000000);
+}
+
+static inline void
+evo_piow(struct drm_device *dev, int ch, u16 mthd, u32 data)
+{
+ nv_wr32(dev, 0x640000 + (ch * 0x1000) + mthd, data);
+}
+
+static int
+evo_init_pio(struct drm_device *dev, int ch)
+{
+ nv_wr32(dev, 0x610490 + (ch * 0x0010), 0x00000001);
+ if (!nv_wait(dev, 0x610490 + (ch * 0x0010), 0x00010000, 0x00010000)) {
+ NV_ERROR(dev, "PDISP: ch%d 0x%08x\n", ch,
+ nv_rd32(dev, 0x610490 + (ch * 0x0010)));
+ return -EBUSY;
+ }
+
+ nv_mask(dev, 0x610090, (1 << ch), (1 << ch));
+ nv_mask(dev, 0x6100a0, (1 << ch), (1 << ch));
+ return 0;
+}
+
+static void
+evo_fini_pio(struct drm_device *dev, int ch)
+{
+ if (!(nv_rd32(dev, 0x610490 + (ch * 0x0010)) & 0x00000001))
+ return;
+
+ nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000010, 0x00000010);
+ nv_mask(dev, 0x610490 + (ch * 0x0010), 0x00000001, 0x00000000);
+ nv_wait(dev, 0x610490 + (ch * 0x0010), 0x00010000, 0x00000000);
+ nv_mask(dev, 0x610090, (1 << ch), 0x00000000);
+ nv_mask(dev, 0x6100a0, (1 << ch), 0x00000000);
+}
+
+static bool
+evo_sync_wait(void *data)
+{
+ return nouveau_bo_rd32(data, EVO_MAST_NTFY) != 0x00000000;
+}
+
+static int
+evo_sync(struct drm_device *dev, int ch)
+{
+ struct nvd0_display *disp = nvd0_display(dev);
+ u32 *push = evo_wait(dev, ch, 8);
+ if (push) {
+ nouveau_bo_wr32(disp->sync, EVO_MAST_NTFY, 0x00000000);
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, 0x80000000 | EVO_MAST_NTFY);
+ evo_mthd(push, 0x0080, 2);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_kick(push, dev, ch);
+ if (nv_wait_cb(dev, evo_sync_wait, disp->sync))
+ return 0;
+ }
+
+ return -EBUSY;
+}
+
+/******************************************************************************
+ * Page flipping channel
+ *****************************************************************************/
+struct nouveau_bo *
+nvd0_display_crtc_sema(struct drm_device *dev, int crtc)
+{
+ return nvd0_display(dev)->sync;
+}
+
+void
+nvd0_display_flip_stop(struct drm_crtc *crtc)
+{
+ struct nvd0_display *disp = nvd0_display(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
+ u32 *push;
+
+ push = evo_wait(crtc->dev, evo->idx, 8);
+ if (push) {
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0094, 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x00c0, 1);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, crtc->dev, evo->idx);
+ }
+}
+
+int
+nvd0_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ struct nouveau_channel *chan, u32 swap_interval)
+{
+ struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
+ struct nvd0_display *disp = nvd0_display(crtc->dev);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ struct evo *evo = &disp->evo[EVO_FLIP(nv_crtc->index)];
+ u64 offset;
+ u32 *push;
+ int ret;
+
+ swap_interval <<= 4;
+ if (swap_interval == 0)
+ swap_interval |= 0x100;
+
+ push = evo_wait(crtc->dev, evo->idx, 128);
+ if (unlikely(push == NULL))
+ return -EBUSY;
+
+ /* synchronise with the rendering channel, if necessary */
+ if (likely(chan)) {
+ ret = RING_SPACE(chan, 10);
+ if (ret)
+ return ret;
+
+ offset = chan->dispc_vma[nv_crtc->index].offset;
+ offset += evo->sem.offset;
+
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset));
+ OUT_RING (chan, 0xf00d0000 | evo->sem.value);
+ OUT_RING (chan, 0x1002);
+ BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0010, 4);
+ OUT_RING (chan, upper_32_bits(offset));
+ OUT_RING (chan, lower_32_bits(offset ^ 0x10));
+ OUT_RING (chan, 0x74b1e000);
+ OUT_RING (chan, 0x1001);
+ FIRE_RING (chan);
+ } else {
+ nouveau_bo_wr32(disp->sync, evo->sem.offset / 4,
+ 0xf00d0000 | evo->sem.value);
+ evo_sync(crtc->dev, EVO_MASTER);
+ }
+
+ /* queue the flip */
+ evo_mthd(push, 0x0100, 1);
+ evo_data(push, 0xfffe0000);
+ evo_mthd(push, 0x0084, 1);
+ evo_data(push, swap_interval);
+ if (!(swap_interval & 0x00000100)) {
+ evo_mthd(push, 0x00e0, 1);
+ evo_data(push, 0x40000000);
+ }
+ evo_mthd(push, 0x0088, 4);
+ evo_data(push, evo->sem.offset);
+ evo_data(push, 0xf00d0000 | evo->sem.value);
+ evo_data(push, 0x74b1e000);
+ evo_data(push, NvEvoSync);
+ evo_mthd(push, 0x00a0, 2);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x00c0, 1);
+ evo_data(push, nv_fb->r_dma);
+ evo_mthd(push, 0x0110, 2);
+ evo_data(push, 0x00000000);
+ evo_data(push, 0x00000000);
+ evo_mthd(push, 0x0400, 5);
+ evo_data(push, nv_fb->nvbo->bo.offset >> 8);
+ evo_data(push, 0);
+ evo_data(push, (fb->height << 16) | fb->width);
+ evo_data(push, nv_fb->r_pitch);
+ evo_data(push, nv_fb->r_format);
+ evo_mthd(push, 0x0080, 1);
+ evo_data(push, 0x00000000);
+ evo_kick(push, crtc->dev, evo->idx);
+
+ evo->sem.offset ^= 0x10;
+ evo->sem.value++;
+ return 0;
}
/******************************************************************************
* CRTC
*****************************************************************************/
static int
-nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
+nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool update)
{
struct drm_device *dev = nv_crtc->base.dev;
- u32 *push, mode;
+ struct nouveau_connector *nv_connector;
+ struct drm_connector *connector;
+ u32 *push, mode = 0x00;
- mode = 0x00000000;
- if (on) {
- /* 0x11: 6bpc dynamic 2x2
- * 0x13: 8bpc dynamic 2x2
- * 0x19: 6bpc static 2x2
- * 0x1b: 8bpc static 2x2
- * 0x21: 6bpc temporal
- * 0x23: 8bpc temporal
- */
- mode = 0x00000011;
+ nv_connector = nouveau_crtc_connector_get(nv_crtc);
+ connector = &nv_connector->base;
+ if (nv_connector->dithering_mode == DITHERING_MODE_AUTO) {
+ if (nv_crtc->base.fb->depth > connector->display_info.bpc * 3)
+ mode = DITHERING_MODE_DYNAMIC2X2;
+ } else {
+ mode = nv_connector->dithering_mode;
+ }
+
+ if (nv_connector->dithering_depth == DITHERING_DEPTH_AUTO) {
+ if (connector->display_info.bpc >= 8)
+ mode |= DITHERING_DEPTH_8BPC;
+ } else {
+ mode |= nv_connector->dithering_depth;
}
- push = evo_wait(dev, 0, 4);
+ push = evo_wait(dev, EVO_MASTER, 4);
if (push) {
evo_mthd(push, 0x0490 + (nv_crtc->index * 0x300), 1);
evo_data(push, mode);
@@ -132,63 +390,98 @@ nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
evo_mthd(push, 0x0080, 1);
evo_data(push, 0x00000000);
}
- evo_kick(push, dev, 0);
+ evo_kick(push, dev, EVO_MASTER);
}
return 0;
}
static int
-nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, int type, bool update)
+nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
{
- struct drm_display_mode *mode = &nv_crtc->base.mode;
+ struct drm_display_mode *omode, *umode = &nv_crtc->base.mode;
struct drm_device *dev = nv_crtc->base.dev;
+ struct drm_crtc *crtc = &nv_crtc->base;
struct nouveau_connector *nv_connector;
- u32 *push, outX, outY;
-
- outX = mode->hdisplay;
- outY = mode->vdisplay;
+ int mode = DRM_MODE_SCALE_NONE;
+ u32 oX, oY, *push;
+ /* start off at the resolution we programmed the crtc for, this
+ * effectively handles NONE/FULL scaling
+ */
nv_connector = nouveau_crtc_connector_get(nv_crtc);
- if (nv_connector && nv_connector->native_mode) {
- struct drm_display_mode *native = nv_connector->native_mode;
- u32 xratio = (native->hdisplay << 19) / mode->hdisplay;
- u32 yratio = (native->vdisplay << 19) / mode->vdisplay;
-
- switch (type) {
- case DRM_MODE_SCALE_ASPECT:
- if (xratio > yratio) {
- outX = (mode->hdisplay * yratio) >> 19;
- outY = (mode->vdisplay * yratio) >> 19;
- } else {
- outX = (mode->hdisplay * xratio) >> 19;
- outY = (mode->vdisplay * xratio) >> 19;
- }
- break;
- case DRM_MODE_SCALE_FULLSCREEN:
- outX = native->hdisplay;
- outY = native->vdisplay;
- break;
- default:
- break;
+ if (nv_connector && nv_connector->native_mode)
+ mode = nv_connector->scaling_mode;
+
+ if (mode != DRM_MODE_SCALE_NONE)
+ omode = nv_connector->native_mode;
+ else
+ omode = umode;
+
+ oX = omode->hdisplay;
+ oY = omode->vdisplay;
+ if (omode->flags & DRM_MODE_FLAG_DBLSCAN)
+ oY *= 2;
+
+ /* add overscan compensation if necessary, will keep the aspect
+ * ratio the same as the backend mode unless overridden by the
+ * user setting both hborder and vborder properties.
+ */
+ if (nv_connector && ( nv_connector->underscan == UNDERSCAN_ON ||
+ (nv_connector->underscan == UNDERSCAN_AUTO &&
+ nv_connector->edid &&
+ drm_detect_hdmi_monitor(nv_connector->edid)))) {
+ u32 bX = nv_connector->underscan_hborder;
+ u32 bY = nv_connector->underscan_vborder;
+ u32 aspect = (oY << 19) / oX;
+
+ if (bX) {
+ oX -= (bX * 2);
+ if (bY) oY -= (bY * 2);
+ else oY = ((oX * aspect) + (aspect / 2)) >> 19;
+ } else {
+ oX -= (oX >> 4) + 32;
+ if (bY) oY -= (bY * 2);
+ else oY = ((oX * aspect) + (aspect / 2)) >> 19;
+ }
+ }
+
+ /* handle CENTER/ASPECT scaling, taking into account the areas
+ * removed already for overscan compensation
+ */
+ switch (mode) {
+ case DRM_MODE_SCALE_CENTER:
+ oX = min((u32)umode->hdisplay, oX);
+ oY = min((u32)umode->vdisplay, oY);
+ /* fall-through */
+ case DRM_MODE_SCALE_ASPECT:
+ if (oY < oX) {
+ u32 aspect = (umode->hdisplay << 19) / umode->vdisplay;
+ oX = ((oY * aspect) + (aspect / 2)) >> 19;
+ } else {
+ u32 aspect = (umode->vdisplay << 19) / umode->hdisplay;
+ oY = ((oX * aspect) + (aspect / 2)) >> 19;
}
+ break;
+ default:
+ break;
}
- push = evo_wait(dev, 0, 16);
+ push = evo_wait(dev, EVO_MASTER, 8);
if (push) {
evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
- evo_data(push, (outY << 16) | outX);
- evo_data(push, (outY << 16) | outX);
- evo_data(push, (outY << 16) | outX);
+ evo_data(push, (oY << 16) | oX);
+ evo_data(push, (oY << 16) | oX);
+ evo_data(push, (oY << 16) | oX);
evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
evo_data(push, 0x00000000);
evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
- evo_data(push, (mode->vdisplay << 16) | mode->hdisplay);
+ evo_data(push, (umode->vdisplay << 16) | umode->hdisplay);
+ evo_kick(push, dev, EVO_MASTER);
if (update) {
- evo_mthd(push, 0x0080, 1);
- evo_data(push, 0x00000000);
+ nvd0_display_flip_stop(crtc);
+ nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
}
- evo_kick(push, dev, 0);
}
return 0;
@@ -201,7 +494,7 @@ nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
u32 *push;
- push = evo_wait(fb->dev, 0, 16);
+ push = evo_wait(fb->dev, EVO_MASTER, 16);
if (push) {
evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
evo_data(push, nvfb->nvbo->bo.offset >> 8);
@@ -216,7 +509,7 @@ nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
evo_mthd(push, 0x0080, 1);
evo_data(push, 0x00000000);
}
- evo_kick(push, fb->dev, 0);
+ evo_kick(push, fb->dev, EVO_MASTER);
}
nv_crtc->fb.tile_flags = nvfb->r_dma;
@@ -227,7 +520,7 @@ static void
nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update)
{
struct drm_device *dev = nv_crtc->base.dev;
- u32 *push = evo_wait(dev, 0, 16);
+ u32 *push = evo_wait(dev, EVO_MASTER, 16);
if (push) {
if (show) {
evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
@@ -247,7 +540,7 @@ nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update)
evo_data(push, 0x00000000);
}
- evo_kick(push, dev, 0);
+ evo_kick(push, dev, EVO_MASTER);
}
}
@@ -262,7 +555,9 @@ nvd0_crtc_prepare(struct drm_crtc *crtc)
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
u32 *push;
- push = evo_wait(crtc->dev, 0, 2);
+ nvd0_display_flip_stop(crtc);
+
+ push = evo_wait(crtc->dev, EVO_MASTER, 2);
if (push) {
evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
evo_data(push, 0x00000000);
@@ -270,7 +565,7 @@ nvd0_crtc_prepare(struct drm_crtc *crtc)
evo_data(push, 0x03000000);
evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
evo_data(push, 0x00000000);
- evo_kick(push, crtc->dev, 0);
+ evo_kick(push, crtc->dev, EVO_MASTER);
}
nvd0_crtc_cursor_show(nv_crtc, false, false);
@@ -282,7 +577,7 @@ nvd0_crtc_commit(struct drm_crtc *crtc)
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
u32 *push;
- push = evo_wait(crtc->dev, 0, 32);
+ push = evo_wait(crtc->dev, EVO_MASTER, 32);
if (push) {
evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
evo_data(push, nv_crtc->fb.tile_flags);
@@ -295,10 +590,11 @@ nvd0_crtc_commit(struct drm_crtc *crtc)
evo_data(push, NvEvoVRAM);
evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
evo_data(push, 0xffffff00);
- evo_kick(push, crtc->dev, 0);
+ evo_kick(push, crtc->dev, EVO_MASTER);
}
- nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, true);
+ nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, false);
+ nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
}
static bool
@@ -333,21 +629,35 @@ nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
struct nouveau_connector *nv_connector;
- u32 htotal = mode->htotal;
- u32 vtotal = mode->vtotal;
- u32 hsyncw = mode->hsync_end - mode->hsync_start - 1;
- u32 vsyncw = mode->vsync_end - mode->vsync_start - 1;
- u32 hfrntp = mode->hsync_start - mode->hdisplay;
- u32 vfrntp = mode->vsync_start - mode->vdisplay;
- u32 hbackp = mode->htotal - mode->hsync_end;
- u32 vbackp = mode->vtotal - mode->vsync_end;
- u32 hss2be = hsyncw + hbackp;
- u32 vss2be = vsyncw + vbackp;
- u32 hss2de = htotal - hfrntp;
- u32 vss2de = vtotal - vfrntp;
+ u32 ilace = (mode->flags & DRM_MODE_FLAG_INTERLACE) ? 2 : 1;
+ u32 vscan = (mode->flags & DRM_MODE_FLAG_DBLSCAN) ? 2 : 1;
+ u32 hactive, hsynce, hbackp, hfrontp, hblanke, hblanks;
+ u32 vactive, vsynce, vbackp, vfrontp, vblanke, vblanks;
+ u32 vblan2e = 0, vblan2s = 1;
+ u32 magic = 0x31ec6000;
u32 syncs, *push;
int ret;
+ hactive = mode->htotal;
+ hsynce = mode->hsync_end - mode->hsync_start - 1;
+ hbackp = mode->htotal - mode->hsync_end;
+ hblanke = hsynce + hbackp;
+ hfrontp = mode->hsync_start - mode->hdisplay;
+ hblanks = mode->htotal - hfrontp - 1;
+
+ vactive = mode->vtotal * vscan / ilace;
+ vsynce = ((mode->vsync_end - mode->vsync_start) * vscan / ilace) - 1;
+ vbackp = (mode->vtotal - mode->vsync_end) * vscan / ilace;
+ vblanke = vsynce + vbackp;
+ vfrontp = (mode->vsync_start - mode->vdisplay) * vscan / ilace;
+ vblanks = vactive - vfrontp - 1;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ vblan2e = vactive + vsynce + vbackp;
+ vblan2s = vblan2e + (mode->vdisplay * vscan / ilace);
+ vactive = (vactive * 2) + 1;
+ magic |= 0x00000001;
+ }
+
syncs = 0x00000001;
if (mode->flags & DRM_MODE_FLAG_NHSYNC)
syncs |= 0x00000008;
@@ -358,28 +668,33 @@ nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
if (ret)
return ret;
- push = evo_wait(crtc->dev, 0, 64);
+ push = evo_wait(crtc->dev, EVO_MASTER, 64);
if (push) {
- evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 5);
+ evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 6);
evo_data(push, 0x00000000);
- evo_data(push, (vtotal << 16) | htotal);
- evo_data(push, (vsyncw << 16) | hsyncw);
- evo_data(push, (vss2be << 16) | hss2be);
- evo_data(push, (vss2de << 16) | hss2de);
+ evo_data(push, (vactive << 16) | hactive);
+ evo_data(push, ( vsynce << 16) | hsynce);
+ evo_data(push, (vblanke << 16) | hblanke);
+ evo_data(push, (vblanks << 16) | hblanks);
+ evo_data(push, (vblan2e << 16) | vblan2s);
evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
evo_data(push, 0x00000000); /* ??? */
evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
evo_data(push, mode->clock * 1000);
evo_data(push, 0x00200000); /* ??? */
evo_data(push, mode->clock * 1000);
- evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 1);
+ evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 2);
evo_data(push, syncs);
- evo_kick(push, crtc->dev, 0);
+ evo_data(push, magic);
+ evo_mthd(push, 0x04d0 + (nv_crtc->index * 0x300), 2);
+ evo_data(push, 0x00000311);
+ evo_data(push, 0x00000100);
+ evo_kick(push, crtc->dev, EVO_MASTER);
}
nv_connector = nouveau_crtc_connector_get(nv_crtc);
- nvd0_crtc_set_dither(nv_crtc, nv_connector->use_dithering, false);
- nvd0_crtc_set_scale(nv_crtc, nv_connector->scaling_mode, false);
+ nvd0_crtc_set_dither(nv_crtc, false);
+ nvd0_crtc_set_scale(nv_crtc, false);
nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
return 0;
}
@@ -400,7 +715,9 @@ nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
if (ret)
return ret;
+ nvd0_display_flip_stop(crtc);
nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
+ nvd0_display_flip_next(crtc, crtc->fb, NULL, 1);
return 0;
}
@@ -410,6 +727,7 @@ nvd0_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
enum mode_set_atomic state)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ nvd0_display_flip_stop(crtc);
nvd0_crtc_set_image(nv_crtc, fb, x, y, true);
return 0;
}
@@ -472,10 +790,10 @@ static int
nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
- const u32 data = (y << 16) | x;
+ int ch = EVO_CURS(nv_crtc->index);
- nv_wr32(crtc->dev, 0x64d084 + (nv_crtc->index * 0x1000), data);
- nv_wr32(crtc->dev, 0x64d080 + (nv_crtc->index * 0x1000), 0x00000000);
+ evo_piow(crtc->dev, ch, 0x0084, (y << 16) | x);
+ evo_piow(crtc->dev, ch, 0x0080, 0x00000000);
return 0;
}
@@ -525,6 +843,7 @@ static const struct drm_crtc_funcs nvd0_crtc_func = {
.gamma_set = nvd0_crtc_gamma_set,
.set_config = drm_crtc_helper_set_config,
.destroy = nvd0_crtc_destroy,
+ .page_flip = nouveau_crtc_page_flip,
};
static void
@@ -659,12 +978,12 @@ nvd0_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
nvd0_dac_dpms(encoder, DRM_MODE_DPMS_ON);
- push = evo_wait(encoder->dev, 0, 4);
+ push = evo_wait(encoder->dev, EVO_MASTER, 4);
if (push) {
evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 2);
evo_data(push, 1 << nv_crtc->index);
evo_data(push, 0x00ff);
- evo_kick(push, encoder->dev, 0);
+ evo_kick(push, encoder->dev, EVO_MASTER);
}
nv_encoder->crtc = encoder->crtc;
@@ -680,13 +999,13 @@ nvd0_dac_disconnect(struct drm_encoder *encoder)
if (nv_encoder->crtc) {
nvd0_crtc_prepare(nv_encoder->crtc);
- push = evo_wait(dev, 0, 4);
+ push = evo_wait(dev, EVO_MASTER, 4);
if (push) {
evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 1);
evo_data(push, 0x00000000);
evo_mthd(push, 0x0080, 1);
evo_data(push, 0x00000000);
- evo_kick(push, dev, 0);
+ evo_kick(push, dev, EVO_MASTER);
}
nv_encoder->crtc = NULL;
@@ -760,6 +1079,108 @@ nvd0_dac_create(struct drm_connector *connector, struct dcb_entry *dcbe)
}
/******************************************************************************
+ * Audio
+ *****************************************************************************/
+static void
+nvd0_audio_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_connector *nv_connector;
+ struct drm_device *dev = encoder->dev;
+ int i, or = nv_encoder->or * 0x30;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (!drm_detect_monitor_audio(nv_connector->edid))
+ return;
+
+ nv_mask(dev, 0x10ec10 + or, 0x80000003, 0x80000001);
+
+ drm_edid_to_eld(&nv_connector->base, nv_connector->edid);
+ if (nv_connector->base.eld[0]) {
+ u8 *eld = nv_connector->base.eld;
+
+ for (i = 0; i < eld[2] * 4; i++)
+ nv_wr32(dev, 0x10ec00 + or, (i << 8) | eld[i]);
+ for (i = eld[2] * 4; i < 0x60; i++)
+ nv_wr32(dev, 0x10ec00 + or, (i << 8) | 0x00);
+
+ nv_mask(dev, 0x10ec10 + or, 0x80000002, 0x80000002);
+ }
+}
+
+static void
+nvd0_audio_disconnect(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ int or = nv_encoder->or * 0x30;
+
+ nv_mask(dev, 0x10ec10 + or, 0x80000003, 0x80000000);
+}
+
+/******************************************************************************
+ * HDMI
+ *****************************************************************************/
+static void
+nvd0_hdmi_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
+ struct nouveau_connector *nv_connector;
+ struct drm_device *dev = encoder->dev;
+ int head = nv_crtc->index * 0x800;
+ u32 rekey = 56; /* binary driver, and tegra constant */
+ u32 max_ac_packet;
+
+ nv_connector = nouveau_encoder_connector_get(nv_encoder);
+ if (!drm_detect_hdmi_monitor(nv_connector->edid))
+ return;
+
+ max_ac_packet = mode->htotal - mode->hdisplay;
+ max_ac_packet -= rekey;
+ max_ac_packet -= 18; /* constant from tegra */
+ max_ac_packet /= 32;
+
+ /* AVI InfoFrame */
+ nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000000);
+ nv_wr32(dev, 0x61671c + head, 0x000d0282);
+ nv_wr32(dev, 0x616720 + head, 0x0000006f);
+ nv_wr32(dev, 0x616724 + head, 0x00000000);
+ nv_wr32(dev, 0x616728 + head, 0x00000000);
+ nv_wr32(dev, 0x61672c + head, 0x00000000);
+ nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000001);
+
+ /* ??? InfoFrame? */
+ nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000000);
+ nv_wr32(dev, 0x6167ac + head, 0x00000010);
+ nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000001);
+
+ /* HDMI_CTRL */
+ nv_mask(dev, 0x616798 + head, 0x401f007f, 0x40000000 | rekey |
+ max_ac_packet << 16);
+
+ /* NFI, audio doesn't work without it though.. */
+ nv_mask(dev, 0x616548 + head, 0x00000070, 0x00000000);
+
+ nvd0_audio_mode_set(encoder, mode);
+}
+
+static void
+nvd0_hdmi_disconnect(struct drm_encoder *encoder)
+{
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(nv_encoder->crtc);
+ struct drm_device *dev = encoder->dev;
+ int head = nv_crtc->index * 0x800;
+
+ nvd0_audio_disconnect(encoder);
+
+ nv_mask(dev, 0x616798 + head, 0x40000000, 0x00000000);
+ nv_mask(dev, 0x6167a4 + head, 0x00000001, 0x00000000);
+ nv_mask(dev, 0x616714 + head, 0x00000001, 0x00000000);
+}
+
+/******************************************************************************
* SOR
*****************************************************************************/
static void
@@ -829,7 +1250,8 @@ static void
nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
struct drm_display_mode *mode)
{
- struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
+ struct drm_device *dev = encoder->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
struct nouveau_connector *nv_connector;
@@ -852,6 +1274,8 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
or_config = (mode_ctrl & 0x00000f00) >> 8;
if (mode->clock >= 165000)
or_config |= 0x0100;
+
+ nvd0_hdmi_mode_set(encoder, mode);
break;
case OUTPUT_LVDS:
or_config = (mode_ctrl & 0x00000f00) >> 8;
@@ -861,7 +1285,7 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
if (bios->fp.if_is_24bit)
or_config |= 0x0200;
} else {
- if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG) {
+ if (nv_connector->type == DCB_CONNECTOR_LVDS_SPWG) {
if (((u8 *)nv_connector->edid)[121] == 2)
or_config |= 0x0100;
} else
@@ -889,12 +1313,12 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON);
- push = evo_wait(encoder->dev, 0, 4);
+ push = evo_wait(dev, EVO_MASTER, 4);
if (push) {
evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 2);
evo_data(push, mode_ctrl);
evo_data(push, or_config);
- evo_kick(push, encoder->dev, 0);
+ evo_kick(push, dev, EVO_MASTER);
}
nv_encoder->crtc = encoder->crtc;
@@ -910,15 +1334,17 @@ nvd0_sor_disconnect(struct drm_encoder *encoder)
if (nv_encoder->crtc) {
nvd0_crtc_prepare(nv_encoder->crtc);
- push = evo_wait(dev, 0, 4);
+ push = evo_wait(dev, EVO_MASTER, 4);
if (push) {
evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
evo_data(push, 0x00000000);
evo_mthd(push, 0x0080, 1);
evo_data(push, 0x00000000);
- evo_kick(push, dev, 0);
+ evo_kick(push, dev, EVO_MASTER);
}
+ nvd0_hdmi_disconnect(encoder);
+
nv_encoder->crtc = NULL;
nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
}
@@ -1159,6 +1585,12 @@ nvd0_display_intr(struct drm_device *dev)
struct nvd0_display *disp = nvd0_display(dev);
u32 intr = nv_rd32(dev, 0x610088);
+ if (intr & 0x00000001) {
+ u32 stat = nv_rd32(dev, 0x61008c);
+ nv_wr32(dev, 0x61008c, stat);
+ intr &= ~0x00000001;
+ }
+
if (intr & 0x00000002) {
u32 stat = nv_rd32(dev, 0x61009c);
int chid = ffs(stat) - 1;
@@ -1215,38 +1647,29 @@ nvd0_display_intr(struct drm_device *dev)
/******************************************************************************
* Init
*****************************************************************************/
-static void
+void
nvd0_display_fini(struct drm_device *dev)
{
int i;
- /* fini cursors */
- for (i = 14; i >= 13; i--) {
- if (!(nv_rd32(dev, 0x610490 + (i * 0x10)) & 0x00000001))
- continue;
-
- nv_mask(dev, 0x610490 + (i * 0x10), 0x00000001, 0x00000000);
- nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00000000);
- nv_mask(dev, 0x610090, 1 << i, 0x00000000);
- nv_mask(dev, 0x6100a0, 1 << i, 0x00000000);
+ /* fini cursors + overlays + flips */
+ for (i = 1; i >= 0; i--) {
+ evo_fini_pio(dev, EVO_CURS(i));
+ evo_fini_pio(dev, EVO_OIMM(i));
+ evo_fini_dma(dev, EVO_OVLY(i));
+ evo_fini_dma(dev, EVO_FLIP(i));
}
/* fini master */
- if (nv_rd32(dev, 0x610490) & 0x00000010) {
- nv_mask(dev, 0x610490, 0x00000010, 0x00000000);
- nv_mask(dev, 0x610490, 0x00000003, 0x00000000);
- nv_wait(dev, 0x610490, 0x80000000, 0x00000000);
- nv_mask(dev, 0x610090, 0x00000001, 0x00000000);
- nv_mask(dev, 0x6100a0, 0x00000001, 0x00000000);
- }
+ evo_fini_dma(dev, EVO_MASTER);
}
int
nvd0_display_init(struct drm_device *dev)
{
struct nvd0_display *disp = nvd0_display(dev);
+ int ret, i;
u32 *push;
- int i;
if (nv_rd32(dev, 0x6100ac) & 0x00000100) {
nv_wr32(dev, 0x6100ac, 0x00000100);
@@ -1271,7 +1694,7 @@ nvd0_display_init(struct drm_device *dev)
nv_wr32(dev, 0x6301c4 + (i * 0x800), sor);
}
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
u32 crtc0 = nv_rd32(dev, 0x616104 + (i * 0x800));
u32 crtc1 = nv_rd32(dev, 0x616108 + (i * 0x800));
u32 crtc2 = nv_rd32(dev, 0x61610c + (i * 0x800));
@@ -1285,36 +1708,24 @@ nvd0_display_init(struct drm_device *dev)
nv_mask(dev, 0x6100b0, 0x00000307, 0x00000307);
/* init master */
- nv_wr32(dev, 0x610494, (disp->evo[0].handle >> 8) | 3);
- nv_wr32(dev, 0x610498, 0x00010000);
- nv_wr32(dev, 0x61049c, 0x00000001);
- nv_mask(dev, 0x610490, 0x00000010, 0x00000010);
- nv_wr32(dev, 0x640000, 0x00000000);
- nv_wr32(dev, 0x610490, 0x01000013);
- if (!nv_wait(dev, 0x610490, 0x80000000, 0x00000000)) {
- NV_ERROR(dev, "PDISP: master 0x%08x\n",
- nv_rd32(dev, 0x610490));
- return -EBUSY;
+ ret = evo_init_dma(dev, EVO_MASTER);
+ if (ret)
+ goto error;
+
+ /* init flips + overlays + cursors */
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ if ((ret = evo_init_dma(dev, EVO_FLIP(i))) ||
+ (ret = evo_init_dma(dev, EVO_OVLY(i))) ||
+ (ret = evo_init_pio(dev, EVO_OIMM(i))) ||
+ (ret = evo_init_pio(dev, EVO_CURS(i))))
+ goto error;
}
- nv_mask(dev, 0x610090, 0x00000001, 0x00000001);
- nv_mask(dev, 0x6100a0, 0x00000001, 0x00000001);
- /* init cursors */
- for (i = 13; i <= 14; i++) {
- nv_wr32(dev, 0x610490 + (i * 0x10), 0x00000001);
- if (!nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00010000)) {
- NV_ERROR(dev, "PDISP: curs%d 0x%08x\n", i,
- nv_rd32(dev, 0x610490 + (i * 0x10)));
- return -EBUSY;
- }
-
- nv_mask(dev, 0x610090, 1 << i, 1 << i);
- nv_mask(dev, 0x6100a0, 1 << i, 1 << i);
+ push = evo_wait(dev, EVO_MASTER, 32);
+ if (!push) {
+ ret = -EBUSY;
+ goto error;
}
-
- push = evo_wait(dev, 0, 32);
- if (!push)
- return -EBUSY;
evo_mthd(push, 0x0088, 1);
evo_data(push, NvEvoSync);
evo_mthd(push, 0x0084, 1);
@@ -1323,9 +1734,12 @@ nvd0_display_init(struct drm_device *dev)
evo_data(push, 0x80000000);
evo_mthd(push, 0x008c, 1);
evo_data(push, 0x00000000);
- evo_kick(push, dev, 0);
+ evo_kick(push, dev, EVO_MASTER);
- return 0;
+error:
+ if (ret)
+ nvd0_display_fini(dev);
+ return ret;
}
void
@@ -1334,11 +1748,16 @@ nvd0_display_destroy(struct drm_device *dev)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nvd0_display *disp = nvd0_display(dev);
struct pci_dev *pdev = dev->pdev;
+ int i;
- nvd0_display_fini(dev);
+ for (i = 0; i < EVO_DMA_NR; i++) {
+ struct evo *evo = &disp->evo[i];
+ pci_free_consistent(pdev, PAGE_SIZE, evo->ptr, evo->handle);
+ }
- pci_free_consistent(pdev, PAGE_SIZE, disp->evo[0].ptr, disp->evo[0].handle);
nouveau_gpuobj_ref(NULL, &disp->mem);
+ nouveau_bo_unmap(disp->sync);
+ nouveau_bo_ref(NULL, &disp->sync);
nouveau_irq_unregister(dev, 26);
dev_priv->engine.display.priv = NULL;
@@ -1410,61 +1829,83 @@ nvd0_display_create(struct drm_device *dev)
tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev);
nouveau_irq_register(dev, 26, nvd0_display_intr);
+ /* small shared memory area we use for notifiers and semaphores */
+ ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+ 0, 0x0000, &disp->sync);
+ if (!ret) {
+ ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM);
+ if (!ret)
+ ret = nouveau_bo_map(disp->sync);
+ if (ret)
+ nouveau_bo_ref(NULL, &disp->sync);
+ }
+
+ if (ret)
+ goto out;
+
/* hash table and dma objects for the memory areas we care about */
ret = nouveau_gpuobj_new(dev, NULL, 0x4000, 0x10000,
NVOBJ_FLAG_ZERO_ALLOC, &disp->mem);
if (ret)
goto out;
- nv_wo32(disp->mem, 0x1000, 0x00000049);
- nv_wo32(disp->mem, 0x1004, (disp->mem->vinst + 0x2000) >> 8);
- nv_wo32(disp->mem, 0x1008, (disp->mem->vinst + 0x2fff) >> 8);
- nv_wo32(disp->mem, 0x100c, 0x00000000);
- nv_wo32(disp->mem, 0x1010, 0x00000000);
- nv_wo32(disp->mem, 0x1014, 0x00000000);
- nv_wo32(disp->mem, 0x0000, NvEvoSync);
- nv_wo32(disp->mem, 0x0004, (0x1000 << 9) | 0x00000001);
-
- nv_wo32(disp->mem, 0x1020, 0x00000049);
- nv_wo32(disp->mem, 0x1024, 0x00000000);
- nv_wo32(disp->mem, 0x1028, (dev_priv->vram_size - 1) >> 8);
- nv_wo32(disp->mem, 0x102c, 0x00000000);
- nv_wo32(disp->mem, 0x1030, 0x00000000);
- nv_wo32(disp->mem, 0x1034, 0x00000000);
- nv_wo32(disp->mem, 0x0008, NvEvoVRAM);
- nv_wo32(disp->mem, 0x000c, (0x1020 << 9) | 0x00000001);
-
- nv_wo32(disp->mem, 0x1040, 0x00000009);
- nv_wo32(disp->mem, 0x1044, 0x00000000);
- nv_wo32(disp->mem, 0x1048, (dev_priv->vram_size - 1) >> 8);
- nv_wo32(disp->mem, 0x104c, 0x00000000);
- nv_wo32(disp->mem, 0x1050, 0x00000000);
- nv_wo32(disp->mem, 0x1054, 0x00000000);
- nv_wo32(disp->mem, 0x0010, NvEvoVRAM_LP);
- nv_wo32(disp->mem, 0x0014, (0x1040 << 9) | 0x00000001);
-
- nv_wo32(disp->mem, 0x1060, 0x0fe00009);
- nv_wo32(disp->mem, 0x1064, 0x00000000);
- nv_wo32(disp->mem, 0x1068, (dev_priv->vram_size - 1) >> 8);
- nv_wo32(disp->mem, 0x106c, 0x00000000);
- nv_wo32(disp->mem, 0x1070, 0x00000000);
- nv_wo32(disp->mem, 0x1074, 0x00000000);
- nv_wo32(disp->mem, 0x0018, NvEvoFB32);
- nv_wo32(disp->mem, 0x001c, (0x1060 << 9) | 0x00000001);
-
- pinstmem->flush(dev);
+ /* create evo dma channels */
+ for (i = 0; i < EVO_DMA_NR; i++) {
+ struct evo *evo = &disp->evo[i];
+ u64 offset = disp->sync->bo.offset;
+ u32 dmao = 0x1000 + (i * 0x100);
+ u32 hash = 0x0000 + (i * 0x040);
+
+ evo->idx = i;
+ evo->sem.offset = EVO_SYNC(evo->idx, 0x00);
+ evo->ptr = pci_alloc_consistent(pdev, PAGE_SIZE, &evo->handle);
+ if (!evo->ptr) {
+ ret = -ENOMEM;
+ goto out;
+ }
- /* push buffers for evo channels */
- disp->evo[0].ptr =
- pci_alloc_consistent(pdev, PAGE_SIZE, &disp->evo[0].handle);
- if (!disp->evo[0].ptr) {
- ret = -ENOMEM;
- goto out;
+ nv_wo32(disp->mem, dmao + 0x00, 0x00000049);
+ nv_wo32(disp->mem, dmao + 0x04, (offset + 0x0000) >> 8);
+ nv_wo32(disp->mem, dmao + 0x08, (offset + 0x0fff) >> 8);
+ nv_wo32(disp->mem, dmao + 0x0c, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x10, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x14, 0x00000000);
+ nv_wo32(disp->mem, hash + 0x00, NvEvoSync);
+ nv_wo32(disp->mem, hash + 0x04, 0x00000001 | (i << 27) |
+ ((dmao + 0x00) << 9));
+
+ nv_wo32(disp->mem, dmao + 0x20, 0x00000049);
+ nv_wo32(disp->mem, dmao + 0x24, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x28, (dev_priv->vram_size - 1) >> 8);
+ nv_wo32(disp->mem, dmao + 0x2c, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x30, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x34, 0x00000000);
+ nv_wo32(disp->mem, hash + 0x08, NvEvoVRAM);
+ nv_wo32(disp->mem, hash + 0x0c, 0x00000001 | (i << 27) |
+ ((dmao + 0x20) << 9));
+
+ nv_wo32(disp->mem, dmao + 0x40, 0x00000009);
+ nv_wo32(disp->mem, dmao + 0x44, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x48, (dev_priv->vram_size - 1) >> 8);
+ nv_wo32(disp->mem, dmao + 0x4c, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x50, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x54, 0x00000000);
+ nv_wo32(disp->mem, hash + 0x10, NvEvoVRAM_LP);
+ nv_wo32(disp->mem, hash + 0x14, 0x00000001 | (i << 27) |
+ ((dmao + 0x40) << 9));
+
+ nv_wo32(disp->mem, dmao + 0x60, 0x0fe00009);
+ nv_wo32(disp->mem, dmao + 0x64, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x68, (dev_priv->vram_size - 1) >> 8);
+ nv_wo32(disp->mem, dmao + 0x6c, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x70, 0x00000000);
+ nv_wo32(disp->mem, dmao + 0x74, 0x00000000);
+ nv_wo32(disp->mem, hash + 0x18, NvEvoFB32);
+ nv_wo32(disp->mem, hash + 0x1c, 0x00000001 | (i << 27) |
+ ((dmao + 0x60) << 9));
}
- ret = nvd0_display_init(dev);
- if (ret)
- goto out;
+ pinstmem->flush(dev);
out:
if (ret)
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index 4c8796b..6a5f439 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -42,6 +42,20 @@ static struct pci_device_id pciidlist[] = {
r128_PCI_IDS
};
+static const struct file_operations r128_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = r128_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
@@ -60,21 +74,7 @@ static struct drm_driver driver = {
.reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = r128_ioctls,
.dma_ioctl = r128_cce_buffers,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = r128_compat_ioctl,
-#endif
- .llseek = noop_llseek,
- },
-
-
+ .fops = &r128_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index cf8b4bc..2139fe8 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -70,7 +70,8 @@ radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
evergreen.o evergreen_cs.o evergreen_blit_shaders.o evergreen_blit_kms.o \
- radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o
+ radeon_trace_points.o ni.o cayman_blit_shaders.o atombios_encoders.o \
+ radeon_semaphore.o radeon_sa.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
@@ -78,4 +79,4 @@ radeon-$(CONFIG_ACPI) += radeon_acpi.o
obj-$(CONFIG_DRM_RADEON)+= radeon.o
-CFLAGS_radeon_trace_points.o := -I$(src)
\ No newline at end of file
+CFLAGS_radeon_trace_points.o := -I$(src)
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 14cc88a..d1bd239 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -665,6 +665,8 @@ static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg)
SDEBUG(" count: %d\n", count);
if (arg == ATOM_UNIT_MICROSEC)
udelay(count);
+ else if (!drm_can_sleep())
+ mdelay(count);
else
msleep(count);
}
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 2b97262..742f17f 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -355,15 +355,12 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
-static void atombios_disable_ss(struct drm_crtc *crtc)
+static void atombios_disable_ss(struct radeon_device *rdev, int pll_id)
{
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
- struct drm_device *dev = crtc->dev;
- struct radeon_device *rdev = dev->dev_private;
u32 ss_cntl;
if (ASIC_IS_DCE4(rdev)) {
- switch (radeon_crtc->pll_id) {
+ switch (pll_id) {
case ATOM_PPLL1:
ss_cntl = RREG32(EVERGREEN_P1PLL_SS_CNTL);
ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
@@ -379,7 +376,7 @@ static void atombios_disable_ss(struct drm_crtc *crtc)
return;
}
} else if (ASIC_IS_AVIVO(rdev)) {
- switch (radeon_crtc->pll_id) {
+ switch (pll_id) {
case ATOM_PPLL1:
ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
ss_cntl &= ~1;
@@ -406,13 +403,11 @@ union atom_enable_ss {
ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3;
};
-static void atombios_crtc_program_ss(struct drm_crtc *crtc,
+static void atombios_crtc_program_ss(struct radeon_device *rdev,
int enable,
int pll_id,
struct radeon_atom_ss *ss)
{
- struct drm_device *dev = crtc->dev;
- struct radeon_device *rdev = dev->dev_private;
int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
union atom_enable_ss args;
@@ -479,7 +474,7 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
} else if (ASIC_IS_AVIVO(rdev)) {
if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
(ss->type & ATOM_EXTERNAL_SS_MASK)) {
- atombios_disable_ss(crtc);
+ atombios_disable_ss(rdev, pll_id);
return;
}
args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
@@ -491,7 +486,7 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
} else {
if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
(ss->type & ATOM_EXTERNAL_SS_MASK)) {
- atombios_disable_ss(crtc);
+ atombios_disable_ss(rdev, pll_id);
return;
}
args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
@@ -523,6 +518,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
int encoder_mode = 0;
u32 dp_clock = mode->clock;
int bpc = 8;
+ bool is_duallink = false;
/* reset the pll flags */
pll->flags = 0;
@@ -554,9 +550,10 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
connector = radeon_get_connector_for_encoder(encoder);
- if (connector)
+ if (connector && connector->display_info.bpc)
bpc = connector->display_info.bpc;
encoder_mode = atombios_get_encoder_mode(encoder);
+ is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
(radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
if (connector) {
@@ -652,7 +649,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
if (dig->coherent_mode)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_COHERENT_MODE;
- if (mode->clock > 165000)
+ if (is_duallink)
args.v3.sInput.ucDispPllConfig |=
DISPPLL_CONFIG_DUAL_LINK;
}
@@ -702,11 +699,9 @@ union set_pixel_clock {
/* on DCE5, make sure the voltage is high enough to support the
* required disp clk.
*/
-static void atombios_crtc_set_dcpll(struct drm_crtc *crtc,
+static void atombios_crtc_set_dcpll(struct radeon_device *rdev,
u32 dispclk)
{
- struct drm_device *dev = crtc->dev;
- struct radeon_device *rdev = dev->dev_private;
u8 frev, crev;
int index;
union set_pixel_clock args;
@@ -996,7 +991,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
&ref_div, &post_div);
- atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss);
+ atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, &ss);
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
encoder_mode, radeon_encoder->encoder_id, mode->clock,
@@ -1019,7 +1014,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
ss.step = step_size;
}
- atombios_crtc_program_ss(crtc, ATOM_ENABLE, radeon_crtc->pll_id, &ss);
+ atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, &ss);
}
}
@@ -1184,12 +1179,12 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
- fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
+ fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
- crtc->mode.vdisplay);
+ target_fb->height);
x &= ~3;
y &= ~1;
WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
@@ -1353,12 +1348,12 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
WREG32(AVIVO_D1GRPH_X_END + radeon_crtc->crtc_offset, target_fb->width);
WREG32(AVIVO_D1GRPH_Y_END + radeon_crtc->crtc_offset, target_fb->height);
- fb_pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
+ fb_pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
WREG32(AVIVO_D1GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
WREG32(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
- crtc->mode.vdisplay);
+ target_fb->height);
x &= ~3;
y &= ~1;
WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset,
@@ -1494,6 +1489,24 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
}
+void radeon_atom_dcpll_init(struct radeon_device *rdev)
+{
+ /* always set DCPLL */
+ if (ASIC_IS_DCE4(rdev)) {
+ struct radeon_atom_ss ss;
+ bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
+ ASIC_INTERNAL_SS_ON_DCPLL,
+ rdev->clock.default_dispclk);
+ if (ss_enabled)
+ atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, &ss);
+ /* XXX: DCE5, make sure voltage, dispclk is high enough */
+ atombios_crtc_set_dcpll(rdev, rdev->clock.default_dispclk);
+ if (ss_enabled)
+ atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, &ss);
+ }
+
+}
+
int atombios_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -1515,19 +1528,6 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
}
}
- /* always set DCPLL */
- if (ASIC_IS_DCE4(rdev)) {
- struct radeon_atom_ss ss;
- bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
- ASIC_INTERNAL_SS_ON_DCPLL,
- rdev->clock.default_dispclk);
- if (ss_enabled)
- atombios_crtc_program_ss(crtc, ATOM_DISABLE, ATOM_DCPLL, &ss);
- /* XXX: DCE5, make sure voltage, dispclk is high enough */
- atombios_crtc_set_dcpll(crtc, rdev->clock.default_dispclk);
- if (ss_enabled)
- atombios_crtc_program_ss(crtc, ATOM_ENABLE, ATOM_DCPLL, &ss);
- }
atombios_crtc_set_pll(crtc, adjusted_mode);
if (ASIC_IS_DCE4(rdev))
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 6fb335a..552b436 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -549,8 +549,8 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
return false;
}
-static void radeon_dp_set_panel_mode(struct drm_encoder *encoder,
- struct drm_connector *connector)
+int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
+ struct drm_connector *connector)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
@@ -558,28 +558,33 @@ static void radeon_dp_set_panel_mode(struct drm_encoder *encoder,
int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
if (!ASIC_IS_DCE4(rdev))
- return;
+ return panel_mode;
if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
ENCODER_OBJECT_ID_NUTMEG)
panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
- ENCODER_OBJECT_ID_TRAVIS)
- panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
- else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ ENCODER_OBJECT_ID_TRAVIS) {
+ u8 id[6];
+ int i;
+ for (i = 0; i < 6; i++)
+ id[i] = radeon_read_dpcd_reg(radeon_connector, 0x503 + i);
+ if (id[0] == 0x73 &&
+ id[1] == 0x69 &&
+ id[2] == 0x76 &&
+ id[3] == 0x61 &&
+ id[4] == 0x72 &&
+ id[5] == 0x54)
+ panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
+ else
+ panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+ } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
u8 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
if (tmp & 1)
panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
}
- atombios_dig_encoder_setup(encoder,
- ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
- panel_mode);
-
- if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
- (panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
- radeon_write_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_SET, 1);
- }
+ return panel_mode;
}
void radeon_dp_set_link_config(struct drm_connector *connector,
@@ -717,6 +722,8 @@ static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp)
static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(dp_info->encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
u8 tmp;
/* power up the sink */
@@ -732,7 +739,10 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
radeon_write_dpcd_reg(dp_info->radeon_connector,
DP_DOWNSPREAD_CTRL, 0);
- radeon_dp_set_panel_mode(dp_info->encoder, dp_info->connector);
+ if ((dp_info->connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
+ (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
+ radeon_write_dpcd_reg(dp_info->radeon_connector, DP_EDP_CONFIGURATION_SET, 1);
+ }
/* set the lane count on the sink */
tmp = dp_info->dp_lane_count;
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 39c04c1..b88c460 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -57,22 +57,6 @@ static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
}
}
-static struct drm_connector *
-radeon_get_connector_for_encoder_init(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- radeon_connector = to_radeon_connector(connector);
- if (radeon_encoder->devices & radeon_connector->devices)
- return connector;
- }
- return NULL;
-}
-
static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -253,7 +237,7 @@ atombios_dvo_setup(struct drm_encoder *encoder, int action)
/* R4xx, R5xx */
args.ext_tmds.sXTmdsEncoder.ucEnable = action;
- if (radeon_encoder->pixel_clock > 165000)
+ if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL;
args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB;
@@ -265,7 +249,7 @@ atombios_dvo_setup(struct drm_encoder *encoder, int action)
/* DFP1, CRT1, TV1 depending on the type of port */
args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX;
- if (radeon_encoder->pixel_clock > 165000)
+ if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL;
break;
case 3:
@@ -349,7 +333,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
} else {
if (dig->linkb)
args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
- if (radeon_encoder->pixel_clock > 165000)
+ if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
/*if (pScrn->rgbBits == 8) */
args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
@@ -388,7 +372,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
} else {
if (dig->linkb)
args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
- if (radeon_encoder->pixel_clock > 165000)
+ if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
}
break;
@@ -409,8 +393,6 @@ int
atombios_get_encoder_mode(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
struct radeon_connector_atom_dig *dig_connector;
@@ -434,13 +416,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
switch (connector->connector_type) {
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
- if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
- /* fix me */
- if (ASIC_IS_DCE4(rdev))
- return ATOM_ENCODER_MODE_DVI;
- else
- return ATOM_ENCODER_MODE_HDMI;
- } else if (radeon_connector->use_digital)
+ if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ radeon_audio)
+ return ATOM_ENCODER_MODE_HDMI;
+ else if (radeon_connector->use_digital)
return ATOM_ENCODER_MODE_DVI;
else
return ATOM_ENCODER_MODE_CRT;
@@ -448,13 +427,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
case DRM_MODE_CONNECTOR_DVID:
case DRM_MODE_CONNECTOR_HDMIA:
default:
- if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
- /* fix me */
- if (ASIC_IS_DCE4(rdev))
- return ATOM_ENCODER_MODE_DVI;
- else
- return ATOM_ENCODER_MODE_HDMI;
- } else
+ if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ radeon_audio)
+ return ATOM_ENCODER_MODE_HDMI;
+ else
return ATOM_ENCODER_MODE_DVI;
break;
case DRM_MODE_CONNECTOR_LVDS:
@@ -465,13 +441,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
(dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
- else if (drm_detect_monitor_audio(radeon_connector->edid) && radeon_audio) {
- /* fix me */
- if (ASIC_IS_DCE4(rdev))
- return ATOM_ENCODER_MODE_DVI;
- else
- return ATOM_ENCODER_MODE_HDMI;
- } else
+ else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
+ radeon_audio)
+ return ATOM_ENCODER_MODE_HDMI;
+ else
return ATOM_ENCODER_MODE_DVI;
break;
case DRM_MODE_CONNECTOR_eDP:
@@ -598,7 +571,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
args.v1.ucLaneNum = dp_lane_count;
- else if (radeon_encoder->pixel_clock > 165000)
+ else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v1.ucLaneNum = 8;
else
args.v1.ucLaneNum = 4;
@@ -633,7 +606,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
args.v3.ucLaneNum = dp_lane_count;
- else if (radeon_encoder->pixel_clock > 165000)
+ else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v3.ucLaneNum = 8;
else
args.v3.ucLaneNum = 4;
@@ -673,7 +646,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
args.v4.ucLaneNum = dp_lane_count;
- else if (radeon_encoder->pixel_clock > 165000)
+ else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v4.ucLaneNum = 8;
else
args.v4.ucLaneNum = 4;
@@ -817,7 +790,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
if (is_dp)
args.v1.usPixelClock =
cpu_to_le16(dp_clock / 10);
- else if (radeon_encoder->pixel_clock > 165000)
+ else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
else
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
@@ -832,7 +805,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
if ((rdev->flags & RADEON_IS_IGP) &&
(radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
- if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
+ if (is_dp ||
+ !radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) {
if (igp_lane_info & 0x1)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
else if (igp_lane_info & 0x2)
@@ -859,7 +833,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
- if (radeon_encoder->pixel_clock > 165000)
+ if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
}
break;
@@ -874,7 +848,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
if (is_dp)
args.v2.usPixelClock =
cpu_to_le16(dp_clock / 10);
- else if (radeon_encoder->pixel_clock > 165000)
+ else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
else
args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
@@ -902,7 +876,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
} else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v2.acConfig.fCoherentMode = 1;
- if (radeon_encoder->pixel_clock > 165000)
+ if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v2.acConfig.fDualLinkConnector = 1;
}
break;
@@ -917,7 +891,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
if (is_dp)
args.v3.usPixelClock =
cpu_to_le16(dp_clock / 10);
- else if (radeon_encoder->pixel_clock > 165000)
+ else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v3.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
else
args.v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
@@ -925,7 +899,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
if (is_dp)
args.v3.ucLaneNum = dp_lane_count;
- else if (radeon_encoder->pixel_clock > 165000)
+ else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v3.ucLaneNum = 8;
else
args.v3.ucLaneNum = 4;
@@ -962,7 +936,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v3.acConfig.fCoherentMode = 1;
- if (radeon_encoder->pixel_clock > 165000)
+ if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v3.acConfig.fDualLinkConnector = 1;
}
break;
@@ -977,7 +951,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
if (is_dp)
args.v4.usPixelClock =
cpu_to_le16(dp_clock / 10);
- else if (radeon_encoder->pixel_clock > 165000)
+ else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v4.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
else
args.v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
@@ -985,7 +959,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
if (is_dp)
args.v4.ucLaneNum = dp_lane_count;
- else if (radeon_encoder->pixel_clock > 165000)
+ else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v4.ucLaneNum = 8;
else
args.v4.ucLaneNum = 4;
@@ -1025,7 +999,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v4.acConfig.fCoherentMode = 1;
- if (radeon_encoder->pixel_clock > 165000)
+ if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v4.acConfig.fDualLinkConnector = 1;
}
break;
@@ -1148,7 +1122,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
if (dp_clock == 270000)
args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
- } else if (radeon_encoder->pixel_clock > 165000)
+ } else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v1.sDigEncoder.ucLaneNum = 8;
else
args.v1.sDigEncoder.ucLaneNum = 4;
@@ -1167,7 +1141,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
else if (dp_clock == 540000)
args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
- } else if (radeon_encoder->pixel_clock > 165000)
+ } else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v3.sExtEncoder.ucLaneNum = 8;
else
args.v3.sExtEncoder.ucLaneNum = 4;
@@ -1352,7 +1326,8 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
/* some early dce3.2 boards have a bug in their transmitter control table */
- if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
+ if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730) ||
+ ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev))
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
else
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
@@ -1362,8 +1337,6 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
ATOM_TRANSMITTER_ACTION_POWER_ON);
radeon_dig_connector->edp_on = true;
}
- if (ASIC_IS_DCE4(rdev))
- atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
radeon_dp_link_train(encoder, connector);
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
@@ -1374,7 +1347,10 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ else
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
@@ -1821,7 +1797,21 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- if (ASIC_IS_DCE4(rdev)) {
+ if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+ if (!connector)
+ dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+ else
+ dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector);
+
+ /* setup and enable the encoder */
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+ atombios_dig_encoder_setup(encoder,
+ ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
+ dig->panel_mode);
+ } else if (ASIC_IS_DCE4(rdev)) {
/* disable the transmitter */
atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
/* setup and enable the encoder */
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 92c9628..f58254a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -40,6 +40,8 @@
static void evergreen_gpu_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
void evergreen_pcie_gen2_enable(struct radeon_device *rdev);
+extern void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
+ int ring, u32 cp_int_cntl);
void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev)
{
@@ -1311,18 +1313,20 @@ void evergreen_mc_program(struct radeon_device *rdev)
*/
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
+ struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+
/* set to DX10/11 mode */
- radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
- radeon_ring_write(rdev, 1);
+ radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+ radeon_ring_write(ring, 1);
/* FIXME: implement */
- radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
- radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
- radeon_ring_write(rdev, ib->length_dw);
+ radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+ radeon_ring_write(ring, ib->length_dw);
}
@@ -1360,71 +1364,73 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
static int evergreen_cp_start(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r, i;
uint32_t cp_me;
- r = radeon_ring_lock(rdev, 7);
+ r = radeon_ring_lock(rdev, ring, 7);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
- radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
- radeon_ring_write(rdev, 0x1);
- radeon_ring_write(rdev, 0x0);
- radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
- radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+ radeon_ring_write(ring, 0x1);
+ radeon_ring_write(ring, 0x0);
+ radeon_ring_write(ring, rdev->config.evergreen.max_hw_contexts - 1);
+ radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_unlock_commit(rdev, ring);
cp_me = 0xff;
WREG32(CP_ME_CNTL, cp_me);
- r = radeon_ring_lock(rdev, evergreen_default_size + 19);
+ r = radeon_ring_lock(rdev, ring, evergreen_default_size + 19);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
/* setup clear context state */
- radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+ radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
for (i = 0; i < evergreen_default_size; i++)
- radeon_ring_write(rdev, evergreen_default_state[i]);
+ radeon_ring_write(ring, evergreen_default_state[i]);
- radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
+ radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
/* set clear context state */
- radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+ radeon_ring_write(ring, 0);
/* SQ_VTX_BASE_VTX_LOC */
- radeon_ring_write(rdev, 0xc0026f00);
- radeon_ring_write(rdev, 0x00000000);
- radeon_ring_write(rdev, 0x00000000);
- radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(ring, 0xc0026f00);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000000);
/* Clear consts */
- radeon_ring_write(rdev, 0xc0036f00);
- radeon_ring_write(rdev, 0x00000bc4);
- radeon_ring_write(rdev, 0xffffffff);
- radeon_ring_write(rdev, 0xffffffff);
- radeon_ring_write(rdev, 0xffffffff);
+ radeon_ring_write(ring, 0xc0036f00);
+ radeon_ring_write(ring, 0x00000bc4);
+ radeon_ring_write(ring, 0xffffffff);
+ radeon_ring_write(ring, 0xffffffff);
+ radeon_ring_write(ring, 0xffffffff);
- radeon_ring_write(rdev, 0xc0026900);
- radeon_ring_write(rdev, 0x00000316);
- radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
- radeon_ring_write(rdev, 0x00000010); /* */
+ radeon_ring_write(ring, 0xc0026900);
+ radeon_ring_write(ring, 0x00000316);
+ radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ radeon_ring_write(ring, 0x00000010); /* */
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_unlock_commit(rdev, ring);
return 0;
}
int evergreen_cp_resume(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 tmp;
u32 rb_bufsz;
int r;
@@ -1442,13 +1448,14 @@ int evergreen_cp_resume(struct radeon_device *rdev)
RREG32(GRBM_SOFT_RESET);
/* Set ring buffer size */
- rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+ rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
WREG32(CP_RB_CNTL, tmp);
- WREG32(CP_SEM_WAIT_TIMER, 0x4);
+ WREG32(CP_SEM_WAIT_TIMER, 0x0);
+ WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
/* Set the write pointer delay */
WREG32(CP_RB_WPTR_DELAY, 0);
@@ -1456,8 +1463,8 @@ int evergreen_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
- rdev->cp.wptr = 0;
- WREG32(CP_RB_WPTR, rdev->cp.wptr);
+ ring->wptr = 0;
+ WREG32(CP_RB_WPTR, ring->wptr);
/* set the wb address whether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,
@@ -1475,16 +1482,16 @@ int evergreen_cp_resume(struct radeon_device *rdev)
mdelay(1);
WREG32(CP_RB_CNTL, tmp);
- WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
+ WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
- rdev->cp.rptr = RREG32(CP_RB_RPTR);
+ ring->rptr = RREG32(CP_RB_RPTR);
evergreen_cp_start(rdev);
- rdev->cp.ready = true;
- r = radeon_ring_test(rdev);
+ ring->ready = true;
+ r = radeon_ring_test(rdev, ring);
if (r) {
- rdev->cp.ready = false;
+ ring->ready = false;
return r;
}
return 0;
@@ -2353,7 +2360,7 @@ int evergreen_mc_init(struct radeon_device *rdev)
return 0;
}
-bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 srbm_status;
u32 grbm_status;
@@ -2366,19 +2373,19 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
if (!(grbm_status & GUI_ACTIVE)) {
- r100_gpu_lockup_update(lockup, &rdev->cp);
+ r100_gpu_lockup_update(lockup, ring);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, 2);
+ r = radeon_ring_lock(rdev, ring, 2);
if (!r) {
/* PACKET2 NOP */
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_unlock_commit(rdev, ring);
}
- rdev->cp.rptr = RREG32(CP_RB_RPTR);
- return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
+ ring->rptr = RREG32(CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, lockup, ring);
}
static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
@@ -2470,7 +2477,13 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
{
u32 tmp;
- WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ if (rdev->family >= CHIP_CAYMAN) {
+ cayman_cp_int_cntl_setup(rdev, 0,
+ CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
+ cayman_cp_int_cntl_setup(rdev, 1, 0);
+ cayman_cp_int_cntl_setup(rdev, 2, 0);
+ } else
+ WREG32(CP_INT_CNTL, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
WREG32(GRBM_INT_CNTL, 0);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
@@ -2515,6 +2528,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev)
int evergreen_irq_set(struct radeon_device *rdev)
{
u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
+ u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
u32 grbm_int_cntl = 0;
@@ -2539,11 +2553,28 @@ int evergreen_irq_set(struct radeon_device *rdev)
hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
- if (rdev->irq.sw_int) {
- DRM_DEBUG("evergreen_irq_set: sw int\n");
- cp_int_cntl |= RB_INT_ENABLE;
- cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+ if (rdev->family >= CHIP_CAYMAN) {
+ /* enable CP interrupts on all rings */
+ if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+ DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
+ cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+ }
+ if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP1_INDEX]) {
+ DRM_DEBUG("evergreen_irq_set: sw int cp1\n");
+ cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
+ }
+ if (rdev->irq.sw_int[CAYMAN_RING_TYPE_CP2_INDEX]) {
+ DRM_DEBUG("evergreen_irq_set: sw int cp2\n");
+ cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
+ }
+ } else {
+ if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
+ DRM_DEBUG("evergreen_irq_set: sw int gfx\n");
+ cp_int_cntl |= RB_INT_ENABLE;
+ cp_int_cntl |= TIME_STAMP_INT_ENABLE;
+ }
}
+
if (rdev->irq.crtc_vblank_int[0] ||
rdev->irq.pflip[0]) {
DRM_DEBUG("evergreen_irq_set: vblank 0\n");
@@ -2603,7 +2634,12 @@ int evergreen_irq_set(struct radeon_device *rdev)
grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
}
- WREG32(CP_INT_CNTL, cp_int_cntl);
+ if (rdev->family >= CHIP_CAYMAN) {
+ cayman_cp_int_cntl_setup(rdev, 0, cp_int_cntl);
+ cayman_cp_int_cntl_setup(rdev, 1, cp_int_cntl1);
+ cayman_cp_int_cntl_setup(rdev, 2, cp_int_cntl2);
+ } else
+ WREG32(CP_INT_CNTL, cp_int_cntl);
WREG32(GRBM_INT_CNTL, grbm_int_cntl);
WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
@@ -3018,11 +3054,24 @@ restart_ih:
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
- radeon_fence_process(rdev);
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
case 181: /* CP EOP event */
DRM_DEBUG("IH: CP EOP\n");
- radeon_fence_process(rdev);
+ if (rdev->family >= CHIP_CAYMAN) {
+ switch (src_data) {
+ case 0:
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ break;
+ case 1:
+ radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+ break;
+ case 2:
+ radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+ break;
+ }
+ } else
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
@@ -3052,6 +3101,7 @@ restart_ih:
static int evergreen_startup(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
/* enable pcie gen2 link */
@@ -3106,6 +3156,12 @@ static int evergreen_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -3115,7 +3171,9 @@ static int evergreen_startup(struct radeon_device *rdev)
}
evergreen_irq_set(rdev);
- r = radeon_ring_init(rdev, rdev->cp.ring_size);
+ r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+ R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
r = evergreen_cp_load_microcode(rdev);
@@ -3125,6 +3183,23 @@ static int evergreen_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
+ return r;
+ }
+
+ r = r600_audio_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: audio init failed\n");
+ return r;
+ }
+
return 0;
}
@@ -3144,15 +3219,11 @@ int evergreen_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
+ rdev->accel_working = true;
r = evergreen_startup(rdev);
if (r) {
DRM_ERROR("evergreen startup failed on resume\n");
- return r;
- }
-
- r = r600_ib_test(rdev);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
return r;
}
@@ -3162,13 +3233,17 @@ int evergreen_resume(struct radeon_device *rdev)
int evergreen_suspend(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
+ r600_audio_fini(rdev);
/* FIXME: we should wait for ring to be empty */
+ radeon_ib_pool_suspend(rdev);
+ r600_blit_suspend(rdev);
r700_cp_stop(rdev);
- rdev->cp.ready = false;
+ ring->ready = false;
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
evergreen_pcie_gart_disable(rdev);
- r600_blit_suspend(rdev);
return 0;
}
@@ -3243,8 +3318,8 @@ int evergreen_init(struct radeon_device *rdev)
if (r)
return r;
- rdev->cp.ring_obj = NULL;
- r600_ring_init(rdev, 1024 * 1024);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -3253,29 +3328,24 @@ int evergreen_init(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = evergreen_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
+ r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
- if (rdev->accel_working) {
- r = radeon_ib_pool_init(rdev);
- if (r) {
- DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
- rdev->accel_working = false;
- }
- r = r600_ib_test(rdev);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
- rdev->accel_working = false;
- }
- }
/* Don't start up if the MC ucode is missing on BTC parts.
* The default clocks and voltages before the MC ucode
@@ -3293,15 +3363,17 @@ int evergreen_init(struct radeon_device *rdev)
void evergreen_fini(struct radeon_device *rdev)
{
+ r600_audio_fini(rdev);
r600_blit_fini(rdev);
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- radeon_ib_pool_fini(rdev);
+ r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
evergreen_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
+ radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index 914e5af..2379849 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -49,6 +49,7 @@ static void
set_render_target(struct radeon_device *rdev, int format,
int w, int h, u64 gpu_addr)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 cb_color_info;
int pitch, slice;
@@ -62,23 +63,23 @@ set_render_target(struct radeon_device *rdev, int format,
pitch = (w / 8) - 1;
slice = ((w * h) / 64) - 1;
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
- radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(rdev, gpu_addr >> 8);
- radeon_ring_write(rdev, pitch);
- radeon_ring_write(rdev, slice);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, cb_color_info);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, (w - 1) | ((h - 1) << 16));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 15));
+ radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(ring, pitch);
+ radeon_ring_write(ring, slice);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, cb_color_info);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, (w - 1) | ((h - 1) << 16));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
}
/* emits 5dw */
@@ -87,6 +88,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
u32 sync_type, u32 size,
u64 mc_addr)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 cp_coher_size;
if (size == 0xffffffff)
@@ -99,39 +101,40 @@ cp_set_surface_sync(struct radeon_device *rdev,
* to the RB directly. For IBs, the CP programs this as part of the
* surface_sync packet.
*/
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(rdev, 0); /* CP_COHER_CNTL2 */
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (0x85e8 - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, 0); /* CP_COHER_CNTL2 */
}
- radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(rdev, sync_type);
- radeon_ring_write(rdev, cp_coher_size);
- radeon_ring_write(rdev, mc_addr >> 8);
- radeon_ring_write(rdev, 10); /* poll interval */
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(ring, sync_type);
+ radeon_ring_write(ring, cp_coher_size);
+ radeon_ring_write(ring, mc_addr >> 8);
+ radeon_ring_write(ring, 10); /* poll interval */
}
/* emits 11dw + 1 surface sync = 16dw */
static void
set_shaders(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u64 gpu_addr;
/* VS */
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
- radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(rdev, gpu_addr >> 8);
- radeon_ring_write(rdev, 2);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 3));
+ radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(ring, 2);
+ radeon_ring_write(ring, 0);
/* PS */
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
- radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(rdev, gpu_addr >> 8);
- radeon_ring_write(rdev, 1);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 2);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 4));
+ radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(ring, 1);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 2);
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
@@ -141,6 +144,7 @@ set_shaders(struct radeon_device *rdev)
static void
set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_vtx_constant_word2, sq_vtx_constant_word3;
/* high addr, stride */
@@ -155,16 +159,16 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
SQ_VTCX_SEL_Z(SQ_SEL_Z) |
SQ_VTCX_SEL_W(SQ_SEL_W);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
- radeon_ring_write(rdev, 0x580);
- radeon_ring_write(rdev, gpu_addr & 0xffffffff);
- radeon_ring_write(rdev, 48 - 1); /* size */
- radeon_ring_write(rdev, sq_vtx_constant_word2);
- radeon_ring_write(rdev, sq_vtx_constant_word3);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
+ radeon_ring_write(ring, 0x580);
+ radeon_ring_write(ring, gpu_addr & 0xffffffff);
+ radeon_ring_write(ring, 48 - 1); /* size */
+ radeon_ring_write(ring, sq_vtx_constant_word2);
+ radeon_ring_write(ring, sq_vtx_constant_word3);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, S__SQ_CONSTANT_TYPE(SQ_TEX_VTX_VALID_BUFFER));
if ((rdev->family == CHIP_CEDAR) ||
(rdev->family == CHIP_PALM) ||
@@ -185,6 +189,7 @@ set_tex_resource(struct radeon_device *rdev,
int format, int w, int h, int pitch,
u64 gpu_addr, u32 size)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_tex_resource_word0, sq_tex_resource_word1;
u32 sq_tex_resource_word4, sq_tex_resource_word7;
@@ -208,16 +213,16 @@ set_tex_resource(struct radeon_device *rdev,
cp_set_surface_sync(rdev,
PACKET3_TC_ACTION_ENA, size, gpu_addr);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 8));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, sq_tex_resource_word0);
- radeon_ring_write(rdev, sq_tex_resource_word1);
- radeon_ring_write(rdev, gpu_addr >> 8);
- radeon_ring_write(rdev, gpu_addr >> 8);
- radeon_ring_write(rdev, sq_tex_resource_word4);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, sq_tex_resource_word7);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 8));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, sq_tex_resource_word0);
+ radeon_ring_write(ring, sq_tex_resource_word1);
+ radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(ring, sq_tex_resource_word4);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, sq_tex_resource_word7);
}
/* emits 12 */
@@ -225,6 +230,7 @@ static void
set_scissors(struct radeon_device *rdev, int x1, int y1,
int x2, int y2)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
/* workaround some hw bugs */
if (x2 == 0)
x1 = 1;
@@ -235,43 +241,44 @@ set_scissors(struct radeon_device *rdev, int x1, int y1,
x2 = 2;
}
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
- radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
+ radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
- radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+ radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
- radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
- radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
+ radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+ radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
}
/* emits 10 */
static void
draw_auto(struct radeon_device *rdev)
{
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(rdev, DI_PT_RECTLIST);
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, DI_PT_RECTLIST);
- radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
+ radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 2) |
#endif
DI_INDEX_SIZE_16_BIT);
- radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
- radeon_ring_write(rdev, 1);
+ radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
+ radeon_ring_write(ring, 1);
- radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
- radeon_ring_write(rdev, 3);
- radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
+ radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
+ radeon_ring_write(ring, 3);
+ radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
}
@@ -279,6 +286,7 @@ draw_auto(struct radeon_device *rdev)
static void
set_default_state(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2, sq_gpr_resource_mgmt_3;
u32 sq_thread_resource_mgmt, sq_thread_resource_mgmt_2;
u32 sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2, sq_stack_resource_mgmt_3;
@@ -292,8 +300,8 @@ set_default_state(struct radeon_device *rdev)
int dwords;
/* set clear context state */
- radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+ radeon_ring_write(ring, 0);
if (rdev->family < CHIP_CAYMAN) {
switch (rdev->family) {
@@ -550,60 +558,60 @@ set_default_state(struct radeon_device *rdev)
NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
/* disable dyn gprs */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, 0);
/* setup LDS */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(rdev, 0x10001000);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (SQ_LDS_RESOURCE_MGMT - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, 0x10001000);
/* SQ config */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
- radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
- radeon_ring_write(rdev, sq_config);
- radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
- radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
- radeon_ring_write(rdev, sq_gpr_resource_mgmt_3);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, sq_thread_resource_mgmt);
- radeon_ring_write(rdev, sq_thread_resource_mgmt_2);
- radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
- radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
- radeon_ring_write(rdev, sq_stack_resource_mgmt_3);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 11));
+ radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, sq_config);
+ radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
+ radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
+ radeon_ring_write(ring, sq_gpr_resource_mgmt_3);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, sq_thread_resource_mgmt);
+ radeon_ring_write(ring, sq_thread_resource_mgmt_2);
+ radeon_ring_write(ring, sq_stack_resource_mgmt_1);
+ radeon_ring_write(ring, sq_stack_resource_mgmt_2);
+ radeon_ring_write(ring, sq_stack_resource_mgmt_3);
}
/* CONTEXT_CONTROL */
- radeon_ring_write(rdev, 0xc0012800);
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_write(rdev, 0x80000000);
+ radeon_ring_write(ring, 0xc0012800);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_write(ring, 0x80000000);
/* SQ_VTX_BASE_VTX_LOC */
- radeon_ring_write(rdev, 0xc0026f00);
- radeon_ring_write(rdev, 0x00000000);
- radeon_ring_write(rdev, 0x00000000);
- radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(ring, 0xc0026f00);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000000);
/* SET_SAMPLER */
- radeon_ring_write(rdev, 0xc0036e00);
- radeon_ring_write(rdev, 0x00000000);
- radeon_ring_write(rdev, 0x00000012);
- radeon_ring_write(rdev, 0x00000000);
- radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(ring, 0xc0036e00);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000012);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000000);
/* set to DX10/11 mode */
- radeon_ring_write(rdev, PACKET3(PACKET3_MODE_CONTROL, 0));
- radeon_ring_write(rdev, 1);
+ radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+ radeon_ring_write(ring, 1);
/* emit an IB pointing at default state */
dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
- radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
- radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
- radeon_ring_write(rdev, dwords);
+ radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ radeon_ring_write(ring, gpu_addr & 0xFFFFFFFC);
+ radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
+ radeon_ring_write(ring, dwords);
}
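Editor's note: the conversions above all follow one pattern that recurs through the rest of this series: each emit path now looks up a struct radeon_ring from rdev->ring[] and hands that to the ring helpers instead of passing rdev. A minimal sketch of the reworked API, using only calls that appear in this patch (the wrapper name is invented for illustration):

/* Sketch: reserve space on the GFX ring, emit two PACKET2 NOPs, commit. */
static int emit_two_nops_on_gfx_ring(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	r = radeon_ring_lock(rdev, ring, 2);	/* reserve 2 dwords */
	if (r)
		return r;
	radeon_ring_write(ring, 0x80000000);	/* PACKET2 NOP */
	radeon_ring_write(ring, 0x80000000);
	radeon_ring_unlock_commit(rdev, ring);	/* update wptr, kick the CP */
	return 0;
}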
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index cd4590a..8e8cd85 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -520,7 +520,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
break;
case DB_Z_INFO:
track->db_z_info = radeon_get_ib_value(p, idx);
- if (!p->keep_tiling_flags) {
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
@@ -649,7 +649,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR7_INFO:
tmp = (reg - CB_COLOR0_INFO) / 0x3c;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
- if (!p->keep_tiling_flags) {
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
@@ -666,7 +666,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case CB_COLOR11_INFO:
tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
- if (!p->keep_tiling_flags) {
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = evergreen_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
@@ -1355,7 +1355,7 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- if (!p->keep_tiling_flags) {
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
ib[idx+1+(i*8)+1] |=
TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
@@ -1572,3 +1572,242 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
return 0;
}
+/* vm parser */
+static bool evergreen_vm_reg_valid(u32 reg)
+{
+ /* context regs are fine */
+ if (reg >= 0x28000)
+ return true;
+
+ /* check config regs */
+ switch (reg) {
+ case GRBM_GFX_INDEX:
+ case VGT_VTX_VECT_EJECT_REG:
+ case VGT_CACHE_INVALIDATION:
+ case VGT_GS_VERTEX_REUSE:
+ case VGT_PRIMITIVE_TYPE:
+ case VGT_INDEX_TYPE:
+ case VGT_NUM_INDICES:
+ case VGT_NUM_INSTANCES:
+ case VGT_COMPUTE_DIM_X:
+ case VGT_COMPUTE_DIM_Y:
+ case VGT_COMPUTE_DIM_Z:
+ case VGT_COMPUTE_START_X:
+ case VGT_COMPUTE_START_Y:
+ case VGT_COMPUTE_START_Z:
+ case VGT_COMPUTE_INDEX:
+ case VGT_COMPUTE_THREAD_GROUP_SIZE:
+ case VGT_HS_OFFCHIP_PARAM:
+ case PA_CL_ENHANCE:
+ case PA_SU_LINE_STIPPLE_VALUE:
+ case PA_SC_LINE_STIPPLE_STATE:
+ case PA_SC_ENHANCE:
+ case SQ_DYN_GPR_CNTL_PS_FLUSH_REQ:
+ case SQ_DYN_GPR_SIMD_LOCK_EN:
+ case SQ_CONFIG:
+ case SQ_GPR_RESOURCE_MGMT_1:
+ case SQ_GLOBAL_GPR_RESOURCE_MGMT_1:
+ case SQ_GLOBAL_GPR_RESOURCE_MGMT_2:
+ case SQ_CONST_MEM_BASE:
+ case SQ_STATIC_THREAD_MGMT_1:
+ case SQ_STATIC_THREAD_MGMT_2:
+ case SQ_STATIC_THREAD_MGMT_3:
+ case SPI_CONFIG_CNTL:
+ case SPI_CONFIG_CNTL_1:
+ case TA_CNTL_AUX:
+ case DB_DEBUG:
+ case DB_DEBUG2:
+ case DB_DEBUG3:
+ case DB_DEBUG4:
+ case DB_WATERMARKS:
+ case TD_PS_BORDER_COLOR_INDEX:
+ case TD_PS_BORDER_COLOR_RED:
+ case TD_PS_BORDER_COLOR_GREEN:
+ case TD_PS_BORDER_COLOR_BLUE:
+ case TD_PS_BORDER_COLOR_ALPHA:
+ case TD_VS_BORDER_COLOR_INDEX:
+ case TD_VS_BORDER_COLOR_RED:
+ case TD_VS_BORDER_COLOR_GREEN:
+ case TD_VS_BORDER_COLOR_BLUE:
+ case TD_VS_BORDER_COLOR_ALPHA:
+ case TD_GS_BORDER_COLOR_INDEX:
+ case TD_GS_BORDER_COLOR_RED:
+ case TD_GS_BORDER_COLOR_GREEN:
+ case TD_GS_BORDER_COLOR_BLUE:
+ case TD_GS_BORDER_COLOR_ALPHA:
+ case TD_HS_BORDER_COLOR_INDEX:
+ case TD_HS_BORDER_COLOR_RED:
+ case TD_HS_BORDER_COLOR_GREEN:
+ case TD_HS_BORDER_COLOR_BLUE:
+ case TD_HS_BORDER_COLOR_ALPHA:
+ case TD_LS_BORDER_COLOR_INDEX:
+ case TD_LS_BORDER_COLOR_RED:
+ case TD_LS_BORDER_COLOR_GREEN:
+ case TD_LS_BORDER_COLOR_BLUE:
+ case TD_LS_BORDER_COLOR_ALPHA:
+ case TD_CS_BORDER_COLOR_INDEX:
+ case TD_CS_BORDER_COLOR_RED:
+ case TD_CS_BORDER_COLOR_GREEN:
+ case TD_CS_BORDER_COLOR_BLUE:
+ case TD_CS_BORDER_COLOR_ALPHA:
+ case SQ_ESGS_RING_SIZE:
+ case SQ_GSVS_RING_SIZE:
+ case SQ_ESTMP_RING_SIZE:
+ case SQ_GSTMP_RING_SIZE:
+ case SQ_HSTMP_RING_SIZE:
+ case SQ_LSTMP_RING_SIZE:
+ case SQ_PSTMP_RING_SIZE:
+ case SQ_VSTMP_RING_SIZE:
+ case SQ_ESGS_RING_ITEMSIZE:
+ case SQ_ESTMP_RING_ITEMSIZE:
+ case SQ_GSTMP_RING_ITEMSIZE:
+ case SQ_GSVS_RING_ITEMSIZE:
+ case SQ_GS_VERT_ITEMSIZE:
+ case SQ_GS_VERT_ITEMSIZE_1:
+ case SQ_GS_VERT_ITEMSIZE_2:
+ case SQ_GS_VERT_ITEMSIZE_3:
+ case SQ_GSVS_RING_OFFSET_1:
+ case SQ_GSVS_RING_OFFSET_2:
+ case SQ_GSVS_RING_OFFSET_3:
+ case SQ_HSTMP_RING_ITEMSIZE:
+ case SQ_LSTMP_RING_ITEMSIZE:
+ case SQ_PSTMP_RING_ITEMSIZE:
+ case SQ_VSTMP_RING_ITEMSIZE:
+ case VGT_TF_RING_SIZE:
+ case SQ_ESGS_RING_BASE:
+ case SQ_GSVS_RING_BASE:
+ case SQ_ESTMP_RING_BASE:
+ case SQ_GSTMP_RING_BASE:
+ case SQ_HSTMP_RING_BASE:
+ case SQ_LSTMP_RING_BASE:
+ case SQ_PSTMP_RING_BASE:
+ case SQ_VSTMP_RING_BASE:
+ case CAYMAN_VGT_OFFCHIP_LDS_BASE:
+ case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static int evergreen_vm_packet3_check(struct radeon_device *rdev,
+ u32 *ib, struct radeon_cs_packet *pkt)
+{
+ u32 idx = pkt->idx + 1;
+ u32 idx_value = ib[idx];
+ u32 start_reg, end_reg, reg, i;
+
+ switch (pkt->opcode) {
+ case PACKET3_NOP:
+ case PACKET3_SET_BASE:
+ case PACKET3_CLEAR_STATE:
+ case PACKET3_INDEX_BUFFER_SIZE:
+ case PACKET3_DISPATCH_DIRECT:
+ case PACKET3_DISPATCH_INDIRECT:
+ case PACKET3_MODE_CONTROL:
+ case PACKET3_SET_PREDICATION:
+ case PACKET3_COND_EXEC:
+ case PACKET3_PRED_EXEC:
+ case PACKET3_DRAW_INDIRECT:
+ case PACKET3_DRAW_INDEX_INDIRECT:
+ case PACKET3_INDEX_BASE:
+ case PACKET3_DRAW_INDEX_2:
+ case PACKET3_CONTEXT_CONTROL:
+ case PACKET3_DRAW_INDEX_OFFSET:
+ case PACKET3_INDEX_TYPE:
+ case PACKET3_DRAW_INDEX:
+ case PACKET3_DRAW_INDEX_AUTO:
+ case PACKET3_DRAW_INDEX_IMMD:
+ case PACKET3_NUM_INSTANCES:
+ case PACKET3_DRAW_INDEX_MULTI_AUTO:
+ case PACKET3_STRMOUT_BUFFER_UPDATE:
+ case PACKET3_DRAW_INDEX_OFFSET_2:
+ case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
+ case PACKET3_MPEG_INDEX:
+ case PACKET3_WAIT_REG_MEM:
+ case PACKET3_MEM_WRITE:
+ case PACKET3_SURFACE_SYNC:
+ case PACKET3_EVENT_WRITE:
+ case PACKET3_EVENT_WRITE_EOP:
+ case PACKET3_EVENT_WRITE_EOS:
+ case PACKET3_SET_CONTEXT_REG:
+ case PACKET3_SET_BOOL_CONST:
+ case PACKET3_SET_LOOP_CONST:
+ case PACKET3_SET_RESOURCE:
+ case PACKET3_SET_SAMPLER:
+ case PACKET3_SET_CTL_CONST:
+ case PACKET3_SET_RESOURCE_OFFSET:
+ case PACKET3_SET_CONTEXT_REG_INDIRECT:
+ case PACKET3_SET_RESOURCE_INDIRECT:
+ case CAYMAN_PACKET3_DEALLOC_STATE:
+ break;
+ case PACKET3_COND_WRITE:
+ if (idx_value & 0x100) {
+ reg = ib[idx + 5] * 4;
+ if (!evergreen_vm_reg_valid(reg))
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_COPY_DW:
+ if (idx_value & 0x2) {
+ reg = ib[idx + 3] * 4;
+ if (!evergreen_vm_reg_valid(reg))
+ return -EINVAL;
+ }
+ break;
+ case PACKET3_SET_CONFIG_REG:
+ start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
+ (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
+ (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
+ DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < pkt->count; i++) {
+ reg = start_reg + (4 * i);
+ if (!evergreen_vm_reg_valid(reg))
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
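A worked example for the SET_CONFIG_REG branch above, assuming the usual 0x8000 config-register base for PACKET3_SET_CONFIG_REG_START: a packet that writes only SQ_CONFIG (0x8C00) carries idx_value = (0x8C00 - 0x8000) >> 2 = 0x300 and pkt->count = 1, so start_reg = end_reg = 0x8C00. The range check passes, and the single register is then looked up in evergreen_vm_reg_valid(), where SQ_CONFIG is whitelisted.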
+
+int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ int ret = 0;
+ u32 idx = 0;
+ struct radeon_cs_packet pkt;
+
+ do {
+ pkt.idx = idx;
+ pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
+ pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
+ pkt.one_reg_wr = 0;
+ switch (pkt.type) {
+ case PACKET_TYPE0:
+ dev_err(rdev->dev, "Packet0 not allowed!\n");
+ ret = -EINVAL;
+ break;
+ case PACKET_TYPE2:
+ idx += 1;
+ break;
+ case PACKET_TYPE3:
+ pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
+ ret = evergreen_vm_packet3_check(rdev, ib->ptr, &pkt);
+ idx += pkt.count + 2;
+ break;
+ default:
+ dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
+ ret = -EINVAL;
+ break;
+ }
+ if (ret)
+ break;
+ } while (idx < ib->length_dw);
+
+ return ret;
+}
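The parser above walks raw PM4 headers; the decode helpers it relies on are defined elsewhere in the radeon headers, so here is a sketch of the type-3 header layout they assume: bits 31:30 hold the packet type, bits 29:16 the count (payload dwords minus one), and bits 15:8 the opcode, which is why the loop advances idx by pkt.count + 2 (one header dword plus count + 1 payload dwords). Helper names below are illustrative only.

/* Sketch only: composing and decoding a type-3 PM4 header. */
static inline u32 pm4_type3_header(u32 opcode, u32 payload_dw)
{
	return (3U << 30) | (((payload_dw - 1) & 0x3FFF) << 16) |
	       ((opcode & 0xFF) << 8);
}

static inline u32 pm4_get_opcode(u32 header)
{
	return (header >> 8) & 0xFF;	/* same field CP_PACKET3_GET_OPCODE reads */
}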
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 7d7f215..4215de9 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -35,6 +35,14 @@
#define EVERGREEN_P1PLL_SS_CNTL 0x414
#define EVERGREEN_P2PLL_SS_CNTL 0x454
# define EVERGREEN_PxPLL_SS_EN (1 << 12)
+
+#define EVERGREEN_AUDIO_PLL1_MUL 0x5b0
+#define EVERGREEN_AUDIO_PLL1_DIV 0x5b4
+#define EVERGREEN_AUDIO_PLL1_UNK 0x5bc
+
+#define EVERGREEN_AUDIO_ENABLE 0x5e78
+#define EVERGREEN_AUDIO_VENDOR_ID 0x5ec0
+
/* GRPH blocks at 0x6800, 0x7400, 0x10000, 0x10c00, 0x11800, 0x12400 */
#define EVERGREEN_GRPH_ENABLE 0x6800
#define EVERGREEN_GRPH_CONTROL 0x6804
@@ -220,4 +228,9 @@
#define EVERGREEN_DC_GPIO_HPD_EN 0x64b8
#define EVERGREEN_DC_GPIO_HPD_Y 0x64bc
+/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
+#define EVERGREEN_HDMI_BASE 0x7030
+
+#define EVERGREEN_HDMI_CONFIG_OFFSET 0xf0
+
#endif
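The comment above lists one HDMI block per display encoder; a register inside a given block is reached by adding its offset (for example EVERGREEN_HDMI_CONFIG_OFFSET) to that block's base. A sketch, with the table and helper name invented for illustration:

/* Per-encoder HDMI block bases from the comment above. */
static const u32 evergreen_hdmi_bases[] = {
	0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30,
};

static inline u32 evergreen_hdmi_reg(unsigned block, u32 offset)
{
	return evergreen_hdmi_bases[block] + offset;
}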
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index e00039e..74713d4 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -108,6 +108,7 @@
#define CP_RB_WPTR_ADDR_HI 0xC11C
#define CP_RB_WPTR_DELAY 0x8704
#define CP_SEM_WAIT_TIMER 0x85BC
+#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x85C8
#define CP_DEBUG 0xC1FC
@@ -242,6 +243,7 @@
#define PA_CL_ENHANCE 0x8A14
#define CLIP_VTX_REORDER_ENA (1 << 0)
#define NUM_CLIP_SEQ(x) ((x) << 1)
+#define PA_SC_ENHANCE 0x8BF0
#define PA_SC_AA_CONFIG 0x28C04
#define MSAA_NUM_SAMPLES_SHIFT 0
#define MSAA_NUM_SAMPLES_MASK 0x3
@@ -319,6 +321,8 @@
#define SQ_GPR_RESOURCE_MGMT_3 0x8C0C
#define NUM_HS_GPRS(x) ((x) << 0)
#define NUM_LS_GPRS(x) ((x) << 16)
+#define SQ_GLOBAL_GPR_RESOURCE_MGMT_1 0x8C10
+#define SQ_GLOBAL_GPR_RESOURCE_MGMT_2 0x8C14
#define SQ_THREAD_RESOURCE_MGMT 0x8C18
#define NUM_PS_THREADS(x) ((x) << 0)
#define NUM_VS_THREADS(x) ((x) << 8)
@@ -337,6 +341,10 @@
#define NUM_HS_STACK_ENTRIES(x) ((x) << 0)
#define NUM_LS_STACK_ENTRIES(x) ((x) << 16)
#define SQ_DYN_GPR_CNTL_PS_FLUSH_REQ 0x8D8C
+#define SQ_DYN_GPR_SIMD_LOCK_EN 0x8D94
+#define SQ_STATIC_THREAD_MGMT_1 0x8E20
+#define SQ_STATIC_THREAD_MGMT_2 0x8E24
+#define SQ_STATIC_THREAD_MGMT_3 0x8E28
#define SQ_LDS_RESOURCE_MGMT 0x8E2C
#define SQ_MS_FIFO_SIZES 0x8CF0
@@ -691,6 +699,7 @@
#define PACKET3_DRAW_INDEX_MULTI_ELEMENT 0x36
#define PACKET3_MEM_SEMAPHORE 0x39
#define PACKET3_MPEG_INDEX 0x3A
+#define PACKET3_COPY_DW 0x3B
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_INDIRECT_BUFFER 0x32
@@ -768,6 +777,8 @@
#define SQ_TEX_VTX_VALID_TEXTURE 0x2
#define SQ_TEX_VTX_VALID_BUFFER 0x3
+#define VGT_VTX_VECT_EJECT_REG 0x88b0
+
#define SQ_CONST_MEM_BASE 0x8df8
#define SQ_ESGS_RING_BASE 0x8c40
@@ -892,8 +903,27 @@
#define PA_SC_SCREEN_SCISSOR_TL 0x28030
#define PA_SC_GENERIC_SCISSOR_TL 0x28240
#define PA_SC_WINDOW_SCISSOR_TL 0x28204
-#define VGT_PRIMITIVE_TYPE 0x8958
+#define VGT_PRIMITIVE_TYPE 0x8958
+#define VGT_INDEX_TYPE 0x895C
+
+#define VGT_NUM_INDICES 0x8970
+
+#define VGT_COMPUTE_DIM_X 0x8990
+#define VGT_COMPUTE_DIM_Y 0x8994
+#define VGT_COMPUTE_DIM_Z 0x8998
+#define VGT_COMPUTE_START_X 0x899C
+#define VGT_COMPUTE_START_Y 0x89A0
+#define VGT_COMPUTE_START_Z 0x89A4
+#define VGT_COMPUTE_INDEX 0x89A8
+#define VGT_COMPUTE_THREAD_GROUP_SIZE 0x89AC
+#define VGT_HS_OFFCHIP_PARAM 0x89B0
+
+#define DB_DEBUG 0x9830
+#define DB_DEBUG2 0x9834
+#define DB_DEBUG3 0x9838
+#define DB_DEBUG4 0x983C
+#define DB_WATERMARKS 0x9854
#define DB_DEPTH_CONTROL 0x28800
#define DB_DEPTH_VIEW 0x28008
#define DB_HTILE_DATA_BASE 0x28014
@@ -1189,8 +1219,40 @@
#define SQ_VTX_CONSTANT_WORD6_0 0x30018
#define SQ_VTX_CONSTANT_WORD7_0 0x3001c
+#define TD_PS_BORDER_COLOR_INDEX 0xA400
+#define TD_PS_BORDER_COLOR_RED 0xA404
+#define TD_PS_BORDER_COLOR_GREEN 0xA408
+#define TD_PS_BORDER_COLOR_BLUE 0xA40C
+#define TD_PS_BORDER_COLOR_ALPHA 0xA410
+#define TD_VS_BORDER_COLOR_INDEX 0xA414
+#define TD_VS_BORDER_COLOR_RED 0xA418
+#define TD_VS_BORDER_COLOR_GREEN 0xA41C
+#define TD_VS_BORDER_COLOR_BLUE 0xA420
+#define TD_VS_BORDER_COLOR_ALPHA 0xA424
+#define TD_GS_BORDER_COLOR_INDEX 0xA428
+#define TD_GS_BORDER_COLOR_RED 0xA42C
+#define TD_GS_BORDER_COLOR_GREEN 0xA430
+#define TD_GS_BORDER_COLOR_BLUE 0xA434
+#define TD_GS_BORDER_COLOR_ALPHA 0xA438
+#define TD_HS_BORDER_COLOR_INDEX 0xA43C
+#define TD_HS_BORDER_COLOR_RED 0xA440
+#define TD_HS_BORDER_COLOR_GREEN 0xA444
+#define TD_HS_BORDER_COLOR_BLUE 0xA448
+#define TD_HS_BORDER_COLOR_ALPHA 0xA44C
+#define TD_LS_BORDER_COLOR_INDEX 0xA450
+#define TD_LS_BORDER_COLOR_RED 0xA454
+#define TD_LS_BORDER_COLOR_GREEN 0xA458
+#define TD_LS_BORDER_COLOR_BLUE 0xA45C
+#define TD_LS_BORDER_COLOR_ALPHA 0xA460
+#define TD_CS_BORDER_COLOR_INDEX 0xA464
+#define TD_CS_BORDER_COLOR_RED 0xA468
+#define TD_CS_BORDER_COLOR_GREEN 0xA46C
+#define TD_CS_BORDER_COLOR_BLUE 0xA470
+#define TD_CS_BORDER_COLOR_ALPHA 0xA474
+
/* cayman 3D regs */
-#define CAYMAN_VGT_OFFCHIP_LDS_BASE 0x89B0
+#define CAYMAN_VGT_OFFCHIP_LDS_BASE 0x89B4
+#define CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS 0x8E48
#define CAYMAN_DB_EQAA 0x28804
#define CAYMAN_DB_DEPTH_INFO 0x2803C
#define CAYMAN_PA_SC_AA_CONFIG 0x28BE0
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 0e57998..2509c50 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -934,7 +934,7 @@ void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev)
int cayman_pcie_gart_enable(struct radeon_device *rdev)
{
- int r;
+ int i, r;
if (rdev->gart.robj == NULL) {
dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
@@ -945,9 +945,12 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
return r;
radeon_gart_restore(rdev);
/* Setup TLB control */
- WREG32(MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB |
+ WREG32(MC_VM_MX_L1_TLB_CNTL,
+ (0xA << 7) |
+ ENABLE_L1_TLB |
ENABLE_L1_FRAGMENT_PROCESSING |
SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ ENABLE_ADVANCED_DRIVER_MODEL |
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
@@ -967,9 +970,26 @@ int cayman_pcie_gart_enable(struct radeon_device *rdev)
WREG32(VM_CONTEXT0_CNTL2, 0);
WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
- /* disable context1-7 */
+
+ WREG32(0x15D4, 0);
+ WREG32(0x15D8, 0);
+ WREG32(0x15DC, 0);
+
+ /* empty context1-7 */
+ for (i = 1; i < 8; i++) {
+ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), 0);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
+ rdev->gart.table_addr >> 12);
+ }
+
+ /* enable context1-7 */
+ WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
+ (u32)(rdev->dummy_page.addr >> 12));
WREG32(VM_CONTEXT1_CNTL2, 0);
WREG32(VM_CONTEXT1_CNTL, 0);
+ WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
cayman_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -1006,9 +1026,69 @@ void cayman_pcie_gart_fini(struct radeon_device *rdev)
radeon_gart_fini(rdev);
}
+void cayman_cp_int_cntl_setup(struct radeon_device *rdev,
+ int ring, u32 cp_int_cntl)
+{
+ u32 srbm_gfx_cntl = RREG32(SRBM_GFX_CNTL) & ~3;
+
+ WREG32(SRBM_GFX_CNTL, srbm_gfx_cntl | (ring & 3));
+ WREG32(CP_INT_CNTL, cp_int_cntl);
+}
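cayman_cp_int_cntl_setup() selects a ring in the low bits of the SRBM_GFX_CNTL window and then writes that ring's copy of CP_INT_CNTL. A usage sketch; the wrapper name is invented and the flag choice is only illustrative:

static void enable_cp1_timestamp_irq(struct radeon_device *rdev)
{
	/* route the fence/timestamp interrupt enable to compute ring 1 */
	cayman_cp_int_cntl_setup(rdev, CAYMAN_RING_TYPE_CP1_INDEX,
				 TIME_STAMP_INT_ENABLE);
}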
+
/*
* CP.
*/
+void cayman_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence)
+{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
+
+ /* flush read cache over gart for this vmid */
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, 0xFFFFFFFF);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 10); /* poll interval */
+ /* EVENT_WRITE_EOP - flush caches, send int */
+ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
+ radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+ radeon_ring_write(ring, fence->seq);
+ radeon_ring_write(ring, 0);
+}
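For reference, the six dwords of the EVENT_WRITE_EOP emitted above break down as follows (field names per the nid.h additions later in this patch; the layout is read directly off the writes):

/*
 *   dw0: PACKET3(PACKET3_EVENT_WRITE_EOP, 4)
 *   dw1: EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5), a TS event
 *   dw2: low 32 bits of the fence GPU address
 *   dw3: upper address bits | DATA_SEL(1) write low 32-bit data | INT_SEL(2) irq once the write lands
 *   dw4: fence->seq, the value the CP writes back at that address
 *   dw5: 0, upper data dword (unused with DATA_SEL(1))
 */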
+
+void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+
+ /* set to DX10/11 mode */
+ radeon_ring_write(ring, PACKET3(PACKET3_MODE_CONTROL, 0));
+ radeon_ring_write(ring, 1);
+ radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ radeon_ring_write(ring,
+#ifdef __BIG_ENDIAN
+ (2 << 0) |
+#endif
+ (ib->gpu_addr & 0xFFFFFFFC));
+ radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+ radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24));
+
+ /* flush read cache over gart for this vmid */
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
+ radeon_ring_write(ring, ib->vm_id);
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(ring, PACKET3_TC_ACTION_ENA | PACKET3_SH_ACTION_ENA);
+ radeon_ring_write(ring, 0xFFFFFFFF);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 10); /* poll interval */
+}
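The last dword of the INDIRECT_BUFFER packet above packs the IB length together with the VM id the CP should run it under. A trivial sketch (helper name invented):

static inline u32 cayman_ib_len_vmid(u32 length_dw, u32 vm_id)
{
	return length_dw | (vm_id << 24);	/* exactly as emitted above */
}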
+
static void cayman_cp_enable(struct radeon_device *rdev, bool enable)
{
if (enable)
@@ -1049,63 +1129,64 @@ static int cayman_cp_load_microcode(struct radeon_device *rdev)
static int cayman_cp_start(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r, i;
- r = radeon_ring_lock(rdev, 7);
+ r = radeon_ring_lock(rdev, ring, 7);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
- radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
- radeon_ring_write(rdev, 0x1);
- radeon_ring_write(rdev, 0x0);
- radeon_ring_write(rdev, rdev->config.cayman.max_hw_contexts - 1);
- radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+ radeon_ring_write(ring, 0x1);
+ radeon_ring_write(ring, 0x0);
+ radeon_ring_write(ring, rdev->config.cayman.max_hw_contexts - 1);
+ radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_unlock_commit(rdev, ring);
cayman_cp_enable(rdev, true);
- r = radeon_ring_lock(rdev, cayman_default_size + 19);
+ r = radeon_ring_lock(rdev, ring, cayman_default_size + 19);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
/* setup clear context state */
- radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- radeon_ring_write(rdev, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
+ radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
for (i = 0; i < cayman_default_size; i++)
- radeon_ring_write(rdev, cayman_default_state[i]);
+ radeon_ring_write(ring, cayman_default_state[i]);
- radeon_ring_write(rdev, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
- radeon_ring_write(rdev, PACKET3_PREAMBLE_END_CLEAR_STATE);
+ radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
+ radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);
/* set clear context state */
- radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+ radeon_ring_write(ring, 0);
/* SQ_VTX_BASE_VTX_LOC */
- radeon_ring_write(rdev, 0xc0026f00);
- radeon_ring_write(rdev, 0x00000000);
- radeon_ring_write(rdev, 0x00000000);
- radeon_ring_write(rdev, 0x00000000);
+ radeon_ring_write(ring, 0xc0026f00);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000000);
+ radeon_ring_write(ring, 0x00000000);
/* Clear consts */
- radeon_ring_write(rdev, 0xc0036f00);
- radeon_ring_write(rdev, 0x00000bc4);
- radeon_ring_write(rdev, 0xffffffff);
- radeon_ring_write(rdev, 0xffffffff);
- radeon_ring_write(rdev, 0xffffffff);
+ radeon_ring_write(ring, 0xc0036f00);
+ radeon_ring_write(ring, 0x00000bc4);
+ radeon_ring_write(ring, 0xffffffff);
+ radeon_ring_write(ring, 0xffffffff);
+ radeon_ring_write(ring, 0xffffffff);
- radeon_ring_write(rdev, 0xc0026900);
- radeon_ring_write(rdev, 0x00000316);
- radeon_ring_write(rdev, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
- radeon_ring_write(rdev, 0x00000010); /* */
+ radeon_ring_write(ring, 0xc0026900);
+ radeon_ring_write(ring, 0x00000316);
+ radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
+ radeon_ring_write(ring, 0x00000010); /* */
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_unlock_commit(rdev, ring);
/* XXX init other rings */
@@ -1115,11 +1196,12 @@ static int cayman_cp_start(struct radeon_device *rdev)
static void cayman_cp_fini(struct radeon_device *rdev)
{
cayman_cp_enable(rdev, false);
- radeon_ring_fini(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
}
int cayman_cp_resume(struct radeon_device *rdev)
{
+ struct radeon_ring *ring;
u32 tmp;
u32 rb_bufsz;
int r;
@@ -1136,7 +1218,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
WREG32(GRBM_SOFT_RESET, 0);
RREG32(GRBM_SOFT_RESET);
- WREG32(CP_SEM_WAIT_TIMER, 0x4);
+ WREG32(CP_SEM_WAIT_TIMER, 0x0);
+ WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
/* Set the write pointer delay */
WREG32(CP_RB_WPTR_DELAY, 0);
@@ -1145,7 +1228,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
/* ring 0 - compute and gfx */
/* Set ring buffer size */
- rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+ ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
@@ -1154,8 +1238,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
- rdev->cp.wptr = 0;
- WREG32(CP_RB0_WPTR, rdev->cp.wptr);
+ ring->wptr = 0;
+ WREG32(CP_RB0_WPTR, ring->wptr);
/* set the wb address whether it's enabled or not */
WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1172,13 +1256,14 @@ int cayman_cp_resume(struct radeon_device *rdev)
mdelay(1);
WREG32(CP_RB0_CNTL, tmp);
- WREG32(CP_RB0_BASE, rdev->cp.gpu_addr >> 8);
+ WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);
- rdev->cp.rptr = RREG32(CP_RB0_RPTR);
+ ring->rptr = RREG32(CP_RB0_RPTR);
/* ring1 - compute only */
/* Set ring buffer size */
- rb_bufsz = drm_order(rdev->cp1.ring_size / 8);
+ ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
+ rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
@@ -1187,8 +1272,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
- rdev->cp1.wptr = 0;
- WREG32(CP_RB1_WPTR, rdev->cp1.wptr);
+ ring->wptr = 0;
+ WREG32(CP_RB1_WPTR, ring->wptr);
/* set the wb address whether it's enabled or not */
WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1197,13 +1282,14 @@ int cayman_cp_resume(struct radeon_device *rdev)
mdelay(1);
WREG32(CP_RB1_CNTL, tmp);
- WREG32(CP_RB1_BASE, rdev->cp1.gpu_addr >> 8);
+ WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);
- rdev->cp1.rptr = RREG32(CP_RB1_RPTR);
+ ring->rptr = RREG32(CP_RB1_RPTR);
/* ring2 - compute only */
/* Set ring buffer size */
- rb_bufsz = drm_order(rdev->cp2.ring_size / 8);
+ ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
+ rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
@@ -1212,8 +1298,8 @@ int cayman_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
- rdev->cp2.wptr = 0;
- WREG32(CP_RB2_WPTR, rdev->cp2.wptr);
+ ring->wptr = 0;
+ WREG32(CP_RB2_WPTR, ring->wptr);
/* set the wb address whether it's enabled or not */
WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
@@ -1222,28 +1308,28 @@ int cayman_cp_resume(struct radeon_device *rdev)
mdelay(1);
WREG32(CP_RB2_CNTL, tmp);
- WREG32(CP_RB2_BASE, rdev->cp2.gpu_addr >> 8);
+ WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);
- rdev->cp2.rptr = RREG32(CP_RB2_RPTR);
+ ring->rptr = RREG32(CP_RB2_RPTR);
/* start the rings */
cayman_cp_start(rdev);
- rdev->cp.ready = true;
- rdev->cp1.ready = true;
- rdev->cp2.ready = true;
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
+ rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
/* this only tests cp0 */
- r = radeon_ring_test(rdev);
+ r = radeon_ring_test(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
if (r) {
- rdev->cp.ready = false;
- rdev->cp1.ready = false;
- rdev->cp2.ready = false;
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
return r;
}
return 0;
}
-bool cayman_gpu_is_lockup(struct radeon_device *rdev)
+bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 srbm_status;
u32 grbm_status;
@@ -1256,20 +1342,20 @@ bool cayman_gpu_is_lockup(struct radeon_device *rdev)
grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
if (!(grbm_status & GUI_ACTIVE)) {
- r100_gpu_lockup_update(lockup, &rdev->cp);
+ r100_gpu_lockup_update(lockup, ring);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, 2);
+ r = radeon_ring_lock(rdev, ring, 2);
if (!r) {
/* PACKET2 NOP */
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_unlock_commit(rdev, ring);
}
/* XXX deal with CP0,1,2 */
- rdev->cp.rptr = RREG32(CP_RB0_RPTR);
- return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
+ ring->rptr = RREG32(ring->rptr_reg);
+ return r100_gpu_cp_is_lockup(rdev, lockup, ring);
}
static int cayman_gpu_soft_reset(struct radeon_device *rdev)
@@ -1289,6 +1375,15 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
+ dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(0x14F8));
+ dev_info(rdev->dev, " VM_CONTEXT0_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(0x14D8));
+ dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
+ RREG32(0x14FC));
+ dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
+ RREG32(0x14DC));
+
evergreen_mc_stop(rdev, &save);
if (evergreen_mc_wait_for_idle(rdev)) {
dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
@@ -1319,6 +1414,7 @@ static int cayman_gpu_soft_reset(struct radeon_device *rdev)
(void)RREG32(GRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
+
dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
RREG32(GRBM_STATUS));
dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
@@ -1338,6 +1434,7 @@ int cayman_asic_reset(struct radeon_device *rdev)
static int cayman_startup(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
/* enable pcie gen2 link */
@@ -1378,6 +1475,24 @@ static int cayman_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
+ r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -1387,7 +1502,9 @@ static int cayman_startup(struct radeon_device *rdev)
}
evergreen_irq_set(rdev);
- r = radeon_ring_init(rdev, rdev->cp.ring_size);
+ r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+ CP_RB0_RPTR, CP_RB0_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
r = cayman_cp_load_microcode(rdev);
@@ -1397,6 +1514,21 @@ static int cayman_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
+ return r;
+ }
+
+ r = radeon_vm_manager_start(rdev);
+ if (r)
+ return r;
+
return 0;
}
@@ -1411,32 +1543,27 @@ int cayman_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
+ rdev->accel_working = true;
r = cayman_startup(rdev);
if (r) {
DRM_ERROR("cayman startup failed on resume\n");
+ rdev->accel_working = false;
return r;
}
-
- r = r600_ib_test(rdev);
- if (r) {
- DRM_ERROR("radeon: failled testing IB (%d).\n", r);
- return r;
- }
-
return r;
-
}
int cayman_suspend(struct radeon_device *rdev)
{
/* FIXME: we should wait for ring to be empty */
+ radeon_ib_pool_suspend(rdev);
+ radeon_vm_manager_suspend(rdev);
+ r600_blit_suspend(rdev);
cayman_cp_enable(rdev, false);
- rdev->cp.ready = false;
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
evergreen_irq_suspend(rdev);
radeon_wb_disable(rdev);
cayman_pcie_gart_disable(rdev);
- r600_blit_suspend(rdev);
-
return 0;
}
@@ -1448,6 +1575,7 @@ int cayman_suspend(struct radeon_device *rdev)
*/
int cayman_init(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
/* This don't do much */
@@ -1500,8 +1628,8 @@ int cayman_init(struct radeon_device *rdev)
if (r)
return r;
- rdev->cp.ring_obj = NULL;
- r600_ring_init(rdev, 1024 * 1024);
+ ring->ring_obj = NULL;
+ r600_ring_init(rdev, ring, 1024 * 1024);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -1510,29 +1638,29 @@ int cayman_init(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+ r = radeon_vm_manager_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
+ }
+
r = cayman_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
cayman_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
+ r100_ib_fini(rdev);
+ radeon_vm_manager_fini(rdev);
radeon_irq_kms_fini(rdev);
cayman_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
- if (rdev->accel_working) {
- r = radeon_ib_pool_init(rdev);
- if (r) {
- DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
- rdev->accel_working = false;
- }
- r = r600_ib_test(rdev);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
- rdev->accel_working = false;
- }
- }
/* Don't start up if the MC ucode is missing.
* The default clocks and voltages before the MC ucode
@@ -1552,11 +1680,13 @@ void cayman_fini(struct radeon_device *rdev)
cayman_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- radeon_ib_pool_fini(rdev);
+ radeon_vm_manager_fini(rdev);
+ r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
cayman_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
+ radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
@@ -1564,3 +1694,84 @@ void cayman_fini(struct radeon_device *rdev)
rdev->bios = NULL;
}
+/*
+ * vm
+ */
+int cayman_vm_init(struct radeon_device *rdev)
+{
+ /* number of VMs */
+ rdev->vm_manager.nvm = 8;
+ /* base offset of vram pages */
+ rdev->vm_manager.vram_base_offset = 0;
+ return 0;
+}
+
+void cayman_vm_fini(struct radeon_device *rdev)
+{
+}
+
+int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id)
+{
+ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (id << 2), 0);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (id << 2), vm->last_pfn);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (id << 2), vm->pt_gpu_addr >> 12);
+ /* flush hdp cache */
+ WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+ /* bits 0-7 are the VM contexts0-7 */
+ WREG32(VM_INVALIDATE_REQUEST, 1 << id);
+ return 0;
+}
+
+void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (vm->id << 2), 0);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (vm->id << 2), 0);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0);
+ /* flush hdp cache */
+ WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+ /* bits 0-7 are the VM contexts0-7 */
+ WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
+}
+
+void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ if (vm->id == -1)
+ return;
+
+ /* flush hdp cache */
+ WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+ /* bits 0-7 are the VM contexts0-7 */
+ WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
+}
+
+#define R600_PTE_VALID (1 << 0)
+#define R600_PTE_SYSTEM (1 << 1)
+#define R600_PTE_SNOOPED (1 << 2)
+#define R600_PTE_READABLE (1 << 5)
+#define R600_PTE_WRITEABLE (1 << 6)
+
+uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ uint32_t flags)
+{
+ uint32_t r600_flags = 0;
+
+ r600_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
+ r600_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
+ r600_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
+ if (flags & RADEON_VM_PAGE_SYSTEM) {
+ r600_flags |= R600_PTE_SYSTEM;
+ r600_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
+ }
+ return r600_flags;
+}
+
+void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
+ unsigned pfn, uint64_t addr, uint32_t flags)
+{
+ void __iomem *ptr = (void *)vm->pt;
+
+ addr = addr & 0xFFFFFFFFFFFFF000ULL;
+ addr |= flags;
+ writeq(addr, ptr + (pfn * 8));
+}
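Taken together, the two helpers above are what the generic VM manager is expected to call per page: translate the generic RADEON_VM_PAGE_* flags into R600_PTE_* bits, then write the 64-bit entry. A minimal sketch under that assumption (function name and flag choice are illustrative):

static void map_one_system_page(struct radeon_device *rdev,
				struct radeon_vm *vm,
				unsigned pfn, uint64_t dma_addr)
{
	uint32_t hw_flags = cayman_vm_page_flags(rdev, vm,
						 RADEON_VM_PAGE_VALID |
						 RADEON_VM_PAGE_SYSTEM |
						 RADEON_VM_PAGE_SNOOPED |
						 RADEON_VM_PAGE_READABLE |
						 RADEON_VM_PAGE_WRITEABLE);

	/* writes the page-aligned address | PTE bits at slot pfn of the table */
	cayman_vm_set_page(rdev, vm, pfn, dma_addr, hw_flags);
}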
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 4672869..9a7f3b6 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -42,6 +42,9 @@
#define CAYMAN_MAX_TCC_MASK 0xFF
#define DMIF_ADDR_CONFIG 0xBD4
+#define SRBM_GFX_CNTL 0x0E44
+#define RINGID(x) (((x) & 0x3) << 0)
+#define VMID(x) (((x) & 0x7) << 0)
#define SRBM_STATUS 0x0E50
#define VM_CONTEXT0_REQUEST_RESPONSE 0x1470
@@ -219,6 +222,8 @@
#define SCRATCH_UMSK 0x8540
#define SCRATCH_ADDR 0x8544
#define CP_SEM_WAIT_TIMER 0x85BC
+#define CP_SEM_INCOMPLETE_TIMER_CNTL 0x85C8
+#define CP_COHER_CNTL2 0x85E8
#define CP_ME_CNTL 0x86D8
#define CP_ME_HALT (1 << 28)
#define CP_PFP_HALT (1 << 26)
@@ -394,6 +399,12 @@
#define CP_RB0_RPTR_ADDR 0xC10C
#define CP_RB0_RPTR_ADDR_HI 0xC110
#define CP_RB0_WPTR 0xC114
+
+#define CP_INT_CNTL 0xC124
+# define CNTX_BUSY_INT_ENABLE (1 << 19)
+# define CNTX_EMPTY_INT_ENABLE (1 << 20)
+# define TIME_STAMP_INT_ENABLE (1 << 26)
+
#define CP_RB1_BASE 0xC180
#define CP_RB1_CNTL 0xC184
#define CP_RB1_RPTR_ADDR 0xC188
@@ -411,6 +422,10 @@
#define CP_ME_RAM_DATA 0xC160
#define CP_DEBUG 0xC1FC
+#define VGT_EVENT_INITIATOR 0x28a90
+# define CACHE_FLUSH_AND_INV_EVENT_TS (0x14 << 0)
+# define CACHE_FLUSH_AND_INV_EVENT (0x16 << 0)
+
/*
* PM4
*/
@@ -445,6 +460,7 @@
#define PACKET3_DISPATCH_DIRECT 0x15
#define PACKET3_DISPATCH_INDIRECT 0x16
#define PACKET3_INDIRECT_BUFFER_END 0x17
+#define PACKET3_MODE_CONTROL 0x18
#define PACKET3_SET_PREDICATION 0x20
#define PACKET3_REG_RMW 0x21
#define PACKET3_COND_EXEC 0x22
@@ -494,7 +510,27 @@
#define PACKET3_ME_INITIALIZE_DEVICE_ID(x) ((x) << 16)
#define PACKET3_COND_WRITE 0x45
#define PACKET3_EVENT_WRITE 0x46
+#define EVENT_TYPE(x) ((x) << 0)
+#define EVENT_INDEX(x) ((x) << 8)
+ /* 0 - any non-TS event
+ * 1 - ZPASS_DONE
+ * 2 - SAMPLE_PIPELINESTAT
+ * 3 - SAMPLE_STREAMOUTSTAT*
+ * 4 - *S_PARTIAL_FLUSH
+ * 5 - TS events
+ */
#define PACKET3_EVENT_WRITE_EOP 0x47
+#define DATA_SEL(x) ((x) << 29)
+ /* 0 - discard
+ * 1 - send low 32bit data
+ * 2 - send 64bit data
+ * 3 - send 64bit counter value
+ */
+#define INT_SEL(x) ((x) << 24)
+ /* 0 - none
+ * 1 - interrupt only (DATA_SEL = 0)
+ * 2 - interrupt when data write is confirmed
+ */
#define PACKET3_EVENT_WRITE_EOS 0x48
#define PACKET3_PREAMBLE_CNTL 0x4A
# define PACKET3_PREAMBLE_BEGIN_CLEAR_STATE (2 << 28)
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index bfc08f6..333cde9 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -667,7 +667,7 @@ int r100_irq_set(struct radeon_device *rdev)
WREG32(R_000040_GEN_INT_CNTL, 0);
return -EINVAL;
}
- if (rdev->irq.sw_int) {
+ if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
tmp |= RADEON_SW_INT_ENABLE;
}
if (rdev->irq.gui_idle) {
@@ -739,7 +739,7 @@ int r100_irq_process(struct radeon_device *rdev)
while (status) {
/* SW interrupt */
if (status & RADEON_SW_INT_TEST) {
- radeon_fence_process(rdev);
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
}
/* gui idle interrupt */
if (status & RADEON_GUI_IDLE_STAT) {
@@ -789,9 +789,7 @@ int r100_irq_process(struct radeon_device *rdev)
WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM);
break;
default:
- msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
- WREG32(RADEON_MSI_REARM_EN, msi_rearm);
- WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
+ WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
break;
}
}
@@ -811,25 +809,36 @@ u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc)
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+
/* We have to make sure that caches are flushed before
* CPU might read something from VRAM. */
- radeon_ring_write(rdev, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, RADEON_RB3D_DC_FLUSH_ALL);
- radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
+ radeon_ring_write(ring, PACKET0(RADEON_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, RADEON_RB3D_DC_FLUSH_ALL);
+ radeon_ring_write(ring, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, RADEON_RB3D_ZC_FLUSH_ALL);
/* Wait until IDLE & CLEAN */
- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
- radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
+ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(ring, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r100.hdp_cntl |
RADEON_HDP_READ_BUFFER_INVALIDATE);
- radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(rdev, rdev->config.r100.hdp_cntl);
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r100.hdp_cntl);
/* Emit fence sequence & fire IRQ */
- radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
- radeon_ring_write(rdev, fence->seq);
- radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
- radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
+ radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
+ radeon_ring_write(ring, fence->seq);
+ radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
+ radeon_ring_write(ring, RADEON_SW_INT_FIRE);
+}
+
+void r100_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ /* Unused on older asics, since we don't have semaphores or multiple rings */
+ BUG();
}
int r100_copy_blit(struct radeon_device *rdev,
@@ -838,6 +847,7 @@ int r100_copy_blit(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct radeon_fence *fence)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t cur_pages;
uint32_t stride_bytes = RADEON_GPU_PAGE_SIZE;
uint32_t pitch;
@@ -855,7 +865,7 @@ int r100_copy_blit(struct radeon_device *rdev,
/* Ask for enough room for blit + flush + fence */
ndw = 64 + (10 * num_loops);
- r = radeon_ring_lock(rdev, ndw);
+ r = radeon_ring_lock(rdev, ring, ndw);
if (r) {
DRM_ERROR("radeon: moving bo (%d) asking for %u dw.\n", r, ndw);
return -EINVAL;
@@ -869,8 +879,8 @@ int r100_copy_blit(struct radeon_device *rdev,
/* pages are in Y direction - height
page width in X direction - width */
- radeon_ring_write(rdev, PACKET3(PACKET3_BITBLT_MULTI, 8));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET3(PACKET3_BITBLT_MULTI, 8));
+ radeon_ring_write(ring,
RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
RADEON_GMC_DST_PITCH_OFFSET_CNTL |
RADEON_GMC_SRC_CLIPPING |
@@ -882,26 +892,26 @@ int r100_copy_blit(struct radeon_device *rdev,
RADEON_DP_SRC_SOURCE_MEMORY |
RADEON_GMC_CLR_CMP_CNTL_DIS |
RADEON_GMC_WR_MSK_DIS);
- radeon_ring_write(rdev, (pitch << 22) | (src_offset >> 10));
- radeon_ring_write(rdev, (pitch << 22) | (dst_offset >> 10));
- radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16));
- radeon_ring_write(rdev, num_gpu_pages);
- radeon_ring_write(rdev, num_gpu_pages);
- radeon_ring_write(rdev, cur_pages | (stride_pixels << 16));
- }
- radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, RADEON_RB2D_DC_FLUSH_ALL);
- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, (pitch << 22) | (src_offset >> 10));
+ radeon_ring_write(ring, (pitch << 22) | (dst_offset >> 10));
+ radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, (0x1fff) | (0x1fff << 16));
+ radeon_ring_write(ring, num_gpu_pages);
+ radeon_ring_write(ring, num_gpu_pages);
+ radeon_ring_write(ring, cur_pages | (stride_pixels << 16));
+ }
+ radeon_ring_write(ring, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, RADEON_RB2D_DC_FLUSH_ALL);
+ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(ring,
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_HOST_IDLECLEAN |
RADEON_WAIT_DMA_GUI_IDLE);
if (fence) {
r = radeon_fence_emit(rdev, fence);
}
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_unlock_commit(rdev, ring);
return r;
}
@@ -922,19 +932,20 @@ static int r100_cp_wait_for_idle(struct radeon_device *rdev)
void r100_ring_start(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
- r = radeon_ring_lock(rdev, 2);
+ r = radeon_ring_lock(rdev, ring, 2);
if (r) {
return;
}
- radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
+ radeon_ring_write(ring,
RADEON_ISYNC_ANY2D_IDLE3D |
RADEON_ISYNC_ANY3D_IDLE2D |
RADEON_ISYNC_WAIT_IDLEGUI |
RADEON_ISYNC_CPSCRATCH_IDLEGUI);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_unlock_commit(rdev, ring);
}
@@ -1035,6 +1046,7 @@ static void r100_cp_load_microcode(struct radeon_device *rdev)
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
unsigned rb_bufsz;
unsigned rb_blksz;
unsigned max_fetch;
@@ -1060,7 +1072,9 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
rb_bufsz = drm_order(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
r100_cp_load_microcode(rdev);
- r = radeon_ring_init(rdev, ring_size);
+ r = radeon_ring_init(rdev, ring, ring_size, RADEON_WB_CP_RPTR_OFFSET,
+ RADEON_CP_RB_RPTR, RADEON_CP_RB_WPTR,
+ 0, 0x7fffff, RADEON_CP_PACKET2);
if (r) {
return r;
}
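For readers following the API change: the reworked radeon_ring_init() now takes the ring object plus per-ring bookkeeping that used to be implicit. Argument roles, inferred from the two call sites in this patch (a reading aid, not a definitive prototype):

/*
 * radeon_ring_init(rdev, ring,
 *                  ring_size,                 ring size in bytes
 *                  RADEON_WB_CP_RPTR_OFFSET,  writeback slot for the rptr
 *                  RADEON_CP_RB_RPTR,         MMIO read-pointer register
 *                  RADEON_CP_RB_WPTR,         MMIO write-pointer register
 *                  0, 0x7fffff,               pointer shift and mask
 *                  RADEON_CP_PACKET2);        NOP dword used for padding
 */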
@@ -1069,7 +1083,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
rb_blksz = 9;
/* cp will read 128bytes at a time (4 dwords) */
max_fetch = 1;
- rdev->cp.align_mask = 16 - 1;
+ ring->align_mask = 16 - 1;
/* Write to CP_RB_WPTR will be delayed for pre_write_timer clocks */
pre_write_timer = 64;
/* Force CP_RB_WPTR write if written more than one time before the
@@ -1099,13 +1113,13 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_NO_UPDATE);
/* Set ring address */
- DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)rdev->cp.gpu_addr);
- WREG32(RADEON_CP_RB_BASE, rdev->cp.gpu_addr);
+ DRM_INFO("radeon: ring at 0x%016lX\n", (unsigned long)ring->gpu_addr);
+ WREG32(RADEON_CP_RB_BASE, ring->gpu_addr);
/* Force read & write ptr to 0 */
WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA | RADEON_RB_NO_UPDATE);
WREG32(RADEON_CP_RB_RPTR_WR, 0);
- rdev->cp.wptr = 0;
- WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
+ ring->wptr = 0;
+ WREG32(RADEON_CP_RB_WPTR, ring->wptr);
/* set the wb address whether it's enabled or not */
WREG32(R_00070C_CP_RB_RPTR_ADDR,
@@ -1121,7 +1135,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
WREG32(RADEON_CP_RB_CNTL, tmp);
udelay(10);
- rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+ ring->rptr = RREG32(RADEON_CP_RB_RPTR);
/* Set cp mode to bus mastering & enable cp*/
WREG32(RADEON_CP_CSQ_MODE,
REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
@@ -1130,12 +1144,12 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
WREG32(RADEON_CP_CSQ_MODE, 0x00004D4D);
WREG32(RADEON_CP_CSQ_CNTL, RADEON_CSQ_PRIBM_INDBM);
radeon_ring_start(rdev);
- r = radeon_ring_test(rdev);
+ r = radeon_ring_test(rdev, ring);
if (r) {
DRM_ERROR("radeon: cp isn't working (%d).\n", r);
return r;
}
- rdev->cp.ready = true;
+ ring->ready = true;
radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
return 0;
}
@@ -1147,7 +1161,7 @@ void r100_cp_fini(struct radeon_device *rdev)
}
/* Disable ring */
r100_cp_disable(rdev);
- radeon_ring_fini(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
DRM_INFO("radeon: cp finalized\n");
}
@@ -1155,7 +1169,7 @@ void r100_cp_disable(struct radeon_device *rdev)
{
/* Disable ring */
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
- rdev->cp.ready = false;
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
WREG32(RADEON_CP_CSQ_MODE, 0);
WREG32(RADEON_CP_CSQ_CNTL, 0);
WREG32(R_000770_SCRATCH_UMSK, 0);
@@ -1165,13 +1179,6 @@ void r100_cp_disable(struct radeon_device *rdev)
}
}
-void r100_cp_commit(struct radeon_device *rdev)
-{
- WREG32(RADEON_CP_RB_WPTR, rdev->cp.wptr);
- (void)RREG32(RADEON_CP_RB_WPTR);
-}
-
-
/*
* CS functions
*/
@@ -2099,9 +2106,9 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
return -1;
}
-void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
+void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
{
- lockup->last_cp_rptr = cp->rptr;
+ lockup->last_cp_rptr = ring->rptr;
lockup->last_jiffies = jiffies;
}
@@ -2126,20 +2133,20 @@ void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup, struct radeon_cp *cp
* false positive when the CP has simply been given nothing to do.
*
**/
-bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_cp *cp)
+bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *lockup, struct radeon_ring *ring)
{
unsigned long cjiffies, elapsed;
cjiffies = jiffies;
if (!time_after(cjiffies, lockup->last_jiffies)) {
/* likely a wrap around */
- lockup->last_cp_rptr = cp->rptr;
+ lockup->last_cp_rptr = ring->rptr;
lockup->last_jiffies = jiffies;
return false;
}
- if (cp->rptr != lockup->last_cp_rptr) {
+ if (ring->rptr != lockup->last_cp_rptr) {
/* CP is still working no lockup */
- lockup->last_cp_rptr = cp->rptr;
+ lockup->last_cp_rptr = ring->rptr;
lockup->last_jiffies = jiffies;
return false;
}
@@ -2152,26 +2159,26 @@ bool r100_gpu_cp_is_lockup(struct radeon_device *rdev, struct r100_gpu_lockup *l
return false;
}
-bool r100_gpu_is_lockup(struct radeon_device *rdev)
+bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 rbbm_status;
int r;
rbbm_status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
- r100_gpu_lockup_update(&rdev->config.r100.lockup, &rdev->cp);
+ r100_gpu_lockup_update(&rdev->config.r100.lockup, ring);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, 2);
+ r = radeon_ring_lock(rdev, ring, 2);
if (!r) {
/* PACKET2 NOP */
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_unlock_commit(rdev, ring);
}
- rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
- return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, &rdev->cp);
+ ring->rptr = RREG32(ring->rptr_reg);
+ return r100_gpu_cp_is_lockup(rdev, &rdev->config.r100.lockup, ring);
}
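The hunk above reuses the r100 lockup heuristic in the per-ring world: force a tiny CP submission, re-read the ring's rptr, and only report a hang if the pointer has stopped moving for a sustained period. A condensed sketch of that rule (the 10-second window is an assumption, not taken from this patch):

static bool ring_seems_hung(struct r100_gpu_lockup *lockup,
			    struct radeon_ring *ring)
{
	if (ring->rptr != lockup->last_cp_rptr) {
		/* still making progress, remember where and when */
		lockup->last_cp_rptr = ring->rptr;
		lockup->last_jiffies = jiffies;
		return false;
	}
	/* no movement: declare a hang only after it has been stuck a while */
	return jiffies_to_msecs(jiffies - lockup->last_jiffies) >= 10 * 1000;
}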
void r100_bm_disable(struct radeon_device *rdev)
@@ -2187,8 +2194,7 @@ void r100_bm_disable(struct radeon_device *rdev)
WREG32(R_000030_BUS_CNTL, (tmp & 0xFFFFFFFF) | 0x00000040);
tmp = RREG32(RADEON_BUS_CNTL);
mdelay(1);
- pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
- pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
+ pci_clear_master(rdev->pdev);
mdelay(1);
}
@@ -2579,21 +2585,22 @@ static int r100_debugfs_cp_ring_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t rdp, wdp;
unsigned count, i, j;
- radeon_ring_free_size(rdev);
+ radeon_ring_free_size(rdev, ring);
rdp = RREG32(RADEON_CP_RB_RPTR);
wdp = RREG32(RADEON_CP_RB_WPTR);
- count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
+ count = (rdp + ring->ring_size - wdp) & ring->ptr_mask;
seq_printf(m, "CP_STAT 0x%08x\n", RREG32(RADEON_CP_STAT));
seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
- seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
+ seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
seq_printf(m, "%u dwords in ring\n", count);
for (j = 0; j <= count; j++) {
- i = (rdp + j) & rdev->cp.ptr_mask;
- seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
+ i = (rdp + j) & ring->ptr_mask;
+ seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
}
return 0;
}
@@ -3635,7 +3642,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
}
}
-int r100_ring_test(struct radeon_device *rdev)
+int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
uint32_t scratch;
uint32_t tmp = 0;
@@ -3648,15 +3655,15 @@ int r100_ring_test(struct radeon_device *rdev)
return r;
}
WREG32(scratch, 0xCAFEDEAD);
- r = radeon_ring_lock(rdev, 2);
+ r = radeon_ring_lock(rdev, ring, 2);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
radeon_scratch_free(rdev, scratch);
return r;
}
- radeon_ring_write(rdev, PACKET0(scratch, 0));
- radeon_ring_write(rdev, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, PACKET0(scratch, 0));
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF) {
@@ -3677,9 +3684,11 @@ int r100_ring_test(struct radeon_device *rdev)
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
- radeon_ring_write(rdev, PACKET0(RADEON_CP_IB_BASE, 1));
- radeon_ring_write(rdev, ib->gpu_addr);
- radeon_ring_write(rdev, ib->length_dw);
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
+ radeon_ring_write(ring, PACKET0(RADEON_CP_IB_BASE, 1));
+ radeon_ring_write(ring, ib->gpu_addr);
+ radeon_ring_write(ring, ib->length_dw);
}
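Both r100_ring_test() and r100_ib_test() share the same smoke-test idea: seed a scratch register with 0xCAFEDEAD, have the CP overwrite it with 0xDEADBEEF, then poll for the magic value. A generic sketch of the polling half, modeled on the surrounding code (the delay and error code are assumptions):

static int poll_scratch_for(struct radeon_device *rdev, u32 scratch, u32 want)
{
	unsigned i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(scratch) == want)
			return 0;	/* the CP got there */
		udelay(1);
	}
	return -EINVAL;		/* timed out waiting for the CP */
}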
int r100_ib_test(struct radeon_device *rdev)
@@ -3696,7 +3705,7 @@ int r100_ib_test(struct radeon_device *rdev)
return r;
}
WREG32(scratch, 0xCAFEDEAD);
- r = radeon_ib_get(rdev, &ib);
+ r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib, 256);
if (r) {
return r;
}
@@ -3740,34 +3749,16 @@ int r100_ib_test(struct radeon_device *rdev)
void r100_ib_fini(struct radeon_device *rdev)
{
+ radeon_ib_pool_suspend(rdev);
radeon_ib_pool_fini(rdev);
}
-int r100_ib_init(struct radeon_device *rdev)
-{
- int r;
-
- r = radeon_ib_pool_init(rdev);
- if (r) {
- dev_err(rdev->dev, "failed initializing IB pool (%d).\n", r);
- r100_ib_fini(rdev);
- return r;
- }
- r = r100_ib_test(rdev);
- if (r) {
- dev_err(rdev->dev, "failed testing IB (%d).\n", r);
- r100_ib_fini(rdev);
- return r;
- }
- return 0;
-}
-
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
{
/* Shut down the CP; we shouldn't need to do that, but better safe
* than sorry
*/
- rdev->cp.ready = false;
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
WREG32(R_000740_CP_CSQ_CNTL, 0);
/* Save few CRTC registers */
@@ -3905,6 +3896,12 @@ static int r100_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r100_irq_set(rdev);
rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -3914,16 +3911,25 @@ static int r100_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
- r = r100_ib_init(rdev);
+
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r100_ib_test(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
return r;
}
+
return 0;
}
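The same reshuffling repeats for every ASIC touched by this patch: radeon_ib_pool_init() moves into <asic>_init() and runs once, startup only arms the pool and smoke-tests an IB, and suspend quiesces it before the CP is stopped. A condensed sketch of the combined init-plus-startup half, folded into one function for readability (not the literal call order in the driver):

	static int r100_accel_bringup_sketch(struct radeon_device *rdev)
	{
		int r;

		r = radeon_ib_pool_init(rdev);	/* now done once, at init time */
		if (r) {
			dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
			rdev->accel_working = false;
			return r;
		}
		rdev->accel_working = true;

		r = radeon_ib_pool_start(rdev);	/* per-startup (and per-resume) arming */
		if (r)
			return r;

		r = r100_ib_test(rdev);		/* smoke-test one IB */
		if (r) {
			dev_err(rdev->dev, "failed testing IB (%d).\n", r);
			rdev->accel_working = false;
		}
		return r;
	}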
int r100_resume(struct radeon_device *rdev)
{
+ int r;
+
/* Make sure GART is not working */
if (rdev->flags & RADEON_IS_PCI)
r100_pci_gart_disable(rdev);
@@ -3941,11 +3947,18 @@ int r100_resume(struct radeon_device *rdev)
r100_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
- return r100_startup(rdev);
+
+ rdev->accel_working = true;
+ r = r100_startup(rdev);
+ if (r) {
+ rdev->accel_working = false;
+ }
+ return r;
}
int r100_suspend(struct radeon_device *rdev)
{
+ radeon_ib_pool_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -4064,7 +4077,14 @@ int r100_init(struct radeon_device *rdev)
return r;
}
r100_set_safe_registers(rdev);
+
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = r100_startup(rdev);
if (r) {
/* Something went wrong with the accel init, stop accel */
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index a1f3ba0..eba4cbf 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -87,6 +87,7 @@ int r200_copy_dma(struct radeon_device *rdev,
unsigned num_gpu_pages,
struct radeon_fence *fence)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t size;
uint32_t cur_size;
int i, num_loops;
@@ -95,33 +96,33 @@ int r200_copy_dma(struct radeon_device *rdev,
/* radeon pitch is /64 */
size = num_gpu_pages << RADEON_GPU_PAGE_SHIFT;
num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
- r = radeon_ring_lock(rdev, num_loops * 4 + 64);
+ r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64);
if (r) {
DRM_ERROR("radeon: moving bo (%d).\n", r);
return r;
}
/* Must wait for 2D idle & clean before DMA or hangs might happen */
- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(rdev, (1 << 16));
+ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(ring, (1 << 16));
for (i = 0; i < num_loops; i++) {
cur_size = size;
if (cur_size > 0x1FFFFF) {
cur_size = 0x1FFFFF;
}
size -= cur_size;
- radeon_ring_write(rdev, PACKET0(0x720, 2));
- radeon_ring_write(rdev, src_offset);
- radeon_ring_write(rdev, dst_offset);
- radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
+ radeon_ring_write(ring, PACKET0(0x720, 2));
+ radeon_ring_write(ring, src_offset);
+ radeon_ring_write(ring, dst_offset);
+ radeon_ring_write(ring, cur_size | (1 << 31) | (1 << 30));
src_offset += cur_size;
dst_offset += cur_size;
}
- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
+ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(ring, RADEON_WAIT_DMA_GUI_IDLE);
if (fence) {
r = radeon_fence_emit(rdev, fence);
}
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_unlock_commit(rdev, ring);
return r;
}
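The bulk of this patch is this mechanical conversion: every emitter names the ring it writes to instead of implicitly using rdev->cp. The calling convention each converted site follows looks like this (ndw, reg and value are placeholders, not from the patch):

	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	r = radeon_ring_lock(rdev, ring, ndw);		/* reserve ndw dwords */
	if (r)
		return r;
	radeon_ring_write(ring, PACKET0(reg, 0));	/* writes only need the ring */
	radeon_ring_write(ring, value);
	radeon_ring_unlock_commit(rdev, ring);		/* bump wptr, kick the CP */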
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index c93bc64..6829638 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -175,37 +175,40 @@ void rv370_pcie_gart_fini(struct radeon_device *rdev)
void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+
/* Whoever calls radeon_fence_emit should call ring_lock and ask
* for enough space (today callers are ib schedule and buffer move) */
/* Write SC register so SC & US assert idle */
- radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0));
+ radeon_ring_write(ring, 0);
/* Flush 3D cache */
- radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
- radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, R300_ZC_FLUSH);
+ radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, R300_RB3D_DC_FLUSH);
+ radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, R300_ZC_FLUSH);
/* Wait until IDLE & CLEAN */
- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
+ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN |
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_DMA_GUI_IDLE));
- radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r300.hdp_cntl |
RADEON_HDP_READ_BUFFER_INVALIDATE);
- radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
- radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
+ radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+ radeon_ring_write(ring, rdev->config.r300.hdp_cntl);
/* Emit fence sequence & fire IRQ */
- radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
- radeon_ring_write(rdev, fence->seq);
- radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
- radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
+ radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0));
+ radeon_ring_write(ring, fence->seq);
+ radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0));
+ radeon_ring_write(ring, RADEON_SW_INT_FIRE);
}
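Fence emission is now indexed by the fence's ring rather than going through a single global fence driver and scratch register; the lookup pattern used above (and again in the r600 variant) is simply:

	struct radeon_ring *ring = &rdev->ring[fence->ring];
	uint32_t scratch = rdev->fence_drv[fence->ring].scratch_reg;

	radeon_ring_write(ring, PACKET0(scratch, 0));	/* store the fence sequence */
	radeon_ring_write(ring, fence->seq);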
void r300_ring_start(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
unsigned gb_tile_config;
int r;
@@ -227,44 +230,44 @@ void r300_ring_start(struct radeon_device *rdev)
break;
}
- r = radeon_ring_lock(rdev, 64);
+ r = radeon_ring_lock(rdev, ring, 64);
if (r) {
return;
}
- radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0));
+ radeon_ring_write(ring,
RADEON_ISYNC_ANY2D_IDLE3D |
RADEON_ISYNC_ANY3D_IDLE2D |
RADEON_ISYNC_WAIT_IDLEGUI |
RADEON_ISYNC_CPSCRATCH_IDLEGUI);
- radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
- radeon_ring_write(rdev, gb_tile_config);
- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0));
+ radeon_ring_write(ring, gb_tile_config);
+ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(ring,
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_3D_IDLECLEAN);
- radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
- radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
- radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
- radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
+ radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
+ radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+ radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
+ radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(ring,
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_3D_IDLECLEAN);
- radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
- radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
- radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
+ radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE);
+ radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0));
+ radeon_ring_write(ring,
((6 << R300_MS_X0_SHIFT) |
(6 << R300_MS_Y0_SHIFT) |
(6 << R300_MS_X1_SHIFT) |
@@ -273,8 +276,8 @@ void r300_ring_start(struct radeon_device *rdev)
(6 << R300_MS_Y2_SHIFT) |
(6 << R300_MSBD0_Y_SHIFT) |
(6 << R300_MSBD0_X_SHIFT)));
- radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0));
+ radeon_ring_write(ring,
((6 << R300_MS_X3_SHIFT) |
(6 << R300_MS_Y3_SHIFT) |
(6 << R300_MS_X4_SHIFT) |
@@ -282,16 +285,16 @@ void r300_ring_start(struct radeon_device *rdev)
(6 << R300_MS_X5_SHIFT) |
(6 << R300_MS_Y5_SHIFT) |
(6 << R300_MSBD1_SHIFT)));
- radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
- radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
- radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0));
+ radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
+ radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0));
+ radeon_ring_write(ring,
R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
- radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0));
+ radeon_ring_write(ring,
R300_GEOMETRY_ROUND_NEAREST |
R300_COLOR_ROUND_NEAREST);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_unlock_commit(rdev, ring);
}
void r300_errata(struct radeon_device *rdev)
@@ -375,26 +378,26 @@ void r300_gpu_init(struct radeon_device *rdev)
rdev->num_gb_pipes, rdev->num_z_pipes);
}
-bool r300_gpu_is_lockup(struct radeon_device *rdev)
+bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 rbbm_status;
int r;
rbbm_status = RREG32(R_000E40_RBBM_STATUS);
if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
- r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
+ r100_gpu_lockup_update(&rdev->config.r300.lockup, ring);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, 2);
+ r = radeon_ring_lock(rdev, ring, 2);
if (!r) {
/* PACKET2 NOP */
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_unlock_commit(rdev, ring);
}
- rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
- return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
+ ring->rptr = RREG32(RADEON_CP_RB_RPTR);
+ return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, ring);
}
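The lockup helpers themselves are not part of this hunk, but the call sites suggest the idea: when the GPU reports idle, refresh the tracker; otherwise force a couple of NOPs through the CP, re-read the ring's read pointer and let the tracker decide whether it has moved since the last check. A hypothetical sketch of that comparison (struct layout and field names are assumptions, not the driver's actual r100 tracker):

	struct lockup_tracker {
		u32 last_rptr;
		unsigned long last_jiffies;
	};

	/* True if the ring made no progress for longer than the allowed
	 * interval; otherwise remember the new position and time. */
	static bool cp_is_lockup_sketch(struct lockup_tracker *t,
					struct radeon_ring *ring,
					unsigned long timeout_jiffies)
	{
		if (ring->rptr != t->last_rptr) {
			t->last_rptr = ring->rptr;
			t->last_jiffies = jiffies;
			return false;
		}
		return time_after(jiffies, t->last_jiffies + timeout_jiffies);
	}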
int r300_asic_reset(struct radeon_device *rdev)
@@ -701,7 +704,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
return r;
}
- if (p->keep_tiling_flags) {
+ if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) {
ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
} else {
@@ -765,7 +768,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
/* RB3D_COLORPITCH1 */
/* RB3D_COLORPITCH2 */
/* RB3D_COLORPITCH3 */
- if (!p->keep_tiling_flags) {
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
@@ -850,7 +853,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break;
case 0x4F24:
/* ZB_DEPTHPITCH */
- if (!p->keep_tiling_flags) {
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
@@ -1396,6 +1399,12 @@ static int r300_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -1405,16 +1414,25 @@ static int r300_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
- r = r100_ib_init(rdev);
+
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r100_ib_test(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
return r;
}
+
return 0;
}
int r300_resume(struct radeon_device *rdev)
{
+ int r;
+
/* Make sure GART is not working */
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_disable(rdev);
@@ -1434,11 +1452,18 @@ int r300_resume(struct radeon_device *rdev)
r300_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
- return r300_startup(rdev);
+
+ rdev->accel_working = true;
+ r = r300_startup(rdev);
+ if (r) {
+ rdev->accel_working = false;
+ }
+ return r;
}
int r300_suspend(struct radeon_device *rdev)
{
+ radeon_ib_pool_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -1539,7 +1564,14 @@ int r300_init(struct radeon_device *rdev)
return r;
}
r300_set_reg_safe(rdev);
+
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = r300_startup(rdev);
if (r) {
/* Something went wrong with the accel init, stop accel */
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 417fab8..b143230 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -199,6 +199,8 @@ static void r420_clock_resume(struct radeon_device *rdev)
static void r420_cp_errata_init(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
/* RV410 and R420 can lock up if CP DMA to host memory happens
* while the 2D engine is busy.
*
@@ -206,22 +208,24 @@ static void r420_cp_errata_init(struct radeon_device *rdev)
* of the CP init, apparently.
*/
radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
- radeon_ring_lock(rdev, 8);
- radeon_ring_write(rdev, PACKET0(R300_CP_RESYNC_ADDR, 1));
- radeon_ring_write(rdev, rdev->config.r300.resync_scratch);
- radeon_ring_write(rdev, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_lock(rdev, ring, 8);
+ radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
+ radeon_ring_write(ring, rdev->config.r300.resync_scratch);
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
}
static void r420_cp_errata_fini(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+
/* Catch the RESYNC we dispatched all the way back,
* at the very beginning of the CP init.
*/
- radeon_ring_lock(rdev, 8);
- radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, R300_RB3D_DC_FINISH);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_lock(rdev, ring, 8);
+ radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, R300_RB3D_DC_FINISH);
+ radeon_ring_unlock_commit(rdev, ring);
radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
}
@@ -254,6 +258,12 @@ static int r420_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -264,16 +274,25 @@ static int r420_startup(struct radeon_device *rdev)
return r;
}
r420_cp_errata_init(rdev);
- r = r100_ib_init(rdev);
+
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r100_ib_test(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
return r;
}
+
return 0;
}
int r420_resume(struct radeon_device *rdev)
{
+ int r;
+
/* Make sure GART is not working */
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_disable(rdev);
@@ -297,11 +316,18 @@ int r420_resume(struct radeon_device *rdev)
r420_clock_resume(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
- return r420_startup(rdev);
+
+ rdev->accel_working = true;
+ r = r420_startup(rdev);
+ if (r) {
+ rdev->accel_working = false;
+ }
+ return r;
}
int r420_suspend(struct radeon_device *rdev)
{
+ radeon_ib_pool_suspend(rdev);
r420_cp_errata_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -414,7 +440,14 @@ int r420_init(struct radeon_device *rdev)
return r;
}
r420_set_reg_safe(rdev);
+
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = r420_startup(rdev);
if (r) {
/* Something went wrong with the accel init, stop accel */
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index fc43705..3bd8f1b 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -573,6 +573,7 @@
#define AVIVO_TMDSA_CNTL 0x7880
# define AVIVO_TMDSA_CNTL_ENABLE (1 << 0)
+# define AVIVO_TMDSA_CNTL_HDMI_EN (1 << 2)
# define AVIVO_TMDSA_CNTL_HPD_MASK (1 << 4)
# define AVIVO_TMDSA_CNTL_HPD_SELECT (1 << 8)
# define AVIVO_TMDSA_CNTL_SYNC_PHASE (1 << 12)
@@ -633,6 +634,7 @@
#define AVIVO_LVTMA_CNTL 0x7a80
# define AVIVO_LVTMA_CNTL_ENABLE (1 << 0)
+# define AVIVO_LVTMA_CNTL_HDMI_EN (1 << 2)
# define AVIVO_LVTMA_CNTL_HPD_MASK (1 << 4)
# define AVIVO_LVTMA_CNTL_HPD_SELECT (1 << 8)
# define AVIVO_LVTMA_CNTL_SYNC_PHASE (1 << 12)
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 3081d07..25084e8 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -187,6 +187,12 @@ static int r520_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -196,9 +202,15 @@ static int r520_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
- r = r100_ib_init(rdev);
+
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r100_ib_test(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
return r;
}
return 0;
@@ -206,6 +218,8 @@ static int r520_startup(struct radeon_device *rdev)
int r520_resume(struct radeon_device *rdev)
{
+ int r;
+
/* Make sure GART is not working */
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_disable(rdev);
@@ -223,7 +237,13 @@ int r520_resume(struct radeon_device *rdev)
rv515_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
- return r520_startup(rdev);
+
+ rdev->accel_working = true;
+ r = r520_startup(rdev);
+ if (r) {
+ rdev->accel_working = false;
+ }
+ return r;
}
int r520_init(struct radeon_device *rdev)
@@ -292,7 +312,14 @@ int r520_init(struct radeon_device *rdev)
if (r)
return r;
rv515_set_safe_registers(rdev);
+
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = r520_startup(rdev);
if (r) {
/* Something went wrong with the accel init, stop accel */
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 9cdda0b..17ca72c 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1344,7 +1344,7 @@ int r600_gpu_soft_reset(struct radeon_device *rdev)
return 0;
}
-bool r600_gpu_is_lockup(struct radeon_device *rdev)
+bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
u32 srbm_status;
u32 grbm_status;
@@ -1361,19 +1361,19 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev)
grbm_status = RREG32(R_008010_GRBM_STATUS);
grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
if (!G_008010_GUI_ACTIVE(grbm_status)) {
- r100_gpu_lockup_update(lockup, &rdev->cp);
+ r100_gpu_lockup_update(lockup, ring);
return false;
}
/* force CP activities */
- r = radeon_ring_lock(rdev, 2);
+ r = radeon_ring_lock(rdev, ring, 2);
if (!r) {
/* PACKET2 NOP */
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_write(rdev, 0x80000000);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_write(ring, 0x80000000);
+ radeon_ring_unlock_commit(rdev, ring);
}
- rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
- return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
+ ring->rptr = RREG32(ring->rptr_reg);
+ return r100_gpu_cp_is_lockup(rdev, lockup, ring);
}
int r600_asic_reset(struct radeon_device *rdev)
@@ -2144,27 +2144,28 @@ static int r600_cp_load_microcode(struct radeon_device *rdev)
int r600_cp_start(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
uint32_t cp_me;
- r = radeon_ring_lock(rdev, 7);
+ r = radeon_ring_lock(rdev, ring, 7);
if (r) {
DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
return r;
}
- radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
- radeon_ring_write(rdev, 0x1);
+ radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
+ radeon_ring_write(ring, 0x1);
if (rdev->family >= CHIP_RV770) {
- radeon_ring_write(rdev, 0x0);
- radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
+ radeon_ring_write(ring, 0x0);
+ radeon_ring_write(ring, rdev->config.rv770.max_hw_contexts - 1);
} else {
- radeon_ring_write(rdev, 0x3);
- radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
+ radeon_ring_write(ring, 0x3);
+ radeon_ring_write(ring, rdev->config.r600.max_hw_contexts - 1);
}
- radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_unlock_commit(rdev, ring);
cp_me = 0xff;
WREG32(R_0086D8_CP_ME_CNTL, cp_me);
@@ -2173,6 +2174,7 @@ int r600_cp_start(struct radeon_device *rdev)
int r600_cp_resume(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 tmp;
u32 rb_bufsz;
int r;
@@ -2184,13 +2186,13 @@ int r600_cp_resume(struct radeon_device *rdev)
WREG32(GRBM_SOFT_RESET, 0);
/* Set ring buffer size */
- rb_bufsz = drm_order(rdev->cp.ring_size / 8);
+ rb_bufsz = drm_order(ring->ring_size / 8);
tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
tmp |= BUF_SWAP_32BIT;
#endif
WREG32(CP_RB_CNTL, tmp);
- WREG32(CP_SEM_WAIT_TIMER, 0x4);
+ WREG32(CP_SEM_WAIT_TIMER, 0x0);
/* Set the write pointer delay */
WREG32(CP_RB_WPTR_DELAY, 0);
@@ -2198,8 +2200,8 @@ int r600_cp_resume(struct radeon_device *rdev)
/* Initialize the ring buffer's read and write pointers */
WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
WREG32(CP_RB_RPTR_WR, 0);
- rdev->cp.wptr = 0;
- WREG32(CP_RB_WPTR, rdev->cp.wptr);
+ ring->wptr = 0;
+ WREG32(CP_RB_WPTR, ring->wptr);
/* set the wb address whether it's enabled or not */
WREG32(CP_RB_RPTR_ADDR,
@@ -2217,42 +2219,36 @@ int r600_cp_resume(struct radeon_device *rdev)
mdelay(1);
WREG32(CP_RB_CNTL, tmp);
- WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
+ WREG32(CP_RB_BASE, ring->gpu_addr >> 8);
WREG32(CP_DEBUG, (1 << 27) | (1 << 28));
- rdev->cp.rptr = RREG32(CP_RB_RPTR);
+ ring->rptr = RREG32(CP_RB_RPTR);
r600_cp_start(rdev);
- rdev->cp.ready = true;
- r = radeon_ring_test(rdev);
+ ring->ready = true;
+ r = radeon_ring_test(rdev, ring);
if (r) {
- rdev->cp.ready = false;
+ ring->ready = false;
return r;
}
return 0;
}
-void r600_cp_commit(struct radeon_device *rdev)
-{
- WREG32(CP_RB_WPTR, rdev->cp.wptr);
- (void)RREG32(CP_RB_WPTR);
-}
-
-void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
+void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size)
{
u32 rb_bufsz;
/* Align ring size */
rb_bufsz = drm_order(ring_size / 8);
ring_size = (1 << (rb_bufsz + 1)) * 4;
- rdev->cp.ring_size = ring_size;
- rdev->cp.align_mask = 16 - 1;
+ ring->ring_size = ring_size;
+ ring->align_mask = 16 - 1;
}
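The alignment in r600_ring_init is worth a worked example: drm_order() is the ceiling log2, so a requested size is rounded up to the next power of two that the ring-buffer encoding can express. For the 1 MiB GFX ring requested in r600_init() further down:

	/* ring_size = 1024 * 1024                requested: 1 MiB          */
	/* rb_bufsz  = drm_order(ring_size / 8)   drm_order(131072) = 17    */
	/* ring_size = (1 << (rb_bufsz + 1)) * 4  (1 << 18) * 4   = 1 MiB   */

which is the same rb_bufsz value that r600_cp_resume recomputes and programs into CP_RB_CNTL above.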
void r600_cp_fini(struct radeon_device *rdev)
{
r600_cp_stop(rdev);
- radeon_ring_fini(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
}
@@ -2271,11 +2267,11 @@ void r600_scratch_init(struct radeon_device *rdev)
}
}
-int r600_ring_test(struct radeon_device *rdev)
+int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
{
uint32_t scratch;
uint32_t tmp = 0;
- unsigned i;
+ unsigned i, ridx = radeon_ring_index(rdev, ring);
int r;
r = radeon_scratch_get(rdev, &scratch);
@@ -2284,16 +2280,16 @@ int r600_ring_test(struct radeon_device *rdev)
return r;
}
WREG32(scratch, 0xCAFEDEAD);
- r = radeon_ring_lock(rdev, 3);
+ r = radeon_ring_lock(rdev, ring, 3);
if (r) {
- DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
+ DRM_ERROR("radeon: cp failed to lock ring %d (%d).\n", ridx, r);
radeon_scratch_free(rdev, scratch);
return r;
}
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
- radeon_ring_write(rdev, 0xDEADBEEF);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+ radeon_ring_write(ring, 0xDEADBEEF);
+ radeon_ring_unlock_commit(rdev, ring);
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(scratch);
if (tmp == 0xDEADBEEF)
@@ -2301,10 +2297,10 @@ int r600_ring_test(struct radeon_device *rdev)
DRM_UDELAY(1);
}
if (i < rdev->usec_timeout) {
- DRM_INFO("ring test succeeded in %d usecs\n", i);
+ DRM_INFO("ring test on %d succeeded in %d usecs\n", ridx, i);
} else {
- DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
- scratch, tmp);
+ DRM_ERROR("radeon: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
+ ridx, scratch, tmp);
r = -EINVAL;
}
radeon_scratch_free(rdev, scratch);
@@ -2314,49 +2310,66 @@ int r600_ring_test(struct radeon_device *rdev)
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
+ struct radeon_ring *ring = &rdev->ring[fence->ring];
+
if (rdev->wb.use_event) {
- u64 addr = rdev->wb.gpu_addr + R600_WB_EVENT_OFFSET +
- (u64)(rdev->fence_drv.scratch_reg - rdev->scratch.reg_base);
+ u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
/* flush read cache over gart */
- radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
PACKET3_VC_ACTION_ENA |
PACKET3_SH_ACTION_ENA);
- radeon_ring_write(rdev, 0xFFFFFFFF);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 10); /* poll interval */
+ radeon_ring_write(ring, 0xFFFFFFFF);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 10); /* poll interval */
/* EVENT_WRITE_EOP - flush caches, send int */
- radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
- radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
- radeon_ring_write(rdev, addr & 0xffffffff);
- radeon_ring_write(rdev, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
- radeon_ring_write(rdev, fence->seq);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
+ radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT_TS) | EVENT_INDEX(5));
+ radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
+ radeon_ring_write(ring, fence->seq);
+ radeon_ring_write(ring, 0);
} else {
/* flush read cache over gart */
- radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(rdev, PACKET3_TC_ACTION_ENA |
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(ring, PACKET3_TC_ACTION_ENA |
PACKET3_VC_ACTION_ENA |
PACKET3_SH_ACTION_ENA);
- radeon_ring_write(rdev, 0xFFFFFFFF);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 10); /* poll interval */
- radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
- radeon_ring_write(rdev, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
+ radeon_ring_write(ring, 0xFFFFFFFF);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 10); /* poll interval */
+ radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
+ radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_EVENT) | EVENT_INDEX(0));
/* wait for 3D idle clean */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
/* Emit fence sequence & fire IRQ */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
- radeon_ring_write(rdev, fence->seq);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, ((rdev->fence_drv[fence->ring].scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
+ radeon_ring_write(ring, fence->seq);
/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
- radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
- radeon_ring_write(rdev, RB_INT_STAT);
+ radeon_ring_write(ring, PACKET0(CP_INT_STATUS, 0));
+ radeon_ring_write(ring, RB_INT_STAT);
}
}
+void r600_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ uint64_t addr = semaphore->gpu_addr;
+ unsigned sel = emit_wait ? PACKET3_SEM_SEL_WAIT : PACKET3_SEM_SEL_SIGNAL;
+
+ if (rdev->family < CHIP_CAYMAN)
+ sel |= PACKET3_SEM_WAIT_ON_SIGNAL;
+
+ radeon_ring_write(ring, PACKET3(PACKET3_MEM_SEMAPHORE, 1));
+ radeon_ring_write(ring, addr & 0xffffffff);
+ radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
+}
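The new semaphore packet is the building block for cross-ring synchronization: one ring emits the semaphore address with SEM_SEL_SIGNAL, the ring that must not run ahead emits the same address with SEM_SEL_WAIT. A hedged usage sketch; allocation of the semaphore object is not shown here, and each emit writes three dwords, so callers need to reserve at least that much with radeon_ring_lock:

	/* Make ring_b wait until ring_a has passed this point.  'sem' is
	 * assumed to be a struct radeon_semaphore whose gpu_addr points at
	 * GPU-visible memory (allocation not part of this hunk). */
	r600_semaphore_ring_emit(rdev, ring_a, sem, false);	/* signal */
	r600_semaphore_ring_emit(rdev, ring_b, sem, true);	/* wait   */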
+
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset,
uint64_t dst_offset,
@@ -2409,6 +2422,7 @@ void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
int r600_startup(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
/* enable pcie gen2 link */
@@ -2447,6 +2461,12 @@ int r600_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -2456,7 +2476,10 @@ int r600_startup(struct radeon_device *rdev)
}
r600_irq_set(rdev);
- r = radeon_ring_init(rdev, rdev->cp.ring_size);
+ r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+ R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
+
if (r)
return r;
r = r600_cp_load_microcode(rdev);
@@ -2466,6 +2489,17 @@ int r600_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
+ return r;
+ }
+
return 0;
}
@@ -2494,15 +2528,11 @@ int r600_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
+ rdev->accel_working = true;
r = r600_startup(rdev);
if (r) {
DRM_ERROR("r600 startup failed on resume\n");
- return r;
- }
-
- r = r600_ib_test(rdev);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
return r;
}
@@ -2518,13 +2548,14 @@ int r600_resume(struct radeon_device *rdev)
int r600_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
+ radeon_ib_pool_suspend(rdev);
+ r600_blit_suspend(rdev);
/* FIXME: we should wait for ring to be empty */
r600_cp_stop(rdev);
- rdev->cp.ready = false;
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
r600_irq_suspend(rdev);
radeon_wb_disable(rdev);
r600_pcie_gart_disable(rdev);
- r600_blit_suspend(rdev);
return 0;
}
@@ -2595,8 +2626,8 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
- rdev->cp.ring_obj = NULL;
- r600_ring_init(rdev, 1024 * 1024);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -2605,30 +2636,24 @@ int r600_init(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = r600_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r600_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
+ r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
- if (rdev->accel_working) {
- r = radeon_ib_pool_init(rdev);
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- } else {
- r = r600_ib_test(rdev);
- if (r) {
- dev_err(rdev->dev, "IB test failed (%d).\n", r);
- rdev->accel_working = false;
- }
- }
- }
r = r600_audio_init(rdev);
if (r)
@@ -2643,12 +2668,13 @@ void r600_fini(struct radeon_device *rdev)
r600_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- radeon_ib_pool_fini(rdev);
+ r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
r600_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_agp_fini(rdev);
radeon_gem_fini(rdev);
+ radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_bo_fini(rdev);
radeon_atombios_fini(rdev);
@@ -2662,18 +2688,20 @@ void r600_fini(struct radeon_device *rdev)
*/
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
+ struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
+
/* FIXME: implement */
- radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(ib->gpu_addr & 0xFFFFFFFC));
- radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
- radeon_ring_write(rdev, ib->length_dw);
+ radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFF);
+ radeon_ring_write(ring, ib->length_dw);
}
-int r600_ib_test(struct radeon_device *rdev)
+int r600_ib_test(struct radeon_device *rdev, int ring)
{
struct radeon_ib *ib;
uint32_t scratch;
@@ -2687,7 +2715,7 @@ int r600_ib_test(struct radeon_device *rdev)
return r;
}
WREG32(scratch, 0xCAFEDEAD);
- r = radeon_ib_get(rdev, &ib);
+ r = radeon_ib_get(rdev, ring, &ib, 256);
if (r) {
DRM_ERROR("radeon: failed to get ib (%d).\n", r);
return r;
@@ -2728,7 +2756,7 @@ int r600_ib_test(struct radeon_device *rdev)
DRM_UDELAY(1);
}
if (i < rdev->usec_timeout) {
- DRM_INFO("ib test succeeded in %u usecs\n", i);
+ DRM_INFO("ib test on ring %d succeeded in %u usecs\n", ib->fence->ring, i);
} else {
DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
scratch, tmp);
@@ -3075,7 +3103,7 @@ int r600_irq_set(struct radeon_device *rdev)
hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
}
- if (rdev->irq.sw_int) {
+ if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
DRM_DEBUG("r600_irq_set: sw int\n");
cp_int_cntl |= RB_INT_ENABLE;
cp_int_cntl |= TIME_STAMP_INT_ENABLE;
@@ -3459,11 +3487,11 @@ restart_ih:
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
- radeon_fence_process(rdev);
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
case 181: /* CP EOP event */
DRM_DEBUG("IH: CP EOP\n");
- radeon_fence_process(rdev);
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
break;
case 233: /* GUI IDLE */
DRM_DEBUG("IH: GUI idle\n");
@@ -3496,30 +3524,6 @@ restart_ih:
*/
#if defined(CONFIG_DEBUG_FS)
-static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- struct radeon_device *rdev = dev->dev_private;
- unsigned count, i, j;
-
- radeon_ring_free_size(rdev);
- count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
- seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
- seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
- seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
- seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
- seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
- seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
- seq_printf(m, "%u dwords in ring\n", count);
- i = rdev->cp.rptr;
- for (j = 0; j <= count; j++) {
- seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
- i = (i + 1) & rdev->cp.ptr_mask;
- }
- return 0;
-}
-
static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -3533,7 +3537,6 @@ static int r600_debugfs_mc_info(struct seq_file *m, void *data)
static struct drm_info_list r600_mc_info_list[] = {
{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
- {"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
};
#endif
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 846fae5..ba66f30 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -36,7 +36,7 @@
*/
static int r600_audio_chipset_supported(struct radeon_device *rdev)
{
- return (rdev->family >= CHIP_R600 && rdev->family < CHIP_CEDAR)
+ return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE5(rdev))
|| rdev->family == CHIP_RS600
|| rdev->family == CHIP_RS690
|| rdev->family == CHIP_RS740;
@@ -161,8 +161,18 @@ static void r600_audio_update_hdmi(unsigned long param)
*/
static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
{
+ u32 value = 0;
DRM_INFO("%s audio support\n", enable ? "Enabling" : "Disabling");
- WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000);
+ if (ASIC_IS_DCE4(rdev)) {
+ if (enable) {
+ value |= 0x81000000; /* Required to enable audio */
+ value |= 0x0e1000f0; /* fglrx sets that too */
+ }
+ WREG32(EVERGREEN_AUDIO_ENABLE, value);
+ } else {
+ WREG32_P(R600_AUDIO_ENABLE,
+ enable ? 0x81000000 : 0x0, ~0x81000000);
+ }
rdev->audio_enabled = enable;
}
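On the non-DCE4 path the enable bit is still flipped with WREG32_P, the driver's masked read-modify-write helper, while the DCE4 path writes the whole register. Roughly what the helper does, as a paraphrase rather than its exact definition:

	/* WREG32_P(reg, val, mask): keep the bits selected by 'mask',
	 * merge in 'val', write back.  Approximately: */
	u32 tmp = RREG32(reg);
	tmp &= mask;		/* here mask is ~0x81000000: preserve other bits */
	tmp |= val;		/* 0x81000000 to enable, 0 to disable */
	WREG32(reg, tmp);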
@@ -248,22 +258,33 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
return;
}
- switch (dig->dig_encoder) {
- case 0:
- WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
- WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
- WREG32(R600_AUDIO_CLK_SRCSEL, 0);
- break;
-
- case 1:
- WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
- WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
- WREG32(R600_AUDIO_CLK_SRCSEL, 1);
- break;
- default:
- dev_err(rdev->dev, "Unsupported DIG on encoder 0x%02X\n",
- radeon_encoder->encoder_id);
- return;
+ if (ASIC_IS_DCE4(rdev)) {
+ /* TODO: other PLLs? */
+ WREG32(EVERGREEN_AUDIO_PLL1_MUL, base_rate * 10);
+ WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10);
+ WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071);
+
+ /* Some magic trigger or src sel? */
+ WREG32_P(0x5ac, 0x01, ~0x77);
+ } else {
+ switch (dig->dig_encoder) {
+ case 0:
+ WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
+ WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
+ WREG32(R600_AUDIO_CLK_SRCSEL, 0);
+ break;
+
+ case 1:
+ WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
+ WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
+ WREG32(R600_AUDIO_CLK_SRCSEL, 1);
+ break;
+ default:
+ dev_err(rdev->dev,
+ "Unsupported DIG on encoder 0x%02X\n",
+ radeon_encoder->encoder_id);
+ return;
+ }
}
}
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index e09d281..accc032 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -50,6 +50,7 @@ static void
set_render_target(struct radeon_device *rdev, int format,
int w, int h, u64 gpu_addr)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 cb_color_info;
int pitch, slice;
@@ -63,38 +64,38 @@ set_render_target(struct radeon_device *rdev, int format,
pitch = (w / 8) - 1;
slice = ((w * h) / 64) - 1;
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, gpu_addr >> 8);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, gpu_addr >> 8);
if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) {
- radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
- radeon_ring_write(rdev, 2 << 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0));
+ radeon_ring_write(ring, 2 << 0);
}
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, (pitch << 0) | (slice << 10));
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, (pitch << 0) | (slice << 10));
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, 0);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, cb_color_info);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, cb_color_info);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, 0);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, 0);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, 0);
}
/* emits 5dw */
@@ -103,6 +104,7 @@ cp_set_surface_sync(struct radeon_device *rdev,
u32 sync_type, u32 size,
u64 mc_addr)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 cp_coher_size;
if (size == 0xffffffff)
@@ -110,17 +112,18 @@ cp_set_surface_sync(struct radeon_device *rdev,
else
cp_coher_size = ((size + 255) >> 8);
- radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3));
- radeon_ring_write(rdev, sync_type);
- radeon_ring_write(rdev, cp_coher_size);
- radeon_ring_write(rdev, mc_addr >> 8);
- radeon_ring_write(rdev, 10); /* poll interval */
+ radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
+ radeon_ring_write(ring, sync_type);
+ radeon_ring_write(ring, cp_coher_size);
+ radeon_ring_write(ring, mc_addr >> 8);
+ radeon_ring_write(ring, 10); /* poll interval */
}
/* emits 21dw + 1 surface sync = 26dw */
static void
set_shaders(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u64 gpu_addr;
u32 sq_pgm_resources;
@@ -129,35 +132,35 @@ set_shaders(struct radeon_device *rdev)
/* VS */
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, gpu_addr >> 8);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, sq_pgm_resources);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, sq_pgm_resources);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, 0);
/* PS */
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset;
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, gpu_addr >> 8);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, gpu_addr >> 8);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, sq_pgm_resources | (1 << 28));
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, sq_pgm_resources | (1 << 28));
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, 2);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, 2);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
- radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, 0);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 1));
+ radeon_ring_write(ring, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, 0);
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset;
cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr);
@@ -167,6 +170,7 @@ set_shaders(struct radeon_device *rdev)
static void
set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_vtx_constant_word2;
sq_vtx_constant_word2 = SQ_VTXC_BASE_ADDR_HI(upper_32_bits(gpu_addr) & 0xff) |
@@ -175,15 +179,15 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
sq_vtx_constant_word2 |= SQ_VTXC_ENDIAN_SWAP(SQ_ENDIAN_8IN32);
#endif
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
- radeon_ring_write(rdev, 0x460);
- radeon_ring_write(rdev, gpu_addr & 0xffffffff);
- radeon_ring_write(rdev, 48 - 1);
- radeon_ring_write(rdev, sq_vtx_constant_word2);
- radeon_ring_write(rdev, 1 << 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
+ radeon_ring_write(ring, 0x460);
+ radeon_ring_write(ring, gpu_addr & 0xffffffff);
+ radeon_ring_write(ring, 48 - 1);
+ radeon_ring_write(ring, sq_vtx_constant_word2);
+ radeon_ring_write(ring, 1 << 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, SQ_TEX_VTX_VALID_BUFFER << 30);
if ((rdev->family == CHIP_RV610) ||
(rdev->family == CHIP_RV620) ||
@@ -203,6 +207,7 @@ set_tex_resource(struct radeon_device *rdev,
int format, int w, int h, int pitch,
u64 gpu_addr, u32 size)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4;
if (h < 1)
@@ -225,15 +230,15 @@ set_tex_resource(struct radeon_device *rdev,
cp_set_surface_sync(rdev,
PACKET3_TC_ACTION_ENA, size, gpu_addr);
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, sq_tex_resource_word0);
- radeon_ring_write(rdev, sq_tex_resource_word1);
- radeon_ring_write(rdev, gpu_addr >> 8);
- radeon_ring_write(rdev, gpu_addr >> 8);
- radeon_ring_write(rdev, sq_tex_resource_word4);
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, SQ_TEX_VTX_VALID_TEXTURE << 30);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_RESOURCE, 7));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, sq_tex_resource_word0);
+ radeon_ring_write(ring, sq_tex_resource_word1);
+ radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(ring, gpu_addr >> 8);
+ radeon_ring_write(ring, sq_tex_resource_word4);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, SQ_TEX_VTX_VALID_TEXTURE << 30);
}
/* emits 12 */
@@ -241,43 +246,45 @@ static void
set_scissors(struct radeon_device *rdev, int x1, int y1,
int x2, int y2)
{
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
- radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
-
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
- radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
-
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
- radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31));
- radeon_ring_write(rdev, (x2 << 0) | (y2 << 16));
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(ring, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, (x1 << 0) | (y1 << 16));
+ radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(ring, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+ radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
+
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
+ radeon_ring_write(ring, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, (x1 << 0) | (y1 << 16) | (1 << 31));
+ radeon_ring_write(ring, (x2 << 0) | (y2 << 16));
}
/* emits 10 */
static void
draw_auto(struct radeon_device *rdev)
{
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, DI_PT_RECTLIST);
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(ring, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, DI_PT_RECTLIST);
- radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET3(PACKET3_INDEX_TYPE, 0));
+ radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 2) |
#endif
DI_INDEX_SIZE_16_BIT);
- radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
- radeon_ring_write(rdev, 1);
+ radeon_ring_write(ring, PACKET3(PACKET3_NUM_INSTANCES, 0));
+ radeon_ring_write(ring, 1);
- radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
- radeon_ring_write(rdev, 3);
- radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX);
+ radeon_ring_write(ring, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1));
+ radeon_ring_write(ring, 3);
+ radeon_ring_write(ring, DI_SRC_SEL_AUTO_INDEX);
}
@@ -285,6 +292,7 @@ draw_auto(struct radeon_device *rdev)
static void
set_default_state(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2;
u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2;
int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs;
@@ -440,47 +448,62 @@ set_default_state(struct radeon_device *rdev)
/* emit an IB pointing at default state */
dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
- radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
+ radeon_ring_write(ring,
#ifdef __BIG_ENDIAN
(2 << 0) |
#endif
(gpu_addr & 0xFFFFFFFC));
- radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
- radeon_ring_write(rdev, dwords);
+ radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xFF);
+ radeon_ring_write(ring, dwords);
/* SQ config */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6));
- radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, sq_config);
- radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
- radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
- radeon_ring_write(rdev, sq_thread_resource_mgmt);
- radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
- radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
+ radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 6));
+ radeon_ring_write(ring, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ radeon_ring_write(ring, sq_config);
+ radeon_ring_write(ring, sq_gpr_resource_mgmt_1);
+ radeon_ring_write(ring, sq_gpr_resource_mgmt_2);
+ radeon_ring_write(ring, sq_thread_resource_mgmt);
+ radeon_ring_write(ring, sq_stack_resource_mgmt_1);
+ radeon_ring_write(ring, sq_stack_resource_mgmt_2);
}
+#define I2F_MAX_BITS 15
+#define I2F_MAX_INPUT ((1 << I2F_MAX_BITS) - 1)
+#define I2F_SHIFT (24 - I2F_MAX_BITS)
+
+/*
+ * Converts an unsigned integer into its 32-bit IEEE floating point
+ * representation. The conversion is not universal and only works for the
+ * range from 0 to 2^I2F_MAX_BITS-1. Currently we only use it with inputs
+ * between 0 and 16384 (inclusive), so I2F_MAX_BITS=15 is enough. If necessary,
+ * I2F_MAX_BITS can be increased, but that will add to the loop iterations
+ * and slow us down. Conversion is done by shifting the input and counting
+ * down until the first 1 reaches bit position 23. The resulting counter
+ * and the shifted input are, respectively, the exponent and the fraction.
+ * The sign is always zero.
+ */
static uint32_t i2f(uint32_t input)
{
u32 result, i, exponent, fraction;
- if ((input & 0x3fff) == 0)
- result = 0; /* 0 is a special case */
+ WARN_ON_ONCE(input > I2F_MAX_INPUT);
+
+ if ((input & I2F_MAX_INPUT) == 0)
+ result = 0;
else {
- exponent = 140; /* exponent biased by 127; */
- fraction = (input & 0x3fff) << 10; /* cheat and only
- handle numbers below 2^^15 */
- for (i = 0; i < 14; i++) {
+ exponent = 126 + I2F_MAX_BITS;
+ fraction = (input & I2F_MAX_INPUT) << I2F_SHIFT;
+
+ for (i = 0; i < I2F_MAX_BITS; i++) {
if (fraction & 0x800000)
break;
else {
- fraction = fraction << 1; /* keep
- shifting left until top bit = 1 */
+ fraction = fraction << 1;
exponent = exponent - 1;
}
}
- result = exponent << 23 | (fraction & 0x7fffff); /* mask
- off top bit; assumed 1 */
+ result = exponent << 23 | (fraction & 0x7fffff);
}
return result;
}
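Editorial aside: a standalone userspace sketch of the shift-and-count
conversion implemented by i2f() above, with a few worked values. The name
i2f_sketch and the checked constants are illustrative only and not part of
the patch; the function simply mirrors the integer algorithm so the expected
bit patterns (e.g. 5 -> 0x40a00000, the encoding of 5.0f) can be verified
outside the kernel.

#include <assert.h>
#include <stdint.h>

#define I2F_MAX_BITS  15
#define I2F_MAX_INPUT ((1 << I2F_MAX_BITS) - 1)
#define I2F_SHIFT     (24 - I2F_MAX_BITS)

static uint32_t i2f_sketch(uint32_t input)
{
	uint32_t exponent, fraction;
	int i;

	if ((input & I2F_MAX_INPUT) == 0)
		return 0;

	/* 127 + (I2F_MAX_BITS - 1): exponent if the top input bit is set */
	exponent = 126 + I2F_MAX_BITS;
	fraction = (input & I2F_MAX_INPUT) << I2F_SHIFT;
	for (i = 0; i < I2F_MAX_BITS; i++) {
		if (fraction & 0x800000)	/* implicit 1 reached bit 23 */
			break;
		fraction <<= 1;
		exponent--;
	}
	return exponent << 23 | (fraction & 0x7fffff);
}

int main(void)
{
	assert(i2f_sketch(0)     == 0x00000000);	/* 0.0f     */
	assert(i2f_sketch(1)     == 0x3f800000);	/* 1.0f     */
	assert(i2f_sketch(5)     == 0x40a00000);	/* 5.0f     */
	assert(i2f_sketch(16384) == 0x46800000);	/* 16384.0f */
	return 0;
}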
@@ -611,16 +634,17 @@ void r600_blit_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->r600_blit.shader_obj);
}
-static int r600_vb_ib_get(struct radeon_device *rdev)
+static int r600_vb_ib_get(struct radeon_device *rdev, unsigned size)
{
int r;
- r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
+ r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX,
+ &rdev->r600_blit.vb_ib, size);
if (r) {
DRM_ERROR("failed to get IB for vertex buffer\n");
return r;
}
- rdev->r600_blit.vb_total = 64*1024;
+ rdev->r600_blit.vb_total = size;
rdev->r600_blit.vb_used = 0;
return 0;
}
@@ -679,15 +703,12 @@ static unsigned r600_blit_create_rect(unsigned num_gpu_pages,
int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
int ring_size;
int num_loops = 0;
int dwords_per_loop = rdev->r600_blit.ring_size_per_loop;
- r = r600_vb_ib_get(rdev);
- if (r)
- return r;
-
/* num loops */
while (num_gpu_pages) {
num_gpu_pages -=
@@ -696,10 +717,15 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, unsigned num_gpu_pages)
num_loops++;
}
+ /* 48 bytes of vertex data per loop */
+ r = r600_vb_ib_get(rdev, (num_loops*48)+256);
+ if (r)
+ return r;
+
/* calculate number of loops correctly */
ring_size = num_loops * dwords_per_loop;
ring_size += rdev->r600_blit.ring_size_common;
- r = radeon_ring_lock(rdev, ring_size);
+ r = radeon_ring_lock(rdev, ring, ring_size);
if (r)
return r;
@@ -718,7 +744,7 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
if (fence)
r = radeon_fence_emit(rdev, fence);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_unlock_commit(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
}
void r600_kms_blit_copy(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
index 2d1f6c5..73e2c7c 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -314,6 +314,10 @@ const u32 r6xx_default_state[] =
0x00000000, /* VGT_VTX_CNT_EN */
0xc0016900,
+ 0x000000d4,
+ 0x00000000, /* SX_MISC */
+
+ 0xc0016900,
0x000002c8,
0x00000000, /* VGT_STRMOUT_BUFFER_EN */
@@ -626,6 +630,10 @@ const u32 r7xx_default_state[] =
0x00000000, /* VGT_VTX_CNT_EN */
0xc0016900,
+ 0x000000d4,
+ 0x00000000, /* SX_MISC */
+
+ 0xc0016900,
0x000002c8,
0x00000000, /* VGT_STRMOUT_BUFFER_EN */
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index c9db493..84c5462 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -1815,7 +1815,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
dev_priv->ring.size_l2qw);
#endif
- RADEON_WRITE(R600_CP_SEM_WAIT_TIMER, 0x4);
+ RADEON_WRITE(R600_CP_SEM_WAIT_TIMER, 0x0);
/* Set the write pointer delay */
RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0);
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index cb1acff..387fcc9 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -941,7 +941,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
track->db_depth_control = radeon_get_ib_value(p, idx);
break;
case R_028010_DB_DEPTH_INFO:
- if (!p->keep_tiling_flags &&
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
r600_cs_packet_next_is_pkt3_nop(p)) {
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
@@ -993,7 +993,7 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_0280B4_CB_COLOR5_INFO:
case R_0280B8_CB_COLOR6_INFO:
case R_0280BC_CB_COLOR7_INFO:
- if (!p->keep_tiling_flags &&
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) &&
r600_cs_packet_next_is_pkt3_nop(p)) {
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
@@ -1293,7 +1293,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
mip_offset <<= 8;
word0 = radeon_get_ib_value(p, idx + 0);
- if (!p->keep_tiling_flags) {
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
if (tiling_flags & RADEON_TILING_MACRO)
word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
else if (tiling_flags & RADEON_TILING_MICRO)
@@ -1304,6 +1304,7 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
h0 = G_038004_TEX_HEIGHT(word1) + 1;
d0 = G_038004_TEX_DEPTH(word1);
nfaces = 1;
+ array = 0;
switch (G_038000_DIM(word0)) {
case V_038000_SQ_TEX_DIM_1D:
case V_038000_SQ_TEX_DIM_2D:
@@ -1625,7 +1626,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- if (!p->keep_tiling_flags) {
+ if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) {
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index f5ac7e7..0b59206 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -196,6 +196,13 @@ static void r600_hdmi_videoinfoframe(
frame[0xD] = (right_bar >> 8);
r600_hdmi_infoframe_checksum(0x82, 0x02, 0x0D, frame);
+ /* Our header values (type, version, length) should be alright; Intel
+ * uses the same ones. The checksum function also seems to be OK, since
+ * it works fine for the audio infoframe. However, the calculated value
+ * is always lower by 2 than the one fglrx produces, which breaks the
+ * display entirely on TVs that strictly check the checksum. Hack it
+ * manually here to work around this issue. */
+ frame[0x0] += 2;
WREG32(offset+R600_HDMI_VIDEOINFOFRAME_0,
frame[0x0] | (frame[0x1] << 8) | (frame[0x2] << 16) | (frame[0x3] << 24));
@@ -313,7 +320,7 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
struct radeon_device *rdev = dev->dev_private;
uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
- if (ASIC_IS_DCE4(rdev))
+ if (ASIC_IS_DCE5(rdev))
return;
if (!offset)
@@ -455,13 +462,31 @@ static void r600_hdmi_assign_block(struct drm_encoder *encoder)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ u16 eg_offsets[] = {
+ EVERGREEN_CRTC0_REGISTER_OFFSET,
+ EVERGREEN_CRTC1_REGISTER_OFFSET,
+ EVERGREEN_CRTC2_REGISTER_OFFSET,
+ EVERGREEN_CRTC3_REGISTER_OFFSET,
+ EVERGREEN_CRTC4_REGISTER_OFFSET,
+ EVERGREEN_CRTC5_REGISTER_OFFSET,
+ };
+
if (!dig) {
dev_err(rdev->dev, "Enabling HDMI on non-dig encoder\n");
return;
}
- if (ASIC_IS_DCE4(rdev)) {
+ if (ASIC_IS_DCE5(rdev)) {
/* TODO */
+ } else if (ASIC_IS_DCE4(rdev)) {
+ if (dig->dig_encoder >= ARRAY_SIZE(eg_offsets)) {
+ dev_err(rdev->dev, "Enabling HDMI on unknown dig\n");
+ return;
+ }
+ radeon_encoder->hdmi_offset = EVERGREEN_HDMI_BASE +
+ eg_offsets[dig->dig_encoder];
+ radeon_encoder->hdmi_config_offset = radeon_encoder->hdmi_offset
+ + EVERGREEN_HDMI_CONFIG_OFFSET;
} else if (ASIC_IS_DCE3(rdev)) {
radeon_encoder->hdmi_offset = dig->dig_encoder ?
R600_HDMI_BLOCK3 : R600_HDMI_BLOCK1;
@@ -484,7 +509,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t offset;
- if (ASIC_IS_DCE4(rdev))
+ if (ASIC_IS_DCE5(rdev))
return;
if (!radeon_encoder->hdmi_offset) {
@@ -497,16 +522,24 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
}
offset = radeon_encoder->hdmi_offset;
- if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
+ if (ASIC_IS_DCE5(rdev)) {
+ /* TODO */
+ } else if (ASIC_IS_DCE4(rdev)) {
+ WREG32_P(radeon_encoder->hdmi_config_offset + 0xc, 0x1, ~0x1);
+ } else if (ASIC_IS_DCE32(rdev)) {
WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
- } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
+ } else if (ASIC_IS_DCE3(rdev)) {
+ /* TODO */
+ } else if (rdev->family >= CHIP_R600) {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4);
+ WREG32_P(AVIVO_TMDSA_CNTL, AVIVO_TMDSA_CNTL_HDMI_EN,
+ ~AVIVO_TMDSA_CNTL_HDMI_EN);
WREG32(offset + R600_HDMI_ENABLE, 0x101);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- WREG32_P(AVIVO_LVTMA_CNTL, 0x4, ~0x4);
+ WREG32_P(AVIVO_LVTMA_CNTL, AVIVO_LVTMA_CNTL_HDMI_EN,
+ ~AVIVO_LVTMA_CNTL_HDMI_EN);
WREG32(offset + R600_HDMI_ENABLE, 0x105);
break;
default:
@@ -518,8 +551,8 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
if (rdev->irq.installed
&& rdev->family != CHIP_RS600
&& rdev->family != CHIP_RS690
- && rdev->family != CHIP_RS740) {
-
+ && rdev->family != CHIP_RS740
+ && !ASIC_IS_DCE4(rdev)) {
/* if irq is available use it */
rdev->irq.hdmi[offset == R600_HDMI_BLOCK1 ? 0 : 1] = true;
radeon_irq_set(rdev);
@@ -544,7 +577,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t offset;
- if (ASIC_IS_DCE4(rdev))
+ if (ASIC_IS_DCE5(rdev))
return;
offset = radeon_encoder->hdmi_offset;
@@ -563,16 +596,22 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
/* disable polling */
r600_audio_disable_polling(encoder);
- if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
+ if (ASIC_IS_DCE5(rdev)) {
+ /* TODO */
+ } else if (ASIC_IS_DCE4(rdev)) {
+ WREG32_P(radeon_encoder->hdmi_config_offset + 0xc, 0, ~0x1);
+ } else if (ASIC_IS_DCE32(rdev)) {
WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
} else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4);
+ WREG32_P(AVIVO_TMDSA_CNTL, 0,
+ ~AVIVO_TMDSA_CNTL_HDMI_EN);
WREG32(offset + R600_HDMI_ENABLE, 0);
break;
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- WREG32_P(AVIVO_LVTMA_CNTL, 0, ~0x4);
+ WREG32_P(AVIVO_LVTMA_CNTL, 0,
+ ~AVIVO_LVTMA_CNTL_HDMI_EN);
WREG32(offset + R600_HDMI_ENABLE, 0);
break;
default:
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index bfe1b5d..9b23670 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -831,6 +831,9 @@
#define PACKET3_STRMOUT_BUFFER_UPDATE 0x34
#define PACKET3_INDIRECT_BUFFER_MP 0x38
#define PACKET3_MEM_SEMAPHORE 0x39
+# define PACKET3_SEM_WAIT_ON_SIGNAL (0x1 << 12)
+# define PACKET3_SEM_SEL_SIGNAL (0x6 << 29)
+# define PACKET3_SEM_SEL_WAIT (0x7 << 29)
#define PACKET3_MPEG_INDEX 0x3A
#define PACKET3_WAIT_REG_MEM 0x3C
#define PACKET3_MEM_WRITE 0x3D
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 8227e76..1668ec1 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -107,6 +107,21 @@ extern int radeon_msi;
#define RADEONFB_CONN_LIMIT 4
#define RADEON_BIOS_NUM_SCRATCH 8
+/* max number of rings */
+#define RADEON_NUM_RINGS 3
+
+/* internal ring indices */
+/* r1xx+ has gfx CP ring */
+#define RADEON_RING_TYPE_GFX_INDEX 0
+
+/* cayman has 2 compute CP rings */
+#define CAYMAN_RING_TYPE_CP1_INDEX 1
+#define CAYMAN_RING_TYPE_CP2_INDEX 2
+
+/* hardcode these limits for now */
+#define RADEON_VA_RESERVED_SIZE (8 << 20)
+#define RADEON_IB_VM_MAX_SIZE (64 << 10)
+
/*
* Errata workarounds.
*/
@@ -142,6 +157,47 @@ bool radeon_get_bios(struct radeon_device *rdev);
/*
+ * Mutex which allows recursive locking from the same process.
+ */
+struct radeon_mutex {
+ struct mutex mutex;
+ struct task_struct *owner;
+ int level;
+};
+
+static inline void radeon_mutex_init(struct radeon_mutex *mutex)
+{
+ mutex_init(&mutex->mutex);
+ mutex->owner = NULL;
+ mutex->level = 0;
+}
+
+static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
+{
+ if (mutex_trylock(&mutex->mutex)) {
+ /* The mutex was unlocked before, so it's ours now */
+ mutex->owner = current;
+ } else if (mutex->owner != current) {
+ /* Another process locked the mutex, take it */
+ mutex_lock(&mutex->mutex);
+ mutex->owner = current;
+ }
+ /* Otherwise the mutex was already locked by this process */
+
+ mutex->level++;
+}
+
+static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
+{
+ if (--mutex->level > 0)
+ return;
+
+ mutex->owner = NULL;
+ mutex_unlock(&mutex->mutex);
+}
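Editorial aside: a minimal usage sketch for the recursive mutex defined
above. The helpers example_inner()/example_outer() are hypothetical and not
part of the patch; the point is that lock calls from the owning task nest
by bumping "level", and every lock must still be balanced by an unlock.

static void example_inner(struct radeon_mutex *m)
{
	/* Re-entry from the owning task only increments the level counter. */
	radeon_mutex_lock(m);	/* level 2, no deadlock     */
	radeon_mutex_unlock(m);	/* back down to level 1     */
}

static void example_outer(struct radeon_mutex *m)
{
	radeon_mutex_lock(m);	/* level 1, owner = current */
	example_inner(m);
	radeon_mutex_unlock(m);	/* level 0, mutex released  */
}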
+
+
+/*
* Dummy page
*/
struct radeon_dummy_page {
@@ -192,14 +248,15 @@ extern int sumo_get_temp(struct radeon_device *rdev);
*/
struct radeon_fence_driver {
uint32_t scratch_reg;
+ uint64_t gpu_addr;
+ volatile uint32_t *cpu_addr;
atomic_t seq;
uint32_t last_seq;
unsigned long last_jiffies;
unsigned long last_timeout;
wait_queue_head_t queue;
- rwlock_t lock;
struct list_head created;
- struct list_head emited;
+ struct list_head emitted;
struct list_head signaled;
bool initialized;
};
@@ -210,21 +267,26 @@ struct radeon_fence {
struct list_head list;
/* protected by radeon_fence.lock */
uint32_t seq;
- bool emited;
+ bool emitted;
bool signaled;
+ /* RB, DMA, etc. */
+ int ring;
+ struct radeon_semaphore *semaphore;
};
+int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring);
int radeon_fence_driver_init(struct radeon_device *rdev);
void radeon_fence_driver_fini(struct radeon_device *rdev);
-int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence);
+int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence, int ring);
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence);
-void radeon_fence_process(struct radeon_device *rdev);
+void radeon_fence_process(struct radeon_device *rdev, int ring);
bool radeon_fence_signaled(struct radeon_fence *fence);
int radeon_fence_wait(struct radeon_fence *fence, bool interruptible);
-int radeon_fence_wait_next(struct radeon_device *rdev);
-int radeon_fence_wait_last(struct radeon_device *rdev);
+int radeon_fence_wait_next(struct radeon_device *rdev, int ring);
+int radeon_fence_wait_last(struct radeon_device *rdev, int ring);
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence);
void radeon_fence_unref(struct radeon_fence **fence);
+int radeon_fence_count_emitted(struct radeon_device *rdev, int ring);
/*
* Tiling registers
@@ -246,6 +308,21 @@ struct radeon_mman {
bool initialized;
};
+/* bo virtual address in a specific vm */
+struct radeon_bo_va {
+ /* bo list is protected by bo being reserved */
+ struct list_head bo_list;
+ /* vm list is protected by vm mutex */
+ struct list_head vm_list;
+ /* constant after initialization */
+ struct radeon_vm *vm;
+ struct radeon_bo *bo;
+ uint64_t soffset;
+ uint64_t eoffset;
+ uint32_t flags;
+ bool valid;
+};
+
struct radeon_bo {
/* Protected by gem.mutex */
struct list_head list;
@@ -259,6 +336,10 @@ struct radeon_bo {
u32 tiling_flags;
u32 pitch;
int surface_reg;
+ /* list of all virtual addresses to which this bo is associated */
+ struct list_head va;
/* Constant after initialization */
struct radeon_device *rdev;
struct drm_gem_object gem_base;
@@ -274,6 +355,48 @@ struct radeon_bo_list {
u32 tiling_flags;
};
+/* Sub-allocation manager; it has to be protected by another lock.
+ * By design this is a helper for other parts of the driver, such as
+ * the indirect buffer or semaphore code, which both have their own
+ * locking.
+ *
+ * The principle is simple: we keep a list of sub-allocations in offset
+ * order (the first entry has offset == 0, the last entry has the
+ * highest offset).
+ *
+ * When allocating a new object we first check whether there is room at
+ * the end, i.e. total_size - (last_object_offset + last_object_size) >=
+ * alloc_size. If so, we allocate the new object there.
+ *
+ * When there is not enough room at the end, we start waiting on each
+ * sub-object until we reach object_offset + object_size >= alloc_size;
+ * that object then becomes the sub-object we return.
+ *
+ * Alignment can't be bigger than the page size.
+ *
+ * Holes are not considered for allocation, to keep things simple.
+ * The assumption is that there won't be any holes (all objects use the
+ * same alignment). A short standalone sketch of the end-of-buffer test
+ * follows the struct definitions below.
+ */
+struct radeon_sa_manager {
+ struct radeon_bo *bo;
+ struct list_head sa_bo;
+ unsigned size;
+ uint64_t gpu_addr;
+ void *cpu_ptr;
+ uint32_t domain;
+};
+
+struct radeon_sa_bo;
+
+/* sub-allocation buffer */
+struct radeon_sa_bo {
+ struct list_head list;
+ struct radeon_sa_manager *manager;
+ unsigned offset;
+ unsigned size;
+};
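Editorial aside: a standalone sketch (not driver code) of the end-of-buffer
test described in the sub-allocation comment above. The struct and function
names are made up for illustration; the real manager walks a list of
radeon_sa_bo entries, but the arithmetic is the same.

#include <stdbool.h>
#include <stdio.h>

struct sa_sketch {
	unsigned total_size;	/* size of the backing buffer     */
	unsigned last_offset;	/* offset of the last sub-object  */
	unsigned last_size;	/* size of the last sub-object    */
};

/* A new object fits at the end when the remaining tail is big enough. */
static bool sa_fits_at_end(const struct sa_sketch *sa, unsigned alloc_size)
{
	return sa->total_size - (sa->last_offset + sa->last_size) >= alloc_size;
}

int main(void)
{
	struct sa_sketch sa = {
		.total_size = 4096, .last_offset = 3072, .last_size = 512,
	};

	printf("%d\n", sa_fits_at_end(&sa, 512));	/* 1: exactly fits       */
	printf("%d\n", sa_fits_at_end(&sa, 1024));	/* 0: caller has to wait */
	return 0;
}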
+
/*
* GEM objects.
*/
@@ -303,6 +426,46 @@ int radeon_mode_dumb_destroy(struct drm_file *file_priv,
uint32_t handle);
/*
+ * Semaphores.
+ */
+struct radeon_ring;
+
+#define RADEON_SEMAPHORE_BO_SIZE 256
+
+struct radeon_semaphore_driver {
+ rwlock_t lock;
+ struct list_head bo;
+};
+
+struct radeon_semaphore_bo;
+
+/* everything here is constant */
+struct radeon_semaphore {
+ struct list_head list;
+ uint64_t gpu_addr;
+ uint32_t *cpu_ptr;
+ struct radeon_semaphore_bo *bo;
+};
+
+struct radeon_semaphore_bo {
+ struct list_head list;
+ struct radeon_ib *ib;
+ struct list_head free;
+ struct radeon_semaphore semaphores[RADEON_SEMAPHORE_BO_SIZE/8];
+ unsigned nused;
+};
+
+void radeon_semaphore_driver_fini(struct radeon_device *rdev);
+int radeon_semaphore_create(struct radeon_device *rdev,
+ struct radeon_semaphore **semaphore);
+void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+ struct radeon_semaphore *semaphore);
+void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+ struct radeon_semaphore *semaphore);
+void radeon_semaphore_free(struct radeon_device *rdev,
+ struct radeon_semaphore *semaphore);
+
+/*
* GART structures, functions & helpers
*/
struct radeon_mc;
@@ -310,6 +473,7 @@ struct radeon_mc;
#define RADEON_GPU_PAGE_SIZE 4096
#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
#define RADEON_GPU_PAGE_SHIFT 12
+#define RADEON_GPU_PAGE_ALIGN(a) (((a) + RADEON_GPU_PAGE_MASK) & ~RADEON_GPU_PAGE_MASK)
struct radeon_gart {
dma_addr_t table_addr;
@@ -320,7 +484,6 @@ struct radeon_gart {
unsigned table_size;
struct page **pages;
dma_addr_t *pages_addr;
- bool *ttm_alloced;
bool ready;
};
@@ -434,7 +597,7 @@ union radeon_irq_stat_regs {
struct radeon_irq {
bool installed;
- bool sw_int;
+ bool sw_int[RADEON_NUM_RINGS];
bool crtc_vblank_int[RADEON_MAX_CRTCS];
bool pflip[RADEON_MAX_CRTCS];
wait_queue_head_t vblank_queue;
@@ -444,7 +607,7 @@ struct radeon_irq {
wait_queue_head_t idle_queue;
bool hdmi[RADEON_MAX_HDMI_BLOCKS];
spinlock_t sw_lock;
- int sw_refcount;
+ int sw_refcount[RADEON_NUM_RINGS];
union radeon_irq_stat_regs stat_regs;
spinlock_t pflip_lock[RADEON_MAX_CRTCS];
int pflip_refcount[RADEON_MAX_CRTCS];
@@ -452,22 +615,23 @@ struct radeon_irq {
int radeon_irq_kms_init(struct radeon_device *rdev);
void radeon_irq_kms_fini(struct radeon_device *rdev);
-void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
-void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring);
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring);
void radeon_irq_kms_pflip_irq_get(struct radeon_device *rdev, int crtc);
void radeon_irq_kms_pflip_irq_put(struct radeon_device *rdev, int crtc);
/*
- * CP & ring.
+ * CP & rings.
*/
+
struct radeon_ib {
- struct list_head list;
+ struct radeon_sa_bo sa_bo;
unsigned idx;
+ uint32_t length_dw;
uint64_t gpu_addr;
- struct radeon_fence *fence;
uint32_t *ptr;
- uint32_t length_dw;
- bool free;
+ struct radeon_fence *fence;
+ unsigned vm_id;
};
/*
@@ -475,20 +639,22 @@ struct radeon_ib {
* mutex protects scheduled_ibs, ready, alloc_bm
*/
struct radeon_ib_pool {
- struct mutex mutex;
- struct radeon_bo *robj;
- struct list_head bogus_ib;
- struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
- bool ready;
- unsigned head_id;
+ struct radeon_mutex mutex;
+ struct radeon_sa_manager sa_manager;
+ struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
+ bool ready;
+ unsigned head_id;
};
-struct radeon_cp {
+struct radeon_ring {
struct radeon_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr;
+ unsigned rptr_offs;
+ unsigned rptr_reg;
unsigned wptr;
unsigned wptr_old;
+ unsigned wptr_reg;
unsigned ring_size;
unsigned ring_free_dw;
int count_dw;
@@ -497,6 +663,61 @@ struct radeon_cp {
uint32_t ptr_mask;
struct mutex mutex;
bool ready;
+ u32 ptr_reg_shift;
+ u32 ptr_reg_mask;
+ u32 nop;
+};
+
+/*
+ * VM
+ */
+struct radeon_vm {
+ struct list_head list;
+ struct list_head va;
+ int id;
+ unsigned last_pfn;
+ u64 pt_gpu_addr;
+ u64 *pt;
+ struct radeon_sa_bo sa_bo;
+ struct mutex mutex;
+ /* last fence for cs using this vm */
+ struct radeon_fence *fence;
+};
+
+struct radeon_vm_funcs {
+ int (*init)(struct radeon_device *rdev);
+ void (*fini)(struct radeon_device *rdev);
+ /* cs mutex must be locked for schedule_ib */
+ int (*bind)(struct radeon_device *rdev, struct radeon_vm *vm, int id);
+ void (*unbind)(struct radeon_device *rdev, struct radeon_vm *vm);
+ void (*tlb_flush)(struct radeon_device *rdev, struct radeon_vm *vm);
+ uint32_t (*page_flags)(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ uint32_t flags);
+ void (*set_page)(struct radeon_device *rdev, struct radeon_vm *vm,
+ unsigned pfn, uint64_t addr, uint32_t flags);
+};
+
+struct radeon_vm_manager {
+ struct list_head lru_vm;
+ uint32_t use_bitmap;
+ struct radeon_sa_manager sa_manager;
+ uint32_t max_pfn;
+ /* fields constant after init */
+ const struct radeon_vm_funcs *funcs;
+ /* number of VMIDs */
+ unsigned nvm;
+ /* vram base address for page table entries */
+ u64 vram_base_offset;
+ /* is vm enabled? */
+ bool enabled;
+};
+
+/*
+ * file private structure
+ */
+struct radeon_fpriv {
+ struct radeon_vm vm;
};
/*
@@ -506,6 +727,7 @@ struct r600_ih {
struct radeon_bo *ring_obj;
volatile uint32_t *ring;
unsigned rptr;
+ unsigned rptr_offs;
unsigned wptr;
unsigned wptr_old;
unsigned ring_size;
@@ -549,23 +771,29 @@ struct r600_blit {
void r600_blit_suspend(struct radeon_device *rdev);
-int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib);
+int radeon_ib_get(struct radeon_device *rdev, int ring,
+ struct radeon_ib **ib, unsigned size);
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib);
+bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
+int radeon_ib_pool_start(struct radeon_device *rdev);
+int radeon_ib_pool_suspend(struct radeon_device *rdev);
int radeon_ib_test(struct radeon_device *rdev);
-extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
/* Ring access between begin & end cannot sleep */
-void radeon_ring_free_size(struct radeon_device *rdev);
-int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw);
-int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
-void radeon_ring_commit(struct radeon_device *rdev);
-void radeon_ring_unlock_commit(struct radeon_device *rdev);
-void radeon_ring_unlock_undo(struct radeon_device *rdev);
-int radeon_ring_test(struct radeon_device *rdev);
-int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size);
-void radeon_ring_fini(struct radeon_device *rdev);
+int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *cp);
+int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
+int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ndw);
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *cp);
+void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *cp);
+int radeon_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
+int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size,
+ unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
+ u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop);
+void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *cp);
/*
@@ -582,12 +810,12 @@ struct radeon_cs_reloc {
struct radeon_cs_chunk {
uint32_t chunk_id;
uint32_t length_dw;
- int kpage_idx[2];
- uint32_t *kpage[2];
+ int kpage_idx[2];
+ uint32_t *kpage[2];
uint32_t *kdata;
- void __user *user_ptr;
- int last_copied_page;
- int last_page_index;
+ void __user *user_ptr;
+ int last_copied_page;
+ int last_page_index;
};
struct radeon_cs_parser {
@@ -605,14 +833,18 @@ struct radeon_cs_parser {
struct radeon_cs_reloc *relocs;
struct radeon_cs_reloc **relocs_ptr;
struct list_head validated;
+ bool sync_to_ring[RADEON_NUM_RINGS];
/* indices of various chunks */
int chunk_ib_idx;
int chunk_relocs_idx;
+ int chunk_flags_idx;
struct radeon_ib *ib;
void *track;
unsigned family;
int parser_error;
- bool keep_tiling_flags;
+ u32 cs_flags;
+ u32 ring;
+ s32 priority;
};
extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
@@ -869,11 +1101,20 @@ void radeon_benchmark(struct radeon_device *rdev, int test_number);
* Testing
*/
void radeon_test_moves(struct radeon_device *rdev);
+void radeon_test_ring_sync(struct radeon_device *rdev,
+ struct radeon_ring *cpA,
+ struct radeon_ring *cpB);
+void radeon_test_syncing(struct radeon_device *rdev);
/*
* Debugfs
*/
+struct radeon_debugfs {
+ struct drm_info_list *files;
+ unsigned num_files;
+};
+
int radeon_debugfs_add_files(struct radeon_device *rdev,
struct drm_info_list *files,
unsigned nfiles);
@@ -889,21 +1130,27 @@ struct radeon_asic {
int (*resume)(struct radeon_device *rdev);
int (*suspend)(struct radeon_device *rdev);
void (*vga_set_state)(struct radeon_device *rdev, bool state);
- bool (*gpu_is_lockup)(struct radeon_device *rdev);
+ bool (*gpu_is_lockup)(struct radeon_device *rdev, struct radeon_ring *cp);
int (*asic_reset)(struct radeon_device *rdev);
void (*gart_tlb_flush)(struct radeon_device *rdev);
int (*gart_set_page)(struct radeon_device *rdev, int i, uint64_t addr);
int (*cp_init)(struct radeon_device *rdev, unsigned ring_size);
void (*cp_fini)(struct radeon_device *rdev);
void (*cp_disable)(struct radeon_device *rdev);
- void (*cp_commit)(struct radeon_device *rdev);
void (*ring_start)(struct radeon_device *rdev);
- int (*ring_test)(struct radeon_device *rdev);
- void (*ring_ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
+
+ struct {
+ void (*ib_execute)(struct radeon_device *rdev, struct radeon_ib *ib);
+ int (*ib_parse)(struct radeon_device *rdev, struct radeon_ib *ib);
+ void (*emit_fence)(struct radeon_device *rdev, struct radeon_fence *fence);
+ void (*emit_semaphore)(struct radeon_device *rdev, struct radeon_ring *cp,
+ struct radeon_semaphore *semaphore, bool emit_wait);
+ } ring[RADEON_NUM_RINGS];
+
+ int (*ring_test)(struct radeon_device *rdev, struct radeon_ring *cp);
int (*irq_set)(struct radeon_device *rdev);
int (*irq_process)(struct radeon_device *rdev);
u32 (*get_vblank_counter)(struct radeon_device *rdev, int crtc);
- void (*fence_ring_emit)(struct radeon_device *rdev, struct radeon_fence *fence);
int (*cs_parse)(struct radeon_cs_parser *p);
int (*copy_blit)(struct radeon_device *rdev,
uint64_t src_offset,
@@ -1132,6 +1379,8 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
+int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp);
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp);
@@ -1147,47 +1396,6 @@ struct r600_vram_scratch {
/*
- * Mutex which allows recursive locking from the same process.
- */
-struct radeon_mutex {
- struct mutex mutex;
- struct task_struct *owner;
- int level;
-};
-
-static inline void radeon_mutex_init(struct radeon_mutex *mutex)
-{
- mutex_init(&mutex->mutex);
- mutex->owner = NULL;
- mutex->level = 0;
-}
-
-static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
-{
- if (mutex_trylock(&mutex->mutex)) {
- /* The mutex was unlocked before, so it's ours now */
- mutex->owner = current;
- } else if (mutex->owner != current) {
- /* Another process locked the mutex, take it */
- mutex_lock(&mutex->mutex);
- mutex->owner = current;
- }
- /* Otherwise the mutex was already locked by this process */
-
- mutex->level++;
-}
-
-static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
-{
- if (--mutex->level > 0)
- return;
-
- mutex->owner = NULL;
- mutex_unlock(&mutex->mutex);
-}
-
-
-/*
* Core structure, functions and helpers.
*/
typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t);
@@ -1231,11 +1439,10 @@ struct radeon_device {
struct radeon_mode_info mode_info;
struct radeon_scratch scratch;
struct radeon_mman mman;
- struct radeon_fence_driver fence_drv;
- struct radeon_cp cp;
- /* cayman compute rings */
- struct radeon_cp cp1;
- struct radeon_cp cp2;
+ rwlock_t fence_lock;
+ struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
+ struct radeon_semaphore_driver semaphore_drv;
+ struct radeon_ring ring[RADEON_NUM_RINGS];
struct radeon_ib_pool ib_pool;
struct radeon_irq irq;
struct radeon_asic *asic;
@@ -1279,6 +1486,13 @@ struct radeon_device {
struct drm_file *cmask_filp;
/* i2c buses */
struct radeon_i2c_chan *i2c_bus[RADEON_MAX_I2C_BUS];
+ /* debugfs */
+ struct radeon_debugfs debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
+ unsigned debugfs_count;
+ /* virtual memory */
+ struct radeon_vm_manager vm_manager;
+ /* ring used for bo copies */
+ u32 copy_ring;
};
int radeon_device_init(struct radeon_device *rdev,
@@ -1414,18 +1628,17 @@ void radeon_atombios_fini(struct radeon_device *rdev);
/*
* RING helpers.
*/
-
#if DRM_DEBUG_CODE == 0
-static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
+static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
- rdev->cp.ring[rdev->cp.wptr++] = v;
- rdev->cp.wptr &= rdev->cp.ptr_mask;
- rdev->cp.count_dw--;
- rdev->cp.ring_free_dw--;
+ ring->ring[ring->wptr++] = v;
+ ring->wptr &= ring->ptr_mask;
+ ring->count_dw--;
+ ring->ring_free_dw--;
}
#else
/* With debugging this is just too big to inline */
-void radeon_ring_write(struct radeon_device *rdev, uint32_t v);
+void radeon_ring_write(struct radeon_ring *ring, uint32_t v);
#endif
/*
@@ -1437,18 +1650,19 @@ void radeon_ring_write(struct radeon_device *rdev, uint32_t v);
#define radeon_suspend(rdev) (rdev)->asic->suspend((rdev))
#define radeon_cs_parse(p) rdev->asic->cs_parse((p))
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
-#define radeon_gpu_is_lockup(rdev) (rdev)->asic->gpu_is_lockup((rdev))
+#define radeon_gpu_is_lockup(rdev, cp) (rdev)->asic->gpu_is_lockup((rdev), (cp))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart_tlb_flush((rdev))
#define radeon_gart_set_page(rdev, i, p) (rdev)->asic->gart_set_page((rdev), (i), (p))
-#define radeon_cp_commit(rdev) (rdev)->asic->cp_commit((rdev))
#define radeon_ring_start(rdev) (rdev)->asic->ring_start((rdev))
-#define radeon_ring_test(rdev) (rdev)->asic->ring_test((rdev))
-#define radeon_ring_ib_execute(rdev, ib) (rdev)->asic->ring_ib_execute((rdev), (ib))
+#define radeon_ring_test(rdev, cp) (rdev)->asic->ring_test((rdev), (cp))
+#define radeon_ring_ib_execute(rdev, r, ib) (rdev)->asic->ring[(r)].ib_execute((rdev), (ib))
+#define radeon_ring_ib_parse(rdev, r, ib) (rdev)->asic->ring[(r)].ib_parse((rdev), (ib))
#define radeon_irq_set(rdev) (rdev)->asic->irq_set((rdev))
#define radeon_irq_process(rdev) (rdev)->asic->irq_process((rdev))
#define radeon_get_vblank_counter(rdev, crtc) (rdev)->asic->get_vblank_counter((rdev), (crtc))
-#define radeon_fence_ring_emit(rdev, fence) (rdev)->asic->fence_ring_emit((rdev), (fence))
+#define radeon_fence_ring_emit(rdev, r, fence) (rdev)->asic->ring[(r)].emit_fence((rdev), (fence))
+#define radeon_semaphore_ring_emit(rdev, r, cp, semaphore, emit_wait) (rdev)->asic->ring[(r)].emit_semaphore((rdev), (cp), (semaphore), (emit_wait))
#define radeon_copy_blit(rdev, s, d, np, f) (rdev)->asic->copy_blit((rdev), (s), (d), (np), (f))
#define radeon_copy_dma(rdev, s, d, np, f) (rdev)->asic->copy_dma((rdev), (s), (d), (np), (f))
#define radeon_copy(rdev, s, d, np, f) (rdev)->asic->copy((rdev), (s), (d), (np), (f))
@@ -1503,6 +1717,33 @@ extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
/*
+ * vm
+ */
+int radeon_vm_manager_init(struct radeon_device *rdev);
+void radeon_vm_manager_fini(struct radeon_device *rdev);
+int radeon_vm_manager_start(struct radeon_device *rdev);
+int radeon_vm_manager_suspend(struct radeon_device *rdev);
+int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm);
+int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm);
+void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
+int radeon_vm_bo_update_pte(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo,
+ struct ttm_mem_reg *mem);
+void radeon_vm_bo_invalidate(struct radeon_device *rdev,
+ struct radeon_bo *bo);
+int radeon_vm_bo_add(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo,
+ uint64_t offset,
+ uint32_t flags);
+int radeon_vm_bo_rmv(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo);
+
+
+/*
* R600 vram scratch functions
*/
int r600_vram_scratch_init(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index a2e1eae..36a6192 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -138,14 +138,18 @@ static struct radeon_asic r100_asic = {
.asic_reset = &r100_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &r100_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r100_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
- .fence_ring_emit = &r100_fence_ring_emit,
.cs_parse = &r100_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = NULL,
@@ -186,14 +190,18 @@ static struct radeon_asic r200_asic = {
.asic_reset = &r100_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &r100_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r100_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
- .fence_ring_emit = &r100_fence_ring_emit,
.cs_parse = &r100_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
@@ -233,14 +241,18 @@ static struct radeon_asic r300_asic = {
.asic_reset = &r300_asic_reset,
.gart_tlb_flush = &r100_pci_gart_tlb_flush,
.gart_set_page = &r100_pci_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
@@ -281,14 +293,18 @@ static struct radeon_asic r300_asic_pcie = {
.asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
@@ -328,14 +344,18 @@ static struct radeon_asic r420_asic = {
.asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
@@ -376,14 +396,18 @@ static struct radeon_asic rs400_asic = {
.asic_reset = &r300_asic_reset,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &r100_irq_set,
.irq_process = &r100_irq_process,
.get_vblank_counter = &r100_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
@@ -424,14 +448,18 @@ static struct radeon_asic rs600_asic = {
.asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rs600_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
@@ -472,14 +500,18 @@ static struct radeon_asic rs690_asic = {
.asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rs400_gart_tlb_flush,
.gart_set_page = &rs400_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &r300_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
@@ -520,14 +552,18 @@ static struct radeon_asic rv515_asic = {
.asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
@@ -568,14 +604,18 @@ static struct radeon_asic r520_asic = {
.asic_reset = &rs600_asic_reset,
.gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
.gart_set_page = &rv370_pcie_gart_set_page,
- .cp_commit = &r100_cp_commit,
.ring_start = &rv515_ring_start,
.ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ }
+ },
.irq_set = &rs600_irq_set,
.irq_process = &rs600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
.copy_dma = &r200_copy_dma,
@@ -611,18 +651,22 @@ static struct radeon_asic r600_asic = {
.fini = &r600_fini,
.suspend = &r600_suspend,
.resume = &r600_resume,
- .cp_commit = &r600_cp_commit,
.vga_set_state = &r600_vga_set_state,
.gpu_is_lockup = &r600_gpu_is_lockup,
.asic_reset = &r600_asic_reset,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
- .ring_ib_execute = &r600_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r600_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ }
+ },
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = NULL,
@@ -658,18 +702,22 @@ static struct radeon_asic rs780_asic = {
.fini = &r600_fini,
.suspend = &r600_suspend,
.resume = &r600_resume,
- .cp_commit = &r600_cp_commit,
.gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
.asic_reset = &r600_asic_reset,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
- .ring_ib_execute = &r600_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r600_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ }
+ },
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = NULL,
@@ -705,18 +753,22 @@ static struct radeon_asic rv770_asic = {
.fini = &rv770_fini,
.suspend = &rv770_suspend,
.resume = &rv770_resume,
- .cp_commit = &r600_cp_commit,
.asic_reset = &r600_asic_reset,
.gpu_is_lockup = &r600_gpu_is_lockup,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &r600_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
- .ring_ib_execute = &r600_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &r600_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ }
+ },
.irq_set = &r600_irq_set,
.irq_process = &r600_irq_process,
.get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &r600_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = NULL,
@@ -752,18 +804,22 @@ static struct radeon_asic evergreen_asic = {
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
- .cp_commit = &r600_cp_commit,
.gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
- .ring_ib_execute = &evergreen_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &evergreen_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ }
+ },
.irq_set = &evergreen_irq_set,
.irq_process = &evergreen_irq_process,
.get_vblank_counter = &evergreen_get_vblank_counter,
- .fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = NULL,
@@ -799,18 +855,22 @@ static struct radeon_asic sumo_asic = {
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
- .cp_commit = &r600_cp_commit,
.gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
- .ring_ib_execute = &evergreen_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &evergreen_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ }
+ },
.irq_set = &evergreen_irq_set,
.irq_process = &evergreen_irq_process,
.get_vblank_counter = &evergreen_get_vblank_counter,
- .fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = NULL,
@@ -846,18 +906,22 @@ static struct radeon_asic btc_asic = {
.fini = &evergreen_fini,
.suspend = &evergreen_suspend,
.resume = &evergreen_resume,
- .cp_commit = &r600_cp_commit,
.gpu_is_lockup = &evergreen_gpu_is_lockup,
.asic_reset = &evergreen_asic_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &evergreen_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
- .ring_ib_execute = &evergreen_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &evergreen_ring_ib_execute,
+ .emit_fence = &r600_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ }
+ },
.irq_set = &evergreen_irq_set,
.irq_process = &evergreen_irq_process,
.get_vblank_counter = &evergreen_get_vblank_counter,
- .fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = NULL,
@@ -888,23 +952,50 @@ static struct radeon_asic btc_asic = {
.post_page_flip = &evergreen_post_page_flip,
};
+static const struct radeon_vm_funcs cayman_vm_funcs = {
+ .init = &cayman_vm_init,
+ .fini = &cayman_vm_fini,
+ .bind = &cayman_vm_bind,
+ .unbind = &cayman_vm_unbind,
+ .tlb_flush = &cayman_vm_tlb_flush,
+ .page_flags = &cayman_vm_page_flags,
+ .set_page = &cayman_vm_set_page,
+};
+
static struct radeon_asic cayman_asic = {
.init = &cayman_init,
.fini = &cayman_fini,
.suspend = &cayman_suspend,
.resume = &cayman_resume,
- .cp_commit = &r600_cp_commit,
.gpu_is_lockup = &cayman_gpu_is_lockup,
.asic_reset = &cayman_asic_reset,
.vga_set_state = &r600_vga_set_state,
.gart_tlb_flush = &cayman_pcie_gart_tlb_flush,
.gart_set_page = &rs600_gart_set_page,
.ring_test = &r600_ring_test,
- .ring_ib_execute = &evergreen_ring_ib_execute,
+ .ring = {
+ [RADEON_RING_TYPE_GFX_INDEX] = {
+ .ib_execute = &cayman_ring_ib_execute,
+ .ib_parse = &evergreen_ib_parse,
+ .emit_fence = &cayman_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ },
+ [CAYMAN_RING_TYPE_CP1_INDEX] = {
+ .ib_execute = &cayman_ring_ib_execute,
+ .ib_parse = &evergreen_ib_parse,
+ .emit_fence = &cayman_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ },
+ [CAYMAN_RING_TYPE_CP2_INDEX] = {
+ .ib_execute = &cayman_ring_ib_execute,
+ .ib_parse = &evergreen_ib_parse,
+ .emit_fence = &cayman_fence_ring_emit,
+ .emit_semaphore = &r600_semaphore_ring_emit,
+ }
+ },
.irq_set = &evergreen_irq_set,
.irq_process = &evergreen_irq_process,
.get_vblank_counter = &evergreen_get_vblank_counter,
- .fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
.copy_blit = &r600_copy_blit,
.copy_dma = NULL,
@@ -945,6 +1036,9 @@ int radeon_asic_init(struct radeon_device *rdev)
else
rdev->num_crtc = 2;
+ /* set the ring used for bo copies */
+ rdev->copy_ring = RADEON_RING_TYPE_GFX_INDEX;
+
switch (rdev->family) {
case CHIP_R100:
case CHIP_RV100:
@@ -1050,6 +1144,7 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->asic = &cayman_asic;
/* set num crtcs */
rdev->num_crtc = 6;
+ rdev->vm_manager.funcs = &cayman_vm_funcs;
break;
default:
/* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 5991484..6304aef 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -58,17 +58,20 @@ void r100_fini(struct radeon_device *rdev);
int r100_suspend(struct radeon_device *rdev);
int r100_resume(struct radeon_device *rdev);
void r100_vga_set_state(struct radeon_device *rdev, bool state);
-bool r100_gpu_is_lockup(struct radeon_device *rdev);
+bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
-void r100_cp_commit(struct radeon_device *rdev);
void r100_ring_start(struct radeon_device *rdev);
int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
void r100_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
+void r100_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *cp,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait);
int r100_cs_parse(struct radeon_cs_parser *p);
void r100_pll_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
uint32_t r100_pll_rreg(struct radeon_device *rdev, uint32_t reg);
@@ -83,7 +86,7 @@ int r100_set_surface_reg(struct radeon_device *rdev, int reg,
void r100_clear_surface_reg(struct radeon_device *rdev, int reg);
void r100_bandwidth_update(struct radeon_device *rdev);
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-int r100_ring_test(struct radeon_device *rdev);
+int r100_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
void r100_hpd_init(struct radeon_device *rdev);
void r100_hpd_fini(struct radeon_device *rdev);
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -101,12 +104,12 @@ void r100_pci_gart_disable(struct radeon_device *rdev);
int r100_debugfs_mc_info_init(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
void r100_gpu_lockup_update(struct r100_gpu_lockup *lockup,
- struct radeon_cp *cp);
+ struct radeon_ring *cp);
bool r100_gpu_cp_is_lockup(struct radeon_device *rdev,
struct r100_gpu_lockup *lockup,
- struct radeon_cp *cp);
+ struct radeon_ring *cp);
void r100_ib_fini(struct radeon_device *rdev);
-int r100_ib_init(struct radeon_device *rdev);
+int r100_ib_test(struct radeon_device *rdev);
void r100_irq_disable(struct radeon_device *rdev);
void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
@@ -154,7 +157,7 @@ extern int r300_init(struct radeon_device *rdev);
extern void r300_fini(struct radeon_device *rdev);
extern int r300_suspend(struct radeon_device *rdev);
extern int r300_resume(struct radeon_device *rdev);
-extern bool r300_gpu_is_lockup(struct radeon_device *rdev);
+extern bool r300_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
extern int r300_asic_reset(struct radeon_device *rdev);
extern void r300_ring_start(struct radeon_device *rdev);
extern void r300_fence_ring_emit(struct radeon_device *rdev,
@@ -293,22 +296,25 @@ int r600_resume(struct radeon_device *rdev);
void r600_vga_set_state(struct radeon_device *rdev, bool state);
int r600_wb_init(struct radeon_device *rdev);
void r600_wb_fini(struct radeon_device *rdev);
-void r600_cp_commit(struct radeon_device *rdev);
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
uint32_t r600_pciep_rreg(struct radeon_device *rdev, uint32_t reg);
void r600_pciep_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int r600_cs_parse(struct radeon_cs_parser *p);
void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
-bool r600_gpu_is_lockup(struct radeon_device *rdev);
+void r600_semaphore_ring_emit(struct radeon_device *rdev,
+ struct radeon_ring *cp,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait);
+bool r600_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_asic_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
-int r600_ib_test(struct radeon_device *rdev);
+int r600_ib_test(struct radeon_device *rdev, int ring);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
-int r600_ring_test(struct radeon_device *rdev);
+int r600_ring_test(struct radeon_device *rdev, struct radeon_ring *cp);
int r600_copy_blit(struct radeon_device *rdev,
uint64_t src_offset, uint64_t dst_offset,
unsigned num_gpu_pages, struct radeon_fence *fence);
@@ -328,7 +334,7 @@ extern int r600_get_pcie_lanes(struct radeon_device *rdev);
bool r600_card_posted(struct radeon_device *rdev);
void r600_cp_stop(struct radeon_device *rdev);
int r600_cp_start(struct radeon_device *rdev);
-void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
+void r600_ring_init(struct radeon_device *rdev, struct radeon_ring *cp, unsigned ring_size);
int r600_cp_resume(struct radeon_device *rdev);
void r600_cp_fini(struct radeon_device *rdev);
int r600_count_pipe_bits(uint32_t val);
@@ -397,7 +403,7 @@ int evergreen_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
int evergreen_suspend(struct radeon_device *rdev);
int evergreen_resume(struct radeon_device *rdev);
-bool evergreen_gpu_is_lockup(struct radeon_device *rdev);
+bool evergreen_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int evergreen_asic_reset(struct radeon_device *rdev);
void evergreen_bandwidth_update(struct radeon_device *rdev);
void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
@@ -423,12 +429,26 @@ int evergreen_blit_init(struct radeon_device *rdev);
/*
* cayman
*/
+void cayman_fence_ring_emit(struct radeon_device *rdev,
+ struct radeon_fence *fence);
void cayman_pcie_gart_tlb_flush(struct radeon_device *rdev);
int cayman_init(struct radeon_device *rdev);
void cayman_fini(struct radeon_device *rdev);
int cayman_suspend(struct radeon_device *rdev);
int cayman_resume(struct radeon_device *rdev);
-bool cayman_gpu_is_lockup(struct radeon_device *rdev);
+bool cayman_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int cayman_asic_reset(struct radeon_device *rdev);
+void cayman_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
+int cayman_vm_init(struct radeon_device *rdev);
+void cayman_vm_fini(struct radeon_device *rdev);
+int cayman_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id);
+void cayman_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm);
+void cayman_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm);
+uint32_t cayman_vm_page_flags(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ uint32_t flags);
+void cayman_vm_set_page(struct radeon_device *rdev, struct radeon_vm *vm,
+ unsigned pfn, uint64_t addr, uint32_t flags);
+int evergreen_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib);
#endif
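
The prototype changes above all follow one pattern: hooks that used to assume a single command processor now take the ring they act on, either as a ring index or as a struct radeon_ring pointer. A minimal user-space model of that pattern is sketched below; the struct and function names are illustrative stand-ins, not the driver's real definitions.

/* build with: cc -std=c99 ring_hooks.c */
#include <stdbool.h>
#include <stdio.h>

#define NUM_RINGS 3                      /* stand-in for RADEON_NUM_RINGS */

struct ring { int idx; bool ready; unsigned rptr, last_rptr; };

struct device {
	struct ring rings[NUM_RINGS];
	/* hooks now take the ring they operate on instead of assuming one CP */
	bool (*gpu_is_lockup)(struct device *dev, struct ring *ring);
	int  (*ring_test)(struct device *dev, struct ring *ring);
};

static bool stub_gpu_is_lockup(struct device *dev, struct ring *ring)
{
	/* the real driver compares read pointers over time; here: no progress == lockup */
	bool stuck = ring->ready && ring->rptr == ring->last_rptr;
	ring->last_rptr = ring->rptr;
	(void)dev;
	return stuck;
}

static int stub_ring_test(struct device *dev, struct ring *ring)
{
	(void)dev;
	return ring->ready ? 0 : -1;
}

int main(void)
{
	struct device dev = { .gpu_is_lockup = stub_gpu_is_lockup,
			      .ring_test = stub_ring_test };
	for (int i = 0; i < NUM_RINGS; i++) {
		dev.rings[i] = (struct ring){ .idx = i, .ready = (i == 0),
					      .rptr = (unsigned)(i + 1) };
		printf("ring %d: test=%d lockup=%d\n", i,
		       dev.ring_test(&dev, &dev.rings[i]),
		       dev.gpu_is_lockup(&dev, &dev.rings[i]));
	}
	return 0;
}
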
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 5082d17..1f53ae7 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2931,6 +2931,20 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP5;
}
}
+ if ((radeon_encoder->devices & ATOM_DEVICE_DFP6_SUPPORT) &&
+ (radeon_connector->devices & ATOM_DEVICE_DFP6_SUPPORT)) {
+ if (connected) {
+ DRM_DEBUG_KMS("DFP6 connected\n");
+ bios_0_scratch |= ATOM_S0_DFP6;
+ bios_3_scratch |= ATOM_S3_DFP6_ACTIVE;
+ bios_6_scratch |= ATOM_S6_ACC_REQ_DFP6;
+ } else {
+ DRM_DEBUG_KMS("DFP6 disconnected\n");
+ bios_0_scratch &= ~ATOM_S0_DFP6;
+ bios_3_scratch &= ~ATOM_S3_DFP6_ACTIVE;
+ bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP6;
+ }
+ }
if (rdev->family >= CHIP_R600) {
WREG32(R600_BIOS_0_SCRATCH, bios_0_scratch);
@@ -2951,6 +2965,9 @@ radeon_atombios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t bios_3_scratch;
+ if (ASIC_IS_DCE4(rdev))
+ return;
+
if (rdev->family >= CHIP_R600)
bios_3_scratch = RREG32(R600_BIOS_3_SCRATCH);
else
@@ -3003,6 +3020,9 @@ radeon_atombios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on)
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
uint32_t bios_2_scratch;
+ if (ASIC_IS_DCE4(rdev))
+ return;
+
if (rdev->family >= CHIP_R600)
bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH);
else
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 9d95792..98724fc 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -58,7 +58,8 @@ static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
}
obj = (union acpi_object *)buffer.pointer;
- memcpy(bios+offset, obj->buffer.pointer, len);
+ memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
+ len = obj->buffer.length;
kfree(buffer.pointer);
return len;
}
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 17e1a9b..815f234 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -43,7 +43,7 @@ static int radeon_benchmark_do_move(struct radeon_device *rdev, unsigned size,
start_jiffies = jiffies;
for (i = 0; i < n; i++) {
- r = radeon_fence_create(rdev, &fence);
+ r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
if (r)
return r;
@@ -229,21 +229,21 @@ void radeon_benchmark(struct radeon_device *rdev, int test_number)
break;
case 6:
/* GTT to VRAM, buffer size sweep, common modes */
- for (i = 1; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+ for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
radeon_benchmark_move(rdev, common_modes[i],
RADEON_GEM_DOMAIN_GTT,
RADEON_GEM_DOMAIN_VRAM);
break;
case 7:
/* VRAM to GTT, buffer size sweep, common modes */
- for (i = 1; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+ for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
radeon_benchmark_move(rdev, common_modes[i],
RADEON_GEM_DOMAIN_VRAM,
RADEON_GEM_DOMAIN_GTT);
break;
case 8:
/* VRAM to VRAM, buffer size sweep, common modes */
- for (i = 1; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
+ for (i = 0; i < RADEON_BENCHMARK_COMMON_MODES_N; i++)
radeon_benchmark_move(rdev, common_modes[i],
RADEON_GEM_DOMAIN_VRAM,
RADEON_GEM_DOMAIN_VRAM);
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 229a20f..501f488 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -120,7 +120,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
ret = radeon_atrm_get_bios_chunk(rdev->bios,
(i * ATRM_BIOS_PAGE),
ATRM_BIOS_PAGE);
- if (ret <= 0)
+ if (ret < ATRM_BIOS_PAGE)
break;
}
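
The two hunks above (radeon_atpx_handler.c and radeon_bios.c) establish one chunked-read convention: the fetch routine reports how many bytes ACPI actually copied, and the caller stops as soon as a chunk comes back short rather than only on error. A small sketch of that convention, with get_chunk() as a hypothetical stand-in for radeon_atrm_get_bios_chunk():

#include <stdio.h>
#include <string.h>

#define CHUNK 4096                       /* plays the role of ATRM_BIOS_PAGE */

static const char fake_rom[] = "fake video BIOS image";

/* copy up to 'len' bytes starting at 'offset', return bytes actually copied */
static int get_chunk(unsigned char *dst, int offset, int len)
{
	int avail = (int)sizeof(fake_rom) - offset;

	if (avail <= 0)
		return 0;
	if (len > avail)
		len = avail;             /* short read: end of the image */
	memcpy(dst, fake_rom + offset, (size_t)len);
	return len;
}

int main(void)
{
	unsigned char bios[4 * CHUNK];
	int i, total = 0;

	for (i = 0; i < 4; i++) {
		int ret = get_chunk(bios + i * CHUNK, i * CHUNK, CHUNK);

		total += ret;
		if (ret < CHUNK)         /* same test as the radeon_bios.c hunk */
			break;
	}
	printf("copied %d bytes\n", total);
	return 0;
}
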
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index e7cb3ab..8c9a811 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1057,7 +1057,7 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
(radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
return MODE_OK;
else if (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_A) {
- if (ASIC_IS_DCE3(rdev)) {
+ if (0) {
/* HDMI 1.3+ supports max clock of 340 MHz */
if (mode->clock > 340000)
return MODE_CLOCK_HIGH;
@@ -1117,13 +1117,23 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
(connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
struct drm_display_mode *mode;
- if (!radeon_dig_connector->edp_on)
- atombios_set_edp_panel_power(connector,
- ATOM_TRANSMITTER_ACTION_POWER_ON);
- ret = radeon_ddc_get_modes(radeon_connector);
- if (!radeon_dig_connector->edp_on)
- atombios_set_edp_panel_power(connector,
- ATOM_TRANSMITTER_ACTION_POWER_OFF);
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ if (!radeon_dig_connector->edp_on)
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_ON);
+ ret = radeon_ddc_get_modes(radeon_connector);
+ if (!radeon_dig_connector->edp_on)
+ atombios_set_edp_panel_power(connector,
+ ATOM_TRANSMITTER_ACTION_POWER_OFF);
+ } else {
+ /* need to setup ddc on the bridge */
+ if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) !=
+ ENCODER_OBJECT_ID_NONE) {
+ if (encoder)
+ radeon_atom_ext_encoder_setup_ddc(encoder);
+ }
+ ret = radeon_ddc_get_modes(radeon_connector);
+ }
if (ret > 0) {
if (encoder) {
@@ -1134,7 +1144,6 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
return ret;
}
- encoder = radeon_best_single_encoder(connector);
if (!encoder)
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 29afd71..e64bec4 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -58,7 +58,7 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
duplicate = false;
r = (struct drm_radeon_cs_reloc *)&chunk->kdata[i*4];
- for (j = 0; j < p->nrelocs; j++) {
+ for (j = 0; j < i; j++) {
if (r->handle == p->relocs[j].handle) {
p->relocs_ptr[i] = &p->relocs[j];
duplicate = true;
@@ -84,16 +84,75 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->relocs[i].flags = r->flags;
radeon_bo_list_add_object(&p->relocs[i].lobj,
&p->validated);
- }
+
+ if (p->relocs[i].robj->tbo.sync_obj && !(r->flags & RADEON_RELOC_DONT_SYNC)) {
+ struct radeon_fence *fence = p->relocs[i].robj->tbo.sync_obj;
+ if (!radeon_fence_signaled(fence)) {
+ p->sync_to_ring[fence->ring] = true;
+ }
+ }
+ } else
+ p->relocs[i].handle = 0;
}
return radeon_bo_list_validate(&p->validated);
}
+static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority)
+{
+ p->priority = priority;
+
+ switch (ring) {
+ default:
+ DRM_ERROR("unknown ring id: %d\n", ring);
+ return -EINVAL;
+ case RADEON_CS_RING_GFX:
+ p->ring = RADEON_RING_TYPE_GFX_INDEX;
+ break;
+ case RADEON_CS_RING_COMPUTE:
+ /* for now */
+ p->ring = RADEON_RING_TYPE_GFX_INDEX;
+ break;
+ }
+ return 0;
+}
+
+static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
+{
+ int i, r;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ /* no need to sync to our own or unused rings */
+ if (i == p->ring || !p->sync_to_ring[i] || !p->rdev->ring[i].ready)
+ continue;
+
+ if (!p->ib->fence->semaphore) {
+ r = radeon_semaphore_create(p->rdev, &p->ib->fence->semaphore);
+ if (r)
+ return r;
+ }
+
+ r = radeon_ring_lock(p->rdev, &p->rdev->ring[i], 3);
+ if (r)
+ return r;
+ radeon_semaphore_emit_signal(p->rdev, i, p->ib->fence->semaphore);
+ radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[i]);
+
+ r = radeon_ring_lock(p->rdev, &p->rdev->ring[p->ring], 3);
+ if (r)
+ return r;
+ radeon_semaphore_emit_wait(p->rdev, p->ring, p->ib->fence->semaphore);
+ radeon_ring_unlock_commit(p->rdev, &p->rdev->ring[p->ring]);
+ }
+ return 0;
+}
+
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
struct drm_radeon_cs *cs = data;
uint64_t *chunk_array_ptr;
- unsigned size, i, flags = 0;
+ unsigned size, i;
+ u32 ring = RADEON_CS_RING_GFX;
+ s32 priority = 0;
if (!cs->num_chunks) {
return 0;
@@ -103,6 +162,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->idx = 0;
p->chunk_ib_idx = -1;
p->chunk_relocs_idx = -1;
+ p->chunk_flags_idx = -1;
p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
if (p->chunks_array == NULL) {
return -ENOMEM;
@@ -112,6 +172,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
sizeof(uint64_t)*cs->num_chunks)) {
return -EFAULT;
}
+ p->cs_flags = 0;
p->nchunks = cs->num_chunks;
p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
if (p->chunks == NULL) {
@@ -140,16 +201,19 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
if (p->chunks[i].length_dw == 0)
return -EINVAL;
}
- if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS &&
- !p->chunks[i].length_dw) {
- return -EINVAL;
+ if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+ p->chunk_flags_idx = i;
+ /* zero length flags aren't useful */
+ if (p->chunks[i].length_dw == 0)
+ return -EINVAL;
}
p->chunks[i].length_dw = user_chunk.length_dw;
p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
- if (p->chunks[i].chunk_id != RADEON_CHUNK_ID_IB) {
+ if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
+ (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
size = p->chunks[i].length_dw * sizeof(uint32_t);
p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
if (p->chunks[i].kdata == NULL) {
@@ -160,29 +224,58 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return -EFAULT;
}
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
- flags = p->chunks[i].kdata[0];
+ p->cs_flags = p->chunks[i].kdata[0];
+ if (p->chunks[i].length_dw > 1)
+ ring = p->chunks[i].kdata[1];
+ if (p->chunks[i].length_dw > 2)
+ priority = (s32)p->chunks[i].kdata[2];
}
- } else {
- p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
- p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (p->chunks[i].kpage[0] == NULL || p->chunks[i].kpage[1] == NULL) {
- kfree(p->chunks[i].kpage[0]);
- kfree(p->chunks[i].kpage[1]);
- return -ENOMEM;
- }
- p->chunks[i].kpage_idx[0] = -1;
- p->chunks[i].kpage_idx[1] = -1;
- p->chunks[i].last_copied_page = -1;
- p->chunks[i].last_page_index = ((p->chunks[i].length_dw * 4) - 1) / PAGE_SIZE;
}
}
- if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
- DRM_ERROR("cs IB too big: %d\n",
- p->chunks[p->chunk_ib_idx].length_dw);
+
+ if ((p->cs_flags & RADEON_CS_USE_VM) &&
+ !p->rdev->vm_manager.enabled) {
+ DRM_ERROR("VM not active on asic!\n");
+ if (p->chunk_relocs_idx != -1)
+ kfree(p->chunks[p->chunk_relocs_idx].kdata);
+ if (p->chunk_flags_idx != -1)
+ kfree(p->chunks[p->chunk_flags_idx].kdata);
return -EINVAL;
}
- p->keep_tiling_flags = (flags & RADEON_CS_KEEP_TILING_FLAGS) != 0;
+ if (radeon_cs_get_ring(p, ring, priority)) {
+ if (p->chunk_relocs_idx != -1)
+ kfree(p->chunks[p->chunk_relocs_idx].kdata);
+ if (p->chunk_flags_idx != -1)
+ kfree(p->chunks[p->chunk_flags_idx].kdata);
+ return -EINVAL;
+ }
+
+
+ /* deal with non-vm */
+ if ((p->chunk_ib_idx != -1) &&
+ ((p->cs_flags & RADEON_CS_USE_VM) == 0) &&
+ (p->chunks[p->chunk_ib_idx].chunk_id == RADEON_CHUNK_ID_IB)) {
+ if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
+ DRM_ERROR("cs IB too big: %d\n",
+ p->chunks[p->chunk_ib_idx].length_dw);
+ return -EINVAL;
+ }
+ p->chunks[p->chunk_ib_idx].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ p->chunks[p->chunk_ib_idx].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (p->chunks[p->chunk_ib_idx].kpage[0] == NULL ||
+ p->chunks[p->chunk_ib_idx].kpage[1] == NULL) {
+ kfree(p->chunks[p->chunk_ib_idx].kpage[0]);
+ kfree(p->chunks[p->chunk_ib_idx].kpage[1]);
+ return -ENOMEM;
+ }
+ p->chunks[p->chunk_ib_idx].kpage_idx[0] = -1;
+ p->chunks[p->chunk_ib_idx].kpage_idx[1] = -1;
+ p->chunks[p->chunk_ib_idx].last_copied_page = -1;
+ p->chunks[p->chunk_ib_idx].last_page_index =
+ ((p->chunks[p->chunk_ib_idx].length_dw * 4) - 1) / PAGE_SIZE;
+ }
+
return 0;
}
@@ -224,14 +317,146 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
radeon_ib_free(parser->rdev, &parser->ib);
}
+static int radeon_cs_ib_chunk(struct radeon_device *rdev,
+ struct radeon_cs_parser *parser)
+{
+ struct radeon_cs_chunk *ib_chunk;
+ int r;
+
+ if (parser->chunk_ib_idx == -1)
+ return 0;
+
+ if (parser->cs_flags & RADEON_CS_USE_VM)
+ return 0;
+
+ ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+ /* Copy the packet into the IB, the parser will read from the
+ * input memory (cached) and write to the IB (which can be
+ * uncached).
+ */
+ r = radeon_ib_get(rdev, parser->ring, &parser->ib,
+ ib_chunk->length_dw * 4);
+ if (r) {
+ DRM_ERROR("Failed to get ib !\n");
+ return r;
+ }
+ parser->ib->length_dw = ib_chunk->length_dw;
+ r = radeon_cs_parse(parser);
+ if (r || parser->parser_error) {
+ DRM_ERROR("Invalid command stream !\n");
+ return r;
+ }
+ r = radeon_cs_finish_pages(parser);
+ if (r) {
+ DRM_ERROR("Invalid command stream !\n");
+ return r;
+ }
+ r = radeon_cs_sync_rings(parser);
+ if (r) {
+ DRM_ERROR("Failed to synchronize rings !\n");
+ }
+ parser->ib->vm_id = 0;
+ r = radeon_ib_schedule(rdev, parser->ib);
+ if (r) {
+ DRM_ERROR("Failed to schedule IB !\n");
+ }
+ return 0;
+}
+
+static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
+ struct radeon_vm *vm)
+{
+ struct radeon_bo_list *lobj;
+ struct radeon_bo *bo;
+ int r;
+
+ list_for_each_entry(lobj, &parser->validated, tv.head) {
+ bo = lobj->bo;
+ r = radeon_vm_bo_update_pte(parser->rdev, vm, bo, &bo->tbo.mem);
+ if (r) {
+ return r;
+ }
+ }
+ return 0;
+}
+
+static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
+ struct radeon_cs_parser *parser)
+{
+ struct radeon_cs_chunk *ib_chunk;
+ struct radeon_fpriv *fpriv = parser->filp->driver_priv;
+ struct radeon_vm *vm = &fpriv->vm;
+ int r;
+
+ if (parser->chunk_ib_idx == -1)
+ return 0;
+
+ if ((parser->cs_flags & RADEON_CS_USE_VM) == 0)
+ return 0;
+
+ ib_chunk = &parser->chunks[parser->chunk_ib_idx];
+ if (ib_chunk->length_dw > RADEON_IB_VM_MAX_SIZE) {
+ DRM_ERROR("cs IB too big: %d\n", ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ r = radeon_ib_get(rdev, parser->ring, &parser->ib,
+ ib_chunk->length_dw * 4);
+ if (r) {
+ DRM_ERROR("Failed to get ib !\n");
+ return r;
+ }
+ parser->ib->length_dw = ib_chunk->length_dw;
+ /* Copy the packet into the IB */
+ if (DRM_COPY_FROM_USER(parser->ib->ptr, ib_chunk->user_ptr,
+ ib_chunk->length_dw * 4)) {
+ return -EFAULT;
+ }
+ r = radeon_ring_ib_parse(rdev, parser->ring, parser->ib);
+ if (r) {
+ return r;
+ }
+
+ mutex_lock(&vm->mutex);
+ r = radeon_vm_bind(rdev, vm);
+ if (r) {
+ goto out;
+ }
+ r = radeon_bo_vm_update_pte(parser, vm);
+ if (r) {
+ goto out;
+ }
+ r = radeon_cs_sync_rings(parser);
+ if (r) {
+ DRM_ERROR("Failed to synchronize rings !\n");
+ }
+ parser->ib->vm_id = vm->id;
+ /* ib pool is bound at 0 in virtual address space so gpu_addr is the
+ * offset inside the pool bo
+ */
+ parser->ib->gpu_addr = parser->ib->sa_bo.offset;
+ r = radeon_ib_schedule(rdev, parser->ib);
+out:
+ if (!r) {
+ if (vm->fence) {
+ radeon_fence_unref(&vm->fence);
+ }
+ vm->fence = radeon_fence_ref(parser->ib->fence);
+ }
+ mutex_unlock(&fpriv->vm.mutex);
+ return r;
+}
+
int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct radeon_device *rdev = dev->dev_private;
struct radeon_cs_parser parser;
- struct radeon_cs_chunk *ib_chunk;
int r;
radeon_mutex_lock(&rdev->cs_mutex);
+ if (!rdev->accel_working) {
+ radeon_mutex_unlock(&rdev->cs_mutex);
+ return -EBUSY;
+ }
/* initialize parser */
memset(&parser, 0, sizeof(struct radeon_cs_parser));
parser.filp = filp;
@@ -245,13 +470,6 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
- r = radeon_ib_get(rdev, &parser.ib);
- if (r) {
- DRM_ERROR("Failed to get ib !\n");
- radeon_cs_parser_fini(&parser, r);
- radeon_mutex_unlock(&rdev->cs_mutex);
- return r;
- }
r = radeon_cs_parser_relocs(&parser);
if (r) {
if (r != -ERESTARTSYS)
@@ -260,29 +478,15 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
radeon_mutex_unlock(&rdev->cs_mutex);
return r;
}
- /* Copy the packet into the IB, the parser will read from the
- * input memory (cached) and write to the IB (which can be
- * uncached). */
- ib_chunk = &parser.chunks[parser.chunk_ib_idx];
- parser.ib->length_dw = ib_chunk->length_dw;
- r = radeon_cs_parse(&parser);
- if (r || parser.parser_error) {
- DRM_ERROR("Invalid command stream !\n");
- radeon_cs_parser_fini(&parser, r);
- radeon_mutex_unlock(&rdev->cs_mutex);
- return r;
- }
- r = radeon_cs_finish_pages(&parser);
+ r = radeon_cs_ib_chunk(rdev, &parser);
if (r) {
- DRM_ERROR("Invalid command stream !\n");
- radeon_cs_parser_fini(&parser, r);
- radeon_mutex_unlock(&rdev->cs_mutex);
- return r;
+ goto out;
}
- r = radeon_ib_schedule(rdev, parser.ib);
+ r = radeon_cs_ib_vm_chunk(rdev, &parser);
if (r) {
- DRM_ERROR("Failed to schedule IB !\n");
+ goto out;
}
+out:
radeon_cs_parser_fini(&parser, r);
radeon_mutex_unlock(&rdev->cs_mutex);
return r;
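
Alongside the new semaphore-based ring syncing, the parser rework above lets the RADEON_CHUNK_ID_FLAGS chunk carry up to three dwords: dword 0 holds the CS flags, dword 1 (if present) selects the ring and dword 2 (if present) a priority. The sketch below mirrors that parsing; the flag value and the parse_flags_chunk() helper are illustrative assumptions, not the driver's code.

#include <stdio.h>
#include <stdint.h>

#define RADEON_CS_USE_VM         0x2     /* assumed value, for this sketch only */
#define RADEON_CS_RING_GFX       0
#define RADEON_CS_RING_COMPUTE   1

struct cs_state { uint32_t cs_flags; uint32_t ring; int32_t priority; };

static void parse_flags_chunk(struct cs_state *s,
			      const uint32_t *kdata, unsigned length_dw)
{
	s->cs_flags = kdata[0];
	s->ring = RADEON_CS_RING_GFX;    /* default when dword 1 is absent */
	s->priority = 0;
	if (length_dw > 1)
		s->ring = kdata[1];
	if (length_dw > 2)
		s->priority = (int32_t)kdata[2];
}

int main(void)
{
	const uint32_t chunk[] = { RADEON_CS_USE_VM, RADEON_CS_RING_COMPUTE, 5 };
	struct cs_state s;

	parse_flags_chunk(&s, chunk, 3);
	printf("flags=0x%x ring=%u priority=%d vm=%s\n",
	       (unsigned)s.cs_flags, (unsigned)s.ring, (int)s.priority,
	       (s.cs_flags & RADEON_CS_USE_VM) ? "yes" : "no");
	return 0;
}
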
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index c4d00a1..49f7cb7 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -224,8 +224,11 @@ int radeon_wb_init(struct radeon_device *rdev)
if (radeon_no_wb == 1)
rdev->wb.enabled = false;
else {
- /* often unreliable on AGP */
if (rdev->flags & RADEON_IS_AGP) {
+ /* often unreliable on AGP */
+ rdev->wb.enabled = false;
+ } else if (rdev->family < CHIP_R300) {
+ /* often unreliable on pre-r300 */
rdev->wb.enabled = false;
} else {
rdev->wb.enabled = true;
@@ -717,18 +720,25 @@ int radeon_device_init(struct radeon_device *rdev,
/* mutex initialization is all done here so we
* can re-call functions without locking issues */
radeon_mutex_init(&rdev->cs_mutex);
- mutex_init(&rdev->ib_pool.mutex);
- mutex_init(&rdev->cp.mutex);
+ radeon_mutex_init(&rdev->ib_pool.mutex);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ mutex_init(&rdev->ring[i].mutex);
mutex_init(&rdev->dc_hw_i2c_mutex);
if (rdev->family >= CHIP_R600)
spin_lock_init(&rdev->ih.lock);
mutex_init(&rdev->gem.mutex);
mutex_init(&rdev->pm.mutex);
mutex_init(&rdev->vram_mutex);
- rwlock_init(&rdev->fence_drv.lock);
+ rwlock_init(&rdev->fence_lock);
+ rwlock_init(&rdev->semaphore_drv.lock);
INIT_LIST_HEAD(&rdev->gem.objects);
init_waitqueue_head(&rdev->irq.vblank_queue);
init_waitqueue_head(&rdev->irq.idle_queue);
+ INIT_LIST_HEAD(&rdev->semaphore_drv.bo);
+ /* initialize vm here */
+ rdev->vm_manager.use_bitmap = 1;
+ rdev->vm_manager.max_pfn = 1 << 20;
+ INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
/* Set asic functions */
r = radeon_asic_init(rdev);
@@ -765,8 +775,14 @@ int radeon_device_init(struct radeon_device *rdev,
r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
if (r) {
rdev->need_dma32 = true;
+ dma_bits = 32;
printk(KERN_WARNING "radeon: No suitable DMA available.\n");
}
+ r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
+ if (r) {
+ pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
+ printk(KERN_WARNING "radeon: No coherent DMA available.\n");
+ }
/* Registers mapping */
/* TODO: block userspace mapping of io register */
@@ -814,15 +830,20 @@ int radeon_device_init(struct radeon_device *rdev,
if (r)
return r;
}
- if (radeon_testing) {
+ if ((radeon_testing & 1)) {
radeon_test_moves(rdev);
}
+ if ((radeon_testing & 2)) {
+ radeon_test_syncing(rdev);
+ }
if (radeon_benchmarking) {
radeon_benchmark(rdev, radeon_benchmarking);
}
return 0;
}
+static void radeon_debugfs_remove_files(struct radeon_device *rdev);
+
void radeon_device_fini(struct radeon_device *rdev)
{
DRM_INFO("radeon: finishing device.\n");
@@ -837,6 +858,7 @@ void radeon_device_fini(struct radeon_device *rdev)
rdev->rio_mem = NULL;
iounmap(rdev->rmmio);
rdev->rmmio = NULL;
+ radeon_debugfs_remove_files(rdev);
}
@@ -848,7 +870,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
struct radeon_device *rdev;
struct drm_crtc *crtc;
struct drm_connector *connector;
- int r;
+ int i, r;
if (dev == NULL || dev->dev_private == NULL) {
return -ENODEV;
@@ -861,6 +883,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
+ drm_kms_helper_poll_disable(dev);
+
/* turn off display hw */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
@@ -887,7 +911,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
/* evict vram memory */
radeon_bo_evict_vram(rdev);
/* wait for gpu to finish processing current batch */
- radeon_fence_wait_last(rdev);
+ for (i = 0; i < RADEON_NUM_RINGS; i++)
+ radeon_fence_wait_last(rdev, i);
radeon_save_bios_scratch_regs(rdev);
@@ -936,9 +961,11 @@ int radeon_resume_kms(struct drm_device *dev)
radeon_fbdev_set_suspend(rdev, 0);
console_unlock();
- /* init dig PHYs */
- if (rdev->is_atom_bios)
+ /* init dig PHYs, disp eng pll */
+ if (rdev->is_atom_bios) {
radeon_atom_encoder_init(rdev);
+ radeon_atom_dcpll_init(rdev);
+ }
/* reset hpd state */
radeon_hpd_init(rdev);
/* blat the mode back in */
@@ -947,6 +974,8 @@ int radeon_resume_kms(struct drm_device *dev)
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
+
+ drm_kms_helper_poll_enable(dev);
return 0;
}
@@ -986,36 +1015,29 @@ int radeon_gpu_reset(struct radeon_device *rdev)
/*
* Debugfs
*/
-struct radeon_debugfs {
- struct drm_info_list *files;
- unsigned num_files;
-};
-static struct radeon_debugfs _radeon_debugfs[RADEON_DEBUGFS_MAX_COMPONENTS];
-static unsigned _radeon_debugfs_count = 0;
-
int radeon_debugfs_add_files(struct radeon_device *rdev,
struct drm_info_list *files,
unsigned nfiles)
{
unsigned i;
- for (i = 0; i < _radeon_debugfs_count; i++) {
- if (_radeon_debugfs[i].files == files) {
+ for (i = 0; i < rdev->debugfs_count; i++) {
+ if (rdev->debugfs[i].files == files) {
/* Already registered */
return 0;
}
}
- i = _radeon_debugfs_count + 1;
+ i = rdev->debugfs_count + 1;
if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
DRM_ERROR("Reached maximum number of debugfs components.\n");
DRM_ERROR("Report so we increase "
"RADEON_DEBUGFS_MAX_COMPONENTS.\n");
return -EINVAL;
}
- _radeon_debugfs[_radeon_debugfs_count].files = files;
- _radeon_debugfs[_radeon_debugfs_count].num_files = nfiles;
- _radeon_debugfs_count = i;
+ rdev->debugfs[rdev->debugfs_count].files = files;
+ rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
+ rdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
drm_debugfs_create_files(files, nfiles,
rdev->ddev->control->debugfs_root,
@@ -1027,6 +1049,22 @@ int radeon_debugfs_add_files(struct radeon_device *rdev,
return 0;
}
+static void radeon_debugfs_remove_files(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+ unsigned i;
+
+ for (i = 0; i < rdev->debugfs_count; i++) {
+ drm_debugfs_remove_files(rdev->debugfs[i].files,
+ rdev->debugfs[i].num_files,
+ rdev->ddev->control);
+ drm_debugfs_remove_files(rdev->debugfs[i].files,
+ rdev->debugfs[i].num_files,
+ rdev->ddev->primary);
+ }
+#endif
+}
+
#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor)
{
@@ -1035,11 +1073,5 @@ int radeon_debugfs_init(struct drm_minor *minor)
void radeon_debugfs_cleanup(struct drm_minor *minor)
{
- unsigned i;
-
- for (i = 0; i < _radeon_debugfs_count; i++) {
- drm_debugfs_remove_files(_radeon_debugfs[i].files,
- _radeon_debugfs[i].num_files, minor);
- }
}
#endif
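
One small behavioral change above is easy to miss: radeon_testing is now treated as a bitmask, with bit 0 selecting the existing buffer-move test and bit 1 the new ring-syncing (semaphore) test. A minimal sketch of that dispatch, with the two run_*_test() functions as placeholders only:

#include <stdio.h>

static void run_move_test(void)  { printf("running BO move test\n"); }
static void run_sync_test(void)  { printf("running semaphore sync test\n"); }

static void run_selftests(int radeon_testing)
{
	if (radeon_testing & 1)
		run_move_test();
	if (radeon_testing & 2)
		run_sync_test();
}

int main(void)
{
	run_selftests(3);                /* both bits set: run both self-tests */
	return 0;
}
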
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index a22d6e6..3d31433 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -406,7 +406,7 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
if (!ASIC_IS_AVIVO(rdev)) {
/* crtc offset is from display base addr not FB location */
base -= radeon_crtc->legacy_display_base_addr;
- pitch_pixels = fb->pitch / (fb->bits_per_pixel / 8);
+ pitch_pixels = fb->pitches[0] / (fb->bits_per_pixel / 8);
if (tiling_flags & RADEON_TILING_MACRO) {
if (ASIC_IS_R300(rdev)) {
@@ -1078,29 +1078,36 @@ static const struct drm_framebuffer_funcs radeon_fb_funcs = {
.create_handle = radeon_user_framebuffer_create_handle,
};
-void
+int
radeon_framebuffer_init(struct drm_device *dev,
struct radeon_framebuffer *rfb,
- struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj)
{
+ int ret;
rfb->obj = obj;
- drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
+ ret = drm_framebuffer_init(dev, &rfb->base, &radeon_fb_funcs);
+ if (ret) {
+ rfb->obj = NULL;
+ return ret;
+ }
drm_helper_mode_fill_fb_struct(&rfb->base, mode_cmd);
+ return 0;
}
static struct drm_framebuffer *
radeon_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
- struct drm_mode_fb_cmd *mode_cmd)
+ struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_gem_object *obj;
struct radeon_framebuffer *radeon_fb;
+ int ret;
- obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
+ obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
if (obj == NULL) {
dev_err(&dev->pdev->dev, "No GEM object associated to handle 0x%08X, "
- "can't create framebuffer\n", mode_cmd->handle);
+ "can't create framebuffer\n", mode_cmd->handles[0]);
return ERR_PTR(-ENOENT);
}
@@ -1108,7 +1115,12 @@ radeon_user_framebuffer_create(struct drm_device *dev,
if (radeon_fb == NULL)
return ERR_PTR(-ENOMEM);
- radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
+ ret = radeon_framebuffer_init(dev, radeon_fb, mode_cmd, obj);
+ if (ret) {
+ kfree(radeon_fb);
+ drm_gem_object_unreference_unlocked(obj);
+ return NULL;
+ }
return &radeon_fb->base;
}
@@ -1305,9 +1317,11 @@ int radeon_modeset_init(struct radeon_device *rdev)
return ret;
}
- /* init dig PHYs */
- if (rdev->is_atom_bios)
+ /* init dig PHYs, disp eng pll */
+ if (rdev->is_atom_bios) {
radeon_atom_encoder_init(rdev);
+ radeon_atom_dcpll_init(rdev);
+ }
/* initialize hpd */
radeon_hpd_init(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 71499fc..8032f1f 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -54,9 +54,10 @@
* 2.10.0 - fusion 2D tiling
* 2.11.0 - backend map, initial compute support for the CS checker
* 2.12.0 - RADEON_CS_KEEP_TILING_FLAGS
+ * 2.13.0 - virtual memory support
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 12
+#define KMS_DRIVER_MINOR 13
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -84,6 +85,10 @@ int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
struct drm_file *file_priv);
int radeon_gem_object_init(struct drm_gem_object *obj);
void radeon_gem_object_free(struct drm_gem_object *obj);
+int radeon_gem_object_open(struct drm_gem_object *obj,
+ struct drm_file *file_priv);
+void radeon_gem_object_close(struct drm_gem_object *obj,
+ struct drm_file *file_priv);
extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, int crtc,
int *vpos, int *hpos);
extern struct drm_ioctl_desc radeon_ioctls_kms[];
@@ -140,7 +145,7 @@ module_param_named(vramlimit, radeon_vram_limit, int, 0600);
MODULE_PARM_DESC(agpmode, "AGP Mode (-1 == PCI)");
module_param_named(agpmode, radeon_agpmode, int, 0444);
-MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32,64, etc)\n");
+MODULE_PARM_DESC(gartsize, "Size of PCIE/IGP gart to setup in megabytes (32, 64, etc)");
module_param_named(gartsize, radeon_gart_size, int, 0600);
MODULE_PARM_DESC(benchmark, "Run benchmark");
@@ -206,6 +211,21 @@ static struct pci_device_id pciidlist[] = {
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
+static const struct file_operations radeon_driver_old_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = radeon_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver_old = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
@@ -232,21 +252,7 @@ static struct drm_driver driver_old = {
.reclaim_buffers = drm_core_reclaim_buffers,
.ioctls = radeon_ioctls,
.dma_ioctl = radeon_cp_buffers,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .read = drm_read,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = radeon_compat_ioctl,
-#endif
- .llseek = noop_llseek,
- },
-
+ .fops = &radeon_driver_old_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
@@ -304,6 +310,20 @@ radeon_pci_resume(struct pci_dev *pdev)
return radeon_resume_kms(dev);
}
+static const struct file_operations radeon_driver_kms_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = radeon_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = radeon_kms_compat_ioctl,
+#endif
+};
+
static struct drm_driver kms_driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
@@ -335,24 +355,13 @@ static struct drm_driver kms_driver = {
.ioctls = radeon_ioctls_kms,
.gem_init_object = radeon_gem_object_init,
.gem_free_object = radeon_gem_object_free,
+ .gem_open_object = radeon_gem_object_open,
+ .gem_close_object = radeon_gem_object_close,
.dma_ioctl = radeon_dma_ioctl_kms,
.dumb_create = radeon_mode_dumb_create,
.dumb_map_offset = radeon_mode_dumb_mmap,
.dumb_destroy = radeon_mode_dumb_destroy,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = radeon_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .read = drm_read,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = radeon_kms_compat_ioctl,
-#endif
- },
-
+ .fops = &radeon_driver_kms_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
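
The two radeon_drv.c hunks above only move the file_operations out of the drm_driver initializer into named, static const objects. The generic shape of that refactor is sketched below with made-up types; the payoff is that the ops table lives in read-only data and the driver struct just carries a pointer to it.

#include <stdio.h>

struct my_fops {
	int (*open)(void);
	int (*release)(void);
};

static int my_open(void)    { printf("open\n");    return 0; }
static int my_release(void) { printf("release\n"); return 0; }

/* named, const table instead of an anonymous nested initializer */
static const struct my_fops driver_fops = {
	.open    = my_open,
	.release = my_release,
};

struct my_driver {
	const char *name;
	const struct my_fops *fops;      /* now just a pointer */
};

static struct my_driver drv = {
	.name = "example",
	.fops = &driver_fops,
};

int main(void)
{
	drv.fops->open();
	drv.fops->release();
	return 0;
}
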
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 4b27efa..26e9270 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -202,6 +202,22 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
return NULL;
}
+struct drm_connector *
+radeon_get_connector_for_encoder_init(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ radeon_connector = to_radeon_connector(connector);
+ if (radeon_encoder->devices & radeon_connector->devices)
+ return connector;
+ }
+ return NULL;
+}
+
struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
@@ -288,3 +304,62 @@ void radeon_panel_mode_fixup(struct drm_encoder *encoder,
}
+bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
+ u32 pixel_clock)
+{
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+ struct radeon_connector_atom_dig *dig_connector;
+
+ connector = radeon_get_connector_for_encoder(encoder);
+ /* if we don't have an active device yet, just use one of
+ * the connectors tied to the encoder.
+ */
+ if (!connector)
+ connector = radeon_get_connector_for_encoder_init(encoder);
+ radeon_connector = to_radeon_connector(connector);
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_HDMIB:
+ if (radeon_connector->use_digital) {
+ /* HDMI 1.3 supports up to 340 MHz over single link */
+ if (0 && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (pixel_clock > 340000)
+ return true;
+ else
+ return false;
+ } else {
+ if (pixel_clock > 165000)
+ return true;
+ else
+ return false;
+ }
+ } else
+ return false;
+ case DRM_MODE_CONNECTOR_DVID:
+ case DRM_MODE_CONNECTOR_HDMIA:
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ dig_connector = radeon_connector->con_priv;
+ if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
+ return false;
+ else {
+ /* HDMI 1.3 supports up to 340 MHz over single link */
+ if (0 && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+ if (pixel_clock > 340000)
+ return true;
+ else
+ return false;
+ } else {
+ if (pixel_clock > 165000)
+ return true;
+ else
+ return false;
+ }
+ }
+ default:
+ return false;
+ }
+}
+
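
The helper added above boils down to a pixel-clock threshold: single-link TMDS tops out around 165 MHz, so anything faster needs dual link (the 340 MHz HDMI 1.3 branch is deliberately compiled out with "if (0 && ...)" for now). A reduced version of that check, with example mode clocks chosen for illustration:

#include <stdbool.h>
#include <stdio.h>

/* pixel clocks in kHz, matching the units used by the driver */
static bool needs_duallink(int pixel_clock_khz)
{
	return pixel_clock_khz > 165000;
}

int main(void)
{
	const int modes[] = { 148500 /* 1080p60 */, 268500 /* 2560x1600@60 */ };

	for (unsigned i = 0; i < sizeof(modes) / sizeof(modes[0]); i++)
		printf("%d kHz -> %s link\n", modes[i],
		       needs_duallink(modes[i]) ? "dual" : "single");
	return 0;
}
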
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 0b7b486..195471c 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -103,7 +103,7 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
}
static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
- struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object **gobj_p)
{
struct radeon_device *rdev = rfbdev->rdev;
@@ -114,13 +114,17 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
int ret;
int aligned_size, size;
int height = mode_cmd->height;
+ u32 bpp, depth;
+
+ drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
/* need to align pitch with crtc limits */
- mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
+ mode_cmd->pitches[0] = radeon_align_pitch(rdev, mode_cmd->width, bpp,
+ fb_tiled) * ((bpp + 1) / 8);
if (rdev->family >= CHIP_R600)
height = ALIGN(mode_cmd->height, 8);
- size = mode_cmd->pitch * height;
+ size = mode_cmd->pitches[0] * height;
aligned_size = ALIGN(size, PAGE_SIZE);
ret = radeon_gem_object_create(rdev, aligned_size, 0,
RADEON_GEM_DOMAIN_VRAM,
@@ -137,7 +141,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
tiling_flags = RADEON_TILING_MACRO;
#ifdef __BIG_ENDIAN
- switch (mode_cmd->bpp) {
+ switch (bpp) {
case 32:
tiling_flags |= RADEON_TILING_SWAP_32BIT;
break;
@@ -151,7 +155,7 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
if (tiling_flags) {
ret = radeon_bo_set_tiling_flags(rbo,
tiling_flags | RADEON_TILING_SURFACE,
- mode_cmd->pitch);
+ mode_cmd->pitches[0]);
if (ret)
dev_err(rdev->dev, "FB failed to set tiling flags\n");
}
@@ -187,7 +191,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
struct radeon_device *rdev = rfbdev->rdev;
struct fb_info *info;
struct drm_framebuffer *fb = NULL;
- struct drm_mode_fb_cmd mode_cmd;
+ struct drm_mode_fb_cmd2 mode_cmd;
struct drm_gem_object *gobj = NULL;
struct radeon_bo *rbo = NULL;
struct device *device = &rdev->pdev->dev;
@@ -201,10 +205,15 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev))
sizes->surface_bpp = 32;
- mode_cmd.bpp = sizes->surface_bpp;
- mode_cmd.depth = sizes->surface_depth;
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj);
+ if (ret) {
+ DRM_ERROR("failed to create fbcon object %d\n", ret);
+ return ret;
+ }
+
rbo = gem_to_radeon_bo(gobj);
/* okay we have an object now allocate the framebuffer */
@@ -216,7 +225,11 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
info->par = rfbdev;
- radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
+ ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
+ if (ret) {
+ DRM_ERROR("failed to initalise framebuffer %d\n", ret);
+ goto out_unref;
+ }
fb = &rfbdev->rfb.base;
@@ -228,7 +241,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
strcpy(info->fix.id, "radeondrmfb");
- drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
+ drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT;
info->fbops = &radeonfb_ops;
@@ -271,7 +284,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
DRM_INFO("vram apper at 0x%lX\n", (unsigned long)rdev->mc.aper_base);
DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo));
DRM_INFO("fb depth is %d\n", fb->depth);
- DRM_INFO(" pitch is %d\n", fb->pitch);
+ DRM_INFO(" pitch is %d\n", fb->pitches[0]);
vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
return 0;
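
With drm_mode_fb_cmd2 the fbdev code above stores the pitch per plane in pitches[0] and derives it from the bpp that drm_fb_get_bpp_depth() extracts from the pixel format. The arithmetic is sketched below; align_pitch() is a hypothetical stand-in for radeon_align_pitch(), whose real alignment rules differ per ASIC.

#include <stdio.h>

static int align_pitch(int width_pixels, int align)
{
	return (width_pixels + align - 1) & ~(align - 1);
}

int main(void)
{
	int width = 1366, bpp = 32;
	int bytes_pp = (bpp + 1) / 8;            /* same rounding as the hunk */
	int pitch = align_pitch(width, 64) * bytes_pp;

	printf("width %d, %d bpp -> pitches[0] = %d bytes\n", width, bpp, pitch);
	return 0;
}
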
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 76ec0e9..4bd36a3 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -40,32 +40,24 @@
#include "radeon.h"
#include "radeon_trace.h"
-static void radeon_fence_write(struct radeon_device *rdev, u32 seq)
+static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
if (rdev->wb.enabled) {
- u32 scratch_index;
- if (rdev->wb.use_event)
- scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
- else
- scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
- rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);
- } else
- WREG32(rdev->fence_drv.scratch_reg, seq);
+ *rdev->fence_drv[ring].cpu_addr = cpu_to_le32(seq);
+ } else {
+ WREG32(rdev->fence_drv[ring].scratch_reg, seq);
+ }
}
-static u32 radeon_fence_read(struct radeon_device *rdev)
+static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
- u32 seq;
+ u32 seq = 0;
if (rdev->wb.enabled) {
- u32 scratch_index;
- if (rdev->wb.use_event)
- scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
- else
- scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
- seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
- } else
- seq = RREG32(rdev->fence_drv.scratch_reg);
+ seq = le32_to_cpu(*rdev->fence_drv[ring].cpu_addr);
+ } else {
+ seq = RREG32(rdev->fence_drv[ring].scratch_reg);
+ }
return seq;
}
@@ -73,28 +65,28 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
unsigned long irq_flags;
- write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
- if (fence->emited) {
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ if (fence->emitted) {
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
- if (!rdev->cp.ready)
+ fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
+ if (!rdev->ring[fence->ring].ready)
/* FIXME: cp is not running, assume everything is done right
* away
*/
- radeon_fence_write(rdev, fence->seq);
+ radeon_fence_write(rdev, fence->seq, fence->ring);
else
- radeon_fence_ring_emit(rdev, fence);
+ radeon_fence_ring_emit(rdev, fence->ring, fence);
trace_radeon_fence_emit(rdev->ddev, fence->seq);
- fence->emited = true;
- list_move_tail(&fence->list, &rdev->fence_drv.emited);
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ fence->emitted = true;
+ list_move_tail(&fence->list, &rdev->fence_drv[fence->ring].emitted);
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
-static bool radeon_fence_poll_locked(struct radeon_device *rdev)
+static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
{
struct radeon_fence *fence;
struct list_head *i, *n;
@@ -102,34 +94,34 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
bool wake = false;
unsigned long cjiffies;
- seq = radeon_fence_read(rdev);
- if (seq != rdev->fence_drv.last_seq) {
- rdev->fence_drv.last_seq = seq;
- rdev->fence_drv.last_jiffies = jiffies;
- rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+ seq = radeon_fence_read(rdev, ring);
+ if (seq != rdev->fence_drv[ring].last_seq) {
+ rdev->fence_drv[ring].last_seq = seq;
+ rdev->fence_drv[ring].last_jiffies = jiffies;
+ rdev->fence_drv[ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
} else {
cjiffies = jiffies;
- if (time_after(cjiffies, rdev->fence_drv.last_jiffies)) {
- cjiffies -= rdev->fence_drv.last_jiffies;
- if (time_after(rdev->fence_drv.last_timeout, cjiffies)) {
+ if (time_after(cjiffies, rdev->fence_drv[ring].last_jiffies)) {
+ cjiffies -= rdev->fence_drv[ring].last_jiffies;
+ if (time_after(rdev->fence_drv[ring].last_timeout, cjiffies)) {
/* update the timeout */
- rdev->fence_drv.last_timeout -= cjiffies;
+ rdev->fence_drv[ring].last_timeout -= cjiffies;
} else {
/* the 500ms timeout is elapsed we should test
* for GPU lockup
*/
- rdev->fence_drv.last_timeout = 1;
+ rdev->fence_drv[ring].last_timeout = 1;
}
} else {
/* wrap around update last jiffies, we will just wait
* a little longer
*/
- rdev->fence_drv.last_jiffies = cjiffies;
+ rdev->fence_drv[ring].last_jiffies = cjiffies;
}
return false;
}
n = NULL;
- list_for_each(i, &rdev->fence_drv.emited) {
+ list_for_each(i, &rdev->fence_drv[ring].emitted) {
fence = list_entry(i, struct radeon_fence, list);
if (fence->seq == seq) {
n = i;
@@ -141,11 +133,11 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
i = n;
do {
n = i->prev;
- list_move_tail(i, &rdev->fence_drv.signaled);
+ list_move_tail(i, &rdev->fence_drv[ring].signaled);
fence = list_entry(i, struct radeon_fence, list);
fence->signaled = true;
i = n;
- } while (i != &rdev->fence_drv.emited);
+ } while (i != &rdev->fence_drv[ring].emitted);
wake = true;
}
return wake;
@@ -157,14 +149,18 @@ static void radeon_fence_destroy(struct kref *kref)
struct radeon_fence *fence;
fence = container_of(kref, struct radeon_fence, kref);
- write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
+ write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
list_del(&fence->list);
- fence->emited = false;
- write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
+ fence->emitted = false;
+ write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
+ if (fence->semaphore)
+ radeon_semaphore_free(fence->rdev, fence->semaphore);
kfree(fence);
}
-int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
+int radeon_fence_create(struct radeon_device *rdev,
+ struct radeon_fence **fence,
+ int ring)
{
unsigned long irq_flags;
@@ -174,18 +170,19 @@ int radeon_fence_create(struct radeon_device *rdev, struct radeon_fence **fence)
}
kref_init(&((*fence)->kref));
(*fence)->rdev = rdev;
- (*fence)->emited = false;
+ (*fence)->emitted = false;
(*fence)->signaled = false;
(*fence)->seq = 0;
+ (*fence)->ring = ring;
+ (*fence)->semaphore = NULL;
INIT_LIST_HEAD(&(*fence)->list);
- write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
- list_add_tail(&(*fence)->list, &rdev->fence_drv.created);
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ list_add_tail(&(*fence)->list, &rdev->fence_drv[ring].created);
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
-
bool radeon_fence_signaled(struct radeon_fence *fence)
{
unsigned long irq_flags;
@@ -197,21 +194,21 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
if (fence->rdev->gpu_lockup)
return true;
- write_lock_irqsave(&fence->rdev->fence_drv.lock, irq_flags);
+ write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
signaled = fence->signaled;
/* if we are shutting down, report all fences as signaled */
if (fence->rdev->shutdown) {
signaled = true;
}
- if (!fence->emited) {
- WARN(1, "Querying an unemited fence : %p !\n", fence);
+ if (!fence->emitted) {
+ WARN(1, "Querying an unemitted fence : %p !\n", fence);
signaled = true;
}
if (!signaled) {
- radeon_fence_poll_locked(fence->rdev);
+ radeon_fence_poll_locked(fence->rdev, fence->ring);
signaled = fence->signaled;
}
- write_unlock_irqrestore(&fence->rdev->fence_drv.lock, irq_flags);
+ write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
return signaled;
}
@@ -230,24 +227,24 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
if (radeon_fence_signaled(fence)) {
return 0;
}
- timeout = rdev->fence_drv.last_timeout;
+ timeout = rdev->fence_drv[fence->ring].last_timeout;
retry:
/* save current sequence used to check for GPU lockup */
- seq = rdev->fence_drv.last_seq;
+ seq = rdev->fence_drv[fence->ring].last_seq;
trace_radeon_fence_wait_begin(rdev->ddev, seq);
if (intr) {
- radeon_irq_kms_sw_irq_get(rdev);
- r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
+ radeon_irq_kms_sw_irq_get(rdev, fence->ring);
+ r = wait_event_interruptible_timeout(rdev->fence_drv[fence->ring].queue,
radeon_fence_signaled(fence), timeout);
- radeon_irq_kms_sw_irq_put(rdev);
+ radeon_irq_kms_sw_irq_put(rdev, fence->ring);
if (unlikely(r < 0)) {
return r;
}
} else {
- radeon_irq_kms_sw_irq_get(rdev);
- r = wait_event_timeout(rdev->fence_drv.queue,
+ radeon_irq_kms_sw_irq_get(rdev, fence->ring);
+ r = wait_event_timeout(rdev->fence_drv[fence->ring].queue,
radeon_fence_signaled(fence), timeout);
- radeon_irq_kms_sw_irq_put(rdev);
+ radeon_irq_kms_sw_irq_put(rdev, fence->ring);
}
trace_radeon_fence_wait_end(rdev->ddev, seq);
if (unlikely(!radeon_fence_signaled(fence))) {
@@ -258,10 +255,11 @@ retry:
timeout = r;
goto retry;
}
- /* don't protect read access to rdev->fence_drv.last_seq
+ /* don't protect read access to rdev->fence_drv[t].last_seq
* if we are experiencing a lockup the value doesn't change
*/
- if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
+ if (seq == rdev->fence_drv[fence->ring].last_seq &&
+ radeon_gpu_is_lockup(rdev, &rdev->ring[fence->ring])) {
/* good news we believe it's a lockup */
printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
fence->seq, seq);
@@ -272,20 +270,20 @@ retry:
r = radeon_gpu_reset(rdev);
if (r)
return r;
- radeon_fence_write(rdev, fence->seq);
+ radeon_fence_write(rdev, fence->seq, fence->ring);
rdev->gpu_lockup = false;
}
timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
- write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
- rdev->fence_drv.last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
- rdev->fence_drv.last_jiffies = jiffies;
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ rdev->fence_drv[fence->ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
+ rdev->fence_drv[fence->ring].last_jiffies = jiffies;
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
goto retry;
}
return 0;
}
-int radeon_fence_wait_next(struct radeon_device *rdev)
+int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
unsigned long irq_flags;
struct radeon_fence *fence;
@@ -294,21 +292,21 @@ int radeon_fence_wait_next(struct radeon_device *rdev)
if (rdev->gpu_lockup) {
return 0;
}
- write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
- if (list_empty(&rdev->fence_drv.emited)) {
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ if (list_empty(&rdev->fence_drv[ring].emitted)) {
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
- fence = list_entry(rdev->fence_drv.emited.next,
+ fence = list_entry(rdev->fence_drv[ring].emitted.next,
struct radeon_fence, list);
radeon_fence_ref(fence);
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
r = radeon_fence_wait(fence, false);
radeon_fence_unref(&fence);
return r;
}
-int radeon_fence_wait_last(struct radeon_device *rdev)
+int radeon_fence_wait_last(struct radeon_device *rdev, int ring)
{
unsigned long irq_flags;
struct radeon_fence *fence;
@@ -317,15 +315,15 @@ int radeon_fence_wait_last(struct radeon_device *rdev)
if (rdev->gpu_lockup) {
return 0;
}
- write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
- if (list_empty(&rdev->fence_drv.emited)) {
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ if (list_empty(&rdev->fence_drv[ring].emitted)) {
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
return 0;
}
- fence = list_entry(rdev->fence_drv.emited.prev,
+ fence = list_entry(rdev->fence_drv[ring].emitted.prev,
struct radeon_fence, list);
radeon_fence_ref(fence);
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
r = radeon_fence_wait(fence, false);
radeon_fence_unref(&fence);
return r;
@@ -347,39 +345,97 @@ void radeon_fence_unref(struct radeon_fence **fence)
}
}
-void radeon_fence_process(struct radeon_device *rdev)
+void radeon_fence_process(struct radeon_device *rdev, int ring)
{
unsigned long irq_flags;
bool wake;
- write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
- wake = radeon_fence_poll_locked(rdev);
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ wake = radeon_fence_poll_locked(rdev, ring);
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
if (wake) {
- wake_up_all(&rdev->fence_drv.queue);
+ wake_up_all(&rdev->fence_drv[ring].queue);
}
}
-int radeon_fence_driver_init(struct radeon_device *rdev)
+int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
+{
+ unsigned long irq_flags;
+ int not_processed = 0;
+
+ read_lock_irqsave(&rdev->fence_lock, irq_flags);
+ if (!rdev->fence_drv[ring].initialized) {
+ read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ return 0;
+ }
+
+ if (!list_empty(&rdev->fence_drv[ring].emitted)) {
+ struct list_head *ptr;
+ list_for_each(ptr, &rdev->fence_drv[ring].emitted) {
+ /* count up to 3, that's enough info */
+ if (++not_processed >= 3)
+ break;
+ }
+ }
+ read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ return not_processed;
+}
+
+int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
unsigned long irq_flags;
+ uint64_t index;
int r;
- write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
- r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
- if (r) {
- dev_err(rdev->dev, "fence failed to get scratch register\n");
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
- return r;
+ write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
+ if (rdev->wb.use_event) {
+ rdev->fence_drv[ring].scratch_reg = 0;
+ index = R600_WB_EVENT_OFFSET + ring * 4;
+ } else {
+ r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
+ if (r) {
+ dev_err(rdev->dev, "fence failed to get scratch register\n");
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ return r;
+ }
+ index = RADEON_WB_SCRATCH_OFFSET +
+ rdev->fence_drv[ring].scratch_reg -
+ rdev->scratch.reg_base;
+ }
+ rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
+ rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
+ radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
+ rdev->fence_drv[ring].initialized = true;
+ DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
+ ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ return 0;
+}
+
+static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
+{
+ rdev->fence_drv[ring].scratch_reg = -1;
+ rdev->fence_drv[ring].cpu_addr = NULL;
+ rdev->fence_drv[ring].gpu_addr = 0;
+ atomic_set(&rdev->fence_drv[ring].seq, 0);
+ INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
+ INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
+ INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
+ init_waitqueue_head(&rdev->fence_drv[ring].queue);
+ rdev->fence_drv[ring].initialized = false;
+}
+
+int radeon_fence_driver_init(struct radeon_device *rdev)
+{
+ unsigned long irq_flags;
+ int ring;
+
+ write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+ radeon_fence_driver_init_ring(rdev, ring);
}
- radeon_fence_write(rdev, 0);
- atomic_set(&rdev->fence_drv.seq, 0);
- INIT_LIST_HEAD(&rdev->fence_drv.created);
- INIT_LIST_HEAD(&rdev->fence_drv.emited);
- INIT_LIST_HEAD(&rdev->fence_drv.signaled);
- init_waitqueue_head(&rdev->fence_drv.queue);
- rdev->fence_drv.initialized = true;
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
if (radeon_debugfs_fence_init(rdev)) {
dev_err(rdev->dev, "fence debugfs file creation failed\n");
}
@@ -389,14 +445,18 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
unsigned long irq_flags;
-
- if (!rdev->fence_drv.initialized)
- return;
- wake_up_all(&rdev->fence_drv.queue);
- write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
- radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
- write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
- rdev->fence_drv.initialized = false;
+ int ring;
+
+ for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
+ if (!rdev->fence_drv[ring].initialized)
+ continue;
+ radeon_fence_wait_last(rdev, ring);
+ wake_up_all(&rdev->fence_drv[ring].queue);
+ write_lock_irqsave(&rdev->fence_lock, irq_flags);
+ radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
+ write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
+ rdev->fence_drv[ring].initialized = false;
+ }
}
@@ -410,14 +470,21 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_fence *fence;
-
- seq_printf(m, "Last signaled fence 0x%08X\n",
- radeon_fence_read(rdev));
- if (!list_empty(&rdev->fence_drv.emited)) {
- fence = list_entry(rdev->fence_drv.emited.prev,
- struct radeon_fence, list);
- seq_printf(m, "Last emited fence %p with 0x%08X\n",
- fence, fence->seq);
+ int i;
+
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (!rdev->fence_drv[i].initialized)
+ continue;
+
+ seq_printf(m, "--- ring %d ---\n", i);
+ seq_printf(m, "Last signaled fence 0x%08X\n",
+ radeon_fence_read(rdev, i));
+ if (!list_empty(&rdev->fence_drv[i].emitted)) {
+ fence = list_entry(rdev->fence_drv[i].emitted.prev,
+ struct radeon_fence, list);
+ seq_printf(m, "Last emitted fence %p with 0x%08X\n",
+ fence, fence->seq);
+ }
}
return 0;
}
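
The fence rework above gives every ring its own fence_drv slot: a per-ring sequence counter plus a CPU/GPU address inside the shared writeback page, which radeon_fence_read()/radeon_fence_write() index by ring. The model below captures that bookkeeping with simplified stand-in types (the driver additionally keeps a scratch-register fallback and converts through le32).

#include <stdint.h>
#include <stdio.h>

#define NUM_RINGS 3

static uint32_t wb_page[64];             /* stand-in for rdev->wb.wb[] */

struct fence_drv {
	volatile uint32_t *cpu_addr;     /* where the GPU writes the fence seq */
	uint32_t seq;                    /* last sequence handed out */
};

static struct fence_drv fence_drv[NUM_RINGS];

static void fence_driver_start_ring(int ring)
{
	/* one dword per ring inside the writeback page */
	fence_drv[ring].cpu_addr = &wb_page[ring];
	fence_drv[ring].seq = 0;
}

static uint32_t fence_emit(int ring)
{
	return ++fence_drv[ring].seq;    /* the GPU would write this back later */
}

static int fence_signaled(int ring, uint32_t seq)
{
	return *fence_drv[ring].cpu_addr >= seq;
}

int main(void)
{
	uint32_t seq;

	for (int i = 0; i < NUM_RINGS; i++)
		fence_driver_start_ring(i);

	seq = fence_emit(1);
	printf("ring 1 fence %u signaled? %d\n", (unsigned)seq, fence_signaled(1, seq));
	wb_page[1] = seq;                /* pretend the GPU completed the work */
	printf("ring 1 fence %u signaled? %d\n", (unsigned)seq, fence_signaled(1, seq));
	return 0;
}
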
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index ba7ab79..c58a036 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -157,9 +157,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
if (rdev->gart.pages[p]) {
- if (!rdev->gart.ttm_alloced[p])
- pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
rdev->gart.pages[p] = NULL;
rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
page_base = rdev->gart.pages_addr[p];
@@ -191,23 +188,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
- /* we reverted the patch using dma_addr in TTM for now but this
- * code stops building on alpha so just comment it out for now */
- if (0) { /*dma_addr[i] != DMA_ERROR_CODE) */
- rdev->gart.ttm_alloced[p] = true;
- rdev->gart.pages_addr[p] = dma_addr[i];
- } else {
- /* we need to support large memory configurations */
- /* assume that unbind have already been call on the range */
- rdev->gart.pages_addr[p] = pci_map_page(rdev->pdev, pagelist[i],
- 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(rdev->pdev, rdev->gart.pages_addr[p])) {
- /* FIXME: failed to map page (return -ENOMEM?) */
- radeon_gart_unbind(rdev, offset, pages);
- return -ENOMEM;
- }
- }
+ rdev->gart.pages_addr[p] = dma_addr[i];
rdev->gart.pages[p] = pagelist[i];
if (rdev->gart.ptr) {
page_base = rdev->gart.pages_addr[p];
@@ -274,12 +255,6 @@ int radeon_gart_init(struct radeon_device *rdev)
radeon_gart_fini(rdev);
return -ENOMEM;
}
- rdev->gart.ttm_alloced = kzalloc(sizeof(bool) *
- rdev->gart.num_cpu_pages, GFP_KERNEL);
- if (rdev->gart.ttm_alloced == NULL) {
- radeon_gart_fini(rdev);
- return -ENOMEM;
- }
/* set GART entry to point to the dummy page by default */
for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
@@ -296,10 +271,404 @@ void radeon_gart_fini(struct radeon_device *rdev)
rdev->gart.ready = false;
kfree(rdev->gart.pages);
kfree(rdev->gart.pages_addr);
- kfree(rdev->gart.ttm_alloced);
rdev->gart.pages = NULL;
rdev->gart.pages_addr = NULL;
- rdev->gart.ttm_alloced = NULL;
radeon_dummy_page_fini(rdev);
}
+
+/*
+ * vm helpers
+ *
+ * TODO bind a default page at vm initialization for default address
+ */
+int radeon_vm_manager_init(struct radeon_device *rdev)
+{
+ int r;
+
+ rdev->vm_manager.enabled = false;
+
+ /* mark first vm as always in use, it's the system one */
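+	/* the page table pool stores one 64-bit entry per pfn, hence max_pfn * 8 bytes */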
+ r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
+ rdev->vm_manager.max_pfn * 8,
+ RADEON_GEM_DOMAIN_VRAM);
+ if (r) {
+ dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n",
+ (rdev->vm_manager.max_pfn * 8) >> 10);
+ return r;
+ }
+
+ r = rdev->vm_manager.funcs->init(rdev);
+ if (r == 0)
+ rdev->vm_manager.enabled = true;
+
+ return r;
+}
+
+/* cs mutex must be locked */
+static void radeon_vm_unbind_locked(struct radeon_device *rdev,
+ struct radeon_vm *vm)
+{
+ struct radeon_bo_va *bo_va;
+
+ if (vm->id == -1) {
+ return;
+ }
+
+ /* wait for vm use to end */
+ if (vm->fence) {
+ radeon_fence_wait(vm->fence, false);
+ radeon_fence_unref(&vm->fence);
+ }
+
+ /* hw unbind */
+ rdev->vm_manager.funcs->unbind(rdev, vm);
+ rdev->vm_manager.use_bitmap &= ~(1 << vm->id);
+ list_del_init(&vm->list);
+ vm->id = -1;
+ radeon_sa_bo_free(rdev, &vm->sa_bo);
+ vm->pt = NULL;
+
+ list_for_each_entry(bo_va, &vm->va, vm_list) {
+ bo_va->valid = false;
+ }
+}
+
+void radeon_vm_manager_fini(struct radeon_device *rdev)
+{
+ if (rdev->vm_manager.sa_manager.bo == NULL)
+ return;
+ radeon_vm_manager_suspend(rdev);
+ rdev->vm_manager.funcs->fini(rdev);
+ radeon_sa_bo_manager_fini(rdev, &rdev->vm_manager.sa_manager);
+ rdev->vm_manager.enabled = false;
+}
+
+int radeon_vm_manager_start(struct radeon_device *rdev)
+{
+ if (rdev->vm_manager.sa_manager.bo == NULL) {
+ return -EINVAL;
+ }
+ return radeon_sa_bo_manager_start(rdev, &rdev->vm_manager.sa_manager);
+}
+
+int radeon_vm_manager_suspend(struct radeon_device *rdev)
+{
+ struct radeon_vm *vm, *tmp;
+
+ radeon_mutex_lock(&rdev->cs_mutex);
+ /* unbind all active vm */
+ list_for_each_entry_safe(vm, tmp, &rdev->vm_manager.lru_vm, list) {
+ radeon_vm_unbind_locked(rdev, vm);
+ }
+ rdev->vm_manager.funcs->fini(rdev);
+ radeon_mutex_unlock(&rdev->cs_mutex);
+ return radeon_sa_bo_manager_suspend(rdev, &rdev->vm_manager.sa_manager);
+}
+
+/* cs mutex must be locked */
+void radeon_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ mutex_lock(&vm->mutex);
+ radeon_vm_unbind_locked(rdev, vm);
+ mutex_unlock(&vm->mutex);
+}
+
+/* cs mutex and vm mutex must be locked */
+int radeon_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ struct radeon_vm *vm_evict;
+ unsigned i;
+ int id = -1, r;
+
+ if (vm == NULL) {
+ return -EINVAL;
+ }
+
+ if (vm->id != -1) {
+ /* update lru */
+ list_del_init(&vm->list);
+ list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
+ return 0;
+ }
+
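+	/* allocate page table space from the sa pool; if the pool is full,
+	 * evict the least recently used vm and retry
+	 */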
+retry:
+ r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager, &vm->sa_bo,
+ RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8),
+ RADEON_GPU_PAGE_SIZE);
+ if (r) {
+ if (list_empty(&rdev->vm_manager.lru_vm)) {
+ return r;
+ }
+ vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
+ radeon_vm_unbind(rdev, vm_evict);
+ goto retry;
+ }
+ vm->pt = rdev->vm_manager.sa_manager.cpu_ptr;
+ vm->pt += (vm->sa_bo.offset >> 3);
+ vm->pt_gpu_addr = rdev->vm_manager.sa_manager.gpu_addr;
+ vm->pt_gpu_addr += vm->sa_bo.offset;
+ memset(vm->pt, 0, RADEON_GPU_PAGE_ALIGN(vm->last_pfn * 8));
+
+retry_id:
+ /* search for free vm */
+ for (i = 0; i < rdev->vm_manager.nvm; i++) {
+ if (!(rdev->vm_manager.use_bitmap & (1 << i))) {
+ id = i;
+ break;
+ }
+ }
+ /* evict vm if necessary */
+ if (id == -1) {
+ vm_evict = list_first_entry(&rdev->vm_manager.lru_vm, struct radeon_vm, list);
+ radeon_vm_unbind(rdev, vm_evict);
+ goto retry_id;
+ }
+
+ /* do hw bind */
+ r = rdev->vm_manager.funcs->bind(rdev, vm, id);
+ if (r) {
+ radeon_sa_bo_free(rdev, &vm->sa_bo);
+ return r;
+ }
+ rdev->vm_manager.use_bitmap |= 1 << id;
+ vm->id = id;
+ list_add_tail(&vm->list, &rdev->vm_manager.lru_vm);
+ return radeon_vm_bo_update_pte(rdev, vm, rdev->ib_pool.sa_manager.bo,
+ &rdev->ib_pool.sa_manager.bo->tbo.mem);
+}
+
+/* object has to be reserved */
+int radeon_vm_bo_add(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo,
+ uint64_t offset,
+ uint32_t flags)
+{
+ struct radeon_bo_va *bo_va, *tmp;
+ struct list_head *head;
+ uint64_t size = radeon_bo_size(bo), last_offset = 0;
+ unsigned last_pfn;
+
+ bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+ if (bo_va == NULL) {
+ return -ENOMEM;
+ }
+ bo_va->vm = vm;
+ bo_va->bo = bo;
+ bo_va->soffset = offset;
+ bo_va->eoffset = offset + size;
+ bo_va->flags = flags;
+ bo_va->valid = false;
+ INIT_LIST_HEAD(&bo_va->bo_list);
+ INIT_LIST_HEAD(&bo_va->vm_list);
+ /* make sure object fit at this offset */
+ if (bo_va->soffset >= bo_va->eoffset) {
+ kfree(bo_va);
+ return -EINVAL;
+ }
+
+ last_pfn = bo_va->eoffset / RADEON_GPU_PAGE_SIZE;
+ if (last_pfn > rdev->vm_manager.max_pfn) {
+ kfree(bo_va);
+ dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
+ last_pfn, rdev->vm_manager.max_pfn);
+ return -EINVAL;
+ }
+
+ mutex_lock(&vm->mutex);
+ if (last_pfn > vm->last_pfn) {
+ /* grow va space 32M by 32M */
+ unsigned align = ((32 << 20) >> 12) - 1;
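+		/* the page table has to be reallocated at the new size, so unbind
+		 * now and let the next bind pick it up
+		 */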
+ radeon_mutex_lock(&rdev->cs_mutex);
+ radeon_vm_unbind_locked(rdev, vm);
+ radeon_mutex_unlock(&rdev->cs_mutex);
+ vm->last_pfn = (last_pfn + align) & ~align;
+ }
+ head = &vm->va;
+ last_offset = 0;
+ list_for_each_entry(tmp, &vm->va, vm_list) {
+ if (bo_va->soffset >= last_offset && bo_va->eoffset < tmp->soffset) {
+ /* bo can be added before this one */
+ break;
+ }
+ if (bo_va->soffset >= tmp->soffset && bo_va->soffset < tmp->eoffset) {
+ /* bo and tmp overlap, invalid offset */
+ dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
+ bo, (unsigned)bo_va->soffset, tmp->bo,
+ (unsigned)tmp->soffset, (unsigned)tmp->eoffset);
+ kfree(bo_va);
+ mutex_unlock(&vm->mutex);
+ return -EINVAL;
+ }
+ last_offset = tmp->eoffset;
+ head = &tmp->vm_list;
+ }
+ list_add(&bo_va->vm_list, head);
+ list_add_tail(&bo_va->bo_list, &bo->va);
+ mutex_unlock(&vm->mutex);
+ return 0;
+}
+
+static u64 radeon_vm_get_addr(struct radeon_device *rdev,
+ struct ttm_mem_reg *mem,
+ unsigned pfn)
+{
+ u64 addr = 0;
+
+ switch (mem->mem_type) {
+ case TTM_PL_VRAM:
+ addr = (mem->start << PAGE_SHIFT);
+ addr += pfn * RADEON_GPU_PAGE_SIZE;
+ addr += rdev->vm_manager.vram_base_offset;
+ break;
+ case TTM_PL_TT:
+		/* compute the system page index inside the gart for this gpu page */
+		addr = mem->start << PAGE_SHIFT;
+		addr += pfn * RADEON_GPU_PAGE_SIZE;
+		addr = addr >> PAGE_SHIFT;
+		/* look up the dma address of that system page */
+		addr = rdev->gart.pages_addr[addr];
+		/* add the offset within the page, in case cpu page size != gpu page size */
+		addr += (pfn * RADEON_GPU_PAGE_SIZE) & (~PAGE_MASK);
+ break;
+ default:
+ break;
+ }
+ return addr;
+}
+
+/* object has to be reserved; cs mutex and vm mutex must be locked */
+int radeon_vm_bo_update_pte(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo,
+ struct ttm_mem_reg *mem)
+{
+ struct radeon_bo_va *bo_va;
+ unsigned ngpu_pages, i;
+ uint64_t addr = 0, pfn;
+ uint32_t flags;
+
+ /* nothing to do if vm isn't bound */
+ if (vm->id == -1)
+		return 0;
+
+ bo_va = radeon_bo_va(bo, vm);
+ if (bo_va == NULL) {
+ dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
+ return -EINVAL;
+ }
+
+ if (bo_va->valid)
+ return 0;
+
+ ngpu_pages = radeon_bo_ngpu_pages(bo);
+ bo_va->flags &= ~RADEON_VM_PAGE_VALID;
+ bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
+ if (mem) {
+ if (mem->mem_type != TTM_PL_SYSTEM) {
+ bo_va->flags |= RADEON_VM_PAGE_VALID;
+ bo_va->valid = true;
+ }
+ if (mem->mem_type == TTM_PL_TT) {
+ bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
+ }
+ }
+ pfn = bo_va->soffset / RADEON_GPU_PAGE_SIZE;
+ flags = rdev->vm_manager.funcs->page_flags(rdev, bo_va->vm, bo_va->flags);
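+	/* write one page table entry per gpu page; unbound pages keep addr == 0 */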
+ for (i = 0, addr = 0; i < ngpu_pages; i++) {
+ if (mem && bo_va->valid) {
+ addr = radeon_vm_get_addr(rdev, mem, i);
+ }
+ rdev->vm_manager.funcs->set_page(rdev, bo_va->vm, i + pfn, addr, flags);
+ }
+ rdev->vm_manager.funcs->tlb_flush(rdev, bo_va->vm);
+ return 0;
+}
+
+/* object has to be reserved */
+int radeon_vm_bo_rmv(struct radeon_device *rdev,
+ struct radeon_vm *vm,
+ struct radeon_bo *bo)
+{
+ struct radeon_bo_va *bo_va;
+
+ bo_va = radeon_bo_va(bo, vm);
+ if (bo_va == NULL)
+ return 0;
+
+ mutex_lock(&vm->mutex);
+ radeon_mutex_lock(&rdev->cs_mutex);
+ radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
+ radeon_mutex_unlock(&rdev->cs_mutex);
+ list_del(&bo_va->vm_list);
+ mutex_unlock(&vm->mutex);
+ list_del(&bo_va->bo_list);
+
+ kfree(bo_va);
+ return 0;
+}
+
+void radeon_vm_bo_invalidate(struct radeon_device *rdev,
+ struct radeon_bo *bo)
+{
+ struct radeon_bo_va *bo_va;
+
+ BUG_ON(!atomic_read(&bo->tbo.reserved));
+ list_for_each_entry(bo_va, &bo->va, bo_list) {
+ bo_va->valid = false;
+ }
+}
+
+int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ int r;
+
+ vm->id = -1;
+ vm->fence = NULL;
+ mutex_init(&vm->mutex);
+ INIT_LIST_HEAD(&vm->list);
+ INIT_LIST_HEAD(&vm->va);
+ vm->last_pfn = 0;
+ /* map the ib pool buffer at 0 in virtual address space, set
+ * read only
+ */
+ r = radeon_vm_bo_add(rdev, vm, rdev->ib_pool.sa_manager.bo, 0,
+ RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED);
+ return r;
+}
+
+void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
+{
+ struct radeon_bo_va *bo_va, *tmp;
+ int r;
+
+ mutex_lock(&vm->mutex);
+
+ radeon_mutex_lock(&rdev->cs_mutex);
+ radeon_vm_unbind_locked(rdev, vm);
+ radeon_mutex_unlock(&rdev->cs_mutex);
+
+ /* remove all bo */
+ r = radeon_bo_reserve(rdev->ib_pool.sa_manager.bo, false);
+ if (!r) {
+ bo_va = radeon_bo_va(rdev->ib_pool.sa_manager.bo, vm);
+ list_del_init(&bo_va->bo_list);
+ list_del_init(&bo_va->vm_list);
+ radeon_bo_unreserve(rdev->ib_pool.sa_manager.bo);
+ kfree(bo_va);
+ }
+ if (!list_empty(&vm->va)) {
+ dev_err(rdev->dev, "still active bo inside vm\n");
+ }
+ list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
+ list_del_init(&bo_va->vm_list);
+ r = radeon_bo_reserve(bo_va->bo, false);
+ if (!r) {
+ list_del_init(&bo_va->bo_list);
+ radeon_bo_unreserve(bo_va->bo);
+ kfree(bo_va);
+ }
+ }
+ mutex_unlock(&vm->mutex);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index aa1ca2d..7337850 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -142,6 +142,44 @@ void radeon_gem_fini(struct radeon_device *rdev)
radeon_bo_force_delete(rdev);
}
+/*
+ * Called from drm_gem_handle_create, which is reached from both the new
+ * and the open ioctl paths.
+ */
+int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+{
+ return 0;
+}
+
+void radeon_gem_object_close(struct drm_gem_object *obj,
+ struct drm_file *file_priv)
+{
+ struct radeon_bo *rbo = gem_to_radeon_bo(obj);
+ struct radeon_device *rdev = rbo->rdev;
+ struct radeon_fpriv *fpriv = file_priv->driver_priv;
+ struct radeon_vm *vm = &fpriv->vm;
+ struct radeon_bo_va *bo_va, *tmp;
+
+ if (rdev->family < CHIP_CAYMAN) {
+ return;
+ }
+
+ if (radeon_bo_reserve(rbo, false)) {
+ return;
+ }
+ list_for_each_entry_safe(bo_va, tmp, &rbo->va, bo_list) {
+ if (bo_va->vm == vm) {
+ /* remove from this vm address space */
+ mutex_lock(&vm->mutex);
+ list_del(&bo_va->vm_list);
+ mutex_unlock(&vm->mutex);
+ list_del(&bo_va->bo_list);
+ kfree(bo_va);
+ }
+ }
+ radeon_bo_unreserve(rbo);
+}
+
/*
* GEM ioctls.
@@ -152,6 +190,7 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
struct radeon_device *rdev = dev->dev_private;
struct drm_radeon_gem_info *args = data;
struct ttm_mem_type_manager *man;
+ unsigned i;
man = &rdev->mman.bdev.man[TTM_PL_VRAM];
@@ -160,8 +199,9 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
if (rdev->stollen_vga_memory)
args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
args->vram_visible -= radeon_fbdev_total_size(rdev);
- args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
- RADEON_IB_POOL_SIZE*64*1024;
+ args->gart_size = rdev->mc.gtt_size - 4096 - RADEON_IB_POOL_SIZE*64*1024;
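+	/* every ring buffer also lives in the gtt, subtract each of them as well */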
+	for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ args->gart_size -= rdev->ring[i].ring_size;
return 0;
}
@@ -352,6 +392,109 @@ out:
return r;
}
+int radeon_gem_va_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *filp)
+{
+ struct drm_radeon_gem_va *args = data;
+ struct drm_gem_object *gobj;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_fpriv *fpriv = filp->driver_priv;
+ struct radeon_bo *rbo;
+ struct radeon_bo_va *bo_va;
+ u32 invalid_flags;
+ int r = 0;
+
+ if (!rdev->vm_manager.enabled) {
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -ENOTTY;
+ }
+
+	/* !! DON'T REMOVE !!
+	 * We don't support vm_id yet. To be sure we don't have broken
+	 * userspace, reject anyone trying to use a non-zero value; moving
+	 * forward we can then use those fields without breaking existing
+	 * userspace.
+	 */
+ if (args->vm_id) {
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -EINVAL;
+ }
+
+ if (args->offset < RADEON_VA_RESERVED_SIZE) {
+ dev_err(&dev->pdev->dev,
+ "offset 0x%lX is in reserved area 0x%X\n",
+ (unsigned long)args->offset,
+ RADEON_VA_RESERVED_SIZE);
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -EINVAL;
+ }
+
+	/* don't remove: we need to force userspace to set the snooped flag,
+	 * otherwise we will end up with broken userspace and we won't be able
+	 * to enable this feature without adding a new interface
+	 */
+ invalid_flags = RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_SYSTEM;
+ if ((args->flags & invalid_flags)) {
+ dev_err(&dev->pdev->dev, "invalid flags 0x%08X vs 0x%08X\n",
+ args->flags, invalid_flags);
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -EINVAL;
+ }
+ if (!(args->flags & RADEON_VM_PAGE_SNOOPED)) {
+		dev_err(&dev->pdev->dev, "only snooped mappings are supported for now\n");
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -EINVAL;
+ }
+
+ switch (args->operation) {
+ case RADEON_VA_MAP:
+ case RADEON_VA_UNMAP:
+ break;
+ default:
+ dev_err(&dev->pdev->dev, "unsupported operation %d\n",
+ args->operation);
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -EINVAL;
+ }
+
+ gobj = drm_gem_object_lookup(dev, filp, args->handle);
+ if (gobj == NULL) {
+ args->operation = RADEON_VA_RESULT_ERROR;
+ return -ENOENT;
+ }
+ rbo = gem_to_radeon_bo(gobj);
+ r = radeon_bo_reserve(rbo, false);
+ if (r) {
+ args->operation = RADEON_VA_RESULT_ERROR;
+ drm_gem_object_unreference_unlocked(gobj);
+ return r;
+ }
+ switch (args->operation) {
+ case RADEON_VA_MAP:
+ bo_va = radeon_bo_va(rbo, &fpriv->vm);
+ if (bo_va) {
+ args->operation = RADEON_VA_RESULT_VA_EXIST;
+ args->offset = bo_va->soffset;
+ goto out;
+ }
+ r = radeon_vm_bo_add(rdev, &fpriv->vm, rbo,
+ args->offset, args->flags);
+ break;
+ case RADEON_VA_UNMAP:
+ r = radeon_vm_bo_rmv(rdev, &fpriv->vm, rbo);
+ break;
+ default:
+ break;
+ }
+ args->operation = RADEON_VA_RESULT_OK;
+ if (r) {
+ args->operation = RADEON_VA_RESULT_ERROR;
+ }
+out:
+ radeon_bo_unreserve(rbo);
+ drm_gem_object_unreference_unlocked(gobj);
+ return r;
+}
+
int radeon_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 7bb1b07..98a8ad6 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -897,6 +897,7 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
i2c->rec = *rec;
i2c->adapter.owner = THIS_MODULE;
i2c->adapter.class = I2C_CLASS_DDC;
+ i2c->adapter.dev.parent = &dev->pdev->dev;
i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
if (rec->mm_i2c ||
@@ -957,6 +958,7 @@ struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
i2c->rec = *rec;
i2c->adapter.owner = THIS_MODULE;
i2c->adapter.class = I2C_CLASS_DDC;
+ i2c->adapter.dev.parent = &dev->pdev->dev;
i2c->dev = dev;
snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
"Radeon aux bus %s", name);
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 8f86aeb..66d5fe1 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -65,7 +65,8 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
unsigned i;
/* Disable *all* interrupts */
- rdev->irq.sw_int = false;
+ for (i = 0; i < RADEON_NUM_RINGS; i++)
+ rdev->irq.sw_int[i] = false;
rdev->irq.gui_idle = false;
for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
rdev->irq.hpd[i] = false;
@@ -81,9 +82,11 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
int radeon_driver_irq_postinstall_kms(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
+ unsigned i;
dev->max_vblank_count = 0x001fffff;
- rdev->irq.sw_int = true;
+ for (i = 0; i < RADEON_NUM_RINGS; i++)
+ rdev->irq.sw_int[i] = true;
radeon_irq_set(rdev);
return 0;
}
@@ -97,7 +100,8 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
return;
}
/* Disable *all* interrupts */
- rdev->irq.sw_int = false;
+ for (i = 0; i < RADEON_NUM_RINGS; i++)
+ rdev->irq.sw_int[i] = false;
rdev->irq.gui_idle = false;
for (i = 0; i < RADEON_MAX_HPD_PINS; i++)
rdev->irq.hpd[i] = false;
@@ -134,6 +138,12 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
/* Dell RS690 only seems to work with MSIs. */
if ((rdev->pdev->device == 0x791f) &&
(rdev->pdev->subsystem_vendor == 0x1028) &&
+ (rdev->pdev->subsystem_device == 0x01fc))
+ return true;
+
+ /* Dell RS690 only seems to work with MSIs. */
+ if ((rdev->pdev->device == 0x791f) &&
+ (rdev->pdev->subsystem_vendor == 0x1028) &&
(rdev->pdev->subsystem_device == 0x01fd))
return true;
@@ -194,26 +204,26 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
flush_work_sync(&rdev->hotplug_work);
}
-void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)
{
unsigned long irqflags;
spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
- if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount == 1)) {
- rdev->irq.sw_int = true;
+ if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount[ring] == 1)) {
+ rdev->irq.sw_int[ring] = true;
radeon_irq_set(rdev);
}
spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
}
-void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)
{
unsigned long irqflags;
spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
- BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount <= 0);
- if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount == 0)) {
- rdev->irq.sw_int = false;
+ BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount[ring] <= 0);
+ if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount[ring] == 0)) {
+ rdev->irq.sw_int[ring] = false;
radeon_irq_set(rdev);
}
spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index be2c122..d335288 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -250,6 +250,18 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return -EINVAL;
}
break;
+ case RADEON_INFO_VA_START:
+ /* this is where we report if vm is supported or not */
+ if (rdev->family < CHIP_CAYMAN)
+ return -EINVAL;
+ value = RADEON_VA_RESERVED_SIZE;
+ break;
+ case RADEON_INFO_IB_VM_MAX_SIZE:
+ /* this is where we report if vm is supported or not */
+ if (rdev->family < CHIP_CAYMAN)
+ return -EINVAL;
+ value = RADEON_IB_VM_MAX_SIZE;
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
@@ -270,7 +282,6 @@ int radeon_driver_firstopen_kms(struct drm_device *dev)
return 0;
}
-
void radeon_driver_lastclose_kms(struct drm_device *dev)
{
vga_switcheroo_process_delayed_switch();
@@ -278,12 +289,45 @@ void radeon_driver_lastclose_kms(struct drm_device *dev)
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
{
+ struct radeon_device *rdev = dev->dev_private;
+
+ file_priv->driver_priv = NULL;
+
+	/* newer GPUs have virtual address space support */
+ if (rdev->family >= CHIP_CAYMAN) {
+ struct radeon_fpriv *fpriv;
+ int r;
+
+ fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
+ if (unlikely(!fpriv)) {
+ return -ENOMEM;
+ }
+
+ r = radeon_vm_init(rdev, &fpriv->vm);
+ if (r) {
+ radeon_vm_fini(rdev, &fpriv->vm);
+ kfree(fpriv);
+ return r;
+ }
+
+ file_priv->driver_priv = fpriv;
+ }
return 0;
}
void radeon_driver_postclose_kms(struct drm_device *dev,
struct drm_file *file_priv)
{
+ struct radeon_device *rdev = dev->dev_private;
+
+	/* newer GPUs have virtual address space support */
+ if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
+ struct radeon_fpriv *fpriv = file_priv->driver_priv;
+
+ radeon_vm_fini(rdev, &fpriv->vm);
+ kfree(fpriv);
+ file_priv->driver_priv = NULL;
+ }
}
void radeon_driver_preclose_kms(struct drm_device *dev,
@@ -451,5 +495,6 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = {
DRM_IOCTL_DEF_DRV(RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF_DRV(RADEON_GEM_VA, radeon_gem_va_ioctl, DRM_AUTH|DRM_UNLOCKED),
};
int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index daadf21..25a19c4 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -437,7 +437,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
crtc_offset_cntl = 0;
- pitch_pixels = target_fb->pitch / (target_fb->bits_per_pixel / 8);
+ pitch_pixels = target_fb->pitches[0] / (target_fb->bits_per_pixel / 8);
crtc_pitch = (((pitch_pixels * target_fb->bits_per_pixel) +
((target_fb->bits_per_pixel * 8) - 1)) /
(target_fb->bits_per_pixel * 8));
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 2c2e75e..8a85598 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -362,6 +362,7 @@ struct radeon_encoder_atom_dig {
struct backlight_device *bl_dev;
int dpms_mode;
uint8_t backlight_level;
+ int panel_mode;
};
struct radeon_encoder_atom_dac {
@@ -466,6 +467,10 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev);
extern struct drm_connector *
radeon_get_connector_for_encoder(struct drm_encoder *encoder);
+extern struct drm_connector *
+radeon_get_connector_for_encoder_init(struct drm_encoder *encoder);
+extern bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
+ u32 pixel_clock);
extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
@@ -482,8 +487,11 @@ extern void radeon_dp_link_train(struct drm_encoder *encoder,
extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
+extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
+ struct drm_connector *connector);
extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode);
extern void radeon_atom_encoder_init(struct radeon_device *rdev);
+extern void radeon_atom_dcpll_init(struct radeon_device *rdev);
extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
int action, uint8_t lane_num,
uint8_t lane_set);
@@ -641,9 +649,9 @@ extern void radeon_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
extern void radeon_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
-void radeon_framebuffer_init(struct drm_device *dev,
+int radeon_framebuffer_init(struct drm_device *dev,
struct radeon_framebuffer *rfb,
- struct drm_mode_fb_cmd *mode_cmd,
+ struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_gem_object *obj);
int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 1c85152..d45df17 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -46,6 +46,20 @@ static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
* function are calling it.
*/
+void radeon_bo_clear_va(struct radeon_bo *bo)
+{
+ struct radeon_bo_va *bo_va, *tmp;
+
+ list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
+ /* remove from all vm address space */
+ mutex_lock(&bo_va->vm->mutex);
+ list_del(&bo_va->vm_list);
+ mutex_unlock(&bo_va->vm->mutex);
+ list_del(&bo_va->bo_list);
+ kfree(bo_va);
+ }
+}
+
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
struct radeon_bo *bo;
@@ -55,6 +69,7 @@ static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
radeon_bo_clear_surface_reg(bo);
+ radeon_bo_clear_va(bo);
drm_gem_object_release(&bo->gem_base);
kfree(bo);
}
@@ -95,6 +110,7 @@ int radeon_bo_create(struct radeon_device *rdev,
enum ttm_bo_type type;
unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
unsigned long max_size = 0;
+ size_t acc_size;
int r;
size = ALIGN(size, PAGE_SIZE);
@@ -117,6 +133,9 @@ int radeon_bo_create(struct radeon_device *rdev,
return -ENOMEM;
}
+ acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
+ sizeof(struct radeon_bo));
+
retry:
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
if (bo == NULL)
@@ -130,12 +149,13 @@ retry:
bo->gem_base.driver_private = NULL;
bo->surface_reg = -1;
INIT_LIST_HEAD(&bo->list);
+ INIT_LIST_HEAD(&bo->va);
radeon_ttm_placement_from_domain(bo, domain);
/* Kernel allocation are uninterruptible */
mutex_lock(&rdev->vram_mutex);
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
- &bo->placement, page_align, 0, !kernel, NULL, size,
- &radeon_ttm_bo_destroy);
+ &bo->placement, page_align, 0, !kernel, NULL,
+ acc_size, &radeon_ttm_bo_destroy);
mutex_unlock(&rdev->vram_mutex);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS) {
@@ -483,6 +503,7 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo,
return;
rbo = container_of(bo, struct radeon_bo, tbo);
radeon_bo_check_tiling(rbo, 0, 1);
+ radeon_vm_bo_invalidate(rbo->rdev, rbo);
}
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
@@ -556,3 +577,16 @@ int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
}
return 0;
}
+
+/* object has to be reserved */
+struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo, struct radeon_vm *vm)
+{
+ struct radeon_bo_va *bo_va;
+
+ list_for_each_entry(bo_va, &rbo->va, bo_list) {
+ if (bo_va->vm == vm) {
+ return bo_va;
+ }
+ }
+ return NULL;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index b07f0f9..cde4303 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -83,6 +83,16 @@ static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
return !!atomic_read(&bo->tbo.reserved);
}
+static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
+{
+ return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
+}
+
+static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
+{
+ return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
+}
+
/**
* radeon_bo_mmap_offset - return mmap offset of bo
* @bo: radeon object for which we query the offset
@@ -128,4 +138,26 @@ extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem);
extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
+extern struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo,
+ struct radeon_vm *vm);
+
+/*
+ * sub allocation
+ */
+extern int radeon_sa_bo_manager_init(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager,
+ unsigned size, u32 domain);
+extern void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager);
+extern int radeon_sa_bo_manager_start(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager);
+extern int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager);
+extern int radeon_sa_bo_new(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager,
+ struct radeon_sa_bo *sa_bo,
+ unsigned size, unsigned align);
+extern void radeon_sa_bo_free(struct radeon_device *rdev,
+ struct radeon_sa_bo *sa_bo);
+
#endif
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 78a665b..095148e 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -252,7 +252,10 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
mutex_lock(&rdev->ddev->struct_mutex);
mutex_lock(&rdev->vram_mutex);
- mutex_lock(&rdev->cp.mutex);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (rdev->ring[i].ring_obj)
+ mutex_lock(&rdev->ring[i].mutex);
+ }
/* gui idle int has issues on older chips it seems */
if (rdev->family >= CHIP_R600) {
@@ -268,12 +271,13 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
radeon_irq_set(rdev);
}
} else {
- if (rdev->cp.ready) {
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
+ if (ring->ready) {
struct radeon_fence *fence;
- radeon_ring_alloc(rdev, 64);
- radeon_fence_create(rdev, &fence);
+ radeon_ring_alloc(rdev, ring, 64);
+ radeon_fence_create(rdev, &fence, radeon_ring_index(rdev, ring));
radeon_fence_emit(rdev, fence);
- radeon_ring_commit(rdev);
+ radeon_ring_commit(rdev, ring);
radeon_fence_wait(fence, false);
radeon_fence_unref(&fence);
}
@@ -307,7 +311,10 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
rdev->pm.dynpm_planned_action = DYNPM_ACTION_NONE;
- mutex_unlock(&rdev->cp.mutex);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ if (rdev->ring[i].ring_obj)
+ mutex_unlock(&rdev->ring[i].mutex);
+ }
mutex_unlock(&rdev->vram_mutex);
mutex_unlock(&rdev->ddev->struct_mutex);
}
@@ -795,19 +802,14 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
mutex_lock(&rdev->pm.mutex);
if (rdev->pm.dynpm_state == DYNPM_STATE_ACTIVE) {
- unsigned long irq_flags;
int not_processed = 0;
+ int i;
- read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
- if (!list_empty(&rdev->fence_drv.emited)) {
- struct list_head *ptr;
- list_for_each(ptr, &rdev->fence_drv.emited) {
- /* count up to 3, that's enought info */
- if (++not_processed >= 3)
- break;
- }
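+		/* three emitted-but-unsignaled fences across all rings is enough
+		 * information to decide to upclock
+		 */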
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ not_processed += radeon_fence_count_emitted(rdev, i);
+ if (not_processed >= 3)
+ break;
}
- read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
if (not_processed >= 3) { /* should upclock */
if (rdev->pm.dynpm_planned_action == DYNPM_ACTION_DOWNCLOCK) {
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 49d5820..92c9ea4 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -34,6 +34,7 @@
#include "atom.h"
int radeon_debugfs_ib_init(struct radeon_device *rdev);
+int radeon_debugfs_ring_init(struct radeon_device *rdev);
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
@@ -60,105 +61,106 @@ u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
return idx_value;
}
-void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
+void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
- if (rdev->cp.count_dw <= 0) {
+ if (ring->count_dw <= 0) {
DRM_ERROR("radeon: writting more dword to ring than expected !\n");
}
#endif
- rdev->cp.ring[rdev->cp.wptr++] = v;
- rdev->cp.wptr &= rdev->cp.ptr_mask;
- rdev->cp.count_dw--;
- rdev->cp.ring_free_dw--;
+ ring->ring[ring->wptr++] = v;
+ ring->wptr &= ring->ptr_mask;
+ ring->count_dw--;
+ ring->ring_free_dw--;
}
-void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
-{
- struct radeon_ib *ib, *n;
-
- list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
- list_del(&ib->list);
- vfree(ib->ptr);
- kfree(ib);
- }
-}
-
-void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
+/*
+ * IB.
+ */
+bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
- struct radeon_ib *bib;
-
- bib = kmalloc(sizeof(*bib), GFP_KERNEL);
- if (bib == NULL)
- return;
- bib->ptr = vmalloc(ib->length_dw * 4);
- if (bib->ptr == NULL) {
- kfree(bib);
- return;
+ bool done = false;
+
+	/* only free ibs which have been emitted */
+ if (ib->fence && ib->fence->emitted) {
+ if (radeon_fence_signaled(ib->fence)) {
+ radeon_fence_unref(&ib->fence);
+ radeon_sa_bo_free(rdev, &ib->sa_bo);
+ done = true;
+ }
}
- memcpy(bib->ptr, ib->ptr, ib->length_dw * 4);
- bib->length_dw = ib->length_dw;
- mutex_lock(&rdev->ib_pool.mutex);
- list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
- mutex_unlock(&rdev->ib_pool.mutex);
+ return done;
}
-/*
- * IB.
- */
-int radeon_ib_get(struct radeon_device *rdev, struct radeon_ib **ib)
+int radeon_ib_get(struct radeon_device *rdev, int ring,
+ struct radeon_ib **ib, unsigned size)
{
struct radeon_fence *fence;
- struct radeon_ib *nib;
- int r = 0, i, c;
+ unsigned cretry = 0;
+ int r = 0, i, idx;
*ib = NULL;
- r = radeon_fence_create(rdev, &fence);
+ /* align size on 256 bytes */
+ size = ALIGN(size, 256);
+
+ r = radeon_fence_create(rdev, &fence, ring);
if (r) {
dev_err(rdev->dev, "failed to create fence for new IB\n");
return r;
}
- mutex_lock(&rdev->ib_pool.mutex);
- for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
- i &= (RADEON_IB_POOL_SIZE - 1);
- if (rdev->ib_pool.ibs[i].free) {
- nib = &rdev->ib_pool.ibs[i];
- break;
- }
- }
- if (nib == NULL) {
- /* This should never happen, it means we allocated all
- * IB and haven't scheduled one yet, return EBUSY to
- * userspace hoping that on ioctl recall we get better
- * luck
- */
- dev_err(rdev->dev, "no free indirect buffer !\n");
- mutex_unlock(&rdev->ib_pool.mutex);
+
+ radeon_mutex_lock(&rdev->ib_pool.mutex);
+ idx = rdev->ib_pool.head_id;
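+	/* scan the whole pool starting at the oldest slot and reclaim any ib
+	 * whose fence has already signaled
+	 */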
+retry:
+ if (cretry > 5) {
+		dev_err(rdev->dev, "failed to get an ib after 5 retries\n");
+ radeon_mutex_unlock(&rdev->ib_pool.mutex);
radeon_fence_unref(&fence);
- return -EBUSY;
+ return -ENOMEM;
}
- rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
- nib->free = false;
- if (nib->fence) {
- mutex_unlock(&rdev->ib_pool.mutex);
- r = radeon_fence_wait(nib->fence, false);
- if (r) {
- dev_err(rdev->dev, "error waiting fence of IB(%u:0x%016lX:%u)\n",
- nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
- mutex_lock(&rdev->ib_pool.mutex);
- nib->free = true;
- mutex_unlock(&rdev->ib_pool.mutex);
- radeon_fence_unref(&fence);
- return r;
+ cretry++;
+ for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+ radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
+ if (rdev->ib_pool.ibs[idx].fence == NULL) {
+ r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
+ &rdev->ib_pool.ibs[idx].sa_bo,
+ size, 256);
+ if (!r) {
+ *ib = &rdev->ib_pool.ibs[idx];
+ (*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
+ (*ib)->ptr += ((*ib)->sa_bo.offset >> 2);
+ (*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
+ (*ib)->gpu_addr += (*ib)->sa_bo.offset;
+ (*ib)->fence = fence;
+ (*ib)->vm_id = 0;
+			/* ibs are most likely to be allocated in a ring fashion,
+ * thus rdev->ib_pool.head_id should be the id of the
+ * oldest ib
+ */
+ rdev->ib_pool.head_id = (1 + idx);
+ rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
+ radeon_mutex_unlock(&rdev->ib_pool.mutex);
+ return 0;
+ }
}
- mutex_lock(&rdev->ib_pool.mutex);
+ idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
}
- radeon_fence_unref(&nib->fence);
- nib->fence = fence;
- nib->length_dw = 0;
- mutex_unlock(&rdev->ib_pool.mutex);
- *ib = nib;
- return 0;
+	/* this should be a rare event, i.e. all ibs are scheduled but none
+	 * has signaled yet
+	 */
+ for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+ if (rdev->ib_pool.ibs[idx].fence && rdev->ib_pool.ibs[idx].fence->emitted) {
+ r = radeon_fence_wait(rdev->ib_pool.ibs[idx].fence, false);
+ if (!r) {
+ goto retry;
+ }
+ /* an error happened */
+ break;
+ }
+ idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
+ }
+ radeon_mutex_unlock(&rdev->ib_pool.mutex);
+ radeon_fence_unref(&fence);
+ return r;
}
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
@@ -169,247 +171,258 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
if (tmp == NULL) {
return;
}
- if (!tmp->fence->emited)
+ radeon_mutex_lock(&rdev->ib_pool.mutex);
+ if (tmp->fence && !tmp->fence->emitted) {
+ radeon_sa_bo_free(rdev, &tmp->sa_bo);
radeon_fence_unref(&tmp->fence);
- mutex_lock(&rdev->ib_pool.mutex);
- tmp->free = true;
- mutex_unlock(&rdev->ib_pool.mutex);
+ }
+ radeon_mutex_unlock(&rdev->ib_pool.mutex);
}
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
+ struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
int r = 0;
- if (!ib->length_dw || !rdev->cp.ready) {
+ if (!ib->length_dw || !ring->ready) {
/* TODO: Nothings in the ib we should report. */
DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
return -EINVAL;
}
/* 64 dwords should be enough for fence too */
- r = radeon_ring_lock(rdev, 64);
+ r = radeon_ring_lock(rdev, ring, 64);
if (r) {
DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
return r;
}
- radeon_ring_ib_execute(rdev, ib);
+ radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
radeon_fence_emit(rdev, ib->fence);
- mutex_lock(&rdev->ib_pool.mutex);
- /* once scheduled IB is considered free and protected by the fence */
- ib->free = true;
- mutex_unlock(&rdev->ib_pool.mutex);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_unlock_commit(rdev, ring);
return 0;
}
int radeon_ib_pool_init(struct radeon_device *rdev)
{
- void *ptr;
- uint64_t gpu_addr;
- int i;
- int r = 0;
+ struct radeon_sa_manager tmp;
+ int i, r;
- if (rdev->ib_pool.robj)
- return 0;
- INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
- /* Allocate 1M object buffer */
- r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
- PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
- &rdev->ib_pool.robj);
- if (r) {
- DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
- return r;
- }
- r = radeon_bo_reserve(rdev->ib_pool.robj, false);
- if (unlikely(r != 0))
- return r;
- r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
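+	/* init the manager into a temporary outside the ib_pool mutex; it is
+	 * copied into rdev under the lock below
+	 */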
+ r = radeon_sa_bo_manager_init(rdev, &tmp,
+ RADEON_IB_POOL_SIZE*64*1024,
+ RADEON_GEM_DOMAIN_GTT);
if (r) {
- radeon_bo_unreserve(rdev->ib_pool.robj);
- DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
return r;
}
- r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
- radeon_bo_unreserve(rdev->ib_pool.robj);
- if (r) {
- DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
- return r;
+
+ radeon_mutex_lock(&rdev->ib_pool.mutex);
+ if (rdev->ib_pool.ready) {
+ radeon_mutex_unlock(&rdev->ib_pool.mutex);
+ radeon_sa_bo_manager_fini(rdev, &tmp);
+ return 0;
}
- for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
- unsigned offset;
- offset = i * 64 * 1024;
- rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
- rdev->ib_pool.ibs[i].ptr = ptr + offset;
+ rdev->ib_pool.sa_manager = tmp;
+ INIT_LIST_HEAD(&rdev->ib_pool.sa_manager.sa_bo);
+ for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+ rdev->ib_pool.ibs[i].fence = NULL;
rdev->ib_pool.ibs[i].idx = i;
rdev->ib_pool.ibs[i].length_dw = 0;
- rdev->ib_pool.ibs[i].free = true;
+ INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].sa_bo.list);
}
rdev->ib_pool.head_id = 0;
rdev->ib_pool.ready = true;
DRM_INFO("radeon: ib pool ready.\n");
+
if (radeon_debugfs_ib_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for IB !\n");
}
- return r;
+ if (radeon_debugfs_ring_init(rdev)) {
+ DRM_ERROR("Failed to register debugfs file for rings !\n");
+ }
+ radeon_mutex_unlock(&rdev->ib_pool.mutex);
+ return 0;
}
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
- int r;
- struct radeon_bo *robj;
+ unsigned i;
- if (!rdev->ib_pool.ready) {
- return;
- }
- mutex_lock(&rdev->ib_pool.mutex);
- radeon_ib_bogus_cleanup(rdev);
- robj = rdev->ib_pool.robj;
- rdev->ib_pool.robj = NULL;
- mutex_unlock(&rdev->ib_pool.mutex);
-
- if (robj) {
- r = radeon_bo_reserve(robj, false);
- if (likely(r == 0)) {
- radeon_bo_kunmap(robj);
- radeon_bo_unpin(robj);
- radeon_bo_unreserve(robj);
+ radeon_mutex_lock(&rdev->ib_pool.mutex);
+ if (rdev->ib_pool.ready) {
+ for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
+ radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
+ radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
}
- radeon_bo_unref(&robj);
+ radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
+ rdev->ib_pool.ready = false;
}
+ radeon_mutex_unlock(&rdev->ib_pool.mutex);
+}
+
+int radeon_ib_pool_start(struct radeon_device *rdev)
+{
+ return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager);
}
+int radeon_ib_pool_suspend(struct radeon_device *rdev)
+{
+ return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
+}
/*
* Ring.
*/
-void radeon_ring_free_size(struct radeon_device *rdev)
+int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
{
- if (rdev->wb.enabled)
- rdev->cp.rptr = le32_to_cpu(rdev->wb.wb[RADEON_WB_CP_RPTR_OFFSET/4]);
- else {
- if (rdev->family >= CHIP_R600)
- rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
- else
- rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
+ /* r1xx-r5xx only has CP ring */
+ if (rdev->family < CHIP_R600)
+ return RADEON_RING_TYPE_GFX_INDEX;
+
+ if (rdev->family >= CHIP_CAYMAN) {
+ if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
+ return CAYMAN_RING_TYPE_CP1_INDEX;
+ else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
+ return CAYMAN_RING_TYPE_CP2_INDEX;
}
+ return RADEON_RING_TYPE_GFX_INDEX;
+}
+
+void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
+{
+ u32 rptr;
+
+ if (rdev->wb.enabled)
+ rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
+ else
+ rptr = RREG32(ring->rptr_reg);
+ ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
/* This works because ring_size is a power of 2 */
- rdev->cp.ring_free_dw = (rdev->cp.rptr + (rdev->cp.ring_size / 4));
- rdev->cp.ring_free_dw -= rdev->cp.wptr;
- rdev->cp.ring_free_dw &= rdev->cp.ptr_mask;
- if (!rdev->cp.ring_free_dw) {
- rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
+ ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
+ ring->ring_free_dw -= ring->wptr;
+ ring->ring_free_dw &= ring->ptr_mask;
+ if (!ring->ring_free_dw) {
+ ring->ring_free_dw = ring->ring_size / 4;
}
}
-int radeon_ring_alloc(struct radeon_device *rdev, unsigned ndw)
+
+int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
int r;
/* Align requested size with padding so unlock_commit can
* pad safely */
- ndw = (ndw + rdev->cp.align_mask) & ~rdev->cp.align_mask;
- while (ndw > (rdev->cp.ring_free_dw - 1)) {
- radeon_ring_free_size(rdev);
- if (ndw < rdev->cp.ring_free_dw) {
+ ndw = (ndw + ring->align_mask) & ~ring->align_mask;
+ while (ndw > (ring->ring_free_dw - 1)) {
+ radeon_ring_free_size(rdev, ring);
+ if (ndw < ring->ring_free_dw) {
break;
}
- r = radeon_fence_wait_next(rdev);
+ r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
if (r)
return r;
}
- rdev->cp.count_dw = ndw;
- rdev->cp.wptr_old = rdev->cp.wptr;
+ ring->count_dw = ndw;
+ ring->wptr_old = ring->wptr;
return 0;
}
-int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw)
+int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
int r;
- mutex_lock(&rdev->cp.mutex);
- r = radeon_ring_alloc(rdev, ndw);
+ mutex_lock(&ring->mutex);
+ r = radeon_ring_alloc(rdev, ring, ndw);
if (r) {
- mutex_unlock(&rdev->cp.mutex);
+ mutex_unlock(&ring->mutex);
return r;
}
return 0;
}
-void radeon_ring_commit(struct radeon_device *rdev)
+void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
unsigned count_dw_pad;
unsigned i;
/* We pad to match fetch size */
- count_dw_pad = (rdev->cp.align_mask + 1) -
- (rdev->cp.wptr & rdev->cp.align_mask);
+ count_dw_pad = (ring->align_mask + 1) -
+ (ring->wptr & ring->align_mask);
for (i = 0; i < count_dw_pad; i++) {
- radeon_ring_write(rdev, 2 << 30);
+ radeon_ring_write(ring, ring->nop);
}
DRM_MEMORYBARRIER();
- radeon_cp_commit(rdev);
+ WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
+ (void)RREG32(ring->wptr_reg);
}
-void radeon_ring_unlock_commit(struct radeon_device *rdev)
+void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
- radeon_ring_commit(rdev);
- mutex_unlock(&rdev->cp.mutex);
+ radeon_ring_commit(rdev, ring);
+ mutex_unlock(&ring->mutex);
}
-void radeon_ring_unlock_undo(struct radeon_device *rdev)
+void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
- rdev->cp.wptr = rdev->cp.wptr_old;
- mutex_unlock(&rdev->cp.mutex);
+ ring->wptr = ring->wptr_old;
+ mutex_unlock(&ring->mutex);
}
-int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
+int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
+ unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
+ u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
int r;
- rdev->cp.ring_size = ring_size;
+ ring->ring_size = ring_size;
+ ring->rptr_offs = rptr_offs;
+ ring->rptr_reg = rptr_reg;
+ ring->wptr_reg = wptr_reg;
+ ring->ptr_reg_shift = ptr_reg_shift;
+ ring->ptr_reg_mask = ptr_reg_mask;
+ ring->nop = nop;
/* Allocate ring buffer */
- if (rdev->cp.ring_obj == NULL) {
- r = radeon_bo_create(rdev, rdev->cp.ring_size, PAGE_SIZE, true,
+ if (ring->ring_obj == NULL) {
+ r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_GTT,
- &rdev->cp.ring_obj);
+ &ring->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring create failed\n", r);
return r;
}
- r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+ r = radeon_bo_reserve(ring->ring_obj, false);
if (unlikely(r != 0))
return r;
- r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
- &rdev->cp.gpu_addr);
+ r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
+ &ring->gpu_addr);
if (r) {
- radeon_bo_unreserve(rdev->cp.ring_obj);
+ radeon_bo_unreserve(ring->ring_obj);
dev_err(rdev->dev, "(%d) ring pin failed\n", r);
return r;
}
- r = radeon_bo_kmap(rdev->cp.ring_obj,
- (void **)&rdev->cp.ring);
- radeon_bo_unreserve(rdev->cp.ring_obj);
+ r = radeon_bo_kmap(ring->ring_obj,
+ (void **)&ring->ring);
+ radeon_bo_unreserve(ring->ring_obj);
if (r) {
dev_err(rdev->dev, "(%d) ring map failed\n", r);
return r;
}
}
- rdev->cp.ptr_mask = (rdev->cp.ring_size / 4) - 1;
- rdev->cp.ring_free_dw = rdev->cp.ring_size / 4;
+ ring->ptr_mask = (ring->ring_size / 4) - 1;
+ ring->ring_free_dw = ring->ring_size / 4;
return 0;
}
-void radeon_ring_fini(struct radeon_device *rdev)
+void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
int r;
struct radeon_bo *ring_obj;
- mutex_lock(&rdev->cp.mutex);
- ring_obj = rdev->cp.ring_obj;
- rdev->cp.ring = NULL;
- rdev->cp.ring_obj = NULL;
- mutex_unlock(&rdev->cp.mutex);
+ mutex_lock(&ring->mutex);
+ ring_obj = ring->ring_obj;
+ ring->ring = NULL;
+ ring->ring_obj = NULL;
+ mutex_unlock(&ring->mutex);
if (ring_obj) {
r = radeon_bo_reserve(ring_obj, false);
@@ -422,72 +435,86 @@ void radeon_ring_fini(struct radeon_device *rdev)
}
}
-
/*
* Debugfs info
*/
#if defined(CONFIG_DEBUG_FS)
-static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
+
+static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct radeon_ib *ib = node->info_ent->data;
- unsigned i;
-
- if (ib == NULL) {
- return 0;
- }
- seq_printf(m, "IB %04u\n", ib->idx);
- seq_printf(m, "IB fence %p\n", ib->fence);
- seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
- for (i = 0; i < ib->length_dw; i++) {
- seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
+ struct drm_device *dev = node->minor->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int ridx = *(int*)node->info_ent->data;
+ struct radeon_ring *ring = &rdev->ring[ridx];
+ unsigned count, i, j;
+
+ radeon_ring_free_size(rdev, ring);
+ count = (ring->ring_size / 4) - ring->ring_free_dw;
+ seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
+ seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
+ seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
+ seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
+ seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
+ seq_printf(m, "%u dwords in ring\n", count);
+ i = ring->rptr;
+ for (j = 0; j <= count; j++) {
+ seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
+ i = (i + 1) & ring->ptr_mask;
}
return 0;
}
-static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
+static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
+static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
+static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;
+
+static struct drm_info_list radeon_debugfs_ring_info_list[] = {
+ {"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
+ {"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
+ {"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
+};
+
+static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct radeon_device *rdev = node->info_ent->data;
- struct radeon_ib *ib;
+ struct radeon_ib *ib = node->info_ent->data;
unsigned i;
- mutex_lock(&rdev->ib_pool.mutex);
- if (list_empty(&rdev->ib_pool.bogus_ib)) {
- mutex_unlock(&rdev->ib_pool.mutex);
- seq_printf(m, "no bogus IB recorded\n");
+ if (ib == NULL) {
return 0;
}
- ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
- list_del_init(&ib->list);
- mutex_unlock(&rdev->ib_pool.mutex);
+ seq_printf(m, "IB %04u\n", ib->idx);
+ seq_printf(m, "IB fence %p\n", ib->fence);
seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
for (i = 0; i < ib->length_dw; i++) {
seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
}
- vfree(ib->ptr);
- kfree(ib);
return 0;
}
static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
+#endif
-static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
- {"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
-};
+int radeon_debugfs_ring_init(struct radeon_device *rdev)
+{
+#if defined(CONFIG_DEBUG_FS)
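+	/* only cayman and newer have the two extra compute rings; older asics
+	 * just get the gfx ring entry
+	 */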
+ if (rdev->family >= CHIP_CAYMAN)
+ return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list,
+ ARRAY_SIZE(radeon_debugfs_ring_info_list));
+ else
+ return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list, 1);
+#else
+ return 0;
#endif
+}
int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
unsigned i;
- int r;
- radeon_debugfs_ib_bogus_info_list[0].data = rdev;
- r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
- if (r)
- return r;
for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
new file mode 100644
index 0000000..4cce47e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -0,0 +1,189 @@
+/*
+ * Copyright 2011 Red Hat Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ * Jerome Glisse <glisse@freedesktop.org>
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "radeon.h"
+
+int radeon_sa_bo_manager_init(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager,
+ unsigned size, u32 domain)
+{
+ int r;
+
+ sa_manager->bo = NULL;
+ sa_manager->size = size;
+ sa_manager->domain = domain;
+ INIT_LIST_HEAD(&sa_manager->sa_bo);
+
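+	/* the bo is created in the CPU domain; it only gets pinned into the
+	 * requested domain by radeon_sa_bo_manager_start()
+	 */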
+ r = radeon_bo_create(rdev, size, RADEON_GPU_PAGE_SIZE, true,
+ RADEON_GEM_DOMAIN_CPU, &sa_manager->bo);
+ if (r) {
+ dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
+ return r;
+ }
+
+ return r;
+}
+
+void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager)
+{
+ struct radeon_sa_bo *sa_bo, *tmp;
+
+ if (!list_empty(&sa_manager->sa_bo)) {
+ dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
+ }
+ list_for_each_entry_safe(sa_bo, tmp, &sa_manager->sa_bo, list) {
+ list_del_init(&sa_bo->list);
+ }
+ radeon_bo_unref(&sa_manager->bo);
+ sa_manager->size = 0;
+}
+
+int radeon_sa_bo_manager_start(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager)
+{
+ int r;
+
+ if (sa_manager->bo == NULL) {
+ dev_err(rdev->dev, "no bo for sa manager\n");
+ return -EINVAL;
+ }
+
+ /* map the buffer */
+ r = radeon_bo_reserve(sa_manager->bo, false);
+ if (r) {
+ dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
+ return r;
+ }
+ r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
+ if (r) {
+ radeon_bo_unreserve(sa_manager->bo);
+ dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
+ return r;
+ }
+ r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
+ radeon_bo_unreserve(sa_manager->bo);
+ return r;
+}
+
+int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager)
+{
+ int r;
+
+ if (sa_manager->bo == NULL) {
+ dev_err(rdev->dev, "no bo for sa manager\n");
+ return -EINVAL;
+ }
+
+ r = radeon_bo_reserve(sa_manager->bo, false);
+ if (!r) {
+ radeon_bo_kunmap(sa_manager->bo);
+ radeon_bo_unpin(sa_manager->bo);
+ radeon_bo_unreserve(sa_manager->bo);
+ }
+ return r;
+}
+
+/*
+ * The principle is simple: we keep a list of sub allocations in offset
+ * order (first entry has offset == 0, last entry has the highest
+ * offset).
+ *
+ * When allocating a new object we first check if there is room at
+ * the end, i.e. total_size - (last_object_offset + last_object_size) >=
+ * alloc_size. If so we allocate the new object there.
+ *
+ * When there is not enough room at the end, we start waiting for
+ * each sub object until we reach object_offset + object_size >=
+ * alloc_size; this object then becomes the sub object we return.
+ *
+ * Alignment can't be bigger than the page size.
+ */
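+/*
+ * Hypothetical usage sketch (illustration only, not part of this patch;
+ * "mgr" stands for any radeon_sa_manager that has been initialized and
+ * started):
+ *
+ *	struct radeon_sa_bo sa_bo;
+ *	int r = radeon_sa_bo_new(rdev, &mgr, &sa_bo, 1024, 256);
+ *	if (!r) {
+ *		void *cpu = mgr.cpu_ptr + sa_bo.offset;
+ *		uint64_t gpu = mgr.gpu_addr + sa_bo.offset;
+ *		... use the cpu/gpu range, then once done ...
+ *		radeon_sa_bo_free(rdev, &sa_bo);
+ *	}
+ */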
+int radeon_sa_bo_new(struct radeon_device *rdev,
+ struct radeon_sa_manager *sa_manager,
+ struct radeon_sa_bo *sa_bo,
+ unsigned size, unsigned align)
+{
+ struct radeon_sa_bo *tmp;
+ struct list_head *head;
+ unsigned offset = 0, wasted = 0;
+
+ BUG_ON(align > RADEON_GPU_PAGE_SIZE);
+ BUG_ON(size > sa_manager->size);
+
+ /* nothing allocated yet? */
+ head = sa_manager->sa_bo.prev;
+ if (list_empty(&sa_manager->sa_bo)) {
+ goto out;
+ }
+
+ /* look for a hole big enough */
+ offset = 0;
+ list_for_each_entry(tmp, &sa_manager->sa_bo, list) {
+ /* room before this object ? */
+ if ((tmp->offset - offset) >= size) {
+ head = tmp->list.prev;
+ goto out;
+ }
+ offset = tmp->offset + tmp->size;
+ wasted = offset % align;
+ if (wasted) {
+ wasted = align - wasted;
+ }
+ offset += wasted;
+ }
+ /* room at the end ? */
+ head = sa_manager->sa_bo.prev;
+ tmp = list_entry(head, struct radeon_sa_bo, list);
+ offset = tmp->offset + tmp->size;
+ wasted = offset % align;
+ if (wasted) {
+ wasted = align - wasted;
+ }
+ offset += wasted;
+ if ((sa_manager->size - offset) < size) {
+ /* failed to find something big enough */
+ return -ENOMEM;
+ }
+
+out:
+ sa_bo->manager = sa_manager;
+ sa_bo->offset = offset;
+ sa_bo->size = size;
+ list_add(&sa_bo->list, head);
+ return 0;
+}
+
+void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo *sa_bo)
+{
+ list_del_init(&sa_bo->list);
+}
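
For orientation, a minimal sketch of how a caller might drive the sub-allocator added above (illustrative only, not part of the patch; it assumes a 64 KB manager in the GTT domain and uses only the functions and fields introduced in radeon_sa.c):

/* Illustrative sketch: init the manager, pin/map it, carve out one
 * aligned sub allocation, then tear everything down again. */
static int example_sa_usage(struct radeon_device *rdev)
{
	struct radeon_sa_manager mgr;
	struct radeon_sa_bo sub;
	int r;

	r = radeon_sa_bo_manager_init(rdev, &mgr, 64 * 1024,
				      RADEON_GEM_DOMAIN_GTT);
	if (r)
		return r;
	r = radeon_sa_bo_manager_start(rdev, &mgr);	/* pin + kmap the backing bo */
	if (r)
		goto out_fini;

	r = radeon_sa_bo_new(rdev, &mgr, &sub, 4096, 256);	/* 4 KB, 256-byte aligned */
	if (!r) {
		/* the allocation lives at mgr.gpu_addr + sub.offset (GPU) and
		 * at sub.offset bytes into mgr.cpu_ptr (CPU) */
		radeon_sa_bo_free(rdev, &sub);
	}

	radeon_sa_bo_manager_suspend(rdev, &mgr);	/* unmap + unpin */
out_fini:
	radeon_sa_bo_manager_fini(rdev, &mgr);
	return r;
}
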
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
new file mode 100644
index 0000000..61dd4e3
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -0,0 +1,178 @@
+/*
+ * Copyright 2011 Christian König.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+/*
+ * Authors:
+ * Christian König <deathsimple@vodafone.de>
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "radeon.h"
+
+static int radeon_semaphore_add_bo(struct radeon_device *rdev)
+{
+ struct radeon_semaphore_bo *bo;
+ unsigned long irq_flags;
+ uint64_t gpu_addr;
+ uint32_t *cpu_ptr;
+ int r, i;
+
+
+ bo = kmalloc(sizeof(struct radeon_semaphore_bo), GFP_KERNEL);
+ if (bo == NULL) {
+ return -ENOMEM;
+ }
+ INIT_LIST_HEAD(&bo->free);
+ INIT_LIST_HEAD(&bo->list);
+ bo->nused = 0;
+
+ r = radeon_ib_get(rdev, 0, &bo->ib, RADEON_SEMAPHORE_BO_SIZE);
+ if (r) {
+ dev_err(rdev->dev, "failed to get a bo after 5 retry\n");
+ kfree(bo);
+ return r;
+ }
+ gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
+ gpu_addr += bo->ib->sa_bo.offset;
+ cpu_ptr = rdev->ib_pool.sa_manager.cpu_ptr;
+ cpu_ptr += (bo->ib->sa_bo.offset >> 2);
+ for (i = 0; i < (RADEON_SEMAPHORE_BO_SIZE/8); i++) {
+ bo->semaphores[i].gpu_addr = gpu_addr;
+ bo->semaphores[i].cpu_ptr = cpu_ptr;
+ bo->semaphores[i].bo = bo;
+ list_add_tail(&bo->semaphores[i].list, &bo->free);
+ gpu_addr += 8;
+ cpu_ptr += 2;
+ }
+ write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
+ list_add_tail(&bo->list, &rdev->semaphore_drv.bo);
+ write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+ return 0;
+}
+
+static void radeon_semaphore_del_bo_locked(struct radeon_device *rdev,
+ struct radeon_semaphore_bo *bo)
+{
+ radeon_sa_bo_free(rdev, &bo->ib->sa_bo);
+ radeon_fence_unref(&bo->ib->fence);
+ list_del(&bo->list);
+ kfree(bo);
+}
+
+void radeon_semaphore_shrink_locked(struct radeon_device *rdev)
+{
+ struct radeon_semaphore_bo *bo, *n;
+
+ if (list_empty(&rdev->semaphore_drv.bo)) {
+ return;
+ }
+ /* only shrink if first bo has free semaphore */
+ bo = list_first_entry(&rdev->semaphore_drv.bo, struct radeon_semaphore_bo, list);
+ if (list_empty(&bo->free)) {
+ return;
+ }
+ list_for_each_entry_safe_continue(bo, n, &rdev->semaphore_drv.bo, list) {
+ if (bo->nused)
+ continue;
+ radeon_semaphore_del_bo_locked(rdev, bo);
+ }
+}
+
+int radeon_semaphore_create(struct radeon_device *rdev,
+ struct radeon_semaphore **semaphore)
+{
+ struct radeon_semaphore_bo *bo;
+ unsigned long irq_flags;
+ bool do_retry = true;
+ int r;
+
+retry:
+ *semaphore = NULL;
+ write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
+ list_for_each_entry(bo, &rdev->semaphore_drv.bo, list) {
+ if (list_empty(&bo->free))
+ continue;
+ *semaphore = list_first_entry(&bo->free, struct radeon_semaphore, list);
+ (*semaphore)->cpu_ptr[0] = 0;
+ (*semaphore)->cpu_ptr[1] = 0;
+ list_del(&(*semaphore)->list);
+ bo->nused++;
+ break;
+ }
+ write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+
+ if (*semaphore == NULL) {
+ if (do_retry) {
+ do_retry = false;
+ r = radeon_semaphore_add_bo(rdev);
+ if (r)
+ return r;
+ goto retry;
+ }
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
+ struct radeon_semaphore *semaphore)
+{
+ radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
+}
+
+void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
+ struct radeon_semaphore *semaphore)
+{
+ radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
+}
+
+void radeon_semaphore_free(struct radeon_device *rdev,
+ struct radeon_semaphore *semaphore)
+{
+ unsigned long irq_flags;
+
+ write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
+ semaphore->bo->nused--;
+ list_add_tail(&semaphore->list, &semaphore->bo->free);
+ radeon_semaphore_shrink_locked(rdev);
+ write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+}
+
+void radeon_semaphore_driver_fini(struct radeon_device *rdev)
+{
+ struct radeon_semaphore_bo *bo, *n;
+ unsigned long irq_flags;
+
+ write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
+ /* forcibly free everything */
+ list_for_each_entry_safe(bo, n, &rdev->semaphore_drv.bo, list) {
+ if (!list_empty(&bo->free)) {
+ dev_err(rdev->dev, "still in use semaphore\n");
+ }
+ radeon_semaphore_del_bo_locked(rdev, bo);
+ }
+ write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
+}
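
For orientation, a minimal sketch of the intended signal/wait pattern (illustrative only, not part of the patch; the ring tests added to radeon_test.c below exercise the same pattern with real fences). It assumes ridxA and ridxB index rings that are up, and it simplifies error handling and semaphore lifetime:

/* Illustrative sketch: make ring B wait until ring A signals the semaphore. */
static int example_ring_sync(struct radeon_device *rdev, int ridxA, int ridxB)
{
	struct radeon_semaphore *sem;
	int r;

	r = radeon_semaphore_create(rdev, &sem);
	if (r)
		return r;

	r = radeon_ring_lock(rdev, &rdev->ring[ridxB], 8);
	if (r)
		goto out;
	radeon_semaphore_emit_wait(rdev, ridxB, sem);	/* ring B stalls here */
	radeon_ring_unlock_commit(rdev, &rdev->ring[ridxB]);

	r = radeon_ring_lock(rdev, &rdev->ring[ridxA], 8);
	if (r)
		goto out;
	radeon_semaphore_emit_signal(rdev, ridxA, sem);	/* lets ring B continue */
	radeon_ring_unlock_commit(rdev, &rdev->ring[ridxA]);
out:
	/* real callers must not free the semaphore before both rings are done */
	radeon_semaphore_free(rdev, sem);
	return r;
}
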
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 602fa35..dc5dcf4 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -42,7 +42,9 @@ void radeon_test_moves(struct radeon_device *rdev)
/* Number of tests =
* (Total GTT - IB pool - writeback page - ring buffers) / test size
*/
- n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - rdev->cp.ring_size;
+ n = rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024;
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ n -= rdev->ring[i].ring_size;
if (rdev->wb.wb_obj)
n -= RADEON_GPU_PAGE_SIZE;
if (rdev->ih.ring_obj)
@@ -104,7 +106,7 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_bo_kunmap(gtt_obj[i]);
- r = radeon_fence_create(rdev, &fence);
+ r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
if (r) {
DRM_ERROR("Failed to create GTT->VRAM fence %d\n", i);
goto out_cleanup;
@@ -153,7 +155,7 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_bo_kunmap(vram_obj);
- r = radeon_fence_create(rdev, &fence);
+ r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
if (r) {
DRM_ERROR("Failed to create VRAM->GTT fence %d\n", i);
goto out_cleanup;
@@ -232,3 +234,264 @@ out_cleanup:
printk(KERN_WARNING "Error while testing BO move.\n");
}
}
+
+void radeon_test_ring_sync(struct radeon_device *rdev,
+ struct radeon_ring *ringA,
+ struct radeon_ring *ringB)
+{
+ struct radeon_fence *fence1 = NULL, *fence2 = NULL;
+ struct radeon_semaphore *semaphore = NULL;
+ int ridxA = radeon_ring_index(rdev, ringA);
+ int ridxB = radeon_ring_index(rdev, ringB);
+ int r;
+
+ r = radeon_fence_create(rdev, &fence1, ridxA);
+ if (r) {
+ DRM_ERROR("Failed to create sync fence 1\n");
+ goto out_cleanup;
+ }
+ r = radeon_fence_create(rdev, &fence2, ridxA);
+ if (r) {
+ DRM_ERROR("Failed to create sync fence 2\n");
+ goto out_cleanup;
+ }
+
+ r = radeon_semaphore_create(rdev, &semaphore);
+ if (r) {
+ DRM_ERROR("Failed to create semaphore\n");
+ goto out_cleanup;
+ }
+
+ r = radeon_ring_lock(rdev, ringA, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring A %d\n", ridxA);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
+ radeon_fence_emit(rdev, fence1);
+ radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
+ radeon_fence_emit(rdev, fence2);
+ radeon_ring_unlock_commit(rdev, ringA);
+
+ mdelay(1000);
+
+ if (radeon_fence_signaled(fence1)) {
+ DRM_ERROR("Fence 1 signaled without waiting for semaphore.\n");
+ goto out_cleanup;
+ }
+
+ r = radeon_ring_lock(rdev, ringB, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring B %p\n", ringB);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
+ radeon_ring_unlock_commit(rdev, ringB);
+
+ r = radeon_fence_wait(fence1, false);
+ if (r) {
+ DRM_ERROR("Failed to wait for sync fence 1\n");
+ goto out_cleanup;
+ }
+
+ mdelay(1000);
+
+ if (radeon_fence_signaled(fence2)) {
+ DRM_ERROR("Fence 2 signaled without waiting for semaphore.\n");
+ goto out_cleanup;
+ }
+
+ r = radeon_ring_lock(rdev, ringB, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring B %p\n", ringB);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_signal(rdev, ridxB, semaphore);
+ radeon_ring_unlock_commit(rdev, ringB);
+
+ r = radeon_fence_wait(fence2, false);
+ if (r) {
+ DRM_ERROR("Failed to wait for sync fence 1\n");
+ goto out_cleanup;
+ }
+
+out_cleanup:
+ if (semaphore)
+ radeon_semaphore_free(rdev, semaphore);
+
+ if (fence1)
+ radeon_fence_unref(&fence1);
+
+ if (fence2)
+ radeon_fence_unref(&fence2);
+
+ if (r)
+ printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
+}
+
+void radeon_test_ring_sync2(struct radeon_device *rdev,
+ struct radeon_ring *ringA,
+ struct radeon_ring *ringB,
+ struct radeon_ring *ringC)
+{
+ struct radeon_fence *fenceA = NULL, *fenceB = NULL;
+ struct radeon_semaphore *semaphore = NULL;
+ int ridxA = radeon_ring_index(rdev, ringA);
+ int ridxB = radeon_ring_index(rdev, ringB);
+ int ridxC = radeon_ring_index(rdev, ringC);
+ bool sigA, sigB;
+ int i, r;
+
+ r = radeon_fence_create(rdev, &fenceA, ridxA);
+ if (r) {
+ DRM_ERROR("Failed to create sync fence 1\n");
+ goto out_cleanup;
+ }
+ r = radeon_fence_create(rdev, &fenceB, ridxB);
+ if (r) {
+ DRM_ERROR("Failed to create sync fence 2\n");
+ goto out_cleanup;
+ }
+
+ r = radeon_semaphore_create(rdev, &semaphore);
+ if (r) {
+ DRM_ERROR("Failed to create semaphore\n");
+ goto out_cleanup;
+ }
+
+ r = radeon_ring_lock(rdev, ringA, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring A %d\n", ridxA);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_wait(rdev, ridxA, semaphore);
+ radeon_fence_emit(rdev, fenceA);
+ radeon_ring_unlock_commit(rdev, ringA);
+
+ r = radeon_ring_lock(rdev, ringB, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring B %d\n", ridxB);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_wait(rdev, ridxB, semaphore);
+ radeon_fence_emit(rdev, fenceB);
+ radeon_ring_unlock_commit(rdev, ringB);
+
+ mdelay(1000);
+
+ if (radeon_fence_signaled(fenceA)) {
+ DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
+ goto out_cleanup;
+ }
+ if (radeon_fence_signaled(fenceB)) {
+ DRM_ERROR("Fence A signaled without waiting for semaphore.\n");
+ goto out_cleanup;
+ }
+
+ r = radeon_ring_lock(rdev, ringC, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring B %p\n", ringC);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
+ radeon_ring_unlock_commit(rdev, ringC);
+
+ for (i = 0; i < 30; ++i) {
+ mdelay(100);
+ sigA = radeon_fence_signaled(fenceA);
+ sigB = radeon_fence_signaled(fenceB);
+ if (sigA || sigB)
+ break;
+ }
+
+ if (!sigA && !sigB) {
+ DRM_ERROR("Neither fence A nor B has been signaled\n");
+ goto out_cleanup;
+ } else if (sigA && sigB) {
+ DRM_ERROR("Both fence A and B has been signaled\n");
+ goto out_cleanup;
+ }
+
+ DRM_INFO("Fence %c was first signaled\n", sigA ? 'A' : 'B');
+
+ r = radeon_ring_lock(rdev, ringC, 64);
+ if (r) {
+ DRM_ERROR("Failed to lock ring B %p\n", ringC);
+ goto out_cleanup;
+ }
+ radeon_semaphore_emit_signal(rdev, ridxC, semaphore);
+ radeon_ring_unlock_commit(rdev, ringC);
+
+ mdelay(1000);
+
+ r = radeon_fence_wait(fenceA, false);
+ if (r) {
+ DRM_ERROR("Failed to wait for sync fence A\n");
+ goto out_cleanup;
+ }
+ r = radeon_fence_wait(fenceB, false);
+ if (r) {
+ DRM_ERROR("Failed to wait for sync fence B\n");
+ goto out_cleanup;
+ }
+
+out_cleanup:
+ if (semaphore)
+ radeon_semaphore_free(rdev, semaphore);
+
+ if (fenceA)
+ radeon_fence_unref(&fenceA);
+
+ if (fenceB)
+ radeon_fence_unref(&fenceB);
+
+ if (r)
+ printk(KERN_WARNING "Error while testing ring sync (%d).\n", r);
+}
+
+void radeon_test_syncing(struct radeon_device *rdev)
+{
+ int i, j, k;
+
+ for (i = 1; i < RADEON_NUM_RINGS; ++i) {
+ struct radeon_ring *ringA = &rdev->ring[i];
+ if (!ringA->ready)
+ continue;
+
+ for (j = 0; j < i; ++j) {
+ struct radeon_ring *ringB = &rdev->ring[j];
+ if (!ringB->ready)
+ continue;
+
+ DRM_INFO("Testing syncing between rings %d and %d...\n", i, j);
+ radeon_test_ring_sync(rdev, ringA, ringB);
+
+ DRM_INFO("Testing syncing between rings %d and %d...\n", j, i);
+ radeon_test_ring_sync(rdev, ringB, ringA);
+
+ for (k = 0; k < j; ++k) {
+ struct radeon_ring *ringC = &rdev->ring[k];
+ if (!ringC->ready)
+ continue;
+
+ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, j, k);
+ radeon_test_ring_sync2(rdev, ringA, ringB, ringC);
+
+ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", i, k, j);
+ radeon_test_ring_sync2(rdev, ringA, ringC, ringB);
+
+ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, i, k);
+ radeon_test_ring_sync2(rdev, ringB, ringA, ringC);
+
+ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", j, k, i);
+ radeon_test_ring_sync2(rdev, ringB, ringC, ringA);
+
+ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, i, j);
+ radeon_test_ring_sync2(rdev, ringC, ringA, ringB);
+
+ DRM_INFO("Testing syncing between rings %d, %d and %d...\n", k, j, i);
+ radeon_test_ring_sync2(rdev, ringC, ringB, ringA);
+ }
+ }
+ }
+}
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 0b5468b..c421e77 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -114,24 +114,6 @@ static void radeon_ttm_global_fini(struct radeon_device *rdev)
}
}
-struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);
-
-static struct ttm_backend*
-radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
-{
- struct radeon_device *rdev;
-
- rdev = radeon_get_rdev(bdev);
-#if __OS_HAS_AGP
- if (rdev->flags & RADEON_IS_AGP) {
- return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
- } else
-#endif
- {
- return radeon_ttm_backend_create(rdev);
- }
-}
-
static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
return 0;
@@ -206,7 +188,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo,
rbo = container_of(bo, struct radeon_bo, tbo);
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- if (rbo->rdev->cp.ready == false)
+ if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
else
radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
@@ -241,10 +223,10 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
struct radeon_device *rdev;
uint64_t old_start, new_start;
struct radeon_fence *fence;
- int r;
+ int r, i;
rdev = radeon_get_rdev(bo->bdev);
- r = radeon_fence_create(rdev, &fence);
+ r = radeon_fence_create(rdev, &fence, rdev->copy_ring);
if (unlikely(r)) {
return r;
}
@@ -273,13 +255,43 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
return -EINVAL;
}
- if (!rdev->cp.ready) {
- DRM_ERROR("Trying to move memory with CP turned off.\n");
+ if (!rdev->ring[rdev->copy_ring].ready) {
+ DRM_ERROR("Trying to move memory with ring turned off.\n");
return -EINVAL;
}
BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);
+ /* sync other rings */
+ if (rdev->family >= CHIP_R600) {
+ for (i = 0; i < RADEON_NUM_RINGS; ++i) {
+ /* no need to sync to our own or unused rings */
+ if (i == rdev->copy_ring || !rdev->ring[i].ready)
+ continue;
+
+ if (!fence->semaphore) {
+ r = radeon_semaphore_create(rdev, &fence->semaphore);
+ /* FIXME: handle semaphore error */
+ if (r)
+ continue;
+ }
+
+ r = radeon_ring_lock(rdev, &rdev->ring[i], 3);
+ /* FIXME: handle ring lock error */
+ if (r)
+ continue;
+ radeon_semaphore_emit_signal(rdev, i, fence->semaphore);
+ radeon_ring_unlock_commit(rdev, &rdev->ring[i]);
+
+ r = radeon_ring_lock(rdev, &rdev->ring[rdev->copy_ring], 3);
+ /* FIXME: handle ring lock error */
+ if (r)
+ continue;
+ radeon_semaphore_emit_wait(rdev, rdev->copy_ring, fence->semaphore);
+ radeon_ring_unlock_commit(rdev, &rdev->ring[rdev->copy_ring]);
+ }
+ }
+
r = radeon_copy(rdev, old_start, new_start,
new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
fence);
@@ -398,7 +410,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo,
radeon_move_null(bo, new_mem);
return 0;
}
- if (!rdev->cp.ready || rdev->asic->copy == NULL) {
+ if (!rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready || rdev->asic->copy == NULL) {
/* use memcpy */
goto memcpy;
}
@@ -515,8 +527,166 @@ static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}
+/*
+ * TTM backend functions.
+ */
+struct radeon_ttm_tt {
+ struct ttm_dma_tt ttm;
+ struct radeon_device *rdev;
+ u64 offset;
+};
+
+static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
+ struct ttm_mem_reg *bo_mem)
+{
+ struct radeon_ttm_tt *gtt = (void*)ttm;
+ int r;
+
+ gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
+ if (!ttm->num_pages) {
+ WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+ ttm->num_pages, bo_mem, ttm);
+ }
+ r = radeon_gart_bind(gtt->rdev, gtt->offset,
+ ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
+ if (r) {
+ DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
+ ttm->num_pages, (unsigned)gtt->offset);
+ return r;
+ }
+ return 0;
+}
+
+static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
+{
+ struct radeon_ttm_tt *gtt = (void *)ttm;
+
+ radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
+ return 0;
+}
+
+static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
+{
+ struct radeon_ttm_tt *gtt = (void *)ttm;
+
+ ttm_dma_tt_fini(&gtt->ttm);
+ kfree(gtt);
+}
+
+static struct ttm_backend_func radeon_backend_func = {
+ .bind = &radeon_ttm_backend_bind,
+ .unbind = &radeon_ttm_backend_unbind,
+ .destroy = &radeon_ttm_backend_destroy,
+};
+
+struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct radeon_device *rdev;
+ struct radeon_ttm_tt *gtt;
+
+ rdev = radeon_get_rdev(bdev);
+#if __OS_HAS_AGP
+ if (rdev->flags & RADEON_IS_AGP) {
+ return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
+ size, page_flags, dummy_read_page);
+ }
+#endif
+
+ gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
+ if (gtt == NULL) {
+ return NULL;
+ }
+ gtt->ttm.ttm.func = &radeon_backend_func;
+ gtt->rdev = rdev;
+ if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
+ kfree(gtt);
+ return NULL;
+ }
+ return &gtt->ttm.ttm;
+}
+
+static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
+{
+ struct radeon_device *rdev;
+ struct radeon_ttm_tt *gtt = (void *)ttm;
+ unsigned i;
+ int r;
+
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ rdev = radeon_get_rdev(ttm->bdev);
+#if __OS_HAS_AGP
+ if (rdev->flags & RADEON_IS_AGP) {
+ return ttm_agp_tt_populate(ttm);
+ }
+#endif
+
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb_nr_tbl()) {
+ return ttm_dma_populate(&gtt->ttm, rdev->dev);
+ }
+#endif
+
+ r = ttm_pool_populate(ttm);
+ if (r) {
+ return r;
+ }
+
+ for (i = 0; i < ttm->num_pages; i++) {
+ gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
+ 0, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
+ while (i--) {
+ pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ gtt->ttm.dma_address[i] = 0;
+ }
+ ttm_pool_unpopulate(ttm);
+ return -EFAULT;
+ }
+ }
+ return 0;
+}
+
+static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
+{
+ struct radeon_device *rdev;
+ struct radeon_ttm_tt *gtt = (void *)ttm;
+ unsigned i;
+
+ rdev = radeon_get_rdev(ttm->bdev);
+#if __OS_HAS_AGP
+ if (rdev->flags & RADEON_IS_AGP) {
+ ttm_agp_tt_unpopulate(ttm);
+ return;
+ }
+#endif
+
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb_nr_tbl()) {
+ ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
+ return;
+ }
+#endif
+
+ for (i = 0; i < ttm->num_pages; i++) {
+ if (gtt->ttm.dma_address[i]) {
+ pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ }
+ }
+
+ ttm_pool_unpopulate(ttm);
+}
+
static struct ttm_bo_driver radeon_bo_driver = {
- .create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
+ .ttm_tt_create = &radeon_ttm_tt_create,
+ .ttm_tt_populate = &radeon_ttm_tt_populate,
+ .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
.invalidate_caches = &radeon_invalidate_caches,
.init_mem_type = &radeon_init_mem_type,
.evict_flags = &radeon_evict_flags,
@@ -680,124 +850,6 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
}
-/*
- * TTM backend functions.
- */
-struct radeon_ttm_backend {
- struct ttm_backend backend;
- struct radeon_device *rdev;
- unsigned long num_pages;
- struct page **pages;
- struct page *dummy_read_page;
- dma_addr_t *dma_addrs;
- bool populated;
- bool bound;
- unsigned offset;
-};
-
-static int radeon_ttm_backend_populate(struct ttm_backend *backend,
- unsigned long num_pages,
- struct page **pages,
- struct page *dummy_read_page,
- dma_addr_t *dma_addrs)
-{
- struct radeon_ttm_backend *gtt;
-
- gtt = container_of(backend, struct radeon_ttm_backend, backend);
- gtt->pages = pages;
- gtt->dma_addrs = dma_addrs;
- gtt->num_pages = num_pages;
- gtt->dummy_read_page = dummy_read_page;
- gtt->populated = true;
- return 0;
-}
-
-static void radeon_ttm_backend_clear(struct ttm_backend *backend)
-{
- struct radeon_ttm_backend *gtt;
-
- gtt = container_of(backend, struct radeon_ttm_backend, backend);
- gtt->pages = NULL;
- gtt->dma_addrs = NULL;
- gtt->num_pages = 0;
- gtt->dummy_read_page = NULL;
- gtt->populated = false;
- gtt->bound = false;
-}
-
-
-static int radeon_ttm_backend_bind(struct ttm_backend *backend,
- struct ttm_mem_reg *bo_mem)
-{
- struct radeon_ttm_backend *gtt;
- int r;
-
- gtt = container_of(backend, struct radeon_ttm_backend, backend);
- gtt->offset = bo_mem->start << PAGE_SHIFT;
- if (!gtt->num_pages) {
- WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
- gtt->num_pages, bo_mem, backend);
- }
- r = radeon_gart_bind(gtt->rdev, gtt->offset,
- gtt->num_pages, gtt->pages, gtt->dma_addrs);
- if (r) {
- DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
- gtt->num_pages, gtt->offset);
- return r;
- }
- gtt->bound = true;
- return 0;
-}
-
-static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
-{
- struct radeon_ttm_backend *gtt;
-
- gtt = container_of(backend, struct radeon_ttm_backend, backend);
- radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
- gtt->bound = false;
- return 0;
-}
-
-static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
-{
- struct radeon_ttm_backend *gtt;
-
- gtt = container_of(backend, struct radeon_ttm_backend, backend);
- if (gtt->bound) {
- radeon_ttm_backend_unbind(backend);
- }
- kfree(gtt);
-}
-
-static struct ttm_backend_func radeon_backend_func = {
- .populate = &radeon_ttm_backend_populate,
- .clear = &radeon_ttm_backend_clear,
- .bind = &radeon_ttm_backend_bind,
- .unbind = &radeon_ttm_backend_unbind,
- .destroy = &radeon_ttm_backend_destroy,
-};
-
-struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
-{
- struct radeon_ttm_backend *gtt;
-
- gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
- if (gtt == NULL) {
- return NULL;
- }
- gtt->backend.bdev = &rdev->mman.bdev;
- gtt->backend.flags = 0;
- gtt->backend.func = &radeon_backend_func;
- gtt->rdev = rdev;
- gtt->pages = NULL;
- gtt->num_pages = 0;
- gtt->dummy_read_page = NULL;
- gtt->populated = false;
- gtt->bound = false;
- return &gtt->backend;
-}
-
#define RADEON_DEBUGFS_MEM_TYPES 2
#if defined(CONFIG_DEBUG_FS)
@@ -820,8 +872,8 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
- static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1];
- static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32];
+ static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
+ static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
unsigned i;
for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
@@ -843,8 +895,17 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
radeon_mem_types_list[i].name = radeon_mem_types_names[i];
radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
radeon_mem_types_list[i].driver_features = 0;
- radeon_mem_types_list[i].data = NULL;
- return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES+1);
+ radeon_mem_types_list[i++].data = NULL;
+#ifdef CONFIG_SWIOTLB
+ if (swiotlb_nr_tbl()) {
+ sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
+ radeon_mem_types_list[i].name = radeon_mem_types_names[i];
+ radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
+ radeon_mem_types_list[i].driver_features = 0;
+ radeon_mem_types_list[i++].data = NULL;
+ }
+#endif
+ return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
#endif
return 0;
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 06b90c8..866a05b 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -410,6 +410,12 @@ static int rs400_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r100_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -419,16 +425,25 @@ static int rs400_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
- r = r100_ib_init(rdev);
+
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r100_ib_test(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
return r;
}
+
return 0;
}
int rs400_resume(struct radeon_device *rdev)
{
+ int r;
+
/* Make sure GART is not working */
rs400_gart_disable(rdev);
/* Resume clock before doing reset */
@@ -447,11 +462,18 @@ int rs400_resume(struct radeon_device *rdev)
r300_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
- return rs400_startup(rdev);
+
+ rdev->accel_working = true;
+ r = rs400_startup(rdev);
+ if (r) {
+ rdev->accel_working = false;
+ }
+ return r;
}
int rs400_suspend(struct radeon_device *rdev)
{
+ radeon_ib_pool_suspend(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
r100_irq_disable(rdev);
@@ -530,7 +552,14 @@ int rs400_init(struct radeon_device *rdev)
if (r)
return r;
r300_set_reg_safe(rdev);
+
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = rs400_startup(rdev);
if (r) {
/* Something went wrong with the accel init, stop accel */
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index b1053d6..4fc7006 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -322,16 +322,6 @@ void rs600_hpd_fini(struct radeon_device *rdev)
}
}
-void rs600_bm_disable(struct radeon_device *rdev)
-{
- u32 tmp;
-
- /* disable bus mastering */
- pci_read_config_word(rdev->pdev, 0x4, (u16*)&tmp);
- pci_write_config_word(rdev->pdev, 0x4, tmp & 0xFFFB);
- mdelay(1);
-}
-
int rs600_asic_reset(struct radeon_device *rdev)
{
struct rv515_mc_save save;
@@ -355,7 +345,8 @@ int rs600_asic_reset(struct radeon_device *rdev)
WREG32(RADEON_CP_RB_CNTL, tmp);
pci_save_state(rdev->pdev);
/* disable bus mastering */
- rs600_bm_disable(rdev);
+ pci_clear_master(rdev->pdev);
+ mdelay(1);
/* reset GA+VAP */
WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
S_0000F0_SOFT_RESET_GA(1));
@@ -549,7 +540,7 @@ int rs600_irq_set(struct radeon_device *rdev)
WREG32(R_000040_GEN_INT_CNTL, 0);
return -EINVAL;
}
- if (rdev->irq.sw_int) {
+ if (rdev->irq.sw_int[RADEON_RING_TYPE_GFX_INDEX]) {
tmp |= S_000040_SW_INT_EN(1);
}
if (rdev->irq.gui_idle) {
@@ -642,7 +633,7 @@ int rs600_irq_process(struct radeon_device *rdev)
while (status || rdev->irq.stat_regs.r500.disp_int) {
/* SW interrupt */
if (G_000044_SW_INT(status)) {
- radeon_fence_process(rdev);
+ radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
}
/* GUI idle */
if (G_000040_GUI_IDLE(status)) {
@@ -693,9 +684,7 @@ int rs600_irq_process(struct radeon_device *rdev)
WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM);
break;
default:
- msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN;
- WREG32(RADEON_MSI_REARM_EN, msi_rearm);
- WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN);
+ WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN);
break;
}
}
@@ -849,6 +838,12 @@ static int rs600_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -858,15 +853,21 @@ static int rs600_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
- r = r100_ib_init(rdev);
+
+ r = r600_audio_init(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing audio\n");
return r;
}
- r = r600_audio_init(rdev);
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r100_ib_test(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing audio\n");
+ dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
return r;
}
@@ -875,6 +876,8 @@ static int rs600_startup(struct radeon_device *rdev)
int rs600_resume(struct radeon_device *rdev)
{
+ int r;
+
/* Make sure GART is not working */
rs600_gart_disable(rdev);
/* Resume clock before doing reset */
@@ -891,11 +894,18 @@ int rs600_resume(struct radeon_device *rdev)
rv515_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
- return rs600_startup(rdev);
+
+ rdev->accel_working = true;
+ r = rs600_startup(rdev);
+ if (r) {
+ rdev->accel_working = false;
+ }
+ return r;
}
int rs600_suspend(struct radeon_device *rdev)
{
+ radeon_ib_pool_suspend(rdev);
r600_audio_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -976,7 +986,14 @@ int rs600_init(struct radeon_device *rdev)
if (r)
return r;
rs600_set_safe_registers(rdev);
+
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = rs600_startup(rdev);
if (r) {
/* Something went wrong with the accel init, stop accel */
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index a9049ed..f68dff2 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -621,6 +621,12 @@ static int rs690_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -630,15 +636,21 @@ static int rs690_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
- r = r100_ib_init(rdev);
+
+ r = r600_audio_init(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed initializing audio\n");
return r;
}
- r = r600_audio_init(rdev);
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r100_ib_test(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing audio\n");
+ dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
return r;
}
@@ -647,6 +659,8 @@ static int rs690_startup(struct radeon_device *rdev)
int rs690_resume(struct radeon_device *rdev)
{
+ int r;
+
/* Make sure GART is not working */
rs400_gart_disable(rdev);
/* Resume clock before doing reset */
@@ -663,11 +677,18 @@ int rs690_resume(struct radeon_device *rdev)
rv515_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
- return rs690_startup(rdev);
+
+ rdev->accel_working = true;
+ r = rs690_startup(rdev);
+ if (r) {
+ rdev->accel_working = false;
+ }
+ return r;
}
int rs690_suspend(struct radeon_device *rdev)
{
+ radeon_ib_pool_suspend(rdev);
r600_audio_fini(rdev);
r100_cp_disable(rdev);
radeon_wb_disable(rdev);
@@ -749,7 +770,14 @@ int rs690_init(struct radeon_device *rdev)
if (r)
return r;
rs600_set_safe_registers(rdev);
+
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = rs690_startup(rdev);
if (r) {
/* Something went wrong with the accel init, stop accel */
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 6613ee9..959bf44 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -55,44 +55,45 @@ void rv515_debugfs(struct radeon_device *rdev)
void rv515_ring_start(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
- r = radeon_ring_lock(rdev, 64);
+ r = radeon_ring_lock(rdev, ring, 64);
if (r) {
return;
}
- radeon_ring_write(rdev, PACKET0(ISYNC_CNTL, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(ISYNC_CNTL, 0));
+ radeon_ring_write(ring,
ISYNC_ANY2D_IDLE3D |
ISYNC_ANY3D_IDLE2D |
ISYNC_WAIT_IDLEGUI |
ISYNC_CPSCRATCH_IDLEGUI);
- radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
- radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
- radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
- radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
- radeon_ring_write(rdev, PACKET0(GB_SELECT, 0));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, PACKET0(GB_ENABLE, 0));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, PACKET0(R500_SU_REG_DEST, 0));
- radeon_ring_write(rdev, (1 << rdev->num_gb_pipes) - 1);
- radeon_ring_write(rdev, PACKET0(VAP_INDEX_OFFSET, 0));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
- radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
- radeon_ring_write(rdev, PACKET0(WAIT_UNTIL, 0));
- radeon_ring_write(rdev, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
- radeon_ring_write(rdev, PACKET0(GB_AA_CONFIG, 0));
- radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, RB3D_DC_FLUSH | RB3D_DC_FREE);
- radeon_ring_write(rdev, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
- radeon_ring_write(rdev, ZC_FLUSH | ZC_FREE);
- radeon_ring_write(rdev, PACKET0(GB_MSPOS0, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
+ radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
+ radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0));
+ radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG);
+ radeon_ring_write(ring, PACKET0(GB_SELECT, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(GB_ENABLE, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(R500_SU_REG_DEST, 0));
+ radeon_ring_write(ring, (1 << rdev->num_gb_pipes) - 1);
+ radeon_ring_write(ring, PACKET0(VAP_INDEX_OFFSET, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
+ radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
+ radeon_ring_write(ring, PACKET0(WAIT_UNTIL, 0));
+ radeon_ring_write(ring, WAIT_2D_IDLECLEAN | WAIT_3D_IDLECLEAN);
+ radeon_ring_write(ring, PACKET0(GB_AA_CONFIG, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, PACKET0(RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, RB3D_DC_FLUSH | RB3D_DC_FREE);
+ radeon_ring_write(ring, PACKET0(ZB_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(ring, ZC_FLUSH | ZC_FREE);
+ radeon_ring_write(ring, PACKET0(GB_MSPOS0, 0));
+ radeon_ring_write(ring,
((6 << MS_X0_SHIFT) |
(6 << MS_Y0_SHIFT) |
(6 << MS_X1_SHIFT) |
@@ -101,8 +102,8 @@ void rv515_ring_start(struct radeon_device *rdev)
(6 << MS_Y2_SHIFT) |
(6 << MSBD0_Y_SHIFT) |
(6 << MSBD0_X_SHIFT)));
- radeon_ring_write(rdev, PACKET0(GB_MSPOS1, 0));
- radeon_ring_write(rdev,
+ radeon_ring_write(ring, PACKET0(GB_MSPOS1, 0));
+ radeon_ring_write(ring,
((6 << MS_X3_SHIFT) |
(6 << MS_Y3_SHIFT) |
(6 << MS_X4_SHIFT) |
@@ -110,15 +111,15 @@ void rv515_ring_start(struct radeon_device *rdev)
(6 << MS_X5_SHIFT) |
(6 << MS_Y5_SHIFT) |
(6 << MSBD1_SHIFT)));
- radeon_ring_write(rdev, PACKET0(GA_ENHANCE, 0));
- radeon_ring_write(rdev, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
- radeon_ring_write(rdev, PACKET0(GA_POLY_MODE, 0));
- radeon_ring_write(rdev, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
- radeon_ring_write(rdev, PACKET0(GA_ROUND_MODE, 0));
- radeon_ring_write(rdev, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
- radeon_ring_write(rdev, PACKET0(0x20C8, 0));
- radeon_ring_write(rdev, 0);
- radeon_ring_unlock_commit(rdev);
+ radeon_ring_write(ring, PACKET0(GA_ENHANCE, 0));
+ radeon_ring_write(ring, GA_DEADLOCK_CNTL | GA_FASTSYNC_CNTL);
+ radeon_ring_write(ring, PACKET0(GA_POLY_MODE, 0));
+ radeon_ring_write(ring, FRONT_PTYPE_TRIANGE | BACK_PTYPE_TRIANGE);
+ radeon_ring_write(ring, PACKET0(GA_ROUND_MODE, 0));
+ radeon_ring_write(ring, GEOMETRY_ROUND_NEAREST | COLOR_ROUND_NEAREST);
+ radeon_ring_write(ring, PACKET0(0x20C8, 0));
+ radeon_ring_write(ring, 0);
+ radeon_ring_unlock_commit(rdev, ring);
}
int rv515_mc_wait_for_idle(struct radeon_device *rdev)
@@ -392,6 +393,12 @@ static int rv515_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
rs600_irq_set(rdev);
rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
@@ -401,9 +408,15 @@ static int rv515_startup(struct radeon_device *rdev)
dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
return r;
}
- r = r100_ib_init(rdev);
+
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r100_ib_test(rdev);
if (r) {
- dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
+ dev_err(rdev->dev, "failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
return r;
}
return 0;
@@ -411,6 +424,8 @@ static int rv515_startup(struct radeon_device *rdev)
int rv515_resume(struct radeon_device *rdev)
{
+ int r;
+
/* Make sure GART is not working */
if (rdev->flags & RADEON_IS_PCIE)
rv370_pcie_gart_disable(rdev);
@@ -428,7 +443,13 @@ int rv515_resume(struct radeon_device *rdev)
rv515_clock_startup(rdev);
/* Initialize surface registers */
radeon_surface_init(rdev);
- return rv515_startup(rdev);
+
+ rdev->accel_working = true;
+ r = rv515_startup(rdev);
+ if (r) {
+ rdev->accel_working = false;
+ }
+ return r;
}
int rv515_suspend(struct radeon_device *rdev)
@@ -524,7 +545,14 @@ int rv515_init(struct radeon_device *rdev)
if (r)
return r;
rv515_set_safe_registers(rdev);
+
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = rv515_startup(rdev);
if (r) {
/* Something went wrong with the accel init, stop accel */
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 23ae1c6..c049c0c 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -357,7 +357,7 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
void r700_cp_fini(struct radeon_device *rdev)
{
r700_cp_stop(rdev);
- radeon_ring_fini(rdev);
+ radeon_ring_fini(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
}
/*
@@ -1043,6 +1043,7 @@ int rv770_mc_init(struct radeon_device *rdev)
static int rv770_startup(struct radeon_device *rdev)
{
+ struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
int r;
/* enable pcie gen2 link */
@@ -1082,6 +1083,12 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
+ return r;
+ }
+
/* Enable IRQ */
r = r600_irq_init(rdev);
if (r) {
@@ -1091,7 +1098,9 @@ static int rv770_startup(struct radeon_device *rdev)
}
r600_irq_set(rdev);
- r = radeon_ring_init(rdev, rdev->cp.ring_size);
+ r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
+ R600_CP_RB_RPTR, R600_CP_RB_WPTR,
+ 0, 0xfffff, RADEON_CP_PACKET2);
if (r)
return r;
r = rv770_cp_load_microcode(rdev);
@@ -1101,6 +1110,17 @@ static int rv770_startup(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_ib_pool_start(rdev);
+ if (r)
+ return r;
+
+ r = r600_ib_test(rdev, RADEON_RING_TYPE_GFX_INDEX);
+ if (r) {
+ dev_err(rdev->dev, "IB test failed (%d).\n", r);
+ rdev->accel_working = false;
+ return r;
+ }
+
return 0;
}
@@ -1115,15 +1135,11 @@ int rv770_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
+ rdev->accel_working = true;
r = rv770_startup(rdev);
if (r) {
DRM_ERROR("r600 startup failed on resume\n");
- return r;
- }
-
- r = r600_ib_test(rdev);
- if (r) {
- DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
return r;
}
@@ -1140,13 +1156,14 @@ int rv770_resume(struct radeon_device *rdev)
int rv770_suspend(struct radeon_device *rdev)
{
r600_audio_fini(rdev);
+ radeon_ib_pool_suspend(rdev);
+ r600_blit_suspend(rdev);
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
- rdev->cp.ready = false;
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
r600_irq_suspend(rdev);
radeon_wb_disable(rdev);
rv770_pcie_gart_disable(rdev);
- r600_blit_suspend(rdev);
return 0;
}
@@ -1215,8 +1232,8 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
- rdev->cp.ring_obj = NULL;
- r600_ring_init(rdev, 1024 * 1024);
+ rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
+ r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
rdev->ih.ring_obj = NULL;
r600_ih_ring_init(rdev, 64 * 1024);
@@ -1225,30 +1242,24 @@ int rv770_init(struct radeon_device *rdev)
if (r)
return r;
+ r = radeon_ib_pool_init(rdev);
rdev->accel_working = true;
+ if (r) {
+ dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
+ rdev->accel_working = false;
+ }
+
r = rv770_startup(rdev);
if (r) {
dev_err(rdev->dev, "disabling GPU acceleration\n");
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
+ r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
rdev->accel_working = false;
}
- if (rdev->accel_working) {
- r = radeon_ib_pool_init(rdev);
- if (r) {
- dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
- rdev->accel_working = false;
- } else {
- r = r600_ib_test(rdev);
- if (r) {
- dev_err(rdev->dev, "IB test failed (%d).\n", r);
- rdev->accel_working = false;
- }
- }
- }
r = r600_audio_init(rdev);
if (r) {
@@ -1265,11 +1276,12 @@ void rv770_fini(struct radeon_device *rdev)
r700_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
- radeon_ib_pool_fini(rdev);
+ r100_ib_fini(rdev);
radeon_irq_kms_fini(rdev);
rv770_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
+ radeon_semaphore_driver_fini(rdev);
radeon_fence_driver_fini(rdev);
radeon_agp_fini(rdev);
radeon_bo_fini(rdev);
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c
index 5468d1c..89afe0b 100644
--- a/drivers/gpu/drm/savage/savage_drv.c
+++ b/drivers/gpu/drm/savage/savage_drv.c
@@ -35,6 +35,17 @@ static struct pci_device_id pciidlist[] = {
savage_PCI_IDS
};
+static const struct file_operations savage_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
@@ -46,17 +57,7 @@ static struct drm_driver driver = {
.reclaim_buffers = savage_reclaim_buffers,
.ioctls = savage_ioctls,
.dma_ioctl = savage_bci_buffers,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .llseek = noop_llseek,
- },
-
+ .fops = &savage_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index a9c5716..573220c 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -40,7 +40,6 @@ static struct pci_device_id pciidlist[] = {
static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
{
drm_sis_private_t *dev_priv;
- int ret;
dev_priv = kzalloc(sizeof(drm_sis_private_t), GFP_KERNEL);
if (dev_priv == NULL)
@@ -48,43 +47,69 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
dev->dev_private = (void *)dev_priv;
dev_priv->chipset = chipset;
- ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
- if (ret)
- kfree(dev_priv);
+ idr_init(&dev->object_name_idr);
- return ret;
+ return 0;
}
static int sis_driver_unload(struct drm_device *dev)
{
drm_sis_private_t *dev_priv = dev->dev_private;
- drm_sman_takedown(&dev_priv->sman);
+ idr_remove_all(&dev_priv->object_idr);
+ idr_destroy(&dev_priv->object_idr);
+
kfree(dev_priv);
return 0;
}
+static const struct file_operations sis_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .llseek = noop_llseek,
+};
+
+static int sis_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct sis_file_private *file_priv;
+
+ DRM_DEBUG_DRIVER("\n");
+ file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
+ if (!file_priv)
+ return -ENOMEM;
+
+ file->driver_priv = file_priv;
+
+ INIT_LIST_HEAD(&file_priv->obj_list);
+
+ return 0;
+}
+
+void sis_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct sis_file_private *file_priv = file->driver_priv;
+
+ kfree(file_priv);
+}
+
static struct drm_driver driver = {
.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR,
.load = sis_driver_load,
.unload = sis_driver_unload,
+ .open = sis_driver_open,
+ .postclose = sis_driver_postclose,
.dma_quiescent = sis_idle,
.reclaim_buffers = NULL,
.reclaim_buffers_idlelocked = sis_reclaim_buffers_locked,
.lastclose = sis_lastclose,
.ioctls = sis_ioctls,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .llseek = noop_llseek,
- },
-
+ .fops = &sis_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/sis/sis_drv.h b/drivers/gpu/drm/sis/sis_drv.h
index 194303c..573758b 100644
--- a/drivers/gpu/drm/sis/sis_drv.h
+++ b/drivers/gpu/drm/sis/sis_drv.h
@@ -44,7 +44,7 @@ enum sis_family {
SIS_CHIP_315 = 1,
};
-#include "drm_sman.h"
+#include "drm_mm.h"
#define SIS_BASE (dev_priv->mmio)
@@ -54,12 +54,15 @@ enum sis_family {
typedef struct drm_sis_private {
drm_local_map_t *mmio;
unsigned int idle_fault;
- struct drm_sman sman;
unsigned int chipset;
int vram_initialized;
int agp_initialized;
unsigned long vram_offset;
unsigned long agp_offset;
+ struct drm_mm vram_mm;
+ struct drm_mm agp_mm;
+ /** Mapping of userspace keys to mm objects */
+ struct idr object_idr;
} drm_sis_private_t;
extern int sis_idle(struct drm_device *dev);
diff --git a/drivers/gpu/drm/sis/sis_mm.c b/drivers/gpu/drm/sis/sis_mm.c
index 7fe2b63..dd4a316 100644
--- a/drivers/gpu/drm/sis/sis_mm.c
+++ b/drivers/gpu/drm/sis/sis_mm.c
@@ -41,40 +41,18 @@
#define AGP_TYPE 1
+struct sis_memblock {
+ struct drm_mm_node mm_node;
+ struct sis_memreq req;
+ struct list_head owner_list;
+};
+
#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
/* fb management via fb device */
#define SIS_MM_ALIGN_SHIFT 0
#define SIS_MM_ALIGN_MASK 0
-static void *sis_sman_mm_allocate(void *private, unsigned long size,
- unsigned alignment)
-{
- struct sis_memreq req;
-
- req.size = size;
- sis_malloc(&req);
- if (req.size == 0)
- return NULL;
- else
- return (void *)(unsigned long)~req.offset;
-}
-
-static void sis_sman_mm_free(void *private, void *ref)
-{
- sis_free(~((unsigned long)ref));
-}
-
-static void sis_sman_mm_destroy(void *private)
-{
- ;
-}
-
-static unsigned long sis_sman_mm_offset(void *private, void *ref)
-{
- return ~((unsigned long)ref);
-}
-
#else /* CONFIG_FB_SIS[_MODULE] */
#define SIS_MM_ALIGN_SHIFT 4
@@ -86,30 +64,11 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
{
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_fb_t *fb = data;
- int ret;
mutex_lock(&dev->struct_mutex);
-#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
- {
- struct drm_sman_mm sman_mm;
- sman_mm.private = (void *)0xFFFFFFFF;
- sman_mm.allocate = sis_sman_mm_allocate;
- sman_mm.free = sis_sman_mm_free;
- sman_mm.destroy = sis_sman_mm_destroy;
- sman_mm.offset = sis_sman_mm_offset;
- ret =
- drm_sman_set_manager(&dev_priv->sman, VIDEO_TYPE, &sman_mm);
- }
-#else
- ret = drm_sman_set_range(&dev_priv->sman, VIDEO_TYPE, 0,
- fb->size >> SIS_MM_ALIGN_SHIFT);
-#endif
-
- if (ret) {
- DRM_ERROR("VRAM memory manager initialisation error\n");
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ /* Unconditionally init the drm_mm, even though we don't use it when the
+ * fb sis driver is available, to make cleanup easier. */
+ drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> SIS_MM_ALIGN_SHIFT);
dev_priv->vram_initialized = 1;
dev_priv->vram_offset = fb->offset;
@@ -120,13 +79,15 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
return 0;
}
-static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
+static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file,
void *data, int pool)
{
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_mem_t *mem = data;
- int retval = 0;
- struct drm_memblock_item *item;
+ int retval = 0, user_key;
+ struct sis_memblock *item;
+ struct sis_file_private *file_priv = file->driver_priv;
+ unsigned long offset;
mutex_lock(&dev->struct_mutex);
@@ -138,25 +99,68 @@ static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
return -EINVAL;
}
- mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
- item = drm_sman_alloc(&dev_priv->sman, pool, mem->size, 0,
- (unsigned long)file_priv);
+ item = kzalloc(sizeof(*item), GFP_KERNEL);
+ if (!item) {
+ retval = -ENOMEM;
+ goto fail_alloc;
+ }
- mutex_unlock(&dev->struct_mutex);
- if (item) {
- mem->offset = ((pool == 0) ?
- dev_priv->vram_offset : dev_priv->agp_offset) +
- (item->mm->
- offset(item->mm, item->mm_info) << SIS_MM_ALIGN_SHIFT);
- mem->free = item->user_hash.key;
- mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
+ mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
+ if (pool == AGP_TYPE) {
+ retval = drm_mm_insert_node(&dev_priv->agp_mm,
+ &item->mm_node,
+ mem->size, 0);
+ offset = item->mm_node.start;
} else {
- mem->offset = 0;
- mem->size = 0;
- mem->free = 0;
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
+ item->req.size = mem->size;
+ sis_malloc(&item->req);
+ if (item->req.size == 0)
+ retval = -ENOMEM;
+ offset = item->req.offset;
+#else
+ retval = drm_mm_insert_node(&dev_priv->vram_mm,
+ &item->mm_node,
+ mem->size, 0);
+ offset = item->mm_node.start;
+#endif
+ }
+ if (retval)
+ goto fail_alloc;
+
+again:
+ if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
retval = -ENOMEM;
+ goto fail_idr;
}
+ retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
+ if (retval == -EAGAIN)
+ goto again;
+ if (retval)
+ goto fail_idr;
+
+ list_add(&item->owner_list, &file_priv->obj_list);
+ mutex_unlock(&dev->struct_mutex);
+
+ mem->offset = ((pool == 0) ?
+ dev_priv->vram_offset : dev_priv->agp_offset) +
+ (offset << SIS_MM_ALIGN_SHIFT);
+ mem->free = user_key;
+ mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
+
+ return 0;
+
+fail_idr:
+ drm_mm_remove_node(&item->mm_node);
+fail_alloc:
+ kfree(item);
+ mutex_unlock(&dev->struct_mutex);
+
+ mem->offset = 0;
+ mem->size = 0;
+ mem->free = 0;
+
DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem->size,
mem->offset);
@@ -167,14 +171,28 @@ static int sis_drm_free(struct drm_device *dev, void *data, struct drm_file *fil
{
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_mem_t *mem = data;
- int ret;
+ struct sis_memblock *obj;
mutex_lock(&dev->struct_mutex);
- ret = drm_sman_free_key(&dev_priv->sman, mem->free);
+ obj = idr_find(&dev_priv->object_idr, mem->free);
+ if (obj == NULL) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ idr_remove(&dev_priv->object_idr, mem->free);
+ list_del(&obj->owner_list);
+ if (drm_mm_node_allocated(&obj->mm_node))
+ drm_mm_remove_node(&obj->mm_node);
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
+ else
+ sis_free(obj->req.offset);
+#endif
+ kfree(obj);
mutex_unlock(&dev->struct_mutex);
DRM_DEBUG("free = 0x%lx\n", mem->free);
- return ret;
+ return 0;
}
static int sis_fb_alloc(struct drm_device *dev, void *data,
@@ -188,18 +206,10 @@ static int sis_ioctl_agp_init(struct drm_device *dev, void *data,
{
drm_sis_private_t *dev_priv = dev->dev_private;
drm_sis_agp_t *agp = data;
- int ret;
dev_priv = dev->dev_private;
mutex_lock(&dev->struct_mutex);
- ret = drm_sman_set_range(&dev_priv->sman, AGP_TYPE, 0,
- agp->size >> SIS_MM_ALIGN_SHIFT);
-
- if (ret) {
- DRM_ERROR("AGP memory manager initialisation error\n");
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> SIS_MM_ALIGN_SHIFT);
dev_priv->agp_initialized = 1;
dev_priv->agp_offset = agp->offset;
@@ -293,20 +303,26 @@ void sis_lastclose(struct drm_device *dev)
return;
mutex_lock(&dev->struct_mutex);
- drm_sman_cleanup(&dev_priv->sman);
- dev_priv->vram_initialized = 0;
- dev_priv->agp_initialized = 0;
+ if (dev_priv->vram_initialized) {
+ drm_mm_takedown(&dev_priv->vram_mm);
+ dev_priv->vram_initialized = 0;
+ }
+ if (dev_priv->agp_initialized) {
+ drm_mm_takedown(&dev_priv->agp_mm);
+ dev_priv->agp_initialized = 0;
+ }
dev_priv->mmio = NULL;
mutex_unlock(&dev->struct_mutex);
}
void sis_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- drm_sis_private_t *dev_priv = dev->dev_private;
+ struct sis_file_private *file_priv = file->driver_priv;
+ struct sis_memblock *entry, *next;
mutex_lock(&dev->struct_mutex);
- if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
+ if (list_empty(&file_priv->obj_list)) {
mutex_unlock(&dev->struct_mutex);
return;
}
@@ -314,7 +330,18 @@ void sis_reclaim_buffers_locked(struct drm_device *dev,
if (dev->driver->dma_quiescent)
dev->driver->dma_quiescent(dev);
- drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
+
+ list_for_each_entry_safe(entry, next, &file_priv->obj_list,
+ owner_list) {
+ list_del(&entry->owner_list);
+ if (drm_mm_node_allocated(&entry->mm_node))
+ drm_mm_remove_node(&entry->mm_node);
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
+ else
+ sis_free(entry->req.offset);
+#endif
+ kfree(entry);
+ }
mutex_unlock(&dev->struct_mutex);
return;
}
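
The allocation path above pairs drm_mm_insert_node() with the idr pre-get/retry idiom: idr_pre_get() reserves idr memory up front and idr_get_new_above() is simply retried whenever it returns -EAGAIN. A minimal, hedged sketch of that idiom in isolation, using the pre-3.9 idr API this patch relies on; the my_store_object()/my_idr names are illustrative only:

#include <linux/idr.h>

/* Hypothetical helper showing the idr retry idiom used in sis_drm_alloc(). */
static int my_store_object(struct idr *my_idr, void *object, int *handle)
{
	int ret;

again:
	/* Reserve internal idr memory; may sleep with GFP_KERNEL. */
	if (idr_pre_get(my_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	/* Hand out an id >= 1; retry if a concurrent caller consumed
	 * the memory reserved above. */
	ret = idr_get_new_above(my_idr, object, 1, handle);
	if (ret == -EAGAIN)
		goto again;

	return ret;
}
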
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c
index cda2991..1613c78 100644
--- a/drivers/gpu/drm/tdfx/tdfx_drv.c
+++ b/drivers/gpu/drm/tdfx/tdfx_drv.c
@@ -41,20 +41,21 @@ static struct pci_device_id pciidlist[] = {
tdfx_PCI_IDS
};
+static const struct file_operations tdfx_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
.driver_features = DRIVER_USE_MTRR,
.reclaim_buffers = drm_core_reclaim_buffers,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .llseek = noop_llseek,
- },
-
+ .fops = &tdfx_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index f3cf6f0..b2b33dd 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -7,4 +7,8 @@ ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
ttm_object.o ttm_lock.o ttm_execbuf_util.o ttm_page_alloc.o \
ttm_bo_manager.o
+ifeq ($(CONFIG_SWIOTLB),y)
+ttm-y += ttm_page_alloc_dma.o
+endif
+
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 1c4a72f..747c141 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -31,6 +31,7 @@
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_page_alloc.h"
#ifdef TTM_HAS_AGP
#include "ttm/ttm_placement.h"
#include <linux/agp_backend.h>
@@ -40,45 +41,33 @@
#include <asm/agp.h>
struct ttm_agp_backend {
- struct ttm_backend backend;
+ struct ttm_tt ttm;
struct agp_memory *mem;
struct agp_bridge_data *bridge;
};
-static int ttm_agp_populate(struct ttm_backend *backend,
- unsigned long num_pages, struct page **pages,
- struct page *dummy_read_page,
- dma_addr_t *dma_addrs)
+static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
- struct ttm_agp_backend *agp_be =
- container_of(backend, struct ttm_agp_backend, backend);
- struct page **cur_page, **last_page = pages + num_pages;
+ struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
+ struct drm_mm_node *node = bo_mem->mm_node;
struct agp_memory *mem;
+ int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
+ unsigned i;
- mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
+ mem = agp_allocate_memory(agp_be->bridge, ttm->num_pages, AGP_USER_MEMORY);
if (unlikely(mem == NULL))
return -ENOMEM;
mem->page_count = 0;
- for (cur_page = pages; cur_page < last_page; ++cur_page) {
- struct page *page = *cur_page;
+ for (i = 0; i < ttm->num_pages; i++) {
+ struct page *page = ttm->pages[i];
+
if (!page)
- page = dummy_read_page;
+ page = ttm->dummy_read_page;
mem->pages[mem->page_count++] = page;
}
agp_be->mem = mem;
- return 0;
-}
-
-static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
-{
- struct ttm_agp_backend *agp_be =
- container_of(backend, struct ttm_agp_backend, backend);
- struct drm_mm_node *node = bo_mem->mm_node;
- struct agp_memory *mem = agp_be->mem;
- int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
- int ret;
mem->is_flushed = 1;
mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
@@ -90,50 +79,39 @@ static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
return ret;
}
-static int ttm_agp_unbind(struct ttm_backend *backend)
+static int ttm_agp_unbind(struct ttm_tt *ttm)
{
- struct ttm_agp_backend *agp_be =
- container_of(backend, struct ttm_agp_backend, backend);
+ struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
- if (agp_be->mem->is_bound)
- return agp_unbind_memory(agp_be->mem);
- else
- return 0;
-}
-
-static void ttm_agp_clear(struct ttm_backend *backend)
-{
- struct ttm_agp_backend *agp_be =
- container_of(backend, struct ttm_agp_backend, backend);
- struct agp_memory *mem = agp_be->mem;
-
- if (mem) {
- ttm_agp_unbind(backend);
- agp_free_memory(mem);
+ if (agp_be->mem) {
+ if (agp_be->mem->is_bound)
+ return agp_unbind_memory(agp_be->mem);
+ agp_free_memory(agp_be->mem);
+ agp_be->mem = NULL;
}
- agp_be->mem = NULL;
+ return 0;
}
-static void ttm_agp_destroy(struct ttm_backend *backend)
+static void ttm_agp_destroy(struct ttm_tt *ttm)
{
- struct ttm_agp_backend *agp_be =
- container_of(backend, struct ttm_agp_backend, backend);
+ struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
if (agp_be->mem)
- ttm_agp_clear(backend);
+ ttm_agp_unbind(ttm);
+ ttm_tt_fini(ttm);
kfree(agp_be);
}
static struct ttm_backend_func ttm_agp_func = {
- .populate = ttm_agp_populate,
- .clear = ttm_agp_clear,
.bind = ttm_agp_bind,
.unbind = ttm_agp_unbind,
.destroy = ttm_agp_destroy,
};
-struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
- struct agp_bridge_data *bridge)
+struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
+ struct agp_bridge_data *bridge,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
{
struct ttm_agp_backend *agp_be;
@@ -143,10 +121,29 @@ struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
agp_be->mem = NULL;
agp_be->bridge = bridge;
- agp_be->backend.func = &ttm_agp_func;
- agp_be->backend.bdev = bdev;
- return &agp_be->backend;
+ agp_be->ttm.func = &ttm_agp_func;
+
+ if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+ return NULL;
+ }
+
+ return &agp_be->ttm;
+}
+EXPORT_SYMBOL(ttm_agp_tt_create);
+
+int ttm_agp_tt_populate(struct ttm_tt *ttm)
+{
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ return ttm_pool_populate(ttm);
+}
+EXPORT_SYMBOL(ttm_agp_tt_populate);
+
+void ttm_agp_tt_unpopulate(struct ttm_tt *ttm)
+{
+ ttm_pool_unpopulate(ttm);
}
-EXPORT_SYMBOL(ttm_agp_backend_init);
+EXPORT_SYMBOL(ttm_agp_tt_unpopulate);
#endif
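
With the old ttm_backend folded into ttm_tt, an AGP-based driver is expected to route the new per-device callbacks at the helpers exported above. A hedged sketch of that wiring for a hypothetical driver; the my_drv_* names and the my_bridge() lookup are assumptions, while the callback fields mirror the bdev->driver->ttm_tt_create/ttm_tt_populate/ttm_tt_unpopulate uses elsewhere in this series:

/* Hypothetical wiring of the AGP TT helpers into a driver's bo driver. */
static struct ttm_tt *my_drv_ttm_tt_create(struct ttm_bo_device *bdev,
					   unsigned long size,
					   uint32_t page_flags,
					   struct page *dummy_read_page)
{
	/* my_bridge() stands in for however the driver finds its AGP bridge. */
	return ttm_agp_tt_create(bdev, my_bridge(bdev), size,
				 page_flags, dummy_read_page);
}

static struct ttm_bo_driver my_drv_bo_driver = {
	.ttm_tt_create		= &my_drv_ttm_tt_create,
	.ttm_tt_populate	= &ttm_agp_tt_populate,
	.ttm_tt_unpopulate	= &ttm_agp_tt_unpopulate,
	/* ... remaining ttm_bo_driver callbacks ... */
};
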
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 0bb0f5f..7c3a57d 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -137,6 +137,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
struct ttm_buffer_object *bo =
container_of(list_kref, struct ttm_buffer_object, list_kref);
struct ttm_bo_device *bdev = bo->bdev;
+ size_t acc_size = bo->acc_size;
BUG_ON(atomic_read(&bo->list_kref.refcount));
BUG_ON(atomic_read(&bo->kref.refcount));
@@ -152,9 +153,9 @@ static void ttm_bo_release_list(struct kref *list_kref)
if (bo->destroy)
bo->destroy(bo);
else {
- ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
kfree(bo);
}
+ ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}
int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
@@ -337,27 +338,11 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
if (zero_alloc)
page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
case ttm_bo_type_kernel:
- bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
- page_flags, glob->dummy_read_page);
+ bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+ page_flags, glob->dummy_read_page);
if (unlikely(bo->ttm == NULL))
ret = -ENOMEM;
break;
- case ttm_bo_type_user:
- bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
- page_flags | TTM_PAGE_FLAG_USER,
- glob->dummy_read_page);
- if (unlikely(bo->ttm == NULL)) {
- ret = -ENOMEM;
- break;
- }
-
- ret = ttm_tt_set_user(bo->ttm, current,
- bo->buffer_start, bo->num_pages);
- if (unlikely(ret != 0)) {
- ttm_tt_destroy(bo->ttm);
- bo->ttm = NULL;
- }
- break;
default:
printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
ret = -EINVAL;
@@ -431,8 +416,17 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
else
ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
- if (ret)
+ if (ret) {
+ if (bdev->driver->move_notify) {
+ struct ttm_mem_reg tmp_mem = *mem;
+ *mem = bo->mem;
+ bo->mem = tmp_mem;
+ bdev->driver->move_notify(bo, mem);
+ bo->mem = *mem;
+ }
+
goto out_err;
+ }
moved:
if (bo->evicted) {
@@ -472,6 +466,9 @@ out_err:
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
+ if (bo->bdev->driver->move_notify)
+ bo->bdev->driver->move_notify(bo, NULL);
+
if (bo->ttm) {
ttm_tt_unbind(bo->ttm);
ttm_tt_destroy(bo->ttm);
@@ -913,16 +910,12 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
}
static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
- bool disallow_fixed,
uint32_t mem_type,
uint32_t proposed_placement,
uint32_t *masked_placement)
{
uint32_t cur_flags = ttm_bo_type_flags(mem_type);
- if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
- return false;
-
if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
return false;
@@ -967,7 +960,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
man = &bdev->man[mem_type];
type_ok = ttm_bo_mt_compatible(man,
- bo->type == ttm_bo_type_user,
mem_type,
placement->placement[i],
&cur_flags);
@@ -1015,7 +1007,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
if (!man->has_type)
continue;
if (!ttm_bo_mt_compatible(man,
- bo->type == ttm_bo_type_user,
mem_type,
placement->busy_placement[i],
&cur_flags))
@@ -1185,6 +1176,17 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
{
int ret = 0;
unsigned long num_pages;
+ struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+
+ ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
+ if (ret) {
+ printk(KERN_ERR TTM_PFX "Out of kernel memory.\n");
+ if (destroy)
+ (*destroy)(bo);
+ else
+ kfree(bo);
+ return -ENOMEM;
+ }
size += buffer_start & ~PAGE_MASK;
num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -1255,14 +1257,34 @@ out_err:
}
EXPORT_SYMBOL(ttm_bo_init);
-static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
- unsigned long num_pages)
+size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
+ unsigned long bo_size,
+ unsigned struct_size)
{
- size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
- PAGE_MASK;
+ unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
+ size_t size = 0;
- return glob->ttm_bo_size + 2 * page_array_size;
+ size += ttm_round_pot(struct_size);
+ size += PAGE_ALIGN(npages * sizeof(void *));
+ size += ttm_round_pot(sizeof(struct ttm_tt));
+ return size;
}
+EXPORT_SYMBOL(ttm_bo_acc_size);
+
+size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
+ unsigned long bo_size,
+ unsigned struct_size)
+{
+ unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
+ size_t size = 0;
+
+ size += ttm_round_pot(struct_size);
+ size += PAGE_ALIGN(npages * sizeof(void *));
+ size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
+ size += ttm_round_pot(sizeof(struct ttm_dma_tt));
+ return size;
+}
+EXPORT_SYMBOL(ttm_bo_dma_acc_size);
int ttm_bo_create(struct ttm_bo_device *bdev,
unsigned long size,
@@ -1276,10 +1298,10 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
{
struct ttm_buffer_object *bo;
struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+ size_t acc_size;
int ret;
- size_t acc_size =
- ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+ acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
if (unlikely(ret != 0))
return ret;
@@ -1465,13 +1487,6 @@ int ttm_bo_global_init(struct drm_global_reference *ref)
goto out_no_shrink;
}
- glob->ttm_bo_extra_size =
- ttm_round_pot(sizeof(struct ttm_tt)) +
- ttm_round_pot(sizeof(struct ttm_backend));
-
- glob->ttm_bo_size = glob->ttm_bo_extra_size +
- ttm_round_pot(sizeof(struct ttm_buffer_object));
-
atomic_set(&glob->bo_count, 0);
ret = kobject_init_and_add(
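
Since ttm_bo_init() now charges acc_size against the memory global before doing anything else, callers are expected to precompute it with the helpers introduced above, exactly as ttm_bo_create() does. A minimal, hedged sketch with a hypothetical driver object my_bo that embeds the ttm_buffer_object:

/* Hypothetical accounting-size computation; struct my_bo is illustrative. */
struct my_bo {
	struct ttm_buffer_object tbo;
	/* driver-private fields ... */
};

static size_t my_bo_acc_size(struct ttm_bo_device *bdev, unsigned long size,
			     bool use_dma_pool)
{
	/* Accounts for the object itself, the page directory and the
	 * ttm_tt (or ttm_dma_tt) that will back it; the result is passed
	 * to ttm_bo_init() as acc_size. */
	if (use_dma_pool)
		return ttm_bo_dma_acc_size(bdev, size, sizeof(struct my_bo));
	return ttm_bo_acc_size(bdev, size, sizeof(struct my_bo));
}
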
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 082fcae..f8187ea 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -244,7 +244,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
unsigned long page,
pgprot_t prot)
{
- struct page *d = ttm_tt_get_page(ttm, page);
+ struct page *d = ttm->pages[page];
void *dst;
if (!d)
@@ -281,7 +281,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
unsigned long page,
pgprot_t prot)
{
- struct page *s = ttm_tt_get_page(ttm, page);
+ struct page *s = ttm->pages[page];
void *src;
if (!s)
@@ -342,6 +342,12 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
if (old_iomap == NULL && ttm == NULL)
goto out2;
+ if (ttm->state == tt_unpopulated) {
+ ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+ if (ret)
+ goto out1;
+ }
+
add = 0;
dir = 1;
@@ -439,6 +445,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
kref_init(&fbo->list_kref);
kref_init(&fbo->kref);
fbo->destroy = &ttm_transfered_destroy;
+ fbo->acc_size = 0;
*new_obj = fbo;
return 0;
@@ -502,10 +509,16 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
{
struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
struct ttm_tt *ttm = bo->ttm;
- struct page *d;
- int i;
+ int ret;
BUG_ON(!ttm);
+
+ if (ttm->state == tt_unpopulated) {
+ ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+ if (ret)
+ return ret;
+ }
+
if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
/*
* We're mapping a single page, and the desired
@@ -513,18 +526,9 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
*/
map->bo_kmap_type = ttm_bo_map_kmap;
- map->page = ttm_tt_get_page(ttm, start_page);
+ map->page = ttm->pages[start_page];
map->virtual = kmap(map->page);
} else {
- /*
- * Populate the part we're mapping;
- */
- for (i = start_page; i < start_page + num_pages; ++i) {
- d = ttm_tt_get_page(ttm, i);
- if (!d)
- return -ENOMEM;
- }
-
/*
* We need to use vmap to get the desired page protection
* or to make the buffer object look contiguous.
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 221b924..5441284 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -174,18 +174,23 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
vma->vm_page_prot = (bo->mem.placement & TTM_PL_FLAG_CACHED) ?
vm_get_page_prot(vma->vm_flags) :
ttm_io_prot(bo->mem.placement, vma->vm_page_prot);
+
+ /* Allocate all pages at once, most common usage */
+ if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
+ retval = VM_FAULT_OOM;
+ goto out_io_unlock;
+ }
}
/*
* Speculatively prefault a number of pages. Only error on
* first page.
*/
-
for (i = 0; i < TTM_BO_VM_NUM_PREFAULT; ++i) {
if (bo->mem.bus.is_iomem)
pfn = ((bo->mem.bus.base + bo->mem.bus.offset) >> PAGE_SHIFT) + page_offset;
else {
- page = ttm_tt_get_page(ttm, page_offset);
+ page = ttm->pages[page_offset];
if (unlikely(!page && i == 0)) {
retval = VM_FAULT_OOM;
goto out_io_unlock;
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index e70ddd8..9eba8e9 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -395,6 +395,7 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
zone->name, (unsigned long long) zone->max_mem >> 10);
}
ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
+ ttm_dma_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
return 0;
out_no_zone:
ttm_mem_global_release(glob);
@@ -409,6 +410,7 @@ void ttm_mem_global_release(struct ttm_mem_global *glob)
/* let the page allocator first stop the shrink work. */
ttm_page_alloc_fini();
+ ttm_dma_page_alloc_fini();
flush_workqueue(glob->swap_queue);
destroy_workqueue(glob->swap_queue);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 727e93d..499debd 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -619,8 +619,10 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
* @return count of pages still required to fulfill the request.
*/
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
- struct list_head *pages, int ttm_flags,
- enum ttm_caching_state cstate, unsigned count)
+ struct list_head *pages,
+ int ttm_flags,
+ enum ttm_caching_state cstate,
+ unsigned count)
{
unsigned long irq_flags;
struct list_head *p;
@@ -660,17 +662,67 @@ out:
return count;
}
+/* Put all pages in the pages list into the correct pool to wait for reuse */
+static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
+ enum ttm_caching_state cstate)
+{
+ unsigned long irq_flags;
+ struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+ unsigned i;
+
+ if (pool == NULL) {
+ /* No pool for this memory type so free the pages */
+ for (i = 0; i < npages; i++) {
+ if (pages[i]) {
+ if (page_count(pages[i]) != 1)
+ printk(KERN_ERR TTM_PFX
+ "Erroneous page count. "
+ "Leaking pages.\n");
+ __free_page(pages[i]);
+ pages[i] = NULL;
+ }
+ }
+ return;
+ }
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ for (i = 0; i < npages; i++) {
+ if (pages[i]) {
+ if (page_count(pages[i]) != 1)
+ printk(KERN_ERR TTM_PFX
+ "Erroneous page count. "
+ "Leaking pages.\n");
+ list_add_tail(&pages[i]->lru, &pool->list);
+ pages[i] = NULL;
+ pool->npages++;
+ }
+ }
+ /* Check that we don't go over the pool limit */
+ npages = 0;
+ if (pool->npages > _manager->options.max_size) {
+ npages = pool->npages - _manager->options.max_size;
+ /* free at least NUM_PAGES_TO_ALLOC number of pages
+ * to reduce calls to set_memory_wb */
+ if (npages < NUM_PAGES_TO_ALLOC)
+ npages = NUM_PAGES_TO_ALLOC;
+ }
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+ if (npages)
+ ttm_page_pool_free(pool, npages);
+}
+
/*
* On success pages list will hold count number of correctly
* cached pages.
*/
-int ttm_get_pages(struct list_head *pages, int flags,
- enum ttm_caching_state cstate, unsigned count,
- dma_addr_t *dma_address)
+static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
+ enum ttm_caching_state cstate)
{
struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+ struct list_head plist;
struct page *p = NULL;
gfp_t gfp_flags = GFP_USER;
+ unsigned count;
int r;
/* set zero flag for page allocation if required */
@@ -684,7 +736,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
else
gfp_flags |= GFP_HIGHUSER;
- for (r = 0; r < count; ++r) {
+ for (r = 0; r < npages; ++r) {
p = alloc_page(gfp_flags);
if (!p) {
@@ -693,87 +745,53 @@ int ttm_get_pages(struct list_head *pages, int flags,
return -ENOMEM;
}
- list_add(&p->lru, pages);
+ pages[r] = p;
}
return 0;
}
-
/* combine zero flag to pool flags */
gfp_flags |= pool->gfp_flags;
/* First we take pages from the pool */
- count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);
+ INIT_LIST_HEAD(&plist);
+ npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
+ count = 0;
+ list_for_each_entry(p, &plist, lru) {
+ pages[count++] = p;
+ }
/* clear the pages coming from the pool if requested */
if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
- list_for_each_entry(p, pages, lru) {
+ list_for_each_entry(p, &plist, lru) {
clear_page(page_address(p));
}
}
/* If pool didn't have enough pages allocate new one. */
- if (count > 0) {
+ if (npages > 0) {
/* ttm_alloc_new_pages doesn't reference pool so we can run
* multiple requests in parallel.
**/
- r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
+ INIT_LIST_HEAD(&plist);
+ r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
+ list_for_each_entry(p, &plist, lru) {
+ pages[count++] = p;
+ }
if (r) {
/* If there is any pages in the list put them back to
* the pool. */
printk(KERN_ERR TTM_PFX
"Failed to allocate extra pages "
"for large request.");
- ttm_put_pages(pages, 0, flags, cstate, NULL);
+ ttm_put_pages(pages, count, flags, cstate);
return r;
}
}
-
return 0;
}
-/* Put all pages in pages list to correct pool to wait for reuse */
-void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
- enum ttm_caching_state cstate, dma_addr_t *dma_address)
-{
- unsigned long irq_flags;
- struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
- struct page *p, *tmp;
-
- if (pool == NULL) {
- /* No pool for this memory type so free the pages */
-
- list_for_each_entry_safe(p, tmp, pages, lru) {
- __free_page(p);
- }
- /* Make the pages list empty */
- INIT_LIST_HEAD(pages);
- return;
- }
- if (page_count == 0) {
- list_for_each_entry_safe(p, tmp, pages, lru) {
- ++page_count;
- }
- }
-
- spin_lock_irqsave(&pool->lock, irq_flags);
- list_splice_init(pages, &pool->list);
- pool->npages += page_count;
- /* Check that we don't go over the pool limit */
- page_count = 0;
- if (pool->npages > _manager->options.max_size) {
- page_count = pool->npages - _manager->options.max_size;
- /* free at least NUM_PAGES_TO_ALLOC number of pages
- * to reduce calls to set_memory_wb */
- if (page_count < NUM_PAGES_TO_ALLOC)
- page_count = NUM_PAGES_TO_ALLOC;
- }
- spin_unlock_irqrestore(&pool->lock, irq_flags);
- if (page_count)
- ttm_page_pool_free(pool, page_count);
-}
-
static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
char *name)
{
@@ -836,6 +854,62 @@ void ttm_page_alloc_fini(void)
_manager = NULL;
}
+int ttm_pool_populate(struct ttm_tt *ttm)
+{
+ struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+ unsigned i;
+ int ret;
+
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ ret = ttm_get_pages(&ttm->pages[i], 1,
+ ttm->page_flags,
+ ttm->caching_state);
+ if (ret != 0) {
+ ttm_pool_unpopulate(ttm);
+ return -ENOMEM;
+ }
+
+ ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
+ false, false);
+ if (unlikely(ret != 0)) {
+ ttm_pool_unpopulate(ttm);
+ return -ENOMEM;
+ }
+ }
+
+ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+ ret = ttm_tt_swapin(ttm);
+ if (unlikely(ret != 0)) {
+ ttm_pool_unpopulate(ttm);
+ return ret;
+ }
+ }
+
+ ttm->state = tt_unbound;
+ return 0;
+}
+EXPORT_SYMBOL(ttm_pool_populate);
+
+void ttm_pool_unpopulate(struct ttm_tt *ttm)
+{
+ unsigned i;
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ if (ttm->pages[i]) {
+ ttm_mem_global_free_page(ttm->glob->mem_glob,
+ ttm->pages[i]);
+ ttm_put_pages(&ttm->pages[i], 1,
+ ttm->page_flags,
+ ttm->caching_state);
+ }
+ }
+ ttm->state = tt_unpopulated;
+}
+EXPORT_SYMBOL(ttm_pool_unpopulate);
+
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
struct ttm_page_pool *p;
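
ttm_pool_populate() and ttm_pool_unpopulate() are exported so that drivers which stay on the plain page pool can plug them straight into the new per-device callbacks. A hedged sketch of that hookup; my_drv_* is a hypothetical driver and only the two pool-backed callbacks are shown:

/* Hypothetical callbacks built directly on the exported pool helpers. */
static int my_drv_ttm_tt_populate(struct ttm_tt *ttm)
{
	return ttm_pool_populate(ttm);
}

static void my_drv_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate(ttm);
}

static struct ttm_bo_driver my_drv_bo_driver = {
	.ttm_tt_populate	= &my_drv_ttm_tt_populate,
	.ttm_tt_unpopulate	= &my_drv_ttm_tt_unpopulate,
	/* ... ttm_tt_create and the remaining callbacks ... */
};
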
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
new file mode 100644
index 0000000..0c46d8c
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -0,0 +1,1142 @@
+/*
+ * Copyright 2011 (c) Oracle Corp.
+
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+ */
+
+/*
+ * A simple DMA pool loosely based on dmapool.c. It has certain advantages
+ * over the DMA pools:
+ * - Pool collects recently freed pages for reuse (and hooks up to
+ * the shrinker).
+ * - Tracks currently in use pages
+ * - Tracks whether the page is UC, WB or cached (and reverts to WB
+ * when freed).
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/list.h>
+#include <linux/seq_file.h> /* for seq_printf */
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/highmem.h>
+#include <linux/mm_types.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/atomic.h>
+#include <linux/device.h>
+#include <linux/kthread.h>
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_page_alloc.h"
+#ifdef TTM_HAS_AGP
+#include <asm/agp.h>
+#endif
+
+#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
+#define SMALL_ALLOCATION 4
+#define FREE_ALL_PAGES (~0U)
+/* times are in msecs */
+#define IS_UNDEFINED (0)
+#define IS_WC (1<<1)
+#define IS_UC (1<<2)
+#define IS_CACHED (1<<3)
+#define IS_DMA32 (1<<4)
+
+enum pool_type {
+ POOL_IS_UNDEFINED,
+ POOL_IS_WC = IS_WC,
+ POOL_IS_UC = IS_UC,
+ POOL_IS_CACHED = IS_CACHED,
+ POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
+ POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
+ POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
+};
+/*
+ * The pool structure. There are usually six pools:
+ * - generic (not restricted to DMA32):
+ * - write combined, uncached, cached.
+ * - dma32 (up to 2^32 - so up to 4GB):
+ * - write combined, uncached, cached.
+ * for each 'struct device'. The 'cached' is for pages that are actively used.
+ * The other ones can be shrunk by the shrinker API if necessary.
+ * @pools: The 'struct device->dma_pools' link.
+ * @type: Type of the pool
+ * @lock: Protects the inuse_list and free_list from concurrent access. Must be
+ * used with irqsave/irqrestore variants because the pool allocator may be
+ * called from delayed work.
+ * @inuse_list: Pool of pages that are in use. The order is very important:
+ * it is the order in which the TTM pages are put back.
+ * @free_list: Pool of pages that are free to be used. No order requirements.
+ * @dev: The device that is associated with these pools.
+ * @size: Size used during DMA allocation.
+ * @npages_free: Count of available pages for re-use.
+ * @npages_in_use: Count of pages that are in use.
+ * @nfrees: Stats when pool is shrinking.
+ * @nrefills: Stats when the pool is grown.
+ * @gfp_flags: Flags to pass for alloc_page.
+ * @name: Name of the pool.
+ * @dev_name: Name derived from dev - similar to how dev_info works.
+ * Used during shutdown as the dev_info during release is unavailable.
+ */
+struct dma_pool {
+ struct list_head pools; /* The 'struct device->dma_pools link */
+ enum pool_type type;
+ spinlock_t lock;
+ struct list_head inuse_list;
+ struct list_head free_list;
+ struct device *dev;
+ unsigned size;
+ unsigned npages_free;
+ unsigned npages_in_use;
+ unsigned long nfrees; /* Stats when shrunk. */
+ unsigned long nrefills; /* Stats when grown. */
+ gfp_t gfp_flags;
+ char name[13]; /* "cached dma32" */
+ char dev_name[64]; /* Constructed from dev */
+};
+
+/*
+ * The accounting page keeping track of the allocated page along with
+ * the DMA address.
+ * @page_list: The link to the 'page_list' in 'struct dma_pool'.
+ * @vaddr: The virtual address of the page
+ * @dma: The bus address of the page. If the page is not allocated
+ * via the DMA API, it will be -1.
+ */
+struct dma_page {
+ struct list_head page_list;
+ void *vaddr;
+ struct page *p;
+ dma_addr_t dma;
+};
+
+/*
+ * Limits for the pool. They are handled without locks because the only place where
+ * they may change is in sysfs store. They won't have immediate effect anyway
+ * so forcing serialization to access them is pointless.
+ */
+
+struct ttm_pool_opts {
+ unsigned alloc_size;
+ unsigned max_size;
+ unsigned small;
+};
+
+/*
+ * Contains the list of all of the 'struct device' and their corresponding
+ * DMA pools. Guarded by _manager->lock.
+ * @pools: The link to 'struct ttm_pool_manager->pools'
+ * @dev: The 'struct device' associated with the 'pool'
+ * @pool: The 'struct dma_pool' associated with the 'dev'
+ */
+struct device_pools {
+ struct list_head pools;
+ struct device *dev;
+ struct dma_pool *pool;
+};
+
+/*
+ * struct ttm_pool_manager - Holds memory pools for fast allocation
+ *
+ * @lock: Lock used when adding/removing from pools
+ * @pools: List of 'struct device' and 'struct dma_pool' tuples.
+ * @options: Limits for the pool.
+ * @npools: Total number of pools in existence.
+ * @mm_shrink: The structure used by [un|]register_shrinker
+ */
+struct ttm_pool_manager {
+ struct mutex lock;
+ struct list_head pools;
+ struct ttm_pool_opts options;
+ unsigned npools;
+ struct shrinker mm_shrink;
+ struct kobject kobj;
+};
+
+static struct ttm_pool_manager *_manager;
+
+static struct attribute ttm_page_pool_max = {
+ .name = "pool_max_size",
+ .mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_small = {
+ .name = "pool_small_allocation",
+ .mode = S_IRUGO | S_IWUSR
+};
+static struct attribute ttm_page_pool_alloc_size = {
+ .name = "pool_allocation_size",
+ .mode = S_IRUGO | S_IWUSR
+};
+
+static struct attribute *ttm_pool_attrs[] = {
+ &ttm_page_pool_max,
+ &ttm_page_pool_small,
+ &ttm_page_pool_alloc_size,
+ NULL
+};
+
+static void ttm_pool_kobj_release(struct kobject *kobj)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ kfree(m);
+}
+
+static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
+ const char *buffer, size_t size)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ int chars;
+ unsigned val;
+ chars = sscanf(buffer, "%u", &val);
+ if (chars == 0)
+ return size;
+
+ /* Convert kb to number of pages */
+ val = val / (PAGE_SIZE >> 10);
+
+ if (attr == &ttm_page_pool_max)
+ m->options.max_size = val;
+ else if (attr == &ttm_page_pool_small)
+ m->options.small = val;
+ else if (attr == &ttm_page_pool_alloc_size) {
+ if (val > NUM_PAGES_TO_ALLOC*8) {
+ printk(KERN_ERR TTM_PFX
+ "Setting allocation size to %lu "
+ "is not allowed. Recommended size is "
+ "%lu\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ return size;
+ } else if (val > NUM_PAGES_TO_ALLOC) {
+ printk(KERN_WARNING TTM_PFX
+ "Setting allocation size to "
+ "larger than %lu is not recommended.\n",
+ NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+ }
+ m->options.alloc_size = val;
+ }
+
+ return size;
+}
+
+static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
+ char *buffer)
+{
+ struct ttm_pool_manager *m =
+ container_of(kobj, struct ttm_pool_manager, kobj);
+ unsigned val = 0;
+
+ if (attr == &ttm_page_pool_max)
+ val = m->options.max_size;
+ else if (attr == &ttm_page_pool_small)
+ val = m->options.small;
+ else if (attr == &ttm_page_pool_alloc_size)
+ val = m->options.alloc_size;
+
+ val = val * (PAGE_SIZE >> 10);
+
+ return snprintf(buffer, PAGE_SIZE, "%u\n", val);
+}
+
+static const struct sysfs_ops ttm_pool_sysfs_ops = {
+ .show = &ttm_pool_show,
+ .store = &ttm_pool_store,
+};
+
+static struct kobj_type ttm_pool_kobj_type = {
+ .release = &ttm_pool_kobj_release,
+ .sysfs_ops = &ttm_pool_sysfs_ops,
+ .default_attrs = ttm_pool_attrs,
+};
+
+#ifndef CONFIG_X86
+static int set_pages_array_wb(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ unmap_page_from_agp(pages[i]);
+#endif
+ return 0;
+}
+
+static int set_pages_array_wc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ map_page_into_agp(pages[i]);
+#endif
+ return 0;
+}
+
+static int set_pages_array_uc(struct page **pages, int addrinarray)
+{
+#ifdef TTM_HAS_AGP
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ map_page_into_agp(pages[i]);
+#endif
+ return 0;
+}
+#endif /* for !CONFIG_X86 */
+
+static int ttm_set_pages_caching(struct dma_pool *pool,
+ struct page **pages, unsigned cpages)
+{
+ int r = 0;
+ /* Set page caching */
+ if (pool->type & IS_UC) {
+ r = set_pages_array_uc(pages, cpages);
+ if (r)
+ pr_err(TTM_PFX
+ "%s: Failed to set %d pages to uc!\n",
+ pool->dev_name, cpages);
+ }
+ if (pool->type & IS_WC) {
+ r = set_pages_array_wc(pages, cpages);
+ if (r)
+ pr_err(TTM_PFX
+ "%s: Failed to set %d pages to wc!\n",
+ pool->dev_name, cpages);
+ }
+ return r;
+}
+
+static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
+{
+ dma_addr_t dma = d_page->dma;
+ dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);
+
+ kfree(d_page);
+ d_page = NULL;
+}
+static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
+{
+ struct dma_page *d_page;
+
+ d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
+ if (!d_page)
+ return NULL;
+
+ d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
+ &d_page->dma,
+ pool->gfp_flags);
+ if (d_page->vaddr)
+ d_page->p = virt_to_page(d_page->vaddr);
+ else {
+ kfree(d_page);
+ d_page = NULL;
+ }
+ return d_page;
+}
+static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
+{
+ enum pool_type type = IS_UNDEFINED;
+
+ if (flags & TTM_PAGE_FLAG_DMA32)
+ type |= IS_DMA32;
+ if (cstate == tt_cached)
+ type |= IS_CACHED;
+ else if (cstate == tt_uncached)
+ type |= IS_UC;
+ else
+ type |= IS_WC;
+
+ return type;
+}
+
+static void ttm_pool_update_free_locked(struct dma_pool *pool,
+ unsigned freed_pages)
+{
+ pool->npages_free -= freed_pages;
+ pool->nfrees += freed_pages;
+
+}
+
+/* set memory back to wb and free the pages. */
+static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
+ struct page *pages[], unsigned npages)
+{
+ struct dma_page *d_page, *tmp;
+
+ /* Don't set WB on WB page pool. */
+ if (npages && !(pool->type & IS_CACHED) &&
+ set_pages_array_wb(pages, npages))
+ pr_err(TTM_PFX "%s: Failed to set %d pages to wb!\n",
+ pool->dev_name, npages);
+
+ list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
+ list_del(&d_page->page_list);
+ __ttm_dma_free_page(pool, d_page);
+ }
+}
+
+static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
+{
+ /* Don't set WB on WB page pool. */
+ if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
+ pr_err(TTM_PFX "%s: Failed to set %d pages to wb!\n",
+ pool->dev_name, 1);
+
+ list_del(&d_page->page_list);
+ __ttm_dma_free_page(pool, d_page);
+}
+
+/*
+ * Free pages from pool.
+ *
+ * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
+ * number of pages in one go.
+ *
+ * @pool: to free the pages from
+ * @nr_free: number of pages to free; FREE_ALL_PAGES frees all pages in the pool
+ **/
+static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
+{
+ unsigned long irq_flags;
+ struct dma_page *dma_p, *tmp;
+ struct page **pages_to_free;
+ struct list_head d_pages;
+ unsigned freed_pages = 0,
+ npages_to_free = nr_free;
+
+ if (NUM_PAGES_TO_ALLOC < nr_free)
+ npages_to_free = NUM_PAGES_TO_ALLOC;
+#if 0
+ if (nr_free > 1) {
+ pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
+ pool->dev_name, pool->name, current->pid,
+ npages_to_free, nr_free);
+ }
+#endif
+ pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+ GFP_KERNEL);
+
+ if (!pages_to_free) {
+ pr_err(TTM_PFX
+ "%s: Failed to allocate memory for pool free operation.\n",
+ pool->dev_name);
+ return 0;
+ }
+ INIT_LIST_HEAD(&d_pages);
+restart:
+ spin_lock_irqsave(&pool->lock, irq_flags);
+
+ /* We're picking the oldest ones off the list */
+ list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
+ page_list) {
+ if (freed_pages >= npages_to_free)
+ break;
+
+ /* Move the dma_page from one list to another. */
+ list_move(&dma_p->page_list, &d_pages);
+
+ pages_to_free[freed_pages++] = dma_p->p;
+ /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
+ if (freed_pages >= NUM_PAGES_TO_ALLOC) {
+
+ ttm_pool_update_free_locked(pool, freed_pages);
+ /**
+ * Because changing page caching is costly
+ * we unlock the pool to prevent stalling.
+ */
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ ttm_dma_pages_put(pool, &d_pages, pages_to_free,
+ freed_pages);
+
+ INIT_LIST_HEAD(&d_pages);
+
+ if (likely(nr_free != FREE_ALL_PAGES))
+ nr_free -= freed_pages;
+
+ if (NUM_PAGES_TO_ALLOC >= nr_free)
+ npages_to_free = nr_free;
+ else
+ npages_to_free = NUM_PAGES_TO_ALLOC;
+
+ freed_pages = 0;
+
+ /* free all so restart the processing */
+ if (nr_free)
+ goto restart;
+
+ /* Not allowed to fall through or break because
+ * following context is inside spinlock while we are
+ * outside here.
+ */
+ goto out;
+
+ }
+ }
+
+ /* remove range of pages from the pool */
+ if (freed_pages) {
+ ttm_pool_update_free_locked(pool, freed_pages);
+ nr_free -= freed_pages;
+ }
+
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ if (freed_pages)
+ ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
+out:
+ kfree(pages_to_free);
+ return nr_free;
+}
+
+static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
+{
+ struct device_pools *p;
+ struct dma_pool *pool;
+
+ if (!dev)
+ return;
+
+ mutex_lock(&_manager->lock);
+ list_for_each_entry_reverse(p, &_manager->pools, pools) {
+ if (p->dev != dev)
+ continue;
+ pool = p->pool;
+ if (pool->type != type)
+ continue;
+
+ list_del(&p->pools);
+ kfree(p);
+ _manager->npools--;
+ break;
+ }
+ list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
+ if (pool->type != type)
+ continue;
+ /* Takes a spinlock.. */
+ ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
+ WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
+ /* This code path is called after _all_ references to the
+ * struct device have been dropped - so nobody should be
+ * touching it. In case somebody is trying to _add_ we are
+ * guarded by the mutex. */
+ list_del(&pool->pools);
+ kfree(pool);
+ break;
+ }
+ mutex_unlock(&_manager->lock);
+}
+
+/*
+ * This destructor is run when the 'struct device' is freed,
+ * although the pool might have already been freed earlier.
+ */
+static void ttm_dma_pool_release(struct device *dev, void *res)
+{
+ struct dma_pool *pool = *(struct dma_pool **)res;
+
+ if (pool)
+ ttm_dma_free_pool(dev, pool->type);
+}
+
+static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
+{
+ return *(struct dma_pool **)res == match_data;
+}
+
+static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
+ enum pool_type type)
+{
+ char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
+ enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
+ struct device_pools *sec_pool = NULL;
+ struct dma_pool *pool = NULL, **ptr;
+ unsigned i;
+ int ret = -ENODEV;
+ char *p;
+
+ if (!dev)
+ return NULL;
+
+ ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ ret = -ENOMEM;
+
+ pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
+ dev_to_node(dev));
+ if (!pool)
+ goto err_mem;
+
+ sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
+ dev_to_node(dev));
+ if (!sec_pool)
+ goto err_mem;
+
+ INIT_LIST_HEAD(&sec_pool->pools);
+ sec_pool->dev = dev;
+ sec_pool->pool = pool;
+
+ INIT_LIST_HEAD(&pool->free_list);
+ INIT_LIST_HEAD(&pool->inuse_list);
+ INIT_LIST_HEAD(&pool->pools);
+ spin_lock_init(&pool->lock);
+ pool->dev = dev;
+ pool->npages_free = pool->npages_in_use = 0;
+ pool->nfrees = 0;
+ pool->gfp_flags = flags;
+ pool->size = PAGE_SIZE;
+ pool->type = type;
+ pool->nrefills = 0;
+ p = pool->name;
+ for (i = 0; i < 5; i++) {
+ if (type & t[i]) {
+ p += snprintf(p, sizeof(pool->name) - (p - pool->name),
+ "%s", n[i]);
+ }
+ }
+ *p = 0;
+ /* We copy the name for pr_ calls because when dma_pool_destroy is called
+ * the kobj->name has already been deallocated. */
+ snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
+ dev_driver_string(dev), dev_name(dev));
+ mutex_lock(&_manager->lock);
+ /* You can get the dma_pool from either the global: */
+ list_add(&sec_pool->pools, &_manager->pools);
+ _manager->npools++;
+ /* or from 'struct device': */
+ list_add(&pool->pools, &dev->dma_pools);
+ mutex_unlock(&_manager->lock);
+
+ *ptr = pool;
+ devres_add(dev, ptr);
+
+ return pool;
+err_mem:
+ devres_free(ptr);
+ kfree(sec_pool);
+ kfree(pool);
+ return ERR_PTR(ret);
+}
+
+static struct dma_pool *ttm_dma_find_pool(struct device *dev,
+ enum pool_type type)
+{
+ struct dma_pool *pool, *tmp, *found = NULL;
+
+ if (type == IS_UNDEFINED)
+ return found;
+
+ /* NB: We iterate on the 'struct dev' which has no spinlock, but
+ * it does have a kref which we have taken. The kref is taken during
+ * graphic driver loading - in the drm_pci_init it calls either
+ * pci_dev_get or pci_register_driver which both end up taking a kref
+ * on 'struct device'.
+ *
+ * On teardown, the graphics drivers end up quiescing the TTM (put_pages)
+ * and calling the devres destructors: ttm_dma_pool_release. The nice
+ * thing is that at that point in time there are no pages associated with
+ * the driver, so this function will not be called.
+ */
+ list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
+ if (pool->type != type)
+ continue;
+ found = pool;
+ break;
+ }
+ return found;
+}
+
+/*
+ * Free the pages that failed to change the caching state. If there
+ * are pages that have already changed their caching state, put them back
+ * in the pool.
+ */
+static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
+ struct list_head *d_pages,
+ struct page **failed_pages,
+ unsigned cpages)
+{
+ struct dma_page *d_page, *tmp;
+ struct page *p;
+ unsigned i = 0;
+
+ p = failed_pages[0];
+ if (!p)
+ return;
+ /* Find the failed page. */
+ list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
+ if (d_page->p != p)
+ continue;
+ /* .. and then progress over the full list. */
+ list_del(&d_page->page_list);
+ __ttm_dma_free_page(pool, d_page);
+ if (++i < cpages)
+ p = failed_pages[i];
+ else
+ break;
+ }
+
+}
+
+/*
+ * Allocate 'count' pages with the requested caching state and add them,
+ * together with their DMA addresses, to the 'd_pages' list.
+ * We return zero for success, and negative numbers as errors.
+ */
+static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
+ struct list_head *d_pages,
+ unsigned count)
+{
+ struct page **caching_array;
+ struct dma_page *dma_p;
+ struct page *p;
+ int r = 0;
+ unsigned i, cpages;
+ unsigned max_cpages = min(count,
+ (unsigned)(PAGE_SIZE/sizeof(struct page *)));
+
+ /* allocate array for page caching change */
+ caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
+
+ if (!caching_array) {
+ pr_err(TTM_PFX
+ "%s: Unable to allocate table for new pages.",
+ pool->dev_name);
+ return -ENOMEM;
+ }
+
+ if (count > 1) {
+ pr_debug("%s: (%s:%d) Getting %d pages\n",
+ pool->dev_name, pool->name, current->pid,
+ count);
+ }
+
+ for (i = 0, cpages = 0; i < count; ++i) {
+ dma_p = __ttm_dma_alloc_page(pool);
+ if (!dma_p) {
+ pr_err(TTM_PFX "%s: Unable to get page %u.\n",
+ pool->dev_name, i);
+
+ /* store already allocated pages in the pool after
+ * setting the caching state */
+ if (cpages) {
+ r = ttm_set_pages_caching(pool, caching_array,
+ cpages);
+ if (r)
+ ttm_dma_handle_caching_state_failure(
+ pool, d_pages, caching_array,
+ cpages);
+ }
+ r = -ENOMEM;
+ goto out;
+ }
+ p = dma_p->p;
+#ifdef CONFIG_HIGHMEM
+ /* gfp flags of a highmem page should never be dma32, so
+ * we should be fine in such a case
+ */
+ if (!PageHighMem(p))
+#endif
+ {
+ caching_array[cpages++] = p;
+ if (cpages == max_cpages) {
+ /* Note: Cannot hold the spinlock */
+ r = ttm_set_pages_caching(pool, caching_array,
+ cpages);
+ if (r) {
+ ttm_dma_handle_caching_state_failure(
+ pool, d_pages, caching_array,
+ cpages);
+ goto out;
+ }
+ cpages = 0;
+ }
+ }
+ list_add(&dma_p->page_list, d_pages);
+ }
+
+ if (cpages) {
+ r = ttm_set_pages_caching(pool, caching_array, cpages);
+ if (r)
+ ttm_dma_handle_caching_state_failure(pool, d_pages,
+ caching_array, cpages);
+ }
+out:
+ kfree(caching_array);
+ return r;
+}
+
+/*
+ * @return count of pages still required to fulfill the request.
+ */
+static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
+ unsigned long *irq_flags)
+{
+ unsigned count = _manager->options.small;
+ int r = pool->npages_free;
+
+ if (count > pool->npages_free) {
+ struct list_head d_pages;
+
+ INIT_LIST_HEAD(&d_pages);
+
+ spin_unlock_irqrestore(&pool->lock, *irq_flags);
+
+ /* Returns how many more are necessary to fulfill the
+ * request. */
+ r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);
+
+ spin_lock_irqsave(&pool->lock, *irq_flags);
+ if (!r) {
+ /* Add the fresh pages to the end. */
+ list_splice(&d_pages, &pool->free_list);
+ ++pool->nrefills;
+ pool->npages_free += count;
+ r = count;
+ } else {
+ struct dma_page *d_page;
+ unsigned cpages = 0;
+
+ pr_err(TTM_PFX "%s: Failed to fill %s pool (r:%d)!\n",
+ pool->dev_name, pool->name, r);
+
+ list_for_each_entry(d_page, &d_pages, page_list) {
+ cpages++;
+ }
+ list_splice_tail(&d_pages, &pool->free_list);
+ pool->npages_free += cpages;
+ r = cpages;
+ }
+ }
+ return r;
+}
+
+/*
+ * @return zero on success, or a negative error code if no page could be
+ * obtained. The populate list is actually a stack (not that it matters, as
+ * TTM allocates one page at a time).
+ */
+static int ttm_dma_pool_get_pages(struct dma_pool *pool,
+ struct ttm_dma_tt *ttm_dma,
+ unsigned index)
+{
+ struct dma_page *d_page;
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+ unsigned long irq_flags;
+ int count, r = -ENOMEM;
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
+ if (count) {
+ d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
+ ttm->pages[index] = d_page->p;
+ ttm_dma->dma_address[index] = d_page->dma;
+ list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
+ r = 0;
+ pool->npages_in_use += 1;
+ pool->npages_free -= 1;
+ }
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+ return r;
+}
+
+/*
+ * On success the ttm pages array will hold correctly cached pages for
+ * every page of the ttm. On failure a negative error code (-ENOMEM, etc.)
+ * is returned.
+ */
+int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+ struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
+ struct dma_pool *pool;
+ enum pool_type type;
+ unsigned i;
+ gfp_t gfp_flags;
+ int ret;
+
+ if (ttm->state != tt_unpopulated)
+ return 0;
+
+ type = ttm_to_type(ttm->page_flags, ttm->caching_state);
+ if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
+ gfp_flags = GFP_USER | GFP_DMA32;
+ else
+ gfp_flags = GFP_HIGHUSER;
+ if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
+ gfp_flags |= __GFP_ZERO;
+
+ pool = ttm_dma_find_pool(dev, type);
+ if (!pool) {
+ pool = ttm_dma_pool_init(dev, gfp_flags, type);
+ if (IS_ERR_OR_NULL(pool)) {
+ return -ENOMEM;
+ }
+ }
+
+ INIT_LIST_HEAD(&ttm_dma->pages_list);
+ for (i = 0; i < ttm->num_pages; ++i) {
+ ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
+ if (ret != 0) {
+ ttm_dma_unpopulate(ttm_dma, dev);
+ return -ENOMEM;
+ }
+
+ ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
+ false, false);
+ if (unlikely(ret != 0)) {
+ ttm_dma_unpopulate(ttm_dma, dev);
+ return -ENOMEM;
+ }
+ }
+
+ if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
+ ret = ttm_tt_swapin(ttm);
+ if (unlikely(ret != 0)) {
+ ttm_dma_unpopulate(ttm_dma, dev);
+ return ret;
+ }
+ }
+
+ ttm->state = tt_unbound;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_dma_populate);
+
+/* Get a good estimate of how many pages are free in the pools */
+static int ttm_dma_pool_get_num_unused_pages(void)
+{
+ struct device_pools *p;
+ unsigned total = 0;
+
+ mutex_lock(&_manager->lock);
+ list_for_each_entry(p, &_manager->pools, pools)
+ total += p->pool->npages_free;
+ mutex_unlock(&_manager->lock);
+ return total;
+}
+
+/* Put all pages in the pages list into the correct pool to wait for reuse */
+void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+ struct dma_pool *pool;
+ struct dma_page *d_page, *next;
+ enum pool_type type;
+ bool is_cached = false;
+ unsigned count = 0, i, npages = 0;
+ unsigned long irq_flags;
+
+ type = ttm_to_type(ttm->page_flags, ttm->caching_state);
+ pool = ttm_dma_find_pool(dev, type);
+ if (!pool)
+ return;
+
+ is_cached = (ttm_dma_find_pool(pool->dev,
+ ttm_to_type(ttm->page_flags, tt_cached)) == pool);
+
+ /* make sure the pages array matches the list and count the number of pages */
+ list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
+ ttm->pages[count] = d_page->p;
+ count++;
+ }
+
+ spin_lock_irqsave(&pool->lock, irq_flags);
+ pool->npages_in_use -= count;
+ if (is_cached) {
+ pool->nfrees += count;
+ } else {
+ pool->npages_free += count;
+ list_splice(&ttm_dma->pages_list, &pool->free_list);
+ npages = count;
+ if (pool->npages_free > _manager->options.max_size) {
+ npages = pool->npages_free - _manager->options.max_size;
+ /* free at least NUM_PAGES_TO_ALLOC number of pages
+ * to reduce calls to set_memory_wb */
+ if (npages < NUM_PAGES_TO_ALLOC)
+ npages = NUM_PAGES_TO_ALLOC;
+ }
+ }
+ spin_unlock_irqrestore(&pool->lock, irq_flags);
+
+ if (is_cached) {
+ list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
+ ttm_mem_global_free_page(ttm->glob->mem_glob,
+ d_page->p);
+ ttm_dma_page_put(pool, d_page);
+ }
+ } else {
+ for (i = 0; i < count; i++) {
+ ttm_mem_global_free_page(ttm->glob->mem_glob,
+ ttm->pages[i]);
+ }
+ }
+
+ INIT_LIST_HEAD(&ttm_dma->pages_list);
+ for (i = 0; i < ttm->num_pages; i++) {
+ ttm->pages[i] = NULL;
+ ttm_dma->dma_address[i] = 0;
+ }
+
+ /* shrink pool if necessary (only on !is_cached pools) */
+ if (npages)
+ ttm_dma_page_pool_free(pool, npages);
+ ttm->state = tt_unpopulated;
+}
+EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
+
+/**
+ * Callback for mm to request the pool to reduce the number of pages held.
+ */
+static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ static atomic_t start_pool = ATOMIC_INIT(0);
+ unsigned idx = 0;
+ unsigned pool_offset = atomic_add_return(1, &start_pool);
+ unsigned shrink_pages = sc->nr_to_scan;
+ struct device_pools *p;
+
+ if (list_empty(&_manager->pools))
+ return 0;
+
+ mutex_lock(&_manager->lock);
+ pool_offset = pool_offset % _manager->npools;
+ list_for_each_entry(p, &_manager->pools, pools) {
+ unsigned nr_free;
+
+ if (!p->dev)
+ continue;
+ if (shrink_pages == 0)
+ break;
+ /* Do it in round-robin fashion. */
+ if (++idx < pool_offset)
+ continue;
+ nr_free = shrink_pages;
+ shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
+ pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
+ p->pool->dev_name, p->pool->name, current->pid, nr_free,
+ shrink_pages);
+ }
+ mutex_unlock(&_manager->lock);
+ /* return estimated number of unused pages in pool */
+ return ttm_dma_pool_get_num_unused_pages();
+}
+
+static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
+{
+ manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
+ manager->mm_shrink.seeks = 1;
+ register_shrinker(&manager->mm_shrink);
+}
+
+static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
+{
+ unregister_shrinker(&manager->mm_shrink);
+}
+
+int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
+{
+ int ret = -ENOMEM;
+
+ WARN_ON(_manager);
+
+ printk(KERN_INFO TTM_PFX "Initializing DMA pool allocator.\n");
+
+ _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
+ if (!_manager)
+ goto err_manager;
+
+ mutex_init(&_manager->lock);
+ INIT_LIST_HEAD(&_manager->pools);
+
+ _manager->options.max_size = max_pages;
+ _manager->options.small = SMALL_ALLOCATION;
+ _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
+
+ /* This takes care of auto-freeing the _manager */
+ ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
+ &glob->kobj, "dma_pool");
+ if (unlikely(ret != 0)) {
+ kobject_put(&_manager->kobj);
+ goto err;
+ }
+ ttm_dma_pool_mm_shrink_init(_manager);
+ return 0;
+err_manager:
+ kfree(_manager);
+ _manager = NULL;
+err:
+ return ret;
+}
+
+void ttm_dma_page_alloc_fini(void)
+{
+ struct device_pools *p, *t;
+
+ printk(KERN_INFO TTM_PFX "Finalizing DMA pool allocator.\n");
+ ttm_dma_pool_mm_shrink_fini(_manager);
+
+ list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
+ dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
+ current->pid);
+ WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
+ ttm_dma_pool_match, p->pool));
+ ttm_dma_free_pool(p->dev, p->pool->type);
+ }
+ kobject_put(&_manager->kobj);
+ _manager = NULL;
+}
+
+int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
+{
+ struct device_pools *p;
+ struct dma_pool *pool = NULL;
+ char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
+ "name", "virt", "busaddr"};
+
+ if (!_manager) {
+ seq_printf(m, "No pool allocator running.\n");
+ return 0;
+ }
+ seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
+ h[0], h[1], h[2], h[3], h[4], h[5]);
+ mutex_lock(&_manager->lock);
+ list_for_each_entry(p, &_manager->pools, pools) {
+ struct device *dev = p->dev;
+ if (!dev)
+ continue;
+ pool = p->pool;
+ seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
+ pool->name, pool->nrefills,
+ pool->nfrees, pool->npages_in_use,
+ pool->npages_free,
+ pool->dev_name);
+ }
+ mutex_unlock(&_manager->lock);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
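
A driver that opts into the DMA pool wraps its ttm_tt in a struct ttm_dma_tt and calls ttm_dma_populate()/ttm_dma_unpopulate() with the struct device whose coherent memory should back the pages. A hedged sketch of the driver side; struct my_ttm and my_device() are assumptions, while the ttm_dma_tt layout (ttm, dma_address, pages_list) matches the code above:

/* Hypothetical container embedding the DMA-aware TT. */
struct my_ttm {
	struct ttm_dma_tt dma;		/* embeds struct ttm_tt as dma.ttm */
};

static int my_drv_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct my_ttm *mt = container_of(ttm, struct my_ttm, dma.ttm);
	struct device *dev = my_device(ttm->bdev);	/* e.g. &pdev->dev */

	/* Fills ttm->pages[] and mt->dma.dma_address[] from the pool. */
	return ttm_dma_populate(&mt->dma, dev);
}

static void my_drv_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct my_ttm *mt = container_of(ttm, struct my_ttm, dma.ttm);

	ttm_dma_unpopulate(&mt->dma, my_device(ttm->bdev));
}
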
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index f9cc548..2f75d20 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -43,139 +43,20 @@
#include "ttm/ttm_placement.h"
#include "ttm/ttm_page_alloc.h"
-static int ttm_tt_swapin(struct ttm_tt *ttm);
-
/**
* Allocates storage for pointers to the pages that back the ttm.
*/
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
- ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
- ttm->dma_address = drm_calloc_large(ttm->num_pages,
- sizeof(*ttm->dma_address));
-}
-
-static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
-{
- drm_free_large(ttm->pages);
- ttm->pages = NULL;
- drm_free_large(ttm->dma_address);
- ttm->dma_address = NULL;
-}
-
-static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
-{
- int write;
- int dirty;
- struct page *page;
- int i;
- struct ttm_backend *be = ttm->be;
-
- BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
- write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
- dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);
-
- if (be)
- be->func->clear(be);
-
- for (i = 0; i < ttm->num_pages; ++i) {
- page = ttm->pages[i];
- if (page == NULL)
- continue;
-
- if (page == ttm->dummy_read_page) {
- BUG_ON(write);
- continue;
- }
-
- if (write && dirty && !PageReserved(page))
- set_page_dirty_lock(page);
-
- ttm->pages[i] = NULL;
- ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
- put_page(page);
- }
- ttm->state = tt_unpopulated;
- ttm->first_himem_page = ttm->num_pages;
- ttm->last_lomem_page = -1;
+ ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(void*));
}
-static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
+static void ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
{
- struct page *p;
- struct list_head h;
- struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
- int ret;
-
- while (NULL == (p = ttm->pages[index])) {
-
- INIT_LIST_HEAD(&h);
-
- ret = ttm_get_pages(&h, ttm->page_flags, ttm->caching_state, 1,
- &ttm->dma_address[index]);
-
- if (ret != 0)
- return NULL;
-
- p = list_first_entry(&h, struct page, lru);
-
- ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
- if (unlikely(ret != 0))
- goto out_err;
-
- if (PageHighMem(p))
- ttm->pages[--ttm->first_himem_page] = p;
- else
- ttm->pages[++ttm->last_lomem_page] = p;
- }
- return p;
-out_err:
- put_page(p);
- return NULL;
-}
-
-struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
-{
- int ret;
-
- if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
- ret = ttm_tt_swapin(ttm);
- if (unlikely(ret != 0))
- return NULL;
- }
- return __ttm_tt_get_page(ttm, index);
-}
-
-int ttm_tt_populate(struct ttm_tt *ttm)
-{
- struct page *page;
- unsigned long i;
- struct ttm_backend *be;
- int ret;
-
- if (ttm->state != tt_unpopulated)
- return 0;
-
- if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
- ret = ttm_tt_swapin(ttm);
- if (unlikely(ret != 0))
- return ret;
- }
-
- be = ttm->be;
-
- for (i = 0; i < ttm->num_pages; ++i) {
- page = __ttm_tt_get_page(ttm, i);
- if (!page)
- return -ENOMEM;
- }
-
- be->func->populate(be, ttm->num_pages, ttm->pages,
- ttm->dummy_read_page, ttm->dma_address);
- ttm->state = tt_unbound;
- return 0;
+ ttm->ttm.pages = drm_calloc_large(ttm->ttm.num_pages, sizeof(void*));
+ ttm->dma_address = drm_calloc_large(ttm->ttm.num_pages,
+ sizeof(*ttm->dma_address));
}
-EXPORT_SYMBOL(ttm_tt_populate);
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
@@ -278,153 +159,100 @@ int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);
-static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
-{
- int i;
- unsigned count = 0;
- struct list_head h;
- struct page *cur_page;
- struct ttm_backend *be = ttm->be;
-
- INIT_LIST_HEAD(&h);
-
- if (be)
- be->func->clear(be);
- for (i = 0; i < ttm->num_pages; ++i) {
-
- cur_page = ttm->pages[i];
- ttm->pages[i] = NULL;
- if (cur_page) {
- if (page_count(cur_page) != 1)
- printk(KERN_ERR TTM_PFX
- "Erroneous page count. "
- "Leaking pages.\n");
- ttm_mem_global_free_page(ttm->glob->mem_glob,
- cur_page);
- list_add(&cur_page->lru, &h);
- count++;
- }
- }
- ttm_put_pages(&h, count, ttm->page_flags, ttm->caching_state,
- ttm->dma_address);
- ttm->state = tt_unpopulated;
- ttm->first_himem_page = ttm->num_pages;
- ttm->last_lomem_page = -1;
-}
-
void ttm_tt_destroy(struct ttm_tt *ttm)
{
- struct ttm_backend *be;
-
if (unlikely(ttm == NULL))
return;
- be = ttm->be;
- if (likely(be != NULL)) {
- be->func->destroy(be);
- ttm->be = NULL;
+ if (ttm->state == tt_bound) {
+ ttm_tt_unbind(ttm);
}
if (likely(ttm->pages != NULL)) {
- if (ttm->page_flags & TTM_PAGE_FLAG_USER)
- ttm_tt_free_user_pages(ttm);
- else
- ttm_tt_free_alloced_pages(ttm);
-
- ttm_tt_free_page_directory(ttm);
+ ttm->bdev->driver->ttm_tt_unpopulate(ttm);
}
if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTENT_SWAP) &&
ttm->swap_storage)
fput(ttm->swap_storage);
- kfree(ttm);
+ ttm->swap_storage = NULL;
+ ttm->func->destroy(ttm);
}
-int ttm_tt_set_user(struct ttm_tt *ttm,
- struct task_struct *tsk,
- unsigned long start, unsigned long num_pages)
+int ttm_tt_init(struct ttm_tt *ttm, struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
{
- struct mm_struct *mm = tsk->mm;
- int ret;
- int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
- struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
-
- BUG_ON(num_pages != ttm->num_pages);
- BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);
-
- /**
- * Account user pages as lowmem pages for now.
- */
-
- ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
- false, false);
- if (unlikely(ret != 0))
- return ret;
-
- down_read(&mm->mmap_sem);
- ret = get_user_pages(tsk, mm, start, num_pages,
- write, 0, ttm->pages, NULL);
- up_read(&mm->mmap_sem);
+ ttm->bdev = bdev;
+ ttm->glob = bdev->glob;
+ ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ ttm->caching_state = tt_cached;
+ ttm->page_flags = page_flags;
+ ttm->dummy_read_page = dummy_read_page;
+ ttm->state = tt_unpopulated;
+ ttm->swap_storage = NULL;
- if (ret != num_pages && write) {
- ttm_tt_free_user_pages(ttm);
- ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
+ ttm_tt_alloc_page_directory(ttm);
+ if (!ttm->pages) {
+ ttm_tt_destroy(ttm);
+ printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
return -ENOMEM;
}
-
- ttm->tsk = tsk;
- ttm->start = start;
- ttm->state = tt_unbound;
-
return 0;
}
+EXPORT_SYMBOL(ttm_tt_init);
-struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
- uint32_t page_flags, struct page *dummy_read_page)
+void ttm_tt_fini(struct ttm_tt *ttm)
{
- struct ttm_bo_driver *bo_driver = bdev->driver;
- struct ttm_tt *ttm;
-
- if (!bo_driver)
- return NULL;
+ drm_free_large(ttm->pages);
+ ttm->pages = NULL;
+}
+EXPORT_SYMBOL(ttm_tt_fini);
- ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
- if (!ttm)
- return NULL;
+int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
+{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+ ttm->bdev = bdev;
ttm->glob = bdev->glob;
ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- ttm->first_himem_page = ttm->num_pages;
- ttm->last_lomem_page = -1;
ttm->caching_state = tt_cached;
ttm->page_flags = page_flags;
-
ttm->dummy_read_page = dummy_read_page;
+ ttm->state = tt_unpopulated;
+ ttm->swap_storage = NULL;
- ttm_tt_alloc_page_directory(ttm);
- if (!ttm->pages) {
+ INIT_LIST_HEAD(&ttm_dma->pages_list);
+ ttm_dma_tt_alloc_page_directory(ttm_dma);
+ if (!ttm->pages || !ttm_dma->dma_address) {
ttm_tt_destroy(ttm);
printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
- return NULL;
- }
- ttm->be = bo_driver->create_ttm_backend_entry(bdev);
- if (!ttm->be) {
- ttm_tt_destroy(ttm);
- printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
- return NULL;
+ return -ENOMEM;
}
- ttm->state = tt_unpopulated;
- return ttm;
+ return 0;
}
+EXPORT_SYMBOL(ttm_dma_tt_init);
+
+void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma)
+{
+ struct ttm_tt *ttm = &ttm_dma->ttm;
+
+ drm_free_large(ttm->pages);
+ ttm->pages = NULL;
+ drm_free_large(ttm_dma->dma_address);
+ ttm_dma->dma_address = NULL;
+}
+EXPORT_SYMBOL(ttm_dma_tt_fini);
void ttm_tt_unbind(struct ttm_tt *ttm)
{
int ret;
- struct ttm_backend *be = ttm->be;
if (ttm->state == tt_bound) {
- ret = be->func->unbind(be);
+ ret = ttm->func->unbind(ttm);
BUG_ON(ret);
ttm->state = tt_unbound;
}
@@ -433,7 +261,6 @@ void ttm_tt_unbind(struct ttm_tt *ttm)
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
int ret = 0;
- struct ttm_backend *be;
if (!ttm)
return -EINVAL;
@@ -441,25 +268,21 @@ int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
if (ttm->state == tt_bound)
return 0;
- be = ttm->be;
-
- ret = ttm_tt_populate(ttm);
+ ret = ttm->bdev->driver->ttm_tt_populate(ttm);
if (ret)
return ret;
- ret = be->func->bind(be, bo_mem);
+ ret = ttm->func->bind(ttm, bo_mem);
if (unlikely(ret != 0))
return ret;
ttm->state = tt_bound;
- if (ttm->page_flags & TTM_PAGE_FLAG_USER)
- ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
-static int ttm_tt_swapin(struct ttm_tt *ttm)
+int ttm_tt_swapin(struct ttm_tt *ttm)
{
struct address_space *swap_space;
struct file *swap_storage;
@@ -470,16 +293,6 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
int i;
int ret = -ENOMEM;
- if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
- ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
- ttm->num_pages);
- if (unlikely(ret != 0))
- return ret;
-
- ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
- return 0;
- }
-
swap_storage = ttm->swap_storage;
BUG_ON(swap_storage == NULL);
@@ -491,7 +304,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
ret = PTR_ERR(from_page);
goto out_err;
}
- to_page = __ttm_tt_get_page(ttm, i);
+ to_page = ttm->pages[i];
if (unlikely(to_page == NULL))
goto out_err;
@@ -512,7 +325,6 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
return 0;
out_err:
- ttm_tt_free_alloced_pages(ttm);
return ret;
}
@@ -530,18 +342,6 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
BUG_ON(ttm->caching_state != tt_cached);
- /*
- * For user buffers, just unpin the pages, as there should be
- * vma references.
- */
-
- if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
- ttm_tt_free_user_pages(ttm);
- ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
- ttm->swap_storage = NULL;
- return 0;
- }
-
if (!persistent_swap_storage) {
swap_storage = shmem_file_setup("ttm swap",
ttm->num_pages << PAGE_SHIFT,
@@ -576,7 +376,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
page_cache_release(to_page);
}
- ttm_tt_free_alloced_pages(ttm);
+ ttm->bdev->driver->ttm_tt_unpopulate(ttm);
ttm->swap_storage = swap_storage;
ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
if (persistent_swap_storage)
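
Taken together, the ttm_tt.c hunks above fold the old ttm_backend object into ttm_tt itself: page allocation moves behind new per-driver ttm_tt_create/ttm_tt_populate/ttm_tt_unpopulate hooks, while ttm_tt_init() and ttm_tt_fini() carry the common bookkeeping that ttm_tt_create() used to do in core TTM. As a reading aid, a minimal sketch of a converted driver under the interfaces introduced here; the foo_ names are hypothetical, and the vmwgfx_buffer.c hunks further down are the in-tree conversion this mirrors:

#include <linux/slab.h>
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"

struct foo_ttm_tt {
        struct ttm_tt ttm;
        unsigned long bound_offset;     /* driver-private binding state */
};

static int foo_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
        struct foo_ttm_tt *tt = container_of(ttm, struct foo_ttm_tt, ttm);

        /* ttm_tt_bind() has already run the populate hook at this point */
        tt->bound_offset = bo_mem->start;
        return 0;
}

static int foo_ttm_unbind(struct ttm_tt *ttm)
{
        return 0;
}

static void foo_ttm_destroy(struct ttm_tt *ttm)
{
        struct foo_ttm_tt *tt = container_of(ttm, struct foo_ttm_tt, ttm);

        ttm_tt_fini(ttm);               /* frees the page directory */
        kfree(tt);
}

static struct ttm_backend_func foo_ttm_func = {
        .bind    = foo_ttm_bind,
        .unbind  = foo_ttm_unbind,
        .destroy = foo_ttm_destroy,
};

static struct ttm_tt *foo_ttm_tt_create(struct ttm_bo_device *bdev,
                                        unsigned long size, uint32_t page_flags,
                                        struct page *dummy_read_page)
{
        struct foo_ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

        if (!tt)
                return NULL;
        tt->ttm.func = &foo_ttm_func;
        if (ttm_tt_init(&tt->ttm, bdev, size, page_flags, dummy_read_page)) {
                kfree(tt);
                return NULL;
        }
        return &tt->ttm;
}

/* The driver then plugs the stock pool allocator into its ttm_bo_driver: */
static struct ttm_bo_driver foo_bo_driver = {
        .ttm_tt_create     = &foo_ttm_tt_create,
        .ttm_tt_populate   = &ttm_pool_populate,
        .ttm_tt_unpopulate = &ttm_pool_unpopulate,
        /* ... remaining hooks unchanged ... */
};
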
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c
index a83e86d..02661f3 100644
--- a/drivers/gpu/drm/via/via_drv.c
+++ b/drivers/gpu/drm/via/via_drv.c
@@ -30,16 +30,52 @@
#include "drm_pciids.h"
+static int via_driver_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct via_file_private *file_priv;
+
+ DRM_DEBUG_DRIVER("\n");
+ file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
+ if (!file_priv)
+ return -ENOMEM;
+
+ file->driver_priv = file_priv;
+
+ INIT_LIST_HEAD(&file_priv->obj_list);
+
+ return 0;
+}
+
+void via_driver_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct via_file_private *file_priv = file->driver_priv;
+
+ kfree(file_priv);
+}
+
static struct pci_device_id pciidlist[] = {
viadrv_PCI_IDS
};
+static const struct file_operations via_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
DRIVER_IRQ_SHARED,
.load = via_driver_load,
.unload = via_driver_unload,
+ .open = via_driver_open,
+ .postclose = via_driver_postclose,
.context_dtor = via_final_context,
.get_vblank_counter = via_get_vblank_counter,
.enable_vblank = via_enable_vblank,
@@ -54,17 +90,7 @@ static struct drm_driver driver = {
.reclaim_buffers_idlelocked = via_reclaim_buffers_locked,
.lastclose = via_lastclose,
.ioctls = via_ioctls,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = drm_ioctl,
- .mmap = drm_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .llseek = noop_llseek,
- },
-
+ .fops = &via_driver_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
index 9cf87d9..88edacc 100644
--- a/drivers/gpu/drm/via/via_drv.h
+++ b/drivers/gpu/drm/via/via_drv.h
@@ -24,7 +24,7 @@
#ifndef _VIA_DRV_H_
#define _VIA_DRV_H_
-#include "drm_sman.h"
+#include "drm_mm.h"
#define DRIVER_AUTHOR "Various"
#define DRIVER_NAME "via"
@@ -88,9 +88,12 @@ typedef struct drm_via_private {
uint32_t irq_pending_mask;
int *irq_map;
unsigned int idle_fault;
- struct drm_sman sman;
int vram_initialized;
+ struct drm_mm vram_mm;
int agp_initialized;
+ struct drm_mm agp_mm;
+ /** Mapping of userspace keys to mm objects */
+ struct idr object_idr;
unsigned long vram_offset;
unsigned long agp_offset;
drm_via_blitq_t blit_queues[VIA_NUM_BLIT_ENGINES];
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index 6cca9a7..a2ab343 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -104,15 +104,10 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->chipset = chipset;
- ret = drm_sman_init(&dev_priv->sman, 2, 12, 8);
- if (ret) {
- kfree(dev_priv);
- return ret;
- }
+ idr_init(&dev_priv->object_idr);
ret = drm_vblank_init(dev, 1);
if (ret) {
- drm_sman_takedown(&dev_priv->sman);
kfree(dev_priv);
return ret;
}
@@ -124,7 +119,8 @@ int via_driver_unload(struct drm_device *dev)
{
drm_via_private_t *dev_priv = dev->dev_private;
- drm_sman_takedown(&dev_priv->sman);
+ idr_remove_all(&dev_priv->object_idr);
+ idr_destroy(&dev_priv->object_idr);
kfree(dev_priv);
diff --git a/drivers/gpu/drm/via/via_mm.c b/drivers/gpu/drm/via/via_mm.c
index 6cc2dad..a3574d0 100644
--- a/drivers/gpu/drm/via/via_mm.c
+++ b/drivers/gpu/drm/via/via_mm.c
@@ -28,26 +28,22 @@
#include "drmP.h"
#include "via_drm.h"
#include "via_drv.h"
-#include "drm_sman.h"
#define VIA_MM_ALIGN_SHIFT 4
#define VIA_MM_ALIGN_MASK ((1 << VIA_MM_ALIGN_SHIFT) - 1)
+struct via_memblock {
+ struct drm_mm_node mm_node;
+ struct list_head owner_list;
+};
+
int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_via_agp_t *agp = data;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- int ret;
mutex_lock(&dev->struct_mutex);
- ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_AGP, 0,
- agp->size >> VIA_MM_ALIGN_SHIFT);
-
- if (ret) {
- DRM_ERROR("AGP memory manager initialisation error\n");
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ drm_mm_init(&dev_priv->agp_mm, 0, agp->size >> VIA_MM_ALIGN_SHIFT);
dev_priv->agp_initialized = 1;
dev_priv->agp_offset = agp->offset;
@@ -61,17 +57,9 @@ int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_via_fb_t *fb = data;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- int ret;
mutex_lock(&dev->struct_mutex);
- ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_VIDEO, 0,
- fb->size >> VIA_MM_ALIGN_SHIFT);
-
- if (ret) {
- DRM_ERROR("VRAM memory manager initialisation error\n");
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ drm_mm_init(&dev_priv->vram_mm, 0, fb->size >> VIA_MM_ALIGN_SHIFT);
dev_priv->vram_initialized = 1;
dev_priv->vram_offset = fb->offset;
@@ -108,19 +96,25 @@ void via_lastclose(struct drm_device *dev)
return;
mutex_lock(&dev->struct_mutex);
- drm_sman_cleanup(&dev_priv->sman);
- dev_priv->vram_initialized = 0;
- dev_priv->agp_initialized = 0;
+ if (dev_priv->vram_initialized) {
+ drm_mm_takedown(&dev_priv->vram_mm);
+ dev_priv->vram_initialized = 0;
+ }
+ if (dev_priv->agp_initialized) {
+ drm_mm_takedown(&dev_priv->agp_mm);
+ dev_priv->agp_initialized = 0;
+ }
mutex_unlock(&dev->struct_mutex);
}
int via_mem_alloc(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
drm_via_mem_t *mem = data;
- int retval = 0;
- struct drm_memblock_item *item;
+ int retval = 0, user_key;
+ struct via_memblock *item;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
+ struct via_file_private *file_priv = file->driver_priv;
unsigned long tmpSize;
if (mem->type > VIA_MEM_AGP) {
@@ -136,24 +130,57 @@ int via_mem_alloc(struct drm_device *dev, void *data,
return -EINVAL;
}
+ item = kzalloc(sizeof(*item), GFP_KERNEL);
+ if (!item) {
+ retval = -ENOMEM;
+ goto fail_alloc;
+ }
+
tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
- item = drm_sman_alloc(&dev_priv->sman, mem->type, tmpSize, 0,
- (unsigned long)file_priv);
- mutex_unlock(&dev->struct_mutex);
- if (item) {
- mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
- dev_priv->vram_offset : dev_priv->agp_offset) +
- (item->mm->
- offset(item->mm, item->mm_info) << VIA_MM_ALIGN_SHIFT);
- mem->index = item->user_hash.key;
- } else {
- mem->offset = 0;
- mem->size = 0;
- mem->index = 0;
- DRM_DEBUG("Video memory allocation failed\n");
+ if (mem->type == VIA_MEM_AGP)
+ retval = drm_mm_insert_node(&dev_priv->agp_mm,
+ &item->mm_node,
+ tmpSize, 0);
+ else
+ retval = drm_mm_insert_node(&dev_priv->vram_mm,
+ &item->mm_node,
+ tmpSize, 0);
+ if (retval)
+ goto fail_alloc;
+
+again:
+ if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
retval = -ENOMEM;
+ goto fail_idr;
}
+ retval = idr_get_new_above(&dev_priv->object_idr, item, 1, &user_key);
+ if (retval == -EAGAIN)
+ goto again;
+ if (retval)
+ goto fail_idr;
+
+ list_add(&item->owner_list, &file_priv->obj_list);
+ mutex_unlock(&dev->struct_mutex);
+
+ mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
+ dev_priv->vram_offset : dev_priv->agp_offset) +
+ ((item->mm_node.start) << VIA_MM_ALIGN_SHIFT);
+ mem->index = user_key;
+
+ return 0;
+
+fail_idr:
+ drm_mm_remove_node(&item->mm_node);
+fail_alloc:
+ kfree(item);
+ mutex_unlock(&dev->struct_mutex);
+
+ mem->offset = 0;
+ mem->size = 0;
+ mem->index = 0;
+ DRM_DEBUG("Video memory allocation failed\n");
+
return retval;
}
@@ -161,24 +188,35 @@ int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_via_private_t *dev_priv = dev->dev_private;
drm_via_mem_t *mem = data;
- int ret;
+ struct via_memblock *obj;
mutex_lock(&dev->struct_mutex);
- ret = drm_sman_free_key(&dev_priv->sman, mem->index);
+ obj = idr_find(&dev_priv->object_idr, mem->index);
+ if (obj == NULL) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+
+ idr_remove(&dev_priv->object_idr, mem->index);
+ list_del(&obj->owner_list);
+ drm_mm_remove_node(&obj->mm_node);
+ kfree(obj);
mutex_unlock(&dev->struct_mutex);
+
DRM_DEBUG("free = 0x%lx\n", mem->index);
- return ret;
+ return 0;
}
void via_reclaim_buffers_locked(struct drm_device *dev,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- drm_via_private_t *dev_priv = dev->dev_private;
+ struct via_file_private *file_priv = file->driver_priv;
+ struct via_memblock *entry, *next;
mutex_lock(&dev->struct_mutex);
- if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
+ if (list_empty(&file_priv->obj_list)) {
mutex_unlock(&dev->struct_mutex);
return;
}
@@ -186,7 +224,12 @@ void via_reclaim_buffers_locked(struct drm_device *dev,
if (dev->driver->dma_quiescent)
dev->driver->dma_quiescent(dev);
- drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
+ list_for_each_entry_safe(entry, next, &file_priv->obj_list,
+ owner_list) {
+ list_del(&entry->owner_list);
+ drm_mm_remove_node(&entry->mm_node);
+ kfree(entry);
+ }
mutex_unlock(&dev->struct_mutex);
return;
}
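
The via_mm.c conversion above swaps drm_sman for two smaller pieces: a drm_mm per memory pool for range management, and an idr that hands userspace an integer key for each block, with every allocation also linked into the owning file's obj_list so via_reclaim_buffers_locked() can tear it down on close. Condensed from via_mem_alloc() above into one hypothetical helper (foo_alloc_block() is not in the patch); dev->struct_mutex is assumed held, sizes are in VIA_MM_ALIGN units, and the idr_pre_get()/idr_get_new_above() two-step with the -EAGAIN retry is the idiom this pre-3.9 idr API requires:

static int foo_alloc_block(drm_via_private_t *dev_priv,
                           struct via_file_private *file_priv,
                           unsigned long pages, int *user_key)
{
        struct via_memblock *item;
        int ret;

        item = kzalloc(sizeof(*item), GFP_KERNEL);
        if (!item)
                return -ENOMEM;

        /* 1) reserve a range in the drm_mm allocator */
        ret = drm_mm_insert_node(&dev_priv->vram_mm, &item->mm_node, pages, 0);
        if (ret)
                goto out_free;

        /* 2) publish a userspace-visible key for the block through the idr */
again:
        if (idr_pre_get(&dev_priv->object_idr, GFP_KERNEL) == 0) {
                ret = -ENOMEM;
                goto out_remove;
        }
        ret = idr_get_new_above(&dev_priv->object_idr, item, 1, user_key);
        if (ret == -EAGAIN)
                goto again;
        if (ret)
                goto out_remove;

        /* 3) track the block per file so reclaim/lastclose can free it */
        list_add(&item->owner_list, &file_priv->obj_list);
        return 0;

out_remove:
        drm_mm_remove_node(&item->mm_node);
out_free:
        kfree(item);
        return ret;
}

Freeing reverses the same three steps, as via_mem_free() above shows: look the key up with idr_find(), drop it with idr_remove(), unlink the block from obj_list, and release the drm_mm node.
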
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
index 5a72ed9..1e2c0fb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
@@ -28,6 +28,7 @@
#include "vmwgfx_drv.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
+#include "ttm/ttm_page_alloc.h"
static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
TTM_PL_FLAG_CACHED;
@@ -139,85 +140,63 @@ struct ttm_placement vmw_srf_placement = {
.busy_placement = gmr_vram_placement_flags
};
-struct vmw_ttm_backend {
- struct ttm_backend backend;
- struct page **pages;
- unsigned long num_pages;
+struct vmw_ttm_tt {
+ struct ttm_tt ttm;
struct vmw_private *dev_priv;
int gmr_id;
};
-static int vmw_ttm_populate(struct ttm_backend *backend,
- unsigned long num_pages, struct page **pages,
- struct page *dummy_read_page,
- dma_addr_t *dma_addrs)
+static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
- struct vmw_ttm_backend *vmw_be =
- container_of(backend, struct vmw_ttm_backend, backend);
-
- vmw_be->pages = pages;
- vmw_be->num_pages = num_pages;
-
- return 0;
-}
-
-static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
-{
- struct vmw_ttm_backend *vmw_be =
- container_of(backend, struct vmw_ttm_backend, backend);
+ struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
vmw_be->gmr_id = bo_mem->start;
- return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages,
- vmw_be->num_pages, vmw_be->gmr_id);
+ return vmw_gmr_bind(vmw_be->dev_priv, ttm->pages,
+ ttm->num_pages, vmw_be->gmr_id);
}
-static int vmw_ttm_unbind(struct ttm_backend *backend)
+static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
- struct vmw_ttm_backend *vmw_be =
- container_of(backend, struct vmw_ttm_backend, backend);
+ struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
return 0;
}
-static void vmw_ttm_clear(struct ttm_backend *backend)
+static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
- struct vmw_ttm_backend *vmw_be =
- container_of(backend, struct vmw_ttm_backend, backend);
-
- vmw_be->pages = NULL;
- vmw_be->num_pages = 0;
-}
-
-static void vmw_ttm_destroy(struct ttm_backend *backend)
-{
- struct vmw_ttm_backend *vmw_be =
- container_of(backend, struct vmw_ttm_backend, backend);
+ struct vmw_ttm_tt *vmw_be = container_of(ttm, struct vmw_ttm_tt, ttm);
+ ttm_tt_fini(ttm);
kfree(vmw_be);
}
static struct ttm_backend_func vmw_ttm_func = {
- .populate = vmw_ttm_populate,
- .clear = vmw_ttm_clear,
.bind = vmw_ttm_bind,
.unbind = vmw_ttm_unbind,
.destroy = vmw_ttm_destroy,
};
-struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
+struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
{
- struct vmw_ttm_backend *vmw_be;
+ struct vmw_ttm_tt *vmw_be;
vmw_be = kmalloc(sizeof(*vmw_be), GFP_KERNEL);
if (!vmw_be)
return NULL;
- vmw_be->backend.func = &vmw_ttm_func;
+ vmw_be->ttm.func = &vmw_ttm_func;
vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
- return &vmw_be->backend;
+ if (ttm_tt_init(&vmw_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+ kfree(vmw_be);
+ return NULL;
+ }
+
+ return &vmw_be->ttm;
}
int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
@@ -357,7 +336,9 @@ static int vmw_sync_obj_wait(void *sync_obj, void *sync_arg,
}
struct ttm_bo_driver vmw_bo_driver = {
- .create_ttm_backend_entry = vmw_ttm_backend_init,
+ .ttm_tt_create = &vmw_ttm_tt_create,
+ .ttm_tt_populate = &ttm_pool_populate,
+ .ttm_tt_unpopulate = &ttm_pool_unpopulate,
.invalidate_caches = vmw_invalidate_caches,
.init_mem_type = vmw_init_mem_type,
.evict_flags = vmw_evict_flags,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index dff8fc7..f390f5f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -1064,6 +1064,21 @@ static const struct dev_pm_ops vmw_pm_ops = {
.resume = vmw_pm_resume,
};
+static const struct file_operations vmwgfx_driver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = vmw_unlocked_ioctl,
+ .mmap = vmw_mmap,
+ .poll = vmw_fops_poll,
+ .read = vmw_fops_read,
+ .fasync = drm_fasync,
+#if defined(CONFIG_COMPAT)
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .llseek = noop_llseek,
+};
+
static struct drm_driver driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
DRIVER_MODESET,
@@ -1088,20 +1103,7 @@ static struct drm_driver driver = {
.master_drop = vmw_master_drop,
.open = vmw_driver_open,
.postclose = vmw_postclose,
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = vmw_unlocked_ioctl,
- .mmap = vmw_mmap,
- .poll = vmw_fops_poll,
- .read = vmw_fops_read,
- .fasync = drm_fasync,
-#if defined(CONFIG_COMPAT)
- .compat_ioctl = drm_compat_ioctl,
-#endif
- .llseek = noop_llseek,
- },
+ .fops = &vmwgfx_driver_fops,
.name = VMWGFX_DRIVER_NAME,
.desc = VMWGFX_DRIVER_DESC,
.date = VMWGFX_DRIVER_DATE,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index f94b33a..b66ef0e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -378,7 +378,7 @@ int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
unsigned int *handle)
{
if (handle)
- handle = 0;
+ *handle = 0;
return 0;
}
@@ -690,7 +690,7 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
/* XXX get the first 3 from the surface info */
vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
- vfbs->base.base.pitch = mode_cmd->pitch;
+ vfbs->base.base.pitches[0] = mode_cmd->pitch;
vfbs->base.base.depth = mode_cmd->depth;
vfbs->base.base.width = mode_cmd->width;
vfbs->base.base.height = mode_cmd->height;
@@ -804,7 +804,7 @@ static int do_dmabuf_define_gmrfb(struct drm_file *file_priv,
cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
cmd->body.format.colorDepth = depth;
cmd->body.format.reserved = 0;
- cmd->body.bytesPerLine = framebuffer->base.pitch;
+ cmd->body.bytesPerLine = framebuffer->base.pitches[0];
cmd->body.ptr.gmrId = framebuffer->user_handle;
cmd->body.ptr.offset = 0;
@@ -1056,7 +1056,7 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
}
vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
- vfbd->base.base.pitch = mode_cmd->pitch;
+ vfbd->base.base.pitches[0] = mode_cmd->pitch;
vfbd->base.base.depth = mode_cmd->depth;
vfbd->base.base.width = mode_cmd->width;
vfbd->base.base.height = mode_cmd->height;
@@ -1085,7 +1085,7 @@ out_err1:
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct drm_file *file_priv,
- struct drm_mode_fb_cmd *mode_cmd)
+ struct drm_mode_fb_cmd2 *mode_cmd2)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
@@ -1093,8 +1093,16 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct vmw_surface *surface = NULL;
struct vmw_dma_buffer *bo = NULL;
struct ttm_base_object *user_obj;
+ struct drm_mode_fb_cmd mode_cmd;
int ret;
+ mode_cmd.width = mode_cmd2->width;
+ mode_cmd.height = mode_cmd2->height;
+ mode_cmd.pitch = mode_cmd2->pitches[0];
+ mode_cmd.handle = mode_cmd2->handles[0];
+ drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
+ &mode_cmd.bpp);
+
/**
* This code should be conditioned on Screen Objects not being used.
* If screen objects are used, we can allocate a GMR to hold the
@@ -1102,8 +1110,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
*/
if (!vmw_kms_validate_mode_vram(dev_priv,
- mode_cmd->pitch,
- mode_cmd->height)) {
+ mode_cmd.pitch,
+ mode_cmd.height)) {
DRM_ERROR("VRAM size is too small for requested mode.\n");
return ERR_PTR(-ENOMEM);
}
@@ -1117,15 +1125,19 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
* command stream using user-space handles.
*/
- user_obj = ttm_base_object_lookup(tfile, mode_cmd->handle);
+ user_obj = ttm_base_object_lookup(tfile, mode_cmd.handle);
if (unlikely(user_obj == NULL)) {
DRM_ERROR("Could not locate requested kms frame buffer.\n");
return ERR_PTR(-ENOENT);
}
+ /**
+ * End conditioned code.
+ */
+
/* returns either a dmabuf or surface */
ret = vmw_user_lookup_handle(dev_priv, tfile,
- mode_cmd->handle,
+ mode_cmd.handle,
&surface, &bo);
if (ret)
goto err_out;
@@ -1133,10 +1145,10 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
/* Create the new framebuffer depending one what we got back */
if (bo)
ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
- mode_cmd);
+ &mode_cmd);
else if (surface)
ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
- surface, &vfb, mode_cmd);
+ surface, &vfb, &mode_cmd);
else
BUG();
@@ -1344,7 +1356,7 @@ int vmw_kms_readback(struct vmw_private *dev_priv,
cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel;
cmd->body.format.colorDepth = vfb->base.depth;
cmd->body.format.reserved = 0;
- cmd->body.bytesPerLine = vfb->base.pitch;
+ cmd->body.bytesPerLine = vfb->base.pitches[0];
cmd->body.ptr.gmrId = vfb->user_handle;
cmd->body.ptr.offset = 0;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
index e1cb855..a4f7f03 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h
@@ -29,6 +29,7 @@
#define VMWGFX_KMS_H_
#include "drmP.h"
+#include "drm_crtc_helper.h"
#include "vmwgfx_drv.h"
#define VMWGFX_NUM_DISPLAY_UNITS 8
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 8f8dbd4..f77b184 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -95,7 +95,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
return 0;
fb = entry->base.crtc.fb;
- return vmw_kms_write_svga(dev_priv, w, h, fb->pitch,
+ return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0],
fb->bits_per_pixel, fb->depth);
}
@@ -103,7 +103,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
entry = list_entry(lds->active.next, typeof(*entry), active);
fb = entry->base.crtc.fb;
- vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitch,
+ vmw_kms_write_svga(dev_priv, fb->width, fb->height, fb->pitches[0],
fb->bits_per_pixel, fb->depth);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 1c7f09e..a37abb5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -1540,29 +1540,10 @@ out_bad_surface:
/**
* Buffer management.
*/
-
-static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
- unsigned long num_pages)
-{
- static size_t bo_user_size = ~0;
-
- size_t page_array_size =
- (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
-
- if (unlikely(bo_user_size == ~0)) {
- bo_user_size = glob->ttm_bo_extra_size +
- ttm_round_pot(sizeof(struct vmw_dma_buffer));
- }
-
- return bo_user_size + page_array_size;
-}
-
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
- struct ttm_bo_global *glob = bo->glob;
- ttm_mem_global_free(glob->mem_glob, bo->acc_size);
kfree(vmw_bo);
}
@@ -1573,24 +1554,12 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
void (*bo_free) (struct ttm_buffer_object *bo))
{
struct ttm_bo_device *bdev = &dev_priv->bdev;
- struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
size_t acc_size;
int ret;
BUG_ON(!bo_free);
- acc_size =
- vmw_dmabuf_acc_size(bdev->glob,
- (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
-
- ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
- if (unlikely(ret != 0)) {
- /* we must free the bo here as
- * ttm_buffer_object_init does so as well */
- bo_free(&vmw_bo->base);
- return ret;
- }
-
+ acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
memset(vmw_bo, 0, sizeof(*vmw_bo));
INIT_LIST_HEAD(&vmw_bo->validate_list);
@@ -1605,9 +1574,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
- struct ttm_bo_global *glob = bo->glob;
- ttm_mem_global_free(glob->mem_glob, bo->acc_size);
kfree(vmw_user_bo);
}
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 22a4a05..a421abd 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -31,6 +31,11 @@ config HID
If unsure, say Y.
+config HID_BATTERY_STRENGTH
+ bool
+ depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY
+ default y
+
config HIDRAW
bool "/dev/hidraw raw HID device support"
depends on HID
@@ -335,6 +340,7 @@ config HID_MULTITOUCH
Say Y here if you have one of the following devices:
- 3M PCT touch screens
- ActionStar dual touch panels
+ - Atmel panels
- Cando dual touch panels
- Chunghwa panels
- CVTouch panels
@@ -349,12 +355,15 @@ config HID_MULTITOUCH
- Lumio CrystalTouch panels
- MosArt dual-touch panels
- PenMount dual touch panels
+ - PixArt optical touch screen
- Pixcir dual touch panels
+ - Quanta panels
- eGalax dual-touch panels, including the Joojoo and Wetab tablets
- Stantum multitouch panels
- Touch International Panels
- Unitec Panels
- XAT optical touch panels
+ - Xiroku optical touch panels
If unsure, say N.
@@ -466,12 +475,6 @@ config HID_PRIMAX
Support for Primax devices that are not fully compliant with the
HID standard.
-config HID_QUANTA
- tristate "Quanta Optical Touch panels"
- depends on USB_HID
- ---help---
- Support for Quanta Optical Touch dual-touch panels.
-
config HID_ROCCAT
tristate "Roccat special event support"
depends on USB_HID
@@ -492,6 +495,13 @@ config HID_ROCCAT_ARVO
---help---
Support for Roccat Arvo keyboard.
+config HID_ROCCAT_ISKU
+ tristate "Roccat Isku keyboard support"
+ depends on USB_HID
+ depends on HID_ROCCAT
+ ---help---
+ Support for Roccat Isku keyboard.
+
config HID_ROCCAT_KONE
tristate "Roccat Kone Mouse support"
depends on USB_HID
@@ -560,6 +570,12 @@ config GREENASIA_FF
(like MANTA Warrior MM816 and SpeedLink Strike2 SL-6635) or adapter
and want to enable force feedback support for it.
+config HID_HYPERV_MOUSE
+ tristate "Microsoft Hyper-V mouse driver"
+ depends on HYPERV
+ ---help---
+ Select this option to enable the Hyper-V mouse driver.
+
config HID_SMARTJOYPLUS
tristate "SmartJoy PLUS PS2/USB adapter support"
depends on USB_HID
@@ -620,9 +636,19 @@ config HID_WIIMOTE
depends on BT_HIDP
depends on LEDS_CLASS
select POWER_SUPPLY
+ select INPUT_FF_MEMLESS
---help---
Support for the Nintendo Wii Remote bluetooth device.
+config HID_WIIMOTE_EXT
+ bool "Nintendo Wii Remote Extension support"
+ depends on HID_WIIMOTE
+ default HID_WIIMOTE
+ ---help---
+ Support for extension controllers of the Nintendo Wii Remote. Say yes
+ here if you want to use the Nintendo Motion+, Nunchuck or Classic
+ extension controllers with your Wii Remote.
+
config HID_ZEROPLUS
tristate "Zeroplus based game controller support"
depends on USB_HID
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index 1e0d2a6..8aefdc9 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -25,6 +25,14 @@ ifdef CONFIG_LOGIWHEELS_FF
hid-logitech-y += hid-lg4ff.o
endif
+hid-wiimote-y := hid-wiimote-core.o
+ifdef CONFIG_HID_WIIMOTE_EXT
+ hid-wiimote-y += hid-wiimote-ext.o
+endif
+ifdef CONFIG_DEBUG_FS
+ hid-wiimote-y += hid-wiimote-debug.o
+endif
+
obj-$(CONFIG_HID_A4TECH) += hid-a4tech.o
obj-$(CONFIG_HID_ACRUX) += hid-axff.o
obj-$(CONFIG_HID_APPLE) += hid-apple.o
@@ -38,6 +46,7 @@ obj-$(CONFIG_HID_ELECOM) += hid-elecom.o
obj-$(CONFIG_HID_EZKEY) += hid-ezkey.o
obj-$(CONFIG_HID_GYRATION) += hid-gyration.o
obj-$(CONFIG_HID_HOLTEK) += hid-holtekff.o
+obj-$(CONFIG_HID_HYPERV_MOUSE) += hid-hyperv.o
obj-$(CONFIG_HID_KENSINGTON) += hid-kensington.o
obj-$(CONFIG_HID_KEYTOUCH) += hid-keytouch.o
obj-$(CONFIG_HID_KYE) += hid-kye.o
@@ -51,7 +60,6 @@ obj-$(CONFIG_HID_MULTITOUCH) += hid-multitouch.o
obj-$(CONFIG_HID_NTRIG) += hid-ntrig.o
obj-$(CONFIG_HID_ORTEK) += hid-ortek.o
obj-$(CONFIG_HID_PRODIKEYS) += hid-prodikeys.o
-obj-$(CONFIG_HID_QUANTA) += hid-quanta.o
obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o
obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o
obj-$(CONFIG_HID_PICOLCD) += hid-picolcd.o
@@ -59,6 +67,7 @@ obj-$(CONFIG_HID_PRIMAX) += hid-primax.o
obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o
obj-$(CONFIG_HID_ROCCAT_COMMON) += hid-roccat-common.o
obj-$(CONFIG_HID_ROCCAT_ARVO) += hid-roccat-arvo.o
+obj-$(CONFIG_HID_ROCCAT_ISKU) += hid-roccat-isku.o
obj-$(CONFIG_HID_ROCCAT_KONE) += hid-roccat-kone.o
obj-$(CONFIG_HID_ROCCAT_KONEPLUS) += hid-roccat-koneplus.o
obj-$(CONFIG_HID_ROCCAT_KOVAPLUS) += hid-roccat-kovaplus.o
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index af35384..af08ce7 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -90,7 +90,7 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned
struct hid_field *field;
if (report->maxfield == HID_MAX_FIELDS) {
- dbg_hid("too many fields in report\n");
+ hid_err(report->device, "too many fields in report\n");
return NULL;
}
@@ -121,7 +121,7 @@ static int open_collection(struct hid_parser *parser, unsigned type)
usage = parser->local.usage[0];
if (parser->collection_stack_ptr == HID_COLLECTION_STACK_SIZE) {
- dbg_hid("collection stack overflow\n");
+ hid_err(parser->device, "collection stack overflow\n");
return -1;
}
@@ -129,7 +129,7 @@ static int open_collection(struct hid_parser *parser, unsigned type)
collection = kmalloc(sizeof(struct hid_collection) *
parser->device->collection_size * 2, GFP_KERNEL);
if (collection == NULL) {
- dbg_hid("failed to reallocate collection array\n");
+ hid_err(parser->device, "failed to reallocate collection array\n");
return -1;
}
memcpy(collection, parser->device->collection,
@@ -165,7 +165,7 @@ static int open_collection(struct hid_parser *parser, unsigned type)
static int close_collection(struct hid_parser *parser)
{
if (!parser->collection_stack_ptr) {
- dbg_hid("collection stack underflow\n");
+ hid_err(parser->device, "collection stack underflow\n");
return -1;
}
parser->collection_stack_ptr--;
@@ -197,7 +197,7 @@ static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
static int hid_add_usage(struct hid_parser *parser, unsigned usage)
{
if (parser->local.usage_index >= HID_MAX_USAGES) {
- dbg_hid("usage index exceeded\n");
+ hid_err(parser->device, "usage index exceeded\n");
return -1;
}
parser->local.usage[parser->local.usage_index] = usage;
@@ -222,12 +222,13 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
report = hid_register_report(parser->device, report_type, parser->global.report_id);
if (!report) {
- dbg_hid("hid_register_report failed\n");
+ hid_err(parser->device, "hid_register_report failed\n");
return -1;
}
if (parser->global.logical_maximum < parser->global.logical_minimum) {
- dbg_hid("logical range invalid %d %d\n", parser->global.logical_minimum, parser->global.logical_maximum);
+ hid_err(parser->device, "logical range invalid %d %d\n",
+ parser->global.logical_minimum, parser->global.logical_maximum);
return -1;
}
@@ -307,7 +308,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
case HID_GLOBAL_ITEM_TAG_PUSH:
if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) {
- dbg_hid("global environment stack overflow\n");
+ hid_err(parser->device, "global environment stack overflow\n");
return -1;
}
@@ -318,7 +319,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
case HID_GLOBAL_ITEM_TAG_POP:
if (!parser->global_stack_ptr) {
- dbg_hid("global environment stack underflow\n");
+ hid_err(parser->device, "global environment stack underflow\n");
return -1;
}
@@ -362,8 +363,8 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
parser->global.report_size = item_udata(item);
- if (parser->global.report_size > 32) {
- dbg_hid("invalid report_size %d\n",
+ if (parser->global.report_size > 96) {
+ hid_err(parser->device, "invalid report_size %d\n",
parser->global.report_size);
return -1;
}
@@ -372,7 +373,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
case HID_GLOBAL_ITEM_TAG_REPORT_COUNT:
parser->global.report_count = item_udata(item);
if (parser->global.report_count > HID_MAX_USAGES) {
- dbg_hid("invalid report_count %d\n",
+ hid_err(parser->device, "invalid report_count %d\n",
parser->global.report_count);
return -1;
}
@@ -381,13 +382,13 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
case HID_GLOBAL_ITEM_TAG_REPORT_ID:
parser->global.report_id = item_udata(item);
if (parser->global.report_id == 0) {
- dbg_hid("report_id 0 is invalid\n");
+ hid_err(parser->device, "report_id 0 is invalid\n");
return -1;
}
return 0;
default:
- dbg_hid("unknown global tag 0x%x\n", item->tag);
+ hid_err(parser->device, "unknown global tag 0x%x\n", item->tag);
return -1;
}
}
@@ -414,14 +415,14 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
* items and the first delimiter set.
*/
if (parser->local.delimiter_depth != 0) {
- dbg_hid("nested delimiters\n");
+ hid_err(parser->device, "nested delimiters\n");
return -1;
}
parser->local.delimiter_depth++;
parser->local.delimiter_branch++;
} else {
if (parser->local.delimiter_depth < 1) {
- dbg_hid("bogus close delimiter\n");
+ hid_err(parser->device, "bogus close delimiter\n");
return -1;
}
parser->local.delimiter_depth--;
@@ -506,7 +507,7 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
break;
default:
- dbg_hid("unknown main item tag 0x%x\n", item->tag);
+ hid_err(parser->device, "unknown main item tag 0x%x\n", item->tag);
ret = 0;
}
@@ -678,12 +679,12 @@ int hid_parse_report(struct hid_device *device, __u8 *start,
while ((start = fetch_item(start, end, &item)) != NULL) {
if (item.format != HID_ITEM_FORMAT_SHORT) {
- dbg_hid("unexpected long global item\n");
+ hid_err(device, "unexpected long global item\n");
goto err;
}
if (dispatch_type[item.type](parser, &item)) {
- dbg_hid("item %u %u %u %u parsing failed\n",
+ hid_err(device, "item %u %u %u %u parsing failed\n",
item.format, (unsigned)item.size,
(unsigned)item.type, (unsigned)item.tag);
goto err;
@@ -691,11 +692,11 @@ int hid_parse_report(struct hid_device *device, __u8 *start,
if (start == end) {
if (parser->collection_stack_ptr) {
- dbg_hid("unbalanced collection at end of report description\n");
+ hid_err(device, "unbalanced collection at end of report description\n");
goto err;
}
if (parser->local.delimiter_depth) {
- dbg_hid("unbalanced delimiter at end of report description\n");
+ hid_err(device, "unbalanced delimiter at end of report description\n");
goto err;
}
vfree(parser);
@@ -703,7 +704,7 @@ int hid_parse_report(struct hid_device *device, __u8 *start,
}
}
- dbg_hid("item fetching failed at offset %d\n", (int)(end - start));
+ hid_err(device, "item fetching failed at offset %d\n", (int)(end - start));
err:
vfree(parser);
return ret;
@@ -873,7 +874,7 @@ static void hid_process_event(struct hid_device *hid, struct hid_field *field,
ret = hdrv->event(hid, field, usage, value);
if (ret != 0) {
if (ret < 0)
- dbg_hid("%s's event failed with %d\n",
+ hid_err(hid, "%s's event failed with %d\n",
hdrv->name, ret);
return;
}
@@ -995,12 +996,13 @@ int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
hid_dump_input(field->report->device, field->usage + offset, value);
if (offset >= field->report_count) {
- dbg_hid("offset (%d) exceeds report_count (%d)\n", offset, field->report_count);
+ hid_err(field->report->device, "offset (%d) exceeds report_count (%d)\n",
+ offset, field->report_count);
return -1;
}
if (field->logical_minimum < 0) {
if (value != snto32(s32ton(value, size), size)) {
- dbg_hid("value %d is out of range\n", value);
+ hid_err(field->report->device, "value %d is out of range\n", value);
return -1;
}
}
@@ -1157,7 +1159,7 @@ static bool hid_match_one_id(struct hid_device *hdev,
(id->product == HID_ANY_ID || id->product == hdev->product);
}
-static const struct hid_device_id *hid_match_id(struct hid_device *hdev,
+const struct hid_device_id *hid_match_id(struct hid_device *hdev,
const struct hid_device_id *id)
{
for (; id->bus; id++)
@@ -1404,11 +1406,13 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
- { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2515) },
{ HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
@@ -1423,6 +1427,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HANVON, USB_DEVICE_ID_HANVON_MULTITOUCH) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT, USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM, USB_DEVICE_ID_IDEACOM_IDC6650) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ILITEK, USB_DEVICE_ID_ILITEK_MULTITOUCH) },
@@ -1498,11 +1503,15 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_PCI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONEPLUS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KOVAPLUS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) },
@@ -1544,11 +1553,21 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_XAT, USB_DEVICE_ID_XAT_CSR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR1) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_SPX2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_MPX2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_XIROKU, USB_DEVICE_ID_XIROKU_CSR2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
@@ -1768,6 +1787,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EARTHMATE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DELORME, USB_DEVICE_ID_DELORME_EM_LT20) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x0004) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, 0x000a) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index ee80d73..01dd9a7 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -114,6 +114,14 @@ static const struct hid_usage_entry hid_usage_table[] = {
{0, 0xbd, "FlareRelease"},
{0, 0xbe, "LandingGear"},
{0, 0xbf, "ToeBrake"},
+ { 6, 0, "GenericDeviceControls" },
+ {0, 0x20, "BatteryStrength" },
+ {0, 0x21, "WirelessChannel" },
+ {0, 0x22, "WirelessID" },
+ {0, 0x23, "DiscoverWirelessControl" },
+ {0, 0x24, "SecurityCodeCharacterEntered" },
+ {0, 0x25, "SecurityCodeCharactedErased" },
+ {0, 0x26, "SecurityCodeCleared" },
{ 7, 0, "Keyboard" },
{ 8, 0, "LED" },
{0, 0x01, "NumLock"},
diff --git a/drivers/hid/hid-emsff.c b/drivers/hid/hid-emsff.c
index 9bdde86..2630d48 100644
--- a/drivers/hid/hid-emsff.c
+++ b/drivers/hid/hid-emsff.c
@@ -140,7 +140,7 @@ err:
}
static const struct hid_device_id ems_devices[] = {
- { HID_USB_DEVICE(USB_VENDOR_ID_EMS, 0x118) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
{ }
};
MODULE_DEVICE_TABLE(hid, ems_devices);
diff --git a/drivers/staging/hv/hv_mouse.c b/drivers/hid/hid-hyperv.c
index ccd39c7..4066324 100644
--- a/drivers/staging/hv/hv_mouse.c
+++ b/drivers/hid/hid-hyperv.c
@@ -14,11 +14,8 @@
*/
#include <linux/init.h>
#include <linux/module.h>
-#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/workqueue.h>
-#include <linux/sched.h>
-#include <linux/wait.h>
+#include <linux/completion.h>
#include <linux/input.h>
#include <linux/hid.h>
#include <linux/hiddev.h>
@@ -118,7 +115,6 @@ struct synthhid_input_report {
#define INPUTVSC_SEND_RING_BUFFER_SIZE (10*PAGE_SIZE)
#define INPUTVSC_RECV_RING_BUFFER_SIZE (10*PAGE_SIZE)
-#define NBITS(x) (((x)/BITS_PER_LONG)+1)
enum pipe_prot_msg_type {
PIPE_MESSAGE_INVALID,
@@ -148,7 +144,8 @@ struct mousevsc_prt_msg {
*/
struct mousevsc_dev {
struct hv_device *device;
- unsigned char init_complete;
+ bool init_complete;
+ bool connected;
struct mousevsc_prt_msg protocol_req;
struct mousevsc_prt_msg protocol_resp;
/* Synchronize the request/response if needed */
@@ -159,12 +156,11 @@ struct mousevsc_dev {
unsigned char *report_desc;
u32 report_desc_size;
struct hv_input_dev_info hid_dev_info;
- int connected;
struct hid_device *hid_device;
};
-static struct mousevsc_dev *alloc_input_device(struct hv_device *device)
+static struct mousevsc_dev *mousevsc_alloc_device(struct hv_device *device)
{
struct mousevsc_dev *input_dev;
@@ -176,11 +172,12 @@ static struct mousevsc_dev *alloc_input_device(struct hv_device *device)
input_dev->device = device;
hv_set_drvdata(device, input_dev);
init_completion(&input_dev->wait_event);
+ input_dev->init_complete = false;
return input_dev;
}
-static void free_input_device(struct mousevsc_dev *device)
+static void mousevsc_free_device(struct mousevsc_dev *device)
{
kfree(device->hid_desc);
kfree(device->report_desc);
@@ -188,7 +185,6 @@ static void free_input_device(struct mousevsc_dev *device)
kfree(device);
}
-
static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
struct synthhid_device_info *device_info)
{
@@ -196,14 +192,12 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
struct hid_descriptor *desc;
struct mousevsc_prt_msg ack;
- /* Assume success for now */
- input_device->dev_info_status = 0;
-
- memcpy(&input_device->hid_dev_info, &device_info->hid_dev_info,
- sizeof(struct hv_input_dev_info));
+ input_device->dev_info_status = -ENOMEM;
+ input_device->hid_dev_info = device_info->hid_dev_info;
desc = &device_info->hid_descriptor;
- WARN_ON(desc->bLength == 0);
+ if (desc->bLength == 0)
+ goto cleanup;
input_device->hid_desc = kzalloc(desc->bLength, GFP_ATOMIC);
@@ -213,13 +207,18 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
memcpy(input_device->hid_desc, desc, desc->bLength);
input_device->report_desc_size = desc->desc[0].wDescriptorLength;
- if (input_device->report_desc_size == 0)
+ if (input_device->report_desc_size == 0) {
+ input_device->dev_info_status = -EINVAL;
goto cleanup;
+ }
+
input_device->report_desc = kzalloc(input_device->report_desc_size,
GFP_ATOMIC);
- if (!input_device->report_desc)
+ if (!input_device->report_desc) {
+ input_device->dev_info_status = -ENOMEM;
goto cleanup;
+ }
memcpy(input_device->report_desc,
((unsigned char *)desc) + desc->bLength,
@@ -242,22 +241,14 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
(unsigned long)&ack,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- if (ret != 0)
- goto cleanup;
-
- complete(&input_device->wait_event);
- return;
+ if (!ret)
+ input_device->dev_info_status = 0;
cleanup:
- kfree(input_device->hid_desc);
- input_device->hid_desc = NULL;
-
- kfree(input_device->report_desc);
- input_device->report_desc = NULL;
-
- input_device->dev_info_status = -1;
complete(&input_device->wait_event);
+
+ return;
}
static void mousevsc_on_receive(struct hv_device *device,
@@ -274,10 +265,22 @@ static void mousevsc_on_receive(struct hv_device *device,
if (pipe_msg->type != PIPE_MESSAGE_DATA)
return;
- hid_msg = (struct synthhid_msg *)&pipe_msg->data[0];
+ hid_msg = (struct synthhid_msg *)pipe_msg->data;
switch (hid_msg->header.type) {
case SYNTH_HID_PROTOCOL_RESPONSE:
+ /*
+ * While it will be impossible for us to protect against
+ * malicious/buggy hypervisor/host, add a check here to
+ * ensure we don't corrupt memory.
+ */
+ if ((pipe_msg->size + sizeof(struct pipe_prt_msg)
+ - sizeof(unsigned char))
+ > sizeof(struct mousevsc_prt_msg)) {
+ WARN_ON(1);
+ break;
+ }
+
memcpy(&input_dev->protocol_resp, pipe_msg,
pipe_msg->size + sizeof(struct pipe_prt_msg) -
sizeof(unsigned char));
@@ -292,11 +295,11 @@ static void mousevsc_on_receive(struct hv_device *device,
* hid desc and report desc
*/
mousevsc_on_receive_device_info(input_dev,
- (struct synthhid_device_info *)&pipe_msg->data[0]);
+ (struct synthhid_device_info *)pipe_msg->data);
break;
case SYNTH_HID_INPUT_REPORT:
input_report =
- (struct synthhid_input_report *)&pipe_msg->data[0];
+ (struct synthhid_input_report *)pipe_msg->data;
if (!input_dev->init_complete)
break;
hid_input_report(input_dev->hid_device,
@@ -313,73 +316,60 @@ static void mousevsc_on_receive(struct hv_device *device,
static void mousevsc_on_channel_callback(void *context)
{
- const int packetSize = 0x100;
- int ret = 0;
- struct hv_device *device = (struct hv_device *)context;
-
+ const int packet_size = 0x100;
+ int ret;
+ struct hv_device *device = context;
u32 bytes_recvd;
u64 req_id;
- unsigned char packet[0x100];
struct vmpacket_descriptor *desc;
- unsigned char *buffer = packet;
- int bufferlen = packetSize;
+ unsigned char *buffer;
+ int bufferlen = packet_size;
+ buffer = kmalloc(bufferlen, GFP_ATOMIC);
+ if (!buffer)
+ return;
do {
ret = vmbus_recvpacket_raw(device->channel, buffer,
bufferlen, &bytes_recvd, &req_id);
- if (ret == 0) {
- if (bytes_recvd > 0) {
- desc = (struct vmpacket_descriptor *)buffer;
-
- switch (desc->type) {
- case VM_PKT_COMP:
- break;
-
- case VM_PKT_DATA_INBAND:
- mousevsc_on_receive(
- device, desc);
- break;
-
- default:
- pr_err("unhandled packet type %d, tid %llx len %d\n",
- desc->type,
- req_id,
- bytes_recvd);
- break;
- }
-
- /* reset */
- if (bufferlen > packetSize) {
- kfree(buffer);
-
- buffer = packet;
- bufferlen = packetSize;
- }
- } else {
- if (bufferlen > packetSize) {
- kfree(buffer);
-
- buffer = packet;
- bufferlen = packetSize;
- }
+ switch (ret) {
+ case 0:
+ if (bytes_recvd <= 0) {
+ kfree(buffer);
+ return;
+ }
+ desc = (struct vmpacket_descriptor *)buffer;
+
+ switch (desc->type) {
+ case VM_PKT_COMP:
+ break;
+
+ case VM_PKT_DATA_INBAND:
+ mousevsc_on_receive(device, desc);
+ break;
+
+ default:
+ pr_err("unhandled packet type %d, tid %llx len %d\n",
+ desc->type, req_id, bytes_recvd);
break;
}
- } else if (ret == -ENOBUFS) {
+
+ break;
+
+ case -ENOBUFS:
+ kfree(buffer);
/* Handle large packet */
bufferlen = bytes_recvd;
- buffer = kzalloc(bytes_recvd, GFP_ATOMIC);
+ buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
- if (buffer == NULL) {
- buffer = packet;
- bufferlen = packetSize;
- break;
- }
+ if (!buffer)
+ return;
+
+ break;
}
} while (1);
- return;
}
static int mousevsc_connect_to_vsp(struct hv_device *device)
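
The rewritten channel callback above drops the fixed 256-byte stack buffer in favour of a heap buffer that grows on demand: vmbus_recvpacket_raw() returns -ENOBUFS when the next packet does not fit and reports the required size in bytes_recvd, while a successful read of zero bytes means the ring buffer has been drained. Stripped of the mousevsc specifics, the receive loop looks roughly like this; foo_handle_packet() is a hypothetical stand-in for mousevsc_on_receive():

#include <linux/slab.h>
#include <linux/hyperv.h>

/* hypothetical dispatcher, see mousevsc_on_receive() above */
static void foo_handle_packet(struct hv_device *device,
                              struct vmpacket_descriptor *desc);

static void foo_channel_callback(void *context)
{
        const int packet_size = 0x100;
        struct hv_device *device = context;
        struct vmpacket_descriptor *desc;
        unsigned char *buffer;
        int bufferlen = packet_size;
        u32 bytes_recvd;
        u64 req_id;
        int ret;

        /* channel callbacks run in atomic context, hence GFP_ATOMIC */
        buffer = kmalloc(bufferlen, GFP_ATOMIC);
        if (!buffer)
                return;

        do {
                ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen,
                                           &bytes_recvd, &req_id);
                switch (ret) {
                case 0:
                        if (!bytes_recvd) {     /* ring buffer drained */
                                kfree(buffer);
                                return;
                        }
                        desc = (struct vmpacket_descriptor *)buffer;
                        foo_handle_packet(device, desc);
                        break;

                case -ENOBUFS:
                        /* packet larger than the buffer: grow and retry */
                        kfree(buffer);
                        bufferlen = bytes_recvd;
                        buffer = kmalloc(bufferlen, GFP_ATOMIC);
                        if (!buffer)
                                return;
                        break;

                default:                        /* unexpected error, bail out */
                        kfree(buffer);
                        return;
                }
        } while (1);
}
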
@@ -390,19 +380,15 @@ static int mousevsc_connect_to_vsp(struct hv_device *device)
struct mousevsc_prt_msg *request;
struct mousevsc_prt_msg *response;
-
request = &input_dev->protocol_req;
-
memset(request, 0, sizeof(struct mousevsc_prt_msg));
request->type = PIPE_MESSAGE_DATA;
request->size = sizeof(struct synthhid_protocol_request);
-
request->request.header.type = SYNTH_HID_PROTOCOL_REQUEST;
request->request.header.size = sizeof(unsigned int);
request->request.version_requested.version = SYNTHHID_INPUT_VERSION;
-
ret = vmbus_sendpacket(device->channel, request,
sizeof(struct pipe_prt_msg) -
sizeof(unsigned char) +
@@ -410,11 +396,11 @@ static int mousevsc_connect_to_vsp(struct hv_device *device)
(unsigned long)request,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
- if (ret != 0)
+ if (ret)
goto cleanup;
t = wait_for_completion_timeout(&input_dev->wait_event, 5*HZ);
- if (t == 0) {
+ if (!t) {
ret = -ETIMEDOUT;
goto cleanup;
}
@@ -422,14 +408,14 @@ static int mousevsc_connect_to_vsp(struct hv_device *device)
response = &input_dev->protocol_resp;
if (!response->response.approved) {
- pr_err("synthhid protocol request failed (version %d)",
+ pr_err("synthhid protocol request failed (version %d)\n",
SYNTHHID_INPUT_VERSION);
ret = -ENODEV;
goto cleanup;
}
t = wait_for_completion_timeout(&input_dev->wait_event, 5*HZ);
- if (t == 0) {
+ if (!t) {
ret = -ETIMEDOUT;
goto cleanup;
}
@@ -438,11 +424,9 @@ static int mousevsc_connect_to_vsp(struct hv_device *device)
* We should have gotten the device attr, hid desc and report
* desc at this point
*/
- if (input_dev->dev_info_status)
- ret = -ENOMEM;
+ ret = input_dev->dev_info_status;
cleanup:
-
return ret;
}
@@ -451,61 +435,40 @@ static int mousevsc_hid_open(struct hid_device *hid)
return 0;
}
+static int mousevsc_hid_start(struct hid_device *hid)
+{
+ return 0;
+}
+
static void mousevsc_hid_close(struct hid_device *hid)
{
}
+static void mousevsc_hid_stop(struct hid_device *hid)
+{
+}
+
static struct hid_ll_driver mousevsc_ll_driver = {
.open = mousevsc_hid_open,
.close = mousevsc_hid_close,
+ .start = mousevsc_hid_start,
+ .stop = mousevsc_hid_stop,
};
static struct hid_driver mousevsc_hid_driver;
-static void reportdesc_callback(struct hv_device *dev, void *packet, u32 len)
-{
- struct hid_device *hid_dev;
- struct mousevsc_dev *input_device = hv_get_drvdata(dev);
-
- hid_dev = hid_allocate_device();
- if (IS_ERR(hid_dev))
- return;
-
- hid_dev->ll_driver = &mousevsc_ll_driver;
- hid_dev->driver = &mousevsc_hid_driver;
-
- if (hid_parse_report(hid_dev, packet, len))
- return;
-
- hid_dev->bus = BUS_VIRTUAL;
- hid_dev->vendor = input_device->hid_dev_info.vendor;
- hid_dev->product = input_device->hid_dev_info.product;
- hid_dev->version = input_device->hid_dev_info.version;
-
- sprintf(hid_dev->name, "%s", "Microsoft Vmbus HID-compliant Mouse");
-
- if (!hidinput_connect(hid_dev, 0)) {
- hid_dev->claimed |= HID_CLAIMED_INPUT;
-
- input_device->connected = 1;
-
- }
-
- input_device->hid_device = hid_dev;
-}
-
-static int mousevsc_on_device_add(struct hv_device *device)
+static int mousevsc_probe(struct hv_device *device,
+ const struct hv_vmbus_device_id *dev_id)
{
- int ret = 0;
+ int ret;
struct mousevsc_dev *input_dev;
+ struct hid_device *hid_dev;
- input_dev = alloc_input_device(device);
+ input_dev = mousevsc_alloc_device(device);
if (!input_dev)
return -ENOMEM;
- input_dev->init_complete = false;
-
ret = vmbus_open(device->channel,
INPUTVSC_SEND_RING_BUFFER_SIZE,
INPUTVSC_RECV_RING_BUFFER_SIZE,
@@ -515,54 +478,79 @@ static int mousevsc_on_device_add(struct hv_device *device)
device
);
- if (ret != 0) {
- free_input_device(input_dev);
- return ret;
- }
-
+ if (ret)
+ goto probe_err0;
ret = mousevsc_connect_to_vsp(device);
- if (ret != 0) {
- vmbus_close(device->channel);
- free_input_device(input_dev);
- return ret;
- }
-
+ if (ret)
+ goto probe_err1;
/* workaround SA-167 */
if (input_dev->report_desc[14] == 0x25)
input_dev->report_desc[14] = 0x29;
- reportdesc_callback(device, input_dev->report_desc,
- input_dev->report_desc_size);
+ hid_dev = hid_allocate_device();
+ if (IS_ERR(hid_dev)) {
+ ret = PTR_ERR(hid_dev);
+ goto probe_err1;
+ }
+
+ hid_dev->ll_driver = &mousevsc_ll_driver;
+ hid_dev->driver = &mousevsc_hid_driver;
+ hid_dev->bus = BUS_VIRTUAL;
+ hid_dev->vendor = input_dev->hid_dev_info.vendor;
+ hid_dev->product = input_dev->hid_dev_info.product;
+ hid_dev->version = input_dev->hid_dev_info.version;
+ input_dev->hid_device = hid_dev;
+
+ sprintf(hid_dev->name, "%s", "Microsoft Vmbus HID-compliant Mouse");
+
+ ret = hid_add_device(hid_dev);
+ if (ret)
+ goto probe_err1;
+
+ ret = hid_parse_report(hid_dev, input_dev->report_desc,
+ input_dev->report_desc_size);
+
+ if (ret) {
+ hid_err(hid_dev, "parse failed\n");
+ goto probe_err2;
+ }
+ ret = hid_hw_start(hid_dev, HID_CONNECT_HIDINPUT | HID_CONNECT_HIDDEV);
+
+ if (ret) {
+ hid_err(hid_dev, "hw start failed\n");
+ goto probe_err2;
+ }
+
+ input_dev->connected = true;
input_dev->init_complete = true;
return ret;
-}
-static int mousevsc_probe(struct hv_device *dev,
- const struct hv_vmbus_device_id *dev_id)
-{
+probe_err2:
+ hid_destroy_device(hid_dev);
- return mousevsc_on_device_add(dev);
+probe_err1:
+ vmbus_close(device->channel);
+probe_err0:
+ mousevsc_free_device(input_dev);
+
+ return ret;
}
+
static int mousevsc_remove(struct hv_device *dev)
{
struct mousevsc_dev *input_dev = hv_get_drvdata(dev);
vmbus_close(dev->channel);
-
- if (input_dev->connected) {
- hidinput_disconnect(input_dev->hid_device);
- input_dev->connected = 0;
- hid_destroy_device(input_dev->hid_device);
- }
-
- free_input_device(input_dev);
+ hid_hw_stop(input_dev->hid_device);
+ hid_destroy_device(input_dev->hid_device);
+ mousevsc_free_device(input_dev);
return 0;
}
@@ -577,7 +565,7 @@ static const struct hv_vmbus_device_id id_table[] = {
MODULE_DEVICE_TABLE(vmbus, id_table);
static struct hv_driver mousevsc_drv = {
- .name = "mousevsc",
+ .name = KBUILD_MODNAME,
.id_table = id_table,
.probe = mousevsc_probe,
.remove = mousevsc_remove,
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 4a441a6..63552e3 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -21,6 +21,7 @@
#define USB_VENDOR_ID_3M 0x0596
#define USB_DEVICE_ID_3M1968 0x0500
#define USB_DEVICE_ID_3M2256 0x0502
+#define USB_DEVICE_ID_3M3266 0x0506
#define USB_VENDOR_ID_A4TECH 0x09da
#define USB_DEVICE_ID_A4TECH_WCP32PU 0x0006
@@ -58,6 +59,9 @@
#define USB_VENDOR_ID_AIRCABLE 0x16CA
#define USB_DEVICE_ID_AIRCABLE1 0x1502
+#define USB_VENDOR_ID_AIREN 0x1a2c
+#define USB_DEVICE_ID_AIREN_SLIMPLUS 0x0002
+
#define USB_VENDOR_ID_ALCOR 0x058f
#define USB_DEVICE_ID_ALCOR_USBRS232 0x9720
@@ -124,6 +128,7 @@
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI 0x0239
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO 0x023a
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
+#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256
#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
@@ -145,6 +150,9 @@
#define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205
#define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208
+#define USB_VENDOR_ID_ATMEL 0x03eb
+#define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
+
#define USB_VENDOR_ID_AVERMEDIA 0x07ca
#define USB_DEVICE_ID_AVER_FM_MR800 0xb800
@@ -230,11 +238,14 @@
#define USB_VENDOR_ID_DWAV 0x0eef
#define USB_DEVICE_ID_EGALAX_TOUCHCONTROLLER 0x0001
-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH 0x480d
-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1 0x720c
-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2 0x72a1
-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3 0x480e
-#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4 0x726b
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D 0x480d
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E 0x480e
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C 0x720c
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B 0x726b
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1 0x72a1
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA 0x72fa
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302 0x7302
+#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001
#define USB_VENDOR_ID_ELECOM 0x056e
#define USB_DEVICE_ID_ELECOM_BM084 0x0061
@@ -356,6 +367,9 @@
#define USB_VENDOR_ID_HANVON 0x20b3
#define USB_DEVICE_ID_HANVON_MULTITOUCH 0x0a18
+#define USB_VENDOR_ID_HANVON_ALT 0x22ed
+#define USB_DEVICE_ID_HANVON_ALT_MULTITOUCH 0x1010
+
#define USB_VENDOR_ID_HAPP 0x078b
#define USB_DEVICE_ID_UGCI_DRIVING 0x0010
#define USB_DEVICE_ID_UGCI_FLYING 0x0020
@@ -571,6 +585,11 @@
#define USB_VENDOR_ID_PI_ENGINEERING 0x05f3
#define USB_DEVICE_ID_PI_ENGINEERING_VEC_USB_FOOTPEDAL 0xff
+#define USB_VENDOR_ID_PIXART 0x093a
+#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN 0x8001
+#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1 0x8002
+#define USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2 0x8003
+
#define USB_VENDOR_ID_PLAYDOTCOM 0x0b43
#define USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII 0x0003
@@ -581,11 +600,14 @@
#define USB_DEVICE_ID_PRODIGE_CORDLESS 0x3062
#define USB_VENDOR_ID_QUANTA 0x0408
-#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH 0x3000
+#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH 0x3000
+#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001 0x3001
+#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008
#define USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN 0x3001
#define USB_VENDOR_ID_ROCCAT 0x1e7d
#define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4
+#define USB_DEVICE_ID_ROCCAT_ISKU 0x319c
#define USB_DEVICE_ID_ROCCAT_KONE 0x2ced
#define USB_DEVICE_ID_ROCCAT_KONEPLUS 0x2d51
#define USB_DEVICE_ID_ROCCAT_KOVAPLUS 0x2d50
@@ -679,6 +701,7 @@
#define USB_VENDOR_ID_WACOM 0x056a
#define USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH 0x81
+#define USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH 0x00BD
#define USB_VENDOR_ID_WALTOP 0x172f
#define USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH 0x0032
@@ -707,6 +730,17 @@
#define USB_VENDOR_ID_XAT 0x2505
#define USB_DEVICE_ID_XAT_CSR 0x0220
+#define USB_VENDOR_ID_XIROKU 0x1477
+#define USB_DEVICE_ID_XIROKU_SPX 0x1006
+#define USB_DEVICE_ID_XIROKU_MPX 0x1007
+#define USB_DEVICE_ID_XIROKU_CSR 0x100e
+#define USB_DEVICE_ID_XIROKU_SPX1 0x1021
+#define USB_DEVICE_ID_XIROKU_CSR1 0x1022
+#define USB_DEVICE_ID_XIROKU_MPX1 0x1023
+#define USB_DEVICE_ID_XIROKU_SPX2 0x1024
+#define USB_DEVICE_ID_XIROKU_CSR2 0x1025
+#define USB_DEVICE_ID_XIROKU_MPX2 0x1026
+
#define USB_VENDOR_ID_YEALINK 0x6993
#define USB_DEVICE_ID_YEALINK_P1K_P4K_B2K 0xb001
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index f333139..627850a 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -32,6 +32,8 @@
#include <linux/hid.h>
#include <linux/hid-debug.h>
+#include "hid-ids.h"
+
#define unk KEY_UNKNOWN
static const unsigned char hid_keyboard[256] = {
@@ -271,6 +273,161 @@ static __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
return logical_extents / physical_extents;
}
+#ifdef CONFIG_HID_BATTERY_STRENGTH
+static enum power_supply_property hidinput_battery_props[] = {
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_MODEL_NAME,
+ POWER_SUPPLY_PROP_STATUS
+};
+
+#define HID_BATTERY_QUIRK_PERCENT (1 << 0) /* always reports percent */
+#define HID_BATTERY_QUIRK_FEATURE (1 << 1) /* ask for feature report */
+
+static const struct hid_device_id hid_battery_quirks[] = {
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
+ HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+ {}
+};
+
+static unsigned find_battery_quirk(struct hid_device *hdev)
+{
+ unsigned quirks = 0;
+ const struct hid_device_id *match;
+
+ match = hid_match_id(hdev, hid_battery_quirks);
+ if (match != NULL)
+ quirks = match->driver_data;
+
+ return quirks;
+}
+
+static int hidinput_get_battery_property(struct power_supply *psy,
+ enum power_supply_property prop,
+ union power_supply_propval *val)
+{
+ struct hid_device *dev = container_of(psy, struct hid_device, battery);
+ int ret = 0;
+ __u8 buf[2] = {};
+
+ switch (prop) {
+ case POWER_SUPPLY_PROP_PRESENT:
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval = 1;
+ break;
+
+ case POWER_SUPPLY_PROP_CAPACITY:
+ ret = dev->hid_get_raw_report(dev, dev->battery_report_id,
+ buf, sizeof(buf),
+ dev->battery_report_type);
+
+ if (ret != 2) {
+ if (ret >= 0)
+ ret = -EINVAL;
+ break;
+ }
+
+ if (dev->battery_min < dev->battery_max &&
+ buf[1] >= dev->battery_min &&
+ buf[1] <= dev->battery_max)
+ val->intval = (100 * (buf[1] - dev->battery_min)) /
+ (dev->battery_max - dev->battery_min);
+ break;
+
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = dev->name;
+ break;
+
+ case POWER_SUPPLY_PROP_STATUS:
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static bool hidinput_setup_battery(struct hid_device *dev, unsigned report_type, struct hid_field *field)
+{
+ struct power_supply *battery = &dev->battery;
+ int ret;
+ unsigned quirks;
+ s32 min, max;
+
+ if (field->usage->hid != HID_DC_BATTERYSTRENGTH)
+ return false; /* no match */
+
+ if (battery->name != NULL)
+ goto out; /* already initialized? */
+
+ battery->name = kasprintf(GFP_KERNEL, "hid-%s-battery", dev->uniq);
+ if (battery->name == NULL)
+ goto out;
+
+ battery->type = POWER_SUPPLY_TYPE_BATTERY;
+ battery->properties = hidinput_battery_props;
+ battery->num_properties = ARRAY_SIZE(hidinput_battery_props);
+ battery->use_for_apm = 0;
+ battery->get_property = hidinput_get_battery_property;
+
+ quirks = find_battery_quirk(dev);
+
+ hid_dbg(dev, "device %x:%x:%x %d quirks %d\n",
+ dev->bus, dev->vendor, dev->product, dev->version, quirks);
+
+ min = field->logical_minimum;
+ max = field->logical_maximum;
+
+ if (quirks & HID_BATTERY_QUIRK_PERCENT) {
+ min = 0;
+ max = 100;
+ }
+
+ if (quirks & HID_BATTERY_QUIRK_FEATURE)
+ report_type = HID_FEATURE_REPORT;
+
+ dev->battery_min = min;
+ dev->battery_max = max;
+ dev->battery_report_type = report_type;
+ dev->battery_report_id = field->report->id;
+
+ ret = power_supply_register(&dev->dev, battery);
+ if (ret != 0) {
+ hid_warn(dev, "can't register power supply: %d\n", ret);
+ kfree(battery->name);
+ battery->name = NULL;
+ }
+
+out:
+ return true;
+}
+
+static void hidinput_cleanup_battery(struct hid_device *dev)
+{
+ if (!dev->battery.name)
+ return;
+
+ power_supply_unregister(&dev->battery);
+ kfree(dev->battery.name);
+ dev->battery.name = NULL;
+}
+#else /* !CONFIG_HID_BATTERY_STRENGTH */
+static bool hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
+ struct hid_field *field)
+{
+ return false;
+}
+
+static void hidinput_cleanup_battery(struct hid_device *dev)
+{
+}
+#endif /* CONFIG_HID_BATTERY_STRENGTH */
+
static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_field *field,
struct hid_usage *usage)
{
@@ -629,6 +786,13 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
}
break;
+ case HID_UP_GENDEVCTRLS:
+ if (hidinput_setup_battery(device, HID_INPUT_REPORT, field))
+ goto ignore;
+ else
+ goto unknown;
+ break;
+
case HID_UP_HPVENDOR: /* Reported on a Dutch layout HP5308 */
set_bit(EV_REP, input->evbit);
switch (usage->hid & HID_USAGE) {
@@ -822,6 +986,17 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
return;
}
+ /*
+ * Ignore out-of-range values as per HID specification,
+ * section 5.10 and 6.2.25
+ */
+ if ((field->flags & HID_MAIN_ITEM_VARIABLE) &&
+ (value < field->logical_minimum ||
+ value > field->logical_maximum)) {
+ dbg_hid("Ignoring out-of-range value %x\n", value);
+ return;
+ }
+
/* report the usage code as scancode if the key status has changed */
if (usage->type == EV_KEY && !!test_bit(usage->code, input->key) != value)
input_event(input, EV_MSC, MSC_SCAN, usage->hid);
@@ -861,6 +1036,48 @@ int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int
}
EXPORT_SYMBOL_GPL(hidinput_find_field);
+struct hid_field *hidinput_get_led_field(struct hid_device *hid)
+{
+ struct hid_report *report;
+ struct hid_field *field;
+ int i, j;
+
+ list_for_each_entry(report,
+ &hid->report_enum[HID_OUTPUT_REPORT].report_list,
+ list) {
+ for (i = 0; i < report->maxfield; i++) {
+ field = report->field[i];
+ for (j = 0; j < field->maxusage; j++)
+ if (field->usage[j].type == EV_LED)
+ return field;
+ }
+ }
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(hidinput_get_led_field);
+
+unsigned int hidinput_count_leds(struct hid_device *hid)
+{
+ struct hid_report *report;
+ struct hid_field *field;
+ int i, j;
+ unsigned int count = 0;
+
+ list_for_each_entry(report,
+ &hid->report_enum[HID_OUTPUT_REPORT].report_list,
+ list) {
+ for (i = 0; i < report->maxfield; i++) {
+ field = report->field[i];
+ for (j = 0; j < field->maxusage; j++)
+ if (field->usage[j].type == EV_LED &&
+ field->value[j])
+ count += 1;
+ }
+ }
+ return count;
+}
+EXPORT_SYMBOL_GPL(hidinput_count_leds);
+
static int hidinput_open(struct input_dev *dev)
{
struct hid_device *hid = input_get_drvdata(dev);
@@ -882,15 +1099,17 @@ static void report_features(struct hid_device *hid)
struct hid_report *rep;
int i, j;
- if (!drv->feature_mapping)
- return;
-
rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
list_for_each_entry(rep, &rep_enum->report_list, list)
for (i = 0; i < rep->maxfield; i++)
- for (j = 0; j < rep->field[i]->maxusage; j++)
- drv->feature_mapping(hid, rep->field[i],
- rep->field[i]->usage + j);
+ for (j = 0; j < rep->field[i]->maxusage; j++) {
+ /* Verify if Battery Strength feature is available */
+ hidinput_setup_battery(hid, HID_FEATURE_REPORT, rep->field[i]);
+
+ if (drv->feature_mapping)
+ drv->feature_mapping(hid, rep->field[i],
+ rep->field[i]->usage + j);
+ }
}
/*
@@ -1010,6 +1229,8 @@ void hidinput_disconnect(struct hid_device *hid)
{
struct hid_input *hidinput, *next;
+ hidinput_cleanup_battery(hid);
+
list_for_each_entry_safe(hidinput, next, &hid->inputs, list) {
list_del(&hidinput->list);
input_unregister_device(hidinput->input);
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index 103f30d..6ecc9e2 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -430,7 +430,7 @@ int lg4ff_init(struct hid_device *hid)
}
/* Add the device to device_list */
- entry = (struct lg4ff_device_entry *)kzalloc(sizeof(struct lg4ff_device_entry), GFP_KERNEL);
+ entry = kzalloc(sizeof(struct lg4ff_device_entry), GFP_KERNEL);
if (!entry) {
hid_err(hid, "Cannot add device, insufficient memory.\n");
return -ENOMEM;
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index f1c909f..24fc442 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -50,7 +50,6 @@ MODULE_LICENSE("GPL");
#define MT_QUIRK_ALWAYS_VALID (1 << 4)
#define MT_QUIRK_VALID_IS_INRANGE (1 << 5)
#define MT_QUIRK_VALID_IS_CONFIDENCE (1 << 6)
-#define MT_QUIRK_EGALAX_XYZ_FIXUP (1 << 7)
#define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE (1 << 8)
struct mt_slot {
@@ -60,9 +59,19 @@ struct mt_slot {
bool seen_in_this_frame;/* has this slot been updated */
};
+struct mt_class {
+ __s32 name; /* MT_CLS */
+ __s32 quirks;
+ __s32 sn_move; /* Signal/noise ratio for move events */
+ __s32 sn_width; /* Signal/noise ratio for width events */
+ __s32 sn_height; /* Signal/noise ratio for height events */
+ __s32 sn_pressure; /* Signal/noise ratio for pressure events */
+ __u8 maxcontacts;
+};
+
struct mt_device {
struct mt_slot curdata; /* placeholder of incoming data */
- struct mt_class *mtclass; /* our mt device class */
+ struct mt_class mtclass; /* our mt device class */
unsigned last_field_index; /* last field index of the report */
unsigned last_slot_field; /* the last field of a slot */
int last_mt_collection; /* last known mt-related collection */
@@ -74,30 +83,23 @@ struct mt_device {
struct mt_slot *slots;
};
-struct mt_class {
- __s32 name; /* MT_CLS */
- __s32 quirks;
- __s32 sn_move; /* Signal/noise ratio for move events */
- __s32 sn_width; /* Signal/noise ratio for width events */
- __s32 sn_height; /* Signal/noise ratio for height events */
- __s32 sn_pressure; /* Signal/noise ratio for pressure events */
- __u8 maxcontacts;
-};
-
/* classes of device behavior */
#define MT_CLS_DEFAULT 0x0001
#define MT_CLS_SERIAL 0x0002
#define MT_CLS_CONFIDENCE 0x0003
-#define MT_CLS_CONFIDENCE_MINUS_ONE 0x0004
-#define MT_CLS_DUAL_INRANGE_CONTACTID 0x0005
-#define MT_CLS_DUAL_INRANGE_CONTACTNUMBER 0x0006
-#define MT_CLS_DUAL_NSMU_CONTACTID 0x0007
+#define MT_CLS_CONFIDENCE_CONTACT_ID 0x0004
+#define MT_CLS_CONFIDENCE_MINUS_ONE 0x0005
+#define MT_CLS_DUAL_INRANGE_CONTACTID 0x0006
+#define MT_CLS_DUAL_INRANGE_CONTACTNUMBER 0x0007
+#define MT_CLS_DUAL_NSMU_CONTACTID 0x0008
+#define MT_CLS_INRANGE_CONTACTNUMBER 0x0009
/* vendor specific classes */
#define MT_CLS_3M 0x0101
#define MT_CLS_CYPRESS 0x0102
#define MT_CLS_EGALAX 0x0103
+#define MT_CLS_EGALAX_SERIAL 0x0104
#define MT_DEFAULT_MAXCONTACT 10
@@ -133,13 +135,16 @@ static int find_slot_from_contactid(struct mt_device *td)
return -1;
}
-struct mt_class mt_classes[] = {
+static struct mt_class mt_classes[] = {
{ .name = MT_CLS_DEFAULT,
.quirks = MT_QUIRK_NOT_SEEN_MEANS_UP },
{ .name = MT_CLS_SERIAL,
.quirks = MT_QUIRK_ALWAYS_VALID},
{ .name = MT_CLS_CONFIDENCE,
.quirks = MT_QUIRK_VALID_IS_CONFIDENCE },
+ { .name = MT_CLS_CONFIDENCE_CONTACT_ID,
+ .quirks = MT_QUIRK_VALID_IS_CONFIDENCE |
+ MT_QUIRK_SLOT_IS_CONTACTID },
{ .name = MT_CLS_CONFIDENCE_MINUS_ONE,
.quirks = MT_QUIRK_VALID_IS_CONFIDENCE |
MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE },
@@ -155,6 +160,9 @@ struct mt_class mt_classes[] = {
.quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
MT_QUIRK_SLOT_IS_CONTACTID,
.maxcontacts = 2 },
+ { .name = MT_CLS_INRANGE_CONTACTNUMBER,
+ .quirks = MT_QUIRK_VALID_IS_INRANGE |
+ MT_QUIRK_SLOT_IS_CONTACTNUMBER },
/*
* vendor specific classes
@@ -171,9 +179,13 @@ struct mt_class mt_classes[] = {
.maxcontacts = 10 },
{ .name = MT_CLS_EGALAX,
.quirks = MT_QUIRK_SLOT_IS_CONTACTID |
- MT_QUIRK_VALID_IS_INRANGE |
- MT_QUIRK_EGALAX_XYZ_FIXUP,
- .maxcontacts = 2,
+ MT_QUIRK_VALID_IS_INRANGE,
+ .sn_move = 4096,
+ .sn_pressure = 32,
+ },
+ { .name = MT_CLS_EGALAX_SERIAL,
+ .quirks = MT_QUIRK_SLOT_IS_CONTACTID |
+ MT_QUIRK_ALWAYS_VALID,
.sn_move = 4096,
.sn_pressure = 32,
},
@@ -181,6 +193,44 @@ struct mt_class mt_classes[] = {
{ }
};
+static ssize_t mt_show_quirks(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct mt_device *td = hid_get_drvdata(hdev);
+
+ return sprintf(buf, "%u\n", td->mtclass.quirks);
+}
+
+static ssize_t mt_set_quirks(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct hid_device *hdev = container_of(dev, struct hid_device, dev);
+ struct mt_device *td = hid_get_drvdata(hdev);
+
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val))
+ return -EINVAL;
+
+ td->mtclass.quirks = val;
+
+ return count;
+}
+
+static DEVICE_ATTR(quirks, S_IWUSR | S_IRUGO, mt_show_quirks, mt_set_quirks);
+
+static struct attribute *sysfs_attrs[] = {
+ &dev_attr_quirks.attr,
+ NULL
+};
+
+static struct attribute_group mt_attribute_group = {
+ .attrs = sysfs_attrs
+};
+
static void mt_feature_mapping(struct hid_device *hdev,
struct hid_field *field, struct hid_usage *usage)
{
@@ -192,9 +242,9 @@ static void mt_feature_mapping(struct hid_device *hdev,
break;
case HID_DG_CONTACTMAX:
td->maxcontacts = field->value[0];
- if (td->mtclass->maxcontacts)
+ if (td->mtclass.maxcontacts)
/* check if the maxcontacts is given by the class */
- td->maxcontacts = td->mtclass->maxcontacts;
+ td->maxcontacts = td->mtclass.maxcontacts;
break;
}
@@ -214,8 +264,7 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
unsigned long **bit, int *max)
{
struct mt_device *td = hid_get_drvdata(hdev);
- struct mt_class *cls = td->mtclass;
- __s32 quirks = cls->quirks;
+ struct mt_class *cls = &td->mtclass;
/* Only map fields from TouchScreen or TouchPad collections.
* We need to ignore fields that belong to other collections
@@ -227,13 +276,17 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
else
return 0;
+ /* eGalax devices provide a Digitizer.Stylus input which overrides
+ * the correct Digitizers.Finger X/Y ranges.
+ * Let's just ignore this input. */
+ if (field->physical == HID_DG_STYLUS)
+ return -1;
+
switch (usage->hid & HID_USAGE_PAGE) {
case HID_UP_GENDESK:
switch (usage->hid) {
case HID_GD_X:
- if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
- field->logical_maximum = 32760;
hid_map_usage(hi, usage, bit, max,
EV_ABS, ABS_MT_POSITION_X);
set_abs(hi->input, ABS_MT_POSITION_X, field,
@@ -246,8 +299,6 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
}
return 1;
case HID_GD_Y:
- if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
- field->logical_maximum = 32760;
hid_map_usage(hi, usage, bit, max,
EV_ABS, ABS_MT_POSITION_Y);
set_abs(hi->input, ABS_MT_POSITION_Y, field,
@@ -315,8 +366,6 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
}
return 1;
case HID_DG_TIPPRESSURE:
- if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
- field->logical_minimum = 0;
hid_map_usage(hi, usage, bit, max,
EV_ABS, ABS_MT_PRESSURE);
set_abs(hi->input, ABS_MT_PRESSURE, field,
@@ -363,7 +412,7 @@ static int mt_input_mapped(struct hid_device *hdev, struct hid_input *hi,
static int mt_compute_slot(struct mt_device *td)
{
- __s32 quirks = td->mtclass->quirks;
+ __s32 quirks = td->mtclass.quirks;
if (quirks & MT_QUIRK_SLOT_IS_CONTACTID)
return td->curdata.contactid;
@@ -407,7 +456,7 @@ static void mt_emit_event(struct mt_device *td, struct input_dev *input)
for (i = 0; i < td->maxcontacts; ++i) {
struct mt_slot *s = &(td->slots[i]);
- if ((td->mtclass->quirks & MT_QUIRK_NOT_SEEN_MEANS_UP) &&
+ if ((td->mtclass.quirks & MT_QUIRK_NOT_SEEN_MEANS_UP) &&
!s->seen_in_this_frame) {
s->touch_state = false;
}
@@ -444,7 +493,7 @@ static int mt_event(struct hid_device *hid, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
struct mt_device *td = hid_get_drvdata(hid);
- __s32 quirks = td->mtclass->quirks;
+ __s32 quirks = td->mtclass.quirks;
if (hid->claimed & HID_CLAIMED_INPUT && td->slots) {
switch (usage->hid) {
@@ -552,7 +601,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
dev_err(&hdev->dev, "cannot allocate multitouch data\n");
return -ENOMEM;
}
- td->mtclass = mtclass;
+ td->mtclass = *mtclass;
td->inputmode = -1;
td->last_mt_collection = -1;
hid_set_drvdata(hdev, td);
@@ -574,6 +623,8 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto fail;
}
+ ret = sysfs_create_group(&hdev->dev.kobj, &mt_attribute_group);
+
mt_set_input_mode(hdev);
return 0;
@@ -594,6 +645,7 @@ static int mt_reset_resume(struct hid_device *hdev)
static void mt_remove(struct hid_device *hdev)
{
struct mt_device *td = hid_get_drvdata(hdev);
+ sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group);
hid_hw_stop(hdev);
kfree(td->slots);
kfree(td);
@@ -609,12 +661,20 @@ static const struct hid_device_id mt_devices[] = {
{ .driver_data = MT_CLS_3M,
HID_USB_DEVICE(USB_VENDOR_ID_3M,
USB_DEVICE_ID_3M2256) },
+ { .driver_data = MT_CLS_3M,
+ HID_USB_DEVICE(USB_VENDOR_ID_3M,
+ USB_DEVICE_ID_3M3266) },
/* ActionStar panels */
{ .driver_data = MT_CLS_DEFAULT,
HID_USB_DEVICE(USB_VENDOR_ID_ACTIONSTAR,
USB_DEVICE_ID_ACTIONSTAR_1011) },
+ /* Atmel panels */
+ { .driver_data = MT_CLS_SERIAL,
+ HID_USB_DEVICE(USB_VENDOR_ID_ATMEL,
+ USB_DEVICE_ID_ATMEL_MULTITOUCH) },
+
/* Cando panels */
{ .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER,
HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
@@ -645,23 +705,32 @@ static const struct hid_device_id mt_devices[] = {
USB_DEVICE_ID_CYPRESS_TRUETOUCH) },
/* eGalax devices (resistive) */
- { .driver_data = MT_CLS_EGALAX,
+ { .driver_data = MT_CLS_EGALAX,
HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH) },
- { .driver_data = MT_CLS_EGALAX,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480D) },
+ { .driver_data = MT_CLS_EGALAX,
HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH3) },
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_480E) },
/* eGalax devices (capacitive) */
- { .driver_data = MT_CLS_EGALAX,
+ { .driver_data = MT_CLS_EGALAX,
HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH1) },
- { .driver_data = MT_CLS_EGALAX,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_720C) },
+ { .driver_data = MT_CLS_EGALAX,
HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH2) },
- { .driver_data = MT_CLS_EGALAX,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_726B) },
+ { .driver_data = MT_CLS_EGALAX,
HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
- USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH4) },
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72A1) },
+ { .driver_data = MT_CLS_EGALAX,
+ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA) },
+ { .driver_data = MT_CLS_EGALAX,
+ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302) },
+ { .driver_data = MT_CLS_EGALAX_SERIAL,
+ HID_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) },
/* Elo TouchSystems IntelliTouch Plus panel */
{ .driver_data = MT_CLS_DUAL_NSMU_CONTACTID,
@@ -678,6 +747,11 @@ static const struct hid_device_id mt_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_GOODTOUCH,
USB_DEVICE_ID_GOODTOUCH_000f) },
+ /* Hanvon panels */
+ { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
+ HID_USB_DEVICE(USB_VENDOR_ID_HANVON_ALT,
+ USB_DEVICE_ID_HANVON_ALT_MULTITOUCH) },
+
/* Ideacom panel */
{ .driver_data = MT_CLS_SERIAL,
HID_USB_DEVICE(USB_VENDOR_ID_IDEACOM,
@@ -722,6 +796,17 @@ static const struct hid_device_id mt_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT,
USB_DEVICE_ID_PENMOUNT_PCI) },
+ /* PixArt optical touch screen */
+ { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
+ HID_USB_DEVICE(USB_VENDOR_ID_PIXART,
+ USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN) },
+ { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
+ HID_USB_DEVICE(USB_VENDOR_ID_PIXART,
+ USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1) },
+ { .driver_data = MT_CLS_INRANGE_CONTACTNUMBER,
+ HID_USB_DEVICE(USB_VENDOR_ID_PIXART,
+ USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2) },
+
/* PixCir-based panels */
{ .driver_data = MT_CLS_DUAL_INRANGE_CONTACTID,
HID_USB_DEVICE(USB_VENDOR_ID_HANVON,
@@ -730,6 +815,17 @@ static const struct hid_device_id mt_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
USB_DEVICE_ID_CANDO_PIXCIR_MULTI_TOUCH) },
+ /* Quanta-based panels */
+ { .driver_data = MT_CLS_CONFIDENCE_CONTACT_ID,
+ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
+ USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
+ { .driver_data = MT_CLS_CONFIDENCE_CONTACT_ID,
+ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
+ USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001) },
+ { .driver_data = MT_CLS_CONFIDENCE_CONTACT_ID,
+ HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
+ USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008) },
+
/* Stantum panels */
{ .driver_data = MT_CLS_CONFIDENCE,
HID_USB_DEVICE(USB_VENDOR_ID_STANTUM,
@@ -758,6 +854,35 @@ static const struct hid_device_id mt_devices[] = {
HID_USB_DEVICE(USB_VENDOR_ID_XAT,
USB_DEVICE_ID_XAT_CSR) },
+ /* Xiroku */
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_SPX) },
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_MPX) },
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_CSR) },
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_SPX1) },
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_MPX1) },
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_CSR1) },
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_SPX2) },
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_MPX2) },
+ { .driver_data = MT_CLS_DEFAULT,
+ HID_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_CSR2) },
+
{ }
};
MODULE_DEVICE_TABLE(hid, mt_devices);
diff --git a/drivers/hid/hid-picolcd.c b/drivers/hid/hid-picolcd.c
index 01e7d2c..12f9777 100644
--- a/drivers/hid/hid-picolcd.c
+++ b/drivers/hid/hid-picolcd.c
@@ -633,7 +633,7 @@ struct picolcd_fb_cleanup_item {
struct picolcd_fb_cleanup_item *next;
};
static struct picolcd_fb_cleanup_item *fb_pending;
-DEFINE_SPINLOCK(fb_pending_lock);
+static DEFINE_SPINLOCK(fb_pending_lock);
static void picolcd_fb_do_cleanup(struct work_struct *data)
{
@@ -658,7 +658,7 @@ static void picolcd_fb_do_cleanup(struct work_struct *data)
} while (item);
}
-DECLARE_WORK(picolcd_fb_cleanup, picolcd_fb_do_cleanup);
+static DECLARE_WORK(picolcd_fb_cleanup, picolcd_fb_do_cleanup);
static int picolcd_fb_open(struct fb_info *info, int u)
{
diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c
index 070f93a..47ed74c 100644
--- a/drivers/hid/hid-pl.c
+++ b/drivers/hid/hid-pl.c
@@ -9,10 +9,10 @@
* - contains two reports, one for each port (HID_QUIRK_MULTI_INPUT)
*
* 0e8f:0003 "GreenAsia Inc. USB Joystick "
- * - tested with Knig Gaming gamepad
+ * - tested with König Gaming gamepad
*
* 0e8f:0003 "GASIA USB Gamepad"
- * - another version of the Knig gamepad
+ * - another version of the König gamepad
*
* Copyright (c) 2007, 2009 Anssi Hannula <anssi.hannula@gmail.com>
*/
diff --git a/drivers/hid/hid-prodikeys.c b/drivers/hid/hid-prodikeys.c
index f779009..b71b77a 100644
--- a/drivers/hid/hid-prodikeys.c
+++ b/drivers/hid/hid-prodikeys.c
@@ -90,7 +90,7 @@ static const char longname[] = "Prodikeys PC-MIDI Keyboard";
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
-static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
+static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
module_param_array(index, int, NULL, 0444);
module_param_array(id, charp, NULL, 0444);
diff --git a/drivers/hid/hid-quanta.c b/drivers/hid/hid-quanta.c
deleted file mode 100644
index 87a54df..0000000
--- a/drivers/hid/hid-quanta.c
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * HID driver for Quanta Optical Touch dual-touch panels
- *
- * Copyright (c) 2009-2010 Stephane Chatty <chatty@enac.fr>
- *
- */
-
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
-#include <linux/device.h>
-#include <linux/hid.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-
-MODULE_AUTHOR("Stephane Chatty <chatty@enac.fr>");
-MODULE_DESCRIPTION("Quanta dual-touch panel");
-MODULE_LICENSE("GPL");
-
-#include "hid-ids.h"
-
-struct quanta_data {
- __u16 x, y;
- __u8 id;
- bool valid; /* valid finger data, or just placeholder? */
- bool first; /* is this the first finger in this frame? */
- bool activity_now; /* at least one active finger in this frame? */
- bool activity; /* at least one active finger previously? */
-};
-
-static int quanta_input_mapping(struct hid_device *hdev, struct hid_input *hi,
- struct hid_field *field, struct hid_usage *usage,
- unsigned long **bit, int *max)
-{
- switch (usage->hid & HID_USAGE_PAGE) {
-
- case HID_UP_GENDESK:
- switch (usage->hid) {
- case HID_GD_X:
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_POSITION_X);
- /* touchscreen emulation */
- input_set_abs_params(hi->input, ABS_X,
- field->logical_minimum,
- field->logical_maximum, 0, 0);
- return 1;
- case HID_GD_Y:
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_POSITION_Y);
- /* touchscreen emulation */
- input_set_abs_params(hi->input, ABS_Y,
- field->logical_minimum,
- field->logical_maximum, 0, 0);
- return 1;
- }
- return 0;
-
- case HID_UP_DIGITIZER:
- switch (usage->hid) {
- case HID_DG_CONFIDENCE:
- case HID_DG_TIPSWITCH:
- case HID_DG_INPUTMODE:
- case HID_DG_DEVICEINDEX:
- case HID_DG_CONTACTCOUNT:
- case HID_DG_CONTACTMAX:
- case HID_DG_TIPPRESSURE:
- case HID_DG_WIDTH:
- case HID_DG_HEIGHT:
- return -1;
- case HID_DG_INRANGE:
- /* touchscreen emulation */
- hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
- return 1;
- case HID_DG_CONTACTID:
- hid_map_usage(hi, usage, bit, max,
- EV_ABS, ABS_MT_TRACKING_ID);
- return 1;
- }
- return 0;
-
- case 0xff000000:
- /* ignore vendor-specific features */
- return -1;
- }
-
- return 0;
-}
-
-static int quanta_input_mapped(struct hid_device *hdev, struct hid_input *hi,
- struct hid_field *field, struct hid_usage *usage,
- unsigned long **bit, int *max)
-{
- if (usage->type == EV_KEY || usage->type == EV_ABS)
- clear_bit(usage->code, *bit);
-
- return 0;
-}
-
-/*
- * this function is called when a whole finger has been parsed,
- * so that it can decide what to send to the input layer.
- */
-static void quanta_filter_event(struct quanta_data *td, struct input_dev *input)
-{
-
- td->first = !td->first; /* touchscreen emulation */
-
- if (!td->valid) {
- /*
- * touchscreen emulation: if no finger in this frame is valid
- * and there previously was finger activity, this is a release
- */
- if (!td->first && !td->activity_now && td->activity) {
- input_event(input, EV_KEY, BTN_TOUCH, 0);
- td->activity = false;
- }
- return;
- }
-
- input_event(input, EV_ABS, ABS_MT_TRACKING_ID, td->id);
- input_event(input, EV_ABS, ABS_MT_POSITION_X, td->x);
- input_event(input, EV_ABS, ABS_MT_POSITION_Y, td->y);
-
- input_mt_sync(input);
- td->valid = false;
-
- /* touchscreen emulation: if first active finger in this frame... */
- if (!td->activity_now) {
- /* if there was no previous activity, emit touch event */
- if (!td->activity) {
- input_event(input, EV_KEY, BTN_TOUCH, 1);
- td->activity = true;
- }
- td->activity_now = true;
- /* and in any case this is our preferred finger */
- input_event(input, EV_ABS, ABS_X, td->x);
- input_event(input, EV_ABS, ABS_Y, td->y);
- }
-}
-
-
-static int quanta_event(struct hid_device *hid, struct hid_field *field,
- struct hid_usage *usage, __s32 value)
-{
- struct quanta_data *td = hid_get_drvdata(hid);
-
- if (hid->claimed & HID_CLAIMED_INPUT) {
- struct input_dev *input = field->hidinput->input;
-
- switch (usage->hid) {
- case HID_DG_INRANGE:
- td->valid = !!value;
- break;
- case HID_GD_X:
- td->x = value;
- break;
- case HID_GD_Y:
- td->y = value;
- quanta_filter_event(td, input);
- break;
- case HID_DG_CONTACTID:
- td->id = value;
- break;
- case HID_DG_CONTACTCOUNT:
- /* touch emulation: this is the last field in a frame */
- td->first = false;
- td->activity_now = false;
- break;
- case HID_DG_CONFIDENCE:
- case HID_DG_TIPSWITCH:
- /* avoid interference from generic hidinput handling */
- break;
-
- default:
- /* fallback to the generic hidinput handling */
- return 0;
- }
- }
-
- /* we have handled the hidinput part, now remains hiddev */
- if (hid->claimed & HID_CLAIMED_HIDDEV && hid->hiddev_hid_event)
- hid->hiddev_hid_event(hid, field, usage, value);
-
- return 1;
-}
-
-static int quanta_probe(struct hid_device *hdev, const struct hid_device_id *id)
-{
- int ret;
- struct quanta_data *td;
-
- td = kmalloc(sizeof(struct quanta_data), GFP_KERNEL);
- if (!td) {
- hid_err(hdev, "cannot allocate Quanta Touch data\n");
- return -ENOMEM;
- }
- td->valid = false;
- td->activity = false;
- td->activity_now = false;
- td->first = false;
- hid_set_drvdata(hdev, td);
-
- ret = hid_parse(hdev);
- if (!ret)
- ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
-
- if (ret)
- kfree(td);
-
- return ret;
-}
-
-static void quanta_remove(struct hid_device *hdev)
-{
- hid_hw_stop(hdev);
- kfree(hid_get_drvdata(hdev));
- hid_set_drvdata(hdev, NULL);
-}
-
-static const struct hid_device_id quanta_devices[] = {
- { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
- USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH) },
- { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA,
- USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) },
- { }
-};
-MODULE_DEVICE_TABLE(hid, quanta_devices);
-
-static const struct hid_usage_id quanta_grabbed_usages[] = {
- { HID_ANY_ID, HID_ANY_ID, HID_ANY_ID },
- { HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
-};
-
-static struct hid_driver quanta_driver = {
- .name = "quanta-touch",
- .id_table = quanta_devices,
- .probe = quanta_probe,
- .remove = quanta_remove,
- .input_mapping = quanta_input_mapping,
- .input_mapped = quanta_input_mapped,
- .usage_table = quanta_grabbed_usages,
- .event = quanta_event,
-};
-
-static int __init quanta_init(void)
-{
- return hid_register_driver(&quanta_driver);
-}
-
-static void __exit quanta_exit(void)
-{
- hid_unregister_driver(&quanta_driver);
-}
-
-module_init(quanta_init);
-module_exit(quanta_exit);
-
diff --git a/drivers/hid/hid-roccat-common.c b/drivers/hid/hid-roccat-common.c
index b07e7f9..a6d9399 100644
--- a/drivers/hid/hid-roccat-common.c
+++ b/drivers/hid/hid-roccat-common.c
@@ -49,12 +49,10 @@ int roccat_common_send(struct usb_device *usb_dev, uint report_id,
char *buf;
int len;
- buf = kmalloc(size, GFP_KERNEL);
+ buf = kmemdup(data, size, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- memcpy(buf, data, size);
-
len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
HID_REQ_SET_REPORT,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
diff --git a/drivers/hid/hid-roccat-isku.c b/drivers/hid/hid-roccat-isku.c
new file mode 100644
index 0000000..0e4a0ab
--- /dev/null
+++ b/drivers/hid/hid-roccat-isku.c
@@ -0,0 +1,487 @@
+/*
+ * Roccat Isku driver for Linux
+ *
+ * Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+/*
+ * Roccat Isku is a gamer keyboard with macro keys that can be configured in
+ * 5 profiles.
+ */
+
+#include <linux/device.h>
+#include <linux/input.h>
+#include <linux/hid.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/hid-roccat.h>
+#include "hid-ids.h"
+#include "hid-roccat-common.h"
+#include "hid-roccat-isku.h"
+
+static struct class *isku_class;
+
+static void isku_profile_activated(struct isku_device *isku, uint new_profile)
+{
+ isku->actual_profile = new_profile;
+}
+
+static int isku_receive(struct usb_device *usb_dev, uint command,
+ void *buf, uint size)
+{
+ return roccat_common_receive(usb_dev, command, buf, size);
+}
+
+static int isku_receive_control_status(struct usb_device *usb_dev)
+{
+ int retval;
+ struct isku_control control;
+
+ do {
+ msleep(50);
+ retval = isku_receive(usb_dev, ISKU_COMMAND_CONTROL,
+ &control, sizeof(struct isku_control));
+
+ if (retval)
+ return retval;
+
+ switch (control.value) {
+ case ISKU_CONTROL_VALUE_STATUS_OK:
+ return 0;
+ case ISKU_CONTROL_VALUE_STATUS_WAIT:
+ continue;
+ case ISKU_CONTROL_VALUE_STATUS_INVALID:
+ /* seems to be critical - replug necessary */
+ case ISKU_CONTROL_VALUE_STATUS_OVERLOAD:
+ return -EINVAL;
+ default:
+ hid_err(usb_dev, "isku_receive_control_status: "
+ "unknown response value 0x%x\n",
+ control.value);
+ return -EINVAL;
+ }
+
+ } while (1);
+}
+
+static int isku_send(struct usb_device *usb_dev, uint command,
+ void const *buf, uint size)
+{
+ int retval;
+
+ retval = roccat_common_send(usb_dev, command, buf, size);
+ if (retval)
+ return retval;
+
+ return isku_receive_control_status(usb_dev);
+}
+
+static int isku_get_actual_profile(struct usb_device *usb_dev)
+{
+ struct isku_actual_profile buf;
+ int retval;
+
+ retval = isku_receive(usb_dev, ISKU_COMMAND_ACTUAL_PROFILE,
+ &buf, sizeof(struct isku_actual_profile));
+ return retval ? retval : buf.actual_profile;
+}
+
+static int isku_set_actual_profile(struct usb_device *usb_dev, int new_profile)
+{
+ struct isku_actual_profile buf;
+
+ buf.command = ISKU_COMMAND_ACTUAL_PROFILE;
+ buf.size = sizeof(struct isku_actual_profile);
+ buf.actual_profile = new_profile;
+ return isku_send(usb_dev, ISKU_COMMAND_ACTUAL_PROFILE, &buf,
+ sizeof(struct isku_actual_profile));
+}
+
+static ssize_t isku_sysfs_show_actual_profile(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct isku_device *isku =
+ hid_get_drvdata(dev_get_drvdata(dev->parent->parent));
+ return snprintf(buf, PAGE_SIZE, "%d\n", isku->actual_profile);
+}
+
+static ssize_t isku_sysfs_set_actual_profile(struct device *dev,
+ struct device_attribute *attr, char const *buf, size_t size)
+{
+ struct isku_device *isku;
+ struct usb_device *usb_dev;
+ unsigned long profile;
+ int retval;
+ struct isku_roccat_report roccat_report;
+
+ dev = dev->parent->parent;
+ isku = hid_get_drvdata(dev_get_drvdata(dev));
+ usb_dev = interface_to_usbdev(to_usb_interface(dev));
+
+ retval = strict_strtoul(buf, 10, &profile);
+ if (retval)
+ return retval;
+
+ if (profile > 4)
+ return -EINVAL;
+
+ mutex_lock(&isku->isku_lock);
+
+ retval = isku_set_actual_profile(usb_dev, profile);
+ if (retval) {
+ mutex_unlock(&isku->isku_lock);
+ return retval;
+ }
+
+ isku_profile_activated(isku, profile);
+
+ roccat_report.event = ISKU_REPORT_BUTTON_EVENT_PROFILE;
+ roccat_report.data1 = profile + 1;
+ roccat_report.data2 = 0;
+ roccat_report.profile = profile + 1;
+ roccat_report_event(isku->chrdev_minor, (uint8_t const *)&roccat_report);
+
+ mutex_unlock(&isku->isku_lock);
+
+ return size;
+}
+
+static struct device_attribute isku_attributes[] = {
+ __ATTR(actual_profile, 0660,
+ isku_sysfs_show_actual_profile,
+ isku_sysfs_set_actual_profile),
+ __ATTR_NULL
+};
+
+static ssize_t isku_sysfs_read(struct file *fp, struct kobject *kobj,
+ char *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct isku_device *isku = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+
+ if (off >= real_size)
+ return 0;
+
+ if (off != 0 || count != real_size)
+ return -EINVAL;
+
+ mutex_lock(&isku->isku_lock);
+ retval = isku_receive(usb_dev, command, buf, real_size);
+ mutex_unlock(&isku->isku_lock);
+
+ return retval ? retval : real_size;
+}
+
+static ssize_t isku_sysfs_write(struct file *fp, struct kobject *kobj,
+ void const *buf, loff_t off, size_t count,
+ size_t real_size, uint command)
+{
+ struct device *dev =
+ container_of(kobj, struct device, kobj)->parent->parent;
+ struct isku_device *isku = hid_get_drvdata(dev_get_drvdata(dev));
+ struct usb_device *usb_dev = interface_to_usbdev(to_usb_interface(dev));
+ int retval;
+
+ if (off != 0 || count != real_size)
+ return -EINVAL;
+
+ mutex_lock(&isku->isku_lock);
+ retval = isku_send(usb_dev, command, (void *)buf, real_size);
+ mutex_unlock(&isku->isku_lock);
+
+ return retval ? retval : real_size;
+}
+
+#define ISKU_SYSFS_W(thingy, THINGY) \
+static ssize_t isku_sysfs_write_ ## thingy(struct file *fp, struct kobject *kobj, \
+ struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ return isku_sysfs_write(fp, kobj, buf, off, count, \
+ sizeof(struct isku_ ## thingy), ISKU_COMMAND_ ## THINGY); \
+}
+
+#define ISKU_SYSFS_R(thingy, THINGY) \
+static ssize_t isku_sysfs_read_ ## thingy(struct file *fp, struct kobject *kobj, \
+ struct bin_attribute *attr, char *buf, \
+ loff_t off, size_t count) \
+{ \
+ return isku_sysfs_read(fp, kobj, buf, off, count, \
+ sizeof(struct isku_ ## thingy), ISKU_COMMAND_ ## THINGY); \
+}
+
+#define ISKU_SYSFS_RW(thingy, THINGY) \
+ISKU_SYSFS_R(thingy, THINGY) \
+ISKU_SYSFS_W(thingy, THINGY)
+
+#define ISKU_BIN_ATTR_RW(thingy) \
+{ \
+ .attr = { .name = #thingy, .mode = 0660 }, \
+ .size = sizeof(struct isku_ ## thingy), \
+ .read = isku_sysfs_read_ ## thingy, \
+ .write = isku_sysfs_write_ ## thingy \
+}
+
+#define ISKU_BIN_ATTR_R(thingy) \
+{ \
+ .attr = { .name = #thingy, .mode = 0440 }, \
+ .size = sizeof(struct isku_ ## thingy), \
+ .read = isku_sysfs_read_ ## thingy, \
+}
+
+#define ISKU_BIN_ATTR_W(thingy) \
+{ \
+ .attr = { .name = #thingy, .mode = 0220 }, \
+ .size = sizeof(struct isku_ ## thingy), \
+ .write = isku_sysfs_write_ ## thingy \
+}
+
+ISKU_SYSFS_RW(macro, MACRO)
+ISKU_SYSFS_RW(keys_function, KEYS_FUNCTION)
+ISKU_SYSFS_RW(keys_easyzone, KEYS_EASYZONE)
+ISKU_SYSFS_RW(keys_media, KEYS_MEDIA)
+ISKU_SYSFS_RW(keys_thumbster, KEYS_THUMBSTER)
+ISKU_SYSFS_RW(keys_macro, KEYS_MACRO)
+ISKU_SYSFS_RW(keys_capslock, KEYS_CAPSLOCK)
+ISKU_SYSFS_RW(light, LIGHT)
+ISKU_SYSFS_RW(key_mask, KEY_MASK)
+ISKU_SYSFS_RW(last_set, LAST_SET)
+ISKU_SYSFS_W(talk, TALK)
+ISKU_SYSFS_R(info, INFO)
+ISKU_SYSFS_W(control, CONTROL)
+
+static struct bin_attribute isku_bin_attributes[] = {
+ ISKU_BIN_ATTR_RW(macro),
+ ISKU_BIN_ATTR_RW(keys_function),
+ ISKU_BIN_ATTR_RW(keys_easyzone),
+ ISKU_BIN_ATTR_RW(keys_media),
+ ISKU_BIN_ATTR_RW(keys_thumbster),
+ ISKU_BIN_ATTR_RW(keys_macro),
+ ISKU_BIN_ATTR_RW(keys_capslock),
+ ISKU_BIN_ATTR_RW(light),
+ ISKU_BIN_ATTR_RW(key_mask),
+ ISKU_BIN_ATTR_RW(last_set),
+ ISKU_BIN_ATTR_W(talk),
+ ISKU_BIN_ATTR_R(info),
+ ISKU_BIN_ATTR_W(control),
+ __ATTR_NULL
+};
+
+static int isku_init_isku_device_struct(struct usb_device *usb_dev,
+ struct isku_device *isku)
+{
+ int retval;
+
+ mutex_init(&isku->isku_lock);
+
+ retval = isku_get_actual_profile(usb_dev);
+ if (retval < 0)
+ return retval;
+ isku_profile_activated(isku, retval);
+
+ return 0;
+}
+
+static int isku_init_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct usb_device *usb_dev = interface_to_usbdev(intf);
+ struct isku_device *isku;
+ int retval;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ != ISKU_USB_INTERFACE_PROTOCOL) {
+ hid_set_drvdata(hdev, NULL);
+ return 0;
+ }
+
+ isku = kzalloc(sizeof(*isku), GFP_KERNEL);
+ if (!isku) {
+ hid_err(hdev, "can't alloc device descriptor\n");
+ return -ENOMEM;
+ }
+ hid_set_drvdata(hdev, isku);
+
+ retval = isku_init_isku_device_struct(usb_dev, isku);
+ if (retval) {
+ hid_err(hdev, "couldn't init struct isku_device\n");
+ goto exit_free;
+ }
+
+ retval = roccat_connect(isku_class, hdev,
+ sizeof(struct isku_roccat_report));
+ if (retval < 0) {
+ hid_err(hdev, "couldn't init char dev\n");
+ } else {
+ isku->chrdev_minor = retval;
+ isku->roccat_claimed = 1;
+ }
+
+ return 0;
+exit_free:
+ kfree(isku);
+ return retval;
+}
+
+static void isku_remove_specials(struct hid_device *hdev)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct isku_device *isku;
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ != ISKU_USB_INTERFACE_PROTOCOL)
+ return;
+
+ isku = hid_get_drvdata(hdev);
+ if (isku->roccat_claimed)
+ roccat_disconnect(isku->chrdev_minor);
+ kfree(isku);
+}
+
+static int isku_probe(struct hid_device *hdev,
+ const struct hid_device_id *id)
+{
+ int retval;
+
+ retval = hid_parse(hdev);
+ if (retval) {
+ hid_err(hdev, "parse failed\n");
+ goto exit;
+ }
+
+ retval = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+ if (retval) {
+ hid_err(hdev, "hw start failed\n");
+ goto exit;
+ }
+
+ retval = isku_init_specials(hdev);
+ if (retval) {
+ hid_err(hdev, "couldn't install keyboard\n");
+ goto exit_stop;
+ }
+
+ return 0;
+
+exit_stop:
+ hid_hw_stop(hdev);
+exit:
+ return retval;
+}
+
+static void isku_remove(struct hid_device *hdev)
+{
+ isku_remove_specials(hdev);
+ hid_hw_stop(hdev);
+}
+
+static void isku_keep_values_up_to_date(struct isku_device *isku,
+ u8 const *data)
+{
+ struct isku_report_button const *button_report;
+
+ switch (data[0]) {
+ case ISKU_REPORT_NUMBER_BUTTON:
+ button_report = (struct isku_report_button const *)data;
+ switch (button_report->event) {
+ case ISKU_REPORT_BUTTON_EVENT_PROFILE:
+ isku_profile_activated(isku, button_report->data1 - 1);
+ break;
+ }
+ break;
+ }
+}
+
+static void isku_report_to_chrdev(struct isku_device const *isku,
+ u8 const *data)
+{
+ struct isku_roccat_report roccat_report;
+ struct isku_report_button const *button_report;
+
+ if (data[0] != ISKU_REPORT_NUMBER_BUTTON)
+ return;
+
+ button_report = (struct isku_report_button const *)data;
+
+ roccat_report.event = button_report->event;
+ roccat_report.data1 = button_report->data1;
+ roccat_report.data2 = button_report->data2;
+ roccat_report.profile = isku->actual_profile + 1;
+ roccat_report_event(isku->chrdev_minor,
+ (uint8_t const *)&roccat_report);
+}
+
+static int isku_raw_event(struct hid_device *hdev,
+ struct hid_report *report, u8 *data, int size)
+{
+ struct usb_interface *intf = to_usb_interface(hdev->dev.parent);
+ struct isku_device *isku = hid_get_drvdata(hdev);
+
+ if (intf->cur_altsetting->desc.bInterfaceProtocol
+ != ISKU_USB_INTERFACE_PROTOCOL)
+ return 0;
+
+ if (isku == NULL)
+ return 0;
+
+ isku_keep_values_up_to_date(isku, data);
+
+ if (isku->roccat_claimed)
+ isku_report_to_chrdev(isku, data);
+
+ return 0;
+}
+
+static const struct hid_device_id isku_devices[] = {
+ { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
+ { }
+};
+
+MODULE_DEVICE_TABLE(hid, isku_devices);
+
+static struct hid_driver isku_driver = {
+ .name = "isku",
+ .id_table = isku_devices,
+ .probe = isku_probe,
+ .remove = isku_remove,
+ .raw_event = isku_raw_event
+};
+
+static int __init isku_init(void)
+{
+ int retval;
+ isku_class = class_create(THIS_MODULE, "isku");
+ if (IS_ERR(isku_class))
+ return PTR_ERR(isku_class);
+ isku_class->dev_attrs = isku_attributes;
+ isku_class->dev_bin_attrs = isku_bin_attributes;
+
+ retval = hid_register_driver(&isku_driver);
+ if (retval)
+ class_destroy(isku_class);
+ return retval;
+}
+
+static void __exit isku_exit(void)
+{
+ hid_unregister_driver(&isku_driver);
+ class_destroy(isku_class);
+}
+
+module_init(isku_init);
+module_exit(isku_exit);
+
+MODULE_AUTHOR("Stefan Achatz");
+MODULE_DESCRIPTION("USB Roccat Isku driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hid/hid-roccat-isku.h b/drivers/hid/hid-roccat-isku.h
new file mode 100644
index 0000000..075f6ef
--- /dev/null
+++ b/drivers/hid/hid-roccat-isku.h
@@ -0,0 +1,147 @@
+#ifndef __HID_ROCCAT_ISKU_H
+#define __HID_ROCCAT_ISKU_H
+
+/*
+ * Copyright (c) 2011 Stefan Achatz <erazor_de@users.sourceforge.net>
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/types.h>
+
+enum {
+ ISKU_PROFILE_NUM = 5,
+ ISKU_USB_INTERFACE_PROTOCOL = 0,
+};
+
+struct isku_control {
+ uint8_t command; /* ISKU_COMMAND_CONTROL */
+ uint8_t value;
+ uint8_t request;
+} __packed;
+
+enum isku_control_values {
+ ISKU_CONTROL_VALUE_STATUS_OVERLOAD = 0,
+ ISKU_CONTROL_VALUE_STATUS_OK = 1,
+ ISKU_CONTROL_VALUE_STATUS_INVALID = 2,
+ ISKU_CONTROL_VALUE_STATUS_WAIT = 3,
+};
+
+struct isku_actual_profile {
+ uint8_t command; /* ISKU_COMMAND_ACTUAL_PROFILE */
+ uint8_t size; /* always 3 */
+ uint8_t actual_profile;
+} __packed;
+
+struct isku_key_mask {
+ uint8_t command; /* ISKU_COMMAND_KEY_MASK */
+ uint8_t size; /* 6 */
+ uint8_t profile_number; /* 0-4 */
+ uint8_t mask;
+ uint16_t checksum;
+} __packed;
+
+struct isku_keys_function {
+ uint8_t data[0x29];
+} __packed;
+
+struct isku_keys_easyzone {
+ uint8_t data[0x41];
+} __packed;
+
+struct isku_keys_media {
+ uint8_t data[0x1d];
+} __packed;
+
+struct isku_keys_thumbster {
+ uint8_t data[0x17];
+} __packed;
+
+struct isku_keys_macro {
+ uint8_t data[0x23];
+} __packed;
+
+struct isku_keys_capslock {
+ uint8_t data[0x6];
+} __packed;
+
+struct isku_macro {
+ uint8_t data[0x823];
+} __packed;
+
+struct isku_light {
+ uint8_t data[0xa];
+} __packed;
+
+struct isku_info {
+ uint8_t data[2];
+ uint8_t firmware_version;
+ uint8_t unknown[3];
+} __packed;
+
+struct isku_talk {
+ uint8_t data[0x10];
+} __packed;
+
+struct isku_last_set {
+ uint8_t data[0x14];
+} __packed;
+
+enum isku_commands {
+ ISKU_COMMAND_CONTROL = 0x4,
+ ISKU_COMMAND_ACTUAL_PROFILE = 0x5,
+ ISKU_COMMAND_KEY_MASK = 0x7,
+ ISKU_COMMAND_KEYS_FUNCTION = 0x8,
+ ISKU_COMMAND_KEYS_EASYZONE = 0x9,
+ ISKU_COMMAND_KEYS_MEDIA = 0xa,
+ ISKU_COMMAND_KEYS_THUMBSTER = 0xb,
+ ISKU_COMMAND_KEYS_MACRO = 0xd,
+ ISKU_COMMAND_MACRO = 0xe,
+ ISKU_COMMAND_INFO = 0xf,
+ ISKU_COMMAND_LIGHT = 0x10,
+ ISKU_COMMAND_KEYS_CAPSLOCK = 0x13,
+ ISKU_COMMAND_LAST_SET = 0x14,
+ ISKU_COMMAND_15 = 0x15,
+ ISKU_COMMAND_TALK = 0x16,
+ ISKU_COMMAND_FIRMWARE_WRITE = 0x1b,
+ ISKU_COMMAND_FIRMWARE_WRITE_CONTROL = 0x1c,
+};
+
+struct isku_report_button {
+ uint8_t number; /* ISKU_REPORT_NUMBER_BUTTON */
+ uint8_t zero;
+ uint8_t event;
+ uint8_t data1;
+ uint8_t data2;
+};
+
+enum isku_report_numbers {
+ ISKU_REPORT_NUMBER_BUTTON = 3,
+};
+
+enum isku_report_button_events {
+ ISKU_REPORT_BUTTON_EVENT_PROFILE = 0x2,
+};
+
+struct isku_roccat_report {
+ uint8_t event;
+ uint8_t data1;
+ uint8_t data2;
+ uint8_t profile;
+} __packed;
+
+struct isku_device {
+ int roccat_claimed;
+ int chrdev_minor;
+
+ struct mutex isku_lock;
+
+ int actual_profile;
+};
+
+#endif
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index e2072af..40090d6 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -78,12 +78,10 @@ static int kone_send(struct usb_device *usb_dev, uint usb_command,
char *buf;
int len;
- buf = kmalloc(size, GFP_KERNEL);
+ buf = kmemdup(data, size, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- memcpy(buf, data, size);
-
len = usb_control_msg(usb_dev, usb_sndctrlpipe(usb_dev, 0),
HID_REQ_SET_REPORT,
USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_OUT,
diff --git a/drivers/hid/hid-twinhan.c b/drivers/hid/hid-twinhan.c
index c40afc5..f23456b 100644
--- a/drivers/hid/hid-twinhan.c
+++ b/drivers/hid/hid-twinhan.c
@@ -3,7 +3,7 @@
*
* Based on hid-gyration.c
*
- * Copyright (c) 2009 Bruno Prmont <bonbons@linux-vserver.org>
+ * Copyright (c) 2009 Bruno Prémont <bonbons@linux-vserver.org>
*/
/*
diff --git a/drivers/hid/hid-wacom.c b/drivers/hid/hid-wacom.c
index 17bb88f..acab74c 100644
--- a/drivers/hid/hid-wacom.c
+++ b/drivers/hid/hid-wacom.c
@@ -9,6 +9,7 @@
* Copyright (c) 2008 Jiri Slaby <jirislaby@gmail.com>
* Copyright (c) 2006 Andrew Zabolotny <zap@homelink.ru>
* Copyright (c) 2009 Bastien Nocera <hadess@hadess.net>
+ * Copyright (c) 2011 Przemysław Firszt <przemo@firszt.eu>
*/
/*
@@ -33,6 +34,7 @@
struct wacom_data {
__u16 tool;
unsigned char butstate;
+ __u8 features;
unsigned char high_speed;
#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
int battery_capacity;
@@ -47,12 +49,14 @@ static unsigned short batcap[8] = { 1, 15, 25, 35, 50, 70, 100, 0 };
static enum power_supply_property wacom_battery_props[] = {
POWER_SUPPLY_PROP_PRESENT,
- POWER_SUPPLY_PROP_CAPACITY
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_SCOPE,
};
static enum power_supply_property wacom_ac_props[] = {
POWER_SUPPLY_PROP_PRESENT,
- POWER_SUPPLY_PROP_ONLINE
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_SCOPE,
};
static int wacom_battery_get_property(struct power_supply *psy,
@@ -68,6 +72,9 @@ static int wacom_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_PRESENT:
val->intval = 1;
break;
+ case POWER_SUPPLY_PROP_SCOPE:
+ val->intval = POWER_SUPPLY_SCOPE_DEVICE;
+ break;
case POWER_SUPPLY_PROP_CAPACITY:
/* show 100% battery capacity when charging */
if (power_state == 0)
@@ -99,6 +106,9 @@ static int wacom_ac_get_property(struct power_supply *psy,
else
val->intval = 0;
break;
+ case POWER_SUPPLY_PROP_SCOPE:
+ val->intval = POWER_SUPPLY_SCOPE_DEVICE;
+ break;
default:
ret = -EINVAL;
break;
@@ -107,6 +117,19 @@ static int wacom_ac_get_property(struct power_supply *psy,
}
#endif
+static void wacom_set_features(struct hid_device *hdev)
+{
+ int ret;
+ __u8 rep_data[2];
+
+ /* set high speed, tablet mode */
+ rep_data[0] = 0x03;
+ rep_data[1] = 0x20;
+ ret = hdev->hid_output_raw_report(hdev, rep_data, 2,
+ HID_FEATURE_REPORT);
+ if (ret < 0)
+ hid_warn(hdev, "failed to set tablet features: %d\n", ret);
+}
+
static void wacom_poke(struct hid_device *hdev, u8 speed)
{
struct wacom_data *wdata = hid_get_drvdata(hdev);
@@ -177,26 +200,13 @@ static ssize_t wacom_store_speed(struct device *dev,
static DEVICE_ATTR(speed, S_IRUGO | S_IWUSR | S_IWGRP,
wacom_show_speed, wacom_store_speed);
-static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
- u8 *raw_data, int size)
+static int wacom_gr_parse_report(struct hid_device *hdev,
+ struct wacom_data *wdata,
+ struct input_dev *input, unsigned char *data)
{
- struct wacom_data *wdata = hid_get_drvdata(hdev);
- struct hid_input *hidinput;
- struct input_dev *input;
- unsigned char *data = (unsigned char *) raw_data;
int tool, x, y, rw;
- if (!(hdev->claimed & HID_CLAIMED_INPUT))
- return 0;
-
tool = 0;
- hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
- input = hidinput->input;
-
- /* Check if this is a tablet report */
- if (data[0] != 0x03)
- return 0;
-
/* Get X & Y positions */
x = le16_to_cpu(*(__le16 *) &data[2]);
y = le16_to_cpu(*(__le16 *) &data[4]);
@@ -304,6 +314,121 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
return 1;
}
+static void wacom_i4_parse_pen_report(struct wacom_data *wdata,
+ struct input_dev *input, unsigned char *data)
+{
+ __u16 x, y, pressure;
+ __u32 id;
+
+ switch (data[1]) {
+ case 0x80: /* Out of proximity report */
+ wdata->tool = 0;
+ input_report_key(input, BTN_TOUCH, 0);
+ input_report_abs(input, ABS_PRESSURE, 0);
+ input_report_key(input, wdata->tool, 0);
+ input_sync(input);
+ break;
+ case 0xC2: /* Tool report */
+ id = ((data[2] << 4) | (data[3] >> 4) |
+ ((data[7] & 0x0f) << 20) |
+ ((data[8] & 0xf0) << 12)) & 0xfffff;
+
+ switch (id) {
+ case 0x802:
+ wdata->tool = BTN_TOOL_PEN;
+ break;
+ case 0x80A:
+ wdata->tool = BTN_TOOL_RUBBER;
+ break;
+ }
+ break;
+ default: /* Position/pressure report */
+ x = data[2] << 9 | data[3] << 1 | ((data[9] & 0x02) >> 1);
+ y = data[4] << 9 | data[5] << 1 | (data[9] & 0x01);
+ pressure = (data[6] << 3) | ((data[7] & 0xC0) >> 5)
+ | (data[1] & 0x01);
+
+ input_report_key(input, BTN_TOUCH, pressure > 1);
+
+ input_report_key(input, BTN_STYLUS, data[1] & 0x02);
+ input_report_key(input, BTN_STYLUS2, data[1] & 0x04);
+ input_report_key(input, wdata->tool, 1);
+ input_report_abs(input, ABS_X, x);
+ input_report_abs(input, ABS_Y, y);
+ input_report_abs(input, ABS_PRESSURE, pressure);
+ input_sync(input);
+ break;
+ }
+
+ return;
+}
+
+static void wacom_i4_parse_report(struct hid_device *hdev,
+ struct wacom_data *wdata,
+ struct input_dev *input, unsigned char *data)
+{
+ switch (data[0]) {
+ case 0x00: /* Empty report */
+ break;
+ case 0x02: /* Pen report */
+ wacom_i4_parse_pen_report(wdata, input, data);
+ break;
+ case 0x03: /* Features Report */
+ wdata->features = data[2];
+ break;
+ case 0x0C: /* Button report */
+ break;
+ default:
+ hid_err(hdev, "Unknown report: %d,%d\n", data[0], data[1]);
+ break;
+ }
+}
+
+static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
+ u8 *raw_data, int size)
+{
+ struct wacom_data *wdata = hid_get_drvdata(hdev);
+ struct hid_input *hidinput;
+ struct input_dev *input;
+ unsigned char *data = (unsigned char *) raw_data;
+ int i;
+
+ if (!(hdev->claimed & HID_CLAIMED_INPUT))
+ return 0;
+
+ hidinput = list_entry(hdev->inputs.next, struct hid_input, list);
+ input = hidinput->input;
+
+ /* Check if this is a tablet report */
+ if (data[0] != 0x03)
+ return 0;
+
+ switch (hdev->product) {
+ case USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH:
+ return wacom_gr_parse_report(hdev, wdata, input, data);
+ break;
+ case USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH:
+ i = 1;
+
+ switch (data[0]) {
+ case 0x04:
+ wacom_i4_parse_report(hdev, wdata, input, data + i);
+ i += 10;
+ /* fall through */
+ case 0x03:
+ wacom_i4_parse_report(hdev, wdata, input, data + i);
+ i += 10;
+ wacom_i4_parse_report(hdev, wdata, input, data + i);
+ break;
+ default:
+ hid_err(hdev, "Unknown report: %d,%d size:%d\n",
+ data[0], data[1], size);
+ return 0;
+ }
+ }
+ return 1;
+}
+
static int wacom_input_mapped(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage, unsigned long **bit,
int *max)
@@ -338,10 +463,19 @@ static int wacom_input_mapped(struct hid_device *hdev, struct hid_input *hi,
__set_bit(BTN_TOOL_RUBBER, input->keybit);
__set_bit(BTN_TOOL_MOUSE, input->keybit);
- input_set_abs_params(input, ABS_X, 0, 16704, 4, 0);
- input_set_abs_params(input, ABS_Y, 0, 12064, 4, 0);
- input_set_abs_params(input, ABS_PRESSURE, 0, 511, 0, 0);
- input_set_abs_params(input, ABS_DISTANCE, 0, 32, 0, 0);
+ switch (hdev->product) {
+ case USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH:
+ input_set_abs_params(input, ABS_X, 0, 16704, 4, 0);
+ input_set_abs_params(input, ABS_Y, 0, 12064, 4, 0);
+ input_set_abs_params(input, ABS_PRESSURE, 0, 511, 0, 0);
+ input_set_abs_params(input, ABS_DISTANCE, 0, 32, 0, 0);
+ break;
+ case USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH:
+ input_set_abs_params(input, ABS_X, 0, 40640, 4, 0);
+ input_set_abs_params(input, ABS_Y, 0, 25400, 4, 0);
+ input_set_abs_params(input, ABS_PRESSURE, 0, 2047, 0, 0);
+ break;
+ }
return 0;
}
@@ -378,8 +512,16 @@ static int wacom_probe(struct hid_device *hdev,
hid_warn(hdev,
"can't create sysfs speed attribute err: %d\n", ret);
- /* Set Wacom mode 2 with high reporting speed */
- wacom_poke(hdev, 1);
+ switch (hdev->product) {
+ case USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH:
+ /* Set Wacom mode 2 with high reporting speed */
+ wacom_poke(hdev, 1);
+ break;
+ case USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH:
+ wdata->features = 0;
+ wacom_set_features(hdev);
+ break;
+ }
#ifdef CONFIG_HID_WACOM_POWER_SUPPLY
wdata->battery.properties = wacom_battery_props;
@@ -389,6 +531,7 @@ static int wacom_probe(struct hid_device *hdev,
wdata->battery.type = POWER_SUPPLY_TYPE_BATTERY;
wdata->battery.use_for_apm = 0;
+
ret = power_supply_register(&hdev->dev, &wdata->battery);
if (ret) {
hid_warn(hdev, "can't create sysfs battery attribute, err: %d\n",
@@ -396,6 +539,8 @@ static int wacom_probe(struct hid_device *hdev,
goto err_battery;
}
+ power_supply_powers(&wdata->battery, &hdev->dev);
+
wdata->ac.properties = wacom_ac_props;
wdata->ac.num_properties = ARRAY_SIZE(wacom_ac_props);
wdata->ac.get_property = wacom_ac_get_property;
@@ -409,6 +554,8 @@ static int wacom_probe(struct hid_device *hdev,
"can't create ac battery attribute, err: %d\n", ret);
goto err_ac;
}
+
+ power_supply_powers(&wdata->ac, &hdev->dev);
#endif
return 0;
@@ -441,6 +588,7 @@ static void wacom_remove(struct hid_device *hdev)
static const struct hid_device_id wacom_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_WACOM, USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) },
{ }
};
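As an aside (not part of the patch): wacom_raw_event() above unpacks the Intuos4 Bluetooth framing, where a report starting with 0x03 carries two 10-byte sub-reports (at offsets 1 and 11) and one starting with 0x04 carries three (offsets 1, 11 and 21). A sketch of that framing, using a hypothetical helper:

/* Editor's sketch only; i4_for_each_subreport() does not exist in the patch. */
static void i4_for_each_subreport(struct hid_device *hdev,
		struct wacom_data *wdata, struct input_dev *input,
		unsigned char *data)
{
	int i, count = (data[0] == 0x04) ? 3 : 2;

	/* each sub-report is handled exactly like in the switch above */
	for (i = 0; i < count; i++)
		wacom_i4_parse_report(hdev, wdata, input, data + 1 + 10 * i);
}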
diff --git a/drivers/hid/hid-wiimote.c b/drivers/hid/hid-wiimote-core.c
index 76739c0..cac3589 100644
--- a/drivers/hid/hid-wiimote.c
+++ b/drivers/hid/hid-wiimote-core.c
@@ -20,91 +20,7 @@
#include <linux/power_supply.h>
#include <linux/spinlock.h>
#include "hid-ids.h"
-
-#define WIIMOTE_VERSION "0.2"
-#define WIIMOTE_NAME "Nintendo Wii Remote"
-#define WIIMOTE_BUFSIZE 32
-
-struct wiimote_buf {
- __u8 data[HID_MAX_BUFFER_SIZE];
- size_t size;
-};
-
-struct wiimote_state {
- spinlock_t lock;
- __u8 flags;
- __u8 accel_split[2];
-
- /* synchronous cmd requests */
- struct mutex sync;
- struct completion ready;
- int cmd;
- __u32 opt;
-
- /* results of synchronous requests */
- __u8 cmd_battery;
- __u8 cmd_err;
-};
-
-struct wiimote_data {
- struct hid_device *hdev;
- struct input_dev *input;
- struct led_classdev *leds[4];
- struct input_dev *accel;
- struct input_dev *ir;
- struct power_supply battery;
-
- spinlock_t qlock;
- __u8 head;
- __u8 tail;
- struct wiimote_buf outq[WIIMOTE_BUFSIZE];
- struct work_struct worker;
-
- struct wiimote_state state;
-};
-
-#define WIIPROTO_FLAG_LED1 0x01
-#define WIIPROTO_FLAG_LED2 0x02
-#define WIIPROTO_FLAG_LED3 0x04
-#define WIIPROTO_FLAG_LED4 0x08
-#define WIIPROTO_FLAG_RUMBLE 0x10
-#define WIIPROTO_FLAG_ACCEL 0x20
-#define WIIPROTO_FLAG_IR_BASIC 0x40
-#define WIIPROTO_FLAG_IR_EXT 0x80
-#define WIIPROTO_FLAG_IR_FULL 0xc0 /* IR_BASIC | IR_EXT */
-#define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \
- WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4)
-#define WIIPROTO_FLAGS_IR (WIIPROTO_FLAG_IR_BASIC | WIIPROTO_FLAG_IR_EXT | \
- WIIPROTO_FLAG_IR_FULL)
-
-/* return flag for led \num */
-#define WIIPROTO_FLAG_LED(num) (WIIPROTO_FLAG_LED1 << (num - 1))
-
-enum wiiproto_reqs {
- WIIPROTO_REQ_NULL = 0x0,
- WIIPROTO_REQ_RUMBLE = 0x10,
- WIIPROTO_REQ_LED = 0x11,
- WIIPROTO_REQ_DRM = 0x12,
- WIIPROTO_REQ_IR1 = 0x13,
- WIIPROTO_REQ_SREQ = 0x15,
- WIIPROTO_REQ_WMEM = 0x16,
- WIIPROTO_REQ_RMEM = 0x17,
- WIIPROTO_REQ_IR2 = 0x1a,
- WIIPROTO_REQ_STATUS = 0x20,
- WIIPROTO_REQ_DATA = 0x21,
- WIIPROTO_REQ_RETURN = 0x22,
- WIIPROTO_REQ_DRM_K = 0x30,
- WIIPROTO_REQ_DRM_KA = 0x31,
- WIIPROTO_REQ_DRM_KE = 0x32,
- WIIPROTO_REQ_DRM_KAI = 0x33,
- WIIPROTO_REQ_DRM_KEE = 0x34,
- WIIPROTO_REQ_DRM_KAE = 0x35,
- WIIPROTO_REQ_DRM_KIE = 0x36,
- WIIPROTO_REQ_DRM_KAIE = 0x37,
- WIIPROTO_REQ_DRM_E = 0x3d,
- WIIPROTO_REQ_DRM_SKAI1 = 0x3e,
- WIIPROTO_REQ_DRM_SKAI2 = 0x3f,
-};
+#include "hid-wiimote.h"
enum wiiproto_keys {
WIIPROTO_KEY_LEFT,
@@ -136,55 +52,10 @@ static __u16 wiiproto_keymap[] = {
};
static enum power_supply_property wiimote_battery_props[] = {
- POWER_SUPPLY_PROP_CAPACITY
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_SCOPE,
};
-/* requires the state.lock spinlock to be held */
-static inline bool wiimote_cmd_pending(struct wiimote_data *wdata, int cmd,
- __u32 opt)
-{
- return wdata->state.cmd == cmd && wdata->state.opt == opt;
-}
-
-/* requires the state.lock spinlock to be held */
-static inline void wiimote_cmd_complete(struct wiimote_data *wdata)
-{
- wdata->state.cmd = WIIPROTO_REQ_NULL;
- complete(&wdata->state.ready);
-}
-
-static inline int wiimote_cmd_acquire(struct wiimote_data *wdata)
-{
- return mutex_lock_interruptible(&wdata->state.sync) ? -ERESTARTSYS : 0;
-}
-
-/* requires the state.lock spinlock to be held */
-static inline void wiimote_cmd_set(struct wiimote_data *wdata, int cmd,
- __u32 opt)
-{
- INIT_COMPLETION(wdata->state.ready);
- wdata->state.cmd = cmd;
- wdata->state.opt = opt;
-}
-
-static inline void wiimote_cmd_release(struct wiimote_data *wdata)
-{
- mutex_unlock(&wdata->state.sync);
-}
-
-static inline int wiimote_cmd_wait(struct wiimote_data *wdata)
-{
- int ret;
-
- ret = wait_for_completion_interruptible_timeout(&wdata->state.ready, HZ);
- if (ret < 0)
- return -ERESTARTSYS;
- else if (ret == 0)
- return -EIO;
- else
- return 0;
-}
-
static ssize_t wiimote_hid_send(struct hid_device *hdev, __u8 *buffer,
size_t count)
{
@@ -329,6 +200,7 @@ static void wiiproto_req_leds(struct wiimote_data *wdata, int leds)
static __u8 select_drm(struct wiimote_data *wdata)
{
__u8 ir = wdata->state.flags & WIIPROTO_FLAGS_IR;
+ bool ext = wiiext_active(wdata);
if (ir == WIIPROTO_FLAG_IR_BASIC) {
if (wdata->state.flags & WIIPROTO_FLAG_ACCEL)
@@ -340,14 +212,21 @@ static __u8 select_drm(struct wiimote_data *wdata)
} else if (ir == WIIPROTO_FLAG_IR_FULL) {
return WIIPROTO_REQ_DRM_SKAI1;
} else {
- if (wdata->state.flags & WIIPROTO_FLAG_ACCEL)
- return WIIPROTO_REQ_DRM_KA;
- else
- return WIIPROTO_REQ_DRM_K;
+ if (wdata->state.flags & WIIPROTO_FLAG_ACCEL) {
+ if (ext)
+ return WIIPROTO_REQ_DRM_KAE;
+ else
+ return WIIPROTO_REQ_DRM_KA;
+ } else {
+ if (ext)
+ return WIIPROTO_REQ_DRM_KE;
+ else
+ return WIIPROTO_REQ_DRM_K;
+ }
}
}
-static void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm)
+void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm)
{
__u8 cmd[3];
@@ -358,6 +237,7 @@ static void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm)
cmd[1] = 0;
cmd[2] = drm;
+ wdata->state.drm = drm;
wiiproto_keep_rumble(wdata, &cmd[1]);
wiimote_queue(wdata, cmd, sizeof(cmd));
}
@@ -440,8 +320,33 @@ static void wiiproto_req_wmem(struct wiimote_data *wdata, bool eeprom,
wiimote_queue(wdata, cmd, sizeof(cmd));
}
+void wiiproto_req_rmem(struct wiimote_data *wdata, bool eeprom, __u32 offset,
+ __u16 size)
+{
+ __u8 cmd[7];
+
+ if (size == 0) {
+ hid_warn(wdata->hdev, "Invalid length %d rmem request\n", size);
+ return;
+ }
+
+ cmd[0] = WIIPROTO_REQ_RMEM;
+ cmd[1] = 0;
+ cmd[2] = (offset >> 16) & 0xff;
+ cmd[3] = (offset >> 8) & 0xff;
+ cmd[4] = offset & 0xff;
+ cmd[5] = (size >> 8) & 0xff;
+ cmd[6] = size & 0xff;
+
+ if (!eeprom)
+ cmd[1] |= 0x04;
+
+ wiiproto_keep_rumble(wdata, &cmd[1]);
+ wiimote_queue(wdata, cmd, sizeof(cmd));
+}
+
/* requires the cmd-mutex to be held */
-static int wiimote_cmd_write(struct wiimote_data *wdata, __u32 offset,
+int wiimote_cmd_write(struct wiimote_data *wdata, __u32 offset,
const __u8 *wmem, __u8 size)
{
unsigned long flags;
@@ -459,6 +364,36 @@ static int wiimote_cmd_write(struct wiimote_data *wdata, __u32 offset,
return ret;
}
+/* requires the cmd-mutex to be held */
+ssize_t wiimote_cmd_read(struct wiimote_data *wdata, __u32 offset, __u8 *rmem,
+ __u8 size)
+{
+ unsigned long flags;
+ ssize_t ret;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.cmd_read_size = size;
+ wdata->state.cmd_read_buf = rmem;
+ wiimote_cmd_set(wdata, WIIPROTO_REQ_RMEM, offset & 0xffff);
+ wiiproto_req_rreg(wdata, offset, size);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ ret = wiimote_cmd_wait(wdata);
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.cmd_read_buf = NULL;
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ if (!ret) {
+ if (wdata->state.cmd_read_size == 0)
+ ret = -EIO;
+ else
+ ret = wdata->state.cmd_read_size;
+ }
+
+ return ret;
+}
+
static int wiimote_battery_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
@@ -468,6 +403,11 @@ static int wiimote_battery_get_property(struct power_supply *psy,
int ret = 0, state;
unsigned long flags;
+ if (psp == POWER_SUPPLY_PROP_SCOPE) {
+ val->intval = POWER_SUPPLY_SCOPE_DEVICE;
+ return 0;
+ }
+
ret = wiimote_cmd_acquire(wdata);
if (ret)
return ret;
@@ -862,6 +802,8 @@ static void handler_status(struct wiimote_data *wdata, const __u8 *payload)
/* on status reports the drm is reset so we need to resend the drm */
wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
+ wiiext_event(wdata, payload[2] & 0x02);
+
if (wiimote_cmd_pending(wdata, WIIPROTO_REQ_SREQ, 0)) {
wdata->state.cmd_battery = payload[5];
wiimote_cmd_complete(wdata);
@@ -870,7 +812,23 @@ static void handler_status(struct wiimote_data *wdata, const __u8 *payload)
static void handler_data(struct wiimote_data *wdata, const __u8 *payload)
{
+ __u16 offset = payload[3] << 8 | payload[4];
+ __u8 size = (payload[2] >> 4) + 1;
+ __u8 err = payload[2] & 0x0f;
+
handler_keys(wdata, payload);
+
+ if (wiimote_cmd_pending(wdata, WIIPROTO_REQ_RMEM, offset)) {
+ if (err)
+ size = 0;
+ else if (size > wdata->state.cmd_read_size)
+ size = wdata->state.cmd_read_size;
+
+ wdata->state.cmd_read_size = size;
+ if (wdata->state.cmd_read_buf)
+ memcpy(wdata->state.cmd_read_buf, &payload[5], size);
+ wiimote_cmd_complete(wdata);
+ }
}
static void handler_return(struct wiimote_data *wdata, const __u8 *payload)
@@ -898,6 +856,7 @@ static void handler_drm_KA(struct wiimote_data *wdata, const __u8 *payload)
static void handler_drm_KE(struct wiimote_data *wdata, const __u8 *payload)
{
handler_keys(wdata, payload);
+ wiiext_handle(wdata, &payload[2]);
}
static void handler_drm_KAI(struct wiimote_data *wdata, const __u8 *payload)
@@ -914,6 +873,7 @@ static void handler_drm_KAI(struct wiimote_data *wdata, const __u8 *payload)
static void handler_drm_KEE(struct wiimote_data *wdata, const __u8 *payload)
{
handler_keys(wdata, payload);
+ wiiext_handle(wdata, &payload[2]);
}
static void handler_drm_KIE(struct wiimote_data *wdata, const __u8 *payload)
@@ -924,12 +884,14 @@ static void handler_drm_KIE(struct wiimote_data *wdata, const __u8 *payload)
ir_to_input2(wdata, &payload[7], false);
ir_to_input3(wdata, &payload[9], true);
input_sync(wdata->ir);
+ wiiext_handle(wdata, &payload[12]);
}
static void handler_drm_KAE(struct wiimote_data *wdata, const __u8 *payload)
{
handler_keys(wdata, payload);
handler_accel(wdata, payload);
+ wiiext_handle(wdata, &payload[5]);
}
static void handler_drm_KAIE(struct wiimote_data *wdata, const __u8 *payload)
@@ -941,10 +903,12 @@ static void handler_drm_KAIE(struct wiimote_data *wdata, const __u8 *payload)
ir_to_input2(wdata, &payload[10], false);
ir_to_input3(wdata, &payload[12], true);
input_sync(wdata->ir);
+ wiiext_handle(wdata, &payload[15]);
}
static void handler_drm_E(struct wiimote_data *wdata, const __u8 *payload)
{
+ wiiext_handle(wdata, payload);
}
static void handler_drm_SKAI1(struct wiimote_data *wdata, const __u8 *payload)
@@ -1182,6 +1146,7 @@ static struct wiimote_data *wiimote_create(struct hid_device *hdev)
spin_lock_init(&wdata->state.lock);
init_completion(&wdata->state.ready);
mutex_init(&wdata->state.sync);
+ wdata->state.drm = WIIPROTO_REQ_DRM_K;
return wdata;
@@ -1196,6 +1161,8 @@ err:
static void wiimote_destroy(struct wiimote_data *wdata)
{
+ wiidebug_deinit(wdata);
+ wiiext_deinit(wdata);
wiimote_leds_destroy(wdata);
power_supply_unregister(&wdata->battery);
@@ -1214,6 +1181,8 @@ static int wiimote_hid_probe(struct hid_device *hdev,
struct wiimote_data *wdata;
int ret;
+ hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;
+
wdata = wiimote_create(hdev);
if (!wdata) {
hid_err(hdev, "Can't alloc device\n");
@@ -1263,10 +1232,20 @@ static int wiimote_hid_probe(struct hid_device *hdev,
goto err_battery;
}
+ power_supply_powers(&wdata->battery, &hdev->dev);
+
ret = wiimote_leds_create(wdata);
if (ret)
goto err_free;
+ ret = wiiext_init(wdata);
+ if (ret)
+ goto err_free;
+
+ ret = wiidebug_init(wdata);
+ if (ret)
+ goto err_free;
+
hid_info(hdev, "New device registered\n");
/* by default set led1 after device initialization */
@@ -1343,4 +1322,3 @@ module_exit(wiimote_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
MODULE_DESCRIPTION(WIIMOTE_NAME " Device Driver");
-MODULE_VERSION(WIIMOTE_VERSION);
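As an aside (not part of the patch): a minimal sketch of how a caller drives the synchronous read helper added above. wiimote_cmd_read() sleeps until handler_data() completes the request or the one-second timeout in wiimote_cmd_wait() expires, so the cmd mutex must be held around it. read_extension_id() is hypothetical; the real callers are in hid-wiimote-ext.c below.

static ssize_t read_extension_id(struct wiimote_data *wdata, __u8 *id)
{
	ssize_t ret;

	if (wiimote_cmd_acquire(wdata))		/* interruptible cmd mutex */
		return -ERESTARTSYS;

	/* registers 0xa400fe..ff hold the extension identifier */
	ret = wiimote_cmd_read(wdata, 0xa400fe, id, 2);

	wiimote_cmd_release(wdata);
	return ret;	/* bytes read on success, negative error otherwise */
}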
diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
new file mode 100644
index 0000000..17dabc1
--- /dev/null
+++ b/drivers/hid/hid-wiimote-debug.c
@@ -0,0 +1,227 @@
+/*
+ * Debug support for HID Nintendo Wiimote devices
+ * Copyright (c) 2011 David Herrmann
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include "hid-wiimote.h"
+
+struct wiimote_debug {
+ struct wiimote_data *wdata;
+ struct dentry *eeprom;
+ struct dentry *drm;
+};
+
+static int wiidebug_eeprom_open(struct inode *i, struct file *f)
+{
+ f->private_data = i->i_private;
+ return 0;
+}
+
+static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
+ loff_t *off)
+{
+ struct wiimote_debug *dbg = f->private_data;
+ struct wiimote_data *wdata = dbg->wdata;
+ unsigned long flags;
+ ssize_t ret;
+ char buf[16];
+ __u16 size;
+
+ if (s == 0)
+ return -EINVAL;
+ if (*off > 0xffffff)
+ return 0;
+ if (s > 16)
+ s = 16;
+
+ ret = wiimote_cmd_acquire(wdata);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.cmd_read_size = s;
+ wdata->state.cmd_read_buf = buf;
+ wiimote_cmd_set(wdata, WIIPROTO_REQ_RMEM, *off & 0xffff);
+ wiiproto_req_reeprom(wdata, *off, s);
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ ret = wiimote_cmd_wait(wdata);
+ if (!ret)
+ size = wdata->state.cmd_read_size;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->state.cmd_read_buf = NULL;
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ wiimote_cmd_release(wdata);
+
+ if (ret)
+ return ret;
+ else if (size == 0)
+ return -EIO;
+
+ if (copy_to_user(u, buf, size))
+ return -EFAULT;
+
+ *off += size;
+ ret = size;
+
+ return ret;
+}
+
+static const struct file_operations wiidebug_eeprom_fops = {
+ .owner = THIS_MODULE,
+ .open = wiidebug_eeprom_open,
+ .read = wiidebug_eeprom_read,
+ .llseek = generic_file_llseek,
+};
+
+static const char *wiidebug_drmmap[] = {
+ [WIIPROTO_REQ_NULL] = "NULL",
+ [WIIPROTO_REQ_DRM_K] = "K",
+ [WIIPROTO_REQ_DRM_KA] = "KA",
+ [WIIPROTO_REQ_DRM_KE] = "KE",
+ [WIIPROTO_REQ_DRM_KAI] = "KAI",
+ [WIIPROTO_REQ_DRM_KEE] = "KEE",
+ [WIIPROTO_REQ_DRM_KAE] = "KAE",
+ [WIIPROTO_REQ_DRM_KIE] = "KIE",
+ [WIIPROTO_REQ_DRM_KAIE] = "KAIE",
+ [WIIPROTO_REQ_DRM_E] = "E",
+ [WIIPROTO_REQ_DRM_SKAI1] = "SKAI1",
+ [WIIPROTO_REQ_DRM_SKAI2] = "SKAI2",
+ [WIIPROTO_REQ_MAX] = NULL
+};
+
+static int wiidebug_drm_show(struct seq_file *f, void *p)
+{
+ struct wiimote_debug *dbg = f->private;
+ const char *str = NULL;
+ unsigned long flags;
+ __u8 drm;
+
+ spin_lock_irqsave(&dbg->wdata->state.lock, flags);
+ drm = dbg->wdata->state.drm;
+ spin_unlock_irqrestore(&dbg->wdata->state.lock, flags);
+
+ if (drm < WIIPROTO_REQ_MAX)
+ str = wiidebug_drmmap[drm];
+ if (!str)
+ str = "unknown";
+
+ seq_printf(f, "%s\n", str);
+
+ return 0;
+}
+
+static int wiidebug_drm_open(struct inode *i, struct file *f)
+{
+ return single_open(f, wiidebug_drm_show, i->i_private);
+}
+
+static ssize_t wiidebug_drm_write(struct file *f, const char __user *u,
+ size_t s, loff_t *off)
+{
+ struct wiimote_debug *dbg = f->private_data;
+ unsigned long flags;
+ char buf[16];
+ ssize_t len;
+ int i;
+
+ if (s == 0)
+ return -EINVAL;
+
+ len = min((size_t) 15, s);
+ if (copy_from_user(buf, u, len))
+ return -EFAULT;
+
+ if (len > 0 && buf[len - 1] == '\n')
+ len--;
+ buf[len] = 0;
+
+ for (i = 0; i < WIIPROTO_REQ_MAX; ++i) {
+ if (!wiidebug_drmmap[i])
+ continue;
+ if (!strcasecmp(buf, wiidebug_drmmap[i]))
+ break;
+ }
+
+ if (i == WIIPROTO_REQ_MAX)
+ i = simple_strtoul(buf, NULL, 10);
+
+ spin_lock_irqsave(&dbg->wdata->state.lock, flags);
+ wiiproto_req_drm(dbg->wdata, (__u8) i);
+ spin_unlock_irqrestore(&dbg->wdata->state.lock, flags);
+
+ return len;
+}
+
+static const struct file_operations wiidebug_drm_fops = {
+ .owner = THIS_MODULE,
+ .open = wiidebug_drm_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = wiidebug_drm_write,
+ .release = single_release,
+};
+
+int wiidebug_init(struct wiimote_data *wdata)
+{
+ struct wiimote_debug *dbg;
+ unsigned long flags;
+ int ret = -ENOMEM;
+
+ dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
+ if (!dbg)
+ return -ENOMEM;
+
+ dbg->wdata = wdata;
+
+ dbg->eeprom = debugfs_create_file("eeprom", S_IRUSR,
+ dbg->wdata->hdev->debug_dir, dbg, &wiidebug_eeprom_fops);
+ if (!dbg->eeprom)
+ goto err;
+
+ dbg->drm = debugfs_create_file("drm", S_IRUSR,
+ dbg->wdata->hdev->debug_dir, dbg, &wiidebug_drm_fops);
+ if (!dbg->drm)
+ goto err_drm;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->debug = dbg;
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ return 0;
+
+err_drm:
+ debugfs_remove(dbg->eeprom);
+err:
+ kfree(dbg);
+ return ret;
+}
+
+void wiidebug_deinit(struct wiimote_data *wdata)
+{
+ struct wiimote_debug *dbg = wdata->debug;
+ unsigned long flags;
+
+ if (!dbg)
+ return;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->debug = NULL;
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ debugfs_remove(dbg->drm);
+ debugfs_remove(dbg->eeprom);
+ kfree(dbg);
+}
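As an aside (not part of the patch): a user-space sketch of reading the eeprom file registered above. Each read() returns at most 16 bytes fetched synchronously from the remote; the debugfs path is hypothetical and depends on the HID device name under /sys/kernel/debug/hid/.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	ssize_t n;
	int fd, i;

	fd = open("/sys/kernel/debug/hid/0005:057E:0306.0001/eeprom", O_RDONLY);
	if (fd < 0)
		return 1;

	n = read(fd, buf, sizeof(buf));		/* first 16 EEPROM bytes */
	for (i = 0; i < n; i++)
		printf("%02x ", buf[i]);
	printf("\n");

	close(fd);
	return n < 0 ? 1 : 0;
}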
diff --git a/drivers/hid/hid-wiimote-ext.c b/drivers/hid/hid-wiimote-ext.c
new file mode 100644
index 0000000..aa95870
--- /dev/null
+++ b/drivers/hid/hid-wiimote-ext.c
@@ -0,0 +1,752 @@
+/*
+ * HID driver for Nintendo Wiimote extension devices
+ * Copyright (c) 2011 David Herrmann
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include "hid-wiimote.h"
+
+struct wiimote_ext {
+ struct wiimote_data *wdata;
+ struct work_struct worker;
+ struct input_dev *input;
+ struct input_dev *mp_input;
+
+ atomic_t opened;
+ atomic_t mp_opened;
+ bool plugged;
+ bool mp_plugged;
+ bool motionp;
+ __u8 ext_type;
+};
+
+enum wiiext_type {
+ WIIEXT_NONE, /* placeholder */
+ WIIEXT_CLASSIC, /* Nintendo classic controller */
+ WIIEXT_NUNCHUCK, /* Nintendo nunchuck controller */
+};
+
+enum wiiext_keys {
+ WIIEXT_KEY_C,
+ WIIEXT_KEY_Z,
+ WIIEXT_KEY_A,
+ WIIEXT_KEY_B,
+ WIIEXT_KEY_X,
+ WIIEXT_KEY_Y,
+ WIIEXT_KEY_ZL,
+ WIIEXT_KEY_ZR,
+ WIIEXT_KEY_PLUS,
+ WIIEXT_KEY_MINUS,
+ WIIEXT_KEY_HOME,
+ WIIEXT_KEY_LEFT,
+ WIIEXT_KEY_RIGHT,
+ WIIEXT_KEY_UP,
+ WIIEXT_KEY_DOWN,
+ WIIEXT_KEY_LT,
+ WIIEXT_KEY_RT,
+ WIIEXT_KEY_COUNT
+};
+
+static __u16 wiiext_keymap[] = {
+ BTN_C, /* WIIEXT_KEY_C */
+ BTN_Z, /* WIIEXT_KEY_Z */
+ BTN_A, /* WIIEXT_KEY_A */
+ BTN_B, /* WIIEXT_KEY_B */
+ BTN_X, /* WIIEXT_KEY_X */
+ BTN_Y, /* WIIEXT_KEY_Y */
+ BTN_TL2, /* WIIEXT_KEY_ZL */
+ BTN_TR2, /* WIIEXT_KEY_ZR */
+ KEY_NEXT, /* WIIEXT_KEY_PLUS */
+ KEY_PREVIOUS, /* WIIEXT_KEY_MINUS */
+ BTN_MODE, /* WIIEXT_KEY_HOME */
+ KEY_LEFT, /* WIIEXT_KEY_LEFT */
+ KEY_RIGHT, /* WIIEXT_KEY_RIGHT */
+ KEY_UP, /* WIIEXT_KEY_UP */
+ KEY_DOWN, /* WIIEXT_KEY_DOWN */
+ BTN_TL, /* WIIEXT_KEY_LT */
+ BTN_TR, /* WIIEXT_KEY_RT */
+};
+
+/* disable all extensions */
+static void ext_disable(struct wiimote_ext *ext)
+{
+ unsigned long flags;
+ __u8 wmem = 0x55;
+
+ if (!wiimote_cmd_acquire(ext->wdata)) {
+ wiimote_cmd_write(ext->wdata, 0xa400f0, &wmem, sizeof(wmem));
+ wiimote_cmd_release(ext->wdata);
+ }
+
+ spin_lock_irqsave(&ext->wdata->state.lock, flags);
+ ext->motionp = false;
+ ext->ext_type = WIIEXT_NONE;
+ wiiproto_req_drm(ext->wdata, WIIPROTO_REQ_NULL);
+ spin_unlock_irqrestore(&ext->wdata->state.lock, flags);
+}
+
+static bool motionp_read(struct wiimote_ext *ext)
+{
+ __u8 rmem[2], wmem;
+ ssize_t ret;
+ bool avail = false;
+
+ if (!atomic_read(&ext->mp_opened))
+ return false;
+
+ if (wiimote_cmd_acquire(ext->wdata))
+ return false;
+
+ /* initialize motion plus */
+ wmem = 0x55;
+ ret = wiimote_cmd_write(ext->wdata, 0xa600f0, &wmem, sizeof(wmem));
+ if (ret)
+ goto error;
+
+ /* read motion plus ID */
+ ret = wiimote_cmd_read(ext->wdata, 0xa600fe, rmem, 2);
+ if (ret == 2 || rmem[1] == 0x5)
+ avail = true;
+
+error:
+ wiimote_cmd_release(ext->wdata);
+ return avail;
+}
+
+static __u8 ext_read(struct wiimote_ext *ext)
+{
+ ssize_t ret;
+ __u8 rmem[2], wmem;
+ __u8 type = WIIEXT_NONE;
+
+ if (!ext->plugged || !atomic_read(&ext->opened))
+ return WIIEXT_NONE;
+
+ if (wiimote_cmd_acquire(ext->wdata))
+ return WIIEXT_NONE;
+
+ /* initialize extension */
+ wmem = 0x55;
+ ret = wiimote_cmd_write(ext->wdata, 0xa400f0, &wmem, sizeof(wmem));
+ if (!ret) {
+ /* disable encryption */
+ wmem = 0x0;
+ wiimote_cmd_write(ext->wdata, 0xa400fb, &wmem, sizeof(wmem));
+ }
+
+ /* read extension ID */
+ ret = wiimote_cmd_read(ext->wdata, 0xa400fe, rmem, 2);
+ if (ret == 2) {
+ if (rmem[0] == 0 && rmem[1] == 0)
+ type = WIIEXT_NUNCHUCK;
+ else if (rmem[0] == 0x01 && rmem[1] == 0x01)
+ type = WIIEXT_CLASSIC;
+ }
+
+ wiimote_cmd_release(ext->wdata);
+
+ return type;
+}
+
+static void ext_enable(struct wiimote_ext *ext, bool motionp, __u8 ext_type)
+{
+ unsigned long flags;
+ __u8 wmem;
+ int ret;
+
+ if (motionp) {
+ if (wiimote_cmd_acquire(ext->wdata))
+ return;
+
+ if (ext_type == WIIEXT_CLASSIC)
+ wmem = 0x07;
+ else if (ext_type == WIIEXT_NUNCHUCK)
+ wmem = 0x05;
+ else
+ wmem = 0x04;
+
+ ret = wiimote_cmd_write(ext->wdata, 0xa600fe, &wmem, sizeof(wmem));
+ wiimote_cmd_release(ext->wdata);
+ if (ret)
+ return;
+ }
+
+ spin_lock_irqsave(&ext->wdata->state.lock, flags);
+ ext->motionp = motionp;
+ ext->ext_type = ext_type;
+ wiiproto_req_drm(ext->wdata, WIIPROTO_REQ_NULL);
+ spin_unlock_irqrestore(&ext->wdata->state.lock, flags);
+}
+
+static void wiiext_worker(struct work_struct *work)
+{
+ struct wiimote_ext *ext = container_of(work, struct wiimote_ext,
+ worker);
+ bool motionp;
+ __u8 ext_type;
+
+ ext_disable(ext);
+ motionp = motionp_read(ext);
+ ext_type = ext_read(ext);
+ ext_enable(ext, motionp, ext_type);
+}
+
+/* schedule work only once, otherwise mark for reschedule */
+static void wiiext_schedule(struct wiimote_ext *ext)
+{
+ queue_work(system_nrt_wq, &ext->worker);
+}
+
+/*
+ * Reacts to extension port events
+ * Whenever the driver gets an event from the wiimote that an extension has been
+ * plugged or unplugged, this function shall be called. It checks which extensions
+ * are connected and initializes and activates them.
+ * This can be called in atomic context. The initialization is done in a
+ * separate worker thread. The state.lock spinlock must be held by the caller.
+ */
+void wiiext_event(struct wiimote_data *wdata, bool plugged)
+{
+ if (!wdata->ext)
+ return;
+
+ if (wdata->ext->plugged == plugged)
+ return;
+
+ wdata->ext->plugged = plugged;
+
+ if (!plugged)
+ wdata->ext->mp_plugged = false;
+
+ /*
+ * We need to call wiiext_schedule(wdata->ext) here, however, the
+ * extension initialization logic is not fully understood and so
+ * automatic initialization is not supported, yet.
+ */
+}
+
+/*
+ * Returns true if the current DRM mode should contain extension data and false
+ * if there is no interest in extension data.
+ * All supported extensions send 6 byte extension data so any DRM that contains
+ * extension bytes is fine.
+ * The caller must hold the state.lock spinlock.
+ */
+bool wiiext_active(struct wiimote_data *wdata)
+{
+ if (!wdata->ext)
+ return false;
+
+ return wdata->ext->motionp || wdata->ext->ext_type;
+}
+
+static void handler_motionp(struct wiimote_ext *ext, const __u8 *payload)
+{
+ __s32 x, y, z;
+ bool plugged;
+
+ /* | 8 7 6 5 4 3 | 2 | 1 |
+ * -----+------------------------------+-----+-----+
+ * 1 | Yaw Speed <7:0> |
+ * 2 | Roll Speed <7:0> |
+ * 3 | Pitch Speed <7:0> |
+ * -----+------------------------------+-----+-----+
+ * 4 | Yaw Speed <13:8> | Yaw |Pitch|
+ * -----+------------------------------+-----+-----+
+ * 5 | Roll Speed <13:8> |Roll | Ext |
+ * -----+------------------------------+-----+-----+
+ * 6 | Pitch Speed <13:8> | 1 | 0 |
+ * -----+------------------------------+-----+-----+
+ * The single bits Yaw, Roll, Pitch in the lower right corner specify
+ * whether the wiimote is rotating fast (0) or slow (1). Speed for slow
+ * rotation is 440 deg/s and for fast rotation 2000 deg/s. To get a
+ * linear scale we multiply by 2000/440 = ~4.5454 which is 18 for fast
+ * and 9 for slow.
+ * If the wiimote is not rotating the sensor reports 2^13 = 8192.
+ * Ext specifies whether an extension is connected to the motionp.
+ */
+
+ x = payload[0];
+ y = payload[1];
+ z = payload[2];
+
+ x |= (((__u16)payload[3]) << 6) & 0xff00;
+ y |= (((__u16)payload[4]) << 6) & 0xff00;
+ z |= (((__u16)payload[5]) << 6) & 0xff00;
+
+ x -= 8192;
+ y -= 8192;
+ z -= 8192;
+
+ if (!(payload[3] & 0x02))
+ x *= 18;
+ else
+ x *= 9;
+ if (!(payload[4] & 0x02))
+ y *= 18;
+ else
+ y *= 9;
+ if (!(payload[3] & 0x01))
+ z *= 18;
+ else
+ z *= 9;
+
+ input_report_abs(ext->mp_input, ABS_RX, x);
+ input_report_abs(ext->mp_input, ABS_RY, y);
+ input_report_abs(ext->mp_input, ABS_RZ, z);
+ input_sync(ext->mp_input);
+
+ plugged = payload[5] & 0x01;
+ if (plugged != ext->mp_plugged)
+ ext->mp_plugged = plugged;
+}
+
+static void handler_nunchuck(struct wiimote_ext *ext, const __u8 *payload)
+{
+ __s16 x, y, z, bx, by;
+
+ /* Byte | 8 7 | 6 5 | 4 3 | 2 | 1 |
+ * -----+----------+---------+---------+----+-----+
+ * 1 | Button X <7:0> |
+ * 2 | Button Y <7:0> |
+ * -----+----------+---------+---------+----+-----+
+ * 3 | Speed X <9:2> |
+ * 4 | Speed Y <9:2> |
+ * 5 | Speed Z <9:2> |
+ * -----+----------+---------+---------+----+-----+
+ * 6 | Z <1:0> | Y <1:0> | X <1:0> | BC | BZ |
+ * -----+----------+---------+---------+----+-----+
+ * Button X/Y is the analog stick. Speed X, Y and Z are the
+ * accelerometer data in the same format as the wiimote's accelerometer.
+ * The 6th byte contains the LSBs of the accelerometer data.
+ * BC and BZ are the C and Z buttons: 0 means pressed
+ *
+ * If reported interleaved with motionp, then the layout changes. The
+ * 5th and 6th byte changes to:
+ * -----+-----------------------------------+-----+
+ * 5 | Speed Z <9:3> | EXT |
+ * -----+--------+-----+-----+----+----+----+-----+
+ * 6 |Z <2:1> |Y <1>|X <1>| BC | BZ | 0 | 0 |
+ * -----+--------+-----+-----+----+----+----+-----+
+ * All three accelerometer values lose their LSB. The other data is
+ * still available but slightly moved.
+ *
+ * Center data for button values is 128. Center value for accelerometer
+ * values is 512 / 0x200
+ */
+
+ bx = payload[0];
+ by = payload[1];
+ bx -= 128;
+ by -= 128;
+
+ x = payload[2] << 2;
+ y = payload[3] << 2;
+ z = payload[4] << 2;
+
+ if (ext->motionp) {
+ x |= (payload[5] >> 3) & 0x02;
+ y |= (payload[5] >> 4) & 0x02;
+ z &= ~0x4;
+ z |= (payload[5] >> 5) & 0x06;
+ } else {
+ x |= (payload[5] >> 2) & 0x03;
+ y |= (payload[5] >> 4) & 0x03;
+ z |= (payload[5] >> 6) & 0x03;
+ }
+
+ x -= 0x200;
+ y -= 0x200;
+ z -= 0x200;
+
+ input_report_abs(ext->input, ABS_HAT0X, bx);
+ input_report_abs(ext->input, ABS_HAT0Y, by);
+
+ input_report_abs(ext->input, ABS_RX, x);
+ input_report_abs(ext->input, ABS_RY, y);
+ input_report_abs(ext->input, ABS_RZ, z);
+
+ if (ext->motionp) {
+ input_report_key(ext->input,
+ wiiext_keymap[WIIEXT_KEY_Z], !!(payload[5] & 0x04));
+ input_report_key(ext->input,
+ wiiext_keymap[WIIEXT_KEY_C], !!(payload[5] & 0x08));
+ } else {
+ input_report_key(ext->input,
+ wiiext_keymap[WIIEXT_KEY_Z], !!(payload[5] & 0x01));
+ input_report_key(ext->input,
+ wiiext_keymap[WIIEXT_KEY_C], !!(payload[5] & 0x02));
+ }
+
+ input_sync(ext->input);
+}
+
+static void handler_classic(struct wiimote_ext *ext, const __u8 *payload)
+{
+ __s8 rx, ry, lx, ly, lt, rt;
+
+ /* Byte | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 1 | RX <5:4> | LX <5:0> |
+ * 2 | RX <3:2> | LY <5:0> |
+ * -----+-----+-----+-----+-----------------------------+
+ * 3 |RX<1>| LT <5:4> | RY <5:1> |
+ * -----+-----+-----------+-----------------------------+
+ * 4 | LT <3:1> | RT <5:1> |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 5 | BDR | BDD | BLT | B- | BH | B+ | BRT | 1 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 6 | BZL | BB | BY | BA | BX | BZR | BDL | BDU |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * All buttons are 0 if pressed
+ * RX and RY are right analog stick
+ * LX and LY are left analog stick
+ * LT is left trigger, RT is right trigger
+ * BLT is 0 if left trigger is fully pressed
+ * BRT is 0 if right trigger is fully pressed
+ * BDR, BDD, BDL, BDU form the D-Pad with right, down, left, up buttons
+ * BZL is left Z button and BZR is right Z button
+ * B-, BH, B+ are +, HOME and - buttons
+ * BB, BY, BA, BX are the B, Y, A, X buttons
+ * The LSBs of RX, RY, LT and RT are not transmitted and are always 0.
+ *
+ * With motionp enabled it changes slightly to this:
+ * Byte | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 1 | RX <4:3> | LX <5:1> | BDU |
+ * 2 | RX <2:1> | LY <5:1> | BDL |
+ * -----+-----+-----+-----+-----------------------+-----+
+ * 3 |RX<0>| LT <4:3> | RY <4:0> |
+ * -----+-----+-----------+-----------------------------+
+ * 4 | LT <2:0> | RT <4:0> |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 5 | BDR | BDD | BLT | B- | BH | B+ | BRT | EXT |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * 6 | BZL | BB | BY | BA | BX | BZR | 0 | 0 |
+ * -----+-----+-----+-----+-----+-----+-----+-----+-----+
+ * Only the LSBs of LX and LY are lost. BDU and BDL are moved, the rest
+ * is the same as before.
+ */
+
+ if (ext->motionp) {
+ lx = payload[0] & 0x3e;
+ ly = payload[1] & 0x3e;
+ } else {
+ lx = payload[0] & 0x3f;
+ ly = payload[1] & 0x3f;
+ }
+
+ rx = (payload[0] >> 3) & 0x18;
+ rx |= (payload[1] >> 5) & 0x06;
+ rx |= (payload[2] >> 7) & 0x01;
+ ry = payload[2] & 0x1f;
+
+ rt = payload[3] & 0x1f;
+ lt = (payload[2] >> 2) & 0x18;
+ lt |= (payload[3] >> 5) & 0x07;
+
+ rx <<= 1;
+ ry <<= 1;
+ rt <<= 1;
+ lt <<= 1;
+
+ input_report_abs(ext->input, ABS_HAT1X, lx - 0x20);
+ input_report_abs(ext->input, ABS_HAT1Y, ly - 0x20);
+ input_report_abs(ext->input, ABS_HAT2X, rx - 0x20);
+ input_report_abs(ext->input, ABS_HAT2Y, ry - 0x20);
+ input_report_abs(ext->input, ABS_HAT3X, rt - 0x20);
+ input_report_abs(ext->input, ABS_HAT3Y, lt - 0x20);
+
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_RIGHT],
+ !!(payload[4] & 0x80));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_DOWN],
+ !!(payload[4] & 0x40));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_LT],
+ !!(payload[4] & 0x20));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_MINUS],
+ !!(payload[4] & 0x10));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_HOME],
+ !!(payload[4] & 0x08));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_PLUS],
+ !!(payload[4] & 0x04));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_RT],
+ !!(payload[4] & 0x02));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_ZL],
+ !!(payload[5] & 0x80));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_B],
+ !!(payload[5] & 0x40));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_Y],
+ !!(payload[5] & 0x20));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_A],
+ !!(payload[5] & 0x10));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_X],
+ !!(payload[5] & 0x08));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_ZR],
+ !!(payload[5] & 0x04));
+
+ if (ext->motionp) {
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_UP],
+ !!(payload[0] & 0x01));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_LEFT],
+ !!(payload[1] & 0x01));
+ } else {
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_UP],
+ !!(payload[5] & 0x01));
+ input_report_key(ext->input, wiiext_keymap[WIIEXT_KEY_LEFT],
+ !!(payload[5] & 0x02));
+ }
+
+ input_sync(ext->input);
+}
+
+/* call this with state.lock spinlock held */
+void wiiext_handle(struct wiimote_data *wdata, const __u8 *payload)
+{
+ struct wiimote_ext *ext = wdata->ext;
+
+ if (!ext)
+ return;
+
+ if (ext->motionp && (payload[5] & 0x02)) {
+ handler_motionp(ext, payload);
+ } else if (ext->ext_type == WIIEXT_NUNCHUCK) {
+ handler_nunchuck(ext, payload);
+ } else if (ext->ext_type == WIIEXT_CLASSIC) {
+ handler_classic(ext, payload);
+ }
+}
+
+static ssize_t wiiext_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct wiimote_data *wdata = dev_to_wii(dev);
+ __u8 type = WIIEXT_NONE;
+ bool motionp = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ if (wdata->ext) {
+ motionp = wdata->ext->motionp;
+ type = wdata->ext->ext_type;
+ }
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ if (type == WIIEXT_NUNCHUCK) {
+ if (motionp)
+ return sprintf(buf, "motionp+nunchuck\n");
+ else
+ return sprintf(buf, "nunchuck\n");
+ } else if (type == WIIEXT_CLASSIC) {
+ if (motionp)
+ return sprintf(buf, "motionp+classic\n");
+ else
+ return sprintf(buf, "classic\n");
+ } else {
+ if (motionp)
+ return sprintf(buf, "motionp\n");
+ else
+ return sprintf(buf, "none\n");
+ }
+}
+
+static DEVICE_ATTR(extension, S_IRUGO, wiiext_show, NULL);
+
+static int wiiext_input_open(struct input_dev *dev)
+{
+ struct wiimote_ext *ext = input_get_drvdata(dev);
+ int ret;
+
+ ret = hid_hw_open(ext->wdata->hdev);
+ if (ret)
+ return ret;
+
+ atomic_inc(&ext->opened);
+ wiiext_schedule(ext);
+
+ return 0;
+}
+
+static void wiiext_input_close(struct input_dev *dev)
+{
+ struct wiimote_ext *ext = input_get_drvdata(dev);
+
+ atomic_dec(&ext->opened);
+ wiiext_schedule(ext);
+ hid_hw_close(ext->wdata->hdev);
+}
+
+static int wiiext_mp_open(struct input_dev *dev)
+{
+ struct wiimote_ext *ext = input_get_drvdata(dev);
+ int ret;
+
+ ret = hid_hw_open(ext->wdata->hdev);
+ if (ret)
+ return ret;
+
+ atomic_inc(&ext->mp_opened);
+ wiiext_schedule(ext);
+
+ return 0;
+}
+
+static void wiiext_mp_close(struct input_dev *dev)
+{
+ struct wiimote_ext *ext = input_get_drvdata(dev);
+
+ atomic_dec(&ext->mp_opened);
+ wiiext_schedule(ext);
+ hid_hw_close(ext->wdata->hdev);
+}
+
+/* Initializes the extension driver of a wiimote */
+int wiiext_init(struct wiimote_data *wdata)
+{
+ struct wiimote_ext *ext;
+ unsigned long flags;
+ int ret, i;
+
+ ext = kzalloc(sizeof(*ext), GFP_KERNEL);
+ if (!ext)
+ return -ENOMEM;
+
+ ext->wdata = wdata;
+ INIT_WORK(&ext->worker, wiiext_worker);
+
+ ext->input = input_allocate_device();
+ if (!ext->input) {
+ ret = -ENOMEM;
+ goto err_input;
+ }
+
+ input_set_drvdata(ext->input, ext);
+ ext->input->open = wiiext_input_open;
+ ext->input->close = wiiext_input_close;
+ ext->input->dev.parent = &wdata->hdev->dev;
+ ext->input->id.bustype = wdata->hdev->bus;
+ ext->input->id.vendor = wdata->hdev->vendor;
+ ext->input->id.product = wdata->hdev->product;
+ ext->input->id.version = wdata->hdev->version;
+ ext->input->name = WIIMOTE_NAME " Extension";
+
+ set_bit(EV_KEY, ext->input->evbit);
+ for (i = 0; i < WIIEXT_KEY_COUNT; ++i)
+ set_bit(wiiext_keymap[i], ext->input->keybit);
+
+ set_bit(EV_ABS, ext->input->evbit);
+ set_bit(ABS_HAT0X, ext->input->absbit);
+ set_bit(ABS_HAT0Y, ext->input->absbit);
+ set_bit(ABS_HAT1X, ext->input->absbit);
+ set_bit(ABS_HAT1Y, ext->input->absbit);
+ set_bit(ABS_HAT2X, ext->input->absbit);
+ set_bit(ABS_HAT2Y, ext->input->absbit);
+ set_bit(ABS_HAT3X, ext->input->absbit);
+ set_bit(ABS_HAT3Y, ext->input->absbit);
+ input_set_abs_params(ext->input, ABS_HAT0X, -120, 120, 2, 4);
+ input_set_abs_params(ext->input, ABS_HAT0Y, -120, 120, 2, 4);
+ input_set_abs_params(ext->input, ABS_HAT1X, -30, 30, 1, 1);
+ input_set_abs_params(ext->input, ABS_HAT1Y, -30, 30, 1, 1);
+ input_set_abs_params(ext->input, ABS_HAT2X, -30, 30, 1, 1);
+ input_set_abs_params(ext->input, ABS_HAT2Y, -30, 30, 1, 1);
+ input_set_abs_params(ext->input, ABS_HAT3X, -30, 30, 1, 1);
+ input_set_abs_params(ext->input, ABS_HAT3Y, -30, 30, 1, 1);
+ set_bit(ABS_RX, ext->input->absbit);
+ set_bit(ABS_RY, ext->input->absbit);
+ set_bit(ABS_RZ, ext->input->absbit);
+ input_set_abs_params(ext->input, ABS_RX, -500, 500, 2, 4);
+ input_set_abs_params(ext->input, ABS_RY, -500, 500, 2, 4);
+ input_set_abs_params(ext->input, ABS_RZ, -500, 500, 2, 4);
+
+ ret = input_register_device(ext->input);
+ if (ret) {
+ input_free_device(ext->input);
+ goto err_input;
+ }
+
+ ext->mp_input = input_allocate_device();
+ if (!ext->mp_input) {
+ ret = -ENOMEM;
+ goto err_mp;
+ }
+
+ input_set_drvdata(ext->mp_input, ext);
+ ext->mp_input->open = wiiext_mp_open;
+ ext->mp_input->close = wiiext_mp_close;
+ ext->mp_input->dev.parent = &wdata->hdev->dev;
+ ext->mp_input->id.bustype = wdata->hdev->bus;
+ ext->mp_input->id.vendor = wdata->hdev->vendor;
+ ext->mp_input->id.product = wdata->hdev->product;
+ ext->mp_input->id.version = wdata->hdev->version;
+ ext->mp_input->name = WIIMOTE_NAME " Motion+";
+
+ set_bit(EV_ABS, ext->mp_input->evbit);
+ set_bit(ABS_RX, ext->mp_input->absbit);
+ set_bit(ABS_RY, ext->mp_input->absbit);
+ set_bit(ABS_RZ, ext->mp_input->absbit);
+ input_set_abs_params(ext->mp_input, ABS_RX, -160000, 160000, 4, 8);
+ input_set_abs_params(ext->mp_input, ABS_RY, -160000, 160000, 4, 8);
+ input_set_abs_params(ext->mp_input, ABS_RZ, -160000, 160000, 4, 8);
+
+ ret = input_register_device(ext->mp_input);
+ if (ret) {
+ input_free_device(ext->mp_input);
+ goto err_mp;
+ }
+
+ ret = device_create_file(&wdata->hdev->dev, &dev_attr_extension);
+ if (ret)
+ goto err_dev;
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->ext = ext;
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ return 0;
+
+err_dev:
+ input_unregister_device(ext->mp_input);
+err_mp:
+ input_unregister_device(ext->input);
+err_input:
+ kfree(ext);
+ return ret;
+}
+
+/* Deinitializes the extension driver of a wiimote */
+void wiiext_deinit(struct wiimote_data *wdata)
+{
+ struct wiimote_ext *ext = wdata->ext;
+ unsigned long flags;
+
+ if (!ext)
+ return;
+
+ /*
+ * We first unset wdata->ext to avoid further input from the wiimote
+ * core. The worker thread does not access this pointer so it is not
+ * affected by this.
+ * We kill the worker after this so it does not get respawned during
+ * deinitialization.
+ */
+
+ spin_lock_irqsave(&wdata->state.lock, flags);
+ wdata->ext = NULL;
+ spin_unlock_irqrestore(&wdata->state.lock, flags);
+
+ device_remove_file(&wdata->hdev->dev, &dev_attr_extension);
+ input_unregister_device(ext->mp_input);
+ input_unregister_device(ext->input);
+
+ cancel_work_sync(&ext->worker);
+ kfree(ext);
+}
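As an aside (not part of the patch): a worked example of the Motion Plus decoding done by handler_motionp() above. With payload[0] = 0x40 and payload[3] = 0x80, yaw is assembled as 0x40 | ((0x80 << 6) & 0xff00) = 0x2040 = 8256; subtracting the at-rest value 8192 leaves 64, and because the slow bit (payload[3] & 0x02) is clear the sample is scaled by 18, giving 1152. A resting remote reports 8192 and therefore decodes to 0. The helper below restates that per-axis math; it is hypothetical and only valid for yaw and roll (pitch keeps its mode bit in payload[3] & 0x01).

static __s32 motionp_decode_axis(__u8 lo, __u8 hi)
{
	__s32 v = lo | (((__u16)hi << 6) & 0xff00);

	v -= 8192;				/* 8192 means no rotation */
	return (hi & 0x02) ? v * 9 : v * 18;	/* slow vs. fast mode */
}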
diff --git a/drivers/hid/hid-wiimote.h b/drivers/hid/hid-wiimote.h
new file mode 100644
index 0000000..c81dbeb
--- /dev/null
+++ b/drivers/hid/hid-wiimote.h
@@ -0,0 +1,208 @@
+#ifndef __HID_WIIMOTE_H
+#define __HID_WIIMOTE_H
+
+/*
+ * HID driver for Nintendo Wiimote devices
+ * Copyright (c) 2011 David Herrmann
+ */
+
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/hid.h>
+#include <linux/input.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/power_supply.h>
+#include <linux/spinlock.h>
+
+#define WIIMOTE_NAME "Nintendo Wii Remote"
+#define WIIMOTE_BUFSIZE 32
+
+#define WIIPROTO_FLAG_LED1 0x01
+#define WIIPROTO_FLAG_LED2 0x02
+#define WIIPROTO_FLAG_LED3 0x04
+#define WIIPROTO_FLAG_LED4 0x08
+#define WIIPROTO_FLAG_RUMBLE 0x10
+#define WIIPROTO_FLAG_ACCEL 0x20
+#define WIIPROTO_FLAG_IR_BASIC 0x40
+#define WIIPROTO_FLAG_IR_EXT 0x80
+#define WIIPROTO_FLAG_IR_FULL 0xc0 /* IR_BASIC | IR_EXT */
+#define WIIPROTO_FLAGS_LEDS (WIIPROTO_FLAG_LED1 | WIIPROTO_FLAG_LED2 | \
+ WIIPROTO_FLAG_LED3 | WIIPROTO_FLAG_LED4)
+#define WIIPROTO_FLAGS_IR (WIIPROTO_FLAG_IR_BASIC | WIIPROTO_FLAG_IR_EXT | \
+ WIIPROTO_FLAG_IR_FULL)
+
+/* return flag for led \num */
+#define WIIPROTO_FLAG_LED(num) (WIIPROTO_FLAG_LED1 << (num - 1))
+
+struct wiimote_buf {
+ __u8 data[HID_MAX_BUFFER_SIZE];
+ size_t size;
+};
+
+struct wiimote_state {
+ spinlock_t lock;
+ __u8 flags;
+ __u8 accel_split[2];
+ __u8 drm;
+
+ /* synchronous cmd requests */
+ struct mutex sync;
+ struct completion ready;
+ int cmd;
+ __u32 opt;
+
+ /* results of synchronous requests */
+ __u8 cmd_battery;
+ __u8 cmd_err;
+ __u8 *cmd_read_buf;
+ __u8 cmd_read_size;
+};
+
+struct wiimote_data {
+ struct hid_device *hdev;
+ struct input_dev *input;
+ struct led_classdev *leds[4];
+ struct input_dev *accel;
+ struct input_dev *ir;
+ struct power_supply battery;
+ struct wiimote_ext *ext;
+ struct wiimote_debug *debug;
+
+ spinlock_t qlock;
+ __u8 head;
+ __u8 tail;
+ struct wiimote_buf outq[WIIMOTE_BUFSIZE];
+ struct work_struct worker;
+
+ struct wiimote_state state;
+};
+
+enum wiiproto_reqs {
+ WIIPROTO_REQ_NULL = 0x0,
+ WIIPROTO_REQ_RUMBLE = 0x10,
+ WIIPROTO_REQ_LED = 0x11,
+ WIIPROTO_REQ_DRM = 0x12,
+ WIIPROTO_REQ_IR1 = 0x13,
+ WIIPROTO_REQ_SREQ = 0x15,
+ WIIPROTO_REQ_WMEM = 0x16,
+ WIIPROTO_REQ_RMEM = 0x17,
+ WIIPROTO_REQ_IR2 = 0x1a,
+ WIIPROTO_REQ_STATUS = 0x20,
+ WIIPROTO_REQ_DATA = 0x21,
+ WIIPROTO_REQ_RETURN = 0x22,
+ WIIPROTO_REQ_DRM_K = 0x30,
+ WIIPROTO_REQ_DRM_KA = 0x31,
+ WIIPROTO_REQ_DRM_KE = 0x32,
+ WIIPROTO_REQ_DRM_KAI = 0x33,
+ WIIPROTO_REQ_DRM_KEE = 0x34,
+ WIIPROTO_REQ_DRM_KAE = 0x35,
+ WIIPROTO_REQ_DRM_KIE = 0x36,
+ WIIPROTO_REQ_DRM_KAIE = 0x37,
+ WIIPROTO_REQ_DRM_E = 0x3d,
+ WIIPROTO_REQ_DRM_SKAI1 = 0x3e,
+ WIIPROTO_REQ_DRM_SKAI2 = 0x3f,
+ WIIPROTO_REQ_MAX
+};
+
+#define dev_to_wii(pdev) hid_get_drvdata(container_of(pdev, struct hid_device, \
+ dev))
+
+extern void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm);
+extern int wiimote_cmd_write(struct wiimote_data *wdata, __u32 offset,
+ const __u8 *wmem, __u8 size);
+extern ssize_t wiimote_cmd_read(struct wiimote_data *wdata, __u32 offset,
+ __u8 *rmem, __u8 size);
+
+#define wiiproto_req_rreg(wdata, os, sz) \
+ wiiproto_req_rmem((wdata), false, (os), (sz))
+#define wiiproto_req_reeprom(wdata, os, sz) \
+ wiiproto_req_rmem((wdata), true, (os), (sz))
+extern void wiiproto_req_rmem(struct wiimote_data *wdata, bool eeprom,
+ __u32 offset, __u16 size);
+
+#ifdef CONFIG_HID_WIIMOTE_EXT
+
+extern int wiiext_init(struct wiimote_data *wdata);
+extern void wiiext_deinit(struct wiimote_data *wdata);
+extern void wiiext_event(struct wiimote_data *wdata, bool plugged);
+extern bool wiiext_active(struct wiimote_data *wdata);
+extern void wiiext_handle(struct wiimote_data *wdata, const __u8 *payload);
+
+#else
+
+static inline int wiiext_init(void *u) { return 0; }
+static inline void wiiext_deinit(void *u) { }
+static inline void wiiext_event(void *u, bool p) { }
+static inline bool wiiext_active(void *u) { return false; }
+static inline void wiiext_handle(void *u, const __u8 *p) { }
+
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+
+extern int wiidebug_init(struct wiimote_data *wdata);
+extern void wiidebug_deinit(struct wiimote_data *wdata);
+
+#else
+
+static inline int wiidebug_init(void *u) { return 0; }
+static inline void wiidebug_deinit(void *u) { }
+
+#endif
+
+/* requires the state.lock spinlock to be held */
+static inline bool wiimote_cmd_pending(struct wiimote_data *wdata, int cmd,
+ __u32 opt)
+{
+ return wdata->state.cmd == cmd && wdata->state.opt == opt;
+}
+
+/* requires the state.lock spinlock to be held */
+static inline void wiimote_cmd_complete(struct wiimote_data *wdata)
+{
+ wdata->state.cmd = WIIPROTO_REQ_NULL;
+ complete(&wdata->state.ready);
+}
+
+static inline int wiimote_cmd_acquire(struct wiimote_data *wdata)
+{
+ return mutex_lock_interruptible(&wdata->state.sync) ? -ERESTARTSYS : 0;
+}
+
+/* requires the state.lock spinlock to be held */
+static inline void wiimote_cmd_set(struct wiimote_data *wdata, int cmd,
+ __u32 opt)
+{
+ INIT_COMPLETION(wdata->state.ready);
+ wdata->state.cmd = cmd;
+ wdata->state.opt = opt;
+}
+
+static inline void wiimote_cmd_release(struct wiimote_data *wdata)
+{
+ mutex_unlock(&wdata->state.sync);
+}
+
+static inline int wiimote_cmd_wait(struct wiimote_data *wdata)
+{
+ int ret;
+
+ ret = wait_for_completion_interruptible_timeout(&wdata->state.ready, HZ);
+ if (ret < 0)
+ return -ERESTARTSYS;
+ else if (ret == 0)
+ return -EIO;
+ else
+ return 0;
+}
+
+#endif
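As an aside (not part of the patch): the inline cmd helpers above implement a simple request/response handshake. The requester marks the pending command under state.lock and sleeps in wiimote_cmd_wait(); the report handler that receives the answer calls wiimote_cmd_complete() under the same lock. A sketch of the requester side with the hypothetical do_sync_request(); wiimote_cmd_read()/wiimote_cmd_write() in hid-wiimote-core.c follow exactly this shape.

static int do_sync_request(struct wiimote_data *wdata, int cmd, __u32 opt)
{
	unsigned long flags;
	int ret;

	if (wiimote_cmd_acquire(wdata))		/* serialize requesters */
		return -ERESTARTSYS;

	spin_lock_irqsave(&wdata->state.lock, flags);
	wiimote_cmd_set(wdata, cmd, opt);	/* mark the request as pending */
	/* ...queue the matching output report here... */
	spin_unlock_irqrestore(&wdata->state.lock, flags);

	ret = wiimote_cmd_wait(wdata);	/* 0, -EIO on timeout, or -ERESTARTSYS */

	wiimote_cmd_release(wdata);
	return ret;
}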
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index b403fce..5bf91db 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -197,16 +197,24 @@ static int usbhid_restart_out_queue(struct usbhid_device *usbhid)
{
struct hid_device *hid = usb_get_intfdata(usbhid->intf);
int kicked;
+ int r;
if (!hid)
return 0;
if ((kicked = (usbhid->outhead != usbhid->outtail))) {
dbg("Kicking head %d tail %d", usbhid->outhead, usbhid->outtail);
+
+ r = usb_autopm_get_interface_async(usbhid->intf);
+ if (r < 0)
+ return r;
+ /* Asynchronously flush queue. */
+ set_bit(HID_OUT_RUNNING, &usbhid->iofl);
if (hid_submit_out(hid)) {
clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
- wake_up(&usbhid->wait);
+ usb_autopm_put_interface_async(usbhid->intf);
}
+ wake_up(&usbhid->wait);
}
return kicked;
}
@@ -215,6 +223,7 @@ static int usbhid_restart_ctrl_queue(struct usbhid_device *usbhid)
{
struct hid_device *hid = usb_get_intfdata(usbhid->intf);
int kicked;
+ int r;
WARN_ON(hid == NULL);
if (!hid)
@@ -222,10 +231,17 @@ static int usbhid_restart_ctrl_queue(struct usbhid_device *usbhid)
if ((kicked = (usbhid->ctrlhead != usbhid->ctrltail))) {
dbg("Kicking head %d tail %d", usbhid->ctrlhead, usbhid->ctrltail);
+
+ r = usb_autopm_get_interface_async(usbhid->intf);
+ if (r < 0)
+ return r;
+ /* Asynchronously flush queue. */
+ set_bit(HID_CTRL_RUNNING, &usbhid->iofl);
if (hid_submit_ctrl(hid)) {
clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
- wake_up(&usbhid->wait);
+ usb_autopm_put_interface_async(usbhid->intf);
}
+ wake_up(&usbhid->wait);
}
return kicked;
}
@@ -304,30 +320,21 @@ static int hid_submit_out(struct hid_device *hid)
report = usbhid->out[usbhid->outtail].report;
raw_report = usbhid->out[usbhid->outtail].raw_report;
- r = usb_autopm_get_interface_async(usbhid->intf);
- if (r < 0)
- return -1;
-
- /*
- * if the device hasn't been woken, we leave the output
- * to resume()
- */
- if (!test_bit(HID_REPORTED_IDLE, &usbhid->iofl)) {
- usbhid->urbout->transfer_buffer_length = ((report->size - 1) >> 3) + 1 + (report->id > 0);
- usbhid->urbout->dev = hid_to_usb_dev(hid);
- memcpy(usbhid->outbuf, raw_report, usbhid->urbout->transfer_buffer_length);
- kfree(raw_report);
+ usbhid->urbout->transfer_buffer_length = ((report->size - 1) >> 3) +
+ 1 + (report->id > 0);
+ usbhid->urbout->dev = hid_to_usb_dev(hid);
+ memcpy(usbhid->outbuf, raw_report,
+ usbhid->urbout->transfer_buffer_length);
+ kfree(raw_report);
- dbg_hid("submitting out urb\n");
+ dbg_hid("submitting out urb\n");
- if (usb_submit_urb(usbhid->urbout, GFP_ATOMIC)) {
- hid_err(hid, "usb_submit_urb(out) failed\n");
- usb_autopm_put_interface_async(usbhid->intf);
- return -1;
- }
- usbhid->last_out = jiffies;
+ r = usb_submit_urb(usbhid->urbout, GFP_ATOMIC);
+ if (r < 0) {
+ hid_err(hid, "usb_submit_urb(out) failed: %d\n", r);
+ return r;
}
-
+ usbhid->last_out = jiffies;
return 0;
}
@@ -343,50 +350,48 @@ static int hid_submit_ctrl(struct hid_device *hid)
raw_report = usbhid->ctrl[usbhid->ctrltail].raw_report;
dir = usbhid->ctrl[usbhid->ctrltail].dir;
- r = usb_autopm_get_interface_async(usbhid->intf);
- if (r < 0)
- return -1;
- if (!test_bit(HID_REPORTED_IDLE, &usbhid->iofl)) {
- len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
- if (dir == USB_DIR_OUT) {
- usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0);
- usbhid->urbctrl->transfer_buffer_length = len;
- memcpy(usbhid->ctrlbuf, raw_report, len);
- kfree(raw_report);
- } else {
- int maxpacket, padlen;
-
- usbhid->urbctrl->pipe = usb_rcvctrlpipe(hid_to_usb_dev(hid), 0);
- maxpacket = usb_maxpacket(hid_to_usb_dev(hid), usbhid->urbctrl->pipe, 0);
- if (maxpacket > 0) {
- padlen = DIV_ROUND_UP(len, maxpacket);
- padlen *= maxpacket;
- if (padlen > usbhid->bufsize)
- padlen = usbhid->bufsize;
- } else
- padlen = 0;
- usbhid->urbctrl->transfer_buffer_length = padlen;
- }
- usbhid->urbctrl->dev = hid_to_usb_dev(hid);
-
- usbhid->cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE | dir;
- usbhid->cr->bRequest = (dir == USB_DIR_OUT) ? HID_REQ_SET_REPORT : HID_REQ_GET_REPORT;
- usbhid->cr->wValue = cpu_to_le16(((report->type + 1) << 8) | report->id);
- usbhid->cr->wIndex = cpu_to_le16(usbhid->ifnum);
- usbhid->cr->wLength = cpu_to_le16(len);
-
- dbg_hid("submitting ctrl urb: %s wValue=0x%04x wIndex=0x%04x wLength=%u\n",
- usbhid->cr->bRequest == HID_REQ_SET_REPORT ? "Set_Report" : "Get_Report",
- usbhid->cr->wValue, usbhid->cr->wIndex, usbhid->cr->wLength);
-
- if (usb_submit_urb(usbhid->urbctrl, GFP_ATOMIC)) {
- usb_autopm_put_interface_async(usbhid->intf);
- hid_err(hid, "usb_submit_urb(ctrl) failed\n");
- return -1;
- }
- usbhid->last_ctrl = jiffies;
+ len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
+ if (dir == USB_DIR_OUT) {
+ usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0);
+ usbhid->urbctrl->transfer_buffer_length = len;
+ memcpy(usbhid->ctrlbuf, raw_report, len);
+ kfree(raw_report);
+ } else {
+ int maxpacket, padlen;
+
+ usbhid->urbctrl->pipe = usb_rcvctrlpipe(hid_to_usb_dev(hid), 0);
+ maxpacket = usb_maxpacket(hid_to_usb_dev(hid),
+ usbhid->urbctrl->pipe, 0);
+ if (maxpacket > 0) {
+ padlen = DIV_ROUND_UP(len, maxpacket);
+ padlen *= maxpacket;
+ if (padlen > usbhid->bufsize)
+ padlen = usbhid->bufsize;
+ } else
+ padlen = 0;
+ usbhid->urbctrl->transfer_buffer_length = padlen;
}
-
+ usbhid->urbctrl->dev = hid_to_usb_dev(hid);
+
+ usbhid->cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE | dir;
+ usbhid->cr->bRequest = (dir == USB_DIR_OUT) ? HID_REQ_SET_REPORT :
+ HID_REQ_GET_REPORT;
+ usbhid->cr->wValue = cpu_to_le16(((report->type + 1) << 8) |
+ report->id);
+ usbhid->cr->wIndex = cpu_to_le16(usbhid->ifnum);
+ usbhid->cr->wLength = cpu_to_le16(len);
+
+ dbg_hid("submitting ctrl urb: %s wValue=0x%04x wIndex=0x%04x wLength=%u\n",
+ usbhid->cr->bRequest == HID_REQ_SET_REPORT ? "Set_Report" :
+ "Get_Report",
+ usbhid->cr->wValue, usbhid->cr->wIndex, usbhid->cr->wLength);
+
+ r = usb_submit_urb(usbhid->urbctrl, GFP_ATOMIC);
+ if (r < 0) {
+ hid_err(hid, "usb_submit_urb(ctrl) failed: %d\n", r);
+ return r;
+ }
+ usbhid->last_ctrl = jiffies;
return 0;
}
@@ -423,11 +428,8 @@ static void hid_irq_out(struct urb *urb)
else
usbhid->outtail = (usbhid->outtail + 1) & (HID_OUTPUT_FIFO_SIZE - 1);
- if (usbhid->outhead != usbhid->outtail) {
- if (hid_submit_out(hid)) {
- clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
- wake_up(&usbhid->wait);
- }
+ if (usbhid->outhead != usbhid->outtail && !hid_submit_out(hid)) {
+ /* Successfully submitted next urb in queue */
spin_unlock_irqrestore(&usbhid->lock, flags);
return;
}
@@ -474,13 +476,9 @@ static void hid_ctrl(struct urb *urb)
else
usbhid->ctrltail = (usbhid->ctrltail + 1) & (HID_CONTROL_FIFO_SIZE - 1);
- if (usbhid->ctrlhead != usbhid->ctrltail) {
- if (hid_submit_ctrl(hid)) {
- clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
- wake_up(&usbhid->wait);
- }
+ if (usbhid->ctrlhead != usbhid->ctrltail && !hid_submit_ctrl(hid)) {
+ /* Successfully submitted next urb in queue */
spin_unlock(&usbhid->lock);
- usb_autopm_put_interface_async(usbhid->intf);
return;
}
@@ -515,9 +513,23 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re
usbhid->out[usbhid->outhead].report = report;
usbhid->outhead = head;
+ /* Try to awake from autosuspend... */
+ if (usb_autopm_get_interface_async(usbhid->intf) < 0)
+ return;
+
+ /*
+ * But if still suspended, leave urb enqueued, don't submit.
+ * Submission will occur if/when resume() drains the queue.
+ */
+ if (test_bit(HID_REPORTED_IDLE, &usbhid->iofl))
+ return;
+
if (!test_and_set_bit(HID_OUT_RUNNING, &usbhid->iofl)) {
- if (hid_submit_out(hid))
+ if (hid_submit_out(hid)) {
clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
+ usb_autopm_put_interface_async(usbhid->intf);
+ }
+ wake_up(&usbhid->wait);
} else {
/*
* the queue is known to run
@@ -549,9 +561,23 @@ static void __usbhid_submit_report(struct hid_device *hid, struct hid_report *re
usbhid->ctrl[usbhid->ctrlhead].dir = dir;
usbhid->ctrlhead = head;
+ /* Try to awake from autosuspend... */
+ if (usb_autopm_get_interface_async(usbhid->intf) < 0)
+ return;
+
+ /*
+ * If already suspended, leave urb enqueued, but don't submit.
+ * Submission will occur if/when resume() drains the queue.
+ */
+ if (test_bit(HID_REPORTED_IDLE, &usbhid->iofl))
+ return;
+
if (!test_and_set_bit(HID_CTRL_RUNNING, &usbhid->iofl)) {
- if (hid_submit_ctrl(hid))
+ if (hid_submit_ctrl(hid)) {
clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
+ usb_autopm_put_interface_async(usbhid->intf);
+ }
+ wake_up(&usbhid->wait);
} else {
/*
* the queue is known to run
@@ -576,6 +602,30 @@ void usbhid_submit_report(struct hid_device *hid, struct hid_report *report, uns
}
EXPORT_SYMBOL_GPL(usbhid_submit_report);
+/* Workqueue routine to send requests to change LEDs */
+static void hid_led(struct work_struct *work)
+{
+ struct usbhid_device *usbhid =
+ container_of(work, struct usbhid_device, led_work);
+ struct hid_device *hid = usbhid->hid;
+ struct hid_field *field;
+ unsigned long flags;
+
+ field = hidinput_get_led_field(hid);
+ if (!field) {
+ hid_warn(hid, "LED event field not found\n");
+ return;
+ }
+
+ spin_lock_irqsave(&usbhid->lock, flags);
+ if (!test_bit(HID_DISCONNECTED, &usbhid->iofl)) {
+ usbhid->ledcount = hidinput_count_leds(hid);
+ hid_dbg(usbhid->hid, "New ledcount = %u\n", usbhid->ledcount);
+ __usbhid_submit_report(hid, field->report, USB_DIR_OUT);
+ }
+ spin_unlock_irqrestore(&usbhid->lock, flags);
+}
+
static int usb_hidinput_input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value)
{
struct hid_device *hid = input_get_drvdata(dev);
@@ -595,17 +645,15 @@ static int usb_hidinput_input_event(struct input_dev *dev, unsigned int type, un
return -1;
}
+ spin_lock_irqsave(&usbhid->lock, flags);
hid_set_field(field, offset, value);
- if (value) {
- spin_lock_irqsave(&usbhid->lock, flags);
- usbhid->ledcount++;
- spin_unlock_irqrestore(&usbhid->lock, flags);
- } else {
- spin_lock_irqsave(&usbhid->lock, flags);
- usbhid->ledcount--;
- spin_unlock_irqrestore(&usbhid->lock, flags);
- }
- usbhid_submit_report(hid, field->report, USB_DIR_OUT);
+ spin_unlock_irqrestore(&usbhid->lock, flags);
+
+ /*
+ * Defer performing requested LED action.
+	 * This makes it more likely that all LED changes are gathered into a single URB.
+ */
+ schedule_work(&usbhid->led_work);
return 0;
}
@@ -1100,7 +1148,7 @@ static void usbhid_stop(struct hid_device *hid)
return;
clear_bit(HID_STARTED, &usbhid->iofl);
- spin_lock_irq(&usbhid->lock); /* Sync with error handler */
+ spin_lock_irq(&usbhid->lock); /* Sync with error and led handlers */
set_bit(HID_DISCONNECTED, &usbhid->iofl);
spin_unlock_irq(&usbhid->lock);
usb_kill_urb(usbhid->urbin);
@@ -1234,6 +1282,8 @@ static int usbhid_probe(struct usb_interface *intf, const struct usb_device_id *
setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
spin_lock_init(&usbhid->lock);
+ INIT_WORK(&usbhid->led_work, hid_led);
+
ret = hid_add_device(hid);
if (ret) {
if (ret != -ENODEV)
@@ -1266,6 +1316,7 @@ static void hid_cancel_delayed_stuff(struct usbhid_device *usbhid)
{
del_timer_sync(&usbhid->io_retry);
cancel_work_sync(&usbhid->reset_work);
+ cancel_work_sync(&usbhid->led_work);
}
static void hid_cease_io(struct usbhid_device *usbhid)
@@ -1367,16 +1418,6 @@ static int hid_suspend(struct usb_interface *intf, pm_message_t message)
return -EIO;
}
- if (!ignoreled && PMSG_IS_AUTO(message)) {
- spin_lock_irq(&usbhid->lock);
- if (test_bit(HID_LED_ON, &usbhid->iofl)) {
- spin_unlock_irq(&usbhid->lock);
- usbhid_mark_busy(usbhid);
- return -EBUSY;
- }
- spin_unlock_irq(&usbhid->lock);
- }
-
hid_cancel_delayed_stuff(usbhid);
hid_cease_io(usbhid);
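The hid-core.c hunks above restructure runtime power management: the usb_autopm_get_interface_async()/usb_autopm_put_interface_async() pairing moves out of hid_submit_out()/hid_submit_ctrl() and into the enqueue and queue-restart paths, and while HID_REPORTED_IDLE is set a request is merely queued so that resume() can drain it later. A minimal sketch of that pattern, with hypothetical names (struct my_dev, my_enqueue(), my_submit_next() stand in for the driver's own queue handling):

    /*
     * Illustrative sketch only: "take the PM reference on enqueue, submit
     * only if awake".  struct my_dev and the my_* helpers are hypothetical.
     */
    #include <linux/usb.h>
    #include <linux/wait.h>
    #include <linux/bitops.h>

    #define MY_IDLE     0   /* stands in for HID_REPORTED_IDLE */
    #define MY_RUNNING  1   /* stands in for HID_OUT_RUNNING   */

    struct my_dev {
            struct usb_interface *intf;
            unsigned long iofl;
            wait_queue_head_t wait;
    };

    void my_enqueue(struct my_dev *dev);            /* hypothetical helper */
    int my_submit_next(struct my_dev *dev);         /* 0 on success */

    static void my_queue_request(struct my_dev *dev)
    {
            my_enqueue(dev);

            /* Take the PM reference for the queued transfer up front... */
            if (usb_autopm_get_interface_async(dev->intf) < 0)
                    return;

            /* ...but while the device is suspended, leave the request
             * queued; resume() is expected to drain the queue later. */
            if (test_bit(MY_IDLE, &dev->iofl))
                    return;

            if (!test_and_set_bit(MY_RUNNING, &dev->iofl)) {
                    if (my_submit_next(dev)) {
                            /* failed: drop the flag and the PM reference */
                            clear_bit(MY_RUNNING, &dev->iofl);
                            usb_autopm_put_interface_async(dev->intf);
                    }
                    wake_up(&dev->wait);
            }
    }

The error path mirrors the hunks above: a failed submission drops both the RUNNING flag and the PM reference, and waiters are woken once the attempt is over.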
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 5028d60..57d4e1e 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -47,12 +47,14 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_AFATECH, USB_DEVICE_ID_AFATECH_AF9016, HID_QUIRK_FULLSPEED_INTERVAL },
+ { USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
{ USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
@@ -67,6 +69,9 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 4ef02b2..b1ec0e2 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -859,7 +859,7 @@ static const struct file_operations hiddev_fops = {
.llseek = noop_llseek,
};
-static char *hiddev_devnode(struct device *dev, mode_t *mode)
+static char *hiddev_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
}
@@ -922,11 +922,11 @@ void hiddev_disconnect(struct hid_device *hid)
struct hiddev *hiddev = hid->hiddev;
struct usbhid_device *usbhid = hid->driver_data;
+ usb_deregister_dev(usbhid->intf, &hiddev_class);
+
mutex_lock(&hiddev->existancelock);
hiddev->exist = 0;
- usb_deregister_dev(usbhid->intf, &hiddev_class);
-
if (hiddev->open) {
mutex_unlock(&hiddev->existancelock);
usbhid_close(hiddev->hid);
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
index 1673cac..cb8f703 100644
--- a/drivers/hid/usbhid/usbhid.h
+++ b/drivers/hid/usbhid/usbhid.h
@@ -55,7 +55,6 @@ struct usb_interface *usbhid_find_interface(int minor);
#define HID_STARTED 8
#define HID_REPORTED_IDLE 9
#define HID_KEYS_PRESSED 10
-#define HID_LED_ON 11
/*
* USB-specific HID struct, to be pointed to
@@ -97,6 +96,8 @@ struct usbhid_device {
struct work_struct reset_work; /* Task context for resets */
wait_queue_head_t wait; /* For sleeping */
int ledcount; /* counting the number of active leds */
+
+ struct work_struct led_work; /* Task context for setting LEDs */
};
#define hid_to_usb_dev(hid_dev) \
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index 0658173..7960869 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -64,6 +64,32 @@ static const unsigned char usb_kbd_keycode[256] = {
150,158,159,128,136,177,178,176,142,152,173,140
};
+
+/**
+ * struct usb_kbd - state of each attached keyboard
+ * @dev: input device associated with this keyboard
+ * @usbdev: usb device associated with this keyboard
+ * @old: data received in the past from the @irq URB representing which
+ * keys were pressed. By comparing with the current list of keys
+ * that are pressed, we are able to see key releases.
+ * @irq: URB for receiving a list of keys that are pressed when a
+ * new key is pressed or a key that was pressed is released.
+ * @led: URB for sending LEDs (e.g. numlock, ...)
+ * @newleds: data that will be sent with the @led URB representing which LEDs
+ *	should be on
+ * @name: Name of the keyboard. @dev's name field points to this buffer
+ * @phys: Physical path of the keyboard. @dev's phys field points to this
+ * buffer
+ * @new: Buffer for the @irq URB
+ * @cr: Control request for @led URB
+ * @leds: Buffer for the @led URB
+ * @new_dma: DMA address for @irq URB
+ * @leds_dma: DMA address for @led URB
+ * @leds_lock: spinlock that protects @leds, @newleds, and @led_urb_submitted
+ * @led_urb_submitted: indicates whether @led is in progress, i.e. it has been
+ * submitted and its completion handler has not returned yet
+ * without resubmitting @led
+ */
struct usb_kbd {
struct input_dev *dev;
struct usb_device *usbdev;
@@ -78,6 +104,10 @@ struct usb_kbd {
unsigned char *leds;
dma_addr_t new_dma;
dma_addr_t leds_dma;
+
+ spinlock_t leds_lock;
+ bool led_urb_submitted;
+
};
static void usb_kbd_irq(struct urb *urb)
@@ -136,44 +166,66 @@ resubmit:
static int usb_kbd_event(struct input_dev *dev, unsigned int type,
unsigned int code, int value)
{
+ unsigned long flags;
struct usb_kbd *kbd = input_get_drvdata(dev);
if (type != EV_LED)
return -1;
+ spin_lock_irqsave(&kbd->leds_lock, flags);
kbd->newleds = (!!test_bit(LED_KANA, dev->led) << 3) | (!!test_bit(LED_COMPOSE, dev->led) << 3) |
(!!test_bit(LED_SCROLLL, dev->led) << 2) | (!!test_bit(LED_CAPSL, dev->led) << 1) |
(!!test_bit(LED_NUML, dev->led));
- if (kbd->led->status == -EINPROGRESS)
+ if (kbd->led_urb_submitted) {
+ spin_unlock_irqrestore(&kbd->leds_lock, flags);
return 0;
+ }
- if (*(kbd->leds) == kbd->newleds)
+ if (*(kbd->leds) == kbd->newleds) {
+ spin_unlock_irqrestore(&kbd->leds_lock, flags);
return 0;
+ }
*(kbd->leds) = kbd->newleds;
+
kbd->led->dev = kbd->usbdev;
if (usb_submit_urb(kbd->led, GFP_ATOMIC))
pr_err("usb_submit_urb(leds) failed\n");
-
+ else
+ kbd->led_urb_submitted = true;
+
+ spin_unlock_irqrestore(&kbd->leds_lock, flags);
+
return 0;
}
static void usb_kbd_led(struct urb *urb)
{
+ unsigned long flags;
struct usb_kbd *kbd = urb->context;
if (urb->status)
hid_warn(urb->dev, "led urb status %d received\n",
urb->status);
- if (*(kbd->leds) == kbd->newleds)
+ spin_lock_irqsave(&kbd->leds_lock, flags);
+
+ if (*(kbd->leds) == kbd->newleds) {
+ kbd->led_urb_submitted = false;
+ spin_unlock_irqrestore(&kbd->leds_lock, flags);
return;
+ }
*(kbd->leds) = kbd->newleds;
+
kbd->led->dev = kbd->usbdev;
- if (usb_submit_urb(kbd->led, GFP_ATOMIC))
+ if (usb_submit_urb(kbd->led, GFP_ATOMIC)) {
hid_err(urb->dev, "usb_submit_urb(leds) failed\n");
+ kbd->led_urb_submitted = false;
+ }
+ spin_unlock_irqrestore(&kbd->leds_lock, flags);
+
}
static int usb_kbd_open(struct input_dev *dev)
@@ -252,6 +304,7 @@ static int usb_kbd_probe(struct usb_interface *iface,
kbd->usbdev = dev;
kbd->dev = input_dev;
+ spin_lock_init(&kbd->leds_lock);
if (dev->manufacturer)
strlcpy(kbd->name, dev->manufacturer, sizeof(kbd->name));
@@ -334,6 +387,7 @@ static void usb_kbd_disconnect(struct usb_interface *intf)
if (kbd) {
usb_kill_urb(kbd->irq);
input_unregister_device(kbd->dev);
+ usb_kill_urb(kbd->led);
usb_kbd_free_mem(interface_to_usbdev(intf), kbd);
kfree(kbd);
}
@@ -354,19 +408,4 @@ static struct usb_driver usb_kbd_driver = {
.id_table = usb_kbd_id_table,
};
-static int __init usb_kbd_init(void)
-{
- int result = usb_register(&usb_kbd_driver);
- if (result == 0)
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
- return result;
-}
-
-static void __exit usb_kbd_exit(void)
-{
- usb_deregister(&usb_kbd_driver);
-}
-
-module_init(usb_kbd_init);
-module_exit(usb_kbd_exit);
+module_usb_driver(usb_kbd_driver);
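The usbkbd.c change serializes LED URB handling behind leds_lock and a led_urb_submitted flag, so the input event handler and the URB completion handler never submit the same URB twice and never lose a state change that races with a completion. A sketch of that single-URB-in-flight state machine, with hypothetical names (struct my_kbd, my_set_leds(), my_led_complete()):

    /*
     * Illustrative sketch only: at most one LED URB in flight, protected
     * by a spinlock.  struct my_kbd and both functions are hypothetical.
     */
    #include <linux/usb.h>
    #include <linux/spinlock.h>

    struct my_kbd {
            struct urb *led;
            unsigned char *leds;        /* byte handed to the hardware */
            unsigned char newleds;      /* latest state requested */
            spinlock_t leds_lock;
            bool led_urb_submitted;
    };

    static void my_set_leds(struct my_kbd *kbd, unsigned char wanted)
    {
            unsigned long flags;

            spin_lock_irqsave(&kbd->leds_lock, flags);
            kbd->newleds = wanted;
            if (!kbd->led_urb_submitted && *kbd->leds != kbd->newleds) {
                    *kbd->leds = kbd->newleds;
                    if (usb_submit_urb(kbd->led, GFP_ATOMIC) == 0)
                            kbd->led_urb_submitted = true;
            }
            spin_unlock_irqrestore(&kbd->leds_lock, flags);
    }

    static void my_led_complete(struct urb *urb)
    {
            struct my_kbd *kbd = urb->context;
            unsigned long flags;

            spin_lock_irqsave(&kbd->leds_lock, flags);
            if (*kbd->leds == kbd->newleds) {
                    /* hardware has caught up; allow the next submit */
                    kbd->led_urb_submitted = false;
            } else {
                    /* state changed while in flight: chase it */
                    *kbd->leds = kbd->newleds;
                    if (usb_submit_urb(kbd->led, GFP_ATOMIC))
                            kbd->led_urb_submitted = false;
            }
            spin_unlock_irqrestore(&kbd->leds_lock, flags);
    }

The completion side either clears the flag when the hardware has caught up, or resubmits with the newest state while still holding the lock, matching the hunks above.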
diff --git a/drivers/hid/usbhid/usbmouse.c b/drivers/hid/usbhid/usbmouse.c
index 79b2bf8..0f6be45 100644
--- a/drivers/hid/usbhid/usbmouse.c
+++ b/drivers/hid/usbhid/usbmouse.c
@@ -241,19 +241,4 @@ static struct usb_driver usb_mouse_driver = {
.id_table = usb_mouse_id_table,
};
-static int __init usb_mouse_init(void)
-{
- int retval = usb_register(&usb_mouse_driver);
- if (retval == 0)
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
- return retval;
-}
-
-static void __exit usb_mouse_exit(void)
-{
- usb_deregister(&usb_mouse_driver);
-}
-
-module_init(usb_mouse_init);
-module_exit(usb_mouse_exit);
+module_usb_driver(usb_mouse_driver);
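Both usbkbd.c and usbmouse.c replace their hand-written init/exit functions with module_usb_driver(). That helper is roughly equivalent to the boilerplate below (the authoritative definition goes through module_driver() in the USB headers):

    /* Roughly what module_usb_driver(usb_mouse_driver) generates. */
    static int __init usb_mouse_driver_init(void)
    {
            return usb_register(&usb_mouse_driver);
    }
    module_init(usb_mouse_driver_init);

    static void __exit usb_mouse_driver_exit(void)
    {
            usb_deregister(&usb_mouse_driver);
    }
    module_exit(usb_mouse_driver_exit);

One visible difference is that the generated init no longer prints the KERN_INFO version banner the removed functions emitted.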
diff --git a/drivers/hv/Kconfig b/drivers/hv/Kconfig
index 9fa09ac..70f5dde 100644
--- a/drivers/hv/Kconfig
+++ b/drivers/hv/Kconfig
@@ -1,3 +1,5 @@
+menu "Microsoft Hyper-V guest support"
+
config HYPERV
tristate "Microsoft Hyper-V client drivers"
depends on X86 && ACPI && PCI
@@ -11,4 +13,4 @@ config HYPERV_UTILS
help
Select this option to enable the Hyper-V Utilities.
-
+endmenu
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 12b85ff..36484db 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -223,6 +223,17 @@ static void vmbus_process_rescind_offer(struct work_struct *work)
vmbus_device_unregister(channel->device_obj);
}
+void vmbus_free_channels(void)
+{
+ struct vmbus_channel *channel;
+
+ list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
+ vmbus_device_unregister(channel->device_obj);
+ kfree(channel->device_obj);
+ free_channel(channel);
+ }
+}
+
/*
* vmbus_process_offer - Process the offer by creating a channel/device
* associated with this offer
@@ -287,6 +298,7 @@ static void vmbus_process_offer(struct work_struct *work)
spin_lock_irqsave(&vmbus_connection.channel_lock, flags);
list_del(&newchannel->listentry);
spin_unlock_irqrestore(&vmbus_connection.channel_lock, flags);
+ kfree(newchannel->device_obj);
free_channel(newchannel);
} else {
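vmbus_free_channels() unregisters and frees every channel left on chn_list when the module unloads. For reference, the conventional shape for freeing entries while walking a list uses the _safe iterator, so the loop cursor never reads a node that was just released; whether the plain iterator above is sufficient depends on what free_channel() does with the list linkage. A minimal sketch of that general pattern:

    /* Sketch: freeing every entry while walking a list with the safe
     * iterator, which caches the next node before the current one goes. */
    struct vmbus_channel *channel, *tmp;

    list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
                             listentry) {
            vmbus_device_unregister(channel->device_obj);
            kfree(channel->device_obj);
            free_channel(channel);
    }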
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 0fb100e..12aa97f 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -164,11 +164,6 @@ int hv_init(void)
max_leaf = query_hypervisor_info();
- rdmsrl(HV_X64_MSR_GUEST_OS_ID, hv_context.guestid);
-
- if (hv_context.guestid != 0)
- goto cleanup;
-
/* Write our OS info */
wrmsrl(HV_X64_MSR_GUEST_OS_ID, HV_LINUX_GUEST_ID);
hv_context.guestid = HV_LINUX_GUEST_ID;
@@ -237,6 +232,9 @@ void hv_cleanup(void)
{
union hv_x64_msr_hypercall_contents hypercall_msr;
+ /* Reset our OS id */
+ wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
+
kfree(hv_context.signal_event_buffer);
hv_context.signal_event_buffer = NULL;
hv_context.signal_event_param = NULL;
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 89f5244..0e8343f 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -212,11 +212,13 @@ kvp_respond_to_host(char *key, char *value, int error)
* The windows host expects the key/value pair to be encoded
* in utf16.
*/
- keylen = utf8s_to_utf16s(key_name, strlen(key_name),
- (wchar_t *)kvp_data->data.key);
+ keylen = utf8s_to_utf16s(key_name, strlen(key_name), UTF16_HOST_ENDIAN,
+ (wchar_t *) kvp_data->data.key,
+ HV_KVP_EXCHANGE_MAX_KEY_SIZE / 2);
kvp_data->data.key_size = 2*(keylen + 1); /* utf16 encoding */
- valuelen = utf8s_to_utf16s(value, strlen(value),
- (wchar_t *)kvp_data->data.value);
+ valuelen = utf8s_to_utf16s(value, strlen(value), UTF16_HOST_ENDIAN,
+ (wchar_t *) kvp_data->data.value,
+ HV_KVP_EXCHANGE_MAX_VALUE_SIZE / 2);
kvp_data->data.value_size = 2*(valuelen + 1); /* utf16 encoding */
kvp_data->data.value_type = REG_SZ; /* all our values are strings */
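The hv_kvp.c hunk adapts to the extended utf8s_to_utf16s() interface, which takes an explicit endianness and a bound on the output buffer; the HV_KVP_EXCHANGE_MAX_*_SIZE / 2 arguments convert the byte-sized buffers into 16-bit code units. A usage sketch, assuming that prototype (my_to_utf16 is a made-up wrapper, not part of the driver):

    #include <linux/nls.h>
    #include <linux/string.h>

    /*
     * Sketch, assuming the post-change prototype: convert a NUL-terminated
     * UTF-8 string into at most dst_units 16-bit code units.  Returns the
     * number of units written, or a negative value on malformed input.
     */
    static int my_to_utf16(const char *src, wchar_t *dst, int dst_units)
    {
            return utf8s_to_utf16s(src, strlen(src), UTF16_HOST_ENDIAN,
                                   dst, dst_units);
    }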
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 0aee112..6d7d286 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -611,6 +611,7 @@ void vmbus_device_unregister(struct hv_device *device_obj);
struct vmbus_channel *relid2channel(u32 relid);
+void vmbus_free_channels(void);
/* Connection interface */
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 0c048dd..a220e57 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -62,6 +62,14 @@ struct hv_device_info {
struct hv_dev_port_info outbound;
};
+static int vmbus_exists(void)
+{
+ if (hv_acpi_dev == NULL)
+ return -ENODEV;
+
+ return 0;
+}
+
static void get_channel_info(struct hv_device *device,
struct hv_device_info *info)
@@ -590,6 +598,10 @@ int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, c
pr_info("registering driver %s\n", hv_driver->name);
+ ret = vmbus_exists();
+ if (ret < 0)
+ return ret;
+
hv_driver->driver.name = hv_driver->name;
hv_driver->driver.owner = owner;
hv_driver->driver.mod_name = mod_name;
@@ -614,8 +626,8 @@ void vmbus_driver_unregister(struct hv_driver *hv_driver)
{
pr_info("unregistering driver %s\n", hv_driver->name);
- driver_unregister(&hv_driver->driver);
-
+ if (!vmbus_exists())
+ driver_unregister(&hv_driver->driver);
}
EXPORT_SYMBOL_GPL(vmbus_driver_unregister);
@@ -776,11 +788,23 @@ static int __init hv_acpi_init(void)
cleanup:
acpi_bus_unregister_driver(&vmbus_acpi_driver);
+ hv_acpi_dev = NULL;
return ret;
}
+static void __exit vmbus_exit(void)
+{
+
+ free_irq(irq, hv_acpi_dev);
+ vmbus_free_channels();
+ bus_unregister(&hv_bus);
+ hv_cleanup();
+ acpi_bus_unregister_driver(&vmbus_acpi_driver);
+}
+
MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
-module_init(hv_acpi_init);
+subsys_initcall(hv_acpi_init);
+module_exit(vmbus_exit);
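The vmbus_drv.c changes add a vmbus_exists() guard so driver registration fails cleanly with -ENODEV when the VMBus ACPI device was never found (hv_acpi_dev is reset to NULL on the init error path to keep the check accurate), switch to subsys_initcall() so the bus, when built in, comes up before ordinary driver initcalls, and add vmbus_exit() to undo init roughly in reverse order. A sketch of the guard pattern with placeholder names (my_bus_dev, my_driver):

    /* Sketch with placeholder names; hv_acpi_dev plays the my_bus_dev role. */
    #include <linux/acpi.h>
    #include <linux/device.h>
    #include <linux/errno.h>

    struct my_driver {
            struct device_driver driver;
    };

    static struct acpi_device *my_bus_dev;  /* set by the ACPI probe, else NULL */

    static int my_bus_exists(void)
    {
            return my_bus_dev ? 0 : -ENODEV;
    }

    int my_driver_register(struct my_driver *drv)
    {
            int ret = my_bus_exists();

            if (ret < 0)            /* the bus never came up */
                    return ret;
            return driver_register(&drv->driver);
    }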
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 91be41f..dad895f 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -367,11 +367,11 @@ config SENSORS_F71882FG
will be called f71882fg.
config SENSORS_F75375S
- tristate "Fintek F75375S/SP and F75373"
+ tristate "Fintek F75375S/SP, F75373 and F75387"
depends on I2C
help
If you say yes here you get support for hardware monitoring
- features of the Fintek F75375S/SP and F75373
+ features of the Fintek F75375S/SP, F75373 and F75387
This driver can also be built as a module. If so, the module
will be called f75375s.
@@ -474,8 +474,8 @@ config SENSORS_IT87
select HWMON_VID
help
If you say yes here you get support for ITE IT8705F, IT8712F,
- IT8716F, IT8718F, IT8720F, IT8721F, IT8726F and IT8758E sensor
- chips, and the SiS960 clone.
+ IT8716F, IT8718F, IT8720F, IT8721F, IT8726F, IT8728F and IT8758E
+ sensor chips, and the SiS960 clone.
This driver can also be built as a module. If so, the module
will be called it87.
@@ -497,8 +497,9 @@ config SENSORS_JC42
If you say yes here, you get support for JEDEC JC42.4 compliant
temperature sensors, which are used on many DDR3 memory modules for
mobile devices and servers. Support will include, but not be limited
- to, ADT7408, CAT34TS02, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243,
- MCP9843, SE97, SE98, STTS424(E), TSE2002B3, and TS3000B3.
+ to, ADT7408, AT30TS00, CAT34TS02, CAT6095, MAX6604, MCP9804, MCP9805,
+ MCP98242, MCP98243, MCP9843, SE97, SE98, STTS424(E), STTS2002,
+ STTS3000, TSE2002B3, TSE2002GB2, TS3000B3, and TS3000GB2.
This driver can also be built as a module. If so, the module
will be called jc42.
@@ -515,11 +516,11 @@ config SENSORS_LINEAGE
will be called lineage-pem.
config SENSORS_LM63
- tristate "National Semiconductor LM63 and LM64"
+ tristate "National Semiconductor LM63 and compatibles"
depends on I2C
help
If you say yes here you get support for the National
- Semiconductor LM63 and LM64 remote diode digital temperature
+ Semiconductor LM63, LM64, and LM96163 remote diode digital temperature
sensors with integrated fan control. Such chips are found
on the Tyan S4882 (Thunder K8QS Pro) motherboard, among
others.
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index 65a35cf..3b728e8 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -145,7 +145,7 @@ static const u8 abituguru_pwm_max[5] = { 0, 255, 255, 75, 75 };
/* Insmod parameters */
-static int force;
+static bool force;
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Set to one to force detection.");
static int bank1_types[ABIT_UGURU_MAX_BANK1_SENSORS] = { -1, -1, -1, -1, -1,
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index d30855a..34a14a7 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -603,11 +603,11 @@ static const struct abituguru3_motherboard_info abituguru3_motherboards[] = {
/* Insmod parameters */
-static int force;
+static bool force;
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Set to one to force detection.");
/* Default verbose is 1, since this driver is still in the testing phase */
-static int verbose = 1;
+static bool verbose = 1;
module_param(verbose, bool, 0644);
MODULE_PARM_DESC(verbose, "Enable/disable verbose error reporting");
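The abituguru and abituguru3 hunks, like several hwmon changes below, turn module-parameter variables into bool so they match module_param(..., bool, ...); kernels of this era started warning when a bool parameter was backed by a plain int, if I recall correctly. The minimal shape:

    #include <linux/module.h>

    static bool force;                      /* was: static int force; */
    module_param(force, bool, 0);
    MODULE_PARM_DESC(force, "Set to one to force detection.");

With a bool variable the initializer reads more naturally as true/false; the verbose parameter above keeps "= 1", which still converts correctly.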
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 66f6729..554f046 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -58,7 +58,7 @@ ACPI_MODULE_NAME(ACPI_POWER_METER_NAME);
#define POWER_ALARM_NAME "power1_alarm"
static int cap_in_hardware;
-static int force_cap_on;
+static bool force_cap_on;
static int can_cap_in_hardware(void)
{
@@ -170,7 +170,7 @@ static ssize_t set_avg_interval(struct device *dev,
unsigned long long data;
acpi_status status;
- res = strict_strtoul(buf, 10, &temp);
+ res = kstrtoul(buf, 10, &temp);
if (res)
return res;
@@ -241,7 +241,7 @@ static ssize_t set_cap(struct device *dev, struct device_attribute *devattr,
unsigned long long data;
acpi_status status;
- res = strict_strtoul(buf, 10, &temp);
+ res = kstrtoul(buf, 10, &temp);
if (res)
return res;
@@ -311,7 +311,7 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
int res;
unsigned long temp;
- res = strict_strtoul(buf, 10, &temp);
+ res = kstrtoul(buf, 10, &temp);
if (res)
return res;
diff --git a/drivers/hwmon/adcxx.c b/drivers/hwmon/adcxx.c
index b2cacbe..ceb24a3 100644
--- a/drivers/hwmon/adcxx.c
+++ b/drivers/hwmon/adcxx.c
@@ -125,7 +125,7 @@ static ssize_t adcxx_set_max(struct device *dev,
struct adcxx *adc = spi_get_drvdata(spi);
unsigned long value;
- if (strict_strtoul(buf, 10, &value))
+ if (kstrtoul(buf, 10, &value))
return -EINVAL;
if (mutex_lock_interruptible(&adc->lock))
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index 1ad0a88..0158cc3 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -103,7 +103,7 @@ static int adm1021_remove(struct i2c_client *client);
static struct adm1021_data *adm1021_update_device(struct device *dev);
/* (amalysh) read only mode, otherwise any limit's writing confuse BIOS */
-static int read_only;
+static bool read_only;
static const struct i2c_device_id adm1021_id[] = {
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index 0683e6b..97e2cfb 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -155,7 +155,8 @@ adm1031_write_value(struct i2c_client *client, u8 reg, unsigned int value)
#define TEMP_OFFSET_FROM_REG(val) TEMP_FROM_REG((val) < 0 ? \
(val) | 0x70 : (val))
-#define FAN_FROM_REG(reg, div) ((reg) ? (11250 * 60) / ((reg) * (div)) : 0)
+#define FAN_FROM_REG(reg, div) ((reg) ? \
+ (11250 * 60) / ((reg) * (div)) : 0)
static int FAN_TO_REG(int reg, int div)
{
@@ -174,8 +175,8 @@ static int FAN_TO_REG(int reg, int div)
(((reg) & 0x1F) | (((val) << 5) & 0xe0))
#define AUTO_TEMP_MIN_TO_REG(val, reg) \
- ((((val)/500) & 0xf8)|((reg) & 0x7))
-#define AUTO_TEMP_RANGE_FROM_REG(reg) (5000 * (1<< ((reg)&0x7)))
+ ((((val) / 500) & 0xf8) | ((reg) & 0x7))
+#define AUTO_TEMP_RANGE_FROM_REG(reg) (5000 * (1 << ((reg) & 0x7)))
#define AUTO_TEMP_MIN_FROM_REG(reg) (1000 * ((((reg) >> 3) & 0x1f) << 2))
#define AUTO_TEMP_MIN_FROM_REG_DEG(reg) ((((reg) >> 3) & 0x1f) << 2)
@@ -202,7 +203,7 @@ static int AUTO_TEMP_MAX_TO_REG(int val, int reg, int pwm)
/* FAN auto control */
#define GET_FAN_AUTO_BITFIELD(data, idx) \
- (*(data)->chan_select_table)[FAN_CHAN_FROM_REG((data)->conf1)][idx%2]
+ (*(data)->chan_select_table)[FAN_CHAN_FROM_REG((data)->conf1)][idx % 2]
/* The tables below contains the possible values for the auto fan
* control bitfields. the index in the table is the register value.
@@ -230,7 +231,7 @@ static const auto_chan_table_t auto_channel_select_table_adm1030 = {
*/
static int
get_fan_auto_nearest(struct adm1031_data *data,
- int chan, u8 val, u8 reg, u8 * new_reg)
+ int chan, u8 val, u8 reg, u8 *new_reg)
{
int i;
int first_match = -1, exact_match = -1;
@@ -258,13 +259,13 @@ get_fan_auto_nearest(struct adm1031_data *data,
}
}
- if (exact_match >= 0) {
+ if (exact_match >= 0)
*new_reg = exact_match;
- } else if (first_match >= 0) {
+ else if (first_match >= 0)
*new_reg = first_match;
- } else {
+ else
return -EINVAL;
- }
+
return 0;
}
@@ -283,23 +284,28 @@ set_fan_auto_channel(struct device *dev, struct device_attribute *attr,
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
- int val = simple_strtol(buf, NULL, 10);
+ long val;
u8 reg;
int ret;
u8 old_fan_mode;
+ ret = kstrtol(buf, 10, &val);
+ if (ret)
+ return ret;
+
old_fan_mode = data->conf1;
mutex_lock(&data->update_lock);
- if ((ret = get_fan_auto_nearest(data, nr, val, data->conf1, &reg))) {
+ ret = get_fan_auto_nearest(data, nr, val, data->conf1, &reg);
+ if (ret) {
mutex_unlock(&data->update_lock);
return ret;
}
data->conf1 = FAN_CHAN_TO_REG(reg, data->conf1);
if ((data->conf1 & ADM1031_CONF1_AUTO_MODE) ^
(old_fan_mode & ADM1031_CONF1_AUTO_MODE)) {
- if (data->conf1 & ADM1031_CONF1_AUTO_MODE){
+ if (data->conf1 & ADM1031_CONF1_AUTO_MODE) {
/* Switch to Auto Fan Mode
* Save PWM registers
* Set PWM registers to 33% Both */
@@ -350,7 +356,12 @@ set_auto_temp_min(struct device *dev, struct device_attribute *attr,
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
- int val = simple_strtol(buf, NULL, 10);
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret)
+ return ret;
mutex_lock(&data->update_lock);
data->auto_temp[nr] = AUTO_TEMP_MIN_TO_REG(val, data->auto_temp[nr]);
@@ -374,10 +385,16 @@ set_auto_temp_max(struct device *dev, struct device_attribute *attr,
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
- int val = simple_strtol(buf, NULL, 10);
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret)
+ return ret;
mutex_lock(&data->update_lock);
- data->temp_max[nr] = AUTO_TEMP_MAX_TO_REG(val, data->auto_temp[nr], data->pwm[nr]);
+ data->temp_max[nr] = AUTO_TEMP_MAX_TO_REG(val, data->auto_temp[nr],
+ data->pwm[nr]);
adm1031_write_value(client, ADM1031_REG_AUTO_TEMP(nr),
data->temp_max[nr]);
mutex_unlock(&data->update_lock);
@@ -410,8 +427,12 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
- int val = simple_strtol(buf, NULL, 10);
- int reg;
+ long val;
+ int ret, reg;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret)
+ return ret;
mutex_lock(&data->update_lock);
if ((data->conf1 & ADM1031_CONF1_AUTO_MODE) &&
@@ -449,9 +470,13 @@ static int trust_fan_readings(struct adm1031_data *data, int chan)
if (data->conf1 & ADM1031_CONF1_AUTO_MODE) {
switch (data->conf1 & 0x60) {
- case 0x00: /* remote temp1 controls fan1 remote temp2 controls fan2 */
+ case 0x00:
+ /*
+ * remote temp1 controls fan1,
+ * remote temp2 controls fan2
+ */
res = data->temp[chan+1] >=
- AUTO_TEMP_MIN_FROM_REG_DEG(data->auto_temp[chan+1]);
+ AUTO_TEMP_MIN_FROM_REG_DEG(data->auto_temp[chan+1]);
break;
case 0x20: /* remote temp1 controls both fans */
res =
@@ -515,7 +540,12 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
- int val = simple_strtol(buf, NULL, 10);
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret)
+ return ret;
mutex_lock(&data->update_lock);
if (val) {
@@ -534,10 +564,15 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
- int val = simple_strtol(buf, NULL, 10);
+ long val;
u8 tmp;
int old_div;
int new_min;
+ int ret;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret)
+ return ret;
tmp = val == 8 ? 0xc0 :
val == 4 ? 0x80 :
@@ -631,9 +666,13 @@ static ssize_t set_temp_offset(struct device *dev,
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
- int val;
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret)
+ return ret;
- val = simple_strtol(buf, NULL, 10);
val = SENSORS_LIMIT(val, -15000, 15000);
mutex_lock(&data->update_lock);
data->temp_offset[nr] = TEMP_OFFSET_TO_REG(val);
@@ -648,9 +687,13 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
- int val;
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret)
+ return ret;
- val = simple_strtol(buf, NULL, 10);
val = SENSORS_LIMIT(val, -55000, nr == 0 ? 127750 : 127875);
mutex_lock(&data->update_lock);
data->temp_min[nr] = TEMP_TO_REG(val);
@@ -665,9 +708,13 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
- int val;
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret)
+ return ret;
- val = simple_strtol(buf, NULL, 10);
val = SENSORS_LIMIT(val, -55000, nr == 0 ? 127750 : 127875);
mutex_lock(&data->update_lock);
data->temp_max[nr] = TEMP_TO_REG(val);
@@ -682,9 +729,13 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr,
struct i2c_client *client = to_i2c_client(dev);
struct adm1031_data *data = i2c_get_clientdata(client);
int nr = to_sensor_dev_attr(attr)->index;
- int val;
+ long val;
+ int ret;
+
+ ret = kstrtol(buf, 10, &val);
+ if (ret)
+ return ret;
- val = simple_strtol(buf, NULL, 10);
val = SENSORS_LIMIT(val, -55000, nr == 0 ? 127750 : 127875);
mutex_lock(&data->update_lock);
data->temp_crit[nr] = TEMP_TO_REG(val);
@@ -711,7 +762,8 @@ temp_reg(2);
temp_reg(3);
/* Alarms */
-static ssize_t show_alarms(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct adm1031_data *data = adm1031_update_device(dev);
return sprintf(buf, "%d\n", data->alarm);
@@ -767,7 +819,7 @@ static ssize_t set_update_interval(struct device *dev,
int i, err;
u8 reg;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err)
return err;
@@ -919,12 +971,13 @@ static int adm1031_probe(struct i2c_client *client,
adm1031_init_client(client);
/* Register sysfs hooks */
- if ((err = sysfs_create_group(&client->dev.kobj, &adm1031_group)))
+ err = sysfs_create_group(&client->dev.kobj, &adm1031_group);
+ if (err)
goto exit_free;
if (data->chip_type == adm1031) {
- if ((err = sysfs_create_group(&client->dev.kobj,
- &adm1031_group_opt)))
+ err = sysfs_create_group(&client->dev.kobj, &adm1031_group_opt);
+ if (err)
goto exit_remove;
}
@@ -970,14 +1023,13 @@ static void adm1031_init_client(struct i2c_client *client)
}
/* Initialize the ADM1031 chip (enables fan speed reading ) */
read_val = adm1031_read_value(client, ADM1031_REG_CONF2);
- if ((read_val | mask) != read_val) {
- adm1031_write_value(client, ADM1031_REG_CONF2, read_val | mask);
- }
+ if ((read_val | mask) != read_val)
+ adm1031_write_value(client, ADM1031_REG_CONF2, read_val | mask);
read_val = adm1031_read_value(client, ADM1031_REG_CONF1);
if ((read_val | ADM1031_CONF1_MONITOR_ENABLE) != read_val) {
- adm1031_write_value(client, ADM1031_REG_CONF1, read_val |
- ADM1031_CONF1_MONITOR_ENABLE);
+ adm1031_write_value(client, ADM1031_REG_CONF1,
+ read_val | ADM1031_CONF1_MONITOR_ENABLE);
}
/* Read the chip's update rate */
@@ -1024,8 +1076,7 @@ static struct adm1031_data *adm1031_update_device(struct device *dev)
/* oldh is actually newer */
if (newh != oldh)
dev_warn(&client->dev,
- "Remote temperature may be "
- "wrong.\n");
+ "Remote temperature may be wrong.\n");
#endif
}
data->temp[chan] = newh;
@@ -1052,22 +1103,24 @@ static struct adm1031_data *adm1031_update_device(struct device *dev)
data->conf2 = adm1031_read_value(client, ADM1031_REG_CONF2);
data->alarm = adm1031_read_value(client, ADM1031_REG_STATUS(0))
- | (adm1031_read_value(client, ADM1031_REG_STATUS(1))
- << 8);
- if (data->chip_type == adm1030) {
+ | (adm1031_read_value(client, ADM1031_REG_STATUS(1)) << 8);
+ if (data->chip_type == adm1030)
data->alarm &= 0xc0ff;
- }
- for (chan=0; chan<(data->chip_type == adm1030 ? 1 : 2); chan++) {
+ for (chan = 0; chan < (data->chip_type == adm1030 ? 1 : 2);
+ chan++) {
data->fan_div[chan] =
- adm1031_read_value(client, ADM1031_REG_FAN_DIV(chan));
+ adm1031_read_value(client,
+ ADM1031_REG_FAN_DIV(chan));
data->fan_min[chan] =
- adm1031_read_value(client, ADM1031_REG_FAN_MIN(chan));
+ adm1031_read_value(client,
+ ADM1031_REG_FAN_MIN(chan));
data->fan[chan] =
- adm1031_read_value(client, ADM1031_REG_FAN_SPEED(chan));
+ adm1031_read_value(client,
+ ADM1031_REG_FAN_SPEED(chan));
data->pwm[chan] =
- 0xf & (adm1031_read_value(client, ADM1031_REG_PWM) >>
- (4*chan));
+ (adm1031_read_value(client,
+ ADM1031_REG_PWM) >> (4 * chan)) & 0x0f;
}
data->last_updated = jiffies;
data->valid = 1;
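Much of the adm1031.c diff, like the acpi_power_meter.c and adcxx.c hunks above, converts sysfs store callbacks from simple_strtol()/strict_strto*() to kstrto*(), which rejects trailing garbage, detects overflow, and lets the callback return a real error code instead of silently accepting bad input. A condensed sketch of the resulting store shape (the bounds and the register write are placeholders for illustration):

    static ssize_t set_value(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t count)
    {
            struct i2c_client *client = to_i2c_client(dev);
            struct adm1031_data *data = i2c_get_clientdata(client);
            long val;
            int ret;

            ret = kstrtol(buf, 10, &val);   /* -EINVAL / -ERANGE on bad input */
            if (ret)
                    return ret;

            val = SENSORS_LIMIT(val, -55000, 127750);       /* example bounds */

            mutex_lock(&data->update_lock);
            /* ... convert val and write it to the chip register here ... */
            mutex_unlock(&data->update_lock);

            return count;
    }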
diff --git a/drivers/hwmon/adm9240.c b/drivers/hwmon/adm9240.c
index 9e234b9..3f63f5f 100644
--- a/drivers/hwmon/adm9240.c
+++ b/drivers/hwmon/adm9240.c
@@ -503,7 +503,7 @@ static ssize_t chassis_clear(struct device *dev,
struct adm9240_data *data = i2c_get_clientdata(client);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) || val != 0)
+ if (kstrtoul(buf, 10, &val) || val != 0)
return -EINVAL;
mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
index eedca3c..dd87ae9 100644
--- a/drivers/hwmon/ads1015.c
+++ b/drivers/hwmon/ads1015.c
@@ -271,7 +271,7 @@ static int ads1015_probe(struct i2c_client *client,
continue;
err = device_create_file(&client->dev, &ads1015_in[k].dev_attr);
if (err)
- goto exit_free;
+ goto exit_remove;
}
data->hwmon_dev = hwmon_device_register(&client->dev);
@@ -285,7 +285,6 @@ static int ads1015_probe(struct i2c_client *client,
exit_remove:
for (k = 0; k < ADS1015_CHANNELS; ++k)
device_remove_file(&client->dev, &ads1015_in[k].dev_attr);
-exit_free:
kfree(data);
exit:
return err;
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c
index cfcc3b6..ed60242 100644
--- a/drivers/hwmon/ads7828.c
+++ b/drivers/hwmon/ads7828.c
@@ -48,8 +48,8 @@ static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4b,
I2C_CLIENT_END };
/* Module parameters */
-static int se_input = 1; /* Default is SE, 0 == diff */
-static int int_vref = 1; /* Default is internal ref ON */
+static bool se_input = 1; /* Default is SE, 0 == diff */
+static bool int_vref = 1; /* Default is internal ref ON */
static int vref_mv = ADS7828_INT_VREF_MV; /* set if vref != 2.5V */
module_param(se_input, bool, S_IRUGO);
module_param(int_vref, bool, S_IRUGO);
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index 5cc3e37..5b02f7a 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -197,7 +197,7 @@ static ssize_t adt7411_set_bit(struct device *dev,
int ret;
unsigned long flag;
- ret = strict_strtoul(buf, 0, &flag);
+ ret = kstrtoul(buf, 0, &flag);
if (ret || flag > 1)
return -EINVAL;
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index 2af0c7b..7a14948 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -833,7 +833,7 @@ static ssize_t set_temp_min(struct device *dev,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
+ if (kstrtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
@@ -871,7 +871,7 @@ static ssize_t set_temp_max(struct device *dev,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
+ if (kstrtol(buf, 10, &temp) || !temp_enabled(data, attr->index))
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
@@ -935,7 +935,7 @@ static ssize_t set_volt_max(struct device *dev,
int x = voltage_multiplier(data, attr->index);
long temp;
- if (strict_strtol(buf, 10, &temp) || !x)
+ if (kstrtol(buf, 10, &temp) || !x)
return -EINVAL;
temp *= 1000; /* convert mV to uV */
@@ -977,7 +977,7 @@ static ssize_t set_volt_min(struct device *dev,
int x = voltage_multiplier(data, attr->index);
long temp;
- if (strict_strtol(buf, 10, &temp) || !x)
+ if (kstrtol(buf, 10, &temp) || !x)
return -EINVAL;
temp *= 1000; /* convert mV to uV */
@@ -1066,7 +1066,7 @@ static ssize_t set_fan_min(struct device *dev,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp) || !temp ||
+ if (kstrtol(buf, 10, &temp) || !temp ||
!fan_enabled(data, attr->index))
return -EINVAL;
@@ -1115,7 +1115,7 @@ static ssize_t set_force_pwm_max(struct device *dev,
long temp;
u8 reg;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
mutex_lock(&data->lock);
@@ -1147,7 +1147,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = SENSORS_LIMIT(temp, 0, 255);
@@ -1177,7 +1177,7 @@ static ssize_t set_pwm_max(struct device *dev,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = SENSORS_LIMIT(temp, 0, 255);
@@ -1209,7 +1209,7 @@ static ssize_t set_pwm_min(struct device *dev,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = SENSORS_LIMIT(temp, 0, 255);
@@ -1243,7 +1243,7 @@ static ssize_t set_pwm_hyst(struct device *dev,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000);
@@ -1289,7 +1289,7 @@ static ssize_t set_pwm_tmax(struct device *dev,
int tmin, trange_value;
long trange;
- if (strict_strtol(buf, 10, &trange))
+ if (kstrtol(buf, 10, &trange))
return -EINVAL;
/* trange = tmax - tmin */
@@ -1330,7 +1330,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000) + 64;
@@ -1387,7 +1387,7 @@ static ssize_t set_pwm_auto(struct device *dev,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
switch (temp) {
@@ -1446,7 +1446,7 @@ static ssize_t set_pwm_auto_temp(struct device *dev,
struct adt7462_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = cvt_auto_temp(temp);
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index c6d1ce0..5e10c79 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -449,7 +449,7 @@ static ssize_t set_auto_update_interval(struct device *dev,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = SENSORS_LIMIT(temp, 0, 60000);
@@ -478,7 +478,7 @@ static ssize_t set_num_temp_sensors(struct device *dev,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = SENSORS_LIMIT(temp, -1, 10);
@@ -511,7 +511,7 @@ static ssize_t set_temp_min(struct device *dev,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000);
@@ -545,7 +545,7 @@ static ssize_t set_temp_max(struct device *dev,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000);
@@ -600,7 +600,7 @@ static ssize_t set_fan_max(struct device *dev,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp) || !temp)
+ if (kstrtol(buf, 10, &temp) || !temp)
return -EINVAL;
temp = FAN_RPM_TO_PERIOD(temp);
@@ -637,7 +637,7 @@ static ssize_t set_fan_min(struct device *dev,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp) || !temp)
+ if (kstrtol(buf, 10, &temp) || !temp)
return -EINVAL;
temp = FAN_RPM_TO_PERIOD(temp);
@@ -682,7 +682,7 @@ static ssize_t set_force_pwm_max(struct device *dev,
long temp;
u8 reg;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
mutex_lock(&data->lock);
@@ -714,7 +714,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = SENSORS_LIMIT(temp, 0, 255);
@@ -746,7 +746,7 @@ static ssize_t set_pwm_max(struct device *dev,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = SENSORS_LIMIT(temp, 0, 255);
@@ -779,7 +779,7 @@ static ssize_t set_pwm_min(struct device *dev,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = SENSORS_LIMIT(temp, 0, 255);
@@ -822,7 +822,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
struct adt7470_data *data = i2c_get_clientdata(client);
long temp;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = DIV_ROUND_CLOSEST(temp, 1000);
@@ -859,7 +859,7 @@ static ssize_t set_pwm_auto(struct device *dev,
long temp;
u8 reg;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
if (attr->index % 2)
@@ -919,7 +919,7 @@ static ssize_t set_pwm_auto_temp(struct device *dev,
long temp;
u8 reg;
- if (strict_strtol(buf, 10, &temp))
+ if (kstrtol(buf, 10, &temp))
return -EINVAL;
temp = cvt_auto_temp(temp);
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index b5fcd87..7dab354 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -343,7 +343,7 @@ static ssize_t set_voltage(struct device *dev, struct device_attribute *attr,
unsigned char reg;
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->lock);
@@ -432,7 +432,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *attr,
int temp;
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->lock);
@@ -546,7 +546,7 @@ static ssize_t set_point2(struct device *dev, struct device_attribute *attr,
int temp;
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->lock);
@@ -602,7 +602,7 @@ static ssize_t set_tach(struct device *dev, struct device_attribute *attr,
struct adt7475_data *data = i2c_get_clientdata(client);
unsigned long val;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtoul(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->lock);
@@ -653,7 +653,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
unsigned char reg = 0;
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->lock);
@@ -758,7 +758,7 @@ static ssize_t set_pwmchan(struct device *dev, struct device_attribute *attr,
int r;
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->lock);
@@ -781,7 +781,7 @@ static ssize_t set_pwmctrl(struct device *dev, struct device_attribute *attr,
int r;
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->lock);
@@ -819,7 +819,7 @@ static ssize_t set_pwmfreq(struct device *dev, struct device_attribute *attr,
int out;
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
out = find_nearest(val, pwmfreq_table, ARRAY_SIZE(pwmfreq_table));
@@ -853,7 +853,7 @@ static ssize_t set_pwm_at_crit(struct device *dev,
struct adt7475_data *data = i2c_get_clientdata(client);
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
if (val != 0 && val != 1)
return -EINVAL;
@@ -883,7 +883,7 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *devattr,
struct adt7475_data *data = dev_get_drvdata(dev);
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
if (val < 0 || val > 255)
return -EINVAL;
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index 4033974..89a6b9d 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -238,7 +238,7 @@ static ssize_t set_temp(
int ix = to_sensor_dev_attr(attr)->index;
long val;
- int ret = strict_strtol(buf, 10, &val);
+ int ret = kstrtol(buf, 10, &val);
if (ret)
return ret;
val = SENSORS_LIMIT(val / 1000, -128, 127);
@@ -327,7 +327,7 @@ static ssize_t set_pwm1(
struct i2c_client *client = to_i2c_client(dev);
struct amc6821_data *data = i2c_get_clientdata(client);
long val;
- int ret = strict_strtol(buf, 10, &val);
+ int ret = kstrtol(buf, 10, &val);
if (ret)
return ret;
@@ -356,7 +356,7 @@ static ssize_t set_pwm1_enable(
struct i2c_client *client = to_i2c_client(dev);
struct amc6821_data *data = i2c_get_clientdata(client);
long val;
- int config = strict_strtol(buf, 10, &val);
+ int config = kstrtol(buf, 10, &val);
if (config)
return config;
@@ -477,7 +477,7 @@ static ssize_t set_temp_auto_point_temp(
u8 reg;
int dpwm;
long val;
- int ret = strict_strtol(buf, 10, &val);
+ int ret = kstrtol(buf, 10, &val);
if (ret)
return ret;
@@ -556,7 +556,7 @@ static ssize_t set_pwm1_auto_point_pwm(
struct amc6821_data *data = i2c_get_clientdata(client);
int dpwm;
long val;
- int ret = strict_strtol(buf, 10, &val);
+ int ret = kstrtol(buf, 10, &val);
if (ret)
return ret;
@@ -623,7 +623,7 @@ static ssize_t set_fan(
struct amc6821_data *data = i2c_get_clientdata(client);
long val;
int ix = to_sensor_dev_attr(attr)->index;
- int ret = strict_strtol(buf, 10, &val);
+ int ret = kstrtol(buf, 10, &val);
if (ret)
return ret;
val = 1 > val ? 0xFFFF : 6000000/val;
@@ -665,7 +665,7 @@ static ssize_t set_fan1_div(
struct i2c_client *client = to_i2c_client(dev);
struct amc6821_data *data = i2c_get_clientdata(client);
long val;
- int config = strict_strtol(buf, 10, &val);
+ int config = kstrtol(buf, 10, &val);
if (config)
return config;
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 4c07436..b989553 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -782,7 +782,7 @@ static ssize_t applesmc_store_fan_speed(struct device *dev,
char newkey[5];
u8 buffer[2];
- if (strict_strtoul(sysfsbuf, 10, &speed) < 0 || speed >= 0x4000)
+ if (kstrtoul(sysfsbuf, 10, &speed) < 0 || speed >= 0x4000)
return -EINVAL; /* Bigger than a 14-bit value */
sprintf(newkey, fan_speed_fmt[to_option(attr)], to_index(attr));
@@ -822,7 +822,7 @@ static ssize_t applesmc_store_fan_manual(struct device *dev,
unsigned long input;
u16 val;
- if (strict_strtoul(sysfsbuf, 10, &input) < 0)
+ if (kstrtoul(sysfsbuf, 10, &input) < 0)
return -EINVAL;
ret = applesmc_read_key(FANS_MANUAL, buffer, 2);
@@ -977,7 +977,7 @@ static ssize_t applesmc_key_at_index_store(struct device *dev,
{
unsigned long newkey;
- if (strict_strtoul(sysfsbuf, 10, &newkey) < 0
+ if (kstrtoul(sysfsbuf, 10, &newkey) < 0
|| newkey >= smcreg.key_count)
return -EINVAL;
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
index d2596ce..3efd324 100644
--- a/drivers/hwmon/asc7621.c
+++ b/drivers/hwmon/asc7621.c
@@ -188,7 +188,7 @@ static ssize_t store_u8(struct device *dev, struct device_attribute *attr,
SETUP_STORE_data_param(dev, attr);
long reqval;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
reqval = SENSORS_LIMIT(reqval, 0, 255);
@@ -221,7 +221,7 @@ static ssize_t store_bitmask(struct device *dev,
long reqval;
u8 currval;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
reqval = SENSORS_LIMIT(reqval, 0, param->mask[0]);
@@ -265,7 +265,7 @@ static ssize_t store_fan16(struct device *dev,
SETUP_STORE_data_param(dev, attr);
long reqval;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
/* If a minimum RPM of zero is requested, then we set the register to
@@ -338,7 +338,7 @@ static ssize_t store_in8(struct device *dev, struct device_attribute *attr,
long reqval;
u8 nr = sda->index;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
reqval = SENSORS_LIMIT(reqval, 0, 0xffff);
@@ -371,7 +371,7 @@ static ssize_t store_temp8(struct device *dev,
long reqval;
s8 temp;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
reqval = SENSORS_LIMIT(reqval, -127000, 127000);
@@ -427,7 +427,7 @@ static ssize_t store_temp62(struct device *dev,
long reqval, i, f;
s8 temp;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
reqval = SENSORS_LIMIT(reqval, -32000, 31750);
@@ -482,7 +482,7 @@ static ssize_t store_ap2_temp(struct device *dev,
int i;
u8 currval, newval = 0;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -538,7 +538,7 @@ static ssize_t store_pwm_ac(struct device *dev,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03,
};
- if (strict_strtoul(buf, 10, &reqval))
+ if (kstrtoul(buf, 10, &reqval))
return -EINVAL;
if (reqval > 31)
@@ -601,7 +601,7 @@ static ssize_t store_pwm_enable(struct device *dev,
long reqval;
u8 currval, config, altbit, newval, minoff = 255;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
switch (reqval) {
@@ -675,7 +675,7 @@ static ssize_t store_pwm_freq(struct device *dev,
u8 currval, newval = 255;
int i;
- if (strict_strtoul(buf, 10, &reqval))
+ if (kstrtoul(buf, 10, &reqval))
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(asc7621_pwm_freq_map); i++) {
@@ -724,7 +724,7 @@ static ssize_t store_pwm_ast(struct device *dev,
u8 currval, newval = 255;
u32 i;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(asc7621_pwm_auto_spinup_map); i++) {
@@ -771,7 +771,7 @@ static ssize_t store_temp_st(struct device *dev,
u8 currval, newval = 255;
u32 i;
- if (strict_strtol(buf, 10, &reqval))
+ if (kstrtol(buf, 10, &reqval))
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(asc7621_temp_smoothing_time_map); i++) {
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 104b376..a6c6ec3 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -57,16 +57,15 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
#define TOTAL_ATTRS (MAX_CORE_ATTRS + 1)
#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
-#ifdef CONFIG_SMP
#define TO_PHYS_ID(cpu) cpu_data(cpu).phys_proc_id
#define TO_CORE_ID(cpu) cpu_data(cpu).cpu_core_id
+#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
+
+#ifdef CONFIG_SMP
#define for_each_sibling(i, cpu) for_each_cpu(i, cpu_sibling_mask(cpu))
#else
-#define TO_PHYS_ID(cpu) (cpu)
-#define TO_CORE_ID(cpu) (cpu)
#define for_each_sibling(i, cpu) for (i = 0; false; )
#endif
-#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
/*
* Per-Core Temperature Data
@@ -191,7 +190,8 @@ static ssize_t show_temp(struct device *dev,
return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN;
}
-static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
+static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
+ struct device *dev)
{
/* The 100C is default for both mobile and non mobile CPUs */
@@ -285,7 +285,8 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
return tjmax;
}
-static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
+static int __cpuinit get_tjmax(struct cpuinfo_x86 *c, u32 id,
+ struct device *dev)
{
int err;
u32 eax, edx;
@@ -324,7 +325,8 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
return adjust_tjmax(c, id, dev);
}
-static int create_name_attr(struct platform_data *pdata, struct device *dev)
+static int __devinit create_name_attr(struct platform_data *pdata,
+ struct device *dev)
{
sysfs_attr_init(&pdata->name_attr.attr);
pdata->name_attr.attr.name = "name";
@@ -333,8 +335,8 @@ static int create_name_attr(struct platform_data *pdata, struct device *dev)
return device_create_file(dev, &pdata->name_attr);
}
-static int create_core_attrs(struct temp_data *tdata, struct device *dev,
- int attr_no)
+static int __cpuinit create_core_attrs(struct temp_data *tdata,
+ struct device *dev, int attr_no)
{
int err, i;
static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
@@ -384,7 +386,7 @@ static int __cpuinit chk_ucode_version(unsigned int cpu)
return 0;
}
-static struct platform_device *coretemp_get_pdev(unsigned int cpu)
+static struct platform_device __cpuinit *coretemp_get_pdev(unsigned int cpu)
{
u16 phys_proc_id = TO_PHYS_ID(cpu);
struct pdev_entry *p;
@@ -401,7 +403,8 @@ static struct platform_device *coretemp_get_pdev(unsigned int cpu)
return NULL;
}
-static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
+static struct temp_data __cpuinit *init_temp_data(unsigned int cpu,
+ int pkg_flag)
{
struct temp_data *tdata;
@@ -419,7 +422,7 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
return tdata;
}
-static int create_core_data(struct platform_device *pdev,
+static int __cpuinit create_core_data(struct platform_device *pdev,
unsigned int cpu, int pkg_flag)
{
struct temp_data *tdata;
@@ -490,7 +493,7 @@ exit_free:
return err;
}
-static void coretemp_add_core(unsigned int cpu, int pkg_flag)
+static void __cpuinit coretemp_add_core(unsigned int cpu, int pkg_flag)
{
struct platform_device *pdev = coretemp_get_pdev(cpu);
int err;
@@ -619,7 +622,7 @@ exit:
return err;
}
-static void coretemp_device_remove(unsigned int cpu)
+static void __cpuinit coretemp_device_remove(unsigned int cpu)
{
struct pdev_entry *p, *n;
u16 phys_proc_id = TO_PHYS_ID(cpu);
@@ -635,7 +638,7 @@ static void coretemp_device_remove(unsigned int cpu)
mutex_unlock(&pdev_list_mutex);
}
-static bool is_any_core_online(struct platform_data *pdata)
+static bool __cpuinit is_any_core_online(struct platform_data *pdata)
{
int i;
diff --git a/drivers/hwmon/dme1737.c b/drivers/hwmon/dme1737.c
index d9c5927..ffb229a 100644
--- a/drivers/hwmon/dme1737.c
+++ b/drivers/hwmon/dme1737.c
@@ -45,7 +45,7 @@
static struct platform_device *pdev;
/* Module load parameters */
-static int force_start;
+static bool force_start;
module_param(force_start, bool, 0);
MODULE_PARM_DESC(force_start, "Force the chip to start monitoring inputs");
@@ -53,7 +53,7 @@ static unsigned short force_id;
module_param(force_id, ushort, 0);
MODULE_PARM_DESC(force_id, "Override the detected device ID");
-static int probe_all_addr;
+static bool probe_all_addr;
module_param(probe_all_addr, bool, 0);
MODULE_PARM_DESC(probe_all_addr, "Include probing of non-standard LPC "
"addresses");
@@ -1223,7 +1223,7 @@ static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
}
static struct attribute *dme1737_pwm_chmod_attr[];
-static void dme1737_chmod_file(struct device*, struct attribute*, mode_t);
+static void dme1737_chmod_file(struct device*, struct attribute*, umode_t);
static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
@@ -1961,7 +1961,7 @@ static inline void dme1737_sio_outb(int sio_cip, int reg, int val)
static int dme1737_i2c_get_features(int, struct dme1737_data*);
static void dme1737_chmod_file(struct device *dev,
- struct attribute *attr, mode_t mode)
+ struct attribute *attr, umode_t mode)
{
if (sysfs_chmod_file(&dev->kobj, attr, mode)) {
dev_warn(dev, "Failed to change permissions of %s.\n",
@@ -1971,7 +1971,7 @@ static void dme1737_chmod_file(struct device *dev,
static void dme1737_chmod_group(struct device *dev,
const struct attribute_group *group,
- mode_t mode)
+ umode_t mode)
{
struct attribute **attr;
diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c
index 225ae4f..300c3d4 100644
--- a/drivers/hwmon/ds620.c
+++ b/drivers/hwmon/ds620.c
@@ -161,7 +161,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
struct i2c_client *client = to_i2c_client(dev);
struct ds620_data *data = i2c_get_clientdata(client);
- res = strict_strtol(buf, 10, &val);
+ res = kstrtol(buf, 10, &val);
if (res)
return res;
diff --git a/drivers/hwmon/emc1403.c b/drivers/hwmon/emc1403.c
index cd2a6e4..270ffab 100644
--- a/drivers/hwmon/emc1403.c
+++ b/drivers/hwmon/emc1403.c
@@ -80,7 +80,7 @@ static ssize_t store_temp(struct device *dev,
unsigned long val;
int retval;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtoul(buf, 10, &val))
return -EINVAL;
retval = i2c_smbus_write_byte_data(client, sda->index,
DIV_ROUND_CLOSEST(val, 1000));
@@ -98,7 +98,7 @@ static ssize_t store_bit(struct device *dev,
unsigned long val;
int retval;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtoul(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->mutex);
@@ -151,7 +151,7 @@ static ssize_t store_hyst(struct device *dev,
int hyst;
unsigned long val;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtoul(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->mutex);
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index af914ad..8650639 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -55,7 +55,7 @@ static const u8 REG_TEMP_MAX[4] = { 0x34, 0x30, 0x31, 0x32 };
* it. Default is to leave the device in the state it's already in (-1).
* This parameter allows APD mode to be optionally forced on or off */
static int apd = -1;
-module_param(apd, bool, 0);
+module_param(apd, bint, 0);
MODULE_PARM_DESC(init, "Set to zero to disable anti-parallel diode mode");
struct temperature {
@@ -244,7 +244,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *da,
struct emc2103_data *data = i2c_get_clientdata(client);
long val;
- int result = strict_strtol(buf, 10, &val);
+ int result = kstrtol(buf, 10, &val);
if (result < 0)
return -EINVAL;
@@ -268,7 +268,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *da,
struct emc2103_data *data = i2c_get_clientdata(client);
long val;
- int result = strict_strtol(buf, 10, &val);
+ int result = kstrtol(buf, 10, &val);
if (result < 0)
return -EINVAL;
@@ -314,7 +314,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *da,
int new_range_bits, old_div = 8 / data->fan_multiplier;
long new_div;
- int status = strict_strtol(buf, 10, &new_div);
+ int status = kstrtol(buf, 10, &new_div);
if (status < 0)
return -EINVAL;
@@ -388,7 +388,7 @@ static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
struct i2c_client *client = to_i2c_client(dev);
long rpm_target;
- int result = strict_strtol(buf, 10, &rpm_target);
+ int result = kstrtol(buf, 10, &rpm_target);
if (result < 0)
return -EINVAL;
@@ -434,7 +434,7 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da,
long new_value;
u8 conf_reg;
- int result = strict_strtol(buf, 10, &new_value);
+ int result = kstrtol(buf, 10, &new_value);
if (result < 0)
return -EINVAL;
diff --git a/drivers/hwmon/emc6w201.c b/drivers/hwmon/emc6w201.c
index 0064432..6ebb9b7 100644
--- a/drivers/hwmon/emc6w201.c
+++ b/drivers/hwmon/emc6w201.c
@@ -212,7 +212,7 @@ static ssize_t set_in(struct device *dev, struct device_attribute *devattr,
long val;
u8 reg;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err < 0)
return err;
@@ -249,7 +249,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *devattr,
long val;
u8 reg;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err < 0)
return err;
@@ -291,7 +291,7 @@ static ssize_t set_fan(struct device *dev, struct device_attribute *devattr,
int err;
unsigned long val;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
index 92f9497..6dbfd3e 100644
--- a/drivers/hwmon/f71805f.c
+++ b/drivers/hwmon/f71805f.c
@@ -283,11 +283,11 @@ static inline long temp_from_reg(u8 reg)
static inline u8 temp_to_reg(long val)
{
- if (val < 0)
- val = 0;
- else if (val > 1000 * 0xff)
- val = 0xff;
- return ((val + 500) / 1000);
+ if (val <= 0)
+ return 0;
+ if (val >= 1000 * 0xff)
+ return 0xff;
+ return (val + 500) / 1000;
}
/*
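
The rewritten temp_to_reg() clamps to the register range first and then rounds millidegrees to the nearest whole degree, using early returns instead of reassigning val. A small user-space check of the same arithmetic, for illustration only:

#include <stdio.h>

/* User-space copy of the rewritten temp_to_reg(): clamp, then round
 * millidegrees Celsius to the nearest whole degree (1 LSB = 1 degree). */
static unsigned char temp_to_reg(long val)
{
	if (val <= 0)
		return 0;
	if (val >= 1000 * 0xff)
		return 0xff;
	return (val + 500) / 1000;
}

int main(void)
{
	printf("%d\n", temp_to_reg(-5000));	/* 0, clamped low */
	printf("%d\n", temp_to_reg(37499));	/* 37, rounds down */
	printf("%d\n", temp_to_reg(37500));	/* 38, rounds up */
	printf("%d\n", temp_to_reg(300000));	/* 255, clamped high */
	return 0;
}
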
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 59dd881..e503058 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -1333,7 +1333,7 @@ static ssize_t store_fan_full_speed(struct device *dev,
int err, nr = to_sensor_dev_attr_2(devattr)->index;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -1367,7 +1367,7 @@ static ssize_t store_fan_beep(struct device *dev, struct device_attribute
int err, nr = to_sensor_dev_attr_2(devattr)->index;
unsigned long val;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err)
return err;
@@ -1420,7 +1420,7 @@ static ssize_t store_in_max(struct device *dev, struct device_attribute
int err;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -1454,7 +1454,7 @@ static ssize_t store_in_beep(struct device *dev, struct device_attribute
int err, nr = to_sensor_dev_attr_2(devattr)->index;
unsigned long val;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err)
return err;
@@ -1524,7 +1524,7 @@ static ssize_t store_temp_max(struct device *dev, struct device_attribute
int err, nr = to_sensor_dev_attr_2(devattr)->index;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -1566,7 +1566,7 @@ static ssize_t store_temp_max_hyst(struct device *dev, struct device_attribute
u8 reg;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -1609,7 +1609,7 @@ static ssize_t store_temp_crit(struct device *dev, struct device_attribute
int err, nr = to_sensor_dev_attr_2(devattr)->index;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -1670,7 +1670,7 @@ static ssize_t store_temp_beep(struct device *dev, struct device_attribute
int err, nr = to_sensor_dev_attr_2(devattr)->index;
unsigned long val;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err)
return err;
@@ -1737,7 +1737,7 @@ static ssize_t store_pwm(struct device *dev,
int err, nr = to_sensor_dev_attr_2(devattr)->index;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -1788,7 +1788,7 @@ static ssize_t store_simple_pwm(struct device *dev,
int err, nr = to_sensor_dev_attr_2(devattr)->index;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -1835,7 +1835,7 @@ static ssize_t store_pwm_enable(struct device *dev, struct device_attribute
int err, nr = to_sensor_dev_attr_2(devattr)->index;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -1915,7 +1915,7 @@ static ssize_t store_pwm_auto_point_pwm(struct device *dev,
int point = to_sensor_dev_attr_2(devattr)->nr;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -1969,7 +1969,7 @@ static ssize_t store_pwm_auto_point_temp_hyst(struct device *dev,
u8 reg;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -2015,7 +2015,7 @@ static ssize_t store_pwm_interpolate(struct device *dev,
int err, nr = to_sensor_dev_attr_2(devattr)->index;
unsigned long val;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err)
return err;
@@ -2055,7 +2055,7 @@ static ssize_t store_pwm_auto_point_channel(struct device *dev,
int err, nr = to_sensor_dev_attr_2(devattr)->index;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
@@ -2106,7 +2106,7 @@ static ssize_t store_pwm_auto_point_temp(struct device *dev,
int point = to_sensor_dev_attr_2(devattr)->nr;
long val;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err)
return err;
diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c
index 95cbfb3..6aa5a9f 100644
--- a/drivers/hwmon/f75375s.c
+++ b/drivers/hwmon/f75375s.c
@@ -1,16 +1,19 @@
/*
- * f75375s.c - driver for the Fintek F75375/SP and F75373
- * hardware monitoring features
+ * f75375s.c - driver for the Fintek F75375/SP, F75373 and
+ * F75387SG/RG hardware monitoring features
* Copyright (C) 2006-2007 Riku Voipio
*
* Datasheets available at:
*
* f75375:
- * http://www.fintek.com.tw/files/productfiles/F75375_V026P.pdf
+ * http://www.fintek.com.tw/files/productfiles/F75375_V026P.pdf
*
* f75373:
* http://www.fintek.com.tw/files/productfiles/F75373_V025P.pdf
*
+ * f75387:
+ * http://www.fintek.com.tw/files/productfiles/F75387_V027P.pdf
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -40,7 +43,7 @@
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2d, 0x2e, I2C_CLIENT_END };
-enum chips { f75373, f75375 };
+enum chips { f75373, f75375, f75387 };
/* Fintek F75375 registers */
#define F75375_REG_CONFIG0 0x0
@@ -59,6 +62,7 @@ enum chips { f75373, f75375 };
#define F75375_REG_VOLT_LOW(nr) (0x21 + (nr) * 2)
#define F75375_REG_TEMP(nr) (0x14 + (nr))
+#define F75387_REG_TEMP11_LSB(nr) (0x1a + (nr))
#define F75375_REG_TEMP_HIGH(nr) (0x28 + (nr) * 2)
#define F75375_REG_TEMP_HYST(nr) (0x29 + (nr) * 2)
@@ -78,8 +82,11 @@ enum chips { f75373, f75375 };
#define F75375_REG_PWM1_DROP_DUTY 0x6B
#define F75375_REG_PWM2_DROP_DUTY 0x6C
-#define FAN_CTRL_LINEAR(nr) (4 + nr)
+#define F75375_FAN_CTRL_LINEAR(nr) (4 + nr)
+#define F75387_FAN_CTRL_LINEAR(nr) (1 + ((nr) * 4))
#define FAN_CTRL_MODE(nr) (4 + ((nr) * 2))
+#define F75387_FAN_DUTY_MODE(nr) (2 + ((nr) * 4))
+#define F75387_FAN_MANU_MODE(nr) ((nr) * 4)
/*
* Data structures and manipulation thereof
@@ -102,13 +109,18 @@ struct f75375_data {
u8 in_min[4];
u16 fan[2];
u16 fan_min[2];
- u16 fan_full[2];
- u16 fan_exp[2];
+ u16 fan_max[2];
+ u16 fan_target[2];
u8 fan_timer;
u8 pwm[2];
u8 pwm_mode[2];
u8 pwm_enable[2];
- s8 temp[2];
+ /*
+ * f75387: For remote temperature reading, it uses signed 11-bit
+ * values with LSB = 0.125 degree Celsius, left-justified in 16-bit
+	 * registers. For original 8-bit temp readings, the LSB is simply 0.
+ */
+ s16 temp11[2];
s8 temp_high[2];
s8 temp_max_hyst[2];
};
@@ -122,6 +134,7 @@ static int f75375_remove(struct i2c_client *client);
static const struct i2c_device_id f75375_id[] = {
{ "f75373", f75373 },
{ "f75375", f75375 },
+ { "f75387", f75387 },
{ }
};
MODULE_DEVICE_TABLE(i2c, f75375_id);
@@ -146,8 +159,8 @@ static inline int f75375_read8(struct i2c_client *client, u8 reg)
/* in most cases, should be called while holding update_lock */
static inline u16 f75375_read16(struct i2c_client *client, u8 reg)
{
- return ((i2c_smbus_read_byte_data(client, reg) << 8)
- | i2c_smbus_read_byte_data(client, reg + 1));
+ return (i2c_smbus_read_byte_data(client, reg) << 8)
+ | i2c_smbus_read_byte_data(client, reg + 1);
}
static inline void f75375_write8(struct i2c_client *client, u8 reg,
@@ -159,12 +172,22 @@ static inline void f75375_write8(struct i2c_client *client, u8 reg,
static inline void f75375_write16(struct i2c_client *client, u8 reg,
u16 value)
{
- int err = i2c_smbus_write_byte_data(client, reg, (value << 8));
+ int err = i2c_smbus_write_byte_data(client, reg, (value >> 8));
if (err)
return;
i2c_smbus_write_byte_data(client, reg + 1, (value & 0xFF));
}
+static void f75375_write_pwm(struct i2c_client *client, int nr)
+{
+ struct f75375_data *data = i2c_get_clientdata(client);
+ if (data->kind == f75387)
+ f75375_write16(client, F75375_REG_FAN_EXP(nr), data->pwm[nr]);
+ else
+ f75375_write8(client, F75375_REG_FAN_PWM_DUTY(nr),
+ data->pwm[nr]);
+}
+
static struct f75375_data *f75375_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
@@ -181,15 +204,12 @@ static struct f75375_data *f75375_update_device(struct device *dev)
f75375_read8(client, F75375_REG_TEMP_HIGH(nr));
data->temp_max_hyst[nr] =
f75375_read8(client, F75375_REG_TEMP_HYST(nr));
- data->fan_full[nr] =
+ data->fan_max[nr] =
f75375_read16(client, F75375_REG_FAN_FULL(nr));
data->fan_min[nr] =
f75375_read16(client, F75375_REG_FAN_MIN(nr));
- data->fan_exp[nr] =
+ data->fan_target[nr] =
f75375_read16(client, F75375_REG_FAN_EXP(nr));
- data->pwm[nr] = f75375_read8(client,
- F75375_REG_FAN_PWM_DUTY(nr));
-
}
for (nr = 0; nr < 4; nr++) {
data->in_max[nr] =
@@ -205,8 +225,16 @@ static struct f75375_data *f75375_update_device(struct device *dev)
if (time_after(jiffies, data->last_updated + 2 * HZ)
|| !data->valid) {
for (nr = 0; nr < 2; nr++) {
- data->temp[nr] =
- f75375_read8(client, F75375_REG_TEMP(nr));
+ data->pwm[nr] = f75375_read8(client,
+ F75375_REG_FAN_PWM_DUTY(nr));
+ /* assign MSB, therefore shift it by 8 bits */
+ data->temp11[nr] =
+ f75375_read8(client, F75375_REG_TEMP(nr)) << 8;
+ if (data->kind == f75387)
+ /* merge F75387's temperature LSB (11-bit) */
+ data->temp11[nr] |=
+ f75375_read8(client,
+ F75387_REG_TEMP11_LSB(nr));
data->fan[nr] =
f75375_read16(client, F75375_REG_FAN(nr));
}
@@ -226,14 +254,44 @@ static inline u16 rpm_from_reg(u16 reg)
{
if (reg == 0 || reg == 0xffff)
return 0;
- return (1500000 / reg);
+ return 1500000 / reg;
}
static inline u16 rpm_to_reg(int rpm)
{
if (rpm < 367 || rpm > 0xffff)
return 0xffff;
- return (1500000 / rpm);
+ return 1500000 / rpm;
+}
+
+static bool duty_mode_enabled(u8 pwm_enable)
+{
+ switch (pwm_enable) {
+ case 0: /* Manual, duty mode (full speed) */
+ case 1: /* Manual, duty mode */
+ case 4: /* Auto, duty mode */
+ return true;
+ case 2: /* Auto, speed mode */
+ case 3: /* Manual, speed mode */
+ return false;
+ default:
+ BUG();
+ }
+}
+
+static bool auto_mode_enabled(u8 pwm_enable)
+{
+ switch (pwm_enable) {
+ case 0: /* Manual, duty mode (full speed) */
+ case 1: /* Manual, duty mode */
+ case 3: /* Manual, speed mode */
+ return false;
+ case 2: /* Auto, speed mode */
+ case 4: /* Auto, duty mode */
+ return true;
+ default:
+ BUG();
+ }
}
static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
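
The new duty_mode_enabled()/auto_mode_enabled() helpers classify the five pwm_enable values (0/1 manual duty, 2 automatic speed, 3 manual speed, 4 automatic duty), and the fan registers hold a period count such that RPM = 1500000 / reg, which makes rpm_from_reg() and rpm_to_reg() near-inverses of each other. A user-space model of the RPM conversion, illustrative only:

#include <stdio.h>

/* RPM <-> register model from the patch: the register holds 1500000 / RPM,
 * so the conversion is its own inverse (0 and 0xffff mean "no reading"). */
static unsigned int rpm_from_reg(unsigned int reg)
{
	if (reg == 0 || reg == 0xffff)
		return 0;
	return 1500000 / reg;
}

static unsigned int rpm_to_reg(int rpm)
{
	if (rpm < 367 || rpm > 0xffff)
		return 0xffff;
	return 1500000 / rpm;
}

int main(void)
{
	unsigned int reg = rpm_to_reg(3000);	/* 500 */

	printf("3000 rpm -> reg %u -> %u rpm\n", reg, rpm_from_reg(reg));
	printf("100 rpm  -> reg 0x%x (out of range)\n", rpm_to_reg(100));
	return 0;
}
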
@@ -242,7 +300,12 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
- int val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
mutex_lock(&data->update_lock);
data->fan_min[nr] = rpm_to_reg(val);
@@ -251,17 +314,27 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t set_fan_exp(struct device *dev, struct device_attribute *attr,
+static ssize_t set_fan_target(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
- int val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+
+ if (auto_mode_enabled(data->pwm_enable[nr]))
+ return -EINVAL;
+ if (data->kind == f75387 && duty_mode_enabled(data->pwm_enable[nr]))
+ return -EINVAL;
mutex_lock(&data->update_lock);
- data->fan_exp[nr] = rpm_to_reg(val);
- f75375_write16(client, F75375_REG_FAN_EXP(nr), data->fan_exp[nr]);
+ data->fan_target[nr] = rpm_to_reg(val);
+ f75375_write16(client, F75375_REG_FAN_EXP(nr), data->fan_target[nr]);
mutex_unlock(&data->update_lock);
return count;
}
@@ -272,11 +345,20 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
- int val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+
+ if (auto_mode_enabled(data->pwm_enable[nr]) ||
+ !duty_mode_enabled(data->pwm_enable[nr]))
+ return -EINVAL;
mutex_lock(&data->update_lock);
data->pwm[nr] = SENSORS_LIMIT(val, 0, 255);
- f75375_write8(client, F75375_REG_FAN_PWM_DUTY(nr), data->pwm[nr]);
+ f75375_write_pwm(client, nr);
mutex_unlock(&data->update_lock);
return count;
}
@@ -298,26 +380,58 @@ static int set_pwm_enable_direct(struct i2c_client *client, int nr, int val)
return -EINVAL;
fanmode = f75375_read8(client, F75375_REG_FAN_TIMER);
- fanmode &= ~(3 << FAN_CTRL_MODE(nr));
-
- switch (val) {
- case 0: /* Full speed */
- fanmode |= (3 << FAN_CTRL_MODE(nr));
- data->pwm[nr] = 255;
- f75375_write8(client, F75375_REG_FAN_PWM_DUTY(nr),
- data->pwm[nr]);
- break;
- case 1: /* PWM */
- fanmode |= (3 << FAN_CTRL_MODE(nr));
- break;
- case 2: /* AUTOMATIC*/
- fanmode |= (2 << FAN_CTRL_MODE(nr));
- break;
- case 3: /* fan speed */
- break;
+ if (data->kind == f75387) {
+ /* For now, deny dangerous toggling of duty mode */
+ if (duty_mode_enabled(data->pwm_enable[nr]) !=
+ duty_mode_enabled(val))
+ return -EOPNOTSUPP;
+ /* clear each fanX_mode bit before setting them properly */
+ fanmode &= ~(1 << F75387_FAN_DUTY_MODE(nr));
+ fanmode &= ~(1 << F75387_FAN_MANU_MODE(nr));
+ switch (val) {
+ case 0: /* full speed */
+ fanmode |= (1 << F75387_FAN_MANU_MODE(nr));
+ fanmode |= (1 << F75387_FAN_DUTY_MODE(nr));
+ data->pwm[nr] = 255;
+ break;
+ case 1: /* PWM */
+ fanmode |= (1 << F75387_FAN_MANU_MODE(nr));
+ fanmode |= (1 << F75387_FAN_DUTY_MODE(nr));
+ break;
+ case 2: /* Automatic, speed mode */
+ break;
+ case 3: /* fan speed */
+ fanmode |= (1 << F75387_FAN_MANU_MODE(nr));
+ break;
+ case 4: /* Automatic, pwm */
+ fanmode |= (1 << F75387_FAN_DUTY_MODE(nr));
+ break;
+ }
+ } else {
+ /* clear each fanX_mode bit before setting them properly */
+ fanmode &= ~(3 << FAN_CTRL_MODE(nr));
+ switch (val) {
+ case 0: /* full speed */
+ fanmode |= (3 << FAN_CTRL_MODE(nr));
+ data->pwm[nr] = 255;
+ break;
+ case 1: /* PWM */
+ fanmode |= (3 << FAN_CTRL_MODE(nr));
+ break;
+ case 2: /* AUTOMATIC*/
+ fanmode |= (1 << FAN_CTRL_MODE(nr));
+ break;
+ case 3: /* fan speed */
+ break;
+ case 4: /* Automatic pwm */
+ return -EINVAL;
+ }
}
+
f75375_write8(client, F75375_REG_FAN_TIMER, fanmode);
data->pwm_enable[nr] = val;
+ if (val == 0)
+ f75375_write_pwm(client, nr);
return 0;
}
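
On the F75387, each fan's control mode is encoded in two bits of the FAN_TIMER register: a manual-mode bit at F75387_FAN_MANU_MODE(nr) and a duty-mode bit at F75387_FAN_DUTY_MODE(nr). The switch above sets those bits for each pwm_enable value; the sketch below models the reverse decode, mirroring the probe-time initialization added further down in this file (user-space, illustrative only):

#include <stdbool.h>
#include <stdio.h>

#define F75387_FAN_DUTY_MODE(nr)	(2 + ((nr) * 4))
#define F75387_FAN_MANU_MODE(nr)	((nr) * 4)

/* Decode the two per-fan mode bits back into the driver's pwm_enable
 * value, mirroring the probe-time logic added later in the patch. */
static int f75387_decode_pwm_enable(unsigned char mode, int nr)
{
	bool manu = (mode >> F75387_FAN_MANU_MODE(nr)) & 1;
	bool duty = (mode >> F75387_FAN_DUTY_MODE(nr)) & 1;

	if (!manu && duty)
		return 4;	/* automatic, duty (PWM) mode */
	if (manu && !duty)
		return 3;	/* manual, speed mode */
	if (!manu && !duty)
		return 2;	/* automatic, speed mode */
	return 1;		/* manual, duty (PWM) mode */
}

int main(void)
{
	/* fan 1 set to manual PWM, fan 2 bits left clear */
	unsigned char mode = (1 << F75387_FAN_MANU_MODE(0)) |
			     (1 << F75387_FAN_DUTY_MODE(0));

	printf("fan1 pwm_enable = %d\n", f75387_decode_pwm_enable(mode, 0));
	printf("fan2 pwm_enable = %d\n", f75387_decode_pwm_enable(mode, 1));
	return 0;
}
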
@@ -327,8 +441,12 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *attr,
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
- int val = simple_strtoul(buf, NULL, 10);
- int err = 0;
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
mutex_lock(&data->update_lock);
err = set_pwm_enable_direct(client, nr, val);
@@ -342,20 +460,39 @@ static ssize_t set_pwm_mode(struct device *dev, struct device_attribute *attr,
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
- int val = simple_strtoul(buf, NULL, 10);
- u8 conf = 0;
+ unsigned long val;
+ int err;
+ u8 conf;
+ char reg, ctrl;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
if (!(val == 0 || val == 1))
return -EINVAL;
+ /* F75373 does not support DC (linear voltage) fan control mode */
+ if (data->kind == f75373 && val == 0)
+ return -EINVAL;
+
+	/* pick the register that holds the linear-mode bit for this chip */
+ if (data->kind == f75387) {
+ reg = F75375_REG_FAN_TIMER;
+ ctrl = F75387_FAN_CTRL_LINEAR(nr);
+ } else {
+ reg = F75375_REG_CONFIG1;
+ ctrl = F75375_FAN_CTRL_LINEAR(nr);
+ }
+
mutex_lock(&data->update_lock);
- conf = f75375_read8(client, F75375_REG_CONFIG1);
- conf &= ~(1 << FAN_CTRL_LINEAR(nr));
+ conf = f75375_read8(client, reg);
+ conf &= ~(1 << ctrl);
if (val == 0)
- conf |= (1 << FAN_CTRL_LINEAR(nr)) ;
+ conf |= (1 << ctrl);
- f75375_write8(client, F75375_REG_CONFIG1, conf);
+ f75375_write8(client, reg, conf);
data->pwm_mode[nr] = val;
mutex_unlock(&data->update_lock);
return count;
@@ -410,7 +547,13 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
- int val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+
val = SENSORS_LIMIT(VOLT_TO_REG(val), 0, 0xff);
mutex_lock(&data->update_lock);
data->in_max[nr] = val;
@@ -425,7 +568,13 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
- int val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+
val = SENSORS_LIMIT(VOLT_TO_REG(val), 0, 0xff);
mutex_lock(&data->update_lock);
data->in_min[nr] = val;
@@ -435,13 +584,14 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
}
#define TEMP_FROM_REG(val) ((val) * 1000)
#define TEMP_TO_REG(val) ((val) / 1000)
+#define TEMP11_FROM_REG(reg) ((reg) / 32 * 125)
-static ssize_t show_temp(struct device *dev, struct device_attribute *attr,
+static ssize_t show_temp11(struct device *dev, struct device_attribute *attr,
char *buf)
{
int nr = to_sensor_dev_attr(attr)->index;
struct f75375_data *data = f75375_update_device(dev);
- return sprintf(buf, "%d\n", TEMP_FROM_REG(data->temp[nr]));
+ return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[nr]));
}
static ssize_t show_temp_max(struct device *dev, struct device_attribute *attr,
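
The F75387's remote temperature is a left-justified signed 11-bit value with a 0.125 degree Celsius LSB, so TEMP11_FROM_REG() divides the 16-bit register by 32 (dropping the unused low bits) and multiplies by 125 to get millidegrees. A worked user-space check of that conversion:

#include <stdio.h>

/* TEMP11_FROM_REG from the patch: the 11-bit reading is left-justified in
 * a 16-bit register, so /32 drops the unused low bits and each remaining
 * step is 125 millidegrees Celsius. */
#define TEMP11_FROM_REG(reg)	((reg) / 32 * 125)

int main(void)
{
	short reg;

	reg = 25 << 8;			/* 25.000 degrees, LSB byte clear */
	printf("%d\n", TEMP11_FROM_REG(reg));	/* 25000 */

	reg = (25 << 8) | 0xe0;		/* 25 + 7 * 0.125 = 25.875 degrees */
	printf("%d\n", TEMP11_FROM_REG(reg));	/* 25875 */

	reg = (short)0xf380;		/* -12.5 degrees, two's complement */
	printf("%d\n", TEMP11_FROM_REG(reg));	/* -12500 */
	return 0;
}
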
@@ -466,7 +616,13 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
- int val = simple_strtol(buf, NULL, 10);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+
val = SENSORS_LIMIT(TEMP_TO_REG(val), 0, 127);
mutex_lock(&data->update_lock);
data->temp_high[nr] = val;
@@ -481,7 +637,13 @@ static ssize_t set_temp_max_hyst(struct device *dev,
int nr = to_sensor_dev_attr(attr)->index;
struct i2c_client *client = to_i2c_client(dev);
struct f75375_data *data = i2c_get_clientdata(client);
- int val = simple_strtol(buf, NULL, 10);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err < 0)
+ return err;
+
val = SENSORS_LIMIT(TEMP_TO_REG(val), 0, 127);
mutex_lock(&data->update_lock);
data->temp_max_hyst[nr] = val;
@@ -502,8 +664,8 @@ static ssize_t show_##thing(struct device *dev, struct device_attribute *attr, \
show_fan(fan);
show_fan(fan_min);
-show_fan(fan_full);
-show_fan(fan_exp);
+show_fan(fan_max);
+show_fan(fan_target);
static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_in, NULL, 0);
static SENSOR_DEVICE_ATTR(in0_max, S_IRUGO|S_IWUSR,
@@ -525,28 +687,28 @@ static SENSOR_DEVICE_ATTR(in3_max, S_IRUGO|S_IWUSR,
show_in_max, set_in_max, 3);
static SENSOR_DEVICE_ATTR(in3_min, S_IRUGO|S_IWUSR,
show_in_min, set_in_min, 3);
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp11, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO|S_IWUSR,
show_temp_max_hyst, set_temp_max_hyst, 0);
static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO|S_IWUSR,
show_temp_max, set_temp_max, 0);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp11, NULL, 1);
static SENSOR_DEVICE_ATTR(temp2_max_hyst, S_IRUGO|S_IWUSR,
show_temp_max_hyst, set_temp_max_hyst, 1);
static SENSOR_DEVICE_ATTR(temp2_max, S_IRUGO|S_IWUSR,
show_temp_max, set_temp_max, 1);
static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan1_full, S_IRUGO, show_fan_full, NULL, 0);
+static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, show_fan_max, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO|S_IWUSR,
show_fan_min, set_fan_min, 0);
-static SENSOR_DEVICE_ATTR(fan1_exp, S_IRUGO|S_IWUSR,
- show_fan_exp, set_fan_exp, 0);
+static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO|S_IWUSR,
+ show_fan_target, set_fan_target, 0);
static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan2_full, S_IRUGO, show_fan_full, NULL, 1);
+static SENSOR_DEVICE_ATTR(fan2_max, S_IRUGO, show_fan_max, NULL, 1);
static SENSOR_DEVICE_ATTR(fan2_min, S_IRUGO|S_IWUSR,
show_fan_min, set_fan_min, 1);
-static SENSOR_DEVICE_ATTR(fan2_exp, S_IRUGO|S_IWUSR,
- show_fan_exp, set_fan_exp, 1);
+static SENSOR_DEVICE_ATTR(fan2_target, S_IRUGO|S_IWUSR,
+ show_fan_target, set_fan_target, 1);
static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO|S_IWUSR,
show_pwm, set_pwm, 0);
static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO|S_IWUSR,
@@ -568,13 +730,13 @@ static struct attribute *f75375_attributes[] = {
&sensor_dev_attr_temp2_max.dev_attr.attr,
&sensor_dev_attr_temp2_max_hyst.dev_attr.attr,
&sensor_dev_attr_fan1_input.dev_attr.attr,
- &sensor_dev_attr_fan1_full.dev_attr.attr,
+ &sensor_dev_attr_fan1_max.dev_attr.attr,
&sensor_dev_attr_fan1_min.dev_attr.attr,
- &sensor_dev_attr_fan1_exp.dev_attr.attr,
+ &sensor_dev_attr_fan1_target.dev_attr.attr,
&sensor_dev_attr_fan2_input.dev_attr.attr,
- &sensor_dev_attr_fan2_full.dev_attr.attr,
+ &sensor_dev_attr_fan2_max.dev_attr.attr,
&sensor_dev_attr_fan2_min.dev_attr.attr,
- &sensor_dev_attr_fan2_exp.dev_attr.attr,
+ &sensor_dev_attr_fan2_target.dev_attr.attr,
&sensor_dev_attr_pwm1.dev_attr.attr,
&sensor_dev_attr_pwm1_enable.dev_attr.attr,
&sensor_dev_attr_pwm1_mode.dev_attr.attr,
@@ -604,12 +766,62 @@ static void f75375_init(struct i2c_client *client, struct f75375_data *data,
struct f75375s_platform_data *f75375s_pdata)
{
int nr;
+
+ if (!f75375s_pdata) {
+ u8 conf, mode;
+ int nr;
+
+ conf = f75375_read8(client, F75375_REG_CONFIG1);
+ mode = f75375_read8(client, F75375_REG_FAN_TIMER);
+ for (nr = 0; nr < 2; nr++) {
+ if (data->kind == f75387) {
+ bool manu, duty;
+
+ if (!(mode & (1 << F75387_FAN_CTRL_LINEAR(nr))))
+ data->pwm_mode[nr] = 1;
+
+ manu = ((mode >> F75387_FAN_MANU_MODE(nr)) & 1);
+ duty = ((mode >> F75387_FAN_DUTY_MODE(nr)) & 1);
+ if (!manu && duty)
+ /* auto, pwm */
+ data->pwm_enable[nr] = 4;
+ else if (manu && !duty)
+ /* manual, speed */
+ data->pwm_enable[nr] = 3;
+ else if (!manu && !duty)
+ /* automatic, speed */
+ data->pwm_enable[nr] = 2;
+ else
+ /* manual, pwm */
+ data->pwm_enable[nr] = 1;
+ } else {
+ if (!(conf & (1 << F75375_FAN_CTRL_LINEAR(nr))))
+ data->pwm_mode[nr] = 1;
+
+ switch ((mode >> FAN_CTRL_MODE(nr)) & 3) {
+ case 0: /* speed */
+ data->pwm_enable[nr] = 3;
+ break;
+ case 1: /* automatic */
+ data->pwm_enable[nr] = 2;
+ break;
+ default: /* manual */
+ data->pwm_enable[nr] = 1;
+ break;
+ }
+ }
+ }
+ return;
+ }
+
set_pwm_enable_direct(client, 0, f75375s_pdata->pwm_enable[0]);
set_pwm_enable_direct(client, 1, f75375s_pdata->pwm_enable[1]);
for (nr = 0; nr < 2; nr++) {
+ if (auto_mode_enabled(f75375s_pdata->pwm_enable[nr]) ||
+ !duty_mode_enabled(f75375s_pdata->pwm_enable[nr]))
+ continue;
data->pwm[nr] = SENSORS_LIMIT(f75375s_pdata->pwm[nr], 0, 255);
- f75375_write8(client, F75375_REG_FAN_PWM_DUTY(nr),
- data->pwm[nr]);
+ f75375_write_pwm(client, nr);
}
}
@@ -624,17 +836,19 @@ static int f75375_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
- if (!(data = kzalloc(sizeof(struct f75375_data), GFP_KERNEL)))
+ data = kzalloc(sizeof(struct f75375_data), GFP_KERNEL);
+ if (!data)
return -ENOMEM;
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
data->kind = id->driver_data;
- if ((err = sysfs_create_group(&client->dev.kobj, &f75375_group)))
+ err = sysfs_create_group(&client->dev.kobj, &f75375_group);
+ if (err)
goto exit_free;
- if (data->kind == f75375) {
+ if (data->kind != f75373) {
err = sysfs_chmod_file(&client->dev.kobj,
&sensor_dev_attr_pwm1_mode.dev_attr.attr,
S_IRUGO | S_IWUSR);
@@ -653,8 +867,7 @@ static int f75375_probe(struct i2c_client *client,
goto exit_remove;
}
- if (f75375s_pdata != NULL)
- f75375_init(client, data, f75375s_pdata);
+ f75375_init(client, data, f75375s_pdata);
return 0;
@@ -685,10 +898,15 @@ static int f75375_detect(struct i2c_client *client,
vendid = f75375_read16(client, F75375_REG_VENDOR);
chipid = f75375_read16(client, F75375_CHIP_ID);
- if (chipid == 0x0306 && vendid == 0x1934)
+ if (vendid != 0x1934)
+ return -ENODEV;
+
+ if (chipid == 0x0306)
name = "f75375";
- else if (chipid == 0x0204 && vendid == 0x1934)
+ else if (chipid == 0x0204)
name = "f75373";
+ else if (chipid == 0x0410)
+ name = "f75387";
else
return -ENODEV;
@@ -711,7 +929,7 @@ static void __exit sensors_f75375_exit(void)
MODULE_AUTHOR("Riku Voipio");
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("F75373/F75375 hardware monitoring driver");
+MODULE_DESCRIPTION("F75373/F75375/F75387 hardware monitoring driver");
module_init(sensors_f75375_init);
module_exit(sensors_f75375_exit);
diff --git a/drivers/hwmon/g760a.c b/drivers/hwmon/g760a.c
index 1d6a6fa..781277d 100644
--- a/drivers/hwmon/g760a.c
+++ b/drivers/hwmon/g760a.c
@@ -166,7 +166,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *da,
struct g760a_data *data = g760a_update_client(dev);
unsigned long val;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtoul(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 9ba38f3..2ce8c44 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -224,7 +224,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
int speed_index;
int ret = count;
- if (strict_strtoul(buf, 10, &pwm) || pwm > 255)
+ if (kstrtoul(buf, 10, &pwm) || pwm > 255)
return -EINVAL;
mutex_lock(&fan_data->lock);
@@ -257,7 +257,7 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *attr,
struct gpio_fan_data *fan_data = dev_get_drvdata(dev);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) || val > 1)
+ if (kstrtoul(buf, 10, &val) || val > 1)
return -EINVAL;
if (fan_data->pwm_enable == val)
@@ -314,7 +314,7 @@ static ssize_t set_rpm(struct device *dev, struct device_attribute *attr,
unsigned long rpm;
int ret = count;
- if (strict_strtoul(buf, 10, &rpm))
+ if (kstrtoul(buf, 10, &rpm))
return -EINVAL;
mutex_lock(&fan_data->lock);
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index 6a967d7..cc2981f 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -904,7 +904,7 @@ static ssize_t aem_set_power_period(struct device *dev,
unsigned long temp;
int res;
- res = strict_strtoul(buf, 10, &temp);
+ res = kstrtoul(buf, 10, &temp);
if (res)
return res;
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index d912649..0054d6f 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -17,6 +17,7 @@
* IT8720F Super I/O chip w/LPC interface
* IT8721F Super I/O chip w/LPC interface
* IT8726F Super I/O chip w/LPC interface
+ * IT8728F Super I/O chip w/LPC interface
* IT8758E Super I/O chip w/LPC interface
* Sis950 A clone of the IT8705F
*
@@ -58,7 +59,7 @@
#define DRVNAME "it87"
-enum chips { it87, it8712, it8716, it8718, it8720, it8721 };
+enum chips { it87, it8712, it8716, it8718, it8720, it8721, it8728 };
static unsigned short force_id;
module_param(force_id, ushort, 0);
@@ -135,6 +136,7 @@ static inline void superio_exit(void)
#define IT8720F_DEVID 0x8720
#define IT8721F_DEVID 0x8721
#define IT8726F_DEVID 0x8726
+#define IT8728F_DEVID 0x8728
#define IT87_ACT_REG 0x30
#define IT87_BASE_REG 0x60
@@ -146,10 +148,10 @@ static inline void superio_exit(void)
#define IT87_SIO_BEEP_PIN_REG 0xf6 /* Beep pin mapping */
/* Update battery voltage after every reading if true */
-static int update_vbat;
+static bool update_vbat;
/* Not all BIOSes properly configure the PWM registers */
-static int fix_pwm_polarity;
+static bool fix_pwm_polarity;
/* Many IT87 constants specified below */
@@ -274,11 +276,31 @@ struct it87_data {
s8 auto_temp[3][5]; /* [nr][0] is point1_temp_hyst */
};
+static inline int has_12mv_adc(const struct it87_data *data)
+{
+ /*
+ * IT8721F and later have a 12 mV ADC, also with internal scaling
+ * on selected inputs.
+ */
+ return data->type == it8721
+ || data->type == it8728;
+}
+
+static inline int has_newer_autopwm(const struct it87_data *data)
+{
+ /*
+ * IT8721F and later have separate registers for the temperature
+ * mapping and the manual duty cycle.
+ */
+ return data->type == it8721
+ || data->type == it8728;
+}
+
static u8 in_to_reg(const struct it87_data *data, int nr, long val)
{
long lsb;
- if (data->type == it8721) {
+ if (has_12mv_adc(data)) {
if (data->in_scaled & (1 << nr))
lsb = 24;
else
@@ -292,7 +314,7 @@ static u8 in_to_reg(const struct it87_data *data, int nr, long val)
static int in_from_reg(const struct it87_data *data, int nr, int val)
{
- if (data->type == it8721) {
+ if (has_12mv_adc(data)) {
if (data->in_scaled & (1 << nr))
return val * 24;
else
@@ -329,7 +351,7 @@ static inline u16 FAN16_TO_REG(long rpm)
static u8 pwm_to_reg(const struct it87_data *data, long val)
{
- if (data->type == it8721)
+ if (has_newer_autopwm(data))
return val;
else
return val >> 1;
@@ -337,7 +359,7 @@ static u8 pwm_to_reg(const struct it87_data *data, long val)
static int pwm_from_reg(const struct it87_data *data, u8 reg)
{
- if (data->type == it8721)
+ if (has_newer_autopwm(data))
return reg;
else
return (reg & 0x7f) << 1;
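
The new has_12mv_adc() and has_newer_autopwm() helpers group the IT8721F with the IT8728F so the chip-specific tests no longer enumerate device types. For the voltage inputs this means a 12 mV LSB, or 24 mV on inputs flagged in in_scaled (they are internally divided). A user-space sketch of that readback scaling; the 16 mV figure for the older chips comes from the existing driver, not from this hunk:

#include <stdio.h>

/* Readback scaling selected by has_12mv_adc(): 12 mV per LSB on the
 * IT8721F/IT8728F, 24 mV on inputs flagged in in_scaled, and 16 mV on
 * the older chips (assumed from the existing driver). */
static int in_from_reg(int has_12mv_adc, int scaled, int reg)
{
	if (has_12mv_adc)
		return scaled ? reg * 24 : reg * 12;
	return reg * 16;
}

int main(void)
{
	int reg = 0x8a;		/* raw ADC reading, 138 decimal */

	printf("IT8728F, in3 (AVCC, scaled): %d mV\n", in_from_reg(1, 1, reg));
	printf("IT8728F, plain input:        %d mV\n", in_from_reg(1, 0, reg));
	printf("IT8716F, plain input:        %d mV\n", in_from_reg(0, 0, reg));
	return 0;
}
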
@@ -374,7 +396,8 @@ static inline int has_16bit_fans(const struct it87_data *data)
|| data->type == it8716
|| data->type == it8718
|| data->type == it8720
- || data->type == it8721;
+ || data->type == it8721
+ || data->type == it8728;
}
static inline int has_old_autopwm(const struct it87_data *data)
@@ -444,7 +467,7 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
struct it87_data *data = dev_get_drvdata(dev);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -463,7 +486,7 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
struct it87_data *data = dev_get_drvdata(dev);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -539,7 +562,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
struct it87_data *data = dev_get_drvdata(dev);
long val;
- if (strict_strtol(buf, 10, &val) < 0)
+ if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -557,7 +580,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
struct it87_data *data = dev_get_drvdata(dev);
long val;
- if (strict_strtol(buf, 10, &val) < 0)
+ if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -604,7 +627,7 @@ static ssize_t set_sensor(struct device *dev, struct device_attribute *attr,
long val;
u8 reg;
- if (strict_strtol(buf, 10, &val) < 0)
+ if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
reg = it87_read_value(data, IT87_REG_TEMP_ENABLE);
@@ -718,7 +741,7 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
long val;
u8 reg;
- if (strict_strtol(buf, 10, &val) < 0)
+ if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -751,7 +774,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
int min;
u8 old;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -820,7 +843,7 @@ static ssize_t set_pwm_enable(struct device *dev,
struct it87_data *data = dev_get_drvdata(dev);
long val;
- if (strict_strtol(buf, 10, &val) < 0 || val < 0 || val > 2)
+ if (kstrtol(buf, 10, &val) < 0 || val < 0 || val > 2)
return -EINVAL;
/* Check trip points before switching to automatic mode */
@@ -842,7 +865,7 @@ static ssize_t set_pwm_enable(struct device *dev,
data->fan_main_ctrl);
} else {
if (val == 1) /* Manual mode */
- data->pwm_ctrl[nr] = data->type == it8721 ?
+ data->pwm_ctrl[nr] = has_newer_autopwm(data) ?
data->pwm_temp_map[nr] :
data->pwm_duty[nr];
else /* Automatic mode */
@@ -866,11 +889,11 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
struct it87_data *data = dev_get_drvdata(dev);
long val;
- if (strict_strtol(buf, 10, &val) < 0 || val < 0 || val > 255)
+ if (kstrtol(buf, 10, &val) < 0 || val < 0 || val > 255)
return -EINVAL;
mutex_lock(&data->update_lock);
- if (data->type == it8721) {
+ if (has_newer_autopwm(data)) {
/* If we are in automatic mode, the PWM duty cycle register
* is read-only so we can't write the value */
if (data->pwm_ctrl[nr] & 0x80) {
@@ -900,7 +923,7 @@ static ssize_t set_pwm_freq(struct device *dev,
unsigned long val;
int i;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
/* Search for the nearest available frequency */
@@ -949,7 +972,7 @@ static ssize_t set_pwm_temp_map(struct device *dev,
return -EINVAL;
}
- if (strict_strtol(buf, 10, &val) < 0)
+ if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
switch (val) {
@@ -1001,7 +1024,7 @@ static ssize_t set_auto_pwm(struct device *dev,
int point = sensor_attr->index;
long val;
- if (strict_strtol(buf, 10, &val) < 0 || val < 0 || val > 255)
+ if (kstrtol(buf, 10, &val) < 0 || val < 0 || val > 255)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -1034,7 +1057,7 @@ static ssize_t set_auto_temp(struct device *dev,
int point = sensor_attr->index;
long val;
- if (strict_strtol(buf, 10, &val) < 0 || val < -128000 || val > 127000)
+ if (kstrtol(buf, 10, &val) < 0 || val < -128000 || val > 127000)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -1126,7 +1149,7 @@ static ssize_t set_fan16_min(struct device *dev, struct device_attribute *attr,
struct it87_data *data = dev_get_drvdata(dev);
long val;
- if (strict_strtol(buf, 10, &val) < 0)
+ if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -1180,7 +1203,7 @@ static ssize_t clear_intrusion(struct device *dev, struct device_attribute
long val;
int config;
- if (strict_strtol(buf, 10, &val) < 0 || val != 0)
+ if (kstrtol(buf, 10, &val) < 0 || val != 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -1231,7 +1254,7 @@ static ssize_t set_beep(struct device *dev, struct device_attribute *attr,
struct it87_data *data = dev_get_drvdata(dev);
long val;
- if (strict_strtol(buf, 10, &val) < 0
+ if (kstrtol(buf, 10, &val) < 0
|| (val != 0 && val != 1))
return -EINVAL;
@@ -1278,7 +1301,7 @@ static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr,
struct it87_data *data = dev_get_drvdata(dev);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
data->vrm = val;
@@ -1311,8 +1334,8 @@ static ssize_t show_label(struct device *dev, struct device_attribute *attr,
struct it87_data *data = dev_get_drvdata(dev);
int nr = to_sensor_dev_attr(attr)->index;
- return sprintf(buf, "%s\n", data->type == it8721 ? labels_it8721[nr]
- : labels[nr]);
+ return sprintf(buf, "%s\n", has_12mv_adc(data) ? labels_it8721[nr]
+ : labels[nr]);
}
static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_label, NULL, 0);
static SENSOR_DEVICE_ATTR(in7_label, S_IRUGO, show_label, NULL, 1);
@@ -1605,6 +1628,9 @@ static int __init it87_find(unsigned short *address,
case IT8721F_DEVID:
sio_data->type = it8721;
break;
+ case IT8728F_DEVID:
+ sio_data->type = it8728;
+ break;
case 0xffff: /* No device at all */
goto exit;
default:
@@ -1646,8 +1672,11 @@ static int __init it87_find(unsigned short *address,
superio_select(GPIO);
reg = superio_inb(IT87_SIO_GPIO3_REG);
- if (sio_data->type == it8721) {
- /* The IT8721F/IT8758E doesn't have VID pins at all */
+ if (sio_data->type == it8721 || sio_data->type == it8728) {
+ /*
+ * The IT8721F/IT8758E doesn't have VID pins at all,
+ * not sure about the IT8728F.
+ */
sio_data->skip_vid = 1;
} else {
/* We need at least 4 VID pins */
@@ -1692,7 +1721,8 @@ static int __init it87_find(unsigned short *address,
}
if (reg & (1 << 0))
sio_data->internal |= (1 << 0);
- if ((reg & (1 << 1)) || sio_data->type == it8721)
+ if ((reg & (1 << 1)) || sio_data->type == it8721 ||
+ sio_data->type == it8728)
sio_data->internal |= (1 << 1);
sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
@@ -1770,6 +1800,7 @@ static int __devinit it87_probe(struct platform_device *pdev)
"it8718",
"it8720",
"it8721",
+ "it8728",
};
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
@@ -1807,7 +1838,7 @@ static int __devinit it87_probe(struct platform_device *pdev)
enable_pwm_interface = it87_check_pwm(dev);
/* Starting with IT8721F, we handle scaling of internal voltages */
- if (data->type == it8721) {
+ if (has_12mv_adc(data)) {
if (sio_data->internal & (1 << 0))
data->in_scaled |= (1 << 3); /* in3 is AVCC */
if (sio_data->internal & (1 << 1))
@@ -2093,7 +2124,7 @@ static void __devinit it87_init_device(struct platform_device *pdev)
static void it87_update_pwm_ctrl(struct it87_data *data, int nr)
{
data->pwm_ctrl[nr] = it87_read_value(data, IT87_REG_PWM(nr));
- if (data->type == it8721) {
+ if (has_newer_autopwm(data)) {
data->pwm_temp_map[nr] = data->pwm_ctrl[nr] & 0x03;
data->pwm_duty[nr] = it87_read_value(data,
IT87_REG_PWM_DUTY(nr));
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 2d3d728..b927ee5 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -64,6 +64,7 @@ static const unsigned short normal_i2c[] = {
/* Manufacturer IDs */
#define ADT_MANID 0x11d4 /* Analog Devices */
+#define ATMEL_MANID 0x001f /* Atmel */
#define MAX_MANID 0x004d /* Maxim */
#define IDT_MANID 0x00b3 /* IDT */
#define MCP_MANID 0x0054 /* Microchip */
@@ -77,15 +78,25 @@ static const unsigned short normal_i2c[] = {
#define ADT7408_DEVID 0x0801
#define ADT7408_DEVID_MASK 0xffff
+/* Atmel */
+#define AT30TS00_DEVID 0x8201
+#define AT30TS00_DEVID_MASK 0xffff
+
/* IDT */
#define TS3000B3_DEVID 0x2903 /* Also matches TSE2002B3 */
#define TS3000B3_DEVID_MASK 0xffff
+#define TS3000GB2_DEVID 0x2912 /* Also matches TSE2002GB2 */
+#define TS3000GB2_DEVID_MASK 0xffff
+
/* Maxim */
#define MAX6604_DEVID 0x3e00
#define MAX6604_DEVID_MASK 0xffff
/* Microchip */
+#define MCP9804_DEVID 0x0200
+#define MCP9804_DEVID_MASK 0xfffc
+
#define MCP98242_DEVID 0x2000
#define MCP98242_DEVID_MASK 0xfffc
@@ -113,6 +124,12 @@ static const unsigned short normal_i2c[] = {
#define STTS424E_DEVID 0x0000
#define STTS424E_DEVID_MASK 0xfffe
+#define STTS2002_DEVID 0x0300
+#define STTS2002_DEVID_MASK 0xffff
+
+#define STTS3000_DEVID 0x0200
+#define STTS3000_DEVID_MASK 0xffff
+
static u16 jc42_hysteresis[] = { 0, 1500, 3000, 6000 };
struct jc42_chips {
@@ -123,8 +140,11 @@ struct jc42_chips {
static struct jc42_chips jc42_chips[] = {
{ ADT_MANID, ADT7408_DEVID, ADT7408_DEVID_MASK },
+ { ATMEL_MANID, AT30TS00_DEVID, AT30TS00_DEVID_MASK },
{ IDT_MANID, TS3000B3_DEVID, TS3000B3_DEVID_MASK },
+ { IDT_MANID, TS3000GB2_DEVID, TS3000GB2_DEVID_MASK },
{ MAX_MANID, MAX6604_DEVID, MAX6604_DEVID_MASK },
+ { MCP_MANID, MCP9804_DEVID, MCP9804_DEVID_MASK },
{ MCP_MANID, MCP98242_DEVID, MCP98242_DEVID_MASK },
{ MCP_MANID, MCP98243_DEVID, MCP98243_DEVID_MASK },
{ MCP_MANID, MCP9843_DEVID, MCP9843_DEVID_MASK },
@@ -133,6 +153,8 @@ static struct jc42_chips jc42_chips[] = {
{ NXP_MANID, SE98_DEVID, SE98_DEVID_MASK },
{ STM_MANID, STTS424_DEVID, STTS424_DEVID_MASK },
{ STM_MANID, STTS424E_DEVID, STTS424E_DEVID_MASK },
+ { STM_MANID, STTS2002_DEVID, STTS2002_DEVID_MASK },
+ { STM_MANID, STTS3000_DEVID, STTS3000_DEVID_MASK },
};
/* Each client has this additional data */
@@ -159,10 +181,12 @@ static struct jc42_data *jc42_update_device(struct device *dev);
static const struct i2c_device_id jc42_id[] = {
{ "adt7408", 0 },
+ { "at30ts00", 0 },
{ "cat94ts02", 0 },
{ "cat6095", 0 },
{ "jc42", 0 },
{ "max6604", 0 },
+ { "mcp9804", 0 },
{ "mcp9805", 0 },
{ "mcp98242", 0 },
{ "mcp98243", 0 },
@@ -171,8 +195,10 @@ static const struct i2c_device_id jc42_id[] = {
{ "se97b", 0 },
{ "se98", 0 },
{ "stts424", 0 },
- { "tse2002b3", 0 },
- { "ts3000b3", 0 },
+ { "stts2002", 0 },
+ { "stts3000", 0 },
+ { "tse2002", 0 },
+ { "ts3000", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, jc42_id);
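
Each jc42_chips entry pairs a manufacturer ID with a device ID and a mask, so one entry such as MCP9804 (0x0200, mask 0xfffc) covers several stepping codes. A user-space sketch of the masked match those *_DEVID_MASK constants imply; the actual detect routine is not part of this hunk, so the matching logic here is an assumption:

#include <stdio.h>

struct jc42_chip_id {
	unsigned short manid;
	unsigned short devid;
	unsigned short devid_mask;
};

/* A few of the entries added in this hunk (IDs as in the patch). */
static const struct jc42_chip_id chips[] = {
	{ 0x001f, 0x8201, 0xffff },	/* Atmel AT30TS00 */
	{ 0x00b3, 0x2912, 0xffff },	/* IDT TS3000GB2 / TSE2002GB2 */
	{ 0x0054, 0x0200, 0xfffc },	/* Microchip MCP9804 */
};

/* Masked match implied by the *_DEVID_MASK constants: same manufacturer,
 * and the device ID equals the table value once masked. */
static int jc42_match(unsigned short manid, unsigned short devid)
{
	unsigned int i;

	for (i = 0; i < sizeof(chips) / sizeof(chips[0]); i++)
		if (manid == chips[i].manid &&
		    (devid & chips[i].devid_mask) == chips[i].devid)
			return 1;
	return 0;
}

int main(void)
{
	/* MCP9804 steppings 0x0200..0x0203 all match thanks to mask 0xfffc */
	printf("%d %d %d\n",
	       jc42_match(0x0054, 0x0200),
	       jc42_match(0x0054, 0x0203),
	       jc42_match(0x0054, 0x0204));
	return 0;
}
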
@@ -309,7 +335,7 @@ static ssize_t set_##value(struct device *dev, \
struct jc42_data *data = i2c_get_clientdata(client); \
int err, ret = count; \
long val; \
- if (strict_strtol(buf, 10, &val) < 0) \
+ if (kstrtol(buf, 10, &val) < 0) \
return -EINVAL; \
mutex_lock(&data->update_lock); \
data->value = jc42_temp_to_reg(val, data->extended); \
@@ -337,7 +363,7 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
int err;
int ret = count;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
diff = jc42_temp_from_reg(data->temp_crit) - val;
@@ -413,7 +439,7 @@ static struct attribute *jc42_attributes[] = {
NULL
};
-static mode_t jc42_attribute_mode(struct kobject *kobj,
+static umode_t jc42_attribute_mode(struct kobject *kobj,
struct attribute *attr, int index)
{
struct device *dev = container_of(kobj, struct device, kobj);
diff --git a/drivers/hwmon/lm63.c b/drivers/hwmon/lm63.c
index 508cb29..5e6457a 100644
--- a/drivers/hwmon/lm63.c
+++ b/drivers/hwmon/lm63.c
@@ -47,10 +47,14 @@
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/sysfs.h>
+#include <linux/types.h>
/*
* Addresses to scan
- * Address is fully defined internally and cannot be changed.
+ * Address is fully defined internally and cannot be changed except for
+ * LM64 which has one pin dedicated to address selection.
+ * LM63 and LM96163 have address 0x4c.
+ * LM64 can have address 0x18 or 0x4e.
*/
static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
@@ -60,6 +64,7 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
*/
#define LM63_REG_CONFIG1 0x03
+#define LM63_REG_CONVRATE 0x04
#define LM63_REG_CONFIG2 0xBF
#define LM63_REG_CONFIG_FAN 0x4A
@@ -70,6 +75,9 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
#define LM63_REG_PWM_VALUE 0x4C
#define LM63_REG_PWM_FREQ 0x4D
+#define LM63_REG_LUT_TEMP_HYST 0x4F
+#define LM63_REG_LUT_TEMP(nr) (0x50 + 2 * (nr))
+#define LM63_REG_LUT_PWM(nr) (0x51 + 2 * (nr))
#define LM63_REG_LOCAL_TEMP 0x00
#define LM63_REG_LOCAL_HIGH 0x05
@@ -91,6 +99,16 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
#define LM63_REG_MAN_ID 0xFE
#define LM63_REG_CHIP_ID 0xFF
+#define LM96163_REG_TRUTHERM 0x30
+#define LM96163_REG_REMOTE_TEMP_U_MSB 0x31
+#define LM96163_REG_REMOTE_TEMP_U_LSB 0x32
+#define LM96163_REG_CONFIG_ENHANCED 0x45
+
+#define LM63_MAX_CONVRATE 9
+
+#define LM63_MAX_CONVRATE_HZ 32
+#define LM96163_MAX_CONVRATE_HZ 26
+
/*
* Conversions and various macros
* For tachometer counts, the LM63 uses 16-bit values.
@@ -112,15 +130,24 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
(val) >= 127000 ? 127 : \
(val) < 0 ? ((val) - 500) / 1000 : \
((val) + 500) / 1000)
+#define TEMP8U_TO_REG(val) ((val) <= 0 ? 0 : \
+ (val) >= 255000 ? 255 : \
+ ((val) + 500) / 1000)
#define TEMP11_FROM_REG(reg) ((reg) / 32 * 125)
#define TEMP11_TO_REG(val) ((val) <= -128000 ? 0x8000 : \
(val) >= 127875 ? 0x7FE0 : \
(val) < 0 ? ((val) - 62) / 125 * 32 : \
((val) + 62) / 125 * 32)
+#define TEMP11U_TO_REG(val) ((val) <= 0 ? 0 : \
+ (val) >= 255875 ? 0xFFE0 : \
+ ((val) + 62) / 125 * 32)
#define HYST_TO_REG(val) ((val) <= 0 ? 0 : \
(val) >= 127000 ? 127 : \
((val) + 500) / 1000)
+#define UPDATE_INTERVAL(max, rate) \
+ ((1000 << (LM63_MAX_CONVRATE - (rate))) / (max))
+
/*
* Functions declaration
*/
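
TEMP8U_TO_REG() and TEMP11U_TO_REG() add unsigned encodings (0 to 255 and 0 to 255.875 degrees C) for chips whose remote limits are unsigned, and UPDATE_INTERVAL() turns the conversion-rate register value into a cache lifetime in milliseconds: each step below LM63_MAX_CONVRATE doubles the interval, scaled by the chip's maximum conversion rate in Hz. A worked check of that macro, user-space only:

#include <stdio.h>

#define LM63_MAX_CONVRATE	9

/* UPDATE_INTERVAL from the patch: each step below the maximum conversion
 * rate register value doubles the interval, divided by the chip's maximum
 * conversion rate in Hz. */
#define UPDATE_INTERVAL(max, rate) \
	((1000 << (LM63_MAX_CONVRATE - (rate))) / (max))

int main(void)
{
	/* LM63 tops out at 32 Hz, the LM96163 at 26 Hz (constants above) */
	printf("LM63    rate 9: %d ms\n", UPDATE_INTERVAL(32, 9));	/* 31 */
	printf("LM63    rate 5: %d ms\n", UPDATE_INTERVAL(32, 5));	/* 500 */
	printf("LM96163 rate 9: %d ms\n", UPDATE_INTERVAL(26, 9));	/* 38 */
	return 0;
}
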
@@ -134,7 +161,7 @@ static struct lm63_data *lm63_update_device(struct device *dev);
static int lm63_detect(struct i2c_client *client, struct i2c_board_info *info);
static void lm63_init_client(struct i2c_client *client);
-enum chips { lm63, lm64 };
+enum chips { lm63, lm64, lm96163 };
/*
* Driver data (common to all clients)
@@ -143,6 +170,7 @@ enum chips { lm63, lm64 };
static const struct i2c_device_id lm63_id[] = {
{ "lm63", lm63 },
{ "lm64", lm64 },
+ { "lm96163", lm96163 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm63_id);
@@ -167,26 +195,53 @@ struct lm63_data {
struct device *hwmon_dev;
struct mutex update_lock;
char valid; /* zero until following fields are valid */
+ char lut_valid; /* zero until lut fields are valid */
unsigned long last_updated; /* in jiffies */
- int kind;
+ unsigned long lut_last_updated; /* in jiffies */
+ enum chips kind;
int temp2_offset;
+ int update_interval; /* in milliseconds */
+ int max_convrate_hz;
+ int lut_size; /* 8 or 12 */
+
/* registers values */
u8 config, config_fan;
u16 fan[2]; /* 0: input
1: low limit */
u8 pwm1_freq;
- u8 pwm1_value;
- s8 temp8[3]; /* 0: local input
+ u8 pwm1[13]; /* 0: current output
+ 1-12: lookup table */
+ s8 temp8[15]; /* 0: local input
1: local high limit
- 2: remote critical limit */
- s16 temp11[3]; /* 0: remote input
+ 2: remote critical limit
+ 3-14: lookup table */
+ s16 temp11[4]; /* 0: remote input
1: remote low limit
- 2: remote high limit */
+ 2: remote high limit
+ 3: remote offset */
+ u16 temp11u; /* remote input (unsigned) */
u8 temp2_crit_hyst;
+ u8 lut_temp_hyst;
u8 alarms;
+ bool pwm_highres;
+ bool lut_temp_highres;
+ bool remote_unsigned; /* true if unsigned remote upper limits */
+ bool trutherm;
};
+static inline int temp8_from_reg(struct lm63_data *data, int nr)
+{
+ if (data->remote_unsigned)
+ return TEMP8_FROM_REG((u8)data->temp8[nr]);
+ return TEMP8_FROM_REG(data->temp8[nr]);
+}
+
+static inline int lut_temp_from_reg(struct lm63_data *data, int nr)
+{
+ return data->temp8[nr] * (data->lut_temp_highres ? 500 : 1000);
+}
+
/*
* Sysfs callback functions and files
*/
@@ -204,7 +259,12 @@ static ssize_t set_fan(struct device *dev, struct device_attribute *dummy,
{
struct i2c_client *client = to_i2c_client(dev);
struct lm63_data *data = i2c_get_clientdata(client);
- unsigned long val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
mutex_lock(&data->update_lock);
data->fan[1] = FAN_TO_REG(val);
@@ -216,13 +276,22 @@ static ssize_t set_fan(struct device *dev, struct device_attribute *dummy,
return count;
}
-static ssize_t show_pwm1(struct device *dev, struct device_attribute *dummy,
+static ssize_t show_pwm1(struct device *dev, struct device_attribute *devattr,
char *buf)
{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm63_data *data = lm63_update_device(dev);
- return sprintf(buf, "%d\n", data->pwm1_value >= 2 * data->pwm1_freq ?
- 255 : (data->pwm1_value * 255 + data->pwm1_freq) /
- (2 * data->pwm1_freq));
+ int nr = attr->index;
+ int pwm;
+
+ if (data->pwm_highres)
+ pwm = data->pwm1[nr];
+ else
+ pwm = data->pwm1[nr] >= 2 * data->pwm1_freq ?
+ 255 : (data->pwm1[nr] * 255 + data->pwm1_freq) /
+ (2 * data->pwm1_freq);
+
+ return sprintf(buf, "%d\n", pwm);
}
static ssize_t set_pwm1(struct device *dev, struct device_attribute *dummy,
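
With pwm_highres set, the PWM register already holds a 0-255 duty cycle; otherwise the register counts from 0 to twice pwm1_freq and the reworked show_pwm1() rescales it to the 0-255 sysfs range with rounding. A user-space model of that rescaling, for illustration:

#include <stdio.h>

/* Model of the rescaling in the reworked show_pwm1(): in legacy mode the
 * register counts 0..2*pwm1_freq and is mapped onto 0..255 with rounding;
 * in high-resolution mode it already is a 0..255 duty cycle. */
static int pwm_to_sysfs(int highres, unsigned int reg, unsigned int pwm1_freq)
{
	if (highres)
		return reg;
	return reg >= 2 * pwm1_freq ? 255
				    : (reg * 255 + pwm1_freq) / (2 * pwm1_freq);
}

int main(void)
{
	printf("%d\n", pwm_to_sysfs(0, 22, 22));	/* 22/44 = 50%, reads 128 */
	printf("%d\n", pwm_to_sysfs(0, 50, 22));	/* clamped to 255 */
	printf("%d\n", pwm_to_sysfs(1, 200, 22));	/* high-res, reads 200 */
	return 0;
}
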
@@ -231,22 +300,26 @@ static ssize_t set_pwm1(struct device *dev, struct device_attribute *dummy,
struct i2c_client *client = to_i2c_client(dev);
struct lm63_data *data = i2c_get_clientdata(client);
unsigned long val;
-
+ int err;
+
if (!(data->config_fan & 0x20)) /* register is read-only */
return -EPERM;
- val = simple_strtoul(buf, NULL, 10);
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ val = SENSORS_LIMIT(val, 0, 255);
mutex_lock(&data->update_lock);
- data->pwm1_value = val <= 0 ? 0 :
- val >= 255 ? 2 * data->pwm1_freq :
- (val * data->pwm1_freq * 2 + 127) / 255;
- i2c_smbus_write_byte_data(client, LM63_REG_PWM_VALUE, data->pwm1_value);
+ data->pwm1[0] = data->pwm_highres ? val :
+ (val * data->pwm1_freq * 2 + 127) / 255;
+ i2c_smbus_write_byte_data(client, LM63_REG_PWM_VALUE, data->pwm1[0]);
mutex_unlock(&data->update_lock);
return count;
}
-static ssize_t show_pwm1_enable(struct device *dev, struct device_attribute *dummy,
- char *buf)
+static ssize_t show_pwm1_enable(struct device *dev,
+ struct device_attribute *dummy, char *buf)
{
struct lm63_data *data = lm63_update_device(dev);
return sprintf(buf, "%d\n", data->config_fan & 0x20 ? 1 : 2);
@@ -273,21 +346,47 @@ static ssize_t show_remote_temp8(struct device *dev,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm63_data *data = lm63_update_device(dev);
- return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index])
+ return sprintf(buf, "%d\n", temp8_from_reg(data, attr->index)
+ + data->temp2_offset);
+}
+
+static ssize_t show_lut_temp(struct device *dev,
+ struct device_attribute *devattr,
+ char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct lm63_data *data = lm63_update_device(dev);
+ return sprintf(buf, "%d\n", lut_temp_from_reg(data, attr->index)
+ + data->temp2_offset);
}
-static ssize_t set_local_temp8(struct device *dev,
- struct device_attribute *dummy,
- const char *buf, size_t count)
+static ssize_t set_temp8(struct device *dev, struct device_attribute *devattr,
+ const char *buf, size_t count)
{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct lm63_data *data = i2c_get_clientdata(client);
- long val = simple_strtol(buf, NULL, 10);
+ int nr = attr->index;
+ int reg = nr == 2 ? LM63_REG_REMOTE_TCRIT : LM63_REG_LOCAL_HIGH;
+ long val;
+ int err;
+ int temp;
+
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
mutex_lock(&data->update_lock);
- data->temp8[1] = TEMP8_TO_REG(val);
- i2c_smbus_write_byte_data(client, LM63_REG_LOCAL_HIGH, data->temp8[1]);
+ if (nr == 2) {
+ if (data->remote_unsigned)
+ temp = TEMP8U_TO_REG(val - data->temp2_offset);
+ else
+ temp = TEMP8_TO_REG(val - data->temp2_offset);
+ } else {
+ temp = TEMP8_TO_REG(val);
+ }
+ data->temp8[nr] = temp;
+ i2c_smbus_write_byte_data(client, reg, temp);
mutex_unlock(&data->update_lock);
return count;
}
@@ -297,28 +396,56 @@ static ssize_t show_temp11(struct device *dev, struct device_attribute *devattr,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct lm63_data *data = lm63_update_device(dev);
- return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[attr->index])
- + data->temp2_offset);
+ int nr = attr->index;
+ int temp;
+
+ if (!nr) {
+ /*
+ * Use unsigned temperature unless its value is zero.
+ * If it is zero, use signed temperature.
+ */
+ if (data->temp11u)
+ temp = TEMP11_FROM_REG(data->temp11u);
+ else
+ temp = TEMP11_FROM_REG(data->temp11[nr]);
+ } else {
+ if (data->remote_unsigned && nr == 2)
+ temp = TEMP11_FROM_REG((u16)data->temp11[nr]);
+ else
+ temp = TEMP11_FROM_REG(data->temp11[nr]);
+ }
+ return sprintf(buf, "%d\n", temp + data->temp2_offset);
}
static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
const char *buf, size_t count)
{
- static const u8 reg[4] = {
+ static const u8 reg[6] = {
LM63_REG_REMOTE_LOW_MSB,
LM63_REG_REMOTE_LOW_LSB,
LM63_REG_REMOTE_HIGH_MSB,
LM63_REG_REMOTE_HIGH_LSB,
+ LM63_REG_REMOTE_OFFSET_MSB,
+ LM63_REG_REMOTE_OFFSET_LSB,
};
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct i2c_client *client = to_i2c_client(dev);
struct lm63_data *data = i2c_get_clientdata(client);
- long val = simple_strtol(buf, NULL, 10);
+ long val;
+ int err;
int nr = attr->index;
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
mutex_lock(&data->update_lock);
- data->temp11[nr] = TEMP11_TO_REG(val - data->temp2_offset);
+ if (data->remote_unsigned && nr == 2)
+ data->temp11[nr] = TEMP11U_TO_REG(val - data->temp2_offset);
+ else
+ data->temp11[nr] = TEMP11_TO_REG(val - data->temp2_offset);
+
i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2],
data->temp11[nr] >> 8);
i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2 + 1],
@@ -327,35 +454,143 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
return count;
}
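The 11-bit limits are written as an MSB/LSB register pair, selected by (nr - 1) * 2. A standalone sketch of the byte split, assuming the LSB write masks the register image with 0xff as is conventional (illustration only, not part of the patch):

/* Split a 16-bit register image into the two byte writes. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t regval = 0x3FE0;	/* example register image */
	uint8_t msb = regval >> 8;	/* written to the *_MSB register */
	uint8_t lsb = regval & 0xff;	/* written to the *_LSB register */

	printf("msb=0x%02x lsb=0x%02x\n", msb, lsb);	/* msb=0x3f lsb=0xe0 */
	return 0;
}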
-/* Hysteresis register holds a relative value, while we want to present
- an absolute to user-space */
-static ssize_t show_temp2_crit_hyst(struct device *dev, struct device_attribute *dummy,
- char *buf)
+/*
+ * Hysteresis register holds a relative value, while we want to present
+ * an absolute to user-space
+ */
+static ssize_t show_temp2_crit_hyst(struct device *dev,
+ struct device_attribute *dummy, char *buf)
{
struct lm63_data *data = lm63_update_device(dev);
- return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[2])
+ return sprintf(buf, "%d\n", temp8_from_reg(data, 2)
+ data->temp2_offset
- TEMP8_FROM_REG(data->temp2_crit_hyst));
}
-/* And now the other way around, user-space provides an absolute
- hysteresis value and we have to store a relative one */
-static ssize_t set_temp2_crit_hyst(struct device *dev, struct device_attribute *dummy,
+static ssize_t show_lut_temp_hyst(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+ struct lm63_data *data = lm63_update_device(dev);
+
+ return sprintf(buf, "%d\n", lut_temp_from_reg(data, attr->index)
+ + data->temp2_offset
+ - TEMP8_FROM_REG(data->lut_temp_hyst));
+}
+
+/*
+ * And now the other way around, user-space provides an absolute
+ * hysteresis value and we have to store a relative one
+ */
+static ssize_t set_temp2_crit_hyst(struct device *dev,
+ struct device_attribute *dummy,
const char *buf, size_t count)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm63_data *data = i2c_get_clientdata(client);
- long val = simple_strtol(buf, NULL, 10);
+ long val;
+ int err;
long hyst;
+ err = kstrtol(buf, 10, &val);
+ if (err)
+ return err;
+
mutex_lock(&data->update_lock);
- hyst = TEMP8_FROM_REG(data->temp8[2]) + data->temp2_offset - val;
+ hyst = temp8_from_reg(data, 2) + data->temp2_offset - val;
i2c_smbus_write_byte_data(client, LM63_REG_REMOTE_TCRIT_HYST,
HYST_TO_REG(hyst));
mutex_unlock(&data->update_lock);
return count;
}
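The two hysteresis callbacks convert between the chip's relative hysteresis register and the absolute value exposed to user-space. A worked example with illustrative numbers in millidegrees C (illustration only, not part of the patch):

/* Round-trip of the absolute/relative hysteresis conversion. */
#include <stdio.h>

int main(void)
{
	int crit = 85000, offset = 0, hyst_reg = 10000;

	/* what user-space reads: the absolute hysteresis point */
	int shown = crit + offset - hyst_reg;		/* 75000 */

	/* what gets stored when user-space writes 75000 back */
	int stored = crit + offset - 75000;		/* 10000 */

	printf("shown=%d stored=%d\n", shown, stored);
	return 0;
}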
+/*
+ * Set conversion rate.
+ * client->update_lock must be held when calling this function.
+ */
+static void lm63_set_convrate(struct i2c_client *client, struct lm63_data *data,
+ unsigned int interval)
+{
+ int i;
+ unsigned int update_interval;
+
+ /* Shift calculations to avoid rounding errors */
+ interval <<= 6;
+
+ /* find the nearest update rate */
+ update_interval = (1 << (LM63_MAX_CONVRATE + 6)) * 1000
+ / data->max_convrate_hz;
+ for (i = 0; i < LM63_MAX_CONVRATE; i++, update_interval >>= 1)
+ if (interval >= update_interval * 3 / 4)
+ break;
+
+ i2c_smbus_write_byte_data(client, LM63_REG_CONVRATE, i);
+ data->update_interval = UPDATE_INTERVAL(data->max_convrate_hz, i);
+}
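The loop picks the highest conversion-rate index whose update interval is still at least 3/4 of the requested one; the << 6 pre-scaling keeps the integer divisions exact. A standalone sketch of the selection, with an assumed maximum index of 8 and an assumed 32 Hz maximum rate (illustration only, not part of the patch):

/* The constants below are illustrative assumptions, not from this hunk. */
#include <stdio.h>

#define MAX_CONVRATE	8
#define MAX_CONVRATE_HZ	32

static int pick_convrate(unsigned int interval_ms)
{
	unsigned int update_interval;
	int i;

	interval_ms <<= 6;	/* scale so the divisions stay exact */
	update_interval = (1 << (MAX_CONVRATE + 6)) * 1000 / MAX_CONVRATE_HZ;
	for (i = 0; i < MAX_CONVRATE; i++, update_interval >>= 1)
		if (interval_ms >= update_interval * 3 / 4)
			break;
	return i;
}

int main(void)
{
	/* 8000 ms request -> index 0 (slowest); 16 ms -> index 8 (fastest) */
	printf("%d %d\n", pick_convrate(8000), pick_convrate(16));
	return 0;
}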
+
+static ssize_t show_update_interval(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct lm63_data *data = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%u\n", data->update_interval);
+}
+
+static ssize_t set_update_interval(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm63_data *data = i2c_get_clientdata(client);
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 10, &val);
+ if (err)
+ return err;
+
+ mutex_lock(&data->update_lock);
+ lm63_set_convrate(client, data, SENSORS_LIMIT(val, 0, 100000));
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t show_type(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm63_data *data = i2c_get_clientdata(client);
+
+ return sprintf(buf, data->trutherm ? "1\n" : "2\n");
+}
+
+static ssize_t set_type(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm63_data *data = i2c_get_clientdata(client);
+ unsigned long val;
+ int ret;
+ u8 reg;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+ if (val != 1 && val != 2)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ data->trutherm = val == 1;
+ reg = i2c_smbus_read_byte_data(client, LM96163_REG_TRUTHERM) & ~0x02;
+ i2c_smbus_write_byte_data(client, LM96163_REG_TRUTHERM,
+ reg | (data->trutherm ? 0x02 : 0x00));
+ data->valid = 0;
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
static ssize_t show_alarms(struct device *dev, struct device_attribute *dummy,
char *buf)
{
@@ -377,27 +612,87 @@ static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0);
static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan,
set_fan, 1);
-static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm1, set_pwm1);
+static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm1, set_pwm1, 0);
static DEVICE_ATTR(pwm1_enable, S_IRUGO, show_pwm1_enable, NULL);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point1_pwm, S_IRUGO, show_pwm1, NULL, 1);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point1_temp, S_IRUGO,
+ show_lut_temp, NULL, 3);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point1_temp_hyst, S_IRUGO,
+ show_lut_temp_hyst, NULL, 3);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point2_pwm, S_IRUGO, show_pwm1, NULL, 2);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point2_temp, S_IRUGO,
+ show_lut_temp, NULL, 4);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point2_temp_hyst, S_IRUGO,
+ show_lut_temp_hyst, NULL, 4);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point3_pwm, S_IRUGO, show_pwm1, NULL, 3);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point3_temp, S_IRUGO,
+ show_lut_temp, NULL, 5);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point3_temp_hyst, S_IRUGO,
+ show_lut_temp_hyst, NULL, 5);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point4_pwm, S_IRUGO, show_pwm1, NULL, 4);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point4_temp, S_IRUGO,
+ show_lut_temp, NULL, 6);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point4_temp_hyst, S_IRUGO,
+ show_lut_temp_hyst, NULL, 6);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point5_pwm, S_IRUGO, show_pwm1, NULL, 5);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point5_temp, S_IRUGO,
+ show_lut_temp, NULL, 7);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point5_temp_hyst, S_IRUGO,
+ show_lut_temp_hyst, NULL, 7);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point6_pwm, S_IRUGO, show_pwm1, NULL, 6);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point6_temp, S_IRUGO,
+ show_lut_temp, NULL, 8);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point6_temp_hyst, S_IRUGO,
+ show_lut_temp_hyst, NULL, 8);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point7_pwm, S_IRUGO, show_pwm1, NULL, 7);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point7_temp, S_IRUGO,
+ show_lut_temp, NULL, 9);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point7_temp_hyst, S_IRUGO,
+ show_lut_temp_hyst, NULL, 9);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point8_pwm, S_IRUGO, show_pwm1, NULL, 8);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point8_temp, S_IRUGO,
+ show_lut_temp, NULL, 10);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point8_temp_hyst, S_IRUGO,
+ show_lut_temp_hyst, NULL, 10);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point9_pwm, S_IRUGO, show_pwm1, NULL, 9);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point9_temp, S_IRUGO,
+ show_lut_temp, NULL, 11);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point9_temp_hyst, S_IRUGO,
+ show_lut_temp_hyst, NULL, 11);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point10_pwm, S_IRUGO, show_pwm1, NULL, 10);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point10_temp, S_IRUGO,
+ show_lut_temp, NULL, 12);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point10_temp_hyst, S_IRUGO,
+ show_lut_temp_hyst, NULL, 12);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point11_pwm, S_IRUGO, show_pwm1, NULL, 11);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point11_temp, S_IRUGO,
+ show_lut_temp, NULL, 13);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point11_temp_hyst, S_IRUGO,
+ show_lut_temp_hyst, NULL, 13);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point12_pwm, S_IRUGO, show_pwm1, NULL, 12);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point12_temp, S_IRUGO,
+ show_lut_temp, NULL, 14);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point12_temp_hyst, S_IRUGO,
+ show_lut_temp_hyst, NULL, 14);
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_local_temp8, NULL, 0);
static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_local_temp8,
- set_local_temp8, 1);
+ set_temp8, 1);
static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp11, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp11,
set_temp11, 1);
static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp11,
set_temp11, 2);
-/*
- * On LM63, temp2_crit can be set only once, which should be job
- * of the bootloader.
- */
+static SENSOR_DEVICE_ATTR(temp2_offset, S_IWUSR | S_IRUGO, show_temp11,
+ set_temp11, 3);
static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_remote_temp8,
- NULL, 2);
+ set_temp8, 2);
static DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp2_crit_hyst,
set_temp2_crit_hyst);
+static DEVICE_ATTR(temp2_type, S_IWUSR | S_IRUGO, show_type, set_type);
+
/* Individual alarm files */
static SENSOR_DEVICE_ATTR(fan1_min_alarm, S_IRUGO, show_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 1);
@@ -408,14 +703,43 @@ static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
/* Raw alarm file for compatibility */
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
+static DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, show_update_interval,
+ set_update_interval);
+
static struct attribute *lm63_attributes[] = {
- &dev_attr_pwm1.attr,
+ &sensor_dev_attr_pwm1.dev_attr.attr,
&dev_attr_pwm1_enable.attr,
+ &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point1_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point2_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point3_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point3_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point4_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point4_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point4_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point5_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point5_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point5_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point6_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point6_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point6_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point7_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point7_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point7_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point8_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point8_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point8_temp_hyst.dev_attr.attr,
+
&sensor_dev_attr_temp1_input.dev_attr.attr,
&sensor_dev_attr_temp2_input.dev_attr.attr,
&sensor_dev_attr_temp2_min.dev_attr.attr,
&sensor_dev_attr_temp1_max.dev_attr.attr,
&sensor_dev_attr_temp2_max.dev_attr.attr,
+ &sensor_dev_attr_temp2_offset.dev_attr.attr,
&sensor_dev_attr_temp2_crit.dev_attr.attr,
&dev_attr_temp2_crit_hyst.attr,
@@ -425,10 +749,54 @@ static struct attribute *lm63_attributes[] = {
&sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
&sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
&dev_attr_alarms.attr,
+ &dev_attr_update_interval.attr,
NULL
};
+static struct attribute *lm63_attributes_extra_lut[] = {
+ &sensor_dev_attr_pwm1_auto_point9_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point9_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point9_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point10_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point10_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point10_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point11_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point11_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point11_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point12_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point12_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point12_temp_hyst.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group lm63_group_extra_lut = {
+ .attrs = lm63_attributes_extra_lut,
+};
+
+/*
+ * On LM63, temp2_crit can be set only once, which should be the job
+ * of the bootloader.
+ * On LM64, temp2_crit can always be set.
+ * On LM96163, temp2_crit can be set if bit 1 of the configuration
+ * register is true.
+ */
+static umode_t lm63_attribute_mode(struct kobject *kobj,
+ struct attribute *attr, int index)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct lm63_data *data = i2c_get_clientdata(client);
+
+ if (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr
+ && (data->kind == lm64 ||
+ (data->kind == lm96163 && (data->config & 0x02))))
+ return attr->mode | S_IWUSR;
+
+ return attr->mode;
+}
+
static const struct attribute_group lm63_group = {
+ .is_visible = lm63_attribute_mode,
.attrs = lm63_attributes,
};
@@ -487,6 +855,8 @@ static int lm63_detect(struct i2c_client *new_client,
strlcpy(info->type, "lm63", I2C_NAME_SIZE);
else if (chip_id == 0x51 && (address == 0x18 || address == 0x4e))
strlcpy(info->type, "lm64", I2C_NAME_SIZE);
+ else if (chip_id == 0x49 && address == 0x4c)
+ strlcpy(info->type, "lm96163", I2C_NAME_SIZE);
else
return -ENODEV;
@@ -518,12 +888,24 @@ static int lm63_probe(struct i2c_client *new_client,
lm63_init_client(new_client);
/* Register sysfs hooks */
- if ((err = sysfs_create_group(&new_client->dev.kobj,
- &lm63_group)))
+ err = sysfs_create_group(&new_client->dev.kobj, &lm63_group);
+ if (err)
goto exit_free;
if (data->config & 0x04) { /* tachometer enabled */
- if ((err = sysfs_create_group(&new_client->dev.kobj,
- &lm63_group_fan1)))
+ err = sysfs_create_group(&new_client->dev.kobj,
+ &lm63_group_fan1);
+ if (err)
+ goto exit_remove_files;
+ }
+ if (data->kind == lm96163) {
+ err = device_create_file(&new_client->dev,
+ &dev_attr_temp2_type);
+ if (err)
+ goto exit_remove_files;
+
+ err = sysfs_create_group(&new_client->dev.kobj,
+ &lm63_group_extra_lut);
+ if (err)
goto exit_remove_files;
}
@@ -538,17 +920,25 @@ static int lm63_probe(struct i2c_client *new_client,
exit_remove_files:
sysfs_remove_group(&new_client->dev.kobj, &lm63_group);
sysfs_remove_group(&new_client->dev.kobj, &lm63_group_fan1);
+ if (data->kind == lm96163) {
+ device_remove_file(&new_client->dev, &dev_attr_temp2_type);
+ sysfs_remove_group(&new_client->dev.kobj,
+ &lm63_group_extra_lut);
+ }
exit_free:
kfree(data);
exit:
return err;
}
-/* Idealy we shouldn't have to initialize anything, since the BIOS
- should have taken care of everything */
+/*
+ * Ideally we shouldn't have to initialize anything, since the BIOS
+ * should have taken care of everything
+ */
static void lm63_init_client(struct i2c_client *client)
{
struct lm63_data *data = i2c_get_clientdata(client);
+ u8 convrate;
data->config = i2c_smbus_read_byte_data(client, LM63_REG_CONFIG1);
data->config_fan = i2c_smbus_read_byte_data(client,
@@ -561,16 +951,57 @@ static void lm63_init_client(struct i2c_client *client)
i2c_smbus_write_byte_data(client, LM63_REG_CONFIG1,
data->config);
}
+ /* Tachometer is always enabled on LM64 */
+ if (data->kind == lm64)
+ data->config |= 0x04;
/* We may need pwm1_freq before ever updating the client data */
data->pwm1_freq = i2c_smbus_read_byte_data(client, LM63_REG_PWM_FREQ);
if (data->pwm1_freq == 0)
data->pwm1_freq = 1;
+ switch (data->kind) {
+ case lm63:
+ case lm64:
+ data->max_convrate_hz = LM63_MAX_CONVRATE_HZ;
+ data->lut_size = 8;
+ break;
+ case lm96163:
+ data->max_convrate_hz = LM96163_MAX_CONVRATE_HZ;
+ data->lut_size = 12;
+ data->trutherm
+ = i2c_smbus_read_byte_data(client,
+ LM96163_REG_TRUTHERM) & 0x02;
+ break;
+ }
+ convrate = i2c_smbus_read_byte_data(client, LM63_REG_CONVRATE);
+ if (unlikely(convrate > LM63_MAX_CONVRATE))
+ convrate = LM63_MAX_CONVRATE;
+ data->update_interval = UPDATE_INTERVAL(data->max_convrate_hz,
+ convrate);
+
+ /*
+ * For LM96163, check if high resolution PWM
+ * and unsigned temperature format are enabled.
+ */
+ if (data->kind == lm96163) {
+ u8 config_enhanced
+ = i2c_smbus_read_byte_data(client,
+ LM96163_REG_CONFIG_ENHANCED);
+ if (config_enhanced & 0x20)
+ data->lut_temp_highres = true;
+ if ((config_enhanced & 0x10)
+ && !(data->config_fan & 0x08) && data->pwm1_freq == 8)
+ data->pwm_highres = true;
+ if (config_enhanced & 0x08)
+ data->remote_unsigned = true;
+ }
+
/* Show some debug info about the LM63 configuration */
- dev_dbg(&client->dev, "Alert/tach pin configured for %s\n",
- (data->config & 0x04) ? "tachometer input" :
- "alert output");
+ if (data->kind == lm63)
+ dev_dbg(&client->dev, "Alert/tach pin configured for %s\n",
+ (data->config & 0x04) ? "tachometer input" :
+ "alert output");
dev_dbg(&client->dev, "PWM clock %s kHz, output frequency %u Hz\n",
(data->config_fan & 0x08) ? "1.4" : "360",
((data->config_fan & 0x08) ? 700 : 180000) / data->pwm1_freq);
@@ -586,6 +1017,10 @@ static int lm63_remove(struct i2c_client *client)
hwmon_device_unregister(data->hwmon_dev);
sysfs_remove_group(&client->dev.kobj, &lm63_group);
sysfs_remove_group(&client->dev.kobj, &lm63_group_fan1);
+ if (data->kind == lm96163) {
+ device_remove_file(&client->dev, &dev_attr_temp2_type);
+ sysfs_remove_group(&client->dev.kobj, &lm63_group_extra_lut);
+ }
kfree(data);
return 0;
@@ -595,10 +1030,15 @@ static struct lm63_data *lm63_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm63_data *data = i2c_get_clientdata(client);
+ unsigned long next_update;
+ int i;
mutex_lock(&data->update_lock);
- if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+ next_update = data->last_updated
+ + msecs_to_jiffies(data->update_interval) + 1;
+
+ if (time_after(jiffies, next_update) || !data->valid) {
if (data->config & 0x04) { /* tachometer enabled */
/* order matters for fan1_input */
data->fan[0] = i2c_smbus_read_byte_data(client,
@@ -615,8 +1055,8 @@ static struct lm63_data *lm63_update_device(struct device *dev)
LM63_REG_PWM_FREQ);
if (data->pwm1_freq == 0)
data->pwm1_freq = 1;
- data->pwm1_value = i2c_smbus_read_byte_data(client,
- LM63_REG_PWM_VALUE);
+ data->pwm1[0] = i2c_smbus_read_byte_data(client,
+ LM63_REG_PWM_VALUE);
data->temp8[0] = i2c_smbus_read_byte_data(client,
LM63_REG_LOCAL_TEMP);
@@ -636,6 +1076,17 @@ static struct lm63_data *lm63_update_device(struct device *dev)
LM63_REG_REMOTE_HIGH_MSB) << 8)
| i2c_smbus_read_byte_data(client,
LM63_REG_REMOTE_HIGH_LSB);
+ data->temp11[3] = (i2c_smbus_read_byte_data(client,
+ LM63_REG_REMOTE_OFFSET_MSB) << 8)
+ | i2c_smbus_read_byte_data(client,
+ LM63_REG_REMOTE_OFFSET_LSB);
+
+ if (data->kind == lm96163)
+ data->temp11u = (i2c_smbus_read_byte_data(client,
+ LM96163_REG_REMOTE_TEMP_U_MSB) << 8)
+ | i2c_smbus_read_byte_data(client,
+ LM96163_REG_REMOTE_TEMP_U_LSB);
+
data->temp8[2] = i2c_smbus_read_byte_data(client,
LM63_REG_REMOTE_TCRIT);
data->temp2_crit_hyst = i2c_smbus_read_byte_data(client,
@@ -648,6 +1099,21 @@ static struct lm63_data *lm63_update_device(struct device *dev)
data->valid = 1;
}
+ if (time_after(jiffies, data->lut_last_updated + 5 * HZ) ||
+ !data->lut_valid) {
+ for (i = 0; i < data->lut_size; i++) {
+ data->pwm1[1 + i] = i2c_smbus_read_byte_data(client,
+ LM63_REG_LUT_PWM(i));
+ data->temp8[3 + i] = i2c_smbus_read_byte_data(client,
+ LM63_REG_LUT_TEMP(i));
+ }
+ data->lut_temp_hyst = i2c_smbus_read_byte_data(client,
+ LM63_REG_LUT_TEMP_HYST);
+
+ data->lut_last_updated = jiffies;
+ data->lut_valid = 1;
+ }
+
mutex_unlock(&data->update_lock);
return data;
diff --git a/drivers/hwmon/lm73.c b/drivers/hwmon/lm73.c
index 9e64d96..9c8093c 100644
--- a/drivers/hwmon/lm73.c
+++ b/drivers/hwmon/lm73.c
@@ -50,7 +50,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
long temp;
short value;
- int status = strict_strtol(buf, 10, &temp);
+ int status = kstrtol(buf, 10, &temp);
if (status < 0)
return status;
diff --git a/drivers/hwmon/lm75.c b/drivers/hwmon/lm75.c
index 1888dd0..b3311b1 100644
--- a/drivers/hwmon/lm75.c
+++ b/drivers/hwmon/lm75.c
@@ -93,6 +93,10 @@ static ssize_t show_temp(struct device *dev, struct device_attribute *da,
{
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct lm75_data *data = lm75_update_device(dev);
+
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
return sprintf(buf, "%d\n",
LM75_TEMP_FROM_REG(data->temp[attr->index]));
}
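show_temp() can now receive an ERR_PTR-encoded error from lm75_update_device() instead of possibly stale data. A minimal userspace sketch of that pattern, with simplified stand-ins for the kernel's ERR_PTR/IS_ERR/PTR_ERR helpers (illustration only, not part of the patch):

/* Encode a negative errno in the pointer so callers never see stale data. */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO	4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static int sensor_value = 42;

static int *update_device(int fail)
{
	if (fail)
		return ERR_PTR(-EIO);	/* read failed: propagate the error */
	return &sensor_value;
}

int main(void)
{
	int *data = update_device(1);

	if (IS_ERR(data))
		printf("error %ld\n", PTR_ERR(data));	/* error -5 */
	else
		printf("value %d\n", *data);
	return 0;
}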
@@ -107,7 +111,7 @@ static ssize_t set_temp(struct device *dev, struct device_attribute *da,
long temp;
int error;
- error = strict_strtol(buf, 10, &temp);
+ error = kstrtol(buf, 10, &temp);
if (error)
return error;
@@ -402,6 +406,7 @@ static struct lm75_data *lm75_update_device(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct lm75_data *data = i2c_get_clientdata(client);
+ struct lm75_data *ret = data;
mutex_lock(&data->update_lock);
@@ -414,19 +419,23 @@ static struct lm75_data *lm75_update_device(struct device *dev)
int status;
status = lm75_read_value(client, LM75_REG_TEMP[i]);
- if (status < 0)
- dev_dbg(&client->dev, "reg %d, err %d\n",
- LM75_REG_TEMP[i], status);
- else
- data->temp[i] = status;
+ if (unlikely(status < 0)) {
+ dev_dbg(dev,
+ "LM75: Failed to read value: reg %d, error %d\n",
+ LM75_REG_TEMP[i], status);
+ ret = ERR_PTR(status);
+ data->valid = 0;
+ goto abort;
+ }
+ data->temp[i] = status;
}
data->last_updated = jiffies;
data->valid = 1;
}
+abort:
mutex_unlock(&data->update_lock);
-
- return data;
+ return ret;
}
/*-----------------------------------------------------------------------*/
diff --git a/drivers/hwmon/lm75.h b/drivers/hwmon/lm75.h
index e547a3e..89aa909 100644
--- a/drivers/hwmon/lm75.h
+++ b/drivers/hwmon/lm75.h
@@ -1,6 +1,6 @@
/*
lm75.h - Part of lm_sensors, Linux kernel modules for hardware
- monitoring
+ monitoring
Copyright (c) 2003 Mark M. Hoffman <mhoffman@lightlink.com>
This program is free software; you can redistribute it and/or modify
@@ -37,7 +37,7 @@
static inline u16 LM75_TEMP_TO_REG(long temp)
{
int ntemp = SENSORS_LIMIT(temp, LM75_TEMP_MIN, LM75_TEMP_MAX);
- ntemp += (ntemp<0 ? -250 : 250);
+ ntemp += (ntemp < 0 ? -250 : 250);
return (u16)((ntemp / 500) << 7);
}
@@ -47,4 +47,3 @@ static inline int LM75_TEMP_FROM_REG(u16 reg)
guarantee arithmetic shift and preserve the sign */
return ((s16)reg / 128) * 500;
}
-
diff --git a/drivers/hwmon/lm80.c b/drivers/hwmon/lm80.c
index 18a0e6c..0891b38 100644
--- a/drivers/hwmon/lm80.c
+++ b/drivers/hwmon/lm80.c
@@ -66,19 +66,19 @@ static const unsigned short normal_i2c[] = { 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d,
these macros are called: arguments may be evaluated more than once.
Fixing this is just not worth it. */
-#define IN_TO_REG(val) (SENSORS_LIMIT(((val)+5)/10,0,255))
-#define IN_FROM_REG(val) ((val)*10)
+#define IN_TO_REG(val) (SENSORS_LIMIT(((val) + 5) / 10, 0, 255))
+#define IN_FROM_REG(val) ((val) * 10)
static inline unsigned char FAN_TO_REG(unsigned rpm, unsigned div)
{
if (rpm == 0)
return 255;
rpm = SENSORS_LIMIT(rpm, 1, 1000000);
- return SENSORS_LIMIT((1350000 + rpm*div / 2) / (rpm*div), 1, 254);
+ return SENSORS_LIMIT((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
-#define FAN_FROM_REG(val,div) ((val)==0?-1:\
- (val)==255?0:1350000/((div)*(val)))
+#define FAN_FROM_REG(val, div) ((val) == 0 ? -1 : \
+ (val) == 255 ? 0 : 1350000/((div) * (val)))
static inline long TEMP_FROM_REG(u16 temp)
{
@@ -93,10 +93,11 @@ static inline long TEMP_FROM_REG(u16 temp)
return res / 10;
}
-#define TEMP_LIMIT_FROM_REG(val) (((val)>0x80?(val)-0x100:(val))*1000)
+#define TEMP_LIMIT_FROM_REG(val) (((val) > 0x80 ? \
+ (val) - 0x100 : (val)) * 1000)
-#define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val)<0?\
- ((val)-500)/1000:((val)+500)/1000,0,255)
+#define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val) < 0 ? \
+ ((val) - 500) / 1000 : ((val) + 500) / 1000, 0, 255)
#define DIV_FROM_REG(val) (1 << (val))
@@ -164,7 +165,8 @@ static struct i2c_driver lm80_driver = {
*/
#define show_in(suffix, value) \
-static ssize_t show_in_##suffix(struct device *dev, struct device_attribute *attr, char *buf) \
+static ssize_t show_in_##suffix(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
{ \
int nr = to_sensor_dev_attr(attr)->index; \
struct lm80_data *data = lm80_update_device(dev); \
@@ -175,14 +177,14 @@ show_in(max, in_max)
show_in(input, in)
#define set_in(suffix, value, reg) \
-static ssize_t set_in_##suffix(struct device *dev, struct device_attribute *attr, const char *buf, \
- size_t count) \
+static ssize_t set_in_##suffix(struct device *dev, \
+ struct device_attribute *attr, const char *buf, size_t count) \
{ \
int nr = to_sensor_dev_attr(attr)->index; \
struct i2c_client *client = to_i2c_client(dev); \
struct lm80_data *data = i2c_get_clientdata(client); \
long val = simple_strtol(buf, NULL, 10); \
- \
+\
mutex_lock(&data->update_lock);\
data->value[nr] = IN_TO_REG(val); \
lm80_write_value(client, reg(nr), data->value[nr]); \
@@ -193,7 +195,8 @@ set_in(min, in_min, LM80_REG_IN_MIN)
set_in(max, in_max, LM80_REG_IN_MAX)
#define show_fan(suffix, value) \
-static ssize_t show_fan_##suffix(struct device *dev, struct device_attribute *attr, char *buf) \
+static ssize_t show_fan_##suffix(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
{ \
int nr = to_sensor_dev_attr(attr)->index; \
struct lm80_data *data = lm80_update_device(dev); \
@@ -245,10 +248,18 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
DIV_FROM_REG(data->fan_div[nr]));
switch (val) {
- case 1: data->fan_div[nr] = 0; break;
- case 2: data->fan_div[nr] = 1; break;
- case 4: data->fan_div[nr] = 2; break;
- case 8: data->fan_div[nr] = 3; break;
+ case 1:
+ data->fan_div[nr] = 0;
+ break;
+ case 2:
+ data->fan_div[nr] = 1;
+ break;
+ case 4:
+ data->fan_div[nr] = 2;
+ break;
+ case 8:
+ data->fan_div[nr] = 3;
+ break;
default:
dev_err(&client->dev, "fan_div value %ld not "
"supported. Choose one of 1, 2, 4 or 8!\n", val);
@@ -268,14 +279,16 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
return count;
}
-static ssize_t show_temp_input1(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_temp_input1(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct lm80_data *data = lm80_update_device(dev);
return sprintf(buf, "%ld\n", TEMP_FROM_REG(data->temp));
}
#define show_temp(suffix, value) \
-static ssize_t show_temp_##suffix(struct device *dev, struct device_attribute *attr, char *buf) \
+static ssize_t show_temp_##suffix(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
{ \
struct lm80_data *data = lm80_update_device(dev); \
return sprintf(buf, "%d\n", TEMP_LIMIT_FROM_REG(data->value)); \
@@ -286,13 +299,13 @@ show_temp(os_max, temp_os_max);
show_temp(os_hyst, temp_os_hyst);
#define set_temp(suffix, value, reg) \
-static ssize_t set_temp_##suffix(struct device *dev, struct device_attribute *attr, const char *buf, \
- size_t count) \
+static ssize_t set_temp_##suffix(struct device *dev, \
+ struct device_attribute *attr, const char *buf, size_t count) \
{ \
struct i2c_client *client = to_i2c_client(dev); \
struct lm80_data *data = i2c_get_clientdata(client); \
long val = simple_strtoul(buf, NULL, 10); \
- \
+\
mutex_lock(&data->update_lock); \
data->value = TEMP_LIMIT_TO_REG(val); \
lm80_write_value(client, reg, data->value); \
@@ -366,13 +379,13 @@ static SENSOR_DEVICE_ATTR(fan2_div, S_IWUSR | S_IRUGO,
show_fan_div, set_fan_div, 1);
static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp_input1, NULL);
static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_hot_max,
- set_temp_hot_max);
+ set_temp_hot_max);
static DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO, show_temp_hot_hyst,
- set_temp_hot_hyst);
+ set_temp_hot_hyst);
static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, show_temp_os_max,
- set_temp_os_max);
+ set_temp_os_max);
static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, show_temp_os_hyst,
- set_temp_os_hyst);
+ set_temp_os_hyst);
static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
static SENSOR_DEVICE_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0);
static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1);
@@ -459,7 +472,7 @@ static int lm80_detect(struct i2c_client *client, struct i2c_board_info *info)
if ((i2c_smbus_read_byte_data(client, i + 0x40) != cur)
|| (i2c_smbus_read_byte_data(client, i + 0x80) != cur)
|| (i2c_smbus_read_byte_data(client, i + 0xc0) != cur))
- return -ENODEV;
+ return -ENODEV;
}
strlcpy(info->type, "lm80", I2C_NAME_SIZE);
@@ -490,7 +503,8 @@ static int lm80_probe(struct i2c_client *client,
data->fan_min[1] = lm80_read_value(client, LM80_REG_FAN_MIN(2));
/* Register sysfs hooks */
- if ((err = sysfs_create_group(&client->dev.kobj, &lm80_group)))
+ err = sysfs_create_group(&client->dev.kobj, &lm80_group);
+ if (err)
goto error_free;
data->hwmon_dev = hwmon_device_register(&client->dev);
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 615bc4f..d2dd5f9 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -730,7 +730,7 @@ static ssize_t set_temp8(struct device *dev, struct device_attribute *devattr,
long val;
int err;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err < 0)
return err;
@@ -798,7 +798,7 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
long val;
int err;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err < 0)
return err;
@@ -859,7 +859,7 @@ static ssize_t set_temphyst(struct device *dev, struct device_attribute *dummy,
int err;
int temp;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err < 0)
return err;
@@ -912,12 +912,12 @@ static ssize_t set_update_interval(struct device *dev,
unsigned long val;
int err;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err)
return err;
mutex_lock(&data->update_lock);
- lm90_set_convrate(client, data, val);
+ lm90_set_convrate(client, data, SENSORS_LIMIT(val, 0, 100000));
mutex_unlock(&data->update_lock);
return count;
@@ -1080,7 +1080,7 @@ static ssize_t set_pec(struct device *dev, struct device_attribute *dummy,
long val;
int err;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err < 0)
return err;
diff --git a/drivers/hwmon/lm93.c b/drivers/hwmon/lm93.c
index 3b43df4..8bd6c5c 100644
--- a/drivers/hwmon/lm93.c
+++ b/drivers/hwmon/lm93.c
@@ -151,12 +151,12 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
/* Insmod parameters */
-static int disable_block;
+static bool disable_block;
module_param(disable_block, bool, 0);
MODULE_PARM_DESC(disable_block,
"Set to non-zero to disable SMBus block data transactions.");
-static int init;
+static bool init;
module_param(init, bool, 0);
MODULE_PARM_DESC(init, "Set to non-zero to force chip initialization.");
diff --git a/drivers/hwmon/lm95241.c b/drivers/hwmon/lm95241.c
index 513901d..70bca67 100644
--- a/drivers/hwmon/lm95241.c
+++ b/drivers/hwmon/lm95241.c
@@ -169,7 +169,7 @@ static ssize_t set_type(struct device *dev, struct device_attribute *attr,
int shift;
u8 mask = to_sensor_dev_attr(attr)->index;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
if (val != 1 && val != 2)
return -EINVAL;
@@ -216,7 +216,7 @@ static ssize_t set_min(struct device *dev, struct device_attribute *attr,
struct lm95241_data *data = i2c_get_clientdata(client);
long val;
- if (strict_strtol(buf, 10, &val) < 0)
+ if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
if (val < -128000)
return -EINVAL;
@@ -254,7 +254,7 @@ static ssize_t set_max(struct device *dev, struct device_attribute *attr,
struct lm95241_data *data = i2c_get_clientdata(client);
long val;
- if (strict_strtol(buf, 10, &val) < 0)
+ if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
if (val >= 256000)
return -EINVAL;
@@ -290,7 +290,7 @@ static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
struct lm95241_data *data = i2c_get_clientdata(client);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
data->interval = val * HZ / 1000;
diff --git a/drivers/hwmon/lm95245.c b/drivers/hwmon/lm95245.c
index dce9e68..5e5fc1b 100644
--- a/drivers/hwmon/lm95245.c
+++ b/drivers/hwmon/lm95245.c
@@ -254,7 +254,7 @@ static ssize_t set_limit(struct device *dev, struct device_attribute *attr,
int index = to_sensor_dev_attr(attr)->index;
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
val /= 1000;
@@ -279,7 +279,7 @@ static ssize_t set_crit_hyst(struct device *dev, struct device_attribute *attr,
struct lm95245_data *data = i2c_get_clientdata(client);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
val /= 1000;
@@ -316,7 +316,7 @@ static ssize_t set_type(struct device *dev, struct device_attribute *attr,
struct lm95245_data *data = i2c_get_clientdata(client);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
if (val != 1 && val != 2)
return -EINVAL;
@@ -363,7 +363,7 @@ static ssize_t set_interval(struct device *dev, struct device_attribute *attr,
struct lm95245_data *data = i2c_get_clientdata(client);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/ltc4261.c b/drivers/hwmon/ltc4261.c
index 4b50601..ce52355 100644
--- a/drivers/hwmon/ltc4261.c
+++ b/drivers/hwmon/ltc4261.c
@@ -85,6 +85,7 @@ static struct ltc4261_data *ltc4261_update_device(struct device *dev)
"Failed to read ADC value: error %d\n",
val);
ret = ERR_PTR(val);
+ data->valid = 0;
goto abort;
}
data->regs[i] = val;
diff --git a/drivers/hwmon/max1111.c b/drivers/hwmon/max1111.c
index c97b78e..482ca90 100644
--- a/drivers/hwmon/max1111.c
+++ b/drivers/hwmon/max1111.c
@@ -6,7 +6,7 @@
* Copyright (C) 2004-2005 Richard Purdie
*
* Copyright (C) 2008 Marvell International Ltd.
- * Eric Miao <eric.miao@marvell.com>
+ * Eric Miao <eric.miao@marvell.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -106,11 +106,14 @@ static ssize_t show_adc(struct device *dev,
if (ret < 0)
return ret;
- return sprintf(buf, "%d\n", ret);
+ /* assume the reference voltage to be 2.048V, with an 8-bit sample,
+ * the LSB weight is 8mV
+ */
+ return sprintf(buf, "%d\n", ret * 8);
}
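The comment's arithmetic: a 2.048 V reference divided across 256 codes gives 8 mV per LSB, hence the ret * 8 scaling to millivolts. A trivial check (illustration only, not part of the patch):

/* 2048 mV / 256 codes = 8 mV per code. */
#include <stdio.h>

int main(void)
{
	int vref_mv = 2048, codes = 256, raw = 128;

	printf("lsb=%d mV, raw %d -> %d mV\n",
	       vref_mv / codes, raw, raw * (vref_mv / codes));
	/* lsb=8 mV, raw 128 -> 1024 mV */
	return 0;
}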
#define MAX1111_ADC_ATTR(_id) \
- SENSOR_DEVICE_ATTR(adc##_id##_in, S_IRUGO, show_adc, NULL, _id)
+ SENSOR_DEVICE_ATTR(in##_id##_input, S_IRUGO, show_adc, NULL, _id)
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
static MAX1111_ADC_ATTR(0);
@@ -120,10 +123,10 @@ static MAX1111_ADC_ATTR(3);
static struct attribute *max1111_attributes[] = {
&dev_attr_name.attr,
- &sensor_dev_attr_adc0_in.dev_attr.attr,
- &sensor_dev_attr_adc1_in.dev_attr.attr,
- &sensor_dev_attr_adc2_in.dev_attr.attr,
- &sensor_dev_attr_adc3_in.dev_attr.attr,
+ &sensor_dev_attr_in0_input.dev_attr.attr,
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in3_input.dev_attr.attr,
NULL,
};
diff --git a/drivers/hwmon/max16065.c b/drivers/hwmon/max16065.c
index 385886a..f8e323a 100644
--- a/drivers/hwmon/max16065.c
+++ b/drivers/hwmon/max16065.c
@@ -230,7 +230,7 @@ static ssize_t max16065_set_limit(struct device *dev,
int err;
int limit;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (unlikely(err < 0))
return err;
diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
index 20d1b2d..88953f9 100644
--- a/drivers/hwmon/max1668.c
+++ b/drivers/hwmon/max1668.c
@@ -59,7 +59,7 @@ static unsigned short max1668_addr_list[] = {
#define DEV_ID_MAX1989 0xb
/* read only mode module parameter */
-static int read_only;
+static bool read_only;
module_param(read_only, bool, 0);
MODULE_PARM_DESC(read_only, "Don't set any values, read only mode");
@@ -335,10 +335,10 @@ static struct attribute *max1668_attribute_unique[] = {
NULL
};
-static mode_t max1668_attribute_mode(struct kobject *kobj,
+static umode_t max1668_attribute_mode(struct kobject *kobj,
struct attribute *attr, int index)
{
- int ret = S_IRUGO;
+ umode_t ret = S_IRUGO;
if (read_only)
return ret;
if (attr == &sensor_dev_attr_temp1_max.dev_attr.attr ||
diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
index f20d997..a6760ba 100644
--- a/drivers/hwmon/max6639.c
+++ b/drivers/hwmon/max6639.c
@@ -72,8 +72,8 @@ static unsigned short normal_i2c[] = { 0x2c, 0x2e, 0x2f, I2C_CLIENT_END };
static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 };
-#define FAN_FROM_REG(val, div, rpm_range) ((val) == 0 ? -1 : \
- (val) == 255 ? 0 : (rpm_ranges[rpm_range] * 30) / ((div + 1) * (val)))
+#define FAN_FROM_REG(val, rpm_range) ((val) == 0 || (val) == 255 ? \
+ 0 : (rpm_ranges[rpm_range] * 30) / (val))
#define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val) / 1000, 0, 255)
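The reworked FAN_FROM_REG() drops the divider argument and treats tach counts of 0 and 255 both as "no reading". A worked example using the rpm_ranges[] table from this hunk (illustration only, not part of the patch):

/* rpm = range * 30 / count; 0 and 255 read as 0 RPM. */
#include <stdio.h>

static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 };

#define FAN_FROM_REG(val, rpm_range) ((val) == 0 || (val) == 255 ? \
	0 : (rpm_ranges[rpm_range] * 30) / (val))

int main(void)
{
	printf("%d %d %d\n",
	       FAN_FROM_REG(60, 1),	/* 4000 * 30 / 60 = 2000 RPM */
	       FAN_FROM_REG(255, 1),	/* stalled or absent -> 0 */
	       FAN_FROM_REG(0, 1));	/* no reading -> 0 */
	return 0;
}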
/*
@@ -208,7 +208,7 @@ static ssize_t set_temp_max(struct device *dev,
unsigned long val;
int res;
- res = strict_strtoul(buf, 10, &val);
+ res = kstrtoul(buf, 10, &val);
if (res)
return res;
@@ -241,7 +241,7 @@ static ssize_t set_temp_crit(struct device *dev,
unsigned long val;
int res;
- res = strict_strtoul(buf, 10, &val);
+ res = kstrtoul(buf, 10, &val);
if (res)
return res;
@@ -275,7 +275,7 @@ static ssize_t set_temp_emergency(struct device *dev,
unsigned long val;
int res;
- res = strict_strtoul(buf, 10, &val);
+ res = kstrtoul(buf, 10, &val);
if (res)
return res;
@@ -308,7 +308,7 @@ static ssize_t set_pwm(struct device *dev,
unsigned long val;
int res;
- res = strict_strtoul(buf, 10, &val);
+ res = kstrtoul(buf, 10, &val);
if (res)
return res;
@@ -333,7 +333,7 @@ static ssize_t show_fan_input(struct device *dev,
return PTR_ERR(data);
return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[attr->index],
- data->ppr, data->rpm_range));
+ data->rpm_range));
}
static ssize_t show_alarm(struct device *dev,
@@ -429,9 +429,9 @@ static int max6639_init_client(struct i2c_client *client)
struct max6639_data *data = i2c_get_clientdata(client);
struct max6639_platform_data *max6639_info =
client->dev.platform_data;
- int i = 0;
+ int i;
int rpm_range = 1; /* default: 4000 RPM */
- int err = 0;
+ int err;
/* Reset chip to default values, see below for GCONFIG setup */
err = i2c_smbus_write_byte_data(client, MAX6639_REG_GCONFIG,
@@ -446,11 +446,6 @@ static int max6639_init_client(struct i2c_client *client)
else
data->ppr = 2;
data->ppr -= 1;
- err = i2c_smbus_write_byte_data(client,
- MAX6639_REG_FAN_PPR(i),
- data->ppr << 5);
- if (err)
- goto exit;
if (max6639_info)
rpm_range = rpm_range_to_reg(max6639_info->rpm_range);
@@ -458,6 +453,13 @@ static int max6639_init_client(struct i2c_client *client)
for (i = 0; i < 2; i++) {
+ /* Set Fan pulse per revolution */
+ err = i2c_smbus_write_byte_data(client,
+ MAX6639_REG_FAN_PPR(i),
+ data->ppr << 6);
+ if (err)
+ goto exit;
+
/* Fans config PWM, RPM */
err = i2c_smbus_write_byte_data(client,
MAX6639_REG_FAN_CONFIG1(i),
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
index e855d3b..209e8a5 100644
--- a/drivers/hwmon/max6642.c
+++ b/drivers/hwmon/max6642.c
@@ -234,7 +234,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
struct max6642_data *data = i2c_get_clientdata(client);
struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
diff --git a/drivers/hwmon/max6650.c b/drivers/hwmon/max6650.c
index ece3aaf..2fc034a 100644
--- a/drivers/hwmon/max6650.c
+++ b/drivers/hwmon/max6650.c
@@ -464,7 +464,7 @@ static SENSOR_DEVICE_ATTR(gpio1_alarm, S_IRUGO, get_alarm, NULL,
static SENSOR_DEVICE_ATTR(gpio2_alarm, S_IRUGO, get_alarm, NULL,
MAX6650_ALRM_GPIO2);
-static mode_t max6650_attrs_visible(struct kobject *kobj, struct attribute *a,
+static umode_t max6650_attrs_visible(struct kobject *kobj, struct attribute *a,
int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
index 8da2181..cb35461 100644
--- a/drivers/hwmon/pc87427.c
+++ b/drivers/hwmon/pc87427.c
@@ -418,7 +418,7 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute
unsigned long val;
int iobase = data->address[LD_FAN];
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->lock);
@@ -572,7 +572,7 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute
int nr = to_sensor_dev_attr(devattr)->index;
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0 || val > 2)
+ if (kstrtoul(buf, 10, &val) < 0 || val > 2)
return -EINVAL;
/* Can't go to automatic mode if it isn't configured */
if (val == 2 && !(data->pwm_auto_ok & (1 << nr)))
@@ -604,7 +604,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute
int iobase = data->address[LD_FAN];
u8 mode;
- if (strict_strtoul(buf, 10, &val) < 0 || val > 0xff)
+ if (kstrtoul(buf, 10, &val) < 0 || val > 0xff)
return -EINVAL;
mutex_lock(&data->lock);
diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig
index 4b26f51..cfec923 100644
--- a/drivers/hwmon/pmbus/Kconfig
+++ b/drivers/hwmon/pmbus/Kconfig
@@ -19,8 +19,8 @@ config SENSORS_PMBUS
default y
help
If you say yes here you get hardware monitoring support for generic
- PMBus devices, including but not limited to ADP4000, BMR450, BMR451,
- BMR453, BMR454, NCP4200, and NCP4208.
+ PMBus devices, including but not limited to ADP4000, BMR453, BMR454,
+ NCP4200, and NCP4208.
This driver can also be built as a module. If so, the module will
be called pmbus.
@@ -113,8 +113,9 @@ config SENSORS_ZL6100
default n
help
If you say yes here you get hardware monitoring support for Intersil
- ZL2004, ZL2006, ZL2008, ZL2105, ZL2106, ZL6100, and ZL6105 Digital
- DC/DC Controllers.
+ ZL2004, ZL2005, ZL2006, ZL2008, ZL2105, ZL2106, ZL6100, and ZL6105
+ Digital DC/DC Controllers, as well as for Ericsson BMR450, BMR451,
+ BMR462, BMR463, and BMR464.
This driver can also be built as a module. If so, the module will
be called zl6100.
diff --git a/drivers/hwmon/pmbus/adm1275.c b/drivers/hwmon/pmbus/adm1275.c
index 980a4d9..81c7c2e 100644
--- a/drivers/hwmon/pmbus/adm1275.c
+++ b/drivers/hwmon/pmbus/adm1275.c
@@ -170,35 +170,71 @@ static int adm1275_read_byte_data(struct i2c_client *client, int page, int reg)
return ret;
}
+static const struct i2c_device_id adm1275_id[] = {
+ { "adm1275", adm1275 },
+ { "adm1276", adm1276 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, adm1275_id);
+
static int adm1275_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
+ u8 block_buffer[I2C_SMBUS_BLOCK_MAX + 1];
int config, device_config;
int ret;
struct pmbus_driver_info *info;
struct adm1275_data *data;
+ const struct i2c_device_id *mid;
if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_READ_BYTE_DATA))
+ I2C_FUNC_SMBUS_READ_BYTE_DATA
+ | I2C_FUNC_SMBUS_BLOCK_DATA))
return -ENODEV;
- data = kzalloc(sizeof(struct adm1275_data), GFP_KERNEL);
- if (!data)
- return -ENOMEM;
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, block_buffer);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read Manufacturer ID\n");
+ return ret;
+ }
+ if (ret != 3 || strncmp(block_buffer, "ADI", 3)) {
+ dev_err(&client->dev, "Unsupported Manufacturer ID\n");
+ return -ENODEV;
+ }
- config = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG);
- if (config < 0) {
- ret = config;
- goto err_mem;
+ ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, block_buffer);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read Manufacturer Model\n");
+ return ret;
+ }
+ for (mid = adm1275_id; mid->name[0]; mid++) {
+ if (!strncasecmp(mid->name, block_buffer, strlen(mid->name)))
+ break;
+ }
+ if (!mid->name[0]) {
+ dev_err(&client->dev, "Unsupported device\n");
+ return -ENODEV;
}
+ if (id->driver_data != mid->driver_data)
+ dev_notice(&client->dev,
+ "Device mismatch: Configured %s, detected %s\n",
+ id->name, mid->name);
+
+ config = i2c_smbus_read_byte_data(client, ADM1275_PMON_CONFIG);
+ if (config < 0)
+ return config;
+
device_config = i2c_smbus_read_byte_data(client, ADM1275_DEVICE_CONFIG);
- if (device_config < 0) {
- ret = device_config;
- goto err_mem;
- }
+ if (device_config < 0)
+ return device_config;
+
+ data = kzalloc(sizeof(struct adm1275_data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->id = mid->driver_data;
- data->id = id->driver_data;
info = &data->info;
info->pages = 1;
@@ -233,7 +269,7 @@ static int adm1275_probe(struct i2c_client *client,
if (device_config & ADM1275_IOUT_WARN2_SELECT)
data->have_oc_fault = true;
- switch (id->driver_data) {
+ switch (data->id) {
case adm1275:
if (config & ADM1275_VIN_VOUT_SELECT)
info->func[0] |=
@@ -281,13 +317,6 @@ static int adm1275_remove(struct i2c_client *client)
return 0;
}
-static const struct i2c_device_id adm1275_id[] = {
- { "adm1275", adm1275 },
- { "adm1276", adm1276 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, adm1275_id);
-
static struct i2c_driver adm1275_driver = {
.driver = {
.name = "adm1275",
diff --git a/drivers/hwmon/pmbus/max34440.c b/drivers/hwmon/pmbus/max34440.c
index beaf5a8..9b97a5b 100644
--- a/drivers/hwmon/pmbus/max34440.c
+++ b/drivers/hwmon/pmbus/max34440.c
@@ -82,7 +82,7 @@ static int max34440_write_word_data(struct i2c_client *client, int page,
case PMBUS_VIRT_RESET_TEMP_HISTORY:
ret = pmbus_write_word_data(client, page,
MAX34440_MFR_TEMPERATURE_PEAK,
- 0xffff);
+ 0x8000);
break;
default:
ret = -ENODATA;
diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c
index 995e873..18a385e 100644
--- a/drivers/hwmon/pmbus/pmbus.c
+++ b/drivers/hwmon/pmbus/pmbus.c
@@ -200,8 +200,6 @@ static int pmbus_remove(struct i2c_client *client)
*/
static const struct i2c_device_id pmbus_id[] = {
{"adp4000", 1},
- {"bmr450", 1},
- {"bmr451", 1},
{"bmr453", 1},
{"bmr454", 1},
{"ncp4200", 1},
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 00460d8..d89b339 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -54,7 +54,8 @@
lcrit_alarm, crit_alarm */
#define PMBUS_IOUT_BOOLEANS_PER_PAGE 3 /* alarm, lcrit_alarm,
crit_alarm */
-#define PMBUS_POUT_BOOLEANS_PER_PAGE 2 /* alarm, crit_alarm */
+#define PMBUS_POUT_BOOLEANS_PER_PAGE 3 /* cap_alarm, alarm, crit_alarm
+ */
#define PMBUS_MAX_BOOLEANS_PER_FAN 2 /* alarm, fault */
#define PMBUS_MAX_BOOLEANS_PER_TEMP 4 /* min_alarm, max_alarm,
lcrit_alarm, crit_alarm */
diff --git a/drivers/hwmon/pmbus/zl6100.c b/drivers/hwmon/pmbus/zl6100.c
index 2bc9800..880b90c 100644
--- a/drivers/hwmon/pmbus/zl6100.c
+++ b/drivers/hwmon/pmbus/zl6100.c
@@ -28,18 +28,22 @@
#include <linux/delay.h>
#include "pmbus.h"
-enum chips { zl2004, zl2006, zl2008, zl2105, zl2106, zl6100, zl6105 };
+enum chips { zl2004, zl2005, zl2006, zl2008, zl2105, zl2106, zl6100, zl6105 };
struct zl6100_data {
int id;
ktime_t access; /* chip access time */
+ int delay; /* Delay between chip accesses in uS */
struct pmbus_driver_info info;
};
#define to_zl6100_data(x) container_of(x, struct zl6100_data, info)
+#define ZL6100_MFR_CONFIG 0xd0
#define ZL6100_DEVICE_ID 0xe4
+#define ZL6100_MFR_XTEMP_ENABLE (1 << 7)
+
#define ZL6100_WAIT_TIME 1000 /* uS */
static ushort delay = ZL6100_WAIT_TIME;
@@ -49,10 +53,10 @@ MODULE_PARM_DESC(delay, "Delay between chip accesses in uS");
/* Some chips need a delay between accesses */
static inline void zl6100_wait(const struct zl6100_data *data)
{
- if (delay) {
+ if (data->delay) {
s64 delta = ktime_us_delta(ktime_get(), data->access);
- if (delta < delay)
- udelay(delay - delta);
+ if (delta < data->delay)
+ udelay(data->delay - delta);
}
}
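zl6100_wait() throttles back-to-back chip accesses by remembering the timestamp of the previous access and waiting out the remaining gap. A userspace sketch of the same idea using clock_gettime()/usleep() in place of ktime/udelay (illustration only, not part of the patch):

/* Enforce a minimum gap between consecutive accesses (1000 us here,
 * mirroring ZL6100_WAIT_TIME).
 */
#include <stdint.h>
#include <time.h>
#include <unistd.h>
#include <stdio.h>

static int64_t last_access_us;
static int64_t delay_us = 1000;

static int64_t now_us(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000 + ts.tv_nsec / 1000;
}

static void chip_wait(void)
{
	int64_t delta = now_us() - last_access_us;

	if (delay_us && delta < delay_us)
		usleep(delay_us - delta);
}

static void chip_access(void)
{
	chip_wait();
	/* ... talk to the chip here ... */
	last_access_us = now_us();
}

int main(void)
{
	chip_access();
	chip_access();	/* second access is throttled by ~1 ms */
	printf("done\n");
	return 0;
}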
@@ -65,6 +69,19 @@ static int zl6100_read_word_data(struct i2c_client *client, int page, int reg)
if (page || reg >= PMBUS_VIRT_BASE)
return -ENXIO;
+ if (data->id == zl2005) {
+ /*
+ * Limit register detection is not reliable on ZL2005.
+ * Make sure registers are not erroneously detected.
+ */
+ switch (reg) {
+ case PMBUS_VOUT_OV_WARN_LIMIT:
+ case PMBUS_VOUT_UV_WARN_LIMIT:
+ case PMBUS_IOUT_OC_WARN_LIMIT:
+ return -ENXIO;
+ }
+ }
+
zl6100_wait(data);
ret = pmbus_read_word_data(client, page, reg);
data->access = ktime_get();
@@ -122,7 +139,13 @@ static int zl6100_write_byte(struct i2c_client *client, int page, u8 value)
}
static const struct i2c_device_id zl6100_id[] = {
+ {"bmr450", zl2005},
+ {"bmr451", zl2005},
+ {"bmr462", zl2008},
+ {"bmr463", zl2008},
+ {"bmr464", zl2008},
{"zl2004", zl2004},
+ {"zl2005", zl2005},
{"zl2006", zl2006},
{"zl2008", zl2008},
{"zl2105", zl2105},
@@ -143,7 +166,7 @@ static int zl6100_probe(struct i2c_client *client,
const struct i2c_device_id *mid;
if (!i2c_check_functionality(client->adapter,
- I2C_FUNC_SMBUS_READ_BYTE_DATA
+ I2C_FUNC_SMBUS_READ_WORD_DATA
| I2C_FUNC_SMBUS_READ_BLOCK_DATA))
return -ENODEV;
@@ -177,25 +200,24 @@ static int zl6100_probe(struct i2c_client *client,
data->id = mid->driver_data;
/*
- * ZL2008, ZL2105, and ZL6100 are known to require a wait time
+ * ZL2005, ZL2008, ZL2105, and ZL6100 are known to require a wait time
* between I2C accesses. ZL2004 and ZL6105 are known to be safe.
+ * Other chips have not yet been tested.
*
* Only clear the wait time for chips known to be safe. The wait time
* can be cleared later for additional chips if tests show that it
* is not needed (in other words, better be safe than sorry).
*/
+ data->delay = delay;
if (data->id == zl2004 || data->id == zl6105)
- delay = 0;
+ data->delay = 0;
/*
* Since there was a direct I2C device access above, wait before
* accessing the chip again.
- * Set the timestamp, wait, then set it again. This should provide
- * enough buffer time to be safe.
*/
data->access = ktime_get();
zl6100_wait(data);
- data->access = ktime_get();
info = &data->info;
@@ -203,7 +225,16 @@ static int zl6100_probe(struct i2c_client *client,
info->func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_STATUS_INPUT
| PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT
| PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT
- | PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_STATUS_TEMP;
+ | PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP;
+
+ ret = i2c_smbus_read_word_data(client, ZL6100_MFR_CONFIG);
+ if (ret < 0)
+ goto err_mem;
+ if (ret & ZL6100_MFR_XTEMP_ENABLE)
+ info->func[0] |= PMBUS_HAVE_TEMP2;
+
+ data->access = ktime_get();
+ zl6100_wait(data);
info->read_word_data = zl6100_read_word_data;
info->read_byte_data = zl6100_read_byte_data;
diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
index fe4104c..91fdd1f 100644
--- a/drivers/hwmon/sht15.c
+++ b/drivers/hwmon/sht15.c
@@ -683,7 +683,7 @@ static ssize_t sht15_store_heater(struct device *dev,
long value;
u8 status;
- if (strict_strtol(buf, 10, &value))
+ if (kstrtol(buf, 10, &value))
return -EINVAL;
mutex_lock(&data->read_lock);
@@ -883,7 +883,7 @@ static int sht15_invalidate_voltage(struct notifier_block *nb,
static int __devinit sht15_probe(struct platform_device *pdev)
{
- int ret = 0;
+ int ret;
struct sht15_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
u8 status = 0;
@@ -901,6 +901,7 @@ static int __devinit sht15_probe(struct platform_device *pdev)
init_waitqueue_head(&data->wait_queue);
if (pdev->dev.platform_data == NULL) {
+ ret = -EINVAL;
dev_err(&pdev->dev, "no platform data supplied\n");
goto err_free_data;
}
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c
index 643aa8c..c08eee2 100644
--- a/drivers/hwmon/tmp102.c
+++ b/drivers/hwmon/tmp102.c
@@ -112,7 +112,7 @@ static ssize_t tmp102_set_temp(struct device *dev,
long val;
int status;
- if (strict_strtol(buf, 10, &val) < 0)
+ if (kstrtol(buf, 10, &val) < 0)
return -EINVAL;
val = SENSORS_LIMIT(val, -256000, 255000);
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index ad8d535..8b9a774 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -334,7 +334,7 @@ static ssize_t store_temp_min(struct device *dev, struct device_attribute
long val;
u16 reg;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
reg = tmp401_temp_to_register(val, data->config);
@@ -361,7 +361,7 @@ static ssize_t store_temp_max(struct device *dev, struct device_attribute
long val;
u16 reg;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
reg = tmp401_temp_to_register(val, data->config);
@@ -388,7 +388,7 @@ static ssize_t store_temp_crit(struct device *dev, struct device_attribute
long val;
u8 reg;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
reg = tmp401_crit_temp_to_register(val, data->config);
@@ -413,7 +413,7 @@ static ssize_t store_temp_crit_hyst(struct device *dev, struct device_attribute
long val;
u8 reg;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
if (data->config & TMP401_CONFIG_RANGE)
@@ -447,7 +447,7 @@ static ssize_t reset_temp_history(struct device *dev,
{
long val;
- if (strict_strtol(buf, 10, &val))
+ if (kstrtol(buf, 10, &val))
return -EINVAL;
if (val != 1) {
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index 0517a8f..c48381f 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -157,7 +157,7 @@ static ssize_t show_fault(struct device *dev,
return sprintf(buf, "0\n");
}
-static mode_t tmp421_is_visible(struct kobject *kobj, struct attribute *a,
+static umode_t tmp421_is_visible(struct kobject *kobj, struct attribute *a,
int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 93f5fc7..5276d19 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -937,7 +937,7 @@ store_in_##reg(struct device *dev, struct device_attribute *attr, \
int nr = sensor_attr->index; \
unsigned long val; \
int err; \
- err = strict_strtoul(buf, 10, &val); \
+ err = kstrtoul(buf, 10, &val); \
if (err < 0) \
return err; \
mutex_lock(&data->update_lock); \
@@ -1054,7 +1054,7 @@ store_fan_min(struct device *dev, struct device_attribute *attr,
unsigned int reg;
u8 new_div;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
@@ -1199,7 +1199,7 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
int nr = sensor_attr->index; \
int err; \
long val; \
- err = strict_strtol(buf, 10, &val); \
+ err = kstrtol(buf, 10, &val); \
if (err < 0) \
return err; \
mutex_lock(&data->update_lock); \
@@ -1319,17 +1319,23 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
{
struct w83627ehf_data *data = dev_get_drvdata(dev);
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ struct w83627ehf_sio_data *sio_data = dev->platform_data;
int nr = sensor_attr->index;
unsigned long val;
int err;
u16 reg;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
if (val > 1)
return -EINVAL;
+
+ /* On NCT6776F, DC mode is only supported for pwm1 */
+ if (sio_data->kind == nct6776 && nr && val != 1)
+ return -EINVAL;
+
mutex_lock(&data->update_lock);
reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
data->pwm_mode[nr] = val;
@@ -1351,7 +1357,7 @@ store_pwm(struct device *dev, struct device_attribute *attr,
unsigned long val;
int err;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
@@ -1376,7 +1382,7 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr,
int err;
u16 reg;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err < 0)
return err;
@@ -1430,7 +1436,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr,
long val;
int err;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err < 0)
return err;
@@ -1455,7 +1461,7 @@ store_tolerance(struct device *dev, struct device_attribute *attr,
long val;
int err;
- err = strict_strtol(buf, 10, &val);
+ err = kstrtol(buf, 10, &val);
if (err < 0)
return err;
@@ -1556,7 +1562,7 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
int nr = sensor_attr->index; \
unsigned long val; \
int err; \
- err = strict_strtoul(buf, 10, &val); \
+ err = kstrtoul(buf, 10, &val); \
if (err < 0) \
return err; \
val = SENSORS_LIMIT(val, 1, 255); \
@@ -1595,7 +1601,7 @@ store_##reg(struct device *dev, struct device_attribute *attr, \
int nr = sensor_attr->index; \
unsigned long val; \
int err; \
- err = strict_strtoul(buf, 10, &val); \
+ err = kstrtoul(buf, 10, &val); \
if (err < 0) \
return err; \
val = step_time_to_reg(val, data->pwm_mode[nr]); \
@@ -1702,7 +1708,7 @@ clear_caseopen(struct device *dev, struct device_attribute *attr,
unsigned long val;
u16 reg, mask;
- if (strict_strtoul(buf, 10, &val) || val != 0)
+ if (kstrtoul(buf, 10, &val) || val != 0)
return -EINVAL;
mask = to_sensor_dev_attr_2(attr)->nr;
@@ -1914,9 +1920,26 @@ w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data,
fan4min = 0;
fan5pin = 0;
} else if (sio_data->kind == nct6776) {
- fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40);
- fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x01);
- fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x02);
+ bool gpok = superio_inb(sio_data->sioreg, 0x27) & 0x80;
+
+ superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
+ regval = superio_inb(sio_data->sioreg, SIO_REG_ENABLE);
+
+ if (regval & 0x80)
+ fan3pin = gpok;
+ else
+ fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40);
+
+ if (regval & 0x40)
+ fan4pin = gpok;
+ else
+ fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x01);
+
+ if (regval & 0x20)
+ fan5pin = gpok;
+ else
+ fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x02);
+
fan4min = fan4pin;
} else if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
fan3pin = 1;
@@ -2331,11 +2354,6 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev)
for (i = 0; i < data->pwm_num; i++)
data->pwm_enable_orig[i] = data->pwm_enable[i];
- /* Read pwm data to save original values */
- w83627ehf_update_pwm_common(dev, data);
- for (i = 0; i < data->pwm_num; i++)
- data->pwm_enable_orig[i] = data->pwm_enable[i];
-
/* Register sysfs hooks */
for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++) {
err = device_create_file(dev, &sda_sf3_arrays[i].dev_attr);
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index bde50e3..374118f 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -71,7 +71,7 @@ module_param(force_i2c, byte, 0);
MODULE_PARM_DESC(force_i2c,
"Initialize the i2c address of the sensors");
-static int init = 1;
+static bool init = 1;
module_param(init, bool, 0);
MODULE_PARM_DESC(init, "Set to zero to bypass chip initialization");
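Several of these drivers switch their insmod parameters from int to bool, since the bool parameter type of module_param() is meant to be backed by a C bool variable. A minimal sketch of the resulting pattern (the parameter name is illustrative only):

#include <linux/module.h>
#include <linux/moduleparam.h>

static bool example_init = true;        /* was: static int example_init = 1; */
module_param(example_init, bool, 0);
MODULE_PARM_DESC(example_init, "Set to zero to bypass chip initialization");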
diff --git a/drivers/hwmon/w83781d.c b/drivers/hwmon/w83781d.c
index 65b685e..17a8fa2 100644
--- a/drivers/hwmon/w83781d.c
+++ b/drivers/hwmon/w83781d.c
@@ -67,11 +67,11 @@ module_param_array(force_subclients, short, NULL, 0);
MODULE_PARM_DESC(force_subclients, "List of subclient addresses: "
"{bus, clientaddr, subclientaddr1, subclientaddr2}");
-static int reset;
+static bool reset;
module_param(reset, bool, 0);
MODULE_PARM_DESC(reset, "Set to one to reset chip on load");
-static int init = 1;
+static bool init = 1;
module_param(init, bool, 0);
MODULE_PARM_DESC(init, "Set to zero to bypass chip initialization");
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
index 8c2844e..35aa514 100644
--- a/drivers/hwmon/w83791d.c
+++ b/drivers/hwmon/w83791d.c
@@ -58,11 +58,11 @@ module_param_array(force_subclients, short, NULL, 0);
MODULE_PARM_DESC(force_subclients, "List of subclient addresses: "
"{bus, clientaddr, subclientaddr1, subclientaddr2}");
-static int reset;
+static bool reset;
module_param(reset, bool, 0);
MODULE_PARM_DESC(reset, "Set to one to force a hardware chip reset");
-static int init;
+static bool init;
module_param(init, bool, 0);
MODULE_PARM_DESC(init, "Set to one to force extra software initialization");
@@ -711,7 +711,7 @@ static ssize_t store_pwm(struct device *dev, struct device_attribute *attr,
int nr = sensor_attr->index;
unsigned long val;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtoul(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -756,7 +756,7 @@ static ssize_t store_pwmenable(struct device *dev,
u8 val_shift = 0;
u8 keep_mask = 0;
- int ret = strict_strtoul(buf, 10, &val);
+ int ret = kstrtoul(buf, 10, &val);
if (ret || val < 1 || val > 3)
return -EINVAL;
@@ -819,7 +819,7 @@ static ssize_t store_temp_target(struct device *dev,
unsigned long val;
u8 target_mask;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtoul(buf, 10, &val))
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -863,7 +863,7 @@ static ssize_t store_temp_tolerance(struct device *dev,
u8 val_shift = 0;
u8 keep_mask = 0;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtoul(buf, 10, &val))
return -EINVAL;
switch (nr) {
diff --git a/drivers/hwmon/w83792d.c b/drivers/hwmon/w83792d.c
index f3e7130..d3100ea 100644
--- a/drivers/hwmon/w83792d.c
+++ b/drivers/hwmon/w83792d.c
@@ -56,7 +56,7 @@ module_param_array(force_subclients, short, NULL, 0);
MODULE_PARM_DESC(force_subclients, "List of subclient addresses: "
"{bus, clientaddr, subclientaddr1, subclientaddr2}");
-static int init;
+static bool init;
module_param(init, bool, 0);
MODULE_PARM_DESC(init, "Set to one to force chip initialization");
@@ -749,7 +749,7 @@ store_chassis_clear(struct device *dev, struct device_attribute *attr,
unsigned long val;
u8 reg;
- if (strict_strtoul(buf, 10, &val) || val != 0)
+ if (kstrtoul(buf, 10, &val) || val != 0)
return -EINVAL;
mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 854f911..45ec7e7 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -61,7 +61,7 @@ module_param_array(force_subclients, short, NULL, 0);
MODULE_PARM_DESC(force_subclients, "List of subclient addresses: "
"{bus, clientaddr, subclientaddr1, subclientaddr2}");
-static int reset;
+static bool reset;
module_param(reset, bool, 0);
MODULE_PARM_DESC(reset, "Set to 1 to reset chip, not recommended");
@@ -450,7 +450,7 @@ store_chassis_clear(struct device *dev,
unsigned long val;
u8 reg;
- if (strict_strtoul(buf, 10, &val) || val != 0)
+ if (kstrtoul(buf, 10, &val) || val != 0)
return -EINVAL;
mutex_lock(&data->update_lock);
diff --git a/drivers/hwmon/w83795.c b/drivers/hwmon/w83795.c
index 845232d..aa58b25 100644
--- a/drivers/hwmon/w83795.c
+++ b/drivers/hwmon/w83795.c
@@ -42,7 +42,7 @@ static const unsigned short normal_i2c[] = {
};
-static int reset;
+static bool reset;
module_param(reset, bool, 0);
MODULE_PARM_DESC(reset, "Set to 1 to reset chip, not recommended");
@@ -730,7 +730,7 @@ store_beep(struct device *dev, struct device_attribute *attr,
u8 beep_bit = 1 << shift;
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
if (val != 0 && val != 1)
return -EINVAL;
@@ -755,7 +755,7 @@ store_chassis_clear(struct device *dev,
struct w83795_data *data = i2c_get_clientdata(client);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0 || val != 0)
+ if (kstrtoul(buf, 10, &val) < 0 || val != 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -801,7 +801,7 @@ store_fan_min(struct device *dev, struct device_attribute *attr,
struct w83795_data *data = i2c_get_clientdata(client);
unsigned long val;
- if (strict_strtoul(buf, 10, &val))
+ if (kstrtoul(buf, 10, &val))
return -EINVAL;
val = fan_to_reg(val);
@@ -863,7 +863,7 @@ store_pwm(struct device *dev, struct device_attribute *attr,
int index = sensor_attr->index;
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -924,7 +924,7 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr,
unsigned long val;
int i;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
if (val < 1 || val > 2)
return -EINVAL;
@@ -1021,7 +1021,7 @@ store_temp_src(struct device *dev, struct device_attribute *attr,
unsigned long channel;
u8 val = index / 2;
- if (strict_strtoul(buf, 10, &channel) < 0 ||
+ if (kstrtoul(buf, 10, &channel) < 0 ||
channel < 1 || channel > 14)
return -EINVAL;
@@ -1088,7 +1088,7 @@ store_temp_pwm_enable(struct device *dev, struct device_attribute *attr,
int index = sensor_attr->index;
unsigned long tmp;
- if (strict_strtoul(buf, 10, &tmp) < 0)
+ if (kstrtoul(buf, 10, &tmp) < 0)
return -EINVAL;
switch (nr) {
@@ -1149,7 +1149,7 @@ store_fanin(struct device *dev, struct device_attribute *attr,
int index = sensor_attr->index;
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -1198,7 +1198,7 @@ store_temp_pwm(struct device *dev, struct device_attribute *attr,
unsigned long val;
u8 tmp;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
val /= 1000;
@@ -1257,7 +1257,7 @@ store_sf4_pwm(struct device *dev, struct device_attribute *attr,
int index = sensor_attr->index;
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -1293,7 +1293,7 @@ store_sf4_temp(struct device *dev, struct device_attribute *attr,
int index = sensor_attr->index;
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
val /= 1000;
@@ -1333,7 +1333,7 @@ store_temp(struct device *dev, struct device_attribute *attr,
struct w83795_data *data = i2c_get_clientdata(client);
long tmp;
- if (strict_strtol(buf, 10, &tmp) < 0)
+ if (kstrtol(buf, 10, &tmp) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -1394,7 +1394,7 @@ store_dts_ext(struct device *dev, struct device_attribute *attr,
struct w83795_data *data = i2c_get_clientdata(client);
long tmp;
- if (strict_strtol(buf, 10, &tmp) < 0)
+ if (kstrtol(buf, 10, &tmp) < 0)
return -EINVAL;
mutex_lock(&data->update_lock);
@@ -1436,7 +1436,7 @@ store_temp_mode(struct device *dev, struct device_attribute *attr,
unsigned long val;
u8 tmp;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
if ((val != 4) && (val != 3))
return -EINVAL;
@@ -1512,7 +1512,7 @@ store_in(struct device *dev, struct device_attribute *attr,
u8 tmp;
u8 lsb_idx;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
val = in_to_reg(index, val);
@@ -1569,7 +1569,7 @@ store_sf_setup(struct device *dev, struct device_attribute *attr,
struct w83795_data *data = i2c_get_clientdata(client);
unsigned long val;
- if (strict_strtoul(buf, 10, &val) < 0)
+ if (kstrtoul(buf, 10, &val) < 0)
return -EINVAL;
switch (nr) {
diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
index 0254e18..063bd95 100644
--- a/drivers/hwmon/w83l786ng.c
+++ b/drivers/hwmon/w83l786ng.c
@@ -39,7 +39,7 @@ static const unsigned short normal_i2c[] = { 0x2e, 0x2f, I2C_CLIENT_END };
/* Insmod parameters */
-static int reset;
+static bool reset;
module_param(reset, bool, 0);
MODULE_PARM_DESC(reset, "Set to 1 to reset chip, not recommended");
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index a3afac4..3101dd5 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -299,11 +299,11 @@ config I2C_AT91
unless your system can cope with those limitations.
config I2C_AU1550
- tristate "Au1550/Au1200 SMBus interface"
+ tristate "Au1550/Au1200/Au1300 SMBus interface"
depends on MIPS_ALCHEMY
help
If you say yes to this option, support will be included for the
- Au1550 and Au1200 SMBus interface.
+ Au1550/Au1200/Au1300 SMBus interface.
This driver can also be built as a module. If so, the module
will be called i2c-au1550.
@@ -682,19 +682,19 @@ config I2C_XILINX
will be called xilinx_i2c.
config I2C_EG20T
- tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223)"
+ tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) I2C"
depends on PCI
help
This driver is for PCH(Platform controller Hub) I2C of EG20T which
is an IOH(Input/Output Hub) for x86 embedded processor.
This driver can access PCH I2C bus device.
- This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
- Output Hub), ML7213 and ML7223.
- ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
- for MP(Media Phone) use.
- ML7213/ML7223 is companion chip for Intel Atom E6xx series.
- ML7213/ML7223 is completely compatible for Intel EG20T PCH.
+ This driver also can be used for LAPIS Semiconductor IOH(Input/
+ Output Hub), ML7213, ML7223 and ML7831.
+ ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is
+ for MP(Media Phone) use and ML7831 IOH is for general purpose use.
+ ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
+ ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
comment "External I2C/SMBus adapter drivers"
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index b6807db..e66d248 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -132,7 +132,8 @@
#define ALI1535_SMBIO_EN 0x04 /* SMB I/O Space enable */
static struct pci_driver ali1535_driver;
-static unsigned short ali1535_smba;
+static unsigned long ali1535_smba;
+static unsigned short ali1535_offset;
/* Detect whether a ALI1535 can be found, and initialize it, where necessary.
Note the differences between kernels with the old PCI BIOS interface and
@@ -140,7 +141,7 @@ static unsigned short ali1535_smba;
defined to make the transition easier. */
static int __devinit ali1535_setup(struct pci_dev *dev)
{
- int retval = -ENODEV;
+ int retval;
unsigned char temp;
/* Check the following things:
@@ -149,15 +150,28 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
- We can use the addresses
*/
+ retval = pci_enable_device(dev);
+ if (retval) {
+ dev_err(&dev->dev, "ALI1535_smb can't enable device\n");
+ goto exit;
+ }
+
/* Determine the address of the SMBus area */
- pci_read_config_word(dev, SMBBA, &ali1535_smba);
- ali1535_smba &= (0xffff & ~(ALI1535_SMB_IOSIZE - 1));
- if (ali1535_smba == 0) {
+ pci_read_config_word(dev, SMBBA, &ali1535_offset);
+ dev_dbg(&dev->dev, "ALI1535_smb is at offset 0x%04x\n", ali1535_offset);
+ ali1535_offset &= (0xffff & ~(ALI1535_SMB_IOSIZE - 1));
+ if (ali1535_offset == 0) {
dev_warn(&dev->dev,
"ALI1535_smb region uninitialized - upgrade BIOS?\n");
+ retval = -ENODEV;
goto exit;
}
+ if (pci_resource_flags(dev, 0) & IORESOURCE_IO)
+ ali1535_smba = pci_resource_start(dev, 0) + ali1535_offset;
+ else
+ ali1535_smba = ali1535_offset;
+
retval = acpi_check_region(ali1535_smba, ALI1535_SMB_IOSIZE,
ali1535_driver.name);
if (retval)
@@ -165,8 +179,9 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
if (!request_region(ali1535_smba, ALI1535_SMB_IOSIZE,
ali1535_driver.name)) {
- dev_err(&dev->dev, "ALI1535_smb region 0x%x already in use!\n",
+ dev_err(&dev->dev, "ALI1535_smb region 0x%lx already in use!\n",
ali1535_smba);
+ retval = -EBUSY;
goto exit;
}
@@ -174,6 +189,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
pci_read_config_byte(dev, SMBCFG, &temp);
if ((temp & ALI1535_SMBIO_EN) == 0) {
dev_err(&dev->dev, "SMB device not enabled - upgrade BIOS?\n");
+ retval = -ENODEV;
goto exit_free;
}
@@ -181,6 +197,7 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
pci_read_config_byte(dev, SMBHSTCFG, &temp);
if ((temp & 1) == 0) {
dev_err(&dev->dev, "SMBus controller not enabled - upgrade BIOS?\n");
+ retval = -ENODEV;
goto exit_free;
}
@@ -196,14 +213,13 @@ static int __devinit ali1535_setup(struct pci_dev *dev)
*/
pci_read_config_byte(dev, SMBREV, &temp);
dev_dbg(&dev->dev, "SMBREV = 0x%X\n", temp);
- dev_dbg(&dev->dev, "ALI1535_smba = 0x%X\n", ali1535_smba);
+ dev_dbg(&dev->dev, "ALI1535_smba = 0x%lx\n", ali1535_smba);
- retval = 0;
-exit:
- return retval;
+ return 0;
exit_free:
release_region(ali1535_smba, ALI1535_SMB_IOSIZE);
+exit:
return retval;
}
@@ -498,7 +514,7 @@ static int __devinit ali1535_probe(struct pci_dev *dev, const struct pci_device_
ali1535_adapter.dev.parent = &dev->dev;
snprintf(ali1535_adapter.name, sizeof(ali1535_adapter.name),
- "SMBus ALI1535 adapter at %04x", ali1535_smba);
+ "SMBus ALI1535 adapter at %04x", ali1535_offset);
return i2c_add_adapter(&ali1535_adapter);
}
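The ali1535 setup rework above follows the usual unwind-by-label shape: each failure picks a specific errno and jumps to a label that releases only what has already been claimed, with the plain exit label now placed after exit_free. A condensed, hypothetical version of that shape (the function name and the failing check are placeholders):

#include <linux/ioport.h>
#include <linux/errno.h>

static int example_setup(unsigned long base, unsigned long size,
                         const char *name)
{
        int retval;

        if (!request_region(base, size, name)) {
                retval = -EBUSY;        /* region already in use */
                goto exit;
        }

        if (0 /* e.g. a configuration register check fails */) {
                retval = -ENODEV;
                goto exit_free;         /* undo request_region() */
        }

        return 0;

exit_free:
        release_region(base, size);
exit:
        return retval;
}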
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index a409cfc..47ae009 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -417,7 +417,7 @@ static void __devexit ali1563_remove(struct pci_dev * dev)
ali1563_shutdown(dev);
}
-static const struct pci_device_id ali1563_id_table[] __devinitconst = {
+static DEFINE_PCI_DEVICE_TABLE(ali1563_id_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1563) },
{},
};
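The ali1563 hunk, and many of the PCI bus drivers below, convert their ID lists to DEFINE_PCI_DEVICE_TABLE(). At this point in the kernel's history the macro expanded to a const struct pci_device_id array marked __devinitconst, so the table contents stay the same and only the declaration changes. A placeholder table in that style (the device ID is made up):

#include <linux/pci.h>
#include <linux/module.h>

static DEFINE_PCI_DEVICE_TABLE(example_ids) = {
        { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1234) },    /* placeholder ID */
        { 0, }
};
MODULE_DEVICE_TABLE(pci, example_ids);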
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index 83e8a60..087ea9c 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -477,7 +477,7 @@ static struct i2c_adapter ali15x3_adapter = {
.algo = &smbus_algorithm,
};
-static const struct pci_device_id ali15x3_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(ali15x3_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index 03bcd07..eb778bf1 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -308,7 +308,7 @@ static const char* chipname[] = {
"nVidia nForce", "AMD8111",
};
-static const struct pci_device_id amd756_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(amd756_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_740B),
.driver_data = AMD756 },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7413),
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index 6b6a6b1..e5ac53b 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -415,7 +415,7 @@ static const struct i2c_algorithm smbus_algorithm = {
};
-static const struct pci_device_id amd8111_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(amd8111_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS2) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 305c075..1679dee 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -295,9 +295,6 @@ static int at91_i2c_resume(struct platform_device *pdev)
#define at91_i2c_resume NULL
#endif
-/* work with "modprobe at91_i2c" from hotplugging or coldplugging */
-MODULE_ALIAS("platform:at91_i2c");
-
static struct platform_driver at91_i2c_driver = {
.probe = at91_i2c_probe,
.remove = __devexit_p(at91_i2c_remove),
@@ -309,19 +306,9 @@ static struct platform_driver at91_i2c_driver = {
},
};
-static int __init at91_i2c_init(void)
-{
- return platform_driver_register(&at91_i2c_driver);
-}
-
-static void __exit at91_i2c_exit(void)
-{
- platform_driver_unregister(&at91_i2c_driver);
-}
-
-module_init(at91_i2c_init);
-module_exit(at91_i2c_exit);
+module_platform_driver(at91_i2c_driver);
MODULE_AUTHOR("Rick Bronson");
MODULE_DESCRIPTION("I2C (TWI) driver for Atmel AT91");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:at91_i2c");
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index f314d7f..582d616 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -426,20 +426,9 @@ static struct platform_driver au1xpsc_smbus_driver = {
.remove = __devexit_p(i2c_au1550_remove),
};
-static int __init i2c_au1550_init(void)
-{
- return platform_driver_register(&au1xpsc_smbus_driver);
-}
-
-static void __exit i2c_au1550_exit(void)
-{
- platform_driver_unregister(&au1xpsc_smbus_driver);
-}
+module_platform_driver(au1xpsc_smbus_driver);
MODULE_AUTHOR("Dan Malek, Embedded Edge, LLC.");
MODULE_DESCRIPTION("SMBus adapter Alchemy pb1550");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:au1xpsc_smbus");
-
-module_init (i2c_au1550_init);
-module_exit (i2c_au1550_exit);
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index b1d9cd2..c1e1096 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -724,18 +724,7 @@ static struct platform_driver cpm_i2c_driver = {
},
};
-static int __init cpm_i2c_init(void)
-{
- return platform_driver_register(&cpm_i2c_driver);
-}
-
-static void __exit cpm_i2c_exit(void)
-{
- platform_driver_unregister(&cpm_i2c_driver);
-}
-
-module_init(cpm_i2c_init);
-module_exit(cpm_i2c_exit);
+module_platform_driver(cpm_i2c_driver);
MODULE_AUTHOR("Jochen Friedrich <jochen@scram.de>");
MODULE_DESCRIPTION("I2C-Bus adapter routines for CPM boards");
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 9e89e73..37f4211 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -349,7 +349,7 @@ static void __devexit i2c_dw_pci_remove(struct pci_dev *pdev)
/* work with hotplug and coldplug */
MODULE_ALIAS("i2c_designware-pci");
-DEFINE_PCI_DEVICE_TABLE(i2_designware_pci_ids) = {
+static DEFINE_PCI_DEVICE_TABLE(i2_designware_pci_ids) = {
/* Moorestown */
{ PCI_VDEVICE(INTEL, 0x0802), moorestown_0 },
{ PCI_VDEVICE(INTEL, 0x0803), moorestown_1 },
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 2d3657a..5244c47 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -34,6 +34,7 @@
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/interrupt.h>
+#include <linux/of_i2c.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
@@ -137,6 +138,7 @@ static int __devinit dw_i2c_probe(struct platform_device *pdev)
sizeof(adap->name));
adap->algo = &i2c_dw_algo;
adap->dev.parent = &pdev->dev;
+ adap->dev.of_node = pdev->dev.of_node;
adap->nr = pdev->id;
r = i2c_add_numbered_adapter(adap);
@@ -144,6 +146,7 @@ static int __devinit dw_i2c_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "failure adding adapter\n");
goto err_free_irq;
}
+ of_i2c_register_devices(adap);
return 0;
@@ -187,6 +190,14 @@ static int __devexit dw_i2c_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_OF
+static const struct of_device_id dw_i2c_of_match[] = {
+ { .compatible = "snps,designware-i2c", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dw_i2c_of_match);
+#endif
+
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:i2c_designware");
@@ -195,6 +206,7 @@ static struct platform_driver dw_i2c_driver = {
.driver = {
.name = "i2c_designware",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(dw_i2c_of_match),
},
};
diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
index 7636671..7eb19a5 100644
--- a/drivers/i2c/busses/i2c-diolan-u2c.c
+++ b/drivers/i2c/busses/i2c-diolan-u2c.c
@@ -515,20 +515,7 @@ static struct usb_driver diolan_u2c_driver = {
.id_table = diolan_u2c_table,
};
-static int __init diolan_u2c_init(void)
-{
- /* register this driver with the USB subsystem */
- return usb_register(&diolan_u2c_driver);
-}
-
-static void __exit diolan_u2c_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&diolan_u2c_driver);
-}
-
-module_init(diolan_u2c_init);
-module_exit(diolan_u2c_exit);
+module_usb_driver(diolan_u2c_driver);
MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
MODULE_DESCRIPTION(DRIVER_NAME " driver");
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 18936ac..ca88776 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -136,7 +136,8 @@
/*
Set the number of I2C instance max
Intel EG20T PCH : 1ch
-OKI SEMICONDUCTOR ML7213 IOH : 2ch
+LAPIS Semiconductor ML7213 IOH : 2ch
+LAPIS Semiconductor ML7831 IOH : 1ch
*/
#define PCH_I2C_MAX_DEV 2
@@ -180,15 +181,17 @@ static int pch_clk = 50000; /* specifies I2C clock speed in KHz */
static wait_queue_head_t pch_event;
static DEFINE_MUTEX(pch_mutex);
-/* Definition for ML7213 by OKI SEMICONDUCTOR */
+/* Definition for ML7213 by LAPIS Semiconductor */
#define PCI_VENDOR_ID_ROHM 0x10DB
#define PCI_DEVICE_ID_ML7213_I2C 0x802D
#define PCI_DEVICE_ID_ML7223_I2C 0x8010
+#define PCI_DEVICE_ID_ML7831_I2C 0x8817
-static struct pci_device_id __devinitdata pch_pcidev_id[] = {
+static DEFINE_PCI_DEVICE_TABLE(pch_pcidev_id) = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_I2C), 1, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_I2C), 2, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_I2C), 1, },
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_I2C), 1, },
{0,}
};
@@ -243,7 +246,7 @@ static void pch_i2c_init(struct i2c_algo_pch_data *adap)
if (pch_clk > PCH_MAX_CLK)
pch_clk = 62500;
- pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / pch_i2c_speed * 8;
+ pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / (pch_i2c_speed * 8);
/* Set transfer speed in I2CBC */
iowrite32(pch_i2cbc, p + PCH_I2CBC);
@@ -918,7 +921,9 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
pch_adap->dev.parent = &pdev->dev;
pch_i2c_init(&adap_info->pch_data[i]);
- ret = i2c_add_adapter(pch_adap);
+
+ pch_adap->nr = i;
+ ret = i2c_add_numbered_adapter(pch_adap);
if (ret) {
pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i);
goto err_add_adapter;
@@ -1058,8 +1063,8 @@ static void __exit pch_pci_exit(void)
}
module_exit(pch_pci_exit);
-MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH I2C Driver");
+MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semico ML7213/ML7223/ML7831 IOH I2C");
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Tomoya MORINAGA. <tomoya-linux@dsn.okisemi.com>");
+MODULE_AUTHOR("Tomoya MORINAGA. <tomoya-linux@dsn.lapis-semi.com>");
module_param(pch_i2c_speed, int, (S_IRUSR | S_IWUSR));
module_param(pch_clk, int, (S_IRUSR | S_IWUSR));
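One functional fix in the eg20t hunk is the added parentheses in the I2CBC calculation. With pch_clk = 50000 kHz as shown above (the I2C speed default of 100 kHz is not visible in this hunk and is assumed here), the old expression divided before multiplying by 8 and came out 64 times too large. A standalone check of the two expressions:

#include <stdio.h>

int main(void)
{
        int pch_clk = 50000;            /* kHz, driver default */
        int pch_i2c_speed = 100;        /* kHz, assumed default */

        printf("old=%d new=%d\n",
               (pch_clk + (pch_i2c_speed * 4)) / pch_i2c_speed * 8,
               (pch_clk + (pch_i2c_speed * 4)) / (pch_i2c_speed * 8));
        return 0;                       /* prints: old=4032 new=63 */
}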
diff --git a/drivers/i2c/busses/i2c-highlander.c b/drivers/i2c/busses/i2c-highlander.c
index 63bb1cc..19515df 100644
--- a/drivers/i2c/busses/i2c-highlander.c
+++ b/drivers/i2c/busses/i2c-highlander.c
@@ -52,7 +52,7 @@ struct highlander_i2c_dev {
size_t buf_len;
};
-static int iic_force_poll, iic_force_normal;
+static bool iic_force_poll, iic_force_normal;
static int iic_timeout = 1000, iic_read_delay;
static inline void highlander_i2c_irq_enable(struct highlander_i2c_dev *dev)
@@ -468,18 +468,7 @@ static struct platform_driver highlander_i2c_driver = {
.remove = __devexit_p(highlander_i2c_remove),
};
-static int __init highlander_i2c_init(void)
-{
- return platform_driver_register(&highlander_i2c_driver);
-}
-
-static void __exit highlander_i2c_exit(void)
-{
- platform_driver_unregister(&highlander_i2c_driver);
-}
-
-module_init(highlander_i2c_init);
-module_exit(highlander_i2c_exit);
+module_platform_driver(highlander_i2c_driver);
MODULE_AUTHOR("Paul Mundt");
MODULE_DESCRIPTION("Renesas Highlander FPGA I2C/SMBus adapter");
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index 9ff1695..c527de1 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -105,7 +105,7 @@ static struct i2c_adapter hydra_adap = {
.algo_data = &hydra_bit_data,
};
-static const struct pci_device_id hydra_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(hydra_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_HYDRA) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index ab26840d..5d2e281 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -609,7 +609,7 @@ static const struct i2c_algorithm smbus_algorithm = {
.functionality = i801_func,
};
-static const struct pci_device_id i801_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(i801_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_2) },
diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c
index 3c110fb..806e225 100644
--- a/drivers/i2c/busses/i2c-ibm_iic.c
+++ b/drivers/i2c/busses/i2c-ibm_iic.c
@@ -51,11 +51,11 @@
MODULE_DESCRIPTION("IBM IIC driver v" DRIVER_VERSION);
MODULE_LICENSE("GPL");
-static int iic_force_poll;
+static bool iic_force_poll;
module_param(iic_force_poll, bool, 0);
MODULE_PARM_DESC(iic_force_poll, "Force polling mode");
-static int iic_force_fast;
+static bool iic_force_fast;
module_param(iic_force_fast, bool, 0);
MODULE_PARM_DESC(iic_force_fast, "Force fast mode (400 kHz)");
@@ -815,15 +815,4 @@ static struct platform_driver ibm_iic_driver = {
.remove = __devexit_p(iic_remove),
};
-static int __init iic_init(void)
-{
- return platform_driver_register(&ibm_iic_driver);
-}
-
-static void __exit iic_exit(void)
-{
- platform_driver_unregister(&ibm_iic_driver);
-}
-
-module_init(iic_init);
-module_exit(iic_exit);
+module_platform_driver(ibm_iic_driver);
diff --git a/drivers/i2c/busses/i2c-intel-mid.c b/drivers/i2c/busses/i2c-intel-mid.c
index e828ac8..365bad5 100644
--- a/drivers/i2c/busses/i2c-intel-mid.c
+++ b/drivers/i2c/busses/i2c-intel-mid.c
@@ -1093,7 +1093,7 @@ static void __devexit intel_mid_i2c_remove(struct pci_dev *dev)
pci_release_region(dev, 0);
}
-static struct pci_device_id intel_mid_i2c_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(intel_mid_i2c_ids) = {
/* Moorestown */
{ PCI_VDEVICE(INTEL, 0x0802), 0 },
{ PCI_VDEVICE(INTEL, 0x0803), 1 },
diff --git a/drivers/i2c/busses/i2c-iop3xx.c b/drivers/i2c/busses/i2c-iop3xx.c
index f09c931..93f147a 100644
--- a/drivers/i2c/busses/i2c-iop3xx.c
+++ b/drivers/i2c/busses/i2c-iop3xx.c
@@ -523,21 +523,7 @@ static struct platform_driver iop3xx_i2c_driver = {
},
};
-static int __init
-i2c_iop3xx_init (void)
-{
- return platform_driver_register(&iop3xx_i2c_driver);
-}
-
-static void __exit
-i2c_iop3xx_exit (void)
-{
- platform_driver_unregister(&iop3xx_i2c_driver);
- return;
-}
-
-module_init (i2c_iop3xx_init);
-module_exit (i2c_iop3xx_exit);
+module_platform_driver(iop3xx_i2c_driver);
MODULE_AUTHOR("D-TACQ Solutions Ltd <www.d-tacq.com>");
MODULE_DESCRIPTION("IOP3xx iic algorithm and driver");
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index 0682f8f..6561d27 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -306,20 +306,9 @@ static struct platform_driver smbus_sch_driver = {
.remove = __devexit_p(smbus_sch_remove),
};
-static int __init i2c_sch_init(void)
-{
- return platform_driver_register(&smbus_sch_driver);
-}
-
-static void __exit i2c_sch_exit(void)
-{
- platform_driver_unregister(&smbus_sch_driver);
-}
+module_platform_driver(smbus_sch_driver);
MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@intel.com>");
MODULE_DESCRIPTION("Intel SCH SMBus driver");
MODULE_LICENSE("GPL");
-
-module_init(i2c_sch_init);
-module_exit(i2c_sch_exit);
MODULE_ALIAS("platform:isch_smbus");
diff --git a/drivers/i2c/busses/i2c-ixp2000.c b/drivers/i2c/busses/i2c-ixp2000.c
index c01e951..5d263f90 100644
--- a/drivers/i2c/busses/i2c-ixp2000.c
+++ b/drivers/i2c/busses/i2c-ixp2000.c
@@ -148,18 +148,7 @@ static struct platform_driver ixp2000_i2c_driver = {
},
};
-static int __init ixp2000_i2c_init(void)
-{
- return platform_driver_register(&ixp2000_i2c_driver);
-}
-
-static void __exit ixp2000_i2c_exit(void)
-{
- platform_driver_unregister(&ixp2000_i2c_driver);
-}
-
-module_init(ixp2000_i2c_init);
-module_exit(ixp2000_i2c_exit);
+module_platform_driver(ixp2000_i2c_driver);
MODULE_AUTHOR ("Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("IXP2000 GPIO-based I2C bus driver");
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 107397a..a8ebb84 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -715,18 +715,7 @@ static struct platform_driver mpc_i2c_driver = {
},
};
-static int __init fsl_i2c_init(void)
-{
- return platform_driver_register(&mpc_i2c_driver);
-}
-
-static void __exit fsl_i2c_exit(void)
-{
- platform_driver_unregister(&mpc_i2c_driver);
-}
-
-module_init(fsl_i2c_init);
-module_exit(fsl_i2c_exit);
+module_platform_driver(mpc_i2c_driver);
MODULE_AUTHOR("Adrian Cox <adrian@humboldt.co.uk>");
MODULE_DESCRIPTION("I2C-Bus adapter for MPC107 bridge and "
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index a9941c6..4f44a33 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -611,20 +611,7 @@ static struct platform_driver mv64xxx_i2c_driver = {
},
};
-static int __init
-mv64xxx_i2c_init(void)
-{
- return platform_driver_register(&mv64xxx_i2c_driver);
-}
-
-static void __exit
-mv64xxx_i2c_exit(void)
-{
- platform_driver_unregister(&mv64xxx_i2c_driver);
-}
-
-module_init(mv64xxx_i2c_init);
-module_exit(mv64xxx_i2c_exit);
+module_platform_driver(mv64xxx_i2c_driver);
MODULE_AUTHOR("Mark A. Greer <mgreer@mvista.com>");
MODULE_DESCRIPTION("Marvell mv64xxx host bridge i2c ctlr driver");
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 7e78f7c..3d471d5 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -72,6 +72,7 @@
#define MXS_I2C_QUEUESTAT (0x70)
#define MXS_I2C_QUEUESTAT_RD_QUEUE_EMPTY 0x00002000
+#define MXS_I2C_QUEUESTAT_WRITE_QUEUE_CNT_MASK 0x0000001F
#define MXS_I2C_QUEUECMD (0x80)
@@ -219,14 +220,14 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg,
int ret;
int flags;
- init_completion(&i2c->cmd_complete);
-
dev_dbg(i2c->dev, "addr: 0x%04x, len: %d, flags: 0x%x, stop: %d\n",
msg->addr, msg->len, msg->flags, stop);
if (msg->len == 0)
return -EINVAL;
+ init_completion(&i2c->cmd_complete);
+
flags = stop ? MXS_I2C_CTRL0_POST_SEND_STOP : 0;
if (msg->flags & I2C_M_RD)
@@ -286,6 +287,7 @@ static irqreturn_t mxs_i2c_isr(int this_irq, void *dev_id)
{
struct mxs_i2c_dev *i2c = dev_id;
u32 stat = readl(i2c->regs + MXS_I2C_CTRL1) & MXS_I2C_IRQ_MASK;
+ bool is_last_cmd;
if (!stat)
return IRQ_NONE;
@@ -300,9 +302,14 @@ static irqreturn_t mxs_i2c_isr(int this_irq, void *dev_id)
else
i2c->cmd_err = 0;
- complete(&i2c->cmd_complete);
+ is_last_cmd = (readl(i2c->regs + MXS_I2C_QUEUESTAT) &
+ MXS_I2C_QUEUESTAT_WRITE_QUEUE_CNT_MASK) == 0;
+
+ if (is_last_cmd || i2c->cmd_err)
+ complete(&i2c->cmd_complete);
writel(stat, i2c->regs + MXS_I2C_CTRL1_CLR);
+
return IRQ_HANDLED;
}
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index ff1e127..43a96a1 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -309,7 +309,7 @@ static struct i2c_algorithm smbus_algorithm = {
};
-static const struct pci_device_id nforce2_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(nforce2_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS) },
@@ -356,7 +356,7 @@ static int __devinit nforce2_probe_smb (struct pci_dev *dev, int bar,
error = acpi_check_region(smbus->base, smbus->size,
nforce2_driver.name);
if (error)
- return -1;
+ return error;
if (!request_region(smbus->base, smbus->size, nforce2_driver.name)) {
dev_err(&smbus->adapter.dev, "Error requesting region %02x .. %02X for %s\n",
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 1b46a9d..18068de 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -394,9 +394,6 @@ static struct of_device_id ocores_i2c_match[] = {
};
MODULE_DEVICE_TABLE(of, ocores_i2c_match);
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:ocores-i2c");
-
static struct platform_driver ocores_i2c_driver = {
.probe = ocores_i2c_probe,
.remove = __devexit_p(ocores_i2c_remove),
@@ -409,19 +406,9 @@ static struct platform_driver ocores_i2c_driver = {
},
};
-static int __init ocores_i2c_init(void)
-{
- return platform_driver_register(&ocores_i2c_driver);
-}
-
-static void __exit ocores_i2c_exit(void)
-{
- platform_driver_unregister(&ocores_i2c_driver);
-}
-
-module_init(ocores_i2c_init);
-module_exit(ocores_i2c_exit);
+module_platform_driver(ocores_i2c_driver);
MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
MODULE_DESCRIPTION("OpenCores I2C bus driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ocores-i2c");
diff --git a/drivers/i2c/busses/i2c-octeon.c b/drivers/i2c/busses/i2c-octeon.c
index 56dbe54..ee139a5 100644
--- a/drivers/i2c/busses/i2c-octeon.c
+++ b/drivers/i2c/busses/i2c-octeon.c
@@ -629,24 +629,10 @@ static struct platform_driver octeon_i2c_driver = {
},
};
-static int __init octeon_i2c_init(void)
-{
- int rv;
-
- rv = platform_driver_register(&octeon_i2c_driver);
- return rv;
-}
-
-static void __exit octeon_i2c_exit(void)
-{
- platform_driver_unregister(&octeon_i2c_driver);
-}
+module_platform_driver(octeon_i2c_driver);
MODULE_AUTHOR("Michael Lawnick <michael.lawnick.ext@nsn.com>");
MODULE_DESCRIPTION("I2C-Bus adapter for Cavium OCTEON processors");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);
-
-module_init(octeon_i2c_init);
-module_exit(octeon_i2c_exit);
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index fa23faa..801df60 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -37,6 +37,9 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_i2c.h>
+#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/i2c-omap.h>
#include <linux/pm_runtime.h>
@@ -182,7 +185,9 @@ struct omap_i2c_dev {
u32 latency; /* maximum mpu wkup latency */
void (*set_mpu_wkup_lat)(struct device *dev,
long latency);
- u32 speed; /* Speed of bus in Khz */
+ u32 speed; /* Speed of bus in kHz */
+ u32 dtrev; /* extra revision from DT */
+ u32 flags;
u16 cmd_err;
u8 *buf;
u8 *regs;
@@ -235,7 +240,7 @@ static const u8 reg_map_ip_v2[] = {
[OMAP_I2C_BUF_REG] = 0x94,
[OMAP_I2C_CNT_REG] = 0x98,
[OMAP_I2C_DATA_REG] = 0x9c,
- [OMAP_I2C_SYSC_REG] = 0x20,
+ [OMAP_I2C_SYSC_REG] = 0x10,
[OMAP_I2C_CON_REG] = 0xa4,
[OMAP_I2C_OA_REG] = 0xa8,
[OMAP_I2C_SA_REG] = 0xac,
@@ -266,11 +271,7 @@ static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *i2c_dev, int reg)
static void omap_i2c_unidle(struct omap_i2c_dev *dev)
{
- struct omap_i2c_bus_platform_data *pdata;
-
- pdata = dev->dev->platform_data;
-
- if (pdata->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
+ if (dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, dev->pscstate);
omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, dev->scllstate);
@@ -291,13 +292,10 @@ static void omap_i2c_unidle(struct omap_i2c_dev *dev)
static void omap_i2c_idle(struct omap_i2c_dev *dev)
{
- struct omap_i2c_bus_platform_data *pdata;
u16 iv;
- pdata = dev->dev->platform_data;
-
dev->iestate = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
- if (pdata->rev == OMAP_I2C_IP_VERSION_2)
+ if (dev->dtrev == OMAP_I2C_IP_VERSION_2)
omap_i2c_write_reg(dev, OMAP_I2C_IP_V2_IRQENABLE_CLR, 1);
else
omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, 0);
@@ -320,9 +318,6 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
unsigned long timeout;
unsigned long internal_clk = 0;
struct clk *fclk;
- struct omap_i2c_bus_platform_data *pdata;
-
- pdata = dev->dev->platform_data;
if (dev->rev >= OMAP_I2C_OMAP1_REV_2) {
/* Disable I2C controller before soft reset */
@@ -373,7 +368,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
}
omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
- if (pdata->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) {
+ if (dev->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) {
/*
* The I2C functional clock is the armxor_ck, so there's
* no need to get "armxor_ck" separately. Now, if OMAP2420
@@ -397,7 +392,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
psc = fclk_rate / 12000000;
}
- if (!(pdata->flags & OMAP_I2C_FLAG_SIMPLE_CLOCK)) {
+ if (!(dev->flags & OMAP_I2C_FLAG_SIMPLE_CLOCK)) {
/*
* HSI2C controller internal clk rate should be 19.2 Mhz for
@@ -406,7 +401,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
* The filter is iclk (fclk for HS) period.
*/
if (dev->speed > 400 ||
- pdata->flags & OMAP_I2C_FLAG_FORCE_19200_INT_CLK)
+ dev->flags & OMAP_I2C_FLAG_FORCE_19200_INT_CLK)
internal_clk = 19200;
else if (dev->speed > 100)
internal_clk = 9600;
@@ -475,7 +470,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
dev->errata = 0;
- if (pdata->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207)
+ if (dev->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207)
dev->errata |= I2C_OMAP_ERRATA_I207;
/* Enable interrupts */
@@ -484,7 +479,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
OMAP_I2C_IE_AL) | ((dev->fifo_size) ?
(OMAP_I2C_IE_RDR | OMAP_I2C_IE_XDR) : 0);
omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate);
- if (pdata->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
+ if (dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
dev->pscstate = psc;
dev->scllstate = scll;
dev->sclhstate = sclh;
@@ -804,9 +799,6 @@ omap_i2c_isr(int this_irq, void *dev_id)
u16 bits;
u16 stat, w;
int err, count = 0;
- struct omap_i2c_bus_platform_data *pdata;
-
- pdata = dev->dev->platform_data;
if (pm_runtime_suspended(dev->dev))
return IRQ_NONE;
@@ -830,11 +822,9 @@ complete:
~(OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR |
OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
- if (stat & OMAP_I2C_STAT_NACK) {
+ if (stat & OMAP_I2C_STAT_NACK)
err |= OMAP_I2C_STAT_NACK;
- omap_i2c_write_reg(dev, OMAP_I2C_CON_REG,
- OMAP_I2C_CON_STP);
- }
+
if (stat & OMAP_I2C_STAT_AL) {
dev_err(dev->dev, "Arbitration lost\n");
err |= OMAP_I2C_STAT_AL;
@@ -875,7 +865,7 @@ complete:
* Data reg in 2430, omap3 and
* omap4 is 8 bit wide
*/
- if (pdata->flags &
+ if (dev->flags &
OMAP_I2C_FLAG_16BIT_DATA_REG) {
if (dev->buf_len) {
*dev->buf++ = w >> 8;
@@ -918,7 +908,7 @@ complete:
* Data reg in 2430, omap3 and
* omap4 is 8 bit wide
*/
- if (pdata->flags &
+ if (dev->flags &
OMAP_I2C_FLAG_16BIT_DATA_REG) {
if (dev->buf_len) {
w |= *dev->buf++ << 8;
@@ -965,6 +955,32 @@ static const struct i2c_algorithm omap_i2c_algo = {
.functionality = omap_i2c_func,
};
+#ifdef CONFIG_OF
+static struct omap_i2c_bus_platform_data omap3_pdata = {
+ .rev = OMAP_I2C_IP_VERSION_1,
+ .flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 |
+ OMAP_I2C_FLAG_RESET_REGS_POSTIDLE |
+ OMAP_I2C_FLAG_BUS_SHIFT_2,
+};
+
+static struct omap_i2c_bus_platform_data omap4_pdata = {
+ .rev = OMAP_I2C_IP_VERSION_2,
+};
+
+static const struct of_device_id omap_i2c_of_match[] = {
+ {
+ .compatible = "ti,omap4-i2c",
+ .data = &omap4_pdata,
+ },
+ {
+ .compatible = "ti,omap3-i2c",
+ .data = &omap3_pdata,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, omap_i2c_of_match);
+#endif
+
static int __devinit
omap_i2c_probe(struct platform_device *pdev)
{
@@ -972,9 +988,10 @@ omap_i2c_probe(struct platform_device *pdev)
struct i2c_adapter *adap;
struct resource *mem, *irq, *ioarea;
struct omap_i2c_bus_platform_data *pdata = pdev->dev.platform_data;
+ struct device_node *node = pdev->dev.of_node;
+ const struct of_device_id *match;
irq_handler_t isr;
int r;
- u32 speed = 0;
/* NOTE: driver uses the static register mapping */
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1001,15 +1018,24 @@ omap_i2c_probe(struct platform_device *pdev)
goto err_release_region;
}
- if (pdata != NULL) {
- speed = pdata->clkrate;
+ match = of_match_device(of_match_ptr(omap_i2c_of_match), &pdev->dev);
+ if (match) {
+ u32 freq = 100000; /* default to 100000 Hz */
+
+ pdata = match->data;
+ dev->dtrev = pdata->rev;
+ dev->flags = pdata->flags;
+
+ of_property_read_u32(node, "clock-frequency", &freq);
+ /* convert DT freq value in Hz into kHz for speed */
+ dev->speed = freq / 1000;
+ } else if (pdata != NULL) {
+ dev->speed = pdata->clkrate;
+ dev->flags = pdata->flags;
dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat;
- } else {
- speed = 100; /* Default speed */
- dev->set_mpu_wkup_lat = NULL;
+ dev->dtrev = pdata->rev;
}
- dev->speed = speed;
dev->dev = &pdev->dev;
dev->irq = irq->start;
dev->base = ioremap(mem->start, resource_size(mem));
@@ -1020,9 +1046,9 @@ omap_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
- dev->reg_shift = (pdata->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3;
+ dev->reg_shift = (dev->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3;
- if (pdata->rev == OMAP_I2C_IP_VERSION_2)
+ if (dev->dtrev == OMAP_I2C_IP_VERSION_2)
dev->regs = (u8 *)reg_map_ip_v2;
else
dev->regs = (u8 *)reg_map_ip_v1;
@@ -1035,7 +1061,7 @@ omap_i2c_probe(struct platform_device *pdev)
if (dev->rev <= OMAP_I2C_REV_ON_3430)
dev->errata |= I2C_OMAP3_1P153;
- if (!(pdata->flags & OMAP_I2C_FLAG_NO_FIFO)) {
+ if (!(dev->flags & OMAP_I2C_FLAG_NO_FIFO)) {
u16 s;
/* Set up the fifo size - Get total size */
@@ -1058,7 +1084,7 @@ omap_i2c_probe(struct platform_device *pdev)
/* calculate wakeup latency constraint for MPU */
if (dev->set_mpu_wkup_lat != NULL)
dev->latency = (1000000 * dev->fifo_size) /
- (1000 * speed / 8);
+ (1000 * dev->speed / 8);
}
/* reset ASAP, clearing any IRQs */
@@ -1074,7 +1100,7 @@ omap_i2c_probe(struct platform_device *pdev)
}
dev_info(dev->dev, "bus %d rev%d.%d.%d at %d kHz\n", pdev->id,
- pdata->rev, dev->rev >> 4, dev->rev & 0xf, dev->speed);
+ dev->dtrev, dev->rev >> 4, dev->rev & 0xf, dev->speed);
pm_runtime_put(dev->dev);
@@ -1085,6 +1111,7 @@ omap_i2c_probe(struct platform_device *pdev)
strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name));
adap->algo = &omap_i2c_algo;
adap->dev.parent = &pdev->dev;
+ adap->dev.of_node = pdev->dev.of_node;
/* i2c device drivers may be active on return from add_adapter() */
adap->nr = pdev->id;
@@ -1094,6 +1121,8 @@ omap_i2c_probe(struct platform_device *pdev)
goto err_free_irq;
}
+ of_i2c_register_devices(adap);
+
return 0;
err_free_irq:
@@ -1166,6 +1195,7 @@ static struct platform_driver omap_i2c_driver = {
.name = "omap_i2c",
.owner = THIS_MODULE,
.pm = OMAP_I2C_PM_OPS,
+ .of_match_table = of_match_ptr(omap_i2c_of_match),
},
};
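The omap hunks (and the designware-platdrv change earlier) add the now-common device-tree probing pattern: an of_device_id table whose .data carries per-SoC platform data, an of_match_device() lookup in probe, and an optional "clock-frequency" property read in Hz and converted to the driver's kHz bookkeeping. A compact sketch under those assumptions (compatible string and names are placeholders):

#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static const struct of_device_id example_of_match[] = {
        { .compatible = "vendor,example-i2c" },         /* placeholder */
        { },
};

static int example_probe(struct platform_device *pdev)
{
        const struct of_device_id *match;
        u32 freq = 100000;                              /* default: 100 kHz, in Hz */

        match = of_match_device(example_of_match, &pdev->dev);
        if (match) {
                /* match->data would point at per-SoC platform data here */
                of_property_read_u32(pdev->dev.of_node, "clock-frequency",
                                     &freq);
                dev_info(&pdev->dev, "bus speed %u kHz\n", freq / 1000);
        }

        return 0;
}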
diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
index 837b8c1..eaaea73 100644
--- a/drivers/i2c/busses/i2c-pasemi.c
+++ b/drivers/i2c/busses/i2c-pasemi.c
@@ -401,7 +401,7 @@ static void __devexit pasemi_smb_remove(struct pci_dev *dev)
kfree(smbus);
}
-static const struct pci_device_id pasemi_smb_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(pasemi_smb_ids) = {
{ PCI_DEVICE(0x1959, 0xa003) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index ace6799..2adbf1a 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -286,20 +286,8 @@ static struct platform_driver i2c_pca_pf_driver = {
},
};
-static int __init i2c_pca_pf_init(void)
-{
- return platform_driver_register(&i2c_pca_pf_driver);
-}
-
-static void __exit i2c_pca_pf_exit(void)
-{
- platform_driver_unregister(&i2c_pca_pf_driver);
-}
+module_platform_driver(i2c_pca_pf_driver);
MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
MODULE_DESCRIPTION("I2C-PCA9564/PCA9665 platform driver");
MODULE_LICENSE("GPL");
-
-module_init(i2c_pca_pf_init);
-module_exit(i2c_pca_pf_exit);
-
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 6d14ac2..c14d48d 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -472,7 +472,7 @@ static struct i2c_adapter piix4_adapter = {
.algo = &smbus_algorithm,
};
-static const struct pci_device_id piix4_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(piix4_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_3) },
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 127051b..07b7447 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -627,9 +627,6 @@ static struct i2c_adapter pmcmsptwi_adapter = {
.name = DRV_NAME,
};
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:" DRV_NAME);
-
static struct platform_driver pmcmsptwi_driver = {
.probe = pmcmsptwi_probe,
.remove = __devexit_p(pmcmsptwi_remove),
@@ -639,18 +636,8 @@ static struct platform_driver pmcmsptwi_driver = {
},
};
-static int __init pmcmsptwi_init(void)
-{
- return platform_driver_register(&pmcmsptwi_driver);
-}
-
-static void __exit pmcmsptwi_exit(void)
-{
- platform_driver_unregister(&pmcmsptwi_driver);
-}
+module_platform_driver(pmcmsptwi_driver);
MODULE_DESCRIPTION("PMC MSP TWI/SMBus/I2C driver");
MODULE_LICENSE("GPL");
-
-module_init(pmcmsptwi_init);
-module_exit(pmcmsptwi_exit);
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index b289ec9..7b397c6 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -312,10 +312,6 @@ static int __devinit i2c_powermac_probe(struct platform_device *dev)
return rc;
}
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:i2c-powermac");
-
static struct platform_driver i2c_powermac_driver = {
.probe = i2c_powermac_probe,
.remove = __devexit_p(i2c_powermac_remove),
@@ -325,17 +321,6 @@ static struct platform_driver i2c_powermac_driver = {
},
};
-static int __init i2c_powermac_init(void)
-{
- platform_driver_register(&i2c_powermac_driver);
- return 0;
-}
+module_platform_driver(i2c_powermac_driver);
-
-static void __exit i2c_powermac_cleanup(void)
-{
- platform_driver_unregister(&i2c_powermac_driver);
-}
-
-module_init(i2c_powermac_init);
-module_exit(i2c_powermac_cleanup);
+MODULE_ALIAS("platform:i2c-powermac");
diff --git a/drivers/i2c/busses/i2c-puv3.c b/drivers/i2c/busses/i2c-puv3.c
index fac6739..93709fb 100644
--- a/drivers/i2c/busses/i2c-puv3.c
+++ b/drivers/i2c/busses/i2c-puv3.c
@@ -276,8 +276,6 @@ static int puv3_i2c_resume(struct platform_device *dev)
#define puv3_i2c_resume NULL
#endif
-MODULE_ALIAS("platform:puv3_i2c");
-
static struct platform_driver puv3_i2c_driver = {
.probe = puv3_i2c_probe,
.remove = __devexit_p(puv3_i2c_remove),
@@ -289,18 +287,8 @@ static struct platform_driver puv3_i2c_driver = {
}
};
-static int __init puv3_i2c_init(void)
-{
- return platform_driver_register(&puv3_i2c_driver);
-}
-
-static void __exit puv3_i2c_exit(void)
-{
- platform_driver_unregister(&puv3_i2c_driver);
-}
-
-module_init(puv3_i2c_init);
-module_exit(puv3_i2c_exit);
+module_platform_driver(puv3_i2c_driver);
MODULE_DESCRIPTION("PKUnity v3 I2C driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:puv3_i2c");
diff --git a/drivers/i2c/busses/i2c-pxa-pci.c b/drivers/i2c/busses/i2c-pxa-pci.c
index 632e088..a058179 100644
--- a/drivers/i2c/busses/i2c-pxa-pci.c
+++ b/drivers/i2c/busses/i2c-pxa-pci.c
@@ -150,7 +150,7 @@ static void __devexit ce4100_i2c_remove(struct pci_dev *dev)
kfree(sds);
}
-static struct pci_device_id ce4100_i2c_devices[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(ce4100_i2c_devices) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e68)},
{ },
};
diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c
index a67132b..c0c9dff 100644
--- a/drivers/i2c/busses/i2c-sh7760.c
+++ b/drivers/i2c/busses/i2c-sh7760.c
@@ -560,18 +560,7 @@ static struct platform_driver sh7760_i2c_drv = {
.remove = __devexit_p(sh7760_i2c_remove),
};
-static int __init sh7760_i2c_init(void)
-{
- return platform_driver_register(&sh7760_i2c_drv);
-}
-
-static void __exit sh7760_i2c_exit(void)
-{
- platform_driver_unregister(&sh7760_i2c_drv);
-}
-
-module_init(sh7760_i2c_init);
-module_exit(sh7760_i2c_exit);
+module_platform_driver(sh7760_i2c_drv);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SH7760 I2C bus driver");
diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c
index 2fc08fb..4fc87e7 100644
--- a/drivers/i2c/busses/i2c-simtec.c
+++ b/drivers/i2c/busses/i2c-simtec.c
@@ -156,12 +156,8 @@ static int simtec_i2c_remove(struct platform_device *dev)
return 0;
}
-
/* device driver */
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:simtec-i2c");
-
static struct platform_driver simtec_i2c_driver = {
.driver = {
.name = "simtec-i2c",
@@ -171,19 +167,9 @@ static struct platform_driver simtec_i2c_driver = {
.remove = simtec_i2c_remove,
};
-static int __init i2c_adap_simtec_init(void)
-{
- return platform_driver_register(&simtec_i2c_driver);
-}
-
-static void __exit i2c_adap_simtec_exit(void)
-{
- platform_driver_unregister(&simtec_i2c_driver);
-}
-
-module_init(i2c_adap_simtec_init);
-module_exit(i2c_adap_simtec_exit);
+module_platform_driver(simtec_i2c_driver);
MODULE_DESCRIPTION("Simtec Generic I2C Bus driver");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:simtec-i2c");
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index 4375866..87e5126 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -147,7 +147,7 @@ static int __devinit sis5595_setup(struct pci_dev *SIS5595_dev)
u16 a;
u8 val;
int *i;
- int retval = -ENODEV;
+ int retval;
/* Look for imposters */
for (i = blacklist; *i != 0; i++) {
@@ -223,7 +223,7 @@ static int __devinit sis5595_setup(struct pci_dev *SIS5595_dev)
error:
release_region(sis5595_base + SMB_INDEX, 2);
- return retval;
+ return -ENODEV;
}
static int sis5595_transaction(struct i2c_adapter *adap)
@@ -369,7 +369,7 @@ static struct i2c_adapter sis5595_adapter = {
.algo = &smbus_algorithm,
};
-static const struct pci_device_id sis5595_ids[] __devinitconst = {
+static DEFINE_PCI_DEVICE_TABLE(sis5595_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index e6f539e..15cf78f 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -93,8 +93,8 @@
static struct pci_driver sis630_driver;
/* insmod parameters */
-static int high_clock;
-static int force;
+static bool high_clock;
+static bool force;
module_param(high_clock, bool, 0);
MODULE_PARM_DESC(high_clock, "Set Host Master Clock to 56KHz (default 14KHz).");
module_param(force, bool, 0);
@@ -393,7 +393,7 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
{
unsigned char b;
struct pci_dev *dummy = NULL;
- int retval = -ENODEV, i;
+ int retval, i;
/* check for supported SiS devices */
for (i=0; supported[i] > 0 ; i++) {
@@ -418,18 +418,21 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
*/
if (pci_read_config_byte(sis630_dev, SIS630_BIOS_CTL_REG,&b)) {
dev_err(&sis630_dev->dev, "Error: Can't read bios ctl reg\n");
+ retval = -ENODEV;
goto exit;
}
/* if ACPI already enabled , do nothing */
if (!(b & 0x80) &&
pci_write_config_byte(sis630_dev, SIS630_BIOS_CTL_REG, b | 0x80)) {
dev_err(&sis630_dev->dev, "Error: Can't enable ACPI\n");
+ retval = -ENODEV;
goto exit;
}
/* Determine the ACPI base address */
if (pci_read_config_word(sis630_dev,SIS630_ACPI_BASE_REG,&acpi_base)) {
dev_err(&sis630_dev->dev, "Error: Can't determine ACPI base address\n");
+ retval = -ENODEV;
goto exit;
}
@@ -445,6 +448,7 @@ static int __devinit sis630_setup(struct pci_dev *sis630_dev)
sis630_driver.name)) {
dev_err(&sis630_dev->dev, "SMBus registers 0x%04x-0x%04x already "
"in use!\n", acpi_base + SMB_STS, acpi_base + SMB_SAA);
+ retval = -EBUSY;
goto exit;
}
@@ -468,7 +472,7 @@ static struct i2c_adapter sis630_adapter = {
.algo = &smbus_algorithm,
};
-static const struct pci_device_id sis630_ids[] __devinitconst = {
+static DEFINE_PCI_DEVICE_TABLE(sis630_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC) },
{ 0, }
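The int-to-bool switches for high_clock and force in the sis630 hunks above follow the stricter type checking on bool module parameters: module_param(..., bool, ...) now expects the backing variable to really be a bool. A minimal sketch of the expected pattern (example_force is a hypothetical name):

#include <linux/module.h>
#include <linux/moduleparam.h>

static bool example_force;      /* backing variable must be bool for a "bool" param */
module_param_named(force, example_force, bool, 0);
MODULE_PARM_DESC(force, "Forcibly enable the device");

The i2c-viapro, IDE and ehca hunks further down make the same adjustment.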
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index 86837f0..cc5d149 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -245,7 +245,7 @@ static struct i2c_adapter sis96x_adapter = {
.algo = &smbus_algorithm,
};
-static const struct pci_device_id sis96x_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(sis96x_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_SMBUS) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 46b6500..0ab4a95 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -558,7 +558,7 @@ static const struct i2c_algorithm tegra_i2c_algo = {
.functionality = tegra_i2c_func,
};
-static int tegra_i2c_probe(struct platform_device *pdev)
+static int __devinit tegra_i2c_probe(struct platform_device *pdev)
{
struct tegra_i2c_dev *i2c_dev;
struct tegra_i2c_platform_data *pdata = pdev->dev.platform_data;
@@ -636,7 +636,10 @@ static int tegra_i2c_probe(struct platform_device *pdev)
i2c_dev->bus_clk_rate = be32_to_cpup(prop);
}
- if (pdev->id == 3)
+ if (pdev->dev.of_node)
+ i2c_dev->is_dvc = of_device_is_compatible(pdev->dev.of_node,
+ "nvidia,tegra20-i2c-dvc");
+ else if (pdev->id == 3)
i2c_dev->is_dvc = 1;
init_completion(&i2c_dev->msg_complete);
@@ -690,7 +693,7 @@ err_iounmap:
return ret;
}
-static int tegra_i2c_remove(struct platform_device *pdev)
+static int __devexit tegra_i2c_remove(struct platform_device *pdev)
{
struct tegra_i2c_dev *i2c_dev = platform_get_drvdata(pdev);
i2c_del_adapter(&i2c_dev->adapter);
@@ -742,6 +745,7 @@ static int tegra_i2c_resume(struct platform_device *pdev)
/* Match table for of_platform binding */
static const struct of_device_id tegra_i2c_of_match[] __devinitconst = {
{ .compatible = "nvidia,tegra20-i2c", },
+ { .compatible = "nvidia,tegra20-i2c-dvc", },
{},
};
MODULE_DEVICE_TABLE(of, tegra_i2c_of_match);
@@ -751,7 +755,7 @@ MODULE_DEVICE_TABLE(of, tegra_i2c_of_match);
static struct platform_driver tegra_i2c_driver = {
.probe = tegra_i2c_probe,
- .remove = tegra_i2c_remove,
+ .remove = __devexit_p(tegra_i2c_remove),
#ifdef CONFIG_PM
.suspend = tegra_i2c_suspend,
.resume = tegra_i2c_resume,
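The __devinit/__devexit annotations added in this hunk let the probe and remove paths be discarded when hotplug support is compiled out; __devexit_p() is the matching guard for the function pointer and evaluates to NULL in that configuration. A minimal hedged sketch of the pattern (example_* names are hypothetical):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __devinit example_probe(struct platform_device *pdev)
{
        return 0;       /* claim the device */
}

static int __devexit example_remove(struct platform_device *pdev)
{
        return 0;
}

static struct platform_driver example_driver = {
        .probe  = example_probe,
        /* becomes NULL when __devexit code is discarded */
        .remove = __devexit_p(example_remove),
        .driver = {
                .name   = "example",
                .owner  = THIS_MODULE,
        },
};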
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index d03b040..f07307f 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -262,20 +262,7 @@ static struct usb_driver i2c_tiny_usb_driver = {
.id_table = i2c_tiny_usb_table,
};
-static int __init usb_i2c_tiny_usb_init(void)
-{
- /* register this driver with the USB subsystem */
- return usb_register(&i2c_tiny_usb_driver);
-}
-
-static void __exit usb_i2c_tiny_usb_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&i2c_tiny_usb_driver);
-}
-
-module_init(usb_i2c_tiny_usb_init);
-module_exit(usb_i2c_tiny_usb_exit);
+module_usb_driver(i2c_tiny_usb_driver);
/* ----- end of usb layer ------------------------------------------------ */
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index 7799fe5..713d31a 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -89,7 +89,7 @@ static struct i2c_adapter vt586b_adapter = {
};
-static const struct pci_device_id vt586b_ids[] __devinitconst = {
+static DEFINE_PCI_DEVICE_TABLE(vt586b_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index 0b012f1..333011c 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -91,7 +91,7 @@ static unsigned short SMBHSTCFG = 0xD2;
/* If force is set to anything different from 0, we forcibly enable the
VT596. DANGEROUS! */
-static int force;
+static bool force;
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Forcibly enable the SMBus. DANGEROUS!");
@@ -324,7 +324,7 @@ static int __devinit vt596_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
unsigned char temp;
- int error = -ENODEV;
+ int error;
/* Determine the address of the SMBus areas */
if (force_addr) {
@@ -390,6 +390,7 @@ found:
dev_err(&pdev->dev, "SMBUS: Error: Host SMBus "
"controller not enabled! - upgrade BIOS or "
"use force=1\n");
+ error = -ENODEV;
goto release_region;
}
}
@@ -422,9 +423,11 @@ found:
"SMBus Via Pro adapter at %04x", vt596_smba);
vt596_pdev = pci_dev_get(pdev);
- if (i2c_add_adapter(&vt596_adapter)) {
+ error = i2c_add_adapter(&vt596_adapter);
+ if (error) {
pci_dev_put(vt596_pdev);
vt596_pdev = NULL;
+ goto release_region;
}
/* Always return failure here. This is to allow other drivers to bind
@@ -438,7 +441,7 @@ release_region:
return error;
}
-static const struct pci_device_id vt596_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(vt596_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596_3),
.driver_data = SMBBA1 },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596B_3),
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 4bb68f3..2bded76 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -426,7 +426,7 @@ static void xiic_process(struct xiic_i2c *i2c)
xiic_wakeup(i2c, STATE_ERROR);
} else if (pend & (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)) {
- /* Transmit register/FIFO is empty or empty */
+ /* Transmit register/FIFO is empty or ½ empty */
clr = pend &
(XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK);
@@ -795,10 +795,6 @@ static int __devexit xiic_i2c_remove(struct platform_device* pdev)
return 0;
}
-
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:"DRIVER_NAME);
-
static struct platform_driver xiic_i2c_driver = {
.probe = xiic_i2c_probe,
.remove = __devexit_p(xiic_i2c_remove),
@@ -808,19 +804,9 @@ static struct platform_driver xiic_i2c_driver = {
},
};
-static int __init xiic_i2c_init(void)
-{
- return platform_driver_register(&xiic_i2c_driver);
-}
-
-static void __exit xiic_i2c_exit(void)
-{
- platform_driver_unregister(&xiic_i2c_driver);
-}
-
-module_init(xiic_i2c_init);
-module_exit(xiic_i2c_exit);
+module_platform_driver(xiic_i2c_driver);
MODULE_AUTHOR("info@mocean-labs.com");
MODULE_DESCRIPTION("Xilinx I2C bus driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:"DRIVER_NAME);
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index 91e349c..2eacb77 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -559,7 +559,7 @@ static struct platform_driver scx200_pci_driver = {
.remove = __devexit_p(scx200_remove),
};
-static const struct pci_device_id scx200_isa[] __initconst = {
+static DEFINE_PCI_DEVICE_TABLE(scx200_isa) = {
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE) },
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE) },
{ 0, }
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 57a45ce8..10e7f1e 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -251,15 +251,10 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
if (rdwr_arg.nmsgs > I2C_RDRW_IOCTL_MAX_MSGS)
return -EINVAL;
- rdwr_pa = kmalloc(rdwr_arg.nmsgs * sizeof(struct i2c_msg), GFP_KERNEL);
- if (!rdwr_pa)
- return -ENOMEM;
-
- if (copy_from_user(rdwr_pa, rdwr_arg.msgs,
- rdwr_arg.nmsgs * sizeof(struct i2c_msg))) {
- kfree(rdwr_pa);
- return -EFAULT;
- }
+ rdwr_pa = memdup_user(rdwr_arg.msgs,
+ rdwr_arg.nmsgs * sizeof(struct i2c_msg));
+ if (IS_ERR(rdwr_pa))
+ return PTR_ERR(rdwr_pa);
data_ptrs = kmalloc(rdwr_arg.nmsgs * sizeof(u8 __user *), GFP_KERNEL);
if (data_ptrs == NULL) {
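memdup_user() folds the kmalloc()/copy_from_user() pair removed above into one call and reports failure through ERR_PTR() rather than NULL. A minimal sketch of the pattern in isolation (copy_user_blob is a hypothetical helper):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Duplicate a userspace buffer into kernel memory; caller kfree()s *out. */
static long copy_user_blob(const void __user *src, size_t len, void **out)
{
        void *buf;

        buf = memdup_user(src, len);    /* kmalloc(GFP_KERNEL) + copy_from_user */
        if (IS_ERR(buf))
                return PTR_ERR(buf);    /* -ENOMEM or -EFAULT */

        *out = buf;
        return 0;
}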
diff --git a/drivers/i2c/muxes/gpio-i2cmux.c b/drivers/i2c/muxes/gpio-i2cmux.c
index 7b6ce62..e5fa695 100644
--- a/drivers/i2c/muxes/gpio-i2cmux.c
+++ b/drivers/i2c/muxes/gpio-i2cmux.c
@@ -165,18 +165,7 @@ static struct platform_driver gpiomux_driver = {
},
};
-static int __init gpiomux_init(void)
-{
- return platform_driver_register(&gpiomux_driver);
-}
-
-static void __exit gpiomux_exit(void)
-{
- platform_driver_unregister(&gpiomux_driver);
-}
-
-module_init(gpiomux_init);
-module_exit(gpiomux_exit);
+module_platform_driver(gpiomux_driver);
MODULE_DESCRIPTION("GPIO-based I2C multiplexer driver");
MODULE_AUTHOR("Peter Korsgaard <peter.korsgaard@barco.com>");
diff --git a/drivers/ide/Makefile b/drivers/ide/Makefile
index 7f879b2..af8d016 100644
--- a/drivers/ide/Makefile
+++ b/drivers/ide/Makefile
@@ -116,4 +116,3 @@ obj-$(CONFIG_BLK_DEV_IDE_AU1XXX) += au1xxx-ide.o
obj-$(CONFIG_BLK_DEV_IDE_TX4938) += tx4938ide.o
obj-$(CONFIG_BLK_DEV_IDE_TX4939) += tx4939ide.o
-obj-$(CONFIG_BLK_DEV_IDE_AT91) += at91_ide.o
diff --git a/drivers/ide/ali14xx.c b/drivers/ide/ali14xx.c
index 25b9fe3..d3be99f 100644
--- a/drivers/ide/ali14xx.c
+++ b/drivers/ide/ali14xx.c
@@ -221,7 +221,7 @@ static int __init ali14xx_probe(void)
return ide_legacy_device_add(&ali14xx_port_info, 0);
}
-static int probe_ali14xx;
+static bool probe_ali14xx;
module_param_named(probe, probe_ali14xx, bool, 0);
MODULE_PARM_DESC(probe, "probe for ALI M14xx chipsets");
diff --git a/drivers/ide/at91_ide.c b/drivers/ide/at91_ide.c
deleted file mode 100644
index 6dede8f..0000000
--- a/drivers/ide/at91_ide.c
+++ /dev/null
@@ -1,366 +0,0 @@
-/*
- * IDE host driver for AT91 (SAM9, CAP9, AT572D940HF) Static Memory Controller
- * with Compact Flash True IDE logic
- *
- * Copyright (c) 2008, 2009 Kelvatek Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/clk.h>
-#include <linux/err.h>
-#include <linux/ide.h>
-#include <linux/platform_device.h>
-
-#include <mach/board.h>
-#include <asm/gpio.h>
-#include <mach/at91sam9_smc.h>
-
-#define DRV_NAME "at91_ide"
-
-#define perr(fmt, args...) pr_err(DRV_NAME ": " fmt, ##args)
-#define pdbg(fmt, args...) pr_debug("%s " fmt, __func__, ##args)
-
-/*
- * Access to IDE device is possible through EBI Static Memory Controller
- * with Compact Flash logic. For details see EBI and SMC datasheet sections
- * of any microcontroller from AT91SAM9 family.
- *
- * Within SMC chip select address space, lines A[23:21] distinguish Compact
- * Flash modes (I/O, common memory, attribute memory, True IDE). IDE modes are:
- * 0x00c0000 - True IDE
- * 0x00e0000 - Alternate True IDE (Alt Status Register)
- *
- * On True IDE mode Task File and Data Register are mapped at the same address.
- * To distinguish access between these two different bus data width is used:
- * 8Bit for Task File, 16Bit for Data I/O.
- *
- * After initialization we do 8/16 bit flipping (changes in SMC MODE register)
- * only inside IDE callback routines which are serialized by IDE layer,
- * so no additional locking needed.
- */
-
-#define TASK_FILE 0x00c00000
-#define ALT_MODE 0x00e00000
-#define REGS_SIZE 8
-
-#define enter_16bit(cs, mode) do { \
- mode = at91_sys_read(AT91_SMC_MODE(cs)); \
- at91_sys_write(AT91_SMC_MODE(cs), mode | AT91_SMC_DBW_16); \
-} while (0)
-
-#define leave_16bit(cs, mode) at91_sys_write(AT91_SMC_MODE(cs), mode);
-
-static void set_smc_timings(const u8 chipselect, const u16 cycle,
- const u16 setup, const u16 pulse,
- const u16 data_float, int use_iordy)
-{
- unsigned long mode = AT91_SMC_READMODE | AT91_SMC_WRITEMODE |
- AT91_SMC_BAT_SELECT;
-
- /* disable or enable waiting for IORDY signal */
- if (use_iordy)
- mode |= AT91_SMC_EXNWMODE_READY;
-
- /* add data float cycles if needed */
- if (data_float)
- mode |= AT91_SMC_TDF_(data_float);
-
- at91_sys_write(AT91_SMC_MODE(chipselect), mode);
-
- /* setup timings in SMC */
- at91_sys_write(AT91_SMC_SETUP(chipselect), AT91_SMC_NWESETUP_(setup) |
- AT91_SMC_NCS_WRSETUP_(0) |
- AT91_SMC_NRDSETUP_(setup) |
- AT91_SMC_NCS_RDSETUP_(0));
- at91_sys_write(AT91_SMC_PULSE(chipselect), AT91_SMC_NWEPULSE_(pulse) |
- AT91_SMC_NCS_WRPULSE_(cycle) |
- AT91_SMC_NRDPULSE_(pulse) |
- AT91_SMC_NCS_RDPULSE_(cycle));
- at91_sys_write(AT91_SMC_CYCLE(chipselect), AT91_SMC_NWECYCLE_(cycle) |
- AT91_SMC_NRDCYCLE_(cycle));
-}
-
-static unsigned int calc_mck_cycles(unsigned int ns, unsigned int mck_hz)
-{
- u64 tmp = ns;
-
- tmp *= mck_hz;
- tmp += 1000*1000*1000 - 1; /* round up */
- do_div(tmp, 1000*1000*1000);
- return (unsigned int) tmp;
-}
-
-static void apply_timings(const u8 chipselect, const u8 pio,
- const struct ide_timing *timing, int use_iordy)
-{
- unsigned int t0, t1, t2, t6z;
- unsigned int cycle, setup, pulse, data_float;
- unsigned int mck_hz;
- struct clk *mck;
-
- /* see table 22 of Compact Flash standard 4.1 for the meaning,
- * we do not stretch active (t2) time, so setup (t1) + hold time (th)
- * assure at least minimal recovery (t2i) time */
- t0 = timing->cyc8b;
- t1 = timing->setup;
- t2 = timing->act8b;
- t6z = (pio < 5) ? 30 : 20;
-
- pdbg("t0=%u t1=%u t2=%u t6z=%u\n", t0, t1, t2, t6z);
-
- mck = clk_get(NULL, "mck");
- BUG_ON(IS_ERR(mck));
- mck_hz = clk_get_rate(mck);
- pdbg("mck_hz=%u\n", mck_hz);
-
- cycle = calc_mck_cycles(t0, mck_hz);
- setup = calc_mck_cycles(t1, mck_hz);
- pulse = calc_mck_cycles(t2, mck_hz);
- data_float = calc_mck_cycles(t6z, mck_hz);
-
- pdbg("cycle=%u setup=%u pulse=%u data_float=%u\n",
- cycle, setup, pulse, data_float);
-
- set_smc_timings(chipselect, cycle, setup, pulse, data_float, use_iordy);
-}
-
-static void at91_ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd,
- void *buf, unsigned int len)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_io_ports *io_ports = &hwif->io_ports;
- u8 chipselect = hwif->select_data;
- unsigned long mode;
-
- pdbg("cs %u buf %p len %d\n", chipselect, buf, len);
-
- len++;
-
- enter_16bit(chipselect, mode);
- readsw((void __iomem *)io_ports->data_addr, buf, len / 2);
- leave_16bit(chipselect, mode);
-}
-
-static void at91_ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
- void *buf, unsigned int len)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_io_ports *io_ports = &hwif->io_ports;
- u8 chipselect = hwif->select_data;
- unsigned long mode;
-
- pdbg("cs %u buf %p len %d\n", chipselect, buf, len);
-
- enter_16bit(chipselect, mode);
- writesw((void __iomem *)io_ports->data_addr, buf, len / 2);
- leave_16bit(chipselect, mode);
-}
-
-static void at91_ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
- struct ide_timing *timing;
- u8 chipselect = hwif->select_data;
- int use_iordy = 0;
- const u8 pio = drive->pio_mode - XFER_PIO_0;
-
- pdbg("chipselect %u pio %u\n", chipselect, pio);
-
- timing = ide_timing_find_mode(XFER_PIO_0 + pio);
- BUG_ON(!timing);
-
- if (ide_pio_need_iordy(drive, pio))
- use_iordy = 1;
-
- apply_timings(chipselect, pio, timing, use_iordy);
-}
-
-static const struct ide_tp_ops at91_ide_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = ide_write_devctl,
-
- .dev_select = ide_dev_select,
- .tf_load = ide_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = at91_ide_input_data,
- .output_data = at91_ide_output_data,
-};
-
-static const struct ide_port_ops at91_ide_port_ops = {
- .set_pio_mode = at91_ide_set_pio_mode,
-};
-
-static const struct ide_port_info at91_ide_port_info __initdata = {
- .port_ops = &at91_ide_port_ops,
- .tp_ops = &at91_ide_tp_ops,
- .host_flags = IDE_HFLAG_MMIO | IDE_HFLAG_NO_DMA | IDE_HFLAG_SINGLE |
- IDE_HFLAG_NO_IO_32BIT | IDE_HFLAG_UNMASK_IRQS,
- .pio_mask = ATA_PIO6,
- .chipset = ide_generic,
-};
-
-/*
- * If interrupt is delivered through GPIO, IRQ are triggered on falling
- * and rising edge of signal. Whereas IDE device request interrupt on high
- * level (rising edge in our case). This mean we have fake interrupts, so
- * we need to check interrupt pin and exit instantly from ISR when line
- * is on low level.
- */
-
-irqreturn_t at91_irq_handler(int irq, void *dev_id)
-{
- int ntries = 8;
- int pin_val1, pin_val2;
-
- /* additional deglitch, line can be noisy in badly designed PCB */
- do {
- pin_val1 = at91_get_gpio_value(irq);
- pin_val2 = at91_get_gpio_value(irq);
- } while (pin_val1 != pin_val2 && --ntries > 0);
-
- if (pin_val1 == 0 || ntries <= 0)
- return IRQ_HANDLED;
-
- return ide_intr(irq, dev_id);
-}
-
-static int __init at91_ide_probe(struct platform_device *pdev)
-{
- int ret;
- struct ide_hw hw, *hws[] = { &hw };
- struct ide_host *host;
- struct resource *res;
- unsigned long tf_base = 0, ctl_base = 0;
- struct at91_cf_data *board = pdev->dev.platform_data;
-
- if (!board)
- return -ENODEV;
-
- if (board->det_pin && at91_get_gpio_value(board->det_pin) != 0) {
- perr("no device detected\n");
- return -ENODEV;
- }
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- perr("can't get memory resource\n");
- return -ENODEV;
- }
-
- if (!devm_request_mem_region(&pdev->dev, res->start + TASK_FILE,
- REGS_SIZE, "ide") ||
- !devm_request_mem_region(&pdev->dev, res->start + ALT_MODE,
- REGS_SIZE, "alt")) {
- perr("memory resources in use\n");
- return -EBUSY;
- }
-
- pdbg("chipselect %u irq %u res %08lx\n", board->chipselect,
- board->irq_pin, (unsigned long) res->start);
-
- tf_base = (unsigned long) devm_ioremap(&pdev->dev, res->start + TASK_FILE,
- REGS_SIZE);
- ctl_base = (unsigned long) devm_ioremap(&pdev->dev, res->start + ALT_MODE,
- REGS_SIZE);
- if (!tf_base || !ctl_base) {
- perr("can't map memory regions\n");
- return -EBUSY;
- }
-
- memset(&hw, 0, sizeof(hw));
-
- if (board->flags & AT91_IDE_SWAP_A0_A2) {
- /* workaround for stupid hardware bug */
- hw.io_ports.data_addr = tf_base + 0;
- hw.io_ports.error_addr = tf_base + 4;
- hw.io_ports.nsect_addr = tf_base + 2;
- hw.io_ports.lbal_addr = tf_base + 6;
- hw.io_ports.lbam_addr = tf_base + 1;
- hw.io_ports.lbah_addr = tf_base + 5;
- hw.io_ports.device_addr = tf_base + 3;
- hw.io_ports.command_addr = tf_base + 7;
- hw.io_ports.ctl_addr = ctl_base + 3;
- } else
- ide_std_init_ports(&hw, tf_base, ctl_base + 6);
-
- hw.irq = board->irq_pin;
- hw.dev = &pdev->dev;
-
- host = ide_host_alloc(&at91_ide_port_info, hws, 1);
- if (!host) {
- perr("failed to allocate ide host\n");
- return -ENOMEM;
- }
-
- /* setup Static Memory Controller - PIO 0 as default */
- apply_timings(board->chipselect, 0, ide_timing_find_mode(XFER_PIO_0), 0);
-
- /* with GPIO interrupt we have to do quirks in handler */
- if (board->irq_pin >= PIN_BASE)
- host->irq_handler = at91_irq_handler;
-
- host->ports[0]->select_data = board->chipselect;
-
- ret = ide_host_register(host, &at91_ide_port_info, hws);
- if (ret) {
- perr("failed to register ide host\n");
- goto err_free_host;
- }
- platform_set_drvdata(pdev, host);
- return 0;
-
-err_free_host:
- ide_host_free(host);
- return ret;
-}
-
-static int __exit at91_ide_remove(struct platform_device *pdev)
-{
- struct ide_host *host = platform_get_drvdata(pdev);
-
- ide_host_remove(host);
- return 0;
-}
-
-static struct platform_driver at91_ide_driver = {
- .driver = {
- .name = DRV_NAME,
- .owner = THIS_MODULE,
- },
- .remove = __exit_p(at91_ide_remove),
-};
-
-static int __init at91_ide_init(void)
-{
- return platform_driver_probe(&at91_ide_driver, at91_ide_probe);
-}
-
-static void __exit at91_ide_exit(void)
-{
- platform_driver_unregister(&at91_ide_driver);
-}
-
-module_init(at91_ide_init);
-module_exit(at91_ide_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Stanislaw Gruszka <stf_xl@wp.pl>");
-
diff --git a/drivers/ide/cmd640.c b/drivers/ide/cmd640.c
index a81bd75..1471730 100644
--- a/drivers/ide/cmd640.c
+++ b/drivers/ide/cmd640.c
@@ -111,7 +111,7 @@
#define DRV_NAME "cmd640"
-static int cmd640_vlb;
+static bool cmd640_vlb;
/*
* CMD640 specific registers definition.
diff --git a/drivers/ide/dtc2278.c b/drivers/ide/dtc2278.c
index 6929f7f..46af474 100644
--- a/drivers/ide/dtc2278.c
+++ b/drivers/ide/dtc2278.c
@@ -130,7 +130,7 @@ static int __init dtc2278_probe(void)
return ide_legacy_device_add(&dtc2278_port_info, 0);
}
-static int probe_dtc2278;
+static bool probe_dtc2278;
module_param_named(probe, probe_dtc2278, bool, 0);
MODULE_PARM_DESC(probe, "probe for DTC2278xx chipsets");
diff --git a/drivers/ide/gayle.c b/drivers/ide/gayle.c
index 3feaa26..51beb85 100644
--- a/drivers/ide/gayle.c
+++ b/drivers/ide/gayle.c
@@ -50,7 +50,7 @@
GAYLE_NUM_HWIFS-1)
#define GAYLE_HAS_CONTROL_REG (!ide_doubler)
-static int ide_doubler;
+static bool ide_doubler;
module_param_named(doubler, ide_doubler, bool, 0);
MODULE_PARM_DESC(doubler, "enable support for IDE doublers");
diff --git a/drivers/ide/ht6560b.c b/drivers/ide/ht6560b.c
index 808bcdc..986f251 100644
--- a/drivers/ide/ht6560b.c
+++ b/drivers/ide/ht6560b.c
@@ -317,7 +317,7 @@ static void __init ht6560b_init_dev(ide_drive_t *drive)
ide_set_drivedata(drive, (void *)t);
}
-static int probe_ht6560b;
+static bool probe_ht6560b;
module_param_named(probe, probe_ht6560b, bool, 0);
MODULE_PARM_DESC(probe, "probe for HT6560B chipset");
diff --git a/drivers/ide/ide-4drives.c b/drivers/ide/ide-4drives.c
index 979d342..547d7cf 100644
--- a/drivers/ide/ide-4drives.c
+++ b/drivers/ide/ide-4drives.c
@@ -6,7 +6,7 @@
#define DRV_NAME "ide-4drives"
-static int probe_4drives;
+static bool probe_4drives;
module_param_named(probe, probe_4drives, bool, 0);
MODULE_PARM_DESC(probe, "probe for generic IDE chipset with 4 drives/port");
diff --git a/drivers/ide/ide-acpi.c b/drivers/ide/ide-acpi.c
index f22edc6..f1a6796b 100644
--- a/drivers/ide/ide-acpi.c
+++ b/drivers/ide/ide-acpi.c
@@ -53,15 +53,15 @@ struct ide_acpi_hwif_link {
#define DEBPRINT(fmt, args...) do {} while (0)
#endif /* DEBUGGING */
-static int ide_noacpi;
+static bool ide_noacpi;
module_param_named(noacpi, ide_noacpi, bool, 0);
MODULE_PARM_DESC(noacpi, "disable IDE ACPI support");
-static int ide_acpigtf;
+static bool ide_acpigtf;
module_param_named(acpigtf, ide_acpigtf, bool, 0);
MODULE_PARM_DESC(acpigtf, "enable IDE ACPI _GTF support");
-static int ide_acpionboot;
+static bool ide_acpionboot;
module_param_named(acpionboot, ide_acpionboot, bool, 0);
MODULE_PARM_DESC(acpionboot, "call IDE ACPI methods on boot");
diff --git a/drivers/ide/ide-floppy_ioctl.c b/drivers/ide/ide-floppy_ioctl.c
index d267b7a..a22ca84 100644
--- a/drivers/ide/ide-floppy_ioctl.c
+++ b/drivers/ide/ide-floppy_ioctl.c
@@ -292,8 +292,7 @@ int ide_floppy_ioctl(ide_drive_t *drive, struct block_device *bdev,
* and CDROM_SEND_PACKET (legacy) ioctls
*/
if (cmd != CDROM_SEND_PACKET && cmd != SCSI_IOCTL_SEND_COMMAND)
- err = scsi_cmd_ioctl(bdev->bd_disk->queue, bdev->bd_disk,
- mode, cmd, argp);
+ err = scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
if (err == -ENOTTY)
err = generic_ide_ioctl(drive, bdev, cmd, arg);
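scsi_cmd_blk_ioctl() used above takes the block_device rather than the request queue, so the SCSI ioctl core can check which block device (whole disk or partition) the command was issued against before dispatching it. A hedged sketch of the call shape only (example_ioctl is a hypothetical wrapper):

#include <linux/blkdev.h>

static int example_ioctl(struct block_device *bdev, fmode_t mode,
                         unsigned int cmd, void __user *argp)
{
        /* verify the target block device, then fall through to the
         * usual scsi_cmd_ioctl() handling */
        return scsi_cmd_blk_ioctl(bdev, mode, cmd, argp);
}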
diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
index a743e68..7f56b73 100644
--- a/drivers/ide/ide-pci-generic.c
+++ b/drivers/ide/ide-pci-generic.c
@@ -28,7 +28,7 @@
#define DRV_NAME "ide_pci_generic"
-static int ide_generic_all; /* Set to claim all devices */
+static bool ide_generic_all; /* Set to claim all devices */
module_param_named(all_generic_ide, ide_generic_all, bool, 0444);
MODULE_PARM_DESC(all_generic_ide, "IDE generic will claim all unknown PCI IDE storage controllers.");
diff --git a/drivers/ide/qd65xx.c b/drivers/ide/qd65xx.c
index 3f0244f..8bbfe55 100644
--- a/drivers/ide/qd65xx.c
+++ b/drivers/ide/qd65xx.c
@@ -417,7 +417,7 @@ static int __init qd_probe(int base)
return rc;
}
-static int probe_qd65xx;
+static bool probe_qd65xx;
module_param_named(probe, probe_qd65xx, bool, 0);
MODULE_PARM_DESC(probe, "probe for QD65xx chipsets");
diff --git a/drivers/ide/umc8672.c b/drivers/ide/umc8672.c
index 47adcd0..5cfb781 100644
--- a/drivers/ide/umc8672.c
+++ b/drivers/ide/umc8672.c
@@ -160,7 +160,7 @@ static int __init umc8672_probe(void)
return ide_legacy_device_add(&umc8672_port_info, 0);
}
-static int probe_umc8672;
+static bool probe_umc8672;
module_param_named(probe, probe_umc8672, bool, 0);
MODULE_PARM_DESC(probe, "probe for UMC8672 chipset");
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 5d2f8e1..54ab97b 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -197,7 +197,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
.enter = &intel_idle },
};
-static int get_driver_data(int cstate)
+static long get_driver_data(int cstate)
{
int driver_data;
switch (cstate) {
@@ -232,6 +232,7 @@ static int get_driver_data(int cstate)
* @drv: cpuidle driver
* @index: index of cpuidle state
*
+ * Must be called under local_irq_disable().
*/
static int intel_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
@@ -247,8 +248,6 @@ static int intel_idle(struct cpuidle_device *dev,
cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
- local_irq_disable();
-
/*
* leave_mm() to avoid costly and often unnecessary wakeups
* for flushing the user TLB's associated with the active mm.
@@ -348,7 +347,8 @@ static int intel_idle_probe(void)
cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
- !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
+ !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
+ !mwait_substates)
return -ENODEV;
pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);
@@ -394,7 +394,7 @@ static int intel_idle_probe(void)
if (boot_cpu_has(X86_FEATURE_ARAT)) /* Always Reliable APIC Timer */
lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
else {
- smp_call_function(__setup_broadcast_timer, (void *)true, 1);
+ on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
register_cpu_notifier(&setup_broadcast_notifier);
}
@@ -471,71 +471,67 @@ static int intel_idle_cpuidle_driver_init(void)
}
if (auto_demotion_disable_flags)
- smp_call_function(auto_demotion_disable, NULL, 1);
+ on_each_cpu(auto_demotion_disable, NULL, 1);
return 0;
}
/*
- * intel_idle_cpuidle_devices_init()
+ * intel_idle_cpu_init()
* allocate, initialize, register cpuidle_devices
+ * @cpu: cpu/core to initialize
*/
-static int intel_idle_cpuidle_devices_init(void)
+int intel_idle_cpu_init(int cpu)
{
- int i, cstate;
+ int cstate;
struct cpuidle_device *dev;
- intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
- if (intel_idle_cpuidle_devices == NULL)
- return -ENOMEM;
-
- for_each_online_cpu(i) {
- dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
+ dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
- dev->state_count = 1;
+ dev->state_count = 1;
- for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
- int num_substates;
+ for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
+ int num_substates;
- if (cstate > max_cstate) {
- printk(PREFIX "max_cstate %d reached\n",
- max_cstate);
- break;
- }
+ if (cstate > max_cstate) {
+ printk(PREFIX "max_cstate %d reached\n",
+ max_cstate);
+ break;
+ }
- /* does the state exist in CPUID.MWAIT? */
- num_substates = (mwait_substates >> ((cstate) * 4))
- & MWAIT_SUBSTATE_MASK;
- if (num_substates == 0)
- continue;
- /* is the state not enabled? */
- if (cpuidle_state_table[cstate].enter == NULL) {
- continue;
- }
+ /* does the state exist in CPUID.MWAIT? */
+ num_substates = (mwait_substates >> ((cstate) * 4))
+ & MWAIT_SUBSTATE_MASK;
+ if (num_substates == 0)
+ continue;
+ /* is the state not enabled? */
+ if (cpuidle_state_table[cstate].enter == NULL)
+ continue;
- dev->states_usage[dev->state_count].driver_data =
- (void *)get_driver_data(cstate);
+ dev->states_usage[dev->state_count].driver_data =
+ (void *)get_driver_data(cstate);
dev->state_count += 1;
}
+ dev->cpu = cpu;
- dev->cpu = i;
- if (cpuidle_register_device(dev)) {
- pr_debug(PREFIX "cpuidle_register_device %d failed!\n",
- i);
- intel_idle_cpuidle_devices_uninit();
- return -EIO;
- }
+ if (cpuidle_register_device(dev)) {
+ pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu);
+ intel_idle_cpuidle_devices_uninit();
+ return -EIO;
}
+ if (auto_demotion_disable_flags)
+ smp_call_function_single(cpu, auto_demotion_disable, NULL, 1);
+
return 0;
}
-
+EXPORT_SYMBOL_GPL(intel_idle_cpu_init);
static int __init intel_idle_init(void)
{
- int retval;
+ int retval, i;
/* Do not load intel_idle at all for now if idle= is passed */
if (boot_option_idle_override != IDLE_NO_OVERRIDE)
@@ -553,10 +549,16 @@ static int __init intel_idle_init(void)
return retval;
}
- retval = intel_idle_cpuidle_devices_init();
- if (retval) {
- cpuidle_unregister_driver(&intel_idle_driver);
- return retval;
+ intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
+ if (intel_idle_cpuidle_devices == NULL)
+ return -ENOMEM;
+
+ for_each_online_cpu(i) {
+ retval = intel_idle_cpu_init(i);
+ if (retval) {
+ cpuidle_unregister_driver(&intel_idle_driver);
+ return retval;
+ }
}
return 0;
@@ -568,7 +570,7 @@ static void __exit intel_idle_exit(void)
cpuidle_unregister_driver(&intel_idle_driver);
if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) {
- smp_call_function(__setup_broadcast_timer, (void *)false, 1);
+ on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
unregister_cpu_notifier(&setup_broadcast_notifier);
}
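The smp_call_function() to on_each_cpu() switches above matter because smp_call_function() runs the callback on every online CPU except the caller, while on_each_cpu() also covers the calling CPU. A minimal hedged sketch (set_flag is a hypothetical callback):

#include <linux/smp.h>

static void set_flag(void *info)
{
        /* e.g. a per-CPU MSR write or broadcast-timer setup */
}

static void apply_on_all_cpus(void)
{
        /* final argument 1 = wait for all invocations to complete */
        on_each_cpu(set_flag, NULL, 1);
}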
diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
index eb0e2cc..73d4531 100644
--- a/drivers/ieee802154/fakehard.c
+++ b/drivers/ieee802154/fakehard.c
@@ -343,7 +343,7 @@ static void ieee802154_fake_setup(struct net_device *dev)
{
dev->addr_len = IEEE802154_ADDR_LEN;
memset(dev->broadcast, 0xff, IEEE802154_ADDR_LEN);
- dev->features = NETIF_F_NO_CSUM;
+ dev->features = NETIF_F_HW_CSUM;
dev->needed_tailroom = 2; /* FCS */
dev->mtu = 127;
dev->tx_queue_len = 10;
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index 0f9a84c..eb0add3 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -55,6 +55,7 @@ source "drivers/infiniband/hw/nes/Kconfig"
source "drivers/infiniband/ulp/ipoib/Kconfig"
source "drivers/infiniband/ulp/srp/Kconfig"
+source "drivers/infiniband/ulp/srpt/Kconfig"
source "drivers/infiniband/ulp/iser/Kconfig"
diff --git a/drivers/infiniband/Makefile b/drivers/infiniband/Makefile
index 9cc7a47..a3b2d8e 100644
--- a/drivers/infiniband/Makefile
+++ b/drivers/infiniband/Makefile
@@ -10,4 +10,5 @@ obj-$(CONFIG_MLX4_INFINIBAND) += hw/mlx4/
obj-$(CONFIG_INFINIBAND_NES) += hw/nes/
obj-$(CONFIG_INFINIBAND_IPOIB) += ulp/ipoib/
obj-$(CONFIG_INFINIBAND_SRP) += ulp/srp/
+obj-$(CONFIG_INFINIBAND_SRPT) += ulp/srpt/
obj-$(CONFIG_INFINIBAND_ISER) += ulp/iser/
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index e9cf51b..1612cfd 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -178,6 +178,25 @@ static void queue_req(struct addr_req *req)
mutex_unlock(&lock);
}
+static int dst_fetch_ha(struct dst_entry *dst, struct rdma_dev_addr *addr)
+{
+ struct neighbour *n;
+ int ret;
+
+ rcu_read_lock();
+ n = dst_get_neighbour_noref(dst);
+ if (!n || !(n->nud_state & NUD_VALID)) {
+ if (n)
+ neigh_event_send(n, NULL);
+ ret = -ENODATA;
+ } else {
+ ret = rdma_copy_addr(addr, dst->dev, n->ha);
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+
static int addr4_resolve(struct sockaddr_in *src_in,
struct sockaddr_in *dst_in,
struct rdma_dev_addr *addr)
@@ -185,7 +204,6 @@ static int addr4_resolve(struct sockaddr_in *src_in,
__be32 src_ip = src_in->sin_addr.s_addr;
__be32 dst_ip = dst_in->sin_addr.s_addr;
struct rtable *rt;
- struct neighbour *neigh;
struct flowi4 fl4;
int ret;
@@ -214,20 +232,7 @@ static int addr4_resolve(struct sockaddr_in *src_in,
goto put;
}
- neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
- if (!neigh || !(neigh->nud_state & NUD_VALID)) {
- rcu_read_lock();
- neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
- rcu_read_unlock();
- ret = -ENODATA;
- if (neigh)
- goto release;
- goto put;
- }
-
- ret = rdma_copy_addr(addr, neigh->dev, neigh->ha);
-release:
- neigh_release(neigh);
+ ret = dst_fetch_ha(&rt->dst, addr);
put:
ip_rt_put(rt);
out:
@@ -240,13 +245,12 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
struct rdma_dev_addr *addr)
{
struct flowi6 fl6;
- struct neighbour *neigh;
struct dst_entry *dst;
int ret;
memset(&fl6, 0, sizeof fl6);
- ipv6_addr_copy(&fl6.daddr, &dst_in->sin6_addr);
- ipv6_addr_copy(&fl6.saddr, &src_in->sin6_addr);
+ fl6.daddr = dst_in->sin6_addr;
+ fl6.saddr = src_in->sin6_addr;
fl6.flowi6_oif = addr->bound_dev_if;
dst = ip6_route_output(&init_net, NULL, &fl6);
@@ -260,7 +264,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
goto put;
src_in->sin6_family = AF_INET6;
- ipv6_addr_copy(&src_in->sin6_addr, &fl6.saddr);
+ src_in->sin6_addr = fl6.saddr;
}
if (dst->dev->flags & IFF_LOOPBACK) {
@@ -276,16 +280,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
goto put;
}
- rcu_read_lock();
- neigh = dst_get_neighbour(dst);
- if (!neigh || !(neigh->nud_state & NUD_VALID)) {
- if (neigh)
- neigh_event_send(neigh, NULL);
- ret = -ENODATA;
- } else {
- ret = rdma_copy_addr(addr, dst->dev, neigh->ha);
- }
- rcu_read_unlock();
+ ret = dst_fetch_ha(dst, addr);
put:
dst_release(dst);
return ret;
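Both resolvers now share dst_fetch_ha(), which uses the reference-free neighbour accessor under rcu_read_lock() instead of taking and dropping a neighbour reference. A hedged sketch of the same lookup pattern in isolation (fetch_dst_mac is a hypothetical helper):

#include <linux/if_ether.h>
#include <linux/string.h>
#include <net/dst.h>
#include <net/neighbour.h>

static int fetch_dst_mac(struct dst_entry *dst, u8 *mac)
{
        struct neighbour *n;
        int ret = -ENODATA;

        rcu_read_lock();
        n = dst_get_neighbour_noref(dst);       /* no refcount; valid only under RCU */
        if (n && (n->nud_state & NUD_VALID)) {
                memcpy(mac, n->ha, ETH_ALEN);
                ret = 0;
        } else if (n) {
                neigh_event_send(n, NULL);      /* kick resolution, retry later */
        }
        rcu_read_unlock();

        return ret;
}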
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 8b72f39..c889aae 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -3659,7 +3659,7 @@ static struct kobj_type cm_port_obj_type = {
.release = cm_release_port_obj
};
-static char *cm_devnode(struct device *dev, mode_t *mode)
+static char *cm_devnode(struct device *dev, umode_t *mode)
{
if (mode)
*mode = 0666;
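The devnode callbacks in this and the following hunks switch from mode_t to umode_t to track the prototype change in struct class; the callback itself still only picks the node name and permissions. A minimal hedged sketch modelled on these callbacks (example_devnode is hypothetical):

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>

static char *example_devnode(struct device *dev, umode_t *mode)
{
        if (mode)
                *mode = 0666;   /* world read/write device node */
        return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}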
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index 505db2a..7da9b21 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -799,6 +799,7 @@ struct cm_apr_msg {
u8 info_length;
u8 ap_status;
+ __be16 rsvd;
u8 info[IB_CM_APR_INFO_LENGTH];
u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE];
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index d0d4aa9..e3e470f 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1110,7 +1110,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
if (cma_any_addr((struct sockaddr *) &rt->addr.src_addr)) {
rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
- ib_addr_set_pkey(&rt->addr.dev_addr, rt->path_rec[0].pkey);
+ ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
} else {
ret = rdma_translate_ip((struct sockaddr *) &rt->addr.src_addr,
&rt->addr.dev_addr);
@@ -2005,11 +2005,11 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
if (cma_zero_addr(src)) {
dst = (struct sockaddr *) &id_priv->id.route.addr.dst_addr;
if ((src->sa_family = dst->sa_family) == AF_INET) {
- ((struct sockaddr_in *) src)->sin_addr.s_addr =
- ((struct sockaddr_in *) dst)->sin_addr.s_addr;
+ ((struct sockaddr_in *)src)->sin_addr =
+ ((struct sockaddr_in *)dst)->sin_addr;
} else {
- ipv6_addr_copy(&((struct sockaddr_in6 *) src)->sin6_addr,
- &((struct sockaddr_in6 *) dst)->sin6_addr);
+ ((struct sockaddr_in6 *)src)->sin6_addr =
+ ((struct sockaddr_in6 *)dst)->sin6_addr;
}
}
@@ -2926,7 +2926,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
mutex_lock(&id_priv->qp_mutex);
if (!status && id_priv->id.qp)
status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
- multicast->rec.mlid);
+ be16_to_cpu(multicast->rec.mlid));
mutex_unlock(&id_priv->qp_mutex);
memset(&event, 0, sizeof event);
@@ -3187,7 +3187,7 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
if (id->qp)
ib_detach_mcast(id->qp,
&mc->multicast.ib->rec.mgid,
- mc->multicast.ib->rec.mlid);
+ be16_to_cpu(mc->multicast.ib->rec.mlid));
if (rdma_node_get_transport(id_priv->cma_dev->device->node_type) == RDMA_TRANSPORT_IB) {
switch (rdma_port_get_link_layer(id->device, id->port_num)) {
case IB_LINK_LAYER_INFINIBAND:
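The ipv6_addr_copy() calls here and in addr.c above are replaced by plain structure assignment, which works because struct in6_addr copies safely by value. A minimal sketch:

#include <linux/in6.h>

static void copy_v6_addr(struct sockaddr_in6 *dst, const struct sockaddr_in6 *src)
{
        /* direct struct assignment replaces ipv6_addr_copy() */
        dst->sin6_addr = src->sin6_addr;
}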
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index b8a0b4a..06f0871 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -106,9 +106,6 @@ enum {
IB_UCM_MAX_DEVICES = 32
};
-/* ib_cm and ib_user_cm modules share /sys/class/infiniband_cm */
-extern struct class cm_class;
-
#define IB_UCM_BASE_DEV MKDEV(IB_UCM_MAJOR, IB_UCM_BASE_MINOR)
static void ib_ucm_add_one(struct ib_device *device);
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index b37b0c0..5034a87 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -808,9 +808,12 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
return PTR_ERR(ctx);
if (cmd.conn_param.valid) {
- ctx->uid = cmd.uid;
ucma_copy_conn_param(&conn_param, &cmd.conn_param);
+ mutex_lock(&file->mut);
ret = rdma_accept(ctx->cm_id, &conn_param);
+ if (!ret)
+ ctx->uid = cmd.uid;
+ mutex_unlock(&file->mut);
} else
ret = rdma_accept(ctx->cm_id, NULL);
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 07db229..f0d588f 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -1175,7 +1175,7 @@ static void ib_umad_remove_one(struct ib_device *device)
kref_put(&umad_dev->ref, ib_umad_release_dev);
}
-static char *umad_devnode(struct device *dev, mode_t *mode)
+static char *umad_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 254f164..4d27e4c3 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -241,11 +241,24 @@ static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
}
+static struct ib_qp *idr_write_qp(int qp_handle, struct ib_ucontext *context)
+{
+ struct ib_uobject *uobj;
+
+ uobj = idr_write_uobj(&ib_uverbs_qp_idr, qp_handle, context);
+ return uobj ? uobj->object : NULL;
+}
+
static void put_qp_read(struct ib_qp *qp)
{
put_uobj_read(qp->uobject);
}
+static void put_qp_write(struct ib_qp *qp)
+{
+ put_uobj_write(qp->uobject);
+}
+
static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
{
return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
@@ -1472,6 +1485,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
qp->event_handler = attr.event_handler;
qp->qp_context = attr.qp_context;
qp->qp_type = attr.qp_type;
+ atomic_set(&qp->usecnt, 0);
atomic_inc(&pd->usecnt);
atomic_inc(&attr.send_cq->usecnt);
if (attr.recv_cq)
@@ -2375,7 +2389,7 @@ ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- qp = idr_read_qp(cmd.qp_handle, file->ucontext);
+ qp = idr_write_qp(cmd.qp_handle, file->ucontext);
if (!qp)
return -EINVAL;
@@ -2404,7 +2418,7 @@ ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
kfree(mcast);
out_put:
- put_qp_read(qp);
+ put_qp_write(qp);
return ret ? ret : in_len;
}
@@ -2422,7 +2436,7 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- qp = idr_read_qp(cmd.qp_handle, file->ucontext);
+ qp = idr_write_qp(cmd.qp_handle, file->ucontext);
if (!qp)
return -EINVAL;
@@ -2441,14 +2455,14 @@ ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
}
out_put:
- put_qp_read(qp);
+ put_qp_write(qp);
return ret ? ret : in_len;
}
-int __uverbs_create_xsrq(struct ib_uverbs_file *file,
- struct ib_uverbs_create_xsrq *cmd,
- struct ib_udata *udata)
+static int __uverbs_create_xsrq(struct ib_uverbs_file *file,
+ struct ib_uverbs_create_xsrq *cmd,
+ struct ib_udata *udata)
{
struct ib_uverbs_create_srq_resp resp;
struct ib_usrq_object *obj;
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 8796367..604556d 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -846,7 +846,7 @@ static void ib_uverbs_remove_one(struct ib_device *device)
kfree(uverbs_dev);
}
-static char *uverbs_devnode(struct device *dev, mode_t *mode)
+static char *uverbs_devnode(struct device *dev, umode_t *mode)
{
if (mode)
*mode = 0666;
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 602b1bd..575b780 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -421,6 +421,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
qp->uobject = NULL;
qp->qp_type = qp_init_attr->qp_type;
+ atomic_set(&qp->usecnt, 0);
if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
qp->event_handler = __ib_shared_qp_event_handler;
qp->qp_context = qp;
@@ -430,7 +431,6 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
qp->xrcd = qp_init_attr->xrcd;
atomic_inc(&qp_init_attr->xrcd->usecnt);
INIT_LIST_HEAD(&qp->open_list);
- atomic_set(&qp->usecnt, 0);
real_qp = qp;
qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index c88b12b..740dcc0 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1338,7 +1338,6 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
struct iwch_ep *child_ep, *parent_ep = ctx;
struct cpl_pass_accept_req *req = cplhdr(skb);
unsigned int hwtid = GET_TID(req);
- struct neighbour *neigh;
struct dst_entry *dst;
struct l2t_entry *l2t;
struct rtable *rt;
@@ -1375,10 +1374,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
goto reject;
}
dst = &rt->dst;
- rcu_read_lock();
- neigh = dst_get_neighbour(dst);
- l2t = t3_l2t_get(tdev, neigh, neigh->dev);
- rcu_read_unlock();
+ l2t = t3_l2t_get(tdev, dst, NULL);
if (!l2t) {
printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
__func__);
@@ -1889,7 +1885,6 @@ static int is_loopback_dst(struct iw_cm_id *cm_id)
int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
struct iwch_dev *h = to_iwch_dev(cm_id->device);
- struct neighbour *neigh;
struct iwch_ep *ep;
struct rtable *rt;
int err = 0;
@@ -1947,13 +1942,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
goto fail3;
}
ep->dst = &rt->dst;
-
- rcu_read_lock();
- neigh = dst_get_neighbour(ep->dst);
-
- /* get a l2t entry */
- ep->l2t = t3_l2t_get(ep->com.tdev, neigh, neigh->dev);
- rcu_read_unlock();
+ ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL);
if (!ep->l2t) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
err = -ENOMEM;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 0747004..0668bb3 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1556,6 +1556,67 @@ static void get_4tuple(struct cpl_pass_accept_req *req,
return;
}
+static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
+ struct c4iw_dev *cdev, bool clear_mpa_v1)
+{
+ struct neighbour *n;
+ int err, step;
+
+ rcu_read_lock();
+ n = dst_get_neighbour_noref(dst);
+ err = -ENODEV;
+ if (!n)
+ goto out;
+ err = -ENOMEM;
+ if (n->dev->flags & IFF_LOOPBACK) {
+ struct net_device *pdev;
+
+ pdev = ip_dev_find(&init_net, peer_ip);
+ ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
+ n, pdev, 0);
+ if (!ep->l2t)
+ goto out;
+ ep->mtu = pdev->mtu;
+ ep->tx_chan = cxgb4_port_chan(pdev);
+ ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
+ step = cdev->rdev.lldi.ntxq /
+ cdev->rdev.lldi.nchan;
+ ep->txq_idx = cxgb4_port_idx(pdev) * step;
+ step = cdev->rdev.lldi.nrxq /
+ cdev->rdev.lldi.nchan;
+ ep->ctrlq_idx = cxgb4_port_idx(pdev);
+ ep->rss_qid = cdev->rdev.lldi.rxq_ids[
+ cxgb4_port_idx(pdev) * step];
+ dev_put(pdev);
+ } else {
+ ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
+ n, n->dev, 0);
+ if (!ep->l2t)
+ goto out;
+ ep->mtu = dst_mtu(ep->dst);
+ ep->tx_chan = cxgb4_port_chan(n->dev);
+ ep->smac_idx = (cxgb4_port_viid(n->dev) & 0x7F) << 1;
+ step = cdev->rdev.lldi.ntxq /
+ cdev->rdev.lldi.nchan;
+ ep->txq_idx = cxgb4_port_idx(n->dev) * step;
+ ep->ctrlq_idx = cxgb4_port_idx(n->dev);
+ step = cdev->rdev.lldi.nrxq /
+ cdev->rdev.lldi.nchan;
+ ep->rss_qid = cdev->rdev.lldi.rxq_ids[
+ cxgb4_port_idx(n->dev) * step];
+
+ if (clear_mpa_v1) {
+ ep->retry_with_mpa_v1 = 0;
+ ep->tried_with_mpa_v1 = 0;
+ }
+ }
+ err = 0;
+out:
+ rcu_read_unlock();
+
+ return err;
+}
+
static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
{
struct c4iw_ep *child_ep, *parent_ep;
@@ -1563,18 +1624,11 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
struct tid_info *t = dev->rdev.lldi.tids;
unsigned int hwtid = GET_TID(req);
- struct neighbour *neigh;
struct dst_entry *dst;
- struct l2t_entry *l2t;
struct rtable *rt;
__be32 local_ip, peer_ip;
__be16 local_port, peer_port;
- struct net_device *pdev;
- u32 tx_chan, smac_idx;
- u16 rss_qid;
- u32 mtu;
- int step;
- int txq_idx, ctrlq_idx;
+ int err;
parent_ep = lookup_stid(t, stid);
PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
@@ -1596,49 +1650,24 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
goto reject;
}
dst = &rt->dst;
- rcu_read_lock();
- neigh = dst_get_neighbour(dst);
- if (neigh->dev->flags & IFF_LOOPBACK) {
- pdev = ip_dev_find(&init_net, peer_ip);
- BUG_ON(!pdev);
- l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, pdev, 0);
- mtu = pdev->mtu;
- tx_chan = cxgb4_port_chan(pdev);
- smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
- step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
- txq_idx = cxgb4_port_idx(pdev) * step;
- ctrlq_idx = cxgb4_port_idx(pdev);
- step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
- rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
- dev_put(pdev);
- } else {
- l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh, neigh->dev, 0);
- mtu = dst_mtu(dst);
- tx_chan = cxgb4_port_chan(neigh->dev);
- smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
- step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
- txq_idx = cxgb4_port_idx(neigh->dev) * step;
- ctrlq_idx = cxgb4_port_idx(neigh->dev);
- step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
- rss_qid = dev->rdev.lldi.rxq_ids[
- cxgb4_port_idx(neigh->dev) * step];
- }
- rcu_read_unlock();
- if (!l2t) {
- printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
+
+ child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
+ if (!child_ep) {
+ printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
__func__);
dst_release(dst);
goto reject;
}
- child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
- if (!child_ep) {
- printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
+ err = import_ep(child_ep, peer_ip, dst, dev, false);
+ if (err) {
+ printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
__func__);
- cxgb4_l2t_release(l2t);
dst_release(dst);
+ kfree(child_ep);
goto reject;
}
+
state_set(&child_ep->com, CONNECTING);
child_ep->com.dev = dev;
child_ep->com.cm_id = NULL;
@@ -1651,18 +1680,11 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
c4iw_get_ep(&parent_ep->com);
child_ep->parent_ep = parent_ep;
child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
- child_ep->l2t = l2t;
child_ep->dst = dst;
child_ep->hwtid = hwtid;
- child_ep->tx_chan = tx_chan;
- child_ep->smac_idx = smac_idx;
- child_ep->rss_qid = rss_qid;
- child_ep->mtu = mtu;
- child_ep->txq_idx = txq_idx;
- child_ep->ctrlq_idx = ctrlq_idx;
PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
- tx_chan, smac_idx, rss_qid);
+ child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
init_timer(&child_ep->timer);
cxgb4_insert_tid(t, child_ep, hwtid);
@@ -1792,11 +1814,8 @@ static int is_neg_adv_abort(unsigned int status)
static int c4iw_reconnect(struct c4iw_ep *ep)
{
- int err = 0;
struct rtable *rt;
- struct net_device *pdev;
- struct neighbour *neigh;
- int step;
+ int err = 0;
PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
init_timer(&ep->timer);
@@ -1824,47 +1843,10 @@ static int c4iw_reconnect(struct c4iw_ep *ep)
}
ep->dst = &rt->dst;
- rcu_read_lock();
- neigh = dst_get_neighbour(ep->dst);
-
- /* get a l2t entry */
- if (neigh->dev->flags & IFF_LOOPBACK) {
- PDBG("%s LOOPBACK\n", __func__);
- pdev = ip_dev_find(&init_net,
- ep->com.cm_id->remote_addr.sin_addr.s_addr);
- ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
- neigh, pdev, 0);
- ep->mtu = pdev->mtu;
- ep->tx_chan = cxgb4_port_chan(pdev);
- ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
- step = ep->com.dev->rdev.lldi.ntxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->txq_idx = cxgb4_port_idx(pdev) * step;
- step = ep->com.dev->rdev.lldi.nrxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->ctrlq_idx = cxgb4_port_idx(pdev);
- ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
- cxgb4_port_idx(pdev) * step];
- dev_put(pdev);
- } else {
- ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
- neigh, neigh->dev, 0);
- ep->mtu = dst_mtu(ep->dst);
- ep->tx_chan = cxgb4_port_chan(neigh->dev);
- ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
- step = ep->com.dev->rdev.lldi.ntxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->txq_idx = cxgb4_port_idx(neigh->dev) * step;
- ep->ctrlq_idx = cxgb4_port_idx(neigh->dev);
- step = ep->com.dev->rdev.lldi.nrxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
- cxgb4_port_idx(neigh->dev) * step];
- }
- rcu_read_unlock();
- if (!ep->l2t) {
+ err = import_ep(ep, ep->com.cm_id->remote_addr.sin_addr.s_addr,
+ ep->dst, ep->com.dev, false);
+ if (err) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
- err = -ENOMEM;
goto fail4;
}
@@ -2240,13 +2222,10 @@ err:
int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
{
- int err = 0;
struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
struct c4iw_ep *ep;
struct rtable *rt;
- struct net_device *pdev;
- struct neighbour *neigh;
- int step;
+ int err = 0;
if ((conn_param->ord > c4iw_max_read_depth) ||
(conn_param->ird > c4iw_max_read_depth)) {
@@ -2307,49 +2286,10 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
}
ep->dst = &rt->dst;
- rcu_read_lock();
- neigh = dst_get_neighbour(ep->dst);
-
- /* get a l2t entry */
- if (neigh->dev->flags & IFF_LOOPBACK) {
- PDBG("%s LOOPBACK\n", __func__);
- pdev = ip_dev_find(&init_net,
- cm_id->remote_addr.sin_addr.s_addr);
- ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
- neigh, pdev, 0);
- ep->mtu = pdev->mtu;
- ep->tx_chan = cxgb4_port_chan(pdev);
- ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
- step = ep->com.dev->rdev.lldi.ntxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->txq_idx = cxgb4_port_idx(pdev) * step;
- step = ep->com.dev->rdev.lldi.nrxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->ctrlq_idx = cxgb4_port_idx(pdev);
- ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
- cxgb4_port_idx(pdev) * step];
- dev_put(pdev);
- } else {
- ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
- neigh, neigh->dev, 0);
- ep->mtu = dst_mtu(ep->dst);
- ep->tx_chan = cxgb4_port_chan(neigh->dev);
- ep->smac_idx = (cxgb4_port_viid(neigh->dev) & 0x7F) << 1;
- step = ep->com.dev->rdev.lldi.ntxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->txq_idx = cxgb4_port_idx(neigh->dev) * step;
- ep->ctrlq_idx = cxgb4_port_idx(neigh->dev);
- step = ep->com.dev->rdev.lldi.nrxq /
- ep->com.dev->rdev.lldi.nchan;
- ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
- cxgb4_port_idx(neigh->dev) * step];
- ep->retry_with_mpa_v1 = 0;
- ep->tried_with_mpa_v1 = 0;
- }
- rcu_read_unlock();
- if (!ep->l2t) {
+ err = import_ep(ep, cm_id->remote_addr.sin_addr.s_addr,
+ ep->dst, ep->com.dev, true);
+ if (err) {
printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
- err = -ENOMEM;
goto fail4;
}
diff --git a/drivers/infiniband/hw/ehca/ehca_classes.h b/drivers/infiniband/hw/ehca/ehca_classes.h
index aaf6023..f08f6ea 100644
--- a/drivers/infiniband/hw/ehca/ehca_classes.h
+++ b/drivers/infiniband/hw/ehca/ehca_classes.h
@@ -379,8 +379,8 @@ extern spinlock_t shca_list_lock;
extern int ehca_static_rate;
extern int ehca_port_act_time;
-extern int ehca_use_hp_mr;
-extern int ehca_scaling_code;
+extern bool ehca_use_hp_mr;
+extern bool ehca_scaling_code;
extern int ehca_lock_hcalls;
extern int ehca_nr_ports;
extern int ehca_max_cq;
diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c
index c240e99..832e7a7 100644
--- a/drivers/infiniband/hw/ehca/ehca_main.c
+++ b/drivers/infiniband/hw/ehca/ehca_main.c
@@ -59,16 +59,16 @@ MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HCA InfiniBand Device Driver");
MODULE_VERSION(HCAD_VERSION);
-static int ehca_open_aqp1 = 0;
+static bool ehca_open_aqp1 = 0;
static int ehca_hw_level = 0;
-static int ehca_poll_all_eqs = 1;
+static bool ehca_poll_all_eqs = 1;
int ehca_debug_level = 0;
int ehca_nr_ports = -1;
-int ehca_use_hp_mr = 0;
+bool ehca_use_hp_mr = 0;
int ehca_port_act_time = 30;
int ehca_static_rate = -1;
-int ehca_scaling_code = 0;
+bool ehca_scaling_code = 0;
int ehca_lock_hcalls = -1;
int ehca_max_cq = -1;
int ehca_max_qp = -1;
@@ -82,7 +82,7 @@ module_param_named(port_act_time, ehca_port_act_time, int, S_IRUGO);
module_param_named(poll_all_eqs, ehca_poll_all_eqs, bool, S_IRUGO);
module_param_named(static_rate, ehca_static_rate, int, S_IRUGO);
module_param_named(scaling_code, ehca_scaling_code, bool, S_IRUGO);
-module_param_named(lock_hcalls, ehca_lock_hcalls, bool, S_IRUGO);
+module_param_named(lock_hcalls, ehca_lock_hcalls, bint, S_IRUGO);
module_param_named(number_of_cqs, ehca_max_cq, int, S_IRUGO);
module_param_named(number_of_qps, ehca_max_qp, int, S_IRUGO);
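lock_hcalls keeps its int storage because -1 means auto-detect, so its parameter type moves to "bint", which parses bool-style input (Y/N/0/1) into an int variable. A minimal hedged sketch (example_lock is a hypothetical parameter):

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stat.h>

static int example_lock = -1;   /* -1 = auto-detect, 0/1 = explicit override */
module_param_named(lock, example_lock, bint, S_IRUGO);
MODULE_PARM_DESC(lock, "serialize hardware calls (default: auto-detect)");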
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index 31ae1b1..a4de9d5 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -46,7 +46,7 @@
static struct super_block *ipath_super;
static int ipathfs_mknod(struct inode *dir, struct dentry *dentry,
- int mode, const struct file_operations *fops,
+ umode_t mode, const struct file_operations *fops,
void *data)
{
int error;
@@ -61,7 +61,7 @@ static int ipathfs_mknod(struct inode *dir, struct dentry *dentry,
inode->i_mode = mode;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
inode->i_private = data;
- if ((mode & S_IFMT) == S_IFDIR) {
+ if (S_ISDIR(mode)) {
inode->i_op = &simple_dir_inode_operations;
inc_nlink(inode);
inc_nlink(dir);
@@ -76,7 +76,7 @@ bail:
return error;
}
-static int create_file(const char *name, mode_t mode,
+static int create_file(const char *name, umode_t mode,
struct dentry *parent, struct dentry **dentry,
const struct file_operations *fops, void *data)
{
@@ -89,7 +89,7 @@ static int create_file(const char *name, mode_t mode,
error = ipathfs_mknod(parent->d_inode, *dentry,
mode, fops, data);
else
- error = PTR_ERR(dentry);
+ error = PTR_ERR(*dentry);
mutex_unlock(&parent->d_inode->i_mutex);
return error;
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c
index 4b8f9c4..a251bec 100644
--- a/drivers/infiniband/hw/mlx4/ah.c
+++ b/drivers/infiniband/hw/mlx4/ah.c
@@ -126,7 +126,7 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr
ah->av.ib.dlid = cpu_to_be16(0xc000);
memcpy(ah->av.eth.dgid, ah_attr->grh.dgid.raw, 16);
- ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 28);
+ ah->av.eth.sl_tclass_flowlabel = cpu_to_be32(ah_attr->sl << 29);
return &ah->ibah;
}
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index e8df155..5ecf38d 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -715,13 +715,17 @@ repoll:
}
wc->slid = be16_to_cpu(cqe->rlid);
- wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn);
wc->src_qp = g_mlpath_rqpn & 0xffffff;
wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
wc->wc_flags |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
wc->pkey_index = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
wc->csum_ok = mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum);
+ if (rdma_port_get_link_layer(wc->qp->device,
+ (*cur_qp)->port) == IB_LINK_LAYER_ETHERNET)
+ wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
+ else
+ wc->sl = be16_to_cpu(cqe->sl_vid) >> 12;
}
return 0;
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index f36da99..259b067 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -109,7 +109,8 @@ int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int ignore_mkey, int ignore_bkey,
err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma,
in_modifier, op_modifier,
- MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
if (!err)
memcpy(response_mad, outmailbox->buf, 256);
@@ -256,12 +257,9 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
return IB_MAD_RESULT_SUCCESS;
/*
- * Don't process SMInfo queries or vendor-specific
- * MADs -- the SMA can't handle them.
+ * Don't process SMInfo queries -- the SMA can't handle them.
*/
- if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
- ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
- IB_SMP_ATTR_VENDOR_MASK))
+ if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
return IB_MAD_RESULT_SUCCESS;
} else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1 ||
@@ -330,7 +328,8 @@ static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
return IB_MAD_RESULT_FAILURE;
err = mlx4_cmd_box(dev->dev, 0, mailbox->dma, inmod, 0,
- MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C);
+ MLX4_CMD_QUERY_IF_STAT, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_WRAPPED);
if (err)
err = IB_MAD_RESULT_FAILURE;
else {
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 18836cd..7b445df 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -177,7 +177,7 @@ mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
struct mlx4_dev *dev = to_mdev(device)->dev;
- return dev->caps.port_mask & (1 << (port_num - 1)) ?
+ return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}
@@ -434,7 +434,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
memset(mailbox->buf, 0, 256);
memcpy(mailbox->buf, props->node_desc, 64);
mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
- MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);
@@ -463,7 +463,7 @@ static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
}
err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev->dev, mailbox);
return err;
@@ -899,7 +899,8 @@ static void update_gids_task(struct work_struct *work)
memcpy(gids, gw->gids, sizeof gw->gids);
err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
- 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B);
+ 1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
if (err)
printk(KERN_WARNING "set port command failed\n");
else {
@@ -1074,6 +1075,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
printk_once(KERN_INFO "%s", mlx4_ib_version);
+ if (mlx4_is_mfunc(dev)) {
+ printk(KERN_WARNING "IB not yet supported in SRIOV\n");
+ return NULL;
+ }
+
mlx4_foreach_ib_transport_port(i, dev)
num_ports++;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index a16f0c8..aa2aefa 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -962,7 +962,7 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah,
if (is_eth) {
path->sched_queue = MLX4_IB_DEFAULT_SCHED_QUEUE |
- ((port - 1) << 6) | ((ah->sl & 7) << 3) | ((ah->sl & 8) >> 1);
+ ((port - 1) << 6) | ((ah->sl & 7) << 3);
if (!(ah->ah_flags & IB_AH_GRH))
return -1;
@@ -1437,7 +1437,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
u16 pcp;
sqp->ud_header.vlan.type = cpu_to_be16(MLX4_IB_IBOE_ETHERTYPE);
- pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 27 & 3) << 13;
+ pcp = (be32_to_cpu(ah->av.ib.sl_tclass_flowlabel) >> 29) << 13;
sqp->ud_header.vlan.tag = cpu_to_be16(vlan | pcp);
}
} else {
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 5965b3d..7140199 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@@ -96,7 +96,7 @@ unsigned int wqm_quanta = 0x10000;
module_param(wqm_quanta, int, 0644);
MODULE_PARM_DESC(wqm_quanta, "WQM quanta");
-static unsigned int limit_maxrdreqsz;
+static bool limit_maxrdreqsz;
module_param(limit_maxrdreqsz, bool, 0644);
MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index 568b4f1..c438e46 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 0a52d72..a4972ab 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -233,6 +233,7 @@ static int send_mpa_reject(struct nes_cm_node *cm_node)
u8 *start_ptr = &start_addr;
u8 **start_buff = &start_ptr;
u16 buff_len = 0;
+ struct ietf_mpa_v1 *mpa_frame;
skb = dev_alloc_skb(MAX_CM_BUFFER);
if (!skb) {
@@ -242,6 +243,8 @@ static int send_mpa_reject(struct nes_cm_node *cm_node)
/* send an MPA reject frame */
cm_build_mpa_frame(cm_node, start_buff, &buff_len, NULL, MPA_KEY_REPLY);
+ mpa_frame = (struct ietf_mpa_v1 *)*start_buff;
+ mpa_frame->flags |= IETF_MPA_FLAGS_REJECT;
form_cm_frame(skb, cm_node, NULL, 0, *start_buff, buff_len, SET_ACK | SET_FIN);
cm_node->state = NES_CM_STATE_FIN_WAIT1;
@@ -1348,7 +1351,8 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
else
netdev = nesvnic->netdev;
- neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, netdev);
+ rcu_read_lock();
+ neigh = dst_get_neighbour_noref(&rt->dst);
if (neigh) {
if (neigh->nud_state & NUD_VALID) {
nes_debug(NES_DBG_CM, "Neighbor MAC address for 0x%08X"
@@ -1359,9 +1363,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
if (!memcmp(nesadapter->arp_table[arpindex].mac_addr,
neigh->ha, ETH_ALEN)) {
/* Mac address same as in nes_arp_table */
- neigh_release(neigh);
- ip_rt_put(rt);
- return rc;
+ goto out;
}
nes_manage_arp_cache(nesvnic->netdev,
@@ -1373,15 +1375,13 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
dst_ip, NES_ARP_ADD);
rc = nes_arp_table(nesvnic->nesdev, dst_ip, NULL,
NES_ARP_RESOLVE);
+ } else {
+ neigh_event_send(neigh, NULL);
}
- neigh_release(neigh);
}
- if ((neigh == NULL) || (!(neigh->nud_state & NUD_VALID))) {
- rcu_read_lock();
- neigh_event_send(dst_get_neighbour(&rt->dst), NULL);
- rcu_read_unlock();
- }
+out:
+ rcu_read_unlock();
ip_rt_put(rt);
return rc;
}
@@ -2838,6 +2838,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
issue_disconn = 1;
issue_close = 1;
nesqp->cm_id = NULL;
+ del_timer(&nesqp->terminate_timer);
if (nesqp->flush_issued == 0) {
nesqp->flush_issued = 1;
issue_flush = 1;
diff --git a/drivers/infiniband/hw/nes/nes_cm.h b/drivers/infiniband/hw/nes/nes_cm.h
index bdfa1fb..4646e66 100644
--- a/drivers/infiniband/hw/nes/nes_cm.h
+++ b/drivers/infiniband/hw/nes/nes_cm.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/nes/nes_context.h b/drivers/infiniband/hw/nes/nes_context.h
index b4393a1..a69eef1 100644
--- a/drivers/infiniband/hw/nes/nes_context.h
+++ b/drivers/infiniband/hw/nes/nes_context.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 7c0ff19..d42c9f4 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -1529,7 +1529,7 @@ int nes_init_phy(struct nes_device *nesdev)
} else {
/* setup 10G MDIO operation */
tx_config &= 0xFFFFFFE3;
- tx_config |= 0x15;
+ tx_config |= 0x1D;
}
nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
@@ -3619,10 +3619,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
}
break;
case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
- if (nesqp->term_flags) {
- nes_terminate_done(nesqp, 0);
- return;
- }
spin_lock_irqsave(&nesqp->lock, flags);
nesqp->hw_iwarp_state = iwarp_state;
nesqp->hw_tcp_state = tcp_state;
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index 0b590e1..d748e4b 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
+* Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
index b3b2a24..3ba7be3 100644
--- a/drivers/infiniband/hw/nes/nes_mgt.c
+++ b/drivers/infiniband/hw/nes/nes_mgt.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel-NE, Inc. All rights reserved.
+ * Copyright (c) 2006 - 2011 Intel-NE, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/nes/nes_mgt.h b/drivers/infiniband/hw/nes/nes_mgt.h
index 8c8af25..4f7f701 100644
--- a/drivers/infiniband/hw/nes/nes_mgt.h
+++ b/drivers/infiniband/hw/nes/nes_mgt.h
@@ -1,5 +1,5 @@
/*
-* Copyright (c) 2010 Intel-NE, Inc. All rights reserved.
+* Copyright (c) 2006 - 2011 Intel-NE, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index c00d2f3..f3a3ecf 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -1589,7 +1589,7 @@ static const struct ethtool_ops nes_ethtool_ops = {
.set_pauseparam = nes_netdev_set_pauseparam,
};
-static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, u32 features)
+static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev, netdev_features_t features)
{
struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 u32temp;
@@ -1610,7 +1610,7 @@ static void nes_vlan_mode(struct net_device *netdev, struct nes_device *nesdev,
spin_unlock_irqrestore(&nesadapter->phy_lock, flags);
}
-static u32 nes_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t nes_fix_features(struct net_device *netdev, netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -1624,7 +1624,7 @@ static u32 nes_fix_features(struct net_device *netdev, u32 features)
return features;
}
-static int nes_set_features(struct net_device *netdev, u32 features)
+static int nes_set_features(struct net_device *netdev, netdev_features_t features)
{
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
diff --git a/drivers/infiniband/hw/nes/nes_user.h b/drivers/infiniband/hw/nes/nes_user.h
index 71e133a..4926de7 100644
--- a/drivers/infiniband/hw/nes/nes_user.h
+++ b/drivers/infiniband/hw/nes/nes_user.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
diff --git a/drivers/infiniband/hw/nes/nes_utils.c b/drivers/infiniband/hw/nes/nes_utils.c
index cd10968..e98f4fc 100644
--- a/drivers/infiniband/hw/nes/nes_utils.c
+++ b/drivers/infiniband/hw/nes/nes_utils.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -56,7 +56,7 @@ static u16 nes_read16_eeprom(void __iomem *addr, u16 offset);
u32 mh_detected;
u32 mh_pauses_sent;
-u32 nes_set_pau(struct nes_device *nesdev)
+static u32 nes_set_pau(struct nes_device *nesdev)
{
u32 ret = 0;
u32 counter;
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 5095bc4..0927b5c 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -3428,6 +3428,8 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX,
ib_wr->wr.fast_reg.length);
set_wqe_32bit_value(wqe->wqe_words,
+ NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX, 0);
+ set_wqe_32bit_value(wqe->wqe_words,
NES_IWARP_SQ_FMR_WQE_MR_STAG_IDX,
ib_wr->wr.fast_reg.rkey);
/* Set page size: */
@@ -3724,7 +3726,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
entry->opcode = IB_WC_SEND;
break;
case NES_IWARP_SQ_OP_LOCINV:
- entry->opcode = IB_WR_LOCAL_INV;
+ entry->opcode = IB_WC_LOCAL_INV;
break;
case NES_IWARP_SQ_OP_FAST_REG:
entry->opcode = IB_WC_FAST_REG_MR;
diff --git a/drivers/infiniband/hw/nes/nes_verbs.h b/drivers/infiniband/hw/nes/nes_verbs.h
index fe6b6e9..0eff7c4 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.h
+++ b/drivers/infiniband/hw/nes/nes_verbs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006 - 2009 Intel Corporation. All rights reserved.
+ * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
diff --git a/drivers/infiniband/hw/qib/qib_7220.h b/drivers/infiniband/hw/qib/qib_7220.h
index 21f374a..a5356cb 100644
--- a/drivers/infiniband/hw/qib/qib_7220.h
+++ b/drivers/infiniband/hw/qib/qib_7220.h
@@ -97,7 +97,7 @@ struct qib_chippport_specific {
u64 iblnkerrsnap;
u64 ibcctrl; /* kr_ibcctrl shadow */
u64 ibcddrctrl; /* kr_ibcddrctrl shadow */
- u64 chase_end;
+ unsigned long chase_end;
u32 last_delay_mult;
};
diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c
index c90a55f..6fc9365 100644
--- a/drivers/infiniband/hw/qib/qib_driver.c
+++ b/drivers/infiniband/hw/qib/qib_driver.c
@@ -371,9 +371,8 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
lnh == QIB_LRH_GRH,
qp,
be32_to_cpu(ohdr->bth[0]));
- if (ruc_res) {
+ if (ruc_res)
goto unlock;
- }
/* Only deal with RDMA Writes for now */
if (opcode <
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index df7fa25..05e0f17 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -47,7 +47,7 @@ static struct super_block *qib_super;
#define private2dd(file) ((file)->f_dentry->d_inode->i_private)
static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
- int mode, const struct file_operations *fops,
+ umode_t mode, const struct file_operations *fops,
void *data)
{
int error;
@@ -67,7 +67,7 @@ static int qibfs_mknod(struct inode *dir, struct dentry *dentry,
inode->i_mtime = inode->i_atime;
inode->i_ctime = inode->i_atime;
inode->i_private = data;
- if ((mode & S_IFMT) == S_IFDIR) {
+ if (S_ISDIR(mode)) {
inode->i_op = &simple_dir_inode_operations;
inc_nlink(inode);
inc_nlink(dir);
@@ -82,7 +82,7 @@ bail:
return error;
}
-static int create_file(const char *name, mode_t mode,
+static int create_file(const char *name, umode_t mode,
struct dentry *parent, struct dentry **dentry,
const struct file_operations *fops, void *data)
{
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 781a802..d0c64d5 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -2076,9 +2076,11 @@ static void qib_6120_config_ctxts(struct qib_devdata *dd)
static void qib_update_6120_usrhead(struct qib_ctxtdata *rcd, u64 hd,
u32 updegr, u32 egrhd, u32 npkts)
{
- qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
if (updegr)
qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
+ mmiowb();
+ qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ mmiowb();
}
static u32 qib_6120_hdrqempty(struct qib_ctxtdata *rcd)
@@ -2103,7 +2105,7 @@ static void alloc_dummy_hdrq(struct qib_devdata *dd)
dd->cspec->dummy_hdrq = dma_alloc_coherent(&dd->pcidev->dev,
dd->rcd[0]->rcvhdrq_size,
&dd->cspec->dummy_hdrq_phys,
- GFP_KERNEL | __GFP_COMP);
+ GFP_ATOMIC | __GFP_COMP);
if (!dd->cspec->dummy_hdrq) {
qib_devinfo(dd->pcidev, "Couldn't allocate dummy hdrq\n");
/* fallback to just 0'ing */
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 439d3c5..3c722f7 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -1051,7 +1051,7 @@ static void reenable_7220_chase(unsigned long opaque)
static void handle_7220_chase(struct qib_pportdata *ppd, u64 ibcst)
{
u8 ibclt;
- u64 tnow;
+ unsigned long tnow;
ibclt = (u8)SYM_FIELD(ibcst, IBCStatus, LinkTrainingState);
@@ -1066,9 +1066,9 @@ static void handle_7220_chase(struct qib_pportdata *ppd, u64 ibcst)
case IB_7220_LT_STATE_CFGWAITRMT:
case IB_7220_LT_STATE_TXREVLANES:
case IB_7220_LT_STATE_CFGENH:
- tnow = get_jiffies_64();
+ tnow = jiffies;
if (ppd->cpspec->chase_end &&
- time_after64(tnow, ppd->cpspec->chase_end)) {
+ time_after(tnow, ppd->cpspec->chase_end)) {
ppd->cpspec->chase_end = 0;
qib_set_ib_7220_lstate(ppd,
QLOGIC_IB_IBCC_LINKCMD_DOWN,
@@ -2725,9 +2725,11 @@ static int qib_7220_set_loopback(struct qib_pportdata *ppd, const char *what)
static void qib_update_7220_usrhead(struct qib_ctxtdata *rcd, u64 hd,
u32 updegr, u32 egrhd, u32 npkts)
{
- qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
if (updegr)
qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
+ mmiowb();
+ qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ mmiowb();
}
static u32 qib_7220_hdrqempty(struct qib_ctxtdata *rcd)
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 1d58959..41e9208 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -615,8 +615,8 @@ struct qib_chippport_specific {
u64 ibmalfsnap;
u64 ibcctrl_a; /* krp_ibcctrl_a shadow */
u64 ibcctrl_b; /* krp_ibcctrl_b shadow */
- u64 qdr_dfe_time;
- u64 chase_end;
+ unsigned long qdr_dfe_time;
+ unsigned long chase_end;
u32 autoneg_tries;
u32 recovery_init;
u32 qdr_dfe_on;
@@ -1672,7 +1672,8 @@ static void reenable_chase(unsigned long opaque)
QLOGIC_IB_IBCC_LINKINITCMD_POLL);
}
-static void disable_chase(struct qib_pportdata *ppd, u64 tnow, u8 ibclt)
+static void disable_chase(struct qib_pportdata *ppd, unsigned long tnow,
+ u8 ibclt)
{
ppd->cpspec->chase_end = 0;
@@ -1688,7 +1689,7 @@ static void disable_chase(struct qib_pportdata *ppd, u64 tnow, u8 ibclt)
static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
{
u8 ibclt;
- u64 tnow;
+ unsigned long tnow;
ibclt = (u8)SYM_FIELD(ibcst, IBCStatusA_0, LinkTrainingState);
@@ -1703,9 +1704,9 @@ static void handle_serdes_issues(struct qib_pportdata *ppd, u64 ibcst)
case IB_7322_LT_STATE_CFGWAITRMT:
case IB_7322_LT_STATE_TXREVLANES:
case IB_7322_LT_STATE_CFGENH:
- tnow = get_jiffies_64();
+ tnow = jiffies;
if (ppd->cpspec->chase_end &&
- time_after64(tnow, ppd->cpspec->chase_end))
+ time_after(tnow, ppd->cpspec->chase_end))
disable_chase(ppd, tnow, ibclt);
else if (!ppd->cpspec->chase_end)
ppd->cpspec->chase_end = tnow + QIB_CHASE_TIME;
@@ -2714,7 +2715,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
pins >>= SYM_LSB(EXTStatus, GPIOIn);
if (!(pins & mask)) {
++handled;
- qd->t_insert = get_jiffies_64();
+ qd->t_insert = jiffies;
queue_work(ib_wq, &qd->work);
}
}
@@ -3602,7 +3603,7 @@ static void qib_7322_config_ctxts(struct qib_devdata *dd)
if (qib_rcvhdrcnt)
dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt, qib_rcvhdrcnt);
else
- dd->rcvhdrcnt = max(dd->cspec->rcvegrcnt,
+ dd->rcvhdrcnt = 2 * max(dd->cspec->rcvegrcnt,
dd->num_pports > 1 ? 1024U : 2048U);
}
@@ -4082,10 +4083,12 @@ static void qib_update_7322_usrhead(struct qib_ctxtdata *rcd, u64 hd,
*/
if (hd >> IBA7322_HDRHEAD_PKTINT_SHIFT)
adjust_rcv_timeout(rcd, npkts);
- qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
- qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
if (updegr)
qib_write_ureg(rcd->dd, ur_rcvegrindexhead, egrhd, rcd->ctxt);
+ mmiowb();
+ qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ qib_write_ureg(rcd->dd, ur_rcvhdrhead, hd, rcd->ctxt);
+ mmiowb();
}
static u32 qib_7322_hdrqempty(struct qib_ctxtdata *rcd)
@@ -4794,7 +4797,7 @@ static void qib_get_7322_faststats(unsigned long opaque)
(ppd->lflags & (QIBL_LINKINIT | QIBL_LINKARMED |
QIBL_LINKACTIVE)) &&
ppd->cpspec->qdr_dfe_time &&
- time_after64(get_jiffies_64(), ppd->cpspec->qdr_dfe_time)) {
+ time_is_before_jiffies(ppd->cpspec->qdr_dfe_time)) {
ppd->cpspec->qdr_dfe_on = 0;
qib_write_kreg_port(ppd, krp_static_adapt_dis(2),
@@ -5240,7 +5243,7 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs)
/* schedule the qsfp refresh which should turn the link
off */
if (ppd->dd->flags & QIB_HAS_QSFP) {
- qd->t_insert = get_jiffies_64();
+ qd->t_insert = jiffies;
queue_work(ib_wq, &qd->work);
}
spin_lock_irqsave(&ppd->sdma_lock, flags);
@@ -5592,7 +5595,7 @@ static void qsfp_7322_event(struct work_struct *work)
{
struct qib_qsfp_data *qd;
struct qib_pportdata *ppd;
- u64 pwrup;
+ unsigned long pwrup;
unsigned long flags;
int ret;
u32 le2;
@@ -5620,8 +5623,7 @@ static void qsfp_7322_event(struct work_struct *work)
* to insertion.
*/
while (1) {
- u64 now = get_jiffies_64();
- if (time_after64(now, pwrup))
+ if (time_is_before_jiffies(pwrup))
break;
msleep(20);
}
@@ -7506,7 +7508,7 @@ static int serdes_7322_init_old(struct qib_pportdata *ppd)
static int serdes_7322_init_new(struct qib_pportdata *ppd)
{
- u64 tstart;
+ unsigned long tend;
u32 le_val, rxcaldone;
int chan, chan_done = (1 << SERDES_CHANS) - 1;
@@ -7611,10 +7613,8 @@ static int serdes_7322_init_new(struct qib_pportdata *ppd)
msleep(20);
/* Start Calibration */
ibsd_wr_allchans(ppd, 4, (1 << 10), BMASK(10, 10));
- tstart = get_jiffies_64();
- while (chan_done &&
- !time_after64(get_jiffies_64(),
- tstart + msecs_to_jiffies(500))) {
+ tend = jiffies + msecs_to_jiffies(500);
+ while (chan_done && !time_is_before_jiffies(tend)) {
msleep(20);
for (chan = 0; chan < SERDES_CHANS; ++chan) {
rxcaldone = ahb_mod(ppd->dd, IBSD(ppd->hw_pidx),
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 58b0f8a..cf0cd30 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -1015,7 +1015,7 @@ static int __devinit qib_init_one(struct pci_dev *,
#define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: "
#define PFX QIB_DRV_NAME ": "
-static const struct pci_device_id qib_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(qib_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_PATHSCALE, PCI_DEVICE_ID_QLOGIC_IB_6120) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7220) },
{ PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, PCI_DEVICE_ID_QLOGIC_IB_7322) },
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 97a8bdf..0fde788 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -562,7 +562,7 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
*/
static int qib_pcie_caps;
module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO);
-MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (4lsb), ReadReq (D4..7)");
+MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)");
static int qib_tune_pcie_caps(struct qib_devdata *dd)
{
diff --git a/drivers/infiniband/hw/qib/qib_qsfp.h b/drivers/infiniband/hw/qib/qib_qsfp.h
index 46002a9..91908f5 100644
--- a/drivers/infiniband/hw/qib/qib_qsfp.h
+++ b/drivers/infiniband/hw/qib/qib_qsfp.h
@@ -177,7 +177,7 @@ struct qib_qsfp_data {
struct qib_pportdata *ppd;
struct work_struct work;
struct qib_qsfp_cache cache;
- u64 t_insert;
+ unsigned long t_insert;
u8 modpresent;
};
diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c
index de1a4b2..ac065dd 100644
--- a/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -300,7 +300,7 @@ bail:
}
static void qib_sd_trimdone_monitor(struct qib_devdata *dd,
- const char *where)
+ const char *where)
{
int ret, chn, baduns;
u64 val;
diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c
index 78fbd56..dae5160 100644
--- a/drivers/infiniband/hw/qib/qib_sysfs.c
+++ b/drivers/infiniband/hw/qib/qib_sysfs.c
@@ -150,7 +150,7 @@ static ssize_t show_status(struct qib_pportdata *ppd, char *buf)
* For userland compatibility, these offsets must remain fixed.
* They are strings for QIB_STATUS_*
*/
-static const char *qib_status_str[] = {
+static const char * const qib_status_str[] = {
"Initted",
"",
"",
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index a894762..7b6c3bf 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -913,8 +913,8 @@ static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss,
__raw_writel(last, piobuf);
}
-static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
- struct qib_qp *qp, int *retp)
+static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
+ struct qib_qp *qp)
{
struct qib_verbs_txreq *tx;
unsigned long flags;
@@ -926,8 +926,9 @@ static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
struct list_head *l = dev->txreq_free.next;
list_del(l);
+ spin_unlock(&dev->pending_lock);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
- *retp = 0;
} else {
if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
list_empty(&qp->iowait)) {
@@ -935,14 +936,33 @@ static struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
qp->s_flags |= QIB_S_WAIT_TX;
list_add_tail(&qp->iowait, &dev->txwait);
}
- tx = NULL;
qp->s_flags &= ~QIB_S_BUSY;
- *retp = -EBUSY;
+ spin_unlock(&dev->pending_lock);
+ spin_unlock_irqrestore(&qp->s_lock, flags);
+ tx = ERR_PTR(-EBUSY);
}
+ return tx;
+}
- spin_unlock(&dev->pending_lock);
- spin_unlock_irqrestore(&qp->s_lock, flags);
+static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
+ struct qib_qp *qp)
+{
+ struct qib_verbs_txreq *tx;
+ unsigned long flags;
+ spin_lock_irqsave(&dev->pending_lock, flags);
+	/* assume the list is non-empty */
+ if (likely(!list_empty(&dev->txreq_free))) {
+ struct list_head *l = dev->txreq_free.next;
+
+ list_del(l);
+ spin_unlock_irqrestore(&dev->pending_lock, flags);
+ tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
+ } else {
+ /* call slow path to get the extra lock */
+ spin_unlock_irqrestore(&dev->pending_lock, flags);
+ tx = __get_txreq(dev, qp);
+ }
return tx;
}
@@ -1122,9 +1142,9 @@ static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
goto bail;
}
- tx = get_txreq(dev, qp, &ret);
- if (!tx)
- goto bail;
+ tx = get_txreq(dev, qp);
+ if (IS_ERR(tx))
+ goto bail_tx;
control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
be16_to_cpu(hdr->lrh[0]) >> 12);
@@ -1195,6 +1215,9 @@ unaligned:
ibp->n_unaligned++;
bail:
return ret;
+bail_tx:
+ ret = PTR_ERR(tx);
+ goto bail;
}
/*
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index b3cc1e0..86df632 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -44,6 +44,7 @@
#include <linux/mutex.h>
#include <net/neighbour.h>
+#include <net/sch_generic.h>
#include <linux/atomic.h>
@@ -117,8 +118,9 @@ struct ipoib_header {
u16 reserved;
};
-struct ipoib_pseudoheader {
- u8 hwaddr[INFINIBAND_ALEN];
+struct ipoib_cb {
+ struct qdisc_skb_cb qdisc_cb;
+ u8 hwaddr[INFINIBAND_ALEN];
};
/* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 83695b4..3974c29 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -171,7 +171,7 @@ static int ipoib_stop(struct net_device *dev)
return 0;
}
-static u32 ipoib_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_features_t features)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -556,15 +556,13 @@ static int path_rec_start(struct net_device *dev,
}
/* called with rcu_read_lock */
-static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
+static void neigh_add_path(struct sk_buff *skb, struct neighbour *n, struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_path *path;
struct ipoib_neigh *neigh;
- struct neighbour *n;
unsigned long flags;
- n = dst_get_neighbour(skb_dst(skb));
neigh = ipoib_neigh_alloc(n, skb->dev);
if (!neigh) {
++dev->stats.tx_dropped;
@@ -638,16 +636,13 @@ err_drop:
}
/* called with rcu_read_lock */
-static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
+static void ipoib_path_lookup(struct sk_buff *skb, struct neighbour *n, struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(skb->dev);
- struct dst_entry *dst = skb_dst(skb);
- struct neighbour *n;
/* Look up path record for unicasts */
- n = dst_get_neighbour(dst);
if (n->ha[4] != 0xff) {
- neigh_add_path(skb, dev);
+ neigh_add_path(skb, n, dev);
return;
}
@@ -658,7 +653,7 @@ static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev)
}
static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
- struct ipoib_pseudoheader *phdr)
+ struct ipoib_cb *cb)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_path *path;
@@ -666,17 +661,15 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
spin_lock_irqsave(&priv->lock, flags);
- path = __path_find(dev, phdr->hwaddr + 4);
+ path = __path_find(dev, cb->hwaddr + 4);
if (!path || !path->valid) {
int new_path = 0;
if (!path) {
- path = path_rec_create(dev, phdr->hwaddr + 4);
+ path = path_rec_create(dev, cb->hwaddr + 4);
new_path = 1;
}
if (path) {
- /* put pseudoheader back on for next time */
- skb_push(skb, sizeof *phdr);
__skb_queue_tail(&path->queue, skb);
if (!path->query && path_rec_start(dev, path)) {
@@ -700,12 +693,10 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
be16_to_cpu(path->pathrec.dlid));
spin_unlock_irqrestore(&priv->lock, flags);
- ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
+ ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr));
return;
} else if ((path->query || !path_rec_start(dev, path)) &&
skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
- /* put pseudoheader back on for next time */
- skb_push(skb, sizeof *phdr);
__skb_queue_tail(&path->queue, skb);
} else {
++dev->stats.tx_dropped;
@@ -723,12 +714,17 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned long flags;
rcu_read_lock();
- if (likely(skb_dst(skb)))
- n = dst_get_neighbour(skb_dst(skb));
-
+ if (likely(skb_dst(skb))) {
+ n = dst_get_neighbour_noref(skb_dst(skb));
+ if (!n) {
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ goto unlock;
+ }
+ }
if (likely(n)) {
if (unlikely(!*to_ipoib_neigh(n))) {
- ipoib_path_lookup(skb, dev);
+ ipoib_path_lookup(skb, n, dev);
goto unlock;
}
@@ -751,7 +747,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
list_del(&neigh->list);
ipoib_neigh_free(dev, neigh);
spin_unlock_irqrestore(&priv->lock, flags);
- ipoib_path_lookup(skb, dev);
+ ipoib_path_lookup(skb, n, dev);
goto unlock;
}
@@ -774,16 +770,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
dev_kfree_skb_any(skb);
}
} else {
- struct ipoib_pseudoheader *phdr =
- (struct ipoib_pseudoheader *) skb->data;
- skb_pull(skb, sizeof *phdr);
+ struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
- if (phdr->hwaddr[4] == 0xff) {
+ if (cb->hwaddr[4] == 0xff) {
/* Add in the P_Key for multicast*/
- phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff;
- phdr->hwaddr[9] = priv->pkey & 0xff;
+ cb->hwaddr[8] = (priv->pkey >> 8) & 0xff;
+ cb->hwaddr[9] = priv->pkey & 0xff;
- ipoib_mcast_send(dev, phdr->hwaddr + 4, skb);
+ ipoib_mcast_send(dev, cb->hwaddr + 4, skb);
} else {
/* unicast GID -- should be ARP or RARP reply */
@@ -792,14 +786,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x %pI6\n",
skb_dst(skb) ? "neigh" : "dst",
be16_to_cpup((__be16 *) skb->data),
- IPOIB_QPN(phdr->hwaddr),
- phdr->hwaddr + 4);
+ IPOIB_QPN(cb->hwaddr),
+ cb->hwaddr + 4);
dev_kfree_skb_any(skb);
++dev->stats.tx_dropped;
goto unlock;
}
- unicast_arp_send(skb, dev, phdr);
+ unicast_arp_send(skb, dev, cb);
}
}
unlock:
@@ -825,8 +819,6 @@ static int ipoib_hard_header(struct sk_buff *skb,
const void *daddr, const void *saddr, unsigned len)
{
struct ipoib_header *header;
- struct dst_entry *dst;
- struct neighbour *n;
header = (struct ipoib_header *) skb_push(skb, sizeof *header);
@@ -834,18 +826,13 @@ static int ipoib_hard_header(struct sk_buff *skb,
header->reserved = 0;
/*
- * If we don't have a neighbour structure, stuff the
- * destination address onto the front of the skb so we can
- * figure out where to send the packet later.
+ * If we don't have a dst_entry structure, stuff the
+ * destination address into skb->cb so we can figure out where
+ * to send the packet later.
*/
- dst = skb_dst(skb);
- n = NULL;
- if (dst)
- n = dst_get_neighbour_raw(dst);
- if ((!dst || !n) && daddr) {
- struct ipoib_pseudoheader *phdr =
- (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr);
- memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN);
+ if (!skb_dst(skb)) {
+ struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb;
+ memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN);
}
return 0;
@@ -1021,11 +1008,7 @@ static void ipoib_setup(struct net_device *dev)
dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
- /*
- * We add in INFINIBAND_ALEN to allow for the destination
- * address "pseudoheader" for skbs without neighbour struct.
- */
- dev->hard_header_len = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
+ dev->hard_header_len = IPOIB_ENCAP_LEN;
dev->addr_len = INFINIBAND_ALEN;
dev->type = ARPHRD_INFINIBAND;
dev->tx_queue_len = ipoib_sendq_size * 2;
@@ -1222,6 +1205,8 @@ static struct net_device *ipoib_add_port(const char *format,
priv->dev->mtu = IPOIB_UD_MTU(priv->max_ib_mtu);
priv->mcast_mtu = priv->admin_mtu = priv->dev->mtu;
+ priv->dev->neigh_priv_len = sizeof(struct ipoib_neigh);
+
result = ib_query_pkey(hca, port, 0, &priv->pkey);
if (result) {
printk(KERN_WARNING "%s: ib_query_pkey port %d failed (ret = %d)\n",
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 873bff9..20ebc6f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -262,21 +262,13 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
netif_tx_lock_bh(dev);
while (!skb_queue_empty(&mcast->pkt_queue)) {
struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
- struct dst_entry *dst = skb_dst(skb);
- struct neighbour *n = NULL;
netif_tx_unlock_bh(dev);
skb->dev = dev;
- if (dst)
- n = dst_get_neighbour_raw(dst);
- if (!dst || !n) {
- /* put pseudoheader back on for next time */
- skb_push(skb, sizeof (struct ipoib_pseudoheader));
- }
-
if (dev_queue_xmit(skb))
ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n");
+
netif_tx_lock_bh(dev);
}
netif_tx_unlock_bh(dev);
@@ -728,7 +720,7 @@ out:
rcu_read_lock();
if (dst)
- n = dst_get_neighbour(dst);
+ n = dst_get_neighbour_noref(dst);
if (n && !*to_ipoib_neigh(n)) {
struct ipoib_neigh *neigh = ipoib_neigh_alloc(n,
skb->dev);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 7e7373a..9a43cb0 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -638,7 +638,7 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
iser_conn_terminate(ib_conn);
}
-static mode_t iser_attr_is_visible(int param_type, int param)
+static umode_t iser_attr_is_visible(int param_type, int param)
{
switch (param_type) {
case ISCSI_HOST_PARAM:
diff --git a/drivers/infiniband/ulp/srpt/Kconfig b/drivers/infiniband/ulp/srpt/Kconfig
new file mode 100644
index 0000000..31ee83d
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/Kconfig
@@ -0,0 +1,12 @@
+config INFINIBAND_SRPT
+ tristate "InfiniBand SCSI RDMA Protocol target support"
+ depends on INFINIBAND && TARGET_CORE
+ ---help---
+
+	  Support for the SCSI RDMA Protocol (SRP) Target driver. The SRP
+	  protocol allows an initiator to access a block storage device on
+	  another host (the target) over a network that supports the RDMA
+	  protocol. Currently the RDMA protocol is supported by InfiniBand
+	  and by iWARP network hardware. More information about the SRP
+	  protocol can be found on the website of the INCITS T10 technical
+	  committee (http://www.t10.org/).
diff --git a/drivers/infiniband/ulp/srpt/Makefile b/drivers/infiniband/ulp/srpt/Makefile
new file mode 100644
index 0000000..e3ee4bd
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/Makefile
@@ -0,0 +1,2 @@
+ccflags-y := -Idrivers/target
+obj-$(CONFIG_INFINIBAND_SRPT) += ib_srpt.o
diff --git a/drivers/infiniband/ulp/srpt/ib_dm_mad.h b/drivers/infiniband/ulp/srpt/ib_dm_mad.h
new file mode 100644
index 0000000..fb1de1f
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/ib_dm_mad.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef IB_DM_MAD_H
+#define IB_DM_MAD_H
+
+#include <linux/types.h>
+
+#include <rdma/ib_mad.h>
+
+enum {
+ /*
+ * See also section 13.4.7 Status Field, table 115 MAD Common Status
+ * Field Bit Values and also section 16.3.1.1 Status Field in the
+ * InfiniBand Architecture Specification.
+ */
+ DM_MAD_STATUS_UNSUP_METHOD = 0x0008,
+ DM_MAD_STATUS_UNSUP_METHOD_ATTR = 0x000c,
+ DM_MAD_STATUS_INVALID_FIELD = 0x001c,
+ DM_MAD_STATUS_NO_IOC = 0x0100,
+
+ /*
+ * See also the Device Management chapter, section 16.3.3 Attributes,
+ * table 279 Device Management Attributes in the InfiniBand
+ * Architecture Specification.
+ */
+ DM_ATTR_CLASS_PORT_INFO = 0x01,
+ DM_ATTR_IOU_INFO = 0x10,
+ DM_ATTR_IOC_PROFILE = 0x11,
+ DM_ATTR_SVC_ENTRIES = 0x12
+};
+
+struct ib_dm_hdr {
+ u8 reserved[28];
+};
+
+/*
+ * Structure of a management datagram sent by the SRP target implementation.
+ * Contains a management datagram header, reliable multi-packet transaction
+ * protocol (RMPP) header and ib_dm_hdr. Notes:
+ * - The SRP target implementation does not use RMPP or ib_dm_hdr when sending
+ * management datagrams.
+ * - The header size must be exactly 64 bytes (IB_MGMT_DEVICE_HDR), since this
+ * is the header size that is passed to ib_create_send_mad() in ib_srpt.c.
+ * - The maximum supported size for a management datagram when not using RMPP
+ * is 256 bytes -- 64 bytes header and 192 (IB_MGMT_DEVICE_DATA) bytes data.
+ */
+struct ib_dm_mad {
+ struct ib_mad_hdr mad_hdr;
+ struct ib_rmpp_hdr rmpp_hdr;
+ struct ib_dm_hdr dm_hdr;
+ u8 data[IB_MGMT_DEVICE_DATA];
+};
+
+/*
+ * IOUnitInfo as defined in section 16.3.3.3 IOUnitInfo of the InfiniBand
+ * Architecture Specification.
+ */
+struct ib_dm_iou_info {
+ __be16 change_id;
+ u8 max_controllers;
+ u8 op_rom;
+ u8 controller_list[128];
+};
+
+/*
+ * IOControllerProfile as defined in section 16.3.3.4 IOControllerProfile of
+ * the InfiniBand Architecture Specification.
+ */
+struct ib_dm_ioc_profile {
+ __be64 guid;
+ __be32 vendor_id;
+ __be32 device_id;
+ __be16 device_version;
+ __be16 reserved1;
+ __be32 subsys_vendor_id;
+ __be32 subsys_device_id;
+ __be16 io_class;
+ __be16 io_subclass;
+ __be16 protocol;
+ __be16 protocol_version;
+ __be16 service_conn;
+ __be16 initiators_supported;
+ __be16 send_queue_depth;
+ u8 reserved2;
+ u8 rdma_read_depth;
+ __be32 send_size;
+ __be32 rdma_size;
+ u8 op_cap_mask;
+ u8 svc_cap_mask;
+ u8 num_svc_entries;
+ u8 reserved3[9];
+ u8 id_string[64];
+};
+
+struct ib_dm_svc_entry {
+ u8 name[40];
+ __be64 id;
+};
+
+/*
+ * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
+ * Specification. See also section B.7, table B.8 in the T10 SRP r16a document.
+ */
+struct ib_dm_svc_entries {
+ struct ib_dm_svc_entry service_entries[4];
+};
+
+#endif
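
The layout constraints called out in the ib_dm_mad comment above (a 64-byte header followed by 192 data bytes, 256 bytes in total) can be verified at compile time. The following is a minimal sketch rather than part of the patch; it assumes the IB_MGMT_DEVICE_HDR (64) and IB_MGMT_DEVICE_DATA (192) constants from <rdma/ib_mad.h>, and the helper name is purely illustrative:

#include <linux/kernel.h>
#include <linux/bug.h>
#include <rdma/ib_mad.h>
#include "ib_dm_mad.h"

/* Compile-time layout check: mad_hdr (24) + rmpp_hdr (12) + dm_hdr (28)
 * must add up to the 64-byte header passed to ib_create_send_mad(). */
static void __maybe_unused srpt_check_dm_mad_layout(void)
{
	BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);
	BUILD_BUG_ON(sizeof(struct ib_dm_mad) !=
		     IB_MGMT_DEVICE_HDR + IB_MGMT_DEVICE_DATA);
}
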
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
new file mode 100644
index 0000000..2b73d43
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -0,0 +1,4070 @@
+/*
+ * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
+ * Copyright (C) 2008 - 2011 Bart Van Assche <bvanassche@acm.org>.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/kthread.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <scsi/scsi_tcq.h>
+#include <target/configfs_macros.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_configfs.h>
+#include "ib_srpt.h"
+
+/* Name of this kernel module. */
+#define DRV_NAME "ib_srpt"
+#define DRV_VERSION "2.0.0"
+#define DRV_RELDATE "2011-02-14"
+
+#define SRPT_ID_STRING "Linux SRP target"
+
+#undef pr_fmt
+#define pr_fmt(fmt) DRV_NAME " " fmt
+
+MODULE_AUTHOR("Vu Pham and Bart Van Assche");
+MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
+ "v" DRV_VERSION " (" DRV_RELDATE ")");
+MODULE_LICENSE("Dual BSD/GPL");
+
+/*
+ * Global Variables
+ */
+
+static u64 srpt_service_guid;
+static DEFINE_SPINLOCK(srpt_dev_lock); /* Protects srpt_dev_list. */
+static LIST_HEAD(srpt_dev_list); /* List of srpt_device structures. */
+
+static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
+module_param(srp_max_req_size, int, 0444);
+MODULE_PARM_DESC(srp_max_req_size,
+ "Maximum size of SRP request messages in bytes.");
+
+static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
+module_param(srpt_srq_size, int, 0444);
+MODULE_PARM_DESC(srpt_srq_size,
+ "Shared receive queue (SRQ) size.");
+
+static int srpt_get_u64_x(char *buffer, struct kernel_param *kp)
+{
+ return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
+}
+module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
+ 0444);
+MODULE_PARM_DESC(srpt_service_guid,
+ "Using this value for ioc_guid, id_ext, and cm_listen_id"
+ " instead of using the node_guid of the first HCA.");
+
+static struct ib_client srpt_client;
+static struct target_fabric_configfs *srpt_target;
+static void srpt_release_channel(struct srpt_rdma_ch *ch);
+static int srpt_queue_status(struct se_cmd *cmd);
+
+/**
+ * opposite_dma_dir() - Swap DMA_TO_DEVICE and DMA_FROM_DEVICE.
+ */
+static inline
+enum dma_data_direction opposite_dma_dir(enum dma_data_direction dir)
+{
+ switch (dir) {
+ case DMA_TO_DEVICE: return DMA_FROM_DEVICE;
+ case DMA_FROM_DEVICE: return DMA_TO_DEVICE;
+ default: return dir;
+ }
+}
+
+/**
+ * srpt_sdev_name() - Return the name associated with the HCA.
+ *
+ * Examples are ib0, ib1, ...
+ */
+static inline const char *srpt_sdev_name(struct srpt_device *sdev)
+{
+ return sdev->device->name;
+}
+
+static enum rdma_ch_state srpt_get_ch_state(struct srpt_rdma_ch *ch)
+{
+ unsigned long flags;
+ enum rdma_ch_state state;
+
+ spin_lock_irqsave(&ch->spinlock, flags);
+ state = ch->state;
+ spin_unlock_irqrestore(&ch->spinlock, flags);
+ return state;
+}
+
+static enum rdma_ch_state
+srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new_state)
+{
+ unsigned long flags;
+ enum rdma_ch_state prev;
+
+ spin_lock_irqsave(&ch->spinlock, flags);
+ prev = ch->state;
+ ch->state = new_state;
+ spin_unlock_irqrestore(&ch->spinlock, flags);
+ return prev;
+}
+
+/**
+ * srpt_test_and_set_ch_state() - Test and set the channel state.
+ *
+ * Returns true if and only if the channel state has been set to the new state.
+ */
+static bool
+srpt_test_and_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state old,
+ enum rdma_ch_state new)
+{
+ unsigned long flags;
+ enum rdma_ch_state prev;
+
+ spin_lock_irqsave(&ch->spinlock, flags);
+ prev = ch->state;
+ if (prev == old)
+ ch->state = new;
+ spin_unlock_irqrestore(&ch->spinlock, flags);
+ return prev == old;
+}
+
+/**
+ * srpt_event_handler() - Asynchronous IB event callback function.
+ *
+ * Callback function called by the InfiniBand core when an asynchronous IB
+ * event occurs. This callback may occur in interrupt context. See also
+ * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
+ * Architecture Specification.
+ */
+static void srpt_event_handler(struct ib_event_handler *handler,
+ struct ib_event *event)
+{
+ struct srpt_device *sdev;
+ struct srpt_port *sport;
+
+ sdev = ib_get_client_data(event->device, &srpt_client);
+ if (!sdev || sdev->device != event->device)
+ return;
+
+ pr_debug("ASYNC event= %d on device= %s\n", event->event,
+ srpt_sdev_name(sdev));
+
+ switch (event->event) {
+ case IB_EVENT_PORT_ERR:
+ if (event->element.port_num <= sdev->device->phys_port_cnt) {
+ sport = &sdev->port[event->element.port_num - 1];
+ sport->lid = 0;
+ sport->sm_lid = 0;
+ }
+ break;
+ case IB_EVENT_PORT_ACTIVE:
+ case IB_EVENT_LID_CHANGE:
+ case IB_EVENT_PKEY_CHANGE:
+ case IB_EVENT_SM_CHANGE:
+ case IB_EVENT_CLIENT_REREGISTER:
+ /* Refresh port data asynchronously. */
+ if (event->element.port_num <= sdev->device->phys_port_cnt) {
+ sport = &sdev->port[event->element.port_num - 1];
+ if (!sport->lid && !sport->sm_lid)
+ schedule_work(&sport->work);
+ }
+ break;
+ default:
+ printk(KERN_ERR "received unrecognized IB event %d\n",
+ event->event);
+ break;
+ }
+}
+
+/**
+ * srpt_srq_event() - SRQ event callback function.
+ */
+static void srpt_srq_event(struct ib_event *event, void *ctx)
+{
+ printk(KERN_INFO "SRQ event %d\n", event->event);
+}
+
+/**
+ * srpt_qp_event() - QP event callback function.
+ */
+static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
+{
+ pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n",
+ event->event, ch->cm_id, ch->sess_name, srpt_get_ch_state(ch));
+
+ switch (event->event) {
+ case IB_EVENT_COMM_EST:
+ ib_cm_notify(ch->cm_id, event->event);
+ break;
+ case IB_EVENT_QP_LAST_WQE_REACHED:
+ if (srpt_test_and_set_ch_state(ch, CH_DRAINING,
+ CH_RELEASING))
+ srpt_release_channel(ch);
+ else
+ pr_debug("%s: state %d - ignored LAST_WQE.\n",
+ ch->sess_name, srpt_get_ch_state(ch));
+ break;
+ default:
+ printk(KERN_ERR "received unrecognized IB QP event %d\n",
+ event->event);
+ break;
+ }
+}
+
+/**
+ * srpt_set_ioc() - Helper function for initializing an IOUnitInfo structure.
+ *
+ * @slot: one-based slot number.
+ * @value: four-bit value.
+ *
+ * Copies the lowest four bits of @value into element @slot of c_list, an
+ * array of four-bit elements (the controller list). Slot numbers are one-based.
+ */
+static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
+{
+ u16 id;
+ u8 tmp;
+
+ id = (slot - 1) / 2;
+ if (slot & 0x1) {
+ tmp = c_list[id] & 0xf;
+ c_list[id] = (value << 4) | tmp;
+ } else {
+ tmp = c_list[id] & 0xf0;
+ c_list[id] = (value & 0xf) | tmp;
+ }
+}
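
As a usage illustration of the nibble packing that srpt_set_ioc() performs, a minimal sketch follows; the example function name is hypothetical and this snippet is not part of the patch itself. It shows how two adjacent one-based slots share a single byte of the controller list:

static void __maybe_unused srpt_set_ioc_example(void)
{
	u8 c_list[128] = { 0 };

	srpt_set_ioc(c_list, 1, 0x1);	/* odd slot 1 -> upper nibble of c_list[0] */
	srpt_set_ioc(c_list, 2, 0x0);	/* even slot 2 -> lower nibble of c_list[0] */
	/* c_list[0] is now 0x10: slot 1 marked present, slot 2 empty */
}
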
+
+/**
+ * srpt_get_class_port_info() - Copy ClassPortInfo to a management datagram.
+ *
+ * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
+ * Specification.
+ */
+static void srpt_get_class_port_info(struct ib_dm_mad *mad)
+{
+ struct ib_class_port_info *cif;
+
+ cif = (struct ib_class_port_info *)mad->data;
+ memset(cif, 0, sizeof *cif);
+ cif->base_version = 1;
+ cif->class_version = 1;
+ cif->resp_time_value = 20;
+
+ mad->mad_hdr.status = 0;
+}
+
+/**
+ * srpt_get_iou() - Write IOUnitInfo to a management datagram.
+ *
+ * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
+ * Specification. See also section B.7, table B.6 in the SRP r16a document.
+ */
+static void srpt_get_iou(struct ib_dm_mad *mad)
+{
+ struct ib_dm_iou_info *ioui;
+ u8 slot;
+ int i;
+
+ ioui = (struct ib_dm_iou_info *)mad->data;
+ ioui->change_id = __constant_cpu_to_be16(1);
+ ioui->max_controllers = 16;
+
+ /* set present for slot 1 and empty for the rest */
+ srpt_set_ioc(ioui->controller_list, 1, 1);
+ for (i = 1, slot = 2; i < 16; i++, slot++)
+ srpt_set_ioc(ioui->controller_list, slot, 0);
+
+ mad->mad_hdr.status = 0;
+}
+
+/**
+ * srpt_get_ioc() - Write IOControllerProfile to a management datagram.
+ *
+ * See also section 16.3.3.4 IOControllerProfile in the InfiniBand
+ * Architecture Specification. See also section B.7, table B.7 in the SRP
+ * r16a document.
+ */
+static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
+ struct ib_dm_mad *mad)
+{
+ struct srpt_device *sdev = sport->sdev;
+ struct ib_dm_ioc_profile *iocp;
+
+ iocp = (struct ib_dm_ioc_profile *)mad->data;
+
+ if (!slot || slot > 16) {
+ mad->mad_hdr.status
+ = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
+ return;
+ }
+
+ if (slot > 2) {
+ mad->mad_hdr.status
+ = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
+ return;
+ }
+
+ memset(iocp, 0, sizeof *iocp);
+ strcpy(iocp->id_string, SRPT_ID_STRING);
+ iocp->guid = cpu_to_be64(srpt_service_guid);
+ iocp->vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
+ iocp->device_id = cpu_to_be32(sdev->dev_attr.vendor_part_id);
+ iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver);
+ iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
+ iocp->subsys_device_id = 0x0;
+ iocp->io_class = __constant_cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
+ iocp->io_subclass = __constant_cpu_to_be16(SRP_IO_SUBCLASS);
+ iocp->protocol = __constant_cpu_to_be16(SRP_PROTOCOL);
+ iocp->protocol_version = __constant_cpu_to_be16(SRP_PROTOCOL_VERSION);
+ iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
+ iocp->rdma_read_depth = 4;
+ iocp->send_size = cpu_to_be32(srp_max_req_size);
+ iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
+ 1U << 24));
+ iocp->num_svc_entries = 1;
+ iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
+ SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;
+
+ mad->mad_hdr.status = 0;
+}
+
+/**
+ * srpt_get_svc_entries() - Write ServiceEntries to a management datagram.
+ *
+ * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
+ * Specification. See also section B.7, table B.8 in the SRP r16a document.
+ */
+static void srpt_get_svc_entries(u64 ioc_guid,
+ u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
+{
+ struct ib_dm_svc_entries *svc_entries;
+
+ WARN_ON(!ioc_guid);
+
+ if (!slot || slot > 16) {
+ mad->mad_hdr.status
+ = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
+ return;
+ }
+
+ if (slot > 2 || lo > hi || hi > 1) {
+ mad->mad_hdr.status
+ = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
+ return;
+ }
+
+ svc_entries = (struct ib_dm_svc_entries *)mad->data;
+ memset(svc_entries, 0, sizeof *svc_entries);
+ svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
+ snprintf(svc_entries->service_entries[0].name,
+ sizeof(svc_entries->service_entries[0].name),
+ "%s%016llx",
+ SRP_SERVICE_NAME_PREFIX,
+ ioc_guid);
+
+ mad->mad_hdr.status = 0;
+}
+
+/**
+ * srpt_mgmt_method_get() - Process a received management datagram.
+ * @sp: source port through which the MAD has been received.
+ * @rq_mad: received MAD.
+ * @rsp_mad: response MAD.
+ */
+static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
+ struct ib_dm_mad *rsp_mad)
+{
+ u16 attr_id;
+ u32 slot;
+ u8 hi, lo;
+
+ attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
+ switch (attr_id) {
+ case DM_ATTR_CLASS_PORT_INFO:
+ srpt_get_class_port_info(rsp_mad);
+ break;
+ case DM_ATTR_IOU_INFO:
+ srpt_get_iou(rsp_mad);
+ break;
+ case DM_ATTR_IOC_PROFILE:
+ slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
+ srpt_get_ioc(sp, slot, rsp_mad);
+ break;
+ case DM_ATTR_SVC_ENTRIES:
+ slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
+ hi = (u8) ((slot >> 8) & 0xff);
+ lo = (u8) (slot & 0xff);
+ slot = (u16) ((slot >> 16) & 0xffff);
+ srpt_get_svc_entries(srpt_service_guid,
+ slot, hi, lo, rsp_mad);
+ break;
+ default:
+ rsp_mad->mad_hdr.status =
+ __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
+ break;
+ }
+}
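+
+/*
+ * Note on the DM_ATTR_SVC_ENTRIES case above: the 32-bit attribute modifier
+ * is unpacked as follows - bits 16..31 hold the slot number, bits 8..15 the
+ * index of the last ("hi") and bits 0..7 the index of the first ("lo")
+ * requested service entry. An attr_mod of 0x00010000 hence requests service
+ * entry 0 of slot 1.
+ */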
+
+/**
+ * srpt_mad_send_handler() - Post MAD-send callback function.
+ */
+static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
+ struct ib_mad_send_wc *mad_wc)
+{
+ ib_destroy_ah(mad_wc->send_buf->ah);
+ ib_free_send_mad(mad_wc->send_buf);
+}
+
+/**
+ * srpt_mad_recv_handler() - MAD reception callback function.
+ */
+static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
+ struct ib_mad_recv_wc *mad_wc)
+{
+ struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
+ struct ib_ah *ah;
+ struct ib_mad_send_buf *rsp;
+ struct ib_dm_mad *dm_mad;
+
+ if (!mad_wc || !mad_wc->recv_buf.mad)
+ return;
+
+ ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
+ mad_wc->recv_buf.grh, mad_agent->port_num);
+ if (IS_ERR(ah))
+ goto err;
+
+ BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);
+
+ rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
+ mad_wc->wc->pkey_index, 0,
+ IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
+ GFP_KERNEL);
+ if (IS_ERR(rsp))
+ goto err_rsp;
+
+ rsp->ah = ah;
+
+ dm_mad = rsp->mad;
+ memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof *dm_mad);
+ dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
+ dm_mad->mad_hdr.status = 0;
+
+ switch (mad_wc->recv_buf.mad->mad_hdr.method) {
+ case IB_MGMT_METHOD_GET:
+ srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
+ break;
+ case IB_MGMT_METHOD_SET:
+ dm_mad->mad_hdr.status =
+ __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
+ break;
+ default:
+ dm_mad->mad_hdr.status =
+ __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
+ break;
+ }
+
+ if (!ib_post_send_mad(rsp, NULL)) {
+ ib_free_recv_mad(mad_wc);
+ /* will destroy_ah & free_send_mad in send completion */
+ return;
+ }
+
+ ib_free_send_mad(rsp);
+
+err_rsp:
+ ib_destroy_ah(ah);
+err:
+ ib_free_recv_mad(mad_wc);
+}
+
+/**
+ * srpt_refresh_port() - Configure an HCA port.
+ *
+ * Enable InfiniBand management datagram processing, update the cached sm_lid,
+ * lid and gid values, and register a callback function for processing MADs
+ * on the specified port.
+ *
+ * Note: It is safe to call this function more than once for the same port.
+ */
+static int srpt_refresh_port(struct srpt_port *sport)
+{
+ struct ib_mad_reg_req reg_req;
+ struct ib_port_modify port_modify;
+ struct ib_port_attr port_attr;
+ int ret;
+
+ memset(&port_modify, 0, sizeof port_modify);
+ port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
+ port_modify.clr_port_cap_mask = 0;
+
+ ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
+ if (ret)
+ goto err_mod_port;
+
+ ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
+ if (ret)
+ goto err_query_port;
+
+ sport->sm_lid = port_attr.sm_lid;
+ sport->lid = port_attr.lid;
+
+ ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
+ if (ret)
+ goto err_query_port;
+
+ if (!sport->mad_agent) {
+ memset(&reg_req, 0, sizeof reg_req);
+ reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
+ reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
+ set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
+ set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
+
+ sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
+ sport->port,
+ IB_QPT_GSI,
+ &reg_req, 0,
+ srpt_mad_send_handler,
+ srpt_mad_recv_handler,
+ sport);
+ if (IS_ERR(sport->mad_agent)) {
+ ret = PTR_ERR(sport->mad_agent);
+ sport->mad_agent = NULL;
+ goto err_query_port;
+ }
+ }
+
+ return 0;
+
+err_query_port:
+
+ port_modify.set_port_cap_mask = 0;
+ port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
+ ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
+
+err_mod_port:
+
+ return ret;
+}
+
+/**
+ * srpt_unregister_mad_agent() - Unregister MAD callback functions.
+ *
+ * Note: It is safe to call this function more than once for the same device.
+ */
+static void srpt_unregister_mad_agent(struct srpt_device *sdev)
+{
+ struct ib_port_modify port_modify = {
+ .clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
+ };
+ struct srpt_port *sport;
+ int i;
+
+ for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
+ sport = &sdev->port[i - 1];
+ WARN_ON(sport->port != i);
+ if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
+ printk(KERN_ERR "disabling MAD processing failed.\n");
+ if (sport->mad_agent) {
+ ib_unregister_mad_agent(sport->mad_agent);
+ sport->mad_agent = NULL;
+ }
+ }
+}
+
+/**
+ * srpt_alloc_ioctx() - Allocate an SRPT I/O context structure.
+ */
+static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
+ int ioctx_size, int dma_size,
+ enum dma_data_direction dir)
+{
+ struct srpt_ioctx *ioctx;
+
+ ioctx = kmalloc(ioctx_size, GFP_KERNEL);
+ if (!ioctx)
+ goto err;
+
+ ioctx->buf = kmalloc(dma_size, GFP_KERNEL);
+ if (!ioctx->buf)
+ goto err_free_ioctx;
+
+ ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir);
+ if (ib_dma_mapping_error(sdev->device, ioctx->dma))
+ goto err_free_buf;
+
+ return ioctx;
+
+err_free_buf:
+ kfree(ioctx->buf);
+err_free_ioctx:
+ kfree(ioctx);
+err:
+ return NULL;
+}
+
+/**
+ * srpt_free_ioctx() - Free an SRPT I/O context structure.
+ */
+static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
+ int dma_size, enum dma_data_direction dir)
+{
+ if (!ioctx)
+ return;
+
+ ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir);
+ kfree(ioctx->buf);
+ kfree(ioctx);
+}
+
+/**
+ * srpt_alloc_ioctx_ring() - Allocate a ring of SRPT I/O context structures.
+ * @sdev: Device to allocate the I/O context ring for.
+ * @ring_size: Number of elements in the I/O context ring.
+ * @ioctx_size: I/O context size.
+ * @dma_size: DMA buffer size.
+ * @dir: DMA data direction.
+ */
+static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
+ int ring_size, int ioctx_size,
+ int dma_size, enum dma_data_direction dir)
+{
+ struct srpt_ioctx **ring;
+ int i;
+
+ WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
+ && ioctx_size != sizeof(struct srpt_send_ioctx));
+
+ ring = kmalloc(ring_size * sizeof(ring[0]), GFP_KERNEL);
+ if (!ring)
+ goto out;
+ for (i = 0; i < ring_size; ++i) {
+ ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir);
+ if (!ring[i])
+ goto err;
+ ring[i]->index = i;
+ }
+ goto out;
+
+err:
+ while (--i >= 0)
+ srpt_free_ioctx(sdev, ring[i], dma_size, dir);
+ kfree(ring);
+ ring = NULL;
+out:
+ return ring;
+}
+
+/**
+ * srpt_free_ioctx_ring() - Free the ring of SRPT I/O context structures.
+ */
+static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
+ struct srpt_device *sdev, int ring_size,
+ int dma_size, enum dma_data_direction dir)
+{
+ int i;
+
+ for (i = 0; i < ring_size; ++i)
+ srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
+ kfree(ioctx_ring);
+}
+
+/**
+ * srpt_get_cmd_state() - Get the state of a SCSI command.
+ */
+static enum srpt_command_state srpt_get_cmd_state(struct srpt_send_ioctx *ioctx)
+{
+ enum srpt_command_state state;
+ unsigned long flags;
+
+ BUG_ON(!ioctx);
+
+ spin_lock_irqsave(&ioctx->spinlock, flags);
+ state = ioctx->state;
+ spin_unlock_irqrestore(&ioctx->spinlock, flags);
+ return state;
+}
+
+/**
+ * srpt_set_cmd_state() - Set the state of a SCSI command.
+ *
+ * Does not modify the state of aborted commands. Returns the previous command
+ * state.
+ */
+static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
+ enum srpt_command_state new)
+{
+ enum srpt_command_state previous;
+ unsigned long flags;
+
+ BUG_ON(!ioctx);
+
+ spin_lock_irqsave(&ioctx->spinlock, flags);
+ previous = ioctx->state;
+ if (previous != SRPT_STATE_DONE)
+ ioctx->state = new;
+ spin_unlock_irqrestore(&ioctx->spinlock, flags);
+
+ return previous;
+}
+
+/**
+ * srpt_test_and_set_cmd_state() - Test and set the state of a command.
+ *
+ * Returns true if and only if the previous command state was equal to 'old'.
+ */
+static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
+ enum srpt_command_state old,
+ enum srpt_command_state new)
+{
+ enum srpt_command_state previous;
+ unsigned long flags;
+
+ WARN_ON(!ioctx);
+ WARN_ON(old == SRPT_STATE_DONE);
+ WARN_ON(new == SRPT_STATE_NEW);
+
+ spin_lock_irqsave(&ioctx->spinlock, flags);
+ previous = ioctx->state;
+ if (previous == old)
+ ioctx->state = new;
+ spin_unlock_irqrestore(&ioctx->spinlock, flags);
+ return previous == old;
+}
+
+/**
+ * srpt_post_recv() - Post an IB receive request.
+ */
+static int srpt_post_recv(struct srpt_device *sdev,
+ struct srpt_recv_ioctx *ioctx)
+{
+ struct ib_sge list;
+ struct ib_recv_wr wr, *bad_wr;
+
+ BUG_ON(!sdev);
+ wr.wr_id = encode_wr_id(SRPT_RECV, ioctx->ioctx.index);
+
+ list.addr = ioctx->ioctx.dma;
+ list.length = srp_max_req_size;
+ list.lkey = sdev->mr->lkey;
+
+ wr.next = NULL;
+ wr.sg_list = &list;
+ wr.num_sge = 1;
+
+ return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
+}
+
+/**
+ * srpt_post_send() - Post an IB send request.
+ *
+ * Returns zero upon success and a non-zero value upon failure.
+ */
+static int srpt_post_send(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx, int len)
+{
+ struct ib_sge list;
+ struct ib_send_wr wr, *bad_wr;
+ struct srpt_device *sdev = ch->sport->sdev;
+ int ret;
+
+ atomic_inc(&ch->req_lim);
+
+ ret = -ENOMEM;
+ if (unlikely(atomic_dec_return(&ch->sq_wr_avail) < 0)) {
+ printk(KERN_WARNING "IB send queue full (needed 1)\n");
+ goto out;
+ }
+
+ ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, len,
+ DMA_TO_DEVICE);
+
+ list.addr = ioctx->ioctx.dma;
+ list.length = len;
+ list.lkey = sdev->mr->lkey;
+
+ wr.next = NULL;
+ wr.wr_id = encode_wr_id(SRPT_SEND, ioctx->ioctx.index);
+ wr.sg_list = &list;
+ wr.num_sge = 1;
+ wr.opcode = IB_WR_SEND;
+ wr.send_flags = IB_SEND_SIGNALED;
+
+ ret = ib_post_send(ch->qp, &wr, &bad_wr);
+
+out:
+ if (ret < 0) {
+ atomic_inc(&ch->sq_wr_avail);
+ atomic_dec(&ch->req_lim);
+ }
+ return ret;
+}
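+
+/*
+ * Note: srpt_post_send() increments ch->req_lim and consumes one send queue
+ * slot (ch->sq_wr_avail) before posting the work request, and rolls both
+ * counters back if ib_post_send() fails, so the accounting stays consistent
+ * on the error path.
+ */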
+
+/**
+ * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
+ * @ioctx: Pointer to the I/O context associated with the request.
+ * @srp_cmd: Pointer to the SRP_CMD request data.
+ * @dir: Pointer to the variable to which the transfer direction will be
+ * written.
+ * @data_len: Pointer to the variable to which the total data length of all
+ * descriptors in the SRP_CMD request will be written.
+ *
+ * This function initializes ioctx->n_rbuf and ioctx->rbufs.
+ *
+ * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
+ * -ENOMEM when memory allocation fails and zero upon success.
+ */
+static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
+ struct srp_cmd *srp_cmd,
+ enum dma_data_direction *dir, u64 *data_len)
+{
+ struct srp_indirect_buf *idb;
+ struct srp_direct_buf *db;
+ unsigned add_cdb_offset;
+ int ret;
+
+ /*
+ * The pointer computations below will only be compiled correctly
+ * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
+ * whether srp_cmd::add_data has been declared as a byte pointer.
+ */
+ BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
+ && !__same_type(srp_cmd->add_data[0], (u8)0));
+
+ BUG_ON(!dir);
+ BUG_ON(!data_len);
+
+ ret = 0;
+ *data_len = 0;
+
+ /*
+ * The lower four bits of the buffer format field contain the DATA-IN
+ * buffer descriptor format, and the highest four bits contain the
+ * DATA-OUT buffer descriptor format.
+ */
+ *dir = DMA_NONE;
+ if (srp_cmd->buf_fmt & 0xf)
+ /* DATA-IN: transfer data from target to initiator (read). */
+ *dir = DMA_FROM_DEVICE;
+ else if (srp_cmd->buf_fmt >> 4)
+ /* DATA-OUT: transfer data from initiator to target (write). */
+ *dir = DMA_TO_DEVICE;
+
+ /*
+ * According to the SRP spec, the lower two bits of the 'ADDITIONAL
+ * CDB LENGTH' field are reserved and the additional CDB length in
+ * bytes is four times the value specified in bits 2..7. Hence the
+ * "& ~3".
+ */
+ add_cdb_offset = srp_cmd->add_cdb_len & ~3;
+ if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
+ ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
+ ioctx->n_rbuf = 1;
+ ioctx->rbufs = &ioctx->single_rbuf;
+
+ db = (struct srp_direct_buf *)(srp_cmd->add_data
+ + add_cdb_offset);
+ memcpy(ioctx->rbufs, db, sizeof *db);
+ *data_len = be32_to_cpu(db->len);
+ } else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
+ ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
+ idb = (struct srp_indirect_buf *)(srp_cmd->add_data
+ + add_cdb_offset);
+
+ ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof *db;
+
+ if (ioctx->n_rbuf >
+ (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
+ printk(KERN_ERR "received unsupported SRP_CMD request"
+ " type (%u out + %u in != %u / %zu)\n",
+ srp_cmd->data_out_desc_cnt,
+ srp_cmd->data_in_desc_cnt,
+ be32_to_cpu(idb->table_desc.len),
+ sizeof(*db));
+ ioctx->n_rbuf = 0;
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (ioctx->n_rbuf == 1)
+ ioctx->rbufs = &ioctx->single_rbuf;
+ else {
+ ioctx->rbufs =
+ kmalloc(ioctx->n_rbuf * sizeof *db, GFP_ATOMIC);
+ if (!ioctx->rbufs) {
+ ioctx->n_rbuf = 0;
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+
+ db = idb->desc_list;
+ memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof *db);
+ *data_len = be32_to_cpu(idb->len);
+ }
+out:
+ return ret;
+}
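+
+/*
+ * Example: an SRP_CMD whose buf_fmt field has its lower nibble equal to
+ * SRP_DATA_DESC_INDIRECT and its upper nibble equal to zero describes a
+ * DATA-IN (read) transfer whose buffers are listed in an indirect descriptor
+ * table; srpt_get_desc_tbl() then copies idb->desc_list into ioctx->rbufs
+ * and reports the total transfer length from idb->len.
+ */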
+
+/**
+ * srpt_init_ch_qp() - Initialize queue pair attributes.
+ *
+ * Initializes the attributes of queue pair 'qp' by allowing local write,
+ * remote read and remote write. Also transitions 'qp' to the IB_QPS_INIT
+ * state.
+ */
+static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
+{
+ struct ib_qp_attr *attr;
+ int ret;
+
+ attr = kzalloc(sizeof *attr, GFP_KERNEL);
+ if (!attr)
+ return -ENOMEM;
+
+ attr->qp_state = IB_QPS_INIT;
+ attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
+ IB_ACCESS_REMOTE_WRITE;
+ attr->port_num = ch->sport->port;
+ attr->pkey_index = 0;
+
+ ret = ib_modify_qp(qp, attr,
+ IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
+ IB_QP_PKEY_INDEX);
+
+ kfree(attr);
+ return ret;
+}
+
+/**
+ * srpt_ch_qp_rtr() - Change the state of a channel to 'ready to receive' (RTR).
+ * @ch: channel of the queue pair.
+ * @qp: queue pair to change the state of.
+ *
+ * Returns zero upon success and a negative value upon failure.
+ *
+ * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
+ * If this structure ever becomes larger, it might be necessary to allocate
+ * it dynamically instead of on the stack.
+ */
+static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
+{
+ struct ib_qp_attr qp_attr;
+ int attr_mask;
+ int ret;
+
+ qp_attr.qp_state = IB_QPS_RTR;
+ ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
+ if (ret)
+ goto out;
+
+ qp_attr.max_dest_rd_atomic = 4;
+
+ ret = ib_modify_qp(qp, &qp_attr, attr_mask);
+
+out:
+ return ret;
+}
+
+/**
+ * srpt_ch_qp_rts() - Change the state of a channel to 'ready to send' (RTS).
+ * @ch: channel of the queue pair.
+ * @qp: queue pair to change the state of.
+ *
+ * Returns zero upon success and a negative value upon failure.
+ *
+ * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
+ * If this structure ever becomes larger, it might be necessary to allocate
+ * it dynamically instead of on the stack.
+ */
+static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
+{
+ struct ib_qp_attr qp_attr;
+ int attr_mask;
+ int ret;
+
+ qp_attr.qp_state = IB_QPS_RTS;
+ ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
+ if (ret)
+ goto out;
+
+ qp_attr.max_rd_atomic = 4;
+
+ ret = ib_modify_qp(qp, &qp_attr, attr_mask);
+
+out:
+ return ret;
+}
+
+/**
+ * srpt_ch_qp_err() - Set the channel queue pair state to 'error'.
+ */
+static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
+{
+ struct ib_qp_attr qp_attr;
+
+ qp_attr.qp_state = IB_QPS_ERR;
+ return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
+}
+
+/**
+ * srpt_unmap_sg_to_ib_sge() - Unmap an IB SGE list.
+ */
+static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx)
+{
+ struct scatterlist *sg;
+ enum dma_data_direction dir;
+
+ BUG_ON(!ch);
+ BUG_ON(!ioctx);
+ BUG_ON(ioctx->n_rdma && !ioctx->rdma_ius);
+
+ while (ioctx->n_rdma)
+ kfree(ioctx->rdma_ius[--ioctx->n_rdma].sge);
+
+ kfree(ioctx->rdma_ius);
+ ioctx->rdma_ius = NULL;
+
+ if (ioctx->mapped_sg_count) {
+ sg = ioctx->sg;
+ WARN_ON(!sg);
+ dir = ioctx->cmd.data_direction;
+ BUG_ON(dir == DMA_NONE);
+ ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt,
+ opposite_dma_dir(dir));
+ ioctx->mapped_sg_count = 0;
+ }
+}
+
+/**
+ * srpt_map_sg_to_ib_sge() - Map an SG list to an IB SGE list.
+ */
+static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx)
+{
+ struct se_cmd *cmd;
+ struct scatterlist *sg, *sg_orig;
+ int sg_cnt;
+ enum dma_data_direction dir;
+ struct rdma_iu *riu;
+ struct srp_direct_buf *db;
+ dma_addr_t dma_addr;
+ struct ib_sge *sge;
+ u64 raddr;
+ u32 rsize;
+ u32 tsize;
+ u32 dma_len;
+ int count, nrdma;
+ int i, j, k;
+
+ BUG_ON(!ch);
+ BUG_ON(!ioctx);
+ cmd = &ioctx->cmd;
+ dir = cmd->data_direction;
+ BUG_ON(dir == DMA_NONE);
+
+ transport_do_task_sg_chain(cmd);
+ ioctx->sg = sg = sg_orig = cmd->t_tasks_sg_chained;
+ ioctx->sg_cnt = sg_cnt = cmd->t_tasks_sg_chained_no;
+
+ count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
+ opposite_dma_dir(dir));
+ if (unlikely(!count))
+ return -EAGAIN;
+
+ ioctx->mapped_sg_count = count;
+
+ if (ioctx->rdma_ius && ioctx->n_rdma_ius)
+ nrdma = ioctx->n_rdma_ius;
+ else {
+ nrdma = (count + SRPT_DEF_SG_PER_WQE - 1) / SRPT_DEF_SG_PER_WQE
+ + ioctx->n_rbuf;
+
+ ioctx->rdma_ius = kzalloc(nrdma * sizeof *riu, GFP_KERNEL);
+ if (!ioctx->rdma_ius)
+ goto free_mem;
+
+ ioctx->n_rdma_ius = nrdma;
+ }
+
+ db = ioctx->rbufs;
+ tsize = cmd->data_length;
+ dma_len = sg_dma_len(&sg[0]);
+ riu = ioctx->rdma_ius;
+
+ /*
+ * First pass: for each remote descriptor, calculate the number of
+ * ib_sge entries needed. If an RDMA operation needs at most
+ * SRPT_DEF_SG_PER_WQE ib_sge entries, a single rdma_iu (one RDMA
+ * work request) covers the remote descriptor; otherwise additional
+ * rdma_iu structures are allocated to carry the remaining ib_sge
+ * entries in further RDMA work requests.
+ */
+ for (i = 0, j = 0;
+ j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
+ rsize = be32_to_cpu(db->len);
+ raddr = be64_to_cpu(db->va);
+ riu->raddr = raddr;
+ riu->rkey = be32_to_cpu(db->key);
+ riu->sge_cnt = 0;
+
+ /* calculate how many sge required for this remote_buf */
+ while (rsize > 0 && tsize > 0) {
+
+ if (rsize >= dma_len) {
+ tsize -= dma_len;
+ rsize -= dma_len;
+ raddr += dma_len;
+
+ if (tsize > 0) {
+ ++j;
+ if (j < count) {
+ sg = sg_next(sg);
+ dma_len = sg_dma_len(sg);
+ }
+ }
+ } else {
+ tsize -= rsize;
+ dma_len -= rsize;
+ rsize = 0;
+ }
+
+ ++riu->sge_cnt;
+
+ if (rsize > 0 && riu->sge_cnt == SRPT_DEF_SG_PER_WQE) {
+ ++ioctx->n_rdma;
+ riu->sge =
+ kmalloc(riu->sge_cnt * sizeof *riu->sge,
+ GFP_KERNEL);
+ if (!riu->sge)
+ goto free_mem;
+
+ ++riu;
+ riu->sge_cnt = 0;
+ riu->raddr = raddr;
+ riu->rkey = be32_to_cpu(db->key);
+ }
+ }
+
+ ++ioctx->n_rdma;
+ riu->sge = kmalloc(riu->sge_cnt * sizeof *riu->sge,
+ GFP_KERNEL);
+ if (!riu->sge)
+ goto free_mem;
+ }
+
+ db = ioctx->rbufs;
+ tsize = cmd->data_length;
+ riu = ioctx->rdma_ius;
+ sg = sg_orig;
+ dma_len = sg_dma_len(&sg[0]);
+ dma_addr = sg_dma_address(&sg[0]);
+
+ /* Second pass: map the scatter-gather DMA addresses to rdma_iu->ib_sge. */
+ for (i = 0, j = 0;
+ j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
+ rsize = be32_to_cpu(db->len);
+ sge = riu->sge;
+ k = 0;
+
+ while (rsize > 0 && tsize > 0) {
+ sge->addr = dma_addr;
+ sge->lkey = ch->sport->sdev->mr->lkey;
+
+ if (rsize >= dma_len) {
+ sge->length =
+ (tsize < dma_len) ? tsize : dma_len;
+ tsize -= dma_len;
+ rsize -= dma_len;
+
+ if (tsize > 0) {
+ ++j;
+ if (j < count) {
+ sg = sg_next(sg);
+ dma_len = sg_dma_len(sg);
+ dma_addr = sg_dma_address(sg);
+ }
+ }
+ } else {
+ sge->length = (tsize < rsize) ? tsize : rsize;
+ tsize -= rsize;
+ dma_len -= rsize;
+ dma_addr += rsize;
+ rsize = 0;
+ }
+
+ ++k;
+ if (k == riu->sge_cnt && rsize > 0 && tsize > 0) {
+ ++riu;
+ sge = riu->sge;
+ k = 0;
+ } else if (rsize > 0 && tsize > 0)
+ ++sge;
+ }
+ }
+
+ return 0;
+
+free_mem:
+ srpt_unmap_sg_to_ib_sge(ch, ioctx);
+
+ return -ENOMEM;
+}
+
+/**
+ * srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator.
+ */
+static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
+{
+ struct srpt_send_ioctx *ioctx;
+ unsigned long flags;
+
+ BUG_ON(!ch);
+
+ ioctx = NULL;
+ spin_lock_irqsave(&ch->spinlock, flags);
+ if (!list_empty(&ch->free_list)) {
+ ioctx = list_first_entry(&ch->free_list,
+ struct srpt_send_ioctx, free_list);
+ list_del(&ioctx->free_list);
+ }
+ spin_unlock_irqrestore(&ch->spinlock, flags);
+
+ if (!ioctx)
+ return ioctx;
+
+ BUG_ON(ioctx->ch != ch);
+ kref_init(&ioctx->kref);
+ spin_lock_init(&ioctx->spinlock);
+ ioctx->state = SRPT_STATE_NEW;
+ ioctx->n_rbuf = 0;
+ ioctx->rbufs = NULL;
+ ioctx->n_rdma = 0;
+ ioctx->n_rdma_ius = 0;
+ ioctx->rdma_ius = NULL;
+ ioctx->mapped_sg_count = 0;
+ init_completion(&ioctx->tx_done);
+ ioctx->queue_status_only = false;
+ /*
+ * transport_init_se_cmd() does not initialize all fields, so do it
+ * here.
+ */
+ memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
+ memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
+
+ return ioctx;
+}
+
+/**
+ * srpt_put_send_ioctx() - Free up resources.
+ */
+static void srpt_put_send_ioctx(struct srpt_send_ioctx *ioctx)
+{
+ struct srpt_rdma_ch *ch;
+ unsigned long flags;
+
+ BUG_ON(!ioctx);
+ ch = ioctx->ch;
+ BUG_ON(!ch);
+
+ WARN_ON(srpt_get_cmd_state(ioctx) != SRPT_STATE_DONE);
+
+ srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
+ transport_generic_free_cmd(&ioctx->cmd, 0);
+
+ if (ioctx->n_rbuf > 1) {
+ kfree(ioctx->rbufs);
+ ioctx->rbufs = NULL;
+ ioctx->n_rbuf = 0;
+ }
+
+ spin_lock_irqsave(&ch->spinlock, flags);
+ list_add(&ioctx->free_list, &ch->free_list);
+ spin_unlock_irqrestore(&ch->spinlock, flags);
+}
+
+static void srpt_put_send_ioctx_kref(struct kref *kref)
+{
+ srpt_put_send_ioctx(container_of(kref, struct srpt_send_ioctx, kref));
+}
+
+/**
+ * srpt_abort_cmd() - Abort a SCSI command.
+ * @ioctx: I/O context associated with the SCSI command.
+ *
+ * Returns the state the command was in before the abort was initiated.
+ */
+static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
+{
+ enum srpt_command_state state;
+ unsigned long flags;
+
+ BUG_ON(!ioctx);
+
+ /*
+ * If the command is in a state where the target core is waiting for
+ * the ib_srpt driver, change the state to the next state. Changing
+ * the state of the command from SRPT_STATE_NEED_DATA to
+ * SRPT_STATE_DATA_IN ensures that srpt_xmit_response() will call this
+ * function a second time.
+ */
+
+ spin_lock_irqsave(&ioctx->spinlock, flags);
+ state = ioctx->state;
+ switch (state) {
+ case SRPT_STATE_NEED_DATA:
+ ioctx->state = SRPT_STATE_DATA_IN;
+ break;
+ case SRPT_STATE_DATA_IN:
+ case SRPT_STATE_CMD_RSP_SENT:
+ case SRPT_STATE_MGMT_RSP_SENT:
+ ioctx->state = SRPT_STATE_DONE;
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&ioctx->spinlock, flags);
+
+ if (state == SRPT_STATE_DONE)
+ goto out;
+
+ pr_debug("Aborting cmd with state %d and tag %lld\n", state,
+ ioctx->tag);
+
+ switch (state) {
+ case SRPT_STATE_NEW:
+ case SRPT_STATE_DATA_IN:
+ case SRPT_STATE_MGMT:
+ /*
+ * Do nothing - defer abort processing until
+ * srpt_queue_response() is invoked.
+ */
+ WARN_ON(!transport_check_aborted_status(&ioctx->cmd, false));
+ break;
+ case SRPT_STATE_NEED_DATA:
+ /* DMA_TO_DEVICE (write) - RDMA read error. */
+ atomic_set(&ioctx->cmd.transport_lun_stop, 1);
+ transport_generic_handle_data(&ioctx->cmd);
+ break;
+ case SRPT_STATE_CMD_RSP_SENT:
+ /*
+ * SRP_RSP sending failed or the SRP_RSP send completion has
+ * not been received in time.
+ */
+ srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
+ atomic_set(&ioctx->cmd.transport_lun_stop, 1);
+ kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+ break;
+ case SRPT_STATE_MGMT_RSP_SENT:
+ srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
+ kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+ break;
+ default:
+ WARN_ON("ERROR: unexpected command state");
+ break;
+ }
+
+out:
+ return state;
+}
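+
+/*
+ * State transitions performed by srpt_abort_cmd(): SRPT_STATE_NEED_DATA is
+ * changed into SRPT_STATE_DATA_IN so that srpt_xmit_response() will invoke
+ * srpt_abort_cmd() a second time, while SRPT_STATE_DATA_IN,
+ * SRPT_STATE_CMD_RSP_SENT and SRPT_STATE_MGMT_RSP_SENT are changed into
+ * SRPT_STATE_DONE. Commands already in SRPT_STATE_DONE are left untouched.
+ */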
+
+/**
+ * srpt_handle_send_err_comp() - Process an IB_WC_SEND error completion.
+ */
+static void srpt_handle_send_err_comp(struct srpt_rdma_ch *ch, u64 wr_id)
+{
+ struct srpt_send_ioctx *ioctx;
+ enum srpt_command_state state;
+ struct se_cmd *cmd;
+ u32 index;
+
+ atomic_inc(&ch->sq_wr_avail);
+
+ index = idx_from_wr_id(wr_id);
+ ioctx = ch->ioctx_ring[index];
+ state = srpt_get_cmd_state(ioctx);
+ cmd = &ioctx->cmd;
+
+ WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
+ && state != SRPT_STATE_MGMT_RSP_SENT
+ && state != SRPT_STATE_NEED_DATA
+ && state != SRPT_STATE_DONE);
+
+ /* If SRP_RSP sending failed, undo the ch->req_lim change. */
+ if (state == SRPT_STATE_CMD_RSP_SENT
+ || state == SRPT_STATE_MGMT_RSP_SENT)
+ atomic_dec(&ch->req_lim);
+
+ srpt_abort_cmd(ioctx);
+}
+
+/**
+ * srpt_handle_send_comp() - Process an IB send completion notification.
+ */
+static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx)
+{
+ enum srpt_command_state state;
+
+ atomic_inc(&ch->sq_wr_avail);
+
+ state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
+
+ if (WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
+ && state != SRPT_STATE_MGMT_RSP_SENT
+ && state != SRPT_STATE_DONE))
+ pr_debug("state = %d\n", state);
+
+ if (state != SRPT_STATE_DONE)
+ kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+ else
+ printk(KERN_ERR "IB completion has been received too late for"
+ " wr_id = %u.\n", ioctx->ioctx.index);
+}
+
+/**
+ * srpt_handle_rdma_comp() - Process an IB RDMA completion notification.
+ *
+ * Note: transport_generic_handle_data() is asynchronous so unmapping the
+ * data that has been transferred via IB RDMA must be postponed until the
+ * check_stop_free() callback.
+ */
+static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx,
+ enum srpt_opcode opcode)
+{
+ WARN_ON(ioctx->n_rdma <= 0);
+ atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
+
+ if (opcode == SRPT_RDMA_READ_LAST) {
+ if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
+ SRPT_STATE_DATA_IN))
+ transport_generic_handle_data(&ioctx->cmd);
+ else
+ printk(KERN_ERR "%s[%d]: wrong state = %d\n", __func__,
+ __LINE__, srpt_get_cmd_state(ioctx));
+ } else if (opcode == SRPT_RDMA_ABORT) {
+ ioctx->rdma_aborted = true;
+ } else {
+ WARN(true, "unexpected opcode %d\n", opcode);
+ }
+}
+
+/**
+ * srpt_handle_rdma_err_comp() - Process an IB RDMA error completion.
+ */
+static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx,
+ enum srpt_opcode opcode)
+{
+ struct se_cmd *cmd;
+ enum srpt_command_state state;
+
+ cmd = &ioctx->cmd;
+ state = srpt_get_cmd_state(ioctx);
+ switch (opcode) {
+ case SRPT_RDMA_READ_LAST:
+ if (ioctx->n_rdma <= 0) {
+ printk(KERN_ERR "Received invalid RDMA read"
+ " error completion with idx %d\n",
+ ioctx->ioctx.index);
+ break;
+ }
+ atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
+ if (state == SRPT_STATE_NEED_DATA)
+ srpt_abort_cmd(ioctx);
+ else
+ printk(KERN_ERR "%s[%d]: wrong state = %d\n",
+ __func__, __LINE__, state);
+ break;
+ case SRPT_RDMA_WRITE_LAST:
+ atomic_set(&ioctx->cmd.transport_lun_stop, 1);
+ break;
+ default:
+ printk(KERN_ERR "%s[%d]: opcode = %u\n", __func__,
+ __LINE__, opcode);
+ break;
+ }
+}
+
+/**
+ * srpt_build_cmd_rsp() - Build an SRP_RSP response.
+ * @ch: RDMA channel through which the request has been received.
+ * @ioctx: I/O context associated with the SRP_CMD request. The response will
+ * be built in the buffer ioctx->buf points at and hence this function will
+ * overwrite the request data.
+ * @tag: tag of the request for which this response is being generated.
+ * @status: value for the STATUS field of the SRP_RSP information unit.
+ *
+ * Returns the size in bytes of the SRP_RSP response.
+ *
+ * An SRP_RSP response contains a SCSI status or service response. See also
+ * section 6.9 in the SRP r16a document for the format of an SRP_RSP
+ * response. See also SPC-2 for more information about sense data.
+ */
+static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx, u64 tag,
+ int status)
+{
+ struct srp_rsp *srp_rsp;
+ const u8 *sense_data;
+ int sense_data_len, max_sense_len;
+
+ /*
+ * The lowest bit of all SAM-3 status codes is zero (see also
+ * paragraph 5.3 in SAM-3).
+ */
+ WARN_ON(status & 1);
+
+ srp_rsp = ioctx->ioctx.buf;
+ BUG_ON(!srp_rsp);
+
+ sense_data = ioctx->sense_data;
+ sense_data_len = ioctx->cmd.scsi_sense_length;
+ WARN_ON(sense_data_len > sizeof(ioctx->sense_data));
+
+ memset(srp_rsp, 0, sizeof *srp_rsp);
+ srp_rsp->opcode = SRP_RSP;
+ srp_rsp->req_lim_delta =
+ __constant_cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
+ srp_rsp->tag = tag;
+ srp_rsp->status = status;
+
+ if (sense_data_len) {
+ BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
+ max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
+ if (sense_data_len > max_sense_len) {
+ printk(KERN_WARNING "truncated sense data from %d to %d"
+ " bytes\n", sense_data_len, max_sense_len);
+ sense_data_len = max_sense_len;
+ }
+
+ srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
+ srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
+ memcpy(srp_rsp + 1, sense_data, sense_data_len);
+ }
+
+ return sizeof(*srp_rsp) + sense_data_len;
+}
+
+/**
+ * srpt_build_tskmgmt_rsp() - Build a task management response.
+ * @ch: RDMA channel through which the request has been received.
+ * @ioctx: I/O context in which the SRP_RSP response will be built.
+ * @rsp_code: RSP_CODE that will be stored in the response.
+ * @tag: Tag of the request for which this response is being generated.
+ *
+ * Returns the size in bytes of the SRP_RSP response.
+ *
+ * An SRP_RSP response contains a SCSI status or service response. See also
+ * section 6.9 in the SRP r16a document for the format of an SRP_RSP
+ * response.
+ */
+static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx,
+ u8 rsp_code, u64 tag)
+{
+ struct srp_rsp *srp_rsp;
+ int resp_data_len;
+ int resp_len;
+
+ resp_data_len = (rsp_code == SRP_TSK_MGMT_SUCCESS) ? 0 : 4;
+ resp_len = sizeof(*srp_rsp) + resp_data_len;
+
+ srp_rsp = ioctx->ioctx.buf;
+ BUG_ON(!srp_rsp);
+ memset(srp_rsp, 0, sizeof *srp_rsp);
+
+ srp_rsp->opcode = SRP_RSP;
+ srp_rsp->req_lim_delta = __constant_cpu_to_be32(1
+ + atomic_xchg(&ch->req_lim_delta, 0));
+ srp_rsp->tag = tag;
+
+ if (rsp_code != SRP_TSK_MGMT_SUCCESS) {
+ srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
+ srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
+ srp_rsp->data[3] = rsp_code;
+ }
+
+ return resp_len;
+}
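+
+/*
+ * Note: for a failed task management function the response built above
+ * carries SRP_RSP_FLAG_RSPVALID plus four bytes of response data with the
+ * RSP_CODE stored in data[3]; a successful function yields a bare SRP_RSP
+ * header without response data.
+ */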
+
+#define NO_SUCH_LUN ((uint64_t)-1LL)
+
+/*
+ * SCSI LUN addressing method. See also SAM-2 and the section about
+ * eight byte LUNs.
+ */
+enum scsi_lun_addr_method {
+ SCSI_LUN_ADDR_METHOD_PERIPHERAL = 0,
+ SCSI_LUN_ADDR_METHOD_FLAT = 1,
+ SCSI_LUN_ADDR_METHOD_LUN = 2,
+ SCSI_LUN_ADDR_METHOD_EXTENDED_LUN = 3,
+};
+
+/*
+ * srpt_unpack_lun() - Convert from network LUN to linear LUN.
+ *
+ * Convert a 2-byte, 4-byte, 6-byte or 8-byte LUN structure in network byte
+ * order (big endian) to a linear LUN. Supports three LUN addressing methods:
+ * peripheral, flat and logical unit. See also SAM-2, section 4.9.4 (page 40).
+ */
+static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
+{
+ uint64_t res = NO_SUCH_LUN;
+ int addressing_method;
+
+ if (unlikely(len < 2)) {
+ printk(KERN_ERR "Illegal LUN length %d, expected 2 bytes or "
+ "more", len);
+ goto out;
+ }
+
+ switch (len) {
+ case 8:
+ if ((*((__be64 *)lun) &
+ __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
+ goto out_err;
+ break;
+ case 4:
+ if (*((__be16 *)&lun[2]) != 0)
+ goto out_err;
+ break;
+ case 6:
+ if (*((__be32 *)&lun[2]) != 0)
+ goto out_err;
+ break;
+ case 2:
+ break;
+ default:
+ goto out_err;
+ }
+
+ addressing_method = (*lun) >> 6; /* highest two bits of byte 0 */
+ switch (addressing_method) {
+ case SCSI_LUN_ADDR_METHOD_PERIPHERAL:
+ case SCSI_LUN_ADDR_METHOD_FLAT:
+ case SCSI_LUN_ADDR_METHOD_LUN:
+ res = *(lun + 1) | (((*lun) & 0x3f) << 8);
+ break;
+
+ case SCSI_LUN_ADDR_METHOD_EXTENDED_LUN:
+ default:
+ printk(KERN_ERR "Unimplemented LUN addressing method %u",
+ addressing_method);
+ break;
+ }
+
+out:
+ return res;
+
+out_err:
+ printk(KERN_ERR "Support for multi-level LUNs has not yet been"
+ " implemented");
+ goto out;
+}
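+
+/*
+ * Example: the two-byte LUN 0x00 0x02 uses the peripheral device addressing
+ * method (the two most significant bits of byte 0 are zero) and unpacks to
+ * linear LUN 2, while 0x40 0x05 uses the flat space addressing method and
+ * unpacks to linear LUN 5.
+ */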
+
+static int srpt_check_stop_free(struct se_cmd *cmd)
+{
+ struct srpt_send_ioctx *ioctx;
+
+ ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
+ return kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+}
+
+/**
+ * srpt_handle_cmd() - Process SRP_CMD.
+ */
+static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
+ struct srpt_recv_ioctx *recv_ioctx,
+ struct srpt_send_ioctx *send_ioctx)
+{
+ struct se_cmd *cmd;
+ struct srp_cmd *srp_cmd;
+ uint64_t unpacked_lun;
+ u64 data_len;
+ enum dma_data_direction dir;
+ int ret;
+
+ BUG_ON(!send_ioctx);
+
+ srp_cmd = recv_ioctx->ioctx.buf;
+ kref_get(&send_ioctx->kref);
+ cmd = &send_ioctx->cmd;
+ send_ioctx->tag = srp_cmd->tag;
+
+ switch (srp_cmd->task_attr) {
+ case SRP_CMD_SIMPLE_Q:
+ cmd->sam_task_attr = MSG_SIMPLE_TAG;
+ break;
+ case SRP_CMD_ORDERED_Q:
+ default:
+ cmd->sam_task_attr = MSG_ORDERED_TAG;
+ break;
+ case SRP_CMD_HEAD_OF_Q:
+ cmd->sam_task_attr = MSG_HEAD_TAG;
+ break;
+ case SRP_CMD_ACA:
+ cmd->sam_task_attr = MSG_ACA_TAG;
+ break;
+ }
+
+ ret = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len);
+ if (ret) {
+ printk(KERN_ERR "0x%llx: parsing SRP descriptor table failed.\n",
+ srp_cmd->tag);
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ goto send_sense;
+ }
+
+ cmd->data_length = data_len;
+ cmd->data_direction = dir;
+ unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun,
+ sizeof(srp_cmd->lun));
+ if (transport_lookup_cmd_lun(cmd, unpacked_lun) < 0)
+ goto send_sense;
+ ret = transport_generic_allocate_tasks(cmd, srp_cmd->cdb);
+ if (cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
+ srpt_queue_status(cmd);
+ else if (cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION)
+ goto send_sense;
+ else
+ WARN_ON_ONCE(ret);
+
+ transport_handle_cdb_direct(cmd);
+ return 0;
+
+send_sense:
+ transport_send_check_condition_and_sense(cmd, cmd->scsi_sense_reason,
+ 0);
+ return -1;
+}
+
+/**
+ * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
+ * @ioctx: I/O context of the SRP task management request.
+ * @tag: Tag of the SRP command to look up.
+ *
+ * Returns zero if the target core will process the task management
+ * request asynchronously.
+ *
+ * Note: It is assumed that the initiator serializes tag-based task management
+ * requests.
+ */
+static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
+{
+ struct srpt_device *sdev;
+ struct srpt_rdma_ch *ch;
+ struct srpt_send_ioctx *target;
+ int ret, i;
+
+ ret = -EINVAL;
+ ch = ioctx->ch;
+ BUG_ON(!ch);
+ BUG_ON(!ch->sport);
+ sdev = ch->sport->sdev;
+ BUG_ON(!sdev);
+ spin_lock_irq(&sdev->spinlock);
+ for (i = 0; i < ch->rq_size; ++i) {
+ target = ch->ioctx_ring[i];
+ if (target->cmd.se_lun == ioctx->cmd.se_lun &&
+ target->tag == tag &&
+ srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
+ ret = 0;
+ /* now let the target core abort &target->cmd; */
+ break;
+ }
+ }
+ spin_unlock_irq(&sdev->spinlock);
+ return ret;
+}
+
+static int srp_tmr_to_tcm(int fn)
+{
+ switch (fn) {
+ case SRP_TSK_ABORT_TASK:
+ return TMR_ABORT_TASK;
+ case SRP_TSK_ABORT_TASK_SET:
+ return TMR_ABORT_TASK_SET;
+ case SRP_TSK_CLEAR_TASK_SET:
+ return TMR_CLEAR_TASK_SET;
+ case SRP_TSK_LUN_RESET:
+ return TMR_LUN_RESET;
+ case SRP_TSK_CLEAR_ACA:
+ return TMR_CLEAR_ACA;
+ default:
+ return -1;
+ }
+}
+
+/**
+ * srpt_handle_tsk_mgmt() - Process an SRP_TSK_MGMT information unit.
+ *
+ * Either hands the task management request off to the target core for
+ * asynchronous processing or sends a check condition response to the
+ * initiator.
+ *
+ * For more information about SRP_TSK_MGMT information units, see also section
+ * 6.7 in the SRP r16a document.
+ */
+static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
+ struct srpt_recv_ioctx *recv_ioctx,
+ struct srpt_send_ioctx *send_ioctx)
+{
+ struct srp_tsk_mgmt *srp_tsk;
+ struct se_cmd *cmd;
+ uint64_t unpacked_lun;
+ int tcm_tmr;
+ int res;
+
+ BUG_ON(!send_ioctx);
+
+ srp_tsk = recv_ioctx->ioctx.buf;
+ cmd = &send_ioctx->cmd;
+
+ pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld"
+ " cm_id %p sess %p\n", srp_tsk->tsk_mgmt_func,
+ srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess);
+
+ srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
+ send_ioctx->tag = srp_tsk->tag;
+ tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
+ if (tcm_tmr < 0) {
+ send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ send_ioctx->cmd.se_tmr_req->response =
+ TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
+ goto process_tmr;
+ }
+ cmd->se_tmr_req = core_tmr_alloc_req(cmd, NULL, tcm_tmr, GFP_KERNEL);
+ if (!cmd->se_tmr_req) {
+ send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
+ goto process_tmr;
+ }
+
+ unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
+ sizeof(srp_tsk->lun));
+ res = transport_lookup_tmr_lun(&send_ioctx->cmd, unpacked_lun);
+ if (res) {
+ pr_debug("rejecting TMR for LUN %lld\n", unpacked_lun);
+ send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ send_ioctx->cmd.se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
+ goto process_tmr;
+ }
+
+ if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK)
+ srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
+
+process_tmr:
+ kref_get(&send_ioctx->kref);
+ if (!(send_ioctx->cmd.se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
+ transport_generic_handle_tmr(&send_ioctx->cmd);
+ else
+ transport_send_check_condition_and_sense(cmd,
+ cmd->scsi_sense_reason, 0);
+
+}
+
+/**
+ * srpt_handle_new_iu() - Process a newly received information unit.
+ * @ch: RDMA channel through which the information unit has been received.
+ * @ioctx: SRPT I/O context associated with the information unit.
+ */
+static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
+ struct srpt_recv_ioctx *recv_ioctx,
+ struct srpt_send_ioctx *send_ioctx)
+{
+ struct srp_cmd *srp_cmd;
+ enum rdma_ch_state ch_state;
+
+ BUG_ON(!ch);
+ BUG_ON(!recv_ioctx);
+
+ ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
+ recv_ioctx->ioctx.dma, srp_max_req_size,
+ DMA_FROM_DEVICE);
+
+ ch_state = srpt_get_ch_state(ch);
+ if (unlikely(ch_state == CH_CONNECTING)) {
+ list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
+ goto out;
+ }
+
+ if (unlikely(ch_state != CH_LIVE))
+ goto out;
+
+ srp_cmd = recv_ioctx->ioctx.buf;
+ if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) {
+ if (!send_ioctx)
+ send_ioctx = srpt_get_send_ioctx(ch);
+ if (unlikely(!send_ioctx)) {
+ list_add_tail(&recv_ioctx->wait_list,
+ &ch->cmd_wait_list);
+ goto out;
+ }
+ }
+
+ transport_init_se_cmd(&send_ioctx->cmd, &srpt_target->tf_ops, ch->sess,
+ 0, DMA_NONE, MSG_SIMPLE_TAG,
+ send_ioctx->sense_data);
+
+ switch (srp_cmd->opcode) {
+ case SRP_CMD:
+ srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
+ break;
+ case SRP_TSK_MGMT:
+ srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
+ break;
+ case SRP_I_LOGOUT:
+ printk(KERN_ERR "Not yet implemented: SRP_I_LOGOUT\n");
+ break;
+ case SRP_CRED_RSP:
+ pr_debug("received SRP_CRED_RSP\n");
+ break;
+ case SRP_AER_RSP:
+ pr_debug("received SRP_AER_RSP\n");
+ break;
+ case SRP_RSP:
+ printk(KERN_ERR "Received SRP_RSP\n");
+ break;
+ default:
+ printk(KERN_ERR "received IU with unknown opcode 0x%x\n",
+ srp_cmd->opcode);
+ break;
+ }
+
+ srpt_post_recv(ch->sport->sdev, recv_ioctx);
+out:
+ return;
+}
+
+static void srpt_process_rcv_completion(struct ib_cq *cq,
+ struct srpt_rdma_ch *ch,
+ struct ib_wc *wc)
+{
+ struct srpt_device *sdev = ch->sport->sdev;
+ struct srpt_recv_ioctx *ioctx;
+ u32 index;
+
+ index = idx_from_wr_id(wc->wr_id);
+ if (wc->status == IB_WC_SUCCESS) {
+ int req_lim;
+
+ req_lim = atomic_dec_return(&ch->req_lim);
+ if (unlikely(req_lim < 0))
+ printk(KERN_ERR "req_lim = %d < 0\n", req_lim);
+ ioctx = sdev->ioctx_ring[index];
+ srpt_handle_new_iu(ch, ioctx, NULL);
+ } else {
+ printk(KERN_INFO "receiving failed for idx %u with status %d\n",
+ index, wc->status);
+ }
+}
+
+/**
+ * srpt_process_send_completion() - Process an IB send completion.
+ *
+ * Note: Although this has not yet been observed during tests, at least in
+ * theory it is possible that the srpt_get_send_ioctx() call invoked by
+ * srpt_handle_new_iu() fails. This is possible because the req_lim_delta
+ * value in each response is set to one, and it is possible that this response
+ * makes the initiator send a new request before the send completion for that
+ * response has been processed. This could e.g. happen if the call to
+ * srpt_put_send_ioctx() is delayed because of a higher priority interrupt or
+ * if IB retransmission causes generation of the send completion to be
+ * delayed. Incoming information units for which srpt_get_send_ioctx() fails
+ * are queued on cmd_wait_list. The code below processes these delayed
+ * requests one at a time.
+ */
+static void srpt_process_send_completion(struct ib_cq *cq,
+ struct srpt_rdma_ch *ch,
+ struct ib_wc *wc)
+{
+ struct srpt_send_ioctx *send_ioctx;
+ uint32_t index;
+ enum srpt_opcode opcode;
+
+ index = idx_from_wr_id(wc->wr_id);
+ opcode = opcode_from_wr_id(wc->wr_id);
+ send_ioctx = ch->ioctx_ring[index];
+ if (wc->status == IB_WC_SUCCESS) {
+ if (opcode == SRPT_SEND)
+ srpt_handle_send_comp(ch, send_ioctx);
+ else {
+ WARN_ON(opcode != SRPT_RDMA_ABORT &&
+ wc->opcode != IB_WC_RDMA_READ);
+ srpt_handle_rdma_comp(ch, send_ioctx, opcode);
+ }
+ } else {
+ if (opcode == SRPT_SEND) {
+ printk(KERN_INFO "sending response for idx %u failed"
+ " with status %d\n", index, wc->status);
+ srpt_handle_send_err_comp(ch, wc->wr_id);
+ } else if (opcode != SRPT_RDMA_MID) {
+ printk(KERN_INFO "RDMA t %d for idx %u failed with"
+ " status %d", opcode, index, wc->status);
+ srpt_handle_rdma_err_comp(ch, send_ioctx, opcode);
+ }
+ }
+
+ while (unlikely(opcode == SRPT_SEND
+ && !list_empty(&ch->cmd_wait_list)
+ && srpt_get_ch_state(ch) == CH_LIVE
+ && (send_ioctx = srpt_get_send_ioctx(ch)) != NULL)) {
+ struct srpt_recv_ioctx *recv_ioctx;
+
+ recv_ioctx = list_first_entry(&ch->cmd_wait_list,
+ struct srpt_recv_ioctx,
+ wait_list);
+ list_del(&recv_ioctx->wait_list);
+ srpt_handle_new_iu(ch, recv_ioctx, send_ioctx);
+ }
+}
+
+static void srpt_process_completion(struct ib_cq *cq, struct srpt_rdma_ch *ch)
+{
+ struct ib_wc *const wc = ch->wc;
+ int i, n;
+
+ WARN_ON(cq != ch->cq);
+
+ ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+ while ((n = ib_poll_cq(cq, ARRAY_SIZE(ch->wc), wc)) > 0) {
+ for (i = 0; i < n; i++) {
+ if (opcode_from_wr_id(wc[i].wr_id) == SRPT_RECV)
+ srpt_process_rcv_completion(cq, ch, &wc[i]);
+ else
+ srpt_process_send_completion(cq, ch, &wc[i]);
+ }
+ }
+}
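+
+/*
+ * Note: srpt_process_completion() rearms the completion queue with
+ * IB_CQ_NEXT_COMP before draining it, so a completion that arrives while
+ * ib_poll_cq() is still running triggers srpt_completion() again, which
+ * wakes up the completion thread and causes another pass through this
+ * function.
+ */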
+
+/**
+ * srpt_completion() - IB completion queue callback function.
+ *
+ * Notes:
+ * - It is guaranteed that a completion handler will never be invoked
+ * concurrently on two different CPUs for the same completion queue. See also
+ * Documentation/infiniband/core_locking.txt and the implementation of
+ * handle_edge_irq() in kernel/irq/chip.c.
+ * - When threaded IRQs are enabled, completion handlers are invoked in thread
+ * context instead of interrupt context.
+ */
+static void srpt_completion(struct ib_cq *cq, void *ctx)
+{
+ struct srpt_rdma_ch *ch = ctx;
+
+ wake_up_interruptible(&ch->wait_queue);
+}
+
+static int srpt_compl_thread(void *arg)
+{
+ struct srpt_rdma_ch *ch;
+
+ /* Hibernation / freezing of the SRPT kernel thread is not supported. */
+ current->flags |= PF_NOFREEZE;
+
+ ch = arg;
+ BUG_ON(!ch);
+ printk(KERN_INFO "Session %s: kernel thread %s (PID %d) started\n",
+ ch->sess_name, ch->thread->comm, current->pid);
+ while (!kthread_should_stop()) {
+ wait_event_interruptible(ch->wait_queue,
+ (srpt_process_completion(ch->cq, ch),
+ kthread_should_stop()));
+ }
+ printk(KERN_INFO "Session %s: kernel thread %s (PID %d) stopped\n",
+ ch->sess_name, ch->thread->comm, current->pid);
+ return 0;
+}
+
+/**
+ * srpt_create_ch_ib() - Create receive and send completion queues.
+ */
+static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
+{
+ struct ib_qp_init_attr *qp_init;
+ struct srpt_port *sport = ch->sport;
+ struct srpt_device *sdev = sport->sdev;
+ u32 srp_sq_size = sport->port_attrib.srp_sq_size;
+ int ret;
+
+ WARN_ON(ch->rq_size < 1);
+
+ ret = -ENOMEM;
+ qp_init = kzalloc(sizeof *qp_init, GFP_KERNEL);
+ if (!qp_init)
+ goto out;
+
+ ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch,
+ ch->rq_size + srp_sq_size, 0);
+ if (IS_ERR(ch->cq)) {
+ ret = PTR_ERR(ch->cq);
+ printk(KERN_ERR "failed to create CQ cqe= %d ret= %d\n",
+ ch->rq_size + srp_sq_size, ret);
+ goto out;
+ }
+
+ qp_init->qp_context = (void *)ch;
+ qp_init->event_handler
+ = (void(*)(struct ib_event *, void*))srpt_qp_event;
+ qp_init->send_cq = ch->cq;
+ qp_init->recv_cq = ch->cq;
+ qp_init->srq = sdev->srq;
+ qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
+ qp_init->qp_type = IB_QPT_RC;
+ qp_init->cap.max_send_wr = srp_sq_size;
+ qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
+
+ ch->qp = ib_create_qp(sdev->pd, qp_init);
+ if (IS_ERR(ch->qp)) {
+ ret = PTR_ERR(ch->qp);
+ printk(KERN_ERR "failed to create_qp ret= %d\n", ret);
+ goto err_destroy_cq;
+ }
+
+ atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
+
+ pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
+ __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
+ qp_init->cap.max_send_wr, ch->cm_id);
+
+ ret = srpt_init_ch_qp(ch, ch->qp);
+ if (ret)
+ goto err_destroy_qp;
+
+ init_waitqueue_head(&ch->wait_queue);
+
+ pr_debug("creating thread for session %s\n", ch->sess_name);
+
+ ch->thread = kthread_run(srpt_compl_thread, ch, "ib_srpt_compl");
+ if (IS_ERR(ch->thread)) {
+ printk(KERN_ERR "failed to create kernel thread %ld\n",
+ PTR_ERR(ch->thread));
+ ch->thread = NULL;
+ goto err_destroy_qp;
+ }
+
+out:
+ kfree(qp_init);
+ return ret;
+
+err_destroy_qp:
+ ib_destroy_qp(ch->qp);
+err_destroy_cq:
+ ib_destroy_cq(ch->cq);
+ goto out;
+}
+
+static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
+{
+ if (ch->thread)
+ kthread_stop(ch->thread);
+
+ ib_destroy_qp(ch->qp);
+ ib_destroy_cq(ch->cq);
+}
+
+/**
+ * __srpt_close_ch() - Close an RDMA channel by setting the QP error state.
+ *
+ * Reset the QP and make sure all resources associated with the channel will
+ * be deallocated at an appropriate time.
+ *
+ * Note: The caller must hold ch->sport->sdev->spinlock.
+ */
+static void __srpt_close_ch(struct srpt_rdma_ch *ch)
+{
+ struct srpt_device *sdev;
+ enum rdma_ch_state prev_state;
+ unsigned long flags;
+
+ sdev = ch->sport->sdev;
+
+ spin_lock_irqsave(&ch->spinlock, flags);
+ prev_state = ch->state;
+ switch (prev_state) {
+ case CH_CONNECTING:
+ case CH_LIVE:
+ ch->state = CH_DISCONNECTING;
+ break;
+ default:
+ break;
+ }
+ spin_unlock_irqrestore(&ch->spinlock, flags);
+
+ switch (prev_state) {
+ case CH_CONNECTING:
+ ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0,
+ NULL, 0);
+ /* fall through */
+ case CH_LIVE:
+ if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0)
+ printk(KERN_ERR "sending CM DREQ failed.\n");
+ break;
+ case CH_DISCONNECTING:
+ break;
+ case CH_DRAINING:
+ case CH_RELEASING:
+ break;
+ }
+}
+
+/**
+ * srpt_close_ch() - Close an RDMA channel.
+ */
+static void srpt_close_ch(struct srpt_rdma_ch *ch)
+{
+ struct srpt_device *sdev;
+
+ sdev = ch->sport->sdev;
+ spin_lock_irq(&sdev->spinlock);
+ __srpt_close_ch(ch);
+ spin_unlock_irq(&sdev->spinlock);
+}
+
+/**
+ * srpt_drain_channel() - Drain a channel by resetting the IB queue pair.
+ * @cm_id: Pointer to the CM ID of the channel to be drained.
+ *
+ * Note: Must be called from inside srpt_cm_handler to avoid a race between
+ * accessing sdev->spinlock and the call to kfree(sdev) in srpt_remove_one()
+ * (the caller of srpt_cm_handler holds the cm_id spinlock; srpt_remove_one()
+ * waits until all target sessions for the associated IB device have been
+ * unregistered and target session registration involves a call to
+ * ib_destroy_cm_id(), which locks the cm_id spinlock and hence waits until
+ * this function has finished).
+ */
+static void srpt_drain_channel(struct ib_cm_id *cm_id)
+{
+ struct srpt_device *sdev;
+ struct srpt_rdma_ch *ch;
+ int ret;
+ bool do_reset = false;
+
+ WARN_ON_ONCE(irqs_disabled());
+
+ sdev = cm_id->context;
+ BUG_ON(!sdev);
+ spin_lock_irq(&sdev->spinlock);
+ list_for_each_entry(ch, &sdev->rch_list, list) {
+ if (ch->cm_id == cm_id) {
+ do_reset = srpt_test_and_set_ch_state(ch,
+ CH_CONNECTING, CH_DRAINING) ||
+ srpt_test_and_set_ch_state(ch,
+ CH_LIVE, CH_DRAINING) ||
+ srpt_test_and_set_ch_state(ch,
+ CH_DISCONNECTING, CH_DRAINING);
+ break;
+ }
+ }
+ spin_unlock_irq(&sdev->spinlock);
+
+ if (do_reset) {
+ ret = srpt_ch_qp_err(ch);
+ if (ret < 0)
+ printk(KERN_ERR "Setting queue pair in error state"
+ " failed: %d\n", ret);
+ }
+}
+
+/**
+ * srpt_find_channel() - Look up an RDMA channel.
+ * @cm_id: Pointer to the CM ID of the channel to be looked up.
+ *
+ * Return NULL if no matching RDMA channel has been found.
+ */
+static struct srpt_rdma_ch *srpt_find_channel(struct srpt_device *sdev,
+ struct ib_cm_id *cm_id)
+{
+ struct srpt_rdma_ch *ch;
+ bool found;
+
+ WARN_ON_ONCE(irqs_disabled());
+ BUG_ON(!sdev);
+
+ found = false;
+ spin_lock_irq(&sdev->spinlock);
+ list_for_each_entry(ch, &sdev->rch_list, list) {
+ if (ch->cm_id == cm_id) {
+ found = true;
+ break;
+ }
+ }
+ spin_unlock_irq(&sdev->spinlock);
+
+ return found ? ch : NULL;
+}
+
+/**
+ * srpt_release_channel() - Release channel resources.
+ *
+ * Schedules the actual release because:
+ * - Calling ib_destroy_cm_id() from inside an IB CM callback would trigger
+ *   a deadlock.
+ * - It is not safe to call TCM transport_* functions from interrupt context.
+ */
+static void srpt_release_channel(struct srpt_rdma_ch *ch)
+{
+ schedule_work(&ch->release_work);
+}
+
+static void srpt_release_channel_work(struct work_struct *w)
+{
+ struct srpt_rdma_ch *ch;
+ struct srpt_device *sdev;
+
+ ch = container_of(w, struct srpt_rdma_ch, release_work);
+ pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess,
+ ch->release_done);
+
+ sdev = ch->sport->sdev;
+ BUG_ON(!sdev);
+
+ transport_deregister_session_configfs(ch->sess);
+ transport_deregister_session(ch->sess);
+ ch->sess = NULL;
+
+ srpt_destroy_ch_ib(ch);
+
+ srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
+ ch->sport->sdev, ch->rq_size,
+ ch->rsp_size, DMA_TO_DEVICE);
+
+ spin_lock_irq(&sdev->spinlock);
+ list_del(&ch->list);
+ spin_unlock_irq(&sdev->spinlock);
+
+ ib_destroy_cm_id(ch->cm_id);
+
+ if (ch->release_done)
+ complete(ch->release_done);
+
+ wake_up(&sdev->ch_releaseQ);
+
+ kfree(ch);
+}
+
+static struct srpt_node_acl *__srpt_lookup_acl(struct srpt_port *sport,
+ u8 i_port_id[16])
+{
+ struct srpt_node_acl *nacl;
+
+ list_for_each_entry(nacl, &sport->port_acl_list, list)
+ if (memcmp(nacl->i_port_id, i_port_id,
+ sizeof(nacl->i_port_id)) == 0)
+ return nacl;
+
+ return NULL;
+}
+
+static struct srpt_node_acl *srpt_lookup_acl(struct srpt_port *sport,
+ u8 i_port_id[16])
+{
+ struct srpt_node_acl *nacl;
+
+ spin_lock_irq(&sport->port_acl_lock);
+ nacl = __srpt_lookup_acl(sport, i_port_id);
+ spin_unlock_irq(&sport->port_acl_lock);
+
+ return nacl;
+}
+
+/**
+ * srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED.
+ *
+ * Ownership of the cm_id is transferred to the target session if this
+ * function returns zero. Otherwise the caller remains the owner of cm_id.
+ */
+static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
+ struct ib_cm_req_event_param *param,
+ void *private_data)
+{
+ struct srpt_device *sdev = cm_id->context;
+ struct srpt_port *sport = &sdev->port[param->port - 1];
+ struct srp_login_req *req;
+ struct srp_login_rsp *rsp;
+ struct srp_login_rej *rej;
+ struct ib_cm_rep_param *rep_param;
+ struct srpt_rdma_ch *ch, *tmp_ch;
+ struct srpt_node_acl *nacl;
+ u32 it_iu_len;
+ int i;
+ int ret = 0;
+
+ WARN_ON_ONCE(irqs_disabled());
+
+ if (WARN_ON(!sdev || !private_data))
+ return -EINVAL;
+
+ req = (struct srp_login_req *)private_data;
+
+ it_iu_len = be32_to_cpu(req->req_it_iu_len);
+
+ printk(KERN_INFO "Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx,"
+ " t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d"
+ " (guid=0x%llx:0x%llx)\n",
+ be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]),
+ be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]),
+ be64_to_cpu(*(__be64 *)&req->target_port_id[0]),
+ be64_to_cpu(*(__be64 *)&req->target_port_id[8]),
+ it_iu_len,
+ param->port,
+ be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
+ be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
+
+ rsp = kzalloc(sizeof *rsp, GFP_KERNEL);
+ rej = kzalloc(sizeof *rej, GFP_KERNEL);
+ rep_param = kzalloc(sizeof *rep_param, GFP_KERNEL);
+
+ if (!rsp || !rej || !rep_param) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
+ rej->reason = __constant_cpu_to_be32(
+ SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
+ ret = -EINVAL;
+ printk(KERN_ERR "rejected SRP_LOGIN_REQ because its"
+ " length (%d bytes) is out of range (%d .. %d)\n",
+ it_iu_len, 64, srp_max_req_size);
+ goto reject;
+ }
+
+ if (!sport->enabled) {
+ rej->reason = __constant_cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ ret = -EINVAL;
+ printk(KERN_ERR "rejected SRP_LOGIN_REQ because the target port"
+ " has not yet been enabled\n");
+ goto reject;
+ }
+
+ if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
+ rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
+
+ spin_lock_irq(&sdev->spinlock);
+
+ list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
+ if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
+ && !memcmp(ch->t_port_id, req->target_port_id, 16)
+ && param->port == ch->sport->port
+ && param->listen_id == ch->sport->sdev->cm_id
+ && ch->cm_id) {
+ enum rdma_ch_state ch_state;
+
+ ch_state = srpt_get_ch_state(ch);
+ if (ch_state != CH_CONNECTING
+ && ch_state != CH_LIVE)
+ continue;
+
+ /* found an existing channel */
+ pr_debug("Found existing channel %s"
+ " cm_id= %p state= %d\n",
+ ch->sess_name, ch->cm_id, ch_state);
+
+ __srpt_close_ch(ch);
+
+ rsp->rsp_flags =
+ SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
+ }
+ }
+
+ spin_unlock_irq(&sdev->spinlock);
+
+ } else
+ rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
+
+ if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
+ || *(__be64 *)(req->target_port_id + 8) !=
+ cpu_to_be64(srpt_service_guid)) {
+ rej->reason = __constant_cpu_to_be32(
+ SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
+ ret = -ENOMEM;
+ printk(KERN_ERR "rejected SRP_LOGIN_REQ because it"
+ " has an invalid target port identifier.\n");
+ goto reject;
+ }
+
+ ch = kzalloc(sizeof *ch, GFP_KERNEL);
+ if (!ch) {
+ rej->reason = __constant_cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ printk(KERN_ERR "rejected SRP_LOGIN_REQ because out of memory.\n");
+ ret = -ENOMEM;
+ goto reject;
+ }
+
+ INIT_WORK(&ch->release_work, srpt_release_channel_work);
+ memcpy(ch->i_port_id, req->initiator_port_id, 16);
+ memcpy(ch->t_port_id, req->target_port_id, 16);
+ ch->sport = &sdev->port[param->port - 1];
+ ch->cm_id = cm_id;
+ /*
+ * Avoid QUEUE_FULL conditions by limiting the number of buffers used
+ * for the SRP protocol to the command queue size.
+ */
+ ch->rq_size = SRPT_RQ_SIZE;
+ spin_lock_init(&ch->spinlock);
+ ch->state = CH_CONNECTING;
+ INIT_LIST_HEAD(&ch->cmd_wait_list);
+ ch->rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
+
+ ch->ioctx_ring = (struct srpt_send_ioctx **)
+ srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
+ sizeof(*ch->ioctx_ring[0]),
+ ch->rsp_size, DMA_TO_DEVICE);
+ if (!ch->ioctx_ring)
+ goto free_ch;
+
+ INIT_LIST_HEAD(&ch->free_list);
+ for (i = 0; i < ch->rq_size; i++) {
+ ch->ioctx_ring[i]->ch = ch;
+ list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
+ }
+
+ ret = srpt_create_ch_ib(ch);
+ if (ret) {
+ rej->reason = __constant_cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ printk(KERN_ERR "rejected SRP_LOGIN_REQ because creating"
+ " a new RDMA channel failed.\n");
+ goto free_ring;
+ }
+
+ ret = srpt_ch_qp_rtr(ch, ch->qp);
+ if (ret) {
+ rej->reason = __constant_cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ printk(KERN_ERR "rejected SRP_LOGIN_REQ because enabling"
+ " RTR failed (error code = %d)\n", ret);
+ goto destroy_ib;
+ }
+ /*
+ * Use the initiator port identifier as the session name.
+ */
+ snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx",
+ be64_to_cpu(*(__be64 *)ch->i_port_id),
+ be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
+
+ pr_debug("registering session %s\n", ch->sess_name);
+
+ nacl = srpt_lookup_acl(sport, ch->i_port_id);
+ if (!nacl) {
+ printk(KERN_INFO "Rejected login because no ACL has been"
+ " configured yet for initiator %s.\n", ch->sess_name);
+ rej->reason = __constant_cpu_to_be32(
+ SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
+ goto destroy_ib;
+ }
+
+ ch->sess = transport_init_session();
+ if (IS_ERR(ch->sess)) {
+ rej->reason = __constant_cpu_to_be32(
+ SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+ pr_debug("Failed to create session\n");
+ goto deregister_session;
+ }
+ ch->sess->se_node_acl = &nacl->nacl;
+ transport_register_session(&sport->port_tpg_1, &nacl->nacl, ch->sess, ch);
+
+ pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
+ ch->sess_name, ch->cm_id);
+
+ /* create srp_login_response */
+ rsp->opcode = SRP_LOGIN_RSP;
+ rsp->tag = req->tag;
+ rsp->max_it_iu_len = req->req_it_iu_len;
+ rsp->max_ti_iu_len = req->req_it_iu_len;
+ ch->max_ti_iu_len = it_iu_len;
+ rsp->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
+ | SRP_BUF_FORMAT_INDIRECT);
+ rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
+ atomic_set(&ch->req_lim, ch->rq_size);
+ atomic_set(&ch->req_lim_delta, 0);
+
+ /* create cm reply */
+ rep_param->qp_num = ch->qp->qp_num;
+ rep_param->private_data = (void *)rsp;
+ rep_param->private_data_len = sizeof *rsp;
+ rep_param->rnr_retry_count = 7;
+ rep_param->flow_control = 1;
+ rep_param->failover_accepted = 0;
+ rep_param->srq = 1;
+ rep_param->responder_resources = 4;
+ rep_param->initiator_depth = 4;
+
+ ret = ib_send_cm_rep(cm_id, rep_param);
+ if (ret) {
+ printk(KERN_ERR "sending SRP_LOGIN_REQ response failed"
+ " (error code = %d)\n", ret);
+ goto release_channel;
+ }
+
+ spin_lock_irq(&sdev->spinlock);
+ list_add_tail(&ch->list, &sdev->rch_list);
+ spin_unlock_irq(&sdev->spinlock);
+
+ goto out;
+
+release_channel:
+ srpt_set_ch_state(ch, CH_RELEASING);
+ transport_deregister_session_configfs(ch->sess);
+
+deregister_session:
+ transport_deregister_session(ch->sess);
+ ch->sess = NULL;
+
+destroy_ib:
+ srpt_destroy_ch_ib(ch);
+
+free_ring:
+ srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
+ ch->sport->sdev, ch->rq_size,
+ ch->rsp_size, DMA_TO_DEVICE);
+free_ch:
+ kfree(ch);
+
+reject:
+ rej->opcode = SRP_LOGIN_REJ;
+ rej->tag = req->tag;
+ rej->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
+ | SRP_BUF_FORMAT_INDIRECT);
+
+ ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
+ (void *)rej, sizeof *rej);
+
+out:
+ kfree(rep_param);
+ kfree(rsp);
+ kfree(rej);
+
+ return ret;
+}
+
+static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
+{
+ printk(KERN_INFO "Received IB REJ for cm_id %p.\n", cm_id);
+ srpt_drain_channel(cm_id);
+}
+
+/**
+ * srpt_cm_rtu_recv() - Process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event.
+ *
+ * An IB_CM_RTU_RECEIVED message indicates that the connection is established
+ * and that the recipient may begin transmitting (RTU = ready to use).
+ */
+static void srpt_cm_rtu_recv(struct ib_cm_id *cm_id)
+{
+ struct srpt_rdma_ch *ch;
+ int ret;
+
+ ch = srpt_find_channel(cm_id->context, cm_id);
+ BUG_ON(!ch);
+
+ if (srpt_test_and_set_ch_state(ch, CH_CONNECTING, CH_LIVE)) {
+ struct srpt_recv_ioctx *ioctx, *ioctx_tmp;
+
+ ret = srpt_ch_qp_rts(ch, ch->qp);
+
+ list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list,
+ wait_list) {
+ list_del(&ioctx->wait_list);
+ srpt_handle_new_iu(ch, ioctx, NULL);
+ }
+ if (ret)
+ srpt_close_ch(ch);
+ }
+}
+
+static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id)
+{
+ printk(KERN_INFO "Received IB TimeWait exit for cm_id %p.\n", cm_id);
+ srpt_drain_channel(cm_id);
+}
+
+static void srpt_cm_rep_error(struct ib_cm_id *cm_id)
+{
+ printk(KERN_INFO "Received IB REP error for cm_id %p.\n", cm_id);
+ srpt_drain_channel(cm_id);
+}
+
+/**
+ * srpt_cm_dreq_recv() - Process reception of a DREQ message.
+ */
+static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id)
+{
+ struct srpt_rdma_ch *ch;
+ unsigned long flags;
+ bool send_drep = false;
+
+ ch = srpt_find_channel(cm_id->context, cm_id);
+ BUG_ON(!ch);
+
+ pr_debug("cm_id= %p ch->state= %d\n", cm_id, srpt_get_ch_state(ch));
+
+ spin_lock_irqsave(&ch->spinlock, flags);
+ switch (ch->state) {
+ case CH_CONNECTING:
+ case CH_LIVE:
+ send_drep = true;
+ ch->state = CH_DISCONNECTING;
+ break;
+ case CH_DISCONNECTING:
+ case CH_DRAINING:
+ case CH_RELEASING:
+ WARN(true, "unexpected channel state %d\n", ch->state);
+ break;
+ }
+ spin_unlock_irqrestore(&ch->spinlock, flags);
+
+ if (send_drep) {
+ if (ib_send_cm_drep(ch->cm_id, NULL, 0) < 0)
+ printk(KERN_ERR "Sending IB DREP failed.\n");
+ printk(KERN_INFO "Received DREQ and sent DREP for session %s.\n",
+ ch->sess_name);
+ }
+}
+
+/**
+ * srpt_cm_drep_recv() - Process reception of a DREP message.
+ */
+static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
+{
+ printk(KERN_INFO "Received InfiniBand DREP message for cm_id %p.\n",
+ cm_id);
+ srpt_drain_channel(cm_id);
+}
+
+/**
+ * srpt_cm_handler() - IB connection manager callback function.
+ *
+ * A non-zero return value will cause the caller to destroy the CM ID.
+ *
+ * Note: srpt_cm_handler() must only return a non-zero value when the transfer
+ * of cm_id ownership to a channel by srpt_cm_req_recv() failed. Returning a
+ * non-zero value in any other case will trigger a race with the
+ * ib_destroy_cm_id() call in srpt_release_channel().
+ */
+static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+{
+ int ret;
+
+ ret = 0;
+ switch (event->event) {
+ case IB_CM_REQ_RECEIVED:
+ ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd,
+ event->private_data);
+ break;
+ case IB_CM_REJ_RECEIVED:
+ srpt_cm_rej_recv(cm_id);
+ break;
+ case IB_CM_RTU_RECEIVED:
+ case IB_CM_USER_ESTABLISHED:
+ srpt_cm_rtu_recv(cm_id);
+ break;
+ case IB_CM_DREQ_RECEIVED:
+ srpt_cm_dreq_recv(cm_id);
+ break;
+ case IB_CM_DREP_RECEIVED:
+ srpt_cm_drep_recv(cm_id);
+ break;
+ case IB_CM_TIMEWAIT_EXIT:
+ srpt_cm_timewait_exit(cm_id);
+ break;
+ case IB_CM_REP_ERROR:
+ srpt_cm_rep_error(cm_id);
+ break;
+ case IB_CM_DREQ_ERROR:
+ printk(KERN_INFO "Received IB DREQ ERROR event.\n");
+ break;
+ case IB_CM_MRA_RECEIVED:
+ printk(KERN_INFO "Received IB MRA event\n");
+ break;
+ default:
+ printk(KERN_ERR "received unrecognized IB CM event %d\n",
+ event->event);
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * srpt_perform_rdmas() - Perform IB RDMA.
+ *
+ * Returns zero upon success or a negative number upon failure.
+ */
+static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx)
+{
+ struct ib_send_wr wr;
+ struct ib_send_wr *bad_wr;
+ struct rdma_iu *riu;
+ int i;
+ int ret;
+ int sq_wr_avail;
+ enum dma_data_direction dir;
+ const int n_rdma = ioctx->n_rdma;
+
+ dir = ioctx->cmd.data_direction;
+ if (dir == DMA_TO_DEVICE) {
+ /* write */
+ ret = -ENOMEM;
+ sq_wr_avail = atomic_sub_return(n_rdma, &ch->sq_wr_avail);
+ if (sq_wr_avail < 0) {
+ printk(KERN_WARNING "IB send queue full (needed %d)\n",
+ n_rdma);
+ goto out;
+ }
+ }
+
+ ioctx->rdma_aborted = false;
+ ret = 0;
+ riu = ioctx->rdma_ius;
+ memset(&wr, 0, sizeof wr);
+
+ for (i = 0; i < n_rdma; ++i, ++riu) {
+ if (dir == DMA_FROM_DEVICE) {
+ wr.opcode = IB_WR_RDMA_WRITE;
+ wr.wr_id = encode_wr_id(i == n_rdma - 1 ?
+ SRPT_RDMA_WRITE_LAST :
+ SRPT_RDMA_MID,
+ ioctx->ioctx.index);
+ } else {
+ wr.opcode = IB_WR_RDMA_READ;
+ wr.wr_id = encode_wr_id(i == n_rdma - 1 ?
+ SRPT_RDMA_READ_LAST :
+ SRPT_RDMA_MID,
+ ioctx->ioctx.index);
+ }
+ wr.next = NULL;
+ wr.wr.rdma.remote_addr = riu->raddr;
+ wr.wr.rdma.rkey = riu->rkey;
+ wr.num_sge = riu->sge_cnt;
+ wr.sg_list = riu->sge;
+
+ /* only get completion event for the last rdma write */
+ if (i == (n_rdma - 1) && dir == DMA_TO_DEVICE)
+ wr.send_flags = IB_SEND_SIGNALED;
+
+ ret = ib_post_send(ch->qp, &wr, &bad_wr);
+ if (ret)
+ break;
+ }
+
+ if (ret)
+ printk(KERN_ERR "%s[%d]: ib_post_send() returned %d for %d/%d",
+ __func__, __LINE__, ret, i, n_rdma);
+ if (ret && i > 0) {
+ wr.num_sge = 0;
+ wr.wr_id = encode_wr_id(SRPT_RDMA_ABORT, ioctx->ioctx.index);
+ wr.send_flags = IB_SEND_SIGNALED;
+ while (ch->state == CH_LIVE &&
+ ib_post_send(ch->qp, &wr, &bad_wr) != 0) {
+ printk(KERN_INFO "Trying to abort failed RDMA transfer [%d]",
+ ioctx->ioctx.index);
+ msleep(1000);
+ }
+ while (ch->state != CH_RELEASING && !ioctx->rdma_aborted) {
+ printk(KERN_INFO "Waiting until RDMA abort finished [%d]",
+ ioctx->ioctx.index);
+ msleep(1000);
+ }
+ }
+out:
+ if (unlikely(dir == DMA_TO_DEVICE && ret < 0))
+ atomic_add(n_rdma, &ch->sq_wr_avail);
+ return ret;
+}
+
+/**
+ * srpt_xfer_data() - Start data transfer from initiator to target.
+ */
+static int srpt_xfer_data(struct srpt_rdma_ch *ch,
+ struct srpt_send_ioctx *ioctx)
+{
+ int ret;
+
+ ret = srpt_map_sg_to_ib_sge(ch, ioctx);
+ if (ret) {
+ printk(KERN_ERR "%s[%d] ret=%d\n", __func__, __LINE__, ret);
+ goto out;
+ }
+
+ ret = srpt_perform_rdmas(ch, ioctx);
+ if (ret) {
+ if (ret == -EAGAIN || ret == -ENOMEM)
+ printk(KERN_INFO "%s[%d] queue full -- ret=%d\n",
+ __func__, __LINE__, ret);
+ else
+ printk(KERN_ERR "%s[%d] fatal error -- ret=%d\n",
+ __func__, __LINE__, ret);
+ goto out_unmap;
+ }
+
+out:
+ return ret;
+out_unmap:
+ srpt_unmap_sg_to_ib_sge(ch, ioctx);
+ goto out;
+}
+
+static int srpt_write_pending_status(struct se_cmd *se_cmd)
+{
+ struct srpt_send_ioctx *ioctx;
+
+ ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
+ return srpt_get_cmd_state(ioctx) == SRPT_STATE_NEED_DATA;
+}
+
+/*
+ * srpt_write_pending() - Start data transfer from initiator to target (write).
+ */
+static int srpt_write_pending(struct se_cmd *se_cmd)
+{
+ struct srpt_rdma_ch *ch;
+ struct srpt_send_ioctx *ioctx;
+ enum srpt_command_state new_state;
+ enum rdma_ch_state ch_state;
+ int ret;
+
+ ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
+
+ new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
+ WARN_ON(new_state == SRPT_STATE_DONE);
+
+ ch = ioctx->ch;
+ BUG_ON(!ch);
+
+ ch_state = srpt_get_ch_state(ch);
+ switch (ch_state) {
+ case CH_CONNECTING:
+ WARN(true, "unexpected channel state %d\n", ch_state);
+ ret = -EINVAL;
+ goto out;
+ case CH_LIVE:
+ break;
+ case CH_DISCONNECTING:
+ case CH_DRAINING:
+ case CH_RELEASING:
+ pr_debug("cmd with tag %lld: channel disconnecting\n",
+ ioctx->tag);
+ srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = srpt_xfer_data(ch, ioctx);
+
+out:
+ return ret;
+}
+
+static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
+{
+ switch (tcm_mgmt_status) {
+ case TMR_FUNCTION_COMPLETE:
+ return SRP_TSK_MGMT_SUCCESS;
+ case TMR_FUNCTION_REJECTED:
+ return SRP_TSK_MGMT_FUNC_NOT_SUPP;
+ }
+ return SRP_TSK_MGMT_FAILED;
+}
+
+/**
+ * srpt_queue_response() - Transmits the response to a SCSI command.
+ *
+ * Callback function called by the TCM core. Must not block since it can be
+ * invoked in the context of the IB completion handler.
+ */
+static int srpt_queue_response(struct se_cmd *cmd)
+{
+ struct srpt_rdma_ch *ch;
+ struct srpt_send_ioctx *ioctx;
+ enum srpt_command_state state;
+ unsigned long flags;
+ int ret;
+ enum dma_data_direction dir;
+ int resp_len;
+ u8 srp_tm_status;
+
+ ret = 0;
+
+ ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
+ ch = ioctx->ch;
+ BUG_ON(!ch);
+
+ spin_lock_irqsave(&ioctx->spinlock, flags);
+ state = ioctx->state;
+ switch (state) {
+ case SRPT_STATE_NEW:
+ case SRPT_STATE_DATA_IN:
+ ioctx->state = SRPT_STATE_CMD_RSP_SENT;
+ break;
+ case SRPT_STATE_MGMT:
+ ioctx->state = SRPT_STATE_MGMT_RSP_SENT;
+ break;
+ default:
+ WARN(true, "ch %p; cmd %d: unexpected command state %d\n",
+ ch, ioctx->ioctx.index, ioctx->state);
+ break;
+ }
+ spin_unlock_irqrestore(&ioctx->spinlock, flags);
+
+ if (unlikely(transport_check_aborted_status(&ioctx->cmd, false)
+ || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) {
+ atomic_inc(&ch->req_lim_delta);
+ srpt_abort_cmd(ioctx);
+ goto out;
+ }
+
+ dir = ioctx->cmd.data_direction;
+
+ /* For read commands, transfer the data to the initiator. */
+ if (dir == DMA_FROM_DEVICE && ioctx->cmd.data_length &&
+ !ioctx->queue_status_only) {
+ ret = srpt_xfer_data(ch, ioctx);
+ if (ret) {
+ printk(KERN_ERR "xfer_data failed for tag %llu\n",
+ ioctx->tag);
+ goto out;
+ }
+ }
+
+ if (state != SRPT_STATE_MGMT)
+ resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->tag,
+ cmd->scsi_status);
+ else {
+ srp_tm_status
+ = tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response);
+ resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
+ ioctx->tag);
+ }
+ ret = srpt_post_send(ch, ioctx, resp_len);
+ if (ret) {
+ printk(KERN_ERR "sending cmd response failed for tag %llu\n",
+ ioctx->tag);
+ srpt_unmap_sg_to_ib_sge(ch, ioctx);
+ srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
+ kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+ }
+
+out:
+ return ret;
+}
+
+static int srpt_queue_status(struct se_cmd *cmd)
+{
+ struct srpt_send_ioctx *ioctx;
+
+ ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
+ BUG_ON(ioctx->sense_data != cmd->sense_buffer);
+ if (cmd->se_cmd_flags &
+ (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE))
+ WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION);
+ ioctx->queue_status_only = true;
+ return srpt_queue_response(cmd);
+}
+
+static void srpt_refresh_port_work(struct work_struct *work)
+{
+ struct srpt_port *sport = container_of(work, struct srpt_port, work);
+
+ srpt_refresh_port(sport);
+}
+
+static int srpt_ch_list_empty(struct srpt_device *sdev)
+{
+ int res;
+
+ spin_lock_irq(&sdev->spinlock);
+ res = list_empty(&sdev->rch_list);
+ spin_unlock_irq(&sdev->spinlock);
+
+ return res;
+}
+
+/**
+ * srpt_release_sdev() - Free the channel resources associated with a target.
+ */
+static int srpt_release_sdev(struct srpt_device *sdev)
+{
+ struct srpt_rdma_ch *ch, *tmp_ch;
+ int res;
+
+ WARN_ON_ONCE(irqs_disabled());
+
+ BUG_ON(!sdev);
+
+ spin_lock_irq(&sdev->spinlock);
+ list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list)
+ __srpt_close_ch(ch);
+ spin_unlock_irq(&sdev->spinlock);
+
+ res = wait_event_interruptible(sdev->ch_releaseQ,
+ srpt_ch_list_empty(sdev));
+ if (res)
+ printk(KERN_ERR "%s: interrupted.\n", __func__);
+
+ return 0;
+}
+
+static struct srpt_port *__srpt_lookup_port(const char *name)
+{
+ struct ib_device *dev;
+ struct srpt_device *sdev;
+ struct srpt_port *sport;
+ int i;
+
+ list_for_each_entry(sdev, &srpt_dev_list, list) {
+ dev = sdev->device;
+ if (!dev)
+ continue;
+
+ for (i = 0; i < dev->phys_port_cnt; i++) {
+ sport = &sdev->port[i];
+
+ if (!strcmp(sport->port_guid, name))
+ return sport;
+ }
+ }
+
+ return NULL;
+}
+
+static struct srpt_port *srpt_lookup_port(const char *name)
+{
+ struct srpt_port *sport;
+
+ spin_lock(&srpt_dev_lock);
+ sport = __srpt_lookup_port(name);
+ spin_unlock(&srpt_dev_lock);
+
+ return sport;
+}
+
+/**
+ * srpt_add_one() - InfiniBand device addition callback function.
+ */
+static void srpt_add_one(struct ib_device *device)
+{
+ struct srpt_device *sdev;
+ struct srpt_port *sport;
+ struct ib_srq_init_attr srq_attr;
+ int i;
+
+ pr_debug("device = %p, device->dma_ops = %p\n", device,
+ device->dma_ops);
+
+ sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
+ if (!sdev)
+ goto err;
+
+ sdev->device = device;
+ INIT_LIST_HEAD(&sdev->rch_list);
+ init_waitqueue_head(&sdev->ch_releaseQ);
+ spin_lock_init(&sdev->spinlock);
+
+ if (ib_query_device(device, &sdev->dev_attr))
+ goto free_dev;
+
+ sdev->pd = ib_alloc_pd(device);
+ if (IS_ERR(sdev->pd))
+ goto free_dev;
+
+ sdev->mr = ib_get_dma_mr(sdev->pd, IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(sdev->mr))
+ goto err_pd;
+
+ sdev->srq_size = min(srpt_srq_size, sdev->dev_attr.max_srq_wr);
+
+ srq_attr.event_handler = srpt_srq_event;
+ srq_attr.srq_context = (void *)sdev;
+ srq_attr.attr.max_wr = sdev->srq_size;
+ srq_attr.attr.max_sge = 1;
+ srq_attr.attr.srq_limit = 0;
+
+ sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
+ if (IS_ERR(sdev->srq))
+ goto err_mr;
+
+ pr_debug("%s: create SRQ #wr= %d max_allow=%d dev= %s\n",
+ __func__, sdev->srq_size, sdev->dev_attr.max_srq_wr,
+ device->name);
+
+ if (!srpt_service_guid)
+ srpt_service_guid = be64_to_cpu(device->node_guid);
+
+ sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
+ if (IS_ERR(sdev->cm_id))
+ goto err_srq;
+
+ /* print out target login information */
+ pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,"
+ "pkey=ffff,service_id=%016llx\n", srpt_service_guid,
+ srpt_service_guid, srpt_service_guid);
+
+ /*
+ * We do not have a consistent service_id (i.e. also the id_ext of the
+ * target_id) to identify this target. We currently use the GUID of the
+ * first HCA in the system as service_id; therefore, the target_id will
+ * change if this HCA goes bad and is replaced by a different HCA.
+ */
+ if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0, NULL))
+ goto err_cm;
+
+ INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
+ srpt_event_handler);
+ if (ib_register_event_handler(&sdev->event_handler))
+ goto err_cm;
+
+ sdev->ioctx_ring = (struct srpt_recv_ioctx **)
+ srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
+ sizeof(*sdev->ioctx_ring[0]),
+ srp_max_req_size, DMA_FROM_DEVICE);
+ if (!sdev->ioctx_ring)
+ goto err_event;
+
+ for (i = 0; i < sdev->srq_size; ++i)
+ srpt_post_recv(sdev, sdev->ioctx_ring[i]);
+
+ WARN_ON(sdev->device->phys_port_cnt > ARRAY_SIZE(sdev->port));
+
+ for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
+ sport = &sdev->port[i - 1];
+ sport->sdev = sdev;
+ sport->port = i;
+ sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
+ sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
+ sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
+ INIT_WORK(&sport->work, srpt_refresh_port_work);
+ INIT_LIST_HEAD(&sport->port_acl_list);
+ spin_lock_init(&sport->port_acl_lock);
+
+ if (srpt_refresh_port(sport)) {
+ printk(KERN_ERR "MAD registration failed for %s-%d.\n",
+ srpt_sdev_name(sdev), i);
+ goto err_ring;
+ }
+ snprintf(sport->port_guid, sizeof(sport->port_guid),
+ "0x%016llx%016llx",
+ be64_to_cpu(sport->gid.global.subnet_prefix),
+ be64_to_cpu(sport->gid.global.interface_id));
+ }
+
+ spin_lock(&srpt_dev_lock);
+ list_add_tail(&sdev->list, &srpt_dev_list);
+ spin_unlock(&srpt_dev_lock);
+
+out:
+ ib_set_client_data(device, &srpt_client, sdev);
+ pr_debug("added %s.\n", device->name);
+ return;
+
+err_ring:
+ srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
+ sdev->srq_size, srp_max_req_size,
+ DMA_FROM_DEVICE);
+err_event:
+ ib_unregister_event_handler(&sdev->event_handler);
+err_cm:
+ ib_destroy_cm_id(sdev->cm_id);
+err_srq:
+ ib_destroy_srq(sdev->srq);
+err_mr:
+ ib_dereg_mr(sdev->mr);
+err_pd:
+ ib_dealloc_pd(sdev->pd);
+free_dev:
+ kfree(sdev);
+err:
+ sdev = NULL;
+ printk(KERN_INFO "%s(%s) failed.\n", __func__, device->name);
+ goto out;
+}
+
+/**
+ * srpt_remove_one() - InfiniBand device removal callback function.
+ */
+static void srpt_remove_one(struct ib_device *device)
+{
+ struct srpt_device *sdev;
+ int i;
+
+ sdev = ib_get_client_data(device, &srpt_client);
+ if (!sdev) {
+ printk(KERN_INFO "%s(%s): nothing to do.\n", __func__,
+ device->name);
+ return;
+ }
+
+ srpt_unregister_mad_agent(sdev);
+
+ ib_unregister_event_handler(&sdev->event_handler);
+
+ /* Cancel any work queued by the just unregistered IB event handler. */
+ for (i = 0; i < sdev->device->phys_port_cnt; i++)
+ cancel_work_sync(&sdev->port[i].work);
+
+ ib_destroy_cm_id(sdev->cm_id);
+
+ /*
+ * Unregistering a target must happen after destroying sdev->cm_id
+ * such that no new SRP_LOGIN_REQ information units can arrive while
+ * destroying the target.
+ */
+ spin_lock(&srpt_dev_lock);
+ list_del(&sdev->list);
+ spin_unlock(&srpt_dev_lock);
+ srpt_release_sdev(sdev);
+
+ ib_destroy_srq(sdev->srq);
+ ib_dereg_mr(sdev->mr);
+ ib_dealloc_pd(sdev->pd);
+
+ srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
+ sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);
+ sdev->ioctx_ring = NULL;
+ kfree(sdev);
+}
+
+static struct ib_client srpt_client = {
+ .name = DRV_NAME,
+ .add = srpt_add_one,
+ .remove = srpt_remove_one
+};
+
+static int srpt_check_true(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static int srpt_check_false(struct se_portal_group *se_tpg)
+{
+ return 0;
+}
+
+static char *srpt_get_fabric_name(void)
+{
+ return "srpt";
+}
+
+static u8 srpt_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+ return SCSI_TRANSPORTID_PROTOCOLID_SRP;
+}
+
+static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
+{
+ struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
+
+ return sport->port_guid;
+}
+
+static u16 srpt_get_tag(struct se_portal_group *tpg)
+{
+ return 1;
+}
+
+static u32 srpt_get_default_depth(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static u32 srpt_get_pr_transport_id(struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code, unsigned char *buf)
+{
+ struct srpt_node_acl *nacl;
+ struct spc_rdma_transport_id *tr_id;
+
+ nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
+ tr_id = (void *)buf;
+ tr_id->protocol_identifier = SCSI_TRANSPORTID_PROTOCOLID_SRP;
+ memcpy(tr_id->i_port_id, nacl->i_port_id, sizeof(tr_id->i_port_id));
+ return sizeof(*tr_id);
+}
+
+static u32 srpt_get_pr_transport_id_len(struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl,
+ struct t10_pr_registration *pr_reg,
+ int *format_code)
+{
+ *format_code = 0;
+ return sizeof(struct spc_rdma_transport_id);
+}
+
+static char *srpt_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
+ const char *buf, u32 *out_tid_len,
+ char **port_nexus_ptr)
+{
+ struct spc_rdma_transport_id *tr_id;
+
+ *port_nexus_ptr = NULL;
+ *out_tid_len = sizeof(struct spc_rdma_transport_id);
+ tr_id = (void *)buf;
+ return (char *)tr_id->i_port_id;
+}
+
+static struct se_node_acl *srpt_alloc_fabric_acl(struct se_portal_group *se_tpg)
+{
+ struct srpt_node_acl *nacl;
+
+ nacl = kzalloc(sizeof(struct srpt_node_acl), GFP_KERNEL);
+ if (!nacl) {
+ printk(KERN_ERR "Unable to allocate struct srpt_node_acl\n");
+ return NULL;
+ }
+
+ return &nacl->nacl;
+}
+
+static void srpt_release_fabric_acl(struct se_portal_group *se_tpg,
+ struct se_node_acl *se_nacl)
+{
+ struct srpt_node_acl *nacl;
+
+ nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
+ kfree(nacl);
+}
+
+static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+ return 1;
+}
+
+static void srpt_release_cmd(struct se_cmd *se_cmd)
+{
+}
+
+/**
+ * srpt_shutdown_session() - Whether or not a session may be shut down.
+ */
+static int srpt_shutdown_session(struct se_session *se_sess)
+{
+ return true;
+}
+
+/**
+ * srpt_close_session() - Forcibly close a session.
+ *
+ * Callback function invoked by the TCM core to clean up sessions associated
+ * with a node ACL when the user invokes
+ * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
+ */
+static void srpt_close_session(struct se_session *se_sess)
+{
+ DECLARE_COMPLETION_ONSTACK(release_done);
+ struct srpt_rdma_ch *ch;
+ struct srpt_device *sdev;
+ int res;
+
+ ch = se_sess->fabric_sess_ptr;
+ WARN_ON(ch->sess != se_sess);
+
+ pr_debug("ch %p state %d\n", ch, srpt_get_ch_state(ch));
+
+ sdev = ch->sport->sdev;
+ spin_lock_irq(&sdev->spinlock);
+ BUG_ON(ch->release_done);
+ ch->release_done = &release_done;
+ __srpt_close_ch(ch);
+ spin_unlock_irq(&sdev->spinlock);
+
+ res = wait_for_completion_timeout(&release_done, 60 * HZ);
+ WARN_ON(res <= 0);
+}
+
+/**
+ * To do: Find out whether stop_session() has a meaning for transports
+ * other than iSCSI.
+ */
+static void srpt_stop_session(struct se_session *se_sess, int sess_sleep,
+ int conn_sleep)
+{
+}
+
+static void srpt_reset_nexus(struct se_session *sess)
+{
+ printk(KERN_ERR "This is the SRP protocol, not iSCSI\n");
+}
+
+static int srpt_sess_logged_in(struct se_session *se_sess)
+{
+ return true;
+}
+
+/**
+ * srpt_sess_get_index() - Return the value of scsiAttIntrPortIndex (SCSI-MIB).
+ *
+ * A quote from RFC 4455 (SCSI-MIB) about this MIB object:
+ * This object represents an arbitrary integer used to uniquely identify a
+ * particular attached remote initiator port to a particular SCSI target port
+ * within a particular SCSI target device within a particular SCSI instance.
+ */
+static u32 srpt_sess_get_index(struct se_session *se_sess)
+{
+ return 0;
+}
+
+static void srpt_set_default_node_attrs(struct se_node_acl *nacl)
+{
+}
+
+static u32 srpt_get_task_tag(struct se_cmd *se_cmd)
+{
+ struct srpt_send_ioctx *ioctx;
+
+ ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
+ return ioctx->tag;
+}
+
+/* Note: only used from inside debug printk's by the TCM core. */
+static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
+{
+ struct srpt_send_ioctx *ioctx;
+
+ ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
+ return srpt_get_cmd_state(ioctx);
+}
+
+static u16 srpt_set_fabric_sense_len(struct se_cmd *cmd, u32 sense_length)
+{
+ return 0;
+}
+
+static u16 srpt_get_fabric_sense_len(void)
+{
+ return 0;
+}
+
+static int srpt_is_state_remove(struct se_cmd *se_cmd)
+{
+ return 0;
+}
+
+/**
+ * srpt_parse_i_port_id() - Parse an initiator port ID.
+ * @name: ASCII representation of a 128-bit initiator port ID.
+ * @i_port_id: Binary 128-bit port ID.
+ */
+static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
+{
+ const char *p;
+ unsigned len, count, leading_zero_bytes;
+ int ret, rc;
+
+ p = name;
+ if (strnicmp(p, "0x", 2) == 0)
+ p += 2;
+ ret = -EINVAL;
+ len = strlen(p);
+ if (len % 2)
+ goto out;
+ count = min(len / 2, 16U);
+ leading_zero_bytes = 16 - count;
+ memset(i_port_id, 0, leading_zero_bytes);
+ rc = hex2bin(i_port_id + leading_zero_bytes, p, count);
+ if (rc < 0)
+ pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc);
+ ret = 0;
+out:
+ return ret;
+}
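+
+/*
+ * Example (illustrative only, with a made-up initiator ID): parsing the ACL
+ * directory name "0x00000000000000000002c903000f1366" yields the sixteen
+ * bytes 00 00 00 00 00 00 00 00 00 02 c9 03 00 0f 13 66. Names shorter than
+ * 32 hex digits are right-aligned, i.e. padded with leading zero bytes.
+ */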
+
+/*
+ * configfs callback function invoked for
+ * mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
+ */
+static struct se_node_acl *srpt_make_nodeacl(struct se_portal_group *tpg,
+ struct config_group *group,
+ const char *name)
+{
+ struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
+ struct se_node_acl *se_nacl, *se_nacl_new;
+ struct srpt_node_acl *nacl;
+ int ret = 0;
+ u32 nexus_depth = 1;
+ u8 i_port_id[16];
+
+ if (srpt_parse_i_port_id(i_port_id, name) < 0) {
+ printk(KERN_ERR "invalid initiator port ID %s\n", name);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ se_nacl_new = srpt_alloc_fabric_acl(tpg);
+ if (!se_nacl_new) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ /*
+ * nacl_new may be released by core_tpg_add_initiator_node_acl()
+ * when converting a node ACL from demo mode to explicit mode.
+ */
+ se_nacl = core_tpg_add_initiator_node_acl(tpg, se_nacl_new, name,
+ nexus_depth);
+ if (IS_ERR(se_nacl)) {
+ ret = PTR_ERR(se_nacl);
+ goto err;
+ }
+ /* Locate our struct srpt_node_acl and set sdev and i_port_id. */
+ nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
+ memcpy(&nacl->i_port_id[0], &i_port_id[0], 16);
+ nacl->sport = sport;
+
+ spin_lock_irq(&sport->port_acl_lock);
+ list_add_tail(&nacl->list, &sport->port_acl_list);
+ spin_unlock_irq(&sport->port_acl_lock);
+
+ return se_nacl;
+err:
+ return ERR_PTR(ret);
+}
+
+/*
+ * configfs callback function invoked for
+ * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
+ */
+static void srpt_drop_nodeacl(struct se_node_acl *se_nacl)
+{
+ struct srpt_node_acl *nacl;
+ struct srpt_device *sdev;
+ struct srpt_port *sport;
+
+ nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
+ sport = nacl->sport;
+ sdev = sport->sdev;
+ spin_lock_irq(&sport->port_acl_lock);
+ list_del(&nacl->list);
+ spin_unlock_irq(&sport->port_acl_lock);
+ core_tpg_del_initiator_node_acl(&sport->port_tpg_1, se_nacl, 1);
+ srpt_release_fabric_acl(NULL, se_nacl);
+}
+
+static ssize_t srpt_tpg_attrib_show_srp_max_rdma_size(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+
+ return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
+}
+
+static ssize_t srpt_tpg_attrib_store_srp_max_rdma_size(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &val);
+ if (ret < 0) {
+ pr_err("strict_strtoul() failed with ret: %d\n", ret);
+ return -EINVAL;
+ }
+ if (val > MAX_SRPT_RDMA_SIZE) {
+ pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val,
+ MAX_SRPT_RDMA_SIZE);
+ return -EINVAL;
+ }
+ if (val < DEFAULT_MAX_RDMA_SIZE) {
+ pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n",
+ val, DEFAULT_MAX_RDMA_SIZE);
+ return -EINVAL;
+ }
+ sport->port_attrib.srp_max_rdma_size = val;
+
+ return count;
+}
+
+TF_TPG_ATTRIB_ATTR(srpt, srp_max_rdma_size, S_IRUGO | S_IWUSR);
+
+static ssize_t srpt_tpg_attrib_show_srp_max_rsp_size(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+
+ return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
+}
+
+static ssize_t srpt_tpg_attrib_store_srp_max_rsp_size(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &val);
+ if (ret < 0) {
+ pr_err("strict_strtoul() failed with ret: %d\n", ret);
+ return -EINVAL;
+ }
+ if (val > MAX_SRPT_RSP_SIZE) {
+ pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val,
+ MAX_SRPT_RSP_SIZE);
+ return -EINVAL;
+ }
+ if (val < MIN_MAX_RSP_SIZE) {
+ pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val,
+ MIN_MAX_RSP_SIZE);
+ return -EINVAL;
+ }
+ sport->port_attrib.srp_max_rsp_size = val;
+
+ return count;
+}
+
+TF_TPG_ATTRIB_ATTR(srpt, srp_max_rsp_size, S_IRUGO | S_IWUSR);
+
+static ssize_t srpt_tpg_attrib_show_srp_sq_size(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+
+ return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size);
+}
+
+static ssize_t srpt_tpg_attrib_store_srp_sq_size(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &val);
+ if (ret < 0) {
+ pr_err("strict_strtoul() failed with ret: %d\n", ret);
+ return -EINVAL;
+ }
+ if (val > MAX_SRPT_SRQ_SIZE) {
+ pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val,
+ MAX_SRPT_SRQ_SIZE);
+ return -EINVAL;
+ }
+ if (val < MIN_SRPT_SRQ_SIZE) {
+ pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val,
+ MIN_SRPT_SRQ_SIZE);
+ return -EINVAL;
+ }
+ sport->port_attrib.srp_sq_size = val;
+
+ return count;
+}
+
+TF_TPG_ATTRIB_ATTR(srpt, srp_sq_size, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
+ &srpt_tpg_attrib_srp_max_rdma_size.attr,
+ &srpt_tpg_attrib_srp_max_rsp_size.attr,
+ &srpt_tpg_attrib_srp_sq_size.attr,
+ NULL,
+};
+
+static ssize_t srpt_tpg_show_enable(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", (sport->enabled) ? 1: 0);
+}
+
+static ssize_t srpt_tpg_store_enable(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+ unsigned long tmp;
+ int ret;
+
+ ret = strict_strtoul(page, 0, &tmp);
+ if (ret < 0) {
+ printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n");
+ return -EINVAL;
+ }
+
+ if ((tmp != 0) && (tmp != 1)) {
+ printk(KERN_ERR "Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
+ return -EINVAL;
+ }
+ if (tmp == 1)
+ sport->enabled = true;
+ else
+ sport->enabled = false;
+
+ return count;
+}
+
+TF_TPG_BASE_ATTR(srpt, enable, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *srpt_tpg_attrs[] = {
+ &srpt_tpg_enable.attr,
+ NULL,
+};
+
+/**
+ * configfs callback invoked for
+ * mkdir /sys/kernel/config/target/$driver/$port/$tpg
+ */
+static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
+ struct config_group *group,
+ const char *name)
+{
+ struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
+ int res;
+
+ /* Initialize sport->port_wwn and sport->port_tpg_1 */
+ res = core_tpg_register(&srpt_target->tf_ops, &sport->port_wwn,
+ &sport->port_tpg_1, sport, TRANSPORT_TPG_TYPE_NORMAL);
+ if (res)
+ return ERR_PTR(res);
+
+ return &sport->port_tpg_1;
+}
+
+/**
+ * configfs callback invoked for
+ * rmdir /sys/kernel/config/target/$driver/$port/$tpg
+ */
+static void srpt_drop_tpg(struct se_portal_group *tpg)
+{
+ struct srpt_port *sport = container_of(tpg,
+ struct srpt_port, port_tpg_1);
+
+ sport->enabled = false;
+ core_tpg_deregister(&sport->port_tpg_1);
+}
+
+/**
+ * configfs callback invoked for
+ * mkdir /sys/kernel/config/target/$driver/$port
+ */
+static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
+ struct config_group *group,
+ const char *name)
+{
+ struct srpt_port *sport;
+ int ret;
+
+ sport = srpt_lookup_port(name);
+ pr_debug("make_tport(%s)\n", name);
+ ret = -EINVAL;
+ if (!sport)
+ goto err;
+
+ return &sport->port_wwn;
+
+err:
+ return ERR_PTR(ret);
+}
+
+/**
+ * configfs callback invoked for
+ * rmdir /sys/kernel/config/target/$driver/$port
+ */
+static void srpt_drop_tport(struct se_wwn *wwn)
+{
+ struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
+
+ pr_debug("drop_tport(%s)\n", config_item_name(&sport->port_wwn.wwn_group.cg_item));
+}
+
+static ssize_t srpt_wwn_show_attr_version(struct target_fabric_configfs *tf,
+ char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
+}
+
+TF_WWN_ATTR_RO(srpt, version);
+
+static struct configfs_attribute *srpt_wwn_attrs[] = {
+ &srpt_wwn_version.attr,
+ NULL,
+};
+
+static struct target_core_fabric_ops srpt_template = {
+ .get_fabric_name = srpt_get_fabric_name,
+ .get_fabric_proto_ident = srpt_get_fabric_proto_ident,
+ .tpg_get_wwn = srpt_get_fabric_wwn,
+ .tpg_get_tag = srpt_get_tag,
+ .tpg_get_default_depth = srpt_get_default_depth,
+ .tpg_get_pr_transport_id = srpt_get_pr_transport_id,
+ .tpg_get_pr_transport_id_len = srpt_get_pr_transport_id_len,
+ .tpg_parse_pr_out_transport_id = srpt_parse_pr_out_transport_id,
+ .tpg_check_demo_mode = srpt_check_false,
+ .tpg_check_demo_mode_cache = srpt_check_true,
+ .tpg_check_demo_mode_write_protect = srpt_check_true,
+ .tpg_check_prod_mode_write_protect = srpt_check_false,
+ .tpg_alloc_fabric_acl = srpt_alloc_fabric_acl,
+ .tpg_release_fabric_acl = srpt_release_fabric_acl,
+ .tpg_get_inst_index = srpt_tpg_get_inst_index,
+ .release_cmd = srpt_release_cmd,
+ .check_stop_free = srpt_check_stop_free,
+ .shutdown_session = srpt_shutdown_session,
+ .close_session = srpt_close_session,
+ .stop_session = srpt_stop_session,
+ .fall_back_to_erl0 = srpt_reset_nexus,
+ .sess_logged_in = srpt_sess_logged_in,
+ .sess_get_index = srpt_sess_get_index,
+ .sess_get_initiator_sid = NULL,
+ .write_pending = srpt_write_pending,
+ .write_pending_status = srpt_write_pending_status,
+ .set_default_node_attributes = srpt_set_default_node_attrs,
+ .get_task_tag = srpt_get_task_tag,
+ .get_cmd_state = srpt_get_tcm_cmd_state,
+ .queue_data_in = srpt_queue_response,
+ .queue_status = srpt_queue_status,
+ .queue_tm_rsp = srpt_queue_response,
+ .get_fabric_sense_len = srpt_get_fabric_sense_len,
+ .set_fabric_sense_len = srpt_set_fabric_sense_len,
+ .is_state_remove = srpt_is_state_remove,
+ /*
+ * Setup function pointers for generic logic in
+ * target_core_fabric_configfs.c
+ */
+ .fabric_make_wwn = srpt_make_tport,
+ .fabric_drop_wwn = srpt_drop_tport,
+ .fabric_make_tpg = srpt_make_tpg,
+ .fabric_drop_tpg = srpt_drop_tpg,
+ .fabric_post_link = NULL,
+ .fabric_pre_unlink = NULL,
+ .fabric_make_np = NULL,
+ .fabric_drop_np = NULL,
+ .fabric_make_nodeacl = srpt_make_nodeacl,
+ .fabric_drop_nodeacl = srpt_drop_nodeacl,
+};
+
+/**
+ * srpt_init_module() - Kernel module initialization.
+ *
+ * Note: Since ib_register_client() registers callback functions, and since at
+ * least one of these callback functions (srpt_add_one()) calls target core
+ * functions, this driver must be registered with the target core before
+ * ib_register_client() is called.
+ */
+static int __init srpt_init_module(void)
+{
+ int ret;
+
+ ret = -EINVAL;
+ if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
+ printk(KERN_ERR "invalid value %d for kernel module parameter"
+ " srp_max_req_size -- must be at least %d.\n",
+ srp_max_req_size, MIN_MAX_REQ_SIZE);
+ goto out;
+ }
+
+ if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
+ || srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
+ printk(KERN_ERR "invalid value %d for kernel module parameter"
+ " srpt_srq_size -- must be in the range [%d..%d].\n",
+ srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
+ goto out;
+ }
+
+ srpt_target = target_fabric_configfs_init(THIS_MODULE, "srpt");
+ if (IS_ERR(srpt_target)) {
+ printk(KERN_ERR "couldn't register\n");
+ ret = PTR_ERR(srpt_target);
+ goto out;
+ }
+
+ srpt_target->tf_ops = srpt_template;
+
+ /* Enable SG chaining */
+ srpt_target->tf_ops.task_sg_chaining = true;
+
+ /*
+ * Set up default attribute lists.
+ */
+ srpt_target->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = srpt_wwn_attrs;
+ srpt_target->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = srpt_tpg_attrs;
+ srpt_target->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = srpt_tpg_attrib_attrs;
+ srpt_target->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
+ srpt_target->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
+ srpt_target->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+ srpt_target->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+ srpt_target->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+ srpt_target->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+
+ ret = target_fabric_configfs_register(srpt_target);
+ if (ret < 0) {
+ printk(KERN_ERR "couldn't register\n");
+ goto out_free_target;
+ }
+
+ ret = ib_register_client(&srpt_client);
+ if (ret) {
+ printk(KERN_ERR "couldn't register IB client\n");
+ goto out_unregister_target;
+ }
+
+ return 0;
+
+out_unregister_target:
+ target_fabric_configfs_deregister(srpt_target);
+ srpt_target = NULL;
+out_free_target:
+ if (srpt_target)
+ target_fabric_configfs_free(srpt_target);
+out:
+ return ret;
+}
+
+static void __exit srpt_cleanup_module(void)
+{
+ ib_unregister_client(&srpt_client);
+ target_fabric_configfs_deregister(srpt_target);
+ srpt_target = NULL;
+}
+
+module_init(srpt_init_module);
+module_exit(srpt_cleanup_module);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
new file mode 100644
index 0000000..61e52b8
--- /dev/null
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.h
@@ -0,0 +1,443 @@
+/*
+ * Copyright (c) 2006 - 2009 Mellanox Technology Inc. All rights reserved.
+ * Copyright (C) 2009 - 2010 Bart Van Assche <bvanassche@acm.org>.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef IB_SRPT_H
+#define IB_SRPT_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_sa.h>
+#include <rdma/ib_cm.h>
+
+#include <scsi/srp.h>
+
+#include "ib_dm_mad.h"
+
+/*
+ * The prefix the ServiceName field must start with in the device management
+ * ServiceEntries attribute pair. See also the SRP specification.
+ */
+#define SRP_SERVICE_NAME_PREFIX "SRP.T10:"
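+
+/*
+ * For illustration only: the complete ServiceName is this prefix followed by
+ * sixteen hexadecimal digits, e.g. "SRP.T10:0002c90300a1b2c3" for a
+ * (hypothetical) identifier extension of 0x0002c90300a1b2c3.
+ */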
+
+enum {
+ /*
+ * SRP IOControllerProfile attributes for SRP target ports that have
+ * not been defined in <scsi/srp.h>. Source: section B.7, table B.7
+ * in the SRP specification.
+ */
+ SRP_PROTOCOL = 0x0108,
+ SRP_PROTOCOL_VERSION = 0x0001,
+ SRP_IO_SUBCLASS = 0x609e,
+ SRP_SEND_TO_IOC = 0x01,
+ SRP_SEND_FROM_IOC = 0x02,
+ SRP_RDMA_READ_FROM_IOC = 0x08,
+ SRP_RDMA_WRITE_FROM_IOC = 0x20,
+
+ /*
+ * srp_login_cmd.req_flags bitmasks. See also table 9 in the SRP
+ * specification.
+ */
+ SRP_MTCH_ACTION = 0x03, /* MULTI-CHANNEL ACTION */
+ SRP_LOSOLNT = 0x10, /* logout solicited notification */
+ SRP_CRSOLNT = 0x20, /* credit request solicited notification */
+ SRP_AESOLNT = 0x40, /* asynchronous event solicited notification */
+
+ /*
+ * srp_cmd.sol_nt / srp_tsk_mgmt.sol_not bitmasks. See also tables
+ * 18 and 20 in the SRP specification.
+ */
+ SRP_SCSOLNT = 0x02, /* SCSOLNT = successful solicited notification */
+ SRP_UCSOLNT = 0x04, /* UCSOLNT = unsuccessful solicited notification */
+
+ /*
+ * srp_rsp.sol_not / srp_t_logout.sol_not bitmasks. See also tables
+ * 16 and 22 in the SRP specification.
+ */
+ SRP_SOLNT = 0x01, /* SOLNT = solicited notification */
+
+ /* See also table 24 in the SRP specification. */
+ SRP_TSK_MGMT_SUCCESS = 0x00,
+ SRP_TSK_MGMT_FUNC_NOT_SUPP = 0x04,
+ SRP_TSK_MGMT_FAILED = 0x05,
+
+ /* See also table 21 in the SRP specification. */
+ SRP_CMD_SIMPLE_Q = 0x0,
+ SRP_CMD_HEAD_OF_Q = 0x1,
+ SRP_CMD_ORDERED_Q = 0x2,
+ SRP_CMD_ACA = 0x4,
+
+ SRP_LOGIN_RSP_MULTICHAN_NO_CHAN = 0x0,
+ SRP_LOGIN_RSP_MULTICHAN_TERMINATED = 0x1,
+ SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2,
+
+ SRPT_DEF_SG_TABLESIZE = 128,
+ SRPT_DEF_SG_PER_WQE = 16,
+
+ MIN_SRPT_SQ_SIZE = 16,
+ DEF_SRPT_SQ_SIZE = 4096,
+ SRPT_RQ_SIZE = 128,
+ MIN_SRPT_SRQ_SIZE = 4,
+ DEFAULT_SRPT_SRQ_SIZE = 4095,
+ MAX_SRPT_SRQ_SIZE = 65535,
+ MAX_SRPT_RDMA_SIZE = 1U << 24,
+ MAX_SRPT_RSP_SIZE = 1024,
+
+ MIN_MAX_REQ_SIZE = 996,
+ DEFAULT_MAX_REQ_SIZE
+ = sizeof(struct srp_cmd)/*48*/
+ + sizeof(struct srp_indirect_buf)/*20*/
+ + 128 * sizeof(struct srp_direct_buf)/*16*/,
+
+ MIN_MAX_RSP_SIZE = sizeof(struct srp_rsp)/*36*/ + 4,
+ DEFAULT_MAX_RSP_SIZE = 256, /* leaves 220 bytes for sense data */
+
+ DEFAULT_MAX_RDMA_SIZE = 65536,
+};
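+
+/*
+ * For reference, using the element sizes noted in the comments above:
+ * DEFAULT_MAX_REQ_SIZE = 48 + 20 + 128 * 16 = 2116 bytes, i.e. an SRP_CMD
+ * carrying an indirect descriptor list of up to 128 direct buffers, and
+ * DEFAULT_MAX_RSP_SIZE = 256 = 36 + 220, i.e. 220 bytes left for sense data.
+ */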
+
+enum srpt_opcode {
+ SRPT_RECV,
+ SRPT_SEND,
+ SRPT_RDMA_MID,
+ SRPT_RDMA_ABORT,
+ SRPT_RDMA_READ_LAST,
+ SRPT_RDMA_WRITE_LAST,
+};
+
+static inline u64 encode_wr_id(u8 opcode, u32 idx)
+{
+ return ((u64)opcode << 32) | idx;
+}
+static inline enum srpt_opcode opcode_from_wr_id(u64 wr_id)
+{
+ return wr_id >> 32;
+}
+static inline u32 idx_from_wr_id(u64 wr_id)
+{
+ return (u32)wr_id;
+}
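+
+/*
+ * Usage sketch (illustrative only): every wr_id posted by this driver
+ * round-trips through the helpers above, e.g.:
+ *
+ *   u64 wr_id = encode_wr_id(SRPT_RDMA_READ_LAST, ioctx_index);
+ *   WARN_ON(opcode_from_wr_id(wr_id) != SRPT_RDMA_READ_LAST);
+ *   WARN_ON(idx_from_wr_id(wr_id) != ioctx_index);
+ *
+ * The opcode is kept in the upper 32 bits and the ioctx ring index in the
+ * lower 32 bits of the 64-bit work request id.
+ */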
+
+struct rdma_iu {
+ u64 raddr;
+ u32 rkey;
+ struct ib_sge *sge;
+ u32 sge_cnt;
+ int mem_id;
+};
+
+/**
+ * enum srpt_command_state - SCSI command state managed by SRPT.
+ * @SRPT_STATE_NEW: New command arrived and is being processed.
+ * @SRPT_STATE_NEED_DATA: Processing a write or bidir command and waiting
+ * for data arrival.
+ * @SRPT_STATE_DATA_IN: Data for the write or bidir command arrived and is
+ * being processed.
+ * @SRPT_STATE_CMD_RSP_SENT: SRP_RSP for SRP_CMD has been sent.
+ * @SRPT_STATE_MGMT: Processing a SCSI task management command.
+ * @SRPT_STATE_MGMT_RSP_SENT: SRP_RSP for SRP_TSK_MGMT has been sent.
+ * @SRPT_STATE_DONE: Command processing finished successfully, command
+ * processing has been aborted or command processing
+ * failed.
+ */
+enum srpt_command_state {
+ SRPT_STATE_NEW = 0,
+ SRPT_STATE_NEED_DATA = 1,
+ SRPT_STATE_DATA_IN = 2,
+ SRPT_STATE_CMD_RSP_SENT = 3,
+ SRPT_STATE_MGMT = 4,
+ SRPT_STATE_MGMT_RSP_SENT = 5,
+ SRPT_STATE_DONE = 6,
+};
+
+/**
+ * struct srpt_ioctx - Shared SRPT I/O context information.
+ * @buf: Pointer to the buffer.
+ * @dma: DMA address of the buffer.
+ * @index: Index of the I/O context in its ioctx_ring array.
+ */
+struct srpt_ioctx {
+ void *buf;
+ dma_addr_t dma;
+ uint32_t index;
+};
+
+/**
+ * struct srpt_recv_ioctx - SRPT receive I/O context.
+ * @ioctx: See above.
+ * @wait_list: Node for insertion in srpt_rdma_ch.cmd_wait_list.
+ */
+struct srpt_recv_ioctx {
+ struct srpt_ioctx ioctx;
+ struct list_head wait_list;
+};
+
+/**
+ * struct srpt_send_ioctx - SRPT send I/O context.
+ * @ioctx: See above.
+ * @ch: Channel pointer.
+ * @free_list: Node in srpt_rdma_ch.free_list.
+ * @n_rbuf: Number of data buffers in the received SRP command.
+ * @rbufs: Pointer to SRP data buffer array.
+ * @single_rbuf: SRP data buffer if the command has only a single buffer.
+ * @sg: Pointer to sg-list associated with this I/O context.
+ * @sg_cnt: SG-list size.
+ * @mapped_sg_count: ib_dma_map_sg() return value.
+ * @n_rdma_ius: Number of elements in the rdma_ius array.
+ * @rdma_ius: Array with information about the RDMA mapping.
+ * @tag: Tag of the received SRP information unit.
+ * @spinlock: Protects 'state'.
+ * @state: I/O context state.
+ * @rdma_aborted: If initiating a multipart RDMA transfer failed, whether
+ * the already initiated transfers have finished.
+ * @cmd: Target core command data structure.
+ * @sense_data: SCSI sense data.
+ */
+struct srpt_send_ioctx {
+ struct srpt_ioctx ioctx;
+ struct srpt_rdma_ch *ch;
+ struct kref kref;
+ struct rdma_iu *rdma_ius;
+ struct srp_direct_buf *rbufs;
+ struct srp_direct_buf single_rbuf;
+ struct scatterlist *sg;
+ struct list_head free_list;
+ spinlock_t spinlock;
+ enum srpt_command_state state;
+ bool rdma_aborted;
+ struct se_cmd cmd;
+ struct completion tx_done;
+ u64 tag;
+ int sg_cnt;
+ int mapped_sg_count;
+ u16 n_rdma_ius;
+ u8 n_rdma;
+ u8 n_rbuf;
+ bool queue_status_only;
+ u8 sense_data[SCSI_SENSE_BUFFERSIZE];
+};
+
+/**
+ * enum rdma_ch_state - SRP channel state.
+ * @CH_CONNECTING: QP is in RTR state; waiting for RTU.
+ * @CH_LIVE: QP is in RTS state.
+ * @CH_DISCONNECTING: DREQ has been received and a DREP will be sent, or a
+ *                    DREQ has been sent and a DREP is awaited.
+ * @CH_DRAINING: QP is in ERR state; waiting for last WQE event.
+ * @CH_RELEASING: Last WQE event has been received; releasing resources.
+ */
+enum rdma_ch_state {
+ CH_CONNECTING,
+ CH_LIVE,
+ CH_DISCONNECTING,
+ CH_DRAINING,
+ CH_RELEASING
+};
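+
+/*
+ * Summary of the state documentation above: a channel typically moves from
+ * CH_CONNECTING (login accepted) to CH_LIVE (RTU received), then to
+ * CH_DISCONNECTING (DREQ sent or received), CH_DRAINING (QP in the error
+ * state) and finally CH_RELEASING (last WQE event seen, resources are being
+ * released).
+ */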
+
+/**
+ * struct srpt_rdma_ch - RDMA channel.
+ * @wait_queue: Allows the kernel thread to wait for more work.
+ * @thread: Kernel thread that processes the IB queues associated with
+ * the channel.
+ * @cm_id: IB CM ID associated with the channel.
+ * @qp: IB queue pair used for communicating over this channel.
+ * @cq: IB completion queue for this channel.
+ * @rq_size: IB receive queue size.
+ * @rsp_size: IB response message size in bytes.
+ * @sq_wr_avail: number of work requests available in the send queue.
+ * @sport: pointer to the information of the HCA port used by this
+ * channel.
+ * @i_port_id: 128-bit initiator port identifier copied from SRP_LOGIN_REQ.
+ * @t_port_id: 128-bit target port identifier copied from SRP_LOGIN_REQ.
+ * @max_ti_iu_len: maximum target-to-initiator information unit length.
+ * @req_lim: request limit: maximum number of requests that may be sent
+ * by the initiator without having received a response.
+ * @req_lim_delta: Number of credits not yet sent back to the initiator.
+ * @spinlock: Protects free_list and state.
+ * @free_list: Head of list with free send I/O contexts.
+ * @state: channel state. See also enum rdma_ch_state.
+ * @ioctx_ring: Send ring.
+ * @wc: IB work completion array for srpt_process_completion().
+ * @list: Node for insertion in the srpt_device.rch_list list.
+ * @cmd_wait_list: List of SCSI commands that arrived before the RTU event. This
+ * list contains struct srpt_ioctx elements and is protected
+ * against concurrent modification by the cm_id spinlock.
+ * @sess: Session information associated with this SRP channel.
+ * @sess_name: Session name.
+ * @release_work: Allows scheduling of srpt_release_channel().
+ * @release_done: Enables waiting for srpt_release_channel() completion.
+ */
+struct srpt_rdma_ch {
+ wait_queue_head_t wait_queue;
+ struct task_struct *thread;
+ struct ib_cm_id *cm_id;
+ struct ib_qp *qp;
+ struct ib_cq *cq;
+ int rq_size;
+ u32 rsp_size;
+ atomic_t sq_wr_avail;
+ struct srpt_port *sport;
+ u8 i_port_id[16];
+ u8 t_port_id[16];
+ int max_ti_iu_len;
+ atomic_t req_lim;
+ atomic_t req_lim_delta;
+ spinlock_t spinlock;
+ struct list_head free_list;
+ enum rdma_ch_state state;
+ struct srpt_send_ioctx **ioctx_ring;
+ struct ib_wc wc[16];
+ struct list_head list;
+ struct list_head cmd_wait_list;
+ struct se_session *sess;
+ u8 sess_name[36];
+ struct work_struct release_work;
+ struct completion *release_done;
+};
+
+/**
+ * struct srpt_port_attrib - Attributes for an SRPT port.
+ * @srp_max_rdma_size: Maximum size of SRP RDMA transfers for new connections.
+ * @srp_max_rsp_size: Maximum size of SRP response messages in bytes.
+ * @srp_sq_size: Shared receive queue (SRQ) size.
+ */
+struct srpt_port_attrib {
+ u32 srp_max_rdma_size;
+ u32 srp_max_rsp_size;
+ u32 srp_sq_size;
+};
+
+/**
+ * struct srpt_port - Information associated by SRPT with a single IB port.
+ * @sdev: backpointer to the HCA information.
+ * @mad_agent: per-port management datagram processing information.
+ * @enabled: Whether or not this target port is enabled.
+ * @port_guid: ASCII representation of Port GUID
+ * @port: one-based port number.
+ * @sm_lid: cached value of the port's sm_lid.
+ * @lid: cached value of the port's lid.
+ * @gid: cached value of the port's gid.
+ * @port_acl_lock: Spinlock protecting port_acl_list.
+ * @work: work structure for refreshing the aforementioned cached values.
+ * @port_tpg_1: Target portal group = 1 data.
+ * @port_wwn: Target core WWN data.
+ * @port_acl_list: Head of the list with all node ACLs for this port.
+ */
+struct srpt_port {
+ struct srpt_device *sdev;
+ struct ib_mad_agent *mad_agent;
+ bool enabled;
+ u8 port_guid[64];
+ u8 port;
+ u16 sm_lid;
+ u16 lid;
+ union ib_gid gid;
+ spinlock_t port_acl_lock;
+ struct work_struct work;
+ struct se_portal_group port_tpg_1;
+ struct se_wwn port_wwn;
+ struct list_head port_acl_list;
+ struct srpt_port_attrib port_attrib;
+};
+
+/**
+ * struct srpt_device - Information associated by SRPT with a single HCA.
+ * @device: Backpointer to the struct ib_device managed by the IB core.
+ * @pd: IB protection domain.
+ * @mr: L_Key (local key) with write access to all local memory.
+ * @srq: Per-HCA SRQ (shared receive queue).
+ * @cm_id: Connection identifier.
+ * @dev_attr: Attributes of the InfiniBand device as obtained during the
+ * ib_client.add() callback.
+ * @srq_size: SRQ size.
+ * @ioctx_ring: Per-HCA ring of receive I/O contexts posted to the SRQ.
+ * @rch_list: Per-device channel list -- see also srpt_rdma_ch.list.
+ * @ch_releaseQ: Enables waiting for removal from rch_list.
+ * @spinlock: Protects rch_list and tpg.
+ * @port: Information about the ports owned by this HCA.
+ * @event_handler: Per-HCA asynchronous IB event handler.
+ * @list: Node in srpt_dev_list.
+ */
+struct srpt_device {
+ struct ib_device *device;
+ struct ib_pd *pd;
+ struct ib_mr *mr;
+ struct ib_srq *srq;
+ struct ib_cm_id *cm_id;
+ struct ib_device_attr dev_attr;
+ int srq_size;
+ struct srpt_recv_ioctx **ioctx_ring;
+ struct list_head rch_list;
+ wait_queue_head_t ch_releaseQ;
+ spinlock_t spinlock;
+ struct srpt_port port[2];
+ struct ib_event_handler event_handler;
+ struct list_head list;
+};
+
+/**
+ * struct srpt_node_acl - Per-initiator ACL data (managed via configfs).
+ * @i_port_id: 128-bit SRP initiator port ID.
+ * @sport: port information.
+ * @nacl: Target core node ACL information.
+ * @list: Element of the per-HCA ACL list.
+ */
+struct srpt_node_acl {
+ u8 i_port_id[16];
+ struct srpt_port *sport;
+ struct se_node_acl nacl;
+ struct list_head list;
+};
+
+/*
+ * SRP-related SCSI persistent reservation definitions.
+ *
+ * See also SPC4r28, section 7.6.1 (Protocol specific parameters introduction).
+ * See also SPC4r28, section 7.6.4.5 (TransportID for initiator ports using
+ * SCSI over an RDMA interface).
+ */
+
+enum {
+ SCSI_TRANSPORTID_PROTOCOLID_SRP = 4,
+};
+
+struct spc_rdma_transport_id {
+ uint8_t protocol_identifier;
+ uint8_t reserved[7];
+ uint8_t i_port_id[16];
+};
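+
+/*
+ * Note: sizeof(struct spc_rdma_transport_id) is 24 bytes (one protocol
+ * identifier byte, seven reserved bytes and a 16-byte initiator port ID),
+ * which is also the length reported by srpt_get_pr_transport_id_len().
+ */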
+
+#endif /* IB_SRPT_H */
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 4cf2534..7df5bfe 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -332,7 +332,7 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer,
struct evdev_client *client = file->private_data;
struct evdev *evdev = client->evdev;
struct input_event event;
- int retval;
+ int retval = 0;
if (count < input_event_size())
return -EINVAL;
@@ -369,7 +369,7 @@ static int evdev_fetch_next_event(struct evdev_client *client,
spin_lock_irq(&client->buffer_lock);
- have_event = client->head != client->tail;
+ have_event = client->packet_head != client->tail;
if (have_event) {
*event = client->buffer[client->tail++];
client->tail &= client->bufsize - 1;
@@ -386,19 +386,18 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
struct evdev_client *client = file->private_data;
struct evdev *evdev = client->evdev;
struct input_event event;
- int retval;
+ int retval = 0;
if (count < input_event_size())
return -EINVAL;
- if (client->packet_head == client->tail && evdev->exist &&
- (file->f_flags & O_NONBLOCK))
- return -EAGAIN;
-
- retval = wait_event_interruptible(evdev->wait,
- client->packet_head != client->tail || !evdev->exist);
- if (retval)
- return retval;
+ if (!(file->f_flags & O_NONBLOCK)) {
+ retval = wait_event_interruptible(evdev->wait,
+ client->packet_head != client->tail ||
+ !evdev->exist);
+ if (retval)
+ return retval;
+ }
if (!evdev->exist)
return -ENODEV;
@@ -412,6 +411,9 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
retval += input_event_size();
}
+ if (retval == 0 && (file->f_flags & O_NONBLOCK))
+ return -EAGAIN;
+
return retval;
}
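With this change an O_NONBLOCK reader gets -EAGAIN only when no complete packet is queued, rather than before any read attempt. A minimal userspace sketch of handling that (illustrative only, not from the patch):

	#include <errno.h>
	#include <unistd.h>
	#include <linux/input.h>

	/* Returns 1 if an event was read, 0 if none is queued, -1 on error. */
	static int evdev_try_read(int fd, struct input_event *ev)
	{
		ssize_t n = read(fd, ev, sizeof(*ev));	/* fd opened with O_NONBLOCK */

		if (n == (ssize_t)sizeof(*ev))
			return 1;
		if (n < 0 && errno == EAGAIN)
			return 0;
		return -1;			/* e.g. device went away (ENODEV) */
	}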
diff --git a/drivers/input/input-polldev.c b/drivers/input/input-polldev.c
index 7dfe100..7f161d9 100644
--- a/drivers/input/input-polldev.c
+++ b/drivers/input/input-polldev.c
@@ -84,10 +84,12 @@ static ssize_t input_polldev_set_poll(struct device *dev,
{
struct input_polled_dev *polldev = dev_get_drvdata(dev);
struct input_dev *input = polldev->input;
- unsigned long interval;
+ unsigned int interval;
+ int err;
- if (strict_strtoul(buf, 0, &interval))
- return -EINVAL;
+ err = kstrtouint(buf, 0, &interval);
+ if (err)
+ return err;
if (interval < polldev->poll_interval_min)
return -EINVAL;
diff --git a/drivers/input/input.c b/drivers/input/input.c
index da38d97..1f78c957 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1624,7 +1624,7 @@ static struct device_type input_dev_type = {
#endif
};
-static char *input_devnode(struct device *dev, mode_t *mode)
+static char *input_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev));
}
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index d728875..fd7a0d5 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -98,15 +98,15 @@
#define XTYPE_XBOX360W 2
#define XTYPE_UNKNOWN 3
-static int dpad_to_buttons;
+static bool dpad_to_buttons;
module_param(dpad_to_buttons, bool, S_IRUGO);
MODULE_PARM_DESC(dpad_to_buttons, "Map D-PAD to buttons rather than axes for unknown pads");
-static int triggers_to_buttons;
+static bool triggers_to_buttons;
module_param(triggers_to_buttons, bool, S_IRUGO);
MODULE_PARM_DESC(triggers_to_buttons, "Map triggers to buttons rather than axes for unknown pads");
-static int sticks_to_null;
+static bool sticks_to_null;
module_param(sticks_to_null, bool, S_IRUGO);
MODULE_PARM_DESC(sticks_to_null, "Do not map sticks at all for unknown pads");
@@ -1041,18 +1041,7 @@ static struct usb_driver xpad_driver = {
.id_table = xpad_table,
};
-static int __init usb_xpad_init(void)
-{
- return usb_register(&xpad_driver);
-}
-
-static void __exit usb_xpad_exit(void)
-{
- usb_deregister(&xpad_driver);
-}
-
-module_init(usb_xpad_init);
-module_exit(usb_xpad_exit);
+module_usb_driver(xpad_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 615c21f..cdc385b 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -221,6 +221,22 @@ config KEYBOARD_TCA6416
To compile this driver as a module, choose M here: the
module will be called tca6416_keypad.
+config KEYBOARD_TCA8418
+ tristate "TCA8418 Keypad Support"
+ depends on I2C
+ help
+ This driver implements basic keypad functionality
+ for keys connected through TCA8418 keypad decoder.
+
+ Say Y here if your device has keys connected to
+ TCA8418 keypad decoder.
+
+ If enabled the complete TCA8418 device will be managed through
+ this driver.
+
+ To compile this driver as a module, choose M here: the
+ module will be called tca8418_keypad.
+
config KEYBOARD_MATRIX
tristate "GPIO driven matrix keypad support"
depends on GENERIC_GPIO
@@ -425,9 +441,10 @@ config KEYBOARD_PMIC8XXX
config KEYBOARD_SAMSUNG
tristate "Samsung keypad support"
- depends on SAMSUNG_DEV_KEYPAD
+ depends on HAVE_CLK
help
- Say Y here if you want to use the Samsung keypad.
+ Say Y here if you want to use the keypad on your Samsung mobile
+ device.
To compile this driver as a module, choose M here: the
module will be called samsung-keypad.
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index ddde0fd..df7061f 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
obj-$(CONFIG_KEYBOARD_GPIO_POLLED) += gpio_keys_polled.o
obj-$(CONFIG_KEYBOARD_TCA6416) += tca6416-keypad.o
+obj-$(CONFIG_KEYBOARD_TCA8418) += tca8418_keypad.o
obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o
obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o
obj-$(CONFIG_KEYBOARD_IMX) += imx_keypad.o
diff --git a/drivers/input/keyboard/adp5520-keys.c b/drivers/input/keyboard/adp5520-keys.c
index 3db8006..e9e8674 100644
--- a/drivers/input/keyboard/adp5520-keys.c
+++ b/drivers/input/keyboard/adp5520-keys.c
@@ -202,18 +202,7 @@ static struct platform_driver adp5520_keys_driver = {
.probe = adp5520_keys_probe,
.remove = __devexit_p(adp5520_keys_remove),
};
-
-static int __init adp5520_keys_init(void)
-{
- return platform_driver_register(&adp5520_keys_driver);
-}
-module_init(adp5520_keys_init);
-
-static void __exit adp5520_keys_exit(void)
-{
- platform_driver_unregister(&adp5520_keys_driver);
-}
-module_exit(adp5520_keys_exit);
+module_platform_driver(adp5520_keys_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Keys ADP5520 Driver");
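The module_platform_driver() conversions in this series (and module_usb_driver() for xpad above) generate exactly the init/exit boilerplate being deleted. Roughly, the one line added here expands to (simplified sketch):

	static int __init adp5520_keys_driver_init(void)
	{
		return platform_driver_register(&adp5520_keys_driver);
	}
	module_init(adp5520_keys_driver_init);

	static void __exit adp5520_keys_driver_exit(void)
	{
		platform_driver_unregister(&adp5520_keys_driver);
	}
	module_exit(adp5520_keys_driver_exit);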
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index 19cfc0c..e05a2e7 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -1305,7 +1305,7 @@ static ssize_t atkbd_show_extra(struct atkbd *atkbd, char *buf)
static ssize_t atkbd_set_extra(struct atkbd *atkbd, const char *buf, size_t count)
{
struct input_dev *old_dev, *new_dev;
- unsigned long value;
+ unsigned int value;
int err;
bool old_extra;
unsigned char old_set;
@@ -1313,7 +1313,11 @@ static ssize_t atkbd_set_extra(struct atkbd *atkbd, const char *buf, size_t coun
if (!atkbd->write)
return -EIO;
- if (strict_strtoul(buf, 10, &value) || value > 1)
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value > 1)
return -EINVAL;
if (atkbd->extra != value) {
@@ -1389,11 +1393,15 @@ static ssize_t atkbd_show_scroll(struct atkbd *atkbd, char *buf)
static ssize_t atkbd_set_scroll(struct atkbd *atkbd, const char *buf, size_t count)
{
struct input_dev *old_dev, *new_dev;
- unsigned long value;
+ unsigned int value;
int err;
bool old_scroll;
- if (strict_strtoul(buf, 10, &value) || value > 1)
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value > 1)
return -EINVAL;
if (atkbd->scroll != value) {
@@ -1433,7 +1441,7 @@ static ssize_t atkbd_show_set(struct atkbd *atkbd, char *buf)
static ssize_t atkbd_set_set(struct atkbd *atkbd, const char *buf, size_t count)
{
struct input_dev *old_dev, *new_dev;
- unsigned long value;
+ unsigned int value;
int err;
unsigned char old_set;
bool old_extra;
@@ -1441,7 +1449,11 @@ static ssize_t atkbd_set_set(struct atkbd *atkbd, const char *buf, size_t count)
if (!atkbd->write)
return -EIO;
- if (strict_strtoul(buf, 10, &value) || (value != 2 && value != 3))
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value != 2 && value != 3)
return -EINVAL;
if (atkbd->set != value) {
@@ -1484,14 +1496,18 @@ static ssize_t atkbd_show_softrepeat(struct atkbd *atkbd, char *buf)
static ssize_t atkbd_set_softrepeat(struct atkbd *atkbd, const char *buf, size_t count)
{
struct input_dev *old_dev, *new_dev;
- unsigned long value;
+ unsigned int value;
int err;
bool old_softrepeat, old_softraw;
if (!atkbd->write)
return -EIO;
- if (strict_strtoul(buf, 10, &value) || value > 1)
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value > 1)
return -EINVAL;
if (atkbd->softrepeat != value) {
@@ -1534,11 +1550,15 @@ static ssize_t atkbd_show_softraw(struct atkbd *atkbd, char *buf)
static ssize_t atkbd_set_softraw(struct atkbd *atkbd, const char *buf, size_t count)
{
struct input_dev *old_dev, *new_dev;
- unsigned long value;
+ unsigned int value;
int err;
bool old_softraw;
- if (strict_strtoul(buf, 10, &value) || value > 1)
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value > 1)
return -EINVAL;
if (atkbd->softraw != value) {
diff --git a/drivers/input/keyboard/bf54x-keys.c b/drivers/input/keyboard/bf54x-keys.c
index 7d98960..8eb9116 100644
--- a/drivers/input/keyboard/bf54x-keys.c
+++ b/drivers/input/keyboard/bf54x-keys.c
@@ -384,7 +384,7 @@ static int bfin_kpad_resume(struct platform_device *pdev)
# define bfin_kpad_resume NULL
#endif
-struct platform_driver bfin_kpad_device_driver = {
+static struct platform_driver bfin_kpad_device_driver = {
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
@@ -394,19 +394,7 @@ struct platform_driver bfin_kpad_device_driver = {
.suspend = bfin_kpad_suspend,
.resume = bfin_kpad_resume,
};
-
-static int __init bfin_kpad_init(void)
-{
- return platform_driver_register(&bfin_kpad_device_driver);
-}
-
-static void __exit bfin_kpad_exit(void)
-{
- platform_driver_unregister(&bfin_kpad_device_driver);
-}
-
-module_init(bfin_kpad_init);
-module_exit(bfin_kpad_exit);
+module_platform_driver(bfin_kpad_device_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
diff --git a/drivers/input/keyboard/ep93xx_keypad.c b/drivers/input/keyboard/ep93xx_keypad.c
index 4662c5d..0ba69f3 100644
--- a/drivers/input/keyboard/ep93xx_keypad.c
+++ b/drivers/input/keyboard/ep93xx_keypad.c
@@ -390,19 +390,7 @@ static struct platform_driver ep93xx_keypad_driver = {
.suspend = ep93xx_keypad_suspend,
.resume = ep93xx_keypad_resume,
};
-
-static int __init ep93xx_keypad_init(void)
-{
- return platform_driver_register(&ep93xx_keypad_driver);
-}
-
-static void __exit ep93xx_keypad_exit(void)
-{
- platform_driver_unregister(&ep93xx_keypad_driver);
-}
-
-module_init(ep93xx_keypad_init);
-module_exit(ep93xx_keypad_exit);
+module_platform_driver(ep93xx_keypad_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>");
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index 4c17aff..20c8ab1 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -241,19 +241,7 @@ static struct platform_driver gpio_keys_polled_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init gpio_keys_polled_init(void)
-{
- return platform_driver_register(&gpio_keys_polled_driver);
-}
-
-static void __exit gpio_keys_polled_exit(void)
-{
- platform_driver_unregister(&gpio_keys_polled_driver);
-}
-
-module_init(gpio_keys_polled_init);
-module_exit(gpio_keys_polled_exit);
+module_platform_driver(gpio_keys_polled_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
diff --git a/drivers/input/keyboard/imx_keypad.c b/drivers/input/keyboard/imx_keypad.c
index ccebd2d..fb87b3b 100644
--- a/drivers/input/keyboard/imx_keypad.c
+++ b/drivers/input/keyboard/imx_keypad.c
@@ -619,19 +619,7 @@ static struct platform_driver imx_keypad_driver = {
.probe = imx_keypad_probe,
.remove = __devexit_p(imx_keypad_remove),
};
-
-static int __init imx_keypad_init(void)
-{
- return platform_driver_register(&imx_keypad_driver);
-}
-
-static void __exit imx_keypad_exit(void)
-{
- platform_driver_unregister(&imx_keypad_driver);
-}
-
-module_init(imx_keypad_init);
-module_exit(imx_keypad_exit);
+module_platform_driver(imx_keypad_driver);
MODULE_AUTHOR("Alberto Panizzo <maramaopercheseimorto@gmail.com>");
MODULE_DESCRIPTION("IMX Keypad Port Driver");
diff --git a/drivers/input/keyboard/jornada680_kbd.c b/drivers/input/keyboard/jornada680_kbd.c
index 7197c56..24f3ea0 100644
--- a/drivers/input/keyboard/jornada680_kbd.c
+++ b/drivers/input/keyboard/jornada680_kbd.c
@@ -260,19 +260,7 @@ static struct platform_driver jornada680kbd_driver = {
.probe = jornada680kbd_probe,
.remove = __devexit_p(jornada680kbd_remove),
};
-
-static int __init jornada680kbd_init(void)
-{
- return platform_driver_register(&jornada680kbd_driver);
-}
-
-static void __exit jornada680kbd_exit(void)
-{
- platform_driver_unregister(&jornada680kbd_driver);
-}
-
-module_init(jornada680kbd_init);
-module_exit(jornada680kbd_exit);
+module_platform_driver(jornada680kbd_driver);
MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>");
MODULE_DESCRIPTION("HP Jornada 620/660/680/690 Keyboard Driver");
diff --git a/drivers/input/keyboard/jornada720_kbd.c b/drivers/input/keyboard/jornada720_kbd.c
index 0aa6740..eeafc30 100644
--- a/drivers/input/keyboard/jornada720_kbd.c
+++ b/drivers/input/keyboard/jornada720_kbd.c
@@ -174,16 +174,4 @@ static struct platform_driver jornada720_kbd_driver = {
.probe = jornada720_kbd_probe,
.remove = __devexit_p(jornada720_kbd_remove),
};
-
-static int __init jornada720_kbd_init(void)
-{
- return platform_driver_register(&jornada720_kbd_driver);
-}
-
-static void __exit jornada720_kbd_exit(void)
-{
- platform_driver_unregister(&jornada720_kbd_driver);
-}
-
-module_init(jornada720_kbd_init);
-module_exit(jornada720_kbd_exit);
+module_platform_driver(jornada720_kbd_driver);
diff --git a/drivers/input/keyboard/lm8323.c b/drivers/input/keyboard/lm8323.c
index 82d1dc8..21823bf 100644
--- a/drivers/input/keyboard/lm8323.c
+++ b/drivers/input/keyboard/lm8323.c
@@ -545,13 +545,12 @@ static ssize_t lm8323_pwm_store_time(struct device *dev,
{
struct led_classdev *led_cdev = dev_get_drvdata(dev);
struct lm8323_pwm *pwm = cdev_to_pwm(led_cdev);
- int ret;
- unsigned long time;
+ int ret, time;
- ret = strict_strtoul(buf, 10, &time);
+ ret = kstrtoint(buf, 10, &time);
/* Numbers only, please. */
if (ret)
- return -EINVAL;
+ return ret;
pwm->fade_time = time;
@@ -613,9 +612,9 @@ static ssize_t lm8323_set_disable(struct device *dev,
{
struct lm8323_chip *lm = dev_get_drvdata(dev);
int ret;
- unsigned long i;
+ unsigned int i;
- ret = strict_strtoul(buf, 10, &i);
+ ret = kstrtouint(buf, 10, &i);
mutex_lock(&lm->lock);
lm->kp_enabled = !i;
diff --git a/drivers/input/keyboard/matrix_keypad.c b/drivers/input/keyboard/matrix_keypad.c
index e2ae657..9b223d7 100644
--- a/drivers/input/keyboard/matrix_keypad.c
+++ b/drivers/input/keyboard/matrix_keypad.c
@@ -496,19 +496,7 @@ static struct platform_driver matrix_keypad_driver = {
#endif
},
};
-
-static int __init matrix_keypad_init(void)
-{
- return platform_driver_register(&matrix_keypad_driver);
-}
-
-static void __exit matrix_keypad_exit(void)
-{
- platform_driver_unregister(&matrix_keypad_driver);
-}
-
-module_init(matrix_keypad_init);
-module_exit(matrix_keypad_exit);
+module_platform_driver(matrix_keypad_driver);
MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
MODULE_DESCRIPTION("GPIO Driven Matrix Keypad Driver");
diff --git a/drivers/input/keyboard/nomadik-ske-keypad.c b/drivers/input/keyboard/nomadik-ske-keypad.c
index fcdec5e..e35566a 100644
--- a/drivers/input/keyboard/nomadik-ske-keypad.c
+++ b/drivers/input/keyboard/nomadik-ske-keypad.c
@@ -379,7 +379,7 @@ static const struct dev_pm_ops ske_keypad_dev_pm_ops = {
};
#endif
-struct platform_driver ske_keypad_driver = {
+static struct platform_driver ske_keypad_driver = {
.driver = {
.name = "nmk-ske-keypad",
.owner = THIS_MODULE,
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index 323bcdf..6b630d9 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -473,20 +473,7 @@ static struct platform_driver omap_kp_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init omap_kp_init(void)
-{
- printk(KERN_INFO "OMAP Keypad Driver\n");
- return platform_driver_register(&omap_kp_driver);
-}
-
-static void __exit omap_kp_exit(void)
-{
- platform_driver_unregister(&omap_kp_driver);
-}
-
-module_init(omap_kp_init);
-module_exit(omap_kp_exit);
+module_platform_driver(omap_kp_driver);
MODULE_AUTHOR("Timo Teräs");
MODULE_DESCRIPTION("OMAP Keypad Driver");
diff --git a/drivers/input/keyboard/omap4-keypad.c b/drivers/input/keyboard/omap4-keypad.c
index c51a3c4..d5c5d77 100644
--- a/drivers/input/keyboard/omap4-keypad.c
+++ b/drivers/input/keyboard/omap4-keypad.c
@@ -335,18 +335,7 @@ static struct platform_driver omap4_keypad_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init omap4_keypad_init(void)
-{
- return platform_driver_register(&omap4_keypad_driver);
-}
-module_init(omap4_keypad_init);
-
-static void __exit omap4_keypad_exit(void)
-{
- platform_driver_unregister(&omap4_keypad_driver);
-}
-module_exit(omap4_keypad_exit);
+module_platform_driver(omap4_keypad_driver);
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("OMAP4 Keypad Driver");
diff --git a/drivers/input/keyboard/opencores-kbd.c b/drivers/input/keyboard/opencores-kbd.c
index 1f1a556..abe728c 100644
--- a/drivers/input/keyboard/opencores-kbd.c
+++ b/drivers/input/keyboard/opencores-kbd.c
@@ -163,18 +163,7 @@ static struct platform_driver opencores_kbd_device_driver = {
.name = "opencores-kbd",
},
};
-
-static int __init opencores_kbd_init(void)
-{
- return platform_driver_register(&opencores_kbd_device_driver);
-}
-module_init(opencores_kbd_init);
-
-static void __exit opencores_kbd_exit(void)
-{
- platform_driver_unregister(&opencores_kbd_device_driver);
-}
-module_exit(opencores_kbd_exit);
+module_platform_driver(opencores_kbd_device_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Javier Herrero <jherrero@hvsistemas.es>");
diff --git a/drivers/input/keyboard/pmic8xxx-keypad.c b/drivers/input/keyboard/pmic8xxx-keypad.c
index e7cc51d..01a1c9f 100644
--- a/drivers/input/keyboard/pmic8xxx-keypad.c
+++ b/drivers/input/keyboard/pmic8xxx-keypad.c
@@ -780,18 +780,7 @@ static struct platform_driver pmic8xxx_kp_driver = {
.pm = &pm8xxx_kp_pm_ops,
},
};
-
-static int __init pmic8xxx_kp_init(void)
-{
- return platform_driver_register(&pmic8xxx_kp_driver);
-}
-module_init(pmic8xxx_kp_init);
-
-static void __exit pmic8xxx_kp_exit(void)
-{
- platform_driver_unregister(&pmic8xxx_kp_driver);
-}
-module_exit(pmic8xxx_kp_exit);
+module_platform_driver(pmic8xxx_kp_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("PMIC8XXX keypad driver");
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index eca6ae6..29fe1b2 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -602,19 +602,7 @@ static struct platform_driver pxa27x_keypad_driver = {
#endif
},
};
-
-static int __init pxa27x_keypad_init(void)
-{
- return platform_driver_register(&pxa27x_keypad_driver);
-}
-
-static void __exit pxa27x_keypad_exit(void)
-{
- platform_driver_unregister(&pxa27x_keypad_driver);
-}
-
-module_init(pxa27x_keypad_init);
-module_exit(pxa27x_keypad_exit);
+module_platform_driver(pxa27x_keypad_driver);
MODULE_DESCRIPTION("PXA27x Keypad Controller Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/pxa930_rotary.c b/drivers/input/keyboard/pxa930_rotary.c
index 35451bf..d7f1134 100644
--- a/drivers/input/keyboard/pxa930_rotary.c
+++ b/drivers/input/keyboard/pxa930_rotary.c
@@ -195,18 +195,7 @@ static struct platform_driver pxa930_rotary_driver = {
.probe = pxa930_rotary_probe,
.remove = __devexit_p(pxa930_rotary_remove),
};
-
-static int __init pxa930_rotary_init(void)
-{
- return platform_driver_register(&pxa930_rotary_driver);
-}
-module_init(pxa930_rotary_init);
-
-static void __exit pxa930_rotary_exit(void)
-{
- platform_driver_unregister(&pxa930_rotary_driver);
-}
-module_exit(pxa930_rotary_exit);
+module_platform_driver(pxa930_rotary_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Driver for PXA93x Enhanced Rotary Controller");
diff --git a/drivers/input/keyboard/samsung-keypad.c b/drivers/input/keyboard/samsung-keypad.c
index f689f49..17ba7f9 100644
--- a/drivers/input/keyboard/samsung-keypad.c
+++ b/drivers/input/keyboard/samsung-keypad.c
@@ -20,9 +20,13 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/sched.h>
-#include <plat/keypad.h>
+#include <linux/input/samsung-keypad.h>
#define SAMSUNG_KEYIFCON 0x00
#define SAMSUNG_KEYIFSTSCLR 0x04
@@ -63,36 +67,33 @@ enum samsung_keypad_type {
struct samsung_keypad {
struct input_dev *input_dev;
+ struct platform_device *pdev;
struct clk *clk;
void __iomem *base;
wait_queue_head_t wait;
bool stopped;
+ bool wake_enabled;
int irq;
+ enum samsung_keypad_type type;
unsigned int row_shift;
unsigned int rows;
unsigned int cols;
unsigned int row_state[SAMSUNG_MAX_COLS];
+#ifdef CONFIG_OF
+ int row_gpios[SAMSUNG_MAX_ROWS];
+ int col_gpios[SAMSUNG_MAX_COLS];
+#endif
unsigned short keycodes[];
};
-static int samsung_keypad_is_s5pv210(struct device *dev)
-{
- struct platform_device *pdev = to_platform_device(dev);
- enum samsung_keypad_type type =
- platform_get_device_id(pdev)->driver_data;
-
- return type == KEYPAD_TYPE_S5PV210;
-}
-
static void samsung_keypad_scan(struct samsung_keypad *keypad,
unsigned int *row_state)
{
- struct device *dev = keypad->input_dev->dev.parent;
unsigned int col;
unsigned int val;
for (col = 0; col < keypad->cols; col++) {
- if (samsung_keypad_is_s5pv210(dev)) {
+ if (keypad->type == KEYPAD_TYPE_S5PV210) {
val = S5PV210_KEYIFCOLEN_MASK;
val &= ~(1 << col) << 8;
} else {
@@ -158,6 +159,8 @@ static irqreturn_t samsung_keypad_irq(int irq, void *dev_id)
unsigned int val;
bool key_down;
+ pm_runtime_get_sync(&keypad->pdev->dev);
+
do {
val = readl(keypad->base + SAMSUNG_KEYIFSTSCLR);
/* Clear interrupt. */
@@ -172,6 +175,8 @@ static irqreturn_t samsung_keypad_irq(int irq, void *dev_id)
} while (key_down && !keypad->stopped);
+ pm_runtime_put_sync(&keypad->pdev->dev);
+
return IRQ_HANDLED;
}
@@ -179,6 +184,8 @@ static void samsung_keypad_start(struct samsung_keypad *keypad)
{
unsigned int val;
+ pm_runtime_get_sync(&keypad->pdev->dev);
+
/* Tell IRQ thread that it may poll the device. */
keypad->stopped = false;
@@ -191,12 +198,16 @@ static void samsung_keypad_start(struct samsung_keypad *keypad)
/* KEYIFCOL reg clear. */
writel(0, keypad->base + SAMSUNG_KEYIFCOL);
+
+ pm_runtime_put_sync(&keypad->pdev->dev);
}
static void samsung_keypad_stop(struct samsung_keypad *keypad)
{
unsigned int val;
+ pm_runtime_get_sync(&keypad->pdev->dev);
+
/* Signal IRQ thread to stop polling and disable the handler. */
keypad->stopped = true;
wake_up(&keypad->wait);
@@ -217,6 +228,8 @@ static void samsung_keypad_stop(struct samsung_keypad *keypad)
* re-enable the handler.
*/
enable_irq(keypad->irq);
+
+ pm_runtime_put_sync(&keypad->pdev->dev);
}
static int samsung_keypad_open(struct input_dev *input_dev)
@@ -235,6 +248,126 @@ static void samsung_keypad_close(struct input_dev *input_dev)
samsung_keypad_stop(keypad);
}
+#ifdef CONFIG_OF
+static struct samsung_keypad_platdata *samsung_keypad_parse_dt(
+ struct device *dev)
+{
+ struct samsung_keypad_platdata *pdata;
+ struct matrix_keymap_data *keymap_data;
+ uint32_t *keymap, num_rows = 0, num_cols = 0;
+ struct device_node *np = dev->of_node, *key_np;
+ unsigned int key_count = 0;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata) {
+ dev_err(dev, "could not allocate memory for platform data\n");
+ return NULL;
+ }
+
+ of_property_read_u32(np, "samsung,keypad-num-rows", &num_rows);
+ of_property_read_u32(np, "samsung,keypad-num-columns", &num_cols);
+ if (!num_rows || !num_cols) {
+ dev_err(dev, "number of keypad rows/columns not specified\n");
+ return NULL;
+ }
+ pdata->rows = num_rows;
+ pdata->cols = num_cols;
+
+ keymap_data = devm_kzalloc(dev, sizeof(*keymap_data), GFP_KERNEL);
+ if (!keymap_data) {
+ dev_err(dev, "could not allocate memory for keymap data\n");
+ return NULL;
+ }
+ pdata->keymap_data = keymap_data;
+
+ for_each_child_of_node(np, key_np)
+ key_count++;
+
+ keymap_data->keymap_size = key_count;
+ keymap = devm_kzalloc(dev, sizeof(uint32_t) * key_count, GFP_KERNEL);
+ if (!keymap) {
+ dev_err(dev, "could not allocate memory for keymap\n");
+ return NULL;
+ }
+ keymap_data->keymap = keymap;
+
+ for_each_child_of_node(np, key_np) {
+ u32 row, col, key_code;
+ of_property_read_u32(key_np, "keypad,row", &row);
+ of_property_read_u32(key_np, "keypad,column", &col);
+ of_property_read_u32(key_np, "linux,code", &key_code);
+ *keymap++ = KEY(row, col, key_code);
+ }
+
+ if (of_get_property(np, "linux,input-no-autorepeat", NULL))
+ pdata->no_autorepeat = true;
+ if (of_get_property(np, "linux,input-wakeup", NULL))
+ pdata->wakeup = true;
+
+ return pdata;
+}
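Each device-tree key node parsed above becomes one keymap entry via the KEY() macro from <linux/input/matrix_keypad.h>, which packs roughly ((row << 24) | (col << 16) | keycode). An illustrative example (not part of the patch):

	#include <linux/input.h>
	#include <linux/input/matrix_keypad.h>

	/* keypad,row = 1, keypad,column = 2, linux,code = KEY_A (30): */
	static const u32 example_keymap_entry = KEY(1, 2, KEY_A);	/* 0x0102001e */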
+
+static void samsung_keypad_parse_dt_gpio(struct device *dev,
+ struct samsung_keypad *keypad)
+{
+ struct device_node *np = dev->of_node;
+ int gpio, ret, row, col;
+
+ for (row = 0; row < keypad->rows; row++) {
+ gpio = of_get_named_gpio(np, "row-gpios", row);
+ keypad->row_gpios[row] = gpio;
+ if (!gpio_is_valid(gpio)) {
+ dev_err(dev, "keypad row[%d]: invalid gpio %d\n",
+ row, gpio);
+ continue;
+ }
+
+ ret = gpio_request(gpio, "keypad-row");
+ if (ret)
+ dev_err(dev, "keypad row[%d] gpio request failed\n",
+ row);
+ }
+
+ for (col = 0; col < keypad->cols; col++) {
+ gpio = of_get_named_gpio(np, "col-gpios", col);
+ keypad->col_gpios[col] = gpio;
+ if (!gpio_is_valid(gpio)) {
+ dev_err(dev, "keypad column[%d]: invalid gpio %d\n",
+ col, gpio);
+ continue;
+ }
+
+ ret = gpio_request(gpio, "keypad-col");
+ if (ret)
+ dev_err(dev, "keypad column[%d] gpio request failed\n",
+ col);
+ }
+}
+
+static void samsung_keypad_dt_gpio_free(struct samsung_keypad *keypad)
+{
+ int cnt;
+
+ for (cnt = 0; cnt < keypad->rows; cnt++)
+ if (gpio_is_valid(keypad->row_gpios[cnt]))
+ gpio_free(keypad->row_gpios[cnt]);
+
+ for (cnt = 0; cnt < keypad->cols; cnt++)
+ if (gpio_is_valid(keypad->col_gpios[cnt]))
+ gpio_free(keypad->col_gpios[cnt]);
+}
+#else
+static
+struct samsung_keypad_platdata *samsung_keypad_parse_dt(struct device *dev)
+{
+ return NULL;
+}
+
+static void samsung_keypad_dt_gpio_free(struct samsung_keypad *keypad)
+{
+}
+#endif
+
static int __devinit samsung_keypad_probe(struct platform_device *pdev)
{
const struct samsung_keypad_platdata *pdata;
@@ -246,7 +379,10 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev)
unsigned int keymap_size;
int error;
- pdata = pdev->dev.platform_data;
+ if (pdev->dev.of_node)
+ pdata = samsung_keypad_parse_dt(&pdev->dev);
+ else
+ pdata = pdev->dev.platform_data;
if (!pdata) {
dev_err(&pdev->dev, "no platform data defined\n");
return -EINVAL;
@@ -298,11 +434,23 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev)
}
keypad->input_dev = input_dev;
+ keypad->pdev = pdev;
keypad->row_shift = row_shift;
keypad->rows = pdata->rows;
keypad->cols = pdata->cols;
+ keypad->stopped = true;
init_waitqueue_head(&keypad->wait);
+ if (pdev->dev.of_node) {
+#ifdef CONFIG_OF
+ samsung_keypad_parse_dt_gpio(&pdev->dev, keypad);
+ keypad->type = of_device_is_compatible(pdev->dev.of_node,
+ "samsung,s5pv210-keypad");
+#endif
+ } else {
+ keypad->type = platform_get_device_id(pdev)->driver_data;
+ }
+
input_dev->name = pdev->name;
input_dev->id.bustype = BUS_HOST;
input_dev->dev.parent = &pdev->dev;
@@ -337,18 +485,29 @@ static int __devinit samsung_keypad_probe(struct platform_device *pdev)
goto err_put_clk;
}
+ device_init_wakeup(&pdev->dev, pdata->wakeup);
+ platform_set_drvdata(pdev, keypad);
+ pm_runtime_enable(&pdev->dev);
+
error = input_register_device(keypad->input_dev);
if (error)
goto err_free_irq;
- device_init_wakeup(&pdev->dev, pdata->wakeup);
- platform_set_drvdata(pdev, keypad);
+ if (pdev->dev.of_node) {
+ devm_kfree(&pdev->dev, (void *)pdata->keymap_data->keymap);
+ devm_kfree(&pdev->dev, (void *)pdata->keymap_data);
+ devm_kfree(&pdev->dev, (void *)pdata);
+ }
return 0;
err_free_irq:
free_irq(keypad->irq, keypad);
+ pm_runtime_disable(&pdev->dev);
+ device_init_wakeup(&pdev->dev, 0);
+ platform_set_drvdata(pdev, NULL);
err_put_clk:
clk_put(keypad->clk);
+ samsung_keypad_dt_gpio_free(keypad);
err_unmap_base:
iounmap(keypad->base);
err_free_mem:
@@ -362,6 +521,7 @@ static int __devexit samsung_keypad_remove(struct platform_device *pdev)
{
struct samsung_keypad *keypad = platform_get_drvdata(pdev);
+ pm_runtime_disable(&pdev->dev);
device_init_wakeup(&pdev->dev, 0);
platform_set_drvdata(pdev, NULL);
@@ -374,6 +534,7 @@ static int __devexit samsung_keypad_remove(struct platform_device *pdev)
free_irq(keypad->irq, keypad);
clk_put(keypad->clk);
+ samsung_keypad_dt_gpio_free(keypad);
iounmap(keypad->base);
kfree(keypad);
@@ -381,11 +542,57 @@ static int __devexit samsung_keypad_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_RUNTIME
+static int samsung_keypad_runtime_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct samsung_keypad *keypad = platform_get_drvdata(pdev);
+ unsigned int val;
+ int error;
+
+ if (keypad->stopped)
+ return 0;
+
+ /* This may fail on some SoCs due to lack of controller support */
+ error = enable_irq_wake(keypad->irq);
+ if (!error)
+ keypad->wake_enabled = true;
+
+ val = readl(keypad->base + SAMSUNG_KEYIFCON);
+ val |= SAMSUNG_KEYIFCON_WAKEUPEN;
+ writel(val, keypad->base + SAMSUNG_KEYIFCON);
+
+ clk_disable(keypad->clk);
+
+ return 0;
+}
+
+static int samsung_keypad_runtime_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct samsung_keypad *keypad = platform_get_drvdata(pdev);
+ unsigned int val;
+
+ if (keypad->stopped)
+ return 0;
+
+ clk_enable(keypad->clk);
+
+ val = readl(keypad->base + SAMSUNG_KEYIFCON);
+ val &= ~SAMSUNG_KEYIFCON_WAKEUPEN;
+ writel(val, keypad->base + SAMSUNG_KEYIFCON);
+
+ if (keypad->wake_enabled)
+ disable_irq_wake(keypad->irq);
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
static void samsung_keypad_toggle_wakeup(struct samsung_keypad *keypad,
bool enable)
{
- struct device *dev = keypad->input_dev->dev.parent;
unsigned int val;
clk_enable(keypad->clk);
@@ -393,11 +600,11 @@ static void samsung_keypad_toggle_wakeup(struct samsung_keypad *keypad,
val = readl(keypad->base + SAMSUNG_KEYIFCON);
if (enable) {
val |= SAMSUNG_KEYIFCON_WAKEUPEN;
- if (device_may_wakeup(dev))
+ if (device_may_wakeup(&keypad->pdev->dev))
enable_irq_wake(keypad->irq);
} else {
val &= ~SAMSUNG_KEYIFCON_WAKEUPEN;
- if (device_may_wakeup(dev))
+ if (device_may_wakeup(&keypad->pdev->dev))
disable_irq_wake(keypad->irq);
}
writel(val, keypad->base + SAMSUNG_KEYIFCON);
@@ -440,11 +647,23 @@ static int samsung_keypad_resume(struct device *dev)
return 0;
}
+#endif
static const struct dev_pm_ops samsung_keypad_pm_ops = {
- .suspend = samsung_keypad_suspend,
- .resume = samsung_keypad_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(samsung_keypad_suspend, samsung_keypad_resume)
+ SET_RUNTIME_PM_OPS(samsung_keypad_runtime_suspend,
+ samsung_keypad_runtime_resume, NULL)
};
+
+#ifdef CONFIG_OF
+static const struct of_device_id samsung_keypad_dt_match[] = {
+ { .compatible = "samsung,s3c6410-keypad" },
+ { .compatible = "samsung,s5pv210-keypad" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, samsung_keypad_dt_match);
+#else
+#define samsung_keypad_dt_match NULL
#endif
static struct platform_device_id samsung_keypad_driver_ids[] = {
@@ -465,27 +684,14 @@ static struct platform_driver samsung_keypad_driver = {
.driver = {
.name = "samsung-keypad",
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
+ .of_match_table = samsung_keypad_dt_match,
.pm = &samsung_keypad_pm_ops,
-#endif
},
.id_table = samsung_keypad_driver_ids,
};
-
-static int __init samsung_keypad_init(void)
-{
- return platform_driver_register(&samsung_keypad_driver);
-}
-module_init(samsung_keypad_init);
-
-static void __exit samsung_keypad_exit(void)
-{
- platform_driver_unregister(&samsung_keypad_driver);
-}
-module_exit(samsung_keypad_exit);
+module_platform_driver(samsung_keypad_driver);
MODULE_DESCRIPTION("Samsung keypad driver");
MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
MODULE_AUTHOR("Donghwa Lee <dh09.lee@samsung.com>");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:samsung-keypad");
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c
index 934aeb583..da54ad5 100644
--- a/drivers/input/keyboard/sh_keysc.c
+++ b/drivers/input/keyboard/sh_keysc.c
@@ -337,19 +337,7 @@ static struct platform_driver sh_keysc_device_driver = {
.pm = &sh_keysc_dev_pm_ops,
}
};
-
-static int __init sh_keysc_init(void)
-{
- return platform_driver_register(&sh_keysc_device_driver);
-}
-
-static void __exit sh_keysc_exit(void)
-{
- platform_driver_unregister(&sh_keysc_device_driver);
-}
-
-module_init(sh_keysc_init);
-module_exit(sh_keysc_exit);
+module_platform_driver(sh_keysc_device_driver);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH KEYSC Keypad Driver");
diff --git a/drivers/input/keyboard/spear-keyboard.c b/drivers/input/keyboard/spear-keyboard.c
index d712dff..c88bd63 100644
--- a/drivers/input/keyboard/spear-keyboard.c
+++ b/drivers/input/keyboard/spear-keyboard.c
@@ -326,18 +326,7 @@ static struct platform_driver spear_kbd_driver = {
#endif
},
};
-
-static int __init spear_kbd_init(void)
-{
- return platform_driver_register(&spear_kbd_driver);
-}
-module_init(spear_kbd_init);
-
-static void __exit spear_kbd_exit(void)
-{
- platform_driver_unregister(&spear_kbd_driver);
-}
-module_exit(spear_kbd_exit);
+module_platform_driver(spear_kbd_driver);
MODULE_AUTHOR("Rajeev Kumar");
MODULE_DESCRIPTION("SPEAr Keyboard Driver");
diff --git a/drivers/input/keyboard/stmpe-keypad.c b/drivers/input/keyboard/stmpe-keypad.c
index ab7610c..9397cf9 100644
--- a/drivers/input/keyboard/stmpe-keypad.c
+++ b/drivers/input/keyboard/stmpe-keypad.c
@@ -368,18 +368,7 @@ static struct platform_driver stmpe_keypad_driver = {
.probe = stmpe_keypad_probe,
.remove = __devexit_p(stmpe_keypad_remove),
};
-
-static int __init stmpe_keypad_init(void)
-{
- return platform_driver_register(&stmpe_keypad_driver);
-}
-module_init(stmpe_keypad_init);
-
-static void __exit stmpe_keypad_exit(void)
-{
- platform_driver_unregister(&stmpe_keypad_driver);
-}
-module_exit(stmpe_keypad_exit);
+module_platform_driver(stmpe_keypad_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("STMPExxxx keypad driver");
diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c
index f60c9e8..2dee3e4 100644
--- a/drivers/input/keyboard/tc3589x-keypad.c
+++ b/drivers/input/keyboard/tc3589x-keypad.c
@@ -74,11 +74,13 @@
/**
* struct tc_keypad - data structure used by keypad driver
+ * @tc3589x: pointer to tc35893
* @input: pointer to input device object
* @board: keypad platform device
* @krow: number of rows
 * @kcol: number of columns
* @keymap: matrix scan code table for keycodes
+ * @keypad_stopped: holds keypad status
*/
struct tc_keypad {
struct tc3589x *tc3589x;
@@ -453,18 +455,7 @@ static struct platform_driver tc3589x_keypad_driver = {
.probe = tc3589x_keypad_probe,
.remove = __devexit_p(tc3589x_keypad_remove),
};
-
-static int __init tc3589x_keypad_init(void)
-{
- return platform_driver_register(&tc3589x_keypad_driver);
-}
-module_init(tc3589x_keypad_init);
-
-static void __exit tc3589x_keypad_exit(void)
-{
- return platform_driver_unregister(&tc3589x_keypad_driver);
-}
-module_exit(tc3589x_keypad_exit);
+module_platform_driver(tc3589x_keypad_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Jayeeta Banerjee/Sundar Iyer");
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c
new file mode 100644
index 0000000..958ec10
--- /dev/null
+++ b/drivers/input/keyboard/tca8418_keypad.c
@@ -0,0 +1,430 @@
+/*
+ * Driver for TCA8418 I2C keyboard
+ *
+ * Copyright (C) 2011 Fuel7, Inc. All rights reserved.
+ *
+ * Author: Kyle Manna <kyle.manna@fuel7.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 02111-1307, USA.
+ *
+ * If you can't comply with GPLv2, alternative licensing terms may be
+ * arranged. Please contact Fuel7, Inc. (http://fuel7.com/) for proprietary
+ * alternative licensing inquiries.
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/gpio.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/tca8418_keypad.h>
+
+/* TCA8418 hardware limits */
+#define TCA8418_MAX_ROWS 8
+#define TCA8418_MAX_COLS 10
+
+/* TCA8418 register offsets */
+#define REG_CFG 0x01
+#define REG_INT_STAT 0x02
+#define REG_KEY_LCK_EC 0x03
+#define REG_KEY_EVENT_A 0x04
+#define REG_KEY_EVENT_B 0x05
+#define REG_KEY_EVENT_C 0x06
+#define REG_KEY_EVENT_D 0x07
+#define REG_KEY_EVENT_E 0x08
+#define REG_KEY_EVENT_F 0x09
+#define REG_KEY_EVENT_G 0x0A
+#define REG_KEY_EVENT_H 0x0B
+#define REG_KEY_EVENT_I 0x0C
+#define REG_KEY_EVENT_J 0x0D
+#define REG_KP_LCK_TIMER 0x0E
+#define REG_UNLOCK1 0x0F
+#define REG_UNLOCK2 0x10
+#define REG_GPIO_INT_STAT1 0x11
+#define REG_GPIO_INT_STAT2 0x12
+#define REG_GPIO_INT_STAT3 0x13
+#define REG_GPIO_DAT_STAT1 0x14
+#define REG_GPIO_DAT_STAT2 0x15
+#define REG_GPIO_DAT_STAT3 0x16
+#define REG_GPIO_DAT_OUT1 0x17
+#define REG_GPIO_DAT_OUT2 0x18
+#define REG_GPIO_DAT_OUT3 0x19
+#define REG_GPIO_INT_EN1 0x1A
+#define REG_GPIO_INT_EN2 0x1B
+#define REG_GPIO_INT_EN3 0x1C
+#define REG_KP_GPIO1 0x1D
+#define REG_KP_GPIO2 0x1E
+#define REG_KP_GPIO3 0x1F
+#define REG_GPI_EM1 0x20
+#define REG_GPI_EM2 0x21
+#define REG_GPI_EM3 0x22
+#define REG_GPIO_DIR1 0x23
+#define REG_GPIO_DIR2 0x24
+#define REG_GPIO_DIR3 0x25
+#define REG_GPIO_INT_LVL1 0x26
+#define REG_GPIO_INT_LVL2 0x27
+#define REG_GPIO_INT_LVL3 0x28
+#define REG_DEBOUNCE_DIS1 0x29
+#define REG_DEBOUNCE_DIS2 0x2A
+#define REG_DEBOUNCE_DIS3 0x2B
+#define REG_GPIO_PULL1 0x2C
+#define REG_GPIO_PULL2 0x2D
+#define REG_GPIO_PULL3 0x2E
+
+/* TCA8418 bit definitions */
+#define CFG_AI BIT(7)
+#define CFG_GPI_E_CFG BIT(6)
+#define CFG_OVR_FLOW_M BIT(5)
+#define CFG_INT_CFG BIT(4)
+#define CFG_OVR_FLOW_IEN BIT(3)
+#define CFG_K_LCK_IEN BIT(2)
+#define CFG_GPI_IEN BIT(1)
+#define CFG_KE_IEN BIT(0)
+
+#define INT_STAT_CAD_INT BIT(4)
+#define INT_STAT_OVR_FLOW_INT BIT(3)
+#define INT_STAT_K_LCK_INT BIT(2)
+#define INT_STAT_GPI_INT BIT(1)
+#define INT_STAT_K_INT BIT(0)
+
+/* TCA8418 register masks */
+#define KEY_LCK_EC_KEC 0x7
+#define KEY_EVENT_CODE 0x7f
+#define KEY_EVENT_VALUE 0x80
+
+
+static const struct i2c_device_id tca8418_id[] = {
+ { TCA8418_NAME, 8418, },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tca8418_id);
+
+struct tca8418_keypad {
+ unsigned int rows;
+ unsigned int cols;
+ unsigned int keypad_mask; /* Mask for keypad col/row regs */
+ unsigned int irq;
+ unsigned int row_shift;
+
+ struct i2c_client *client;
+ struct input_dev *input;
+
+ /* Flexible array member, must be at end of struct */
+ unsigned short keymap[];
+};
+
+/*
+ * Write a byte to the TCA8418
+ */
+static int tca8418_write_byte(struct tca8418_keypad *keypad_data,
+ int reg, u8 val)
+{
+ int error;
+
+ error = i2c_smbus_write_byte_data(keypad_data->client, reg, val);
+ if (error < 0) {
+ dev_err(&keypad_data->client->dev,
+ "%s failed, reg: %d, val: %d, error: %d\n",
+ __func__, reg, val, error);
+ return error;
+ }
+
+ return 0;
+}
+
+/*
+ * Read a byte from the TCA8418
+ */
+static int tca8418_read_byte(struct tca8418_keypad *keypad_data,
+ int reg, u8 *val)
+{
+ int error;
+
+ error = i2c_smbus_read_byte_data(keypad_data->client, reg);
+ if (error < 0) {
+ dev_err(&keypad_data->client->dev,
+ "%s failed, reg: %d, error: %d\n",
+ __func__, reg, error);
+ return error;
+ }
+
+ *val = (u8)error;
+
+ return 0;
+}
+
+static void tca8418_read_keypad(struct tca8418_keypad *keypad_data)
+{
+ int error, col, row;
+ u8 reg, state, code;
+
+ /* Initial read of the key event FIFO */
+ error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
+
+ /* Assume that key code 0 signifies empty FIFO */
+ while (error >= 0 && reg > 0) {
+ state = reg & KEY_EVENT_VALUE;
+ code = reg & KEY_EVENT_CODE;
+
+ row = code / TCA8418_MAX_COLS;
+ col = code % TCA8418_MAX_COLS;
+
+ row = (col) ? row : row - 1;
+ col = (col) ? col - 1 : TCA8418_MAX_COLS - 1;
+
+ code = MATRIX_SCAN_CODE(row, col, keypad_data->row_shift);
+ input_event(keypad_data->input, EV_MSC, MSC_SCAN, code);
+ input_report_key(keypad_data->input,
+ keypad_data->keymap[code], state);
+
+ /* Read for next loop */
+ error = tca8418_read_byte(keypad_data, REG_KEY_EVENT_A, &reg);
+ }
+
+ if (error < 0)
+ dev_err(&keypad_data->client->dev,
+ "unable to read REG_KEY_EVENT_A\n");
+
+ input_sync(keypad_data->input);
+}
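The arithmetic above decodes the controller's 1-based key event numbering, ten codes per row. A small illustrative sketch of the same decoding (not part of the driver):

	#include <assert.h>

	static void tca8418_decode_example(void)
	{
		int code = 11;		/* event code for the key at row 1, column 0 */
		int row = code / 10;	/* TCA8418_MAX_COLS == 10 */
		int col = code % 10;

		row = col ? row : row - 1;	/* codes start at 1, not 0 */
		col = col ? col - 1 : 10 - 1;

		assert(row == 1 && col == 0);	/* code 10 decodes to row 0, column 9 */
	}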
+
+/*
+ * Threaded IRQ handler; it can (and will) sleep.
+ */
+static irqreturn_t tca8418_irq_handler(int irq, void *dev_id)
+{
+ struct tca8418_keypad *keypad_data = dev_id;
+ u8 reg;
+ int error;
+
+ error = tca8418_read_byte(keypad_data, REG_INT_STAT, &reg);
+ if (error) {
+ dev_err(&keypad_data->client->dev,
+ "unable to read REG_INT_STAT\n");
+ goto exit;
+ }
+
+ if (reg & INT_STAT_OVR_FLOW_INT)
+ dev_warn(&keypad_data->client->dev, "overflow occurred\n");
+
+ if (reg & INT_STAT_K_INT)
+ tca8418_read_keypad(keypad_data);
+
+exit:
+ /* Clear all interrupts, even IRQs we didn't check (GPI, CAD, LCK) */
+ reg = 0xff;
+ error = tca8418_write_byte(keypad_data, REG_INT_STAT, reg);
+ if (error)
+ dev_err(&keypad_data->client->dev,
+ "unable to clear REG_INT_STAT\n");
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Configure the TCA8418 for keypad operation
+ */
+static int __devinit tca8418_configure(struct tca8418_keypad *keypad_data)
+{
+ int reg, error;
+
+ /* Write config register, if this fails assume device not present */
+ error = tca8418_write_byte(keypad_data, REG_CFG,
+ CFG_INT_CFG | CFG_OVR_FLOW_IEN | CFG_KE_IEN);
+ if (error < 0)
+ return -ENODEV;
+
+
+ /* Assemble a mask for row and column registers */
+ reg = ~(~0 << keypad_data->rows);
+ reg += (~(~0 << keypad_data->cols)) << 8;
+ keypad_data->keypad_mask = reg;
+
+ /* Set registers to keypad mode */
+ error |= tca8418_write_byte(keypad_data, REG_KP_GPIO1, reg);
+ error |= tca8418_write_byte(keypad_data, REG_KP_GPIO2, reg >> 8);
+ error |= tca8418_write_byte(keypad_data, REG_KP_GPIO3, reg >> 16);
+
+ /* Enable column debouncing */
+ error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS1, reg);
+ error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS2, reg >> 8);
+ error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS3, reg >> 16);
+
+ return error;
+}
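To make the mask assembly above concrete, here is a worked example for a 4-row by 6-column keypad (illustrative only, using the same expressions as the function above):

	static unsigned int tca8418_mask_example(void)
	{
		unsigned int rows = 4, cols = 6, reg;

		reg = ~(~0 << rows);		/* 0x0000000f: rows 0-3 in keypad mode */
		reg += (~(~0 << cols)) << 8;	/* 0x00003f0f: columns 0-5 added above */

		/* REG_KP_GPIO1 gets 0x0f, REG_KP_GPIO2 gets 0x3f, REG_KP_GPIO3 gets 0. */
		return reg;
	}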
+
+static int __devinit tca8418_keypad_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct tca8418_keypad_platform_data *pdata =
+ client->dev.platform_data;
+ struct tca8418_keypad *keypad_data;
+ struct input_dev *input;
+ int error, row_shift, max_keys;
+
+ /* Copy the platform data */
+ if (!pdata) {
+ dev_dbg(&client->dev, "no platform data\n");
+ return -EINVAL;
+ }
+
+ if (!pdata->keymap_data) {
+ dev_err(&client->dev, "no keymap data defined\n");
+ return -EINVAL;
+ }
+
+ if (!pdata->rows || pdata->rows > TCA8418_MAX_ROWS) {
+ dev_err(&client->dev, "invalid rows\n");
+ return -EINVAL;
+ }
+
+ if (!pdata->cols || pdata->cols > TCA8418_MAX_COLS) {
+ dev_err(&client->dev, "invalid columns\n");
+ return -EINVAL;
+ }
+
+ /* Check i2c driver capabilities */
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) {
+ dev_err(&client->dev, "%s adapter not supported\n",
+ dev_driver_string(&client->adapter->dev));
+ return -ENODEV;
+ }
+
+ row_shift = get_count_order(pdata->cols);
+ max_keys = pdata->rows << row_shift;
+
+ /* Allocate memory for keypad_data, keymap and input device */
+ keypad_data = kzalloc(sizeof(*keypad_data) +
+ max_keys * sizeof(keypad_data->keymap[0]), GFP_KERNEL);
+ if (!keypad_data)
+ return -ENOMEM;
+
+ keypad_data->rows = pdata->rows;
+ keypad_data->cols = pdata->cols;
+ keypad_data->client = client;
+ keypad_data->row_shift = row_shift;
+
+ /* Initialize the chip or fail if chip isn't present */
+ error = tca8418_configure(keypad_data);
+ if (error < 0)
+ goto fail1;
+
+ /* Configure input device */
+ input = input_allocate_device();
+ if (!input) {
+ error = -ENOMEM;
+ goto fail1;
+ }
+ keypad_data->input = input;
+
+ input->name = client->name;
+ input->dev.parent = &client->dev;
+
+ input->id.bustype = BUS_I2C;
+ input->id.vendor = 0x0001;
+ input->id.product = 0x001;
+ input->id.version = 0x0001;
+
+ input->keycode = keypad_data->keymap;
+ input->keycodesize = sizeof(keypad_data->keymap[0]);
+ input->keycodemax = max_keys;
+
+ __set_bit(EV_KEY, input->evbit);
+ if (pdata->rep)
+ __set_bit(EV_REP, input->evbit);
+
+ input_set_capability(input, EV_MSC, MSC_SCAN);
+
+ input_set_drvdata(input, keypad_data);
+
+ matrix_keypad_build_keymap(pdata->keymap_data, row_shift,
+ input->keycode, input->keybit);
+
+ if (pdata->irq_is_gpio)
+ client->irq = gpio_to_irq(client->irq);
+
+ error = request_threaded_irq(client->irq, NULL, tca8418_irq_handler,
+ IRQF_TRIGGER_FALLING,
+ client->name, keypad_data);
+ if (error) {
+ dev_dbg(&client->dev,
+ "Unable to claim irq %d; error %d\n",
+ client->irq, error);
+ goto fail2;
+ }
+
+ error = input_register_device(input);
+ if (error) {
+ dev_dbg(&client->dev,
+ "Unable to register input device, error: %d\n", error);
+ goto fail3;
+ }
+
+ i2c_set_clientdata(client, keypad_data);
+ return 0;
+
+fail3:
+ free_irq(client->irq, keypad_data);
+fail2:
+ input_free_device(input);
+fail1:
+ kfree(keypad_data);
+ return error;
+}
+
+static int __devexit tca8418_keypad_remove(struct i2c_client *client)
+{
+ struct tca8418_keypad *keypad_data = i2c_get_clientdata(client);
+
+ free_irq(keypad_data->client->irq, keypad_data);
+
+ input_unregister_device(keypad_data->input);
+
+ kfree(keypad_data);
+
+ return 0;
+}
+
+
+static struct i2c_driver tca8418_keypad_driver = {
+ .driver = {
+ .name = TCA8418_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = tca8418_keypad_probe,
+ .remove = __devexit_p(tca8418_keypad_remove),
+ .id_table = tca8418_id,
+};
+
+static int __init tca8418_keypad_init(void)
+{
+ return i2c_add_driver(&tca8418_keypad_driver);
+}
+subsys_initcall(tca8418_keypad_init);
+
+static void __exit tca8418_keypad_exit(void)
+{
+ i2c_del_driver(&tca8418_keypad_driver);
+}
+module_exit(tca8418_keypad_exit);
+
+MODULE_AUTHOR("Kyle Manna <kyle.manna@fuel7.com>");
+MODULE_DESCRIPTION("Keypad driver for TCA8418");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index cf3228b..a136e2e 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -26,6 +26,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/interrupt.h>
+#include <linux/of.h>
#include <linux/clk.h>
#include <linux/slab.h>
#include <mach/clk.h>
@@ -52,6 +53,7 @@
/* KBC Interrupt Register */
#define KBC_INT_0 0x4
#define KBC_INT_FIFO_CNT_INT_STATUS (1 << 2)
+#define KBC_INT_KEYPRESS_INT_STATUS (1 << 0)
#define KBC_ROW_CFG0_0 0x8
#define KBC_COL_CFG0_0 0x18
@@ -74,15 +76,17 @@ struct tegra_kbc {
unsigned int cp_to_wkup_dly;
bool use_fn_map;
bool use_ghost_filter;
+ bool keypress_caused_wake;
const struct tegra_kbc_platform_data *pdata;
unsigned short keycode[KBC_MAX_KEY * 2];
unsigned short current_keys[KBC_MAX_KPENT];
unsigned int num_pressed_keys;
+ u32 wakeup_key;
struct timer_list timer;
struct clk *clk;
};
-static const u32 tegra_kbc_default_keymap[] = {
+static const u32 tegra_kbc_default_keymap[] __devinitdata = {
KEY(0, 2, KEY_W),
KEY(0, 3, KEY_S),
KEY(0, 4, KEY_A),
@@ -217,7 +221,8 @@ static const u32 tegra_kbc_default_keymap[] = {
KEY(31, 4, KEY_HELP),
};
-static const struct matrix_keymap_data tegra_kbc_default_keymap_data = {
+static const
+struct matrix_keymap_data tegra_kbc_default_keymap_data __devinitdata = {
.keymap = tegra_kbc_default_keymap,
.keymap_size = ARRAY_SIZE(tegra_kbc_default_keymap),
};
@@ -409,6 +414,9 @@ static irqreturn_t tegra_kbc_isr(int irq, void *args)
*/
tegra_kbc_set_fifo_interrupt(kbc, false);
mod_timer(&kbc->timer, jiffies + kbc->cp_dly_jiffies);
+ } else if (val & KBC_INT_KEYPRESS_INT_STATUS) {
+ /* We can be here only through system resume path */
+ kbc->keypress_caused_wake = true;
}
spin_unlock_irqrestore(&kbc->lock, flags);
@@ -576,6 +584,56 @@ tegra_kbc_check_pin_cfg(const struct tegra_kbc_platform_data *pdata,
return true;
}
+#ifdef CONFIG_OF
+static struct tegra_kbc_platform_data * __devinit
+tegra_kbc_dt_parse_pdata(struct platform_device *pdev)
+{
+ struct tegra_kbc_platform_data *pdata;
+ struct device_node *np = pdev->dev.of_node;
+ u32 prop;
+ int i;
+
+ if (!np)
+ return NULL;
+
+ pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ if (!of_property_read_u32(np, "debounce-delay", &prop))
+ pdata->debounce_cnt = prop;
+
+ if (!of_property_read_u32(np, "repeat-delay", &prop))
+ pdata->repeat_cnt = prop;
+
+ if (of_find_property(np, "needs-ghost-filter", NULL))
+ pdata->use_ghost_filter = true;
+
+ if (of_find_property(np, "wakeup-source", NULL))
+ pdata->wakeup = true;
+
+ /*
+ * All currently known keymaps with device tree support use the same
+ * pin_cfg, so set it up here.
+ */
+ for (i = 0; i < KBC_MAX_ROW; i++) {
+ pdata->pin_cfg[i].num = i;
+ pdata->pin_cfg[i].is_row = true;
+ }
+
+ for (i = 0; i < KBC_MAX_COL; i++) {
+ pdata->pin_cfg[KBC_MAX_ROW + i].num = i;
+ pdata->pin_cfg[KBC_MAX_ROW + i].is_row = false;
+ }
+
+ return pdata;
+}
+#else
+static inline struct tegra_kbc_platform_data *tegra_kbc_dt_parse_pdata(
+ struct platform_device *pdev)
+{
+ return NULL;
+}
+#endif
+
static int __devinit tegra_kbc_probe(struct platform_device *pdev)
{
const struct tegra_kbc_platform_data *pdata = pdev->dev.platform_data;
@@ -590,21 +648,28 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
unsigned int scan_time_rows;
if (!pdata)
- return -EINVAL;
+ pdata = tegra_kbc_dt_parse_pdata(pdev);
- if (!tegra_kbc_check_pin_cfg(pdata, &pdev->dev, &num_rows))
+ if (!pdata)
return -EINVAL;
+ if (!tegra_kbc_check_pin_cfg(pdata, &pdev->dev, &num_rows)) {
+ err = -EINVAL;
+ goto err_free_pdata;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "failed to get I/O memory\n");
- return -ENXIO;
+ err = -ENXIO;
+ goto err_free_pdata;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "failed to get keyboard IRQ\n");
- return -ENXIO;
+ err = -ENXIO;
+ goto err_free_pdata;
}
kbc = kzalloc(sizeof(*kbc), GFP_KERNEL);
@@ -674,9 +739,10 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
keymap_data = pdata->keymap_data ?: &tegra_kbc_default_keymap_data;
matrix_keypad_build_keymap(keymap_data, KBC_ROW_SHIFT,
input_dev->keycode, input_dev->keybit);
+ kbc->wakeup_key = pdata->wakeup_key;
- err = request_irq(kbc->irq, tegra_kbc_isr, IRQF_TRIGGER_HIGH,
- pdev->name, kbc);
+ err = request_irq(kbc->irq, tegra_kbc_isr,
+ IRQF_NO_SUSPEND | IRQF_TRIGGER_HIGH, pdev->name, kbc);
if (err) {
dev_err(&pdev->dev, "failed to request keyboard IRQ\n");
goto err_put_clk;
@@ -706,6 +772,9 @@ err_free_mem_region:
err_free_mem:
input_free_device(input_dev);
kfree(kbc);
+err_free_pdata:
+ if (!pdev->dev.platform_data)
+ kfree(pdata);
return err;
}
@@ -715,6 +784,8 @@ static int __devexit tegra_kbc_remove(struct platform_device *pdev)
struct tegra_kbc *kbc = platform_get_drvdata(pdev);
struct resource *res;
+ platform_set_drvdata(pdev, NULL);
+
free_irq(kbc->irq, pdev);
clk_put(kbc->clk);
@@ -723,9 +794,14 @@ static int __devexit tegra_kbc_remove(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
- kfree(kbc);
+ /*
+ * If we do not have platform data attached to the device we
+ * allocated it ourselves and thus need to free it.
+ */
+ if (!pdev->dev.platform_data)
+ kfree(kbc->pdata);
- platform_set_drvdata(pdev, NULL);
+ kfree(kbc);
return 0;
}
@@ -754,6 +830,8 @@ static int tegra_kbc_suspend(struct device *dev)
tegra_kbc_setup_wakekeys(kbc, true);
msleep(30);
+ kbc->keypress_caused_wake = false;
+ enable_irq(kbc->irq);
enable_irq_wake(kbc->irq);
} else {
if (kbc->idev->users)
@@ -780,7 +858,19 @@ static int tegra_kbc_resume(struct device *dev)
tegra_kbc_set_fifo_interrupt(kbc, true);
- enable_irq(kbc->irq);
+ if (kbc->keypress_caused_wake && kbc->wakeup_key) {
+ /*
+ * We can't report events directly from the ISR
+ * because timekeeping is stopped when processing
+ * wakeup request and we get a nasty warning when
+ * we try to call do_gettimeofday() in evdev
+ * handler.
+ */
+ input_report_key(kbc->idev, kbc->wakeup_key, 1);
+ input_sync(kbc->idev);
+ input_report_key(kbc->idev, kbc->wakeup_key, 0);
+ input_sync(kbc->idev);
+ }
} else {
if (kbc->idev->users)
err = tegra_kbc_start(kbc);
@@ -793,6 +883,12 @@ static int tegra_kbc_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(tegra_kbc_pm_ops, tegra_kbc_suspend, tegra_kbc_resume);
+static const struct of_device_id tegra_kbc_of_match[] = {
+ { .compatible = "nvidia,tegra20-kbc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_kbc_of_match);
+
static struct platform_driver tegra_kbc_driver = {
.probe = tegra_kbc_probe,
.remove = __devexit_p(tegra_kbc_remove),
@@ -800,20 +896,10 @@ static struct platform_driver tegra_kbc_driver = {
.name = "tegra-kbc",
.owner = THIS_MODULE,
.pm = &tegra_kbc_pm_ops,
+ .of_match_table = tegra_kbc_of_match,
},
};
-
-static void __exit tegra_kbc_exit(void)
-{
- platform_driver_unregister(&tegra_kbc_driver);
-}
-module_exit(tegra_kbc_exit);
-
-static int __init tegra_kbc_init(void)
-{
- return platform_driver_register(&tegra_kbc_driver);
-}
-module_init(tegra_kbc_init);
+module_platform_driver(tegra_kbc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Rakesh Iyer <riyer@nvidia.com>");
diff --git a/drivers/input/keyboard/tnetv107x-keypad.c b/drivers/input/keyboard/tnetv107x-keypad.c
index 66e55e5..fb39c94 100644
--- a/drivers/input/keyboard/tnetv107x-keypad.c
+++ b/drivers/input/keyboard/tnetv107x-keypad.c
@@ -322,19 +322,7 @@ static struct platform_driver keypad_driver = {
.driver.name = "tnetv107x-keypad",
.driver.owner = THIS_MODULE,
};
-
-static int __init keypad_init(void)
-{
- return platform_driver_register(&keypad_driver);
-}
-
-static void __exit keypad_exit(void)
-{
- platform_driver_unregister(&keypad_driver);
-}
-
-module_init(keypad_init);
-module_exit(keypad_exit);
+module_platform_driver(keypad_driver);
MODULE_AUTHOR("Cyril Chemparathy");
MODULE_DESCRIPTION("TNETV107X Keypad Driver");
diff --git a/drivers/input/keyboard/twl4030_keypad.c b/drivers/input/keyboard/twl4030_keypad.c
index a26922c..67bec14 100644
--- a/drivers/input/keyboard/twl4030_keypad.c
+++ b/drivers/input/keyboard/twl4030_keypad.c
@@ -34,7 +34,6 @@
#include <linux/i2c/twl.h>
#include <linux/slab.h>
-
/*
* The TWL4030 family chips include a keypad controller that supports
* up to an 8x8 switch matrix. The controller can issue system wakeup
@@ -302,7 +301,7 @@ static int __devinit twl4030_kp_program(struct twl4030_keypad *kp)
if (twl4030_kpwrite_u8(kp, i, KEYP_DEB) < 0)
return -EIO;
- /* Set timeout period to 100 ms */
+ /* Set timeout period to 200 ms */
i = KEYP_PERIOD_US(200000, PTV_PRESCALER);
if (twl4030_kpwrite_u8(kp, (i & 0xFF), KEYP_TIMEOUT_L) < 0)
return -EIO;
@@ -460,21 +459,9 @@ static struct platform_driver twl4030_kp_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init twl4030_kp_init(void)
-{
- return platform_driver_register(&twl4030_kp_driver);
-}
-module_init(twl4030_kp_init);
-
-static void __exit twl4030_kp_exit(void)
-{
- platform_driver_unregister(&twl4030_kp_driver);
-}
-module_exit(twl4030_kp_exit);
+module_platform_driver(twl4030_kp_driver);
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("TWL4030 Keypad Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:twl4030_keypad");
-
diff --git a/drivers/input/keyboard/w90p910_keypad.c b/drivers/input/keyboard/w90p910_keypad.c
index 318586d..99bbb7e 100644
--- a/drivers/input/keyboard/w90p910_keypad.c
+++ b/drivers/input/keyboard/w90p910_keypad.c
@@ -262,19 +262,7 @@ static struct platform_driver w90p910_keypad_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init w90p910_keypad_init(void)
-{
- return platform_driver_register(&w90p910_keypad_driver);
-}
-
-static void __exit w90p910_keypad_exit(void)
-{
- platform_driver_unregister(&w90p910_keypad_driver);
-}
-
-module_init(w90p910_keypad_init);
-module_exit(w90p910_keypad_exit);
+module_platform_driver(w90p910_keypad_driver);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("w90p910 keypad driver");
diff --git a/drivers/input/misc/88pm860x_onkey.c b/drivers/input/misc/88pm860x_onkey.c
index 3dca3c1..f2e0cbc 100644
--- a/drivers/input/misc/88pm860x_onkey.c
+++ b/drivers/input/misc/88pm860x_onkey.c
@@ -137,18 +137,7 @@ static struct platform_driver pm860x_onkey_driver = {
.probe = pm860x_onkey_probe,
.remove = __devexit_p(pm860x_onkey_remove),
};
-
-static int __init pm860x_onkey_init(void)
-{
- return platform_driver_register(&pm860x_onkey_driver);
-}
-module_init(pm860x_onkey_init);
-
-static void __exit pm860x_onkey_exit(void)
-{
- platform_driver_unregister(&pm860x_onkey_driver);
-}
-module_exit(pm860x_onkey_exit);
+module_platform_driver(pm860x_onkey_driver);
MODULE_DESCRIPTION("Marvell 88PM860x ONKEY driver");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 22d875f..7b46781 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -179,6 +179,31 @@ config INPUT_APANEL
To compile this driver as a module, choose M here: the module will
be called apanel.
+config INPUT_GP2A
+ tristate "Sharp GP2AP002A00F I2C Proximity/Opto sensor driver"
+ depends on I2C
+ depends on GENERIC_GPIO
+ help
+ Say Y here if you have a Sharp GP2AP002A00F proximity/als combo-chip
+ hooked to an I2C bus.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gp2ap002a00f.
+
+config INPUT_GPIO_TILT_POLLED
+ tristate "Polled GPIO tilt switch"
+ depends on GENERIC_GPIO
+ select INPUT_POLLDEV
+ help
+ This driver implements support for tilt switches connected
+ to GPIO pins that are not capable of generating interrupts.
+
+ The list of gpios to use and the mapping of their states
+ to specific angles is done via platform data.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gpio_tilt_polled.
+
config INPUT_IXP4XX_BEEPER
tristate "IXP4XX Beeper support"
depends on ARCH_IXP4XX
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index a244fc6..46671a8 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -22,6 +22,8 @@ obj-$(CONFIG_INPUT_CMA3000) += cma3000_d0x.o
obj-$(CONFIG_INPUT_CMA3000_I2C) += cma3000_d0x_i2c.o
obj-$(CONFIG_INPUT_COBALT_BTNS) += cobalt_btns.o
obj-$(CONFIG_INPUT_DM355EVM) += dm355evm_keys.o
+obj-$(CONFIG_INPUT_GP2A) += gp2ap002a00f.o
+obj-$(CONFIG_INPUT_GPIO_TILT_POLLED) += gpio_tilt_polled.o
obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
obj-$(CONFIG_INPUT_IXP4XX_BEEPER) += ixp4xx-beeper.o
obj-$(CONFIG_INPUT_KEYSPAN_REMOTE) += keyspan_remote.o
diff --git a/drivers/input/misc/ab8500-ponkey.c b/drivers/input/misc/ab8500-ponkey.c
index 3d3288a..350fd0c 100644
--- a/drivers/input/misc/ab8500-ponkey.c
+++ b/drivers/input/misc/ab8500-ponkey.c
@@ -12,7 +12,7 @@
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/interrupt.h>
-#include <linux/mfd/ab8500.h>
+#include <linux/mfd/abx500/ab8500.h>
#include <linux/slab.h>
/**
@@ -139,18 +139,7 @@ static struct platform_driver ab8500_ponkey_driver = {
.probe = ab8500_ponkey_probe,
.remove = __devexit_p(ab8500_ponkey_remove),
};
-
-static int __init ab8500_ponkey_init(void)
-{
- return platform_driver_register(&ab8500_ponkey_driver);
-}
-module_init(ab8500_ponkey_init);
-
-static void __exit ab8500_ponkey_exit(void)
-{
- platform_driver_unregister(&ab8500_ponkey_driver);
-}
-module_exit(ab8500_ponkey_exit);
+module_platform_driver(ab8500_ponkey_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Sundar Iyer <sundar.iyer@stericsson.com>");
diff --git a/drivers/input/misc/adxl34x-spi.c b/drivers/input/misc/adxl34x-spi.c
index f29de22..34d401e 100644
--- a/drivers/input/misc/adxl34x-spi.c
+++ b/drivers/input/misc/adxl34x-spi.c
@@ -122,7 +122,6 @@ static SIMPLE_DEV_PM_OPS(adxl34x_spi_pm, adxl34x_spi_suspend,
static struct spi_driver adxl34x_driver = {
.driver = {
.name = "adxl34x",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
.pm = &adxl34x_spi_pm,
},
diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c
index 0924480..1cf72fe 100644
--- a/drivers/input/misc/adxl34x.c
+++ b/drivers/input/misc/adxl34x.c
@@ -452,10 +452,10 @@ static ssize_t adxl34x_disable_store(struct device *dev,
const char *buf, size_t count)
{
struct adxl34x *ac = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned int val;
int error;
- error = strict_strtoul(buf, 10, &val);
+ error = kstrtouint(buf, 10, &val);
if (error)
return error;
@@ -541,10 +541,10 @@ static ssize_t adxl34x_rate_store(struct device *dev,
const char *buf, size_t count)
{
struct adxl34x *ac = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned char val;
int error;
- error = strict_strtoul(buf, 10, &val);
+ error = kstrtou8(buf, 10, &val);
if (error)
return error;
@@ -576,10 +576,10 @@ static ssize_t adxl34x_autosleep_store(struct device *dev,
const char *buf, size_t count)
{
struct adxl34x *ac = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned int val;
int error;
- error = strict_strtoul(buf, 10, &val);
+ error = kstrtouint(buf, 10, &val);
if (error)
return error;
@@ -623,13 +623,13 @@ static ssize_t adxl34x_write_store(struct device *dev,
const char *buf, size_t count)
{
struct adxl34x *ac = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned int val;
int error;
/*
* This allows basic ADXL register write access for debug purposes.
*/
- error = strict_strtoul(buf, 16, &val);
+ error = kstrtouint(buf, 16, &val);
if (error)
return error;
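The adxl34x hunks above show the parsing conversion applied in several of these drivers: strict_strtoul() could only fill an unsigned long, forcing every sysfs value through a wide temporary, whereas the kstrto*() helpers parse straight into the destination type and return -EINVAL or -ERANGE on bad or out-of-range input. A minimal sketch of the resulting store-handler shape; the device, register and helper names here are illustrative only:

static ssize_t example_threshold_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct example_chip *chip = dev_get_drvdata(dev);
	u8 val;
	int error;

	/* Parse directly into the register-sized type. */
	error = kstrtou8(buf, 10, &val);
	if (error)
		return error;

	example_write_reg(chip, EXAMPLE_THRESHOLD_REG, val);

	return count;
}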
diff --git a/drivers/input/misc/ati_remote2.c b/drivers/input/misc/ati_remote2.c
index 8d345e8..f63341f 100644
--- a/drivers/input/misc/ati_remote2.c
+++ b/drivers/input/misc/ati_remote2.c
@@ -42,13 +42,13 @@ static int ati_remote2_set_mask(const char *val,
const struct kernel_param *kp,
unsigned int max)
{
- unsigned long mask;
+ unsigned int mask;
int ret;
if (!val)
return -EINVAL;
- ret = strict_strtoul(val, 0, &mask);
+ ret = kstrtouint(val, 0, &mask);
if (ret)
return ret;
@@ -720,11 +720,12 @@ static ssize_t ati_remote2_store_channel_mask(struct device *dev,
struct usb_device *udev = to_usb_device(dev);
struct usb_interface *intf = usb_ifnum_to_if(udev, 0);
struct ati_remote2 *ar2 = usb_get_intfdata(intf);
- unsigned long mask;
+ unsigned int mask;
int r;
- if (strict_strtoul(buf, 0, &mask))
- return -EINVAL;
+ r = kstrtouint(buf, 0, &mask);
+ if (r)
+ return r;
if (mask & ~ATI_REMOTE2_MAX_CHANNEL_MASK)
return -EINVAL;
@@ -769,10 +770,12 @@ static ssize_t ati_remote2_store_mode_mask(struct device *dev,
struct usb_device *udev = to_usb_device(dev);
struct usb_interface *intf = usb_ifnum_to_if(udev, 0);
struct ati_remote2 *ar2 = usb_get_intfdata(intf);
- unsigned long mask;
+ unsigned int mask;
+ int err;
- if (strict_strtoul(buf, 0, &mask))
- return -EINVAL;
+ err = kstrtouint(buf, 0, &mask);
+ if (err)
+ return err;
if (mask & ~ATI_REMOTE2_MAX_MODE_MASK)
return -EINVAL;
@@ -1010,23 +1013,4 @@ static int ati_remote2_post_reset(struct usb_interface *interface)
return r;
}
-static int __init ati_remote2_init(void)
-{
- int r;
-
- r = usb_register(&ati_remote2_driver);
- if (r)
- printk(KERN_ERR "ati_remote2: usb_register() = %d\n", r);
- else
- printk(KERN_INFO "ati_remote2: " DRIVER_DESC " " DRIVER_VERSION "\n");
-
- return r;
-}
-
-static void __exit ati_remote2_exit(void)
-{
- usb_deregister(&ati_remote2_driver);
-}
-
-module_init(ati_remote2_init);
-module_exit(ati_remote2_exit);
+module_usb_driver(ati_remote2_driver);
diff --git a/drivers/input/misc/bfin_rotary.c b/drivers/input/misc/bfin_rotary.c
index d00edc9..1c4146f 100644
--- a/drivers/input/misc/bfin_rotary.c
+++ b/drivers/input/misc/bfin_rotary.c
@@ -264,18 +264,7 @@ static struct platform_driver bfin_rotary_device_driver = {
#endif
},
};
-
-static int __init bfin_rotary_init(void)
-{
- return platform_driver_register(&bfin_rotary_device_driver);
-}
-module_init(bfin_rotary_init);
-
-static void __exit bfin_rotary_exit(void)
-{
- platform_driver_unregister(&bfin_rotary_device_driver);
-}
-module_exit(bfin_rotary_exit);
+module_platform_driver(bfin_rotary_device_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
diff --git a/drivers/input/misc/cobalt_btns.c b/drivers/input/misc/cobalt_btns.c
index fd8407a..53e43d2 100644
--- a/drivers/input/misc/cobalt_btns.c
+++ b/drivers/input/misc/cobalt_btns.c
@@ -163,16 +163,4 @@ static struct platform_driver cobalt_buttons_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init cobalt_buttons_init(void)
-{
- return platform_driver_register(&cobalt_buttons_driver);
-}
-
-static void __exit cobalt_buttons_exit(void)
-{
- platform_driver_unregister(&cobalt_buttons_driver);
-}
-
-module_init(cobalt_buttons_init);
-module_exit(cobalt_buttons_exit);
+module_platform_driver(cobalt_buttons_driver);
diff --git a/drivers/input/misc/dm355evm_keys.c b/drivers/input/misc/dm355evm_keys.c
index 7283dd2..35083c6 100644
--- a/drivers/input/misc/dm355evm_keys.c
+++ b/drivers/input/misc/dm355evm_keys.c
@@ -267,17 +267,6 @@ static struct platform_driver dm355evm_keys_driver = {
.name = "dm355evm_keys",
},
};
-
-static int __init dm355evm_keys_init(void)
-{
- return platform_driver_register(&dm355evm_keys_driver);
-}
-module_init(dm355evm_keys_init);
-
-static void __exit dm355evm_keys_exit(void)
-{
- platform_driver_unregister(&dm355evm_keys_driver);
-}
-module_exit(dm355evm_keys_exit);
+module_platform_driver(dm355evm_keys_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/gp2ap002a00f.c b/drivers/input/misc/gp2ap002a00f.c
new file mode 100644
index 0000000..71fba8c
--- /dev/null
+++ b/drivers/input/misc/gp2ap002a00f.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2011 Sony Ericsson Mobile Communications Inc.
+ *
+ * Author: Courtney Cavin <courtney.cavin@sonyericsson.com>
+ * Prepared for up-stream by: Oskar Andero <oskar.andero@sonyericsson.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/i2c.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/input/gp2ap002a00f.h>
+
+struct gp2a_data {
+ struct input_dev *input;
+ const struct gp2a_platform_data *pdata;
+ struct i2c_client *i2c_client;
+};
+
+enum gp2a_addr {
+ GP2A_ADDR_PROX = 0x0,
+ GP2A_ADDR_GAIN = 0x1,
+ GP2A_ADDR_HYS = 0x2,
+ GP2A_ADDR_CYCLE = 0x3,
+ GP2A_ADDR_OPMOD = 0x4,
+ GP2A_ADDR_CON = 0x6
+};
+
+enum gp2a_controls {
+ /* Software Shutdown control: 0 = shutdown, 1 = normal operation */
+ GP2A_CTRL_SSD = 0x01
+};
+
+static int gp2a_report(struct gp2a_data *dt)
+{
+ int vo = gpio_get_value(dt->pdata->vout_gpio);
+
+ input_report_switch(dt->input, SW_FRONT_PROXIMITY, !vo);
+ input_sync(dt->input);
+
+ return 0;
+}
+
+static irqreturn_t gp2a_irq(int irq, void *handle)
+{
+ struct gp2a_data *dt = handle;
+
+ gp2a_report(dt);
+
+ return IRQ_HANDLED;
+}
+
+static int gp2a_enable(struct gp2a_data *dt)
+{
+ return i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_OPMOD,
+ GP2A_CTRL_SSD);
+}
+
+static int gp2a_disable(struct gp2a_data *dt)
+{
+ return i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_OPMOD,
+ 0x00);
+}
+
+static int gp2a_device_open(struct input_dev *dev)
+{
+ struct gp2a_data *dt = input_get_drvdata(dev);
+ int error;
+
+ error = gp2a_enable(dt);
+ if (error < 0) {
+ dev_err(&dt->i2c_client->dev,
+ "unable to activate, err %d\n", error);
+ return error;
+ }
+
+ gp2a_report(dt);
+
+ return 0;
+}
+
+static void gp2a_device_close(struct input_dev *dev)
+{
+ struct gp2a_data *dt = input_get_drvdata(dev);
+ int error;
+
+ error = gp2a_disable(dt);
+ if (error < 0)
+ dev_err(&dt->i2c_client->dev,
+ "unable to deactivate, err %d\n", error);
+}
+
+static int __devinit gp2a_initialize(struct gp2a_data *dt)
+{
+ int error;
+
+ error = i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_GAIN,
+ 0x08);
+ if (error < 0)
+ return error;
+
+ error = i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_HYS,
+ 0xc2);
+ if (error < 0)
+ return error;
+
+ error = i2c_smbus_write_byte_data(dt->i2c_client, GP2A_ADDR_CYCLE,
+ 0x04);
+ if (error < 0)
+ return error;
+
+ error = gp2a_disable(dt);
+
+ return error;
+}
+
+static int __devinit gp2a_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct gp2a_platform_data *pdata = client->dev.platform_data;
+ struct gp2a_data *dt;
+ int error;
+
+ if (!pdata)
+ return -EINVAL;
+
+ if (pdata->hw_setup) {
+ error = pdata->hw_setup(client);
+ if (error < 0)
+ return error;
+ }
+
+ error = gpio_request_one(pdata->vout_gpio, GPIOF_IN, GP2A_I2C_NAME);
+ if (error)
+ goto err_hw_shutdown;
+
+ dt = kzalloc(sizeof(struct gp2a_data), GFP_KERNEL);
+ if (!dt) {
+ error = -ENOMEM;
+ goto err_free_gpio;
+ }
+
+ dt->pdata = pdata;
+ dt->i2c_client = client;
+
+ error = gp2a_initialize(dt);
+ if (error < 0)
+ goto err_free_mem;
+
+ dt->input = input_allocate_device();
+ if (!dt->input) {
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ input_set_drvdata(dt->input, dt);
+
+ dt->input->open = gp2a_device_open;
+ dt->input->close = gp2a_device_close;
+ dt->input->name = GP2A_I2C_NAME;
+ dt->input->id.bustype = BUS_I2C;
+ dt->input->dev.parent = &client->dev;
+
+ input_set_capability(dt->input, EV_SW, SW_FRONT_PROXIMITY);
+
+ error = request_threaded_irq(client->irq, NULL, gp2a_irq,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ GP2A_I2C_NAME, dt);
+ if (error) {
+ dev_err(&client->dev, "irq request failed\n");
+ goto err_free_input_dev;
+ }
+
+ error = input_register_device(dt->input);
+ if (error) {
+ dev_err(&client->dev, "device registration failed\n");
+ goto err_free_irq;
+ }
+
+ device_init_wakeup(&client->dev, pdata->wakeup);
+ i2c_set_clientdata(client, dt);
+
+ return 0;
+
+err_free_irq:
+ free_irq(client->irq, dt);
+err_free_input_dev:
+ input_free_device(dt->input);
+err_free_mem:
+ kfree(dt);
+err_free_gpio:
+ gpio_free(pdata->vout_gpio);
+err_hw_shutdown:
+ if (pdata->hw_shutdown)
+ pdata->hw_shutdown(client);
+ return error;
+}
+
+static int __devexit gp2a_remove(struct i2c_client *client)
+{
+ struct gp2a_data *dt = i2c_get_clientdata(client);
+ const struct gp2a_platform_data *pdata = dt->pdata;
+
+ device_init_wakeup(&client->dev, false);
+
+ free_irq(client->irq, dt);
+
+ input_unregister_device(dt->input);
+ kfree(dt);
+
+ gpio_free(pdata->vout_gpio);
+
+ if (pdata->hw_shutdown)
+ pdata->hw_shutdown(client);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int gp2a_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct gp2a_data *dt = i2c_get_clientdata(client);
+ int retval = 0;
+
+ if (device_may_wakeup(&client->dev)) {
+ enable_irq_wake(client->irq);
+ } else {
+ mutex_lock(&dt->input->mutex);
+ if (dt->input->users)
+ retval = gp2a_disable(dt);
+ mutex_unlock(&dt->input->mutex);
+ }
+
+ return retval;
+}
+
+static int gp2a_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct gp2a_data *dt = i2c_get_clientdata(client);
+ int retval = 0;
+
+ if (device_may_wakeup(&client->dev)) {
+ disable_irq_wake(client->irq);
+ } else {
+ mutex_lock(&dt->input->mutex);
+ if (dt->input->users)
+ retval = gp2a_enable(dt);
+ mutex_unlock(&dt->input->mutex);
+ }
+
+ return retval;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(gp2a_pm, gp2a_suspend, gp2a_resume);
+
+static const struct i2c_device_id gp2a_i2c_id[] = {
+ { GP2A_I2C_NAME, 0 },
+ { }
+};
+
+static struct i2c_driver gp2a_i2c_driver = {
+ .driver = {
+ .name = GP2A_I2C_NAME,
+ .owner = THIS_MODULE,
+ .pm = &gp2a_pm,
+ },
+ .probe = gp2a_probe,
+ .remove = __devexit_p(gp2a_remove),
+ .id_table = gp2a_i2c_id,
+};
+
+static int __init gp2a_init(void)
+{
+ return i2c_add_driver(&gp2a_i2c_driver);
+}
+
+static void __exit gp2a_exit(void)
+{
+ i2c_del_driver(&gp2a_i2c_driver);
+}
+
+module_init(gp2a_init);
+module_exit(gp2a_exit);
+
+MODULE_AUTHOR("Courtney Cavin <courtney.cavin@sonyericsson.com>");
+MODULE_DESCRIPTION("Sharp GP2AP002A00F I2C Proximity/Opto sensor driver");
+MODULE_LICENSE("GPL v2");
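Everything board-specific in the new gp2ap002a00f driver arrives through platform data: probe() above expects vout_gpio, the optional hw_setup()/hw_shutdown() hooks and the wakeup flag, and takes the proximity interrupt from client->irq. A sketch of how a board file might wire it up; the field names follow <linux/input/gp2ap002a00f.h> as used above, while the GPIO number, IRQ, I2C address and device name string are made-up placeholders:

static struct gp2a_platform_data example_gp2a_pdata = {
	.vout_gpio	= 12,		/* hypothetical GPIO wired to VOUT */
	.wakeup		= true,		/* let the sensor wake the system */
	/* .hw_setup / .hw_shutdown can point at board power hooks */
};

static struct i2c_board_info example_i2c_board_info[] __initdata = {
	{
		I2C_BOARD_INFO("gp2ap002a00f", 0x44),
		.platform_data	= &example_gp2a_pdata,
		.irq		= 101,	/* interrupt fired on the VOUT edge */
	},
};

/* registered from the board init code with
 * i2c_register_board_info(0, example_i2c_board_info,
 *			    ARRAY_SIZE(example_i2c_board_info)); */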
diff --git a/drivers/input/misc/gpio_tilt_polled.c b/drivers/input/misc/gpio_tilt_polled.c
new file mode 100644
index 0000000..277a057
--- /dev/null
+++ b/drivers/input/misc/gpio_tilt_polled.c
@@ -0,0 +1,213 @@
+/*
+ * Driver for tilt switches connected via GPIO lines
+ * not capable of generating interrupts
+ *
+ * Copyright (C) 2011 Heiko Stuebner <heiko@sntech.de>
+ *
+ * based on: drivers/input/keyboard/gpio_keys_polled.c
+ *
+ * Copyright (C) 2007-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2010 Nuno Goncalves <nunojpg@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/input-polldev.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/input/gpio_tilt.h>
+
+#define DRV_NAME "gpio-tilt-polled"
+
+struct gpio_tilt_polled_dev {
+ struct input_polled_dev *poll_dev;
+ struct device *dev;
+ const struct gpio_tilt_platform_data *pdata;
+
+ int last_state;
+
+ int threshold;
+ int count;
+};
+
+static void gpio_tilt_polled_poll(struct input_polled_dev *dev)
+{
+ struct gpio_tilt_polled_dev *tdev = dev->private;
+ const struct gpio_tilt_platform_data *pdata = tdev->pdata;
+ struct input_dev *input = dev->input;
+ struct gpio_tilt_state *tilt_state = NULL;
+ int state, i;
+
+ if (tdev->count < tdev->threshold) {
+ tdev->count++;
+ } else {
+ state = 0;
+ for (i = 0; i < pdata->nr_gpios; i++)
+ state |= (!!gpio_get_value(pdata->gpios[i].gpio) << i);
+
+ if (state != tdev->last_state) {
+ for (i = 0; i < pdata->nr_states; i++)
+ if (pdata->states[i].gpios == state)
+ tilt_state = &pdata->states[i];
+
+ if (tilt_state) {
+ for (i = 0; i < pdata->nr_axes; i++)
+ input_report_abs(input,
+ pdata->axes[i].axis,
+ tilt_state->axes[i]);
+
+ input_sync(input);
+ }
+
+ tdev->count = 0;
+ tdev->last_state = state;
+ }
+ }
+}
+
+static void gpio_tilt_polled_open(struct input_polled_dev *dev)
+{
+ struct gpio_tilt_polled_dev *tdev = dev->private;
+ const struct gpio_tilt_platform_data *pdata = tdev->pdata;
+
+ if (pdata->enable)
+ pdata->enable(tdev->dev);
+
+ /* report initial state of the axes */
+ tdev->last_state = -1;
+ tdev->count = tdev->threshold;
+ gpio_tilt_polled_poll(tdev->poll_dev);
+}
+
+static void gpio_tilt_polled_close(struct input_polled_dev *dev)
+{
+ struct gpio_tilt_polled_dev *tdev = dev->private;
+ const struct gpio_tilt_platform_data *pdata = tdev->pdata;
+
+ if (pdata->disable)
+ pdata->disable(tdev->dev);
+}
+
+static int __devinit gpio_tilt_polled_probe(struct platform_device *pdev)
+{
+ const struct gpio_tilt_platform_data *pdata = pdev->dev.platform_data;
+ struct device *dev = &pdev->dev;
+ struct gpio_tilt_polled_dev *tdev;
+ struct input_polled_dev *poll_dev;
+ struct input_dev *input;
+ int error, i;
+
+ if (!pdata || !pdata->poll_interval)
+ return -EINVAL;
+
+ tdev = kzalloc(sizeof(struct gpio_tilt_polled_dev), GFP_KERNEL);
+ if (!tdev) {
+ dev_err(dev, "no memory for private data\n");
+ return -ENOMEM;
+ }
+
+ error = gpio_request_array(pdata->gpios, pdata->nr_gpios);
+ if (error) {
+ dev_err(dev,
+ "Could not request tilt GPIOs: %d\n", error);
+ goto err_free_tdev;
+ }
+
+ poll_dev = input_allocate_polled_device();
+ if (!poll_dev) {
+ dev_err(dev, "no memory for polled device\n");
+ error = -ENOMEM;
+ goto err_free_gpios;
+ }
+
+ poll_dev->private = tdev;
+ poll_dev->poll = gpio_tilt_polled_poll;
+ poll_dev->poll_interval = pdata->poll_interval;
+ poll_dev->open = gpio_tilt_polled_open;
+ poll_dev->close = gpio_tilt_polled_close;
+
+ input = poll_dev->input;
+
+ input->name = pdev->name;
+ input->phys = DRV_NAME"/input0";
+ input->dev.parent = &pdev->dev;
+
+ input->id.bustype = BUS_HOST;
+ input->id.vendor = 0x0001;
+ input->id.product = 0x0001;
+ input->id.version = 0x0100;
+
+ __set_bit(EV_ABS, input->evbit);
+ for (i = 0; i < pdata->nr_axes; i++)
+ input_set_abs_params(input, pdata->axes[i].axis,
+ pdata->axes[i].min, pdata->axes[i].max,
+ pdata->axes[i].fuzz, pdata->axes[i].flat);
+
+ tdev->threshold = DIV_ROUND_UP(pdata->debounce_interval,
+ pdata->poll_interval);
+
+ tdev->poll_dev = poll_dev;
+ tdev->dev = dev;
+ tdev->pdata = pdata;
+
+ error = input_register_polled_device(poll_dev);
+ if (error) {
+ dev_err(dev, "unable to register polled device, err=%d\n",
+ error);
+ goto err_free_polldev;
+ }
+
+ platform_set_drvdata(pdev, tdev);
+
+ return 0;
+
+err_free_polldev:
+ input_free_polled_device(poll_dev);
+err_free_gpios:
+ gpio_free_array(pdata->gpios, pdata->nr_gpios);
+err_free_tdev:
+ kfree(tdev);
+
+ return error;
+}
+
+static int __devexit gpio_tilt_polled_remove(struct platform_device *pdev)
+{
+ struct gpio_tilt_polled_dev *tdev = platform_get_drvdata(pdev);
+ const struct gpio_tilt_platform_data *pdata = tdev->pdata;
+
+ platform_set_drvdata(pdev, NULL);
+
+ input_unregister_polled_device(tdev->poll_dev);
+ input_free_polled_device(tdev->poll_dev);
+
+ gpio_free_array(pdata->gpios, pdata->nr_gpios);
+
+ kfree(tdev);
+
+ return 0;
+}
+
+static struct platform_driver gpio_tilt_polled_driver = {
+ .probe = gpio_tilt_polled_probe,
+ .remove = __devexit_p(gpio_tilt_polled_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(gpio_tilt_polled_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
+MODULE_DESCRIPTION("Polled GPIO tilt driver");
+MODULE_ALIAS("platform:" DRV_NAME);
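gpio_tilt_polled_poll() above packs the sampled GPIO levels into a bit pattern (GPIO n in bit n of "state") and looks that pattern up in pdata->states[] to find the axis values to report, so the platform data fully describes the switch. A sketch for a single tilt switch on one GPIO reporting 0 or 90 on ABS_X; the structure layout is assumed to match <linux/input/gpio_tilt.h> as the driver uses it, and the GPIO number is a placeholder:

static struct gpio example_tilt_gpios[] = {
	{ .gpio = 52, .flags = GPIOF_IN, .label = "tilt-switch" },
};

static struct gpio_tilt_axis example_tilt_axes[] = {
	{ .axis = ABS_X, .min = 0, .max = 90, .fuzz = 0, .flat = 0 },
};

static struct gpio_tilt_state example_tilt_states[] = {
	{ .gpios = 0x0, .axes = (int[]){ 0 } },		/* level low  -> 0 degrees  */
	{ .gpios = 0x1, .axes = (int[]){ 90 } },	/* level high -> 90 degrees */
};

static struct gpio_tilt_platform_data example_tilt_pdata = {
	.gpios			= example_tilt_gpios,
	.nr_gpios		= ARRAY_SIZE(example_tilt_gpios),
	.axes			= example_tilt_axes,
	.nr_axes		= ARRAY_SIZE(example_tilt_axes),
	.states			= example_tilt_states,
	.nr_states		= ARRAY_SIZE(example_tilt_states),
	.poll_interval		= 20,	/* ms between polls */
	.debounce_interval	= 100,	/* ms -> threshold of 5 poll cycles */
};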
diff --git a/drivers/input/misc/ixp4xx-beeper.c b/drivers/input/misc/ixp4xx-beeper.c
index 302ab46..50e2830 100644
--- a/drivers/input/misc/ixp4xx-beeper.c
+++ b/drivers/input/misc/ixp4xx-beeper.c
@@ -168,16 +168,5 @@ static struct platform_driver ixp4xx_spkr_platform_driver = {
.remove = __devexit_p(ixp4xx_spkr_remove),
.shutdown = ixp4xx_spkr_shutdown,
};
+module_platform_driver(ixp4xx_spkr_platform_driver);
-static int __init ixp4xx_spkr_init(void)
-{
- return platform_driver_register(&ixp4xx_spkr_platform_driver);
-}
-
-static void __exit ixp4xx_spkr_exit(void)
-{
- platform_driver_unregister(&ixp4xx_spkr_platform_driver);
-}
-
-module_init(ixp4xx_spkr_init);
-module_exit(ixp4xx_spkr_exit);
diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c
index fc62256..d99151a 100644
--- a/drivers/input/misc/keyspan_remote.c
+++ b/drivers/input/misc/keyspan_remote.c
@@ -580,26 +580,7 @@ static struct usb_driver keyspan_driver =
.id_table = keyspan_table
};
-static int __init usb_keyspan_init(void)
-{
- int result;
-
- /* register this driver with the USB subsystem */
- result = usb_register(&keyspan_driver);
- if (result)
- err("usb_register failed. Error number %d\n", result);
-
- return result;
-}
-
-static void __exit usb_keyspan_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&keyspan_driver);
-}
-
-module_init(usb_keyspan_init);
-module_exit(usb_keyspan_exit);
+module_usb_driver(keyspan_driver);
MODULE_DEVICE_TABLE(usb, keyspan_table);
MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/input/misc/max8925_onkey.c b/drivers/input/misc/max8925_onkey.c
index 7de0ded..23cf082 100644
--- a/drivers/input/misc/max8925_onkey.c
+++ b/drivers/input/misc/max8925_onkey.c
@@ -166,18 +166,7 @@ static struct platform_driver max8925_onkey_driver = {
.probe = max8925_onkey_probe,
.remove = __devexit_p(max8925_onkey_remove),
};
-
-static int __init max8925_onkey_init(void)
-{
- return platform_driver_register(&max8925_onkey_driver);
-}
-module_init(max8925_onkey_init);
-
-static void __exit max8925_onkey_exit(void)
-{
- platform_driver_unregister(&max8925_onkey_driver);
-}
-module_exit(max8925_onkey_exit);
+module_platform_driver(max8925_onkey_driver);
MODULE_DESCRIPTION("Maxim MAX8925 ONKEY driver");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
diff --git a/drivers/input/misc/mc13783-pwrbutton.c b/drivers/input/misc/mc13783-pwrbutton.c
index 09b0522..8428f1e 100644
--- a/drivers/input/misc/mc13783-pwrbutton.c
+++ b/drivers/input/misc/mc13783-pwrbutton.c
@@ -255,7 +255,7 @@ static int __devexit mc13783_pwrbutton_remove(struct platform_device *pdev)
return 0;
}
-struct platform_driver mc13783_pwrbutton_driver = {
+static struct platform_driver mc13783_pwrbutton_driver = {
.probe = mc13783_pwrbutton_probe,
.remove = __devexit_p(mc13783_pwrbutton_remove),
.driver = {
@@ -264,17 +264,7 @@ struct platform_driver mc13783_pwrbutton_driver = {
},
};
-static int __init mc13783_pwrbutton_init(void)
-{
- return platform_driver_register(&mc13783_pwrbutton_driver);
-}
-module_init(mc13783_pwrbutton_init);
-
-static void __exit mc13783_pwrbutton_exit(void)
-{
- platform_driver_unregister(&mc13783_pwrbutton_driver);
-}
-module_exit(mc13783_pwrbutton_exit);
+module_platform_driver(mc13783_pwrbutton_driver);
MODULE_ALIAS("platform:mc13783-pwrbutton");
MODULE_DESCRIPTION("MC13783 Power Button");
diff --git a/drivers/input/misc/mpu3050.c b/drivers/input/misc/mpu3050.c
index f71dc72..208d1a1 100644
--- a/drivers/input/misc/mpu3050.c
+++ b/drivers/input/misc/mpu3050.c
@@ -41,18 +41,67 @@
#include <linux/slab.h>
#include <linux/pm_runtime.h>
-#define MPU3050_CHIP_ID_REG 0x00
#define MPU3050_CHIP_ID 0x69
-#define MPU3050_XOUT_H 0x1D
-#define MPU3050_PWR_MGM 0x3E
-#define MPU3050_PWR_MGM_POS 6
-#define MPU3050_PWR_MGM_MASK 0x40
#define MPU3050_AUTO_DELAY 1000
#define MPU3050_MIN_VALUE -32768
#define MPU3050_MAX_VALUE 32767
+#define MPU3050_DEFAULT_POLL_INTERVAL 200
+#define MPU3050_DEFAULT_FS_RANGE 3
+
+/* Register map */
+#define MPU3050_CHIP_ID_REG 0x00
+#define MPU3050_SMPLRT_DIV 0x15
+#define MPU3050_DLPF_FS_SYNC 0x16
+#define MPU3050_INT_CFG 0x17
+#define MPU3050_XOUT_H 0x1D
+#define MPU3050_PWR_MGM 0x3E
+#define MPU3050_PWR_MGM_POS 6
+
+/* Register bits */
+
+/* DLPF_FS_SYNC */
+#define MPU3050_EXT_SYNC_NONE 0x00
+#define MPU3050_EXT_SYNC_TEMP 0x20
+#define MPU3050_EXT_SYNC_GYROX 0x40
+#define MPU3050_EXT_SYNC_GYROY 0x60
+#define MPU3050_EXT_SYNC_GYROZ 0x80
+#define MPU3050_EXT_SYNC_ACCELX 0xA0
+#define MPU3050_EXT_SYNC_ACCELY 0xC0
+#define MPU3050_EXT_SYNC_ACCELZ 0xE0
+#define MPU3050_EXT_SYNC_MASK 0xE0
+#define MPU3050_FS_250DPS 0x00
+#define MPU3050_FS_500DPS 0x08
+#define MPU3050_FS_1000DPS 0x10
+#define MPU3050_FS_2000DPS 0x18
+#define MPU3050_FS_MASK 0x18
+#define MPU3050_DLPF_CFG_256HZ_NOLPF2 0x00
+#define MPU3050_DLPF_CFG_188HZ 0x01
+#define MPU3050_DLPF_CFG_98HZ 0x02
+#define MPU3050_DLPF_CFG_42HZ 0x03
+#define MPU3050_DLPF_CFG_20HZ 0x04
+#define MPU3050_DLPF_CFG_10HZ 0x05
+#define MPU3050_DLPF_CFG_5HZ 0x06
+#define MPU3050_DLPF_CFG_2100HZ_NOLPF 0x07
+#define MPU3050_DLPF_CFG_MASK 0x07
+/* INT_CFG */
+#define MPU3050_RAW_RDY_EN 0x01
+#define MPU3050_MPU_RDY_EN 0x02
+#define MPU3050_LATCH_INT_EN 0x04
+/* PWR_MGM */
+#define MPU3050_PWR_MGM_PLL_X 0x01
+#define MPU3050_PWR_MGM_PLL_Y 0x02
+#define MPU3050_PWR_MGM_PLL_Z 0x03
+#define MPU3050_PWR_MGM_CLKSEL 0x07
+#define MPU3050_PWR_MGM_STBY_ZG 0x08
+#define MPU3050_PWR_MGM_STBY_YG 0x10
+#define MPU3050_PWR_MGM_STBY_XG 0x20
+#define MPU3050_PWR_MGM_SLEEP 0x40
+#define MPU3050_PWR_MGM_RESET 0x80
+#define MPU3050_PWR_MGM_MASK 0x40
+
struct axis_data {
s16 x;
s16 y;
@@ -148,9 +197,20 @@ static void mpu3050_set_power_mode(struct i2c_client *client, u8 val)
static int mpu3050_input_open(struct input_dev *input)
{
struct mpu3050_sensor *sensor = input_get_drvdata(input);
+ int error;
pm_runtime_get(sensor->dev);
+ /* Enable interrupts */
+ error = i2c_smbus_write_byte_data(sensor->client, MPU3050_INT_CFG,
+ MPU3050_LATCH_INT_EN |
+ MPU3050_RAW_RDY_EN |
+ MPU3050_MPU_RDY_EN);
+ if (error < 0) {
+ pm_runtime_put(sensor->dev);
+ return error;
+ }
+
return 0;
}
@@ -192,6 +252,51 @@ static irqreturn_t mpu3050_interrupt_thread(int irq, void *data)
}
/**
+ * mpu3050_hw_init - initialize hardware
+ * @sensor: the sensor
+ *
+ * Called during device probe; configures the sampling method.
+ */
+static int __devinit mpu3050_hw_init(struct mpu3050_sensor *sensor)
+{
+ struct i2c_client *client = sensor->client;
+ int ret;
+ u8 reg;
+
+ /* Reset */
+ ret = i2c_smbus_write_byte_data(client, MPU3050_PWR_MGM,
+ MPU3050_PWR_MGM_RESET);
+ if (ret < 0)
+ return ret;
+
+ ret = i2c_smbus_read_byte_data(client, MPU3050_PWR_MGM);
+ if (ret < 0)
+ return ret;
+
+ ret &= ~MPU3050_PWR_MGM_CLKSEL;
+ ret |= MPU3050_PWR_MGM_PLL_Z;
+ ret = i2c_smbus_write_byte_data(client, MPU3050_PWR_MGM, ret);
+ if (ret < 0)
+ return ret;
+
+ /* Output frequency divider. The poll interval */
+ ret = i2c_smbus_write_byte_data(client, MPU3050_SMPLRT_DIV,
+ MPU3050_DEFAULT_POLL_INTERVAL - 1);
+ if (ret < 0)
+ return ret;
+
+ /* Set low pass filter and full scale */
+ reg = MPU3050_DEFAULT_FS_RANGE;
+ reg |= MPU3050_DLPF_CFG_42HZ << 3;
+ reg |= MPU3050_EXT_SYNC_NONE << 5;
+ ret = i2c_smbus_write_byte_data(client, MPU3050_DLPF_FS_SYNC, reg);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/**
* mpu3050_probe - device detection callback
* @client: i2c client of found device
* @id: id match information
@@ -256,10 +361,14 @@ static int __devinit mpu3050_probe(struct i2c_client *client,
pm_runtime_set_active(&client->dev);
+ error = mpu3050_hw_init(sensor);
+ if (error)
+ goto err_pm_set_suspended;
+
error = request_threaded_irq(client->irq,
NULL, mpu3050_interrupt_thread,
IRQF_TRIGGER_RISING,
- "mpu_int", sensor);
+ "mpu3050", sensor);
if (error) {
dev_err(&client->dev,
"can't get IRQ %d, error %d\n", client->irq, error);
@@ -348,11 +457,18 @@ static const struct i2c_device_id mpu3050_ids[] = {
};
MODULE_DEVICE_TABLE(i2c, mpu3050_ids);
+static const struct of_device_id mpu3050_of_match[] = {
+ { .compatible = "invn,mpu3050", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, mpu3050_of_match);
+
static struct i2c_driver mpu3050_i2c_driver = {
.driver = {
.name = "mpu3050",
.owner = THIS_MODULE,
.pm = &mpu3050_pm,
+ .of_match_table = mpu3050_of_match,
},
.probe = mpu3050_probe,
.remove = __devexit_p(mpu3050_remove),
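mpu3050_hw_init() above composes the DLPF_FS_SYNC byte from three fields (full-scale range, low-pass filter selection, external sync source) and derives the sample-rate divider from the default poll interval. A small standalone check of the values it ends up writing; the 1 kHz internal rate used on the last line is the usual MPU-3050 behaviour with the DLPF enabled and is an assumption here, not something the patch itself states:

#include <stdio.h>

#define MPU3050_DEFAULT_POLL_INTERVAL	200	/* ms */
#define MPU3050_DEFAULT_FS_RANGE	3
#define MPU3050_DLPF_CFG_42HZ		0x03
#define MPU3050_EXT_SYNC_NONE		0x00

int main(void)
{
	unsigned int reg = MPU3050_DEFAULT_FS_RANGE;
	unsigned int div = MPU3050_DEFAULT_POLL_INTERVAL - 1;

	reg |= MPU3050_DLPF_CFG_42HZ << 3;
	reg |= MPU3050_EXT_SYNC_NONE << 5;

	/* Prints: DLPF_FS_SYNC = 0x1b, SMPLRT_DIV = 199 -> 5 Hz */
	printf("DLPF_FS_SYNC = 0x%02x, SMPLRT_DIV = %u -> %u Hz\n",
	       reg, div, 1000 / (div + 1));
	return 0;
}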
diff --git a/drivers/input/misc/pcap_keys.c b/drivers/input/misc/pcap_keys.c
index 99335c2..e09b4fe 100644
--- a/drivers/input/misc/pcap_keys.c
+++ b/drivers/input/misc/pcap_keys.c
@@ -125,19 +125,7 @@ static struct platform_driver pcap_keys_device_driver = {
.owner = THIS_MODULE,
}
};
-
-static int __init pcap_keys_init(void)
-{
- return platform_driver_register(&pcap_keys_device_driver);
-};
-
-static void __exit pcap_keys_exit(void)
-{
- platform_driver_unregister(&pcap_keys_device_driver);
-};
-
-module_init(pcap_keys_init);
-module_exit(pcap_keys_exit);
+module_platform_driver(pcap_keys_device_driver);
MODULE_DESCRIPTION("Motorola PCAP2 input events driver");
MODULE_AUTHOR("Ilya Petrov <ilya.muromec@gmail.com>");
diff --git a/drivers/input/misc/pcf50633-input.c b/drivers/input/misc/pcf50633-input.c
index 9556273..53891de 100644
--- a/drivers/input/misc/pcf50633-input.c
+++ b/drivers/input/misc/pcf50633-input.c
@@ -113,18 +113,7 @@ static struct platform_driver pcf50633_input_driver = {
.probe = pcf50633_input_probe,
.remove = __devexit_p(pcf50633_input_remove),
};
-
-static int __init pcf50633_input_init(void)
-{
- return platform_driver_register(&pcf50633_input_driver);
-}
-module_init(pcf50633_input_init);
-
-static void __exit pcf50633_input_exit(void)
-{
- platform_driver_unregister(&pcf50633_input_driver);
-}
-module_exit(pcf50633_input_exit);
+module_platform_driver(pcf50633_input_driver);
MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
MODULE_DESCRIPTION("PCF50633 input driver");
diff --git a/drivers/input/misc/pcspkr.c b/drivers/input/misc/pcspkr.c
index 34f4d2e..b2484aa 100644
--- a/drivers/input/misc/pcspkr.c
+++ b/drivers/input/misc/pcspkr.c
@@ -134,17 +134,5 @@ static struct platform_driver pcspkr_platform_driver = {
.remove = __devexit_p(pcspkr_remove),
.shutdown = pcspkr_shutdown,
};
+module_platform_driver(pcspkr_platform_driver);
-
-static int __init pcspkr_init(void)
-{
- return platform_driver_register(&pcspkr_platform_driver);
-}
-
-static void __exit pcspkr_exit(void)
-{
- platform_driver_unregister(&pcspkr_platform_driver);
-}
-
-module_init(pcspkr_init);
-module_exit(pcspkr_exit);
diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c
index 4319293..dfbfb46 100644
--- a/drivers/input/misc/pm8xxx-vibrator.c
+++ b/drivers/input/misc/pm8xxx-vibrator.c
@@ -277,18 +277,7 @@ static struct platform_driver pm8xxx_vib_driver = {
.pm = &pm8xxx_vib_pm_ops,
},
};
-
-static int __init pm8xxx_vib_init(void)
-{
- return platform_driver_register(&pm8xxx_vib_driver);
-}
-module_init(pm8xxx_vib_init);
-
-static void __exit pm8xxx_vib_exit(void)
-{
- platform_driver_unregister(&pm8xxx_vib_driver);
-}
-module_exit(pm8xxx_vib_exit);
+module_platform_driver(pm8xxx_vib_driver);
MODULE_ALIAS("platform:pm8xxx_vib");
MODULE_DESCRIPTION("PMIC8xxx vibrator driver based on ff-memless framework");
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c
index b3cfb9c..0f83d0f 100644
--- a/drivers/input/misc/pmic8xxx-pwrkey.c
+++ b/drivers/input/misc/pmic8xxx-pwrkey.c
@@ -213,18 +213,7 @@ static struct platform_driver pmic8xxx_pwrkey_driver = {
.pm = &pm8xxx_pwr_key_pm_ops,
},
};
-
-static int __init pmic8xxx_pwrkey_init(void)
-{
- return platform_driver_register(&pmic8xxx_pwrkey_driver);
-}
-module_init(pmic8xxx_pwrkey_init);
-
-static void __exit pmic8xxx_pwrkey_exit(void)
-{
- platform_driver_unregister(&pmic8xxx_pwrkey_driver);
-}
-module_exit(pmic8xxx_pwrkey_exit);
+module_platform_driver(pmic8xxx_pwrkey_driver);
MODULE_ALIAS("platform:pmic8xxx_pwrkey");
MODULE_DESCRIPTION("PMIC8XXX Power Key driver");
diff --git a/drivers/input/misc/powermate.c b/drivers/input/misc/powermate.c
index f459471..538f704 100644
--- a/drivers/input/misc/powermate.c
+++ b/drivers/input/misc/powermate.c
@@ -441,18 +441,7 @@ static struct usb_driver powermate_driver = {
.id_table = powermate_devices,
};
-static int __init powermate_init(void)
-{
- return usb_register(&powermate_driver);
-}
-
-static void __exit powermate_cleanup(void)
-{
- usb_deregister(&powermate_driver);
-}
-
-module_init(powermate_init);
-module_exit(powermate_cleanup);
+module_usb_driver(powermate_driver);
MODULE_AUTHOR( "William R Sowerbutts" );
MODULE_DESCRIPTION( "Griffin Technology, Inc PowerMate driver" );
diff --git a/drivers/input/misc/pwm-beeper.c b/drivers/input/misc/pwm-beeper.c
index 57c294f..fc84c8a 100644
--- a/drivers/input/misc/pwm-beeper.c
+++ b/drivers/input/misc/pwm-beeper.c
@@ -180,18 +180,7 @@ static struct platform_driver pwm_beeper_driver = {
.pm = PWM_BEEPER_PM_OPS,
},
};
-
-static int __init pwm_beeper_init(void)
-{
- return platform_driver_register(&pwm_beeper_driver);
-}
-module_init(pwm_beeper_init);
-
-static void __exit pwm_beeper_exit(void)
-{
- platform_driver_unregister(&pwm_beeper_driver);
-}
-module_exit(pwm_beeper_exit);
+module_platform_driver(pwm_beeper_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("PWM beeper driver");
diff --git a/drivers/input/misc/rb532_button.c b/drivers/input/misc/rb532_button.c
index e2c7f62..aeb02bc 100644
--- a/drivers/input/misc/rb532_button.c
+++ b/drivers/input/misc/rb532_button.c
@@ -100,19 +100,7 @@ static struct platform_driver rb532_button_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init rb532_button_init(void)
-{
- return platform_driver_register(&rb532_button_driver);
-}
-
-static void __exit rb532_button_exit(void)
-{
- platform_driver_unregister(&rb532_button_driver);
-}
-
-module_init(rb532_button_init);
-module_exit(rb532_button_exit);
+module_platform_driver(rb532_button_driver);
MODULE_AUTHOR("Phil Sutter <n0-1@freewrt.org>");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index 2be2169..f07f784 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -284,19 +284,7 @@ static struct platform_driver rotary_encoder_driver = {
.owner = THIS_MODULE,
}
};
-
-static int __init rotary_encoder_init(void)
-{
- return platform_driver_register(&rotary_encoder_driver);
-}
-
-static void __exit rotary_encoder_exit(void)
-{
- platform_driver_unregister(&rotary_encoder_driver);
-}
-
-module_init(rotary_encoder_init);
-module_exit(rotary_encoder_exit);
+module_platform_driver(rotary_encoder_driver);
MODULE_ALIAS("platform:" DRV_NAME);
MODULE_DESCRIPTION("GPIO rotary encoder driver");
diff --git a/drivers/input/misc/sgi_btns.c b/drivers/input/misc/sgi_btns.c
index 1a80c0d..5d9fd55 100644
--- a/drivers/input/misc/sgi_btns.c
+++ b/drivers/input/misc/sgi_btns.c
@@ -164,17 +164,6 @@ static struct platform_driver sgi_buttons_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init sgi_buttons_init(void)
-{
- return platform_driver_register(&sgi_buttons_driver);
-}
-
-static void __exit sgi_buttons_exit(void)
-{
- platform_driver_unregister(&sgi_buttons_driver);
-}
+module_platform_driver(sgi_buttons_driver);
MODULE_LICENSE("GPL");
-module_init(sgi_buttons_init);
-module_exit(sgi_buttons_exit);
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index 3c1a432..f3bc418 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -172,7 +172,7 @@ static void twl4030_vibra_close(struct input_dev *input)
}
/*** Module ***/
-#if CONFIG_PM
+#if CONFIG_PM_SLEEP
static int twl4030_vibra_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -189,10 +189,10 @@ static int twl4030_vibra_resume(struct device *dev)
vibra_disable_leds();
return 0;
}
+#endif
static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
twl4030_vibra_suspend, twl4030_vibra_resume);
-#endif
static int __devinit twl4030_vibra_probe(struct platform_device *pdev)
{
@@ -273,26 +273,12 @@ static struct platform_driver twl4030_vibra_driver = {
.driver = {
.name = "twl4030-vibra",
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
.pm = &twl4030_vibra_pm_ops,
-#endif
},
};
-
-static int __init twl4030_vibra_init(void)
-{
- return platform_driver_register(&twl4030_vibra_driver);
-}
-module_init(twl4030_vibra_init);
-
-static void __exit twl4030_vibra_exit(void)
-{
- platform_driver_unregister(&twl4030_vibra_driver);
-}
-module_exit(twl4030_vibra_exit);
+module_platform_driver(twl4030_vibra_driver);
MODULE_ALIAS("platform:twl4030-vibra");
-
MODULE_DESCRIPTION("TWL4030 Vibra driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nokia Corporation");
diff --git a/drivers/input/misc/twl6040-vibra.c b/drivers/input/misc/twl6040-vibra.c
index ad153a4..45874fe 100644
--- a/drivers/input/misc/twl6040-vibra.c
+++ b/drivers/input/misc/twl6040-vibra.c
@@ -410,18 +410,7 @@ static struct platform_driver twl6040_vibra_driver = {
.pm = &twl6040_vibra_pm_ops,
},
};
-
-static int __init twl6040_vibra_init(void)
-{
- return platform_driver_register(&twl6040_vibra_driver);
-}
-module_init(twl6040_vibra_init);
-
-static void __exit twl6040_vibra_exit(void)
-{
- platform_driver_unregister(&twl6040_vibra_driver);
-}
-module_exit(twl6040_vibra_exit);
+module_platform_driver(twl6040_vibra_driver);
MODULE_ALIAS("platform:twl6040-vibra");
MODULE_DESCRIPTION("TWL6040 Vibra driver");
diff --git a/drivers/input/misc/wistron_btns.c b/drivers/input/misc/wistron_btns.c
index 52b4193..e2bdfd4 100644
--- a/drivers/input/misc/wistron_btns.c
+++ b/drivers/input/misc/wistron_btns.c
@@ -48,7 +48,7 @@ MODULE_DESCRIPTION("Wistron laptop button driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.3");
-static int force; /* = 0; */
+static bool force; /* = 0; */
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Load even if computer is not in database");
diff --git a/drivers/input/misc/wm831x-on.c b/drivers/input/misc/wm831x-on.c
index c3d7ba5..47f18d6 100644
--- a/drivers/input/misc/wm831x-on.c
+++ b/drivers/input/misc/wm831x-on.c
@@ -145,18 +145,7 @@ static struct platform_driver wm831x_on_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init wm831x_on_init(void)
-{
- return platform_driver_register(&wm831x_on_driver);
-}
-module_init(wm831x_on_init);
-
-static void __exit wm831x_on_exit(void)
-{
- platform_driver_unregister(&wm831x_on_driver);
-}
-module_exit(wm831x_on_exit);
+module_platform_driver(wm831x_on_driver);
MODULE_ALIAS("platform:wm831x-on");
MODULE_DESCRIPTION("WM831x ON pin");
diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c
index ad2e51c..02ca868 100644
--- a/drivers/input/misc/xen-kbdfront.c
+++ b/drivers/input/misc/xen-kbdfront.c
@@ -361,15 +361,12 @@ static const struct xenbus_device_id xenkbd_ids[] = {
{ "" }
};
-static struct xenbus_driver xenkbd_driver = {
- .name = "vkbd",
- .owner = THIS_MODULE,
- .ids = xenkbd_ids,
+static DEFINE_XENBUS_DRIVER(xenkbd, ,
.probe = xenkbd_probe,
.remove = xenkbd_remove,
.resume = xenkbd_resume,
.otherend_changed = xenkbd_backend_changed,
-};
+);
static int __init xenkbd_init(void)
{
diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c
index 41201c6..f4776e7 100644
--- a/drivers/input/misc/yealink.c
+++ b/drivers/input/misc/yealink.c
@@ -988,22 +988,7 @@ static struct usb_driver yealink_driver = {
.id_table = usb_table,
};
-static int __init yealink_dev_init(void)
-{
- int ret = usb_register(&yealink_driver);
- if (ret == 0)
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
- return ret;
-}
-
-static void __exit yealink_dev_exit(void)
-{
- usb_deregister(&yealink_driver);
-}
-
-module_init(yealink_dev_init);
-module_exit(yealink_dev_exit);
+module_usb_driver(yealink_driver);
MODULE_DEVICE_TABLE (usb, usb_table);
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 003587c..4c6a72d 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -17,13 +17,63 @@
#include <linux/slab.h>
#include <linux/input.h>
+#include <linux/input/mt.h>
#include <linux/serio.h>
#include <linux/libps2.h>
#include "psmouse.h"
#include "alps.h"
-#define ALPS_OLDPROTO 0x01 /* old style input */
+/*
+ * Definitions for ALPS version 3 and 4 command mode protocol
+ */
+#define ALPS_V3_X_MAX 2000
+#define ALPS_V3_Y_MAX 1400
+
+#define ALPS_BITMAP_X_BITS 15
+#define ALPS_BITMAP_Y_BITS 11
+
+#define ALPS_CMD_NIBBLE_10 0x01f2
+
+static const struct alps_nibble_commands alps_v3_nibble_commands[] = {
+ { PSMOUSE_CMD_SETPOLL, 0x00 }, /* 0 */
+ { PSMOUSE_CMD_RESET_DIS, 0x00 }, /* 1 */
+ { PSMOUSE_CMD_SETSCALE21, 0x00 }, /* 2 */
+ { PSMOUSE_CMD_SETRATE, 0x0a }, /* 3 */
+ { PSMOUSE_CMD_SETRATE, 0x14 }, /* 4 */
+ { PSMOUSE_CMD_SETRATE, 0x28 }, /* 5 */
+ { PSMOUSE_CMD_SETRATE, 0x3c }, /* 6 */
+ { PSMOUSE_CMD_SETRATE, 0x50 }, /* 7 */
+ { PSMOUSE_CMD_SETRATE, 0x64 }, /* 8 */
+ { PSMOUSE_CMD_SETRATE, 0xc8 }, /* 9 */
+ { ALPS_CMD_NIBBLE_10, 0x00 }, /* a */
+ { PSMOUSE_CMD_SETRES, 0x00 }, /* b */
+ { PSMOUSE_CMD_SETRES, 0x01 }, /* c */
+ { PSMOUSE_CMD_SETRES, 0x02 }, /* d */
+ { PSMOUSE_CMD_SETRES, 0x03 }, /* e */
+ { PSMOUSE_CMD_SETSCALE11, 0x00 }, /* f */
+};
+
+static const struct alps_nibble_commands alps_v4_nibble_commands[] = {
+ { PSMOUSE_CMD_ENABLE, 0x00 }, /* 0 */
+ { PSMOUSE_CMD_RESET_DIS, 0x00 }, /* 1 */
+ { PSMOUSE_CMD_SETSCALE21, 0x00 }, /* 2 */
+ { PSMOUSE_CMD_SETRATE, 0x0a }, /* 3 */
+ { PSMOUSE_CMD_SETRATE, 0x14 }, /* 4 */
+ { PSMOUSE_CMD_SETRATE, 0x28 }, /* 5 */
+ { PSMOUSE_CMD_SETRATE, 0x3c }, /* 6 */
+ { PSMOUSE_CMD_SETRATE, 0x50 }, /* 7 */
+ { PSMOUSE_CMD_SETRATE, 0x64 }, /* 8 */
+ { PSMOUSE_CMD_SETRATE, 0xc8 }, /* 9 */
+ { ALPS_CMD_NIBBLE_10, 0x00 }, /* a */
+ { PSMOUSE_CMD_SETRES, 0x00 }, /* b */
+ { PSMOUSE_CMD_SETRES, 0x01 }, /* c */
+ { PSMOUSE_CMD_SETRES, 0x02 }, /* d */
+ { PSMOUSE_CMD_SETRES, 0x03 }, /* e */
+ { PSMOUSE_CMD_SETSCALE11, 0x00 }, /* f */
+};
+
+
#define ALPS_DUALPOINT 0x02 /* touchpad has trackstick */
#define ALPS_PASS 0x04 /* device has a pass-through port */
@@ -35,30 +85,33 @@
6-byte ALPS packet */
static const struct alps_model_info alps_model_data[] = {
- { { 0x32, 0x02, 0x14 }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Toshiba Salellite Pro M10 */
- { { 0x33, 0x02, 0x0a }, 0x88, 0xf8, ALPS_OLDPROTO }, /* UMAX-530T */
- { { 0x53, 0x02, 0x0a }, 0xf8, 0xf8, 0 },
- { { 0x53, 0x02, 0x14 }, 0xf8, 0xf8, 0 },
- { { 0x60, 0x03, 0xc8 }, 0xf8, 0xf8, 0 }, /* HP ze1115 */
- { { 0x63, 0x02, 0x0a }, 0xf8, 0xf8, 0 },
- { { 0x63, 0x02, 0x14 }, 0xf8, 0xf8, 0 },
- { { 0x63, 0x02, 0x28 }, 0xf8, 0xf8, ALPS_FW_BK_2 }, /* Fujitsu Siemens S6010 */
- { { 0x63, 0x02, 0x3c }, 0x8f, 0x8f, ALPS_WHEEL }, /* Toshiba Satellite S2400-103 */
- { { 0x63, 0x02, 0x50 }, 0xef, 0xef, ALPS_FW_BK_1 }, /* NEC Versa L320 */
- { { 0x63, 0x02, 0x64 }, 0xf8, 0xf8, 0 },
- { { 0x63, 0x03, 0xc8 }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D800 */
- { { 0x73, 0x00, 0x0a }, 0xf8, 0xf8, ALPS_DUALPOINT }, /* ThinkPad R61 8918-5QG */
- { { 0x73, 0x02, 0x0a }, 0xf8, 0xf8, 0 },
- { { 0x73, 0x02, 0x14 }, 0xf8, 0xf8, ALPS_FW_BK_2 }, /* Ahtec Laptop */
- { { 0x20, 0x02, 0x0e }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* XXX */
- { { 0x22, 0x02, 0x0a }, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },
- { { 0x22, 0x02, 0x14 }, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D600 */
+ { { 0x32, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Toshiba Salellite Pro M10 */
+ { { 0x33, 0x02, 0x0a }, 0x00, ALPS_PROTO_V1, 0x88, 0xf8, 0 }, /* UMAX-530T */
+ { { 0x53, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
+ { { 0x53, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
+ { { 0x60, 0x03, 0xc8 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, /* HP ze1115 */
+ { { 0x63, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
+ { { 0x63, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
+ { { 0x63, 0x02, 0x28 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 }, /* Fujitsu Siemens S6010 */
+ { { 0x63, 0x02, 0x3c }, 0x00, ALPS_PROTO_V2, 0x8f, 0x8f, ALPS_WHEEL }, /* Toshiba Satellite S2400-103 */
+ { { 0x63, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xef, 0xef, ALPS_FW_BK_1 }, /* NEC Versa L320 */
+ { { 0x63, 0x02, 0x64 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
+ { { 0x63, 0x03, 0xc8 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D800 */
+ { { 0x73, 0x00, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_DUALPOINT }, /* ThinkPad R61 8918-5QG */
+ { { 0x73, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
+ { { 0x73, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 }, /* Ahtec Laptop */
+ { { 0x20, 0x02, 0x0e }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* XXX */
+ { { 0x22, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },
+ { { 0x22, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D600 */
/* Dell Latitude E5500, E6400, E6500, Precision M4400 */
- { { 0x62, 0x02, 0x14 }, 0xcf, 0xcf,
+ { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf,
ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
- { { 0x73, 0x02, 0x50 }, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
- { { 0x52, 0x01, 0x14 }, 0xff, 0xff,
- ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
+ { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */
+ { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff,
+ ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */
+ { { 0x73, 0x02, 0x64 }, 0x9b, ALPS_PROTO_V3, 0x8f, 0x8f, ALPS_DUALPOINT },
+ { { 0x73, 0x02, 0x64 }, 0x9d, ALPS_PROTO_V3, 0x8f, 0x8f, ALPS_DUALPOINT },
+ { { 0x73, 0x02, 0x64 }, 0x8a, ALPS_PROTO_V4, 0x8f, 0x8f, 0 },
};
/*
@@ -67,42 +120,7 @@ static const struct alps_model_info alps_model_data[] = {
* isn't valid per PS/2 spec.
*/
-/*
- * PS/2 packet format
- *
- * byte 0: 0 0 YSGN XSGN 1 M R L
- * byte 1: X7 X6 X5 X4 X3 X2 X1 X0
- * byte 2: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
- *
- * Note that the device never signals overflow condition.
- *
- * ALPS absolute Mode - new format
- *
- * byte 0: 1 ? ? ? 1 ? ? ?
- * byte 1: 0 x6 x5 x4 x3 x2 x1 x0
- * byte 2: 0 x10 x9 x8 x7 ? fin ges
- * byte 3: 0 y9 y8 y7 1 M R L
- * byte 4: 0 y6 y5 y4 y3 y2 y1 y0
- * byte 5: 0 z6 z5 z4 z3 z2 z1 z0
- *
- * Dualpoint device -- interleaved packet format
- *
- * byte 0: 1 1 0 0 1 1 1 1
- * byte 1: 0 x6 x5 x4 x3 x2 x1 x0
- * byte 2: 0 x10 x9 x8 x7 0 fin ges
- * byte 3: 0 0 YSGN XSGN 1 1 1 1
- * byte 4: X7 X6 X5 X4 X3 X2 X1 X0
- * byte 5: Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
- * byte 6: 0 y9 y8 y7 1 m r l
- * byte 7: 0 y6 y5 y4 y3 y2 y1 y0
- * byte 8: 0 z6 z5 z4 z3 z2 z1 z0
- *
- * CAPITALS = stick, miniscules = touchpad
- *
- * ?'s can have different meanings on different models,
- * such as wheel rotation, extra buttons, stick buttons
- * on a dualpoint, etc.
- */
+/* Packet formats are described in Documentation/input/alps.txt */
static bool alps_is_valid_first_byte(const struct alps_model_info *model,
unsigned char data)
@@ -137,7 +155,7 @@ static void alps_report_buttons(struct psmouse *psmouse,
input_sync(dev2);
}
-static void alps_process_packet(struct psmouse *psmouse)
+static void alps_process_packet_v1_v2(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
const struct alps_model_info *model = priv->i;
@@ -147,7 +165,7 @@ static void alps_process_packet(struct psmouse *psmouse)
int x, y, z, ges, fin, left, right, middle;
int back = 0, forward = 0;
- if (model->flags & ALPS_OLDPROTO) {
+ if (model->proto_version == ALPS_PROTO_V1) {
left = packet[2] & 0x10;
right = packet[2] & 0x08;
middle = 0;
@@ -239,6 +257,403 @@ static void alps_process_packet(struct psmouse *psmouse)
input_sync(dev);
}
+/*
+ * Process bitmap data from v3 and v4 protocols. Returns the number of
+ * fingers detected. A return value of 0 means at least one of the
+ * bitmaps was empty.
+ *
+ * The bitmaps don't have enough data to track fingers, so this function
+ * only generates points representing a bounding box of all contacts.
+ * These points are returned in x1, y1, x2, and y2 when the return value
+ * is greater than 0.
+ */
+static int alps_process_bitmap(unsigned int x_map, unsigned int y_map,
+ int *x1, int *y1, int *x2, int *y2)
+{
+ struct alps_bitmap_point {
+ int start_bit;
+ int num_bits;
+ };
+
+ int fingers_x = 0, fingers_y = 0, fingers;
+ int i, bit, prev_bit;
+ struct alps_bitmap_point x_low = {0,}, x_high = {0,};
+ struct alps_bitmap_point y_low = {0,}, y_high = {0,};
+ struct alps_bitmap_point *point;
+
+ if (!x_map || !y_map)
+ return 0;
+
+ *x1 = *y1 = *x2 = *y2 = 0;
+
+ prev_bit = 0;
+ point = &x_low;
+ for (i = 0; x_map != 0; i++, x_map >>= 1) {
+ bit = x_map & 1;
+ if (bit) {
+ if (!prev_bit) {
+ point->start_bit = i;
+ fingers_x++;
+ }
+ point->num_bits++;
+ } else {
+ if (prev_bit)
+ point = &x_high;
+ else
+ point->num_bits = 0;
+ }
+ prev_bit = bit;
+ }
+
+ /*
+ * y bitmap is reversed for what we need (lower positions are in
+ * higher bits), so we process from the top end.
+ */
+ y_map = y_map << (sizeof(y_map) * BITS_PER_BYTE - ALPS_BITMAP_Y_BITS);
+ prev_bit = 0;
+ point = &y_low;
+ for (i = 0; y_map != 0; i++, y_map <<= 1) {
+ bit = y_map & (1 << (sizeof(y_map) * BITS_PER_BYTE - 1));
+ if (bit) {
+ if (!prev_bit) {
+ point->start_bit = i;
+ fingers_y++;
+ }
+ point->num_bits++;
+ } else {
+ if (prev_bit)
+ point = &y_high;
+ else
+ point->num_bits = 0;
+ }
+ prev_bit = bit;
+ }
+
+ /*
+ * Fingers can overlap, so we use the maximum count of fingers
+ * on either axis as the finger count.
+ */
+ fingers = max(fingers_x, fingers_y);
+
+ /*
+ * If total fingers is > 1 but either axis reports only a single
+ * contact, we have overlapping or adjacent fingers. For the
+ * purposes of creating a bounding box, divide the single contact
+ * (roughly) equally between the two points.
+ */
+ if (fingers > 1) {
+ if (fingers_x == 1) {
+ i = x_low.num_bits / 2;
+ x_low.num_bits = x_low.num_bits - i;
+ x_high.start_bit = x_low.start_bit + i;
+ x_high.num_bits = max(i, 1);
+ } else if (fingers_y == 1) {
+ i = y_low.num_bits / 2;
+ y_low.num_bits = y_low.num_bits - i;
+ y_high.start_bit = y_low.start_bit + i;
+ y_high.num_bits = max(i, 1);
+ }
+ }
+
+ *x1 = (ALPS_V3_X_MAX * (2 * x_low.start_bit + x_low.num_bits - 1)) /
+ (2 * (ALPS_BITMAP_X_BITS - 1));
+ *y1 = (ALPS_V3_Y_MAX * (2 * y_low.start_bit + y_low.num_bits - 1)) /
+ (2 * (ALPS_BITMAP_Y_BITS - 1));
+
+ if (fingers > 1) {
+ *x2 = (ALPS_V3_X_MAX * (2 * x_high.start_bit + x_high.num_bits - 1)) /
+ (2 * (ALPS_BITMAP_X_BITS - 1));
+ *y2 = (ALPS_V3_Y_MAX * (2 * y_high.start_bit + y_high.num_bits - 1)) /
+ (2 * (ALPS_BITMAP_Y_BITS - 1));
+ }
+
+ return fingers;
+}
+
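The coordinate math at the end of alps_process_bitmap() maps the midpoint of each run of set bits onto the absolute coordinate range: a run starting at bit s with n bits has its midpoint at s + (n - 1)/2, which the code keeps in integer form as (2*s + n - 1)/2 before scaling by the axis maximum. A tiny standalone check of that mapping for the X axis, reusing the constants defined above:

#include <stdio.h>

#define ALPS_V3_X_MAX		2000
#define ALPS_BITMAP_X_BITS	15

/* Midpoint of a run of set bits, scaled onto 0..ALPS_V3_X_MAX. */
static int bitmap_run_to_x(int start_bit, int num_bits)
{
	return (ALPS_V3_X_MAX * (2 * start_bit + num_bits - 1)) /
	       (2 * (ALPS_BITMAP_X_BITS - 1));
}

int main(void)
{
	/* A contact covering bits 0-2 lands near the left edge (x = 142)... */
	printf("low run:  x = %d\n", bitmap_run_to_x(0, 3));
	/* ...one covering bits 13-14 lands near ALPS_V3_X_MAX (x = 1928). */
	printf("high run: x = %d\n", bitmap_run_to_x(13, 2));
	return 0;
}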
+static void alps_set_slot(struct input_dev *dev, int slot, bool active,
+ int x, int y)
+{
+ input_mt_slot(dev, slot);
+ input_mt_report_slot_state(dev, MT_TOOL_FINGER, active);
+ if (active) {
+ input_report_abs(dev, ABS_MT_POSITION_X, x);
+ input_report_abs(dev, ABS_MT_POSITION_Y, y);
+ }
+}
+
+static void alps_report_semi_mt_data(struct input_dev *dev, int num_fingers,
+ int x1, int y1, int x2, int y2)
+{
+ alps_set_slot(dev, 0, num_fingers != 0, x1, y1);
+ alps_set_slot(dev, 1, num_fingers == 2, x2, y2);
+}
+
+static void alps_process_trackstick_packet_v3(struct psmouse *psmouse)
+{
+ struct alps_data *priv = psmouse->private;
+ unsigned char *packet = psmouse->packet;
+ struct input_dev *dev = priv->dev2;
+ int x, y, z, left, right, middle;
+
+ /* Sanity check packet */
+ if (!(packet[0] & 0x40)) {
+ psmouse_dbg(psmouse, "Bad trackstick packet, discarding\n");
+ return;
+ }
+
+ /*
+ * There's a special packet that seems to indicate the end
+ * of a stream of trackstick data. Filter these out.
+ */
+ if (packet[1] == 0x7f && packet[2] == 0x7f && packet[4] == 0x7f)
+ return;
+
+ x = (s8)(((packet[0] & 0x20) << 2) | (packet[1] & 0x7f));
+ y = (s8)(((packet[0] & 0x10) << 3) | (packet[2] & 0x7f));
+ z = (packet[4] & 0x7c) >> 2;
+
+ /*
+ * The x and y values tend to be quite large, and when used
+ * alone the trackstick is difficult to use. Scale them down
+ * to compensate.
+ */
+ x /= 8;
+ y /= 8;
+
+ input_report_rel(dev, REL_X, x);
+ input_report_rel(dev, REL_Y, -y);
+
+ /*
+ * Most ALPS models report the trackstick buttons in the touchpad
+ * packets, but a few report them here. No reliable way has been
+ * found to differentiate between the models upfront, so we enable
+ * the quirk in response to seeing a button press in the trackstick
+ * packet.
+ */
+ left = packet[3] & 0x01;
+ right = packet[3] & 0x02;
+ middle = packet[3] & 0x04;
+
+ if (!(priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS) &&
+ (left || right || middle))
+ priv->quirks |= ALPS_QUIRK_TRACKSTICK_BUTTONS;
+
+ if (priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS) {
+ input_report_key(dev, BTN_LEFT, left);
+ input_report_key(dev, BTN_RIGHT, right);
+ input_report_key(dev, BTN_MIDDLE, middle);
+ }
+
+ input_sync(dev);
+ return;
+}
+
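The trackstick decode above composes each delta from one high-order bit in packet[0] plus seven low-order bits from the data byte, then relies on the (s8) cast for sign extension: bit 5 of packet[0], shifted left by two, becomes bit 7 of the assembled byte, so packets with that bit set produce negative X movement (bit 4 plays the same role for Y). A standalone illustration with made-up packet bytes:

#include <stdio.h>

static int alps_stick_delta(unsigned char header, unsigned char data,
			    unsigned char sign_mask, int shift)
{
	/* Bit 7 comes from the header byte, bits 0-6 from the data byte;
	 * the signed char cast sign-extends the 8-bit result. */
	return (signed char)(((header & sign_mask) << shift) | (data & 0x7f));
}

int main(void)
{
	/* packet[0] = 0xe8: bit 5 set, X delta is negative (prints -7). */
	printf("x = %d\n", alps_stick_delta(0xe8, 0x79, 0x20, 2));
	/* packet[0] = 0xc8: bit 5 clear, same data byte (prints 121). */
	printf("x = %d\n", alps_stick_delta(0xc8, 0x79, 0x20, 2));
	return 0;
}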
+static void alps_process_touchpad_packet_v3(struct psmouse *psmouse)
+{
+ struct alps_data *priv = psmouse->private;
+ unsigned char *packet = psmouse->packet;
+ struct input_dev *dev = psmouse->dev;
+ struct input_dev *dev2 = priv->dev2;
+ int x, y, z;
+ int left, right, middle;
+ int x1 = 0, y1 = 0, x2 = 0, y2 = 0;
+ int fingers = 0, bmap_fingers;
+ unsigned int x_bitmap, y_bitmap;
+
+ /*
+ * There's no single feature of touchpad position and bitmap packets
+ * that can be used to distinguish between them. We rely on the fact
+ * that a bitmap packet should always follow a position packet with
+ * bit 6 of packet[4] set.
+ */
+ if (priv->multi_packet) {
+ /*
+ * Sometimes a position packet will indicate a multi-packet
+ * sequence, but then what follows is another position
+ * packet. Check for this, and when it happens process the
+ * position packet as usual.
+ */
+ if (packet[0] & 0x40) {
+ fingers = (packet[5] & 0x3) + 1;
+ x_bitmap = ((packet[4] & 0x7e) << 8) |
+ ((packet[1] & 0x7f) << 2) |
+ ((packet[0] & 0x30) >> 4);
+ y_bitmap = ((packet[3] & 0x70) << 4) |
+ ((packet[2] & 0x7f) << 1) |
+ (packet[4] & 0x01);
+
+ bmap_fingers = alps_process_bitmap(x_bitmap, y_bitmap,
+ &x1, &y1, &x2, &y2);
+
+ /*
+ * We shouldn't report more than one finger if
+ * we don't have two coordinates.
+ */
+ if (fingers > 1 && bmap_fingers < 2)
+ fingers = bmap_fingers;
+
+ /* Now process position packet */
+ packet = priv->multi_data;
+ } else {
+ priv->multi_packet = 0;
+ }
+ }
+
+ /*
+ * Bit 6 of byte 0 is not usually set in position packets. The only
+ * times it seems to be set are in situations where the data is
+ * suspect anyway, e.g. a palm resting flat on the touchpad. Given
+ * this, and the fact that this bit is useful for filtering
+ * out misidentified bitmap packets, we reject anything with this
+ * bit set.
+ */
+ if (packet[0] & 0x40)
+ return;
+
+ if (!priv->multi_packet && (packet[4] & 0x40)) {
+ priv->multi_packet = 1;
+ memcpy(priv->multi_data, packet, sizeof(priv->multi_data));
+ return;
+ }
+
+ priv->multi_packet = 0;
+
+ left = packet[3] & 0x01;
+ right = packet[3] & 0x02;
+ middle = packet[3] & 0x04;
+
+ x = ((packet[1] & 0x7f) << 4) | ((packet[4] & 0x30) >> 2) |
+ ((packet[0] & 0x30) >> 4);
+ y = ((packet[2] & 0x7f) << 4) | (packet[4] & 0x0f);
+ z = packet[5] & 0x7f;
+
+ /*
+ * Sometimes the hardware sends a single packet with z = 0
+ * in the middle of a stream. Real releases generate packets
+ * with x, y, and z all zero, so these seem to be flukes.
+ * Ignore them.
+ */
+ if (x && y && !z)
+ return;
+
+ /*
+ * If we don't have MT data or the bitmaps were empty, we have
+ * to rely on ST data.
+ */
+ if (!fingers) {
+ x1 = x;
+ y1 = y;
+ fingers = z > 0 ? 1 : 0;
+ }
+
+ if (z >= 64)
+ input_report_key(dev, BTN_TOUCH, 1);
+ else
+ input_report_key(dev, BTN_TOUCH, 0);
+
+ alps_report_semi_mt_data(dev, fingers, x1, y1, x2, y2);
+
+ input_report_key(dev, BTN_TOOL_FINGER, fingers == 1);
+ input_report_key(dev, BTN_TOOL_DOUBLETAP, fingers == 2);
+ input_report_key(dev, BTN_TOOL_TRIPLETAP, fingers == 3);
+ input_report_key(dev, BTN_TOOL_QUADTAP, fingers == 4);
+
+ input_report_key(dev, BTN_LEFT, left);
+ input_report_key(dev, BTN_RIGHT, right);
+ input_report_key(dev, BTN_MIDDLE, middle);
+
+ if (z > 0) {
+ input_report_abs(dev, ABS_X, x);
+ input_report_abs(dev, ABS_Y, y);
+ }
+ input_report_abs(dev, ABS_PRESSURE, z);
+
+ input_sync(dev);
+
+ if (!(priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS)) {
+ left = packet[3] & 0x10;
+ right = packet[3] & 0x20;
+ middle = packet[3] & 0x40;
+
+ input_report_key(dev2, BTN_LEFT, left);
+ input_report_key(dev2, BTN_RIGHT, right);
+ input_report_key(dev2, BTN_MIDDLE, middle);
+ input_sync(dev2);
+ }
+}
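
For reference, a v3 position packet carries an 11-bit X spread across bytes 0, 1 and 4, an 11-bit Y across bytes 2 and 4, and a 7-bit pressure in byte 5. A standalone sketch of the unpacking with a made-up packet, not part of the patch:

#include <stdio.h>

int main(void)
{
        /* Hypothetical packet contents, for illustration only. */
        unsigned char packet[6] = { 0x10, 0x5a, 0x33, 0x00, 0x25, 0x40 };
        int x, y, z;

        /* Same bit assembly as alps_process_touchpad_packet_v3(). */
        x = ((packet[1] & 0x7f) << 4) | ((packet[4] & 0x30) >> 2) |
            ((packet[0] & 0x30) >> 4);
        y = ((packet[2] & 0x7f) << 4) | (packet[4] & 0x0f);
        z = packet[5] & 0x7f;

        printf("x=%d y=%d z=%d\n", x, y, z);    /* x=1449 y=821 z=64 */
        return 0;
}
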
+
+static void alps_process_packet_v3(struct psmouse *psmouse)
+{
+ unsigned char *packet = psmouse->packet;
+
+ /*
+ * v3 protocol packets come in three types, two representing
+ * touchpad data and one representing trackstick data.
+ * Trackstick packets seem to be distinguished by always
+ * having 0x3f in the last byte. This value has never been
+ * observed in the last byte of either of the other types
+ * of packets.
+ */
+ if (packet[5] == 0x3f) {
+ alps_process_trackstick_packet_v3(psmouse);
+ return;
+ }
+
+ alps_process_touchpad_packet_v3(psmouse);
+}
+
+static void alps_process_packet_v4(struct psmouse *psmouse)
+{
+ unsigned char *packet = psmouse->packet;
+ struct input_dev *dev = psmouse->dev;
+ int x, y, z;
+ int left, right;
+
+ left = packet[4] & 0x01;
+ right = packet[4] & 0x02;
+
+ x = ((packet[1] & 0x7f) << 4) | ((packet[3] & 0x30) >> 2) |
+ ((packet[0] & 0x30) >> 4);
+ y = ((packet[2] & 0x7f) << 4) | (packet[3] & 0x0f);
+ z = packet[5] & 0x7f;
+
+ if (z >= 64)
+ input_report_key(dev, BTN_TOUCH, 1);
+ else
+ input_report_key(dev, BTN_TOUCH, 0);
+
+ if (z > 0) {
+ input_report_abs(dev, ABS_X, x);
+ input_report_abs(dev, ABS_Y, y);
+ }
+ input_report_abs(dev, ABS_PRESSURE, z);
+
+ input_report_key(dev, BTN_TOOL_FINGER, z > 0);
+ input_report_key(dev, BTN_LEFT, left);
+ input_report_key(dev, BTN_RIGHT, right);
+
+ input_sync(dev);
+}
+
+static void alps_process_packet(struct psmouse *psmouse)
+{
+ struct alps_data *priv = psmouse->private;
+ const struct alps_model_info *model = priv->i;
+
+ switch (model->proto_version) {
+ case ALPS_PROTO_V1:
+ case ALPS_PROTO_V2:
+ alps_process_packet_v1_v2(psmouse);
+ break;
+ case ALPS_PROTO_V3:
+ alps_process_packet_v3(psmouse);
+ break;
+ case ALPS_PROTO_V4:
+ alps_process_packet_v4(psmouse);
+ break;
+ }
+}
+
static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
unsigned char packet[],
bool report_buttons)
@@ -344,7 +759,7 @@ static void alps_flush_packet(unsigned long data)
serio_pause_rx(psmouse->ps2dev.serio);
- if (psmouse->pktcnt == 6) {
+ if (psmouse->pktcnt == psmouse->pktsize) {
/*
* We did not receive any more data in a reasonable amount of time.
@@ -395,8 +810,8 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
return PSMOUSE_BAD_DATA;
}
- /* Bytes 2 - 6 should have 0 in the highest bit */
- if (psmouse->pktcnt >= 2 && psmouse->pktcnt <= 6 &&
+ /* Bytes 2 - pktsize should have 0 in the highest bit */
+ if (psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize &&
(psmouse->packet[psmouse->pktcnt - 1] & 0x80)) {
psmouse_dbg(psmouse, "refusing packet[%i] = %x\n",
psmouse->pktcnt - 1,
@@ -404,7 +819,7 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
return PSMOUSE_BAD_DATA;
}
- if (psmouse->pktcnt == 6) {
+ if (psmouse->pktcnt == psmouse->pktsize) {
alps_process_packet(psmouse);
return PSMOUSE_FULL_PACKET;
}
@@ -412,16 +827,134 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
return PSMOUSE_GOOD_DATA;
}
+static int alps_command_mode_send_nibble(struct psmouse *psmouse, int nibble)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ struct alps_data *priv = psmouse->private;
+ int command;
+ unsigned char *param;
+ unsigned char dummy[4];
+
+ BUG_ON(nibble > 0xf);
+
+ command = priv->nibble_commands[nibble].command;
+ param = (command & 0x0f00) ?
+ dummy : (unsigned char *)&priv->nibble_commands[nibble].data;
+
+ if (ps2_command(ps2dev, param, command))
+ return -1;
+
+ return 0;
+}
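
The command & 0x0f00 test works because of the libps2 command-word layout: the low byte is the command itself, bits 8-11 encode how many bytes the device will send back, and bits 12-15 how many parameter bytes are written. A nibble command that expects a response therefore needs the scratch buffer, while one that does not gets its data byte as the parameter. A standalone sketch decoding two of the command words from psmouse.h, not part of the patch:

#include <stdio.h>

int main(void)
{
        /* PSMOUSE_CMD_SETRATE and PSMOUSE_CMD_GETID, as defined in psmouse.h */
        unsigned int cmds[] = { 0x10f3, 0x02f2 };
        unsigned int i;

        for (i = 0; i < 2; i++)
                printf("cmd 0x%04x: sends %u byte(s), receives %u byte(s)\n",
                       cmds[i], (cmds[i] >> 12) & 0xf, (cmds[i] >> 8) & 0xf);
        return 0;
}
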
+
+static int alps_command_mode_set_addr(struct psmouse *psmouse, int addr)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ struct alps_data *priv = psmouse->private;
+ int i, nibble;
+
+ if (ps2_command(ps2dev, NULL, priv->addr_command))
+ return -1;
+
+ for (i = 12; i >= 0; i -= 4) {
+ nibble = (addr >> i) & 0xf;
+ if (alps_command_mode_send_nibble(psmouse, nibble))
+ return -1;
+ }
+
+ return 0;
+}
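
Register addresses are 16 bits wide and are transmitted as four nibbles, most significant first, each nibble encoded by one entry of the per-protocol nibble_commands table. A standalone sketch of just the nibble split, not part of the patch, using one of the register addresses programmed later in this patch:

#include <stdio.h>

int main(void)
{
        int addr = 0x0163;      /* one of the v3 registers written below */
        int i;

        /* Most significant nibble first, as in alps_command_mode_set_addr(). */
        for (i = 12; i >= 0; i -= 4)
                printf("%x ", (addr >> i) & 0xf);
        printf("\n");   /* prints: 0 1 6 3 */
        return 0;
}
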
+
+static int __alps_command_mode_read_reg(struct psmouse *psmouse, int addr)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ unsigned char param[4];
+
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
+ return -1;
+
+ /*
+ * The address being read is returned in the first two bytes
+ * of the result. Check that this address matches the expected
+ * address.
+ */
+ if (addr != ((param[0] << 8) | param[1]))
+ return -1;
+
+ return param[2];
+}
+
+static int alps_command_mode_read_reg(struct psmouse *psmouse, int addr)
+{
+ if (alps_command_mode_set_addr(psmouse, addr))
+ return -1;
+ return __alps_command_mode_read_reg(psmouse, addr);
+}
+
+static int __alps_command_mode_write_reg(struct psmouse *psmouse, u8 value)
+{
+ if (alps_command_mode_send_nibble(psmouse, (value >> 4) & 0xf))
+ return -1;
+ if (alps_command_mode_send_nibble(psmouse, value & 0xf))
+ return -1;
+ return 0;
+}
+
+static int alps_command_mode_write_reg(struct psmouse *psmouse, int addr,
+ u8 value)
+{
+ if (alps_command_mode_set_addr(psmouse, addr))
+ return -1;
+ return __alps_command_mode_write_reg(psmouse, value);
+}
+
+static int alps_enter_command_mode(struct psmouse *psmouse,
+ unsigned char *resp)
+{
+ unsigned char param[4];
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+
+ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) ||
+ ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) {
+ psmouse_err(psmouse, "failed to enter command mode\n");
+ return -1;
+ }
+
+ if (param[0] != 0x88 || param[1] != 0x07) {
+ psmouse_dbg(psmouse,
+ "unknown response while entering command mode: %2.2x %2.2x %2.2x\n",
+ param[0], param[1], param[2]);
+ return -1;
+ }
+
+ if (resp)
+ *resp = param[2];
+ return 0;
+}
+
+static inline int alps_exit_command_mode(struct psmouse *psmouse)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSTREAM))
+ return -1;
+ return 0;
+}
+
static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int *version)
{
struct ps2dev *ps2dev = &psmouse->ps2dev;
static const unsigned char rates[] = { 0, 10, 20, 40, 60, 80, 100, 200 };
unsigned char param[4];
+ const struct alps_model_info *model = NULL;
int i;
/*
* First try "E6 report".
- * ALPS should return 0,0,10 or 0,0,100
+ * ALPS should return 0,0,10 or 0,0,100 if no buttons are pressed.
+ * Bits 0-2 of the first byte will be set if some buttons are
+ * pressed.
*/
param[0] = 0;
if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES) ||
@@ -437,7 +970,8 @@ static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int
psmouse_dbg(psmouse, "E6 report: %2.2x %2.2x %2.2x",
param[0], param[1], param[2]);
- if (param[0] != 0 || param[1] != 0 || (param[2] != 10 && param[2] != 100))
+ if ((param[0] & 0xf8) != 0 || param[1] != 0 ||
+ (param[2] != 10 && param[2] != 100))
return NULL;
/*
@@ -464,12 +998,41 @@ static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int
*version = (param[0] << 8) | (param[1] << 4) | i;
}
- for (i = 0; i < ARRAY_SIZE(alps_model_data); i++)
+ for (i = 0; i < ARRAY_SIZE(alps_model_data); i++) {
if (!memcmp(param, alps_model_data[i].signature,
- sizeof(alps_model_data[i].signature)))
- return alps_model_data + i;
+ sizeof(alps_model_data[i].signature))) {
+ model = alps_model_data + i;
+ break;
+ }
+ }
- return NULL;
+ if (model && model->proto_version > ALPS_PROTO_V2) {
+ /*
+ * Need to check command mode response to identify
+ * model
+ */
+ model = NULL;
+ if (alps_enter_command_mode(psmouse, param)) {
+ psmouse_warn(psmouse,
+ "touchpad failed to enter command mode\n");
+ } else {
+ for (i = 0; i < ARRAY_SIZE(alps_model_data); i++) {
+ if (alps_model_data[i].proto_version > ALPS_PROTO_V2 &&
+ alps_model_data[i].command_mode_resp == param[0]) {
+ model = alps_model_data + i;
+ break;
+ }
+ }
+ alps_exit_command_mode(psmouse);
+
+ if (!model)
+ psmouse_dbg(psmouse,
+ "Unknown command mode response %2.2x\n",
+ param[0]);
+ }
+ }
+
+ return model;
}
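
The E6 check near the top of this function masks off the low three bits of the first byte before comparing against zero, so a report taken while touchpad buttons happen to be held is still accepted. A standalone sketch of that mask, not part of the patch, with a hypothetical response byte:

#include <stdio.h>

int main(void)
{
        unsigned char first_byte = 0x03;        /* hypothetical: two buttons held */

        /* Bits 0-2 carry button state, so only the upper five bits must be zero. */
        printf("%s\n", (first_byte & 0xf8) ? "rejected" : "accepted");
        return 0;
}
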
/*
@@ -477,7 +1040,7 @@ static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int
* subsequent commands. It looks like glidepad is behind stickpointer,
* I'd thought it would be other way around...
*/
-static int alps_passthrough_mode(struct psmouse *psmouse, bool enable)
+static int alps_passthrough_mode_v2(struct psmouse *psmouse, bool enable)
{
struct ps2dev *ps2dev = &psmouse->ps2dev;
int cmd = enable ? PSMOUSE_CMD_SETSCALE21 : PSMOUSE_CMD_SETSCALE11;
@@ -494,7 +1057,7 @@ static int alps_passthrough_mode(struct psmouse *psmouse, bool enable)
return 0;
}
-static int alps_absolute_mode(struct psmouse *psmouse)
+static int alps_absolute_mode_v1_v2(struct psmouse *psmouse)
{
struct ps2dev *ps2dev = &psmouse->ps2dev;
@@ -565,17 +1128,17 @@ static int alps_tap_mode(struct psmouse *psmouse, int enable)
static int alps_poll(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
- unsigned char buf[6];
+ unsigned char buf[sizeof(psmouse->packet)];
bool poll_failed;
if (priv->i->flags & ALPS_PASS)
- alps_passthrough_mode(psmouse, true);
+ alps_passthrough_mode_v2(psmouse, true);
poll_failed = ps2_command(&psmouse->ps2dev, buf,
PSMOUSE_CMD_POLL | (psmouse->pktsize << 8)) < 0;
if (priv->i->flags & ALPS_PASS)
- alps_passthrough_mode(psmouse, false);
+ alps_passthrough_mode_v2(psmouse, false);
if (poll_failed || (buf[0] & priv->i->mask0) != priv->i->byte0)
return -1;
@@ -592,13 +1155,13 @@ static int alps_poll(struct psmouse *psmouse)
return 0;
}
-static int alps_hw_init(struct psmouse *psmouse)
+static int alps_hw_init_v1_v2(struct psmouse *psmouse)
{
struct alps_data *priv = psmouse->private;
const struct alps_model_info *model = priv->i;
if ((model->flags & ALPS_PASS) &&
- alps_passthrough_mode(psmouse, true)) {
+ alps_passthrough_mode_v2(psmouse, true)) {
return -1;
}
@@ -607,13 +1170,13 @@ static int alps_hw_init(struct psmouse *psmouse)
return -1;
}
- if (alps_absolute_mode(psmouse)) {
+ if (alps_absolute_mode_v1_v2(psmouse)) {
psmouse_err(psmouse, "Failed to enable absolute mode\n");
return -1;
}
if ((model->flags & ALPS_PASS) &&
- alps_passthrough_mode(psmouse, false)) {
+ alps_passthrough_mode_v2(psmouse, false)) {
return -1;
}
@@ -626,6 +1189,297 @@ static int alps_hw_init(struct psmouse *psmouse)
return 0;
}
+/*
+ * Enable or disable passthrough mode to the trackstick. Must be in
+ * command mode when calling this function.
+ */
+static int alps_passthrough_mode_v3(struct psmouse *psmouse, bool enable)
+{
+ int reg_val;
+
+ reg_val = alps_command_mode_read_reg(psmouse, 0x0008);
+ if (reg_val == -1)
+ return -1;
+
+ if (enable)
+ reg_val |= 0x01;
+ else
+ reg_val &= ~0x01;
+
+ if (__alps_command_mode_write_reg(psmouse, reg_val))
+ return -1;
+
+ return 0;
+}
+
+/* Must be in command mode when calling this function */
+static int alps_absolute_mode_v3(struct psmouse *psmouse)
+{
+ int reg_val;
+
+ reg_val = alps_command_mode_read_reg(psmouse, 0x0004);
+ if (reg_val == -1)
+ return -1;
+
+ reg_val |= 0x06;
+ if (__alps_command_mode_write_reg(psmouse, reg_val))
+ return -1;
+
+ return 0;
+}
+
+static int alps_hw_init_v3(struct psmouse *psmouse)
+{
+ struct alps_data *priv = psmouse->private;
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ int reg_val;
+ unsigned char param[4];
+
+ priv->nibble_commands = alps_v3_nibble_commands;
+ priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
+
+ if (alps_enter_command_mode(psmouse, NULL))
+ goto error;
+
+ /* Check for trackstick */
+ reg_val = alps_command_mode_read_reg(psmouse, 0x0008);
+ if (reg_val == -1)
+ goto error;
+ if (reg_val & 0x80) {
+ if (alps_passthrough_mode_v3(psmouse, true))
+ goto error;
+ if (alps_exit_command_mode(psmouse))
+ goto error;
+
+ /*
+ * E7 report for the trackstick
+ *
+ * There have been reports of failures that seem to trace back
+ * to the above trackstick check failing. When these occur,
+ * this E7 report fails, so we continue under the assumption
+ * that there isn't a trackstick after all.
+ */
+ param[0] = 0x64;
+ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) ||
+ ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) {
+ psmouse_warn(psmouse, "trackstick E7 report failed\n");
+ } else {
+ psmouse_dbg(psmouse,
+ "trackstick E7 report: %2.2x %2.2x %2.2x\n",
+ param[0], param[1], param[2]);
+
+ /*
+ * Not sure what this does, but it is absolutely
+ * essential. Without it, the touchpad does not
+ * work at all and the trackstick just emits normal
+ * PS/2 packets.
+ */
+ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) ||
+ alps_command_mode_send_nibble(psmouse, 0x9) ||
+ alps_command_mode_send_nibble(psmouse, 0x4)) {
+ psmouse_err(psmouse,
+ "Error sending magic E6 sequence\n");
+ goto error_passthrough;
+ }
+ }
+
+ if (alps_enter_command_mode(psmouse, NULL))
+ goto error_passthrough;
+ if (alps_passthrough_mode_v3(psmouse, false))
+ goto error;
+ }
+
+ if (alps_absolute_mode_v3(psmouse)) {
+ psmouse_err(psmouse, "Failed to enter absolute mode\n");
+ goto error;
+ }
+
+ reg_val = alps_command_mode_read_reg(psmouse, 0x0006);
+ if (reg_val == -1)
+ goto error;
+ if (__alps_command_mode_write_reg(psmouse, reg_val | 0x01))
+ goto error;
+
+ reg_val = alps_command_mode_read_reg(psmouse, 0x0007);
+ if (reg_val == -1)
+ goto error;
+ if (__alps_command_mode_write_reg(psmouse, reg_val | 0x01))
+ goto error;
+
+ if (alps_command_mode_read_reg(psmouse, 0x0144) == -1)
+ goto error;
+ if (__alps_command_mode_write_reg(psmouse, 0x04))
+ goto error;
+
+ if (alps_command_mode_read_reg(psmouse, 0x0159) == -1)
+ goto error;
+ if (__alps_command_mode_write_reg(psmouse, 0x03))
+ goto error;
+
+ if (alps_command_mode_read_reg(psmouse, 0x0163) == -1)
+ goto error;
+ if (alps_command_mode_write_reg(psmouse, 0x0163, 0x03))
+ goto error;
+
+ if (alps_command_mode_read_reg(psmouse, 0x0162) == -1)
+ goto error;
+ if (alps_command_mode_write_reg(psmouse, 0x0162, 0x04))
+ goto error;
+
+ /*
+ * This ensures the trackstick packets are in the format
+ * supported by this driver. If bit 1 isn't set the packet
+ * format is different.
+ */
+ if (alps_command_mode_write_reg(psmouse, 0x0008, 0x82))
+ goto error;
+
+ alps_exit_command_mode(psmouse);
+
+ /* Set rate and enable data reporting */
+ param[0] = 0x64;
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE)) {
+ psmouse_err(psmouse, "Failed to enable data reporting\n");
+ return -1;
+ }
+
+ return 0;
+
+error_passthrough:
+ /* Something failed while in passthrough mode, so try to get out */
+ if (!alps_enter_command_mode(psmouse, NULL))
+ alps_passthrough_mode_v3(psmouse, false);
+error:
+ /*
+ * Leaving the touchpad in command mode will essentially render
+ * it unusable until the machine reboots, so exit it here just
+ * to be safe
+ */
+ alps_exit_command_mode(psmouse);
+ return -1;
+}
+
+/* Must be in command mode when calling this function */
+static int alps_absolute_mode_v4(struct psmouse *psmouse)
+{
+ int reg_val;
+
+ reg_val = alps_command_mode_read_reg(psmouse, 0x0004);
+ if (reg_val == -1)
+ return -1;
+
+ reg_val |= 0x02;
+ if (__alps_command_mode_write_reg(psmouse, reg_val))
+ return -1;
+
+ return 0;
+}
+
+static int alps_hw_init_v4(struct psmouse *psmouse)
+{
+ struct alps_data *priv = psmouse->private;
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+ unsigned char param[4];
+
+ priv->nibble_commands = alps_v4_nibble_commands;
+ priv->addr_command = PSMOUSE_CMD_DISABLE;
+
+ if (alps_enter_command_mode(psmouse, NULL))
+ goto error;
+
+ if (alps_absolute_mode_v4(psmouse)) {
+ psmouse_err(psmouse, "Failed to enter absolute mode\n");
+ goto error;
+ }
+
+ if (alps_command_mode_write_reg(psmouse, 0x0007, 0x8c))
+ goto error;
+
+ if (alps_command_mode_write_reg(psmouse, 0x0149, 0x03))
+ goto error;
+
+ if (alps_command_mode_write_reg(psmouse, 0x0160, 0x03))
+ goto error;
+
+ if (alps_command_mode_write_reg(psmouse, 0x017f, 0x15))
+ goto error;
+
+ if (alps_command_mode_write_reg(psmouse, 0x0151, 0x01))
+ goto error;
+
+ if (alps_command_mode_write_reg(psmouse, 0x0168, 0x03))
+ goto error;
+
+ if (alps_command_mode_write_reg(psmouse, 0x014a, 0x03))
+ goto error;
+
+ if (alps_command_mode_write_reg(psmouse, 0x0161, 0x03))
+ goto error;
+
+ alps_exit_command_mode(psmouse);
+
+ /*
+ * This sequence changes the output from a 9-byte to an
+ * 8-byte format. All the same data seems to be present,
+ * just in a more compact format.
+ */
+ param[0] = 0xc8;
+ param[1] = 0x64;
+ param[2] = 0x50;
+ if (ps2_command(ps2dev, &param[0], PSMOUSE_CMD_SETRATE) ||
+ ps2_command(ps2dev, &param[1], PSMOUSE_CMD_SETRATE) ||
+ ps2_command(ps2dev, &param[2], PSMOUSE_CMD_SETRATE) ||
+ ps2_command(ps2dev, param, PSMOUSE_CMD_GETID))
+ return -1;
+
+ /* Set rate and enable data reporting */
+ param[0] = 0x64;
+ if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE) ||
+ ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE)) {
+ psmouse_err(psmouse, "Failed to enable data reporting\n");
+ return -1;
+ }
+
+ return 0;
+
+error:
+ /*
+ * Leaving the touchpad in command mode will essentially render
+ * it unusable until the machine reboots, so exit it here just
+ * to be safe
+ */
+ alps_exit_command_mode(psmouse);
+ return -1;
+}
+
+static int alps_hw_init(struct psmouse *psmouse)
+{
+ struct alps_data *priv = psmouse->private;
+ const struct alps_model_info *model = priv->i;
+ int ret = -1;
+
+ switch (model->proto_version) {
+ case ALPS_PROTO_V1:
+ case ALPS_PROTO_V2:
+ ret = alps_hw_init_v1_v2(psmouse);
+ break;
+ case ALPS_PROTO_V3:
+ ret = alps_hw_init_v3(psmouse);
+ break;
+ case ALPS_PROTO_V4:
+ ret = alps_hw_init_v4(psmouse);
+ break;
+ }
+
+ return ret;
+}
+
static int alps_reconnect(struct psmouse *psmouse)
{
const struct alps_model_info *model;
@@ -666,6 +1520,8 @@ int alps_init(struct psmouse *psmouse)
psmouse->private = priv;
+ psmouse_reset(psmouse);
+
model = alps_get_model(psmouse, &version);
if (!model)
goto init_fail;
@@ -693,8 +1549,29 @@ int alps_init(struct psmouse *psmouse)
BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT);
dev1->evbit[BIT_WORD(EV_ABS)] |= BIT_MASK(EV_ABS);
- input_set_abs_params(dev1, ABS_X, 0, 1023, 0, 0);
- input_set_abs_params(dev1, ABS_Y, 0, 767, 0, 0);
+
+ switch (model->proto_version) {
+ case ALPS_PROTO_V1:
+ case ALPS_PROTO_V2:
+ input_set_abs_params(dev1, ABS_X, 0, 1023, 0, 0);
+ input_set_abs_params(dev1, ABS_Y, 0, 767, 0, 0);
+ break;
+ case ALPS_PROTO_V3:
+ set_bit(INPUT_PROP_SEMI_MT, dev1->propbit);
+ input_mt_init_slots(dev1, 2);
+ input_set_abs_params(dev1, ABS_MT_POSITION_X, 0, ALPS_V3_X_MAX, 0, 0);
+ input_set_abs_params(dev1, ABS_MT_POSITION_Y, 0, ALPS_V3_Y_MAX, 0, 0);
+
+ set_bit(BTN_TOOL_DOUBLETAP, dev1->keybit);
+ set_bit(BTN_TOOL_TRIPLETAP, dev1->keybit);
+ set_bit(BTN_TOOL_QUADTAP, dev1->keybit);
+ /* fall through */
+ case ALPS_PROTO_V4:
+ input_set_abs_params(dev1, ABS_X, 0, ALPS_V3_X_MAX, 0, 0);
+ input_set_abs_params(dev1, ABS_Y, 0, ALPS_V3_Y_MAX, 0, 0);
+ break;
+ }
+
input_set_abs_params(dev1, ABS_PRESSURE, 0, 127, 0, 0);
if (model->flags & ALPS_WHEEL) {
@@ -737,7 +1614,7 @@ int alps_init(struct psmouse *psmouse)
psmouse->poll = alps_poll;
psmouse->disconnect = alps_disconnect;
psmouse->reconnect = alps_reconnect;
- psmouse->pktsize = 6;
+ psmouse->pktsize = model->proto_version == ALPS_PROTO_V4 ? 8 : 6;
/* We are having trouble resyncing ALPS touchpads so disable it for now */
psmouse->resync_time = 0;
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index 904ed8b..a00a4ab 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -12,20 +12,39 @@
#ifndef _ALPS_H
#define _ALPS_H
+#define ALPS_PROTO_V1 0
+#define ALPS_PROTO_V2 1
+#define ALPS_PROTO_V3 2
+#define ALPS_PROTO_V4 3
+
struct alps_model_info {
unsigned char signature[3];
+ unsigned char command_mode_resp; /* v3/v4 only */
+ unsigned char proto_version;
unsigned char byte0, mask0;
unsigned char flags;
};
+struct alps_nibble_commands {
+ int command;
+ unsigned char data;
+};
+
struct alps_data {
struct input_dev *dev2; /* Relative device */
char phys[32]; /* Phys */
const struct alps_model_info *i;/* Info */
+ const struct alps_nibble_commands *nibble_commands;
+ int addr_command; /* Command to set register address */
int prev_fin; /* Finger bit from previous packet */
+ int multi_packet; /* Multi-packet data in progress */
+ unsigned char multi_data[6]; /* Saved multi-packet data */
+ u8 quirks;
struct timer_list timer;
};
+#define ALPS_QUIRK_TRACKSTICK_BUTTONS 1 /* trackstick buttons in trackstick packet */
+
#ifdef CONFIG_MOUSE_PS2_ALPS
int alps_detect(struct psmouse *psmouse, bool set_properties);
int alps_init(struct psmouse *psmouse);
diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c
index b77f999..0acbc7d 100644
--- a/drivers/input/mouse/appletouch.c
+++ b/drivers/input/mouse/appletouch.c
@@ -938,15 +938,4 @@ static struct usb_driver atp_driver = {
.id_table = atp_table,
};
-static int __init atp_init(void)
-{
- return usb_register(&atp_driver);
-}
-
-static void __exit atp_exit(void)
-{
- usb_deregister(&atp_driver);
-}
-
-module_init(atp_init);
-module_exit(atp_exit);
+module_usb_driver(atp_driver);
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index 5ec617e..927e479 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -433,6 +433,9 @@ static void setup_events_to_report(struct input_dev *input_dev,
__set_bit(BTN_TOOL_QUADTAP, input_dev->keybit);
__set_bit(BTN_LEFT, input_dev->keybit);
+ if (cfg->caps & HAS_INTEGRATED_BUTTON)
+ __set_bit(INPUT_PROP_BUTTONPAD, input_dev->propbit);
+
input_set_events_per_packet(input_dev, 60);
}
@@ -940,16 +943,4 @@ static struct usb_driver bcm5974_driver = {
.supports_autosuspend = 1,
};
-static int __init bcm5974_init(void)
-{
- return usb_register(&bcm5974_driver);
-}
-
-static void __exit bcm5974_exit(void)
-{
- usb_deregister(&bcm5974_driver);
-}
-
-module_init(bcm5974_init);
-module_exit(bcm5974_exit);
-
+module_usb_driver(bcm5974_driver);
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index e2a9867..d2c0db1 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -43,6 +43,24 @@ static int synaptics_send_cmd(struct psmouse *psmouse, unsigned char c,
}
/*
+ * V3 and later support this fast command
+ */
+static int elantech_send_cmd(struct psmouse *psmouse, unsigned char c,
+ unsigned char *param)
+{
+ struct ps2dev *ps2dev = &psmouse->ps2dev;
+
+ if (ps2_command(ps2dev, NULL, ETP_PS2_CUSTOM_COMMAND) ||
+ ps2_command(ps2dev, NULL, c) ||
+ ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) {
+ psmouse_err(psmouse, "%s query 0x%02x failed.\n", __func__, c);
+ return -1;
+ }
+
+ return 0;
+}
+
+/*
* A retrying version of ps2_command
*/
static int elantech_ps2_command(struct psmouse *psmouse,
@@ -863,13 +881,13 @@ static int elantech_set_range(struct psmouse *psmouse,
i = (etd->fw_version > 0x020800 &&
etd->fw_version < 0x020900) ? 1 : 2;
- if (synaptics_send_cmd(psmouse, ETP_FW_ID_QUERY, param))
+ if (etd->send_cmd(psmouse, ETP_FW_ID_QUERY, param))
return -1;
fixed_dpi = param[1] & 0x10;
if (((etd->fw_version >> 16) == 0x14) && fixed_dpi) {
- if (synaptics_send_cmd(psmouse, ETP_SAMPLE_QUERY, param))
+ if (etd->send_cmd(psmouse, ETP_SAMPLE_QUERY, param))
return -1;
*x_max = (etd->capabilities[1] - i) * param[1] / 2;
@@ -888,7 +906,7 @@ static int elantech_set_range(struct psmouse *psmouse,
break;
case 3:
- if (synaptics_send_cmd(psmouse, ETP_FW_ID_QUERY, param))
+ if (etd->send_cmd(psmouse, ETP_FW_ID_QUERY, param))
return -1;
*x_max = (0x0f & param[0]) << 8 | param[1];
@@ -896,7 +914,7 @@ static int elantech_set_range(struct psmouse *psmouse,
break;
case 4:
- if (synaptics_send_cmd(psmouse, ETP_FW_ID_QUERY, param))
+ if (etd->send_cmd(psmouse, ETP_FW_ID_QUERY, param))
return -1;
*x_max = (0x0f & param[0]) << 8 | param[1];
@@ -913,6 +931,30 @@ static int elantech_set_range(struct psmouse *psmouse,
}
/*
+ * (value from firmware) * 10 + 790 = dpi
+ * we also have to convert dpi to dots/mm (*10/254 to avoid floating point)
+ */
+static unsigned int elantech_convert_res(unsigned int val)
+{
+ return (val * 10 + 790) * 10 / 254;
+}
+
+static int elantech_get_resolution_v4(struct psmouse *psmouse,
+ unsigned int *x_res,
+ unsigned int *y_res)
+{
+ unsigned char param[3];
+
+ if (elantech_send_cmd(psmouse, ETP_RESOLUTION_QUERY, param))
+ return -1;
+
+ *x_res = elantech_convert_res(param[1] & 0x0f);
+ *y_res = elantech_convert_res((param[1] & 0xf0) >> 4);
+
+ return 0;
+}
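
The firmware packs the two resolution codes into the second response byte, X in the low nibble and Y in the high nibble; each code maps to val * 10 + 790 dpi and is then converted to the dots-per-mm units expected by input_abs_set_res(). A standalone sketch with a hypothetical response byte, not part of the patch; a code of 5, for example, works out to 840 dpi, i.e. 33 dots/mm:

#include <stdio.h>

static unsigned int convert_res(unsigned int val)
{
        /* dpi = val * 10 + 790, then dpi -> dots/mm without floating point */
        return (val * 10 + 790) * 10 / 254;
}

int main(void)
{
        unsigned char resp1 = 0x55;     /* hypothetical second response byte */

        printf("x_res=%u y_res=%u dots/mm\n",
               convert_res(resp1 & 0x0f), convert_res((resp1 & 0xf0) >> 4));
        return 0;
}
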
+
+/*
* Set the appropriate event bits for the input subsystem
*/
static int elantech_set_input_params(struct psmouse *psmouse)
@@ -920,6 +962,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
struct input_dev *dev = psmouse->dev;
struct elantech_data *etd = psmouse->private;
unsigned int x_min = 0, y_min = 0, x_max = 0, y_max = 0, width = 0;
+ unsigned int x_res = 0, y_res = 0;
if (elantech_set_range(psmouse, &x_min, &y_min, &x_max, &y_max, &width))
return -1;
@@ -967,10 +1010,20 @@ static int elantech_set_input_params(struct psmouse *psmouse)
break;
case 4:
+ if (elantech_get_resolution_v4(psmouse, &x_res, &y_res)) {
+ /*
+ * If the query failed, print a warning and leave the values
+ * at zero to resemble synaptics.c behavior.
+ */
+ psmouse_warn(psmouse, "couldn't query resolution data.\n");
+ }
+
__set_bit(BTN_TOOL_QUADTAP, dev->keybit);
/* For X to recognize me as touchpad. */
input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0);
+ input_abs_set_res(dev, ABS_X, x_res);
+ input_abs_set_res(dev, ABS_Y, y_res);
/*
* range of pressure and width is the same as v2,
* report ABS_PRESSURE, ABS_TOOL_WIDTH for compatibility.
@@ -983,6 +1036,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
input_mt_init_slots(dev, ETP_MAX_FINGERS);
input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0);
input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0);
+ input_abs_set_res(dev, ABS_MT_POSITION_X, x_res);
+ input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res);
input_set_abs_params(dev, ABS_MT_PRESSURE, ETP_PMIN_V2,
ETP_PMAX_V2, 0, 0);
/*
@@ -1031,16 +1086,13 @@ static ssize_t elantech_set_int_attr(struct psmouse *psmouse,
struct elantech_data *etd = psmouse->private;
struct elantech_attr_data *attr = data;
unsigned char *reg = (unsigned char *) etd + attr->field_offset;
- unsigned long value;
+ unsigned char value;
int err;
- err = strict_strtoul(buf, 16, &value);
+ err = kstrtou8(buf, 16, &value);
if (err)
return err;
- if (value > 0xff)
- return -EINVAL;
-
/* Do we need to preserve some bits for version 2 hardware too? */
if (etd->hw_version == 1) {
if (attr->reg == 0x10)
@@ -1233,9 +1285,11 @@ static int elantech_set_properties(struct elantech_data *etd)
}
}
- /*
- * Turn on packet checking by default.
- */
+ /* Decide which send_cmd we're going to use early */
+ etd->send_cmd = etd->hw_version >= 3 ? elantech_send_cmd :
+ synaptics_send_cmd;
+
+ /* Turn on packet checking by default */
etd->paritycheck = 1;
/*
@@ -1291,7 +1345,7 @@ int elantech_init(struct psmouse *psmouse)
"assuming hardware version %d (with firmware version 0x%02x%02x%02x)\n",
etd->hw_version, param[0], param[1], param[2]);
- if (synaptics_send_cmd(psmouse, ETP_CAPABILITIES_QUERY,
+ if (etd->send_cmd(psmouse, ETP_CAPABILITIES_QUERY,
etd->capabilities)) {
psmouse_err(psmouse, "failed to query capabilities.\n");
goto init_fail;
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
index 9e5f1aa..46db3be 100644
--- a/drivers/input/mouse/elantech.h
+++ b/drivers/input/mouse/elantech.h
@@ -20,6 +20,7 @@
#define ETP_FW_VERSION_QUERY 0x01
#define ETP_CAPABILITIES_QUERY 0x02
#define ETP_SAMPLE_QUERY 0x03
+#define ETP_RESOLUTION_QUERY 0x04
/*
* Command values for register reading or writing
@@ -135,6 +136,7 @@ struct elantech_data {
unsigned int width;
struct finger_pos mt[ETP_MAX_FINGERS];
unsigned char parity[256];
+ int (*send_cmd)(struct psmouse *psmouse, unsigned char c, unsigned char *param);
};
#ifdef CONFIG_MOUSE_PS2_ELANTECH
diff --git a/drivers/input/mouse/gpio_mouse.c b/drivers/input/mouse/gpio_mouse.c
index 58902fb..a9ad8e1 100644
--- a/drivers/input/mouse/gpio_mouse.c
+++ b/drivers/input/mouse/gpio_mouse.c
@@ -178,18 +178,7 @@ static struct platform_driver gpio_mouse_device_driver = {
.owner = THIS_MODULE,
}
};
-
-static int __init gpio_mouse_init(void)
-{
- return platform_driver_register(&gpio_mouse_device_driver);
-}
-module_init(gpio_mouse_init);
-
-static void __exit gpio_mouse_exit(void)
-{
- platform_driver_unregister(&gpio_mouse_device_driver);
-}
-module_exit(gpio_mouse_exit);
+module_platform_driver(gpio_mouse_device_driver);
MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
MODULE_DESCRIPTION("GPIO mouse driver");
diff --git a/drivers/input/mouse/hgpk.c b/drivers/input/mouse/hgpk.c
index 0470dd4..1c5d521 100644
--- a/drivers/input/mouse/hgpk.c
+++ b/drivers/input/mouse/hgpk.c
@@ -789,11 +789,14 @@ static ssize_t hgpk_set_powered(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
struct hgpk_data *priv = psmouse->private;
- unsigned long value;
+ unsigned int value;
int err;
- err = strict_strtoul(buf, 10, &value);
- if (err || value > 1)
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value > 1)
return -EINVAL;
if (value != priv->powered) {
@@ -881,11 +884,14 @@ static ssize_t hgpk_trigger_recal(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
struct hgpk_data *priv = psmouse->private;
- unsigned long value;
+ unsigned int value;
int err;
- err = strict_strtoul(buf, 10, &value);
- if (err || value != 1)
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value != 1)
return -EINVAL;
/*
diff --git a/drivers/input/mouse/logips2pp.c b/drivers/input/mouse/logips2pp.c
index faac2c3..84de2fc 100644
--- a/drivers/input/mouse/logips2pp.c
+++ b/drivers/input/mouse/logips2pp.c
@@ -155,9 +155,14 @@ static ssize_t ps2pp_attr_show_smartscroll(struct psmouse *psmouse,
static ssize_t ps2pp_attr_set_smartscroll(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
- unsigned long value;
+ unsigned int value;
+ int err;
- if (strict_strtoul(buf, 10, &value) || value > 1)
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value > 1)
return -EINVAL;
ps2pp_set_smartscroll(psmouse, value);
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 9f352fb..e6c9931 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -60,7 +60,7 @@ static unsigned int psmouse_rate = 100;
module_param_named(rate, psmouse_rate, uint, 0644);
MODULE_PARM_DESC(rate, "Report rate, in reports per second.");
-static unsigned int psmouse_smartscroll = 1;
+static bool psmouse_smartscroll = 1;
module_param_named(smartscroll, psmouse_smartscroll, bool, 0644);
MODULE_PARM_DESC(smartscroll, "Logitech Smartscroll autorepeat, 1 = enabled (default), 0 = disabled.");
@@ -127,7 +127,7 @@ struct psmouse_protocol {
* relevant events to the input module once full packet has arrived.
*/
-static psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
+psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
{
struct input_dev *dev = psmouse->dev;
unsigned char *packet = psmouse->packet;
@@ -418,6 +418,49 @@ int psmouse_reset(struct psmouse *psmouse)
return 0;
}
+/*
+ * Here we set the mouse resolution.
+ */
+
+void psmouse_set_resolution(struct psmouse *psmouse, unsigned int resolution)
+{
+ static const unsigned char params[] = { 0, 1, 2, 2, 3 };
+ unsigned char p;
+
+ if (resolution == 0 || resolution > 200)
+ resolution = 200;
+
+ p = params[resolution / 50];
+ ps2_command(&psmouse->ps2dev, &p, PSMOUSE_CMD_SETRES);
+ psmouse->resolution = 25 << p;
+}
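
The SETRES command takes a code of 0-3 for 25, 50, 100 or 200 counts per inch; the params[] table rounds the request down to a supported step (150 lands on 100) and 25 << p recovers the value actually programmed into psmouse->resolution. A standalone sketch of the mapping, not part of the patch:

#include <stdio.h>

int main(void)
{
        static const unsigned char params[] = { 0, 1, 2, 2, 3 };
        unsigned int requested[] = { 25, 50, 100, 150, 200 };
        unsigned int i, p;

        for (i = 0; i < sizeof(requested) / sizeof(requested[0]); i++) {
                p = params[requested[i] / 50];
                printf("%u cpi requested -> code %u -> %u cpi\n",
                       requested[i], p, 25 << p);
        }
        return 0;
}
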
+
+/*
+ * Here we set the mouse report rate.
+ */
+
+static void psmouse_set_rate(struct psmouse *psmouse, unsigned int rate)
+{
+ static const unsigned char rates[] = { 200, 100, 80, 60, 40, 20, 10, 0 };
+ unsigned char r;
+ int i = 0;
+
+ while (rates[i] > rate) i++;
+ r = rates[i];
+ ps2_command(&psmouse->ps2dev, &r, PSMOUSE_CMD_SETRATE);
+ psmouse->rate = r;
+}
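
The rate table is walked until the first supported rate that does not exceed the request, with the trailing zero guaranteeing termination. A standalone sketch, not part of the patch:

#include <stdio.h>

int main(void)
{
        static const unsigned char rates[] = { 200, 100, 80, 60, 40, 20, 10, 0 };
        unsigned int requested = 70, i = 0;

        /* Walk down until we hit a supported rate not above the request. */
        while (rates[i] > requested)
                i++;
        printf("%u reports/s requested -> %u programmed\n", requested, rates[i]);
        return 0;
}
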
+
+/*
+ * psmouse_poll() - default poll handler. Everyone except for ALPS uses it.
+ */
+
+static int psmouse_poll(struct psmouse *psmouse)
+{
+ return ps2_command(&psmouse->ps2dev, psmouse->packet,
+ PSMOUSE_CMD_POLL | (psmouse->pktsize << 8));
+}
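
Per the PSMOUSE_CMD_POLL definition in psmouse.h, the caller sets the number of bytes to receive in the upper byte of the command word, so the generic poll handler simply ORs the protocol's packet size into the command. A standalone sketch of the encoding, not part of the patch:

#include <stdio.h>

#define PSMOUSE_CMD_POLL 0x00eb /* caller sets number of bytes to receive */

int main(void)
{
        unsigned int pktsize = 6;       /* 6 for most protocols, 8 for ALPS v4 */

        printf("poll command word: 0x%04x\n", PSMOUSE_CMD_POLL | (pktsize << 8));
        return 0;
}
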
+
/*
* Genius NetMouse magic init.
@@ -603,6 +646,56 @@ static int cortron_detect(struct psmouse *psmouse, bool set_properties)
}
/*
+ * Apply default settings to the psmouse structure. Most of them will
+ * be overridden by individual protocol initialization routines.
+ */
+
+static void psmouse_apply_defaults(struct psmouse *psmouse)
+{
+ struct input_dev *input_dev = psmouse->dev;
+
+ memset(input_dev->evbit, 0, sizeof(input_dev->evbit));
+ memset(input_dev->keybit, 0, sizeof(input_dev->keybit));
+ memset(input_dev->relbit, 0, sizeof(input_dev->relbit));
+ memset(input_dev->absbit, 0, sizeof(input_dev->absbit));
+ memset(input_dev->mscbit, 0, sizeof(input_dev->mscbit));
+
+ __set_bit(EV_KEY, input_dev->evbit);
+ __set_bit(EV_REL, input_dev->evbit);
+
+ __set_bit(BTN_LEFT, input_dev->keybit);
+ __set_bit(BTN_RIGHT, input_dev->keybit);
+
+ __set_bit(REL_X, input_dev->relbit);
+ __set_bit(REL_Y, input_dev->relbit);
+
+ psmouse->set_rate = psmouse_set_rate;
+ psmouse->set_resolution = psmouse_set_resolution;
+ psmouse->poll = psmouse_poll;
+ psmouse->protocol_handler = psmouse_process_byte;
+ psmouse->pktsize = 3;
+ psmouse->reconnect = NULL;
+ psmouse->disconnect = NULL;
+ psmouse->cleanup = NULL;
+ psmouse->pt_activate = NULL;
+ psmouse->pt_deactivate = NULL;
+}
+
+/*
+ * Apply default settings to the psmouse structure and call specified
+ * protocol detection or initialization routine.
+ */
+static int psmouse_do_detect(int (*detect)(struct psmouse *psmouse,
+ bool set_properties),
+ struct psmouse *psmouse, bool set_properties)
+{
+ if (set_properties)
+ psmouse_apply_defaults(psmouse);
+
+ return detect(psmouse, set_properties);
+}
+
+/*
* psmouse_extensions() probes for any extensions to the basic PS/2 protocol
* the mouse may have.
*/
@@ -616,7 +709,7 @@ static int psmouse_extensions(struct psmouse *psmouse,
* We always check for lifebook because it does not disturb mouse
* (it only checks DMI information).
*/
- if (lifebook_detect(psmouse, set_properties) == 0) {
+ if (psmouse_do_detect(lifebook_detect, psmouse, set_properties) == 0) {
if (max_proto > PSMOUSE_IMEX) {
if (!set_properties || lifebook_init(psmouse) == 0)
return PSMOUSE_LIFEBOOK;
@@ -628,15 +721,18 @@ static int psmouse_extensions(struct psmouse *psmouse,
* upsets the thinkingmouse).
*/
- if (max_proto > PSMOUSE_IMEX && thinking_detect(psmouse, set_properties) == 0)
+ if (max_proto > PSMOUSE_IMEX &&
+ psmouse_do_detect(thinking_detect, psmouse, set_properties) == 0) {
return PSMOUSE_THINKPS;
+ }
/*
* Try Synaptics TouchPad. Note that probing is done even if Synaptics protocol
* support is disabled in config - we need to know if it is synaptics so we
* can reset it properly after probing for intellimouse.
*/
- if (max_proto > PSMOUSE_PS2 && synaptics_detect(psmouse, set_properties) == 0) {
+ if (max_proto > PSMOUSE_PS2 &&
+ psmouse_do_detect(synaptics_detect, psmouse, set_properties) == 0) {
synaptics_hardware = true;
if (max_proto > PSMOUSE_IMEX) {
@@ -667,7 +763,8 @@ static int psmouse_extensions(struct psmouse *psmouse,
*/
if (max_proto > PSMOUSE_IMEX) {
ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_RESET_DIS);
- if (alps_detect(psmouse, set_properties) == 0) {
+ if (psmouse_do_detect(alps_detect,
+ psmouse, set_properties) == 0) {
if (!set_properties || alps_init(psmouse) == 0)
return PSMOUSE_ALPS;
/*
@@ -681,7 +778,7 @@ static int psmouse_extensions(struct psmouse *psmouse,
* Try OLPC HGPK touchpad.
*/
if (max_proto > PSMOUSE_IMEX &&
- hgpk_detect(psmouse, set_properties) == 0) {
+ psmouse_do_detect(hgpk_detect, psmouse, set_properties) == 0) {
if (!set_properties || hgpk_init(psmouse) == 0)
return PSMOUSE_HGPK;
/*
@@ -694,7 +791,7 @@ static int psmouse_extensions(struct psmouse *psmouse,
* Try Elantech touchpad.
*/
if (max_proto > PSMOUSE_IMEX &&
- elantech_detect(psmouse, set_properties) == 0) {
+ psmouse_do_detect(elantech_detect, psmouse, set_properties) == 0) {
if (!set_properties || elantech_init(psmouse) == 0)
return PSMOUSE_ELANTECH;
/*
@@ -703,18 +800,21 @@ static int psmouse_extensions(struct psmouse *psmouse,
max_proto = PSMOUSE_IMEX;
}
-
if (max_proto > PSMOUSE_IMEX) {
- if (genius_detect(psmouse, set_properties) == 0)
+ if (psmouse_do_detect(genius_detect,
+ psmouse, set_properties) == 0)
return PSMOUSE_GENPS;
- if (ps2pp_init(psmouse, set_properties) == 0)
+ if (psmouse_do_detect(ps2pp_init,
+ psmouse, set_properties) == 0)
return PSMOUSE_PS2PP;
- if (trackpoint_detect(psmouse, set_properties) == 0)
+ if (psmouse_do_detect(trackpoint_detect,
+ psmouse, set_properties) == 0)
return PSMOUSE_TRACKPOINT;
- if (touchkit_ps2_detect(psmouse, set_properties) == 0)
+ if (psmouse_do_detect(touchkit_ps2_detect,
+ psmouse, set_properties) == 0)
return PSMOUSE_TOUCHKIT_PS2;
}
@@ -723,7 +823,8 @@ static int psmouse_extensions(struct psmouse *psmouse,
* Trackpoint devices (causing TP_READ_ID command to time out).
*/
if (max_proto > PSMOUSE_IMEX) {
- if (fsp_detect(psmouse, set_properties) == 0) {
+ if (psmouse_do_detect(fsp_detect,
+ psmouse, set_properties) == 0) {
if (!set_properties || fsp_init(psmouse) == 0)
return PSMOUSE_FSP;
/*
@@ -741,17 +842,23 @@ static int psmouse_extensions(struct psmouse *psmouse,
ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_RESET_DIS);
psmouse_reset(psmouse);
- if (max_proto >= PSMOUSE_IMEX && im_explorer_detect(psmouse, set_properties) == 0)
+ if (max_proto >= PSMOUSE_IMEX &&
+ psmouse_do_detect(im_explorer_detect,
+ psmouse, set_properties) == 0) {
return PSMOUSE_IMEX;
+ }
- if (max_proto >= PSMOUSE_IMPS && intellimouse_detect(psmouse, set_properties) == 0)
+ if (max_proto >= PSMOUSE_IMPS &&
+ psmouse_do_detect(intellimouse_detect,
+ psmouse, set_properties) == 0) {
return PSMOUSE_IMPS;
+ }
/*
* Okay, all failed, we have a standard mouse here. The number of the buttons
* is still a question, though. We assume 3.
*/
- ps2bare_detect(psmouse, set_properties);
+ psmouse_do_detect(ps2bare_detect, psmouse, set_properties);
if (synaptics_hardware) {
/*
@@ -819,6 +926,13 @@ static const struct psmouse_protocol psmouse_protocols[] = {
.detect = synaptics_detect,
.init = synaptics_init,
},
+ {
+ .type = PSMOUSE_SYNAPTICS_RELATIVE,
+ .name = "SynRelPS/2",
+ .alias = "synaptics-relative",
+ .detect = synaptics_detect,
+ .init = synaptics_init_relative,
+ },
#endif
#ifdef CONFIG_MOUSE_PS2_ALPS
{
@@ -958,39 +1072,6 @@ static int psmouse_probe(struct psmouse *psmouse)
}
/*
- * Here we set the mouse resolution.
- */
-
-void psmouse_set_resolution(struct psmouse *psmouse, unsigned int resolution)
-{
- static const unsigned char params[] = { 0, 1, 2, 2, 3 };
- unsigned char p;
-
- if (resolution == 0 || resolution > 200)
- resolution = 200;
-
- p = params[resolution / 50];
- ps2_command(&psmouse->ps2dev, &p, PSMOUSE_CMD_SETRES);
- psmouse->resolution = 25 << p;
-}
-
-/*
- * Here we set the mouse report rate.
- */
-
-static void psmouse_set_rate(struct psmouse *psmouse, unsigned int rate)
-{
- static const unsigned char rates[] = { 200, 100, 80, 60, 40, 20, 10, 0 };
- unsigned char r;
- int i = 0;
-
- while (rates[i] > rate) i++;
- r = rates[i];
- ps2_command(&psmouse->ps2dev, &r, PSMOUSE_CMD_SETRATE);
- psmouse->rate = r;
-}
-
-/*
* psmouse_initialize() initializes the mouse to a sane state.
*/
@@ -1035,16 +1116,6 @@ static void psmouse_deactivate(struct psmouse *psmouse)
psmouse_set_state(psmouse, PSMOUSE_CMD_MODE);
}
-/*
- * psmouse_poll() - default poll handler. Everyone except for ALPS uses it.
- */
-
-static int psmouse_poll(struct psmouse *psmouse)
-{
- return ps2_command(&psmouse->ps2dev, psmouse->packet,
- PSMOUSE_CMD_POLL | (psmouse->pktsize << 8));
-}
-
/*
* psmouse_resync() attempts to re-validate current protocol.
@@ -1245,18 +1316,9 @@ static int psmouse_switch_protocol(struct psmouse *psmouse,
input_dev->dev.parent = &psmouse->ps2dev.serio->dev;
- input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
- input_dev->keybit[BIT_WORD(BTN_MOUSE)] =
- BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT);
- input_dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y);
-
- psmouse->set_rate = psmouse_set_rate;
- psmouse->set_resolution = psmouse_set_resolution;
- psmouse->poll = psmouse_poll;
- psmouse->protocol_handler = psmouse_process_byte;
- psmouse->pktsize = 3;
-
if (proto && (proto->detect || proto->init)) {
+ psmouse_apply_defaults(psmouse);
+
if (proto->detect && proto->detect(psmouse, true) < 0)
return -1;
@@ -1558,13 +1620,12 @@ static ssize_t psmouse_show_int_attr(struct psmouse *psmouse, void *offset, char
static ssize_t psmouse_set_int_attr(struct psmouse *psmouse, void *offset, const char *buf, size_t count)
{
unsigned int *field = (unsigned int *)((char *)psmouse + (size_t)offset);
- unsigned long value;
-
- if (strict_strtoul(buf, 10, &value))
- return -EINVAL;
+ unsigned int value;
+ int err;
- if ((unsigned int)value != value)
- return -EINVAL;
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
*field = value;
@@ -1671,10 +1732,12 @@ static ssize_t psmouse_attr_set_protocol(struct psmouse *psmouse, void *data, co
static ssize_t psmouse_attr_set_rate(struct psmouse *psmouse, void *data, const char *buf, size_t count)
{
- unsigned long value;
+ unsigned int value;
+ int err;
- if (strict_strtoul(buf, 10, &value))
- return -EINVAL;
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
psmouse->set_rate(psmouse, value);
return count;
@@ -1682,10 +1745,12 @@ static ssize_t psmouse_attr_set_rate(struct psmouse *psmouse, void *data, const
static ssize_t psmouse_attr_set_resolution(struct psmouse *psmouse, void *data, const char *buf, size_t count)
{
- unsigned long value;
+ unsigned int value;
+ int err;
- if (strict_strtoul(buf, 10, &value))
- return -EINVAL;
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
psmouse->set_resolution(psmouse, value);
return count;
diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
index 9b84b0c..6a41709 100644
--- a/drivers/input/mouse/psmouse.h
+++ b/drivers/input/mouse/psmouse.h
@@ -8,6 +8,7 @@
#define PSMOUSE_CMD_SETSTREAM 0x00ea
#define PSMOUSE_CMD_SETPOLL 0x00f0
#define PSMOUSE_CMD_POLL 0x00eb /* caller sets number of bytes to receive */
+#define PSMOUSE_CMD_RESET_WRAP 0x00ec
#define PSMOUSE_CMD_GETID 0x02f2
#define PSMOUSE_CMD_SETRATE 0x10f3
#define PSMOUSE_CMD_ENABLE 0x00f4
@@ -93,6 +94,7 @@ enum psmouse_type {
PSMOUSE_HGPK,
PSMOUSE_ELANTECH,
PSMOUSE_FSP,
+ PSMOUSE_SYNAPTICS_RELATIVE,
PSMOUSE_AUTO /* This one should always be last */
};
@@ -102,6 +104,7 @@ int psmouse_sliced_command(struct psmouse *psmouse, unsigned char command);
int psmouse_reset(struct psmouse *psmouse);
void psmouse_set_state(struct psmouse *psmouse, enum psmouse_state new_state);
void psmouse_set_resolution(struct psmouse *psmouse, unsigned int resolution);
+psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse);
struct psmouse_attribute {
struct device_attribute dattr;
diff --git a/drivers/input/mouse/pxa930_trkball.c b/drivers/input/mouse/pxa930_trkball.c
index ee3b0ca..a9e4bfd 100644
--- a/drivers/input/mouse/pxa930_trkball.c
+++ b/drivers/input/mouse/pxa930_trkball.c
@@ -250,19 +250,7 @@ static struct platform_driver pxa930_trkball_driver = {
.probe = pxa930_trkball_probe,
.remove = __devexit_p(pxa930_trkball_remove),
};
-
-static int __init pxa930_trkball_init(void)
-{
- return platform_driver_register(&pxa930_trkball_driver);
-}
-
-static void __exit pxa930_trkball_exit(void)
-{
- platform_driver_unregister(&pxa930_trkball_driver);
-}
-
-module_init(pxa930_trkball_init);
-module_exit(pxa930_trkball_exit);
+module_platform_driver(pxa930_trkball_driver);
MODULE_AUTHOR("Yong Yao <yaoyong@marvell.com>");
MODULE_DESCRIPTION("PXA930 Trackball Mouse Driver");
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index 86d6f39..e36847d 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -408,7 +408,7 @@ static int fsp_onpad_hscr(struct psmouse *psmouse, bool enable)
static ssize_t fsp_attr_set_setreg(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
- unsigned long reg, val;
+ int reg, val;
char *rest;
ssize_t retval;
@@ -416,7 +416,11 @@ static ssize_t fsp_attr_set_setreg(struct psmouse *psmouse, void *data,
if (rest == buf || *rest != ' ' || reg > 0xff)
return -EINVAL;
- if (strict_strtoul(rest + 1, 16, &val) || val > 0xff)
+ retval = kstrtoint(rest + 1, 16, &val);
+ if (retval)
+ return retval;
+
+ if (val > 0xff)
return -EINVAL;
if (fsp_reg_write_enable(psmouse, true))
@@ -448,10 +452,13 @@ static ssize_t fsp_attr_set_getreg(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
struct fsp_data *pad = psmouse->private;
- unsigned long reg;
- int val;
+ int reg, val, err;
+
+ err = kstrtoint(buf, 16, &reg);
+ if (err)
+ return err;
- if (strict_strtoul(buf, 16, &reg) || reg > 0xff)
+ if (reg > 0xff)
return -EINVAL;
if (fsp_reg_read(psmouse, reg, &val))
@@ -480,9 +487,13 @@ static ssize_t fsp_attr_show_pagereg(struct psmouse *psmouse,
static ssize_t fsp_attr_set_pagereg(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
- unsigned long val;
+ int val, err;
- if (strict_strtoul(buf, 16, &val) || val > 0xff)
+ err = kstrtoint(buf, 16, &val);
+ if (err)
+ return err;
+
+ if (val > 0xff)
return -EINVAL;
if (fsp_page_reg_write(psmouse, val))
@@ -505,9 +516,14 @@ static ssize_t fsp_attr_show_vscroll(struct psmouse *psmouse,
static ssize_t fsp_attr_set_vscroll(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
- unsigned long val;
+ unsigned int val;
+ int err;
+
+ err = kstrtouint(buf, 10, &val);
+ if (err)
+ return err;
- if (strict_strtoul(buf, 10, &val) || val > 1)
+ if (val > 1)
return -EINVAL;
fsp_onpad_vscr(psmouse, val);
@@ -529,9 +545,14 @@ static ssize_t fsp_attr_show_hscroll(struct psmouse *psmouse,
static ssize_t fsp_attr_set_hscroll(struct psmouse *psmouse, void *data,
const char *buf, size_t count)
{
- unsigned long val;
+ unsigned int val;
+ int err;
+
+ err = kstrtouint(buf, 10, &val);
+ if (err)
+ return err;
- if (strict_strtoul(buf, 10, &val) || val > 1)
+ if (val > 1)
return -EINVAL;
fsp_onpad_hscr(psmouse, val);
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index a6dcd18..8081a0a 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -269,19 +269,49 @@ static int synaptics_query_hardware(struct psmouse *psmouse)
return 0;
}
-static int synaptics_set_absolute_mode(struct psmouse *psmouse)
+static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse)
+{
+ static unsigned char param = 0xc8;
+ struct synaptics_data *priv = psmouse->private;
+
+ if (!SYN_CAP_ADV_GESTURE(priv->ext_cap_0c))
+ return 0;
+
+ if (psmouse_sliced_command(psmouse, SYN_QUE_MODEL))
+ return -1;
+
+ if (ps2_command(&psmouse->ps2dev, &param, PSMOUSE_CMD_SETRATE))
+ return -1;
+
+ /* Advanced gesture mode also sends multi finger data */
+ priv->capabilities |= BIT(1);
+
+ return 0;
+}
+
+static int synaptics_set_mode(struct psmouse *psmouse)
{
struct synaptics_data *priv = psmouse->private;
- priv->mode = SYN_BIT_ABSOLUTE_MODE;
- if (SYN_ID_MAJOR(priv->identity) >= 4)
+ priv->mode = 0;
+ if (priv->absolute_mode)
+ priv->mode |= SYN_BIT_ABSOLUTE_MODE;
+ if (priv->disable_gesture)
priv->mode |= SYN_BIT_DISABLE_GESTURE;
+ if (psmouse->rate >= 80)
+ priv->mode |= SYN_BIT_HIGH_RATE;
if (SYN_CAP_EXTENDED(priv->capabilities))
priv->mode |= SYN_BIT_W_MODE;
if (synaptics_mode_cmd(psmouse, priv->mode))
return -1;
+ if (priv->absolute_mode &&
+ synaptics_set_advanced_gesture_mode(psmouse)) {
+ psmouse_err(psmouse, "Advanced gesture mode init failed.\n");
+ return -1;
+ }
+
return 0;
}
@@ -300,26 +330,6 @@ static void synaptics_set_rate(struct psmouse *psmouse, unsigned int rate)
synaptics_mode_cmd(psmouse, priv->mode);
}
-static int synaptics_set_advanced_gesture_mode(struct psmouse *psmouse)
-{
- static unsigned char param = 0xc8;
- struct synaptics_data *priv = psmouse->private;
-
- if (!(SYN_CAP_ADV_GESTURE(priv->ext_cap_0c) ||
- SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)))
- return 0;
-
- if (psmouse_sliced_command(psmouse, SYN_QUE_MODEL))
- return -1;
- if (ps2_command(&psmouse->ps2dev, &param, PSMOUSE_CMD_SETRATE))
- return -1;
-
- /* Advanced gesture mode also sends multi finger data */
- priv->capabilities |= BIT(1);
-
- return 0;
-}
-
/*****************************************************************************
* Synaptics pass-through PS/2 port support
****************************************************************************/
@@ -1143,8 +1153,24 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
{
int i;
+ /* Things that apply to both modes */
__set_bit(INPUT_PROP_POINTER, dev->propbit);
+ __set_bit(EV_KEY, dev->evbit);
+ __set_bit(BTN_LEFT, dev->keybit);
+ __set_bit(BTN_RIGHT, dev->keybit);
+ if (SYN_CAP_MIDDLE_BUTTON(priv->capabilities))
+ __set_bit(BTN_MIDDLE, dev->keybit);
+
+ if (!priv->absolute_mode) {
+ /* Relative mode */
+ __set_bit(EV_REL, dev->evbit);
+ __set_bit(REL_X, dev->relbit);
+ __set_bit(REL_Y, dev->relbit);
+ return;
+ }
+
+ /* Absolute mode */
__set_bit(EV_ABS, dev->evbit);
set_abs_position_params(dev, priv, ABS_X, ABS_Y);
input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0);
@@ -1170,20 +1196,14 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
if (SYN_CAP_PALMDETECT(priv->capabilities))
input_set_abs_params(dev, ABS_TOOL_WIDTH, 0, 15, 0, 0);
- __set_bit(EV_KEY, dev->evbit);
__set_bit(BTN_TOUCH, dev->keybit);
__set_bit(BTN_TOOL_FINGER, dev->keybit);
- __set_bit(BTN_LEFT, dev->keybit);
- __set_bit(BTN_RIGHT, dev->keybit);
if (SYN_CAP_MULTIFINGER(priv->capabilities)) {
__set_bit(BTN_TOOL_DOUBLETAP, dev->keybit);
__set_bit(BTN_TOOL_TRIPLETAP, dev->keybit);
}
- if (SYN_CAP_MIDDLE_BUTTON(priv->capabilities))
- __set_bit(BTN_MIDDLE, dev->keybit);
-
if (SYN_CAP_FOUR_BUTTON(priv->capabilities) ||
SYN_CAP_MIDDLE_BUTTON(priv->capabilities)) {
__set_bit(BTN_FORWARD, dev->keybit);
@@ -1205,10 +1225,58 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv)
}
}
+static ssize_t synaptics_show_disable_gesture(struct psmouse *psmouse,
+ void *data, char *buf)
+{
+ struct synaptics_data *priv = psmouse->private;
+
+ return sprintf(buf, "%c\n", priv->disable_gesture ? '1' : '0');
+}
+
+static ssize_t synaptics_set_disable_gesture(struct psmouse *psmouse,
+ void *data, const char *buf,
+ size_t len)
+{
+ struct synaptics_data *priv = psmouse->private;
+ unsigned int value;
+ int err;
+
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value > 1)
+ return -EINVAL;
+
+ if (value == priv->disable_gesture)
+ return len;
+
+ priv->disable_gesture = value;
+ if (value)
+ priv->mode |= SYN_BIT_DISABLE_GESTURE;
+ else
+ priv->mode &= ~SYN_BIT_DISABLE_GESTURE;
+
+ if (synaptics_mode_cmd(psmouse, priv->mode))
+ return -EIO;
+
+ return len;
+}
+
+PSMOUSE_DEFINE_ATTR(disable_gesture, S_IWUSR | S_IRUGO, NULL,
+ synaptics_show_disable_gesture,
+ synaptics_set_disable_gesture);
+
static void synaptics_disconnect(struct psmouse *psmouse)
{
+ struct synaptics_data *priv = psmouse->private;
+
+ if (!priv->absolute_mode && SYN_ID_DISGEST_SUPPORTED(priv->identity))
+ device_remove_file(&psmouse->ps2dev.serio->dev,
+ &psmouse_attr_disable_gesture.dattr);
+
synaptics_reset(psmouse);
- kfree(psmouse->private);
+ kfree(priv);
psmouse->private = NULL;
}
@@ -1245,17 +1313,11 @@ static int synaptics_reconnect(struct psmouse *psmouse)
return -1;
}
- if (synaptics_set_absolute_mode(psmouse)) {
+ if (synaptics_set_mode(psmouse)) {
psmouse_err(psmouse, "Unable to initialize device.\n");
return -1;
}
- if (synaptics_set_advanced_gesture_mode(psmouse)) {
- psmouse_err(psmouse,
- "Advanced gesture mode reconnect failed.\n");
- return -1;
- }
-
if (old_priv.identity != priv->identity ||
old_priv.model_id != priv->model_id ||
old_priv.capabilities != priv->capabilities ||
@@ -1332,20 +1394,18 @@ void __init synaptics_module_init(void)
broken_olpc_ec = dmi_check_system(olpc_dmi_table);
}
-int synaptics_init(struct psmouse *psmouse)
+static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
{
struct synaptics_data *priv;
+ int err = -1;
/*
- * The OLPC XO has issues with Synaptics' absolute mode; similarly to
- * the HGPK, it quickly degrades and the hardware becomes jumpy and
- * overly sensitive. Not only that, but the constant packet spew
- * (even at a lowered 40pps rate) overloads the EC such that key
- * presses on the keyboard are missed. Given all of that, don't
- * even attempt to use Synaptics mode. Relative mode seems to work
- * just fine.
+ * The OLPC XO has issues with Synaptics' absolute mode; the constant
+ * packet spew overloads the EC such that key presses on the keyboard
+ * are missed. Given that, don't even attempt to use Absolute mode.
+ * Relative mode seems to work just fine.
*/
- if (broken_olpc_ec) {
+ if (absolute_mode && broken_olpc_ec) {
psmouse_info(psmouse,
"OLPC XO detected, not enabling Synaptics protocol.\n");
return -ENODEV;
@@ -1362,13 +1422,12 @@ int synaptics_init(struct psmouse *psmouse)
goto init_fail;
}
- if (synaptics_set_absolute_mode(psmouse)) {
- psmouse_err(psmouse, "Unable to initialize device.\n");
- goto init_fail;
- }
+ priv->absolute_mode = absolute_mode;
+ if (SYN_ID_DISGEST_SUPPORTED(priv->identity))
+ priv->disable_gesture = true;
- if (synaptics_set_advanced_gesture_mode(psmouse)) {
- psmouse_err(psmouse, "Advanced gesture mode init failed.\n");
+ if (synaptics_set_mode(psmouse)) {
+ psmouse_err(psmouse, "Unable to initialize device.\n");
goto init_fail;
}
@@ -1393,12 +1452,19 @@ int synaptics_init(struct psmouse *psmouse)
psmouse->model = ((priv->model_id & 0x00ff0000) >> 8) |
(priv->model_id & 0x000000ff);
- psmouse->protocol_handler = synaptics_process_byte;
+ if (absolute_mode) {
+ psmouse->protocol_handler = synaptics_process_byte;
+ psmouse->pktsize = 6;
+ } else {
+ /* Relative mode follows standard PS/2 mouse protocol */
+ psmouse->protocol_handler = psmouse_process_byte;
+ psmouse->pktsize = 3;
+ }
+
psmouse->set_rate = synaptics_set_rate;
psmouse->disconnect = synaptics_disconnect;
psmouse->reconnect = synaptics_reconnect;
psmouse->cleanup = synaptics_reset;
- psmouse->pktsize = 6;
/* Synaptics can usually stay in sync without extra help */
psmouse->resync_time = 0;
@@ -1417,11 +1483,32 @@ int synaptics_init(struct psmouse *psmouse)
psmouse->rate = 40;
}
+ if (!priv->absolute_mode && SYN_ID_DISGEST_SUPPORTED(priv->identity)) {
+ err = device_create_file(&psmouse->ps2dev.serio->dev,
+ &psmouse_attr_disable_gesture.dattr);
+ if (err) {
+ psmouse_err(psmouse,
+ "Failed to create disable_gesture attribute (%d)",
+ err);
+ goto init_fail;
+ }
+ }
+
return 0;
init_fail:
kfree(priv);
- return -1;
+ return err;
+}
+
+int synaptics_init(struct psmouse *psmouse)
+{
+ return __synaptics_init(psmouse, true);
+}
+
+int synaptics_init_relative(struct psmouse *psmouse)
+{
+ return __synaptics_init(psmouse, false);
}
bool synaptics_supported(void)
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index 622aea8..fd26ccc 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -100,6 +100,7 @@
#define SYN_ID_MINOR(i) (((i) >> 16) & 0xff)
#define SYN_ID_FULL(i) ((SYN_ID_MAJOR(i) << 8) | SYN_ID_MINOR(i))
#define SYN_ID_IS_SYNAPTICS(i) ((((i) >> 8) & 0xff) == 0x47)
+#define SYN_ID_DISGEST_SUPPORTED(i) (SYN_ID_MAJOR(i) >= 4)
/* synaptics special commands */
#define SYN_PS_SET_MODE2 0x14
@@ -159,6 +160,9 @@ struct synaptics_data {
unsigned char mode; /* current mode byte */
int scroll;
+ bool absolute_mode; /* run in Absolute mode */
+ bool disable_gesture; /* disable gestures */
+
struct serio *pt_port; /* Pass-through serio port */
struct synaptics_mt_state mt_state; /* Current mt finger state */
@@ -175,6 +179,7 @@ struct synaptics_data {
void synaptics_module_init(void);
int synaptics_detect(struct psmouse *psmouse, bool set_properties);
int synaptics_init(struct psmouse *psmouse);
+int synaptics_init_relative(struct psmouse *psmouse);
void synaptics_reset(struct psmouse *psmouse);
bool synaptics_supported(void);
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index 4b755cb..1c58aaf 100644
--- a/drivers/input/mouse/synaptics_i2c.c
+++ b/drivers/input/mouse/synaptics_i2c.c
@@ -185,17 +185,17 @@
#define NO_DATA_SLEEP_MSECS (MSEC_PER_SEC / 4)
/* Control touchpad's No Deceleration option */
-static int no_decel = 1;
+static bool no_decel = 1;
module_param(no_decel, bool, 0644);
MODULE_PARM_DESC(no_decel, "No Deceleration. Default = 1 (on)");
/* Control touchpad's Reduced Reporting option */
-static int reduce_report;
+static bool reduce_report;
module_param(reduce_report, bool, 0644);
MODULE_PARM_DESC(reduce_report, "Reduced Reporting. Default = 0 (off)");
/* Control touchpad's No Filter option */
-static int no_filter;
+static bool no_filter;
module_param(no_filter, bool, 0644);
MODULE_PARM_DESC(no_filter, "No Filter. Default = 0 (off)");
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 54b2fa8..22b2180 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -89,10 +89,12 @@ static ssize_t trackpoint_set_int_attr(struct psmouse *psmouse, void *data,
struct trackpoint_data *tp = psmouse->private;
struct trackpoint_attr_data *attr = data;
unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset);
- unsigned long value;
+ unsigned char value;
+ int err;
- if (strict_strtoul(buf, 10, &value) || value > 255)
- return -EINVAL;
+ err = kstrtou8(buf, 10, &value);
+ if (err)
+ return err;
*field = value;
trackpoint_write(&psmouse->ps2dev, attr->command, value);
@@ -115,9 +117,14 @@ static ssize_t trackpoint_set_bit_attr(struct psmouse *psmouse, void *data,
struct trackpoint_data *tp = psmouse->private;
struct trackpoint_attr_data *attr = data;
unsigned char *field = (unsigned char *)((char *)tp + attr->field_offset);
- unsigned long value;
+ unsigned int value;
+ int err;
+
+ err = kstrtouint(buf, 10, &value);
+ if (err)
+ return err;
- if (strict_strtoul(buf, 10, &value) || value > 1)
+ if (value > 1)
return -EINVAL;
if (attr->inverted)
diff --git a/drivers/input/serio/altera_ps2.c b/drivers/input/serio/altera_ps2.c
index d363dc4..35864c6 100644
--- a/drivers/input/serio/altera_ps2.c
+++ b/drivers/input/serio/altera_ps2.c
@@ -196,18 +196,7 @@ static struct platform_driver altera_ps2_driver = {
.of_match_table = altera_ps2_match,
},
};
-
-static int __init altera_ps2_init(void)
-{
- return platform_driver_register(&altera_ps2_driver);
-}
-module_init(altera_ps2_init);
-
-static void __exit altera_ps2_exit(void)
-{
- platform_driver_unregister(&altera_ps2_driver);
-}
-module_exit(altera_ps2_exit);
+module_platform_driver(altera_ps2_driver);
MODULE_DESCRIPTION("Altera University Program PS2 controller driver");
MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
diff --git a/drivers/input/serio/ambakmi.c b/drivers/input/serio/ambakmi.c
index 12abc50..8407d5b 100644
--- a/drivers/input/serio/ambakmi.c
+++ b/drivers/input/serio/ambakmi.c
@@ -195,6 +195,8 @@ static struct amba_id amba_kmi_idtable[] = {
{ 0, 0 }
};
+MODULE_DEVICE_TABLE(amba, amba_kmi_idtable);
+
static struct amba_driver ambakmi_driver = {
.drv = {
.name = "kmi-pl050",
diff --git a/drivers/input/serio/hp_sdc.c b/drivers/input/serio/hp_sdc.c
index 979c443..be33160 100644
--- a/drivers/input/serio/hp_sdc.c
+++ b/drivers/input/serio/hp_sdc.c
@@ -105,7 +105,7 @@ EXPORT_SYMBOL(__hp_sdc_enqueue_transaction);
EXPORT_SYMBOL(hp_sdc_enqueue_transaction);
EXPORT_SYMBOL(hp_sdc_dequeue_transaction);
-static unsigned int hp_sdc_disabled;
+static bool hp_sdc_disabled;
module_param_named(no_hpsdc, hp_sdc_disabled, bool, 0);
MODULE_PARM_DESC(no_hpsdc, "Do not enable HP SDC driver.");
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index b4cfc6c..5ec774d 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -512,6 +512,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"),
},
},
+ {
+ /* Lenovo Ideapad U455 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
+ },
+ },
{ }
};
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index d37a48e..8656441 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -991,7 +991,7 @@ static int i8042_controller_init(void)
* Reset the controller and reset CRT to the original value set by BIOS.
*/
-static void i8042_controller_reset(void)
+static void i8042_controller_reset(bool force_reset)
{
i8042_flush();
@@ -1016,7 +1016,7 @@ static void i8042_controller_reset(void)
* Reset the controller if requested.
*/
- if (i8042_reset)
+ if (i8042_reset || force_reset)
i8042_controller_selftest();
/*
@@ -1139,9 +1139,9 @@ static int i8042_controller_resume(bool force_reset)
* upsetting it.
*/
-static int i8042_pm_reset(struct device *dev)
+static int i8042_pm_suspend(struct device *dev)
{
- i8042_controller_reset();
+ i8042_controller_reset(true);
return 0;
}
@@ -1163,13 +1163,20 @@ static int i8042_pm_thaw(struct device *dev)
return 0;
}
+static int i8042_pm_reset(struct device *dev)
+{
+ i8042_controller_reset(false);
+
+ return 0;
+}
+
static int i8042_pm_restore(struct device *dev)
{
return i8042_controller_resume(false);
}
static const struct dev_pm_ops i8042_pm_ops = {
- .suspend = i8042_pm_reset,
+ .suspend = i8042_pm_suspend,
.resume = i8042_pm_resume,
.thaw = i8042_pm_thaw,
.poweroff = i8042_pm_reset,
@@ -1185,7 +1192,7 @@ static const struct dev_pm_ops i8042_pm_ops = {
static void i8042_shutdown(struct platform_device *dev)
{
- i8042_controller_reset();
+ i8042_controller_reset(false);
}
static int __init i8042_create_kbd_port(void)
@@ -1424,7 +1431,7 @@ static int __init i8042_probe(struct platform_device *dev)
out_fail:
i8042_free_aux_ports(); /* in case KBD failed but AUX not */
i8042_free_irqs();
- i8042_controller_reset();
+ i8042_controller_reset(false);
i8042_platform_device = NULL;
return error;
@@ -1434,7 +1441,7 @@ static int __devexit i8042_remove(struct platform_device *dev)
{
i8042_unregister_ports();
i8042_free_irqs();
- i8042_controller_reset();
+ i8042_controller_reset(false);
i8042_platform_device = NULL;
return 0;
diff --git a/drivers/input/serio/rpckbd.c b/drivers/input/serio/rpckbd.c
index 7ec3c97..8b44ddc 100644
--- a/drivers/input/serio/rpckbd.c
+++ b/drivers/input/serio/rpckbd.c
@@ -143,16 +143,4 @@ static struct platform_driver rpckbd_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init rpckbd_init(void)
-{
- return platform_driver_register(&rpckbd_driver);
-}
-
-static void __exit rpckbd_exit(void)
-{
- platform_driver_unregister(&rpckbd_driver);
-}
-
-module_init(rpckbd_init);
-module_exit(rpckbd_exit);
+module_platform_driver(rpckbd_driver);
diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
index 4d4cd14..4494233 100644
--- a/drivers/input/serio/serio_raw.c
+++ b/drivers/input/serio/serio_raw.c
@@ -164,7 +164,8 @@ static ssize_t serio_raw_read(struct file *file, char __user *buffer,
struct serio_raw_client *client = file->private_data;
struct serio_raw *serio_raw = client->serio_raw;
char uninitialized_var(c);
- ssize_t retval = 0;
+ ssize_t read = 0;
+ int retval;
if (serio_raw->dead)
return -ENODEV;
@@ -180,13 +181,15 @@ static ssize_t serio_raw_read(struct file *file, char __user *buffer,
if (serio_raw->dead)
return -ENODEV;
- while (retval < count && serio_raw_fetch_byte(serio_raw, &c)) {
- if (put_user(c, buffer++))
- return -EFAULT;
- retval++;
+ while (read < count && serio_raw_fetch_byte(serio_raw, &c)) {
+ if (put_user(c, buffer++)) {
+ retval = -EFAULT;
+ break;
+ }
+ read++;
}
- return retval;
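+ /* return the number of bytes read, or the error code if nothing was read */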
+ return read ?: retval;
}
static ssize_t serio_raw_write(struct file *file, const char __user *buffer,
@@ -220,11 +223,11 @@ static ssize_t serio_raw_write(struct file *file, const char __user *buffer,
goto out;
}
written++;
- };
+ }
out:
mutex_unlock(&serio_raw_mutex);
- return written;
+ return written ?: retval;
}
static unsigned int serio_raw_poll(struct file *file, poll_table *wait)
@@ -237,9 +240,9 @@ static unsigned int serio_raw_poll(struct file *file, poll_table *wait)
mask = serio_raw->dead ? POLLHUP | POLLERR : POLLOUT | POLLWRNORM;
if (serio_raw->head != serio_raw->tail)
- return POLLIN | POLLRDNORM;
+ mask |= POLLIN | POLLRDNORM;
- return 0;
+ return mask;
}
static const struct file_operations serio_raw_fops = {
diff --git a/drivers/input/serio/xilinx_ps2.c b/drivers/input/serio/xilinx_ps2.c
index d64c5a4..d96d4c2 100644
--- a/drivers/input/serio/xilinx_ps2.c
+++ b/drivers/input/serio/xilinx_ps2.c
@@ -253,7 +253,7 @@ static int __devinit xps2_of_probe(struct platform_device *ofdev)
}
/* Get IRQ for the device */
- if (of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq) == NO_IRQ) {
+ if (!of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq)) {
dev_err(dev, "no IRQ found\n");
return -ENODEV;
}
@@ -369,19 +369,7 @@ static struct platform_driver xps2_of_driver = {
.probe = xps2_of_probe,
.remove = __devexit_p(xps2_of_remove),
};
-
-static int __init xps2_init(void)
-{
- return platform_driver_register(&xps2_of_driver);
-}
-
-static void __exit xps2_cleanup(void)
-{
- platform_driver_unregister(&xps2_of_driver);
-}
-
-module_init(xps2_init);
-module_exit(xps2_cleanup);
+module_platform_driver(xps2_of_driver);
MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx XPS PS/2 driver");
diff --git a/drivers/input/tablet/Kconfig b/drivers/input/tablet/Kconfig
index 58a8775..e53f408 100644
--- a/drivers/input/tablet/Kconfig
+++ b/drivers/input/tablet/Kconfig
@@ -77,6 +77,8 @@ config TABLET_USB_WACOM
tristate "Wacom Intuos/Graphire tablet support (USB)"
depends on USB_ARCH_HAS_HCD
select USB
+ select NEW_LEDS
+ select LEDS_CLASS
help
Say Y here if you want to use the USB version of the Wacom Intuos
or Graphire tablet. Make sure to say Y to "Mouse support"
diff --git a/drivers/input/tablet/acecad.c b/drivers/input/tablet/acecad.c
index d94f7e9..f8b0b1d 100644
--- a/drivers/input/tablet/acecad.c
+++ b/drivers/input/tablet/acecad.c
@@ -269,19 +269,4 @@ static struct usb_driver usb_acecad_driver = {
.id_table = usb_acecad_id_table,
};
-static int __init usb_acecad_init(void)
-{
- int result = usb_register(&usb_acecad_driver);
- if (result == 0)
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
- return result;
-}
-
-static void __exit usb_acecad_exit(void)
-{
- usb_deregister(&usb_acecad_driver);
-}
-
-module_init(usb_acecad_init);
-module_exit(usb_acecad_exit);
+module_usb_driver(usb_acecad_driver);
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c
index 6d89fd1..205d16a 100644
--- a/drivers/input/tablet/aiptek.c
+++ b/drivers/input/tablet/aiptek.c
@@ -1198,9 +1198,9 @@ static ssize_t
store_tabletXtilt(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- long x;
+ int x;
- if (strict_strtol(buf, 10, &x)) {
+ if (kstrtoint(buf, 10, &x)) {
size_t len = buf[count - 1] == '\n' ? count - 1 : count;
if (strncmp(buf, "disable", len))
@@ -1240,9 +1240,9 @@ static ssize_t
store_tabletYtilt(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- long y;
+ int y;
- if (strict_strtol(buf, 10, &y)) {
+ if (kstrtoint(buf, 10, &y)) {
size_t len = buf[count - 1] == '\n' ? count - 1 : count;
if (strncmp(buf, "disable", len))
@@ -1277,12 +1277,13 @@ static ssize_t
store_tabletJitterDelay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- long j;
+ int err, j;
- if (strict_strtol(buf, 10, &j))
- return -EINVAL;
+ err = kstrtoint(buf, 10, &j);
+ if (err)
+ return err;
- aiptek->newSetting.jitterDelay = (int)j;
+ aiptek->newSetting.jitterDelay = j;
return count;
}
@@ -1306,12 +1307,13 @@ static ssize_t
store_tabletProgrammableDelay(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- long d;
+ int err, d;
- if (strict_strtol(buf, 10, &d))
- return -EINVAL;
+ err = kstrtoint(buf, 10, &d);
+ if (err)
+ return err;
- aiptek->newSetting.programmableDelay = (int)d;
+ aiptek->newSetting.programmableDelay = d;
return count;
}
@@ -1557,11 +1559,13 @@ static ssize_t
store_tabletWheel(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
struct aiptek *aiptek = dev_get_drvdata(dev);
- long w;
+ int err, w;
- if (strict_strtol(buf, 10, &w)) return -EINVAL;
+ err = kstrtoint(buf, 10, &w);
+ if (err)
+ return err;
- aiptek->newSetting.wheel = (int)w;
+ aiptek->newSetting.wheel = w;
return count;
}
@@ -1919,21 +1923,7 @@ static struct usb_driver aiptek_driver = {
.id_table = aiptek_ids,
};
-static int __init aiptek_init(void)
-{
- int result = usb_register(&aiptek_driver);
- if (result == 0) {
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_AUTHOR "\n");
- }
- return result;
-}
-
-static void __exit aiptek_exit(void)
-{
- usb_deregister(&aiptek_driver);
-}
+module_usb_driver(aiptek_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
@@ -1943,6 +1933,3 @@ module_param(programmableDelay, int, 0);
MODULE_PARM_DESC(programmableDelay, "delay used during tablet programming");
module_param(jitterDelay, int, 0);
MODULE_PARM_DESC(jitterDelay, "stylus/mouse settlement delay");
-
-module_init(aiptek_init);
-module_exit(aiptek_exit);
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c
index 8ea6afe..89a2978 100644
--- a/drivers/input/tablet/gtco.c
+++ b/drivers/input/tablet/gtco.c
@@ -1022,33 +1022,7 @@ static struct usb_driver gtco_driverinfo_table = {
.disconnect = gtco_disconnect,
};
-/*
- * Register this module with the USB subsystem
- */
-static int __init gtco_init(void)
-{
- int error;
-
- error = usb_register(&gtco_driverinfo_table);
- if (error) {
- err("usb_register() failed rc=0x%x", error);
- return error;
- }
-
- printk("GTCO usb driver version: %s", GTCO_VERSION);
- return 0;
-}
-
-/*
- * Deregister this module with the USB subsystem
- */
-static void __exit gtco_exit(void)
-{
- usb_deregister(&gtco_driverinfo_table);
-}
-
-module_init(gtco_init);
-module_exit(gtco_exit);
+module_usb_driver(gtco_driverinfo_table);
MODULE_DESCRIPTION("GTCO digitizer USB driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/tablet/hanwang.c b/drivers/input/tablet/hanwang.c
index 6504b62..b2db3cf 100644
--- a/drivers/input/tablet/hanwang.c
+++ b/drivers/input/tablet/hanwang.c
@@ -432,15 +432,4 @@ static struct usb_driver hanwang_driver = {
.id_table = hanwang_ids,
};
-static int __init hanwang_init(void)
-{
- return usb_register(&hanwang_driver);
-}
-
-static void __exit hanwang_exit(void)
-{
- usb_deregister(&hanwang_driver);
-}
-
-module_init(hanwang_init);
-module_exit(hanwang_exit);
+module_usb_driver(hanwang_driver);
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index 290f4e5..85a5b40 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -198,22 +198,4 @@ static struct usb_driver kbtab_driver = {
.id_table = kbtab_ids,
};
-static int __init kbtab_init(void)
-{
- int retval;
- retval = usb_register(&kbtab_driver);
- if (retval)
- goto out;
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
-out:
- return retval;
-}
-
-static void __exit kbtab_exit(void)
-{
- usb_deregister(&kbtab_driver);
-}
-
-module_init(kbtab_init);
-module_exit(kbtab_exit);
+module_usb_driver(kbtab_driver);
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index 1c1b7b4..2a97b7e 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -28,7 +28,9 @@
#define HID_USAGE_Y_TILT 0x3e
#define HID_USAGE_FINGER 0x22
#define HID_USAGE_STYLUS 0x20
-#define HID_COLLECTION 0xc0
+#define HID_COLLECTION 0xa1
+#define HID_COLLECTION_LOGICAL 0x02
+#define HID_COLLECTION_END 0xc0
enum {
WCM_UNDEFINED = 0,
@@ -66,7 +68,8 @@ static int wacom_get_report(struct usb_interface *intf, u8 type, u8 id,
do {
retval = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
USB_REQ_GET_REPORT,
- USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+ USB_DIR_IN | USB_TYPE_CLASS |
+ USB_RECIP_INTERFACE,
(type << 8) + id,
intf->altsetting[0].desc.bInterfaceNumber,
buf, size, 100);
@@ -164,7 +167,70 @@ static void wacom_close(struct input_dev *dev)
usb_autopm_put_interface(wacom->intf);
}
-static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hid_desc,
+static int wacom_parse_logical_collection(unsigned char *report,
+ struct wacom_features *features)
+{
+ int length = 0;
+
+ if (features->type == BAMBOO_PT) {
+
+ /* Logical collection is only used by 3rd gen Bamboo Touch */
+ features->pktlen = WACOM_PKGLEN_BBTOUCH3;
+ features->device_type = BTN_TOOL_DOUBLETAP;
+
+ /*
+ * Stylus and Touch have the same active area,
+ * so compute the physical size based on the stylus
+ * data before it is overwritten.
+ */
+ features->x_phy =
+ (features->x_max * features->x_resolution) / 100;
+ features->y_phy =
+ (features->y_max * features->y_resolution) / 100;
+
+ features->x_max = features->y_max =
+ get_unaligned_le16(&report[10]);
+
+ length = 11;
+ }
+ return length;
+}
+
+/*
+ * The Interface Descriptor of wacom devices can be incomplete and
+ * inconsistent, so the wacom_features table is used to store the stylus
+ * device's packet lengths, various maximum values, and tablet
+ * resolution, based on product IDs.
+ *
+ * For devices that contain 2 interfaces, the wacom_features table is
+ * inaccurate for the touch interface. Since the Interface Descriptor
+ * for touch interfaces has pretty complete data, this function exists
+ * to query the tablet for the missing information instead of hard coding
+ * it in an additional table.
+ *
+ * A typical Interface Descriptor for a stylus will contain a
+ * boot mouse application collection that is not of interest, and this
+ * function will ignore it.
+ *
+ * It also contains a digitizer application collection that is likewise not
+ * of interest, since any information it contains would duplicate
+ * what is in wacom_features. Usually it defines a report of an array
+ * of bytes that could be used as the maximum length of the stylus packet
+ * returned. If it happens to define a Digitizer-Stylus Physical Collection,
+ * the X and Y logical values contain valid data but are ignored.
+ *
+ * A typical Interface Descriptor for a touch interface will contain a
+ * Digitizer-Finger Physical Collection which defines both the logical
+ * X/Y maximums and the physical size of the tablet. Since touch
+ * interfaces have not supported pressure or distance, this is enough
+ * information to override invalid values in the wacom_features table.
+ *
+ * 3rd gen Bamboo Touch devices no longer define a Digitizer-Finger Physical
+ * Collection. Instead they define a Logical Collection with a single
+ * Logical Maximum for both X and Y.
+ */
+static int wacom_parse_hid(struct usb_interface *intf,
+ struct hid_descriptor *hid_desc,
struct wacom_features *features)
{
struct usb_device *dev = interface_to_usbdev(intf);
@@ -244,8 +310,6 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
/* penabled only accepts exact bytes of data */
if (features->type == TABLETPC2FG)
features->pktlen = WACOM_PKGLEN_GRAPHIRE;
- if (features->type == BAMBOO_PT)
- features->pktlen = WACOM_PKGLEN_BBFUN;
features->device_type = BTN_TOOL_PEN;
features->x_max =
get_unaligned_le16(&report[i + 3]);
@@ -287,8 +351,6 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
/* penabled only accepts exact bytes of data */
if (features->type == TABLETPC2FG)
features->pktlen = WACOM_PKGLEN_GRAPHIRE;
- if (features->type == BAMBOO_PT)
- features->pktlen = WACOM_PKGLEN_BBFUN;
features->device_type = BTN_TOOL_PEN;
features->y_max =
get_unaligned_le16(&report[i + 3]);
@@ -302,6 +364,11 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
i++;
break;
+ /*
+ * Requiring the Stylus Usage means that boot mouse
+ * X/Y values and some commonly reported invalid
+ * Digitizer X/Y values are ignored.
+ */
case HID_USAGE_STYLUS:
pen = 1;
i++;
@@ -309,10 +376,20 @@ static int wacom_parse_hid(struct usb_interface *intf, struct hid_descriptor *hi
}
break;
- case HID_COLLECTION:
+ case HID_COLLECTION_END:
/* reset UsagePage and Finger */
finger = usage = 0;
break;
+
+ case HID_COLLECTION:
+ i++;
+ switch (report[i]) {
+ case HID_COLLECTION_LOGICAL:
+ i += wacom_parse_logical_collection(&report[i],
+ features);
+ break;
+ }
+ break;
}
}
@@ -348,7 +425,8 @@ static int wacom_query_tablet_data(struct usb_interface *intf, struct wacom_feat
WAC_HID_FEATURE_REPORT,
report_id, rep_data, 4, 1);
} while ((error < 0 || rep_data[1] != 4) && limit++ < WAC_MSG_RETRIES);
- } else if (features->type != TABLETPC) {
+ } else if (features->type != TABLETPC &&
+ features->device_type == BTN_TOOL_PEN) {
do {
rep_data[0] = 2;
rep_data[1] = 2;
@@ -485,7 +563,8 @@ static int wacom_led_control(struct wacom *wacom)
if (!buf)
return -ENOMEM;
- if (wacom->wacom_wac.features.type == WACOM_21UX2)
+ if (wacom->wacom_wac.features.type == WACOM_21UX2 ||
+ wacom->wacom_wac.features.type == WACOM_24HD)
led = (wacom->led.select[1] << 4) | 0x40;
led |= wacom->led.select[0] | 0x4;
@@ -704,6 +783,7 @@ static int wacom_initialize_leds(struct wacom *wacom)
&intuos4_led_attr_group);
break;
+ case WACOM_24HD:
case WACOM_21UX2:
wacom->led.select[0] = 0;
wacom->led.select[1] = 0;
@@ -738,6 +818,7 @@ static void wacom_destroy_leds(struct wacom *wacom)
&intuos4_led_attr_group);
break;
+ case WACOM_24HD:
case WACOM_21UX2:
sysfs_remove_group(&wacom->intf->dev.kobj,
&cintiq_led_attr_group);
@@ -919,21 +1000,4 @@ static struct usb_driver wacom_driver = {
.supports_autosuspend = 1,
};
-static int __init wacom_init(void)
-{
- int result;
-
- result = usb_register(&wacom_driver);
- if (result == 0)
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
- return result;
-}
-
-static void __exit wacom_exit(void)
-{
- usb_deregister(&wacom_driver);
-}
-
-module_init(wacom_init);
-module_exit(wacom_exit);
+module_usb_driver(wacom_driver);
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 2ee47d0..cd3ed29 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -452,7 +452,7 @@ static void wacom_intuos_general(struct wacom_wac *wacom)
if ((data[1] & 0xb8) == 0xa0) {
t = (data[6] << 2) | ((data[7] >> 6) & 3);
if ((features->type >= INTUOS4S && features->type <= INTUOS4L) ||
- features->type == WACOM_21UX2) {
+ features->type == WACOM_21UX2 || features->type == WACOM_24HD) {
t = (t << 1) | (data[1] & 1);
}
input_report_abs(input, ABS_PRESSURE, t);
@@ -519,6 +519,56 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
input_report_key(input, wacom->tool[1], 0);
input_report_abs(input, ABS_MISC, 0);
}
+ } else if (features->type == WACOM_24HD) {
+ input_report_key(input, BTN_0, (data[6] & 0x01));
+ input_report_key(input, BTN_1, (data[6] & 0x02));
+ input_report_key(input, BTN_2, (data[6] & 0x04));
+ input_report_key(input, BTN_3, (data[6] & 0x08));
+ input_report_key(input, BTN_4, (data[6] & 0x10));
+ input_report_key(input, BTN_5, (data[6] & 0x20));
+ input_report_key(input, BTN_6, (data[6] & 0x40));
+ input_report_key(input, BTN_7, (data[6] & 0x80));
+ input_report_key(input, BTN_8, (data[8] & 0x01));
+ input_report_key(input, BTN_9, (data[8] & 0x02));
+ input_report_key(input, BTN_A, (data[8] & 0x04));
+ input_report_key(input, BTN_B, (data[8] & 0x08));
+ input_report_key(input, BTN_C, (data[8] & 0x10));
+ input_report_key(input, BTN_X, (data[8] & 0x20));
+ input_report_key(input, BTN_Y, (data[8] & 0x40));
+ input_report_key(input, BTN_Z, (data[8] & 0x80));
+
+ /*
+ * Three "buttons" are available on the 24HD, which are
+ * physically implemented as a touchstrip. Each button
+ * is approximately 3 bits wide with a 2-bit spacing.
+ * The raw touchstrip bits are stored at:
+ * ((data[3] & 0x1f) << 8) | data[4]
+ */
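+ /*
+ * With that layout, data[4] bits 0-2 cover the first button,
+ * data[4] bits 5-7 the second, and data[3] bits 2-4 (strip
+ * bits 10-12) the third, matching the masks used below.
+ */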
+ input_report_key(input, KEY_PROG1, data[4] & 0x07);
+ input_report_key(input, KEY_PROG2, data[4] & 0xE0);
+ input_report_key(input, KEY_PROG3, data[3] & 0x1C);
+
+ if (data[1] & 0x80) {
+ input_report_abs(input, ABS_WHEEL, (data[1] & 0x7f));
+ } else {
+ /* Out of proximity, clear wheel value. */
+ input_report_abs(input, ABS_WHEEL, 0);
+ }
+
+ if (data[2] & 0x80) {
+ input_report_abs(input, ABS_THROTTLE, (data[2] & 0x7f));
+ } else {
+ /* Out of proximity, clear second wheel value. */
+ input_report_abs(input, ABS_THROTTLE, 0);
+ }
+
+ if (data[1] | data[2] | (data[3] & 0x1f) | data[4] | data[6] | data[8]) {
+ input_report_key(input, wacom->tool[1], 1);
+ input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
+ } else {
+ input_report_key(input, wacom->tool[1], 0);
+ input_report_abs(input, ABS_MISC, 0);
+ }
} else {
if (features->type == WACOM_21UX2) {
input_report_key(input, BTN_0, (data[5] & 0x01));
@@ -799,6 +849,9 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
unsigned char *data = wacom->data;
int i;
+ if (data[0] != 0x02)
+ return 0;
+
for (i = 0; i < 2; i++) {
int offset = (data[1] & 0x80) ? (8 * i) : (9 * i);
bool touch = data[offset + 3] & 0x80;
@@ -837,18 +890,77 @@ static int wacom_bpt_touch(struct wacom_wac *wacom)
return 0;
}
+static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
+{
+ struct input_dev *input = wacom->input;
+ int slot_id = data[0] - 2; /* data[0] is between 2 and 17 */
+ bool touch = data[1] & 0x80;
+
+ touch = touch && !wacom->shared->stylus_in_proximity;
+
+ input_mt_slot(input, slot_id);
+ input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
+
+ if (touch) {
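+ /* 12-bit X/Y: upper 8 bits in data[2]/data[3], lower nibbles packed into data[4] */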
+ int x = (data[2] << 4) | (data[4] >> 4);
+ int y = (data[3] << 4) | (data[4] & 0x0f);
+ int w = data[6];
+
+ input_report_abs(input, ABS_MT_POSITION_X, x);
+ input_report_abs(input, ABS_MT_POSITION_Y, y);
+ input_report_abs(input, ABS_MT_TOUCH_MAJOR, w);
+ }
+}
+
+static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data)
+{
+ struct input_dev *input = wacom->input;
+
+ input_report_key(input, BTN_LEFT, (data[1] & 0x08) != 0);
+ input_report_key(input, BTN_FORWARD, (data[1] & 0x04) != 0);
+ input_report_key(input, BTN_BACK, (data[1] & 0x02) != 0);
+ input_report_key(input, BTN_RIGHT, (data[1] & 0x01) != 0);
+}
+
+static int wacom_bpt3_touch(struct wacom_wac *wacom)
+{
+ struct input_dev *input = wacom->input;
+ unsigned char *data = wacom->data;
+ int count = data[1] & 0x07;
+ int i;
+
+ if (data[0] != 0x02)
+ return 0;
+
+ /* data has up to 7 fixed-size 8-byte messages starting at data[2] */
+ for (i = 0; i < count; i++) {
+ int offset = (8 * i) + 2;
+ int msg_id = data[offset];
+
+ if (msg_id >= 2 && msg_id <= 17)
+ wacom_bpt3_touch_msg(wacom, data + offset);
+ else if (msg_id == 128)
+ wacom_bpt3_button_msg(wacom, data + offset);
+
+ }
+
+ input_mt_report_pointer_emulation(input, true);
+
+ input_sync(input);
+
+ return 0;
+}
+
static int wacom_bpt_pen(struct wacom_wac *wacom)
{
struct input_dev *input = wacom->input;
unsigned char *data = wacom->data;
int prox = 0, x = 0, y = 0, p = 0, d = 0, pen = 0, btn1 = 0, btn2 = 0;
- /*
- * Similar to Graphire protocol, data[1] & 0x20 is proximity and
- * data[1] & 0x18 is tool ID. 0x30 is safety check to ignore
- * 2 unused tool ID's.
- */
- prox = (data[1] & 0x30) == 0x30;
+ if (data[0] != 0x02)
+ return 0;
+
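+ /* as in the Graphire protocol, data[1] & 0x20 is the proximity bit */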
+ prox = (data[1] & 0x20) == 0x20;
/*
* All reports shared between PEN and RUBBER tool must be
@@ -912,7 +1024,9 @@ static int wacom_bpt_irq(struct wacom_wac *wacom, size_t len)
{
if (len == WACOM_PKGLEN_BBTOUCH)
return wacom_bpt_touch(wacom);
- else if (len == WACOM_PKGLEN_BBFUN)
+ else if (len == WACOM_PKGLEN_BBTOUCH3)
+ return wacom_bpt3_touch(wacom);
+ else if (len == WACOM_PKGLEN_BBFUN || len == WACOM_PKGLEN_BBPEN)
return wacom_bpt_pen(wacom);
return 0;
@@ -955,6 +1069,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
case CINTIQ:
case WACOM_BEE:
case WACOM_21UX2:
+ case WACOM_24HD:
sync = wacom_intuos_irq(wacom_wac);
break;
@@ -1031,9 +1146,9 @@ void wacom_setup_device_quirks(struct wacom_features *features)
features->type == BAMBOO_PT)
features->quirks |= WACOM_QUIRK_MULTI_INPUT;
- /* quirks for bamboo touch */
+ /* quirk for bamboo touch with 2 low res touches */
if (features->type == BAMBOO_PT &&
- features->device_type == BTN_TOOL_DOUBLETAP) {
+ features->pktlen == WACOM_PKGLEN_BBTOUCH) {
features->x_max <<= 5;
features->y_max <<= 5;
features->x_fuzz <<= 5;
@@ -1110,6 +1225,26 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
__set_bit(INPUT_PROP_POINTER, input_dev->propbit);
break;
+ case WACOM_24HD:
+ __set_bit(BTN_A, input_dev->keybit);
+ __set_bit(BTN_B, input_dev->keybit);
+ __set_bit(BTN_C, input_dev->keybit);
+ __set_bit(BTN_X, input_dev->keybit);
+ __set_bit(BTN_Y, input_dev->keybit);
+ __set_bit(BTN_Z, input_dev->keybit);
+
+ for (i = 0; i < 10; i++)
+ __set_bit(BTN_0 + i, input_dev->keybit);
+
+ __set_bit(KEY_PROG1, input_dev->keybit);
+ __set_bit(KEY_PROG2, input_dev->keybit);
+ __set_bit(KEY_PROG3, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+ input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0);
+ wacom_setup_cintiq(wacom_wac);
+ break;
+
case WACOM_21UX2:
__set_bit(BTN_A, input_dev->keybit);
__set_bit(BTN_B, input_dev->keybit);
@@ -1240,7 +1375,21 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev,
__set_bit(BTN_TOOL_FINGER, input_dev->keybit);
__set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
- input_mt_init_slots(input_dev, 2);
+ if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
+ __set_bit(BTN_TOOL_TRIPLETAP,
+ input_dev->keybit);
+ __set_bit(BTN_TOOL_QUADTAP,
+ input_dev->keybit);
+
+ input_mt_init_slots(input_dev, 16);
+
+ input_set_abs_params(input_dev,
+ ABS_MT_TOUCH_MAJOR,
+ 0, 255, 0, 0);
+ } else {
+ input_mt_init_slots(input_dev, 2);
+ }
+
input_set_abs_params(input_dev, ABS_MT_POSITION_X,
0, features->x_max,
features->x_fuzz, 0);
@@ -1425,6 +1574,9 @@ static const struct wacom_features wacom_features_0xBB =
static const struct wacom_features wacom_features_0xBC =
{ "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40840, 25400, 2047,
63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+static const struct wacom_features wacom_features_0xF4 =
+ { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047,
+ 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0x3F =
{ "Wacom Cintiq 21UX", WACOM_PKGLEN_INTUOS, 87200, 65600, 1023,
63, CINTIQ, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
@@ -1509,6 +1661,15 @@ static const struct wacom_features wacom_features_0xDA =
static struct wacom_features wacom_features_0xDB =
{ "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023,
31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0xDD =
+ { "Wacom Bamboo Connect", WACOM_PKGLEN_BBPEN, 14720, 9200, 1023,
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0xDE =
+ { "Wacom Bamboo 16FG 4x5", WACOM_PKGLEN_BBPEN, 14720, 9200, 1023,
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+static const struct wacom_features wacom_features_0xDF =
+ { "Wacom Bamboo 16FG 6x8", WACOM_PKGLEN_BBPEN, 21648, 13700, 1023,
+ 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x6004 =
{ "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255,
0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1604,6 +1765,9 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xD8) },
{ USB_DEVICE_WACOM(0xDA) },
{ USB_DEVICE_WACOM(0xDB) },
+ { USB_DEVICE_WACOM(0xDD) },
+ { USB_DEVICE_WACOM(0xDE) },
+ { USB_DEVICE_WACOM(0xDF) },
{ USB_DEVICE_WACOM(0xF0) },
{ USB_DEVICE_WACOM(0xCC) },
{ USB_DEVICE_WACOM(0x90) },
@@ -1616,6 +1780,7 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xE6) },
{ USB_DEVICE_WACOM(0xEC) },
{ USB_DEVICE_WACOM(0x47) },
+ { USB_DEVICE_WACOM(0xF4) },
{ USB_DEVICE_LENOVO(0x6004) },
{ }
};
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index 53eb71b..050acae 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -12,7 +12,7 @@
#include <linux/types.h>
/* maximum packet length for USB devices */
-#define WACOM_PKGLEN_MAX 32
+#define WACOM_PKGLEN_MAX 64
/* packet length for individual models */
#define WACOM_PKGLEN_PENPRTN 7
@@ -22,6 +22,8 @@
#define WACOM_PKGLEN_TPC1FG 5
#define WACOM_PKGLEN_TPC2FG 14
#define WACOM_PKGLEN_BBTOUCH 20
+#define WACOM_PKGLEN_BBTOUCH3 64
+#define WACOM_PKGLEN_BBPEN 10
/* device IDs */
#define STYLUS_DEVICE_ID 0x02
@@ -57,6 +59,7 @@ enum {
INTUOS4S,
INTUOS4,
INTUOS4L,
+ WACOM_24HD,
WACOM_21UX2,
CINTIQ,
WACOM_BEE,
diff --git a/drivers/input/touchscreen/88pm860x-ts.c b/drivers/input/touchscreen/88pm860x-ts.c
index b3aebc2..05f30b7 100644
--- a/drivers/input/touchscreen/88pm860x-ts.c
+++ b/drivers/input/touchscreen/88pm860x-ts.c
@@ -217,18 +217,7 @@ static struct platform_driver pm860x_touch_driver = {
.probe = pm860x_touch_probe,
.remove = __devexit_p(pm860x_touch_remove),
};
-
-static int __init pm860x_touch_init(void)
-{
- return platform_driver_register(&pm860x_touch_driver);
-}
-module_init(pm860x_touch_init);
-
-static void __exit pm860x_touch_exit(void)
-{
- platform_driver_unregister(&pm860x_touch_driver);
-}
-module_exit(pm860x_touch_exit);
+module_platform_driver(pm860x_touch_driver);
MODULE_DESCRIPTION("Touchscreen driver for Marvell Semiconductor 88PM860x");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index 3488ffe..4af2a18 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -98,6 +98,19 @@ config TOUCHSCREEN_ATMEL_MXT
To compile this driver as a module, choose M here: the
module will be called atmel_mxt_ts.
+config TOUCHSCREEN_AUO_PIXCIR
+ tristate "AUO in-cell touchscreen using Pixcir ICs"
+ depends on I2C
+ depends on GPIOLIB
+ help
+ Say Y here if you have an AUO display with an in-cell touchscreen
+ using Pixcir ICs.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called auo-pixcir-ts.
+
config TOUCHSCREEN_BITSY
tristate "Compaq iPAQ H3600 (Bitsy) touchscreen"
depends on SA1100_BITSY
@@ -177,6 +190,16 @@ config TOUCHSCREEN_EETI
To compile this driver as a module, choose M here: the
module will be called eeti_ts.
+config TOUCHSCREEN_EGALAX
+ tristate "EETI eGalax multi-touch panel support"
+ depends on I2C
+ help
+ Say Y here to enable support for I2C-connected EETI
+ eGalax multi-touch panels.
+
+ To compile this driver as a module, choose M here: the
+ module will be called egalax_ts.
+
config TOUCHSCREEN_FUJITSU
tristate "Fujitsu serial touchscreen"
select SERIO
@@ -435,6 +458,18 @@ config TOUCHSCREEN_UCB1400
To compile this driver as a module, choose M here: the
module will be called ucb1400_ts.
+config TOUCHSCREEN_PIXCIR
+ tristate "PIXCIR I2C touchscreens"
+ depends on I2C
+ help
+ Say Y here if you have a PIXCIR I2C touchscreen
+ controller.
+
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called pixcir_i2c_ts.
+
config TOUCHSCREEN_WM831X
tristate "Support for WM831x touchscreen controllers"
depends on MFD_WM831X
@@ -541,6 +576,7 @@ config TOUCHSCREEN_USB_COMPOSITE
- GoTop Super_Q2/GogoPen/PenPower tablets
- JASTEC USB Touch Controller/DigiTech DTR-02U
- Zytronic controllers
+ - Elo TouchSystems 2700 IntelliTouch
Have a look at <http://linux.chapter7.ch/touchkit/> for
a usage description and the required user-space stuff.
@@ -620,6 +656,11 @@ config TOUCHSCREEN_USB_JASTEC
bool "JASTEC/DigiTech DTR-02U USB touch controller device support" if EXPERT
depends on TOUCHSCREEN_USB_COMPOSITE
+config TOUCHSCREEN_USB_ELO
+ default y
+ bool "Elo TouchSystems 2700 IntelliTouch controller device support" if EXPERT
+ depends on TOUCHSCREEN_USB_COMPOSITE
+
config TOUCHSCREEN_USB_E2I
default y
bool "e2i Touchscreen controller (e.g. from Mimo 740)"
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index f957676..496091e 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_TOUCHSCREEN_AD7879_SPI) += ad7879-spi.o
obj-$(CONFIG_TOUCHSCREEN_ADS7846) += ads7846.o
obj-$(CONFIG_TOUCHSCREEN_ATMEL_MXT) += atmel_mxt_ts.o
obj-$(CONFIG_TOUCHSCREEN_ATMEL_TSADCC) += atmel_tsadcc.o
+obj-$(CONFIG_TOUCHSCREEN_AUO_PIXCIR) += auo-pixcir-ts.o
obj-$(CONFIG_TOUCHSCREEN_BITSY) += h3600_ts_input.o
obj-$(CONFIG_TOUCHSCREEN_BU21013) += bu21013_ts.o
obj-$(CONFIG_TOUCHSCREEN_CY8CTMG110) += cy8ctmg110_ts.o
@@ -23,6 +24,7 @@ obj-$(CONFIG_TOUCHSCREEN_HAMPSHIRE) += hampshire.o
obj-$(CONFIG_TOUCHSCREEN_GUNZE) += gunze.o
obj-$(CONFIG_TOUCHSCREEN_EETI) += eeti_ts.o
obj-$(CONFIG_TOUCHSCREEN_ELO) += elo.o
+obj-$(CONFIG_TOUCHSCREEN_EGALAX) += egalax_ts.o
obj-$(CONFIG_TOUCHSCREEN_FUJITSU) += fujitsu_ts.o
obj-$(CONFIG_TOUCHSCREEN_INEXIO) += inexio.o
obj-$(CONFIG_TOUCHSCREEN_INTEL_MID) += intel-mid-touch.o
@@ -39,6 +41,7 @@ obj-$(CONFIG_TOUCHSCREEN_HTCPEN) += htcpen.o
obj-$(CONFIG_TOUCHSCREEN_USB_COMPOSITE) += usbtouchscreen.o
obj-$(CONFIG_TOUCHSCREEN_PCAP) += pcap_ts.o
obj-$(CONFIG_TOUCHSCREEN_PENMOUNT) += penmount.o
+obj-$(CONFIG_TOUCHSCREEN_PIXCIR) += pixcir_i2c_ts.o
obj-$(CONFIG_TOUCHSCREEN_S3C2410) += s3c2410_ts.o
obj-$(CONFIG_TOUCHSCREEN_ST1232) += st1232.o
obj-$(CONFIG_TOUCHSCREEN_STMPE) += stmpe-ts.o
diff --git a/drivers/input/touchscreen/ad7877.c b/drivers/input/touchscreen/ad7877.c
index 400131d..49a36df 100644
--- a/drivers/input/touchscreen/ad7877.c
+++ b/drivers/input/touchscreen/ad7877.c
@@ -488,10 +488,10 @@ static ssize_t ad7877_disable_store(struct device *dev,
const char *buf, size_t count)
{
struct ad7877 *ts = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned int val;
int error;
- error = strict_strtoul(buf, 10, &val);
+ error = kstrtouint(buf, 10, &val);
if (error)
return error;
@@ -518,10 +518,10 @@ static ssize_t ad7877_dac_store(struct device *dev,
const char *buf, size_t count)
{
struct ad7877 *ts = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned int val;
int error;
- error = strict_strtoul(buf, 10, &val);
+ error = kstrtouint(buf, 10, &val);
if (error)
return error;
@@ -548,10 +548,10 @@ static ssize_t ad7877_gpio3_store(struct device *dev,
const char *buf, size_t count)
{
struct ad7877 *ts = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned int val;
int error;
- error = strict_strtoul(buf, 10, &val);
+ error = kstrtouint(buf, 10, &val);
if (error)
return error;
@@ -579,10 +579,10 @@ static ssize_t ad7877_gpio4_store(struct device *dev,
const char *buf, size_t count)
{
struct ad7877 *ts = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned int val;
int error;
- error = strict_strtoul(buf, 10, &val);
+ error = kstrtouint(buf, 10, &val);
if (error)
return error;
@@ -612,10 +612,10 @@ static struct attribute *ad7877_attributes[] = {
NULL
};
-static mode_t ad7877_attr_is_visible(struct kobject *kobj,
+static umode_t ad7877_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
- mode_t mode = attr->mode;
+ umode_t mode = attr->mode;
if (attr == &dev_attr_aux3.attr) {
if (gpio3)
@@ -853,7 +853,6 @@ static SIMPLE_DEV_PM_OPS(ad7877_pm, ad7877_suspend, ad7877_resume);
static struct spi_driver ad7877_driver = {
.driver = {
.name = "ad7877",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
.pm = &ad7877_pm,
},
diff --git a/drivers/input/touchscreen/ad7879-i2c.c b/drivers/input/touchscreen/ad7879-i2c.c
index c789b97..0dac671 100644
--- a/drivers/input/touchscreen/ad7879-i2c.c
+++ b/drivers/input/touchscreen/ad7879-i2c.c
@@ -16,30 +16,6 @@
#define AD7879_DEVID 0x79 /* AD7879-1/AD7889-1 */
-#ifdef CONFIG_PM_SLEEP
-static int ad7879_i2c_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct ad7879 *ts = i2c_get_clientdata(client);
-
- ad7879_suspend(ts);
-
- return 0;
-}
-
-static int ad7879_i2c_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct ad7879 *ts = i2c_get_clientdata(client);
-
- ad7879_resume(ts);
-
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(ad7879_i2c_pm, ad7879_i2c_suspend, ad7879_i2c_resume);
-
/* All registers are word-sized.
* AD7879 uses a high-byte first convention.
*/
@@ -47,7 +23,7 @@ static int ad7879_i2c_read(struct device *dev, u8 reg)
{
struct i2c_client *client = to_i2c_client(dev);
- return swab16(i2c_smbus_read_word_data(client, reg));
+ return i2c_smbus_read_word_swapped(client, reg);
}
static int ad7879_i2c_multi_read(struct device *dev,
@@ -68,7 +44,7 @@ static int ad7879_i2c_write(struct device *dev, u8 reg, u16 val)
{
struct i2c_client *client = to_i2c_client(dev);
- return i2c_smbus_write_word_data(client, reg, swab16(val));
+ return i2c_smbus_write_word_swapped(client, reg, val);
}
static const struct ad7879_bus_ops ad7879_i2c_bus_ops = {
@@ -119,7 +95,7 @@ static struct i2c_driver ad7879_i2c_driver = {
.driver = {
.name = "ad7879",
.owner = THIS_MODULE,
- .pm = &ad7879_i2c_pm,
+ .pm = &ad7879_pm_ops,
},
.probe = ad7879_i2c_probe,
.remove = __devexit_p(ad7879_i2c_remove),
@@ -141,4 +117,3 @@ module_exit(ad7879_i2c_exit);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("AD7879(-1) touchscreen I2C bus driver");
MODULE_LICENSE("GPL");
-MODULE_ALIAS("i2c:ad7879");
diff --git a/drivers/input/touchscreen/ad7879-spi.c b/drivers/input/touchscreen/ad7879-spi.c
index b1643c8..9b2e1c2 100644
--- a/drivers/input/touchscreen/ad7879-spi.c
+++ b/drivers/input/touchscreen/ad7879-spi.c
@@ -22,30 +22,6 @@
#define AD7879_WRITECMD(reg) (AD7879_CMD(reg))
#define AD7879_READCMD(reg) (AD7879_CMD(reg) | AD7879_CMD_READ)
-#ifdef CONFIG_PM_SLEEP
-static int ad7879_spi_suspend(struct device *dev)
-{
- struct spi_device *spi = to_spi_device(dev);
- struct ad7879 *ts = spi_get_drvdata(spi);
-
- ad7879_suspend(ts);
-
- return 0;
-}
-
-static int ad7879_spi_resume(struct device *dev)
-{
- struct spi_device *spi = to_spi_device(dev);
- struct ad7879 *ts = spi_get_drvdata(spi);
-
- ad7879_resume(ts);
-
- return 0;
-}
-#endif
-
-static SIMPLE_DEV_PM_OPS(ad7879_spi_pm, ad7879_spi_suspend, ad7879_spi_resume);
-
/*
* ad7879_read/write are only used for initial setup and for sysfs controls.
* The main traffic is done in ad7879_collect().
@@ -174,9 +150,8 @@ static int __devexit ad7879_spi_remove(struct spi_device *spi)
static struct spi_driver ad7879_spi_driver = {
.driver = {
.name = "ad7879",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
- .pm = &ad7879_spi_pm,
+ .pm = &ad7879_pm_ops,
},
.probe = ad7879_spi_probe,
.remove = __devexit_p(ad7879_spi_remove),
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index 3b2e9ed..e2482b4 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -281,8 +281,11 @@ static void ad7879_close(struct input_dev* input)
__ad7879_disable(ts);
}
-void ad7879_suspend(struct ad7879 *ts)
+#ifdef CONFIG_PM_SLEEP
+static int ad7879_suspend(struct device *dev)
{
+ struct ad7879 *ts = dev_get_drvdata(dev);
+
mutex_lock(&ts->input->mutex);
if (!ts->suspended && !ts->disabled && ts->input->users)
@@ -291,11 +294,14 @@ void ad7879_suspend(struct ad7879 *ts)
ts->suspended = true;
mutex_unlock(&ts->input->mutex);
+
+ return 0;
}
-EXPORT_SYMBOL(ad7879_suspend);
-void ad7879_resume(struct ad7879 *ts)
+static int ad7879_resume(struct device *dev)
{
+ struct ad7879 *ts = dev_get_drvdata(dev);
+
mutex_lock(&ts->input->mutex);
if (ts->suspended && !ts->disabled && ts->input->users)
@@ -304,8 +310,13 @@ void ad7879_resume(struct ad7879 *ts)
ts->suspended = false;
mutex_unlock(&ts->input->mutex);
+
+ return 0;
}
-EXPORT_SYMBOL(ad7879_resume);
+#endif
+
+SIMPLE_DEV_PM_OPS(ad7879_pm_ops, ad7879_suspend, ad7879_resume);
+EXPORT_SYMBOL(ad7879_pm_ops);
static void ad7879_toggle(struct ad7879 *ts, bool disable)
{
@@ -340,10 +351,10 @@ static ssize_t ad7879_disable_store(struct device *dev,
const char *buf, size_t count)
{
struct ad7879 *ts = dev_get_drvdata(dev);
- unsigned long val;
+ unsigned int val;
int error;
- error = strict_strtoul(buf, 10, &val);
+ error = kstrtouint(buf, 10, &val);
if (error)
return error;
diff --git a/drivers/input/touchscreen/ad7879.h b/drivers/input/touchscreen/ad7879.h
index 6b45a27..6fd13c4 100644
--- a/drivers/input/touchscreen/ad7879.h
+++ b/drivers/input/touchscreen/ad7879.h
@@ -21,8 +21,8 @@ struct ad7879_bus_ops {
int (*write)(struct device *dev, u8 reg, u16 val);
};
-void ad7879_suspend(struct ad7879 *);
-void ad7879_resume(struct ad7879 *);
+extern const struct dev_pm_ops ad7879_pm_ops;
+
struct ad7879 *ad7879_probe(struct device *dev, u8 devid, unsigned irq,
const struct ad7879_bus_ops *bops);
void ad7879_remove(struct ad7879 *);
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index de31ec6..23fd901 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -602,10 +602,12 @@ static ssize_t ads7846_disable_store(struct device *dev,
const char *buf, size_t count)
{
struct ads7846 *ts = dev_get_drvdata(dev);
- unsigned long i;
+ unsigned int i;
+ int err;
- if (strict_strtoul(buf, 10, &i))
- return -EINVAL;
+ err = kstrtouint(buf, 10, &i);
+ if (err)
+ return err;
if (i)
ads7846_disable(ts);
@@ -1424,7 +1426,6 @@ static int __devexit ads7846_remove(struct spi_device *spi)
static struct spi_driver ads7846_driver = {
.driver = {
.name = "ads7846",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
.pm = &ads7846_pm,
},
diff --git a/drivers/input/touchscreen/atmel_tsadcc.c b/drivers/input/touchscreen/atmel_tsadcc.c
index 122a878..201b2d2 100644
--- a/drivers/input/touchscreen/atmel_tsadcc.c
+++ b/drivers/input/touchscreen/atmel_tsadcc.c
@@ -351,20 +351,7 @@ static struct platform_driver atmel_tsadcc_driver = {
.name = "atmel_tsadcc",
},
};
-
-static int __init atmel_tsadcc_init(void)
-{
- return platform_driver_register(&atmel_tsadcc_driver);
-}
-
-static void __exit atmel_tsadcc_exit(void)
-{
- platform_driver_unregister(&atmel_tsadcc_driver);
-}
-
-module_init(atmel_tsadcc_init);
-module_exit(atmel_tsadcc_exit);
-
+module_platform_driver(atmel_tsadcc_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Atmel TouchScreen Driver");
diff --git a/drivers/input/touchscreen/auo-pixcir-ts.c b/drivers/input/touchscreen/auo-pixcir-ts.c
new file mode 100644
index 0000000..94fb9fbb
--- /dev/null
+++ b/drivers/input/touchscreen/auo-pixcir-ts.c
@@ -0,0 +1,652 @@
+/*
+ * Driver for AUO in-cell touchscreens
+ *
+ * Copyright (c) 2011 Heiko Stuebner <heiko@sntech.de>
+ *
+ * loosely based on auo_touch.c from Dell Streak vendor-kernel
+ *
+ * Copyright (c) 2008 QUALCOMM Incorporated.
+ * Copyright (c) 2008 QUALCOMM USA, INC.
+ *
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/input/auo-pixcir-ts.h>
+
+/*
+ * Coordinate calculation:
+ * X1 = X1_LSB + X1_MSB*256
+ * Y1 = Y1_LSB + Y1_MSB*256
+ * X2 = X2_LSB + X2_MSB*256
+ * Y2 = Y2_LSB + Y2_MSB*256
+ */
+#define AUO_PIXCIR_REG_X1_LSB 0x00
+#define AUO_PIXCIR_REG_X1_MSB 0x01
+#define AUO_PIXCIR_REG_Y1_LSB 0x02
+#define AUO_PIXCIR_REG_Y1_MSB 0x03
+#define AUO_PIXCIR_REG_X2_LSB 0x04
+#define AUO_PIXCIR_REG_X2_MSB 0x05
+#define AUO_PIXCIR_REG_Y2_LSB 0x06
+#define AUO_PIXCIR_REG_Y2_MSB 0x07
+
+#define AUO_PIXCIR_REG_STRENGTH 0x0d
+#define AUO_PIXCIR_REG_STRENGTH_X1_LSB 0x0e
+#define AUO_PIXCIR_REG_STRENGTH_X1_MSB 0x0f
+
+#define AUO_PIXCIR_REG_RAW_DATA_X 0x2b
+#define AUO_PIXCIR_REG_RAW_DATA_Y 0x4f
+
+#define AUO_PIXCIR_REG_X_SENSITIVITY 0x6f
+#define AUO_PIXCIR_REG_Y_SENSITIVITY 0x70
+#define AUO_PIXCIR_REG_INT_SETTING 0x71
+#define AUO_PIXCIR_REG_INT_WIDTH 0x72
+#define AUO_PIXCIR_REG_POWER_MODE 0x73
+
+#define AUO_PIXCIR_REG_VERSION 0x77
+#define AUO_PIXCIR_REG_CALIBRATE 0x78
+
+#define AUO_PIXCIR_REG_TOUCHAREA_X1 0x1e
+#define AUO_PIXCIR_REG_TOUCHAREA_Y1 0x1f
+#define AUO_PIXCIR_REG_TOUCHAREA_X2 0x20
+#define AUO_PIXCIR_REG_TOUCHAREA_Y2 0x21
+
+#define AUO_PIXCIR_REG_EEPROM_CALIB_X 0x42
+#define AUO_PIXCIR_REG_EEPROM_CALIB_Y 0xad
+
+#define AUO_PIXCIR_INT_TPNUM_MASK 0xe0
+#define AUO_PIXCIR_INT_TPNUM_SHIFT 5
+#define AUO_PIXCIR_INT_RELEASE (1 << 4)
+#define AUO_PIXCIR_INT_ENABLE (1 << 3)
+#define AUO_PIXCIR_INT_POL_HIGH (1 << 2)
+#define AUO_PIXCIR_INT_MODE_MASK 0x03
+
+/*
+ * Power modes:
+ * active: scan speed 60Hz
+ * sleep: scan speed 10Hz; can be auto-activated, wakes up on 1st touch
+ * deep sleep: scan speed 1Hz; can only be entered or left manually.
+ */
+#define AUO_PIXCIR_POWER_ACTIVE 0x00
+#define AUO_PIXCIR_POWER_SLEEP 0x01
+#define AUO_PIXCIR_POWER_DEEP_SLEEP 0x02
+#define AUO_PIXCIR_POWER_MASK 0x03
+
+#define AUO_PIXCIR_POWER_ALLOW_SLEEP (1 << 2)
+#define AUO_PIXCIR_POWER_IDLE_TIME(ms) ((ms & 0xf) << 4)
+
+#define AUO_PIXCIR_CALIBRATE 0x03
+
+#define AUO_PIXCIR_EEPROM_CALIB_X_LEN 62
+#define AUO_PIXCIR_EEPROM_CALIB_Y_LEN 36
+
+#define AUO_PIXCIR_RAW_DATA_X_LEN 18
+#define AUO_PIXCIR_RAW_DATA_Y_LEN 11
+
+#define AUO_PIXCIR_STRENGTH_ENABLE (1 << 0)
+
+/* Touchscreen absolute values */
+#define AUO_PIXCIR_REPORT_POINTS 2
+#define AUO_PIXCIR_MAX_AREA 0xff
+#define AUO_PIXCIR_PENUP_TIMEOUT_MS 10
+
+struct auo_pixcir_ts {
+ struct i2c_client *client;
+ struct input_dev *input;
+ char phys[32];
+
+ /* special handling for touch_indicate interrupt mode */
+ bool touch_ind_mode;
+
+ wait_queue_head_t wait;
+ bool stopped;
+};
+
+struct auo_point_t {
+ int coord_x;
+ int coord_y;
+ int area_major;
+ int area_minor;
+ int orientation;
+};
+
+static int auo_pixcir_collect_data(struct auo_pixcir_ts *ts,
+ struct auo_point_t *point)
+{
+ struct i2c_client *client = ts->client;
+ const struct auo_pixcir_ts_platdata *pdata = client->dev.platform_data;
+ uint8_t raw_coord[8];
+ uint8_t raw_area[4];
+ int i, ret;
+
+ /* touch coordinates */
+ ret = i2c_smbus_read_i2c_block_data(client, AUO_PIXCIR_REG_X1_LSB,
+ 8, raw_coord);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to read coordinate, %d\n", ret);
+ return ret;
+ }
+
+ /* touch area */
+ ret = i2c_smbus_read_i2c_block_data(client, AUO_PIXCIR_REG_TOUCHAREA_X1,
+ 4, raw_area);
+ if (ret < 0) {
+ dev_err(&client->dev, "could not read touch area, %d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < AUO_PIXCIR_REPORT_POINTS; i++) {
+ point[i].coord_x =
+ raw_coord[4 * i + 1] << 8 | raw_coord[4 * i];
+ point[i].coord_y =
+ raw_coord[4 * i + 3] << 8 | raw_coord[4 * i + 2];
+
+ if (point[i].coord_x > pdata->x_max ||
+ point[i].coord_y > pdata->y_max) {
+ dev_warn(&client->dev, "coordinates (%d,%d) invalid\n",
+ point[i].coord_x, point[i].coord_y);
+ point[i].coord_x = point[i].coord_y = 0;
+ }
+
+ /* determine touch major, minor and orientation */
+ point[i].area_major = max(raw_area[2 * i], raw_area[2 * i + 1]);
+ point[i].area_minor = min(raw_area[2 * i], raw_area[2 * i + 1]);
+ point[i].orientation = raw_area[2 * i] > raw_area[2 * i + 1];
+ }
+
+ return 0;
+}
+
+static irqreturn_t auo_pixcir_interrupt(int irq, void *dev_id)
+{
+ struct auo_pixcir_ts *ts = dev_id;
+ struct i2c_client *client = ts->client;
+ const struct auo_pixcir_ts_platdata *pdata = client->dev.platform_data;
+ struct auo_point_t point[AUO_PIXCIR_REPORT_POINTS];
+ int i;
+ int ret;
+ int fingers = 0;
+ int abs = -1;
+
+ while (!ts->stopped) {
+
+ /* check for an up event in touch_ind_mode */
+ if (ts->touch_ind_mode) {
+ if (gpio_get_value(pdata->gpio_int) == 0) {
+ input_mt_sync(ts->input);
+ input_report_key(ts->input, BTN_TOUCH, 0);
+ input_sync(ts->input);
+ break;
+ }
+ }
+
+ ret = auo_pixcir_collect_data(ts, point);
+ if (ret < 0) {
+ /* we want to loop only in touch_ind_mode */
+ if (!ts->touch_ind_mode)
+ break;
+
+ wait_event_timeout(ts->wait, ts->stopped,
+ msecs_to_jiffies(AUO_PIXCIR_PENUP_TIMEOUT_MS));
+ continue;
+ }
+
+ for (i = 0; i < AUO_PIXCIR_REPORT_POINTS; i++) {
+ if (point[i].coord_x > 0 || point[i].coord_y > 0) {
+ input_report_abs(ts->input, ABS_MT_POSITION_X,
+ point[i].coord_x);
+ input_report_abs(ts->input, ABS_MT_POSITION_Y,
+ point[i].coord_y);
+ input_report_abs(ts->input, ABS_MT_TOUCH_MAJOR,
+ point[i].area_major);
+ input_report_abs(ts->input, ABS_MT_TOUCH_MINOR,
+ point[i].area_minor);
+ input_report_abs(ts->input, ABS_MT_ORIENTATION,
+ point[i].orientation);
+ input_mt_sync(ts->input);
+
+ /* use the first finger as the source for single-touch */
+ if (fingers == 0)
+ abs = i;
+
+ /* number of touch points could also be queried
+ * via i2c but would require an additional call
+ */
+ fingers++;
+ }
+ }
+
+ input_report_key(ts->input, BTN_TOUCH, fingers > 0);
+
+ if (abs > -1) {
+ input_report_abs(ts->input, ABS_X, point[abs].coord_x);
+ input_report_abs(ts->input, ABS_Y, point[abs].coord_y);
+ }
+
+ input_sync(ts->input);
+
+ /* we want to loop only in touch_ind_mode */
+ if (!ts->touch_ind_mode)
+ break;
+
+ wait_event_timeout(ts->wait, ts->stopped,
+ msecs_to_jiffies(AUO_PIXCIR_PENUP_TIMEOUT_MS));
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Set the power mode of the device.
+ * Valid modes are
+ * - AUO_PIXCIR_POWER_ACTIVE
+ * - AUO_PIXCIR_POWER_SLEEP - automatically left on first touch
+ * - AUO_PIXCIR_POWER_DEEP_SLEEP
+ */
+static int auo_pixcir_power_mode(struct auo_pixcir_ts *ts, int mode)
+{
+ struct i2c_client *client = ts->client;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, AUO_PIXCIR_REG_POWER_MODE);
+ if (ret < 0) {
+ dev_err(&client->dev, "unable to read reg %Xh, %d\n",
+ AUO_PIXCIR_REG_POWER_MODE, ret);
+ return ret;
+ }
+
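+	/* modify only the power-mode bits and keep the rest of the register */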
+ ret &= ~AUO_PIXCIR_POWER_MASK;
+ ret |= mode;
+
+ ret = i2c_smbus_write_byte_data(client, AUO_PIXCIR_REG_POWER_MODE, ret);
+ if (ret) {
+ dev_err(&client->dev, "unable to write reg %Xh, %d\n",
+ AUO_PIXCIR_REG_POWER_MODE, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static __devinit int auo_pixcir_int_config(struct auo_pixcir_ts *ts,
+ int int_setting)
+{
+ struct i2c_client *client = ts->client;
+ struct auo_pixcir_ts_platdata *pdata = client->dev.platform_data;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, AUO_PIXCIR_REG_INT_SETTING);
+ if (ret < 0) {
+ dev_err(&client->dev, "unable to read reg %Xh, %d\n",
+ AUO_PIXCIR_REG_INT_SETTING, ret);
+ return ret;
+ }
+
+ ret &= ~AUO_PIXCIR_INT_MODE_MASK;
+ ret |= int_setting;
+ ret |= AUO_PIXCIR_INT_POL_HIGH; /* always use high for interrupts */
+
+ ret = i2c_smbus_write_byte_data(client, AUO_PIXCIR_REG_INT_SETTING,
+ ret);
+ if (ret < 0) {
+ dev_err(&client->dev, "unable to write reg %Xh, %d\n",
+ AUO_PIXCIR_REG_INT_SETTING, ret);
+ return ret;
+ }
+
+ ts->touch_ind_mode = pdata->int_setting == AUO_PIXCIR_INT_TOUCH_IND;
+
+ return 0;
+}
+
+/* control the generation of interrupts on the device side */
+static int auo_pixcir_int_toggle(struct auo_pixcir_ts *ts, bool enable)
+{
+ struct i2c_client *client = ts->client;
+ int ret;
+
+ ret = i2c_smbus_read_byte_data(client, AUO_PIXCIR_REG_INT_SETTING);
+ if (ret < 0) {
+ dev_err(&client->dev, "unable to read reg %Xh, %d\n",
+ AUO_PIXCIR_REG_INT_SETTING, ret);
+ return ret;
+ }
+
+ if (enable)
+ ret |= AUO_PIXCIR_INT_ENABLE;
+ else
+ ret &= ~AUO_PIXCIR_INT_ENABLE;
+
+ ret = i2c_smbus_write_byte_data(client, AUO_PIXCIR_REG_INT_SETTING,
+ ret);
+ if (ret < 0) {
+ dev_err(&client->dev, "unable to write reg %Xh, %d\n",
+ AUO_PIXCIR_REG_INT_SETTING, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int auo_pixcir_start(struct auo_pixcir_ts *ts)
+{
+ struct i2c_client *client = ts->client;
+ int ret;
+
+ ret = auo_pixcir_power_mode(ts, AUO_PIXCIR_POWER_ACTIVE);
+ if (ret < 0) {
+ dev_err(&client->dev, "could not set power mode, %d\n",
+ ret);
+ return ret;
+ }
+
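+	/* let the IRQ thread observe the cleared stop flag before the irq is enabled */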
+ ts->stopped = false;
+ mb();
+ enable_irq(client->irq);
+
+ ret = auo_pixcir_int_toggle(ts, 1);
+ if (ret < 0) {
+ dev_err(&client->dev, "could not enable interrupt, %d\n",
+ ret);
+ disable_irq(client->irq);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int auo_pixcir_stop(struct auo_pixcir_ts *ts)
+{
+ struct i2c_client *client = ts->client;
+ int ret;
+
+ ret = auo_pixcir_int_toggle(ts, 0);
+ if (ret < 0) {
+ dev_err(&client->dev, "could not disable interrupt, %d\n",
+ ret);
+ return ret;
+ }
+
+ /* disable receiving of interrupts */
+ disable_irq(client->irq);
+ ts->stopped = true;
+ mb();
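+	/* wake the IRQ thread so it notices ts->stopped and exits its loop */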
+ wake_up(&ts->wait);
+
+ return auo_pixcir_power_mode(ts, AUO_PIXCIR_POWER_DEEP_SLEEP);
+}
+
+static int auo_pixcir_input_open(struct input_dev *dev)
+{
+	struct auo_pixcir_ts *ts = input_get_drvdata(dev);
+
+	return auo_pixcir_start(ts);
+}
+
+static void auo_pixcir_input_close(struct input_dev *dev)
+{
+ struct auo_pixcir_ts *ts = input_get_drvdata(dev);
+
+	auo_pixcir_stop(ts);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int auo_pixcir_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct auo_pixcir_ts *ts = i2c_get_clientdata(client);
+ struct input_dev *input = ts->input;
+ int ret = 0;
+
+ mutex_lock(&input->mutex);
+
+	/* when configured as a wakeup source, the device must always be able
+	 * to wake the system, so start it here if it is not already running
+	 */
+ if (device_may_wakeup(&client->dev)) {
+ /* need to start device if not open, to be wakeup source */
+ if (!input->users) {
+ ret = auo_pixcir_start(ts);
+ if (ret)
+ goto unlock;
+ }
+
+ enable_irq_wake(client->irq);
+ ret = auo_pixcir_power_mode(ts, AUO_PIXCIR_POWER_SLEEP);
+ } else if (input->users) {
+ ret = auo_pixcir_stop(ts);
+ }
+
+unlock:
+ mutex_unlock(&input->mutex);
+
+ return ret;
+}
+
+static int auo_pixcir_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct auo_pixcir_ts *ts = i2c_get_clientdata(client);
+ struct input_dev *input = ts->input;
+ int ret = 0;
+
+ mutex_lock(&input->mutex);
+
+ if (device_may_wakeup(&client->dev)) {
+ disable_irq_wake(client->irq);
+
+ /* need to stop device if it was not open on suspend */
+ if (!input->users) {
+ ret = auo_pixcir_stop(ts);
+ if (ret)
+ goto unlock;
+ }
+
+ /* device wakes automatically from SLEEP */
+ } else if (input->users) {
+ ret = auo_pixcir_start(ts);
+ }
+
+unlock:
+ mutex_unlock(&input->mutex);
+
+ return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(auo_pixcir_pm_ops, auo_pixcir_suspend,
+ auo_pixcir_resume);
+
+static int __devinit auo_pixcir_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct auo_pixcir_ts_platdata *pdata = client->dev.platform_data;
+ struct auo_pixcir_ts *ts;
+ struct input_dev *input_dev;
+ int ret;
+
+ if (!pdata)
+ return -EINVAL;
+
+ ts = kzalloc(sizeof(struct auo_pixcir_ts), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ ret = gpio_request(pdata->gpio_int, "auo_pixcir_ts_int");
+ if (ret) {
+ dev_err(&client->dev, "request of gpio %d failed, %d\n",
+ pdata->gpio_int, ret);
+ goto err_gpio_int;
+ }
+
+ if (pdata->init_hw)
+ pdata->init_hw(client);
+
+ ts->client = client;
+ ts->touch_ind_mode = 0;
+ init_waitqueue_head(&ts->wait);
+
+ snprintf(ts->phys, sizeof(ts->phys),
+ "%s/input0", dev_name(&client->dev));
+
+ input_dev = input_allocate_device();
+	if (!input_dev) {
+		dev_err(&client->dev, "could not allocate input device\n");
+		ret = -ENOMEM;
+		goto err_input_alloc;
+	}
+
+ ts->input = input_dev;
+
+ input_dev->name = "AUO-Pixcir touchscreen";
+ input_dev->phys = ts->phys;
+ input_dev->id.bustype = BUS_I2C;
+ input_dev->dev.parent = &client->dev;
+
+ input_dev->open = auo_pixcir_input_open;
+ input_dev->close = auo_pixcir_input_close;
+
+ __set_bit(EV_ABS, input_dev->evbit);
+ __set_bit(EV_KEY, input_dev->evbit);
+
+ __set_bit(BTN_TOUCH, input_dev->keybit);
+
+ /* For single touch */
+ input_set_abs_params(input_dev, ABS_X, 0, pdata->x_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, pdata->y_max, 0, 0);
+
+ /* For multi touch */
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0,
+ pdata->x_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0,
+ pdata->y_max, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0,
+ AUO_PIXCIR_MAX_AREA, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, 0,
+ AUO_PIXCIR_MAX_AREA, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_ORIENTATION, 0, 1, 0, 0);
+
+ ret = i2c_smbus_read_byte_data(client, AUO_PIXCIR_REG_VERSION);
+ if (ret < 0)
+ goto err_fw_vers;
+ dev_info(&client->dev, "firmware version 0x%X\n", ret);
+
+ ret = auo_pixcir_int_config(ts, pdata->int_setting);
+ if (ret)
+ goto err_fw_vers;
+
+ input_set_drvdata(ts->input, ts);
+ ts->stopped = true;
+
+ ret = request_threaded_irq(client->irq, NULL, auo_pixcir_interrupt,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ input_dev->name, ts);
+ if (ret) {
+		dev_err(&client->dev, "request of irq %d failed, %d\n",
+			client->irq, ret);
+ goto err_fw_vers;
+ }
+
+ /* stop device and put it into deep sleep until it is opened */
+ ret = auo_pixcir_stop(ts);
+ if (ret < 0)
+ goto err_input_register;
+
+ ret = input_register_device(input_dev);
+ if (ret) {
+ dev_err(&client->dev, "could not register input device\n");
+ goto err_input_register;
+ }
+
+ i2c_set_clientdata(client, ts);
+
+ return 0;
+
+err_input_register:
+ free_irq(client->irq, ts);
+err_fw_vers:
+ input_free_device(input_dev);
+err_input_alloc:
+ if (pdata->exit_hw)
+ pdata->exit_hw(client);
+ gpio_free(pdata->gpio_int);
+err_gpio_int:
+ kfree(ts);
+
+ return ret;
+}
+
+static int __devexit auo_pixcir_remove(struct i2c_client *client)
+{
+ struct auo_pixcir_ts *ts = i2c_get_clientdata(client);
+ const struct auo_pixcir_ts_platdata *pdata = client->dev.platform_data;
+
+ free_irq(client->irq, ts);
+
+ input_unregister_device(ts->input);
+
+ if (pdata->exit_hw)
+ pdata->exit_hw(client);
+
+ gpio_free(pdata->gpio_int);
+
+ kfree(ts);
+
+ return 0;
+}
+
+static const struct i2c_device_id auo_pixcir_idtable[] = {
+ { "auo_pixcir_ts", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, auo_pixcir_idtable);
+
+static struct i2c_driver auo_pixcir_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "auo_pixcir_ts",
+ .pm = &auo_pixcir_pm_ops,
+ },
+ .probe = auo_pixcir_probe,
+ .remove = __devexit_p(auo_pixcir_remove),
+ .id_table = auo_pixcir_idtable,
+};
+
+static int __init auo_pixcir_init(void)
+{
+ return i2c_add_driver(&auo_pixcir_driver);
+}
+module_init(auo_pixcir_init);
+
+static void __exit auo_pixcir_exit(void)
+{
+ i2c_del_driver(&auo_pixcir_driver);
+}
+module_exit(auo_pixcir_exit);
+
+MODULE_DESCRIPTION("AUO-PIXCIR touchscreen driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
diff --git a/drivers/input/touchscreen/da9034-ts.c b/drivers/input/touchscreen/da9034-ts.c
index 2b72a59..36b65cf 100644
--- a/drivers/input/touchscreen/da9034-ts.c
+++ b/drivers/input/touchscreen/da9034-ts.c
@@ -379,18 +379,7 @@ static struct platform_driver da9034_touch_driver = {
.probe = da9034_touch_probe,
.remove = __devexit_p(da9034_touch_remove),
};
-
-static int __init da9034_touch_init(void)
-{
- return platform_driver_register(&da9034_touch_driver);
-}
-module_init(da9034_touch_init);
-
-static void __exit da9034_touch_exit(void)
-{
- platform_driver_unregister(&da9034_touch_driver);
-}
-module_exit(da9034_touch_exit);
+module_platform_driver(da9034_touch_driver);
MODULE_DESCRIPTION("Touchscreen driver for Dialog Semiconductor DA9034");
MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>, Bin Yang <bin.yang@marvell.com>");
diff --git a/drivers/input/touchscreen/eeti_ts.c b/drivers/input/touchscreen/eeti_ts.c
index 7f8f538..1df19bb 100644
--- a/drivers/input/touchscreen/eeti_ts.c
+++ b/drivers/input/touchscreen/eeti_ts.c
@@ -35,11 +35,11 @@
#include <linux/input/eeti_ts.h>
#include <linux/slab.h>
-static int flip_x;
+static bool flip_x;
module_param(flip_x, bool, 0644);
MODULE_PARM_DESC(flip_x, "flip x coordinate");
-static int flip_y;
+static bool flip_y;
module_param(flip_y, bool, 0644);
MODULE_PARM_DESC(flip_y, "flip y coordinate");
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
new file mode 100644
index 0000000..eadcc2e
--- /dev/null
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -0,0 +1,303 @@
+/*
+ * Driver for EETI eGalax Multiple Touch Controller
+ *
+ * Copyright (C) 2011 Freescale Semiconductor, Inc.
+ *
+ * based on max11801_ts.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* The EETI eGalax serial touch screen controller is an I2C-based
+ * multi-touch controller that supports up to 5 touch points. */
+
+/* TODO:
+ - auto idle mode support
+*/
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/irq.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/input/mt.h>
+
+/*
+ * Mouse Mode: some panels may configure the controller in mouse mode,
+ * which can only report one point at a given time.
+ * This driver will ignore events in this mode.
+ */
+#define REPORT_MODE_MOUSE 0x1
+/*
+ * Vendor Mode: this mode is used to transfer some vendor specific
+ * messages.
+ * This driver will ignore events in this mode.
+ */
+#define REPORT_MODE_VENDOR 0x3
+/* Multiple Touch Mode */
+#define REPORT_MODE_MTTOUCH 0x4
+
+#define MAX_SUPPORT_POINTS 5
+
+#define EVENT_VALID_OFFSET 7
+#define EVENT_VALID_MASK (0x1 << EVENT_VALID_OFFSET)
+#define EVENT_ID_OFFSET 2
+#define EVENT_ID_MASK (0xf << EVENT_ID_OFFSET)
+#define EVENT_IN_RANGE (0x1 << 1)
+#define EVENT_DOWN_UP (0X1 << 0)
+
+#define MAX_I2C_DATA_LEN 10
+
+#define EGALAX_MAX_X 32760
+#define EGALAX_MAX_Y 32760
+#define EGALAX_MAX_TRIES 100
+
+struct egalax_ts {
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+};
+
+static irqreturn_t egalax_ts_interrupt(int irq, void *dev_id)
+{
+ struct egalax_ts *ts = dev_id;
+ struct input_dev *input_dev = ts->input_dev;
+ struct i2c_client *client = ts->client;
+ u8 buf[MAX_I2C_DATA_LEN];
+ int id, ret, x, y, z;
+ int tries = 0;
+ bool down, valid;
+ u8 state;
+
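+	/* the read may return -EAGAIN while the controller is busy, so retry a bounded number of times */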
+ do {
+ ret = i2c_master_recv(client, buf, MAX_I2C_DATA_LEN);
+ } while (ret == -EAGAIN && tries++ < EGALAX_MAX_TRIES);
+
+ if (ret < 0)
+ return IRQ_HANDLED;
+
+ if (buf[0] != REPORT_MODE_MTTOUCH) {
+ /* ignore mouse events and vendor events */
+ return IRQ_HANDLED;
+ }
+
+ state = buf[1];
+ x = (buf[3] << 8) | buf[2];
+ y = (buf[5] << 8) | buf[4];
+ z = (buf[7] << 8) | buf[6];
+
+ valid = state & EVENT_VALID_MASK;
+ id = (state & EVENT_ID_MASK) >> EVENT_ID_OFFSET;
+ down = state & EVENT_DOWN_UP;
+
+	if (!valid || id >= MAX_SUPPORT_POINTS) {
+ dev_dbg(&client->dev, "point invalid\n");
+ return IRQ_HANDLED;
+ }
+
+ input_mt_slot(input_dev, id);
+ input_mt_report_slot_state(input_dev, MT_TOOL_FINGER, down);
+
+	dev_dbg(&client->dev, "%s id:%d x:%d y:%d z:%d\n",
+ down ? "down" : "up", id, x, y, z);
+
+ if (down) {
+ input_report_abs(input_dev, ABS_MT_POSITION_X, x);
+ input_report_abs(input_dev, ABS_MT_POSITION_Y, y);
+ input_report_abs(input_dev, ABS_MT_PRESSURE, z);
+ }
+
+ input_mt_report_pointer_emulation(input_dev, true);
+ input_sync(input_dev);
+
+ return IRQ_HANDLED;
+}
+
+/* Wake up the controller with a falling edge on the interrupt gpio. */
+static int egalax_wake_up_device(struct i2c_client *client)
+{
+ int gpio = irq_to_gpio(client->irq);
+ int ret;
+
+ ret = gpio_request(gpio, "egalax_irq");
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "request gpio failed, cannot wake up controller: %d\n",
+ ret);
+ return ret;
+ }
+
+	/* wake up the controller via a falling edge on the IRQ gpio. */
+ gpio_direction_output(gpio, 0);
+ gpio_set_value(gpio, 1);
+
+	/* the controller should be awake now, give the line back to the IRQ. */
+ gpio_direction_input(gpio);
+ gpio_free(gpio);
+
+ return 0;
+}
+
+static int __devinit egalax_firmware_version(struct i2c_client *client)
+{
+ static const u8 cmd[MAX_I2C_DATA_LEN] = { 0x03, 0x03, 0xa, 0x01, 0x41 };
+ int ret;
+
+ ret = i2c_master_send(client, cmd, MAX_I2C_DATA_LEN);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int __devinit egalax_ts_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct egalax_ts *ts;
+ struct input_dev *input_dev;
+ int ret;
+ int error;
+
+ ts = kzalloc(sizeof(struct egalax_ts), GFP_KERNEL);
+ if (!ts) {
+ dev_err(&client->dev, "Failed to allocate memory\n");
+ return -ENOMEM;
+ }
+
+ input_dev = input_allocate_device();
+ if (!input_dev) {
+ dev_err(&client->dev, "Failed to allocate memory\n");
+ error = -ENOMEM;
+ goto err_free_ts;
+ }
+
+ ts->client = client;
+ ts->input_dev = input_dev;
+
+	/* the controller may be in sleep mode, wake it up. */
+ egalax_wake_up_device(client);
+
+ ret = egalax_firmware_version(client);
+ if (ret < 0) {
+ dev_err(&client->dev, "Failed to read firmware version\n");
+ error = -EIO;
+ goto err_free_dev;
+ }
+
+ input_dev->name = "EETI eGalax Touch Screen";
+ input_dev->id.bustype = BUS_I2C;
+ input_dev->dev.parent = &client->dev;
+
+ __set_bit(EV_ABS, input_dev->evbit);
+ __set_bit(EV_KEY, input_dev->evbit);
+ __set_bit(BTN_TOUCH, input_dev->keybit);
+
+ input_set_abs_params(input_dev, ABS_X, 0, EGALAX_MAX_X, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, EGALAX_MAX_Y, 0, 0);
+ input_set_abs_params(input_dev,
+ ABS_MT_POSITION_X, 0, EGALAX_MAX_X, 0, 0);
+ input_set_abs_params(input_dev,
+			     ABS_MT_POSITION_Y, 0, EGALAX_MAX_Y, 0, 0);
+ input_mt_init_slots(input_dev, MAX_SUPPORT_POINTS);
+
+ input_set_drvdata(input_dev, ts);
+
+ error = request_threaded_irq(client->irq, NULL, egalax_ts_interrupt,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ "egalax_ts", ts);
+ if (error < 0) {
+ dev_err(&client->dev, "Failed to register interrupt\n");
+ goto err_free_dev;
+ }
+
+ error = input_register_device(ts->input_dev);
+ if (error)
+ goto err_free_irq;
+
+ i2c_set_clientdata(client, ts);
+ return 0;
+
+err_free_irq:
+ free_irq(client->irq, ts);
+err_free_dev:
+ input_free_device(input_dev);
+err_free_ts:
+ kfree(ts);
+
+ return error;
+}
+
+static __devexit int egalax_ts_remove(struct i2c_client *client)
+{
+ struct egalax_ts *ts = i2c_get_clientdata(client);
+
+ free_irq(client->irq, ts);
+
+ input_unregister_device(ts->input_dev);
+ kfree(ts);
+
+ return 0;
+}
+
+static const struct i2c_device_id egalax_ts_id[] = {
+ { "egalax_ts", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, egalax_ts_id);
+
+#ifdef CONFIG_PM_SLEEP
+static int egalax_ts_suspend(struct device *dev)
+{
+ static const u8 suspend_cmd[MAX_I2C_DATA_LEN] = {
+ 0x3, 0x6, 0xa, 0x3, 0x36, 0x3f, 0x2, 0, 0, 0
+ };
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret;
+
+ ret = i2c_master_send(client, suspend_cmd, MAX_I2C_DATA_LEN);
+ return ret > 0 ? 0 : ret;
+}
+
+static int egalax_ts_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ return egalax_wake_up_device(client);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(egalax_ts_pm_ops, egalax_ts_suspend, egalax_ts_resume);
+
+static struct i2c_driver egalax_ts_driver = {
+ .driver = {
+ .name = "egalax_ts",
+ .owner = THIS_MODULE,
+ .pm = &egalax_ts_pm_ops,
+ },
+ .id_table = egalax_ts_id,
+ .probe = egalax_ts_probe,
+ .remove = __devexit_p(egalax_ts_remove),
+};
+
+static int __init egalax_ts_init(void)
+{
+ return i2c_add_driver(&egalax_ts_driver);
+}
+
+static void __exit egalax_ts_exit(void)
+{
+ i2c_del_driver(&egalax_ts_driver);
+}
+
+module_init(egalax_ts_init);
+module_exit(egalax_ts_exit);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("Touchscreen driver for EETI eGalax touch controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/htcpen.c b/drivers/input/touchscreen/htcpen.c
index 62811de..d13143b 100644
--- a/drivers/input/touchscreen/htcpen.c
+++ b/drivers/input/touchscreen/htcpen.c
@@ -40,19 +40,13 @@ MODULE_LICENSE("GPL");
#define X_AXIS_MAX 2040
#define Y_AXIS_MAX 2040
-static int invert_x;
+static bool invert_x;
module_param(invert_x, bool, 0644);
MODULE_PARM_DESC(invert_x, "If set, X axis is inverted");
-static int invert_y;
+static bool invert_y;
module_param(invert_y, bool, 0644);
MODULE_PARM_DESC(invert_y, "If set, Y axis is inverted");
-static struct pnp_device_id pnp_ids[] = {
- { .id = "PNP0cc0" },
- { .id = "" }
-};
-MODULE_DEVICE_TABLE(pnp, pnp_ids);
-
static irqreturn_t htcpen_interrupt(int irq, void *handle)
{
struct input_dev *htcpen_dev = handle;
@@ -237,6 +231,7 @@ static struct dmi_system_id __initdata htcshift_dmi_table[] = {
},
{ }
};
+MODULE_DEVICE_TABLE(dmi, htcshift_dmi_table);
static int __init htcpen_isa_init(void)
{
diff --git a/drivers/input/touchscreen/intel-mid-touch.c b/drivers/input/touchscreen/intel-mid-touch.c
index 3276952..3cd7a83 100644
--- a/drivers/input/touchscreen/intel-mid-touch.c
+++ b/drivers/input/touchscreen/intel-mid-touch.c
@@ -664,18 +664,7 @@ static struct platform_driver mrstouch_driver = {
.probe = mrstouch_probe,
.remove = __devexit_p(mrstouch_remove),
};
-
-static int __init mrstouch_init(void)
-{
- return platform_driver_register(&mrstouch_driver);
-}
-module_init(mrstouch_init);
-
-static void __exit mrstouch_exit(void)
-{
- platform_driver_unregister(&mrstouch_driver);
-}
-module_exit(mrstouch_exit);
+module_platform_driver(mrstouch_driver);
MODULE_AUTHOR("Sreedhara Murthy. D.S, sreedhara.ds@intel.com");
MODULE_DESCRIPTION("Intel Moorestown Resistive Touch Screen Driver");
diff --git a/drivers/input/touchscreen/jornada720_ts.c b/drivers/input/touchscreen/jornada720_ts.c
index 50076c2..c3848ad 100644
--- a/drivers/input/touchscreen/jornada720_ts.c
+++ b/drivers/input/touchscreen/jornada720_ts.c
@@ -172,16 +172,4 @@ static struct platform_driver jornada720_ts_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init jornada720_ts_init(void)
-{
- return platform_driver_register(&jornada720_ts_driver);
-}
-
-static void __exit jornada720_ts_exit(void)
-{
- platform_driver_unregister(&jornada720_ts_driver);
-}
-
-module_init(jornada720_ts_init);
-module_exit(jornada720_ts_exit);
+module_platform_driver(jornada720_ts_driver);
diff --git a/drivers/input/touchscreen/lpc32xx_ts.c b/drivers/input/touchscreen/lpc32xx_ts.c
index 0a484ed..afcd069 100644
--- a/drivers/input/touchscreen/lpc32xx_ts.c
+++ b/drivers/input/touchscreen/lpc32xx_ts.c
@@ -392,18 +392,7 @@ static struct platform_driver lpc32xx_ts_driver = {
.pm = LPC32XX_TS_PM_OPS,
},
};
-
-static int __init lpc32xx_ts_init(void)
-{
- return platform_driver_register(&lpc32xx_ts_driver);
-}
-module_init(lpc32xx_ts_init);
-
-static void __exit lpc32xx_ts_exit(void)
-{
- platform_driver_unregister(&lpc32xx_ts_driver);
-}
-module_exit(lpc32xx_ts_exit);
+module_platform_driver(lpc32xx_ts_driver);
MODULE_AUTHOR("Kevin Wells <kevin.wells@nxp.com");
MODULE_DESCRIPTION("LPC32XX TSC Driver");
diff --git a/drivers/input/touchscreen/mainstone-wm97xx.c b/drivers/input/touchscreen/mainstone-wm97xx.c
index e966c29..7d2b213 100644
--- a/drivers/input/touchscreen/mainstone-wm97xx.c
+++ b/drivers/input/touchscreen/mainstone-wm97xx.c
@@ -302,19 +302,7 @@ static struct platform_driver mainstone_wm97xx_driver = {
.name = "wm97xx-touch",
},
};
-
-static int __init mainstone_wm97xx_init(void)
-{
- return platform_driver_register(&mainstone_wm97xx_driver);
-}
-
-static void __exit mainstone_wm97xx_exit(void)
-{
- platform_driver_unregister(&mainstone_wm97xx_driver);
-}
-
-module_init(mainstone_wm97xx_init);
-module_exit(mainstone_wm97xx_exit);
+module_platform_driver(mainstone_wm97xx_driver);
/* Module information */
MODULE_AUTHOR("Liam Girdwood <lrg@slimlogic.co.uk>");
diff --git a/drivers/input/touchscreen/migor_ts.c b/drivers/input/touchscreen/migor_ts.c
index 5803bd0..5226194 100644
--- a/drivers/input/touchscreen/migor_ts.c
+++ b/drivers/input/touchscreen/migor_ts.c
@@ -36,7 +36,6 @@
struct migor_ts_priv {
struct i2c_client *client;
struct input_dev *input;
- struct delayed_work work;
int irq;
};
@@ -44,15 +43,24 @@ static const u_int8_t migor_ts_ena_seq[17] = { 0x33, 0x22, 0x11,
0x01, 0x06, 0x07, };
static const u_int8_t migor_ts_dis_seq[17] = { };
-static void migor_ts_poscheck(struct work_struct *work)
+static irqreturn_t migor_ts_isr(int irq, void *dev_id)
{
- struct migor_ts_priv *priv = container_of(work,
- struct migor_ts_priv,
- work.work);
+ struct migor_ts_priv *priv = dev_id;
unsigned short xpos, ypos;
unsigned char event;
u_int8_t buf[16];
+ /*
+ * The touch screen controller chip is hooked up to the CPU
+ * using I2C and a single interrupt line. The interrupt line
+ * is pulled low whenever someone taps the screen. To deassert
+ * the interrupt line we need to acknowledge the interrupt by
+ * communicating with the controller over the slow i2c bus.
+ *
+	 * Since the I2C bus controller may sleep, we use a threaded
+	 * IRQ handler here.
+ */
+
memset(buf, 0, sizeof(buf));
/* Set Index 0 */
@@ -72,41 +80,25 @@ static void migor_ts_poscheck(struct work_struct *work)
xpos = ((buf[11] & 0x03) << 8 | buf[10]);
event = buf[12];
- if (event == EVENT_PENDOWN || event == EVENT_REPEAT) {
+ switch (event) {
+ case EVENT_PENDOWN:
+ case EVENT_REPEAT:
input_report_key(priv->input, BTN_TOUCH, 1);
input_report_abs(priv->input, ABS_X, ypos); /*X-Y swap*/
input_report_abs(priv->input, ABS_Y, xpos);
input_sync(priv->input);
- } else if (event == EVENT_PENUP) {
+ break;
+
+ case EVENT_PENUP:
input_report_key(priv->input, BTN_TOUCH, 0);
input_sync(priv->input);
+ break;
}
- out:
- enable_irq(priv->irq);
-}
-
-static irqreturn_t migor_ts_isr(int irq, void *dev_id)
-{
- struct migor_ts_priv *priv = dev_id;
-
- /* the touch screen controller chip is hooked up to the cpu
- * using i2c and a single interrupt line. the interrupt line
- * is pulled low whenever someone taps the screen. to deassert
- * the interrupt line we need to acknowledge the interrupt by
- * communicating with the controller over the slow i2c bus.
- *
- * we can't acknowledge from interrupt context since the i2c
- * bus controller may sleep, so we just disable the interrupt
- * here and handle the acknowledge using delayed work.
- */
-
- disable_irq_nosync(irq);
- schedule_delayed_work(&priv->work, HZ / 20);
+ out:
return IRQ_HANDLED;
}
-
static int migor_ts_open(struct input_dev *dev)
{
struct migor_ts_priv *priv = input_get_drvdata(dev);
@@ -131,15 +123,6 @@ static void migor_ts_close(struct input_dev *dev)
disable_irq(priv->irq);
- /* cancel pending work and wait for migor_ts_poscheck() to finish */
- if (cancel_delayed_work_sync(&priv->work)) {
- /*
- * if migor_ts_poscheck was canceled we need to enable IRQ
- * here to balance disable done in migor_ts_isr.
- */
- enable_irq(priv->irq);
- }
-
/* disable controller */
i2c_master_send(client, migor_ts_dis_seq, sizeof(migor_ts_dis_seq));
@@ -154,23 +137,20 @@ static int migor_ts_probe(struct i2c_client *client,
int error;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- dev_err(&client->dev, "failed to allocate driver data\n");
- error = -ENOMEM;
- goto err0;
- }
-
- dev_set_drvdata(&client->dev, priv);
-
input = input_allocate_device();
- if (!input) {
- dev_err(&client->dev, "Failed to allocate input device.\n");
+ if (!priv || !input) {
+ dev_err(&client->dev, "failed to allocate memory\n");
error = -ENOMEM;
- goto err1;
+ goto err_free_mem;
}
+ priv->client = client;
+ priv->input = input;
+ priv->irq = client->irq;
+
input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
- input->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
+
+ __set_bit(BTN_TOUCH, input->keybit);
input_set_abs_params(input, ABS_X, 95, 955, 0, 0);
input_set_abs_params(input, ABS_Y, 85, 935, 0, 0);
@@ -184,39 +164,34 @@ static int migor_ts_probe(struct i2c_client *client,
input_set_drvdata(input, priv);
- priv->client = client;
- priv->input = input;
- INIT_DELAYED_WORK(&priv->work, migor_ts_poscheck);
- priv->irq = client->irq;
-
- error = input_register_device(input);
- if (error)
- goto err1;
-
- error = request_irq(priv->irq, migor_ts_isr, IRQF_TRIGGER_LOW,
- client->name, priv);
+ error = request_threaded_irq(priv->irq, NULL, migor_ts_isr,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ client->name, priv);
if (error) {
dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
- goto err2;
+ goto err_free_mem;
}
+ error = input_register_device(input);
+ if (error)
+ goto err_free_irq;
+
+ i2c_set_clientdata(client, priv);
device_init_wakeup(&client->dev, 1);
+
return 0;
- err2:
- input_unregister_device(input);
- input = NULL; /* so we dont try to free it below */
- err1:
+ err_free_irq:
+ free_irq(priv->irq, priv);
+ err_free_mem:
input_free_device(input);
kfree(priv);
- err0:
- dev_set_drvdata(&client->dev, NULL);
return error;
}
static int migor_ts_remove(struct i2c_client *client)
{
- struct migor_ts_priv *priv = dev_get_drvdata(&client->dev);
+ struct migor_ts_priv *priv = i2c_get_clientdata(client);
free_irq(priv->irq, priv);
input_unregister_device(priv->input);
@@ -230,7 +205,7 @@ static int migor_ts_remove(struct i2c_client *client)
static int migor_ts_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- struct migor_ts_priv *priv = dev_get_drvdata(&client->dev);
+ struct migor_ts_priv *priv = i2c_get_clientdata(client);
if (device_may_wakeup(&client->dev))
enable_irq_wake(priv->irq);
@@ -241,7 +216,7 @@ static int migor_ts_suspend(struct device *dev)
static int migor_ts_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
- struct migor_ts_priv *priv = dev_get_drvdata(&client->dev);
+ struct migor_ts_priv *priv = i2c_get_clientdata(client);
if (device_may_wakeup(&client->dev))
disable_irq_wake(priv->irq);
diff --git a/drivers/input/touchscreen/pcap_ts.c b/drivers/input/touchscreen/pcap_ts.c
index ea6ef16..f57aeb8 100644
--- a/drivers/input/touchscreen/pcap_ts.c
+++ b/drivers/input/touchscreen/pcap_ts.c
@@ -252,19 +252,7 @@ static struct platform_driver pcap_ts_driver = {
.pm = PCAP_TS_PM_OPS,
},
};
-
-static int __init pcap_ts_init(void)
-{
- return platform_driver_register(&pcap_ts_driver);
-}
-
-static void __exit pcap_ts_exit(void)
-{
- platform_driver_unregister(&pcap_ts_driver);
-}
-
-module_init(pcap_ts_init);
-module_exit(pcap_ts_exit);
+module_platform_driver(pcap_ts_driver);
MODULE_DESCRIPTION("Motorola PCAP2 touchscreen driver");
MODULE_AUTHOR("Daniel Ribeiro / Harald Welte");
diff --git a/drivers/input/touchscreen/pixcir_i2c_ts.c b/drivers/input/touchscreen/pixcir_i2c_ts.c
new file mode 100644
index 0000000..d5ac09a
--- /dev/null
+++ b/drivers/input/touchscreen/pixcir_i2c_ts.c
@@ -0,0 +1,239 @@
+/*
+ * Driver for Pixcir I2C touchscreen controllers.
+ *
+ * Copyright (C) 2010-2011 Pixcir, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/input/pixcir_ts.h>
+
+struct pixcir_i2c_ts_data {
+ struct i2c_client *client;
+ struct input_dev *input;
+ const struct pixcir_ts_platform_data *chip;
+ bool exiting;
+};
+
+static void pixcir_ts_poscheck(struct pixcir_i2c_ts_data *data)
+{
+ struct pixcir_i2c_ts_data *tsdata = data;
+ u8 rdbuf[10], wrbuf[1] = { 0 };
+ u8 touch;
+ int ret;
+
+ ret = i2c_master_send(tsdata->client, wrbuf, sizeof(wrbuf));
+ if (ret != sizeof(wrbuf)) {
+ dev_err(&tsdata->client->dev,
+ "%s: i2c_master_send failed(), ret=%d\n",
+ __func__, ret);
+ return;
+ }
+
+ ret = i2c_master_recv(tsdata->client, rdbuf, sizeof(rdbuf));
+ if (ret != sizeof(rdbuf)) {
+ dev_err(&tsdata->client->dev,
+ "%s: i2c_master_recv failed(), ret=%d\n",
+ __func__, ret);
+ return;
+ }
+
+ touch = rdbuf[0];
+ if (touch) {
+ u16 posx1 = (rdbuf[3] << 8) | rdbuf[2];
+ u16 posy1 = (rdbuf[5] << 8) | rdbuf[4];
+ u16 posx2 = (rdbuf[7] << 8) | rdbuf[6];
+ u16 posy2 = (rdbuf[9] << 8) | rdbuf[8];
+
+ input_report_key(tsdata->input, BTN_TOUCH, 1);
+ input_report_abs(tsdata->input, ABS_X, posx1);
+ input_report_abs(tsdata->input, ABS_Y, posy1);
+
+ input_report_abs(tsdata->input, ABS_MT_POSITION_X, posx1);
+ input_report_abs(tsdata->input, ABS_MT_POSITION_Y, posy1);
+ input_mt_sync(tsdata->input);
+
+ if (touch == 2) {
+ input_report_abs(tsdata->input,
+ ABS_MT_POSITION_X, posx2);
+ input_report_abs(tsdata->input,
+ ABS_MT_POSITION_Y, posy2);
+ input_mt_sync(tsdata->input);
+ }
+ } else {
+ input_report_key(tsdata->input, BTN_TOUCH, 0);
+ }
+
+ input_sync(tsdata->input);
+}
+
+static irqreturn_t pixcir_ts_isr(int irq, void *dev_id)
+{
+ struct pixcir_i2c_ts_data *tsdata = dev_id;
+
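+	/* keep reading reports until the ATTB line (read via the platform callback) goes back high */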
+ while (!tsdata->exiting) {
+ pixcir_ts_poscheck(tsdata);
+
+ if (tsdata->chip->attb_read_val())
+ break;
+
+ msleep(20);
+ }
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int pixcir_i2c_ts_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (device_may_wakeup(&client->dev))
+ enable_irq_wake(client->irq);
+
+ return 0;
+}
+
+static int pixcir_i2c_ts_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ if (device_may_wakeup(&client->dev))
+ disable_irq_wake(client->irq);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(pixcir_dev_pm_ops,
+ pixcir_i2c_ts_suspend, pixcir_i2c_ts_resume);
+
+static int __devinit pixcir_i2c_ts_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ const struct pixcir_ts_platform_data *pdata = client->dev.platform_data;
+ struct pixcir_i2c_ts_data *tsdata;
+ struct input_dev *input;
+ int error;
+
+ if (!pdata) {
+ dev_err(&client->dev, "platform data not defined\n");
+ return -EINVAL;
+ }
+
+ tsdata = kzalloc(sizeof(*tsdata), GFP_KERNEL);
+ input = input_allocate_device();
+ if (!tsdata || !input) {
+ dev_err(&client->dev, "Failed to allocate driver data!\n");
+ error = -ENOMEM;
+ goto err_free_mem;
+ }
+
+ tsdata->client = client;
+ tsdata->input = input;
+ tsdata->chip = pdata;
+
+ input->name = client->name;
+ input->id.bustype = BUS_I2C;
+ input->dev.parent = &client->dev;
+
+ __set_bit(EV_KEY, input->evbit);
+ __set_bit(EV_ABS, input->evbit);
+ __set_bit(BTN_TOUCH, input->keybit);
+ input_set_abs_params(input, ABS_X, 0, pdata->x_max, 0, 0);
+ input_set_abs_params(input, ABS_Y, 0, pdata->y_max, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, pdata->x_max, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, pdata->y_max, 0, 0);
+
+ input_set_drvdata(input, tsdata);
+
+ error = request_threaded_irq(client->irq, NULL, pixcir_ts_isr,
+ IRQF_TRIGGER_FALLING,
+ client->name, tsdata);
+ if (error) {
+ dev_err(&client->dev, "Unable to request touchscreen IRQ.\n");
+ goto err_free_mem;
+ }
+
+ error = input_register_device(input);
+ if (error)
+ goto err_free_irq;
+
+ i2c_set_clientdata(client, tsdata);
+ device_init_wakeup(&client->dev, 1);
+
+ return 0;
+
+err_free_irq:
+ free_irq(client->irq, tsdata);
+err_free_mem:
+ input_free_device(input);
+ kfree(tsdata);
+ return error;
+}
+
+static int __devexit pixcir_i2c_ts_remove(struct i2c_client *client)
+{
+ struct pixcir_i2c_ts_data *tsdata = i2c_get_clientdata(client);
+
+ device_init_wakeup(&client->dev, 0);
+
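+	/* make the ISR thread stop its polling loop before the irq is freed */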
+ tsdata->exiting = true;
+ mb();
+ free_irq(client->irq, tsdata);
+
+ input_unregister_device(tsdata->input);
+ kfree(tsdata);
+
+ return 0;
+}
+
+static const struct i2c_device_id pixcir_i2c_ts_id[] = {
+ { "pixcir_ts", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, pixcir_i2c_ts_id);
+
+static struct i2c_driver pixcir_i2c_ts_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "pixcir_ts",
+ .pm = &pixcir_dev_pm_ops,
+ },
+ .probe = pixcir_i2c_ts_probe,
+ .remove = __devexit_p(pixcir_i2c_ts_remove),
+ .id_table = pixcir_i2c_ts_id,
+};
+
+static int __init pixcir_i2c_ts_init(void)
+{
+ return i2c_add_driver(&pixcir_i2c_ts_driver);
+}
+module_init(pixcir_i2c_ts_init);
+
+static void __exit pixcir_i2c_ts_exit(void)
+{
+ i2c_del_driver(&pixcir_i2c_ts_driver);
+}
+module_exit(pixcir_i2c_ts_exit);
+
+MODULE_AUTHOR("Jianchun Bian <jcbian@pixcir.com.cn>, Dequan Meng <dqmeng@pixcir.com.cn>");
+MODULE_DESCRIPTION("Pixcir I2C Touchscreen Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c
index 64ce697..bf1a064 100644
--- a/drivers/input/touchscreen/s3c2410_ts.c
+++ b/drivers/input/touchscreen/s3c2410_ts.c
@@ -432,19 +432,7 @@ static struct platform_driver s3c_ts_driver = {
.probe = s3c2410ts_probe,
.remove = __devexit_p(s3c2410ts_remove),
};
-
-static int __init s3c2410ts_init(void)
-{
- return platform_driver_register(&s3c_ts_driver);
-}
-
-static void __exit s3c2410ts_exit(void)
-{
- platform_driver_unregister(&s3c_ts_driver);
-}
-
-module_init(s3c2410ts_init);
-module_exit(s3c2410ts_exit);
+module_platform_driver(s3c_ts_driver);
MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>, "
"Ben Dooks <ben@simtec.co.uk>, "
diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c
index 4ab3713..8825fe3 100644
--- a/drivers/input/touchscreen/st1232.c
+++ b/drivers/input/touchscreen/st1232.c
@@ -23,6 +23,7 @@
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/pm_qos.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -46,6 +47,7 @@ struct st1232_ts_data {
struct i2c_client *client;
struct input_dev *input_dev;
struct st1232_ts_finger finger[MAX_FINGERS];
+ struct dev_pm_qos_request low_latency_req;
};
static int st1232_ts_read_data(struct st1232_ts_data *ts)
@@ -118,8 +120,17 @@ static irqreturn_t st1232_ts_irq_handler(int irq, void *dev_id)
}
/* SYN_MT_REPORT only if no contact */
- if (!count)
+ if (!count) {
input_mt_sync(input_dev);
+ if (ts->low_latency_req.dev) {
+ dev_pm_qos_remove_request(&ts->low_latency_req);
+ ts->low_latency_req.dev = NULL;
+ }
+ } else if (!ts->low_latency_req.dev) {
+ /* First contact, request 100 us latency. */
+ dev_pm_qos_add_ancestor_request(&ts->client->dev,
+ &ts->low_latency_req, 100);
+ }
/* SYN_REPORT */
input_sync(input_dev);
diff --git a/drivers/input/touchscreen/stmpe-ts.c b/drivers/input/touchscreen/stmpe-ts.c
index ae88e13..692b685 100644
--- a/drivers/input/touchscreen/stmpe-ts.c
+++ b/drivers/input/touchscreen/stmpe-ts.c
@@ -379,20 +379,7 @@ static struct platform_driver stmpe_ts_driver = {
.probe = stmpe_input_probe,
.remove = __devexit_p(stmpe_ts_remove),
};
-
-static int __init stmpe_ts_init(void)
-{
- return platform_driver_register(&stmpe_ts_driver);
-}
-
-module_init(stmpe_ts_init);
-
-static void __exit stmpe_ts_exit(void)
-{
- platform_driver_unregister(&stmpe_ts_driver);
-}
-
-module_exit(stmpe_ts_exit);
+module_platform_driver(stmpe_ts_driver);
MODULE_AUTHOR("Luotao Fu <l.fu@pengutronix.de>");
MODULE_DESCRIPTION("STMPEXXX touchscreen driver");
diff --git a/drivers/input/touchscreen/tnetv107x-ts.c b/drivers/input/touchscreen/tnetv107x-ts.c
index 0e8f63e..7e74880 100644
--- a/drivers/input/touchscreen/tnetv107x-ts.c
+++ b/drivers/input/touchscreen/tnetv107x-ts.c
@@ -378,19 +378,7 @@ static struct platform_driver tsc_driver = {
.driver.name = "tnetv107x-ts",
.driver.owner = THIS_MODULE,
};
-
-static int __init tsc_init(void)
-{
- return platform_driver_register(&tsc_driver);
-}
-
-static void __exit tsc_exit(void)
-{
- platform_driver_unregister(&tsc_driver);
-}
-
-module_init(tsc_init);
-module_exit(tsc_exit);
+module_platform_driver(tsc_driver);
MODULE_AUTHOR("Cyril Chemparathy");
MODULE_DESCRIPTION("TNETV107X Touchscreen Driver");
diff --git a/drivers/input/touchscreen/tps6507x-ts.c b/drivers/input/touchscreen/tps6507x-ts.c
index 4303149..6c6f6d8 100644
--- a/drivers/input/touchscreen/tps6507x-ts.c
+++ b/drivers/input/touchscreen/tps6507x-ts.c
@@ -371,18 +371,7 @@ static struct platform_driver tps6507x_ts_driver = {
.probe = tps6507x_ts_probe,
.remove = __devexit_p(tps6507x_ts_remove),
};
-
-static int __init tps6507x_ts_init(void)
-{
- return platform_driver_register(&tps6507x_ts_driver);
-}
-module_init(tps6507x_ts_init);
-
-static void __exit tps6507x_ts_exit(void)
-{
- platform_driver_unregister(&tps6507x_ts_driver);
-}
-module_exit(tps6507x_ts_exit);
+module_platform_driver(tps6507x_ts_driver);
MODULE_AUTHOR("Todd Fischer <todd.fischer@ridgerun.com>");
MODULE_DESCRIPTION("TPS6507x - TouchScreen driver");
diff --git a/drivers/input/touchscreen/tsc2005.c b/drivers/input/touchscreen/tsc2005.c
index cbf0ff3..067d956 100644
--- a/drivers/input/touchscreen/tsc2005.c
+++ b/drivers/input/touchscreen/tsc2005.c
@@ -450,13 +450,13 @@ static struct attribute *tsc2005_attrs[] = {
NULL
};
-static mode_t tsc2005_attr_is_visible(struct kobject *kobj,
+static umode_t tsc2005_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct spi_device *spi = to_spi_device(dev);
struct tsc2005 *ts = spi_get_drvdata(spi);
- mode_t mode = attr->mode;
+ umode_t mode = attr->mode;
if (attr == &dev_attr_selftest.attr) {
if (!ts->set_reset)
diff --git a/drivers/input/touchscreen/ucb1400_ts.c b/drivers/input/touchscreen/ucb1400_ts.c
index 3b5b5df..46e83ad 100644
--- a/drivers/input/touchscreen/ucb1400_ts.c
+++ b/drivers/input/touchscreen/ucb1400_ts.c
@@ -20,24 +20,24 @@
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/completion.h>
#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
#include <linux/input.h>
#include <linux/device.h>
#include <linux/interrupt.h>
-#include <linux/suspend.h>
-#include <linux/kthread.h>
-#include <linux/freezer.h>
#include <linux/ucb1400.h>
-static int adcsync;
+#define UCB1400_TS_POLL_PERIOD 10 /* ms */
+
+static bool adcsync;
static int ts_delay = 55; /* us */
static int ts_delay_pressure; /* us */
/* Switch to interrupt mode. */
-static inline void ucb1400_ts_mode_int(struct snd_ac97 *ac97)
+static void ucb1400_ts_mode_int(struct ucb1400_ts *ucb)
{
- ucb1400_reg_write(ac97, UCB_TS_CR,
+ ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
UCB_TS_CR_TSMX_POW | UCB_TS_CR_TSPX_POW |
UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_GND |
UCB_TS_CR_MODE_INT);
@@ -47,13 +47,15 @@ static inline void ucb1400_ts_mode_int(struct snd_ac97 *ac97)
* Switch to pressure mode, and read pressure. We don't need to wait
* here, since both plates are being driven.
*/
-static inline unsigned int ucb1400_ts_read_pressure(struct ucb1400_ts *ucb)
+static unsigned int ucb1400_ts_read_pressure(struct ucb1400_ts *ucb)
{
ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
UCB_TS_CR_TSMX_POW | UCB_TS_CR_TSPX_POW |
UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_GND |
UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA);
+
udelay(ts_delay_pressure);
+
return ucb1400_adc_read(ucb->ac97, UCB_ADC_INP_TSPY, adcsync);
}
@@ -63,7 +65,7 @@ static inline unsigned int ucb1400_ts_read_pressure(struct ucb1400_ts *ucb)
* gives a faster response time. Even so, we need to wait about 55us
* for things to stabilise.
*/
-static inline unsigned int ucb1400_ts_read_xpos(struct ucb1400_ts *ucb)
+static unsigned int ucb1400_ts_read_xpos(struct ucb1400_ts *ucb)
{
ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW |
@@ -86,7 +88,7 @@ static inline unsigned int ucb1400_ts_read_xpos(struct ucb1400_ts *ucb)
* gives a faster response time. Even so, we need to wait about 55us
* for things to stabilise.
*/
-static inline unsigned int ucb1400_ts_read_ypos(struct ucb1400_ts *ucb)
+static int ucb1400_ts_read_ypos(struct ucb1400_ts *ucb)
{
ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW |
@@ -107,7 +109,7 @@ static inline unsigned int ucb1400_ts_read_ypos(struct ucb1400_ts *ucb)
* Switch to X plate resistance mode. Set MX to ground, PX to
* supply. Measure current.
*/
-static inline unsigned int ucb1400_ts_read_xres(struct ucb1400_ts *ucb)
+static unsigned int ucb1400_ts_read_xres(struct ucb1400_ts *ucb)
{
ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW |
@@ -119,7 +121,7 @@ static inline unsigned int ucb1400_ts_read_xres(struct ucb1400_ts *ucb)
* Switch to Y plate resistance mode. Set MY to ground, PY to
* supply. Measure current.
*/
-static inline unsigned int ucb1400_ts_read_yres(struct ucb1400_ts *ucb)
+static unsigned int ucb1400_ts_read_yres(struct ucb1400_ts *ucb)
{
ucb1400_reg_write(ucb->ac97, UCB_TS_CR,
UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW |
@@ -127,26 +129,26 @@ static inline unsigned int ucb1400_ts_read_yres(struct ucb1400_ts *ucb)
return ucb1400_adc_read(ucb->ac97, 0, adcsync);
}
-static inline int ucb1400_ts_pen_up(struct snd_ac97 *ac97)
+static int ucb1400_ts_pen_up(struct ucb1400_ts *ucb)
{
- unsigned short val = ucb1400_reg_read(ac97, UCB_TS_CR);
+ unsigned short val = ucb1400_reg_read(ucb->ac97, UCB_TS_CR);
return val & (UCB_TS_CR_TSPX_LOW | UCB_TS_CR_TSMX_LOW);
}
-static inline void ucb1400_ts_irq_enable(struct snd_ac97 *ac97)
+static void ucb1400_ts_irq_enable(struct ucb1400_ts *ucb)
{
- ucb1400_reg_write(ac97, UCB_IE_CLEAR, UCB_IE_TSPX);
- ucb1400_reg_write(ac97, UCB_IE_CLEAR, 0);
- ucb1400_reg_write(ac97, UCB_IE_FAL, UCB_IE_TSPX);
+ ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, UCB_IE_TSPX);
+ ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0);
+ ucb1400_reg_write(ucb->ac97, UCB_IE_FAL, UCB_IE_TSPX);
}
-static inline void ucb1400_ts_irq_disable(struct snd_ac97 *ac97)
+static void ucb1400_ts_irq_disable(struct ucb1400_ts *ucb)
{
- ucb1400_reg_write(ac97, UCB_IE_FAL, 0);
+ ucb1400_reg_write(ucb->ac97, UCB_IE_FAL, 0);
}
-static void ucb1400_ts_evt_add(struct input_dev *idev, u16 pressure, u16 x, u16 y)
+static void ucb1400_ts_report_event(struct input_dev *idev, u16 pressure, u16 x, u16 y)
{
input_report_abs(idev, ABS_X, x);
input_report_abs(idev, ABS_Y, y);
@@ -162,7 +164,7 @@ static void ucb1400_ts_event_release(struct input_dev *idev)
input_sync(idev);
}
-static void ucb1400_handle_pending_irq(struct ucb1400_ts *ucb)
+static void ucb1400_clear_pending_irq(struct ucb1400_ts *ucb)
{
unsigned int isr;
@@ -171,32 +173,34 @@ static void ucb1400_handle_pending_irq(struct ucb1400_ts *ucb)
ucb1400_reg_write(ucb->ac97, UCB_IE_CLEAR, 0);
if (isr & UCB_IE_TSPX)
- ucb1400_ts_irq_disable(ucb->ac97);
+ ucb1400_ts_irq_disable(ucb);
else
- dev_dbg(&ucb->ts_idev->dev, "ucb1400: unexpected IE_STATUS = %#x\n", isr);
- enable_irq(ucb->irq);
+ dev_dbg(&ucb->ts_idev->dev,
+ "ucb1400: unexpected IE_STATUS = %#x\n", isr);
}
-static int ucb1400_ts_thread(void *_ucb)
+/*
+ * A restriction with interrupts exists when using the ucb1400, as
+ * the codec read/write routines may sleep while waiting for codec
+ * access completion and uses semaphores for access control to the
+ * access completion and use semaphores for access control to the
+ * AC97 bus. Therefore the driver is forced to use a threaded
+ * interrupt handler.
+static irqreturn_t ucb1400_irq(int irqnr, void *devid)
{
- struct ucb1400_ts *ucb = _ucb;
- struct task_struct *tsk = current;
- int valid = 0;
- struct sched_param param = { .sched_priority = 1 };
+ struct ucb1400_ts *ucb = devid;
+ unsigned int x, y, p;
+ bool penup;
- sched_setscheduler(tsk, SCHED_FIFO, &param);
+ if (unlikely(irqnr != ucb->irq))
+ return IRQ_NONE;
- set_freezable();
- while (!kthread_should_stop()) {
- unsigned int x, y, p;
- long timeout;
+ ucb1400_clear_pending_irq(ucb);
- ucb->ts_restart = 0;
+ /* Start with a small delay before checking pendown state */
+ msleep(UCB1400_TS_POLL_PERIOD);
- if (ucb->irq_pending) {
- ucb->irq_pending = 0;
- ucb1400_handle_pending_irq(ucb);
- }
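+	/* sample and report coordinates until the pen goes up or the driver is stopped */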
+ while (!ucb->stopped && !(penup = ucb1400_ts_pen_up(ucb))) {
ucb1400_adc_enable(ucb->ac97);
x = ucb1400_ts_read_xpos(ucb);
@@ -204,91 +208,62 @@ static int ucb1400_ts_thread(void *_ucb)
p = ucb1400_ts_read_pressure(ucb);
ucb1400_adc_disable(ucb->ac97);
- /* Switch back to interrupt mode. */
- ucb1400_ts_mode_int(ucb->ac97);
-
- msleep(10);
-
- if (ucb1400_ts_pen_up(ucb->ac97)) {
- ucb1400_ts_irq_enable(ucb->ac97);
-
- /*
- * If we spat out a valid sample set last time,
- * spit out a "pen off" sample here.
- */
- if (valid) {
- ucb1400_ts_event_release(ucb->ts_idev);
- valid = 0;
- }
-
- timeout = MAX_SCHEDULE_TIMEOUT;
- } else {
- valid = 1;
- ucb1400_ts_evt_add(ucb->ts_idev, p, x, y);
- timeout = msecs_to_jiffies(10);
- }
+ ucb1400_ts_report_event(ucb->ts_idev, p, x, y);
- wait_event_freezable_timeout(ucb->ts_wait,
- ucb->irq_pending || ucb->ts_restart ||
- kthread_should_stop(), timeout);
+ wait_event_timeout(ucb->ts_wait, ucb->stopped,
+ msecs_to_jiffies(UCB1400_TS_POLL_PERIOD));
}
- /* Send the "pen off" if we are stopping with the pen still active */
- if (valid)
- ucb1400_ts_event_release(ucb->ts_idev);
+ ucb1400_ts_event_release(ucb->ts_idev);
- ucb->ts_task = NULL;
- return 0;
+ if (!ucb->stopped) {
+ /* Switch back to interrupt mode. */
+ ucb1400_ts_mode_int(ucb);
+ ucb1400_ts_irq_enable(ucb);
+ }
+
+ return IRQ_HANDLED;
}
-/*
- * A restriction with interrupts exists when using the ucb1400, as
- * the codec read/write routines may sleep while waiting for codec
- * access completion and uses semaphores for access control to the
- * AC97 bus. A complete codec read cycle could take anywhere from
- * 60 to 100uSec so we *definitely* don't want to spin inside the
- * interrupt handler waiting for codec access. So, we handle the
- * interrupt by scheduling a RT kernel thread to run in process
- * context instead of interrupt context.
- */
-static irqreturn_t ucb1400_hard_irq(int irqnr, void *devid)
+static void ucb1400_ts_stop(struct ucb1400_ts *ucb)
{
- struct ucb1400_ts *ucb = devid;
+ /* Signal IRQ thread to stop polling and disable the handler. */
+ ucb->stopped = true;
+ mb();
+ wake_up(&ucb->ts_wait);
+ disable_irq(ucb->irq);
- if (irqnr == ucb->irq) {
- disable_irq_nosync(ucb->irq);
- ucb->irq_pending = 1;
- wake_up(&ucb->ts_wait);
- return IRQ_HANDLED;
- }
- return IRQ_NONE;
+ ucb1400_ts_irq_disable(ucb);
+ ucb1400_reg_write(ucb->ac97, UCB_TS_CR, 0);
+}
+
+/* Must be called with ts->lock held */
+static void ucb1400_ts_start(struct ucb1400_ts *ucb)
+{
+ /* Tell IRQ thread that it may poll the device. */
+ ucb->stopped = false;
+ mb();
+
+ ucb1400_ts_mode_int(ucb);
+ ucb1400_ts_irq_enable(ucb);
+
+ enable_irq(ucb->irq);
}
static int ucb1400_ts_open(struct input_dev *idev)
{
struct ucb1400_ts *ucb = input_get_drvdata(idev);
- int ret = 0;
- BUG_ON(ucb->ts_task);
+ ucb1400_ts_start(ucb);
- ucb->ts_task = kthread_run(ucb1400_ts_thread, ucb, "UCB1400_ts");
- if (IS_ERR(ucb->ts_task)) {
- ret = PTR_ERR(ucb->ts_task);
- ucb->ts_task = NULL;
- }
-
- return ret;
+ return 0;
}
static void ucb1400_ts_close(struct input_dev *idev)
{
struct ucb1400_ts *ucb = input_get_drvdata(idev);
- if (ucb->ts_task)
- kthread_stop(ucb->ts_task);
-
- ucb1400_ts_irq_disable(ucb->ac97);
- ucb1400_reg_write(ucb->ac97, UCB_TS_CR, 0);
+ ucb1400_ts_stop(ucb);
}
#ifndef NO_IRQ
@@ -299,7 +274,8 @@ static void ucb1400_ts_close(struct input_dev *idev)
* Try to probe our interrupt, rather than relying on lots of
* hard-coded machine dependencies.
*/
-static int ucb1400_ts_detect_irq(struct ucb1400_ts *ucb)
+static int __devinit ucb1400_ts_detect_irq(struct ucb1400_ts *ucb,
+ struct platform_device *pdev)
{
unsigned long mask, timeout;
@@ -321,7 +297,7 @@ static int ucb1400_ts_detect_irq(struct ucb1400_ts *ucb)
UCB_ADC_DAT_VALID)) {
cpu_relax();
if (time_after(jiffies, timeout)) {
- printk(KERN_ERR "ucb1400: timed out in IRQ probe\n");
+ dev_err(&pdev->dev, "timed out in IRQ probe\n");
probe_irq_off(mask);
return -ENODEV;
}
@@ -342,11 +318,11 @@ static int ucb1400_ts_detect_irq(struct ucb1400_ts *ucb)
return 0;
}
-static int ucb1400_ts_probe(struct platform_device *dev)
+static int __devinit ucb1400_ts_probe(struct platform_device *pdev)
{
+ struct ucb1400_ts *ucb = pdev->dev.platform_data;
int error, x_res, y_res;
u16 fcsr;
- struct ucb1400_ts *ucb = dev->dev.platform_data;
ucb->ts_idev = input_allocate_device();
if (!ucb->ts_idev) {
@@ -356,27 +332,19 @@ static int ucb1400_ts_probe(struct platform_device *dev)
/* Only in case the IRQ line wasn't supplied, try detecting it */
if (ucb->irq < 0) {
- error = ucb1400_ts_detect_irq(ucb);
+ error = ucb1400_ts_detect_irq(ucb, pdev);
if (error) {
- printk(KERN_ERR "UCB1400: IRQ probe failed\n");
+ dev_err(&pdev->dev, "IRQ probe failed\n");
goto err_free_devs;
}
}
+ dev_dbg(&pdev->dev, "found IRQ %d\n", ucb->irq);
init_waitqueue_head(&ucb->ts_wait);
- error = request_irq(ucb->irq, ucb1400_hard_irq, IRQF_TRIGGER_RISING,
- "UCB1400", ucb);
- if (error) {
- printk(KERN_ERR "ucb1400: unable to grab irq%d: %d\n",
- ucb->irq, error);
- goto err_free_devs;
- }
- printk(KERN_DEBUG "UCB1400: found IRQ %d\n", ucb->irq);
-
input_set_drvdata(ucb->ts_idev, ucb);
- ucb->ts_idev->dev.parent = &dev->dev;
+ ucb->ts_idev->dev.parent = &pdev->dev;
ucb->ts_idev->name = "UCB1400 touchscreen interface";
ucb->ts_idev->id.vendor = ucb1400_reg_read(ucb->ac97,
AC97_VENDOR_ID1);
@@ -398,12 +366,23 @@ static int ucb1400_ts_probe(struct platform_device *dev)
x_res = ucb1400_ts_read_xres(ucb);
y_res = ucb1400_ts_read_yres(ucb);
ucb1400_adc_disable(ucb->ac97);
- printk(KERN_DEBUG "UCB1400: x/y = %d/%d\n", x_res, y_res);
+ dev_dbg(&pdev->dev, "x/y = %d/%d\n", x_res, y_res);
input_set_abs_params(ucb->ts_idev, ABS_X, 0, x_res, 0, 0);
input_set_abs_params(ucb->ts_idev, ABS_Y, 0, y_res, 0, 0);
input_set_abs_params(ucb->ts_idev, ABS_PRESSURE, 0, 0, 0, 0);
+ ucb1400_ts_stop(ucb);
+
+ error = request_threaded_irq(ucb->irq, NULL, ucb1400_irq,
+ IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+ "UCB1400", ucb);
+ if (error) {
+ dev_err(&pdev->dev,
+ "unable to grab irq%d: %d\n", ucb->irq, error);
+ goto err_free_devs;
+ }
+
error = input_register_device(ucb->ts_idev);
if (error)
goto err_free_irq;
@@ -416,56 +395,61 @@ err_free_devs:
input_free_device(ucb->ts_idev);
err:
return error;
-
}
-static int ucb1400_ts_remove(struct platform_device *dev)
+static int __devexit ucb1400_ts_remove(struct platform_device *pdev)
{
- struct ucb1400_ts *ucb = dev->dev.platform_data;
+ struct ucb1400_ts *ucb = pdev->dev.platform_data;
free_irq(ucb->irq, ucb);
input_unregister_device(ucb->ts_idev);
+
return 0;
}
-#ifdef CONFIG_PM
-static int ucb1400_ts_resume(struct platform_device *dev)
+#ifdef CONFIG_PM_SLEEP
+static int ucb1400_ts_suspend(struct device *dev)
{
- struct ucb1400_ts *ucb = dev->dev.platform_data;
-
- if (ucb->ts_task) {
- /*
- * Restart the TS thread to ensure the
- * TS interrupt mode is set up again
- * after sleep.
- */
- ucb->ts_restart = 1;
- wake_up(&ucb->ts_wait);
- }
+ struct ucb1400_ts *ucb = dev->platform_data;
+ struct input_dev *idev = ucb->ts_idev;
+
+ mutex_lock(&idev->mutex);
+
+ if (idev->users)
+ ucb1400_ts_start(ucb);
+
+ mutex_unlock(&idev->mutex);
+ return 0;
+}
+
+static int ucb1400_ts_resume(struct device *dev)
+{
+ struct ucb1400_ts *ucb = dev->platform_data;
+ struct input_dev *idev = ucb->ts_idev;
+
+ mutex_lock(&idev->mutex);
+
+ if (idev->users)
+ ucb1400_ts_stop(ucb);
+
+ mutex_unlock(&idev->mutex);
return 0;
}
-#else
-#define ucb1400_ts_resume NULL
#endif
+static SIMPLE_DEV_PM_OPS(ucb1400_ts_pm_ops,
+ ucb1400_ts_suspend, ucb1400_ts_resume);
+
static struct platform_driver ucb1400_ts_driver = {
.probe = ucb1400_ts_probe,
- .remove = ucb1400_ts_remove,
- .resume = ucb1400_ts_resume,
+ .remove = __devexit_p(ucb1400_ts_remove),
.driver = {
.name = "ucb1400_ts",
+ .owner = THIS_MODULE,
+ .pm = &ucb1400_ts_pm_ops,
},
};
-
-static int __init ucb1400_ts_init(void)
-{
- return platform_driver_register(&ucb1400_ts_driver);
-}
-
-static void __exit ucb1400_ts_exit(void)
-{
- platform_driver_unregister(&ucb1400_ts_driver);
-}
+module_platform_driver(ucb1400_ts_driver);
module_param(adcsync, bool, 0444);
MODULE_PARM_DESC(adcsync, "Synchronize touch readings with ADCSYNC pin.");
@@ -479,8 +463,5 @@ MODULE_PARM_DESC(ts_delay_pressure,
"delay between panel setup and pressure read."
" Default = 0us.");
-module_init(ucb1400_ts_init);
-module_exit(ucb1400_ts_exit);
-
MODULE_DESCRIPTION("Philips UCB1400 touchscreen driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index 73fd664..3a5ebf4 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -16,6 +16,7 @@
* - JASTEC USB touch controller/DigiTech DTR-02U
* - Zytronic capacitive touchscreen
* - NEXIO/iNexio
+ * - Elo TouchSystems 2700 IntelliTouch
*
* Copyright (C) 2004-2007 by Daniel Ritz <daniel.ritz@gmx.ch>
* Copyright (C) by Todd E. Johnson (mtouchusb.c)
@@ -59,11 +60,11 @@
#define DRIVER_AUTHOR "Daniel Ritz <daniel.ritz@gmx.ch>"
#define DRIVER_DESC "USB Touchscreen Driver"
-static int swap_xy;
+static bool swap_xy;
module_param(swap_xy, bool, 0644);
MODULE_PARM_DESC(swap_xy, "If set X and Y axes are swapped.");
-static int hwcalib_xy;
+static bool hwcalib_xy;
module_param(hwcalib_xy, bool, 0644);
MODULE_PARM_DESC(hwcalib_xy, "If set hw-calibrated X/Y are used if available");
@@ -138,6 +139,7 @@ enum {
DEVTYPE_ZYTRONIC,
DEVTYPE_TC45USB,
DEVTYPE_NEXIO,
+ DEVTYPE_ELO,
};
#define USB_DEVICE_HID_CLASS(vend, prod) \
@@ -239,6 +241,10 @@ static const struct usb_device_id usbtouch_devices[] = {
.driver_info = DEVTYPE_NEXIO},
#endif
+#ifdef CONFIG_TOUCHSCREEN_USB_ELO
+ {USB_DEVICE(0x04e7, 0x0020), .driver_info = DEVTYPE_ELO},
+#endif
+
{}
};
@@ -945,6 +951,24 @@ static int nexio_read_data(struct usbtouch_usb *usbtouch, unsigned char *pkt)
/*****************************************************************************
+ * ELO part
+ */
+
+#ifdef CONFIG_TOUCHSCREEN_USB_ELO
+
+static int elo_read_data(struct usbtouch_usb *dev, unsigned char *pkt)
+{
+ dev->x = (pkt[3] << 8) | pkt[2];
+ dev->y = (pkt[5] << 8) | pkt[4];
+ dev->touch = pkt[6] > 0;
+ dev->press = pkt[6];
+
+ return 1;
+}
+#endif
+
+
+/*****************************************************************************
* the different device descriptors
*/
#ifdef MULTI_PACKET
@@ -953,6 +977,18 @@ static void usbtouch_process_multi(struct usbtouch_usb *usbtouch,
#endif
static struct usbtouch_device_info usbtouch_dev_info[] = {
+#ifdef CONFIG_TOUCHSCREEN_USB_ELO
+ [DEVTYPE_ELO] = {
+ .min_xc = 0x0,
+ .max_xc = 0x0fff,
+ .min_yc = 0x0,
+ .max_yc = 0x0fff,
+ .max_press = 0xff,
+ .rept_size = 8,
+ .read_data = elo_read_data,
+ },
+#endif
+
#ifdef CONFIG_TOUCHSCREEN_USB_EGALAX
[DEVTYPE_EGALAX] = {
.min_xc = 0x0,
@@ -1580,18 +1616,7 @@ static struct usb_driver usbtouch_driver = {
.supports_autosuspend = 1,
};
-static int __init usbtouch_init(void)
-{
- return usb_register(&usbtouch_driver);
-}
-
-static void __exit usbtouch_cleanup(void)
-{
- usb_deregister(&usbtouch_driver);
-}
-
-module_init(usbtouch_init);
-module_exit(usbtouch_cleanup);
+module_usb_driver(usbtouch_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/input/touchscreen/w90p910_ts.c b/drivers/input/touchscreen/w90p910_ts.c
index 217aa51..9396b21 100644
--- a/drivers/input/touchscreen/w90p910_ts.c
+++ b/drivers/input/touchscreen/w90p910_ts.c
@@ -331,19 +331,7 @@ static struct platform_driver w90x900ts_driver = {
.owner = THIS_MODULE,
},
};
-
-static int __init w90x900ts_init(void)
-{
- return platform_driver_register(&w90x900ts_driver);
-}
-
-static void __exit w90x900ts_exit(void)
-{
- platform_driver_unregister(&w90x900ts_driver);
-}
-
-module_init(w90x900ts_init);
-module_exit(w90x900ts_exit);
+module_platform_driver(w90x900ts_driver);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("w90p910 touch screen driver!");
diff --git a/drivers/input/touchscreen/wm831x-ts.c b/drivers/input/touchscreen/wm831x-ts.c
index 9175d49..4bc851a 100644
--- a/drivers/input/touchscreen/wm831x-ts.c
+++ b/drivers/input/touchscreen/wm831x-ts.c
@@ -401,18 +401,7 @@ static struct platform_driver wm831x_ts_driver = {
.probe = wm831x_ts_probe,
.remove = __devexit_p(wm831x_ts_remove),
};
-
-static int __init wm831x_ts_init(void)
-{
- return platform_driver_register(&wm831x_ts_driver);
-}
-module_init(wm831x_ts_init);
-
-static void __exit wm831x_ts_exit(void)
-{
- platform_driver_unregister(&wm831x_ts_driver);
-}
-module_exit(wm831x_ts_exit);
+module_platform_driver(wm831x_ts_driver);
/* Module information */
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
diff --git a/drivers/input/touchscreen/zylonite-wm97xx.c b/drivers/input/touchscreen/zylonite-wm97xx.c
index f6328c0..bf0869a 100644
--- a/drivers/input/touchscreen/zylonite-wm97xx.c
+++ b/drivers/input/touchscreen/zylonite-wm97xx.c
@@ -22,6 +22,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
+#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -192,8 +193,8 @@ static int zylonite_wm97xx_probe(struct platform_device *pdev)
else
gpio_touch_irq = mfp_to_gpio(MFP_PIN_GPIO26);
- wm->pen_irq = IRQ_GPIO(gpio_touch_irq);
- irq_set_irq_type(IRQ_GPIO(gpio_touch_irq), IRQ_TYPE_EDGE_BOTH);
+ wm->pen_irq = gpio_to_irq(gpio_touch_irq);
+ irq_set_irq_type(wm->pen_irq, IRQ_TYPE_EDGE_BOTH);
wm97xx_config_gpio(wm, WM97XX_GPIO_13, WM97XX_GPIO_IN,
WM97XX_GPIO_POL_HIGH,
@@ -223,19 +224,7 @@ static struct platform_driver zylonite_wm97xx_driver = {
.name = "wm97xx-touch",
},
};
-
-static int __init zylonite_wm97xx_init(void)
-{
- return platform_driver_register(&zylonite_wm97xx_driver);
-}
-
-static void __exit zylonite_wm97xx_exit(void)
-{
- platform_driver_unregister(&zylonite_wm97xx_driver);
-}
-
-module_init(zylonite_wm97xx_init);
-module_exit(zylonite_wm97xx_exit);
+module_platform_driver(zylonite_wm97xx_driver);
/* Module information */
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 5414253b..6bea696 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -34,7 +34,9 @@ config AMD_IOMMU
bool "AMD IOMMU support"
select SWIOTLB
select PCI_MSI
- select PCI_IOV
+ select PCI_ATS
+ select PCI_PRI
+ select PCI_PASID
select IOMMU_API
depends on X86_64 && PCI && ACPI
---help---
@@ -58,6 +60,15 @@ config AMD_IOMMU_STATS
information to userspace via debugfs.
If unsure, say N.
+config AMD_IOMMU_V2
+ tristate "AMD IOMMU Version 2 driver (EXPERIMENTAL)"
+ depends on AMD_IOMMU && PROFILING && EXPERIMENTAL
+ select MMU_NOTIFIER
+ ---help---
+ This option enables support for the AMD IOMMUv2 features of the IOMMU
+ hardware. Select this option if you want to use devices that support
+ the PCI PRI and PASID interfaces.
+
# Intel IOMMU support
config DMAR_TABLE
bool
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 2f44487..0e36b49 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_IOMMU_API) += iommu.o
obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
+obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
obj-$(CONFIG_IRQ_REMAP) += intr_remapping.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 4ee277a..f75e060 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -17,6 +17,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/ratelimit.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/bitmap.h>
@@ -28,6 +29,8 @@
#include <linux/iommu.h>
#include <linux/delay.h>
#include <linux/amd-iommu.h>
+#include <linux/notifier.h>
+#include <linux/export.h>
#include <asm/msidef.h>
#include <asm/proto.h>
#include <asm/iommu.h>
@@ -41,6 +44,24 @@
#define LOOP_TIMEOUT 100000
+/*
+ * This bitmap is used to advertise the page sizes our hardware supports
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * Traditionally the IOMMU core just handed us the mappings directly,
+ * after making sure the size is an order of a 4KiB page and that the
+ * mapping has natural alignment.
+ *
+ * To retain this behavior, we currently advertise that we support
+ * all page sizes that are an order of 4KiB.
+ *
+ * If at some point we'd like to utilize the IOMMU core's new behavior,
+ * we could change this to advertise the real page sizes we support.
+ */
+#define AMD_IOMMU_PGSIZES (~0xFFFUL)
+
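As a standalone illustration of what this bitmap advertises (a user-space sketch, not kernel code): every bit from 12 upward is set in ~0xFFFUL, so any power-of-two mapping size of 4KiB or larger passes, while sub-page or non-power-of-two sizes do not.

#include <stdio.h>

#define AMD_IOMMU_PGSIZES (~0xFFFUL)

static int pgsize_advertised(unsigned long size)
{
	/* must be a power of two and have its bit set in the bitmap */
	return size && !(size & (size - 1)) && (size & AMD_IOMMU_PGSIZES);
}

int main(void)
{
	printf("4KiB: %d\n", pgsize_advertised(0x1000));   /* 1 */
	printf("2MiB: %d\n", pgsize_advertised(0x200000)); /* 1 */
	printf("2KiB: %d\n", pgsize_advertised(0x800));    /* 0: below 4KiB */
	printf("6KiB: %d\n", pgsize_advertised(0x1800));   /* 0: not a power of two */
	return 0;
}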
static DEFINE_RWLOCK(amd_iommu_devtable_lock);
/* A list of preallocated protection domains */
@@ -59,6 +80,9 @@ static struct protection_domain *pt_domain;
static struct iommu_ops amd_iommu_ops;
+static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
+int amd_iommu_max_glx_val = -1;
+
/*
* general struct to manage commands send to an IOMMU
*/
@@ -67,6 +91,7 @@ struct iommu_cmd {
};
static void update_domain(struct protection_domain *domain);
+static int __init alloc_passthrough_domain(void);
/****************************************************************************
*
@@ -147,6 +172,33 @@ static struct iommu_dev_data *get_dev_data(struct device *dev)
return dev->archdata.iommu;
}
+static bool pci_iommuv2_capable(struct pci_dev *pdev)
+{
+ static const int caps[] = {
+ PCI_EXT_CAP_ID_ATS,
+ PCI_EXT_CAP_ID_PRI,
+ PCI_EXT_CAP_ID_PASID,
+ };
+ int i, pos;
+
+ for (i = 0; i < 3; ++i) {
+ pos = pci_find_ext_capability(pdev, caps[i]);
+ if (pos == 0)
+ return false;
+ }
+
+ return true;
+}
+
+static bool pdev_pri_erratum(struct pci_dev *pdev, u32 erratum)
+{
+ struct iommu_dev_data *dev_data;
+
+ dev_data = get_dev_data(&pdev->dev);
+
+ return dev_data->errata & (1 << erratum) ? true : false;
+}
+
/*
* In this function the list of preallocated protection domains is traversed to
* find the domain for a specific device
@@ -204,6 +256,7 @@ static bool check_device(struct device *dev)
static int iommu_init_device(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct iommu_dev_data *dev_data;
u16 alias;
@@ -228,6 +281,13 @@ static int iommu_init_device(struct device *dev)
dev_data->alias_data = alias_data;
}
+ if (pci_iommuv2_capable(pdev)) {
+ struct amd_iommu *iommu;
+
+ iommu = amd_iommu_rlookup_table[dev_data->devid];
+ dev_data->iommu_v2 = iommu->is_iommu_v2;
+ }
+
dev->archdata.iommu = dev_data;
return 0;
@@ -317,6 +377,11 @@ DECLARE_STATS_COUNTER(domain_flush_single);
DECLARE_STATS_COUNTER(domain_flush_all);
DECLARE_STATS_COUNTER(alloced_io_mem);
DECLARE_STATS_COUNTER(total_map_requests);
+DECLARE_STATS_COUNTER(complete_ppr);
+DECLARE_STATS_COUNTER(invalidate_iotlb);
+DECLARE_STATS_COUNTER(invalidate_iotlb_all);
+DECLARE_STATS_COUNTER(pri_requests);
+
static struct dentry *stats_dir;
static struct dentry *de_fflush;
@@ -351,6 +416,10 @@ static void amd_iommu_stats_init(void)
amd_iommu_stats_add(&domain_flush_all);
amd_iommu_stats_add(&alloced_io_mem);
amd_iommu_stats_add(&total_map_requests);
+ amd_iommu_stats_add(&complete_ppr);
+ amd_iommu_stats_add(&invalidate_iotlb);
+ amd_iommu_stats_add(&invalidate_iotlb_all);
+ amd_iommu_stats_add(&pri_requests);
}
#endif
@@ -365,8 +434,8 @@ static void dump_dte_entry(u16 devid)
{
int i;
- for (i = 0; i < 8; ++i)
- pr_err("AMD-Vi: DTE[%d]: %08x\n", i,
+ for (i = 0; i < 4; ++i)
+ pr_err("AMD-Vi: DTE[%d]: %016llx\n", i,
amd_iommu_dev_table[devid].data[i]);
}
@@ -461,12 +530,84 @@ static void iommu_poll_events(struct amd_iommu *iommu)
spin_unlock_irqrestore(&iommu->lock, flags);
}
+static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
+{
+ struct amd_iommu_fault fault;
+ volatile u64 *raw;
+ int i;
+
+ INC_STATS_COUNTER(pri_requests);
+
+ raw = (u64 *)(iommu->ppr_log + head);
+
+ /*
+ * Hardware bug: Interrupt may arrive before the entry is written to
+ * memory. If this happens we need to wait for the entry to arrive.
+ */
+ for (i = 0; i < LOOP_TIMEOUT; ++i) {
+ if (PPR_REQ_TYPE(raw[0]) != 0)
+ break;
+ udelay(1);
+ }
+
+ if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
+ pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
+ return;
+ }
+
+ fault.address = raw[1];
+ fault.pasid = PPR_PASID(raw[0]);
+ fault.device_id = PPR_DEVID(raw[0]);
+ fault.tag = PPR_TAG(raw[0]);
+ fault.flags = PPR_FLAGS(raw[0]);
+
+ /*
+ * To detect the hardware bug we need to clear the entry
+ * back to zero.
+ */
+ raw[0] = raw[1] = 0;
+
+ atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
+}
+
+static void iommu_poll_ppr_log(struct amd_iommu *iommu)
+{
+ unsigned long flags;
+ u32 head, tail;
+
+ if (iommu->ppr_log == NULL)
+ return;
+
+ spin_lock_irqsave(&iommu->lock, flags);
+
+ head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+ tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
+
+ while (head != tail) {
+
+ /* Handle PPR entry */
+ iommu_handle_ppr_entry(iommu, head);
+
+ /* Update and refresh ring-buffer state */
+ head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
+ writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+ tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
+ }
+
+ /* enable ppr interrupts again */
+ writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
+
+ spin_unlock_irqrestore(&iommu->lock, flags);
+}
+
irqreturn_t amd_iommu_int_thread(int irq, void *data)
{
struct amd_iommu *iommu;
- for_each_iommu(iommu)
+ for_each_iommu(iommu) {
iommu_poll_events(iommu);
+ iommu_poll_ppr_log(iommu);
+ }
return IRQ_HANDLED;
}
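The PPR log is consumed the same way as the event log: a ring buffer in which the driver's head (consumer) index chases the tail (producer) index written by hardware, wrapping at the log size. A minimal standalone sketch of that loop, with plain variables standing in for the MMIO head/tail registers (illustrative only):

#include <stdint.h>
#include <stdio.h>

#define ENTRY_SIZE 16                 /* PPR_ENTRY_SIZE */
#define LOG_SIZE   (ENTRY_SIZE * 512) /* PPR_LOG_SIZE   */

static uint8_t log_buf[LOG_SIZE];

static void handle(uint8_t *entry)
{
	printf("entry at offset %ld\n", (long)(entry - log_buf));
}

int main(void)
{
	uint32_t head = 0;
	uint32_t tail = 3 * ENTRY_SIZE;    /* pretend hardware queued 3 entries */

	while (head != tail) {
		handle(log_buf + head);                /* process one entry      */
		head = (head + ENTRY_SIZE) % LOG_SIZE; /* advance consumer index */
	}
	return 0;
}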
@@ -595,6 +736,60 @@ static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep,
cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
}
+static void build_inv_iommu_pasid(struct iommu_cmd *cmd, u16 domid, int pasid,
+ u64 address, bool size)
+{
+ memset(cmd, 0, sizeof(*cmd));
+
+ address &= ~(0xfffULL);
+
+ cmd->data[0] = pasid & PASID_MASK;
+ cmd->data[1] = domid;
+ cmd->data[2] = lower_32_bits(address);
+ cmd->data[3] = upper_32_bits(address);
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
+ if (size)
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
+ CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
+}
+
+static void build_inv_iotlb_pasid(struct iommu_cmd *cmd, u16 devid, int pasid,
+ int qdep, u64 address, bool size)
+{
+ memset(cmd, 0, sizeof(*cmd));
+
+ address &= ~(0xfffULL);
+
+ cmd->data[0] = devid;
+ cmd->data[0] |= (pasid & 0xff) << 16;
+ cmd->data[0] |= (qdep & 0xff) << 24;
+ cmd->data[1] = devid;
+ cmd->data[1] |= ((pasid >> 8) & 0xfff) << 16;
+ cmd->data[2] = lower_32_bits(address);
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_GN_MASK;
+ cmd->data[3] = upper_32_bits(address);
+ if (size)
+ cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
+ CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES);
+}
+
+static void build_complete_ppr(struct iommu_cmd *cmd, u16 devid, int pasid,
+ int status, int tag, bool gn)
+{
+ memset(cmd, 0, sizeof(*cmd));
+
+ cmd->data[0] = devid;
+ if (gn) {
+ cmd->data[1] = pasid & PASID_MASK;
+ cmd->data[2] = CMD_INV_IOMMU_PAGES_GN_MASK;
+ }
+ cmd->data[3] = tag & 0x1ff;
+ cmd->data[3] |= (status & PPR_STATUS_MASK) << PPR_STATUS_SHIFT;
+
+ CMD_SET_TYPE(cmd, CMD_COMPLETE_PPR);
+}
+
static void build_inv_all(struct iommu_cmd *cmd)
{
memset(cmd, 0, sizeof(*cmd));
@@ -1496,6 +1691,48 @@ static void free_pagetable(struct protection_domain *domain)
domain->pt_root = NULL;
}
+static void free_gcr3_tbl_level1(u64 *tbl)
+{
+ u64 *ptr;
+ int i;
+
+ for (i = 0; i < 512; ++i) {
+ if (!(tbl[i] & GCR3_VALID))
+ continue;
+
+ ptr = __va(tbl[i] & PAGE_MASK);
+
+ free_page((unsigned long)ptr);
+ }
+}
+
+static void free_gcr3_tbl_level2(u64 *tbl)
+{
+ u64 *ptr;
+ int i;
+
+ for (i = 0; i < 512; ++i) {
+ if (!(tbl[i] & GCR3_VALID))
+ continue;
+
+ ptr = __va(tbl[i] & PAGE_MASK);
+
+ free_gcr3_tbl_level1(ptr);
+ }
+}
+
+static void free_gcr3_table(struct protection_domain *domain)
+{
+ if (domain->glx == 2)
+ free_gcr3_tbl_level2(domain->gcr3_tbl);
+ else if (domain->glx == 1)
+ free_gcr3_tbl_level1(domain->gcr3_tbl);
+ else if (domain->glx != 0)
+ BUG();
+
+ free_page((unsigned long)domain->gcr3_tbl);
+}
+
/*
* Free a domain, only used if something went wrong in the
* allocation path and we need to free an already allocated page table
@@ -1582,20 +1819,52 @@ static bool dma_ops_domain(struct protection_domain *domain)
static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats)
{
- u64 pte_root = virt_to_phys(domain->pt_root);
- u32 flags = 0;
+ u64 pte_root = 0;
+ u64 flags = 0;
+
+ if (domain->mode != PAGE_MODE_NONE)
+ pte_root = virt_to_phys(domain->pt_root);
pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK)
<< DEV_ENTRY_MODE_SHIFT;
pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;
+ flags = amd_iommu_dev_table[devid].data[1];
+
if (ats)
flags |= DTE_FLAG_IOTLB;
- amd_iommu_dev_table[devid].data[3] |= flags;
- amd_iommu_dev_table[devid].data[2] = domain->id;
- amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root);
- amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root);
+ if (domain->flags & PD_IOMMUV2_MASK) {
+ u64 gcr3 = __pa(domain->gcr3_tbl);
+ u64 glx = domain->glx;
+ u64 tmp;
+
+ pte_root |= DTE_FLAG_GV;
+ pte_root |= (glx & DTE_GLX_MASK) << DTE_GLX_SHIFT;
+
+ /* First mask out possible old values for GCR3 table */
+ tmp = DTE_GCR3_VAL_B(~0ULL) << DTE_GCR3_SHIFT_B;
+ flags &= ~tmp;
+
+ tmp = DTE_GCR3_VAL_C(~0ULL) << DTE_GCR3_SHIFT_C;
+ flags &= ~tmp;
+
+ /* Encode GCR3 table into DTE */
+ tmp = DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
+ pte_root |= tmp;
+
+ tmp = DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
+ flags |= tmp;
+
+ tmp = DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
+ flags |= tmp;
+ }
+
+ flags &= ~(0xffffUL);
+ flags |= domain->id;
+
+ amd_iommu_dev_table[devid].data[1] = flags;
+ amd_iommu_dev_table[devid].data[0] = pte_root;
}
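The GCR3 encoding above splits the page-aligned table pointer into three fields (bits 12-14, 15-30 and 31-50) spread across the two DTE words. A standalone check that the split and reassembly are lossless, mirroring the DTE_GCR3_VAL_A/B/C masks (user-space sketch, not kernel code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t gcr3 = 0x123456789000ULL;       /* page-aligned table address */

	uint64_t a = (gcr3 >> 12) & 0x00007ULL;  /* DTE_GCR3_VAL_A: bits 12-14 */
	uint64_t b = (gcr3 >> 15) & 0x0ffffULL;  /* DTE_GCR3_VAL_B: bits 15-30 */
	uint64_t c = (gcr3 >> 31) & 0xfffffULL;  /* DTE_GCR3_VAL_C: bits 31-50 */

	uint64_t rebuilt = (a << 12) | (b << 15) | (c << 31);

	assert(rebuilt == gcr3);
	return 0;
}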
static void clear_dte_entry(u16 devid)
@@ -1603,7 +1872,6 @@ static void clear_dte_entry(u16 devid)
/* remove entry from the device table seen by the hardware */
amd_iommu_dev_table[devid].data[0] = IOMMU_PTE_P | IOMMU_PTE_TV;
amd_iommu_dev_table[devid].data[1] = 0;
- amd_iommu_dev_table[devid].data[2] = 0;
amd_iommu_apply_erratum_63(devid);
}
@@ -1696,6 +1964,93 @@ out_unlock:
return ret;
}
+
+static void pdev_iommuv2_disable(struct pci_dev *pdev)
+{
+ pci_disable_ats(pdev);
+ pci_disable_pri(pdev);
+ pci_disable_pasid(pdev);
+}
+
+/* FIXME: Change generic reset-function to do the same */
+static int pri_reset_while_enabled(struct pci_dev *pdev)
+{
+ u16 control;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+ if (!pos)
+ return -EINVAL;
+
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ control |= PCI_PRI_CTRL_RESET;
+ pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
+
+ return 0;
+}
+
+static int pdev_iommuv2_enable(struct pci_dev *pdev)
+{
+ bool reset_enable;
+ int reqs, ret;
+
+ /* FIXME: Hardcode number of outstanding requests for now */
+ reqs = 32;
+ if (pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_LIMIT_REQ_ONE))
+ reqs = 1;
+ reset_enable = pdev_pri_erratum(pdev, AMD_PRI_DEV_ERRATUM_ENABLE_RESET);
+
+ /* Only allow access to user-accessible pages */
+ ret = pci_enable_pasid(pdev, 0);
+ if (ret)
+ goto out_err;
+
+ /* First reset the PRI state of the device */
+ ret = pci_reset_pri(pdev);
+ if (ret)
+ goto out_err;
+
+ /* Enable PRI */
+ ret = pci_enable_pri(pdev, reqs);
+ if (ret)
+ goto out_err;
+
+ if (reset_enable) {
+ ret = pri_reset_while_enabled(pdev);
+ if (ret)
+ goto out_err;
+ }
+
+ ret = pci_enable_ats(pdev, PAGE_SHIFT);
+ if (ret)
+ goto out_err;
+
+ return 0;
+
+out_err:
+ pci_disable_pri(pdev);
+ pci_disable_pasid(pdev);
+
+ return ret;
+}
+
+/* FIXME: Move this to PCI code */
+#define PCI_PRI_TLP_OFF (1 << 2)
+
+bool pci_pri_tlp_required(struct pci_dev *pdev)
+{
+ u16 control;
+ int pos;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+ if (!pos)
+ return false;
+
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+
+ return (control & PCI_PRI_TLP_OFF) ? true : false;
+}
+
/*
 * If a device is not yet associated with a domain, this function
 * assigns it to a domain, making it visible to the hardware
@@ -1710,7 +2065,18 @@ static int attach_device(struct device *dev,
dev_data = get_dev_data(dev);
- if (amd_iommu_iotlb_sup && pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
+ if (domain->flags & PD_IOMMUV2_MASK) {
+ if (!dev_data->iommu_v2 || !dev_data->passthrough)
+ return -EINVAL;
+
+ if (pdev_iommuv2_enable(pdev) != 0)
+ return -EINVAL;
+
+ dev_data->ats.enabled = true;
+ dev_data->ats.qdep = pci_ats_queue_depth(pdev);
+ dev_data->pri_tlp = pci_pri_tlp_required(pdev);
+ } else if (amd_iommu_iotlb_sup &&
+ pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
dev_data->ats.enabled = true;
dev_data->ats.qdep = pci_ats_queue_depth(pdev);
}
@@ -1760,7 +2126,7 @@ static void __detach_device(struct iommu_dev_data *dev_data)
* passthrough domain if it is detached from any other domain.
* Make sure we can deassign from the pt_domain itself.
*/
- if (iommu_pass_through &&
+ if (dev_data->passthrough &&
(dev_data->domain == NULL && domain != pt_domain))
__attach_device(dev_data, pt_domain);
}
@@ -1770,20 +2136,24 @@ static void __detach_device(struct iommu_dev_data *dev_data)
*/
static void detach_device(struct device *dev)
{
+ struct protection_domain *domain;
struct iommu_dev_data *dev_data;
unsigned long flags;
dev_data = get_dev_data(dev);
+ domain = dev_data->domain;
/* lock device table */
write_lock_irqsave(&amd_iommu_devtable_lock, flags);
__detach_device(dev_data);
write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
- if (dev_data->ats.enabled) {
+ if (domain->flags & PD_IOMMUV2_MASK)
+ pdev_iommuv2_disable(to_pci_dev(dev));
+ else if (dev_data->ats.enabled)
pci_disable_ats(to_pci_dev(dev));
- dev_data->ats.enabled = false;
- }
+
+ dev_data->ats.enabled = false;
}
/*
@@ -1818,18 +2188,20 @@ static struct protection_domain *domain_for_device(struct device *dev)
static int device_change_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct device *dev = data;
- u16 devid;
- struct protection_domain *domain;
struct dma_ops_domain *dma_domain;
+ struct protection_domain *domain;
+ struct iommu_dev_data *dev_data;
+ struct device *dev = data;
struct amd_iommu *iommu;
unsigned long flags;
+ u16 devid;
if (!check_device(dev))
return 0;
- devid = get_device_id(dev);
- iommu = amd_iommu_rlookup_table[devid];
+ devid = get_device_id(dev);
+ iommu = amd_iommu_rlookup_table[devid];
+ dev_data = get_dev_data(dev);
switch (action) {
case BUS_NOTIFY_UNBOUND_DRIVER:
@@ -1838,7 +2210,7 @@ static int device_change_notifier(struct notifier_block *nb,
if (!domain)
goto out;
- if (iommu_pass_through)
+ if (dev_data->passthrough)
break;
detach_device(dev);
break;
@@ -2434,8 +2806,9 @@ static int amd_iommu_dma_supported(struct device *dev, u64 mask)
*/
static void prealloc_protection_domains(void)
{
- struct pci_dev *dev = NULL;
+ struct iommu_dev_data *dev_data;
struct dma_ops_domain *dma_dom;
+ struct pci_dev *dev = NULL;
u16 devid;
for_each_pci_dev(dev) {
@@ -2444,6 +2817,16 @@ static void prealloc_protection_domains(void)
if (!check_device(&dev->dev))
continue;
+ dev_data = get_dev_data(&dev->dev);
+ if (!amd_iommu_force_isolation && dev_data->iommu_v2) {
+ /* Make sure passthrough domain is allocated */
+ alloc_passthrough_domain();
+ dev_data->passthrough = true;
+ attach_device(&dev->dev, pt_domain);
+ pr_info("AMD-Vi: Using passthough domain for device %s\n",
+ dev_name(&dev->dev));
+ }
+
/* Is there already any domain for it? */
if (domain_for_device(&dev->dev))
continue;
@@ -2474,16 +2857,25 @@ static struct dma_map_ops amd_iommu_dma_ops = {
static unsigned device_dma_ops_init(void)
{
+ struct iommu_dev_data *dev_data;
struct pci_dev *pdev = NULL;
unsigned unhandled = 0;
for_each_pci_dev(pdev) {
if (!check_device(&pdev->dev)) {
+
+ iommu_ignore_device(&pdev->dev);
+
unhandled += 1;
continue;
}
- pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
+ dev_data = get_dev_data(&pdev->dev);
+
+ if (!dev_data->passthrough)
+ pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
+ else
+ pdev->dev.archdata.dma_ops = &nommu_dma_ops;
}
return unhandled;
@@ -2610,6 +3002,20 @@ out_err:
return NULL;
}
+static int __init alloc_passthrough_domain(void)
+{
+ if (pt_domain != NULL)
+ return 0;
+
+ /* allocate passthrough domain */
+ pt_domain = protection_domain_alloc();
+ if (!pt_domain)
+ return -ENOMEM;
+
+ pt_domain->mode = PAGE_MODE_NONE;
+
+ return 0;
+}
static int amd_iommu_domain_init(struct iommu_domain *dom)
{
struct protection_domain *domain;
@@ -2623,6 +3029,8 @@ static int amd_iommu_domain_init(struct iommu_domain *dom)
if (!domain->pt_root)
goto out_free;
+ domain->iommu_domain = dom;
+
dom->priv = domain;
return 0;
@@ -2645,7 +3053,11 @@ static void amd_iommu_domain_destroy(struct iommu_domain *dom)
BUG_ON(domain->dev_cnt != 0);
- free_pagetable(domain);
+ if (domain->mode != PAGE_MODE_NONE)
+ free_pagetable(domain);
+
+ if (domain->flags & PD_IOMMUV2_MASK)
+ free_gcr3_table(domain);
protection_domain_free(domain);
@@ -2702,13 +3114,15 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
}
static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
- phys_addr_t paddr, int gfp_order, int iommu_prot)
+ phys_addr_t paddr, size_t page_size, int iommu_prot)
{
- unsigned long page_size = 0x1000UL << gfp_order;
struct protection_domain *domain = dom->priv;
int prot = 0;
int ret;
+ if (domain->mode == PAGE_MODE_NONE)
+ return -EINVAL;
+
if (iommu_prot & IOMMU_READ)
prot |= IOMMU_PROT_IR;
if (iommu_prot & IOMMU_WRITE)
@@ -2721,13 +3135,14 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
return ret;
}
-static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
- int gfp_order)
+static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
+ size_t page_size)
{
struct protection_domain *domain = dom->priv;
- unsigned long page_size, unmap_size;
+ size_t unmap_size;
- page_size = 0x1000UL << gfp_order;
+ if (domain->mode == PAGE_MODE_NONE)
+ return -EINVAL;
mutex_lock(&domain->api_lock);
unmap_size = iommu_unmap_page(domain, iova, page_size);
@@ -2735,7 +3150,7 @@ static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
domain_flush_tlb_pde(domain);
- return get_order(unmap_size);
+ return unmap_size;
}
static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
@@ -2746,6 +3161,9 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
phys_addr_t paddr;
u64 *pte, __pte;
+ if (domain->mode == PAGE_MODE_NONE)
+ return iova;
+
pte = fetch_pte(domain, iova);
if (!pte || !IOMMU_PTE_PRESENT(*pte))
@@ -2773,6 +3191,26 @@ static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
return 0;
}
+static int amd_iommu_device_group(struct device *dev, unsigned int *groupid)
+{
+ struct iommu_dev_data *dev_data = dev->archdata.iommu;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ u16 devid;
+
+ if (!dev_data)
+ return -ENODEV;
+
+ if (pdev->is_virtfn || !iommu_group_mf)
+ devid = dev_data->devid;
+ else
+ devid = calc_devid(pdev->bus->number,
+ PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
+
+ *groupid = amd_iommu_alias_table[devid];
+
+ return 0;
+}
+
static struct iommu_ops amd_iommu_ops = {
.domain_init = amd_iommu_domain_init,
.domain_destroy = amd_iommu_domain_destroy,
@@ -2782,6 +3220,8 @@ static struct iommu_ops amd_iommu_ops = {
.unmap = amd_iommu_unmap,
.iova_to_phys = amd_iommu_iova_to_phys,
.domain_has_cap = amd_iommu_domain_has_cap,
+ .device_group = amd_iommu_device_group,
+ .pgsize_bitmap = AMD_IOMMU_PGSIZES,
};
/*****************************************************************************
@@ -2796,21 +3236,23 @@ static struct iommu_ops amd_iommu_ops = {
int __init amd_iommu_init_passthrough(void)
{
- struct amd_iommu *iommu;
+ struct iommu_dev_data *dev_data;
struct pci_dev *dev = NULL;
+ struct amd_iommu *iommu;
u16 devid;
+ int ret;
- /* allocate passthrough domain */
- pt_domain = protection_domain_alloc();
- if (!pt_domain)
- return -ENOMEM;
-
- pt_domain->mode |= PAGE_MODE_NONE;
+ ret = alloc_passthrough_domain();
+ if (ret)
+ return ret;
for_each_pci_dev(dev) {
if (!check_device(&dev->dev))
continue;
+ dev_data = get_dev_data(&dev->dev);
+ dev_data->passthrough = true;
+
devid = get_device_id(&dev->dev);
iommu = amd_iommu_rlookup_table[devid];
@@ -2820,7 +3262,375 @@ int __init amd_iommu_init_passthrough(void)
attach_device(&dev->dev, pt_domain);
}
+ amd_iommu_stats_init();
+
pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
return 0;
}
+
+/* IOMMUv2 specific functions */
+int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&ppr_notifier, nb);
+}
+EXPORT_SYMBOL(amd_iommu_register_ppr_notifier);
+
+int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&ppr_notifier, nb);
+}
+EXPORT_SYMBOL(amd_iommu_unregister_ppr_notifier);
+
+void amd_iommu_domain_direct_map(struct iommu_domain *dom)
+{
+ struct protection_domain *domain = dom->priv;
+ unsigned long flags;
+
+ spin_lock_irqsave(&domain->lock, flags);
+
+ /* Update data structure */
+ domain->mode = PAGE_MODE_NONE;
+ domain->updated = true;
+
+ /* Make changes visible to IOMMUs */
+ update_domain(domain);
+
+ /* Page-table is not visible to IOMMU anymore, so free it */
+ free_pagetable(domain);
+
+ spin_unlock_irqrestore(&domain->lock, flags);
+}
+EXPORT_SYMBOL(amd_iommu_domain_direct_map);
+
+int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids)
+{
+ struct protection_domain *domain = dom->priv;
+ unsigned long flags;
+ int levels, ret;
+
+ if (pasids <= 0 || pasids > (PASID_MASK + 1))
+ return -EINVAL;
+
+ /* Number of GCR3 table levels required */
+ for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
+ levels += 1;
+
+ if (levels > amd_iommu_max_glx_val)
+ return -EINVAL;
+
+ spin_lock_irqsave(&domain->lock, flags);
+
+ /*
+ * Spare ourselves the sanity checks of whether devices already
+ * in the domain support IOMMUv2. Just require that the domain has no
+ * devices attached when it is switched into IOMMUv2 mode.
+ */
+ ret = -EBUSY;
+ if (domain->dev_cnt > 0 || domain->flags & PD_IOMMUV2_MASK)
+ goto out;
+
+ ret = -ENOMEM;
+ domain->gcr3_tbl = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (domain->gcr3_tbl == NULL)
+ goto out;
+
+ domain->glx = levels;
+ domain->flags |= PD_IOMMUV2_MASK;
+ domain->updated = true;
+
+ update_domain(domain);
+
+ ret = 0;
+
+out:
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_domain_enable_v2);
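A standalone numeric check of the level computation above (illustrative only): the root page already holds 512 GCR3 entries, and each extra table level resolves a further 9 bits of PASID.

#include <assert.h>

static int gcr3_levels(int pasids)
{
	int levels;

	/* same loop as in amd_iommu_domain_enable_v2() */
	for (levels = 0; (pasids - 1) & ~0x1ff; pasids >>= 9)
		levels += 1;

	return levels;
}

int main(void)
{
	assert(gcr3_levels(512)     == 0); /* fits in the root page   */
	assert(gcr3_levels(513)     == 1); /* needs one extra level   */
	assert(gcr3_levels(1 << 18) == 1);
	assert(gcr3_levels(1 << 20) == 2); /* full 20-bit PASID space */
	return 0;
}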
+
+static int __flush_pasid(struct protection_domain *domain, int pasid,
+ u64 address, bool size)
+{
+ struct iommu_dev_data *dev_data;
+ struct iommu_cmd cmd;
+ int i, ret;
+
+ if (!(domain->flags & PD_IOMMUV2_MASK))
+ return -EINVAL;
+
+ build_inv_iommu_pasid(&cmd, domain->id, pasid, address, size);
+
+ /*
+ * IOMMU TLB needs to be flushed before Device TLB to
+ * prevent device TLB refill from IOMMU TLB
+ */
+ for (i = 0; i < amd_iommus_present; ++i) {
+ if (domain->dev_iommu[i] == 0)
+ continue;
+
+ ret = iommu_queue_command(amd_iommus[i], &cmd);
+ if (ret != 0)
+ goto out;
+ }
+
+ /* Wait until IOMMU TLB flushes are complete */
+ domain_flush_complete(domain);
+
+ /* Now flush device TLBs */
+ list_for_each_entry(dev_data, &domain->dev_list, list) {
+ struct amd_iommu *iommu;
+ int qdep;
+
+ BUG_ON(!dev_data->ats.enabled);
+
+ qdep = dev_data->ats.qdep;
+ iommu = amd_iommu_rlookup_table[dev_data->devid];
+
+ build_inv_iotlb_pasid(&cmd, dev_data->devid, pasid,
+ qdep, address, size);
+
+ ret = iommu_queue_command(iommu, &cmd);
+ if (ret != 0)
+ goto out;
+ }
+
+ /* Wait until all device TLBs are flushed */
+ domain_flush_complete(domain);
+
+ ret = 0;
+
+out:
+
+ return ret;
+}
+
+static int __amd_iommu_flush_page(struct protection_domain *domain, int pasid,
+ u64 address)
+{
+ INC_STATS_COUNTER(invalidate_iotlb);
+
+ return __flush_pasid(domain, pasid, address, false);
+}
+
+int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
+ u64 address)
+{
+ struct protection_domain *domain = dom->priv;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&domain->lock, flags);
+ ret = __amd_iommu_flush_page(domain, pasid, address);
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_flush_page);
+
+static int __amd_iommu_flush_tlb(struct protection_domain *domain, int pasid)
+{
+ INC_STATS_COUNTER(invalidate_iotlb_all);
+
+ return __flush_pasid(domain, pasid, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+ true);
+}
+
+int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid)
+{
+ struct protection_domain *domain = dom->priv;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&domain->lock, flags);
+ ret = __amd_iommu_flush_tlb(domain, pasid);
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_flush_tlb);
+
+static u64 *__get_gcr3_pte(u64 *root, int level, int pasid, bool alloc)
+{
+ int index;
+ u64 *pte;
+
+ while (true) {
+
+ index = (pasid >> (9 * level)) & 0x1ff;
+ pte = &root[index];
+
+ if (level == 0)
+ break;
+
+ if (!(*pte & GCR3_VALID)) {
+ if (!alloc)
+ return NULL;
+
+ root = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (root == NULL)
+ return NULL;
+
+ *pte = __pa(root) | GCR3_VALID;
+ }
+
+ root = __va(*pte & PAGE_MASK);
+
+ level -= 1;
+ }
+
+ return pte;
+}
+
+static int __set_gcr3(struct protection_domain *domain, int pasid,
+ unsigned long cr3)
+{
+ u64 *pte;
+
+ if (domain->mode != PAGE_MODE_NONE)
+ return -EINVAL;
+
+ pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, true);
+ if (pte == NULL)
+ return -ENOMEM;
+
+ *pte = (cr3 & PAGE_MASK) | GCR3_VALID;
+
+ return __amd_iommu_flush_tlb(domain, pasid);
+}
+
+static int __clear_gcr3(struct protection_domain *domain, int pasid)
+{
+ u64 *pte;
+
+ if (domain->mode != PAGE_MODE_NONE)
+ return -EINVAL;
+
+ pte = __get_gcr3_pte(domain->gcr3_tbl, domain->glx, pasid, false);
+ if (pte == NULL)
+ return 0;
+
+ *pte = 0;
+
+ return __amd_iommu_flush_tlb(domain, pasid);
+}
+
+int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
+ unsigned long cr3)
+{
+ struct protection_domain *domain = dom->priv;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&domain->lock, flags);
+ ret = __set_gcr3(domain, pasid, cr3);
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_domain_set_gcr3);
+
+int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid)
+{
+ struct protection_domain *domain = dom->priv;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&domain->lock, flags);
+ ret = __clear_gcr3(domain, pasid);
+ spin_unlock_irqrestore(&domain->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_domain_clear_gcr3);
+
+int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
+ int status, int tag)
+{
+ struct iommu_dev_data *dev_data;
+ struct amd_iommu *iommu;
+ struct iommu_cmd cmd;
+
+ INC_STATS_COUNTER(complete_ppr);
+
+ dev_data = get_dev_data(&pdev->dev);
+ iommu = amd_iommu_rlookup_table[dev_data->devid];
+
+ build_complete_ppr(&cmd, dev_data->devid, pasid, status,
+ tag, dev_data->pri_tlp);
+
+ return iommu_queue_command(iommu, &cmd);
+}
+EXPORT_SYMBOL(amd_iommu_complete_ppr);
+
+struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev)
+{
+ struct protection_domain *domain;
+
+ domain = get_domain(&pdev->dev);
+ if (IS_ERR(domain))
+ return NULL;
+
+ /* Only return IOMMUv2 domains */
+ if (!(domain->flags & PD_IOMMUV2_MASK))
+ return NULL;
+
+ return domain->iommu_domain;
+}
+EXPORT_SYMBOL(amd_iommu_get_v2_domain);
+
+void amd_iommu_enable_device_erratum(struct pci_dev *pdev, u32 erratum)
+{
+ struct iommu_dev_data *dev_data;
+
+ if (!amd_iommu_v2_supported())
+ return;
+
+ dev_data = get_dev_data(&pdev->dev);
+ dev_data->errata |= (1 << erratum);
+}
+EXPORT_SYMBOL(amd_iommu_enable_device_erratum);
+
+int amd_iommu_device_info(struct pci_dev *pdev,
+ struct amd_iommu_device_info *info)
+{
+ int max_pasids;
+ int pos;
+
+ if (pdev == NULL || info == NULL)
+ return -EINVAL;
+
+ if (!amd_iommu_v2_supported())
+ return -EINVAL;
+
+ memset(info, 0, sizeof(*info));
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS);
+ if (pos)
+ info->flags |= AMD_IOMMU_DEVICE_FLAG_ATS_SUP;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
+ if (pos)
+ info->flags |= AMD_IOMMU_DEVICE_FLAG_PRI_SUP;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
+ if (pos) {
+ int features;
+
+ max_pasids = 1 << (9 * (amd_iommu_max_glx_val + 1));
+ max_pasids = min(max_pasids, (1 << 20));
+
+ info->flags |= AMD_IOMMU_DEVICE_FLAG_PASID_SUP;
+ info->max_pasids = min(pci_max_pasids(pdev), max_pasids);
+
+ features = pci_pasid_features(pdev);
+ if (features & PCI_PASID_CAP_EXEC)
+ info->flags |= AMD_IOMMU_DEVICE_FLAG_EXEC_SUP;
+ if (features & PCI_PASID_CAP_PRIV)
+ info->flags |= AMD_IOMMU_DEVICE_FLAG_PRIV_SUP;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(amd_iommu_device_info);
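A quick standalone check of the IOMMU-side PASID limit computed above (illustrative only, using a hypothetical amd_iommu_max_glx_val of 1): each GLX step adds 9 bits, and the result is capped at the architectural 2^20 before being min'ed with the device's own pci_max_pasids() value.

#include <assert.h>

int main(void)
{
	int glx = 1;                        /* example value, not from the patch */
	int max_pasids = 1 << (9 * (glx + 1));

	if (max_pasids > (1 << 20))
		max_pasids = 1 << 20;

	assert(max_pasids == 262144);       /* 2^18 */
	return 0;
}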
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 82d2410..a35e98a 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -25,6 +25,7 @@
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/amd-iommu.h>
+#include <linux/export.h>
#include <asm/pci-direct.h>
#include <asm/iommu.h>
#include <asm/gart.h>
@@ -141,6 +142,12 @@ int amd_iommus_present;
bool amd_iommu_np_cache __read_mostly;
bool amd_iommu_iotlb_sup __read_mostly = true;
+u32 amd_iommu_max_pasids __read_mostly = ~0;
+
+bool amd_iommu_v2_present __read_mostly;
+
+bool amd_iommu_force_isolation __read_mostly;
+
/*
* The ACPI table parsing functions set this variable on an error
*/
@@ -268,7 +275,7 @@ static void iommu_set_exclusion_range(struct amd_iommu *iommu)
}
/* Programs the physical address of the device table into the IOMMU hardware */
-static void __init iommu_set_device_table(struct amd_iommu *iommu)
+static void iommu_set_device_table(struct amd_iommu *iommu)
{
u64 entry;
@@ -299,6 +306,16 @@ static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
}
+static void iommu_set_inv_tlb_timeout(struct amd_iommu *iommu, int timeout)
+{
+ u32 ctrl;
+
+ ctrl = readl(iommu->mmio_base + MMIO_CONTROL_OFFSET);
+ ctrl &= ~CTRL_INV_TO_MASK;
+ ctrl |= (timeout << CONTROL_INV_TIMEOUT) & CTRL_INV_TO_MASK;
+ writel(ctrl, iommu->mmio_base + MMIO_CONTROL_OFFSET);
+}
+
/* Function to enable the hardware */
static void iommu_enable(struct amd_iommu *iommu)
{
@@ -581,21 +598,69 @@ static void __init free_event_buffer(struct amd_iommu *iommu)
free_pages((unsigned long)iommu->evt_buf, get_order(EVT_BUFFER_SIZE));
}
+/* allocates the memory where the IOMMU will log the PPR requests to */
+static u8 * __init alloc_ppr_log(struct amd_iommu *iommu)
+{
+ iommu->ppr_log = (u8 *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(PPR_LOG_SIZE));
+
+ if (iommu->ppr_log == NULL)
+ return NULL;
+
+ return iommu->ppr_log;
+}
+
+static void iommu_enable_ppr_log(struct amd_iommu *iommu)
+{
+ u64 entry;
+
+ if (iommu->ppr_log == NULL)
+ return;
+
+ entry = (u64)virt_to_phys(iommu->ppr_log) | PPR_LOG_SIZE_512;
+
+ memcpy_toio(iommu->mmio_base + MMIO_PPR_LOG_OFFSET,
+ &entry, sizeof(entry));
+
+ /* set head and tail to zero manually */
+ writel(0x00, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+ writel(0x00, iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
+
+ iommu_feature_enable(iommu, CONTROL_PPFLOG_EN);
+ iommu_feature_enable(iommu, CONTROL_PPR_EN);
+}
+
+static void __init free_ppr_log(struct amd_iommu *iommu)
+{
+ if (iommu->ppr_log == NULL)
+ return;
+
+ free_pages((unsigned long)iommu->ppr_log, get_order(PPR_LOG_SIZE));
+}
+
+static void iommu_enable_gt(struct amd_iommu *iommu)
+{
+ if (!iommu_feature(iommu, FEATURE_GT))
+ return;
+
+ iommu_feature_enable(iommu, CONTROL_GT_EN);
+}
+
/* sets a specific bit in the device table entry. */
static void set_dev_entry_bit(u16 devid, u8 bit)
{
- int i = (bit >> 5) & 0x07;
- int _bit = bit & 0x1f;
+ int i = (bit >> 6) & 0x03;
+ int _bit = bit & 0x3f;
- amd_iommu_dev_table[devid].data[i] |= (1 << _bit);
+ amd_iommu_dev_table[devid].data[i] |= (1UL << _bit);
}
static int get_dev_entry_bit(u16 devid, u8 bit)
{
- int i = (bit >> 5) & 0x07;
- int _bit = bit & 0x1f;
+ int i = (bit >> 6) & 0x03;
+ int _bit = bit & 0x3f;
- return (amd_iommu_dev_table[devid].data[i] & (1 << _bit)) >> _bit;
+ return (amd_iommu_dev_table[devid].data[i] & (1UL << _bit)) >> _bit;
}
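A standalone check that the new u64-based word/bit split addresses the same absolute bit of the 256-bit device table entry as the old u32-based one, assuming the little-endian overlay of data[] is unchanged (illustrative only):

#include <assert.h>

int main(void)
{
	int bit;

	for (bit = 0; bit < 256; bit++) {
		int old_abs = ((bit >> 5) & 0x07) * 32 + (bit & 0x1f); /* u32 data[8] */
		int new_abs = ((bit >> 6) & 0x03) * 64 + (bit & 0x3f); /* u64 data[4] */

		assert(old_abs == bit && new_abs == bit);
	}
	return 0;
}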
@@ -699,6 +764,32 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu)
iommu->features = ((u64)high << 32) | low;
+ if (iommu_feature(iommu, FEATURE_GT)) {
+ int glxval;
+ u32 pasids;
+ u64 shift;
+
+ shift = iommu->features & FEATURE_PASID_MASK;
+ shift >>= FEATURE_PASID_SHIFT;
+ pasids = (1 << shift);
+
+ amd_iommu_max_pasids = min(amd_iommu_max_pasids, pasids);
+
+ glxval = iommu->features & FEATURE_GLXVAL_MASK;
+ glxval >>= FEATURE_GLXVAL_SHIFT;
+
+ if (amd_iommu_max_glx_val == -1)
+ amd_iommu_max_glx_val = glxval;
+ else
+ amd_iommu_max_glx_val = min(amd_iommu_max_glx_val, glxval);
+ }
+
+ if (iommu_feature(iommu, FEATURE_GT) &&
+ iommu_feature(iommu, FEATURE_PPR)) {
+ iommu->is_iommu_v2 = true;
+ amd_iommu_v2_present = true;
+ }
+
if (!is_rd890_iommu(iommu->dev))
return;
@@ -901,6 +992,7 @@ static void __init free_iommu_one(struct amd_iommu *iommu)
{
free_command_buffer(iommu);
free_event_buffer(iommu);
+ free_ppr_log(iommu);
iommu_unmap_mmio_space(iommu);
}
@@ -964,6 +1056,12 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
init_iommu_from_acpi(iommu, h);
init_iommu_devices(iommu);
+ if (iommu_feature(iommu, FEATURE_PPR)) {
+ iommu->ppr_log = alloc_ppr_log(iommu);
+ if (!iommu->ppr_log)
+ return -ENOMEM;
+ }
+
if (iommu->cap & (1UL << IOMMU_CAP_NPCACHE))
amd_iommu_np_cache = true;
@@ -1050,6 +1148,9 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
iommu->int_enabled = true;
iommu_feature_enable(iommu, CONTROL_EVT_INT_EN);
+ if (iommu->ppr_log != NULL)
+ iommu_feature_enable(iommu, CONTROL_PPFINT_EN);
+
return 0;
}
@@ -1209,6 +1310,9 @@ static void iommu_init_flags(struct amd_iommu *iommu)
* make IOMMU memory accesses cache coherent
*/
iommu_feature_enable(iommu, CONTROL_COHERENT_EN);
+
+ /* Set IOTLB invalidation timeout to 1s */
+ iommu_set_inv_tlb_timeout(iommu, CTRL_INV_TO_1S);
}
static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
@@ -1274,6 +1378,8 @@ static void enable_iommus(void)
iommu_set_device_table(iommu);
iommu_enable_command_buffer(iommu);
iommu_enable_event_buffer(iommu);
+ iommu_enable_ppr_log(iommu);
+ iommu_enable_gt(iommu);
iommu_set_exclusion_range(iommu);
iommu_init_msi(iommu);
iommu_enable(iommu);
@@ -1303,13 +1409,6 @@ static void amd_iommu_resume(void)
/* re-load the hardware */
enable_iommus();
-
- /*
- * we have to flush after the IOMMUs are enabled because a
- * disabled IOMMU will never execute the commands we send
- */
- for_each_iommu(iommu)
- iommu_flush_all_caches(iommu);
}
static int amd_iommu_suspend(void)
@@ -1560,6 +1659,8 @@ static int __init parse_amd_iommu_options(char *str)
amd_iommu_unmap_flush = true;
if (strncmp(str, "off", 3) == 0)
amd_iommu_disabled = true;
+ if (strncmp(str, "force_isolation", 15) == 0)
+ amd_iommu_force_isolation = true;
}
return 1;
@@ -1572,3 +1673,9 @@ IOMMU_INIT_FINISH(amd_iommu_detect,
gart_iommu_hole_init,
0,
0);
+
+bool amd_iommu_v2_supported(void)
+{
+ return amd_iommu_v2_present;
+}
+EXPORT_SYMBOL(amd_iommu_v2_supported);
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
index 7ffaa64..1a7f41c 100644
--- a/drivers/iommu/amd_iommu_proto.h
+++ b/drivers/iommu/amd_iommu_proto.h
@@ -31,6 +31,30 @@ extern int amd_iommu_init_devices(void);
extern void amd_iommu_uninit_devices(void);
extern void amd_iommu_init_notifier(void);
extern void amd_iommu_init_api(void);
+
+/* IOMMUv2 specific functions */
+struct iommu_domain;
+
+extern bool amd_iommu_v2_supported(void);
+extern int amd_iommu_register_ppr_notifier(struct notifier_block *nb);
+extern int amd_iommu_unregister_ppr_notifier(struct notifier_block *nb);
+extern void amd_iommu_domain_direct_map(struct iommu_domain *dom);
+extern int amd_iommu_domain_enable_v2(struct iommu_domain *dom, int pasids);
+extern int amd_iommu_flush_page(struct iommu_domain *dom, int pasid,
+ u64 address);
+extern int amd_iommu_flush_tlb(struct iommu_domain *dom, int pasid);
+extern int amd_iommu_domain_set_gcr3(struct iommu_domain *dom, int pasid,
+ unsigned long cr3);
+extern int amd_iommu_domain_clear_gcr3(struct iommu_domain *dom, int pasid);
+extern struct iommu_domain *amd_iommu_get_v2_domain(struct pci_dev *pdev);
+
+#define PPR_SUCCESS 0x0
+#define PPR_INVALID 0x1
+#define PPR_FAILURE 0xf
+
+extern int amd_iommu_complete_ppr(struct pci_dev *pdev, int pasid,
+ int status, int tag);
+
#ifndef CONFIG_AMD_IOMMU_STATS
static inline void amd_iommu_stats_init(void) { }
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 5b9c507..2452f3b 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -69,11 +69,14 @@
#define MMIO_EXCL_BASE_OFFSET 0x0020
#define MMIO_EXCL_LIMIT_OFFSET 0x0028
#define MMIO_EXT_FEATURES 0x0030
+#define MMIO_PPR_LOG_OFFSET 0x0038
#define MMIO_CMD_HEAD_OFFSET 0x2000
#define MMIO_CMD_TAIL_OFFSET 0x2008
#define MMIO_EVT_HEAD_OFFSET 0x2010
#define MMIO_EVT_TAIL_OFFSET 0x2018
#define MMIO_STATUS_OFFSET 0x2020
+#define MMIO_PPR_HEAD_OFFSET 0x2030
+#define MMIO_PPR_TAIL_OFFSET 0x2038
/* Extended Feature Bits */
@@ -87,8 +90,17 @@
#define FEATURE_HE (1ULL<<8)
#define FEATURE_PC (1ULL<<9)
+#define FEATURE_PASID_SHIFT 32
+#define FEATURE_PASID_MASK (0x1fULL << FEATURE_PASID_SHIFT)
+
+#define FEATURE_GLXVAL_SHIFT 14
+#define FEATURE_GLXVAL_MASK (0x03ULL << FEATURE_GLXVAL_SHIFT)
+
+#define PASID_MASK 0x000fffff
+
/* MMIO status bits */
-#define MMIO_STATUS_COM_WAIT_INT_MASK 0x04
+#define MMIO_STATUS_COM_WAIT_INT_MASK (1 << 2)
+#define MMIO_STATUS_PPR_INT_MASK (1 << 6)
/* event logging constants */
#define EVENT_ENTRY_SIZE 0x10
@@ -115,6 +127,7 @@
#define CONTROL_EVT_LOG_EN 0x02ULL
#define CONTROL_EVT_INT_EN 0x03ULL
#define CONTROL_COMWAIT_EN 0x04ULL
+#define CONTROL_INV_TIMEOUT 0x05ULL
#define CONTROL_PASSPW_EN 0x08ULL
#define CONTROL_RESPASSPW_EN 0x09ULL
#define CONTROL_COHERENT_EN 0x0aULL
@@ -122,18 +135,34 @@
#define CONTROL_CMDBUF_EN 0x0cULL
#define CONTROL_PPFLOG_EN 0x0dULL
#define CONTROL_PPFINT_EN 0x0eULL
+#define CONTROL_PPR_EN 0x0fULL
+#define CONTROL_GT_EN 0x10ULL
+
+#define CTRL_INV_TO_MASK (7 << CONTROL_INV_TIMEOUT)
+#define CTRL_INV_TO_NONE 0
+#define CTRL_INV_TO_1MS 1
+#define CTRL_INV_TO_10MS 2
+#define CTRL_INV_TO_100MS 3
+#define CTRL_INV_TO_1S 4
+#define CTRL_INV_TO_10S 5
+#define CTRL_INV_TO_100S 6
/* command specific defines */
#define CMD_COMPL_WAIT 0x01
#define CMD_INV_DEV_ENTRY 0x02
#define CMD_INV_IOMMU_PAGES 0x03
#define CMD_INV_IOTLB_PAGES 0x04
+#define CMD_COMPLETE_PPR 0x07
#define CMD_INV_ALL 0x08
#define CMD_COMPL_WAIT_STORE_MASK 0x01
#define CMD_COMPL_WAIT_INT_MASK 0x02
#define CMD_INV_IOMMU_PAGES_SIZE_MASK 0x01
#define CMD_INV_IOMMU_PAGES_PDE_MASK 0x02
+#define CMD_INV_IOMMU_PAGES_GN_MASK 0x04
+
+#define PPR_STATUS_MASK 0xf
+#define PPR_STATUS_SHIFT 12
#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS 0x7fffffffffffffffULL
@@ -165,6 +194,23 @@
#define EVT_BUFFER_SIZE 8192 /* 512 entries */
#define EVT_LEN_MASK (0x9ULL << 56)
+/* Constants for PPR Log handling */
+#define PPR_LOG_ENTRIES 512
+#define PPR_LOG_SIZE_SHIFT 56
+#define PPR_LOG_SIZE_512 (0x9ULL << PPR_LOG_SIZE_SHIFT)
+#define PPR_ENTRY_SIZE 16
+#define PPR_LOG_SIZE (PPR_ENTRY_SIZE * PPR_LOG_ENTRIES)
+
+#define PPR_REQ_TYPE(x) (((x) >> 60) & 0xfULL)
+#define PPR_FLAGS(x) (((x) >> 48) & 0xfffULL)
+#define PPR_DEVID(x) ((x) & 0xffffULL)
+#define PPR_TAG(x) (((x) >> 32) & 0x3ffULL)
+#define PPR_PASID1(x) (((x) >> 16) & 0xffffULL)
+#define PPR_PASID2(x) (((x) >> 42) & 0xfULL)
+#define PPR_PASID(x) ((PPR_PASID2(x) << 16) | PPR_PASID1(x))
+
+#define PPR_REQ_FAULT 0x01
+
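A standalone sketch of the PPR_PASID() reassembly defined above (illustrative only): the 20-bit PASID of a raw PPR log entry travels as 16 low bits at bit 16 plus 4 high bits at bit 42 of the first 64-bit word.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t pasid = 0xFABCD;           /* any 20-bit value */
	uint64_t raw0  = ((uint64_t)(pasid & 0xffff) << 16) |
			 ((uint64_t)((pasid >> 16) & 0xf) << 42);

	uint32_t low  = (raw0 >> 16) & 0xffff;   /* PPR_PASID1() */
	uint32_t high = (raw0 >> 42) & 0xf;      /* PPR_PASID2() */

	assert(((high << 16) | low) == pasid);   /* PPR_PASID()  */
	return 0;
}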
#define PAGE_MODE_NONE 0x00
#define PAGE_MODE_1_LEVEL 0x01
#define PAGE_MODE_2_LEVEL 0x02
@@ -230,7 +276,24 @@
#define IOMMU_PTE_IR (1ULL << 61)
#define IOMMU_PTE_IW (1ULL << 62)
-#define DTE_FLAG_IOTLB 0x01
+#define DTE_FLAG_IOTLB (0x01UL << 32)
+#define DTE_FLAG_GV (0x01ULL << 55)
+#define DTE_GLX_SHIFT (56)
+#define DTE_GLX_MASK (3)
+
+#define DTE_GCR3_VAL_A(x) (((x) >> 12) & 0x00007ULL)
+#define DTE_GCR3_VAL_B(x) (((x) >> 15) & 0x0ffffULL)
+#define DTE_GCR3_VAL_C(x) (((x) >> 31) & 0xfffffULL)
+
+#define DTE_GCR3_INDEX_A 0
+#define DTE_GCR3_INDEX_B 1
+#define DTE_GCR3_INDEX_C 1
+
+#define DTE_GCR3_SHIFT_A 58
+#define DTE_GCR3_SHIFT_B 16
+#define DTE_GCR3_SHIFT_C 43
+
+#define GCR3_VALID 0x01ULL
#define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL)
#define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P)
@@ -257,6 +320,7 @@
domain for an IOMMU */
#define PD_PASSTHROUGH_MASK (1UL << 2) /* domain has no page
translation */
+#define PD_IOMMUV2_MASK (1UL << 3) /* domain has gcr3 table */
extern bool amd_iommu_dump;
#define DUMP_printk(format, arg...) \
@@ -285,6 +349,29 @@ extern bool amd_iommu_iotlb_sup;
#define APERTURE_RANGE_INDEX(a) ((a) >> APERTURE_RANGE_SHIFT)
#define APERTURE_PAGE_INDEX(a) (((a) >> 21) & 0x3fULL)
+
+/*
+ * This struct is used to pass information about
+ * incoming PPR faults around.
+ */
+struct amd_iommu_fault {
+ u64 address; /* IO virtual address of the fault*/
+ u32 pasid; /* Address space identifier */
+ u16 device_id; /* Originating PCI device id */
+ u16 tag; /* PPR tag */
+ u16 flags; /* Fault flags */
+
+};
+
+#define PPR_FAULT_EXEC (1 << 1)
+#define PPR_FAULT_READ (1 << 2)
+#define PPR_FAULT_WRITE (1 << 5)
+#define PPR_FAULT_USER (1 << 6)
+#define PPR_FAULT_RSVD (1 << 7)
+#define PPR_FAULT_GN (1 << 8)
+
+struct iommu_domain;
+
/*
* This structure contains generic data for IOMMU protection domains
* independent of their use.
@@ -297,11 +384,15 @@ struct protection_domain {
u16 id; /* the domain id written to the device table */
int mode; /* paging mode (0-6 levels) */
u64 *pt_root; /* page table root pointer */
+ int glx; /* Number of levels for GCR3 table */
+ u64 *gcr3_tbl; /* Guest CR3 table */
unsigned long flags; /* flags to find out type of domain */
bool updated; /* complete domain flush required */
unsigned dev_cnt; /* devices assigned to this domain */
unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
void *priv; /* private data */
+ struct iommu_domain *iommu_domain; /* Pointer to generic
+ domain structure */
};
@@ -315,10 +406,15 @@ struct iommu_dev_data {
struct protection_domain *domain; /* Domain the device is bound to */
atomic_t bind; /* Domain attach reverent count */
u16 devid; /* PCI Device ID */
+ bool iommu_v2; /* Device can make use of IOMMUv2 */
+ bool passthrough; /* Default for device is pt_domain */
struct {
bool enabled;
int qdep;
} ats; /* ATS state */
+ bool pri_tlp; /* PASID TLB required for
+ PPR completions */
+ u32 errata; /* Bitmap for errata to apply */
};
/*
@@ -399,6 +495,9 @@ struct amd_iommu {
/* Extended features */
u64 features;
+ /* IOMMUv2 */
+ bool is_iommu_v2;
+
/*
* Capability pointer. There could be more than one IOMMU per PCI
* device function if there are more than one AMD IOMMU capability
@@ -431,6 +530,9 @@ struct amd_iommu {
/* MSI number for event interrupt */
u16 evt_msi_num;
+ /* Base of the PPR log, if present */
+ u8 *ppr_log;
+
/* true if interrupts for this IOMMU are already enabled */
bool int_enabled;
@@ -484,7 +586,7 @@ extern struct list_head amd_iommu_pd_list;
* Structure defining one entry in the device table
*/
struct dev_table_entry {
- u32 data[8];
+ u64 data[4];
};
/*
@@ -549,6 +651,16 @@ extern unsigned long *amd_iommu_pd_alloc_bitmap;
*/
extern bool amd_iommu_unmap_flush;
+/* Smallest number of PASIDs supported by any IOMMU in the system */
+extern u32 amd_iommu_max_pasids;
+
+extern bool amd_iommu_v2_present;
+
+extern bool amd_iommu_force_isolation;
+
+/* Max levels of glxval supported */
+extern int amd_iommu_max_glx_val;
+
/* takes bus and device/function and returns the device id
* FIXME: should that be in generic PCI code? */
static inline u16 calc_devid(u8 bus, u8 devfn)
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
new file mode 100644
index 0000000..8add9f1
--- /dev/null
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -0,0 +1,994 @@
+/*
+ * Copyright (C) 2010-2012 Advanced Micro Devices, Inc.
+ * Author: Joerg Roedel <joerg.roedel@amd.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/mmu_notifier.h>
+#include <linux/amd-iommu.h>
+#include <linux/mm_types.h>
+#include <linux/profile.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/iommu.h>
+#include <linux/wait.h>
+#include <linux/pci.h>
+#include <linux/gfp.h>
+
+#include "amd_iommu_types.h"
+#include "amd_iommu_proto.h"
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Joerg Roedel <joerg.roedel@amd.com>");
+
+#define MAX_DEVICES 0x10000
+#define PRI_QUEUE_SIZE 512
+
+struct pri_queue {
+ atomic_t inflight;
+ bool finish;
+ int status;
+};
+
+struct pasid_state {
+ struct list_head list; /* For global state-list */
+ atomic_t count; /* Reference count */
+ struct task_struct *task; /* Task bound to this PASID */
+ struct mm_struct *mm; /* mm_struct for the faults */
+ struct mmu_notifier mn; /* mmu_notifier handle */
+ struct pri_queue pri[PRI_QUEUE_SIZE]; /* PRI tag states */
+ struct device_state *device_state; /* Link to our device_state */
+ int pasid; /* PASID index */
+ spinlock_t lock; /* Protect pri_queues */
+ wait_queue_head_t wq; /* To wait for count == 0 */
+};
+
+struct device_state {
+ atomic_t count;
+ struct pci_dev *pdev;
+ struct pasid_state **states;
+ struct iommu_domain *domain;
+ int pasid_levels;
+ int max_pasids;
+ amd_iommu_invalid_ppr_cb inv_ppr_cb;
+ amd_iommu_invalidate_ctx inv_ctx_cb;
+ spinlock_t lock;
+ wait_queue_head_t wq;
+};
+
+struct fault {
+ struct work_struct work;
+ struct device_state *dev_state;
+ struct pasid_state *state;
+ struct mm_struct *mm;
+ u64 address;
+ u16 devid;
+ u16 pasid;
+ u16 tag;
+ u16 finish;
+ u16 flags;
+};
+
+struct device_state **state_table;
+static spinlock_t state_lock;
+
+/* List and lock for all pasid_states */
+static LIST_HEAD(pasid_state_list);
+static DEFINE_SPINLOCK(ps_lock);
+
+static struct workqueue_struct *iommu_wq;
+
+/*
+ * Empty page table - Used between
+ * mmu_notifier_invalidate_range_start and
+ * mmu_notifier_invalidate_range_end
+ */
+static u64 *empty_page_table;
+
+static void free_pasid_states(struct device_state *dev_state);
+static void unbind_pasid(struct device_state *dev_state, int pasid);
+static int task_exit(struct notifier_block *nb, unsigned long e, void *data);
+
+static u16 device_id(struct pci_dev *pdev)
+{
+ u16 devid;
+
+ devid = pdev->bus->number;
+ devid = (devid << 8) | pdev->devfn;
+
+ return devid;
+}
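A quick standalone illustration of the devid computed here (not kernel code): it is simply bus << 8 | devfn, the same layout calc_devid() in amd_iommu_types.h uses, so e.g. PCI 00:02.1 becomes 0x0011.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint8_t bus = 0x00, dev = 0x02, fn = 0x01;
	uint8_t devfn = (uint8_t)((dev << 3) | fn);     /* PCI_DEVFN(2, 1) */
	uint16_t devid = (uint16_t)((bus << 8) | devfn);

	assert(devid == 0x0011);
	return 0;
}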
+
+static struct device_state *get_device_state(u16 devid)
+{
+ struct device_state *dev_state;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state_lock, flags);
+ dev_state = state_table[devid];
+ if (dev_state != NULL)
+ atomic_inc(&dev_state->count);
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ return dev_state;
+}
+
+static void free_device_state(struct device_state *dev_state)
+{
+ /*
+ * First detach device from domain - No more PRI requests will arrive
+ * from that device after it is unbound from the IOMMUv2 domain.
+ */
+ iommu_detach_device(dev_state->domain, &dev_state->pdev->dev);
+
+ /* Everything is down now, free the IOMMUv2 domain */
+ iommu_domain_free(dev_state->domain);
+
+ /* Finally get rid of the device-state */
+ kfree(dev_state);
+}
+
+static void put_device_state(struct device_state *dev_state)
+{
+ if (atomic_dec_and_test(&dev_state->count))
+ wake_up(&dev_state->wq);
+}
+
+static void put_device_state_wait(struct device_state *dev_state)
+{
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(&dev_state->wq, &wait, TASK_UNINTERRUPTIBLE);
+ if (!atomic_dec_and_test(&dev_state->count))
+ schedule();
+ finish_wait(&dev_state->wq, &wait);
+
+ free_device_state(dev_state);
+}
+
+static struct notifier_block profile_nb = {
+ .notifier_call = task_exit,
+};
+
+static void link_pasid_state(struct pasid_state *pasid_state)
+{
+ spin_lock(&ps_lock);
+ list_add_tail(&pasid_state->list, &pasid_state_list);
+ spin_unlock(&ps_lock);
+}
+
+static void __unlink_pasid_state(struct pasid_state *pasid_state)
+{
+ list_del(&pasid_state->list);
+}
+
+static void unlink_pasid_state(struct pasid_state *pasid_state)
+{
+ spin_lock(&ps_lock);
+ __unlink_pasid_state(pasid_state);
+ spin_unlock(&ps_lock);
+}
+
+/* Must be called under dev_state->lock */
+static struct pasid_state **__get_pasid_state_ptr(struct device_state *dev_state,
+ int pasid, bool alloc)
+{
+ struct pasid_state **root, **ptr;
+ int level, index;
+
+ level = dev_state->pasid_levels;
+ root = dev_state->states;
+
+ while (true) {
+
+ index = (pasid >> (9 * level)) & 0x1ff;
+ ptr = &root[index];
+
+ if (level == 0)
+ break;
+
+ if (*ptr == NULL) {
+ if (!alloc)
+ return NULL;
+
+ *ptr = (void *)get_zeroed_page(GFP_ATOMIC);
+ if (*ptr == NULL)
+ return NULL;
+ }
+
+ root = (struct pasid_state **)*ptr;
+ level -= 1;
+ }
+
+ return ptr;
+}
+
+static int set_pasid_state(struct device_state *dev_state,
+ struct pasid_state *pasid_state,
+ int pasid)
+{
+ struct pasid_state **ptr;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&dev_state->lock, flags);
+ ptr = __get_pasid_state_ptr(dev_state, pasid, true);
+
+ ret = -ENOMEM;
+ if (ptr == NULL)
+ goto out_unlock;
+
+ ret = -ENOMEM;
+ if (*ptr != NULL)
+ goto out_unlock;
+
+ *ptr = pasid_state;
+
+ ret = 0;
+
+out_unlock:
+ spin_unlock_irqrestore(&dev_state->lock, flags);
+
+ return ret;
+}
+
+static void clear_pasid_state(struct device_state *dev_state, int pasid)
+{
+ struct pasid_state **ptr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_state->lock, flags);
+ ptr = __get_pasid_state_ptr(dev_state, pasid, true);
+
+ if (ptr == NULL)
+ goto out_unlock;
+
+ *ptr = NULL;
+
+out_unlock:
+ spin_unlock_irqrestore(&dev_state->lock, flags);
+}
+
+static struct pasid_state *get_pasid_state(struct device_state *dev_state,
+ int pasid)
+{
+ struct pasid_state **ptr, *ret = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_state->lock, flags);
+ ptr = __get_pasid_state_ptr(dev_state, pasid, false);
+
+ if (ptr == NULL)
+ goto out_unlock;
+
+ ret = *ptr;
+ if (ret)
+ atomic_inc(&ret->count);
+
+out_unlock:
+ spin_unlock_irqrestore(&dev_state->lock, flags);
+
+ return ret;
+}
+
+static void free_pasid_state(struct pasid_state *pasid_state)
+{
+ kfree(pasid_state);
+}
+
+static void put_pasid_state(struct pasid_state *pasid_state)
+{
+ if (atomic_dec_and_test(&pasid_state->count)) {
+ put_device_state(pasid_state->device_state);
+ wake_up(&pasid_state->wq);
+ }
+}
+
+static void put_pasid_state_wait(struct pasid_state *pasid_state)
+{
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);
+
+ if (atomic_dec_and_test(&pasid_state->count))
+ put_device_state(pasid_state->device_state);
+ else
+ schedule();
+
+ finish_wait(&pasid_state->wq, &wait);
+ mmput(pasid_state->mm);
+ free_pasid_state(pasid_state);
+}
+
+static void __unbind_pasid(struct pasid_state *pasid_state)
+{
+ struct iommu_domain *domain;
+
+ domain = pasid_state->device_state->domain;
+
+ amd_iommu_domain_clear_gcr3(domain, pasid_state->pasid);
+ clear_pasid_state(pasid_state->device_state, pasid_state->pasid);
+
+ /* Make sure no more pending faults are in the queue */
+ flush_workqueue(iommu_wq);
+
+ mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
+
+ put_pasid_state(pasid_state); /* Reference taken in bind() function */
+}
+
+static void unbind_pasid(struct device_state *dev_state, int pasid)
+{
+ struct pasid_state *pasid_state;
+
+ pasid_state = get_pasid_state(dev_state, pasid);
+ if (pasid_state == NULL)
+ return;
+
+ unlink_pasid_state(pasid_state);
+ __unbind_pasid(pasid_state);
+ put_pasid_state_wait(pasid_state); /* Reference taken in this function */
+}
+
+static void free_pasid_states_level1(struct pasid_state **tbl)
+{
+ int i;
+
+ for (i = 0; i < 512; ++i) {
+ if (tbl[i] == NULL)
+ continue;
+
+ free_page((unsigned long)tbl[i]);
+ }
+}
+
+static void free_pasid_states_level2(struct pasid_state **tbl)
+{
+ struct pasid_state **ptr;
+ int i;
+
+ for (i = 0; i < 512; ++i) {
+ if (tbl[i] == NULL)
+ continue;
+
+ ptr = (struct pasid_state **)tbl[i];
+ free_pasid_states_level1(ptr);
+ }
+}
+
+static void free_pasid_states(struct device_state *dev_state)
+{
+ struct pasid_state *pasid_state;
+ int i;
+
+ for (i = 0; i < dev_state->max_pasids; ++i) {
+ pasid_state = get_pasid_state(dev_state, i);
+ if (pasid_state == NULL)
+ continue;
+
+ put_pasid_state(pasid_state);
+ unbind_pasid(dev_state, i);
+ }
+
+ if (dev_state->pasid_levels == 2)
+ free_pasid_states_level2(dev_state->states);
+ else if (dev_state->pasid_levels == 1)
+ free_pasid_states_level1(dev_state->states);
+ else if (dev_state->pasid_levels != 0)
+ BUG();
+
+ free_page((unsigned long)dev_state->states);
+}
+
+static struct pasid_state *mn_to_state(struct mmu_notifier *mn)
+{
+ return container_of(mn, struct pasid_state, mn);
+}
+
+static void __mn_flush_page(struct mmu_notifier *mn,
+ unsigned long address)
+{
+ struct pasid_state *pasid_state;
+ struct device_state *dev_state;
+
+ pasid_state = mn_to_state(mn);
+ dev_state = pasid_state->device_state;
+
+ amd_iommu_flush_page(dev_state->domain, pasid_state->pasid, address);
+}
+
+static int mn_clear_flush_young(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address)
+{
+ __mn_flush_page(mn, address);
+
+ return 0;
+}
+
+static void mn_change_pte(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address,
+ pte_t pte)
+{
+ __mn_flush_page(mn, address);
+}
+
+static void mn_invalidate_page(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long address)
+{
+ __mn_flush_page(mn, address);
+}
+
+static void mn_invalidate_range_start(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ struct pasid_state *pasid_state;
+ struct device_state *dev_state;
+
+ pasid_state = mn_to_state(mn);
+ dev_state = pasid_state->device_state;
+
+ amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
+ __pa(empty_page_table));
+}
+
+static void mn_invalidate_range_end(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ struct pasid_state *pasid_state;
+ struct device_state *dev_state;
+
+ pasid_state = mn_to_state(mn);
+ dev_state = pasid_state->device_state;
+
+ amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
+ __pa(pasid_state->mm->pgd));
+}
+
+static struct mmu_notifier_ops iommu_mn = {
+ .clear_flush_young = mn_clear_flush_young,
+ .change_pte = mn_change_pte,
+ .invalidate_page = mn_invalidate_page,
+ .invalidate_range_start = mn_invalidate_range_start,
+ .invalidate_range_end = mn_invalidate_range_end,
+};
+
+static void set_pri_tag_status(struct pasid_state *pasid_state,
+ u16 tag, int status)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pasid_state->lock, flags);
+ pasid_state->pri[tag].status = status;
+ spin_unlock_irqrestore(&pasid_state->lock, flags);
+}
+
+static void finish_pri_tag(struct device_state *dev_state,
+ struct pasid_state *pasid_state,
+ u16 tag)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&pasid_state->lock, flags);
+ if (atomic_dec_and_test(&pasid_state->pri[tag].inflight) &&
+ pasid_state->pri[tag].finish) {
+ amd_iommu_complete_ppr(dev_state->pdev, pasid_state->pasid,
+ pasid_state->pri[tag].status, tag);
+ pasid_state->pri[tag].finish = false;
+ pasid_state->pri[tag].status = PPR_SUCCESS;
+ }
+ spin_unlock_irqrestore(&pasid_state->lock, flags);
+}
+
+static void do_fault(struct work_struct *work)
+{
+ struct fault *fault = container_of(work, struct fault, work);
+ int npages, write;
+ struct page *page;
+
+ write = !!(fault->flags & PPR_FAULT_WRITE);
+
+ npages = get_user_pages(fault->state->task, fault->state->mm,
+ fault->address, 1, write, 0, &page, NULL);
+
+ if (npages == 1) {
+ put_page(page);
+ } else if (fault->dev_state->inv_ppr_cb) {
+ int status;
+
+ status = fault->dev_state->inv_ppr_cb(fault->dev_state->pdev,
+ fault->pasid,
+ fault->address,
+ fault->flags);
+ switch (status) {
+ case AMD_IOMMU_INV_PRI_RSP_SUCCESS:
+ set_pri_tag_status(fault->state, fault->tag, PPR_SUCCESS);
+ break;
+ case AMD_IOMMU_INV_PRI_RSP_INVALID:
+ set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
+ break;
+ case AMD_IOMMU_INV_PRI_RSP_FAIL:
+ set_pri_tag_status(fault->state, fault->tag, PPR_FAILURE);
+ break;
+ default:
+ BUG();
+ }
+ } else {
+ set_pri_tag_status(fault->state, fault->tag, PPR_INVALID);
+ }
+
+ finish_pri_tag(fault->dev_state, fault->state, fault->tag);
+
+ put_pasid_state(fault->state);
+
+ kfree(fault);
+}
+
+static int ppr_notifier(struct notifier_block *nb, unsigned long e, void *data)
+{
+ struct amd_iommu_fault *iommu_fault;
+ struct pasid_state *pasid_state;
+ struct device_state *dev_state;
+ unsigned long flags;
+ struct fault *fault;
+ bool finish;
+ u16 tag;
+ int ret;
+
+ iommu_fault = data;
+ tag = iommu_fault->tag & 0x1ff;
+ finish = (iommu_fault->tag >> 9) & 1;
+
+ ret = NOTIFY_DONE;
+ dev_state = get_device_state(iommu_fault->device_id);
+ if (dev_state == NULL)
+ goto out;
+
+ pasid_state = get_pasid_state(dev_state, iommu_fault->pasid);
+ if (pasid_state == NULL) {
+ /* We know the device but not the PASID -> send INVALID */
+ amd_iommu_complete_ppr(dev_state->pdev, iommu_fault->pasid,
+ PPR_INVALID, tag);
+ goto out_drop_state;
+ }
+
+ spin_lock_irqsave(&pasid_state->lock, flags);
+ atomic_inc(&pasid_state->pri[tag].inflight);
+ if (finish)
+ pasid_state->pri[tag].finish = true;
+ spin_unlock_irqrestore(&pasid_state->lock, flags);
+
+ fault = kzalloc(sizeof(*fault), GFP_ATOMIC);
+ if (fault == NULL) {
+ /* We are OOM - send success and let the device re-fault */
+ finish_pri_tag(dev_state, pasid_state, tag);
+ goto out_drop_state;
+ }
+
+ fault->dev_state = dev_state;
+ fault->address = iommu_fault->address;
+ fault->state = pasid_state;
+ fault->tag = tag;
+ fault->finish = finish;
+ fault->flags = iommu_fault->flags;
+ INIT_WORK(&fault->work, do_fault);
+
+ queue_work(iommu_wq, &fault->work);
+
+ ret = NOTIFY_OK;
+
+out_drop_state:
+ put_device_state(dev_state);
+
+out:
+ return ret;
+}
+
+static struct notifier_block ppr_nb = {
+ .notifier_call = ppr_notifier,
+};
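
The tag field that the IOMMU hands to ppr_notifier() packs two pieces of information: the low nine bits carry the PPR tag that must be echoed back in any completion, and bit 9 says whether the device expects a completion at all. A stand-alone sketch of that decoding (illustration only; the sample raw value is made up):

    #include <stdio.h>

    int main(void)
    {
        unsigned int raw = 0x3a7;               /* hypothetical fault->tag value */
        unsigned int tag = raw & 0x1ff;         /* low 9 bits: PPR tag */
        unsigned int finish = (raw >> 9) & 1;   /* bit 9: completion required */

        printf("tag=%u finish=%u\n", tag, finish);
        return 0;
    }
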
+
+static int task_exit(struct notifier_block *nb, unsigned long e, void *data)
+{
+ struct pasid_state *pasid_state;
+ struct task_struct *task;
+
+ task = data;
+
+ /*
+ * Using this notifier is a hack - but there is no other choice
+ * at the moment. What I really want is a sleeping notifier that
+ * is called when an MM goes down. But such a notifier doesn't
+ * exist yet. The notifier needs to sleep because it has to make
+ * sure that the device does not use the PASID and the address
+ * space anymore before it is destroyed. This includes waiting
+ * for pending PRI requests to pass the workqueue. The
+ * MMU-Notifiers would be a good fit, but they use RCU and so
+ * they are not allowed to sleep. Let's see how we can solve this
+ * in a more intelligent way in the future.
+ */
+again:
+ spin_lock(&ps_lock);
+ list_for_each_entry(pasid_state, &pasid_state_list, list) {
+ struct device_state *dev_state;
+ int pasid;
+
+ if (pasid_state->task != task)
+ continue;
+
+ /* Drop Lock and unbind */
+ spin_unlock(&ps_lock);
+
+ dev_state = pasid_state->device_state;
+ pasid = pasid_state->pasid;
+
+ if (pasid_state->device_state->inv_ctx_cb)
+ dev_state->inv_ctx_cb(dev_state->pdev, pasid);
+
+ unbind_pasid(dev_state, pasid);
+
+ /* Task may be in the list multiple times */
+ goto again;
+ }
+ spin_unlock(&ps_lock);
+
+ return NOTIFY_OK;
+}
+
+int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
+ struct task_struct *task)
+{
+ struct pasid_state *pasid_state;
+ struct device_state *dev_state;
+ u16 devid;
+ int ret;
+
+ might_sleep();
+
+ if (!amd_iommu_v2_supported())
+ return -ENODEV;
+
+ devid = device_id(pdev);
+ dev_state = get_device_state(devid);
+
+ if (dev_state == NULL)
+ return -EINVAL;
+
+ ret = -EINVAL;
+ if (pasid < 0 || pasid >= dev_state->max_pasids)
+ goto out;
+
+ ret = -ENOMEM;
+ pasid_state = kzalloc(sizeof(*pasid_state), GFP_KERNEL);
+ if (pasid_state == NULL)
+ goto out;
+
+ atomic_set(&pasid_state->count, 1);
+ init_waitqueue_head(&pasid_state->wq);
+ pasid_state->task = task;
+ pasid_state->mm = get_task_mm(task);
+ pasid_state->device_state = dev_state;
+ pasid_state->pasid = pasid;
+ pasid_state->mn.ops = &iommu_mn;
+
+ if (pasid_state->mm == NULL)
+ goto out_free;
+
+ mmu_notifier_register(&pasid_state->mn, pasid_state->mm);
+
+ ret = set_pasid_state(dev_state, pasid_state, pasid);
+ if (ret)
+ goto out_unregister;
+
+ ret = amd_iommu_domain_set_gcr3(dev_state->domain, pasid,
+ __pa(pasid_state->mm->pgd));
+ if (ret)
+ goto out_clear_state;
+
+ link_pasid_state(pasid_state);
+
+ return 0;
+
+out_clear_state:
+ clear_pasid_state(dev_state, pasid);
+
+out_unregister:
+ mmu_notifier_unregister(&pasid_state->mn, pasid_state->mm);
+
+out_free:
+ free_pasid_state(pasid_state);
+
+out:
+ put_device_state(dev_state);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_bind_pasid);
+
+void amd_iommu_unbind_pasid(struct pci_dev *pdev, int pasid)
+{
+ struct device_state *dev_state;
+ u16 devid;
+
+ might_sleep();
+
+ if (!amd_iommu_v2_supported())
+ return;
+
+ devid = device_id(pdev);
+ dev_state = get_device_state(devid);
+ if (dev_state == NULL)
+ return;
+
+ if (pasid < 0 || pasid >= dev_state->max_pasids)
+ goto out;
+
+ unbind_pasid(dev_state, pasid);
+
+out:
+ put_device_state(dev_state);
+}
+EXPORT_SYMBOL(amd_iommu_unbind_pasid);
+
+int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
+{
+ struct device_state *dev_state;
+ unsigned long flags;
+ int ret, tmp;
+ u16 devid;
+
+ might_sleep();
+
+ if (!amd_iommu_v2_supported())
+ return -ENODEV;
+
+ if (pasids <= 0 || pasids > (PASID_MASK + 1))
+ return -EINVAL;
+
+ devid = device_id(pdev);
+
+ dev_state = kzalloc(sizeof(*dev_state), GFP_KERNEL);
+ if (dev_state == NULL)
+ return -ENOMEM;
+
+ spin_lock_init(&dev_state->lock);
+ init_waitqueue_head(&dev_state->wq);
+ dev_state->pdev = pdev;
+
+ tmp = pasids;
+ for (dev_state->pasid_levels = 0; (tmp - 1) & ~0x1ff; tmp >>= 9)
+ dev_state->pasid_levels += 1;
+
+ atomic_set(&dev_state->count, 1);
+ dev_state->max_pasids = pasids;
+
+ ret = -ENOMEM;
+ dev_state->states = (void *)get_zeroed_page(GFP_KERNEL);
+ if (dev_state->states == NULL)
+ goto out_free_dev_state;
+
+ dev_state->domain = iommu_domain_alloc(&pci_bus_type);
+ if (dev_state->domain == NULL)
+ goto out_free_states;
+
+ amd_iommu_domain_direct_map(dev_state->domain);
+
+ ret = amd_iommu_domain_enable_v2(dev_state->domain, pasids);
+ if (ret)
+ goto out_free_domain;
+
+ ret = iommu_attach_device(dev_state->domain, &pdev->dev);
+ if (ret != 0)
+ goto out_free_domain;
+
+ spin_lock_irqsave(&state_lock, flags);
+
+ if (state_table[devid] != NULL) {
+ spin_unlock_irqrestore(&state_lock, flags);
+ ret = -EBUSY;
+ goto out_free_domain;
+ }
+
+ state_table[devid] = dev_state;
+
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ return 0;
+
+out_free_domain:
+ iommu_domain_free(dev_state->domain);
+
+out_free_states:
+ free_page((unsigned long)dev_state->states);
+
+out_free_dev_state:
+ kfree(dev_state);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_init_device);
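
amd_iommu_init_device() sizes the PASID table by counting how many extra nine-bit levels are needed: up to 512 PASIDs fit directly into the single page allocated for dev_state->states, and every further factor of 512 adds one level of indirection. A user-space sketch of that loop (not part of the patch; pasid_levels() is a made-up helper):

    #include <stdio.h>

    /* Same level computation as the for-loop in amd_iommu_init_device(). */
    static int pasid_levels(int pasids)
    {
        int levels = 0;
        int tmp;

        for (tmp = pasids; (tmp - 1) & ~0x1ff; tmp >>= 9)
            levels += 1;

        return levels;
    }

    int main(void)
    {
        printf("512 PASIDs   -> %d extra level(s)\n", pasid_levels(512));   /* 0 */
        printf("65536 PASIDs -> %d extra level(s)\n", pasid_levels(65536)); /* 1 */
        return 0;
    }
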
+
+void amd_iommu_free_device(struct pci_dev *pdev)
+{
+ struct device_state *dev_state;
+ unsigned long flags;
+ u16 devid;
+
+ if (!amd_iommu_v2_supported())
+ return;
+
+ devid = device_id(pdev);
+
+ spin_lock_irqsave(&state_lock, flags);
+
+ dev_state = state_table[devid];
+ if (dev_state == NULL) {
+ spin_unlock_irqrestore(&state_lock, flags);
+ return;
+ }
+
+ state_table[devid] = NULL;
+
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ /* Get rid of any remaining pasid states */
+ free_pasid_states(dev_state);
+
+ put_device_state_wait(dev_state);
+}
+EXPORT_SYMBOL(amd_iommu_free_device);
+
+int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev,
+ amd_iommu_invalid_ppr_cb cb)
+{
+ struct device_state *dev_state;
+ unsigned long flags;
+ u16 devid;
+ int ret;
+
+ if (!amd_iommu_v2_supported())
+ return -ENODEV;
+
+ devid = device_id(pdev);
+
+ spin_lock_irqsave(&state_lock, flags);
+
+ ret = -EINVAL;
+ dev_state = state_table[devid];
+ if (dev_state == NULL)
+ goto out_unlock;
+
+ dev_state->inv_ppr_cb = cb;
+
+ ret = 0;
+
+out_unlock:
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_set_invalid_ppr_cb);
+
+int amd_iommu_set_invalidate_ctx_cb(struct pci_dev *pdev,
+ amd_iommu_invalidate_ctx cb)
+{
+ struct device_state *dev_state;
+ unsigned long flags;
+ u16 devid;
+ int ret;
+
+ if (!amd_iommu_v2_supported())
+ return -ENODEV;
+
+ devid = device_id(pdev);
+
+ spin_lock_irqsave(&state_lock, flags);
+
+ ret = -EINVAL;
+ dev_state = state_table[devid];
+ if (dev_state == NULL)
+ goto out_unlock;
+
+ dev_state->inv_ctx_cb = cb;
+
+ ret = 0;
+
+out_unlock:
+ spin_unlock_irqrestore(&state_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(amd_iommu_set_invalidate_ctx_cb);
+
+static int __init amd_iommu_v2_init(void)
+{
+ size_t state_table_size;
+ int ret;
+
+ pr_info("AMD IOMMUv2 driver by Joerg Roedel <joerg.roedel@amd.com>\n");
+
+ spin_lock_init(&state_lock);
+
+ state_table_size = MAX_DEVICES * sizeof(struct device_state *);
+ state_table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+ get_order(state_table_size));
+ if (state_table == NULL)
+ return -ENOMEM;
+
+ ret = -ENOMEM;
+ iommu_wq = create_workqueue("amd_iommu_v2");
+ if (iommu_wq == NULL)
+ goto out_free;
+
+ ret = -ENOMEM;
+ empty_page_table = (u64 *)get_zeroed_page(GFP_KERNEL);
+ if (empty_page_table == NULL)
+ goto out_destroy_wq;
+
+ amd_iommu_register_ppr_notifier(&ppr_nb);
+ profile_event_register(PROFILE_TASK_EXIT, &profile_nb);
+
+ return 0;
+
+out_destroy_wq:
+ destroy_workqueue(iommu_wq);
+
+out_free:
+ free_pages((unsigned long)state_table, get_order(state_table_size));
+
+ return ret;
+}
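
The init path allocates state_table as one pointer per possible PCI device ID and uses __get_free_pages(), so the request is rounded up to a power-of-two number of pages. A rough sketch of that rounding, assuming MAX_DEVICES is 0x10000 (one slot per 16-bit devid) and 4KiB pages; both values are assumptions for illustration, the real ones come from the AMD IOMMU headers:

    #include <stdio.h>

    /* Round a size up to a power-of-two number of pages, like get_order(). */
    static int order_for(size_t size, size_t page_size)
    {
        int order = 0;

        while ((page_size << order) < size)
            order++;

        return order;
    }

    int main(void)
    {
        size_t table_size = 0x10000 * sizeof(void *);   /* assumed MAX_DEVICES */

        printf("state_table: %zu bytes -> order %d\n",
               table_size, order_for(table_size, 4096));
        return 0;
    }
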
+
+static void __exit amd_iommu_v2_exit(void)
+{
+ struct device_state *dev_state;
+ size_t state_table_size;
+ int i;
+
+ profile_event_unregister(PROFILE_TASK_EXIT, &profile_nb);
+ amd_iommu_unregister_ppr_notifier(&ppr_nb);
+
+ flush_workqueue(iommu_wq);
+
+ /*
+ * The loop below might call flush_workqueue(), so call
+ * destroy_workqueue() after it
+ */
+ for (i = 0; i < MAX_DEVICES; ++i) {
+ dev_state = get_device_state(i);
+
+ if (dev_state == NULL)
+ continue;
+
+ WARN_ON_ONCE(1);
+
+ put_device_state(dev_state);
+ amd_iommu_free_device(dev_state->pdev);
+ }
+
+ destroy_workqueue(iommu_wq);
+
+ state_table_size = MAX_DEVICES * sizeof(struct device_state *);
+ free_pages((unsigned long)state_table, get_order(state_table_size));
+
+ free_page((unsigned long)empty_page_table);
+}
+
+module_init(amd_iommu_v2_init);
+module_exit(amd_iommu_v2_exit);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index bdc447f..c9c6053 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -41,6 +41,7 @@
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pci-ats.h>
+#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
@@ -78,6 +79,24 @@
#define LEVEL_STRIDE (9)
#define LEVEL_MASK (((u64)1 << LEVEL_STRIDE) - 1)
+/*
+ * This bitmap is used to advertise the page sizes our hardware supports
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * Traditionally the IOMMU core just handed us the mappings directly,
+ * after making sure the size is a power-of-two multiple of 4KiB and that the
+ * mapping has natural alignment.
+ *
+ * To retain this behavior, we currently advertise that we support
+ * all page sizes that are an order of 4KiB.
+ *
+ * If at some point we'd like to utilize the IOMMU core's new behavior,
+ * we could change this to advertise the real page sizes we support.
+ */
+#define INTEL_IOMMU_PGSIZES (~0xFFFUL)
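
In this bitmap, bit n set means "pages of 2^n bytes are supported", so ~0xFFFUL advertises every power of two from 4KiB upward, and the core's minimum-page-size computation in iommu.c below (1 << __ffs(pgsize_bitmap)) comes out to 4KiB. A quick user-space check (illustration only; __builtin_ctzl stands in for the kernel's __ffs()):

    #include <stdio.h>

    int main(void)
    {
        unsigned long pgsizes = ~0xFFFUL;   /* every power of two >= 4KiB */
        unsigned long min_pagesz = 1UL << __builtin_ctzl(pgsizes);

        printf("smallest advertised page size: %lu bytes\n", min_pagesz);
        return 0;
    }
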
+
static inline int agaw_to_level(int agaw)
{
return agaw + 2;
@@ -2188,18 +2207,6 @@ static inline void iommu_prepare_isa(void)
static int md_domain_init(struct dmar_domain *domain, int guest_width);
-static int __init si_domain_work_fn(unsigned long start_pfn,
- unsigned long end_pfn, void *datax)
-{
- int *ret = datax;
-
- *ret = iommu_domain_identity_map(si_domain,
- (uint64_t)start_pfn << PAGE_SHIFT,
- (uint64_t)end_pfn << PAGE_SHIFT);
- return *ret;
-
-}
-
static int __init si_domain_init(int hw)
{
struct dmar_drhd_unit *drhd;
@@ -2231,9 +2238,15 @@ static int __init si_domain_init(int hw)
return 0;
for_each_online_node(nid) {
- work_with_active_regions(nid, si_domain_work_fn, &ret);
- if (ret)
- return ret;
+ unsigned long start_pfn, end_pfn;
+ int i;
+
+ for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
+ ret = iommu_domain_identity_map(si_domain,
+ PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
+ if (ret)
+ return ret;
+ }
}
return 0;
@@ -3984,12 +3997,11 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
static int intel_iommu_map(struct iommu_domain *domain,
unsigned long iova, phys_addr_t hpa,
- int gfp_order, int iommu_prot)
+ size_t size, int iommu_prot)
{
struct dmar_domain *dmar_domain = domain->priv;
u64 max_addr;
int prot = 0;
- size_t size;
int ret;
if (iommu_prot & IOMMU_READ)
@@ -3999,7 +4011,6 @@ static int intel_iommu_map(struct iommu_domain *domain,
if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
prot |= DMA_PTE_SNP;
- size = PAGE_SIZE << gfp_order;
max_addr = iova + size;
if (dmar_domain->max_addr < max_addr) {
u64 end;
@@ -4022,11 +4033,10 @@ static int intel_iommu_map(struct iommu_domain *domain,
return ret;
}
-static int intel_iommu_unmap(struct iommu_domain *domain,
- unsigned long iova, int gfp_order)
+static size_t intel_iommu_unmap(struct iommu_domain *domain,
+ unsigned long iova, size_t size)
{
struct dmar_domain *dmar_domain = domain->priv;
- size_t size = PAGE_SIZE << gfp_order;
int order;
order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
@@ -4035,7 +4045,7 @@ static int intel_iommu_unmap(struct iommu_domain *domain,
if (dmar_domain->max_addr == iova + size)
dmar_domain->max_addr = iova;
- return order;
+ return PAGE_SIZE << order;
}
static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -4065,6 +4075,54 @@ static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
return 0;
}
+/*
+ * Group numbers are arbitrary. Devices with the same group number
+ * indicate that the IOMMU cannot differentiate between them. To avoid
+ * tracking used groups we just use the seg|bus|devfn of the lowest
+ * level at which we're able to differentiate devices.
+ */
+static int intel_iommu_device_group(struct device *dev, unsigned int *groupid)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct pci_dev *bridge;
+ union {
+ struct {
+ u8 devfn;
+ u8 bus;
+ u16 segment;
+ } pci;
+ u32 group;
+ } id;
+
+ if (iommu_no_mapping(dev))
+ return -ENODEV;
+
+ id.pci.segment = pci_domain_nr(pdev->bus);
+ id.pci.bus = pdev->bus->number;
+ id.pci.devfn = pdev->devfn;
+
+ if (!device_to_iommu(id.pci.segment, id.pci.bus, id.pci.devfn))
+ return -ENODEV;
+
+ bridge = pci_find_upstream_pcie_bridge(pdev);
+ if (bridge) {
+ if (pci_is_pcie(bridge)) {
+ id.pci.bus = bridge->subordinate->number;
+ id.pci.devfn = 0;
+ } else {
+ id.pci.bus = bridge->bus->number;
+ id.pci.devfn = bridge->devfn;
+ }
+ }
+
+ if (!pdev->is_virtfn && iommu_group_mf)
+ id.pci.devfn = PCI_DEVFN(PCI_SLOT(id.pci.devfn), 0);
+
+ *groupid = id.group;
+
+ return 0;
+}
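
The union lets the function build a group number by simply filling in segment, bus and devfn; whatever byte order the struct ends up with, identical seg/bus/devfn triples produce identical group ids, which is all the API requires. A stand-alone sketch (illustration only; the PCI address 0000:03:00.0 is made up):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        union {
            struct {
                uint8_t  devfn;
                uint8_t  bus;
                uint16_t segment;
            } pci;
            uint32_t group;
        } id;

        id.pci.segment = 0x0000;    /* hypothetical device 0000:03:00.0 */
        id.pci.bus     = 0x03;
        id.pci.devfn   = 0x00;      /* slot 0, function 0 */

        printf("group id: 0x%08x\n", id.group);
        return 0;
    }
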
+
static struct iommu_ops intel_iommu_ops = {
.domain_init = intel_iommu_domain_init,
.domain_destroy = intel_iommu_domain_destroy,
@@ -4074,6 +4132,8 @@ static struct iommu_ops intel_iommu_ops = {
.unmap = intel_iommu_unmap,
.iova_to_phys = intel_iommu_iova_to_phys,
.domain_has_cap = intel_iommu_domain_has_cap,
+ .device_group = intel_iommu_device_group,
+ .pgsize_bitmap = INTEL_IOMMU_PGSIZES,
};
static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 5b5fa5c..2198b2d 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -16,6 +16,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/bug.h>
@@ -25,8 +27,59 @@
#include <linux/errno.h>
#include <linux/iommu.h>
+static ssize_t show_iommu_group(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned int groupid;
+
+ if (iommu_device_group(dev, &groupid))
+ return 0;
+
+ return sprintf(buf, "%u", groupid);
+}
+static DEVICE_ATTR(iommu_group, S_IRUGO, show_iommu_group, NULL);
+
+static int add_iommu_group(struct device *dev, void *data)
+{
+ unsigned int groupid;
+
+ if (iommu_device_group(dev, &groupid) == 0)
+ return device_create_file(dev, &dev_attr_iommu_group);
+
+ return 0;
+}
+
+static int remove_iommu_group(struct device *dev)
+{
+ unsigned int groupid;
+
+ if (iommu_device_group(dev, &groupid) == 0)
+ device_remove_file(dev, &dev_attr_iommu_group);
+
+ return 0;
+}
+
+static int iommu_device_notifier(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+
+ if (action == BUS_NOTIFY_ADD_DEVICE)
+ return add_iommu_group(dev, NULL);
+ else if (action == BUS_NOTIFY_DEL_DEVICE)
+ return remove_iommu_group(dev);
+
+ return 0;
+}
+
+static struct notifier_block iommu_device_nb = {
+ .notifier_call = iommu_device_notifier,
+};
+
static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
{
+ bus_register_notifier(bus, &iommu_device_nb);
+ bus_for_each_dev(bus, NULL, NULL, add_iommu_group);
}
/**
@@ -157,32 +210,134 @@ int iommu_domain_has_cap(struct iommu_domain *domain,
EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
int iommu_map(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, int gfp_order, int prot)
+ phys_addr_t paddr, size_t size, int prot)
{
- size_t size;
+ unsigned long orig_iova = iova;
+ unsigned int min_pagesz;
+ size_t orig_size = size;
+ int ret = 0;
if (unlikely(domain->ops->map == NULL))
return -ENODEV;
- size = PAGE_SIZE << gfp_order;
+ /* find out the minimum page size supported */
+ min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+ /*
+ * both the virtual address and the physical one, as well as
+ * the size of the mapping, must be aligned (at least) to the
+ * size of the smallest page supported by the hardware
+ */
+ if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
+ pr_err("unaligned: iova 0x%lx pa 0x%lx size 0x%lx min_pagesz "
+ "0x%x\n", iova, (unsigned long)paddr,
+ (unsigned long)size, min_pagesz);
+ return -EINVAL;
+ }
+
+ pr_debug("map: iova 0x%lx pa 0x%lx size 0x%lx\n", iova,
+ (unsigned long)paddr, (unsigned long)size);
+
+ while (size) {
+ unsigned long pgsize, addr_merge = iova | paddr;
+ unsigned int pgsize_idx;
+
+ /* Max page size that still fits into 'size' */
+ pgsize_idx = __fls(size);
+
+ /* do we need to consider alignment requirements? */
+ if (likely(addr_merge)) {
+ /* Max page size allowed by both iova and paddr */
+ unsigned int align_pgsize_idx = __ffs(addr_merge);
+
+ pgsize_idx = min(pgsize_idx, align_pgsize_idx);
+ }
+
+ /* build a mask of acceptable page sizes */
+ pgsize = (1UL << (pgsize_idx + 1)) - 1;
- BUG_ON(!IS_ALIGNED(iova | paddr, size));
+ /* throw away page sizes not supported by the hardware */
+ pgsize &= domain->ops->pgsize_bitmap;
- return domain->ops->map(domain, iova, paddr, gfp_order, prot);
+ /* make sure we're still sane */
+ BUG_ON(!pgsize);
+
+ /* pick the biggest page */
+ pgsize_idx = __fls(pgsize);
+ pgsize = 1UL << pgsize_idx;
+
+ pr_debug("mapping: iova 0x%lx pa 0x%lx pgsize %lu\n", iova,
+ (unsigned long)paddr, pgsize);
+
+ ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
+ if (ret)
+ break;
+
+ iova += pgsize;
+ paddr += pgsize;
+ size -= pgsize;
+ }
+
+ /* unroll mapping in case something went wrong */
+ if (ret)
+ iommu_unmap(domain, orig_iova, orig_size - size);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
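
Each iteration of the loop above picks the largest page that still fits the remaining size, respects the alignment of both iova and paddr, and is present in the driver's pgsize_bitmap. A user-space sketch of that selection, assuming a 64-bit unsigned long and using __builtin_clzl()/__builtin_ctzl() in place of the kernel's __fls()/__ffs() (pick_pgsize() is a made-up helper, not a kernel function):

    #include <stdio.h>

    static unsigned long pick_pgsize(unsigned long iova, unsigned long paddr,
                                     unsigned long size,
                                     unsigned long pgsize_bitmap)
    {
        unsigned long addr_merge = iova | paddr;
        unsigned int pgsize_idx = 63 - __builtin_clzl(size);    /* __fls(size) */
        unsigned long pgsize;

        if (addr_merge) {
            unsigned int align_idx = __builtin_ctzl(addr_merge); /* __ffs() */

            if (align_idx < pgsize_idx)
                pgsize_idx = align_idx;
        }

        /* candidate sizes up to pgsize_idx, filtered by hardware support;
         * the kernel BUG()s if this mask ever ends up empty */
        pgsize = ((1UL << (pgsize_idx + 1)) - 1) & pgsize_bitmap;

        return 1UL << (63 - __builtin_clzl(pgsize)); /* biggest remaining size */
    }

    int main(void)
    {
        /* 2MiB-aligned, 3MiB-long mapping; hardware offers 4KiB and 2MiB pages */
        unsigned long bitmap = (1UL << 12) | (1UL << 21);

        printf("first chunk: %lu bytes\n",
               pick_pgsize(0x200000, 0x400000, 0x300000, bitmap)); /* 2MiB */
        return 0;
    }
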
-int iommu_unmap(struct iommu_domain *domain, unsigned long iova, int gfp_order)
+size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
- size_t size;
+ size_t unmapped_page, unmapped = 0;
+ unsigned int min_pagesz;
if (unlikely(domain->ops->unmap == NULL))
return -ENODEV;
- size = PAGE_SIZE << gfp_order;
+ /* find out the minimum page size supported */
+ min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+
+ /*
+ * The virtual address, as well as the size of the mapping, must be
+ * aligned (at least) to the size of the smallest page supported
+ * by the hardware
+ */
+ if (!IS_ALIGNED(iova | size, min_pagesz)) {
+ pr_err("unaligned: iova 0x%lx size 0x%lx min_pagesz 0x%x\n",
+ iova, (unsigned long)size, min_pagesz);
+ return -EINVAL;
+ }
+
+ pr_debug("unmap this: iova 0x%lx size 0x%lx\n", iova,
+ (unsigned long)size);
+
+ /*
+ * Keep iterating until we either unmap 'size' bytes (or more)
+ * or we hit an area that isn't mapped.
+ */
+ while (unmapped < size) {
+ size_t left = size - unmapped;
+
+ unmapped_page = domain->ops->unmap(domain, iova, left);
+ if (!unmapped_page)
+ break;
+
+ pr_debug("unmapped: iova 0x%lx size %lx\n", iova,
+ (unsigned long)unmapped_page);
+
+ iova += unmapped_page;
+ unmapped += unmapped_page;
+ }
+
+ return unmapped;
+}
+EXPORT_SYMBOL_GPL(iommu_unmap);
- BUG_ON(!IS_ALIGNED(iova, size));
+int iommu_device_group(struct device *dev, unsigned int *groupid)
+{
+ if (iommu_present(dev->bus) && dev->bus->iommu_ops->device_group)
+ return dev->bus->iommu_ops->device_group(dev, groupid);
- return domain->ops->unmap(domain, iova, gfp_order);
+ return -ENODEV;
}
-EXPORT_SYMBOL_GPL(iommu_unmap);
+EXPORT_SYMBOL_GPL(iommu_device_group);
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 5865dd2..cee307e 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -42,6 +42,9 @@ __asm__ __volatile__ ( \
#define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0)
#define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1)
+/* bitmap of the page sizes currently supported */
+#define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+
static int msm_iommu_tex_class[4];
DEFINE_SPINLOCK(msm_iommu_lock);
@@ -352,7 +355,7 @@ fail:
}
static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
- phys_addr_t pa, int order, int prot)
+ phys_addr_t pa, size_t len, int prot)
{
struct msm_priv *priv;
unsigned long flags;
@@ -363,7 +366,6 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long va,
unsigned long *sl_pte;
unsigned long sl_offset;
unsigned int pgprot;
- size_t len = 0x1000UL << order;
int ret = 0, tex, sh;
spin_lock_irqsave(&msm_iommu_lock, flags);
@@ -463,8 +465,8 @@ fail:
return ret;
}
-static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
- int order)
+static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
+ size_t len)
{
struct msm_priv *priv;
unsigned long flags;
@@ -474,30 +476,25 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
unsigned long *sl_table;
unsigned long *sl_pte;
unsigned long sl_offset;
- size_t len = 0x1000UL << order;
int i, ret = 0;
spin_lock_irqsave(&msm_iommu_lock, flags);
priv = domain->priv;
- if (!priv) {
- ret = -ENODEV;
+ if (!priv)
goto fail;
- }
fl_table = priv->pgtable;
if (len != SZ_16M && len != SZ_1M &&
len != SZ_64K && len != SZ_4K) {
pr_debug("Bad length: %d\n", len);
- ret = -EINVAL;
goto fail;
}
if (!fl_table) {
pr_debug("Null page table\n");
- ret = -EINVAL;
goto fail;
}
@@ -506,7 +503,6 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
if (*fl_pte == 0) {
pr_debug("First level PTE is 0\n");
- ret = -ENODEV;
goto fail;
}
@@ -544,15 +540,12 @@ static int msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
ret = __flush_iotlb(domain);
- /*
- * the IOMMU API requires us to return the order of the unmapped
- * page (on success).
- */
- if (!ret)
- ret = order;
fail:
spin_unlock_irqrestore(&msm_iommu_lock, flags);
- return ret;
+
+ /* the IOMMU API requires us to return how many bytes were unmapped */
+ len = ret ? 0 : len;
+ return len;
}
static phys_addr_t msm_iommu_iova_to_phys(struct iommu_domain *domain,
@@ -684,7 +677,8 @@ static struct iommu_ops msm_iommu_ops = {
.map = msm_iommu_map,
.unmap = msm_iommu_unmap,
.iova_to_phys = msm_iommu_iova_to_phys,
- .domain_has_cap = msm_iommu_domain_has_cap
+ .domain_has_cap = msm_iommu_domain_has_cap,
+ .pgsize_bitmap = MSM_IOMMU_PGSIZES,
};
static int __init get_tex_class(int icp, int ocp, int mt, int nos)
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index 288da5c..103dbd9 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -44,7 +44,8 @@ static ssize_t debug_read_ver(struct file *file, char __user *userbuf,
static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
- struct omap_iommu *obj = file->private_data;
+ struct device *dev = file->private_data;
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
char *p, *buf;
ssize_t bytes;
@@ -67,7 +68,8 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
- struct omap_iommu *obj = file->private_data;
+ struct device *dev = file->private_data;
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
char *p, *buf;
ssize_t bytes, rest;
@@ -97,7 +99,8 @@ static ssize_t debug_write_pagetable(struct file *file,
struct iotlb_entry e;
struct cr_regs cr;
int err;
- struct omap_iommu *obj = file->private_data;
+ struct device *dev = file->private_data;
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
char buf[MAXCOLUMN], *p = buf;
count = min(count, sizeof(buf));
@@ -184,7 +187,8 @@ out:
static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
- struct omap_iommu *obj = file->private_data;
+ struct device *dev = file->private_data;
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
char *p, *buf;
size_t bytes;
@@ -212,7 +216,8 @@ static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf,
static ssize_t debug_read_mmap(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
- struct omap_iommu *obj = file->private_data;
+ struct device *dev = file->private_data;
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
char *p, *buf;
struct iovm_struct *tmp;
int uninitialized_var(i);
@@ -254,7 +259,7 @@ static ssize_t debug_read_mmap(struct file *file, char __user *userbuf,
static ssize_t debug_read_mem(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
- struct omap_iommu *obj = file->private_data;
+ struct device *dev = file->private_data;
char *p, *buf;
struct iovm_struct *area;
ssize_t bytes;
@@ -268,8 +273,8 @@ static ssize_t debug_read_mem(struct file *file, char __user *userbuf,
mutex_lock(&iommu_debug_lock);
- area = omap_find_iovm_area(obj, (u32)ppos);
- if (IS_ERR(area)) {
+ area = omap_find_iovm_area(dev, (u32)ppos);
+ if (!area) {
bytes = -EINVAL;
goto err_out;
}
@@ -287,7 +292,7 @@ err_out:
static ssize_t debug_write_mem(struct file *file, const char __user *userbuf,
size_t count, loff_t *ppos)
{
- struct omap_iommu *obj = file->private_data;
+ struct device *dev = file->private_data;
struct iovm_struct *area;
char *p, *buf;
@@ -305,8 +310,8 @@ static ssize_t debug_write_mem(struct file *file, const char __user *userbuf,
goto err_out;
}
- area = omap_find_iovm_area(obj, (u32)ppos);
- if (IS_ERR(area)) {
+ area = omap_find_iovm_area(dev, (u32)ppos);
+ if (!area) {
count = -EINVAL;
goto err_out;
}
@@ -350,7 +355,7 @@ DEBUG_FOPS(mem);
{ \
struct dentry *dent; \
dent = debugfs_create_file(#attr, mode, parent, \
- obj, &debug_##attr##_fops); \
+ dev, &debug_##attr##_fops); \
if (!dent) \
return -ENOMEM; \
}
@@ -362,20 +367,29 @@ static int iommu_debug_register(struct device *dev, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
struct omap_iommu *obj = platform_get_drvdata(pdev);
+ struct omap_iommu_arch_data *arch_data;
struct dentry *d, *parent;
if (!obj || !obj->dev)
return -EINVAL;
+ arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL);
+ if (!arch_data)
+ return -ENOMEM;
+
+ arch_data->iommu_dev = obj;
+
+ dev->archdata.iommu = arch_data;
+
d = debugfs_create_dir(obj->name, iommu_debug_root);
if (!d)
- return -ENOMEM;
+ goto nomem;
parent = d;
d = debugfs_create_u8("nr_tlb_entries", 400, parent,
(u8 *)&obj->nr_tlb_entries);
if (!d)
- return -ENOMEM;
+ goto nomem;
DEBUG_ADD_FILE_RO(ver);
DEBUG_ADD_FILE_RO(regs);
@@ -385,6 +399,22 @@ static int iommu_debug_register(struct device *dev, void *data)
DEBUG_ADD_FILE(mem);
return 0;
+
+nomem:
+ kfree(arch_data);
+ return -ENOMEM;
+}
+
+static int iommu_debug_unregister(struct device *dev, void *data)
+{
+ if (!dev->archdata.iommu)
+ return 0;
+
+ kfree(dev->archdata.iommu);
+
+ dev->archdata.iommu = NULL;
+
+ return 0;
}
static int __init iommu_debug_init(void)
@@ -411,6 +441,7 @@ module_init(iommu_debug_init)
static void __exit iommu_debugfs_exit(void)
{
debugfs_remove_recursive(iommu_debug_root);
+ omap_foreach_iommu_device(NULL, iommu_debug_unregister);
}
module_exit(iommu_debugfs_exit)
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 8f32b2b..6899dcd 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -33,6 +33,9 @@
(__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true); \
__i++)
+/* bitmap of the page sizes currently supported */
+#define OMAP_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
+
/**
* struct omap_iommu_domain - omap iommu domain
* @pgtable: the page table
@@ -86,20 +89,24 @@ EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);
/**
* omap_iommu_save_ctx - Save registers for pm off-mode support
- * @obj: target iommu
+ * @dev: client device
**/
-void omap_iommu_save_ctx(struct omap_iommu *obj)
+void omap_iommu_save_ctx(struct device *dev)
{
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
+
arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
/**
* omap_iommu_restore_ctx - Restore registers for pm off-mode support
- * @obj: target iommu
+ * @dev: client device
**/
-void omap_iommu_restore_ctx(struct omap_iommu *obj)
+void omap_iommu_restore_ctx(struct device *dev)
{
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
+
arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
@@ -820,35 +827,23 @@ static int device_match_by_alias(struct device *dev, void *data)
}
/**
- * omap_find_iommu_device() - find an omap iommu device by name
- * @name: name of the iommu device
- *
- * The generic iommu API requires the caller to provide the device
- * he wishes to attach to a certain iommu domain.
- *
- * Drivers generally should not bother with this as it should just
- * be taken care of by the DMA-API using dev_archdata.
- *
- * This function is provided as an interim solution until the latter
- * materializes, and omap3isp is fully migrated to the DMA-API.
- */
-struct device *omap_find_iommu_device(const char *name)
-{
- return driver_find_device(&omap_iommu_driver.driver, NULL,
- (void *)name,
- device_match_by_alias);
-}
-EXPORT_SYMBOL_GPL(omap_find_iommu_device);
-
-/**
* omap_iommu_attach() - attach iommu device to an iommu domain
- * @dev: target omap iommu device
+ * @name: name of target omap iommu device
* @iopgd: page table
**/
-static struct omap_iommu *omap_iommu_attach(struct device *dev, u32 *iopgd)
+static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
{
int err = -ENOMEM;
- struct omap_iommu *obj = to_iommu(dev);
+ struct device *dev;
+ struct omap_iommu *obj;
+
+ dev = driver_find_device(&omap_iommu_driver.driver, NULL,
+ (void *)name,
+ device_match_by_alias);
+ if (!dev)
+ return NULL;
+
+ obj = to_iommu(dev);
spin_lock(&obj->iommu_lock);
@@ -1019,12 +1014,11 @@ static void iopte_cachep_ctor(void *iopte)
}
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
- phys_addr_t pa, int order, int prot)
+ phys_addr_t pa, size_t bytes, int prot)
{
struct omap_iommu_domain *omap_domain = domain->priv;
struct omap_iommu *oiommu = omap_domain->iommu_dev;
struct device *dev = oiommu->dev;
- size_t bytes = PAGE_SIZE << order;
struct iotlb_entry e;
int omap_pgsz;
u32 ret, flags;
@@ -1049,19 +1043,16 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
return ret;
}
-static int omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
- int order)
+static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
+ size_t size)
{
struct omap_iommu_domain *omap_domain = domain->priv;
struct omap_iommu *oiommu = omap_domain->iommu_dev;
struct device *dev = oiommu->dev;
- size_t unmap_size;
-
- dev_dbg(dev, "unmapping da 0x%lx order %d\n", da, order);
- unmap_size = iopgtable_clear_entry(oiommu, da);
+ dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);
- return unmap_size ? get_order(unmap_size) : -EINVAL;
+ return iopgtable_clear_entry(oiommu, da);
}
static int
@@ -1069,6 +1060,7 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
struct omap_iommu_domain *omap_domain = domain->priv;
struct omap_iommu *oiommu;
+ struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
int ret = 0;
spin_lock(&omap_domain->lock);
@@ -1081,14 +1073,14 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
}
/* get a handle to and enable the omap iommu */
- oiommu = omap_iommu_attach(dev, omap_domain->pgtable);
+ oiommu = omap_iommu_attach(arch_data->name, omap_domain->pgtable);
if (IS_ERR(oiommu)) {
ret = PTR_ERR(oiommu);
dev_err(dev, "can't get omap iommu: %d\n", ret);
goto out;
}
- omap_domain->iommu_dev = oiommu;
+ omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
oiommu->domain = domain;
out:
@@ -1100,7 +1092,8 @@ static void omap_iommu_detach_dev(struct iommu_domain *domain,
struct device *dev)
{
struct omap_iommu_domain *omap_domain = domain->priv;
- struct omap_iommu *oiommu = to_iommu(dev);
+ struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
+ struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
spin_lock(&omap_domain->lock);
@@ -1114,7 +1107,7 @@ static void omap_iommu_detach_dev(struct iommu_domain *domain,
omap_iommu_detach(oiommu);
- omap_domain->iommu_dev = NULL;
+ omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
out:
spin_unlock(&omap_domain->lock);
@@ -1183,14 +1176,14 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
else if (iopte_is_large(*pte))
ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
else
- dev_err(dev, "bogus pte 0x%x", *pte);
+ dev_err(dev, "bogus pte 0x%x, da 0x%lx", *pte, da);
} else {
if (iopgd_is_section(*pgd))
ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
else if (iopgd_is_super(*pgd))
ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
else
- dev_err(dev, "bogus pgd 0x%x", *pgd);
+ dev_err(dev, "bogus pgd 0x%x, da 0x%lx", *pgd, da);
}
return ret;
@@ -1211,6 +1204,7 @@ static struct iommu_ops omap_iommu_ops = {
.unmap = omap_iommu_unmap,
.iova_to_phys = omap_iommu_iova_to_phys,
.domain_has_cap = omap_iommu_domain_has_cap,
+ .pgsize_bitmap = OMAP_IOMMU_PGSIZES,
};
static int __init omap_iommu_init(void)
@@ -1229,7 +1223,8 @@ static int __init omap_iommu_init(void)
return platform_driver_register(&omap_iommu_driver);
}
-module_init(omap_iommu_init);
+/* must be ready before omap3isp is probed */
+subsys_initcall(omap_iommu_init);
static void __exit omap_iommu_exit(void)
{
diff --git a/drivers/iommu/omap-iovmm.c b/drivers/iommu/omap-iovmm.c
index 46be456..2e10c3e 100644
--- a/drivers/iommu/omap-iovmm.c
+++ b/drivers/iommu/omap-iovmm.c
@@ -231,12 +231,14 @@ static struct iovm_struct *__find_iovm_area(struct omap_iommu *obj,
/**
* omap_find_iovm_area - find iovma which includes @da
+ * @dev: client device
* @da: iommu device virtual address
*
* Find the existing iovma starting at @da
*/
-struct iovm_struct *omap_find_iovm_area(struct omap_iommu *obj, u32 da)
+struct iovm_struct *omap_find_iovm_area(struct device *dev, u32 da)
{
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
struct iovm_struct *area;
mutex_lock(&obj->mmap_lock);
@@ -343,14 +345,15 @@ static void free_iovm_area(struct omap_iommu *obj, struct iovm_struct *area)
/**
* omap_da_to_va - convert (d) to (v)
- * @obj: objective iommu
+ * @dev: client device
* @da: iommu device virtual address
* @va: mpu virtual address
*
* Returns mpu virtual addr which corresponds to a given device virtual addr
*/
-void *omap_da_to_va(struct omap_iommu *obj, u32 da)
+void *omap_da_to_va(struct device *dev, u32 da)
{
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
void *va = NULL;
struct iovm_struct *area;
@@ -410,7 +413,6 @@ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
unsigned int i, j;
struct scatterlist *sg;
u32 da = new->da_start;
- int order;
if (!domain || !sgt)
return -EINVAL;
@@ -429,12 +431,10 @@ static int map_iovm_area(struct iommu_domain *domain, struct iovm_struct *new,
if (bytes_to_iopgsz(bytes) < 0)
goto err_out;
- order = get_order(bytes);
-
pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
i, da, pa, bytes);
- err = iommu_map(domain, da, pa, order, flags);
+ err = iommu_map(domain, da, pa, bytes, flags);
if (err)
goto err_out;
@@ -449,10 +449,9 @@ err_out:
size_t bytes;
bytes = sg->length + sg->offset;
- order = get_order(bytes);
/* ignore failures.. we're already handling one */
- iommu_unmap(domain, da, order);
+ iommu_unmap(domain, da, bytes);
da += bytes;
}
@@ -467,7 +466,8 @@ static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
size_t total = area->da_end - area->da_start;
const struct sg_table *sgt = area->sgt;
struct scatterlist *sg;
- int i, err;
+ int i;
+ size_t unmapped;
BUG_ON(!sgtable_ok(sgt));
BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));
@@ -475,13 +475,11 @@ static void unmap_iovm_area(struct iommu_domain *domain, struct omap_iommu *obj,
start = area->da_start;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
size_t bytes;
- int order;
bytes = sg->length + sg->offset;
- order = get_order(bytes);
- err = iommu_unmap(domain, start, order);
- if (err < 0)
+ unmapped = iommu_unmap(domain, start, bytes);
+ if (unmapped < bytes)
break;
dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
@@ -582,16 +580,18 @@ __iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj,
/**
* omap_iommu_vmap - (d)-(p)-(v) address mapper
- * @obj: objective iommu
+ * @domain: iommu domain
+ * @dev: client device
* @sgt: address of scatter gather table
* @flags: iovma and page property
*
* Creates 1-n-1 mapping with given @sgt and returns @da.
* All @sgt element must be io page size aligned.
*/
-u32 omap_iommu_vmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
+u32 omap_iommu_vmap(struct iommu_domain *domain, struct device *dev, u32 da,
const struct sg_table *sgt, u32 flags)
{
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
size_t bytes;
void *va = NULL;
@@ -622,15 +622,17 @@ EXPORT_SYMBOL_GPL(omap_iommu_vmap);
/**
* omap_iommu_vunmap - release virtual mapping obtained by 'omap_iommu_vmap()'
- * @obj: objective iommu
+ * @domain: iommu domain
+ * @dev: client device
* @da: iommu device virtual address
*
* Free the iommu virtually contiguous memory area starting at
* @da, which was returned by 'omap_iommu_vmap()'.
*/
struct sg_table *
-omap_iommu_vunmap(struct iommu_domain *domain, struct omap_iommu *obj, u32 da)
+omap_iommu_vunmap(struct iommu_domain *domain, struct device *dev, u32 da)
{
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
struct sg_table *sgt;
/*
* 'sgt' is allocated before 'omap_iommu_vmalloc()' is called.
@@ -647,7 +649,7 @@ EXPORT_SYMBOL_GPL(omap_iommu_vunmap);
/**
* omap_iommu_vmalloc - (d)-(p)-(v) address allocator and mapper
- * @obj: objective iommu
+ * @dev: client device
* @da: contiguous iommu virtual memory
* @bytes: allocation size
* @flags: iovma and page property
@@ -656,9 +658,10 @@ EXPORT_SYMBOL_GPL(omap_iommu_vunmap);
* @da again, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
*/
u32
-omap_iommu_vmalloc(struct iommu_domain *domain, struct omap_iommu *obj, u32 da,
+omap_iommu_vmalloc(struct iommu_domain *domain, struct device *dev, u32 da,
size_t bytes, u32 flags)
{
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
void *va;
struct sg_table *sgt;
@@ -698,15 +701,16 @@ EXPORT_SYMBOL_GPL(omap_iommu_vmalloc);
/**
* omap_iommu_vfree - release memory allocated by 'omap_iommu_vmalloc()'
- * @obj: objective iommu
+ * @dev: client device
* @da: iommu device virtual address
*
* Frees the iommu virtually continuous memory area starting at
* @da, as obtained from 'omap_iommu_vmalloc()'.
*/
-void omap_iommu_vfree(struct iommu_domain *domain, struct omap_iommu *obj,
+void omap_iommu_vfree(struct iommu_domain *domain, struct device *dev,
const u32 da)
{
+ struct omap_iommu *obj = dev_to_omap_iommu(dev);
struct sg_table *sgt;
sgt = unmap_vm_area(domain, obj, da, vfree,
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
index 04231cb..1793ba1 100644
--- a/drivers/isdn/gigaset/i4l.c
+++ b/drivers/isdn/gigaset/i4l.c
@@ -624,8 +624,6 @@ int gigaset_isdn_regdev(struct cardstate *cs, const char *isdnid)
{
isdn_if *iif;
- pr_info("ISDN4Linux interface\n");
-
iif = kmalloc(sizeof *iif, GFP_KERNEL);
if (!iif) {
pr_err("out of memory\n");
@@ -684,6 +682,7 @@ void gigaset_isdn_unregdev(struct cardstate *cs)
*/
void gigaset_isdn_regdrv(void)
{
+ pr_info("ISDN4Linux interface\n");
/* nothing to do */
}
diff --git a/drivers/isdn/hardware/avm/b1dma.c b/drivers/isdn/hardware/avm/b1dma.c
index 9c8d7aa0..a0ed668 100644
--- a/drivers/isdn/hardware/avm/b1dma.c
+++ b/drivers/isdn/hardware/avm/b1dma.c
@@ -40,7 +40,7 @@ MODULE_DESCRIPTION("CAPI4Linux: DMA support for active AVM cards");
MODULE_AUTHOR("Carsten Paeth");
MODULE_LICENSE("GPL");
-static int suppress_pollack = 0;
+static bool suppress_pollack = 0;
module_param(suppress_pollack, bool, 0);
/* ------------------------------------------------------------- */
diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c
index d3530f6..9743b24 100644
--- a/drivers/isdn/hardware/avm/c4.c
+++ b/drivers/isdn/hardware/avm/c4.c
@@ -40,7 +40,7 @@ static char *revision = "$Revision: 1.1.2.2 $";
/* ------------------------------------------------------------- */
-static int suppress_pollack;
+static bool suppress_pollack;
static struct pci_device_id c4_pci_tbl[] = {
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C4, 0, 0, (unsigned long)4 },
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 71a8eb6..0e1f4d5 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -2154,30 +2154,4 @@ static struct usb_driver hfcsusb_drv = {
.disconnect = hfcsusb_disconnect,
};
-static int __init
-hfcsusb_init(void)
-{
- printk(KERN_INFO DRIVER_NAME " driver Rev. %s debug(0x%x) poll(%i)\n",
- hfcsusb_rev, debug, poll);
-
- if (usb_register(&hfcsusb_drv)) {
- printk(KERN_INFO DRIVER_NAME
- ": Unable to register hfcsusb module at usb stack\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
-static void __exit
-hfcsusb_cleanup(void)
-{
- if (debug & DBG_HFC_CALL_TRACE)
- printk(KERN_INFO DRIVER_NAME ": %s\n", __func__);
-
- /* unregister Hardware */
- usb_deregister(&hfcsusb_drv); /* release our driver */
-}
-
-module_init(hfcsusb_init);
-module_exit(hfcsusb_cleanup);
+module_usb_driver(hfcsusb_drv);
diff --git a/drivers/isdn/hisax/enternow_pci.c b/drivers/isdn/hisax/enternow_pci.c
index 26264ab..f55d29d 100644
--- a/drivers/isdn/hisax/enternow_pci.c
+++ b/drivers/isdn/hisax/enternow_pci.c
@@ -333,7 +333,7 @@ static void __devinit en_cs_init(struct IsdnCard *card,
cs->hw.njet.isac = cs->hw.njet.base + 0xC0; // Fenster zum AMD
/* Reset an */
- cs->hw.njet.ctrl_reg = 0x07; // gendert von 0xff
+ cs->hw.njet.ctrl_reg = 0x07; // geändert von 0xff
outb(cs->hw.njet.ctrl_reg, cs->hw.njet.base + NETJET_CTRL);
/* 20 ms Pause */
mdelay(20);
diff --git a/drivers/isdn/i4l/Kconfig b/drivers/isdn/i4l/Kconfig
index 9c6650e..2302fbe 100644
--- a/drivers/isdn/i4l/Kconfig
+++ b/drivers/isdn/i4l/Kconfig
@@ -6,7 +6,7 @@ if ISDN_I4L
config ISDN_PPP
bool "Support synchronous PPP"
- depends on INET
+ depends on INET && NETDEVICES
select SLHC
help
Over digital connections such as ISDN, there is no need to
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 2339d73..802ab87 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -1901,7 +1901,7 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
{
isdn_net_local *lp = netdev_priv(dev);
unsigned char *p;
- ushort len = 0;
+ int len = 0;
switch (lp->p_encap) {
case ISDN_NET_ENCAP_ETHER:
diff --git a/drivers/isdn/sc/init.c b/drivers/isdn/sc/init.c
index ca710ab..023de78 100644
--- a/drivers/isdn/sc/init.c
+++ b/drivers/isdn/sc/init.c
@@ -30,7 +30,7 @@ static const char *boardname[] = { "DataCommute/BRI", "DataCommute/PRI", "TeleCo
static unsigned int io[] = {0,0,0,0};
static unsigned char irq[] = {0,0,0,0};
static unsigned long ram[] = {0,0,0,0};
-static int do_reset = 0;
+static bool do_reset = 0;
module_param_array(io, int, NULL, 0);
module_param_array(irq, int, NULL, 0);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index ff203a4..9ca28fc 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -347,7 +347,8 @@ config LEDS_MC13783
config LEDS_NS2
tristate "LED support for Network Space v2 GPIO LEDs"
depends on LEDS_CLASS
- depends on MACH_NETSPACE_V2 || MACH_INETSPACE_V2 || MACH_NETSPACE_MAX_V2 || D2NET_V2
+ depends on MACH_NETSPACE_V2 || MACH_INETSPACE_V2 || \
+ MACH_NETSPACE_MAX_V2 || MACH_D2NET_V2
default y
help
This option enable support for the dual-GPIO LED found on the
@@ -387,6 +388,28 @@ config LEDS_RENESAS_TPU
pin function. The latter to support brightness control.
Brightness control is supported but hardware blinking is not.
+config LEDS_TCA6507
+ tristate "LED Support for TCA6507 I2C chip"
+ depends on LEDS_CLASS && I2C
+ help
+ This option enables support for LEDs connected to TCA6507
+ LED driver chips accessed via the I2C bus.
+ The driver supports brightness control and hardware-assisted blinking.
+
+config LEDS_MAX8997
+ tristate "LED support for MAX8997 PMIC"
+ depends on LEDS_CLASS && MFD_MAX8997
+ help
+ This option enables support for the on-chip LED drivers on the
+ MAXIM MAX8997 PMIC.
+
+config LEDS_OT200
+ tristate "LED support for the Bachmann OT200"
+ depends on LEDS_CLASS && HAS_IOMEM
+ help
+ This option enables support for the LEDs on the Bachmann OT200.
+ Say Y here if you want to enable them.
+
config LEDS_TRIGGERS
bool "LED Trigger support"
depends on LEDS_CLASS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index e4f6bf5..1fc6875 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -25,8 +25,10 @@ obj-$(CONFIG_LEDS_GPIO) += leds-gpio.o
obj-$(CONFIG_LEDS_LP3944) += leds-lp3944.o
obj-$(CONFIG_LEDS_LP5521) += leds-lp5521.o
obj-$(CONFIG_LEDS_LP5523) += leds-lp5523.o
+obj-$(CONFIG_LEDS_TCA6507) += leds-tca6507.o
obj-$(CONFIG_LEDS_CLEVO_MAIL) += leds-clevo-mail.o
obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
+obj-$(CONFIG_LEDS_OT200) += leds-ot200.o
obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o
obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o
@@ -43,6 +45,7 @@ obj-$(CONFIG_LEDS_NS2) += leds-ns2.o
obj-$(CONFIG_LEDS_NETXBIG) += leds-netxbig.o
obj-$(CONFIG_LEDS_ASIC3) += leds-asic3.o
obj-$(CONFIG_LEDS_RENESAS_TPU) += leds-renesas-tpu.o
+obj-$(CONFIG_LEDS_MAX8997) += leds-max8997.o
# LED SPI Drivers
obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 6d5628b..0c8739c 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -15,7 +15,6 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/device.h>
-#include <linux/sysdev.h>
#include <linux/timer.h>
#include <linux/err.h>
#include <linux/ctype.h>
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index 6f1ff93..46b4c76 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -17,7 +17,6 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/device.h>
-#include <linux/sysdev.h>
#include <linux/timer.h>
#include <linux/rwsem.h>
#include <linux/leds.h>
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c
index 0810604..4ca0062 100644
--- a/drivers/leds/leds-88pm860x.c
+++ b/drivers/leds/leds-88pm860x.c
@@ -238,17 +238,7 @@ static struct platform_driver pm860x_led_driver = {
.remove = pm860x_led_remove,
};
-static int __devinit pm860x_led_init(void)
-{
- return platform_driver_register(&pm860x_led_driver);
-}
-module_init(pm860x_led_init);
-
-static void __devexit pm860x_led_exit(void)
-{
- platform_driver_unregister(&pm860x_led_driver);
-}
-module_exit(pm860x_led_exit);
+module_platform_driver(pm860x_led_driver);
MODULE_DESCRIPTION("LED driver for Marvell PM860x");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
diff --git a/drivers/leds/leds-adp5520.c b/drivers/leds/leds-adp5520.c
index 7ba4c7b..b1400db 100644
--- a/drivers/leds/leds-adp5520.c
+++ b/drivers/leds/leds-adp5520.c
@@ -213,17 +213,7 @@ static struct platform_driver adp5520_led_driver = {
.remove = __devexit_p(adp5520_led_remove),
};
-static int __init adp5520_led_init(void)
-{
- return platform_driver_register(&adp5520_led_driver);
-}
-module_init(adp5520_led_init);
-
-static void __exit adp5520_led_exit(void)
-{
- platform_driver_unregister(&adp5520_led_driver);
-}
-module_exit(adp5520_led_exit);
+module_platform_driver(adp5520_led_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("LEDS ADP5520(01) Driver");
diff --git a/drivers/leds/leds-ams-delta.c b/drivers/leds/leds-ams-delta.c
index 8c00937..0742835 100644
--- a/drivers/leds/leds-ams-delta.c
+++ b/drivers/leds/leds-ams-delta.c
@@ -118,18 +118,7 @@ static struct platform_driver ams_delta_led_driver = {
},
};
-static int __init ams_delta_led_init(void)
-{
- return platform_driver_register(&ams_delta_led_driver);
-}
-
-static void __exit ams_delta_led_exit(void)
-{
- platform_driver_unregister(&ams_delta_led_driver);
-}
-
-module_init(ams_delta_led_init);
-module_exit(ams_delta_led_exit);
+module_platform_driver(ams_delta_led_driver);
MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
MODULE_DESCRIPTION("Amstrad Delta LED driver");
diff --git a/drivers/leds/leds-asic3.c b/drivers/leds/leds-asic3.c
index 48d9fe6..525a924 100644
--- a/drivers/leds/leds-asic3.c
+++ b/drivers/leds/leds-asic3.c
@@ -179,21 +179,9 @@ static struct platform_driver asic3_led_driver = {
},
};
-MODULE_ALIAS("platform:leds-asic3");
-
-static int __init asic3_led_init(void)
-{
- return platform_driver_register(&asic3_led_driver);
-}
-
-static void __exit asic3_led_exit(void)
-{
- platform_driver_unregister(&asic3_led_driver);
-}
-
-module_init(asic3_led_init);
-module_exit(asic3_led_exit);
+module_platform_driver(asic3_led_driver);
MODULE_AUTHOR("Paul Parsons <lost.distance@yahoo.com>");
MODULE_DESCRIPTION("HTC ASIC3 LED driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-asic3");
diff --git a/drivers/leds/leds-atmel-pwm.c b/drivers/leds/leds-atmel-pwm.c
index 109c875..800243b 100644
--- a/drivers/leds/leds-atmel-pwm.c
+++ b/drivers/leds/leds-atmel-pwm.c
@@ -134,29 +134,18 @@ static int __exit pwmled_remove(struct platform_device *pdev)
return 0;
}
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:leds-atmel-pwm");
-
static struct platform_driver pwmled_driver = {
.driver = {
.name = "leds-atmel-pwm",
.owner = THIS_MODULE,
},
/* REVISIT add suspend() and resume() methods */
+ .probe = pwmled_probe,
.remove = __exit_p(pwmled_remove),
};
-static int __init modinit(void)
-{
- return platform_driver_probe(&pwmled_driver, pwmled_probe);
-}
-module_init(modinit);
-
-static void __exit modexit(void)
-{
- platform_driver_unregister(&pwmled_driver);
-}
-module_exit(modexit);
+module_platform_driver(pwmled_driver);
MODULE_DESCRIPTION("Driver for LEDs with PWM-controlled brightness");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-atmel-pwm");
diff --git a/drivers/leds/leds-bd2802.c b/drivers/leds/leds-bd2802.c
index ea21855..591cbdf 100644
--- a/drivers/leds/leds-bd2802.c
+++ b/drivers/leds/leds-bd2802.c
@@ -688,8 +688,7 @@ static int __devinit bd2802_probe(struct i2c_client *client,
i2c_set_clientdata(client, led);
/* Configure RESET GPIO (L: RESET, H: RESET cancel) */
- gpio_request(pdata->reset_gpio, "RGB_RESETB");
- gpio_direction_output(pdata->reset_gpio, 1);
+ gpio_request_one(pdata->reset_gpio, GPIOF_OUT_INIT_HIGH, "RGB_RESETB");
/* Tacss = min 0.1ms */
udelay(100);
@@ -813,17 +812,7 @@ static struct i2c_driver bd2802_i2c_driver = {
.id_table = bd2802_id,
};
-static int __init bd2802_init(void)
-{
- return i2c_add_driver(&bd2802_i2c_driver);
-}
-module_init(bd2802_init);
-
-static void __exit bd2802_exit(void)
-{
- i2c_del_driver(&bd2802_i2c_driver);
-}
-module_exit(bd2802_exit);
+module_i2c_driver(bd2802_i2c_driver);
MODULE_AUTHOR("Kim Kyuwon <q1.kim@samsung.com>");
MODULE_DESCRIPTION("BD2802 LED driver");
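gpio_request_one() folds the request and the initial direction/level setup into a single call selected by GPIOF_* flags. The two-call sequence removed above is roughly equivalent to the following sketch (the helper name is made up for illustration):

static int request_reset_gpio(unsigned gpio)
{
	int err;

	err = gpio_request(gpio, "RGB_RESETB");
	if (err)
		return err;

	/* GPIOF_OUT_INIT_HIGH: configure as output, driven high initially */
	err = gpio_direction_output(gpio, 1);
	if (err)
		gpio_free(gpio);

	return err;
}

gpio_request_one() also releases the GPIO when the direction setup fails, which is why the explicit gpio_free()-on-error paths could be dropped in the leds-netxbig hunk further down.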
diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
index a498135..1ed1677 100644
--- a/drivers/leds/leds-clevo-mail.c
+++ b/drivers/leds/leds-clevo-mail.c
@@ -18,7 +18,7 @@ MODULE_AUTHOR("Márton Németh <nm127@freemail.hu>");
MODULE_DESCRIPTION("Clevo mail LED driver");
MODULE_LICENSE("GPL");
-static unsigned int __initdata nodetect;
+static bool __initdata nodetect;
module_param_named(nodetect, nodetect, bool, 0);
MODULE_PARM_DESC(nodetect, "Skip DMI hardware detection");
diff --git a/drivers/leds/leds-cobalt-qube.c b/drivers/leds/leds-cobalt-qube.c
index da5fb01..6a8725c 100644
--- a/drivers/leds/leds-cobalt-qube.c
+++ b/drivers/leds/leds-cobalt-qube.c
@@ -75,9 +75,6 @@ static int __devexit cobalt_qube_led_remove(struct platform_device *pdev)
return 0;
}
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:cobalt-qube-leds");
-
static struct platform_driver cobalt_qube_led_driver = {
.probe = cobalt_qube_led_probe,
.remove = __devexit_p(cobalt_qube_led_remove),
@@ -87,19 +84,9 @@ static struct platform_driver cobalt_qube_led_driver = {
},
};
-static int __init cobalt_qube_led_init(void)
-{
- return platform_driver_register(&cobalt_qube_led_driver);
-}
-
-static void __exit cobalt_qube_led_exit(void)
-{
- platform_driver_unregister(&cobalt_qube_led_driver);
-}
-
-module_init(cobalt_qube_led_init);
-module_exit(cobalt_qube_led_exit);
+module_platform_driver(cobalt_qube_led_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Front LED support for Cobalt Server");
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_ALIAS("platform:cobalt-qube-leds");
diff --git a/drivers/leds/leds-da903x.c b/drivers/leds/leds-da903x.c
index f28931c..d9cd73e 100644
--- a/drivers/leds/leds-da903x.c
+++ b/drivers/leds/leds-da903x.c
@@ -158,17 +158,7 @@ static struct platform_driver da903x_led_driver = {
.remove = __devexit_p(da903x_led_remove),
};
-static int __init da903x_led_init(void)
-{
- return platform_driver_register(&da903x_led_driver);
-}
-module_init(da903x_led_init);
-
-static void __exit da903x_led_exit(void)
-{
- platform_driver_unregister(&da903x_led_driver);
-}
-module_exit(da903x_led_exit);
+module_platform_driver(da903x_led_driver);
MODULE_DESCRIPTION("LEDs driver for Dialog Semiconductor DA9030/DA9034");
MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>"
diff --git a/drivers/leds/leds-dac124s085.c b/drivers/leds/leds-dac124s085.c
index 31cf0d6..d56c142 100644
--- a/drivers/leds/leds-dac124s085.c
+++ b/drivers/leds/leds-dac124s085.c
@@ -131,18 +131,7 @@ static struct spi_driver dac124s085_driver = {
},
};
-static int __init dac124s085_leds_init(void)
-{
- return spi_register_driver(&dac124s085_driver);
-}
-
-static void __exit dac124s085_leds_exit(void)
-{
- spi_unregister_driver(&dac124s085_driver);
-}
-
-module_init(dac124s085_leds_init);
-module_exit(dac124s085_leds_exit);
+module_spi_driver(dac124s085_driver);
MODULE_AUTHOR("Guennadi Liakhovetski <lg@denx.de>");
MODULE_DESCRIPTION("DAC124S085 LED driver");
diff --git a/drivers/leds/leds-fsg.c b/drivers/leds/leds-fsg.c
index 49aceff..b9053fa 100644
--- a/drivers/leds/leds-fsg.c
+++ b/drivers/leds/leds-fsg.c
@@ -224,20 +224,7 @@ static struct platform_driver fsg_led_driver = {
},
};
-
-static int __init fsg_led_init(void)
-{
- return platform_driver_register(&fsg_led_driver);
-}
-
-static void __exit fsg_led_exit(void)
-{
- platform_driver_unregister(&fsg_led_driver);
-}
-
-
-module_init(fsg_led_init);
-module_exit(fsg_led_exit);
+module_platform_driver(fsg_led_driver);
MODULE_AUTHOR("Rod Whitby <rod@whitby.id.au>");
MODULE_DESCRIPTION("Freecom FSG-3 LED driver");
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 399a86f..7df74cb 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -293,21 +293,9 @@ static struct platform_driver gpio_led_driver = {
},
};
-MODULE_ALIAS("platform:leds-gpio");
-
-static int __init gpio_led_init(void)
-{
- return platform_driver_register(&gpio_led_driver);
-}
-
-static void __exit gpio_led_exit(void)
-{
- platform_driver_unregister(&gpio_led_driver);
-}
-
-module_init(gpio_led_init);
-module_exit(gpio_led_exit);
+module_platform_driver(gpio_led_driver);
MODULE_AUTHOR("Raphael Assenat <raph@8d.com>, Trent Piepho <tpiepho@freescale.com>");
MODULE_DESCRIPTION("GPIO LED driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-gpio");
diff --git a/drivers/leds/leds-hp6xx.c b/drivers/leds/leds-hp6xx.c
index bcfbd3a..366b605 100644
--- a/drivers/leds/leds-hp6xx.c
+++ b/drivers/leds/leds-hp6xx.c
@@ -79,9 +79,6 @@ static int hp6xxled_remove(struct platform_device *pdev)
return 0;
}
-/* work with hotplug and coldplug */
-MODULE_ALIAS("platform:hp6xx-led");
-
static struct platform_driver hp6xxled_driver = {
.probe = hp6xxled_probe,
.remove = hp6xxled_remove,
@@ -91,19 +88,9 @@ static struct platform_driver hp6xxled_driver = {
},
};
-static int __init hp6xxled_init(void)
-{
- return platform_driver_register(&hp6xxled_driver);
-}
-
-static void __exit hp6xxled_exit(void)
-{
- platform_driver_unregister(&hp6xxled_driver);
-}
-
-module_init(hp6xxled_init);
-module_exit(hp6xxled_exit);
+module_platform_driver(hp6xxled_driver);
MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>");
MODULE_DESCRIPTION("HP Jornada 6xx LED driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:hp6xx-led");
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index 0630e4f..e59c166 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -164,8 +164,8 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
if (drvdata->mode == LM3530_BL_MODE_ALS) {
if (pltfm->als_vmax == 0) {
- pltfm->als_vmin = als_vmin = 0;
- pltfm->als_vmin = als_vmax = LM3530_ALS_WINDOW_mV;
+ pltfm->als_vmin = 0;
+ pltfm->als_vmax = LM3530_ALS_WINDOW_mV;
}
als_vmin = pltfm->als_vmin;
@@ -457,18 +457,7 @@ static struct i2c_driver lm3530_i2c_driver = {
},
};
-static int __init lm3530_init(void)
-{
- return i2c_add_driver(&lm3530_i2c_driver);
-}
-
-static void __exit lm3530_exit(void)
-{
- i2c_del_driver(&lm3530_i2c_driver);
-}
-
-module_init(lm3530_init);
-module_exit(lm3530_exit);
+module_i2c_driver(lm3530_i2c_driver);
MODULE_DESCRIPTION("Back Light driver for LM3530");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
index 9010c05..b8f9f0a 100644
--- a/drivers/leds/leds-lp3944.c
+++ b/drivers/leds/leds-lp3944.c
@@ -453,18 +453,7 @@ static struct i2c_driver lp3944_driver = {
.id_table = lp3944_id,
};
-static int __init lp3944_module_init(void)
-{
- return i2c_add_driver(&lp3944_driver);
-}
-
-static void __exit lp3944_module_exit(void)
-{
- i2c_del_driver(&lp3944_driver);
-}
-
-module_init(lp3944_module_init);
-module_exit(lp3944_module_exit);
+module_i2c_driver(lp3944_driver);
MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
MODULE_DESCRIPTION("LP3944 Fun Light Chip");
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index cb641f1..d62a798 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -797,25 +797,7 @@ static struct i2c_driver lp5521_driver = {
.id_table = lp5521_id,
};
-static int __init lp5521_init(void)
-{
- int ret;
-
- ret = i2c_add_driver(&lp5521_driver);
-
- if (ret < 0)
- printk(KERN_ALERT "Adding lp5521 driver failed\n");
-
- return ret;
-}
-
-static void __exit lp5521_exit(void)
-{
- i2c_del_driver(&lp5521_driver);
-}
-
-module_init(lp5521_init);
-module_exit(lp5521_exit);
+module_i2c_driver(lp5521_driver);
MODULE_AUTHOR("Mathias Nyman, Yuri Zaporozhets, Samu Onkalo");
MODULE_DESCRIPTION("LP5521 LED engine");
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 5971e309..73e791a 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -870,8 +870,6 @@ static int __devinit lp5523_init_led(struct lp5523_led *led, struct device *dev,
return 0;
}
-static struct i2c_driver lp5523_driver;
-
static int __devinit lp5523_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -1021,25 +1019,7 @@ static struct i2c_driver lp5523_driver = {
.id_table = lp5523_id,
};
-static int __init lp5523_init(void)
-{
- int ret;
-
- ret = i2c_add_driver(&lp5523_driver);
-
- if (ret < 0)
- printk(KERN_ALERT "Adding lp5523 driver failed\n");
-
- return ret;
-}
-
-static void __exit lp5523_exit(void)
-{
- i2c_del_driver(&lp5523_driver);
-}
-
-module_init(lp5523_init);
-module_exit(lp5523_exit);
+module_i2c_driver(lp5523_driver);
MODULE_AUTHOR("Mathias Nyman <mathias.nyman@nokia.com>");
MODULE_DESCRIPTION("LP5523 LED engine");
diff --git a/drivers/leds/leds-lt3593.c b/drivers/leds/leds-lt3593.c
index 53f67b8..e311a96c 100644
--- a/drivers/leds/leds-lt3593.c
+++ b/drivers/leds/leds-lt3593.c
@@ -199,21 +199,9 @@ static struct platform_driver lt3593_led_driver = {
},
};
-MODULE_ALIAS("platform:leds-lt3593");
-
-static int __init lt3593_led_init(void)
-{
- return platform_driver_register(&lt3593_led_driver);
-}
-
-static void __exit lt3593_led_exit(void)
-{
- platform_driver_unregister(&lt3593_led_driver);
-}
-
-module_init(lt3593_led_init);
-module_exit(lt3593_led_exit);
+module_platform_driver(lt3593_led_driver);
MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>");
MODULE_DESCRIPTION("LED driver for LT3593 controllers");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-lt3593");
diff --git a/drivers/leds/leds-max8997.c b/drivers/leds/leds-max8997.c
new file mode 100644
index 0000000..f4c0e37
--- /dev/null
+++ b/drivers/leds/leds-max8997.c
@@ -0,0 +1,372 @@
+/*
+ * leds-max8997.c - LED class driver for MAX8997 LEDs.
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * Donggeun Kim <dg77.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/leds.h>
+#include <linux/mfd/max8997.h>
+#include <linux/mfd/max8997-private.h>
+#include <linux/platform_device.h>
+
+#define MAX8997_LED_FLASH_SHIFT 3
+#define MAX8997_LED_FLASH_CUR_MASK 0xf8
+#define MAX8997_LED_MOVIE_SHIFT 4
+#define MAX8997_LED_MOVIE_CUR_MASK 0xf0
+
+#define MAX8997_LED_FLASH_MAX_BRIGHTNESS 0x1f
+#define MAX8997_LED_MOVIE_MAX_BRIGHTNESS 0xf
+#define MAX8997_LED_NONE_MAX_BRIGHTNESS 0
+
+#define MAX8997_LED0_FLASH_MASK 0x1
+#define MAX8997_LED0_FLASH_PIN_MASK 0x5
+#define MAX8997_LED0_MOVIE_MASK 0x8
+#define MAX8997_LED0_MOVIE_PIN_MASK 0x28
+
+#define MAX8997_LED1_FLASH_MASK 0x2
+#define MAX8997_LED1_FLASH_PIN_MASK 0x6
+#define MAX8997_LED1_MOVIE_MASK 0x10
+#define MAX8997_LED1_MOVIE_PIN_MASK 0x30
+
+#define MAX8997_LED_BOOST_ENABLE_MASK (1 << 6)
+
+struct max8997_led {
+ struct max8997_dev *iodev;
+ struct led_classdev cdev;
+ bool enabled;
+ int id;
+ enum max8997_led_mode led_mode;
+ struct mutex mutex;
+};
+
+static void max8997_led_clear_mode(struct max8997_led *led,
+ enum max8997_led_mode mode)
+{
+ struct i2c_client *client = led->iodev->i2c;
+ u8 val = 0, mask = 0;
+ int ret;
+
+ switch (mode) {
+ case MAX8997_FLASH_MODE:
+ mask = led->id ?
+ MAX8997_LED1_FLASH_MASK : MAX8997_LED0_FLASH_MASK;
+ break;
+ case MAX8997_MOVIE_MODE:
+ mask = led->id ?
+ MAX8997_LED1_MOVIE_MASK : MAX8997_LED0_MOVIE_MASK;
+ break;
+ case MAX8997_FLASH_PIN_CONTROL_MODE:
+ mask = led->id ?
+ MAX8997_LED1_FLASH_PIN_MASK : MAX8997_LED0_FLASH_PIN_MASK;
+ break;
+ case MAX8997_MOVIE_PIN_CONTROL_MODE:
+ mask = led->id ?
+ MAX8997_LED1_MOVIE_PIN_MASK : MAX8997_LED0_MOVIE_PIN_MASK;
+ break;
+ default:
+ break;
+ }
+
+ if (mask) {
+ ret = max8997_update_reg(client,
+ MAX8997_REG_LEN_CNTL, val, mask);
+ if (ret)
+ dev_err(led->iodev->dev,
+ "failed to update register(%d)\n", ret);
+ }
+}
+
+static void max8997_led_set_mode(struct max8997_led *led,
+ enum max8997_led_mode mode)
+{
+ int ret;
+ struct i2c_client *client = led->iodev->i2c;
+ u8 mask = 0;
+
+ /* First, clear the previous mode */
+ max8997_led_clear_mode(led, led->led_mode);
+
+ switch (mode) {
+ case MAX8997_FLASH_MODE:
+ mask = led->id ?
+ MAX8997_LED1_FLASH_MASK : MAX8997_LED0_FLASH_MASK;
+ led->cdev.max_brightness = MAX8997_LED_FLASH_MAX_BRIGHTNESS;
+ break;
+ case MAX8997_MOVIE_MODE:
+ mask = led->id ?
+ MAX8997_LED1_MOVIE_MASK : MAX8997_LED0_MOVIE_MASK;
+ led->cdev.max_brightness = MAX8997_LED_MOVIE_MAX_BRIGHTNESS;
+ break;
+ case MAX8997_FLASH_PIN_CONTROL_MODE:
+ mask = led->id ?
+ MAX8997_LED1_FLASH_PIN_MASK : MAX8997_LED0_FLASH_PIN_MASK;
+ led->cdev.max_brightness = MAX8997_LED_FLASH_MAX_BRIGHTNESS;
+ break;
+ case MAX8997_MOVIE_PIN_CONTROL_MODE:
+ mask = led->id ?
+ MAX8997_LED1_MOVIE_PIN_MASK : MAX8997_LED0_MOVIE_PIN_MASK;
+ led->cdev.max_brightness = MAX8997_LED_MOVIE_MAX_BRIGHTNESS;
+ break;
+ default:
+ led->cdev.max_brightness = MAX8997_LED_NONE_MAX_BRIGHTNESS;
+ break;
+ }
+
+ if (mask) {
+ ret = max8997_update_reg(client,
+ MAX8997_REG_LEN_CNTL, mask, mask);
+ if (ret)
+ dev_err(led->iodev->dev,
+ "failed to update register(%d)\n", ret);
+ }
+
+ led->led_mode = mode;
+}
+
+static void max8997_led_enable(struct max8997_led *led, bool enable)
+{
+ int ret;
+ struct i2c_client *client = led->iodev->i2c;
+ u8 val = 0, mask = MAX8997_LED_BOOST_ENABLE_MASK;
+
+ if (led->enabled == enable)
+ return;
+
+ val = enable ? MAX8997_LED_BOOST_ENABLE_MASK : 0;
+
+ ret = max8997_update_reg(client, MAX8997_REG_BOOST_CNTL, val, mask);
+ if (ret)
+ dev_err(led->iodev->dev,
+ "failed to update register(%d)\n", ret);
+
+ led->enabled = enable;
+}
+
+static void max8997_led_set_current(struct max8997_led *led,
+ enum led_brightness value)
+{
+ int ret;
+ struct i2c_client *client = led->iodev->i2c;
+ u8 val = 0, mask = 0, reg = 0;
+
+ switch (led->led_mode) {
+ case MAX8997_FLASH_MODE:
+ case MAX8997_FLASH_PIN_CONTROL_MODE:
+ val = value << MAX8997_LED_FLASH_SHIFT;
+ mask = MAX8997_LED_FLASH_CUR_MASK;
+ reg = led->id ? MAX8997_REG_FLASH2_CUR : MAX8997_REG_FLASH1_CUR;
+ break;
+ case MAX8997_MOVIE_MODE:
+ case MAX8997_MOVIE_PIN_CONTROL_MODE:
+ val = value << MAX8997_LED_MOVIE_SHIFT;
+ mask = MAX8997_LED_MOVIE_CUR_MASK;
+ reg = MAX8997_REG_MOVIE_CUR;
+ break;
+ default:
+ break;
+ }
+
+ if (mask) {
+ ret = max8997_update_reg(client, reg, val, mask);
+ if (ret)
+ dev_err(led->iodev->dev,
+ "failed to update register(%d)\n", ret);
+ }
+}
+
+static void max8997_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct max8997_led *led =
+ container_of(led_cdev, struct max8997_led, cdev);
+
+ if (value) {
+ max8997_led_set_current(led, value);
+ max8997_led_enable(led, true);
+ } else {
+ max8997_led_set_current(led, value);
+ max8997_led_enable(led, false);
+ }
+}
+
+static ssize_t max8997_led_show_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct max8997_led *led =
+ container_of(led_cdev, struct max8997_led, cdev);
+ ssize_t ret = 0;
+
+ mutex_lock(&led->mutex);
+
+ switch (led->led_mode) {
+ case MAX8997_FLASH_MODE:
+ ret += sprintf(buf, "FLASH\n");
+ break;
+ case MAX8997_MOVIE_MODE:
+ ret += sprintf(buf, "MOVIE\n");
+ break;
+ case MAX8997_FLASH_PIN_CONTROL_MODE:
+ ret += sprintf(buf, "FLASH_PIN_CONTROL\n");
+ break;
+ case MAX8997_MOVIE_PIN_CONTROL_MODE:
+ ret += sprintf(buf, "MOVIE_PIN_CONTROL\n");
+ break;
+ default:
+ ret += sprintf(buf, "NONE\n");
+ break;
+ }
+
+ mutex_unlock(&led->mutex);
+
+ return ret;
+}
+
+static ssize_t max8997_led_store_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct led_classdev *led_cdev = dev_get_drvdata(dev);
+ struct max8997_led *led =
+ container_of(led_cdev, struct max8997_led, cdev);
+ enum max8997_led_mode mode;
+
+ mutex_lock(&led->mutex);
+
+ if (!strncmp(buf, "FLASH_PIN_CONTROL", 17))
+ mode = MAX8997_FLASH_PIN_CONTROL_MODE;
+ else if (!strncmp(buf, "MOVIE_PIN_CONTROL", 17))
+ mode = MAX8997_MOVIE_PIN_CONTROL_MODE;
+ else if (!strncmp(buf, "FLASH", 5))
+ mode = MAX8997_FLASH_MODE;
+ else if (!strncmp(buf, "MOVIE", 5))
+ mode = MAX8997_MOVIE_MODE;
+ else
+ mode = MAX8997_NONE;
+
+ max8997_led_set_mode(led, mode);
+
+ mutex_unlock(&led->mutex);
+
+ return size;
+}
+
+static DEVICE_ATTR(mode, 0644, max8997_led_show_mode, max8997_led_store_mode);
+
+static int __devinit max8997_led_probe(struct platform_device *pdev)
+{
+ struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct max8997_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct max8997_led *led;
+ char name[20];
+ int ret = 0;
+
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "no platform data\n");
+ return -ENODEV;
+ }
+
+ led = kzalloc(sizeof(*led), GFP_KERNEL);
+ if (led == NULL) {
+ ret = -ENOMEM;
+ goto err_mem;
+ }
+
+ led->id = pdev->id;
+ snprintf(name, sizeof(name), "max8997-led%d", pdev->id);
+
+ led->cdev.name = name;
+ led->cdev.brightness_set = max8997_led_brightness_set;
+ led->cdev.flags |= LED_CORE_SUSPENDRESUME;
+ led->cdev.brightness = 0;
+ led->iodev = iodev;
+
+ /* initialize mode and brightness according to platform_data */
+ if (pdata->led_pdata) {
+ u8 mode = 0, brightness = 0;
+
+ mode = pdata->led_pdata->mode[led->id];
+ brightness = pdata->led_pdata->brightness[led->id];
+
+ max8997_led_set_mode(led, pdata->led_pdata->mode[led->id]);
+
+ if (brightness > led->cdev.max_brightness)
+ brightness = led->cdev.max_brightness;
+ max8997_led_set_current(led, brightness);
+ led->cdev.brightness = brightness;
+ } else {
+ max8997_led_set_mode(led, MAX8997_NONE);
+ max8997_led_set_current(led, 0);
+ }
+
+ mutex_init(&led->mutex);
+
+ platform_set_drvdata(pdev, led);
+
+ ret = led_classdev_register(&pdev->dev, &led->cdev);
+ if (ret < 0)
+ goto err_led;
+
+ ret = device_create_file(led->cdev.dev, &dev_attr_mode);
+ if (ret != 0) {
+ dev_err(&pdev->dev,
+ "failed to create file: %d\n", ret);
+ goto err_file;
+ }
+
+ return 0;
+
+err_file:
+ led_classdev_unregister(&led->cdev);
+err_led:
+ kfree(led);
+err_mem:
+ return ret;
+}
+
+static int __devexit max8997_led_remove(struct platform_device *pdev)
+{
+ struct max8997_led *led = platform_get_drvdata(pdev);
+
+ device_remove_file(led->cdev.dev, &dev_attr_mode);
+ led_classdev_unregister(&led->cdev);
+ kfree(led);
+
+ return 0;
+}
+
+static struct platform_driver max8997_led_driver = {
+ .driver = {
+ .name = "max8997-led",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8997_led_probe,
+ .remove = __devexit_p(max8997_led_remove),
+};
+
+static int __init max8997_led_init(void)
+{
+ return platform_driver_register(&max8997_led_driver);
+}
+module_init(max8997_led_init);
+
+static void __exit max8997_led_exit(void)
+{
+ platform_driver_unregister(&max8997_led_driver);
+}
+module_exit(max8997_led_exit);
+
+MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
+MODULE_DESCRIPTION("MAX8997 LED driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:max8997-led");
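As a quick sanity check of the shift/mask arithmetic in max8997_led_set_current() above, the brightness value is placed into the FLASH/MOVIE current fields like this (stand-alone demo reusing the #defines from the new file; the helper names are illustrative only):

#include <stdio.h>

#define MAX8997_LED_FLASH_SHIFT		3
#define MAX8997_LED_FLASH_CUR_MASK	0xf8
#define MAX8997_LED_MOVIE_SHIFT		4
#define MAX8997_LED_MOVIE_CUR_MASK	0xf0

/* Field value the driver would hand to max8997_update_reg() */
static unsigned char flash_field(unsigned char brightness)
{
	return (brightness << MAX8997_LED_FLASH_SHIFT) & MAX8997_LED_FLASH_CUR_MASK;
}

static unsigned char movie_field(unsigned char brightness)
{
	return (brightness << MAX8997_LED_MOVIE_SHIFT) & MAX8997_LED_MOVIE_CUR_MASK;
}

int main(void)
{
	/* max flash brightness 0x1f fills the 5-bit field: 0xf8 */
	printf("flash 0x1f -> 0x%02x\n", flash_field(0x1f));
	/* max movie brightness 0x0f fills the 4-bit field: 0xf0 */
	printf("movie 0x0f -> 0x%02x\n", movie_field(0x0f));
	return 0;
}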
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
index b3393a9..8bc4915 100644
--- a/drivers/leds/leds-mc13783.c
+++ b/drivers/leds/leds-mc13783.c
@@ -275,7 +275,7 @@ static int __devinit mc13783_led_probe(struct platform_device *pdev)
return -ENODEV;
}
- if (pdata->num_leds < 1 || pdata->num_leds > MC13783_LED_MAX) {
+ if (pdata->num_leds < 1 || pdata->num_leds > (MC13783_LED_MAX + 1)) {
dev_err(&pdev->dev, "Invalid led count %d\n", pdata->num_leds);
return -EINVAL;
}
@@ -385,17 +385,7 @@ static struct platform_driver mc13783_led_driver = {
.remove = __devexit_p(mc13783_led_remove),
};
-static int __init mc13783_led_init(void)
-{
- return platform_driver_register(&mc13783_led_driver);
-}
-module_init(mc13783_led_init);
-
-static void __exit mc13783_led_exit(void)
-{
- platform_driver_unregister(&mc13783_led_driver);
-}
-module_exit(mc13783_led_exit);
+module_platform_driver(mc13783_led_driver);
MODULE_DESCRIPTION("LEDs driver for Freescale MC13783 PMIC");
MODULE_AUTHOR("Philippe Retornaz <philippe.retornaz@epfl.ch>");
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index f2e51c1..d8433f2 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -81,35 +81,23 @@ static int __devinit gpio_ext_init(struct netxbig_gpio_ext *gpio_ext)
/* Configure address GPIOs. */
for (i = 0; i < gpio_ext->num_addr; i++) {
- err = gpio_request(gpio_ext->addr[i], "GPIO extension addr");
+ err = gpio_request_one(gpio_ext->addr[i], GPIOF_OUT_INIT_LOW,
+ "GPIO extension addr");
if (err)
goto err_free_addr;
- err = gpio_direction_output(gpio_ext->addr[i], 0);
- if (err) {
- gpio_free(gpio_ext->addr[i]);
- goto err_free_addr;
- }
}
/* Configure data GPIOs. */
for (i = 0; i < gpio_ext->num_data; i++) {
- err = gpio_request(gpio_ext->data[i], "GPIO extension data");
+ err = gpio_request_one(gpio_ext->data[i], GPIOF_OUT_INIT_LOW,
+ "GPIO extension data");
if (err)
goto err_free_data;
- err = gpio_direction_output(gpio_ext->data[i], 0);
- if (err) {
- gpio_free(gpio_ext->data[i]);
- goto err_free_data;
- }
}
/* Configure "enable select" GPIO. */
- err = gpio_request(gpio_ext->enable, "GPIO extension enable");
+ err = gpio_request_one(gpio_ext->enable, GPIOF_OUT_INIT_LOW,
+ "GPIO extension enable");
if (err)
goto err_free_data;
- err = gpio_direction_output(gpio_ext->enable, 0);
- if (err) {
- gpio_free(gpio_ext->enable);
- goto err_free_data;
- }
return 0;
@@ -429,21 +417,10 @@ static struct platform_driver netxbig_led_driver = {
.owner = THIS_MODULE,
},
};
-MODULE_ALIAS("platform:leds-netxbig");
-static int __init netxbig_led_init(void)
-{
- return platform_driver_register(&netxbig_led_driver);
-}
-
-static void __exit netxbig_led_exit(void)
-{
- platform_driver_unregister(&netxbig_led_driver);
-}
-
-module_init(netxbig_led_init);
-module_exit(netxbig_led_exit);
+module_platform_driver(netxbig_led_driver);
MODULE_AUTHOR("Simon Guinot <sguinot@lacie.com>");
MODULE_DESCRIPTION("LED driver for LaCie xBig Network boards");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-netxbig");
diff --git a/drivers/leds/leds-ns2.c b/drivers/leds/leds-ns2.c
index 37b7d0c..2f0a144 100644
--- a/drivers/leds/leds-ns2.c
+++ b/drivers/leds/leds-ns2.c
@@ -323,21 +323,10 @@ static struct platform_driver ns2_led_driver = {
.owner = THIS_MODULE,
},
};
-MODULE_ALIAS("platform:leds-ns2");
-
-static int __init ns2_led_init(void)
-{
- return platform_driver_register(&ns2_led_driver);
-}
-static void __exit ns2_led_exit(void)
-{
- platform_driver_unregister(&ns2_led_driver);
-}
-
-module_init(ns2_led_init);
-module_exit(ns2_led_exit);
+module_platform_driver(ns2_led_driver);
MODULE_AUTHOR("Simon Guinot <sguinot@lacie.com>");
MODULE_DESCRIPTION("Network Space v2 LED driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-ns2");
diff --git a/drivers/leds/leds-ot200.c b/drivers/leds/leds-ot200.c
new file mode 100644
index 0000000..c464682
--- /dev/null
+++ b/drivers/leds/leds-ot200.c
@@ -0,0 +1,171 @@
+/*
+ * Bachmann ot200 leds driver.
+ *
+ * Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ * Christian Gmeiner <christian.gmeiner@gmail.com>
+ *
+ * License: GPL as published by the FSF.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/leds.h>
+#include <linux/io.h>
+#include <linux/module.h>
+
+
+struct ot200_led {
+ struct led_classdev cdev;
+ const char *name;
+ unsigned long port;
+ u8 mask;
+};
+
+/*
+ * The device has three leds on the back panel (led_err, led_init and led_run)
+ * and can handle up to seven leds on the front panel.
+ */
+
+static struct ot200_led leds[] = {
+ {
+ .name = "led_run",
+ .port = 0x5a,
+ .mask = BIT(0),
+ },
+ {
+ .name = "led_init",
+ .port = 0x5a,
+ .mask = BIT(1),
+ },
+ {
+ .name = "led_err",
+ .port = 0x5a,
+ .mask = BIT(2),
+ },
+ {
+ .name = "led_1",
+ .port = 0x49,
+ .mask = BIT(7),
+ },
+ {
+ .name = "led_2",
+ .port = 0x49,
+ .mask = BIT(6),
+ },
+ {
+ .name = "led_3",
+ .port = 0x49,
+ .mask = BIT(5),
+ },
+ {
+ .name = "led_4",
+ .port = 0x49,
+ .mask = BIT(4),
+ },
+ {
+ .name = "led_5",
+ .port = 0x49,
+ .mask = BIT(3),
+ },
+ {
+ .name = "led_6",
+ .port = 0x49,
+ .mask = BIT(2),
+ },
+ {
+ .name = "led_7",
+ .port = 0x49,
+ .mask = BIT(1),
+ }
+};
+
+static DEFINE_SPINLOCK(value_lock);
+
+/*
+ * we need to store the current led states, as it is not
+ * possible to read the current led state via inb().
+ */
+static u8 leds_back;
+static u8 leds_front;
+
+static void ot200_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
+{
+ struct ot200_led *led = container_of(led_cdev, struct ot200_led, cdev);
+ u8 *val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&value_lock, flags);
+
+ if (led->port == 0x49)
+ val = &leds_front;
+ else if (led->port == 0x5a)
+ val = &leds_back;
+ else
+ BUG();
+
+ if (value == LED_OFF)
+ *val &= ~led->mask;
+ else
+ *val |= led->mask;
+
+ outb(*val, led->port);
+ spin_unlock_irqrestore(&value_lock, flags);
+}
+
+static int __devinit ot200_led_probe(struct platform_device *pdev)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(leds); i++) {
+
+ leds[i].cdev.name = leds[i].name;
+ leds[i].cdev.brightness_set = ot200_led_brightness_set;
+
+ ret = led_classdev_register(&pdev->dev, &leds[i].cdev);
+ if (ret < 0)
+ goto err;
+ }
+
+ leds_front = 0; /* turn off all front leds */
+ leds_back = BIT(1); /* turn on init led */
+ outb(leds_front, 0x49);
+ outb(leds_back, 0x5a);
+
+ return 0;
+
+err:
+ for (i = i - 1; i >= 0; i--)
+ led_classdev_unregister(&leds[i].cdev);
+
+ return ret;
+}
+
+static int __devexit ot200_led_remove(struct platform_device *pdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(leds); i++)
+ led_classdev_unregister(&leds[i].cdev);
+
+ return 0;
+}
+
+static struct platform_driver ot200_led_driver = {
+ .probe = ot200_led_probe,
+ .remove = __devexit_p(ot200_led_remove),
+ .driver = {
+ .name = "leds-ot200",
+ .owner = THIS_MODULE,
+ },
+};
+
+module_platform_driver(ot200_led_driver);
+
+MODULE_AUTHOR("Sebastian A. Siewior <bigeasy@linutronix.de>");
+MODULE_DESCRIPTION("ot200 LED driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-ot200");
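The leds_front/leds_back shadow bytes above exist because the LED ports are write-only: the driver cannot inb() the current state, so it keeps the last value written and does read-modify-write on the shadow before each outb(). A stand-alone demo of that pattern (ordinary user-space C; the initial value is BIT(1), matching ot200_led_probe):

#include <stdio.h>

static unsigned char leds_back = 1 << 1;	/* init LED on, as after probe */

static unsigned char led_set(unsigned char mask, int on)
{
	if (on)
		leds_back |= mask;
	else
		leds_back &= ~mask;
	return leds_back;	/* value the driver would outb() to port 0x5a */
}

int main(void)
{
	printf("after led_err on:   0x%02x\n", led_set(1 << 2, 1));	/* 0x06 */
	printf("after led_init off: 0x%02x\n", led_set(1 << 1, 0));	/* 0x04 */
	return 0;
}

In the driver the same update runs under the value_lock spinlock so concurrent brightness_set calls cannot lose bits.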
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index a2c8746..ceccab4 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -489,20 +489,8 @@ static int pca9532_remove(struct i2c_client *client)
return 0;
}
-static int __init pca9532_init(void)
-{
- return i2c_add_driver(&pca9532_driver);
-}
-
-static void __exit pca9532_exit(void)
-{
- i2c_del_driver(&pca9532_driver);
-}
+module_i2c_driver(pca9532_driver);
MODULE_AUTHOR("Riku Voipio");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PCA 9532 LED dimmer");
-
-module_init(pca9532_init);
-module_exit(pca9532_exit);
-
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index 66aa3e8..dcc3bc3 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -371,18 +371,7 @@ static struct i2c_driver pca955x_driver = {
.id_table = pca955x_id,
};
-static int __init pca955x_leds_init(void)
-{
- return i2c_add_driver(&pca955x_driver);
-}
-
-static void __exit pca955x_leds_exit(void)
-{
- i2c_del_driver(&pca955x_driver);
-}
-
-module_init(pca955x_leds_init);
-module_exit(pca955x_leds_exit);
+module_i2c_driver(pca955x_driver);
MODULE_AUTHOR("Nate Case <ncase@xes-inc.com>");
MODULE_DESCRIPTION("PCA955x LED driver");
diff --git a/drivers/leds/leds-pwm.c b/drivers/leds/leds-pwm.c
index 666daf7..3ed92f3 100644
--- a/drivers/leds/leds-pwm.c
+++ b/drivers/leds/leds-pwm.c
@@ -135,18 +135,7 @@ static struct platform_driver led_pwm_driver = {
},
};
-static int __init led_pwm_init(void)
-{
- return platform_driver_register(&led_pwm_driver);
-}
-
-static void __exit led_pwm_exit(void)
-{
- platform_driver_unregister(&led_pwm_driver);
-}
-
-module_init(led_pwm_init);
-module_exit(led_pwm_exit);
+module_platform_driver(led_pwm_driver);
MODULE_AUTHOR("Luotao Fu <l.fu@pengutronix.de>");
MODULE_DESCRIPTION("PWM LED driver for PXA");
diff --git a/drivers/leds/leds-rb532.c b/drivers/leds/leds-rb532.c
index c3525f3..a7815b6 100644
--- a/drivers/leds/leds-rb532.c
+++ b/drivers/leds/leds-rb532.c
@@ -57,21 +57,9 @@ static struct platform_driver rb532_led_driver = {
},
};
-static int __init rb532_led_init(void)
-{
- return platform_driver_register(&rb532_led_driver);
-}
-
-static void __exit rb532_led_exit(void)
-{
- platform_driver_unregister(&rb532_led_driver);
-}
-
-module_init(rb532_led_init);
-module_exit(rb532_led_exit);
-
-MODULE_ALIAS("platform:rb532-led");
+module_platform_driver(rb532_led_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("User LED support for Routerboard532");
MODULE_AUTHOR("Phil Sutter <n0-1@freewrt.org>");
+MODULE_ALIAS("platform:rb532-led");
diff --git a/drivers/leds/leds-regulator.c b/drivers/leds/leds-regulator.c
index 8497f56..df7e963 100644
--- a/drivers/leds/leds-regulator.c
+++ b/drivers/leds/leds-regulator.c
@@ -229,17 +229,7 @@ static struct platform_driver regulator_led_driver = {
.remove = __devexit_p(regulator_led_remove),
};
-static int __init regulator_led_init(void)
-{
- return platform_driver_register(&regulator_led_driver);
-}
-module_init(regulator_led_init);
-
-static void __exit regulator_led_exit(void)
-{
- platform_driver_unregister(&regulator_led_driver);
-}
-module_exit(regulator_led_exit);
+module_platform_driver(regulator_led_driver);
MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>");
MODULE_DESCRIPTION("Regulator driven LED driver");
diff --git a/drivers/leds/leds-renesas-tpu.c b/drivers/leds/leds-renesas-tpu.c
index 3ee540e..32fe337 100644
--- a/drivers/leds/leds-renesas-tpu.c
+++ b/drivers/leds/leds-renesas-tpu.c
@@ -339,18 +339,7 @@ static struct platform_driver r_tpu_device_driver = {
}
};
-static int __init r_tpu_init(void)
-{
- return platform_driver_register(&r_tpu_device_driver);
-}
-
-static void __exit r_tpu_exit(void)
-{
- platform_driver_unregister(&r_tpu_device_driver);
-}
-
-module_init(r_tpu_init);
-module_exit(r_tpu_exit);
+module_platform_driver(r_tpu_device_driver);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Renesas TPU LED Driver");
diff --git a/drivers/leds/leds-s3c24xx.c b/drivers/leds/leds-s3c24xx.c
index 29f8b0f..bd0a5ed 100644
--- a/drivers/leds/leds-s3c24xx.c
+++ b/drivers/leds/leds-s3c24xx.c
@@ -121,18 +121,7 @@ static struct platform_driver s3c24xx_led_driver = {
},
};
-static int __init s3c24xx_led_init(void)
-{
- return platform_driver_register(&s3c24xx_led_driver);
-}
-
-static void __exit s3c24xx_led_exit(void)
-{
- platform_driver_unregister(&s3c24xx_led_driver);
-}
-
-module_init(s3c24xx_led_init);
-module_exit(s3c24xx_led_exit);
+module_platform_driver(s3c24xx_led_driver);
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("S3C24XX LED driver");
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
index 614ebeb..57371e1 100644
--- a/drivers/leds/leds-ss4200.c
+++ b/drivers/leds/leds-ss4200.c
@@ -79,7 +79,7 @@ static int __init ss4200_led_dmi_callback(const struct dmi_system_id *id)
return 1;
}
-static unsigned int __initdata nodetect;
+static bool __initdata nodetect;
module_param_named(nodetect, nodetect, bool, 0);
MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
diff --git a/drivers/leds/leds-tca6507.c b/drivers/leds/leds-tca6507.c
new file mode 100644
index 0000000..133f89f
--- /dev/null
+++ b/drivers/leds/leds-tca6507.c
@@ -0,0 +1,779 @@
+/*
+ * leds-tca6507
+ *
+ * The TCA6507 is a programmable LED controller that can drive 7
+ * separate lines either by holding them low, or by pulsing them
+ * with modulated width.
+ * The modulation can be varied in a simple pattern to produce a blink or
+ * double-blink.
+ *
+ * This driver can configure each line either as a 'GPIO' which is out-only
+ * (no pull-up) or as an LED with variable brightness and hardware-assisted
+ * blinking.
+ *
+ * Apart from OFF and ON there are three programmable brightness levels which
+ * can be programmed from 0 to 15 and indicate how many 500usec intervals in
+ * each 8msec that the led is 'on'. The levels are named MASTER, BANK0 and
+ * BANK1.
+ *
+ * There are two different blink rates that can be programmed, each with
+ * separate time for rise, on, fall, off and second-off. Thus if 3 or more
+ * different non-trivial rates are required, software must be used for the extra
+ * rates. The two different blink rates must align with the two levels BANK0 and
+ * BANK1.
+ * This driver does not support double-blink so 'second-off' always matches
+ * 'off'.
+ *
+ * Only 16 different times can be programmed in a roughly logarithmic scale from
+ * 64ms to 16320ms. To be precise the possible times are:
+ * 0, 64, 128, 192, 256, 384, 512, 768,
+ * 1024, 1536, 2048, 3072, 4096, 5760, 8128, 16320
+ *
+ * Times that cannot be closely matched with these must be
+ * handled in software. This driver allows 12.5% error in matching.
+ *
+ * This driver does not allow rise/fall rates to be set explicitly. When trying
+ * to match a given 'on' or 'off' period, an appropriate pair of 'change' and
+ * 'hold' times are chosen to get a close match. If the target delay is even,
+ * the 'change' number will be the smaller; if odd, the 'hold' number will be
+ * the smaller.
+ *
+ * Choosing pairs of delays with 12.5% errors allows us to match delays in the
+ * ranges: 56-72, 112-144, 168-216, 224-27504, 28560-36720.
+ * 26% of the achievable sums can be matched by multiple pairings. For example
+ * 1536 == 1536+0, 1024+512, or 768+768. This driver will always choose the
+ * pairing with the least maximum - 768+768 in this case. Other pairings are
+ * not available.
+ *
+ * Access to the 3 levels and 2 blinks is on a first-come, first-served basis.
+ * Access can be shared by multiple leds if they have the same level and
+ * either same blink rates, or some don't blink.
+ * When a led changes, it relinquishes access and tries again, so it might
+ * lose access to hardware blink.
+ * If a blink engine cannot be allocated, software blink is used.
+ * If the desired brightness cannot be allocated, the closest available non-zero
+ * brightness is used. As 'full' is always available, the worst case would be
+ * to have two different blink rates at '1', with Max at '2', then other leds
+ * will have to choose between '2' and '16'. Hopefully this is not likely.
+ *
+ * Each bank (BANK0 and BANK1) has two usage counts - LEDs using the brightness
+ * and LEDs using the blink. It can only be reprogrammed when the appropriate
+ * counter is zero. The MASTER level has a single usage count.
+ *
+ * Each Led has programmable 'on' and 'off' time as milliseconds. With each
+ * there is a flag saying if it was explicitly requested or defaulted.
+ * Similarly the banks know if each time was explicit or a default. Defaults
+ * are permitted to be changed freely - they are not recognised when matching.
+ *
+ *
+ * An led-tca6507 device must be provided with platform data. This data
+ * lists for each output: the name, default trigger, and whether the signal
+ * is being used as a GPIO rather than an LED. 'struct led_platform_data'
+ * is used for this. If 'name' is NULL, the output isn't used. If 'flags'
+ * is TCA6507_MAKE_GPIO, the output is a GPO.
+ * The "struct led_platform_data" can be embedded in a
+ * "struct tca6507_platform_data" which adds a 'gpio_base' for the GPIOs,
+ * and a 'setup' callback which is called once the GPIOs are available.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/leds.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/gpio.h>
+#include <linux/workqueue.h>
+#include <linux/leds-tca6507.h>
+
+/* LED select registers determine the source that drives LED outputs */
+#define TCA6507_LS_LED_OFF 0x0 /* Output HI-Z (off) */
+#define TCA6507_LS_LED_OFF1 0x1 /* Output HI-Z (off) - not used */
+#define TCA6507_LS_LED_PWM0 0x2 /* Output LOW with Bank0 rate */
+#define TCA6507_LS_LED_PWM1 0x3 /* Output LOW with Bank1 rate */
+#define TCA6507_LS_LED_ON 0x4 /* Output LOW (on) */
+#define TCA6507_LS_LED_MIR 0x5 /* Output LOW with Master Intensity */
+#define TCA6507_LS_BLINK0 0x6 /* Blink at Bank0 rate */
+#define TCA6507_LS_BLINK1 0x7 /* Blink at Bank1 rate */
+
+enum {
+ BANK0,
+ BANK1,
+ MASTER,
+};
+static int bank_source[3] = {
+ TCA6507_LS_LED_PWM0,
+ TCA6507_LS_LED_PWM1,
+ TCA6507_LS_LED_MIR,
+};
+static int blink_source[2] = {
+ TCA6507_LS_BLINK0,
+ TCA6507_LS_BLINK1,
+};
+
+/* PWM registers */
+#define TCA6507_REG_CNT 11
+
+/*
+ * 0x00, 0x01, 0x02 encode the TCA6507_LS_* values, each output
+ * owns one bit in each register
+ */
+#define TCA6507_FADE_ON 0x03
+#define TCA6507_FULL_ON 0x04
+#define TCA6507_FADE_OFF 0x05
+#define TCA6507_FIRST_OFF 0x06
+#define TCA6507_SECOND_OFF 0x07
+#define TCA6507_MAX_INTENSITY 0x08
+#define TCA6507_MASTER_INTENSITY 0x09
+#define TCA6507_INITIALIZE 0x0A
+
+#define INIT_CODE 0x8
+
+#define TIMECODES 16
+static int time_codes[TIMECODES] = {
+ 0, 64, 128, 192, 256, 384, 512, 768,
+ 1024, 1536, 2048, 3072, 4096, 5760, 8128, 16320
+};
+
+/* Convert an led.brightness level (0..255) to a TCA6507 level (0..15) */
+static inline int TO_LEVEL(int brightness)
+{
+ return brightness >> 4;
+}
+
+/* ...and convert back */
+static inline int TO_BRIGHT(int level)
+{
+ if (level)
+ return (level << 4) | 0xf;
+ return 0;
+}
+
+#define NUM_LEDS 7
+struct tca6507_chip {
+ int reg_set; /* One bit per register where
+ * a '1' means the register
+ * should be written */
+ u8 reg_file[TCA6507_REG_CNT];
+ /* Bank 2 is Master Intensity and doesn't use times */
+ struct bank {
+ int level;
+ int ontime, offtime;
+ int on_dflt, off_dflt;
+ int time_use, level_use;
+ } bank[3];
+ struct i2c_client *client;
+ struct work_struct work;
+ spinlock_t lock;
+
+ struct tca6507_led {
+ struct tca6507_chip *chip;
+ struct led_classdev led_cdev;
+ int num;
+ int ontime, offtime;
+ int on_dflt, off_dflt;
+ int bank; /* Bank used, or -1 */
+ int blink; /* Set if hardware-blinking */
+ } leds[NUM_LEDS];
+#ifdef CONFIG_GPIOLIB
+ struct gpio_chip gpio;
+ const char *gpio_name[NUM_LEDS];
+ int gpio_map[NUM_LEDS];
+#endif
+};
+
+static const struct i2c_device_id tca6507_id[] = {
+ { "tca6507" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tca6507_id);
+
+static int choose_times(int msec, int *c1p, int *c2p)
+{
+ /*
+ * Choose two timecodes which add to 'msec' as near as possible.
+ * The first returned is the 'on' or 'off' time. The second is to be
+ * used as a 'fade-on' or 'fade-off' time. If 'msec' is even,
+ * the first will not be smaller than the second. If 'msec' is odd,
+ * the first will not be larger than the second.
+ * If we cannot get a sum within 1/8 of 'msec' fail with -EINVAL,
+ * otherwise return the sum that was achieved, plus 1 if the first is
+ * smaller.
+ * If two possibilities are equally good (e.g. 512+0, 256+256), choose
+ * the first pair so there is more change-time visible (i.e. it is
+ * softer).
+ */
+ int c1, c2;
+ int tmax = msec * 9 / 8;
+ int tmin = msec * 7 / 8;
+ int diff = 65536;
+
+ /* We start at '1' to ensure we never even think of choosing a
+ * total time of '0'.
+ */
+ for (c1 = 1; c1 < TIMECODES; c1++) {
+ int t = time_codes[c1];
+ if (t*2 < tmin)
+ continue;
+ if (t > tmax)
+ break;
+ for (c2 = 0; c2 <= c1; c2++) {
+ int tt = t + time_codes[c2];
+ int d;
+ if (tt < tmin)
+ continue;
+ if (tt > tmax)
+ break;
+ /* This works! */
+ d = abs(msec - tt);
+ if (d >= diff)
+ continue;
+ /* Best yet */
+ *c1p = c1;
+ *c2p = c2;
+ diff = d;
+ if (d == 0)
+ return msec;
+ }
+ }
+ if (diff < 65536) {
+ int actual;
+ if (msec & 1) {
+ c1 = *c2p;
+ *c2p = *c1p;
+ *c1p = c1;
+ }
+ actual = time_codes[*c1p] + time_codes[*c2p];
+ if (*c1p < *c2p)
+ return actual + 1;
+ else
+ return actual;
+ }
+ /* No close match */
+ return -EINVAL;
+}
+
+/*
+ * Update the register file with the appropriate 3-bit state for
+ * the given led.
+ */
+static void set_select(struct tca6507_chip *tca, int led, int val)
+{
+ int mask = (1 << led);
+ int bit;
+
+ for (bit = 0; bit < 3; bit++) {
+ int n = tca->reg_file[bit] & ~mask;
+ if (val & (1 << bit))
+ n |= mask;
+ if (tca->reg_file[bit] != n) {
+ tca->reg_file[bit] = n;
+ tca->reg_set |= (1 << bit);
+ }
+ }
+}
+
+/* Update the register file with the appropriate 4-bit code for
+ * one bank or other. This can be used for timers, for levels, or
+ * for initialisation.
+ */
+static void set_code(struct tca6507_chip *tca, int reg, int bank, int new)
+{
+ int mask = 0xF;
+ int n;
+ if (bank) {
+ mask <<= 4;
+ new <<= 4;
+ }
+ n = tca->reg_file[reg] & ~mask;
+ n |= new;
+ if (tca->reg_file[reg] != n) {
+ tca->reg_file[reg] = n;
+ tca->reg_set |= 1 << reg;
+ }
+}
+
+/* Update brightness level. */
+static void set_level(struct tca6507_chip *tca, int bank, int level)
+{
+ switch (bank) {
+ case BANK0:
+ case BANK1:
+ set_code(tca, TCA6507_MAX_INTENSITY, bank, level);
+ break;
+ case MASTER:
+ set_code(tca, TCA6507_MASTER_INTENSITY, 0, level);
+ break;
+ }
+ tca->bank[bank].level = level;
+}
+
+/* Record all relevant time code for a given bank */
+static void set_times(struct tca6507_chip *tca, int bank)
+{
+ int c1, c2;
+ int result;
+
+ result = choose_times(tca->bank[bank].ontime, &c1, &c2);
+ dev_dbg(&tca->client->dev,
+ "Chose on times %d(%d) %d(%d) for %dms\n", c1, time_codes[c1],
+ c2, time_codes[c2], tca->bank[bank].ontime);
+ set_code(tca, TCA6507_FADE_ON, bank, c2);
+ set_code(tca, TCA6507_FULL_ON, bank, c1);
+ tca->bank[bank].ontime = result;
+
+ result = choose_times(tca->bank[bank].offtime, &c1, &c2);
+ dev_dbg(&tca->client->dev,
+ "Chose off times %d(%d) %d(%d) for %dms\n", c1, time_codes[c1],
+ c2, time_codes[c2], tca->bank[bank].offtime);
+ set_code(tca, TCA6507_FADE_OFF, bank, c2);
+ set_code(tca, TCA6507_FIRST_OFF, bank, c1);
+ set_code(tca, TCA6507_SECOND_OFF, bank, c1);
+ tca->bank[bank].offtime = result;
+
+ set_code(tca, TCA6507_INITIALIZE, bank, INIT_CODE);
+}
+
+/* Write all needed register of tca6507 */
+
+static void tca6507_work(struct work_struct *work)
+{
+ struct tca6507_chip *tca = container_of(work, struct tca6507_chip,
+ work);
+ struct i2c_client *cl = tca->client;
+ int set;
+ u8 file[TCA6507_REG_CNT];
+ int r;
+
+ spin_lock_irq(&tca->lock);
+ set = tca->reg_set;
+ memcpy(file, tca->reg_file, TCA6507_REG_CNT);
+ tca->reg_set = 0;
+ spin_unlock_irq(&tca->lock);
+
+ for (r = 0; r < TCA6507_REG_CNT; r++)
+ if (set & (1<<r))
+ i2c_smbus_write_byte_data(cl, r, file[r]);
+}
+
+static void led_release(struct tca6507_led *led)
+{
+ /* If led owns any resource, release it. */
+ struct tca6507_chip *tca = led->chip;
+ if (led->bank >= 0) {
+ struct bank *b = tca->bank + led->bank;
+ if (led->blink)
+ b->time_use--;
+ b->level_use--;
+ }
+ led->blink = 0;
+ led->bank = -1;
+}
+
+static int led_prepare(struct tca6507_led *led)
+{
+ /* Assign this led to a bank, configuring that bank if necessary. */
+ int level = TO_LEVEL(led->led_cdev.brightness);
+ struct tca6507_chip *tca = led->chip;
+ int c1, c2;
+ int i;
+ struct bank *b;
+ int need_init = 0;
+
+ led->led_cdev.brightness = TO_BRIGHT(level);
+ if (level == 0) {
+ set_select(tca, led->num, TCA6507_LS_LED_OFF);
+ return 0;
+ }
+
+ if (led->ontime == 0 || led->offtime == 0) {
+ /*
+ * Just set the brightness, choosing first usable bank.
+ * If none perfect, choose best.
+ * Count backwards so we check MASTER bank first
+ * to avoid wasting a timer.
+ */
+ int best = -1;/* full-on */
+ int diff = 15-level;
+
+ if (level == 15) {
+ set_select(tca, led->num, TCA6507_LS_LED_ON);
+ return 0;
+ }
+
+ for (i = MASTER; i >= BANK0; i--) {
+ int d;
+ if (tca->bank[i].level == level ||
+ tca->bank[i].level_use == 0) {
+ best = i;
+ break;
+ }
+ d = abs(level - tca->bank[i].level);
+ if (d < diff) {
+ diff = d;
+ best = i;
+ }
+ }
+ if (best == -1) {
+ /* Best brightness is full-on */
+ set_select(tca, led->num, TCA6507_LS_LED_ON);
+ led->led_cdev.brightness = LED_FULL;
+ return 0;
+ }
+
+ if (!tca->bank[best].level_use)
+ set_level(tca, best, level);
+
+ tca->bank[best].level_use++;
+ led->bank = best;
+ set_select(tca, led->num, bank_source[best]);
+ led->led_cdev.brightness = TO_BRIGHT(tca->bank[best].level);
+ return 0;
+ }
+
+ /*
+ * We have on/off time so we need to try to allocate a timing bank.
+ * First check if times are compatible with hardware and give up if
+ * not.
+ */
+ if (choose_times(led->ontime, &c1, &c2) < 0)
+ return -EINVAL;
+ if (choose_times(led->offtime, &c1, &c2) < 0)
+ return -EINVAL;
+
+ for (i = BANK0; i <= BANK1; i++) {
+ if (tca->bank[i].level_use == 0)
+ /* not in use - it is ours! */
+ break;
+ if (tca->bank[i].level != level)
+ /* Incompatible level - skip */
+ /* FIX: if timer matches we maybe should consider
+ * this anyway...
+ */
+ continue;
+
+ if (tca->bank[i].time_use == 0)
+ /* Timer not in use, and level matches - use it */
+ break;
+
+ if (!(tca->bank[i].on_dflt ||
+ led->on_dflt ||
+ tca->bank[i].ontime == led->ontime))
+ /* on time is incompatible */
+ continue;
+
+ if (!(tca->bank[i].off_dflt ||
+ led->off_dflt ||
+ tca->bank[i].offtime == led->offtime))
+ /* off time is incompatible */
+ continue;
+
+ /* looks like a suitable match */
+ break;
+ }
+
+ if (i > BANK1)
+ /* Nothing matches - how sad */
+ return -EINVAL;
+
+ b = &tca->bank[i];
+ if (b->level_use == 0)
+ set_level(tca, i, level);
+ b->level_use++;
+ led->bank = i;
+
+ if (b->on_dflt ||
+ !led->on_dflt ||
+ b->time_use == 0) {
+ b->ontime = led->ontime;
+ b->on_dflt = led->on_dflt;
+ need_init = 1;
+ }
+
+ if (b->off_dflt ||
+ !led->off_dflt ||
+ b->time_use == 0) {
+ b->offtime = led->offtime;
+ b->off_dflt = led->off_dflt;
+ need_init = 1;
+ }
+
+ if (need_init)
+ set_times(tca, i);
+
+ led->ontime = b->ontime;
+ led->offtime = b->offtime;
+
+ b->time_use++;
+ led->blink = 1;
+ led->led_cdev.brightness = TO_BRIGHT(b->level);
+ set_select(tca, led->num, blink_source[i]);
+ return 0;
+}
+
+static int led_assign(struct tca6507_led *led)
+{
+ struct tca6507_chip *tca = led->chip;
+ int err;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tca->lock, flags);
+ led_release(led);
+ err = led_prepare(led);
+ if (err) {
+ /*
+ * Can only fail on timer setup. In that case we need to
+ * re-establish as steady level.
+ */
+ led->ontime = 0;
+ led->offtime = 0;
+ led_prepare(led);
+ }
+ spin_unlock_irqrestore(&tca->lock, flags);
+
+ if (tca->reg_set)
+ schedule_work(&tca->work);
+ return err;
+}
+
+static void tca6507_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct tca6507_led *led = container_of(led_cdev, struct tca6507_led,
+ led_cdev);
+ led->led_cdev.brightness = brightness;
+ led->ontime = 0;
+ led->offtime = 0;
+ led_assign(led);
+}
+
+static int tca6507_blink_set(struct led_classdev *led_cdev,
+ unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct tca6507_led *led = container_of(led_cdev, struct tca6507_led,
+ led_cdev);
+
+ if (*delay_on == 0)
+ led->on_dflt = 1;
+ else if (delay_on != &led_cdev->blink_delay_on)
+ led->on_dflt = 0;
+ led->ontime = *delay_on;
+
+ if (*delay_off == 0)
+ led->off_dflt = 1;
+ else if (delay_off != &led_cdev->blink_delay_off)
+ led->off_dflt = 0;
+ led->offtime = *delay_off;
+
+ if (led->ontime == 0)
+ led->ontime = 512;
+ if (led->offtime == 0)
+ led->offtime = 512;
+
+ if (led->led_cdev.brightness == LED_OFF)
+ led->led_cdev.brightness = LED_FULL;
+ if (led_assign(led) < 0) {
+ led->ontime = 0;
+ led->offtime = 0;
+ led->led_cdev.brightness = LED_OFF;
+ return -EINVAL;
+ }
+ *delay_on = led->ontime;
+ *delay_off = led->offtime;
+ return 0;
+}
+
+#ifdef CONFIG_GPIOLIB
+static void tca6507_gpio_set_value(struct gpio_chip *gc,
+ unsigned offset, int val)
+{
+ struct tca6507_chip *tca = container_of(gc, struct tca6507_chip, gpio);
+ unsigned long flags;
+
+ spin_lock_irqsave(&tca->lock, flags);
+ /*
+ * 'OFF' is floating high, and 'ON' is pulled down, so it has the
+ * inverse sense of 'val'.
+ */
+ set_select(tca, tca->gpio_map[offset],
+ val ? TCA6507_LS_LED_OFF : TCA6507_LS_LED_ON);
+ spin_unlock_irqrestore(&tca->lock, flags);
+ if (tca->reg_set)
+ schedule_work(&tca->work);
+}
+
+static int tca6507_gpio_direction_output(struct gpio_chip *gc,
+ unsigned offset, int val)
+{
+ tca6507_gpio_set_value(gc, offset, val);
+ return 0;
+}
+
+static int tca6507_probe_gpios(struct i2c_client *client,
+ struct tca6507_chip *tca,
+ struct tca6507_platform_data *pdata)
+{
+ int err;
+ int i = 0;
+ int gpios = 0;
+
+ for (i = 0; i < NUM_LEDS; i++)
+ if (pdata->leds.leds[i].name && pdata->leds.leds[i].flags) {
+ /* Configure as a gpio */
+ tca->gpio_name[gpios] = pdata->leds.leds[i].name;
+ tca->gpio_map[gpios] = i;
+ gpios++;
+ }
+
+ if (!gpios)
+ return 0;
+
+ tca->gpio.label = "gpio-tca6507";
+ tca->gpio.names = tca->gpio_name;
+ tca->gpio.ngpio = gpios;
+ tca->gpio.base = pdata->gpio_base;
+ tca->gpio.owner = THIS_MODULE;
+ tca->gpio.direction_output = tca6507_gpio_direction_output;
+ tca->gpio.set = tca6507_gpio_set_value;
+ tca->gpio.dev = &client->dev;
+ err = gpiochip_add(&tca->gpio);
+ if (err) {
+ tca->gpio.ngpio = 0;
+ return err;
+ }
+ if (pdata->setup)
+ pdata->setup(tca->gpio.base, tca->gpio.ngpio);
+ return 0;
+}
+
+static void tca6507_remove_gpio(struct tca6507_chip *tca)
+{
+ if (tca->gpio.ngpio) {
+ int err = gpiochip_remove(&tca->gpio);
+ dev_err(&tca->client->dev, "%s failed, %d\n",
+ "gpiochip_remove()", err);
+ }
+}
+#else /* CONFIG_GPIOLIB */
+static int tca6507_probe_gpios(struct i2c_client *client,
+ struct tca6507_chip *tca,
+ struct tca6507_platform_data *pdata)
+{
+ return 0;
+}
+static void tca6507_remove_gpio(struct tca6507_chip *tca)
+{
+}
+#endif /* CONFIG_GPIOLIB */
+
+static int __devinit tca6507_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tca6507_chip *tca;
+ struct i2c_adapter *adapter;
+ struct tca6507_platform_data *pdata;
+ int err;
+ int i = 0;
+
+ adapter = to_i2c_adapter(client->dev.parent);
+ pdata = client->dev.platform_data;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_I2C))
+ return -EIO;
+
+ if (!pdata || pdata->leds.num_leds != NUM_LEDS) {
+ dev_err(&client->dev, "Need %d entries in platform-data list\n",
+ NUM_LEDS);
+ return -ENODEV;
+ }
+ err = -ENOMEM;
+ tca = kzalloc(sizeof(*tca), GFP_KERNEL);
+ if (!tca)
+ goto exit;
+
+ tca->client = client;
+ INIT_WORK(&tca->work, tca6507_work);
+ spin_lock_init(&tca->lock);
+ i2c_set_clientdata(client, tca);
+
+ for (i = 0; i < NUM_LEDS; i++) {
+ struct tca6507_led *l = tca->leds + i;
+
+ l->chip = tca;
+ l->num = i;
+ if (pdata->leds.leds[i].name && !pdata->leds.leds[i].flags) {
+ l->led_cdev.name = pdata->leds.leds[i].name;
+ l->led_cdev.default_trigger
+ = pdata->leds.leds[i].default_trigger;
+ l->led_cdev.brightness_set = tca6507_brightness_set;
+ l->led_cdev.blink_set = tca6507_blink_set;
+ l->bank = -1;
+ err = led_classdev_register(&client->dev,
+ &l->led_cdev);
+ if (err < 0)
+ goto exit;
+ }
+ }
+ err = tca6507_probe_gpios(client, tca, pdata);
+ if (err)
+ goto exit;
+ /* set all registers to known state - zero */
+ tca->reg_set = 0x7f;
+ schedule_work(&tca->work);
+
+ return 0;
+exit:
+ while (i--)
+ if (tca->leds[i].led_cdev.name)
+ led_classdev_unregister(&tca->leds[i].led_cdev);
+ cancel_work_sync(&tca->work);
+ i2c_set_clientdata(client, NULL);
+ kfree(tca);
+ return err;
+}
+
+static int __devexit tca6507_remove(struct i2c_client *client)
+{
+ int i;
+ struct tca6507_chip *tca = i2c_get_clientdata(client);
+ struct tca6507_led *tca_leds = tca->leds;
+
+ for (i = 0; i < NUM_LEDS; i++) {
+ if (tca_leds[i].led_cdev.name)
+ led_classdev_unregister(&tca_leds[i].led_cdev);
+ }
+ tca6507_remove_gpio(tca);
+ cancel_work_sync(&tca->work);
+ i2c_set_clientdata(client, NULL);
+ kfree(tca);
+
+ return 0;
+}
+
+static struct i2c_driver tca6507_driver = {
+ .driver = {
+ .name = "leds-tca6507",
+ .owner = THIS_MODULE,
+ },
+ .probe = tca6507_probe,
+ .remove = __devexit_p(tca6507_remove),
+ .id_table = tca6507_id,
+};
+
+static int __init tca6507_leds_init(void)
+{
+ return i2c_add_driver(&tca6507_driver);
+}
+
+static void __exit tca6507_leds_exit(void)
+{
+ i2c_del_driver(&tca6507_driver);
+}
+
+module_init(tca6507_leds_init);
+module_exit(tca6507_leds_exit);
+
+MODULE_AUTHOR("NeilBrown <neilb@suse.de>");
+MODULE_DESCRIPTION("TCA6507 LED/GPO driver");
+MODULE_LICENSE("GPL v2");
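The header comment in leds-tca6507.c above describes a 16-entry time-code table and a 0..255 to 0..15 brightness mapping; a small stand-alone check of those conversions (table and shift logic copied from the driver, main() added only for illustration):

#include <stdio.h>

static int time_codes[16] = {
	0, 64, 128, 192, 256, 384, 512, 768,
	1024, 1536, 2048, 3072, 4096, 5760, 8128, 16320
};

/* led_classdev brightness (0..255) -> TCA6507 level (0..15), and back */
static int to_level(int brightness) { return brightness >> 4; }
static int to_bright(int level) { return level ? (level << 4) | 0xf : 0; }

int main(void)
{
	/* 1536 ms is reachable as 768+768, the pairing with the smallest
	 * maximum, as the header comment says the driver prefers. */
	printf("768 + 768 = %d ms\n", time_codes[7] + time_codes[7]);

	/* Brightness 200 lands in the level-12 bucket and comes back as 207,
	 * i.e. it survives the round trip at the 16-step resolution. */
	printf("200 -> level %d -> %d\n", to_level(200), to_bright(to_level(200)));
	return 0;
}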
diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c
index b1eb34c..74a24cf 100644
--- a/drivers/leds/leds-wm831x-status.c
+++ b/drivers/leds/leds-wm831x-status.c
@@ -237,7 +237,8 @@ static int wm831x_status_probe(struct platform_device *pdev)
goto err;
}
- drvdata = kzalloc(sizeof(struct wm831x_status), GFP_KERNEL);
+ drvdata = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_status),
+ GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
dev_set_drvdata(&pdev->dev, drvdata);
@@ -300,7 +301,6 @@ static int wm831x_status_probe(struct platform_device *pdev)
err_led:
led_classdev_unregister(&drvdata->cdev);
- kfree(drvdata);
err:
return ret;
}
@@ -311,7 +311,6 @@ static int wm831x_status_remove(struct platform_device *pdev)
device_remove_file(drvdata->cdev.dev, &dev_attr_src);
led_classdev_unregister(&drvdata->cdev);
- kfree(drvdata);
return 0;
}
@@ -325,17 +324,7 @@ static struct platform_driver wm831x_status_driver = {
.remove = wm831x_status_remove,
};
-static int __devinit wm831x_status_init(void)
-{
- return platform_driver_register(&wm831x_status_driver);
-}
-module_init(wm831x_status_init);
-
-static void wm831x_status_exit(void)
-{
- platform_driver_unregister(&wm831x_status_driver);
-}
-module_exit(wm831x_status_exit);
+module_platform_driver(wm831x_status_driver);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("WM831x status LED driver");
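The wm831x-status and wm8350 hunks switch to devm_kzalloc(), which ties the allocation's lifetime to the device, so the kfree() calls in the error and remove paths can go away. A minimal sketch of the pattern, assuming a hypothetical driver-private struct:

#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_priv {		/* hypothetical driver state */
	int value;
};

static int example_probe(struct platform_device *pdev)
{
	struct example_priv *priv;

	/* freed automatically when the device is unbound - no kfree() in
	 * remove() or on the probe error paths */
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);
	return 0;
}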
diff --git a/drivers/leds/leds-wm8350.c b/drivers/leds/leds-wm8350.c
index 4a12765..918d4ba 100644
--- a/drivers/leds/leds-wm8350.c
+++ b/drivers/leds/leds-wm8350.c
@@ -227,7 +227,7 @@ static int wm8350_led_probe(struct platform_device *pdev)
goto err_isink;
}
- led = kzalloc(sizeof(*led), GFP_KERNEL);
+ led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
if (led == NULL) {
ret = -ENOMEM;
goto err_dcdc;
@@ -259,12 +259,10 @@ static int wm8350_led_probe(struct platform_device *pdev)
ret = led_classdev_register(&pdev->dev, &led->cdev);
if (ret < 0)
- goto err_led;
+ goto err_dcdc;
return 0;
- err_led:
- kfree(led);
err_dcdc:
regulator_put(dcdc);
err_isink:
@@ -281,7 +279,6 @@ static int wm8350_led_remove(struct platform_device *pdev)
wm8350_led_disable(led);
regulator_put(led->dcdc);
regulator_put(led->isink);
- kfree(led);
return 0;
}
@@ -295,17 +292,7 @@ static struct platform_driver wm8350_led_driver = {
.shutdown = wm8350_led_shutdown,
};
-static int __devinit wm8350_led_init(void)
-{
- return platform_driver_register(&wm8350_led_driver);
-}
-module_init(wm8350_led_init);
-
-static void wm8350_led_exit(void)
-{
- platform_driver_unregister(&wm8350_led_driver);
-}
-module_exit(wm8350_led_exit);
+module_platform_driver(wm8350_led_driver);
MODULE_AUTHOR("Mark Brown");
MODULE_DESCRIPTION("WM8350 LED driver");
diff --git a/drivers/lguest/Makefile b/drivers/lguest/Makefile
index 8ac947c..c419750 100644
--- a/drivers/lguest/Makefile
+++ b/drivers/lguest/Makefile
@@ -18,7 +18,7 @@ Mastery: PREFIX=M
Beer:
@for f in Preparation Guest Drivers Launcher Host Switcher Mastery; do echo "{==- $$f -==}"; make -s $$f; done; echo "{==-==}"
Preparation Preparation! Guest Drivers Launcher Host Switcher Mastery:
- @sh ../../Documentation/virtual/lguest/extract $(PREFIX) `find ../../* -name '*.[chS]' -wholename '*lguest*'`
+ @sh ../../tools/lguest/extract $(PREFIX) `find ../../* -name '*.[chS]' -wholename '*lguest*'`
Puppy:
@clear
@printf " __ \n (___()'\`;\n /, /\`\n \\\\\\\"--\\\\\\ \n"
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c
index 0dc30ff..9e8388e 100644
--- a/drivers/lguest/lguest_device.c
+++ b/drivers/lguest/lguest_device.c
@@ -241,7 +241,7 @@ static void lg_notify(struct virtqueue *vq)
}
/* An extern declaration inside a C file is bad form. Don't do it. */
-extern void lguest_setup_irq(unsigned int irq);
+extern int lguest_setup_irq(unsigned int irq);
/*
* This routine finds the Nth virtqueue described in the configuration of
@@ -292,17 +292,21 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
/*
* OK, tell virtio_ring.c to set up a virtqueue now we know its size
- * and we've got a pointer to its pages.
+ * and we've got a pointer to its pages. Note that we set weak_barriers
+ * to 'true': the host is just a(nother) SMP CPU, so we only need inter-cpu
+ * barriers.
*/
- vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN,
- vdev, lvq->pages, lg_notify, callback, name);
+ vq = vring_new_virtqueue(lvq->config.num, LGUEST_VRING_ALIGN, vdev,
+ true, lvq->pages, lg_notify, callback, name);
if (!vq) {
err = -ENOMEM;
goto unmap;
}
/* Make sure the interrupt is allocated. */
- lguest_setup_irq(lvq->config.irq);
+ err = lguest_setup_irq(lvq->config.irq);
+ if (err)
+ goto destroy_vring;
/*
* Tell the interrupt for this virtqueue to go to the virtio_ring
@@ -315,7 +319,7 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
err = request_irq(lvq->config.irq, vring_interrupt, IRQF_SHARED,
dev_name(&vdev->dev), vq);
if (err)
- goto destroy_vring;
+ goto free_desc;
/*
 * Last of all we hook up our 'struct lguest_vq_info' to the
@@ -324,6 +328,8 @@ static struct virtqueue *lg_find_vq(struct virtio_device *vdev,
vq->priv = lvq;
return vq;
+free_desc:
+ irq_free_desc(lvq->config.irq);
destroy_vring:
vring_del_virtqueue(vq);
unmap:
@@ -381,6 +387,11 @@ error:
return PTR_ERR(vqs[i]);
}
+static const char *lg_bus_name(struct virtio_device *vdev)
+{
+ return "";
+}
+
/* The ops structure which hooks everything together. */
static struct virtio_config_ops lguest_config_ops = {
.get_features = lg_get_features,
@@ -392,6 +403,7 @@ static struct virtio_config_ops lguest_config_ops = {
.reset = lg_reset,
.find_vqs = lg_find_vqs,
.del_vqs = lg_del_vqs,
+ .bus_name = lg_bus_name,
};
/*
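The lg_find_vq() hunk above adds a free_desc label so that a failed request_irq() also frees the irq descriptor set up by lguest_setup_irq(). A generic sketch of this reverse-order goto unwind, with hypothetical acquire/release helpers standing in for the irq descriptor, the vring and the guest page mapping:

/* Hypothetical resources; the failing last step forces the unwind path. */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return -1; }
static void release_a(void) { }
static void release_b(void) { }

static int setup_example(void)
{
	int err;

	err = acquire_a();
	if (err)
		return err;

	err = acquire_b();
	if (err)
		goto undo_a;

	err = acquire_c();
	if (err)
		goto undo_b;	/* unwind everything acquired so far */

	return 0;

undo_b:
	release_b();
undo_a:
	release_a();
	return err;
}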
diff --git a/drivers/lguest/segments.c b/drivers/lguest/segments.c
index ede4658..c4fb424 100644
--- a/drivers/lguest/segments.c
+++ b/drivers/lguest/segments.c
@@ -81,8 +81,8 @@ static void fixup_gdt_table(struct lg_cpu *cpu, unsigned start, unsigned end)
* sometimes careless and leaves this as 0, even though it's
* running at privilege level 1. If so, we fix it here.
*/
- if ((cpu->arch.gdt[i].b & 0x00006000) == 0)
- cpu->arch.gdt[i].b |= (GUEST_PL << 13);
+ if (cpu->arch.gdt[i].dpl == 0)
+ cpu->arch.gdt[i].dpl |= GUEST_PL;
/*
* Each descriptor has an "accessed" bit. If we don't set it
@@ -90,7 +90,7 @@ static void fixup_gdt_table(struct lg_cpu *cpu, unsigned start, unsigned end)
* that entry into a segment register. But the GDT isn't
* writable by the Guest, so bad things can happen.
*/
- cpu->arch.gdt[i].b |= 0x00000100;
+ cpu->arch.gdt[i].type |= 0x1;
}
}
@@ -114,13 +114,19 @@ void setup_default_gdt_entries(struct lguest_ro_state *state)
/*
* The TSS segment refers to the TSS entry for this particular CPU.
- * Forgive the magic flags: the 0x8900 means the entry is Present, it's
- * privilege level 0 Available 386 TSS system segment, and the 0x67
- * means Saturn is eclipsed by Mercury in the twelfth house.
*/
- gdt[GDT_ENTRY_TSS].a = 0x00000067 | (tss << 16);
- gdt[GDT_ENTRY_TSS].b = 0x00008900 | (tss & 0xFF000000)
- | ((tss >> 16) & 0x000000FF);
+ gdt[GDT_ENTRY_TSS].a = 0;
+ gdt[GDT_ENTRY_TSS].b = 0;
+
+ gdt[GDT_ENTRY_TSS].limit0 = 0x67;
+ gdt[GDT_ENTRY_TSS].base0 = tss & 0xFFFF;
+ gdt[GDT_ENTRY_TSS].base1 = (tss >> 16) & 0xFF;
+ gdt[GDT_ENTRY_TSS].base2 = tss >> 24;
+ gdt[GDT_ENTRY_TSS].type = 0x9; /* 32-bit TSS (available) */
+ gdt[GDT_ENTRY_TSS].p = 0x1; /* Entry is present */
+ gdt[GDT_ENTRY_TSS].dpl = 0x0; /* Privilege level 0 */
+ gdt[GDT_ENTRY_TSS].s = 0x0; /* system segment */
+
}
/*
@@ -135,8 +141,8 @@ void setup_guest_gdt(struct lg_cpu *cpu)
*/
cpu->arch.gdt[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT;
cpu->arch.gdt[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT;
- cpu->arch.gdt[GDT_ENTRY_KERNEL_CS].b |= (GUEST_PL << 13);
- cpu->arch.gdt[GDT_ENTRY_KERNEL_DS].b |= (GUEST_PL << 13);
+ cpu->arch.gdt[GDT_ENTRY_KERNEL_CS].dpl |= GUEST_PL;
+ cpu->arch.gdt[GDT_ENTRY_KERNEL_DS].dpl |= GUEST_PL;
}
/*H:650
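The segments.c hunks above replace the magic dwords with the named bitfields of the x86 descriptor structure. A standalone userspace check (descriptor layout written out by hand here, as an assumption, rather than taken from kernel headers) that the new field values encode the same TSS descriptor as the old 0x00000067 / 0x00008900 constants:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tss = 0x12345678;	/* arbitrary example base address */

	/* Old encoding, copied from the removed lines. */
	uint32_t old_a = 0x00000067 | (tss << 16);
	uint32_t old_b = 0x00008900 | (tss & 0xFF000000)
			 | ((tss >> 16) & 0x000000FF);

	/* New encoding: limit0/base0 in the low dword; base1, type, s, dpl,
	 * p and base2 in the high dword (limit1/avl/l/d/g stay zero). */
	uint32_t limit0 = 0x67, base0 = tss & 0xFFFF;
	uint32_t base1 = (tss >> 16) & 0xFF, base2 = tss >> 24;
	uint32_t type = 0x9, s = 0, dpl = 0, p = 1;

	uint32_t new_a = (base0 << 16) | limit0;
	uint32_t new_b = base1 | (type << 8) | (s << 12) | (dpl << 13)
			 | (p << 15) | (base2 << 24);

	assert(new_a == old_a);
	assert(new_b == old_b);
	printf("a=%08x b=%08x\n", new_a, new_b);
	return 0;
}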
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 65af42f..39809035 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -697,7 +697,7 @@ void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start)
* interrupts are enabled. We always leave interrupts enabled while
* running the Guest.
*/
- regs->eflags = X86_EFLAGS_IF | 0x2;
+ regs->eflags = X86_EFLAGS_IF | X86_EFLAGS_BIT1;
/*
* The "Extended Instruction Pointer" register says where the Guest is
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index 75049e7..b026896 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -710,7 +710,7 @@ static ssize_t adb_read(struct file *file, char __user *buf,
req = NULL;
spin_lock_irqsave(&state->lock, flags);
add_wait_queue(&state->wait_queue, &wait);
- current->state = TASK_INTERRUPTIBLE;
+ set_current_state(TASK_INTERRUPTIBLE);
for (;;) {
req = state->completed;
@@ -734,7 +734,7 @@ static ssize_t adb_read(struct file *file, char __user *buf,
spin_lock_irqsave(&state->lock, flags);
}
- current->state = TASK_RUNNING;
+ set_current_state(TASK_RUNNING);
remove_wait_queue(&state->wait_queue, &wait);
spin_unlock_irqrestore(&state->lock, flags);
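The adb.c hunk above replaces direct writes to current->state with set_current_state(), which includes the memory barrier that orders the state change against the wakeup condition test. A condensed sketch of the classic open-coded wait-queue loop, with hypothetical queue and condition names:

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/types.h>

static DECLARE_WAIT_QUEUE_HEAD(example_wq);

static int wait_for_condition(bool (*done)(void))
{
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	add_wait_queue(&example_wq, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (done())
			break;
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		schedule();
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&example_wq, &wait);
	return ret;
}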
diff --git a/drivers/macintosh/ams/ams-core.c b/drivers/macintosh/ams/ams-core.c
index 399beb16..5c6a2d8 100644
--- a/drivers/macintosh/ams/ams-core.c
+++ b/drivers/macintosh/ams/ams-core.c
@@ -31,7 +31,7 @@
/* There is only one motion sensor per machine */
struct ams ams_info;
-static unsigned int verbose;
+static bool verbose;
module_param(verbose, bool, 0644);
MODULE_PARM_DESC(verbose, "Show free falls and shocks in kernel output");
diff --git a/drivers/macintosh/ams/ams-input.c b/drivers/macintosh/ams/ams-input.c
index 8a71239..b27e530 100644
--- a/drivers/macintosh/ams/ams-input.c
+++ b/drivers/macintosh/ams/ams-input.c
@@ -19,11 +19,11 @@
#include "ams.h"
-static unsigned int joystick;
+static bool joystick;
module_param(joystick, bool, S_IRUGO);
MODULE_PARM_DESC(joystick, "Enable the input class device on module load");
-static unsigned int invert;
+static bool invert;
module_param(invert, bool, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(invert, "Invert input data on X and Y axis");
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 2637c13..6dc26b6 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -81,13 +81,13 @@ static int rackmeter_ignore_nice;
*/
static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
{
- cputime64_t retval;
+ u64 retval;
- retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
- kstat_cpu(cpu).cpustat.iowait);
+ retval = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE] +
+ kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
if (rackmeter_ignore_nice)
- retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);
+ retval += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
return retval;
}
@@ -220,13 +220,11 @@ static void rackmeter_do_timer(struct work_struct *work)
int i, offset, load, cumm, pause;
cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
- total_ticks = (unsigned int)cputime64_sub(cur_jiffies,
- rcpu->prev_wall);
+ total_ticks = (unsigned int) (cur_jiffies - rcpu->prev_wall);
rcpu->prev_wall = cur_jiffies;
total_idle_ticks = get_cpu_idle_time(cpu);
- idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
- rcpu->prev_idle);
+ idle_ticks = (unsigned int) (total_idle_ticks - rcpu->prev_idle);
rcpu->prev_idle = total_idle_ticks;
/* We do a very dumb calculation to update the LEDs for now,
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 116a49c..54ac7ff 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -32,7 +32,6 @@
#include <linux/completion.h>
#include <linux/miscdevice.h>
#include <linux/delay.h>
-#include <linux/sysdev.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
@@ -681,9 +680,6 @@ static struct platform_driver smu_of_platform_driver =
static int __init smu_init_sysfs(void)
{
/*
- * Due to sysfs bogosity, a sysdev is not a real device, so
- * we should in fact create both if we want sysdev semantics
- * for power management.
* For now, we don't power manage machines with an SMU chip,
* I'm a bit too far from figuring out how that works with those
* new chipsets, but that will come back and bite us
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index 0236730..c60d025 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -52,7 +52,7 @@ static const char *sensor_location[3];
static int limit_adjust;
static int fan_speed = -1;
-static int verbose;
+static bool verbose;
MODULE_AUTHOR("Colin Leroy <colin@colino.net>");
MODULE_DESCRIPTION("Driver for ADT746x thermostat in iBook G4 and "
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 6d03774..cdf36b1 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1149,12 +1149,12 @@ void bitmap_daemon_work(struct mddev *mddev)
return;
}
if (time_before(jiffies, bitmap->daemon_lastrun
- + bitmap->mddev->bitmap_info.daemon_sleep))
+ + mddev->bitmap_info.daemon_sleep))
goto done;
bitmap->daemon_lastrun = jiffies;
if (bitmap->allclean) {
- bitmap->mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
+ mddev->thread->timeout = MAX_SCHEDULE_TIMEOUT;
goto done;
}
bitmap->allclean = 1;
@@ -1206,7 +1206,7 @@ void bitmap_daemon_work(struct mddev *mddev)
* sure that events_cleared is up-to-date.
*/
if (bitmap->need_sync &&
- bitmap->mddev->bitmap_info.external == 0) {
+ mddev->bitmap_info.external == 0) {
bitmap_super_t *sb;
bitmap->need_sync = 0;
sb = kmap_atomic(bitmap->sb_page, KM_USER0);
@@ -1270,8 +1270,8 @@ void bitmap_daemon_work(struct mddev *mddev)
done:
if (bitmap->allclean == 0)
- bitmap->mddev->thread->timeout =
- bitmap->mddev->bitmap_info.daemon_sleep;
+ mddev->thread->timeout =
+ mddev->bitmap_info.daemon_sleep;
mutex_unlock(&mddev->bitmap_info.mutex);
}
@@ -1587,7 +1587,7 @@ static void bitmap_set_memory_bits(struct bitmap *bitmap, sector_t offset, int n
}
if (!*bmc) {
struct page *page;
- *bmc = 1 | (needed ? NEEDED_MASK : 0);
+ *bmc = 2 | (needed ? NEEDED_MASK : 0);
bitmap_count_page(bitmap, offset, 1);
page = filemap_get_page(bitmap, offset >> CHUNK_BLOCK_SHIFT(bitmap));
set_page_attr(bitmap, page, BITMAP_PAGE_PENDING);
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index f84c080..b280c43 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -323,7 +323,7 @@ static int flakey_end_io(struct dm_target *ti, struct bio *bio,
* Corrupt successful READs while in down state.
* If flags were specified, only corrupt those that match.
*/
- if (!error && bio_submitted_while_down &&
+ if (fc->corrupt_bio_byte && !error && bio_submitted_while_down &&
(bio_data_dir(bio) == READ) && (fc->corrupt_bio_rw == READ) &&
all_corrupt_bio_flags_match(bio, fc))
corrupt_bio_data(bio, fc);
@@ -368,8 +368,17 @@ static int flakey_status(struct dm_target *ti, status_type_t type,
static int flakey_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg)
{
struct flakey_c *fc = ti->private;
+ struct dm_dev *dev = fc->dev;
+ int r = 0;
- return __blkdev_driver_ioctl(fc->dev->bdev, fc->dev->mode, cmd, arg);
+ /*
+ * Only pass ioctls through if the device sizes match exactly.
+ */
+ if (fc->start ||
+ ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+ r = scsi_verify_blk_ioctl(NULL, cmd);
+
+ return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
}
static int flakey_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index ad2eba4..ea5dd28 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -296,6 +296,8 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
unsigned offset;
unsigned num_bvecs;
sector_t remaining = where->count;
+ struct request_queue *q = bdev_get_queue(where->bdev);
+ sector_t discard_sectors;
/*
* where->count may be zero if rw holds a flush and we need to
@@ -305,9 +307,12 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
/*
* Allocate a suitably sized-bio.
*/
- num_bvecs = dm_sector_div_up(remaining,
- (PAGE_SIZE >> SECTOR_SHIFT));
- num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs);
+ if (rw & REQ_DISCARD)
+ num_bvecs = 1;
+ else
+ num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
+ dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
+
bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
bio->bi_sector = where->sector + (where->count - remaining);
bio->bi_bdev = where->bdev;
@@ -315,10 +320,14 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
bio->bi_destructor = dm_bio_destructor;
store_io_and_region_in_bio(bio, io, region);
- /*
- * Try and add as many pages as possible.
- */
- while (remaining) {
+ if (rw & REQ_DISCARD) {
+ discard_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
+ bio->bi_size = discard_sectors << SECTOR_SHIFT;
+ remaining -= discard_sectors;
+ } else while (remaining) {
+ /*
+ * Try and add as many pages as possible.
+ */
dp->get_page(dp, &page, &len, &offset);
len = min(len, to_bytes(remaining));
if (!bio_add_page(bio, page, len, offset))
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 31c2dc2..1ce84ed 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1437,7 +1437,7 @@ static int target_message(struct dm_ioctl *param, size_t param_size)
if (!argc) {
DMWARN("Empty message received.");
- goto out;
+ goto out_argv;
}
table = dm_get_live_table(md);
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 3921e3b..9728839 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -116,7 +116,17 @@ static int linear_ioctl(struct dm_target *ti, unsigned int cmd,
unsigned long arg)
{
struct linear_c *lc = (struct linear_c *) ti->private;
- return __blkdev_driver_ioctl(lc->dev->bdev, lc->dev->mode, cmd, arg);
+ struct dm_dev *dev = lc->dev;
+ int r = 0;
+
+ /*
+ * Only pass ioctls through if the device sizes match exactly.
+ */
+ if (lc->start ||
+ ti->len != i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT)
+ r = scsi_verify_blk_ioctl(NULL, cmd);
+
+ return r ? : __blkdev_driver_ioctl(dev->bdev, dev->mode, cmd, arg);
}
static int linear_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 5e0090e..801d92d 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1520,6 +1520,12 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
spin_unlock_irqrestore(&m->lock, flags);
+ /*
+ * Only pass ioctls through if the device sizes match exactly.
+ */
+ if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
+ r = scsi_verify_blk_ioctl(NULL, cmd);
+
return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
}
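The flakey, linear and multipath ioctl hunks above all add the same guard. A hypothetical helper (not in the patch) showing its shape: when the target does not cover the underlying device exactly, because of a non-zero start offset or a shorter length, only ioctls that scsi_verify_blk_ioctl() accepts are passed through to the device.

#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/fs.h>

static int passthrough_ioctl_check(struct dm_target *ti, struct dm_dev *dev,
				   sector_t start, unsigned int cmd)
{
	sector_t dev_sectors = i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;

	if (start || ti->len != dev_sectors)
		return scsi_verify_blk_ioctl(NULL, cmd);
	return 0;
}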
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index c2907d8..787022c 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -56,7 +56,8 @@ struct raid_dev {
struct raid_set {
struct dm_target *ti;
- uint64_t print_flags;
+ uint32_t bitmap_loaded;
+ uint32_t print_flags;
struct mddev md;
struct raid_type *raid_type;
@@ -667,7 +668,14 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
return ret;
sb = page_address(rdev->sb_page);
- if (sb->magic != cpu_to_le32(DM_RAID_MAGIC)) {
+
+ /*
+ * Two cases that we want to write new superblocks and rebuild:
+ * 1) New device (no matching magic number)
+ * 2) Device specified for rebuild (!In_sync w/ offset == 0)
+ */
+ if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) ||
+ (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
super_sync(rdev->mddev, rdev);
set_bit(FirstUse, &rdev->flags);
@@ -744,11 +752,8 @@ static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
*/
rdev_for_each(r, t, mddev) {
if (!test_bit(In_sync, &r->flags)) {
- if (!test_bit(FirstUse, &r->flags))
- DMERR("Superblock area of "
- "rebuild device %d should have been "
- "cleared.", r->raid_disk);
- set_bit(FirstUse, &r->flags);
+ DMINFO("Device %d specified for rebuild: "
+ "Clearing superblock", r->raid_disk);
rebuilds++;
} else if (test_bit(FirstUse, &r->flags))
new_devs++;
@@ -970,6 +975,7 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
INIT_WORK(&rs->md.event_work, do_table_event);
ti->private = rs;
+ ti->num_flush_requests = 1;
mutex_lock(&rs->md.reconfig_mutex);
ret = md_run(&rs->md);
@@ -1085,7 +1091,7 @@ static int raid_status(struct dm_target *ti, status_type_t type,
raid_param_cnt += 2;
}
- raid_param_cnt += (hweight64(rs->print_flags & ~DMPF_REBUILD) * 2);
+ raid_param_cnt += (hweight32(rs->print_flags & ~DMPF_REBUILD) * 2);
if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC))
raid_param_cnt--;
@@ -1197,7 +1203,12 @@ static void raid_resume(struct dm_target *ti)
{
struct raid_set *rs = ti->private;
- bitmap_load(&rs->md);
+ if (!rs->bitmap_loaded) {
+ bitmap_load(&rs->md);
+ rs->bitmap_loaded = 1;
+ } else
+ md_wakeup_thread(rs->md.thread);
+
mddev_resume(&rs->md);
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 8e91321..63cc542 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -699,7 +699,7 @@ static int validate_hardware_logical_block_alignment(struct dm_table *table,
while (i < dm_table_get_num_targets(table)) {
ti = dm_table_get_target(table, i++);
- blk_set_default_limits(&ti_limits);
+ blk_set_stacking_limits(&ti_limits);
/* combine all target devices' limits */
if (ti->type->iterate_devices)
@@ -1221,10 +1221,10 @@ int dm_calculate_queue_limits(struct dm_table *table,
struct queue_limits ti_limits;
unsigned i = 0;
- blk_set_default_limits(limits);
+ blk_set_stacking_limits(limits);
while (i < dm_table_get_num_targets(table)) {
- blk_set_default_limits(&ti_limits);
+ blk_set_stacking_limits(&ti_limits);
ti = dm_table_get_target(table, i++);
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 59c4f04..237571a 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -385,6 +385,7 @@ static int init_pmd(struct dm_pool_metadata *pmd,
data_sm = dm_sm_disk_create(tm, nr_blocks);
if (IS_ERR(data_sm)) {
DMERR("sm_disk_create failed");
+ dm_tm_unlock(tm, sblock);
r = PTR_ERR(data_sm);
goto bad;
}
@@ -789,6 +790,11 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
return 0;
}
+/*
+ * __open_device: Returns @td corresponding to device with id @dev,
+ * creating it if @create is set and incrementing @td->open_count.
+ * On failure, @td is undefined.
+ */
static int __open_device(struct dm_pool_metadata *pmd,
dm_thin_id dev, int create,
struct dm_thin_device **td)
@@ -799,10 +805,16 @@ static int __open_device(struct dm_pool_metadata *pmd,
struct disk_device_details details_le;
/*
- * Check the device isn't already open.
+ * If the device is already open, return it.
*/
list_for_each_entry(td2, &pmd->thin_devices, list)
if (td2->id == dev) {
+ /*
+ * May not create an already-open device.
+ */
+ if (create)
+ return -EEXIST;
+
td2->open_count++;
*td = td2;
return 0;
@@ -817,6 +829,9 @@ static int __open_device(struct dm_pool_metadata *pmd,
if (r != -ENODATA || !create)
return r;
+ /*
+ * Create new device.
+ */
changed = 1;
details_le.mapped_blocks = 0;
details_le.transaction_id = cpu_to_le64(pmd->trans_id);
@@ -882,12 +897,10 @@ static int __create_thin(struct dm_pool_metadata *pmd,
r = __open_device(pmd, dev, 1, &td);
if (r) {
- __close_device(td);
dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
dm_btree_del(&pmd->bl_info, dev_root);
return r;
}
- td->changed = 1;
__close_device(td);
return r;
@@ -967,14 +980,14 @@ static int __create_snap(struct dm_pool_metadata *pmd,
goto bad;
r = __set_snapshot_details(pmd, td, origin, pmd->time);
+ __close_device(td);
+
if (r)
goto bad;
- __close_device(td);
return 0;
bad:
- __close_device(td);
dm_btree_remove(&pmd->tl_info, pmd->root, &key, &pmd->root);
dm_btree_remove(&pmd->details_info, pmd->details_root,
&key, &pmd->details_root);
@@ -1211,6 +1224,8 @@ static int __remove(struct dm_thin_device *td, dm_block_t block)
if (r)
return r;
+ td->mapped_blocks--;
+ td->changed = 1;
pmd->need_commit = 1;
return 0;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4720f68..b89c548 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -14,7 +14,6 @@
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
-#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
diff --git a/drivers/md/md.c b/drivers/md/md.c
index f47f1f8..ce88755 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -36,8 +36,7 @@
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
-#include <linux/mutex.h>
-#include <linux/buffer_head.h> /* for invalidate_bdev */
+#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
@@ -1714,6 +1713,8 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
}
if (sb->devflags & WriteMostly1)
set_bit(WriteMostly, &rdev->flags);
+ if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
+ set_bit(Replacement, &rdev->flags);
} else /* MULTIPATH are always insync */
set_bit(In_sync, &rdev->flags);
@@ -1767,6 +1768,9 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
sb->recovery_offset =
cpu_to_le64(rdev->recovery_offset);
}
+ if (test_bit(Replacement, &rdev->flags))
+ sb->feature_map |=
+ cpu_to_le32(MD_FEATURE_REPLACEMENT);
if (mddev->reshape_position != MaxSector) {
sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
@@ -2560,6 +2564,15 @@ state_show(struct md_rdev *rdev, char *page)
len += sprintf(page+len, "%swrite_error", sep);
sep = ",";
}
+ if (test_bit(WantReplacement, &rdev->flags)) {
+ len += sprintf(page+len, "%swant_replacement", sep);
+ sep = ",";
+ }
+ if (test_bit(Replacement, &rdev->flags)) {
+ len += sprintf(page+len, "%sreplacement", sep);
+ sep = ",";
+ }
+
return len+sprintf(page+len, "\n");
}
@@ -2628,6 +2641,42 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len)
} else if (cmd_match(buf, "-write_error")) {
clear_bit(WriteErrorSeen, &rdev->flags);
err = 0;
+ } else if (cmd_match(buf, "want_replacement")) {
+ /* Any non-spare device that is not a replacement can
+ * become want_replacement at any time, but we then need to
+ * check if recovery is needed.
+ */
+ if (rdev->raid_disk >= 0 &&
+ !test_bit(Replacement, &rdev->flags))
+ set_bit(WantReplacement, &rdev->flags);
+ set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
+ md_wakeup_thread(rdev->mddev->thread);
+ err = 0;
+ } else if (cmd_match(buf, "-want_replacement")) {
+ /* Clearing 'want_replacement' is always allowed.
+ * Once replacements starts it is too late though.
+ */
+ err = 0;
+ clear_bit(WantReplacement, &rdev->flags);
+ } else if (cmd_match(buf, "replacement")) {
+ /* Can only set a device as a replacement when array has not
+ * yet been started. Once running, replacement is automatic
+ * from spares, or by assigning 'slot'.
+ */
+ if (rdev->mddev->pers)
+ err = -EBUSY;
+ else {
+ set_bit(Replacement, &rdev->flags);
+ err = 0;
+ }
+ } else if (cmd_match(buf, "-replacement")) {
+ /* Similarly, can only clear Replacement before start */
+ if (rdev->mddev->pers)
+ err = -EBUSY;
+ else {
+ clear_bit(Replacement, &rdev->flags);
+ err = 0;
+ }
}
if (!err)
sysfs_notify_dirent_safe(rdev->sysfs_state);
@@ -2689,7 +2738,7 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
if (rdev->mddev->pers->hot_remove_disk == NULL)
return -EINVAL;
err = rdev->mddev->pers->
- hot_remove_disk(rdev->mddev, rdev->raid_disk);
+ hot_remove_disk(rdev->mddev, rdev);
if (err)
return err;
sysfs_unlink_rdev(rdev->mddev, rdev);
@@ -2697,7 +2746,6 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
md_wakeup_thread(rdev->mddev->thread);
} else if (rdev->mddev->pers) {
- struct md_rdev *rdev2;
/* Activating a spare .. or possibly reactivating
* if we ever get bitmaps working here.
*/
@@ -2711,10 +2759,6 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
if (rdev->mddev->pers->hot_add_disk == NULL)
return -EINVAL;
- list_for_each_entry(rdev2, &rdev->mddev->disks, same_set)
- if (rdev2->raid_disk == slot)
- return -EEXIST;
-
if (slot >= rdev->mddev->raid_disks &&
slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
return -ENOSPC;
@@ -4622,6 +4666,7 @@ static int md_alloc(dev_t dev, char *name)
mddev->queue->queuedata = mddev;
blk_queue_make_request(mddev->queue, md_make_request);
+ blk_set_stacking_limits(&mddev->queue->limits);
disk = alloc_disk(1 << shift);
if (!disk) {
@@ -6054,8 +6099,15 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
struct mddev *mddev = NULL;
int ro;
- if (!capable(CAP_SYS_ADMIN))
- return -EACCES;
+ switch (cmd) {
+ case RAID_VERSION:
+ case GET_ARRAY_INFO:
+ case GET_DISK_INFO:
+ break;
+ default:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ }
/*
* Commands dealing with the RAID driver but not any
@@ -6715,8 +6767,11 @@ static int md_seq_show(struct seq_file *seq, void *v)
if (test_bit(Faulty, &rdev->flags)) {
seq_printf(seq, "(F)");
continue;
- } else if (rdev->raid_disk < 0)
+ }
+ if (rdev->raid_disk < 0)
seq_printf(seq, "(S)"); /* spare */
+ if (test_bit(Replacement, &rdev->flags))
+ seq_printf(seq, "(R)");
sectors += rdev->sectors;
}
@@ -7278,7 +7333,8 @@ void md_do_sync(struct mddev *mddev)
printk(KERN_INFO
"md: checkpointing %s of %s.\n",
desc, mdname(mddev));
- mddev->recovery_cp = mddev->curr_resync;
+ mddev->recovery_cp =
+ mddev->curr_resync_completed;
}
} else
mddev->recovery_cp = MaxSector;
@@ -7296,9 +7352,9 @@ void md_do_sync(struct mddev *mddev)
rcu_read_unlock();
}
}
+ skip:
set_bit(MD_CHANGE_DEVS, &mddev->flags);
- skip:
if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
/* We completed so min/max setting can be forgotten if used. */
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
@@ -7328,6 +7384,7 @@ static int remove_and_add_spares(struct mddev *mddev)
{
struct md_rdev *rdev;
int spares = 0;
+ int removed = 0;
mddev->curr_resync_completed = 0;
@@ -7338,29 +7395,32 @@ static int remove_and_add_spares(struct mddev *mddev)
! test_bit(In_sync, &rdev->flags)) &&
atomic_read(&rdev->nr_pending)==0) {
if (mddev->pers->hot_remove_disk(
- mddev, rdev->raid_disk)==0) {
+ mddev, rdev) == 0) {
sysfs_unlink_rdev(mddev, rdev);
rdev->raid_disk = -1;
+ removed++;
}
}
+ if (removed)
+ sysfs_notify(&mddev->kobj, NULL,
+ "degraded");
- if (mddev->degraded) {
- list_for_each_entry(rdev, &mddev->disks, same_set) {
- if (rdev->raid_disk >= 0 &&
- !test_bit(In_sync, &rdev->flags) &&
- !test_bit(Faulty, &rdev->flags))
+
+ list_for_each_entry(rdev, &mddev->disks, same_set) {
+ if (rdev->raid_disk >= 0 &&
+ !test_bit(In_sync, &rdev->flags) &&
+ !test_bit(Faulty, &rdev->flags))
+ spares++;
+ if (rdev->raid_disk < 0
+ && !test_bit(Faulty, &rdev->flags)) {
+ rdev->recovery_offset = 0;
+ if (mddev->pers->
+ hot_add_disk(mddev, rdev) == 0) {
+ if (sysfs_link_rdev(mddev, rdev))
+ /* failure here is OK */;
spares++;
- if (rdev->raid_disk < 0
- && !test_bit(Faulty, &rdev->flags)) {
- rdev->recovery_offset = 0;
- if (mddev->pers->
- hot_add_disk(mddev, rdev) == 0) {
- if (sysfs_link_rdev(mddev, rdev))
- /* failure here is OK */;
- spares++;
- md_new_event(mddev);
- set_bit(MD_CHANGE_DEVS, &mddev->flags);
- }
+ md_new_event(mddev);
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
}
}
}
@@ -7475,7 +7535,7 @@ void md_check_recovery(struct mddev *mddev)
test_bit(Faulty, &rdev->flags) &&
atomic_read(&rdev->nr_pending)==0) {
if (mddev->pers->hot_remove_disk(
- mddev, rdev->raid_disk)==0) {
+ mddev, rdev) == 0) {
sysfs_unlink_rdev(mddev, rdev);
rdev->raid_disk = -1;
}
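The md.c additions above expose the new replacement machinery through the existing per-device state attribute and the /proc/mdstat markers. An illustrative exchange, with purely hypothetical array and device names:

  echo want_replacement > /sys/block/md0/md/dev-sdb1/state
  cat /sys/block/md0/md/dev-sdb1/state
  in_sync,want_replacement

Once a spare has been attached as a replacement, md_seq_show() would tag it with "(R)" in /proc/mdstat, alongside the existing "(F)" and "(S)" markers.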
diff --git a/drivers/md/md.h b/drivers/md/md.h
index cf742d9..44c63df 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -72,34 +72,7 @@ struct md_rdev {
* This reduces the burden of testing multiple flags in many cases
*/
- unsigned long flags;
-#define Faulty 1 /* device is known to have a fault */
-#define In_sync 2 /* device is in_sync with rest of array */
-#define WriteMostly 4 /* Avoid reading if at all possible */
-#define AutoDetected 7 /* added by auto-detect */
-#define Blocked 8 /* An error occurred but has not yet
- * been acknowledged by the metadata
- * handler, so don't allow writes
- * until it is cleared */
-#define WriteErrorSeen 9 /* A write error has been seen on this
- * device
- */
-#define FaultRecorded 10 /* Intermediate state for clearing
- * Blocked. The Fault is/will-be
- * recorded in the metadata, but that
- * metadata hasn't been stored safely
- * on disk yet.
- */
-#define BlockedBadBlocks 11 /* A writer is blocked because they
- * found an unacknowledged bad-block.
- * This can safely be cleared at any
- * time, and the writer will re-check.
- * It may be set at any time, and at
- * worst the writer will timeout and
- * re-check. So setting it as
- * accurately as possible is good, but
- * not absolutely critical.
- */
+ unsigned long flags; /* bit set of 'enum flag_bits' bits. */
wait_queue_head_t blocked_wait;
int desc_nr; /* descriptor index in the superblock */
@@ -152,6 +125,44 @@ struct md_rdev {
sector_t size; /* in sectors */
} badblocks;
};
+enum flag_bits {
+ Faulty, /* device is known to have a fault */
+ In_sync, /* device is in_sync with rest of array */
+ WriteMostly, /* Avoid reading if at all possible */
+ AutoDetected, /* added by auto-detect */
+ Blocked, /* An error occurred but has not yet
+ * been acknowledged by the metadata
+ * handler, so don't allow writes
+ * until it is cleared */
+ WriteErrorSeen, /* A write error has been seen on this
+ * device
+ */
+ FaultRecorded, /* Intermediate state for clearing
+ * Blocked. The Fault is/will-be
+ * recorded in the metadata, but that
+ * metadata hasn't been stored safely
+ * on disk yet.
+ */
+ BlockedBadBlocks, /* A writer is blocked because they
+ * found an unacknowledged bad-block.
+ * This can safely be cleared at any
+ * time, and the writer will re-check.
+ * It may be set at any time, and at
+ * worst the writer will timeout and
+ * re-check. So setting it as
+ * accurately as possible is good, but
+ * not absolutely critical.
+ */
+ WantReplacement, /* This device is a candidate to be
+ * hot-replaced, either because it has
+ * reported some faults, or because
+ * of explicit request.
+ */
+ Replacement, /* This device is a replacement for
+ * a want_replacement device with same
+ * raid_disk number.
+ */
+};
#define BB_LEN_MASK (0x00000000000001FFULL)
#define BB_OFFSET_MASK (0x7FFFFFFFFFFFFE00ULL)
@@ -428,7 +439,7 @@ struct md_personality
*/
void (*error_handler)(struct mddev *mddev, struct md_rdev *rdev);
int (*hot_add_disk) (struct mddev *mddev, struct md_rdev *rdev);
- int (*hot_remove_disk) (struct mddev *mddev, int number);
+ int (*hot_remove_disk) (struct mddev *mddev, struct md_rdev *rdev);
int (*spare_active) (struct mddev *mddev);
sector_t (*sync_request)(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster);
int (*resize) (struct mddev *mddev, sector_t sectors);
@@ -482,15 +493,20 @@ static inline char * mdname (struct mddev * mddev)
static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
char nm[20];
- sprintf(nm, "rd%d", rdev->raid_disk);
- return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
+ if (!test_bit(Replacement, &rdev->flags)) {
+ sprintf(nm, "rd%d", rdev->raid_disk);
+ return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
+ } else
+ return 0;
}
static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
{
char nm[20];
- sprintf(nm, "rd%d", rdev->raid_disk);
- sysfs_remove_link(&mddev->kobj, nm);
+ if (!test_bit(Replacement, &rdev->flags)) {
+ sprintf(nm, "rd%d", rdev->raid_disk);
+ sysfs_remove_link(&mddev->kobj, nm);
+ }
}
/*
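The md.h change above turns the rdev flag #defines into 'enum flag_bits'. Each enumerator is a bit number handed to the atomic bitops on the unsigned long flags word, not a mask. A hypothetical miniature of the pattern:

#include <linux/bitops.h>
#include <linux/types.h>

enum example_bits {
	ExFaulty,	/* bit 0 */
	ExInSync,	/* bit 1 */
	ExReplacement,	/* bit 2 */
};

static unsigned long example_flags;

static void mark_in_sync(void)
{
	set_bit(ExInSync, &example_flags);	/* atomically sets bit 1 */
}

static bool is_faulty(void)
{
	return test_bit(ExFaulty, &example_flags);
}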
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 5899246..a222f51 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -292,17 +292,16 @@ static int multipath_add_disk(struct mddev *mddev, struct md_rdev *rdev)
return err;
}
-static int multipath_remove_disk(struct mddev *mddev, int number)
+static int multipath_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct mpconf *conf = mddev->private;
int err = 0;
- struct md_rdev *rdev;
+ int number = rdev->raid_disk;
struct multipath_info *p = conf->multipaths + number;
print_multipath_conf(conf);
- rdev = p->rdev;
- if (rdev) {
+ if (rdev == p->rdev) {
if (test_bit(In_sync, &rdev->flags) ||
atomic_read(&rdev->nr_pending)) {
printk(KERN_ERR "hot-remove-disk, slot %d is identified"
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index ede2461..a0b225e 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -135,7 +135,7 @@ out_free_pages:
put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
j = -1;
out_free_bio:
- while ( ++j < pi->raid_disks )
+ while (++j < pi->raid_disks)
bio_put(r1_bio->bios[j]);
r1bio_pool_free(r1_bio, data);
return NULL;
@@ -164,7 +164,7 @@ static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
{
int i;
- for (i = 0; i < conf->raid_disks; i++) {
+ for (i = 0; i < conf->raid_disks * 2; i++) {
struct bio **bio = r1_bio->bios + i;
if (!BIO_SPECIAL(*bio))
bio_put(*bio);
@@ -185,7 +185,7 @@ static void put_buf(struct r1bio *r1_bio)
struct r1conf *conf = r1_bio->mddev->private;
int i;
- for (i=0; i<conf->raid_disks; i++) {
+ for (i = 0; i < conf->raid_disks * 2; i++) {
struct bio *bio = r1_bio->bios[i];
if (bio->bi_end_io)
rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
@@ -277,13 +277,14 @@ static inline void update_head_pos(int disk, struct r1bio *r1_bio)
static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
{
int mirror;
- int raid_disks = r1_bio->mddev->raid_disks;
+ struct r1conf *conf = r1_bio->mddev->private;
+ int raid_disks = conf->raid_disks;
- for (mirror = 0; mirror < raid_disks; mirror++)
+ for (mirror = 0; mirror < raid_disks * 2; mirror++)
if (r1_bio->bios[mirror] == bio)
break;
- BUG_ON(mirror == raid_disks);
+ BUG_ON(mirror == raid_disks * 2);
update_head_pos(mirror, r1_bio);
return mirror;
@@ -390,6 +391,11 @@ static void raid1_end_write_request(struct bio *bio, int error)
if (!uptodate) {
set_bit(WriteErrorSeen,
&conf->mirrors[mirror].rdev->flags);
+ if (!test_and_set_bit(WantReplacement,
+ &conf->mirrors[mirror].rdev->flags))
+ set_bit(MD_RECOVERY_NEEDED, &
+ conf->mddev->recovery);
+
set_bit(R1BIO_WriteError, &r1_bio->state);
} else {
/*
@@ -505,7 +511,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
start_disk = conf->last_used;
}
- for (i = 0 ; i < conf->raid_disks ; i++) {
+ for (i = 0 ; i < conf->raid_disks * 2 ; i++) {
sector_t dist;
sector_t first_bad;
int bad_sectors;
@@ -525,8 +531,17 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
if (test_bit(WriteMostly, &rdev->flags)) {
/* Don't balance among write-mostly, just
* use the first as a last resort */
- if (best_disk < 0)
+ if (best_disk < 0) {
+ if (is_badblock(rdev, this_sector, sectors,
+ &first_bad, &bad_sectors)) {
+ if (first_bad < this_sector)
+ /* Cannot use this */
+ continue;
+ best_good_sectors = first_bad - this_sector;
+ } else
+ best_good_sectors = sectors;
best_disk = disk;
+ }
continue;
}
/* This is a reasonable device to use. It might
@@ -609,7 +624,7 @@ int md_raid1_congested(struct mddev *mddev, int bits)
return 1;
rcu_read_lock();
- for (i = 0; i < mddev->raid_disks; i++) {
+ for (i = 0; i < conf->raid_disks * 2; i++) {
struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev && !test_bit(Faulty, &rdev->flags)) {
struct request_queue *q = bdev_get_queue(rdev->bdev);
@@ -974,7 +989,7 @@ read_again:
*/
plugged = mddev_check_plugged(mddev);
- disks = conf->raid_disks;
+ disks = conf->raid_disks * 2;
retry_write:
blocked_rdev = NULL;
rcu_read_lock();
@@ -988,7 +1003,8 @@ read_again:
}
r1_bio->bios[i] = NULL;
if (!rdev || test_bit(Faulty, &rdev->flags)) {
- set_bit(R1BIO_Degraded, &r1_bio->state);
+ if (i < conf->raid_disks)
+ set_bit(R1BIO_Degraded, &r1_bio->state);
continue;
}
@@ -1263,6 +1279,25 @@ static int raid1_spare_active(struct mddev *mddev)
*/
for (i = 0; i < conf->raid_disks; i++) {
struct md_rdev *rdev = conf->mirrors[i].rdev;
+ struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
+ if (repl
+ && repl->recovery_offset == MaxSector
+ && !test_bit(Faulty, &repl->flags)
+ && !test_and_set_bit(In_sync, &repl->flags)) {
+ /* replacement has just become active */
+ if (!rdev ||
+ !test_and_clear_bit(In_sync, &rdev->flags))
+ count++;
+ if (rdev) {
+ /* Replaced device not technically
+ * faulty, but we need to be sure
+ * it gets removed and never re-added
+ */
+ set_bit(Faulty, &rdev->flags);
+ sysfs_notify_dirent_safe(
+ rdev->sysfs_state);
+ }
+ }
if (rdev
&& !test_bit(Faulty, &rdev->flags)
&& !test_and_set_bit(In_sync, &rdev->flags)) {
@@ -1286,7 +1321,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
int mirror = 0;
struct mirror_info *p;
int first = 0;
- int last = mddev->raid_disks - 1;
+ int last = conf->raid_disks - 1;
if (mddev->recovery_disabled == conf->recovery_disabled)
return -EBUSY;
@@ -1294,8 +1329,9 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
if (rdev->raid_disk >= 0)
first = last = rdev->raid_disk;
- for (mirror = first; mirror <= last; mirror++)
- if ( !(p=conf->mirrors+mirror)->rdev) {
+ for (mirror = first; mirror <= last; mirror++) {
+ p = conf->mirrors+mirror;
+ if (!p->rdev) {
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
@@ -1322,21 +1358,35 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
rcu_assign_pointer(p->rdev, rdev);
break;
}
+ if (test_bit(WantReplacement, &p->rdev->flags) &&
+ p[conf->raid_disks].rdev == NULL) {
+ /* Add this device as a replacement */
+ clear_bit(In_sync, &rdev->flags);
+ set_bit(Replacement, &rdev->flags);
+ rdev->raid_disk = mirror;
+ err = 0;
+ conf->fullsync = 1;
+ rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
+ break;
+ }
+ }
md_integrity_add_rdev(rdev, mddev);
print_conf(conf);
return err;
}
-static int raid1_remove_disk(struct mddev *mddev, int number)
+static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct r1conf *conf = mddev->private;
int err = 0;
- struct md_rdev *rdev;
+ int number = rdev->raid_disk;
struct mirror_info *p = conf->mirrors+ number;
+ if (rdev != p->rdev)
+ p = conf->mirrors + conf->raid_disks + number;
+
print_conf(conf);
- rdev = p->rdev;
- if (rdev) {
+ if (rdev == p->rdev) {
if (test_bit(In_sync, &rdev->flags) ||
atomic_read(&rdev->nr_pending)) {
err = -EBUSY;
@@ -1358,7 +1408,21 @@ static int raid1_remove_disk(struct mddev *mddev, int number)
err = -EBUSY;
p->rdev = rdev;
goto abort;
- }
+ } else if (conf->mirrors[conf->raid_disks + number].rdev) {
+ /* We just removed a device that is being replaced.
+ * Move down the replacement. We drain all IO before
+ * doing this to avoid confusion.
+ */
+ struct md_rdev *repl =
+ conf->mirrors[conf->raid_disks + number].rdev;
+ raise_barrier(conf);
+ clear_bit(Replacement, &repl->flags);
+ p->rdev = repl;
+ conf->mirrors[conf->raid_disks + number].rdev = NULL;
+ lower_barrier(conf);
+ clear_bit(WantReplacement, &rdev->flags);
+ } else
+ clear_bit(WantReplacement, &rdev->flags);
err = md_integrity_register(mddev);
}
abort:
@@ -1411,6 +1475,10 @@ static void end_sync_write(struct bio *bio, int error)
} while (sectors_to_go > 0);
set_bit(WriteErrorSeen,
&conf->mirrors[mirror].rdev->flags);
+ if (!test_and_set_bit(WantReplacement,
+ &conf->mirrors[mirror].rdev->flags))
+ set_bit(MD_RECOVERY_NEEDED, &
+ mddev->recovery);
set_bit(R1BIO_WriteError, &r1_bio->state);
} else if (is_badblock(conf->mirrors[mirror].rdev,
r1_bio->sector,
@@ -1441,8 +1509,13 @@ static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
/* success */
return 1;
- if (rw == WRITE)
+ if (rw == WRITE) {
set_bit(WriteErrorSeen, &rdev->flags);
+ if (!test_and_set_bit(WantReplacement,
+ &rdev->flags))
+ set_bit(MD_RECOVERY_NEEDED, &
+ rdev->mddev->recovery);
+ }
/* need to record an error - either for the block or the device */
if (!rdev_set_badblocks(rdev, sector, sectors, 0))
md_error(rdev->mddev, rdev);
@@ -1493,7 +1566,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
}
}
d++;
- if (d == conf->raid_disks)
+ if (d == conf->raid_disks * 2)
d = 0;
} while (!success && d != r1_bio->read_disk);
@@ -1510,7 +1583,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
mdname(mddev),
bdevname(bio->bi_bdev, b),
(unsigned long long)r1_bio->sector);
- for (d = 0; d < conf->raid_disks; d++) {
+ for (d = 0; d < conf->raid_disks * 2; d++) {
rdev = conf->mirrors[d].rdev;
if (!rdev || test_bit(Faulty, &rdev->flags))
continue;
@@ -1536,7 +1609,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
/* write it back and re-read */
while (d != r1_bio->read_disk) {
if (d == 0)
- d = conf->raid_disks;
+ d = conf->raid_disks * 2;
d--;
if (r1_bio->bios[d]->bi_end_io != end_sync_read)
continue;
@@ -1551,7 +1624,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
d = start;
while (d != r1_bio->read_disk) {
if (d == 0)
- d = conf->raid_disks;
+ d = conf->raid_disks * 2;
d--;
if (r1_bio->bios[d]->bi_end_io != end_sync_read)
continue;
@@ -1584,7 +1657,7 @@ static int process_checks(struct r1bio *r1_bio)
int primary;
int i;
- for (primary = 0; primary < conf->raid_disks; primary++)
+ for (primary = 0; primary < conf->raid_disks * 2; primary++)
if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) {
r1_bio->bios[primary]->bi_end_io = NULL;
@@ -1592,7 +1665,7 @@ static int process_checks(struct r1bio *r1_bio)
break;
}
r1_bio->read_disk = primary;
- for (i = 0; i < conf->raid_disks; i++) {
+ for (i = 0; i < conf->raid_disks * 2; i++) {
int j;
int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9);
struct bio *pbio = r1_bio->bios[primary];
@@ -1656,7 +1729,7 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
{
struct r1conf *conf = mddev->private;
int i;
- int disks = conf->raid_disks;
+ int disks = conf->raid_disks * 2;
struct bio *bio, *wbio;
bio = r1_bio->bios[r1_bio->read_disk];
@@ -1737,7 +1810,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
success = 1;
else {
d++;
- if (d == conf->raid_disks)
+ if (d == conf->raid_disks * 2)
d = 0;
}
} while (!success && d != read_disk);
@@ -1753,7 +1826,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
start = d;
while (d != read_disk) {
if (d==0)
- d = conf->raid_disks;
+ d = conf->raid_disks * 2;
d--;
rdev = conf->mirrors[d].rdev;
if (rdev &&
@@ -1765,7 +1838,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
while (d != read_disk) {
char b[BDEVNAME_SIZE];
if (d==0)
- d = conf->raid_disks;
+ d = conf->raid_disks * 2;
d--;
rdev = conf->mirrors[d].rdev;
if (rdev &&
@@ -1887,7 +1960,7 @@ static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio
{
int m;
int s = r1_bio->sectors;
- for (m = 0; m < conf->raid_disks ; m++) {
+ for (m = 0; m < conf->raid_disks * 2 ; m++) {
struct md_rdev *rdev = conf->mirrors[m].rdev;
struct bio *bio = r1_bio->bios[m];
if (bio->bi_end_io == NULL)
@@ -1909,7 +1982,7 @@ static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio
static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
{
int m;
- for (m = 0; m < conf->raid_disks ; m++)
+ for (m = 0; m < conf->raid_disks * 2 ; m++)
if (r1_bio->bios[m] == IO_MADE_GOOD) {
struct md_rdev *rdev = conf->mirrors[m].rdev;
rdev_clear_badblocks(rdev,
@@ -2184,7 +2257,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
r1_bio->state = 0;
set_bit(R1BIO_IsSync, &r1_bio->state);
- for (i=0; i < conf->raid_disks; i++) {
+ for (i = 0; i < conf->raid_disks * 2; i++) {
struct md_rdev *rdev;
bio = r1_bio->bios[i];
@@ -2203,7 +2276,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
rdev = rcu_dereference(conf->mirrors[i].rdev);
if (rdev == NULL ||
test_bit(Faulty, &rdev->flags)) {
- still_degraded = 1;
+ if (i < conf->raid_disks)
+ still_degraded = 1;
} else if (!test_bit(In_sync, &rdev->flags)) {
bio->bi_rw = WRITE;
bio->bi_end_io = end_sync_write;
@@ -2254,7 +2328,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
* need to mark them bad on all write targets
*/
int ok = 1;
- for (i = 0 ; i < conf->raid_disks ; i++)
+ for (i = 0 ; i < conf->raid_disks * 2 ; i++)
if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
struct md_rdev *rdev =
rcu_dereference(conf->mirrors[i].rdev);
@@ -2323,7 +2397,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
len = sync_blocks<<9;
}
- for (i=0 ; i < conf->raid_disks; i++) {
+ for (i = 0 ; i < conf->raid_disks * 2; i++) {
bio = r1_bio->bios[i];
if (bio->bi_end_io) {
page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
@@ -2356,7 +2430,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
*/
if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
atomic_set(&r1_bio->remaining, read_targets);
- for (i=0; i<conf->raid_disks; i++) {
+ for (i = 0; i < conf->raid_disks * 2; i++) {
bio = r1_bio->bios[i];
if (bio->bi_end_io == end_sync_read) {
md_sync_acct(bio->bi_bdev, nr_sectors);
@@ -2393,7 +2467,8 @@ static struct r1conf *setup_conf(struct mddev *mddev)
if (!conf)
goto abort;
- conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
+ conf->mirrors = kzalloc(sizeof(struct mirror_info)
+ * mddev->raid_disks * 2,
GFP_KERNEL);
if (!conf->mirrors)
goto abort;
@@ -2405,7 +2480,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
if (!conf->poolinfo)
goto abort;
- conf->poolinfo->raid_disks = mddev->raid_disks;
+ conf->poolinfo->raid_disks = mddev->raid_disks * 2;
conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
r1bio_pool_free,
conf->poolinfo);
@@ -2414,14 +2489,20 @@ static struct r1conf *setup_conf(struct mddev *mddev)
conf->poolinfo->mddev = mddev;
+ err = -EINVAL;
spin_lock_init(&conf->device_lock);
list_for_each_entry(rdev, &mddev->disks, same_set) {
int disk_idx = rdev->raid_disk;
if (disk_idx >= mddev->raid_disks
|| disk_idx < 0)
continue;
- disk = conf->mirrors + disk_idx;
+ if (test_bit(Replacement, &rdev->flags))
+ disk = conf->mirrors + conf->raid_disks + disk_idx;
+ else
+ disk = conf->mirrors + disk_idx;
+ if (disk->rdev)
+ goto abort;
disk->rdev = rdev;
disk->head_position = 0;
@@ -2437,11 +2518,27 @@ static struct r1conf *setup_conf(struct mddev *mddev)
conf->pending_count = 0;
conf->recovery_disabled = mddev->recovery_disabled - 1;
+ err = -EIO;
conf->last_used = -1;
- for (i = 0; i < conf->raid_disks; i++) {
+ for (i = 0; i < conf->raid_disks * 2; i++) {
disk = conf->mirrors + i;
+ if (i < conf->raid_disks &&
+ disk[conf->raid_disks].rdev) {
+ /* This slot has a replacement. */
+ if (!disk->rdev) {
+ /* No original, just make the replacement
+ * a recovering spare
+ */
+ disk->rdev =
+ disk[conf->raid_disks].rdev;
+ disk[conf->raid_disks].rdev = NULL;
+ } else if (!test_bit(In_sync, &disk->rdev->flags))
+ /* Original is not in_sync - bad */
+ goto abort;
+ }
+
if (!disk->rdev ||
!test_bit(In_sync, &disk->rdev->flags)) {
disk->head_position = 0;
@@ -2455,7 +2552,6 @@ static struct r1conf *setup_conf(struct mddev *mddev)
conf->last_used = i;
}
- err = -EIO;
if (conf->last_used < 0) {
printk(KERN_ERR "md/raid1:%s: no operational mirrors\n",
mdname(mddev));
@@ -2665,7 +2761,7 @@ static int raid1_reshape(struct mddev *mddev)
if (!newpoolinfo)
return -ENOMEM;
newpoolinfo->mddev = mddev;
- newpoolinfo->raid_disks = raid_disks;
+ newpoolinfo->raid_disks = raid_disks * 2;
newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
r1bio_pool_free, newpoolinfo);
@@ -2673,7 +2769,8 @@ static int raid1_reshape(struct mddev *mddev)
kfree(newpoolinfo);
return -ENOMEM;
}
- newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL);
+ newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks * 2,
+ GFP_KERNEL);
if (!newmirrors) {
kfree(newpoolinfo);
mempool_destroy(newpool);
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index c732b6c..80ded13 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -12,6 +12,9 @@ struct mirror_info {
* pool was allocated for, so they know how much to allocate and free.
* mddev->raid_disks cannot be used, as it can change while a pool is active
* These two datums are stored in a kmalloced struct.
+ * The 'raid_disks' here is twice the raid_disks in r1conf.
+ * This allows space for each 'real' device to have a replacement in the
+ * second half of the array.
*/
struct pool_info {
@@ -21,7 +24,9 @@ struct pool_info {
struct r1conf {
struct mddev *mddev;
- struct mirror_info *mirrors;
+ struct mirror_info *mirrors; /* twice 'raid_disks' to
+ * allow for replacements.
+ */
int raid_disks;
 /* When choosing the best device for a read (read_balance())
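The raid1 changes above rely on a doubled mirrors array: conf->mirrors now has 2 * raid_disks entries, with the active device for slot i in the first half and its replacement, if any, at raid_disks + i. A small sketch (hypothetical helpers, assuming the struct definitions from raid1.h) of that indexing:

#include "raid1.h"

static struct mirror_info *original_slot(struct r1conf *conf, int disk)
{
	return conf->mirrors + disk;			/* 0 .. raid_disks-1 */
}

static struct mirror_info *replacement_slot(struct r1conf *conf, int disk)
{
	/* raid_disks .. 2*raid_disks-1 */
	return conf->mirrors + conf->raid_disks + disk;
}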
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 685ddf3..58c44d6 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -67,13 +67,15 @@ static int max_queued_requests = 1024;
static void allow_barrier(struct r10conf *conf);
static void lower_barrier(struct r10conf *conf);
+static int enough(struct r10conf *conf, int ignore);
static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
{
struct r10conf *conf = data;
int size = offsetof(struct r10bio, devs[conf->copies]);
- /* allocate a r10bio with room for raid_disks entries in the bios array */
+ /* allocate a r10bio with room for raid_disks entries in the
+ * bios array */
return kzalloc(size, gfp_flags);
}
@@ -123,12 +125,19 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
if (!bio)
goto out_free_bio;
r10_bio->devs[j].bio = bio;
+ if (!conf->have_replacement)
+ continue;
+ bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
+ if (!bio)
+ goto out_free_bio;
+ r10_bio->devs[j].repl_bio = bio;
}
/*
* Allocate RESYNC_PAGES data pages and attach them
* where needed.
*/
for (j = 0 ; j < nalloc; j++) {
+ struct bio *rbio = r10_bio->devs[j].repl_bio;
bio = r10_bio->devs[j].bio;
for (i = 0; i < RESYNC_PAGES; i++) {
if (j == 1 && !test_bit(MD_RECOVERY_SYNC,
@@ -143,6 +152,8 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
goto out_free_pages;
bio->bi_io_vec[i].bv_page = page;
+ if (rbio)
+ rbio->bi_io_vec[i].bv_page = page;
}
}
@@ -156,8 +167,11 @@ out_free_pages:
safe_put_page(r10_bio->devs[j].bio->bi_io_vec[i].bv_page);
j = -1;
out_free_bio:
- while ( ++j < nalloc )
+ while (++j < nalloc) {
bio_put(r10_bio->devs[j].bio);
+ if (r10_bio->devs[j].repl_bio)
+ bio_put(r10_bio->devs[j].repl_bio);
+ }
r10bio_pool_free(r10_bio, conf);
return NULL;
}
@@ -178,6 +192,9 @@ static void r10buf_pool_free(void *__r10_bio, void *data)
}
bio_put(bio);
}
+ bio = r10bio->devs[j].repl_bio;
+ if (bio)
+ bio_put(bio);
}
r10bio_pool_free(r10bio, conf);
}
@@ -191,6 +208,10 @@ static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
if (!BIO_SPECIAL(*bio))
bio_put(*bio);
*bio = NULL;
+ bio = &r10_bio->devs[i].repl_bio;
+ if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
+ bio_put(*bio);
+ *bio = NULL;
}
}
@@ -275,19 +296,27 @@ static inline void update_head_pos(int slot, struct r10bio *r10_bio)
* Find the disk number which triggered given bio
*/
static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
- struct bio *bio, int *slotp)
+ struct bio *bio, int *slotp, int *replp)
{
int slot;
+ int repl = 0;
- for (slot = 0; slot < conf->copies; slot++)
+ for (slot = 0; slot < conf->copies; slot++) {
if (r10_bio->devs[slot].bio == bio)
break;
+ if (r10_bio->devs[slot].repl_bio == bio) {
+ repl = 1;
+ break;
+ }
+ }
BUG_ON(slot == conf->copies);
update_head_pos(slot, r10_bio);
if (slotp)
*slotp = slot;
+ if (replp)
+ *replp = repl;
return r10_bio->devs[slot].devnum;
}
@@ -296,11 +325,13 @@ static void raid10_end_read_request(struct bio *bio, int error)
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
struct r10bio *r10_bio = bio->bi_private;
int slot, dev;
+ struct md_rdev *rdev;
struct r10conf *conf = r10_bio->mddev->private;
slot = r10_bio->read_slot;
dev = r10_bio->devs[slot].devnum;
+ rdev = r10_bio->devs[slot].rdev;
/*
* this branch is our 'one mirror IO has finished' event handler:
*/
@@ -317,8 +348,21 @@ static void raid10_end_read_request(struct bio *bio, int error)
* wait for the 'master' bio.
*/
set_bit(R10BIO_Uptodate, &r10_bio->state);
+ } else {
+ /* If all other devices that store this block have
+ * failed, we want to return the error upwards rather
+ * than fail the last device. Here we redefine
+ * "uptodate" to mean "Don't want to retry"
+ */
+ unsigned long flags;
+ spin_lock_irqsave(&conf->device_lock, flags);
+ if (!enough(conf, rdev->raid_disk))
+ uptodate = 1;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ }
+ if (uptodate) {
raid_end_bio_io(r10_bio);
- rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
+ rdev_dec_pending(rdev, conf->mddev);
} else {
/*
* oops, read error - keep the refcount on the rdev
@@ -327,7 +371,7 @@ static void raid10_end_read_request(struct bio *bio, int error)
printk_ratelimited(KERN_ERR
"md/raid10:%s: %s: rescheduling sector %llu\n",
mdname(conf->mddev),
- bdevname(conf->mirrors[dev].rdev->bdev, b),
+ bdevname(rdev->bdev, b),
(unsigned long long)r10_bio->sector);
set_bit(R10BIO_ReadError, &r10_bio->state);
reschedule_retry(r10_bio);
@@ -366,17 +410,35 @@ static void raid10_end_write_request(struct bio *bio, int error)
int dev;
int dec_rdev = 1;
struct r10conf *conf = r10_bio->mddev->private;
- int slot;
+ int slot, repl;
+ struct md_rdev *rdev = NULL;
- dev = find_bio_disk(conf, r10_bio, bio, &slot);
+ dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
+ if (repl)
+ rdev = conf->mirrors[dev].replacement;
+ if (!rdev) {
+ smp_rmb();
+ repl = 0;
+ rdev = conf->mirrors[dev].rdev;
+ }
/*
* this branch is our 'one mirror IO has finished' event handler:
*/
if (!uptodate) {
- set_bit(WriteErrorSeen, &conf->mirrors[dev].rdev->flags);
- set_bit(R10BIO_WriteError, &r10_bio->state);
- dec_rdev = 0;
+ if (repl)
+ /* Never record new bad blocks to replacement,
+ * just fail it.
+ */
+ md_error(rdev->mddev, rdev);
+ else {
+ set_bit(WriteErrorSeen, &rdev->flags);
+ if (!test_and_set_bit(WantReplacement, &rdev->flags))
+ set_bit(MD_RECOVERY_NEEDED,
+ &rdev->mddev->recovery);
+ set_bit(R10BIO_WriteError, &r10_bio->state);
+ dec_rdev = 0;
+ }
} else {
/*
* Set R10BIO_Uptodate in our master bio, so that
@@ -393,12 +455,15 @@ static void raid10_end_write_request(struct bio *bio, int error)
set_bit(R10BIO_Uptodate, &r10_bio->state);
/* Maybe we can clear some bad blocks. */
- if (is_badblock(conf->mirrors[dev].rdev,
+ if (is_badblock(rdev,
r10_bio->devs[slot].addr,
r10_bio->sectors,
&first_bad, &bad_sectors)) {
bio_put(bio);
- r10_bio->devs[slot].bio = IO_MADE_GOOD;
+ if (repl)
+ r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
+ else
+ r10_bio->devs[slot].bio = IO_MADE_GOOD;
dec_rdev = 0;
set_bit(R10BIO_MadeGood, &r10_bio->state);
}
@@ -414,7 +479,6 @@ static void raid10_end_write_request(struct bio *bio, int error)
rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
}
-
/*
* RAID10 layout manager
* As well as the chunksize and raid_disks count, there are two
@@ -562,14 +626,16 @@ static int raid10_mergeable_bvec(struct request_queue *q,
* FIXME: possibly should rethink readbalancing and do it differently
* depending on near_copies / far_copies geometry.
*/
-static int read_balance(struct r10conf *conf, struct r10bio *r10_bio, int *max_sectors)
+static struct md_rdev *read_balance(struct r10conf *conf,
+ struct r10bio *r10_bio,
+ int *max_sectors)
{
const sector_t this_sector = r10_bio->sector;
int disk, slot;
int sectors = r10_bio->sectors;
int best_good_sectors;
sector_t new_distance, best_dist;
- struct md_rdev *rdev;
+ struct md_rdev *rdev, *best_rdev;
int do_balance;
int best_slot;
@@ -578,6 +644,7 @@ static int read_balance(struct r10conf *conf, struct r10bio *r10_bio, int *max_s
retry:
sectors = r10_bio->sectors;
best_slot = -1;
+ best_rdev = NULL;
best_dist = MaxSector;
best_good_sectors = 0;
do_balance = 1;
@@ -599,10 +666,16 @@ retry:
if (r10_bio->devs[slot].bio == IO_BLOCKED)
continue;
disk = r10_bio->devs[slot].devnum;
- rdev = rcu_dereference(conf->mirrors[disk].rdev);
+ rdev = rcu_dereference(conf->mirrors[disk].replacement);
+ if (rdev == NULL || test_bit(Faulty, &rdev->flags) ||
+ r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
+ rdev = rcu_dereference(conf->mirrors[disk].rdev);
if (rdev == NULL)
continue;
- if (!test_bit(In_sync, &rdev->flags))
+ if (test_bit(Faulty, &rdev->flags))
+ continue;
+ if (!test_bit(In_sync, &rdev->flags) &&
+ r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
continue;
dev_sector = r10_bio->devs[slot].addr;
@@ -627,6 +700,7 @@ retry:
if (good_sectors > best_good_sectors) {
best_good_sectors = good_sectors;
best_slot = slot;
+ best_rdev = rdev;
}
if (!do_balance)
/* Must read from here */
@@ -655,16 +729,15 @@ retry:
if (new_distance < best_dist) {
best_dist = new_distance;
best_slot = slot;
+ best_rdev = rdev;
}
}
- if (slot == conf->copies)
+ if (slot >= conf->copies) {
slot = best_slot;
+ rdev = best_rdev;
+ }
if (slot >= 0) {
- disk = r10_bio->devs[slot].devnum;
- rdev = rcu_dereference(conf->mirrors[disk].rdev);
- if (!rdev)
- goto retry;
atomic_inc(&rdev->nr_pending);
if (test_bit(Faulty, &rdev->flags)) {
/* Cannot risk returning a device that failed
@@ -675,11 +748,11 @@ retry:
}
r10_bio->read_slot = slot;
} else
- disk = -1;
+ rdev = NULL;
rcu_read_unlock();
*max_sectors = best_good_sectors;
- return disk;
+ return rdev;
}
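/*
 * Illustrative sketch, not part of the patch: a standalone model of the
 * per-slot selection order read_balance() now uses.  Prefer the
 * replacement when it exists, is not Faulty and has recovered past the
 * end of this request; otherwise fall back to the primary rdev.  All
 * identifiers below (fake_rdev, fake_mirror, pick_rdev) are made-up
 * stand-ins, not kernel symbols.
 */
#include <stdbool.h>
#include <stddef.h>

struct fake_rdev {
	bool faulty;
	bool in_sync;
	unsigned long long recovery_offset;	/* sectors recovered so far */
};

struct fake_mirror {
	struct fake_rdev *rdev;		/* primary device */
	struct fake_rdev *replacement;	/* may be NULL */
};

static struct fake_rdev *pick_rdev(struct fake_mirror *m,
				   unsigned long long addr,
				   unsigned long long sectors)
{
	struct fake_rdev *rdev = m->replacement;

	/* replacement is only usable if recovered over this whole range */
	if (rdev == NULL || rdev->faulty ||
	    addr + sectors > rdev->recovery_offset)
		rdev = m->rdev;
	if (rdev == NULL || rdev->faulty)
		return NULL;
	/* a recovering primary is usable only over its recovered range */
	if (!rdev->in_sync && addr + sectors > rdev->recovery_offset)
		return NULL;
	return rdev;
}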
static int raid10_congested(void *data, int bits)
@@ -846,7 +919,6 @@ static void unfreeze_array(struct r10conf *conf)
static void make_request(struct mddev *mddev, struct bio * bio)
{
struct r10conf *conf = mddev->private;
- struct mirror_info *mirror;
struct r10bio *r10_bio;
struct bio *read_bio;
int i;
@@ -945,27 +1017,27 @@ static void make_request(struct mddev *mddev, struct bio * bio)
/*
* read balancing logic:
*/
- int disk;
+ struct md_rdev *rdev;
int slot;
read_again:
- disk = read_balance(conf, r10_bio, &max_sectors);
- slot = r10_bio->read_slot;
- if (disk < 0) {
+ rdev = read_balance(conf, r10_bio, &max_sectors);
+ if (!rdev) {
raid_end_bio_io(r10_bio);
return;
}
- mirror = conf->mirrors + disk;
+ slot = r10_bio->read_slot;
read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
md_trim_bio(read_bio, r10_bio->sector - bio->bi_sector,
max_sectors);
r10_bio->devs[slot].bio = read_bio;
+ r10_bio->devs[slot].rdev = rdev;
read_bio->bi_sector = r10_bio->devs[slot].addr +
- mirror->rdev->data_offset;
- read_bio->bi_bdev = mirror->rdev->bdev;
+ rdev->data_offset;
+ read_bio->bi_bdev = rdev->bdev;
read_bio->bi_end_io = raid10_end_read_request;
read_bio->bi_rw = READ | do_sync;
read_bio->bi_private = r10_bio;
@@ -1025,6 +1097,7 @@ read_again:
*/
plugged = mddev_check_plugged(mddev);
+ r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
raid10_find_phys(conf, r10_bio);
retry_write:
blocked_rdev = NULL;
@@ -1034,12 +1107,25 @@ retry_write:
for (i = 0; i < conf->copies; i++) {
int d = r10_bio->devs[i].devnum;
struct md_rdev *rdev = rcu_dereference(conf->mirrors[d].rdev);
+ struct md_rdev *rrdev = rcu_dereference(
+ conf->mirrors[d].replacement);
+ if (rdev == rrdev)
+ rrdev = NULL;
if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
atomic_inc(&rdev->nr_pending);
blocked_rdev = rdev;
break;
}
+ if (rrdev && unlikely(test_bit(Blocked, &rrdev->flags))) {
+ atomic_inc(&rrdev->nr_pending);
+ blocked_rdev = rrdev;
+ break;
+ }
+ if (rrdev && test_bit(Faulty, &rrdev->flags))
+ rrdev = NULL;
+
r10_bio->devs[i].bio = NULL;
+ r10_bio->devs[i].repl_bio = NULL;
if (!rdev || test_bit(Faulty, &rdev->flags)) {
set_bit(R10BIO_Degraded, &r10_bio->state);
continue;
@@ -1088,6 +1174,10 @@ retry_write:
}
r10_bio->devs[i].bio = bio;
atomic_inc(&rdev->nr_pending);
+ if (rrdev) {
+ r10_bio->devs[i].repl_bio = bio;
+ atomic_inc(&rrdev->nr_pending);
+ }
}
rcu_read_unlock();
@@ -1096,11 +1186,23 @@ retry_write:
int j;
int d;
- for (j = 0; j < i; j++)
+ for (j = 0; j < i; j++) {
if (r10_bio->devs[j].bio) {
d = r10_bio->devs[j].devnum;
rdev_dec_pending(conf->mirrors[d].rdev, mddev);
}
+ if (r10_bio->devs[j].repl_bio) {
+ struct md_rdev *rdev;
+ d = r10_bio->devs[j].devnum;
+ rdev = conf->mirrors[d].replacement;
+ if (!rdev) {
+ /* Race with remove_disk */
+ smp_mb();
+ rdev = conf->mirrors[d].rdev;
+ }
+ rdev_dec_pending(rdev, mddev);
+ }
+ }
allow_barrier(conf);
md_wait_for_blocked_rdev(blocked_rdev, mddev);
wait_barrier(conf);
@@ -1147,6 +1249,31 @@ retry_write:
bio_list_add(&conf->pending_bio_list, mbio);
conf->pending_count++;
spin_unlock_irqrestore(&conf->device_lock, flags);
+
+ if (!r10_bio->devs[i].repl_bio)
+ continue;
+
+ mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+ md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
+ max_sectors);
+ r10_bio->devs[i].repl_bio = mbio;
+
+ /* We are actively writing to the original device
+ * so it cannot disappear, so the replacement cannot
+ * become NULL here
+ */
+ mbio->bi_sector = (r10_bio->devs[i].addr+
+ conf->mirrors[d].replacement->data_offset);
+ mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
+ mbio->bi_end_io = raid10_end_write_request;
+ mbio->bi_rw = WRITE | do_sync | do_fua;
+ mbio->bi_private = r10_bio;
+
+ atomic_inc(&r10_bio->remaining);
+ spin_lock_irqsave(&conf->device_lock, flags);
+ bio_list_add(&conf->pending_bio_list, mbio);
+ conf->pending_count++;
+ spin_unlock_irqrestore(&conf->device_lock, flags);
}
/* Don't remove the bias on 'remaining' (one_write_done) until
@@ -1309,9 +1436,27 @@ static int raid10_spare_active(struct mddev *mddev)
*/
for (i = 0; i < conf->raid_disks; i++) {
tmp = conf->mirrors + i;
- if (tmp->rdev
- && !test_bit(Faulty, &tmp->rdev->flags)
- && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
+ if (tmp->replacement
+ && tmp->replacement->recovery_offset == MaxSector
+ && !test_bit(Faulty, &tmp->replacement->flags)
+ && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
+ /* Replacement has just become active */
+ if (!tmp->rdev
+ || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
+ count++;
+ if (tmp->rdev) {
+ /* Replaced device not technically faulty,
+ * but we need to be sure it gets removed
+ * and never re-added.
+ */
+ set_bit(Faulty, &tmp->rdev->flags);
+ sysfs_notify_dirent_safe(
+ tmp->rdev->sysfs_state);
+ }
+ sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
+ } else if (tmp->rdev
+ && !test_bit(Faulty, &tmp->rdev->flags)
+ && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
count++;
sysfs_notify_dirent(tmp->rdev->sysfs_state);
}
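/*
 * Illustrative sketch, not part of the patch: the accounting in the new
 * branch above.  When a replacement finishes recovery it becomes
 * In_sync; the degraded count only improves if the slot had no In_sync
 * primary, and the replaced primary is marked Faulty so it is removed
 * and never re-added.  fake_mirror_state and replacement_activated are
 * made-up names, not kernel symbols.
 */
#include <stdbool.h>

struct fake_mirror_state {
	bool have_rdev, rdev_in_sync, rdev_faulty;
	bool replacement_in_sync;
};

static int replacement_activated(struct fake_mirror_state *m)
{
	int count = 0;

	m->replacement_in_sync = true;
	/* only count a newly working slot if no In_sync primary was there */
	if (!m->have_rdev || !m->rdev_in_sync)
		count++;
	if (m->have_rdev) {
		m->rdev_in_sync = false;	/* test_and_clear_bit(In_sync) */
		m->rdev_faulty = true;		/* force removal, never re-add */
	}
	return count;		/* contributes to 'count' in raid10_spare_active() */
}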
@@ -1353,8 +1498,25 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
struct mirror_info *p = &conf->mirrors[mirror];
if (p->recovery_disabled == mddev->recovery_disabled)
continue;
- if (p->rdev)
- continue;
+ if (p->rdev) {
+ if (!test_bit(WantReplacement, &p->rdev->flags) ||
+ p->replacement != NULL)
+ continue;
+ clear_bit(In_sync, &rdev->flags);
+ set_bit(Replacement, &rdev->flags);
+ rdev->raid_disk = mirror;
+ err = 0;
+ disk_stack_limits(mddev->gendisk, rdev->bdev,
+ rdev->data_offset << 9);
+ if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+ blk_queue_max_segments(mddev->queue, 1);
+ blk_queue_segment_boundary(mddev->queue,
+ PAGE_CACHE_SIZE - 1);
+ }
+ conf->fullsync = 1;
+ rcu_assign_pointer(p->replacement, rdev);
+ break;
+ }
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
@@ -1385,40 +1547,61 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev)
return err;
}
-static int raid10_remove_disk(struct mddev *mddev, int number)
+static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct r10conf *conf = mddev->private;
int err = 0;
- struct md_rdev *rdev;
- struct mirror_info *p = conf->mirrors+ number;
+ int number = rdev->raid_disk;
+ struct md_rdev **rdevp;
+ struct mirror_info *p = conf->mirrors + number;
print_conf(conf);
- rdev = p->rdev;
- if (rdev) {
- if (test_bit(In_sync, &rdev->flags) ||
- atomic_read(&rdev->nr_pending)) {
- err = -EBUSY;
- goto abort;
- }
- /* Only remove faulty devices in recovery
- * is not possible.
- */
- if (!test_bit(Faulty, &rdev->flags) &&
- mddev->recovery_disabled != p->recovery_disabled &&
- enough(conf, -1)) {
- err = -EBUSY;
- goto abort;
- }
- p->rdev = NULL;
- synchronize_rcu();
- if (atomic_read(&rdev->nr_pending)) {
- /* lost the race, try later */
- err = -EBUSY;
- p->rdev = rdev;
- goto abort;
- }
- err = md_integrity_register(mddev);
+ if (rdev == p->rdev)
+ rdevp = &p->rdev;
+ else if (rdev == p->replacement)
+ rdevp = &p->replacement;
+ else
+ return 0;
+
+ if (test_bit(In_sync, &rdev->flags) ||
+ atomic_read(&rdev->nr_pending)) {
+ err = -EBUSY;
+ goto abort;
}
+ /* Only remove faulty devices if recovery
+ * is not possible.
+ */
+ if (!test_bit(Faulty, &rdev->flags) &&
+ mddev->recovery_disabled != p->recovery_disabled &&
+ (!p->replacement || p->replacement == rdev) &&
+ enough(conf, -1)) {
+ err = -EBUSY;
+ goto abort;
+ }
+ *rdevp = NULL;
+ synchronize_rcu();
+ if (atomic_read(&rdev->nr_pending)) {
+ /* lost the race, try later */
+ err = -EBUSY;
+ *rdevp = rdev;
+ goto abort;
+ } else if (p->replacement) {
+ /* We must have just cleared 'rdev' */
+ p->rdev = p->replacement;
+ clear_bit(Replacement, &p->replacement->flags);
+ smp_mb(); /* Make sure other CPUs may see both as identical
+ * but will never see both as NULL -- if they are careful.
+ */
+ p->replacement = NULL;
+ clear_bit(WantReplacement, &rdev->flags);
+ } else
+ /* We might have just removed the Replacement as faulty;
+ * clear the flag just in case
+ */
+ clear_bit(WantReplacement, &rdev->flags);
+
+ err = md_integrity_register(mddev);
+
abort:
print_conf(conf);
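/*
 * Illustrative sketch, not part of the patch: the reader-side pattern
 * that the smp_mb() in the promotion above pairs with.  Because the
 * writer stores the promoted pointer into ->rdev before clearing
 * ->replacement, a reader that falls back to ->rdev after seeing
 * ->replacement == NULL never finds both NULL, as long as the slot is
 * not being torn down entirely (callers hold a reference via
 * nr_pending).  repl_slot and slot_get are made-up names; the kernel
 * code issues smp_rmb()/smp_mb() at the marked point.
 */
#include <stddef.h>

struct repl_slot {
	void *rdev;		/* primary device */
	void *replacement;	/* in-flight hot replacement, may be NULL */
};

static void *slot_get(struct repl_slot *s)
{
	void *dev = s->replacement;

	if (dev == NULL)
		/* read barrier here in the kernel code */
		dev = s->rdev;
	return dev;
}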
@@ -1432,7 +1615,7 @@ static void end_sync_read(struct bio *bio, int error)
struct r10conf *conf = r10_bio->mddev->private;
int d;
- d = find_bio_disk(conf, r10_bio, bio, NULL);
+ d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
if (test_bit(BIO_UPTODATE, &bio->bi_flags))
set_bit(R10BIO_Uptodate, &r10_bio->state);
@@ -1493,19 +1676,34 @@ static void end_sync_write(struct bio *bio, int error)
sector_t first_bad;
int bad_sectors;
int slot;
-
- d = find_bio_disk(conf, r10_bio, bio, &slot);
+ int repl;
+ struct md_rdev *rdev = NULL;
+
+ d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
+ if (repl)
+ rdev = conf->mirrors[d].replacement;
+ if (!rdev) {
+ smp_mb();
+ rdev = conf->mirrors[d].rdev;
+ }
if (!uptodate) {
- set_bit(WriteErrorSeen, &conf->mirrors[d].rdev->flags);
- set_bit(R10BIO_WriteError, &r10_bio->state);
- } else if (is_badblock(conf->mirrors[d].rdev,
+ if (repl)
+ md_error(mddev, rdev);
+ else {
+ set_bit(WriteErrorSeen, &rdev->flags);
+ if (!test_and_set_bit(WantReplacement, &rdev->flags))
+ set_bit(MD_RECOVERY_NEEDED,
+ &rdev->mddev->recovery);
+ set_bit(R10BIO_WriteError, &r10_bio->state);
+ }
+ } else if (is_badblock(rdev,
r10_bio->devs[slot].addr,
r10_bio->sectors,
&first_bad, &bad_sectors))
set_bit(R10BIO_MadeGood, &r10_bio->state);
- rdev_dec_pending(conf->mirrors[d].rdev, mddev);
+ rdev_dec_pending(rdev, mddev);
end_sync_request(r10_bio);
}
@@ -1609,6 +1807,29 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
generic_make_request(tbio);
}
+ /* Now write out to any replacement devices
+ * that are active
+ */
+ for (i = 0; i < conf->copies; i++) {
+ int j, d;
+ int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
+
+ tbio = r10_bio->devs[i].repl_bio;
+ if (!tbio || !tbio->bi_end_io)
+ continue;
+ if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
+ && r10_bio->devs[i].bio != fbio)
+ for (j = 0; j < vcnt; j++)
+ memcpy(page_address(tbio->bi_io_vec[j].bv_page),
+ page_address(fbio->bi_io_vec[j].bv_page),
+ PAGE_SIZE);
+ d = r10_bio->devs[i].devnum;
+ atomic_inc(&r10_bio->remaining);
+ md_sync_acct(conf->mirrors[d].replacement->bdev,
+ tbio->bi_size >> 9);
+ generic_make_request(tbio);
+ }
+
done:
if (atomic_dec_and_test(&r10_bio->remaining)) {
md_done_sync(mddev, r10_bio->sectors, 1);
@@ -1668,8 +1889,13 @@ static void fix_recovery_read_error(struct r10bio *r10_bio)
s << 9,
bio->bi_io_vec[idx].bv_page,
WRITE, false);
- if (!ok)
+ if (!ok) {
set_bit(WriteErrorSeen, &rdev->flags);
+ if (!test_and_set_bit(WantReplacement,
+ &rdev->flags))
+ set_bit(MD_RECOVERY_NEEDED,
+ &rdev->mddev->recovery);
+ }
}
if (!ok) {
/* We don't worry if we cannot set a bad block -
@@ -1709,7 +1935,7 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
{
struct r10conf *conf = mddev->private;
int d;
- struct bio *wbio;
+ struct bio *wbio, *wbio2;
if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
fix_recovery_read_error(r10_bio);
@@ -1721,12 +1947,20 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
* share the pages with the first bio
* and submit the write request
*/
- wbio = r10_bio->devs[1].bio;
d = r10_bio->devs[1].devnum;
-
- atomic_inc(&conf->mirrors[d].rdev->nr_pending);
- md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
- generic_make_request(wbio);
+ wbio = r10_bio->devs[1].bio;
+ wbio2 = r10_bio->devs[1].repl_bio;
+ if (wbio->bi_end_io) {
+ atomic_inc(&conf->mirrors[d].rdev->nr_pending);
+ md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
+ generic_make_request(wbio);
+ }
+ if (wbio2 && wbio2->bi_end_io) {
+ atomic_inc(&conf->mirrors[d].replacement->nr_pending);
+ md_sync_acct(conf->mirrors[d].replacement->bdev,
+ wbio2->bi_size >> 9);
+ generic_make_request(wbio2);
+ }
}
@@ -1779,8 +2013,12 @@ static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
if (sync_page_io(rdev, sector, sectors << 9, page, rw, false))
/* success */
return 1;
- if (rw == WRITE)
+ if (rw == WRITE) {
set_bit(WriteErrorSeen, &rdev->flags);
+ if (!test_and_set_bit(WantReplacement, &rdev->flags))
+ set_bit(MD_RECOVERY_NEEDED,
+ &rdev->mddev->recovery);
+ }
/* need to record an error - either for the block or the device */
if (!rdev_set_badblocks(rdev, sector, sectors, 0))
md_error(rdev->mddev, rdev);
@@ -1828,6 +2066,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
"md/raid10:%s: %s: Failing raid device\n",
mdname(mddev), b);
md_error(mddev, conf->mirrors[d].rdev);
+ r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
return;
}
@@ -1881,8 +2120,11 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
rdev,
r10_bio->devs[r10_bio->read_slot].addr
+ sect,
- s, 0))
+ s, 0)) {
md_error(mddev, rdev);
+ r10_bio->devs[r10_bio->read_slot].bio
+ = IO_BLOCKED;
+ }
break;
}
@@ -2060,10 +2302,9 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
{
int slot = r10_bio->read_slot;
- int mirror = r10_bio->devs[slot].devnum;
struct bio *bio;
struct r10conf *conf = mddev->private;
- struct md_rdev *rdev;
+ struct md_rdev *rdev = r10_bio->devs[slot].rdev;
char b[BDEVNAME_SIZE];
unsigned long do_sync;
int max_sectors;
@@ -2076,34 +2317,33 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
* This is all done synchronously while the array is
* frozen.
*/
+ bio = r10_bio->devs[slot].bio;
+ bdevname(bio->bi_bdev, b);
+ bio_put(bio);
+ r10_bio->devs[slot].bio = NULL;
+
if (mddev->ro == 0) {
freeze_array(conf);
fix_read_error(conf, mddev, r10_bio);
unfreeze_array(conf);
- }
- rdev_dec_pending(conf->mirrors[mirror].rdev, mddev);
+ } else
+ r10_bio->devs[slot].bio = IO_BLOCKED;
+
+ rdev_dec_pending(rdev, mddev);
- bio = r10_bio->devs[slot].bio;
- bdevname(bio->bi_bdev, b);
- r10_bio->devs[slot].bio =
- mddev->ro ? IO_BLOCKED : NULL;
read_more:
- mirror = read_balance(conf, r10_bio, &max_sectors);
- if (mirror == -1) {
+ rdev = read_balance(conf, r10_bio, &max_sectors);
+ if (rdev == NULL) {
printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
" read error for block %llu\n",
mdname(mddev), b,
(unsigned long long)r10_bio->sector);
raid_end_bio_io(r10_bio);
- bio_put(bio);
return;
}
do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
- if (bio)
- bio_put(bio);
slot = r10_bio->read_slot;
- rdev = conf->mirrors[mirror].rdev;
printk_ratelimited(
KERN_ERR
"md/raid10:%s: %s: redirecting"
@@ -2117,6 +2357,7 @@ read_more:
r10_bio->sector - bio->bi_sector,
max_sectors);
r10_bio->devs[slot].bio = bio;
+ r10_bio->devs[slot].rdev = rdev;
bio->bi_sector = r10_bio->devs[slot].addr
+ rdev->data_offset;
bio->bi_bdev = rdev->bdev;
@@ -2137,7 +2378,6 @@ read_more:
mbio->bi_phys_segments++;
spin_unlock_irq(&conf->device_lock);
generic_make_request(bio);
- bio = NULL;
r10_bio = mempool_alloc(conf->r10bio_pool,
GFP_NOIO);
@@ -2187,6 +2427,22 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
r10_bio->sectors, 0))
md_error(conf->mddev, rdev);
}
+ rdev = conf->mirrors[dev].replacement;
+ if (r10_bio->devs[m].repl_bio == NULL)
+ continue;
+ if (test_bit(BIO_UPTODATE,
+ &r10_bio->devs[m].repl_bio->bi_flags)) {
+ rdev_clear_badblocks(
+ rdev,
+ r10_bio->devs[m].addr,
+ r10_bio->sectors);
+ } else {
+ if (!rdev_set_badblocks(
+ rdev,
+ r10_bio->devs[m].addr,
+ r10_bio->sectors, 0))
+ md_error(conf->mddev, rdev);
+ }
}
put_buf(r10_bio);
} else {
@@ -2209,6 +2465,15 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
}
rdev_dec_pending(rdev, conf->mddev);
}
+ bio = r10_bio->devs[m].repl_bio;
+ rdev = conf->mirrors[dev].replacement;
+ if (rdev && bio == IO_MADE_GOOD) {
+ rdev_clear_badblocks(
+ rdev,
+ r10_bio->devs[m].addr,
+ r10_bio->sectors);
+ rdev_dec_pending(rdev, conf->mddev);
+ }
}
if (test_bit(R10BIO_WriteError,
&r10_bio->state))
@@ -2272,9 +2537,14 @@ static void raid10d(struct mddev *mddev)
static int init_resync(struct r10conf *conf)
{
int buffs;
+ int i;
buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
BUG_ON(conf->r10buf_pool);
+ conf->have_replacement = 0;
+ for (i = 0; i < conf->raid_disks; i++)
+ if (conf->mirrors[i].replacement)
+ conf->have_replacement = 1;
conf->r10buf_pool = mempool_create(buffs, r10buf_pool_alloc, r10buf_pool_free, conf);
if (!conf->r10buf_pool)
return -ENOMEM;
@@ -2355,9 +2625,22 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bitmap_end_sync(mddev->bitmap, sect,
&sync_blocks, 1);
}
- } else /* completed sync */
+ } else {
+ /* completed sync */
+ if ((!mddev->bitmap || conf->fullsync)
+ && conf->have_replacement
+ && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+ /* Completed a full sync so the replacements
+ * are now fully recovered.
+ */
+ for (i = 0; i < conf->raid_disks; i++)
+ if (conf->mirrors[i].replacement)
+ conf->mirrors[i].replacement
+ ->recovery_offset
+ = MaxSector;
+ }
conf->fullsync = 0;
-
+ }
bitmap_close_sync(mddev->bitmap);
close_sync(conf);
*skipped = 1;
@@ -2414,23 +2697,30 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
sector_t sect;
int must_sync;
int any_working;
-
- if (conf->mirrors[i].rdev == NULL ||
- test_bit(In_sync, &conf->mirrors[i].rdev->flags))
+ struct mirror_info *mirror = &conf->mirrors[i];
+
+ if ((mirror->rdev == NULL ||
+ test_bit(In_sync, &mirror->rdev->flags))
+ &&
+ (mirror->replacement == NULL ||
+ test_bit(Faulty,
+ &mirror->replacement->flags)))
continue;
still_degraded = 0;
/* want to reconstruct this device */
rb2 = r10_bio;
sect = raid10_find_virt(conf, sector_nr, i);
- /* Unless we are doing a full sync, we only need
- * to recover the block if it is set in the bitmap
+ /* Unless we are doing a full sync, or a replacement,
+ * we only need to recover the block if it is set in
+ * the bitmap
*/
must_sync = bitmap_start_sync(mddev->bitmap, sect,
&sync_blocks, 1);
if (sync_blocks < max_sync)
max_sync = sync_blocks;
if (!must_sync &&
+ mirror->replacement == NULL &&
!conf->fullsync) {
/* yep, skip the sync_blocks here, but don't assume
* that there will never be anything to do here
@@ -2500,33 +2790,60 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
bio->bi_end_io = end_sync_read;
bio->bi_rw = READ;
from_addr = r10_bio->devs[j].addr;
- bio->bi_sector = from_addr +
- conf->mirrors[d].rdev->data_offset;
- bio->bi_bdev = conf->mirrors[d].rdev->bdev;
- atomic_inc(&conf->mirrors[d].rdev->nr_pending);
- atomic_inc(&r10_bio->remaining);
- /* and we write to 'i' */
+ bio->bi_sector = from_addr + rdev->data_offset;
+ bio->bi_bdev = rdev->bdev;
+ atomic_inc(&rdev->nr_pending);
+ /* and we write to 'i' (if not in_sync) */
for (k=0; k<conf->copies; k++)
if (r10_bio->devs[k].devnum == i)
break;
BUG_ON(k == conf->copies);
- bio = r10_bio->devs[1].bio;
- bio->bi_next = biolist;
- biolist = bio;
- bio->bi_private = r10_bio;
- bio->bi_end_io = end_sync_write;
- bio->bi_rw = WRITE;
to_addr = r10_bio->devs[k].addr;
- bio->bi_sector = to_addr +
- conf->mirrors[i].rdev->data_offset;
- bio->bi_bdev = conf->mirrors[i].rdev->bdev;
-
r10_bio->devs[0].devnum = d;
r10_bio->devs[0].addr = from_addr;
r10_bio->devs[1].devnum = i;
r10_bio->devs[1].addr = to_addr;
+ rdev = mirror->rdev;
+ if (!test_bit(In_sync, &rdev->flags)) {
+ bio = r10_bio->devs[1].bio;
+ bio->bi_next = biolist;
+ biolist = bio;
+ bio->bi_private = r10_bio;
+ bio->bi_end_io = end_sync_write;
+ bio->bi_rw = WRITE;
+ bio->bi_sector = to_addr
+ + rdev->data_offset;
+ bio->bi_bdev = rdev->bdev;
+ atomic_inc(&r10_bio->remaining);
+ } else
+ r10_bio->devs[1].bio->bi_end_io = NULL;
+
+ /* and maybe write to replacement */
+ bio = r10_bio->devs[1].repl_bio;
+ if (bio)
+ bio->bi_end_io = NULL;
+ rdev = mirror->replacement;
+ /* Note: if rdev != NULL, then bio
+ * cannot be NULL as r10buf_pool_alloc will
+ * have allocated it.
+ * So the second test here is pointless.
+ * But it keeps semantic-checkers happy, and
+ * this comment keeps human reviewers
+ * happy.
+ */
+ if (rdev == NULL || bio == NULL ||
+ test_bit(Faulty, &rdev->flags))
+ break;
+ bio->bi_next = biolist;
+ biolist = bio;
+ bio->bi_private = r10_bio;
+ bio->bi_end_io = end_sync_write;
+ bio->bi_rw = WRITE;
+ bio->bi_sector = to_addr + rdev->data_offset;
+ bio->bi_bdev = rdev->bdev;
+ atomic_inc(&r10_bio->remaining);
break;
}
if (j == conf->copies) {
@@ -2544,8 +2861,16 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
for (k = 0; k < conf->copies; k++)
if (r10_bio->devs[k].devnum == i)
break;
- if (!rdev_set_badblocks(
- conf->mirrors[i].rdev,
+ if (!test_bit(In_sync,
+ &mirror->rdev->flags)
+ && !rdev_set_badblocks(
+ mirror->rdev,
+ r10_bio->devs[k].addr,
+ max_sync, 0))
+ any_working = 0;
+ if (mirror->replacement &&
+ !rdev_set_badblocks(
+ mirror->replacement,
r10_bio->devs[k].addr,
max_sync, 0))
any_working = 0;
@@ -2556,7 +2881,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
printk(KERN_INFO "md/raid10:%s: insufficient "
"working devices for recovery.\n",
mdname(mddev));
- conf->mirrors[i].recovery_disabled
+ mirror->recovery_disabled
= mddev->recovery_disabled;
}
break;
@@ -2605,6 +2930,9 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
sector_t first_bad, sector;
int bad_sectors;
+ if (r10_bio->devs[i].repl_bio)
+ r10_bio->devs[i].repl_bio->bi_end_io = NULL;
+
bio = r10_bio->devs[i].bio;
bio->bi_end_io = NULL;
clear_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -2635,6 +2963,27 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
conf->mirrors[d].rdev->data_offset;
bio->bi_bdev = conf->mirrors[d].rdev->bdev;
count++;
+
+ if (conf->mirrors[d].replacement == NULL ||
+ test_bit(Faulty,
+ &conf->mirrors[d].replacement->flags))
+ continue;
+
+ /* Need to set up for writing to the replacement */
+ bio = r10_bio->devs[i].repl_bio;
+ clear_bit(BIO_UPTODATE, &bio->bi_flags);
+
+ sector = r10_bio->devs[i].addr;
+ atomic_inc(&conf->mirrors[d].rdev->nr_pending);
+ bio->bi_next = biolist;
+ biolist = bio;
+ bio->bi_private = r10_bio;
+ bio->bi_end_io = end_sync_write;
+ bio->bi_rw = WRITE;
+ bio->bi_sector = sector +
+ conf->mirrors[d].replacement->data_offset;
+ bio->bi_bdev = conf->mirrors[d].replacement->bdev;
+ count++;
}
if (count < 2) {
@@ -2643,6 +2992,11 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
if (r10_bio->devs[i].bio->bi_end_io)
rdev_dec_pending(conf->mirrors[d].rdev,
mddev);
+ if (r10_bio->devs[i].repl_bio &&
+ r10_bio->devs[i].repl_bio->bi_end_io)
+ rdev_dec_pending(
+ conf->mirrors[d].replacement,
+ mddev);
}
put_buf(r10_bio);
biolist = NULL;
@@ -2896,7 +3250,16 @@ static int run(struct mddev *mddev)
continue;
disk = conf->mirrors + disk_idx;
- disk->rdev = rdev;
+ if (test_bit(Replacement, &rdev->flags)) {
+ if (disk->replacement)
+ goto out_free_conf;
+ disk->replacement = rdev;
+ } else {
+ if (disk->rdev)
+ goto out_free_conf;
+ disk->rdev = rdev;
+ }
+
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);
/* as we don't honour merge_bvec_fn, we must never risk
@@ -2923,6 +3286,13 @@ static int run(struct mddev *mddev)
disk = conf->mirrors + i;
+ if (!disk->rdev && disk->replacement) {
+ /* The replacement is all we have - use it */
+ disk->rdev = disk->replacement;
+ disk->replacement = NULL;
+ clear_bit(Replacement, &disk->rdev->flags);
+ }
+
if (!disk->rdev ||
!test_bit(In_sync, &disk->rdev->flags)) {
disk->head_position = 0;
diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h
index 7facfdf..7c615613 100644
--- a/drivers/md/raid10.h
+++ b/drivers/md/raid10.h
@@ -2,7 +2,7 @@
#define _RAID10_H
struct mirror_info {
- struct md_rdev *rdev;
+ struct md_rdev *rdev, *replacement;
sector_t head_position;
int recovery_disabled; /* matches
* mddev->recovery_disabled
@@ -18,12 +18,13 @@ struct r10conf {
spinlock_t device_lock;
/* geometry */
- int near_copies; /* number of copies laid out raid0 style */
+ int near_copies; /* number of copies laid out
+ * raid0 style */
int far_copies; /* number of copies laid out
* at large strides across drives
*/
- int far_offset; /* far_copies are offset by 1 stripe
- * instead of many
+ int far_offset; /* far_copies are offset by 1
+ * stripe instead of many
*/
int copies; /* near_copies * far_copies.
* must be <= raid_disks
@@ -34,10 +35,11 @@ struct r10conf {
* 1 stripe.
*/
- sector_t dev_sectors; /* temp copy of mddev->dev_sectors */
+ sector_t dev_sectors; /* temp copy of
+ * mddev->dev_sectors */
- int chunk_shift; /* shift from chunks to sectors */
- sector_t chunk_mask;
+ int chunk_shift; /* shift from chunks to sectors */
+ sector_t chunk_mask;
struct list_head retry_list;
/* queue pending writes and submit them on unplug */
@@ -45,20 +47,22 @@ struct r10conf {
int pending_count;
spinlock_t resync_lock;
- int nr_pending;
- int nr_waiting;
- int nr_queued;
- int barrier;
+ int nr_pending;
+ int nr_waiting;
+ int nr_queued;
+ int barrier;
sector_t next_resync;
int fullsync; /* set to 1 if a full sync is needed,
* (fresh device added).
* Cleared when a sync completes.
*/
-
+ int have_replacement; /* There is at least one
+ * replacement device.
+ */
wait_queue_head_t wait_barrier;
- mempool_t *r10bio_pool;
- mempool_t *r10buf_pool;
+ mempool_t *r10bio_pool;
+ mempool_t *r10buf_pool;
struct page *tmppage;
/* When taking over an array from a different personality, we store
@@ -98,11 +102,18 @@ struct r10bio {
* When resyncing we also use one for each copy.
* When reconstructing, we use 2 bios, one for read, one for write.
* We choose the number when they are allocated.
+ * We sometimes need an extra bio to write to the replacement.
*/
struct {
- struct bio *bio;
- sector_t addr;
- int devnum;
+ struct bio *bio;
+ union {
+ struct bio *repl_bio; /* used for resync and
+ * writes */
+ struct md_rdev *rdev; /* used for reads
+ * (read_slot >= 0) */
+ };
+ sector_t addr;
+ int devnum;
} devs[0];
};
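/*
 * Illustrative sketch, not part of the patch: how the union above is
 * meant to be used.  On the read path read_slot is >= 0 and the chosen
 * slot records which rdev the read went to; on the write path read_slot
 * is set to -1 and the same storage carries the bio aimed at the
 * replacement (if any).  The two uses never overlap for one r10bio,
 * which is what makes sharing the space safe.  All identifiers below
 * are made-up stand-ins, not kernel symbols.
 */
#include <stddef.h>

struct fake_r10dev {
	void *bio;
	union {
		void *repl_bio;	/* writes / resync: bio for the replacement */
		void *rdev;	/* reads: device the read was issued to */
	};
};

struct fake_r10bio {
	int read_slot;			/* >= 0 only on the read path */
	struct fake_r10dev devs[2];
};

static void note_read_target(struct fake_r10bio *r10, int slot, void *rdev)
{
	r10->read_slot = slot;
	r10->devs[slot].rdev = rdev;
}

static void note_repl_write(struct fake_r10bio *r10, int slot, void *repl_bio)
{
	r10->read_slot = -1;		/* tells the free path repl_bio is live */
	r10->devs[slot].repl_bio = repl_bio;
}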
@@ -121,17 +132,19 @@ struct r10bio {
#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2)
/* bits for r10bio.state */
-#define R10BIO_Uptodate 0
-#define R10BIO_IsSync 1
-#define R10BIO_IsRecover 2
-#define R10BIO_Degraded 3
+enum r10bio_state {
+ R10BIO_Uptodate,
+ R10BIO_IsSync,
+ R10BIO_IsRecover,
+ R10BIO_Degraded,
/* Set ReadError on bios that experience a read error
* so that raid10d knows what to do with them.
*/
-#define R10BIO_ReadError 4
+ R10BIO_ReadError,
/* If a write for this request means we can clear some
* known-bad-block records, we set this flag.
*/
-#define R10BIO_MadeGood 5
-#define R10BIO_WriteError 6
+ R10BIO_MadeGood,
+ R10BIO_WriteError,
+};
#endif
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 858fdbb..360f2b9 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -370,12 +370,10 @@ static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector,
* of the two sections, and some non-in_sync devices may
* be insync in the section most affected by failed devices.
*/
-static int has_failed(struct r5conf *conf)
+static int calc_degraded(struct r5conf *conf)
{
- int degraded;
+ int degraded, degraded2;
int i;
- if (conf->mddev->reshape_position == MaxSector)
- return conf->mddev->degraded > conf->max_degraded;
rcu_read_lock();
degraded = 0;
@@ -399,14 +397,14 @@ static int has_failed(struct r5conf *conf)
degraded++;
}
rcu_read_unlock();
- if (degraded > conf->max_degraded)
- return 1;
+ if (conf->raid_disks == conf->previous_raid_disks)
+ return degraded;
rcu_read_lock();
- degraded = 0;
+ degraded2 = 0;
for (i = 0; i < conf->raid_disks; i++) {
struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev);
if (!rdev || test_bit(Faulty, &rdev->flags))
- degraded++;
+ degraded2++;
else if (test_bit(In_sync, &rdev->flags))
;
else
@@ -416,9 +414,22 @@ static int has_failed(struct r5conf *conf)
* almost certainly hasn't.
*/
if (conf->raid_disks <= conf->previous_raid_disks)
- degraded++;
+ degraded2++;
}
rcu_read_unlock();
+ if (degraded2 > degraded)
+ return degraded2;
+ return degraded;
+}
+
+static int has_failed(struct r5conf *conf)
+{
+ int degraded;
+
+ if (conf->mddev->reshape_position == MaxSector)
+ return conf->mddev->degraded > conf->max_degraded;
+
+ degraded = calc_degraded(conf);
if (degraded > conf->max_degraded)
return 1;
return 0;
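/*
 * Illustrative sketch, not part of the patch: the shape of the
 * calc_degraded() computation, ignoring the extra handling of devices
 * that exist but are not yet In_sync.  During a reshape the array has
 * an old and a new geometry; a slot counts as failed if it is missing
 * or Faulty, and the reported value is the worse of the two counts so
 * has_failed() stays conservative.  fake_calc_degraded is a made-up
 * name, not a kernel symbol.
 */
static int fake_calc_degraded(const int failed_prev[], int prev_disks,
			      const int failed_cur[], int cur_disks)
{
	int i, degraded = 0, degraded2 = 0;

	for (i = 0; i < prev_disks; i++)
		degraded += failed_prev[i];	/* 1 = missing or Faulty */
	if (cur_disks == prev_disks)
		return degraded;		/* no reshape in progress */
	for (i = 0; i < cur_disks; i++)
		degraded2 += failed_cur[i];
	return degraded2 > degraded ? degraded2 : degraded;
}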
@@ -492,8 +503,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
for (i = disks; i--; ) {
int rw;
- struct bio *bi;
- struct md_rdev *rdev;
+ int replace_only = 0;
+ struct bio *bi, *rbi;
+ struct md_rdev *rdev, *rrdev = NULL;
if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) {
if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags))
rw = WRITE_FUA;
@@ -501,27 +513,57 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
rw = WRITE;
} else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags))
rw = READ;
- else
+ else if (test_and_clear_bit(R5_WantReplace,
+ &sh->dev[i].flags)) {
+ rw = WRITE;
+ replace_only = 1;
+ } else
continue;
bi = &sh->dev[i].req;
+ rbi = &sh->dev[i].rreq; /* For writing to replacement */
bi->bi_rw = rw;
- if (rw & WRITE)
+ rbi->bi_rw = rw;
+ if (rw & WRITE) {
bi->bi_end_io = raid5_end_write_request;
- else
+ rbi->bi_end_io = raid5_end_write_request;
+ } else
bi->bi_end_io = raid5_end_read_request;
rcu_read_lock();
+ rrdev = rcu_dereference(conf->disks[i].replacement);
+ smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
rdev = rcu_dereference(conf->disks[i].rdev);
+ if (!rdev) {
+ rdev = rrdev;
+ rrdev = NULL;
+ }
+ if (rw & WRITE) {
+ if (replace_only)
+ rdev = NULL;
+ if (rdev == rrdev)
+ /* We raced and saw duplicates */
+ rrdev = NULL;
+ } else {
+ if (test_bit(R5_ReadRepl, &sh->dev[i].flags) && rrdev)
+ rdev = rrdev;
+ rrdev = NULL;
+ }
+
if (rdev && test_bit(Faulty, &rdev->flags))
rdev = NULL;
if (rdev)
atomic_inc(&rdev->nr_pending);
+ if (rrdev && test_bit(Faulty, &rrdev->flags))
+ rrdev = NULL;
+ if (rrdev)
+ atomic_inc(&rrdev->nr_pending);
rcu_read_unlock();
/* We have already checked bad blocks for reads. Now
- * need to check for writes.
+ * need to check for writes. We never accept write errors
+ * on the replacement, so we don't need to check rrdev.
*/
while ((rw & WRITE) && rdev &&
test_bit(WriteErrorSeen, &rdev->flags)) {
@@ -551,7 +593,8 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
}
if (rdev) {
- if (s->syncing || s->expanding || s->expanded)
+ if (s->syncing || s->expanding || s->expanded
+ || s->replacing)
md_sync_acct(rdev->bdev, STRIPE_SECTORS);
set_bit(STRIPE_IO_STARTED, &sh->state);
@@ -563,16 +606,38 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
atomic_inc(&sh->count);
bi->bi_sector = sh->sector + rdev->data_offset;
bi->bi_flags = 1 << BIO_UPTODATE;
- bi->bi_vcnt = 1;
- bi->bi_max_vecs = 1;
bi->bi_idx = 0;
- bi->bi_io_vec = &sh->dev[i].vec;
bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
bi->bi_io_vec[0].bv_offset = 0;
bi->bi_size = STRIPE_SIZE;
bi->bi_next = NULL;
+ if (rrdev)
+ set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
generic_make_request(bi);
- } else {
+ }
+ if (rrdev) {
+ if (s->syncing || s->expanding || s->expanded
+ || s->replacing)
+ md_sync_acct(rrdev->bdev, STRIPE_SECTORS);
+
+ set_bit(STRIPE_IO_STARTED, &sh->state);
+
+ rbi->bi_bdev = rrdev->bdev;
+ pr_debug("%s: for %llu schedule op %ld on "
+ "replacement disc %d\n",
+ __func__, (unsigned long long)sh->sector,
+ rbi->bi_rw, i);
+ atomic_inc(&sh->count);
+ rbi->bi_sector = sh->sector + rrdev->data_offset;
+ rbi->bi_flags = 1 << BIO_UPTODATE;
+ rbi->bi_idx = 0;
+ rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
+ rbi->bi_io_vec[0].bv_offset = 0;
+ rbi->bi_size = STRIPE_SIZE;
+ rbi->bi_next = NULL;
+ generic_make_request(rbi);
+ }
+ if (!rdev && !rrdev) {
if (rw & WRITE)
set_bit(STRIPE_DEGRADED, &sh->state);
pr_debug("skip op %ld on disc %d for sector %llu\n",
@@ -1583,7 +1648,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
int disks = sh->disks, i;
int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
char b[BDEVNAME_SIZE];
- struct md_rdev *rdev;
+ struct md_rdev *rdev = NULL;
for (i=0 ; i<disks; i++)
@@ -1597,11 +1662,23 @@ static void raid5_end_read_request(struct bio * bi, int error)
BUG();
return;
}
+ if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
+ /* If replacement finished while this request was outstanding,
+ * 'replacement' might be NULL already.
+ * In that case it moved down to 'rdev'.
+ * rdev is not removed until all requests are finished.
+ */
+ rdev = conf->disks[i].replacement;
+ if (!rdev)
+ rdev = conf->disks[i].rdev;
if (uptodate) {
set_bit(R5_UPTODATE, &sh->dev[i].flags);
if (test_bit(R5_ReadError, &sh->dev[i].flags)) {
- rdev = conf->disks[i].rdev;
+ /* Note that this cannot happen on a
+ * replacement device. We just fail those on
+ * any error
+ */
printk_ratelimited(
KERN_INFO
"md/raid:%s: read error corrected"
@@ -1614,16 +1691,24 @@ static void raid5_end_read_request(struct bio * bi, int error)
clear_bit(R5_ReadError, &sh->dev[i].flags);
clear_bit(R5_ReWrite, &sh->dev[i].flags);
}
- if (atomic_read(&conf->disks[i].rdev->read_errors))
- atomic_set(&conf->disks[i].rdev->read_errors, 0);
+ if (atomic_read(&rdev->read_errors))
+ atomic_set(&rdev->read_errors, 0);
} else {
- const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
+ const char *bdn = bdevname(rdev->bdev, b);
int retry = 0;
- rdev = conf->disks[i].rdev;
clear_bit(R5_UPTODATE, &sh->dev[i].flags);
atomic_inc(&rdev->read_errors);
- if (conf->mddev->degraded >= conf->max_degraded)
+ if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
+ printk_ratelimited(
+ KERN_WARNING
+ "md/raid:%s: read error on replacement device "
+ "(sector %llu on %s).\n",
+ mdname(conf->mddev),
+ (unsigned long long)(sh->sector
+ + rdev->data_offset),
+ bdn);
+ else if (conf->mddev->degraded >= conf->max_degraded)
printk_ratelimited(
KERN_WARNING
"md/raid:%s: read error not correctable "
@@ -1657,7 +1742,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
md_error(conf->mddev, rdev);
}
}
- rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
+ rdev_dec_pending(rdev, conf->mddev);
clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
@@ -1668,14 +1753,30 @@ static void raid5_end_write_request(struct bio *bi, int error)
struct stripe_head *sh = bi->bi_private;
struct r5conf *conf = sh->raid_conf;
int disks = sh->disks, i;
+ struct md_rdev *uninitialized_var(rdev);
int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
sector_t first_bad;
int bad_sectors;
+ int replacement = 0;
- for (i=0 ; i<disks; i++)
- if (bi == &sh->dev[i].req)
+ for (i = 0 ; i < disks; i++) {
+ if (bi == &sh->dev[i].req) {
+ rdev = conf->disks[i].rdev;
break;
-
+ }
+ if (bi == &sh->dev[i].rreq) {
+ rdev = conf->disks[i].replacement;
+ if (rdev)
+ replacement = 1;
+ else
+ /* rdev was removed and 'replacement'
+ * replaced it. rdev is not removed
+ * until all requests are finished.
+ */
+ rdev = conf->disks[i].rdev;
+ break;
+ }
+ }
pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
(unsigned long long)sh->sector, i, atomic_read(&sh->count),
uptodate);
@@ -1684,21 +1785,33 @@ static void raid5_end_write_request(struct bio *bi, int error)
return;
}
- if (!uptodate) {
- set_bit(WriteErrorSeen, &conf->disks[i].rdev->flags);
- set_bit(R5_WriteError, &sh->dev[i].flags);
- } else if (is_badblock(conf->disks[i].rdev, sh->sector, STRIPE_SECTORS,
- &first_bad, &bad_sectors))
- set_bit(R5_MadeGood, &sh->dev[i].flags);
+ if (replacement) {
+ if (!uptodate)
+ md_error(conf->mddev, rdev);
+ else if (is_badblock(rdev, sh->sector,
+ STRIPE_SECTORS,
+ &first_bad, &bad_sectors))
+ set_bit(R5_MadeGoodRepl, &sh->dev[i].flags);
+ } else {
+ if (!uptodate) {
+ set_bit(WriteErrorSeen, &rdev->flags);
+ set_bit(R5_WriteError, &sh->dev[i].flags);
+ if (!test_and_set_bit(WantReplacement, &rdev->flags))
+ set_bit(MD_RECOVERY_NEEDED,
+ &rdev->mddev->recovery);
+ } else if (is_badblock(rdev, sh->sector,
+ STRIPE_SECTORS,
+ &first_bad, &bad_sectors))
+ set_bit(R5_MadeGood, &sh->dev[i].flags);
+ }
+ rdev_dec_pending(rdev, conf->mddev);
- rdev_dec_pending(conf->disks[i].rdev, conf->mddev);
-
- clear_bit(R5_LOCKED, &sh->dev[i].flags);
+ if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
+ clear_bit(R5_LOCKED, &sh->dev[i].flags);
set_bit(STRIPE_HANDLE, &sh->state);
release_stripe(sh);
}
-
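/*
 * Illustrative sketch, not part of the patch: the idea behind
 * R5_DOUBLE_LOCKED used above.  When a stripe member is written to both
 * the primary and the replacement, two completions arrive; the first
 * only drops the DOUBLE_LOCKED marker and the second finally clears
 * R5_LOCKED.  The kernel does this with atomic test_and_clear_bit();
 * fake_dev/fake_end_write below are made-up names.
 */
#include <stdbool.h>

struct fake_dev {
	bool locked;		/* R5_LOCKED        */
	bool double_locked;	/* R5_DOUBLE_LOCKED */
};

static void fake_end_write(struct fake_dev *dev)
{
	if (dev->double_locked) {
		dev->double_locked = false;	/* first of the two writes */
		return;
	}
	dev->locked = false;			/* both writes have completed */
}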
static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous);
static void raid5_build_block(struct stripe_head *sh, int i, int previous)
@@ -1709,12 +1822,15 @@ static void raid5_build_block(struct stripe_head *sh, int i, int previous)
dev->req.bi_io_vec = &dev->vec;
dev->req.bi_vcnt++;
dev->req.bi_max_vecs++;
+ dev->req.bi_private = sh;
dev->vec.bv_page = dev->page;
- dev->vec.bv_len = STRIPE_SIZE;
- dev->vec.bv_offset = 0;
- dev->req.bi_sector = sh->sector;
- dev->req.bi_private = sh;
+ bio_init(&dev->rreq);
+ dev->rreq.bi_io_vec = &dev->rvec;
+ dev->rreq.bi_vcnt++;
+ dev->rreq.bi_max_vecs++;
+ dev->rreq.bi_private = sh;
+ dev->rvec.bv_page = dev->page;
dev->flags = 0;
dev->sector = compute_blocknr(sh, i, previous);
@@ -1724,18 +1840,15 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
{
char b[BDEVNAME_SIZE];
struct r5conf *conf = mddev->private;
+ unsigned long flags;
pr_debug("raid456: error called\n");
- if (test_and_clear_bit(In_sync, &rdev->flags)) {
- unsigned long flags;
- spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded++;
- spin_unlock_irqrestore(&conf->device_lock, flags);
- /*
- * if recovery was running, make sure it aborts.
- */
- set_bit(MD_RECOVERY_INTR, &mddev->recovery);
- }
+ spin_lock_irqsave(&conf->device_lock, flags);
+ clear_bit(In_sync, &rdev->flags);
+ mddev->degraded = calc_degraded(conf);
+ spin_unlock_irqrestore(&conf->device_lock, flags);
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+
set_bit(Blocked, &rdev->flags);
set_bit(Faulty, &rdev->flags);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -2362,8 +2475,9 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
md_done_sync(conf->mddev, STRIPE_SECTORS, 0);
clear_bit(STRIPE_SYNCING, &sh->state);
s->syncing = 0;
+ s->replacing = 0;
/* There is nothing more to do for sync/check/repair.
- * For recover we need to record a bad block on all
+ * For recover/replace we need to record a bad block on all
* non-sync devices, or abort the recovery
*/
if (!test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery))
@@ -2373,12 +2487,18 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
*/
for (i = 0; i < conf->raid_disks; i++) {
struct md_rdev *rdev = conf->disks[i].rdev;
- if (!rdev
- || test_bit(Faulty, &rdev->flags)
- || test_bit(In_sync, &rdev->flags))
- continue;
- if (!rdev_set_badblocks(rdev, sh->sector,
- STRIPE_SECTORS, 0))
+ if (rdev
+ && !test_bit(Faulty, &rdev->flags)
+ && !test_bit(In_sync, &rdev->flags)
+ && !rdev_set_badblocks(rdev, sh->sector,
+ STRIPE_SECTORS, 0))
+ abort = 1;
+ rdev = conf->disks[i].replacement;
+ if (rdev
+ && !test_bit(Faulty, &rdev->flags)
+ && !test_bit(In_sync, &rdev->flags)
+ && !rdev_set_badblocks(rdev, sh->sector,
+ STRIPE_SECTORS, 0))
abort = 1;
}
if (abort) {
@@ -2387,6 +2507,22 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
}
}
+static int want_replace(struct stripe_head *sh, int disk_idx)
+{
+ struct md_rdev *rdev;
+ int rv = 0;
+ /* Doing recovery so rcu locking not required */
+ rdev = sh->raid_conf->disks[disk_idx].replacement;
+ if (rdev
+ && !test_bit(Faulty, &rdev->flags)
+ && !test_bit(In_sync, &rdev->flags)
+ && (rdev->recovery_offset <= sh->sector
+ || rdev->mddev->recovery_cp <= sh->sector))
+ rv = 1;
+
+ return rv;
+}
+
/* fetch_block - checks the given member device to see if its data needs
* to be read or computed to satisfy a request.
*
@@ -2406,6 +2542,7 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
(dev->toread ||
(dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) ||
s->syncing || s->expanding ||
+ (s->replacing && want_replace(sh, disk_idx)) ||
(s->failed >= 1 && fdev[0]->toread) ||
(s->failed >= 2 && fdev[1]->toread) ||
(sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite &&
@@ -2959,22 +3096,18 @@ static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh)
}
}
-
/*
* handle_stripe - do things to a stripe.
*
- * We lock the stripe and then examine the state of various bits
- * to see what needs to be done.
+ * We lock the stripe by setting STRIPE_ACTIVE and then examine the
+ * state of various bits to see what needs to be done.
* Possible results:
- * return some read request which now have data
- * return some write requests which are safely on disc
+ * return some read requests which now have data
+ * return some write requests which are safely on storage
* schedule a read on some buffers
* schedule a write of some buffers
* return confirmation of parity correctness
*
- * buffers are taken off read_list or write_list, and bh_cache buffers
- * get BH_Lock set before the stripe lock is released.
- *
*/
static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
@@ -2983,10 +3116,10 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
int disks = sh->disks;
struct r5dev *dev;
int i;
+ int do_recovery = 0;
memset(s, 0, sizeof(*s));
- s->syncing = test_bit(STRIPE_SYNCING, &sh->state);
s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state);
s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state);
s->failed_num[0] = -1;
@@ -3004,7 +3137,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
dev = &sh->dev[i];
pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
- i, dev->flags, dev->toread, dev->towrite, dev->written);
+ i, dev->flags,
+ dev->toread, dev->towrite, dev->written);
/* maybe we can reply to a read
*
* new wantfill requests are only permitted while
@@ -3035,7 +3169,21 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
}
if (dev->written)
s->written++;
- rdev = rcu_dereference(conf->disks[i].rdev);
+ /* Prefer to use the replacement for reads, but only
+ * if it is recovered enough and has no bad blocks.
+ */
+ rdev = rcu_dereference(conf->disks[i].replacement);
+ if (rdev && !test_bit(Faulty, &rdev->flags) &&
+ rdev->recovery_offset >= sh->sector + STRIPE_SECTORS &&
+ !is_badblock(rdev, sh->sector, STRIPE_SECTORS,
+ &first_bad, &bad_sectors))
+ set_bit(R5_ReadRepl, &dev->flags);
+ else {
+ if (rdev)
+ set_bit(R5_NeedReplace, &dev->flags);
+ rdev = rcu_dereference(conf->disks[i].rdev);
+ clear_bit(R5_ReadRepl, &dev->flags);
+ }
if (rdev && test_bit(Faulty, &rdev->flags))
rdev = NULL;
if (rdev) {
@@ -3077,20 +3225,38 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
set_bit(R5_Insync, &dev->flags);
if (rdev && test_bit(R5_WriteError, &dev->flags)) {
- clear_bit(R5_Insync, &dev->flags);
- if (!test_bit(Faulty, &rdev->flags)) {
+ /* This flag does not apply to '.replacement'
+ * only to .rdev, so make sure to check that */
+ struct md_rdev *rdev2 = rcu_dereference(
+ conf->disks[i].rdev);
+ if (rdev2 == rdev)
+ clear_bit(R5_Insync, &dev->flags);
+ if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
s->handle_bad_blocks = 1;
- atomic_inc(&rdev->nr_pending);
+ atomic_inc(&rdev2->nr_pending);
} else
clear_bit(R5_WriteError, &dev->flags);
}
if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
- if (!test_bit(Faulty, &rdev->flags)) {
+ /* This flag does not apply to '.replacement'
+ * only to .rdev, so make sure to check that */
+ struct md_rdev *rdev2 = rcu_dereference(
+ conf->disks[i].rdev);
+ if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
s->handle_bad_blocks = 1;
- atomic_inc(&rdev->nr_pending);
+ atomic_inc(&rdev2->nr_pending);
} else
clear_bit(R5_MadeGood, &dev->flags);
}
+ if (test_bit(R5_MadeGoodRepl, &dev->flags)) {
+ struct md_rdev *rdev2 = rcu_dereference(
+ conf->disks[i].replacement);
+ if (rdev2 && !test_bit(Faulty, &rdev2->flags)) {
+ s->handle_bad_blocks = 1;
+ atomic_inc(&rdev2->nr_pending);
+ } else
+ clear_bit(R5_MadeGoodRepl, &dev->flags);
+ }
if (!test_bit(R5_Insync, &dev->flags)) {
/* The ReadError flag will just be confusing now */
clear_bit(R5_ReadError, &dev->flags);
@@ -3102,9 +3268,25 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
if (s->failed < 2)
s->failed_num[s->failed] = i;
s->failed++;
+ if (rdev && !test_bit(Faulty, &rdev->flags))
+ do_recovery = 1;
}
}
spin_unlock_irq(&conf->device_lock);
+ if (test_bit(STRIPE_SYNCING, &sh->state)) {
+ /* If there is a failed device being replaced,
+ * we must be recovering.
+ * else if we are after recovery_cp, we must be syncing
+ * else we can only be replacing
+ * sync and recovery both need to read all devices, and so
+ * use the same flag.
+ */
+ if (do_recovery ||
+ sh->sector >= conf->mddev->recovery_cp)
+ s->syncing = 1;
+ else
+ s->replacing = 1;
+ }
rcu_read_unlock();
}
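/*
 * Illustrative sketch, not part of the patch: the classification made
 * at the end of analyse_stripe() above.  A failed slot that still has a
 * non-Faulty rdev means recovery; a stripe at or beyond recovery_cp
 * means resync; otherwise only replacement targets need writing, so the
 * cheaper 'replacing' path is taken.  The enum and function names below
 * are made up, not kernel symbols.
 */
enum fake_sync_mode { FAKE_NONE, FAKE_SYNC, FAKE_REPLACE };

static enum fake_sync_mode classify_stripe(int stripe_syncing, int do_recovery,
					   unsigned long long sector,
					   unsigned long long recovery_cp)
{
	if (!stripe_syncing)
		return FAKE_NONE;
	return (do_recovery || sector >= recovery_cp) ? FAKE_SYNC
						      : FAKE_REPLACE;
}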
@@ -3146,7 +3328,7 @@ static void handle_stripe(struct stripe_head *sh)
if (unlikely(s.blocked_rdev)) {
if (s.syncing || s.expanding || s.expanded ||
- s.to_write || s.written) {
+ s.replacing || s.to_write || s.written) {
set_bit(STRIPE_HANDLE, &sh->state);
goto finish;
}
@@ -3172,7 +3354,7 @@ static void handle_stripe(struct stripe_head *sh)
sh->reconstruct_state = 0;
if (s.to_read+s.to_write+s.written)
handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
- if (s.syncing)
+ if (s.syncing + s.replacing)
handle_failed_sync(conf, sh, &s);
}
@@ -3203,7 +3385,9 @@ static void handle_stripe(struct stripe_head *sh)
*/
if (s.to_read || s.non_overwrite
|| (conf->level == 6 && s.to_write && s.failed)
- || (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding)
+ || (s.syncing && (s.uptodate + s.compute < disks))
+ || s.replacing
+ || s.expanding)
handle_stripe_fill(sh, &s, disks);
/* Now we check to see if any write operations have recently
@@ -3265,7 +3449,20 @@ static void handle_stripe(struct stripe_head *sh)
handle_parity_checks5(conf, sh, &s, disks);
}
- if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) {
+ if (s.replacing && s.locked == 0
+ && !test_bit(STRIPE_INSYNC, &sh->state)) {
+ /* Write out to replacement devices where possible */
+ for (i = 0; i < conf->raid_disks; i++)
+ if (test_bit(R5_UPTODATE, &sh->dev[i].flags) &&
+ test_bit(R5_NeedReplace, &sh->dev[i].flags)) {
+ set_bit(R5_WantReplace, &sh->dev[i].flags);
+ set_bit(R5_LOCKED, &sh->dev[i].flags);
+ s.locked++;
+ }
+ set_bit(STRIPE_INSYNC, &sh->state);
+ }
+ if ((s.syncing || s.replacing) && s.locked == 0 &&
+ test_bit(STRIPE_INSYNC, &sh->state)) {
md_done_sync(conf->mddev, STRIPE_SECTORS, 1);
clear_bit(STRIPE_SYNCING, &sh->state);
}
@@ -3363,6 +3560,15 @@ finish:
STRIPE_SECTORS);
rdev_dec_pending(rdev, conf->mddev);
}
+ if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) {
+ rdev = conf->disks[i].replacement;
+ if (!rdev)
+ /* the replacement has been moved down to rdev */
+ rdev = conf->disks[i].rdev;
+ rdev_clear_badblocks(rdev, sh->sector,
+ STRIPE_SECTORS);
+ rdev_dec_pending(rdev, conf->mddev);
+ }
}
if (s.ops_request)
@@ -3586,6 +3792,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
int dd_idx;
struct bio* align_bi;
struct md_rdev *rdev;
+ sector_t end_sector;
if (!in_chunk_boundary(mddev, raid_bio)) {
pr_debug("chunk_aligned_read : non aligned\n");
@@ -3610,9 +3817,19 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
0,
&dd_idx, NULL);
+ end_sector = align_bi->bi_sector + (align_bi->bi_size >> 9);
rcu_read_lock();
- rdev = rcu_dereference(conf->disks[dd_idx].rdev);
- if (rdev && test_bit(In_sync, &rdev->flags)) {
+ rdev = rcu_dereference(conf->disks[dd_idx].replacement);
+ if (!rdev || test_bit(Faulty, &rdev->flags) ||
+ rdev->recovery_offset < end_sector) {
+ rdev = rcu_dereference(conf->disks[dd_idx].rdev);
+ if (rdev &&
+ (test_bit(Faulty, &rdev->flags) ||
+ !(test_bit(In_sync, &rdev->flags) ||
+ rdev->recovery_offset >= end_sector)))
+ rdev = NULL;
+ }
+ if (rdev) {
sector_t first_bad;
int bad_sectors;
@@ -4137,7 +4354,6 @@ static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int
return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
}
-
bitmap_cond_end_sync(mddev->bitmap, sector_nr);
sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
@@ -4208,7 +4424,6 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
return handled;
}
- set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
release_stripe(sh);
raid5_set_bi_hw_segments(raid_bio, scnt);
@@ -4635,7 +4850,15 @@ static struct r5conf *setup_conf(struct mddev *mddev)
continue;
disk = conf->disks + raid_disk;
- disk->rdev = rdev;
+ if (test_bit(Replacement, &rdev->flags)) {
+ if (disk->replacement)
+ goto abort;
+ disk->replacement = rdev;
+ } else {
+ if (disk->rdev)
+ goto abort;
+ disk->rdev = rdev;
+ }
if (test_bit(In_sync, &rdev->flags)) {
char b[BDEVNAME_SIZE];
@@ -4724,6 +4947,7 @@ static int run(struct mddev *mddev)
int dirty_parity_disks = 0;
struct md_rdev *rdev;
sector_t reshape_offset = 0;
+ int i;
if (mddev->recovery_cp != MaxSector)
printk(KERN_NOTICE "md/raid:%s: not clean"
@@ -4813,12 +5037,25 @@ static int run(struct mddev *mddev)
conf->thread = NULL;
mddev->private = conf;
- /*
- * 0 for a fully functional array, 1 or 2 for a degraded array.
- */
- list_for_each_entry(rdev, &mddev->disks, same_set) {
- if (rdev->raid_disk < 0)
+ for (i = 0; i < conf->raid_disks && conf->previous_raid_disks;
+ i++) {
+ rdev = conf->disks[i].rdev;
+ if (!rdev && conf->disks[i].replacement) {
+ /* The replacement is all we have yet */
+ rdev = conf->disks[i].replacement;
+ conf->disks[i].replacement = NULL;
+ clear_bit(Replacement, &rdev->flags);
+ conf->disks[i].rdev = rdev;
+ }
+ if (!rdev)
continue;
+ if (conf->disks[i].replacement &&
+ conf->reshape_progress != MaxSector) {
+ /* replacements and reshape simply do not mix. */
+ printk(KERN_ERR "md: cannot handle concurrent "
+ "replacement and reshape.\n");
+ goto abort;
+ }
if (test_bit(In_sync, &rdev->flags)) {
working_disks++;
continue;
@@ -4852,8 +5089,10 @@ static int run(struct mddev *mddev)
dirty_parity_disks++;
}
- mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks)
- - working_disks);
+ /*
+ * 0 for a fully functional array, 1 or 2 for a degraded array.
+ */
+ mddev->degraded = calc_degraded(conf);
if (has_failed(conf)) {
printk(KERN_ERR "md/raid:%s: not enough operational devices"
@@ -5016,7 +5255,25 @@ static int raid5_spare_active(struct mddev *mddev)
for (i = 0; i < conf->raid_disks; i++) {
tmp = conf->disks + i;
- if (tmp->rdev
+ if (tmp->replacement
+ && tmp->replacement->recovery_offset == MaxSector
+ && !test_bit(Faulty, &tmp->replacement->flags)
+ && !test_and_set_bit(In_sync, &tmp->replacement->flags)) {
+ /* Replacement has just become active. */
+ if (!tmp->rdev
+ || !test_and_clear_bit(In_sync, &tmp->rdev->flags))
+ count++;
+ if (tmp->rdev) {
+ /* Replaced device not technically faulty,
+ * but we need to be sure it gets removed
+ * and never re-added.
+ */
+ set_bit(Faulty, &tmp->rdev->flags);
+ sysfs_notify_dirent_safe(
+ tmp->rdev->sysfs_state);
+ }
+ sysfs_notify_dirent_safe(tmp->replacement->sysfs_state);
+ } else if (tmp->rdev
&& tmp->rdev->recovery_offset == MaxSector
&& !test_bit(Faulty, &tmp->rdev->flags)
&& !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
@@ -5025,49 +5282,68 @@ static int raid5_spare_active(struct mddev *mddev)
}
}
spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded -= count;
+ mddev->degraded = calc_degraded(conf);
spin_unlock_irqrestore(&conf->device_lock, flags);
print_raid5_conf(conf);
return count;
}
-static int raid5_remove_disk(struct mddev *mddev, int number)
+static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
{
struct r5conf *conf = mddev->private;
int err = 0;
- struct md_rdev *rdev;
+ int number = rdev->raid_disk;
+ struct md_rdev **rdevp;
struct disk_info *p = conf->disks + number;
print_raid5_conf(conf);
- rdev = p->rdev;
- if (rdev) {
- if (number >= conf->raid_disks &&
- conf->reshape_progress == MaxSector)
- clear_bit(In_sync, &rdev->flags);
+ if (rdev == p->rdev)
+ rdevp = &p->rdev;
+ else if (rdev == p->replacement)
+ rdevp = &p->replacement;
+ else
+ return 0;
- if (test_bit(In_sync, &rdev->flags) ||
- atomic_read(&rdev->nr_pending)) {
- err = -EBUSY;
- goto abort;
- }
- /* Only remove non-faulty devices if recovery
- * isn't possible.
- */
- if (!test_bit(Faulty, &rdev->flags) &&
- mddev->recovery_disabled != conf->recovery_disabled &&
- !has_failed(conf) &&
- number < conf->raid_disks) {
- err = -EBUSY;
- goto abort;
- }
- p->rdev = NULL;
- synchronize_rcu();
- if (atomic_read(&rdev->nr_pending)) {
- /* lost the race, try later */
- err = -EBUSY;
- p->rdev = rdev;
- }
+ if (number >= conf->raid_disks &&
+ conf->reshape_progress == MaxSector)
+ clear_bit(In_sync, &rdev->flags);
+
+ if (test_bit(In_sync, &rdev->flags) ||
+ atomic_read(&rdev->nr_pending)) {
+ err = -EBUSY;
+ goto abort;
}
+ /* Only remove non-faulty devices if recovery
+ * isn't possible.
+ */
+ if (!test_bit(Faulty, &rdev->flags) &&
+ mddev->recovery_disabled != conf->recovery_disabled &&
+ !has_failed(conf) &&
+ (!p->replacement || p->replacement == rdev) &&
+ number < conf->raid_disks) {
+ err = -EBUSY;
+ goto abort;
+ }
+ *rdevp = NULL;
+ synchronize_rcu();
+ if (atomic_read(&rdev->nr_pending)) {
+ /* lost the race, try later */
+ err = -EBUSY;
+ *rdevp = rdev;
+ } else if (p->replacement) {
+ /* We must have just cleared 'rdev' */
+ p->rdev = p->replacement;
+ clear_bit(Replacement, &p->replacement->flags);
+ smp_mb(); /* Make sure other CPUs may see both as identical
+ * but will never see both as NULL - if they are careful
+ */
+ p->replacement = NULL;
+ clear_bit(WantReplacement, &rdev->flags);
+ } else
+ /* We might have just removed the Replacement as faulty;
+ * clear the bit just in case
+ */
+ clear_bit(WantReplacement, &rdev->flags);
abort:
print_raid5_conf(conf);
@@ -5103,8 +5379,9 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
disk = rdev->saved_raid_disk;
else
disk = first;
- for ( ; disk <= last ; disk++)
- if ((p=conf->disks + disk)->rdev == NULL) {
+ for ( ; disk <= last ; disk++) {
+ p = conf->disks + disk;
+ if (p->rdev == NULL) {
clear_bit(In_sync, &rdev->flags);
rdev->raid_disk = disk;
err = 0;
@@ -5113,6 +5390,17 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev)
rcu_assign_pointer(p->rdev, rdev);
break;
}
+ if (test_bit(WantReplacement, &p->rdev->flags) &&
+ p->replacement == NULL) {
+ clear_bit(In_sync, &rdev->flags);
+ set_bit(Replacement, &rdev->flags);
+ rdev->raid_disk = disk;
+ err = 0;
+ conf->fullsync = 1;
+ rcu_assign_pointer(p->replacement, rdev);
+ break;
+ }
+ }
print_raid5_conf(conf);
return err;
}
@@ -5286,8 +5574,7 @@ static int raid5_start_reshape(struct mddev *mddev)
* pre and post number of devices.
*/
spin_lock_irqsave(&conf->device_lock, flags);
- mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
- - added_devices;
+ mddev->degraded = calc_degraded(conf);
spin_unlock_irqrestore(&conf->device_lock, flags);
}
mddev->raid_disks = conf->raid_disks;
@@ -5356,17 +5643,15 @@ static void raid5_finish_reshape(struct mddev *mddev)
revalidate_disk(mddev->gendisk);
} else {
int d;
- mddev->degraded = conf->raid_disks;
- for (d = 0; d < conf->raid_disks ; d++)
- if (conf->disks[d].rdev &&
- test_bit(In_sync,
- &conf->disks[d].rdev->flags))
- mddev->degraded--;
+ spin_lock_irq(&conf->device_lock);
+ mddev->degraded = calc_degraded(conf);
+ spin_unlock_irq(&conf->device_lock);
for (d = conf->raid_disks ;
d < conf->raid_disks - mddev->delta_disks;
d++) {
struct md_rdev *rdev = conf->disks[d].rdev;
- if (rdev && raid5_remove_disk(mddev, d) == 0) {
+ if (rdev &&
+ raid5_remove_disk(mddev, rdev) == 0) {
sysfs_unlink_rdev(mddev, rdev);
rdev->raid_disk = -1;
}
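The raid5.c hunks above repeatedly replace open-coded arithmetic on mddev->degraded with a call to calc_degraded(conf), a helper added earlier in this patch and not shown in this excerpt. As a rough, hypothetical sketch only - the in-tree helper also walks the previous reshape geometry and uses RCU under conf->device_lock - counting degraded slots amounts to something like:

	/* Illustrative sketch, not the in-tree calc_degraded(): it only shows
	 * the basic idea of deriving 'degraded' from per-slot device state.
	 */
	static int calc_degraded_sketch(struct r5conf *conf)
	{
		int i, degraded = 0;

		for (i = 0; i < conf->raid_disks; i++) {
			struct md_rdev *rdev = conf->disks[i].rdev;

			/* a slot counts as degraded if it has no usable,
			 * in-sync member device
			 */
			if (!rdev || test_bit(Faulty, &rdev->flags) ||
			    !test_bit(In_sync, &rdev->flags))
				degraded++;
		}
		return degraded;
	}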
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index e10c553..8d8e139 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -27,7 +27,7 @@
* The possible state transitions are:
*
* Empty -> Want - on read or write to get old data for parity calc
- * Empty -> Dirty - on compute_parity to satisfy write/sync request.(RECONSTRUCT_WRITE)
+ * Empty -> Dirty - on compute_parity to satisfy write/sync request.
* Empty -> Clean - on compute_block when computing a block for failed drive
* Want -> Empty - on failed read
* Want -> Clean - on successful completion of read request
@@ -226,8 +226,11 @@ struct stripe_head {
#endif
} ops;
struct r5dev {
- struct bio req;
- struct bio_vec vec;
+ /* rreq and rvec are used for the replacement device when
+ * writing data to both devices.
+ */
+ struct bio req, rreq;
+ struct bio_vec vec, rvec;
struct page *page;
struct bio *toread, *read, *towrite, *written;
sector_t sector; /* sector of this page */
@@ -239,7 +242,13 @@ struct stripe_head {
* for handle_stripe.
*/
struct stripe_head_state {
- int syncing, expanding, expanded;
+ /* 'syncing' means that we need to read all devices, either
+ * to check/correct parity, or to reconstruct a missing device.
+ * 'replacing' means we are replacing one or more drives and
+ * the source is valid at this point so we don't need to
+ * read all devices, just the replacement targets.
+ */
+ int syncing, expanding, expanded, replacing;
int locked, uptodate, to_read, to_write, failed, written;
int to_fill, compute, req_compute, non_overwrite;
int failed_num[2];
@@ -252,38 +261,41 @@ struct stripe_head_state {
int handle_bad_blocks;
};
-/* Flags */
-#define R5_UPTODATE 0 /* page contains current data */
-#define R5_LOCKED 1 /* IO has been submitted on "req" */
-#define R5_OVERWRITE 2 /* towrite covers whole page */
+/* Flags for struct r5dev.flags */
+enum r5dev_flags {
+ R5_UPTODATE, /* page contains current data */
+ R5_LOCKED, /* IO has been submitted on "req" */
+ R5_DOUBLE_LOCKED,/* Cannot clear R5_LOCKED until 2 writes complete */
+ R5_OVERWRITE, /* towrite covers whole page */
/* and some that are internal to handle_stripe */
-#define R5_Insync 3 /* rdev && rdev->in_sync at start */
-#define R5_Wantread 4 /* want to schedule a read */
-#define R5_Wantwrite 5
-#define R5_Overlap 7 /* There is a pending overlapping request on this block */
-#define R5_ReadError 8 /* seen a read error here recently */
-#define R5_ReWrite 9 /* have tried to over-write the readerror */
+ R5_Insync, /* rdev && rdev->in_sync at start */
+ R5_Wantread, /* want to schedule a read */
+ R5_Wantwrite,
+ R5_Overlap, /* There is a pending overlapping request
+ * on this block */
+ R5_ReadError, /* seen a read error here recently */
+ R5_ReWrite, /* have tried to over-write the readerror */
-#define R5_Expanded 10 /* This block now has post-expand data */
-#define R5_Wantcompute 11 /* compute_block in progress treat as
- * uptodate
- */
-#define R5_Wantfill 12 /* dev->toread contains a bio that needs
- * filling
- */
-#define R5_Wantdrain 13 /* dev->towrite needs to be drained */
-#define R5_WantFUA 14 /* Write should be FUA */
-#define R5_WriteError 15 /* got a write error - need to record it */
-#define R5_MadeGood 16 /* A bad block has been fixed by writing to it*/
-/*
- * Write method
- */
-#define RECONSTRUCT_WRITE 1
-#define READ_MODIFY_WRITE 2
-/* not a write method, but a compute_parity mode */
-#define CHECK_PARITY 3
-/* Additional compute_parity mode -- updates the parity w/o LOCKING */
-#define UPDATE_PARITY 4
+ R5_Expanded, /* This block now has post-expand data */
+ R5_Wantcompute, /* compute_block in progress treat as
+ * uptodate
+ */
+ R5_Wantfill, /* dev->toread contains a bio that needs
+ * filling
+ */
+ R5_Wantdrain, /* dev->towrite needs to be drained */
+ R5_WantFUA, /* Write should be FUA */
+ R5_WriteError, /* got a write error - need to record it */
+ R5_MadeGood, /* A bad block has been fixed by writing to it */
+ R5_ReadRepl, /* Will/did read from replacement rather than orig */
+ R5_MadeGoodRepl,/* A bad block on the replacement device has been
+ * fixed by writing to it */
+ R5_NeedReplace, /* This device has a replacement which is not
+ * up-to-date at this stripe. */
+ R5_WantReplace, /* We need to update the replacement, we have read
+ * data in, and now is a good time to write it out.
+ */
+};
/*
* Stripe state
@@ -311,13 +323,14 @@ enum {
/*
* Operation request flags
*/
-#define STRIPE_OP_BIOFILL 0
-#define STRIPE_OP_COMPUTE_BLK 1
-#define STRIPE_OP_PREXOR 2
-#define STRIPE_OP_BIODRAIN 3
-#define STRIPE_OP_RECONSTRUCT 4
-#define STRIPE_OP_CHECK 5
-
+enum {
+ STRIPE_OP_BIOFILL,
+ STRIPE_OP_COMPUTE_BLK,
+ STRIPE_OP_PREXOR,
+ STRIPE_OP_BIODRAIN,
+ STRIPE_OP_RECONSTRUCT,
+ STRIPE_OP_CHECK,
+};
/*
* Plugging:
*
@@ -344,13 +357,12 @@ enum {
struct disk_info {
- struct md_rdev *rdev;
+ struct md_rdev *rdev, *replacement;
};
struct r5conf {
struct hlist_head *stripe_hashtbl;
struct mddev *mddev;
- struct disk_info *spare;
int chunk_sectors;
int level, algorithm;
int max_degraded;
diff --git a/drivers/media/dvb/b2c2/flexcop-usb.c b/drivers/media/dvb/b2c2/flexcop-usb.c
index bedcfb6..26c666d 100644
--- a/drivers/media/dvb/b2c2/flexcop-usb.c
+++ b/drivers/media/dvb/b2c2/flexcop-usb.c
@@ -583,25 +583,7 @@ static struct usb_driver flexcop_usb_driver = {
.id_table = flexcop_usb_table,
};
-/* module stuff */
-static int __init flexcop_usb_module_init(void)
-{
- int result;
- if ((result = usb_register(&flexcop_usb_driver))) {
- err("usb_register failed. (%d)", result);
- return result;
- }
- return 0;
-}
-
-static void __exit flexcop_usb_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&flexcop_usb_driver);
-}
-
-module_init(flexcop_usb_module_init);
-module_exit(flexcop_usb_module_exit);
+module_usb_driver(flexcop_usb_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_NAME);
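This and the following conversions drop a hand-written module init/exit pair in favour of module_usb_driver(). That macro (from <linux/usb.h>, built on module_driver()) generates the same registration boilerplate; roughly, for this driver it expands along these lines:

	/* Approximate expansion of module_usb_driver(flexcop_usb_driver);
	 * the generated functions simply register and deregister the driver.
	 */
	static int __init flexcop_usb_driver_init(void)
	{
		return usb_register(&flexcop_usb_driver);
	}
	module_init(flexcop_usb_driver_init);

	static void __exit flexcop_usb_driver_exit(void)
	{
		usb_deregister(&flexcop_usb_driver);
	}
	module_exit(flexcop_usb_driver_exit);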
diff --git a/drivers/media/dvb/ddbridge/ddbridge-core.c b/drivers/media/dvb/ddbridge/ddbridge-core.c
index 2f31648..ce4f858 100644
--- a/drivers/media/dvb/ddbridge/ddbridge-core.c
+++ b/drivers/media/dvb/ddbridge/ddbridge-core.c
@@ -1480,7 +1480,7 @@ static const struct file_operations ddb_fops = {
.open = ddb_open,
};
-static char *ddb_devnode(struct device *device, mode_t *mode)
+static char *ddb_devnode(struct device *device, umode_t *mode)
{
struct ddb *dev = dev_get_drvdata(device);
diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
index f732877..00a6732 100644
--- a/drivers/media/dvb/dvb-core/dvbdev.c
+++ b/drivers/media/dvb/dvb-core/dvbdev.c
@@ -450,7 +450,7 @@ static int dvb_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
-static char *dvb_devnode(struct device *dev, mode_t *mode)
+static char *dvb_devnode(struct device *dev, umode_t *mode)
{
struct dvb_device *dvbdev = dev_get_drvdata(dev);
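The devnode callbacks above switch their mode argument from mode_t to umode_t to match the updated callback prototype in the driver core. A minimal callback using the new signature looks like this (the name and node path are illustrative, not taken from this patch):

	static char *example_devnode(struct device *dev, umode_t *mode)
	{
		if (mode)
			*mode = 0644;	/* default permissions for the device node */
		return kasprintf(GFP_KERNEL, "example/%s", dev_name(dev));
	}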
diff --git a/drivers/media/dvb/dvb-usb/a800.c b/drivers/media/dvb/dvb-usb/a800.c
index 2aef3c8..8d7fef8 100644
--- a/drivers/media/dvb/dvb-usb/a800.c
+++ b/drivers/media/dvb/dvb-usb/a800.c
@@ -183,26 +183,7 @@ static struct usb_driver a800_driver = {
.id_table = a800_table,
};
-/* module stuff */
-static int __init a800_module_init(void)
-{
- int result;
- if ((result = usb_register(&a800_driver))) {
- err("usb_register failed. Error number %d",result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit a800_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&a800_driver);
-}
-
-module_init (a800_module_init);
-module_exit (a800_module_exit);
+module_usb_driver(a800_driver);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
MODULE_DESCRIPTION("AVerMedia AverTV DVB-T USB 2.0 (A800)");
diff --git a/drivers/media/dvb/dvb-usb/af9005.c b/drivers/media/dvb/dvb-usb/af9005.c
index 5d5e32f..af176b6 100644
--- a/drivers/media/dvb/dvb-usb/af9005.c
+++ b/drivers/media/dvb/dvb-usb/af9005.c
@@ -30,7 +30,7 @@ MODULE_PARM_DESC(debug,
"set debugging level (1=info,xfer=2,rc=4,reg=8,i2c=16,fw=32 (or-able))."
DVB_USB_DEBUG_STATUS);
/* enable obnoxious led */
-int dvb_usb_af9005_led = 1;
+bool dvb_usb_af9005_led = 1;
module_param_named(led, dvb_usb_af9005_led, bool, 0644);
MODULE_PARM_DESC(led, "enable led (default: 1).");
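This and the later int-to-bool conversions exist because module_param(..., bool, ...) now expects the backing variable to really be a bool. The usual pattern after the change is simply (hypothetical parameter shown for illustration):

	/* Example of a boolean module parameter backed by a bool variable */
	static bool debug;		/* default: off */
	module_param(debug, bool, 0644);
	MODULE_PARM_DESC(debug, "enable debug output (default: 0)");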
diff --git a/drivers/media/dvb/dvb-usb/af9005.h b/drivers/media/dvb/dvb-usb/af9005.h
index c71c77b..6a2bf3d 100644
--- a/drivers/media/dvb/dvb-usb/af9005.h
+++ b/drivers/media/dvb/dvb-usb/af9005.h
@@ -35,7 +35,7 @@ extern int dvb_usb_af9005_debug;
#define deb_i2c(args...) dprintk(dvb_usb_af9005_debug,0x10,args)
#define deb_fw(args...) dprintk(dvb_usb_af9005_debug,0x20,args)
-extern int dvb_usb_af9005_led;
+extern bool dvb_usb_af9005_led;
/* firmware */
#define FW_BULKOUT_SIZE 250
diff --git a/drivers/media/dvb/dvb-usb/af9015.c b/drivers/media/dvb/dvb-usb/af9015.c
index e755d76..282a43d 100644
--- a/drivers/media/dvb/dvb-usb/af9015.c
+++ b/drivers/media/dvb/dvb-usb/af9015.c
@@ -1967,25 +1967,7 @@ static struct usb_driver af9015_usb_driver = {
.id_table = af9015_usb_table,
};
-/* module stuff */
-static int __init af9015_usb_module_init(void)
-{
- int ret;
- ret = usb_register(&af9015_usb_driver);
- if (ret)
- err("module init failed:%d", ret);
-
- return ret;
-}
-
-static void __exit af9015_usb_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&af9015_usb_driver);
-}
-
-module_init(af9015_usb_module_init);
-module_exit(af9015_usb_module_exit);
+module_usb_driver(af9015_usb_driver);
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("Driver for Afatech AF9015 DVB-T");
diff --git a/drivers/media/dvb/dvb-usb/anysee.c b/drivers/media/dvb/dvb-usb/anysee.c
index fdee856..cf0c318 100644
--- a/drivers/media/dvb/dvb-usb/anysee.c
+++ b/drivers/media/dvb/dvb-usb/anysee.c
@@ -1373,26 +1373,7 @@ static struct usb_driver anysee_driver = {
.id_table = anysee_table,
};
-/* module stuff */
-static int __init anysee_module_init(void)
-{
- int ret;
-
- ret = usb_register(&anysee_driver);
- if (ret)
- err("%s: usb_register failed. Error number %d", __func__, ret);
-
- return ret;
-}
-
-static void __exit anysee_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&anysee_driver);
-}
-
-module_init(anysee_module_init);
-module_exit(anysee_module_exit);
+module_usb_driver(anysee_driver);
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("Driver Anysee E30 DVB-C & DVB-T USB2.0");
diff --git a/drivers/media/dvb/dvb-usb/au6610.c b/drivers/media/dvb/dvb-usb/au6610.c
index b779949..16210c0 100644
--- a/drivers/media/dvb/dvb-usb/au6610.c
+++ b/drivers/media/dvb/dvb-usb/au6610.c
@@ -244,26 +244,7 @@ static struct usb_driver au6610_driver = {
.id_table = au6610_table,
};
-/* module stuff */
-static int __init au6610_module_init(void)
-{
- int ret;
-
- ret = usb_register(&au6610_driver);
- if (ret)
- err("usb_register failed. Error number %d", ret);
-
- return ret;
-}
-
-static void __exit au6610_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&au6610_driver);
-}
-
-module_init(au6610_module_init);
-module_exit(au6610_module_exit);
+module_usb_driver(au6610_driver);
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("Driver for Alcor Micro AU6610 DVB-T USB2.0");
diff --git a/drivers/media/dvb/dvb-usb/az6027.c b/drivers/media/dvb/dvb-usb/az6027.c
index bf67b4d..5e45ae6 100644
--- a/drivers/media/dvb/dvb-usb/az6027.c
+++ b/drivers/media/dvb/dvb-usb/az6027.c
@@ -1174,28 +1174,7 @@ static struct usb_driver az6027_usb_driver = {
.id_table = az6027_usb_table,
};
-/* module stuff */
-static int __init az6027_usb_module_init(void)
-{
- int result;
-
- result = usb_register(&az6027_usb_driver);
- if (result) {
- err("usb_register failed. (%d)", result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit az6027_usb_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&az6027_usb_driver);
-}
-
-module_init(az6027_usb_module_init);
-module_exit(az6027_usb_module_exit);
+module_usb_driver(az6027_usb_driver);
MODULE_AUTHOR("Adams Xu <Adams.xu@azwave.com.cn>");
MODULE_DESCRIPTION("Driver for AZUREWAVE DVB-S/S2 USB2.0 (AZ6027)");
diff --git a/drivers/media/dvb/dvb-usb/ce6230.c b/drivers/media/dvb/dvb-usb/ce6230.c
index 57afb5a..fa63725 100644
--- a/drivers/media/dvb/dvb-usb/ce6230.c
+++ b/drivers/media/dvb/dvb-usb/ce6230.c
@@ -317,27 +317,7 @@ static struct usb_driver ce6230_driver = {
.id_table = ce6230_table,
};
-/* module stuff */
-static int __init ce6230_module_init(void)
-{
- int ret;
- deb_info("%s:\n", __func__);
- ret = usb_register(&ce6230_driver);
- if (ret)
- err("usb_register failed with error:%d", ret);
-
- return ret;
-}
-
-static void __exit ce6230_module_exit(void)
-{
- deb_info("%s:\n", __func__);
- /* deregister this driver from the USB subsystem */
- usb_deregister(&ce6230_driver);
-}
-
-module_init(ce6230_module_init);
-module_exit(ce6230_module_exit);
+module_usb_driver(ce6230_driver);
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("Driver for Intel CE6230 DVB-T USB2.0");
diff --git a/drivers/media/dvb/dvb-usb/cinergyT2-core.c b/drivers/media/dvb/dvb-usb/cinergyT2-core.c
index f9d9050..0a98548 100644
--- a/drivers/media/dvb/dvb-usb/cinergyT2-core.c
+++ b/drivers/media/dvb/dvb-usb/cinergyT2-core.c
@@ -247,25 +247,7 @@ static struct usb_driver cinergyt2_driver = {
.id_table = cinergyt2_usb_table
};
-static int __init cinergyt2_usb_init(void)
-{
- int err;
-
- err = usb_register(&cinergyt2_driver);
- if (err) {
- err("usb_register() failed! (err %i)\n", err);
- return err;
- }
- return 0;
-}
-
-static void __exit cinergyt2_usb_exit(void)
-{
- usb_deregister(&cinergyt2_driver);
-}
-
-module_init(cinergyt2_usb_init);
-module_exit(cinergyt2_usb_exit);
+module_usb_driver(cinergyt2_driver);
MODULE_DESCRIPTION("Terratec Cinergy T2 DVB-T driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
index e8e5a74..3940bb0 100644
--- a/drivers/media/dvb/dvb-usb/cxusb.c
+++ b/drivers/media/dvb/dvb-usb/cxusb.c
@@ -2033,26 +2033,7 @@ static struct usb_driver cxusb_driver = {
.id_table = cxusb_table,
};
-/* module stuff */
-static int __init cxusb_module_init(void)
-{
- int result;
- if ((result = usb_register(&cxusb_driver))) {
- err("usb_register failed. Error number %d",result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit cxusb_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&cxusb_driver);
-}
-
-module_init (cxusb_module_init);
-module_exit (cxusb_module_exit);
+module_usb_driver(cxusb_driver);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
index 2d259ca..070e82a 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
@@ -832,27 +832,7 @@ static struct usb_driver dib0700_driver = {
.id_table = dib0700_usb_id_table,
};
-/* module stuff */
-static int __init dib0700_module_init(void)
-{
- int result;
- info("loaded with support for %d different device-types", dib0700_device_count);
- if ((result = usb_register(&dib0700_driver))) {
- err("usb_register failed. Error number %d",result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit dib0700_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&dib0700_driver);
-}
-
-module_init (dib0700_module_init);
-module_exit (dib0700_module_exit);
+module_usb_driver(dib0700_driver);
MODULE_FIRMWARE("dvb-usb-dib0700-1.20.fw");
MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
diff --git a/drivers/media/dvb/dvb-usb/dibusb-mb.c b/drivers/media/dvb/dvb-usb/dibusb-mb.c
index 7270791..a4ac37e 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-mb.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-mb.c
@@ -463,26 +463,7 @@ static struct usb_driver dibusb_driver = {
.id_table = dibusb_dib3000mb_table,
};
-/* module stuff */
-static int __init dibusb_module_init(void)
-{
- int result;
- if ((result = usb_register(&dibusb_driver))) {
- err("usb_register failed. Error number %d",result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit dibusb_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&dibusb_driver);
-}
-
-module_init (dibusb_module_init);
-module_exit (dibusb_module_exit);
+module_usb_driver(dibusb_driver);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
MODULE_DESCRIPTION("Driver for DiBcom USB DVB-T devices (DiB3000M-B based)");
diff --git a/drivers/media/dvb/dvb-usb/dibusb-mc.c b/drivers/media/dvb/dvb-usb/dibusb-mc.c
index 9c165e2..9d1a59d0 100644
--- a/drivers/media/dvb/dvb-usb/dibusb-mc.c
+++ b/drivers/media/dvb/dvb-usb/dibusb-mc.c
@@ -141,26 +141,7 @@ static struct usb_driver dibusb_mc_driver = {
.id_table = dibusb_dib3000mc_table,
};
-/* module stuff */
-static int __init dibusb_mc_module_init(void)
-{
- int result;
- if ((result = usb_register(&dibusb_mc_driver))) {
- err("usb_register failed. Error number %d",result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit dibusb_mc_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&dibusb_mc_driver);
-}
-
-module_init (dibusb_mc_module_init);
-module_exit (dibusb_mc_module_exit);
+module_usb_driver(dibusb_mc_driver);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
MODULE_DESCRIPTION("Driver for DiBcom USB2.0 DVB-T (DiB3000M-C/P based) devices");
diff --git a/drivers/media/dvb/dvb-usb/digitv.c b/drivers/media/dvb/dvb-usb/digitv.c
index 00a446d..ff34419 100644
--- a/drivers/media/dvb/dvb-usb/digitv.c
+++ b/drivers/media/dvb/dvb-usb/digitv.c
@@ -346,26 +346,7 @@ static struct usb_driver digitv_driver = {
.id_table = digitv_table,
};
-/* module stuff */
-static int __init digitv_module_init(void)
-{
- int result;
- if ((result = usb_register(&digitv_driver))) {
- err("usb_register failed. Error number %d",result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit digitv_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&digitv_driver);
-}
-
-module_init (digitv_module_init);
-module_exit (digitv_module_exit);
+module_usb_driver(digitv_driver);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
MODULE_DESCRIPTION("Driver for Nebula Electronics uDigiTV DVB-T USB2.0");
diff --git a/drivers/media/dvb/dvb-usb/dtt200u.c b/drivers/media/dvb/dvb-usb/dtt200u.c
index 106dfd5..66f205c 100644
--- a/drivers/media/dvb/dvb-usb/dtt200u.c
+++ b/drivers/media/dvb/dvb-usb/dtt200u.c
@@ -360,26 +360,7 @@ static struct usb_driver dtt200u_usb_driver = {
.id_table = dtt200u_usb_table,
};
-/* module stuff */
-static int __init dtt200u_usb_module_init(void)
-{
- int result;
- if ((result = usb_register(&dtt200u_usb_driver))) {
- err("usb_register failed. (%d)",result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit dtt200u_usb_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&dtt200u_usb_driver);
-}
-
-module_init(dtt200u_usb_module_init);
-module_exit(dtt200u_usb_module_exit);
+module_usb_driver(dtt200u_usb_driver);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
MODULE_DESCRIPTION("Driver for the WideView/Yakumo/Hama/Typhoon/Club3D/Miglia DVB-T USB2.0 devices");
diff --git a/drivers/media/dvb/dvb-usb/dtv5100.c b/drivers/media/dvb/dvb-usb/dtv5100.c
index 7373132..3d11df4 100644
--- a/drivers/media/dvb/dvb-usb/dtv5100.c
+++ b/drivers/media/dvb/dvb-usb/dtv5100.c
@@ -217,26 +217,7 @@ static struct usb_driver dtv5100_driver = {
.id_table = dtv5100_table,
};
-/* module stuff */
-static int __init dtv5100_module_init(void)
-{
- int ret;
-
- ret = usb_register(&dtv5100_driver);
- if (ret)
- err("usb_register failed. Error number %d", ret);
-
- return ret;
-}
-
-static void __exit dtv5100_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&dtv5100_driver);
-}
-
-module_init(dtv5100_module_init);
-module_exit(dtv5100_module_exit);
+module_usb_driver(dtv5100_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/media/dvb/dvb-usb/dw2102.c b/drivers/media/dvb/dvb-usb/dw2102.c
index 9191f0a..451c5a7 100644
--- a/drivers/media/dvb/dvb-usb/dw2102.c
+++ b/drivers/media/dvb/dvb-usb/dw2102.c
@@ -1943,22 +1943,7 @@ static struct usb_driver dw2102_driver = {
.id_table = dw2102_table,
};
-static int __init dw2102_module_init(void)
-{
- int ret = usb_register(&dw2102_driver);
- if (ret)
- err("usb_register failed. Error number %d", ret);
-
- return ret;
-}
-
-static void __exit dw2102_module_exit(void)
-{
- usb_deregister(&dw2102_driver);
-}
-
-module_init(dw2102_module_init);
-module_exit(dw2102_module_exit);
+module_usb_driver(dw2102_driver);
MODULE_AUTHOR("Igor M. Liplianin (c) liplianin@me.by");
MODULE_DESCRIPTION("Driver for DVBWorld DVB-S 2101, 2102, DVB-S2 2104,"
diff --git a/drivers/media/dvb/dvb-usb/ec168.c b/drivers/media/dvb/dvb-usb/ec168.c
index 78442fe..b4989ba 100644
--- a/drivers/media/dvb/dvb-usb/ec168.c
+++ b/drivers/media/dvb/dvb-usb/ec168.c
@@ -428,27 +428,7 @@ static struct usb_driver ec168_driver = {
.id_table = ec168_id,
};
-/* module stuff */
-static int __init ec168_module_init(void)
-{
- int ret;
- deb_info("%s:\n", __func__);
- ret = usb_register(&ec168_driver);
- if (ret)
- err("module init failed:%d", ret);
-
- return ret;
-}
-
-static void __exit ec168_module_exit(void)
-{
- deb_info("%s:\n", __func__);
- /* deregister this driver from the USB subsystem */
- usb_deregister(&ec168_driver);
-}
-
-module_init(ec168_module_init);
-module_exit(ec168_module_exit);
+module_usb_driver(ec168_driver);
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("E3C EC168 DVB-T USB2.0 driver");
diff --git a/drivers/media/dvb/dvb-usb/friio.c b/drivers/media/dvb/dvb-usb/friio.c
index b092dc2..474a17e 100644
--- a/drivers/media/dvb/dvb-usb/friio.c
+++ b/drivers/media/dvb/dvb-usb/friio.c
@@ -514,28 +514,7 @@ static struct usb_driver friio_driver = {
.id_table = friio_table,
};
-
-/* module stuff */
-static int __init friio_module_init(void)
-{
- int ret;
-
- ret = usb_register(&friio_driver);
- if (ret)
- err("usb_register failed. Error number %d", ret);
-
- return ret;
-}
-
-
-static void __exit friio_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&friio_driver);
-}
-
-module_init(friio_module_init);
-module_exit(friio_module_exit);
+module_usb_driver(friio_driver);
MODULE_AUTHOR("Akihiro Tsukada <tskd2@yahoo.co.jp>");
MODULE_DESCRIPTION("Driver for Friio ISDB-T USB2.0 Receiver");
diff --git a/drivers/media/dvb/dvb-usb/gl861.c b/drivers/media/dvb/dvb-usb/gl861.c
index 63681df..c1f5582 100644
--- a/drivers/media/dvb/dvb-usb/gl861.c
+++ b/drivers/media/dvb/dvb-usb/gl861.c
@@ -209,26 +209,7 @@ static struct usb_driver gl861_driver = {
.id_table = gl861_table,
};
-/* module stuff */
-static int __init gl861_module_init(void)
-{
- int ret;
-
- ret = usb_register(&gl861_driver);
- if (ret)
- err("usb_register failed. Error number %d", ret);
-
- return ret;
-}
-
-static void __exit gl861_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&gl861_driver);
-}
-
-module_init(gl861_module_init);
-module_exit(gl861_module_exit);
+module_usb_driver(gl861_driver);
MODULE_AUTHOR("Carl Lundqvist <comabug@gmail.com>");
MODULE_DESCRIPTION("Driver MSI Mega Sky 580 DVB-T USB2.0 / GL861");
diff --git a/drivers/media/dvb/dvb-usb/gp8psk.c b/drivers/media/dvb/dvb-usb/gp8psk.c
index 5f71284..5d0384d 100644
--- a/drivers/media/dvb/dvb-usb/gp8psk.c
+++ b/drivers/media/dvb/dvb-usb/gp8psk.c
@@ -320,26 +320,7 @@ static struct usb_driver gp8psk_usb_driver = {
.id_table = gp8psk_usb_table,
};
-/* module stuff */
-static int __init gp8psk_usb_module_init(void)
-{
- int result;
- if ((result = usb_register(&gp8psk_usb_driver))) {
- err("usb_register failed. (%d)",result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit gp8psk_usb_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&gp8psk_usb_driver);
-}
-
-module_init(gp8psk_usb_module_init);
-module_exit(gp8psk_usb_module_exit);
+module_usb_driver(gp8psk_usb_driver);
MODULE_AUTHOR("Alan Nisota <alannisota@gamil.com>");
MODULE_DESCRIPTION("Driver for Genpix DVB-S");
diff --git a/drivers/media/dvb/dvb-usb/it913x.c b/drivers/media/dvb/dvb-usb/it913x.c
index 654aa7c..9f01cd7 100644
--- a/drivers/media/dvb/dvb-usb/it913x.c
+++ b/drivers/media/dvb/dvb-usb/it913x.c
@@ -819,26 +819,7 @@ static struct usb_driver it913x_driver = {
.id_table = it913x_table,
};
-/* module stuff */
-static int __init it913x_module_init(void)
-{
- int result = usb_register(&it913x_driver);
- if (result) {
- err("usb_register failed. Error number %d", result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit it913x_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&it913x_driver);
-}
-
-module_init(it913x_module_init);
-module_exit(it913x_module_exit);
+module_usb_driver(it913x_driver);
MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
MODULE_DESCRIPTION("it913x USB 2 Driver");
diff --git a/drivers/media/dvb/dvb-usb/lmedm04.c b/drivers/media/dvb/dvb-usb/lmedm04.c
index 767c87f..b3fe05b 100644
--- a/drivers/media/dvb/dvb-usb/lmedm04.c
+++ b/drivers/media/dvb/dvb-usb/lmedm04.c
@@ -1291,26 +1291,7 @@ static struct usb_driver lme2510_driver = {
.id_table = lme2510_table,
};
-/* module stuff */
-static int __init lme2510_module_init(void)
-{
- int result = usb_register(&lme2510_driver);
- if (result) {
- err("usb_register failed. Error number %d", result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit lme2510_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&lme2510_driver);
-}
-
-module_init(lme2510_module_init);
-module_exit(lme2510_module_exit);
+module_usb_driver(lme2510_driver);
MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0");
diff --git a/drivers/media/dvb/dvb-usb/m920x.c b/drivers/media/dvb/dvb-usb/m920x.c
index a1e1287..288af29 100644
--- a/drivers/media/dvb/dvb-usb/m920x.c
+++ b/drivers/media/dvb/dvb-usb/m920x.c
@@ -1086,27 +1086,7 @@ static struct usb_driver m920x_driver = {
.id_table = m920x_table,
};
-/* module stuff */
-static int __init m920x_module_init(void)
-{
- int ret;
-
- if ((ret = usb_register(&m920x_driver))) {
- err("usb_register failed. Error number %d", ret);
- return ret;
- }
-
- return 0;
-}
-
-static void __exit m920x_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&m920x_driver);
-}
-
-module_init (m920x_module_init);
-module_exit (m920x_module_exit);
+module_usb_driver(m920x_driver);
MODULE_AUTHOR("Aapo Tahkola <aet@rasterburn.org>");
MODULE_DESCRIPTION("DVB Driver for ULI M920x");
diff --git a/drivers/media/dvb/dvb-usb/mxl111sf.c b/drivers/media/dvb/dvb-usb/mxl111sf.c
index 34e6f8d..38ef025 100644
--- a/drivers/media/dvb/dvb-usb/mxl111sf.c
+++ b/drivers/media/dvb/dvb-usb/mxl111sf.c
@@ -1047,24 +1047,7 @@ static struct usb_driver mxl111sf_driver = {
.id_table = mxl111sf_table,
};
-static int __init mxl111sf_module_init(void)
-{
- int result = usb_register(&mxl111sf_driver);
- if (result) {
- err("usb_register failed. Error number %d", result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit mxl111sf_module_exit(void)
-{
- usb_deregister(&mxl111sf_driver);
-}
-
-module_init(mxl111sf_module_init);
-module_exit(mxl111sf_module_exit);
+module_usb_driver(mxl111sf_driver);
MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>");
MODULE_DESCRIPTION("Driver for MaxLinear MxL111SF");
diff --git a/drivers/media/dvb/dvb-usb/nova-t-usb2.c b/drivers/media/dvb/dvb-usb/nova-t-usb2.c
index 21384da..6c55384 100644
--- a/drivers/media/dvb/dvb-usb/nova-t-usb2.c
+++ b/drivers/media/dvb/dvb-usb/nova-t-usb2.c
@@ -225,26 +225,7 @@ static struct usb_driver nova_t_driver = {
.id_table = nova_t_table,
};
-/* module stuff */
-static int __init nova_t_module_init(void)
-{
- int result;
- if ((result = usb_register(&nova_t_driver))) {
- err("usb_register failed. Error number %d",result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit nova_t_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&nova_t_driver);
-}
-
-module_init (nova_t_module_init);
-module_exit (nova_t_module_exit);
+module_usb_driver(nova_t_driver);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
MODULE_DESCRIPTION("Hauppauge WinTV-NOVA-T usb2");
diff --git a/drivers/media/dvb/dvb-usb/opera1.c b/drivers/media/dvb/dvb-usb/opera1.c
index 98fd9a6..c8a9504 100644
--- a/drivers/media/dvb/dvb-usb/opera1.c
+++ b/drivers/media/dvb/dvb-usb/opera1.c
@@ -574,22 +574,7 @@ static struct usb_driver opera1_driver = {
.id_table = opera1_table,
};
-static int __init opera1_module_init(void)
-{
- int result = 0;
- if ((result = usb_register(&opera1_driver))) {
- err("usb_register failed. Error number %d", result);
- }
- return result;
-}
-
-static void __exit opera1_module_exit(void)
-{
- usb_deregister(&opera1_driver);
-}
-
-module_init(opera1_module_init);
-module_exit(opera1_module_exit);
+module_usb_driver(opera1_driver);
MODULE_AUTHOR("Mario Hlawitschka (c) dh1pa@amsat.org");
MODULE_AUTHOR("Marco Gittler (c) g.marco@freenet.de");
diff --git a/drivers/media/dvb/dvb-usb/pctv452e.c b/drivers/media/dvb/dvb-usb/pctv452e.c
index f9aec5c..f526eb0 100644
--- a/drivers/media/dvb/dvb-usb/pctv452e.c
+++ b/drivers/media/dvb/dvb-usb/pctv452e.c
@@ -1055,22 +1055,7 @@ static struct usb_driver pctv452e_usb_driver = {
.id_table = pctv452e_usb_table,
};
-static int __init pctv452e_usb_init(void)
-{
- int ret = usb_register(&pctv452e_usb_driver);
- if (ret)
- err("%s: usb_register failed! Error %d", __FILE__, ret);
-
- return ret;
-}
-
-static void __exit pctv452e_usb_exit(void)
-{
- usb_deregister(&pctv452e_usb_driver);
-}
-
-module_init(pctv452e_usb_init);
-module_exit(pctv452e_usb_exit);
+module_usb_driver(pctv452e_usb_driver);
MODULE_AUTHOR("Dominik Kuhlen <dkuhlen@gmx.net>");
MODULE_AUTHOR("Andre Weidemann <Andre.Weidemann@web.de>");
diff --git a/drivers/media/dvb/dvb-usb/technisat-usb2.c b/drivers/media/dvb/dvb-usb/technisat-usb2.c
index 0998fe9..acefaa8 100644
--- a/drivers/media/dvb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/dvb/dvb-usb/technisat-usb2.c
@@ -781,25 +781,7 @@ static struct usb_driver technisat_usb2_driver = {
.id_table = technisat_usb2_id_table,
};
-/* module stuff */
-static int __init technisat_usb2_module_init(void)
-{
- int result = usb_register(&technisat_usb2_driver);
- if (result) {
- err("usb_register failed. Code %d", result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit technisat_usb2_module_exit(void)
-{
- usb_deregister(&technisat_usb2_driver);
-}
-
-module_init(technisat_usb2_module_init);
-module_exit(technisat_usb2_module_exit);
+module_usb_driver(technisat_usb2_driver);
MODULE_AUTHOR("Patrick Boettcher <pboettcher@kernellabs.com>");
MODULE_DESCRIPTION("Driver for Technisat DVB-S/S2 USB 2.0 device");
diff --git a/drivers/media/dvb/dvb-usb/ttusb2.c b/drivers/media/dvb/dvb-usb/ttusb2.c
index bc50356..e53a106 100644
--- a/drivers/media/dvb/dvb-usb/ttusb2.c
+++ b/drivers/media/dvb/dvb-usb/ttusb2.c
@@ -812,26 +812,7 @@ static struct usb_driver ttusb2_driver = {
.id_table = ttusb2_table,
};
-/* module stuff */
-static int __init ttusb2_module_init(void)
-{
- int result;
- if ((result = usb_register(&ttusb2_driver))) {
- err("usb_register failed. Error number %d",result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit ttusb2_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&ttusb2_driver);
-}
-
-module_init (ttusb2_module_init);
-module_exit (ttusb2_module_exit);
+module_usb_driver(ttusb2_driver);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
MODULE_DESCRIPTION("Driver for Pinnacle PCTV 400e DVB-S USB2.0");
diff --git a/drivers/media/dvb/dvb-usb/umt-010.c b/drivers/media/dvb/dvb-usb/umt-010.c
index 463673a..9b04229 100644
--- a/drivers/media/dvb/dvb-usb/umt-010.c
+++ b/drivers/media/dvb/dvb-usb/umt-010.c
@@ -143,26 +143,7 @@ static struct usb_driver umt_driver = {
.id_table = umt_table,
};
-/* module stuff */
-static int __init umt_module_init(void)
-{
- int result;
- if ((result = usb_register(&umt_driver))) {
- err("usb_register failed. Error number %d",result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit umt_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&umt_driver);
-}
-
-module_init (umt_module_init);
-module_exit (umt_module_exit);
+module_usb_driver(umt_driver);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
MODULE_DESCRIPTION("Driver for HanfTek UMT 010 USB2.0 DVB-T device");
diff --git a/drivers/media/dvb/dvb-usb/vp702x.c b/drivers/media/dvb/dvb-usb/vp702x.c
index 45e31f2..07c673a 100644
--- a/drivers/media/dvb/dvb-usb/vp702x.c
+++ b/drivers/media/dvb/dvb-usb/vp702x.c
@@ -436,26 +436,7 @@ static struct usb_driver vp702x_usb_driver = {
.id_table = vp702x_usb_table,
};
-/* module stuff */
-static int __init vp702x_usb_module_init(void)
-{
- int result;
- if ((result = usb_register(&vp702x_usb_driver))) {
- err("usb_register failed. (%d)",result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit vp702x_usb_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&vp702x_usb_driver);
-}
-
-module_init(vp702x_usb_module_init);
-module_exit(vp702x_usb_module_exit);
+module_usb_driver(vp702x_usb_driver);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
MODULE_DESCRIPTION("Driver for Twinhan StarBox DVB-S USB2.0 and clones");
diff --git a/drivers/media/dvb/dvb-usb/vp7045.c b/drivers/media/dvb/dvb-usb/vp7045.c
index 90873af..d750724 100644
--- a/drivers/media/dvb/dvb-usb/vp7045.c
+++ b/drivers/media/dvb/dvb-usb/vp7045.c
@@ -294,26 +294,7 @@ static struct usb_driver vp7045_usb_driver = {
.id_table = vp7045_usb_table,
};
-/* module stuff */
-static int __init vp7045_usb_module_init(void)
-{
- int result;
- if ((result = usb_register(&vp7045_usb_driver))) {
- err("usb_register failed. (%d)",result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit vp7045_usb_module_exit(void)
-{
- /* deregister this driver from the USB subsystem */
- usb_deregister(&vp7045_usb_driver);
-}
-
-module_init(vp7045_usb_module_init);
-module_exit(vp7045_usb_module_exit);
+module_usb_driver(vp7045_usb_driver);
MODULE_AUTHOR("Patrick Boettcher <patrick.boettcher@desy.de>");
MODULE_DESCRIPTION("Driver for Twinhan MagicBox/Alpha and DNTV tinyUSB2 DVB-T USB2.0");
diff --git a/drivers/media/dvb/siano/smsusb.c b/drivers/media/dvb/siano/smsusb.c
index 51c7121..b1fe513 100644
--- a/drivers/media/dvb/siano/smsusb.c
+++ b/drivers/media/dvb/siano/smsusb.c
@@ -557,26 +557,7 @@ static struct usb_driver smsusb_driver = {
.resume = smsusb_resume,
};
-static int __init smsusb_module_init(void)
-{
- int rc = usb_register(&smsusb_driver);
- if (rc)
- sms_err("usb_register failed. Error number %d", rc);
-
- sms_debug("");
-
- return rc;
-}
-
-static void __exit smsusb_module_exit(void)
-{
- /* Regular USB Cleanup */
- usb_deregister(&smsusb_driver);
- sms_info("end");
-}
-
-module_init(smsusb_module_init);
-module_exit(smsusb_module_exit);
+module_usb_driver(smsusb_driver);
MODULE_DESCRIPTION("Driver for the Siano SMS1xxx USB dongle");
MODULE_AUTHOR("Siano Mobile Silicon, INC. (uris@siano-ms.com)");
diff --git a/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c b/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
index 8aa0114..5b682cc 100644
--- a/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
+++ b/drivers/media/dvb/ttusb-budget/dvb-ttusb-budget.c
@@ -1808,26 +1808,7 @@ static struct usb_driver ttusb_driver = {
.id_table = ttusb_table,
};
-static int __init ttusb_init(void)
-{
- int err;
-
- if ((err = usb_register(&ttusb_driver)) < 0) {
- printk("%s: usb_register failed! Error number %d",
- __FILE__, err);
- return err;
- }
-
- return 0;
-}
-
-static void __exit ttusb_exit(void)
-{
- usb_deregister(&ttusb_driver);
-}
-
-module_init(ttusb_init);
-module_exit(ttusb_exit);
+module_usb_driver(ttusb_driver);
MODULE_AUTHOR("Holger Waechtler <holger@convergence.de>");
MODULE_DESCRIPTION("TTUSB DVB Driver");
diff --git a/drivers/media/dvb/ttusb-dec/ttusb_dec.c b/drivers/media/dvb/ttusb-dec/ttusb_dec.c
index f893bff..504c812 100644
--- a/drivers/media/dvb/ttusb-dec/ttusb_dec.c
+++ b/drivers/media/dvb/ttusb-dec/ttusb_dec.c
@@ -1756,26 +1756,7 @@ static struct usb_driver ttusb_dec_driver = {
.id_table = ttusb_dec_table,
};
-static int __init ttusb_dec_init(void)
-{
- int result;
-
- if ((result = usb_register(&ttusb_dec_driver)) < 0) {
- printk("%s: initialisation failed: error %d.\n", __func__,
- result);
- return result;
- }
-
- return 0;
-}
-
-static void __exit ttusb_dec_exit(void)
-{
- usb_deregister(&ttusb_dec_driver);
-}
-
-module_init(ttusb_dec_init);
-module_exit(ttusb_dec_exit);
+module_usb_driver(ttusb_dec_driver);
MODULE_AUTHOR("Alex Woods <linux-dvb@giblets.org>");
MODULE_DESCRIPTION(DRIVER_NAME);
diff --git a/drivers/media/radio/dsbr100.c b/drivers/media/radio/dsbr100.c
index 25e58cb..f36905b 100644
--- a/drivers/media/radio/dsbr100.c
+++ b/drivers/media/radio/dsbr100.c
@@ -624,21 +624,7 @@ static int usb_dsbr100_probe(struct usb_interface *intf,
return 0;
}
-static int __init dsbr100_init(void)
-{
- int retval = usb_register(&usb_dsbr100_driver);
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
- return retval;
-}
-
-static void __exit dsbr100_exit(void)
-{
- usb_deregister(&usb_dsbr100_driver);
-}
-
-module_init (dsbr100_init);
-module_exit (dsbr100_exit);
+module_usb_driver(usb_dsbr100_driver);
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
diff --git a/drivers/media/radio/radio-gemtek.c b/drivers/media/radio/radio-gemtek.c
index edadc84..36ce061 100644
--- a/drivers/media/radio/radio-gemtek.c
+++ b/drivers/media/radio/radio-gemtek.c
@@ -47,11 +47,11 @@ MODULE_VERSION("0.0.4");
#endif
static int io = CONFIG_RADIO_GEMTEK_PORT;
-static int probe = CONFIG_RADIO_GEMTEK_PROBE;
-static int hardmute;
-static int shutdown = 1;
-static int keepmuted = 1;
-static int initmute = 1;
+static bool probe = CONFIG_RADIO_GEMTEK_PROBE;
+static bool hardmute;
+static bool shutdown = 1;
+static bool keepmuted = 1;
+static bool initmute = 1;
static int radio_nr = -1;
module_param(io, int, 0444);
diff --git a/drivers/media/radio/radio-miropcm20.c b/drivers/media/radio/radio-miropcm20.c
index 3fb76e3..87c1ee1 100644
--- a/drivers/media/radio/radio-miropcm20.c
+++ b/drivers/media/radio/radio-miropcm20.c
@@ -23,7 +23,7 @@ static int radio_nr = -1;
module_param(radio_nr, int, 0);
MODULE_PARM_DESC(radio_nr, "Set radio device number (/dev/radioX). Default: -1 (autodetect)");
-static int mono;
+static bool mono;
module_param(mono, bool, 0);
MODULE_PARM_DESC(mono, "Force tuner into mono mode.");
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
index 1742bd8..a860a72 100644
--- a/drivers/media/radio/radio-mr800.c
+++ b/drivers/media/radio/radio-mr800.c
@@ -659,25 +659,4 @@ err:
return retval;
}
-static int __init amradio_init(void)
-{
- int retval = usb_register(&usb_amradio_driver);
-
- pr_info(KBUILD_MODNAME
- ": version " DRIVER_VERSION " " DRIVER_DESC "\n");
-
- if (retval)
- pr_err(KBUILD_MODNAME
- ": usb_register failed. Error number %d\n", retval);
-
- return retval;
-}
-
-static void __exit amradio_exit(void)
-{
- usb_deregister(&usb_amradio_driver);
-}
-
-module_init(amradio_init);
-module_exit(amradio_exit);
-
+module_usb_driver(usb_amradio_driver);
diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
index a6ad707..b7debb6 100644
--- a/drivers/media/radio/si470x/radio-si470x-usb.c
+++ b/drivers/media/radio/si470x/radio-si470x-usb.c
@@ -861,33 +861,7 @@ static struct usb_driver si470x_usb_driver = {
.supports_autosuspend = 1,
};
-
-
-/**************************************************************************
- * Module Interface
- **************************************************************************/
-
-/*
- * si470x_module_init - module init
- */
-static int __init si470x_module_init(void)
-{
- printk(KERN_INFO DRIVER_DESC ", Version " DRIVER_VERSION "\n");
- return usb_register(&si470x_usb_driver);
-}
-
-
-/*
- * si470x_module_exit - module exit
- */
-static void __exit si470x_module_exit(void)
-{
- usb_deregister(&si470x_usb_driver);
-}
-
-
-module_init(si470x_module_init);
-module_exit(si470x_module_exit);
+module_usb_driver(si470x_usb_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/media/rc/ati_remote.c b/drivers/media/rc/ati_remote.c
index 01bb8da..baf907b 100644
--- a/drivers/media/rc/ati_remote.c
+++ b/drivers/media/rc/ati_remote.c
@@ -899,38 +899,7 @@ static void ati_remote_disconnect(struct usb_interface *interface)
kfree(ati_remote);
}
-/*
- * ati_remote_init
- */
-static int __init ati_remote_init(void)
-{
- int result;
-
- result = usb_register(&ati_remote_driver);
- if (result)
- printk(KERN_ERR KBUILD_MODNAME
- ": usb_register error #%d\n", result);
- else
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
-
- return result;
-}
-
-/*
- * ati_remote_exit
- */
-static void __exit ati_remote_exit(void)
-{
- usb_deregister(&ati_remote_driver);
-}
-
-/*
- * module specification
- */
-
-module_init(ati_remote_init);
-module_exit(ati_remote_exit);
+module_usb_driver(ati_remote_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/media/rc/ene_ir.c b/drivers/media/rc/ene_ir.c
index cf10ecf..860c112 100644
--- a/drivers/media/rc/ene_ir.c
+++ b/drivers/media/rc/ene_ir.c
@@ -324,7 +324,7 @@ static int ene_rx_get_sample_reg(struct ene_device *dev)
return dev->extra_buf2_address + r_pointer;
}
- dbg("attempt to read beyong ring bufer end");
+ dbg("attempt to read beyond ring buffer end");
return 0;
}
diff --git a/drivers/media/rc/ene_ir.h b/drivers/media/rc/ene_ir.h
index fd108d9..6f978e8 100644
--- a/drivers/media/rc/ene_ir.h
+++ b/drivers/media/rc/ene_ir.h
@@ -227,7 +227,7 @@ struct ene_device {
/* TX buffer */
unsigned *tx_buffer; /* input samples buffer*/
- int tx_pos; /* position in that bufer */
+ int tx_pos; /* position in that buffer */
int tx_len; /* current len of tx buffer */
int tx_done; /* done transmitting */
/* one more sample pending*/
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 3f175eb..7f26fdf 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -2476,23 +2476,4 @@ static int imon_resume(struct usb_interface *intf)
return rc;
}
-static int __init imon_init(void)
-{
- int rc;
-
- rc = usb_register(&imon_driver);
- if (rc) {
- pr_err("usb register failed(%d)\n", rc);
- rc = -ENODEV;
- }
-
- return rc;
-}
-
-static void __exit imon_exit(void)
-{
- usb_deregister(&imon_driver);
-}
-
-module_init(imon_init);
-module_exit(imon_exit);
+module_usb_driver(imon_driver);
diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
index 27997a9..ca12d32 100644
--- a/drivers/media/rc/lirc_dev.c
+++ b/drivers/media/rc/lirc_dev.c
@@ -38,7 +38,7 @@
#include <media/lirc.h>
#include <media/lirc_dev.h>
-static int debug;
+static bool debug;
#define IRCTL_DEV_NAME "BaseRemoteCtl"
#define NOPLUG -1
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 60d3c1e..21105bf 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -156,9 +156,9 @@
/* module parameters */
#ifdef CONFIG_USB_DEBUG
-static int debug = 1;
+static bool debug = 1;
#else
-static int debug;
+static bool debug;
#endif
#define mce_dbg(dev, fmt, ...) \
@@ -1448,25 +1448,7 @@ static struct usb_driver mceusb_dev_driver = {
.id_table = mceusb_dev_table
};
-static int __init mceusb_dev_init(void)
-{
- int ret;
-
- ret = usb_register(&mceusb_dev_driver);
- if (ret < 0)
- printk(KERN_ERR DRIVER_NAME
- ": usb register failed, result = %d\n", ret);
-
- return ret;
-}
-
-static void __exit mceusb_dev_exit(void)
-{
- usb_deregister(&mceusb_dev_driver);
-}
-
-module_init(mceusb_dev_init);
-module_exit(mceusb_dev_exit);
+module_usb_driver(mceusb_dev_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 0ea55ea..f6a930b 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -715,7 +715,7 @@ static void ir_close(struct input_dev *idev)
}
/* class for /sys/class/rc */
-static char *ir_devnode(struct device *dev, mode_t *mode)
+static char *ir_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "rc/%s", dev_name(dev));
}
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 9bff23c..ad95c67 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -1284,25 +1284,7 @@ static struct usb_driver redrat3_dev_driver = {
.id_table = redrat3_dev_table
};
-static int __init redrat3_dev_init(void)
-{
- int ret;
-
- ret = usb_register(&redrat3_dev_driver);
- if (ret < 0)
- pr_err(DRIVER_NAME
- ": usb register failed, result = %d\n", ret);
-
- return ret;
-}
-
-static void __exit redrat3_dev_exit(void)
-{
- usb_deregister(&redrat3_dev_driver);
-}
-
-module_init(redrat3_dev_init);
-module_exit(redrat3_dev_exit);
+module_usb_driver(redrat3_dev_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/media/rc/streamzap.c b/drivers/media/rc/streamzap.c
index e435d94..d6f4bfe 100644
--- a/drivers/media/rc/streamzap.c
+++ b/drivers/media/rc/streamzap.c
@@ -43,9 +43,9 @@
#define DRIVER_DESC "Streamzap Remote Control driver"
#ifdef CONFIG_USB_DEBUG
-static int debug = 1;
+static bool debug = 1;
#else
-static int debug;
+static bool debug;
#endif
#define USB_STREAMZAP_VENDOR_ID 0x0e9c
@@ -523,33 +523,7 @@ static int streamzap_resume(struct usb_interface *intf)
return 0;
}
-/**
- * streamzap_init
- */
-static int __init streamzap_init(void)
-{
- int ret;
-
- /* register this driver with the USB subsystem */
- ret = usb_register(&streamzap_driver);
- if (ret < 0)
- printk(KERN_ERR DRIVER_NAME ": usb register failed, "
- "result = %d\n", ret);
-
- return ret;
-}
-
-/**
- * streamzap_exit
- */
-static void __exit streamzap_exit(void)
-{
- usb_deregister(&streamzap_driver);
-}
-
-
-module_init(streamzap_init);
-module_exit(streamzap_exit);
+module_usb_driver(streamzap_driver);
MODULE_AUTHOR("Jarod Wilson <jarod@wilsonet.com>");
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/media/rc/winbond-cir.c b/drivers/media/rc/winbond-cir.c
index 13f54b5..b09c5fa 100644
--- a/drivers/media/rc/winbond-cir.c
+++ b/drivers/media/rc/winbond-cir.c
@@ -226,11 +226,11 @@ module_param(protocol, uint, 0444);
MODULE_PARM_DESC(protocol, "IR protocol to use for the power-on command "
"(0 = RC5, 1 = NEC, 2 = RC6A, default)");
-static int invert; /* default = 0 */
+static bool invert; /* default = 0 */
module_param(invert, bool, 0444);
MODULE_PARM_DESC(invert, "Invert the signal from the IR receiver");
-static int txandrx; /* default = 0 */
+static bool txandrx; /* default = 0 */
module_param(txandrx, bool, 0444);
MODULE_PARM_DESC(invert, "Allow simultaneous TX and RX");
@@ -1176,6 +1176,6 @@ wbcir_exit(void)
module_init(wbcir_init);
module_exit(wbcir_exit);
-MODULE_AUTHOR("David Hrdeman <david@hardeman.nu>");
+MODULE_AUTHOR("David Härdeman <david@hardeman.nu>");
MODULE_DESCRIPTION("Winbond SuperI/O Consumer IR Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/c-qcam.c b/drivers/media/video/c-qcam.c
index cd8ff04..fda32f5 100644
--- a/drivers/media/video/c-qcam.c
+++ b/drivers/media/video/c-qcam.c
@@ -72,7 +72,7 @@ struct qcam {
static int parport[MAX_CAMS] = { [1 ... MAX_CAMS-1] = -1 };
static int probe = 2;
-static int force_rgb;
+static bool force_rgb;
static int video_nr = -1;
/* FIXME: parport=auto would never have worked, surely? --RR */
diff --git a/drivers/media/video/cs5345.c b/drivers/media/video/cs5345.c
index 5909f25..1d64af9 100644
--- a/drivers/media/video/cs5345.c
+++ b/drivers/media/video/cs5345.c
@@ -31,7 +31,7 @@ MODULE_DESCRIPTION("i2c device driver for cs5345 Audio ADC");
MODULE_AUTHOR("Hans Verkuil");
MODULE_LICENSE("GPL");
-static int debug;
+static bool debug;
module_param(debug, bool, 0644);
diff --git a/drivers/media/video/cs53l32a.c b/drivers/media/video/cs53l32a.c
index d93e5ab..51c5b9a 100644
--- a/drivers/media/video/cs53l32a.c
+++ b/drivers/media/video/cs53l32a.c
@@ -35,7 +35,7 @@ MODULE_DESCRIPTION("i2c device driver for cs53l32a Audio ADC");
MODULE_AUTHOR("Martin Vaughan");
MODULE_LICENSE("GPL");
-static int debug;
+static bool debug;
module_param(debug, bool, 0644);
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index c6ff32a..349bd9c 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -75,7 +75,7 @@ static int radio[CX18_MAX_CARDS] = { -1, -1, -1, -1, -1, -1, -1, -1,
-1, -1, -1, -1, -1, -1, -1, -1 };
static unsigned cardtype_c = 1;
static unsigned tuner_c = 1;
-static unsigned radio_c = 1;
+static bool radio_c = 1;
static char pal[] = "--";
static char secam[] = "--";
static char ntsc[] = "-";
diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c
index 7d8edb4..875a7ce 100644
--- a/drivers/media/video/cx231xx/cx231xx-cards.c
+++ b/drivers/media/video/cx231xx/cx231xx-cards.c
@@ -1379,26 +1379,4 @@ static struct usb_driver cx231xx_usb_driver = {
.id_table = cx231xx_id_table,
};
-static int __init cx231xx_module_init(void)
-{
- int result;
-
- printk(KERN_INFO DRIVER_NAME " v4l2 driver loaded.\n");
-
- /* register this driver with the USB subsystem */
- result = usb_register(&cx231xx_usb_driver);
- if (result)
- cx231xx_err(DRIVER_NAME
- " usb_register failed. Error number %d.\n", result);
-
- return result;
-}
-
-static void __exit cx231xx_module_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&cx231xx_usb_driver);
-}
-
-module_init(cx231xx_module_init);
-module_exit(cx231xx_module_exit);
+module_usb_driver(cx231xx_usb_driver);
diff --git a/drivers/media/video/cx25821/cx25821-alsa.c b/drivers/media/video/cx25821/cx25821-alsa.c
index 6142ae2..03cfac4 100644
--- a/drivers/media/video/cx25821/cx25821-alsa.c
+++ b/drivers/media/video/cx25821/cx25821-alsa.c
@@ -102,7 +102,7 @@ struct cx25821_audio_dev {
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-static int enable[SNDRV_CARDS] = { 1, [1 ... (SNDRV_CARDS - 1)] = 1 };
+static bool enable[SNDRV_CARDS] = { 1, [1 ... (SNDRV_CARDS - 1)] = 1 };
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable cx25821 soundcard. default enabled.");
diff --git a/drivers/media/video/cx88/cx88-alsa.c b/drivers/media/video/cx88/cx88-alsa.c
index 68d1240..04bf662 100644
--- a/drivers/media/video/cx88/cx88-alsa.c
+++ b/drivers/media/video/cx88/cx88-alsa.c
@@ -96,7 +96,7 @@ typedef struct cx88_audio_dev snd_cx88_card_t;
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
static const char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */
-static int enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 1};
+static bool enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 1};
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable cx88x soundcard. default enabled.");
diff --git a/drivers/media/video/davinci/vpif.h b/drivers/media/video/davinci/vpif.h
index 10550bd..25036cb 100644
--- a/drivers/media/video/davinci/vpif.h
+++ b/drivers/media/video/davinci/vpif.h
@@ -20,6 +20,7 @@
#include <linux/videodev2.h>
#include <mach/hardware.h>
#include <mach/dm646x.h>
+#include <media/davinci/vpif_types.h>
/* Maximum channel allowed */
#define VPIF_NUM_CHANNELS (4)
diff --git a/drivers/media/video/davinci/vpif_capture.h b/drivers/media/video/davinci/vpif_capture.h
index 064550f..a693d4e 100644
--- a/drivers/media/video/davinci/vpif_capture.h
+++ b/drivers/media/video/davinci/vpif_capture.h
@@ -27,7 +27,7 @@
#include <media/v4l2-device.h>
#include <media/videobuf-core.h>
#include <media/videobuf-dma-contig.h>
-#include <mach/dm646x.h>
+#include <media/davinci/vpif_types.h>
#include "vpif.h"
diff --git a/drivers/media/video/davinci/vpif_display.h b/drivers/media/video/davinci/vpif_display.h
index 5d1936d..56879d1 100644
--- a/drivers/media/video/davinci/vpif_display.h
+++ b/drivers/media/video/davinci/vpif_display.h
@@ -22,6 +22,7 @@
#include <media/v4l2-device.h>
#include <media/videobuf-core.h>
#include <media/videobuf-dma-contig.h>
+#include <media/davinci/vpif_types.h>
#include "vpif.h"
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 9deb569..4561cd8 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -3405,26 +3405,4 @@ static struct usb_driver em28xx_usb_driver = {
.id_table = em28xx_id_table,
};
-static int __init em28xx_module_init(void)
-{
- int result;
-
- /* register this driver with the USB subsystem */
- result = usb_register(&em28xx_usb_driver);
- if (result)
- em28xx_err(DRIVER_NAME
- " usb_register failed. Error number %d.\n", result);
-
- printk(KERN_INFO DRIVER_NAME " driver loaded\n");
-
- return result;
-}
-
-static void __exit em28xx_module_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&em28xx_usb_driver);
-}
-
-module_init(em28xx_module_init);
-module_exit(em28xx_module_exit);
+module_usb_driver(em28xx_usb_driver);
diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c
index d3777c8..5539f09 100644
--- a/drivers/media/video/et61x251/et61x251_core.c
+++ b/drivers/media/video/et61x251/et61x251_core.c
@@ -76,8 +76,8 @@ MODULE_PARM_DESC(video_nr,
"\none and for every other camera."
"\n");
-static short force_munmap[] = {[0 ... ET61X251_MAX_DEVICES-1] =
- ET61X251_FORCE_MUNMAP};
+static bool force_munmap[] = {[0 ... ET61X251_MAX_DEVICES-1] =
+ ET61X251_FORCE_MUNMAP};
module_param_array(force_munmap, bool, NULL, 0444);
MODULE_PARM_DESC(force_munmap,
"\n<0|1[,...]> Force the application to unmap previously"
@@ -2680,27 +2680,4 @@ static struct usb_driver et61x251_usb_driver = {
.disconnect = et61x251_usb_disconnect,
};
-/*****************************************************************************/
-
-static int __init et61x251_module_init(void)
-{
- int err = 0;
-
- KDBG(2, ET61X251_MODULE_NAME " v" ET61X251_MODULE_VERSION);
- KDBG(3, ET61X251_MODULE_AUTHOR);
-
- if ((err = usb_register(&et61x251_usb_driver)))
- KDBG(1, "usb_register() failed");
-
- return err;
-}
-
-
-static void __exit et61x251_module_exit(void)
-{
- usb_deregister(&et61x251_usb_driver);
-}
-
-
-module_init(et61x251_module_init);
-module_exit(et61x251_module_exit);
+module_usb_driver(et61x251_usb_driver);
diff --git a/drivers/media/video/gspca/benq.c b/drivers/media/video/gspca/benq.c
index 390291c..9769f17 100644
--- a/drivers/media/video/gspca/benq.c
+++ b/drivers/media/video/gspca/benq.c
@@ -291,15 +291,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/conex.c b/drivers/media/video/gspca/conex.c
index 4c56dbe..ea17b5d 100644
--- a/drivers/media/video/gspca/conex.c
+++ b/drivers/media/video/gspca/conex.c
@@ -1067,15 +1067,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/cpia1.c b/drivers/media/video/gspca/cpia1.c
index f9b86b2..8f33bbd 100644
--- a/drivers/media/video/gspca/cpia1.c
+++ b/drivers/media/video/gspca/cpia1.c
@@ -2132,15 +2132,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/etoms.c b/drivers/media/video/gspca/etoms.c
index 0357d6d..81a4adb 100644
--- a/drivers/media/video/gspca/etoms.c
+++ b/drivers/media/video/gspca/etoms.c
@@ -895,16 +895,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/finepix.c b/drivers/media/video/gspca/finepix.c
index ea48200..0107513 100644
--- a/drivers/media/video/gspca/finepix.c
+++ b/drivers/media/video/gspca/finepix.c
@@ -290,16 +290,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/gl860/gl860.c b/drivers/media/video/gspca/gl860/gl860.c
index d74da8e..c84e260 100644
--- a/drivers/media/video/gspca/gl860/gl860.c
+++ b/drivers/media/video/gspca/gl860/gl860.c
@@ -523,22 +523,7 @@ static struct usb_driver sd_driver = {
/*====================== Init and Exit module functions ====================*/
-static int __init sd_mod_init(void)
-{
- PDEBUG(D_PROBE, "driver startup - version %s", DRIVER_VERSION);
-
- if (usb_register(&sd_driver) < 0)
- return -1;
- return 0;
-}
-
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
/*==========================================================================*/
diff --git a/drivers/media/video/gspca/jeilinj.c b/drivers/media/video/gspca/jeilinj.c
index 8e3dabe..5ab3f7e 100644
--- a/drivers/media/video/gspca/jeilinj.c
+++ b/drivers/media/video/gspca/jeilinj.c
@@ -582,16 +582,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/kinect.c b/drivers/media/video/gspca/kinect.c
index 4fe51fd..e8e8f2f 100644
--- a/drivers/media/video/gspca/kinect.c
+++ b/drivers/media/video/gspca/kinect.c
@@ -413,16 +413,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/konica.c b/drivers/media/video/gspca/konica.c
index 494b05e..f0c0d74 100644
--- a/drivers/media/video/gspca/konica.c
+++ b/drivers/media/video/gspca/konica.c
@@ -631,15 +631,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/m5602/m5602_core.c b/drivers/media/video/gspca/m5602/m5602_core.c
index 67533e5..0c44936 100644
--- a/drivers/media/video/gspca/m5602/m5602_core.c
+++ b/drivers/media/video/gspca/m5602/m5602_core.c
@@ -27,8 +27,8 @@
/* Kernel module parameters */
int force_sensor;
-static int dump_bridge;
-int dump_sensor;
+static bool dump_bridge;
+bool dump_sensor;
static const struct usb_device_id m5602_table[] = {
{USB_DEVICE(0x0402, 0x5602)},
@@ -404,19 +404,7 @@ static struct usb_driver sd_driver = {
.disconnect = m5602_disconnect
};
-/* -- module insert / remove -- */
-static int __init mod_m5602_init(void)
-{
- return usb_register(&sd_driver);
-}
-
-static void __exit mod_m5602_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(mod_m5602_init);
-module_exit(mod_m5602_exit);
+module_usb_driver(sd_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
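The dump_bridge/dump_sensor switch from int to bool (mirrored in the sensor headers just below and in many drivers later in this series) tracks the stricter module parameter type checking added around kernel 3.3: a parameter declared with module_param(name, bool, ...) must now be backed by a real bool. A minimal sketch of the resulting pattern, using a hypothetical dump_regs parameter:

	#include <linux/module.h>

	static bool dump_regs;		/* was: static int dump_regs; */
	module_param(dump_regs, bool, 0644);
	MODULE_PARM_DESC(dump_regs, "Dump chip registers at probe time");

Any extern declaration of such a variable in a header has to change in lockstep, which is why the m5602_*.h files below are touched as well.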
diff --git a/drivers/media/video/gspca/m5602/m5602_mt9m111.h b/drivers/media/video/gspca/m5602/m5602_mt9m111.h
index b1f0c49..8c672b5 100644
--- a/drivers/media/video/gspca/m5602/m5602_mt9m111.h
+++ b/drivers/media/video/gspca/m5602/m5602_mt9m111.h
@@ -106,7 +106,7 @@
/* Kernel module parameters */
extern int force_sensor;
-extern int dump_sensor;
+extern bool dump_sensor;
int mt9m111_probe(struct sd *sd);
int mt9m111_init(struct sd *sd);
diff --git a/drivers/media/video/gspca/m5602/m5602_ov7660.h b/drivers/media/video/gspca/m5602/m5602_ov7660.h
index 2efd607..2b6a13b 100644
--- a/drivers/media/video/gspca/m5602/m5602_ov7660.h
+++ b/drivers/media/video/gspca/m5602/m5602_ov7660.h
@@ -86,7 +86,7 @@
/* Kernel module parameters */
extern int force_sensor;
-extern int dump_sensor;
+extern bool dump_sensor;
int ov7660_probe(struct sd *sd);
int ov7660_init(struct sd *sd);
diff --git a/drivers/media/video/gspca/m5602/m5602_ov9650.h b/drivers/media/video/gspca/m5602/m5602_ov9650.h
index da9a129..f7aa5bf 100644
--- a/drivers/media/video/gspca/m5602/m5602_ov9650.h
+++ b/drivers/media/video/gspca/m5602/m5602_ov9650.h
@@ -135,7 +135,7 @@
/* Kernel module parameters */
extern int force_sensor;
-extern int dump_sensor;
+extern bool dump_sensor;
int ov9650_probe(struct sd *sd);
int ov9650_init(struct sd *sd);
diff --git a/drivers/media/video/gspca/m5602/m5602_po1030.h b/drivers/media/video/gspca/m5602/m5602_po1030.h
index 3383595..81a2bcb 100644
--- a/drivers/media/video/gspca/m5602/m5602_po1030.h
+++ b/drivers/media/video/gspca/m5602/m5602_po1030.h
@@ -147,7 +147,7 @@
/* Kernel module parameters */
extern int force_sensor;
-extern int dump_sensor;
+extern bool dump_sensor;
int po1030_probe(struct sd *sd);
int po1030_init(struct sd *sd);
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k4aa.h b/drivers/media/video/gspca/m5602/m5602_s5k4aa.h
index 8cc7a3f..8e0035e 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k4aa.h
+++ b/drivers/media/video/gspca/m5602/m5602_s5k4aa.h
@@ -65,7 +65,7 @@
/* Kernel module parameters */
extern int force_sensor;
-extern int dump_sensor;
+extern bool dump_sensor;
int s5k4aa_probe(struct sd *sd);
int s5k4aa_init(struct sd *sd);
diff --git a/drivers/media/video/gspca/m5602/m5602_s5k83a.h b/drivers/media/video/gspca/m5602/m5602_s5k83a.h
index 80a63a2..7995224 100644
--- a/drivers/media/video/gspca/m5602/m5602_s5k83a.h
+++ b/drivers/media/video/gspca/m5602/m5602_s5k83a.h
@@ -41,7 +41,7 @@
/* Kernel module parameters */
extern int force_sensor;
-extern int dump_sensor;
+extern bool dump_sensor;
int s5k83a_probe(struct sd *sd);
int s5k83a_init(struct sd *sd);
diff --git a/drivers/media/video/gspca/mars.c b/drivers/media/video/gspca/mars.c
index edd3279..b023146 100644
--- a/drivers/media/video/gspca/mars.c
+++ b/drivers/media/video/gspca/mars.c
@@ -516,15 +516,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/mr97310a.c b/drivers/media/video/gspca/mr97310a.c
index 473e813..d73e5bd 100644
--- a/drivers/media/video/gspca/mr97310a.c
+++ b/drivers/media/video/gspca/mr97310a.c
@@ -1259,15 +1259,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/nw80x.c b/drivers/media/video/gspca/nw80x.c
index 09f7db8..7167cac 100644
--- a/drivers/media/video/gspca/nw80x.c
+++ b/drivers/media/video/gspca/nw80x.c
@@ -2118,18 +2118,7 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
module_param(webcam, int, 0644);
MODULE_PARM_DESC(webcam,
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index 1075668..739e8a2 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -5055,18 +5055,7 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
module_param(frame_rate, int, 0644);
MODULE_PARM_DESC(frame_rate, "Frame rate (5, 10, 15, 20 or 30 fps)");
diff --git a/drivers/media/video/gspca/ov534.c b/drivers/media/video/gspca/ov534.c
index 76907ec..0475339 100644
--- a/drivers/media/video/gspca/ov534.c
+++ b/drivers/media/video/gspca/ov534.c
@@ -1533,16 +1533,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/ov534_9.c b/drivers/media/video/gspca/ov534_9.c
index b50c7a3..fbfa02a 100644
--- a/drivers/media/video/gspca/ov534_9.c
+++ b/drivers/media/video/gspca/ov534_9.c
@@ -1571,16 +1571,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/pac207.c b/drivers/media/video/gspca/pac207.c
index 1d95ec8..3844c49 100644
--- a/drivers/media/video/gspca/pac207.c
+++ b/drivers/media/video/gspca/pac207.c
@@ -569,15 +569,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/pac7302.c b/drivers/media/video/gspca/pac7302.c
index edbc04c..9db2b34 100644
--- a/drivers/media/video/gspca/pac7302.c
+++ b/drivers/media/video/gspca/pac7302.c
@@ -1221,15 +1221,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/pac7311.c b/drivers/media/video/gspca/pac7311.c
index 7509d05..1ac1111 100644
--- a/drivers/media/video/gspca/pac7311.c
+++ b/drivers/media/video/gspca/pac7311.c
@@ -868,15 +868,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/se401.c b/drivers/media/video/gspca/se401.c
index 6d98353..bb70092 100644
--- a/drivers/media/video/gspca/se401.c
+++ b/drivers/media/video/gspca/se401.c
@@ -774,15 +774,4 @@ static struct usb_driver sd_driver = {
.post_reset = sd_post_reset,
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/sn9c2028.c b/drivers/media/video/gspca/sn9c2028.c
index 48aae39..478533c 100644
--- a/drivers/media/video/gspca/sn9c2028.c
+++ b/drivers/media/video/gspca/sn9c2028.c
@@ -737,16 +737,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/sn9c20x.c b/drivers/media/video/gspca/sn9c20x.c
index b5fca91..9e198b4 100644
--- a/drivers/media/video/gspca/sn9c20x.c
+++ b/drivers/media/video/gspca/sn9c20x.c
@@ -2592,15 +2592,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/sonixb.c b/drivers/media/video/gspca/sonixb.c
index 31f7aba..6a1148d 100644
--- a/drivers/media/video/gspca/sonixb.c
+++ b/drivers/media/video/gspca/sonixb.c
@@ -1530,15 +1530,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index c55d667..0c9e6dd 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -3110,15 +3110,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/spca1528.c b/drivers/media/video/gspca/spca1528.c
index 6956731..070b9c3 100644
--- a/drivers/media/video/gspca/spca1528.c
+++ b/drivers/media/video/gspca/spca1528.c
@@ -590,15 +590,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/spca500.c b/drivers/media/video/gspca/spca500.c
index bb82c94..1039847 100644
--- a/drivers/media/video/gspca/spca500.c
+++ b/drivers/media/video/gspca/spca500.c
@@ -1092,15 +1092,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/spca501.c b/drivers/media/video/gspca/spca501.c
index 7aaac72..9c16821 100644
--- a/drivers/media/video/gspca/spca501.c
+++ b/drivers/media/video/gspca/spca501.c
@@ -2188,15 +2188,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/spca505.c b/drivers/media/video/gspca/spca505.c
index 16722dc..1320f35 100644
--- a/drivers/media/video/gspca/spca505.c
+++ b/drivers/media/video/gspca/spca505.c
@@ -815,15 +815,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/spca506.c b/drivers/media/video/gspca/spca506.c
index 89fec4c..54eed87 100644
--- a/drivers/media/video/gspca/spca506.c
+++ b/drivers/media/video/gspca/spca506.c
@@ -714,21 +714,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- int ret;
- ret = usb_register(&sd_driver);
- if (ret < 0)
- return ret;
- PDEBUG(D_PROBE, "registered");
- return 0;
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
- PDEBUG(D_PROBE, "deregistered");
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/spca508.c b/drivers/media/video/gspca/spca508.c
index a44fe3d..df4e169 100644
--- a/drivers/media/video/gspca/spca508.c
+++ b/drivers/media/video/gspca/spca508.c
@@ -1544,15 +1544,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/spca561.c b/drivers/media/video/gspca/spca561.c
index 8a6e258..4a5f209 100644
--- a/drivers/media/video/gspca/spca561.c
+++ b/drivers/media/video/gspca/spca561.c
@@ -1106,15 +1106,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/sq905.c b/drivers/media/video/gspca/sq905.c
index df805f7..2fe3c29 100644
--- a/drivers/media/video/gspca/sq905.c
+++ b/drivers/media/video/gspca/sq905.c
@@ -432,16 +432,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/sq905c.c b/drivers/media/video/gspca/sq905c.c
index c2c0560..ae78363 100644
--- a/drivers/media/video/gspca/sq905c.c
+++ b/drivers/media/video/gspca/sq905c.c
@@ -339,16 +339,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/sq930x.c b/drivers/media/video/gspca/sq930x.c
index e4255b4..1a8ba9b 100644
--- a/drivers/media/video/gspca/sq930x.c
+++ b/drivers/media/video/gspca/sq930x.c
@@ -1197,15 +1197,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/stk014.c b/drivers/media/video/gspca/stk014.c
index 42a7a28..4ae7cc8 100644
--- a/drivers/media/video/gspca/stk014.c
+++ b/drivers/media/video/gspca/stk014.c
@@ -519,15 +519,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/stv0680.c b/drivers/media/video/gspca/stv0680.c
index 4dcc7e3..461ed64 100644
--- a/drivers/media/video/gspca/stv0680.c
+++ b/drivers/media/video/gspca/stv0680.c
@@ -355,15 +355,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx.c b/drivers/media/video/gspca/stv06xx/stv06xx.c
index a1c812a..91d99b4 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx.c
+++ b/drivers/media/video/gspca/stv06xx/stv06xx.c
@@ -36,8 +36,8 @@ MODULE_AUTHOR("Erik Andrén");
MODULE_DESCRIPTION("STV06XX USB Camera Driver");
MODULE_LICENSE("GPL");
-static int dump_bridge;
-static int dump_sensor;
+static bool dump_bridge;
+static bool dump_sensor;
int stv06xx_write_bridge(struct sd *sd, u16 address, u16 i2c_data)
{
@@ -612,18 +612,7 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
module_param(dump_bridge, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dump_bridge, "Dumps all usb bridge registers at startup");
diff --git a/drivers/media/video/gspca/sunplus.c b/drivers/media/video/gspca/sunplus.c
index c890977..c80f0c0 100644
--- a/drivers/media/video/gspca/sunplus.c
+++ b/drivers/media/video/gspca/sunplus.c
@@ -1211,15 +1211,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/t613.c b/drivers/media/video/gspca/t613.c
index c22d4e8..9b9f85a8 100644
--- a/drivers/media/video/gspca/t613.c
+++ b/drivers/media/video/gspca/t613.c
@@ -1463,15 +1463,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/topro.c b/drivers/media/video/gspca/topro.c
index acb209a..444d3c5 100644
--- a/drivers/media/video/gspca/topro.c
+++ b/drivers/media/video/gspca/topro.c
@@ -4971,18 +4971,7 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
module_param(force_sensor, int, 0644);
MODULE_PARM_DESC(force_sensor,
diff --git a/drivers/media/video/gspca/tv8532.c b/drivers/media/video/gspca/tv8532.c
index 933ef2c..c8922c5 100644
--- a/drivers/media/video/gspca/tv8532.c
+++ b/drivers/media/video/gspca/tv8532.c
@@ -418,16 +418,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/vc032x.c b/drivers/media/video/gspca/vc032x.c
index 7ee2c82..208f6b2 100644
--- a/drivers/media/video/gspca/vc032x.c
+++ b/drivers/media/video/gspca/vc032x.c
@@ -4230,15 +4230,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/vicam.c b/drivers/media/video/gspca/vicam.c
index 8423ee1..911152e 100644
--- a/drivers/media/video/gspca/vicam.c
+++ b/drivers/media/video/gspca/vicam.c
@@ -369,16 +369,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/xirlink_cit.c b/drivers/media/video/gspca/xirlink_cit.c
index 3b0e194..ecada17 100644
--- a/drivers/media/video/gspca/xirlink_cit.c
+++ b/drivers/media/video/gspca/xirlink_cit.c
@@ -3323,15 +3323,4 @@ static struct usb_driver sd_driver = {
#endif
};
-/* -- module insert / remove -- */
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
diff --git a/drivers/media/video/gspca/zc3xx.c b/drivers/media/video/gspca/zc3xx.c
index f22e02f..b9e15bb 100644
--- a/drivers/media/video/gspca/zc3xx.c
+++ b/drivers/media/video/gspca/zc3xx.c
@@ -7033,18 +7033,7 @@ static struct usb_driver sd_driver = {
#endif
};
-static int __init sd_mod_init(void)
-{
- return usb_register(&sd_driver);
-}
-
-static void __exit sd_mod_exit(void)
-{
- usb_deregister(&sd_driver);
-}
-
-module_init(sd_mod_init);
-module_exit(sd_mod_exit);
+module_usb_driver(sd_driver);
module_param(force_sensor, int, 0644);
MODULE_PARM_DESC(force_sensor,
diff --git a/drivers/media/video/hdpvr/hdpvr-core.c b/drivers/media/video/hdpvr/hdpvr-core.c
index 687282d..6510110 100644
--- a/drivers/media/video/hdpvr/hdpvr-core.c
+++ b/drivers/media/video/hdpvr/hdpvr-core.c
@@ -49,7 +49,7 @@ module_param(default_audio_input, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(default_audio_input, "default audio input: 0=RCA back / "
"1=RCA front / 2=S/PDIF");
-static int boost_audio;
+static bool boost_audio;
module_param(boost_audio, bool, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(boost_audio, "boost the audio signal");
@@ -464,26 +464,7 @@ static struct usb_driver hdpvr_usb_driver = {
.id_table = hdpvr_table,
};
-static int __init hdpvr_init(void)
-{
- int result;
-
- /* register this driver with the USB subsystem */
- result = usb_register(&hdpvr_usb_driver);
- if (result)
- err("usb_register failed. Error number %d", result);
-
- return result;
-}
-
-static void __exit hdpvr_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&hdpvr_usb_driver);
-}
-
-module_init(hdpvr_init);
-module_exit(hdpvr_exit);
+module_usb_driver(hdpvr_usb_driver);
MODULE_LICENSE("GPL");
MODULE_VERSION("0.2.1");
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index 461ae44..3949b7d 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -99,7 +99,7 @@ static int i2c_clock_period[IVTV_MAX_CARDS] = { -1, -1, -1, -1, -1, -1, -1, -1,
static unsigned int cardtype_c = 1;
static unsigned int tuner_c = 1;
-static unsigned int radio_c = 1;
+static bool radio_c = 1;
static unsigned int i2c_clock_period_c = 1;
static char pal[] = "---";
static char secam[] = "--";
diff --git a/drivers/media/video/ivtv/ivtvfb.c b/drivers/media/video/ivtv/ivtvfb.c
index 6b7c9c8..d0fbfcf 100644
--- a/drivers/media/video/ivtv/ivtvfb.c
+++ b/drivers/media/video/ivtv/ivtvfb.c
@@ -58,7 +58,7 @@
/* card parameters */
static int ivtvfb_card_id = -1;
static int ivtvfb_debug = 0;
-static int osd_laced;
+static bool osd_laced;
static int osd_depth;
static int osd_upper;
static int osd_left;
diff --git a/drivers/media/video/marvell-ccic/mcam-core.c b/drivers/media/video/marvell-ccic/mcam-core.c
index c1f12f9..37d20e7 100644
--- a/drivers/media/video/marvell-ccic/mcam-core.c
+++ b/drivers/media/video/marvell-ccic/mcam-core.c
@@ -51,7 +51,7 @@ static int delivered;
* sense.
*/
-static int alloc_bufs_at_read;
+static bool alloc_bufs_at_read;
module_param(alloc_bufs_at_read, bool, 0444);
MODULE_PARM_DESC(alloc_bufs_at_read,
"Non-zero value causes DMA buffers to be allocated when the "
@@ -73,11 +73,11 @@ MODULE_PARM_DESC(dma_buf_size,
"parameters require larger buffers, an attempt to reallocate "
"will be made.");
#else /* MCAM_MODE_VMALLOC */
-static const int alloc_bufs_at_read = 0;
+static const bool alloc_bufs_at_read = 0;
static const int n_dma_bufs = 3; /* Used by S/G_PARM */
#endif /* MCAM_MODE_VMALLOC */
-static int flip;
+static bool flip;
module_param(flip, bool, 0444);
MODULE_PARM_DESC(flip,
"If set, the sensor will be instructed to flip the image "
diff --git a/drivers/media/video/msp3400-driver.c b/drivers/media/video/msp3400-driver.c
index d0f5388..d7cd0f6 100644
--- a/drivers/media/video/msp3400-driver.c
+++ b/drivers/media/video/msp3400-driver.c
@@ -69,12 +69,12 @@ MODULE_LICENSE("GPL");
/* module parameters */
static int opmode = OPMODE_AUTO;
int msp_debug; /* msp_debug output */
-int msp_once; /* no continuous stereo monitoring */
-int msp_amsound; /* hard-wire AM sound at 6.5 Hz (france),
+bool msp_once; /* no continuous stereo monitoring */
+bool msp_amsound; /* hard-wire AM sound at 6.5 Hz (france),
the autoscan seems work well only with FM... */
int msp_standard = 1; /* Override auto detect of audio msp_standard,
if needed. */
-int msp_dolby;
+bool msp_dolby;
int msp_stereo_thresh = 0x190; /* a2 threshold for stereo/bilingual
(msp34xxg only) 0x00a0-0x03c0 */
diff --git a/drivers/media/video/msp3400-driver.h b/drivers/media/video/msp3400-driver.h
index 831e8db..fbe5e07 100644
--- a/drivers/media/video/msp3400-driver.h
+++ b/drivers/media/video/msp3400-driver.h
@@ -44,10 +44,10 @@
/* module parameters */
extern int msp_debug;
-extern int msp_once;
-extern int msp_amsound;
+extern bool msp_once;
+extern bool msp_amsound;
extern int msp_standard;
-extern int msp_dolby;
+extern bool msp_dolby;
extern int msp_stereo_thresh;
struct msp_state {
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index 0cb461d..7452277 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -287,7 +287,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
sg_dma_len(sg) = new_size;
txd = ichan->dma_chan.device->device_prep_slave_sg(
- &ichan->dma_chan, sg, 1, DMA_FROM_DEVICE,
+ &ichan->dma_chan, sg, 1, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT);
if (!txd)
goto error;
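This one-line change (and the matching one in timblogiw.c further down) follows the dmaengine split of the slave transfer direction into its own enum dma_transfer_direction; the DMA-mapping directions DMA_TO_DEVICE/DMA_FROM_DEVICE are no longer passed to the slave prep callbacks. A minimal sketch of a capture-direction prep call under that assumption (chan, sgl and sg_len stand in for the driver's own channel and scatterlist):

	struct dma_async_tx_descriptor *txd;

	/* video capture is a device -> memory transfer */
	txd = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  DMA_DEV_TO_MEM,
						  DMA_PREP_INTERRUPT);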
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c
index 1e7c0d6..1fb7d5b 100644
--- a/drivers/media/video/omap/omap_vout.c
+++ b/drivers/media/video/omap/omap_vout.c
@@ -70,9 +70,9 @@ static u32 video1_numbuffers = 3;
static u32 video2_numbuffers = 3;
static u32 video1_bufsize = OMAP_VOUT_MAX_BUF_SIZE;
static u32 video2_bufsize = OMAP_VOUT_MAX_BUF_SIZE;
-static u32 vid1_static_vrfb_alloc;
-static u32 vid2_static_vrfb_alloc;
-static int debug;
+static bool vid1_static_vrfb_alloc;
+static bool vid2_static_vrfb_alloc;
+static bool debug;
/* Module parameters */
module_param(video1_numbuffers, uint, S_IRUGO);
@@ -424,7 +424,7 @@ static int omapvid_setup_overlay(struct omap_vout_device *vout,
"%s enable=%d addr=%x width=%d\n height=%d color_mode=%d\n"
"rotation=%d mirror=%d posx=%d posy=%d out_width = %d \n"
"out_height=%d rotation_type=%d screen_width=%d\n",
- __func__, info.enabled, info.paddr, info.width, info.height,
+ __func__, ovl->is_enabled(ovl), info.paddr, info.width, info.height,
info.color_mode, info.rotation, info.mirror, info.pos_x,
info.pos_y, info.out_width, info.out_height, info.rotation_type,
info.screen_width);
@@ -948,12 +948,8 @@ static int omap_vout_release(struct file *file)
/* Disable all the overlay managers connected with this interface */
for (i = 0; i < ovid->num_overlays; i++) {
struct omap_overlay *ovl = ovid->overlays[i];
- if (ovl->manager && ovl->manager->device) {
- struct omap_overlay_info info;
- ovl->get_overlay_info(ovl, &info);
- info.enabled = 0;
- ovl->set_overlay_info(ovl, &info);
- }
+ if (ovl->manager && ovl->manager->device)
+ ovl->disable(ovl);
}
/* Turn off the pipeline */
ret = omapvid_apply_changes(vout);
@@ -1674,7 +1670,6 @@ static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
if (ovl->manager && ovl->manager->device) {
struct omap_overlay_info info;
ovl->get_overlay_info(ovl, &info);
- info.enabled = 1;
info.paddr = addr;
if (ovl->set_overlay_info(ovl, &info)) {
ret = -EINVAL;
@@ -1693,6 +1688,16 @@ static int vidioc_streamon(struct file *file, void *fh, enum v4l2_buf_type i)
if (ret)
v4l2_err(&vout->vid_dev->v4l2_dev, "failed to change mode\n");
+ for (j = 0; j < ovid->num_overlays; j++) {
+ struct omap_overlay *ovl = ovid->overlays[j];
+
+ if (ovl->manager && ovl->manager->device) {
+ ret = ovl->enable(ovl);
+ if (ret)
+ goto streamon_err1;
+ }
+ }
+
ret = 0;
streamon_err1:
@@ -1722,16 +1727,8 @@ static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i)
for (j = 0; j < ovid->num_overlays; j++) {
struct omap_overlay *ovl = ovid->overlays[j];
- if (ovl->manager && ovl->manager->device) {
- struct omap_overlay_info info;
-
- ovl->get_overlay_info(ovl, &info);
- info.enabled = 0;
- ret = ovl->set_overlay_info(ovl, &info);
- if (ret)
- v4l2_err(&vout->vid_dev->v4l2_dev,
- "failed to update overlay info in streamoff\n");
- }
+ if (ovl->manager && ovl->manager->device)
+ ovl->disable(ovl);
}
/* Turn of the pipeline */
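The omap_vout changes above move from toggling info.enabled via get_overlay_info()/set_overlay_info() to the explicit overlay operations. A minimal before/after sketch, assuming the omapdss overlay ops used elsewhere in these hunks:

	/* old: flip the flag inside the overlay info */
	ovl->get_overlay_info(ovl, &info);
	info.enabled = 0;
	ovl->set_overlay_info(ovl, &info);

	/* new: ask the overlay to disable itself */
	if (ovl->manager && ovl->manager->device)
		ovl->disable(ovl);

Instead of setting info.enabled while programming the overlay info, streamon now enables the overlays in a separate loop once the mode change has been applied.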
diff --git a/drivers/media/video/omap/omap_vout_vrfb.c b/drivers/media/video/omap/omap_vout_vrfb.c
index ebebcac4..4be26abf6c 100644
--- a/drivers/media/video/omap/omap_vout_vrfb.c
+++ b/drivers/media/video/omap/omap_vout_vrfb.c
@@ -84,7 +84,7 @@ void omap_vout_free_vrfb_buffers(struct omap_vout_device *vout)
}
int omap_vout_setup_vrfb_bufs(struct platform_device *pdev, int vid_num,
- u32 static_vrfb_allocation)
+ bool static_vrfb_allocation)
{
int ret = 0, i, j;
struct omap_vout_device *vout;
diff --git a/drivers/media/video/omap3isp/isp.c b/drivers/media/video/omap3isp/isp.c
index faf1650..12d5f92 100644
--- a/drivers/media/video/omap3isp/isp.c
+++ b/drivers/media/video/omap3isp/isp.c
@@ -80,13 +80,6 @@
#include "isph3a.h"
#include "isphist.h"
-/*
- * this is provided as an interim solution until omap3isp doesn't need
- * any omap-specific iommu API
- */
-#define to_iommu(dev) \
- (struct omap_iommu *)platform_get_drvdata(to_platform_device(dev))
-
static unsigned int autoidle;
module_param(autoidle, int, 0444);
MODULE_PARM_DESC(autoidle, "Enable OMAP3ISP AUTOIDLE support");
@@ -1119,8 +1112,7 @@ isp_restore_context(struct isp_device *isp, struct isp_reg *reg_list)
static void isp_save_ctx(struct isp_device *isp)
{
isp_save_context(isp, isp_reg_list);
- if (isp->iommu)
- omap_iommu_save_ctx(isp->iommu);
+ omap_iommu_save_ctx(isp->dev);
}
/*
@@ -1133,8 +1125,7 @@ static void isp_save_ctx(struct isp_device *isp)
static void isp_restore_ctx(struct isp_device *isp)
{
isp_restore_context(isp, isp_reg_list);
- if (isp->iommu)
- omap_iommu_restore_ctx(isp->iommu);
+ omap_iommu_restore_ctx(isp->dev);
omap3isp_ccdc_restore_context(isp);
omap3isp_preview_restore_context(isp);
}
@@ -1988,7 +1979,7 @@ static int isp_remove(struct platform_device *pdev)
isp_cleanup_modules(isp);
omap3isp_get(isp);
- iommu_detach_device(isp->domain, isp->iommu_dev);
+ iommu_detach_device(isp->domain, &pdev->dev);
iommu_domain_free(isp->domain);
omap3isp_put(isp);
@@ -2136,17 +2127,6 @@ static int isp_probe(struct platform_device *pdev)
}
}
- /* IOMMU */
- isp->iommu_dev = omap_find_iommu_device("isp");
- if (!isp->iommu_dev) {
- dev_err(isp->dev, "omap_find_iommu_device failed\n");
- ret = -ENODEV;
- goto error_isp;
- }
-
- /* to be removed once iommu migration is complete */
- isp->iommu = to_iommu(isp->iommu_dev);
-
isp->domain = iommu_domain_alloc(pdev->dev.bus);
if (!isp->domain) {
dev_err(isp->dev, "can't alloc iommu domain\n");
@@ -2154,7 +2134,7 @@ static int isp_probe(struct platform_device *pdev)
goto error_isp;
}
- ret = iommu_attach_device(isp->domain, isp->iommu_dev);
+ ret = iommu_attach_device(isp->domain, &pdev->dev);
if (ret) {
dev_err(&pdev->dev, "can't attach iommu device: %d\n", ret);
goto free_domain;
@@ -2193,7 +2173,7 @@ error_modules:
error_irq:
free_irq(isp->irq_num, isp);
detach_dev:
- iommu_detach_device(isp->domain, isp->iommu_dev);
+ iommu_detach_device(isp->domain, &pdev->dev);
free_domain:
iommu_domain_free(isp->domain);
error_isp:
diff --git a/drivers/media/video/omap3isp/isp.h b/drivers/media/video/omap3isp/isp.h
index 705946e..d96603e 100644
--- a/drivers/media/video/omap3isp/isp.h
+++ b/drivers/media/video/omap3isp/isp.h
@@ -212,9 +212,7 @@ struct isp_device {
unsigned int sbl_resources;
unsigned int subclk_resources;
- struct omap_iommu *iommu;
struct iommu_domain *domain;
- struct device *iommu_dev;
struct isp_platform_callback platform_cb;
};
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c
index ec62d11..eaabc27 100644
--- a/drivers/media/video/omap3isp/ispccdc.c
+++ b/drivers/media/video/omap3isp/ispccdc.c
@@ -366,7 +366,7 @@ static void ccdc_lsc_free_request(struct isp_ccdc_device *ccdc,
dma_unmap_sg(isp->dev, req->iovm->sgt->sgl,
req->iovm->sgt->nents, DMA_TO_DEVICE);
if (req->table)
- omap_iommu_vfree(isp->domain, isp->iommu, req->table);
+ omap_iommu_vfree(isp->domain, isp->dev, req->table);
kfree(req);
}
@@ -438,7 +438,7 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
req->enable = 1;
- req->table = omap_iommu_vmalloc(isp->domain, isp->iommu, 0,
+ req->table = omap_iommu_vmalloc(isp->domain, isp->dev, 0,
req->config.size, IOMMU_FLAG);
if (IS_ERR_VALUE(req->table)) {
req->table = 0;
@@ -446,7 +446,7 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
goto done;
}
- req->iovm = omap_find_iovm_area(isp->iommu, req->table);
+ req->iovm = omap_find_iovm_area(isp->dev, req->table);
if (req->iovm == NULL) {
ret = -ENOMEM;
goto done;
@@ -462,7 +462,7 @@ static int ccdc_lsc_config(struct isp_ccdc_device *ccdc,
dma_sync_sg_for_cpu(isp->dev, req->iovm->sgt->sgl,
req->iovm->sgt->nents, DMA_TO_DEVICE);
- table = omap_da_to_va(isp->iommu, req->table);
+ table = omap_da_to_va(isp->dev, req->table);
if (copy_from_user(table, config->lsc, req->config.size)) {
ret = -EFAULT;
goto done;
@@ -734,15 +734,15 @@ static int ccdc_config(struct isp_ccdc_device *ccdc,
* already done by omap_iommu_vmalloc().
*/
size = ccdc->fpc.fpnum * 4;
- table_new = omap_iommu_vmalloc(isp->domain, isp->iommu,
+ table_new = omap_iommu_vmalloc(isp->domain, isp->dev,
0, size, IOMMU_FLAG);
if (IS_ERR_VALUE(table_new))
return -ENOMEM;
- if (copy_from_user(omap_da_to_va(isp->iommu, table_new),
+ if (copy_from_user(omap_da_to_va(isp->dev, table_new),
(__force void __user *)
ccdc->fpc.fpcaddr, size)) {
- omap_iommu_vfree(isp->domain, isp->iommu,
+ omap_iommu_vfree(isp->domain, isp->dev,
table_new);
return -EFAULT;
}
@@ -753,7 +753,7 @@ static int ccdc_config(struct isp_ccdc_device *ccdc,
ccdc_configure_fpc(ccdc);
if (table_old != 0)
- omap_iommu_vfree(isp->domain, isp->iommu, table_old);
+ omap_iommu_vfree(isp->domain, isp->dev, table_old);
}
return ccdc_lsc_config(ccdc, ccdc_struct);
@@ -2309,7 +2309,7 @@ void omap3isp_ccdc_cleanup(struct isp_device *isp)
ccdc_lsc_free_queue(ccdc, &ccdc->lsc.free_queue);
if (ccdc->fpc.fpcaddr != 0)
- omap_iommu_vfree(isp->domain, isp->iommu, ccdc->fpc.fpcaddr);
+ omap_iommu_vfree(isp->domain, isp->dev, ccdc->fpc.fpcaddr);
mutex_destroy(&ccdc->ioctl_lock);
}
diff --git a/drivers/media/video/omap3isp/ispstat.c b/drivers/media/video/omap3isp/ispstat.c
index bc0b2c7..11871ec 100644
--- a/drivers/media/video/omap3isp/ispstat.c
+++ b/drivers/media/video/omap3isp/ispstat.c
@@ -366,7 +366,7 @@ static void isp_stat_bufs_free(struct ispstat *stat)
dma_unmap_sg(isp->dev, buf->iovm->sgt->sgl,
buf->iovm->sgt->nents,
DMA_FROM_DEVICE);
- omap_iommu_vfree(isp->domain, isp->iommu,
+ omap_iommu_vfree(isp->domain, isp->dev,
buf->iommu_addr);
} else {
if (!buf->virt_addr)
@@ -400,7 +400,7 @@ static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
struct iovm_struct *iovm;
WARN_ON(buf->dma_addr);
- buf->iommu_addr = omap_iommu_vmalloc(isp->domain, isp->iommu, 0,
+ buf->iommu_addr = omap_iommu_vmalloc(isp->domain, isp->dev, 0,
size, IOMMU_FLAG);
if (IS_ERR((void *)buf->iommu_addr)) {
dev_err(stat->isp->dev,
@@ -410,7 +410,7 @@ static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
return -ENOMEM;
}
- iovm = omap_find_iovm_area(isp->iommu, buf->iommu_addr);
+ iovm = omap_find_iovm_area(isp->dev, buf->iommu_addr);
if (!iovm ||
!dma_map_sg(isp->dev, iovm->sgt->sgl, iovm->sgt->nents,
DMA_FROM_DEVICE)) {
@@ -419,7 +419,7 @@ static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size)
}
buf->iovm = iovm;
- buf->virt_addr = omap_da_to_va(stat->isp->iommu,
+ buf->virt_addr = omap_da_to_va(stat->isp->dev,
(u32)buf->iommu_addr);
buf->empty = 1;
dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated."
diff --git a/drivers/media/video/omap3isp/ispvideo.c b/drivers/media/video/omap3isp/ispvideo.c
index 615dae5..b020700 100644
--- a/drivers/media/video/omap3isp/ispvideo.c
+++ b/drivers/media/video/omap3isp/ispvideo.c
@@ -453,7 +453,7 @@ ispmmu_vmap(struct isp_device *isp, const struct scatterlist *sglist, int sglen)
sgt->nents = sglen;
sgt->orig_nents = sglen;
- da = omap_iommu_vmap(isp->domain, isp->iommu, 0, sgt, IOMMU_FLAG);
+ da = omap_iommu_vmap(isp->domain, isp->dev, 0, sgt, IOMMU_FLAG);
if (IS_ERR_VALUE(da))
kfree(sgt);
@@ -469,7 +469,7 @@ static void ispmmu_vunmap(struct isp_device *isp, dma_addr_t da)
{
struct sg_table *sgt;
- sgt = omap_iommu_vunmap(isp->domain, isp->iommu, (u32)da);
+ sgt = omap_iommu_vunmap(isp->domain, isp->dev, (u32)da);
kfree(sgt);
}
diff --git a/drivers/media/video/ov7670.c b/drivers/media/video/ov7670.c
index 8aa0585..6a56496 100644
--- a/drivers/media/video/ov7670.c
+++ b/drivers/media/video/ov7670.c
@@ -25,7 +25,7 @@ MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
MODULE_DESCRIPTION("A low-level driver for OmniVision ov7670 sensors");
MODULE_LICENSE("GPL");
-static int debug;
+static bool debug;
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-1)");
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c
index 803c9c8..c1bef61 100644
--- a/drivers/media/video/s2255drv.c
+++ b/drivers/media/video/s2255drv.c
@@ -2682,25 +2682,7 @@ static struct usb_driver s2255_driver = {
.id_table = s2255_table,
};
-static int __init usb_s2255_init(void)
-{
- int result;
- /* register this driver with the USB subsystem */
- result = usb_register(&s2255_driver);
- if (result)
- pr_err(KBUILD_MODNAME
- ": usb_register failed. Error number %d\n", result);
- dprintk(2, "%s\n", __func__);
- return result;
-}
-
-static void __exit usb_s2255_exit(void)
-{
- usb_deregister(&s2255_driver);
-}
-
-module_init(usb_s2255_init);
-module_exit(usb_s2255_exit);
+module_usb_driver(s2255_driver);
MODULE_DESCRIPTION("Sensoray 2255 Video for Linux driver");
MODULE_AUTHOR("Dean Anderson (Sensoray Company Inc.)");
diff --git a/drivers/media/video/saa7115.c b/drivers/media/video/saa7115.c
index 5cfdbc7..0ef5484 100644
--- a/drivers/media/video/saa7115.c
+++ b/drivers/media/video/saa7115.c
@@ -57,7 +57,7 @@ MODULE_AUTHOR( "Maxim Yevtyushkin, Kevin Thayer, Chris Kennedy, "
"Hans Verkuil, Mauro Carvalho Chehab");
MODULE_LICENSE("GPL");
-static int debug;
+static bool debug;
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-1)");
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index 16cb07c5..c2882fa 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -75,8 +75,8 @@ MODULE_PARM_DESC(video_nr,
"\none and for every other camera."
"\n");
-static short force_munmap[] = {[0 ... SN9C102_MAX_DEVICES-1] =
- SN9C102_FORCE_MUNMAP};
+static bool force_munmap[] = {[0 ... SN9C102_MAX_DEVICES-1] =
+ SN9C102_FORCE_MUNMAP};
module_param_array(force_munmap, bool, NULL, 0444);
MODULE_PARM_DESC(force_munmap,
" <0|1[,...]>"
@@ -3420,27 +3420,4 @@ static struct usb_driver sn9c102_usb_driver = {
.disconnect = sn9c102_usb_disconnect,
};
-/*****************************************************************************/
-
-static int __init sn9c102_module_init(void)
-{
- int err = 0;
-
- KDBG(2, SN9C102_MODULE_NAME " v" SN9C102_MODULE_VERSION);
- KDBG(3, SN9C102_MODULE_AUTHOR);
-
- if ((err = usb_register(&sn9c102_usb_driver)))
- KDBG(1, "usb_register() failed");
-
- return err;
-}
-
-
-static void __exit sn9c102_module_exit(void)
-{
- usb_deregister(&sn9c102_usb_driver);
-}
-
-
-module_init(sn9c102_module_init);
-module_exit(sn9c102_module_exit);
+module_usb_driver(sn9c102_usb_driver);
diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c
index dba2317..d427f84 100644
--- a/drivers/media/video/stk-webcam.c
+++ b/drivers/media/video/stk-webcam.c
@@ -38,11 +38,11 @@
#include "stk-webcam.h"
-static int hflip = 1;
+static bool hflip = 1;
module_param(hflip, bool, 0444);
MODULE_PARM_DESC(hflip, "Horizontal image flip (mirror). Defaults to 1");
-static int vflip = 1;
+static bool vflip = 1;
module_param(vflip, bool, 0444);
MODULE_PARM_DESC(vflip, "Vertical image flip. Defaults to 1");
@@ -1377,25 +1377,4 @@ static struct usb_driver stk_camera_driver = {
#endif
};
-
-static int __init stk_camera_init(void)
-{
- int result;
-
- result = usb_register(&stk_camera_driver);
- if (result)
- STK_ERROR("usb_register failed ! Error number %d\n", result);
-
-
- return result;
-}
-
-static void __exit stk_camera_exit(void)
-{
- usb_deregister(&stk_camera_driver);
-}
-
-module_init(stk_camera_init);
-module_exit(stk_camera_exit);
-
-
+module_usb_driver(stk_camera_driver);
diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c
index 0a2d75f..4ed1c7c2 100644
--- a/drivers/media/video/timblogiw.c
+++ b/drivers/media/video/timblogiw.c
@@ -565,7 +565,7 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
spin_unlock_irq(&fh->queue_lock);
desc = fh->chan->device->device_prep_slave_sg(fh->chan,
- buf->sg, sg_elems, DMA_FROM_DEVICE,
+ buf->sg, sg_elems, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
if (!desc) {
spin_lock_irq(&fh->queue_lock);
diff --git a/drivers/media/video/tm6000/tm6000-alsa.c b/drivers/media/video/tm6000/tm6000-alsa.c
index ddffd67..bd07ec7 100644
--- a/drivers/media/video/tm6000/tm6000-alsa.c
+++ b/drivers/media/video/tm6000/tm6000-alsa.c
@@ -42,7 +42,7 @@
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */
-static int enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 1};
+static bool enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 1};
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable tm6000x soundcard. default enabled.");
diff --git a/drivers/media/video/tm6000/tm6000-cards.c b/drivers/media/video/tm6000/tm6000-cards.c
index 3678918..034659b 100644
--- a/drivers/media/video/tm6000/tm6000-cards.c
+++ b/drivers/media/video/tm6000/tm6000-cards.c
@@ -1406,31 +1406,7 @@ static struct usb_driver tm6000_usb_driver = {
.id_table = tm6000_id_table,
};
-static int __init tm6000_module_init(void)
-{
- int result;
-
- printk(KERN_INFO "tm6000" " v4l2 driver version %d.%d.%d loaded\n",
- (TM6000_VERSION >> 16) & 0xff,
- (TM6000_VERSION >> 8) & 0xff, TM6000_VERSION & 0xff);
-
- /* register this driver with the USB subsystem */
- result = usb_register(&tm6000_usb_driver);
- if (result)
- printk(KERN_ERR "tm6000"
- " usb_register failed. Error number %d.\n", result);
-
- return result;
-}
-
-static void __exit tm6000_module_exit(void)
-{
- /* deregister at USB subsystem */
- usb_deregister(&tm6000_usb_driver);
-}
-
-module_init(tm6000_module_init);
-module_exit(tm6000_module_exit);
+module_usb_driver(tm6000_usb_driver);
MODULE_DESCRIPTION("Trident TVMaster TM5600/TM6000/TM6010 USB2 adapter");
MODULE_AUTHOR("Mauro Carvalho Chehab");
diff --git a/drivers/media/video/tvp514x.c b/drivers/media/video/tvp514x.c
index 926f039..dd26cac 100644
--- a/drivers/media/video/tvp514x.c
+++ b/drivers/media/video/tvp514x.c
@@ -52,7 +52,7 @@
#define LOCK_RETRY_DELAY (200)
/* Debug functions */
-static int debug;
+static bool debug;
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-1)");
diff --git a/drivers/media/video/tvp7002.c b/drivers/media/video/tvp7002.c
index 7875e80..236c559 100644
--- a/drivers/media/video/tvp7002.c
+++ b/drivers/media/video/tvp7002.c
@@ -63,7 +63,7 @@ MODULE_LICENSE("GPL");
#define TVP7002_CL_MASK 0x0f
/* Debug functions */
-static int debug;
+static bool debug;
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-2)");
diff --git a/drivers/media/video/upd64083.c b/drivers/media/video/upd64083.c
index 9bbe617..65d065a 100644
--- a/drivers/media/video/upd64083.c
+++ b/drivers/media/video/upd64083.c
@@ -34,7 +34,7 @@ MODULE_DESCRIPTION("uPD64083 driver");
MODULE_AUTHOR("T. Adachi, Takeru KOMORIYA, Hans Verkuil");
MODULE_LICENSE("GPL");
-static int debug;
+static bool debug;
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-1)");
diff --git a/drivers/media/video/via-camera.c b/drivers/media/video/via-camera.c
index 7e2c34e..20f7237 100644
--- a/drivers/media/video/via-camera.c
+++ b/drivers/media/video/via-camera.c
@@ -34,13 +34,13 @@ MODULE_AUTHOR("Jonathan Corbet <corbet@lwn.net>");
MODULE_DESCRIPTION("VIA framebuffer-based camera controller driver");
MODULE_LICENSE("GPL");
-static int flip_image;
+static bool flip_image;
module_param(flip_image, bool, 0444);
MODULE_PARM_DESC(flip_image,
"If set, the sensor will be instructed to flip the image "
"vertically.");
-static int override_serial;
+static bool override_serial;
module_param(override_serial, bool, 0444);
MODULE_PARM_DESC(override_serial,
"The camera driver will normally refuse to load if "
diff --git a/drivers/media/video/zoran/zoran_device.c b/drivers/media/video/zoran/zoran_device.c
index e8a2784..e86173b 100644
--- a/drivers/media/video/zoran/zoran_device.c
+++ b/drivers/media/video/zoran/zoran_device.c
@@ -57,7 +57,7 @@
ZR36057_ISR_GIRQ1 | \
ZR36057_ISR_JPEGRepIRQ )
-static int lml33dpath; /* default = 0
+static bool lml33dpath; /* default = 0
* 1 will use digital path in capture
* mode instead of analog. It can be
* used for picture adjustments using
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index f7d236a..4c09ab7 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -1550,7 +1550,7 @@ static int zoran_enum_fmt(struct zoran *zr, struct v4l2_fmtdesc *fmt, int flag)
if (zoran_formats[i].flags & flag && num++ == fmt->index) {
strncpy(fmt->description, zoran_formats[i].name,
sizeof(fmt->description) - 1);
- /* fmt struct pre-zeroed, so adding '\0' not neeed */
+ /* fmt struct pre-zeroed, so adding '\0' not needed */
fmt->pixelformat = zoran_formats[i].fourcc;
if (zoran_formats[i].flags & ZORAN_FORMAT_COMPRESSED)
fmt->flags |= V4L2_FMT_FLAG_COMPRESSED;
diff --git a/drivers/media/video/zoran/zr36060.c b/drivers/media/video/zoran/zr36060.c
index 5e4f57c..f08546f 100644
--- a/drivers/media/video/zoran/zr36060.c
+++ b/drivers/media/video/zoran/zr36060.c
@@ -50,7 +50,7 @@
/* amount of chips attached via this driver */
static int zr36060_codecs;
-static int low_bitrate;
+static bool low_bitrate;
module_param(low_bitrate, bool, 0);
MODULE_PARM_DESC(low_bitrate, "Buz compatibility option, halves bitrate");
diff --git a/drivers/media/video/zr364xx.c b/drivers/media/video/zr364xx.c
index e78cf94..cd2e39f 100644
--- a/drivers/media/video/zr364xx.c
+++ b/drivers/media/video/zr364xx.c
@@ -1695,28 +1695,7 @@ static struct usb_driver zr364xx_driver = {
.id_table = device_table
};
-
-static int __init zr364xx_init(void)
-{
- int retval;
- retval = usb_register(&zr364xx_driver);
- if (retval)
- printk(KERN_ERR KBUILD_MODNAME ": usb_register failed!\n");
- else
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_DESC "\n");
- return retval;
-}
-
-
-static void __exit zr364xx_exit(void)
-{
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_DESC " module unloaded\n");
- usb_deregister(&zr364xx_driver);
-}
-
-
-module_init(zr364xx_init);
-module_exit(zr364xx_exit);
+module_usb_driver(zr364xx_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index 6ce70e9..5319e9b 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -21,7 +21,7 @@
#define DRIVER_NAME "jmb38x_ms"
-static int no_dma;
+static bool no_dma;
module_param(no_dma, bool, 0644);
enum {
diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
index 668f5c6..29b2172 100644
--- a/drivers/memstick/host/r592.c
+++ b/drivers/memstick/host/r592.c
@@ -23,7 +23,7 @@
#include <linux/swab.h>
#include "r592.h"
-static int r592_enable_dma = 1;
+static bool r592_enable_dma = 1;
static int debug;
static const char *tpc_names[] = {
diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c
index b7aacf4..6902b83 100644
--- a/drivers/memstick/host/tifm_ms.c
+++ b/drivers/memstick/host/tifm_ms.c
@@ -22,7 +22,7 @@
#define DRIVER_NAME "tifm_ms"
-static int no_dma;
+static bool no_dma;
module_param(no_dma, bool, 0644);
/*
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
index 22027e7..d9bcfba 100644
--- a/drivers/message/fusion/lsi/mpi_cnfg.h
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -583,6 +583,7 @@ typedef struct _MSG_CONFIG_REPLY
#define MPI_MANUFACTPAGE_DEVID_SAS1066E (0x005A)
#define MPI_MANUFACTPAGE_DEVID_SAS1068 (0x0054)
#define MPI_MANUFACTPAGE_DEVID_SAS1068E (0x0058)
+#define MPI_MANUFACTPAGE_DEVID_SAS1068_820XELP (0x0059)
#define MPI_MANUFACTPAGE_DEVID_SAS1078 (0x0062)
diff --git a/drivers/message/fusion/lsi/mpi_ioc.h b/drivers/message/fusion/lsi/mpi_ioc.h
index fd62228..19fb21b 100644
--- a/drivers/message/fusion/lsi/mpi_ioc.h
+++ b/drivers/message/fusion/lsi/mpi_ioc.h
@@ -857,7 +857,7 @@ typedef struct _EVENT_DATA_SAS_DISCOVERY
#define MPI_EVENT_SAS_DSCVRY_PHY_BITS_MASK (0xFFFF0000)
#define MPI_EVENT_SAS_DSCVRY_PHY_BITS_SHIFT (16)
-/* SAS Discovery Errror Event data */
+/* SAS Discovery Error Event data */
typedef struct _EVENT_DATA_DISCOVERY_ERROR
{
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index e9c6a60..a7dc467 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -115,7 +115,8 @@ module_param(mpt_fwfault_debug, int, 0600);
MODULE_PARM_DESC(mpt_fwfault_debug,
"Enable detection of Firmware fault and halt Firmware on fault - (default=0)");
-static char MptCallbacksName[MPT_MAX_PROTOCOL_DRIVERS][50];
+static char MptCallbacksName[MPT_MAX_PROTOCOL_DRIVERS]
+ [MPT_MAX_CALLBACKNAME_LEN+1];
#ifdef MFCNT
static int mfcounter = 0;
@@ -717,8 +718,8 @@ mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass, char *func_name)
MptDriverClass[cb_idx] = dclass;
MptEvHandlers[cb_idx] = NULL;
last_drv_idx = cb_idx;
- memcpy(MptCallbacksName[cb_idx], func_name,
- strlen(func_name) > 50 ? 50 : strlen(func_name));
+ strlcpy(MptCallbacksName[cb_idx], func_name,
+ MPT_MAX_CALLBACKNAME_LEN+1);
break;
}
}
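
The memcpy() being replaced capped the copy at 50 bytes but never wrote a NUL terminator once func_name reached that length; strlcpy() bounds the copy and always terminates. A hedged sketch of the difference, with a hypothetical source string:

	static void demo_copy_name(char name[MPT_MAX_CALLBACKNAME_LEN + 1],
				   const char *src)
	{
		/* old pattern: no terminator written when strlen(src) >= 50 */
		/* memcpy(name, src, strlen(src) > 50 ? 50 : strlen(src)); */

		/* new pattern: at most MPT_MAX_CALLBACKNAME_LEN bytes, always NUL-terminated */
		strlcpy(name, src, MPT_MAX_CALLBACKNAME_LEN + 1);
	}
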
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index b4d24dc..76c05bc 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -89,6 +89,7 @@
*/
#define MPT_MAX_ADAPTERS 18
#define MPT_MAX_PROTOCOL_DRIVERS 16
+#define MPT_MAX_CALLBACKNAME_LEN 49
#define MPT_MAX_BUS 1 /* Do not change */
#define MPT_MAX_FC_DEVICES 255
#define MPT_MAX_SCSI_DEVICES 16
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 9d95042..551262e 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -5376,6 +5376,8 @@ static struct pci_device_id mptsas_pci_table[] = {
PCI_ANY_ID, PCI_ANY_ID },
{ PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1078,
PCI_ANY_ID, PCI_ANY_ID },
+ { PCI_VENDOR_ID_LSI_LOGIC, MPI_MANUFACTPAGE_DEVID_SAS1068_820XELP,
+ PCI_ANY_ID, PCI_ANY_ID },
{0} /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mptsas_pci_table);
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
index 07dbeaf..6d115c7 100644
--- a/drivers/message/i2o/i2o_proc.c
+++ b/drivers/message/i2o/i2o_proc.c
@@ -56,7 +56,7 @@
/* Structure used to define /proc entries */
typedef struct _i2o_proc_entry_t {
char *name; /* entry name */
- mode_t mode; /* mode */
+ umode_t mode; /* mode */
const struct file_operations *fops; /* open function */
} i2o_proc_entry;
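
mode_t is being phased out of the kernel in favour of umode_t, matching the type the VFS actually stores, and proc registration helpers take umode_t as well. A generic sketch (not the driver's own registration loop) of how such a table entry might be consumed:

	/* assumes an i2o_proc_entry and a parent directory as declared above */
	static int demo_add_entry(struct proc_dir_entry *dir,
				  const i2o_proc_entry *e)
	{
		return proc_create(e->name, e->mode, dir, e->fops) ? 0 : -ENOMEM;
	}
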
diff --git a/drivers/mfd/88pm860x-i2c.c b/drivers/mfd/88pm860x-i2c.c
index e017dc8..f93dd95 100644
--- a/drivers/mfd/88pm860x-i2c.c
+++ b/drivers/mfd/88pm860x-i2c.c
@@ -12,51 +12,20 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
+#include <linux/err.h>
+#include <linux/regmap.h>
#include <linux/mfd/88pm860x.h>
#include <linux/slab.h>
-static inline int pm860x_read_device(struct i2c_client *i2c,
- int reg, int bytes, void *dest)
-{
- unsigned char data;
- int ret;
-
- data = (unsigned char)reg;
- ret = i2c_master_send(i2c, &data, 1);
- if (ret < 0)
- return ret;
-
- ret = i2c_master_recv(i2c, dest, bytes);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-static inline int pm860x_write_device(struct i2c_client *i2c,
- int reg, int bytes, void *src)
-{
- unsigned char buf[bytes + 1];
- int ret;
-
- buf[0] = (unsigned char)reg;
- memcpy(&buf[1], src, bytes);
-
- ret = i2c_master_send(i2c, buf, bytes + 1);
- if (ret < 0)
- return ret;
- return 0;
-}
-
int pm860x_reg_read(struct i2c_client *i2c, int reg)
{
struct pm860x_chip *chip = i2c_get_clientdata(i2c);
- unsigned char data;
+ struct regmap *map = (i2c == chip->client) ? chip->regmap
+ : chip->regmap_companion;
+ unsigned int data;
int ret;
- mutex_lock(&chip->io_lock);
- ret = pm860x_read_device(i2c, reg, 1, &data);
- mutex_unlock(&chip->io_lock);
-
+ ret = regmap_read(map, reg, &data);
if (ret < 0)
return ret;
else
@@ -68,12 +37,11 @@ int pm860x_reg_write(struct i2c_client *i2c, int reg,
unsigned char data)
{
struct pm860x_chip *chip = i2c_get_clientdata(i2c);
+ struct regmap *map = (i2c == chip->client) ? chip->regmap
+ : chip->regmap_companion;
int ret;
- mutex_lock(&chip->io_lock);
- ret = pm860x_write_device(i2c, reg, 1, &data);
- mutex_unlock(&chip->io_lock);
-
+ ret = regmap_write(map, reg, data);
return ret;
}
EXPORT_SYMBOL(pm860x_reg_write);
@@ -82,12 +50,11 @@ int pm860x_bulk_read(struct i2c_client *i2c, int reg,
int count, unsigned char *buf)
{
struct pm860x_chip *chip = i2c_get_clientdata(i2c);
+ struct regmap *map = (i2c == chip->client) ? chip->regmap
+ : chip->regmap_companion;
int ret;
- mutex_lock(&chip->io_lock);
- ret = pm860x_read_device(i2c, reg, count, buf);
- mutex_unlock(&chip->io_lock);
-
+ ret = regmap_raw_read(map, reg, buf, count);
return ret;
}
EXPORT_SYMBOL(pm860x_bulk_read);
@@ -96,12 +63,11 @@ int pm860x_bulk_write(struct i2c_client *i2c, int reg,
int count, unsigned char *buf)
{
struct pm860x_chip *chip = i2c_get_clientdata(i2c);
+ struct regmap *map = (i2c == chip->client) ? chip->regmap
+ : chip->regmap_companion;
int ret;
- mutex_lock(&chip->io_lock);
- ret = pm860x_write_device(i2c, reg, count, buf);
- mutex_unlock(&chip->io_lock);
-
+ ret = regmap_raw_write(map, reg, buf, count);
return ret;
}
EXPORT_SYMBOL(pm860x_bulk_write);
@@ -110,39 +76,78 @@ int pm860x_set_bits(struct i2c_client *i2c, int reg,
unsigned char mask, unsigned char data)
{
struct pm860x_chip *chip = i2c_get_clientdata(i2c);
- unsigned char value;
+ struct regmap *map = (i2c == chip->client) ? chip->regmap
+ : chip->regmap_companion;
int ret;
- mutex_lock(&chip->io_lock);
- ret = pm860x_read_device(i2c, reg, 1, &value);
- if (ret < 0)
- goto out;
- value &= ~mask;
- value |= data;
- ret = pm860x_write_device(i2c, reg, 1, &value);
-out:
- mutex_unlock(&chip->io_lock);
+ ret = regmap_update_bits(map, reg, mask, data);
return ret;
}
EXPORT_SYMBOL(pm860x_set_bits);
+static int read_device(struct i2c_client *i2c, int reg,
+ int bytes, void *dest)
+{
+ unsigned char msgbuf0[I2C_SMBUS_BLOCK_MAX + 3];
+ unsigned char msgbuf1[I2C_SMBUS_BLOCK_MAX + 2];
+ struct i2c_adapter *adap = i2c->adapter;
+ struct i2c_msg msg[2] = {{i2c->addr, 0, 1, msgbuf0},
+ {i2c->addr, I2C_M_RD, 0, msgbuf1},
+ };
+ int num = 1, ret = 0;
+
+ if (dest == NULL)
+ return -EINVAL;
+ msgbuf0[0] = (unsigned char)reg; /* command */
+ msg[1].len = bytes;
+
+ /* if data needs to be read back, num should be 2 */
+ if (bytes > 0)
+ num = 2;
+ ret = adap->algo->master_xfer(adap, msg, num);
+ memcpy(dest, msgbuf1, bytes);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static int write_device(struct i2c_client *i2c, int reg,
+ int bytes, void *src)
+{
+ unsigned char buf[bytes + 1];
+ struct i2c_adapter *adap = i2c->adapter;
+ struct i2c_msg msg;
+ int ret;
+
+ buf[0] = (unsigned char)reg;
+ memcpy(&buf[1], src, bytes);
+ msg.addr = i2c->addr;
+ msg.flags = 0;
+ msg.len = bytes + 1;
+ msg.buf = buf;
+
+ ret = adap->algo->master_xfer(adap, &msg, 1);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
int pm860x_page_reg_read(struct i2c_client *i2c, int reg)
{
- struct pm860x_chip *chip = i2c_get_clientdata(i2c);
unsigned char zero = 0;
unsigned char data;
int ret;
- mutex_lock(&chip->io_lock);
- pm860x_write_device(i2c, 0xFA, 0, &zero);
- pm860x_write_device(i2c, 0xFB, 0, &zero);
- pm860x_write_device(i2c, 0xFF, 0, &zero);
- ret = pm860x_read_device(i2c, reg, 1, &data);
+ i2c_lock_adapter(i2c->adapter);
+ read_device(i2c, 0xFA, 0, &zero);
+ read_device(i2c, 0xFB, 0, &zero);
+ read_device(i2c, 0xFF, 0, &zero);
+ ret = read_device(i2c, reg, 1, &data);
if (ret >= 0)
ret = (int)data;
- pm860x_write_device(i2c, 0xFE, 0, &zero);
- pm860x_write_device(i2c, 0xFC, 0, &zero);
- mutex_unlock(&chip->io_lock);
+ read_device(i2c, 0xFE, 0, &zero);
+ read_device(i2c, 0xFC, 0, &zero);
+ i2c_unlock_adapter(i2c->adapter);
return ret;
}
EXPORT_SYMBOL(pm860x_page_reg_read);
@@ -150,18 +155,17 @@ EXPORT_SYMBOL(pm860x_page_reg_read);
int pm860x_page_reg_write(struct i2c_client *i2c, int reg,
unsigned char data)
{
- struct pm860x_chip *chip = i2c_get_clientdata(i2c);
unsigned char zero;
int ret;
- mutex_lock(&chip->io_lock);
- pm860x_write_device(i2c, 0xFA, 0, &zero);
- pm860x_write_device(i2c, 0xFB, 0, &zero);
- pm860x_write_device(i2c, 0xFF, 0, &zero);
- ret = pm860x_write_device(i2c, reg, 1, &data);
- pm860x_write_device(i2c, 0xFE, 0, &zero);
- pm860x_write_device(i2c, 0xFC, 0, &zero);
- mutex_unlock(&chip->io_lock);
+ i2c_lock_adapter(i2c->adapter);
+ read_device(i2c, 0xFA, 0, &zero);
+ read_device(i2c, 0xFB, 0, &zero);
+ read_device(i2c, 0xFF, 0, &zero);
+ ret = write_device(i2c, reg, 1, &data);
+ read_device(i2c, 0xFE, 0, &zero);
+ read_device(i2c, 0xFC, 0, &zero);
+ i2c_unlock_adapter(i2c->adapter);
return ret;
}
EXPORT_SYMBOL(pm860x_page_reg_write);
@@ -169,18 +173,17 @@ EXPORT_SYMBOL(pm860x_page_reg_write);
int pm860x_page_bulk_read(struct i2c_client *i2c, int reg,
int count, unsigned char *buf)
{
- struct pm860x_chip *chip = i2c_get_clientdata(i2c);
unsigned char zero = 0;
int ret;
- mutex_lock(&chip->io_lock);
- pm860x_write_device(i2c, 0xFA, 0, &zero);
- pm860x_write_device(i2c, 0xFB, 0, &zero);
- pm860x_write_device(i2c, 0xFF, 0, &zero);
- ret = pm860x_read_device(i2c, reg, count, buf);
- pm860x_write_device(i2c, 0xFE, 0, &zero);
- pm860x_write_device(i2c, 0xFC, 0, &zero);
- mutex_unlock(&chip->io_lock);
+ i2c_lock_adapter(i2c->adapter);
+ read_device(i2c, 0xfa, 0, &zero);
+ read_device(i2c, 0xfb, 0, &zero);
+ read_device(i2c, 0xff, 0, &zero);
+ ret = read_device(i2c, reg, count, buf);
+ read_device(i2c, 0xFE, 0, &zero);
+ read_device(i2c, 0xFC, 0, &zero);
+ i2c_unlock_adapter(i2c->adapter);
return ret;
}
EXPORT_SYMBOL(pm860x_page_bulk_read);
@@ -188,18 +191,18 @@ EXPORT_SYMBOL(pm860x_page_bulk_read);
int pm860x_page_bulk_write(struct i2c_client *i2c, int reg,
int count, unsigned char *buf)
{
- struct pm860x_chip *chip = i2c_get_clientdata(i2c);
unsigned char zero = 0;
int ret;
- mutex_lock(&chip->io_lock);
- pm860x_write_device(i2c, 0xFA, 0, &zero);
- pm860x_write_device(i2c, 0xFB, 0, &zero);
- pm860x_write_device(i2c, 0xFF, 0, &zero);
- ret = pm860x_write_device(i2c, reg, count, buf);
- pm860x_write_device(i2c, 0xFE, 0, &zero);
- pm860x_write_device(i2c, 0xFC, 0, &zero);
- mutex_unlock(&chip->io_lock);
+ i2c_lock_adapter(i2c->adapter);
+ read_device(i2c, 0xFA, 0, &zero);
+ read_device(i2c, 0xFB, 0, &zero);
+ read_device(i2c, 0xFF, 0, &zero);
+ ret = write_device(i2c, reg, count, buf);
+ read_device(i2c, 0xFE, 0, &zero);
+ read_device(i2c, 0xFC, 0, &zero);
+ i2c_unlock_adapter(i2c->adapter);
+ i2c_unlock_adapter(i2c->adapter);
return ret;
}
EXPORT_SYMBOL(pm860x_page_bulk_write);
@@ -207,25 +210,24 @@ EXPORT_SYMBOL(pm860x_page_bulk_write);
int pm860x_page_set_bits(struct i2c_client *i2c, int reg,
unsigned char mask, unsigned char data)
{
- struct pm860x_chip *chip = i2c_get_clientdata(i2c);
unsigned char zero;
unsigned char value;
int ret;
- mutex_lock(&chip->io_lock);
- pm860x_write_device(i2c, 0xFA, 0, &zero);
- pm860x_write_device(i2c, 0xFB, 0, &zero);
- pm860x_write_device(i2c, 0xFF, 0, &zero);
- ret = pm860x_read_device(i2c, reg, 1, &value);
+ i2c_lock_adapter(i2c->adapter);
+ read_device(i2c, 0xFA, 0, &zero);
+ read_device(i2c, 0xFB, 0, &zero);
+ read_device(i2c, 0xFF, 0, &zero);
+ ret = read_device(i2c, reg, 1, &value);
if (ret < 0)
goto out;
value &= ~mask;
value |= data;
- ret = pm860x_write_device(i2c, reg, 1, &value);
+ ret = write_device(i2c, reg, 1, &value);
out:
- pm860x_write_device(i2c, 0xFE, 0, &zero);
- pm860x_write_device(i2c, 0xFC, 0, &zero);
- mutex_unlock(&chip->io_lock);
+ read_device(i2c, 0xFE, 0, &zero);
+ read_device(i2c, 0xFC, 0, &zero);
+ i2c_unlock_adapter(i2c->adapter);
return ret;
}
EXPORT_SYMBOL(pm860x_page_set_bits);
@@ -257,11 +259,17 @@ static int verify_addr(struct i2c_client *i2c)
return 0;
}
+static struct regmap_config pm860x_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
static int __devinit pm860x_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct pm860x_platform_data *pdata = client->dev.platform_data;
struct pm860x_chip *chip;
+ int ret;
if (!pdata) {
pr_info("No platform data in %s!\n", __func__);
@@ -273,10 +281,17 @@ static int __devinit pm860x_probe(struct i2c_client *client,
return -ENOMEM;
chip->id = verify_addr(client);
+ chip->regmap = regmap_init_i2c(client, &pm860x_regmap_config);
+ if (IS_ERR(chip->regmap)) {
+ ret = PTR_ERR(chip->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ ret);
+ kfree(chip);
+ return ret;
+ }
chip->client = client;
i2c_set_clientdata(client, chip);
chip->dev = &client->dev;
- mutex_init(&chip->io_lock);
dev_set_drvdata(chip->dev, chip);
/*
@@ -290,6 +305,14 @@ static int __devinit pm860x_probe(struct i2c_client *client,
chip->companion_addr = pdata->companion_addr;
chip->companion = i2c_new_dummy(chip->client->adapter,
chip->companion_addr);
+ chip->regmap_companion = regmap_init_i2c(chip->companion,
+ &pm860x_regmap_config);
+ if (IS_ERR(chip->regmap_companion)) {
+ ret = PTR_ERR(chip->regmap_companion);
+ dev_err(&chip->companion->dev,
+ "Failed to allocate register map: %d\n", ret);
+ return ret;
+ }
i2c_set_clientdata(chip->companion, chip);
}
@@ -302,7 +325,11 @@ static int __devexit pm860x_remove(struct i2c_client *client)
struct pm860x_chip *chip = i2c_get_clientdata(client);
pm860x_device_exit(chip);
- i2c_unregister_device(chip->companion);
+ if (chip->companion) {
+ regmap_exit(chip->regmap_companion);
+ i2c_unregister_device(chip->companion);
+ }
+ regmap_exit(chip->regmap);
kfree(chip);
return 0;
}
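
With the regmap conversion the open-coded i2c_master_send()/recv() helpers and the driver-private io_lock go away; regmap serializes register accesses itself. A hedged sketch of the core calls, assuming a hypothetical client:

	static const struct regmap_config demo_regmap_config = {
		.reg_bits = 8,	/* 8-bit register addresses */
		.val_bits = 8,	/* 8-bit register values */
	};

	static int demo_setup(struct i2c_client *client)
	{
		struct regmap *map;

		map = regmap_init_i2c(client, &demo_regmap_config);
		if (IS_ERR(map))
			return PTR_ERR(map);

		/* read-modify-write with no driver-private locking needed;
		 * the matching teardown call is regmap_exit(map)
		 */
		return regmap_update_bits(map, 0x10, 0x0f, 0x03);
	}
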
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index f1391c2..f147395 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -12,6 +12,7 @@ config MFD_CORE
config MFD_88PM860X
bool "Support Marvell 88PM8606/88PM8607"
depends on I2C=y && GENERIC_HARDIRQS
+ select REGMAP_I2C
select MFD_CORE
help
This supports for Marvell 88PM8606/88PM8607 Power Management IC.
@@ -257,7 +258,7 @@ config TWL6040_CORE
config MFD_STMPE
bool "Support STMicroelectronics STMPE"
- depends on I2C=y && GENERIC_HARDIRQS
+ depends on (I2C=y || SPI_MASTER=y) && GENERIC_HARDIRQS
select MFD_CORE
help
Support for the STMPE family of I/O Expanders from
@@ -278,6 +279,23 @@ config MFD_STMPE
Keypad: stmpe-keypad
Touchscreen: stmpe-ts
+menu "STMPE Interface Drivers"
+depends on MFD_STMPE
+
+config STMPE_I2C
+ bool "STMPE I2C Inteface"
+ depends on I2C=y
+ default y
+ help
+ This is used to enable I2C interface of STMPE
+
+config STMPE_SPI
+ bool "STMPE SPI Inteface"
+ depends on SPI_MASTER
+ help
+ This is used to enable SPI interface of STMPE
+endmenu
+
config MFD_TC3589X
bool "Support Toshiba TC35892 and variants"
depends on I2C=y && GENERIC_HARDIRQS
@@ -311,7 +329,7 @@ config MFD_TC6387XB
config MFD_TC6393XB
bool "Support Toshiba TC6393XB"
- depends on GPIOLIB && ARM
+ depends on GPIOLIB && ARM && HAVE_CLK
select MFD_CORE
select MFD_TMIO
help
@@ -328,6 +346,34 @@ config PMIC_DA903X
individual components like LCD backlight, voltage regulators,
LEDs and battery-charger under the corresponding menus.
+config PMIC_DA9052
+ bool
+ select MFD_CORE
+
+config MFD_DA9052_SPI
+ bool "Support Dialog Semiconductor DA9052/53 PMIC variants with SPI"
+ select REGMAP_SPI
+ select REGMAP_IRQ
+ select PMIC_DA9052
+ depends on SPI_MASTER=y
+ help
+ Support for the Dialog Semiconductor DA9052 PMIC
+ when controlled using SPI. This driver provides common support
+ for accessing the device, additional drivers must be enabled in
+ order to use the functionality of the device.
+
+config MFD_DA9052_I2C
+ bool "Support Dialog Semiconductor DA9052/53 PMIC variants with I2C"
+ select REGMAP_I2C
+ select REGMAP_IRQ
+ select PMIC_DA9052
+ depends on I2C=y
+ help
+ Support for the Dialog Semiconductor DA9052 PMIC
+ when controlled using I2C. This driver provides common support
+ for accessing the device, additional drivers must be enabled in
+ order to use the functionality of the device.
+
config PMIC_ADP5520
bool "Analog Devices ADP5520/01 MFD PMIC Core Support"
depends on I2C=y
@@ -371,6 +417,17 @@ config MFD_MAX8998
additional drivers must be enabled in order to use the functionality
of the device.
+config MFD_S5M_CORE
+ bool "SAMSUNG S5M Series Support"
+ depends on I2C=y && GENERIC_HARDIRQS
+ select MFD_CORE
+ select REGMAP_I2C
+ help
+ Support for the Samsung Electronics S5M MFD series.
+ This driver provides common support for accessing the device,
+ additional drivers must be enabled in order to use the functionality
+ of the device.
+
config MFD_WM8400
tristate "Support Wolfson Microelectronics WM8400"
select MFD_CORE
@@ -477,6 +534,7 @@ config MFD_WM8994
bool "Support Wolfson Microelectronics WM8994"
select MFD_CORE
select REGMAP_I2C
+ select REGMAP_IRQ
depends on I2C=y && GENERIC_HARDIRQS
help
The WM8994 is a highly integrated hi-fi CODEC designed for
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index b2292eb..b953bab 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -16,6 +16,8 @@ obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o
obj-$(CONFIG_MFD_TI_SSP) += ti-ssp.o
obj-$(CONFIG_MFD_STMPE) += stmpe.o
+obj-$(CONFIG_STMPE_I2C) += stmpe-i2c.o
+obj-$(CONFIG_STMPE_SPI) += stmpe-spi.o
obj-$(CONFIG_MFD_TC3589X) += tc3589x.o
obj-$(CONFIG_MFD_T7L66XB) += t7l66xb.o tmio_core.o
obj-$(CONFIG_MFD_TC6387XB) += tc6387xb.o tmio_core.o
@@ -31,7 +33,7 @@ wm8350-objs := wm8350-core.o wm8350-regmap.o wm8350-gpio.o
wm8350-objs += wm8350-irq.o
obj-$(CONFIG_MFD_WM8350) += wm8350.o
obj-$(CONFIG_MFD_WM8350_I2C) += wm8350-i2c.o
-obj-$(CONFIG_MFD_WM8994) += wm8994-core.o wm8994-irq.o
+obj-$(CONFIG_MFD_WM8994) += wm8994-core.o wm8994-irq.o wm8994-regmap.o
obj-$(CONFIG_TPS6105X) += tps6105x.o
obj-$(CONFIG_TPS65010) += tps65010.o
@@ -67,6 +69,11 @@ endif
obj-$(CONFIG_UCB1400_CORE) += ucb1400_core.o
obj-$(CONFIG_PMIC_DA903X) += da903x.o
+
+obj-$(CONFIG_PMIC_DA9052) += da9052-core.o
+obj-$(CONFIG_MFD_DA9052_SPI) += da9052-spi.o
+obj-$(CONFIG_MFD_DA9052_I2C) += da9052-i2c.o
+
max8925-objs := max8925-core.o max8925-i2c.o
obj-$(CONFIG_MFD_MAX8925) += max8925.o
obj-$(CONFIG_MFD_MAX8997) += max8997.o max8997-irq.o
@@ -104,3 +111,4 @@ obj-$(CONFIG_MFD_PM8XXX_IRQ) += pm8xxx-irq.o
obj-$(CONFIG_TPS65911_COMPARATOR) += tps65911-comparator.o
obj-$(CONFIG_MFD_AAT2870_CORE) += aat2870-core.o
obj-$(CONFIG_MFD_INTEL_MSIC) += intel_msic.o
+obj-$(CONFIG_MFD_S5M_CORE) += s5m-core.o s5m-irq.o
diff --git a/drivers/mfd/aat2870-core.c b/drivers/mfd/aat2870-core.c
index 02c4201..3aa36eb 100644
--- a/drivers/mfd/aat2870-core.c
+++ b/drivers/mfd/aat2870-core.c
@@ -407,13 +407,13 @@ static int aat2870_i2c_probe(struct i2c_client *client,
aat2870->init(aat2870);
if (aat2870->en_pin >= 0) {
- ret = gpio_request(aat2870->en_pin, "aat2870-en");
+ ret = gpio_request_one(aat2870->en_pin, GPIOF_OUT_INIT_HIGH,
+ "aat2870-en");
if (ret < 0) {
dev_err(&client->dev,
"Failed to request GPIO %d\n", aat2870->en_pin);
goto out_kfree;
}
- gpio_direction_output(aat2870->en_pin, 1);
}
aat2870_enable(aat2870);
@@ -468,9 +468,10 @@ static int aat2870_i2c_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM
-static int aat2870_i2c_suspend(struct i2c_client *client, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int aat2870_i2c_suspend(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct aat2870_data *aat2870 = i2c_get_clientdata(client);
aat2870_disable(aat2870);
@@ -478,8 +479,9 @@ static int aat2870_i2c_suspend(struct i2c_client *client, pm_message_t state)
return 0;
}
-static int aat2870_i2c_resume(struct i2c_client *client)
+static int aat2870_i2c_resume(struct device *dev)
{
+ struct i2c_client *client = to_i2c_client(dev);
struct aat2870_data *aat2870 = i2c_get_clientdata(client);
struct aat2870_register *reg = NULL;
int i;
@@ -495,12 +497,12 @@ static int aat2870_i2c_resume(struct i2c_client *client)
return 0;
}
-#else
-#define aat2870_i2c_suspend NULL
-#define aat2870_i2c_resume NULL
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(aat2870_pm_ops, aat2870_i2c_suspend,
+ aat2870_i2c_resume);
-static struct i2c_device_id aat2870_i2c_id_table[] = {
+static const struct i2c_device_id aat2870_i2c_id_table[] = {
{ "aat2870", 0 },
{ }
};
@@ -510,11 +512,10 @@ static struct i2c_driver aat2870_i2c_driver = {
.driver = {
.name = "aat2870",
.owner = THIS_MODULE,
+ .pm = &aat2870_pm_ops,
},
.probe = aat2870_i2c_probe,
.remove = aat2870_i2c_remove,
- .suspend = aat2870_i2c_suspend,
- .resume = aat2870_i2c_resume,
.id_table = aat2870_i2c_id_table,
};
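
Moving from the legacy i2c suspend/resume methods to dev_pm_ops lets the PM core drive the callbacks; SIMPLE_DEV_PM_OPS() wires one suspend/resume pair into all of the system-sleep entry points. Roughly, as a simplified sketch:

	/* approximate equivalent of SIMPLE_DEV_PM_OPS(aat2870_pm_ops, ...) */
	static const struct dev_pm_ops aat2870_pm_ops_sketch = {
	#ifdef CONFIG_PM_SLEEP
		.suspend  = aat2870_i2c_suspend,
		.resume   = aat2870_i2c_resume,
		.freeze   = aat2870_i2c_suspend,
		.thaw     = aat2870_i2c_resume,
		.poweroff = aat2870_i2c_suspend,
		.restore  = aat2870_i2c_resume,
	#endif
	};
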
diff --git a/drivers/mfd/ab5500-core.c b/drivers/mfd/ab5500-core.c
index ec10629..bd56a76 100644
--- a/drivers/mfd/ab5500-core.c
+++ b/drivers/mfd/ab5500-core.c
@@ -22,8 +22,8 @@
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/random.h>
-#include <linux/mfd/ab5500/ab5500.h>
#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/spinlock.h>
diff --git a/drivers/mfd/ab5500-debugfs.c b/drivers/mfd/ab5500-debugfs.c
index b7b2d348..7200694 100644
--- a/drivers/mfd/ab5500-debugfs.c
+++ b/drivers/mfd/ab5500-debugfs.c
@@ -7,8 +7,8 @@
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
-#include <linux/mfd/ab5500/ab5500.h>
#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab5500.h>
#include <linux/uaccess.h>
#include "ab5500-core.h"
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index d3d572b..d295941 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -17,7 +17,7 @@
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/abx500.h>
-#include <linux/mfd/ab8500.h>
+#include <linux/mfd/abx500/ab8500.h>
#include <linux/regulator/ab8500.h>
/*
@@ -956,11 +956,12 @@ int __devinit ab8500_init(struct ab8500 *ab8500)
return ret;
out_freeirq:
- if (ab8500->irq_base) {
+ if (ab8500->irq_base)
free_irq(ab8500->irq, ab8500);
out_removeirq:
+ if (ab8500->irq_base)
ab8500_irq_remove(ab8500);
- }
+
return ret;
}
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index dedb7f6..9a0211a 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -13,7 +13,7 @@
#include <linux/platform_device.h>
#include <linux/mfd/abx500.h>
-#include <linux/mfd/ab8500.h>
+#include <linux/mfd/abx500/ab8500.h>
static u32 debug_bank;
static u32 debug_address;
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
index e985d17..c39fc71 100644
--- a/drivers/mfd/ab8500-gpadc.c
+++ b/drivers/mfd/ab8500-gpadc.c
@@ -18,9 +18,9 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/list.h>
-#include <linux/mfd/ab8500.h>
#include <linux/mfd/abx500.h>
-#include <linux/mfd/ab8500/gpadc.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/abx500/ab8500-gpadc.h>
/*
* GPADC register offsets
diff --git a/drivers/mfd/ab8500-i2c.c b/drivers/mfd/ab8500-i2c.c
index 9be541c..087fecd 100644
--- a/drivers/mfd/ab8500-i2c.c
+++ b/drivers/mfd/ab8500-i2c.c
@@ -10,7 +10,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/mfd/ab8500.h>
+#include <linux/mfd/abx500/ab8500.h>
#include <linux/mfd/db8500-prcmu.h>
static int ab8500_i2c_write(struct ab8500 *ab8500, u16 addr, u8 data)
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
index f20feef..c28d4eb 100644
--- a/drivers/mfd/ab8500-sysctrl.c
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -7,9 +7,9 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/mfd/ab8500.h>
#include <linux/mfd/abx500.h>
-#include <linux/mfd/ab8500/sysctrl.h>
+#include <linux/mfd/abx500/ab8500.h>
+#include <linux/mfd/abx500/ab8500-sysctrl.h>
static struct device *sysctrl_dev;
diff --git a/drivers/mfd/cs5535-mfd.c b/drivers/mfd/cs5535-mfd.c
index 155fa04..315fef5 100644
--- a/drivers/mfd/cs5535-mfd.c
+++ b/drivers/mfd/cs5535-mfd.c
@@ -172,14 +172,14 @@ static void __devexit cs5535_mfd_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id cs5535_mfd_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(cs5535_mfd_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_ISA) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, cs5535_mfd_pci_tbl);
-static struct pci_driver cs5535_mfd_drv = {
+static struct pci_driver cs5535_mfd_driver = {
.name = DRV_NAME,
.id_table = cs5535_mfd_pci_tbl,
.probe = cs5535_mfd_probe,
@@ -188,12 +188,12 @@ static struct pci_driver cs5535_mfd_drv = {
static int __init cs5535_mfd_init(void)
{
- return pci_register_driver(&cs5535_mfd_drv);
+ return pci_register_driver(&cs5535_mfd_driver);
}
static void __exit cs5535_mfd_exit(void)
{
- pci_unregister_driver(&cs5535_mfd_drv);
+ pci_unregister_driver(&cs5535_mfd_driver);
}
module_init(cs5535_mfd_init);
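
DEFINE_PCI_DEVICE_TABLE() marks the ID table const and places it in the __devinitconst section; spelled out, the new table is roughly:

	/* approximately what static DEFINE_PCI_DEVICE_TABLE(cs5535_mfd_pci_tbl) expands to */
	static const struct pci_device_id cs5535_mfd_pci_tbl[] __devinitconst = {
		{ PCI_DEVICE(PCI_VENDOR_ID_NS,  PCI_DEVICE_ID_NS_CS5535_ISA) },
		{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_ISA) },
		{ 0, }
	};
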
diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
new file mode 100644
index 0000000..5ddde2a
--- /dev/null
+++ b/drivers/mfd/da9052-core.c
@@ -0,0 +1,694 @@
+/*
+ * Device access for Dialog DA9052 PMICs.
+ *
+ * Copyright(c) 2011 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/mfd/core.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/pdata.h>
+#include <linux/mfd/da9052/reg.h>
+
+#define DA9052_NUM_IRQ_REGS 4
+#define DA9052_IRQ_MASK_POS_1 0x01
+#define DA9052_IRQ_MASK_POS_2 0x02
+#define DA9052_IRQ_MASK_POS_3 0x04
+#define DA9052_IRQ_MASK_POS_4 0x08
+#define DA9052_IRQ_MASK_POS_5 0x10
+#define DA9052_IRQ_MASK_POS_6 0x20
+#define DA9052_IRQ_MASK_POS_7 0x40
+#define DA9052_IRQ_MASK_POS_8 0x80
+
+static bool da9052_reg_readable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case DA9052_PAGE0_CON_REG:
+ case DA9052_STATUS_A_REG:
+ case DA9052_STATUS_B_REG:
+ case DA9052_STATUS_C_REG:
+ case DA9052_STATUS_D_REG:
+ case DA9052_EVENT_A_REG:
+ case DA9052_EVENT_B_REG:
+ case DA9052_EVENT_C_REG:
+ case DA9052_EVENT_D_REG:
+ case DA9052_FAULTLOG_REG:
+ case DA9052_IRQ_MASK_A_REG:
+ case DA9052_IRQ_MASK_B_REG:
+ case DA9052_IRQ_MASK_C_REG:
+ case DA9052_IRQ_MASK_D_REG:
+ case DA9052_CONTROL_A_REG:
+ case DA9052_CONTROL_B_REG:
+ case DA9052_CONTROL_C_REG:
+ case DA9052_CONTROL_D_REG:
+ case DA9052_PDDIS_REG:
+ case DA9052_INTERFACE_REG:
+ case DA9052_RESET_REG:
+ case DA9052_GPIO_0_1_REG:
+ case DA9052_GPIO_2_3_REG:
+ case DA9052_GPIO_4_5_REG:
+ case DA9052_GPIO_6_7_REG:
+ case DA9052_GPIO_14_15_REG:
+ case DA9052_ID_0_1_REG:
+ case DA9052_ID_2_3_REG:
+ case DA9052_ID_4_5_REG:
+ case DA9052_ID_6_7_REG:
+ case DA9052_ID_8_9_REG:
+ case DA9052_ID_10_11_REG:
+ case DA9052_ID_12_13_REG:
+ case DA9052_ID_14_15_REG:
+ case DA9052_ID_16_17_REG:
+ case DA9052_ID_18_19_REG:
+ case DA9052_ID_20_21_REG:
+ case DA9052_SEQ_STATUS_REG:
+ case DA9052_SEQ_A_REG:
+ case DA9052_SEQ_B_REG:
+ case DA9052_SEQ_TIMER_REG:
+ case DA9052_BUCKA_REG:
+ case DA9052_BUCKB_REG:
+ case DA9052_BUCKCORE_REG:
+ case DA9052_BUCKPRO_REG:
+ case DA9052_BUCKMEM_REG:
+ case DA9052_BUCKPERI_REG:
+ case DA9052_LDO1_REG:
+ case DA9052_LDO2_REG:
+ case DA9052_LDO3_REG:
+ case DA9052_LDO4_REG:
+ case DA9052_LDO5_REG:
+ case DA9052_LDO6_REG:
+ case DA9052_LDO7_REG:
+ case DA9052_LDO8_REG:
+ case DA9052_LDO9_REG:
+ case DA9052_LDO10_REG:
+ case DA9052_SUPPLY_REG:
+ case DA9052_PULLDOWN_REG:
+ case DA9052_CHGBUCK_REG:
+ case DA9052_WAITCONT_REG:
+ case DA9052_ISET_REG:
+ case DA9052_BATCHG_REG:
+ case DA9052_CHG_CONT_REG:
+ case DA9052_INPUT_CONT_REG:
+ case DA9052_CHG_TIME_REG:
+ case DA9052_BBAT_CONT_REG:
+ case DA9052_BOOST_REG:
+ case DA9052_LED_CONT_REG:
+ case DA9052_LEDMIN123_REG:
+ case DA9052_LED1_CONF_REG:
+ case DA9052_LED2_CONF_REG:
+ case DA9052_LED3_CONF_REG:
+ case DA9052_LED1CONT_REG:
+ case DA9052_LED2CONT_REG:
+ case DA9052_LED3CONT_REG:
+ case DA9052_LED_CONT_4_REG:
+ case DA9052_LED_CONT_5_REG:
+ case DA9052_ADC_MAN_REG:
+ case DA9052_ADC_CONT_REG:
+ case DA9052_ADC_RES_L_REG:
+ case DA9052_ADC_RES_H_REG:
+ case DA9052_VDD_RES_REG:
+ case DA9052_VDD_MON_REG:
+ case DA9052_ICHG_AV_REG:
+ case DA9052_ICHG_THD_REG:
+ case DA9052_ICHG_END_REG:
+ case DA9052_TBAT_RES_REG:
+ case DA9052_TBAT_HIGHP_REG:
+ case DA9052_TBAT_HIGHN_REG:
+ case DA9052_TBAT_LOW_REG:
+ case DA9052_T_OFFSET_REG:
+ case DA9052_ADCIN4_RES_REG:
+ case DA9052_AUTO4_HIGH_REG:
+ case DA9052_AUTO4_LOW_REG:
+ case DA9052_ADCIN5_RES_REG:
+ case DA9052_AUTO5_HIGH_REG:
+ case DA9052_AUTO5_LOW_REG:
+ case DA9052_ADCIN6_RES_REG:
+ case DA9052_AUTO6_HIGH_REG:
+ case DA9052_AUTO6_LOW_REG:
+ case DA9052_TJUNC_RES_REG:
+ case DA9052_TSI_CONT_A_REG:
+ case DA9052_TSI_CONT_B_REG:
+ case DA9052_TSI_X_MSB_REG:
+ case DA9052_TSI_Y_MSB_REG:
+ case DA9052_TSI_LSB_REG:
+ case DA9052_TSI_Z_MSB_REG:
+ case DA9052_COUNT_S_REG:
+ case DA9052_COUNT_MI_REG:
+ case DA9052_COUNT_H_REG:
+ case DA9052_COUNT_D_REG:
+ case DA9052_COUNT_MO_REG:
+ case DA9052_COUNT_Y_REG:
+ case DA9052_ALARM_MI_REG:
+ case DA9052_ALARM_H_REG:
+ case DA9052_ALARM_D_REG:
+ case DA9052_ALARM_MO_REG:
+ case DA9052_ALARM_Y_REG:
+ case DA9052_SECOND_A_REG:
+ case DA9052_SECOND_B_REG:
+ case DA9052_SECOND_C_REG:
+ case DA9052_SECOND_D_REG:
+ case DA9052_PAGE1_CON_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool da9052_reg_writeable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case DA9052_PAGE0_CON_REG:
+ case DA9052_EVENT_A_REG:
+ case DA9052_EVENT_B_REG:
+ case DA9052_EVENT_C_REG:
+ case DA9052_EVENT_D_REG:
+ case DA9052_IRQ_MASK_A_REG:
+ case DA9052_IRQ_MASK_B_REG:
+ case DA9052_IRQ_MASK_C_REG:
+ case DA9052_IRQ_MASK_D_REG:
+ case DA9052_CONTROL_A_REG:
+ case DA9052_CONTROL_B_REG:
+ case DA9052_CONTROL_C_REG:
+ case DA9052_CONTROL_D_REG:
+ case DA9052_PDDIS_REG:
+ case DA9052_RESET_REG:
+ case DA9052_GPIO_0_1_REG:
+ case DA9052_GPIO_2_3_REG:
+ case DA9052_GPIO_4_5_REG:
+ case DA9052_GPIO_6_7_REG:
+ case DA9052_GPIO_14_15_REG:
+ case DA9052_ID_0_1_REG:
+ case DA9052_ID_2_3_REG:
+ case DA9052_ID_4_5_REG:
+ case DA9052_ID_6_7_REG:
+ case DA9052_ID_8_9_REG:
+ case DA9052_ID_10_11_REG:
+ case DA9052_ID_12_13_REG:
+ case DA9052_ID_14_15_REG:
+ case DA9052_ID_16_17_REG:
+ case DA9052_ID_18_19_REG:
+ case DA9052_ID_20_21_REG:
+ case DA9052_SEQ_STATUS_REG:
+ case DA9052_SEQ_A_REG:
+ case DA9052_SEQ_B_REG:
+ case DA9052_SEQ_TIMER_REG:
+ case DA9052_BUCKA_REG:
+ case DA9052_BUCKB_REG:
+ case DA9052_BUCKCORE_REG:
+ case DA9052_BUCKPRO_REG:
+ case DA9052_BUCKMEM_REG:
+ case DA9052_BUCKPERI_REG:
+ case DA9052_LDO1_REG:
+ case DA9052_LDO2_REG:
+ case DA9052_LDO3_REG:
+ case DA9052_LDO4_REG:
+ case DA9052_LDO5_REG:
+ case DA9052_LDO6_REG:
+ case DA9052_LDO7_REG:
+ case DA9052_LDO8_REG:
+ case DA9052_LDO9_REG:
+ case DA9052_LDO10_REG:
+ case DA9052_SUPPLY_REG:
+ case DA9052_PULLDOWN_REG:
+ case DA9052_CHGBUCK_REG:
+ case DA9052_WAITCONT_REG:
+ case DA9052_ISET_REG:
+ case DA9052_BATCHG_REG:
+ case DA9052_CHG_CONT_REG:
+ case DA9052_INPUT_CONT_REG:
+ case DA9052_BBAT_CONT_REG:
+ case DA9052_BOOST_REG:
+ case DA9052_LED_CONT_REG:
+ case DA9052_LEDMIN123_REG:
+ case DA9052_LED1_CONF_REG:
+ case DA9052_LED2_CONF_REG:
+ case DA9052_LED3_CONF_REG:
+ case DA9052_LED1CONT_REG:
+ case DA9052_LED2CONT_REG:
+ case DA9052_LED3CONT_REG:
+ case DA9052_LED_CONT_4_REG:
+ case DA9052_LED_CONT_5_REG:
+ case DA9052_ADC_MAN_REG:
+ case DA9052_ADC_CONT_REG:
+ case DA9052_ADC_RES_L_REG:
+ case DA9052_ADC_RES_H_REG:
+ case DA9052_VDD_RES_REG:
+ case DA9052_VDD_MON_REG:
+ case DA9052_ICHG_THD_REG:
+ case DA9052_ICHG_END_REG:
+ case DA9052_TBAT_HIGHP_REG:
+ case DA9052_TBAT_HIGHN_REG:
+ case DA9052_TBAT_LOW_REG:
+ case DA9052_T_OFFSET_REG:
+ case DA9052_AUTO4_HIGH_REG:
+ case DA9052_AUTO4_LOW_REG:
+ case DA9052_AUTO5_HIGH_REG:
+ case DA9052_AUTO5_LOW_REG:
+ case DA9052_AUTO6_HIGH_REG:
+ case DA9052_AUTO6_LOW_REG:
+ case DA9052_TSI_CONT_A_REG:
+ case DA9052_TSI_CONT_B_REG:
+ case DA9052_COUNT_S_REG:
+ case DA9052_COUNT_MI_REG:
+ case DA9052_COUNT_H_REG:
+ case DA9052_COUNT_D_REG:
+ case DA9052_COUNT_MO_REG:
+ case DA9052_COUNT_Y_REG:
+ case DA9052_ALARM_MI_REG:
+ case DA9052_ALARM_H_REG:
+ case DA9052_ALARM_D_REG:
+ case DA9052_ALARM_MO_REG:
+ case DA9052_ALARM_Y_REG:
+ case DA9052_PAGE1_CON_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool da9052_reg_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case DA9052_STATUS_A_REG:
+ case DA9052_STATUS_B_REG:
+ case DA9052_STATUS_C_REG:
+ case DA9052_STATUS_D_REG:
+ case DA9052_EVENT_A_REG:
+ case DA9052_EVENT_B_REG:
+ case DA9052_EVENT_C_REG:
+ case DA9052_EVENT_D_REG:
+ case DA9052_FAULTLOG_REG:
+ case DA9052_CHG_TIME_REG:
+ case DA9052_ADC_RES_L_REG:
+ case DA9052_ADC_RES_H_REG:
+ case DA9052_VDD_RES_REG:
+ case DA9052_ICHG_AV_REG:
+ case DA9052_TBAT_RES_REG:
+ case DA9052_ADCIN4_RES_REG:
+ case DA9052_ADCIN5_RES_REG:
+ case DA9052_ADCIN6_RES_REG:
+ case DA9052_TJUNC_RES_REG:
+ case DA9052_TSI_X_MSB_REG:
+ case DA9052_TSI_Y_MSB_REG:
+ case DA9052_TSI_LSB_REG:
+ case DA9052_TSI_Z_MSB_REG:
+ case DA9052_COUNT_S_REG:
+ case DA9052_COUNT_MI_REG:
+ case DA9052_COUNT_H_REG:
+ case DA9052_COUNT_D_REG:
+ case DA9052_COUNT_MO_REG:
+ case DA9052_COUNT_Y_REG:
+ case DA9052_ALARM_MI_REG:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static struct resource da9052_rtc_resource = {
+ .name = "ALM",
+ .start = DA9052_IRQ_ALARM,
+ .end = DA9052_IRQ_ALARM,
+ .flags = IORESOURCE_IRQ,
+};
+
+static struct resource da9052_onkey_resource = {
+ .name = "ONKEY",
+ .start = DA9052_IRQ_NONKEY,
+ .end = DA9052_IRQ_NONKEY,
+ .flags = IORESOURCE_IRQ,
+};
+
+static struct resource da9052_bat_resources[] = {
+ {
+ .name = "BATT TEMP",
+ .start = DA9052_IRQ_TBAT,
+ .end = DA9052_IRQ_TBAT,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "DCIN DET",
+ .start = DA9052_IRQ_DCIN,
+ .end = DA9052_IRQ_DCIN,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "DCIN REM",
+ .start = DA9052_IRQ_DCINREM,
+ .end = DA9052_IRQ_DCINREM,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "VBUS DET",
+ .start = DA9052_IRQ_VBUS,
+ .end = DA9052_IRQ_VBUS,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "VBUS REM",
+ .start = DA9052_IRQ_VBUSREM,
+ .end = DA9052_IRQ_VBUSREM,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "CHG END",
+ .start = DA9052_IRQ_CHGEND,
+ .end = DA9052_IRQ_CHGEND,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct resource da9052_tsi_resources[] = {
+ {
+ .name = "PENDWN",
+ .start = DA9052_IRQ_PENDOWN,
+ .end = DA9052_IRQ_PENDOWN,
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .name = "TSIRDY",
+ .start = DA9052_IRQ_TSIREADY,
+ .end = DA9052_IRQ_TSIREADY,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct mfd_cell __devinitdata da9052_subdev_info[] = {
+ {
+ .name = "da9052-regulator",
+ .id = 1,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 2,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 3,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 4,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 5,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 6,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 7,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 8,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 9,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 10,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 11,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 12,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 13,
+ },
+ {
+ .name = "da9052-regulator",
+ .id = 14,
+ },
+ {
+ .name = "da9052-onkey",
+ .resources = &da9052_onkey_resource,
+ .num_resources = 1,
+ },
+ {
+ .name = "da9052-rtc",
+ .resources = &da9052_rtc_resource,
+ .num_resources = 1,
+ },
+ {
+ .name = "da9052-gpio",
+ },
+ {
+ .name = "da9052-hwmon",
+ },
+ {
+ .name = "da9052-leds",
+ },
+ {
+ .name = "da9052-wled1",
+ },
+ {
+ .name = "da9052-wled2",
+ },
+ {
+ .name = "da9052-wled3",
+ },
+ {
+ .name = "da9052-tsi",
+ .resources = da9052_tsi_resources,
+ .num_resources = ARRAY_SIZE(da9052_tsi_resources),
+ },
+ {
+ .name = "da9052-bat",
+ .resources = da9052_bat_resources,
+ .num_resources = ARRAY_SIZE(da9052_bat_resources),
+ },
+ {
+ .name = "da9052-watchdog",
+ },
+};
+
+static struct regmap_irq da9052_irqs[] = {
+ [DA9052_IRQ_DCIN] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_1,
+ },
+ [DA9052_IRQ_VBUS] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_2,
+ },
+ [DA9052_IRQ_DCINREM] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_3,
+ },
+ [DA9052_IRQ_VBUSREM] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_4,
+ },
+ [DA9052_IRQ_VDDLOW] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_5,
+ },
+ [DA9052_IRQ_ALARM] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_6,
+ },
+ [DA9052_IRQ_SEQRDY] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_7,
+ },
+ [DA9052_IRQ_COMP1V2] = {
+ .reg_offset = 0,
+ .mask = DA9052_IRQ_MASK_POS_8,
+ },
+ [DA9052_IRQ_NONKEY] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_1,
+ },
+ [DA9052_IRQ_IDFLOAT] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_2,
+ },
+ [DA9052_IRQ_IDGND] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_3,
+ },
+ [DA9052_IRQ_CHGEND] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_4,
+ },
+ [DA9052_IRQ_TBAT] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_5,
+ },
+ [DA9052_IRQ_ADC_EOM] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_6,
+ },
+ [DA9052_IRQ_PENDOWN] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_7,
+ },
+ [DA9052_IRQ_TSIREADY] = {
+ .reg_offset = 1,
+ .mask = DA9052_IRQ_MASK_POS_8,
+ },
+ [DA9052_IRQ_GPI0] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_1,
+ },
+ [DA9052_IRQ_GPI1] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_2,
+ },
+ [DA9052_IRQ_GPI2] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_3,
+ },
+ [DA9052_IRQ_GPI3] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_4,
+ },
+ [DA9052_IRQ_GPI4] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_5,
+ },
+ [DA9052_IRQ_GPI5] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_6,
+ },
+ [DA9052_IRQ_GPI6] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_7,
+ },
+ [DA9052_IRQ_GPI7] = {
+ .reg_offset = 2,
+ .mask = DA9052_IRQ_MASK_POS_8,
+ },
+ [DA9052_IRQ_GPI8] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_1,
+ },
+ [DA9052_IRQ_GPI9] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_2,
+ },
+ [DA9052_IRQ_GPI10] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_3,
+ },
+ [DA9052_IRQ_GPI11] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_4,
+ },
+ [DA9052_IRQ_GPI12] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_5,
+ },
+ [DA9052_IRQ_GPI13] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_6,
+ },
+ [DA9052_IRQ_GPI14] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_7,
+ },
+ [DA9052_IRQ_GPI15] = {
+ .reg_offset = 3,
+ .mask = DA9052_IRQ_MASK_POS_8,
+ },
+};
+
+static struct regmap_irq_chip da9052_regmap_irq_chip = {
+ .name = "da9052_irq",
+ .status_base = DA9052_EVENT_A_REG,
+ .mask_base = DA9052_IRQ_MASK_A_REG,
+ .ack_base = DA9052_EVENT_A_REG,
+ .num_regs = DA9052_NUM_IRQ_REGS,
+ .irqs = da9052_irqs,
+ .num_irqs = ARRAY_SIZE(da9052_irqs),
+};
+
+struct regmap_config da9052_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .cache_type = REGCACHE_RBTREE,
+
+ .max_register = DA9052_PAGE1_CON_REG,
+ .readable_reg = da9052_reg_readable,
+ .writeable_reg = da9052_reg_writeable,
+ .volatile_reg = da9052_reg_volatile,
+};
+EXPORT_SYMBOL_GPL(da9052_regmap_config);
+
+int __devinit da9052_device_init(struct da9052 *da9052, u8 chip_id)
+{
+ struct da9052_pdata *pdata = da9052->dev->platform_data;
+ struct irq_desc *desc;
+ int ret;
+
+ mutex_init(&da9052->io_lock);
+
+ if (pdata && pdata->init != NULL)
+ pdata->init(da9052);
+
+ da9052->chip_id = chip_id;
+
+ if (!pdata || !pdata->irq_base)
+ da9052->irq_base = -1;
+ else
+ da9052->irq_base = pdata->irq_base;
+
+ ret = regmap_add_irq_chip(da9052->regmap, da9052->chip_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ da9052->irq_base, &da9052_regmap_irq_chip,
+ NULL);
+ if (ret < 0)
+ goto regmap_err;
+
+ desc = irq_to_desc(da9052->chip_irq);
+ da9052->irq_base = regmap_irq_chip_get_base(desc->action->dev_id);
+
+ ret = mfd_add_devices(da9052->dev, -1, da9052_subdev_info,
+ ARRAY_SIZE(da9052_subdev_info), NULL, 0);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ mfd_remove_devices(da9052->dev);
+regmap_err:
+ return ret;
+}
+
+void da9052_device_exit(struct da9052 *da9052)
+{
+ regmap_del_irq_chip(da9052->chip_irq,
+ irq_get_irq_data(da9052->irq_base)->chip_data);
+ mfd_remove_devices(da9052->dev);
+}
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("DA9052 MFD Core");
+MODULE_LICENSE("GPL");
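
da9052_device_init() hands interrupt handling to regmap-irq and then recovers the irq_base by reaching into the irq_desc of the chip interrupt. A hedged sketch of the more direct pattern, keeping the regmap_irq_chip_data handle that regmap_add_irq_chip() returns (the local variable is hypothetical):

	static int demo_request_irqs(struct da9052 *da9052)
	{
		struct regmap_irq_chip_data *irq_data;
		int ret;

		ret = regmap_add_irq_chip(da9052->regmap, da9052->chip_irq,
					  IRQF_TRIGGER_LOW | IRQF_ONESHOT,
					  da9052->irq_base, &da9052_regmap_irq_chip,
					  &irq_data);
		if (ret < 0)
			return ret;

		da9052->irq_base = regmap_irq_chip_get_base(irq_data);
		/* teardown: regmap_del_irq_chip(da9052->chip_irq, irq_data); */
		return 0;
	}
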
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
new file mode 100644
index 0000000..44b97c7
--- /dev/null
+++ b/drivers/mfd/da9052-i2c.c
@@ -0,0 +1,140 @@
+/*
+ * I2C access for DA9052 PMICs.
+ *
+ * Copyright(c) 2011 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/mfd/core.h>
+#include <linux/i2c.h>
+#include <linux/err.h>
+
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/reg.h>
+
+static int da9052_i2c_enable_multiwrite(struct da9052 *da9052)
+{
+ int reg_val, ret;
+
+ ret = regmap_read(da9052->regmap, DA9052_CONTROL_B_REG, &reg_val);
+ if (ret < 0)
+ return ret;
+
+ if (reg_val & DA9052_CONTROL_B_WRITEMODE) {
+ reg_val &= ~DA9052_CONTROL_B_WRITEMODE;
+ ret = regmap_write(da9052->regmap, DA9052_CONTROL_B_REG,
+ reg_val);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __devinit da9052_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct da9052 *da9052;
+ int ret;
+
+ da9052 = kzalloc(sizeof(struct da9052), GFP_KERNEL);
+ if (!da9052)
+ return -ENOMEM;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA)) {
+ dev_info(&client->dev, "Error in %s:i2c_check_functionality\n",
+ __func__);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ da9052->dev = &client->dev;
+ da9052->chip_irq = client->irq;
+
+ i2c_set_clientdata(client, da9052);
+
+ da9052->regmap = regmap_init_i2c(client, &da9052_regmap_config);
+ if (IS_ERR(da9052->regmap)) {
+ ret = PTR_ERR(da9052->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = da9052_i2c_enable_multiwrite(da9052);
+ if (ret < 0)
+ goto err;
+
+ ret = da9052_device_init(da9052, id->driver_data);
+ if (ret != 0)
+ goto err;
+
+ return 0;
+
+err:
+ kfree(da9052);
+ return ret;
+}
+
+static int da9052_i2c_remove(struct i2c_client *client)
+{
+ struct da9052 *da9052 = i2c_get_clientdata(client);
+
+ da9052_device_exit(da9052);
+ kfree(da9052);
+
+ return 0;
+}
+
+static struct i2c_device_id da9052_i2c_id[] = {
+ {"da9052", DA9052},
+ {"da9053-aa", DA9053_AA},
+ {"da9053-ba", DA9053_BA},
+ {"da9053-bb", DA9053_BB},
+ {}
+};
+
+static struct i2c_driver da9052_i2c_driver = {
+ .probe = da9052_i2c_probe,
+ .remove = da9052_i2c_remove,
+ .id_table = da9052_i2c_id,
+ .driver = {
+ .name = "da9052",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init da9052_i2c_init(void)
+{
+ int ret;
+
+ ret = i2c_add_driver(&da9052_i2c_driver);
+ if (ret != 0) {
+ pr_err("DA9052 I2C registration failed %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+subsys_initcall(da9052_i2c_init);
+
+static void __exit da9052_i2c_exit(void)
+{
+ i2c_del_driver(&da9052_i2c_driver);
+}
+module_exit(da9052_i2c_exit);
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("I2C driver for Dialog DA9052 PMIC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/da9052-spi.c b/drivers/mfd/da9052-spi.c
new file mode 100644
index 0000000..cdbc7ca
--- /dev/null
+++ b/drivers/mfd/da9052-spi.c
@@ -0,0 +1,115 @@
+/*
+ * SPI access for Dialog DA9052 PMICs.
+ *
+ * Copyright(c) 2011 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/input.h>
+#include <linux/mfd/core.h>
+#include <linux/spi/spi.h>
+#include <linux/err.h>
+
+#include <linux/mfd/da9052/da9052.h>
+
+static int da9052_spi_probe(struct spi_device *spi)
+{
+ int ret;
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct da9052 *da9052 = kzalloc(sizeof(struct da9052), GFP_KERNEL);
+
+ if (!da9052)
+ return -ENOMEM;
+
+ spi->mode = SPI_MODE_0 | SPI_CPOL;
+ spi->bits_per_word = 8;
+ spi_setup(spi);
+
+ da9052->dev = &spi->dev;
+ da9052->chip_irq = spi->irq;
+
+ dev_set_drvdata(&spi->dev, da9052);
+
+ da9052_regmap_config.read_flag_mask = 1;
+ da9052_regmap_config.write_flag_mask = 0;
+
+ da9052->regmap = regmap_init_spi(spi, &da9052_regmap_config);
+ if (IS_ERR(da9052->regmap)) {
+ ret = PTR_ERR(da9052->regmap);
+ dev_err(&spi->dev, "Failed to allocate register map: %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = da9052_device_init(da9052, id->driver_data);
+ if (ret != 0)
+ goto err;
+
+ return 0;
+
+err:
+ kfree(da9052);
+ return ret;
+}
+
+static int da9052_spi_remove(struct spi_device *spi)
+{
+ struct da9052 *da9052 = dev_get_drvdata(&spi->dev);
+
+ da9052_device_exit(da9052);
+ kfree(da9052);
+
+ return 0;
+}
+
+static struct spi_device_id da9052_spi_id[] = {
+ {"da9052", DA9052},
+ {"da9053-aa", DA9053_AA},
+ {"da9053-ba", DA9053_BA},
+ {"da9053-bb", DA9053_BB},
+ {}
+};
+
+static struct spi_driver da9052_spi_driver = {
+ .probe = da9052_spi_probe,
+ .remove = __devexit_p(da9052_spi_remove),
+ .id_table = da9052_spi_id,
+ .driver = {
+ .name = "da9052",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init da9052_spi_init(void)
+{
+ int ret;
+
+ ret = spi_register_driver(&da9052_spi_driver);
+ if (ret != 0) {
+ pr_err("Failed to register DA9052 SPI driver, %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+subsys_initcall(da9052_spi_init);
+
+static void __exit da9052_spi_exit(void)
+{
+ spi_unregister_driver(&da9052_spi_driver);
+}
+module_exit(da9052_spi_exit);
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("SPI driver for Dialog DA9052 PMIC");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index a25ab9c..af8e0ef 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2102,14 +2102,11 @@ static struct irq_chip prcmu_irq_chip = {
void __init db8500_prcmu_early_init(void)
{
unsigned int i;
-
- if (cpu_is_u8500v1()) {
- tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE_V1);
- } else if (cpu_is_u8500v2()) {
+ if (cpu_is_u8500v2()) {
void *tcpm_base = ioremap_nocache(U8500_PRCMU_TCPM_BASE, SZ_4K);
if (tcpm_base != NULL) {
- int version;
+ u32 version;
version = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET);
prcmu_version.project_number = version & 0xFF;
prcmu_version.api_version = (version >> 8) & 0xFF;
diff --git a/drivers/mfd/dm355evm_msp.c b/drivers/mfd/dm355evm_msp.c
index 8ad88da..7710227 100644
--- a/drivers/mfd/dm355evm_msp.c
+++ b/drivers/mfd/dm355evm_msp.c
@@ -308,8 +308,7 @@ static int add_children(struct i2c_client *client)
for (i = 0; i < ARRAY_SIZE(config_inputs); i++) {
int gpio = dm355evm_msp_gpio.base + config_inputs[i].offset;
- gpio_request(gpio, config_inputs[i].label);
- gpio_direction_input(gpio);
+ gpio_request_one(gpio, GPIOF_IN, config_inputs[i].label);
/* make it easy for userspace to see these */
gpio_export(gpio, false);
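
gpio_request_one() folds the request and the direction setup (plus optional initial level) into a single call, so a failed direction change can no longer leave a claimed but half-configured GPIO behind. A short sketch of the two flag variants used in this series:

	static int demo_claim_gpios(unsigned en_pin, unsigned in_pin)
	{
		int err;

		/* output, driven high from the moment the line is claimed */
		err = gpio_request_one(en_pin, GPIOF_OUT_INIT_HIGH, "demo-en");
		if (err)
			return err;

		/* plain input */
		return gpio_request_one(in_pin, GPIOF_IN, "demo-in");
	}
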
diff --git a/drivers/mfd/intel_msic.c b/drivers/mfd/intel_msic.c
index 97c2776..b76657e 100644
--- a/drivers/mfd/intel_msic.c
+++ b/drivers/mfd/intel_msic.c
@@ -485,17 +485,7 @@ static struct platform_driver intel_msic_driver = {
},
};
-static int __init intel_msic_init(void)
-{
- return platform_driver_register(&intel_msic_driver);
-}
-module_init(intel_msic_init);
-
-static void __exit intel_msic_exit(void)
-{
- platform_driver_unregister(&intel_msic_driver);
-}
-module_exit(intel_msic_exit);
+module_platform_driver(intel_msic_driver);
MODULE_DESCRIPTION("Driver for Intel MSIC");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
index 5c2a06a..a9223ed 100644
--- a/drivers/mfd/janz-cmodio.c
+++ b/drivers/mfd/janz-cmodio.c
@@ -33,7 +33,7 @@
/* Module Parameters */
static unsigned int num_modules = CMODIO_MAX_MODULES;
-static unsigned char *modules[CMODIO_MAX_MODULES] = {
+static char *modules[CMODIO_MAX_MODULES] = {
"empty", "empty", "empty", "empty",
};
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c
index ef39528..87662a1 100644
--- a/drivers/mfd/jz4740-adc.c
+++ b/drivers/mfd/jz4740-adc.c
@@ -181,7 +181,7 @@ static struct resource jz4740_battery_resources[] = {
},
};
-const struct mfd_cell jz4740_adc_cells[] = {
+static struct mfd_cell jz4740_adc_cells[] = {
{
.id = 0,
.name = "jz4740-hwmon",
@@ -338,17 +338,7 @@ static struct platform_driver jz4740_adc_driver = {
},
};
-static int __init jz4740_adc_init(void)
-{
- return platform_driver_register(&jz4740_adc_driver);
-}
-module_init(jz4740_adc_init);
-
-static void __exit jz4740_adc_exit(void)
-{
- platform_driver_unregister(&jz4740_adc_driver);
-}
-module_exit(jz4740_adc_exit);
+module_platform_driver(jz4740_adc_driver);
MODULE_DESCRIPTION("JZ4740 SoC ADC driver");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c
index ea1169b..abc4213 100644
--- a/drivers/mfd/lpc_sch.c
+++ b/drivers/mfd/lpc_sch.c
@@ -74,7 +74,7 @@ static struct mfd_cell tunnelcreek_cells[] = {
},
};
-static struct pci_device_id lpc_sch_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(lpc_sch_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SCH_LPC) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ITC_LPC) },
{ 0, }
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c
index e1e59c9..ca881ef 100644
--- a/drivers/mfd/max8925-core.c
+++ b/drivers/mfd/max8925-core.c
@@ -210,21 +210,6 @@ static struct max8925_irq_data max8925_irqs[] = {
.mask_reg = MAX8925_CHG_IRQ1_MASK,
.offs = 1 << 2,
},
- [MAX8925_IRQ_VCHG_USB_OVP] = {
- .reg = MAX8925_CHG_IRQ1,
- .mask_reg = MAX8925_CHG_IRQ1_MASK,
- .offs = 1 << 3,
- },
- [MAX8925_IRQ_VCHG_USB_F] = {
- .reg = MAX8925_CHG_IRQ1,
- .mask_reg = MAX8925_CHG_IRQ1_MASK,
- .offs = 1 << 4,
- },
- [MAX8925_IRQ_VCHG_USB_R] = {
- .reg = MAX8925_CHG_IRQ1,
- .mask_reg = MAX8925_CHG_IRQ1_MASK,
- .offs = 1 << 5,
- },
[MAX8925_IRQ_VCHG_THM_OK_R] = {
.reg = MAX8925_CHG_IRQ2,
.mask_reg = MAX8925_CHG_IRQ2_MASK,
diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
index 0219115..d9e4b36 100644
--- a/drivers/mfd/max8925-i2c.c
+++ b/drivers/mfd/max8925-i2c.c
@@ -161,6 +161,8 @@ static int __devinit max8925_probe(struct i2c_client *client,
chip->adc = i2c_new_dummy(chip->i2c->adapter, ADC_I2C_ADDR);
i2c_set_clientdata(chip->adc, chip);
+ device_init_wakeup(&client->dev, 1);
+
max8925_device_init(chip, pdata);
return 0;
@@ -177,10 +179,35 @@ static int __devexit max8925_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int max8925_suspend(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct max8925_chip *chip = i2c_get_clientdata(client);
+
+ if (device_may_wakeup(dev) && chip->wakeup_flag)
+ enable_irq_wake(chip->core_irq);
+ return 0;
+}
+
+static int max8925_resume(struct device *dev)
+{
+ struct i2c_client *client = container_of(dev, struct i2c_client, dev);
+ struct max8925_chip *chip = i2c_get_clientdata(client);
+
+ if (device_may_wakeup(dev) && chip->wakeup_flag)
+ disable_irq_wake(chip->core_irq);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(max8925_pm_ops, max8925_suspend, max8925_resume);
+
static struct i2c_driver max8925_driver = {
.driver = {
.name = "max8925",
.owner = THIS_MODULE,
+ .pm = &max8925_pm_ops,
},
.probe = max8925_probe,
.remove = __devexit_p(max8925_remove),
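
The probe path now calls device_init_wakeup() so user space can control wakeup via sysfs, and the sleep hooks only arm the wake IRQ when wakeup is actually allowed. A hedged sketch of the suspend half using the to_i2c_client() helper (resume mirrors it with disable_irq_wake()):

	static int demo_suspend(struct device *dev)
	{
		struct i2c_client *client = to_i2c_client(dev);
		struct max8925_chip *chip = i2c_get_clientdata(client);

		if (device_may_wakeup(dev) && chip->wakeup_flag)
			enable_irq_wake(chip->core_irq);
		return 0;
	}
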
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index 5be53ae..cb83a7a 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -43,7 +43,8 @@ static struct mfd_cell max8997_devs[] = {
{ .name = "max8997-battery", },
{ .name = "max8997-haptic", },
{ .name = "max8997-muic", },
- { .name = "max8997-flash", },
+ { .name = "max8997-led", .id = 1 },
+ { .name = "max8997-led", .id = 2 },
};
int max8997_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest)
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index de4096a..6ef56d2 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -176,6 +176,8 @@ static int max8998_i2c_probe(struct i2c_client *i2c,
if (ret < 0)
goto err;
+ device_init_wakeup(max8998->dev, max8998->wakeup);
+
return ret;
err:
@@ -210,7 +212,7 @@ static int max8998_suspend(struct device *dev)
struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
- if (max8998->wakeup)
+ if (device_may_wakeup(dev))
irq_set_irq_wake(max8998->irq, 1);
return 0;
}
@@ -220,7 +222,7 @@ static int max8998_resume(struct device *dev)
struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
struct max8998_dev *max8998 = i2c_get_clientdata(i2c);
- if (max8998->wakeup)
+ if (device_may_wakeup(dev))
irq_set_irq_wake(max8998->irq, 0);
/*
* In LP3974, if IRQ registers are not "read & clear"
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index e9619ac..7122386 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -18,11 +18,15 @@
#include <linux/spi/spi.h>
#include <linux/mfd/core.h>
#include <linux/mfd/mc13xxx.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
struct mc13xxx {
struct spi_device *spidev;
struct mutex lock;
int irq;
+ int flags;
irq_handler_t irqhandler[MC13XXX_NUM_IRQ];
void *irqdata[MC13XXX_NUM_IRQ];
@@ -550,10 +554,7 @@ static const char *mc13xxx_get_chipname(struct mc13xxx *mc13xxx)
int mc13xxx_get_flags(struct mc13xxx *mc13xxx)
{
- struct mc13xxx_platform_data *pdata =
- dev_get_platdata(&mc13xxx->spidev->dev);
-
- return pdata->flags;
+ return mc13xxx->flags;
}
EXPORT_SYMBOL(mc13xxx_get_flags);
@@ -615,13 +616,13 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
break;
case MC13XXX_ADC_MODE_SINGLE_CHAN:
- adc0 |= old_adc0 & MC13XXX_ADC0_TSMOD_MASK;
+ adc0 |= old_adc0 & MC13XXX_ADC0_CONFIG_MASK;
adc1 |= (channel & 0x7) << MC13XXX_ADC1_CHAN0_SHIFT;
adc1 |= MC13XXX_ADC1_RAND;
break;
case MC13XXX_ADC_MODE_MULT_CHAN:
- adc0 |= old_adc0 & MC13XXX_ADC0_TSMOD_MASK;
+ adc0 |= old_adc0 & MC13XXX_ADC0_CONFIG_MASK;
adc1 |= 4 << MC13XXX_ADC1_CHAN1_SHIFT;
break;
@@ -696,17 +697,67 @@ static int mc13xxx_add_subdevice(struct mc13xxx *mc13xxx, const char *format)
return mc13xxx_add_subdevice_pdata(mc13xxx, format, NULL, 0);
}
+#ifdef CONFIG_OF
+static int mc13xxx_probe_flags_dt(struct mc13xxx *mc13xxx)
+{
+ struct device_node *np = mc13xxx->spidev->dev.of_node;
+
+ if (!np)
+ return -ENODEV;
+
+ if (of_get_property(np, "fsl,mc13xxx-uses-adc", NULL))
+ mc13xxx->flags |= MC13XXX_USE_ADC;
+
+ if (of_get_property(np, "fsl,mc13xxx-uses-codec", NULL))
+ mc13xxx->flags |= MC13XXX_USE_CODEC;
+
+ if (of_get_property(np, "fsl,mc13xxx-uses-rtc", NULL))
+ mc13xxx->flags |= MC13XXX_USE_RTC;
+
+ if (of_get_property(np, "fsl,mc13xxx-uses-touch", NULL))
+ mc13xxx->flags |= MC13XXX_USE_TOUCHSCREEN;
+
+ return 0;
+}
+#else
+static inline int mc13xxx_probe_flags_dt(struct mc13xxx *mc13xxx)
+{
+ return -ENODEV;
+}
+#endif
+
+static const struct spi_device_id mc13xxx_device_id[] = {
+ {
+ .name = "mc13783",
+ .driver_data = MC13XXX_ID_MC13783,
+ }, {
+ .name = "mc13892",
+ .driver_data = MC13XXX_ID_MC13892,
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(spi, mc13xxx_device_id);
+
+static const struct of_device_id mc13xxx_dt_ids[] = {
+ { .compatible = "fsl,mc13783", .data = (void *) MC13XXX_ID_MC13783, },
+ { .compatible = "fsl,mc13892", .data = (void *) MC13XXX_ID_MC13892, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
+
static int mc13xxx_probe(struct spi_device *spi)
{
+ const struct of_device_id *of_id;
+ struct spi_driver *sdrv = to_spi_driver(spi->dev.driver);
struct mc13xxx *mc13xxx;
struct mc13xxx_platform_data *pdata = dev_get_platdata(&spi->dev);
enum mc13xxx_id id;
int ret;
- if (!pdata) {
- dev_err(&spi->dev, "invalid platform data\n");
- return -EINVAL;
- }
+ of_id = of_match_device(mc13xxx_dt_ids, &spi->dev);
+ if (of_id)
+ sdrv->id_table = &mc13xxx_device_id[(enum mc13xxx_id) of_id->data];
mc13xxx = kzalloc(sizeof(*mc13xxx), GFP_KERNEL);
if (!mc13xxx)
@@ -749,28 +800,33 @@ err_revision:
mc13xxx_unlock(mc13xxx);
- if (pdata->flags & MC13XXX_USE_ADC)
+ if (mc13xxx_probe_flags_dt(mc13xxx) < 0 && pdata)
+ mc13xxx->flags = pdata->flags;
+
+ if (mc13xxx->flags & MC13XXX_USE_ADC)
mc13xxx_add_subdevice(mc13xxx, "%s-adc");
- if (pdata->flags & MC13XXX_USE_CODEC)
+ if (mc13xxx->flags & MC13XXX_USE_CODEC)
mc13xxx_add_subdevice(mc13xxx, "%s-codec");
- mc13xxx_add_subdevice_pdata(mc13xxx, "%s-regulator",
- &pdata->regulators, sizeof(pdata->regulators));
-
- if (pdata->flags & MC13XXX_USE_RTC)
+ if (mc13xxx->flags & MC13XXX_USE_RTC)
mc13xxx_add_subdevice(mc13xxx, "%s-rtc");
- if (pdata->flags & MC13XXX_USE_TOUCHSCREEN)
+ if (mc13xxx->flags & MC13XXX_USE_TOUCHSCREEN)
mc13xxx_add_subdevice(mc13xxx, "%s-ts");
- if (pdata->leds)
+ if (pdata) {
+ mc13xxx_add_subdevice_pdata(mc13xxx, "%s-regulator",
+ &pdata->regulators, sizeof(pdata->regulators));
mc13xxx_add_subdevice_pdata(mc13xxx, "%s-led",
pdata->leds, sizeof(*pdata->leds));
-
- if (pdata->buttons)
mc13xxx_add_subdevice_pdata(mc13xxx, "%s-pwrbutton",
pdata->buttons, sizeof(*pdata->buttons));
+ } else {
+ mc13xxx_add_subdevice(mc13xxx, "%s-regulator");
+ mc13xxx_add_subdevice(mc13xxx, "%s-led");
+ mc13xxx_add_subdevice(mc13xxx, "%s-pwrbutton");
+ }
return 0;
}
@@ -788,25 +844,12 @@ static int __devexit mc13xxx_remove(struct spi_device *spi)
return 0;
}
-static const struct spi_device_id mc13xxx_device_id[] = {
- {
- .name = "mc13783",
- .driver_data = MC13XXX_ID_MC13783,
- }, {
- .name = "mc13892",
- .driver_data = MC13XXX_ID_MC13892,
- }, {
- /* sentinel */
- }
-};
-MODULE_DEVICE_TABLE(spi, mc13xxx_device_id);
-
static struct spi_driver mc13xxx_driver = {
.id_table = mc13xxx_device_id,
.driver = {
.name = "mc13xxx",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
+ .of_match_table = mc13xxx_dt_ids,
},
.probe = mc13xxx_probe,
.remove = __devexit_p(mc13xxx_remove),
diff --git a/drivers/mfd/mcp-core.c b/drivers/mfd/mcp-core.c
index 84815f9..86cc3f7 100644
--- a/drivers/mfd/mcp-core.c
+++ b/drivers/mfd/mcp-core.c
@@ -93,9 +93,11 @@ static struct bus_type mcp_bus_type = {
*/
void mcp_set_telecom_divisor(struct mcp *mcp, unsigned int div)
{
- spin_lock_irq(&mcp->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mcp->lock, flags);
mcp->ops->set_telecom_divisor(mcp, div);
- spin_unlock_irq(&mcp->lock);
+ spin_unlock_irqrestore(&mcp->lock, flags);
}
EXPORT_SYMBOL(mcp_set_telecom_divisor);
@@ -108,9 +110,11 @@ EXPORT_SYMBOL(mcp_set_telecom_divisor);
*/
void mcp_set_audio_divisor(struct mcp *mcp, unsigned int div)
{
- spin_lock_irq(&mcp->lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mcp->lock, flags);
mcp->ops->set_audio_divisor(mcp, div);
- spin_unlock_irq(&mcp->lock);
+ spin_unlock_irqrestore(&mcp->lock, flags);
}
EXPORT_SYMBOL(mcp_set_audio_divisor);
@@ -163,10 +167,11 @@ EXPORT_SYMBOL(mcp_reg_read);
*/
void mcp_enable(struct mcp *mcp)
{
- spin_lock_irq(&mcp->lock);
+ unsigned long flags;
+ spin_lock_irqsave(&mcp->lock, flags);
if (mcp->use_count++ == 0)
mcp->ops->enable(mcp);
- spin_unlock_irq(&mcp->lock);
+ spin_unlock_irqrestore(&mcp->lock, flags);
}
EXPORT_SYMBOL(mcp_enable);
diff --git a/drivers/mfd/mcp-sa11x0.c b/drivers/mfd/mcp-sa11x0.c
index 2dab02d..02c53a0 100644
--- a/drivers/mfd/mcp-sa11x0.c
+++ b/drivers/mfd/mcp-sa11x0.c
@@ -257,18 +257,7 @@ static struct platform_driver mcp_sa11x0_driver = {
/*
* This needs re-working
*/
-static int __init mcp_sa11x0_init(void)
-{
- return platform_driver_register(&mcp_sa11x0_driver);
-}
-
-static void __exit mcp_sa11x0_exit(void)
-{
- platform_driver_unregister(&mcp_sa11x0_driver);
-}
-
-module_init(mcp_sa11x0_init);
-module_exit(mcp_sa11x0_exit);
+module_platform_driver(mcp_sa11x0_driver);
MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_DESCRIPTION("SA11x0 multimedia communications port driver");
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
index 0f59228..411f523 100644
--- a/drivers/mfd/mfd-core.c
+++ b/drivers/mfd/mfd-core.c
@@ -123,7 +123,7 @@ static int mfd_add_device(struct device *parent, int id,
}
if (!cell->ignore_resource_conflicts) {
- ret = acpi_check_resource_conflict(res);
+ ret = acpi_check_resource_conflict(&res[r]);
if (ret)
goto fail_res;
}
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 86e1458..68ac2c5 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -27,8 +27,9 @@
#include <linux/spinlock.h>
#include <linux/gpio.h>
#include <plat/usb.h>
+#include <linux/pm_runtime.h>
-#define USBHS_DRIVER_NAME "usbhs-omap"
+#define USBHS_DRIVER_NAME "usbhs_omap"
#define OMAP_EHCI_DEVICE "ehci-omap"
#define OMAP_OHCI_DEVICE "ohci-omap3"
@@ -147,9 +148,6 @@
struct usbhs_hcd_omap {
- struct clk *usbhost_ick;
- struct clk *usbhost_hs_fck;
- struct clk *usbhost_fs_fck;
struct clk *xclk60mhsp1_ck;
struct clk *xclk60mhsp2_ck;
struct clk *utmi_p1_fck;
@@ -159,8 +157,7 @@ struct usbhs_hcd_omap {
struct clk *usbhost_p2_fck;
struct clk *usbtll_p2_fck;
struct clk *init_60m_fclk;
- struct clk *usbtll_fck;
- struct clk *usbtll_ick;
+ struct clk *ehci_logic_fck;
void __iomem *uhh_base;
void __iomem *tll_base;
@@ -169,7 +166,6 @@ struct usbhs_hcd_omap {
u32 usbhs_rev;
spinlock_t lock;
- int count;
};
/*-------------------------------------------------------------------------*/
@@ -319,269 +315,6 @@ err_end:
return ret;
}
-/**
- * usbhs_omap_probe - initialize TI-based HCDs
- *
- * Allocates basic resources for this USB host controller.
- */
-static int __devinit usbhs_omap_probe(struct platform_device *pdev)
-{
- struct device *dev = &pdev->dev;
- struct usbhs_omap_platform_data *pdata = dev->platform_data;
- struct usbhs_hcd_omap *omap;
- struct resource *res;
- int ret = 0;
- int i;
-
- if (!pdata) {
- dev_err(dev, "Missing platform data\n");
- ret = -ENOMEM;
- goto end_probe;
- }
-
- omap = kzalloc(sizeof(*omap), GFP_KERNEL);
- if (!omap) {
- dev_err(dev, "Memory allocation failed\n");
- ret = -ENOMEM;
- goto end_probe;
- }
-
- spin_lock_init(&omap->lock);
-
- for (i = 0; i < OMAP3_HS_USB_PORTS; i++)
- omap->platdata.port_mode[i] = pdata->port_mode[i];
-
- omap->platdata.ehci_data = pdata->ehci_data;
- omap->platdata.ohci_data = pdata->ohci_data;
-
- omap->usbhost_ick = clk_get(dev, "usbhost_ick");
- if (IS_ERR(omap->usbhost_ick)) {
- ret = PTR_ERR(omap->usbhost_ick);
- dev_err(dev, "usbhost_ick failed error:%d\n", ret);
- goto err_end;
- }
-
- omap->usbhost_hs_fck = clk_get(dev, "hs_fck");
- if (IS_ERR(omap->usbhost_hs_fck)) {
- ret = PTR_ERR(omap->usbhost_hs_fck);
- dev_err(dev, "usbhost_hs_fck failed error:%d\n", ret);
- goto err_usbhost_ick;
- }
-
- omap->usbhost_fs_fck = clk_get(dev, "fs_fck");
- if (IS_ERR(omap->usbhost_fs_fck)) {
- ret = PTR_ERR(omap->usbhost_fs_fck);
- dev_err(dev, "usbhost_fs_fck failed error:%d\n", ret);
- goto err_usbhost_hs_fck;
- }
-
- omap->usbtll_fck = clk_get(dev, "usbtll_fck");
- if (IS_ERR(omap->usbtll_fck)) {
- ret = PTR_ERR(omap->usbtll_fck);
- dev_err(dev, "usbtll_fck failed error:%d\n", ret);
- goto err_usbhost_fs_fck;
- }
-
- omap->usbtll_ick = clk_get(dev, "usbtll_ick");
- if (IS_ERR(omap->usbtll_ick)) {
- ret = PTR_ERR(omap->usbtll_ick);
- dev_err(dev, "usbtll_ick failed error:%d\n", ret);
- goto err_usbtll_fck;
- }
-
- omap->utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
- if (IS_ERR(omap->utmi_p1_fck)) {
- ret = PTR_ERR(omap->utmi_p1_fck);
- dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
- goto err_usbtll_ick;
- }
-
- omap->xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
- if (IS_ERR(omap->xclk60mhsp1_ck)) {
- ret = PTR_ERR(omap->xclk60mhsp1_ck);
- dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret);
- goto err_utmi_p1_fck;
- }
-
- omap->utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk");
- if (IS_ERR(omap->utmi_p2_fck)) {
- ret = PTR_ERR(omap->utmi_p2_fck);
- dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret);
- goto err_xclk60mhsp1_ck;
- }
-
- omap->xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck");
- if (IS_ERR(omap->xclk60mhsp2_ck)) {
- ret = PTR_ERR(omap->xclk60mhsp2_ck);
- dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret);
- goto err_utmi_p2_fck;
- }
-
- omap->usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk");
- if (IS_ERR(omap->usbhost_p1_fck)) {
- ret = PTR_ERR(omap->usbhost_p1_fck);
- dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret);
- goto err_xclk60mhsp2_ck;
- }
-
- omap->usbtll_p1_fck = clk_get(dev, "usb_tll_hs_usb_ch0_clk");
- if (IS_ERR(omap->usbtll_p1_fck)) {
- ret = PTR_ERR(omap->usbtll_p1_fck);
- dev_err(dev, "usbtll_p1_fck failed error:%d\n", ret);
- goto err_usbhost_p1_fck;
- }
-
- omap->usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk");
- if (IS_ERR(omap->usbhost_p2_fck)) {
- ret = PTR_ERR(omap->usbhost_p2_fck);
- dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret);
- goto err_usbtll_p1_fck;
- }
-
- omap->usbtll_p2_fck = clk_get(dev, "usb_tll_hs_usb_ch1_clk");
- if (IS_ERR(omap->usbtll_p2_fck)) {
- ret = PTR_ERR(omap->usbtll_p2_fck);
- dev_err(dev, "usbtll_p2_fck failed error:%d\n", ret);
- goto err_usbhost_p2_fck;
- }
-
- omap->init_60m_fclk = clk_get(dev, "init_60m_fclk");
- if (IS_ERR(omap->init_60m_fclk)) {
- ret = PTR_ERR(omap->init_60m_fclk);
- dev_err(dev, "init_60m_fclk failed error:%d\n", ret);
- goto err_usbtll_p2_fck;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "uhh");
- if (!res) {
- dev_err(dev, "UHH EHCI get resource failed\n");
- ret = -ENODEV;
- goto err_init_60m_fclk;
- }
-
- omap->uhh_base = ioremap(res->start, resource_size(res));
- if (!omap->uhh_base) {
- dev_err(dev, "UHH ioremap failed\n");
- ret = -ENOMEM;
- goto err_init_60m_fclk;
- }
-
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tll");
- if (!res) {
- dev_err(dev, "UHH EHCI get resource failed\n");
- ret = -ENODEV;
- goto err_tll;
- }
-
- omap->tll_base = ioremap(res->start, resource_size(res));
- if (!omap->tll_base) {
- dev_err(dev, "TLL ioremap failed\n");
- ret = -ENOMEM;
- goto err_tll;
- }
-
- platform_set_drvdata(pdev, omap);
-
- ret = omap_usbhs_alloc_children(pdev);
- if (ret) {
- dev_err(dev, "omap_usbhs_alloc_children failed\n");
- goto err_alloc;
- }
-
- goto end_probe;
-
-err_alloc:
- iounmap(omap->tll_base);
-
-err_tll:
- iounmap(omap->uhh_base);
-
-err_init_60m_fclk:
- clk_put(omap->init_60m_fclk);
-
-err_usbtll_p2_fck:
- clk_put(omap->usbtll_p2_fck);
-
-err_usbhost_p2_fck:
- clk_put(omap->usbhost_p2_fck);
-
-err_usbtll_p1_fck:
- clk_put(omap->usbtll_p1_fck);
-
-err_usbhost_p1_fck:
- clk_put(omap->usbhost_p1_fck);
-
-err_xclk60mhsp2_ck:
- clk_put(omap->xclk60mhsp2_ck);
-
-err_utmi_p2_fck:
- clk_put(omap->utmi_p2_fck);
-
-err_xclk60mhsp1_ck:
- clk_put(omap->xclk60mhsp1_ck);
-
-err_utmi_p1_fck:
- clk_put(omap->utmi_p1_fck);
-
-err_usbtll_ick:
- clk_put(omap->usbtll_ick);
-
-err_usbtll_fck:
- clk_put(omap->usbtll_fck);
-
-err_usbhost_fs_fck:
- clk_put(omap->usbhost_fs_fck);
-
-err_usbhost_hs_fck:
- clk_put(omap->usbhost_hs_fck);
-
-err_usbhost_ick:
- clk_put(omap->usbhost_ick);
-
-err_end:
- kfree(omap);
-
-end_probe:
- return ret;
-}
-
-/**
- * usbhs_omap_remove - shutdown processing for UHH & TLL HCDs
- * @pdev: USB Host Controller being removed
- *
- * Reverses the effect of usbhs_omap_probe().
- */
-static int __devexit usbhs_omap_remove(struct platform_device *pdev)
-{
- struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev);
-
- if (omap->count != 0) {
- dev_err(&pdev->dev,
- "Either EHCI or OHCI is still using usbhs core\n");
- return -EBUSY;
- }
-
- iounmap(omap->tll_base);
- iounmap(omap->uhh_base);
- clk_put(omap->init_60m_fclk);
- clk_put(omap->usbtll_p2_fck);
- clk_put(omap->usbhost_p2_fck);
- clk_put(omap->usbtll_p1_fck);
- clk_put(omap->usbhost_p1_fck);
- clk_put(omap->xclk60mhsp2_ck);
- clk_put(omap->utmi_p2_fck);
- clk_put(omap->xclk60mhsp1_ck);
- clk_put(omap->utmi_p1_fck);
- clk_put(omap->usbtll_ick);
- clk_put(omap->usbtll_fck);
- clk_put(omap->usbhost_fs_fck);
- clk_put(omap->usbhost_hs_fck);
- clk_put(omap->usbhost_ick);
- kfree(omap);
-
- return 0;
-}
-
static bool is_ohci_port(enum usbhs_omap_port_mode pmode)
{
switch (pmode) {
@@ -689,96 +422,101 @@ static void usbhs_omap_tll_init(struct device *dev, u8 tll_channel_count)
}
}
-static int usbhs_enable(struct device *dev)
+static int usbhs_runtime_resume(struct device *dev)
{
struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
struct usbhs_omap_platform_data *pdata = &omap->platdata;
- unsigned long flags = 0;
- int ret = 0;
- unsigned long timeout;
- unsigned reg;
+ unsigned long flags;
+
+ dev_dbg(dev, "usbhs_runtime_resume\n");
- dev_dbg(dev, "starting TI HSUSB Controller\n");
if (!pdata) {
dev_dbg(dev, "missing platform_data\n");
return -ENODEV;
}
spin_lock_irqsave(&omap->lock, flags);
- if (omap->count > 0)
- goto end_count;
- clk_enable(omap->usbhost_ick);
- clk_enable(omap->usbhost_hs_fck);
- clk_enable(omap->usbhost_fs_fck);
- clk_enable(omap->usbtll_fck);
- clk_enable(omap->usbtll_ick);
+ if (omap->ehci_logic_fck && !IS_ERR(omap->ehci_logic_fck))
+ clk_enable(omap->ehci_logic_fck);
- if (pdata->ehci_data->phy_reset) {
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) {
- gpio_request(pdata->ehci_data->reset_gpio_port[0],
- "USB1 PHY reset");
- gpio_direction_output
- (pdata->ehci_data->reset_gpio_port[0], 0);
- }
+ if (is_ehci_tll_mode(pdata->port_mode[0])) {
+ clk_enable(omap->usbhost_p1_fck);
+ clk_enable(omap->usbtll_p1_fck);
+ }
+ if (is_ehci_tll_mode(pdata->port_mode[1])) {
+ clk_enable(omap->usbhost_p2_fck);
+ clk_enable(omap->usbtll_p2_fck);
+ }
+ clk_enable(omap->utmi_p1_fck);
+ clk_enable(omap->utmi_p2_fck);
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) {
- gpio_request(pdata->ehci_data->reset_gpio_port[1],
- "USB2 PHY reset");
- gpio_direction_output
- (pdata->ehci_data->reset_gpio_port[1], 0);
- }
+ spin_unlock_irqrestore(&omap->lock, flags);
- /* Hold the PHY in RESET for enough time till DIR is high */
- udelay(10);
- }
+ return 0;
+}
- omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
- dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);
+static int usbhs_runtime_suspend(struct device *dev)
+{
+ struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
+ struct usbhs_omap_platform_data *pdata = &omap->platdata;
+ unsigned long flags;
- /* perform TLL soft reset, and wait until reset is complete */
- usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
- OMAP_USBTLL_SYSCONFIG_SOFTRESET);
+ dev_dbg(dev, "usbhs_runtime_suspend\n");
- /* Wait for TLL reset to complete */
- timeout = jiffies + msecs_to_jiffies(1000);
- while (!(usbhs_read(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
- & OMAP_USBTLL_SYSSTATUS_RESETDONE)) {
- cpu_relax();
+ if (!pdata) {
+ dev_dbg(dev, "missing platform_data\n");
+ return -ENODEV;
+ }
- if (time_after(jiffies, timeout)) {
- dev_dbg(dev, "operation timed out\n");
- ret = -EINVAL;
- goto err_tll;
- }
+ spin_lock_irqsave(&omap->lock, flags);
+
+ if (is_ehci_tll_mode(pdata->port_mode[0])) {
+ clk_disable(omap->usbhost_p1_fck);
+ clk_disable(omap->usbtll_p1_fck);
+ }
+ if (is_ehci_tll_mode(pdata->port_mode[1])) {
+ clk_disable(omap->usbhost_p2_fck);
+ clk_disable(omap->usbtll_p2_fck);
}
+ clk_disable(omap->utmi_p2_fck);
+ clk_disable(omap->utmi_p1_fck);
- dev_dbg(dev, "TLL RESET DONE\n");
+ if (omap->ehci_logic_fck && !IS_ERR(omap->ehci_logic_fck))
+ clk_disable(omap->ehci_logic_fck);
- /* (1<<3) = no idle mode only for initial debugging */
- usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
- OMAP_USBTLL_SYSCONFIG_ENAWAKEUP |
- OMAP_USBTLL_SYSCONFIG_SIDLEMODE |
- OMAP_USBTLL_SYSCONFIG_AUTOIDLE);
+ spin_unlock_irqrestore(&omap->lock, flags);
- /* Put UHH in NoIdle/NoStandby mode */
- reg = usbhs_read(omap->uhh_base, OMAP_UHH_SYSCONFIG);
- if (is_omap_usbhs_rev1(omap)) {
- reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
- | OMAP_UHH_SYSCONFIG_SIDLEMODE
- | OMAP_UHH_SYSCONFIG_CACTIVITY
- | OMAP_UHH_SYSCONFIG_MIDLEMODE);
- reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
+ return 0;
+}
+
+static void omap_usbhs_init(struct device *dev)
+{
+ struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
+ struct usbhs_omap_platform_data *pdata = &omap->platdata;
+ unsigned long flags;
+ unsigned reg;
+ dev_dbg(dev, "starting TI HSUSB Controller\n");
- } else if (is_omap_usbhs_rev2(omap)) {
- reg &= ~OMAP4_UHH_SYSCONFIG_IDLEMODE_CLEAR;
- reg |= OMAP4_UHH_SYSCONFIG_NOIDLE;
- reg &= ~OMAP4_UHH_SYSCONFIG_STDBYMODE_CLEAR;
- reg |= OMAP4_UHH_SYSCONFIG_NOSTDBY;
+ pm_runtime_get_sync(dev);
+ spin_lock_irqsave(&omap->lock, flags);
+
+ if (pdata->ehci_data->phy_reset) {
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
+ gpio_request_one(pdata->ehci_data->reset_gpio_port[0],
+ GPIOF_OUT_INIT_LOW, "USB1 PHY reset");
+
+ if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
+ gpio_request_one(pdata->ehci_data->reset_gpio_port[1],
+ GPIOF_OUT_INIT_LOW, "USB2 PHY reset");
+
+ /* Hold the PHY in RESET for enough time till DIR is high */
+ udelay(10);
}
- usbhs_write(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);
+ omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
+ dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);
reg = usbhs_read(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
/* setup ULPI bypass and burst configurations */
@@ -825,49 +563,6 @@ static int usbhs_enable(struct device *dev)
reg &= ~OMAP4_P1_MODE_CLEAR;
reg &= ~OMAP4_P2_MODE_CLEAR;
- if (is_ehci_phy_mode(pdata->port_mode[0])) {
- ret = clk_set_parent(omap->utmi_p1_fck,
- omap->xclk60mhsp1_ck);
- if (ret != 0) {
- dev_err(dev, "xclk60mhsp1_ck set parent"
- "failed error:%d\n", ret);
- goto err_tll;
- }
- } else if (is_ehci_tll_mode(pdata->port_mode[0])) {
- ret = clk_set_parent(omap->utmi_p1_fck,
- omap->init_60m_fclk);
- if (ret != 0) {
- dev_err(dev, "init_60m_fclk set parent"
- "failed error:%d\n", ret);
- goto err_tll;
- }
- clk_enable(omap->usbhost_p1_fck);
- clk_enable(omap->usbtll_p1_fck);
- }
-
- if (is_ehci_phy_mode(pdata->port_mode[1])) {
- ret = clk_set_parent(omap->utmi_p2_fck,
- omap->xclk60mhsp2_ck);
- if (ret != 0) {
- dev_err(dev, "xclk60mhsp1_ck set parent"
- "failed error:%d\n", ret);
- goto err_tll;
- }
- } else if (is_ehci_tll_mode(pdata->port_mode[1])) {
- ret = clk_set_parent(omap->utmi_p2_fck,
- omap->init_60m_fclk);
- if (ret != 0) {
- dev_err(dev, "init_60m_fclk set parent"
- "failed error:%d\n", ret);
- goto err_tll;
- }
- clk_enable(omap->usbhost_p2_fck);
- clk_enable(omap->usbtll_p2_fck);
- }
-
- clk_enable(omap->utmi_p1_fck);
- clk_enable(omap->utmi_p2_fck);
-
if (is_ehci_tll_mode(pdata->port_mode[0]) ||
(is_ohci_port(pdata->port_mode[0])))
reg |= OMAP4_P1_MODE_TLL;
@@ -913,12 +608,15 @@ static int usbhs_enable(struct device *dev)
(pdata->ehci_data->reset_gpio_port[1], 1);
}
-end_count:
- omap->count++;
spin_unlock_irqrestore(&omap->lock, flags);
- return 0;
+ pm_runtime_put_sync(dev);
+}
+
+static void omap_usbhs_deinit(struct device *dev)
+{
+ struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
+ struct usbhs_omap_platform_data *pdata = &omap->platdata;
-err_tll:
if (pdata->ehci_data->phy_reset) {
if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
gpio_free(pdata->ehci_data->reset_gpio_port[0]);
@@ -926,123 +624,272 @@ err_tll:
if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
gpio_free(pdata->ehci_data->reset_gpio_port[1]);
}
-
- clk_disable(omap->usbtll_ick);
- clk_disable(omap->usbtll_fck);
- clk_disable(omap->usbhost_fs_fck);
- clk_disable(omap->usbhost_hs_fck);
- clk_disable(omap->usbhost_ick);
- spin_unlock_irqrestore(&omap->lock, flags);
- return ret;
}
-static void usbhs_disable(struct device *dev)
+
+/**
+ * usbhs_omap_probe - initialize TI-based HCDs
+ *
+ * Allocates basic resources for this USB host controller.
+ */
+static int __devinit usbhs_omap_probe(struct platform_device *pdev)
{
- struct usbhs_hcd_omap *omap = dev_get_drvdata(dev);
- struct usbhs_omap_platform_data *pdata = &omap->platdata;
- unsigned long flags = 0;
- unsigned long timeout;
+ struct device *dev = &pdev->dev;
+ struct usbhs_omap_platform_data *pdata = dev->platform_data;
+ struct usbhs_hcd_omap *omap;
+ struct resource *res;
+ int ret = 0;
+ int i;
- dev_dbg(dev, "stopping TI HSUSB Controller\n");
+ if (!pdata) {
+ dev_err(dev, "Missing platform data\n");
+ ret = -ENOMEM;
+ goto end_probe;
+ }
- spin_lock_irqsave(&omap->lock, flags);
+ omap = kzalloc(sizeof(*omap), GFP_KERNEL);
+ if (!omap) {
+ dev_err(dev, "Memory allocation failed\n");
+ ret = -ENOMEM;
+ goto end_probe;
+ }
- if (omap->count == 0)
- goto end_disble;
+ spin_lock_init(&omap->lock);
- omap->count--;
+ for (i = 0; i < OMAP3_HS_USB_PORTS; i++)
+ omap->platdata.port_mode[i] = pdata->port_mode[i];
- if (omap->count != 0)
- goto end_disble;
+ omap->platdata.ehci_data = pdata->ehci_data;
+ omap->platdata.ohci_data = pdata->ohci_data;
+
+ pm_runtime_enable(dev);
- /* Reset OMAP modules for insmod/rmmod to work */
- usbhs_write(omap->uhh_base, OMAP_UHH_SYSCONFIG,
- is_omap_usbhs_rev2(omap) ?
- OMAP4_UHH_SYSCONFIG_SOFTRESET :
- OMAP_UHH_SYSCONFIG_SOFTRESET);
- timeout = jiffies + msecs_to_jiffies(100);
- while (!(usbhs_read(omap->uhh_base, OMAP_UHH_SYSSTATUS)
- & (1 << 0))) {
- cpu_relax();
+ for (i = 0; i < OMAP3_HS_USB_PORTS; i++)
+ if (is_ehci_phy_mode(i) || is_ehci_tll_mode(i) ||
+ is_ehci_hsic_mode(i)) {
+ omap->ehci_logic_fck = clk_get(dev, "ehci_logic_fck");
+ if (IS_ERR(omap->ehci_logic_fck)) {
+ ret = PTR_ERR(omap->ehci_logic_fck);
+ dev_warn(dev, "ehci_logic_fck failed:%d\n",
+ ret);
+ }
+ break;
+ }
- if (time_after(jiffies, timeout))
- dev_dbg(dev, "operation timed out\n");
+ omap->utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
+ if (IS_ERR(omap->utmi_p1_fck)) {
+ ret = PTR_ERR(omap->utmi_p1_fck);
+ dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
+ goto err_end;
}
- while (!(usbhs_read(omap->uhh_base, OMAP_UHH_SYSSTATUS)
- & (1 << 1))) {
- cpu_relax();
+ omap->xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
+ if (IS_ERR(omap->xclk60mhsp1_ck)) {
+ ret = PTR_ERR(omap->xclk60mhsp1_ck);
+ dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret);
+ goto err_utmi_p1_fck;
+ }
+
+ omap->utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk");
+ if (IS_ERR(omap->utmi_p2_fck)) {
+ ret = PTR_ERR(omap->utmi_p2_fck);
+ dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret);
+ goto err_xclk60mhsp1_ck;
+ }
+
+ omap->xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck");
+ if (IS_ERR(omap->xclk60mhsp2_ck)) {
+ ret = PTR_ERR(omap->xclk60mhsp2_ck);
+ dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret);
+ goto err_utmi_p2_fck;
+ }
- if (time_after(jiffies, timeout))
- dev_dbg(dev, "operation timed out\n");
+ omap->usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk");
+ if (IS_ERR(omap->usbhost_p1_fck)) {
+ ret = PTR_ERR(omap->usbhost_p1_fck);
+ dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret);
+ goto err_xclk60mhsp2_ck;
+ }
+
+ omap->usbtll_p1_fck = clk_get(dev, "usb_tll_hs_usb_ch0_clk");
+ if (IS_ERR(omap->usbtll_p1_fck)) {
+ ret = PTR_ERR(omap->usbtll_p1_fck);
+ dev_err(dev, "usbtll_p1_fck failed error:%d\n", ret);
+ goto err_usbhost_p1_fck;
+ }
+
+ omap->usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk");
+ if (IS_ERR(omap->usbhost_p2_fck)) {
+ ret = PTR_ERR(omap->usbhost_p2_fck);
+ dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret);
+ goto err_usbtll_p1_fck;
}
- while (!(usbhs_read(omap->uhh_base, OMAP_UHH_SYSSTATUS)
- & (1 << 2))) {
- cpu_relax();
+ omap->usbtll_p2_fck = clk_get(dev, "usb_tll_hs_usb_ch1_clk");
+ if (IS_ERR(omap->usbtll_p2_fck)) {
+ ret = PTR_ERR(omap->usbtll_p2_fck);
+ dev_err(dev, "usbtll_p2_fck failed error:%d\n", ret);
+ goto err_usbhost_p2_fck;
+ }
- if (time_after(jiffies, timeout))
- dev_dbg(dev, "operation timed out\n");
+ omap->init_60m_fclk = clk_get(dev, "init_60m_fclk");
+ if (IS_ERR(omap->init_60m_fclk)) {
+ ret = PTR_ERR(omap->init_60m_fclk);
+ dev_err(dev, "init_60m_fclk failed error:%d\n", ret);
+ goto err_usbtll_p2_fck;
}
- usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG, (1 << 1));
+ if (is_ehci_phy_mode(pdata->port_mode[0])) {
+		/* For OMAP3, clk_set_parent() fails */
+ ret = clk_set_parent(omap->utmi_p1_fck,
+ omap->xclk60mhsp1_ck);
+ if (ret != 0)
+ dev_err(dev, "xclk60mhsp1_ck set parent"
+ "failed error:%d\n", ret);
+ } else if (is_ehci_tll_mode(pdata->port_mode[0])) {
+ ret = clk_set_parent(omap->utmi_p1_fck,
+ omap->init_60m_fclk);
+ if (ret != 0)
+ dev_err(dev, "init_60m_fclk set parent"
+ "failed error:%d\n", ret);
+ }
- while (!(usbhs_read(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
- & (1 << 0))) {
- cpu_relax();
+ if (is_ehci_phy_mode(pdata->port_mode[1])) {
+ ret = clk_set_parent(omap->utmi_p2_fck,
+ omap->xclk60mhsp2_ck);
+ if (ret != 0)
+ dev_err(dev, "xclk60mhsp2_ck set parent"
+ "failed error:%d\n", ret);
+ } else if (is_ehci_tll_mode(pdata->port_mode[1])) {
+ ret = clk_set_parent(omap->utmi_p2_fck,
+ omap->init_60m_fclk);
+ if (ret != 0)
+ dev_err(dev, "init_60m_fclk set parent"
+ "failed error:%d\n", ret);
+ }
- if (time_after(jiffies, timeout))
- dev_dbg(dev, "operation timed out\n");
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "uhh");
+ if (!res) {
+ dev_err(dev, "UHH EHCI get resource failed\n");
+ ret = -ENODEV;
+ goto err_init_60m_fclk;
}
- if (is_omap_usbhs_rev2(omap)) {
- if (is_ehci_tll_mode(pdata->port_mode[0]))
- clk_disable(omap->usbtll_p1_fck);
- if (is_ehci_tll_mode(pdata->port_mode[1]))
- clk_disable(omap->usbtll_p2_fck);
- clk_disable(omap->utmi_p2_fck);
- clk_disable(omap->utmi_p1_fck);
+ omap->uhh_base = ioremap(res->start, resource_size(res));
+ if (!omap->uhh_base) {
+ dev_err(dev, "UHH ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_init_60m_fclk;
}
- clk_disable(omap->usbtll_ick);
- clk_disable(omap->usbtll_fck);
- clk_disable(omap->usbhost_fs_fck);
- clk_disable(omap->usbhost_hs_fck);
- clk_disable(omap->usbhost_ick);
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tll");
+ if (!res) {
+ dev_err(dev, "UHH EHCI get resource failed\n");
+ ret = -ENODEV;
+ goto err_tll;
+ }
- /* The gpio_free migh sleep; so unlock the spinlock */
- spin_unlock_irqrestore(&omap->lock, flags);
+ omap->tll_base = ioremap(res->start, resource_size(res));
+ if (!omap->tll_base) {
+ dev_err(dev, "TLL ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_tll;
+ }
- if (pdata->ehci_data->phy_reset) {
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
- gpio_free(pdata->ehci_data->reset_gpio_port[0]);
+ platform_set_drvdata(pdev, omap);
- if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
- gpio_free(pdata->ehci_data->reset_gpio_port[1]);
+ ret = omap_usbhs_alloc_children(pdev);
+ if (ret) {
+ dev_err(dev, "omap_usbhs_alloc_children failed\n");
+ goto err_alloc;
}
- return;
-end_disble:
- spin_unlock_irqrestore(&omap->lock, flags);
-}
+ omap_usbhs_init(dev);
-int omap_usbhs_enable(struct device *dev)
-{
- return usbhs_enable(dev->parent);
+ goto end_probe;
+
+err_alloc:
+ iounmap(omap->tll_base);
+
+err_tll:
+ iounmap(omap->uhh_base);
+
+err_init_60m_fclk:
+ clk_put(omap->init_60m_fclk);
+
+err_usbtll_p2_fck:
+ clk_put(omap->usbtll_p2_fck);
+
+err_usbhost_p2_fck:
+ clk_put(omap->usbhost_p2_fck);
+
+err_usbtll_p1_fck:
+ clk_put(omap->usbtll_p1_fck);
+
+err_usbhost_p1_fck:
+ clk_put(omap->usbhost_p1_fck);
+
+err_xclk60mhsp2_ck:
+ clk_put(omap->xclk60mhsp2_ck);
+
+err_utmi_p2_fck:
+ clk_put(omap->utmi_p2_fck);
+
+err_xclk60mhsp1_ck:
+ clk_put(omap->xclk60mhsp1_ck);
+
+err_utmi_p1_fck:
+ clk_put(omap->utmi_p1_fck);
+
+err_end:
+ clk_put(omap->ehci_logic_fck);
+ pm_runtime_disable(dev);
+ kfree(omap);
+
+end_probe:
+ return ret;
}
-EXPORT_SYMBOL_GPL(omap_usbhs_enable);
-void omap_usbhs_disable(struct device *dev)
+/**
+ * usbhs_omap_remove - shutdown processing for UHH & TLL HCDs
+ * @pdev: USB Host Controller being removed
+ *
+ * Reverses the effect of usbhs_omap_probe().
+ */
+static int __devexit usbhs_omap_remove(struct platform_device *pdev)
{
- usbhs_disable(dev->parent);
+ struct usbhs_hcd_omap *omap = platform_get_drvdata(pdev);
+
+ omap_usbhs_deinit(&pdev->dev);
+ iounmap(omap->tll_base);
+ iounmap(omap->uhh_base);
+ clk_put(omap->init_60m_fclk);
+ clk_put(omap->usbtll_p2_fck);
+ clk_put(omap->usbhost_p2_fck);
+ clk_put(omap->usbtll_p1_fck);
+ clk_put(omap->usbhost_p1_fck);
+ clk_put(omap->xclk60mhsp2_ck);
+ clk_put(omap->utmi_p2_fck);
+ clk_put(omap->xclk60mhsp1_ck);
+ clk_put(omap->utmi_p1_fck);
+ clk_put(omap->ehci_logic_fck);
+ pm_runtime_disable(&pdev->dev);
+ kfree(omap);
+
+ return 0;
}
-EXPORT_SYMBOL_GPL(omap_usbhs_disable);
+
+static const struct dev_pm_ops usbhsomap_dev_pm_ops = {
+ .runtime_suspend = usbhs_runtime_suspend,
+ .runtime_resume = usbhs_runtime_resume,
+};
static struct platform_driver usbhs_omap_driver = {
.driver = {
.name = (char *)usbhs_driver_name,
.owner = THIS_MODULE,
+ .pm = &usbhsomap_dev_pm_ops,
},
.remove = __exit_p(usbhs_omap_remove),
};
diff --git a/drivers/mfd/pcf50633-adc.c b/drivers/mfd/pcf50633-adc.c
index aed0d2a..3927c17 100644
--- a/drivers/mfd/pcf50633-adc.c
+++ b/drivers/mfd/pcf50633-adc.c
@@ -249,17 +249,7 @@ static struct platform_driver pcf50633_adc_driver = {
.remove = __devexit_p(pcf50633_adc_remove),
};
-static int __init pcf50633_adc_init(void)
-{
- return platform_driver_register(&pcf50633_adc_driver);
-}
-module_init(pcf50633_adc_init);
-
-static void __exit pcf50633_adc_exit(void)
-{
- platform_driver_unregister(&pcf50633_adc_driver);
-}
-module_exit(pcf50633_adc_exit);
+module_platform_driver(pcf50633_adc_driver);
MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
MODULE_DESCRIPTION("PCF50633 adc driver");
diff --git a/drivers/mfd/s5m-core.c b/drivers/mfd/s5m-core.c
new file mode 100644
index 0000000..caadabe
--- /dev/null
+++ b/drivers/mfd/s5m-core.c
@@ -0,0 +1,176 @@
+/*
+ * s5m87xx.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
+#include <linux/mutex.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/s5m87xx/s5m-core.h>
+#include <linux/mfd/s5m87xx/s5m-pmic.h>
+#include <linux/mfd/s5m87xx/s5m-rtc.h>
+#include <linux/regmap.h>
+
+static struct mfd_cell s5m87xx_devs[] = {
+ {
+ .name = "s5m8767-pmic",
+ }, {
+ .name = "s5m-rtc",
+ },
+};
+
+int s5m_reg_read(struct s5m87xx_dev *s5m87xx, u8 reg, void *dest)
+{
+ return regmap_read(s5m87xx->regmap, reg, dest);
+}
+EXPORT_SYMBOL_GPL(s5m_reg_read);
+
+int s5m_bulk_read(struct s5m87xx_dev *s5m87xx, u8 reg, int count, u8 *buf)
+{
+	return regmap_bulk_read(s5m87xx->regmap, reg, buf, count);
+}
+EXPORT_SYMBOL_GPL(s5m_bulk_read);
+
+int s5m_reg_write(struct s5m87xx_dev *s5m87xx, u8 reg, u8 value)
+{
+ return regmap_write(s5m87xx->regmap, reg, value);
+}
+EXPORT_SYMBOL_GPL(s5m_reg_write);
+
+int s5m_bulk_write(struct s5m87xx_dev *s5m87xx, u8 reg, int count, u8 *buf)
+{
+	return regmap_raw_write(s5m87xx->regmap, reg, buf, count);
+}
+EXPORT_SYMBOL_GPL(s5m_bulk_write);
+
+int s5m_reg_update(struct s5m87xx_dev *s5m87xx, u8 reg, u8 val, u8 mask)
+{
+ return regmap_update_bits(s5m87xx->regmap, reg, mask, val);
+}
+EXPORT_SYMBOL_GPL(s5m_reg_update);
+
+static struct regmap_config s5m_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static int s5m87xx_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct s5m_platform_data *pdata = i2c->dev.platform_data;
+ struct s5m87xx_dev *s5m87xx;
+ int ret = 0;
+ int error;
+
+ s5m87xx = kzalloc(sizeof(struct s5m87xx_dev), GFP_KERNEL);
+ if (s5m87xx == NULL)
+ return -ENOMEM;
+
+ i2c_set_clientdata(i2c, s5m87xx);
+ s5m87xx->dev = &i2c->dev;
+ s5m87xx->i2c = i2c;
+ s5m87xx->irq = i2c->irq;
+ s5m87xx->type = id->driver_data;
+
+ if (pdata) {
+ s5m87xx->device_type = pdata->device_type;
+ s5m87xx->ono = pdata->ono;
+ s5m87xx->irq_base = pdata->irq_base;
+ s5m87xx->wakeup = pdata->wakeup;
+ }
+
+ s5m87xx->regmap = regmap_init_i2c(i2c, &s5m_regmap_config);
+ if (IS_ERR(s5m87xx->regmap)) {
+ error = PTR_ERR(s5m87xx->regmap);
+ dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
+ error);
+ goto err;
+ }
+
+ s5m87xx->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
+ i2c_set_clientdata(s5m87xx->rtc, s5m87xx);
+
+ if (pdata && pdata->cfg_pmic_irq)
+ pdata->cfg_pmic_irq();
+
+ s5m_irq_init(s5m87xx);
+
+ pm_runtime_set_active(s5m87xx->dev);
+
+ ret = mfd_add_devices(s5m87xx->dev, -1,
+ s5m87xx_devs, ARRAY_SIZE(s5m87xx_devs),
+ NULL, 0);
+
+ if (ret < 0)
+ goto err;
+
+ return ret;
+
+err:
+ mfd_remove_devices(s5m87xx->dev);
+ s5m_irq_exit(s5m87xx);
+ i2c_unregister_device(s5m87xx->rtc);
+ regmap_exit(s5m87xx->regmap);
+ kfree(s5m87xx);
+ return ret;
+}
+
+static int s5m87xx_i2c_remove(struct i2c_client *i2c)
+{
+ struct s5m87xx_dev *s5m87xx = i2c_get_clientdata(i2c);
+
+ mfd_remove_devices(s5m87xx->dev);
+ s5m_irq_exit(s5m87xx);
+ i2c_unregister_device(s5m87xx->rtc);
+ regmap_exit(s5m87xx->regmap);
+ kfree(s5m87xx);
+ return 0;
+}
+
+static const struct i2c_device_id s5m87xx_i2c_id[] = {
+ { "s5m87xx", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, s5m87xx_i2c_id);
+
+static struct i2c_driver s5m87xx_i2c_driver = {
+ .driver = {
+ .name = "s5m87xx",
+ .owner = THIS_MODULE,
+ },
+ .probe = s5m87xx_i2c_probe,
+ .remove = s5m87xx_i2c_remove,
+ .id_table = s5m87xx_i2c_id,
+};
+
+static int __init s5m87xx_i2c_init(void)
+{
+ return i2c_add_driver(&s5m87xx_i2c_driver);
+}
+
+subsys_initcall(s5m87xx_i2c_init);
+
+static void __exit s5m87xx_i2c_exit(void)
+{
+ i2c_del_driver(&s5m87xx_i2c_driver);
+}
+module_exit(s5m87xx_i2c_exit);
+
+MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
+MODULE_DESCRIPTION("Core support for the S5M MFD");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/s5m-irq.c b/drivers/mfd/s5m-irq.c
new file mode 100644
index 0000000..de76dfb
--- /dev/null
+++ b/drivers/mfd/s5m-irq.c
@@ -0,0 +1,487 @@
+/*
+ * s5m-irq.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd
+ * http://www.samsung.com
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mfd/s5m87xx/s5m-core.h>
+
+struct s5m_irq_data {
+ int reg;
+ int mask;
+};
+
+static struct s5m_irq_data s5m8767_irqs[] = {
+ [S5M8767_IRQ_PWRR] = {
+ .reg = 1,
+ .mask = S5M8767_IRQ_PWRR_MASK,
+ },
+ [S5M8767_IRQ_PWRF] = {
+ .reg = 1,
+ .mask = S5M8767_IRQ_PWRF_MASK,
+ },
+ [S5M8767_IRQ_PWR1S] = {
+ .reg = 1,
+ .mask = S5M8767_IRQ_PWR1S_MASK,
+ },
+ [S5M8767_IRQ_JIGR] = {
+ .reg = 1,
+ .mask = S5M8767_IRQ_JIGR_MASK,
+ },
+ [S5M8767_IRQ_JIGF] = {
+ .reg = 1,
+ .mask = S5M8767_IRQ_JIGF_MASK,
+ },
+ [S5M8767_IRQ_LOWBAT2] = {
+ .reg = 1,
+ .mask = S5M8767_IRQ_LOWBAT2_MASK,
+ },
+ [S5M8767_IRQ_LOWBAT1] = {
+ .reg = 1,
+ .mask = S5M8767_IRQ_LOWBAT1_MASK,
+ },
+ [S5M8767_IRQ_MRB] = {
+ .reg = 2,
+ .mask = S5M8767_IRQ_MRB_MASK,
+ },
+ [S5M8767_IRQ_DVSOK2] = {
+ .reg = 2,
+ .mask = S5M8767_IRQ_DVSOK2_MASK,
+ },
+ [S5M8767_IRQ_DVSOK3] = {
+ .reg = 2,
+ .mask = S5M8767_IRQ_DVSOK3_MASK,
+ },
+ [S5M8767_IRQ_DVSOK4] = {
+ .reg = 2,
+ .mask = S5M8767_IRQ_DVSOK4_MASK,
+ },
+ [S5M8767_IRQ_RTC60S] = {
+ .reg = 3,
+ .mask = S5M8767_IRQ_RTC60S_MASK,
+ },
+ [S5M8767_IRQ_RTCA1] = {
+ .reg = 3,
+ .mask = S5M8767_IRQ_RTCA1_MASK,
+ },
+ [S5M8767_IRQ_RTCA2] = {
+ .reg = 3,
+ .mask = S5M8767_IRQ_RTCA2_MASK,
+ },
+ [S5M8767_IRQ_SMPL] = {
+ .reg = 3,
+ .mask = S5M8767_IRQ_SMPL_MASK,
+ },
+ [S5M8767_IRQ_RTC1S] = {
+ .reg = 3,
+ .mask = S5M8767_IRQ_RTC1S_MASK,
+ },
+ [S5M8767_IRQ_WTSR] = {
+ .reg = 3,
+ .mask = S5M8767_IRQ_WTSR_MASK,
+ },
+};
+
+static struct s5m_irq_data s5m8763_irqs[] = {
+ [S5M8763_IRQ_DCINF] = {
+ .reg = 1,
+ .mask = S5M8763_IRQ_DCINF_MASK,
+ },
+ [S5M8763_IRQ_DCINR] = {
+ .reg = 1,
+ .mask = S5M8763_IRQ_DCINR_MASK,
+ },
+ [S5M8763_IRQ_JIGF] = {
+ .reg = 1,
+ .mask = S5M8763_IRQ_JIGF_MASK,
+ },
+ [S5M8763_IRQ_JIGR] = {
+ .reg = 1,
+ .mask = S5M8763_IRQ_JIGR_MASK,
+ },
+ [S5M8763_IRQ_PWRONF] = {
+ .reg = 1,
+ .mask = S5M8763_IRQ_PWRONF_MASK,
+ },
+ [S5M8763_IRQ_PWRONR] = {
+ .reg = 1,
+ .mask = S5M8763_IRQ_PWRONR_MASK,
+ },
+ [S5M8763_IRQ_WTSREVNT] = {
+ .reg = 2,
+ .mask = S5M8763_IRQ_WTSREVNT_MASK,
+ },
+ [S5M8763_IRQ_SMPLEVNT] = {
+ .reg = 2,
+ .mask = S5M8763_IRQ_SMPLEVNT_MASK,
+ },
+ [S5M8763_IRQ_ALARM1] = {
+ .reg = 2,
+ .mask = S5M8763_IRQ_ALARM1_MASK,
+ },
+ [S5M8763_IRQ_ALARM0] = {
+ .reg = 2,
+ .mask = S5M8763_IRQ_ALARM0_MASK,
+ },
+ [S5M8763_IRQ_ONKEY1S] = {
+ .reg = 3,
+ .mask = S5M8763_IRQ_ONKEY1S_MASK,
+ },
+ [S5M8763_IRQ_TOPOFFR] = {
+ .reg = 3,
+ .mask = S5M8763_IRQ_TOPOFFR_MASK,
+ },
+ [S5M8763_IRQ_DCINOVPR] = {
+ .reg = 3,
+ .mask = S5M8763_IRQ_DCINOVPR_MASK,
+ },
+ [S5M8763_IRQ_CHGRSTF] = {
+ .reg = 3,
+ .mask = S5M8763_IRQ_CHGRSTF_MASK,
+ },
+ [S5M8763_IRQ_DONER] = {
+ .reg = 3,
+ .mask = S5M8763_IRQ_DONER_MASK,
+ },
+ [S5M8763_IRQ_CHGFAULT] = {
+ .reg = 3,
+ .mask = S5M8763_IRQ_CHGFAULT_MASK,
+ },
+ [S5M8763_IRQ_LOBAT1] = {
+ .reg = 4,
+ .mask = S5M8763_IRQ_LOBAT1_MASK,
+ },
+ [S5M8763_IRQ_LOBAT2] = {
+ .reg = 4,
+ .mask = S5M8763_IRQ_LOBAT2_MASK,
+ },
+};
+
+static inline struct s5m_irq_data *
+irq_to_s5m8767_irq(struct s5m87xx_dev *s5m87xx, int irq)
+{
+ return &s5m8767_irqs[irq - s5m87xx->irq_base];
+}
+
+static void s5m8767_irq_lock(struct irq_data *data)
+{
+ struct s5m87xx_dev *s5m87xx = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&s5m87xx->irqlock);
+}
+
+static void s5m8767_irq_sync_unlock(struct irq_data *data)
+{
+ struct s5m87xx_dev *s5m87xx = irq_data_get_irq_chip_data(data);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(s5m87xx->irq_masks_cur); i++) {
+ if (s5m87xx->irq_masks_cur[i] != s5m87xx->irq_masks_cache[i]) {
+ s5m87xx->irq_masks_cache[i] = s5m87xx->irq_masks_cur[i];
+ s5m_reg_write(s5m87xx, S5M8767_REG_INT1M + i,
+ s5m87xx->irq_masks_cur[i]);
+ }
+ }
+
+ mutex_unlock(&s5m87xx->irqlock);
+}
+
+static void s5m8767_irq_unmask(struct irq_data *data)
+{
+ struct s5m87xx_dev *s5m87xx = irq_data_get_irq_chip_data(data);
+ struct s5m_irq_data *irq_data = irq_to_s5m8767_irq(s5m87xx,
+ data->irq);
+
+ s5m87xx->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
+}
+
+static void s5m8767_irq_mask(struct irq_data *data)
+{
+ struct s5m87xx_dev *s5m87xx = irq_data_get_irq_chip_data(data);
+ struct s5m_irq_data *irq_data = irq_to_s5m8767_irq(s5m87xx,
+ data->irq);
+
+ s5m87xx->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
+}
+
+static struct irq_chip s5m8767_irq_chip = {
+ .name = "s5m8767",
+ .irq_bus_lock = s5m8767_irq_lock,
+ .irq_bus_sync_unlock = s5m8767_irq_sync_unlock,
+ .irq_mask = s5m8767_irq_mask,
+ .irq_unmask = s5m8767_irq_unmask,
+};
+
+static inline struct s5m_irq_data *
+irq_to_s5m8763_irq(struct s5m87xx_dev *s5m87xx, int irq)
+{
+ return &s5m8763_irqs[irq - s5m87xx->irq_base];
+}
+
+static void s5m8763_irq_lock(struct irq_data *data)
+{
+ struct s5m87xx_dev *s5m87xx = irq_data_get_irq_chip_data(data);
+
+ mutex_lock(&s5m87xx->irqlock);
+}
+
+static void s5m8763_irq_sync_unlock(struct irq_data *data)
+{
+ struct s5m87xx_dev *s5m87xx = irq_data_get_irq_chip_data(data);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(s5m87xx->irq_masks_cur); i++) {
+ if (s5m87xx->irq_masks_cur[i] != s5m87xx->irq_masks_cache[i]) {
+ s5m87xx->irq_masks_cache[i] = s5m87xx->irq_masks_cur[i];
+ s5m_reg_write(s5m87xx, S5M8763_REG_IRQM1 + i,
+ s5m87xx->irq_masks_cur[i]);
+ }
+ }
+
+ mutex_unlock(&s5m87xx->irqlock);
+}
+
+static void s5m8763_irq_unmask(struct irq_data *data)
+{
+ struct s5m87xx_dev *s5m87xx = irq_data_get_irq_chip_data(data);
+ struct s5m_irq_data *irq_data = irq_to_s5m8763_irq(s5m87xx,
+ data->irq);
+
+ s5m87xx->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
+}
+
+static void s5m8763_irq_mask(struct irq_data *data)
+{
+ struct s5m87xx_dev *s5m87xx = irq_data_get_irq_chip_data(data);
+ struct s5m_irq_data *irq_data = irq_to_s5m8763_irq(s5m87xx,
+ data->irq);
+
+ s5m87xx->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
+}
+
+static struct irq_chip s5m8763_irq_chip = {
+ .name = "s5m8763",
+ .irq_bus_lock = s5m8763_irq_lock,
+ .irq_bus_sync_unlock = s5m8763_irq_sync_unlock,
+ .irq_mask = s5m8763_irq_mask,
+ .irq_unmask = s5m8763_irq_unmask,
+};
+
+
+static irqreturn_t s5m8767_irq_thread(int irq, void *data)
+{
+ struct s5m87xx_dev *s5m87xx = data;
+ u8 irq_reg[NUM_IRQ_REGS-1];
+ int ret;
+ int i;
+
+
+ ret = s5m_bulk_read(s5m87xx, S5M8767_REG_INT1,
+ NUM_IRQ_REGS - 1, irq_reg);
+ if (ret < 0) {
+ dev_err(s5m87xx->dev, "Failed to read interrupt register: %d\n",
+ ret);
+ return IRQ_NONE;
+ }
+
+ for (i = 0; i < NUM_IRQ_REGS - 1; i++)
+ irq_reg[i] &= ~s5m87xx->irq_masks_cur[i];
+
+ for (i = 0; i < S5M8767_IRQ_NR; i++) {
+ if (irq_reg[s5m8767_irqs[i].reg - 1] & s5m8767_irqs[i].mask)
+ handle_nested_irq(s5m87xx->irq_base + i);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t s5m8763_irq_thread(int irq, void *data)
+{
+ struct s5m87xx_dev *s5m87xx = data;
+ u8 irq_reg[NUM_IRQ_REGS];
+ int ret;
+ int i;
+
+ ret = s5m_bulk_read(s5m87xx, S5M8763_REG_IRQ1,
+ NUM_IRQ_REGS, irq_reg);
+ if (ret < 0) {
+ dev_err(s5m87xx->dev, "Failed to read interrupt register: %d\n",
+ ret);
+ return IRQ_NONE;
+ }
+
+ for (i = 0; i < NUM_IRQ_REGS; i++)
+ irq_reg[i] &= ~s5m87xx->irq_masks_cur[i];
+
+ for (i = 0; i < S5M8763_IRQ_NR; i++) {
+ if (irq_reg[s5m8763_irqs[i].reg - 1] & s5m8763_irqs[i].mask)
+ handle_nested_irq(s5m87xx->irq_base + i);
+ }
+
+ return IRQ_HANDLED;
+}
+
+int s5m_irq_resume(struct s5m87xx_dev *s5m87xx)
+{
+	if (s5m87xx->irq && s5m87xx->irq_base) {
+ switch (s5m87xx->device_type) {
+ case S5M8763X:
+ s5m8763_irq_thread(s5m87xx->irq_base, s5m87xx);
+ break;
+ case S5M8767X:
+ s5m8767_irq_thread(s5m87xx->irq_base, s5m87xx);
+ break;
+ default:
+ break;
+
+ }
+ }
+ return 0;
+}
+
+int s5m_irq_init(struct s5m87xx_dev *s5m87xx)
+{
+ int i;
+ int cur_irq;
+ int ret = 0;
+ int type = s5m87xx->device_type;
+
+ if (!s5m87xx->irq) {
+ dev_warn(s5m87xx->dev,
+ "No interrupt specified, no interrupts\n");
+ s5m87xx->irq_base = 0;
+ return 0;
+ }
+
+ if (!s5m87xx->irq_base) {
+ dev_err(s5m87xx->dev,
+ "No interrupt base specified, no interrupts\n");
+ return 0;
+ }
+
+ mutex_init(&s5m87xx->irqlock);
+
+ switch (type) {
+ case S5M8763X:
+ for (i = 0; i < NUM_IRQ_REGS; i++) {
+ s5m87xx->irq_masks_cur[i] = 0xff;
+ s5m87xx->irq_masks_cache[i] = 0xff;
+ s5m_reg_write(s5m87xx, S5M8763_REG_IRQM1 + i,
+ 0xff);
+ }
+
+ s5m_reg_write(s5m87xx, S5M8763_REG_STATUSM1, 0xff);
+ s5m_reg_write(s5m87xx, S5M8763_REG_STATUSM2, 0xff);
+
+ for (i = 0; i < S5M8763_IRQ_NR; i++) {
+ cur_irq = i + s5m87xx->irq_base;
+ irq_set_chip_data(cur_irq, s5m87xx);
+ irq_set_chip_and_handler(cur_irq, &s5m8763_irq_chip,
+ handle_edge_irq);
+ irq_set_nested_thread(cur_irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(cur_irq, IRQF_VALID);
+#else
+ irq_set_noprobe(cur_irq);
+#endif
+ }
+
+ ret = request_threaded_irq(s5m87xx->irq, NULL,
+ s5m8763_irq_thread,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "s5m87xx-irq", s5m87xx);
+ if (ret) {
+ dev_err(s5m87xx->dev, "Failed to request IRQ %d: %d\n",
+ s5m87xx->irq, ret);
+ return ret;
+ }
+ break;
+ case S5M8767X:
+ for (i = 0; i < NUM_IRQ_REGS - 1; i++) {
+ s5m87xx->irq_masks_cur[i] = 0xff;
+ s5m87xx->irq_masks_cache[i] = 0xff;
+ s5m_reg_write(s5m87xx, S5M8767_REG_INT1M + i,
+ 0xff);
+ }
+ for (i = 0; i < S5M8767_IRQ_NR; i++) {
+ cur_irq = i + s5m87xx->irq_base;
+			ret = irq_set_chip_data(cur_irq, s5m87xx);
+ if (ret) {
+ dev_err(s5m87xx->dev,
+ "Failed to irq_set_chip_data %d: %d\n",
+ s5m87xx->irq, ret);
+ return ret;
+ }
+
+ irq_set_chip_and_handler(cur_irq, &s5m8767_irq_chip,
+ handle_edge_irq);
+ irq_set_nested_thread(cur_irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(cur_irq, IRQF_VALID);
+#else
+ irq_set_noprobe(cur_irq);
+#endif
+ }
+
+ ret = request_threaded_irq(s5m87xx->irq, NULL,
+ s5m8767_irq_thread,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "s5m87xx-irq", s5m87xx);
+ if (ret) {
+ dev_err(s5m87xx->dev, "Failed to request IRQ %d: %d\n",
+ s5m87xx->irq, ret);
+ return ret;
+ }
+ break;
+ default:
+ break;
+ }
+
+ if (!s5m87xx->ono)
+ return 0;
+
+ switch (type) {
+ case S5M8763X:
+ ret = request_threaded_irq(s5m87xx->ono, NULL,
+ s5m8763_irq_thread,
+ IRQF_TRIGGER_FALLING |
+ IRQF_TRIGGER_RISING |
+ IRQF_ONESHOT, "s5m87xx-ono",
+ s5m87xx);
+ break;
+ case S5M8767X:
+ ret = request_threaded_irq(s5m87xx->ono, NULL,
+ s5m8767_irq_thread,
+ IRQF_TRIGGER_FALLING |
+ IRQF_TRIGGER_RISING |
+ IRQF_ONESHOT, "s5m87xx-ono", s5m87xx);
+ break;
+ default:
+ break;
+ }
+
+ if (ret)
+ dev_err(s5m87xx->dev, "Failed to request IRQ %d: %d\n",
+ s5m87xx->ono, ret);
+
+ return 0;
+}
+
+void s5m_irq_exit(struct s5m87xx_dev *s5m87xx)
+{
+ if (s5m87xx->ono)
+ free_irq(s5m87xx->ono, s5m87xx);
+
+ if (s5m87xx->irq)
+ free_irq(s5m87xx->irq, s5m87xx);
+}
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c
index df3702c..f4d8611 100644
--- a/drivers/mfd/sm501.c
+++ b/drivers/mfd/sm501.c
@@ -1720,7 +1720,7 @@ static int sm501_plat_remove(struct platform_device *dev)
return 0;
}
-static struct pci_device_id sm501_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(sm501_pci_tbl) = {
{ 0x126f, 0x0501, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0, },
};
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
new file mode 100644
index 0000000..373f423
--- /dev/null
+++ b/drivers/mfd/stmpe-i2c.c
@@ -0,0 +1,109 @@
+/*
+ * ST Microelectronics MFD: stmpe's i2c client specific driver
+ *
+ * Copyright (C) ST-Ericsson SA 2010
+ * Copyright (C) ST Microelectronics SA 2011
+ *
+ * License Terms: GNU General Public License, version 2
+ * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
+ * Author: Viresh Kumar <viresh.kumar@st.com> for ST Microelectronics
+ */
+
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include "stmpe.h"
+
+static int i2c_reg_read(struct stmpe *stmpe, u8 reg)
+{
+ struct i2c_client *i2c = stmpe->client;
+
+ return i2c_smbus_read_byte_data(i2c, reg);
+}
+
+static int i2c_reg_write(struct stmpe *stmpe, u8 reg, u8 val)
+{
+ struct i2c_client *i2c = stmpe->client;
+
+ return i2c_smbus_write_byte_data(i2c, reg, val);
+}
+
+static int i2c_block_read(struct stmpe *stmpe, u8 reg, u8 length, u8 *values)
+{
+ struct i2c_client *i2c = stmpe->client;
+
+ return i2c_smbus_read_i2c_block_data(i2c, reg, length, values);
+}
+
+static int i2c_block_write(struct stmpe *stmpe, u8 reg, u8 length,
+ const u8 *values)
+{
+ struct i2c_client *i2c = stmpe->client;
+
+ return i2c_smbus_write_i2c_block_data(i2c, reg, length, values);
+}
+
+static struct stmpe_client_info i2c_ci = {
+ .read_byte = i2c_reg_read,
+ .write_byte = i2c_reg_write,
+ .read_block = i2c_block_read,
+ .write_block = i2c_block_write,
+};
+
+static int __devinit
+stmpe_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
+{
+ i2c_ci.data = (void *)id;
+ i2c_ci.irq = i2c->irq;
+ i2c_ci.client = i2c;
+ i2c_ci.dev = &i2c->dev;
+
+ return stmpe_probe(&i2c_ci, id->driver_data);
+}
+
+static int __devexit stmpe_i2c_remove(struct i2c_client *i2c)
+{
+ struct stmpe *stmpe = dev_get_drvdata(&i2c->dev);
+
+ return stmpe_remove(stmpe);
+}
+
+static const struct i2c_device_id stmpe_i2c_id[] = {
+ { "stmpe610", STMPE610 },
+ { "stmpe801", STMPE801 },
+ { "stmpe811", STMPE811 },
+ { "stmpe1601", STMPE1601 },
+ { "stmpe2401", STMPE2401 },
+ { "stmpe2403", STMPE2403 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, stmpe_i2c_id);
+
+static struct i2c_driver stmpe_i2c_driver = {
+ .driver.name = "stmpe-i2c",
+ .driver.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .driver.pm = &stmpe_dev_pm_ops,
+#endif
+ .probe = stmpe_i2c_probe,
+ .remove = __devexit_p(stmpe_i2c_remove),
+ .id_table = stmpe_i2c_id,
+};
+
+static int __init stmpe_init(void)
+{
+ return i2c_add_driver(&stmpe_i2c_driver);
+}
+subsys_initcall(stmpe_init);
+
+static void __exit stmpe_exit(void)
+{
+ i2c_del_driver(&stmpe_i2c_driver);
+}
+module_exit(stmpe_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("STMPE MFD I2C Interface Driver");
+MODULE_AUTHOR("Rabin Vincent <rabin.vincent@stericsson.com>");
diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c
new file mode 100644
index 0000000..b58c43c
--- /dev/null
+++ b/drivers/mfd/stmpe-spi.c
@@ -0,0 +1,150 @@
+/*
+ * ST Microelectronics MFD: stmpe's spi client specific driver
+ *
+ * Copyright (C) ST Microelectronics SA 2011
+ *
+ * License Terms: GNU General Public License, version 2
+ * Author: Viresh Kumar <viresh.kumar@st.com> for ST Microelectronics
+ */
+
+#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include "stmpe.h"
+
+#define READ_CMD (1 << 7)
+
+static int spi_reg_read(struct stmpe *stmpe, u8 reg)
+{
+ struct spi_device *spi = stmpe->client;
+ int status = spi_w8r16(spi, reg | READ_CMD);
+
+ return (status < 0) ? status : status >> 8;
+}
+
+static int spi_reg_write(struct stmpe *stmpe, u8 reg, u8 val)
+{
+ struct spi_device *spi = stmpe->client;
+ u16 cmd = (val << 8) | reg;
+
+ return spi_write(spi, (const u8 *)&cmd, 2);
+}
+
+static int spi_block_read(struct stmpe *stmpe, u8 reg, u8 length, u8 *values)
+{
+ int ret, i;
+
+ for (i = 0; i < length; i++) {
+ ret = spi_reg_read(stmpe, reg + i);
+ if (ret < 0)
+ return ret;
+ *(values + i) = ret;
+ }
+
+ return 0;
+}
+
+static int spi_block_write(struct stmpe *stmpe, u8 reg, u8 length,
+ const u8 *values)
+{
+ int ret = 0, i;
+
+ for (i = length; i > 0; i--, reg++) {
+ ret = spi_reg_write(stmpe, reg, *(values + i - 1));
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static void spi_init(struct stmpe *stmpe)
+{
+ struct spi_device *spi = stmpe->client;
+
+ spi->bits_per_word = 8;
+
+ /* This register is only present for stmpe811 */
+ if (stmpe->variant->id_val == 0x0811)
+ spi_reg_write(stmpe, STMPE811_REG_SPI_CFG, spi->mode);
+
+ if (spi_setup(spi) < 0)
+ dev_dbg(&spi->dev, "spi_setup failed\n");
+}
+
+static struct stmpe_client_info spi_ci = {
+ .read_byte = spi_reg_read,
+ .write_byte = spi_reg_write,
+ .read_block = spi_block_read,
+ .write_block = spi_block_write,
+ .init = spi_init,
+};
+
+static int __devinit
+stmpe_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+
+ /* don't exceed max specified rate - 1MHz - Limitation of STMPE */
+ if (spi->max_speed_hz > 1000000) {
+ dev_dbg(&spi->dev, "f(sample) %d KHz?\n",
+ (spi->max_speed_hz/1000));
+ return -EINVAL;
+ }
+
+ spi_ci.irq = spi->irq;
+ spi_ci.client = spi;
+ spi_ci.dev = &spi->dev;
+
+ return stmpe_probe(&spi_ci, id->driver_data);
+}
+
+static int __devexit stmpe_spi_remove(struct spi_device *spi)
+{
+ struct stmpe *stmpe = dev_get_drvdata(&spi->dev);
+
+ return stmpe_remove(stmpe);
+}
+
+static const struct spi_device_id stmpe_spi_id[] = {
+ { "stmpe610", STMPE610 },
+ { "stmpe801", STMPE801 },
+ { "stmpe811", STMPE811 },
+ { "stmpe1601", STMPE1601 },
+ { "stmpe2401", STMPE2401 },
+ { "stmpe2403", STMPE2403 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, stmpe_spi_id);
+
+static struct spi_driver stmpe_spi_driver = {
+ .driver = {
+ .name = "stmpe-spi",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &stmpe_dev_pm_ops,
+#endif
+ },
+ .probe = stmpe_spi_probe,
+ .remove = __devexit_p(stmpe_spi_remove),
+ .id_table = stmpe_spi_id,
+};
+
+static int __init stmpe_init(void)
+{
+ return spi_register_driver(&stmpe_spi_driver);
+}
+subsys_initcall(stmpe_init);
+
+static void __exit stmpe_exit(void)
+{
+ spi_unregister_driver(&stmpe_spi_driver);
+}
+module_exit(stmpe_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("STMPE MFD SPI Interface Driver");
+MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index 2963689c..e07947e 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -1,18 +1,20 @@
/*
+ * ST Microelectronics MFD: stmpe's driver
+ *
* Copyright (C) ST-Ericsson SA 2010
*
* License Terms: GNU General Public License, version 2
* Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
*/
+#include <linux/gpio.h>
+#include <linux/export.h>
#include <linux/kernel.h>
-#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/pm.h>
#include <linux/slab.h>
-#include <linux/i2c.h>
#include <linux/mfd/core.h>
-#include <linux/mfd/stmpe.h>
#include "stmpe.h"
static int __stmpe_enable(struct stmpe *stmpe, unsigned int blocks)
@@ -29,10 +31,9 @@ static int __stmpe_reg_read(struct stmpe *stmpe, u8 reg)
{
int ret;
- ret = i2c_smbus_read_byte_data(stmpe->i2c, reg);
+ ret = stmpe->ci->read_byte(stmpe, reg);
if (ret < 0)
- dev_err(stmpe->dev, "failed to read reg %#x: %d\n",
- reg, ret);
+ dev_err(stmpe->dev, "failed to read reg %#x: %d\n", reg, ret);
dev_vdbg(stmpe->dev, "rd: reg %#x => data %#x\n", reg, ret);
@@ -45,10 +46,9 @@ static int __stmpe_reg_write(struct stmpe *stmpe, u8 reg, u8 val)
dev_vdbg(stmpe->dev, "wr: reg %#x <= %#x\n", reg, val);
- ret = i2c_smbus_write_byte_data(stmpe->i2c, reg, val);
+ ret = stmpe->ci->write_byte(stmpe, reg, val);
if (ret < 0)
- dev_err(stmpe->dev, "failed to write reg %#x: %d\n",
- reg, ret);
+ dev_err(stmpe->dev, "failed to write reg %#x: %d\n", reg, ret);
return ret;
}
@@ -72,10 +72,9 @@ static int __stmpe_block_read(struct stmpe *stmpe, u8 reg, u8 length,
{
int ret;
- ret = i2c_smbus_read_i2c_block_data(stmpe->i2c, reg, length, values);
+ ret = stmpe->ci->read_block(stmpe, reg, length, values);
if (ret < 0)
- dev_err(stmpe->dev, "failed to read regs %#x: %d\n",
- reg, ret);
+ dev_err(stmpe->dev, "failed to read regs %#x: %d\n", reg, ret);
dev_vdbg(stmpe->dev, "rd: reg %#x (%d) => ret %#x\n", reg, length, ret);
stmpe_dump_bytes("stmpe rd: ", values, length);
@@ -91,11 +90,9 @@ static int __stmpe_block_write(struct stmpe *stmpe, u8 reg, u8 length,
dev_vdbg(stmpe->dev, "wr: regs %#x (%d)\n", reg, length);
stmpe_dump_bytes("stmpe wr: ", values, length);
- ret = i2c_smbus_write_i2c_block_data(stmpe->i2c, reg, length,
- values);
+ ret = stmpe->ci->write_block(stmpe, reg, length, values);
if (ret < 0)
- dev_err(stmpe->dev, "failed to write regs %#x: %d\n",
- reg, ret);
+ dev_err(stmpe->dev, "failed to write regs %#x: %d\n", reg, ret);
return ret;
}
@@ -245,12 +242,14 @@ int stmpe_set_altfunc(struct stmpe *stmpe, u32 pins, enum stmpe_block block)
u8 regaddr = stmpe->regs[STMPE_IDX_GPAFR_U_MSB];
int af_bits = variant->af_bits;
int numregs = DIV_ROUND_UP(stmpe->num_gpios * af_bits, 8);
- int afperreg = 8 / af_bits;
int mask = (1 << af_bits) - 1;
u8 regs[numregs];
- int af;
- int ret;
+ int af, afperreg, ret;
+
+ if (!variant->get_altfunc)
+ return 0;
+ afperreg = 8 / af_bits;
mutex_lock(&stmpe->lock);
ret = __stmpe_enable(stmpe, STMPE_BLOCK_GPIO);
@@ -325,7 +324,51 @@ static struct mfd_cell stmpe_keypad_cell = {
};
/*
- * Touchscreen (STMPE811)
+ * STMPE801
+ */
+static const u8 stmpe801_regs[] = {
+ [STMPE_IDX_CHIP_ID] = STMPE801_REG_CHIP_ID,
+ [STMPE_IDX_ICR_LSB] = STMPE801_REG_SYS_CTRL,
+ [STMPE_IDX_GPMR_LSB] = STMPE801_REG_GPIO_MP_STA,
+ [STMPE_IDX_GPSR_LSB] = STMPE801_REG_GPIO_SET_PIN,
+ [STMPE_IDX_GPCR_LSB] = STMPE801_REG_GPIO_SET_PIN,
+ [STMPE_IDX_GPDR_LSB] = STMPE801_REG_GPIO_DIR,
+ [STMPE_IDX_IEGPIOR_LSB] = STMPE801_REG_GPIO_INT_EN,
+ [STMPE_IDX_ISGPIOR_MSB] = STMPE801_REG_GPIO_INT_STA,
+
+};
+
+static struct stmpe_variant_block stmpe801_blocks[] = {
+ {
+ .cell = &stmpe_gpio_cell,
+ .irq = 0,
+ .block = STMPE_BLOCK_GPIO,
+ },
+};
+
+static int stmpe801_enable(struct stmpe *stmpe, unsigned int blocks,
+ bool enable)
+{
+ if (blocks & STMPE_BLOCK_GPIO)
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static struct stmpe_variant_info stmpe801 = {
+ .name = "stmpe801",
+ .id_val = STMPE801_ID,
+ .id_mask = 0xffff,
+ .num_gpios = 8,
+ .regs = stmpe801_regs,
+ .blocks = stmpe801_blocks,
+ .num_blocks = ARRAY_SIZE(stmpe801_blocks),
+ .num_irqs = STMPE801_NR_INTERNAL_IRQS,
+ .enable = stmpe801_enable,
+};
+
+/*
+ * Touchscreen (STMPE811 or STMPE610)
*/
static struct resource stmpe_ts_resources[] = {
@@ -350,7 +393,7 @@ static struct mfd_cell stmpe_ts_cell = {
};
/*
- * STMPE811
+ * STMPE811 or STMPE610
*/
static const u8 stmpe811_regs[] = {
@@ -421,6 +464,21 @@ static struct stmpe_variant_info stmpe811 = {
.get_altfunc = stmpe811_get_altfunc,
};
+/* Similar to the STMPE811, except for the number of GPIOs */
+static struct stmpe_variant_info stmpe610 = {
+ .name = "stmpe610",
+ .id_val = 0x0811,
+ .id_mask = 0xffff,
+ .num_gpios = 6,
+ .af_bits = 1,
+ .regs = stmpe811_regs,
+ .blocks = stmpe811_blocks,
+ .num_blocks = ARRAY_SIZE(stmpe811_blocks),
+ .num_irqs = STMPE811_NR_INTERNAL_IRQS,
+ .enable = stmpe811_enable,
+ .get_altfunc = stmpe811_get_altfunc,
+};
+
/*
* STMPE1601
*/
@@ -655,6 +713,8 @@ static struct stmpe_variant_info stmpe2403 = {
};
static struct stmpe_variant_info *stmpe_variant_info[] = {
+ [STMPE610] = &stmpe610,
+ [STMPE801] = &stmpe801,
[STMPE811] = &stmpe811,
[STMPE1601] = &stmpe1601,
[STMPE2401] = &stmpe2401,
@@ -671,6 +731,11 @@ static irqreturn_t stmpe_irq(int irq, void *data)
int ret;
int i;
+ if (variant->id_val == STMPE801_ID) {
+ handle_nested_irq(stmpe->irq_base);
+ return IRQ_HANDLED;
+ }
+
ret = stmpe_block_read(stmpe, israddr, num, isr);
if (ret < 0)
return IRQ_NONE;
@@ -757,14 +822,17 @@ static struct irq_chip stmpe_irq_chip = {
static int __devinit stmpe_irq_init(struct stmpe *stmpe)
{
+ struct irq_chip *chip = NULL;
int num_irqs = stmpe->variant->num_irqs;
int base = stmpe->irq_base;
int irq;
+ if (stmpe->variant->id_val != STMPE801_ID)
+ chip = &stmpe_irq_chip;
+
for (irq = base; irq < base + num_irqs; irq++) {
irq_set_chip_data(irq, stmpe);
- irq_set_chip_and_handler(irq, &stmpe_irq_chip,
- handle_edge_irq);
+ irq_set_chip_and_handler(irq, chip, handle_edge_irq);
irq_set_nested_thread(irq, 1);
#ifdef CONFIG_ARM
set_irq_flags(irq, IRQF_VALID);
@@ -796,7 +864,7 @@ static int __devinit stmpe_chip_init(struct stmpe *stmpe)
unsigned int irq_trigger = stmpe->pdata->irq_trigger;
int autosleep_timeout = stmpe->pdata->autosleep_timeout;
struct stmpe_variant_info *variant = stmpe->variant;
- u8 icr = STMPE_ICR_LSB_GIM;
+ u8 icr;
unsigned int id;
u8 data[2];
int ret;
@@ -819,16 +887,32 @@ static int __devinit stmpe_chip_init(struct stmpe *stmpe)
if (ret)
return ret;
- if (irq_trigger == IRQF_TRIGGER_FALLING ||
- irq_trigger == IRQF_TRIGGER_RISING)
- icr |= STMPE_ICR_LSB_EDGE;
+ if (id == STMPE801_ID)
+ icr = STMPE801_REG_SYS_CTRL_INT_EN;
+ else
+ icr = STMPE_ICR_LSB_GIM;
+
+ /* STMPE801 doesn't support Edge interrupts */
+ if (id != STMPE801_ID) {
+ if (irq_trigger == IRQF_TRIGGER_FALLING ||
+ irq_trigger == IRQF_TRIGGER_RISING)
+ icr |= STMPE_ICR_LSB_EDGE;
+ }
if (irq_trigger == IRQF_TRIGGER_RISING ||
- irq_trigger == IRQF_TRIGGER_HIGH)
- icr |= STMPE_ICR_LSB_HIGH;
+ irq_trigger == IRQF_TRIGGER_HIGH) {
+ if (id == STMPE801_ID)
+ icr |= STMPE801_REG_SYS_CTRL_INT_HI;
+ else
+ icr |= STMPE_ICR_LSB_HIGH;
+ }
- if (stmpe->pdata->irq_invert_polarity)
- icr ^= STMPE_ICR_LSB_HIGH;
+ if (stmpe->pdata->irq_invert_polarity) {
+ if (id == STMPE801_ID)
+ icr ^= STMPE801_REG_SYS_CTRL_INT_HI;
+ else
+ icr ^= STMPE_ICR_LSB_HIGH;
+ }
if (stmpe->pdata->autosleep) {
ret = stmpe_autosleep(stmpe, autosleep_timeout);
@@ -873,32 +957,10 @@ static int __devinit stmpe_devices_init(struct stmpe *stmpe)
return ret;
}
-#ifdef CONFIG_PM
-static int stmpe_suspend(struct device *dev)
-{
- struct i2c_client *i2c = to_i2c_client(dev);
-
- if (device_may_wakeup(&i2c->dev))
- enable_irq_wake(i2c->irq);
-
- return 0;
-}
-
-static int stmpe_resume(struct device *dev)
-{
- struct i2c_client *i2c = to_i2c_client(dev);
-
- if (device_may_wakeup(&i2c->dev))
- disable_irq_wake(i2c->irq);
-
- return 0;
-}
-#endif
-
-static int __devinit stmpe_probe(struct i2c_client *i2c,
- const struct i2c_device_id *id)
+/* Called from client specific probe routines */
+int __devinit stmpe_probe(struct stmpe_client_info *ci, int partnum)
{
- struct stmpe_platform_data *pdata = i2c->dev.platform_data;
+ struct stmpe_platform_data *pdata = dev_get_platdata(ci->dev);
struct stmpe *stmpe;
int ret;
@@ -912,30 +974,43 @@ static int __devinit stmpe_probe(struct i2c_client *i2c,
mutex_init(&stmpe->irq_lock);
mutex_init(&stmpe->lock);
- stmpe->dev = &i2c->dev;
- stmpe->i2c = i2c;
-
+ stmpe->dev = ci->dev;
+ stmpe->client = ci->client;
stmpe->pdata = pdata;
stmpe->irq_base = pdata->irq_base;
-
- stmpe->partnum = id->driver_data;
- stmpe->variant = stmpe_variant_info[stmpe->partnum];
+ stmpe->ci = ci;
+ stmpe->partnum = partnum;
+ stmpe->variant = stmpe_variant_info[partnum];
stmpe->regs = stmpe->variant->regs;
stmpe->num_gpios = stmpe->variant->num_gpios;
+ dev_set_drvdata(stmpe->dev, stmpe);
- i2c_set_clientdata(i2c, stmpe);
+ if (ci->init)
+ ci->init(stmpe);
+
+ if (pdata->irq_over_gpio) {
+ ret = gpio_request_one(pdata->irq_gpio, GPIOF_DIR_IN, "stmpe");
+ if (ret) {
+ dev_err(stmpe->dev, "failed to request IRQ GPIO: %d\n",
+ ret);
+ goto out_free;
+ }
+
+ stmpe->irq = gpio_to_irq(pdata->irq_gpio);
+ } else {
+ stmpe->irq = ci->irq;
+ }
ret = stmpe_chip_init(stmpe);
if (ret)
- goto out_free;
+ goto free_gpio;
ret = stmpe_irq_init(stmpe);
if (ret)
- goto out_free;
+ goto free_gpio;
- ret = request_threaded_irq(stmpe->i2c->irq, NULL, stmpe_irq,
- pdata->irq_trigger | IRQF_ONESHOT,
- "stmpe", stmpe);
+ ret = request_threaded_irq(stmpe->irq, NULL, stmpe_irq,
+ pdata->irq_trigger | IRQF_ONESHOT, "stmpe", stmpe);
if (ret) {
dev_err(stmpe->dev, "failed to request IRQ: %d\n", ret);
goto out_removeirq;
@@ -951,67 +1026,55 @@ static int __devinit stmpe_probe(struct i2c_client *i2c,
out_removedevs:
mfd_remove_devices(stmpe->dev);
- free_irq(stmpe->i2c->irq, stmpe);
+ free_irq(stmpe->irq, stmpe);
out_removeirq:
stmpe_irq_remove(stmpe);
+free_gpio:
+ if (pdata->irq_over_gpio)
+ gpio_free(pdata->irq_gpio);
out_free:
kfree(stmpe);
return ret;
}
-static int __devexit stmpe_remove(struct i2c_client *client)
+int stmpe_remove(struct stmpe *stmpe)
{
- struct stmpe *stmpe = i2c_get_clientdata(client);
-
mfd_remove_devices(stmpe->dev);
- free_irq(stmpe->i2c->irq, stmpe);
+ free_irq(stmpe->irq, stmpe);
stmpe_irq_remove(stmpe);
+ if (stmpe->pdata->irq_over_gpio)
+ gpio_free(stmpe->pdata->irq_gpio);
+
kfree(stmpe);
return 0;
}
-static const struct i2c_device_id stmpe_id[] = {
- { "stmpe811", STMPE811 },
- { "stmpe1601", STMPE1601 },
- { "stmpe2401", STMPE2401 },
- { "stmpe2403", STMPE2403 },
- { }
-};
-MODULE_DEVICE_TABLE(i2c, stmpe_id);
-
#ifdef CONFIG_PM
-static const struct dev_pm_ops stmpe_dev_pm_ops = {
- .suspend = stmpe_suspend,
- .resume = stmpe_resume,
-};
-#endif
+static int stmpe_suspend(struct device *dev)
+{
+ struct stmpe *stmpe = dev_get_drvdata(dev);
-static struct i2c_driver stmpe_driver = {
- .driver.name = "stmpe",
- .driver.owner = THIS_MODULE,
-#ifdef CONFIG_PM
- .driver.pm = &stmpe_dev_pm_ops,
-#endif
- .probe = stmpe_probe,
- .remove = __devexit_p(stmpe_remove),
- .id_table = stmpe_id,
-};
+ if (device_may_wakeup(dev))
+ enable_irq_wake(stmpe->irq);
-static int __init stmpe_init(void)
-{
- return i2c_add_driver(&stmpe_driver);
+ return 0;
}
-subsys_initcall(stmpe_init);
-static void __exit stmpe_exit(void)
+static int stmpe_resume(struct device *dev)
{
- i2c_del_driver(&stmpe_driver);
+ struct stmpe *stmpe = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(stmpe->irq);
+
+ return 0;
}
-module_exit(stmpe_exit);
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("STMPE MFD core driver");
-MODULE_AUTHOR("Rabin Vincent <rabin.vincent@stericsson.com>");
+const struct dev_pm_ops stmpe_dev_pm_ops = {
+ .suspend = stmpe_suspend,
+ .resume = stmpe_resume,
+};
+#endif
diff --git a/drivers/mfd/stmpe.h b/drivers/mfd/stmpe.h
index e4ee3895..7b8e13f 100644
--- a/drivers/mfd/stmpe.h
+++ b/drivers/mfd/stmpe.h
@@ -8,6 +8,14 @@
#ifndef __STMPE_H
#define __STMPE_H
+#include <linux/device.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/stmpe.h>
+#include <linux/printk.h>
+#include <linux/types.h>
+
+extern const struct dev_pm_ops stmpe_dev_pm_ops;
+
#ifdef STMPE_DUMP_BYTES
static inline void stmpe_dump_bytes(const char *str, const void *buf,
size_t len)
@@ -67,11 +75,55 @@ struct stmpe_variant_info {
int (*enable_autosleep)(struct stmpe *stmpe, int autosleep_timeout);
};
+/**
+ * struct stmpe_client_info - i2c or spi specific routines/info
+ * @data: client specific data
+ * @irq: client interrupt number
+ * @client: pointer to the underlying i2c or spi client
+ * @dev: device associated with the client
+ * @read_byte: read single byte
+ * @write_byte: write single byte
+ * @read_block: read block or multiple bytes
+ * @write_block: write block or multiple bytes
+ * @init: client init routine, called during probe
+ */
+struct stmpe_client_info {
+ void *data;
+ int irq;
+ void *client;
+ struct device *dev;
+ int (*read_byte)(struct stmpe *stmpe, u8 reg);
+ int (*write_byte)(struct stmpe *stmpe, u8 reg, u8 val);
+ int (*read_block)(struct stmpe *stmpe, u8 reg, u8 len, u8 *values);
+ int (*write_block)(struct stmpe *stmpe, u8 reg, u8 len,
+ const u8 *values);
+ void (*init)(struct stmpe *stmpe);
+};
+
+int stmpe_probe(struct stmpe_client_info *ci, int partnum);
+int stmpe_remove(struct stmpe *stmpe);
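For orientation, the interface above is meant to be filled in by a thin bus glue layer, which then calls stmpe_probe() from its own probe routine. A minimal sketch of such an I2C shim follows; the helper and symbol names are assumptions for illustration rather than the actual stmpe-i2c implementation, but the SMBus calls mirror the ones the core used before this change.

/* Hypothetical I2C glue, sketched for illustration only. */
#include <linux/i2c.h>
#include <linux/init.h>

#include "stmpe.h"

static int stmpe_i2c_read_byte(struct stmpe *stmpe, u8 reg)
{
	/* stmpe->client carries the i2c_client assigned in the probe below */
	return i2c_smbus_read_byte_data(stmpe->client, reg);
}

static int stmpe_i2c_write_byte(struct stmpe *stmpe, u8 reg, u8 val)
{
	return i2c_smbus_write_byte_data(stmpe->client, reg, val);
}

static int stmpe_i2c_read_block(struct stmpe *stmpe, u8 reg, u8 len,
				u8 *values)
{
	return i2c_smbus_read_i2c_block_data(stmpe->client, reg, len, values);
}

static int stmpe_i2c_write_block(struct stmpe *stmpe, u8 reg, u8 len,
				 const u8 *values)
{
	return i2c_smbus_write_i2c_block_data(stmpe->client, reg, len, values);
}

/* Must stay valid after probe: the core keeps this pointer in stmpe->ci. */
static struct stmpe_client_info stmpe_i2c_ci = {
	.read_byte	= stmpe_i2c_read_byte,
	.write_byte	= stmpe_i2c_write_byte,
	.read_block	= stmpe_i2c_read_block,
	.write_block	= stmpe_i2c_write_block,
};

static int __devinit stmpe_i2c_probe(struct i2c_client *i2c,
				     const struct i2c_device_id *id)
{
	stmpe_i2c_ci.data	= (void *)id;
	stmpe_i2c_ci.irq	= i2c->irq;
	stmpe_i2c_ci.client	= i2c;
	stmpe_i2c_ci.dev	= &i2c->dev;

	/* id->driver_data is assumed to carry the STMPE part number */
	return stmpe_probe(&stmpe_i2c_ci, id->driver_data);
}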
+
#define STMPE_ICR_LSB_HIGH (1 << 2)
#define STMPE_ICR_LSB_EDGE (1 << 1)
#define STMPE_ICR_LSB_GIM (1 << 0)
/*
+ * STMPE801
+ */
+#define STMPE801_ID 0x0108
+#define STMPE801_NR_INTERNAL_IRQS 1
+
+#define STMPE801_REG_CHIP_ID 0x00
+#define STMPE801_REG_VERSION_ID 0x02
+#define STMPE801_REG_SYS_CTRL 0x04
+#define STMPE801_REG_GPIO_INT_EN 0x08
+#define STMPE801_REG_GPIO_INT_STA 0x09
+#define STMPE801_REG_GPIO_MP_STA 0x10
+#define STMPE801_REG_GPIO_SET_PIN 0x11
+#define STMPE801_REG_GPIO_DIR 0x12
+
+#define STMPE801_REG_SYS_CTRL_RESET (1 << 7)
+#define STMPE801_REG_SYS_CTRL_INT_EN (1 << 2)
+#define STMPE801_REG_SYS_CTRL_INT_HI (1 << 0)
+
+/*
* STMPE811
*/
@@ -87,6 +139,7 @@ struct stmpe_variant_info {
#define STMPE811_REG_CHIP_ID 0x00
#define STMPE811_REG_SYS_CTRL2 0x04
+#define STMPE811_REG_SPI_CFG 0x08
#define STMPE811_REG_INT_CTRL 0x09
#define STMPE811_REG_INT_EN 0x0A
#define STMPE811_REG_INT_STA 0x0B
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c
index 91ad21e..2d9e879 100644
--- a/drivers/mfd/t7l66xb.c
+++ b/drivers/mfd/t7l66xb.c
@@ -442,21 +442,7 @@ static struct platform_driver t7l66xb_platform_driver = {
/*--------------------------------------------------------------------------*/
-static int __init t7l66xb_init(void)
-{
- int retval = 0;
-
- retval = platform_driver_register(&t7l66xb_platform_driver);
- return retval;
-}
-
-static void __exit t7l66xb_exit(void)
-{
- platform_driver_unregister(&t7l66xb_platform_driver);
-}
-
-module_init(t7l66xb_init);
-module_exit(t7l66xb_exit);
+module_platform_driver(t7l66xb_platform_driver);
MODULE_DESCRIPTION("Toshiba T7L66XB core driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c
index 71bc835..d20a284 100644
--- a/drivers/mfd/tc6387xb.c
+++ b/drivers/mfd/tc6387xb.c
@@ -234,19 +234,7 @@ static struct platform_driver tc6387xb_platform_driver = {
.resume = tc6387xb_resume,
};
-
-static int __init tc6387xb_init(void)
-{
- return platform_driver_register(&tc6387xb_platform_driver);
-}
-
-static void __exit tc6387xb_exit(void)
-{
- platform_driver_unregister(&tc6387xb_platform_driver);
-}
-
-module_init(tc6387xb_init);
-module_exit(tc6387xb_exit);
+module_platform_driver(tc6387xb_platform_driver);
MODULE_DESCRIPTION("Toshiba TC6387XB core driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/ti-ssp.c b/drivers/mfd/ti-ssp.c
index af9ab0e..4fb0e6c 100644
--- a/drivers/mfd/ti-ssp.c
+++ b/drivers/mfd/ti-ssp.c
@@ -458,17 +458,7 @@ static struct platform_driver ti_ssp_driver = {
}
};
-static int __init ti_ssp_init(void)
-{
- return platform_driver_register(&ti_ssp_driver);
-}
-module_init(ti_ssp_init);
-
-static void __exit ti_ssp_exit(void)
-{
- platform_driver_unregister(&ti_ssp_driver);
-}
-module_exit(ti_ssp_exit);
+module_platform_driver(ti_ssp_driver);
MODULE_DESCRIPTION("Sequencer Serial Port (SSP) Driver");
MODULE_AUTHOR("Cyril Chemparathy");
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c
index 02d6569..0ba26fb 100644
--- a/drivers/mfd/timberdale.c
+++ b/drivers/mfd/timberdale.c
@@ -857,7 +857,7 @@ static void __devexit timb_remove(struct pci_dev *dev)
kfree(priv);
}
-static struct pci_device_id timberdale_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(timberdale_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_TIMB, PCI_DEVICE_ID_TIMB) },
{ 0 }
};
diff --git a/drivers/mfd/tps65910-irq.c b/drivers/mfd/tps65910-irq.c
index a56be93..95c0d79 100644
--- a/drivers/mfd/tps65910-irq.c
+++ b/drivers/mfd/tps65910-irq.c
@@ -215,6 +215,7 @@ int tps65910_irq_init(struct tps65910 *tps65910, int irq,
int tps65910_irq_exit(struct tps65910 *tps65910)
{
- free_irq(tps65910->chip_irq, tps65910);
+ if (tps65910->chip_irq)
+ free_irq(tps65910->chip_irq, tps65910);
return 0;
}
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index c1da84b..4392f6b 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -168,19 +168,16 @@ static int tps65910_i2c_probe(struct i2c_client *i2c,
goto err;
init_data->irq = pmic_plat_data->irq;
- init_data->irq_base = pmic_plat_data->irq;
+ init_data->irq_base = pmic_plat_data->irq_base;
tps65910_gpio_init(tps65910, pmic_plat_data->gpio_base);
- ret = tps65910_irq_init(tps65910, init_data->irq, init_data);
- if (ret < 0)
- goto err;
+ tps65910_irq_init(tps65910, init_data->irq, init_data);
kfree(init_data);
return ret;
err:
- mfd_remove_devices(tps65910->dev);
kfree(tps65910);
kfree(init_data);
return ret;
@@ -190,8 +187,8 @@ static int tps65910_i2c_remove(struct i2c_client *i2c)
{
struct tps65910 *tps65910 = i2c_get_clientdata(i2c);
- mfd_remove_devices(tps65910->dev);
tps65910_irq_exit(tps65910);
+ mfd_remove_devices(tps65910->dev);
kfree(tps65910);
return 0;
diff --git a/drivers/mfd/tps65912-core.c b/drivers/mfd/tps65912-core.c
index 5fec23a..74fd8cb 100644
--- a/drivers/mfd/tps65912-core.c
+++ b/drivers/mfd/tps65912-core.c
@@ -151,7 +151,7 @@ int tps65912_device_init(struct tps65912 *tps65912)
goto err;
init_data->irq = pmic_plat_data->irq;
- init_data->irq_base = pmic_plat_data->irq;
+ init_data->irq_base = pmic_plat_data->irq_base;
ret = tps65912_irq_init(tps65912, init_data->irq, init_data);
if (ret < 0)
goto err;
diff --git a/drivers/mfd/tps65912-spi.c b/drivers/mfd/tps65912-spi.c
index 6d71e0d..27d3302 100644
--- a/drivers/mfd/tps65912-spi.c
+++ b/drivers/mfd/tps65912-spi.c
@@ -111,7 +111,6 @@ static int __devexit tps65912_spi_remove(struct spi_device *spi)
static struct spi_driver tps65912_spi_driver = {
.driver = {
.name = "tps65912",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = tps65912_spi_probe,
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 61e70cf..8ce3959 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -34,6 +34,11 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/irqdomain.h>
#include <linux/regulator/machine.h>
@@ -144,6 +149,9 @@
#define TWL_MODULE_LAST TWL4030_MODULE_LAST
+#define TWL4030_NR_IRQS 8
+#define TWL6030_NR_IRQS 20
+
/* Base Address defns for twl4030_map[] */
/* subchip/slave 0 - USB ID */
@@ -255,6 +263,9 @@ struct twl_client {
static struct twl_client twl_modules[TWL_NUM_SLAVES];
+#ifdef CONFIG_IRQ_DOMAIN
+static struct irq_domain domain;
+#endif
/* mapping the module id to slave id and base address */
struct twl_mapping {
@@ -1183,14 +1194,48 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
int status;
unsigned i;
struct twl4030_platform_data *pdata = client->dev.platform_data;
+ struct device_node *node = client->dev.of_node;
u8 temp;
int ret = 0;
+ int nr_irqs = TWL4030_NR_IRQS;
+
+ if ((id->driver_data) & TWL6030_CLASS)
+ nr_irqs = TWL6030_NR_IRQS;
+
+ if (node && !pdata) {
+ /*
+ * XXX: Temporary pdata until the information is correctly
+ * retrieved by every TWL modules from DT.
+ */
+ pdata = devm_kzalloc(&client->dev,
+ sizeof(struct twl4030_platform_data),
+ GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+ }
if (!pdata) {
dev_dbg(&client->dev, "no platform data?\n");
return -EINVAL;
}
+ status = irq_alloc_descs(-1, pdata->irq_base, nr_irqs, 0);
+ if (IS_ERR_VALUE(status)) {
+		dev_err(&client->dev, "Failed to allocate IRQ descs\n");
+ return status;
+ }
+
+ pdata->irq_base = status;
+ pdata->irq_end = pdata->irq_base + nr_irqs;
+
+#ifdef CONFIG_IRQ_DOMAIN
+ domain.irq_base = pdata->irq_base;
+ domain.nr_irq = nr_irqs;
+ domain.of_node = of_node_get(node);
+ domain.ops = &irq_domain_simple_ops;
+ irq_domain_add(&domain);
+#endif
+
if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C) == 0) {
dev_dbg(&client->dev, "can't talk I2C?\n");
return -EIO;
@@ -1270,7 +1315,13 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
twl_i2c_write_u8(TWL4030_MODULE_INTBR, temp, REG_GPPUPDCTR1);
}
- status = add_children(pdata, id->driver_data);
+#ifdef CONFIG_OF_DEVICE
+ if (node)
+ status = of_platform_populate(node, NULL, NULL, &client->dev);
+ else
+#endif
+ status = add_children(pdata, id->driver_data);
+
fail:
if (status < 0)
twl_remove(client);
diff --git a/drivers/mfd/twl4030-audio.c b/drivers/mfd/twl4030-audio.c
index ae51ab5..838ce4e 100644
--- a/drivers/mfd/twl4030-audio.c
+++ b/drivers/mfd/twl4030-audio.c
@@ -261,17 +261,7 @@ static struct platform_driver twl4030_audio_driver = {
},
};
-static int __devinit twl4030_audio_init(void)
-{
- return platform_driver_register(&twl4030_audio_driver);
-}
-module_init(twl4030_audio_init);
-
-static void __devexit twl4030_audio_exit(void)
-{
- platform_driver_unregister(&twl4030_audio_driver);
-}
-module_exit(twl4030_audio_exit);
+module_platform_driver(twl4030_audio_driver);
MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 29f11e0..b69bb51 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -492,7 +492,7 @@ static void twl4030_sih_bus_sync_unlock(struct irq_data *data)
u8 bytes[4];
} imr;
- /* byte[0] gets overwriten as we write ... */
+ /* byte[0] gets overwritten as we write ... */
imr.word = cpu_to_le32(agent->imr << 8);
agent->imr_change_pending = false;
@@ -667,6 +667,7 @@ int twl4030_sih_setup(int module)
irq_set_chip_data(irq, agent);
irq_set_chip_and_handler(irq, &twl4030_sih_irq_chip,
handle_edge_irq);
+ irq_set_nested_thread(irq, 1);
activate_irq(irq);
}
diff --git a/drivers/mfd/twl4030-madc.c b/drivers/mfd/twl4030-madc.c
index 834f824..456ecb5 100644
--- a/drivers/mfd/twl4030-madc.c
+++ b/drivers/mfd/twl4030-madc.c
@@ -807,19 +807,7 @@ static struct platform_driver twl4030_madc_driver = {
},
};
-static int __init twl4030_madc_init(void)
-{
- return platform_driver_register(&twl4030_madc_driver);
-}
-
-module_init(twl4030_madc_init);
-
-static void __exit twl4030_madc_exit(void)
-{
- platform_driver_unregister(&twl4030_madc_driver);
-}
-
-module_exit(twl4030_madc_exit);
+module_platform_driver(twl4030_madc_driver);
MODULE_DESCRIPTION("TWL4030 ADC driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index a764676..79ca33d 100644
--- a/drivers/mfd/twl4030-power.c
+++ b/drivers/mfd/twl4030-power.c
@@ -34,7 +34,8 @@
static u8 twl4030_start_script_address = 0x2b;
#define PWR_P1_SW_EVENTS 0x10
-#define PWR_DEVOFF (1<<0)
+#define PWR_DEVOFF (1 << 0)
+#define SEQ_OFFSYNC (1 << 0)
#define PHY_TO_OFF_PM_MASTER(p) (p - 0x36)
#define PHY_TO_OFF_PM_RECEIVER(p) (p - 0x5b)
@@ -123,7 +124,7 @@ static u8 res_config_addrs[] = {
[RES_MAIN_REF] = 0x94,
};
-static int __init twl4030_write_script_byte(u8 address, u8 byte)
+static int __devinit twl4030_write_script_byte(u8 address, u8 byte)
{
int err;
@@ -137,7 +138,7 @@ out:
return err;
}
-static int __init twl4030_write_script_ins(u8 address, u16 pmb_message,
+static int __devinit twl4030_write_script_ins(u8 address, u16 pmb_message,
u8 delay, u8 next)
{
int err;
@@ -157,7 +158,7 @@ out:
return err;
}
-static int __init twl4030_write_script(u8 address, struct twl4030_ins *script,
+static int __devinit twl4030_write_script(u8 address, struct twl4030_ins *script,
int len)
{
int err;
@@ -182,7 +183,7 @@ static int __init twl4030_write_script(u8 address, struct twl4030_ins *script,
return err;
}
-static int __init twl4030_config_wakeup3_sequence(u8 address)
+static int __devinit twl4030_config_wakeup3_sequence(u8 address)
{
int err;
u8 data;
@@ -207,7 +208,7 @@ out:
return err;
}
-static int __init twl4030_config_wakeup12_sequence(u8 address)
+static int __devinit twl4030_config_wakeup12_sequence(u8 address)
{
int err = 0;
u8 data;
@@ -261,7 +262,7 @@ out:
return err;
}
-static int __init twl4030_config_sleep_sequence(u8 address)
+static int __devinit twl4030_config_sleep_sequence(u8 address)
{
int err;
@@ -275,7 +276,7 @@ static int __init twl4030_config_sleep_sequence(u8 address)
return err;
}
-static int __init twl4030_config_warmreset_sequence(u8 address)
+static int __devinit twl4030_config_warmreset_sequence(u8 address)
{
int err;
u8 rd_data;
@@ -323,7 +324,7 @@ out:
return err;
}
-static int __init twl4030_configure_resource(struct twl4030_resconfig *rconfig)
+static int __devinit twl4030_configure_resource(struct twl4030_resconfig *rconfig)
{
int rconfig_addr;
int err;
@@ -415,7 +416,7 @@ static int __init twl4030_configure_resource(struct twl4030_resconfig *rconfig)
return 0;
}
-static int __init load_twl4030_script(struct twl4030_script *tscript,
+static int __devinit load_twl4030_script(struct twl4030_script *tscript,
u8 address)
{
int err;
@@ -511,12 +512,27 @@ int twl4030_remove_script(u8 flags)
return err;
}
-void __init twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
+/*
+ * In master mode, start the power off sequence.
+ * After a successful execution, TWL shuts down the power to the SoC
+ * and all peripherals connected to it.
+ */
+void twl4030_power_off(void)
+{
+ int err;
+
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, PWR_DEVOFF,
+ TWL4030_PM_MASTER_P1_SW_EVENTS);
+ if (err)
+ pr_err("TWL4030 Unable to power off\n");
+}
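To make use of this hook, a board opts in through its TWL platform data via the use_poweroff flag checked further down in twl4030_power_init(). A minimal sketch is shown below, assuming that flag is a plain bool in struct twl4030_power_data; the board symbol names are made up for illustration.

/* Hypothetical board-file fragment, for illustration only. */
static struct twl4030_power_data board_twl_power = {
	/* Only meaningful if the board wiring supports TWL-driven power off */
	.use_poweroff	= true,
};

static struct twl4030_platform_data board_twl_pdata = {
	.power		= &board_twl_power,
	/* other TWL child configuration (gpio, usb, ...) omitted */
};

With this in place, the guard in twl4030_power_init() installs twl4030_power_off() as pm_power_off only when no other handler has already claimed it.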
+
+void __devinit twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
{
int err = 0;
int i;
struct twl4030_resconfig *resconfig;
- u8 address = twl4030_start_script_address;
+ u8 val, address = twl4030_start_script_address;
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER,
TWL4030_PM_MASTER_KEY_CFG1,
@@ -548,6 +564,28 @@ void __init twl4030_power_init(struct twl4030_power_data *twl4030_scripts)
}
}
+ /* Board has to be wired properly to use this feature */
+ if (twl4030_scripts->use_poweroff && !pm_power_off) {
+		/* Default for SEQ_OFFSYNC is set, let's ensure this */
+ err = twl_i2c_read_u8(TWL4030_MODULE_PM_MASTER, &val,
+ TWL4030_PM_MASTER_CFG_P123_TRANSITION);
+ if (err) {
+ pr_warning("TWL4030 Unable to read registers\n");
+
+ } else if (!(val & SEQ_OFFSYNC)) {
+ val |= SEQ_OFFSYNC;
+ err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, val,
+ TWL4030_PM_MASTER_CFG_P123_TRANSITION);
+ if (err) {
+ pr_err("TWL4030 Unable to setup SEQ_OFFSYNC\n");
+ goto relock;
+ }
+ }
+
+ pm_power_off = twl4030_power_off;
+ }
+
+relock:
err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, 0,
TWL4030_PM_MASTER_PROTECT_KEY);
if (err)
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
index 3eee45f..c6b456a 100644
--- a/drivers/mfd/twl6030-irq.c
+++ b/drivers/mfd/twl6030-irq.c
@@ -138,8 +138,6 @@ static int twl6030_irq_thread(void *data)
static const unsigned max_i2c_errors = 100;
int ret;
- current->flags |= PF_NOFREEZE;
-
while (!kthread_should_stop()) {
int i;
union {
diff --git a/drivers/mfd/twl6040-core.c b/drivers/mfd/twl6040-core.c
index 268f80f..b2d8e51 100644
--- a/drivers/mfd/twl6040-core.c
+++ b/drivers/mfd/twl6040-core.c
@@ -282,6 +282,7 @@ int twl6040_power(struct twl6040 *twl6040, int on)
/* Default PLL configuration after power up */
twl6040->pll = TWL6040_SYSCLK_SEL_LPPLL;
twl6040->sysclk = 19200000;
+ twl6040->mclk = 32768;
} else {
/* already powered-down */
if (!twl6040->power_count) {
@@ -305,6 +306,7 @@ int twl6040_power(struct twl6040 *twl6040, int on)
twl6040_power_down(twl6040);
}
twl6040->sysclk = 0;
+ twl6040->mclk = 0;
}
out:
@@ -324,23 +326,38 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
hppllctl = twl6040_reg_read(twl6040, TWL6040_REG_HPPLLCTL);
lppllctl = twl6040_reg_read(twl6040, TWL6040_REG_LPPLLCTL);
+	/* Force full reconfiguration when switching between PLLs */
+ if (pll_id != twl6040->pll) {
+ twl6040->sysclk = 0;
+ twl6040->mclk = 0;
+ }
+
switch (pll_id) {
case TWL6040_SYSCLK_SEL_LPPLL:
/* low-power PLL divider */
- switch (freq_out) {
- case 17640000:
- lppllctl |= TWL6040_LPLLFIN;
- break;
- case 19200000:
- lppllctl &= ~TWL6040_LPLLFIN;
- break;
- default:
- dev_err(twl6040->dev,
- "freq_out %d not supported\n", freq_out);
- ret = -EINVAL;
- goto pll_out;
+		/* Change the sysclk configuration only if it has been changed */
+ if (twl6040->sysclk != freq_out) {
+ switch (freq_out) {
+ case 17640000:
+ lppllctl |= TWL6040_LPLLFIN;
+ break;
+ case 19200000:
+ lppllctl &= ~TWL6040_LPLLFIN;
+ break;
+ default:
+ dev_err(twl6040->dev,
+ "freq_out %d not supported\n",
+ freq_out);
+ ret = -EINVAL;
+ goto pll_out;
+ }
+ twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL,
+ lppllctl);
}
- twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
+
+		/* The PLL in use has not been changed, so we can exit */
+ if (twl6040->pll == pll_id)
+ break;
switch (freq_in) {
case 32768:
@@ -371,48 +388,56 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
goto pll_out;
}
- hppllctl &= ~TWL6040_MCLK_MSK;
+ if (twl6040->mclk != freq_in) {
+ hppllctl &= ~TWL6040_MCLK_MSK;
+
+ switch (freq_in) {
+ case 12000000:
+ /* PLL enabled, active mode */
+ hppllctl |= TWL6040_MCLK_12000KHZ |
+ TWL6040_HPLLENA;
+ break;
+ case 19200000:
+ /*
+ * PLL disabled
+ * (enable PLL if MCLK jitter quality
+ * doesn't meet specification)
+ */
+ hppllctl |= TWL6040_MCLK_19200KHZ;
+ break;
+ case 26000000:
+ /* PLL enabled, active mode */
+ hppllctl |= TWL6040_MCLK_26000KHZ |
+ TWL6040_HPLLENA;
+ break;
+ case 38400000:
+ /* PLL enabled, active mode */
+ hppllctl |= TWL6040_MCLK_38400KHZ |
+ TWL6040_HPLLENA;
+ break;
+ default:
+ dev_err(twl6040->dev,
+ "freq_in %d not supported\n", freq_in);
+ ret = -EINVAL;
+ goto pll_out;
+ }
- switch (freq_in) {
- case 12000000:
- /* PLL enabled, active mode */
- hppllctl |= TWL6040_MCLK_12000KHZ |
- TWL6040_HPLLENA;
- break;
- case 19200000:
/*
- * PLL disabled
- * (enable PLL if MCLK jitter quality
- * doesn't meet specification)
+ * enable clock slicer to ensure input waveform is
+ * square
*/
- hppllctl |= TWL6040_MCLK_19200KHZ;
- break;
- case 26000000:
- /* PLL enabled, active mode */
- hppllctl |= TWL6040_MCLK_26000KHZ |
- TWL6040_HPLLENA;
- break;
- case 38400000:
- /* PLL enabled, active mode */
- hppllctl |= TWL6040_MCLK_38400KHZ |
- TWL6040_HPLLENA;
- break;
- default:
- dev_err(twl6040->dev,
- "freq_in %d not supported\n", freq_in);
- ret = -EINVAL;
- goto pll_out;
- }
+ hppllctl |= TWL6040_HPLLSQRENA;
- /* enable clock slicer to ensure input waveform is square */
- hppllctl |= TWL6040_HPLLSQRENA;
-
- twl6040_reg_write(twl6040, TWL6040_REG_HPPLLCTL, hppllctl);
- usleep_range(500, 700);
- lppllctl |= TWL6040_HPLLSEL;
- twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
- lppllctl &= ~TWL6040_LPLLENA;
- twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
+ twl6040_reg_write(twl6040, TWL6040_REG_HPPLLCTL,
+ hppllctl);
+ usleep_range(500, 700);
+ lppllctl |= TWL6040_HPLLSEL;
+ twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL,
+ lppllctl);
+ lppllctl &= ~TWL6040_LPLLENA;
+ twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL,
+ lppllctl);
+ }
break;
default:
dev_err(twl6040->dev, "unknown pll id %d\n", pll_id);
@@ -421,6 +446,7 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
}
twl6040->sysclk = freq_out;
+ twl6040->mclk = freq_in;
twl6040->pll = pll_id;
pll_out:
@@ -509,13 +535,10 @@ static int __devinit twl6040_probe(struct platform_device *pdev)
twl6040->audpwron = -EINVAL;
if (gpio_is_valid(twl6040->audpwron)) {
- ret = gpio_request(twl6040->audpwron, "audpwron");
+ ret = gpio_request_one(twl6040->audpwron, GPIOF_OUT_INIT_LOW,
+ "audpwron");
if (ret)
goto gpio1_err;
-
- ret = gpio_direction_output(twl6040->audpwron, 0);
- if (ret)
- goto gpio2_err;
}
/* codec interrupt */
@@ -619,18 +642,7 @@ static struct platform_driver twl6040_driver = {
},
};
-static int __devinit twl6040_init(void)
-{
- return platform_driver_register(&twl6040_driver);
-}
-module_init(twl6040_init);
-
-static void __devexit twl6040_exit(void)
-{
- platform_driver_unregister(&twl6040_driver);
-}
-
-module_exit(twl6040_exit);
+module_platform_driver(twl6040_driver);
MODULE_DESCRIPTION("TWL6040 MFD");
MODULE_AUTHOR("Misael Lopez Cruz <misael.lopez@ti.com>");
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index b281217..febc90c 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -148,16 +148,22 @@ static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset
{
struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio);
unsigned long flags;
+ unsigned old, mask = 1 << offset;
spin_lock_irqsave(&ucb->io_lock, flags);
- ucb->io_dir |= (1 << offset);
- ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
-
+ old = ucb->io_out;
if (value)
- ucb->io_out |= 1 << offset;
+ ucb->io_out |= mask;
else
- ucb->io_out &= ~(1 << offset);
- ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
+ ucb->io_out &= ~mask;
+
+ if (old != ucb->io_out)
+ ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
+
+ if (!(ucb->io_dir & mask)) {
+ ucb->io_dir |= mask;
+ ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
+ }
spin_unlock_irqrestore(&ucb->io_lock, flags);
return 0;
@@ -687,6 +693,7 @@ static int ucb1x00_resume(struct mcp *mcp)
struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
struct ucb1x00_dev *dev;
+ ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
mutex_lock(&ucb1x00_mutex);
list_for_each_entry(dev, &ucb->devs, dev_node) {
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index 38ffbd5..63a3cbd 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -47,7 +47,6 @@ struct ucb1x00_ts {
u16 x_res;
u16 y_res;
- unsigned int restart:1;
unsigned int adcsync:1;
};
@@ -207,15 +206,17 @@ static int ucb1x00_thread(void *_ts)
{
struct ucb1x00_ts *ts = _ts;
DECLARE_WAITQUEUE(wait, current);
+ bool frozen, ignore = false;
int valid = 0;
set_freezable();
add_wait_queue(&ts->irq_wait, &wait);
- while (!kthread_should_stop()) {
+ while (!kthread_freezable_should_stop(&frozen)) {
unsigned int x, y, p;
signed long timeout;
- ts->restart = 0;
+ if (frozen)
+ ignore = true;
ucb1x00_adc_enable(ts->ucb);
@@ -258,7 +259,7 @@ static int ucb1x00_thread(void *_ts)
* space. We therefore leave it to user space
* to do any filtering they please.
*/
- if (!ts->restart) {
+ if (!ignore) {
ucb1x00_ts_evt_add(ts, p, x, y);
valid = 1;
}
@@ -267,8 +268,6 @@ static int ucb1x00_thread(void *_ts)
timeout = HZ / 100;
}
- try_to_freeze();
-
schedule_timeout(timeout);
}
@@ -340,26 +339,6 @@ static void ucb1x00_ts_close(struct input_dev *idev)
ucb1x00_disable(ts->ucb);
}
-#ifdef CONFIG_PM
-static int ucb1x00_ts_resume(struct ucb1x00_dev *dev)
-{
- struct ucb1x00_ts *ts = dev->priv;
-
- if (ts->rtask != NULL) {
- /*
- * Restart the TS thread to ensure the
- * TS interrupt mode is set up again
- * after sleep.
- */
- ts->restart = 1;
- wake_up(&ts->irq_wait);
- }
- return 0;
-}
-#else
-#define ucb1x00_ts_resume NULL
-#endif
-
/*
* Initialisation.
@@ -425,7 +404,6 @@ static void ucb1x00_ts_remove(struct ucb1x00_dev *dev)
static struct ucb1x00_driver ucb1x00_ts_driver = {
.add = ucb1x00_ts_add,
.remove = ucb1x00_ts_remove,
- .resume = ucb1x00_ts_resume,
};
static int __init ucb1x00_ts_init(void)
diff --git a/drivers/mfd/vx855.c b/drivers/mfd/vx855.c
index d698703..b73cc15 100644
--- a/drivers/mfd/vx855.c
+++ b/drivers/mfd/vx855.c
@@ -118,7 +118,7 @@ static void __devexit vx855_remove(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id vx855_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(vx855_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX855) },
{ 0, }
};
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 0a2b8d4..f5e54fa 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -559,6 +559,8 @@ static int wm831x_write(struct wm831x *wm831x, unsigned short reg,
dev_vdbg(wm831x->dev, "Write %04x to R%d(0x%x)\n",
buf[i], reg + i, reg + i);
ret = regmap_write(wm831x->regmap, reg + i, buf[i]);
+ if (ret != 0)
+ return ret;
}
return 0;
@@ -1875,7 +1877,6 @@ err_irq:
err_regmap:
mfd_remove_devices(wm831x->dev);
regmap_exit(wm831x->regmap);
- kfree(wm831x);
return ret;
}
@@ -1887,7 +1888,6 @@ void wm831x_device_exit(struct wm831x *wm831x)
free_irq(wm831x->irq_base + WM831X_IRQ_AUXADC_DATA, wm831x);
wm831x_irq_exit(wm831x);
regmap_exit(wm831x->regmap);
- kfree(wm831x);
}
int wm831x_device_suspend(struct wm831x *wm831x)
diff --git a/drivers/mfd/wm831x-i2c.c b/drivers/mfd/wm831x-i2c.c
index ac8da1d..cb15609 100644
--- a/drivers/mfd/wm831x-i2c.c
+++ b/drivers/mfd/wm831x-i2c.c
@@ -30,7 +30,7 @@ static int wm831x_i2c_probe(struct i2c_client *i2c,
struct wm831x *wm831x;
int ret;
- wm831x = kzalloc(sizeof(struct wm831x), GFP_KERNEL);
+ wm831x = devm_kzalloc(&i2c->dev, sizeof(struct wm831x), GFP_KERNEL);
if (wm831x == NULL)
return -ENOMEM;
@@ -42,7 +42,6 @@ static int wm831x_i2c_probe(struct i2c_client *i2c,
ret = PTR_ERR(wm831x->regmap);
dev_err(wm831x->dev, "Failed to allocate register map: %d\n",
ret);
- kfree(wm831x);
return ret;
}
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index f4747a4..bec4d05 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -325,11 +325,6 @@ static inline int irq_data_to_status_reg(struct wm831x_irq_data *irq_data)
return WM831X_INTERRUPT_STATUS_1 - 1 + irq_data->reg;
}
-static inline int irq_data_to_mask_reg(struct wm831x_irq_data *irq_data)
-{
- return WM831X_INTERRUPT_STATUS_1_MASK - 1 + irq_data->reg;
-}
-
static inline struct wm831x_irq_data *irq_to_wm831x_irq(struct wm831x *wm831x,
int irq)
{
@@ -477,8 +472,7 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
handle_nested_irq(wm831x->irq_base + WM831X_IRQ_TCHPD);
if (primary & WM831X_TCHDATA_INT)
handle_nested_irq(wm831x->irq_base + WM831X_IRQ_TCHDATA);
- if (primary & (WM831X_TCHDATA_EINT | WM831X_TCHPD_EINT))
- goto out;
+ primary &= ~(WM831X_TCHDATA_EINT | WM831X_TCHPD_EINT);
for (i = 0; i < ARRAY_SIZE(wm831x_irqs); i++) {
int offset = wm831x_irqs[i].reg - 1;
diff --git a/drivers/mfd/wm831x-spi.c b/drivers/mfd/wm831x-spi.c
index 8d6a9a9..62ef325 100644
--- a/drivers/mfd/wm831x-spi.c
+++ b/drivers/mfd/wm831x-spi.c
@@ -30,7 +30,7 @@ static int __devinit wm831x_spi_probe(struct spi_device *spi)
type = (enum wm831x_parent)id->driver_data;
- wm831x = kzalloc(sizeof(struct wm831x), GFP_KERNEL);
+ wm831x = devm_kzalloc(&spi->dev, sizeof(struct wm831x), GFP_KERNEL);
if (wm831x == NULL)
return -ENOMEM;
@@ -45,7 +45,6 @@ static int __devinit wm831x_spi_probe(struct spi_device *spi)
ret = PTR_ERR(wm831x->regmap);
dev_err(wm831x->dev, "Failed to allocate register map: %d\n",
ret);
- kfree(wm831x);
return ret;
}
@@ -95,7 +94,6 @@ MODULE_DEVICE_TABLE(spi, wm831x_spi_id);
static struct spi_driver wm831x_spi_driver = {
.driver = {
.name = "wm831x",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
.pm = &wm831x_spi_pm,
},
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index e81cc31..dd1caaa 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -573,6 +573,8 @@ int wm8350_device_init(struct wm8350 *wm8350, int irq,
u16 id1, id2, mask_rev;
u16 cust_id, mode, chip_rev;
+ dev_set_drvdata(wm8350->dev, wm8350);
+
/* get WM8350 revision and config mode */
ret = wm8350->read_dev(wm8350, WM8350_RESET_ID, sizeof(id1), &id1);
if (ret != 0) {
diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
index 5fe5de1..d955faa 100644
--- a/drivers/mfd/wm8350-i2c.c
+++ b/drivers/mfd/wm8350-i2c.c
@@ -63,7 +63,7 @@ static int wm8350_i2c_probe(struct i2c_client *i2c,
struct wm8350 *wm8350;
int ret = 0;
- wm8350 = kzalloc(sizeof(struct wm8350), GFP_KERNEL);
+ wm8350 = devm_kzalloc(&i2c->dev, sizeof(struct wm8350), GFP_KERNEL);
if (wm8350 == NULL)
return -ENOMEM;
@@ -80,7 +80,6 @@ static int wm8350_i2c_probe(struct i2c_client *i2c,
return ret;
err:
- kfree(wm8350);
return ret;
}
@@ -89,7 +88,6 @@ static int wm8350_i2c_remove(struct i2c_client *i2c)
struct wm8350 *wm8350 = i2c_get_clientdata(i2c);
wm8350_device_exit(wm8350);
- kfree(wm8350);
return 0;
}
diff --git a/drivers/mfd/wm8350-irq.c b/drivers/mfd/wm8350-irq.c
index 8a1fafd..9fd01bf 100644
--- a/drivers/mfd/wm8350-irq.c
+++ b/drivers/mfd/wm8350-irq.c
@@ -496,7 +496,6 @@ int wm8350_irq_init(struct wm8350 *wm8350, int irq,
mutex_init(&wm8350->irq_lock);
wm8350->chip_irq = irq;
- wm8350->irq_base = pdata->irq_base;
if (pdata && pdata->irq_base > 0)
irq_base = pdata->irq_base;
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index 62b4626..2204893 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -344,7 +344,7 @@ static int wm8400_i2c_probe(struct i2c_client *i2c,
struct wm8400 *wm8400;
int ret;
- wm8400 = kzalloc(sizeof(struct wm8400), GFP_KERNEL);
+ wm8400 = devm_kzalloc(&i2c->dev, sizeof(struct wm8400), GFP_KERNEL);
if (wm8400 == NULL) {
ret = -ENOMEM;
goto err;
@@ -353,7 +353,7 @@ static int wm8400_i2c_probe(struct i2c_client *i2c,
wm8400->regmap = regmap_init_i2c(i2c, &wm8400_regmap_config);
if (IS_ERR(wm8400->regmap)) {
ret = PTR_ERR(wm8400->regmap);
- goto struct_err;
+ goto err;
}
wm8400->dev = &i2c->dev;
@@ -367,8 +367,6 @@ static int wm8400_i2c_probe(struct i2c_client *i2c,
map_err:
regmap_exit(wm8400->regmap);
-struct_err:
- kfree(wm8400);
err:
return ret;
}
@@ -379,7 +377,6 @@ static int wm8400_i2c_remove(struct i2c_client *i2c)
wm8400_release(wm8400);
regmap_exit(wm8400->regmap);
- kfree(wm8400);
return 0;
}
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 61894fc..a04b3c1 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -28,11 +28,7 @@
#include <linux/mfd/wm8994/pdata.h>
#include <linux/mfd/wm8994/registers.h>
-static int wm8994_read(struct wm8994 *wm8994, unsigned short reg,
- int bytes, void *dest)
-{
- return regmap_raw_read(wm8994->regmap, reg, dest, bytes);
-}
+#include "wm8994.h"
/**
* wm8994_reg_read: Read a single WM8994 register.
@@ -68,12 +64,6 @@ int wm8994_bulk_read(struct wm8994 *wm8994, unsigned short reg,
return regmap_bulk_read(wm8994->regmap, reg, buf, count);
}
-static int wm8994_write(struct wm8994 *wm8994, unsigned short reg,
- int bytes, const void *src)
-{
- return regmap_raw_write(wm8994->regmap, reg, src, bytes);
-}
-
/**
* wm8994_reg_write: Write a single WM8994 register.
*
@@ -252,6 +242,34 @@ static int wm8994_suspend(struct device *dev)
break;
}
+ switch (wm8994->type) {
+ case WM1811:
+ ret = wm8994_reg_read(wm8994, WM8994_ANTIPOP_2);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read jackdet: %d\n", ret);
+ } else if (ret & WM1811_JACKDET_MODE_MASK) {
+ dev_dbg(dev, "CODEC still active, ignoring suspend\n");
+ return 0;
+ }
+ break;
+ default:
+ break;
+ }
+
/* Disable LDO pulldowns while the device is suspended if we
* don't know that something will be driving them. */
if (!wm8994->ldo_ena_always_driven)
@@ -259,25 +277,14 @@ static int wm8994_suspend(struct device *dev)
WM8994_LDO1ENA_PD | WM8994_LDO2ENA_PD,
WM8994_LDO1ENA_PD | WM8994_LDO2ENA_PD);
- /* GPIO configuration state is saved here since we may be configuring
- * the GPIO alternate functions even if we're not using the gpiolib
- * driver for them.
- */
- ret = wm8994_read(wm8994, WM8994_GPIO_1, WM8994_NUM_GPIO_REGS * 2,
- &wm8994->gpio_regs);
- if (ret < 0)
- dev_err(dev, "Failed to save GPIO registers: %d\n", ret);
-
- /* For similar reasons we also stash the regulator states */
- ret = wm8994_read(wm8994, WM8994_LDO_1, WM8994_NUM_LDO_REGS * 2,
- &wm8994->ldo_regs);
- if (ret < 0)
- dev_err(dev, "Failed to save LDO registers: %d\n", ret);
-
/* Explicitly put the device into reset in case regulators
* don't get disabled in order to ensure consistent restart.
*/
- wm8994_reg_write(wm8994, WM8994_SOFTWARE_RESET, 0x8994);
+ wm8994_reg_write(wm8994, WM8994_SOFTWARE_RESET,
+ wm8994_reg_read(wm8994, WM8994_SOFTWARE_RESET));
+
+ regcache_cache_only(wm8994->regmap, true);
+ regcache_mark_dirty(wm8994->regmap);
wm8994->suspended = true;
@@ -294,7 +301,7 @@ static int wm8994_suspend(struct device *dev)
static int wm8994_resume(struct device *dev)
{
struct wm8994 *wm8994 = dev_get_drvdata(dev);
- int ret, i;
+ int ret;
/* We may have lied to the PM core about suspending */
if (!wm8994->suspended)
@@ -307,27 +314,13 @@ static int wm8994_resume(struct device *dev)
return ret;
}
- /* Write register at a time as we use the cache on the CPU so store
- * it in native endian.
- */
- for (i = 0; i < ARRAY_SIZE(wm8994->irq_masks_cur); i++) {
- ret = wm8994_reg_write(wm8994, WM8994_INTERRUPT_STATUS_1_MASK
- + i, wm8994->irq_masks_cur[i]);
- if (ret < 0)
- dev_err(dev, "Failed to restore interrupt masks: %d\n",
- ret);
+ regcache_cache_only(wm8994->regmap, false);
+ ret = regcache_sync(wm8994->regmap);
+ if (ret != 0) {
+ dev_err(dev, "Failed to restore register map: %d\n", ret);
+ goto err_enable;
}
- ret = wm8994_write(wm8994, WM8994_LDO_1, WM8994_NUM_LDO_REGS * 2,
- &wm8994->ldo_regs);
- if (ret < 0)
- dev_err(dev, "Failed to restore LDO registers: %d\n", ret);
-
- ret = wm8994_write(wm8994, WM8994_GPIO_1, WM8994_NUM_GPIO_REGS * 2,
- &wm8994->gpio_regs);
- if (ret < 0)
- dev_err(dev, "Failed to restore GPIO registers: %d\n", ret);
-
/* Disable LDO pulldowns while the device is active */
wm8994_set_bits(wm8994, WM8994_PULL_CONTROL_2,
WM8994_LDO1ENA_PD | WM8994_LDO2ENA_PD,
@@ -336,6 +329,11 @@ static int wm8994_resume(struct device *dev)
wm8994->suspended = false;
return 0;
+
+err_enable:
+ regulator_bulk_disable(wm8994->num_supplies, wm8994->supplies);
+
+ return ret;
}
#endif
@@ -361,19 +359,16 @@ static int wm8994_ldo_in_use(struct wm8994_pdata *pdata, int ldo)
}
#endif
-static struct regmap_config wm8994_regmap_config = {
- .reg_bits = 16,
- .val_bits = 16,
-};
-
/*
* Instantiate the generic non-control parts of the device.
*/
static int wm8994_device_init(struct wm8994 *wm8994, int irq)
{
struct wm8994_pdata *pdata = wm8994->dev->platform_data;
+ struct regmap_config *regmap_config;
const char *devname;
int ret, i;
+ int pulls = 0;
dev_set_drvdata(wm8994->dev, wm8994);
@@ -402,9 +397,9 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
goto err_regmap;
}
- wm8994->supplies = kzalloc(sizeof(struct regulator_bulk_data) *
- wm8994->num_supplies,
- GFP_KERNEL);
+ wm8994->supplies = devm_kzalloc(wm8994->dev,
+ sizeof(struct regulator_bulk_data) *
+ wm8994->num_supplies, GFP_KERNEL);
if (!wm8994->supplies) {
ret = -ENOMEM;
goto err_regmap;
@@ -432,7 +427,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
wm8994->supplies);
if (ret != 0) {
dev_err(wm8994->dev, "Failed to get supplies: %d\n", ret);
- goto err_supplies;
+ goto err_regmap;
}
ret = regulator_bulk_enable(wm8994->num_supplies,
@@ -482,25 +477,54 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
ret);
goto err_enable;
}
+ wm8994->revision = ret;
switch (wm8994->type) {
case WM8994:
- switch (ret) {
+ switch (wm8994->revision) {
case 0:
case 1:
dev_warn(wm8994->dev,
"revision %c not fully supported\n",
- 'A' + ret);
+ 'A' + wm8994->revision);
break;
default:
break;
}
break;
+ case WM1811:
+ /* Revision C did not change the relevant layer */
+ if (wm8994->revision > 1)
+ wm8994->revision++;
+ break;
default:
break;
}
- dev_info(wm8994->dev, "%s revision %c\n", devname, 'A' + ret);
+ dev_info(wm8994->dev, "%s revision %c\n", devname,
+ 'A' + wm8994->revision);
+
+ switch (wm8994->type) {
+ case WM1811:
+ regmap_config = &wm1811_regmap_config;
+ break;
+ case WM8994:
+ regmap_config = &wm8994_regmap_config;
+ break;
+ case WM8958:
+ regmap_config = &wm8958_regmap_config;
+ break;
+ default:
+ dev_err(wm8994->dev, "Unknown device type %d\n", wm8994->type);
+ return -EINVAL;
+ }
+
+ ret = regmap_reinit_cache(wm8994->regmap, regmap_config);
+ if (ret != 0) {
+ dev_err(wm8994->dev, "Failed to reinit register cache: %d\n",
+ ret);
+ return ret;
+ }
if (pdata) {
wm8994->irq_base = pdata->irq_base;
@@ -516,12 +540,16 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq)
}
wm8994->ldo_ena_always_driven = pdata->ldo_ena_always_driven;
+
+ if (pdata->spkmode_pu)
+ pulls |= WM8994_SPKMODE_PU;
}
- /* Disable LDO pulldowns while the device is active */
+ /* Disable unneeded pulls */
wm8994_set_bits(wm8994, WM8994_PULL_CONTROL_2,
- WM8994_LDO1ENA_PD | WM8994_LDO2ENA_PD,
- 0);
+ WM8994_LDO1ENA_PD | WM8994_LDO2ENA_PD |
+ WM8994_SPKMODE_PU | WM8994_CSNADDR_PD,
+ pulls);
/* In some system designs where the regulators are not in use,
* we can achieve a small reduction in leakage currents by
@@ -560,12 +588,9 @@ err_enable:
wm8994->supplies);
err_get:
regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
-err_supplies:
- kfree(wm8994->supplies);
err_regmap:
regmap_exit(wm8994->regmap);
mfd_remove_devices(wm8994->dev);
- kfree(wm8994);
return ret;
}
@@ -577,18 +602,24 @@ static void wm8994_device_exit(struct wm8994 *wm8994)
regulator_bulk_disable(wm8994->num_supplies,
wm8994->supplies);
regulator_bulk_free(wm8994->num_supplies, wm8994->supplies);
- kfree(wm8994->supplies);
regmap_exit(wm8994->regmap);
- kfree(wm8994);
}
+static const struct of_device_id wm8994_of_match[] = {
+ { .compatible = "wlf,wm1811", },
+ { .compatible = "wlf,wm8994", },
+ { .compatible = "wlf,wm8958", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, wm8994_of_match);
+
static int wm8994_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct wm8994 *wm8994;
int ret;
- wm8994 = kzalloc(sizeof(struct wm8994), GFP_KERNEL);
+ wm8994 = devm_kzalloc(&i2c->dev, sizeof(struct wm8994), GFP_KERNEL);
if (wm8994 == NULL)
return -ENOMEM;
@@ -597,12 +628,11 @@ static int wm8994_i2c_probe(struct i2c_client *i2c,
wm8994->irq = i2c->irq;
wm8994->type = id->driver_data;
- wm8994->regmap = regmap_init_i2c(i2c, &wm8994_regmap_config);
+ wm8994->regmap = regmap_init_i2c(i2c, &wm8994_base_regmap_config);
if (IS_ERR(wm8994->regmap)) {
ret = PTR_ERR(wm8994->regmap);
dev_err(wm8994->dev, "Failed to allocate register map: %d\n",
ret);
- kfree(wm8994);
return ret;
}
@@ -620,6 +650,7 @@ static int wm8994_i2c_remove(struct i2c_client *i2c)
static const struct i2c_device_id wm8994_i2c_id[] = {
{ "wm1811", WM1811 },
+ { "wm1811a", WM1811 },
{ "wm8994", WM8994 },
{ "wm8958", WM8958 },
{ }
@@ -634,6 +665,7 @@ static struct i2c_driver wm8994_i2c_driver = {
.name = "wm8994",
.owner = THIS_MODULE,
.pm = &wm8994_pm_ops,
+ .of_match_table = wm8994_of_match,
},
.probe = wm8994_i2c_probe,
.remove = wm8994_i2c_remove,
diff --git a/drivers/mfd/wm8994-irq.c b/drivers/mfd/wm8994-irq.c
index d682f7b..46b20c4 100644
--- a/drivers/mfd/wm8994-irq.c
+++ b/drivers/mfd/wm8994-irq.c
@@ -18,248 +18,127 @@
#include <linux/irq.h>
#include <linux/mfd/core.h>
#include <linux/interrupt.h>
+#include <linux/regmap.h>
#include <linux/mfd/wm8994/core.h>
#include <linux/mfd/wm8994/registers.h>
#include <linux/delay.h>
-struct wm8994_irq_data {
- int reg;
- int mask;
-};
-
-static struct wm8994_irq_data wm8994_irqs[] = {
+static struct regmap_irq wm8994_irqs[] = {
[WM8994_IRQ_TEMP_SHUT] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_TEMP_SHUT_EINT,
},
[WM8994_IRQ_MIC1_DET] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_MIC1_DET_EINT,
},
[WM8994_IRQ_MIC1_SHRT] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_MIC1_SHRT_EINT,
},
[WM8994_IRQ_MIC2_DET] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_MIC2_DET_EINT,
},
[WM8994_IRQ_MIC2_SHRT] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_MIC2_SHRT_EINT,
},
[WM8994_IRQ_FLL1_LOCK] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_FLL1_LOCK_EINT,
},
[WM8994_IRQ_FLL2_LOCK] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_FLL2_LOCK_EINT,
},
[WM8994_IRQ_SRC1_LOCK] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_SRC1_LOCK_EINT,
},
[WM8994_IRQ_SRC2_LOCK] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_SRC2_LOCK_EINT,
},
[WM8994_IRQ_AIF1DRC1_SIG_DET] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_AIF1DRC1_SIG_DET,
},
[WM8994_IRQ_AIF1DRC2_SIG_DET] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_AIF1DRC2_SIG_DET_EINT,
},
[WM8994_IRQ_AIF2DRC_SIG_DET] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_AIF2DRC_SIG_DET_EINT,
},
[WM8994_IRQ_FIFOS_ERR] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_FIFOS_ERR_EINT,
},
[WM8994_IRQ_WSEQ_DONE] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_WSEQ_DONE_EINT,
},
[WM8994_IRQ_DCS_DONE] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_DCS_DONE_EINT,
},
[WM8994_IRQ_TEMP_WARN] = {
- .reg = 2,
+ .reg_offset = 1,
.mask = WM8994_TEMP_WARN_EINT,
},
[WM8994_IRQ_GPIO(1)] = {
- .reg = 1,
.mask = WM8994_GP1_EINT,
},
[WM8994_IRQ_GPIO(2)] = {
- .reg = 1,
.mask = WM8994_GP2_EINT,
},
[WM8994_IRQ_GPIO(3)] = {
- .reg = 1,
.mask = WM8994_GP3_EINT,
},
[WM8994_IRQ_GPIO(4)] = {
- .reg = 1,
.mask = WM8994_GP4_EINT,
},
[WM8994_IRQ_GPIO(5)] = {
- .reg = 1,
.mask = WM8994_GP5_EINT,
},
[WM8994_IRQ_GPIO(6)] = {
- .reg = 1,
.mask = WM8994_GP6_EINT,
},
[WM8994_IRQ_GPIO(7)] = {
- .reg = 1,
.mask = WM8994_GP7_EINT,
},
[WM8994_IRQ_GPIO(8)] = {
- .reg = 1,
.mask = WM8994_GP8_EINT,
},
[WM8994_IRQ_GPIO(9)] = {
- .reg = 1,
.mask = WM8994_GP8_EINT,
},
[WM8994_IRQ_GPIO(10)] = {
- .reg = 1,
.mask = WM8994_GP10_EINT,
},
[WM8994_IRQ_GPIO(11)] = {
- .reg = 1,
.mask = WM8994_GP11_EINT,
},
};
-static inline int irq_data_to_status_reg(struct wm8994_irq_data *irq_data)
-{
- return WM8994_INTERRUPT_STATUS_1 - 1 + irq_data->reg;
-}
-
-static inline int irq_data_to_mask_reg(struct wm8994_irq_data *irq_data)
-{
- return WM8994_INTERRUPT_STATUS_1_MASK - 1 + irq_data->reg;
-}
-
-static inline struct wm8994_irq_data *irq_to_wm8994_irq(struct wm8994 *wm8994,
- int irq)
-{
- return &wm8994_irqs[irq - wm8994->irq_base];
-}
-
-static void wm8994_irq_lock(struct irq_data *data)
-{
- struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
-
- mutex_lock(&wm8994->irq_lock);
-}
-
-static void wm8994_irq_sync_unlock(struct irq_data *data)
-{
- struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
- int i;
-
- for (i = 0; i < ARRAY_SIZE(wm8994->irq_masks_cur); i++) {
- /* If there's been a change in the mask write it back
- * to the hardware. */
- if (wm8994->irq_masks_cur[i] != wm8994->irq_masks_cache[i]) {
- wm8994->irq_masks_cache[i] = wm8994->irq_masks_cur[i];
- wm8994_reg_write(wm8994,
- WM8994_INTERRUPT_STATUS_1_MASK + i,
- wm8994->irq_masks_cur[i]);
- }
- }
-
- mutex_unlock(&wm8994->irq_lock);
-}
-
-static void wm8994_irq_enable(struct irq_data *data)
-{
- struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
- struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994,
- data->irq);
-
- wm8994->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
-}
-
-static void wm8994_irq_disable(struct irq_data *data)
-{
- struct wm8994 *wm8994 = irq_data_get_irq_chip_data(data);
- struct wm8994_irq_data *irq_data = irq_to_wm8994_irq(wm8994,
- data->irq);
-
- wm8994->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
-}
+static struct regmap_irq_chip wm8994_irq_chip = {
+ .name = "wm8994",
+ .irqs = wm8994_irqs,
+ .num_irqs = ARRAY_SIZE(wm8994_irqs),
-static struct irq_chip wm8994_irq_chip = {
- .name = "wm8994",
- .irq_bus_lock = wm8994_irq_lock,
- .irq_bus_sync_unlock = wm8994_irq_sync_unlock,
- .irq_disable = wm8994_irq_disable,
- .irq_enable = wm8994_irq_enable,
+ .num_regs = 2,
+ .status_base = WM8994_INTERRUPT_STATUS_1,
+ .mask_base = WM8994_INTERRUPT_STATUS_1_MASK,
+ .ack_base = WM8994_INTERRUPT_STATUS_1,
};
-/* The processing of the primary interrupt occurs in a thread so that
- * we can interact with the device over I2C or SPI. */
-static irqreturn_t wm8994_irq_thread(int irq, void *data)
-{
- struct wm8994 *wm8994 = data;
- unsigned int i;
- u16 status[WM8994_NUM_IRQ_REGS];
- int ret;
-
- ret = wm8994_bulk_read(wm8994, WM8994_INTERRUPT_STATUS_1,
- WM8994_NUM_IRQ_REGS, status);
- if (ret < 0) {
- dev_err(wm8994->dev, "Failed to read interrupt status: %d\n",
- ret);
- return IRQ_NONE;
- }
-
- /* Bit swap and apply masking */
- for (i = 0; i < WM8994_NUM_IRQ_REGS; i++) {
- status[i] = be16_to_cpu(status[i]);
- status[i] &= ~wm8994->irq_masks_cur[i];
- }
-
- /* Ack any unmasked IRQs */
- for (i = 0; i < ARRAY_SIZE(status); i++) {
- if (status[i])
- wm8994_reg_write(wm8994, WM8994_INTERRUPT_STATUS_1 + i,
- status[i]);
- }
-
- /* Report */
- for (i = 0; i < ARRAY_SIZE(wm8994_irqs); i++) {
- if (status[wm8994_irqs[i].reg - 1] & wm8994_irqs[i].mask)
- handle_nested_irq(wm8994->irq_base + i);
- }
-
- return IRQ_HANDLED;
-}
-
int wm8994_irq_init(struct wm8994 *wm8994)
{
- int i, cur_irq, ret;
-
- mutex_init(&wm8994->irq_lock);
-
- /* Mask the individual interrupt sources */
- for (i = 0; i < ARRAY_SIZE(wm8994->irq_masks_cur); i++) {
- wm8994->irq_masks_cur[i] = 0xffff;
- wm8994->irq_masks_cache[i] = 0xffff;
- wm8994_reg_write(wm8994, WM8994_INTERRUPT_STATUS_1_MASK + i,
- 0xffff);
- }
+ int ret;
if (!wm8994->irq) {
dev_warn(wm8994->dev,
@@ -274,30 +153,12 @@ int wm8994_irq_init(struct wm8994 *wm8994)
return 0;
}
- /* Register them with genirq */
- for (cur_irq = wm8994->irq_base;
- cur_irq < ARRAY_SIZE(wm8994_irqs) + wm8994->irq_base;
- cur_irq++) {
- irq_set_chip_data(cur_irq, wm8994);
- irq_set_chip_and_handler(cur_irq, &wm8994_irq_chip,
- handle_edge_irq);
- irq_set_nested_thread(cur_irq, 1);
-
- /* ARM needs us to explicitly flag the IRQ as valid
- * and will set them noprobe when we do so. */
-#ifdef CONFIG_ARM
- set_irq_flags(cur_irq, IRQF_VALID);
-#else
- irq_set_noprobe(cur_irq);
-#endif
- }
-
- ret = request_threaded_irq(wm8994->irq, NULL, wm8994_irq_thread,
- IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
- "wm8994", wm8994);
+ ret = regmap_add_irq_chip(wm8994->regmap, wm8994->irq,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ wm8994->irq_base, &wm8994_irq_chip,
+ &wm8994->irq_data);
if (ret != 0) {
- dev_err(wm8994->dev, "Failed to request IRQ %d: %d\n",
- wm8994->irq, ret);
+ dev_err(wm8994->dev, "Failed to register IRQ chip: %d\n", ret);
return ret;
}
@@ -309,6 +170,5 @@ int wm8994_irq_init(struct wm8994 *wm8994)
void wm8994_irq_exit(struct wm8994 *wm8994)
{
- if (wm8994->irq)
- free_irq(wm8994->irq, wm8994);
+ regmap_del_irq_chip(wm8994->irq, wm8994->irq_data);
}
diff --git a/drivers/mfd/wm8994-regmap.c b/drivers/mfd/wm8994-regmap.c
new file mode 100644
index 0000000..bc0c509
--- /dev/null
+++ b/drivers/mfd/wm8994-regmap.c
@@ -0,0 +1,1239 @@
+/*
+ * wm8994-regmap.c -- Register map data for WM8994 series devices
+ *
+ * Copyright 2011 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/mfd/wm8994/core.h>
+#include <linux/mfd/wm8994/registers.h>
+#include <linux/regmap.h>
+
+#include "wm8994.h"
+
+static struct reg_default wm1811_defaults[] = {
+ { 0x0000, 0x1811 }, /* R0 - Software Reset */
+ { 0x0001, 0x0000 }, /* R1 - Power Management (1) */
+ { 0x0002, 0x6000 }, /* R2 - Power Management (2) */
+ { 0x0003, 0x0000 }, /* R3 - Power Management (3) */
+ { 0x0004, 0x0000 }, /* R4 - Power Management (4) */
+ { 0x0005, 0x0000 }, /* R5 - Power Management (5) */
+ { 0x0006, 0x0000 }, /* R6 - Power Management (6) */
+ { 0x0015, 0x0000 }, /* R21 - Input Mixer (1) */
+ { 0x0018, 0x008B }, /* R24 - Left Line Input 1&2 Volume */
+ { 0x0019, 0x008B }, /* R25 - Left Line Input 3&4 Volume */
+ { 0x001A, 0x008B }, /* R26 - Right Line Input 1&2 Volume */
+ { 0x001B, 0x008B }, /* R27 - Right Line Input 3&4 Volume */
+ { 0x001C, 0x006D }, /* R28 - Left Output Volume */
+ { 0x001D, 0x006D }, /* R29 - Right Output Volume */
+ { 0x001E, 0x0066 }, /* R30 - Line Outputs Volume */
+ { 0x001F, 0x0020 }, /* R31 - HPOUT2 Volume */
+ { 0x0020, 0x0079 }, /* R32 - Left OPGA Volume */
+ { 0x0021, 0x0079 }, /* R33 - Right OPGA Volume */
+ { 0x0022, 0x0003 }, /* R34 - SPKMIXL Attenuation */
+ { 0x0023, 0x0003 }, /* R35 - SPKMIXR Attenuation */
+ { 0x0024, 0x0011 }, /* R36 - SPKOUT Mixers */
+ { 0x0025, 0x0140 }, /* R37 - ClassD */
+ { 0x0026, 0x0079 }, /* R38 - Speaker Volume Left */
+ { 0x0027, 0x0079 }, /* R39 - Speaker Volume Right */
+ { 0x0028, 0x0000 }, /* R40 - Input Mixer (2) */
+ { 0x0029, 0x0000 }, /* R41 - Input Mixer (3) */
+ { 0x002A, 0x0000 }, /* R42 - Input Mixer (4) */
+ { 0x002B, 0x0000 }, /* R43 - Input Mixer (5) */
+ { 0x002C, 0x0000 }, /* R44 - Input Mixer (6) */
+ { 0x002D, 0x0000 }, /* R45 - Output Mixer (1) */
+ { 0x002E, 0x0000 }, /* R46 - Output Mixer (2) */
+ { 0x002F, 0x0000 }, /* R47 - Output Mixer (3) */
+ { 0x0030, 0x0000 }, /* R48 - Output Mixer (4) */
+ { 0x0031, 0x0000 }, /* R49 - Output Mixer (5) */
+ { 0x0032, 0x0000 }, /* R50 - Output Mixer (6) */
+ { 0x0033, 0x0000 }, /* R51 - HPOUT2 Mixer */
+ { 0x0034, 0x0000 }, /* R52 - Line Mixer (1) */
+ { 0x0035, 0x0000 }, /* R53 - Line Mixer (2) */
+ { 0x0036, 0x0000 }, /* R54 - Speaker Mixer */
+ { 0x0037, 0x0000 }, /* R55 - Additional Control */
+ { 0x0038, 0x0000 }, /* R56 - AntiPOP (1) */
+ { 0x0039, 0x0180 }, /* R57 - AntiPOP (2) */
+ { 0x003B, 0x000D }, /* R59 - LDO 1 */
+ { 0x003C, 0x0003 }, /* R60 - LDO 2 */
+ { 0x003D, 0x0039 }, /* R61 - MICBIAS1 */
+ { 0x003E, 0x0039 }, /* R62 - MICBIAS2 */
+ { 0x004C, 0x1F25 }, /* R76 - Charge Pump (1) */
+ { 0x004D, 0xAB19 }, /* R77 - Charge Pump (2) */
+ { 0x0051, 0x0004 }, /* R81 - Class W (1) */
+ { 0x0054, 0x0000 }, /* R84 - DC Servo (1) */
+ { 0x0055, 0x054A }, /* R85 - DC Servo (2) */
+ { 0x0058, 0x0000 }, /* R88 - DC Servo Readback */
+ { 0x0059, 0x0000 }, /* R89 - DC Servo (4) */
+ { 0x0060, 0x0000 }, /* R96 - Analogue HP (1) */
+ { 0x00C5, 0x0000 }, /* R197 - Class D Test (5) */
+ { 0x00D0, 0x7600 }, /* R208 - Mic Detect 1 */
+ { 0x00D1, 0x007F }, /* R209 - Mic Detect 2 */
+ { 0x00D2, 0x0000 }, /* R210 - Mic Detect 3 */
+ { 0x0100, 0x0100 }, /* R256 - Chip Revision */
+ { 0x0101, 0x8004 }, /* R257 - Control Interface */
+ { 0x0200, 0x0000 }, /* R512 - AIF1 Clocking (1) */
+ { 0x0201, 0x0000 }, /* R513 - AIF1 Clocking (2) */
+ { 0x0204, 0x0000 }, /* R516 - AIF2 Clocking (1) */
+ { 0x0205, 0x0000 }, /* R517 - AIF2 Clocking (2) */
+ { 0x0208, 0x0000 }, /* R520 - Clocking (1) */
+ { 0x0209, 0x0000 }, /* R521 - Clocking (2) */
+ { 0x0210, 0x0083 }, /* R528 - AIF1 Rate */
+ { 0x0211, 0x0083 }, /* R529 - AIF2 Rate */
+ { 0x0212, 0x0000 }, /* R530 - Rate Status */
+ { 0x0220, 0x0000 }, /* R544 - FLL1 Control (1) */
+ { 0x0221, 0x0000 }, /* R545 - FLL1 Control (2) */
+ { 0x0222, 0x0000 }, /* R546 - FLL1 Control (3) */
+ { 0x0223, 0x0000 }, /* R547 - FLL1 Control (4) */
+ { 0x0224, 0x0C80 }, /* R548 - FLL1 Control (5) */
+ { 0x0226, 0x0000 }, /* R550 - FLL1 EFS 1 */
+ { 0x0227, 0x0006 }, /* R551 - FLL1 EFS 2 */
+ { 0x0240, 0x0000 }, /* R576 - FLL2 Control (1) */
+ { 0x0241, 0x0000 }, /* R577 - FLL2 Control (2) */
+ { 0x0242, 0x0000 }, /* R578 - FLL2 Control (3) */
+ { 0x0243, 0x0000 }, /* R579 - FLL2 Control (4) */
+ { 0x0244, 0x0C80 }, /* R580 - FLL2 Control (5) */
+ { 0x0246, 0x0000 }, /* R582 - FLL2 EFS 1 */
+ { 0x0247, 0x0006 }, /* R583 - FLL2 EFS 2 */
+ { 0x0300, 0x4050 }, /* R768 - AIF1 Control (1) */
+ { 0x0301, 0x4000 }, /* R769 - AIF1 Control (2) */
+ { 0x0302, 0x0000 }, /* R770 - AIF1 Master/Slave */
+ { 0x0303, 0x0040 }, /* R771 - AIF1 BCLK */
+ { 0x0304, 0x0040 }, /* R772 - AIF1ADC LRCLK */
+ { 0x0305, 0x0040 }, /* R773 - AIF1DAC LRCLK */
+ { 0x0306, 0x0004 }, /* R774 - AIF1DAC Data */
+ { 0x0307, 0x0100 }, /* R775 - AIF1ADC Data */
+ { 0x0310, 0x4050 }, /* R784 - AIF2 Control (1) */
+ { 0x0311, 0x4000 }, /* R785 - AIF2 Control (2) */
+ { 0x0312, 0x0000 }, /* R786 - AIF2 Master/Slave */
+ { 0x0313, 0x0040 }, /* R787 - AIF2 BCLK */
+ { 0x0314, 0x0040 }, /* R788 - AIF2ADC LRCLK */
+ { 0x0315, 0x0040 }, /* R789 - AIF2DAC LRCLK */
+ { 0x0316, 0x0000 }, /* R790 - AIF2DAC Data */
+ { 0x0317, 0x0000 }, /* R791 - AIF2ADC Data */
+ { 0x0318, 0x0003 }, /* R792 - AIF2TX Control */
+ { 0x0320, 0x0040 }, /* R800 - AIF3 Control (1) */
+ { 0x0321, 0x0000 }, /* R801 - AIF3 Control (2) */
+ { 0x0322, 0x0000 }, /* R802 - AIF3DAC Data */
+ { 0x0323, 0x0000 }, /* R803 - AIF3ADC Data */
+ { 0x0400, 0x00C0 }, /* R1024 - AIF1 ADC1 Left Volume */
+ { 0x0401, 0x00C0 }, /* R1025 - AIF1 ADC1 Right Volume */
+ { 0x0402, 0x00C0 }, /* R1026 - AIF1 DAC1 Left Volume */
+ { 0x0403, 0x00C0 }, /* R1027 - AIF1 DAC1 Right Volume */
+ { 0x0410, 0x0000 }, /* R1040 - AIF1 ADC1 Filters */
+ { 0x0420, 0x0200 }, /* R1056 - AIF1 DAC1 Filters (1) */
+ { 0x0421, 0x0010 }, /* R1057 - AIF1 DAC1 Filters (2) */
+ { 0x0430, 0x0068 }, /* R1072 - AIF1 DAC1 Noise Gate */
+ { 0x0440, 0x0098 }, /* R1088 - AIF1 DRC1 (1) */
+ { 0x0441, 0x0845 }, /* R1089 - AIF1 DRC1 (2) */
+ { 0x0442, 0x0000 }, /* R1090 - AIF1 DRC1 (3) */
+ { 0x0443, 0x0000 }, /* R1091 - AIF1 DRC1 (4) */
+ { 0x0444, 0x0000 }, /* R1092 - AIF1 DRC1 (5) */
+ { 0x0480, 0x6318 }, /* R1152 - AIF1 DAC1 EQ Gains (1) */
+ { 0x0481, 0x6300 }, /* R1153 - AIF1 DAC1 EQ Gains (2) */
+ { 0x0482, 0x0FCA }, /* R1154 - AIF1 DAC1 EQ Band 1 A */
+ { 0x0483, 0x0400 }, /* R1155 - AIF1 DAC1 EQ Band 1 B */
+ { 0x0484, 0x00D8 }, /* R1156 - AIF1 DAC1 EQ Band 1 PG */
+ { 0x0485, 0x1EB5 }, /* R1157 - AIF1 DAC1 EQ Band 2 A */
+ { 0x0486, 0xF145 }, /* R1158 - AIF1 DAC1 EQ Band 2 B */
+ { 0x0487, 0x0B75 }, /* R1159 - AIF1 DAC1 EQ Band 2 C */
+ { 0x0488, 0x01C5 }, /* R1160 - AIF1 DAC1 EQ Band 2 PG */
+ { 0x0489, 0x1C58 }, /* R1161 - AIF1 DAC1 EQ Band 3 A */
+ { 0x048A, 0xF373 }, /* R1162 - AIF1 DAC1 EQ Band 3 B */
+ { 0x048B, 0x0A54 }, /* R1163 - AIF1 DAC1 EQ Band 3 C */
+ { 0x048C, 0x0558 }, /* R1164 - AIF1 DAC1 EQ Band 3 PG */
+ { 0x048D, 0x168E }, /* R1165 - AIF1 DAC1 EQ Band 4 A */
+ { 0x048E, 0xF829 }, /* R1166 - AIF1 DAC1 EQ Band 4 B */
+ { 0x048F, 0x07AD }, /* R1167 - AIF1 DAC1 EQ Band 4 C */
+ { 0x0490, 0x1103 }, /* R1168 - AIF1 DAC1 EQ Band 4 PG */
+ { 0x0491, 0x0564 }, /* R1169 - AIF1 DAC1 EQ Band 5 A */
+ { 0x0492, 0x0559 }, /* R1170 - AIF1 DAC1 EQ Band 5 B */
+ { 0x0493, 0x4000 }, /* R1171 - AIF1 DAC1 EQ Band 5 PG */
+ { 0x0494, 0x0000 }, /* R1172 - AIF1 DAC1 EQ Band 1 C */
+ { 0x0500, 0x00C0 }, /* R1280 - AIF2 ADC Left Volume */
+ { 0x0501, 0x00C0 }, /* R1281 - AIF2 ADC Right Volume */
+ { 0x0502, 0x00C0 }, /* R1282 - AIF2 DAC Left Volume */
+ { 0x0503, 0x00C0 }, /* R1283 - AIF2 DAC Right Volume */
+ { 0x0510, 0x0000 }, /* R1296 - AIF2 ADC Filters */
+ { 0x0520, 0x0200 }, /* R1312 - AIF2 DAC Filters (1) */
+ { 0x0521, 0x0010 }, /* R1313 - AIF2 DAC Filters (2) */
+ { 0x0530, 0x0068 }, /* R1328 - AIF2 DAC Noise Gate */
+ { 0x0540, 0x0098 }, /* R1344 - AIF2 DRC (1) */
+ { 0x0541, 0x0845 }, /* R1345 - AIF2 DRC (2) */
+ { 0x0542, 0x0000 }, /* R1346 - AIF2 DRC (3) */
+ { 0x0543, 0x0000 }, /* R1347 - AIF2 DRC (4) */
+ { 0x0544, 0x0000 }, /* R1348 - AIF2 DRC (5) */
+ { 0x0580, 0x6318 }, /* R1408 - AIF2 EQ Gains (1) */
+ { 0x0581, 0x6300 }, /* R1409 - AIF2 EQ Gains (2) */
+ { 0x0582, 0x0FCA }, /* R1410 - AIF2 EQ Band 1 A */
+ { 0x0583, 0x0400 }, /* R1411 - AIF2 EQ Band 1 B */
+ { 0x0584, 0x00D8 }, /* R1412 - AIF2 EQ Band 1 PG */
+ { 0x0585, 0x1EB5 }, /* R1413 - AIF2 EQ Band 2 A */
+ { 0x0586, 0xF145 }, /* R1414 - AIF2 EQ Band 2 B */
+ { 0x0587, 0x0B75 }, /* R1415 - AIF2 EQ Band 2 C */
+ { 0x0588, 0x01C5 }, /* R1416 - AIF2 EQ Band 2 PG */
+ { 0x0589, 0x1C58 }, /* R1417 - AIF2 EQ Band 3 A */
+ { 0x058A, 0xF373 }, /* R1418 - AIF2 EQ Band 3 B */
+ { 0x058B, 0x0A54 }, /* R1419 - AIF2 EQ Band 3 C */
+ { 0x058C, 0x0558 }, /* R1420 - AIF2 EQ Band 3 PG */
+ { 0x058D, 0x168E }, /* R1421 - AIF2 EQ Band 4 A */
+ { 0x058E, 0xF829 }, /* R1422 - AIF2 EQ Band 4 B */
+ { 0x058F, 0x07AD }, /* R1423 - AIF2 EQ Band 4 C */
+ { 0x0590, 0x1103 }, /* R1424 - AIF2 EQ Band 4 PG */
+ { 0x0591, 0x0564 }, /* R1425 - AIF2 EQ Band 5 A */
+ { 0x0592, 0x0559 }, /* R1426 - AIF2 EQ Band 5 B */
+ { 0x0593, 0x4000 }, /* R1427 - AIF2 EQ Band 5 PG */
+ { 0x0594, 0x0000 }, /* R1428 - AIF2 EQ Band 1 C */
+ { 0x0600, 0x0000 }, /* R1536 - DAC1 Mixer Volumes */
+ { 0x0601, 0x0000 }, /* R1537 - DAC1 Left Mixer Routing */
+ { 0x0602, 0x0000 }, /* R1538 - DAC1 Right Mixer Routing */
+ { 0x0603, 0x0000 }, /* R1539 - AIF2ADC Mixer Volumes */
+ { 0x0604, 0x0000 }, /* R1540 - AIF2ADC Left Mixer Routing */
+ { 0x0605, 0x0000 }, /* R1541 - AIF2ADC Right Mixer Routing */
+ { 0x0606, 0x0000 }, /* R1542 - AIF1 ADC1 Left Mixer Routing */
+ { 0x0607, 0x0000 }, /* R1543 - AIF1 ADC1 Right Mixer Routing */
+ { 0x0610, 0x02C0 }, /* R1552 - DAC1 Left Volume */
+ { 0x0611, 0x02C0 }, /* R1553 - DAC1 Right Volume */
+ { 0x0612, 0x02C0 }, /* R1554 - AIF2TX Left Volume */
+ { 0x0613, 0x02C0 }, /* R1555 - AIF2TX Right Volume */
+ { 0x0614, 0x0000 }, /* R1556 - DAC Softmute */
+ { 0x0620, 0x0002 }, /* R1568 - Oversampling */
+ { 0x0621, 0x0000 }, /* R1569 - Sidetone */
+ { 0x0700, 0x8100 }, /* R1792 - GPIO 1 */
+ { 0x0701, 0xA101 }, /* R1793 - Pull Control (MCLK2) */
+ { 0x0702, 0xA101 }, /* R1794 - Pull Control (BCLK2) */
+ { 0x0703, 0xA101 }, /* R1795 - Pull Control (DACLRCLK2) */
+ { 0x0704, 0xA101 }, /* R1796 - Pull Control (DACDAT2) */
+ { 0x0707, 0xA101 }, /* R1799 - GPIO 8 */
+ { 0x0708, 0xA101 }, /* R1800 - GPIO 9 */
+ { 0x0709, 0xA101 }, /* R1801 - GPIO 10 */
+ { 0x070A, 0xA101 }, /* R1802 - GPIO 11 */
+ { 0x0720, 0x0000 }, /* R1824 - Pull Control (1) */
+ { 0x0721, 0x0156 }, /* R1825 - Pull Control (2) */
+ { 0x0730, 0x0000 }, /* R1840 - Interrupt Status 1 */
+ { 0x0731, 0x0000 }, /* R1841 - Interrupt Status 2 */
+ { 0x0732, 0x0000 }, /* R1842 - Interrupt Raw Status 2 */
+ { 0x0738, 0x07FF }, /* R1848 - Interrupt Status 1 Mask */
+ { 0x0739, 0xDFEF }, /* R1849 - Interrupt Status 2 Mask */
+ { 0x0740, 0x0000 }, /* R1856 - Interrupt Control */
+ { 0x0748, 0x003F }, /* R1864 - IRQ Debounce */
+};
+
+static struct reg_default wm8994_defaults[] = {
+ { 0x0000, 0x8994 }, /* R0 - Software Reset */
+ { 0x0001, 0x0000 }, /* R1 - Power Management (1) */
+ { 0x0002, 0x6000 }, /* R2 - Power Management (2) */
+ { 0x0003, 0x0000 }, /* R3 - Power Management (3) */
+ { 0x0004, 0x0000 }, /* R4 - Power Management (4) */
+ { 0x0005, 0x0000 }, /* R5 - Power Management (5) */
+ { 0x0006, 0x0000 }, /* R6 - Power Management (6) */
+ { 0x0015, 0x0000 }, /* R21 - Input Mixer (1) */
+ { 0x0018, 0x008B }, /* R24 - Left Line Input 1&2 Volume */
+ { 0x0019, 0x008B }, /* R25 - Left Line Input 3&4 Volume */
+ { 0x001A, 0x008B }, /* R26 - Right Line Input 1&2 Volume */
+ { 0x001B, 0x008B }, /* R27 - Right Line Input 3&4 Volume */
+ { 0x001C, 0x006D }, /* R28 - Left Output Volume */
+ { 0x001D, 0x006D }, /* R29 - Right Output Volume */
+ { 0x001E, 0x0066 }, /* R30 - Line Outputs Volume */
+ { 0x001F, 0x0020 }, /* R31 - HPOUT2 Volume */
+ { 0x0020, 0x0079 }, /* R32 - Left OPGA Volume */
+ { 0x0021, 0x0079 }, /* R33 - Right OPGA Volume */
+ { 0x0022, 0x0003 }, /* R34 - SPKMIXL Attenuation */
+ { 0x0023, 0x0003 }, /* R35 - SPKMIXR Attenuation */
+ { 0x0024, 0x0011 }, /* R36 - SPKOUT Mixers */
+ { 0x0025, 0x0140 }, /* R37 - ClassD */
+ { 0x0026, 0x0079 }, /* R38 - Speaker Volume Left */
+ { 0x0027, 0x0079 }, /* R39 - Speaker Volume Right */
+ { 0x0028, 0x0000 }, /* R40 - Input Mixer (2) */
+ { 0x0029, 0x0000 }, /* R41 - Input Mixer (3) */
+ { 0x002A, 0x0000 }, /* R42 - Input Mixer (4) */
+ { 0x002B, 0x0000 }, /* R43 - Input Mixer (5) */
+ { 0x002C, 0x0000 }, /* R44 - Input Mixer (6) */
+ { 0x002D, 0x0000 }, /* R45 - Output Mixer (1) */
+ { 0x002E, 0x0000 }, /* R46 - Output Mixer (2) */
+ { 0x002F, 0x0000 }, /* R47 - Output Mixer (3) */
+ { 0x0030, 0x0000 }, /* R48 - Output Mixer (4) */
+ { 0x0031, 0x0000 }, /* R49 - Output Mixer (5) */
+ { 0x0032, 0x0000 }, /* R50 - Output Mixer (6) */
+ { 0x0033, 0x0000 }, /* R51 - HPOUT2 Mixer */
+ { 0x0034, 0x0000 }, /* R52 - Line Mixer (1) */
+ { 0x0035, 0x0000 }, /* R53 - Line Mixer (2) */
+ { 0x0036, 0x0000 }, /* R54 - Speaker Mixer */
+ { 0x0037, 0x0000 }, /* R55 - Additional Control */
+ { 0x0038, 0x0000 }, /* R56 - AntiPOP (1) */
+ { 0x0039, 0x0000 }, /* R57 - AntiPOP (2) */
+ { 0x003A, 0x0000 }, /* R58 - MICBIAS */
+ { 0x003B, 0x000D }, /* R59 - LDO 1 */
+ { 0x003C, 0x0003 }, /* R60 - LDO 2 */
+ { 0x004C, 0x1F25 }, /* R76 - Charge Pump (1) */
+ { 0x0051, 0x0004 }, /* R81 - Class W (1) */
+ { 0x0054, 0x0000 }, /* R84 - DC Servo (1) */
+ { 0x0055, 0x054A }, /* R85 - DC Servo (2) */
+ { 0x0057, 0x0000 }, /* R87 - DC Servo (4) */
+ { 0x0058, 0x0000 }, /* R88 - DC Servo Readback */
+ { 0x0060, 0x0000 }, /* R96 - Analogue HP (1) */
+ { 0x0100, 0x0003 }, /* R256 - Chip Revision */
+ { 0x0101, 0x8004 }, /* R257 - Control Interface */
+ { 0x0110, 0x0000 }, /* R272 - Write Sequencer Ctrl (1) */
+ { 0x0111, 0x0000 }, /* R273 - Write Sequencer Ctrl (2) */
+ { 0x0200, 0x0000 }, /* R512 - AIF1 Clocking (1) */
+ { 0x0201, 0x0000 }, /* R513 - AIF1 Clocking (2) */
+ { 0x0204, 0x0000 }, /* R516 - AIF2 Clocking (1) */
+ { 0x0205, 0x0000 }, /* R517 - AIF2 Clocking (2) */
+ { 0x0208, 0x0000 }, /* R520 - Clocking (1) */
+ { 0x0209, 0x0000 }, /* R521 - Clocking (2) */
+ { 0x0210, 0x0083 }, /* R528 - AIF1 Rate */
+ { 0x0211, 0x0083 }, /* R529 - AIF2 Rate */
+ { 0x0212, 0x0000 }, /* R530 - Rate Status */
+ { 0x0220, 0x0000 }, /* R544 - FLL1 Control (1) */
+ { 0x0221, 0x0000 }, /* R545 - FLL1 Control (2) */
+ { 0x0222, 0x0000 }, /* R546 - FLL1 Control (3) */
+ { 0x0223, 0x0000 }, /* R547 - FLL1 Control (4) */
+ { 0x0224, 0x0C80 }, /* R548 - FLL1 Control (5) */
+ { 0x0240, 0x0000 }, /* R576 - FLL2 Control (1) */
+ { 0x0241, 0x0000 }, /* R577 - FLL2 Control (2) */
+ { 0x0242, 0x0000 }, /* R578 - FLL2 Control (3) */
+ { 0x0243, 0x0000 }, /* R579 - FLL2 Control (4) */
+ { 0x0244, 0x0C80 }, /* R580 - FLL2 Control (5) */
+ { 0x0300, 0x4050 }, /* R768 - AIF1 Control (1) */
+ { 0x0301, 0x4000 }, /* R769 - AIF1 Control (2) */
+ { 0x0302, 0x0000 }, /* R770 - AIF1 Master/Slave */
+ { 0x0303, 0x0040 }, /* R771 - AIF1 BCLK */
+ { 0x0304, 0x0040 }, /* R772 - AIF1ADC LRCLK */
+ { 0x0305, 0x0040 }, /* R773 - AIF1DAC LRCLK */
+ { 0x0306, 0x0004 }, /* R774 - AIF1DAC Data */
+ { 0x0307, 0x0100 }, /* R775 - AIF1ADC Data */
+ { 0x0310, 0x4050 }, /* R784 - AIF2 Control (1) */
+ { 0x0311, 0x4000 }, /* R785 - AIF2 Control (2) */
+ { 0x0312, 0x0000 }, /* R786 - AIF2 Master/Slave */
+ { 0x0313, 0x0040 }, /* R787 - AIF2 BCLK */
+ { 0x0314, 0x0040 }, /* R788 - AIF2ADC LRCLK */
+ { 0x0315, 0x0040 }, /* R789 - AIF2DAC LRCLK */
+ { 0x0316, 0x0000 }, /* R790 - AIF2DAC Data */
+ { 0x0317, 0x0000 }, /* R791 - AIF2ADC Data */
+ { 0x0400, 0x00C0 }, /* R1024 - AIF1 ADC1 Left Volume */
+ { 0x0401, 0x00C0 }, /* R1025 - AIF1 ADC1 Right Volume */
+ { 0x0402, 0x00C0 }, /* R1026 - AIF1 DAC1 Left Volume */
+ { 0x0403, 0x00C0 }, /* R1027 - AIF1 DAC1 Right Volume */
+ { 0x0404, 0x00C0 }, /* R1028 - AIF1 ADC2 Left Volume */
+ { 0x0405, 0x00C0 }, /* R1029 - AIF1 ADC2 Right Volume */
+ { 0x0406, 0x00C0 }, /* R1030 - AIF1 DAC2 Left Volume */
+ { 0x0407, 0x00C0 }, /* R1031 - AIF1 DAC2 Right Volume */
+ { 0x0410, 0x0000 }, /* R1040 - AIF1 ADC1 Filters */
+ { 0x0411, 0x0000 }, /* R1041 - AIF1 ADC2 Filters */
+ { 0x0420, 0x0200 }, /* R1056 - AIF1 DAC1 Filters (1) */
+ { 0x0421, 0x0010 }, /* R1057 - AIF1 DAC1 Filters (2) */
+ { 0x0422, 0x0200 }, /* R1058 - AIF1 DAC2 Filters (1) */
+ { 0x0423, 0x0010 }, /* R1059 - AIF1 DAC2 Filters (2) */
+ { 0x0440, 0x0098 }, /* R1088 - AIF1 DRC1 (1) */
+ { 0x0441, 0x0845 }, /* R1089 - AIF1 DRC1 (2) */
+ { 0x0442, 0x0000 }, /* R1090 - AIF1 DRC1 (3) */
+ { 0x0443, 0x0000 }, /* R1091 - AIF1 DRC1 (4) */
+ { 0x0444, 0x0000 }, /* R1092 - AIF1 DRC1 (5) */
+ { 0x0450, 0x0098 }, /* R1104 - AIF1 DRC2 (1) */
+ { 0x0451, 0x0845 }, /* R1105 - AIF1 DRC2 (2) */
+ { 0x0452, 0x0000 }, /* R1106 - AIF1 DRC2 (3) */
+ { 0x0453, 0x0000 }, /* R1107 - AIF1 DRC2 (4) */
+ { 0x0454, 0x0000 }, /* R1108 - AIF1 DRC2 (5) */
+ { 0x0480, 0x6318 }, /* R1152 - AIF1 DAC1 EQ Gains (1) */
+ { 0x0481, 0x6300 }, /* R1153 - AIF1 DAC1 EQ Gains (2) */
+ { 0x0482, 0x0FCA }, /* R1154 - AIF1 DAC1 EQ Band 1 A */
+ { 0x0483, 0x0400 }, /* R1155 - AIF1 DAC1 EQ Band 1 B */
+ { 0x0484, 0x00D8 }, /* R1156 - AIF1 DAC1 EQ Band 1 PG */
+ { 0x0485, 0x1EB5 }, /* R1157 - AIF1 DAC1 EQ Band 2 A */
+ { 0x0486, 0xF145 }, /* R1158 - AIF1 DAC1 EQ Band 2 B */
+ { 0x0487, 0x0B75 }, /* R1159 - AIF1 DAC1 EQ Band 2 C */
+ { 0x0488, 0x01C5 }, /* R1160 - AIF1 DAC1 EQ Band 2 PG */
+ { 0x0489, 0x1C58 }, /* R1161 - AIF1 DAC1 EQ Band 3 A */
+ { 0x048A, 0xF373 }, /* R1162 - AIF1 DAC1 EQ Band 3 B */
+ { 0x048B, 0x0A54 }, /* R1163 - AIF1 DAC1 EQ Band 3 C */
+ { 0x048C, 0x0558 }, /* R1164 - AIF1 DAC1 EQ Band 3 PG */
+ { 0x048D, 0x168E }, /* R1165 - AIF1 DAC1 EQ Band 4 A */
+ { 0x048E, 0xF829 }, /* R1166 - AIF1 DAC1 EQ Band 4 B */
+ { 0x048F, 0x07AD }, /* R1167 - AIF1 DAC1 EQ Band 4 C */
+ { 0x0490, 0x1103 }, /* R1168 - AIF1 DAC1 EQ Band 4 PG */
+ { 0x0491, 0x0564 }, /* R1169 - AIF1 DAC1 EQ Band 5 A */
+ { 0x0492, 0x0559 }, /* R1170 - AIF1 DAC1 EQ Band 5 B */
+ { 0x0493, 0x4000 }, /* R1171 - AIF1 DAC1 EQ Band 5 PG */
+ { 0x04A0, 0x6318 }, /* R1184 - AIF1 DAC2 EQ Gains (1) */
+ { 0x04A1, 0x6300 }, /* R1185 - AIF1 DAC2 EQ Gains (2) */
+ { 0x04A2, 0x0FCA }, /* R1186 - AIF1 DAC2 EQ Band 1 A */
+ { 0x04A3, 0x0400 }, /* R1187 - AIF1 DAC2 EQ Band 1 B */
+ { 0x04A4, 0x00D8 }, /* R1188 - AIF1 DAC2 EQ Band 1 PG */
+ { 0x04A5, 0x1EB5 }, /* R1189 - AIF1 DAC2 EQ Band 2 A */
+ { 0x04A6, 0xF145 }, /* R1190 - AIF1 DAC2 EQ Band 2 B */
+ { 0x04A7, 0x0B75 }, /* R1191 - AIF1 DAC2 EQ Band 2 C */
+ { 0x04A8, 0x01C5 }, /* R1192 - AIF1 DAC2 EQ Band 2 PG */
+ { 0x04A9, 0x1C58 }, /* R1193 - AIF1 DAC2 EQ Band 3 A */
+ { 0x04AA, 0xF373 }, /* R1194 - AIF1 DAC2 EQ Band 3 B */
+ { 0x04AB, 0x0A54 }, /* R1195 - AIF1 DAC2 EQ Band 3 C */
+ { 0x04AC, 0x0558 }, /* R1196 - AIF1 DAC2 EQ Band 3 PG */
+ { 0x04AD, 0x168E }, /* R1197 - AIF1 DAC2 EQ Band 4 A */
+ { 0x04AE, 0xF829 }, /* R1198 - AIF1 DAC2 EQ Band 4 B */
+ { 0x04AF, 0x07AD }, /* R1199 - AIF1 DAC2 EQ Band 4 C */
+ { 0x04B0, 0x1103 }, /* R1200 - AIF1 DAC2 EQ Band 4 PG */
+ { 0x04B1, 0x0564 }, /* R1201 - AIF1 DAC2 EQ Band 5 A */
+ { 0x04B2, 0x0559 }, /* R1202 - AIF1 DAC2 EQ Band 5 B */
+ { 0x04B3, 0x4000 }, /* R1203 - AIF1 DAC2 EQ Band 5 PG */
+ { 0x0500, 0x00C0 }, /* R1280 - AIF2 ADC Left Volume */
+ { 0x0501, 0x00C0 }, /* R1281 - AIF2 ADC Right Volume */
+ { 0x0502, 0x00C0 }, /* R1282 - AIF2 DAC Left Volume */
+ { 0x0503, 0x00C0 }, /* R1283 - AIF2 DAC Right Volume */
+ { 0x0510, 0x0000 }, /* R1296 - AIF2 ADC Filters */
+ { 0x0520, 0x0200 }, /* R1312 - AIF2 DAC Filters (1) */
+ { 0x0521, 0x0010 }, /* R1313 - AIF2 DAC Filters (2) */
+ { 0x0540, 0x0098 }, /* R1344 - AIF2 DRC (1) */
+ { 0x0541, 0x0845 }, /* R1345 - AIF2 DRC (2) */
+ { 0x0542, 0x0000 }, /* R1346 - AIF2 DRC (3) */
+ { 0x0543, 0x0000 }, /* R1347 - AIF2 DRC (4) */
+ { 0x0544, 0x0000 }, /* R1348 - AIF2 DRC (5) */
+ { 0x0580, 0x6318 }, /* R1408 - AIF2 EQ Gains (1) */
+ { 0x0581, 0x6300 }, /* R1409 - AIF2 EQ Gains (2) */
+ { 0x0582, 0x0FCA }, /* R1410 - AIF2 EQ Band 1 A */
+ { 0x0583, 0x0400 }, /* R1411 - AIF2 EQ Band 1 B */
+ { 0x0584, 0x00D8 }, /* R1412 - AIF2 EQ Band 1 PG */
+ { 0x0585, 0x1EB5 }, /* R1413 - AIF2 EQ Band 2 A */
+ { 0x0586, 0xF145 }, /* R1414 - AIF2 EQ Band 2 B */
+ { 0x0587, 0x0B75 }, /* R1415 - AIF2 EQ Band 2 C */
+ { 0x0588, 0x01C5 }, /* R1416 - AIF2 EQ Band 2 PG */
+ { 0x0589, 0x1C58 }, /* R1417 - AIF2 EQ Band 3 A */
+ { 0x058A, 0xF373 }, /* R1418 - AIF2 EQ Band 3 B */
+ { 0x058B, 0x0A54 }, /* R1419 - AIF2 EQ Band 3 C */
+ { 0x058C, 0x0558 }, /* R1420 - AIF2 EQ Band 3 PG */
+ { 0x058D, 0x168E }, /* R1421 - AIF2 EQ Band 4 A */
+ { 0x058E, 0xF829 }, /* R1422 - AIF2 EQ Band 4 B */
+ { 0x058F, 0x07AD }, /* R1423 - AIF2 EQ Band 4 C */
+ { 0x0590, 0x1103 }, /* R1424 - AIF2 EQ Band 4 PG */
+ { 0x0591, 0x0564 }, /* R1425 - AIF2 EQ Band 5 A */
+ { 0x0592, 0x0559 }, /* R1426 - AIF2 EQ Band 5 B */
+ { 0x0593, 0x4000 }, /* R1427 - AIF2 EQ Band 5 PG */
+ { 0x0600, 0x0000 }, /* R1536 - DAC1 Mixer Volumes */
+ { 0x0601, 0x0000 }, /* R1537 - DAC1 Left Mixer Routing */
+ { 0x0602, 0x0000 }, /* R1538 - DAC1 Right Mixer Routing */
+ { 0x0603, 0x0000 }, /* R1539 - DAC2 Mixer Volumes */
+ { 0x0604, 0x0000 }, /* R1540 - DAC2 Left Mixer Routing */
+ { 0x0605, 0x0000 }, /* R1541 - DAC2 Right Mixer Routing */
+ { 0x0606, 0x0000 }, /* R1542 - AIF1 ADC1 Left Mixer Routing */
+ { 0x0607, 0x0000 }, /* R1543 - AIF1 ADC1 Right Mixer Routing */
+ { 0x0608, 0x0000 }, /* R1544 - AIF1 ADC2 Left Mixer Routing */
+ { 0x0609, 0x0000 }, /* R1545 - AIF1 ADC2 Right Mixer Routing */
+ { 0x0610, 0x02C0 }, /* R1552 - DAC1 Left Volume */
+ { 0x0611, 0x02C0 }, /* R1553 - DAC1 Right Volume */
+ { 0x0612, 0x02C0 }, /* R1554 - DAC2 Left Volume */
+ { 0x0613, 0x02C0 }, /* R1555 - DAC2 Right Volume */
+ { 0x0614, 0x0000 }, /* R1556 - DAC Softmute */
+ { 0x0620, 0x0002 }, /* R1568 - Oversampling */
+ { 0x0621, 0x0000 }, /* R1569 - Sidetone */
+ { 0x0700, 0x8100 }, /* R1792 - GPIO 1 */
+ { 0x0701, 0xA101 }, /* R1793 - GPIO 2 */
+ { 0x0702, 0xA101 }, /* R1794 - GPIO 3 */
+ { 0x0703, 0xA101 }, /* R1795 - GPIO 4 */
+ { 0x0704, 0xA101 }, /* R1796 - GPIO 5 */
+ { 0x0705, 0xA101 }, /* R1797 - GPIO 6 */
+ { 0x0706, 0xA101 }, /* R1798 - GPIO 7 */
+ { 0x0707, 0xA101 }, /* R1799 - GPIO 8 */
+ { 0x0708, 0xA101 }, /* R1800 - GPIO 9 */
+ { 0x0709, 0xA101 }, /* R1801 - GPIO 10 */
+ { 0x070A, 0xA101 }, /* R1802 - GPIO 11 */
+ { 0x0720, 0x0000 }, /* R1824 - Pull Control (1) */
+ { 0x0721, 0x0156 }, /* R1825 - Pull Control (2) */
+ { 0x0730, 0x0000 }, /* R1840 - Interrupt Status 1 */
+ { 0x0731, 0x0000 }, /* R1841 - Interrupt Status 2 */
+ { 0x0732, 0x0000 }, /* R1842 - Interrupt Raw Status 2 */
+ { 0x0738, 0x07FF }, /* R1848 - Interrupt Status 1 Mask */
+ { 0x0739, 0xFFFF }, /* R1849 - Interrupt Status 2 Mask */
+ { 0x0740, 0x0000 }, /* R1856 - Interrupt Control */
+ { 0x0748, 0x003F }, /* R1864 - IRQ Debounce */
+};
+
+static struct reg_default wm8958_defaults[] = {
+ { 0x0000, 0x8958 }, /* R0 - Software Reset */
+ { 0x0001, 0x0000 }, /* R1 - Power Management (1) */
+ { 0x0002, 0x6000 }, /* R2 - Power Management (2) */
+ { 0x0003, 0x0000 }, /* R3 - Power Management (3) */
+ { 0x0004, 0x0000 }, /* R4 - Power Management (4) */
+ { 0x0005, 0x0000 }, /* R5 - Power Management (5) */
+ { 0x0006, 0x0000 }, /* R6 - Power Management (6) */
+ { 0x0015, 0x0000 }, /* R21 - Input Mixer (1) */
+ { 0x0018, 0x008B }, /* R24 - Left Line Input 1&2 Volume */
+ { 0x0019, 0x008B }, /* R25 - Left Line Input 3&4 Volume */
+ { 0x001A, 0x008B }, /* R26 - Right Line Input 1&2 Volume */
+ { 0x001B, 0x008B }, /* R27 - Right Line Input 3&4 Volume */
+ { 0x001C, 0x006D }, /* R28 - Left Output Volume */
+ { 0x001D, 0x006D }, /* R29 - Right Output Volume */
+ { 0x001E, 0x0066 }, /* R30 - Line Outputs Volume */
+ { 0x001F, 0x0020 }, /* R31 - HPOUT2 Volume */
+ { 0x0020, 0x0079 }, /* R32 - Left OPGA Volume */
+ { 0x0021, 0x0079 }, /* R33 - Right OPGA Volume */
+ { 0x0022, 0x0003 }, /* R34 - SPKMIXL Attenuation */
+ { 0x0023, 0x0003 }, /* R35 - SPKMIXR Attenuation */
+ { 0x0024, 0x0011 }, /* R36 - SPKOUT Mixers */
+ { 0x0025, 0x0140 }, /* R37 - ClassD */
+ { 0x0026, 0x0079 }, /* R38 - Speaker Volume Left */
+ { 0x0027, 0x0079 }, /* R39 - Speaker Volume Right */
+ { 0x0028, 0x0000 }, /* R40 - Input Mixer (2) */
+ { 0x0029, 0x0000 }, /* R41 - Input Mixer (3) */
+ { 0x002A, 0x0000 }, /* R42 - Input Mixer (4) */
+ { 0x002B, 0x0000 }, /* R43 - Input Mixer (5) */
+ { 0x002C, 0x0000 }, /* R44 - Input Mixer (6) */
+ { 0x002D, 0x0000 }, /* R45 - Output Mixer (1) */
+ { 0x002E, 0x0000 }, /* R46 - Output Mixer (2) */
+ { 0x002F, 0x0000 }, /* R47 - Output Mixer (3) */
+ { 0x0030, 0x0000 }, /* R48 - Output Mixer (4) */
+ { 0x0031, 0x0000 }, /* R49 - Output Mixer (5) */
+ { 0x0032, 0x0000 }, /* R50 - Output Mixer (6) */
+ { 0x0033, 0x0000 }, /* R51 - HPOUT2 Mixer */
+ { 0x0034, 0x0000 }, /* R52 - Line Mixer (1) */
+ { 0x0035, 0x0000 }, /* R53 - Line Mixer (2) */
+ { 0x0036, 0x0000 }, /* R54 - Speaker Mixer */
+ { 0x0037, 0x0000 }, /* R55 - Additional Control */
+ { 0x0038, 0x0000 }, /* R56 - AntiPOP (1) */
+ { 0x0039, 0x0180 }, /* R57 - AntiPOP (2) */
+ { 0x003B, 0x000D }, /* R59 - LDO 1 */
+ { 0x003C, 0x0005 }, /* R60 - LDO 2 */
+ { 0x003D, 0x0039 }, /* R61 - MICBIAS1 */
+ { 0x003E, 0x0039 }, /* R62 - MICBIAS2 */
+ { 0x004C, 0x1F25 }, /* R76 - Charge Pump (1) */
+ { 0x004D, 0xAB19 }, /* R77 - Charge Pump (2) */
+ { 0x0051, 0x0004 }, /* R81 - Class W (1) */
+ { 0x0055, 0x054A }, /* R85 - DC Servo (2) */
+ { 0x0057, 0x0000 }, /* R87 - DC Servo (4) */
+ { 0x0060, 0x0000 }, /* R96 - Analogue HP (1) */
+ { 0x00C5, 0x0000 }, /* R197 - Class D Test (5) */
+ { 0x00D0, 0x5600 }, /* R208 - Mic Detect 1 */
+ { 0x00D1, 0x007F }, /* R209 - Mic Detect 2 */
+ { 0x0101, 0x8004 }, /* R257 - Control Interface */
+ { 0x0110, 0x0000 }, /* R272 - Write Sequencer Ctrl (1) */
+ { 0x0111, 0x0000 }, /* R273 - Write Sequencer Ctrl (2) */
+ { 0x0200, 0x0000 }, /* R512 - AIF1 Clocking (1) */
+ { 0x0201, 0x0000 }, /* R513 - AIF1 Clocking (2) */
+ { 0x0204, 0x0000 }, /* R516 - AIF2 Clocking (1) */
+ { 0x0205, 0x0000 }, /* R517 - AIF2 Clocking (2) */
+ { 0x0208, 0x0000 }, /* R520 - Clocking (1) */
+ { 0x0209, 0x0000 }, /* R521 - Clocking (2) */
+ { 0x0210, 0x0083 }, /* R528 - AIF1 Rate */
+ { 0x0211, 0x0083 }, /* R529 - AIF2 Rate */
+ { 0x0220, 0x0000 }, /* R544 - FLL1 Control (1) */
+ { 0x0221, 0x0000 }, /* R545 - FLL1 Control (2) */
+ { 0x0222, 0x0000 }, /* R546 - FLL1 Control (3) */
+ { 0x0223, 0x0000 }, /* R547 - FLL1 Control (4) */
+ { 0x0224, 0x0C80 }, /* R548 - FLL1 Control (5) */
+ { 0x0226, 0x0000 }, /* R550 - FLL1 EFS 1 */
+ { 0x0227, 0x0006 }, /* R551 - FLL1 EFS 2 */
+ { 0x0240, 0x0000 }, /* R576 - FLL2 Control (1) */
+ { 0x0241, 0x0000 }, /* R577 - FLL2 Control (2) */
+ { 0x0242, 0x0000 }, /* R578 - FLL2 Control (3) */
+ { 0x0243, 0x0000 }, /* R579 - FLL2 Control (4) */
+ { 0x0244, 0x0C80 }, /* R580 - FLL2 Control (5) */
+ { 0x0246, 0x0000 }, /* R582 - FLL2 EFS 1 */
+ { 0x0247, 0x0006 }, /* R583 - FLL2 EFS 2 */
+ { 0x0300, 0x4050 }, /* R768 - AIF1 Control (1) */
+ { 0x0301, 0x4000 }, /* R769 - AIF1 Control (2) */
+ { 0x0302, 0x0000 }, /* R770 - AIF1 Master/Slave */
+ { 0x0303, 0x0040 }, /* R771 - AIF1 BCLK */
+ { 0x0304, 0x0040 }, /* R772 - AIF1ADC LRCLK */
+ { 0x0305, 0x0040 }, /* R773 - AIF1DAC LRCLK */
+ { 0x0306, 0x0004 }, /* R774 - AIF1DAC Data */
+ { 0x0307, 0x0100 }, /* R775 - AIF1ADC Data */
+ { 0x0310, 0x4053 }, /* R784 - AIF2 Control (1) */
+ { 0x0311, 0x4000 }, /* R785 - AIF2 Control (2) */
+ { 0x0312, 0x0000 }, /* R786 - AIF2 Master/Slave */
+ { 0x0313, 0x0040 }, /* R787 - AIF2 BCLK */
+ { 0x0314, 0x0040 }, /* R788 - AIF2ADC LRCLK */
+ { 0x0315, 0x0040 }, /* R789 - AIF2DAC LRCLK */
+ { 0x0316, 0x0000 }, /* R790 - AIF2DAC Data */
+ { 0x0317, 0x0000 }, /* R791 - AIF2ADC Data */
+ { 0x0320, 0x0040 }, /* R800 - AIF3 Control (1) */
+ { 0x0321, 0x0000 }, /* R801 - AIF3 Control (2) */
+ { 0x0322, 0x0000 }, /* R802 - AIF3DAC Data */
+ { 0x0323, 0x0000 }, /* R803 - AIF3ADC Data */
+ { 0x0400, 0x00C0 }, /* R1024 - AIF1 ADC1 Left Volume */
+ { 0x0401, 0x00C0 }, /* R1025 - AIF1 ADC1 Right Volume */
+ { 0x0402, 0x00C0 }, /* R1026 - AIF1 DAC1 Left Volume */
+ { 0x0403, 0x00C0 }, /* R1027 - AIF1 DAC1 Right Volume */
+ { 0x0404, 0x00C0 }, /* R1028 - AIF1 ADC2 Left Volume */
+ { 0x0405, 0x00C0 }, /* R1029 - AIF1 ADC2 Right Volume */
+ { 0x0406, 0x00C0 }, /* R1030 - AIF1 DAC2 Left Volume */
+ { 0x0407, 0x00C0 }, /* R1031 - AIF1 DAC2 Right Volume */
+ { 0x0410, 0x0000 }, /* R1040 - AIF1 ADC1 Filters */
+ { 0x0411, 0x0000 }, /* R1041 - AIF1 ADC2 Filters */
+ { 0x0420, 0x0200 }, /* R1056 - AIF1 DAC1 Filters (1) */
+ { 0x0421, 0x0010 }, /* R1057 - AIF1 DAC1 Filters (2) */
+ { 0x0422, 0x0200 }, /* R1058 - AIF1 DAC2 Filters (1) */
+ { 0x0423, 0x0010 }, /* R1059 - AIF1 DAC2 Filters (2) */
+ { 0x0430, 0x0068 }, /* R1072 - AIF1 DAC1 Noise Gate */
+ { 0x0431, 0x0068 }, /* R1073 - AIF1 DAC2 Noise Gate */
+ { 0x0440, 0x0098 }, /* R1088 - AIF1 DRC1 (1) */
+ { 0x0441, 0x0845 }, /* R1089 - AIF1 DRC1 (2) */
+ { 0x0442, 0x0000 }, /* R1090 - AIF1 DRC1 (3) */
+ { 0x0443, 0x0000 }, /* R1091 - AIF1 DRC1 (4) */
+ { 0x0444, 0x0000 }, /* R1092 - AIF1 DRC1 (5) */
+ { 0x0450, 0x0098 }, /* R1104 - AIF1 DRC2 (1) */
+ { 0x0451, 0x0845 }, /* R1105 - AIF1 DRC2 (2) */
+ { 0x0452, 0x0000 }, /* R1106 - AIF1 DRC2 (3) */
+ { 0x0453, 0x0000 }, /* R1107 - AIF1 DRC2 (4) */
+ { 0x0454, 0x0000 }, /* R1108 - AIF1 DRC2 (5) */
+ { 0x0480, 0x6318 }, /* R1152 - AIF1 DAC1 EQ Gains (1) */
+ { 0x0481, 0x6300 }, /* R1153 - AIF1 DAC1 EQ Gains (2) */
+ { 0x0482, 0x0FCA }, /* R1154 - AIF1 DAC1 EQ Band 1 A */
+ { 0x0483, 0x0400 }, /* R1155 - AIF1 DAC1 EQ Band 1 B */
+ { 0x0484, 0x00D8 }, /* R1156 - AIF1 DAC1 EQ Band 1 PG */
+ { 0x0485, 0x1EB5 }, /* R1157 - AIF1 DAC1 EQ Band 2 A */
+ { 0x0486, 0xF145 }, /* R1158 - AIF1 DAC1 EQ Band 2 B */
+ { 0x0487, 0x0B75 }, /* R1159 - AIF1 DAC1 EQ Band 2 C */
+ { 0x0488, 0x01C5 }, /* R1160 - AIF1 DAC1 EQ Band 2 PG */
+ { 0x0489, 0x1C58 }, /* R1161 - AIF1 DAC1 EQ Band 3 A */
+ { 0x048A, 0xF373 }, /* R1162 - AIF1 DAC1 EQ Band 3 B */
+ { 0x048B, 0x0A54 }, /* R1163 - AIF1 DAC1 EQ Band 3 C */
+ { 0x048C, 0x0558 }, /* R1164 - AIF1 DAC1 EQ Band 3 PG */
+ { 0x048D, 0x168E }, /* R1165 - AIF1 DAC1 EQ Band 4 A */
+ { 0x048E, 0xF829 }, /* R1166 - AIF1 DAC1 EQ Band 4 B */
+ { 0x048F, 0x07AD }, /* R1167 - AIF1 DAC1 EQ Band 4 C */
+ { 0x0490, 0x1103 }, /* R1168 - AIF1 DAC1 EQ Band 4 PG */
+ { 0x0491, 0x0564 }, /* R1169 - AIF1 DAC1 EQ Band 5 A */
+ { 0x0492, 0x0559 }, /* R1170 - AIF1 DAC1 EQ Band 5 B */
+ { 0x0493, 0x4000 }, /* R1171 - AIF1 DAC1 EQ Band 5 PG */
+ { 0x0494, 0x0000 }, /* R1172 - AIF1 DAC1 EQ Band 1 C */
+ { 0x04A0, 0x6318 }, /* R1184 - AIF1 DAC2 EQ Gains (1) */
+ { 0x04A1, 0x6300 }, /* R1185 - AIF1 DAC2 EQ Gains (2) */
+ { 0x04A2, 0x0FCA }, /* R1186 - AIF1 DAC2 EQ Band 1 A */
+ { 0x04A3, 0x0400 }, /* R1187 - AIF1 DAC2 EQ Band 1 B */
+ { 0x04A4, 0x00D8 }, /* R1188 - AIF1 DAC2 EQ Band 1 PG */
+ { 0x04A5, 0x1EB5 }, /* R1189 - AIF1 DAC2 EQ Band 2 A */
+ { 0x04A6, 0xF145 }, /* R1190 - AIF1 DAC2 EQ Band 2 B */
+ { 0x04A7, 0x0B75 }, /* R1191 - AIF1 DAC2 EQ Band 2 C */
+ { 0x04A8, 0x01C5 }, /* R1192 - AIF1 DAC2 EQ Band 2 PG */
+ { 0x04A9, 0x1C58 }, /* R1193 - AIF1 DAC2 EQ Band 3 A */
+ { 0x04AA, 0xF373 }, /* R1194 - AIF1 DAC2 EQ Band 3 B */
+ { 0x04AB, 0x0A54 }, /* R1195 - AIF1 DAC2 EQ Band 3 C */
+ { 0x04AC, 0x0558 }, /* R1196 - AIF1 DAC2 EQ Band 3 PG */
+ { 0x04AD, 0x168E }, /* R1197 - AIF1 DAC2 EQ Band 4 A */
+ { 0x04AE, 0xF829 }, /* R1198 - AIF1 DAC2 EQ Band 4 B */
+ { 0x04AF, 0x07AD }, /* R1199 - AIF1 DAC2 EQ Band 4 C */
+ { 0x04B0, 0x1103 }, /* R1200 - AIF1 DAC2 EQ Band 4 PG */
+ { 0x04B1, 0x0564 }, /* R1201 - AIF1 DAC2 EQ Band 5 A */
+ { 0x04B2, 0x0559 }, /* R1202 - AIF1 DAC2 EQ Band 5 B */
+ { 0x04B3, 0x4000 }, /* R1203 - AIF1 DAC2 EQ Band 5 PG */
+ { 0x04B4, 0x0000 }, /* R1204 - AIF1 DAC2 EQ Band 1 C */
+ { 0x0500, 0x00C0 }, /* R1280 - AIF2 ADC Left Volume */
+ { 0x0501, 0x00C0 }, /* R1281 - AIF2 ADC Right Volume */
+ { 0x0502, 0x00C0 }, /* R1282 - AIF2 DAC Left Volume */
+ { 0x0503, 0x00C0 }, /* R1283 - AIF2 DAC Right Volume */
+ { 0x0510, 0x0000 }, /* R1296 - AIF2 ADC Filters */
+ { 0x0520, 0x0200 }, /* R1312 - AIF2 DAC Filters (1) */
+ { 0x0521, 0x0010 }, /* R1313 - AIF2 DAC Filters (2) */
+ { 0x0530, 0x0068 }, /* R1328 - AIF2 DAC Noise Gate */
+ { 0x0540, 0x0098 }, /* R1344 - AIF2 DRC (1) */
+ { 0x0541, 0x0845 }, /* R1345 - AIF2 DRC (2) */
+ { 0x0542, 0x0000 }, /* R1346 - AIF2 DRC (3) */
+ { 0x0543, 0x0000 }, /* R1347 - AIF2 DRC (4) */
+ { 0x0544, 0x0000 }, /* R1348 - AIF2 DRC (5) */
+ { 0x0580, 0x6318 }, /* R1408 - AIF2 EQ Gains (1) */
+ { 0x0581, 0x6300 }, /* R1409 - AIF2 EQ Gains (2) */
+ { 0x0582, 0x0FCA }, /* R1410 - AIF2 EQ Band 1 A */
+ { 0x0583, 0x0400 }, /* R1411 - AIF2 EQ Band 1 B */
+ { 0x0584, 0x00D8 }, /* R1412 - AIF2 EQ Band 1 PG */
+ { 0x0585, 0x1EB5 }, /* R1413 - AIF2 EQ Band 2 A */
+ { 0x0586, 0xF145 }, /* R1414 - AIF2 EQ Band 2 B */
+ { 0x0587, 0x0B75 }, /* R1415 - AIF2 EQ Band 2 C */
+ { 0x0588, 0x01C5 }, /* R1416 - AIF2 EQ Band 2 PG */
+ { 0x0589, 0x1C58 }, /* R1417 - AIF2 EQ Band 3 A */
+ { 0x058A, 0xF373 }, /* R1418 - AIF2 EQ Band 3 B */
+ { 0x058B, 0x0A54 }, /* R1419 - AIF2 EQ Band 3 C */
+ { 0x058C, 0x0558 }, /* R1420 - AIF2 EQ Band 3 PG */
+ { 0x058D, 0x168E }, /* R1421 - AIF2 EQ Band 4 A */
+ { 0x058E, 0xF829 }, /* R1422 - AIF2 EQ Band 4 B */
+ { 0x058F, 0x07AD }, /* R1423 - AIF2 EQ Band 4 C */
+ { 0x0590, 0x1103 }, /* R1424 - AIF2 EQ Band 4 PG */
+ { 0x0591, 0x0564 }, /* R1425 - AIF2 EQ Band 5 A */
+ { 0x0592, 0x0559 }, /* R1426 - AIF2 EQ Band 5 B */
+ { 0x0593, 0x4000 }, /* R1427 - AIF2 EQ Band 5 PG */
+ { 0x0594, 0x0000 }, /* R1428 - AIF2 EQ Band 1 C */
+ { 0x0600, 0x0000 }, /* R1536 - DAC1 Mixer Volumes */
+ { 0x0601, 0x0000 }, /* R1537 - DAC1 Left Mixer Routing */
+ { 0x0602, 0x0000 }, /* R1538 - DAC1 Right Mixer Routing */
+ { 0x0603, 0x0000 }, /* R1539 - DAC2 Mixer Volumes */
+ { 0x0604, 0x0000 }, /* R1540 - DAC2 Left Mixer Routing */
+ { 0x0605, 0x0000 }, /* R1541 - DAC2 Right Mixer Routing */
+ { 0x0606, 0x0000 }, /* R1542 - AIF1 ADC1 Left Mixer Routing */
+ { 0x0607, 0x0000 }, /* R1543 - AIF1 ADC1 Right Mixer Routing */
+ { 0x0608, 0x0000 }, /* R1544 - AIF1 ADC2 Left Mixer Routing */
+ { 0x0609, 0x0000 }, /* R1545 - AIF1 ADC2 Right Mixer Routing */
+ { 0x0610, 0x02C0 }, /* R1552 - DAC1 Left Volume */
+ { 0x0611, 0x02C0 }, /* R1553 - DAC1 Right Volume */
+ { 0x0612, 0x02C0 }, /* R1554 - DAC2 Left Volume */
+ { 0x0613, 0x02C0 }, /* R1555 - DAC2 Right Volume */
+ { 0x0614, 0x0000 }, /* R1556 - DAC Softmute */
+ { 0x0620, 0x0002 }, /* R1568 - Oversampling */
+ { 0x0621, 0x0000 }, /* R1569 - Sidetone */
+ { 0x0700, 0x8100 }, /* R1792 - GPIO 1 */
+ { 0x0701, 0xA101 }, /* R1793 - Pull Control (MCLK2) */
+ { 0x0702, 0xA101 }, /* R1794 - Pull Control (BCLK2) */
+ { 0x0703, 0xA101 }, /* R1795 - Pull Control (DACLRCLK2) */
+ { 0x0704, 0xA101 }, /* R1796 - Pull Control (DACDAT2) */
+ { 0x0705, 0xA101 }, /* R1797 - GPIO 6 */
+ { 0x0707, 0xA101 }, /* R1799 - GPIO 8 */
+ { 0x0708, 0xA101 }, /* R1800 - GPIO 9 */
+ { 0x0709, 0xA101 }, /* R1801 - GPIO 10 */
+ { 0x070A, 0xA101 }, /* R1802 - GPIO 11 */
+ { 0x0720, 0x0000 }, /* R1824 - Pull Control (1) */
+ { 0x0721, 0x0156 }, /* R1825 - Pull Control (2) */
+ { 0x0738, 0x07FF }, /* R1848 - Interrupt Status 1 Mask */
+ { 0x0739, 0xFFEF }, /* R1849 - Interrupt Status 2 Mask */
+ { 0x0740, 0x0000 }, /* R1856 - Interrupt Control */
+ { 0x0748, 0x003F }, /* R1864 - IRQ Debounce */
+ { 0x0900, 0x1C00 }, /* R2304 - DSP2_Program */
+ { 0x0901, 0x0000 }, /* R2305 - DSP2_Config */
+ { 0x0A0D, 0x0000 }, /* R2573 - DSP2_ExecControl */
+ { 0x2400, 0x003F }, /* R9216 - MBC Band 1 K (1) */
+ { 0x2401, 0x8BD8 }, /* R9217 - MBC Band 1 K (2) */
+ { 0x2402, 0x0032 }, /* R9218 - MBC Band 1 N1 (1) */
+ { 0x2403, 0xF52D }, /* R9219 - MBC Band 1 N1 (2) */
+ { 0x2404, 0x0065 }, /* R9220 - MBC Band 1 N2 (1) */
+ { 0x2405, 0xAC8C }, /* R9221 - MBC Band 1 N2 (2) */
+ { 0x2406, 0x006B }, /* R9222 - MBC Band 1 N3 (1) */
+ { 0x2407, 0xE087 }, /* R9223 - MBC Band 1 N3 (2) */
+ { 0x2408, 0x0072 }, /* R9224 - MBC Band 1 N4 (1) */
+ { 0x2409, 0x1483 }, /* R9225 - MBC Band 1 N4 (2) */
+ { 0x240A, 0x0072 }, /* R9226 - MBC Band 1 N5 (1) */
+ { 0x240B, 0x1483 }, /* R9227 - MBC Band 1 N5 (2) */
+ { 0x240C, 0x0043 }, /* R9228 - MBC Band 1 X1 (1) */
+ { 0x240D, 0x3525 }, /* R9229 - MBC Band 1 X1 (2) */
+ { 0x240E, 0x0006 }, /* R9230 - MBC Band 1 X2 (1) */
+ { 0x240F, 0x6A4A }, /* R9231 - MBC Band 1 X2 (2) */
+ { 0x2410, 0x0043 }, /* R9232 - MBC Band 1 X3 (1) */
+ { 0x2411, 0x6079 }, /* R9233 - MBC Band 1 X3 (2) */
+ { 0x2412, 0x000C }, /* R9234 - MBC Band 1 Attack (1) */
+ { 0x2413, 0xCCCD }, /* R9235 - MBC Band 1 Attack (2) */
+ { 0x2414, 0x0000 }, /* R9236 - MBC Band 1 Decay (1) */
+ { 0x2415, 0x0800 }, /* R9237 - MBC Band 1 Decay (2) */
+ { 0x2416, 0x003F }, /* R9238 - MBC Band 2 K (1) */
+ { 0x2417, 0x8BD8 }, /* R9239 - MBC Band 2 K (2) */
+ { 0x2418, 0x0032 }, /* R9240 - MBC Band 2 N1 (1) */
+ { 0x2419, 0xF52D }, /* R9241 - MBC Band 2 N1 (2) */
+ { 0x241A, 0x0065 }, /* R9242 - MBC Band 2 N2 (1) */
+ { 0x241B, 0xAC8C }, /* R9243 - MBC Band 2 N2 (2) */
+ { 0x241C, 0x006B }, /* R9244 - MBC Band 2 N3 (1) */
+ { 0x241D, 0xE087 }, /* R9245 - MBC Band 2 N3 (2) */
+ { 0x241E, 0x0072 }, /* R9246 - MBC Band 2 N4 (1) */
+ { 0x241F, 0x1483 }, /* R9247 - MBC Band 2 N4 (2) */
+ { 0x2420, 0x0072 }, /* R9248 - MBC Band 2 N5 (1) */
+ { 0x2421, 0x1483 }, /* R9249 - MBC Band 2 N5 (2) */
+ { 0x2422, 0x0043 }, /* R9250 - MBC Band 2 X1 (1) */
+ { 0x2423, 0x3525 }, /* R9251 - MBC Band 2 X1 (2) */
+ { 0x2424, 0x0006 }, /* R9252 - MBC Band 2 X2 (1) */
+ { 0x2425, 0x6A4A }, /* R9253 - MBC Band 2 X2 (2) */
+ { 0x2426, 0x0043 }, /* R9254 - MBC Band 2 X3 (1) */
+ { 0x2427, 0x6079 }, /* R9255 - MBC Band 2 X3 (2) */
+ { 0x2428, 0x000C }, /* R9256 - MBC Band 2 Attack (1) */
+ { 0x2429, 0xCCCD }, /* R9257 - MBC Band 2 Attack (2) */
+ { 0x242A, 0x0000 }, /* R9258 - MBC Band 2 Decay (1) */
+ { 0x242B, 0x0800 }, /* R9259 - MBC Band 2 Decay (2) */
+ { 0x242C, 0x005A }, /* R9260 - MBC_B2_PG2 (1) */
+ { 0x242D, 0x7EFA }, /* R9261 - MBC_B2_PG2 (2) */
+ { 0x242E, 0x005A }, /* R9262 - MBC_B1_PG2 (1) */
+ { 0x242F, 0x7EFA }, /* R9263 - MBC_B1_PG2 (2) */
+ { 0x2600, 0x00A7 }, /* R9728 - MBC Crossover (1) */
+ { 0x2601, 0x0D1C }, /* R9729 - MBC Crossover (2) */
+ { 0x2602, 0x0083 }, /* R9730 - MBC HPF (1) */
+ { 0x2603, 0x98AD }, /* R9731 - MBC HPF (2) */
+ { 0x2606, 0x0008 }, /* R9734 - MBC LPF (1) */
+ { 0x2607, 0xE7A2 }, /* R9735 - MBC LPF (2) */
+ { 0x260A, 0x0055 }, /* R9738 - MBC RMS Limit (1) */
+ { 0x260B, 0x8C4B }, /* R9739 - MBC RMS Limit (2) */
+};
+
+static bool wm1811_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case WM8994_SOFTWARE_RESET:
+ case WM8994_POWER_MANAGEMENT_1:
+ case WM8994_POWER_MANAGEMENT_2:
+ case WM8994_POWER_MANAGEMENT_3:
+ case WM8994_POWER_MANAGEMENT_4:
+ case WM8994_POWER_MANAGEMENT_5:
+ case WM8994_POWER_MANAGEMENT_6:
+ case WM8994_INPUT_MIXER_1:
+ case WM8994_LEFT_LINE_INPUT_1_2_VOLUME:
+ case WM8994_LEFT_LINE_INPUT_3_4_VOLUME:
+ case WM8994_RIGHT_LINE_INPUT_1_2_VOLUME:
+ case WM8994_RIGHT_LINE_INPUT_3_4_VOLUME:
+ case WM8994_LEFT_OUTPUT_VOLUME:
+ case WM8994_RIGHT_OUTPUT_VOLUME:
+ case WM8994_LINE_OUTPUTS_VOLUME:
+ case WM8994_HPOUT2_VOLUME:
+ case WM8994_LEFT_OPGA_VOLUME:
+ case WM8994_RIGHT_OPGA_VOLUME:
+ case WM8994_SPKMIXL_ATTENUATION:
+ case WM8994_SPKMIXR_ATTENUATION:
+ case WM8994_SPKOUT_MIXERS:
+ case WM8994_CLASSD:
+ case WM8994_SPEAKER_VOLUME_LEFT:
+ case WM8994_SPEAKER_VOLUME_RIGHT:
+ case WM8994_INPUT_MIXER_2:
+ case WM8994_INPUT_MIXER_3:
+ case WM8994_INPUT_MIXER_4:
+ case WM8994_INPUT_MIXER_5:
+ case WM8994_INPUT_MIXER_6:
+ case WM8994_OUTPUT_MIXER_1:
+ case WM8994_OUTPUT_MIXER_2:
+ case WM8994_OUTPUT_MIXER_3:
+ case WM8994_OUTPUT_MIXER_4:
+ case WM8994_OUTPUT_MIXER_5:
+ case WM8994_OUTPUT_MIXER_6:
+ case WM8994_HPOUT2_MIXER:
+ case WM8994_LINE_MIXER_1:
+ case WM8994_LINE_MIXER_2:
+ case WM8994_SPEAKER_MIXER:
+ case WM8994_ADDITIONAL_CONTROL:
+ case WM8994_ANTIPOP_1:
+ case WM8994_ANTIPOP_2:
+ case WM8994_LDO_1:
+ case WM8994_LDO_2:
+ case WM8958_MICBIAS1:
+ case WM8958_MICBIAS2:
+ case WM8994_CHARGE_PUMP_1:
+ case WM8958_CHARGE_PUMP_2:
+ case WM8994_CLASS_W_1:
+ case WM8994_DC_SERVO_1:
+ case WM8994_DC_SERVO_2:
+ case WM8994_DC_SERVO_READBACK:
+ case WM8994_DC_SERVO_4:
+ case WM8994_DC_SERVO_4E:
+ case WM8994_ANALOGUE_HP_1:
+ case WM8958_MIC_DETECT_1:
+ case WM8958_MIC_DETECT_2:
+ case WM8958_MIC_DETECT_3:
+ case WM8994_CHIP_REVISION:
+ case WM8994_CONTROL_INTERFACE:
+ case WM8994_AIF1_CLOCKING_1:
+ case WM8994_AIF1_CLOCKING_2:
+ case WM8994_AIF2_CLOCKING_1:
+ case WM8994_AIF2_CLOCKING_2:
+ case WM8994_CLOCKING_1:
+ case WM8994_CLOCKING_2:
+ case WM8994_AIF1_RATE:
+ case WM8994_AIF2_RATE:
+ case WM8994_RATE_STATUS:
+ case WM8994_FLL1_CONTROL_1:
+ case WM8994_FLL1_CONTROL_2:
+ case WM8994_FLL1_CONTROL_3:
+ case WM8994_FLL1_CONTROL_4:
+ case WM8994_FLL1_CONTROL_5:
+ case WM8958_FLL1_EFS_1:
+ case WM8958_FLL1_EFS_2:
+ case WM8994_FLL2_CONTROL_1:
+ case WM8994_FLL2_CONTROL_2:
+ case WM8994_FLL2_CONTROL_3:
+ case WM8994_FLL2_CONTROL_4:
+ case WM8994_FLL2_CONTROL_5:
+ case WM8958_FLL2_EFS_1:
+ case WM8958_FLL2_EFS_2:
+ case WM8994_AIF1_CONTROL_1:
+ case WM8994_AIF1_CONTROL_2:
+ case WM8994_AIF1_MASTER_SLAVE:
+ case WM8994_AIF1_BCLK:
+ case WM8994_AIF1ADC_LRCLK:
+ case WM8994_AIF1DAC_LRCLK:
+ case WM8994_AIF1DAC_DATA:
+ case WM8994_AIF1ADC_DATA:
+ case WM8994_AIF2_CONTROL_1:
+ case WM8994_AIF2_CONTROL_2:
+ case WM8994_AIF2_MASTER_SLAVE:
+ case WM8994_AIF2_BCLK:
+ case WM8994_AIF2ADC_LRCLK:
+ case WM8994_AIF2DAC_LRCLK:
+ case WM8994_AIF2DAC_DATA:
+ case WM8994_AIF2ADC_DATA:
+ case WM1811_AIF2TX_CONTROL:
+ case WM8958_AIF3_CONTROL_1:
+ case WM8958_AIF3_CONTROL_2:
+ case WM8958_AIF3DAC_DATA:
+ case WM8958_AIF3ADC_DATA:
+ case WM8994_AIF1_ADC1_LEFT_VOLUME:
+ case WM8994_AIF1_ADC1_RIGHT_VOLUME:
+ case WM8994_AIF1_DAC1_LEFT_VOLUME:
+ case WM8994_AIF1_DAC1_RIGHT_VOLUME:
+ case WM8994_AIF1_ADC1_FILTERS:
+ case WM8994_AIF1_DAC1_FILTERS_1:
+ case WM8994_AIF1_DAC1_FILTERS_2:
+ case WM8958_AIF1_DAC1_NOISE_GATE:
+ case WM8994_AIF1_DRC1_1:
+ case WM8994_AIF1_DRC1_2:
+ case WM8994_AIF1_DRC1_3:
+ case WM8994_AIF1_DRC1_4:
+ case WM8994_AIF1_DRC1_5:
+ case WM8994_AIF1_DAC1_EQ_GAINS_1:
+ case WM8994_AIF1_DAC1_EQ_GAINS_2:
+ case WM8994_AIF1_DAC1_EQ_BAND_1_A:
+ case WM8994_AIF1_DAC1_EQ_BAND_1_B:
+ case WM8994_AIF1_DAC1_EQ_BAND_1_PG:
+ case WM8994_AIF1_DAC1_EQ_BAND_2_A:
+ case WM8994_AIF1_DAC1_EQ_BAND_2_B:
+ case WM8994_AIF1_DAC1_EQ_BAND_2_C:
+ case WM8994_AIF1_DAC1_EQ_BAND_2_PG:
+ case WM8994_AIF1_DAC1_EQ_BAND_3_A:
+ case WM8994_AIF1_DAC1_EQ_BAND_3_B:
+ case WM8994_AIF1_DAC1_EQ_BAND_3_C:
+ case WM8994_AIF1_DAC1_EQ_BAND_3_PG:
+ case WM8994_AIF1_DAC1_EQ_BAND_4_A:
+ case WM8994_AIF1_DAC1_EQ_BAND_4_B:
+ case WM8994_AIF1_DAC1_EQ_BAND_4_C:
+ case WM8994_AIF1_DAC1_EQ_BAND_4_PG:
+ case WM8994_AIF1_DAC1_EQ_BAND_5_A:
+ case WM8994_AIF1_DAC1_EQ_BAND_5_B:
+ case WM8994_AIF1_DAC1_EQ_BAND_5_PG:
+ case WM8994_AIF1_DAC1_EQ_BAND_1_C:
+ case WM8994_AIF2_ADC_LEFT_VOLUME:
+ case WM8994_AIF2_ADC_RIGHT_VOLUME:
+ case WM8994_AIF2_DAC_LEFT_VOLUME:
+ case WM8994_AIF2_DAC_RIGHT_VOLUME:
+ case WM8994_AIF2_ADC_FILTERS:
+ case WM8994_AIF2_DAC_FILTERS_1:
+ case WM8994_AIF2_DAC_FILTERS_2:
+ case WM8958_AIF2_DAC_NOISE_GATE:
+ case WM8994_AIF2_DRC_1:
+ case WM8994_AIF2_DRC_2:
+ case WM8994_AIF2_DRC_3:
+ case WM8994_AIF2_DRC_4:
+ case WM8994_AIF2_DRC_5:
+ case WM8994_AIF2_EQ_GAINS_1:
+ case WM8994_AIF2_EQ_GAINS_2:
+ case WM8994_AIF2_EQ_BAND_1_A:
+ case WM8994_AIF2_EQ_BAND_1_B:
+ case WM8994_AIF2_EQ_BAND_1_PG:
+ case WM8994_AIF2_EQ_BAND_2_A:
+ case WM8994_AIF2_EQ_BAND_2_B:
+ case WM8994_AIF2_EQ_BAND_2_C:
+ case WM8994_AIF2_EQ_BAND_2_PG:
+ case WM8994_AIF2_EQ_BAND_3_A:
+ case WM8994_AIF2_EQ_BAND_3_B:
+ case WM8994_AIF2_EQ_BAND_3_C:
+ case WM8994_AIF2_EQ_BAND_3_PG:
+ case WM8994_AIF2_EQ_BAND_4_A:
+ case WM8994_AIF2_EQ_BAND_4_B:
+ case WM8994_AIF2_EQ_BAND_4_C:
+ case WM8994_AIF2_EQ_BAND_4_PG:
+ case WM8994_AIF2_EQ_BAND_5_A:
+ case WM8994_AIF2_EQ_BAND_5_B:
+ case WM8994_AIF2_EQ_BAND_5_PG:
+ case WM8994_AIF2_EQ_BAND_1_C:
+ case WM8994_DAC1_MIXER_VOLUMES:
+ case WM8994_DAC1_LEFT_MIXER_ROUTING:
+ case WM8994_DAC1_RIGHT_MIXER_ROUTING:
+ case WM8994_DAC2_MIXER_VOLUMES:
+ case WM8994_DAC2_LEFT_MIXER_ROUTING:
+ case WM8994_DAC2_RIGHT_MIXER_ROUTING:
+ case WM8994_AIF1_ADC1_LEFT_MIXER_ROUTING:
+ case WM8994_AIF1_ADC1_RIGHT_MIXER_ROUTING:
+ case WM8994_DAC1_LEFT_VOLUME:
+ case WM8994_DAC1_RIGHT_VOLUME:
+ case WM8994_DAC2_LEFT_VOLUME:
+ case WM8994_DAC2_RIGHT_VOLUME:
+ case WM8994_DAC_SOFTMUTE:
+ case WM8994_OVERSAMPLING:
+ case WM8994_SIDETONE:
+ case WM8994_GPIO_1:
+ case WM8994_GPIO_2:
+ case WM8994_GPIO_3:
+ case WM8994_GPIO_4:
+ case WM8994_GPIO_5:
+ case WM8994_GPIO_6:
+ case WM8994_GPIO_8:
+ case WM8994_GPIO_9:
+ case WM8994_GPIO_10:
+ case WM8994_GPIO_11:
+ case WM8994_PULL_CONTROL_1:
+ case WM8994_PULL_CONTROL_2:
+ case WM8994_INTERRUPT_STATUS_1:
+ case WM8994_INTERRUPT_STATUS_2:
+ case WM8994_INTERRUPT_RAW_STATUS_2:
+ case WM8994_INTERRUPT_STATUS_1_MASK:
+ case WM8994_INTERRUPT_STATUS_2_MASK:
+ case WM8994_INTERRUPT_CONTROL:
+ case WM8994_IRQ_DEBOUNCE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool wm8994_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case WM8994_DC_SERVO_READBACK:
+ case WM8994_WRITE_SEQUENCER_CTRL_1:
+ case WM8994_WRITE_SEQUENCER_CTRL_2:
+ case WM8994_AIF1_ADC2_LEFT_VOLUME:
+ case WM8994_AIF1_ADC2_RIGHT_VOLUME:
+ case WM8994_AIF1_DAC2_LEFT_VOLUME:
+ case WM8994_AIF1_DAC2_RIGHT_VOLUME:
+ case WM8994_AIF1_ADC2_FILTERS:
+ case WM8994_AIF1_DAC2_FILTERS_1:
+ case WM8994_AIF1_DAC2_FILTERS_2:
+ case WM8958_AIF1_DAC2_NOISE_GATE:
+ case WM8994_AIF1_DRC2_1:
+ case WM8994_AIF1_DRC2_2:
+ case WM8994_AIF1_DRC2_3:
+ case WM8994_AIF1_DRC2_4:
+ case WM8994_AIF1_DRC2_5:
+ case WM8994_AIF1_DAC2_EQ_GAINS_1:
+ case WM8994_AIF1_DAC2_EQ_GAINS_2:
+ case WM8994_AIF1_DAC2_EQ_BAND_1_A:
+ case WM8994_AIF1_DAC2_EQ_BAND_1_B:
+ case WM8994_AIF1_DAC2_EQ_BAND_1_PG:
+ case WM8994_AIF1_DAC2_EQ_BAND_2_A:
+ case WM8994_AIF1_DAC2_EQ_BAND_2_B:
+ case WM8994_AIF1_DAC2_EQ_BAND_2_C:
+ case WM8994_AIF1_DAC2_EQ_BAND_2_PG:
+ case WM8994_AIF1_DAC2_EQ_BAND_3_A:
+ case WM8994_AIF1_DAC2_EQ_BAND_3_B:
+ case WM8994_AIF1_DAC2_EQ_BAND_3_C:
+ case WM8994_AIF1_DAC2_EQ_BAND_3_PG:
+ case WM8994_AIF1_DAC2_EQ_BAND_4_A:
+ case WM8994_AIF1_DAC2_EQ_BAND_4_B:
+ case WM8994_AIF1_DAC2_EQ_BAND_4_C:
+ case WM8994_AIF1_DAC2_EQ_BAND_4_PG:
+ case WM8994_AIF1_DAC2_EQ_BAND_5_A:
+ case WM8994_AIF1_DAC2_EQ_BAND_5_B:
+ case WM8994_AIF1_DAC2_EQ_BAND_5_PG:
+ case WM8994_AIF1_DAC2_EQ_BAND_1_C:
+ case WM8994_DAC2_MIXER_VOLUMES:
+ case WM8994_DAC2_LEFT_MIXER_ROUTING:
+ case WM8994_DAC2_RIGHT_MIXER_ROUTING:
+ case WM8994_AIF1_ADC2_LEFT_MIXER_ROUTING:
+ case WM8994_AIF1_ADC2_RIGHT_MIXER_ROUTING:
+ case WM8994_DAC2_LEFT_VOLUME:
+ case WM8994_DAC2_RIGHT_VOLUME:
+ return true;
+ default:
+ return wm1811_readable_register(dev, reg);
+ }
+}
+
+static bool wm8958_readable_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case WM8958_DSP2_PROGRAM:
+ case WM8958_DSP2_CONFIG:
+ case WM8958_DSP2_MAGICNUM:
+ case WM8958_DSP2_RELEASEYEAR:
+ case WM8958_DSP2_RELEASEMONTHDAY:
+ case WM8958_DSP2_RELEASETIME:
+ case WM8958_DSP2_VERMAJMIN:
+ case WM8958_DSP2_VERBUILD:
+ case WM8958_DSP2_TESTREG:
+ case WM8958_DSP2_XORREG:
+ case WM8958_DSP2_SHIFTMAXX:
+ case WM8958_DSP2_SHIFTMAXY:
+ case WM8958_DSP2_SHIFTMAXZ:
+ case WM8958_DSP2_SHIFTMAXEXTLO:
+ case WM8958_DSP2_AESSELECT:
+ case WM8958_DSP2_EXECCONTROL:
+ case WM8958_DSP2_SAMPLEBREAK:
+ case WM8958_DSP2_COUNTBREAK:
+ case WM8958_DSP2_INTSTATUS:
+ case WM8958_DSP2_EVENTSTATUS:
+ case WM8958_DSP2_INTMASK:
+ case WM8958_DSP2_CONFIGDWIDTH:
+ case WM8958_DSP2_CONFIGINSTR:
+ case WM8958_DSP2_CONFIGDMEM:
+ case WM8958_DSP2_CONFIGDELAYS:
+ case WM8958_DSP2_CONFIGNUMIO:
+ case WM8958_DSP2_CONFIGEXTDEPTH:
+ case WM8958_DSP2_CONFIGMULTIPLIER:
+ case WM8958_DSP2_CONFIGCTRLDWIDTH:
+ case WM8958_DSP2_CONFIGPIPELINE:
+ case WM8958_DSP2_SHIFTMAXEXTHI:
+ case WM8958_DSP2_SWVERSIONREG:
+ case WM8958_DSP2_CONFIGXMEM:
+ case WM8958_DSP2_CONFIGYMEM:
+ case WM8958_DSP2_CONFIGZMEM:
+ case WM8958_FW_BUILD_1:
+ case WM8958_FW_BUILD_0:
+ case WM8958_FW_ID_1:
+ case WM8958_FW_ID_0:
+ case WM8958_FW_MAJOR_1:
+ case WM8958_FW_MAJOR_0:
+ case WM8958_FW_MINOR_1:
+ case WM8958_FW_MINOR_0:
+ case WM8958_FW_PATCH_1:
+ case WM8958_FW_PATCH_0:
+ case WM8958_MBC_BAND_1_K_1:
+ case WM8958_MBC_BAND_1_K_2:
+ case WM8958_MBC_BAND_1_N1_1:
+ case WM8958_MBC_BAND_1_N1_2:
+ case WM8958_MBC_BAND_1_N2_1:
+ case WM8958_MBC_BAND_1_N2_2:
+ case WM8958_MBC_BAND_1_N3_1:
+ case WM8958_MBC_BAND_1_N3_2:
+ case WM8958_MBC_BAND_1_N4_1:
+ case WM8958_MBC_BAND_1_N4_2:
+ case WM8958_MBC_BAND_1_N5_1:
+ case WM8958_MBC_BAND_1_N5_2:
+ case WM8958_MBC_BAND_1_X1_1:
+ case WM8958_MBC_BAND_1_X1_2:
+ case WM8958_MBC_BAND_1_X2_1:
+ case WM8958_MBC_BAND_1_X2_2:
+ case WM8958_MBC_BAND_1_X3_1:
+ case WM8958_MBC_BAND_1_X3_2:
+ case WM8958_MBC_BAND_1_ATTACK_1:
+ case WM8958_MBC_BAND_1_ATTACK_2:
+ case WM8958_MBC_BAND_1_DECAY_1:
+ case WM8958_MBC_BAND_1_DECAY_2:
+ case WM8958_MBC_BAND_2_K_1:
+ case WM8958_MBC_BAND_2_K_2:
+ case WM8958_MBC_BAND_2_N1_1:
+ case WM8958_MBC_BAND_2_N1_2:
+ case WM8958_MBC_BAND_2_N2_1:
+ case WM8958_MBC_BAND_2_N2_2:
+ case WM8958_MBC_BAND_2_N3_1:
+ case WM8958_MBC_BAND_2_N3_2:
+ case WM8958_MBC_BAND_2_N4_1:
+ case WM8958_MBC_BAND_2_N4_2:
+ case WM8958_MBC_BAND_2_N5_1:
+ case WM8958_MBC_BAND_2_N5_2:
+ case WM8958_MBC_BAND_2_X1_1:
+ case WM8958_MBC_BAND_2_X1_2:
+ case WM8958_MBC_BAND_2_X2_1:
+ case WM8958_MBC_BAND_2_X2_2:
+ case WM8958_MBC_BAND_2_X3_1:
+ case WM8958_MBC_BAND_2_X3_2:
+ case WM8958_MBC_BAND_2_ATTACK_1:
+ case WM8958_MBC_BAND_2_ATTACK_2:
+ case WM8958_MBC_BAND_2_DECAY_1:
+ case WM8958_MBC_BAND_2_DECAY_2:
+ case WM8958_MBC_B2_PG2_1:
+ case WM8958_MBC_B2_PG2_2:
+ case WM8958_MBC_B1_PG2_1:
+ case WM8958_MBC_B1_PG2_2:
+ case WM8958_MBC_CROSSOVER_1:
+ case WM8958_MBC_CROSSOVER_2:
+ case WM8958_MBC_HPF_1:
+ case WM8958_MBC_HPF_2:
+ case WM8958_MBC_LPF_1:
+ case WM8958_MBC_LPF_2:
+ case WM8958_MBC_RMS_LIMIT_1:
+ case WM8958_MBC_RMS_LIMIT_2:
+ return true;
+ default:
+ return wm8994_readable_register(dev, reg);
+ }
+}
+
+static bool wm8994_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case WM8994_SOFTWARE_RESET:
+ case WM8994_DC_SERVO_1:
+ case WM8994_DC_SERVO_READBACK:
+ case WM8994_RATE_STATUS:
+ case WM8958_MIC_DETECT_3:
+ case WM8994_DC_SERVO_4E:
+ case WM8994_CHIP_REVISION:
+ case WM8994_INTERRUPT_STATUS_1:
+ case WM8994_INTERRUPT_STATUS_2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool wm1811_volatile_register(struct device *dev, unsigned int reg)
+{
+ struct wm8994 *wm8994 = dev_get_drvdata(dev);
+
+ switch (reg) {
+ case WM8994_GPIO_6:
+ if (wm8994->revision > 1)
+ return true;
+ else
+ return false;
+ default:
+ return wm8994_volatile_register(dev, reg);
+ }
+}
+
+static bool wm8958_volatile_register(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case WM8958_DSP2_MAGICNUM:
+ case WM8958_DSP2_RELEASEYEAR:
+ case WM8958_DSP2_RELEASEMONTHDAY:
+ case WM8958_DSP2_RELEASETIME:
+ case WM8958_DSP2_VERMAJMIN:
+ case WM8958_DSP2_VERBUILD:
+ case WM8958_DSP2_EXECCONTROL:
+ case WM8958_DSP2_SWVERSIONREG:
+ case WM8958_DSP2_CONFIGXMEM:
+ case WM8958_DSP2_CONFIGYMEM:
+ case WM8958_DSP2_CONFIGZMEM:
+ case WM8958_FW_BUILD_1:
+ case WM8958_FW_BUILD_0:
+ case WM8958_FW_ID_1:
+ case WM8958_FW_ID_0:
+ case WM8958_FW_MAJOR_1:
+ case WM8958_FW_MAJOR_0:
+ case WM8958_FW_MINOR_1:
+ case WM8958_FW_MINOR_0:
+ case WM8958_FW_PATCH_1:
+ case WM8958_FW_PATCH_0:
+ return true;
+ default:
+ return wm8994_volatile_register(dev, reg);
+ }
+}
+
+struct regmap_config wm1811_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 16,
+
+ .cache_type = REGCACHE_RBTREE,
+
+ .reg_defaults = wm1811_defaults,
+ .num_reg_defaults = ARRAY_SIZE(wm1811_defaults),
+
+ .max_register = WM8994_MAX_REGISTER,
+ .volatile_reg = wm1811_volatile_register,
+ .readable_reg = wm1811_readable_register,
+};
+
+struct regmap_config wm8994_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 16,
+
+ .cache_type = REGCACHE_RBTREE,
+
+ .reg_defaults = wm8994_defaults,
+ .num_reg_defaults = ARRAY_SIZE(wm8994_defaults),
+
+ .max_register = WM8994_MAX_REGISTER,
+ .volatile_reg = wm8994_volatile_register,
+ .readable_reg = wm8994_readable_register,
+};
+
+struct regmap_config wm8958_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 16,
+
+ .cache_type = REGCACHE_RBTREE,
+
+ .reg_defaults = wm8958_defaults,
+ .num_reg_defaults = ARRAY_SIZE(wm8958_defaults),
+
+ .max_register = WM8994_MAX_REGISTER,
+ .volatile_reg = wm8958_volatile_register,
+ .readable_reg = wm8958_readable_register,
+};
+
+struct regmap_config wm8994_base_regmap_config = {
+ .reg_bits = 16,
+ .val_bits = 16,
+};
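The three full register maps share geometry and cache type and differ only in their default tables and readable/volatile callbacks; wm8994_base_regmap_config carries no defaults or callbacks, so the device can be brought up just far enough to be identified before the cache is populated. A plausible sketch of how the core driver might switch from the base map to the device-specific one once the chip type is known (the helper name is an assumption; only the exported config symbols come from this file):

#include <linux/regmap.h>
#include <linux/mfd/wm8994/core.h>
#include "wm8994.h"

/* Sketch, assuming wm8994->regmap was created earlier from
 * wm8994_base_regmap_config and wm8994->type has been detected. */
static int example_select_regmap(struct wm8994 *wm8994)
{
        struct regmap_config *cfg;

        switch (wm8994->type) {
        case WM1811:
                cfg = &wm1811_regmap_config;
                break;
        case WM8994:
                cfg = &wm8994_regmap_config;
                break;
        case WM8958:
                cfg = &wm8958_regmap_config;
                break;
        default:
                return -EINVAL;
        }

        /* Keep the existing bus binding but reload the defaults, the
         * readable/volatile callbacks and the rbtree cache. */
        return regmap_reinit_cache(wm8994->regmap, cfg);
}

Registers for which volatile_reg returns true bypass the cache on reads, which is why status, readback and revision registers appear in the volatile callbacks rather than relying on the default tables.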
diff --git a/drivers/mfd/wm8994.h b/drivers/mfd/wm8994.h
new file mode 100644
index 0000000..6f39a84
--- /dev/null
+++ b/drivers/mfd/wm8994.h
@@ -0,0 +1,25 @@
+/*
+ * wm8994.h -- WM8994 MFD internals
+ *
+ * Copyright 2011 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#ifndef __MFD_WM8994_H__
+#define __MFD_WM8994_H__
+
+#include <linux/regmap.h>
+
+extern struct regmap_config wm1811_regmap_config;
+extern struct regmap_config wm8994_regmap_config;
+extern struct regmap_config wm8958_regmap_config;
+extern struct regmap_config wm8994_base_regmap_config;
+
+#endif
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 5664696..c779509 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -2,24 +2,14 @@
# Misc strange devices
#
-# This one has to live outside of the MISC_DEVICES conditional,
-# because it may be selected by drivers/platform/x86/hp_accel.
+menu "Misc devices"
+
config SENSORS_LIS3LV02D
tristate
depends on INPUT
select INPUT_POLLDEV
default n
-menuconfig MISC_DEVICES
- bool "Misc devices"
- ---help---
- Say Y here to get to see options for device drivers from various
- different categories. This option alone does not add any kernel code.
-
- If you say N, all options in this submenu will be skipped and disabled.
-
-if MISC_DEVICES
-
config AD525X_DPOT
tristate "Analog Devices Digital Potentiometers"
depends on (I2C || SPI) && SYSFS
@@ -500,6 +490,14 @@ config USB_SWITCH_FSA9480
stereo and mono audio, video, microphone and UART data to use
a common connector port.
+config MAX8997_MUIC
+ tristate "MAX8997 MUIC Support"
+ depends on MFD_MAX8997
+ help
+ If you say yes here, you get support for the MUIC device of
+ the Maxim MAX8997 PMIC.
+ The MAX8997 MUIC is a USB port accessory detector and switch.
+
source "drivers/misc/c2port/Kconfig"
source "drivers/misc/eeprom/Kconfig"
source "drivers/misc/cb710/Kconfig"
@@ -508,5 +506,4 @@ source "drivers/misc/ti-st/Kconfig"
source "drivers/misc/lis3lv02d/Kconfig"
source "drivers/misc/carma/Kconfig"
source "drivers/misc/altera-stapl/Kconfig"
-
-endif # MISC_DEVICES
+endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index b26495a..3e1d801 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -48,3 +48,4 @@ obj-y += lis3lv02d/
obj-y += carma/
obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/
+obj-$(CONFIG_MAX8997_MUIC) += max8997-muic.o
diff --git a/drivers/misc/ab8500-pwm.c b/drivers/misc/ab8500-pwm.c
index 2208a9d..d7a9aa1 100644
--- a/drivers/misc/ab8500-pwm.c
+++ b/drivers/misc/ab8500-pwm.c
@@ -8,8 +8,8 @@
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pwm.h>
-#include <linux/mfd/ab8500.h>
#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500.h>
#include <linux/module.h>
/*
diff --git a/drivers/misc/ad525x_dpot-i2c.c b/drivers/misc/ad525x_dpot-i2c.c
index a39e055..83adab6 100644
--- a/drivers/misc/ad525x_dpot-i2c.c
+++ b/drivers/misc/ad525x_dpot-i2c.c
@@ -1,7 +1,7 @@
/*
* Driver for the Analog Devices digital potentiometers (I2C bus)
*
- * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc.
+ * Copyright (C) 2010-2011 Michael Hennerich, Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -11,7 +11,6 @@
#include "ad525x_dpot.h"
-/* ------------------------------------------------------------------------- */
/* I2C bus functions */
static int write_d8(void *client, u8 val)
{
@@ -60,18 +59,13 @@ static int __devinit ad_dpot_i2c_probe(struct i2c_client *client,
.bops = &bops,
};
- struct ad_dpot_id dpot_id = {
- .name = (char *) &id->name,
- .devid = id->driver_data,
- };
-
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_WORD_DATA)) {
dev_err(&client->dev, "SMBUS Word Data not Supported\n");
return -EIO;
}
- return ad_dpot_probe(&client->dev, &bdata, &dpot_id);
+ return ad_dpot_probe(&client->dev, &bdata, id->driver_data, id->name);
}
static int __devexit ad_dpot_i2c_remove(struct i2c_client *client)
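Both bus glue drivers now pass the numeric device ID and the part name straight to the core instead of wrapping them in a struct ad_dpot_id. The prototype implied by the two call sites would be roughly the following; this is inferred from the calls, not quoted from the patch:

/* Sketch of the adjusted core entry points, as implied by the
 * i2c and spi call sites. */
int ad_dpot_probe(struct device *dev, struct ad_dpot_bus_data *bdata,
                  unsigned long devid, const char *name);
int ad_dpot_remove(struct device *dev);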
diff --git a/drivers/misc/ad525x_dpot-spi.c b/drivers/misc/ad525x_dpot-spi.c
index 7f9a55a..822749e 100644
--- a/drivers/misc/ad525x_dpot-spi.c
+++ b/drivers/misc/ad525x_dpot-spi.c
@@ -1,7 +1,7 @@
/*
* Driver for the Analog Devices digital potentiometers (SPI bus)
*
- * Copyright (C) 2010 Michael Hennerich, Analog Devices Inc.
+ * Copyright (C) 2010-2011 Michael Hennerich, Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
@@ -11,40 +11,6 @@
#include "ad525x_dpot.h"
-static const struct ad_dpot_id ad_dpot_spi_devlist[] = {
- {.name = "ad5160", .devid = AD5160_ID},
- {.name = "ad5161", .devid = AD5161_ID},
- {.name = "ad5162", .devid = AD5162_ID},
- {.name = "ad5165", .devid = AD5165_ID},
- {.name = "ad5200", .devid = AD5200_ID},
- {.name = "ad5201", .devid = AD5201_ID},
- {.name = "ad5203", .devid = AD5203_ID},
- {.name = "ad5204", .devid = AD5204_ID},
- {.name = "ad5206", .devid = AD5206_ID},
- {.name = "ad5207", .devid = AD5207_ID},
- {.name = "ad5231", .devid = AD5231_ID},
- {.name = "ad5232", .devid = AD5232_ID},
- {.name = "ad5233", .devid = AD5233_ID},
- {.name = "ad5235", .devid = AD5235_ID},
- {.name = "ad5260", .devid = AD5260_ID},
- {.name = "ad5262", .devid = AD5262_ID},
- {.name = "ad5263", .devid = AD5263_ID},
- {.name = "ad5290", .devid = AD5290_ID},
- {.name = "ad5291", .devid = AD5291_ID},
- {.name = "ad5292", .devid = AD5292_ID},
- {.name = "ad5293", .devid = AD5293_ID},
- {.name = "ad7376", .devid = AD7376_ID},
- {.name = "ad8400", .devid = AD8400_ID},
- {.name = "ad8402", .devid = AD8402_ID},
- {.name = "ad8403", .devid = AD8403_ID},
- {.name = "adn2850", .devid = ADN2850_ID},
- {.name = "ad5270", .devid = AD5270_ID},
- {.name = "ad5271", .devid = AD5271_ID},
- {}
-};
-
-/* ------------------------------------------------------------------------- */
-
/* SPI bus functions */
static int write8(void *client, u8 val)
{
@@ -109,36 +75,16 @@ static const struct ad_dpot_bus_ops bops = {
.write_r8d8 = write16,
.write_r8d16 = write24,
};
-
-static const struct ad_dpot_id *dpot_match_id(const struct ad_dpot_id *id,
- char *name)
-{
- while (id->name && id->name[0]) {
- if (strcmp(name, id->name) == 0)
- return id;
- id++;
- }
- return NULL;
-}
-
static int __devinit ad_dpot_spi_probe(struct spi_device *spi)
{
- char *name = spi->dev.platform_data;
- const struct ad_dpot_id *dpot_id;
-
struct ad_dpot_bus_data bdata = {
.client = spi,
.bops = &bops,
};
- dpot_id = dpot_match_id(ad_dpot_spi_devlist, name);
-
- if (dpot_id == NULL) {
- dev_err(&spi->dev, "%s not in supported device list", name);
- return -ENODEV;
- }
-
- return ad_dpot_probe(&spi->dev, &bdata, dpot_id);
+ return ad_dpot_probe(&spi->dev, &bdata,
+ spi_get_device_id(spi)->driver_data,
+ spi_get_device_id(spi)->name);
}
static int __devexit ad_dpot_spi_remove(struct spi_device *spi)
@@ -146,14 +92,47 @@ static int __devexit ad_dpot_spi_remove(struct spi_device *spi)
return ad_dpot_remove(&spi->dev);
}
+static const struct spi_device_id ad_dpot_spi_id[] = {
+ {"ad5160", AD5160_ID},
+ {"ad5161", AD5161_ID},
+ {"ad5162", AD5162_ID},
+ {"ad5165", AD5165_ID},
+ {"ad5200", AD5200_ID},
+ {"ad5201", AD5201_ID},
+ {"ad5203", AD5203_ID},
+ {"ad5204", AD5204_ID},
+ {"ad5206", AD5206_ID},
+ {"ad5207", AD5207_ID},
+ {"ad5231", AD5231_ID},
+ {"ad5232", AD5232_ID},
+ {"ad5233", AD5233_ID},
+ {"ad5235", AD5235_ID},
+ {"ad5260", AD5260_ID},
+ {"ad5262", AD5262_ID},
+ {"ad5263", AD5263_ID},
+ {"ad5290", AD5290_ID},
+ {"ad5291", AD5291_ID},
+ {"ad5292", AD5292_ID},
+ {"ad5293", AD5293_ID},
+ {"ad7376", AD7376_ID},
+ {"ad8400", AD8400_ID},
+ {"ad8402", AD8402_ID},
+ {"ad8403", AD8403_ID},
+ {"adn2850", ADN2850_ID},
+ {"ad5270", AD5270_ID},
+ {"ad5271", AD5271_ID},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad_dpot_spi_id);
+
static struct spi_driver ad_dpot_spi_driver = {
.driver = {
.name = "ad_dpot",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad_dpot_spi_probe,
.remove = __devexit_p(ad_dpot_spi_remove),
+ .id_table = ad_dpot_spi_id,
};
static int __init ad_dpot_spi_init(void)
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index 7cb9110..1d1d426 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -64,7 +64,7 @@
* Author: Chris Verges <chrisv@cyberswitching.com>
*
* derived from ad5252.c
- * Copyright (c) 2006 Michael Hennerich <hennerich@blackfin.uclinux.org>
+ * Copyright (c) 2006-2011 Michael Hennerich <hennerich@blackfin.uclinux.org>
*
* Licensed under the GPL-2 or later.
*/
@@ -76,8 +76,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
-#define DRIVER_VERSION "0.2"
-
#include "ad525x_dpot.h"
/*
@@ -687,8 +685,9 @@ inline void ad_dpot_remove_files(struct device *dev,
}
}
-__devinit int ad_dpot_probe(struct device *dev,
- struct ad_dpot_bus_data *bdata, const struct ad_dpot_id *id)
+int __devinit ad_dpot_probe(struct device *dev,
+ struct ad_dpot_bus_data *bdata, unsigned long devid,
+ const char *name)
{
struct dpot_data *data;
@@ -704,13 +703,13 @@ __devinit int ad_dpot_probe(struct device *dev,
mutex_init(&data->update_lock);
data->bdata = *bdata;
- data->devid = id->devid;
+ data->devid = devid;
- data->max_pos = 1 << DPOT_MAX_POS(data->devid);
+ data->max_pos = 1 << DPOT_MAX_POS(devid);
data->rdac_mask = data->max_pos - 1;
- data->feat = DPOT_FEAT(data->devid);
- data->uid = DPOT_UID(data->devid);
- data->wipers = DPOT_WIPERS(data->devid);
+ data->feat = DPOT_FEAT(devid);
+ data->uid = DPOT_UID(devid);
+ data->wipers = DPOT_WIPERS(devid);
for (i = DPOT_RDAC0; i < MAX_RDACS; i++)
if (data->wipers & (1 << i)) {
@@ -731,7 +730,7 @@ __devinit int ad_dpot_probe(struct device *dev,
}
dev_info(dev, "%s %d-Position Digital Potentiometer registered\n",
- id->name, data->max_pos);
+ name, data->max_pos);
return 0;
@@ -745,7 +744,7 @@ exit_free:
dev_set_drvdata(dev, NULL);
exit:
dev_err(dev, "failed to create client for %s ID 0x%lX\n",
- id->name, id->devid);
+ name, devid);
return err;
}
EXPORT_SYMBOL(ad_dpot_probe);
@@ -770,4 +769,3 @@ MODULE_AUTHOR("Chris Verges <chrisv@cyberswitching.com>, "
"Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Digital potentiometer driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/misc/ad525x_dpot.h b/drivers/misc/ad525x_dpot.h
index 82b2cb7..6bd1eba 100644
--- a/drivers/misc/ad525x_dpot.h
+++ b/drivers/misc/ad525x_dpot.h
@@ -208,12 +208,8 @@ struct ad_dpot_bus_data {
const struct ad_dpot_bus_ops *bops;
};
-struct ad_dpot_id {
- char *name;
- unsigned long devid;
-};
-
-int ad_dpot_probe(struct device *dev, struct ad_dpot_bus_data *bdata, const struct ad_dpot_id *id);
+int ad_dpot_probe(struct device *dev, struct ad_dpot_bus_data *bdata,
+ unsigned long devid, const char *name);
int ad_dpot_remove(struct device *dev);
#endif
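
A minimal sketch, not part of the patch, of how bus glue calls the reworked ad_dpot_probe() declared above - the devid and name now come straight from the bus id-table entry, mirroring ad_dpot_spi_probe() earlier in this diff:

#include <linux/spi/spi.h>
#include "ad525x_dpot.h"

static int example_dpot_spi_glue(struct spi_device *spi,
				 struct ad_dpot_bus_data *bdata)
{
	const struct spi_device_id *id = spi_get_device_id(spi);

	/* id->driver_data carries the AD5xxx_ID, id->name the part string */
	return ad_dpot_probe(&spi->dev, bdata, id->driver_data, id->name);
}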
diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c
index 5f898cb..b29a2be 100644
--- a/drivers/misc/bmp085.c
+++ b/drivers/misc/bmp085.c
@@ -216,7 +216,7 @@ static s32 bmp085_get_temperature(struct bmp085_data *data, int *temperature)
*temperature = (x1+x2+8) >> 4;
exit:
- return status;;
+ return status;
}
/*
diff --git a/drivers/misc/c2port/c2port-duramar2150.c b/drivers/misc/c2port/c2port-duramar2150.c
index 778fc3f..5484301 100644
--- a/drivers/misc/c2port/c2port-duramar2150.c
+++ b/drivers/misc/c2port/c2port-duramar2150.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/io.h>
+#include <linux/ioport.h>
#include <linux/c2port.h>
#define DATA_PORT 0x325
diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
index 19fc7c1..f428d86 100644
--- a/drivers/misc/c2port/core.c
+++ b/drivers/misc/c2port/core.c
@@ -984,9 +984,9 @@ static int __init c2port_init(void)
" - (C) 2007 Rodolfo Giometti\n");
c2port_class = class_create(THIS_MODULE, "c2port");
- if (!c2port_class) {
+ if (IS_ERR(c2port_class)) {
printk(KERN_ERR "c2port: failed to allocate class\n");
- return -ENOMEM;
+ return PTR_ERR(c2port_class);
}
c2port_class->dev_attrs = c2port_attrs;
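
A minimal sketch, with hypothetical names, of the error-handling idiom the fix above switches to: class_create() reports failure through ERR_PTR(), never NULL, so the result must be tested with IS_ERR()/PTR_ERR().

#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>

static struct class *example_class;

static int __init example_init(void)
{
	example_class = class_create(THIS_MODULE, "example");
	if (IS_ERR(example_class))
		return PTR_ERR(example_class);

	return 0;
}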
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c
index eb5cd28..a2d25e4 100644
--- a/drivers/misc/carma/carma-fpga-program.c
+++ b/drivers/misc/carma/carma-fpga-program.c
@@ -513,7 +513,7 @@ static noinline int fpga_program_dma(struct fpga_dev *priv)
* transaction, and then put it under external control
*/
memset(&config, 0, sizeof(config));
- config.direction = DMA_TO_DEVICE;
+ config.direction = DMA_MEM_TO_DEV;
config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
config.dst_maxburst = fpga_fifo_size(priv->regs) / 2 / 4;
ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG,
diff --git a/drivers/misc/cb710/core.c b/drivers/misc/cb710/core.c
index 68cd05b..85cc771 100644
--- a/drivers/misc/cb710/core.c
+++ b/drivers/misc/cb710/core.c
@@ -245,6 +245,7 @@ static int __devinit cb710_probe(struct pci_dev *pdev,
if (err)
return err;
+ spin_lock_init(&chip->irq_lock);
chip->pdev = pdev;
chip->iobase = pcim_iomap_table(pdev)[0];
diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c
index bc685bf..87a390d 100644
--- a/drivers/misc/cs5535-mfgpt.c
+++ b/drivers/misc/cs5535-mfgpt.c
@@ -262,7 +262,7 @@ static void __init reset_all_timers(void)
* In other cases (such as with VSAless OpenFirmware), the system firmware
* leaves timers available for us to use.
*/
-static int __init scan_timers(struct cs5535_mfgpt_chip *mfgpt)
+static int __devinit scan_timers(struct cs5535_mfgpt_chip *mfgpt)
{
struct cs5535_mfgpt_timer timer = { .chip = mfgpt };
unsigned long flags;
diff --git a/drivers/misc/eeprom/eeprom_93cx6.c b/drivers/misc/eeprom/eeprom_93cx6.c
index 7b33de9..0ff4b02 100644
--- a/drivers/misc/eeprom/eeprom_93cx6.c
+++ b/drivers/misc/eeprom/eeprom_93cx6.c
@@ -63,6 +63,7 @@ static void eeprom_93cx6_startup(struct eeprom_93cx6 *eeprom)
eeprom->reg_data_out = 0;
eeprom->reg_data_clock = 0;
eeprom->reg_chip_select = 1;
+ eeprom->drive_data = 1;
eeprom->register_write(eeprom);
/*
@@ -101,6 +102,7 @@ static void eeprom_93cx6_write_bits(struct eeprom_93cx6 *eeprom,
*/
eeprom->reg_data_in = 0;
eeprom->reg_data_out = 0;
+ eeprom->drive_data = 1;
/*
* Start writing all bits.
@@ -140,6 +142,7 @@ static void eeprom_93cx6_read_bits(struct eeprom_93cx6 *eeprom,
*/
eeprom->reg_data_in = 0;
eeprom->reg_data_out = 0;
+ eeprom->drive_data = 0;
/*
* Start reading all bits.
@@ -231,3 +234,88 @@ void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, const u8 word,
}
EXPORT_SYMBOL_GPL(eeprom_93cx6_multiread);
+/**
+ * eeprom_93cx6_wren - set the write enable state
+ * @eeprom: Pointer to eeprom structure
+ * @enable: true to enable writes, otherwise disable writes
+ *
+ * Set the EEPROM write enable state to either allow or deny
+ * writes depending on the @enable value.
+ */
+void eeprom_93cx6_wren(struct eeprom_93cx6 *eeprom, bool enable)
+{
+ u16 command;
+
+ /* start the command */
+ eeprom_93cx6_startup(eeprom);
+
+ /* create command to enable/disable */
+
+ command = enable ? PCI_EEPROM_EWEN_OPCODE : PCI_EEPROM_EWDS_OPCODE;
+ command <<= (eeprom->width - 2);
+
+ eeprom_93cx6_write_bits(eeprom, command,
+ PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
+
+ eeprom_93cx6_cleanup(eeprom);
+}
+EXPORT_SYMBOL_GPL(eeprom_93cx6_wren);
+
+/**
+ * eeprom_93cx6_write - write data to the EEPROM
+ * @eeprom: Pointer to eeprom structure
+ * @addr: Address to write data to.
+ * @data: The data to write to address @addr.
+ *
+ * Write @data to the specified @addr in the EEPROM and wait for
+ * the device to finish writing.
+ *
+ * Note: since we do not expect a large number of write operations,
+ * we delay in between parts of the operation to avoid using excessive
+ * amounts of CPU time busy waiting.
+ */
+void eeprom_93cx6_write(struct eeprom_93cx6 *eeprom, u8 addr, u16 data)
+{
+ int timeout = 100;
+ u16 command;
+
+ /* start the command */
+ eeprom_93cx6_startup(eeprom);
+
+ command = PCI_EEPROM_WRITE_OPCODE << eeprom->width;
+ command |= addr;
+
+ /* send write command */
+ eeprom_93cx6_write_bits(eeprom, command,
+ PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
+
+ /* send data */
+ eeprom_93cx6_write_bits(eeprom, data, 16);
+
+ /* get ready to check for busy */
+ eeprom->drive_data = 0;
+ eeprom->reg_chip_select = 1;
+ eeprom->register_write(eeprom);
+
+	/* wait at least 250ns for DO to become the busy signal */
+ usleep_range(1000, 2000);
+
+ /* wait for DO to go high to signify finish */
+
+ while (true) {
+ eeprom->register_read(eeprom);
+
+ if (eeprom->reg_data_out)
+ break;
+
+ usleep_range(1000, 2000);
+
+ if (--timeout <= 0) {
+ printk(KERN_ERR "%s: timeout\n", __func__);
+ break;
+ }
+ }
+
+ eeprom_93cx6_cleanup(eeprom);
+}
+EXPORT_SYMBOL_GPL(eeprom_93cx6_write);
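
A minimal usage sketch, not part of the patch, for the new write path; it assumes a struct eeprom_93cx6 that has already been initialised with its register_read()/register_write() callbacks and bus width:

#include <linux/eeprom_93cx6.h>

static void example_update_word(struct eeprom_93cx6 *eeprom, u8 addr, u16 val)
{
	u16 readback;

	eeprom_93cx6_wren(eeprom, true);	/* lift write protection */
	eeprom_93cx6_write(eeprom, addr, val);	/* polls DO until idle */
	eeprom_93cx6_wren(eeprom, false);	/* protect again */

	eeprom_93cx6_read(eeprom, addr, &readback);	/* optional verify */
}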
diff --git a/drivers/misc/ibmasm/command.c b/drivers/misc/ibmasm/command.c
index 5c766b4..7d56f45 100644
--- a/drivers/misc/ibmasm/command.c
+++ b/drivers/misc/ibmasm/command.c
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asbck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/dot_command.c b/drivers/misc/ibmasm/dot_command.c
index 3dd2dfb..d7b2ca3 100644
--- a/drivers/misc/ibmasm/dot_command.c
+++ b/drivers/misc/ibmasm/dot_command.c
@@ -17,7 +17,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asbck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/dot_command.h b/drivers/misc/ibmasm/dot_command.h
index 6cbba1a..fc9fc9d 100644
--- a/drivers/misc/ibmasm/dot_command.h
+++ b/drivers/misc/ibmasm/dot_command.h
@@ -17,7 +17,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asbck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/event.c b/drivers/misc/ibmasm/event.c
index 76bfda1..8e540f4 100644
--- a/drivers/misc/ibmasm/event.c
+++ b/drivers/misc/ibmasm/event.c
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asbck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/heartbeat.c b/drivers/misc/ibmasm/heartbeat.c
index 1bc4306..9074637 100644
--- a/drivers/misc/ibmasm/heartbeat.c
+++ b/drivers/misc/ibmasm/heartbeat.c
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asbck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/i2o.h b/drivers/misc/ibmasm/i2o.h
index bf2c738..2e9566d 100644
--- a/drivers/misc/ibmasm/i2o.h
+++ b/drivers/misc/ibmasm/i2o.h
@@ -17,7 +17,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asbck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/ibmasm.h b/drivers/misc/ibmasm/ibmasm.h
index 4d8a4e2..9b08344 100644
--- a/drivers/misc/ibmasm/ibmasm.h
+++ b/drivers/misc/ibmasm/ibmasm.h
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asbck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index 8994772..3536175 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -17,7 +17,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asbck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/lowlevel.c b/drivers/misc/ibmasm/lowlevel.c
index 4b2398e..5319ea2 100644
--- a/drivers/misc/ibmasm/lowlevel.c
+++ b/drivers/misc/ibmasm/lowlevel.c
@@ -17,7 +17,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asbck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/lowlevel.h b/drivers/misc/ibmasm/lowlevel.h
index 7667665..e97848f 100644
--- a/drivers/misc/ibmasm/lowlevel.h
+++ b/drivers/misc/ibmasm/lowlevel.h
@@ -17,7 +17,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asbck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/module.c b/drivers/misc/ibmasm/module.c
index a234d96..1ccedb71 100644
--- a/drivers/misc/ibmasm/module.c
+++ b/drivers/misc/ibmasm/module.c
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asbck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
* This driver is based on code originally written by Pete Reynolds
* and others.
diff --git a/drivers/misc/ibmasm/r_heartbeat.c b/drivers/misc/ibmasm/r_heartbeat.c
index 2de487a..232034f 100644
--- a/drivers/misc/ibmasm/r_heartbeat.c
+++ b/drivers/misc/ibmasm/r_heartbeat.c
@@ -16,7 +16,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asbck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/ibmasm/remote.h b/drivers/misc/ibmasm/remote.h
index 00dbf1d..a7729ef 100644
--- a/drivers/misc/ibmasm/remote.h
+++ b/drivers/misc/ibmasm/remote.h
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asbck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
* Originally written by Pete Reynolds
*/
diff --git a/drivers/misc/ibmasm/uart.c b/drivers/misc/ibmasm/uart.c
index 93baa35..1dcb9ae 100644
--- a/drivers/misc/ibmasm/uart.c
+++ b/drivers/misc/ibmasm/uart.c
@@ -18,7 +18,7 @@
*
* Copyright (C) IBM Corporation, 2004
*
- * Author: Max Asbck <amax@us.ibm.com>
+ * Author: Max Asböck <amax@us.ibm.com>
*
*/
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c
index 307aada..3d6cce6 100644
--- a/drivers/misc/isl29020.c
+++ b/drivers/misc/isl29020.c
@@ -158,7 +158,7 @@ static int als_set_default_config(struct i2c_client *client)
dev_err(&client->dev, "default write failed.");
return retval;
}
- return 0;;
+ return 0;
}
static int isl29020_probe(struct i2c_client *client,
diff --git a/drivers/misc/iwmc3200top/main.c b/drivers/misc/iwmc3200top/main.c
index b1f4563..701eb60 100644
--- a/drivers/misc/iwmc3200top/main.c
+++ b/drivers/misc/iwmc3200top/main.c
@@ -376,20 +376,20 @@ static int blocks;
module_param(blocks, int, 0604);
MODULE_PARM_DESC(blocks, "max_blocks_to_send");
-static int dump;
+static bool dump;
module_param(dump, bool, 0604);
MODULE_PARM_DESC(dump, "dump_hex_content");
-static int jump = 1;
+static bool jump = 1;
module_param(jump, bool, 0604);
-static int direct = 1;
+static bool direct = 1;
module_param(direct, bool, 0604);
-static int checksum = 1;
+static bool checksum = 1;
module_param(checksum, bool, 0604);
-static int fw_download = 1;
+static bool fw_download = 1;
module_param(fw_download, bool, 0604);
static int block_size = IWMC_SDIO_BLK_SIZE;
@@ -398,7 +398,7 @@ module_param(block_size, int, 0404);
static int download_trans_blks = IWMC_DEFAULT_TR_BLK;
module_param(download_trans_blks, int, 0604);
-static int rubbish_barker;
+static bool rubbish_barker;
module_param(rubbish_barker, bool, 0604);
#ifdef CONFIG_IWMC3200TOP_DEBUG
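
A minimal sketch, with hypothetical names, of the stricter module_param() typing these conversions satisfy: a "bool" parameter must now be backed by a C bool.

#include <linux/module.h>

static bool example_enable = true;
module_param(example_enable, bool, 0644);
MODULE_PARM_DESC(example_enable, "enable the example feature");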
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
index 29d12a7..a981e2a 100644
--- a/drivers/misc/lis3lv02d/lis3lv02d.c
+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
@@ -111,6 +111,8 @@ static struct kernel_param_ops param_ops_axis = {
.get = param_get_int,
};
+#define param_check_axis(name, p) param_check_int(name, p)
+
module_param_array_named(axes, lis3_dev.ac.as_array, axis, NULL, 0644);
MODULE_PARM_DESC(axes, "Axis-mapping for x,y,z directions");
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 150cd70..28adefe 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -354,6 +354,7 @@ static void lkdtm_do_action(enum ctype which)
static void lkdtm_handler(void)
{
unsigned long flags;
+ bool do_it = false;
spin_lock_irqsave(&count_lock, flags);
count--;
@@ -361,10 +362,13 @@ static void lkdtm_handler(void)
cp_name_to_str(cpoint), cp_type_to_str(cptype), count);
if (count == 0) {
- lkdtm_do_action(cptype);
+ do_it = true;
count = cpoint_count;
}
spin_unlock_irqrestore(&count_lock, flags);
+
+ if (do_it)
+ lkdtm_do_action(cptype);
}
static int lkdtm_register_cpoint(enum cname which)
diff --git a/drivers/misc/max8997-muic.c b/drivers/misc/max8997-muic.c
new file mode 100644
index 0000000..d74ef41
--- /dev/null
+++ b/drivers/misc/max8997-muic.c
@@ -0,0 +1,505 @@
+/*
+ * max8997-muic.c - MAX8997 MUIC driver for the Maxim MAX8997 PMIC
+ *
+ * Copyright (C) 2011 Samsung Electronics
+ * Donggeun Kim <dg77.kim@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/kobject.h>
+#include <linux/mfd/max8997.h>
+#include <linux/mfd/max8997-private.h>
+
+/* MAX8997-MUIC STATUS1 register */
+#define STATUS1_ADC_SHIFT 0
+#define STATUS1_ADCLOW_SHIFT 5
+#define STATUS1_ADCERR_SHIFT 6
+#define STATUS1_ADC_MASK (0x1f << STATUS1_ADC_SHIFT)
+#define STATUS1_ADCLOW_MASK (0x1 << STATUS1_ADCLOW_SHIFT)
+#define STATUS1_ADCERR_MASK (0x1 << STATUS1_ADCERR_SHIFT)
+
+/* MAX8997-MUIC STATUS2 register */
+#define STATUS2_CHGTYP_SHIFT 0
+#define STATUS2_CHGDETRUN_SHIFT 3
+#define STATUS2_DCDTMR_SHIFT 4
+#define STATUS2_DBCHG_SHIFT 5
+#define STATUS2_VBVOLT_SHIFT 6
+#define STATUS2_CHGTYP_MASK (0x7 << STATUS2_CHGTYP_SHIFT)
+#define STATUS2_CHGDETRUN_MASK (0x1 << STATUS2_CHGDETRUN_SHIFT)
+#define STATUS2_DCDTMR_MASK (0x1 << STATUS2_DCDTMR_SHIFT)
+#define STATUS2_DBCHG_MASK (0x1 << STATUS2_DBCHG_SHIFT)
+#define STATUS2_VBVOLT_MASK (0x1 << STATUS2_VBVOLT_SHIFT)
+
+/* MAX8997-MUIC STATUS3 register */
+#define STATUS3_OVP_SHIFT 2
+#define STATUS3_OVP_MASK (0x1 << STATUS3_OVP_SHIFT)
+
+/* MAX8997-MUIC CONTROL1 register */
+#define COMN1SW_SHIFT 0
+#define COMP2SW_SHIFT 3
+#define COMN1SW_MASK (0x7 << COMN1SW_SHIFT)
+#define COMP2SW_MASK (0x7 << COMP2SW_SHIFT)
+#define SW_MASK (COMP2SW_MASK | COMN1SW_MASK)
+
+#define MAX8997_SW_USB ((1 << COMP2SW_SHIFT) | (1 << COMN1SW_SHIFT))
+#define MAX8997_SW_AUDIO ((2 << COMP2SW_SHIFT) | (2 << COMN1SW_SHIFT))
+#define MAX8997_SW_UART ((3 << COMP2SW_SHIFT) | (3 << COMN1SW_SHIFT))
+#define MAX8997_SW_OPEN ((0 << COMP2SW_SHIFT) | (0 << COMN1SW_SHIFT))
+
+#define MAX8997_ADC_GROUND 0x00
+#define MAX8997_ADC_MHL 0x01
+#define MAX8997_ADC_JIG_USB_1 0x18
+#define MAX8997_ADC_JIG_USB_2 0x19
+#define MAX8997_ADC_DESKDOCK 0x1a
+#define MAX8997_ADC_JIG_UART 0x1c
+#define MAX8997_ADC_CARDOCK 0x1d
+#define MAX8997_ADC_OPEN 0x1f
+
+struct max8997_muic_irq {
+ unsigned int irq;
+ const char *name;
+};
+
+static struct max8997_muic_irq muic_irqs[] = {
+ { MAX8997_MUICIRQ_ADCError, "muic-ADC_error" },
+ { MAX8997_MUICIRQ_ADCLow, "muic-ADC_low" },
+ { MAX8997_MUICIRQ_ADC, "muic-ADC" },
+ { MAX8997_MUICIRQ_VBVolt, "muic-VB_voltage" },
+ { MAX8997_MUICIRQ_DBChg, "muic-DB_charger" },
+ { MAX8997_MUICIRQ_DCDTmr, "muic-DCD_timer" },
+ { MAX8997_MUICIRQ_ChgDetRun, "muic-CDR_status" },
+ { MAX8997_MUICIRQ_ChgTyp, "muic-charger_type" },
+ { MAX8997_MUICIRQ_OVP, "muic-over_voltage" },
+};
+
+struct max8997_muic_info {
+ struct device *dev;
+ struct max8997_dev *iodev;
+ struct i2c_client *muic;
+ struct max8997_muic_platform_data *muic_pdata;
+
+ int irq;
+ struct work_struct irq_work;
+
+ enum max8997_muic_charger_type pre_charger_type;
+ int pre_adc;
+
+ struct mutex mutex;
+};
+
+static int max8997_muic_handle_usb(struct max8997_muic_info *info,
+ enum max8997_muic_usb_type usb_type, bool attached)
+{
+ struct max8997_muic_platform_data *mdata = info->muic_pdata;
+ int ret = 0;
+
+ if (usb_type == MAX8997_USB_HOST) {
+ /* switch to USB */
+ ret = max8997_update_reg(info->muic, MAX8997_MUIC_REG_CONTROL1,
+ attached ? MAX8997_SW_USB : MAX8997_SW_OPEN,
+ SW_MASK);
+ if (ret) {
+ dev_err(info->dev, "failed to update muic register\n");
+ goto out;
+ }
+ }
+
+ if (mdata->usb_callback)
+ mdata->usb_callback(usb_type, attached);
+out:
+ return ret;
+}
+
+static void max8997_muic_handle_mhl(struct max8997_muic_info *info,
+ bool attached)
+{
+ struct max8997_muic_platform_data *mdata = info->muic_pdata;
+
+ if (mdata->mhl_callback)
+ mdata->mhl_callback(attached);
+}
+
+static int max8997_muic_handle_dock(struct max8997_muic_info *info,
+ int adc, bool attached)
+{
+ struct max8997_muic_platform_data *mdata = info->muic_pdata;
+ int ret = 0;
+
+ /* switch to AUDIO */
+ ret = max8997_update_reg(info->muic, MAX8997_MUIC_REG_CONTROL1,
+ attached ? MAX8997_SW_AUDIO : MAX8997_SW_OPEN,
+ SW_MASK);
+ if (ret) {
+ dev_err(info->dev, "failed to update muic register\n");
+ goto out;
+ }
+
+ switch (adc) {
+ case MAX8997_ADC_DESKDOCK:
+ if (mdata->deskdock_callback)
+ mdata->deskdock_callback(attached);
+ break;
+ case MAX8997_ADC_CARDOCK:
+ if (mdata->cardock_callback)
+ mdata->cardock_callback(attached);
+ break;
+ default:
+ break;
+ }
+out:
+ return ret;
+}
+
+static int max8997_muic_handle_jig_uart(struct max8997_muic_info *info,
+ bool attached)
+{
+ struct max8997_muic_platform_data *mdata = info->muic_pdata;
+ int ret = 0;
+
+ /* switch to UART */
+ ret = max8997_update_reg(info->muic, MAX8997_MUIC_REG_CONTROL1,
+ attached ? MAX8997_SW_UART : MAX8997_SW_OPEN,
+ SW_MASK);
+ if (ret) {
+ dev_err(info->dev, "failed to update muic register\n");
+ goto out;
+ }
+
+ if (mdata->uart_callback)
+ mdata->uart_callback(attached);
+out:
+ return ret;
+}
+
+static int max8997_muic_handle_adc_detach(struct max8997_muic_info *info)
+{
+ int ret = 0;
+
+ switch (info->pre_adc) {
+ case MAX8997_ADC_GROUND:
+ ret = max8997_muic_handle_usb(info, MAX8997_USB_HOST, false);
+ break;
+ case MAX8997_ADC_MHL:
+ max8997_muic_handle_mhl(info, false);
+ break;
+ case MAX8997_ADC_JIG_USB_1:
+ case MAX8997_ADC_JIG_USB_2:
+ ret = max8997_muic_handle_usb(info, MAX8997_USB_DEVICE, false);
+ break;
+ case MAX8997_ADC_DESKDOCK:
+ case MAX8997_ADC_CARDOCK:
+ ret = max8997_muic_handle_dock(info, info->pre_adc, false);
+ break;
+ case MAX8997_ADC_JIG_UART:
+ ret = max8997_muic_handle_jig_uart(info, false);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int max8997_muic_handle_adc(struct max8997_muic_info *info, int adc)
+{
+ int ret = 0;
+
+ switch (adc) {
+ case MAX8997_ADC_GROUND:
+ ret = max8997_muic_handle_usb(info, MAX8997_USB_HOST, true);
+ break;
+ case MAX8997_ADC_MHL:
+ max8997_muic_handle_mhl(info, true);
+ break;
+ case MAX8997_ADC_JIG_USB_1:
+ case MAX8997_ADC_JIG_USB_2:
+ ret = max8997_muic_handle_usb(info, MAX8997_USB_DEVICE, true);
+ break;
+ case MAX8997_ADC_DESKDOCK:
+ case MAX8997_ADC_CARDOCK:
+ ret = max8997_muic_handle_dock(info, adc, true);
+ break;
+ case MAX8997_ADC_JIG_UART:
+ ret = max8997_muic_handle_jig_uart(info, true);
+ break;
+ case MAX8997_ADC_OPEN:
+ ret = max8997_muic_handle_adc_detach(info);
+ break;
+ default:
+ break;
+ }
+
+ info->pre_adc = adc;
+
+ return ret;
+}
+
+static int max8997_muic_handle_charger_type(struct max8997_muic_info *info,
+ enum max8997_muic_charger_type charger_type)
+{
+ struct max8997_muic_platform_data *mdata = info->muic_pdata;
+ u8 adc;
+ int ret;
+
+ ret = max8997_read_reg(info->muic, MAX8997_MUIC_REG_STATUS1, &adc);
+ if (ret) {
+ dev_err(info->dev, "failed to read muic register\n");
+ goto out;
+ }
+
+ switch (charger_type) {
+ case MAX8997_CHARGER_TYPE_NONE:
+ if (mdata->charger_callback)
+ mdata->charger_callback(false, charger_type);
+ if (info->pre_charger_type == MAX8997_CHARGER_TYPE_USB) {
+ max8997_muic_handle_usb(info,
+ MAX8997_USB_DEVICE, false);
+ }
+ break;
+ case MAX8997_CHARGER_TYPE_USB:
+ if ((adc & STATUS1_ADC_MASK) == MAX8997_ADC_OPEN) {
+ max8997_muic_handle_usb(info,
+ MAX8997_USB_DEVICE, true);
+ }
+ if (mdata->charger_callback)
+ mdata->charger_callback(true, charger_type);
+ break;
+ case MAX8997_CHARGER_TYPE_DOWNSTREAM_PORT:
+ case MAX8997_CHARGER_TYPE_DEDICATED_CHG:
+ case MAX8997_CHARGER_TYPE_500MA:
+ case MAX8997_CHARGER_TYPE_1A:
+ if (mdata->charger_callback)
+ mdata->charger_callback(true, charger_type);
+ break;
+ default:
+ break;
+ }
+
+ info->pre_charger_type = charger_type;
+out:
+ return ret;
+}
+
+static void max8997_muic_irq_work(struct work_struct *work)
+{
+ struct max8997_muic_info *info = container_of(work,
+ struct max8997_muic_info, irq_work);
+ struct max8997_platform_data *pdata =
+ dev_get_platdata(info->iodev->dev);
+ u8 status[3];
+ u8 adc, chg_type;
+
+ int irq_type = info->irq - pdata->irq_base;
+ int ret;
+
+ mutex_lock(&info->mutex);
+
+ ret = max8997_bulk_read(info->muic, MAX8997_MUIC_REG_STATUS1,
+ 3, status);
+ if (ret) {
+ dev_err(info->dev, "failed to read muic register\n");
+ mutex_unlock(&info->mutex);
+ return;
+ }
+
+ dev_dbg(info->dev, "%s: STATUS1:0x%x, 2:0x%x\n", __func__,
+ status[0], status[1]);
+
+ switch (irq_type) {
+ case MAX8997_MUICIRQ_ADC:
+ adc = status[0] & STATUS1_ADC_MASK;
+ adc >>= STATUS1_ADC_SHIFT;
+
+ max8997_muic_handle_adc(info, adc);
+ break;
+ case MAX8997_MUICIRQ_ChgTyp:
+ chg_type = status[1] & STATUS2_CHGTYP_MASK;
+ chg_type >>= STATUS2_CHGTYP_SHIFT;
+
+ max8997_muic_handle_charger_type(info, chg_type);
+ break;
+ default:
+ dev_info(info->dev, "misc interrupt: %s occurred\n",
+ muic_irqs[irq_type].name);
+ break;
+ }
+
+ mutex_unlock(&info->mutex);
+
+ return;
+}
+
+static irqreturn_t max8997_muic_irq_handler(int irq, void *data)
+{
+ struct max8997_muic_info *info = data;
+
+ dev_dbg(info->dev, "irq:%d\n", irq);
+ info->irq = irq;
+
+ schedule_work(&info->irq_work);
+
+ return IRQ_HANDLED;
+}
+
+static void max8997_muic_detect_dev(struct max8997_muic_info *info)
+{
+ int ret;
+ u8 status[2], adc, chg_type;
+
+ ret = max8997_bulk_read(info->muic, MAX8997_MUIC_REG_STATUS1,
+ 2, status);
+ if (ret) {
+ dev_err(info->dev, "failed to read muic register\n");
+ return;
+ }
+
+ dev_info(info->dev, "STATUS1:0x%x, STATUS2:0x%x\n",
+ status[0], status[1]);
+
+ adc = status[0] & STATUS1_ADC_MASK;
+ adc >>= STATUS1_ADC_SHIFT;
+
+ chg_type = status[1] & STATUS2_CHGTYP_MASK;
+ chg_type >>= STATUS2_CHGTYP_SHIFT;
+
+ max8997_muic_handle_adc(info, adc);
+ max8997_muic_handle_charger_type(info, chg_type);
+}
+
+static void max8997_initialize_device(struct max8997_muic_info *info)
+{
+ struct max8997_muic_platform_data *mdata = info->muic_pdata;
+ int i;
+
+ for (i = 0; i < mdata->num_init_data; i++) {
+ max8997_write_reg(info->muic, mdata->init_data[i].addr,
+ mdata->init_data[i].data);
+ }
+}
+
+static int __devinit max8997_muic_probe(struct platform_device *pdev)
+{
+ struct max8997_dev *iodev = dev_get_drvdata(pdev->dev.parent);
+ struct max8997_platform_data *pdata = dev_get_platdata(iodev->dev);
+ struct max8997_muic_info *info;
+ int ret, i;
+
+ info = kzalloc(sizeof(struct max8997_muic_info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&pdev->dev, "failed to allocate memory\n");
+ ret = -ENOMEM;
+ goto err_kfree;
+ }
+
+ if (!pdata->muic_pdata) {
+ dev_err(&pdev->dev, "failed to get platform_data\n");
+ ret = -EINVAL;
+ goto err_pdata;
+ }
+ info->muic_pdata = pdata->muic_pdata;
+
+ info->dev = &pdev->dev;
+ info->iodev = iodev;
+ info->muic = iodev->muic;
+
+ platform_set_drvdata(pdev, info);
+ mutex_init(&info->mutex);
+
+ INIT_WORK(&info->irq_work, max8997_muic_irq_work);
+
+ for (i = 0; i < ARRAY_SIZE(muic_irqs); i++) {
+ struct max8997_muic_irq *muic_irq = &muic_irqs[i];
+
+ ret = request_threaded_irq(pdata->irq_base + muic_irq->irq,
+ NULL, max8997_muic_irq_handler,
+ 0, muic_irq->name,
+ info);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed: irq request (IRQ: %d,"
+				" error: %d)\n",
+ muic_irq->irq, ret);
+
+			for (i = i - 1; i >= 0; i--)
+				free_irq(pdata->irq_base + muic_irqs[i].irq,
+					 info);
+
+ goto err_irq;
+ }
+ }
+
+ /* Initialize registers according to platform data */
+ max8997_initialize_device(info);
+
+ /* Initial device detection */
+ max8997_muic_detect_dev(info);
+
+ return ret;
+
+err_irq:
+err_pdata:
+ kfree(info);
+err_kfree:
+ return ret;
+}
+
+static int __devexit max8997_muic_remove(struct platform_device *pdev)
+{
+ struct max8997_muic_info *info = platform_get_drvdata(pdev);
+ struct max8997_platform_data *pdata =
+ dev_get_platdata(info->iodev->dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(muic_irqs); i++)
+ free_irq(pdata->irq_base + muic_irqs[i].irq, info);
+ cancel_work_sync(&info->irq_work);
+
+ kfree(info);
+
+ return 0;
+}
+
+static struct platform_driver max8997_muic_driver = {
+ .driver = {
+ .name = "max8997-muic",
+ .owner = THIS_MODULE,
+ },
+ .probe = max8997_muic_probe,
+ .remove = __devexit_p(max8997_muic_remove),
+};
+
+static int __init max8997_muic_init(void)
+{
+ return platform_driver_register(&max8997_muic_driver);
+}
+module_init(max8997_muic_init);
+
+static void __exit max8997_muic_exit(void)
+{
+ platform_driver_unregister(&max8997_muic_driver);
+}
+module_exit(max8997_muic_exit);
+
+MODULE_DESCRIPTION("Maxim MAX8997 MUIC driver");
+MODULE_AUTHOR("Donggeun Kim <dg77.kim@samsung.com>");
+MODULE_LICENSE("GPL");
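
A board-file sketch, not part of the patch, of the platform data this driver consumes; the field names follow the driver's use of muic_pdata above, and the real definitions are assumed to live in <linux/mfd/max8997.h>:

#include <linux/kernel.h>
#include <linux/mfd/max8997.h>

static void example_usb_cb(enum max8997_muic_usb_type usb_type, bool attached)
{
	pr_info("muic: USB type %d %s\n", usb_type,
		attached ? "attached" : "detached");
}

static void example_charger_cb(bool attached,
			       enum max8997_muic_charger_type charger_type)
{
	pr_info("muic: charger type %d %s\n", charger_type,
		attached ? "attached" : "detached");
}

static struct max8997_muic_platform_data example_muic_pdata = {
	.usb_callback		= example_usb_cb,
	.charger_callback	= example_charger_cb,
	/* .init_data / .num_init_data may supply optional register presets */
};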
diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
index 7768b87..950dbe9 100644
--- a/drivers/misc/sgi-gru/gruprocfs.c
+++ b/drivers/misc/sgi-gru/gruprocfs.c
@@ -324,7 +324,7 @@ static const struct file_operations gru_fops = {
static struct proc_entry {
char *name;
- int mode;
+ umode_t mode;
const struct file_operations *fops;
struct proc_dir_entry *entry;
} proc_files[] = {
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index 42f0673..3fac67a 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -576,7 +576,7 @@ xpnet_init(void)
* report an error if the data is not retrievable and the
* packet will be dropped.
*/
- xpnet_device->features = NETIF_F_NO_CSUM;
+ xpnet_device->features = NETIF_F_HW_CSUM;
result = register_netdev(xpnet_device);
if (result != 0) {
diff --git a/drivers/misc/ti-st/st_core.c b/drivers/misc/ti-st/st_core.c
index ba168a7..2b62232 100644
--- a/drivers/misc/ti-st/st_core.c
+++ b/drivers/misc/ti-st/st_core.c
@@ -137,6 +137,8 @@ void st_send_frame(unsigned char chnl_id, struct st_data_s *st_gdata)
* st_reg_complete -
* to call registration complete callbacks
* of all protocol stack drivers
+ * This function is called with the spin lock held; protocol drivers are
+ * only expected to complete their waits and do nothing more than that.
*/
void st_reg_complete(struct st_data_s *st_gdata, char err)
{
@@ -538,11 +540,12 @@ long st_register(struct st_proto_s *new_proto)
set_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
st_recv = st_kim_recv;
+ /* enable the ST LL - to set default chip state */
+ st_ll_enable(st_gdata);
+
/* release lock previously held - re-locked below */
spin_unlock_irqrestore(&st_gdata->lock, flags);
- /* enable the ST LL - to set default chip state */
- st_ll_enable(st_gdata);
/* this may take a while to complete
* since it involves BT fw download
*/
@@ -553,10 +556,13 @@ long st_register(struct st_proto_s *new_proto)
(test_bit(ST_REG_PENDING, &st_gdata->st_state))) {
pr_err(" KIM failure complete callback ");
st_reg_complete(st_gdata, err);
+ clear_bit(ST_REG_PENDING, &st_gdata->st_state);
}
return -EINVAL;
}
+ spin_lock_irqsave(&st_gdata->lock, flags);
+
clear_bit(ST_REG_IN_PROGRESS, &st_gdata->st_state);
st_recv = st_int_recv;
@@ -576,10 +582,10 @@ long st_register(struct st_proto_s *new_proto)
if (st_gdata->is_registered[new_proto->chnl_id] == true) {
pr_err(" proto %d already registered ",
new_proto->chnl_id);
+ spin_unlock_irqrestore(&st_gdata->lock, flags);
return -EALREADY;
}
- spin_lock_irqsave(&st_gdata->lock, flags);
add_channel_to_table(st_gdata, new_proto);
st_gdata->protos_registered++;
new_proto->write = st_write;
@@ -619,7 +625,7 @@ long st_unregister(struct st_proto_s *proto)
spin_lock_irqsave(&st_gdata->lock, flags);
- if (st_gdata->list[proto->chnl_id] == NULL) {
+ if (st_gdata->is_registered[proto->chnl_id] == false) {
pr_err(" chnl_id %d not registered", proto->chnl_id);
spin_unlock_irqrestore(&st_gdata->lock, flags);
return -EPROTONOSUPPORT;
@@ -629,6 +635,10 @@ long st_unregister(struct st_proto_s *proto)
remove_channel_from_table(st_gdata, proto);
spin_unlock_irqrestore(&st_gdata->lock, flags);
+ /* paranoid check */
+ if (st_gdata->protos_registered < ST_EMPTY)
+ st_gdata->protos_registered = ST_EMPTY;
+
if ((st_gdata->protos_registered == ST_EMPTY) &&
(!test_bit(ST_REG_PENDING, &st_gdata->st_state))) {
pr_info(" all chnl_ids unregistered ");
diff --git a/drivers/misc/ti-st/st_kim.c b/drivers/misc/ti-st/st_kim.c
index 43ef8d1..a7a861c 100644
--- a/drivers/misc/ti-st/st_kim.c
+++ b/drivers/misc/ti-st/st_kim.c
@@ -469,37 +469,21 @@ long st_kim_start(void *kim_data)
/* wait for ldisc to be installed */
err = wait_for_completion_timeout(&kim_gdata->ldisc_installed,
msecs_to_jiffies(LDISC_TIME));
- if (!err) { /* timeout */
- pr_err("line disc installation timed out ");
- kim_gdata->ldisc_install = 0;
- pr_info("ldisc_install = 0");
- sysfs_notify(&kim_gdata->kim_pdev->dev.kobj,
- NULL, "install");
- /* the following wait is never going to be completed,
- * since the ldisc was never installed, hence serving
- * as a mdelay of LDISC_TIME msecs */
- err = wait_for_completion_timeout
- (&kim_gdata->ldisc_installed,
- msecs_to_jiffies(LDISC_TIME));
- err = -ETIMEDOUT;
+ if (!err) {
+ /* ldisc installation timeout,
+ * flush uart, power cycle BT_EN */
+ pr_err("ldisc installation timeout");
+ err = st_kim_stop(kim_gdata);
continue;
} else {
/* ldisc installed now */
- pr_info(" line discipline installed ");
+ pr_info("line discipline installed");
err = download_firmware(kim_gdata);
if (err != 0) {
+ /* ldisc installed but fw download failed,
+ * flush uart & power cycle BT_EN */
pr_err("download firmware failed");
- kim_gdata->ldisc_install = 0;
- pr_info("ldisc_install = 0");
- sysfs_notify(&kim_gdata->kim_pdev->dev.kobj,
- NULL, "install");
- /* this wait might be completed, though in the
- * tty_close() since the ldisc is already
- * installed */
- err = wait_for_completion_timeout
- (&kim_gdata->ldisc_installed,
- msecs_to_jiffies(LDISC_TIME));
- err = -EINVAL;
+ err = st_kim_stop(kim_gdata);
continue;
} else { /* on success don't retry */
break;
@@ -510,8 +494,14 @@ long st_kim_start(void *kim_data)
}
/**
- * st_kim_stop - called from ST Core, on the last un-registration
- * toggle low the chip enable gpio
+ * st_kim_stop - stop communication with the chip.
+ * This can be called from ST Core/KIM on either
+ * (a) the last un-register, when the chip need not be powered thereafter, or
+ * (b) failure to install the ldisc or download firmware.
+ * The function is responsible for (a) notifying the UIM about un-installation,
+ * (b) flushing the UART if the ldisc was installed,
+ * (c) resetting BT_EN - pulling nshutdown low at the end, and
+ * (d) invoking the platform's chip-disabling routine.
*/
long st_kim_stop(void *kim_data)
{
@@ -519,12 +509,16 @@ long st_kim_stop(void *kim_data)
struct kim_data_s *kim_gdata = (struct kim_data_s *)kim_data;
struct ti_st_plat_data *pdata =
kim_gdata->kim_pdev->dev.platform_data;
+ struct tty_struct *tty = kim_gdata->core_data->tty;
INIT_COMPLETION(kim_gdata->ldisc_installed);
- /* Flush any pending characters in the driver and discipline. */
- tty_ldisc_flush(kim_gdata->core_data->tty);
- tty_driver_flush_buffer(kim_gdata->core_data->tty);
+ if (tty) { /* can be called before ldisc is installed */
+ /* Flush any pending characters in the driver and discipline. */
+ tty_ldisc_flush(tty);
+ tty_driver_flush_buffer(tty);
+ tty->ops->flush_buffer(tty);
+ }
/* send uninstall notification to UIM */
pr_info("ldisc_install = 0");
@@ -579,6 +573,28 @@ static ssize_t show_install(struct device *dev,
return sprintf(buf, "%d\n", kim_data->ldisc_install);
}
+#ifdef DEBUG
+static ssize_t store_dev_name(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kim_data_s *kim_data = dev_get_drvdata(dev);
+ pr_debug("storing dev name >%s<", buf);
+ strncpy(kim_data->dev_name, buf, count);
+ pr_debug("stored dev name >%s<", kim_data->dev_name);
+ return count;
+}
+
+static ssize_t store_baud_rate(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct kim_data_s *kim_data = dev_get_drvdata(dev);
+ pr_debug("storing baud rate >%s<", buf);
+ sscanf(buf, "%ld", &kim_data->baud_rate);
+ pr_debug("stored baud rate >%ld<", kim_data->baud_rate);
+ return count;
+}
+#endif /* if DEBUG */
+
static ssize_t show_dev_name(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -605,10 +621,18 @@ static struct kobj_attribute ldisc_install =
__ATTR(install, 0444, (void *)show_install, NULL);
static struct kobj_attribute uart_dev_name =
+#ifdef DEBUG /* TODO: move this to debug-fs if possible */
+__ATTR(dev_name, 0644, (void *)show_dev_name, (void *)store_dev_name);
+#else
__ATTR(dev_name, 0444, (void *)show_dev_name, NULL);
+#endif
static struct kobj_attribute uart_baud_rate =
+#ifdef DEBUG /* TODO: move to debugfs */
+__ATTR(baud_rate, 0644, (void *)show_baud_rate, (void *)store_baud_rate);
+#else
__ATTR(baud_rate, 0444, (void *)show_baud_rate, NULL);
+#endif
static struct kobj_attribute uart_flow_cntrl =
__ATTR(flow_cntrl, 0444, (void *)show_flow_cntrl, NULL);
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index cd41d40..cb56e27 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -314,7 +314,7 @@ static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
* fear that guest will need it. Host may reject some pages, we need to
* check the return value and maybe submit a different page.
*/
-static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
+static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
unsigned int *hv_status)
{
unsigned long status, dummy;
@@ -322,17 +322,17 @@ static bool vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
pfn32 = (u32)pfn;
if (pfn32 != pfn)
- return false;
+ return -1;
STATS_INC(b->stats.lock);
*hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy);
if (vmballoon_check_status(b, status))
- return true;
+ return 0;
pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
STATS_INC(b->stats.lock_fail);
- return false;
+ return 1;
}
/*
@@ -411,7 +411,7 @@ static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
struct page *page;
gfp_t flags;
unsigned int hv_status;
- bool locked = false;
+ int locked;
flags = can_sleep ? VMW_PAGE_ALLOC_CANSLEEP : VMW_PAGE_ALLOC_NOSLEEP;
do {
@@ -431,7 +431,7 @@ static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
/* inform monitor */
locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status);
- if (!locked) {
+ if (locked > 0) {
STATS_INC(b->stats.refused_alloc);
if (hv_status == VMW_BALLOON_ERROR_RESET ||
@@ -449,7 +449,7 @@ static int vmballoon_reserve_page(struct vmballoon *b, bool can_sleep)
if (++b->n_refused_pages >= VMW_BALLOON_MAX_REFUSED)
return -EIO;
}
- } while (!locked);
+ } while (locked != 0);
/* track allocated page */
list_add(&page->lru, &b->pages);
diff --git a/drivers/mmc/Makefile b/drivers/mmc/Makefile
index 12eef39..400756e 100644
--- a/drivers/mmc/Makefile
+++ b/drivers/mmc/Makefile
@@ -6,5 +6,4 @@ subdir-ccflags-$(CONFIG_MMC_DEBUG) := -DDEBUG
obj-$(CONFIG_MMC) += core/
obj-$(CONFIG_MMC) += card/
-obj-$(CONFIG_MMC) += host/
-
+obj-$(subst m,y,$(CONFIG_MMC)) += host/
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 1e0e27c..c6a383d 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -107,6 +107,8 @@ struct mmc_blk_data {
*/
unsigned int part_curr;
struct device_attribute force_ro;
+ struct device_attribute power_ro_lock;
+ int area_type;
};
static DEFINE_MUTEX(open_lock);
@@ -119,6 +121,7 @@ enum mmc_blk_status {
MMC_BLK_ABORT,
MMC_BLK_DATA_ERR,
MMC_BLK_ECC_ERR,
+ MMC_BLK_NOMEDIUM,
};
module_param(perdev_minors, int, 0444);
@@ -165,6 +168,70 @@ static void mmc_blk_put(struct mmc_blk_data *md)
mutex_unlock(&open_lock);
}
+static ssize_t power_ro_lock_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ struct mmc_card *card = md->queue.card;
+ int locked = 0;
+
+ if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
+ locked = 2;
+ else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
+ locked = 1;
+
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
+
+ return ret;
+}
+
+static ssize_t power_ro_lock_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+ struct mmc_blk_data *md, *part_md;
+ struct mmc_card *card;
+ unsigned long set;
+
+ if (kstrtoul(buf, 0, &set))
+ return -EINVAL;
+
+ if (set != 1)
+ return count;
+
+ md = mmc_blk_get(dev_to_disk(dev));
+ card = md->queue.card;
+
+ mmc_claim_host(card->host);
+
+ ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
+ card->ext_csd.boot_ro_lock |
+ EXT_CSD_BOOT_WP_B_PWR_WP_EN,
+ card->ext_csd.part_time);
+ if (ret)
+ pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
+ else
+ card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;
+
+ mmc_release_host(card->host);
+
+ if (!ret) {
+ pr_info("%s: Locking boot partition ro until next power on\n",
+ md->disk->disk_name);
+ set_disk_ro(md->disk, 1);
+
+ list_for_each_entry(part_md, &md->part, part)
+ if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
+ pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
+ set_disk_ro(part_md->disk, 1);
+ }
+ }
+
+ mmc_blk_put(md);
+ return count;
+}
+
static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -266,6 +333,9 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
goto idata_err;
}
+ if (!idata->buf_bytes)
+ return idata;
+
idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
if (!idata->buf) {
err = -ENOMEM;
@@ -312,25 +382,6 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
if (IS_ERR(idata))
return PTR_ERR(idata);
- cmd.opcode = idata->ic.opcode;
- cmd.arg = idata->ic.arg;
- cmd.flags = idata->ic.flags;
-
- data.sg = &sg;
- data.sg_len = 1;
- data.blksz = idata->ic.blksz;
- data.blocks = idata->ic.blocks;
-
- sg_init_one(data.sg, idata->buf, idata->buf_bytes);
-
- if (idata->ic.write_flag)
- data.flags = MMC_DATA_WRITE;
- else
- data.flags = MMC_DATA_READ;
-
- mrq.cmd = &cmd;
- mrq.data = &data;
-
md = mmc_blk_get(bdev->bd_disk);
if (!md) {
err = -EINVAL;
@@ -343,6 +394,48 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
goto cmd_done;
}
+ cmd.opcode = idata->ic.opcode;
+ cmd.arg = idata->ic.arg;
+ cmd.flags = idata->ic.flags;
+
+ if (idata->buf_bytes) {
+ data.sg = &sg;
+ data.sg_len = 1;
+ data.blksz = idata->ic.blksz;
+ data.blocks = idata->ic.blocks;
+
+ sg_init_one(data.sg, idata->buf, idata->buf_bytes);
+
+ if (idata->ic.write_flag)
+ data.flags = MMC_DATA_WRITE;
+ else
+ data.flags = MMC_DATA_READ;
+
+ /* data.flags must already be set before doing this. */
+ mmc_set_data_timeout(&data, card);
+
+ /* Allow overriding the timeout_ns for empirical tuning. */
+ if (idata->ic.data_timeout_ns)
+ data.timeout_ns = idata->ic.data_timeout_ns;
+
+ if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
+ /*
+ * Pretend this is a data transfer and rely on the
+ * host driver to compute timeout. When all host
+ * drivers support cmd.cmd_timeout for R1B, this
+ * can be changed to:
+ *
+ * mrq.data = NULL;
+ * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
+ */
+ data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
+ }
+
+ mrq.data = &data;
+ }
+
+ mrq.cmd = &cmd;
+
mmc_claim_host(card->host);
if (idata->ic.is_acmd) {
@@ -351,24 +444,6 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
goto cmd_rel_host;
}
- /* data.flags must already be set before doing this. */
- mmc_set_data_timeout(&data, card);
- /* Allow overriding the timeout_ns for empirical tuning. */
- if (idata->ic.data_timeout_ns)
- data.timeout_ns = idata->ic.data_timeout_ns;
-
- if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
- /*
- * Pretend this is a data transfer and rely on the host driver
- * to compute timeout. When all host drivers support
- * cmd.cmd_timeout for R1B, this can be changed to:
- *
- * mrq.data = NULL;
- * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
- */
- data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
- }
-
mmc_wait_for_req(card->host, &mrq);
if (cmd.error) {
@@ -565,6 +640,7 @@ static int get_card_status(struct mmc_card *card, u32 *status, int retries)
return err;
}
+#define ERR_NOMEDIUM 3
#define ERR_RETRY 2
#define ERR_ABORT 1
#define ERR_CONTINUE 0
@@ -632,6 +708,9 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
u32 status, stop_status = 0;
int err, retry;
+ if (mmc_card_removed(card))
+ return ERR_NOMEDIUM;
+
/*
* Try to get card status which indicates both the card state
* and why there was no response. If the first attempt fails,
@@ -648,8 +727,12 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
}
/* We couldn't get a response from the card. Give up. */
- if (err)
+ if (err) {
+ /* Check if the card is removed */
+ if (mmc_detect_card_removed(card->host))
+ return ERR_NOMEDIUM;
return ERR_ABORT;
+ }
/* Flag ECC errors */
if ((status & R1_CARD_ECC_FAILED) ||
@@ -922,6 +1005,8 @@ static int mmc_blk_err_check(struct mmc_card *card,
return MMC_BLK_RETRY;
case ERR_ABORT:
return MMC_BLK_ABORT;
+ case ERR_NOMEDIUM:
+ return MMC_BLK_NOMEDIUM;
case ERR_CONTINUE:
break;
}
@@ -1255,6 +1340,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
if (!ret)
goto start_new_req;
break;
+ case MMC_BLK_NOMEDIUM:
+ goto cmd_abort;
}
if (ret) {
@@ -1271,6 +1358,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
cmd_abort:
spin_lock_irq(&md->lock);
+ if (mmc_card_removed(card))
+ req->cmd_flags |= REQ_QUIET;
while (ret)
ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
spin_unlock_irq(&md->lock);
@@ -1339,7 +1428,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
struct device *parent,
sector_t size,
bool default_ro,
- const char *subname)
+ const char *subname,
+ int area_type)
{
struct mmc_blk_data *md;
int devidx, ret;
@@ -1364,11 +1454,12 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
if (!subname) {
md->name_idx = find_first_zero_bit(name_use, max_devices);
__set_bit(md->name_idx, name_use);
- }
- else
+ } else
md->name_idx = ((struct mmc_blk_data *)
dev_to_disk(parent)->private_data)->name_idx;
+ md->area_type = area_type;
+
/*
* Set the read-only status based on the supported commands
* and the write protect switch.
@@ -1462,7 +1553,8 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
size = card->csd.capacity << (card->csd.read_blkbits - 9);
}
- md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL);
+ md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
+ MMC_BLK_DATA_AREA_MAIN);
return md;
}
@@ -1471,13 +1563,14 @@ static int mmc_blk_alloc_part(struct mmc_card *card,
unsigned int part_type,
sector_t size,
bool default_ro,
- const char *subname)
+ const char *subname,
+ int area_type)
{
char cap_str[10];
struct mmc_blk_data *part_md;
part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
- subname);
+ subname, area_type);
if (IS_ERR(part_md))
return PTR_ERR(part_md);
part_md->part_type = part_type;
@@ -1510,7 +1603,8 @@ static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
card->part[idx].part_cfg,
card->part[idx].size >> 9,
card->part[idx].force_ro,
- card->part[idx].name);
+ card->part[idx].name,
+ card->part[idx].area_type);
if (ret)
return ret;
}
@@ -1539,9 +1633,16 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
static void mmc_blk_remove_req(struct mmc_blk_data *md)
{
+ struct mmc_card *card;
+
if (md) {
+ card = md->queue.card;
if (md->disk->flags & GENHD_FL_UP) {
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
+ if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
+ card->ext_csd.boot_ro_lockable)
+ device_remove_file(disk_to_dev(md->disk),
+ &md->power_ro_lock);
/* Stop new requests from getting into the queue */
del_gendisk(md->disk);
@@ -1570,6 +1671,7 @@ static void mmc_blk_remove_parts(struct mmc_card *card,
static int mmc_add_disk(struct mmc_blk_data *md)
{
int ret;
+ struct mmc_card *card = md->queue.card;
add_disk(md->disk);
md->force_ro.show = force_ro_show;
@@ -1579,18 +1681,54 @@ static int mmc_add_disk(struct mmc_blk_data *md)
md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
if (ret)
- del_gendisk(md->disk);
+ goto force_ro_fail;
+
+ if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
+ card->ext_csd.boot_ro_lockable) {
+ mode_t mode;
+
+ if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
+ mode = S_IRUGO;
+ else
+ mode = S_IRUGO | S_IWUSR;
+
+ md->power_ro_lock.show = power_ro_lock_show;
+ md->power_ro_lock.store = power_ro_lock_store;
+ sysfs_attr_init(&md->power_ro_lock.attr);
+ md->power_ro_lock.attr.mode = mode;
+ md->power_ro_lock.attr.name =
+ "ro_lock_until_next_power_on";
+ ret = device_create_file(disk_to_dev(md->disk),
+ &md->power_ro_lock);
+ if (ret)
+ goto power_ro_lock_fail;
+ }
+ return ret;
+
+power_ro_lock_fail:
+ device_remove_file(disk_to_dev(md->disk), &md->force_ro);
+force_ro_fail:
+ del_gendisk(md->disk);
return ret;
}
+#define CID_MANFID_SANDISK 0x2
+#define CID_MANFID_TOSHIBA 0x11
+#define CID_MANFID_MICRON 0x13
+
static const struct mmc_fixup blk_fixups[] =
{
- MMC_FIXUP("SEM02G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
- MMC_FIXUP("SEM04G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
- MMC_FIXUP("SEM08G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
- MMC_FIXUP("SEM16G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
- MMC_FIXUP("SEM32G", 0x2, 0x100, add_quirk, MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
/*
* Some MMC cards experience performance degradation with CMD23
@@ -1600,18 +1738,18 @@ static const struct mmc_fixup blk_fixups[] =
*
* N.B. This doesn't affect SD cards.
*/
- MMC_FIXUP("MMC08G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
MMC_QUIRK_BLK_NO_CMD23),
- MMC_FIXUP("MMC16G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
MMC_QUIRK_BLK_NO_CMD23),
- MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
MMC_QUIRK_BLK_NO_CMD23),
/*
* Some Micron MMC cards needs longer data read timeout than
* indicated in CSD.
*/
- MMC_FIXUP(CID_NAME_ANY, 0x13, 0x200, add_quirk_mmc,
+ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
MMC_QUIRK_LONG_READ_TIME),
END_FIXUP
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index b038c4a..759714e 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -1581,6 +1581,7 @@ static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
t->max_segs = test->card->host->max_segs;
t->max_seg_sz = test->card->host->max_seg_size;
+ t->max_seg_sz -= t->max_seg_sz % 512;
t->max_tfr = t->max_sz;
if (t->max_tfr >> 9 > test->card->host->max_blk_count)
@@ -2949,7 +2950,7 @@ static void mmc_test_free_dbgfs_file(struct mmc_card *card)
}
static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
- const char *name, mode_t mode, const struct file_operations *fops)
+ const char *name, umode_t mode, const struct file_operations *fops)
{
struct dentry *file = NULL;
struct mmc_test_dbgfs_file *df;
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index dcad59c..2517547 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -29,6 +29,8 @@
*/
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
+ struct mmc_queue *mq = q->queuedata;
+
/*
* We only like normal block requests and discards.
*/
@@ -37,6 +39,9 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
return BLKPREP_KILL;
}
+ if (mq && mmc_card_removed(mq->card))
+ return BLKPREP_KILL;
+
req->cmd_flags |= REQ_DONTPREP;
return BLKPREP_OK;
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 6395019..dca4428 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -7,6 +7,6 @@ mmc_core-y := core.o bus.o host.o \
mmc.o mmc_ops.o sd.o sd_ops.o \
sdio.o sdio_ops.o sdio_bus.o \
sdio_cis.o sdio_io.o sdio_irq.o \
- quirks.o
+ quirks.o cd-gpio.o
mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 6be4924..5d011a3 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -303,10 +303,11 @@ int mmc_add_card(struct mmc_card *card)
mmc_card_ddr_mode(card) ? "DDR " : "",
type);
} else {
- printk(KERN_INFO "%s: new %s%s%s card at address %04x\n",
+ pr_info("%s: new %s%s%s%s card at address %04x\n",
mmc_hostname(card->host),
- mmc_sd_card_uhs(card) ? "ultra high speed " :
+ mmc_card_uhs(card) ? "ultra high speed " :
(mmc_card_highspeed(card) ? "high speed " : ""),
+ (mmc_card_hs200(card) ? "HS200 " : ""),
mmc_card_ddr_mode(card) ? "DDR " : "",
type, card->rca);
}
diff --git a/drivers/mmc/core/cd-gpio.c b/drivers/mmc/core/cd-gpio.c
new file mode 100644
index 0000000..082202a
--- /dev/null
+++ b/drivers/mmc/core/cd-gpio.c
@@ -0,0 +1,74 @@
+/*
+ * Generic GPIO card-detect helper
+ *
+ * Copyright (C) 2011, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/mmc/host.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+struct mmc_cd_gpio {
+ unsigned int gpio;
+ char label[0];
+};
+
+static irqreturn_t mmc_cd_gpio_irqt(int irq, void *dev_id)
+{
+ /* Schedule a card detection after a debounce timeout */
+ mmc_detect_change(dev_id, msecs_to_jiffies(100));
+ return IRQ_HANDLED;
+}
+
+int mmc_cd_gpio_request(struct mmc_host *host, unsigned int gpio,
+ unsigned int irq, unsigned long flags)
+{
+ size_t len = strlen(dev_name(host->parent)) + 4;
+ struct mmc_cd_gpio *cd = kmalloc(sizeof(*cd) + len, GFP_KERNEL);
+ int ret;
+
+ if (!cd)
+ return -ENOMEM;
+
+ snprintf(cd->label, len, "%s cd", dev_name(host->parent));
+
+ ret = gpio_request_one(gpio, GPIOF_DIR_IN, cd->label);
+ if (ret < 0)
+ goto egpioreq;
+
+ ret = request_threaded_irq(irq, NULL, mmc_cd_gpio_irqt,
+ flags, cd->label, host);
+ if (ret < 0)
+ goto eirqreq;
+
+ cd->gpio = gpio;
+ host->hotplug.irq = irq;
+ host->hotplug.handler_priv = cd;
+
+ return 0;
+
+eirqreq:
+ gpio_free(gpio);
+egpioreq:
+ kfree(cd);
+ return ret;
+}
+EXPORT_SYMBOL(mmc_cd_gpio_request);
+
+void mmc_cd_gpio_free(struct mmc_host *host)
+{
+ struct mmc_cd_gpio *cd = host->hotplug.handler_priv;
+
+ free_irq(host->hotplug.irq, host);
+ gpio_free(cd->gpio);
+ kfree(cd);
+}
+EXPORT_SYMBOL(mmc_cd_gpio_free);
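
A host driver is expected to call these two helpers from its probe/remove paths. A minimal usage sketch, not part of this patch: the declaring header and the IRQ trigger flags are assumptions of the example, and the GPIO number would come from platform data.

#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/mmc/host.h>
/* mmc_cd_gpio_request()/mmc_cd_gpio_free() are assumed to be declared
 * in a companion header added elsewhere in this series. */

static int example_setup_cd(struct mmc_host *mmc, unsigned int cd_gpio)
{
	/* Trigger on both edges so insertion and removal both schedule
	 * the debounced rescan in mmc_cd_gpio_irqt() above. */
	return mmc_cd_gpio_request(mmc, cd_gpio, gpio_to_irq(cd_gpio),
				   IRQF_TRIGGER_RISING |
				   IRQF_TRIGGER_FALLING);
}

static void example_teardown_cd(struct mmc_host *mmc)
{
	mmc_cd_gpio_free(mmc);
}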
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 950b97d..132378b 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -48,7 +48,7 @@ static struct workqueue_struct *workqueue;
* performance cost, and for other reasons may not always be desired.
* So we allow it it to be disabled.
*/
-int use_spi_crc = 1;
+bool use_spi_crc = 1;
module_param(use_spi_crc, bool, 0);
/*
@@ -58,9 +58,9 @@ module_param(use_spi_crc, bool, 0);
* overridden if necessary.
*/
#ifdef CONFIG_MMC_UNSAFE_RESUME
-int mmc_assume_removable;
+bool mmc_assume_removable;
#else
-int mmc_assume_removable = 1;
+bool mmc_assume_removable = 1;
#endif
EXPORT_SYMBOL(mmc_assume_removable);
module_param_named(removable, mmc_assume_removable, bool, 0644);
@@ -140,7 +140,7 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
cmd->retries = 0;
}
- if (err && cmd->retries) {
+ if (err && cmd->retries && !mmc_card_removed(host->card)) {
/*
* Request starter must handle retries - see
* mmc_wait_for_req_done().
@@ -247,6 +247,11 @@ static void __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
init_completion(&mrq->completion);
mrq->done = mmc_wait_done;
+ if (mmc_card_removed(host->card)) {
+ mrq->cmd->error = -ENOMEDIUM;
+ complete(&mrq->completion);
+ return;
+ }
mmc_start_request(host, mrq);
}
@@ -259,7 +264,8 @@ static void mmc_wait_for_req_done(struct mmc_host *host,
wait_for_completion(&mrq->completion);
cmd = mrq->cmd;
- if (!cmd->error || !cmd->retries)
+ if (!cmd->error || !cmd->retries ||
+ mmc_card_removed(host->card))
break;
pr_debug("%s: req failed (CMD%u): %d, retrying...\n",
@@ -284,8 +290,11 @@ static void mmc_wait_for_req_done(struct mmc_host *host,
static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
bool is_first_req)
{
- if (host->ops->pre_req)
+ if (host->ops->pre_req) {
+ mmc_host_clk_hold(host);
host->ops->pre_req(host, mrq, is_first_req);
+ mmc_host_clk_release(host);
+ }
}
/**
@@ -300,8 +309,11 @@ static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq,
static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq,
int err)
{
- if (host->ops->post_req)
+ if (host->ops->post_req) {
+ mmc_host_clk_hold(host);
host->ops->post_req(host, mrq, err);
+ mmc_host_clk_release(host);
+ }
}
/**
@@ -614,7 +626,9 @@ int mmc_host_enable(struct mmc_host *host)
int err;
host->en_dis_recurs = 1;
+ mmc_host_clk_hold(host);
err = host->ops->enable(host);
+ mmc_host_clk_release(host);
host->en_dis_recurs = 0;
if (err) {
@@ -634,7 +648,9 @@ static int mmc_host_do_disable(struct mmc_host *host, int lazy)
int err;
host->en_dis_recurs = 1;
+ mmc_host_clk_hold(host);
err = host->ops->disable(host, lazy);
+ mmc_host_clk_release(host);
host->en_dis_recurs = 0;
if (err < 0) {
@@ -1115,6 +1131,10 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,
* might not allow this operation
*/
voltage = regulator_get_voltage(supply);
+
+ if (mmc->caps2 & MMC_CAP2_BROKEN_VOLTAGE)
+ min_uV = max_uV = voltage;
+
if (voltage < 0)
result = voltage;
else if (voltage < min_uV || voltage > max_uV)
@@ -1197,8 +1217,11 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11
host->ios.signal_voltage = signal_voltage;
- if (host->ops->start_signal_voltage_switch)
+ if (host->ops->start_signal_voltage_switch) {
+ mmc_host_clk_hold(host);
err = host->ops->start_signal_voltage_switch(host, &host->ios);
+ mmc_host_clk_release(host);
+ }
return err;
}
@@ -1233,6 +1256,7 @@ static void mmc_poweroff_notify(struct mmc_host *host)
int err = 0;
card = host->card;
+ mmc_claim_host(host);
/*
* Send power notify command only if card
@@ -1263,6 +1287,7 @@ static void mmc_poweroff_notify(struct mmc_host *host)
/* Set the card state to no notification after the poweroff */
card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
}
+ mmc_release_host(host);
}
/*
@@ -1321,12 +1346,28 @@ static void mmc_power_up(struct mmc_host *host)
void mmc_power_off(struct mmc_host *host)
{
+ int err = 0;
mmc_host_clk_hold(host);
host->ios.clock = 0;
host->ios.vdd = 0;
- mmc_poweroff_notify(host);
+ /*
+ * For eMMC 4.5 device send AWAKE command before
+ * POWER_OFF_NOTIFY command, because in sleep state
+ * eMMC 4.5 devices respond to only RESET and AWAKE cmd
+ */
+ if (host->card && mmc_card_is_sleep(host->card) &&
+ host->bus_ops->resume) {
+ err = host->bus_ops->resume(host);
+
+ if (!err)
+ mmc_poweroff_notify(host);
+ else
+ pr_warning("%s: error %d during resume "
+ "(continue with poweroff sequence)\n",
+ mmc_hostname(host), err);
+ }
/*
* Reset ocr mask to be the highest possible voltage supported for
@@ -1456,7 +1497,7 @@ void mmc_detect_change(struct mmc_host *host, unsigned long delay)
WARN_ON(host->removed);
spin_unlock_irqrestore(&host->lock, flags);
#endif
-
+ host->detect_change = 1;
mmc_schedule_delayed_work(&host->detect, delay);
}
@@ -2027,6 +2068,9 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
*/
mmc_hw_reset_for_init(host);
+ /* Initialization should be done at 3.3 V I/O voltage. */
+ mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
+
/*
* sdio_reset sends CMD52 to reset card. Since we do not know
* if the card is being re-initialized, just send it. CMD52
@@ -2049,6 +2093,43 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
return -EIO;
}
+int _mmc_detect_card_removed(struct mmc_host *host)
+{
+ int ret;
+
+ if ((host->caps & MMC_CAP_NONREMOVABLE) || !host->bus_ops->alive)
+ return 0;
+
+ if (!host->card || mmc_card_removed(host->card))
+ return 1;
+
+ ret = host->bus_ops->alive(host);
+ if (ret) {
+ mmc_card_set_removed(host->card);
+ pr_debug("%s: card remove detected\n", mmc_hostname(host));
+ }
+
+ return ret;
+}
+
+int mmc_detect_card_removed(struct mmc_host *host)
+{
+ struct mmc_card *card = host->card;
+
+ WARN_ON(!host->claimed);
+ /*
+ * The card will be considered unchanged unless we have been asked to
+ * detect a change or host requires polling to provide card detection.
+ */
+ if (card && !host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
+ return mmc_card_removed(card);
+
+ host->detect_change = 0;
+
+ return _mmc_detect_card_removed(host);
+}
+EXPORT_SYMBOL(mmc_detect_card_removed);
+
void mmc_rescan(struct work_struct *work)
{
static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
@@ -2069,6 +2150,8 @@ void mmc_rescan(struct work_struct *work)
&& !(host->caps & MMC_CAP_NONREMOVABLE))
host->bus_ops->detect(host);
+ host->detect_change = 0;
+
/*
* Let mmc_bus_put() free the bus/bus_ops if we've found that
* the card is no longer present.
@@ -2130,6 +2213,7 @@ void mmc_stop_host(struct mmc_host *host)
mmc_bus_get(host);
if (host->bus_ops && !host->bus_dead) {
+ /* Calling bus_ops->remove() with a claimed host can deadlock */
if (host->bus_ops->remove)
host->bus_ops->remove(host);
@@ -2201,6 +2285,9 @@ int mmc_card_awake(struct mmc_host *host)
{
int err = -ENOSYS;
+ if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
+ return 0;
+
mmc_bus_get(host);
if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
@@ -2216,6 +2303,9 @@ int mmc_card_sleep(struct mmc_host *host)
{
int err = -ENOSYS;
+ if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD)
+ return 0;
+
mmc_bus_get(host);
if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
@@ -2270,6 +2360,7 @@ EXPORT_SYMBOL(mmc_flush_cache);
int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
{
struct mmc_card *card = host->card;
+ unsigned int timeout;
int err = 0;
if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) ||
@@ -2280,16 +2371,18 @@ int mmc_cache_ctrl(struct mmc_host *host, u8 enable)
(card->ext_csd.cache_size > 0)) {
enable = !!enable;
- if (card->ext_csd.cache_ctrl ^ enable)
+ if (card->ext_csd.cache_ctrl ^ enable) {
+ timeout = enable ? card->ext_csd.generic_cmd6_time : 0;
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_CACHE_CTRL, enable, 0);
- if (err)
- pr_err("%s: cache %s error %d\n",
- mmc_hostname(card->host),
- enable ? "on" : "off",
- err);
- else
- card->ext_csd.cache_ctrl = enable;
+ EXT_CSD_CACHE_CTRL, enable, timeout);
+ if (err)
+ pr_err("%s: cache %s error %d\n",
+ mmc_hostname(card->host),
+ enable ? "on" : "off",
+ err);
+ else
+ card->ext_csd.cache_ctrl = enable;
+ }
}
return err;
@@ -2310,7 +2403,13 @@ int mmc_suspend_host(struct mmc_host *host)
cancel_delayed_work(&host->disable);
cancel_delayed_work(&host->detect);
mmc_flush_scheduled_work();
- err = mmc_cache_ctrl(host, 0);
+ if (mmc_try_claim_host(host)) {
+ err = mmc_cache_ctrl(host, 0);
+ mmc_do_release_host(host);
+ } else {
+ err = -EBUSY;
+ }
+
if (err)
goto out;
@@ -2325,12 +2424,6 @@ int mmc_suspend_host(struct mmc_host *host)
*/
if (mmc_try_claim_host(host)) {
if (host->bus_ops->suspend) {
- /*
- * For eMMC 4.5 device send notify command
- * before sleep, because in sleep state eMMC 4.5
- * devices respond to only RESET and AWAKE cmd
- */
- mmc_poweroff_notify(host);
err = host->bus_ops->suspend(host);
}
mmc_do_release_host(host);
@@ -2338,7 +2431,9 @@ int mmc_suspend_host(struct mmc_host *host)
if (err == -ENOSYS || !host->bus_ops->resume) {
/*
* We simply "remove" the card in this case.
- * It will be redetected on resume.
+ * It will be redetected on resume. (Calling
+ * bus_ops->remove() with a claimed host can
+ * deadlock.)
*/
if (host->bus_ops->remove)
host->bus_ops->remove(host);
@@ -2431,11 +2526,11 @@ int mmc_pm_notify(struct notifier_block *notify_block,
if (!host->bus_ops || host->bus_ops->suspend)
break;
- mmc_claim_host(host);
-
+ /* Calling bus_ops->remove() with a claimed host can deadlock */
if (host->bus_ops->remove)
host->bus_ops->remove(host);
+ mmc_claim_host(host);
mmc_detach_bus(host);
mmc_power_off(host);
mmc_release_host(host);
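
The new mmc_detect_card_removed() is meant to be polled by upper layers (for example the block driver's request path) so that I/O can be failed quickly once the card is gone; per the WARN_ON above it must be called with the host claimed. A hedged sketch of such a consumer, not taken from this patch:

static int example_fail_fast_if_removed(struct mmc_card *card)
{
	int gone;

	mmc_claim_host(card->host);
	gone = mmc_detect_card_removed(card->host);
	mmc_release_host(card->host);

	return gone ? -ENOMEDIUM : 0;
}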
diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h
index 14664f1..3bdafbc 100644
--- a/drivers/mmc/core/core.h
+++ b/drivers/mmc/core/core.h
@@ -24,6 +24,7 @@ struct mmc_bus_ops {
int (*resume)(struct mmc_host *);
int (*power_save)(struct mmc_host *);
int (*power_restore)(struct mmc_host *);
+ int (*alive)(struct mmc_host *);
};
void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops);
@@ -59,12 +60,14 @@ void mmc_rescan(struct work_struct *work);
void mmc_start_host(struct mmc_host *host);
void mmc_stop_host(struct mmc_host *host);
+int _mmc_detect_card_removed(struct mmc_host *host);
+
int mmc_attach_mmc(struct mmc_host *host);
int mmc_attach_sd(struct mmc_host *host);
int mmc_attach_sdio(struct mmc_host *host);
/* Module parameters */
-extern int use_spi_crc;
+extern bool use_spi_crc;
/* Debugfs information for hosts and cards */
void mmc_add_host_debugfs(struct mmc_host *host);
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 3923880..9ab5b17 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -57,6 +57,8 @@ static int mmc_ios_show(struct seq_file *s, void *data)
const char *str;
seq_printf(s, "clock:\t\t%u Hz\n", ios->clock);
+ if (host->actual_clock)
+ seq_printf(s, "actual clock:\t%u Hz\n", host->actual_clock);
seq_printf(s, "vdd:\t\t%u ", ios->vdd);
if ((1 << ios->vdd) & MMC_VDD_165_195)
seq_printf(s, "(1.65 - 1.95 V)\n");
@@ -133,6 +135,9 @@ static int mmc_ios_show(struct seq_file *s, void *data)
case MMC_TIMING_UHS_DDR50:
str = "sd uhs DDR50";
break;
+ case MMC_TIMING_MMC_HS200:
+ str = "mmc high-speed SDR200";
+ break;
default:
str = "invalid";
break;
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index d31c78b..c3704e2 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -54,6 +54,27 @@ static DEFINE_IDR(mmc_host_idr);
static DEFINE_SPINLOCK(mmc_host_lock);
#ifdef CONFIG_MMC_CLKGATE
+static ssize_t clkgate_delay_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay);
+}
+
+static ssize_t clkgate_delay_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ unsigned long flags, value;
+
+ if (kstrtoul(buf, 0, &value))
+ return -EINVAL;
+
+ spin_lock_irqsave(&host->clk_lock, flags);
+ host->clkgate_delay = value;
+ spin_unlock_irqrestore(&host->clk_lock, flags);
+ return count;
+}
/*
* Enabling clock gating will make the core call out to the host
@@ -114,7 +135,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host)
static void mmc_host_clk_gate_work(struct work_struct *work)
{
struct mmc_host *host = container_of(work, struct mmc_host,
- clk_gate_work);
+ clk_gate_work.work);
mmc_host_clk_gate_delayed(host);
}
@@ -131,6 +152,8 @@ void mmc_host_clk_hold(struct mmc_host *host)
{
unsigned long flags;
+ /* cancel any clock gating work scheduled by mmc_host_clk_release() */
+ cancel_delayed_work_sync(&host->clk_gate_work);
mutex_lock(&host->clk_gate_mutex);
spin_lock_irqsave(&host->clk_lock, flags);
if (host->clk_gated) {
@@ -180,7 +203,8 @@ void mmc_host_clk_release(struct mmc_host *host)
host->clk_requests--;
if (mmc_host_may_gate_card(host->card) &&
!host->clk_requests)
- queue_work(system_nrt_wq, &host->clk_gate_work);
+ queue_delayed_work(system_nrt_wq, &host->clk_gate_work,
+ msecs_to_jiffies(host->clkgate_delay));
spin_unlock_irqrestore(&host->clk_lock, flags);
}
@@ -213,8 +237,13 @@ static inline void mmc_host_clk_init(struct mmc_host *host)
host->clk_requests = 0;
/* Hold MCI clock for 8 cycles by default */
host->clk_delay = 8;
+ /*
+ * Default clock gating delay is 0ms to avoid wasting power.
+ * This value can be tuned by writing to the clkgate_delay sysfs entry.

+ */
+ host->clkgate_delay = 0;
host->clk_gated = false;
- INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
+ INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
spin_lock_init(&host->clk_lock);
mutex_init(&host->clk_gate_mutex);
}
@@ -229,7 +258,7 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
* Wait for any outstanding gate and then make sure we're
* ungated before exiting.
*/
- if (cancel_work_sync(&host->clk_gate_work))
+ if (cancel_delayed_work_sync(&host->clk_gate_work))
mmc_host_clk_gate_delayed(host);
if (host->clk_gated)
mmc_host_clk_hold(host);
@@ -237,6 +266,17 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
WARN_ON(host->clk_requests > 1);
}
+static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
+{
+ host->clkgate_delay_attr.show = clkgate_delay_show;
+ host->clkgate_delay_attr.store = clkgate_delay_store;
+ sysfs_attr_init(&host->clkgate_delay_attr.attr);
+ host->clkgate_delay_attr.attr.name = "clkgate_delay";
+ host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR;
+ if (device_create_file(&host->class_dev, &host->clkgate_delay_attr))
+ pr_err("%s: Failed to create clkgate_delay sysfs entry\n",
+ mmc_hostname(host));
+}
#else
static inline void mmc_host_clk_init(struct mmc_host *host)
@@ -247,6 +287,10 @@ static inline void mmc_host_clk_exit(struct mmc_host *host)
{
}
+static inline void mmc_host_clk_sysfs_init(struct mmc_host *host)
+{
+}
+
#endif
/**
@@ -335,6 +379,7 @@ int mmc_add_host(struct mmc_host *host)
#ifdef CONFIG_DEBUG_FS
mmc_add_host_debugfs(host);
#endif
+ mmc_host_clk_sysfs_init(host);
mmc_start_host(host);
register_pm_notifier(&host->pm_notify);
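
Two points about the clock-gating changes above. First, gating now fires from delayed work, tunable per host through the new clkgate_delay attribute (value in milliseconds, exposed on the host's class device; the exact sysfs path is not shown in this hunk). Second, the rest of the series consistently wraps host->ops callbacks in a hold/release pair so the controller clock is guaranteed to be running while the callback executes. A minimal sketch of that idiom:

static void example_call_host_op(struct mmc_host *host)
{
	if (host->ops->enable_sdio_irq) {
		mmc_host_clk_hold(host);    /* ungate, cancel pending gate work */
		host->ops->enable_sdio_irq(host, 1);
		mmc_host_clk_release(host); /* re-arm the delayed gating work */
	}
}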
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index fb8a5cd..08a7852 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -14,27 +14,6 @@
int mmc_register_host_class(void);
void mmc_unregister_host_class(void);
-
-#ifdef CONFIG_MMC_CLKGATE
-void mmc_host_clk_hold(struct mmc_host *host);
-void mmc_host_clk_release(struct mmc_host *host);
-unsigned int mmc_host_clk_rate(struct mmc_host *host);
-
-#else
-static inline void mmc_host_clk_hold(struct mmc_host *host)
-{
-}
-
-static inline void mmc_host_clk_release(struct mmc_host *host)
-{
-}
-
-static inline unsigned int mmc_host_clk_rate(struct mmc_host *host)
-{
- return host->ios.clock;
-}
-#endif
-
void mmc_host_deeper_disable(struct work_struct *work);
#endif
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index d240427..2b9ed14 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -286,6 +286,27 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
}
card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) {
+ case EXT_CSD_CARD_TYPE_SDR_ALL:
+ case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_8V:
+ case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_2V:
+ case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_52:
+ card->ext_csd.hs_max_dtr = 200000000;
+ card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_200;
+ break;
+ case EXT_CSD_CARD_TYPE_SDR_1_2V_ALL:
+ case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_8V:
+ case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_2V:
+ case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_52:
+ card->ext_csd.hs_max_dtr = 200000000;
+ card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_2V;
+ break;
+ case EXT_CSD_CARD_TYPE_SDR_1_8V_ALL:
+ case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_8V:
+ case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_2V:
+ case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_52:
+ card->ext_csd.hs_max_dtr = 200000000;
+ card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_8V;
+ break;
case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
EXT_CSD_CARD_TYPE_26:
card->ext_csd.hs_max_dtr = 52000000;
@@ -348,13 +369,14 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
mmc_part_add(card, part_size,
EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
- "boot%d", idx, true);
+ "boot%d", idx, true,
+ MMC_BLK_DATA_AREA_BOOT);
}
}
}
card->ext_csd.raw_hc_erase_gap_size =
- ext_csd[EXT_CSD_PARTITION_ATTRIBUTE];
+ ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
card->ext_csd.raw_sec_trim_mult =
ext_csd[EXT_CSD_SEC_TRIM_MULT];
card->ext_csd.raw_sec_erase_mult =
@@ -435,7 +457,8 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
hc_wp_grp_sz);
mmc_part_add(card, part_size << 19,
EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
- "gp%d", idx, false);
+ "gp%d", idx, false,
+ MMC_BLK_DATA_AREA_GP);
}
}
card->ext_csd.sec_trim_mult =
@@ -446,6 +469,14 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
card->ext_csd.trim_timeout = 300 *
ext_csd[EXT_CSD_TRIM_MULT];
+
+ /*
+ * Note that the call to mmc_part_add above defaults to read
+ * only. If this default assumption is changed, the call must
+ * take into account the value of boot_locked below.
+ */
+ card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
+ card->ext_csd.boot_ro_lockable = true;
}
if (card->ext_csd.rev >= 5) {
@@ -520,7 +551,7 @@ static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
goto out;
/* only compare read only fields */
- err = (!(card->ext_csd.raw_partition_support ==
+ err = !((card->ext_csd.raw_partition_support ==
bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
(card->ext_csd.raw_erased_mem_count ==
bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
@@ -690,6 +721,79 @@ static int mmc_select_powerclass(struct mmc_card *card,
}
/*
+ * Select the desired bus width and switch to HS200 mode
+ * if the bus width was set without error
+ */
+static int mmc_select_hs200(struct mmc_card *card)
+{
+ int idx, err = 0;
+ struct mmc_host *host;
+ static unsigned ext_csd_bits[] = {
+ EXT_CSD_BUS_WIDTH_4,
+ EXT_CSD_BUS_WIDTH_8,
+ };
+ static unsigned bus_widths[] = {
+ MMC_BUS_WIDTH_4,
+ MMC_BUS_WIDTH_8,
+ };
+
+ BUG_ON(!card);
+
+ host = card->host;
+
+ if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V &&
+ host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
+ if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0))
+ err = mmc_set_signal_voltage(host,
+ MMC_SIGNAL_VOLTAGE_180, 0);
+
+ /* If it fails, try again during the next card power cycle */
+ if (err)
+ goto err;
+
+ idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 1 : 0;
+
+ /*
+ * Unlike SD, MMC cards don't have a configuration register that
+ * reports the supported bus width. So either run the bus test
+ * command to identify the supported bus width, or compare the
+ * ext_csd values at the current bus width with the 1-bit-mode
+ * ext_csd values read earlier.
+ */
+ for (; idx >= 0; idx--) {
+
+ /*
+ * If the host is capable of 8-bit transfers, switch the
+ * device to 8-bit transfer mode first. If the mmc switch
+ * command returns an error, fall back to 4-bit transfer
+ * mode. On success, set the corresponding bus width on
+ * the host.
+ */
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_BUS_WIDTH,
+ ext_csd_bits[idx],
+ card->ext_csd.generic_cmd6_time);
+ if (err)
+ continue;
+
+ mmc_set_bus_width(card->host, bus_widths[idx]);
+
+ if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
+ err = mmc_compare_ext_csds(card, bus_widths[idx]);
+ else
+ err = mmc_bus_test(card, bus_widths[idx]);
+ if (!err)
+ break;
+ }
+
+ /* switch to HS200 mode if bus width set successfully */
+ if (!err)
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, 2, 0);
+err:
+ return err;
+}
+
+/*
* Handle the detection and initialisation of a card.
*
* In the case of a resume, "oldcard" will contain the card
@@ -712,6 +816,9 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
if (!mmc_host_is_spi(host))
mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
+ /* Initialization should be done at 3.3 V I/O voltage. */
+ mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
+
/*
* Since we're changing the OCR value, we seem to
* need to tell some cards to go back to the idle
@@ -895,11 +1002,16 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
/*
* Activate high speed (if supported)
*/
- if ((card->ext_csd.hs_max_dtr != 0) &&
- (host->caps & MMC_CAP_MMC_HIGHSPEED)) {
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_HS_TIMING, 1,
- card->ext_csd.generic_cmd6_time);
+ if (card->ext_csd.hs_max_dtr != 0) {
+ err = 0;
+ if (card->ext_csd.hs_max_dtr > 52000000 &&
+ host->caps2 & MMC_CAP2_HS200)
+ err = mmc_select_hs200(card);
+ else if (host->caps & MMC_CAP_MMC_HIGHSPEED)
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HS_TIMING, 1,
+ card->ext_csd.generic_cmd6_time);
+
if (err && err != -EBADMSG)
goto free_card;
@@ -908,8 +1020,15 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
mmc_hostname(card->host));
err = 0;
} else {
- mmc_card_set_highspeed(card);
- mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
+ if (card->ext_csd.hs_max_dtr > 52000000 &&
+ host->caps2 & MMC_CAP2_HS200) {
+ mmc_card_set_hs200(card);
+ mmc_set_timing(card->host,
+ MMC_TIMING_MMC_HS200);
+ } else {
+ mmc_card_set_highspeed(card);
+ mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
+ }
}
}
@@ -934,7 +1053,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
*/
max_dtr = (unsigned int)-1;
- if (mmc_card_highspeed(card)) {
+ if (mmc_card_highspeed(card) || mmc_card_hs200(card)) {
if (max_dtr > card->ext_csd.hs_max_dtr)
max_dtr = card->ext_csd.hs_max_dtr;
} else if (max_dtr > card->csd.max_dtr) {
@@ -960,9 +1079,48 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
}
/*
+ * Indicate HS200 SDR mode (if supported).
+ */
+ if (mmc_card_hs200(card)) {
+ u32 ext_csd_bits;
+ u32 bus_width = card->host->ios.bus_width;
+
+ /*
+ * For devices supporting HS200 mode, the bus width has
+ * to be set before executing the tuning function. If it is
+ * not set before tuning, the device will respond with CRC
+ * errors on the CMD line. So for HS200 the
+ * sequence will be
+ * 1. set bus width 4bit / 8 bit (1 bit not supported)
+ * 2. switch to HS200 mode
+ * 3. set the clock to > 52Mhz <=200MHz and
+ * 4. execute tuning for HS200
+ */
+ if ((host->caps2 & MMC_CAP2_HS200) &&
+ card->host->ops->execute_tuning)
+ err = card->host->ops->execute_tuning(card->host,
+ MMC_SEND_TUNING_BLOCK_HS200);
+ if (err) {
+ pr_warning("%s: tuning execution failed\n",
+ mmc_hostname(card->host));
+ goto err;
+ }
+
+ ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
+ EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
+ err = mmc_select_powerclass(card, ext_csd_bits, ext_csd);
+ if (err) {
+ pr_err("%s: power class selection to bus width %d failed\n",
+ mmc_hostname(card->host), 1 << bus_width);
+ goto err;
+ }
+ }
+
+ /*
* Activate wide bus and DDR (if supported).
*/
- if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
+ if (!mmc_card_hs200(card) &&
+ (card->csd.mmca_vsn >= CSD_SPEC_VER_4) &&
(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) {
static unsigned ext_csd_bits[][2] = {
{ EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 },
@@ -1048,7 +1206,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
*
* WARNING: eMMC rules are NOT the same as SD DDR
*/
- if (ddr == EXT_CSD_CARD_TYPE_DDR_1_2V) {
+ if (ddr == MMC_1_2V_DDR_MODE) {
err = mmc_set_signal_voltage(host,
MMC_SIGNAL_VOLTAGE_120, 0);
if (err)
@@ -1067,14 +1225,23 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
if ((host->caps2 & MMC_CAP2_CACHE_CTRL) &&
card->ext_csd.cache_size > 0) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_CACHE_CTRL, 1, 0);
+ EXT_CSD_CACHE_CTRL, 1,
+ card->ext_csd.generic_cmd6_time);
if (err && err != -EBADMSG)
goto free_card;
/*
* Only if no error, cache is turned on successfully.
*/
- card->ext_csd.cache_ctrl = err ? 0 : 1;
+ if (err) {
+ pr_warning("%s: Cache is supported, "
+ "but failed to turn on (%d)\n",
+ mmc_hostname(card->host), err);
+ card->ext_csd.cache_ctrl = 0;
+ err = 0;
+ } else {
+ card->ext_csd.cache_ctrl = 1;
+ }
}
if (!oldcard)
@@ -1105,6 +1272,14 @@ static void mmc_remove(struct mmc_host *host)
}
/*
+ * Card detection - card is alive.
+ */
+static int mmc_alive(struct mmc_host *host)
+{
+ return mmc_send_status(host->card, NULL);
+}
+
+/*
* Card detection callback from host.
*/
static void mmc_detect(struct mmc_host *host)
@@ -1119,7 +1294,7 @@ static void mmc_detect(struct mmc_host *host)
/*
* Just check if our card has been removed.
*/
- err = mmc_send_status(host->card, NULL);
+ err = _mmc_detect_card_removed(host);
mmc_release_host(host);
@@ -1144,11 +1319,13 @@ static int mmc_suspend(struct mmc_host *host)
BUG_ON(!host->card);
mmc_claim_host(host);
- if (mmc_card_can_sleep(host))
+ if (mmc_card_can_sleep(host)) {
err = mmc_card_sleep(host);
- else if (!mmc_host_is_spi(host))
+ if (!err)
+ mmc_card_set_sleep(host->card);
+ } else if (!mmc_host_is_spi(host))
mmc_deselect_cards(host);
- host->card->state &= ~MMC_STATE_HIGHSPEED;
+ host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
mmc_release_host(host);
return err;
@@ -1168,7 +1345,11 @@ static int mmc_resume(struct mmc_host *host)
BUG_ON(!host->card);
mmc_claim_host(host);
- err = mmc_init_card(host, host->ocr, host->card);
+ if (mmc_card_is_sleep(host->card)) {
+ err = mmc_card_awake(host);
+ mmc_card_clr_sleep(host->card);
+ } else
+ err = mmc_init_card(host, host->ocr, host->card);
mmc_release_host(host);
return err;
@@ -1178,7 +1359,8 @@ static int mmc_power_restore(struct mmc_host *host)
{
int ret;
- host->card->state &= ~MMC_STATE_HIGHSPEED;
+ host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
+ mmc_card_clr_sleep(host->card);
mmc_claim_host(host);
ret = mmc_init_card(host, host->ocr, host->card);
mmc_release_host(host);
@@ -1224,6 +1406,7 @@ static const struct mmc_bus_ops mmc_ops = {
.suspend = NULL,
.resume = NULL,
.power_restore = mmc_power_restore,
+ .alive = mmc_alive,
};
static const struct mmc_bus_ops mmc_ops_unsafe = {
@@ -1234,6 +1417,7 @@ static const struct mmc_bus_ops mmc_ops_unsafe = {
.suspend = mmc_suspend,
.resume = mmc_resume,
.power_restore = mmc_power_restore,
+ .alive = mmc_alive,
};
static void mmc_attach_bus_ops(struct mmc_host *host)
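
Pulling the HS200 pieces of mmc.c together, the bring-up order that mmc_select_hs200() and mmc_init_card() implement is: bus width first, then the HS_TIMING switch, then the clock, then tuning. A condensed sketch of that ordering (illustrative only, not a drop-in replacement for the code above; error paths trimmed):

static int example_hs200_bringup(struct mmc_card *card, u8 ext_csd_bus_width,
				 unsigned int bus_width, unsigned int max_dtr)
{
	struct mmc_host *host = card->host;
	int err;

	/* 1. Set the bus width (4- or 8-bit; 1-bit is not supported). */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BUS_WIDTH,
			 ext_csd_bus_width, card->ext_csd.generic_cmd6_time);
	if (err)
		return err;
	mmc_set_bus_width(host, bus_width);

	/* 2. Switch HS_TIMING to HS200. */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_HS_TIMING, 2, 0);
	if (err)
		return err;
	mmc_set_timing(host, MMC_TIMING_MMC_HS200);

	/* 3. Raise the clock to > 52 MHz and <= 200 MHz. */
	mmc_set_clock(host, max_dtr);

	/* 4. Execute tuning with CMD21. */
	if (host->ops->execute_tuning)
		err = host->ops->execute_tuning(host,
						MMC_SEND_TUNING_BLOCK_HS200);
	return err;
}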
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index f2a05ea..c272c686 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -307,8 +307,8 @@ static int mmc_read_switch(struct mmc_card *card)
goto out;
}
- if (status[13] & UHS_SDR50_BUS_SPEED)
- card->sw_caps.hs_max_dtr = 50000000;
+ if (status[13] & SD_MODE_HIGH_SPEED)
+ card->sw_caps.hs_max_dtr = HIGH_SPEED_MAX_DTR;
if (card->scr.sda_spec3) {
card->sw_caps.sd3_bus_mode = status[13];
@@ -451,9 +451,11 @@ static int sd_select_driver_type(struct mmc_card *card, u8 *status)
* information and let the hardware specific code
* return what is possible given the options
*/
+ mmc_host_clk_hold(card->host);
drive_strength = card->host->ops->select_drive_strength(
card->sw_caps.uhs_max_dtr,
host_drv_type, card_drv_type);
+ mmc_host_clk_release(card->host);
err = mmc_sd_switch(card, 1, 2, drive_strength, status);
if (err)
@@ -660,8 +662,12 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card)
goto out;
/* SPI mode doesn't define CMD19 */
- if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning)
- err = card->host->ops->execute_tuning(card->host);
+ if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning) {
+ mmc_host_clk_hold(card->host);
+ err = card->host->ops->execute_tuning(card->host,
+ MMC_SEND_TUNING_BLOCK);
+ mmc_host_clk_release(card->host);
+ }
out:
kfree(status);
@@ -849,8 +855,11 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,
if (!reinit) {
int ro = -1;
- if (host->ops->get_ro)
+ if (host->ops->get_ro) {
+ mmc_host_clk_hold(card->host);
ro = host->ops->get_ro(host);
+ mmc_host_clk_release(card->host);
+ }
if (ro < 0) {
pr_warning("%s: host does not "
@@ -902,6 +911,9 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
BUG_ON(!host);
WARN_ON(!host->claimed);
+ /* The initialization should be done at 3.3 V I/O voltage. */
+ mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
+
err = mmc_sd_get_cid(host, ocr, cid, &rocr);
if (err)
return err;
@@ -960,14 +972,17 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
goto free_card;
/* Card is an ultra-high-speed card */
- mmc_sd_card_set_uhs(card);
+ mmc_card_set_uhs(card);
/*
* Since initialization is now complete, enable preset
* value registers for UHS-I cards.
*/
- if (host->ops->enable_preset_value)
+ if (host->ops->enable_preset_value) {
+ mmc_host_clk_hold(card->host);
host->ops->enable_preset_value(host, true);
+ mmc_host_clk_release(card->host);
+ }
} else {
/*
* Attempt to change to high-speed (if supported)
@@ -1019,6 +1034,14 @@ static void mmc_sd_remove(struct mmc_host *host)
}
/*
+ * Card detection - card is alive.
+ */
+static int mmc_sd_alive(struct mmc_host *host)
+{
+ return mmc_send_status(host->card, NULL);
+}
+
+/*
* Card detection callback from host.
*/
static void mmc_sd_detect(struct mmc_host *host)
@@ -1033,7 +1056,7 @@ static void mmc_sd_detect(struct mmc_host *host)
/*
* Just check if our card has been removed.
*/
- err = mmc_send_status(host->card, NULL);
+ err = _mmc_detect_card_removed(host);
mmc_release_host(host);
@@ -1102,6 +1125,7 @@ static const struct mmc_bus_ops mmc_sd_ops = {
.suspend = NULL,
.resume = NULL,
.power_restore = mmc_sd_power_restore,
+ .alive = mmc_sd_alive,
};
static const struct mmc_bus_ops mmc_sd_ops_unsafe = {
@@ -1110,6 +1134,7 @@ static const struct mmc_bus_ops mmc_sd_ops_unsafe = {
.suspend = mmc_sd_suspend,
.resume = mmc_sd_resume,
.power_restore = mmc_sd_power_restore,
+ .alive = mmc_sd_alive,
};
static void mmc_sd_attach_bus_ops(struct mmc_host *host)
@@ -1134,14 +1159,12 @@ int mmc_attach_sd(struct mmc_host *host)
BUG_ON(!host);
WARN_ON(!host->claimed);
- /* Make sure we are at 3.3V signalling voltage */
- err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, false);
- if (err)
- return err;
-
/* Disable preset value enable if already set since last time */
- if (host->ops->enable_preset_value)
+ if (host->ops->enable_preset_value) {
+ mmc_host_clk_hold(host);
host->ops->enable_preset_value(host, false);
+ mmc_host_clk_release(host);
+ }
err = mmc_send_app_op_cond(host, 0, &ocr);
if (err)
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 3ab565e..2c7c83f 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -14,6 +14,7 @@
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
@@ -97,11 +98,13 @@ fail:
return ret;
}
-static int sdio_read_cccr(struct mmc_card *card)
+static int sdio_read_cccr(struct mmc_card *card, u32 ocr)
{
int ret;
int cccr_vsn;
+ int uhs = ocr & R4_18V_PRESENT;
unsigned char data;
+ unsigned char speed;
memset(&card->cccr, 0, sizeof(struct sdio_cccr));
@@ -140,12 +143,60 @@ static int sdio_read_cccr(struct mmc_card *card)
}
if (cccr_vsn >= SDIO_CCCR_REV_1_20) {
- ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &data);
+ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
if (ret)
goto out;
- if (data & SDIO_SPEED_SHS)
- card->cccr.high_speed = 1;
+ card->scr.sda_spec3 = 0;
+ card->sw_caps.sd3_bus_mode = 0;
+ card->sw_caps.sd3_drv_type = 0;
+ if (cccr_vsn >= SDIO_CCCR_REV_3_00 && uhs) {
+ card->scr.sda_spec3 = 1;
+ ret = mmc_io_rw_direct(card, 0, 0,
+ SDIO_CCCR_UHS, 0, &data);
+ if (ret)
+ goto out;
+
+ if (card->host->caps &
+ (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_DDR50)) {
+ if (data & SDIO_UHS_DDR50)
+ card->sw_caps.sd3_bus_mode
+ |= SD_MODE_UHS_DDR50;
+
+ if (data & SDIO_UHS_SDR50)
+ card->sw_caps.sd3_bus_mode
+ |= SD_MODE_UHS_SDR50;
+
+ if (data & SDIO_UHS_SDR104)
+ card->sw_caps.sd3_bus_mode
+ |= SD_MODE_UHS_SDR104;
+ }
+
+ ret = mmc_io_rw_direct(card, 0, 0,
+ SDIO_CCCR_DRIVE_STRENGTH, 0, &data);
+ if (ret)
+ goto out;
+
+ if (data & SDIO_DRIVE_SDTA)
+ card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_A;
+ if (data & SDIO_DRIVE_SDTC)
+ card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_C;
+ if (data & SDIO_DRIVE_SDTD)
+ card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_D;
+ }
+
+ /* if no uhs mode ensure we check for high speed */
+ if (!card->sw_caps.sd3_bus_mode) {
+ if (speed & SDIO_SPEED_SHS) {
+ card->cccr.high_speed = 1;
+ card->sw_caps.hs_max_dtr = 50000000;
+ } else {
+ card->cccr.high_speed = 0;
+ card->sw_caps.hs_max_dtr = 25000000;
+ }
+ }
}
out:
@@ -327,6 +378,194 @@ static unsigned mmc_sdio_get_max_clock(struct mmc_card *card)
return max_dtr;
}
+static unsigned char host_drive_to_sdio_drive(int host_strength)
+{
+ switch (host_strength) {
+ case MMC_SET_DRIVER_TYPE_A:
+ return SDIO_DTSx_SET_TYPE_A;
+ case MMC_SET_DRIVER_TYPE_B:
+ return SDIO_DTSx_SET_TYPE_B;
+ case MMC_SET_DRIVER_TYPE_C:
+ return SDIO_DTSx_SET_TYPE_C;
+ case MMC_SET_DRIVER_TYPE_D:
+ return SDIO_DTSx_SET_TYPE_D;
+ default:
+ return SDIO_DTSx_SET_TYPE_B;
+ }
+}
+
+static void sdio_select_driver_type(struct mmc_card *card)
+{
+ int host_drv_type = SD_DRIVER_TYPE_B;
+ int card_drv_type = SD_DRIVER_TYPE_B;
+ int drive_strength;
+ unsigned char card_strength;
+ int err;
+
+ /*
+ * If the host doesn't support any of the Driver Types A,C or D,
+ * or there is no board specific handler then default Driver
+ * Type B is used.
+ */
+ if (!(card->host->caps &
+ (MMC_CAP_DRIVER_TYPE_A |
+ MMC_CAP_DRIVER_TYPE_C |
+ MMC_CAP_DRIVER_TYPE_D)))
+ return;
+
+ if (!card->host->ops->select_drive_strength)
+ return;
+
+ if (card->host->caps & MMC_CAP_DRIVER_TYPE_A)
+ host_drv_type |= SD_DRIVER_TYPE_A;
+
+ if (card->host->caps & MMC_CAP_DRIVER_TYPE_C)
+ host_drv_type |= SD_DRIVER_TYPE_C;
+
+ if (card->host->caps & MMC_CAP_DRIVER_TYPE_D)
+ host_drv_type |= SD_DRIVER_TYPE_D;
+
+ if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A)
+ card_drv_type |= SD_DRIVER_TYPE_A;
+
+ if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C)
+ card_drv_type |= SD_DRIVER_TYPE_C;
+
+ if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_D)
+ card_drv_type |= SD_DRIVER_TYPE_D;
+
+ /*
+ * The drive strength that the hardware can support
+ * depends on the board design. Pass the appropriate
+ * information and let the hardware specific code
+ * return what is possible given the options
+ */
+ drive_strength = card->host->ops->select_drive_strength(
+ card->sw_caps.uhs_max_dtr,
+ host_drv_type, card_drv_type);
+
+ /* if error just use default for drive strength B */
+ err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_DRIVE_STRENGTH, 0,
+ &card_strength);
+ if (err)
+ return;
+
+ card_strength &= ~(SDIO_DRIVE_DTSx_MASK<<SDIO_DRIVE_DTSx_SHIFT);
+ card_strength |= host_drive_to_sdio_drive(drive_strength);
+
+ err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_DRIVE_STRENGTH,
+ card_strength, NULL);
+
+ /* if error default to drive strength B */
+ if (!err)
+ mmc_set_driver_type(card->host, drive_strength);
+}
+
+
+static int sdio_set_bus_speed_mode(struct mmc_card *card)
+{
+ unsigned int bus_speed, timing;
+ int err;
+ unsigned char speed;
+
+ /*
+ * If the host doesn't support any of the UHS-I modes, fallback on
+ * default speed.
+ */
+ if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)))
+ return 0;
+
+ bus_speed = SDIO_SPEED_SDR12;
+ timing = MMC_TIMING_UHS_SDR12;
+ if ((card->host->caps & MMC_CAP_UHS_SDR104) &&
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) {
+ bus_speed = SDIO_SPEED_SDR104;
+ timing = MMC_TIMING_UHS_SDR104;
+ card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR;
+ } else if ((card->host->caps & MMC_CAP_UHS_DDR50) &&
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) {
+ bus_speed = SDIO_SPEED_DDR50;
+ timing = MMC_TIMING_UHS_DDR50;
+ card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR;
+ } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode &
+ SD_MODE_UHS_SDR50)) {
+ bus_speed = SDIO_SPEED_SDR50;
+ timing = MMC_TIMING_UHS_SDR50;
+ card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR;
+ } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) &&
+ (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) {
+ bus_speed = SDIO_SPEED_SDR25;
+ timing = MMC_TIMING_UHS_SDR25;
+ card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR;
+ } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 |
+ MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode &
+ SD_MODE_UHS_SDR12)) {
+ bus_speed = SDIO_SPEED_SDR12;
+ timing = MMC_TIMING_UHS_SDR12;
+ card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR;
+ }
+
+ err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);
+ if (err)
+ return err;
+
+ speed &= ~SDIO_SPEED_BSS_MASK;
+ speed |= bus_speed;
+ err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_SPEED, speed, NULL);
+ if (err)
+ return err;
+
+ if (bus_speed) {
+ mmc_set_timing(card->host, timing);
+ mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr);
+ }
+
+ return 0;
+}
+
+/*
+ * UHS-I specific initialization procedure
+ */
+static int mmc_sdio_init_uhs_card(struct mmc_card *card)
+{
+ int err;
+
+ if (!card->scr.sda_spec3)
+ return 0;
+
+ /*
+ * Switch to wider bus (if supported).
+ */
+ if (card->host->caps & MMC_CAP_4_BIT_DATA) {
+ err = sdio_enable_4bit_bus(card);
+ if (err > 0) {
+ mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
+ err = 0;
+ }
+ }
+
+ /* Set the driver strength for the card */
+ sdio_select_driver_type(card);
+
+ /* Set bus speed mode of the card */
+ err = sdio_set_bus_speed_mode(card);
+ if (err)
+ goto out;
+
+ /* Initialize and start re-tuning timer */
+ if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning)
+ err = card->host->ops->execute_tuning(card->host,
+ MMC_SEND_TUNING_BLOCK);
+
+out:
+
+ return err;
+}
+
/*
* Handle the detection and initialisation of a card.
*
@@ -346,6 +585,9 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
* Inform the card of the voltage
*/
if (!powered_resume) {
+ /* The initialization should be done at 3.3 V I/O voltage. */
+ mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
+
err = mmc_send_io_op_cond(host, host->ocr, &ocr);
if (err)
goto err;
@@ -394,6 +636,30 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
host->ops->init_card(host, card);
/*
+ * If the host and card support UHS-I mode, request the card
+ * to switch to the 1.8V signaling level. Skip 1.8V signalling
+ * when UHS mode is not enabled, to maintain compatibility:
+ * some systems that claim 1.8V signalling in fact do not
+ * support it.
+ */
+ if ((ocr & R4_18V_PRESENT) &&
+ (host->caps &
+ (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
+ MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
+ MMC_CAP_UHS_DDR50))) {
+ err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180,
+ true);
+ if (err) {
+ ocr &= ~R4_18V_PRESENT;
+ host->ocr &= ~R4_18V_PRESENT;
+ }
+ err = 0;
+ } else {
+ ocr &= ~R4_18V_PRESENT;
+ host->ocr &= ~R4_18V_PRESENT;
+ }
+
+ /*
* For native busses: set card RCA and quit open drain mode.
*/
if (!powered_resume && !mmc_host_is_spi(host)) {
@@ -450,7 +716,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
/*
* Read the common registers.
*/
- err = sdio_read_cccr(card);
+ err = sdio_read_cccr(card, ocr);
if (err)
goto remove;
@@ -492,29 +758,39 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
if (err)
goto remove;
- /*
- * Switch to high-speed (if supported).
- */
- err = sdio_enable_hs(card);
- if (err > 0)
- mmc_sd_go_highspeed(card);
- else if (err)
- goto remove;
+ /* Initialization sequence for UHS-I cards */
+ /* Only if card supports 1.8v and UHS signaling */
+ if ((ocr & R4_18V_PRESENT) && card->sw_caps.sd3_bus_mode) {
+ err = mmc_sdio_init_uhs_card(card);
+ if (err)
+ goto remove;
- /*
- * Change to the card's maximum speed.
- */
- mmc_set_clock(host, mmc_sdio_get_max_clock(card));
+ /* Card is an ultra-high-speed card */
+ mmc_card_set_uhs(card);
+ } else {
+ /*
+ * Switch to high-speed (if supported).
+ */
+ err = sdio_enable_hs(card);
+ if (err > 0)
+ mmc_sd_go_highspeed(card);
+ else if (err)
+ goto remove;
- /*
- * Switch to wider bus (if supported).
- */
- err = sdio_enable_4bit_bus(card);
- if (err > 0)
- mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
- else if (err)
- goto remove;
+ /*
+ * Change to the card's maximum speed.
+ */
+ mmc_set_clock(host, mmc_sdio_get_max_clock(card));
+ /*
+ * Switch to wider bus (if supported).
+ */
+ err = sdio_enable_4bit_bus(card);
+ if (err > 0)
+ mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4);
+ else if (err)
+ goto remove;
+ }
finish:
if (!oldcard)
host->card = card;
@@ -550,6 +826,14 @@ static void mmc_sdio_remove(struct mmc_host *host)
}
/*
+ * Card detection - card is alive.
+ */
+static int mmc_sdio_alive(struct mmc_host *host)
+{
+ return mmc_select_card(host->card);
+}
+
+/*
* Card detection callback from host.
*/
static void mmc_sdio_detect(struct mmc_host *host)
@@ -571,7 +855,7 @@ static void mmc_sdio_detect(struct mmc_host *host)
/*
* Just check if our card has been removed.
*/
- err = mmc_select_card(host->card);
+ err = _mmc_detect_card_removed(host);
mmc_release_host(host);
@@ -715,6 +999,11 @@ static int mmc_sdio_power_restore(struct mmc_host *host)
* With these steps taken, mmc_select_voltage() is also required to
* restore the correct voltage setting of the card.
*/
+
+ /* The initialization should be done at 3.3 V I/O voltage. */
+ if (!mmc_card_keep_power(host))
+ mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
+
sdio_reset(host);
mmc_go_idle(host);
mmc_send_if_cond(host, host->ocr_avail);
@@ -749,6 +1038,7 @@ static const struct mmc_bus_ops mmc_sdio_ops = {
.suspend = mmc_sdio_suspend,
.resume = mmc_sdio_resume,
.power_restore = mmc_sdio_power_restore,
+ .alive = mmc_sdio_alive,
};
@@ -797,8 +1087,17 @@ int mmc_attach_sdio(struct mmc_host *host)
* Detect and init the card.
*/
err = mmc_sdio_init_card(host, host->ocr, NULL, 0);
- if (err)
- goto err;
+ if (err) {
+ if (err == -EAGAIN) {
+ /*
+ * Retry initialization with S18R set to 0.
+ */
+ host->ocr &= ~R4_18V_PRESENT;
+ err = mmc_sdio_init_card(host, host->ocr, NULL, 0);
+ }
+ if (err)
+ goto err;
+ }
card = host->card;
/*
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index b1f3168..8f6f5ac 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -196,6 +196,9 @@ static inline unsigned int sdio_max_byte_size(struct sdio_func *func)
else
mval = min(mval, func->max_blksize);
+ if (mmc_card_broken_byte_mode_512(func->card))
+ return min(mval, 511u);
+
return min(mval, 512u); /* maximum size for byte mode */
}
@@ -314,7 +317,7 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write,
func->card->host->max_seg_size / func->cur_blksize);
max_blocks = min(max_blocks, 511u);
- while (remainder > func->cur_blksize) {
+ while (remainder >= func->cur_blksize) {
unsigned blocks;
blocks = remainder / func->cur_blksize;
@@ -339,8 +342,9 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write,
while (remainder > 0) {
size = min(remainder, sdio_max_byte_size(func));
+ /* Indicate byte mode by setting "blocks" = 0 */
ret = mmc_io_rw_extended(func->card, write, func->num, addr,
- incr_addr, buf, 1, size);
+ incr_addr, buf, 0, size);
if (ret)
return ret;
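
The sizing logic above now treats a remainder of exactly one block as a block transfer (>= instead of >) and signals byte mode to mmc_io_rw_extended() with blocks == 0. The cap that sdio_max_byte_size() applies for quirky cards can be summarised as follows (sketch mirroring the code above):

static unsigned int example_byte_mode_cap(struct mmc_card *card)
{
	/* Cards flagged as having broken 512-byte byte mode cannot use
	 * the "count 0 means 512 bytes" encoding, so cap their
	 * byte-mode chunks at 511 bytes. */
	return mmc_card_broken_byte_mode_512(card) ? 511u : 512u;
}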
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index 68f81b9..f573e7f 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -146,15 +146,21 @@ static int sdio_irq_thread(void *_host)
}
set_current_state(TASK_INTERRUPTIBLE);
- if (host->caps & MMC_CAP_SDIO_IRQ)
+ if (host->caps & MMC_CAP_SDIO_IRQ) {
+ mmc_host_clk_hold(host);
host->ops->enable_sdio_irq(host, 1);
+ mmc_host_clk_release(host);
+ }
if (!kthread_should_stop())
schedule_timeout(period);
set_current_state(TASK_RUNNING);
} while (!kthread_should_stop());
- if (host->caps & MMC_CAP_SDIO_IRQ)
+ if (host->caps & MMC_CAP_SDIO_IRQ) {
+ mmc_host_clk_hold(host);
host->ops->enable_sdio_irq(host, 0);
+ mmc_host_clk_release(host);
+ }
pr_debug("%s: IRQ thread exiting with code %d\n",
mmc_hostname(host), ret);
diff --git a/drivers/mmc/core/sdio_ops.c b/drivers/mmc/core/sdio_ops.c
index b0517cc..d29e206 100644
--- a/drivers/mmc/core/sdio_ops.c
+++ b/drivers/mmc/core/sdio_ops.c
@@ -128,8 +128,6 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
BUG_ON(!card);
BUG_ON(fn > 7);
- BUG_ON(blocks == 1 && blksz > 512);
- WARN_ON(blocks == 0);
WARN_ON(blksz == 0);
/* sanity check */
@@ -144,22 +142,20 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,
cmd.arg |= fn << 28;
cmd.arg |= incr_addr ? 0x04000000 : 0x00000000;
cmd.arg |= addr << 9;
- if (blocks == 1 && blksz < 512)
- cmd.arg |= blksz; /* byte mode */
- else if (blocks == 1 && blksz == 512 &&
- !(mmc_card_broken_byte_mode_512(card)))
- cmd.arg |= 0; /* byte mode, 0==512 */
+ if (blocks == 0)
+ cmd.arg |= (blksz == 512) ? 0 : blksz; /* byte mode */
else
cmd.arg |= 0x08000000 | blocks; /* block mode */
cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
data.blksz = blksz;
- data.blocks = blocks;
+ /* Code in host drivers/fwk assumes that "blocks" always is >=1 */
+ data.blocks = blocks ? blocks : 1;
data.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
data.sg = &sg;
data.sg_len = 1;
- sg_init_one(&sg, buf, blksz * blocks);
+ sg_init_one(&sg, buf, data.blksz * data.blocks);
mmc_set_data_timeout(&data, card);
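
With the blocks parameter repurposed so that 0 explicitly requests byte mode, the CMD53 argument encoding above reduces to the following (illustrative helper, not part of the patch):

static u32 example_cmd53_arg(int write, unsigned fn, int incr_addr,
			     unsigned addr, unsigned blocks, unsigned blksz)
{
	u32 arg = write ? 0x80000000 : 0x00000000;

	arg |= fn << 28;
	arg |= incr_addr ? 0x04000000 : 0x00000000;
	arg |= addr << 9;
	if (blocks == 0)
		arg |= (blksz == 512) ? 0 : blksz;	/* byte mode */
	else
		arg |= 0x08000000 | blocks;		/* block mode */
	return arg;
}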
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index cf444b0..00fcbed 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -477,7 +477,6 @@ config MMC_SDHI
config MMC_CB710
tristate "ENE CB710 MMC/SD Interface support"
depends on PCI
- select MISC_DEVICES
select CB710_CORE
help
This option enables support for MMC/SD part of ENE CB710/720 Flash
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index b4b83f3..745f8fc 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_MMC_MXC) += mxcmmc.o
obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
+obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o
obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o
obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o
obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index a8b4d2a..947faa5 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -236,7 +236,7 @@ static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data
sg = &data->sg[i];
- sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
+ sgbuffer = kmap_atomic(sg_page(sg)) + sg->offset;
amount = min(size, sg->length);
size -= amount;
@@ -252,7 +252,7 @@ static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data
dmabuf = (unsigned *)tmpv;
}
- kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);
+ kunmap_atomic(sgbuffer);
if (size == 0)
break;
@@ -302,7 +302,7 @@ static void at91_mci_post_dma_read(struct at91mci_host *host)
sg = &data->sg[i];
- sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
+ sgbuffer = kmap_atomic(sg_page(sg)) + sg->offset;
amount = min(size, sg->length);
size -= amount;
@@ -318,7 +318,7 @@ static void at91_mci_post_dma_read(struct at91mci_host *host)
}
flush_kernel_dcache_page(sg_page(sg));
- kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);
+ kunmap_atomic(sgbuffer);
data->bytes_xfered += amount;
if (size == 0)
break;
@@ -741,7 +741,7 @@ static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);
/* maybe switch power to the card */
- if (host->board->vcc_pin) {
+ if (gpio_is_valid(host->board->vcc_pin)) {
switch (ios->power_mode) {
case MMC_POWER_OFF:
gpio_set_value(host->board->vcc_pin, 0);
@@ -897,7 +897,7 @@ static int at91_mci_get_ro(struct mmc_host *mmc)
{
struct at91mci_host *host = mmc_priv(mmc);
- if (host->board->wp_pin)
+ if (gpio_is_valid(host->board->wp_pin))
return !!gpio_get_value(host->board->wp_pin);
/*
* Board doesn't support read only detection; let the mmc core
@@ -991,21 +991,21 @@ static int __init at91_mci_probe(struct platform_device *pdev)
* Reserve GPIOs ... board init code makes sure these pins are set
* up as GPIOs with the right direction (input, except for vcc)
*/
- if (host->board->det_pin) {
+ if (gpio_is_valid(host->board->det_pin)) {
ret = gpio_request(host->board->det_pin, "mmc_detect");
if (ret < 0) {
dev_dbg(&pdev->dev, "couldn't claim card detect pin\n");
goto fail4b;
}
}
- if (host->board->wp_pin) {
+ if (gpio_is_valid(host->board->wp_pin)) {
ret = gpio_request(host->board->wp_pin, "mmc_wp");
if (ret < 0) {
dev_dbg(&pdev->dev, "couldn't claim wp sense pin\n");
goto fail4;
}
}
- if (host->board->vcc_pin) {
+ if (gpio_is_valid(host->board->vcc_pin)) {
ret = gpio_request(host->board->vcc_pin, "mmc_vcc");
if (ret < 0) {
dev_dbg(&pdev->dev, "couldn't claim vcc switch pin\n");
@@ -1057,7 +1057,7 @@ static int __init at91_mci_probe(struct platform_device *pdev)
/*
* Add host to MMC layer
*/
- if (host->board->det_pin) {
+ if (gpio_is_valid(host->board->det_pin)) {
host->present = !gpio_get_value(host->board->det_pin);
}
else
@@ -1068,7 +1068,7 @@ static int __init at91_mci_probe(struct platform_device *pdev)
/*
* monitor card insertion/removal if we can
*/
- if (host->board->det_pin) {
+ if (gpio_is_valid(host->board->det_pin)) {
ret = request_irq(gpio_to_irq(host->board->det_pin),
at91_mmc_det_irq, 0, mmc_hostname(mmc), host);
if (ret)
@@ -1087,13 +1087,13 @@ fail0:
fail1:
clk_put(host->mci_clk);
fail2:
- if (host->board->vcc_pin)
+ if (gpio_is_valid(host->board->vcc_pin))
gpio_free(host->board->vcc_pin);
fail3:
- if (host->board->wp_pin)
+ if (gpio_is_valid(host->board->wp_pin))
gpio_free(host->board->wp_pin);
fail4:
- if (host->board->det_pin)
+ if (gpio_is_valid(host->board->det_pin))
gpio_free(host->board->det_pin);
fail4b:
if (host->buffer)
@@ -1125,7 +1125,7 @@ static int __exit at91_mci_remove(struct platform_device *pdev)
dma_free_coherent(&pdev->dev, MCI_BUFSIZE,
host->buffer, host->physical_address);
- if (host->board->det_pin) {
+ if (gpio_is_valid(host->board->det_pin)) {
if (device_can_wakeup(&pdev->dev))
free_irq(gpio_to_irq(host->board->det_pin), host);
device_init_wakeup(&pdev->dev, 0);
@@ -1140,9 +1140,9 @@ static int __exit at91_mci_remove(struct platform_device *pdev)
clk_disable(host->mci_clk); /* Disable the peripheral clock */
clk_put(host->mci_clk);
- if (host->board->vcc_pin)
+ if (gpio_is_valid(host->board->vcc_pin))
gpio_free(host->board->vcc_pin);
- if (host->board->wp_pin)
+ if (gpio_is_valid(host->board->wp_pin))
gpio_free(host->board->wp_pin);
iounmap(host->baseaddr);
@@ -1163,7 +1163,7 @@ static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
struct at91mci_host *host = mmc_priv(mmc);
int ret = 0;
- if (host->board->det_pin && device_may_wakeup(&pdev->dev))
+ if (gpio_is_valid(host->board->det_pin) && device_may_wakeup(&pdev->dev))
enable_irq_wake(host->board->det_pin);
if (mmc)
@@ -1178,7 +1178,7 @@ static int at91_mci_resume(struct platform_device *pdev)
struct at91mci_host *host = mmc_priv(mmc);
int ret = 0;
- if (host->board->det_pin && device_may_wakeup(&pdev->dev))
+ if (gpio_is_valid(host->board->det_pin) && device_may_wakeup(&pdev->dev))
disable_irq_wake(host->board->det_pin);
if (mmc)
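
The at91_mci conversion above replaces every "if (pin)" test with gpio_is_valid(pin): GPIO 0 is a legitimate line, and boards signal "not connected" with a negative number. The pattern in isolation (sketch with a hypothetical helper name):

static int example_claim_optional_gpio(int pin, const char *label)
{
	if (!gpio_is_valid(pin))	/* board provides no such pin */
		return 0;
	return gpio_request(pin, label);
}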
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index a7ee502..e4449a5 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -823,6 +823,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
struct scatterlist *sg;
unsigned int i;
enum dma_data_direction direction;
+ enum dma_transfer_direction slave_dirn;
unsigned int sglen;
u32 iflags;
@@ -860,16 +861,19 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
if (host->caps.has_dma)
atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(3) | ATMCI_DMAEN);
- if (data->flags & MMC_DATA_READ)
+ if (data->flags & MMC_DATA_READ) {
direction = DMA_FROM_DEVICE;
- else
+ slave_dirn = DMA_DEV_TO_MEM;
+ } else {
direction = DMA_TO_DEVICE;
+ slave_dirn = DMA_MEM_TO_DEV;
+ }
sglen = dma_map_sg(chan->device->dev, data->sg,
data->sg_len, direction);
desc = chan->device->device_prep_slave_sg(chan,
- data->sg, sglen, direction,
+ data->sg, sglen, slave_dirn,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
goto unmap_exit;
@@ -965,11 +969,14 @@ static void atmci_start_request(struct atmel_mci *host,
host->data_status = 0;
if (host->need_reset) {
+ iflags = atmci_readl(host, ATMCI_IMR);
+ iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB);
atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN);
atmci_writel(host, ATMCI_MR, host->mode_reg);
if (host->caps.has_cfg_reg)
atmci_writel(host, ATMCI_CFG, host->cfg_reg);
+ atmci_writel(host, ATMCI_IER, iflags);
host->need_reset = false;
}
atmci_writel(host, ATMCI_SDCR, slot->sdc_reg);
@@ -1941,12 +1948,12 @@ static bool atmci_filter(struct dma_chan *chan, void *slave)
}
}
-static void atmci_configure_dma(struct atmel_mci *host)
+static bool atmci_configure_dma(struct atmel_mci *host)
{
struct mci_platform_data *pdata;
if (host == NULL)
- return;
+ return false;
pdata = host->pdev->dev.platform_data;
@@ -1963,12 +1970,15 @@ static void atmci_configure_dma(struct atmel_mci *host)
host->dma.chan =
dma_request_channel(mask, atmci_filter, pdata->dma_slave);
}
- if (!host->dma.chan)
- dev_notice(&host->pdev->dev, "DMA not available, using PIO\n");
- else
+ if (!host->dma.chan) {
+ dev_warn(&host->pdev->dev, "no DMA channel available\n");
+ return false;
+ } else {
dev_info(&host->pdev->dev,
"Using %s for DMA transfers\n",
dma_chan_name(host->dma.chan));
+ return true;
+ }
}
static inline unsigned int atmci_get_version(struct atmel_mci *host)
@@ -2078,8 +2088,7 @@ static int __init atmci_probe(struct platform_device *pdev)
/* Get MCI capabilities and set operations according to it */
atmci_get_cap(host);
- if (host->caps.has_dma) {
- dev_info(&pdev->dev, "using DMA\n");
+ if (host->caps.has_dma && atmci_configure_dma(host)) {
host->prepare_data = &atmci_prepare_data_dma;
host->submit_data = &atmci_submit_data_dma;
host->stop_transfer = &atmci_stop_transfer_dma;
@@ -2089,15 +2098,12 @@ static int __init atmci_probe(struct platform_device *pdev)
host->submit_data = &atmci_submit_data_pdc;
host->stop_transfer = &atmci_stop_transfer_pdc;
} else {
- dev_info(&pdev->dev, "no DMA, no PDC\n");
+ dev_info(&pdev->dev, "using PIO\n");
host->prepare_data = &atmci_prepare_data;
host->submit_data = &atmci_submit_data;
host->stop_transfer = &atmci_stop_transfer;
}
- if (host->caps.has_dma)
- atmci_configure_dma(host);
-
platform_set_drvdata(pdev, host);
/* We need at least one slot to succeed */
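Note: the atmel-mci hunk above keeps two direction values side by side: enum dma_data_direction for dma_map_sg()/dma_unmap_sg(), and the newer enum dma_transfer_direction for device_prep_slave_sg(). A small illustrative helper (not from this patch) showing how one read/write decision maps onto both enums:

#include <linux/types.h>
#include <linux/dma-direction.h>
#include <linux/dmaengine.h>

/*
 * dma_data_direction drives the cache/IOMMU mapping (dma_map_sg);
 * dma_transfer_direction drives the slave descriptor (device_prep_slave_sg).
 */
static void example_pick_directions(bool is_read,
				    enum dma_data_direction *map_dir,
				    enum dma_transfer_direction *slave_dir)
{
	if (is_read) {
		*map_dir = DMA_FROM_DEVICE;	/* device writes into memory */
		*slave_dir = DMA_DEV_TO_MEM;
	} else {
		*map_dir = DMA_TO_DEVICE;	/* memory feeds the device */
		*slave_dir = DMA_MEM_TO_DEV;
	}
}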
diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c
index 5d3b9ae..dbd0c8a 100644
--- a/drivers/mmc/host/au1xmmc.c
+++ b/drivers/mmc/host/au1xmmc.c
@@ -153,6 +153,7 @@ static inline int has_dbdma(void)
{
switch (alchemy_get_cputype()) {
case ALCHEMY_CPU_AU1200:
+ case ALCHEMY_CPU_AU1300:
return 1;
default:
return 0;
@@ -768,11 +769,15 @@ static void au1xmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
config2 = au_readl(HOST_CONFIG2(host));
switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_8:
+ config2 |= SD_CONFIG2_BB;
+ break;
case MMC_BUS_WIDTH_4:
+ config2 &= ~SD_CONFIG2_BB;
config2 |= SD_CONFIG2_WB;
break;
case MMC_BUS_WIDTH_1:
- config2 &= ~SD_CONFIG2_WB;
+ config2 &= ~(SD_CONFIG2_WB | SD_CONFIG2_BB);
break;
}
au_writel(config2, HOST_CONFIG2(host));
@@ -943,7 +948,7 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)
struct mmc_host *mmc;
struct au1xmmc_host *host;
struct resource *r;
- int ret;
+ int ret, iflag;
mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev);
if (!mmc) {
@@ -982,37 +987,43 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "no IRQ defined\n");
goto out3;
}
-
host->irq = r->start;
- /* IRQ is shared among both SD controllers */
- ret = request_irq(host->irq, au1xmmc_irq, IRQF_SHARED,
- DRIVER_NAME, host);
- if (ret) {
- dev_err(&pdev->dev, "cannot grab IRQ\n");
- goto out3;
- }
mmc->ops = &au1xmmc_ops;
mmc->f_min = 450000;
mmc->f_max = 24000000;
+ mmc->max_blk_size = 2048;
+ mmc->max_blk_count = 512;
+
+ mmc->ocr_avail = AU1XMMC_OCR;
+ mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
+ mmc->max_segs = AU1XMMC_DESCRIPTOR_COUNT;
+
+ iflag = IRQF_SHARED; /* Au1100/Au1200: one int for both ctrls */
+
switch (alchemy_get_cputype()) {
case ALCHEMY_CPU_AU1100:
mmc->max_seg_size = AU1100_MMC_DESCRIPTOR_SIZE;
- mmc->max_segs = AU1XMMC_DESCRIPTOR_COUNT;
break;
case ALCHEMY_CPU_AU1200:
mmc->max_seg_size = AU1200_MMC_DESCRIPTOR_SIZE;
- mmc->max_segs = AU1XMMC_DESCRIPTOR_COUNT;
+ break;
+ case ALCHEMY_CPU_AU1300:
+ iflag = 0; /* nothing is shared */
+ mmc->max_seg_size = AU1200_MMC_DESCRIPTOR_SIZE;
+ mmc->f_max = 52000000;
+ if (host->ioarea->start == AU1100_SD0_PHYS_ADDR)
+ mmc->caps |= MMC_CAP_8_BIT_DATA;
break;
}
- mmc->max_blk_size = 2048;
- mmc->max_blk_count = 512;
-
- mmc->ocr_avail = AU1XMMC_OCR;
- mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
+ ret = request_irq(host->irq, au1xmmc_irq, iflag, DRIVER_NAME, host);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot grab IRQ\n");
+ goto out3;
+ }
host->status = HOST_S_IDLE;
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index 0371bf5..0366617 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -627,17 +627,7 @@ static struct platform_driver sdh_driver = {
},
};
-static int __init sdh_init(void)
-{
- return platform_driver_register(&sdh_driver);
-}
-module_init(sdh_init);
-
-static void __exit sdh_exit(void)
-{
- platform_driver_unregister(&sdh_driver);
-}
-module_exit(sdh_exit);
+module_platform_driver(sdh_driver);
MODULE_DESCRIPTION("Blackfin Secure Digital Host Driver");
MODULE_AUTHOR("Cliff Cai, Roy Huang");
diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c
index ce2a47b..83693fd 100644
--- a/drivers/mmc/host/cb710-mmc.c
+++ b/drivers/mmc/host/cb710-mmc.c
@@ -780,18 +780,7 @@ static struct platform_driver cb710_mmc_driver = {
#endif
};
-static int __init cb710_mmc_init_module(void)
-{
- return platform_driver_register(&cb710_mmc_driver);
-}
-
-static void __exit cb710_mmc_cleanup_module(void)
-{
- platform_driver_unregister(&cb710_mmc_driver);
-}
-
-module_init(cb710_mmc_init_module);
-module_exit(cb710_mmc_cleanup_module);
+module_platform_driver(cb710_mmc_driver);
MODULE_AUTHOR("Michał Mirosław <mirq-linux@rere.qmqm.pl>");
MODULE_DESCRIPTION("ENE CB710 memory card reader driver - MMC/SD part");
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 3aaeb08..8bec1c3 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -22,7 +22,6 @@
#include <linux/ioport.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
@@ -502,8 +501,14 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data)
host->dir_status = DW_MCI_SEND_STATUS;
if (dw_mci_submit_data_dma(host, data)) {
+ int flags = SG_MITER_ATOMIC;
+ if (host->data->flags & MMC_DATA_READ)
+ flags |= SG_MITER_TO_SG;
+ else
+ flags |= SG_MITER_FROM_SG;
+
+ sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
host->sg = data->sg;
- host->pio_offset = 0;
host->part_buf_start = 0;
host->part_buf_count = 0;
@@ -588,11 +593,11 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
mci_writel(host, CTYPE, (slot->ctype << slot->id));
}
-static void dw_mci_start_request(struct dw_mci *host,
- struct dw_mci_slot *slot)
+static void __dw_mci_start_request(struct dw_mci *host,
+ struct dw_mci_slot *slot,
+ struct mmc_command *cmd)
{
struct mmc_request *mrq;
- struct mmc_command *cmd;
struct mmc_data *data;
u32 cmdflags;
@@ -610,14 +615,13 @@ static void dw_mci_start_request(struct dw_mci *host,
host->completed_events = 0;
host->data_status = 0;
- data = mrq->data;
+ data = cmd->data;
if (data) {
dw_mci_set_timeout(host);
mci_writel(host, BYTCNT, data->blksz*data->blocks);
mci_writel(host, BLKSIZ, data->blksz);
}
- cmd = mrq->cmd;
cmdflags = dw_mci_prepare_command(slot->mmc, cmd);
/* this is the first command, send the initialization clock */
@@ -635,6 +639,16 @@ static void dw_mci_start_request(struct dw_mci *host,
host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop);
}
+static void dw_mci_start_request(struct dw_mci *host,
+ struct dw_mci_slot *slot)
+{
+ struct mmc_request *mrq = slot->mrq;
+ struct mmc_command *cmd;
+
+ cmd = mrq->sbc ? mrq->sbc : mrq->cmd;
+ __dw_mci_start_request(host, slot, cmd);
+}
+
/* must be called with host->lock held */
static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot,
struct mmc_request *mrq)
@@ -698,12 +712,15 @@ static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
break;
}
+ regs = mci_readl(slot->host, UHS_REG);
+
/* DDR mode set */
- if (ios->timing == MMC_TIMING_UHS_DDR50) {
- regs = mci_readl(slot->host, UHS_REG);
+ if (ios->timing == MMC_TIMING_UHS_DDR50)
regs |= (0x1 << slot->id) << 16;
- mci_writel(slot->host, UHS_REG, regs);
- }
+ else
+ regs &= ~(0x1 << slot->id) << 16;
+
+ mci_writel(slot->host, UHS_REG, regs);
if (ios->clock) {
/*
@@ -889,7 +906,14 @@ static void dw_mci_tasklet_func(unsigned long priv)
cmd = host->cmd;
host->cmd = NULL;
set_bit(EVENT_CMD_COMPLETE, &host->completed_events);
- dw_mci_command_complete(host, host->mrq->cmd);
+ dw_mci_command_complete(host, cmd);
+ if (cmd == host->mrq->sbc && !cmd->error) {
+ prev_state = state = STATE_SENDING_CMD;
+ __dw_mci_start_request(host, host->cur_slot,
+ host->mrq->cmd);
+ goto unlock;
+ }
+
if (!host->mrq->data || cmd->error) {
dw_mci_request_end(host, host->mrq);
goto unlock;
@@ -953,6 +977,7 @@ static void dw_mci_tasklet_func(unsigned long priv)
* generates a block interrupt, hence setting
* the scatter-gather pointer to NULL.
*/
+ sg_miter_stop(&host->sg_miter);
host->sg = NULL;
ctrl = mci_readl(host, CTRL);
ctrl |= SDMMC_CTRL_FIFO_RESET;
@@ -967,6 +992,12 @@ static void dw_mci_tasklet_func(unsigned long priv)
goto unlock;
}
+ if (host->mrq->sbc && !data->error) {
+ data->stop->error = 0;
+ dw_mci_request_end(host, host->mrq);
+ goto unlock;
+ }
+
prev_state = state = STATE_SENDING_STOP;
if (!data->error)
send_stop_cmd(host, data);
@@ -1286,54 +1317,44 @@ static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt)
static void dw_mci_read_data_pio(struct dw_mci *host)
{
- struct scatterlist *sg = host->sg;
- void *buf = sg_virt(sg);
- unsigned int offset = host->pio_offset;
+ struct sg_mapping_iter *sg_miter = &host->sg_miter;
+ void *buf;
+ unsigned int offset;
struct mmc_data *data = host->data;
int shift = host->data_shift;
u32 status;
unsigned int nbytes = 0, len;
+ unsigned int remain, fcnt;
do {
- len = host->part_buf_count +
- (SDMMC_GET_FCNT(mci_readl(host, STATUS)) << shift);
- if (offset + len <= sg->length) {
+ if (!sg_miter_next(sg_miter))
+ goto done;
+
+ host->sg = sg_miter->__sg;
+ buf = sg_miter->addr;
+ remain = sg_miter->length;
+ offset = 0;
+
+ do {
+ fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS))
+ << shift) + host->part_buf_count;
+ len = min(remain, fcnt);
+ if (!len)
+ break;
dw_mci_pull_data(host, (void *)(buf + offset), len);
-
offset += len;
nbytes += len;
-
- if (offset == sg->length) {
- flush_dcache_page(sg_page(sg));
- host->sg = sg = sg_next(sg);
- if (!sg)
- goto done;
-
- offset = 0;
- buf = sg_virt(sg);
- }
- } else {
- unsigned int remaining = sg->length - offset;
- dw_mci_pull_data(host, (void *)(buf + offset),
- remaining);
- nbytes += remaining;
-
- flush_dcache_page(sg_page(sg));
- host->sg = sg = sg_next(sg);
- if (!sg)
- goto done;
-
- offset = len - remaining;
- buf = sg_virt(sg);
- dw_mci_pull_data(host, buf, offset);
- nbytes += offset;
- }
+ remain -= len;
+ } while (remain);
+ sg_miter->consumed = offset;
status = mci_readl(host, MINTSTS);
mci_writel(host, RINTSTS, SDMMC_INT_RXDR);
if (status & DW_MCI_DATA_ERROR_FLAGS) {
host->data_status = status;
data->bytes_xfered += nbytes;
+ sg_miter_stop(sg_miter);
+ host->sg = NULL;
smp_wmb();
set_bit(EVENT_DATA_ERROR, &host->pending_events);
@@ -1342,65 +1363,66 @@ static void dw_mci_read_data_pio(struct dw_mci *host)
return;
}
} while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/
- host->pio_offset = offset;
data->bytes_xfered += nbytes;
+
+ if (!remain) {
+ if (!sg_miter_next(sg_miter))
+ goto done;
+ sg_miter->consumed = 0;
+ }
+ sg_miter_stop(sg_miter);
return;
done:
data->bytes_xfered += nbytes;
+ sg_miter_stop(sg_miter);
+ host->sg = NULL;
smp_wmb();
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
static void dw_mci_write_data_pio(struct dw_mci *host)
{
- struct scatterlist *sg = host->sg;
- void *buf = sg_virt(sg);
- unsigned int offset = host->pio_offset;
+ struct sg_mapping_iter *sg_miter = &host->sg_miter;
+ void *buf;
+ unsigned int offset;
struct mmc_data *data = host->data;
int shift = host->data_shift;
u32 status;
unsigned int nbytes = 0, len;
+ unsigned int fifo_depth = host->fifo_depth;
+ unsigned int remain, fcnt;
do {
- len = ((host->fifo_depth -
- SDMMC_GET_FCNT(mci_readl(host, STATUS))) << shift)
- - host->part_buf_count;
- if (offset + len <= sg->length) {
+ if (!sg_miter_next(sg_miter))
+ goto done;
+
+ host->sg = sg_miter->__sg;
+ buf = sg_miter->addr;
+ remain = sg_miter->length;
+ offset = 0;
+
+ do {
+ fcnt = ((fifo_depth -
+ SDMMC_GET_FCNT(mci_readl(host, STATUS)))
+ << shift) - host->part_buf_count;
+ len = min(remain, fcnt);
+ if (!len)
+ break;
host->push_data(host, (void *)(buf + offset), len);
-
offset += len;
nbytes += len;
- if (offset == sg->length) {
- host->sg = sg = sg_next(sg);
- if (!sg)
- goto done;
-
- offset = 0;
- buf = sg_virt(sg);
- }
- } else {
- unsigned int remaining = sg->length - offset;
-
- host->push_data(host, (void *)(buf + offset),
- remaining);
- nbytes += remaining;
-
- host->sg = sg = sg_next(sg);
- if (!sg)
- goto done;
-
- offset = len - remaining;
- buf = sg_virt(sg);
- host->push_data(host, (void *)buf, offset);
- nbytes += offset;
- }
+ remain -= len;
+ } while (remain);
+ sg_miter->consumed = offset;
status = mci_readl(host, MINTSTS);
mci_writel(host, RINTSTS, SDMMC_INT_TXDR);
if (status & DW_MCI_DATA_ERROR_FLAGS) {
host->data_status = status;
data->bytes_xfered += nbytes;
+ sg_miter_stop(sg_miter);
+ host->sg = NULL;
smp_wmb();
@@ -1410,12 +1432,20 @@ static void dw_mci_write_data_pio(struct dw_mci *host)
return;
}
} while (status & SDMMC_INT_TXDR); /* if TXDR write again */
- host->pio_offset = offset;
data->bytes_xfered += nbytes;
+
+ if (!remain) {
+ if (!sg_miter_next(sg_miter))
+ goto done;
+ sg_miter->consumed = 0;
+ }
+ sg_miter_stop(sg_miter);
return;
done:
data->bytes_xfered += nbytes;
+ sg_miter_stop(sg_miter);
+ host->sg = NULL;
smp_wmb();
set_bit(EVENT_XFER_COMPLETE, &host->pending_events);
}
@@ -1618,6 +1648,7 @@ static void dw_mci_work_routine_card(struct work_struct *work)
* block interrupt, hence setting the
* scatter-gather pointer to NULL.
*/
+ sg_miter_stop(&host->sg_miter);
host->sg = NULL;
ctrl = mci_readl(host, CTRL);
@@ -1678,8 +1709,9 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
if (host->pdata->caps)
mmc->caps = host->pdata->caps;
- else
- mmc->caps = 0;
+
+ if (host->pdata->caps2)
+ mmc->caps2 = host->pdata->caps2;
if (host->pdata->get_bus_wd)
if (host->pdata->get_bus_wd(slot->id) >= 4)
@@ -1923,7 +1955,7 @@ static int dw_mci_probe(struct platform_device *pdev)
* should put it in the platform data.
*/
fifo_size = mci_readl(host, FIFOTH);
- fifo_size = 1 + ((fifo_size >> 16) & 0x7ff);
+ fifo_size = 1 + ((fifo_size >> 16) & 0xfff);
} else {
fifo_size = host->pdata->fifo_depth;
}
@@ -2062,14 +2094,14 @@ static int __exit dw_mci_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
/*
* TODO: we should probably disable the clock to the card in the suspend path.
*/
-static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
+static int dw_mci_suspend(struct device *dev)
{
int i, ret;
- struct dw_mci *host = platform_get_drvdata(pdev);
+ struct dw_mci *host = dev_get_drvdata(dev);
for (i = 0; i < host->num_slots; i++) {
struct dw_mci_slot *slot = host->slot[i];
@@ -2092,10 +2124,10 @@ static int dw_mci_suspend(struct platform_device *pdev, pm_message_t mesg)
return 0;
}
-static int dw_mci_resume(struct platform_device *pdev)
+static int dw_mci_resume(struct device *dev)
{
int i, ret;
- struct dw_mci *host = platform_get_drvdata(pdev);
+ struct dw_mci *host = dev_get_drvdata(dev);
if (host->vmmc)
regulator_enable(host->vmmc);
@@ -2103,7 +2135,7 @@ static int dw_mci_resume(struct platform_device *pdev)
if (host->dma_ops->init)
host->dma_ops->init(host);
- if (!mci_wait_reset(&pdev->dev, host)) {
+ if (!mci_wait_reset(dev, host)) {
ret = -ENODEV;
return ret;
}
@@ -2131,14 +2163,15 @@ static int dw_mci_resume(struct platform_device *pdev)
#else
#define dw_mci_suspend NULL
#define dw_mci_resume NULL
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(dw_mci_pmops, dw_mci_suspend, dw_mci_resume);
static struct platform_driver dw_mci_driver = {
.remove = __exit_p(dw_mci_remove),
- .suspend = dw_mci_suspend,
- .resume = dw_mci_resume,
.driver = {
.name = "dw_mmc",
+ .pm = &dw_mci_pmops,
},
};
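Note: the dw_mmc conversion above moves suspend/resume out of struct platform_driver and into dev_pm_ops via SIMPLE_DEV_PM_OPS(). A sketch of the same shape with placeholder callbacks (names are illustrative only):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int example_suspend(struct device *dev)
{
	/* dev_get_drvdata(dev) replaces platform_get_drvdata(pdev) here */
	return 0;
}

static int example_resume(struct device *dev)
{
	return 0;
}
#endif

/*
 * Defines "example_pm_ops"; when CONFIG_PM_SLEEP is off the callbacks are
 * simply not referenced, so no stubs are required.
 */
static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

static struct platform_driver example_driver = {
	.driver = {
		.name	= "example",
		.pm	= &example_pm_ops,
	},
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");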
diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
index 72c071f..df392a1 100644
--- a/drivers/mmc/host/dw_mmc.h
+++ b/drivers/mmc/host/dw_mmc.h
@@ -126,7 +126,7 @@
#define SDMMC_CMD_RESP_EXP BIT(6)
#define SDMMC_CMD_INDX(n) ((n) & 0x1F)
/* Status register defines */
-#define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FF)
+#define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FFF)
/* Internal DMAC interrupt defines */
#define SDMMC_IDMAC_INT_AI BIT(9)
#define SDMMC_IDMAC_INT_NI BIT(8)
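Note: the PIO rework above replaces the open-coded sg_virt()/sg_next() walk and pio_offset bookkeeping with the sg_mapping_iter API, and the dw_mmc.h change widens the FIFO-count mask that loop depends on. A generic sketch of the iterator pattern, independent of the MCI registers (the fifo_read callback is purely illustrative):

#include <linux/scatterlist.h>

/*
 * Drain a hardware FIFO into a scatterlist in arbitrary chunk sizes,
 * the way the reworked dw_mmc PIO read path does.
 */
static size_t example_drain_fifo(struct scatterlist *sgl, unsigned int nents,
				 size_t (*fifo_read)(void *buf, size_t len))
{
	struct sg_mapping_iter miter;
	size_t total = 0;

	/* SG_MITER_TO_SG: we are writing into the scatterlist pages. */
	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_TO_SG);

	while (sg_miter_next(&miter)) {
		size_t done = fifo_read(miter.addr, miter.length);

		miter.consumed = done;	/* may be short if the FIFO ran dry */
		total += done;
		if (done < miter.length)
			break;
	}

	sg_miter_stop(&miter);	/* required before leaving atomic context */
	return total;
}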
diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
index 74218ad..c8852a8 100644
--- a/drivers/mmc/host/jz4740_mmc.c
+++ b/drivers/mmc/host/jz4740_mmc.c
@@ -1012,17 +1012,7 @@ static struct platform_driver jz4740_mmc_driver = {
},
};
-static int __init jz4740_mmc_init(void)
-{
- return platform_driver_register(&jz4740_mmc_driver);
-}
-module_init(jz4740_mmc_init);
-
-static void __exit jz4740_mmc_exit(void)
-{
- platform_driver_unregister(&jz4740_mmc_driver);
-}
-module_exit(jz4740_mmc_exit);
+module_platform_driver(jz4740_mmc_driver);
MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 92946b8..273306c 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -1525,7 +1525,6 @@ static struct of_device_id mmc_spi_of_match_table[] __devinitdata = {
static struct spi_driver mmc_spi_driver = {
.driver = {
.name = "mmc_spi",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
.of_match_table = mmc_spi_of_match_table,
},
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 0726e59..11e589c 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -374,6 +374,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
struct dma_chan *chan;
struct dma_device *device;
struct dma_async_tx_descriptor *desc;
+ enum dma_data_direction buffer_dirn;
int nr_sg;
/* Check if next job is already prepared */
@@ -387,10 +388,12 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
}
if (data->flags & MMC_DATA_READ) {
- conf.direction = DMA_FROM_DEVICE;
+ conf.direction = DMA_DEV_TO_MEM;
+ buffer_dirn = DMA_FROM_DEVICE;
chan = host->dma_rx_channel;
} else {
- conf.direction = DMA_TO_DEVICE;
+ conf.direction = DMA_MEM_TO_DEV;
+ buffer_dirn = DMA_TO_DEVICE;
chan = host->dma_tx_channel;
}
@@ -403,7 +406,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
return -EINVAL;
device = chan->device;
- nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
+ nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
if (nr_sg == 0)
return -EINVAL;
@@ -426,7 +429,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
unmap_exit:
if (!next)
dmaengine_terminate_all(chan);
- dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
+ dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
return -ENOMEM;
}
@@ -1245,6 +1248,7 @@ static int __devinit mmci_probe(struct amba_device *dev,
if (host->vcc == NULL)
mmc->ocr_avail = plat->ocr_mask;
mmc->caps = plat->capabilities;
+ mmc->caps2 = plat->capabilities2;
/*
* We can do SGIO
@@ -1267,12 +1271,13 @@ static int __devinit mmci_probe(struct amba_device *dev,
/*
* Block size can be up to 2048 bytes, but must be a power of two.
*/
- mmc->max_blk_size = 2048;
+ mmc->max_blk_size = 1 << 11;
/*
- * No limit on the number of blocks transferred.
+ * Limit the number of blocks transferred so that we don't overflow
+ * the maximum request size.
*/
- mmc->max_blk_count = mmc->max_req_size;
+ mmc->max_blk_count = mmc->max_req_size >> 11;
spin_lock_init(&host->lock);
@@ -1502,6 +1507,8 @@ static struct amba_id mmci_ids[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, mmci_ids);
+
static struct amba_driver mmci_driver = {
.drv = {
.name = DRIVER_NAME,
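Note: the mmci limits above follow from simple arithmetic: with 2048-byte (1 << 11) maximum blocks, capping max_blk_count at max_req_size >> 11 guarantees blk_size * blk_count never exceeds the controller's request size. A worked example with arbitrary numbers (not the real mmci limits):

/* Illustrative values only. */
#define EXAMPLE_MAX_REQ_SIZE	65536u
#define EXAMPLE_BLK_SHIFT	11		/* 1 << 11 == 2048-byte blocks */

static void example_fill_limits(unsigned int *max_blk_size,
				unsigned int *max_blk_count)
{
	*max_blk_size  = 1 << EXAMPLE_BLK_SHIFT;			/* 2048 */
	*max_blk_count = EXAMPLE_MAX_REQ_SIZE >> EXAMPLE_BLK_SHIFT;	/* 32 */
	/* Worst case: 32 * 2048 == 65536, never above the request limit. */
}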
diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c
index 80d8eb1..1d14cda 100644
--- a/drivers/mmc/host/msm_sdcc.c
+++ b/drivers/mmc/host/msm_sdcc.c
@@ -689,8 +689,8 @@ msmsdcc_pio_irq(int irq, void *dev_id)
/* Map the current scatter buffer */
local_irq_save(flags);
- buffer = kmap_atomic(sg_page(host->pio.sg),
- KM_BIO_SRC_IRQ) + host->pio.sg->offset;
+ buffer = kmap_atomic(sg_page(host->pio.sg))
+ + host->pio.sg->offset;
buffer += host->pio.sg_off;
remain = host->pio.sg->length - host->pio.sg_off;
len = 0;
@@ -700,7 +700,7 @@ msmsdcc_pio_irq(int irq, void *dev_id)
len = msmsdcc_pio_write(host, buffer, remain, status);
/* Unmap the buffer */
- kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
+ kunmap_atomic(buffer);
local_irq_restore(flags);
host->pio.sg_off += len;
@@ -1480,18 +1480,7 @@ static struct platform_driver msmsdcc_driver = {
},
};
-static int __init msmsdcc_init(void)
-{
- return platform_driver_register(&msmsdcc_driver);
-}
-
-static void __exit msmsdcc_exit(void)
-{
- platform_driver_unregister(&msmsdcc_driver);
-}
-
-module_init(msmsdcc_init);
-module_exit(msmsdcc_exit);
+module_platform_driver(msmsdcc_driver);
MODULE_DESCRIPTION("Qualcomm MSM 7X00A Multimedia Card Interface driver");
MODULE_LICENSE("GPL");
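Note: the msm_sdcc hunk drops the KM_BIO_SRC_IRQ slot argument; kmap_atomic() and kunmap_atomic() now take only the page and the mapped address. A minimal sketch of copying out of one scatterlist entry that way (assumes the copy stays within one page of the entry):

#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

static void example_copy_from_sg(struct scatterlist *sg, void *dst, size_t len)
{
	char *vaddr;

	vaddr = kmap_atomic(sg_page(sg));	/* no KM_* slot argument */
	memcpy(dst, vaddr + sg->offset, len);
	kunmap_atomic(vaddr);
}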
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index 211a495..eeb8cd1 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -679,8 +679,9 @@ static const struct mmc_host_ops mvsd_ops = {
.enable_sdio_irq = mvsd_enable_sdio_irq,
};
-static void __init mv_conf_mbus_windows(struct mvsd_host *host,
- struct mbus_dram_target_info *dram)
+static void __init
+mv_conf_mbus_windows(struct mvsd_host *host,
+ const struct mbus_dram_target_info *dram)
{
void __iomem *iobase = host->base;
int i;
@@ -691,7 +692,7 @@ static void __init mv_conf_mbus_windows(struct mvsd_host *host,
}
for (i = 0; i < dram->num_cs; i++) {
- struct mbus_dram_window *cs = dram->cs + i;
+ const struct mbus_dram_window *cs = dram->cs + i;
writel(((cs->size - 1) & 0xffff0000) |
(cs->mbus_attr << 8) |
(dram->mbus_dram_target_id << 4) | 1,
@@ -705,6 +706,7 @@ static int __init mvsd_probe(struct platform_device *pdev)
struct mmc_host *mmc = NULL;
struct mvsd_host *host = NULL;
const struct mvsdio_platform_data *mvsd_data;
+ const struct mbus_dram_target_info *dram;
struct resource *r;
int ret, irq;
@@ -755,8 +757,9 @@ static int __init mvsd_probe(struct platform_device *pdev)
}
/* (Re-)program MBUS remapping windows if we are asked to. */
- if (mvsd_data->dram != NULL)
- mv_conf_mbus_windows(host, mvsd_data->dram);
+ dram = mv_mbus_dram_info();
+ if (dram)
+ mv_conf_mbus_windows(host, dram);
mvsd_power_down(host);
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index 8e0fbe9..4184b79 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -218,6 +218,7 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
unsigned int blksz = data->blksz;
unsigned int datasize = nob * blksz;
struct scatterlist *sg;
+ enum dma_transfer_direction slave_dirn;
int i, nents;
if (data->flags & MMC_DATA_STREAM)
@@ -240,10 +241,13 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
}
}
- if (data->flags & MMC_DATA_READ)
+ if (data->flags & MMC_DATA_READ) {
host->dma_dir = DMA_FROM_DEVICE;
- else
+ slave_dirn = DMA_DEV_TO_MEM;
+ } else {
host->dma_dir = DMA_TO_DEVICE;
+ slave_dirn = DMA_MEM_TO_DEV;
+ }
nents = dma_map_sg(host->dma->device->dev, data->sg,
data->sg_len, host->dma_dir);
@@ -251,7 +255,7 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
return -EINVAL;
host->desc = host->dma->device->device_prep_slave_sg(host->dma,
- data->sg, data->sg_len, host->dma_dir,
+ data->sg, data->sg_len, slave_dirn,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!host->desc) {
@@ -1047,18 +1051,7 @@ static struct platform_driver mxcmci_driver = {
}
};
-static int __init mxcmci_init(void)
-{
- return platform_driver_register(&mxcmci_driver);
-}
-
-static void __exit mxcmci_exit(void)
-{
- platform_driver_unregister(&mxcmci_driver);
-}
-
-module_init(mxcmci_init);
-module_exit(mxcmci_exit);
+module_platform_driver(mxcmci_driver);
MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 99b449d..382c835 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -154,6 +154,7 @@ struct mxs_mmc_host {
struct dma_chan *dmach;
struct mxs_dma_data dma_data;
unsigned int dma_dir;
+ enum dma_transfer_direction slave_dirn;
u32 ssp_pio_words[SSP_PIO_NUM];
unsigned int version;
@@ -324,7 +325,7 @@ static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
}
desc = host->dmach->device->device_prep_slave_sg(host->dmach,
- sgl, sg_len, host->dma_dir, append);
+ sgl, sg_len, host->slave_dirn, append);
if (desc) {
desc->callback = mxs_mmc_dma_irq_callback;
desc->callback_param = host;
@@ -356,6 +357,7 @@ static void mxs_mmc_bc(struct mxs_mmc_host *host)
host->ssp_pio_words[1] = cmd0;
host->ssp_pio_words[2] = cmd1;
host->dma_dir = DMA_NONE;
+ host->slave_dirn = DMA_TRANS_NONE;
desc = mxs_mmc_prep_dma(host, 0);
if (!desc)
goto out;
@@ -395,6 +397,7 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
host->ssp_pio_words[1] = cmd0;
host->ssp_pio_words[2] = cmd1;
host->dma_dir = DMA_NONE;
+ host->slave_dirn = DMA_TRANS_NONE;
desc = mxs_mmc_prep_dma(host, 0);
if (!desc)
goto out;
@@ -433,6 +436,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
int i;
unsigned short dma_data_dir, timeout;
+ enum dma_transfer_direction slave_dirn;
unsigned int data_size = 0, log2_blksz;
unsigned int blocks = data->blocks;
@@ -448,9 +452,11 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
if (data->flags & MMC_DATA_WRITE) {
dma_data_dir = DMA_TO_DEVICE;
+ slave_dirn = DMA_MEM_TO_DEV;
read = 0;
} else {
dma_data_dir = DMA_FROM_DEVICE;
+ slave_dirn = DMA_DEV_TO_MEM;
read = BM_SSP_CTRL0_READ;
}
@@ -510,6 +516,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
host->ssp_pio_words[1] = cmd0;
host->ssp_pio_words[2] = cmd1;
host->dma_dir = DMA_NONE;
+ host->slave_dirn = DMA_TRANS_NONE;
desc = mxs_mmc_prep_dma(host, 0);
if (!desc)
goto out;
@@ -518,6 +525,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
WARN_ON(host->data != NULL);
host->data = data;
host->dma_dir = dma_data_dir;
+ host->slave_dirn = slave_dirn;
desc = mxs_mmc_prep_dma(host, 1);
if (!desc)
goto out;
@@ -713,7 +721,7 @@ static int mxs_mmc_probe(struct platform_device *pdev)
ret = PTR_ERR(host->clk);
goto out_iounmap;
}
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk);
mxs_mmc_reset(host);
@@ -772,7 +780,7 @@ out_free_dma:
if (host->dmach)
dma_release_channel(host->dmach);
out_clk_put:
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
clk_put(host->clk);
out_iounmap:
iounmap(host->base);
@@ -798,7 +806,7 @@ static int mxs_mmc_remove(struct platform_device *pdev)
if (host->dmach)
dma_release_channel(host->dmach);
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
clk_put(host->clk);
iounmap(host->base);
@@ -819,7 +827,7 @@ static int mxs_mmc_suspend(struct device *dev)
ret = mmc_suspend_host(mmc);
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
return ret;
}
@@ -830,7 +838,7 @@ static int mxs_mmc_resume(struct device *dev)
struct mxs_mmc_host *host = mmc_priv(mmc);
int ret = 0;
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk);
ret = mmc_resume_host(mmc);
@@ -855,18 +863,7 @@ static struct platform_driver mxs_mmc_driver = {
},
};
-static int __init mxs_mmc_init(void)
-{
- return platform_driver_register(&mxs_mmc_driver);
-}
-
-static void __exit mxs_mmc_exit(void)
-{
- platform_driver_unregister(&mxs_mmc_driver);
-}
-
-module_init(mxs_mmc_init);
-module_exit(mxs_mmc_exit);
+module_platform_driver(mxs_mmc_driver);
MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral");
MODULE_AUTHOR("Freescale Semiconductor");
diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c
index ab66f24..1534b58 100644
--- a/drivers/mmc/host/of_mmc_spi.c
+++ b/drivers/mmc/host/of_mmc_spi.c
@@ -113,8 +113,8 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)
const int j = i * 2;
u32 mask;
- mask = mmc_vddrange_to_ocrmask(voltage_ranges[j],
- voltage_ranges[j + 1]);
+ mask = mmc_vddrange_to_ocrmask(be32_to_cpu(voltage_ranges[j]),
+ be32_to_cpu(voltage_ranges[j + 1]));
if (!mask) {
ret = -EINVAL;
dev_err(dev, "OF: voltage-range #%d is invalid\n", i);
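Note: of_mmc_spi now byte-swaps the "voltage-ranges" cells: of_get_property() returns raw big-endian device-tree data, so each u32 must pass through be32_to_cpu() before use. An illustrative reader for the first min/max pair (error handling simplified):

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/errno.h>

static int example_read_voltage_range(struct device_node *np,
				      u32 *min_mv, u32 *max_mv)
{
	const __be32 *prop;
	int len;

	prop = of_get_property(np, "voltage-ranges", &len);
	if (!prop || len < 2 * (int)sizeof(u32))
		return -EINVAL;

	*min_mv = be32_to_cpu(prop[0]);	/* DT cells are big-endian */
	*max_mv = be32_to_cpu(prop[1]);
	return 0;
}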
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 2dba999..887c0e5 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -2,7 +2,7 @@
* linux/drivers/mmc/host/omap.c
*
* Copyright (C) 2004 Nokia Corporation
- * Written by Tuukka Tikkanen and Juha Yrjl<juha.yrjola@nokia.com>
+ * Written by Tuukka Tikkanen and Juha Yrjölä<juha.yrjola@nokia.com>
* Misc hacks here and there by Tony Lindgren <tony@atomide.com>
* Other hacks (DMA, SD, etc) by David Brownell
*
@@ -1634,4 +1634,4 @@ module_exit(mmc_omap_exit);
MODULE_DESCRIPTION("OMAP Multimedia Card driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
-MODULE_AUTHOR("Juha Yrjl");
+MODULE_AUTHOR("Juha Yrjölä");
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index d5fe43d..fd0c661 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -24,7 +24,6 @@
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
-#include <linux/workqueue.h>
#include <linux/timer.h>
#include <linux/clk.h>
#include <linux/mmc/host.h>
@@ -120,7 +119,6 @@
#define MMC_AUTOSUSPEND_DELAY 100
#define MMC_TIMEOUT_MS 20
-#define OMAP_MMC_MASTER_CLOCK 96000000
#define OMAP_MMC_MIN_CLOCK 400000
#define OMAP_MMC_MAX_CLOCK 52000000
#define DRIVER_NAME "omap_hsmmc"
@@ -163,7 +161,6 @@ struct omap_hsmmc_host {
*/
struct regulator *vcc;
struct regulator *vcc_aux;
- struct work_struct mmc_carddetect_work;
void __iomem *base;
resource_size_t mapbase;
spinlock_t irq_lock; /* Prevent races with irq handler */
@@ -598,12 +595,12 @@ static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host)
}
/* Calculate divisor for the given clock frequency */
-static u16 calc_divisor(struct mmc_ios *ios)
+static u16 calc_divisor(struct omap_hsmmc_host *host, struct mmc_ios *ios)
{
u16 dsor = 0;
if (ios->clock) {
- dsor = DIV_ROUND_UP(OMAP_MMC_MASTER_CLOCK, ios->clock);
+ dsor = DIV_ROUND_UP(clk_get_rate(host->fclk), ios->clock);
if (dsor > 250)
dsor = 250;
}
@@ -623,7 +620,7 @@ static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host)
regval = OMAP_HSMMC_READ(host->base, SYSCTL);
regval = regval & ~(CLKD_MASK | DTO_MASK);
- regval = regval | (calc_divisor(ios) << 6) | (DTO << 16);
+ regval = regval | (calc_divisor(host, ios) << 6) | (DTO << 16);
OMAP_HSMMC_WRITE(host->base, SYSCTL, regval);
OMAP_HSMMC_WRITE(host->base, SYSCTL,
OMAP_HSMMC_READ(host->base, SYSCTL) | ICE);
@@ -1280,17 +1277,16 @@ static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host)
}
/*
- * Work Item to notify the core about card insertion/removal
+ * irq handler to notify the core about card insertion/removal
*/
-static void omap_hsmmc_detect(struct work_struct *work)
+static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id)
{
- struct omap_hsmmc_host *host =
- container_of(work, struct omap_hsmmc_host, mmc_carddetect_work);
+ struct omap_hsmmc_host *host = dev_id;
struct omap_mmc_slot_data *slot = &mmc_slot(host);
int carddetect;
if (host->suspended)
- return;
+ return IRQ_HANDLED;
sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch");
@@ -1305,19 +1301,6 @@ static void omap_hsmmc_detect(struct work_struct *work)
mmc_detect_change(host->mmc, (HZ * 200) / 1000);
else
mmc_detect_change(host->mmc, (HZ * 50) / 1000);
-}
-
-/*
- * ISR for handling card insertion and removal
- */
-static irqreturn_t omap_hsmmc_cd_handler(int irq, void *dev_id)
-{
- struct omap_hsmmc_host *host = (struct omap_hsmmc_host *)dev_id;
-
- if (host->suspended)
- return IRQ_HANDLED;
- schedule_work(&host->mmc_carddetect_work);
-
return IRQ_HANDLED;
}
@@ -1919,7 +1902,6 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
host->next_data.cookie = 1;
platform_set_drvdata(pdev, host);
- INIT_WORK(&host->mmc_carddetect_work, omap_hsmmc_detect);
mmc->ops = &omap_hsmmc_ops;
@@ -1991,6 +1973,8 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
if (mmc_slot(host).nonremovable)
mmc->caps |= MMC_CAP_NONREMOVABLE;
+ mmc->pm_caps = mmc_slot(host).pm_caps;
+
omap_hsmmc_conf_bus_power(host);
/* Select DMA lines */
@@ -2047,10 +2031,11 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
/* Request IRQ for card detect */
if ((mmc_slot(host).card_detect_irq)) {
- ret = request_irq(mmc_slot(host).card_detect_irq,
- omap_hsmmc_cd_handler,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- mmc_hostname(mmc), host);
+ ret = request_threaded_irq(mmc_slot(host).card_detect_irq,
+ NULL,
+ omap_hsmmc_detect,
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ mmc_hostname(mmc), host);
if (ret) {
dev_dbg(mmc_dev(host->mmc),
"Unable to grab MMC CD IRQ\n");
@@ -2129,7 +2114,6 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
free_irq(host->irq, host);
if (mmc_slot(host).card_detect_irq)
free_irq(mmc_slot(host).card_detect_irq, host);
- flush_work_sync(&host->mmc_carddetect_work);
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
@@ -2176,16 +2160,9 @@ static int omap_hsmmc_suspend(struct device *dev)
return ret;
}
}
- cancel_work_sync(&host->mmc_carddetect_work);
ret = mmc_suspend_host(host->mmc);
- if (ret == 0) {
- omap_hsmmc_disable_irq(host);
- OMAP_HSMMC_WRITE(host->base, HCTL,
- OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
- if (host->got_dbclk)
- clk_disable(host->dbclk);
- } else {
+ if (ret) {
host->suspended = 0;
if (host->pdata->resume) {
ret = host->pdata->resume(&pdev->dev,
@@ -2194,9 +2171,20 @@ static int omap_hsmmc_suspend(struct device *dev)
dev_dbg(mmc_dev(host->mmc),
"Unmask interrupt failed\n");
}
+ goto err;
}
- pm_runtime_put_sync(host->dev);
+
+ if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) {
+ omap_hsmmc_disable_irq(host);
+ OMAP_HSMMC_WRITE(host->base, HCTL,
+ OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
+ }
+ if (host->got_dbclk)
+ clk_disable(host->dbclk);
+
}
+err:
+ pm_runtime_put_sync(host->dev);
return ret;
}
@@ -2216,7 +2204,8 @@ static int omap_hsmmc_resume(struct device *dev)
if (host->got_dbclk)
clk_enable(host->dbclk);
- omap_hsmmc_conf_bus_power(host);
+ if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
+ omap_hsmmc_conf_bus_power(host);
if (host->pdata->resume) {
ret = host->pdata->resume(&pdev->dev, host->slot_id);
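Note: omap_hsmmc replaces its workqueue-based card-detect path with request_threaded_irq(): the hard handler is NULL, so omap_hsmmc_detect() runs in thread context where mmc_detect_change() is safe to call. A generic sketch (IRQF_ONESHOT added here because later kernels reject a NULL hard handler without it):

#include <linux/interrupt.h>

/* Thread-context handler: may sleep, unlike a hard IRQ handler. */
static irqreturn_t example_cd_thread(int irq, void *dev_id)
{
	/* ...debounce, tell the core to rescan the card, etc... */
	return IRQ_HANDLED;
}

static int example_request_cd_irq(int irq, void *dev_id)
{
	/* NULL hard handler: the core wakes the thread on every edge. */
	return request_threaded_irq(irq, NULL, example_cd_thread,
				    IRQF_TRIGGER_RISING |
				    IRQF_TRIGGER_FALLING |
				    IRQF_ONESHOT,
				    "example-cd", dev_id);
}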
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index fc4356e..cb2dc0e 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -872,18 +872,7 @@ static struct platform_driver pxamci_driver = {
},
};
-static int __init pxamci_init(void)
-{
- return platform_driver_register(&pxamci_driver);
-}
-
-static void __exit pxamci_exit(void)
-{
- platform_driver_unregister(&pxamci_driver);
-}
-
-module_init(pxamci_init);
-module_exit(pxamci_exit);
+module_platform_driver(pxamci_driver);
MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 720f993..1bcfd6d 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1914,18 +1914,7 @@ static struct platform_driver s3cmci_driver = {
.shutdown = s3cmci_shutdown,
};
-static int __init s3cmci_init(void)
-{
- return platform_driver_register(&s3cmci_driver);
-}
-
-static void __exit s3cmci_exit(void)
-{
- platform_driver_unregister(&s3cmci_driver);
-}
-
-module_init(s3cmci_init);
-module_exit(s3cmci_exit);
+module_platform_driver(s3cmci_driver);
MODULE_DESCRIPTION("Samsung S3C MMC/SD Card Interface driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c
index b4257e7..28a8708 100644
--- a/drivers/mmc/host/sdhci-cns3xxx.c
+++ b/drivers/mmc/host/sdhci-cns3xxx.c
@@ -115,17 +115,7 @@ static struct platform_driver sdhci_cns3xxx_driver = {
.remove = __devexit_p(sdhci_cns3xxx_remove),
};
-static int __init sdhci_cns3xxx_init(void)
-{
- return platform_driver_register(&sdhci_cns3xxx_driver);
-}
-module_init(sdhci_cns3xxx_init);
-
-static void __exit sdhci_cns3xxx_exit(void)
-{
- platform_driver_unregister(&sdhci_cns3xxx_driver);
-}
-module_exit(sdhci_cns3xxx_exit);
+module_platform_driver(sdhci_cns3xxx_driver);
MODULE_DESCRIPTION("SDHCI driver for CNS3xxx");
MODULE_AUTHOR("Scott Shu, "
diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c
index a81312c..46fd1fd 100644
--- a/drivers/mmc/host/sdhci-dove.c
+++ b/drivers/mmc/host/sdhci-dove.c
@@ -88,17 +88,7 @@ static struct platform_driver sdhci_dove_driver = {
.remove = __devexit_p(sdhci_dove_remove),
};
-static int __init sdhci_dove_init(void)
-{
- return platform_driver_register(&sdhci_dove_driver);
-}
-module_init(sdhci_dove_init);
-
-static void __exit sdhci_dove_exit(void)
-{
- platform_driver_unregister(&sdhci_dove_driver);
-}
-module_exit(sdhci_dove_exit);
+module_platform_driver(sdhci_dove_driver);
MODULE_DESCRIPTION("SDHCI driver for Dove");
MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>, "
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 38ebc4e..0be4e20 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -269,8 +269,9 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
imx_data->scratchpad = val;
return;
case SDHCI_COMMAND:
- if ((host->cmd->opcode == MMC_STOP_TRANSMISSION)
- && (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
+ if ((host->cmd->opcode == MMC_STOP_TRANSMISSION ||
+ host->cmd->opcode == MMC_SET_BLOCK_COUNT) &&
+ (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
val |= SDHCI_CMD_ABORTCMD;
if (is_imx6q_usdhc(imx_data)) {
@@ -606,17 +607,7 @@ static struct platform_driver sdhci_esdhc_imx_driver = {
.remove = __devexit_p(sdhci_esdhc_imx_remove),
};
-static int __init sdhci_esdhc_imx_init(void)
-{
- return platform_driver_register(&sdhci_esdhc_imx_driver);
-}
-module_init(sdhci_esdhc_imx_init);
-
-static void __exit sdhci_esdhc_imx_exit(void)
-{
- platform_driver_unregister(&sdhci_esdhc_imx_driver);
-}
-module_exit(sdhci_esdhc_imx_exit);
+module_platform_driver(sdhci_esdhc_imx_driver);
MODULE_DESCRIPTION("SDHCI driver for Freescale i.MX eSDHC");
MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>");
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index c3b08f1..b97b2f5 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -73,7 +73,7 @@ static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock)
| (div << ESDHC_DIVIDER_SHIFT)
| (pre_div << ESDHC_PREDIV_SHIFT));
sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL);
- mdelay(100);
+ mdelay(1);
out:
host->clock = clock;
}
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
index 01e5f62..5d876ff 100644
--- a/drivers/mmc/host/sdhci-of-esdhc.c
+++ b/drivers/mmc/host/sdhci-of-esdhc.c
@@ -38,6 +38,23 @@ static u8 esdhc_readb(struct sdhci_host *host, int reg)
int base = reg & ~0x3;
int shift = (reg & 0x3) * 8;
u8 ret = (in_be32(host->ioaddr + base) >> shift) & 0xff;
+
+ /*
+ * "DMA select" locates at offset 0x28 in SD specification, but on
+ * P5020 or P3041, it locates at 0x29.
+ */
+ if (reg == SDHCI_HOST_CONTROL) {
+ u32 dma_bits;
+
+ dma_bits = in_be32(host->ioaddr + reg);
+ /* DMA select is 22,23 bits in Protocol Control Register */
+ dma_bits = (dma_bits >> 5) & SDHCI_CTRL_DMA_MASK;
+
+ /* fixup the result */
+ ret &= ~SDHCI_CTRL_DMA_MASK;
+ ret |= dma_bits;
+ }
+
return ret;
}
@@ -56,6 +73,21 @@ static void esdhc_writew(struct sdhci_host *host, u16 val, int reg)
static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg)
{
+ /*
+ * "DMA select" location is offset 0x28 in SD specification, but on
+ * P5020 or P3041, it's located at 0x29.
+ */
+ if (reg == SDHCI_HOST_CONTROL) {
+ u32 dma_bits;
+
+ /* DMA select is 22,23 bits in Protocol Control Register */
+ dma_bits = (val & SDHCI_CTRL_DMA_MASK) << 5;
+ clrsetbits_be32(host->ioaddr + reg , SDHCI_CTRL_DMA_MASK << 5,
+ dma_bits);
+ val &= ~SDHCI_CTRL_DMA_MASK;
+ val |= in_be32(host->ioaddr + reg) & SDHCI_CTRL_DMA_MASK;
+ }
+
/* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */
if (reg == SDHCI_HOST_CONTROL)
val &= ~ESDHC_HOST_CONTROL_RES;
@@ -131,17 +163,7 @@ static struct platform_driver sdhci_esdhc_driver = {
.remove = __devexit_p(sdhci_esdhc_remove),
};
-static int __init sdhci_esdhc_init(void)
-{
- return platform_driver_register(&sdhci_esdhc_driver);
-}
-module_init(sdhci_esdhc_init);
-
-static void __exit sdhci_esdhc_exit(void)
-{
- platform_driver_unregister(&sdhci_esdhc_driver);
-}
-module_exit(sdhci_esdhc_exit);
+module_platform_driver(sdhci_esdhc_driver);
MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC");
MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, "
diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c
index 3619adc..0ce088a 100644
--- a/drivers/mmc/host/sdhci-of-hlwd.c
+++ b/drivers/mmc/host/sdhci-of-hlwd.c
@@ -93,17 +93,7 @@ static struct platform_driver sdhci_hlwd_driver = {
.remove = __devexit_p(sdhci_hlwd_remove),
};
-static int __init sdhci_hlwd_init(void)
-{
- return platform_driver_register(&sdhci_hlwd_driver);
-}
-module_init(sdhci_hlwd_init);
-
-static void __exit sdhci_hlwd_exit(void)
-{
- platform_driver_unregister(&sdhci_hlwd_driver);
-}
-module_exit(sdhci_hlwd_exit);
+module_platform_driver(sdhci_hlwd_driver);
MODULE_DESCRIPTION("Nintendo Wii SDHCI OF driver");
MODULE_AUTHOR("The GameCube Linux Team, Albert Herranz");
diff --git a/drivers/mmc/host/sdhci-pci-data.c b/drivers/mmc/host/sdhci-pci-data.c
new file mode 100644
index 0000000..a611217
--- /dev/null
+++ b/drivers/mmc/host/sdhci-pci-data.c
@@ -0,0 +1,5 @@
+#include <linux/module.h>
+#include <linux/mmc/sdhci-pci-data.h>
+
+struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev, int slotno);
+EXPORT_SYMBOL_GPL(sdhci_pci_get_data);
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 6878a946..6ebdc40 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -23,8 +23,8 @@
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/gpio.h>
-#include <linux/sfi.h>
#include <linux/pm_runtime.h>
+#include <linux/mmc/sdhci-pci-data.h>
#include "sdhci.h"
@@ -61,6 +61,7 @@ struct sdhci_pci_fixes {
struct sdhci_pci_slot {
struct sdhci_pci_chip *chip;
struct sdhci_host *host;
+ struct sdhci_pci_data *data;
int pci_bar;
int rst_n_gpio;
@@ -171,32 +172,9 @@ static int mrst_hc_probe(struct sdhci_pci_chip *chip)
return 0;
}
-/* Medfield eMMC hardware reset GPIOs */
-static int mfd_emmc0_rst_gpio = -EINVAL;
-static int mfd_emmc1_rst_gpio = -EINVAL;
-
-static int mfd_emmc_gpio_parse(struct sfi_table_header *table)
-{
- struct sfi_table_simple *sb = (struct sfi_table_simple *)table;
- struct sfi_gpio_table_entry *entry;
- int i, num;
-
- num = SFI_GET_NUM_ENTRIES(sb, struct sfi_gpio_table_entry);
- entry = (struct sfi_gpio_table_entry *)sb->pentry;
-
- for (i = 0; i < num; i++, entry++) {
- if (!strncmp(entry->pin_name, "emmc0_rst", SFI_NAME_LEN))
- mfd_emmc0_rst_gpio = entry->pin_no;
- else if (!strncmp(entry->pin_name, "emmc1_rst", SFI_NAME_LEN))
- mfd_emmc1_rst_gpio = entry->pin_no;
- }
-
- return 0;
-}
-
#ifdef CONFIG_PM_RUNTIME
-static irqreturn_t mfd_sd_cd(int irq, void *dev_id)
+static irqreturn_t sdhci_pci_sd_cd(int irq, void *dev_id)
{
struct sdhci_pci_slot *slot = dev_id;
struct sdhci_host *host = slot->host;
@@ -205,15 +183,16 @@ static irqreturn_t mfd_sd_cd(int irq, void *dev_id)
return IRQ_HANDLED;
}
-#define MFLD_SD_CD_PIN 69
-
-static int mfd_sd_probe_slot(struct sdhci_pci_slot *slot)
+static void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot)
{
- int err, irq, gpio = MFLD_SD_CD_PIN;
+ int err, irq, gpio = slot->cd_gpio;
slot->cd_gpio = -EINVAL;
slot->cd_irq = -EINVAL;
+ if (!gpio_is_valid(gpio))
+ return;
+
err = gpio_request(gpio, "sd_cd");
if (err < 0)
goto out;
@@ -226,72 +205,53 @@ static int mfd_sd_probe_slot(struct sdhci_pci_slot *slot)
if (irq < 0)
goto out_free;
- err = request_irq(irq, mfd_sd_cd, IRQF_TRIGGER_RISING |
+ err = request_irq(irq, sdhci_pci_sd_cd, IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING, "sd_cd", slot);
if (err)
goto out_free;
slot->cd_gpio = gpio;
slot->cd_irq = irq;
- slot->host->quirks2 |= SDHCI_QUIRK2_OWN_CARD_DETECTION;
- return 0;
+ return;
out_free:
gpio_free(gpio);
out:
dev_warn(&slot->chip->pdev->dev, "failed to setup card detect wake up\n");
- return 0;
}
-static void mfd_sd_remove_slot(struct sdhci_pci_slot *slot, int dead)
+static void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot)
{
if (slot->cd_irq >= 0)
free_irq(slot->cd_irq, slot);
- gpio_free(slot->cd_gpio);
+ if (gpio_is_valid(slot->cd_gpio))
+ gpio_free(slot->cd_gpio);
}
#else
-#define mfd_sd_probe_slot NULL
-#define mfd_sd_remove_slot NULL
+static inline void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot)
+{
+}
+
+static inline void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot)
+{
+}
#endif
static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
- const char *name = NULL;
- int gpio = -EINVAL;
-
- sfi_table_parse(SFI_SIG_GPIO, NULL, NULL, mfd_emmc_gpio_parse);
-
- switch (slot->chip->pdev->device) {
- case PCI_DEVICE_ID_INTEL_MFD_EMMC0:
- gpio = mfd_emmc0_rst_gpio;
- name = "eMMC0_reset";
- break;
- case PCI_DEVICE_ID_INTEL_MFD_EMMC1:
- gpio = mfd_emmc1_rst_gpio;
- name = "eMMC1_reset";
- break;
- }
-
- if (!gpio_request(gpio, name)) {
- gpio_direction_output(gpio, 1);
- slot->rst_n_gpio = gpio;
- slot->host->mmc->caps |= MMC_CAP_HW_RESET;
- }
-
slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE;
-
slot->host->mmc->caps2 = MMC_CAP2_BOOTPART_NOACC;
-
return 0;
}
-static void mfd_emmc_remove_slot(struct sdhci_pci_slot *slot, int dead)
+static int mfd_sdio_probe_slot(struct sdhci_pci_slot *slot)
{
- gpio_free(slot->rst_n_gpio);
+ slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE;
+ return 0;
}
static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = {
@@ -307,20 +267,18 @@ static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = {
static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.allow_runtime_pm = true,
- .probe_slot = mfd_sd_probe_slot,
- .remove_slot = mfd_sd_remove_slot,
};
static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.allow_runtime_pm = true,
+ .probe_slot = mfd_sdio_probe_slot,
};
static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc = {
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC,
.allow_runtime_pm = true,
.probe_slot = mfd_emmc_probe_slot,
- .remove_slot = mfd_emmc_remove_slot,
};
/* O2Micro extra registers */
@@ -1012,11 +970,8 @@ static int sdhci_pci_suspend(struct device *dev)
ret = sdhci_suspend_host(slot->host);
- if (ret) {
- for (i--; i >= 0; i--)
- sdhci_resume_host(chip->slots[i]->host);
- return ret;
- }
+ if (ret)
+ goto err_pci_suspend;
slot_pm_flags = slot->host->mmc->pm_flags;
if (slot_pm_flags & MMC_PM_WAKE_SDIO_IRQ)
@@ -1027,11 +982,8 @@ static int sdhci_pci_suspend(struct device *dev)
if (chip->fixes && chip->fixes->suspend) {
ret = chip->fixes->suspend(chip);
- if (ret) {
- for (i = chip->num_slots - 1; i >= 0; i--)
- sdhci_resume_host(chip->slots[i]->host);
- return ret;
- }
+ if (ret)
+ goto err_pci_suspend;
}
pci_save_state(pdev);
@@ -1048,6 +1000,11 @@ static int sdhci_pci_suspend(struct device *dev)
}
return 0;
+
+err_pci_suspend:
+ while (--i >= 0)
+ sdhci_resume_host(chip->slots[i]->host);
+ return ret;
}
static int sdhci_pci_resume(struct device *dev)
@@ -1113,23 +1070,22 @@ static int sdhci_pci_runtime_suspend(struct device *dev)
ret = sdhci_runtime_suspend_host(slot->host);
- if (ret) {
- for (i--; i >= 0; i--)
- sdhci_runtime_resume_host(chip->slots[i]->host);
- return ret;
- }
+ if (ret)
+ goto err_pci_runtime_suspend;
}
if (chip->fixes && chip->fixes->suspend) {
ret = chip->fixes->suspend(chip);
- if (ret) {
- for (i = chip->num_slots - 1; i >= 0; i--)
- sdhci_runtime_resume_host(chip->slots[i]->host);
- return ret;
- }
+ if (ret)
+ goto err_pci_runtime_suspend;
}
return 0;
+
+err_pci_runtime_suspend:
+ while (--i >= 0)
+ sdhci_runtime_resume_host(chip->slots[i]->host);
+ return ret;
}
static int sdhci_pci_runtime_resume(struct device *dev)
@@ -1190,11 +1146,12 @@ static const struct dev_pm_ops sdhci_pci_pm_ops = {
\*****************************************************************************/
static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
- struct pci_dev *pdev, struct sdhci_pci_chip *chip, int bar)
+ struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar,
+ int slotno)
{
struct sdhci_pci_slot *slot;
struct sdhci_host *host;
- int ret;
+ int ret, bar = first_bar + slotno;
if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar);
@@ -1228,6 +1185,23 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
slot->host = host;
slot->pci_bar = bar;
slot->rst_n_gpio = -EINVAL;
+ slot->cd_gpio = -EINVAL;
+
+ /* Retrieve platform data if there is any */
+ if (*sdhci_pci_get_data)
+ slot->data = sdhci_pci_get_data(pdev, slotno);
+
+ if (slot->data) {
+ if (slot->data->setup) {
+ ret = slot->data->setup(slot->data);
+ if (ret) {
+ dev_err(&pdev->dev, "platform setup failed\n");
+ goto free;
+ }
+ }
+ slot->rst_n_gpio = slot->data->rst_n_gpio;
+ slot->cd_gpio = slot->data->cd_gpio;
+ }
host->hw_name = "PCI";
host->ops = &sdhci_pci_ops;
@@ -1238,7 +1212,7 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
ret = pci_request_region(pdev, bar, mmc_hostname(host->mmc));
if (ret) {
dev_err(&pdev->dev, "cannot request region\n");
- goto free;
+ goto cleanup;
}
host->ioaddr = pci_ioremap_bar(pdev, bar);
@@ -1254,15 +1228,30 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
goto unmap;
}
+ if (gpio_is_valid(slot->rst_n_gpio)) {
+ if (!gpio_request(slot->rst_n_gpio, "eMMC_reset")) {
+ gpio_direction_output(slot->rst_n_gpio, 1);
+ slot->host->mmc->caps |= MMC_CAP_HW_RESET;
+ } else {
+ dev_warn(&pdev->dev, "failed to request rst_n_gpio\n");
+ slot->rst_n_gpio = -EINVAL;
+ }
+ }
+
host->mmc->pm_caps = MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
ret = sdhci_add_host(host);
if (ret)
goto remove;
+ sdhci_pci_add_own_cd(slot);
+
return slot;
remove:
+ if (gpio_is_valid(slot->rst_n_gpio))
+ gpio_free(slot->rst_n_gpio);
+
if (chip->fixes && chip->fixes->remove_slot)
chip->fixes->remove_slot(slot, 0);
@@ -1272,6 +1261,10 @@ unmap:
release:
pci_release_region(pdev, bar);
+cleanup:
+ if (slot->data && slot->data->cleanup)
+ slot->data->cleanup(slot->data);
+
free:
sdhci_free_host(host);
@@ -1283,6 +1276,8 @@ static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
int dead;
u32 scratch;
+ sdhci_pci_remove_own_cd(slot);
+
dead = 0;
scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS);
if (scratch == (u32)-1)
@@ -1290,9 +1285,15 @@ static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)
sdhci_remove_host(slot->host, dead);
+ if (gpio_is_valid(slot->rst_n_gpio))
+ gpio_free(slot->rst_n_gpio);
+
if (slot->chip->fixes && slot->chip->fixes->remove_slot)
slot->chip->fixes->remove_slot(slot, dead);
+ if (slot->data && slot->data->cleanup)
+ slot->data->cleanup(slot->data);
+
pci_release_region(slot->chip->pdev, slot->pci_bar);
sdhci_free_host(slot->host);
@@ -1379,7 +1380,7 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev,
slots = chip->num_slots; /* Quirk may have changed this */
for (i = 0; i < slots; i++) {
- slot = sdhci_pci_probe_slot(pdev, chip, first_bar + i);
+ slot = sdhci_pci_probe_slot(pdev, chip, first_bar, i);
if (IS_ERR(slot)) {
for (i--; i >= 0; i--)
sdhci_pci_remove_slot(chip->slots[i]);
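Note: the new sdhci-pci-data.c exports a single hook, sdhci_pci_get_data; board code can point it at a lookup routine and sdhci_pci_probe_slot() will call it per slot to obtain rst_n_gpio, cd_gpio and the optional setup()/cleanup() callbacks. A hedged sketch of how a board file might use it (field names come from the hunks above; the GPIO number and initcall level are illustrative assumptions):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/mmc/sdhci-pci-data.h>

/* One static record; a real board would key this off pdev and slotno. */
static struct sdhci_pci_data example_slot_data = {
	.rst_n_gpio	= -EINVAL,	/* no eMMC reset line wired */
	.cd_gpio	= 69,		/* illustrative GPIO number only */
};

static struct sdhci_pci_data *example_get_data(struct pci_dev *pdev,
					       int slotno)
{
	return slotno == 0 ? &example_slot_data : NULL;
}

static int __init example_board_init(void)
{
	/* Must run before sdhci-pci probes its slots. */
	sdhci_pci_get_data = example_get_data;
	return 0;
}
arch_initcall(example_board_init);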
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index 03970bc..c5c2a48 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -2,7 +2,7 @@
* sdhci-pltfm.c Support for SDHCI platform devices
* Copyright (c) 2009 Intel Corporation
*
- * Copyright (c) 2007 Freescale Semiconductor, Inc.
+ * Copyright (c) 2007, 2011 Freescale Semiconductor, Inc.
* Copyright (c) 2009 MontaVista Software, Inc.
*
* Authors: Xiaobo Xie <X.Xie@freescale.com>
@@ -71,6 +71,14 @@ void sdhci_get_of_property(struct platform_device *pdev)
if (sdhci_of_wp_inverted(np))
host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT;
+ if (of_device_is_compatible(np, "fsl,p2020-rev1-esdhc"))
+ host->quirks |= SDHCI_QUIRK_BROKEN_DMA;
+
+ if (of_device_is_compatible(np, "fsl,p2020-esdhc") ||
+ of_device_is_compatible(np, "fsl,p1010-esdhc") ||
+ of_device_is_compatible(np, "fsl,mpc8536-esdhc"))
+ host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
+
clk = of_get_property(np, "clock-frequency", &size);
if (clk && size == sizeof(*clk) && *clk)
pltfm_host->clock = be32_to_cpup(clk);
diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c
index 7a039c3..dbb75bf 100644
--- a/drivers/mmc/host/sdhci-pxav2.c
+++ b/drivers/mmc/host/sdhci-pxav2.c
@@ -223,18 +223,8 @@ static struct platform_driver sdhci_pxav2_driver = {
.probe = sdhci_pxav2_probe,
.remove = __devexit_p(sdhci_pxav2_remove),
};
-static int __init sdhci_pxav2_init(void)
-{
- return platform_driver_register(&sdhci_pxav2_driver);
-}
-
-static void __exit sdhci_pxav2_exit(void)
-{
- platform_driver_unregister(&sdhci_pxav2_driver);
-}
-module_init(sdhci_pxav2_init);
-module_exit(sdhci_pxav2_exit);
+module_platform_driver(sdhci_pxav2_driver);
MODULE_DESCRIPTION("SDHCI driver for pxav2");
MODULE_AUTHOR("Marvell International Ltd.");
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 15673a7..f296956 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -269,18 +269,8 @@ static struct platform_driver sdhci_pxav3_driver = {
.probe = sdhci_pxav3_probe,
.remove = __devexit_p(sdhci_pxav3_remove),
};
-static int __init sdhci_pxav3_init(void)
-{
- return platform_driver_register(&sdhci_pxav3_driver);
-}
-
-static void __exit sdhci_pxav3_exit(void)
-{
- platform_driver_unregister(&sdhci_pxav3_driver);
-}
-module_init(sdhci_pxav3_init);
-module_exit(sdhci_pxav3_exit);
+module_platform_driver(sdhci_pxav3_driver);
MODULE_DESCRIPTION("SDHCI driver for pxav3");
MODULE_AUTHOR("Marvell International Ltd.");
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 0d33ff0..1af756e 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -80,7 +80,7 @@ static void sdhci_s3c_check_sclk(struct sdhci_host *host)
tmp &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK;
tmp |= ourhost->cur_clk << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT;
- writel(tmp, host->ioaddr + 0x80);
+ writel(tmp, host->ioaddr + S3C_SDHCI_CONTROL2);
}
}
@@ -435,14 +435,11 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
for (clks = 0, ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
struct clk *clk;
- char *name = pdata->clocks[ptr];
-
- if (name == NULL)
- continue;
+ char name[14];
+ snprintf(name, 14, "mmc_busclk.%d", ptr);
clk = clk_get(dev, name);
if (IS_ERR(clk)) {
- dev_err(dev, "failed to get clock %s\n", name);
continue;
}
@@ -524,6 +521,9 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
if (pdata->host_caps)
host->mmc->caps |= pdata->host_caps;
+ if (pdata->pm_caps)
+ host->mmc->pm_caps |= pdata->pm_caps;
+
host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR |
SDHCI_QUIRK_32BIT_DMA_SIZE);
@@ -657,18 +657,7 @@ static struct platform_driver sdhci_s3c_driver = {
},
};
-static int __init sdhci_s3c_init(void)
-{
- return platform_driver_register(&sdhci_s3c_driver);
-}
-
-static void __exit sdhci_s3c_exit(void)
-{
- platform_driver_unregister(&sdhci_s3c_driver);
-}
-
-module_init(sdhci_s3c_init);
-module_exit(sdhci_s3c_exit);
+module_platform_driver(sdhci_s3c_driver);
MODULE_DESCRIPTION("Samsung SDHCI (HSMMC) glue");
MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 63cc8b6..b7f8b33 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -21,6 +21,7 @@
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
+#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdhci-spear.h>
@@ -271,26 +272,54 @@ static int __devexit sdhci_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int sdhci_suspend(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct spear_sdhci *sdhci = dev_get_platdata(dev);
+ int ret;
+
+ ret = sdhci_suspend_host(host);
+ if (!ret)
+ clk_disable(sdhci->clk);
+
+ return ret;
+}
+
+static int sdhci_resume(struct device *dev)
+{
+ struct sdhci_host *host = dev_get_drvdata(dev);
+ struct spear_sdhci *sdhci = dev_get_platdata(dev);
+ int ret;
+
+ ret = clk_enable(sdhci->clk);
+ if (ret) {
+ dev_dbg(dev, "Resume: Error enabling clock\n");
+ return ret;
+ }
+
+ return sdhci_resume_host(host);
+}
+
+const struct dev_pm_ops sdhci_pm_ops = {
+ .suspend = sdhci_suspend,
+ .resume = sdhci_resume,
+};
+#endif
+
static struct platform_driver sdhci_driver = {
.driver = {
.name = "sdhci",
.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &sdhci_pm_ops,
+#endif
},
.probe = sdhci_probe,
.remove = __devexit_p(sdhci_remove),
};
-static int __init sdhci_init(void)
-{
- return platform_driver_register(&sdhci_driver);
-}
-module_init(sdhci_init);
-
-static void __exit sdhci_exit(void)
-{
- platform_driver_unregister(&sdhci_driver);
-}
-module_exit(sdhci_exit);
+module_platform_driver(sdhci_driver);
MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver");
MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index e2e18d3..78a36eb 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -324,17 +324,7 @@ static struct platform_driver sdhci_tegra_driver = {
.remove = __devexit_p(sdhci_tegra_remove),
};
-static int __init sdhci_tegra_init(void)
-{
- return platform_driver_register(&sdhci_tegra_driver);
-}
-module_init(sdhci_tegra_init);
-
-static void __exit sdhci_tegra_exit(void)
-{
- platform_driver_unregister(&sdhci_tegra_driver);
-}
-module_exit(sdhci_tegra_exit);
+module_platform_driver(sdhci_tegra_driver);
MODULE_DESCRIPTION("SDHCI driver for Tegra");
MODULE_AUTHOR(" Google, Inc.");
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 19ed580..8d66706 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -49,7 +49,7 @@ static void sdhci_finish_data(struct sdhci_host *);
static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);
static void sdhci_finish_command(struct sdhci_host *);
-static int sdhci_execute_tuning(struct mmc_host *mmc);
+static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode);
static void sdhci_tuning_timer(unsigned long data);
#ifdef CONFIG_PM_RUNTIME
@@ -146,10 +146,8 @@ static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
u32 present, irqs;
- if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
- return;
-
- if (host->quirks2 & SDHCI_QUIRK2_OWN_CARD_DETECTION)
+ if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
+ !mmc_card_is_removable(host->mmc))
return;
present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
@@ -214,6 +212,11 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask)
if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET)
sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier);
+
+ if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
+ if ((host->ops->enable_dma) && (mask & SDHCI_RESET_ALL))
+ host->ops->enable_dma(host);
+ }
}
static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
@@ -423,12 +426,12 @@ static void sdhci_transfer_pio(struct sdhci_host *host)
static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
local_irq_save(*flags);
- return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
+ return kmap_atomic(sg_page(sg)) + sg->offset;
}
static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
- kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
+ kunmap_atomic(buffer);
local_irq_restore(*flags);
}
@@ -1016,7 +1019,8 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
flags |= SDHCI_CMD_INDEX;
/* CMD19 is special in that the Data Present Select should be set */
- if (cmd->data || (cmd->opcode == MMC_SEND_TUNING_BLOCK))
+ if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
+ cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
flags |= SDHCI_CMD_DATA;
sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
@@ -1066,12 +1070,15 @@ static void sdhci_finish_command(struct sdhci_host *host)
static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
{
int div = 0; /* Initialized for compiler warning */
+ int real_div = div, clk_mul = 1;
u16 clk = 0;
unsigned long timeout;
- if (clock == host->clock)
+ if (clock && clock == host->clock)
return;
+ host->mmc->actual_clock = 0;
+
if (host->ops->set_clock) {
host->ops->set_clock(host, clock);
if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK)
@@ -1109,6 +1116,8 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
* Control register.
*/
clk = SDHCI_PROG_CLOCK_MODE;
+ real_div = div;
+ clk_mul = host->clk_mul;
div--;
}
} else {
@@ -1122,6 +1131,7 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
break;
}
}
+ real_div = div;
div >>= 1;
}
} else {
@@ -1130,9 +1140,13 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
if ((host->max_clk / div) <= clock)
break;
}
+ real_div = div;
div >>= 1;
}
+ if (real_div)
+ host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div;
+
clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
<< SDHCI_DIVIDER_HI_SHIFT;
@@ -1160,7 +1174,7 @@ out:
host->clock = clock;
}
-static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
+static int sdhci_set_power(struct sdhci_host *host, unsigned short power)
{
u8 pwr = 0;
@@ -1183,13 +1197,13 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
}
if (host->pwr == pwr)
- return;
+ return -1;
host->pwr = pwr;
if (pwr == 0) {
sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
- return;
+ return 0;
}
/*
@@ -1216,6 +1230,8 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power)
*/
if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
mdelay(10);
+
+ return power;
}
/*****************************************************************************\
@@ -1277,7 +1293,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
if ((host->flags & SDHCI_NEEDS_RETUNING) &&
!(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) {
spin_unlock_irqrestore(&host->lock, flags);
- sdhci_execute_tuning(mmc);
+ sdhci_execute_tuning(mmc, mrq->cmd->opcode);
spin_lock_irqsave(&host->lock, flags);
/* Restore original mmc_request structure */
@@ -1297,12 +1313,17 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
{
unsigned long flags;
+ int vdd_bit = -1;
u8 ctrl;
spin_lock_irqsave(&host->lock, flags);
- if (host->flags & SDHCI_DEVICE_DEAD)
- goto out;
+ if (host->flags & SDHCI_DEVICE_DEAD) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ if (host->vmmc && ios->power_mode == MMC_POWER_OFF)
+ mmc_regulator_set_ocr(host->mmc, host->vmmc, 0);
+ return;
+ }
/*
* Reset the chip on each power off.
@@ -1316,9 +1337,15 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
sdhci_set_clock(host, ios->clock);
if (ios->power_mode == MMC_POWER_OFF)
- sdhci_set_power(host, -1);
+ vdd_bit = sdhci_set_power(host, -1);
else
- sdhci_set_power(host, ios->vdd);
+ vdd_bit = sdhci_set_power(host, ios->vdd);
+
+ if (host->vmmc && vdd_bit != -1) {
+ spin_unlock_irqrestore(&host->lock, flags);
+ mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd_bit);
+ spin_lock_irqsave(&host->lock, flags);
+ }
if (host->ops->platform_send_init_74_clocks)
host->ops->platform_send_init_74_clocks(host, ios->power_mode);
@@ -1361,11 +1388,11 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
unsigned int clock;
/* In case of UHS-I modes, set High Speed Enable */
- if ((ios->timing == MMC_TIMING_UHS_SDR50) ||
+ if ((ios->timing == MMC_TIMING_MMC_HS200) ||
+ (ios->timing == MMC_TIMING_UHS_SDR50) ||
(ios->timing == MMC_TIMING_UHS_SDR104) ||
(ios->timing == MMC_TIMING_UHS_DDR50) ||
- (ios->timing == MMC_TIMING_UHS_SDR25) ||
- (ios->timing == MMC_TIMING_UHS_SDR12))
+ (ios->timing == MMC_TIMING_UHS_SDR25))
ctrl |= SDHCI_CTRL_HISPD;
ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
@@ -1415,7 +1442,9 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
/* Select Bus Speed Mode for host */
ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
- if (ios->timing == MMC_TIMING_UHS_SDR12)
+ if (ios->timing == MMC_TIMING_MMC_HS200)
+ ctrl_2 |= SDHCI_CTRL_HS_SDR200;
+ else if (ios->timing == MMC_TIMING_UHS_SDR12)
ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
else if (ios->timing == MMC_TIMING_UHS_SDR25)
ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
@@ -1443,7 +1472,6 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios)
if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
-out:
mmiowb();
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -1663,7 +1691,7 @@ static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
return err;
}
-static int sdhci_execute_tuning(struct mmc_host *mmc)
+static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host;
u16 ctrl;
@@ -1671,6 +1699,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc)
int tuning_loop_counter = MAX_TUNING_LOOP;
unsigned long timeout;
int err = 0;
+ bool requires_tuning_nonuhs = false;
host = mmc_priv(mmc);
@@ -1681,13 +1710,19 @@ static int sdhci_execute_tuning(struct mmc_host *mmc)
ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
/*
- * Host Controller needs tuning only in case of SDR104 mode
- * and for SDR50 mode when Use Tuning for SDR50 is set in
+ * The Host Controller needs tuning only in case of SDR104 mode
+ * and for SDR50 mode when Use Tuning for SDR50 is set in the
* Capabilities register.
+ * If the Host Controller supports the HS200 mode then the
+ * tuning function has to be executed.
*/
+ if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
+ (host->flags & SDHCI_SDR50_NEEDS_TUNING ||
+ host->flags & SDHCI_HS200_NEEDS_TUNING))
+ requires_tuning_nonuhs = true;
+
if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) ||
- (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) &&
- (host->flags & SDHCI_SDR50_NEEDS_TUNING)))
+ requires_tuning_nonuhs)
ctrl |= SDHCI_CTRL_EXEC_TUNING;
else {
spin_unlock(&host->lock);
@@ -1723,7 +1758,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc)
if (!tuning_loop_counter && !timeout)
break;
- cmd.opcode = MMC_SEND_TUNING_BLOCK;
+ cmd.opcode = opcode;
cmd.arg = 0;
cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
cmd.retries = 0;
@@ -1738,7 +1773,17 @@ static int sdhci_execute_tuning(struct mmc_host *mmc)
* block to the Host Controller. So we set the block size
* to 64 here.
*/
- sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), SDHCI_BLOCK_SIZE);
+ if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) {
+ if (mmc->ios.bus_width == MMC_BUS_WIDTH_8)
+ sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128),
+ SDHCI_BLOCK_SIZE);
+ else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4)
+ sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
+ SDHCI_BLOCK_SIZE);
+ } else {
+ sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64),
+ SDHCI_BLOCK_SIZE);
+ }
/*
* The tuning block is sent by the card to the host controller.
@@ -2121,12 +2166,14 @@ static void sdhci_show_adma_error(struct sdhci_host *host) { }
static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
{
+ u32 command;
BUG_ON(intmask == 0);
/* CMD19 generates _only_ Buffer Read Ready interrupt */
if (intmask & SDHCI_INT_DATA_AVAIL) {
- if (SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) ==
- MMC_SEND_TUNING_BLOCK) {
+ command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
+ if (command == MMC_SEND_TUNING_BLOCK ||
+ command == MMC_SEND_TUNING_BLOCK_HS200) {
host->tuning_done = 1;
wake_up(&host->buf_ready_int);
return;
@@ -2330,26 +2377,33 @@ out:
int sdhci_suspend_host(struct sdhci_host *host)
{
int ret;
+ bool has_tuning_timer;
sdhci_disable_card_detection(host);
/* Disable tuning since we are suspending */
- if (host->version >= SDHCI_SPEC_300 && host->tuning_count &&
- host->tuning_mode == SDHCI_TUNING_MODE_1) {
+ has_tuning_timer = host->version >= SDHCI_SPEC_300 &&
+ host->tuning_count && host->tuning_mode == SDHCI_TUNING_MODE_1;
+ if (has_tuning_timer) {
+ del_timer_sync(&host->tuning_timer);
host->flags &= ~SDHCI_NEEDS_RETUNING;
- mod_timer(&host->tuning_timer, jiffies +
- host->tuning_count * HZ);
}
ret = mmc_suspend_host(host->mmc);
- if (ret)
+ if (ret) {
+ if (has_tuning_timer) {
+ host->flags |= SDHCI_NEEDS_RETUNING;
+ mod_timer(&host->tuning_timer, jiffies +
+ host->tuning_count * HZ);
+ }
+
+ sdhci_enable_card_detection(host);
+
return ret;
+ }
free_irq(host->irq, host);
- if (host->vmmc)
- ret = regulator_disable(host->vmmc);
-
return ret;
}
@@ -2359,12 +2413,6 @@ int sdhci_resume_host(struct sdhci_host *host)
{
int ret;
- if (host->vmmc) {
- int ret = regulator_enable(host->vmmc);
- if (ret)
- return ret;
- }
-
if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
if (host->ops->enable_dma)
host->ops->enable_dma(host);
@@ -2727,10 +2775,14 @@ int sdhci_add_host(struct sdhci_host *host)
if (caps[1] & SDHCI_SUPPORT_DDR50)
mmc->caps |= MMC_CAP_UHS_DDR50;
- /* Does the host needs tuning for SDR50? */
+ /* Does the host need tuning for SDR50? */
if (caps[1] & SDHCI_USE_SDR50_TUNING)
host->flags |= SDHCI_SDR50_NEEDS_TUNING;
+ /* Does the host need tuning for HS200? */
+ if (mmc->caps2 & MMC_CAP2_HS200)
+ host->flags |= SDHCI_HS200_NEEDS_TUNING;
+
/* Driver Type(s) (A, C, D) supported by the host */
if (caps[1] & SDHCI_DRIVER_TYPE_A)
mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
@@ -2926,8 +2978,6 @@ int sdhci_add_host(struct sdhci_host *host)
if (IS_ERR(host->vmmc)) {
pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc));
host->vmmc = NULL;
- } else {
- regulator_enable(host->vmmc);
}
sdhci_init(host, 0);
@@ -3016,10 +3066,8 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
tasklet_kill(&host->card_tasklet);
tasklet_kill(&host->finish_tasklet);
- if (host->vmmc) {
- regulator_disable(host->vmmc);
+ if (host->vmmc)
regulator_put(host->vmmc);
- }
kfree(host->adma_desc);
kfree(host->align_buffer);
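The real_div/clk_mul bookkeeping added to sdhci_set_clock() exists so the core can report the frequency actually programmed (mmc->actual_clock), not just the frequency requested. A stripped-down sketch of the calculation with the register writes omitted; names follow the hunk, the helper itself is illustrative:

/*
 * In programmable clock mode the base clock is multiplied by clk_mul and
 * divided by real_div; in divided clock mode clk_mul stays 1.  A real_div
 * of zero means the divider was never selected, so no clock is reported.
 */
static unsigned int sdhci_actual_clock_sketch(unsigned int max_clk,
					      unsigned int clk_mul,
					      unsigned int real_div)
{
	if (!real_div)
		return 0;
	return (max_clk * clk_mul) / real_div;
}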
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index a04d4d0..ad265b9 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -158,6 +158,7 @@
#define SDHCI_CTRL_UHS_SDR50 0x0002
#define SDHCI_CTRL_UHS_SDR104 0x0003
#define SDHCI_CTRL_UHS_DDR50 0x0004
+#define SDHCI_CTRL_HS_SDR200 0x0005 /* reserved value in SDIO spec */
#define SDHCI_CTRL_VDD_180 0x0008
#define SDHCI_CTRL_DRV_TYPE_MASK 0x0030
#define SDHCI_CTRL_DRV_TYPE_B 0x0000
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index d5505f3..352d479 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -16,6 +16,33 @@
*
*/
+/*
+ * The MMCIF driver is now processing MMC requests asynchronously, according
+ * to the Linux MMC API requirement.
+ *
+ * The MMCIF driver processes MMC requests in up to 3 stages: command, optional
+ * data, and optional stop. To achieve asynchronous processing each of these
+ * stages is split into two halves: a top and a bottom half. The top half
+ * initialises the hardware, installs a timeout handler to handle completion
+ * timeouts, and returns. In case of the command stage this immediately returns
+ * control to the caller, leaving all further processing to run asynchronously.
+ * All further request processing is performed by the bottom halves.
+ *
+ * The bottom half further consists of a "hard" IRQ handler, an IRQ handler
+ * thread, a DMA completion callback, if DMA is used, a timeout work, and
+ * request- and stage-specific handler methods.
+ *
+ * Each bottom half run begins with either a hardware interrupt, a DMA callback
+ * invocation, or a timeout work run. In case of an error or a successful
+ * processing completion, the MMC core is informed and the request processing is
+ * finished. In case processing has to continue, i.e., if data has to be read
+ * from or written to the card, or if a stop command has to be sent, the next
+ * top half is called, which performs the necessary hardware handling and
+ * reschedules the timeout work. This returns the driver state machine into the
+ * bottom half waiting state.
+ */
+
+#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
@@ -123,6 +150,11 @@
#define MASK_MRBSYTO (1 << 1)
#define MASK_MRSPTO (1 << 0)
+#define MASK_START_CMD (MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \
+ MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \
+ MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | \
+ MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO)
+
/* CE_HOST_STS1 */
#define STS1_CMDSEQ (1 << 31)
@@ -162,9 +194,21 @@ enum mmcif_state {
STATE_IOS,
};
+enum mmcif_wait_for {
+ MMCIF_WAIT_FOR_REQUEST,
+ MMCIF_WAIT_FOR_CMD,
+ MMCIF_WAIT_FOR_MREAD,
+ MMCIF_WAIT_FOR_MWRITE,
+ MMCIF_WAIT_FOR_READ,
+ MMCIF_WAIT_FOR_WRITE,
+ MMCIF_WAIT_FOR_READ_END,
+ MMCIF_WAIT_FOR_WRITE_END,
+ MMCIF_WAIT_FOR_STOP,
+};
+
struct sh_mmcif_host {
struct mmc_host *mmc;
- struct mmc_data *data;
+ struct mmc_request *mrq;
struct platform_device *pd;
struct sh_dmae_slave dma_slave_tx;
struct sh_dmae_slave dma_slave_rx;
@@ -172,11 +216,17 @@ struct sh_mmcif_host {
unsigned int clk;
int bus_width;
bool sd_error;
+ bool dying;
long timeout;
void __iomem *addr;
- struct completion intr_wait;
+ u32 *pio_ptr;
+ spinlock_t lock; /* protect sh_mmcif_host::state */
enum mmcif_state state;
- spinlock_t lock;
+ enum mmcif_wait_for wait_for;
+ struct delayed_work timeout_work;
+ size_t blocksize;
+ int sg_idx;
+ int sg_blkidx;
bool power;
bool card_present;
@@ -202,19 +252,21 @@ static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
static void mmcif_dma_complete(void *arg)
{
struct sh_mmcif_host *host = arg;
+ struct mmc_data *data = host->mrq->data;
+
dev_dbg(&host->pd->dev, "Command completed\n");
- if (WARN(!host->data, "%s: NULL data in DMA completion!\n",
+ if (WARN(!data, "%s: NULL data in DMA completion!\n",
dev_name(&host->pd->dev)))
return;
- if (host->data->flags & MMC_DATA_READ)
+ if (data->flags & MMC_DATA_READ)
dma_unmap_sg(host->chan_rx->device->dev,
- host->data->sg, host->data->sg_len,
+ data->sg, data->sg_len,
DMA_FROM_DEVICE);
else
dma_unmap_sg(host->chan_tx->device->dev,
- host->data->sg, host->data->sg_len,
+ data->sg, data->sg_len,
DMA_TO_DEVICE);
complete(&host->dma_complete);
@@ -222,18 +274,19 @@ static void mmcif_dma_complete(void *arg)
static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
{
- struct scatterlist *sg = host->data->sg;
+ struct mmc_data *data = host->mrq->data;
+ struct scatterlist *sg = data->sg;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_rx;
dma_cookie_t cookie = -EINVAL;
int ret;
- ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
+ ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
DMA_FROM_DEVICE);
if (ret > 0) {
host->dma_active = true;
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
- DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
if (desc) {
@@ -244,7 +297,7 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
dma_async_issue_pending(chan);
}
dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
- __func__, host->data->sg_len, ret, cookie);
+ __func__, data->sg_len, ret, cookie);
if (!desc) {
/* DMA failed, fall back to PIO */
@@ -265,23 +318,24 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
}
dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
- desc, cookie, host->data->sg_len);
+ desc, cookie, data->sg_len);
}
static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
- struct scatterlist *sg = host->data->sg;
+ struct mmc_data *data = host->mrq->data;
+ struct scatterlist *sg = data->sg;
struct dma_async_tx_descriptor *desc = NULL;
struct dma_chan *chan = host->chan_tx;
dma_cookie_t cookie = -EINVAL;
int ret;
- ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
+ ret = dma_map_sg(chan->device->dev, sg, data->sg_len,
DMA_TO_DEVICE);
if (ret > 0) {
host->dma_active = true;
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
- DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}
if (desc) {
@@ -292,7 +346,7 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
dma_async_issue_pending(chan);
}
dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
- __func__, host->data->sg_len, ret, cookie);
+ __func__, data->sg_len, ret, cookie);
if (!desc) {
/* DMA failed, fall back to PIO */
@@ -399,7 +453,7 @@ static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
else
sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
- (ilog2(__rounddown_pow_of_two(host->clk / clk)) << 16));
+ ((fls(host->clk / clk) - 1) << 16));
sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}
@@ -421,7 +475,7 @@ static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
{
u32 state1, state2;
- int ret, timeout = 10000000;
+ int ret, timeout;
host->sd_error = false;
@@ -433,155 +487,212 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
if (state1 & STS1_CMDSEQ) {
sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
- while (1) {
- timeout--;
- if (timeout < 0) {
- dev_err(&host->pd->dev,
- "Forceed end of command sequence timeout err\n");
- return -EIO;
- }
+ for (timeout = 10000000; timeout; timeout--) {
if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
- & STS1_CMDSEQ))
+ & STS1_CMDSEQ))
break;
mdelay(1);
}
+ if (!timeout) {
+ dev_err(&host->pd->dev,
+ "Forced end of command sequence timeout err\n");
+ return -EIO;
+ }
sh_mmcif_sync_reset(host);
dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
return -EIO;
}
if (state2 & STS2_CRC_ERR) {
- dev_dbg(&host->pd->dev, ": Happened CRC error\n");
+ dev_dbg(&host->pd->dev, ": CRC error\n");
ret = -EIO;
} else if (state2 & STS2_TIMEOUT_ERR) {
- dev_dbg(&host->pd->dev, ": Happened Timeout error\n");
+ dev_dbg(&host->pd->dev, ": Timeout\n");
ret = -ETIMEDOUT;
} else {
- dev_dbg(&host->pd->dev, ": Happened End/Index error\n");
+ dev_dbg(&host->pd->dev, ": End/Index error\n");
ret = -EIO;
}
return ret;
}
-static int sh_mmcif_single_read(struct sh_mmcif_host *host,
- struct mmc_request *mrq)
+static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)
{
- struct mmc_data *data = mrq->data;
- long time;
- u32 blocksize, i, *p = sg_virt(data->sg);
+ struct mmc_data *data = host->mrq->data;
+
+ host->sg_blkidx += host->blocksize;
+
+ /* data->sg->length must be a multiple of host->blocksize? */
+ BUG_ON(host->sg_blkidx > data->sg->length);
+
+ if (host->sg_blkidx == data->sg->length) {
+ host->sg_blkidx = 0;
+ if (++host->sg_idx < data->sg_len)
+ host->pio_ptr = sg_virt(++data->sg);
+ } else {
+ host->pio_ptr = p;
+ }
+
+ if (host->sg_idx == data->sg_len)
+ return false;
+
+ return true;
+}
+
+static void sh_mmcif_single_read(struct sh_mmcif_host *host,
+ struct mmc_request *mrq)
+{
+ host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+ BLOCK_SIZE_MASK) + 3;
+
+ host->wait_for = MMCIF_WAIT_FOR_READ;
+ schedule_delayed_work(&host->timeout_work, host->timeout);
/* buf read enable */
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
- time = wait_for_completion_interruptible_timeout(&host->intr_wait,
- host->timeout);
- if (time <= 0 || host->sd_error)
- return sh_mmcif_error_manage(host);
-
- blocksize = (BLOCK_SIZE_MASK &
- sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
- for (i = 0; i < blocksize / 4; i++)
+}
+
+static bool sh_mmcif_read_block(struct sh_mmcif_host *host)
+{
+ struct mmc_data *data = host->mrq->data;
+ u32 *p = sg_virt(data->sg);
+ int i;
+
+ if (host->sd_error) {
+ data->error = sh_mmcif_error_manage(host);
+ return false;
+ }
+
+ for (i = 0; i < host->blocksize / 4; i++)
*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
/* buffer read end */
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
- time = wait_for_completion_interruptible_timeout(&host->intr_wait,
- host->timeout);
- if (time <= 0 || host->sd_error)
- return sh_mmcif_error_manage(host);
+ host->wait_for = MMCIF_WAIT_FOR_READ_END;
- return 0;
+ return true;
}
-static int sh_mmcif_multi_read(struct sh_mmcif_host *host,
- struct mmc_request *mrq)
+static void sh_mmcif_multi_read(struct sh_mmcif_host *host,
+ struct mmc_request *mrq)
{
struct mmc_data *data = mrq->data;
- long time;
- u32 blocksize, i, j, sec, *p;
-
- blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
- MMCIF_CE_BLOCK_SET);
- for (j = 0; j < data->sg_len; j++) {
- p = sg_virt(data->sg);
- for (sec = 0; sec < data->sg->length / blocksize; sec++) {
- sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
- /* buf read enable */
- time = wait_for_completion_interruptible_timeout(&host->intr_wait,
- host->timeout);
-
- if (time <= 0 || host->sd_error)
- return sh_mmcif_error_manage(host);
-
- for (i = 0; i < blocksize / 4; i++)
- *p++ = sh_mmcif_readl(host->addr,
- MMCIF_CE_DATA);
- }
- if (j < data->sg_len - 1)
- data->sg++;
+
+ if (!data->sg_len || !data->sg->length)
+ return;
+
+ host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+ BLOCK_SIZE_MASK;
+
+ host->wait_for = MMCIF_WAIT_FOR_MREAD;
+ host->sg_idx = 0;
+ host->sg_blkidx = 0;
+ host->pio_ptr = sg_virt(data->sg);
+ schedule_delayed_work(&host->timeout_work, host->timeout);
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+}
+
+static bool sh_mmcif_mread_block(struct sh_mmcif_host *host)
+{
+ struct mmc_data *data = host->mrq->data;
+ u32 *p = host->pio_ptr;
+ int i;
+
+ if (host->sd_error) {
+ data->error = sh_mmcif_error_manage(host);
+ return false;
}
- return 0;
+
+ BUG_ON(!data->sg->length);
+
+ for (i = 0; i < host->blocksize / 4; i++)
+ *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);
+
+ if (!sh_mmcif_next_block(host, p))
+ return false;
+
+ schedule_delayed_work(&host->timeout_work, host->timeout);
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
+
+ return true;
}
-static int sh_mmcif_single_write(struct sh_mmcif_host *host,
+static void sh_mmcif_single_write(struct sh_mmcif_host *host,
struct mmc_request *mrq)
{
- struct mmc_data *data = mrq->data;
- long time;
- u32 blocksize, i, *p = sg_virt(data->sg);
+ host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+ BLOCK_SIZE_MASK) + 3;
- sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+ host->wait_for = MMCIF_WAIT_FOR_WRITE;
+ schedule_delayed_work(&host->timeout_work, host->timeout);
/* buf write enable */
- time = wait_for_completion_interruptible_timeout(&host->intr_wait,
- host->timeout);
- if (time <= 0 || host->sd_error)
- return sh_mmcif_error_manage(host);
-
- blocksize = (BLOCK_SIZE_MASK &
- sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
- for (i = 0; i < blocksize / 4; i++)
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+}
+
+static bool sh_mmcif_write_block(struct sh_mmcif_host *host)
+{
+ struct mmc_data *data = host->mrq->data;
+ u32 *p = sg_virt(data->sg);
+ int i;
+
+ if (host->sd_error) {
+ data->error = sh_mmcif_error_manage(host);
+ return false;
+ }
+
+ for (i = 0; i < host->blocksize / 4; i++)
sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
/* buffer write end */
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
+ host->wait_for = MMCIF_WAIT_FOR_WRITE_END;
- time = wait_for_completion_interruptible_timeout(&host->intr_wait,
- host->timeout);
- if (time <= 0 || host->sd_error)
- return sh_mmcif_error_manage(host);
-
- return 0;
+ return true;
}
-static int sh_mmcif_multi_write(struct sh_mmcif_host *host,
- struct mmc_request *mrq)
+static void sh_mmcif_multi_write(struct sh_mmcif_host *host,
+ struct mmc_request *mrq)
{
struct mmc_data *data = mrq->data;
- long time;
- u32 i, sec, j, blocksize, *p;
- blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
- MMCIF_CE_BLOCK_SET);
+ if (!data->sg_len || !data->sg->length)
+ return;
- for (j = 0; j < data->sg_len; j++) {
- p = sg_virt(data->sg);
- for (sec = 0; sec < data->sg->length / blocksize; sec++) {
- sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
- /* buf write enable*/
- time = wait_for_completion_interruptible_timeout(&host->intr_wait,
- host->timeout);
+ host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) &
+ BLOCK_SIZE_MASK;
- if (time <= 0 || host->sd_error)
- return sh_mmcif_error_manage(host);
+ host->wait_for = MMCIF_WAIT_FOR_MWRITE;
+ host->sg_idx = 0;
+ host->sg_blkidx = 0;
+ host->pio_ptr = sg_virt(data->sg);
+ schedule_delayed_work(&host->timeout_work, host->timeout);
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+}
- for (i = 0; i < blocksize / 4; i++)
- sh_mmcif_writel(host->addr,
- MMCIF_CE_DATA, *p++);
- }
- if (j < data->sg_len - 1)
- data->sg++;
+static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host)
+{
+ struct mmc_data *data = host->mrq->data;
+ u32 *p = host->pio_ptr;
+ int i;
+
+ if (host->sd_error) {
+ data->error = sh_mmcif_error_manage(host);
+ return false;
}
- return 0;
+
+ BUG_ON(!data->sg->length);
+
+ for (i = 0; i < host->blocksize / 4; i++)
+ sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);
+
+ if (!sh_mmcif_next_block(host, p))
+ return false;
+
+ schedule_delayed_work(&host->timeout_work, host->timeout);
+ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
+
+ return true;
}
static void sh_mmcif_get_response(struct sh_mmcif_host *host,
@@ -603,8 +714,11 @@ static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
}
static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
- struct mmc_request *mrq, struct mmc_command *cmd, u32 opc)
+ struct mmc_request *mrq)
{
+ struct mmc_data *data = mrq->data;
+ struct mmc_command *cmd = mrq->cmd;
+ u32 opc = cmd->opcode;
u32 tmp = 0;
/* Response Type check */
@@ -636,7 +750,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
break;
}
/* WDAT / DATW */
- if (host->data) {
+ if (data) {
tmp |= CMD_SET_WDAT;
switch (host->bus_width) {
case MMC_BUS_WIDTH_1:
@@ -660,7 +774,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
- mrq->data->blocks << 16);
+ data->blocks << 16);
}
/* RIDXC[1:0] check bits */
if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
@@ -674,68 +788,60 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
tmp |= CMD_SET_CRC7C_INTERNAL;
- return opc = ((opc << 24) | tmp);
+ return (opc << 24) | tmp;
}
static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
- struct mmc_request *mrq, u32 opc)
+ struct mmc_request *mrq, u32 opc)
{
- int ret;
-
switch (opc) {
case MMC_READ_MULTIPLE_BLOCK:
- ret = sh_mmcif_multi_read(host, mrq);
- break;
+ sh_mmcif_multi_read(host, mrq);
+ return 0;
case MMC_WRITE_MULTIPLE_BLOCK:
- ret = sh_mmcif_multi_write(host, mrq);
- break;
+ sh_mmcif_multi_write(host, mrq);
+ return 0;
case MMC_WRITE_BLOCK:
- ret = sh_mmcif_single_write(host, mrq);
- break;
+ sh_mmcif_single_write(host, mrq);
+ return 0;
case MMC_READ_SINGLE_BLOCK:
case MMC_SEND_EXT_CSD:
- ret = sh_mmcif_single_read(host, mrq);
- break;
+ sh_mmcif_single_read(host, mrq);
+ return 0;
default:
dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc);
- ret = -EINVAL;
- break;
+ return -EINVAL;
}
- return ret;
}
static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
- struct mmc_request *mrq, struct mmc_command *cmd)
+ struct mmc_request *mrq)
{
- long time;
- int ret = 0, mask = 0;
+ struct mmc_command *cmd = mrq->cmd;
u32 opc = cmd->opcode;
+ u32 mask;
switch (opc) {
- /* respons busy check */
+ /* response busy check */
case MMC_SWITCH:
case MMC_STOP_TRANSMISSION:
case MMC_SET_WRITE_PROT:
case MMC_CLR_WRITE_PROT:
case MMC_ERASE:
case MMC_GEN_CMD:
- mask = MASK_MRBSYE;
+ mask = MASK_START_CMD | MASK_MRBSYE;
break;
default:
- mask = MASK_MCRSPE;
+ mask = MASK_START_CMD | MASK_MCRSPE;
break;
}
- mask |= MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR |
- MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR |
- MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO |
- MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO;
- if (host->data) {
+ if (mrq->data) {
sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
mrq->data->blksz);
}
- opc = sh_mmcif_set_cmd(host, mrq, cmd, opc);
+ opc = sh_mmcif_set_cmd(host, mrq);
sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
@@ -744,80 +850,28 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
/* set cmd */
sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);
- time = wait_for_completion_interruptible_timeout(&host->intr_wait,
- host->timeout);
- if (time <= 0) {
- cmd->error = sh_mmcif_error_manage(host);
- return;
- }
- if (host->sd_error) {
- switch (cmd->opcode) {
- case MMC_ALL_SEND_CID:
- case MMC_SELECT_CARD:
- case MMC_APP_CMD:
- cmd->error = -ETIMEDOUT;
- break;
- default:
- dev_dbg(&host->pd->dev, "Cmd(d'%d) err\n",
- cmd->opcode);
- cmd->error = sh_mmcif_error_manage(host);
- break;
- }
- host->sd_error = false;
- return;
- }
- if (!(cmd->flags & MMC_RSP_PRESENT)) {
- cmd->error = 0;
- return;
- }
- sh_mmcif_get_response(host, cmd);
- if (host->data) {
- if (!host->dma_active) {
- ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
- } else {
- long time =
- wait_for_completion_interruptible_timeout(&host->dma_complete,
- host->timeout);
- if (!time)
- ret = -ETIMEDOUT;
- else if (time < 0)
- ret = time;
- sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
- BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
- host->dma_active = false;
- }
- if (ret < 0)
- mrq->data->bytes_xfered = 0;
- else
- mrq->data->bytes_xfered =
- mrq->data->blocks * mrq->data->blksz;
- }
- cmd->error = ret;
+ host->wait_for = MMCIF_WAIT_FOR_CMD;
+ schedule_delayed_work(&host->timeout_work, host->timeout);
}
static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
- struct mmc_request *mrq, struct mmc_command *cmd)
+ struct mmc_request *mrq)
{
- long time;
-
- if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK)
+ switch (mrq->cmd->opcode) {
+ case MMC_READ_MULTIPLE_BLOCK:
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
- else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
+ break;
+ case MMC_WRITE_MULTIPLE_BLOCK:
sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
- else {
+ break;
+ default:
dev_err(&host->pd->dev, "unsupported stop cmd\n");
- cmd->error = sh_mmcif_error_manage(host);
+ mrq->stop->error = sh_mmcif_error_manage(host);
return;
}
- time = wait_for_completion_interruptible_timeout(&host->intr_wait,
- host->timeout);
- if (time <= 0 || host->sd_error) {
- cmd->error = sh_mmcif_error_manage(host);
- return;
- }
- sh_mmcif_get_cmd12response(host, cmd);
- cmd->error = 0;
+ host->wait_for = MMCIF_WAIT_FOR_STOP;
+ schedule_delayed_work(&host->timeout_work, host->timeout);
}
static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
@@ -856,23 +910,10 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
default:
break;
}
- host->data = mrq->data;
- if (mrq->data) {
- if (mrq->data->flags & MMC_DATA_READ) {
- if (host->chan_rx)
- sh_mmcif_start_dma_rx(host);
- } else {
- if (host->chan_tx)
- sh_mmcif_start_dma_tx(host);
- }
- }
- sh_mmcif_start_cmd(host, mrq, mrq->cmd);
- host->data = NULL;
- if (!mrq->cmd->error && mrq->stop)
- sh_mmcif_stop_cmd(host, mrq, mrq->stop);
- host->state = STATE_IDLE;
- mmc_request_done(mmc, mrq);
+ host->mrq = mrq;
+
+ sh_mmcif_start_cmd(host, mrq);
}
static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
@@ -947,9 +988,156 @@ static struct mmc_host_ops sh_mmcif_ops = {
.get_cd = sh_mmcif_get_cd,
};
-static void sh_mmcif_detect(struct mmc_host *mmc)
+static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
{
- mmc_detect_change(mmc, 0);
+ struct mmc_command *cmd = host->mrq->cmd;
+ struct mmc_data *data = host->mrq->data;
+ long time;
+
+ if (host->sd_error) {
+ switch (cmd->opcode) {
+ case MMC_ALL_SEND_CID:
+ case MMC_SELECT_CARD:
+ case MMC_APP_CMD:
+ cmd->error = -ETIMEDOUT;
+ host->sd_error = false;
+ break;
+ default:
+ cmd->error = sh_mmcif_error_manage(host);
+ dev_dbg(&host->pd->dev, "Cmd(d'%d) error %d\n",
+ cmd->opcode, cmd->error);
+ break;
+ }
+ return false;
+ }
+ if (!(cmd->flags & MMC_RSP_PRESENT)) {
+ cmd->error = 0;
+ return false;
+ }
+
+ sh_mmcif_get_response(host, cmd);
+
+ if (!data)
+ return false;
+
+ if (data->flags & MMC_DATA_READ) {
+ if (host->chan_rx)
+ sh_mmcif_start_dma_rx(host);
+ } else {
+ if (host->chan_tx)
+ sh_mmcif_start_dma_tx(host);
+ }
+
+ if (!host->dma_active) {
+ data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode);
+ if (!data->error)
+ return true;
+ return false;
+ }
+
+ /* Running in the IRQ thread, can sleep */
+ time = wait_for_completion_interruptible_timeout(&host->dma_complete,
+ host->timeout);
+ if (host->sd_error) {
+ dev_err(host->mmc->parent,
+ "Error IRQ while waiting for DMA completion!\n");
+ /* Woken up by an error IRQ: abort DMA */
+ if (data->flags & MMC_DATA_READ)
+ dmaengine_terminate_all(host->chan_rx);
+ else
+ dmaengine_terminate_all(host->chan_tx);
+ data->error = sh_mmcif_error_manage(host);
+ } else if (!time) {
+ data->error = -ETIMEDOUT;
+ } else if (time < 0) {
+ data->error = time;
+ }
+ sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
+ BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
+ host->dma_active = false;
+
+ if (data->error)
+ data->bytes_xfered = 0;
+
+ return false;
+}
+
+static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
+{
+ struct sh_mmcif_host *host = dev_id;
+ struct mmc_request *mrq = host->mrq;
+ struct mmc_data *data = mrq->data;
+
+ cancel_delayed_work_sync(&host->timeout_work);
+
+ /*
+ * All handlers return true, if processing continues, and false, if the
+ * request has to be completed - successfully or not
+ */
+ switch (host->wait_for) {
+ case MMCIF_WAIT_FOR_REQUEST:
+ /* We're too late, the timeout has already kicked in */
+ return IRQ_HANDLED;
+ case MMCIF_WAIT_FOR_CMD:
+ if (sh_mmcif_end_cmd(host))
+ /* Wait for data */
+ return IRQ_HANDLED;
+ break;
+ case MMCIF_WAIT_FOR_MREAD:
+ if (sh_mmcif_mread_block(host))
+ /* Wait for more data */
+ return IRQ_HANDLED;
+ break;
+ case MMCIF_WAIT_FOR_READ:
+ if (sh_mmcif_read_block(host))
+ /* Wait for data end */
+ return IRQ_HANDLED;
+ break;
+ case MMCIF_WAIT_FOR_MWRITE:
+ if (sh_mmcif_mwrite_block(host))
+ /* Wait for more data to write */
+ return IRQ_HANDLED;
+ break;
+ case MMCIF_WAIT_FOR_WRITE:
+ if (sh_mmcif_write_block(host))
+ /* Wait for data end */
+ return IRQ_HANDLED;
+ break;
+ case MMCIF_WAIT_FOR_STOP:
+ if (host->sd_error) {
+ mrq->stop->error = sh_mmcif_error_manage(host);
+ break;
+ }
+ sh_mmcif_get_cmd12response(host, mrq->stop);
+ mrq->stop->error = 0;
+ break;
+ case MMCIF_WAIT_FOR_READ_END:
+ case MMCIF_WAIT_FOR_WRITE_END:
+ if (host->sd_error)
+ data->error = sh_mmcif_error_manage(host);
+ break;
+ default:
+ BUG();
+ }
+
+ if (host->wait_for != MMCIF_WAIT_FOR_STOP) {
+ if (!mrq->cmd->error && data && !data->error)
+ data->bytes_xfered =
+ data->blocks * data->blksz;
+
+ if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) {
+ sh_mmcif_stop_cmd(host, mrq);
+ if (!mrq->stop->error)
+ return IRQ_HANDLED;
+ }
+ }
+
+ host->wait_for = MMCIF_WAIT_FOR_REQUEST;
+ host->state = STATE_IDLE;
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, mrq);
+
+ return IRQ_HANDLED;
}
static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
@@ -960,7 +1148,12 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);
- if (state & INT_RBSYE) {
+ if (state & INT_ERR_STS) {
+ /* error interrupts - process first */
+ sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
+ sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
+ err = 1;
+ } else if (state & INT_RBSYE) {
sh_mmcif_writel(host->addr, MMCIF_CE_INT,
~(INT_RBSYE | INT_CRSPE));
sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE);
@@ -988,11 +1181,6 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
sh_mmcif_writel(host->addr, MMCIF_CE_INT,
~(INT_CMD12RBE | INT_CMD12CRE));
sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
- } else if (state & INT_ERR_STS) {
- /* err interrupts */
- sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
- sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
- err = 1;
} else {
dev_dbg(&host->pd->dev, "Unsupported interrupt: 0x%x\n", state);
sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
@@ -1003,14 +1191,57 @@ static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
host->sd_error = true;
dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
}
- if (state & ~(INT_CMD12RBE | INT_CMD12CRE))
- complete(&host->intr_wait);
- else
+ if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) {
+ if (!host->dma_active)
+ return IRQ_WAKE_THREAD;
+ else if (host->sd_error)
+ mmcif_dma_complete(host);
+ } else {
dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);
+ }
return IRQ_HANDLED;
}
+static void mmcif_timeout_work(struct work_struct *work)
+{
+ struct delayed_work *d = container_of(work, struct delayed_work, work);
+ struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
+ struct mmc_request *mrq = host->mrq;
+
+ if (host->dying)
+ /* Don't run after mmc_remove_host() */
+ return;
+
+ /*
+ * Handle races with cancel_delayed_work(), unless
+ * cancel_delayed_work_sync() is used
+ */
+ switch (host->wait_for) {
+ case MMCIF_WAIT_FOR_CMD:
+ mrq->cmd->error = sh_mmcif_error_manage(host);
+ break;
+ case MMCIF_WAIT_FOR_STOP:
+ mrq->stop->error = sh_mmcif_error_manage(host);
+ break;
+ case MMCIF_WAIT_FOR_MREAD:
+ case MMCIF_WAIT_FOR_MWRITE:
+ case MMCIF_WAIT_FOR_READ:
+ case MMCIF_WAIT_FOR_WRITE:
+ case MMCIF_WAIT_FOR_READ_END:
+ case MMCIF_WAIT_FOR_WRITE_END:
+ mrq->data->error = sh_mmcif_error_manage(host);
+ break;
+ default:
+ BUG();
+ }
+
+ host->state = STATE_IDLE;
+ host->wait_for = MMCIF_WAIT_FOR_REQUEST;
+ host->mrq = NULL;
+ mmc_request_done(host->mmc, mrq);
+}
+
static int __devinit sh_mmcif_probe(struct platform_device *pdev)
{
int ret = 0, irq[2];
@@ -1064,7 +1295,6 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
host->clk = clk_get_rate(host->hclk);
host->pd = pdev;
- init_completion(&host->intr_wait);
spin_lock_init(&host->lock);
mmc->ops = &sh_mmcif_ops;
@@ -1097,31 +1327,35 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)
if (ret < 0)
goto clean_up2;
- mmc_add_host(mmc);
+ INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work);
sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
- ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
+ ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:error", host);
if (ret) {
dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
goto clean_up3;
}
- ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
+ ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host);
if (ret) {
- free_irq(irq[0], host);
dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
- goto clean_up3;
+ goto clean_up4;
}
- sh_mmcif_detect(host->mmc);
+ ret = mmc_add_host(mmc);
+ if (ret < 0)
+ goto clean_up5;
dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
dev_dbg(&pdev->dev, "chip ver H'%04x\n",
sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
return ret;
+clean_up5:
+ free_irq(irq[1], host);
+clean_up4:
+ free_irq(irq[0], host);
clean_up3:
- mmc_remove_host(mmc);
pm_runtime_suspend(&pdev->dev);
clean_up2:
pm_runtime_disable(&pdev->dev);
@@ -1139,11 +1373,19 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev)
struct sh_mmcif_host *host = platform_get_drvdata(pdev);
int irq[2];
+ host->dying = true;
pm_runtime_get_sync(&pdev->dev);
mmc_remove_host(host->mmc);
sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
+ /*
+ * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the
+ * mmc_remove_host() call above. But swapping order doesn't help either
+ * (a query on the linux-mmc mailing list didn't bring any replies).
+ */
+ cancel_delayed_work_sync(&host->timeout_work);
+
if (host->addr)
iounmap(host->addr);
@@ -1206,19 +1448,7 @@ static struct platform_driver sh_mmcif_driver = {
},
};
-static int __init sh_mmcif_init(void)
-{
- return platform_driver_register(&sh_mmcif_driver);
-}
-
-static void __exit sh_mmcif_exit(void)
-{
- platform_driver_unregister(&sh_mmcif_driver);
-}
-
-module_init(sh_mmcif_init);
-module_exit(sh_mmcif_exit);
-
+module_platform_driver(sh_mmcif_driver);
MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c
index 41ae646..58da3c4 100644
--- a/drivers/mmc/host/sh_mobile_sdhi.c
+++ b/drivers/mmc/host/sh_mobile_sdhi.c
@@ -282,18 +282,7 @@ static struct platform_driver sh_mobile_sdhi_driver = {
.remove = __devexit_p(sh_mobile_sdhi_remove),
};
-static int __init sh_mobile_sdhi_init(void)
-{
- return platform_driver_register(&sh_mobile_sdhi_driver);
-}
-
-static void __exit sh_mobile_sdhi_exit(void)
-{
- platform_driver_unregister(&sh_mobile_sdhi_driver);
-}
-
-module_init(sh_mobile_sdhi_init);
-module_exit(sh_mobile_sdhi_exit);
+module_platform_driver(sh_mobile_sdhi_driver);
MODULE_DESCRIPTION("SuperH Mobile SDHI driver");
MODULE_AUTHOR("Magnus Damm");
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index f70d046..43d9628 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -22,8 +22,8 @@
#define DRIVER_NAME "tifm_sd"
#define DRIVER_VERSION "0.8"
-static int no_dma = 0;
-static int fixed_timeout = 0;
+static bool no_dma = 0;
+static bool fixed_timeout = 0;
module_param(no_dma, bool, 0644);
module_param(fixed_timeout, bool, 0644);
@@ -118,7 +118,7 @@ static void tifm_sd_read_fifo(struct tifm_sd *host, struct page *pg,
unsigned char *buf;
unsigned int pos = 0, val;
- buf = kmap_atomic(pg, KM_BIO_DST_IRQ) + off;
+ buf = kmap_atomic(pg) + off;
if (host->cmd_flags & DATA_CARRY) {
buf[pos++] = host->bounce_buf_data[0];
host->cmd_flags &= ~DATA_CARRY;
@@ -134,7 +134,7 @@ static void tifm_sd_read_fifo(struct tifm_sd *host, struct page *pg,
}
buf[pos++] = (val >> 8) & 0xff;
}
- kunmap_atomic(buf - off, KM_BIO_DST_IRQ);
+ kunmap_atomic(buf - off);
}
static void tifm_sd_write_fifo(struct tifm_sd *host, struct page *pg,
@@ -144,7 +144,7 @@ static void tifm_sd_write_fifo(struct tifm_sd *host, struct page *pg,
unsigned char *buf;
unsigned int pos = 0, val;
- buf = kmap_atomic(pg, KM_BIO_SRC_IRQ) + off;
+ buf = kmap_atomic(pg) + off;
if (host->cmd_flags & DATA_CARRY) {
val = host->bounce_buf_data[0] | ((buf[pos++] << 8) & 0xff00);
writel(val, sock->addr + SOCK_MMCSD_DATA);
@@ -161,7 +161,7 @@ static void tifm_sd_write_fifo(struct tifm_sd *host, struct page *pg,
val |= (buf[pos++] << 8) & 0xff00;
writel(val, sock->addr + SOCK_MMCSD_DATA);
}
- kunmap_atomic(buf - off, KM_BIO_SRC_IRQ);
+ kunmap_atomic(buf - off);
}
static void tifm_sd_transfer_data(struct tifm_sd *host)
@@ -212,13 +212,13 @@ static void tifm_sd_copy_page(struct page *dst, unsigned int dst_off,
struct page *src, unsigned int src_off,
unsigned int count)
{
- unsigned char *src_buf = kmap_atomic(src, KM_BIO_SRC_IRQ) + src_off;
- unsigned char *dst_buf = kmap_atomic(dst, KM_BIO_DST_IRQ) + dst_off;
+ unsigned char *src_buf = kmap_atomic(src) + src_off;
+ unsigned char *dst_buf = kmap_atomic(dst) + dst_off;
memcpy(dst_buf, src_buf, count);
- kunmap_atomic(dst_buf - dst_off, KM_BIO_DST_IRQ);
- kunmap_atomic(src_buf - src_off, KM_BIO_SRC_IRQ);
+ kunmap_atomic(dst_buf - dst_off);
+ kunmap_atomic(src_buf - src_off);
}
static void tifm_sd_bounce_block(struct tifm_sd *host, struct mmc_data *r_data)
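The KM_BIO_*_IRQ arguments dropped in these hunks reflect the stacked kmap_atomic() API: the slot is chosen implicitly, so kmap_atomic()/kunmap_atomic() take only the page and the mapped address. A minimal sketch of the resulting pattern, written as a generic helper rather than driver code:

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy 'count' bytes between two (possibly highmem) pages */
static void copy_page_chunk_sketch(struct page *dst, unsigned int dst_off,
				   struct page *src, unsigned int src_off,
				   unsigned int count)
{
	unsigned char *s = kmap_atomic(src) + src_off;
	unsigned char *d = kmap_atomic(dst) + dst_off;

	memcpy(d, s, count);

	/* Unmap the page-aligned addresses, in reverse order of mapping */
	kunmap_atomic(d - dst_off);
	kunmap_atomic(s - src_off);
}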
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c
index a4ea102..113ce6c 100644
--- a/drivers/mmc/host/tmio_mmc.c
+++ b/drivers/mmc/host/tmio_mmc.c
@@ -138,19 +138,7 @@ static struct platform_driver tmio_mmc_driver = {
.resume = tmio_mmc_resume,
};
-
-static int __init tmio_mmc_init(void)
-{
- return platform_driver_register(&tmio_mmc_driver);
-}
-
-static void __exit tmio_mmc_exit(void)
-{
- platform_driver_unregister(&tmio_mmc_driver);
-}
-
-module_init(tmio_mmc_init);
-module_exit(tmio_mmc_exit);
+module_platform_driver(tmio_mmc_driver);
MODULE_DESCRIPTION("Toshiba TMIO SD/MMC driver");
MODULE_AUTHOR("Ian Molton <spyro@f2s.com>");
diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h
index 3020f98..f96c536 100644
--- a/drivers/mmc/host/tmio_mmc.h
+++ b/drivers/mmc/host/tmio_mmc.h
@@ -20,8 +20,8 @@
#include <linux/mmc/tmio.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
-#include <linux/spinlock.h>
#include <linux/scatterlist.h>
+#include <linux/spinlock.h>
/* Definitions for values the CTRL_SDIO_STATUS register can take. */
#define TMIO_SDIO_STAT_IOIRQ 0x0001
@@ -105,13 +105,13 @@ static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg,
unsigned long *flags)
{
local_irq_save(*flags);
- return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
+ return kmap_atomic(sg_page(sg)) + sg->offset;
}
static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg,
unsigned long *flags, void *virt)
{
- kunmap_atomic(virt - sg->offset, KM_BIO_SRC_IRQ);
+ kunmap_atomic(virt - sg->offset);
local_irq_restore(*flags);
}
@@ -120,6 +120,7 @@ void tmio_mmc_start_dma(struct tmio_mmc_host *host, struct mmc_data *data);
void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable);
void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata);
void tmio_mmc_release_dma(struct tmio_mmc_host *host);
+void tmio_mmc_abort_dma(struct tmio_mmc_host *host);
#else
static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host,
struct mmc_data *data)
@@ -140,6 +141,10 @@ static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host,
static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)
{
}
+
+static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
+{
+}
#endif
#ifdef CONFIG_PM
diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c
index 86f259c..8253ec1 100644
--- a/drivers/mmc/host/tmio_mmc_dma.c
+++ b/drivers/mmc/host/tmio_mmc_dma.c
@@ -34,6 +34,18 @@ void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)
#endif
}
+void tmio_mmc_abort_dma(struct tmio_mmc_host *host)
+{
+ tmio_mmc_enable_dma(host, false);
+
+ if (host->chan_rx)
+ dmaengine_terminate_all(host->chan_rx);
+ if (host->chan_tx)
+ dmaengine_terminate_all(host->chan_tx);
+
+ tmio_mmc_enable_dma(host, true);
+}
+
static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
{
struct scatterlist *sg = host->sg_ptr, *sg_tmp;
@@ -77,7 +89,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
if (ret > 0)
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
- DMA_FROM_DEVICE, DMA_CTRL_ACK);
+ DMA_DEV_TO_MEM, DMA_CTRL_ACK);
if (desc) {
cookie = dmaengine_submit(desc);
@@ -158,7 +170,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
if (ret > 0)
desc = chan->device->device_prep_slave_sg(chan, sg, ret,
- DMA_TO_DEVICE, DMA_CTRL_ACK);
+ DMA_MEM_TO_DEV, DMA_CTRL_ACK);
if (desc) {
cookie = dmaengine_submit(desc);
diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c
index 4208b39..5f9ad74 100644
--- a/drivers/mmc/host/tmio_mmc_pio.c
+++ b/drivers/mmc/host/tmio_mmc_pio.c
@@ -41,8 +41,8 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/scatterlist.h>
-#include <linux/workqueue.h>
#include <linux/spinlock.h>
+#include <linux/workqueue.h>
#include "tmio_mmc.h"
@@ -246,6 +246,7 @@ static void tmio_mmc_reset_work(struct work_struct *work)
/* Ready for new calls */
host->mrq = NULL;
+ tmio_mmc_abort_dma(host);
mmc_request_done(host->mmc, mrq);
}
@@ -272,6 +273,9 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
host->mrq = NULL;
spin_unlock_irqrestore(&host->lock, flags);
+ if (mrq->cmd->error || (mrq->data && mrq->data->error))
+ tmio_mmc_abort_dma(host);
+
mmc_request_done(host->mmc, mrq);
}
@@ -800,8 +804,7 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
} else if (ios->power_mode != MMC_POWER_UP) {
if (host->set_pwr && ios->power_mode == MMC_POWER_OFF)
host->set_pwr(host->pdev, 0);
- if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
- pdata->power) {
+ if (pdata->power) {
pdata->power = false;
pm_runtime_put(&host->pdev->dev);
}
@@ -915,6 +918,23 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
if (ret < 0)
goto pm_disable;
+ /*
+ * There are 4 different scenarios for the card detection:
+ * 1) an external gpio irq handles the cd (best for power savings)
+ * 2) internal sdhi irq handles the cd
+ * 3) a worker thread polls the sdhi - indicated by MMC_CAP_NEEDS_POLL
+ * 4) the medium is non-removable - indicated by MMC_CAP_NONREMOVABLE
+ *
+ * While we increment the rtpm counter for all scenarios when the mmc
+ * core activates us by calling an appropriate set_ios(), we must
+ * additionally ensure that in case 2) the tmio mmc hardware stays
+ * powered on during runtime for the card detection to work.
+ */
+ if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD
+ || mmc->caps & MMC_CAP_NEEDS_POLL
+ || mmc->caps & MMC_CAP_NONREMOVABLE))
+ pm_runtime_get_noresume(&pdev->dev);
+
tmio_mmc_clk_stop(_host);
tmio_mmc_reset(_host);
@@ -933,12 +953,6 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host,
/* See if we also get DMA */
tmio_mmc_request_dma(_host, pdata);
- /* We have to keep the device powered for its card detection to work */
- if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD)) {
- pdata->power = true;
- pm_runtime_get_noresume(&pdev->dev);
- }
-
mmc_add_host(mmc);
/* Unmask the IRQs we want to know about */
@@ -974,7 +988,9 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host)
* the controller, the runtime PM is suspended and pdata->power == false,
* so, our .runtime_resume() will not try to detect a card in the slot.
*/
- if (host->pdata->flags & TMIO_MMC_HAS_COLD_CD)
+ if (host->pdata->flags & TMIO_MMC_HAS_COLD_CD
+ || host->mmc->caps & MMC_CAP_NEEDS_POLL
+ || host->mmc->caps & MMC_CAP_NONREMOVABLE)
pm_runtime_get_sync(&pdev->dev);
mmc_remove_host(host->mmc);
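The probe-time check added above decides whether the controller must keep a runtime-PM reference so its internal card-detect interrupt keeps working. A hedged restatement of that condition as a helper; the flags and caps are the ones used in the hunk, the function itself is not part of the patch:

#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>

/*
 * True when the tmio core itself must signal card insertion: no external
 * cold-CD handling, no polling, and the medium is removable (scenario 2
 * in the comment above).
 */
static bool tmio_needs_internal_cd_sketch(struct tmio_mmc_data *pdata,
					  struct mmc_host *mmc)
{
	return !(pdata->flags & TMIO_MMC_HAS_COLD_CD ||
		 mmc->caps & MMC_CAP_NEEDS_POLL ||
		 mmc->caps & MMC_CAP_NONREMOVABLE);
}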
diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c
index f08f944..c0105a2 100644
--- a/drivers/mmc/host/ushc.c
+++ b/drivers/mmc/host/ushc.c
@@ -562,17 +562,7 @@ static struct usb_driver ushc_driver = {
.disconnect = ushc_disconnect,
};
-static int __init ushc_init(void)
-{
- return usb_register(&ushc_driver);
-}
-module_init(ushc_init);
-
-static void __exit ushc_exit(void)
-{
- usb_deregister(&ushc_driver);
-}
-module_exit(ushc_exit);
+module_usb_driver(ushc_driver);
MODULE_DESCRIPTION("USB SD Host Controller driver");
MODULE_AUTHOR("David Vrabel <david.vrabel@csr.com>");
diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c
index 2ec978b..3135a1a 100644
--- a/drivers/mmc/host/vub300.c
+++ b/drivers/mmc/host/vub300.c
@@ -223,25 +223,25 @@ enum SD_RESPONSE_TYPE {
#define FUN(c) (0x000007 & (c->arg>>28))
#define REG(c) (0x01FFFF & (c->arg>>9))
-static int limit_speed_to_24_MHz;
+static bool limit_speed_to_24_MHz;
module_param(limit_speed_to_24_MHz, bool, 0644);
MODULE_PARM_DESC(limit_speed_to_24_MHz, "Limit Max SDIO Clock Speed to 24 MHz");
-static int pad_input_to_usb_pkt;
+static bool pad_input_to_usb_pkt;
module_param(pad_input_to_usb_pkt, bool, 0644);
MODULE_PARM_DESC(pad_input_to_usb_pkt,
"Pad USB data input transfers to whole USB Packet");
-static int disable_offload_processing;
+static bool disable_offload_processing;
module_param(disable_offload_processing, bool, 0644);
MODULE_PARM_DESC(disable_offload_processing, "Disable Offload Processing");
-static int force_1_bit_data_xfers;
+static bool force_1_bit_data_xfers;
module_param(force_1_bit_data_xfers, bool, 0644);
MODULE_PARM_DESC(force_1_bit_data_xfers,
"Force SDIO Data Transfers to 1-bit Mode");
-static int force_polling_for_irqs;
+static bool force_polling_for_irqs;
module_param(force_polling_for_irqs, bool, 0644);
MODULE_PARM_DESC(force_polling_for_irqs, "Force Polling for SDIO interrupts");
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 318a869..1be6218 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -140,6 +140,14 @@ config MTD_AR7_PARTS
---help---
TI AR7 partitioning support
+config MTD_BCM63XX_PARTS
+ tristate "BCM63XX CFE partitioning support"
+ depends on BCM63XX
+ select CRC32
+ help
+ This provides partition parsing for BCM63xx devices with CFE
+ bootloaders.
+
comment "User Modules And Translation Layers"
config MTD_CHAR
diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile
index 9aaac3a..f901354 100644
--- a/drivers/mtd/Makefile
+++ b/drivers/mtd/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o
obj-$(CONFIG_MTD_CMDLINE_PARTS) += cmdlinepart.o
obj-$(CONFIG_MTD_AFS_PARTS) += afs.o
obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o
+obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o
# 'Users' - code which presents functionality to userspace.
obj-$(CONFIG_MTD_CHAR) += mtdchar.o
diff --git a/drivers/mtd/afs.c b/drivers/mtd/afs.c
index 89a02f6..5a3942b 100644
--- a/drivers/mtd/afs.c
+++ b/drivers/mtd/afs.c
@@ -75,7 +75,7 @@ afs_read_footer(struct mtd_info *mtd, u_int *img_start, u_int *iis_start,
size_t sz;
int ret;
- ret = mtd->read(mtd, ptr, sizeof(fs), &sz, (u_char *) &fs);
+ ret = mtd_read(mtd, ptr, sizeof(fs), &sz, (u_char *)&fs);
if (ret >= 0 && sz != sizeof(fs))
ret = -EINVAL;
@@ -132,7 +132,7 @@ afs_read_iis(struct mtd_info *mtd, struct image_info_struct *iis, u_int ptr)
int ret, i;
memset(iis, 0, sizeof(*iis));
- ret = mtd->read(mtd, ptr, sizeof(*iis), &sz, (u_char *) iis);
+ ret = mtd_read(mtd, ptr, sizeof(*iis), &sz, (u_char *)iis);
if (ret < 0)
goto failed;
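These conversions replace direct calls through the mtd_info method pointers with the mtd_read() wrapper. As a rough sketch of what that wrapper amounted to at this point in history (the exact definition lives in include/linux/mtd/mtd.h and may differ):

static inline int mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	/* Forward to the driver-provided read method. */
	return mtd->read(mtd, from, len, retlen, buf);
}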
diff --git a/drivers/mtd/ar7part.c b/drivers/mtd/ar7part.c
index f40ea45..9453931 100644
--- a/drivers/mtd/ar7part.c
+++ b/drivers/mtd/ar7part.c
@@ -73,8 +73,8 @@ static int create_mtd_partitions(struct mtd_info *master,
do { /* Try 10 blocks starting from master->erasesize */
offset = pre_size;
- master->read(master, offset,
- sizeof(header), &len, (uint8_t *)&header);
+ mtd_read(master, offset, sizeof(header), &len,
+ (uint8_t *)&header);
if (!strncmp((char *)&header, "TIENV0.8", 8))
ar7_parts[1].offset = pre_size;
if (header.checksum == LOADER_MAGIC1)
@@ -95,16 +95,16 @@ static int create_mtd_partitions(struct mtd_info *master,
case LOADER_MAGIC1:
while (header.length) {
offset += sizeof(header) + header.length;
- master->read(master, offset, sizeof(header),
- &len, (uint8_t *)&header);
+ mtd_read(master, offset, sizeof(header), &len,
+ (uint8_t *)&header);
}
root_offset = offset + sizeof(header) + 4;
break;
case LOADER_MAGIC2:
while (header.length) {
offset += sizeof(header) + header.length;
- master->read(master, offset, sizeof(header),
- &len, (uint8_t *)&header);
+ mtd_read(master, offset, sizeof(header), &len,
+ (uint8_t *)&header);
}
root_offset = offset + sizeof(header) + 4 + 0xff;
root_offset &= ~(uint32_t)0xff;
@@ -114,8 +114,7 @@ static int create_mtd_partitions(struct mtd_info *master,
break;
}
- master->read(master, root_offset,
- sizeof(header), &len, (u8 *)&header);
+ mtd_read(master, root_offset, sizeof(header), &len, (u8 *)&header);
if (header.checksum != SQUASHFS_MAGIC) {
root_offset += master->erasesize - 1;
root_offset &= ~(master->erasesize - 1);
diff --git a/drivers/mtd/bcm63xxpart.c b/drivers/mtd/bcm63xxpart.c
new file mode 100644
index 0000000..608321e
--- /dev/null
+++ b/drivers/mtd/bcm63xxpart.c
@@ -0,0 +1,222 @@
+/*
+ * BCM63XX CFE image tag parser
+ *
+ * Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org>
+ * Mike Albon <malbon@openwrt.org>
+ * Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net>
+ * Copyright © 2011 Jonas Gorski <jonas.gorski@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/crc32.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/mach-bcm63xx/bcm963xx_tag.h>
+#include <asm/mach-bcm63xx/board_bcm963xx.h>
+
+#define BCM63XX_EXTENDED_SIZE 0xBFC00000 /* Extended flash address */
+
+#define BCM63XX_MIN_CFE_SIZE 0x10000 /* always at least 64KiB */
+#define BCM63XX_MIN_NVRAM_SIZE 0x10000 /* always at least 64KiB */
+
+#define BCM63XX_CFE_MAGIC_OFFSET 0x4e0
+
+static int bcm63xx_detect_cfe(struct mtd_info *master)
+{
+ char buf[9];
+ int ret;
+ size_t retlen;
+
+ ret = mtd_read(master, BCM963XX_CFE_VERSION_OFFSET, 5, &retlen,
+ (void *)buf);
+ buf[retlen] = 0;
+
+ if (ret)
+ return ret;
+
+ if (strncmp("cfe-v", buf, 5) == 0)
+ return 0;
+
+ /* very old CFEs do not have the cfe-v string, so check for the magic */
+ ret = mtd_read(master, BCM63XX_CFE_MAGIC_OFFSET, 8, &retlen,
+ (void *)buf);
+ buf[retlen] = 0;
+
+ return strncmp("CFE1CFE1", buf, 8);
+}
+
+static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
+ struct mtd_partition **pparts,
+ struct mtd_part_parser_data *data)
+{
+ /* CFE, NVRAM and global Linux are always present */
+ int nrparts = 3, curpart = 0;
+ struct bcm_tag *buf;
+ struct mtd_partition *parts;
+ int ret;
+ size_t retlen;
+ unsigned int rootfsaddr, kerneladdr, spareaddr;
+ unsigned int rootfslen, kernellen, sparelen, totallen;
+ unsigned int cfelen, nvramlen;
+ int namelen = 0;
+ int i;
+ u32 computed_crc;
+
+ if (bcm63xx_detect_cfe(master))
+ return -EINVAL;
+
+ cfelen = max_t(uint32_t, master->erasesize, BCM63XX_MIN_CFE_SIZE);
+ nvramlen = max_t(uint32_t, master->erasesize, BCM63XX_MIN_NVRAM_SIZE);
+
+ /* Allocate memory for buffer */
+ buf = vmalloc(sizeof(struct bcm_tag));
+ if (!buf)
+ return -ENOMEM;
+
+ /* Get the tag */
+ ret = mtd_read(master, cfelen, sizeof(struct bcm_tag), &retlen,
+ (void *)buf);
+
+ if (retlen != sizeof(struct bcm_tag)) {
+ vfree(buf);
+ return -EIO;
+ }
+
+ computed_crc = crc32_le(IMAGETAG_CRC_START, (u8 *)buf,
+ offsetof(struct bcm_tag, header_crc));
+ if (computed_crc == buf->header_crc) {
+ char *boardid = &(buf->board_id[0]);
+ char *tagversion = &(buf->tag_version[0]);
+
+ sscanf(buf->kernel_address, "%u", &kerneladdr);
+ sscanf(buf->kernel_length, "%u", &kernellen);
+ sscanf(buf->total_length, "%u", &totallen);
+
+ pr_info("CFE boot tag found with version %s and board type %s\n",
+ tagversion, boardid);
+
+ kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
+ rootfsaddr = kerneladdr + kernellen;
+ spareaddr = roundup(totallen, master->erasesize) + cfelen;
+ sparelen = master->size - spareaddr - nvramlen;
+ rootfslen = spareaddr - rootfsaddr;
+ } else {
+ pr_warn("CFE boot tag CRC invalid (expected %08x, actual %08x)\n",
+ buf->header_crc, computed_crc);
+ kernellen = 0;
+ rootfslen = 0;
+ rootfsaddr = 0;
+ spareaddr = cfelen;
+ sparelen = master->size - cfelen - nvramlen;
+ }
+
+ /* Determine number of partitions */
+ namelen = 8;
+ if (rootfslen > 0) {
+ nrparts++;
+ namelen += 6;
+ }
+ if (kernellen > 0) {
+ nrparts++;
+ namelen += 6;
+ }
+
+ /* Ask kernel for more memory */
+ parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
+ if (!parts) {
+ vfree(buf);
+ return -ENOMEM;
+ }
+
+ /* Start building partition list */
+ parts[curpart].name = "CFE";
+ parts[curpart].offset = 0;
+ parts[curpart].size = cfelen;
+ curpart++;
+
+ if (kernellen > 0) {
+ parts[curpart].name = "kernel";
+ parts[curpart].offset = kerneladdr;
+ parts[curpart].size = kernellen;
+ curpart++;
+ }
+
+ if (rootfslen > 0) {
+ parts[curpart].name = "rootfs";
+ parts[curpart].offset = rootfsaddr;
+ parts[curpart].size = rootfslen;
+ if (sparelen > 0)
+ parts[curpart].size += sparelen;
+ curpart++;
+ }
+
+ parts[curpart].name = "nvram";
+ parts[curpart].offset = master->size - nvramlen;
+ parts[curpart].size = nvramlen;
+
+ /* Global partition "linux" to make firmware upgrades easier */
+ curpart++;
+ parts[curpart].name = "linux";
+ parts[curpart].offset = cfelen;
+ parts[curpart].size = master->size - cfelen - nvramlen;
+
+ for (i = 0; i < nrparts; i++)
+ pr_info("Partition %d is %s offset %lx and length %lx\n", i,
+ parts[i].name, (long unsigned int)(parts[i].offset),
+ (long unsigned int)(parts[i].size));
+
+ pr_info("Spare partition is offset %x and length %x\n", spareaddr,
+ sparelen);
+
+ *pparts = parts;
+ vfree(buf);
+
+ return nrparts;
+}
+
+static struct mtd_part_parser bcm63xx_cfe_parser = {
+ .owner = THIS_MODULE,
+ .parse_fn = bcm63xx_parse_cfe_partitions,
+ .name = "bcm63xxpart",
+};
+
+static int __init bcm63xx_cfe_parser_init(void)
+{
+ return register_mtd_parser(&bcm63xx_cfe_parser);
+}
+
+static void __exit bcm63xx_cfe_parser_exit(void)
+{
+ deregister_mtd_parser(&bcm63xx_cfe_parser);
+}
+
+module_init(bcm63xx_cfe_parser_init);
+module_exit(bcm63xx_cfe_parser_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Daniel Dickinson <openwrt@cshore.neomailbox.net>");
+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_AUTHOR("Mike Albon <malbon@openwrt.org>");
+MODULE_AUTHOR("Jonas Gorski <jonas.gorski@gmail.com");
+MODULE_DESCRIPTION("MTD partitioning for BCM63XX CFE bootloaders");
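For the new parser to actually run, a BCM63xx flash driver (or board code) hands its name to the MTD partition-parsing core. A hedged usage sketch — the probe-list variable and helper name are invented for illustration:

static const char *bcm63xx_part_types[] = { "bcm63xxpart", NULL };

static int bcm63xx_register_flash(struct mtd_info *master)
{
	/* Let the MTD core try the CFE tag parser; no fallback partitions. */
	return mtd_device_parse_register(master, bcm63xx_part_types, NULL,
					 NULL, 0);
}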
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 179814a..85e8018 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -139,8 +139,9 @@ struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary)
}
/* Do some byteswapping if necessary */
- extp->FeatureSupport = cfi32_to_cpu(extp->FeatureSupport);
- extp->BlkStatusRegMask = cfi32_to_cpu(extp->BlkStatusRegMask);
+ extp->FeatureSupport = cfi32_to_cpu(map, extp->FeatureSupport);
+ extp->BlkStatusRegMask = cfi32_to_cpu(map,
+ extp->BlkStatusRegMask);
#ifdef DEBUG_CFI_FEATURES
/* Tell the user about it in lots of lovely detail */
@@ -698,7 +699,8 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
continue;
}
memcpy(buffer+buflen, elem_base, ECCBUF_SIZE-buflen);
- ret = mtd->write(mtd, to, ECCBUF_SIZE, &thislen, buffer);
+ ret = mtd_write(mtd, to, ECCBUF_SIZE, &thislen,
+ buffer);
totlen += thislen;
if (ret || thislen != ECCBUF_SIZE)
goto write_error;
@@ -707,7 +709,8 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
to += ECCBUF_SIZE;
}
if (ECCBUF_DIV(elem_len)) { /* write clean aligned data */
- ret = mtd->write(mtd, to, ECCBUF_DIV(elem_len), &thislen, elem_base);
+ ret = mtd_write(mtd, to, ECCBUF_DIV(elem_len),
+ &thislen, elem_base);
totlen += thislen;
if (ret || thislen != ECCBUF_DIV(elem_len))
goto write_error;
@@ -721,7 +724,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
}
if (buflen) { /* flush last page, even if not full */
/* This is sometimes intended behaviour, really */
- ret = mtd->write(mtd, to, buflen, &thislen, buffer);
+ ret = mtd_write(mtd, to, buflen, &thislen, buffer);
totlen += thislen;
if (ret || thislen != ECCBUF_SIZE)
goto write_error;
diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig
index 283d887..37b05c3 100644
--- a/drivers/mtd/devices/Kconfig
+++ b/drivers/mtd/devices/Kconfig
@@ -191,6 +191,7 @@ comment "Disk-On-Chip Device Drivers"
config MTD_DOC2000
tristate "M-Systems Disk-On-Chip 2000 and Millennium (DEPRECATED)"
+ depends on MTD_NAND
select MTD_DOCPROBE
select MTD_NAND_IDS
---help---
@@ -213,6 +214,7 @@ config MTD_DOC2000
config MTD_DOC2001
tristate "M-Systems Disk-On-Chip Millennium-only alternative driver (DEPRECATED)"
+ depends on MTD_NAND
select MTD_DOCPROBE
select MTD_NAND_IDS
---help---
@@ -234,6 +236,7 @@ config MTD_DOC2001
config MTD_DOC2001PLUS
tristate "M-Systems Disk-On-Chip Millennium Plus"
+ depends on MTD_NAND
select MTD_DOCPROBE
select MTD_NAND_IDS
---help---
@@ -251,6 +254,8 @@ config MTD_DOC2001PLUS
config MTD_DOCG3
tristate "M-Systems Disk-On-Chip G3"
+ select BCH
+ select BCH_CONST_PARAMS
---help---
This provides an MTD device driver for the M-Systems DiskOnChip
G3 devices.
@@ -259,6 +264,13 @@ config MTD_DOCG3
M-Systems and now Sandisk. The support is very experimental,
and doesn't give access to any write operations.
+if MTD_DOCG3
+config BCH_CONST_M
+ default 14
+config BCH_CONST_T
+ default 4
+endif
+
config MTD_DOCPROBE
tristate
select MTD_DOCECC
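A quick consistency check on the constants selected above (not stated in the patch itself, but implied by the docg3 comments): with BCH_CONST_M = 14 the decoder works in GF(2^14), and with BCH_CONST_T = 4 it corrects up to 4 bit flips per page; the resulting ECC is m*t = 14*4 = 56 bits = 7 bytes, matching the 7-byte BCH code the docg3 stores in each page's OOB, while the 520 covered bytes (4160 bits) fit comfortably within the 2^14 - 1 = 16383-bit code length.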
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index b78f231..e7e46d1 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -14,7 +14,6 @@
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
-#include <linux/buffer_head.h>
#include <linux/mutex.h>
#include <linux/mount.h>
#include <linux/slab.h>
@@ -288,7 +287,7 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
dev->mtd.flags = MTD_CAP_RAM;
dev->mtd.erase = block2mtd_erase;
dev->mtd.write = block2mtd_write;
- dev->mtd.writev = default_mtd_writev;
+ dev->mtd.writev = mtd_writev;
dev->mtd.sync = block2mtd_sync;
dev->mtd.read = block2mtd_read;
dev->mtd.priv = dev;
diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
index e9fad91..b1cdf64 100644
--- a/drivers/mtd/devices/doc2000.c
+++ b/drivers/mtd/devices/doc2000.c
@@ -562,23 +562,14 @@ void DoC2k_init(struct mtd_info *mtd)
mtd->type = MTD_NANDFLASH;
mtd->flags = MTD_CAP_NANDFLASH;
- mtd->size = 0;
- mtd->erasesize = 0;
mtd->writesize = 512;
mtd->oobsize = 16;
mtd->owner = THIS_MODULE;
mtd->erase = doc_erase;
- mtd->point = NULL;
- mtd->unpoint = NULL;
mtd->read = doc_read;
mtd->write = doc_write;
mtd->read_oob = doc_read_oob;
mtd->write_oob = doc_write_oob;
- mtd->sync = NULL;
-
- this->totlen = 0;
- this->numchips = 0;
-
this->curfloor = -1;
this->curchip = -1;
mutex_init(&this->lock);
diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
index a3f7a27..7543b98 100644
--- a/drivers/mtd/devices/doc2001.c
+++ b/drivers/mtd/devices/doc2001.c
@@ -343,25 +343,17 @@ void DoCMil_init(struct mtd_info *mtd)
mtd->type = MTD_NANDFLASH;
mtd->flags = MTD_CAP_NANDFLASH;
- mtd->size = 0;
/* FIXME: erase size is not always 8KiB */
mtd->erasesize = 0x2000;
-
mtd->writesize = 512;
mtd->oobsize = 16;
mtd->owner = THIS_MODULE;
mtd->erase = doc_erase;
- mtd->point = NULL;
- mtd->unpoint = NULL;
mtd->read = doc_read;
mtd->write = doc_write;
mtd->read_oob = doc_read_oob;
mtd->write_oob = doc_write_oob;
- mtd->sync = NULL;
-
- this->totlen = 0;
- this->numchips = 0;
this->curfloor = -1;
this->curchip = -1;
diff --git a/drivers/mtd/devices/doc2001plus.c b/drivers/mtd/devices/doc2001plus.c
index 99351bc..177510d 100644
--- a/drivers/mtd/devices/doc2001plus.c
+++ b/drivers/mtd/devices/doc2001plus.c
@@ -467,23 +467,14 @@ void DoCMilPlus_init(struct mtd_info *mtd)
mtd->type = MTD_NANDFLASH;
mtd->flags = MTD_CAP_NANDFLASH;
- mtd->size = 0;
-
- mtd->erasesize = 0;
mtd->writesize = 512;
mtd->oobsize = 16;
mtd->owner = THIS_MODULE;
mtd->erase = doc_erase;
- mtd->point = NULL;
- mtd->unpoint = NULL;
mtd->read = doc_read;
mtd->write = doc_write;
mtd->read_oob = doc_read_oob;
mtd->write_oob = doc_write_oob;
- mtd->sync = NULL;
-
- this->totlen = 0;
- this->numchips = 0;
this->curfloor = -1;
this->curchip = -1;
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index bdcf5df..ad11ef0a 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -29,6 +29,9 @@
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
+#include <linux/bitmap.h>
+#include <linux/bitrev.h>
+#include <linux/bch.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
@@ -41,11 +44,7 @@
*
* As no specification is available from M-Systems/Sandisk, this drivers lacks
* several functions available on the chip, as :
- * - block erase
- * - page write
* - IPL write
- * - ECC fixing (lack of BCH algorith understanding)
- * - powerdown / powerup
*
* The bus data width (8bits versus 16bits) is not handled (if_cfg flag), and
* the driver assumes a 16bits data bus.
@@ -53,8 +52,7 @@
* DocG3 relies on 2 ECC algorithms, which are handled in hardware :
* - a 1 byte Hamming code stored in the OOB for each page
* - a 7 bytes BCH code stored in the OOB for each page
- * The BCH part is only used for check purpose, no correction is available as
- * some information is missing. What is known is that :
+ * The BCH ECC is :
* - BCH is in GF(2^14)
* - BCH is over data of 520 bytes (512 page + 7 page_info bytes
* + 1 hamming byte)
@@ -63,6 +61,30 @@
*
*/
+static unsigned int reliable_mode;
+module_param(reliable_mode, uint, 0);
+MODULE_PARM_DESC(reliable_mode, "Set the docg3 mode (0=normal MLC, 1=fast, "
+ "2=reliable) : MLC normal operations are in normal mode");
+
+/**
+ * struct docg3_oobinfo - DiskOnChip G3 OOB layout
+ * @eccbytes: 8 bytes are used (1 for Hamming ECC, 7 for BCH ECC)
+ * @eccpos: ecc positions (byte 7 is Hamming ECC, byte 8-14 are BCH ECC)
+ * @oobfree: free pageinfo bytes (bytes 0 to 6, and byte 15)
+ * @oobavail: 8 bytes available to the user once the ECC bytes are reserved
+ */
+static struct nand_ecclayout docg3_oobinfo = {
+ .eccbytes = 8,
+ .eccpos = {7, 8, 9, 10, 11, 12, 13, 14},
+ .oobfree = {{0, 7}, {15, 1} },
+ .oobavail = 8,
+};
+
+/**
+ * docg3_bch - BCH decoder control structure, shared by all DiskOnChip G3 chips
+ */
+static struct bch_control *docg3_bch;
+
static inline u8 doc_readb(struct docg3 *docg3, u16 reg)
{
u8 val = readb(docg3->base + reg);
@@ -82,7 +104,7 @@ static inline u16 doc_readw(struct docg3 *docg3, u16 reg)
static inline void doc_writeb(struct docg3 *docg3, u8 val, u16 reg)
{
writeb(val, docg3->base + reg);
- trace_docg3_io(1, 16, reg, val);
+ trace_docg3_io(1, 8, reg, val);
}
static inline void doc_writew(struct docg3 *docg3, u16 val, u16 reg)
@@ -143,7 +165,7 @@ static void doc_delay(struct docg3 *docg3, int nbNOPs)
{
int i;
- doc_dbg("NOP x %d\n", nbNOPs);
+ doc_vdbg("NOP x %d\n", nbNOPs);
for (i = 0; i < nbNOPs; i++)
doc_writeb(docg3, 0, DOC_NOP);
}
@@ -196,8 +218,8 @@ static int doc_reset_seq(struct docg3 *docg3)
/**
* doc_read_data_area - Read data from data area
* @docg3: the device
- * @buf: the buffer to fill in
- * @len: the lenght to read
+ * @buf: the buffer to fill in (might be NULL for dummy reads)
+ * @len: the length to read
* @first: first time read, DOC_READADDRESS should be set
*
* Reads bytes from flash data. Handles the single byte / even bytes reads.
@@ -218,8 +240,10 @@ static void doc_read_data_area(struct docg3 *docg3, void *buf, int len,
dst16 = buf;
for (i = 0; i < len4; i += 2) {
data16 = doc_readw(docg3, DOC_IOSPACE_DATA);
- *dst16 = data16;
- dst16++;
+ if (dst16) {
+ *dst16 = data16;
+ dst16++;
+ }
}
if (cdr) {
@@ -229,26 +253,84 @@ static void doc_read_data_area(struct docg3 *docg3, void *buf, int len,
dst8 = (u8 *)dst16;
for (i = 0; i < cdr; i++) {
data8 = doc_readb(docg3, DOC_IOSPACE_DATA);
- *dst8 = data8;
- dst8++;
+ if (dst8) {
+ *dst8 = data8;
+ dst8++;
+ }
}
}
}
/**
- * doc_set_data_mode - Sets the flash to reliable data mode
+ * doc_write_data_area - Write data into data area
+ * @docg3: the device
+ * @buf: the buffer to get input bytes from
+ * @len: the length to write
+ *
+ * Writes bytes into flash data. Handles the single byte / even bytes writes.
+ */
+static void doc_write_data_area(struct docg3 *docg3, const void *buf, int len)
+{
+ int i, cdr, len4;
+ u16 *src16;
+ u8 *src8;
+
+ doc_dbg("doc_write_data_area(buf=%p, len=%d)\n", buf, len);
+ cdr = len & 0x3;
+ len4 = len - cdr;
+
+ doc_writew(docg3, DOC_IOSPACE_DATA, DOC_READADDRESS);
+ src16 = (u16 *)buf;
+ for (i = 0; i < len4; i += 2) {
+ doc_writew(docg3, *src16, DOC_IOSPACE_DATA);
+ src16++;
+ }
+
+ src8 = (u8 *)src16;
+ for (i = 0; i < cdr; i++) {
+ doc_writew(docg3, DOC_IOSPACE_DATA | DOC_READADDR_ONE_BYTE,
+ DOC_READADDRESS);
+ doc_writeb(docg3, *src8, DOC_IOSPACE_DATA);
+ src8++;
+ }
+}
+
+/**
+ * doc_set_data_mode - Sets the flash to normal or reliable data mode
* @docg3: the device
*
 * The reliable data mode is a bit slower than the fast mode, but fewer errors
* occur. Entering the reliable mode cannot be done without entering the fast
* mode first.
+ *
+ * In reliable mode, pages 2*n and 2*n+1 are clones. Writing to page 0 of blocks
+ * (4,5) makes the hardware also write to page 1 of blocks (4,5). Reading
+ * from page 0 of blocks (4,5) or from page 1 of blocks (4,5) gives the same
+ * result, which is the logical AND of the bytes from page 0 and page 1 (this is
+ * consistent with the fact that writing to a page only _clears_ bits of that
+ * page).
*/
static void doc_set_reliable_mode(struct docg3 *docg3)
{
- doc_dbg("doc_set_reliable_mode()\n");
- doc_flash_sequence(docg3, DOC_SEQ_SET_MODE);
- doc_flash_command(docg3, DOC_CMD_FAST_MODE);
- doc_flash_command(docg3, DOC_CMD_RELIABLE_MODE);
+ static char *strmode[] = { "normal", "fast", "reliable", "invalid" };
+
+ doc_dbg("doc_set_reliable_mode(%s)\n", strmode[docg3->reliable]);
+ switch (docg3->reliable) {
+ case 0:
+ break;
+ case 1:
+ doc_flash_sequence(docg3, DOC_SEQ_SET_FASTMODE);
+ doc_flash_command(docg3, DOC_CMD_FAST_MODE);
+ break;
+ case 2:
+ doc_flash_sequence(docg3, DOC_SEQ_SET_RELIABLEMODE);
+ doc_flash_command(docg3, DOC_CMD_FAST_MODE);
+ doc_flash_command(docg3, DOC_CMD_RELIABLE_MODE);
+ break;
+ default:
+ doc_err("doc_set_reliable_mode(): invalid mode\n");
+ break;
+ }
doc_delay(docg3, 2);
}
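To make the clone-page behaviour described above concrete (values invented for illustration): if, outside reliable mode, page 2n were programmed to 0b11001100 and page 2n+1 to 0b10101010, a reliable-mode read of either page would return 0b10001000 — the bitwise AND of the two — because NAND programming can only clear bits, never set them.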
@@ -325,6 +407,37 @@ static int doc_set_extra_page_mode(struct docg3 *docg3)
}
/**
+ * doc_setup_addr_sector - Setup blocks/page/ofs address for one plane
+ * @docg3: the device
+ * @sector: the sector
+ */
+static void doc_setup_addr_sector(struct docg3 *docg3, int sector)
+{
+ doc_delay(docg3, 1);
+ doc_flash_address(docg3, sector & 0xff);
+ doc_flash_address(docg3, (sector >> 8) & 0xff);
+ doc_flash_address(docg3, (sector >> 16) & 0xff);
+ doc_delay(docg3, 1);
+}
+
+/**
+ * doc_setup_writeaddr_sector - Setup blocks/page/ofs address for one plane
+ * @docg3: the device
+ * @sector: the sector
+ * @ofs: the offset in the page, between 0 and (512 + 16 + 512)
+ */
+static void doc_setup_writeaddr_sector(struct docg3 *docg3, int sector, int ofs)
+{
+ ofs = ofs >> 2;
+ doc_delay(docg3, 1);
+ doc_flash_address(docg3, ofs & 0xff);
+ doc_flash_address(docg3, sector & 0xff);
+ doc_flash_address(docg3, (sector >> 8) & 0xff);
+ doc_flash_address(docg3, (sector >> 16) & 0xff);
+ doc_delay(docg3, 1);
+}
+
+/**
* doc_seek - Set both flash planes to the specified block, page for reading
* @docg3: the device
* @block0: the first plane block index
@@ -360,34 +473,80 @@ static int doc_read_seek(struct docg3 *docg3, int block0, int block1, int page,
if (ret)
goto out;
- sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
doc_flash_sequence(docg3, DOC_SEQ_READ);
+ sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
- doc_delay(docg3, 1);
- doc_flash_address(docg3, sector & 0xff);
- doc_flash_address(docg3, (sector >> 8) & 0xff);
- doc_flash_address(docg3, (sector >> 16) & 0xff);
- doc_delay(docg3, 1);
+ doc_setup_addr_sector(docg3, sector);
sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+ doc_setup_addr_sector(docg3, sector);
doc_delay(docg3, 1);
- doc_flash_address(docg3, sector & 0xff);
- doc_flash_address(docg3, (sector >> 8) & 0xff);
- doc_flash_address(docg3, (sector >> 16) & 0xff);
+
+out:
+ return ret;
+}
+
+/**
+ * doc_write_seek - Set both flash planes to the specified block, page for writing
+ * @docg3: the device
+ * @block0: the first plane block index
+ * @block1: the second plane block index
+ * @page: the page index within the block
+ * @ofs: offset in page to write
+ *
+ * Programs the flash even and odd planes to the specific block and page.
+ * Alternatively, programs the flash to the wear area of the specified page.
+ */
+static int doc_write_seek(struct docg3 *docg3, int block0, int block1, int page,
+ int ofs)
+{
+ int ret = 0, sector;
+
+ doc_dbg("doc_write_seek(blocks=(%d,%d), page=%d, ofs=%d)\n",
+ block0, block1, page, ofs);
+
+ doc_set_reliable_mode(docg3);
+
+ if (ofs < 2 * DOC_LAYOUT_PAGE_SIZE) {
+ doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE1);
+ doc_flash_command(docg3, DOC_CMD_READ_PLANE1);
+ doc_delay(docg3, 2);
+ } else {
+ doc_flash_sequence(docg3, DOC_SEQ_SET_PLANE2);
+ doc_flash_command(docg3, DOC_CMD_READ_PLANE2);
+ doc_delay(docg3, 2);
+ }
+
+ doc_flash_sequence(docg3, DOC_SEQ_PAGE_SETUP);
+ doc_flash_command(docg3, DOC_CMD_PROG_CYCLE1);
+
+ sector = (block0 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
+ doc_setup_writeaddr_sector(docg3, sector, ofs);
+
+ doc_flash_command(docg3, DOC_CMD_PROG_CYCLE3);
doc_delay(docg3, 2);
+ ret = doc_wait_ready(docg3);
+ if (ret)
+ goto out;
+
+ doc_flash_command(docg3, DOC_CMD_PROG_CYCLE1);
+ sector = (block1 << DOC_ADDR_BLOCK_SHIFT) + (page & DOC_ADDR_PAGE_MASK);
+ doc_setup_writeaddr_sector(docg3, sector, ofs);
+ doc_delay(docg3, 1);
out:
return ret;
}
+
/**
* doc_read_page_ecc_init - Initialize hardware ECC engine
* @docg3: the device
* @len: the number of bytes covered by the ECC (BCH covered)
*
* The function does initialize the hardware ECC engine to compute the Hamming
- * ECC (on 1 byte) and the BCH Syndroms (on 7 bytes).
+ * ECC (on 1 byte) and the BCH hardware ECC (on 7 bytes).
*
* Return 0 if succeeded, -EIO on error
*/
@@ -403,6 +562,106 @@ static int doc_read_page_ecc_init(struct docg3 *docg3, int len)
}
/**
+ * doc_write_page_ecc_init - Initialize hardware BCH ECC engine
+ * @docg3: the device
+ * @len: the number of bytes covered by the ECC (BCH covered)
+ *
+ * The function does initialize the hardware ECC engine to compute the Hamming
+ * ECC (on 1 byte) and the BCH hardware ECC (on 7 bytes).
+ *
+ * Return 0 if succeeded, -EIO on error
+ */
+static int doc_write_page_ecc_init(struct docg3 *docg3, int len)
+{
+ doc_writew(docg3, DOC_ECCCONF0_WRITE_MODE
+ | DOC_ECCCONF0_BCH_ENABLE | DOC_ECCCONF0_HAMMING_ENABLE
+ | (len & DOC_ECCCONF0_DATA_BYTES_MASK),
+ DOC_ECCCONF0);
+ doc_delay(docg3, 4);
+ doc_register_readb(docg3, DOC_FLASHCONTROL);
+ return doc_wait_ready(docg3);
+}
+
+/**
+ * doc_ecc_disable - Disable Hamming and BCH ECC hardware calculator
+ * @docg3: the device
+ *
+ * Disables the hardware ECC generator and checker, for unchecked reads (as when
+ * reading OOB only or write status byte).
+ */
+static void doc_ecc_disable(struct docg3 *docg3)
+{
+ doc_writew(docg3, DOC_ECCCONF0_READ_MODE, DOC_ECCCONF0);
+ doc_delay(docg3, 4);
+}
+
+/**
+ * doc_hamming_ecc_init - Initialize hardware Hamming ECC engine
+ * @docg3: the device
+ * @nb_bytes: the number of bytes covered by the ECC (Hamming covered)
+ *
+ * This function programs the ECC hardware to compute the hamming code on the
+ * last provided N bytes to the hardware generator.
+ */
+static void doc_hamming_ecc_init(struct docg3 *docg3, int nb_bytes)
+{
+ u8 ecc_conf1;
+
+ ecc_conf1 = doc_register_readb(docg3, DOC_ECCCONF1);
+ ecc_conf1 &= ~DOC_ECCCONF1_HAMMING_BITS_MASK;
+ ecc_conf1 |= (nb_bytes & DOC_ECCCONF1_HAMMING_BITS_MASK);
+ doc_writeb(docg3, ecc_conf1, DOC_ECCCONF1);
+}
+
+/**
+ * doc_ecc_bch_fix_data - Fix if need be read data from flash
+ * @docg3: the device
+ * @buf: the buffer of read data (512 + 7 + 1 bytes)
+ * @hwecc: the hardware calculated ECC.
+ * It's in fact recv_ecc ^ calc_ecc, where recv_ecc was read from OOB
+ * area data, and calc_ecc the ECC calculated by the hardware generator.
+ *
+ * Checks if the received data matches the ECC, and if an error is detected,
+ * tries to fix the bit flips (at most 4) in the buffer buf. As the docg3
+ * understands the (data, ecc, syndromes) in a bit order inverted with respect
+ * to the BCH library, the function reverses the order of bits (i.e. bit 7 and
+ * bit 0, bit 6 and bit 1, ...) for all ECC data.
+ *
+ * The hardware ecc unit produces oob_ecc ^ calc_ecc. The kernel's bch
+ * algorithm is used to decode this. However the hw operates on page
+ * data in a bit order that is the reverse of that of the bch alg,
+ * requiring that the bits be reversed on the result. Thanks to Ivan
+ * Djelic for his analysis.
+ *
+ * Returns number of fixed bits (0, 1, 2, 3, 4) or -EBADMSG if too many bit
+ * errors were detected and cannot be fixed.
+ */
+static int doc_ecc_bch_fix_data(struct docg3 *docg3, void *buf, u8 *hwecc)
+{
+ u8 ecc[DOC_ECC_BCH_SIZE];
+ int errorpos[DOC_ECC_BCH_T], i, numerrs;
+
+ for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
+ ecc[i] = bitrev8(hwecc[i]);
+ numerrs = decode_bch(docg3_bch, NULL, DOC_ECC_BCH_COVERED_BYTES,
+ NULL, ecc, NULL, errorpos);
+ BUG_ON(numerrs == -EINVAL);
+ if (numerrs < 0)
+ goto out;
+
+ for (i = 0; i < numerrs; i++)
+ errorpos[i] = (errorpos[i] & ~7) | (7 - (errorpos[i] & 7));
+ for (i = 0; i < numerrs; i++)
+ if (errorpos[i] < DOC_ECC_BCH_COVERED_BYTES*8)
+ /* error is located in data, correct it */
+ change_bit(errorpos[i], buf);
+out:
+ doc_dbg("doc_ecc_bch_fix_data: flipped %d bits\n", numerrs);
+ return numerrs;
+}
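As a worked example of the bit-order fix-up above (numbers invented): if decode_bch() reports an error at bit position 10, i.e. bit 2 of byte 1 in the library's bit order, the driver remaps it to (10 & ~7) | (7 - (10 & 7)) = 8 | 5 = 13, i.e. bit 5 of byte 1, before flipping it with change_bit() in the page buffer.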
+
+
+/**
* doc_read_page_prepare - Prepares reading data from a flash page
* @docg3: the device
* @block0: the first plane block index on flash memory
@@ -488,16 +747,40 @@ static int doc_read_page_getbytes(struct docg3 *docg3, int len, u_char *buf,
}
/**
- * doc_get_hw_bch_syndroms - Get hardware calculated BCH syndroms
+ * doc_write_page_putbytes - Writes bytes into a prepared page
+ * @docg3: the device
+ * @len: the number of bytes to be written
+ * @buf: the buffer of input bytes
+ *
+ */
+static void doc_write_page_putbytes(struct docg3 *docg3, int len,
+ const u_char *buf)
+{
+ doc_write_data_area(docg3, buf, len);
+ doc_delay(docg3, 2);
+}
+
+/**
+ * doc_get_bch_hw_ecc - Get hardware calculated BCH ECC
* @docg3: the device
- * @syns: the array of 7 integers where the syndroms will be stored
+ * @hwecc: the array of 7 integers where the hardware ecc will be stored
*/
-static void doc_get_hw_bch_syndroms(struct docg3 *docg3, int *syns)
+static void doc_get_bch_hw_ecc(struct docg3 *docg3, u8 *hwecc)
{
int i;
for (i = 0; i < DOC_ECC_BCH_SIZE; i++)
- syns[i] = doc_register_readb(docg3, DOC_BCH_SYNDROM(i));
+ hwecc[i] = doc_register_readb(docg3, DOC_BCH_HW_ECC(i));
+}
+
+/**
+ * doc_page_finish - Ends reading/writing of a flash page
+ * @docg3: the device
+ */
+static void doc_page_finish(struct docg3 *docg3)
+{
+ doc_writeb(docg3, 0, DOC_DATAEND);
+ doc_delay(docg3, 2);
}
/**
@@ -510,8 +793,7 @@ static void doc_get_hw_bch_syndroms(struct docg3 *docg3, int *syns)
*/
static void doc_read_page_finish(struct docg3 *docg3)
{
- doc_writeb(docg3, 0, DOC_DATAEND);
- doc_delay(docg3, 2);
+ doc_page_finish(docg3);
doc_set_device_id(docg3, 0);
}
@@ -523,18 +805,29 @@ static void doc_read_page_finish(struct docg3 *docg3)
* @block1: second plane block index calculated
* @page: page calculated
* @ofs: offset in page
+ * @reliable: 0 if docg3 in normal mode, 1 if docg3 in fast mode, 2 if docg3 in
+ * reliable mode.
+ *
+ * The calculation is based on the reliable/normal mode. In normal mode, the 64
+ * pages of a block are available. In reliable mode, as pages 2*n and 2*n+1 are
+ * clones, only 32 pages per block are available.
*/
static void calc_block_sector(loff_t from, int *block0, int *block1, int *page,
- int *ofs)
+ int *ofs, int reliable)
{
- uint sector;
+ uint sector, pages_biblock;
+
+ pages_biblock = DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES;
+ if (reliable == 1 || reliable == 2)
+ pages_biblock /= 2;
sector = from / DOC_LAYOUT_PAGE_SIZE;
- *block0 = sector / (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES)
- * DOC_LAYOUT_NBPLANES;
+ *block0 = sector / pages_biblock * DOC_LAYOUT_NBPLANES;
*block1 = *block0 + 1;
- *page = sector % (DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_NBPLANES);
+ *page = sector % pages_biblock;
*page /= DOC_LAYOUT_NBPLANES;
+ if (reliable == 1 || reliable == 2)
+ *page *= 2;
if (sector % 2)
*ofs = DOC_LAYOUT_PAGE_OOB_SIZE;
else
@@ -542,99 +835,124 @@ static void calc_block_sector(loff_t from, int *block0, int *block1, int *page,
}
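A worked example of the mapping above, assuming the layout implied elsewhere in this driver (512-byte pages, 64 pages per block, 2 planes — the exact values live in the DOC_LAYOUT_* macros): for from = 0x41000 the sector index is 520. In normal mode a plane pair holds 128 sectors, so block0 = (520 / 128) * 2 = 8, block1 = 9, page = (520 % 128) / 2 = 4 and, the sector index being even, ofs = 0. In reliable mode only 64 sectors fit in a pair, giving block0 = 16, block1 = 17 and page = 4 * 2 = 8 (only even pages are addressed, their odd clones being implicit).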
/**
- * doc_read - Read bytes from flash
+ * doc_read_oob - Read out of band bytes from flash
* @mtd: the device
* @from: the offset from first block and first page, in bytes, aligned on page
* size
- * @len: the number of bytes to read (must be a multiple of 4)
- * @retlen: the number of bytes actually read
- * @buf: the filled in buffer
+ * @ops: the mtd oob structure
*
- * Reads flash memory pages. This function does not read the OOB chunk, but only
- * the page data.
+ * Reads flash memory OOB area of pages.
*
 * Returns 0 if the read succeeded, or -EIO / -EINVAL if an error occurred
*/
-static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, u_char *buf)
+static int doc_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
{
struct docg3 *docg3 = mtd->priv;
- int block0, block1, page, readlen, ret, ofs = 0;
- int syn[DOC_ECC_BCH_SIZE], eccconf1;
- u8 oob[DOC_LAYOUT_OOB_SIZE];
+ int block0, block1, page, ret, ofs = 0;
+ u8 *oobbuf = ops->oobbuf;
+ u8 *buf = ops->datbuf;
+ size_t len, ooblen, nbdata, nboob;
+ u8 hwecc[DOC_ECC_BCH_SIZE], eccconf1;
+
+ if (buf)
+ len = ops->len;
+ else
+ len = 0;
+ if (oobbuf)
+ ooblen = ops->ooblen;
+ else
+ ooblen = 0;
+
+ if (oobbuf && ops->mode == MTD_OPS_PLACE_OOB)
+ oobbuf += ops->ooboffs;
+
+ doc_dbg("doc_read_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n",
+ from, ops->mode, buf, len, oobbuf, ooblen);
+ if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % DOC_LAYOUT_OOB_SIZE) ||
+ (from % DOC_LAYOUT_PAGE_SIZE))
+ return -EINVAL;
ret = -EINVAL;
- doc_dbg("doc_read(from=%lld, len=%zu, buf=%p)\n", from, len, buf);
- if (from % DOC_LAYOUT_PAGE_SIZE)
- goto err;
- if (len % 4)
- goto err;
- calc_block_sector(from, &block0, &block1, &page, &ofs);
+ calc_block_sector(from + len, &block0, &block1, &page, &ofs,
+ docg3->reliable);
if (block1 > docg3->max_block)
goto err;
- *retlen = 0;
+ ops->oobretlen = 0;
+ ops->retlen = 0;
ret = 0;
- readlen = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE);
- while (!ret && len > 0) {
- readlen = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE);
+ while (!ret && (len > 0 || ooblen > 0)) {
+ calc_block_sector(from, &block0, &block1, &page, &ofs,
+ docg3->reliable);
+ nbdata = min_t(size_t, len, (size_t)DOC_LAYOUT_PAGE_SIZE);
+ nboob = min_t(size_t, ooblen, (size_t)DOC_LAYOUT_OOB_SIZE);
ret = doc_read_page_prepare(docg3, block0, block1, page, ofs);
if (ret < 0)
goto err;
- ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_COVERED_BYTES);
+ ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
if (ret < 0)
goto err_in_read;
- ret = doc_read_page_getbytes(docg3, readlen, buf, 1);
- if (ret < readlen)
+ ret = doc_read_page_getbytes(docg3, nbdata, buf, 1);
+ if (ret < nbdata)
goto err_in_read;
- ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE,
- oob, 0);
- if (ret < DOC_LAYOUT_OOB_SIZE)
+ doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE - nbdata,
+ NULL, 0);
+ ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0);
+ if (ret < nboob)
goto err_in_read;
+ doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE - nboob,
+ NULL, 0);
- *retlen += readlen;
- buf += readlen;
- len -= readlen;
-
- ofs ^= DOC_LAYOUT_PAGE_OOB_SIZE;
- if (ofs == 0)
- page += 2;
- if (page > DOC_ADDR_PAGE_MASK) {
- page = 0;
- block0 += 2;
- block1 += 2;
- }
-
- /*
- * There should be a BCH bitstream fixing algorithm here ...
- * By now, a page read failure is triggered by BCH error
- */
- doc_get_hw_bch_syndroms(docg3, syn);
+ doc_get_bch_hw_ecc(docg3, hwecc);
eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1);
- doc_dbg("OOB - INFO: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
- oob[0], oob[1], oob[2], oob[3], oob[4],
- oob[5], oob[6]);
- doc_dbg("OOB - HAMMING: %02x\n", oob[7]);
- doc_dbg("OOB - BCH_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
- oob[8], oob[9], oob[10], oob[11], oob[12],
- oob[13], oob[14]);
- doc_dbg("OOB - UNUSED: %02x\n", oob[15]);
+ if (nboob >= DOC_LAYOUT_OOB_SIZE) {
+ doc_dbg("OOB - INFO: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ oobbuf[0], oobbuf[1], oobbuf[2], oobbuf[3],
+ oobbuf[4], oobbuf[5], oobbuf[6]);
+ doc_dbg("OOB - HAMMING: %02x\n", oobbuf[7]);
+ doc_dbg("OOB - BCH_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ oobbuf[8], oobbuf[9], oobbuf[10], oobbuf[11],
+ oobbuf[12], oobbuf[13], oobbuf[14]);
+ doc_dbg("OOB - UNUSED: %02x\n", oobbuf[15]);
+ }
doc_dbg("ECC checks: ECCConf1=%x\n", eccconf1);
- doc_dbg("ECC BCH syndrom: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
- syn[0], syn[1], syn[2], syn[3], syn[4], syn[5], syn[6]);
-
- ret = -EBADMSG;
- if (block0 >= DOC_LAYOUT_BLOCK_FIRST_DATA) {
- if (eccconf1 & DOC_ECCCONF1_BCH_SYNDROM_ERR)
- goto err_in_read;
- if (is_prot_seq_error(docg3))
- goto err_in_read;
+ doc_dbg("ECC HW_ECC: %02x:%02x:%02x:%02x:%02x:%02x:%02x\n",
+ hwecc[0], hwecc[1], hwecc[2], hwecc[3], hwecc[4],
+ hwecc[5], hwecc[6]);
+
+ ret = -EIO;
+ if (is_prot_seq_error(docg3))
+ goto err_in_read;
+ ret = 0;
+ if ((block0 >= DOC_LAYOUT_BLOCK_FIRST_DATA) &&
+ (eccconf1 & DOC_ECCCONF1_BCH_SYNDROM_ERR) &&
+ (eccconf1 & DOC_ECCCONF1_PAGE_IS_WRITTEN) &&
+ (ops->mode != MTD_OPS_RAW) &&
+ (nbdata == DOC_LAYOUT_PAGE_SIZE)) {
+ ret = doc_ecc_bch_fix_data(docg3, buf, hwecc);
+ if (ret < 0) {
+ mtd->ecc_stats.failed++;
+ ret = -EBADMSG;
+ }
+ if (ret > 0) {
+ mtd->ecc_stats.corrected += ret;
+ ret = -EUCLEAN;
+ }
}
+
doc_read_page_finish(docg3);
+ ops->retlen += nbdata;
+ ops->oobretlen += nboob;
+ buf += nbdata;
+ oobbuf += nboob;
+ len -= nbdata;
+ ooblen -= nboob;
+ from += DOC_LAYOUT_PAGE_SIZE;
}
- return 0;
+ return ret;
err_in_read:
doc_read_page_finish(docg3);
err:
@@ -642,54 +960,33 @@ err:
}
/**
- * doc_read_oob - Read out of band bytes from flash
+ * doc_read - Read bytes from flash
* @mtd: the device
* @from: the offset from first block and first page, in bytes, aligned on page
* size
- * @ops: the mtd oob structure
+ * @len: the number of bytes to read (must be a multiple of 4)
+ * @retlen: the number of bytes actually read
+ * @buf: the filled in buffer
*
- * Reads flash memory OOB area of pages.
+ * Reads flash memory pages. This function does not read the OOB chunk, but only
+ * the page data.
*
 * Returns 0 if the read succeeded, or -EIO / -EINVAL if an error occurred
*/
-static int doc_read_oob(struct mtd_info *mtd, loff_t from,
- struct mtd_oob_ops *ops)
+static int doc_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, u_char *buf)
{
- struct docg3 *docg3 = mtd->priv;
- int block0, block1, page, ofs, ret;
- u8 *buf = ops->oobbuf;
- size_t len = ops->ooblen;
-
- doc_dbg("doc_read_oob(from=%lld, buf=%p, len=%zu)\n", from, buf, len);
- if (len != DOC_LAYOUT_OOB_SIZE)
- return -EINVAL;
-
- switch (ops->mode) {
- case MTD_OPS_PLACE_OOB:
- buf += ops->ooboffs;
- break;
- default:
- break;
- }
+ struct mtd_oob_ops ops;
+ size_t ret;
- calc_block_sector(from, &block0, &block1, &page, &ofs);
- if (block1 > docg3->max_block)
- return -EINVAL;
-
- ret = doc_read_page_prepare(docg3, block0, block1, page,
- ofs + DOC_LAYOUT_PAGE_SIZE);
- if (!ret)
- ret = doc_read_page_ecc_init(docg3, DOC_LAYOUT_OOB_SIZE);
- if (!ret)
- ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE,
- buf, 1);
- doc_read_page_finish(docg3);
+ memset(&ops, 0, sizeof(ops));
+ ops.datbuf = buf;
+ ops.len = len;
+ ops.mode = MTD_OPS_AUTO_OOB;
- if (ret > 0)
- ops->oobretlen = ret;
- else
- ops->oobretlen = 0;
- return (ret > 0) ? 0 : ret;
+ ret = doc_read_oob(mtd, from, &ops);
+ *retlen = ops.retlen;
+ return ret;
}
static int doc_reload_bbt(struct docg3 *docg3)
@@ -726,7 +1023,8 @@ static int doc_block_isbad(struct mtd_info *mtd, loff_t from)
struct docg3 *docg3 = mtd->priv;
int block0, block1, page, ofs, is_good;
- calc_block_sector(from, &block0, &block1, &page, &ofs);
+ calc_block_sector(from, &block0, &block1, &page, &ofs,
+ docg3->reliable);
doc_dbg("doc_block_isbad(from=%lld) => block=(%d,%d), page=%d, ofs=%d\n",
from, block0, block1, page, ofs);
@@ -739,6 +1037,7 @@ static int doc_block_isbad(struct mtd_info *mtd, loff_t from)
return !is_good;
}
+#if 0
/**
* doc_get_erase_count - Get block erase count
* @docg3: the device
@@ -758,7 +1057,7 @@ static int doc_get_erase_count(struct docg3 *docg3, loff_t from)
doc_dbg("doc_get_erase_count(from=%lld, buf=%p)\n", from, buf);
if (from % DOC_LAYOUT_PAGE_SIZE)
return -EINVAL;
- calc_block_sector(from, &block0, &block1, &page, &ofs);
+ calc_block_sector(from, &block0, &block1, &page, &ofs, docg3->reliable);
if (block1 > docg3->max_block)
return -EINVAL;
@@ -780,6 +1079,558 @@ static int doc_get_erase_count(struct docg3 *docg3, loff_t from)
return max(plane1_erase_count, plane2_erase_count);
}
+#endif
+
+/**
+ * doc_get_op_status - get erase/write operation status
+ * @docg3: the device
+ *
+ * Queries the status from the chip, and returns it
+ *
+ * Returns the status (bits DOC_PLANES_STATUS_*)
+ */
+static int doc_get_op_status(struct docg3 *docg3)
+{
+ u8 status;
+
+ doc_flash_sequence(docg3, DOC_SEQ_PLANES_STATUS);
+ doc_flash_command(docg3, DOC_CMD_PLANES_STATUS);
+ doc_delay(docg3, 5);
+
+ doc_ecc_disable(docg3);
+ doc_read_data_area(docg3, &status, 1, 1);
+ return status;
+}
+
+/**
+ * doc_write_erase_wait_status - wait for write or erase completion
+ * @docg3: the device
+ *
+ * Wait for the chip to be ready again after an erase or write operation, and
+ * check the erase/write status.
+ *
+ * Returns 0 if the erase/write succeeded, -EIO on an erase/write failure,
+ * -EAGAIN on timeout
+ */
+static int doc_write_erase_wait_status(struct docg3 *docg3)
+{
+ int status, ret = 0;
+
+ if (!doc_is_ready(docg3))
+ usleep_range(3000, 3000);
+ if (!doc_is_ready(docg3)) {
+ doc_dbg("Timeout reached and the chip is still not ready\n");
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ status = doc_get_op_status(docg3);
+ if (status & DOC_PLANES_STATUS_FAIL) {
+ doc_dbg("Erase/Write failed on (a) plane(s), status = %x\n",
+ status);
+ ret = -EIO;
+ }
+
+out:
+ doc_page_finish(docg3);
+ return ret;
+}
+
+/**
+ * doc_erase_block - Erase a couple of blocks
+ * @docg3: the device
+ * @block0: the first block to erase (leftmost plane)
+ * @block1: the second block to erase (rightmost plane)
+ *
+ * Erase both blocks, and return operation status
+ *
+ * Returns 0 if the erase succeeded, -EIO on an erase failure, -EAGAIN if the
+ * chip was not ready for too long
+ */
+static int doc_erase_block(struct docg3 *docg3, int block0, int block1)
+{
+ int ret, sector;
+
+ doc_dbg("doc_erase_block(blocks=(%d,%d))\n", block0, block1);
+ ret = doc_reset_seq(docg3);
+ if (ret)
+ return -EIO;
+
+ doc_set_reliable_mode(docg3);
+ doc_flash_sequence(docg3, DOC_SEQ_ERASE);
+
+ sector = block0 << DOC_ADDR_BLOCK_SHIFT;
+ doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+ doc_setup_addr_sector(docg3, sector);
+ sector = block1 << DOC_ADDR_BLOCK_SHIFT;
+ doc_flash_command(docg3, DOC_CMD_PROG_BLOCK_ADDR);
+ doc_setup_addr_sector(docg3, sector);
+ doc_delay(docg3, 1);
+
+ doc_flash_command(docg3, DOC_CMD_ERASECYCLE2);
+ doc_delay(docg3, 2);
+
+ if (is_prot_seq_error(docg3)) {
+ doc_err("Erase blocks %d,%d error\n", block0, block1);
+ return -EIO;
+ }
+
+ return doc_write_erase_wait_status(docg3);
+}
+
+/**
+ * doc_erase - Erase a portion of the chip
+ * @mtd: the device
+ * @info: the erase info
+ *
+ * Erase a bunch of contiguous blocks, by pairs, as a "mtd" page of 1024 is
+ * split into 2 pages of 512 bytes on 2 contiguous blocks.
+ *
+ * Returns 0 if the erase succeeded, -EINVAL on an addressing error, -EIO on an
+ * erase failure
+ */
+static int doc_erase(struct mtd_info *mtd, struct erase_info *info)
+{
+ struct docg3 *docg3 = mtd->priv;
+ uint64_t len;
+ int block0, block1, page, ret, ofs = 0;
+
+ doc_dbg("doc_erase(from=%lld, len=%lld\n", info->addr, info->len);
+ doc_set_device_id(docg3, docg3->device_id);
+
+ info->state = MTD_ERASE_PENDING;
+ calc_block_sector(info->addr + info->len, &block0, &block1, &page,
+ &ofs, docg3->reliable);
+ ret = -EINVAL;
+ if (block1 > docg3->max_block || page || ofs)
+ goto reset_err;
+
+ ret = 0;
+ calc_block_sector(info->addr, &block0, &block1, &page, &ofs,
+ docg3->reliable);
+ doc_set_reliable_mode(docg3);
+ for (len = info->len; !ret && len > 0; len -= mtd->erasesize) {
+ info->state = MTD_ERASING;
+ ret = doc_erase_block(docg3, block0, block1);
+ block0 += 2;
+ block1 += 2;
+ }
+
+ if (ret)
+ goto reset_err;
+
+ info->state = MTD_ERASE_DONE;
+ return 0;
+
+reset_err:
+ info->state = MTD_ERASE_FAILED;
+ return ret;
+}
+
+/**
+ * doc_write_page - Write a single page to the chip
+ * @docg3: the device
+ * @to: the offset from first block and first page, in bytes, aligned on page
+ * size
+ * @buf: buffer to get bytes from
+ * @oob: buffer to get out of band bytes from (can be NULL if no OOB should be
+ * written)
+ * @autoecc: if 0, all 16 bytes from OOB are taken, regardless of HW Hamming or
+ * BCH computations. If 1, only bytes 0-7 and byte 15 are taken,
+ * remaining ones are filled with hardware Hamming and BCH
+ * computations. Its value is not meaningful if oob == NULL.
+ *
+ * Write one full page (ie. 1 page split on two planes), of 512 bytes, with the
+ * OOB data. The OOB ECC is automatically computed by the hardware Hamming and
+ * BCH generator if autoecc is not null.
+ *
+ * Returns 0 if write successful, -EIO if write error, -EAGAIN if timeout
+ */
+static int doc_write_page(struct docg3 *docg3, loff_t to, const u_char *buf,
+ const u_char *oob, int autoecc)
+{
+ int block0, block1, page, ret, ofs = 0;
+ u8 hwecc[DOC_ECC_BCH_SIZE], hamming;
+
+ doc_dbg("doc_write_page(to=%lld)\n", to);
+ calc_block_sector(to, &block0, &block1, &page, &ofs, docg3->reliable);
+
+ doc_set_device_id(docg3, docg3->device_id);
+ ret = doc_reset_seq(docg3);
+ if (ret)
+ goto err;
+
+ /* Program the flash address block and page */
+ ret = doc_write_seek(docg3, block0, block1, page, ofs);
+ if (ret)
+ goto err;
+
+ doc_write_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
+ doc_delay(docg3, 2);
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_PAGE_SIZE, buf);
+
+ if (oob && autoecc) {
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_PAGEINFO_SZ, oob);
+ doc_delay(docg3, 2);
+ oob += DOC_LAYOUT_OOB_UNUSED_OFS;
+
+ hamming = doc_register_readb(docg3, DOC_HAMMINGPARITY);
+ doc_delay(docg3, 2);
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_HAMMING_SZ,
+ &hamming);
+ doc_delay(docg3, 2);
+
+ doc_get_bch_hw_ecc(docg3, hwecc);
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_BCH_SZ, hwecc);
+ doc_delay(docg3, 2);
+
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_UNUSED_SZ, oob);
+ }
+ if (oob && !autoecc)
+ doc_write_page_putbytes(docg3, DOC_LAYOUT_OOB_SIZE, oob);
+
+ doc_delay(docg3, 2);
+ doc_page_finish(docg3);
+ doc_delay(docg3, 2);
+ doc_flash_command(docg3, DOC_CMD_PROG_CYCLE2);
+ doc_delay(docg3, 2);
+
+ /*
+ * The wait status will perform another doc_page_finish() call, but that
+ * seems to please the docg3, so leave it.
+ */
+ ret = doc_write_erase_wait_status(docg3);
+ return ret;
+err:
+ doc_read_page_finish(docg3);
+ return ret;
+}
+
+/**
+ * doc_guess_autoecc - Guess autoecc mode from mtd_oob_ops
+ * @ops: the oob operations
+ *
+ * Returns 0 or 1 if success, -EINVAL if invalid oob mode
+ */
+static int doc_guess_autoecc(struct mtd_oob_ops *ops)
+{
+ int autoecc;
+
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ autoecc = 1;
+ break;
+ case MTD_OPS_RAW:
+ autoecc = 0;
+ break;
+ default:
+ autoecc = -EINVAL;
+ }
+ return autoecc;
+}
+
+/**
+ * doc_fill_autooob - Fill a 16 bytes OOB from 8 non-ECC bytes
+ * @dst: the target 16 bytes OOB buffer
+ * @oobsrc: the source 8 bytes non-ECC OOB buffer
+ *
+ */
+static void doc_fill_autooob(u8 *dst, u8 *oobsrc)
+{
+ memcpy(dst, oobsrc, DOC_LAYOUT_OOB_PAGEINFO_SZ);
+ dst[DOC_LAYOUT_OOB_UNUSED_OFS] = oobsrc[DOC_LAYOUT_OOB_PAGEINFO_SZ];
+}
+
+/**
+ * doc_backup_oob - Backup OOB into docg3 structure
+ * @docg3: the device
+ * @to: the page offset in the chip
+ * @ops: the OOB size and buffer
+ *
+ * As the docg3 should write a page with its OOB in one pass, and some userland
+ * applications do write_oob() to set up the OOB and then write(), store the OOB
+ * in a temporary buffer. This is very dangerous, as two concurrent
+ * applications could store an OOB, and then write their pages (which would
+ * result in one of them having its OOB corrupted).
+ *
+ * The only reliable way would be for userland to call doc_write_oob() with both
+ * the page data _and_ the OOB area.
+ *
+ * Returns 0 if success, -EINVAL if ops content invalid
+ */
+static int doc_backup_oob(struct docg3 *docg3, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int ooblen = ops->ooblen, autoecc;
+
+ if (ooblen != DOC_LAYOUT_OOB_SIZE)
+ return -EINVAL;
+ autoecc = doc_guess_autoecc(ops);
+ if (autoecc < 0)
+ return autoecc;
+
+ docg3->oob_write_ofs = to;
+ docg3->oob_autoecc = autoecc;
+ if (ops->mode == MTD_OPS_AUTO_OOB) {
+ doc_fill_autooob(docg3->oob_write_buf, ops->oobbuf);
+ ops->oobretlen = 8;
+ } else {
+ memcpy(docg3->oob_write_buf, ops->oobbuf, DOC_LAYOUT_OOB_SIZE);
+ ops->oobretlen = DOC_LAYOUT_OOB_SIZE;
+ }
+ return 0;
+}
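The two-step flow this backup exists for can be sketched through the generic mtd_info methods of this era (helper name and call sequence invented for illustration, not taken from any real tool):

static int docg3_write_page_with_oob(struct mtd_info *mtd, loff_t to,
				     const u_char *page_buf, u_char *oob)
{
	struct mtd_oob_ops ops = {
		.mode   = MTD_OPS_PLACE_OOB,
		.ooblen = mtd->oobsize,	/* 16 bytes on the docg3 */
		.oobbuf = oob,
		.datbuf = NULL,		/* OOB only: doc_write_oob() backs it up */
	};
	size_t retlen;
	int ret;

	ret = mtd->write_oob(mtd, to, &ops);	/* step 1: stash the OOB */
	if (ret)
		return ret;
	/* step 2: the page write at the same offset picks up the saved OOB */
	return mtd->write(mtd, to, mtd->writesize, &retlen, page_buf);
}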
+
+/**
+ * doc_write_oob - Write out of band bytes to flash
+ * @mtd: the device
+ * @ofs: the offset from first block and first page, in bytes, aligned on page
+ * size
+ * @ops: the mtd oob structure
+ *
+ * Either write OOB data into a temporary buffer, for a subsequent page write.
+ * The provided OOB should be 16 bytes long. If a data buffer is provided as
+ * well, issue the page write directly.
+ * Or provide data without OOB, in which case an all-zeroes OOB is used (ECC is
+ * still filled in if asked for).
+ *
+ * Returns 0 if successful, -EINVAL if the data or OOB lengths are invalid
+ */
+static int doc_write_oob(struct mtd_info *mtd, loff_t ofs,
+ struct mtd_oob_ops *ops)
+{
+ struct docg3 *docg3 = mtd->priv;
+ int block0, block1, page, ret, pofs = 0, autoecc, oobdelta;
+ u8 *oobbuf = ops->oobbuf;
+ u8 *buf = ops->datbuf;
+ size_t len, ooblen;
+ u8 oob[DOC_LAYOUT_OOB_SIZE];
+
+ if (buf)
+ len = ops->len;
+ else
+ len = 0;
+ if (oobbuf)
+ ooblen = ops->ooblen;
+ else
+ ooblen = 0;
+
+ if (oobbuf && ops->mode == MTD_OPS_PLACE_OOB)
+ oobbuf += ops->ooboffs;
+
+ doc_dbg("doc_write_oob(from=%lld, mode=%d, data=(%p:%zu), oob=(%p:%zu))\n",
+ ofs, ops->mode, buf, len, oobbuf, ooblen);
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_RAW:
+ oobdelta = mtd->oobsize;
+ break;
+ case MTD_OPS_AUTO_OOB:
+ oobdelta = mtd->ecclayout->oobavail;
+ break;
+ default:
+ oobdelta = 0;
+ }
+ if ((len % DOC_LAYOUT_PAGE_SIZE) || (ooblen % oobdelta) ||
+ (ofs % DOC_LAYOUT_PAGE_SIZE))
+ return -EINVAL;
+ if (len && ooblen &&
+ (len / DOC_LAYOUT_PAGE_SIZE) != (ooblen / oobdelta))
+ return -EINVAL;
+
+ ret = -EINVAL;
+ calc_block_sector(ofs + len, &block0, &block1, &page, &pofs,
+ docg3->reliable);
+ if (block1 > docg3->max_block)
+ goto err;
+
+ ops->oobretlen = 0;
+ ops->retlen = 0;
+ ret = 0;
+ if (len == 0 && ooblen == 0)
+ return -EINVAL;
+ if (len == 0 && ooblen > 0)
+ return doc_backup_oob(docg3, ofs, ops);
+
+ autoecc = doc_guess_autoecc(ops);
+ if (autoecc < 0)
+ return autoecc;
+
+ while (!ret && len > 0) {
+ memset(oob, 0, sizeof(oob));
+ if (ofs == docg3->oob_write_ofs)
+ memcpy(oob, docg3->oob_write_buf, DOC_LAYOUT_OOB_SIZE);
+ else if (ooblen > 0 && ops->mode == MTD_OPS_AUTO_OOB)
+ doc_fill_autooob(oob, oobbuf);
+ else if (ooblen > 0)
+ memcpy(oob, oobbuf, DOC_LAYOUT_OOB_SIZE);
+ ret = doc_write_page(docg3, ofs, buf, oob, autoecc);
+
+ ofs += DOC_LAYOUT_PAGE_SIZE;
+ len -= DOC_LAYOUT_PAGE_SIZE;
+ buf += DOC_LAYOUT_PAGE_SIZE;
+ if (ooblen) {
+ oobbuf += oobdelta;
+ ooblen -= oobdelta;
+ ops->oobretlen += oobdelta;
+ }
+ ops->retlen += DOC_LAYOUT_PAGE_SIZE;
+ }
+err:
+ doc_set_device_id(docg3, 0);
+ return ret;
+}
+
+/**
+ * doc_write - Write a buffer to the chip
+ * @mtd: the device
+ * @to: the offset from first block and first page, in bytes, aligned on page
+ * size
+ * @len: the number of bytes to write (must be a full page size, ie. 512)
+ * @retlen: the number of bytes actually written (0 or 512)
+ * @buf: the buffer to get bytes from
+ *
+ * Writes data to the chip.
+ *
+ * Returns 0 if write successful, -EIO if write error
+ */
+static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+{
+ struct docg3 *docg3 = mtd->priv;
+ int ret;
+ struct mtd_oob_ops ops;
+
+ doc_dbg("doc_write(to=%lld, len=%zu)\n", to, len);
+ ops.datbuf = (char *)buf;
+ ops.len = len;
+ ops.mode = MTD_OPS_PLACE_OOB;
+ ops.oobbuf = NULL;
+ ops.ooblen = 0;
+ ops.ooboffs = 0;
+
+ ret = doc_write_oob(mtd, to, &ops);
+ *retlen = ops.retlen;
+ return ret;
+}
+
+static struct docg3 *sysfs_dev2docg3(struct device *dev,
+ struct device_attribute *attr)
+{
+ int floor;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct mtd_info **docg3_floors = platform_get_drvdata(pdev);
+
+ floor = attr->attr.name[1] - '0';
+ if (floor < 0 || floor >= DOC_MAX_NBFLOORS)
+ return NULL;
+ else
+ return docg3_floors[floor]->priv;
+}
+
+static ssize_t dps0_is_key_locked(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+ int dps0;
+
+ doc_set_device_id(docg3, docg3->device_id);
+ dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
+ doc_set_device_id(docg3, 0);
+
+ return sprintf(buf, "%d\n", !(dps0 & DOC_DPS_KEY_OK));
+}
+
+static ssize_t dps1_is_key_locked(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+ int dps1;
+
+ doc_set_device_id(docg3, docg3->device_id);
+ dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
+ doc_set_device_id(docg3, 0);
+
+ return sprintf(buf, "%d\n", !(dps1 & DOC_DPS_KEY_OK));
+}
+
+static ssize_t dps0_insert_key(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+ int i;
+
+ if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
+ return -EINVAL;
+
+ doc_set_device_id(docg3, docg3->device_id);
+ for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
+ doc_writeb(docg3, buf[i], DOC_DPS0_KEY);
+ doc_set_device_id(docg3, 0);
+ return count;
+}
+
+static ssize_t dps1_insert_key(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct docg3 *docg3 = sysfs_dev2docg3(dev, attr);
+ int i;
+
+ if (count != DOC_LAYOUT_DPS_KEY_LENGTH)
+ return -EINVAL;
+
+ doc_set_device_id(docg3, docg3->device_id);
+ for (i = 0; i < DOC_LAYOUT_DPS_KEY_LENGTH; i++)
+ doc_writeb(docg3, buf[i], DOC_DPS1_KEY);
+ doc_set_device_id(docg3, 0);
+ return count;
+}
+
+#define FLOOR_SYSFS(id) { \
+ __ATTR(f##id##_dps0_is_keylocked, S_IRUGO, dps0_is_key_locked, NULL), \
+ __ATTR(f##id##_dps1_is_keylocked, S_IRUGO, dps1_is_key_locked, NULL), \
+ __ATTR(f##id##_dps0_protection_key, S_IWUGO, NULL, dps0_insert_key), \
+ __ATTR(f##id##_dps1_protection_key, S_IWUGO, NULL, dps1_insert_key), \
+}
+
+static struct device_attribute doc_sys_attrs[DOC_MAX_NBFLOORS][4] = {
+ FLOOR_SYSFS(0), FLOOR_SYSFS(1), FLOOR_SYSFS(2), FLOOR_SYSFS(3)
+};
+
+static int doc_register_sysfs(struct platform_device *pdev,
+ struct mtd_info **floors)
+{
+ int ret = 0, floor, i = 0;
+ struct device *dev = &pdev->dev;
+
+ for (floor = 0; !ret && floor < DOC_MAX_NBFLOORS && floors[floor];
+ floor++)
+ for (i = 0; !ret && i < 4; i++)
+ ret = device_create_file(dev, &doc_sys_attrs[floor][i]);
+ if (!ret)
+ return 0;
+ do {
+ while (--i >= 0)
+ device_remove_file(dev, &doc_sys_attrs[floor][i]);
+ i = 4;
+ } while (--floor >= 0);
+ return ret;
+}
+
+static void doc_unregister_sysfs(struct platform_device *pdev,
+ struct mtd_info **floors)
+{
+ struct device *dev = &pdev->dev;
+ int floor, i;
+
+ for (floor = 0; floor < DOC_MAX_NBFLOORS && floors[floor];
+ floor++)
+ for (i = 0; i < 4; i++)
+ device_remove_file(dev, &doc_sys_attrs[floor][i]);
+}
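From userspace, the protection keys registered above are driven through plain sysfs writes. A hedged sketch of that usage — the sysfs path is assumed and depends on the platform device name, and DOC_KEY_LEN stands in for DOC_LAYOUT_DPS_KEY_LENGTH:

#include <fcntl.h>
#include <unistd.h>

#define DOC_KEY_LEN 8	/* assumed: must equal DOC_LAYOUT_DPS_KEY_LENGTH */

static int docg3_unlock_dps0(const char key[DOC_KEY_LEN])
{
	int fd = open("/sys/devices/platform/docg3/f0_dps0_protection_key",
		      O_WRONLY);
	ssize_t w;

	if (fd < 0)
		return -1;
	/* The store hook rejects anything that is not exactly the key length. */
	w = write(fd, key, DOC_KEY_LEN);
	close(fd);
	return w == DOC_KEY_LEN ? 0 : -1;
}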
/*
* Debug sysfs entries
@@ -852,13 +1703,15 @@ static int dbg_protection_show(struct seq_file *s, void *p)
{
struct docg3 *docg3 = (struct docg3 *)s->private;
int pos = 0;
- int protect = doc_register_readb(docg3, DOC_PROTECTION);
- int dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
- int dps0_low = doc_register_readb(docg3, DOC_DPS0_ADDRLOW);
- int dps0_high = doc_register_readb(docg3, DOC_DPS0_ADDRHIGH);
- int dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
- int dps1_low = doc_register_readb(docg3, DOC_DPS1_ADDRLOW);
- int dps1_high = doc_register_readb(docg3, DOC_DPS1_ADDRHIGH);
+ int protect, dps0, dps0_low, dps0_high, dps1, dps1_low, dps1_high;
+
+ protect = doc_register_readb(docg3, DOC_PROTECTION);
+ dps0 = doc_register_readb(docg3, DOC_DPS0_STATUS);
+ dps0_low = doc_register_readw(docg3, DOC_DPS0_ADDRLOW);
+ dps0_high = doc_register_readw(docg3, DOC_DPS0_ADDRHIGH);
+ dps1 = doc_register_readb(docg3, DOC_DPS1_STATUS);
+ dps1_low = doc_register_readw(docg3, DOC_DPS1_ADDRLOW);
+ dps1_high = doc_register_readw(docg3, DOC_DPS1_ADDRHIGH);
pos += seq_printf(s, "Protection = 0x%02x (",
protect);
@@ -947,52 +1800,54 @@ static void __init doc_set_driver_info(int chip_id, struct mtd_info *mtd)
cfg = doc_register_readb(docg3, DOC_CONFIGURATION);
docg3->if_cfg = (cfg & DOC_CONF_IF_CFG ? 1 : 0);
+ docg3->reliable = reliable_mode;
switch (chip_id) {
case DOC_CHIPID_G3:
- mtd->name = "DiskOnChip G3";
+ mtd->name = kasprintf(GFP_KERNEL, "DiskOnChip G3 floor %d",
+ docg3->device_id);
docg3->max_block = 2047;
break;
}
mtd->type = MTD_NANDFLASH;
- /*
- * Once write methods are added, the correct flags will be set.
- * mtd->flags = MTD_CAP_NANDFLASH;
- */
- mtd->flags = MTD_CAP_ROM;
+ mtd->flags = MTD_CAP_NANDFLASH;
mtd->size = (docg3->max_block + 1) * DOC_LAYOUT_BLOCK_SIZE;
+ if (docg3->reliable == 2)
+ mtd->size /= 2;
mtd->erasesize = DOC_LAYOUT_BLOCK_SIZE * DOC_LAYOUT_NBPLANES;
+ if (docg3->reliable == 2)
+ mtd->erasesize /= 2;
mtd->writesize = DOC_LAYOUT_PAGE_SIZE;
mtd->oobsize = DOC_LAYOUT_OOB_SIZE;
mtd->owner = THIS_MODULE;
- mtd->erase = NULL;
- mtd->point = NULL;
- mtd->unpoint = NULL;
+ mtd->erase = doc_erase;
mtd->read = doc_read;
- mtd->write = NULL;
+ mtd->write = doc_write;
mtd->read_oob = doc_read_oob;
- mtd->write_oob = NULL;
- mtd->sync = NULL;
+ mtd->write_oob = doc_write_oob;
mtd->block_isbad = doc_block_isbad;
+ mtd->ecclayout = &docg3_oobinfo;
}
/**
- * doc_probe - Probe the IO space for a DiskOnChip G3 chip
- * @pdev: platform device
+ * doc_probe_device - Check if a device is available
+ * @base: the io space where the device is probed
+ * @floor: the floor of the probed device
+ * @dev: the device
*
- * Probes for a G3 chip at the specified IO space in the platform data
- * ressources.
+ * Checks whether a device is available at the specified IO range and floor.
*
- * Returns 0 on success, -ENOMEM, -ENXIO on error
+ * Returns an mtd_info struct if a device was found, NULL if no device is
+ * present, or an ERR_PTR if a memory allocation failed. If floor 0 is probed,
+ * a reset of the ASIC is issued first.
*/
-static int __init docg3_probe(struct platform_device *pdev)
+static struct mtd_info *doc_probe_device(void __iomem *base, int floor,
+ struct device *dev)
{
- struct device *dev = &pdev->dev;
- struct docg3 *docg3;
- struct mtd_info *mtd;
- struct resource *ress;
int ret, bbt_nbpages;
u16 chip_id, chip_id_inv;
+ struct docg3 *docg3;
+ struct mtd_info *mtd;
ret = -ENOMEM;
docg3 = kzalloc(sizeof(struct docg3), GFP_KERNEL);
@@ -1002,69 +1857,218 @@ static int __init docg3_probe(struct platform_device *pdev)
if (!mtd)
goto nomem2;
mtd->priv = docg3;
+ bbt_nbpages = DIV_ROUND_UP(docg3->max_block + 1,
+ 8 * DOC_LAYOUT_PAGE_SIZE);
+ docg3->bbt = kzalloc(bbt_nbpages * DOC_LAYOUT_PAGE_SIZE, GFP_KERNEL);
+ if (!docg3->bbt)
+ goto nomem3;
- ret = -ENXIO;
- ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!ress) {
- dev_err(dev, "No I/O memory resource defined\n");
- goto noress;
- }
- docg3->base = ioremap(ress->start, DOC_IOSPACE_SIZE);
-
- docg3->dev = &pdev->dev;
- docg3->device_id = 0;
+ docg3->dev = dev;
+ docg3->device_id = floor;
+ docg3->base = base;
doc_set_device_id(docg3, docg3->device_id);
- doc_set_asic_mode(docg3, DOC_ASICMODE_RESET);
+ if (!floor)
+ doc_set_asic_mode(docg3, DOC_ASICMODE_RESET);
doc_set_asic_mode(docg3, DOC_ASICMODE_NORMAL);
chip_id = doc_register_readw(docg3, DOC_CHIPID);
chip_id_inv = doc_register_readw(docg3, DOC_CHIPID_INV);
- ret = -ENODEV;
+ ret = 0;
if (chip_id != (u16)(~chip_id_inv)) {
- doc_info("No device found at IO addr %p\n",
- (void *)ress->start);
- goto nochipfound;
+ goto nomem3;
}
switch (chip_id) {
case DOC_CHIPID_G3:
- doc_info("Found a G3 DiskOnChip at addr %p\n",
- (void *)ress->start);
+ doc_info("Found a G3 DiskOnChip at addr %p, floor %d\n",
+ base, floor);
break;
default:
doc_err("Chip id %04x is not a DiskOnChip G3 chip\n", chip_id);
- goto nochipfound;
+ goto nomem3;
}
doc_set_driver_info(chip_id, mtd);
- platform_set_drvdata(pdev, mtd);
- ret = -ENOMEM;
- bbt_nbpages = DIV_ROUND_UP(docg3->max_block + 1,
- 8 * DOC_LAYOUT_PAGE_SIZE);
- docg3->bbt = kzalloc(bbt_nbpages * DOC_LAYOUT_PAGE_SIZE, GFP_KERNEL);
- if (!docg3->bbt)
- goto nochipfound;
+ doc_hamming_ecc_init(docg3, DOC_LAYOUT_OOB_PAGEINFO_SZ);
doc_reload_bbt(docg3);
+ return mtd;
- ret = mtd_device_parse_register(mtd, part_probes,
- NULL, NULL, 0);
- if (ret)
- goto register_error;
+nomem3:
+ kfree(mtd);
+nomem2:
+ kfree(docg3);
+nomem1:
+ return ERR_PTR(ret);
+}
- doc_dbg_register(docg3);
- return 0;
+/**
+ * doc_release_device - Release a docg3 floor
+ * @mtd: the device
+ */
+static void doc_release_device(struct mtd_info *mtd)
+{
+ struct docg3 *docg3 = mtd->priv;
-register_error:
+ mtd_device_unregister(mtd);
kfree(docg3->bbt);
-nochipfound:
- iounmap(docg3->base);
-noress:
+ kfree(docg3);
+ kfree(mtd->name);
kfree(mtd);
+}
+
+/**
+ * docg3_resume - Awaken the docg3 floors
+ * @pdev: platform device
+ *
+ * Returns 0 (always successful)
+ */
+static int docg3_resume(struct platform_device *pdev)
+{
+ int i;
+ struct mtd_info **docg3_floors, *mtd;
+ struct docg3 *docg3;
+
+ docg3_floors = platform_get_drvdata(pdev);
+ mtd = docg3_floors[0];
+ docg3 = mtd->priv;
+
+ doc_dbg("docg3_resume()\n");
+ for (i = 0; i < 12; i++)
+ doc_readb(docg3, DOC_IOSPACE_IPL);
+ return 0;
+}
+
+/**
+ * docg3_suspend - Put the docg3 floors into low power mode
+ * @pdev: platform device
+ * @state: power state
+ *
+ * Shuts off most of the docg3 circuitry to lower power consumption.
+ *
+ * Returns 0 if the suspend succeeded, -EIO if the chip refused to suspend
+ */
+static int docg3_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ int floor, i;
+ struct mtd_info **docg3_floors, *mtd;
+ struct docg3 *docg3;
+ u8 ctrl, pwr_down;
+
+ docg3_floors = platform_get_drvdata(pdev);
+ for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
+ mtd = docg3_floors[floor];
+ if (!mtd)
+ continue;
+ docg3 = mtd->priv;
+
+ doc_writeb(docg3, floor, DOC_DEVICESELECT);
+ ctrl = doc_register_readb(docg3, DOC_FLASHCONTROL);
+ ctrl &= ~DOC_CTRL_VIOLATION & ~DOC_CTRL_CE;
+ doc_writeb(docg3, ctrl, DOC_FLASHCONTROL);
+
+ for (i = 0; i < 10; i++) {
+ usleep_range(3000, 4000);
+ pwr_down = doc_register_readb(docg3, DOC_POWERMODE);
+ if (pwr_down & DOC_POWERDOWN_READY)
+ break;
+ }
+ if (pwr_down & DOC_POWERDOWN_READY) {
+ doc_dbg("docg3_suspend(): floor %d powerdown ok\n",
+ floor);
+ } else {
+ doc_err("docg3_suspend(): floor %d powerdown failed\n",
+ floor);
+ return -EIO;
+ }
+ }
+
+ mtd = docg3_floors[0];
+ docg3 = mtd->priv;
+ doc_set_asic_mode(docg3, DOC_ASICMODE_POWERDOWN);
+ return 0;
+}
+
+/**
+ * doc_probe - Probe the IO space for a DiskOnChip G3 chip
+ * @pdev: platform device
+ *
+ * Probes for a G3 chip at the specified IO space in the platform data
+ * resources. Floor 0 must be available.
+ *
+ * Returns 0 on success, -ENOMEM, -ENXIO on error
+ */
+static int __init docg3_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mtd_info *mtd;
+ struct resource *ress;
+ void __iomem *base;
+ int ret, floor, found = 0;
+ struct mtd_info **docg3_floors;
+
+ ret = -ENXIO;
+ ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!ress) {
+ dev_err(dev, "No I/O memory resource defined\n");
+ goto noress;
+ }
+ base = ioremap(ress->start, DOC_IOSPACE_SIZE);
+
+ ret = -ENOMEM;
+ docg3_floors = kzalloc(sizeof(*docg3_floors) * DOC_MAX_NBFLOORS,
+ GFP_KERNEL);
+ if (!docg3_floors)
+ goto nomem1;
+ docg3_bch = init_bch(DOC_ECC_BCH_M, DOC_ECC_BCH_T,
+ DOC_ECC_BCH_PRIMPOLY);
+ if (!docg3_bch)
+ goto nomem2;
+
+ for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++) {
+ mtd = doc_probe_device(base, floor, dev);
+ if (IS_ERR(mtd)) {
+ ret = PTR_ERR(mtd);
+ goto err_probe;
+ }
+ if (!mtd) {
+ if (floor == 0)
+ goto notfound;
+ else
+ continue;
+ }
+ docg3_floors[floor] = mtd;
+ ret = mtd_device_parse_register(mtd, part_probes, NULL, NULL,
+ 0);
+ if (ret)
+ goto err_probe;
+ found++;
+ }
+
+ ret = doc_register_sysfs(pdev, docg3_floors);
+ if (ret)
+ goto err_probe;
+ if (!found)
+ goto notfound;
+
+ platform_set_drvdata(pdev, docg3_floors);
+ doc_dbg_register(docg3_floors[0]->priv);
+ return 0;
+
+notfound:
+ ret = -ENODEV;
+ dev_info(dev, "No supported DiskOnChip found\n");
+err_probe:
+ free_bch(docg3_bch);
+ for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
+ if (docg3_floors[floor])
+ doc_release_device(docg3_floors[floor]);
nomem2:
- kfree(docg3);
+ kfree(docg3_floors);
nomem1:
+ iounmap(base);
+noress:
return ret;
}
@@ -1076,15 +2080,20 @@ nomem1:
*/
static int __exit docg3_release(struct platform_device *pdev)
{
- struct mtd_info *mtd = platform_get_drvdata(pdev);
- struct docg3 *docg3 = mtd->priv;
+ struct mtd_info **docg3_floors = platform_get_drvdata(pdev);
+ struct docg3 *docg3 = docg3_floors[0]->priv;
+ void __iomem *base = docg3->base;
+ int floor;
+ doc_unregister_sysfs(pdev, docg3_floors);
doc_dbg_unregister(docg3);
- mtd_device_unregister(mtd);
- iounmap(docg3->base);
- kfree(docg3->bbt);
- kfree(docg3);
- kfree(mtd);
+ for (floor = 0; floor < DOC_MAX_NBFLOORS; floor++)
+ if (docg3_floors[floor])
+ doc_release_device(docg3_floors[floor]);
+
+ kfree(docg3_floors);
+ free_bch(docg3_bch);
+ iounmap(base);
return 0;
}
@@ -1093,6 +2102,8 @@ static struct platform_driver g3_driver = {
.name = "docg3",
.owner = THIS_MODULE,
},
+ .suspend = docg3_suspend,
+ .resume = docg3_resume,
.remove = __exit_p(docg3_release),
};
diff --git a/drivers/mtd/devices/docg3.h b/drivers/mtd/devices/docg3.h
index 0d407be..db0da43 100644
--- a/drivers/mtd/devices/docg3.h
+++ b/drivers/mtd/devices/docg3.h
@@ -51,10 +51,19 @@
#define DOC_LAYOUT_WEAR_OFFSET (DOC_LAYOUT_PAGE_OOB_SIZE * 2)
#define DOC_LAYOUT_BLOCK_SIZE \
(DOC_LAYOUT_PAGES_PER_BLOCK * DOC_LAYOUT_PAGE_SIZE)
+
+/*
+ * ECC related constants
+ */
+#define DOC_ECC_BCH_M 14
+#define DOC_ECC_BCH_T 4
+#define DOC_ECC_BCH_PRIMPOLY 0x4443
#define DOC_ECC_BCH_SIZE 7
#define DOC_ECC_BCH_COVERED_BYTES \
(DOC_LAYOUT_PAGE_SIZE + DOC_LAYOUT_OOB_PAGEINFO_SZ + \
- DOC_LAYOUT_OOB_HAMMING_SZ + DOC_LAYOUT_OOB_BCH_SZ)
+ DOC_LAYOUT_OOB_HAMMING_SZ)
+#define DOC_ECC_BCH_TOTAL_BYTES \
+ (DOC_ECC_BCH_COVERED_BYTES + DOC_LAYOUT_OOB_BCH_SZ)
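
These BCH parameters are mutually consistent: with m = 14 and t = 4 the code needs m*t = 56 parity bits, which is exactly the DOC_ECC_BCH_SIZE of 7 bytes reserved in the OOB area. A hypothetical compile-time check of that relationship, which would have to live in function scope (e.g. at the top of docg3_probe()) and is not part of the patch, could look like:

    /* 7 OOB ECC bytes must hold the m*t = 14*4 = 56 BCH parity bits */
    BUILD_BUG_ON(DOC_ECC_BCH_SIZE * 8 < DOC_ECC_BCH_M * DOC_ECC_BCH_T);
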
/*
* Blocks distribution
@@ -80,6 +89,7 @@
#define DOC_CHIPID_G3 0x200
#define DOC_ERASE_MARK 0xaa
+#define DOC_MAX_NBFLOORS 4
/*
* Flash registers
*/
@@ -105,9 +115,11 @@
#define DOC_ECCCONF1 0x1042
#define DOC_ECCPRESET 0x1044
#define DOC_HAMMINGPARITY 0x1046
-#define DOC_BCH_SYNDROM(idx) (0x1048 + (idx << 1))
+#define DOC_BCH_HW_ECC(idx) (0x1048 + idx)
#define DOC_PROTECTION 0x1056
+#define DOC_DPS0_KEY 0x105c
+#define DOC_DPS1_KEY 0x105e
#define DOC_DPS0_ADDRLOW 0x1060
#define DOC_DPS0_ADDRHIGH 0x1062
#define DOC_DPS1_ADDRLOW 0x1064
@@ -117,6 +129,7 @@
#define DOC_ASICMODECONFIRM 0x1072
#define DOC_CHIPID_INV 0x1074
+#define DOC_POWERMODE 0x107c
/*
* Flash sequences
@@ -124,11 +137,14 @@
*/
#define DOC_SEQ_RESET 0x00
#define DOC_SEQ_PAGE_SIZE_532 0x03
-#define DOC_SEQ_SET_MODE 0x09
+#define DOC_SEQ_SET_FASTMODE 0x05
+#define DOC_SEQ_SET_RELIABLEMODE 0x09
#define DOC_SEQ_READ 0x12
#define DOC_SEQ_SET_PLANE1 0x0e
#define DOC_SEQ_SET_PLANE2 0x10
#define DOC_SEQ_PAGE_SETUP 0x1d
+#define DOC_SEQ_ERASE 0x27
+#define DOC_SEQ_PLANES_STATUS 0x31
/*
* Flash commands
@@ -143,7 +159,10 @@
#define DOC_CMD_PROG_BLOCK_ADDR 0x60
#define DOC_CMD_PROG_CYCLE1 0x80
#define DOC_CMD_PROG_CYCLE2 0x10
+#define DOC_CMD_PROG_CYCLE3 0x11
#define DOC_CMD_ERASECYCLE2 0xd0
+#define DOC_CMD_READ_STATUS 0x70
+#define DOC_CMD_PLANES_STATUS 0x71
#define DOC_CMD_RELIABLE_MODE 0x22
#define DOC_CMD_FAST_MODE 0xa2
@@ -174,6 +193,7 @@
/*
* Flash register : DOC_ECCCONF0
*/
+#define DOC_ECCCONF0_WRITE_MODE 0x0000
#define DOC_ECCCONF0_READ_MODE 0x8000
#define DOC_ECCCONF0_AUTO_ECC_ENABLE 0x4000
#define DOC_ECCCONF0_HAMMING_ENABLE 0x1000
@@ -185,7 +205,7 @@
*/
#define DOC_ECCCONF1_BCH_SYNDROM_ERR 0x80
#define DOC_ECCCONF1_UNKOWN1 0x40
-#define DOC_ECCCONF1_UNKOWN2 0x20
+#define DOC_ECCCONF1_PAGE_IS_WRITTEN 0x20
#define DOC_ECCCONF1_UNKOWN3 0x10
#define DOC_ECCCONF1_HAMMING_BITS_MASK 0x0f
@@ -223,13 +243,46 @@
#define DOC_READADDR_ONE_BYTE 0x4000
#define DOC_READADDR_ADDR_MASK 0x1fff
+/*
+ * Flash register : DOC_POWERMODE
+ */
+#define DOC_POWERDOWN_READY 0x80
+
+/*
+ * Status of erase and write operation
+ */
+#define DOC_PLANES_STATUS_FAIL 0x01
+#define DOC_PLANES_STATUS_PLANE0_KO 0x02
+#define DOC_PLANES_STATUS_PLANE1_KO 0x04
+
+/*
+ * DPS key management
+ *
+ * Each floor of docg3 has 2 protection areas: DPS0 and DPS1. These areas span
+ * across block boundaries, and define whether these blocks can be read or
+ * written.
+ * The definition is dynamically stored in page 0 of blocks (2,3) for DPS0, and
+ * page 0 of blocks (4,5) for DPS1.
+ */
+#define DOC_LAYOUT_DPS_KEY_LENGTH 8
+
/**
* struct docg3 - DiskOnChip driver private data
* @dev: the device currently under control
* @base: mapped IO space
* @device_id: number of the cascaded DoCG3 device (0, 1, 2 or 3)
* @if_cfg: if true, reads are on 16bits, else reads are on 8bits
+ *
+ * @reliable: if 0, docg3 is in normal mode; if 1, docg3 is in fast mode; if 2,
+ * docg3 is in reliable mode.
+ * Fast mode implies more errors than normal mode.
+ * Reliable mode implies that pages 2*n and 2*n+1 are clones.
* @bbt: bad block table cache
+ * @oob_write_ofs: offset of the MTD where this OOB should belong (i.e. in the
+ * next page_write)
+ * @oob_autoecc: if 1, use only bytes 0-7 and 15, and fill the others with HW
+ * ECC; if 0, use all 16 bytes.
+ * @oob_write_buf: prepared OOB for next page_write
* @debugfs_root: debugfs root node
*/
struct docg3 {
@@ -237,8 +290,12 @@ struct docg3 {
void __iomem *base;
unsigned int device_id:4;
unsigned int if_cfg:1;
+ unsigned int reliable:2;
int max_block;
u8 *bbt;
+ loff_t oob_write_ofs;
+ int oob_autoecc;
+ u8 oob_write_buf[DOC_LAYOUT_OOB_SIZE];
struct dentry *debugfs_root;
};
diff --git a/drivers/mtd/devices/docprobe.c b/drivers/mtd/devices/docprobe.c
index 45116bb..706b847 100644
--- a/drivers/mtd/devices/docprobe.c
+++ b/drivers/mtd/devices/docprobe.c
@@ -241,8 +241,7 @@ static void __init DoC_Probe(unsigned long physadr)
return;
}
docfound = 1;
- mtd = kmalloc(sizeof(struct DiskOnChip) + sizeof(struct mtd_info), GFP_KERNEL);
-
+ mtd = kzalloc(sizeof(struct DiskOnChip) + sizeof(struct mtd_info), GFP_KERNEL);
if (!mtd) {
printk(KERN_WARNING "Cannot allocate memory for data structures. Dropping.\n");
iounmap(docptr);
@@ -250,10 +249,6 @@ static void __init DoC_Probe(unsigned long physadr)
}
this = (struct DiskOnChip *)(&mtd[1]);
-
- memset((char *)mtd,0, sizeof(struct mtd_info));
- memset((char *)this, 0, sizeof(struct DiskOnChip));
-
mtd->priv = this;
this->virtadr = docptr;
this->physadr = physadr;
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 884904d..7c60ddd 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -992,7 +992,6 @@ static int __devexit m25p_remove(struct spi_device *spi)
static struct spi_driver m25p80_driver = {
.driver = {
.name = "m25p80",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.id_table = m25p_ids,
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index d75c7af..236057e 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -936,7 +936,6 @@ static int __devexit dataflash_remove(struct spi_device *spi)
static struct spi_driver dataflash_driver = {
.driver = {
.name = "mtd_dataflash",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
.of_match_table = dataflash_dt_ids,
},
diff --git a/drivers/mtd/devices/sst25l.c b/drivers/mtd/devices/sst25l.c
index d38ef3b..5fc1983 100644
--- a/drivers/mtd/devices/sst25l.c
+++ b/drivers/mtd/devices/sst25l.c
@@ -378,7 +378,7 @@ static int __devinit sst25l_probe(struct spi_device *spi)
struct flash_info *flash_info;
struct sst25l_flash *flash;
struct flash_platform_data *data;
- int ret, i;
+ int ret;
flash_info = sst25l_match_device(spi);
if (!flash_info)
@@ -444,7 +444,6 @@ static int __devexit sst25l_remove(struct spi_device *spi)
static struct spi_driver sst25l_driver = {
.driver = {
.name = "sst25l",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = sst25l_probe,
diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
index c7382bb..19d6372 100644
--- a/drivers/mtd/ftl.c
+++ b/drivers/mtd/ftl.c
@@ -168,8 +168,8 @@ static int scan_header(partition_t *part)
(offset + sizeof(header)) < max_offset;
offset += part->mbd.mtd->erasesize ? : 0x2000) {
- err = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &ret,
- (unsigned char *)&header);
+ err = mtd_read(part->mbd.mtd, offset, sizeof(header), &ret,
+ (unsigned char *)&header);
if (err)
return err;
@@ -224,8 +224,8 @@ static int build_maps(partition_t *part)
for (i = 0; i < le16_to_cpu(part->header.NumEraseUnits); i++) {
offset = ((i + le16_to_cpu(part->header.FirstPhysicalEUN))
<< part->header.EraseUnitSize);
- ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(header), &retval,
- (unsigned char *)&header);
+ ret = mtd_read(part->mbd.mtd, offset, sizeof(header), &retval,
+ (unsigned char *)&header);
if (ret)
goto out_XferInfo;
@@ -289,9 +289,9 @@ static int build_maps(partition_t *part)
part->EUNInfo[i].Deleted = 0;
offset = part->EUNInfo[i].Offset + le32_to_cpu(header.BAMOffset);
- ret = part->mbd.mtd->read(part->mbd.mtd, offset,
- part->BlocksPerUnit * sizeof(uint32_t), &retval,
- (unsigned char *)part->bam_cache);
+ ret = mtd_read(part->mbd.mtd, offset,
+ part->BlocksPerUnit * sizeof(uint32_t), &retval,
+ (unsigned char *)part->bam_cache);
if (ret)
goto out_bam_cache;
@@ -355,7 +355,7 @@ static int erase_xfer(partition_t *part,
erase->len = 1 << part->header.EraseUnitSize;
erase->priv = (u_long)part;
- ret = part->mbd.mtd->erase(part->mbd.mtd, erase);
+ ret = mtd_erase(part->mbd.mtd, erase);
if (!ret)
xfer->EraseCount++;
@@ -422,8 +422,8 @@ static int prepare_xfer(partition_t *part, int i)
header.LogicalEUN = cpu_to_le16(0xffff);
header.EraseCount = cpu_to_le32(xfer->EraseCount);
- ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset, sizeof(header),
- &retlen, (u_char *)&header);
+ ret = mtd_write(part->mbd.mtd, xfer->Offset, sizeof(header), &retlen,
+ (u_char *)&header);
if (ret) {
return ret;
@@ -438,8 +438,8 @@ static int prepare_xfer(partition_t *part, int i)
for (i = 0; i < nbam; i++, offset += sizeof(uint32_t)) {
- ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint32_t),
- &retlen, (u_char *)&ctl);
+ ret = mtd_write(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
+ (u_char *)&ctl);
if (ret)
return ret;
@@ -485,9 +485,9 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
offset = eun->Offset + le32_to_cpu(part->header.BAMOffset);
- ret = part->mbd.mtd->read(part->mbd.mtd, offset,
- part->BlocksPerUnit * sizeof(uint32_t),
- &retlen, (u_char *) (part->bam_cache));
+ ret = mtd_read(part->mbd.mtd, offset,
+ part->BlocksPerUnit * sizeof(uint32_t), &retlen,
+ (u_char *)(part->bam_cache));
/* mark the cache bad, in case we get an error later */
part->bam_index = 0xffff;
@@ -503,8 +503,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
offset = xfer->Offset + 20; /* Bad! */
unit = cpu_to_le16(0x7fff);
- ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint16_t),
- &retlen, (u_char *) &unit);
+ ret = mtd_write(part->mbd.mtd, offset, sizeof(uint16_t), &retlen,
+ (u_char *)&unit);
if (ret) {
printk( KERN_WARNING "ftl: Failed to write back to BAM cache in copy_erase_unit()!\n");
@@ -523,16 +523,16 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
break;
case BLOCK_DATA:
case BLOCK_REPLACEMENT:
- ret = part->mbd.mtd->read(part->mbd.mtd, src, SECTOR_SIZE,
- &retlen, (u_char *) buf);
+ ret = mtd_read(part->mbd.mtd, src, SECTOR_SIZE, &retlen,
+ (u_char *)buf);
if (ret) {
printk(KERN_WARNING "ftl: Error reading old xfer unit in copy_erase_unit\n");
return ret;
}
- ret = part->mbd.mtd->write(part->mbd.mtd, dest, SECTOR_SIZE,
- &retlen, (u_char *) buf);
+ ret = mtd_write(part->mbd.mtd, dest, SECTOR_SIZE, &retlen,
+ (u_char *)buf);
if (ret) {
printk(KERN_WARNING "ftl: Error writing new xfer unit in copy_erase_unit\n");
return ret;
@@ -550,9 +550,11 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
}
/* Write the BAM to the transfer unit */
- ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + le32_to_cpu(part->header.BAMOffset),
- part->BlocksPerUnit * sizeof(int32_t), &retlen,
- (u_char *)part->bam_cache);
+ ret = mtd_write(part->mbd.mtd,
+ xfer->Offset + le32_to_cpu(part->header.BAMOffset),
+ part->BlocksPerUnit * sizeof(int32_t),
+ &retlen,
+ (u_char *)part->bam_cache);
if (ret) {
printk( KERN_WARNING "ftl: Error writing BAM in copy_erase_unit\n");
return ret;
@@ -560,8 +562,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
/* All clear? Then update the LogicalEUN again */
- ret = part->mbd.mtd->write(part->mbd.mtd, xfer->Offset + 20, sizeof(uint16_t),
- &retlen, (u_char *)&srcunitswap);
+ ret = mtd_write(part->mbd.mtd, xfer->Offset + 20, sizeof(uint16_t),
+ &retlen, (u_char *)&srcunitswap);
if (ret) {
printk(KERN_WARNING "ftl: Error writing new LogicalEUN in copy_erase_unit\n");
@@ -648,8 +650,7 @@ static int reclaim_block(partition_t *part)
if (queued) {
pr_debug("ftl_cs: waiting for transfer "
"unit to be prepared...\n");
- if (part->mbd.mtd->sync)
- part->mbd.mtd->sync(part->mbd.mtd);
+ mtd_sync(part->mbd.mtd);
} else {
static int ne = 0;
if (++ne < 5)
@@ -747,10 +748,11 @@ static uint32_t find_free(partition_t *part)
/* Invalidate cache */
part->bam_index = 0xffff;
- ret = part->mbd.mtd->read(part->mbd.mtd,
- part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset),
- part->BlocksPerUnit * sizeof(uint32_t),
- &retlen, (u_char *) (part->bam_cache));
+ ret = mtd_read(part->mbd.mtd,
+ part->EUNInfo[eun].Offset + le32_to_cpu(part->header.BAMOffset),
+ part->BlocksPerUnit * sizeof(uint32_t),
+ &retlen,
+ (u_char *)(part->bam_cache));
if (ret) {
printk(KERN_WARNING"ftl: Error reading BAM in find_free\n");
@@ -810,8 +812,8 @@ static int ftl_read(partition_t *part, caddr_t buffer,
else {
offset = (part->EUNInfo[log_addr / bsize].Offset
+ (log_addr % bsize));
- ret = part->mbd.mtd->read(part->mbd.mtd, offset, SECTOR_SIZE,
- &retlen, (u_char *) buffer);
+ ret = mtd_read(part->mbd.mtd, offset, SECTOR_SIZE, &retlen,
+ (u_char *)buffer);
if (ret) {
printk(KERN_WARNING "Error reading MTD device in ftl_read()\n");
@@ -849,8 +851,8 @@ static int set_bam_entry(partition_t *part, uint32_t log_addr,
le32_to_cpu(part->header.BAMOffset));
#ifdef PSYCHO_DEBUG
- ret = part->mbd.mtd->read(part->mbd.mtd, offset, sizeof(uint32_t),
- &retlen, (u_char *)&old_addr);
+ ret = mtd_read(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
+ (u_char *)&old_addr);
if (ret) {
printk(KERN_WARNING"ftl: Error reading old_addr in set_bam_entry: %d\n",ret);
return ret;
@@ -886,8 +888,8 @@ static int set_bam_entry(partition_t *part, uint32_t log_addr,
#endif
part->bam_cache[blk] = le_virt_addr;
}
- ret = part->mbd.mtd->write(part->mbd.mtd, offset, sizeof(uint32_t),
- &retlen, (u_char *)&le_virt_addr);
+ ret = mtd_write(part->mbd.mtd, offset, sizeof(uint32_t), &retlen,
+ (u_char *)&le_virt_addr);
if (ret) {
printk(KERN_NOTICE "ftl_cs: set_bam_entry() failed!\n");
@@ -946,8 +948,7 @@ static int ftl_write(partition_t *part, caddr_t buffer,
part->EUNInfo[part->bam_index].Deleted++;
offset = (part->EUNInfo[part->bam_index].Offset +
blk * SECTOR_SIZE);
- ret = part->mbd.mtd->write(part->mbd.mtd, offset, SECTOR_SIZE, &retlen,
- buffer);
+ ret = mtd_write(part->mbd.mtd, offset, SECTOR_SIZE, &retlen, buffer);
if (ret) {
printk(KERN_NOTICE "ftl_cs: block write failed!\n");
diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
index dd034ef..28646c9 100644
--- a/drivers/mtd/inftlcore.c
+++ b/drivers/mtd/inftlcore.c
@@ -158,7 +158,7 @@ int inftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
ops.oobbuf = buf;
ops.datbuf = NULL;
- res = mtd->read_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+ res = mtd_read_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
*retlen = ops.oobretlen;
return res;
}
@@ -178,7 +178,7 @@ int inftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
ops.oobbuf = buf;
ops.datbuf = NULL;
- res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+ res = mtd_write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
*retlen = ops.oobretlen;
return res;
}
@@ -199,7 +199,7 @@ static int inftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
ops.datbuf = buf;
ops.len = len;
- res = mtd->write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
+ res = mtd_write_oob(mtd, offs & ~(mtd->writesize - 1), &ops);
*retlen = ops.retlen;
return res;
}
@@ -343,14 +343,17 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
if (BlockMap[block] == BLOCK_NIL)
continue;
- ret = mtd->read(mtd, (inftl->EraseSize * BlockMap[block]) +
- (block * SECTORSIZE), SECTORSIZE, &retlen,
- movebuf);
+ ret = mtd_read(mtd,
+ (inftl->EraseSize * BlockMap[block]) + (block * SECTORSIZE),
+ SECTORSIZE,
+ &retlen,
+ movebuf);
if (ret < 0 && !mtd_is_bitflip(ret)) {
- ret = mtd->read(mtd,
- (inftl->EraseSize * BlockMap[block]) +
- (block * SECTORSIZE), SECTORSIZE,
- &retlen, movebuf);
+ ret = mtd_read(mtd,
+ (inftl->EraseSize * BlockMap[block]) + (block * SECTORSIZE),
+ SECTORSIZE,
+ &retlen,
+ movebuf);
if (ret != -EIO)
pr_debug("INFTL: error went away on retry?\n");
}
@@ -914,7 +917,7 @@ foundit:
} else {
size_t retlen;
loff_t ptr = (thisEUN * inftl->EraseSize) + blockofs;
- int ret = mtd->read(mtd, ptr, SECTORSIZE, &retlen, buffer);
+ int ret = mtd_read(mtd, ptr, SECTORSIZE, &retlen, buffer);
/* Handle corrected bit flips gracefully */
if (ret < 0 && !mtd_is_bitflip(ret))
diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
index 2ff601f..4adc037 100644
--- a/drivers/mtd/inftlmount.c
+++ b/drivers/mtd/inftlmount.c
@@ -73,8 +73,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
* Check for BNAND header first. Then whinge if it's found
* but later checks fail.
*/
- ret = mtd->read(mtd, block * inftl->EraseSize,
- SECTORSIZE, &retlen, buf);
+ ret = mtd_read(mtd, block * inftl->EraseSize, SECTORSIZE,
+ &retlen, buf);
/* We ignore ret in case the ECC of the MediaHeader is invalid
(which is apparently acceptable) */
if (retlen != SECTORSIZE) {
@@ -118,8 +118,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
memcpy(mh, buf, sizeof(struct INFTLMediaHeader));
/* Read the spare media header at offset 4096 */
- mtd->read(mtd, block * inftl->EraseSize + 4096,
- SECTORSIZE, &retlen, buf);
+ mtd_read(mtd, block * inftl->EraseSize + 4096, SECTORSIZE,
+ &retlen, buf);
if (retlen != SECTORSIZE) {
printk(KERN_WARNING "INFTL: Unable to read spare "
"Media Header\n");
@@ -220,7 +220,7 @@ static int find_boot_record(struct INFTLrecord *inftl)
*/
instr->addr = ip->Reserved0 * inftl->EraseSize;
instr->len = inftl->EraseSize;
- mtd->erase(mtd, instr);
+ mtd_erase(mtd, instr);
}
if ((ip->lastUnit - ip->firstUnit + 1) < ip->virtualUnits) {
printk(KERN_WARNING "INFTL: Media Header "
@@ -306,7 +306,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
/* If any of the physical eraseblocks are bad, don't
use the unit. */
for (physblock = 0; physblock < inftl->EraseSize; physblock += inftl->mbd.mtd->erasesize) {
- if (inftl->mbd.mtd->block_isbad(inftl->mbd.mtd, i * inftl->EraseSize + physblock))
+ if (mtd_block_isbad(inftl->mbd.mtd,
+ i * inftl->EraseSize + physblock))
inftl->PUtable[i] = BLOCK_RESERVED;
}
}
@@ -342,7 +343,7 @@ static int check_free_sectors(struct INFTLrecord *inftl, unsigned int address,
int i;
for (i = 0; i < len; i += SECTORSIZE) {
- if (mtd->read(mtd, address, SECTORSIZE, &retlen, buf))
+ if (mtd_read(mtd, address, SECTORSIZE, &retlen, buf))
return -1;
if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
return -1;
@@ -393,7 +394,7 @@ int INFTL_formatblock(struct INFTLrecord *inftl, int block)
mark only the failed block in the bbt. */
for (physblock = 0; physblock < inftl->EraseSize;
physblock += instr->len, instr->addr += instr->len) {
- mtd->erase(inftl->mbd.mtd, instr);
+ mtd_erase(inftl->mbd.mtd, instr);
if (instr->state == MTD_ERASE_FAILED) {
printk(KERN_WARNING "INFTL: error while formatting block %d\n",
@@ -423,7 +424,7 @@ int INFTL_formatblock(struct INFTLrecord *inftl, int block)
fail:
/* could not format, update the bad block table (caller is responsible
for setting the PUtable to BLOCK_RESERVED on failure) */
- inftl->mbd.mtd->block_markbad(inftl->mbd.mtd, instr->addr);
+ mtd_block_markbad(inftl->mbd.mtd, instr->addr);
return -1;
}
diff --git a/drivers/mtd/lpddr/lpddr_cmds.c b/drivers/mtd/lpddr/lpddr_cmds.c
index 1dca31d..536bbce 100644
--- a/drivers/mtd/lpddr/lpddr_cmds.c
+++ b/drivers/mtd/lpddr/lpddr_cmds.c
@@ -70,19 +70,12 @@ struct mtd_info *lpddr_cmdset(struct map_info *map)
mtd->erase = lpddr_erase;
mtd->write = lpddr_write_buffers;
mtd->writev = lpddr_writev;
- mtd->read_oob = NULL;
- mtd->write_oob = NULL;
- mtd->sync = NULL;
mtd->lock = lpddr_lock;
mtd->unlock = lpddr_unlock;
- mtd->suspend = NULL;
- mtd->resume = NULL;
if (map_is_linear(map)) {
mtd->point = lpddr_point;
mtd->unpoint = lpddr_unpoint;
}
- mtd->block_isbad = NULL;
- mtd->block_markbad = NULL;
mtd->size = 1 << lpddr->qinfo->DevSizeShift;
mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 8e0c4bf9f..6c5c431 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -242,15 +242,6 @@ config MTD_NETtel
help
Support for flash chips on NETtel/SecureEdge/SnapGear boards.
-config MTD_BCM963XX
- tristate "Map driver for Broadcom BCM963xx boards"
- depends on BCM63XX
- select MTD_MAP_BANK_WIDTH_2
- select MTD_CFI_I1
- help
- Support for parsing CFE image tag and creating MTD partitions on
- Broadcom BCM63xx boards.
-
config MTD_LANTIQ
tristate "Lantiq SoC NOR support"
depends on LANTIQ
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index 45dcb8b..68a9a91 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -55,6 +55,5 @@ obj-$(CONFIG_MTD_BFIN_ASYNC) += bfin-async-flash.o
obj-$(CONFIG_MTD_RBTX4939) += rbtx4939-flash.o
obj-$(CONFIG_MTD_VMU) += vmu-flash.o
obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o
-obj-$(CONFIG_MTD_BCM963XX) += bcm963xx-flash.o
obj-$(CONFIG_MTD_LATCH_ADDR) += latch-addr-flash.o
obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o
diff --git a/drivers/mtd/maps/bcm963xx-flash.c b/drivers/mtd/maps/bcm963xx-flash.c
deleted file mode 100644
index 736ca10..0000000
--- a/drivers/mtd/maps/bcm963xx-flash.c
+++ /dev/null
@@ -1,277 +0,0 @@
-/*
- * Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org>
- * Mike Albon <malbon@openwrt.org>
- * Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/partitions.h>
-#include <linux/vmalloc.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include <asm/mach-bcm63xx/bcm963xx_tag.h>
-
-#define BCM63XX_BUSWIDTH 2 /* Buswidth */
-#define BCM63XX_EXTENDED_SIZE 0xBFC00000 /* Extended flash address */
-
-#define PFX KBUILD_MODNAME ": "
-
-static struct mtd_partition *parsed_parts;
-
-static struct mtd_info *bcm963xx_mtd_info;
-
-static struct map_info bcm963xx_map = {
- .name = "bcm963xx",
- .bankwidth = BCM63XX_BUSWIDTH,
-};
-
-static int parse_cfe_partitions(struct mtd_info *master,
- struct mtd_partition **pparts)
-{
- /* CFE, NVRAM and global Linux are always present */
- int nrparts = 3, curpart = 0;
- struct bcm_tag *buf;
- struct mtd_partition *parts;
- int ret;
- size_t retlen;
- unsigned int rootfsaddr, kerneladdr, spareaddr;
- unsigned int rootfslen, kernellen, sparelen, totallen;
- int namelen = 0;
- int i;
- char *boardid;
- char *tagversion;
-
- /* Allocate memory for buffer */
- buf = vmalloc(sizeof(struct bcm_tag));
- if (!buf)
- return -ENOMEM;
-
- /* Get the tag */
- ret = master->read(master, master->erasesize, sizeof(struct bcm_tag),
- &retlen, (void *)buf);
- if (retlen != sizeof(struct bcm_tag)) {
- vfree(buf);
- return -EIO;
- }
-
- sscanf(buf->kernel_address, "%u", &kerneladdr);
- sscanf(buf->kernel_length, "%u", &kernellen);
- sscanf(buf->total_length, "%u", &totallen);
- tagversion = &(buf->tag_version[0]);
- boardid = &(buf->board_id[0]);
-
- printk(KERN_INFO PFX "CFE boot tag found with version %s "
- "and board type %s\n", tagversion, boardid);
-
- kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
- rootfsaddr = kerneladdr + kernellen;
- spareaddr = roundup(totallen, master->erasesize) + master->erasesize;
- sparelen = master->size - spareaddr - master->erasesize;
- rootfslen = spareaddr - rootfsaddr;
-
- /* Determine number of partitions */
- namelen = 8;
- if (rootfslen > 0) {
- nrparts++;
- namelen += 6;
- };
- if (kernellen > 0) {
- nrparts++;
- namelen += 6;
- };
-
- /* Ask kernel for more memory */
- parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL);
- if (!parts) {
- vfree(buf);
- return -ENOMEM;
- };
-
- /* Start building partition list */
- parts[curpart].name = "CFE";
- parts[curpart].offset = 0;
- parts[curpart].size = master->erasesize;
- curpart++;
-
- if (kernellen > 0) {
- parts[curpart].name = "kernel";
- parts[curpart].offset = kerneladdr;
- parts[curpart].size = kernellen;
- curpart++;
- };
-
- if (rootfslen > 0) {
- parts[curpart].name = "rootfs";
- parts[curpart].offset = rootfsaddr;
- parts[curpart].size = rootfslen;
- if (sparelen > 0)
- parts[curpart].size += sparelen;
- curpart++;
- };
-
- parts[curpart].name = "nvram";
- parts[curpart].offset = master->size - master->erasesize;
- parts[curpart].size = master->erasesize;
-
- /* Global partition "linux" to make easy firmware upgrade */
- curpart++;
- parts[curpart].name = "linux";
- parts[curpart].offset = parts[0].size;
- parts[curpart].size = master->size - parts[0].size - parts[3].size;
-
- for (i = 0; i < nrparts; i++)
- printk(KERN_INFO PFX "Partition %d is %s offset %lx and "
- "length %lx\n", i, parts[i].name,
- (long unsigned int)(parts[i].offset),
- (long unsigned int)(parts[i].size));
-
- printk(KERN_INFO PFX "Spare partition is %x offset and length %x\n",
- spareaddr, sparelen);
- *pparts = parts;
- vfree(buf);
-
- return nrparts;
-};
-
-static int bcm963xx_detect_cfe(struct mtd_info *master)
-{
- int idoffset = 0x4e0;
- static char idstring[8] = "CFE1CFE1";
- char buf[9];
- int ret;
- size_t retlen;
-
- ret = master->read(master, idoffset, 8, &retlen, (void *)buf);
- buf[retlen] = 0;
- printk(KERN_INFO PFX "Read Signature value of %s\n", buf);
-
- return strncmp(idstring, buf, 8);
-}
-
-static int bcm963xx_probe(struct platform_device *pdev)
-{
- int err = 0;
- int parsed_nr_parts = 0;
- char *part_type;
- struct resource *r;
-
- r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r) {
- dev_err(&pdev->dev, "no resource supplied\n");
- return -ENODEV;
- }
-
- bcm963xx_map.phys = r->start;
- bcm963xx_map.size = resource_size(r);
- bcm963xx_map.virt = ioremap(r->start, resource_size(r));
- if (!bcm963xx_map.virt) {
- dev_err(&pdev->dev, "failed to ioremap\n");
- return -EIO;
- }
-
- dev_info(&pdev->dev, "0x%08lx at 0x%08x\n",
- bcm963xx_map.size, bcm963xx_map.phys);
-
- simple_map_init(&bcm963xx_map);
-
- bcm963xx_mtd_info = do_map_probe("cfi_probe", &bcm963xx_map);
- if (!bcm963xx_mtd_info) {
- dev_err(&pdev->dev, "failed to probe using CFI\n");
- bcm963xx_mtd_info = do_map_probe("jedec_probe", &bcm963xx_map);
- if (bcm963xx_mtd_info)
- goto probe_ok;
- dev_err(&pdev->dev, "failed to probe using JEDEC\n");
- err = -EIO;
- goto err_probe;
- }
-
-probe_ok:
- bcm963xx_mtd_info->owner = THIS_MODULE;
-
- /* This is mutually exclusive */
- if (bcm963xx_detect_cfe(bcm963xx_mtd_info) == 0) {
- dev_info(&pdev->dev, "CFE bootloader detected\n");
- if (parsed_nr_parts == 0) {
- int ret = parse_cfe_partitions(bcm963xx_mtd_info,
- &parsed_parts);
- if (ret > 0) {
- part_type = "CFE";
- parsed_nr_parts = ret;
- }
- }
- } else {
- dev_info(&pdev->dev, "unsupported bootloader\n");
- err = -ENODEV;
- goto err_probe;
- }
-
- return mtd_device_register(bcm963xx_mtd_info, parsed_parts,
- parsed_nr_parts);
-
-err_probe:
- iounmap(bcm963xx_map.virt);
- return err;
-}
-
-static int bcm963xx_remove(struct platform_device *pdev)
-{
- if (bcm963xx_mtd_info) {
- mtd_device_unregister(bcm963xx_mtd_info);
- map_destroy(bcm963xx_mtd_info);
- }
-
- if (bcm963xx_map.virt) {
- iounmap(bcm963xx_map.virt);
- bcm963xx_map.virt = 0;
- }
-
- return 0;
-}
-
-static struct platform_driver bcm63xx_mtd_dev = {
- .probe = bcm963xx_probe,
- .remove = bcm963xx_remove,
- .driver = {
- .name = "bcm963xx-flash",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init bcm963xx_mtd_init(void)
-{
- return platform_driver_register(&bcm63xx_mtd_dev);
-}
-
-static void __exit bcm963xx_mtd_exit(void)
-{
- platform_driver_unregister(&bcm63xx_mtd_dev);
-}
-
-module_init(bcm963xx_mtd_init);
-module_exit(bcm963xx_mtd_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Broadcom BCM63xx MTD driver for CFE and RedBoot");
-MODULE_AUTHOR("Daniel Dickinson <openwrt@cshore.neomailbox.net>");
-MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
-MODULE_AUTHOR("Mike Albon <malbon@openwrt.org>");
diff --git a/drivers/mtd/maps/bfin-async-flash.c b/drivers/mtd/maps/bfin-async-flash.c
index 6d6b2b5..650126c 100644
--- a/drivers/mtd/maps/bfin-async-flash.c
+++ b/drivers/mtd/maps/bfin-async-flash.c
@@ -190,17 +190,7 @@ static struct platform_driver bfin_flash_driver = {
},
};
-static int __init bfin_flash_init(void)
-{
- return platform_driver_register(&bfin_flash_driver);
-}
-module_init(bfin_flash_init);
-
-static void __exit bfin_flash_exit(void)
-{
- platform_driver_unregister(&bfin_flash_driver);
-}
-module_exit(bfin_flash_exit);
+module_platform_driver(bfin_flash_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MTD map driver for Blackfins with flash/ethernet on same async bank");
diff --git a/drivers/mtd/maps/gpio-addr-flash.c b/drivers/mtd/maps/gpio-addr-flash.c
index 1ec66f0..33cce89 100644
--- a/drivers/mtd/maps/gpio-addr-flash.c
+++ b/drivers/mtd/maps/gpio-addr-flash.c
@@ -279,17 +279,7 @@ static struct platform_driver gpio_flash_driver = {
},
};
-static int __init gpio_flash_init(void)
-{
- return platform_driver_register(&gpio_flash_driver);
-}
-module_init(gpio_flash_init);
-
-static void __exit gpio_flash_exit(void)
-{
- platform_driver_unregister(&gpio_flash_driver);
-}
-module_exit(gpio_flash_exit);
+module_platform_driver(gpio_flash_driver);
MODULE_AUTHOR("Mike Frysinger <vapier@gentoo.org>");
MODULE_DESCRIPTION("MTD map driver for flashes addressed physically and with gpios");
diff --git a/drivers/mtd/maps/ixp2000.c b/drivers/mtd/maps/ixp2000.c
index 437fcd2..fc7d4d0 100644
--- a/drivers/mtd/maps/ixp2000.c
+++ b/drivers/mtd/maps/ixp2000.c
@@ -246,18 +246,8 @@ static struct platform_driver ixp2000_flash_driver = {
},
};
-static int __init ixp2000_flash_init(void)
-{
- return platform_driver_register(&ixp2000_flash_driver);
-}
-
-static void __exit ixp2000_flash_exit(void)
-{
- platform_driver_unregister(&ixp2000_flash_driver);
-}
+module_platform_driver(ixp2000_flash_driver);
-module_init(ixp2000_flash_init);
-module_exit(ixp2000_flash_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Deepak Saxena <dsaxena@plexity.net>");
MODULE_ALIAS("platform:IXP2000-Flash");
diff --git a/drivers/mtd/maps/ixp4xx.c b/drivers/mtd/maps/ixp4xx.c
index 3040901..8b54101 100644
--- a/drivers/mtd/maps/ixp4xx.c
+++ b/drivers/mtd/maps/ixp4xx.c
@@ -270,19 +270,7 @@ static struct platform_driver ixp4xx_flash_driver = {
},
};
-static int __init ixp4xx_flash_init(void)
-{
- return platform_driver_register(&ixp4xx_flash_driver);
-}
-
-static void __exit ixp4xx_flash_exit(void)
-{
- platform_driver_unregister(&ixp4xx_flash_driver);
-}
-
-
-module_init(ixp4xx_flash_init);
-module_exit(ixp4xx_flash_exit);
+module_platform_driver(ixp4xx_flash_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MTD map driver for Intel IXP4xx systems");
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index 4f10e27..7b889de 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -159,7 +159,7 @@ ltq_mtd_probe(struct platform_device *pdev)
if (!ltq_mtd->mtd) {
dev_err(&pdev->dev, "probing failed\n");
err = -ENXIO;
- goto err_unmap;
+ goto err_free;
}
ltq_mtd->mtd->owner = THIS_MODULE;
@@ -179,8 +179,6 @@ ltq_mtd_probe(struct platform_device *pdev)
err_destroy:
map_destroy(ltq_mtd->mtd);
-err_unmap:
- iounmap(ltq_mtd->map->virt);
err_free:
kfree(ltq_mtd->map);
err_out:
@@ -198,8 +196,6 @@ ltq_mtd_remove(struct platform_device *pdev)
mtd_device_unregister(ltq_mtd->mtd);
map_destroy(ltq_mtd->mtd);
}
- if (ltq_mtd->map->virt)
- iounmap(ltq_mtd->map->virt);
kfree(ltq_mtd->map);
kfree(ltq_mtd);
}
diff --git a/drivers/mtd/maps/latch-addr-flash.c b/drivers/mtd/maps/latch-addr-flash.c
index 119baa7..8fed58e 100644
--- a/drivers/mtd/maps/latch-addr-flash.c
+++ b/drivers/mtd/maps/latch-addr-flash.c
@@ -223,17 +223,7 @@ static struct platform_driver latch_addr_flash_driver = {
},
};
-static int __init latch_addr_flash_init(void)
-{
- return platform_driver_register(&latch_addr_flash_driver);
-}
-module_init(latch_addr_flash_init);
-
-static void __exit latch_addr_flash_exit(void)
-{
- platform_driver_unregister(&latch_addr_flash_driver);
-}
-module_exit(latch_addr_flash_exit);
+module_platform_driver(latch_addr_flash_driver);
MODULE_AUTHOR("David Griego <dgriego@mvista.com>");
MODULE_DESCRIPTION("MTD map driver for flashes addressed physically with upper "
diff --git a/drivers/mtd/maps/physmap.c b/drivers/mtd/maps/physmap.c
index 66e8200..abc5626 100644
--- a/drivers/mtd/maps/physmap.c
+++ b/drivers/mtd/maps/physmap.c
@@ -85,6 +85,7 @@ static int physmap_flash_probe(struct platform_device *dev)
struct physmap_flash_data *physmap_data;
struct physmap_flash_info *info;
const char **probe_type;
+ const char **part_types;
int err = 0;
int i;
int devices_found = 0;
@@ -171,7 +172,9 @@ static int physmap_flash_probe(struct platform_device *dev)
if (err)
goto err_out;
- mtd_device_parse_register(info->cmtd, part_probe_types, 0,
+ part_types = physmap_data->part_probe_types ? : part_probe_types;
+
+ mtd_device_parse_register(info->cmtd, part_types, 0,
physmap_data->parts, physmap_data->nr_parts);
return 0;
@@ -187,9 +190,8 @@ static void physmap_flash_shutdown(struct platform_device *dev)
int i;
for (i = 0; i < MAX_RESOURCES && info->mtd[i]; i++)
- if (info->mtd[i]->suspend && info->mtd[i]->resume)
- if (info->mtd[i]->suspend(info->mtd[i]) == 0)
- info->mtd[i]->resume(info->mtd[i]);
+ if (mtd_suspend(info->mtd[i]) == 0)
+ mtd_resume(info->mtd[i]);
}
#else
#define physmap_flash_shutdown NULL
diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c
index 7d65f9d..2e6fb68 100644
--- a/drivers/mtd/maps/physmap_of.c
+++ b/drivers/mtd/maps/physmap_of.c
@@ -338,18 +338,7 @@ static struct platform_driver of_flash_driver = {
.remove = of_flash_remove,
};
-static int __init of_flash_init(void)
-{
- return platform_driver_register(&of_flash_driver);
-}
-
-static void __exit of_flash_exit(void)
-{
- platform_driver_unregister(&of_flash_driver);
-}
-
-module_init(of_flash_init);
-module_exit(of_flash_exit);
+module_platform_driver(of_flash_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool <vwool@ru.mvista.com>");
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index 2a25b67..436d121 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -125,8 +125,8 @@ static void pxa2xx_flash_shutdown(struct platform_device *dev)
{
struct pxa2xx_flash_info *info = platform_get_drvdata(dev);
- if (info && info->mtd->suspend(info->mtd) == 0)
- info->mtd->resume(info->mtd);
+ if (info && mtd_suspend(info->mtd) == 0)
+ mtd_resume(info->mtd);
}
#else
#define pxa2xx_flash_shutdown NULL
@@ -142,18 +142,7 @@ static struct platform_driver pxa2xx_flash_driver = {
.shutdown = pxa2xx_flash_shutdown,
};
-static int __init init_pxa2xx_flash(void)
-{
- return platform_driver_register(&pxa2xx_flash_driver);
-}
-
-static void __exit cleanup_pxa2xx_flash(void)
-{
- platform_driver_unregister(&pxa2xx_flash_driver);
-}
-
-module_init(init_pxa2xx_flash);
-module_exit(cleanup_pxa2xx_flash);
+module_platform_driver(pxa2xx_flash_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Nicolas Pitre <nico@fluxnic.net>");
diff --git a/drivers/mtd/maps/rbtx4939-flash.c b/drivers/mtd/maps/rbtx4939-flash.c
index 0237f19..3da63fc 100644
--- a/drivers/mtd/maps/rbtx4939-flash.c
+++ b/drivers/mtd/maps/rbtx4939-flash.c
@@ -119,9 +119,8 @@ static void rbtx4939_flash_shutdown(struct platform_device *dev)
{
struct rbtx4939_flash_info *info = platform_get_drvdata(dev);
- if (info->mtd->suspend && info->mtd->resume)
- if (info->mtd->suspend(info->mtd) == 0)
- info->mtd->resume(info->mtd);
+ if (mtd_suspend(info->mtd) == 0)
+ mtd_resume(info->mtd);
}
#else
#define rbtx4939_flash_shutdown NULL
@@ -137,18 +136,7 @@ static struct platform_driver rbtx4939_flash_driver = {
},
};
-static int __init rbtx4939_flash_init(void)
-{
- return platform_driver_register(&rbtx4939_flash_driver);
-}
-
-static void __exit rbtx4939_flash_exit(void)
-{
- platform_driver_unregister(&rbtx4939_flash_driver);
-}
-
-module_init(rbtx4939_flash_init);
-module_exit(rbtx4939_flash_exit);
+module_platform_driver(rbtx4939_flash_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RBTX4939 MTD map driver");
diff --git a/drivers/mtd/maps/sa1100-flash.c b/drivers/mtd/maps/sa1100-flash.c
index fa9c0a9..5028219 100644
--- a/drivers/mtd/maps/sa1100-flash.c
+++ b/drivers/mtd/maps/sa1100-flash.c
@@ -377,8 +377,8 @@ static int __exit sa1100_mtd_remove(struct platform_device *pdev)
static void sa1100_mtd_shutdown(struct platform_device *dev)
{
struct sa_info *info = platform_get_drvdata(dev);
- if (info && info->mtd->suspend(info->mtd) == 0)
- info->mtd->resume(info->mtd);
+ if (info && mtd_suspend(info->mtd) == 0)
+ mtd_resume(info->mtd);
}
#else
#define sa1100_mtd_shutdown NULL
@@ -394,18 +394,7 @@ static struct platform_driver sa1100_mtd_driver = {
},
};
-static int __init sa1100_mtd_init(void)
-{
- return platform_driver_register(&sa1100_mtd_driver);
-}
-
-static void __exit sa1100_mtd_exit(void)
-{
- platform_driver_unregister(&sa1100_mtd_driver);
-}
-
-module_init(sa1100_mtd_init);
-module_exit(sa1100_mtd_exit);
+module_platform_driver(sa1100_mtd_driver);
MODULE_AUTHOR("Nicolas Pitre");
MODULE_DESCRIPTION("SA1100 CFI map driver");
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index d88c842..934a72c 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -204,8 +204,7 @@ scb2_flash_remove(struct pci_dev *dev)
return;
/* disable flash writes */
- if (scb2_mtd->lock)
- scb2_mtd->lock(scb2_mtd, 0, scb2_mtd->size);
+ mtd_lock(scb2_mtd, 0, scb2_mtd->size);
mtd_device_unregister(scb2_mtd);
map_destroy(scb2_mtd);
diff --git a/drivers/mtd/maps/sun_uflash.c b/drivers/mtd/maps/sun_uflash.c
index 2d66234..175e537 100644
--- a/drivers/mtd/maps/sun_uflash.c
+++ b/drivers/mtd/maps/sun_uflash.c
@@ -158,15 +158,4 @@ static struct platform_driver uflash_driver = {
.remove = __devexit_p(uflash_remove),
};
-static int __init uflash_init(void)
-{
- return platform_driver_register(&uflash_driver);
-}
-
-static void __exit uflash_exit(void)
-{
- platform_driver_unregister(&uflash_driver);
-}
-
-module_init(uflash_init);
-module_exit(uflash_exit);
+module_platform_driver(uflash_driver);
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index ed8b5e7..424ca5f 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -215,7 +215,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
mutex_lock(&dev->lock);
- if (dev->open++)
+ if (dev->open)
goto unlock;
kref_get(&dev->ref);
@@ -235,6 +235,7 @@ static int blktrans_open(struct block_device *bdev, fmode_t mode)
goto error_release;
unlock:
+ dev->open++;
mutex_unlock(&dev->lock);
blktrans_dev_put(dev);
return ret;
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index 7c1dc90..af65912 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -85,7 +85,7 @@ static int erase_write (struct mtd_info *mtd, unsigned long pos,
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&wait_q, &wait);
- ret = mtd->erase(mtd, &erase);
+ ret = mtd_erase(mtd, &erase);
if (ret) {
set_current_state(TASK_RUNNING);
remove_wait_queue(&wait_q, &wait);
@@ -102,7 +102,7 @@ static int erase_write (struct mtd_info *mtd, unsigned long pos,
* Next, write the data to flash.
*/
- ret = mtd->write(mtd, pos, len, &retlen, buf);
+ ret = mtd_write(mtd, pos, len, &retlen, buf);
if (ret)
return ret;
if (retlen != len)
@@ -152,7 +152,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
mtd->name, pos, len);
if (!sect_size)
- return mtd->write(mtd, pos, len, &retlen, buf);
+ return mtd_write(mtd, pos, len, &retlen, buf);
while (len > 0) {
unsigned long sect_start = (pos/sect_size)*sect_size;
@@ -184,8 +184,8 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
mtdblk->cache_offset != sect_start) {
/* fill the cache with the current sector */
mtdblk->cache_state = STATE_EMPTY;
- ret = mtd->read(mtd, sect_start, sect_size,
- &retlen, mtdblk->cache_data);
+ ret = mtd_read(mtd, sect_start, sect_size,
+ &retlen, mtdblk->cache_data);
if (ret)
return ret;
if (retlen != sect_size)
@@ -222,7 +222,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
mtd->name, pos, len);
if (!sect_size)
- return mtd->read(mtd, pos, len, &retlen, buf);
+ return mtd_read(mtd, pos, len, &retlen, buf);
while (len > 0) {
unsigned long sect_start = (pos/sect_size)*sect_size;
@@ -241,7 +241,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
mtdblk->cache_offset == sect_start) {
memcpy (buf, mtdblk->cache_data + offset, size);
} else {
- ret = mtd->read(mtd, pos, size, &retlen, buf);
+ ret = mtd_read(mtd, pos, size, &retlen, buf);
if (ret)
return ret;
if (retlen != size)
@@ -322,8 +322,7 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
if (!--mtdblk->count) {
/* It was the last usage. Free the cache */
- if (mbd->mtd->sync)
- mbd->mtd->sync(mbd->mtd);
+ mtd_sync(mbd->mtd);
vfree(mtdblk->cache_data);
}
@@ -341,9 +340,7 @@ static int mtdblock_flush(struct mtd_blktrans_dev *dev)
mutex_lock(&mtdblk->cache_mutex);
write_cached_data(mtdblk);
mutex_unlock(&mtdblk->cache_mutex);
-
- if (dev->mtd->sync)
- dev->mtd->sync(dev->mtd);
+ mtd_sync(dev->mtd);
return 0;
}
diff --git a/drivers/mtd/mtdblock_ro.c b/drivers/mtd/mtdblock_ro.c
index 0470a6e..92759a9 100644
--- a/drivers/mtd/mtdblock_ro.c
+++ b/drivers/mtd/mtdblock_ro.c
@@ -30,7 +30,7 @@ static int mtdblock_readsect(struct mtd_blktrans_dev *dev,
{
size_t retlen;
- if (dev->mtd->read(dev->mtd, (block * 512), 512, &retlen, buf))
+ if (mtd_read(dev->mtd, (block * 512), 512, &retlen, buf))
return 1;
return 0;
}
@@ -40,7 +40,7 @@ static int mtdblock_writesect(struct mtd_blktrans_dev *dev,
{
size_t retlen;
- if (dev->mtd->write(dev->mtd, (block * 512), 512, &retlen, buf))
+ if (mtd_write(dev->mtd, (block * 512), 512, &retlen, buf))
return 1;
return 0;
}
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index e7dc732..50c6a1e 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -51,7 +51,7 @@ struct mtd_file_info {
enum mtd_file_modes mode;
};
-static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
+static loff_t mtdchar_lseek(struct file *file, loff_t offset, int orig)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
@@ -77,7 +77,7 @@ static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
-static int mtd_open(struct inode *inode, struct file *file)
+static int mtdchar_open(struct inode *inode, struct file *file)
{
int minor = iminor(inode);
int devnum = minor >> 1;
@@ -142,11 +142,11 @@ static int mtd_open(struct inode *inode, struct file *file)
out:
mutex_unlock(&mtd_mutex);
return ret;
-} /* mtd_open */
+} /* mtdchar_open */
/*====================================================================*/
-static int mtd_close(struct inode *inode, struct file *file)
+static int mtdchar_close(struct inode *inode, struct file *file)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
@@ -154,8 +154,8 @@ static int mtd_close(struct inode *inode, struct file *file)
pr_debug("MTD_close\n");
/* Only sync if opened RW */
- if ((file->f_mode & FMODE_WRITE) && mtd->sync)
- mtd->sync(mtd);
+ if ((file->f_mode & FMODE_WRITE))
+ mtd_sync(mtd);
iput(mfi->ino);
@@ -164,7 +164,7 @@ static int mtd_close(struct inode *inode, struct file *file)
kfree(mfi);
return 0;
-} /* mtd_close */
+} /* mtdchar_close */
/* Back in June 2001, dwmw2 wrote:
*
@@ -184,11 +184,12 @@ static int mtd_close(struct inode *inode, struct file *file)
* alignment requirements are not met in the NAND subdriver.
*/
-static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
+static ssize_t mtdchar_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
- size_t retlen=0;
+ size_t retlen;
size_t total_retlen=0;
int ret=0;
int len;
@@ -212,10 +213,12 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
switch (mfi->mode) {
case MTD_FILE_MODE_OTP_FACTORY:
- ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
+ ret = mtd_read_fact_prot_reg(mtd, *ppos, len,
+ &retlen, kbuf);
break;
case MTD_FILE_MODE_OTP_USER:
- ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
+ ret = mtd_read_user_prot_reg(mtd, *ppos, len,
+ &retlen, kbuf);
break;
case MTD_FILE_MODE_RAW:
{
@@ -226,12 +229,12 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
ops.oobbuf = NULL;
ops.len = len;
- ret = mtd->read_oob(mtd, *ppos, &ops);
+ ret = mtd_read_oob(mtd, *ppos, &ops);
retlen = ops.retlen;
break;
}
default:
- ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
+ ret = mtd_read(mtd, *ppos, len, &retlen, kbuf);
}
/* Nand returns -EBADMSG on ECC errors, but it returns
* the data. For our userspace tools it is important
@@ -265,9 +268,10 @@ static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t
kfree(kbuf);
return total_retlen;
-} /* mtd_read */
+} /* mtdchar_read */
-static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos)
+static ssize_t mtdchar_write(struct file *file, const char __user *buf, size_t count,
+ loff_t *ppos)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
@@ -306,11 +310,8 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
ret = -EROFS;
break;
case MTD_FILE_MODE_OTP_USER:
- if (!mtd->write_user_prot_reg) {
- ret = -EOPNOTSUPP;
- break;
- }
- ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
+ ret = mtd_write_user_prot_reg(mtd, *ppos, len,
+ &retlen, kbuf);
break;
case MTD_FILE_MODE_RAW:
@@ -323,13 +324,13 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
ops.ooboffs = 0;
ops.len = len;
- ret = mtd->write_oob(mtd, *ppos, &ops);
+ ret = mtd_write_oob(mtd, *ppos, &ops);
retlen = ops.retlen;
break;
}
default:
- ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
+ ret = mtd_write(mtd, *ppos, len, &retlen, kbuf);
}
if (!ret) {
*ppos += retlen;
@@ -345,7 +346,7 @@ static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count
kfree(kbuf);
return total_retlen;
-} /* mtd_write */
+} /* mtdchar_write */
/*======================================================================
@@ -361,20 +362,22 @@ static void mtdchar_erase_callback (struct erase_info *instr)
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
struct mtd_info *mtd = mfi->mtd;
+ size_t retlen;
int ret = 0;
+ /*
+ * Make a fake call to mtd_read_fact_prot_reg() to check if OTP
+ * operations are supported.
+ */
+ if (mtd_read_fact_prot_reg(mtd, -1, -1, &retlen, NULL) == -EOPNOTSUPP)
+ return -EOPNOTSUPP;
+
switch (mode) {
case MTD_OTP_FACTORY:
- if (!mtd->read_fact_prot_reg)
- ret = -EOPNOTSUPP;
- else
- mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
+ mfi->mode = MTD_FILE_MODE_OTP_FACTORY;
break;
case MTD_OTP_USER:
- if (!mtd->read_fact_prot_reg)
- ret = -EOPNOTSUPP;
- else
- mfi->mode = MTD_FILE_MODE_OTP_USER;
+ mfi->mode = MTD_FILE_MODE_OTP_USER;
break;
default:
ret = -EINVAL;
@@ -387,7 +390,7 @@ static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
# define otp_select_filemode(f,m) -EOPNOTSUPP
#endif
-static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
+static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd,
uint64_t start, uint32_t length, void __user *ptr,
uint32_t __user *retp)
{
@@ -424,7 +427,7 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
return PTR_ERR(ops.oobbuf);
start &= ~((uint64_t)mtd->writesize - 1);
- ret = mtd->write_oob(mtd, start, &ops);
+ ret = mtd_write_oob(mtd, start, &ops);
if (ops.oobretlen > 0xFFFFFFFFU)
ret = -EOVERFLOW;
@@ -436,7 +439,7 @@ static int mtd_do_writeoob(struct file *file, struct mtd_info *mtd,
return ret;
}
-static int mtd_do_readoob(struct file *file, struct mtd_info *mtd,
+static int mtdchar_readoob(struct file *file, struct mtd_info *mtd,
uint64_t start, uint32_t length, void __user *ptr,
uint32_t __user *retp)
{
@@ -447,13 +450,8 @@ static int mtd_do_readoob(struct file *file, struct mtd_info *mtd,
if (length > 4096)
return -EINVAL;
- if (!mtd->read_oob)
- ret = -EOPNOTSUPP;
- else
- ret = access_ok(VERIFY_WRITE, ptr,
- length) ? 0 : -EFAULT;
- if (ret)
- return ret;
+ if (!access_ok(VERIFY_WRITE, ptr, length))
+ return -EFAULT;
ops.ooblen = length;
ops.ooboffs = start & (mtd->writesize - 1);
@@ -469,7 +467,7 @@ static int mtd_do_readoob(struct file *file, struct mtd_info *mtd,
return -ENOMEM;
start &= ~((uint64_t)mtd->writesize - 1);
- ret = mtd->read_oob(mtd, start, &ops);
+ ret = mtd_read_oob(mtd, start, &ops);
if (put_user(ops.oobretlen, retp))
ret = -EFAULT;
@@ -530,7 +528,7 @@ static int shrink_ecclayout(const struct nand_ecclayout *from,
return 0;
}
-static int mtd_blkpg_ioctl(struct mtd_info *mtd,
+static int mtdchar_blkpg_ioctl(struct mtd_info *mtd,
struct blkpg_ioctl_arg __user *arg)
{
struct blkpg_ioctl_arg a;
@@ -566,7 +564,7 @@ static int mtd_blkpg_ioctl(struct mtd_info *mtd,
}
}
-static int mtd_write_ioctl(struct mtd_info *mtd,
+static int mtdchar_write_ioctl(struct mtd_info *mtd,
struct mtd_write_req __user *argp)
{
struct mtd_write_req req;
@@ -607,7 +605,7 @@ static int mtd_write_ioctl(struct mtd_info *mtd,
ops.oobbuf = NULL;
}
- ret = mtd->write_oob(mtd, (loff_t)req.start, &ops);
+ ret = mtd_write_oob(mtd, (loff_t)req.start, &ops);
kfree(ops.datbuf);
kfree(ops.oobbuf);
@@ -615,7 +613,7 @@ static int mtd_write_ioctl(struct mtd_info *mtd,
return ret;
}
-static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
+static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg)
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
@@ -729,7 +727,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
wq_head is no longer there when the
callback routine tries to wake us up.
*/
- ret = mtd->erase(mtd, erase);
+ ret = mtd_erase(mtd, erase);
if (!ret) {
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&waitq, &wait);
@@ -755,7 +753,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
+ ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
buf.ptr, &buf_user->length);
break;
}
@@ -769,7 +767,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_readoob(file, mtd, buf.start, buf.length,
+ ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
buf.ptr, &buf_user->start);
break;
}
@@ -782,7 +780,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_writeoob(file, mtd, buf.start, buf.length,
+ ret = mtdchar_writeoob(file, mtd, buf.start, buf.length,
(void __user *)(uintptr_t)buf.usr_ptr,
&buf_user->length);
break;
@@ -796,7 +794,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_readoob(file, mtd, buf.start, buf.length,
+ ret = mtdchar_readoob(file, mtd, buf.start, buf.length,
(void __user *)(uintptr_t)buf.usr_ptr,
&buf_user->length);
break;
@@ -804,7 +802,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
case MEMWRITE:
{
- ret = mtd_write_ioctl(mtd,
+ ret = mtdchar_write_ioctl(mtd,
(struct mtd_write_req __user *)arg);
break;
}
@@ -816,10 +814,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&einfo, argp, sizeof(einfo)))
return -EFAULT;
- if (!mtd->lock)
- ret = -EOPNOTSUPP;
- else
- ret = mtd->lock(mtd, einfo.start, einfo.length);
+ ret = mtd_lock(mtd, einfo.start, einfo.length);
break;
}
@@ -830,10 +825,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&einfo, argp, sizeof(einfo)))
return -EFAULT;
- if (!mtd->unlock)
- ret = -EOPNOTSUPP;
- else
- ret = mtd->unlock(mtd, einfo.start, einfo.length);
+ ret = mtd_unlock(mtd, einfo.start, einfo.length);
break;
}
@@ -844,10 +836,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&einfo, argp, sizeof(einfo)))
return -EFAULT;
- if (!mtd->is_locked)
- ret = -EOPNOTSUPP;
- else
- ret = mtd->is_locked(mtd, einfo.start, einfo.length);
+ ret = mtd_is_locked(mtd, einfo.start, einfo.length);
break;
}
@@ -878,10 +867,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&offs, argp, sizeof(loff_t)))
return -EFAULT;
- if (!mtd->block_isbad)
- ret = -EOPNOTSUPP;
- else
- return mtd->block_isbad(mtd, offs);
+ return mtd_block_isbad(mtd, offs);
break;
}
@@ -891,10 +877,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
if (copy_from_user(&offs, argp, sizeof(loff_t)))
return -EFAULT;
- if (!mtd->block_markbad)
- ret = -EOPNOTSUPP;
- else
- return mtd->block_markbad(mtd, offs);
+ return mtd_block_markbad(mtd, offs);
break;
}
@@ -919,17 +902,15 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
if (!buf)
return -ENOMEM;
- ret = -EOPNOTSUPP;
switch (mfi->mode) {
case MTD_FILE_MODE_OTP_FACTORY:
- if (mtd->get_fact_prot_info)
- ret = mtd->get_fact_prot_info(mtd, buf, 4096);
+ ret = mtd_get_fact_prot_info(mtd, buf, 4096);
break;
case MTD_FILE_MODE_OTP_USER:
- if (mtd->get_user_prot_info)
- ret = mtd->get_user_prot_info(mtd, buf, 4096);
+ ret = mtd_get_user_prot_info(mtd, buf, 4096);
break;
default:
+ ret = -EINVAL;
break;
}
if (ret >= 0) {
@@ -953,9 +934,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
return -EINVAL;
if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
return -EFAULT;
- if (!mtd->lock_user_prot_reg)
- return -EOPNOTSUPP;
- ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
+ ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
break;
}
#endif
@@ -999,7 +978,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
break;
case MTD_FILE_MODE_RAW:
- if (!mtd->read_oob || !mtd->write_oob)
+ if (!mtd_has_oob(mtd))
return -EOPNOTSUPP;
mfi->mode = arg;
@@ -1014,7 +993,7 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
case BLKPG:
{
- ret = mtd_blkpg_ioctl(mtd,
+ ret = mtdchar_blkpg_ioctl(mtd,
(struct blkpg_ioctl_arg __user *)arg);
break;
}
@@ -1033,12 +1012,12 @@ static int mtd_ioctl(struct file *file, u_int cmd, u_long arg)
return ret;
} /* memory_ioctl */
-static long mtd_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
+static long mtdchar_unlocked_ioctl(struct file *file, u_int cmd, u_long arg)
{
int ret;
mutex_lock(&mtd_mutex);
- ret = mtd_ioctl(file, cmd, arg);
+ ret = mtdchar_ioctl(file, cmd, arg);
mutex_unlock(&mtd_mutex);
return ret;
@@ -1055,7 +1034,7 @@ struct mtd_oob_buf32 {
#define MEMWRITEOOB32 _IOWR('M', 3, struct mtd_oob_buf32)
#define MEMREADOOB32 _IOWR('M', 4, struct mtd_oob_buf32)
-static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
+static long mtdchar_compat_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct mtd_file_info *mfi = file->private_data;
@@ -1074,7 +1053,7 @@ static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_writeoob(file, mtd, buf.start,
+ ret = mtdchar_writeoob(file, mtd, buf.start,
buf.length, compat_ptr(buf.ptr),
&buf_user->length);
break;
@@ -1089,13 +1068,13 @@ static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
if (copy_from_user(&buf, argp, sizeof(buf)))
ret = -EFAULT;
else
- ret = mtd_do_readoob(file, mtd, buf.start,
+ ret = mtdchar_readoob(file, mtd, buf.start,
buf.length, compat_ptr(buf.ptr),
&buf_user->start);
break;
}
default:
- ret = mtd_ioctl(file, cmd, (unsigned long)argp);
+ ret = mtdchar_ioctl(file, cmd, (unsigned long)argp);
}
mutex_unlock(&mtd_mutex);
@@ -1111,7 +1090,7 @@ static long mtd_compat_ioctl(struct file *file, unsigned int cmd,
* mappings)
*/
#ifndef CONFIG_MMU
-static unsigned long mtd_get_unmapped_area(struct file *file,
+static unsigned long mtdchar_get_unmapped_area(struct file *file,
unsigned long addr,
unsigned long len,
unsigned long pgoff,
@@ -1119,32 +1098,28 @@ static unsigned long mtd_get_unmapped_area(struct file *file,
{
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
+ unsigned long offset;
+ int ret;
- if (mtd->get_unmapped_area) {
- unsigned long offset;
-
- if (addr != 0)
- return (unsigned long) -EINVAL;
-
- if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
- return (unsigned long) -EINVAL;
+ if (addr != 0)
+ return (unsigned long) -EINVAL;
- offset = pgoff << PAGE_SHIFT;
- if (offset > mtd->size - len)
- return (unsigned long) -EINVAL;
+ if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
+ return (unsigned long) -EINVAL;
- return mtd->get_unmapped_area(mtd, len, offset, flags);
- }
+ offset = pgoff << PAGE_SHIFT;
+ if (offset > mtd->size - len)
+ return (unsigned long) -EINVAL;
- /* can't map directly */
- return (unsigned long) -ENOSYS;
+ ret = mtd_get_unmapped_area(mtd, len, offset, flags);
+ return ret == -EOPNOTSUPP ? -ENOSYS : ret;
}
#endif
/*
* set up a mapping for shared memory segments
*/
-static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
+static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
{
#ifdef CONFIG_MMU
struct mtd_file_info *mfi = file->private_data;
@@ -1185,18 +1160,18 @@ static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
static const struct file_operations mtd_fops = {
.owner = THIS_MODULE,
- .llseek = mtd_lseek,
- .read = mtd_read,
- .write = mtd_write,
- .unlocked_ioctl = mtd_unlocked_ioctl,
+ .llseek = mtdchar_lseek,
+ .read = mtdchar_read,
+ .write = mtdchar_write,
+ .unlocked_ioctl = mtdchar_unlocked_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = mtd_compat_ioctl,
+ .compat_ioctl = mtdchar_compat_ioctl,
#endif
- .open = mtd_open,
- .release = mtd_close,
- .mmap = mtd_mmap,
+ .open = mtdchar_open,
+ .release = mtdchar_close,
+ .mmap = mtdchar_mmap,
#ifndef CONFIG_MMU
- .get_unmapped_area = mtd_get_unmapped_area,
+ .get_unmapped_area = mtdchar_get_unmapped_area,
#endif
};
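
The mtdchar conversion above drops the per-caller "is this handler implemented?" checks and calls the new mtd_read()/mtd_write()/mtd_sync() helpers instead of going through the mtd_info function pointers directly. The helpers themselves are outside this diff; as a minimal sketch, assuming they are thin static inlines in include/linux/mtd/mtd.h, they would look roughly like:

	/* Sketch only: assumed shape of the wrappers, not part of this diff. */
	static inline int mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
				   size_t *retlen, u_char *buf)
	{
		return mtd->read(mtd, from, len, retlen, buf);
	}

	static inline void mtd_sync(struct mtd_info *mtd)
	{
		if (mtd->sync)		/* optional handler: no-op when absent */
			mtd->sync(mtd);
	}

With that shape, mtdchar_close() no longer needs to test mtd->sync before syncing, which is exactly the check the hunk above removes.
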
diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c
index 6df4d4d..1ed5103 100644
--- a/drivers/mtd/mtdconcat.c
+++ b/drivers/mtd/mtdconcat.c
@@ -91,7 +91,7 @@ concat_read(struct mtd_info *mtd, loff_t from, size_t len,
/* Entire transaction goes into this subdev */
size = len;
- err = subdev->read(subdev, from, size, &retsize, buf);
+ err = mtd_read(subdev, from, size, &retsize, buf);
/* Save information about bitflips! */
if (unlikely(err)) {
@@ -148,7 +148,7 @@ concat_write(struct mtd_info *mtd, loff_t to, size_t len,
if (!(subdev->flags & MTD_WRITEABLE))
err = -EROFS;
else
- err = subdev->write(subdev, to, size, &retsize, buf);
+ err = mtd_write(subdev, to, size, &retsize, buf);
if (err)
break;
@@ -227,8 +227,9 @@ concat_writev(struct mtd_info *mtd, const struct kvec *vecs,
if (!(subdev->flags & MTD_WRITEABLE))
err = -EROFS;
else
- err = subdev->writev(subdev, &vecs_copy[entry_low],
- entry_high - entry_low + 1, to, &retsize);
+ err = mtd_writev(subdev, &vecs_copy[entry_low],
+ entry_high - entry_low + 1, to,
+ &retsize);
vecs_copy[entry_high].iov_len = old_iov_len - size;
vecs_copy[entry_high].iov_base += size;
@@ -273,7 +274,7 @@ concat_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
if (from + devops.len > subdev->size)
devops.len = subdev->size - from;
- err = subdev->read_oob(subdev, from, &devops);
+ err = mtd_read_oob(subdev, from, &devops);
ops->retlen += devops.retlen;
ops->oobretlen += devops.oobretlen;
@@ -333,7 +334,7 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops)
if (to + devops.len > subdev->size)
devops.len = subdev->size - to;
- err = subdev->write_oob(subdev, to, &devops);
+ err = mtd_write_oob(subdev, to, &devops);
ops->retlen += devops.oobretlen;
if (err)
return err;
@@ -379,7 +380,7 @@ static int concat_dev_erase(struct mtd_info *mtd, struct erase_info *erase)
* FIXME: Allow INTERRUPTIBLE. Which means
* not having the wait_queue head on the stack.
*/
- err = mtd->erase(mtd, erase);
+ err = mtd_erase(mtd, erase);
if (!err) {
set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&waitq, &wait);
@@ -554,12 +555,9 @@ static int concat_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
else
size = len;
- if (subdev->lock) {
- err = subdev->lock(subdev, ofs, size);
- if (err)
- break;
- } else
- err = -EOPNOTSUPP;
+ err = mtd_lock(subdev, ofs, size);
+ if (err)
+ break;
len -= size;
if (len == 0)
@@ -594,12 +592,9 @@ static int concat_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
else
size = len;
- if (subdev->unlock) {
- err = subdev->unlock(subdev, ofs, size);
- if (err)
- break;
- } else
- err = -EOPNOTSUPP;
+ err = mtd_unlock(subdev, ofs, size);
+ if (err)
+ break;
len -= size;
if (len == 0)
@@ -619,7 +614,7 @@ static void concat_sync(struct mtd_info *mtd)
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
- subdev->sync(subdev);
+ mtd_sync(subdev);
}
}
@@ -630,7 +625,7 @@ static int concat_suspend(struct mtd_info *mtd)
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
- if ((rc = subdev->suspend(subdev)) < 0)
+ if ((rc = mtd_suspend(subdev)) < 0)
return rc;
}
return rc;
@@ -643,7 +638,7 @@ static void concat_resume(struct mtd_info *mtd)
for (i = 0; i < concat->num_subdev; i++) {
struct mtd_info *subdev = concat->subdev[i];
- subdev->resume(subdev);
+ mtd_resume(subdev);
}
}
@@ -652,7 +647,7 @@ static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
struct mtd_concat *concat = CONCAT(mtd);
int i, res = 0;
- if (!concat->subdev[0]->block_isbad)
+ if (!mtd_can_have_bb(concat->subdev[0]))
return res;
if (ofs > mtd->size)
@@ -666,7 +661,7 @@ static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
continue;
}
- res = subdev->block_isbad(subdev, ofs);
+ res = mtd_block_isbad(subdev, ofs);
break;
}
@@ -678,7 +673,7 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
struct mtd_concat *concat = CONCAT(mtd);
int i, err = -EINVAL;
- if (!concat->subdev[0]->block_markbad)
+ if (!mtd_can_have_bb(concat->subdev[0]))
return 0;
if (ofs > mtd->size)
@@ -692,7 +687,7 @@ static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
continue;
}
- err = subdev->block_markbad(subdev, ofs);
+ err = mtd_block_markbad(subdev, ofs);
if (!err)
mtd->ecc_stats.badblocks++;
break;
@@ -725,11 +720,7 @@ static unsigned long concat_get_unmapped_area(struct mtd_info *mtd,
if (offset + len > subdev->size)
return (unsigned long) -EINVAL;
- if (subdev->get_unmapped_area)
- return subdev->get_unmapped_area(subdev, len, offset,
- flags);
-
- break;
+ return mtd_get_unmapped_area(subdev, len, offset, flags);
}
return (unsigned long) -ENOSYS;
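
concat_lock() and concat_unlock() above lose their explicit -EOPNOTSUPP branches because the new helpers are expected to report a missing handler themselves. A minimal sketch of that assumed behaviour (the real definition is not part of this diff):

	/* Sketch only: assumed wrapper behaviour relied on by concat_lock(). */
	static inline int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
	{
		if (!mtd->lock)
			return -EOPNOTSUPP;
		return mtd->lock(mtd, ofs, len);
	}

With this, the first subdevice without a lock handler makes concat_lock() return -EOPNOTSUPP through its normal error path instead of the open-coded check per caller.
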
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index b01993e..9a9ce71 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -107,7 +107,8 @@ static LIST_HEAD(mtd_notifiers);
*/
static void mtd_release(struct device *dev)
{
- dev_t index = MTD_DEVT(dev_to_mtd(dev)->index);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+ dev_t index = MTD_DEVT(mtd->index);
/* remove /dev/mtdXro node if needed */
if (index)
@@ -116,27 +117,24 @@ static void mtd_release(struct device *dev)
static int mtd_cls_suspend(struct device *dev, pm_message_t state)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
- if (mtd && mtd->suspend)
- return mtd->suspend(mtd);
- else
- return 0;
+ return mtd ? mtd_suspend(mtd) : 0;
}
static int mtd_cls_resume(struct device *dev)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
-
+ struct mtd_info *mtd = dev_get_drvdata(dev);
+
if (mtd && mtd->resume)
- mtd->resume(mtd);
+ mtd_resume(mtd);
return 0;
}
static ssize_t mtd_type_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
char *type;
switch (mtd->type) {
@@ -172,7 +170,7 @@ static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);
static ssize_t mtd_flags_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
@@ -182,7 +180,7 @@ static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);
static ssize_t mtd_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%llu\n",
(unsigned long long)mtd->size);
@@ -193,7 +191,7 @@ static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);
static ssize_t mtd_erasesize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
@@ -203,7 +201,7 @@ static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);
static ssize_t mtd_writesize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
@@ -213,7 +211,7 @@ static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);
static ssize_t mtd_subpagesize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
@@ -224,7 +222,7 @@ static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);
static ssize_t mtd_oobsize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
@@ -234,7 +232,7 @@ static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);
static ssize_t mtd_numeraseregions_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
@@ -245,7 +243,7 @@ static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
static ssize_t mtd_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct mtd_info *mtd = dev_to_mtd(dev);
+ struct mtd_info *mtd = dev_get_drvdata(dev);
return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
@@ -338,9 +336,9 @@ int add_mtd_device(struct mtd_info *mtd)
mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
/* Some chips always power up locked. Unlock them now */
- if ((mtd->flags & MTD_WRITEABLE)
- && (mtd->flags & MTD_POWERUP_LOCK) && mtd->unlock) {
- if (mtd->unlock(mtd, 0, mtd->size))
+ if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
+ error = mtd_unlock(mtd, 0, mtd->size);
+ if (error && error != -EOPNOTSUPP)
printk(KERN_WARNING
"%s: unlock failed, writes may not work\n",
mtd->name);
@@ -516,7 +514,6 @@ EXPORT_SYMBOL_GPL(mtd_device_unregister);
* or removal of MTD devices. Causes the 'add' callback to be immediately
* invoked for each MTD device currently present in the system.
*/
-
void register_mtd_user (struct mtd_notifier *new)
{
struct mtd_info *mtd;
@@ -532,6 +529,7 @@ void register_mtd_user (struct mtd_notifier *new)
mutex_unlock(&mtd_table_mutex);
}
+EXPORT_SYMBOL_GPL(register_mtd_user);
/**
* unregister_mtd_user - unregister a 'user' of MTD devices.
@@ -542,7 +540,6 @@ void register_mtd_user (struct mtd_notifier *new)
* 'remove' callback to be immediately invoked for each MTD device
* currently present in the system.
*/
-
int unregister_mtd_user (struct mtd_notifier *old)
{
struct mtd_info *mtd;
@@ -558,7 +555,7 @@ int unregister_mtd_user (struct mtd_notifier *old)
mutex_unlock(&mtd_table_mutex);
return 0;
}
-
+EXPORT_SYMBOL_GPL(unregister_mtd_user);
/**
* get_mtd_device - obtain a validated handle for an MTD device
@@ -571,7 +568,6 @@ int unregister_mtd_user (struct mtd_notifier *old)
* both, return the num'th driver only if its address matches. Return
* error code if not.
*/
-
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
struct mtd_info *ret = NULL, *other;
@@ -604,6 +600,7 @@ out:
mutex_unlock(&mtd_table_mutex);
return ret;
}
+EXPORT_SYMBOL_GPL(get_mtd_device);
int __get_mtd_device(struct mtd_info *mtd)
@@ -624,6 +621,7 @@ int __get_mtd_device(struct mtd_info *mtd)
mtd->usecount++;
return 0;
}
+EXPORT_SYMBOL_GPL(__get_mtd_device);
/**
* get_mtd_device_nm - obtain a validated handle for an MTD device by
@@ -633,7 +631,6 @@ int __get_mtd_device(struct mtd_info *mtd)
* This function returns MTD device description structure in case of
* success and an error code in case of failure.
*/
-
struct mtd_info *get_mtd_device_nm(const char *name)
{
int err = -ENODEV;
@@ -662,6 +659,7 @@ out_unlock:
mutex_unlock(&mtd_table_mutex);
return ERR_PTR(err);
}
+EXPORT_SYMBOL_GPL(get_mtd_device_nm);
void put_mtd_device(struct mtd_info *mtd)
{
@@ -670,6 +668,7 @@ void put_mtd_device(struct mtd_info *mtd)
mutex_unlock(&mtd_table_mutex);
}
+EXPORT_SYMBOL_GPL(put_mtd_device);
void __put_mtd_device(struct mtd_info *mtd)
{
@@ -681,39 +680,65 @@ void __put_mtd_device(struct mtd_info *mtd)
module_put(mtd->owner);
}
+EXPORT_SYMBOL_GPL(__put_mtd_device);
-/* default_mtd_writev - default mtd writev method for MTD devices that
- * don't implement their own
+/*
+ * default_mtd_writev - the default writev method
+ * @mtd: mtd device description object pointer
+ * @vecs: the vectors to write
+ * @count: count of vectors in @vecs
+ * @to: the MTD device offset to write to
+ * @retlen: on exit contains the count of bytes written to the MTD device.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
*/
-
-int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
- unsigned long count, loff_t to, size_t *retlen)
+static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
{
unsigned long i;
size_t totlen = 0, thislen;
int ret = 0;
- if(!mtd->write) {
- ret = -EROFS;
- } else {
- for (i=0; i<count; i++) {
- if (!vecs[i].iov_len)
- continue;
- ret = mtd->write(mtd, to, vecs[i].iov_len, &thislen, vecs[i].iov_base);
- totlen += thislen;
- if (ret || thislen != vecs[i].iov_len)
- break;
- to += vecs[i].iov_len;
- }
+ for (i = 0; i < count; i++) {
+ if (!vecs[i].iov_len)
+ continue;
+ ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
+ vecs[i].iov_base);
+ totlen += thislen;
+ if (ret || thislen != vecs[i].iov_len)
+ break;
+ to += vecs[i].iov_len;
}
- if (retlen)
- *retlen = totlen;
+ *retlen = totlen;
return ret;
}
+/*
+ * mtd_writev - the vector-based MTD write method
+ * @mtd: mtd device description object pointer
+ * @vecs: the vectors to write
+ * @count: count of vectors in @vecs
+ * @to: the MTD device offset to write to
+ * @retlen: on exit contains the count of bytes written to the MTD device.
+ *
+ * This function returns zero in case of success and a negative error code in
+ * case of failure.
+ */
+int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
+ unsigned long count, loff_t to, size_t *retlen)
+{
+ *retlen = 0;
+ if (!mtd->writev)
+ return default_mtd_writev(mtd, vecs, count, to, retlen);
+ return mtd->writev(mtd, vecs, count, to, retlen);
+}
+EXPORT_SYMBOL_GPL(mtd_writev);
+
/**
* mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
- * @size: A pointer to the ideal or maximum size of the allocation. Points
+ * @mtd: mtd device description object pointer
+ * @size: a pointer to the ideal or maximum size of the allocation, points
* to the actual allocation size on success.
*
* This routine attempts to allocate a contiguous kernel buffer up to
@@ -758,15 +783,6 @@ void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
*/
return kmalloc(*size, GFP_KERNEL);
}
-
-EXPORT_SYMBOL_GPL(get_mtd_device);
-EXPORT_SYMBOL_GPL(get_mtd_device_nm);
-EXPORT_SYMBOL_GPL(__get_mtd_device);
-EXPORT_SYMBOL_GPL(put_mtd_device);
-EXPORT_SYMBOL_GPL(__put_mtd_device);
-EXPORT_SYMBOL_GPL(register_mtd_user);
-EXPORT_SYMBOL_GPL(unregister_mtd_user);
-EXPORT_SYMBOL_GPL(default_mtd_writev);
EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
#ifdef CONFIG_PROC_FS
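
mtd_writev() above falls back to default_mtd_writev() whenever a driver supplies no ->writev handler, so callers such as mtdconcat and mtdpart can use it unconditionally. A short usage sketch; the buffer names are hypothetical:

	/* Hypothetical caller: gather a header and a payload into one write. */
	struct kvec vec[2] = {
		{ .iov_base = hdr,  .iov_len = hdr_len  },
		{ .iov_base = data, .iov_len = data_len },
	};
	size_t written;
	int err = mtd_writev(mtd, vec, 2, to, &written);
	if (err)
		pr_err("mtd_writev failed: %d\n", err);
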
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index 1e2fa62..3ce99e0 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -112,7 +112,7 @@ static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&wait_q, &wait);
- ret = mtd->erase(mtd, &erase);
+ ret = mtd_erase(mtd, &erase);
if (ret) {
set_current_state(TASK_RUNNING);
remove_wait_queue(&wait_q, &wait);
@@ -169,8 +169,8 @@ static void mtdoops_workfunc_erase(struct work_struct *work)
cxt->nextpage = 0;
}
- while (mtd->block_isbad) {
- ret = mtd->block_isbad(mtd, cxt->nextpage * record_size);
+ while (mtd_can_have_bb(mtd)) {
+ ret = mtd_block_isbad(mtd, cxt->nextpage * record_size);
if (!ret)
break;
if (ret < 0) {
@@ -199,8 +199,8 @@ badblock:
return;
}
- if (mtd->block_markbad && ret == -EIO) {
- ret = mtd->block_markbad(mtd, cxt->nextpage * record_size);
+ if (mtd_can_have_bb(mtd) && ret == -EIO) {
+ ret = mtd_block_markbad(mtd, cxt->nextpage * record_size);
if (ret < 0) {
printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
return;
@@ -221,12 +221,16 @@ static void mtdoops_write(struct mtdoops_context *cxt, int panic)
hdr[0] = cxt->nextcount;
hdr[1] = MTDOOPS_KERNMSG_MAGIC;
- if (panic)
- ret = mtd->panic_write(mtd, cxt->nextpage * record_size,
- record_size, &retlen, cxt->oops_buf);
- else
- ret = mtd->write(mtd, cxt->nextpage * record_size,
- record_size, &retlen, cxt->oops_buf);
+ if (panic) {
+ ret = mtd_panic_write(mtd, cxt->nextpage * record_size,
+ record_size, &retlen, cxt->oops_buf);
+ if (ret == -EOPNOTSUPP) {
+ printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
+ return;
+ }
+ } else
+ ret = mtd_write(mtd, cxt->nextpage * record_size,
+ record_size, &retlen, cxt->oops_buf);
if (retlen != record_size || ret < 0)
printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n",
@@ -253,10 +257,13 @@ static void find_next_position(struct mtdoops_context *cxt)
size_t retlen;
for (page = 0; page < cxt->oops_pages; page++) {
+ if (mtd_can_have_bb(mtd) &&
+ mtd_block_isbad(mtd, page * record_size))
+ continue;
/* Assume the page is used */
mark_page_used(cxt, page);
- ret = mtd->read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
- &retlen, (u_char *) &count[0]);
+ ret = mtd_read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
+ &retlen, (u_char *)&count[0]);
if (retlen != MTDOOPS_HEADER_SIZE ||
(ret < 0 && !mtd_is_bitflip(ret))) {
printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n",
@@ -308,8 +315,7 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper,
char *dst;
if (reason != KMSG_DUMP_OOPS &&
- reason != KMSG_DUMP_PANIC &&
- reason != KMSG_DUMP_KEXEC)
+ reason != KMSG_DUMP_PANIC)
return;
/* Only dump oopses if dump_oops is set */
@@ -327,13 +333,8 @@ static void mtdoops_do_dump(struct kmsg_dumper *dumper,
memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
/* Panics must be written immediately */
- if (reason != KMSG_DUMP_OOPS) {
- if (!cxt->mtd->panic_write)
- printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
- else
- mtdoops_write(cxt, 1);
- return;
- }
+ if (reason != KMSG_DUMP_OOPS)
+ mtdoops_write(cxt, 1);
/* For other cases, schedule work to write it "nicely" */
schedule_work(&cxt->work_write);
@@ -369,7 +370,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
/* oops_page_used is a bit field */
cxt->oops_page_used = vmalloc(DIV_ROUND_UP(mtdoops_pages,
- BITS_PER_LONG));
+ BITS_PER_LONG) * sizeof(unsigned long));
if (!cxt->oops_page_used) {
printk(KERN_ERR "mtdoops: could not allocate page array\n");
return;
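
In mtdoops_write() above, the old "no panic_write handler" test moves out of mtdoops_do_dump(): the code now calls mtd_panic_write() unconditionally and reacts to -EOPNOTSUPP. A minimal sketch of the wrapper shape this assumes (not shown in this diff):

	/* Sketch only: assumed behaviour that makes the mtdoops hunk work. */
	static inline int mtd_panic_write(struct mtd_info *mtd, loff_t to,
					  size_t len, size_t *retlen,
					  const u_char *buf)
	{
		if (!mtd->panic_write)
			return -EOPNOTSUPP;	/* mtdoops prints an error and bails out */
		return mtd->panic_write(mtd, to, len, retlen, buf);
	}
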
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index a0bd2de..a3d44c3 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -70,8 +70,7 @@ static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
len = 0;
else if (from + len > mtd->size)
len = mtd->size - from;
- res = part->master->read(part->master, from + part->offset,
- len, retlen, buf);
+ res = mtd_read(part->master, from + part->offset, len, retlen, buf);
if (unlikely(res)) {
if (mtd_is_bitflip(res))
mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected;
@@ -89,15 +88,15 @@ static int part_point(struct mtd_info *mtd, loff_t from, size_t len,
len = 0;
else if (from + len > mtd->size)
len = mtd->size - from;
- return part->master->point (part->master, from + part->offset,
- len, retlen, virt, phys);
+ return mtd_point(part->master, from + part->offset, len, retlen,
+ virt, phys);
}
static void part_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
struct mtd_part *part = PART(mtd);
- part->master->unpoint(part->master, from + part->offset, len);
+ mtd_unpoint(part->master, from + part->offset, len);
}
static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
@@ -108,8 +107,7 @@ static unsigned long part_get_unmapped_area(struct mtd_info *mtd,
struct mtd_part *part = PART(mtd);
offset += part->offset;
- return part->master->get_unmapped_area(part->master, len, offset,
- flags);
+ return mtd_get_unmapped_area(part->master, len, offset, flags);
}
static int part_read_oob(struct mtd_info *mtd, loff_t from,
@@ -140,7 +138,7 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from,
return -EINVAL;
}
- res = part->master->read_oob(part->master, from + part->offset, ops);
+ res = mtd_read_oob(part->master, from + part->offset, ops);
if (unlikely(res)) {
if (mtd_is_bitflip(res))
mtd->ecc_stats.corrected++;
@@ -154,30 +152,28 @@ static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
- return part->master->read_user_prot_reg(part->master, from,
- len, retlen, buf);
+ return mtd_read_user_prot_reg(part->master, from, len, retlen, buf);
}
static int part_get_user_prot_info(struct mtd_info *mtd,
struct otp_info *buf, size_t len)
{
struct mtd_part *part = PART(mtd);
- return part->master->get_user_prot_info(part->master, buf, len);
+ return mtd_get_user_prot_info(part->master, buf, len);
}
static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
- return part->master->read_fact_prot_reg(part->master, from,
- len, retlen, buf);
+ return mtd_read_fact_prot_reg(part->master, from, len, retlen, buf);
}
static int part_get_fact_prot_info(struct mtd_info *mtd, struct otp_info *buf,
size_t len)
{
struct mtd_part *part = PART(mtd);
- return part->master->get_fact_prot_info(part->master, buf, len);
+ return mtd_get_fact_prot_info(part->master, buf, len);
}
static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
@@ -190,8 +186,7 @@ static int part_write(struct mtd_info *mtd, loff_t to, size_t len,
len = 0;
else if (to + len > mtd->size)
len = mtd->size - to;
- return part->master->write(part->master, to + part->offset,
- len, retlen, buf);
+ return mtd_write(part->master, to + part->offset, len, retlen, buf);
}
static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
@@ -204,8 +199,8 @@ static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
len = 0;
else if (to + len > mtd->size)
len = mtd->size - to;
- return part->master->panic_write(part->master, to + part->offset,
- len, retlen, buf);
+ return mtd_panic_write(part->master, to + part->offset, len, retlen,
+ buf);
}
static int part_write_oob(struct mtd_info *mtd, loff_t to,
@@ -220,22 +215,21 @@ static int part_write_oob(struct mtd_info *mtd, loff_t to,
return -EINVAL;
if (ops->datbuf && to + ops->len > mtd->size)
return -EINVAL;
- return part->master->write_oob(part->master, to + part->offset, ops);
+ return mtd_write_oob(part->master, to + part->offset, ops);
}
static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len, size_t *retlen, u_char *buf)
{
struct mtd_part *part = PART(mtd);
- return part->master->write_user_prot_reg(part->master, from,
- len, retlen, buf);
+ return mtd_write_user_prot_reg(part->master, from, len, retlen, buf);
}
static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
size_t len)
{
struct mtd_part *part = PART(mtd);
- return part->master->lock_user_prot_reg(part->master, from, len);
+ return mtd_lock_user_prot_reg(part->master, from, len);
}
static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
@@ -244,8 +238,8 @@ static int part_writev(struct mtd_info *mtd, const struct kvec *vecs,
struct mtd_part *part = PART(mtd);
if (!(mtd->flags & MTD_WRITEABLE))
return -EROFS;
- return part->master->writev(part->master, vecs, count,
- to + part->offset, retlen);
+ return mtd_writev(part->master, vecs, count, to + part->offset,
+ retlen);
}
static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
@@ -257,7 +251,7 @@ static int part_erase(struct mtd_info *mtd, struct erase_info *instr)
if (instr->addr >= mtd->size)
return -EINVAL;
instr->addr += part->offset;
- ret = part->master->erase(part->master, instr);
+ ret = mtd_erase(part->master, instr);
if (ret) {
if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN)
instr->fail_addr -= part->offset;
@@ -285,7 +279,7 @@ static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct mtd_part *part = PART(mtd);
if ((len + ofs) > mtd->size)
return -EINVAL;
- return part->master->lock(part->master, ofs + part->offset, len);
+ return mtd_lock(part->master, ofs + part->offset, len);
}
static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
@@ -293,7 +287,7 @@ static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct mtd_part *part = PART(mtd);
if ((len + ofs) > mtd->size)
return -EINVAL;
- return part->master->unlock(part->master, ofs + part->offset, len);
+ return mtd_unlock(part->master, ofs + part->offset, len);
}
static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
@@ -301,25 +295,25 @@ static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct mtd_part *part = PART(mtd);
if ((len + ofs) > mtd->size)
return -EINVAL;
- return part->master->is_locked(part->master, ofs + part->offset, len);
+ return mtd_is_locked(part->master, ofs + part->offset, len);
}
static void part_sync(struct mtd_info *mtd)
{
struct mtd_part *part = PART(mtd);
- part->master->sync(part->master);
+ mtd_sync(part->master);
}
static int part_suspend(struct mtd_info *mtd)
{
struct mtd_part *part = PART(mtd);
- return part->master->suspend(part->master);
+ return mtd_suspend(part->master);
}
static void part_resume(struct mtd_info *mtd)
{
struct mtd_part *part = PART(mtd);
- part->master->resume(part->master);
+ mtd_resume(part->master);
}
static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
@@ -328,7 +322,7 @@ static int part_block_isbad(struct mtd_info *mtd, loff_t ofs)
if (ofs >= mtd->size)
return -EINVAL;
ofs += part->offset;
- return part->master->block_isbad(part->master, ofs);
+ return mtd_block_isbad(part->master, ofs);
}
static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
@@ -341,7 +335,7 @@ static int part_block_markbad(struct mtd_info *mtd, loff_t ofs)
if (ofs >= mtd->size)
return -EINVAL;
ofs += part->offset;
- res = part->master->block_markbad(part->master, ofs);
+ res = mtd_block_markbad(part->master, ofs);
if (!res)
mtd->ecc_stats.badblocks++;
return res;
@@ -559,8 +553,7 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
uint64_t offs = 0;
while (offs < slave->mtd.size) {
- if (master->block_isbad(master,
- offs + slave->offset))
+ if (mtd_block_isbad(master, offs + slave->offset))
slave->mtd.ecc_stats.badblocks++;
offs += slave->mtd.erasesize;
}
diff --git a/drivers/mtd/mtdswap.c b/drivers/mtd/mtdswap.c
index bd9590c..c92f0f6 100644
--- a/drivers/mtd/mtdswap.c
+++ b/drivers/mtd/mtdswap.c
@@ -274,12 +274,12 @@ static int mtdswap_handle_badblock(struct mtdswap_dev *d, struct swap_eb *eb)
eb->root = NULL;
/* badblocks not supported */
- if (!d->mtd->block_markbad)
+ if (!mtd_can_have_bb(d->mtd))
return 1;
offset = mtdswap_eb_offset(d, eb);
dev_warn(d->dev, "Marking bad block at %08llx\n", offset);
- ret = d->mtd->block_markbad(d->mtd, offset);
+ ret = mtd_block_markbad(d->mtd, offset);
if (ret) {
dev_warn(d->dev, "Mark block bad failed for block at %08llx "
@@ -312,7 +312,7 @@ static int mtdswap_handle_write_error(struct mtdswap_dev *d, struct swap_eb *eb)
static int mtdswap_read_oob(struct mtdswap_dev *d, loff_t from,
struct mtd_oob_ops *ops)
{
- int ret = d->mtd->read_oob(d->mtd, from, ops);
+ int ret = mtd_read_oob(d->mtd, from, ops);
if (mtd_is_bitflip(ret))
return ret;
@@ -343,7 +343,7 @@ static int mtdswap_read_markers(struct mtdswap_dev *d, struct swap_eb *eb)
offset = mtdswap_eb_offset(d, eb);
/* Check first if the block is bad. */
- if (d->mtd->block_isbad && d->mtd->block_isbad(d->mtd, offset))
+ if (mtd_can_have_bb(d->mtd) && mtd_block_isbad(d->mtd, offset))
return MTDSWAP_SCANNED_BAD;
ops.ooblen = 2 * d->mtd->ecclayout->oobavail;
@@ -403,7 +403,7 @@ static int mtdswap_write_marker(struct mtdswap_dev *d, struct swap_eb *eb,
offset = mtdswap_eb_offset(d, eb) + d->mtd->writesize;
}
- ret = d->mtd->write_oob(d->mtd, offset , &ops);
+ ret = mtd_write_oob(d->mtd, offset, &ops);
if (ret) {
dev_warn(d->dev, "Write OOB failed for block at %08llx "
@@ -567,7 +567,7 @@ retry:
erase.len = mtd->erasesize;
erase.priv = (u_long)&wq;
- ret = mtd->erase(mtd, &erase);
+ ret = mtd_erase(mtd, &erase);
if (ret) {
if (retries++ < MTDSWAP_ERASE_RETRIES) {
dev_warn(d->dev,
@@ -689,7 +689,7 @@ retry:
return ret;
writepos = (loff_t)*bp << PAGE_SHIFT;
- ret = mtd->write(mtd, writepos, PAGE_SIZE, &retlen, buf);
+ ret = mtd_write(mtd, writepos, PAGE_SIZE, &retlen, buf);
if (ret == -EIO || mtd_is_eccerr(ret)) {
d->curr_write_pos--;
eb->active_count--;
@@ -736,7 +736,7 @@ static int mtdswap_move_block(struct mtdswap_dev *d, unsigned int oldblock,
retries = 0;
retry:
- ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf);
+ ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, d->page_buf);
if (ret < 0 && !mtd_is_bitflip(ret)) {
oldeb = d->eb_data + oldblock / d->pages_per_eblk;
@@ -946,7 +946,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
patt = mtdswap_test_patt(test + i);
memset(d->page_buf, patt, mtd->writesize);
memset(d->oob_buf, patt, mtd->ecclayout->oobavail);
- ret = mtd->write_oob(mtd, pos, &ops);
+ ret = mtd_write_oob(mtd, pos, &ops);
if (ret)
goto error;
@@ -955,7 +955,7 @@ static unsigned int mtdswap_eblk_passes(struct mtdswap_dev *d,
pos = base;
for (i = 0; i < mtd_pages; i++) {
- ret = mtd->read_oob(mtd, pos, &ops);
+ ret = mtd_read_oob(mtd, pos, &ops);
if (ret)
goto error;
@@ -1047,8 +1047,7 @@ static int mtdswap_flush(struct mtd_blktrans_dev *dev)
{
struct mtdswap_dev *d = MTDSWAP_MBD_TO_MTDSWAP(dev);
- if (d->mtd->sync)
- d->mtd->sync(d->mtd);
+ mtd_sync(d->mtd);
return 0;
}
@@ -1059,9 +1058,9 @@ static unsigned int mtdswap_badblocks(struct mtd_info *mtd, uint64_t size)
badcnt = 0;
- if (mtd->block_isbad)
+ if (mtd_can_have_bb(mtd))
for (offset = 0; offset < size; offset += mtd->erasesize)
- if (mtd->block_isbad(mtd, offset))
+ if (mtd_block_isbad(mtd, offset))
badcnt++;
return badcnt;
@@ -1161,7 +1160,7 @@ static int mtdswap_readsect(struct mtd_blktrans_dev *dev,
retries = 0;
retry:
- ret = mtd->read(mtd, readpos, PAGE_SIZE, &retlen, buf);
+ ret = mtd_read(mtd, readpos, PAGE_SIZE, &retlen, buf);
d->mtd_read_count++;
if (mtd_is_bitflip(ret)) {
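
mtdswap and mtdoops classify read errors through mtd_is_bitflip()/mtd_is_eccerr() instead of comparing error codes by hand. As a sketch, these helpers are assumed to be simple predicates over the usual MTD error conventions:

	/* Sketch only: assumed helpers over the MTD error-code conventions. */
	static inline int mtd_is_bitflip(int err)
	{
		return err == -EUCLEAN;		/* corrected bitflips, data still valid */
	}

	static inline int mtd_is_eccerr(int err)
	{
		return err == -EBADMSG;		/* uncorrectable ECC error */
	}
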
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index cce7b70..31b034b 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -110,7 +110,7 @@ config MTD_NAND_AMS_DELTA
config MTD_NAND_OMAP2
tristate "NAND Flash device on OMAP2, OMAP3 and OMAP4"
- depends on ARM && (ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP4)
+ depends on ARCH_OMAP2PLUS
help
Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4
platforms.
@@ -420,7 +420,6 @@ config MTD_NAND_NANDSIM
config MTD_NAND_GPMI_NAND
bool "GPMI NAND Flash Controller driver"
depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28)
- select MTD_PARTITIONS
select MTD_CMDLINE_PARTS
help
Enables NAND Flash support for IMX23 or IMX28.
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index eb40ea8..6a5ff64 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -717,17 +717,6 @@ static struct usb_driver alauda_driver = {
.id_table = alauda_table,
};
-static int __init alauda_init(void)
-{
- return usb_register(&alauda_driver);
-}
-
-static void __exit alauda_exit(void)
-{
- usb_deregister(&alauda_driver);
-}
-
-module_init(alauda_init);
-module_exit(alauda_exit);
+module_usb_driver(alauda_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c
index 9e6b498..3197e97 100644
--- a/drivers/mtd/nand/ams-delta.c
+++ b/drivers/mtd/nand/ams-delta.c
@@ -280,17 +280,7 @@ static struct platform_driver ams_delta_nand_driver = {
},
};
-static int __init ams_delta_nand_init(void)
-{
- return platform_driver_register(&ams_delta_nand_driver);
-}
-module_init(ams_delta_nand_init);
-
-static void __exit ams_delta_nand_exit(void)
-{
- platform_driver_unregister(&ams_delta_nand_driver);
-}
-module_exit(ams_delta_nand_exit);
+module_platform_driver(ams_delta_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jonathan McDowell <noodles@earth.li>");
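
The alauda and ams-delta hunks replace hand-rolled init/exit pairs with module_usb_driver() and module_platform_driver(). Assuming the helper macro generates the same boilerplate that is deleted above, its effect is roughly equivalent to:

	/* Sketch only: assumed expansion, equivalent to the code removed above. */
	static int __init ams_delta_nand_driver_init(void)
	{
		return platform_driver_register(&ams_delta_nand_driver);
	}
	module_init(ams_delta_nand_driver_init);

	static void __exit ams_delta_nand_driver_exit(void)
	{
		platform_driver_unregister(&ams_delta_nand_driver);
	}
	module_exit(ams_delta_nand_driver_exit);
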
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 23e5d77..35b4fb5 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -113,7 +113,7 @@ static int cpu_has_dma(void)
*/
static void atmel_nand_enable(struct atmel_nand_host *host)
{
- if (host->board->enable_pin)
+ if (gpio_is_valid(host->board->enable_pin))
gpio_set_value(host->board->enable_pin, 0);
}
@@ -122,7 +122,7 @@ static void atmel_nand_enable(struct atmel_nand_host *host)
*/
static void atmel_nand_disable(struct atmel_nand_host *host)
{
- if (host->board->enable_pin)
+ if (gpio_is_valid(host->board->enable_pin))
gpio_set_value(host->board->enable_pin, 1);
}
@@ -161,6 +161,37 @@ static int atmel_nand_device_ready(struct mtd_info *mtd)
!!host->board->rdy_pin_active_low;
}
+/*
+ * Minimal-overhead PIO for data access.
+ */
+static void atmel_read_buf8(struct mtd_info *mtd, u8 *buf, int len)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+
+ __raw_readsb(nand_chip->IO_ADDR_R, buf, len);
+}
+
+static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+
+ __raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2);
+}
+
+static void atmel_write_buf8(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+
+ __raw_writesb(nand_chip->IO_ADDR_W, buf, len);
+}
+
+static void atmel_write_buf16(struct mtd_info *mtd, const u8 *buf, int len)
+{
+ struct nand_chip *nand_chip = mtd->priv;
+
+ __raw_writesw(nand_chip->IO_ADDR_W, buf, len / 2);
+}
+
static void dma_complete_func(void *completion)
{
complete(completion);
@@ -235,27 +266,33 @@ err_buf:
static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len)
{
struct nand_chip *chip = mtd->priv;
+ struct atmel_nand_host *host = chip->priv;
if (use_dma && len > mtd->oobsize)
/* only use DMA for bigger than oob size: better performances */
if (atmel_nand_dma_op(mtd, buf, len, 1) == 0)
return;
- /* if no DMA operation possible, use PIO */
- memcpy_fromio(buf, chip->IO_ADDR_R, len);
+ if (host->board->bus_width_16)
+ atmel_read_buf16(mtd, buf, len);
+ else
+ atmel_read_buf8(mtd, buf, len);
}
static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
{
struct nand_chip *chip = mtd->priv;
+ struct atmel_nand_host *host = chip->priv;
if (use_dma && len > mtd->oobsize)
/* only use DMA for bigger than oob size: better performances */
if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0)
return;
- /* if no DMA operation possible, use PIO */
- memcpy_toio(chip->IO_ADDR_W, buf, len);
+ if (host->board->bus_width_16)
+ atmel_write_buf16(mtd, buf, len);
+ else
+ atmel_write_buf8(mtd, buf, len);
}
/*
@@ -492,7 +529,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
nand_chip->IO_ADDR_W = host->io_base;
nand_chip->cmd_ctrl = atmel_nand_cmd_ctrl;
- if (host->board->rdy_pin)
+ if (gpio_is_valid(host->board->rdy_pin))
nand_chip->dev_ready = atmel_nand_device_ready;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
@@ -530,7 +567,7 @@ static int __init atmel_nand_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, host);
atmel_nand_enable(host);
- if (host->board->det_pin) {
+ if (gpio_is_valid(host->board->det_pin)) {
if (gpio_get_value(host->board->det_pin)) {
printk(KERN_INFO "No SmartMedia card inserted.\n");
res = -ENXIO;
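
The atmel_nand changes switch the bare pin tests to gpio_is_valid(), so GPIO 0 counts as a wired pin and a negative number means "not connected". A hypothetical illustration of the difference:

	/* Illustration only: hypothetical pin numbers for the checks above. */
	int enable_pin = 0;	/* GPIO 0 is a legitimate pin */
	int det_pin = -1;	/* a negative value marks an unused pin */

	/* old: if (enable_pin)                -> wrongly treats GPIO 0 as "not wired" */
	/* new: if (gpio_is_valid(enable_pin)) -> accepts 0, rejects -1 */
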
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 7dd3700..73abbc3 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -17,35 +17,19 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
+#include <linux/platform_device.h>
#include <asm/io.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/au1550nd.h>
-#ifdef CONFIG_MIPS_PB1550
-#include <asm/mach-pb1x00/pb1550.h>
-#elif defined(CONFIG_MIPS_DB1550)
-#include <asm/mach-db1x00/db1x00.h>
-#endif
-#include <asm/mach-db1x00/bcsr.h>
-/*
- * MTD structure for NAND controller
- */
-static struct mtd_info *au1550_mtd = NULL;
-static void __iomem *p_nand;
-static int nand_width = 1; /* default x8 */
-static void (*au1550_write_byte)(struct mtd_info *, u_char);
+struct au1550nd_ctx {
+ struct mtd_info info;
+ struct nand_chip chip;
-/*
- * Define partitions for flash device
- */
-static const struct mtd_partition partition_info[] = {
- {
- .name = "NAND FS 0",
- .offset = 0,
- .size = 8 * 1024 * 1024},
- {
- .name = "NAND FS 1",
- .offset = MTDPART_OFS_APPEND,
- .size = MTDPART_SIZ_FULL}
+ int cs;
+ void __iomem *base;
+ void (*write_byte)(struct mtd_info *, u_char);
};
/**
@@ -259,24 +243,25 @@ static int au_verify_buf16(struct mtd_info *mtd, const u_char *buf, int len)
static void au1550_hwcontrol(struct mtd_info *mtd, int cmd)
{
- register struct nand_chip *this = mtd->priv;
+ struct au1550nd_ctx *ctx = container_of(mtd, struct au1550nd_ctx, info);
+ struct nand_chip *this = mtd->priv;
switch (cmd) {
case NAND_CTL_SETCLE:
- this->IO_ADDR_W = p_nand + MEM_STNAND_CMD;
+ this->IO_ADDR_W = ctx->base + MEM_STNAND_CMD;
break;
case NAND_CTL_CLRCLE:
- this->IO_ADDR_W = p_nand + MEM_STNAND_DATA;
+ this->IO_ADDR_W = ctx->base + MEM_STNAND_DATA;
break;
case NAND_CTL_SETALE:
- this->IO_ADDR_W = p_nand + MEM_STNAND_ADDR;
+ this->IO_ADDR_W = ctx->base + MEM_STNAND_ADDR;
break;
case NAND_CTL_CLRALE:
- this->IO_ADDR_W = p_nand + MEM_STNAND_DATA;
+ this->IO_ADDR_W = ctx->base + MEM_STNAND_DATA;
/* FIXME: Nobody knows why this is necessary,
* but it works only that way */
udelay(1);
@@ -284,7 +269,7 @@ static void au1550_hwcontrol(struct mtd_info *mtd, int cmd)
case NAND_CTL_SETNCE:
/* assert (force assert) chip enable */
- au_writel((1 << (4 + NAND_CS)), MEM_STNDCTL);
+ au_writel((1 << (4 + ctx->cs)), MEM_STNDCTL);
break;
case NAND_CTL_CLRNCE:
@@ -331,9 +316,10 @@ static void au1550_select_chip(struct mtd_info *mtd, int chip)
*/
static void au1550_command(struct mtd_info *mtd, unsigned command, int column, int page_addr)
{
- register struct nand_chip *this = mtd->priv;
+ struct au1550nd_ctx *ctx = container_of(mtd, struct au1550nd_ctx, info);
+ struct nand_chip *this = mtd->priv;
int ce_override = 0, i;
- ulong flags;
+ unsigned long flags = 0;
/* Begin command latch cycle */
au1550_hwcontrol(mtd, NAND_CTL_SETCLE);
@@ -354,9 +340,9 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
column -= 256;
readcmd = NAND_CMD_READ1;
}
- au1550_write_byte(mtd, readcmd);
+ ctx->write_byte(mtd, readcmd);
}
- au1550_write_byte(mtd, command);
+ ctx->write_byte(mtd, command);
/* Set ALE and clear CLE to start address cycle */
au1550_hwcontrol(mtd, NAND_CTL_CLRCLE);
@@ -369,10 +355,10 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
/* Adjust columns for 16 bit buswidth */
if (this->options & NAND_BUSWIDTH_16)
column >>= 1;
- au1550_write_byte(mtd, column);
+ ctx->write_byte(mtd, column);
}
if (page_addr != -1) {
- au1550_write_byte(mtd, (u8)(page_addr & 0xff));
+ ctx->write_byte(mtd, (u8)(page_addr & 0xff));
if (command == NAND_CMD_READ0 ||
command == NAND_CMD_READ1 ||
@@ -390,11 +376,12 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
au1550_hwcontrol(mtd, NAND_CTL_SETNCE);
}
- au1550_write_byte(mtd, (u8)(page_addr >> 8));
+ ctx->write_byte(mtd, (u8)(page_addr >> 8));
/* One more address cycle for devices > 32MiB */
if (this->chipsize > (32 << 20))
- au1550_write_byte(mtd, (u8)((page_addr >> 16) & 0x0f));
+ ctx->write_byte(mtd,
+ ((page_addr >> 16) & 0x0f));
}
/* Latch in address */
au1550_hwcontrol(mtd, NAND_CTL_CLRALE);
@@ -440,121 +427,79 @@ static void au1550_command(struct mtd_info *mtd, unsigned command, int column, i
while(!this->dev_ready(mtd));
}
-
-/*
- * Main initialization routine
- */
-static int __init au1xxx_nand_init(void)
+static int __devinit find_nand_cs(unsigned long nand_base)
{
- struct nand_chip *this;
- u16 boot_swapboot = 0; /* default value */
- int retval;
- u32 mem_staddr;
- u32 nand_phys;
-
- /* Allocate memory for MTD device structure and private data */
- au1550_mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
- if (!au1550_mtd) {
- printk("Unable to allocate NAND MTD dev structure.\n");
- return -ENOMEM;
- }
-
- /* Get pointer to private data */
- this = (struct nand_chip *)(&au1550_mtd[1]);
-
- /* Link the private data with the MTD structure */
- au1550_mtd->priv = this;
- au1550_mtd->owner = THIS_MODULE;
-
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1000_STATIC_MEM_PHYS_ADDR);
+ unsigned long addr, staddr, start, mask, end;
+ int i;
- /* MEM_STNDCTL: disable ints, disable nand boot */
- au_writel(0, MEM_STNDCTL);
+ for (i = 0; i < 4; i++) {
+ addr = 0x1000 + (i * 0x10); /* CSx */
+ staddr = __raw_readl(base + addr + 0x08); /* STADDRx */
+ /* figure out the decoded range of this CS */
+ start = (staddr << 4) & 0xfffc0000;
+ mask = (staddr << 18) & 0xfffc0000;
+ end = (start | (start - 1)) & ~(start ^ mask);
+ if ((nand_base >= start) && (nand_base < end))
+ return i;
+ }
-#ifdef CONFIG_MIPS_PB1550
- /* set gpio206 high */
- gpio_direction_input(206);
+ return -ENODEV;
+}
- boot_swapboot = (au_readl(MEM_STSTAT) & (0x7 << 1)) | ((bcsr_read(BCSR_STATUS) >> 6) & 0x1);
+static int __devinit au1550nd_probe(struct platform_device *pdev)
+{
+ struct au1550nd_platdata *pd;
+ struct au1550nd_ctx *ctx;
+ struct nand_chip *this;
+ struct resource *r;
+ int ret, cs;
- switch (boot_swapboot) {
- case 0:
- case 2:
- case 8:
- case 0xC:
- case 0xD:
- /* x16 NAND Flash */
- nand_width = 0;
- break;
- case 1:
- case 9:
- case 3:
- case 0xE:
- case 0xF:
- /* x8 NAND Flash */
- nand_width = 1;
- break;
- default:
- printk("Pb1550 NAND: bad boot:swap\n");
- retval = -EINVAL;
- goto outmem;
+ pd = pdev->dev.platform_data;
+ if (!pd) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -ENODEV;
}
-#endif
-
- /* Configure chip-select; normally done by boot code, e.g. YAMON */
-#ifdef NAND_STCFG
- if (NAND_CS == 0) {
- au_writel(NAND_STCFG, MEM_STCFG0);
- au_writel(NAND_STTIME, MEM_STTIME0);
- au_writel(NAND_STADDR, MEM_STADDR0);
- }
- if (NAND_CS == 1) {
- au_writel(NAND_STCFG, MEM_STCFG1);
- au_writel(NAND_STTIME, MEM_STTIME1);
- au_writel(NAND_STADDR, MEM_STADDR1);
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx) {
+ dev_err(&pdev->dev, "no memory for NAND context\n");
+ return -ENOMEM;
}
- if (NAND_CS == 2) {
- au_writel(NAND_STCFG, MEM_STCFG2);
- au_writel(NAND_STTIME, MEM_STTIME2);
- au_writel(NAND_STADDR, MEM_STADDR2);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no NAND memory resource\n");
+ ret = -ENODEV;
+ goto out1;
}
- if (NAND_CS == 3) {
- au_writel(NAND_STCFG, MEM_STCFG3);
- au_writel(NAND_STTIME, MEM_STTIME3);
- au_writel(NAND_STADDR, MEM_STADDR3);
+ if (request_mem_region(r->start, resource_size(r), "au1550-nand")) {
+ dev_err(&pdev->dev, "cannot claim NAND memory area\n");
+ ret = -ENOMEM;
+ goto out1;
}
-#endif
-
- /* Locate NAND chip-select in order to determine NAND phys address */
- mem_staddr = 0x00000000;
- if (((au_readl(MEM_STCFG0) & 0x7) == 0x5) && (NAND_CS == 0))
- mem_staddr = au_readl(MEM_STADDR0);
- else if (((au_readl(MEM_STCFG1) & 0x7) == 0x5) && (NAND_CS == 1))
- mem_staddr = au_readl(MEM_STADDR1);
- else if (((au_readl(MEM_STCFG2) & 0x7) == 0x5) && (NAND_CS == 2))
- mem_staddr = au_readl(MEM_STADDR2);
- else if (((au_readl(MEM_STCFG3) & 0x7) == 0x5) && (NAND_CS == 3))
- mem_staddr = au_readl(MEM_STADDR3);
-
- if (mem_staddr == 0x00000000) {
- printk("Au1xxx NAND: ERROR WITH NAND CHIP-SELECT\n");
- kfree(au1550_mtd);
- return 1;
+
+ ctx->base = ioremap_nocache(r->start, 0x1000);
+ if (!ctx->base) {
+ dev_err(&pdev->dev, "cannot remap NAND memory area\n");
+ ret = -ENODEV;
+ goto out2;
}
- nand_phys = (mem_staddr << 4) & 0xFFFC0000;
- p_nand = ioremap(nand_phys, 0x1000);
+ this = &ctx->chip;
+ ctx->info.priv = this;
+ ctx->info.owner = THIS_MODULE;
- /* make controller and MTD agree */
- if (NAND_CS == 0)
- nand_width = au_readl(MEM_STCFG0) & (1 << 22);
- if (NAND_CS == 1)
- nand_width = au_readl(MEM_STCFG1) & (1 << 22);
- if (NAND_CS == 2)
- nand_width = au_readl(MEM_STCFG2) & (1 << 22);
- if (NAND_CS == 3)
- nand_width = au_readl(MEM_STCFG3) & (1 << 22);
+ /* figure out which CS# r->start belongs to */
+ cs = find_nand_cs(r->start);
+ if (cs < 0) {
+ dev_err(&pdev->dev, "cannot detect NAND chipselect\n");
+ ret = -ENODEV;
+ goto out3;
+ }
+ ctx->cs = cs;
- /* Set address of hardware control function */
this->dev_ready = au1550_device_ready;
this->select_chip = au1550_select_chip;
this->cmdfunc = au1550_command;
@@ -565,54 +510,57 @@ static int __init au1xxx_nand_init(void)
this->options = NAND_NO_AUTOINCR;
- if (!nand_width)
+ if (pd->devwidth)
this->options |= NAND_BUSWIDTH_16;
- this->read_byte = (!nand_width) ? au_read_byte16 : au_read_byte;
- au1550_write_byte = (!nand_width) ? au_write_byte16 : au_write_byte;
+ this->read_byte = (pd->devwidth) ? au_read_byte16 : au_read_byte;
+ ctx->write_byte = (pd->devwidth) ? au_write_byte16 : au_write_byte;
this->read_word = au_read_word;
- this->write_buf = (!nand_width) ? au_write_buf16 : au_write_buf;
- this->read_buf = (!nand_width) ? au_read_buf16 : au_read_buf;
- this->verify_buf = (!nand_width) ? au_verify_buf16 : au_verify_buf;
-
- /* Scan to find existence of the device */
- if (nand_scan(au1550_mtd, 1)) {
- retval = -ENXIO;
- goto outio;
+ this->write_buf = (pd->devwidth) ? au_write_buf16 : au_write_buf;
+ this->read_buf = (pd->devwidth) ? au_read_buf16 : au_read_buf;
+ this->verify_buf = (pd->devwidth) ? au_verify_buf16 : au_verify_buf;
+
+ ret = nand_scan(&ctx->info, 1);
+ if (ret) {
+ dev_err(&pdev->dev, "NAND scan failed with %d\n", ret);
+ goto out3;
}
- /* Register the partitions */
- mtd_device_register(au1550_mtd, partition_info,
- ARRAY_SIZE(partition_info));
+ mtd_device_register(&ctx->info, pd->parts, pd->num_parts);
return 0;
- outio:
- iounmap(p_nand);
-
- outmem:
- kfree(au1550_mtd);
- return retval;
+out3:
+ iounmap(ctx->base);
+out2:
+ release_mem_region(r->start, resource_size(r));
+out1:
+ kfree(ctx);
+ return ret;
}
-module_init(au1xxx_nand_init);
-
-/*
- * Clean up routine
- */
-static void __exit au1550_cleanup(void)
+static int __devexit au1550nd_remove(struct platform_device *pdev)
{
- /* Release resources, unregister device */
- nand_release(au1550_mtd);
+ struct au1550nd_ctx *ctx = platform_get_drvdata(pdev);
+ struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- /* Free the MTD device structure */
- kfree(au1550_mtd);
-
- /* Unmap */
- iounmap(p_nand);
+ nand_release(&ctx->info);
+ iounmap(ctx->base);
+ release_mem_region(r->start, 0x1000);
+ kfree(ctx);
+ return 0;
}
-module_exit(au1550_cleanup);
+static struct platform_driver au1550nd_driver = {
+ .driver = {
+ .name = "au1550-nand",
+ .owner = THIS_MODULE,
+ },
+ .probe = au1550nd_probe,
+ .remove = __devexit_p(au1550nd_remove),
+};
+
+module_platform_driver(au1550nd_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Embedded Edge, LLC");
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index 46b58d6..50387fd 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -546,18 +546,7 @@ static struct platform_driver nand_driver = {
.resume = bcm_umi_nand_resume,
};
-static int __init nand_init(void)
-{
- return platform_driver_register(&nand_driver);
-}
-
-static void __exit nand_exit(void)
-{
- platform_driver_unregister(&nand_driver);
-}
-
-module_init(nand_init);
-module_exit(nand_exit);
+module_platform_driver(nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Broadcom");
diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c
index c153e1f..6e56615 100644
--- a/drivers/mtd/nand/davinci_nand.c
+++ b/drivers/mtd/nand/davinci_nand.c
@@ -675,7 +675,9 @@ static int __init nand_davinci_probe(struct platform_device *pdev)
davinci_nand_writel(info, A1CR_OFFSET + info->core_chipsel * 4, val);
- ret = davinci_aemif_setup_timing(info->timing, info->base,
+ ret = 0;
+ if (info->timing)
+ ret = davinci_aemif_setup_timing(info->timing, info->base,
info->core_chipsel);
if (ret < 0) {
dev_dbg(&pdev->dev, "NAND timing values setup fail\n");
diff --git a/drivers/mtd/nand/diskonchip.c b/drivers/mtd/nand/diskonchip.c
index 5780dba..df921e7 100644
--- a/drivers/mtd/nand/diskonchip.c
+++ b/drivers/mtd/nand/diskonchip.c
@@ -1072,7 +1072,7 @@ static int __init find_media_headers(struct mtd_info *mtd, u_char *buf, const ch
size_t retlen;
for (offs = 0; offs < mtd->size; offs += mtd->erasesize) {
- ret = mtd->read(mtd, offs, mtd->writesize, &retlen, buf);
+ ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
if (retlen != mtd->writesize)
continue;
if (ret) {
@@ -1097,7 +1097,7 @@ static int __init find_media_headers(struct mtd_info *mtd, u_char *buf, const ch
/* Only one mediaheader was found. We want buf to contain a
mediaheader on return, so we'll have to re-read the one we found. */
offs = doc->mh0_page << this->page_shift;
- ret = mtd->read(mtd, offs, mtd->writesize, &retlen, buf);
+ ret = mtd_read(mtd, offs, mtd->writesize, &retlen, buf);
if (retlen != mtd->writesize) {
/* Insanity. Give up. */
printk(KERN_ERR "Read DiskOnChip Media Header once, but can't reread it???\n");
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index eedd8ee..7195ee6 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -166,15 +166,22 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob)
elbc_fcm_ctrl->page = page_addr;
- out_be32(&lbc->fbar,
- page_addr >> (chip->phys_erase_shift - chip->page_shift));
-
if (priv->page_size) {
+		/*
+		 * Large-page chip: FPAR[PI] holds the lowest 6 bits of the
+		 * page address, FBAR[BLK] holds the remaining bits.
+		 */
+ out_be32(&lbc->fbar, page_addr >> 6);
out_be32(&lbc->fpar,
((page_addr << FPAR_LP_PI_SHIFT) & FPAR_LP_PI) |
(oob ? FPAR_LP_MS : 0) | column);
buf_num = (page_addr & 1) << 2;
} else {
+		/*
+		 * Small-page chip: FPAR[PI] holds the lowest 5 bits of the
+		 * page address, FBAR[BLK] holds the remaining bits.
+		 */
+ out_be32(&lbc->fbar, page_addr >> 5);
out_be32(&lbc->fpar,
((page_addr << FPAR_SP_PI_SHIFT) & FPAR_SP_PI) |
(oob ? FPAR_SP_MS : 0) | column);
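Note: to illustrate the FBAR/FPAR split described in the comments above, a small worked sketch for the large-page case (split_page_addr_lp() is a hypothetical helper, not part of the driver):

#include <linux/types.h>

/* large-page case: FBAR[BLK] = page_addr >> 6, FPAR[PI] = low 6 bits;
 * e.g. page_addr 0x1234 -> blk 0x48, pi 0x34 */
static inline void split_page_addr_lp(u32 page_addr, u32 *blk, u32 *pi)
{
	*blk = page_addr >> 6;
	*pi = page_addr & 0x3f;
}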
@@ -349,20 +356,22 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
fsl_elbc_run_command(mtd);
return;
- /* READID must read all 5 possible bytes while CEB is active */
case NAND_CMD_READID:
- dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD_READID.\n");
+ case NAND_CMD_PARAM:
+ dev_vdbg(priv->dev, "fsl_elbc_cmdfunc: NAND_CMD %x\n", command);
out_be32(&lbc->fir, (FIR_OP_CM0 << FIR_OP0_SHIFT) |
(FIR_OP_UA << FIR_OP1_SHIFT) |
(FIR_OP_RBW << FIR_OP2_SHIFT));
- out_be32(&lbc->fcr, NAND_CMD_READID << FCR_CMD0_SHIFT);
- /* nand_get_flash_type() reads 8 bytes of entire ID string */
- out_be32(&lbc->fbcr, 8);
- elbc_fcm_ctrl->read_bytes = 8;
+ out_be32(&lbc->fcr, command << FCR_CMD0_SHIFT);
+		/*
+		 * Although READID currently needs only 8 bytes, always read
+		 * the maximum of 256 bytes so that PARAM works as well.
+		 */
+ out_be32(&lbc->fbcr, 256);
+ elbc_fcm_ctrl->read_bytes = 256;
elbc_fcm_ctrl->use_mdr = 1;
- elbc_fcm_ctrl->mdr = 0;
-
+ elbc_fcm_ctrl->mdr = column;
set_addr(mtd, 0, 0, 0);
fsl_elbc_run_command(mtd);
return;
@@ -407,9 +416,17 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
page_addr, column);
elbc_fcm_ctrl->column = column;
- elbc_fcm_ctrl->oob = 0;
elbc_fcm_ctrl->use_mdr = 1;
+ if (column >= mtd->writesize) {
+ /* OOB area */
+ column -= mtd->writesize;
+ elbc_fcm_ctrl->oob = 1;
+ } else {
+ WARN_ON(column != 0);
+ elbc_fcm_ctrl->oob = 0;
+ }
+
fcr = (NAND_CMD_STATUS << FCR_CMD1_SHIFT) |
(NAND_CMD_SEQIN << FCR_CMD2_SHIFT) |
(NAND_CMD_PAGEPROG << FCR_CMD3_SHIFT);
@@ -434,16 +451,12 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
(FIR_OP_CW1 << FIR_OP6_SHIFT) |
(FIR_OP_RS << FIR_OP7_SHIFT));
- if (column >= mtd->writesize) {
+ if (elbc_fcm_ctrl->oob)
/* OOB area --> READOOB */
- column -= mtd->writesize;
fcr |= NAND_CMD_READOOB << FCR_CMD0_SHIFT;
- elbc_fcm_ctrl->oob = 1;
- } else {
- WARN_ON(column != 0);
+ else
/* First 256 bytes --> READ0 */
fcr |= NAND_CMD_READ0 << FCR_CMD0_SHIFT;
- }
}
out_be32(&lbc->fcr, fcr);
@@ -463,7 +476,8 @@ static void fsl_elbc_cmdfunc(struct mtd_info *mtd, unsigned int command,
*/
if (elbc_fcm_ctrl->oob || elbc_fcm_ctrl->column != 0 ||
elbc_fcm_ctrl->index != mtd->writesize + mtd->oobsize)
- out_be32(&lbc->fbcr, elbc_fcm_ctrl->index);
+ out_be32(&lbc->fbcr,
+ elbc_fcm_ctrl->index - elbc_fcm_ctrl->column);
else
out_be32(&lbc->fbcr, 0);
@@ -659,9 +673,7 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
if (chip->pagemask & 0xff000000)
al++;
- /* add to ECCM mode set in fsl_elbc_init */
- priv->fmr |= (12 << FMR_CWTO_SHIFT) | /* Timeout > 12 ms */
- (al << FMR_AL_SHIFT);
+ priv->fmr |= al << FMR_AL_SHIFT;
dev_dbg(priv->dev, "fsl_elbc_init: nand->numchips = %d\n",
chip->numchips);
@@ -764,8 +776,10 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
priv->mtd.priv = chip;
priv->mtd.owner = THIS_MODULE;
- /* Set the ECCM according to the settings in bootloader.*/
- priv->fmr = in_be32(&lbc->fmr) & FMR_ECCM;
+ /* set timeout to maximum */
+ priv->fmr = 15 << FMR_CWTO_SHIFT;
+ if (in_be32(&lbc->bank[priv->bank].or) & OR_FCM_PGS)
+ priv->fmr |= FMR_ECCM;
/* fill in nand_chip structure */
/* set up function call table */
@@ -971,18 +985,7 @@ static struct platform_driver fsl_elbc_nand_driver = {
.remove = fsl_elbc_nand_remove,
};
-static int __init fsl_elbc_nand_init(void)
-{
- return platform_driver_register(&fsl_elbc_nand_driver);
-}
-
-static void __exit fsl_elbc_nand_exit(void)
-{
- platform_driver_unregister(&fsl_elbc_nand_driver);
-}
-
-module_init(fsl_elbc_nand_init);
-module_exit(fsl_elbc_nand_exit);
+module_platform_driver(fsl_elbc_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Freescale");
diff --git a/drivers/mtd/nand/fsl_upm.c b/drivers/mtd/nand/fsl_upm.c
index b4f3cc9..45df542 100644
--- a/drivers/mtd/nand/fsl_upm.c
+++ b/drivers/mtd/nand/fsl_upm.c
@@ -353,17 +353,7 @@ static struct platform_driver of_fun_driver = {
.remove = __devexit_p(fun_remove),
};
-static int __init fun_module_init(void)
-{
- return platform_driver_register(&of_fun_driver);
-}
-module_init(fun_module_init);
-
-static void __exit fun_module_exit(void)
-{
- platform_driver_unregister(&of_fun_driver);
-}
-module_exit(fun_module_exit);
+module_platform_driver(of_fun_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Anton Vorontsov <avorontsov@ru.mvista.com>");
diff --git a/drivers/mtd/nand/gpio.c b/drivers/mtd/nand/gpio.c
index 2c2060b..27000a5 100644
--- a/drivers/mtd/nand/gpio.c
+++ b/drivers/mtd/nand/gpio.c
@@ -27,6 +27,9 @@
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand-gpio.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_gpio.h>
struct gpiomtd {
void __iomem *io_sync;
@@ -171,6 +174,96 @@ static int gpio_nand_devready(struct mtd_info *mtd)
return gpio_get_value(gpiomtd->plat.gpio_rdy);
}
+#ifdef CONFIG_OF
+static const struct of_device_id gpio_nand_id_table[] = {
+ { .compatible = "gpio-control-nand" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, gpio_nand_id_table);
+
+static int gpio_nand_get_config_of(const struct device *dev,
+ struct gpio_nand_platdata *plat)
+{
+ u32 val;
+
+ if (!of_property_read_u32(dev->of_node, "bank-width", &val)) {
+ if (val == 2) {
+ plat->options |= NAND_BUSWIDTH_16;
+ } else if (val != 1) {
+ dev_err(dev, "invalid bank-width %u\n", val);
+ return -EINVAL;
+ }
+ }
+
+ plat->gpio_rdy = of_get_gpio(dev->of_node, 0);
+ plat->gpio_nce = of_get_gpio(dev->of_node, 1);
+ plat->gpio_ale = of_get_gpio(dev->of_node, 2);
+ plat->gpio_cle = of_get_gpio(dev->of_node, 3);
+ plat->gpio_nwp = of_get_gpio(dev->of_node, 4);
+
+ if (!of_property_read_u32(dev->of_node, "chip-delay", &val))
+ plat->chip_delay = val;
+
+ return 0;
+}
+
+static struct resource *gpio_nand_get_io_sync_of(struct platform_device *pdev)
+{
+ struct resource *r = devm_kzalloc(&pdev->dev, sizeof(*r), GFP_KERNEL);
+ u64 addr;
+
+ if (!r || of_property_read_u64(pdev->dev.of_node,
+ "gpio-control-nand,io-sync-reg", &addr))
+ return NULL;
+
+ r->start = addr;
+ r->end = r->start + 0x3;
+ r->flags = IORESOURCE_MEM;
+
+ return r;
+}
+#else /* CONFIG_OF */
+#define gpio_nand_id_table NULL
+static inline int gpio_nand_get_config_of(const struct device *dev,
+ struct gpio_nand_platdata *plat)
+{
+ return -ENOSYS;
+}
+
+static inline struct resource *
+gpio_nand_get_io_sync_of(struct platform_device *pdev)
+{
+ return NULL;
+}
+#endif /* CONFIG_OF */
+
+static inline int gpio_nand_get_config(const struct device *dev,
+ struct gpio_nand_platdata *plat)
+{
+ int ret = gpio_nand_get_config_of(dev, plat);
+
+ if (!ret)
+ return ret;
+
+ if (dev->platform_data) {
+ memcpy(plat, dev->platform_data, sizeof(*plat));
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static inline struct resource *
+gpio_nand_get_io_sync(struct platform_device *pdev)
+{
+ struct resource *r = gpio_nand_get_io_sync_of(pdev);
+
+ if (r)
+ return r;
+
+ return platform_get_resource(pdev, IORESOURCE_MEM, 1);
+}
+
static int __devexit gpio_nand_remove(struct platform_device *dev)
{
struct gpiomtd *gpiomtd = platform_get_drvdata(dev);
@@ -178,7 +271,7 @@ static int __devexit gpio_nand_remove(struct platform_device *dev)
nand_release(&gpiomtd->mtd_info);
- res = platform_get_resource(dev, IORESOURCE_MEM, 1);
+ res = gpio_nand_get_io_sync(dev);
iounmap(gpiomtd->io_sync);
if (res)
release_mem_region(res->start, resource_size(res));
@@ -226,9 +319,10 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
struct gpiomtd *gpiomtd;
struct nand_chip *this;
struct resource *res0, *res1;
- int ret;
+ struct mtd_part_parser_data ppdata = {};
+ int ret = 0;
- if (!dev->dev.platform_data)
+ if (!dev->dev.of_node && !dev->dev.platform_data)
return -EINVAL;
res0 = platform_get_resource(dev, IORESOURCE_MEM, 0);
@@ -248,7 +342,7 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
goto err_map;
}
- res1 = platform_get_resource(dev, IORESOURCE_MEM, 1);
+ res1 = gpio_nand_get_io_sync(dev);
if (res1) {
gpiomtd->io_sync = request_and_remap(res1, 4, "NAND sync", &ret);
if (!gpiomtd->io_sync) {
@@ -257,7 +351,9 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
}
}
- memcpy(&gpiomtd->plat, dev->dev.platform_data, sizeof(gpiomtd->plat));
+ ret = gpio_nand_get_config(&dev->dev, &gpiomtd->plat);
+ if (ret)
+ goto err_nce;
ret = gpio_request(gpiomtd->plat.gpio_nce, "NAND NCE");
if (ret)
@@ -316,8 +412,12 @@ static int __devinit gpio_nand_probe(struct platform_device *dev)
gpiomtd->plat.adjust_parts(&gpiomtd->plat,
gpiomtd->mtd_info.size);
- mtd_device_register(&gpiomtd->mtd_info, gpiomtd->plat.parts,
- gpiomtd->plat.num_parts);
+ ppdata.of_node = dev->dev.of_node;
+ ret = mtd_device_parse_register(&gpiomtd->mtd_info, NULL, &ppdata,
+ gpiomtd->plat.parts,
+ gpiomtd->plat.num_parts);
+ if (ret)
+ goto err_wp;
platform_set_drvdata(dev, gpiomtd);
return 0;
@@ -352,6 +452,7 @@ static struct platform_driver gpio_nand_driver = {
.remove = gpio_nand_remove,
.driver = {
.name = "gpio-nand",
+ .of_match_table = gpio_nand_id_table,
},
};
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index de4db76..7db6555 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -69,17 +69,19 @@ static int clear_poll_bit(void __iomem *addr, u32 mask)
* [1] enable the module.
* [2] reset the module.
*
- * In most of the cases, it's ok. But there is a hardware bug in the BCH block.
+ * In most cases this is fine.
+ * But on the MX23 there is a hardware bug in the BCH block (see erratum #2847).
* If you try to soft reset the BCH block, it becomes unusable until
* the next hard reset. This case occurs in the NAND boot mode. When the board
* boots by NAND, the ROM of the chip will initialize the BCH blocks itself.
* So If the driver tries to reset the BCH again, the BCH will not work anymore.
- * You will see a DMA timeout in this case.
+ * You will see a DMA timeout in this case. The bug has been fixed
+ * in later chips, such as the MX28.
*
* To avoid this bug, just add a new parameter `just_enable` for
* the mxs_reset_block(), and rewrite it here.
*/
-int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
+static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
{
int ret;
int timeout = 0x400;
@@ -126,7 +128,7 @@ int gpmi_init(struct gpmi_nand_data *this)
struct resources *r = &this->resources;
int ret;
- ret = clk_enable(r->clock);
+ ret = clk_prepare_enable(r->clock);
if (ret)
goto err_out;
ret = gpmi_reset_block(r->gpmi_regs, false);
@@ -146,7 +148,7 @@ int gpmi_init(struct gpmi_nand_data *this)
/* Select BCH ECC. */
writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
- clk_disable(r->clock);
+ clk_disable_unprepare(r->clock);
return 0;
err_out:
return ret;
@@ -202,11 +204,19 @@ int bch_set_geometry(struct gpmi_nand_data *this)
ecc_strength = bch_geo->ecc_strength >> 1;
page_size = bch_geo->page_size;
- ret = clk_enable(r->clock);
+ ret = clk_prepare_enable(r->clock);
if (ret)
goto err_out;
- ret = gpmi_reset_block(r->bch_regs, true);
+ /*
+ * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
+ * chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
+ * On the other hand, the MX28 needs the reset, because one case has been
+ * seen where the BCH produced ECC errors constantly after 10000
+ * consecutive reboots. The latter case has not been seen on the MX23 yet,
+ * but we do not know whether it could happen there as well.
+ */
+ ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
if (ret)
goto err_out;
@@ -229,7 +239,7 @@ int bch_set_geometry(struct gpmi_nand_data *this)
writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
r->bch_regs + HW_BCH_CTRL_SET);
- clk_disable(r->clock);
+ clk_disable_unprepare(r->clock);
return 0;
err_out:
return ret;
@@ -704,7 +714,7 @@ void gpmi_begin(struct gpmi_nand_data *this)
int ret;
/* Enable the clock. */
- ret = clk_enable(r->clock);
+ ret = clk_prepare_enable(r->clock);
if (ret) {
pr_err("We failed in enable the clk\n");
goto err_out;
@@ -773,7 +783,7 @@ err_out:
void gpmi_end(struct gpmi_nand_data *this)
{
struct resources *r = &this->resources;
- clk_disable(r->clock);
+ clk_disable_unprepare(r->clock);
}
/* Clears a BCH interrupt. */
@@ -827,7 +837,7 @@ int gpmi_send_command(struct gpmi_nand_data *this)
pio[1] = pio[2] = 0;
desc = channel->device->device_prep_slave_sg(channel,
(struct scatterlist *)pio,
- ARRAY_SIZE(pio), DMA_NONE, 0);
+ ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
if (!desc) {
pr_err("step 1 error\n");
return -1;
@@ -839,7 +849,7 @@ int gpmi_send_command(struct gpmi_nand_data *this)
sg_init_one(sgl, this->cmd_buffer, this->command_length);
dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
desc = channel->device->device_prep_slave_sg(channel,
- sgl, 1, DMA_TO_DEVICE, 1);
+ sgl, 1, DMA_MEM_TO_DEV, 1);
if (!desc) {
pr_err("step 2 error\n");
return -1;
@@ -872,7 +882,7 @@ int gpmi_send_data(struct gpmi_nand_data *this)
pio[1] = 0;
desc = channel->device->device_prep_slave_sg(channel,
(struct scatterlist *)pio,
- ARRAY_SIZE(pio), DMA_NONE, 0);
+ ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
if (!desc) {
pr_err("step 1 error\n");
return -1;
@@ -881,7 +891,7 @@ int gpmi_send_data(struct gpmi_nand_data *this)
/* [2] send DMA request */
prepare_data_dma(this, DMA_TO_DEVICE);
desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl,
- 1, DMA_TO_DEVICE, 1);
+ 1, DMA_MEM_TO_DEV, 1);
if (!desc) {
pr_err("step 2 error\n");
return -1;
@@ -908,7 +918,7 @@ int gpmi_read_data(struct gpmi_nand_data *this)
pio[1] = 0;
desc = channel->device->device_prep_slave_sg(channel,
(struct scatterlist *)pio,
- ARRAY_SIZE(pio), DMA_NONE, 0);
+ ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
if (!desc) {
pr_err("step 1 error\n");
return -1;
@@ -917,7 +927,7 @@ int gpmi_read_data(struct gpmi_nand_data *this)
/* [2] : send DMA request */
prepare_data_dma(this, DMA_FROM_DEVICE);
desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl,
- 1, DMA_FROM_DEVICE, 1);
+ 1, DMA_DEV_TO_MEM, 1);
if (!desc) {
pr_err("step 2 error\n");
return -1;
@@ -964,7 +974,7 @@ int gpmi_send_page(struct gpmi_nand_data *this,
desc = channel->device->device_prep_slave_sg(channel,
(struct scatterlist *)pio,
- ARRAY_SIZE(pio), DMA_NONE, 0);
+ ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
if (!desc) {
pr_err("step 2 error\n");
return -1;
@@ -998,7 +1008,8 @@ int gpmi_read_page(struct gpmi_nand_data *this,
| BF_GPMI_CTRL0_XFER_COUNT(0);
pio[1] = 0;
desc = channel->device->device_prep_slave_sg(channel,
- (struct scatterlist *)pio, 2, DMA_NONE, 0);
+ (struct scatterlist *)pio, 2,
+ DMA_TRANS_NONE, 0);
if (!desc) {
pr_err("step 1 error\n");
return -1;
@@ -1027,7 +1038,7 @@ int gpmi_read_page(struct gpmi_nand_data *this,
pio[5] = auxiliary;
desc = channel->device->device_prep_slave_sg(channel,
(struct scatterlist *)pio,
- ARRAY_SIZE(pio), DMA_NONE, 1);
+ ARRAY_SIZE(pio), DMA_TRANS_NONE, 1);
if (!desc) {
pr_err("step 2 error\n");
return -1;
@@ -1045,7 +1056,8 @@ int gpmi_read_page(struct gpmi_nand_data *this,
| BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
pio[1] = 0;
desc = channel->device->device_prep_slave_sg(channel,
- (struct scatterlist *)pio, 2, DMA_NONE, 1);
+ (struct scatterlist *)pio, 2,
+ DMA_TRANS_NONE, 1);
if (!desc) {
pr_err("step 3 error\n");
return -1;
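Note: the clk_enable/clk_disable to clk_prepare_enable/clk_disable_unprepare changes above pair the sleepable prepare/unprepare phase of the common clock framework with the enable/disable phase. Roughly, the combined call behaves like this sketch (illustrative name only):

#include <linux/clk.h>

/* sketch of the combined call: prepare (may sleep), then enable */
static inline int clk_prepare_enable_sketch(struct clk *clk)
{
	int ret = clk_prepare(clk);

	if (ret)
		return ret;
	ret = clk_enable(clk);
	if (ret)
		clk_unprepare(clk);
	return ret;
}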
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index e266407..ac3b9f2 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -423,17 +423,7 @@ static struct platform_driver jz_nand_driver = {
},
};
-static int __init jz_nand_init(void)
-{
- return platform_driver_register(&jz_nand_driver);
-}
-module_init(jz_nand_init);
-
-static void __exit jz_nand_exit(void)
-{
- platform_driver_unregister(&jz_nand_driver);
-}
-module_exit(jz_nand_exit);
+module_platform_driver(jz_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index 5ede647..c240cf1 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -879,19 +879,7 @@ static struct platform_driver mpc5121_nfc_driver = {
},
};
-static int __init mpc5121_nfc_init(void)
-{
- return platform_driver_register(&mpc5121_nfc_driver);
-}
-
-module_init(mpc5121_nfc_init);
-
-static void __exit mpc5121_nfc_cleanup(void)
-{
- platform_driver_unregister(&mpc5121_nfc_driver);
-}
-
-module_exit(mpc5121_nfc_cleanup);
+module_platform_driver(mpc5121_nfc_driver);
MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("MPC5121 NAND MTD driver");
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 3ed9c5e..8a393f9 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -2588,7 +2588,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
instr->state = MTD_ERASING;
while (len) {
- /* Heck if we have a bad block, we do not erase bad blocks! */
+ /* Check if we have a bad block, we do not erase bad blocks! */
if (nand_block_checkbad(mtd, ((loff_t) page) <<
chip->page_shift, 0, allowbbt)) {
pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
@@ -3132,8 +3132,8 @@ ident_done:
* Bad block marker is stored in the last page of each block
* on Samsung and Hynix MLC devices; stored in first two pages
* of each block on Micron devices with 2KiB pages and on
- * SLC Samsung, Hynix, Toshiba and AMD/Spansion. All others scan
- * only the first page.
+ * SLC Samsung, Hynix, Toshiba, AMD/Spansion, and Macronix.
+ * All others scan only the first page.
*/
if ((chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
(*maf_id == NAND_MFR_SAMSUNG ||
@@ -3143,7 +3143,8 @@ ident_done:
(*maf_id == NAND_MFR_SAMSUNG ||
*maf_id == NAND_MFR_HYNIX ||
*maf_id == NAND_MFR_TOSHIBA ||
- *maf_id == NAND_MFR_AMD)) ||
+ *maf_id == NAND_MFR_AMD ||
+ *maf_id == NAND_MFR_MACRONIX)) ||
(mtd->writesize == 2048 &&
*maf_id == NAND_MFR_MICRON))
chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 69148ae..20a112f 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -201,7 +201,7 @@ static int read_bbt(struct mtd_info *mtd, uint8_t *buf, int page, int num,
from += marker_len;
marker_len = 0;
}
- res = mtd->read(mtd, from, len, &retlen, buf);
+ res = mtd_read(mtd, from, len, &retlen, buf);
if (res < 0) {
if (mtd_is_eccerr(res)) {
pr_info("nand_bbt: ECC error in BBT at "
@@ -298,7 +298,7 @@ static int scan_read_raw_data(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
if (td->options & NAND_BBT_VERSION)
len++;
- return mtd->read(mtd, offs, len, &retlen, buf);
+ return mtd_read(mtd, offs, len, &retlen, buf);
}
/* Scan read raw data from flash */
@@ -317,7 +317,7 @@ static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
ops.len = min(len, (size_t)mtd->writesize);
ops.oobbuf = buf + ops.len;
- res = mtd->read_oob(mtd, offs, &ops);
+ res = mtd_read_oob(mtd, offs, &ops);
if (res)
return res;
@@ -350,7 +350,7 @@ static int scan_write_bbt(struct mtd_info *mtd, loff_t offs, size_t len,
ops.oobbuf = oob;
ops.len = len;
- return mtd->write_oob(mtd, offs, &ops);
+ return mtd_write_oob(mtd, offs, &ops);
}
static u32 bbt_get_ver_offs(struct mtd_info *mtd, struct nand_bbt_descr *td)
@@ -434,7 +434,7 @@ static int scan_block_fast(struct mtd_info *mtd, struct nand_bbt_descr *bd,
* Read the full oob until read_oob is fixed to handle single
* byte reads for 16 bit buswidth.
*/
- ret = mtd->read_oob(mtd, offs, &ops);
+ ret = mtd_read_oob(mtd, offs, &ops);
/* Ignore ECC errors when checking for BBM */
if (ret && !mtd_is_bitflip_or_eccerr(ret))
return ret;
@@ -756,7 +756,7 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
/* Make it block aligned */
to &= ~((loff_t)((1 << this->bbt_erase_shift) - 1));
len = 1 << this->bbt_erase_shift;
- res = mtd->read(mtd, to, len, &retlen, buf);
+ res = mtd_read(mtd, to, len, &retlen, buf);
if (res < 0) {
if (retlen != len) {
pr_info("nand_bbt: error reading block "
@@ -769,7 +769,7 @@ static int write_bbt(struct mtd_info *mtd, uint8_t *buf,
/* Read oob data */
ops.ooblen = (len >> this->page_shift) * mtd->oobsize;
ops.oobbuf = &buf[len];
- res = mtd->read_oob(mtd, to + mtd->writesize, &ops);
+ res = mtd_read_oob(mtd, to + mtd->writesize, &ops);
if (res < 0 || ops.oobretlen != ops.ooblen)
goto outerr;
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 00cf1b0..af4fe8c 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -73,11 +73,12 @@ struct nand_flash_dev nand_flash_ids[] = {
#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY | NAND_NO_AUTOINCR)
#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
- /*512 Megabit */
+ /* 512 Megabit */
{"NAND 64MiB 1,8V 8-bit", 0xA2, 0, 64, 0, LP_OPTIONS},
{"NAND 64MiB 1,8V 8-bit", 0xA0, 0, 64, 0, LP_OPTIONS},
{"NAND 64MiB 3,3V 8-bit", 0xF2, 0, 64, 0, LP_OPTIONS},
{"NAND 64MiB 3,3V 8-bit", 0xD0, 0, 64, 0, LP_OPTIONS},
+ {"NAND 64MiB 3,3V 8-bit", 0xF0, 0, 64, 0, LP_OPTIONS},
{"NAND 64MiB 1,8V 16-bit", 0xB2, 0, 64, 0, LP_OPTIONS16},
{"NAND 64MiB 1,8V 16-bit", 0xB0, 0, 64, 0, LP_OPTIONS16},
{"NAND 64MiB 3,3V 16-bit", 0xC2, 0, 64, 0, LP_OPTIONS16},
@@ -176,6 +177,7 @@ struct nand_manufacturers nand_manuf_ids[] = {
{NAND_MFR_HYNIX, "Hynix"},
{NAND_MFR_MICRON, "Micron"},
{NAND_MFR_AMD, "AMD"},
+ {NAND_MFR_MACRONIX, "Macronix"},
{0x0, "Unknown"}
};
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 34c03be..261f478 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -737,7 +737,7 @@ static int parse_badblocks(struct nandsim *ns, struct mtd_info *mtd)
return -EINVAL;
}
offset = erase_block_no * ns->geom.secsz;
- if (mtd->block_markbad(mtd, offset)) {
+ if (mtd_block_markbad(mtd, offset)) {
NS_ERR("invalid badblocks.\n");
return -EINVAL;
}
diff --git a/drivers/mtd/nand/ndfc.c b/drivers/mtd/nand/ndfc.c
index f8aacf4..ec68854 100644
--- a/drivers/mtd/nand/ndfc.c
+++ b/drivers/mtd/nand/ndfc.c
@@ -294,18 +294,7 @@ static struct platform_driver ndfc_driver = {
.remove = __devexit_p(ndfc_remove),
};
-static int __init ndfc_nand_init(void)
-{
- return platform_driver_register(&ndfc_driver);
-}
-
-static void __exit ndfc_nand_exit(void)
-{
- platform_driver_unregister(&ndfc_driver);
-}
-
-module_init(ndfc_nand_init);
-module_exit(ndfc_nand_exit);
+module_platform_driver(ndfc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
index b463ecf..a86aa81 100644
--- a/drivers/mtd/nand/nomadik_nand.c
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -201,7 +201,7 @@ static int nomadik_nand_suspend(struct device *dev)
struct nomadik_nand_host *host = dev_get_drvdata(dev);
int ret = 0;
if (host)
- ret = host->mtd.suspend(&host->mtd);
+ ret = mtd_suspend(&host->mtd);
return ret;
}
@@ -209,7 +209,7 @@ static int nomadik_nand_resume(struct device *dev)
{
struct nomadik_nand_host *host = dev_get_drvdata(dev);
if (host)
- host->mtd.resume(&host->mtd);
+ mtd_resume(&host->mtd);
return 0;
}
@@ -228,19 +228,7 @@ static struct platform_driver nomadik_nand_driver = {
},
};
-static int __init nand_nomadik_init(void)
-{
- pr_info("Nomadik NAND driver\n");
- return platform_driver_register(&nomadik_nand_driver);
-}
-
-static void __exit nand_nomadik_exit(void)
-{
- platform_driver_unregister(&nomadik_nand_driver);
-}
-
-module_init(nand_nomadik_init);
-module_exit(nand_nomadik_exit);
+module_platform_driver(nomadik_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("ST Microelectronics (sachin.verma@st.com)");
diff --git a/drivers/mtd/nand/nuc900_nand.c b/drivers/mtd/nand/nuc900_nand.c
index fa8faed..8febe46 100644
--- a/drivers/mtd/nand/nuc900_nand.c
+++ b/drivers/mtd/nand/nuc900_nand.c
@@ -364,18 +364,7 @@ static struct platform_driver nuc900_nand_driver = {
},
};
-static int __init nuc900_nand_init(void)
-{
- return platform_driver_register(&nuc900_nand_driver);
-}
-
-static void __exit nuc900_nand_exit(void)
-{
- platform_driver_unregister(&nuc900_nand_driver);
-}
-
-module_init(nuc900_nand_init);
-module_exit(nuc900_nand_exit);
+module_platform_driver(nuc900_nand_driver);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("w90p910/NUC9xx nand driver!");
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index f745f00..b3a883e 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1145,20 +1145,7 @@ static struct platform_driver omap_nand_driver = {
},
};
-static int __init omap_nand_init(void)
-{
- pr_info("%s driver initializing\n", DRIVER_NAME);
-
- return platform_driver_register(&omap_nand_driver);
-}
-
-static void __exit omap_nand_exit(void)
-{
- platform_driver_unregister(&omap_nand_driver);
-}
-
-module_init(omap_nand_init);
-module_exit(omap_nand_exit);
+module_platform_driver(omap_nand_driver);
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL");
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index a97264e..974dbf8 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -230,17 +230,7 @@ static struct platform_driver pasemi_nand_driver =
.remove = pasemi_nand_remove,
};
-static int __init pasemi_nand_init(void)
-{
- return platform_driver_register(&pasemi_nand_driver);
-}
-module_init(pasemi_nand_init);
-
-static void __exit pasemi_nand_exit(void)
-{
- platform_driver_unregister(&pasemi_nand_driver);
-}
-module_exit(pasemi_nand_exit);
+module_platform_driver(pasemi_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>");
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index ea8e123..7f2da69 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -148,18 +148,7 @@ static struct platform_driver plat_nand_driver = {
},
};
-static int __init plat_nand_init(void)
-{
- return platform_driver_register(&plat_nand_driver);
-}
-
-static void __exit plat_nand_exit(void)
-{
- platform_driver_unregister(&plat_nand_driver);
-}
-
-module_init(plat_nand_init);
-module_exit(plat_nand_exit);
+module_platform_driver(plat_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vitaly Wool");
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 9eb7f87..5c3d719 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -185,7 +185,7 @@ struct pxa3xx_nand_info {
uint32_t ndcb2;
};
-static int use_dma = 1;
+static bool use_dma = 1;
module_param(use_dma, bool, 0444);
MODULE_PARM_DESC(use_dma, "enable DMA for data transferring to/from NAND HW");
@@ -1258,7 +1258,7 @@ static int pxa3xx_nand_suspend(struct platform_device *pdev, pm_message_t state)
for (cs = 0; cs < pdata->num_cs; cs++) {
mtd = info->host[cs]->mtd;
- mtd->suspend(mtd);
+ mtd_suspend(mtd);
}
return 0;
@@ -1291,7 +1291,7 @@ static int pxa3xx_nand_resume(struct platform_device *pdev)
nand_writel(info, NDSR, NDSR_MASK);
for (cs = 0; cs < pdata->num_cs; cs++) {
mtd = info->host[cs]->mtd;
- mtd->resume(mtd);
+ mtd_resume(mtd);
}
return 0;
@@ -1311,17 +1311,7 @@ static struct platform_driver pxa3xx_nand_driver = {
.resume = pxa3xx_nand_resume,
};
-static int __init pxa3xx_nand_init(void)
-{
- return platform_driver_register(&pxa3xx_nand_driver);
-}
-module_init(pxa3xx_nand_init);
-
-static void __exit pxa3xx_nand_exit(void)
-{
- platform_driver_unregister(&pxa3xx_nand_driver);
-}
-module_exit(pxa3xx_nand_exit);
+module_platform_driver(pxa3xx_nand_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PXA3xx NAND controller driver");
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index f20f393..769a4e0 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -22,7 +22,7 @@
#include "r852.h"
-static int r852_enable_dma = 1;
+static bool r852_enable_dma = 1;
module_param(r852_enable_dma, bool, S_IRUGO);
MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)");
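Note: this hunk and the pxa3xx_nand one switch the backing variable to bool, while the sm_ftl hunk later switches the parameter type to int instead; in both cases the fix is making the C variable type match the declared module_param() type. For the bool case the declaration takes this shape (my_enable_dma is an illustrative name, not part of the patch):

#include <linux/module.h>
#include <linux/moduleparam.h>

/* a bool module parameter must be backed by a C bool variable */
static bool my_enable_dma = true;
module_param(my_enable_dma, bool, 0444);
MODULE_PARM_DESC(my_enable_dma, "Enable DMA (illustration)");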
diff --git a/drivers/mtd/nand/sharpsl.c b/drivers/mtd/nand/sharpsl.c
index 619d2a5..b175c0f 100644
--- a/drivers/mtd/nand/sharpsl.c
+++ b/drivers/mtd/nand/sharpsl.c
@@ -230,17 +230,7 @@ static struct platform_driver sharpsl_nand_driver = {
.remove = __devexit_p(sharpsl_nand_remove),
};
-static int __init sharpsl_nand_init(void)
-{
- return platform_driver_register(&sharpsl_nand_driver);
-}
-module_init(sharpsl_nand_init);
-
-static void __exit sharpsl_nand_exit(void)
-{
- platform_driver_unregister(&sharpsl_nand_driver);
-}
-module_exit(sharpsl_nand_exit);
+module_platform_driver(sharpsl_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
index 32ae5af..774c3c2 100644
--- a/drivers/mtd/nand/sm_common.c
+++ b/drivers/mtd/nand/sm_common.c
@@ -55,7 +55,7 @@ static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs)
ops.datbuf = NULL;
- ret = mtd->write_oob(mtd, ofs, &ops);
+ ret = mtd_write_oob(mtd, ofs, &ops);
if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) {
printk(KERN_NOTICE
"sm_common: can't mark sector at %i as bad\n",
diff --git a/drivers/mtd/nand/socrates_nand.c b/drivers/mtd/nand/socrates_nand.c
index 0fb24f9..e02b08b 100644
--- a/drivers/mtd/nand/socrates_nand.c
+++ b/drivers/mtd/nand/socrates_nand.c
@@ -273,18 +273,7 @@ static struct platform_driver socrates_nand_driver = {
.remove = __devexit_p(socrates_nand_remove),
};
-static int __init socrates_nand_init(void)
-{
- return platform_driver_register(&socrates_nand_driver);
-}
-
-static void __exit socrates_nand_exit(void)
-{
- platform_driver_unregister(&socrates_nand_driver);
-}
-
-module_init(socrates_nand_init);
-module_exit(socrates_nand_exit);
+module_platform_driver(socrates_nand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ilya Yanok");
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c
index beebd95..6caa0cd 100644
--- a/drivers/mtd/nand/tmio_nand.c
+++ b/drivers/mtd/nand/tmio_nand.c
@@ -533,18 +533,7 @@ static struct platform_driver tmio_driver = {
.resume = tmio_resume,
};
-static int __init tmio_init(void)
-{
- return platform_driver_register(&tmio_driver);
-}
-
-static void __exit tmio_exit(void)
-{
- platform_driver_unregister(&tmio_driver);
-}
-
-module_init(tmio_init);
-module_exit(tmio_exit);
+module_platform_driver(tmio_driver);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Ian Molton, Dirk Opfer, Chris Humbert, Dmitry Baryshkov");
diff --git a/drivers/mtd/nand/txx9ndfmc.c b/drivers/mtd/nand/txx9ndfmc.c
index ace46fd..c7c4f1d 100644
--- a/drivers/mtd/nand/txx9ndfmc.c
+++ b/drivers/mtd/nand/txx9ndfmc.c
@@ -298,11 +298,7 @@ static int __init txx9ndfmc_probe(struct platform_device *dev)
drvdata = devm_kzalloc(&dev->dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
- if (!devm_request_mem_region(&dev->dev, res->start,
- resource_size(res), dev_name(&dev->dev)))
- return -EBUSY;
- drvdata->base = devm_ioremap(&dev->dev, res->start,
- resource_size(res));
+ drvdata->base = devm_request_and_ioremap(&dev->dev, res);
if (!drvdata->base)
return -EBUSY;
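Note: devm_request_and_ioremap() folds together the two managed calls the old txx9ndfmc code made by hand; approximately as in this sketch (request_and_ioremap_sketch() is an illustrative name, not a kernel API):

#include <linux/device.h>
#include <linux/io.h>
#include <linux/ioport.h>

/* sketch: claim the MEM resource, then ioremap it, both device-managed */
static void __iomem *request_and_ioremap_sketch(struct device *dev,
						struct resource *res)
{
	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				     dev_name(dev)))
		return NULL;

	return devm_ioremap(dev, res->start, resource_size(res));
}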
diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
index cda77b5..a75382a 100644
--- a/drivers/mtd/nftlcore.c
+++ b/drivers/mtd/nftlcore.c
@@ -56,7 +56,7 @@ static void nftl_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd)
if (memcmp(mtd->name, "DiskOnChip", 10))
return;
- if (!mtd->block_isbad) {
+ if (!mtd_can_have_bb(mtd)) {
printk(KERN_ERR
"NFTL no longer supports the old DiskOnChip drivers loaded via docprobe.\n"
"Please use the new diskonchip driver under the NAND subsystem.\n");
@@ -153,7 +153,7 @@ int nftl_read_oob(struct mtd_info *mtd, loff_t offs, size_t len,
ops.oobbuf = buf;
ops.datbuf = NULL;
- res = mtd->read_oob(mtd, offs & ~mask, &ops);
+ res = mtd_read_oob(mtd, offs & ~mask, &ops);
*retlen = ops.oobretlen;
return res;
}
@@ -174,7 +174,7 @@ int nftl_write_oob(struct mtd_info *mtd, loff_t offs, size_t len,
ops.oobbuf = buf;
ops.datbuf = NULL;
- res = mtd->write_oob(mtd, offs & ~mask, &ops);
+ res = mtd_write_oob(mtd, offs & ~mask, &ops);
*retlen = ops.oobretlen;
return res;
}
@@ -198,7 +198,7 @@ static int nftl_write(struct mtd_info *mtd, loff_t offs, size_t len,
ops.datbuf = buf;
ops.len = len;
- res = mtd->write_oob(mtd, offs & ~mask, &ops);
+ res = mtd_write_oob(mtd, offs & ~mask, &ops);
*retlen = ops.retlen;
return res;
}
@@ -423,12 +423,17 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
if (BlockMap[block] == BLOCK_NIL)
continue;
- ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block]) + (block * 512),
- 512, &retlen, movebuf);
+ ret = mtd_read(mtd,
+ (nftl->EraseSize * BlockMap[block]) + (block * 512),
+ 512,
+ &retlen,
+ movebuf);
if (ret < 0 && !mtd_is_bitflip(ret)) {
- ret = mtd->read(mtd, (nftl->EraseSize * BlockMap[block])
- + (block * 512), 512, &retlen,
- movebuf);
+ ret = mtd_read(mtd,
+ (nftl->EraseSize * BlockMap[block]) + (block * 512),
+ 512,
+ &retlen,
+ movebuf);
if (ret != -EIO)
printk("Error went away on retry.\n");
}
@@ -771,7 +776,7 @@ static int nftl_readblock(struct mtd_blktrans_dev *mbd, unsigned long block,
} else {
loff_t ptr = (lastgoodEUN * nftl->EraseSize) + blockofs;
size_t retlen;
- int res = mtd->read(mtd, ptr, 512, &retlen, buffer);
+ int res = mtd_read(mtd, ptr, 512, &retlen, buffer);
if (res < 0 && !mtd_is_bitflip(res))
return -EIO;
diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
index ac40925..51b9d6a 100644
--- a/drivers/mtd/nftlmount.c
+++ b/drivers/mtd/nftlmount.c
@@ -63,8 +63,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
/* Check for ANAND header first. Then can whinge if it's found but later
checks fail */
- ret = mtd->read(mtd, block * nftl->EraseSize, SECTORSIZE,
- &retlen, buf);
+ ret = mtd_read(mtd, block * nftl->EraseSize, SECTORSIZE,
+ &retlen, buf);
/* We ignore ret in case the ECC of the MediaHeader is invalid
(which is apparently acceptable) */
if (retlen != SECTORSIZE) {
@@ -242,7 +242,8 @@ The new DiskOnChip driver already scanned the bad block table. Just query it.
if (buf[i & (SECTORSIZE - 1)] != 0xff)
nftl->ReplUnitTable[i] = BLOCK_RESERVED;
#endif
- if (nftl->mbd.mtd->block_isbad(nftl->mbd.mtd, i * nftl->EraseSize))
+ if (mtd_block_isbad(nftl->mbd.mtd,
+ i * nftl->EraseSize))
nftl->ReplUnitTable[i] = BLOCK_RESERVED;
}
@@ -274,7 +275,7 @@ static int check_free_sectors(struct NFTLrecord *nftl, unsigned int address, int
int i;
for (i = 0; i < len; i += SECTORSIZE) {
- if (mtd->read(mtd, address, SECTORSIZE, &retlen, buf))
+ if (mtd_read(mtd, address, SECTORSIZE, &retlen, buf))
return -1;
if (memcmpb(buf, 0xff, SECTORSIZE) != 0)
return -1;
@@ -326,7 +327,7 @@ int NFTL_formatblock(struct NFTLrecord *nftl, int block)
instr->mtd = nftl->mbd.mtd;
instr->addr = block * nftl->EraseSize;
instr->len = nftl->EraseSize;
- mtd->erase(mtd, instr);
+ mtd_erase(mtd, instr);
if (instr->state == MTD_ERASE_FAILED) {
printk("Error while formatting block %d\n", block);
@@ -355,7 +356,7 @@ int NFTL_formatblock(struct NFTLrecord *nftl, int block)
fail:
/* could not format, update the bad block table (caller is responsible
for setting the ReplUnitTable to BLOCK_RESERVED on failure) */
- nftl->mbd.mtd->block_markbad(nftl->mbd.mtd, instr->addr);
+ mtd_block_markbad(nftl->mbd.mtd, instr->addr);
return -1;
}
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index 7813095..0ccd5bf 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -115,21 +115,9 @@ static struct platform_driver generic_onenand_driver = {
.remove = __devexit_p(generic_onenand_remove),
};
-MODULE_ALIAS("platform:" DRIVER_NAME);
-
-static int __init generic_onenand_init(void)
-{
- return platform_driver_register(&generic_onenand_driver);
-}
-
-static void __exit generic_onenand_exit(void)
-{
- platform_driver_unregister(&generic_onenand_driver);
-}
-
-module_init(generic_onenand_init);
-module_exit(generic_onenand_exit);
+module_platform_driver(generic_onenand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
MODULE_DESCRIPTION("Glue layer for OneNAND flash on generic boards");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index a839473..a061bc1 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -2633,7 +2633,6 @@ static int onenand_default_block_markbad(struct mtd_info *mtd, loff_t ofs)
*/
static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
- struct onenand_chip *this = mtd->priv;
int ret;
ret = onenand_block_isbad(mtd, ofs);
@@ -2645,7 +2644,7 @@ static int onenand_block_markbad(struct mtd_info *mtd, loff_t ofs)
}
onenand_get_device(mtd, FL_WRITING);
- ret = this->block_markbad(mtd, ofs);
+ ret = mtd_block_markbad(mtd, ofs);
onenand_release_device(mtd);
return ret;
}
diff --git a/drivers/mtd/onenand/samsung.c b/drivers/mtd/onenand/samsung.c
index 5474547..fa1ee43 100644
--- a/drivers/mtd/onenand/samsung.c
+++ b/drivers/mtd/onenand/samsung.c
@@ -1133,18 +1133,7 @@ static struct platform_driver s3c_onenand_driver = {
.remove = __devexit_p(s3c_onenand_remove),
};
-static int __init s3c_onenand_init(void)
-{
- return platform_driver_register(&s3c_onenand_driver);
-}
-
-static void __exit s3c_onenand_exit(void)
-{
- platform_driver_unregister(&s3c_onenand_driver);
-}
-
-module_init(s3c_onenand_init);
-module_exit(s3c_onenand_exit);
+module_platform_driver(s3c_onenand_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kyungmin Park <kyungmin.park@samsung.com>");
diff --git a/drivers/mtd/redboot.c b/drivers/mtd/redboot.c
index e366b1d..48970c1 100644
--- a/drivers/mtd/redboot.c
+++ b/drivers/mtd/redboot.c
@@ -78,8 +78,8 @@ static int parse_redboot_partitions(struct mtd_info *master,
if ( directory < 0 ) {
offset = master->size + directory * master->erasesize;
- while (master->block_isbad &&
- master->block_isbad(master, offset)) {
+ while (mtd_can_have_bb(master) &&
+ mtd_block_isbad(master, offset)) {
if (!offset) {
nogood:
printk(KERN_NOTICE "Failed to find a non-bad block to check for RedBoot partition table\n");
@@ -89,8 +89,8 @@ static int parse_redboot_partitions(struct mtd_info *master,
}
} else {
offset = directory * master->erasesize;
- while (master->block_isbad &&
- master->block_isbad(master, offset)) {
+ while (mtd_can_have_bb(master) &&
+ mtd_block_isbad(master, offset)) {
offset += master->erasesize;
if (offset == master->size)
goto nogood;
@@ -104,8 +104,8 @@ static int parse_redboot_partitions(struct mtd_info *master,
printk(KERN_NOTICE "Searching for RedBoot partition table in %s at offset 0x%lx\n",
master->name, offset);
- ret = master->read(master, offset,
- master->erasesize, &retlen, (void *)buf);
+ ret = mtd_read(master, offset, master->erasesize, &retlen,
+ (void *)buf);
if (ret)
goto out;
diff --git a/drivers/mtd/rfd_ftl.c b/drivers/mtd/rfd_ftl.c
index 73ae217..233b946 100644
--- a/drivers/mtd/rfd_ftl.c
+++ b/drivers/mtd/rfd_ftl.c
@@ -200,9 +200,9 @@ static int scan_header(struct partition *part)
part->sector_map[i] = -1;
for (i=0, blocks_found=0; i<part->total_blocks; i++) {
- rc = part->mbd.mtd->read(part->mbd.mtd,
- i * part->block_size, part->header_size,
- &retlen, (u_char*)part->header_cache);
+ rc = mtd_read(part->mbd.mtd, i * part->block_size,
+ part->header_size, &retlen,
+ (u_char *)part->header_cache);
if (!rc && retlen != part->header_size)
rc = -EIO;
@@ -250,8 +250,8 @@ static int rfd_ftl_readsect(struct mtd_blktrans_dev *dev, u_long sector, char *b
addr = part->sector_map[sector];
if (addr != -1) {
- rc = part->mbd.mtd->read(part->mbd.mtd, addr, SECTOR_SIZE,
- &retlen, (u_char*)buf);
+ rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
+ (u_char *)buf);
if (!rc && retlen != SECTOR_SIZE)
rc = -EIO;
@@ -304,9 +304,8 @@ static void erase_callback(struct erase_info *erase)
part->blocks[i].used_sectors = 0;
part->blocks[i].erases++;
- rc = part->mbd.mtd->write(part->mbd.mtd,
- part->blocks[i].offset, sizeof(magic), &retlen,
- (u_char*)&magic);
+ rc = mtd_write(part->mbd.mtd, part->blocks[i].offset, sizeof(magic),
+ &retlen, (u_char *)&magic);
if (!rc && retlen != sizeof(magic))
rc = -EIO;
@@ -342,7 +341,7 @@ static int erase_block(struct partition *part, int block)
part->blocks[block].state = BLOCK_ERASING;
part->blocks[block].free_sectors = 0;
- rc = part->mbd.mtd->erase(part->mbd.mtd, erase);
+ rc = mtd_erase(part->mbd.mtd, erase);
if (rc) {
printk(KERN_ERR PREFIX "erase of region %llx,%llx on '%s' "
@@ -372,9 +371,8 @@ static int move_block_contents(struct partition *part, int block_no, u_long *old
if (!map)
goto err2;
- rc = part->mbd.mtd->read(part->mbd.mtd,
- part->blocks[block_no].offset, part->header_size,
- &retlen, (u_char*)map);
+ rc = mtd_read(part->mbd.mtd, part->blocks[block_no].offset,
+ part->header_size, &retlen, (u_char *)map);
if (!rc && retlen != part->header_size)
rc = -EIO;
@@ -413,8 +411,8 @@ static int move_block_contents(struct partition *part, int block_no, u_long *old
}
continue;
}
- rc = part->mbd.mtd->read(part->mbd.mtd, addr,
- SECTOR_SIZE, &retlen, sector_data);
+ rc = mtd_read(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
+ sector_data);
if (!rc && retlen != SECTOR_SIZE)
rc = -EIO;
@@ -450,8 +448,7 @@ static int reclaim_block(struct partition *part, u_long *old_sector)
int rc;
/* we have a race if sync doesn't exist */
- if (part->mbd.mtd->sync)
- part->mbd.mtd->sync(part->mbd.mtd);
+ mtd_sync(part->mbd.mtd);
score = 0x7fffffff; /* MAX_INT */
best_block = -1;
@@ -563,8 +560,9 @@ static int find_writable_block(struct partition *part, u_long *old_sector)
}
}
- rc = part->mbd.mtd->read(part->mbd.mtd, part->blocks[block].offset,
- part->header_size, &retlen, (u_char*)part->header_cache);
+ rc = mtd_read(part->mbd.mtd, part->blocks[block].offset,
+ part->header_size, &retlen,
+ (u_char *)part->header_cache);
if (!rc && retlen != part->header_size)
rc = -EIO;
@@ -595,8 +593,8 @@ static int mark_sector_deleted(struct partition *part, u_long old_addr)
addr = part->blocks[block].offset +
(HEADER_MAP_OFFSET + offset) * sizeof(u16);
- rc = part->mbd.mtd->write(part->mbd.mtd, addr,
- sizeof(del), &retlen, (u_char*)&del);
+ rc = mtd_write(part->mbd.mtd, addr, sizeof(del), &retlen,
+ (u_char *)&del);
if (!rc && retlen != sizeof(del))
rc = -EIO;
@@ -668,8 +666,8 @@ static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf,
addr = (i + part->header_sectors_per_block) * SECTOR_SIZE +
block->offset;
- rc = part->mbd.mtd->write(part->mbd.mtd,
- addr, SECTOR_SIZE, &retlen, (u_char*)buf);
+ rc = mtd_write(part->mbd.mtd, addr, SECTOR_SIZE, &retlen,
+ (u_char *)buf);
if (!rc && retlen != SECTOR_SIZE)
rc = -EIO;
@@ -688,8 +686,8 @@ static int do_writesect(struct mtd_blktrans_dev *dev, u_long sector, char *buf,
part->header_cache[i + HEADER_MAP_OFFSET] = entry;
addr = block->offset + (HEADER_MAP_OFFSET + i) * sizeof(u16);
- rc = part->mbd.mtd->write(part->mbd.mtd, addr,
- sizeof(entry), &retlen, (u_char*)&entry);
+ rc = mtd_write(part->mbd.mtd, addr, sizeof(entry), &retlen,
+ (u_char *)&entry);
if (!rc && retlen != sizeof(entry))
rc = -EIO;
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index fddb714..072ed59 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -25,7 +25,7 @@
struct workqueue_struct *cache_flush_workqueue;
static int cache_timeout = 1000;
-module_param(cache_timeout, bool, S_IRUGO);
+module_param(cache_timeout, int, S_IRUGO);
MODULE_PARM_DESC(cache_timeout,
"Timeout (in ms) for cache flush (1000 ms default");
@@ -278,7 +278,7 @@ again:
/* Unfortunately, oob read will _always_ succeed,
despite card removal..... */
- ret = mtd->read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
+ ret = mtd_read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
/* Test for unknown errors */
if (ret != 0 && !mtd_is_bitflip_or_eccerr(ret)) {
@@ -343,7 +343,7 @@ static int sm_write_sector(struct sm_ftl *ftl,
ops.ooblen = SM_OOB_SIZE;
ops.oobbuf = (void *)oob;
- ret = mtd->write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
+ ret = mtd_write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);
/* Now we assume that hardware will catch write bitflip errors */
/* If you are paranoid, use CONFIG_MTD_NAND_VERIFY_WRITE */
@@ -479,7 +479,7 @@ static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
return -EIO;
}
- if (mtd->erase(mtd, &erase)) {
+ if (mtd_erase(mtd, &erase)) {
sm_printk("erase of block %d in zone %d failed",
block, zone_num);
goto error;
@@ -645,8 +645,8 @@ int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE)
return -ENODEV;
- /* We use these functions for IO */
- if (!mtd->read_oob || !mtd->write_oob)
+ /* We use OOB */
+ if (!mtd_has_oob(mtd))
return -ENODEV;
/* Find geometry information */
diff --git a/drivers/mtd/ssfdc.c b/drivers/mtd/ssfdc.c
index 976e3d2..ab2a52a 100644
--- a/drivers/mtd/ssfdc.c
+++ b/drivers/mtd/ssfdc.c
@@ -122,9 +122,9 @@ static int get_valid_cis_sector(struct mtd_info *mtd)
* is not SSFDC formatted
*/
for (k = 0, offset = 0; k < 4; k++, offset += mtd->erasesize) {
- if (!mtd->block_isbad(mtd, offset)) {
- ret = mtd->read(mtd, offset, SECTOR_SIZE, &retlen,
- sect_buf);
+		if (!mtd_block_isbad(mtd, offset)) {
+ ret = mtd_read(mtd, offset, SECTOR_SIZE, &retlen,
+ sect_buf);
/* CIS pattern match on the sector buffer */
if (ret < 0 || retlen != SECTOR_SIZE) {
@@ -156,7 +156,7 @@ static int read_physical_sector(struct mtd_info *mtd, uint8_t *sect_buf,
size_t retlen;
loff_t offset = (loff_t)sect_no << SECTOR_SHIFT;
- ret = mtd->read(mtd, offset, SECTOR_SIZE, &retlen, sect_buf);
+ ret = mtd_read(mtd, offset, SECTOR_SIZE, &retlen, sect_buf);
if (ret < 0 || retlen != SECTOR_SIZE)
return -1;
@@ -175,7 +175,7 @@ static int read_raw_oob(struct mtd_info *mtd, loff_t offs, uint8_t *buf)
ops.oobbuf = buf;
ops.datbuf = NULL;
- ret = mtd->read_oob(mtd, offs, &ops);
+ ret = mtd_read_oob(mtd, offs, &ops);
if (ret < 0 || ops.oobretlen != OOB_SIZE)
return -1;
@@ -255,7 +255,7 @@ static int build_logical_block_map(struct ssfdcr_record *ssfdc)
for (phys_block = ssfdc->cis_block + 1; phys_block < ssfdc->map_len;
phys_block++) {
offset = (unsigned long)phys_block * ssfdc->erase_size;
- if (mtd->block_isbad(mtd, offset))
+ if (mtd_block_isbad(mtd, offset))
continue; /* skip bad blocks */
ret = read_raw_oob(mtd, offset, oob_buf);
diff --git a/drivers/mtd/tests/mtd_oobtest.c b/drivers/mtd/tests/mtd_oobtest.c
index 933f7e5..ed9b628 100644
--- a/drivers/mtd/tests/mtd_oobtest.c
+++ b/drivers/mtd/tests/mtd_oobtest.c
@@ -78,7 +78,7 @@ static int erase_eraseblock(int ebnum)
ei.addr = addr;
ei.len = mtd->erasesize;
- err = mtd->erase(mtd, &ei);
+ err = mtd_erase(mtd, &ei);
if (err) {
printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
return err;
@@ -139,7 +139,7 @@ static int write_eraseblock(int ebnum)
ops.ooboffs = use_offset;
ops.datbuf = NULL;
ops.oobbuf = writebuf;
- err = mtd->write_oob(mtd, addr, &ops);
+ err = mtd_write_oob(mtd, addr, &ops);
if (err || ops.oobretlen != use_len) {
printk(PRINT_PREF "error: writeoob failed at %#llx\n",
(long long)addr);
@@ -192,7 +192,7 @@ static int verify_eraseblock(int ebnum)
ops.ooboffs = use_offset;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
- err = mtd->read_oob(mtd, addr, &ops);
+ err = mtd_read_oob(mtd, addr, &ops);
if (err || ops.oobretlen != use_len) {
printk(PRINT_PREF "error: readoob failed at %#llx\n",
(long long)addr);
@@ -219,7 +219,7 @@ static int verify_eraseblock(int ebnum)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
- err = mtd->read_oob(mtd, addr, &ops);
+ err = mtd_read_oob(mtd, addr, &ops);
if (err || ops.oobretlen != mtd->ecclayout->oobavail) {
printk(PRINT_PREF "error: readoob failed at "
"%#llx\n", (long long)addr);
@@ -284,7 +284,7 @@ static int verify_eraseblock_in_one_go(int ebnum)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
- err = mtd->read_oob(mtd, addr, &ops);
+ err = mtd_read_oob(mtd, addr, &ops);
if (err || ops.oobretlen != len) {
printk(PRINT_PREF "error: readoob failed at %#llx\n",
(long long)addr);
@@ -329,7 +329,7 @@ static int is_block_bad(int ebnum)
int ret;
loff_t addr = ebnum * mtd->erasesize;
- ret = mtd->block_isbad(mtd, addr);
+ ret = mtd_block_isbad(mtd, addr);
if (ret)
printk(PRINT_PREF "block %d is bad\n", ebnum);
return ret;
@@ -524,7 +524,7 @@ static int __init mtd_oobtest_init(void)
ops.oobbuf = writebuf;
printk(PRINT_PREF "attempting to start write past end of OOB\n");
printk(PRINT_PREF "an error is expected...\n");
- err = mtd->write_oob(mtd, addr0, &ops);
+ err = mtd_write_oob(mtd, addr0, &ops);
if (err) {
printk(PRINT_PREF "error occurred as expected\n");
err = 0;
@@ -544,7 +544,7 @@ static int __init mtd_oobtest_init(void)
ops.oobbuf = readbuf;
printk(PRINT_PREF "attempting to start read past end of OOB\n");
printk(PRINT_PREF "an error is expected...\n");
- err = mtd->read_oob(mtd, addr0, &ops);
+ err = mtd_read_oob(mtd, addr0, &ops);
if (err) {
printk(PRINT_PREF "error occurred as expected\n");
err = 0;
@@ -568,7 +568,7 @@ static int __init mtd_oobtest_init(void)
ops.oobbuf = writebuf;
printk(PRINT_PREF "attempting to write past end of device\n");
printk(PRINT_PREF "an error is expected...\n");
- err = mtd->write_oob(mtd, mtd->size - mtd->writesize, &ops);
+ err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
printk(PRINT_PREF "error occurred as expected\n");
err = 0;
@@ -588,7 +588,7 @@ static int __init mtd_oobtest_init(void)
ops.oobbuf = readbuf;
printk(PRINT_PREF "attempting to read past end of device\n");
printk(PRINT_PREF "an error is expected...\n");
- err = mtd->read_oob(mtd, mtd->size - mtd->writesize, &ops);
+ err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
printk(PRINT_PREF "error occurred as expected\n");
err = 0;
@@ -612,7 +612,7 @@ static int __init mtd_oobtest_init(void)
ops.oobbuf = writebuf;
printk(PRINT_PREF "attempting to write past end of device\n");
printk(PRINT_PREF "an error is expected...\n");
- err = mtd->write_oob(mtd, mtd->size - mtd->writesize, &ops);
+ err = mtd_write_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
printk(PRINT_PREF "error occurred as expected\n");
err = 0;
@@ -632,7 +632,7 @@ static int __init mtd_oobtest_init(void)
ops.oobbuf = readbuf;
printk(PRINT_PREF "attempting to read past end of device\n");
printk(PRINT_PREF "an error is expected...\n");
- err = mtd->read_oob(mtd, mtd->size - mtd->writesize, &ops);
+ err = mtd_read_oob(mtd, mtd->size - mtd->writesize, &ops);
if (err) {
printk(PRINT_PREF "error occurred as expected\n");
err = 0;
@@ -670,7 +670,7 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = writebuf;
- err = mtd->write_oob(mtd, addr, &ops);
+ err = mtd_write_oob(mtd, addr, &ops);
if (err)
goto out;
if (i % 256 == 0)
@@ -698,7 +698,7 @@ static int __init mtd_oobtest_init(void)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = readbuf;
- err = mtd->read_oob(mtd, addr, &ops);
+ err = mtd_read_oob(mtd, addr, &ops);
if (err)
goto out;
if (memcmp(readbuf, writebuf, mtd->ecclayout->oobavail * 2)) {
diff --git a/drivers/mtd/tests/mtd_pagetest.c b/drivers/mtd/tests/mtd_pagetest.c
index afafb69..252ddb0 100644
--- a/drivers/mtd/tests/mtd_pagetest.c
+++ b/drivers/mtd/tests/mtd_pagetest.c
@@ -77,7 +77,7 @@ static int erase_eraseblock(int ebnum)
ei.addr = addr;
ei.len = mtd->erasesize;
- err = mtd->erase(mtd, &ei);
+ err = mtd_erase(mtd, &ei);
if (err) {
printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
return err;
@@ -95,12 +95,12 @@ static int erase_eraseblock(int ebnum)
static int write_eraseblock(int ebnum)
{
int err = 0;
- size_t written = 0;
+ size_t written;
loff_t addr = ebnum * mtd->erasesize;
set_random_data(writebuf, mtd->erasesize);
cond_resched();
- err = mtd->write(mtd, addr, mtd->erasesize, &written, writebuf);
+ err = mtd_write(mtd, addr, mtd->erasesize, &written, writebuf);
if (err || written != mtd->erasesize)
printk(PRINT_PREF "error: write failed at %#llx\n",
(long long)addr);
@@ -111,7 +111,7 @@ static int write_eraseblock(int ebnum)
static int verify_eraseblock(int ebnum)
{
uint32_t j;
- size_t read = 0;
+ size_t read;
int err = 0, i;
loff_t addr0, addrn;
loff_t addr = ebnum * mtd->erasesize;
@@ -127,7 +127,7 @@ static int verify_eraseblock(int ebnum)
set_random_data(writebuf, mtd->erasesize);
for (j = 0; j < pgcnt - 1; ++j, addr += pgsize) {
/* Do a read to set the internal dataRAMs to different data */
- err = mtd->read(mtd, addr0, bufsize, &read, twopages);
+ err = mtd_read(mtd, addr0, bufsize, &read, twopages);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
@@ -135,7 +135,7 @@ static int verify_eraseblock(int ebnum)
(long long)addr0);
return err;
}
- err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages);
+ err = mtd_read(mtd, addrn - bufsize, bufsize, &read, twopages);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
@@ -144,8 +144,7 @@ static int verify_eraseblock(int ebnum)
return err;
}
memset(twopages, 0, bufsize);
- read = 0;
- err = mtd->read(mtd, addr, bufsize, &read, twopages);
+ err = mtd_read(mtd, addr, bufsize, &read, twopages);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
@@ -163,7 +162,7 @@ static int verify_eraseblock(int ebnum)
if (addr <= addrn - pgsize - pgsize && !bbt[ebnum + 1]) {
unsigned long oldnext = next;
/* Do a read to set the internal dataRAMs to different data */
- err = mtd->read(mtd, addr0, bufsize, &read, twopages);
+ err = mtd_read(mtd, addr0, bufsize, &read, twopages);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
@@ -171,7 +170,7 @@ static int verify_eraseblock(int ebnum)
(long long)addr0);
return err;
}
- err = mtd->read(mtd, addrn - bufsize, bufsize, &read, twopages);
+ err = mtd_read(mtd, addrn - bufsize, bufsize, &read, twopages);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
@@ -180,8 +179,7 @@ static int verify_eraseblock(int ebnum)
return err;
}
memset(twopages, 0, bufsize);
- read = 0;
- err = mtd->read(mtd, addr, bufsize, &read, twopages);
+ err = mtd_read(mtd, addr, bufsize, &read, twopages);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != bufsize) {
@@ -203,7 +201,7 @@ static int verify_eraseblock(int ebnum)
static int crosstest(void)
{
- size_t read = 0;
+ size_t read;
int err = 0, i;
loff_t addr, addr0, addrn;
unsigned char *pp1, *pp2, *pp3, *pp4;
@@ -228,9 +226,8 @@ static int crosstest(void)
addrn -= mtd->erasesize;
/* Read 2nd-to-last page to pp1 */
- read = 0;
addr = addrn - pgsize - pgsize;
- err = mtd->read(mtd, addr, pgsize, &read, pp1);
+ err = mtd_read(mtd, addr, pgsize, &read, pp1);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
@@ -241,9 +238,8 @@ static int crosstest(void)
}
/* Read 3rd-to-last page to pp1 */
- read = 0;
addr = addrn - pgsize - pgsize - pgsize;
- err = mtd->read(mtd, addr, pgsize, &read, pp1);
+ err = mtd_read(mtd, addr, pgsize, &read, pp1);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
@@ -254,10 +250,9 @@ static int crosstest(void)
}
/* Read first page to pp2 */
- read = 0;
addr = addr0;
printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
- err = mtd->read(mtd, addr, pgsize, &read, pp2);
+ err = mtd_read(mtd, addr, pgsize, &read, pp2);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
@@ -268,10 +263,9 @@ static int crosstest(void)
}
/* Read last page to pp3 */
- read = 0;
addr = addrn - pgsize;
printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
- err = mtd->read(mtd, addr, pgsize, &read, pp3);
+ err = mtd_read(mtd, addr, pgsize, &read, pp3);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
@@ -282,10 +276,9 @@ static int crosstest(void)
}
/* Read first page again to pp4 */
- read = 0;
addr = addr0;
printk(PRINT_PREF "reading page at %#llx\n", (long long)addr);
- err = mtd->read(mtd, addr, pgsize, &read, pp4);
+ err = mtd_read(mtd, addr, pgsize, &read, pp4);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
@@ -309,7 +302,7 @@ static int crosstest(void)
static int erasecrosstest(void)
{
- size_t read = 0, written = 0;
+ size_t read, written;
int err = 0, i, ebnum, ebnum2;
loff_t addr0;
char *readbuf = twopages;
@@ -335,7 +328,7 @@ static int erasecrosstest(void)
printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
set_random_data(writebuf, pgsize);
strcpy(writebuf, "There is no data like this!");
- err = mtd->write(mtd, addr0, pgsize, &written, writebuf);
+ err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
if (err || written != pgsize) {
printk(PRINT_PREF "error: write failed at %#llx\n",
(long long)addr0);
@@ -344,7 +337,7 @@ static int erasecrosstest(void)
printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
memset(readbuf, 0, pgsize);
- err = mtd->read(mtd, addr0, pgsize, &read, readbuf);
+ err = mtd_read(mtd, addr0, pgsize, &read, readbuf);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
@@ -368,7 +361,7 @@ static int erasecrosstest(void)
printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
set_random_data(writebuf, pgsize);
strcpy(writebuf, "There is no data like this!");
- err = mtd->write(mtd, addr0, pgsize, &written, writebuf);
+ err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
if (err || written != pgsize) {
printk(PRINT_PREF "error: write failed at %#llx\n",
(long long)addr0);
@@ -382,7 +375,7 @@ static int erasecrosstest(void)
printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
memset(readbuf, 0, pgsize);
- err = mtd->read(mtd, addr0, pgsize, &read, readbuf);
+ err = mtd_read(mtd, addr0, pgsize, &read, readbuf);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
@@ -405,7 +398,7 @@ static int erasecrosstest(void)
static int erasetest(void)
{
- size_t read = 0, written = 0;
+ size_t read, written;
int err = 0, i, ebnum, ok = 1;
loff_t addr0;
@@ -425,7 +418,7 @@ static int erasetest(void)
printk(PRINT_PREF "writing 1st page of block %d\n", ebnum);
set_random_data(writebuf, pgsize);
- err = mtd->write(mtd, addr0, pgsize, &written, writebuf);
+ err = mtd_write(mtd, addr0, pgsize, &written, writebuf);
if (err || written != pgsize) {
printk(PRINT_PREF "error: write failed at %#llx\n",
(long long)addr0);
@@ -438,7 +431,7 @@ static int erasetest(void)
return err;
printk(PRINT_PREF "reading 1st page of block %d\n", ebnum);
- err = mtd->read(mtd, addr0, pgsize, &read, twopages);
+ err = mtd_read(mtd, addr0, pgsize, &read, twopages);
if (mtd_is_bitflip(err))
err = 0;
if (err || read != pgsize) {
@@ -469,7 +462,7 @@ static int is_block_bad(int ebnum)
loff_t addr = ebnum * mtd->erasesize;
int ret;
- ret = mtd->block_isbad(mtd, addr);
+ ret = mtd_block_isbad(mtd, addr);
if (ret)
printk(PRINT_PREF "block %d is bad\n", ebnum);
return ret;
diff --git a/drivers/mtd/tests/mtd_readtest.c b/drivers/mtd/tests/mtd_readtest.c
index 550fe51..121aba1 100644
--- a/drivers/mtd/tests/mtd_readtest.c
+++ b/drivers/mtd/tests/mtd_readtest.c
@@ -44,7 +44,7 @@ static int pgcnt;
static int read_eraseblock_by_page(int ebnum)
{
- size_t read = 0;
+ size_t read;
int i, ret, err = 0;
loff_t addr = ebnum * mtd->erasesize;
void *buf = iobuf;
@@ -52,7 +52,7 @@ static int read_eraseblock_by_page(int ebnum)
for (i = 0; i < pgcnt; i++) {
memset(buf, 0 , pgcnt);
- ret = mtd->read(mtd, addr, pgsize, &read, buf);
+ ret = mtd_read(mtd, addr, pgsize, &read, buf);
if (ret == -EUCLEAN)
ret = 0;
if (ret || read != pgsize) {
@@ -74,7 +74,7 @@ static int read_eraseblock_by_page(int ebnum)
ops.ooboffs = 0;
ops.datbuf = NULL;
ops.oobbuf = oobbuf;
- ret = mtd->read_oob(mtd, addr, &ops);
+ ret = mtd_read_oob(mtd, addr, &ops);
if ((ret && !mtd_is_bitflip(ret)) ||
ops.oobretlen != mtd->oobsize) {
printk(PRINT_PREF "error: read oob failed at "
@@ -132,7 +132,7 @@ static int is_block_bad(int ebnum)
loff_t addr = ebnum * mtd->erasesize;
int ret;
- ret = mtd->block_isbad(mtd, addr);
+ ret = mtd_block_isbad(mtd, addr);
if (ret)
printk(PRINT_PREF "block %d is bad\n", ebnum);
return ret;
@@ -148,8 +148,7 @@ static int scan_for_bad_eraseblocks(void)
return -ENOMEM;
}
- /* NOR flash does not implement block_isbad */
- if (mtd->block_isbad == NULL)
+ if (!mtd_can_have_bb(mtd))
return 0;
printk(PRINT_PREF "scanning for bad eraseblocks\n");
diff --git a/drivers/mtd/tests/mtd_speedtest.c b/drivers/mtd/tests/mtd_speedtest.c
index 493b367..2aec4f3 100644
--- a/drivers/mtd/tests/mtd_speedtest.c
+++ b/drivers/mtd/tests/mtd_speedtest.c
@@ -79,7 +79,7 @@ static int erase_eraseblock(int ebnum)
ei.addr = addr;
ei.len = mtd->erasesize;
- err = mtd->erase(mtd, &ei);
+ err = mtd_erase(mtd, &ei);
if (err) {
printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
return err;
@@ -105,7 +105,7 @@ static int multiblock_erase(int ebnum, int blocks)
ei.addr = addr;
ei.len = mtd->erasesize * blocks;
- err = mtd->erase(mtd, &ei);
+ err = mtd_erase(mtd, &ei);
if (err) {
printk(PRINT_PREF "error %d while erasing EB %d, blocks %d\n",
err, ebnum, blocks);
@@ -139,11 +139,11 @@ static int erase_whole_device(void)
static int write_eraseblock(int ebnum)
{
- size_t written = 0;
+ size_t written;
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
- err = mtd->write(mtd, addr, mtd->erasesize, &written, iobuf);
+ err = mtd_write(mtd, addr, mtd->erasesize, &written, iobuf);
if (err || written != mtd->erasesize) {
printk(PRINT_PREF "error: write failed at %#llx\n", addr);
if (!err)
@@ -155,13 +155,13 @@ static int write_eraseblock(int ebnum)
static int write_eraseblock_by_page(int ebnum)
{
- size_t written = 0;
+ size_t written;
int i, err = 0;
loff_t addr = ebnum * mtd->erasesize;
void *buf = iobuf;
for (i = 0; i < pgcnt; i++) {
- err = mtd->write(mtd, addr, pgsize, &written, buf);
+ err = mtd_write(mtd, addr, pgsize, &written, buf);
if (err || written != pgsize) {
printk(PRINT_PREF "error: write failed at %#llx\n",
addr);
@@ -178,13 +178,13 @@ static int write_eraseblock_by_page(int ebnum)
static int write_eraseblock_by_2pages(int ebnum)
{
- size_t written = 0, sz = pgsize * 2;
+ size_t written, sz = pgsize * 2;
int i, n = pgcnt / 2, err = 0;
loff_t addr = ebnum * mtd->erasesize;
void *buf = iobuf;
for (i = 0; i < n; i++) {
- err = mtd->write(mtd, addr, sz, &written, buf);
+ err = mtd_write(mtd, addr, sz, &written, buf);
if (err || written != sz) {
printk(PRINT_PREF "error: write failed at %#llx\n",
addr);
@@ -196,7 +196,7 @@ static int write_eraseblock_by_2pages(int ebnum)
buf += sz;
}
if (pgcnt % 2) {
- err = mtd->write(mtd, addr, pgsize, &written, buf);
+ err = mtd_write(mtd, addr, pgsize, &written, buf);
if (err || written != pgsize) {
printk(PRINT_PREF "error: write failed at %#llx\n",
addr);
@@ -210,11 +210,11 @@ static int write_eraseblock_by_2pages(int ebnum)
static int read_eraseblock(int ebnum)
{
- size_t read = 0;
+ size_t read;
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
- err = mtd->read(mtd, addr, mtd->erasesize, &read, iobuf);
+ err = mtd_read(mtd, addr, mtd->erasesize, &read, iobuf);
/* Ignore corrected ECC errors */
if (mtd_is_bitflip(err))
err = 0;
@@ -229,13 +229,13 @@ static int read_eraseblock(int ebnum)
static int read_eraseblock_by_page(int ebnum)
{
- size_t read = 0;
+ size_t read;
int i, err = 0;
loff_t addr = ebnum * mtd->erasesize;
void *buf = iobuf;
for (i = 0; i < pgcnt; i++) {
- err = mtd->read(mtd, addr, pgsize, &read, buf);
+ err = mtd_read(mtd, addr, pgsize, &read, buf);
/* Ignore corrected ECC errors */
if (mtd_is_bitflip(err))
err = 0;
@@ -255,13 +255,13 @@ static int read_eraseblock_by_page(int ebnum)
static int read_eraseblock_by_2pages(int ebnum)
{
- size_t read = 0, sz = pgsize * 2;
+ size_t read, sz = pgsize * 2;
int i, n = pgcnt / 2, err = 0;
loff_t addr = ebnum * mtd->erasesize;
void *buf = iobuf;
for (i = 0; i < n; i++) {
- err = mtd->read(mtd, addr, sz, &read, buf);
+ err = mtd_read(mtd, addr, sz, &read, buf);
/* Ignore corrected ECC errors */
if (mtd_is_bitflip(err))
err = 0;
@@ -276,7 +276,7 @@ static int read_eraseblock_by_2pages(int ebnum)
buf += sz;
}
if (pgcnt % 2) {
- err = mtd->read(mtd, addr, pgsize, &read, buf);
+ err = mtd_read(mtd, addr, pgsize, &read, buf);
/* Ignore corrected ECC errors */
if (mtd_is_bitflip(err))
err = 0;
@@ -296,7 +296,7 @@ static int is_block_bad(int ebnum)
loff_t addr = ebnum * mtd->erasesize;
int ret;
- ret = mtd->block_isbad(mtd, addr);
+ ret = mtd_block_isbad(mtd, addr);
if (ret)
printk(PRINT_PREF "block %d is bad\n", ebnum);
return ret;
@@ -336,8 +336,7 @@ static int scan_for_bad_eraseblocks(void)
return -ENOMEM;
}
- /* NOR flash does not implement block_isbad */
- if (mtd->block_isbad == NULL)
+ if (!mtd_can_have_bb(mtd))
goto out;
printk(PRINT_PREF "scanning for bad eraseblocks\n");
diff --git a/drivers/mtd/tests/mtd_stresstest.c b/drivers/mtd/tests/mtd_stresstest.c
index 52ffd91..7b33f22 100644
--- a/drivers/mtd/tests/mtd_stresstest.c
+++ b/drivers/mtd/tests/mtd_stresstest.c
@@ -112,7 +112,7 @@ static int erase_eraseblock(int ebnum)
ei.addr = addr;
ei.len = mtd->erasesize;
- err = mtd->erase(mtd, &ei);
+ err = mtd_erase(mtd, &ei);
if (unlikely(err)) {
printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
return err;
@@ -132,7 +132,7 @@ static int is_block_bad(int ebnum)
loff_t addr = ebnum * mtd->erasesize;
int ret;
- ret = mtd->block_isbad(mtd, addr);
+ ret = mtd_block_isbad(mtd, addr);
if (ret)
printk(PRINT_PREF "block %d is bad\n", ebnum);
return ret;
@@ -140,7 +140,7 @@ static int is_block_bad(int ebnum)
static int do_read(void)
{
- size_t read = 0;
+ size_t read;
int eb = rand_eb();
int offs = rand_offs();
int len = rand_len(offs), err;
@@ -153,7 +153,7 @@ static int do_read(void)
len = mtd->erasesize - offs;
}
addr = eb * mtd->erasesize + offs;
- err = mtd->read(mtd, addr, len, &read, readbuf);
+ err = mtd_read(mtd, addr, len, &read, readbuf);
if (mtd_is_bitflip(err))
err = 0;
if (unlikely(err || read != len)) {
@@ -169,7 +169,7 @@ static int do_read(void)
static int do_write(void)
{
int eb = rand_eb(), offs, err, len;
- size_t written = 0;
+ size_t written;
loff_t addr;
offs = offsets[eb];
@@ -192,7 +192,7 @@ static int do_write(void)
}
}
addr = eb * mtd->erasesize + offs;
- err = mtd->write(mtd, addr, len, &written, writebuf);
+ err = mtd_write(mtd, addr, len, &written, writebuf);
if (unlikely(err || written != len)) {
printk(PRINT_PREF "error: write failed at 0x%llx\n",
(long long)addr);
@@ -227,8 +227,7 @@ static int scan_for_bad_eraseblocks(void)
return -ENOMEM;
}
- /* NOR flash does not implement block_isbad */
- if (mtd->block_isbad == NULL)
+ if (!mtd_can_have_bb(mtd))
return 0;
printk(PRINT_PREF "scanning for bad eraseblocks\n");
@@ -284,6 +283,12 @@ static int __init mtd_stresstest_init(void)
(unsigned long long)mtd->size, mtd->erasesize,
pgsize, ebcnt, pgcnt, mtd->oobsize);
+ if (ebcnt < 2) {
+ printk(PRINT_PREF "error: need at least 2 eraseblocks\n");
+ err = -ENOSPC;
+ goto out_put_mtd;
+ }
+
/* Read or write up 2 eraseblocks at a time */
bufsize = mtd->erasesize * 2;
@@ -322,6 +327,7 @@ out:
kfree(bbt);
vfree(writebuf);
vfree(readbuf);
+out_put_mtd:
put_mtd_device(mtd);
if (err)
printk(PRINT_PREF "error %d occurred\n", err);
diff --git a/drivers/mtd/tests/mtd_subpagetest.c b/drivers/mtd/tests/mtd_subpagetest.c
index 1a05bfa..9667bf5 100644
--- a/drivers/mtd/tests/mtd_subpagetest.c
+++ b/drivers/mtd/tests/mtd_subpagetest.c
@@ -80,7 +80,7 @@ static int erase_eraseblock(int ebnum)
ei.addr = addr;
ei.len = mtd->erasesize;
- err = mtd->erase(mtd, &ei);
+ err = mtd_erase(mtd, &ei);
if (err) {
printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
return err;
@@ -115,12 +115,12 @@ static int erase_whole_device(void)
static int write_eraseblock(int ebnum)
{
- size_t written = 0;
+ size_t written;
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
set_random_data(writebuf, subpgsize);
- err = mtd->write(mtd, addr, subpgsize, &written, writebuf);
+ err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
if (unlikely(err || written != subpgsize)) {
printk(PRINT_PREF "error: write failed at %#llx\n",
(long long)addr);
@@ -134,7 +134,7 @@ static int write_eraseblock(int ebnum)
addr += subpgsize;
set_random_data(writebuf, subpgsize);
- err = mtd->write(mtd, addr, subpgsize, &written, writebuf);
+ err = mtd_write(mtd, addr, subpgsize, &written, writebuf);
if (unlikely(err || written != subpgsize)) {
printk(PRINT_PREF "error: write failed at %#llx\n",
(long long)addr);
@@ -150,7 +150,7 @@ static int write_eraseblock(int ebnum)
static int write_eraseblock2(int ebnum)
{
- size_t written = 0;
+ size_t written;
int err = 0, k;
loff_t addr = ebnum * mtd->erasesize;
@@ -158,7 +158,7 @@ static int write_eraseblock2(int ebnum)
if (addr + (subpgsize * k) > (ebnum + 1) * mtd->erasesize)
break;
set_random_data(writebuf, subpgsize * k);
- err = mtd->write(mtd, addr, subpgsize * k, &written, writebuf);
+ err = mtd_write(mtd, addr, subpgsize * k, &written, writebuf);
if (unlikely(err || written != subpgsize * k)) {
printk(PRINT_PREF "error: write failed at %#llx\n",
(long long)addr);
@@ -189,14 +189,13 @@ static void print_subpage(unsigned char *p)
static int verify_eraseblock(int ebnum)
{
- size_t read = 0;
+ size_t read;
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
set_random_data(writebuf, subpgsize);
clear_data(readbuf, subpgsize);
- read = 0;
- err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
+ err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
if (mtd_is_bitflip(err) && read == subpgsize) {
printk(PRINT_PREF "ECC correction at %#llx\n",
@@ -223,8 +222,7 @@ static int verify_eraseblock(int ebnum)
set_random_data(writebuf, subpgsize);
clear_data(readbuf, subpgsize);
- read = 0;
- err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
+ err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
if (mtd_is_bitflip(err) && read == subpgsize) {
printk(PRINT_PREF "ECC correction at %#llx\n",
@@ -252,7 +250,7 @@ static int verify_eraseblock(int ebnum)
static int verify_eraseblock2(int ebnum)
{
- size_t read = 0;
+ size_t read;
int err = 0, k;
loff_t addr = ebnum * mtd->erasesize;
@@ -261,8 +259,7 @@ static int verify_eraseblock2(int ebnum)
break;
set_random_data(writebuf, subpgsize * k);
clear_data(readbuf, subpgsize * k);
- read = 0;
- err = mtd->read(mtd, addr, subpgsize * k, &read, readbuf);
+ err = mtd_read(mtd, addr, subpgsize * k, &read, readbuf);
if (unlikely(err || read != subpgsize * k)) {
if (mtd_is_bitflip(err) && read == subpgsize * k) {
printk(PRINT_PREF "ECC correction at %#llx\n",
@@ -288,15 +285,14 @@ static int verify_eraseblock2(int ebnum)
static int verify_eraseblock_ff(int ebnum)
{
uint32_t j;
- size_t read = 0;
+ size_t read;
int err = 0;
loff_t addr = ebnum * mtd->erasesize;
memset(writebuf, 0xff, subpgsize);
for (j = 0; j < mtd->erasesize / subpgsize; ++j) {
clear_data(readbuf, subpgsize);
- read = 0;
- err = mtd->read(mtd, addr, subpgsize, &read, readbuf);
+ err = mtd_read(mtd, addr, subpgsize, &read, readbuf);
if (unlikely(err || read != subpgsize)) {
if (mtd_is_bitflip(err) && read == subpgsize) {
printk(PRINT_PREF "ECC correction at %#llx\n",
@@ -344,7 +340,7 @@ static int is_block_bad(int ebnum)
loff_t addr = ebnum * mtd->erasesize;
int ret;
- ret = mtd->block_isbad(mtd, addr);
+ ret = mtd_block_isbad(mtd, addr);
if (ret)
printk(PRINT_PREF "block %d is bad\n", ebnum);
return ret;
diff --git a/drivers/mtd/tests/mtd_torturetest.c b/drivers/mtd/tests/mtd_torturetest.c
index 03ab649..b65861b 100644
--- a/drivers/mtd/tests/mtd_torturetest.c
+++ b/drivers/mtd/tests/mtd_torturetest.c
@@ -105,7 +105,7 @@ static inline int erase_eraseblock(int ebnum)
ei.addr = addr;
ei.len = mtd->erasesize;
- err = mtd->erase(mtd, &ei);
+ err = mtd_erase(mtd, &ei);
if (err) {
printk(PRINT_PREF "error %d while erasing EB %d\n", err, ebnum);
return err;
@@ -127,7 +127,7 @@ static inline int erase_eraseblock(int ebnum)
static inline int check_eraseblock(int ebnum, unsigned char *buf)
{
int err, retries = 0;
- size_t read = 0;
+ size_t read;
loff_t addr = ebnum * mtd->erasesize;
size_t len = mtd->erasesize;
@@ -137,7 +137,7 @@ static inline int check_eraseblock(int ebnum, unsigned char *buf)
}
retry:
- err = mtd->read(mtd, addr, len, &read, check_buf);
+ err = mtd_read(mtd, addr, len, &read, check_buf);
if (mtd_is_bitflip(err))
printk(PRINT_PREF "single bit flip occurred at EB %d "
"MTD reported that it was fixed.\n", ebnum);
@@ -181,7 +181,7 @@ retry:
static inline int write_pattern(int ebnum, void *buf)
{
int err;
- size_t written = 0;
+ size_t written;
loff_t addr = ebnum * mtd->erasesize;
size_t len = mtd->erasesize;
@@ -189,7 +189,7 @@ static inline int write_pattern(int ebnum, void *buf)
addr = (ebnum + 1) * mtd->erasesize - pgcnt * pgsize;
len = pgcnt * pgsize;
}
- err = mtd->write(mtd, addr, len, &written, buf);
+ err = mtd_write(mtd, addr, len, &written, buf);
if (err) {
printk(PRINT_PREF "error %d while writing EB %d, written %zd"
" bytes\n", err, ebnum, written);
@@ -290,10 +290,9 @@ static int __init tort_init(void)
* Check if there is a bad eraseblock among those we are going to test.
*/
memset(&bad_ebs[0], 0, sizeof(int) * ebcnt);
- if (mtd->block_isbad) {
+ if (mtd_can_have_bb(mtd)) {
for (i = eb; i < eb + ebcnt; i++) {
- err = mtd->block_isbad(mtd,
- (loff_t)i * mtd->erasesize);
+ err = mtd_block_isbad(mtd, (loff_t)i * mtd->erasesize);
if (err < 0) {
printk(PRINT_PREF "block_isbad() returned %d "
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 6c3fb5a..115749f 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -664,7 +664,7 @@ static int io_init(struct ubi_device *ubi)
ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
ubi->flash_size = ubi->mtd->size;
- if (ubi->mtd->block_isbad && ubi->mtd->block_markbad)
+ if (mtd_can_have_bb(ubi->mtd))
ubi->bad_allowed = 1;
if (ubi->mtd->type == MTD_NORFLASH) {
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 3320a50..ad76592 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -632,6 +632,9 @@ static int verify_mkvol_req(const struct ubi_device *ubi,
if (req->alignment != 1 && n)
goto bad;
+ if (!req->name[0] || !req->name_len)
+ goto bad;
+
if (req->name_len > UBI_VOL_NAME_MAX) {
err = -ENAMETOOLONG;
goto bad;
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index ab80c0d..e2cdebf 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -216,7 +216,7 @@ void ubi_dbg_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
buf = vmalloc(len);
if (!buf)
return;
- err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
+ err = mtd_read(ubi->mtd, addr, len, &read, buf);
if (err && err != -EUCLEAN) {
ubi_err("error %d while reading %d bytes from PEB %d:%d, "
"read %zd bytes", err, len, pnum, offset, read);
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index 64fbb00..ead2cd1 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -43,7 +43,10 @@
pr_debug("UBI DBG " type ": " fmt "\n", ##__VA_ARGS__)
/* Just a debugging messages not related to any specific UBI subsystem */
-#define dbg_msg(fmt, ...) ubi_dbg_msg("msg", fmt, ##__VA_ARGS__)
+#define dbg_msg(fmt, ...) \
+ printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \
+ current->pid, __func__, ##__VA_ARGS__)
+
/* General debugging messages */
#define dbg_gen(fmt, ...) ubi_dbg_msg("gen", fmt, ##__VA_ARGS__)
/* Messages from the eraseblock association sub-system */
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index fb7f19b..cd26da8 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -1028,12 +1028,14 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
* 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
* holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
* LEB is already locked, we just do not move it and return
- * %MOVE_CANCEL_RACE, which means that UBI will re-try, but later.
+ * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
+ * we do not know the reasons of the contention - it may be just a
+ * normal I/O on this LEB, so we want to re-try.
*/
err = leb_write_trylock(ubi, vol_id, lnum);
if (err) {
dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
- return MOVE_CANCEL_RACE;
+ return MOVE_RETRY;
}
/*
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index f20b6f2..5cde4e5 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -170,7 +170,7 @@ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
addr = (loff_t)pnum * ubi->peb_size + offset;
retry:
- err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
+ err = mtd_read(ubi->mtd, addr, len, &read, buf);
if (err) {
const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : "";
@@ -289,7 +289,7 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
}
addr = (loff_t)pnum * ubi->peb_size + offset;
- err = ubi->mtd->write(ubi->mtd, addr, len, &written, buf);
+ err = mtd_write(ubi->mtd, addr, len, &written, buf);
if (err) {
ubi_err("error %d while writing %d bytes to PEB %d:%d, written "
"%zd bytes", err, len, pnum, offset, written);
@@ -361,7 +361,7 @@ retry:
ei.callback = erase_callback;
ei.priv = (unsigned long)&wq;
- err = ubi->mtd->erase(ubi->mtd, &ei);
+ err = mtd_erase(ubi->mtd, &ei);
if (err) {
if (retries++ < UBI_IO_RETRIES) {
dbg_io("error %d while erasing PEB %d, retry",
@@ -525,11 +525,10 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
* the header comment in scan.c for more information).
*/
addr = (loff_t)pnum * ubi->peb_size;
- err = ubi->mtd->write(ubi->mtd, addr, 4, &written, (void *)&data);
+ err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
if (!err) {
addr += ubi->vid_hdr_aloffset;
- err = ubi->mtd->write(ubi->mtd, addr, 4, &written,
- (void *)&data);
+ err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
if (!err)
return 0;
}
@@ -635,7 +634,7 @@ int ubi_io_is_bad(const struct ubi_device *ubi, int pnum)
if (ubi->bad_allowed) {
int ret;
- ret = mtd->block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
+ ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
if (ret < 0)
ubi_err("error %d while checking if PEB %d is bad",
ret, pnum);
@@ -670,7 +669,7 @@ int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
if (!ubi->bad_allowed)
return 0;
- err = mtd->block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
+ err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
if (err)
ubi_err("cannot mark PEB %d bad, error %d", pnum, err);
return err;
@@ -1357,7 +1356,7 @@ int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
return 0;
}
- err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf1);
+ err = mtd_read(ubi->mtd, addr, len, &read, buf1);
if (err && !mtd_is_bitflip(err))
goto out_free;
@@ -1421,7 +1420,7 @@ int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
return 0;
}
- err = ubi->mtd->read(ubi->mtd, addr, len, &read, buf);
+ err = mtd_read(ubi->mtd, addr, len, &read, buf);
if (err && !mtd_is_bitflip(err)) {
ubi_err("error %d while reading %d bytes from PEB %d:%d, "
"read %zd bytes", err, len, pnum, offset, read);
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 1a35fc5..9fdb353 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -714,9 +714,7 @@ int ubi_sync(int ubi_num)
if (!ubi)
return -ENODEV;
- if (ubi->mtd->sync)
- ubi->mtd->sync(ubi->mtd);
-
+ mtd_sync(ubi->mtd);
ubi_put_device(ubi);
return 0;
}
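
Note that this conversion drops the explicit if (ubi->mtd->sync) guard, which only remains equivalent if the wrapper performs the NULL check itself. A sketch consistent with that assumption:

	/* Sketch: the guard removed from ubi_sync() has to live in the wrapper
	 * for drivers that do not implement sync. */
	static inline void mtd_sync(struct mtd_info *mtd)
	{
		if (mtd->sync)
			mtd->sync(mtd);
	}
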
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index dc64c76..d51d75d 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -120,6 +120,7 @@ enum {
* PEB
* MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the
* target PEB
+ * MOVE_RETRY: retry scrubbing the PEB
*/
enum {
MOVE_CANCEL_RACE = 1,
@@ -127,6 +128,7 @@ enum {
MOVE_TARGET_RD_ERR,
MOVE_TARGET_WR_ERR,
MOVE_CANCEL_BITFLIPS,
+ MOVE_RETRY,
};
/**
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 9ad18da..17cec0c 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -306,7 +306,7 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_scan_info *si,
int copy, void *vtbl)
{
int err, tries = 0;
- static struct ubi_vid_hdr *vid_hdr;
+ struct ubi_vid_hdr *vid_hdr;
struct ubi_scan_leb *new_seb;
ubi_msg("create volume table (copy #%d)", copy + 1);
@@ -322,7 +322,7 @@ retry:
goto out_free;
}
- vid_hdr->vol_type = UBI_VID_DYNAMIC;
+ vid_hdr->vol_type = UBI_LAYOUT_VOLUME_TYPE;
vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOLUME_ID);
vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT;
vid_hdr->data_size = vid_hdr->used_ebs =
@@ -632,7 +632,7 @@ static int init_volumes(struct ubi_device *ubi, const struct ubi_scan_info *si,
return -ENOMEM;
vol->reserved_pebs = UBI_LAYOUT_VOLUME_EBS;
- vol->alignment = 1;
+ vol->alignment = UBI_LAYOUT_VOLUME_ALIGN;
vol->vol_type = UBI_DYNAMIC_VOLUME;
vol->name_len = sizeof(UBI_LAYOUT_VOLUME_NAME) - 1;
memcpy(vol->name, UBI_LAYOUT_VOLUME_NAME, vol->name_len + 1);
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 42c684c..0696e36 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -795,7 +795,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
protect = 1;
goto out_not_moved;
}
-
+ if (err == MOVE_RETRY) {
+ scrubbing = 1;
+ goto out_not_moved;
+ }
if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
err == MOVE_TARGET_RD_ERR) {
/*
@@ -1049,7 +1052,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
ubi_err("failed to erase PEB %d, error %d", pnum, err);
kfree(wl_wrk);
- kmem_cache_free(ubi_wl_entry_slab, e);
if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
err == -EBUSY) {
@@ -1062,14 +1064,16 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
goto out_ro;
}
return err;
- } else if (err != -EIO) {
+ }
+
+ kmem_cache_free(ubi_wl_entry_slab, e);
+ if (err != -EIO)
/*
* If this is not %-EIO, we have no idea what to do. Scheduling
* this physical eraseblock for erasure again would cause
* errors again and again. Well, lets switch to R/O mode.
*/
goto out_ro;
- }
/* It is %-EIO, the PEB went bad */
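
The erase_worker() change above does more than restyle the branch: the original freed the wear-leveling entry e before the -EINTR/-ENOMEM/-EAGAIN/-EBUSY retry branch ran, while the new code frees it only after that branch has returned, since the retry path may still hand the entry back to the work queue. A self-contained illustration of the hazard (illustrative names, plain C, not kernel code):

	#include <stdlib.h>

	struct work_item {
		int pnum;
	};

	/* freed too early: the retry path then queues a dangling pointer */
	static void broken(struct work_item *w, int retry, struct work_item **requeue)
	{
		free(w);
		if (retry)
			*requeue = w;	/* use after free */
		else
			*requeue = NULL;
	}

	/* retry first; free only once the item is definitely finished with */
	static void fixed(struct work_item *w, int retry, struct work_item **requeue)
	{
		if (retry) {
			*requeue = w;
			return;
		}
		*requeue = NULL;
		free(w);
	}
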
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 654a5e9..b982854 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -125,6 +125,8 @@ config IFB
'ifb1' etc.
Look at the iproute2 documentation directory for usage etc
+source "drivers/net/team/Kconfig"
+
config MACVLAN
tristate "MAC-VLAN support (EXPERIMENTAL)"
depends on EXPERIMENTAL
@@ -241,6 +243,8 @@ source "drivers/atm/Kconfig"
source "drivers/net/caif/Kconfig"
+source "drivers/net/dsa/Kconfig"
+
source "drivers/net/ethernet/Kconfig"
source "drivers/net/fddi/Kconfig"
@@ -338,4 +342,6 @@ config VMXNET3
To compile this driver as a module, choose M here: the
module will be called vmxnet3.
+source "drivers/net/hyperv/Kconfig"
+
endif # NETDEVICES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index fa877cd..a6b8ce1 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -17,6 +17,7 @@ obj-$(CONFIG_NET) += Space.o loopback.o
obj-$(CONFIG_NETCONSOLE) += netconsole.o
obj-$(CONFIG_PHYLIB) += phy/
obj-$(CONFIG_RIONET) += rionet.o
+obj-$(CONFIG_NET_TEAM) += team/
obj-$(CONFIG_TUN) += tun.o
obj-$(CONFIG_VETH) += veth.o
obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
@@ -29,6 +30,7 @@ obj-$(CONFIG_DEV_APPLETALK) += appletalk/
obj-$(CONFIG_CAIF) += caif/
obj-$(CONFIG_CAN) += can/
obj-$(CONFIG_ETRAX_ETHERNET) += cris/
+obj-$(CONFIG_NET_DSA) += dsa/
obj-$(CONFIG_ETHERNET) += ethernet/
obj-$(CONFIG_FDDI) += fddi/
obj-$(CONFIG_HIPPI) += hippi/
@@ -66,3 +68,5 @@ obj-$(CONFIG_USB_USBNET) += usb/
obj-$(CONFIG_USB_ZD1201) += usb/
obj-$(CONFIG_USB_IPHETH) += usb/
obj-$(CONFIG_USB_CDC_PHONET) += usb/
+
+obj-$(CONFIG_HYPERV_NET) += hyperv/
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 106b88a..f820b26 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -99,16 +99,26 @@ static inline u8 _simple_hash(const u8 *hash_start, int hash_size)
/*********************** tlb specific functions ***************************/
-static inline void _lock_tx_hashtbl(struct bonding *bond)
+static inline void _lock_tx_hashtbl_bh(struct bonding *bond)
{
spin_lock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
}
-static inline void _unlock_tx_hashtbl(struct bonding *bond)
+static inline void _unlock_tx_hashtbl_bh(struct bonding *bond)
{
spin_unlock_bh(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
}
+static inline void _lock_tx_hashtbl(struct bonding *bond)
+{
+ spin_lock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
+}
+
+static inline void _unlock_tx_hashtbl(struct bonding *bond)
+{
+ spin_unlock(&(BOND_ALB_INFO(bond).tx_hashtbl_lock));
+}
+
/* Caller must hold tx_hashtbl lock */
static inline void tlb_init_table_entry(struct tlb_client_info *entry, int save_load)
{
@@ -129,14 +139,13 @@ static inline void tlb_init_slave(struct slave *slave)
SLAVE_TLB_INFO(slave).head = TLB_NULL_INDEX;
}
-/* Caller must hold bond lock for read */
-static void tlb_clear_slave(struct bonding *bond, struct slave *slave, int save_load)
+/* Caller must hold bond lock for read, BH disabled */
+static void __tlb_clear_slave(struct bonding *bond, struct slave *slave,
+ int save_load)
{
struct tlb_client_info *tx_hash_table;
u32 index;
- _lock_tx_hashtbl(bond);
-
/* clear slave from tx_hashtbl */
tx_hash_table = BOND_ALB_INFO(bond).tx_hashtbl;
@@ -151,8 +160,15 @@ static void tlb_clear_slave(struct bonding *bond, struct slave *slave, int save_
}
tlb_init_slave(slave);
+}
- _unlock_tx_hashtbl(bond);
+/* Caller must hold bond lock for read */
+static void tlb_clear_slave(struct bonding *bond, struct slave *slave,
+ int save_load)
+{
+ _lock_tx_hashtbl_bh(bond);
+ __tlb_clear_slave(bond, slave, save_load);
+ _unlock_tx_hashtbl_bh(bond);
}
/* Must be called before starting the monitor timer */
@@ -169,7 +185,7 @@ static int tlb_initialize(struct bonding *bond)
bond->dev->name);
return -1;
}
- _lock_tx_hashtbl(bond);
+ _lock_tx_hashtbl_bh(bond);
bond_info->tx_hashtbl = new_hashtbl;
@@ -177,7 +193,7 @@ static int tlb_initialize(struct bonding *bond)
tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
}
- _unlock_tx_hashtbl(bond);
+ _unlock_tx_hashtbl_bh(bond);
return 0;
}
@@ -187,12 +203,12 @@ static void tlb_deinitialize(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
- _lock_tx_hashtbl(bond);
+ _lock_tx_hashtbl_bh(bond);
kfree(bond_info->tx_hashtbl);
bond_info->tx_hashtbl = NULL;
- _unlock_tx_hashtbl(bond);
+ _unlock_tx_hashtbl_bh(bond);
}
static long long compute_gap(struct slave *slave)
@@ -226,15 +242,13 @@ static struct slave *tlb_get_least_loaded_slave(struct bonding *bond)
return least_loaded;
}
-/* Caller must hold bond lock for read */
-static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index, u32 skb_len)
+static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index,
+ u32 skb_len)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
struct tlb_client_info *hash_table;
struct slave *assigned_slave;
- _lock_tx_hashtbl(bond);
-
hash_table = bond_info->tx_hashtbl;
assigned_slave = hash_table[hash_index].tx_slave;
if (!assigned_slave) {
@@ -263,22 +277,46 @@ static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index, u3
hash_table[hash_index].tx_bytes += skb_len;
}
- _unlock_tx_hashtbl(bond);
-
return assigned_slave;
}
+/* Caller must hold bond lock for read */
+static struct slave *tlb_choose_channel(struct bonding *bond, u32 hash_index,
+ u32 skb_len)
+{
+ struct slave *tx_slave;
+ /*
+ * We don't need to disable softirq here, becase
+ * We don't need to disable softirq here, because
+ * tlb_choose_channel() is only called by bond_alb_xmit()
+ * which already has softirq disabled.
+ */
+ _lock_tx_hashtbl(bond);
+ tx_slave = __tlb_choose_channel(bond, hash_index, skb_len);
+ _unlock_tx_hashtbl(bond);
+ return tx_slave;
+}
+
/*********************** rlb specific functions ***************************/
-static inline void _lock_rx_hashtbl(struct bonding *bond)
+static inline void _lock_rx_hashtbl_bh(struct bonding *bond)
{
spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
}
-static inline void _unlock_rx_hashtbl(struct bonding *bond)
+static inline void _unlock_rx_hashtbl_bh(struct bonding *bond)
{
spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
}
+static inline void _lock_rx_hashtbl(struct bonding *bond)
+{
+ spin_lock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
+}
+
+static inline void _unlock_rx_hashtbl(struct bonding *bond)
+{
+ spin_unlock(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
+}
+
/* when an ARP REPLY is received from a client update its info
* in the rx_hashtbl
*/
@@ -288,7 +326,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
struct rlb_client_info *client_info;
u32 hash_index;
- _lock_rx_hashtbl(bond);
+ _lock_rx_hashtbl_bh(bond);
hash_index = _simple_hash((u8*)&(arp->ip_src), sizeof(arp->ip_src));
client_info = &(bond_info->rx_hashtbl[hash_index]);
@@ -303,7 +341,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
bond_info->rx_ntt = 1;
}
- _unlock_rx_hashtbl(bond);
+ _unlock_rx_hashtbl_bh(bond);
}
static void rlb_arp_recv(struct sk_buff *skb, struct bonding *bond,
@@ -401,7 +439,7 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
u32 index, next_index;
/* clear slave from rx_hashtbl */
- _lock_rx_hashtbl(bond);
+ _lock_rx_hashtbl_bh(bond);
rx_hash_table = bond_info->rx_hashtbl;
index = bond_info->rx_hashtbl_head;
@@ -432,7 +470,7 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
}
}
- _unlock_rx_hashtbl(bond);
+ _unlock_rx_hashtbl_bh(bond);
write_lock_bh(&bond->curr_slave_lock);
@@ -489,7 +527,7 @@ static void rlb_update_rx_clients(struct bonding *bond)
struct rlb_client_info *client_info;
u32 hash_index;
- _lock_rx_hashtbl(bond);
+ _lock_rx_hashtbl_bh(bond);
hash_index = bond_info->rx_hashtbl_head;
for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
@@ -507,7 +545,7 @@ static void rlb_update_rx_clients(struct bonding *bond)
*/
bond_info->rlb_update_delay_counter = RLB_UPDATE_DELAY;
- _unlock_rx_hashtbl(bond);
+ _unlock_rx_hashtbl_bh(bond);
}
/* The slave was assigned a new mac address - update the clients */
@@ -518,7 +556,7 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla
int ntt = 0;
u32 hash_index;
- _lock_rx_hashtbl(bond);
+ _lock_rx_hashtbl_bh(bond);
hash_index = bond_info->rx_hashtbl_head;
for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
@@ -538,7 +576,7 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla
bond_info->rlb_update_retry_counter = RLB_UPDATE_RETRY;
}
- _unlock_rx_hashtbl(bond);
+ _unlock_rx_hashtbl_bh(bond);
}
/* mark all clients using src_ip to be updated */
@@ -709,7 +747,7 @@ static void rlb_rebalance(struct bonding *bond)
int ntt;
u32 hash_index;
- _lock_rx_hashtbl(bond);
+ _lock_rx_hashtbl_bh(bond);
ntt = 0;
hash_index = bond_info->rx_hashtbl_head;
@@ -727,7 +765,7 @@ static void rlb_rebalance(struct bonding *bond)
if (ntt) {
bond_info->rx_ntt = 1;
}
- _unlock_rx_hashtbl(bond);
+ _unlock_rx_hashtbl_bh(bond);
}
/* Caller must hold rx_hashtbl lock */
@@ -751,7 +789,7 @@ static int rlb_initialize(struct bonding *bond)
bond->dev->name);
return -1;
}
- _lock_rx_hashtbl(bond);
+ _lock_rx_hashtbl_bh(bond);
bond_info->rx_hashtbl = new_hashtbl;
@@ -761,7 +799,7 @@ static int rlb_initialize(struct bonding *bond)
rlb_init_table_entry(bond_info->rx_hashtbl + i);
}
- _unlock_rx_hashtbl(bond);
+ _unlock_rx_hashtbl_bh(bond);
/* register to receive ARPs */
bond->recv_probe = rlb_arp_recv;
@@ -773,13 +811,13 @@ static void rlb_deinitialize(struct bonding *bond)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
- _lock_rx_hashtbl(bond);
+ _lock_rx_hashtbl_bh(bond);
kfree(bond_info->rx_hashtbl);
bond_info->rx_hashtbl = NULL;
bond_info->rx_hashtbl_head = RLB_NULL_INDEX;
- _unlock_rx_hashtbl(bond);
+ _unlock_rx_hashtbl_bh(bond);
}
static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
@@ -787,7 +825,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
u32 curr_index;
- _lock_rx_hashtbl(bond);
+ _lock_rx_hashtbl_bh(bond);
curr_index = bond_info->rx_hashtbl_head;
while (curr_index != RLB_NULL_INDEX) {
@@ -812,7 +850,7 @@ static void rlb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
curr_index = next_index;
}
- _unlock_rx_hashtbl(bond);
+ _unlock_rx_hashtbl_bh(bond);
}
/*********************** tlb/rlb shared functions *********************/
@@ -871,16 +909,12 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
}
}
-/* hw is a boolean parameter that determines whether we should try and
- * set the hw address of the device as well as the hw address of the
- * net_device
- */
-static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[], int hw)
+static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
{
struct net_device *dev = slave->dev;
struct sockaddr s_addr;
- if (!hw) {
+ if (slave->bond->params.mode == BOND_MODE_TLB) {
memcpy(dev->dev_addr, addr, dev->addr_len);
return 0;
}
@@ -910,8 +944,8 @@ static void alb_swap_mac_addr(struct bonding *bond, struct slave *slave1, struct
u8 tmp_mac_addr[ETH_ALEN];
memcpy(tmp_mac_addr, slave1->dev->dev_addr, ETH_ALEN);
- alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr, bond->alb_info.rlb_enabled);
- alb_set_slave_mac_addr(slave2, tmp_mac_addr, bond->alb_info.rlb_enabled);
+ alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr);
+ alb_set_slave_mac_addr(slave2, tmp_mac_addr);
}
@@ -1058,8 +1092,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
/* Try setting slave mac to bond address and fall-through
to code handling that situation below... */
- alb_set_slave_mac_addr(slave, bond->dev->dev_addr,
- bond->alb_info.rlb_enabled);
+ alb_set_slave_mac_addr(slave, bond->dev->dev_addr);
}
/* The slave's address is equal to the address of the bond.
@@ -1095,8 +1128,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
}
if (free_mac_slave) {
- alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr,
- bond->alb_info.rlb_enabled);
+ alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr);
pr_warning("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
bond->dev->name, slave->dev->name,
@@ -1320,7 +1352,9 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
res = bond_dev_queue_xmit(bond, skb, tx_slave->dev);
} else {
if (tx_slave) {
- tlb_clear_slave(bond, tx_slave, 0);
+ _lock_tx_hashtbl(bond);
+ __tlb_clear_slave(bond, tx_slave, 0);
+ _unlock_tx_hashtbl(bond);
}
}
@@ -1451,8 +1485,7 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
{
int res;
- res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr,
- bond->alb_info.rlb_enabled);
+ res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr);
if (res) {
return res;
}
@@ -1603,8 +1636,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
alb_swap_mac_addr(bond, swap_slave, new_slave);
} else {
/* set the new_slave to the bond mac address */
- alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr,
- bond->alb_info.rlb_enabled);
+ alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
}
if (swap_slave) {
@@ -1664,8 +1696,7 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave);
alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave);
} else {
- alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr,
- bond->alb_info.rlb_enabled);
+ alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
read_lock(&bond->lock);
alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
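
The bond_alb.c changes split each hash-table lock helper into a _bh variant (spin_lock_bh) for process and timer context and a plain variant (spin_lock) reserved for the transmit path, where the diff's own comment notes that softirqs are already disabled. A minimal self-contained sketch of that division of labour (generic names, not the bonding code):

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(tbl_lock);
	static unsigned long tbl_hits;

	/* Process or timer context: softirqs can interrupt us, so they must be
	 * disabled while the lock is held or the xmit path could deadlock on it. */
	static void touch_table_from_process_context(void)
	{
		spin_lock_bh(&tbl_lock);
		tbl_hits++;
		spin_unlock_bh(&tbl_lock);
	}

	/* Transmit path: already runs with softirqs disabled, so the cheaper
	 * plain lock is sufficient. */
	static void touch_table_from_xmit_path(void)
	{
		spin_lock(&tbl_lock);
		tbl_hits++;
		spin_unlock(&tbl_lock);
	}
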
diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c
deleted file mode 100644
index 027a0ee..0000000
--- a/drivers/net/bonding/bond_ipv6.c
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
- * Copyright(c) 2008 Hewlett-Packard Development Company, L.P.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/types.h>
-#include <linux/if_vlan.h>
-#include <net/ipv6.h>
-#include <net/ndisc.h>
-#include <net/addrconf.h>
-#include <net/netns/generic.h>
-#include "bonding.h"
-
-/*
- * Assign bond->master_ipv6 to the next IPv6 address in the list, or
- * zero it out if there are none.
- */
-static void bond_glean_dev_ipv6(struct net_device *dev, struct in6_addr *addr)
-{
- struct inet6_dev *idev;
-
- if (!dev)
- return;
-
- idev = in6_dev_get(dev);
- if (!idev)
- return;
-
- read_lock_bh(&idev->lock);
- if (!list_empty(&idev->addr_list)) {
- struct inet6_ifaddr *ifa
- = list_first_entry(&idev->addr_list,
- struct inet6_ifaddr, if_list);
- ipv6_addr_copy(addr, &ifa->addr);
- } else
- ipv6_addr_set(addr, 0, 0, 0, 0);
-
- read_unlock_bh(&idev->lock);
-
- in6_dev_put(idev);
-}
-
-static void bond_na_send(struct net_device *slave_dev,
- struct in6_addr *daddr,
- int router,
- unsigned short vlan_id)
-{
- struct in6_addr mcaddr;
- struct icmp6hdr icmp6h = {
- .icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT,
- };
- struct sk_buff *skb;
-
- icmp6h.icmp6_router = router;
- icmp6h.icmp6_solicited = 0;
- icmp6h.icmp6_override = 1;
-
- addrconf_addr_solict_mult(daddr, &mcaddr);
-
- pr_debug("ipv6 na on slave %s: dest %pI6, src %pI6\n",
- slave_dev->name, &mcaddr, daddr);
-
- skb = ndisc_build_skb(slave_dev, &mcaddr, daddr, &icmp6h, daddr,
- ND_OPT_TARGET_LL_ADDR);
-
- if (!skb) {
- pr_err("NA packet allocation failed\n");
- return;
- }
-
- if (vlan_id) {
- /* The Ethernet header is not present yet, so it is
- * too early to insert a VLAN tag. Force use of an
- * out-of-line tag here and let dev_hard_start_xmit()
- * insert it if the slave hardware can't.
- */
- skb = __vlan_hwaccel_put_tag(skb, vlan_id);
- if (!skb) {
- pr_err("failed to insert VLAN tag\n");
- return;
- }
- }
-
- ndisc_send_skb(skb, slave_dev, NULL, &mcaddr, daddr, &icmp6h);
-}
-
-/*
- * Kick out an unsolicited Neighbor Advertisement for an IPv6 address on
- * the bonding master. This will help the switch learn our address
- * if in active-backup mode.
- *
- * Caller must hold curr_slave_lock for read or better
- */
-void bond_send_unsolicited_na(struct bonding *bond)
-{
- struct slave *slave = bond->curr_active_slave;
- struct vlan_entry *vlan;
- struct inet6_dev *idev;
- int is_router;
-
- pr_debug("%s: bond %s slave %s\n", bond->dev->name,
- __func__, slave ? slave->dev->name : "NULL");
-
- if (!slave || !bond->send_unsol_na ||
- test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
- return;
-
- bond->send_unsol_na--;
-
- idev = in6_dev_get(bond->dev);
- if (!idev)
- return;
-
- is_router = !!idev->cnf.forwarding;
-
- in6_dev_put(idev);
-
- if (!ipv6_addr_any(&bond->master_ipv6))
- bond_na_send(slave->dev, &bond->master_ipv6, is_router, 0);
-
- list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- if (!ipv6_addr_any(&vlan->vlan_ipv6)) {
- bond_na_send(slave->dev, &vlan->vlan_ipv6, is_router,
- vlan->vlan_id);
- }
- }
-}
-
-/*
- * bond_inet6addr_event: handle inet6addr notifier chain events.
- *
- * We keep track of device IPv6 addresses primarily to use as source
- * addresses in NS probes.
- *
- * We track one IPv6 for the main device (if it has one).
- */
-static int bond_inet6addr_event(struct notifier_block *this,
- unsigned long event,
- void *ptr)
-{
- struct inet6_ifaddr *ifa = ptr;
- struct net_device *vlan_dev, *event_dev = ifa->idev->dev;
- struct bonding *bond;
- struct vlan_entry *vlan;
- struct bond_net *bn = net_generic(dev_net(event_dev), bond_net_id);
-
- list_for_each_entry(bond, &bn->dev_list, bond_list) {
- if (bond->dev == event_dev) {
- switch (event) {
- case NETDEV_UP:
- if (ipv6_addr_any(&bond->master_ipv6))
- ipv6_addr_copy(&bond->master_ipv6,
- &ifa->addr);
- return NOTIFY_OK;
- case NETDEV_DOWN:
- if (ipv6_addr_equal(&bond->master_ipv6,
- &ifa->addr))
- bond_glean_dev_ipv6(bond->dev,
- &bond->master_ipv6);
- return NOTIFY_OK;
- default:
- return NOTIFY_DONE;
- }
- }
-
- list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
- rcu_read_lock();
- vlan_dev = __vlan_find_dev_deep(bond->dev,
- vlan->vlan_id);
- rcu_read_unlock();
- if (vlan_dev == event_dev) {
- switch (event) {
- case NETDEV_UP:
- if (ipv6_addr_any(&vlan->vlan_ipv6))
- ipv6_addr_copy(&vlan->vlan_ipv6,
- &ifa->addr);
- return NOTIFY_OK;
- case NETDEV_DOWN:
- if (ipv6_addr_equal(&vlan->vlan_ipv6,
- &ifa->addr))
- bond_glean_dev_ipv6(vlan_dev,
- &vlan->vlan_ipv6);
- return NOTIFY_OK;
- default:
- return NOTIFY_DONE;
- }
- }
- }
- }
- return NOTIFY_DONE;
-}
-
-static struct notifier_block bond_inet6addr_notifier = {
- .notifier_call = bond_inet6addr_event,
-};
-
-void bond_register_ipv6_notifier(void)
-{
- register_inet6addr_notifier(&bond_inet6addr_notifier);
-}
-
-void bond_unregister_ipv6_notifier(void)
-{
- unregister_inet6addr_notifier(&bond_inet6addr_notifier);
-}
-
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 7f87568..435984a 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -428,27 +428,34 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
* @bond_dev: bonding net device that got called
* @vid: vlan id being added
*/
-static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
+static int bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct slave *slave;
+ struct slave *slave, *stop_at;
int i, res;
bond_for_each_slave(bond, slave, i) {
- struct net_device *slave_dev = slave->dev;
- const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-
- if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) &&
- slave_ops->ndo_vlan_rx_add_vid) {
- slave_ops->ndo_vlan_rx_add_vid(slave_dev, vid);
- }
+ res = vlan_vid_add(slave->dev, vid);
+ if (res)
+ goto unwind;
}
res = bond_add_vlan(bond, vid);
if (res) {
pr_err("%s: Error: Failed to add vlan id %d\n",
bond_dev->name, vid);
+ return res;
}
+
+ return 0;
+
+unwind:
+ /* unwind from head to the slave that failed */
+ stop_at = slave;
+ bond_for_each_slave_from_to(bond, slave, i, bond->first_slave, stop_at)
+ vlan_vid_del(slave->dev, vid);
+
+ return res;
}
/**
@@ -456,56 +463,48 @@ static void bond_vlan_rx_add_vid(struct net_device *bond_dev, uint16_t vid)
* @bond_dev: bonding net device that got called
* @vid: vlan id being removed
*/
-static void bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
+static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, uint16_t vid)
{
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave;
int i, res;
- bond_for_each_slave(bond, slave, i) {
- struct net_device *slave_dev = slave->dev;
- const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-
- if ((slave_dev->features & NETIF_F_HW_VLAN_FILTER) &&
- slave_ops->ndo_vlan_rx_kill_vid) {
- slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vid);
- }
- }
+ bond_for_each_slave(bond, slave, i)
+ vlan_vid_del(slave->dev, vid);
res = bond_del_vlan(bond, vid);
if (res) {
pr_err("%s: Error: Failed to remove vlan id %d\n",
bond_dev->name, vid);
+ return res;
}
+
+ return 0;
}
static void bond_add_vlans_on_slave(struct bonding *bond, struct net_device *slave_dev)
{
struct vlan_entry *vlan;
- const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
-
- if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
- !(slave_ops->ndo_vlan_rx_add_vid))
- return;
+ int res;
- list_for_each_entry(vlan, &bond->vlan_list, vlan_list)
- slave_ops->ndo_vlan_rx_add_vid(slave_dev, vlan->vlan_id);
+ list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
+ res = vlan_vid_add(slave_dev, vlan->vlan_id);
+ if (res)
+ pr_warning("%s: Failed to add vlan id %d to device %s\n",
+ bond->dev->name, vlan->vlan_id,
+ slave_dev->name);
+ }
}
static void bond_del_vlans_from_slave(struct bonding *bond,
struct net_device *slave_dev)
{
- const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
struct vlan_entry *vlan;
- if (!(slave_dev->features & NETIF_F_HW_VLAN_FILTER) ||
- !(slave_ops->ndo_vlan_rx_kill_vid))
- return;
-
list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
if (!vlan->vlan_id)
continue;
- slave_ops->ndo_vlan_rx_kill_vid(slave_dev, vlan->vlan_id);
+ vlan_vid_del(slave_dev, vlan->vlan_id);
}
}
@@ -1325,11 +1324,12 @@ static int bond_sethwaddr(struct net_device *bond_dev,
return 0;
}
-static u32 bond_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t bond_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct slave *slave;
struct bonding *bond = netdev_priv(dev);
- u32 mask;
+ netdev_features_t mask;
int i;
read_lock(&bond->lock);
@@ -1363,7 +1363,7 @@ static void bond_compute_features(struct bonding *bond)
{
struct slave *slave;
struct net_device *bond_dev = bond->dev;
- u32 vlan_features = BOND_VLAN_FEATURES;
+ netdev_features_t vlan_features = BOND_VLAN_FEATURES;
unsigned short max_hard_header_len = ETH_HLEN;
int i;
@@ -1822,7 +1822,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
"but new slave device does not support netpoll.\n",
bond_dev->name);
res = -EBUSY;
- goto err_close;
+ goto err_detach;
}
}
#endif
@@ -1831,7 +1831,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
res = bond_create_slave_symlinks(bond_dev, slave_dev);
if (res)
- goto err_close;
+ goto err_detach;
res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
new_slave);
@@ -1852,6 +1852,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
err_dest_symlinks:
bond_destroy_slave_symlinks(bond_dev, slave_dev);
+err_detach:
+ write_lock_bh(&bond->lock);
+ bond_detach_slave(bond, new_slave);
+ write_unlock_bh(&bond->lock);
+
err_close:
dev_close(slave_dev);
@@ -1897,7 +1902,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave, *oldcurrent;
struct sockaddr addr;
- u32 old_features = bond_dev->features;
+ netdev_features_t old_features = bond_dev->features;
/* slave is not a slave or master is not master of this slave */
if (!(slave_dev->flags & IFF_SLAVE) ||
@@ -4339,7 +4344,7 @@ static void bond_setup(struct net_device *bond_dev)
NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER;
- bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM);
+ bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
bond_dev->features |= bond_dev->hw_features;
}
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 4ef7e2f..aef42f0 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -26,7 +26,6 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sched.h>
-#include <linux/sysdev.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/string.h>
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 0733525..c998e1a 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -117,15 +117,6 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
dev_dbg(&cfhsi->ndev->dev, "%s.\n",
__func__);
-
- ret = cfhsi->dev->cfhsi_wake_up(cfhsi->dev);
- if (ret) {
- dev_warn(&cfhsi->ndev->dev,
- "%s: can't wake up HSI interface: %d.\n",
- __func__, ret);
- return ret;
- }
-
do {
ret = cfhsi->dev->cfhsi_fifo_occupancy(cfhsi->dev,
&fifo_occupancy);
@@ -168,8 +159,6 @@ static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
}
} while (1);
- cfhsi->dev->cfhsi_wake_down(cfhsi->dev);
-
return ret;
}
@@ -944,7 +933,7 @@ static int cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
/* Create HSI frame. */
len = cfhsi_tx_frm(desc, cfhsi);
- BUG_ON(!len);
+ WARN_ON(!len);
/* Set up new transfer. */
res = cfhsi->dev->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->dev);
@@ -989,7 +978,7 @@ static void cfhsi_setup(struct net_device *dev)
dev->netdev_ops = &cfhsi_ops;
dev->type = ARPHRD_CAIF;
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
- dev->mtu = CFHSI_MAX_PAYLOAD_SZ;
+ dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
dev->tx_queue_len = 0;
dev->destructor = free_netdev;
skb_queue_head_init(&cfhsi->qhead);
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 23406e6..8a3054b 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -38,15 +38,15 @@ MODULE_ALIAS_LDISC(N_CAIF);
/*This list is protected by the rtnl lock. */
static LIST_HEAD(ser_list);
-static int ser_loop;
+static bool ser_loop;
module_param(ser_loop, bool, S_IRUGO);
MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode.");
-static int ser_use_stx = 1;
+static bool ser_use_stx = true;
module_param(ser_use_stx, bool, S_IRUGO);
MODULE_PARM_DESC(ser_use_stx, "STX enabled or not.");
-static int ser_use_fcs = 1;
+static bool ser_use_fcs = true;
module_param(ser_use_fcs, bool, S_IRUGO);
MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not.");
@@ -261,7 +261,7 @@ static int handle_tx(struct ser_device *ser)
skb_pull(skb, tty_wr);
if (skb->len == 0) {
struct sk_buff *tmp = skb_dequeue(&ser->head);
- BUG_ON(tmp != skb);
+ WARN_ON(tmp != skb);
if (in_interrupt())
dev_kfree_skb_irq(skb);
else
@@ -305,7 +305,7 @@ static void ldisc_tx_wakeup(struct tty_struct *tty)
ser = tty->disc_data;
BUG_ON(ser == NULL);
- BUG_ON(ser->tty != tty);
+ WARN_ON(ser->tty != tty);
handle_tx(ser);
}
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
index d4b26fb..5b20413 100644
--- a/drivers/net/caif/caif_shmcore.c
+++ b/drivers/net/caif/caif_shmcore.c
@@ -238,11 +238,11 @@ int caif_shmdrv_rx_cb(u32 mbx_msg, void *priv)
if ((avail_emptybuff > HIGH_WATERMARK) &&
(!pshm_drv->tx_empty_available)) {
pshm_drv->tx_empty_available = 1;
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
pshm_drv->cfdev.flowctrl
(pshm_drv->pshm_dev->pshm_netdev,
CAIF_FLOW_ON);
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
/* Schedule the work queue. if required */
if (!work_pending(&pshm_drv->shm_tx_work))
@@ -285,6 +285,7 @@ static void shm_rx_work_func(struct work_struct *rx_work)
list_entry(pshm_drv->rx_full_list.next, struct buf_list,
list);
list_del_init(&pbuf->list);
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
/* Retrieve pointer to start of the packet descriptor area. */
pck_desc = (struct shm_pck_desc *) pbuf->desc_vptr;
@@ -336,7 +337,11 @@ static void shm_rx_work_func(struct work_struct *rx_work)
/* Get a suitable CAIF packet and copy in data. */
skb = netdev_alloc_skb(pshm_drv->pshm_dev->pshm_netdev,
frm_pck_len + 1);
- BUG_ON(skb == NULL);
+
+ if (skb == NULL) {
+ pr_info("OOM: Try next frame in descriptor\n");
+ break;
+ }
p = skb_put(skb, frm_pck_len);
memcpy(p, pbuf->desc_vptr + frm_pck_ofs, frm_pck_len);
@@ -360,6 +365,7 @@ static void shm_rx_work_func(struct work_struct *rx_work)
pck_desc++;
}
+ spin_lock_irqsave(&pshm_drv->lock, flags);
list_add_tail(&pbuf->list, &pshm_drv->rx_pend_list);
spin_unlock_irqrestore(&pshm_drv->lock, flags);
@@ -412,7 +418,6 @@ static void shm_tx_work_func(struct work_struct *tx_work)
if (skb == NULL)
goto send_msg;
-
/* Check the available no. of buffers in the empty list */
list_for_each(pos, &pshm_drv->tx_empty_list)
avail_emptybuff++;
@@ -421,9 +426,11 @@ static void shm_tx_work_func(struct work_struct *tx_work)
pshm_drv->tx_empty_available) {
/* Update blocking condition. */
pshm_drv->tx_empty_available = 0;
+ spin_unlock_irqrestore(&pshm_drv->lock, flags);
pshm_drv->cfdev.flowctrl
(pshm_drv->pshm_dev->pshm_netdev,
CAIF_FLOW_OFF);
+ spin_lock_irqsave(&pshm_drv->lock, flags);
}
/*
* We simply return back to the caller if we do not have space
@@ -469,6 +476,8 @@ static void shm_tx_work_func(struct work_struct *tx_work)
}
skb = skb_dequeue(&pshm_drv->sk_qhead);
+ if (skb == NULL)
+ break;
/* Copy in CAIF frame. */
skb_copy_bits(skb, 0, pbuf->desc_vptr +
pbuf->frm_ofs + SHM_HDR_LEN +
@@ -477,7 +486,7 @@ static void shm_tx_work_func(struct work_struct *tx_work)
pshm_drv->pshm_dev->pshm_netdev->stats.tx_packets++;
pshm_drv->pshm_dev->pshm_netdev->stats.tx_bytes +=
frmlen;
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
/* Fill in the shared memory packet descriptor area. */
pck_desc = (struct shm_pck_desc *) (pbuf->desc_vptr);
@@ -512,16 +521,11 @@ send_msg:
static int shm_netdev_tx(struct sk_buff *skb, struct net_device *shm_netdev)
{
struct shmdrv_layer *pshm_drv;
- unsigned long flags = 0;
pshm_drv = netdev_priv(shm_netdev);
- spin_lock_irqsave(&pshm_drv->lock, flags);
-
skb_queue_tail(&pshm_drv->sk_qhead, skb);
- spin_unlock_irqrestore(&pshm_drv->lock, flags);
-
/* Schedule Tx work queue. for deferred processing of skbs*/
if (!work_pending(&pshm_drv->shm_tx_work))
queue_work(pshm_drv->pshm_tx_workqueue, &pshm_drv->shm_tx_work);
@@ -606,6 +610,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
pshm_drv->shm_rx_addr = pshm_dev->shm_base_addr +
(NR_TX_BUF * TX_BUF_SZ);
+ spin_lock_init(&pshm_drv->lock);
INIT_LIST_HEAD(&pshm_drv->tx_empty_list);
INIT_LIST_HEAD(&pshm_drv->tx_pend_list);
INIT_LIST_HEAD(&pshm_drv->tx_full_list);
@@ -640,7 +645,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
tx_buf->frm_ofs = SHM_CAIF_FRM_OFS;
if (pshm_dev->shm_loopback)
- tx_buf->desc_vptr = (char *)tx_buf->phy_addr;
+ tx_buf->desc_vptr = (unsigned char *)tx_buf->phy_addr;
else
tx_buf->desc_vptr =
ioremap(tx_buf->phy_addr, TX_BUF_SZ);
@@ -664,7 +669,7 @@ int caif_shmcore_probe(struct shmdev_layer *pshm_dev)
rx_buf->len = RX_BUF_SZ;
if (pshm_dev->shm_loopback)
- rx_buf->desc_vptr = (char *)rx_buf->phy_addr;
+ rx_buf->desc_vptr = (unsigned char *)rx_buf->phy_addr;
else
rx_buf->desc_vptr =
ioremap(rx_buf->phy_addr, RX_BUF_SZ);
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 05e791f..96391c3 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -35,7 +35,7 @@ MODULE_DESCRIPTION("CAIF SPI driver");
/* Returns the number of padding bytes for alignment. */
#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))
-static int spi_loop;
+static bool spi_loop;
module_param(spi_loop, bool, S_IRUGO);
MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
@@ -226,7 +226,7 @@ static ssize_t dbgfs_frame(struct file *file, char __user *user_buf,
"Tx data (Len: %d):\n", cfspi->tx_cpck_len);
len += print_frame((buf + len), (DEBUGFS_BUF_SIZE - len),
- cfspi->xfer.va_tx,
+ cfspi->xfer.va_tx[0],
(cfspi->tx_cpck_len + SPI_CMD_SZ), 100);
len += snprintf((buf + len), (DEBUGFS_BUF_SIZE - len),
@@ -599,48 +599,11 @@ static int cfspi_close(struct net_device *dev)
netif_stop_queue(dev);
return 0;
}
-static const struct net_device_ops cfspi_ops = {
- .ndo_open = cfspi_open,
- .ndo_stop = cfspi_close,
- .ndo_start_xmit = cfspi_xmit
-};
-static void cfspi_setup(struct net_device *dev)
+static int cfspi_init(struct net_device *dev)
{
+ int res = 0;
struct cfspi *cfspi = netdev_priv(dev);
- dev->features = 0;
- dev->netdev_ops = &cfspi_ops;
- dev->type = ARPHRD_CAIF;
- dev->flags = IFF_NOARP | IFF_POINTOPOINT;
- dev->tx_queue_len = 0;
- dev->mtu = SPI_MAX_PAYLOAD_SIZE;
- dev->destructor = free_netdev;
- skb_queue_head_init(&cfspi->qhead);
- skb_queue_head_init(&cfspi->chead);
- cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
- cfspi->cfdev.use_frag = false;
- cfspi->cfdev.use_stx = false;
- cfspi->cfdev.use_fcs = false;
- cfspi->ndev = dev;
-}
-
-int cfspi_spi_probe(struct platform_device *pdev)
-{
- struct cfspi *cfspi = NULL;
- struct net_device *ndev;
- struct cfspi_dev *dev;
- int res;
- dev = (struct cfspi_dev *)pdev->dev.platform_data;
-
- ndev = alloc_netdev(sizeof(struct cfspi),
- "cfspi%d", cfspi_setup);
- if (!ndev)
- return -ENOMEM;
-
- cfspi = netdev_priv(ndev);
- netif_stop_queue(ndev);
- cfspi->ndev = ndev;
- cfspi->pdev = pdev;
/* Set flow info. */
cfspi->flow_off_sent = 0;
@@ -656,16 +619,11 @@ int cfspi_spi_probe(struct platform_device *pdev)
cfspi->slave_talked = false;
}
- /* Assign the SPI device. */
- cfspi->dev = dev;
- /* Assign the device ifc to this SPI interface. */
- dev->ifc = &cfspi->ifc;
-
/* Allocate DMA buffers. */
- cfspi->xfer.va_tx = dma_alloc(&cfspi->xfer.pa_tx);
- if (!cfspi->xfer.va_tx) {
+ cfspi->xfer.va_tx[0] = dma_alloc(&cfspi->xfer.pa_tx[0]);
+ if (!cfspi->xfer.va_tx[0]) {
res = -ENODEV;
- goto err_dma_alloc_tx;
+ goto err_dma_alloc_tx_0;
}
cfspi->xfer.va_rx = dma_alloc(&cfspi->xfer.pa_rx);
@@ -714,6 +672,87 @@ int cfspi_spi_probe(struct platform_device *pdev)
/* Schedule the work queue. */
queue_work(cfspi->wq, &cfspi->work);
+ return 0;
+
+ err_create_wq:
+ dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
+ err_dma_alloc_rx:
+ dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
+ err_dma_alloc_tx_0:
+ return res;
+}
+
+static void cfspi_uninit(struct net_device *dev)
+{
+ struct cfspi *cfspi = netdev_priv(dev);
+
+ /* Remove from list. */
+ spin_lock(&cfspi_list_lock);
+ list_del(&cfspi->list);
+ spin_unlock(&cfspi_list_lock);
+
+ cfspi->ndev = NULL;
+ /* Free DMA buffers. */
+ dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
+ dma_free(cfspi->xfer.va_tx[0], cfspi->xfer.pa_tx[0]);
+ set_bit(SPI_TERMINATE, &cfspi->state);
+ wake_up_interruptible(&cfspi->wait);
+ destroy_workqueue(cfspi->wq);
+ /* Destroy debugfs directory and files. */
+ dev_debugfs_rem(cfspi);
+ return;
+}
+
+static const struct net_device_ops cfspi_ops = {
+ .ndo_open = cfspi_open,
+ .ndo_stop = cfspi_close,
+ .ndo_init = cfspi_init,
+ .ndo_uninit = cfspi_uninit,
+ .ndo_start_xmit = cfspi_xmit
+};
+
+static void cfspi_setup(struct net_device *dev)
+{
+ struct cfspi *cfspi = netdev_priv(dev);
+ dev->features = 0;
+ dev->netdev_ops = &cfspi_ops;
+ dev->type = ARPHRD_CAIF;
+ dev->flags = IFF_NOARP | IFF_POINTOPOINT;
+ dev->tx_queue_len = 0;
+ dev->mtu = SPI_MAX_PAYLOAD_SIZE;
+ dev->destructor = free_netdev;
+ skb_queue_head_init(&cfspi->qhead);
+ skb_queue_head_init(&cfspi->chead);
+ cfspi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
+ cfspi->cfdev.use_frag = false;
+ cfspi->cfdev.use_stx = false;
+ cfspi->cfdev.use_fcs = false;
+ cfspi->ndev = dev;
+}
+
+int cfspi_spi_probe(struct platform_device *pdev)
+{
+ struct cfspi *cfspi = NULL;
+ struct net_device *ndev;
+ struct cfspi_dev *dev;
+ int res;
+ dev = (struct cfspi_dev *)pdev->dev.platform_data;
+
+ ndev = alloc_netdev(sizeof(struct cfspi),
+ "cfspi%d", cfspi_setup);
+ if (!dev)
+ return -ENODEV;
+
+ cfspi = netdev_priv(ndev);
+ netif_stop_queue(ndev);
+ cfspi->ndev = ndev;
+ cfspi->pdev = pdev;
+
+ /* Assign the SPI device. */
+ cfspi->dev = dev;
+ /* Assign the device ifc to this SPI interface. */
+ dev->ifc = &cfspi->ifc;
+
/* Register network device. */
res = register_netdev(ndev);
if (res) {
@@ -723,15 +762,6 @@ int cfspi_spi_probe(struct platform_device *pdev)
return res;
err_net_reg:
- dev_debugfs_rem(cfspi);
- set_bit(SPI_TERMINATE, &cfspi->state);
- wake_up_interruptible(&cfspi->wait);
- destroy_workqueue(cfspi->wq);
- err_create_wq:
- dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
- err_dma_alloc_rx:
- dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx);
- err_dma_alloc_tx:
free_netdev(ndev);
return res;
@@ -739,34 +769,8 @@ int cfspi_spi_probe(struct platform_device *pdev)
int cfspi_spi_remove(struct platform_device *pdev)
{
- struct list_head *list_node;
- struct list_head *n;
- struct cfspi *cfspi = NULL;
- struct cfspi_dev *dev;
-
- dev = (struct cfspi_dev *)pdev->dev.platform_data;
- spin_lock(&cfspi_list_lock);
- list_for_each_safe(list_node, n, &cfspi_list) {
- cfspi = list_entry(list_node, struct cfspi, list);
- /* Find the corresponding device. */
- if (cfspi->dev == dev) {
- /* Remove from list. */
- list_del(list_node);
- /* Free DMA buffers. */
- dma_free(cfspi->xfer.va_rx, cfspi->xfer.pa_rx);
- dma_free(cfspi->xfer.va_tx, cfspi->xfer.pa_tx);
- set_bit(SPI_TERMINATE, &cfspi->state);
- wake_up_interruptible(&cfspi->wait);
- destroy_workqueue(cfspi->wq);
- /* Destroy debugfs directory and files. */
- dev_debugfs_rem(cfspi);
- unregister_netdev(cfspi->ndev);
- spin_unlock(&cfspi_list_lock);
- return 0;
- }
- }
- spin_unlock(&cfspi_list_lock);
- return -ENODEV;
+ /* Everything is done in cfspi_uninit(). */
+ return 0;
}
static void __exit cfspi_exit_module(void)
@@ -777,7 +781,7 @@ static void __exit cfspi_exit_module(void)
list_for_each_safe(list_node, n, &cfspi_list) {
cfspi = list_entry(list_node, struct cfspi, list);
- platform_device_unregister(cfspi->pdev);
+ unregister_netdev(cfspi->ndev);
}
/* Destroy sysfs files. */
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index f6c98fb..ab45758 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -116,6 +116,8 @@ source "drivers/net/can/sja1000/Kconfig"
source "drivers/net/can/c_can/Kconfig"
+source "drivers/net/can/cc770/Kconfig"
+
source "drivers/net/can/usb/Kconfig"
source "drivers/net/can/softing/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 24ebfe8..938be37 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -14,6 +14,7 @@ obj-y += softing/
obj-$(CONFIG_CAN_SJA1000) += sja1000/
obj-$(CONFIG_CAN_MSCAN) += mscan/
obj-$(CONFIG_CAN_C_CAN) += c_can/
+obj-$(CONFIG_CAN_CC770) += cc770/
obj-$(CONFIG_CAN_AT91) += at91_can.o
obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 044ea06..6ea905c 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -1383,18 +1383,7 @@ static struct platform_driver at91_can_driver = {
.id_table = at91_can_id_table,
};
-static int __init at91_can_module_init(void)
-{
- return platform_driver_register(&at91_can_driver);
-}
-
-static void __exit at91_can_module_exit(void)
-{
- platform_driver_unregister(&at91_can_driver);
-}
-
-module_init(at91_can_module_init);
-module_exit(at91_can_module_exit);
+module_platform_driver(at91_can_driver);
MODULE_AUTHOR("Marc Kleine-Budde <mkl@pengutronix.de>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index a1c5abc..349e0fa 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -676,17 +676,7 @@ static struct platform_driver bfin_can_driver = {
},
};
-static int __init bfin_can_init(void)
-{
- return platform_driver_register(&bfin_can_driver);
-}
-module_init(bfin_can_init);
-
-static void __exit bfin_can_exit(void)
-{
- platform_driver_unregister(&bfin_can_driver);
-}
-module_exit(bfin_can_exit);
+module_platform_driver(bfin_can_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index 0b5c6f8..5e1a5ff 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -197,17 +197,7 @@ static struct platform_driver c_can_plat_driver = {
.remove = __devexit_p(c_can_plat_remove),
};
-static int __init c_can_plat_init(void)
-{
- return platform_driver_register(&c_can_plat_driver);
-}
-module_init(c_can_plat_init);
-
-static void __exit c_can_plat_exit(void)
-{
- platform_driver_unregister(&c_can_plat_driver);
-}
-module_exit(c_can_plat_exit);
+module_platform_driver(c_can_plat_driver);
MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/cc770/Kconfig b/drivers/net/can/cc770/Kconfig
new file mode 100644
index 0000000..22c07a8
--- /dev/null
+++ b/drivers/net/can/cc770/Kconfig
@@ -0,0 +1,21 @@
+menuconfig CAN_CC770
+ tristate "Bosch CC770 and Intel AN82527 devices"
+ depends on CAN_DEV && HAS_IOMEM
+
+if CAN_CC770
+
+config CAN_CC770_ISA
+ tristate "ISA Bus based legacy CC770 driver"
+ ---help---
+ This driver adds legacy support for CC770 and AN82527 chips
+ connected to the ISA bus using I/O port, memory mapped or
+ indirect access.
+
+config CAN_CC770_PLATFORM
+ tristate "Generic Platform Bus based CC770 driver"
+ ---help---
+ This driver adds support for the CC770 and AN82527 chips
+	  connected to the "platform bus" (the Linux abstraction for
+	  devices attached directly to the processor).
+
+endif
diff --git a/drivers/net/can/cc770/Makefile b/drivers/net/can/cc770/Makefile
new file mode 100644
index 0000000..9fb8321
--- /dev/null
+++ b/drivers/net/can/cc770/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the Bosch CC770 CAN controller drivers.
+#
+
+obj-$(CONFIG_CAN_CC770) += cc770.o
+obj-$(CONFIG_CAN_CC770_ISA) += cc770_isa.o
+obj-$(CONFIG_CAN_CC770_PLATFORM) += cc770_platform.o
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
new file mode 100644
index 0000000..c30f0e6
--- /dev/null
+++ b/drivers/net/can/cc770/cc770.c
@@ -0,0 +1,883 @@
+/*
+ * Core driver for the CC770 and AN82527 CAN controllers
+ *
+ * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/error.h>
+#include <linux/can/dev.h>
+#include <linux/can/platform/cc770.h>
+
+#include "cc770.h"
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION(KBUILD_MODNAME "CAN netdevice driver");
+
+/*
+ * The CC770 is a CAN controller from Bosch, which is 100% compatible
+ * with the AN82527 from Intel, but with "bugs" being fixed and some
+ * additional functionality, mainly:
+ *
+ * 1. RX and TX error counters are readable.
+ * 2. Support of silent (listen-only) mode.
+ * 3. Message object 15 can receive all types of frames, also RTR and EFF.
+ *
+ * Details are available from Bosch's "CC770_Product_Info_2007-01.pdf",
+ * which explains in detail the compatibility between the CC770 and the
+ * 82527. This driver uses additional functionality 3 on real CC770
+ * devices. Unfortunately, the CC770 still does not store the message
+ * identifier of received remote transmission request frames, so it is
+ * set to 0.
+ *
+ * The message objects 1..14 can be used for TX and RX while message
+ * object 15 is optimized for RX. It has a shadow register for reliable
+ * data reception under heavy bus load. Therefore it makes sense to use
+ * this message object for the needed use case. The frame type (EFF/SFF)
+ * for message object 15 can be defined via the kernel module parameter
+ * "msgobj15_eff". If not equal to 0, it will receive 29-bit EFF frames,
+ * otherwise 11-bit SFF messages.
+ */
+static int msgobj15_eff;
+module_param(msgobj15_eff, int, S_IRUGO);
+MODULE_PARM_DESC(msgobj15_eff, "Extended 29-bit frames for message object 15 "
+ "(default: 11-bit standard frames)");
+
+static int i82527_compat;
+module_param(i82527_compat, int, S_IRUGO);
+MODULE_PARM_DESC(i82527_compat, "Strict Intel 82527 compatibility mode "
+ "without using additional functions");
+
+/*
+ * This driver uses the last 5 message objects 11..15. The definitions
+ * and structure below allow configuring and assigning them to the
+ * real message objects.
+ */
+static unsigned char cc770_obj_flags[CC770_OBJ_MAX] = {
+ [CC770_OBJ_RX0] = CC770_OBJ_FLAG_RX,
+ [CC770_OBJ_RX1] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_EFF,
+ [CC770_OBJ_RX_RTR0] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_RTR,
+ [CC770_OBJ_RX_RTR1] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_RTR |
+ CC770_OBJ_FLAG_EFF,
+ [CC770_OBJ_TX] = 0,
+};
+
+static struct can_bittiming_const cc770_bittiming_const = {
+ .name = KBUILD_MODNAME,
+ .tseg1_min = 1,
+ .tseg1_max = 16,
+ .tseg2_min = 1,
+ .tseg2_max = 8,
+ .sjw_max = 4,
+ .brp_min = 1,
+ .brp_max = 64,
+ .brp_inc = 1,
+};
+
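+/*
+ * The interrupt register reports ID 1 for a status change (handled in
+ * cc770_interrupt() below) and IDs from 2 upwards for the message
+ * objects; e.g. intid2obj(2) is CC770_OBJ_RX0, i.e. message object 15.
+ */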
+static inline int intid2obj(unsigned int intid)
+{
+ if (intid == 2)
+ return 0;
+ else
+ return MSGOBJ_LAST + 2 - intid;
+}
+
+static void enable_all_objs(const struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ u8 msgcfg;
+ unsigned char obj_flags;
+ unsigned int o, mo;
+
+ for (o = 0; o < ARRAY_SIZE(priv->obj_flags); o++) {
+ obj_flags = priv->obj_flags[o];
+ mo = obj2msgobj(o);
+
+ if (obj_flags & CC770_OBJ_FLAG_RX) {
+ /*
+ * We don't need extra objects for RTR and EFF if
+ * the additional CC770 functions are enabled.
+ */
+ if (priv->control_normal_mode & CTRL_EAF) {
+ if (o > 0)
+ continue;
+ netdev_dbg(dev, "Message object %d for "
+ "RX data, RTR, SFF and EFF\n", mo);
+ } else {
+ netdev_dbg(dev,
+ "Message object %d for RX %s %s\n",
+ mo, obj_flags & CC770_OBJ_FLAG_RTR ?
+ "RTR" : "data",
+ obj_flags & CC770_OBJ_FLAG_EFF ?
+ "EFF" : "SFF");
+ }
+
+ if (obj_flags & CC770_OBJ_FLAG_EFF)
+ msgcfg = MSGCFG_XTD;
+ else
+ msgcfg = 0;
+ if (obj_flags & CC770_OBJ_FLAG_RTR)
+ msgcfg |= MSGCFG_DIR;
+
+ cc770_write_reg(priv, msgobj[mo].config, msgcfg);
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_SET | TXIE_RES |
+ RXIE_SET | INTPND_RES);
+
+ if (obj_flags & CC770_OBJ_FLAG_RTR)
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ NEWDAT_RES | CPUUPD_SET |
+ TXRQST_RES | RMTPND_RES);
+ else
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ NEWDAT_RES | MSGLST_RES |
+ TXRQST_RES | RMTPND_RES);
+ } else {
+ netdev_dbg(dev, "Message object %d for "
+ "TX data, RTR, SFF and EFF\n", mo);
+
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ RMTPND_RES | TXRQST_RES |
+ CPUUPD_RES | NEWDAT_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_RES | TXIE_RES |
+ RXIE_RES | INTPND_RES);
+ }
+ }
+}
+
+static void disable_all_objs(const struct cc770_priv *priv)
+{
+ int o, mo;
+
+ for (o = 0; o < ARRAY_SIZE(priv->obj_flags); o++) {
+ mo = obj2msgobj(o);
+
+ if (priv->obj_flags[o] & CC770_OBJ_FLAG_RX) {
+ if (o > 0 && priv->control_normal_mode & CTRL_EAF)
+ continue;
+
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ NEWDAT_RES | MSGLST_RES |
+ TXRQST_RES | RMTPND_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_RES | TXIE_RES |
+ RXIE_RES | INTPND_RES);
+ } else {
+ /* Clear message object for send */
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ RMTPND_RES | TXRQST_RES |
+ CPUUPD_RES | NEWDAT_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_RES | TXIE_RES |
+ RXIE_RES | INTPND_RES);
+ }
+ }
+}
+
+static void set_reset_mode(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+
+ /* Enable configuration, put chip in bus-off, disable interrupts */
+ cc770_write_reg(priv, control, CTRL_CCE | CTRL_INI);
+
+ priv->can.state = CAN_STATE_STOPPED;
+
+ /* Clear interrupts */
+ cc770_read_reg(priv, interrupt);
+
+ /* Clear status register */
+ cc770_write_reg(priv, status, 0);
+
+ /* Disable all used message objects */
+ disable_all_objs(priv);
+}
+
+static void set_normal_mode(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+
+ /* Clear interrupts */
+ cc770_read_reg(priv, interrupt);
+
+ /* Clear status register and pre-set last error code */
+ cc770_write_reg(priv, status, STAT_LEC_MASK);
+
+ /* Enable all used message objects */
+ enable_all_objs(dev);
+
+ /*
+ * Clear bus-off, interrupts only for errors,
+ * not for status change
+ */
+ cc770_write_reg(priv, control, priv->control_normal_mode);
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+}
+
+static void chipset_init(struct cc770_priv *priv)
+{
+ int mo, id, data;
+
+ /* Enable configuration and put chip in bus-off, disable interrupts */
+ cc770_write_reg(priv, control, (CTRL_CCE | CTRL_INI));
+
+ /* Set CLKOUT divider and slew rates */
+ cc770_write_reg(priv, clkout, priv->clkout);
+
+ /* Configure CPU interface / CLKOUT enable */
+ cc770_write_reg(priv, cpu_interface, priv->cpu_interface);
+
+ /* Set bus configuration */
+ cc770_write_reg(priv, bus_config, priv->bus_config);
+
+ /* Clear interrupts */
+ cc770_read_reg(priv, interrupt);
+
+ /* Clear status register */
+ cc770_write_reg(priv, status, 0);
+
+ /* Clear and invalidate message objects */
+ for (mo = MSGOBJ_FIRST; mo <= MSGOBJ_LAST; mo++) {
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ INTPND_UNC | RXIE_RES |
+ TXIE_RES | MSGVAL_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ INTPND_RES | RXIE_RES |
+ TXIE_RES | MSGVAL_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ NEWDAT_RES | MSGLST_RES |
+ TXRQST_RES | RMTPND_RES);
+ for (data = 0; data < 8; data++)
+ cc770_write_reg(priv, msgobj[mo].data[data], 0);
+ for (id = 0; id < 4; id++)
+ cc770_write_reg(priv, msgobj[mo].id[id], 0);
+ cc770_write_reg(priv, msgobj[mo].config, 0);
+ }
+
+ /* Set all global ID masks to "don't care" */
+ cc770_write_reg(priv, global_mask_std[0], 0);
+ cc770_write_reg(priv, global_mask_std[1], 0);
+ cc770_write_reg(priv, global_mask_ext[0], 0);
+ cc770_write_reg(priv, global_mask_ext[1], 0);
+ cc770_write_reg(priv, global_mask_ext[2], 0);
+ cc770_write_reg(priv, global_mask_ext[3], 0);
+
+}
+
+static int cc770_probe_chip(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+
+ /* Enable configuration, put chip in bus-off, disable ints */
+ cc770_write_reg(priv, control, CTRL_CCE | CTRL_EAF | CTRL_INI);
+ /* Configure cpu interface / CLKOUT disable */
+ cc770_write_reg(priv, cpu_interface, priv->cpu_interface);
+
+ /*
+ * Check if hardware reset is still active or maybe there
+ * is no chip in this address space
+ */
+ if (cc770_read_reg(priv, cpu_interface) & CPUIF_RST) {
+ netdev_info(dev, "probing @0x%p failed (reset)\n",
+ priv->reg_base);
+ return -ENODEV;
+ }
+
+ /* Write and read back test pattern (some arbitrary values) */
+ cc770_write_reg(priv, msgobj[1].data[1], 0x25);
+ cc770_write_reg(priv, msgobj[2].data[3], 0x52);
+ cc770_write_reg(priv, msgobj[10].data[6], 0xc3);
+ if ((cc770_read_reg(priv, msgobj[1].data[1]) != 0x25) ||
+ (cc770_read_reg(priv, msgobj[2].data[3]) != 0x52) ||
+ (cc770_read_reg(priv, msgobj[10].data[6]) != 0xc3)) {
+ netdev_info(dev, "probing @0x%p failed (pattern)\n",
+ priv->reg_base);
+ return -ENODEV;
+ }
+
+ /* Check if this chip is a CC770 supporting additional functions */
+ if (cc770_read_reg(priv, control) & CTRL_EAF)
+ priv->control_normal_mode |= CTRL_EAF;
+
+ return 0;
+}
+
+static void cc770_start(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+
+ /* leave reset mode */
+ if (priv->can.state != CAN_STATE_STOPPED)
+ set_reset_mode(dev);
+
+ /* leave reset mode */
+ set_normal_mode(dev);
+}
+
+static int cc770_set_mode(struct net_device *dev, enum can_mode mode)
+{
+ switch (mode) {
+ case CAN_MODE_START:
+ cc770_start(dev);
+ netif_wake_queue(dev);
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int cc770_set_bittiming(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct can_bittiming *bt = &priv->can.bittiming;
+ u8 btr0, btr1;
+
+ btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6);
+ btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) |
+ (((bt->phase_seg2 - 1) & 0x7) << 4);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
+ btr1 |= 0x80;
+
+ netdev_info(dev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1);
+
+ cc770_write_reg(priv, bit_timing_0, btr0);
+ cc770_write_reg(priv, bit_timing_1, btr1);
+
+ return 0;
+}
+
+static int cc770_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+
+ bec->txerr = cc770_read_reg(priv, tx_error_counter);
+ bec->rxerr = cc770_read_reg(priv, rx_error_counter);
+
+ return 0;
+}
+
+static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf = (struct can_frame *)skb->data;
+ unsigned int mo = obj2msgobj(CC770_OBJ_TX);
+ u8 dlc, rtr;
+ u32 id;
+ int i;
+
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
+ if ((cc770_read_reg(priv,
+ msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) {
+ netdev_err(dev, "TX register is still occupied!\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ netif_stop_queue(dev);
+
+ dlc = cf->can_dlc;
+ id = cf->can_id;
+ if (cf->can_id & CAN_RTR_FLAG)
+ rtr = 0;
+ else
+ rtr = MSGCFG_DIR;
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_SET | TXIE_SET | RXIE_RES | INTPND_RES);
+ if (id & CAN_EFF_FLAG) {
+ id &= CAN_EFF_MASK;
+ cc770_write_reg(priv, msgobj[mo].config,
+ (dlc << 4) | rtr | MSGCFG_XTD);
+ cc770_write_reg(priv, msgobj[mo].id[3], id << 3);
+ cc770_write_reg(priv, msgobj[mo].id[2], id >> 5);
+ cc770_write_reg(priv, msgobj[mo].id[1], id >> 13);
+ cc770_write_reg(priv, msgobj[mo].id[0], id >> 21);
+ } else {
+ id &= CAN_SFF_MASK;
+ cc770_write_reg(priv, msgobj[mo].config, (dlc << 4) | rtr);
+ cc770_write_reg(priv, msgobj[mo].id[0], id >> 3);
+ cc770_write_reg(priv, msgobj[mo].id[1], id << 5);
+ }
+
+ for (i = 0; i < dlc; i++)
+ cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]);
+
+ /* Store echo skb before starting the transfer */
+ can_put_echo_skb(skb, dev, 0);
+
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ RMTPND_RES | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC);
+
+ stats->tx_bytes += dlc;
+
+
+ /*
+ * HM: We had some cases of repeated IRQs, so make sure the
+ * INT is acknowledged. It is already acknowledged further up,
+ * but doing it again here fixed the issue.
+ */
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);
+
+ return NETDEV_TX_OK;
+}
+
+static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u8 config;
+ u32 id;
+ int i;
+
+ skb = alloc_can_skb(dev, &cf);
+ if (!skb)
+ return;
+
+ config = cc770_read_reg(priv, msgobj[mo].config);
+
+ if (ctrl1 & RMTPND_SET) {
+ /*
+ * Unfortunately, the chip does not store the real message
+ * identifier of the received remote transmission request
+ * frame. Therefore we set it to 0.
+ */
+ cf->can_id = CAN_RTR_FLAG;
+ if (config & MSGCFG_XTD)
+ cf->can_id |= CAN_EFF_FLAG;
+ cf->can_dlc = 0;
+ } else {
+ if (config & MSGCFG_XTD) {
+ id = cc770_read_reg(priv, msgobj[mo].id[3]);
+ id |= cc770_read_reg(priv, msgobj[mo].id[2]) << 8;
+ id |= cc770_read_reg(priv, msgobj[mo].id[1]) << 16;
+ id |= cc770_read_reg(priv, msgobj[mo].id[0]) << 24;
+ id >>= 3;
+ id |= CAN_EFF_FLAG;
+ } else {
+ id = cc770_read_reg(priv, msgobj[mo].id[1]);
+ id |= cc770_read_reg(priv, msgobj[mo].id[0]) << 8;
+ id >>= 5;
+ }
+
+ cf->can_id = id;
+ cf->can_dlc = get_can_dlc((config & 0xf0) >> 4);
+ for (i = 0; i < cf->can_dlc; i++)
+ cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]);
+ }
+ netif_rx(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+}
+
+static int cc770_err(struct net_device *dev, u8 status)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ struct can_frame *cf;
+ struct sk_buff *skb;
+ u8 lec;
+
+ netdev_dbg(dev, "status interrupt (%#x)\n", status);
+
+ skb = alloc_can_err_skb(dev, &cf);
+ if (!skb)
+ return -ENOMEM;
+
+ /* Use extended functions of the CC770 */
+ if (priv->control_normal_mode & CTRL_EAF) {
+ cf->data[6] = cc770_read_reg(priv, tx_error_counter);
+ cf->data[7] = cc770_read_reg(priv, rx_error_counter);
+ }
+
+ if (status & STAT_BOFF) {
+ /* Disable interrupts */
+ cc770_write_reg(priv, control, CTRL_INI);
+ cf->can_id |= CAN_ERR_BUSOFF;
+ priv->can.state = CAN_STATE_BUS_OFF;
+ can_bus_off(dev);
+ } else if (status & STAT_WARN) {
+ cf->can_id |= CAN_ERR_CRTL;
+ /* Only the CC770 shows error passive */
+ if (cf->data[7] > 127) {
+ cf->data[1] = CAN_ERR_CRTL_RX_PASSIVE |
+ CAN_ERR_CRTL_TX_PASSIVE;
+ priv->can.state = CAN_STATE_ERROR_PASSIVE;
+ priv->can.can_stats.error_passive++;
+ } else {
+ cf->data[1] = CAN_ERR_CRTL_RX_WARNING |
+ CAN_ERR_CRTL_TX_WARNING;
+ priv->can.state = CAN_STATE_ERROR_WARNING;
+ priv->can.can_stats.error_warning++;
+ }
+ } else {
+ /* Back to error active */
+ cf->can_id |= CAN_ERR_PROT;
+ cf->data[2] = CAN_ERR_PROT_ACTIVE;
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ }
+
+ lec = status & STAT_LEC_MASK;
+ if (lec < 7 && lec > 0) {
+ if (lec == STAT_LEC_ACK) {
+ cf->can_id |= CAN_ERR_ACK;
+ } else {
+ cf->can_id |= CAN_ERR_PROT;
+ switch (lec) {
+ case STAT_LEC_STUFF:
+ cf->data[2] |= CAN_ERR_PROT_STUFF;
+ break;
+ case STAT_LEC_FORM:
+ cf->data[2] |= CAN_ERR_PROT_FORM;
+ break;
+ case STAT_LEC_BIT1:
+ cf->data[2] |= CAN_ERR_PROT_BIT1;
+ break;
+ case STAT_LEC_BIT0:
+ cf->data[2] |= CAN_ERR_PROT_BIT0;
+ break;
+ case STAT_LEC_CRC:
+ cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ;
+ break;
+ }
+ }
+ }
+
+ netif_rx(skb);
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+
+ return 0;
+}
+
+static int cc770_status_interrupt(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ u8 status;
+
+ status = cc770_read_reg(priv, status);
+ /* Reset the status register including RXOK and TXOK */
+ cc770_write_reg(priv, status, STAT_LEC_MASK);
+
+ if (status & (STAT_WARN | STAT_BOFF) ||
+ (status & STAT_LEC_MASK) != STAT_LEC_MASK) {
+ cc770_err(dev, status);
+ return status & STAT_BOFF;
+ }
+
+ return 0;
+}
+
+static void cc770_rx_interrupt(struct net_device *dev, unsigned int o)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ unsigned int mo = obj2msgobj(o);
+ u8 ctrl1;
+ int n = CC770_MAX_MSG;
+
+ while (n--) {
+ ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);
+
+ if (!(ctrl1 & NEWDAT_SET)) {
+ /* Check for RTR if additional functions are enabled */
+ if (priv->control_normal_mode & CTRL_EAF) {
+ if (!(cc770_read_reg(priv, msgobj[mo].ctrl0) &
+ INTPND_SET))
+ break;
+ } else {
+ break;
+ }
+ }
+
+ if (ctrl1 & MSGLST_SET) {
+ stats->rx_over_errors++;
+ stats->rx_errors++;
+ }
+ if (mo < MSGOBJ_LAST)
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ NEWDAT_RES | MSGLST_RES |
+ TXRQST_UNC | RMTPND_UNC);
+ cc770_rx(dev, mo, ctrl1);
+
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_SET | TXIE_RES |
+ RXIE_SET | INTPND_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ NEWDAT_RES | MSGLST_RES |
+ TXRQST_RES | RMTPND_RES);
+ }
+}
+
+static void cc770_rtr_interrupt(struct net_device *dev, unsigned int o)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ unsigned int mo = obj2msgobj(o);
+ u8 ctrl0, ctrl1;
+ int n = CC770_MAX_MSG;
+
+ while (n--) {
+ ctrl0 = cc770_read_reg(priv, msgobj[mo].ctrl0);
+ if (!(ctrl0 & INTPND_SET))
+ break;
+
+ ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1);
+ cc770_rx(dev, mo, ctrl1);
+
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_SET | TXIE_RES |
+ RXIE_SET | INTPND_RES);
+ cc770_write_reg(priv, msgobj[mo].ctrl1,
+ NEWDAT_RES | CPUUPD_SET |
+ TXRQST_RES | RMTPND_RES);
+ }
+}
+
+static void cc770_tx_interrupt(struct net_device *dev, unsigned int o)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct net_device_stats *stats = &dev->stats;
+ unsigned int mo = obj2msgobj(o);
+
+ /* Nothing more to send, switch off interrupts */
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES);
+ /*
+ * We had some cases of repeated IRQ so make sure the
+ * INT is acknowledged
+ */
+ cc770_write_reg(priv, msgobj[mo].ctrl0,
+ MSGVAL_UNC | TXIE_UNC | RXIE_UNC | INTPND_RES);
+
+ stats->tx_packets++;
+ can_get_echo_skb(dev, 0);
+ netif_wake_queue(dev);
+}
+
+irqreturn_t cc770_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct cc770_priv *priv = netdev_priv(dev);
+ u8 intid;
+ int o, n = 0;
+
+ /* Shared interrupts and IRQ off? */
+ if (priv->can.state == CAN_STATE_STOPPED)
+ return IRQ_NONE;
+
+ if (priv->pre_irq)
+ priv->pre_irq(priv);
+
+ while (n < CC770_MAX_IRQ) {
+ /* Read the highest pending interrupt request */
+ intid = cc770_read_reg(priv, interrupt);
+ if (!intid)
+ break;
+ n++;
+
+ if (intid == 1) {
+ /* Exit in case of bus-off */
+ if (cc770_status_interrupt(dev))
+ break;
+ } else {
+ o = intid2obj(intid);
+
+ if (o >= CC770_OBJ_MAX) {
+ netdev_err(dev, "Unexpected interrupt id %d\n",
+ intid);
+ continue;
+ }
+
+ if (priv->obj_flags[o] & CC770_OBJ_FLAG_RTR)
+ cc770_rtr_interrupt(dev, o);
+ else if (priv->obj_flags[o] & CC770_OBJ_FLAG_RX)
+ cc770_rx_interrupt(dev, o);
+ else
+ cc770_tx_interrupt(dev, o);
+ }
+ }
+
+ if (priv->post_irq)
+ priv->post_irq(priv);
+
+ if (n >= CC770_MAX_IRQ)
+ netdev_dbg(dev, "%d messages handled in ISR", n);
+
+ return (n) ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static int cc770_open(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ int err;
+
+ /* set chip into reset mode */
+ set_reset_mode(dev);
+
+ /* common open */
+ err = open_candev(dev);
+ if (err)
+ return err;
+
+ err = request_irq(dev->irq, &cc770_interrupt, priv->irq_flags,
+ dev->name, dev);
+ if (err) {
+ close_candev(dev);
+ return -EAGAIN;
+ }
+
+ /* init and start chip */
+ cc770_start(dev);
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+static int cc770_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ set_reset_mode(dev);
+
+ free_irq(dev->irq, dev);
+ close_candev(dev);
+
+ return 0;
+}
+
+struct net_device *alloc_cc770dev(int sizeof_priv)
+{
+ struct net_device *dev;
+ struct cc770_priv *priv;
+
+ dev = alloc_candev(sizeof(struct cc770_priv) + sizeof_priv,
+ CC770_ECHO_SKB_MAX);
+ if (!dev)
+ return NULL;
+
+ priv = netdev_priv(dev);
+
+ priv->dev = dev;
+ priv->can.bittiming_const = &cc770_bittiming_const;
+ priv->can.do_set_bittiming = cc770_set_bittiming;
+ priv->can.do_set_mode = cc770_set_mode;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+
+ memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags));
+
+ if (sizeof_priv)
+ priv->priv = (void *)priv + sizeof(struct cc770_priv);
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(alloc_cc770dev);
+
+void free_cc770dev(struct net_device *dev)
+{
+ free_candev(dev);
+}
+EXPORT_SYMBOL_GPL(free_cc770dev);
+
+static const struct net_device_ops cc770_netdev_ops = {
+ .ndo_open = cc770_open,
+ .ndo_stop = cc770_close,
+ .ndo_start_xmit = cc770_start_xmit,
+};
+
+int register_cc770dev(struct net_device *dev)
+{
+ struct cc770_priv *priv = netdev_priv(dev);
+ int err;
+
+ err = cc770_probe_chip(dev);
+ if (err)
+ return err;
+
+ dev->netdev_ops = &cc770_netdev_ops;
+
+ dev->flags |= IFF_ECHO; /* we support local echo */
+
+ /* Should we use additional functions? */
+ if (!i82527_compat && priv->control_normal_mode & CTRL_EAF) {
+ priv->can.do_get_berr_counter = cc770_get_berr_counter;
+ priv->control_normal_mode = CTRL_IE | CTRL_EAF | CTRL_EIE;
+ netdev_dbg(dev, "i82527 mode with additional functions\n");
+ } else {
+ priv->control_normal_mode = CTRL_IE | CTRL_EIE;
+ netdev_dbg(dev, "strict i82527 compatibility mode\n");
+ }
+
+ chipset_init(priv);
+ set_reset_mode(dev);
+
+ return register_candev(dev);
+}
+EXPORT_SYMBOL_GPL(register_cc770dev);
+
+void unregister_cc770dev(struct net_device *dev)
+{
+ set_reset_mode(dev);
+ unregister_candev(dev);
+}
+EXPORT_SYMBOL_GPL(unregister_cc770dev);
+
+static __init int cc770_init(void)
+{
+ if (msgobj15_eff) {
+ cc770_obj_flags[CC770_OBJ_RX0] |= CC770_OBJ_FLAG_EFF;
+ cc770_obj_flags[CC770_OBJ_RX1] &= ~CC770_OBJ_FLAG_EFF;
+ }
+
+ pr_info("CAN netdevice driver\n");
+
+ return 0;
+}
+module_init(cc770_init);
+
+static __exit void cc770_exit(void)
+{
+ pr_info("driver removed\n");
+}
+module_exit(cc770_exit);
diff --git a/drivers/net/can/cc770/cc770.h b/drivers/net/can/cc770/cc770.h
new file mode 100644
index 0000000..a1739db
--- /dev/null
+++ b/drivers/net/can/cc770/cc770.h
@@ -0,0 +1,203 @@
+/*
+ * Core driver for the CC770 and AN82527 CAN controllers
+ *
+ * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CC770_DEV_H
+#define CC770_DEV_H
+
+#include <linux/can/dev.h>
+
+struct cc770_msgobj {
+ u8 ctrl0;
+ u8 ctrl1;
+ u8 id[4];
+ u8 config;
+ u8 data[8];
+ u8 dontuse; /* padding */
+} __packed;
+
+struct cc770_regs {
+ union {
+ struct cc770_msgobj msgobj[16]; /* Message object 1..15 */
+ struct {
+ u8 control; /* Control Register */
+ u8 status; /* Status Register */
+ u8 cpu_interface; /* CPU Interface Register */
+ u8 dontuse1;
+ u8 high_speed_read[2]; /* High Speed Read */
+ u8 global_mask_std[2]; /* Standard Global Mask */
+ u8 global_mask_ext[4]; /* Extended Global Mask */
+ u8 msg15_mask[4]; /* Message 15 Mask */
+ u8 dontuse2[15];
+ u8 clkout; /* Clock Out Register */
+ u8 dontuse3[15];
+ u8 bus_config; /* Bus Configuration Register */
+ u8 dontuse4[15];
+ u8 bit_timing_0; /* Bit Timing Register byte 0 */
+ u8 dontuse5[15];
+ u8 bit_timing_1; /* Bit Timing Register byte 1 */
+ u8 dontuse6[15];
+ u8 interrupt; /* Interrupt Register */
+ u8 dontuse7[15];
+ u8 rx_error_counter; /* Receive Error Counter */
+ u8 dontuse8[15];
+ u8 tx_error_counter; /* Transmit Error Counter */
+ u8 dontuse9[31];
+ u8 p1_conf;
+ u8 dontuse10[15];
+ u8 p2_conf;
+ u8 dontuse11[15];
+ u8 p1_in;
+ u8 dontuse12[15];
+ u8 p2_in;
+ u8 dontuse13[15];
+ u8 p1_out;
+ u8 dontuse14[15];
+ u8 p2_out;
+ u8 dontuse15[15];
+ u8 serial_reset_addr;
+ };
+ };
+} __packed;
+
+/* Control Register (0x00) */
+#define CTRL_INI 0x01 /* Initialization */
+#define CTRL_IE 0x02 /* Interrupt Enable */
+#define CTRL_SIE 0x04 /* Status Interrupt Enable */
+#define CTRL_EIE 0x08 /* Error Interrupt Enable */
+#define CTRL_EAF 0x20 /* Enable additional functions */
+#define CTRL_CCE 0x40 /* Change Configuration Enable */
+
+/* Status Register (0x01) */
+#define STAT_LEC_STUFF 0x01 /* Stuff error */
+#define STAT_LEC_FORM 0x02 /* Form error */
+#define STAT_LEC_ACK 0x03 /* Acknowledgement error */
+#define STAT_LEC_BIT1 0x04 /* Bit1 error */
+#define STAT_LEC_BIT0 0x05 /* Bit0 error */
+#define STAT_LEC_CRC 0x06 /* CRC error */
+#define STAT_LEC_MASK 0x07 /* Last Error Code mask */
+#define STAT_TXOK 0x08 /* Transmit Message Successfully */
+#define STAT_RXOK 0x10 /* Receive Message Successfully */
+#define STAT_WAKE 0x20 /* Wake Up Status */
+#define STAT_WARN 0x40 /* Warning Status */
+#define STAT_BOFF 0x80 /* Bus Off Status */
+
+/*
+ * CPU Interface Register (0x02)
+ * Clock Out Register (0x1f)
+ * Bus Configuration Register (0x2f)
+ *
+ * see include/linux/can/platform/cc770.h
+ */
+
+/* Message Control Register 0 (Base Address + 0x0) */
+#define INTPND_RES 0x01 /* No Interrupt pending */
+#define INTPND_SET 0x02 /* Interrupt pending */
+#define INTPND_UNC 0x03
+#define RXIE_RES 0x04 /* Receive Interrupt Disable */
+#define RXIE_SET 0x08 /* Receive Interrupt Enable */
+#define RXIE_UNC 0x0c
+#define TXIE_RES 0x10 /* Transmit Interrupt Disable */
+#define TXIE_SET 0x20 /* Transmit Interrupt Enable */
+#define TXIE_UNC 0x30
+#define MSGVAL_RES 0x40 /* Message Invalid */
+#define MSGVAL_SET 0x80 /* Message Valid */
+#define MSGVAL_UNC 0xc0
+
+/* Message Control Register 1 (Base Address + 0x01) */
+#define NEWDAT_RES 0x01 /* No New Data */
+#define NEWDAT_SET 0x02 /* New Data */
+#define NEWDAT_UNC 0x03
+#define MSGLST_RES 0x04 /* No Message Lost */
+#define MSGLST_SET 0x08 /* Message Lost */
+#define MSGLST_UNC 0x0c
+#define CPUUPD_RES 0x04 /* No CPU Updating */
+#define CPUUPD_SET 0x08 /* CPU Updating */
+#define CPUUPD_UNC 0x0c
+#define TXRQST_RES 0x10 /* No Transmission Request */
+#define TXRQST_SET 0x20 /* Transmission Request */
+#define TXRQST_UNC 0x30
+#define RMTPND_RES 0x40 /* No Remote Request Pending */
+#define RMTPND_SET 0x80 /* Remote Request Pending */
+#define RMTPND_UNC 0xc0
+
+/* Message Configuration Register (Base Address + 0x06) */
+#define MSGCFG_XTD 0x04 /* Extended Identifier */
+#define MSGCFG_DIR 0x08 /* Direction is Transmit */
+
+#define MSGOBJ_FIRST 1
+#define MSGOBJ_LAST 15
+
+#define CC770_IO_SIZE 0x100
+#define CC770_MAX_IRQ 20 /* max. number of interrupts handled in ISR */
+#define CC770_MAX_MSG 4 /* max. number of messages handled in ISR */
+
+#define CC770_ECHO_SKB_MAX 1
+
+#define cc770_read_reg(priv, member) \
+ priv->read_reg(priv, offsetof(struct cc770_regs, member))
+
+#define cc770_write_reg(priv, member, value) \
+ priv->write_reg(priv, offsetof(struct cc770_regs, member), value)
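+
+/*
+ * Example usage (as in cc770.c): read the status register and clear any
+ * pending interrupt of a device with private data "priv":
+ *
+ *   u8 status = cc770_read_reg(priv, status);
+ *   cc770_read_reg(priv, interrupt);
+ */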
+
+/*
+ * Message objects and flags used by this driver
+ */
+#define CC770_OBJ_FLAG_RX 0x01
+#define CC770_OBJ_FLAG_RTR 0x02
+#define CC770_OBJ_FLAG_EFF 0x04
+
+enum {
+ CC770_OBJ_RX0 = 0, /* for receiving normal messages */
+ CC770_OBJ_RX1, /* for receiving normal messages */
+ CC770_OBJ_RX_RTR0, /* for receiving remote transmission requests */
+ CC770_OBJ_RX_RTR1, /* for receiving remote transmission requests */
+ CC770_OBJ_TX, /* for sending messages */
+ CC770_OBJ_MAX
+};
+
+#define obj2msgobj(o) (MSGOBJ_LAST - (o)) /* message object 11..15 */
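+
+/*
+ * For example, obj2msgobj(CC770_OBJ_RX0) is message object 15 and
+ * obj2msgobj(CC770_OBJ_TX) is message object 11.
+ */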
+
+/*
+ * CC770 private data structure
+ */
+struct cc770_priv {
+ struct can_priv can; /* must be the first member */
+ struct sk_buff *echo_skb;
+
+ /* the lower-layer is responsible for appropriate locking */
+ u8 (*read_reg)(const struct cc770_priv *priv, int reg);
+ void (*write_reg)(const struct cc770_priv *priv, int reg, u8 val);
+ void (*pre_irq)(const struct cc770_priv *priv);
+ void (*post_irq)(const struct cc770_priv *priv);
+
+ void *priv; /* for board-specific data */
+ struct net_device *dev;
+
+ void __iomem *reg_base; /* ioremap'ed address to registers */
+ unsigned long irq_flags; /* for request_irq() */
+
+ unsigned char obj_flags[CC770_OBJ_MAX];
+ u8 control_normal_mode; /* Control register for normal mode */
+ u8 cpu_interface; /* CPU interface register */
+ u8 clkout; /* Clock out register */
+ u8 bus_config; /* Bus configuration register */
+};
+
+struct net_device *alloc_cc770dev(int sizeof_priv);
+void free_cc770dev(struct net_device *dev);
+int register_cc770dev(struct net_device *dev);
+void unregister_cc770dev(struct net_device *dev);
+
+#endif /* CC770_DEV_H */
diff --git a/drivers/net/can/cc770/cc770_isa.c b/drivers/net/can/cc770/cc770_isa.c
new file mode 100644
index 0000000..9f3a25c
--- /dev/null
+++ b/drivers/net/can/cc770/cc770_isa.c
@@ -0,0 +1,381 @@
+/*
+ * Driver for CC770 and AN82527 CAN controllers on the legacy ISA bus
+ *
+ * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * Bosch CC770 and Intel AN82527 CAN controllers on the ISA or PC-104 bus.
+ * The I/O port or memory address and the IRQ number must be specified via
+ * module parameters:
+ *
+ * insmod cc770_isa.ko port=0x310,0x380 irq=7,11
+ *
+ * for ISA devices using I/O ports or:
+ *
+ * insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11
+ *
+ * for memory mapped ISA devices.
+ *
+ * Indirect access via address and data port is supported as well:
+ *
+ * insmod cc770_isa.ko port=0x310,0x380 indirect=1 irq=7,11
+ *
+ * Furthermore, the following mode parameters can be defined:
+ *
+ * clk: External oscillator clock frequency (default=16000000 [16 MHz])
+ * cir: CPU interface register (default=0x40 [DSC])
+ * bcr: Bus configuration register (default=0x40 [CBY])
+ * cor: Clockout register (default=0x00)
+ *
+ * Note: for clk, cir, bcr and cor, the first argument re-defines the
+ * default for all other devices, e.g.:
+ *
+ * insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11 clk=24000000
+ *
+ * is equivalent to
+ *
+ * insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11 clk=24000000,24000000
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/platform/cc770.h>
+
+#include "cc770.h"
+
+#define MAXDEV 8
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("Socket-CAN driver for CC770 on the ISA bus");
+MODULE_LICENSE("GPL v2");
+
+#define CLK_DEFAULT 16000000 /* 16 MHz */
+#define COR_DEFAULT 0x00
+#define BCR_DEFAULT BUSCFG_CBY
+
+static unsigned long port[MAXDEV];
+static unsigned long mem[MAXDEV];
+static int __devinitdata irq[MAXDEV];
+static int __devinitdata clk[MAXDEV];
+static u8 __devinitdata cir[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static u8 __devinitdata cor[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static u8 __devinitdata bcr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static int __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
+
+module_param_array(port, ulong, NULL, S_IRUGO);
+MODULE_PARM_DESC(port, "I/O port number");
+
+module_param_array(mem, ulong, NULL, S_IRUGO);
+MODULE_PARM_DESC(mem, "I/O memory address");
+
+module_param_array(indirect, int, NULL, S_IRUGO);
+MODULE_PARM_DESC(indirect, "Indirect access via address and data port");
+
+module_param_array(irq, int, NULL, S_IRUGO);
+MODULE_PARM_DESC(irq, "IRQ number");
+
+module_param_array(clk, int, NULL, S_IRUGO);
+MODULE_PARM_DESC(clk, "External oscillator clock frequency "
+ "(default=16000000 [16 MHz])");
+
+module_param_array(cir, byte, NULL, S_IRUGO);
+MODULE_PARM_DESC(cir, "CPU interface register (default=0x40 [DSC])");
+
+module_param_array(cor, byte, NULL, S_IRUGO);
+MODULE_PARM_DESC(cor, "Clockout register (default=0x00)");
+
+module_param_array(bcr, byte, NULL, S_IRUGO);
+MODULE_PARM_DESC(bcr, "Bus configuration register (default=0x40 [CBY])");
+
+#define CC770_IOSIZE 0x20
+#define CC770_IOSIZE_INDIRECT 0x02
+
+/* Spinlock for cc770_isa_port_write_reg_indirect
+ * and cc770_isa_port_read_reg_indirect
+ */
+static DEFINE_SPINLOCK(cc770_isa_port_lock);
+
+static struct platform_device *cc770_isa_devs[MAXDEV];
+
+static u8 cc770_isa_mem_read_reg(const struct cc770_priv *priv, int reg)
+{
+ return readb(priv->reg_base + reg);
+}
+
+static void cc770_isa_mem_write_reg(const struct cc770_priv *priv,
+ int reg, u8 val)
+{
+ writeb(val, priv->reg_base + reg);
+}
+
+static u8 cc770_isa_port_read_reg(const struct cc770_priv *priv, int reg)
+{
+ return inb((unsigned long)priv->reg_base + reg);
+}
+
+static void cc770_isa_port_write_reg(const struct cc770_priv *priv,
+ int reg, u8 val)
+{
+ outb(val, (unsigned long)priv->reg_base + reg);
+}
+
+static u8 cc770_isa_port_read_reg_indirect(const struct cc770_priv *priv,
+ int reg)
+{
+ unsigned long base = (unsigned long)priv->reg_base;
+ unsigned long flags;
+ u8 val;
+
+ spin_lock_irqsave(&cc770_isa_port_lock, flags);
+ outb(reg, base);
+ val = inb(base + 1);
+ spin_unlock_irqrestore(&cc770_isa_port_lock, flags);
+
+ return val;
+}
+
+static void cc770_isa_port_write_reg_indirect(const struct cc770_priv *priv,
+ int reg, u8 val)
+{
+ unsigned long base = (unsigned long)priv->reg_base;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cc770_isa_port_lock, flags);
+ outb(reg, base);
+ outb(val, base + 1);
+ spin_unlock_irqrestore(&cc770_isa_port_lock, flags);
+}
+
+static int __devinit cc770_isa_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct cc770_priv *priv;
+ void __iomem *base = NULL;
+ int iosize = CC770_IOSIZE;
+ int idx = pdev->id;
+ int err;
+ u32 clktmp;
+
+ dev_dbg(&pdev->dev, "probing idx=%d: port=%#lx, mem=%#lx, irq=%d\n",
+ idx, port[idx], mem[idx], irq[idx]);
+ if (mem[idx]) {
+ if (!request_mem_region(mem[idx], iosize, KBUILD_MODNAME)) {
+ err = -EBUSY;
+ goto exit;
+ }
+ base = ioremap_nocache(mem[idx], iosize);
+ if (!base) {
+ err = -ENOMEM;
+ goto exit_release;
+ }
+ } else {
+ if (indirect[idx] > 0 ||
+ (indirect[idx] == -1 && indirect[0] > 0))
+ iosize = CC770_IOSIZE_INDIRECT;
+ if (!request_region(port[idx], iosize, KBUILD_MODNAME)) {
+ err = -EBUSY;
+ goto exit;
+ }
+ }
+
+ dev = alloc_cc770dev(0);
+ if (!dev) {
+ err = -ENOMEM;
+ goto exit_unmap;
+ }
+ priv = netdev_priv(dev);
+
+ dev->irq = irq[idx];
+ priv->irq_flags = IRQF_SHARED;
+ if (mem[idx]) {
+ priv->reg_base = base;
+ dev->base_addr = mem[idx];
+ priv->read_reg = cc770_isa_mem_read_reg;
+ priv->write_reg = cc770_isa_mem_write_reg;
+ } else {
+ priv->reg_base = (void __iomem *)port[idx];
+ dev->base_addr = port[idx];
+
+ if (iosize == CC770_IOSIZE_INDIRECT) {
+ priv->read_reg = cc770_isa_port_read_reg_indirect;
+ priv->write_reg = cc770_isa_port_write_reg_indirect;
+ } else {
+ priv->read_reg = cc770_isa_port_read_reg;
+ priv->write_reg = cc770_isa_port_write_reg;
+ }
+ }
+
+ if (clk[idx])
+ clktmp = clk[idx];
+ else if (clk[0])
+ clktmp = clk[0];
+ else
+ clktmp = CLK_DEFAULT;
+ priv->can.clock.freq = clktmp;
+
+ if (cir[idx] != 0xff) {
+ priv->cpu_interface = cir[idx];
+ } else if (cir[0] != 0xff) {
+ priv->cpu_interface = cir[0];
+ } else {
+ /* The system clock may not exceed 10 MHz */
+ if (clktmp > 10000000) {
+ priv->cpu_interface |= CPUIF_DSC;
+ clktmp /= 2;
+ }
+ /* The memory clock may not exceed 8 MHz */
+ if (clktmp > 8000000)
+ priv->cpu_interface |= CPUIF_DMC;
+ }
+
+ if (priv->cpu_interface & CPUIF_DSC)
+ priv->can.clock.freq /= 2;
+
+ if (bcr[idx] != 0xff)
+ priv->bus_config = bcr[idx];
+ else if (bcr[0] != 0xff)
+ priv->bus_config = bcr[0];
+ else
+ priv->bus_config = BCR_DEFAULT;
+
+ if (cor[idx] != 0xff)
+ priv->clkout = cor[idx];
+ else if (cor[0] != 0xff)
+ priv->clkout = cor[0];
+ else
+ priv->clkout = COR_DEFAULT;
+
+ dev_set_drvdata(&pdev->dev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ err = register_cc770dev(dev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "couldn't register device (err=%d)\n", err);
+ goto exit_unmap;
+ }
+
+ dev_info(&pdev->dev, "device registered (reg_base=0x%p, irq=%d)\n",
+ priv->reg_base, dev->irq);
+ return 0;
+
+ exit_unmap:
+ if (mem[idx])
+ iounmap(base);
+ exit_release:
+ if (mem[idx])
+ release_mem_region(mem[idx], iosize);
+ else
+ release_region(port[idx], iosize);
+ exit:
+ return err;
+}
+
+static int __devexit cc770_isa_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = dev_get_drvdata(&pdev->dev);
+ struct cc770_priv *priv = netdev_priv(dev);
+ int idx = pdev->id;
+
+ unregister_cc770dev(dev);
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ if (mem[idx]) {
+ iounmap(priv->reg_base);
+ release_mem_region(mem[idx], CC770_IOSIZE);
+ } else {
+ if (priv->read_reg == cc770_isa_port_read_reg_indirect)
+ release_region(port[idx], CC770_IOSIZE_INDIRECT);
+ else
+ release_region(port[idx], CC770_IOSIZE);
+ }
+ free_cc770dev(dev);
+
+ return 0;
+}
+
+static struct platform_driver cc770_isa_driver = {
+ .probe = cc770_isa_probe,
+ .remove = __devexit_p(cc770_isa_remove),
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init cc770_isa_init(void)
+{
+ int idx, err;
+
+ for (idx = 0; idx < ARRAY_SIZE(cc770_isa_devs); idx++) {
+ if ((port[idx] || mem[idx]) && irq[idx]) {
+ cc770_isa_devs[idx] =
+ platform_device_alloc(KBUILD_MODNAME, idx);
+ if (!cc770_isa_devs[idx]) {
+ err = -ENOMEM;
+ goto exit_free_devices;
+ }
+ err = platform_device_add(cc770_isa_devs[idx]);
+ if (err) {
+ platform_device_put(cc770_isa_devs[idx]);
+ goto exit_free_devices;
+ }
+ pr_debug("platform device %d: port=%#lx, mem=%#lx, "
+ "irq=%d\n",
+ idx, port[idx], mem[idx], irq[idx]);
+ } else if (idx == 0 || port[idx] || mem[idx]) {
+ pr_err("insufficient parameters supplied\n");
+ err = -EINVAL;
+ goto exit_free_devices;
+ }
+ }
+
+ err = platform_driver_register(&cc770_isa_driver);
+ if (err)
+ goto exit_free_devices;
+
+ pr_info("driver for max. %d devices registered\n", MAXDEV);
+
+ return 0;
+
+exit_free_devices:
+ while (--idx >= 0) {
+ if (cc770_isa_devs[idx])
+ platform_device_unregister(cc770_isa_devs[idx]);
+ }
+
+ return err;
+}
+module_init(cc770_isa_init);
+
+static void __exit cc770_isa_exit(void)
+{
+ int idx;
+
+ platform_driver_unregister(&cc770_isa_driver);
+ for (idx = 0; idx < ARRAY_SIZE(cc770_isa_devs); idx++) {
+ if (cc770_isa_devs[idx])
+ platform_device_unregister(cc770_isa_devs[idx]);
+ }
+}
+module_exit(cc770_isa_exit);
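The probe routine above resolves clk, cir, bcr and cor with the same three-step rule: take the per-device module parameter if it was set, fall back to the value given for index 0 (so a single value can cover all devices), and otherwise use the compiled-in default. A minimal sketch of that rule as a standalone helper, assuming <linux/types.h> for u8; the helper name is illustrative and not part of the patch:

static u8 cc770_isa_pick(const u8 *arr, int idx, u8 unset, u8 def)
{
	if (arr[idx] != unset)	/* parameter given for this device */
		return arr[idx];
	if (arr[0] != unset)	/* parameter given once, applies to all */
		return arr[0];
	return def;		/* neither given: use the default */
}

In the patch itself the rule is open-coded per parameter because cir, bcr and cor use 0xff as the "unset" marker while clk uses 0.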
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
new file mode 100644
index 0000000..53115ee
--- /dev/null
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -0,0 +1,272 @@
+/*
+ * Driver for CC770 and AN82527 CAN controllers on the platform bus
+ *
+ * Copyright (C) 2009, 2011 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+/*
+ * If platform data is used, you should have similar definitions
+ * in your board-specific code:
+ *
+ * static struct cc770_platform_data myboard_cc770_pdata = {
+ * .osc_freq = 16000000,
+ * .cir = 0x41,
+ * .cor = 0x20,
+ * .bcr = 0x40,
+ * };
+ *
+ * Please see include/linux/can/platform/cc770.h for description of
+ * above fields.
+ *
+ * If the device tree is used, you need a CAN node definition in your
+ * DTS file similar to:
+ *
+ * can@3,100 {
+ * compatible = "bosch,cc770";
+ * reg = <3 0x100 0x80>;
+ * interrupts = <2 0>;
+ * interrupt-parent = <&mpic>;
+ * bosch,external-clock-frequency = <16000000>;
+ * };
+ *
+ * See "Documentation/devicetree/bindings/net/can/cc770.txt" for further
+ * information.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/can/platform/cc770.h>
+
+#include "cc770.h"
+
+#define DRV_NAME "cc770_platform"
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("Socket-CAN driver for CC770 on the platform bus");
+MODULE_LICENSE("GPL v2");
+
+#define CC770_PLATFORM_CAN_CLOCK 16000000
+
+static u8 cc770_platform_read_reg(const struct cc770_priv *priv, int reg)
+{
+ return ioread8(priv->reg_base + reg);
+}
+
+static void cc770_platform_write_reg(const struct cc770_priv *priv, int reg,
+ u8 val)
+{
+ iowrite8(val, priv->reg_base + reg);
+}
+
+static int __devinit cc770_get_of_node_data(struct platform_device *pdev,
+ struct cc770_priv *priv)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const u32 *prop;
+ int prop_size;
+ u32 clkext;
+
+ prop = of_get_property(np, "bosch,external-clock-frequency",
+ &prop_size);
+ if (prop && (prop_size == sizeof(u32)))
+ clkext = *prop;
+ else
+ clkext = CC770_PLATFORM_CAN_CLOCK; /* default */
+ priv->can.clock.freq = clkext;
+
+ /* The system clock may not exceed 10 MHz */
+ if (priv->can.clock.freq > 10000000) {
+ priv->cpu_interface |= CPUIF_DSC;
+ priv->can.clock.freq /= 2;
+ }
+
+ /* The memory clock may not exceed 8 MHz */
+ if (priv->can.clock.freq > 8000000)
+ priv->cpu_interface |= CPUIF_DMC;
+
+ if (of_get_property(np, "bosch,divide-memory-clock", NULL))
+ priv->cpu_interface |= CPUIF_DMC;
+ if (of_get_property(np, "bosch,iso-low-speed-mux", NULL))
+ priv->cpu_interface |= CPUIF_MUX;
+
+ if (!of_get_property(np, "bosch,no-comperator-bypass", NULL))
+ priv->bus_config |= BUSCFG_CBY;
+ if (of_get_property(np, "bosch,disconnect-rx0-input", NULL))
+ priv->bus_config |= BUSCFG_DR0;
+ if (of_get_property(np, "bosch,disconnect-rx1-input", NULL))
+ priv->bus_config |= BUSCFG_DR1;
+ if (of_get_property(np, "bosch,disconnect-tx1-output", NULL))
+ priv->bus_config |= BUSCFG_DT1;
+ if (of_get_property(np, "bosch,polarity-dominant", NULL))
+ priv->bus_config |= BUSCFG_POL;
+
+ prop = of_get_property(np, "bosch,clock-out-frequency", &prop_size);
+ if (prop && (prop_size == sizeof(u32)) && *prop > 0) {
+ u32 cdv = clkext / *prop;
+ int slew;
+
+ if (cdv > 0 && cdv < 16) {
+ priv->cpu_interface |= CPUIF_CEN;
+ priv->clkout |= (cdv - 1) & CLKOUT_CD_MASK;
+
+ prop = of_get_property(np, "bosch,slew-rate",
+ &prop_size);
+ if (prop && (prop_size == sizeof(u32))) {
+ slew = *prop;
+ } else {
+ /* Determine default slew rate */
+ slew = (CLKOUT_SL_MASK >>
+ CLKOUT_SL_SHIFT) -
+ ((cdv * clkext - 1) / 8000000);
+ if (slew < 0)
+ slew = 0;
+ }
+ priv->clkout |= (slew << CLKOUT_SL_SHIFT) &
+ CLKOUT_SL_MASK;
+ } else {
+ dev_dbg(&pdev->dev, "invalid clock-out-frequency\n");
+ }
+ }
+
+ return 0;
+}
+
+static int __devinit cc770_get_platform_data(struct platform_device *pdev,
+ struct cc770_priv *priv)
+{
+ struct cc770_platform_data *pdata = pdev->dev.platform_data;
+
+ priv->can.clock.freq = pdata->osc_freq;
+ priv->clkout = pdata->cor;
+ priv->bus_config = pdata->bcr;
+ priv->cpu_interface = pdata->cir;
+
+ if (priv->cpu_interface & CPUIF_DSC)
+ priv->can.clock.freq /= 2;
+
+ return 0;
+}
+
+static int __devinit cc770_platform_probe(struct platform_device *pdev)
+{
+ struct net_device *dev;
+ struct cc770_priv *priv;
+ struct resource *mem;
+ resource_size_t mem_size;
+ void __iomem *base;
+ int err, irq;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!mem || irq <= 0)
+ return -ENODEV;
+
+ mem_size = resource_size(mem);
+ if (!request_mem_region(mem->start, mem_size, pdev->name))
+ return -EBUSY;
+
+ base = ioremap(mem->start, mem_size);
+ if (!base) {
+ err = -ENOMEM;
+ goto exit_release_mem;
+ }
+
+ dev = alloc_cc770dev(0);
+ if (!dev) {
+ err = -ENOMEM;
+ goto exit_unmap_mem;
+ }
+
+ dev->irq = irq;
+ priv = netdev_priv(dev);
+ priv->read_reg = cc770_platform_read_reg;
+ priv->write_reg = cc770_platform_write_reg;
+ priv->irq_flags = IRQF_SHARED;
+ priv->reg_base = base;
+
+ if (pdev->dev.of_node)
+ err = cc770_get_of_node_data(pdev, priv);
+ else if (pdev->dev.platform_data)
+ err = cc770_get_platform_data(pdev, priv);
+ else
+ err = -ENODEV;
+ if (err)
+ goto exit_free_cc770;
+
+ dev_dbg(&pdev->dev,
+ "reg_base=0x%p irq=%d clock=%d cpu_interface=0x%02x "
+ "bus_config=0x%02x clkout=0x%02x\n",
+ priv->reg_base, dev->irq, priv->can.clock.freq,
+ priv->cpu_interface, priv->bus_config, priv->clkout);
+
+ dev_set_drvdata(&pdev->dev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ err = register_cc770dev(dev);
+ if (err) {
+ dev_err(&pdev->dev,
+ "couldn't register CC700 device (err=%d)\n", err);
+ goto exit_free_cc770;
+ }
+
+ return 0;
+
+exit_free_cc770:
+ free_cc770dev(dev);
+exit_unmap_mem:
+ iounmap(base);
+exit_release_mem:
+ release_mem_region(mem->start, mem_size);
+
+ return err;
+}
+
+static int __devexit cc770_platform_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = dev_get_drvdata(&pdev->dev);
+ struct cc770_priv *priv = netdev_priv(dev);
+ struct resource *mem;
+
+ unregister_cc770dev(dev);
+ iounmap(priv->reg_base);
+ free_cc770dev(dev);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(mem->start, resource_size(mem));
+
+ return 0;
+}
+
+static struct of_device_id __devinitdata cc770_platform_table[] = {
+ {.compatible = "bosch,cc770"}, /* CC770 from Bosch */
+ {.compatible = "intc,82527"}, /* AN82527 from Intel CP */
+ {},
+};
+
+static struct platform_driver cc770_platform_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = cc770_platform_table,
+ },
+ .probe = cc770_platform_probe,
+ .remove = __devexit_p(cc770_platform_remove),
+};
+
+module_platform_driver(cc770_platform_driver);
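To complement the header comment in cc770_platform.c, here is a sketch of how board code might register a matching platform device. The register base, the interrupt number and the device id are placeholders, not values taken from the patch; only the platform-data values come from the driver's own example comment:

#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/can/platform/cc770.h>

static struct cc770_platform_data myboard_cc770_pdata = {
	.osc_freq = 16000000,
	.cir	  = 0x41,
	.cor	  = 0x20,
	.bcr	  = 0x40,
};

static struct resource myboard_cc770_res[] = {
	{
		.start = 0xfc000100,		/* placeholder register base */
		.end   = 0xfc000100 + 0x80 - 1,
		.flags = IORESOURCE_MEM,
	}, {
		.start = 4,			/* placeholder interrupt line */
		.end   = 4,
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device myboard_cc770_dev = {
	.name		= "cc770_platform",	/* must match DRV_NAME */
	.id		= 0,
	.resource	= myboard_cc770_res,
	.num_resources	= ARRAY_SIZE(myboard_cc770_res),
	.dev		= {
		.platform_data = &myboard_cc770_pdata,
	},
};

Registering it with platform_device_register(&myboard_cc770_dev) would let cc770_platform_probe() pick it up through its platform_data branch.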
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 25695bd..120f1ab 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -454,7 +454,7 @@ static void can_setup(struct net_device *dev)
/* New-style flags. */
dev->flags = IFF_NOARP;
- dev->features = NETIF_F_NO_CSUM;
+ dev->features = NETIF_F_HW_CSUM;
}
struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index e023379..96d2357 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -118,6 +118,9 @@
(FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | FLEXCAN_ESR_BOFF_INT)
#define FLEXCAN_ESR_ERR_ALL \
(FLEXCAN_ESR_ERR_BUS | FLEXCAN_ESR_ERR_STATE)
+#define FLEXCAN_ESR_ALL_INT \
+ (FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | \
+ FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT)
/* FLEXCAN interrupt flag register (IFLAG) bits */
#define FLEXCAN_TX_BUF_ID 8
@@ -577,7 +580,9 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
reg_iflag1 = flexcan_read(&regs->iflag1);
reg_esr = flexcan_read(&regs->esr);
- flexcan_write(FLEXCAN_ESR_ERR_INT, &regs->esr); /* ACK err IRQ */
+ /* ACK all bus error and state change IRQ sources */
+ if (reg_esr & FLEXCAN_ESR_ALL_INT)
+ flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
/*
* schedule NAPI in case of:
@@ -802,7 +807,7 @@ static int flexcan_open(struct net_device *dev)
struct flexcan_priv *priv = netdev_priv(dev);
int err;
- clk_enable(priv->clk);
+ clk_prepare_enable(priv->clk);
err = open_candev(dev);
if (err)
@@ -824,7 +829,7 @@ static int flexcan_open(struct net_device *dev)
out_close:
close_candev(dev);
out:
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
return err;
}
@@ -838,7 +843,7 @@ static int flexcan_close(struct net_device *dev)
flexcan_chip_stop(dev);
free_irq(dev->irq, dev);
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
close_candev(dev);
@@ -877,7 +882,7 @@ static int __devinit register_flexcandev(struct net_device *dev)
struct flexcan_regs __iomem *regs = priv->base;
u32 reg, err;
- clk_enable(priv->clk);
+ clk_prepare_enable(priv->clk);
/* select "bus clock", chip must be disabled */
flexcan_chip_disable(priv);
@@ -911,7 +916,7 @@ static int __devinit register_flexcandev(struct net_device *dev)
out:
/* disable core and turn off clocks */
flexcan_chip_disable(priv);
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
return err;
}
@@ -1060,20 +1065,7 @@ static struct platform_driver flexcan_driver = {
.remove = __devexit_p(flexcan_remove),
};
-static int __init flexcan_init(void)
-{
- pr_info("%s netdevice driver\n", DRV_NAME);
- return platform_driver_register(&flexcan_driver);
-}
-
-static void __exit flexcan_exit(void)
-{
- platform_driver_unregister(&flexcan_driver);
- pr_info("%s: driver removed\n", DRV_NAME);
-}
-
-module_init(flexcan_init);
-module_exit(flexcan_exit);
+module_platform_driver(flexcan_driver);
MODULE_AUTHOR("Sascha Hauer <kernel@pengutronix.de>, "
"Marc Kleine-Budde <kernel@pengutronix.de>");
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 32778d5..08c893c 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1803,20 +1803,9 @@ static struct platform_driver ican3_driver = {
.remove = __devexit_p(ican3_remove),
};
-static int __init ican3_init(void)
-{
- return platform_driver_register(&ican3_driver);
-}
-
-static void __exit ican3_exit(void)
-{
- platform_driver_unregister(&ican3_driver);
-}
+module_platform_driver(ican3_driver);
MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
MODULE_DESCRIPTION("Janz MODULbus VMOD-ICAN3 Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:janz-ican3");
-
-module_init(ican3_init);
-module_exit(ican3_exit);
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 5fedc33..5caa572 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -411,17 +411,7 @@ static struct platform_driver mpc5xxx_can_driver = {
#endif
};
-static int __init mpc5xxx_can_init(void)
-{
- return platform_driver_register(&mpc5xxx_can_driver);
-}
-module_init(mpc5xxx_can_init);
-
-static void __exit mpc5xxx_can_exit(void)
-{
- platform_driver_unregister(&mpc5xxx_can_driver);
-};
-module_exit(mpc5xxx_can_exit);
+module_platform_driver(mpc5xxx_can_driver);
MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
MODULE_DESCRIPTION("Freescale MPC5xxx CAN driver");
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index ec4a311..1c82dd8 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -581,7 +581,10 @@ static int mscan_open(struct net_device *dev)
priv->open_time = jiffies;
- clrbits8(&regs->canctl1, MSCAN_LISTEN);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ setbits8(&regs->canctl1, MSCAN_LISTEN);
+ else
+ clrbits8(&regs->canctl1, MSCAN_LISTEN);
ret = mscan_start(dev);
if (ret)
@@ -690,7 +693,8 @@ struct net_device *alloc_mscandev(void)
priv->can.bittiming_const = &mscan_bittiming_const;
priv->can.do_set_bittiming = mscan_do_set_bittiming;
priv->can.do_set_mode = mscan_do_set_mode;
- priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
+ CAN_CTRLMODE_LISTENONLY;
for (i = 0; i < TX_QUEUE_SIZE; i++) {
priv->tx_queue[i].id = i;
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index d11fbb2..6edc25e 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -66,6 +66,7 @@
#define PCH_IF_CREQ_BUSY BIT(15)
#define PCH_STATUS_INT 0x8000
+#define PCH_RP 0x00008000
#define PCH_REC 0x00007f00
#define PCH_TEC 0x000000ff
@@ -527,7 +528,7 @@ static void pch_can_error(struct net_device *ndev, u32 status)
priv->can.can_stats.error_passive++;
state = CAN_STATE_ERROR_PASSIVE;
cf->can_id |= CAN_ERR_CRTL;
- if (((errc & PCH_REC) >> 8) > 127)
+ if (errc & PCH_RP)
cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
if ((errc & PCH_TEC) > 127)
cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
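The pch_can hunk replaces an arithmetic test on the receive error counter with a check of the dedicated receiver-passive flag. The register layout implied by the three masks is sketched below; the field names are the driver's own defines:

/*
 * Error counter/status register bits as used by pch_can_error():
 *  bit 15       PCH_RP  (0x00008000) - receiver is error passive
 *  bits 14..8   PCH_REC (0x00007f00) - receive error counter
 *  bits  7..0   PCH_TEC (0x000000ff) - transmit error counter
 */

Because PCH_REC is only a 7-bit field, ((errc & PCH_REC) >> 8) can never exceed 127, so the old comparison could never trigger; testing the RP bit reports the receive error-passive state directly.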
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index fe9e64d..36e9d59 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -6,7 +6,6 @@ if CAN_SJA1000
config CAN_SJA1000_ISA
tristate "ISA Bus based legacy SJA1000 driver"
- depends on ISA
---help---
This driver adds legacy support for SJA1000 chips connected to
the ISA bus using I/O port, memory mapped or indirect access.
diff --git a/drivers/net/can/sja1000/peak_pci.c b/drivers/net/can/sja1000/peak_pci.c
index 2c7f503..2147959 100644
--- a/drivers/net/can/sja1000/peak_pci.c
+++ b/drivers/net/can/sja1000/peak_pci.c
@@ -39,9 +39,9 @@ MODULE_LICENSE("GPL v2");
#define DRV_NAME "peak_pci"
struct peak_pci_chan {
- void __iomem *cfg_base; /* Common for all channels */
- struct net_device *next_dev; /* Chain of network devices */
- u16 icr_mask; /* Interrupt mask for fast ack */
+ void __iomem *cfg_base; /* Common for all channels */
+ struct net_device *prev_dev; /* Chain of network devices */
+ u16 icr_mask; /* Interrupt mask for fast ack */
};
#define PEAK_PCI_CAN_CLOCK (16000000 / 2)
@@ -98,7 +98,7 @@ static int __devinit peak_pci_probe(struct pci_dev *pdev,
{
struct sja1000_priv *priv;
struct peak_pci_chan *chan;
- struct net_device *dev, *dev0 = NULL;
+ struct net_device *dev;
void __iomem *cfg_base, *reg_base;
u16 sub_sys_id, icr;
int i, err, channels;
@@ -196,18 +196,14 @@ static int __devinit peak_pci_probe(struct pci_dev *pdev,
}
/* Create chain of SJA1000 devices */
- if (i == 0)
- dev0 = dev;
- else
- chan->next_dev = dev;
+ chan->prev_dev = pci_get_drvdata(pdev);
+ pci_set_drvdata(pdev, dev);
dev_info(&pdev->dev,
"%s at reg_base=0x%p cfg_base=0x%p irq=%d\n",
dev->name, priv->reg_base, chan->cfg_base, dev->irq);
}
- pci_set_drvdata(pdev, dev0);
-
/* Enable interrupts */
writew(icr, cfg_base + PITA_ICR + 2);
@@ -217,12 +213,11 @@ failure_remove_channels:
/* Disable interrupts */
writew(0x0, cfg_base + PITA_ICR + 2);
- for (dev = dev0; dev; dev = chan->next_dev) {
+ for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) {
unregister_sja1000dev(dev);
free_sja1000dev(dev);
priv = netdev_priv(dev);
chan = priv->priv;
- dev = chan->next_dev;
}
pci_iounmap(pdev, reg_base);
@@ -241,7 +236,7 @@ failure_disable_pci:
static void __devexit peak_pci_remove(struct pci_dev *pdev)
{
- struct net_device *dev = pci_get_drvdata(pdev); /* First device */
+ struct net_device *dev = pci_get_drvdata(pdev); /* Last device */
struct sja1000_priv *priv = netdev_priv(dev);
struct peak_pci_chan *chan = priv->priv;
void __iomem *cfg_base = chan->cfg_base;
@@ -255,7 +250,7 @@ static void __devexit peak_pci_remove(struct pci_dev *pdev)
dev_info(&pdev->dev, "removing device %s\n", dev->name);
unregister_sja1000dev(dev);
free_sja1000dev(dev);
- dev = chan->next_dev;
+ dev = chan->prev_dev;
if (!dev)
break;
priv = netdev_priv(dev);
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 04a3f1b..192b0d1 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -95,11 +95,16 @@ static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val)
spin_unlock_irqrestore(&priv->cmdreg_lock, flags);
}
+static int sja1000_is_absent(struct sja1000_priv *priv)
+{
+ return (priv->read_reg(priv, REG_MOD) == 0xFF);
+}
+
static int sja1000_probe_chip(struct net_device *dev)
{
struct sja1000_priv *priv = netdev_priv(dev);
- if (priv->reg_base && (priv->read_reg(priv, 0) == 0xFF)) {
+ if (priv->reg_base && sja1000_is_absent(priv)) {
printk(KERN_INFO "%s: probing @0x%lX failed\n",
DRV_NAME, dev->base_addr);
return 0;
@@ -493,6 +498,9 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) {
n++;
status = priv->read_reg(priv, REG_SR);
+ /* check for absent controller due to hw unplug */
+ if (status == 0xFF && sja1000_is_absent(priv))
+ return IRQ_NONE;
if (isrc & IRQ_WUI)
dev_warn(dev->dev.parent, "wakeup interrupt\n");
@@ -509,6 +517,9 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
while (status & SR_RBS) {
sja1000_rx(dev);
status = priv->read_reg(priv, REG_SR);
+ /* check for absent controller */
+ if (status == 0xFF && sja1000_is_absent(priv))
+ return IRQ_NONE;
}
}
if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) {
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index 496223e..90c5c2d 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -17,7 +17,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/isa.h>
+#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
@@ -44,9 +44,9 @@ static unsigned long port[MAXDEV];
static unsigned long mem[MAXDEV];
static int __devinitdata irq[MAXDEV];
static int __devinitdata clk[MAXDEV];
-static char __devinitdata cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
-static char __devinitdata ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
-static char __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
+static unsigned char __devinitdata cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static unsigned char __devinitdata ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
+static int __devinitdata indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1};
module_param_array(port, ulong, NULL, S_IRUGO);
MODULE_PARM_DESC(port, "I/O port number");
@@ -54,7 +54,7 @@ MODULE_PARM_DESC(port, "I/O port number");
module_param_array(mem, ulong, NULL, S_IRUGO);
MODULE_PARM_DESC(mem, "I/O memory address");
-module_param_array(indirect, byte, NULL, S_IRUGO);
+module_param_array(indirect, int, NULL, S_IRUGO);
MODULE_PARM_DESC(indirect, "Indirect access via address and data port");
module_param_array(irq, int, NULL, S_IRUGO);
@@ -75,6 +75,8 @@ MODULE_PARM_DESC(ocr, "Output control register "
#define SJA1000_IOSIZE 0x20
#define SJA1000_IOSIZE_INDIRECT 0x02
+static struct platform_device *sja1000_isa_devs[MAXDEV];
+
static u8 sja1000_isa_mem_read_reg(const struct sja1000_priv *priv, int reg)
{
return readb(priv->reg_base + reg);
@@ -115,26 +117,18 @@ static void sja1000_isa_port_write_reg_indirect(const struct sja1000_priv *priv,
outb(val, base + 1);
}
-static int __devinit sja1000_isa_match(struct device *pdev, unsigned int idx)
-{
- if (port[idx] || mem[idx]) {
- if (irq[idx])
- return 1;
- } else if (idx)
- return 0;
-
- dev_err(pdev, "insufficient parameters supplied\n");
- return 0;
-}
-
-static int __devinit sja1000_isa_probe(struct device *pdev, unsigned int idx)
+static int __devinit sja1000_isa_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct sja1000_priv *priv;
void __iomem *base = NULL;
int iosize = SJA1000_IOSIZE;
+ int idx = pdev->id;
int err;
+ dev_dbg(&pdev->dev, "probing idx=%d: port=%#lx, mem=%#lx, irq=%d\n",
+ idx, port[idx], mem[idx], irq[idx]);
+
if (mem[idx]) {
if (!request_mem_region(mem[idx], iosize, DRV_NAME)) {
err = -EBUSY;
@@ -189,31 +183,31 @@ static int __devinit sja1000_isa_probe(struct device *pdev, unsigned int idx)
else
priv->can.clock.freq = CLK_DEFAULT / 2;
- if (ocr[idx] != -1)
- priv->ocr = ocr[idx] & 0xff;
- else if (ocr[0] != -1)
- priv->ocr = ocr[0] & 0xff;
+ if (ocr[idx] != 0xff)
+ priv->ocr = ocr[idx];
+ else if (ocr[0] != 0xff)
+ priv->ocr = ocr[0];
else
priv->ocr = OCR_DEFAULT;
- if (cdr[idx] != -1)
- priv->cdr = cdr[idx] & 0xff;
- else if (cdr[0] != -1)
- priv->cdr = cdr[0] & 0xff;
+ if (cdr[idx] != 0xff)
+ priv->cdr = cdr[idx];
+ else if (cdr[0] != 0xff)
+ priv->cdr = cdr[0];
else
priv->cdr = CDR_DEFAULT;
- dev_set_drvdata(pdev, dev);
- SET_NETDEV_DEV(dev, pdev);
+ dev_set_drvdata(&pdev->dev, dev);
+ SET_NETDEV_DEV(dev, &pdev->dev);
err = register_sja1000dev(dev);
if (err) {
- dev_err(pdev, "registering %s failed (err=%d)\n",
+ dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
DRV_NAME, err);
goto exit_unmap;
}
- dev_info(pdev, "%s device registered (reg_base=0x%p, irq=%d)\n",
+ dev_info(&pdev->dev, "%s device registered (reg_base=0x%p, irq=%d)\n",
DRV_NAME, priv->reg_base, dev->irq);
return 0;
@@ -229,13 +223,14 @@ static int __devinit sja1000_isa_probe(struct device *pdev, unsigned int idx)
return err;
}
-static int __devexit sja1000_isa_remove(struct device *pdev, unsigned int idx)
+static int __devexit sja1000_isa_remove(struct platform_device *pdev)
{
- struct net_device *dev = dev_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(&pdev->dev);
struct sja1000_priv *priv = netdev_priv(dev);
+ int idx = pdev->id;
unregister_sja1000dev(dev);
- dev_set_drvdata(pdev, NULL);
+ dev_set_drvdata(&pdev->dev, NULL);
if (mem[idx]) {
iounmap(priv->reg_base);
@@ -251,29 +246,70 @@ static int __devexit sja1000_isa_remove(struct device *pdev, unsigned int idx)
return 0;
}
-static struct isa_driver sja1000_isa_driver = {
- .match = sja1000_isa_match,
+static struct platform_driver sja1000_isa_driver = {
.probe = sja1000_isa_probe,
.remove = __devexit_p(sja1000_isa_remove),
.driver = {
.name = DRV_NAME,
+ .owner = THIS_MODULE,
},
};
static int __init sja1000_isa_init(void)
{
- int err = isa_register_driver(&sja1000_isa_driver, MAXDEV);
+ int idx, err;
+
+ for (idx = 0; idx < MAXDEV; idx++) {
+ if ((port[idx] || mem[idx]) && irq[idx]) {
+ sja1000_isa_devs[idx] =
+ platform_device_alloc(DRV_NAME, idx);
+ if (!sja1000_isa_devs[idx]) {
+ err = -ENOMEM;
+ goto exit_free_devices;
+ }
+ err = platform_device_add(sja1000_isa_devs[idx]);
+ if (err) {
+ platform_device_put(sja1000_isa_devs[idx]);
+ goto exit_free_devices;
+ }
+ pr_debug("%s: platform device %d: port=%#lx, mem=%#lx, "
+ "irq=%d\n",
+ DRV_NAME, idx, port[idx], mem[idx], irq[idx]);
+ } else if (idx == 0 || port[idx] || mem[idx]) {
+ pr_err("%s: insufficient parameters supplied\n",
+ DRV_NAME);
+ err = -EINVAL;
+ goto exit_free_devices;
+ }
+ }
+
+ err = platform_driver_register(&sja1000_isa_driver);
+ if (err)
+ goto exit_free_devices;
+
+ pr_info("Legacy %s driver for max. %d devices registered\n",
+ DRV_NAME, MAXDEV);
+
+ return 0;
+
+exit_free_devices:
+ while (--idx >= 0) {
+ if (sja1000_isa_devs[idx])
+ platform_device_unregister(sja1000_isa_devs[idx]);
+ }
- if (!err)
- printk(KERN_INFO
- "Legacy %s driver for max. %d devices registered\n",
- DRV_NAME, MAXDEV);
return err;
}
static void __exit sja1000_isa_exit(void)
{
- isa_unregister_driver(&sja1000_isa_driver);
+ int idx;
+
+ platform_driver_unregister(&sja1000_isa_driver);
+ for (idx = 0; idx < MAXDEV; idx++) {
+ if (sja1000_isa_devs[idx])
+ platform_device_unregister(sja1000_isa_devs[idx]);
+ }
}
module_init(sja1000_isa_init);
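The new sja1000_isa_init() (like cc770_isa_init() earlier in the series) follows the standard platform_device lifetime rule: platform_device_put() is only the right cleanup while platform_device_add() has not yet succeeded; once the device is added it must be torn down with platform_device_unregister(), which removes it and drops the reference. A condensed sketch of that rule, with an illustrative helper name (ERR_PTR() comes from <linux/err.h>):

static struct platform_device *sja1000_isa_add_one(int idx)
{
	struct platform_device *pdev;
	int err;

	pdev = platform_device_alloc(DRV_NAME, idx);
	if (!pdev)
		return ERR_PTR(-ENOMEM);

	err = platform_device_add(pdev);
	if (err) {
		platform_device_put(pdev);	/* add failed: drop our ref */
		return ERR_PTR(err);
	}

	return pdev;	/* later: platform_device_unregister(pdev) */
}

This is why the error path in the init function only unregisters the slots that were successfully added.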
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index c3dd9d0..f2683eb 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -220,14 +220,4 @@ static struct platform_driver sja1000_ofp_driver = {
.remove = __devexit_p(sja1000_ofp_remove),
};
-static int __init sja1000_ofp_init(void)
-{
- return platform_driver_register(&sja1000_ofp_driver);
-}
-module_init(sja1000_ofp_init);
-
-static void __exit sja1000_ofp_exit(void)
-{
- return platform_driver_unregister(&sja1000_ofp_driver);
-};
-module_exit(sja1000_ofp_exit);
+module_platform_driver(sja1000_ofp_driver);
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index d9fadc4..4f50145 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -185,15 +185,4 @@ static struct platform_driver sp_driver = {
},
};
-static int __init sp_init(void)
-{
- return platform_driver_register(&sp_driver);
-}
-
-static void __exit sp_exit(void)
-{
- platform_driver_unregister(&sp_driver);
-}
-
-module_init(sp_init);
-module_exit(sp_exit);
+module_platform_driver(sp_driver);
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index a979b00..3f1ebcc 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -387,7 +387,7 @@ static void slc_setup(struct net_device *dev)
/* New-style flags. */
dev->flags = IFF_NOARP;
- dev->features = NETIF_F_NO_CSUM;
+ dev->features = NETIF_F_HW_CSUM;
}
/******************************************
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 09a8b86..a7c77c74 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -874,21 +874,9 @@ static struct platform_driver softing_driver = {
.remove = __devexit_p(softing_pdev_remove),
};
-MODULE_ALIAS("platform:softing");
-
-static int __init softing_start(void)
-{
- return platform_driver_register(&softing_driver);
-}
-
-static void __exit softing_stop(void)
-{
- platform_driver_unregister(&softing_driver);
-}
-
-module_init(softing_start);
-module_exit(softing_stop);
+module_platform_driver(softing_driver);
+MODULE_ALIAS("platform:softing");
MODULE_DESCRIPTION("Softing DPRAM CAN driver");
MODULE_AUTHOR("Kurt Van Dijck <kurt.van.dijck@eia.be>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 2adc294..5a2e1e3 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -745,9 +745,10 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
}
}
- netif_receive_skb(skb);
+ netif_rx(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+
return 0;
}
@@ -1037,20 +1038,7 @@ static struct platform_driver ti_hecc_driver = {
.resume = ti_hecc_resume,
};
-static int __init ti_hecc_init_driver(void)
-{
- printk(KERN_INFO DRV_DESC "\n");
- return platform_driver_register(&ti_hecc_driver);
-}
-
-static void __exit ti_hecc_exit_driver(void)
-{
- printk(KERN_INFO DRV_DESC " unloaded\n");
- platform_driver_unregister(&ti_hecc_driver);
-}
-
-module_exit(ti_hecc_exit_driver);
-module_init(ti_hecc_init_driver);
+module_platform_driver(ti_hecc_driver);
MODULE_AUTHOR("Anant Gole <anantgole@ti.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index a72c7bf..7dae64d 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -627,9 +627,6 @@ static int ems_usb_start(struct ems_usb *dev)
err = usb_submit_urb(urb, GFP_KERNEL);
if (err) {
- if (err == -ENODEV)
- netif_device_detach(dev->netdev);
-
usb_unanchor_urb(urb);
usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
urb->transfer_dma);
@@ -659,9 +656,6 @@ static int ems_usb_start(struct ems_usb *dev)
err = usb_submit_urb(dev->intr_urb, GFP_KERNEL);
if (err) {
- if (err == -ENODEV)
- netif_device_detach(dev->netdev);
-
dev_warn(netdev->dev.parent, "intr URB submit failed: %d\n",
err);
@@ -692,9 +686,6 @@ static int ems_usb_start(struct ems_usb *dev)
return 0;
failed:
- if (err == -ENODEV)
- netif_device_detach(dev->netdev);
-
dev_warn(netdev->dev.parent, "couldn't submit control: %d\n", err);
return err;
@@ -1115,28 +1106,4 @@ static struct usb_driver ems_usb_driver = {
.id_table = ems_usb_table,
};
-static int __init ems_usb_init(void)
-{
- int err;
-
- printk(KERN_INFO "CPC-USB kernel driver loaded\n");
-
- /* register this driver with the USB subsystem */
- err = usb_register(&ems_usb_driver);
-
- if (err) {
- err("usb_register failed. Error number %d\n", err);
- return err;
- }
-
- return 0;
-}
-
-static void __exit ems_usb_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&ems_usb_driver);
-}
-
-module_init(ems_usb_init);
-module_exit(ems_usb_exit);
+module_usb_driver(ems_usb_driver);
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index eb8b0e6..9277463 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -1108,25 +1108,4 @@ static struct usb_driver esd_usb2_driver = {
.id_table = esd_usb2_table,
};
-static int __init esd_usb2_init(void)
-{
- int err;
-
- /* register this driver with the USB subsystem */
- err = usb_register(&esd_usb2_driver);
-
- if (err) {
- err("usb_register failed. Error number %d\n", err);
- return err;
- }
-
- return 0;
-}
-module_init(esd_usb2_init);
-
-static void __exit esd_usb2_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&esd_usb2_driver);
-}
-module_exit(esd_usb2_exit);
+module_usb_driver(esd_usb2_driver);
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index f93e2d6..ea2d942 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -63,7 +63,7 @@ MODULE_AUTHOR("Urs Thuermann <urs.thuermann@volkswagen.de>");
* See Documentation/networking/can.txt for details.
*/
-static int echo; /* echo testing. Default: 0 (Off) */
+static bool echo; /* echo testing. Default: 0 (Off) */
module_param(echo, bool, S_IRUGO);
MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig
new file mode 100644
index 0000000..dd151d5
--- /dev/null
+++ b/drivers/net/dsa/Kconfig
@@ -0,0 +1,36 @@
+menu "Distributed Switch Architecture drivers"
+ depends on NET_DSA
+
+config NET_DSA_MV88E6XXX
+ tristate
+ default n
+
+config NET_DSA_MV88E6060
+ tristate "Marvell 88E6060 ethernet switch chip support"
+ select NET_DSA_TAG_TRAILER
+ ---help---
+ This enables support for the Marvell 88E6060 ethernet switch
+ chip.
+
+config NET_DSA_MV88E6XXX_NEED_PPU
+ bool
+ default n
+
+config NET_DSA_MV88E6131
+ tristate "Marvell 88E6085/6095/6095F/6131 ethernet switch chip support"
+ select NET_DSA_MV88E6XXX
+ select NET_DSA_MV88E6XXX_NEED_PPU
+ select NET_DSA_TAG_DSA
+ ---help---
+ This enables support for the Marvell 88E6085/6095/6095F/6131
+ ethernet switch chips.
+
+config NET_DSA_MV88E6123_61_65
+ tristate "Marvell 88E6123/6161/6165 ethernet switch chip support"
+ select NET_DSA_MV88E6XXX
+ select NET_DSA_TAG_EDSA
+ ---help---
+ This enables support for the Marvell 88E6123/6161/6165
+ ethernet switch chips.
+
+endmenu
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
new file mode 100644
index 0000000..f3bda05
--- /dev/null
+++ b/drivers/net/dsa/Makefile
@@ -0,0 +1,9 @@
+obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
+obj-$(CONFIG_NET_DSA_MV88E6XXX) += mv88e6xxx_drv.o
+mv88e6xxx_drv-y += mv88e6xxx.o
+ifdef CONFIG_NET_DSA_MV88E6123_61_65
+mv88e6xxx_drv-y += mv88e6123_61_65.o
+endif
+ifdef CONFIG_NET_DSA_MV88E6131
+mv88e6xxx_drv-y += mv88e6131.o
+endif
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
new file mode 100644
index 0000000..325391d
--- /dev/null
+++ b/drivers/net/dsa/mv88e6060.c
@@ -0,0 +1,294 @@
+/*
+ * drivers/net/dsa/mv88e6060.c - Driver for Marvell 88e6060 switch chips
+ * Copyright (c) 2008-2009 Marvell Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <net/dsa.h>
+
+#define REG_PORT(p) (8 + (p))
+#define REG_GLOBAL 0x0f
+
+static int reg_read(struct dsa_switch *ds, int addr, int reg)
+{
+ return mdiobus_read(ds->master_mii_bus, ds->pd->sw_addr + addr, reg);
+}
+
+#define REG_READ(addr, reg) \
+ ({ \
+ int __ret; \
+ \
+ __ret = reg_read(ds, addr, reg); \
+ if (__ret < 0) \
+ return __ret; \
+ __ret; \
+ })
+
+
+static int reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
+{
+ return mdiobus_write(ds->master_mii_bus, ds->pd->sw_addr + addr,
+ reg, val);
+}
+
+#define REG_WRITE(addr, reg, val) \
+ ({ \
+ int __ret; \
+ \
+ __ret = reg_write(ds, addr, reg, val); \
+ if (__ret < 0) \
+ return __ret; \
+ })
+
+static char *mv88e6060_probe(struct mii_bus *bus, int sw_addr)
+{
+ int ret;
+
+ ret = mdiobus_read(bus, sw_addr + REG_PORT(0), 0x03);
+ if (ret >= 0) {
+ ret &= 0xfff0;
+ if (ret == 0x0600)
+ return "Marvell 88E6060";
+ }
+
+ return NULL;
+}
+
+static int mv88e6060_switch_reset(struct dsa_switch *ds)
+{
+ int i;
+ int ret;
+
+ /*
+ * Set all ports to the disabled state.
+ */
+ for (i = 0; i < 6; i++) {
+ ret = REG_READ(REG_PORT(i), 0x04);
+ REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
+ }
+
+ /*
+ * Wait for transmit queues to drain.
+ */
+ msleep(2);
+
+ /*
+ * Reset the switch.
+ */
+ REG_WRITE(REG_GLOBAL, 0x0a, 0xa130);
+
+ /*
+ * Wait up to one second for reset to complete.
+ */
+ for (i = 0; i < 1000; i++) {
+ ret = REG_READ(REG_GLOBAL, 0x00);
+ if ((ret & 0x8000) == 0x0000)
+ break;
+
+ msleep(1);
+ }
+ if (i == 1000)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int mv88e6060_setup_global(struct dsa_switch *ds)
+{
+ /*
+ * Disable discarding of frames with excessive collisions,
+ * set the maximum frame size to 1536 bytes, and mask all
+ * interrupt sources.
+ */
+ REG_WRITE(REG_GLOBAL, 0x04, 0x0800);
+
+ /*
+ * Enable automatic address learning, set the address
+ * database size to 1024 entries, and set the default aging
+ * time to 5 minutes.
+ */
+ REG_WRITE(REG_GLOBAL, 0x0a, 0x2130);
+
+ return 0;
+}
+
+static int mv88e6060_setup_port(struct dsa_switch *ds, int p)
+{
+ int addr = REG_PORT(p);
+
+ /*
+ * Do not force flow control, disable Ingress and Egress
+ * Header tagging, disable VLAN tunneling, and set the port
+ * state to Forwarding. Additionally, if this is the CPU
+ * port, enable Ingress and Egress Trailer tagging mode.
+ */
+ REG_WRITE(addr, 0x04, dsa_is_cpu_port(ds, p) ? 0x4103 : 0x0003);
+
+ /*
+ * Port based VLAN map: give each port its own address
+ * database, allow the CPU port to talk to each of the 'real'
+ * ports, and allow each of the 'real' ports to only talk to
+ * the CPU port.
+ */
+ REG_WRITE(addr, 0x06,
+ ((p & 0xf) << 12) |
+ (dsa_is_cpu_port(ds, p) ?
+ ds->phys_port_mask :
+ (1 << ds->dst->cpu_port)));
+
+ /*
+ * Port Association Vector: when learning source addresses
+ * of packets, add the address to the address database using
+ * a port bitmap that has only the bit for this port set and
+ * the other bits clear.
+ */
+ REG_WRITE(addr, 0x0b, 1 << p);
+
+ return 0;
+}
+
+static int mv88e6060_setup(struct dsa_switch *ds)
+{
+ int i;
+ int ret;
+
+ ret = mv88e6060_switch_reset(ds);
+ if (ret < 0)
+ return ret;
+
+ /* @@@ initialise atu */
+
+ ret = mv88e6060_setup_global(ds);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < 6; i++) {
+ ret = mv88e6060_setup_port(ds, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr)
+{
+ REG_WRITE(REG_GLOBAL, 0x01, (addr[0] << 8) | addr[1]);
+ REG_WRITE(REG_GLOBAL, 0x02, (addr[2] << 8) | addr[3]);
+ REG_WRITE(REG_GLOBAL, 0x03, (addr[4] << 8) | addr[5]);
+
+ return 0;
+}
+
+static int mv88e6060_port_to_phy_addr(int port)
+{
+ if (port >= 0 && port <= 5)
+ return port;
+ return -1;
+}
+
+static int mv88e6060_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+ int addr;
+
+ addr = mv88e6060_port_to_phy_addr(port);
+ if (addr == -1)
+ return 0xffff;
+
+ return reg_read(ds, addr, regnum);
+}
+
+static int
+mv88e6060_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
+{
+ int addr;
+
+ addr = mv88e6060_port_to_phy_addr(port);
+ if (addr == -1)
+ return 0xffff;
+
+ return reg_write(ds, addr, regnum, val);
+}
+
+static void mv88e6060_poll_link(struct dsa_switch *ds)
+{
+ int i;
+
+ for (i = 0; i < DSA_MAX_PORTS; i++) {
+ struct net_device *dev;
+ int uninitialized_var(port_status);
+ int link;
+ int speed;
+ int duplex;
+ int fc;
+
+ dev = ds->ports[i];
+ if (dev == NULL)
+ continue;
+
+ link = 0;
+ if (dev->flags & IFF_UP) {
+ port_status = reg_read(ds, REG_PORT(i), 0x00);
+ if (port_status < 0)
+ continue;
+
+ link = !!(port_status & 0x1000);
+ }
+
+ if (!link) {
+ if (netif_carrier_ok(dev)) {
+ printk(KERN_INFO "%s: link down\n", dev->name);
+ netif_carrier_off(dev);
+ }
+ continue;
+ }
+
+ speed = (port_status & 0x0100) ? 100 : 10;
+ duplex = (port_status & 0x0200) ? 1 : 0;
+ fc = ((port_status & 0xc000) == 0xc000) ? 1 : 0;
+
+ if (!netif_carrier_ok(dev)) {
+ printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
+ "flow control %sabled\n", dev->name,
+ speed, duplex ? "full" : "half",
+ fc ? "en" : "dis");
+ netif_carrier_on(dev);
+ }
+ }
+}
+
+static struct dsa_switch_driver mv88e6060_switch_driver = {
+ .tag_protocol = htons(ETH_P_TRAILER),
+ .probe = mv88e6060_probe,
+ .setup = mv88e6060_setup,
+ .set_addr = mv88e6060_set_addr,
+ .phy_read = mv88e6060_phy_read,
+ .phy_write = mv88e6060_phy_write,
+ .poll_link = mv88e6060_poll_link,
+};
+
+static int __init mv88e6060_init(void)
+{
+ register_switch_driver(&mv88e6060_switch_driver);
+ return 0;
+}
+module_init(mv88e6060_init);
+
+static void __exit mv88e6060_cleanup(void)
+{
+ unregister_switch_driver(&mv88e6060_switch_driver);
+}
+module_exit(mv88e6060_cleanup);
+
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
+MODULE_DESCRIPTION("Driver for Marvell 88E6060 ethernet switch chip");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mv88e6060");
diff --git a/drivers/net/dsa/mv88e6123_61_65.c b/drivers/net/dsa/mv88e6123_61_65.c
new file mode 100644
index 0000000..c17c75b
--- /dev/null
+++ b/drivers/net/dsa/mv88e6123_61_65.c
@@ -0,0 +1,452 @@
+/*
+ * drivers/net/dsa/mv88e6123_61_65.c - Marvell 88e6123/6161/6165 switch chip support
+ * Copyright (c) 2008-2009 Marvell Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <net/dsa.h>
+#include "mv88e6xxx.h"
+
+static char *mv88e6123_61_65_probe(struct mii_bus *bus, int sw_addr)
+{
+ int ret;
+
+ ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
+ if (ret >= 0) {
+ if (ret == 0x1212)
+ return "Marvell 88E6123 (A1)";
+ if (ret == 0x1213)
+ return "Marvell 88E6123 (A2)";
+ if ((ret & 0xfff0) == 0x1210)
+ return "Marvell 88E6123";
+
+ if (ret == 0x1612)
+ return "Marvell 88E6161 (A1)";
+ if (ret == 0x1613)
+ return "Marvell 88E6161 (A2)";
+ if ((ret & 0xfff0) == 0x1610)
+ return "Marvell 88E6161";
+
+ if (ret == 0x1652)
+ return "Marvell 88E6165 (A1)";
+ if (ret == 0x1653)
+ return "Marvell 88e6165 (A2)";
+ if ((ret & 0xfff0) == 0x1650)
+ return "Marvell 88E6165";
+ }
+
+ return NULL;
+}
+
+static int mv88e6123_61_65_switch_reset(struct dsa_switch *ds)
+{
+ int i;
+ int ret;
+
+ /*
+ * Set all ports to the disabled state.
+ */
+ for (i = 0; i < 8; i++) {
+ ret = REG_READ(REG_PORT(i), 0x04);
+ REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
+ }
+
+ /*
+ * Wait for transmit queues to drain.
+ */
+ msleep(2);
+
+ /*
+ * Reset the switch.
+ */
+ REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
+
+ /*
+ * Wait up to one second for reset to complete.
+ */
+ for (i = 0; i < 1000; i++) {
+ ret = REG_READ(REG_GLOBAL, 0x00);
+ if ((ret & 0xc800) == 0xc800)
+ break;
+
+ msleep(1);
+ }
+ if (i == 1000)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int mv88e6123_61_65_setup_global(struct dsa_switch *ds)
+{
+ int ret;
+ int i;
+
+ /*
+ * Disable the PHY polling unit (since there won't be any
+ * external PHYs to poll), don't discard packets with
+ * excessive collisions, and mask all interrupt sources.
+ */
+ REG_WRITE(REG_GLOBAL, 0x04, 0x0000);
+
+ /*
+ * Set the default address aging time to 5 minutes, and
+ * enable address learn messages to be sent to all message
+ * ports.
+ */
+ REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
+
+ /*
+ * Configure the priority mapping registers.
+ */
+ ret = mv88e6xxx_config_prio(ds);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Configure the upstream port, and configure the upstream
+ * port as the port to which ingress and egress monitor frames
+ * are to be sent.
+ */
+ REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110));
+
+ /*
+ * Disable remote management for now, and set the switch's
+ * DSA device number.
+ */
+ REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f);
+
+ /*
+ * Send all frames with destination addresses matching
+ * 01:80:c2:00:00:2x to the CPU port.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x02, 0xffff);
+
+ /*
+ * Send all frames with destination addresses matching
+ * 01:80:c2:00:00:0x to the CPU port.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
+
+ /*
+ * Disable the loopback filter, disable flow control
+ * messages, disable flood broadcast override, disable
+ * removing of provider tags, disable ATU age violation
+ * interrupts, disable tag flow control, force flow
+ * control priority to the highest, and send all special
+ * multicast frames to the CPU at the highest priority.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
+
+ /*
+ * Program the DSA routing table.
+ */
+ for (i = 0; i < 32; i++) {
+ int nexthop;
+
+ nexthop = 0x1f;
+ if (i != ds->index && i < ds->dst->pd->nr_chips)
+ nexthop = ds->pd->rtable[i] & 0x1f;
+
+ REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
+ }
+
+ /*
+ * Clear all trunk masks.
+ */
+ for (i = 0; i < 8; i++)
+ REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0xff);
+
+ /*
+ * Clear all trunk mappings.
+ */
+ for (i = 0; i < 16; i++)
+ REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
+
+ /*
+ * Disable ingress rate limiting by resetting all ingress
+ * rate limit registers to their initial state.
+ */
+ for (i = 0; i < 6; i++)
+ REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8));
+
+ /*
+ * Initialise cross-chip port VLAN table to reset defaults.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000);
+
+ /*
+ * Clear the priority override table.
+ */
+ for (i = 0; i < 16; i++)
+ REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8));
+
+ /* @@@ initialise AVB (22/23) watchdog (27) sdet (29) registers */
+
+ return 0;
+}
+
+static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p)
+{
+ int addr = REG_PORT(p);
+ u16 val;
+
+ /*
+ * MAC Forcing register: don't force link, speed, duplex
+ * or flow control state to any particular values on physical
+ * ports, but force the CPU port and all DSA ports to 1000 Mb/s
+ * full duplex.
+ */
+ if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
+ REG_WRITE(addr, 0x01, 0x003e);
+ else
+ REG_WRITE(addr, 0x01, 0x0003);
+
+ /*
+ * Do not limit the period of time that this port can be
+ * paused for by the remote end or the period of time that
+ * this port can pause the remote end.
+ */
+ REG_WRITE(addr, 0x02, 0x0000);
+
+ /*
+ * Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
+ * disable Header mode, enable IGMP/MLD snooping, disable VLAN
+ * tunneling, determine priority by looking at 802.1p and IP
+ * priority fields (IP prio has precedence), and set STP state
+ * to Forwarding.
+ *
+ * If this is the CPU link, use DSA or EDSA tagging depending
+ * on which tagging mode was configured.
+ *
+ * If this is a link to another switch, use DSA tagging mode.
+ *
+ * If this is the upstream port for this switch, enable
+ * forwarding of unknown unicasts and multicasts.
+ */
+ val = 0x0433;
+ if (dsa_is_cpu_port(ds, p)) {
+ if (ds->dst->tag_protocol == htons(ETH_P_EDSA))
+ val |= 0x3300;
+ else
+ val |= 0x0100;
+ }
+ if (ds->dsa_port_mask & (1 << p))
+ val |= 0x0100;
+ if (p == dsa_upstream_port(ds))
+ val |= 0x000c;
+ REG_WRITE(addr, 0x04, val);
+
+ /*
+ * Port Control 1: disable trunking. Also, if this is the
+ * CPU port, enable learn messages to be sent to this port.
+ */
+ REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000);
+
+ /*
+ * Port based VLAN map: give each port its own address
+ * database, allow the CPU port to talk to each of the 'real'
+ * ports, and allow each of the 'real' ports to only talk to
+ * the upstream port.
+ */
+ val = (p & 0xf) << 12;
+ if (dsa_is_cpu_port(ds, p))
+ val |= ds->phys_port_mask;
+ else
+ val |= 1 << dsa_upstream_port(ds);
+ REG_WRITE(addr, 0x06, val);
+
+ /*
+ * Default VLAN ID and priority: don't set a default VLAN
+ * ID, and set the default packet priority to zero.
+ */
+ REG_WRITE(addr, 0x07, 0x0000);
+
+ /*
+ * Port Control 2: don't force a good FCS, set the maximum
+ * frame size to 10240 bytes, don't let the switch add or
+ * strip 802.1q tags, don't discard tagged or untagged frames
+ * on this port, do a destination address lookup on all
+ * received packets as usual, disable ARP mirroring and don't
+ * send a copy of all transmitted/received frames on this port
+ * to the CPU.
+ */
+ REG_WRITE(addr, 0x08, 0x2080);
+
+ /*
+ * Egress rate control: disable egress rate control.
+ */
+ REG_WRITE(addr, 0x09, 0x0001);
+
+ /*
+ * Egress rate control 2: disable egress rate control.
+ */
+ REG_WRITE(addr, 0x0a, 0x0000);
+
+ /*
+ * Port Association Vector: when learning source addresses
+ * of packets, add the address to the address database using
+ * a port bitmap that has only the bit for this port set and
+ * the other bits clear.
+ */
+ REG_WRITE(addr, 0x0b, 1 << p);
+
+ /*
+ * Port ATU control: disable limiting the number of address
+ * database entries that this port is allowed to use.
+ */
+ REG_WRITE(addr, 0x0c, 0x0000);
+
+ /*
+ * Priority Override: disable DA, SA and VTU priority override.
+ */
+ REG_WRITE(addr, 0x0d, 0x0000);
+
+ /*
+ * Port Ethertype: use the Ethertype DSA (EDSA) Ethertype value.
+ */
+ REG_WRITE(addr, 0x0f, ETH_P_EDSA);
+
+ /*
+ * Tag Remap: use an identity 802.1p prio -> switch prio
+ * mapping.
+ */
+ REG_WRITE(addr, 0x18, 0x3210);
+
+ /*
+ * Tag Remap 2: use an identity 802.1p prio -> switch prio
+ * mapping.
+ */
+ REG_WRITE(addr, 0x19, 0x7654);
+
+ return 0;
+}
+
+static int mv88e6123_61_65_setup(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ int i;
+ int ret;
+
+ mutex_init(&ps->smi_mutex);
+ mutex_init(&ps->stats_mutex);
+
+ ret = mv88e6123_61_65_switch_reset(ds);
+ if (ret < 0)
+ return ret;
+
+ /* @@@ initialise vtu and atu */
+
+ ret = mv88e6123_61_65_setup_global(ds);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < 6; i++) {
+ ret = mv88e6123_61_65_setup_port(ds, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mv88e6123_61_65_port_to_phy_addr(int port)
+{
+ if (port >= 0 && port <= 4)
+ return port;
+ return -1;
+}
+
+static int
+mv88e6123_61_65_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+ int addr = mv88e6123_61_65_port_to_phy_addr(port);
+ return mv88e6xxx_phy_read(ds, addr, regnum);
+}
+
+static int
+mv88e6123_61_65_phy_write(struct dsa_switch *ds,
+ int port, int regnum, u16 val)
+{
+ int addr = mv88e6123_61_65_port_to_phy_addr(port);
+ return mv88e6xxx_phy_write(ds, addr, regnum, val);
+}
+
+static struct mv88e6xxx_hw_stat mv88e6123_61_65_hw_stats[] = {
+ { "in_good_octets", 8, 0x00, },
+ { "in_bad_octets", 4, 0x02, },
+ { "in_unicast", 4, 0x04, },
+ { "in_broadcasts", 4, 0x06, },
+ { "in_multicasts", 4, 0x07, },
+ { "in_pause", 4, 0x16, },
+ { "in_undersize", 4, 0x18, },
+ { "in_fragments", 4, 0x19, },
+ { "in_oversize", 4, 0x1a, },
+ { "in_jabber", 4, 0x1b, },
+ { "in_rx_error", 4, 0x1c, },
+ { "in_fcs_error", 4, 0x1d, },
+ { "out_octets", 8, 0x0e, },
+ { "out_unicast", 4, 0x10, },
+ { "out_broadcasts", 4, 0x13, },
+ { "out_multicasts", 4, 0x12, },
+ { "out_pause", 4, 0x15, },
+ { "excessive", 4, 0x11, },
+ { "collisions", 4, 0x1e, },
+ { "deferred", 4, 0x05, },
+ { "single", 4, 0x14, },
+ { "multiple", 4, 0x17, },
+ { "out_fcs_error", 4, 0x03, },
+ { "late", 4, 0x1f, },
+ { "hist_64bytes", 4, 0x08, },
+ { "hist_65_127bytes", 4, 0x09, },
+ { "hist_128_255bytes", 4, 0x0a, },
+ { "hist_256_511bytes", 4, 0x0b, },
+ { "hist_512_1023bytes", 4, 0x0c, },
+ { "hist_1024_max_bytes", 4, 0x0d, },
+};
+
+static void
+mv88e6123_61_65_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+{
+ mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6123_61_65_hw_stats),
+ mv88e6123_61_65_hw_stats, port, data);
+}
+
+static void
+mv88e6123_61_65_get_ethtool_stats(struct dsa_switch *ds,
+ int port, uint64_t *data)
+{
+ mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6123_61_65_hw_stats),
+ mv88e6123_61_65_hw_stats, port, data);
+}
+
+static int mv88e6123_61_65_get_sset_count(struct dsa_switch *ds)
+{
+ return ARRAY_SIZE(mv88e6123_61_65_hw_stats);
+}
+
+struct dsa_switch_driver mv88e6123_61_65_switch_driver = {
+ .tag_protocol = cpu_to_be16(ETH_P_EDSA),
+ .priv_size = sizeof(struct mv88e6xxx_priv_state),
+ .probe = mv88e6123_61_65_probe,
+ .setup = mv88e6123_61_65_setup,
+ .set_addr = mv88e6xxx_set_addr_indirect,
+ .phy_read = mv88e6123_61_65_phy_read,
+ .phy_write = mv88e6123_61_65_phy_write,
+ .poll_link = mv88e6xxx_poll_link,
+ .get_strings = mv88e6123_61_65_get_strings,
+ .get_ethtool_stats = mv88e6123_61_65_get_ethtool_stats,
+ .get_sset_count = mv88e6123_61_65_get_sset_count,
+};
+
+MODULE_ALIAS("platform:mv88e6123");
+MODULE_ALIAS("platform:mv88e6161");
+MODULE_ALIAS("platform:mv88e6165");
diff --git a/drivers/net/dsa/mv88e6131.c b/drivers/net/dsa/mv88e6131.c
new file mode 100644
index 0000000..55888b0
--- /dev/null
+++ b/drivers/net/dsa/mv88e6131.c
@@ -0,0 +1,436 @@
+/*
+ * drivers/net/dsa/mv88e6131.c - Marvell 88e6085/6095/6095f/6131 switch chip support
+ * Copyright (c) 2008-2009 Marvell Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <net/dsa.h>
+#include "mv88e6xxx.h"
+
+/*
+ * Switch product IDs
+ */
+#define ID_6085 0x04a0
+#define ID_6095 0x0950
+#define ID_6131 0x1060
+
+static char *mv88e6131_probe(struct mii_bus *bus, int sw_addr)
+{
+ int ret;
+
+ ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
+ if (ret >= 0) {
+ ret &= 0xfff0;
+ if (ret == ID_6085)
+ return "Marvell 88E6085";
+ if (ret == ID_6095)
+ return "Marvell 88E6095/88E6095F";
+ if (ret == ID_6131)
+ return "Marvell 88E6131";
+ }
+
+ return NULL;
+}
+
+static int mv88e6131_switch_reset(struct dsa_switch *ds)
+{
+ int i;
+ int ret;
+
+ /*
+ * Set all ports to the disabled state.
+ */
+ for (i = 0; i < 11; i++) {
+ ret = REG_READ(REG_PORT(i), 0x04);
+ REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
+ }
+
+ /*
+ * Wait for transmit queues to drain.
+ */
+ msleep(2);
+
+ /*
+ * Reset the switch.
+ */
+ REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
+
+ /*
+ * Wait up to one second for reset to complete.
+ */
+ for (i = 0; i < 1000; i++) {
+ ret = REG_READ(REG_GLOBAL, 0x00);
+ if ((ret & 0xc800) == 0xc800)
+ break;
+
+ msleep(1);
+ }
+ if (i == 1000)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int mv88e6131_setup_global(struct dsa_switch *ds)
+{
+ int ret;
+ int i;
+
+ /*
+ * Enable the PHY polling unit, don't discard packets with
+ * excessive collisions, use a weighted fair queueing scheme
+ * to arbitrate between packet queues, set the maximum frame
+ * size to 1632, and mask all interrupt sources.
+ */
+ REG_WRITE(REG_GLOBAL, 0x04, 0x4400);
+
+ /*
+ * Set the default address aging time to 5 minutes, and
+ * enable address learn messages to be sent to all message
+ * ports.
+ */
+ REG_WRITE(REG_GLOBAL, 0x0a, 0x0148);
+
+ /*
+ * Configure the priority mapping registers.
+ */
+ ret = mv88e6xxx_config_prio(ds);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Set the VLAN ethertype to 0x8100.
+ */
+ REG_WRITE(REG_GLOBAL, 0x19, 0x8100);
+
+ /*
+ * Disable ARP mirroring, and configure the upstream port as
+ * the port to which ingress and egress monitor frames are to
+ * be sent.
+ */
+ REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1100) | 0x00f0);
+
+ /*
+ * Disable cascade port functionality unless this device
+ * is used in a cascade configuration, and set the switch's
+ * DSA device number.
+ */
+ if (ds->dst->pd->nr_chips > 1)
+ REG_WRITE(REG_GLOBAL, 0x1c, 0xf000 | (ds->index & 0x1f));
+ else
+ REG_WRITE(REG_GLOBAL, 0x1c, 0xe000 | (ds->index & 0x1f));
+
+ /*
+ * Send all frames with destination addresses matching
+ * 01:80:c2:00:00:0x to the CPU port.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x03, 0xffff);
+
+ /*
+ * Ignore removed tag data on doubly tagged packets, disable
+ * flow control messages, force flow control priority to the
+ * highest, and send all special multicast frames to the CPU
+ * port at the highest priority.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff);
+
+ /*
+ * Program the DSA routing table.
+ */
+ for (i = 0; i < 32; i++) {
+ int nexthop;
+
+ nexthop = 0x1f;
+ if (i != ds->index && i < ds->dst->pd->nr_chips)
+ nexthop = ds->pd->rtable[i] & 0x1f;
+
+ REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop);
+ }
+
+ /*
+ * Clear all trunk masks.
+ */
+ for (i = 0; i < 8; i++)
+ REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0x7ff);
+
+ /*
+ * Clear all trunk mappings.
+ */
+ for (i = 0; i < 16; i++)
+ REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11));
+
+ /*
+ * Force the priority of IGMP/MLD snoop frames and ARP frames
+ * to the highest setting.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x0f, 0x00ff);
+
+ return 0;
+}
+
+static int mv88e6131_setup_port(struct dsa_switch *ds, int p)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ int addr = REG_PORT(p);
+ u16 val;
+
+ /*
+ * MAC Forcing register: don't force link, speed, duplex
+ * or flow control state to any particular values on physical
+ * ports, but force the CPU port and all DSA ports to 1000 Mb/s
+ * (100 Mb/s on 6085) full duplex.
+ */
+ if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p))
+ if (ps->id == ID_6085)
+ REG_WRITE(addr, 0x01, 0x003d); /* 100 Mb/s */
+ else
+ REG_WRITE(addr, 0x01, 0x003e); /* 1000 Mb/s */
+ else
+ REG_WRITE(addr, 0x01, 0x0003);
+
+ /*
+ * Port Control: disable Core Tag, disable Drop-on-Lock,
+ * transmit frames unmodified, disable Header mode,
+ * enable IGMP/MLD snoop, disable DoubleTag, disable VLAN
+ * tunneling, determine priority by looking at 802.1p and
+ * IP priority fields (IP prio has precedence), and set STP
+ * state to Forwarding.
+ *
+ * If this is the upstream port for this switch, enable
+ * forwarding of unknown unicasts, and enable DSA tagging
+ * mode.
+ *
+ * If this is the link to another switch, use DSA tagging
+ * mode, but do not enable forwarding of unknown unicasts.
+ */
+ val = 0x0433;
+ if (p == dsa_upstream_port(ds)) {
+ val |= 0x0104;
+ /*
+ * On the 6085, forwarding of unknown multicasts is controlled
+ * here rather than in the Port Control 2 register.
+ */
+ if (ps->id == ID_6085)
+ val |= 0x0008;
+ }
+ if (ds->dsa_port_mask & (1 << p))
+ val |= 0x0100;
+ REG_WRITE(addr, 0x04, val);
+
+ /*
+ * Port Control 1: disable trunking. Also, if this is the
+ * CPU port, enable learn messages to be sent to this port.
+ */
+ REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000);
+
+ /*
+ * Port based VLAN map: give each port its own address
+ * database, allow the CPU port to talk to each of the 'real'
+ * ports, and allow each of the 'real' ports to only talk to
+ * the upstream port.
+ */
+ val = (p & 0xf) << 12;
+ if (dsa_is_cpu_port(ds, p))
+ val |= ds->phys_port_mask;
+ else
+ val |= 1 << dsa_upstream_port(ds);
+ REG_WRITE(addr, 0x06, val);
+
+ /*
+ * Default VLAN ID and priority: don't set a default VLAN
+ * ID, and set the default packet priority to zero.
+ */
+ REG_WRITE(addr, 0x07, 0x0000);
+
+ /*
+ * Port Control 2: don't force a good FCS, don't use
+ * VLAN-based, source address-based or destination
+ * address-based priority overrides, don't let the switch
+ * add or strip 802.1q tags, don't discard tagged or
+ * untagged frames on this port, do a destination address
+ * lookup on received packets as usual, don't send a copy
+ * of all transmitted/received frames on this port to the
+ * CPU, and configure the upstream port number.
+ *
+ * If this is the upstream port for this switch, enable
+ * forwarding of unknown multicast addresses.
+ */
+ if (ps->id == ID_6085)
+ /*
+ * On the 6085, bits 3:0 are reserved, bit 6 controls ARP
+ * mirroring, and unknown multicast forwarding is handled in
+ * the Port Control register.
+ */
+ REG_WRITE(addr, 0x08, 0x0080);
+ else {
+ val = 0x0080 | dsa_upstream_port(ds);
+ if (p == dsa_upstream_port(ds))
+ val |= 0x0040;
+ REG_WRITE(addr, 0x08, val);
+ }
+
+ /*
+ * Rate Control: disable ingress rate limiting.
+ */
+ REG_WRITE(addr, 0x09, 0x0000);
+
+ /*
+ * Rate Control 2: disable egress rate limiting.
+ */
+ REG_WRITE(addr, 0x0a, 0x0000);
+
+ /*
+ * Port Association Vector: when learning source addresses
+ * of packets, add the address to the address database using
+ * a port bitmap that has only the bit for this port set and
+ * the other bits clear.
+ */
+ REG_WRITE(addr, 0x0b, 1 << p);
+
+ /*
+ * Tag Remap: use an identity 802.1p prio -> switch prio
+ * mapping.
+ */
+ REG_WRITE(addr, 0x18, 0x3210);
+
+ /*
+ * Tag Remap 2: use an identity 802.1p prio -> switch prio
+ * mapping.
+ */
+ REG_WRITE(addr, 0x19, 0x7654);
+
+ return 0;
+}
+
+static int mv88e6131_setup(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ int i;
+ int ret;
+
+ mutex_init(&ps->smi_mutex);
+ mv88e6xxx_ppu_state_init(ds);
+ mutex_init(&ps->stats_mutex);
+
+ ps->id = REG_READ(REG_PORT(0), 0x03) & 0xfff0;
+
+ ret = mv88e6131_switch_reset(ds);
+ if (ret < 0)
+ return ret;
+
+ /* @@@ initialise vtu and atu */
+
+ ret = mv88e6131_setup_global(ds);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < 11; i++) {
+ ret = mv88e6131_setup_port(ds, i);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mv88e6131_port_to_phy_addr(int port)
+{
+ if (port >= 0 && port <= 11)
+ return port;
+ return -1;
+}
+
+static int
+mv88e6131_phy_read(struct dsa_switch *ds, int port, int regnum)
+{
+ int addr = mv88e6131_port_to_phy_addr(port);
+ return mv88e6xxx_phy_read_ppu(ds, addr, regnum);
+}
+
+static int
+mv88e6131_phy_write(struct dsa_switch *ds,
+ int port, int regnum, u16 val)
+{
+ int addr = mv88e6131_port_to_phy_addr(port);
+ return mv88e6xxx_phy_write_ppu(ds, addr, regnum, val);
+}
+
+static struct mv88e6xxx_hw_stat mv88e6131_hw_stats[] = {
+ { "in_good_octets", 8, 0x00, },
+ { "in_bad_octets", 4, 0x02, },
+ { "in_unicast", 4, 0x04, },
+ { "in_broadcasts", 4, 0x06, },
+ { "in_multicasts", 4, 0x07, },
+ { "in_pause", 4, 0x16, },
+ { "in_undersize", 4, 0x18, },
+ { "in_fragments", 4, 0x19, },
+ { "in_oversize", 4, 0x1a, },
+ { "in_jabber", 4, 0x1b, },
+ { "in_rx_error", 4, 0x1c, },
+ { "in_fcs_error", 4, 0x1d, },
+ { "out_octets", 8, 0x0e, },
+ { "out_unicast", 4, 0x10, },
+ { "out_broadcasts", 4, 0x13, },
+ { "out_multicasts", 4, 0x12, },
+ { "out_pause", 4, 0x15, },
+ { "excessive", 4, 0x11, },
+ { "collisions", 4, 0x1e, },
+ { "deferred", 4, 0x05, },
+ { "single", 4, 0x14, },
+ { "multiple", 4, 0x17, },
+ { "out_fcs_error", 4, 0x03, },
+ { "late", 4, 0x1f, },
+ { "hist_64bytes", 4, 0x08, },
+ { "hist_65_127bytes", 4, 0x09, },
+ { "hist_128_255bytes", 4, 0x0a, },
+ { "hist_256_511bytes", 4, 0x0b, },
+ { "hist_512_1023bytes", 4, 0x0c, },
+ { "hist_1024_max_bytes", 4, 0x0d, },
+};
+
+static void
+mv88e6131_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
+{
+ mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6131_hw_stats),
+ mv88e6131_hw_stats, port, data);
+}
+
+static void
+mv88e6131_get_ethtool_stats(struct dsa_switch *ds,
+ int port, uint64_t *data)
+{
+ mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6131_hw_stats),
+ mv88e6131_hw_stats, port, data);
+}
+
+static int mv88e6131_get_sset_count(struct dsa_switch *ds)
+{
+ return ARRAY_SIZE(mv88e6131_hw_stats);
+}
+
+struct dsa_switch_driver mv88e6131_switch_driver = {
+ .tag_protocol = cpu_to_be16(ETH_P_DSA),
+ .priv_size = sizeof(struct mv88e6xxx_priv_state),
+ .probe = mv88e6131_probe,
+ .setup = mv88e6131_setup,
+ .set_addr = mv88e6xxx_set_addr_direct,
+ .phy_read = mv88e6131_phy_read,
+ .phy_write = mv88e6131_phy_write,
+ .poll_link = mv88e6xxx_poll_link,
+ .get_strings = mv88e6131_get_strings,
+ .get_ethtool_stats = mv88e6131_get_ethtool_stats,
+ .get_sset_count = mv88e6131_get_sset_count,
+};
+
+MODULE_ALIAS("platform:mv88e6085");
+MODULE_ALIAS("platform:mv88e6095");
+MODULE_ALIAS("platform:mv88e6095f");
+MODULE_ALIAS("platform:mv88e6131");
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
new file mode 100644
index 0000000..a2c62c2
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -0,0 +1,550 @@
+/*
+ * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <net/dsa.h>
+#include "mv88e6xxx.h"
+
+/*
+ * If the switch's ADDR[4:0] strap pins are strapped to zero, it will
+ * use all 32 addresses on its SMI bus, and all switch registers
+ * will be directly accessible on some {device address,register address}
+ * pair. If the ADDR[4:0] pins are not strapped to zero, the switch
+ * will only respond to SMI transactions to that specific address, and
+ * an indirect addressing mechanism needs to be used to access its
+ * registers.
+ */
+static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ ret = mdiobus_read(bus, sw_addr, 0);
+ if (ret < 0)
+ return ret;
+
+ if ((ret & 0x8000) == 0)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
+{
+ int ret;
+
+ if (sw_addr == 0)
+ return mdiobus_read(bus, addr, reg);
+
+ /*
+ * Wait for the bus to become free.
+ */
+ ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Transmit the read command.
+ */
+ ret = mdiobus_write(bus, sw_addr, 0, 0x9800 | (addr << 5) | reg);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Wait for the read command to complete.
+ */
+ ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Read the data.
+ */
+ ret = mdiobus_read(bus, sw_addr, 1);
+ if (ret < 0)
+ return ret;
+
+ return ret & 0xffff;
+}
+
+int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ int ret;
+
+ mutex_lock(&ps->smi_mutex);
+ ret = __mv88e6xxx_reg_read(ds->master_mii_bus,
+ ds->pd->sw_addr, addr, reg);
+ mutex_unlock(&ps->smi_mutex);
+
+ return ret;
+}
+
+int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
+ int reg, u16 val)
+{
+ int ret;
+
+ if (sw_addr == 0)
+ return mdiobus_write(bus, addr, reg, val);
+
+ /*
+ * Wait for the bus to become free.
+ */
+ ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Transmit the data to write.
+ */
+ ret = mdiobus_write(bus, sw_addr, 1, val);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Transmit the write command.
+ */
+ ret = mdiobus_write(bus, sw_addr, 0, 0x9400 | (addr << 5) | reg);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Wait for the write command to complete.
+ */
+ ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ int ret;
+
+ mutex_lock(&ps->smi_mutex);
+ ret = __mv88e6xxx_reg_write(ds->master_mii_bus,
+ ds->pd->sw_addr, addr, reg, val);
+ mutex_unlock(&ps->smi_mutex);
+
+ return ret;
+}
+
+int mv88e6xxx_config_prio(struct dsa_switch *ds)
+{
+ /*
+ * Configure the IP ToS mapping registers.
+ */
+ REG_WRITE(REG_GLOBAL, 0x10, 0x0000);
+ REG_WRITE(REG_GLOBAL, 0x11, 0x0000);
+ REG_WRITE(REG_GLOBAL, 0x12, 0x5555);
+ REG_WRITE(REG_GLOBAL, 0x13, 0x5555);
+ REG_WRITE(REG_GLOBAL, 0x14, 0xaaaa);
+ REG_WRITE(REG_GLOBAL, 0x15, 0xaaaa);
+ REG_WRITE(REG_GLOBAL, 0x16, 0xffff);
+ REG_WRITE(REG_GLOBAL, 0x17, 0xffff);
+
+ /*
+ * Configure the IEEE 802.1p priority mapping register.
+ */
+ REG_WRITE(REG_GLOBAL, 0x18, 0xfa41);
+
+ return 0;
+}
+
+int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
+{
+ REG_WRITE(REG_GLOBAL, 0x01, (addr[0] << 8) | addr[1]);
+ REG_WRITE(REG_GLOBAL, 0x02, (addr[2] << 8) | addr[3]);
+ REG_WRITE(REG_GLOBAL, 0x03, (addr[4] << 8) | addr[5]);
+
+ return 0;
+}
+
+int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < 6; i++) {
+ int j;
+
+ /*
+ * Write the MAC address byte.
+ */
+ REG_WRITE(REG_GLOBAL2, 0x0d, 0x8000 | (i << 8) | addr[i]);
+
+ /*
+ * Wait for the write to complete.
+ */
+ for (j = 0; j < 16; j++) {
+ ret = REG_READ(REG_GLOBAL2, 0x0d);
+ if ((ret & 0x8000) == 0)
+ break;
+ }
+ if (j == 16)
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
+{
+ if (addr >= 0)
+ return mv88e6xxx_reg_read(ds, addr, regnum);
+ return 0xffff;
+}
+
+int mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum, u16 val)
+{
+ if (addr >= 0)
+ return mv88e6xxx_reg_write(ds, addr, regnum, val);
+ return 0;
+}
+
+#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
+static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
+{
+ int ret;
+ int i;
+
+ ret = REG_READ(REG_GLOBAL, 0x04);
+ REG_WRITE(REG_GLOBAL, 0x04, ret & ~0x4000);
+
+ for (i = 0; i < 1000; i++) {
+ ret = REG_READ(REG_GLOBAL, 0x00);
+ msleep(1);
+ if ((ret & 0xc000) != 0xc000)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
+{
+ int ret;
+ int i;
+
+ ret = REG_READ(REG_GLOBAL, 0x04);
+ REG_WRITE(REG_GLOBAL, 0x04, ret | 0x4000);
+
+ for (i = 0; i < 1000; i++) {
+ ret = REG_READ(REG_GLOBAL, 0x00);
+ msleep(1);
+ if ((ret & 0xc000) == 0xc000)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
+{
+ struct mv88e6xxx_priv_state *ps;
+
+ ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
+ if (mutex_trylock(&ps->ppu_mutex)) {
+ struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1;
+
+ if (mv88e6xxx_ppu_enable(ds) == 0)
+ ps->ppu_disabled = 0;
+ mutex_unlock(&ps->ppu_mutex);
+ }
+}
+
+static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)_ps;
+
+ schedule_work(&ps->ppu_work);
+}
+
+static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ int ret;
+
+ mutex_lock(&ps->ppu_mutex);
+
+ /*
+ * If the PHY polling unit is enabled, disable it so that
+ * we can access the PHY registers. If it was already
+ * disabled, cancel the timer that is going to re-enable
+ * it.
+ */
+ if (!ps->ppu_disabled) {
+ ret = mv88e6xxx_ppu_disable(ds);
+ if (ret < 0) {
+ mutex_unlock(&ps->ppu_mutex);
+ return ret;
+ }
+ ps->ppu_disabled = 1;
+ } else {
+ del_timer(&ps->ppu_timer);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+
+ /*
+ * Schedule a timer to re-enable the PHY polling unit.
+ */
+ mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
+ mutex_unlock(&ps->ppu_mutex);
+}
+
+void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+
+ mutex_init(&ps->ppu_mutex);
+ INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
+ init_timer(&ps->ppu_timer);
+ ps->ppu_timer.data = (unsigned long)ps;
+ ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
+}
+
+int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum)
+{
+ int ret;
+
+ ret = mv88e6xxx_ppu_access_get(ds);
+ if (ret >= 0) {
+ ret = mv88e6xxx_reg_read(ds, addr, regnum);
+ mv88e6xxx_ppu_access_put(ds);
+ }
+
+ return ret;
+}
+
+int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
+ int regnum, u16 val)
+{
+ int ret;
+
+ ret = mv88e6xxx_ppu_access_get(ds);
+ if (ret >= 0) {
+ ret = mv88e6xxx_reg_write(ds, addr, regnum, val);
+ mv88e6xxx_ppu_access_put(ds);
+ }
+
+ return ret;
+}
+#endif
+
+void mv88e6xxx_poll_link(struct dsa_switch *ds)
+{
+ int i;
+
+ for (i = 0; i < DSA_MAX_PORTS; i++) {
+ struct net_device *dev;
+ int uninitialized_var(port_status);
+ int link;
+ int speed;
+ int duplex;
+ int fc;
+
+ dev = ds->ports[i];
+ if (dev == NULL)
+ continue;
+
+ link = 0;
+ if (dev->flags & IFF_UP) {
+ port_status = mv88e6xxx_reg_read(ds, REG_PORT(i), 0x00);
+ if (port_status < 0)
+ continue;
+
+ link = !!(port_status & 0x0800);
+ }
+
+ if (!link) {
+ if (netif_carrier_ok(dev)) {
+ printk(KERN_INFO "%s: link down\n", dev->name);
+ netif_carrier_off(dev);
+ }
+ continue;
+ }
+
+ switch (port_status & 0x0300) {
+ case 0x0000:
+ speed = 10;
+ break;
+ case 0x0100:
+ speed = 100;
+ break;
+ case 0x0200:
+ speed = 1000;
+ break;
+ default:
+ speed = -1;
+ break;
+ }
+ duplex = (port_status & 0x0400) ? 1 : 0;
+ fc = (port_status & 0x8000) ? 1 : 0;
+
+ if (!netif_carrier_ok(dev)) {
+ printk(KERN_INFO "%s: link up, %d Mb/s, %s duplex, "
+ "flow control %sabled\n", dev->name,
+ speed, duplex ? "full" : "half",
+ fc ? "en" : "dis");
+ netif_carrier_on(dev);
+ }
+ }
+}
+
+static int mv88e6xxx_stats_wait(struct dsa_switch *ds)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < 10; i++) {
+ ret = REG_READ(REG_GLOBAL, 0x1d);
+ if ((ret & 0x8000) == 0)
+ return 0;
+ }
+
+ return -ETIMEDOUT;
+}
+
+static int mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
+{
+ int ret;
+
+ /*
+ * Snapshot the hardware statistics counters for this port.
+ */
+ REG_WRITE(REG_GLOBAL, 0x1d, 0xdc00 | port);
+
+ /*
+ * Wait for the snapshotting to complete.
+ */
+ ret = mv88e6xxx_stats_wait(ds);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
+{
+ u32 _val;
+ int ret;
+
+ *val = 0;
+
+ ret = mv88e6xxx_reg_write(ds, REG_GLOBAL, 0x1d, 0xcc00 | stat);
+ if (ret < 0)
+ return;
+
+ ret = mv88e6xxx_stats_wait(ds);
+ if (ret < 0)
+ return;
+
+ ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, 0x1e);
+ if (ret < 0)
+ return;
+
+ _val = ret << 16;
+
+ ret = mv88e6xxx_reg_read(ds, REG_GLOBAL, 0x1f);
+ if (ret < 0)
+ return;
+
+ *val = _val | ret;
+}
+
+void mv88e6xxx_get_strings(struct dsa_switch *ds,
+ int nr_stats, struct mv88e6xxx_hw_stat *stats,
+ int port, uint8_t *data)
+{
+ int i;
+
+ for (i = 0; i < nr_stats; i++) {
+ memcpy(data + i * ETH_GSTRING_LEN,
+ stats[i].string, ETH_GSTRING_LEN);
+ }
+}
+
+void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
+ int nr_stats, struct mv88e6xxx_hw_stat *stats,
+ int port, uint64_t *data)
+{
+ struct mv88e6xxx_priv_state *ps = (void *)(ds + 1);
+ int ret;
+ int i;
+
+ mutex_lock(&ps->stats_mutex);
+
+ ret = mv88e6xxx_stats_snapshot(ds, port);
+ if (ret < 0) {
+ mutex_unlock(&ps->stats_mutex);
+ return;
+ }
+
+ /*
+ * Read each of the counters.
+ */
+ for (i = 0; i < nr_stats; i++) {
+ struct mv88e6xxx_hw_stat *s = stats + i;
+ u32 low;
+ u32 high;
+
+ mv88e6xxx_stats_read(ds, s->reg, &low);
+ if (s->sizeof_stat == 8)
+ mv88e6xxx_stats_read(ds, s->reg + 1, &high);
+ else
+ high = 0;
+
+ data[i] = (((u64)high) << 32) | low;
+ }
+
+ mutex_unlock(&ps->stats_mutex);
+}
+
+static int __init mv88e6xxx_init(void)
+{
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
+ register_switch_driver(&mv88e6131_switch_driver);
+#endif
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
+ register_switch_driver(&mv88e6123_61_65_switch_driver);
+#endif
+ return 0;
+}
+module_init(mv88e6xxx_init);
+
+static void __exit mv88e6xxx_cleanup(void)
+{
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
+ unregister_switch_driver(&mv88e6123_61_65_switch_driver);
+#endif
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
+ unregister_switch_driver(&mv88e6131_switch_driver);
+#endif
+}
+module_exit(mv88e6xxx_cleanup);
+
+MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
+MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/dsa/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx.h
new file mode 100644
index 0000000..fc2cd7b
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx.h
@@ -0,0 +1,98 @@
+/*
+ * net/dsa/mv88e6xxx.h - Marvell 88e6xxx switch chip support
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __MV88E6XXX_H
+#define __MV88E6XXX_H
+
+#define REG_PORT(p) (0x10 + (p))
+#define REG_GLOBAL 0x1b
+#define REG_GLOBAL2 0x1c
+
+struct mv88e6xxx_priv_state {
+ /*
+ * When using multi-chip addressing, this mutex protects
+ * access to the indirect access registers. (In single-chip
+ * mode, this mutex is effectively useless.)
+ */
+ struct mutex smi_mutex;
+
+#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
+ /*
+ * Handles automatic disabling and re-enabling of the PHY
+ * polling unit.
+ */
+ struct mutex ppu_mutex;
+ int ppu_disabled;
+ struct work_struct ppu_work;
+ struct timer_list ppu_timer;
+#endif
+
+ /*
+ * This mutex serialises access to the statistics unit.
+ * Hold this mutex over snapshot + dump sequences.
+ */
+ struct mutex stats_mutex;
+
+ int id; /* switch product id */
+};
+
+struct mv88e6xxx_hw_stat {
+ char string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int reg;
+};
+
+int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg);
+int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg);
+int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
+ int reg, u16 val);
+int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val);
+int mv88e6xxx_config_prio(struct dsa_switch *ds);
+int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr);
+int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr);
+int mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum);
+int mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum, u16 val);
+void mv88e6xxx_ppu_state_init(struct dsa_switch *ds);
+int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum);
+int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
+ int regnum, u16 val);
+void mv88e6xxx_poll_link(struct dsa_switch *ds);
+void mv88e6xxx_get_strings(struct dsa_switch *ds,
+ int nr_stats, struct mv88e6xxx_hw_stat *stats,
+ int port, uint8_t *data);
+void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
+ int nr_stats, struct mv88e6xxx_hw_stat *stats,
+ int port, uint64_t *data);
+
+extern struct dsa_switch_driver mv88e6131_switch_driver;
+extern struct dsa_switch_driver mv88e6123_61_65_switch_driver;
+
+#define REG_READ(addr, reg) \
+ ({ \
+ int __ret; \
+ \
+ __ret = mv88e6xxx_reg_read(ds, addr, reg); \
+ if (__ret < 0) \
+ return __ret; \
+ __ret; \
+ })
+
+#define REG_WRITE(addr, reg, val) \
+ ({ \
+ int __ret; \
+ \
+ __ret = mv88e6xxx_reg_write(ds, addr, reg, val); \
+ if (__ret < 0) \
+ return __ret; \
+ })
+
+
+
+#endif
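
The REG_READ/REG_WRITE helpers above are statement-expression macros with a non-obvious property: on an SMI error they return from the function that uses them, so they may only appear inside functions that return an int error code and that have a struct dsa_switch *ds in scope, which is how every setup function in this patch uses them. A minimal usage sketch, illustrative only and assuming mv88e6xxx.h is included; the function itself is hypothetical, while the register offset 0x19 (VLAN ethertype) is reused from the setup code earlier in the patch.

/* Illustrative sketch, not part of the patch: intended use of the
 * REG_READ and REG_WRITE macros.
 */
static int example_reset_vlan_ethertype(struct dsa_switch *ds)
{
	int old;

	/* Bails out of *this* function with a negative errno if the
	 * SMI read fails.
	 */
	old = REG_READ(REG_GLOBAL, 0x19);

	/* Likewise returns from this function on a failed write. */
	REG_WRITE(REG_GLOBAL, 0x19, 0x8100);

	return old;
}
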
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index a7c5e88..087648e 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -134,7 +134,7 @@ static void dummy_setup(struct net_device *dev)
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;
dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO;
- dev->features |= NETIF_F_NO_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
+ dev->features |= NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_LLTX;
random_ether_addr(dev->dev_addr);
}
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 972f80e..da410f0 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -468,9 +468,10 @@ static void tc589_reset(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "PCMCIA 0x%lx", dev->base_addr);
}
static const struct ethtool_ops netdev_ethtool_ops = {
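
The get_drvinfo conversions in this patch (3c589 above and many drivers below) replace strcpy()/sprintf() into the fixed-size ethtool_drvinfo fields with strlcpy()/snprintf(), which are bounded by the destination size and always NUL-terminate. A sketch of the resulting shape, illustrative only; the example_* helper and the bus string format are assumptions.

/* Illustrative sketch, not part of the patch: bounded copies into the
 * fixed-size fields of struct ethtool_drvinfo.
 */
#include <linux/ethtool.h>
#include <linux/string.h>

static void example_fill_drvinfo(struct ethtool_drvinfo *info,
				 const char *name, const char *version,
				 unsigned long base_addr)
{
	strlcpy(info->driver, name, sizeof(info->driver));
	strlcpy(info->version, version, sizeof(info->version));
	snprintf(info->bus_info, sizeof(info->bus_info),
		 "IO 0x%lx", base_addr);
}
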
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index b42c06b..f9b74c0 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1842,7 +1842,7 @@ vortex_timer(unsigned long data)
ok = 1;
}
- if (!netif_carrier_ok(dev))
+ if (dev->flags & IFF_SLAVE || !netif_carrier_ok(dev))
next_tick = 5*HZ;
if (vp->medialock)
@@ -2929,15 +2929,17 @@ static void vortex_get_drvinfo(struct net_device *dev,
{
struct vortex_private *vp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
if (VORTEX_PCI(vp)) {
- strcpy(info->bus_info, pci_name(VORTEX_PCI(vp)));
+ strlcpy(info->bus_info, pci_name(VORTEX_PCI(vp)),
+ sizeof(info->bus_info));
} else {
if (VORTEX_EISA(vp))
- strcpy(info->bus_info, dev_name(vp->gendev));
+ strlcpy(info->bus_info, dev_name(vp->gendev),
+ sizeof(info->bus_info));
else
- sprintf(info->bus_info, "EISA 0x%lx %d",
- dev->base_addr, dev->irq);
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "EISA 0x%lx %d", dev->base_addr, dev->irq);
}
}
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 20ea075..6d6bc75 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -988,21 +988,23 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
smp_rmb();
if(tp->card_state == Sleeping) {
- strcpy(info->fw_version, "Sleep image");
+ strlcpy(info->fw_version, "Sleep image",
+ sizeof(info->fw_version));
} else {
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
- strcpy(info->fw_version, "Unknown runtime");
+ strlcpy(info->fw_version, "Unknown runtime",
+ sizeof(info->fw_version));
} else {
u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
- snprintf(info->fw_version, 32, "%02x.%03x.%03x",
- sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
- sleep_ver & 0xfff);
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "%02x.%03x.%03x", sleep_ver >> 24,
+ (sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff);
}
}
- strcpy(info->driver, KBUILD_MODNAME);
- strcpy(info->bus_info, pci_name(pci_dev));
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
static int
diff --git a/drivers/net/ethernet/8390/8390.h b/drivers/net/ethernet/8390/8390.h
index 58a12e4..ef325ff 100644
--- a/drivers/net/ethernet/8390/8390.h
+++ b/drivers/net/ethernet/8390/8390.h
@@ -14,8 +14,6 @@
#define TX_PAGES 12 /* Two Tx slots */
-#define ETHER_ADDR_LEN 6
-
/* The 8390 specific per-packet-header format. */
struct e8390_pkt_hdr {
unsigned char status; /* status */
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c
index 5477373..3ad5d2f 100644
--- a/drivers/net/ethernet/8390/apne.c
+++ b/drivers/net/ethernet/8390/apne.c
@@ -318,7 +318,7 @@ static int __init apne_probe1(struct net_device *dev, int ioaddr)
i = request_irq(dev->irq, apne_interrupt, IRQF_SHARED, DRV_NAME, dev);
if (i) return i;
- for(i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = SA_prom[i];
printk(" %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index e9f8432..0f92e35 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -623,7 +623,8 @@ static int ax_mii_init(struct net_device *dev)
ax->mii_bus->name = "ax88796_mii_bus";
ax->mii_bus->parent = dev->dev.parent;
- snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+ snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, pdev->id);
ax->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
if (!ax->mii_bus->irq) {
@@ -735,15 +736,14 @@ static int ax_init_dev(struct net_device *dev)
if (ax->plat->flags & AXFLG_MAC_FROMDEV) {
ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
ei_local->mem + E8390_CMD); /* 0x61 */
- for (i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] =
ei_inb(ioaddr + EN1_PHYS_SHIFT(i));
}
if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) &&
ax->plat->mac_addr)
- memcpy(dev->dev_addr, ax->plat->mac_addr,
- ETHER_ADDR_LEN);
+ memcpy(dev->dev_addr, ax->plat->mac_addr, ETH_ALEN);
ax_reset_8390(dev);
@@ -991,18 +991,7 @@ static struct platform_driver axdrv = {
.resume = ax_resume,
};
-static int __init axdrv_init(void)
-{
- return platform_driver_register(&axdrv);
-}
-
-static void __exit axdrv_exit(void)
-{
- platform_driver_unregister(&axdrv);
-}
-
-module_init(axdrv_init);
-module_exit(axdrv_exit);
+module_platform_driver(axdrv);
MODULE_DESCRIPTION("AX88796 10/100 Ethernet platform driver");
MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
diff --git a/drivers/net/ethernet/8390/es3210.c b/drivers/net/ethernet/8390/es3210.c
index 7a09575..6428f9e 100644
--- a/drivers/net/ethernet/8390/es3210.c
+++ b/drivers/net/ethernet/8390/es3210.c
@@ -195,7 +195,7 @@ static int __init es_probe1(struct net_device *dev, int ioaddr)
goto out;
}
- for (i = 0; i < ETHER_ADDR_LEN ; i++)
+ for (i = 0; i < ETH_ALEN ; i++)
dev->dev_addr[i] = inb(ioaddr + ES_SA_PROM + i);
/* Check the Racal vendor ID as well. */
diff --git a/drivers/net/ethernet/8390/hp-plus.c b/drivers/net/ethernet/8390/hp-plus.c
index eeac843..d42938b 100644
--- a/drivers/net/ethernet/8390/hp-plus.c
+++ b/drivers/net/ethernet/8390/hp-plus.c
@@ -202,7 +202,7 @@ static int __init hpp_probe1(struct net_device *dev, int ioaddr)
/* Retrieve and checksum the station address. */
outw(MAC_Page, ioaddr + HP_PAGING);
- for(i = 0; i < ETHER_ADDR_LEN; i++) {
+ for(i = 0; i < ETH_ALEN; i++) {
unsigned char inval = inb(ioaddr + 8 + i);
dev->dev_addr[i] = inval;
checksum += inval;
diff --git a/drivers/net/ethernet/8390/hp.c b/drivers/net/ethernet/8390/hp.c
index 18564d4..113f1e0 100644
--- a/drivers/net/ethernet/8390/hp.c
+++ b/drivers/net/ethernet/8390/hp.c
@@ -156,7 +156,7 @@ static int __init hp_probe1(struct net_device *dev, int ioaddr)
printk("%s: %s (ID %02x) at %#3x,", dev->name, name, board_id, ioaddr);
- for(i = 0; i < ETHER_ADDR_LEN; i++)
+ for(i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = inb(ioaddr + i);
printk(" %pM", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c
index 3dac937..5370c88 100644
--- a/drivers/net/ethernet/8390/hydra.c
+++ b/drivers/net/ethernet/8390/hydra.c
@@ -129,7 +129,7 @@ static int __devinit hydra_init(struct zorro_dev *z)
if (!dev)
return -ENOMEM;
- for(j = 0; j < ETHER_ADDR_LEN; j++)
+ for (j = 0; j < ETH_ALEN; j++)
dev->dev_addr[j] = *((u8 *)(board + HYDRA_ADDRPROM + 2*j));
/* We must set the 8390 for word mode. */
diff --git a/drivers/net/ethernet/8390/lne390.c b/drivers/net/ethernet/8390/lne390.c
index f9888d2..69490ae 100644
--- a/drivers/net/ethernet/8390/lne390.c
+++ b/drivers/net/ethernet/8390/lne390.c
@@ -191,14 +191,14 @@ static int __init lne390_probe1(struct net_device *dev, int ioaddr)
|| inb(ioaddr + LNE390_SA_PROM + 1) != LNE390_ADDR1
|| inb(ioaddr + LNE390_SA_PROM + 2) != LNE390_ADDR2 ) {
printk("lne390.c: card not found");
- for(i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
printk(" %02x", inb(ioaddr + LNE390_SA_PROM + i));
printk(" (invalid prefix).\n");
return -ENODEV;
}
#endif
- for(i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = inb(ioaddr + LNE390_SA_PROM + i);
printk("lne390.c: LNE390%X in EISA slot %d, address %pM.\n",
0xa+revision, ioaddr/0x1000, dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ne-h8300.c b/drivers/net/ethernet/8390/ne-h8300.c
index cd36a6a..9b9c77d 100644
--- a/drivers/net/ethernet/8390/ne-h8300.c
+++ b/drivers/net/ethernet/8390/ne-h8300.c
@@ -312,7 +312,7 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
dev->base_addr = ioaddr;
- for(i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = SA_prom[i];
printk(" %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index 1063093..f92ea2a 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -503,12 +503,12 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
#ifdef CONFIG_PLAT_MAPPI
outb_p(E8390_NODMA + E8390_PAGE1 + E8390_STOP,
ioaddr + E8390_CMD); /* 0x61 */
- for (i = 0 ; i < ETHER_ADDR_LEN ; i++) {
+ for (i = 0; i < ETH_ALEN; i++) {
dev->dev_addr[i] = SA_prom[i]
= inb_p(ioaddr + EN1_PHYS_SHIFT(i));
}
#else
- for(i = 0; i < ETHER_ADDR_LEN; i++) {
+ for (i = 0; i < ETH_ALEN; i++) {
dev->dev_addr[i] = SA_prom[i];
}
#endif
diff --git a/drivers/net/ethernet/8390/ne2.c b/drivers/net/ethernet/8390/ne2.c
index 70cdc69..922b320 100644
--- a/drivers/net/ethernet/8390/ne2.c
+++ b/drivers/net/ethernet/8390/ne2.c
@@ -460,7 +460,7 @@ static int __init ne2_probe1(struct net_device *dev, int slot)
dev->base_addr = base_addr;
- for(i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = SA_prom[i];
printk(" %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 3992342..3fab04a 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -639,9 +639,9 @@ static void ne2k_pci_get_drvinfo(struct net_device *dev,
struct ei_device *ei = netdev_priv(dev);
struct pci_dev *pci_dev = (struct pci_dev *) ei->priv;
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
static const struct ethtool_ops ne2k_pci_ethtool_ops = {
diff --git a/drivers/net/ethernet/8390/ne3210.c b/drivers/net/ethernet/8390/ne3210.c
index 243ed2a..2a3e805 100644
--- a/drivers/net/ethernet/8390/ne3210.c
+++ b/drivers/net/ethernet/8390/ne3210.c
@@ -125,7 +125,7 @@ static int __init ne3210_eisa_probe (struct device *device)
#endif
port_index = inb(ioaddr + NE3210_CFG2) >> 6;
- for(i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = inb(ioaddr + NE3210_SA_PROM + i);
printk("ne3210.c: NE3210 in EISA slot %d, media: %s, addr: %pM.\n",
edev->slot, ifmap[port_index], dev->dev_addr);
diff --git a/drivers/net/ethernet/8390/stnic.c b/drivers/net/ethernet/8390/stnic.c
index d85f0a8..3b90375 100644
--- a/drivers/net/ethernet/8390/stnic.c
+++ b/drivers/net/ethernet/8390/stnic.c
@@ -114,7 +114,7 @@ static int __init stnic_probe(void)
#ifdef CONFIG_SH_STANDARD_BIOS
sh_bios_get_node_addr (stnic_eadr);
#endif
- for (i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = stnic_eadr[i];
/* Set the base address to point to the NIC, not the "real" base! */
diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c
index 3aa9fe9..bcd2732 100644
--- a/drivers/net/ethernet/8390/zorro8390.c
+++ b/drivers/net/ethernet/8390/zorro8390.c
@@ -365,7 +365,7 @@ static int __devinit zorro8390_init(struct net_device *dev,
if (i)
return i;
- for (i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = SA_prom[i];
pr_debug("Found ethernet address: %pM\n", dev->dev_addr);
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 597f4d4..3474a61 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -28,6 +28,7 @@ source "drivers/net/ethernet/cadence/Kconfig"
source "drivers/net/ethernet/adi/Kconfig"
source "drivers/net/ethernet/broadcom/Kconfig"
source "drivers/net/ethernet/brocade/Kconfig"
+source "drivers/net/ethernet/calxeda/Kconfig"
source "drivers/net/ethernet/chelsio/Kconfig"
source "drivers/net/ethernet/cirrus/Kconfig"
source "drivers/net/ethernet/cisco/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index be5dde0..08d5f03 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -10,10 +10,11 @@ obj-$(CONFIG_NET_VENDOR_ALTEON) += alteon/
obj-$(CONFIG_NET_VENDOR_AMD) += amd/
obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
-obj-$(CONFIG_NET_ATMEL) += cadence/
+obj-$(CONFIG_NET_CADENCE) += cadence/
obj-$(CONFIG_NET_BFIN) += adi/
obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
+obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 6d9f691..cb4f38a 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -607,7 +607,7 @@ static const struct ethtool_ops ethtool_ops;
#ifdef VLAN_SUPPORT
-static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct netdev_private *np = netdev_priv(dev);
@@ -617,9 +617,11 @@ static void netdev_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
set_bit(vid, np->active_vlans);
set_rx_mode(dev);
spin_unlock(&np->lock);
+
+ return 0;
}
-static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct netdev_private *np = netdev_priv(dev);
@@ -629,6 +631,8 @@ static void netdev_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
clear_bit(vid, np->active_vlans);
set_rx_mode(dev);
spin_unlock(&np->lock);
+
+ return 0;
}
#endif /* VLAN_SUPPORT */
@@ -1842,9 +1846,9 @@ static int check_if_running(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(np->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index b6d69c9..d812a10 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -1670,7 +1670,8 @@ static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
miibus->name = "bfin_mii_bus";
miibus->phy_mask = mii_bus_pd->phy_mask;
- snprintf(miibus->id, MII_BUS_ID_SIZE, "0");
+ snprintf(miibus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, pdev->id);
miibus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
if (!miibus->irq)
goto out_err_irq_alloc;
diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c
index 442fefa..c885aa9 100644
--- a/drivers/net/ethernet/aeroflex/greth.c
+++ b/drivers/net/ethernet/aeroflex/greth.c
@@ -1623,18 +1623,7 @@ static struct platform_driver greth_of_driver = {
.remove = __devexit_p(greth_of_remove),
};
-static int __init greth_init(void)
-{
- return platform_driver_register(&greth_of_driver);
-}
-
-static void __exit greth_cleanup(void)
-{
- platform_driver_unregister(&greth_of_driver);
-}
-
-module_init(greth_init);
-module_exit(greth_cleanup);
+module_platform_driver(greth_of_driver);
MODULE_AUTHOR("Aeroflex Gaisler AB.");
MODULE_DESCRIPTION("Aeroflex Gaisler Ethernet MAC driver");
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index a9745f4..33e0a8c 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -499,7 +499,7 @@ static int amd8111e_restart(struct net_device *dev)
writel( VAL0 | APAD_XMT | REX_RTRY, mmio + CMD2 );
/* Setting the MAC address to the device */
- for(i = 0; i < ETH_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
writeb( dev->dev_addr[i], mmio + PADR + i );
/* Enable interrupt coalesce */
@@ -1412,10 +1412,11 @@ static void amd8111e_get_drvinfo(struct net_device* dev, struct ethtool_drvinfo
{
struct amd8111e_priv *lp = netdev_priv(dev);
struct pci_dev *pci_dev = lp->pci_dev;
- strcpy (info->driver, MODULE_NAME);
- strcpy (info->version, MODULE_VERS);
- sprintf(info->fw_version,"%u",chip_version);
- strcpy (info->bus_info, pci_name(pci_dev));
+ strlcpy(info->driver, MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, MODULE_VERS, sizeof(info->version));
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "%u", chip_version);
+ strlcpy(info->bus_info, pci_name(pci_dev), sizeof(info->bus_info));
}
static int amd8111e_get_regs_len(struct net_device *dev)
@@ -1549,7 +1550,7 @@ static int amd8111e_set_mac_address(struct net_device *dev, void *p)
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
spin_lock_irq(&lp->lock);
/* Setting the MAC address to the device */
- for(i = 0; i < ETH_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
writeb( dev->dev_addr[i], lp->mmio + PADR + i );
spin_unlock_irq(&lp->lock);
@@ -1885,7 +1886,7 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
}
/* Initializing MAC address */
- for(i = 0; i < ETH_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = readb(lp->mmio + PADR + i);
 /* Setting user defined parameters */
diff --git a/drivers/net/ethernet/amd/amd8111e.h b/drivers/net/ethernet/amd/amd8111e.h
index 2ff2e7a..8baa352 100644
--- a/drivers/net/ethernet/amd/amd8111e.h
+++ b/drivers/net/ethernet/amd/amd8111e.h
@@ -586,7 +586,6 @@ typedef enum {
#define PKT_BUFF_SZ 1536
#define MIN_PKT_LEN 60
-#define ETH_ADDR_LEN 6
#define AMD8111E_TX_TIMEOUT (3 * HZ)/* 3 sec */
#define SOFT_TIMER_FREQ 0xBEBC /* 0.5 sec */
@@ -808,8 +807,8 @@ typedef enum {
static int card_idx;
static int speed_duplex[MAX_UNITS] = { 0, };
-static int coalesce[MAX_UNITS] = {1,1,1,1,1,1,1,1};
-static int dynamic_ipg[MAX_UNITS] = {0,0,0,0,0,0,0,0};
+static bool coalesce[MAX_UNITS] = { [ 0 ... MAX_UNITS-1] = true };
+static bool dynamic_ipg[MAX_UNITS] = { [ 0 ... MAX_UNITS-1] = false };
static unsigned int chip_version;
#endif /* _AMD8111E_H */
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index 4865ff1..8b95dd3 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1171,7 +1171,8 @@ static int __devinit au1000_probe(struct platform_device *pdev)
aup->mii_bus->write = au1000_mdiobus_write;
aup->mii_bus->reset = au1000_mdiobus_reset;
aup->mii_bus->name = "au1000_eth_mii";
- snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%x", aup->mac_id);
+ snprintf(aup->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, aup->mac_id);
aup->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
if (aup->mii_bus->irq == NULL)
goto err_out;
@@ -1339,18 +1340,7 @@ static struct platform_driver au1000_eth_driver = {
.owner = THIS_MODULE,
},
};
-MODULE_ALIAS("platform:au1000-eth");
-
-
-static int __init au1000_init_module(void)
-{
- return platform_driver_register(&au1000_eth_driver);
-}
-static void __exit au1000_exit_module(void)
-{
- platform_driver_unregister(&au1000_eth_driver);
-}
+module_platform_driver(au1000_eth_driver);
-module_init(au1000_init_module);
-module_exit(au1000_exit_module);
+MODULE_ALIAS("platform:au1000-eth");
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 3accd5d..6be0dd6 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -160,8 +160,6 @@ Include Files
Defines
---------------------------------------------------------------------------- */
-#define ETHER_ADDR_LEN ETH_ALEN
- /* 6 bytes in an Ethernet Address */
#define MACE_LADRF_LEN 8
/* 8 bytes in Logical Address Filter */
@@ -600,7 +598,7 @@ static int mace_init(mace_private *lp, unsigned int ioaddr, char *enet_addr)
}
}
/* Set PADR register */
- for (i = 0; i < ETHER_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
mace_write(lp, ioaddr, MACE_PADR, enet_addr[i]);
/* MAC Configuration Control Register should be written last */
@@ -639,11 +637,11 @@ static int nmclan_config(struct pcmcia_device *link)
/* Read the ethernet address from the CIS. */
len = pcmcia_get_tuple(link, 0x80, &buf);
- if (!buf || len < ETHER_ADDR_LEN) {
+ if (!buf || len < ETH_ALEN) {
kfree(buf);
goto failed;
}
- memcpy(dev->dev_addr, buf, ETHER_ADDR_LEN);
+ memcpy(dev->dev_addr, buf, ETH_ALEN);
kfree(buf);
/* Verify configuration by reading the MACE ID. */
@@ -822,9 +820,10 @@ static int mace_close(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "PCMCIA 0x%lx", dev->base_addr);
}
static const struct ethtool_ops netdev_ethtool_ops = {
@@ -1420,7 +1419,7 @@ Output
static void set_multicast_list(struct net_device *dev)
{
mace_private *lp = netdev_priv(dev);
- int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */
+ int adr[ETH_ALEN] = {0}; /* Ethernet address */
struct netdev_hw_addr *ha;
#ifdef PCMCIA_DEBUG
@@ -1442,7 +1441,7 @@ static void set_multicast_list(struct net_device *dev)
/* Calculate multicast logical address filter */
memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN);
netdev_for_each_mc_addr(ha, dev) {
- memcpy(adr, ha->addr, ETHER_ADDR_LEN);
+ memcpy(adr, ha->addr, ETH_ALEN);
BuildLAF(lp->multicast_ladrf, adr);
}
}
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index f92bc6e..20e6dab 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -711,12 +711,14 @@ static void pcnet32_get_drvinfo(struct net_device *dev,
{
struct pcnet32_private *lp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
if (lp->pci_dev)
- strcpy(info->bus_info, pci_name(lp->pci_dev));
+ strlcpy(info->bus_info, pci_name(lp->pci_dev),
+ sizeof(info->bus_info));
else
- sprintf(info->bus_info, "VLB 0x%lx", dev->base_addr);
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "VLB 0x%lx", dev->base_addr);
}
static u32 pcnet32_get_link(struct net_device *dev)
diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c
index 8fda457..7ea16d3 100644
--- a/drivers/net/ethernet/amd/sunlance.c
+++ b/drivers/net/ethernet/amd/sunlance.c
@@ -1540,17 +1540,4 @@ static struct platform_driver sunlance_sbus_driver = {
.remove = __devexit_p(sunlance_sbus_remove),
};
-
-/* Find all the lance cards on the system and initialize them */
-static int __init sparc_lance_init(void)
-{
- return platform_driver_register(&sunlance_sbus_driver);
-}
-
-static void __exit sparc_lance_exit(void)
-{
- platform_driver_unregister(&sunlance_sbus_driver);
-}
-
-module_init(sparc_lance_init);
-module_exit(sparc_lance_exit);
+module_platform_driver(sunlance_sbus_driver);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index 7be884d..0a9326a 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -232,7 +232,6 @@ static void atl1c_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->driver, atl1c_driver_name, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, atl1c_driver_version,
sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_stats = 0;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 02c7ed8..47a9bb2 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -411,7 +411,7 @@ static void atl1c_set_multi(struct net_device *netdev)
}
}
-static void __atl1c_vlan_mode(u32 features, u32 *mac_ctrl_data)
+static void __atl1c_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
{
if (features & NETIF_F_HW_VLAN_RX) {
/* enable VLAN tag insert/strip */
@@ -422,7 +422,8 @@ static void __atl1c_vlan_mode(u32 features, u32 *mac_ctrl_data)
}
}
-static void atl1c_vlan_mode(struct net_device *netdev, u32 features)
+static void atl1c_vlan_mode(struct net_device *netdev,
+ netdev_features_t features)
{
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct pci_dev *pdev = adapter->pdev;
@@ -482,7 +483,8 @@ static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter,
roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE;
}
-static u32 atl1c_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t atl1c_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -499,9 +501,10 @@ static u32 atl1c_fix_features(struct net_device *netdev, u32 features)
return features;
}
-static int atl1c_set_features(struct net_device *netdev, u32 features)
+static int atl1c_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
- u32 changed = netdev->features ^ features;
+ netdev_features_t changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
atl1c_vlan_mode(netdev, features);
@@ -1707,7 +1710,7 @@ static irqreturn_t atl1c_intr(int irq, void *data)
"atl1c hardware error (status = 0x%x)\n",
status & ISR_ERROR);
/* reset MAC */
- adapter->work_event |= ATL1C_WORK_EVENT_RESET;
+ set_bit(ATL1C_WORK_EVENT_RESET, &adapter->work_event);
schedule_work(&adapter->common_task);
return IRQ_HANDLED;
}
@@ -2241,10 +2244,6 @@ static netdev_tx_t atl1c_xmit_frame(struct sk_buff *skb,
dev_info(&adapter->pdev->dev, "tx locked\n");
return NETDEV_TX_LOCKED;
}
- if (skb->mark == 0x01)
- type = atl1c_trans_high;
- else
- type = atl1c_trans_normal;
if (atl1c_tpd_avail(adapter, type) < tpd_req) {
 /* not enough descriptors, just stop the queue */
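
The atl1c changes above, and the atl1e/atl2/atlx changes that follow, switch the feature-handling callbacks from u32 to netdev_features_t, a wider integer type, so the set of NETIF_F_* flags is no longer capped at 32 bits. A sketch of the resulting callback shapes, illustrative only: the example_* names and the placeholder body are assumptions, while the rx/tx VLAN coupling mirrors the drivers' own comments.

/* Illustrative sketch, not part of the patch: ndo_fix_features and
 * ndo_set_features after the u32 -> netdev_features_t conversion.
 */
static netdev_features_t example_fix_features(struct net_device *netdev,
					       netdev_features_t features)
{
	/* No separate rx/tx VLAN acceleration in this hardware, so
	 * keep the two flags in sync (as the atl drivers do).
	 */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;

	return features;
}

static int example_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_RX) {
		/* reprogram hardware VLAN tag stripping here */
	}

	return 0;
}
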
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index 6269438..6e61f9f 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -310,10 +310,12 @@ static void atl1e_get_drvinfo(struct net_device *netdev,
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
- strncpy(drvinfo->driver, atl1e_driver_name, 32);
- strncpy(drvinfo->version, atl1e_driver_version, 32);
- strncpy(drvinfo->fw_version, "L1e", 32);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->driver, atl1e_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, atl1e_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "L1e", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = 0;
drvinfo->testinfo_len = 0;
drvinfo->regdump_len = atl1e_get_regs_len(netdev);
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index 95483bc..c915c08 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -313,7 +313,7 @@ static void atl1e_set_multi(struct net_device *netdev)
}
}
-static void __atl1e_vlan_mode(u32 features, u32 *mac_ctrl_data)
+static void __atl1e_vlan_mode(netdev_features_t features, u32 *mac_ctrl_data)
{
if (features & NETIF_F_HW_VLAN_RX) {
/* enable VLAN tag insert/strip */
@@ -324,7 +324,8 @@ static void __atl1e_vlan_mode(u32 features, u32 *mac_ctrl_data)
}
}
-static void atl1e_vlan_mode(struct net_device *netdev, u32 features)
+static void atl1e_vlan_mode(struct net_device *netdev,
+ netdev_features_t features)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
u32 mac_ctrl_data = 0;
@@ -370,7 +371,8 @@ static int atl1e_set_mac_addr(struct net_device *netdev, void *p)
return 0;
}
-static u32 atl1e_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t atl1e_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -384,9 +386,10 @@ static u32 atl1e_fix_features(struct net_device *netdev, u32 features)
return features;
}
-static int atl1e_set_features(struct net_device *netdev, u32 features)
+static int atl1e_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
- u32 changed = netdev->features ^ features;
+ netdev_features_t changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
atl1e_vlan_mode(netdev, features);
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 33a4e35..9bd2049 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -3365,7 +3365,6 @@ static void atl1_get_drvinfo(struct net_device *netdev,
strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, ATLX_DRIVER_VERSION,
sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->eedump_len = ATL1_EEDUMP_LEN;
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 1feae59..071f4c8 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -361,7 +361,7 @@ static inline void atl2_irq_disable(struct atl2_adapter *adapter)
synchronize_irq(adapter->pdev->irq);
}
-static void __atl2_vlan_mode(u32 features, u32 *ctrl)
+static void __atl2_vlan_mode(netdev_features_t features, u32 *ctrl)
{
if (features & NETIF_F_HW_VLAN_RX) {
/* enable VLAN tag insert/strip */
@@ -372,7 +372,8 @@ static void __atl2_vlan_mode(u32 features, u32 *ctrl)
}
}
-static void atl2_vlan_mode(struct net_device *netdev, u32 features)
+static void atl2_vlan_mode(struct net_device *netdev,
+ netdev_features_t features)
{
struct atl2_adapter *adapter = netdev_priv(netdev);
u32 ctrl;
@@ -391,7 +392,8 @@ static void atl2_restore_vlan(struct atl2_adapter *adapter)
atl2_vlan_mode(adapter->netdev, adapter->netdev->features);
}
-static u32 atl2_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t atl2_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -405,9 +407,10 @@ static u32 atl2_fix_features(struct net_device *netdev, u32 features)
return features;
}
-static int atl2_set_features(struct net_device *netdev, u32 features)
+static int atl2_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
- u32 changed = netdev->features ^ features;
+ netdev_features_t changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
atl2_vlan_mode(netdev, features);
@@ -2049,10 +2052,12 @@ static void atl2_get_drvinfo(struct net_device *netdev,
{
struct atl2_adapter *adapter = netdev_priv(netdev);
- strncpy(drvinfo->driver, atl2_driver_name, 32);
- strncpy(drvinfo->version, atl2_driver_version, 32);
- strncpy(drvinfo->fw_version, "L2", 32);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->driver, atl2_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, atl2_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "L2", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = 0;
drvinfo->testinfo_len = 0;
drvinfo->regdump_len = atl2_get_regs_len(netdev);
diff --git a/drivers/net/ethernet/atheros/atlx/atlx.c b/drivers/net/ethernet/atheros/atlx/atlx.c
index aabcf4b..8ff7411 100644
--- a/drivers/net/ethernet/atheros/atlx/atlx.c
+++ b/drivers/net/ethernet/atheros/atlx/atlx.c
@@ -211,7 +211,7 @@ static void atlx_link_chg_task(struct work_struct *work)
spin_unlock_irqrestore(&adapter->lock, flags);
}
-static void __atlx_vlan_mode(u32 features, u32 *ctrl)
+static void __atlx_vlan_mode(netdev_features_t features, u32 *ctrl)
{
if (features & NETIF_F_HW_VLAN_RX) {
/* enable VLAN tag insert/strip */
@@ -222,7 +222,8 @@ static void __atlx_vlan_mode(u32 features, u32 *ctrl)
}
}
-static void atlx_vlan_mode(struct net_device *netdev, u32 features)
+static void atlx_vlan_mode(struct net_device *netdev,
+ netdev_features_t features)
{
struct atlx_adapter *adapter = netdev_priv(netdev);
unsigned long flags;
@@ -242,7 +243,8 @@ static void atlx_restore_vlan(struct atlx_adapter *adapter)
atlx_vlan_mode(adapter->netdev, adapter->netdev->features);
}
-static u32 atlx_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t atlx_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -256,9 +258,10 @@ static u32 atlx_fix_features(struct net_device *netdev, u32 features)
return features;
}
-static int atlx_set_features(struct net_device *netdev, u32 features)
+static int atlx_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
- u32 changed = netdev->features ^ features;
+ netdev_features_t changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
atlx_vlan_mode(netdev, features);
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 3fb66d0..cab8745 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2339,7 +2339,7 @@ static inline int __init b44_pci_init(void)
return err;
}
-static inline void __exit b44_pci_exit(void)
+static inline void b44_pci_exit(void)
{
#ifdef CONFIG_B44_PCI
ssb_pcihost_unregister(&b44_pci_driver);
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index a11a8ad..c7ca7ec 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -797,7 +797,7 @@ static int bcm_enet_open(struct net_device *dev)
if (priv->has_phy) {
/* connect to PHY */
snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
- priv->mac_id ? "1" : "0", priv->phy_id);
+ priv->mii_bus->id, priv->phy_id);
phydev = phy_connect(dev, phy_id, bcm_enet_adjust_phy_link, 0,
PHY_INTERFACE_MODE_MII);
@@ -1469,7 +1469,7 @@ static int bcm_enet_set_pauseparam(struct net_device *dev,
return 0;
}
-static struct ethtool_ops bcm_enet_ethtool_ops = {
+static const struct ethtool_ops bcm_enet_ethtool_ops = {
.get_strings = bcm_enet_get_strings,
.get_sset_count = bcm_enet_get_sset_count,
.get_ethtool_stats = bcm_enet_get_ethtool_stats,
@@ -1727,7 +1727,7 @@ static int __devinit bcm_enet_probe(struct platform_device *pdev)
bus->priv = priv;
bus->read = bcm_enet_mdio_read_phylib;
bus->write = bcm_enet_mdio_write_phylib;
- sprintf(bus->id, "%d", priv->mac_id);
+ sprintf(bus->id, "%s-%d", pdev->name, priv->mac_id);
/* only probe bus where we think the PHY is, because
* the mdio read operation return 0 instead of 0xffff
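bcm63xx_enet now derives its MDIO bus id from the platform device name ("%s-%d") and builds the PHY address string with PHY_ID_FMT from mii_bus->id, instead of the old hand-rolled "0"/"1". A small sketch of that naming pattern; the parameters stand in for the driver's pdev/priv fields and are assumptions, not code from this patch:

#include <linux/phy.h>
#include <linux/platform_device.h>

static void foo_name_mdio(struct mii_bus *bus, struct platform_device *pdev,
			  int instance, int phy_addr,
			  char *phy_id, size_t phy_id_len)
{
	/* unique bus id: "<platform-device-name>-<instance>" */
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, instance);

	/* PHY_ID_FMT expands to "<bus-id>:<addr>"; phy_connect() parses it */
	snprintf(phy_id, phy_id_len, PHY_ID_FMT, bus->id, phy_addr);
}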
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 965c723..021fb81 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -57,11 +57,11 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define DRV_MODULE_VERSION "2.1.11"
-#define DRV_MODULE_RELDATE "July 20, 2011"
-#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.1.fw"
+#define DRV_MODULE_VERSION "2.2.1"
+#define DRV_MODULE_RELDATE "Dec 18, 2011"
+#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.3.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
-#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1a.fw"
+#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1b.fw"
#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw"
@@ -409,7 +409,7 @@ static int bnx2_unregister_cnic(struct net_device *dev)
mutex_lock(&bp->cnic_lock);
cp->drv_state = 0;
bnapi->cnic_present = 0;
- rcu_assign_pointer(bp->cnic_ops, NULL);
+ RCU_INIT_POINTER(bp->cnic_ops, NULL);
mutex_unlock(&bp->cnic_lock);
synchronize_rcu();
return 0;
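rcu_assign_pointer() carries a write barrier so readers never see a half-initialized object; when the new value is NULL there is nothing to order, so RCU_INIT_POINTER() is sufficient, which is what the hunk above switches to. A generic sketch of the two cases (struct foo and the globals are illustrative):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int val;
};

static struct foo __rcu *foo_ptr;

static void foo_publish(struct foo *newf)
{
	/* publishing a real object: barrier needed so readers see ->val */
	rcu_assign_pointer(foo_ptr, newf);
}

static void foo_unpublish(void)
{
	/* clearing to NULL: no ordering needed, RCU_INIT_POINTER is enough */
	RCU_INIT_POINTER(foo_ptr, NULL);
	synchronize_rcu();	/* let readers drain before freeing the object */
}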
@@ -2054,8 +2054,8 @@ __acquires(&bp->phy_lock)
if (bp->autoneg & AUTONEG_SPEED) {
u32 adv_reg, adv1000_reg;
- u32 new_adv_reg = 0;
- u32 new_adv1000_reg = 0;
+ u32 new_adv = 0;
+ u32 new_adv1000 = 0;
bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
@@ -2064,27 +2064,18 @@ __acquires(&bp->phy_lock)
bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
adv1000_reg &= PHY_ALL_1000_SPEED;
- if (bp->advertising & ADVERTISED_10baseT_Half)
- new_adv_reg |= ADVERTISE_10HALF;
- if (bp->advertising & ADVERTISED_10baseT_Full)
- new_adv_reg |= ADVERTISE_10FULL;
- if (bp->advertising & ADVERTISED_100baseT_Half)
- new_adv_reg |= ADVERTISE_100HALF;
- if (bp->advertising & ADVERTISED_100baseT_Full)
- new_adv_reg |= ADVERTISE_100FULL;
- if (bp->advertising & ADVERTISED_1000baseT_Full)
- new_adv1000_reg |= ADVERTISE_1000FULL;
+ new_adv = ethtool_adv_to_mii_adv_t(bp->advertising);
+ new_adv |= ADVERTISE_CSMA;
+ new_adv |= bnx2_phy_get_pause_adv(bp);
- new_adv_reg |= ADVERTISE_CSMA;
+ new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
- new_adv_reg |= bnx2_phy_get_pause_adv(bp);
-
- if ((adv1000_reg != new_adv1000_reg) ||
- (adv_reg != new_adv_reg) ||
+ if ((adv1000_reg != new_adv1000) ||
+ (adv_reg != new_adv) ||
((bmcr & BMCR_ANENABLE) == 0)) {
- bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
- bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
+ bnx2_write_phy(bp, bp->mii_adv, new_adv);
+ bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
BMCR_ANENABLE);
}
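The rewritten autoneg block above drops the open-coded ADVERTISED_*-to-ADVERTISE_* if-chains in favor of the mii.h helpers. A minimal sketch of the two conversions on their own (foo_build_adv() is illustrative):

#include <linux/ethtool.h>
#include <linux/mii.h>

static void foo_build_adv(u32 ethtool_advertising, u32 *adv, u32 *adv1000)
{
	/* 10/100 and pause bits -> MII_ADVERTISE register layout */
	*adv = ethtool_adv_to_mii_adv_t(ethtool_advertising) | ADVERTISE_CSMA;

	/* 1000BASE-T bits -> MII_CTRL1000 register layout */
	*adv1000 = ethtool_adv_to_mii_ctrl1000_t(ethtool_advertising);
}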
@@ -2734,31 +2725,27 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
}
static inline int
-bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
+bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
{
- struct sk_buff *skb;
+ u8 *data;
struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
dma_addr_t mapping;
struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
- unsigned long align;
- skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
- if (skb == NULL) {
+ data = kmalloc(bp->rx_buf_size, gfp);
+ if (!data)
return -ENOMEM;
- }
- if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
- skb_reserve(skb, BNX2_RX_ALIGN - align);
-
- mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
+ mapping = dma_map_single(&bp->pdev->dev,
+ get_l2_fhdr(data),
+ bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
if (dma_mapping_error(&bp->pdev->dev, mapping)) {
- dev_kfree_skb(skb);
+ kfree(data);
return -EIO;
}
- rx_buf->skb = skb;
- rx_buf->desc = (struct l2_fhdr *) skb->data;
+ rx_buf->data = data;
dma_unmap_addr_set(rx_buf, mapping, mapping);
rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
@@ -2823,6 +2810,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
u16 hw_cons, sw_cons, sw_ring_cons;
int tx_pkt = 0, index;
+ unsigned int tx_bytes = 0;
struct netdev_queue *txq;
index = (bnapi - bp->bnx2_napi);
@@ -2877,6 +2865,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
sw_cons = NEXT_TX_BD(sw_cons);
+ tx_bytes += skb->len;
dev_kfree_skb(skb);
tx_pkt++;
if (tx_pkt == budget)
@@ -2886,6 +2875,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
hw_cons = bnx2_get_hw_tx_cons(bnapi);
}
+ netdev_tx_completed_queue(txq, tx_pkt, tx_bytes);
txr->hw_tx_cons = hw_cons;
txr->tx_cons = sw_cons;
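The netdev_tx_sent_queue()/netdev_tx_completed_queue() pair added in this patch hooks bnx2 into byte queue limits (BQL): bytes are reported when descriptors are posted and again when completions are reaped, and netdev_tx_reset_queue() clears the accounting when a ring is flushed. A condensed sketch of the three calls in a generic single-queue driver (foo_* names are placeholders):

#include <linux/netdevice.h>

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	/* ...post TX descriptors for skb here... */
	netdev_tx_sent_queue(txq, skb->len);	/* account bytes handed to HW */
	return NETDEV_TX_OK;
}

static void foo_tx_clean(struct net_device *dev,
			 unsigned int pkts, unsigned int bytes)
{
	/* report what this completion pass reclaimed */
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, 0), pkts, bytes);
}

static void foo_tx_ring_reset(struct net_device *dev)
{
	/* drop outstanding BQL state when the ring is torn down */
	netdev_tx_reset_queue(netdev_get_tx_queue(dev, 0));
}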
@@ -2965,8 +2955,8 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
}
static inline void
-bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
- struct sk_buff *skb, u16 cons, u16 prod)
+bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
+ u8 *data, u16 cons, u16 prod)
{
struct sw_bd *cons_rx_buf, *prod_rx_buf;
struct rx_bd *cons_bd, *prod_bd;
@@ -2980,8 +2970,7 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
rxr->rx_prod_bseq += bp->rx_buf_use_size;
- prod_rx_buf->skb = skb;
- prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
+ prod_rx_buf->data = data;
if (cons == prod)
return;
@@ -2995,33 +2984,39 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
-static int
-bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
+static struct sk_buff *
+bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
u32 ring_idx)
{
int err;
u16 prod = ring_idx & 0xffff;
+ struct sk_buff *skb;
- err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
+ err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
if (unlikely(err)) {
- bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
+ bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
+error:
if (hdr_len) {
unsigned int raw_len = len + 4;
int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
}
- return err;
+ return NULL;
}
- skb_reserve(skb, BNX2_RX_OFFSET);
dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
-
+ skb = build_skb(data);
+ if (!skb) {
+ kfree(data);
+ goto error;
+ }
+ skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
if (hdr_len == 0) {
skb_put(skb, len);
- return 0;
+ return skb;
} else {
unsigned int i, frag_len, frag_size, pages;
struct sw_pg *rx_pg;
@@ -3052,7 +3047,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
skb_frag_size_sub(frag, tail);
skb->data_len -= tail;
}
- return 0;
+ return skb;
}
rx_pg = &rxr->rx_pg_ring[pg_cons];
@@ -3074,7 +3069,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
rxr->rx_pg_prod = pg_prod;
bnx2_reuse_rx_skb_pages(bp, rxr, skb,
pages - i);
- return err;
+ return NULL;
}
dma_unmap_page(&bp->pdev->dev, mapping_old,
@@ -3091,7 +3086,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
rxr->rx_pg_prod = pg_prod;
rxr->rx_pg_cons = pg_cons;
}
- return 0;
+ return skb;
}
static inline u16
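bnx2_rx_skb() now receives the raw buffer and only materializes an skb once the hardware has filled it: build_skb() wraps the existing kmalloc() memory without copying, and on failure the buffer is simply kfree()d. A stripped-down sketch of that pattern, using the single-argument build_skb() of this tree (later kernels add a frag_size argument) and the patch's get_l2_fhdr()/BNX2_RX_OFFSET names:

	struct sk_buff *skb;

	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	skb = build_skb(data);		/* no copy: skb head points into data */
	if (!skb) {
		kfree(data);		/* buffer is plain kmalloc() memory */
		return NULL;
	}

	/* skip the alignment pad and the hardware l2_fhdr in front of the frame */
	skb_reserve(skb, ((u8 *)get_l2_fhdr(data) - data) + BNX2_RX_OFFSET);
	skb_put(skb, len);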
@@ -3130,19 +3125,17 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
struct sw_bd *rx_buf, *next_rx_buf;
struct sk_buff *skb;
dma_addr_t dma_addr;
+ u8 *data;
sw_ring_cons = RX_RING_IDX(sw_cons);
sw_ring_prod = RX_RING_IDX(sw_prod);
rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
- skb = rx_buf->skb;
- prefetchw(skb);
-
- next_rx_buf =
- &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
- prefetch(next_rx_buf->desc);
+ data = rx_buf->data;
+ rx_buf->data = NULL;
- rx_buf->skb = NULL;
+ rx_hdr = get_l2_fhdr(data);
+ prefetch(rx_hdr);
dma_addr = dma_unmap_addr(rx_buf, mapping);
@@ -3150,7 +3143,10 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
PCI_DMA_FROMDEVICE);
- rx_hdr = rx_buf->desc;
+ next_rx_buf =
+ &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
+ prefetch(get_l2_fhdr(next_rx_buf->data));
+
len = rx_hdr->l2_fhdr_pkt_len;
status = rx_hdr->l2_fhdr_status;
@@ -3169,7 +3165,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
L2_FHDR_ERRORS_TOO_SHORT |
L2_FHDR_ERRORS_GIANT_FRAME))) {
- bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
+ bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
sw_ring_prod);
if (pg_ring_used) {
int pages;
@@ -3184,30 +3180,29 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
len -= 4;
if (len <= bp->rx_copy_thresh) {
- struct sk_buff *new_skb;
-
- new_skb = netdev_alloc_skb(bp->dev, len + 6);
- if (new_skb == NULL) {
- bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
+ skb = netdev_alloc_skb(bp->dev, len + 6);
+ if (skb == NULL) {
+ bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
sw_ring_prod);
goto next_rx;
}
/* aligned copy */
- skb_copy_from_linear_data_offset(skb,
- BNX2_RX_OFFSET - 6,
- new_skb->data, len + 6);
- skb_reserve(new_skb, 6);
- skb_put(new_skb, len);
+ memcpy(skb->data,
+ (u8 *)rx_hdr + BNX2_RX_OFFSET - 6,
+ len + 6);
+ skb_reserve(skb, 6);
+ skb_put(skb, len);
- bnx2_reuse_rx_skb(bp, rxr, skb,
+ bnx2_reuse_rx_data(bp, rxr, data,
sw_ring_cons, sw_ring_prod);
- skb = new_skb;
- } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
- dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
- goto next_rx;
-
+ } else {
+ skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
+ (sw_ring_cons << 16) | sw_ring_prod);
+ if (!skb)
+ goto next_rx;
+ }
if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
__vlan_hwaccel_put_tag(skb, rx_hdr->l2_fhdr_vlan_tag);
@@ -5234,7 +5229,7 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
ring_prod = prod = rxr->rx_prod;
for (i = 0; i < bp->rx_ring_size; i++) {
- if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
+ if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
ring_num, i, bp->rx_ring_size);
break;
@@ -5329,7 +5324,7 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
- sizeof(struct skb_shared_info);
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
bp->rx_pg_ring_size = 0;
@@ -5351,8 +5346,9 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
}
bp->rx_buf_use_size = rx_size;
- /* hw alignment */
- bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
+ /* hw alignment + build_skb() overhead */
+ bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
+ NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
bp->rx_ring_size = size;
bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
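The new rx_buf_size formula above reserves what build_skb() expects around the frame: headroom (NET_SKB_PAD plus the hardware alignment) in front, and SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) at the tail where the shared info lives. The same arithmetic as a standalone helper, for clarity (foo_rx_buf_size() is illustrative, not part of the patch):

#include <linux/skbuff.h>

/* "use_size" is the area the NIC may DMA into; "align" is the device's
 * RX alignment (BNX2_RX_ALIGN in this driver).
 */
static unsigned int foo_rx_buf_size(unsigned int use_size, unsigned int align)
{
	return SKB_DATA_ALIGN(use_size + align) + NET_SKB_PAD +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}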
@@ -5400,6 +5396,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
}
dev_kfree_skb(skb);
}
+ netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
}
}
@@ -5418,9 +5415,9 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
for (j = 0; j < bp->rx_max_ring_idx; j++) {
struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
- struct sk_buff *skb = rx_buf->skb;
+ u8 *data = rx_buf->data;
- if (skb == NULL)
+ if (data == NULL)
continue;
dma_unmap_single(&bp->pdev->dev,
@@ -5428,9 +5425,9 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
bp->rx_buf_use_size,
PCI_DMA_FROMDEVICE);
- rx_buf->skb = NULL;
+ rx_buf->data = NULL;
- dev_kfree_skb(skb);
+ kfree(data);
}
for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
bnx2_free_rx_page(bp, rxr, j);
@@ -5736,7 +5733,8 @@ static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
unsigned int pkt_size, num_pkts, i;
- struct sk_buff *skb, *rx_skb;
+ struct sk_buff *skb;
+ u8 *data;
unsigned char *packet;
u16 rx_start_idx, rx_idx;
dma_addr_t map;
@@ -5828,14 +5826,14 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
}
rx_buf = &rxr->rx_buf_ring[rx_start_idx];
- rx_skb = rx_buf->skb;
+ data = rx_buf->data;
- rx_hdr = rx_buf->desc;
- skb_reserve(rx_skb, BNX2_RX_OFFSET);
+ rx_hdr = get_l2_fhdr(data);
+ data = (u8 *)rx_hdr + BNX2_RX_OFFSET;
dma_sync_single_for_cpu(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
- bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+ bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
if (rx_hdr->l2_fhdr_status &
(L2_FHDR_ERRORS_BAD_CRC |
@@ -5852,7 +5850,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
}
for (i = 14; i < pkt_size; i++) {
- if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
+ if (*(data + i) != (unsigned char) (i & 0xff)) {
goto loopback_test_done;
}
}
@@ -6552,6 +6550,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
+ netdev_tx_sent_queue(txq, skb->len);
+
prod = NEXT_TX_BD(prod);
txr->tx_prod_bseq += skb->len;
@@ -6873,10 +6873,10 @@ bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct bnx2 *bp = netdev_priv(dev);
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
- strcpy(info->bus_info, pci_name(bp->pdev));
- strcpy(info->fw_version, bp->fw_version);
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
+ strlcpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
}
#define BNX2_REGDUMP_LEN (32 * 1024)
@@ -7571,8 +7571,8 @@ bnx2_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state)
return 0;
}
-static u32
-bnx2_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t
+bnx2_fix_features(struct net_device *dev, netdev_features_t features)
{
struct bnx2 *bp = netdev_priv(dev);
@@ -7583,7 +7583,7 @@ bnx2_fix_features(struct net_device *dev, u32 features)
}
static int
-bnx2_set_features(struct net_device *dev, u32 features)
+bnx2_set_features(struct net_device *dev, netdev_features_t features)
{
struct bnx2 *bp = netdev_priv(dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index 99d31a7..1db2d51 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -6563,12 +6563,25 @@ struct l2_fhdr {
#define MB_TX_CID_ADDR MB_GET_CID_ADDR(TX_CID)
#define MB_RX_CID_ADDR MB_GET_CID_ADDR(RX_CID)
+/*
+ * This driver uses the new build_skb() API:
+ * the RX ring buffer contains a pointer to kmalloc() data only,
+ * and skbs are built only after the hardware has filled the frame.
+ */
struct sw_bd {
- struct sk_buff *skb;
- struct l2_fhdr *desc;
+ u8 *data;
DEFINE_DMA_UNMAP_ADDR(mapping);
};
+/* It's faster to compute this from data than to store it in sw_bd
+ * (fewer cache misses).
+ */
+static inline struct l2_fhdr *get_l2_fhdr(u8 *data)
+{
+ return (struct l2_fhdr *)(PTR_ALIGN(data, BNX2_RX_ALIGN) + NET_SKB_PAD);
+}
+
+
struct sw_pg {
struct page *page;
DEFINE_DMA_UNMAP_ADDR(mapping);
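For reference, the buffer layout implied by get_l2_fhdr() above and the rx_buf_size computation in bnx2.c, as an illustrative comment (not code from the patch; the exact pad sizes depend on BNX2_RX_ALIGN and NET_SKB_PAD):

/*
 * kmalloc()'ed RX buffer, as assumed by get_l2_fhdr() and build_skb():
 *
 *  data
 *   |<- align to BNX2_RX_ALIGN ->|<- NET_SKB_PAD ->| l2_fhdr | frame ... | shinfo |
 *                                                   ^
 *                                                   get_l2_fhdr(data)
 *
 * The tail "shinfo" slot is SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
 * which bp->rx_buf_size accounts for so build_skb() never overruns the buffer.
 */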
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index aec7212..8c73d34 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -23,8 +23,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.70.30-0"
-#define DRV_MODULE_RELDATE "2011/10/25"
+#define DRV_MODULE_VERSION "1.70.35-0"
+#define DRV_MODULE_RELDATE "2011/11/10"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_DCB)
@@ -293,8 +293,13 @@ enum {
#define FCOE_TXQ_IDX(bp) (MAX_ETH_TXQ_IDX(bp))
/* fast path */
+/*
+ * This driver uses the new build_skb() API:
+ * the RX ring buffer contains a pointer to kmalloc() data only,
+ * and skbs are built only after the hardware has filled the frame.
+ */
struct sw_rx_bd {
- struct sk_buff *skb;
+ u8 *data;
DEFINE_DMA_UNMAP_ADDR(mapping);
};
@@ -411,8 +416,7 @@ union db_prod {
/* Number of u64 elements in SGE mask array */
-#define RX_SGE_MASK_LEN ((NUM_RX_SGE_PAGES * RX_SGE_CNT) / \
- BIT_VEC64_ELEM_SZ)
+#define RX_SGE_MASK_LEN (NUM_RX_SGE / BIT_VEC64_ELEM_SZ)
#define RX_SGE_MASK_LEN_MASK (RX_SGE_MASK_LEN - 1)
#define NEXT_SGE_MASK_ELEM(el) (((el) + 1) & RX_SGE_MASK_LEN_MASK)
@@ -425,8 +429,8 @@ union host_hc_status_block {
struct bnx2x_agg_info {
/*
- * First aggregation buffer is an skb, the following - are pages.
- * We will preallocate the skbs for each aggregation when
+ * First aggregation buffer is a data buffer; the following ones are pages.
+ * We will preallocate the data buffer for each aggregation when
* we open the interface and will replace the BD at the consumer
* with this one when we receive the TPA_START CQE in order to
* keep the Rx BD ring consistent.
@@ -440,6 +444,7 @@ struct bnx2x_agg_info {
u16 parsing_flags;
u16 vlan_tag;
u16 len_on_bd;
+ u32 rxhash;
};
#define Q_STATS_OFFSET32(stat_name) \
@@ -507,6 +512,7 @@ struct bnx2x_fastpath {
__le16 fp_hc_idx;
u8 index; /* number in fp array */
+ u8 rx_queue; /* index for skb_record */
u8 cl_id; /* eth client id */
u8 cl_qzone_id;
u8 fw_sb_id; /* status block number in FW */
@@ -881,6 +887,8 @@ struct bnx2x_common {
#define CHIP_PORT_MODE_NONE 0x2
#define CHIP_MODE(bp) (bp->common.chip_port_mode)
#define CHIP_MODE_IS_4_PORT(bp) (CHIP_MODE(bp) == CHIP_4_PORT_MODE)
+
+ u32 boot_mode;
};
/* IGU MSIX STATISTICS on 57712: 64 for VFs; 4 for PFs; 4 for Attentions */
@@ -1042,6 +1050,8 @@ struct bnx2x_slowpath {
u32 wb_comp;
u32 wb_data[4];
+
+ union drv_info_to_mcp drv_info_to_mcp;
};
#define bnx2x_sp(bp, var) (&bp->slowpath->var)
@@ -1122,18 +1132,21 @@ enum {
enum {
BNX2X_PORT_QUERY_IDX,
BNX2X_PF_QUERY_IDX,
+ BNX2X_FCOE_QUERY_IDX,
BNX2X_FIRST_QUEUE_QUERY_IDX,
};
struct bnx2x_fw_stats_req {
struct stats_query_header hdr;
- struct stats_query_entry query[STATS_QUERY_CMD_COUNT];
+ struct stats_query_entry query[FP_SB_MAX_E1x+
+ BNX2X_FIRST_QUEUE_QUERY_IDX];
};
struct bnx2x_fw_stats_data {
struct stats_counter storm_counters;
struct per_port_stats port;
struct per_pf_stats pf;
+ struct fcoe_statistics_params fcoe;
struct per_queue_stats queue_stats[1];
};
@@ -1141,6 +1154,7 @@ struct bnx2x_fw_stats_data {
enum {
BNX2X_SP_RTNL_SETUP_TC,
BNX2X_SP_RTNL_TX_TIMEOUT,
+ BNX2X_SP_RTNL_FAN_FAILURE,
};
@@ -1186,10 +1200,20 @@ struct bnx2x {
#define ETH_MAX_JUMBO_PACKET_SIZE 9600
/* Max supported alignment is 256 (8 shift) */
-#define BNX2X_RX_ALIGN_SHIFT ((L1_CACHE_SHIFT < 8) ? \
- L1_CACHE_SHIFT : 8)
- /* FW use 2 Cache lines Alignment for start packet and size */
-#define BNX2X_FW_RX_ALIGN (2 << BNX2X_RX_ALIGN_SHIFT)
+#define BNX2X_RX_ALIGN_SHIFT min(8, L1_CACHE_SHIFT)
+
+ /* FW uses 2-cache-line alignment for the packet start and size.
+ *
+ * We assume build_skb() uses sizeof(struct skb_shared_info) bytes
+ * at the end of skb->data, to avoid wasting a full cache line.
+ * This reduces memory use (skb->truesize).
+ */
+#define BNX2X_FW_RX_ALIGN_START (1UL << BNX2X_RX_ALIGN_SHIFT)
+
+#define BNX2X_FW_RX_ALIGN_END \
+ max(1UL << BNX2X_RX_ALIGN_SHIFT, \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+
#define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5)
struct host_sp_status_block *def_status_blk;
@@ -1249,6 +1273,7 @@ struct bnx2x {
#define NO_ISCSI_OOO_FLAG (1 << 13)
#define NO_ISCSI_FLAG (1 << 14)
#define NO_FCOE_FLAG (1 << 15)
+#define BC_SUPPORTS_PFC_STATS (1 << 17)
#define NO_ISCSI(bp) ((bp)->flags & NO_ISCSI_FLAG)
#define NO_ISCSI_OOO(bp) ((bp)->flags & NO_ISCSI_OOO_FLAG)
@@ -1984,13 +2009,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \
AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)
-#define RSS_FLAGS(bp) \
- (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
- TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_TCP_CAPABILITY | \
- TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_CAPABILITY | \
- TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV6_TCP_CAPABILITY | \
- (bp->multi_mode << \
- TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT))
#define MULTI_MASK 0x7f
@@ -2055,6 +2073,8 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
#define BNX2X_VPD_LEN 128
#define VENDOR_ID_LEN 4
+int bnx2x_close(struct net_device *dev);
+
/* Congestion management fairness mode */
#define CMNG_FNS_NONE 0
#define CMNG_FNS_MINMAX 1
@@ -2072,4 +2092,16 @@ static const u32 dmae_reg_go_c[] = {
void bnx2x_set_ethtool_ops(struct net_device *netdev);
void bnx2x_notify_link_changed(struct bnx2x *bp);
+
+
+#define BNX2X_MF_PROTOCOL(bp) \
+ ((bp)->mf_config[BP_VN(bp)] & FUNC_MF_CFG_PROTOCOL_MASK)
+
+#ifdef BCM_CNIC
+#define BNX2X_IS_MF_PROTOCOL_ISCSI(bp) \
+ (BNX2X_MF_PROTOCOL(bp) == FUNC_MF_CFG_PROTOCOL_ISCSI)
+
+#define IS_MF_ISCSI_SD(bp) (IS_MF_SD(bp) && BNX2X_IS_MF_PROTOCOL_ISCSI(bp))
+#endif
+
#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 580b44e..99389c8 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -79,19 +79,21 @@ static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
* @to: destination FP index
*
* Makes sure the contents of the bp->fp[to].napi is kept
- * intact.
+ * intact. This is done by first copying the napi struct from
+ * the target to the source, and then memcpy()ing the entire
+ * source onto the target.
*/
static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
{
struct bnx2x_fastpath *from_fp = &bp->fp[from];
struct bnx2x_fastpath *to_fp = &bp->fp[to];
- struct napi_struct orig_napi = to_fp->napi;
+
+ /* Copy the NAPI object as it has been already initialized */
+ from_fp->napi = to_fp->napi;
+
/* Move bnx2x_fastpath contents */
memcpy(to_fp, from_fp, sizeof(*to_fp));
to_fp->index = to;
-
- /* Restore the NAPI object as it has been already initialized */
- to_fp->napi = orig_napi;
}
int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
@@ -100,7 +102,8 @@ int load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
* return idx of last bd freed
*/
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
- u16 idx)
+ u16 idx, unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
{
struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
struct eth_tx_start_bd *tx_start_bd;
@@ -157,6 +160,10 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
/* release skb */
WARN_ON(!skb);
+ if (skb) {
+ (*pkts_compl)++;
+ (*bytes_compl) += skb->len;
+ }
dev_kfree_skb_any(skb);
tx_buf->first_bd = 0;
tx_buf->skb = NULL;
@@ -168,6 +175,7 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
{
struct netdev_queue *txq;
u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -187,10 +195,14 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
" pkt_cons %u\n",
txdata->txq_index, hw_cons, sw_cons, pkt_cons);
- bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons);
+ bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
+ &pkts_compl, &bytes_compl);
+
sw_cons++;
}
+ netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
+
txdata->tx_pkt_cons = sw_cons;
txdata->tx_bd_cons = bd_cons;
@@ -292,8 +304,21 @@ static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
fp->last_max_sge, fp->rx_sge_prod);
}
+/* Set Toeplitz hash value in the skb using the value from the
+ * CQE (calculated by HW).
+ */
+static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
+ const struct eth_fast_path_rx_cqe *cqe)
+{
+ /* Set Toeplitz hash from CQE */
+ if ((bp->dev->features & NETIF_F_RXHASH) &&
+ (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
+ return le32_to_cpu(cqe->rss_hash_result);
+ return 0;
+}
+
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
- struct sk_buff *skb, u16 cons, u16 prod,
+ u16 cons, u16 prod,
struct eth_fast_path_rx_cqe *cqe)
{
struct bnx2x *bp = fp->bp;
@@ -308,9 +333,9 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
if (tpa_info->tpa_state != BNX2X_TPA_STOP)
BNX2X_ERR("start of bin not in stop [%d]\n", queue);
- /* Try to map an empty skb from the aggregation info */
+ /* Try to map an empty data buffer from the aggregation info */
mapping = dma_map_single(&bp->pdev->dev,
- first_buf->skb->data,
+ first_buf->data + NET_SKB_PAD,
fp->rx_buf_size, DMA_FROM_DEVICE);
/*
* ...if it fails - move the skb from the consumer to the producer
@@ -320,15 +345,15 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
/* Move the BD from the consumer to the producer */
- bnx2x_reuse_rx_skb(fp, cons, prod);
+ bnx2x_reuse_rx_data(fp, cons, prod);
tpa_info->tpa_state = BNX2X_TPA_ERROR;
return;
}
- /* move empty skb from pool to prod */
- prod_rx_buf->skb = first_buf->skb;
+ /* move empty data from pool to prod */
+ prod_rx_buf->data = first_buf->data;
dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
- /* point prod_bd to new skb */
+ /* point prod_bd to new data */
prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -342,6 +367,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
tpa_info->tpa_state = BNX2X_TPA_START;
tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
tpa_info->placement_offset = cqe->placement_offset;
+ tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe);
#ifdef BNX2X_STOP_ON_ERROR
fp->tpa_queue_used |= (1 << queue);
@@ -469,11 +495,12 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
{
struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
- u8 pad = tpa_info->placement_offset;
+ u32 pad = tpa_info->placement_offset;
u16 len = tpa_info->len_on_bd;
- struct sk_buff *skb = rx_buf->skb;
+ struct sk_buff *skb = NULL;
+ u8 *data = rx_buf->data;
/* alloc new skb */
- struct sk_buff *new_skb;
+ u8 *new_data;
u8 old_tpa_state = tpa_info->tpa_state;
tpa_info->tpa_state = BNX2X_TPA_STOP;
@@ -484,19 +511,18 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
if (old_tpa_state == BNX2X_TPA_ERROR)
goto drop;
- /* Try to allocate the new skb */
- new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
+ /* Try to allocate the new data */
+ new_data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
/* Unmap skb in the pool anyway, as we are going to change
pool entry status to BNX2X_TPA_STOP even if new skb allocation
fails. */
dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
fp->rx_buf_size, DMA_FROM_DEVICE);
+ if (likely(new_data))
+ skb = build_skb(data);
- if (likely(new_skb)) {
- prefetch(skb);
- prefetch(((char *)(skb)) + L1_CACHE_BYTES);
-
+ if (likely(skb)) {
#ifdef BNX2X_STOP_ON_ERROR
if (pad + len > fp->rx_buf_size) {
BNX2X_ERR("skb_put is about to fail... "
@@ -507,8 +533,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
#endif
- skb_reserve(skb, pad);
+ skb_reserve(skb, pad + NET_SKB_PAD);
skb_put(skb, len);
+ skb->rxhash = tpa_info->rxhash;
skb->protocol = eth_type_trans(skb, bp->dev);
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -524,12 +551,12 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
}
- /* put new skb in bin */
- rx_buf->skb = new_skb;
+ /* put new data in bin */
+ rx_buf->data = new_data;
return;
}
-
+ kfree(new_data);
drop:
/* drop the packet and keep the buffer in the bin */
DP(NETIF_MSG_RX_STATUS,
@@ -537,19 +564,6 @@ drop:
fp->eth_q_stats.rx_skb_alloc_failed++;
}
-/* Set Toeplitz hash value in the skb using the value from the
- * CQE (calculated by HW).
- */
-static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
- struct sk_buff *skb)
-{
- /* Set Toeplitz hash from CQE */
- if ((bp->dev->features & NETIF_F_RXHASH) &&
- (cqe->fast_path_cqe.status_flags &
- ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
- skb->rxhash =
- le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
-}
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
@@ -592,6 +606,7 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
u8 cqe_fp_flags;
enum eth_rx_cqe_type cqe_fp_type;
u16 len, pad;
+ u8 *data;
#ifdef BNX2X_STOP_ON_ERROR
if (unlikely(bp->panic))
@@ -602,13 +617,6 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
bd_prod = RX_BD(bd_prod);
bd_cons = RX_BD(bd_cons);
- /* Prefetch the page containing the BD descriptor
- at producer's index. It will be needed when new skb is
- allocated */
- prefetch((void *)(PAGE_ALIGN((unsigned long)
- (&fp->rx_desc_ring[bd_prod])) -
- PAGE_SIZE + 1));
-
cqe = &fp->rx_comp_ring[comp_ring_cons];
cqe_fp = &cqe->fast_path_cqe;
cqe_fp_flags = cqe_fp->type_error_flags;
@@ -624,138 +632,123 @@ int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
bnx2x_sp_event(fp, cqe);
goto next_cqe;
+ }
+ rx_buf = &fp->rx_buf_ring[bd_cons];
+ data = rx_buf->data;
- /* this is an rx packet */
- } else {
- rx_buf = &fp->rx_buf_ring[bd_cons];
- skb = rx_buf->skb;
- prefetch(skb);
-
- if (!CQE_TYPE_FAST(cqe_fp_type)) {
+ if (!CQE_TYPE_FAST(cqe_fp_type)) {
#ifdef BNX2X_STOP_ON_ERROR
- /* sanity check */
- if (fp->disable_tpa &&
- (CQE_TYPE_START(cqe_fp_type) ||
- CQE_TYPE_STOP(cqe_fp_type)))
- BNX2X_ERR("START/STOP packet while "
- "disable_tpa type %x\n",
- CQE_TYPE(cqe_fp_type));
+ /* sanity check */
+ if (fp->disable_tpa &&
+ (CQE_TYPE_START(cqe_fp_type) ||
+ CQE_TYPE_STOP(cqe_fp_type)))
+ BNX2X_ERR("START/STOP packet while "
+ "disable_tpa type %x\n",
+ CQE_TYPE(cqe_fp_type));
#endif
- if (CQE_TYPE_START(cqe_fp_type)) {
- u16 queue = cqe_fp->queue_index;
- DP(NETIF_MSG_RX_STATUS,
- "calling tpa_start on queue %d\n",
- queue);
-
- bnx2x_tpa_start(fp, queue, skb,
- bd_cons, bd_prod,
- cqe_fp);
-
- /* Set Toeplitz hash for LRO skb */
- bnx2x_set_skb_rxhash(bp, cqe, skb);
-
- goto next_rx;
+ if (CQE_TYPE_START(cqe_fp_type)) {
+ u16 queue = cqe_fp->queue_index;
+ DP(NETIF_MSG_RX_STATUS,
+ "calling tpa_start on queue %d\n",
+ queue);
- } else {
- u16 queue =
- cqe->end_agg_cqe.queue_index;
- DP(NETIF_MSG_RX_STATUS,
- "calling tpa_stop on queue %d\n",
- queue);
-
- bnx2x_tpa_stop(bp, fp, queue,
- &cqe->end_agg_cqe,
- comp_ring_cons);
+ bnx2x_tpa_start(fp, queue,
+ bd_cons, bd_prod,
+ cqe_fp);
+ goto next_rx;
+ } else {
+ u16 queue =
+ cqe->end_agg_cqe.queue_index;
+ DP(NETIF_MSG_RX_STATUS,
+ "calling tpa_stop on queue %d\n",
+ queue);
+
+ bnx2x_tpa_stop(bp, fp, queue,
+ &cqe->end_agg_cqe,
+ comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
- if (bp->panic)
- return 0;
+ if (bp->panic)
+ return 0;
#endif
- bnx2x_update_sge_prod(fp, cqe_fp);
- goto next_cqe;
- }
+ bnx2x_update_sge_prod(fp, cqe_fp);
+ goto next_cqe;
}
- /* non TPA */
- len = le16_to_cpu(cqe_fp->pkt_len);
- pad = cqe_fp->placement_offset;
- dma_sync_single_for_cpu(&bp->pdev->dev,
+ }
+ /* non TPA */
+ len = le16_to_cpu(cqe_fp->pkt_len);
+ pad = cqe_fp->placement_offset;
+ dma_sync_single_for_cpu(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
- pad + RX_COPY_THRESH,
- DMA_FROM_DEVICE);
- prefetch(((char *)(skb)) + L1_CACHE_BYTES);
+ pad + RX_COPY_THRESH,
+ DMA_FROM_DEVICE);
+ pad += NET_SKB_PAD;
+ prefetch(data + pad); /* speedup eth_type_trans() */
+ /* is this an error packet? */
+ if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
+ DP(NETIF_MSG_RX_ERR,
+ "ERROR flags %x rx packet %u\n",
+ cqe_fp_flags, sw_comp_cons);
+ fp->eth_q_stats.rx_err_discard_pkt++;
+ goto reuse_rx;
+ }
- /* is this an error packet? */
- if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
+ /* Since we don't have a jumbo ring
+ * copy small packets if mtu > 1500
+ */
+ if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
+ (len <= RX_COPY_THRESH)) {
+ skb = netdev_alloc_skb_ip_align(bp->dev, len);
+ if (skb == NULL) {
DP(NETIF_MSG_RX_ERR,
- "ERROR flags %x rx packet %u\n",
- cqe_fp_flags, sw_comp_cons);
- fp->eth_q_stats.rx_err_discard_pkt++;
+ "ERROR packet dropped because of alloc failure\n");
+ fp->eth_q_stats.rx_skb_alloc_failed++;
goto reuse_rx;
}
-
- /* Since we don't have a jumbo ring
- * copy small packets if mtu > 1500
- */
- if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
- (len <= RX_COPY_THRESH)) {
- struct sk_buff *new_skb;
-
- new_skb = netdev_alloc_skb(bp->dev, len + pad);
- if (new_skb == NULL) {
- DP(NETIF_MSG_RX_ERR,
- "ERROR packet dropped "
- "because of alloc failure\n");
- fp->eth_q_stats.rx_skb_alloc_failed++;
- goto reuse_rx;
- }
-
- /* aligned copy */
- skb_copy_from_linear_data_offset(skb, pad,
- new_skb->data + pad, len);
- skb_reserve(new_skb, pad);
- skb_put(new_skb, len);
-
- bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
-
- skb = new_skb;
-
- } else
- if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
+ memcpy(skb->data, data + pad, len);
+ bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
+ } else {
+ if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod) == 0)) {
dma_unmap_single(&bp->pdev->dev,
- dma_unmap_addr(rx_buf, mapping),
+ dma_unmap_addr(rx_buf, mapping),
fp->rx_buf_size,
DMA_FROM_DEVICE);
+ skb = build_skb(data);
+ if (unlikely(!skb)) {
+ kfree(data);
+ fp->eth_q_stats.rx_skb_alloc_failed++;
+ goto next_rx;
+ }
skb_reserve(skb, pad);
- skb_put(skb, len);
-
} else {
DP(NETIF_MSG_RX_ERR,
"ERROR packet dropped because "
"of alloc failure\n");
fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
- bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
+ bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
goto next_rx;
}
+ }
- skb->protocol = eth_type_trans(skb, bp->dev);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, bp->dev);
- /* Set Toeplitz hash for a none-LRO skb */
- bnx2x_set_skb_rxhash(bp, cqe, skb);
+ /* Set Toeplitz hash for a non-LRO skb */
+ skb->rxhash = bnx2x_get_rxhash(bp, cqe_fp);
- skb_checksum_none_assert(skb);
+ skb_checksum_none_assert(skb);
- if (bp->dev->features & NETIF_F_RXCSUM) {
+ if (bp->dev->features & NETIF_F_RXCSUM) {
- if (likely(BNX2X_RX_CSUM_OK(cqe)))
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- else
- fp->eth_q_stats.hw_csum_err++;
- }
+ if (likely(BNX2X_RX_CSUM_OK(cqe)))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ fp->eth_q_stats.hw_csum_err++;
}
- skb_record_rx_queue(skb, fp->index);
+ skb_record_rx_queue(skb, fp->rx_queue);
if (le16_to_cpu(cqe_fp->pars_flags.flags) &
PARSING_FLAGS_VLAN)
@@ -765,7 +758,7 @@ reuse_rx:
next_rx:
- rx_buf->skb = NULL;
+ rx_buf->data = NULL;
bd_cons = NEXT_RX_IDX(bd_cons);
bd_prod = NEXT_RX_IDX(bd_prod);
@@ -1011,9 +1004,9 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
struct sw_rx_bd *first_buf =
&tpa_info->first_buf;
- first_buf->skb = netdev_alloc_skb(bp->dev,
- fp->rx_buf_size);
- if (!first_buf->skb) {
+ first_buf->data = kmalloc(fp->rx_buf_size + NET_SKB_PAD,
+ GFP_ATOMIC);
+ if (!first_buf->data) {
BNX2X_ERR("Failed to allocate TPA "
"skb pool for queue[%d] - "
"disabling TPA on this "
@@ -1093,16 +1086,18 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
struct bnx2x_fastpath *fp = &bp->fp[i];
for_each_cos_in_tx_queue(fp, cos) {
struct bnx2x_fp_txdata *txdata = &fp->txdata[cos];
+ unsigned pkts_compl = 0, bytes_compl = 0;
- u16 bd_cons = txdata->tx_bd_cons;
u16 sw_prod = txdata->tx_pkt_prod;
u16 sw_cons = txdata->tx_pkt_cons;
while (sw_cons != sw_prod) {
- bd_cons = bnx2x_free_tx_pkt(bp, txdata,
- TX_BD(sw_cons));
+ bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
+ &pkts_compl, &bytes_compl);
sw_cons++;
}
+ netdev_tx_reset_queue(
+ netdev_get_tx_queue(bp->dev, txdata->txq_index));
}
}
}
@@ -1118,16 +1113,16 @@ static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
for (i = 0; i < NUM_RX_BD; i++) {
struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
- struct sk_buff *skb = rx_buf->skb;
+ u8 *data = rx_buf->data;
- if (skb == NULL)
+ if (data == NULL)
continue;
dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
fp->rx_buf_size, DMA_FROM_DEVICE);
- rx_buf->skb = NULL;
- dev_kfree_skb(skb);
+ rx_buf->data = NULL;
+ kfree(data);
}
}
@@ -1445,6 +1440,11 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
break;
}
+#ifdef BCM_CNIC
+ /* override in iSCSI SD mode */
+ if (IS_MF_ISCSI_SD(bp))
+ bp->num_queues = 1;
+#endif
/* Add special queues */
bp->num_queues += NON_ETH_CONTEXT_USE;
}
@@ -1509,6 +1509,7 @@ static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
for_each_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
+ u32 mtu;
/* Always use a mini-jumbo MTU for the FCoE L2 ring */
if (IS_FCOE_IDX(i))
@@ -1518,13 +1519,15 @@ static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
* IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
* overrun attack.
*/
- fp->rx_buf_size =
- BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
- BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+ mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
else
- fp->rx_buf_size =
- bp->dev->mtu + ETH_OVREHEAD +
- BNX2X_FW_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
+ mtu = bp->dev->mtu;
+ fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
+ IP_HEADER_ALIGNMENT_PADDING +
+ ETH_OVREHEAD +
+ mtu +
+ BNX2X_FW_RX_ALIGN_END;
+ /* Note: rx_buf_size doesn't take NET_SKB_PAD into account */
}
}
@@ -1541,7 +1544,8 @@ static inline int bnx2x_init_rss_pf(struct bnx2x *bp)
if (bp->multi_mode != ETH_RSS_MODE_DISABLED) {
for (i = 0; i < sizeof(ind_table); i++)
ind_table[i] =
- bp->fp->cl_id + (i % num_eth_queues);
+ bp->fp->cl_id +
+ ethtool_rxfh_indir_default(i, num_eth_queues);
}
/*
@@ -1929,13 +1933,17 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
break;
}
- if (!bp->port.pmf)
+ if (bp->port.pmf)
+ bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
+ else
bnx2x__link_status_update(bp);
/* start the timer */
mod_timer(&bp->timer, jiffies + bp->current_interval);
#ifdef BCM_CNIC
+ /* re-read iscsi info */
+ bnx2x_get_iscsi_info(bp);
bnx2x_setup_cnic_irq_info(bp);
if (bp->state == BNX2X_STATE_OPEN)
bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
@@ -2799,6 +2807,7 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
skb_frag_size(frag), DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
+ unsigned int pkts_compl = 0, bytes_compl = 0;
DP(NETIF_MSG_TX_QUEUED, "Unable to map page - "
"dropping packet...\n");
@@ -2810,7 +2819,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
*/
first_bd->nbd = cpu_to_le16(nbd);
bnx2x_free_tx_pkt(bp, txdata,
- TX_BD(txdata->tx_pkt_prod));
+ TX_BD(txdata->tx_pkt_prod),
+ &pkts_compl, &bytes_compl);
return NETDEV_TX_OK;
}
@@ -2871,6 +2881,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
pbd_e2->parsing_data);
DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
+ netdev_tx_sent_queue(txq, skb->len);
+
txdata->tx_pkt_prod++;
/*
* Make sure that the BD data is updated before updating the producer
@@ -2981,9 +2993,14 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p)
struct bnx2x *bp = netdev_priv(dev);
int rc = 0;
- if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
+ if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data))
return -EINVAL;
+#ifdef BCM_CNIC
+ if (IS_MF_ISCSI_SD(bp) && !is_zero_ether_addr(addr->sa_data))
+ return -EINVAL;
+#endif
+
if (netif_running(dev)) {
rc = bnx2x_set_eth_mac(bp, false);
if (rc)
@@ -3098,7 +3115,12 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
u8 cos;
int rx_ring_size = 0;
- /* if rx_ring_size specified - use it */
+#ifdef BCM_CNIC
+ if (!bp->rx_ring_size && IS_MF_ISCSI_SD(bp)) {
+ rx_ring_size = MIN_RX_SIZE_NONTPA;
+ bp->rx_ring_size = rx_ring_size;
+ } else
+#endif
if (!bp->rx_ring_size) {
rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
@@ -3108,7 +3130,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
MIN_RX_SIZE_TPA, rx_ring_size);
bp->rx_ring_size = rx_ring_size;
- } else
+ } else /* if rx_ring_size specified - use it */
rx_ring_size = bp->rx_ring_size;
/* Common */
@@ -3278,14 +3300,14 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
msix_table_size = bp->igu_sb_cnt + 1;
/* fp array: RSS plus CNIC related L2 queues */
- fp = kzalloc((BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE) *
+ fp = kcalloc(BNX2X_MAX_RSS_COUNT(bp) + NON_ETH_CONTEXT_USE,
sizeof(*fp), GFP_KERNEL);
if (!fp)
goto alloc_err;
bp->fp = fp;
/* msix table */
- tbl = kzalloc(msix_table_size * sizeof(*tbl), GFP_KERNEL);
+ tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
if (!tbl)
goto alloc_err;
bp->msix_table = tbl;
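kcalloc(n, size, flags) is the zeroing array allocator with built-in overflow checking on the n * size multiplication, which is why the fp and msix_table allocations above prefer it over kzalloc(n * size, ...) when the count is a runtime value. A tiny sketch (foo_entry is a placeholder type):

#include <linux/slab.h>
#include <linux/types.h>

struct foo_entry {
	u32 a;
	u32 b;
};

static struct foo_entry *foo_alloc_table(unsigned int n)
{
	/* zeroed array of n entries; returns NULL on overflow or OOM */
	return kcalloc(n, sizeof(struct foo_entry), GFP_KERNEL);
}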
@@ -3409,7 +3431,8 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
return bnx2x_reload_if_running(dev);
}
-u32 bnx2x_fix_features(struct net_device *dev, u32 features)
+netdev_features_t bnx2x_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -3420,7 +3443,7 @@ u32 bnx2x_fix_features(struct net_device *dev, u32 features)
return features;
}
-int bnx2x_set_features(struct net_device *dev, u32 features)
+int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
struct bnx2x *bp = netdev_priv(dev);
u32 flags = bp->flags;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 283d663..4f40f7d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -20,6 +20,7 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
#include "bnx2x.h"
@@ -533,8 +534,9 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
*/
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type);
#endif
-u32 bnx2x_fix_features(struct net_device *dev, u32 features);
-int bnx2x_set_features(struct net_device *dev, u32 features);
+netdev_features_t bnx2x_fix_features(struct net_device *dev,
+ netdev_features_t features);
+int bnx2x_set_features(struct net_device *dev, netdev_features_t features);
/**
* bnx2x_tx_timeout - tx timeout netdev callback
@@ -874,8 +876,7 @@ static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
- memset(fp->sge_mask, 0xff,
- (NUM_RX_SGE >> BIT_VEC64_ELEM_SHIFT)*sizeof(u64));
+ memset(fp->sge_mask, 0xff, sizeof(fp->sge_mask));
/* Clear the two last indices in the page to 1:
these are the indices that correspond to the "next" element,
@@ -911,26 +912,27 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
return 0;
}
-static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
- struct bnx2x_fastpath *fp, u16 index)
+static inline int bnx2x_alloc_rx_data(struct bnx2x *bp,
+ struct bnx2x_fastpath *fp, u16 index)
{
- struct sk_buff *skb;
+ u8 *data;
struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
dma_addr_t mapping;
- skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
- if (unlikely(skb == NULL))
+ data = kmalloc(fp->rx_buf_size + NET_SKB_PAD, GFP_ATOMIC);
+ if (unlikely(data == NULL))
return -ENOMEM;
- mapping = dma_map_single(&bp->pdev->dev, skb->data, fp->rx_buf_size,
+ mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
+ fp->rx_buf_size,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
- dev_kfree_skb_any(skb);
+ kfree(data);
return -ENOMEM;
}
- rx_buf->skb = skb;
+ rx_buf->data = data;
dma_unmap_addr_set(rx_buf, mapping, mapping);
rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
@@ -939,12 +941,12 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
return 0;
}
-/* note that we are not allocating a new skb,
+/* note that we are not allocating a new buffer,
* we are just moving one from cons to prod
* we are not creating a new mapping,
* so there is no need to check for dma_mapping_error().
*/
-static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
+static inline void bnx2x_reuse_rx_data(struct bnx2x_fastpath *fp,
u16 cons, u16 prod)
{
struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
@@ -954,7 +956,7 @@ static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
dma_unmap_addr_set(prod_rx_buf, mapping,
dma_unmap_addr(cons_rx_buf, mapping));
- prod_rx_buf->skb = cons_rx_buf->skb;
+ prod_rx_buf->data = cons_rx_buf->data;
*prod_bd = *cons_bd;
}
@@ -1030,9 +1032,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
for (i = 0; i < last; i++) {
struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
struct sw_rx_bd *first_buf = &tpa_info->first_buf;
- struct sk_buff *skb = first_buf->skb;
+ u8 *data = first_buf->data;
- if (skb == NULL) {
+ if (data == NULL) {
DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
continue;
}
@@ -1040,8 +1042,8 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
dma_unmap_single(&bp->pdev->dev,
dma_unmap_addr(first_buf, mapping),
fp->rx_buf_size, DMA_FROM_DEVICE);
- dev_kfree_skb(skb);
- first_buf->skb = NULL;
+ kfree(data);
+ first_buf->data = NULL;
}
}
@@ -1149,7 +1151,7 @@ static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
* fp->eth_q_stats.rx_skb_alloc_failed = 0
*/
for (i = 0; i < rx_ring_size; i++) {
- if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
+ if (bnx2x_alloc_rx_data(bp, fp, ring_prod) < 0) {
fp->eth_q_stats.rx_skb_alloc_failed++;
continue;
}
@@ -1177,10 +1179,16 @@ static inline int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
*/
static inline u8 bnx2x_stats_id(struct bnx2x_fastpath *fp)
{
- if (!CHIP_IS_E1x(fp->bp))
+ struct bnx2x *bp = fp->bp;
+ if (!CHIP_IS_E1x(bp)) {
+#ifdef BCM_CNIC
+ /* there are special statistics counters for FCoE 136..140 */
+ if (IS_FCOE_FP(fp))
+ return bp->cnic_base_cl_id + (bp->pf_num >> 1);
+#endif
return fp->cl_id;
- else
- return fp->cl_id + BP_PORT(fp->bp) * FP_SB_MAX_E1x;
+ }
+ return fp->cl_id + BP_PORT(bp) * FP_SB_MAX_E1x;
}
static inline void bnx2x_init_vlan_mac_fp_objs(struct bnx2x_fastpath *fp,
@@ -1318,6 +1326,7 @@ static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
unsigned long q_type = 0;
+ bnx2x_fcoe(bp, rx_queue) = BNX2X_NUM_ETH_QUEUES(bp);
bnx2x_fcoe(bp, cl_id) = bnx2x_cnic_eth_cl_id(bp,
BNX2X_FCOE_ETH_CL_ID_IDX);
/** Current BNX2X_FCOE_ETH_CID definition implies not more than
@@ -1488,4 +1497,77 @@ static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
return max_cfg;
}
+/**
+ * bnx2x_get_iscsi_info - update iSCSI params according to licensing info.
+ *
+ * @bp: driver handle
+ *
+ */
+void bnx2x_get_iscsi_info(struct bnx2x *bp);
+
+/* returns func by VN for current port */
+static inline int func_by_vn(struct bnx2x *bp, int vn)
+{
+ return 2 * vn + BP_PORT(bp);
+}
+
+/**
+ * bnx2x_link_sync_notify - send notification to other functions.
+ *
+ * @bp: driver handle
+ *
+ */
+static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
+{
+ int func;
+ int vn;
+
+ /* Set the attention towards other drivers on the same port */
+ for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
+ if (vn == BP_VN(bp))
+ continue;
+
+ func = func_by_vn(bp, vn);
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
+ (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
+ }
+}
+
+/**
+ * bnx2x_update_drv_flags - update flags in shmem
+ *
+ * @bp: driver handle
+ * @flags: flags to update
+ * @set: set or clear
+ *
+ */
+static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
+{
+ if (SHMEM2_HAS(bp, drv_flags)) {
+ u32 drv_flags;
+ bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+ drv_flags = SHMEM2_RD(bp, drv_flags);
+
+ if (set)
+ SET_FLAGS(drv_flags, flags);
+ else
+ RESET_FLAGS(drv_flags, flags);
+
+ SHMEM2_WR(bp, drv_flags, drv_flags);
+ DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
+ bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
+ }
+}
+
+static inline bool bnx2x_is_valid_ether_addr(struct bnx2x *bp, u8 *addr)
+{
+ if (is_valid_ether_addr(addr))
+ return true;
+#ifdef BCM_CNIC
+ if (is_zero_ether_addr(addr) && IS_MF_ISCSI_SD(bp))
+ return true;
+#endif
+ return false;
+}
+
#endif /* BNX2X_CMN_H */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 51bd748..6d82ade 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -685,24 +685,6 @@ int bnx2x_dcbnl_update_applist(struct bnx2x *bp, bool delall)
}
#endif
-static inline void bnx2x_update_drv_flags(struct bnx2x *bp, u32 flags, u32 set)
-{
- if (SHMEM2_HAS(bp, drv_flags)) {
- u32 drv_flags;
- bnx2x_acquire_hw_lock(bp, HW_LOCK_DRV_FLAGS);
- drv_flags = SHMEM2_RD(bp, drv_flags);
-
- if (set)
- SET_FLAGS(drv_flags, flags);
- else
- RESET_FLAGS(drv_flags, flags);
-
- SHMEM2_WR(bp, drv_flags, drv_flags);
- DP(NETIF_MSG_HW, "drv_flags 0x%08x\n", drv_flags);
- bnx2x_release_hw_lock(bp, HW_LOCK_DRV_FLAGS);
- }
-}
-
static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
{
u8 prio, cos;
@@ -753,20 +735,30 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
bp->dcbx_error);
/* mark DCBX result for PMF migration */
- bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 1);
+ bnx2x_update_drv_flags(bp,
+ 1 << DRV_FLAGS_DCB_CONFIGURED,
+ 1);
#ifdef BCM_DCBNL
- /**
+ /*
* Add new app tlvs to dcbnl
*/
bnx2x_dcbnl_update_applist(bp, false);
#endif
- bnx2x_dcbx_stop_hw_tx(bp);
-
- /* reconfigure the netdevice with the results of the new
+ /*
+ * reconfigure the netdevice with the results of the new
* dcbx negotiation.
*/
bnx2x_dcbx_update_tc_mapping(bp);
+ /*
+ * allow other functions to update their netdevices
+ * accordingly
+ */
+ if (IS_MF(bp))
+ bnx2x_link_sync_notify(bp);
+
+ bnx2x_dcbx_stop_hw_tx(bp);
+
return;
}
case BNX2X_DCBX_STATE_TX_PAUSED:
@@ -775,6 +767,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
bnx2x_dcbx_update_ets_params(bp);
bnx2x_dcbx_resume_hw_tx(bp);
+
return;
case BNX2X_DCBX_STATE_TX_RELEASED:
DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n");
@@ -883,7 +876,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
/*For IEEE admin_recommendation_bw_precentage
*For IEEE admin_recommendation_ets_pg */
af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap;
- for (i = 0; i < 4; i++) {
+ for (i = 0; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) {
if (dp->admin_priority_app_table[i].valid) {
struct bnx2x_admin_priority_app_table *table =
dp->admin_priority_app_table;
@@ -923,7 +916,7 @@ static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
{
- if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3(bp)) {
+ if (!CHIP_IS_E1x(bp)) {
bp->dcb_state = dcb_on;
bp->dcbx_enabled = dcbx_enabled;
} else {
@@ -1029,7 +1022,7 @@ void bnx2x_dcbx_init(struct bnx2x *bp)
DP(NETIF_MSG_LINK, "dcbx_lldp_params_offset 0x%x\n",
dcbx_lldp_params_offset);
- bnx2x_update_drv_flags(bp, DRV_FLAGS_DCB_CONFIGURED, 0);
+ bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_DCB_CONFIGURED, 0);
if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) {
bnx2x_dcbx_admin_mib_updated_params(bp,
@@ -1863,10 +1856,10 @@ static void bnx2x_dcbx_fw_struct(struct bnx2x *bp,
void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
{
/* if we need to syncronize DCBX result from prev PMF
- * read it from shmem and update bp accordingly
+ * read it from shmem and update bp and netdev accordingly
*/
if (SHMEM2_HAS(bp, drv_flags) &&
- GET_FLAGS(SHMEM2_RD(bp, drv_flags), DRV_FLAGS_DCB_CONFIGURED)) {
+ GET_FLAGS(SHMEM2_RD(bp, drv_flags), 1 << DRV_FLAGS_DCB_CONFIGURED)) {
/* Read neg results if dcbx is in the FW */
if (bnx2x_dcbx_read_shmem_neg_results(bp))
return;
@@ -1875,6 +1868,22 @@ void bnx2x_dcbx_pmf_update(struct bnx2x *bp)
bp->dcbx_error);
bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
bp->dcbx_error);
+#ifdef BCM_DCBNL
+ /*
+ * Add new app tlvs to dcbnl
+ */
+ bnx2x_dcbnl_update_applist(bp, false);
+ /*
+ * Send a notification for the new negotiated parameters
+ */
+ dcbnl_cee_notify(bp->dev, RTM_GETDCB, DCB_CMD_CEE_GET, 0, 0);
+#endif
+ /*
+ * reconfigure the netdevice with the results of the new
+ * dcbx negotiation.
+ */
+ bnx2x_dcbx_update_tc_mapping(bp);
+
}
}
@@ -2242,7 +2251,7 @@ static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up)
int i, ff;
/* iterate over the app entries looking for idtype and idval */
- for (i = 0, ff = -1; i < 4; i++) {
+ for (i = 0, ff = -1; i < DCBX_CONFIG_MAX_APP_PROTOCOL; i++) {
struct bnx2x_admin_priority_app_table *app_ent =
&bp->dcbx_config_params.admin_priority_app_table[i];
if (bnx2x_admin_app_is_equal(app_ent, idtype, idval))
@@ -2251,7 +2260,7 @@ static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up)
if (ff < 0 && !app_ent->valid)
ff = i;
}
- if (i < 4)
+ if (i < DCBX_CONFIG_MAX_APP_PROTOCOL)
/* if found overwrite up */
bp->dcbx_config_params.
admin_priority_app_table[i].priority = up;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index 2c6a3bc..2ab9254 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -90,6 +90,7 @@ struct bnx2x_admin_priority_app_table {
u32 app_id;
};
+#define DCBX_CONFIG_MAX_APP_PROTOCOL 4
struct bnx2x_config_dcbx_params {
u32 overwrite_settings;
u32 admin_dcbx_version;
@@ -109,7 +110,8 @@ struct bnx2x_config_dcbx_params {
u32 admin_recommendation_bw_precentage[8];
u32 admin_recommendation_ets_pg[8];
u32 admin_pfc_bitmap;
- struct bnx2x_admin_priority_app_table admin_priority_app_table[4];
+ struct bnx2x_admin_priority_app_table
+ admin_priority_app_table[DCBX_CONFIG_MAX_APP_PROTOCOL];
u32 admin_default_priority;
};
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index f0ca8b2..31a8b38 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -107,6 +107,10 @@ static const struct {
4, STATS_FLAGS_PORT, "rx_filtered_packets" },
{ STATS_OFFSET32(mf_tag_discard),
4, STATS_FLAGS_PORT, "rx_mf_tag_discard" },
+ { STATS_OFFSET32(pfc_frames_received_hi),
+ 8, STATS_FLAGS_PORT, "pfc_frames_received" },
+ { STATS_OFFSET32(pfc_frames_sent_hi),
+ 8, STATS_FLAGS_PORT, "pfc_frames_sent" },
{ STATS_OFFSET32(brb_drop_hi),
8, STATS_FLAGS_PORT, "rx_brb_discard" },
{ STATS_OFFSET32(brb_truncate_hi),
@@ -352,7 +356,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
DP(NETIF_MSG_LINK, "Unsupported port type\n");
return -EINVAL;
}
- /* Save new config in case command complete successuly */
+ /* Save new config in case command complete successully */
new_multi_phy_config = bp->link_params.multi_phy_config;
/* Get the new cfg_idx */
cfg_idx = bnx2x_get_link_cfg_idx(bp);
@@ -361,13 +365,18 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
DP(NETIF_MSG_LINK, "cfg_idx = %x\n", cfg_idx);
if (cmd->autoneg == AUTONEG_ENABLE) {
+ u32 an_supported_speed = bp->port.supported[cfg_idx];
+ if (bp->link_params.phy[EXT_PHY1].type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ an_supported_speed |= (SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full);
if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
DP(NETIF_MSG_LINK, "Autoneg not supported\n");
return -EINVAL;
}
/* advertise the requested speed and duplex if supported */
- if (cmd->advertising & ~(bp->port.supported[cfg_idx])) {
+ if (cmd->advertising & ~an_supported_speed) {
DP(NETIF_MSG_LINK, "Advertisement parameters "
"are not supported\n");
return -EINVAL;
@@ -761,8 +770,8 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
struct bnx2x *bp = netdev_priv(dev);
u8 phy_fw_ver[PHY_FW_VER_LEN];
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
phy_fw_ver[0] = '\0';
if (bp->port.pmf) {
@@ -773,14 +782,14 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
bnx2x_release_phy_lock(bp);
}
- strncpy(info->fw_version, bp->fw_ver, 32);
+ strlcpy(info->fw_version, bp->fw_ver, sizeof(info->fw_version));
snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
"bc %d.%d.%d%s%s",
(bp->common.bc_ver & 0xff0000) >> 16,
(bp->common.bc_ver & 0xff00) >> 8,
(bp->common.bc_ver & 0xff),
((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
- strcpy(info->bus_info, pci_name(bp->pdev));
+ strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = BNX2X_NUM_STATS;
info->testinfo_len = BNX2X_NUM_TESTS;
info->eedump_len = bp->common.flash_size;
@@ -1729,7 +1738,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
struct bnx2x_fp_txdata *txdata = &fp_tx->txdata[0];
u16 tx_start_idx, tx_idx;
u16 rx_start_idx, rx_idx;
- u16 pkt_prod, bd_prod, rx_comp_cons;
+ u16 pkt_prod, bd_prod;
struct sw_tx_bd *tx_buf;
struct eth_tx_start_bd *tx_start_bd;
struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
@@ -1740,6 +1749,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
struct sw_rx_bd *rx_buf;
u16 len;
int rc = -ENODEV;
+ u8 *data;
+ struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
/* check the loopback mode */
switch (loopback_mode) {
@@ -1748,8 +1759,18 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
return -EINVAL;
break;
case BNX2X_MAC_LOOPBACK:
- bp->link_params.loopback_mode = CHIP_IS_E3(bp) ?
- LOOPBACK_XMAC : LOOPBACK_BMAC;
+ if (CHIP_IS_E3(bp)) {
+ int cfg_idx = bnx2x_get_link_cfg_idx(bp);
+ if (bp->port.supported[cfg_idx] &
+ (SUPPORTED_10000baseT_Full |
+ SUPPORTED_20000baseMLD2_Full |
+ SUPPORTED_20000baseKR2_Full))
+ bp->link_params.loopback_mode = LOOPBACK_XMAC;
+ else
+ bp->link_params.loopback_mode = LOOPBACK_UMAC;
+ } else
+ bp->link_params.loopback_mode = LOOPBACK_BMAC;
+
bnx2x_phy_init(&bp->link_params, &bp->link_vars);
break;
default:
@@ -1784,6 +1805,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
tx_start_idx = le16_to_cpu(*txdata->tx_cons_sb);
rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
+ netdev_tx_sent_queue(txq, skb->len);
+
pkt_prod = txdata->tx_pkt_prod++;
tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
tx_buf->first_bd = txdata->tx_bd_prod;
@@ -1850,8 +1873,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
if (rx_idx != rx_start_idx + num_pkts)
goto test_loopback_exit;
- rx_comp_cons = le16_to_cpu(fp_rx->rx_comp_cons);
- cqe = &fp_rx->rx_comp_ring[RCQ_BD(rx_comp_cons)];
+ cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
@@ -1865,10 +1887,9 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
dma_sync_single_for_cpu(&bp->pdev->dev,
dma_unmap_addr(rx_buf, mapping),
fp_rx->rx_buf_size, DMA_FROM_DEVICE);
- skb = rx_buf->skb;
- skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
+ data = rx_buf->data + NET_SKB_PAD + cqe->fast_path_cqe.placement_offset;
for (i = ETH_HLEN; i < pkt_size; i++)
- if (*(skb->data + i) != (unsigned char) (i & 0xff))
+ if (*(data + i) != (unsigned char) (i & 0xff))
goto test_loopback_rx_exit;
rc = 0;
@@ -2099,18 +2120,16 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
case ETH_SS_STATS:
if (is_multi(bp)) {
num_stats = bnx2x_num_stat_queues(bp) *
- BNX2X_NUM_Q_STATS;
- if (!IS_MF_MODE_STAT(bp))
- num_stats += BNX2X_NUM_STATS;
- } else {
- if (IS_MF_MODE_STAT(bp)) {
- num_stats = 0;
- for (i = 0; i < BNX2X_NUM_STATS; i++)
- if (IS_FUNC_STAT(i))
- num_stats++;
- } else
- num_stats = BNX2X_NUM_STATS;
- }
+ BNX2X_NUM_Q_STATS;
+ } else
+ num_stats = 0;
+ if (IS_MF_MODE_STAT(bp)) {
+ for (i = 0; i < BNX2X_NUM_STATS; i++)
+ if (IS_FUNC_STAT(i))
+ num_stats++;
+ } else
+ num_stats += BNX2X_NUM_STATS;
+
return num_stats;
case ETH_SS_TEST:
@@ -2129,8 +2148,8 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
switch (stringset) {
case ETH_SS_STATS:
+ k = 0;
if (is_multi(bp)) {
- k = 0;
for_each_eth_queue(bp, i) {
memset(queue_name, 0, sizeof(queue_name));
sprintf(queue_name, "%d", i);
@@ -2141,20 +2160,17 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
queue_name);
k += BNX2X_NUM_Q_STATS;
}
- if (IS_MF_MODE_STAT(bp))
- break;
- for (j = 0; j < BNX2X_NUM_STATS; j++)
- strcpy(buf + (k + j)*ETH_GSTRING_LEN,
- bnx2x_stats_arr[j].string);
- } else {
- for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
- if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
- continue;
- strcpy(buf + j*ETH_GSTRING_LEN,
- bnx2x_stats_arr[i].string);
- j++;
- }
}
+
+
+ for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
+ if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
+ continue;
+ strcpy(buf + (k + j)*ETH_GSTRING_LEN,
+ bnx2x_stats_arr[i].string);
+ j++;
+ }
+
break;
case ETH_SS_TEST:
@@ -2168,10 +2184,9 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
{
struct bnx2x *bp = netdev_priv(dev);
u32 *hw_stats, *offset;
- int i, j, k;
+ int i, j, k = 0;
if (is_multi(bp)) {
- k = 0;
for_each_eth_queue(bp, i) {
hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
@@ -2192,46 +2207,28 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
}
k += BNX2X_NUM_Q_STATS;
}
- if (IS_MF_MODE_STAT(bp))
- return;
- hw_stats = (u32 *)&bp->eth_stats;
- for (j = 0; j < BNX2X_NUM_STATS; j++) {
- if (bnx2x_stats_arr[j].size == 0) {
- /* skip this counter */
- buf[k + j] = 0;
- continue;
- }
- offset = (hw_stats + bnx2x_stats_arr[j].offset);
- if (bnx2x_stats_arr[j].size == 4) {
- /* 4-byte counter */
- buf[k + j] = (u64) *offset;
- continue;
- }
- /* 8-byte counter */
- buf[k + j] = HILO_U64(*offset, *(offset + 1));
+ }
+
+ hw_stats = (u32 *)&bp->eth_stats;
+ for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
+ if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
+ continue;
+ if (bnx2x_stats_arr[i].size == 0) {
+ /* skip this counter */
+ buf[k + j] = 0;
+ j++;
+ continue;
}
- } else {
- hw_stats = (u32 *)&bp->eth_stats;
- for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
- if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
- continue;
- if (bnx2x_stats_arr[i].size == 0) {
- /* skip this counter */
- buf[j] = 0;
- j++;
- continue;
- }
- offset = (hw_stats + bnx2x_stats_arr[i].offset);
- if (bnx2x_stats_arr[i].size == 4) {
- /* 4-byte counter */
- buf[j] = (u64) *offset;
- j++;
- continue;
- }
- /* 8-byte counter */
- buf[j] = HILO_U64(*offset, *(offset + 1));
+ offset = (hw_stats + bnx2x_stats_arr[i].offset);
+ if (bnx2x_stats_arr[i].size == 4) {
+ /* 4-byte counter */
+ buf[k + j] = (u64) *offset;
j++;
+ continue;
}
+ /* 8-byte counter */
+ buf[k + j] = HILO_U64(*offset, *(offset + 1));
+ j++;
}
}
@@ -2285,18 +2282,20 @@ static int bnx2x_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
}
}
-static int bnx2x_get_rxfh_indir(struct net_device *dev,
- struct ethtool_rxfh_indir *indir)
+static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ return (bp->multi_mode == ETH_RSS_MODE_DISABLED ?
+ 0 : T_ETH_INDIRECTION_TABLE_SIZE);
+}
+
+static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
{
struct bnx2x *bp = netdev_priv(dev);
- size_t copy_size =
- min_t(size_t, indir->size, T_ETH_INDIRECTION_TABLE_SIZE);
u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
size_t i;
- if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
- return -EOPNOTSUPP;
-
/* Get the current configuration of the RSS indirection table */
bnx2x_get_rss_ind_table(&bp->rss_conf_obj, ind_table);
@@ -2309,33 +2308,19 @@ static int bnx2x_get_rxfh_indir(struct net_device *dev,
* align the returned table to the Client ID of the leading RSS
* queue.
*/
- for (i = 0; i < copy_size; i++)
- indir->ring_index[i] = ind_table[i] - bp->fp->cl_id;
-
- indir->size = T_ETH_INDIRECTION_TABLE_SIZE;
+ for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++)
+ indir[i] = ind_table[i] - bp->fp->cl_id;
return 0;
}
-static int bnx2x_set_rxfh_indir(struct net_device *dev,
- const struct ethtool_rxfh_indir *indir)
+static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
{
struct bnx2x *bp = netdev_priv(dev);
size_t i;
u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
- u32 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
-
- if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
- return -EOPNOTSUPP;
-
- /* validate the size */
- if (indir->size != T_ETH_INDIRECTION_TABLE_SIZE)
- return -EINVAL;
for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
- /* validate the indices */
- if (indir->ring_index[i] >= num_eth_queues)
- return -EINVAL;
/*
* The same as in bnx2x_get_rxfh_indir: we can't use a memcpy()
* as an internal storage of an indirection table is a u8 array
@@ -2345,7 +2330,7 @@ static int bnx2x_set_rxfh_indir(struct net_device *dev,
* align the received table to the Client ID of the leading RSS
* queue
*/
- ind_table[i] = indir->ring_index[i] + bp->fp->cl_id;
+ ind_table[i] = indir[i] + bp->fp->cl_id;
}
return bnx2x_config_rss_pf(bp, ind_table, false);
@@ -2378,6 +2363,7 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.set_phys_id = bnx2x_set_phys_id,
.get_ethtool_stats = bnx2x_get_ethtool_stats,
.get_rxnfc = bnx2x_get_rxnfc,
+ .get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
.get_rxfh_indir = bnx2x_get_rxfh_indir,
.set_rxfh_indir = bnx2x_set_rxfh_indir,
};
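
The hunk above moves the driver to the newer RX flow hash indirection interface: the ethtool core first calls .get_rxfh_indir_size() to learn the table length (0 when RSS is disabled), then passes a bare u32 array to .get_rxfh_indir()/.set_rxfh_indir(), which is why the explicit size and ring-index checks are dropped from bnx2x_set_rxfh_indir(). Below is a minimal user-space sketch of that calling convention; TABLE_SIZE, fake_dev and the two helpers are illustrative stand-ins, not the kernel types.

#include <stdio.h>
#include <stdlib.h>

#define TABLE_SIZE 128			/* stand-in for T_ETH_INDIRECTION_TABLE_SIZE */

struct fake_dev {
	unsigned char cl_id;			/* client ID of the leading RSS queue */
	unsigned char ind_table[TABLE_SIZE];	/* HW table holds client IDs */
};

static unsigned int get_rxfh_indir_size(const struct fake_dev *dev)
{
	(void)dev;
	return TABLE_SIZE;			/* 0 would mean RSS is disabled */
}

static int get_rxfh_indir(const struct fake_dev *dev, unsigned int *indir)
{
	unsigned int i;

	/* convert HW client IDs back to 0-based queue numbers */
	for (i = 0; i < TABLE_SIZE; i++)
		indir[i] = dev->ind_table[i] - dev->cl_id;
	return 0;
}

int main(void)
{
	struct fake_dev dev = { .cl_id = 17 };
	unsigned int size, i, *indir;

	for (i = 0; i < TABLE_SIZE; i++)
		dev.ind_table[i] = dev.cl_id + (i % 4);	/* spread over 4 queues */

	size = get_rxfh_indir_size(&dev);
	indir = calloc(size, sizeof(*indir));
	if (!indir || get_rxfh_indir(&dev, indir))
		return 1;

	printf("hash bucket 5 -> queue %u\n", indir[5]);	/* prints 1 */
	free(indir);
	return 0;
}

The cl_id subtraction mirrors the alignment comment in bnx2x_get_rxfh_indir() above: the hardware table stores client IDs, while ethtool reports 0-based queue indices.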
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index fc754cb..3e30c86 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -1247,11 +1247,14 @@ struct drv_func_mb {
#define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
#define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
#define REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED 0x00070014
+ #define REQ_BC_VER_4_PFC_STATS_SUPPORTED 0x00070201
#define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000
#define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000
+ #define DRV_MSG_CODE_DRV_INFO_ACK 0xd8000000
+ #define DRV_MSG_CODE_DRV_INFO_NACK 0xd9000000
#define DRV_MSG_CODE_SET_MF_BW 0xe0000000
#define REQ_BC_VER_4_SET_MF_BW 0x00060202
@@ -1304,6 +1307,8 @@ struct drv_func_mb {
#define FW_MSG_CODE_VRFY_OPT_MDL_INVLD_IMG 0xa0200000
#define FW_MSG_CODE_VRFY_OPT_MDL_UNAPPROVED 0xa0300000
#define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000
+ #define FW_MSG_CODE_DRV_INFO_ACK 0xd8100000
+ #define FW_MSG_CODE_DRV_INFO_NACK 0xd9100000
#define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000
#define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000
@@ -1360,6 +1365,7 @@ struct drv_func_mb {
#define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000
#define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000
+ #define DRV_STATUS_DRV_INFO_REQ 0x04000000
u32 virt_mac_upper;
#define VIRT_MAC_SIGN_MASK 0xffff0000
@@ -1964,9 +1970,38 @@ struct shmem2_region {
u32 extended_dev_info_shared_addr;
u32 ncsi_oem_data_addr;
- u32 ocsd_host_addr;
- u32 ocbb_host_addr;
- u32 ocsd_req_update_interval;
+ u32 ocsd_host_addr; /* initialized by option ROM */
+ u32 ocbb_host_addr; /* initialized by option ROM */
+ u32 ocsd_req_update_interval; /* initialized by option ROM */
+ u32 temperature_in_half_celsius;
+ u32 glob_struct_in_host;
+
+ u32 dcbx_neg_res_ext_offset;
+#define SHMEM_DCBX_NEG_RES_EXT_NONE 0x00000000
+
+ u32 drv_capabilities_flag[E2_FUNC_MAX];
+#define DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED 0x00000001
+#define DRV_FLAGS_CAPABILITIES_LOADED_L2 0x00000002
+#define DRV_FLAGS_CAPABILITIES_LOADED_FCOE 0x00000004
+#define DRV_FLAGS_CAPABILITIES_LOADED_ISCSI 0x00000008
+
+ u32 extended_dev_info_shared_cfg_size;
+
+ u32 dcbx_en[PORT_MAX];
+
+ /* The offset points to the multi threaded meta structure */
+ u32 multi_thread_data_offset;
+
+ /* DMAable host address holding values from the drivers */
+ u32 drv_info_host_addr_lo;
+ u32 drv_info_host_addr_hi;
+
+ /* general values written by the MFW (such as current version) */
+ u32 drv_info_control;
+#define DRV_INFO_CONTROL_VER_MASK 0x000000ff
+#define DRV_INFO_CONTROL_VER_SHIFT 0
+#define DRV_INFO_CONTROL_OP_CODE_MASK 0x0000ff00
+#define DRV_INFO_CONTROL_OP_CODE_SHIFT 8
};
@@ -2501,14 +2536,18 @@ struct mac_stx {
#define MAC_STX_IDX_MAX 2
struct host_port_stats {
- u32 host_port_stats_start;
+ u32 host_port_stats_counter;
struct mac_stx mac_stx[MAC_STX_IDX_MAX];
u32 brb_drop_hi;
u32 brb_drop_lo;
- u32 host_port_stats_end;
+ u32 not_used; /* obsolete */
+ u32 pfc_frames_tx_hi;
+ u32 pfc_frames_tx_lo;
+ u32 pfc_frames_rx_hi;
+ u32 pfc_frames_rx_lo;
};
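
The host_port_stats layout above replaces the old start/end markers with a counter word and four new PFC frame counters, kept as 32-bit hi/lo pairs like the other 8-byte MAC statistics; they surface through the "pfc_frames_received"/"pfc_frames_sent" entries added to the ethtool stats table earlier in this patch. A small stand-alone sketch of how such a pair folds into one 64-bit value follows; hilo_u64 here is an illustrative stand-in for the driver's HILO_U64 macro, which performs the equivalent combination.

#include <stdint.h>
#include <stdio.h>

/* combine a hi/lo pair the way an 8-byte ethtool counter is reported */
static uint64_t hilo_u64(uint32_t hi, uint32_t lo)
{
	return ((uint64_t)hi << 32) + lo;
}

int main(void)
{
	uint32_t pfc_frames_rx_hi = 1, pfc_frames_rx_lo = 2;

	printf("pfc_frames_received = %llu\n",
	       (unsigned long long)hilo_u64(pfc_frames_rx_hi, pfc_frames_rx_lo));
	return 0;	/* prints 4294967298 */
}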
@@ -2548,6 +2587,118 @@ struct host_func_stats {
/* VIC definitions */
#define VICSTATST_UIF_INDEX 2
+/* current drv_info version */
+#define DRV_INFO_CUR_VER 1
+
+/* drv_info op codes supported */
+enum drv_info_opcode {
+ ETH_STATS_OPCODE,
+ FCOE_STATS_OPCODE,
+ ISCSI_STATS_OPCODE
+};
+
+#define ETH_STAT_INFO_VERSION_LEN 12
+/* Per PCI Function Ethernet Statistics required from the driver */
+struct eth_stats_info {
+ /* Function's Driver Version. padded to 12 */
+ u8 version[ETH_STAT_INFO_VERSION_LEN];
+ /* Locally Admin Addr. BigEndian EUI48. Actual size is 6 bytes */
+ u8 mac_local[8];
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
+ u32 mtu_size; /* MTU Size. Note : Negotiated MTU */
+ u32 feature_flags; /* Feature_Flags. */
+#define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01
+#define FEATURE_ETH_LSO_MASK 0x02
+#define FEATURE_ETH_BOOTMODE_MASK 0x1C
+#define FEATURE_ETH_BOOTMODE_SHIFT 2
+#define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2)
+#define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2)
+#define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2)
+#define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2)
+#define FEATURE_ETH_TOE_MASK 0x20
+ u32 lso_max_size; /* LSO MaxOffloadSize. */
+ u32 lso_min_seg_cnt; /* LSO MinSegmentCount. */
+ /* Num Offloaded Connections TCP_IPv4. */
+ u32 ipv4_ofld_cnt;
+ /* Num Offloaded Connections TCP_IPv6. */
+ u32 ipv6_ofld_cnt;
+ u32 promiscuous_mode; /* Promiscuous Mode. non-zero true */
+ u32 txq_size; /* TX Descriptors Queue Size */
+ u32 rxq_size; /* RX Descriptors Queue Size */
+ /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */
+ u32 txq_avg_depth;
+ /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */
+ u32 rxq_avg_depth;
+ /* IOV_Offload. 0=none; 1=MultiQueue; 2=VEB; 3=VEPA */
+ u32 iov_offload;
+ /* Number of NetQueue/VMQ Config'd. */
+ u32 netq_cnt;
+ u32 vf_cnt; /* Num VF assigned to this PF. */
+};
+
+/* Per PCI Function FCOE Statistics required from the driver */
+struct fcoe_stats_info {
+ u8 version[12]; /* Function's Driver Version. */
+ u8 mac_local[8]; /* Locally Admin Addr. */
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */
+ /* QoS Priority (per 802.1p). 0-7255 */
+ u32 qos_priority;
+ u32 txq_size; /* FCoE TX Descriptors Queue Size. */
+ u32 rxq_size; /* FCoE RX Descriptors Queue Size. */
+ /* FCoE TX Descriptor Queue Avg Depth. */
+ u32 txq_avg_depth;
+ /* FCoE RX Descriptors Queue Avg Depth. */
+ u32 rxq_avg_depth;
+ u32 rx_frames_lo; /* FCoE RX Frames received. */
+ u32 rx_frames_hi; /* FCoE RX Frames received. */
+ u32 rx_bytes_lo; /* FCoE RX Bytes received. */
+ u32 rx_bytes_hi; /* FCoE RX Bytes received. */
+ u32 tx_frames_lo; /* FCoE TX Frames sent. */
+ u32 tx_frames_hi; /* FCoE TX Frames sent. */
+ u32 tx_bytes_lo; /* FCoE TX Bytes sent. */
+ u32 tx_bytes_hi; /* FCoE TX Bytes sent. */
+};
+
+/* Per PCI Function iSCSI Statistics required from the driver*/
+struct iscsi_stats_info {
+ u8 version[12]; /* Function's Driver Version. */
+ u8 mac_local[8]; /* Locally Admin iSCSI MAC Addr. */
+ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */
+ /* QoS Priority (per 802.1p). 0-7255 */
+ u32 qos_priority;
+ u8 initiator_name[64]; /* iSCSI Boot Initiator Node name. */
+ u8 ww_port_name[64]; /* iSCSI World wide port name */
+ u8 boot_target_name[64];/* iSCSI Boot Target Name. */
+ u8 boot_target_ip[16]; /* iSCSI Boot Target IP. */
+ u32 boot_target_portal; /* iSCSI Boot Target Portal. */
+ u8 boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */
+ u32 max_frame_size; /* Max Frame Size. bytes */
+ u32 txq_size; /* PDU TX Descriptors Queue Size. */
+ u32 rxq_size; /* PDU RX Descriptors Queue Size. */
+ u32 txq_avg_depth; /* PDU TX Descriptor Queue Avg Depth. */
+ u32 rxq_avg_depth; /* PDU RX Descriptors Queue Avg Depth. */
+ u32 rx_pdus_lo; /* iSCSI PDUs received. */
+ u32 rx_pdus_hi; /* iSCSI PDUs received. */
+ u32 rx_bytes_lo; /* iSCSI RX Bytes received. */
+ u32 rx_bytes_hi; /* iSCSI RX Bytes received. */
+ u32 tx_pdus_lo; /* iSCSI PDUs sent. */
+ u32 tx_pdus_hi; /* iSCSI PDUs sent. */
+ u32 tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */
+ u32 tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */
+ u32 pcp_prior_map_tbl; /* C-PCP to S-PCP Priority MapTable.
+ * 9 nibbles, the position of each nibble
+ * represents the C-PCP value, the value
+ * of the nibble = S-PCP value.
+ */
+};
+
+union drv_info_to_mcp {
+ struct eth_stats_info ether_stat;
+ struct fcoe_stats_info fcoe_stat;
+ struct iscsi_stats_info iscsi_stat;
+};
#define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 0
#define BCM_5710_FW_REVISION_VERSION 29
@@ -4161,8 +4312,62 @@ struct ustorm_eth_rx_producers {
/*
- * cfc delete event data
+ * FCoE RX statistics parameters section#0
*/
+struct fcoe_rx_stat_params_section0 {
+ __le32 fcoe_rx_pkt_cnt;
+ __le32 fcoe_rx_byte_cnt;
+};
+
+
+/*
+ * FCoE RX statistics parameters section#1
+ */
+struct fcoe_rx_stat_params_section1 {
+ __le32 fcoe_ver_cnt;
+ __le32 fcoe_rx_drop_pkt_cnt;
+};
+
+
+/*
+ * FCoE RX statistics parameters section#2
+ */
+struct fcoe_rx_stat_params_section2 {
+ __le32 fc_crc_cnt;
+ __le32 eofa_del_cnt;
+ __le32 miss_frame_cnt;
+ __le32 seq_timeout_cnt;
+ __le32 drop_seq_cnt;
+ __le32 fcoe_rx_drop_pkt_cnt;
+ __le32 fcp_rx_pkt_cnt;
+ __le32 reserved0;
+};
+
+
+/*
+ * FCoE TX statistics parameters
+ */
+struct fcoe_tx_stat_params {
+ __le32 fcoe_tx_pkt_cnt;
+ __le32 fcoe_tx_byte_cnt;
+ __le32 fcp_tx_pkt_cnt;
+ __le32 reserved0;
+};
+
+/*
+ * FCoE statistics parameters
+ */
+struct fcoe_statistics_params {
+ struct fcoe_tx_stat_params tx_stat;
+ struct fcoe_rx_stat_params_section0 rx_stat0;
+ struct fcoe_rx_stat_params_section1 rx_stat1;
+ struct fcoe_rx_stat_params_section2 rx_stat2;
+};
+
+
+/*
+ * cfc delete event data
+ */
struct cfc_del_event_data {
u32 cid;
u32 reserved0;
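
The shmem2 additions earlier in this file set up a driver-information channel: when the MFW raises DRV_STATUS_DRV_INFO_REQ, the opcode in drv_info_control selects which member of union drv_info_to_mcp the driver fills at drv_info_host_addr before answering with DRV_MSG_CODE_DRV_INFO_ACK (or NACK). A minimal sketch of that dispatch is given below; handle_drv_info_req is a hypothetical helper, not the driver's actual handler, and the stats structures are trimmed to a couple of fields for brevity (the real ones are declared above).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum drv_info_opcode { ETH_STATS_OPCODE, FCOE_STATS_OPCODE, ISCSI_STATS_OPCODE };

/* trimmed stand-ins for the full structures declared in bnx2x_hsi.h */
struct eth_stats_info   { uint8_t version[12]; uint32_t mtu_size; };
struct fcoe_stats_info  { uint8_t version[12]; uint32_t rx_frames_lo; };
struct iscsi_stats_info { uint8_t version[12]; uint32_t rx_pdus_lo; };

union drv_info_to_mcp {
	struct eth_stats_info   ether_stat;
	struct fcoe_stats_info  fcoe_stat;
	struct iscsi_stats_info iscsi_stat;
};

#define DRV_INFO_CONTROL_OP_CODE_MASK	0x0000ff00
#define DRV_INFO_CONTROL_OP_CODE_SHIFT	8

/* returns 0 to ACK the request, -1 to NACK an unknown opcode */
static int handle_drv_info_req(uint32_t drv_info_control, union drv_info_to_mcp *buf)
{
	uint32_t op = (drv_info_control & DRV_INFO_CONTROL_OP_CODE_MASK) >>
		      DRV_INFO_CONTROL_OP_CODE_SHIFT;

	memset(buf, 0, sizeof(*buf));
	switch (op) {
	case ETH_STATS_OPCODE:
		strncpy((char *)buf->ether_stat.version, "7.0.29", 12);
		buf->ether_stat.mtu_size = 1500;
		return 0;
	case FCOE_STATS_OPCODE:
	case ISCSI_STATS_OPCODE:
		return 0;	/* storage personalities would fill their members here */
	default:
		return -1;
	}
}

int main(void)
{
	union drv_info_to_mcp buf;
	uint32_t control = ETH_STATS_OPCODE << DRV_INFO_CONTROL_OP_CODE_SHIFT;

	if (!handle_drv_info_req(control, &buf))
		printf("eth stats: version %s, mtu %u\n",
		       (char *)buf.ether_stat.version, buf.ether_stat.mtu_size);
	return 0;
}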
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 882f48f..2091e5d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -27,7 +27,6 @@
#include "bnx2x.h"
#include "bnx2x_cmn.h"
-
/********************************************************/
#define ETH_HLEN 14
/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
@@ -163,6 +162,11 @@
#define EDC_MODE_LIMITING 0x0044
#define EDC_MODE_PASSIVE_DAC 0x0055
+/* BRB default for class 0 E2 */
+#define DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR 170
+#define DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR 250
+#define DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR 10
+#define DEFAULT0_E2_BRB_MAC_FULL_XON_THR 50
/* BRB thresholds for E2*/
#define PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE 170
@@ -177,6 +181,12 @@
#define PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE 50
#define PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE 250
+/* BRB default for class 0 E3A0 */
+#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR 290
+#define DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR 410
+#define DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR 10
+#define DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR 50
+
/* BRB thresholds for E3A0 */
#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE 290
#define PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE 0
@@ -190,6 +200,11 @@
#define PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE 50
#define PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE 410
+/* BRB default for E3B0 */
+#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR 330
+#define DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR 490
+#define DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR 15
+#define DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR 55
/* BRB thresholds for E3B0 2 port mode*/
#define PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE 1025
@@ -239,18 +254,29 @@
#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE 50
#define PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE 384
-
/* only for E3B0*/
#define PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR 304
#define PFC_E3B0_4P_BRB_FULL_LB_XON_THR 384
-#define PFC_E3B0_4P_LB_GUART 120
+#define PFC_E3B0_4P_LB_GUART 120
#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART 120
-#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST 80
+#define PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST 80
#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART 80
-#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST 120
+#define PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST 120
+
+/* Pause defines*/
+#define DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR 330
+#define DEFAULT_E3B0_BRB_FULL_LB_XON_THR 490
+#define DEFAULT_E3B0_LB_GUART 40
+#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART 40
+#define DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST 0
+
+#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART 40
+#define DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST 0
+
+/* ETS defines*/
#define DCBX_INVALID_COS (0xFF)
#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000)
@@ -440,7 +466,7 @@ static u32 bnx2x_ets_get_min_w_val_nig(const struct link_vars *vars)
u32 min_w_val = 0;
/* Calculate min_w_val.*/
if (vars->link_up) {
- if (SPEED_20000 == vars->line_speed)
+ if (vars->line_speed == SPEED_20000)
min_w_val = ETS_E3B0_NIG_MIN_W_VAL_20GBPS;
else
min_w_val = ETS_E3B0_NIG_MIN_W_VAL_UP_TO_10GBPS;
@@ -490,7 +516,7 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_nig(
REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_UPPER_BOUND_5 :
NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_5, credit_upper_bound);
- if (0 == port) {
+ if (!port) {
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_6,
credit_upper_bound);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_7,
@@ -584,7 +610,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_4, 0x0);
REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_CREDIT_WEIGHT_5 :
NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_5, 0x0);
- if (0 == port) {
+ if (!port) {
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_6, 0x0);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_7, 0x0);
REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_8, 0x0);
@@ -612,7 +638,7 @@ static void bnx2x_ets_e3b0_set_credit_upper_bound_pbf(
* In 2 port mode port0 has COS0-5 that can be used for WFQ.In 4
* port mode port1 has COS0-2 that can be used for WFQ.
*/
- if (0 == port) {
+ if (!port) {
base_upper_bound = PBF_REG_COS0_UPPER_BOUND_P0;
max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
} else {
@@ -674,7 +700,7 @@ static void bnx2x_ets_e3b0_pbf_disabled(const struct link_params *params)
* In 2 port mode port0 has COS0-5 that can be used for WFQ.
* In 4 port mode port1 has COS0-2 that can be used for WFQ.
*/
- if (0 == port) {
+ if (!port) {
base_weight = PBF_REG_COS0_WEIGHT_P0;
max_cos = DCBX_E3B0_MAX_NUM_COS_PORT0;
} else {
@@ -846,34 +872,47 @@ static int bnx2x_ets_e3b0_set_cos_bw(struct bnx2x *bp,
******************************************************************************/
static int bnx2x_ets_e3b0_get_total_bw(
const struct link_params *params,
- const struct bnx2x_ets_params *ets_params,
+ struct bnx2x_ets_params *ets_params,
u16 *total_bw)
{
struct bnx2x *bp = params->bp;
u8 cos_idx = 0;
+ u8 is_bw_cos_exist = 0;
*total_bw = 0 ;
+
/* Calculate total BW requested */
for (cos_idx = 0; cos_idx < ets_params->num_of_cos; cos_idx++) {
- if (bnx2x_cos_state_bw == ets_params->cos[cos_idx].state) {
+ if (ets_params->cos[cos_idx].state == bnx2x_cos_state_bw) {
+ is_bw_cos_exist = 1;
+ if (!ets_params->cos[cos_idx].params.bw_params.bw) {
+ DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config BW"
+ " was set to 0\n");
+ /*
+ * This is to prevent a state when ramrods
+ * can't be sent
+ */
+ ets_params->cos[cos_idx].params.bw_params.bw
+ = 1;
+ }
*total_bw +=
ets_params->cos[cos_idx].params.bw_params.bw;
}
}
/* Check total BW is valid */
- if ((100 != *total_bw) || (0 == *total_bw)) {
- if (0 == *total_bw) {
+ if ((is_bw_cos_exist == 1) && (*total_bw != 100)) {
+ if (*total_bw == 0) {
DP(NETIF_MSG_LINK,
- "bnx2x_ets_E3B0_config toatl BW shouldn't be 0\n");
+ "bnx2x_ets_E3B0_config total BW shouldn't be 0\n");
return -EINVAL;
}
DP(NETIF_MSG_LINK,
- "bnx2x_ets_E3B0_config toatl BW should be 100\n");
- /**
- * We can handle a case whre the BW isn't 100 this can happen
- * if the TC are joined.
- */
+ "bnx2x_ets_E3B0_config total BW should be 100\n");
+ /*
+ * We can handle a case where the BW isn't 100; this can happen
+ * if the TCs are joined.
+ */
}
return 0;
}
@@ -904,7 +943,7 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
const u8 max_num_of_cos = (port) ? DCBX_E3B0_MAX_NUM_COS_PORT1 :
DCBX_E3B0_MAX_NUM_COS_PORT0;
- if (DCBX_INVALID_COS != sp_pri_to_cos[pri]) {
+ if (sp_pri_to_cos[pri] != DCBX_INVALID_COS) {
DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
"parameter There can't be two COS's with "
"the same strict pri\n");
@@ -913,7 +952,7 @@ static int bnx2x_ets_e3b0_sp_pri_to_cos_set(const struct link_params *params,
if (pri > max_num_of_cos) {
DP(NETIF_MSG_LINK, "bnx2x_ets_e3b0_sp_pri_to_cos_set invalid "
- "parameter Illegal strict priority\n");
+ "parameter Illegal strict priority\n");
return -EINVAL;
}
@@ -995,8 +1034,8 @@ static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
/* Set all the strict priority first */
for (i = 0; i < max_num_of_cos; i++) {
- if (DCBX_INVALID_COS != sp_pri_to_cos[i]) {
- if (DCBX_MAX_NUM_COS <= sp_pri_to_cos[i]) {
+ if (sp_pri_to_cos[i] != DCBX_INVALID_COS) {
+ if (sp_pri_to_cos[i] >= DCBX_MAX_NUM_COS) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_e3b0_sp_set_pri_cli_reg "
"invalid cos entry\n");
@@ -1010,7 +1049,7 @@ static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
sp_pri_to_cos[i], pri_set);
pri_bitmask = 1 << sp_pri_to_cos[i];
/* COS is used remove it from bitmap.*/
- if (0 == (pri_bitmask & cos_bit_to_set)) {
+ if (!(pri_bitmask & cos_bit_to_set)) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_e3b0_sp_set_pri_cli_reg "
"invalid There can't be two COS's with"
@@ -1072,7 +1111,7 @@ static int bnx2x_ets_e3b0_sp_set_pri_cli_reg(const struct link_params *params,
******************************************************************************/
int bnx2x_ets_e3b0_config(const struct link_params *params,
const struct link_vars *vars,
- const struct bnx2x_ets_params *ets_params)
+ struct bnx2x_ets_params *ets_params)
{
struct bnx2x *bp = params->bp;
int bnx2x_status = 0;
@@ -1105,15 +1144,15 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
/* Prepare BW parameters*/
bnx2x_status = bnx2x_ets_e3b0_get_total_bw(params, ets_params,
&total_bw);
- if (0 != bnx2x_status) {
+ if (bnx2x_status) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_E3B0_config get_total_bw failed\n");
return -EINVAL;
}
- /**
- * Upper bound is set according to current link speed (min_w_val
- * should be the same for upper bound and COS credit val).
+ /*
+ * Upper bound is set according to current link speed (min_w_val
+ * should be the same for upper bound and COS credit val).
*/
bnx2x_ets_e3b0_set_credit_upper_bound_nig(params, min_w_val_nig);
bnx2x_ets_e3b0_set_credit_upper_bound_pbf(params, min_w_val_pbf);
@@ -1122,7 +1161,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
for (cos_entry = 0; cos_entry < ets_params->num_of_cos; cos_entry++) {
if (bnx2x_cos_state_bw == ets_params->cos[cos_entry].state) {
cos_bw_bitmap |= (1 << cos_entry);
- /**
+ /*
* The function also sets the BW in HW(not the mappin
* yet)
*/
@@ -1146,7 +1185,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
"bnx2x_ets_e3b0_config cos state not valid\n");
return -EINVAL;
}
- if (0 != bnx2x_status) {
+ if (bnx2x_status) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_e3b0_config set cos bw failed\n");
return bnx2x_status;
@@ -1157,7 +1196,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
bnx2x_status = bnx2x_ets_e3b0_sp_set_pri_cli_reg(params,
sp_pri_to_cos);
- if (0 != bnx2x_status) {
+ if (bnx2x_status) {
DP(NETIF_MSG_LINK,
"bnx2x_ets_E3B0_config set_pri_cli_reg failed\n");
return bnx2x_status;
@@ -1168,7 +1207,7 @@ int bnx2x_ets_e3b0_config(const struct link_params *params,
cos_sp_bitmap,
cos_bw_bitmap);
- if (0 != bnx2x_status) {
+ if (bnx2x_status) {
DP(NETIF_MSG_LINK, "bnx2x_ets_E3B0_config SP failed\n");
return bnx2x_status;
}
@@ -1232,9 +1271,9 @@ void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
- if ((0 == total_bw) ||
- (0 == cos0_bw) ||
- (0 == cos1_bw)) {
+ if ((!total_bw) ||
+ (!cos0_bw) ||
+ (!cos1_bw)) {
DP(NETIF_MSG_LINK, "Total BW can't be zero\n");
return;
}
@@ -1290,7 +1329,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
* dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000
* dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000
*/
- val = (0 == strict_cos) ? 0x2318 : 0x22E0;
+ val = (!strict_cos) ? 0x2318 : 0x22E0;
REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
return 0;
@@ -1298,7 +1337,6 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
/******************************************************************/
/* PFC section */
/******************************************************************/
-
static void bnx2x_update_pfc_xmac(struct link_params *params,
struct link_vars *vars,
u8 is_lb)
@@ -1401,7 +1439,7 @@ void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
if (!vars->link_up)
return;
- if (MAC_TYPE_EMAC == vars->mac_type) {
+ if (vars->mac_type == MAC_TYPE_EMAC) {
DP(NETIF_MSG_LINK, "About to read PFC stats from EMAC\n");
bnx2x_emac_get_pfc_stat(params, pfc_frames_sent,
pfc_frames_received);
@@ -1435,6 +1473,18 @@ static void bnx2x_set_mdio_clk(struct bnx2x *bp, u32 chip_id, u8 port)
udelay(40);
}
+static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
+{
+ u32 port4mode_ovwr_val;
+ /* Check 4-port override enabled */
+ port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
+ if (port4mode_ovwr_val & (1<<0)) {
+ /* Return 4-port mode override value */
+ return ((port4mode_ovwr_val & (1<<1)) == (1<<1));
+ }
+ /* Return 4-port mode from input pin */
+ return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN);
+}
static void bnx2x_emac_init(struct link_params *params,
struct link_vars *vars)
@@ -1601,31 +1651,18 @@ static void bnx2x_umac_enable(struct link_params *params,
}
-static u8 bnx2x_is_4_port_mode(struct bnx2x *bp)
-{
- u32 port4mode_ovwr_val;
- /* Check 4-port override enabled */
- port4mode_ovwr_val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
- if (port4mode_ovwr_val & (1<<0)) {
- /* Return 4-port mode override value */
- return ((port4mode_ovwr_val & (1<<1)) == (1<<1));
- }
- /* Return 4-port mode from input pin */
- return (u8)REG_RD(bp, MISC_REG_PORT4MODE_EN);
-}
-
/* Define the XMAC mode */
static void bnx2x_xmac_init(struct link_params *params, u32 max_speed)
{
struct bnx2x *bp = params->bp;
u32 is_port4mode = bnx2x_is_4_port_mode(bp);
- /**
- * In 4-port mode, need to set the mode only once, so if XMAC is
- * already out of reset, it means the mode has already been set,
- * and it must not* reset the XMAC again, since it controls both
- * ports of the path
- **/
+ /*
+ * In 4-port mode, need to set the mode only once, so if XMAC is
+ * already out of reset, it means the mode has already been set,
+ * and it must not reset the XMAC again, since it controls both
+ * ports of the path
+ */
if ((CHIP_NUM(bp) == CHIP_NUM_57840) &&
(REG_RD(bp, MISC_REG_RESET_REG_2) &
@@ -1743,6 +1780,7 @@ static int bnx2x_xmac_enable(struct link_params *params,
return 0;
}
+
static int bnx2x_emac_enable(struct link_params *params,
struct link_vars *vars, u8 lb)
{
@@ -1999,7 +2037,6 @@ static void bnx2x_update_pfc_bmac2(struct link_params *params,
REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_BMAC_CONTROL, wb_data, 2);
}
-
/* PFC BRB internal port configuration params */
struct bnx2x_pfc_brb_threshold_val {
u32 pause_xoff;
@@ -2009,6 +2046,8 @@ struct bnx2x_pfc_brb_threshold_val {
};
struct bnx2x_pfc_brb_e3b0_val {
+ u32 per_class_guaranty_mode;
+ u32 lb_guarantied_hyst;
u32 full_lb_xoff_th;
u32 full_lb_xon_threshold;
u32 lb_guarantied;
@@ -2021,6 +2060,9 @@ struct bnx2x_pfc_brb_e3b0_val {
struct bnx2x_pfc_brb_th_val {
struct bnx2x_pfc_brb_threshold_val pauseable_th;
struct bnx2x_pfc_brb_threshold_val non_pauseable_th;
+ struct bnx2x_pfc_brb_threshold_val default_class0;
+ struct bnx2x_pfc_brb_threshold_val default_class1;
+
};
static int bnx2x_pfc_brb_get_config_params(
struct link_params *params,
@@ -2028,140 +2070,200 @@ static int bnx2x_pfc_brb_get_config_params(
{
struct bnx2x *bp = params->bp;
DP(NETIF_MSG_LINK, "Setting PFC BRB configuration\n");
+
+ config_val->default_class1.pause_xoff = 0;
+ config_val->default_class1.pause_xon = 0;
+ config_val->default_class1.full_xoff = 0;
+ config_val->default_class1.full_xon = 0;
+
if (CHIP_IS_E2(bp)) {
+ /* class0 defaults */
+ config_val->default_class0.pause_xoff =
+ DEFAULT0_E2_BRB_MAC_PAUSE_XOFF_THR;
+ config_val->default_class0.pause_xon =
+ DEFAULT0_E2_BRB_MAC_PAUSE_XON_THR;
+ config_val->default_class0.full_xoff =
+ DEFAULT0_E2_BRB_MAC_FULL_XOFF_THR;
+ config_val->default_class0.full_xon =
+ DEFAULT0_E2_BRB_MAC_FULL_XON_THR;
+ /* pause able*/
config_val->pauseable_th.pause_xoff =
- PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+ PFC_E2_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
config_val->pauseable_th.pause_xon =
- PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE;
+ PFC_E2_BRB_MAC_PAUSE_XON_THR_PAUSE;
config_val->pauseable_th.full_xoff =
- PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
+ PFC_E2_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
- PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
+ PFC_E2_BRB_MAC_FULL_XON_THR_PAUSE;
/* non pause able*/
config_val->non_pauseable_th.pause_xoff =
- PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+ PFC_E2_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
- PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+ PFC_E2_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
config_val->non_pauseable_th.full_xoff =
- PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+ PFC_E2_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.full_xon =
- PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+ PFC_E2_BRB_MAC_FULL_XON_THR_NON_PAUSE;
} else if (CHIP_IS_E3A0(bp)) {
+ /* class0 defaults */
+ config_val->default_class0.pause_xoff =
+ DEFAULT0_E3A0_BRB_MAC_PAUSE_XOFF_THR;
+ config_val->default_class0.pause_xon =
+ DEFAULT0_E3A0_BRB_MAC_PAUSE_XON_THR;
+ config_val->default_class0.full_xoff =
+ DEFAULT0_E3A0_BRB_MAC_FULL_XOFF_THR;
+ config_val->default_class0.full_xon =
+ DEFAULT0_E3A0_BRB_MAC_FULL_XON_THR;
+ /* pause able */
config_val->pauseable_th.pause_xoff =
- PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+ PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
config_val->pauseable_th.pause_xon =
- PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE;
+ PFC_E3A0_BRB_MAC_PAUSE_XON_THR_PAUSE;
config_val->pauseable_th.full_xoff =
- PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
+ PFC_E3A0_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
- PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
+ PFC_E3A0_BRB_MAC_FULL_XON_THR_PAUSE;
/* non pause able*/
config_val->non_pauseable_th.pause_xoff =
- PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+ PFC_E3A0_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
- PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+ PFC_E3A0_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
config_val->non_pauseable_th.full_xoff =
- PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+ PFC_E3A0_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.full_xon =
- PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+ PFC_E3A0_BRB_MAC_FULL_XON_THR_NON_PAUSE;
} else if (CHIP_IS_E3B0(bp)) {
+ /* class0 defaults */
+ config_val->default_class0.pause_xoff =
+ DEFAULT0_E3B0_BRB_MAC_PAUSE_XOFF_THR;
+ config_val->default_class0.pause_xon =
+ DEFAULT0_E3B0_BRB_MAC_PAUSE_XON_THR;
+ config_val->default_class0.full_xoff =
+ DEFAULT0_E3B0_BRB_MAC_FULL_XOFF_THR;
+ config_val->default_class0.full_xon =
+ DEFAULT0_E3B0_BRB_MAC_FULL_XON_THR;
+
if (params->phy[INT_PHY].flags &
FLAGS_4_PORT_MODE) {
config_val->pauseable_th.pause_xoff =
- PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+ PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
config_val->pauseable_th.pause_xon =
- PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE;
+ PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_PAUSE;
config_val->pauseable_th.full_xoff =
- PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
+ PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_PAUSE;
config_val->pauseable_th.full_xon =
- PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
+ PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_PAUSE;
/* non pause able*/
config_val->non_pauseable_th.pause_xoff =
- PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+ PFC_E3B0_4P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.pause_xon =
- PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+ PFC_E3B0_4P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
config_val->non_pauseable_th.full_xoff =
- PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+ PFC_E3B0_4P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
config_val->non_pauseable_th.full_xon =
- PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
- } else {
- config_val->pauseable_th.pause_xoff =
- PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
- config_val->pauseable_th.pause_xon =
- PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE;
- config_val->pauseable_th.full_xoff =
- PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
- config_val->pauseable_th.full_xon =
- PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
- /* non pause able*/
- config_val->non_pauseable_th.pause_xoff =
- PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
- config_val->non_pauseable_th.pause_xon =
- PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
- config_val->non_pauseable_th.full_xoff =
- PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
- config_val->non_pauseable_th.full_xon =
- PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
- }
+ PFC_E3B0_4P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+ } else {
+ config_val->pauseable_th.pause_xoff =
+ PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_PAUSE;
+ config_val->pauseable_th.pause_xon =
+ PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_PAUSE;
+ config_val->pauseable_th.full_xoff =
+ PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_PAUSE;
+ config_val->pauseable_th.full_xon =
+ PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_PAUSE;
+ /* non pause able*/
+ config_val->non_pauseable_th.pause_xoff =
+ PFC_E3B0_2P_BRB_MAC_PAUSE_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.pause_xon =
+ PFC_E3B0_2P_BRB_MAC_PAUSE_XON_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xoff =
+ PFC_E3B0_2P_BRB_MAC_FULL_XOFF_THR_NON_PAUSE;
+ config_val->non_pauseable_th.full_xon =
+ PFC_E3B0_2P_BRB_MAC_FULL_XON_THR_NON_PAUSE;
+ }
} else
return -EINVAL;
return 0;
}
-
-static void bnx2x_pfc_brb_get_e3b0_config_params(struct link_params *params,
- struct bnx2x_pfc_brb_e3b0_val
- *e3b0_val,
- u32 cos0_pauseable,
- u32 cos1_pauseable)
+static void bnx2x_pfc_brb_get_e3b0_config_params(
+ struct link_params *params,
+ struct bnx2x_pfc_brb_e3b0_val
+ *e3b0_val,
+ struct bnx2x_nig_brb_pfc_port_params *pfc_params,
+ const u8 pfc_enabled)
{
- if (params->phy[INT_PHY].flags & FLAGS_4_PORT_MODE) {
+ if (pfc_enabled && pfc_params) {
+ e3b0_val->per_class_guaranty_mode = 1;
+ e3b0_val->lb_guarantied_hyst = 80;
+
+ if (params->phy[INT_PHY].flags &
+ FLAGS_4_PORT_MODE) {
+ e3b0_val->full_lb_xoff_th =
+ PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR;
+ e3b0_val->full_lb_xon_threshold =
+ PFC_E3B0_4P_BRB_FULL_LB_XON_THR;
+ e3b0_val->lb_guarantied =
+ PFC_E3B0_4P_LB_GUART;
+ e3b0_val->mac_0_class_t_guarantied =
+ PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART;
+ e3b0_val->mac_0_class_t_guarantied_hyst =
+ PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST;
+ e3b0_val->mac_1_class_t_guarantied =
+ PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART;
+ e3b0_val->mac_1_class_t_guarantied_hyst =
+ PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST;
+ } else {
+ e3b0_val->full_lb_xoff_th =
+ PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR;
+ e3b0_val->full_lb_xon_threshold =
+ PFC_E3B0_2P_BRB_FULL_LB_XON_THR;
+ e3b0_val->mac_0_class_t_guarantied_hyst =
+ PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST;
+ e3b0_val->mac_1_class_t_guarantied =
+ PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART;
+ e3b0_val->mac_1_class_t_guarantied_hyst =
+ PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST;
+
+ if (pfc_params->cos0_pauseable !=
+ pfc_params->cos1_pauseable) {
+ /* nonpauseable= Lossy + pauseable = Lossless*/
+ e3b0_val->lb_guarantied =
+ PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
+ e3b0_val->mac_0_class_t_guarantied =
+ PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART;
+ } else if (pfc_params->cos0_pauseable) {
+ /* Lossless +Lossless*/
+ e3b0_val->lb_guarantied =
+ PFC_E3B0_2P_PAUSE_LB_GUART;
+ e3b0_val->mac_0_class_t_guarantied =
+ PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART;
+ } else {
+ /* Lossy +Lossy*/
+ e3b0_val->lb_guarantied =
+ PFC_E3B0_2P_NON_PAUSE_LB_GUART;
+ e3b0_val->mac_0_class_t_guarantied =
+ PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART;
+ }
+ }
+ } else {
+ e3b0_val->per_class_guaranty_mode = 0;
+ e3b0_val->lb_guarantied_hyst = 0;
e3b0_val->full_lb_xoff_th =
- PFC_E3B0_4P_BRB_FULL_LB_XOFF_THR;
+ DEFAULT_E3B0_BRB_FULL_LB_XOFF_THR;
e3b0_val->full_lb_xon_threshold =
- PFC_E3B0_4P_BRB_FULL_LB_XON_THR;
+ DEFAULT_E3B0_BRB_FULL_LB_XON_THR;
e3b0_val->lb_guarantied =
- PFC_E3B0_4P_LB_GUART;
+ DEFAULT_E3B0_LB_GUART;
e3b0_val->mac_0_class_t_guarantied =
- PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART;
+ DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART;
e3b0_val->mac_0_class_t_guarantied_hyst =
- PFC_E3B0_4P_BRB_MAC_0_CLASS_T_GUART_HYST;
+ DEFAULT_E3B0_BRB_MAC_0_CLASS_T_GUART_HYST;
e3b0_val->mac_1_class_t_guarantied =
- PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART;
+ DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART;
e3b0_val->mac_1_class_t_guarantied_hyst =
- PFC_E3B0_4P_BRB_MAC_1_CLASS_T_GUART_HYST;
- } else {
- e3b0_val->full_lb_xoff_th =
- PFC_E3B0_2P_BRB_FULL_LB_XOFF_THR;
- e3b0_val->full_lb_xon_threshold =
- PFC_E3B0_2P_BRB_FULL_LB_XON_THR;
- e3b0_val->mac_0_class_t_guarantied_hyst =
- PFC_E3B0_2P_BRB_MAC_0_CLASS_T_GUART_HYST;
- e3b0_val->mac_1_class_t_guarantied =
- PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART;
- e3b0_val->mac_1_class_t_guarantied_hyst =
- PFC_E3B0_2P_BRB_MAC_1_CLASS_T_GUART_HYST;
-
- if (cos0_pauseable != cos1_pauseable) {
- /* nonpauseable= Lossy + pauseable = Lossless*/
- e3b0_val->lb_guarantied =
- PFC_E3B0_2P_MIX_PAUSE_LB_GUART;
- e3b0_val->mac_0_class_t_guarantied =
- PFC_E3B0_2P_MIX_PAUSE_MAC_0_CLASS_T_GUART;
- } else if (cos0_pauseable) {
- /* Lossless +Lossless*/
- e3b0_val->lb_guarantied =
- PFC_E3B0_2P_PAUSE_LB_GUART;
- e3b0_val->mac_0_class_t_guarantied =
- PFC_E3B0_2P_PAUSE_MAC_0_CLASS_T_GUART;
- } else {
- /* Lossy +Lossy*/
- e3b0_val->lb_guarantied =
- PFC_E3B0_2P_NON_PAUSE_LB_GUART;
- e3b0_val->mac_0_class_t_guarantied =
- PFC_E3B0_2P_NON_PAUSE_MAC_0_CLASS_T_GUART;
- }
+ DEFAULT_E3B0_BRB_MAC_1_CLASS_T_GUART_HYST;
}
}
static int bnx2x_update_pfc_brb(struct link_params *params,
@@ -2172,23 +2274,28 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
struct bnx2x *bp = params->bp;
struct bnx2x_pfc_brb_th_val config_val = { {0} };
struct bnx2x_pfc_brb_threshold_val *reg_th_config =
- &config_val.pauseable_th;
+ &config_val.pauseable_th;
struct bnx2x_pfc_brb_e3b0_val e3b0_val = {0};
- int set_pfc = params->feature_config_flags &
+ const int set_pfc = params->feature_config_flags &
FEATURE_CONFIG_PFC_ENABLED;
+ const u8 pfc_enabled = (set_pfc && pfc_params);
int bnx2x_status = 0;
u8 port = params->port;
/* default - pause configuration */
reg_th_config = &config_val.pauseable_th;
bnx2x_status = bnx2x_pfc_brb_get_config_params(params, &config_val);
- if (0 != bnx2x_status)
+ if (bnx2x_status)
return bnx2x_status;
- if (set_pfc && pfc_params)
+ if (pfc_enabled) {
/* First COS */
- if (!pfc_params->cos0_pauseable)
+ if (pfc_params->cos0_pauseable)
+ reg_th_config = &config_val.pauseable_th;
+ else
reg_th_config = &config_val.non_pauseable_th;
+ } else
+ reg_th_config = &config_val.default_class0;
/*
* The number of free blocks below which the pause signal to class 0
* of MAC #n is asserted. n=0,1
@@ -2215,122 +2322,119 @@ static int bnx2x_update_pfc_brb(struct link_params *params,
REG_WR(bp, (port) ? BRB1_REG_FULL_0_XON_THRESHOLD_1 :
BRB1_REG_FULL_0_XON_THRESHOLD_0 , reg_th_config->full_xon);
- if (set_pfc && pfc_params) {
+ if (pfc_enabled) {
/* Second COS */
if (pfc_params->cos1_pauseable)
reg_th_config = &config_val.pauseable_th;
else
reg_th_config = &config_val.non_pauseable_th;
+ } else
+ reg_th_config = &config_val.default_class1;
+ /*
+ * The number of free blocks below which the pause signal to
+ * class 1 of MAC #n is asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
+ BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
+ reg_th_config->pause_xoff);
+
+ /*
+ * The number of free blocks above which the pause signal to
+ * class 1 of MAC #n is de-asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
+ BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
+ reg_th_config->pause_xon);
+ /*
+ * The number of free blocks below which the full signal to
+ * class 1 of MAC #n is asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
+ BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
+ reg_th_config->full_xoff);
+ /*
+ * The number of free blocks above which the full signal to
+ * class 1 of MAC #n is de-asserted. n=0,1
+ */
+ REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
+ BRB1_REG_FULL_1_XON_THRESHOLD_0,
+ reg_th_config->full_xon);
+
+ if (CHIP_IS_E3B0(bp)) {
+ bnx2x_pfc_brb_get_e3b0_config_params(
+ params,
+ &e3b0_val,
+ pfc_params,
+ pfc_enabled);
+
+ REG_WR(bp, BRB1_REG_PER_CLASS_GUARANTY_MODE,
+ e3b0_val.per_class_guaranty_mode);
+
/*
- * The number of free blocks below which the pause signal to
- * class 1 of MAC #n is asserted. n=0,1
- **/
- REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XOFF_THRESHOLD_1 :
- BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0,
- reg_th_config->pause_xoff);
+ * The hysteresis on the guarantied buffer space for the Lb
+ * port before signaling XON.
+ */
+ REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST,
+ e3b0_val.lb_guarantied_hyst);
+
/*
- * The number of free blocks above which the pause signal to
- * class 1 of MAC #n is de-asserted. n=0,1
+ * The number of free blocks below which the full signal to the
+ * LB port is asserted.
*/
- REG_WR(bp, (port) ? BRB1_REG_PAUSE_1_XON_THRESHOLD_1 :
- BRB1_REG_PAUSE_1_XON_THRESHOLD_0,
- reg_th_config->pause_xon);
+ REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
+ e3b0_val.full_lb_xoff_th);
/*
- * The number of free blocks below which the full signal to
- * class 1 of MAC #n is asserted. n=0,1
+ * The number of free blocks above which the full signal to the
+ * LB port is de-asserted.
*/
- REG_WR(bp, (port) ? BRB1_REG_FULL_1_XOFF_THRESHOLD_1 :
- BRB1_REG_FULL_1_XOFF_THRESHOLD_0,
- reg_th_config->full_xoff);
+ REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
+ e3b0_val.full_lb_xon_threshold);
/*
- * The number of free blocks above which the full signal to
- * class 1 of MAC #n is de-asserted. n=0,1
+ * The number of blocks guarantied for the MAC #n port. n=0,1
*/
- REG_WR(bp, (port) ? BRB1_REG_FULL_1_XON_THRESHOLD_1 :
- BRB1_REG_FULL_1_XON_THRESHOLD_0,
- reg_th_config->full_xon);
+ /* The number of blocks guarantied for the LB port.*/
+ REG_WR(bp, BRB1_REG_LB_GUARANTIED,
+ e3b0_val.lb_guarantied);
- if (CHIP_IS_E3B0(bp)) {
- /*Should be done by init tool */
- /*
- * BRB_empty_for_dup = BRB1_REG_BRB_EMPTY_THRESHOLD
- * reset value
- * 944
- */
-
- /**
- * The hysteresis on the guarantied buffer space for the Lb port
- * before signaling XON.
- **/
- REG_WR(bp, BRB1_REG_LB_GUARANTIED_HYST, 80);
-
- bnx2x_pfc_brb_get_e3b0_config_params(
- params,
- &e3b0_val,
- pfc_params->cos0_pauseable,
- pfc_params->cos1_pauseable);
- /**
- * The number of free blocks below which the full signal to the
- * LB port is asserted.
- */
- REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD,
- e3b0_val.full_lb_xoff_th);
- /**
- * The number of free blocks above which the full signal to the
- * LB port is de-asserted.
- */
- REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD,
- e3b0_val.full_lb_xon_threshold);
- /**
- * The number of blocks guarantied for the MAC #n port. n=0,1
- */
-
- /*The number of blocks guarantied for the LB port.*/
- REG_WR(bp, BRB1_REG_LB_GUARANTIED,
- e3b0_val.lb_guarantied);
-
- /**
- * The number of blocks guarantied for the MAC #n port.
- */
- REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
- 2 * e3b0_val.mac_0_class_t_guarantied);
- REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
- 2 * e3b0_val.mac_1_class_t_guarantied);
- /**
- * The number of blocks guarantied for class #t in MAC0. t=0,1
- */
- REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
- e3b0_val.mac_0_class_t_guarantied);
- REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
- e3b0_val.mac_0_class_t_guarantied);
- /**
- * The hysteresis on the guarantied buffer space for class in
- * MAC0. t=0,1
- */
- REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
- e3b0_val.mac_0_class_t_guarantied_hyst);
- REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
- e3b0_val.mac_0_class_t_guarantied_hyst);
-
- /**
- * The number of blocks guarantied for class #t in MAC1.t=0,1
- */
- REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
- e3b0_val.mac_1_class_t_guarantied);
- REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
- e3b0_val.mac_1_class_t_guarantied);
- /**
- * The hysteresis on the guarantied buffer space for class #t
- * in MAC1. t=0,1
- */
- REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
- e3b0_val.mac_1_class_t_guarantied_hyst);
- REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST,
- e3b0_val.mac_1_class_t_guarantied_hyst);
-
- }
+ /*
+ * The number of blocks guarantied for the MAC #n port.
+ */
+ REG_WR(bp, BRB1_REG_MAC_GUARANTIED_0,
+ 2 * e3b0_val.mac_0_class_t_guarantied);
+ REG_WR(bp, BRB1_REG_MAC_GUARANTIED_1,
+ 2 * e3b0_val.mac_1_class_t_guarantied);
+ /*
+ * The number of blocks guarantied for class #t in MAC0. t=0,1
+ */
+ REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED,
+ e3b0_val.mac_0_class_t_guarantied);
+ REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED,
+ e3b0_val.mac_0_class_t_guarantied);
+ /*
+ * The hysteresis on the guarantied buffer space for class in
+ * MAC0. t=0,1
+ */
+ REG_WR(bp, BRB1_REG_MAC_0_CLASS_0_GUARANTIED_HYST,
+ e3b0_val.mac_0_class_t_guarantied_hyst);
+ REG_WR(bp, BRB1_REG_MAC_0_CLASS_1_GUARANTIED_HYST,
+ e3b0_val.mac_0_class_t_guarantied_hyst);
+ /*
+ * The number of blocks guarantied for class #t in MAC1.t=0,1
+ */
+ REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED,
+ e3b0_val.mac_1_class_t_guarantied);
+ REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED,
+ e3b0_val.mac_1_class_t_guarantied);
+ /*
+ * The hysteresis on the guarantied buffer space for class #t
+ * in MAC1. t=0,1
+ */
+ REG_WR(bp, BRB1_REG_MAC_1_CLASS_0_GUARANTIED_HYST,
+ e3b0_val.mac_1_class_t_guarantied_hyst);
+ REG_WR(bp, BRB1_REG_MAC_1_CLASS_1_GUARANTIED_HYST,
+ e3b0_val.mac_1_class_t_guarantied_hyst);
}
return bnx2x_status;
@@ -2398,7 +2502,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
struct bnx2x_nig_brb_pfc_port_params *nig_params)
{
u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0;
- u32 llfc_enable = 0, xcm0_out_en = 0, p0_hwpfc_enable = 0;
+ u32 llfc_enable = 0, xcm_out_en = 0, hwpfc_enable = 0;
u32 pkt_priority_to_cos = 0;
struct bnx2x *bp = params->bp;
u8 port = params->port;
@@ -2412,9 +2516,8 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
* MAC control frames (that are not pause packets)
* will be forwarded to the XCM.
*/
- xcm_mask = REG_RD(bp,
- port ? NIG_REG_LLH1_XCM_MASK :
- NIG_REG_LLH0_XCM_MASK);
+ xcm_mask = REG_RD(bp, port ? NIG_REG_LLH1_XCM_MASK :
+ NIG_REG_LLH0_XCM_MASK);
/*
* nig params will override non PFC params, since it's possible to
* do transition from PFC to SAFC
@@ -2429,8 +2532,8 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
ppp_enable = 1;
xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
- xcm0_out_en = 0;
- p0_hwpfc_enable = 1;
+ xcm_out_en = 0;
+ hwpfc_enable = 1;
} else {
if (nig_params) {
llfc_out_en = nig_params->llfc_out_en;
@@ -2441,7 +2544,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
- xcm0_out_en = 1;
+ xcm_out_en = 1;
}
if (CHIP_IS_E3(bp))
@@ -2460,13 +2563,16 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
REG_WR(bp, port ? NIG_REG_LLH1_XCM_MASK :
NIG_REG_LLH0_XCM_MASK, xcm_mask);
- REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
+ REG_WR(bp, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 :
+ NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
/* output enable for RX_XCM # IF */
- REG_WR(bp, NIG_REG_XCM0_OUT_EN, xcm0_out_en);
+ REG_WR(bp, port ? NIG_REG_XCM1_OUT_EN :
+ NIG_REG_XCM0_OUT_EN, xcm_out_en);
/* HW PFC TX enable */
- REG_WR(bp, NIG_REG_P0_HWPFC_ENABLE, p0_hwpfc_enable);
+ REG_WR(bp, port ? NIG_REG_P1_HWPFC_ENABLE :
+ NIG_REG_P0_HWPFC_ENABLE, hwpfc_enable);
if (nig_params) {
u8 i = 0;
@@ -2515,7 +2621,7 @@ int bnx2x_update_pfc(struct link_params *params,
/* update BRB params */
bnx2x_status = bnx2x_update_pfc_brb(params, vars, pfc_params);
- if (0 != bnx2x_status)
+ if (bnx2x_status)
return bnx2x_status;
if (!vars->link_up)
@@ -2533,7 +2639,6 @@ int bnx2x_update_pfc(struct link_params *params,
bnx2x_emac_enable(params, vars, 0);
return bnx2x_status;
}
-
if (CHIP_IS_E2(bp))
bnx2x_update_pfc_bmac2(params, vars, bmac_loopback);
else
@@ -3053,7 +3158,6 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "write phy register failed\n");
netdev_err(bp->dev, "MDC/MDIO access timeout\n");
rc = -EFAULT;
-
} else {
/* data */
tmp = ((phy->addr << 21) | (devad << 16) | val |
@@ -3090,8 +3194,6 @@ static int bnx2x_cl45_write(struct bnx2x *bp, struct bnx2x_phy *phy,
EMAC_MDIO_STATUS_10MB);
return rc;
}
-
-
/******************************************************************/
/* BSC access functions from E3 */
/******************************************************************/
@@ -3339,7 +3441,7 @@ static void bnx2x_set_aer_mmd(struct link_params *params,
aer_val = 0x3800 + offset - 1;
else
aer_val = 0x3800 + offset;
- DP(NETIF_MSG_LINK, "Set AER to 0x%x\n", aer_val);
+
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, aer_val);
@@ -3661,7 +3763,15 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
/* Advertise pause */
bnx2x_ext_phy_set_pause(params, phy, vars);
- vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
+ /*
+ * Set KR Autoneg Work-Around flag for Warpcore version older than D108
+ */
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_UC_INFO_B1_VERSION, &val16);
+ if (val16 < 0xd108) {
+ DP(NETIF_MSG_LINK, "Enable AN KR work-around\n");
+ vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
+ }
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL5_MISC7, &val16);
@@ -3942,13 +4052,11 @@ static void bnx2x_warpcore_set_20G_DXGXS(struct bnx2x *bp,
static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
struct link_params *params,
- u8 fiber_mode)
+ u8 fiber_mode,
+ u8 always_autoneg)
{
struct bnx2x *bp = params->bp;
u16 val16, digctrl_kx1, digctrl_kx2;
- u8 lane;
-
- lane = bnx2x_get_warpcore_lane(phy, params);
/* Clear XFI clock comp in non-10G single lane mode. */
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -3956,7 +4064,7 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_RX66_CONTROL, val16 & ~(3<<13));
- if (phy->req_line_speed == SPEED_AUTO_NEG) {
+ if (always_autoneg || phy->req_line_speed == SPEED_AUTO_NEG) {
/* SGMII Autoneg */
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
@@ -3967,7 +4075,7 @@ static void bnx2x_warpcore_set_sgmii_speed(struct bnx2x_phy *phy,
} else {
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
- val16 &= 0xcfbf;
+ val16 &= 0xcebf;
switch (phy->req_line_speed) {
case SPEED_10:
break;
@@ -4043,9 +4151,7 @@ static void bnx2x_warpcore_reset_lane(struct bnx2x *bp,
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_DIGITAL5_MISC6, &val);
}
-
-
- /* Clear SFI/XFI link settings registers */
+/* Clear SFI/XFI link settings registers */
static void bnx2x_warpcore_clear_regs(struct bnx2x_phy *phy,
struct link_params *params,
u16 lane)
@@ -4250,7 +4356,7 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
vars->phy_flags |= PHY_SGMII_FLAG;
DP(NETIF_MSG_LINK, "Setting SGMII mode\n");
bnx2x_warpcore_clear_regs(phy, params, lane);
- bnx2x_warpcore_set_sgmii_speed(phy, params, 0);
+ bnx2x_warpcore_set_sgmii_speed(phy, params, 0, 1);
} else {
switch (serdes_net_if) {
case PORT_HW_CFG_NET_SERDES_IF_KR:
@@ -4278,7 +4384,8 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
}
bnx2x_warpcore_set_sgmii_speed(phy,
params,
- fiber_mode);
+ fiber_mode,
+ 0);
}
break;
@@ -4291,7 +4398,8 @@ static void bnx2x_warpcore_config_init(struct bnx2x_phy *phy,
bnx2x_warpcore_set_10G_XFI(phy, params, 0);
} else if (vars->line_speed == SPEED_1000) {
DP(NETIF_MSG_LINK, "Setting 1G Fiber\n");
- bnx2x_warpcore_set_sgmii_speed(phy, params, 1);
+ bnx2x_warpcore_set_sgmii_speed(
+ phy, params, 1, 0);
}
/* Issue Module detection */
if (bnx2x_is_sfp_module_plugged(phy, params))
@@ -4428,12 +4536,6 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
/* Switch back to 4-copy registers */
bnx2x_set_aer_mmd(params, phy);
- /* Global loopback, not recommended. */
- bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_COMBO_IEEE0_MIICTRL, &val16);
- bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_COMBO_IEEE0_MIICTRL, val16 |
- 0x4000);
} else {
/* 10G & 20G */
bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
@@ -4450,25 +4552,14 @@ static void bnx2x_set_warpcore_loopback(struct bnx2x_phy *phy,
}
-void bnx2x_link_status_update(struct link_params *params,
- struct link_vars *vars)
+void bnx2x_sync_link(struct link_params *params,
+ struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
u8 link_10g_plus;
- u8 port = params->port;
- u32 sync_offset, media_types;
- /* Update PHY configuration */
- set_phy_vars(params, vars);
-
- vars->link_status = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region,
- port_mb[port].link_status));
-
- vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
- vars->phy_flags = PHY_XGXS_FLAG;
if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
vars->phy_flags |= PHY_PHYSICAL_LINK_FLAG;
-
+ vars->link_up = (vars->link_status & LINK_STATUS_LINK_UP);
if (vars->link_up) {
DP(NETIF_MSG_LINK, "phy link up\n");
@@ -4563,7 +4654,23 @@ void bnx2x_link_status_update(struct link_params *params,
if (vars->link_status & LINK_STATUS_PHYSICAL_LINK_FLAG)
vars->phy_flags |= PHY_HALF_OPEN_CONN_FLAG;
}
+}
+
+void bnx2x_link_status_update(struct link_params *params,
+ struct link_vars *vars)
+{
+ struct bnx2x *bp = params->bp;
+ u8 port = params->port;
+ u32 sync_offset, media_types;
+ /* Update PHY configuration */
+ set_phy_vars(params, vars);
+ vars->link_status = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ port_mb[port].link_status));
+
+ vars->phy_flags = PHY_XGXS_FLAG;
+ bnx2x_sync_link(params, vars);
/* Sync media type */
sync_offset = params->shmem_base +
offsetof(struct shmem_region,
@@ -4602,7 +4709,6 @@ void bnx2x_link_status_update(struct link_params *params,
vars->line_speed, vars->duplex, vars->flow_ctrl);
}
-
static void bnx2x_set_master_ln(struct link_params *params,
struct bnx2x_phy *phy)
{
@@ -4676,11 +4782,8 @@ static void bnx2x_set_swap_lanes(struct link_params *params,
* Each two bits represents a lane number:
* No swap is 0123 => 0x1b no need to enable the swap
*/
- u16 ser_lane, rx_lane_swap, tx_lane_swap;
+ u16 rx_lane_swap, tx_lane_swap;
- ser_lane = ((params->lane_config &
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
- PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
rx_lane_swap = ((params->lane_config &
PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
@@ -5356,7 +5459,6 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
-
struct bnx2x *bp = params->bp;
u16 gp_status, duplex = DUPLEX_HALF, link_up = 0, speed_mask;
@@ -5403,9 +5505,7 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars)
{
-
struct bnx2x *bp = params->bp;
-
u8 lane;
u16 gp_status1, gp_speed, link_up, duplex = DUPLEX_FULL;
int rc = 0;
@@ -6678,7 +6778,6 @@ int bnx2x_link_update(struct link_params *params, struct link_vars *vars)
return rc;
}
-
/*****************************************************************************/
/* External Phy section */
/*****************************************************************************/
@@ -8103,7 +8202,15 @@ static void bnx2x_warpcore_power_module(struct link_params *params,
static void bnx2x_warpcore_hw_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
+ struct bnx2x *bp = params->bp;
bnx2x_warpcore_power_module(params, phy, 0);
+ /* Put Warpcore in low power mode */
+ REG_WR(bp, MISC_REG_WC0_RESET, 0x0c0e);
+
+ /* Put LCPLL in low power mode */
+ REG_WR(bp, MISC_REG_LCPLL_E40_PWRDWN, 1);
+ REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_ANA, 0);
+ REG_WR(bp, MISC_REG_LCPLL_E40_RESETB_DIG, 0);
}
static void bnx2x_power_sfp_module(struct link_params *params,
@@ -9040,13 +9147,13 @@ static u8 bnx2x_8727_read_status(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK,
"8727 Power fault has been detected on port %d\n",
oc_port);
- netdev_err(bp->dev, "Error: Power fault on Port %d has"
- " been detected and the power to "
- "that SFP+ module has been removed"
- " to prevent failure of the card."
- " Please remove the SFP+ module and"
- " restart the system to clear this"
- " error.\n",
+ netdev_err(bp->dev, "Error: Power fault on Port %d has "
+ "been detected and the power to "
+ "that SFP+ module has been removed "
+ "to prevent failure of the card. "
+ "Please remove the SFP+ module and "
+ "restart the system to clear this "
+ "error.\n",
oc_port);
/* Disable all RX_ALARMs except for mod_abs */
bnx2x_cl45_write(bp, phy,
@@ -9169,66 +9276,72 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
/* BCM8481/BCM84823/BCM84833 PHY SECTION */
/******************************************************************/
static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
- struct link_params *params)
+ struct bnx2x *bp,
+ u8 port)
{
u16 val, fw_ver1, fw_ver2, cnt;
- u8 port;
- struct bnx2x *bp = params->bp;
- port = params->port;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
+ bnx2x_save_spirom_version(bp, port,
+ ((fw_ver1 & 0xf000)>>5) | (fw_ver1 & 0x7f),
+ phy->ver_addr);
+ } else {
+ /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
+ /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
+
+ for (cnt = 0; cnt < 100; cnt++) {
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+ if (val & 1)
+ break;
+ udelay(5);
+ }
+ if (cnt == 100) {
+ DP(NETIF_MSG_LINK, "Unable to read 848xx "
+ "phy fw version(1)\n");
+ bnx2x_save_spirom_version(bp, port, 0,
+ phy->ver_addr);
+ return;
+ }
- /* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/
- /* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
- for (cnt = 0; cnt < 100; cnt++) {
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
- if (val & 1)
- break;
- udelay(5);
- }
- if (cnt == 100) {
- DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(1)\n");
- bnx2x_save_spirom_version(bp, port, 0,
- phy->ver_addr);
- return;
- }
+ /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+ bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
+ for (cnt = 0; cnt < 100; cnt++) {
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+ if (val & 1)
+ break;
+ udelay(5);
+ }
+ if (cnt == 100) {
+ DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw "
+ "version(2)\n");
+ bnx2x_save_spirom_version(bp, port, 0,
+ phy->ver_addr);
+ return;
+ }
+ /* lower 16 bits of the register SPI_FW_STATUS */
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
+ /* upper 16 bits of register SPI_FW_STATUS */
+ bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
- /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
- bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
- for (cnt = 0; cnt < 100; cnt++) {
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
- if (val & 1)
- break;
- udelay(5);
- }
- if (cnt == 100) {
- DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(2)\n");
- bnx2x_save_spirom_version(bp, port, 0,
+ bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1,
phy->ver_addr);
- return;
}
- /* lower 16 bits of the register SPI_FW_STATUS */
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
- /* upper 16 bits of register SPI_FW_STATUS */
- bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
-
- bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1,
- phy->ver_addr);
}
-
static void bnx2x_848xx_set_led(struct bnx2x *bp,
struct bnx2x_phy *phy)
{
- u16 val;
+ u16 val, offset;
/* PHYC_CTL_LED_CTL */
bnx2x_cl45_read(bp, phy,
@@ -9263,14 +9376,22 @@ static void bnx2x_848xx_set_led(struct bnx2x *bp,
MDIO_PMA_REG_8481_LED3_BLINK,
0);
- bnx2x_cl45_read(bp, phy,
+ /* Configure the blink rate to ~15.9 Hz */
+ bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
- MDIO_PMA_REG_84823_CTL_LED_CTL_1, &val);
- val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
+ MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH,
+ MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ);
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ offset = MDIO_PMA_REG_84833_CTL_LED_CTL_1;
+ else
+ offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1;
+
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD, offset, &val);
+ val |= MDIO_PMA_REG_84823_LED3_STRETCH_EN; /* stretch_en for LED3*/
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_84823_CTL_LED_CTL_1, val);
+ MDIO_PMA_DEVAD, offset, val);
/* 'Interrupt Mask' */
bnx2x_cl45_write(bp, phy,
@@ -9283,14 +9404,17 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
struct link_vars *vars)
{
struct bnx2x *bp = params->bp;
- u16 autoneg_val, an_1000_val, an_10_100_val;
+ u16 autoneg_val, an_1000_val, an_10_100_val, an_10g_val;
u16 tmp_req_line_speed;
tmp_req_line_speed = phy->req_line_speed;
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
if (phy->req_line_speed == SPEED_10000)
phy->req_line_speed = SPEED_AUTO_NEG;
-
+ } else {
+ /* Save spirom version */
+ bnx2x_save_848xx_spirom_version(phy, bp, params->port);
+ }
/*
* This phy uses the NIG latch mechanism since link indication
* arrives through its LED4 and not via its LASI signal, so we
@@ -9338,13 +9462,10 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
an_1000_val);
/* set 100 speed advertisement */
- if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
(phy->speed_cap_mask &
(PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) &&
- (phy->supported &
- (SUPPORTED_100baseT_Half |
- SUPPORTED_100baseT_Full)))) {
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))) {
an_10_100_val |= (1<<7);
/* Enable autoneg and restart autoneg for legacy speeds */
autoneg_val |= (1<<9 | 1<<12);
@@ -9378,6 +9499,8 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD, MDIO_AN_REG_8481_AUX_CTRL,
(1<<15 | 1<<9 | 7<<0));
+ /* The PHY needs this set even for forced link. */
+ an_10_100_val |= (1<<8) | (1<<7);
DP(NETIF_MSG_LINK, "Setting 100M force\n");
}
if ((phy->req_line_speed == SPEED_10) &&
@@ -9415,18 +9538,23 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Advertising 10G\n");
/* Restart autoneg for 10G*/
+ bnx2x_cl45_read(bp, phy,
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+ &an_10g_val);
bnx2x_cl45_write(bp, phy,
- MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
- 0x3200);
+ MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
+ an_10g_val | 0x1000);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_AN_DEVAD, MDIO_AN_REG_CTRL,
+ 0x3200);
} else
bnx2x_cl45_write(bp, phy,
MDIO_AN_DEVAD,
MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
1);
- /* Save spirom version */
- bnx2x_save_848xx_spirom_version(phy, params);
-
phy->req_line_speed = tmp_req_line_speed;
return 0;
@@ -9449,74 +9577,95 @@ static int bnx2x_8481_config_init(struct bnx2x_phy *phy,
return bnx2x_848xx_cmn_config_init(phy, params, vars);
}
-
-#define PHY84833_HDSHK_WAIT 300
-static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
+#define PHY84833_CMDHDLR_WAIT 300
+#define PHY84833_CMDHDLR_MAX_ARGS 5
+static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
struct link_params *params,
- struct link_vars *vars)
+ u16 fw_cmd,
+ u16 cmd_args[])
{
u32 idx;
- u32 pair_swap;
u16 val;
- u16 data;
struct bnx2x *bp = params->bp;
- /* Do pair swap */
-
- /* Check for configuration. */
- pair_swap = REG_RD(bp, params->shmem_base +
- offsetof(struct shmem_region,
- dev_info.port_hw_config[params->port].xgbt_phy_cfg)) &
- PORT_HW_CFG_RJ45_PAIR_SWAP_MASK;
-
- if (pair_swap == 0)
- return 0;
-
- data = (u16)pair_swap;
-
/* Write CMD_OPEN_OVERRIDE to STATUS reg */
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG2,
- PHY84833_CMD_OPEN_OVERRIDE);
- for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+ MDIO_84833_CMD_HDLR_STATUS,
+ PHY84833_STATUS_CMD_OPEN_OVERRIDE);
+ for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
- if (val == PHY84833_CMD_OPEN_FOR_CMDS)
+ MDIO_84833_CMD_HDLR_STATUS, &val);
+ if (val == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
break;
msleep(1);
}
- if (idx >= PHY84833_HDSHK_WAIT) {
- DP(NETIF_MSG_LINK, "Pairswap: FW not ready.\n");
+ if (idx >= PHY84833_CMDHDLR_WAIT) {
+ DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
return -EINVAL;
}
+ /* Prepare argument(s) and issue command */
+ for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) {
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_CMD_HDLR_DATA1 + idx,
+ cmd_args[idx]);
+ }
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG4,
- data);
- /* Issue pair swap command */
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG0,
- PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE);
- for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
+ MDIO_84833_CMD_HDLR_COMMAND, fw_cmd);
+ for (idx = 0; idx < PHY84833_CMDHDLR_WAIT; idx++) {
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
- if ((val == PHY84833_CMD_COMPLETE_PASS) ||
- (val == PHY84833_CMD_COMPLETE_ERROR))
+ MDIO_84833_CMD_HDLR_STATUS, &val);
+ if ((val == PHY84833_STATUS_CMD_COMPLETE_PASS) ||
+ (val == PHY84833_STATUS_CMD_COMPLETE_ERROR))
break;
msleep(1);
}
- if ((idx >= PHY84833_HDSHK_WAIT) ||
- (val == PHY84833_CMD_COMPLETE_ERROR)) {
- DP(NETIF_MSG_LINK, "Pairswap: override failed.\n");
+ if ((idx >= PHY84833_CMDHDLR_WAIT) ||
+ (val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
+ DP(NETIF_MSG_LINK, "FW cmd failed.\n");
return -EINVAL;
}
+ /* Gather returning data */
+ for (idx = 0; idx < PHY84833_CMDHDLR_MAX_ARGS; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_84833_CMD_HDLR_DATA1 + idx,
+ &cmd_args[idx]);
+ }
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG2,
- PHY84833_CMD_CLEAR_COMPLETE);
- DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data);
+ MDIO_84833_CMD_HDLR_STATUS,
+ PHY84833_STATUS_CMD_CLEAR_COMPLETE);
return 0;
}
+static int bnx2x_84833_pair_swap_cfg(struct bnx2x_phy *phy,
+ struct link_params *params,
+ struct link_vars *vars)
+{
+ u32 pair_swap;
+ u16 data[PHY84833_CMDHDLR_MAX_ARGS];
+ int status;
+ struct bnx2x *bp = params->bp;
+
+ /* Check for configuration. */
+ pair_swap = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region,
+ dev_info.port_hw_config[params->port].xgbt_phy_cfg)) &
+ PORT_HW_CFG_RJ45_PAIR_SWAP_MASK;
+
+ if (pair_swap == 0)
+ return 0;
+
+ /* Only the second argument is used for this command */
+ data[1] = (u16)pair_swap;
+
+ status = bnx2x_84833_cmd_hdlr(phy, params,
+ PHY84833_CMD_SET_PAIR_SWAP, data);
+ if (status == 0)
+ DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
+
+ return status;
+}
+
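The pair-swap routine above is the first caller of the generic 84833 mailbox handler. A hypothetical second caller makes the calling convention easier to see; bnx2x_84833_cmd_hdlr(), PHY84833_CMD_GET_EEE_MODE and PHY84833_CMDHDLR_MAX_ARGS are taken from this patch, while the helper name and the meaning of the returned arguments are assumptions for illustration only.

/* Hypothetical sketch: query EEE mode through the same mailbox handler. */
static int bnx2x_84833_get_eee_mode_example(struct bnx2x_phy *phy,
					    struct link_params *params)
{
	struct bnx2x *bp = params->bp;
	u16 data[PHY84833_CMDHDLR_MAX_ARGS] = {0};
	int status;

	status = bnx2x_84833_cmd_hdlr(phy, params,
				      PHY84833_CMD_GET_EEE_MODE, data);
	if (status == 0)
		/* The handler reads the returned arguments back into data[]. */
		DP(NETIF_MSG_LINK, "EEE mode args: 0x%x 0x%x\n",
		   data[0], data[1]);

	return status;
}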
static u8 bnx2x_84833_get_reset_gpios(struct bnx2x *bp,
u32 shmem_base_path[],
u32 chip_id)
@@ -9579,24 +9728,6 @@ static int bnx2x_84833_hw_reset_phy(struct bnx2x_phy *phy,
return 0;
}
-static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
- u32 shmem_base_path[],
- u32 chip_id)
-{
- u8 reset_gpios;
-
- reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id);
-
- bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
- udelay(10);
- bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH);
- msleep(800);
- DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n",
- reset_gpios);
-
- return 0;
-}
-
#define PHY84833_CONSTANT_LATENCY 1193
static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
struct link_params *params,
@@ -9605,8 +9736,8 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
struct bnx2x *bp = params->bp;
u8 port, initialize = 1;
u16 val;
- u16 temp;
- u32 actual_phy_selection, cms_enable, idx;
+ u32 actual_phy_selection, cms_enable;
+ u16 cmd_args[PHY84833_CMDHDLR_MAX_ARGS];
int rc = 0;
msleep(1);
@@ -9625,34 +9756,24 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_CTRL, 0x8000);
- /* Bring PHY out of super isolate mode */
- bnx2x_cl45_read(bp, phy,
- MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
- val &= ~MDIO_84833_SUPER_ISOLATE;
- bnx2x_cl45_write(bp, phy,
- MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
}
bnx2x_wait_reset_complete(bp, phy, params);
/* Wait for GPHY to come out of reset */
msleep(50);
-
- if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
- bnx2x_84833_pair_swap_cfg(phy, params, vars);
-
- /*
- * BCM84823 requires that XGXS links up first @ 10G for normal behavior
- */
- temp = vars->line_speed;
- vars->line_speed = SPEED_10000;
- bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
- bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
- vars->line_speed = temp;
-
- /* Set dual-media configuration according to configuration */
+ if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+ /*
+ * BCM84823 requires that XGXS links up first @ 10G for normal
+ * behavior.
+ */
+ u16 temp;
+ temp = vars->line_speed;
+ vars->line_speed = SPEED_10000;
+ bnx2x_set_autoneg(&params->phy[INT_PHY], params, vars, 0);
+ bnx2x_program_serdes(&params->phy[INT_PHY], params, vars);
+ vars->line_speed = temp;
+ }
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
MDIO_CTL_REG_84823_MEDIA, &val);
@@ -9698,70 +9819,23 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
params->multi_phy_config, val);
- /* AutogrEEEn */
- if (params->feature_config_flags &
- FEATURE_CONFIG_AUTOGREEEN_ENABLED) {
- /* Ensure that f/w is ready */
- for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
- bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
- if (val == PHY84833_CMD_OPEN_FOR_CMDS)
- break;
- usleep_range(1000, 1000);
- }
- if (idx >= PHY84833_HDSHK_WAIT) {
- DP(NETIF_MSG_LINK, "AutogrEEEn: FW not ready.\n");
- return -EINVAL;
- }
-
- /* Select EEE mode */
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG3,
- 0x2);
-
- /* Set Idle and Latency */
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG4,
- PHY84833_CONSTANT_LATENCY + 1);
-
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_DATA3_REG,
- PHY84833_CONSTANT_LATENCY + 1);
-
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_DATA4_REG,
- PHY84833_CONSTANT_LATENCY);
-
- /* Send EEE instruction to command register */
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG0,
- PHY84833_DIAG_CMD_SET_EEE_MODE);
-
- /* Ensure that the command has completed */
- for (idx = 0; idx < PHY84833_HDSHK_WAIT; idx++) {
- bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG2, &val);
- if ((val == PHY84833_CMD_COMPLETE_PASS) ||
- (val == PHY84833_CMD_COMPLETE_ERROR))
- break;
- usleep_range(1000, 1000);
- }
- if ((idx >= PHY84833_HDSHK_WAIT) ||
- (val == PHY84833_CMD_COMPLETE_ERROR)) {
- DP(NETIF_MSG_LINK, "AutogrEEEn: command failed.\n");
- return -EINVAL;
- }
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+ bnx2x_84833_pair_swap_cfg(phy, params, vars);
- /* Reset command handler */
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_84833_TOP_CFG_SCRATCH_REG2,
- PHY84833_CMD_CLEAR_COMPLETE);
+ /* Keep AutogrEEEn disabled. */
+ cmd_args[0] = 0x0;
+ cmd_args[1] = 0x0;
+ cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
+ cmd_args[3] = PHY84833_CONSTANT_LATENCY;
+ rc = bnx2x_84833_cmd_hdlr(phy, params,
+ PHY84833_CMD_SET_EEE_MODE, cmd_args);
+ if (rc != 0)
+ DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
}
-
if (initialize)
rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
else
- bnx2x_save_848xx_spirom_version(phy, params);
+ bnx2x_save_848xx_spirom_version(phy, bp, params->port);
/* 84833 PHY has a better feature and doesn't need to support this. */
if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
cms_enable = REG_RD(bp, params->shmem_base +
@@ -9779,6 +9853,16 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
MDIO_CTL_REG_84823_USER_CTRL_REG, val);
}
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+ /* Bring PHY out of super isolate mode as the final step. */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
+ val &= ~MDIO_84833_SUPER_ISOLATE;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+ }
return rc;
}
@@ -9916,10 +10000,11 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
} else {
bnx2x_cl45_read(bp, phy,
MDIO_CTL_DEVAD,
- 0x400f, &val16);
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val16);
+ val16 |= MDIO_84833_SUPER_ISOLATE;
bnx2x_cl45_write(bp, phy,
- MDIO_PMA_DEVAD,
- MDIO_PMA_REG_CTRL, 0x800);
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, val16);
}
}
@@ -10144,8 +10229,10 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
DP(NETIF_MSG_LINK, "54618SE cfg init\n");
usleep_range(1000, 1000);
- /* This works with E3 only, no need to check the chip
- before determining the port. */
+ /*
+ * This works with E3 only, no need to check the chip
+ * before determining the port.
+ */
port = params->port;
cfg_pin = (REG_RD(bp, params->shmem_base +
@@ -11218,7 +11305,9 @@ static int bnx2x_populate_int_phy(struct bnx2x *bp, u32 shmem_base, u8 port,
offsetof(struct shmem_region,
dev_info.port_feature_config[port].link_config)) &
PORT_FEATURE_CONNECTED_SWITCH_MASK);
- chip_id = REG_RD(bp, MISC_REG_CHIP_NUM) << 16;
+ chip_id = (REG_RD(bp, MISC_REG_CHIP_NUM) << 16) |
+ ((REG_RD(bp, MISC_REG_CHIP_REV) & 0xf) << 12);
+
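The chip_id value now carries the chip revision as well; a tiny worked example of the packing, with made-up register values:

#include <stdio.h>

int main(void)
{
	/* Hypothetical raw reads, for illustration only. */
	unsigned int chip_num = 0x168e, chip_rev = 0x1;
	unsigned int chip_id = (chip_num << 16) | ((chip_rev & 0xf) << 12);

	printf("chip_id = 0x%08x\n", chip_id); /* 0x168e1000: num in 31:16, rev in 15:12 */
	return 0;
}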
DP(NETIF_MSG_LINK, ":chip_id = 0x%x\n", chip_id);
if (USES_WARPCORE(bp)) {
u32 serdes_net_if;
@@ -11397,6 +11486,10 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
return -EINVAL;
default:
*phy = phy_null;
+ /* In case external PHY wasn't found */
+ if ((phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
+ (phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
+ return -EINVAL;
return 0;
}
@@ -11436,6 +11529,19 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
}
phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
+ if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
+ (phy->ver_addr)) {
+ /*
+			 * Remove 100Mb link support for BCM84833 when the phy fw
+			 * version is lower than or equal to 1.39.
+ */
+ u32 raw_ver = REG_RD(bp, phy->ver_addr);
+ if (((raw_ver & 0x7F) <= 39) &&
+ (((raw_ver & 0xF80) >> 7) <= 1))
+ phy->supported &= ~(SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full);
+ }
+
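The 1.39 cut-off is easier to follow next to the encoding used by bnx2x_save_848xx_spirom_version() for the 84833, ((fw_ver1 & 0xf000) >> 5) | (fw_ver1 & 0x7f). A minimal sketch, assuming the raw register keeps the major version in bits 15:12 and the build number in bits 6:0:

#include <stdio.h>

int main(void)
{
	/* Hypothetical raw register value for firmware 1.39. */
	unsigned int fw_ver1 = 0x1027; /* major 1, build 39 (0x27) */
	unsigned int saved = ((fw_ver1 & 0xf000) >> 5) | (fw_ver1 & 0x7f);

	printf("saved = 0x%x\n", saved);              /* 0xa7        */
	printf("build = %u\n", saved & 0x7f);         /* 39 -> <= 39 */
	printf("major = %u\n", (saved & 0xf80) >> 7); /* 1  -> <= 1  */
	return 0;
}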
/*
* In case mdc/mdio_access of the external phy is different than the
* mdc/mdio access of the XGXS, a HW lock must be taken in each access
@@ -11570,7 +11676,7 @@ u32 bnx2x_phy_selection(struct link_params *params)
int bnx2x_phy_probe(struct link_params *params)
{
- u8 phy_index, actual_phy_idx, link_cfg_idx;
+ u8 phy_index, actual_phy_idx;
u32 phy_config_swapped, sync_offset, media_types;
struct bnx2x *bp = params->bp;
struct bnx2x_phy *phy;
@@ -11581,7 +11687,6 @@ int bnx2x_phy_probe(struct link_params *params)
for (phy_index = INT_PHY; phy_index < MAX_PHYS;
phy_index++) {
- link_cfg_idx = LINK_CONFIG_IDX(phy_index);
actual_phy_idx = phy_index;
if (phy_config_swapped) {
if (phy_index == EXT_PHY1)
@@ -12247,6 +12352,77 @@ static int bnx2x_8727_common_init_phy(struct bnx2x *bp,
return 0;
}
+static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
+ u32 shmem_base_path[],
+ u32 shmem2_base_path[],
+ u8 phy_index,
+ u32 chip_id)
+{
+ u8 reset_gpios;
+ reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id);
+ bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
+ udelay(10);
+ bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH);
+ DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n",
+ reset_gpios);
+ return 0;
+}
+
+static int bnx2x_84833_pre_init_phy(struct bnx2x *bp,
+ struct bnx2x_phy *phy)
+{
+ u16 val, cnt;
+ /* Wait for FW completing its initialization. */
+ for (cnt = 0; cnt < 1500; cnt++) {
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_CTRL, &val);
+ if (!(val & (1<<15)))
+ break;
+ msleep(1);
+ }
+ if (cnt >= 1500) {
+ DP(NETIF_MSG_LINK, "84833 reset timeout\n");
+ return -EINVAL;
+ }
+
+ /* Put the port in super isolate mode. */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
+ val |= MDIO_84833_SUPER_ISOLATE;
+ bnx2x_cl45_write(bp, phy,
+ MDIO_CTL_DEVAD,
+ MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+
+ /* Save spirom version */
+ bnx2x_save_848xx_spirom_version(phy, bp, PORT_0);
+ return 0;
+}
+
+int bnx2x_pre_init_phy(struct bnx2x *bp,
+ u32 shmem_base,
+ u32 shmem2_base,
+ u32 chip_id)
+{
+ int rc = 0;
+ struct bnx2x_phy phy;
+ bnx2x_set_mdio_clk(bp, chip_id, PORT_0);
+ if (bnx2x_populate_phy(bp, EXT_PHY1, shmem_base, shmem2_base,
+ PORT_0, &phy)) {
+ DP(NETIF_MSG_LINK, "populate_phy failed\n");
+ return -EINVAL;
+ }
+ switch (phy.type) {
+ case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+ rc = bnx2x_84833_pre_init_phy(bp, &phy);
+ break;
+ default:
+ break;
+ }
+ return rc;
+}
+
static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
u32 shmem2_base_path[], u8 phy_index,
u32 ext_phy_type, u32 chip_id)
@@ -12281,7 +12457,9 @@ static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
* GPIO3's are linked, and so both need to be toggled
* to obtain required 2us pulse.
*/
- rc = bnx2x_84833_common_init_phy(bp, shmem_base_path, chip_id);
+ rc = bnx2x_84833_common_init_phy(bp, shmem_base_path,
+ shmem2_base_path,
+ phy_index, chip_id);
break;
case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
rc = -EINVAL;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
index 2a46e63..e02a68a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.h
@@ -479,7 +479,7 @@ int bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
/* Configure the COS to ETS according to BW and SP settings.*/
int bnx2x_ets_e3b0_config(const struct link_params *params,
const struct link_vars *vars,
- const struct bnx2x_ets_params *ets_params);
+ struct bnx2x_ets_params *ets_params);
/* Read pfc statistic*/
void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
u32 pfc_frames_sent[2],
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2f6361e..2545213 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -117,10 +117,6 @@ static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
-static int poll;
-module_param(poll, int, 0);
-MODULE_PARM_DESC(poll, " Use polling (for debug)");
-
static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
@@ -941,7 +937,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
- i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
+ i, j, rx_bd[1], rx_bd[0], sw_bd->data);
}
start = RX_SGE(fp->rx_sge_prod);
@@ -2318,12 +2314,6 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
-/* returns func by VN for current port */
-static inline int func_by_vn(struct bnx2x *bp, int vn)
-{
- return 2 * vn + BP_PORT(bp);
-}
-
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
{
struct rate_shaping_vars_per_vn m_rs_vn;
@@ -2475,22 +2465,6 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
"rate shaping and fairness are disabled\n");
}
-static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
-{
- int func;
- int vn;
-
- /* Set the attention towards other drivers on the same port */
- for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
- if (vn == BP_VN(bp))
- continue;
-
- func = func_by_vn(bp, vn);
- REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
- (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
- }
-}
-
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
@@ -2549,6 +2523,9 @@ void bnx2x__link_status_update(struct bnx2x *bp)
if (bp->state != BNX2X_STATE_OPEN)
return;
+ /* read updated dcb configuration */
+ bnx2x_dcbx_pmf_update(bp);
+
bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
if (bp->link_vars.link_up)
@@ -2643,15 +2620,6 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
return rc;
}
-static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
-{
-#ifdef BCM_CNIC
- /* Statistics are not supported for CNIC Clients at the moment */
- if (IS_FCOE_FP(fp))
- return false;
-#endif
- return true;
-}
void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
@@ -2695,11 +2663,11 @@ static inline unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
* parent connection). The statistics are zeroed when the parent
* connection is initialized.
*/
- if (stat_counter_valid(bp, fp)) {
- __set_bit(BNX2X_Q_FLG_STATS, &flags);
- if (zero_stats)
- __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
- }
+
+ __set_bit(BNX2X_Q_FLG_STATS, &flags);
+ if (zero_stats)
+ __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
+
return flags;
}
@@ -2808,8 +2776,8 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
/* This should be a maximum number of data bytes that may be
* placed on the BD (not including paddings).
*/
- rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN -
- IP_HEADER_ALIGNMENT_PADDING;
+ rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
+ BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
rxq_init->cl_qzone_id = fp->cl_qzone_id;
rxq_init->tpa_agg_sz = tpa_agg_size;
@@ -2940,6 +2908,143 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
*/
}
+#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
+
+static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
+{
+ struct eth_stats_info *ether_stat =
+ &bp->slowpath->drv_info_to_mcp.ether_stat;
+
+ /* leave last char as NULL */
+ memcpy(ether_stat->version, DRV_MODULE_VERSION,
+ ETH_STAT_INFO_VERSION_LEN - 1);
+
+ bp->fp[0].mac_obj.get_n_elements(bp, &bp->fp[0].mac_obj,
+ DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
+ ether_stat->mac_local);
+
+ ether_stat->mtu_size = bp->dev->mtu;
+
+ if (bp->dev->features & NETIF_F_RXCSUM)
+ ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
+ if (bp->dev->features & NETIF_F_TSO)
+ ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
+ ether_stat->feature_flags |= bp->common.boot_mode;
+
+ ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
+
+ ether_stat->txq_size = bp->tx_ring_size;
+ ether_stat->rxq_size = bp->rx_ring_size;
+}
+
+static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+ struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
+ struct fcoe_stats_info *fcoe_stat =
+ &bp->slowpath->drv_info_to_mcp.fcoe_stat;
+
+ memcpy(fcoe_stat->mac_local, bp->fip_mac, ETH_ALEN);
+
+ fcoe_stat->qos_priority =
+ app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
+
+ /* insert FCoE stats from ramrod response */
+ if (!NO_FCOE(bp)) {
+ struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
+ &bp->fw_stats_data->queue_stats[FCOE_IDX].
+ tstorm_queue_statistics;
+
+ struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
+ &bp->fw_stats_data->queue_stats[FCOE_IDX].
+ xstorm_queue_statistics;
+
+ struct fcoe_statistics_params *fw_fcoe_stat =
+ &bp->fw_stats_data->fcoe;
+
+ ADD_64(fcoe_stat->rx_bytes_hi, 0, fcoe_stat->rx_bytes_lo,
+ fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
+
+ ADD_64(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
+
+ ADD_64(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
+
+ ADD_64(fcoe_stat->rx_bytes_hi,
+ fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
+ fcoe_stat->rx_bytes_lo,
+ fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
+
+ ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+ fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
+
+ ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_ucast_pkts);
+
+ ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_bcast_pkts);
+
+ ADD_64(fcoe_stat->rx_frames_hi, 0, fcoe_stat->rx_frames_lo,
+ fcoe_q_tstorm_stats->rcv_mcast_pkts);
+
+ ADD_64(fcoe_stat->tx_bytes_hi, 0, fcoe_stat->tx_bytes_lo,
+ fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
+
+ ADD_64(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
+
+ ADD_64(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
+
+ ADD_64(fcoe_stat->tx_bytes_hi,
+ fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
+ fcoe_stat->tx_bytes_lo,
+ fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
+
+ ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+ fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
+
+ ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->ucast_pkts_sent);
+
+ ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->bcast_pkts_sent);
+
+ ADD_64(fcoe_stat->tx_frames_hi, 0, fcoe_stat->tx_frames_lo,
+ fcoe_q_xstorm_stats->mcast_pkts_sent);
+ }
+
+ /* ask L5 driver to add data to the struct */
+ bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
+#endif
+}
+
+static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+ struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
+ struct iscsi_stats_info *iscsi_stat =
+ &bp->slowpath->drv_info_to_mcp.iscsi_stat;
+
+ memcpy(iscsi_stat->mac_local, bp->cnic_eth_dev.iscsi_mac, ETH_ALEN);
+
+ iscsi_stat->qos_priority =
+ app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
+
+ /* ask L5 driver to add data to the struct */
+ bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
+#endif
+}
+
/* called due to MCP event (on pmf):
* reread new bandwidth configuration
* configure FW
@@ -2960,6 +3065,50 @@ static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
}
+static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
+{
+ enum drv_info_opcode op_code;
+ u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
+
+ /* if drv_info version supported by MFW doesn't match - send NACK */
+ if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
+ return;
+ }
+
+ op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
+ DRV_INFO_CONTROL_OP_CODE_SHIFT;
+
+ memset(&bp->slowpath->drv_info_to_mcp, 0,
+ sizeof(union drv_info_to_mcp));
+
+ switch (op_code) {
+ case ETH_STATS_OPCODE:
+ bnx2x_drv_info_ether_stat(bp);
+ break;
+ case FCOE_STATS_OPCODE:
+ bnx2x_drv_info_fcoe_stat(bp);
+ break;
+ case ISCSI_STATS_OPCODE:
+ bnx2x_drv_info_iscsi_stat(bp);
+ break;
+ default:
+ /* if op code isn't supported - send NACK */
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
+ return;
+ }
+
+ /* if we got drv_info attn from MFW then these fields are defined in
+ * shmem2 for sure
+ */
+ SHMEM2_WR(bp, drv_info_host_addr_lo,
+ U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
+ SHMEM2_WR(bp, drv_info_host_addr_hi,
+ U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
+
+ bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
+}
+
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
@@ -3318,6 +3467,17 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp)
netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
" the driver to shutdown the card to prevent permanent"
" damage. Please contact OEM Support for assistance\n");
+
+ /*
+	 * Schedule device reset (unload).
+	 * Some boards draw enough power when the driver is up to
+	 * overheat if the fan fails.
+ */
+ smp_mb__before_clear_bit();
+ set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+
}
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -3456,6 +3616,8 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
if (val & DRV_STATUS_SET_MF_BW)
bnx2x_set_mf_bw(bp);
+ if (val & DRV_STATUS_DRV_INFO_REQ)
+ bnx2x_handle_drv_info_req(bp);
if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
bnx2x_pmf_update(bp);
@@ -4668,20 +4830,11 @@ void bnx2x_drv_pulse(struct bnx2x *bp)
static void bnx2x_timer(unsigned long data)
{
- u8 cos;
struct bnx2x *bp = (struct bnx2x *) data;
if (!netif_running(bp->dev))
return;
- if (poll) {
- struct bnx2x_fastpath *fp = &bp->fp[0];
-
- for_each_cos_in_tx_queue(fp, cos)
- bnx2x_tx_int(bp, &fp->txdata[cos]);
- bnx2x_rx_int(fp, 1000);
- }
-
if (!BP_NOMCP(bp)) {
int mb_idx = BP_FW_MB_IDX(bp);
u32 drv_pulse;
@@ -5247,7 +5400,7 @@ static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
u8 cos;
unsigned long q_type = 0;
u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
-
+ fp->rx_queue = fp_idx;
fp->cid = fp_idx;
fp->cl_id = bnx2x_fp_cl_id(fp);
fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
@@ -6856,13 +7009,16 @@ void bnx2x_free_mem(struct bnx2x *bp)
static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
{
int num_groups;
+ int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
- /* number of eth_queues */
- u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp);
+ /* number of queues for statistics is number of eth queues + FCoE */
+ u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
/* Total number of FW statistics requests =
- * 1 for port stats + 1 for PF stats + num_eth_queues */
- bp->fw_stats_num = 2 + num_queue_stats;
+ * 1 for port stats + 1 for PF stats + potential 1 for FCoE stats +
+ * num of queues
+ */
+ bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
/* Request is built from stats_query_header and an array of
@@ -6870,8 +7026,8 @@ static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
* STATS_QUERY_CMD_COUNT rules. The real number or requests is
* configured in the stats_query_header.
*/
- num_groups = (2 + num_queue_stats) / STATS_QUERY_CMD_COUNT +
- (((2 + num_queue_stats) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
+ num_groups = ((bp->fw_stats_num) / STATS_QUERY_CMD_COUNT) +
+ (((bp->fw_stats_num) % STATS_QUERY_CMD_COUNT) ? 1 : 0);
bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
num_groups * sizeof(struct stats_query_cmd_group);
@@ -6880,9 +7036,13 @@ static inline int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
*
* stats_counter holds per-STORM counters that are incremented
* when STORM has finished with the current request.
+ *
+	 * Memory for FCoE offloaded statistics is counted anyway,
+	 * even if the statistics will not be sent.
*/
bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
sizeof(struct per_pf_stats) +
+ sizeof(struct fcoe_statistics_params) +
sizeof(struct per_queue_stats) * num_queue_stats +
sizeof(struct stats_counter);
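The group count above is a plain ceiling division of the request count. A small sketch with made-up numbers; STATS_QUERY_CMD_COUNT is set to 16 here purely for illustration:

#include <stdio.h>

#define STATS_QUERY_CMD_COUNT 16 /* hypothetical value, example only */

int main(void)
{
	/* e.g. port stats + PF stats + FCoE stats + (8 eth queues + FCoE queue) */
	int fw_stats_num = 2 + 1 + (8 + 1);
	int num_groups = (fw_stats_num / STATS_QUERY_CMD_COUNT) +
			 ((fw_stats_num % STATS_QUERY_CMD_COUNT) ? 1 : 0);

	printf("fw_stats_num=%d num_groups=%d\n", fw_stats_num, num_groups); /* 12, 1 */
	return 0;
}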
@@ -7025,6 +7185,13 @@ int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
{
unsigned long ramrod_flags = 0;
+#ifdef BCM_CNIC
+ if (is_zero_ether_addr(bp->dev->dev_addr) && IS_MF_ISCSI_SD(bp)) {
+ DP(NETIF_MSG_IFUP, "Ignoring Zero MAC for iSCSI SD mode\n");
+ return 0;
+ }
+#endif
+
DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
@@ -8522,6 +8689,17 @@ sp_rtnl_not_reset:
if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
+ /*
+	 * In case of fan failure we need to reset the device even if the
+	 * "stop on error" debug flag is set, since we are trying to prevent
+	 * permanent overheating damage.
+ */
+ if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
+ DP(BNX2X_MSG_SP, "fan failure detected. Unloading driver\n");
+ netif_device_detach(bp->dev);
+ bnx2x_close(bp->dev);
+ }
+
sp_rtnl_exit:
rtnl_unlock();
}
@@ -8708,7 +8886,7 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
- u32 val, val2, val3, val4, id;
+ u32 val, val2, val3, val4, id, boot_mode;
u16 pmc;
/* Get the chip revision id and number. */
@@ -8817,6 +8995,26 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
bp->link_params.feature_config_flags |=
(val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
+ bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
+ BC_SUPPORTS_PFC_STATS : 0;
+
+ boot_mode = SHMEM_RD(bp,
+ dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
+ PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
+ switch (boot_mode) {
+ case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
+ bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
+ break;
+ case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
+ bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
+ break;
+ case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
+ bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
+ break;
+ case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
+ bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
+ break;
+ }
pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
@@ -9267,22 +9465,43 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
bp->common.shmem2_base);
}
-#ifdef BCM_CNIC
-static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+void bnx2x_get_iscsi_info(struct bnx2x *bp)
{
+#ifdef BCM_CNIC
int port = BP_PORT(bp);
- int func = BP_ABS_FUNC(bp);
u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
drv_lic_key[port].max_iscsi_conn);
- u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
- drv_lic_key[port].max_fcoe_conn);
- /* Get the number of maximum allowed iSCSI and FCoE connections */
+ /* Get the number of maximum allowed iSCSI connections */
bp->cnic_eth_dev.max_iscsi_conn =
(max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
+ BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
+ bp->cnic_eth_dev.max_iscsi_conn);
+
+ /*
+ * If maximum allowed number of connections is zero -
+ * disable the feature.
+ */
+ if (!bp->cnic_eth_dev.max_iscsi_conn)
+ bp->flags |= NO_ISCSI_FLAG;
+#else
+ bp->flags |= NO_ISCSI_FLAG;
+#endif
+}
+
+static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp)
+{
+#ifdef BCM_CNIC
+ int port = BP_PORT(bp);
+ int func = BP_ABS_FUNC(bp);
+
+ u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
+ drv_lic_key[port].max_fcoe_conn);
+
+ /* Get the number of maximum allowed FCoE connections */
bp->cnic_eth_dev.max_fcoe_conn =
(max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
@@ -9334,21 +9553,29 @@ static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
}
}
- BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
- bp->cnic_eth_dev.max_iscsi_conn,
- bp->cnic_eth_dev.max_fcoe_conn);
+ BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
/*
* If maximum allowed number of connections is zero -
* disable the feature.
*/
- if (!bp->cnic_eth_dev.max_iscsi_conn)
- bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
-
if (!bp->cnic_eth_dev.max_fcoe_conn)
bp->flags |= NO_FCOE_FLAG;
-}
+#else
+ bp->flags |= NO_FCOE_FLAG;
#endif
+}
+
+static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
+{
+ /*
+	 * iSCSI may be dynamically disabled, but reading the
+	 * info here lets the driver decrease memory usage
+	 * if the feature is disabled for good.
+ */
+ bnx2x_get_iscsi_info(bp);
+ bnx2x_get_fcoe_info(bp);
+}
static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
{
@@ -9374,7 +9601,8 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
#ifdef BCM_CNIC
- /* iSCSI and FCoE NPAR MACs: if there is no either iSCSI or
+ /*
+	 * iSCSI and FCoE NPAR MACs: if there is no iSCSI or
* FCoE MAC then the appropriate feature should be disabled.
*/
if (IS_MF_SI(bp)) {
@@ -9396,11 +9624,22 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
val = MF_CFG_RD(bp, func_ext_config[func].
fcoe_mac_addr_lower);
bnx2x_set_mac_buf(fip_mac, val, val2);
- BNX2X_DEV_INFO("Read FCoE L2 MAC to %pM\n",
+ BNX2X_DEV_INFO("Read FCoE L2 MAC: %pM\n",
fip_mac);
} else
bp->flags |= NO_FCOE_FLAG;
+ } else { /* SD mode */
+ if (BNX2X_IS_MF_PROTOCOL_ISCSI(bp)) {
+ /* use primary mac as iscsi mac */
+ memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
+ /* Zero primary MAC configuration */
+ memset(bp->dev->dev_addr, 0, ETH_ALEN);
+
+ BNX2X_DEV_INFO("SD ISCSI MODE\n");
+ BNX2X_DEV_INFO("Read iSCSI MAC: %pM\n",
+ iscsi_mac);
+ }
}
#endif
} else {
@@ -9449,7 +9688,7 @@ static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
}
#endif
- if (!is_valid_ether_addr(bp->dev->dev_addr))
+ if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
dev_err(&bp->pdev->dev,
"bad Ethernet MAC address configuration: "
"%pM, change it manually before bringing up "
@@ -9661,9 +9900,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
/* Get MAC addresses */
bnx2x_get_mac_hwinfo(bp);
-#ifdef BCM_CNIC
bnx2x_get_cnic_info(bp);
-#endif
/* Get current FW pulse sequence */
if (!BP_NOMCP(bp)) {
@@ -9681,30 +9918,49 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
int cnt, i, block_end, rodi;
- char vpd_data[BNX2X_VPD_LEN+1];
+ char vpd_start[BNX2X_VPD_LEN+1];
char str_id_reg[VENDOR_ID_LEN+1];
char str_id_cap[VENDOR_ID_LEN+1];
+ char *vpd_data;
+ char *vpd_extended_data = NULL;
u8 len;
- cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
+ cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
if (cnt < BNX2X_VPD_LEN)
goto out_not_found;
- i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
+ /* VPD RO tag should be first tag after identifier string, hence
+ * we should be able to find it in first BNX2X_VPD_LEN chars
+ */
+ i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
PCI_VPD_LRDT_RO_DATA);
if (i < 0)
goto out_not_found;
-
block_end = i + PCI_VPD_LRDT_TAG_SIZE +
- pci_vpd_lrdt_size(&vpd_data[i]);
+ pci_vpd_lrdt_size(&vpd_start[i]);
i += PCI_VPD_LRDT_TAG_SIZE;
- if (block_end > BNX2X_VPD_LEN)
- goto out_not_found;
+ if (block_end > BNX2X_VPD_LEN) {
+ vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
+ if (vpd_extended_data == NULL)
+ goto out_not_found;
+
+ /* read rest of vpd image into vpd_extended_data */
+ memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
+ cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
+ block_end - BNX2X_VPD_LEN,
+ vpd_extended_data + BNX2X_VPD_LEN);
+ if (cnt < (block_end - BNX2X_VPD_LEN))
+ goto out_not_found;
+ vpd_data = vpd_extended_data;
+ } else
+ vpd_data = vpd_start;
+
+ /* now vpd_data holds full vpd content in both cases */
rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
PCI_VPD_RO_KEYWORD_MFR_ID);
@@ -9736,9 +9992,11 @@ static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
bp->fw_ver[len] = ' ';
}
}
+ kfree(vpd_extended_data);
return;
}
out_not_found:
+ kfree(vpd_extended_data);
return;
}
@@ -9792,7 +10050,6 @@ static void __devinit bnx2x_set_modes_bitmap(struct bnx2x *bp)
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
int func;
- int timer_interval;
int rc;
mutex_init(&bp->port.phy_mutex);
@@ -9840,15 +10097,20 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
bp->multi_mode = multi_mode;
+ bp->disable_tpa = disable_tpa;
+
+#ifdef BCM_CNIC
+ bp->disable_tpa |= IS_MF_ISCSI_SD(bp);
+#endif
+
/* Set TPA flags */
- if (disable_tpa) {
+ if (bp->disable_tpa) {
bp->flags &= ~TPA_ENABLE_FLAG;
bp->dev->features &= ~NETIF_F_LRO;
} else {
bp->flags |= TPA_ENABLE_FLAG;
bp->dev->features |= NETIF_F_LRO;
}
- bp->disable_tpa = disable_tpa;
if (CHIP_IS_E1(bp))
bp->dropless_fc = 0;
@@ -9863,8 +10125,7 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
- timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
- bp->current_interval = (poll ? poll : timer_interval);
+ bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
init_timer(&bp->timer);
bp->timer.expires = jiffies + bp->current_interval;
@@ -9965,7 +10226,7 @@ static int bnx2x_open(struct net_device *dev)
}
/* called with rtnl_lock */
-static int bnx2x_close(struct net_device *dev)
+int bnx2x_close(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -10119,6 +10380,11 @@ void bnx2x_set_rx_mode(struct net_device *dev)
}
bp->rx_mode = rx_mode;
+#ifdef BCM_CNIC
+ /* handle ISCSI SD mode */
+ if (IS_MF_ISCSI_SD(bp))
+ bp->rx_mode = BNX2X_RX_MODE_NONE;
+#endif
/* Schedule the rx_mode command */
if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
@@ -10198,6 +10464,15 @@ static void poll_bnx2x(struct net_device *dev)
}
#endif
+static int bnx2x_validate_addr(struct net_device *dev)
+{
+ struct bnx2x *bp = netdev_priv(dev);
+
+ if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr))
+ return -EADDRNOTAVAIL;
+ return 0;
+}
+
static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_open = bnx2x_open,
.ndo_stop = bnx2x_close,
@@ -10205,7 +10480,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
.ndo_select_queue = bnx2x_select_queue,
.ndo_set_rx_mode = bnx2x_set_rx_mode,
.ndo_set_mac_address = bnx2x_change_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
+ .ndo_validate_addr = bnx2x_validate_addr,
.ndo_do_ioctl = bnx2x_ioctl,
.ndo_change_mtu = bnx2x_change_mtu,
.ndo_fix_features = bnx2x_fix_features,
@@ -10246,6 +10521,9 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
{
struct bnx2x *bp;
int rc;
+ bool chip_is_e1x = (board_type == BCM57710 ||
+ board_type == BCM57711 ||
+ board_type == BCM57711E);
SET_NETDEV_DEV(dev, &pdev->dev);
bp = netdev_priv(dev);
@@ -10334,7 +10612,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
- if (CHIP_IS_E1x(bp)) {
+ if (chip_is_e1x) {
REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
@@ -10345,9 +10623,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
* Enable internal target-read (in case we are probed after PF FLR).
* Must be done prior to any BAR read access. Only for 57712 and up
*/
- if (board_type != BCM57710 &&
- board_type != BCM57711 &&
- board_type != BCM57711E)
+ if (!chip_is_e1x)
REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
/* Reset the load counter */
@@ -10823,8 +11099,8 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
#ifdef BCM_CNIC
- /* disable FCOE L2 queue for E1x and E3*/
- if (CHIP_IS_E1x(bp) || CHIP_IS_E3(bp))
+ /* disable FCOE L2 queue for E1x */
+ if (CHIP_IS_E1x(bp))
bp->flags |= NO_FCOE_FLAG;
#endif
@@ -11486,6 +11762,38 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
smp_mb__after_atomic_inc();
break;
}
+ case DRV_CTL_ULP_REGISTER_CMD: {
+ int ulp_type = ctl->data.ulp_type;
+
+ if (CHIP_IS_E3(bp)) {
+ int idx = BP_FW_MB_IDX(bp);
+ u32 cap;
+
+ cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+ if (ulp_type == CNIC_ULP_ISCSI)
+ cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
+ else if (ulp_type == CNIC_ULP_FCOE)
+ cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
+ SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+ }
+ break;
+ }
+ case DRV_CTL_ULP_UNREGISTER_CMD: {
+ int ulp_type = ctl->data.ulp_type;
+
+ if (CHIP_IS_E3(bp)) {
+ int idx = BP_FW_MB_IDX(bp);
+ u32 cap;
+
+ cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
+ if (ulp_type == CNIC_ULP_ISCSI)
+ cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
+ else if (ulp_type == CNIC_ULP_FCOE)
+ cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
+ SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
+ }
+ break;
+ }
default:
BNX2X_ERR("unknown command %x\n", ctl->cmd);
@@ -11561,7 +11869,7 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
mutex_lock(&bp->cnic_mutex);
cp->drv_state = 0;
- rcu_assign_pointer(bp->cnic_ops, NULL);
+ RCU_INIT_POINTER(bp->cnic_ops, NULL);
mutex_unlock(&bp->cnic_mutex);
synchronize_rcu();
kfree(bp->cnic_kwq);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index e58073e..dddbcf6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -160,8 +160,11 @@
#define BRB1_REG_PAUSE_HIGH_THRESHOLD_1 0x6007c
/* [RW 10] Write client 0: Assert pause threshold. */
#define BRB1_REG_PAUSE_LOW_THRESHOLD_0 0x60068
-#define BRB1_REG_PAUSE_LOW_THRESHOLD_1 0x6006c
-/* [R 24] The number of full blocks occupied by port. */
+/* [RW 1] Indicates whether to use per-class guaranty mode (new mode) or per-MAC
+ * guaranty mode (backwards-compatible mode). 0=per-MAC guaranty mode (BC
+ * mode). 1=per-class guaranty mode (new mode). */
+#define BRB1_REG_PER_CLASS_GUARANTY_MODE 0x60268
+/* [R 24] The number of full blocks occupied by port. */
#define BRB1_REG_PORT_NUM_OCC_BLOCKS_0 0x60094
/* [RW 1] Reset the design by software. */
#define BRB1_REG_SOFT_RESET 0x600dc
@@ -1619,6 +1622,14 @@
register bits. */
#define MISC_REG_LCPLL_CTRL_1 0xa2a4
#define MISC_REG_LCPLL_CTRL_REG_2 0xa2a8
+/* [RW 1] LCPLL power down. Global register. Active High. Reset on POR
+ * reset. */
+#define MISC_REG_LCPLL_E40_PWRDWN 0xaa74
+/* [RW 1] LCPLL VCO reset. Global register. Active Low Reset on POR reset. */
+#define MISC_REG_LCPLL_E40_RESETB_ANA 0xaa78
+/* [RW 1] LCPLL post-divider reset. Global register. Active Low Reset on POR
+ * reset. */
+#define MISC_REG_LCPLL_E40_RESETB_DIG 0xaa7c
/* [RW 4] Interrupt mask register #0 read/write */
#define MISC_REG_MISC_INT_MASK 0xa388
/* [RW 1] Parity mask register #0 read/write */
@@ -1754,6 +1765,7 @@
* is compared to the value on ctrl_md_devad. Drives output
* misc_xgxs0_phy_addr. Global register. */
#define MISC_REG_WC0_CTRL_PHY_ADDR 0xa9cc
+#define MISC_REG_WC0_RESET 0xac30
/* [RW 2] XMAC Core port mode. Indicates the number of ports on the system
side. This should be less than or equal to phy_port_mode; if some of the
ports are not used. This enables reduction of frequency on the core side.
@@ -2164,6 +2176,7 @@
* set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
* accommodate the 9 input clients to ETS arbiter. */
#define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB 0x18684
+#define NIG_REG_P1_HWPFC_ENABLE 0x181d0
#define NIG_REG_P1_MAC_IN_EN 0x185c0
/* [RW 1] Output enable for TX MAC interface */
#define NIG_REG_P1_MAC_OUT_EN 0x185c4
@@ -6823,11 +6836,13 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_COPPER 0x0000
#define MDIO_CTL_REG_84823_MEDIA_PRIORITY_FIBER 0x0100
#define MDIO_CTL_REG_84823_MEDIA_FIBER_1G 0x1000
-#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005
-#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080
-
-#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
-#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
+#define MDIO_CTL_REG_84823_USER_CTRL_REG 0x4005
+#define MDIO_CTL_REG_84823_USER_CTRL_CMS 0x0080
+#define MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH 0xa82b
+#define MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ 0x2f
+#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
+#define MDIO_PMA_REG_84833_CTL_LED_CTL_1 0xa8ec
+#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
/* BCM84833 only */
#define MDIO_84833_TOP_CFG_XGPHY_STRAP1 0x401a
@@ -6838,26 +6853,35 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_84833_TOP_CFG_SCRATCH_REG2 0x4007
#define MDIO_84833_TOP_CFG_SCRATCH_REG3 0x4008
#define MDIO_84833_TOP_CFG_SCRATCH_REG4 0x4009
-#define MDIO_84833_TOP_CFG_DATA3_REG 0x4011
-#define MDIO_84833_TOP_CFG_DATA4_REG 0x4012
+#define MDIO_84833_TOP_CFG_SCRATCH_REG26 0x4037
+#define MDIO_84833_TOP_CFG_SCRATCH_REG27 0x4038
+#define MDIO_84833_TOP_CFG_SCRATCH_REG28 0x4039
+#define MDIO_84833_TOP_CFG_SCRATCH_REG29 0x403a
+#define MDIO_84833_TOP_CFG_SCRATCH_REG30 0x403b
+#define MDIO_84833_TOP_CFG_SCRATCH_REG31 0x403c
+#define MDIO_84833_CMD_HDLR_COMMAND MDIO_84833_TOP_CFG_SCRATCH_REG0
+#define MDIO_84833_CMD_HDLR_STATUS MDIO_84833_TOP_CFG_SCRATCH_REG26
+#define MDIO_84833_CMD_HDLR_DATA1 MDIO_84833_TOP_CFG_SCRATCH_REG27
+#define MDIO_84833_CMD_HDLR_DATA2 MDIO_84833_TOP_CFG_SCRATCH_REG28
+#define MDIO_84833_CMD_HDLR_DATA3 MDIO_84833_TOP_CFG_SCRATCH_REG29
+#define MDIO_84833_CMD_HDLR_DATA4 MDIO_84833_TOP_CFG_SCRATCH_REG30
+#define MDIO_84833_CMD_HDLR_DATA5 MDIO_84833_TOP_CFG_SCRATCH_REG31
/* Mailbox command set used by 84833. */
-#define PHY84833_DIAG_CMD_PAIR_SWAP_CHANGE 0x2
+#define PHY84833_CMD_SET_PAIR_SWAP 0x8001
+#define PHY84833_CMD_GET_EEE_MODE 0x8008
+#define PHY84833_CMD_SET_EEE_MODE 0x8009
/* Mailbox status set used by 84833. */
-#define PHY84833_CMD_RECEIVED 0x0001
-#define PHY84833_CMD_IN_PROGRESS 0x0002
-#define PHY84833_CMD_COMPLETE_PASS 0x0004
-#define PHY84833_CMD_COMPLETE_ERROR 0x0008
-#define PHY84833_CMD_OPEN_FOR_CMDS 0x0010
-#define PHY84833_CMD_SYSTEM_BOOT 0x0020
-#define PHY84833_CMD_NOT_OPEN_FOR_CMDS 0x0040
-#define PHY84833_CMD_CLEAR_COMPLETE 0x0080
-#define PHY84833_CMD_OPEN_OVERRIDE 0xa5a5
-
+#define PHY84833_STATUS_CMD_RECEIVED 0x0001
+#define PHY84833_STATUS_CMD_IN_PROGRESS 0x0002
+#define PHY84833_STATUS_CMD_COMPLETE_PASS 0x0004
+#define PHY84833_STATUS_CMD_COMPLETE_ERROR 0x0008
+#define PHY84833_STATUS_CMD_OPEN_FOR_CMDS 0x0010
+#define PHY84833_STATUS_CMD_SYSTEM_BOOT 0x0020
+#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS 0x0040
+#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080
+#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5
-/* 84833 F/W Feature Commands */
-#define PHY84833_DIAG_CMD_GET_EEE_MODE 0x27
-#define PHY84833_DIAG_CMD_SET_EEE_MODE 0x28
/* Warpcore clause 45 addressing */
#define MDIO_WC_DEVAD 0x3
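The renamed 84833 defines above describe a firmware mailbox: a command and its arguments go through the MDIO_84833_CMD_HDLR_COMMAND/DATA* scratch registers, and the firmware reports progress through the PHY84833_STATUS_CMD_* codes. A minimal handshake sketch follows; cl45_read()/cl45_write() and the devad argument are hypothetical stand-ins for the driver's clause-45 MDIO accessors, and the exact bnx2x polling sequence may differ.

/* Hedged sketch only: the accessors below are assumed, not real bnx2x API. */
extern unsigned short cl45_read(unsigned char devad, unsigned short reg);
extern void cl45_write(unsigned char devad, unsigned short reg, unsigned short val);

static int phy84833_mailbox_cmd(unsigned char devad, unsigned short cmd,
				unsigned short data1)
{
	unsigned short status = 0;
	int i;

	/* wait for the firmware command handler to be free */
	for (i = 0; i < 100; i++) {
		status = cl45_read(devad, MDIO_84833_CMD_HDLR_STATUS);
		if (status == PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
			break;
	}
	if (status != PHY84833_STATUS_CMD_OPEN_FOR_CMDS)
		return -1;

	/* load the argument, then kick the command */
	cl45_write(devad, MDIO_84833_CMD_HDLR_DATA1, data1);
	cl45_write(devad, MDIO_84833_CMD_HDLR_COMMAND, cmd);

	/* poll until the handler reports pass or error */
	for (i = 0; i < 100; i++) {
		status = cl45_read(devad, MDIO_84833_CMD_HDLR_STATUS);
		if (status == PHY84833_STATUS_CMD_COMPLETE_PASS ||
		    status == PHY84833_STATUS_CMD_COMPLETE_ERROR)
			break;
	}
	return (status == PHY84833_STATUS_CMD_COMPLETE_PASS) ? 0 : -1;
}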
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 1451769..94110e9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -30,6 +30,8 @@
#define BNX2X_MAX_EMUL_MULTI 16
+#define MAC_LEADING_ZERO_CNT (ALIGN(ETH_ALEN, sizeof(u32)) - ETH_ALEN)
+
/**** Exe Queue interfaces ****/
/**
@@ -48,6 +50,7 @@ static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
int exe_len,
union bnx2x_qable_obj *owner,
exe_q_validate validate,
+ exe_q_remove remove,
exe_q_optimize optimize,
exe_q_execute exec,
exe_q_get get)
@@ -64,6 +67,7 @@ static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
/* Owner specific callbacks */
o->validate = validate;
+ o->remove = remove;
o->optimize = optimize;
o->execute = exec;
o->get = get;
@@ -441,6 +445,36 @@ static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
return true;
}
+static int bnx2x_get_n_elements(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
+ int n, u8 *buf)
+{
+ struct bnx2x_vlan_mac_registry_elem *pos;
+ u8 *next = buf;
+ int counter = 0;
+
+ /* traverse list */
+ list_for_each_entry(pos, &o->head, link) {
+ if (counter < n) {
+ /* place leading zeroes in buffer */
+ memset(next, 0, MAC_LEADING_ZERO_CNT);
+
+ /* place mac after leading zeroes*/
+ memcpy(next + MAC_LEADING_ZERO_CNT, pos->u.mac.mac,
+ ETH_ALEN);
+
+ /* calculate address of next element and
+ * advance counter
+ */
+ counter++;
+ next = buf + counter * ALIGN(ETH_ALEN, sizeof(u32));
+
+ DP(BNX2X_MSG_SP, "copied element number %d to address %p element was %pM\n",
+ counter, next, pos->u.mac.mac);
+ }
+ }
+ return counter * ETH_ALEN;
+}
+
/* check_add() callbacks */
static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
union bnx2x_classification_ramrod_data *data)
@@ -1308,6 +1342,35 @@ static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
}
}
+static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
+ union bnx2x_qable_obj *qo,
+ struct bnx2x_exeq_elem *elem)
+{
+ int rc = 0;
+
+ /* If consumption wasn't required, nothing to do */
+ if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ &elem->cmd_data.vlan_mac.vlan_mac_flags))
+ return 0;
+
+ switch (elem->cmd_data.vlan_mac.cmd) {
+ case BNX2X_VLAN_MAC_ADD:
+ case BNX2X_VLAN_MAC_MOVE:
+ rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
+ break;
+ case BNX2X_VLAN_MAC_DEL:
+ rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (rc != true)
+ return -EINVAL;
+
+ return 0;
+}
+
/**
* bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
*
@@ -1769,8 +1832,14 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
- *vlan_mac_flags)
+ *vlan_mac_flags) {
+ rc = exeq->remove(bp, exeq->owner, exeq_pos);
+ if (rc) {
+ BNX2X_ERR("Failed to remove command\n");
+ return rc;
+ }
list_del(&exeq_pos->link);
+ }
}
spin_unlock_bh(&exeq->lock);
@@ -1876,6 +1945,7 @@ void bnx2x_init_mac_obj(struct bnx2x *bp,
bnx2x_exe_queue_init(bp,
&mac_obj->exe_queue, 1, qable_obj,
bnx2x_validate_vlan_mac,
+ bnx2x_remove_vlan_mac,
bnx2x_optimize_vlan_mac,
bnx2x_execute_vlan_mac,
bnx2x_exeq_get_mac);
@@ -1886,11 +1956,13 @@ void bnx2x_init_mac_obj(struct bnx2x *bp,
mac_obj->check_move = bnx2x_check_move;
mac_obj->ramrod_cmd =
RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
+ mac_obj->get_n_elements = bnx2x_get_n_elements;
/* Exe Queue */
bnx2x_exe_queue_init(bp,
&mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
qable_obj, bnx2x_validate_vlan_mac,
+ bnx2x_remove_vlan_mac,
bnx2x_optimize_vlan_mac,
bnx2x_execute_vlan_mac,
bnx2x_exeq_get_mac);
@@ -1930,6 +2002,7 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
bnx2x_exe_queue_init(bp,
&vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
qable_obj, bnx2x_validate_vlan_mac,
+ bnx2x_remove_vlan_mac,
bnx2x_optimize_vlan_mac,
bnx2x_execute_vlan_mac,
bnx2x_exeq_get_vlan);
@@ -1976,6 +2049,7 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
bnx2x_exe_queue_init(bp,
&vlan_mac_obj->exe_queue, 1, qable_obj,
bnx2x_validate_vlan_mac,
+ bnx2x_remove_vlan_mac,
bnx2x_optimize_vlan_mac,
bnx2x_execute_vlan_mac,
bnx2x_exeq_get_vlan_mac);
@@ -1992,6 +2066,7 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
&vlan_mac_obj->exe_queue,
CLASSIFY_RULES_COUNT,
qable_obj, bnx2x_validate_vlan_mac,
+ bnx2x_remove_vlan_mac,
bnx2x_optimize_vlan_mac,
bnx2x_execute_vlan_mac,
bnx2x_exeq_get_vlan_mac);
@@ -3342,7 +3417,7 @@ static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
if (!list_empty(&o->registry.exact_match.macs))
return 0;
- elem = kzalloc(sizeof(*elem)*len, GFP_ATOMIC);
+ elem = kcalloc(len, sizeof(*elem), GFP_ATOMIC);
if (!elem) {
BNX2X_ERR("Failed to allocate registry memory\n");
return -ENOMEM;
@@ -5526,7 +5601,7 @@ static inline int bnx2x_func_send_start(struct bnx2x *bp,
/* Fill the ramrod data with provided parameters */
rdata->function_mode = cpu_to_le16(start_params->mf_mode);
- rdata->sd_vlan_tag = start_params->sd_vlan_tag;
+ rdata->sd_vlan_tag = cpu_to_le16(start_params->sd_vlan_tag);
rdata->path_id = BP_PATH(bp);
rdata->network_cos_mode = start_params->network_cos_mode;
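The new bnx2x_get_n_elements() callback above packs each MAC into a u32-aligned slot: MAC_LEADING_ZERO_CNT evaluates to ALIGN(6, 4) - 6 = 2 pad bytes, so every entry occupies 8 bytes in the caller's buffer, while the function's return value counts only the 6 MAC bytes per entry. A standalone arithmetic check, using an ALIGN() equivalent to the kernel macro for power-of-two alignments:

#include <stdio.h>

#define ETH_ALEN		6
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))	/* power-of-two 'a' */
#define MAC_LEADING_ZERO_CNT	(ALIGN(ETH_ALEN, sizeof(unsigned int)) - ETH_ALEN)

int main(void)
{
	int n = 4;	/* number of MACs a caller wants copied out */

	/* each entry: 2 zero bytes of padding followed by the 6-byte MAC */
	printf("entry stride   : %zu bytes\n", ALIGN(ETH_ALEN, sizeof(unsigned int)));
	printf("leading zeroes : %zu bytes\n", MAC_LEADING_ZERO_CNT);
	printf("buffer needed  : %zu bytes for %d MACs\n",
	       n * ALIGN(ETH_ALEN, sizeof(unsigned int)), n);
	printf("bytes reported : %d (counter * ETH_ALEN)\n", n * ETH_ALEN);
	return 0;
}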
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 9a517c2..66da39f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -161,6 +161,10 @@ typedef int (*exe_q_validate)(struct bnx2x *bp,
union bnx2x_qable_obj *o,
struct bnx2x_exeq_elem *elem);
+typedef int (*exe_q_remove)(struct bnx2x *bp,
+ union bnx2x_qable_obj *o,
+ struct bnx2x_exeq_elem *elem);
+
/**
* @return positive if entry was optimized, 0 - if not, negative
* in case of an error.
@@ -203,11 +207,18 @@ struct bnx2x_exe_queue_obj {
*/
exe_q_validate validate;
+ /**
+ * Called before removing pending commands, cleaning allocated
+ * resources (e.g., credits from validate)
+ */
+ exe_q_remove remove;
/**
* This will try to cancel the current pending commands list
* considering the new command.
*
+ * Returns the number of optimized commands or a negative error code
+ *
* Must run under exe_queue->lock
*/
exe_q_optimize optimize;
@@ -285,6 +296,19 @@ struct bnx2x_vlan_mac_obj {
/* RAMROD command to be used */
int ramrod_cmd;
+ /* copy first n elements onto preallocated buffer
+ *
+ * @param n number of elements to get
+ * @param buf buffer preallocated by caller into which elements
+ * will be copied. Note elements are 4-byte aligned
+ * so buffer size must be able to accommodate the
+ * aligned elements.
+ *
+ * @return number of copied bytes
+ */
+ int (*get_n_elements)(struct bnx2x *bp, struct bnx2x_vlan_mac_obj *o,
+ int n, u8 *buf);
+
/**
* Checks if ADD-ramrod with the given params may be performed.
*
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 02ac6a7..a766b25 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -39,6 +39,17 @@ static inline long bnx2x_hilo(u32 *hiref)
#endif
}
+static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp)
+{
+ u16 res = sizeof(struct host_port_stats) >> 2;
+
+ /* if PFC stats are not supported by the MFW, don't DMA them */
+ if (!(bp->flags & BC_SUPPORTS_PFC_STATS))
+ res -= (sizeof(u32)*4) >> 2;
+
+ return res;
+}
+
/*
* Init service functions
*/
@@ -178,7 +189,8 @@ static void bnx2x_stats_pmf_update(struct bnx2x *bp)
DMAE_LEN32_RD_MAX * 4);
dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
DMAE_LEN32_RD_MAX * 4);
- dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
+ dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX;
+
dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
dmae->comp_val = DMAE_COMP_VAL;
@@ -217,7 +229,7 @@ static void bnx2x_port_stats_init(struct bnx2x *bp)
dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
dmae->dst_addr_lo = bp->port.port_stx >> 2;
dmae->dst_addr_hi = 0;
- dmae->len = sizeof(struct host_port_stats) >> 2;
+ dmae->len = bnx2x_get_port_stats_dma_len(bp);
dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
dmae->comp_addr_hi = 0;
dmae->comp_val = 1;
@@ -540,6 +552,13 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
UPDATE_STAT64(tx_stat_gterr,
tx_stat_dot3statsinternalmactransmiterrors);
UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl);
+
+ /* collect PFC stats */
+ pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi;
+ pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo;
+
+ pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi;
+ pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo;
}
estats->pause_frames_received_hi =
@@ -551,6 +570,15 @@ static void bnx2x_bmac_stats_update(struct bnx2x *bp)
pstats->mac_stx[1].tx_stat_outxoffsent_hi;
estats->pause_frames_sent_lo =
pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+
+ estats->pfc_frames_received_hi =
+ pstats->pfc_frames_rx_hi;
+ estats->pfc_frames_received_lo =
+ pstats->pfc_frames_rx_lo;
+ estats->pfc_frames_sent_hi =
+ pstats->pfc_frames_tx_hi;
+ estats->pfc_frames_sent_lo =
+ pstats->pfc_frames_tx_lo;
}
static void bnx2x_mstat_stats_update(struct bnx2x *bp)
@@ -571,6 +599,11 @@ static void bnx2x_mstat_stats_update(struct bnx2x *bp)
ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent);
ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone);
+ /* collect pfc stats */
+ ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi,
+ pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo);
+ ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi,
+ pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo);
ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets);
ADD_STAT64(stats_tx.tx_gt127,
@@ -628,6 +661,15 @@ static void bnx2x_mstat_stats_update(struct bnx2x *bp)
pstats->mac_stx[1].tx_stat_outxoffsent_hi;
estats->pause_frames_sent_lo =
pstats->mac_stx[1].tx_stat_outxoffsent_lo;
+
+ estats->pfc_frames_received_hi =
+ pstats->pfc_frames_rx_hi;
+ estats->pfc_frames_received_lo =
+ pstats->pfc_frames_rx_lo;
+ estats->pfc_frames_sent_hi =
+ pstats->pfc_frames_tx_hi;
+ estats->pfc_frames_sent_lo =
+ pstats->pfc_frames_tx_lo;
}
static void bnx2x_emac_stats_update(struct bnx2x *bp)
@@ -740,7 +782,7 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
estats->brb_drop_hi = pstats->brb_drop_hi;
estats->brb_drop_lo = pstats->brb_drop_lo;
- pstats->host_port_stats_start = ++pstats->host_port_stats_end;
+ pstats->host_port_stats_counter++;
if (!BP_NOMCP(bp)) {
u32 nig_timer_max =
@@ -1027,17 +1069,17 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
estats->rx_stat_ifhcinbadoctets_lo);
ADD_64(fstats->total_bytes_received_hi,
- tfunc->rcv_error_bytes.hi,
+ le32_to_cpu(tfunc->rcv_error_bytes.hi),
fstats->total_bytes_received_lo,
- tfunc->rcv_error_bytes.lo);
+ le32_to_cpu(tfunc->rcv_error_bytes.lo));
memcpy(estats, &(fstats->total_bytes_received_hi),
sizeof(struct host_func_stats) - 2*sizeof(u32));
ADD_64(estats->error_bytes_received_hi,
- tfunc->rcv_error_bytes.hi,
+ le32_to_cpu(tfunc->rcv_error_bytes.hi),
estats->error_bytes_received_lo,
- tfunc->rcv_error_bytes.lo);
+ le32_to_cpu(tfunc->rcv_error_bytes.lo));
ADD_64(estats->etherstatsoverrsizepkts_hi,
estats->rx_stat_dot3statsframestoolong_hi,
@@ -1265,7 +1307,7 @@ static void bnx2x_port_stats_stop(struct bnx2x *bp)
dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
dmae->dst_addr_lo = bp->port.port_stx >> 2;
dmae->dst_addr_hi = 0;
- dmae->len = sizeof(struct host_port_stats) >> 2;
+ dmae->len = bnx2x_get_port_stats_dma_len(bp);
if (bp->func_stx) {
dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
dmae->comp_addr_hi = 0;
@@ -1349,12 +1391,14 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
enum bnx2x_stats_state state;
if (unlikely(bp->panic))
return;
- bnx2x_stats_stm[bp->stats_state][event].action(bp);
+
spin_lock_bh(&bp->stats_lock);
state = bp->stats_state;
bp->stats_state = bnx2x_stats_stm[state][event].next_state;
spin_unlock_bh(&bp->stats_lock);
+ bnx2x_stats_stm[state][event].action(bp);
+
if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
state, event, bp->stats_state);
@@ -1380,7 +1424,7 @@ static void bnx2x_port_stats_base_init(struct bnx2x *bp)
dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
dmae->dst_addr_lo = bp->port.port_stx >> 2;
dmae->dst_addr_hi = 0;
- dmae->len = sizeof(struct host_port_stats) >> 2;
+ dmae->len = bnx2x_get_port_stats_dma_len(bp);
dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
dmae->comp_val = DMAE_COMP_VAL;
@@ -1457,6 +1501,7 @@ static void bnx2x_func_stats_base_update(struct bnx2x *bp)
static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
{
int i;
+ int first_queue_query_index;
struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr;
dma_addr_t cur_data_offset;
@@ -1512,14 +1557,40 @@ static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset));
cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset));
+ /**** FCoE FW statistics data ****/
+ if (!NO_FCOE(bp)) {
+ cur_data_offset = bp->fw_stats_data_mapping +
+ offsetof(struct bnx2x_fw_stats_data, fcoe);
+
+ cur_query_entry =
+ &bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX];
+
+ cur_query_entry->kind = STATS_TYPE_FCOE;
+ /* For FCoE query index is a DONT CARE */
+ cur_query_entry->index = BP_PORT(bp);
+ cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+ cur_query_entry->address.hi =
+ cpu_to_le32(U64_HI(cur_data_offset));
+ cur_query_entry->address.lo =
+ cpu_to_le32(U64_LO(cur_data_offset));
+ }
+
/**** Clients' queries ****/
cur_data_offset = bp->fw_stats_data_mapping +
offsetof(struct bnx2x_fw_stats_data, queue_stats);
+ /* first queue query index depends on whether the FCoE offloaded request
+ * will be included in the ramrod
+ */
+ if (!NO_FCOE(bp))
+ first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX;
+ else
+ first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX - 1;
+
for_each_eth_queue(bp, i) {
cur_query_entry =
&bp->fw_stats_req->
- query[BNX2X_FIRST_QUEUE_QUERY_IDX + i];
+ query[first_queue_query_index + i];
cur_query_entry->kind = STATS_TYPE_QUEUE;
cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]);
@@ -1531,6 +1602,21 @@ static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp)
cur_data_offset += sizeof(struct per_queue_stats);
}
+
+ /* add FCoE queue query if needed */
+ if (!NO_FCOE(bp)) {
+ cur_query_entry =
+ &bp->fw_stats_req->
+ query[first_queue_query_index + i];
+
+ cur_query_entry->kind = STATS_TYPE_QUEUE;
+ cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX]);
+ cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp));
+ cur_query_entry->address.hi =
+ cpu_to_le32(U64_HI(cur_data_offset));
+ cur_query_entry->address.lo =
+ cpu_to_le32(U64_LO(cur_data_offset));
+ }
}
void bnx2x_stats_init(struct bnx2x *bp)
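Two sizing decisions in the hunks above are easy to misread: bnx2x_get_port_stats_dma_len() works in 32-bit words (hence the >> 2 and the 4-word trim when the management firmware cannot supply PFC counters), and the per-queue statistics queries shift by one slot depending on whether an FCoE query is placed ahead of them. A small sketch of the resulting slot layout, with the index base and queue count chosen purely for illustration:

#include <stdio.h>

#define BNX2X_FIRST_QUEUE_QUERY_IDX	3	/* assumed value, not from the headers */

int main(void)
{
	int num_eth_queues = 4;			/* illustrative */
	int have_fcoe = 1;			/* models !NO_FCOE(bp) */
	int first = have_fcoe ? BNX2X_FIRST_QUEUE_QUERY_IDX
			      : BNX2X_FIRST_QUEUE_QUERY_IDX - 1;
	int i;

	for (i = 0; i < num_eth_queues; i++)
		printf("ETH queue %d -> query slot %d\n", i, first + i);
	if (have_fcoe)
		printf("FCoE queue  -> query slot %d\n", first + i);
	return 0;
}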
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index 5d8ce2f..683deb0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -193,6 +193,12 @@ struct bnx2x_eth_stats {
u32 total_tpa_aggregated_frames_lo;
u32 total_tpa_bytes_hi;
u32 total_tpa_bytes_lo;
+
+ /* PFC */
+ u32 pfc_frames_received_hi;
+ u32 pfc_frames_received_lo;
+ u32 pfc_frames_sent_hi;
+ u32 pfc_frames_sent_lo;
};
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 6f10c69..818a573 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -250,6 +250,21 @@ static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
return io->data;
}
+static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_eth_dev *ethdev = cp->ethdev;
+ struct drv_ctl_info info;
+
+ if (reg)
+ info.cmd = DRV_CTL_ULP_REGISTER_CMD;
+ else
+ info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
+
+ info.data.ulp_type = ulp_type;
+ ethdev->drv_ctl(dev->netdev, &info);
+}
+
static int cnic_in_use(struct cnic_sock *csk)
{
return test_bit(SK_F_INUSE, &csk->flags);
@@ -506,7 +521,7 @@ int cnic_unregister_driver(int ulp_type)
}
read_unlock(&cnic_dev_lock);
- rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
+ RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);
mutex_unlock(&cnic_lock);
synchronize_rcu();
@@ -563,6 +578,8 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
mutex_unlock(&cnic_lock);
+ cnic_ulp_ctl(dev, ulp_type, true);
+
return 0;
}
@@ -579,7 +596,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
}
mutex_lock(&cnic_lock);
if (rcu_dereference(cp->ulp_ops[ulp_type])) {
- rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
+ RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
cnic_put(dev);
} else {
pr_err("%s: device not registered to this ulp type %d\n",
@@ -602,6 +619,8 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
+ cnic_ulp_ctl(dev, ulp_type, false);
+
return 0;
}
EXPORT_SYMBOL(cnic_unregister_driver);
@@ -1342,7 +1361,7 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
if (ret == 1)
return 0;
- return -EBUSY;
+ return ret;
}
static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
@@ -1830,7 +1849,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
done:
cqes[0] = (struct kcqe *) &kcqe;
cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
- return ret;
+ return 0;
}
@@ -1928,7 +1947,7 @@ destroy_reply:
cqes[0] = (struct kcqe *) &kcqe;
cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
- return ret;
+ return 0;
}
static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
@@ -2494,6 +2513,57 @@ static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
return ret;
}
+static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct kcqe kcqe;
+ struct kcqe *cqes[1];
+ u32 cid;
+ u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
+ u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
+ int ulp_type;
+
+ cid = kwqe->kwqe_info0;
+ memset(&kcqe, 0, sizeof(kcqe));
+
+ if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
+ ulp_type = CNIC_ULP_ISCSI;
+ if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
+ cid = kwqe->kwqe_info1;
+
+ kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT;
+ kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
+ kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_NIC_ERROR;
+ kcqe.kcqe_info2 = cid;
+ cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0);
+
+ } else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
+ struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;
+ u32 kcqe_op;
+
+ ulp_type = CNIC_ULP_L4;
+ if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
+ kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE;
+ else if (opcode == L4_KWQE_OPCODE_VALUE_RESET)
+ kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP;
+ else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE)
+ kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
+ else
+ return;
+
+ kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) |
+ KCQE_FLAGS_LAYER_MASK_L4;
+ l4kcqe->status = L4_KCQE_COMPLETION_STATUS_NIC_ERROR;
+ l4kcqe->cid = cid;
+ cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id);
+ } else {
+ return;
+ }
+
+ cqes[0] = (struct kcqe *) &kcqe;
+ cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
+}
+
static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
struct kwqe *wqes[], u32 num_wqes)
{
@@ -2551,9 +2621,17 @@ static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
opcode);
break;
}
- if (ret < 0)
+ if (ret < 0) {
netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
opcode);
+
+ /* Possibly bnx2x parity error, send completion
+ * to ulp drivers with error code to speed up
+ * cleanup and reset recovery.
+ */
+ if (ret == -EIO || ret == -EAGAIN)
+ cnic_bnx2x_kwqe_err(dev, kwqe);
+ }
i += work;
}
return 0;
@@ -3052,9 +3130,26 @@ static void cnic_ulp_start(struct cnic_dev *dev)
}
}
+static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
+{
+ struct cnic_local *cp = dev->cnic_priv;
+ struct cnic_ulp_ops *ulp_ops;
+ int rc;
+
+ mutex_lock(&cnic_lock);
+ ulp_ops = cnic_ulp_tbl_prot(ulp_type);
+ if (ulp_ops && ulp_ops->cnic_get_stats)
+ rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
+ else
+ rc = -ENODEV;
+ mutex_unlock(&cnic_lock);
+ return rc;
+}
+
static int cnic_ctl(void *data, struct cnic_ctl_info *info)
{
struct cnic_dev *dev = data;
+ int ulp_type = CNIC_ULP_ISCSI;
switch (info->cmd) {
case CNIC_CTL_STOP_CMD:
@@ -3100,6 +3195,15 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
}
break;
}
+ case CNIC_CTL_FCOE_STATS_GET_CMD:
+ ulp_type = CNIC_ULP_FCOE;
+ /* fall through */
+ case CNIC_CTL_ISCSI_STATS_GET_CMD:
+ cnic_hold(dev);
+ cnic_copy_ulp_stats(dev, ulp_type);
+ cnic_put(dev);
+ break;
+
default:
return -EINVAL;
}
@@ -3475,12 +3579,16 @@ static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
struct flowi6 fl6;
memset(&fl6, 0, sizeof(fl6));
- ipv6_addr_copy(&fl6.daddr, &dst_addr->sin6_addr);
+ fl6.daddr = dst_addr->sin6_addr;
if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
fl6.flowi6_oif = dst_addr->sin6_scope_id;
*dst = ip6_route_output(&init_net, NULL, &fl6);
- if (*dst)
+ if ((*dst)->error) {
+ dst_release(*dst);
+ *dst = NULL;
+ return -ENETUNREACH;
+ } else
return 0;
#endif
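The IPv6 hunk above reflects two API points: flowi6 addresses are now copied by plain struct assignment (ipv6_addr_copy() was dropped), and ip6_route_output() never returns NULL, so failure must be detected through dst->error and the reference released explicitly. A condensed sketch of that lookup pattern, kernel-style and not meant as a drop-in:

static int resolve_v6_dst(struct sockaddr_in6 *dst_addr, struct dst_entry **dst)
{
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = dst_addr->sin6_addr;	/* plain struct copy, no helper */

	*dst = ip6_route_output(&init_net, NULL, &fl6);
	if ((*dst)->error) {			/* never NULL; check the error field */
		dst_release(*dst);		/* drop the reference we still hold */
		*dst = NULL;
		return -ENETUNREACH;
	}
	return 0;
}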
@@ -3804,6 +3912,9 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
case L4_KCQE_OPCODE_VALUE_RESET_COMP:
case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
+ if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_NIC_ERROR)
+ set_bit(SK_F_HW_ERR, &csk->flags);
+
cp->close_conn(csk, opcode);
break;
@@ -3931,7 +4042,9 @@ static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
case L4_KCQE_OPCODE_VALUE_RESET_COMP:
if (cnic_ready_to_close(csk, opcode)) {
- if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
+ if (test_bit(SK_F_HW_ERR, &csk->flags))
+ close_complete = 1;
+ else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
else
close_complete = 1;
@@ -4824,6 +4937,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
int func = CNIC_FUNC(cp), ret;
u32 pfid;
+ dev->stats_addr = ethdev->addr_drv_info_to_mcp;
cp->port_mode = CHIP_PORT_MODE_NONE;
if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
@@ -5134,7 +5248,7 @@ static void cnic_stop_hw(struct cnic_dev *dev)
}
cnic_shutdown_rings(dev);
clear_bit(CNIC_F_CNIC_UP, &dev->flags);
- rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
+ RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
synchronize_rcu();
cnic_cm_shutdown(dev);
cp->stop_hw(dev);
@@ -5288,6 +5402,8 @@ static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
cdev->pcidev = pdev;
cp->chip_id = ethdev->chip_id;
+ cdev->stats_addr = ethdev->addr_drv_info_to_mcp;
+
if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h
index 239de89..86936f6 100644
--- a/drivers/net/ethernet/broadcom/cnic_defs.h
+++ b/drivers/net/ethernet/broadcom/cnic_defs.h
@@ -85,6 +85,7 @@
/* KCQ (kernel completion queue) completion status */
#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0)
+#define L4_KCQE_COMPLETION_STATUS_NIC_ERROR (4)
#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93)
#define L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL (0x83)
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 79443e0..1517763 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -1,6 +1,6 @@
/* cnic_if.h: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2011 Broadcom Corporation
+ * Copyright (c) 2006-2012 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -12,8 +12,8 @@
#ifndef CNIC_IF_H
#define CNIC_IF_H
-#define CNIC_MODULE_VERSION "2.5.7"
-#define CNIC_MODULE_RELDATE "July 20, 2011"
+#define CNIC_MODULE_VERSION "2.5.8"
+#define CNIC_MODULE_RELDATE "Jan 3, 2012"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
@@ -86,6 +86,8 @@ struct kcqe {
#define CNIC_CTL_START_CMD 2
#define CNIC_CTL_COMPLETION_CMD 3
#define CNIC_CTL_STOP_ISCSI_CMD 4
+#define CNIC_CTL_FCOE_STATS_GET_CMD 5
+#define CNIC_CTL_ISCSI_STATS_GET_CMD 6
#define DRV_CTL_IO_WR_CMD 0x101
#define DRV_CTL_IO_RD_CMD 0x102
@@ -96,6 +98,8 @@ struct kcqe {
#define DRV_CTL_STOP_L2_CMD 0x107
#define DRV_CTL_RET_L2_SPQ_CREDIT_CMD 0x10c
#define DRV_CTL_ISCSI_STOPPED_CMD 0x10d
+#define DRV_CTL_ULP_REGISTER_CMD 0x10e
+#define DRV_CTL_ULP_UNREGISTER_CMD 0x10f
struct cnic_ctl_completion {
u32 cid;
@@ -133,6 +137,7 @@ struct drv_ctl_info {
struct drv_ctl_spq_credit credit;
struct drv_ctl_io io;
struct drv_ctl_l2_ring ring;
+ int ulp_type;
char bytes[MAX_DRV_CTL_DATA];
} data;
};
@@ -201,6 +206,7 @@ struct cnic_eth_dev {
struct kwqe_16 *[], u32);
int (*drv_ctl)(struct net_device *, struct drv_ctl_info *);
unsigned long reserved1[2];
+ union drv_info_to_mcp *addr_drv_info_to_mcp;
};
struct cnic_sockaddr {
@@ -255,6 +261,7 @@ struct cnic_sock {
#define SK_F_CONNECT_START 4
#define SK_F_IPV6 5
#define SK_F_CLOSING 7
+#define SK_F_HW_ERR 8
atomic_t ref_count;
u32 state;
@@ -297,6 +304,8 @@ struct cnic_dev {
int max_fcoe_conn;
int max_rdma_conn;
+ union drv_info_to_mcp *stats_addr;
+
void *cnic_priv;
};
@@ -326,6 +335,7 @@ struct cnic_ulp_ops {
void (*cm_remote_abort)(struct cnic_sock *);
int (*iscsi_nl_send_msg)(void *ulp_ctx, u32 msg_type,
char *data, u16 data_size);
+ int (*cnic_get_stats)(void *ulp_ctx);
struct module *owner;
atomic_t ref_count;
};
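The cnic_if.h additions above give ULP drivers (bnx2i/bnx2fc) a statistics hook and let cnic notify bnx2x when a ULP attaches or detaches (DRV_CTL_ULP_REGISTER_CMD / DRV_CTL_ULP_UNREGISTER_CMD). A hedged sketch of the ULP side; the handler name and the stats-copy step are invented for illustration, and a real implementation would also fill the existing cnic_init/cnic_exit/cm_* callbacks:

static int my_ulp_get_stats(void *ulp_ctx)
{
	/* copy the ULP's counters into the per-function area that the
	 * bnx2x driver exposes (see the new dev->stats_addr pointer),
	 * then let cnic/bnx2x forward them to the management firmware */
	return 0;
}

static struct cnic_ulp_ops my_ulp_ops = {
	.cnic_get_stats	= my_ulp_get_stats,
	/* ... remaining callbacks omitted in this sketch ... */
};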
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 0a1d7f2..084904c 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -163,7 +163,6 @@ enum sbmac_state {
#define SBMAC_MAX_TXDESCR 256
#define SBMAC_MAX_RXDESCR 256
-#define ETHER_ADDR_LEN 6
#define ENET_PACKET_SIZE 1518
/*#define ENET_PACKET_SIZE 9216 */
@@ -266,7 +265,7 @@ struct sbmac_softc {
int sbm_pause; /* current pause setting */
int sbm_link; /* current link state */
- unsigned char sbm_hwaddr[ETHER_ADDR_LEN];
+ unsigned char sbm_hwaddr[ETH_ALEN];
struct sbmacdma sbm_txdma; /* only channel 0 for now */
struct sbmacdma sbm_rxdma;
@@ -2260,7 +2259,8 @@ static int sbmac_init(struct platform_device *pldev, long long base)
}
sc->mii_bus->name = sbmac_mdio_string;
- snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%x", idx);
+ snprintf(sc->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pldev->name, idx);
sc->mii_bus->priv = sc;
sc->mii_bus->read = sbmac_mii_read;
sc->mii_bus->write = sbmac_mii_write;
@@ -2676,15 +2676,4 @@ static struct platform_driver sbmac_driver = {
},
};
-static int __init sbmac_init_module(void)
-{
- return platform_driver_register(&sbmac_driver);
-}
-
-static void __exit sbmac_cleanup_module(void)
-{
- platform_driver_unregister(&sbmac_driver);
-}
-
-module_init(sbmac_init_module);
-module_exit(sbmac_cleanup_module);
+module_platform_driver(sbmac_driver);
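module_platform_driver() is exactly the boilerplate collapse it looks like; for sbmac it expands to roughly the init/exit pair being deleted above (only the generated function names differ):

static int __init sbmac_driver_init(void)
{
	return platform_driver_register(&sbmac_driver);
}
module_init(sbmac_driver_init);

static void __exit sbmac_driver_exit(void)
{
	platform_driver_unregister(&sbmac_driver);
}
module_exit(sbmac_driver_exit);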
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index bf40741..35c2a20 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -89,10 +89,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 121
+#define TG3_MIN_NUM 122
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "November 2, 2011"
+#define DRV_MODULE_RELDATE "December 7, 2011"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -135,7 +135,6 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
(tg3_flag(tp, LRG_PROD_RING_CAP) ? \
TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING 100
-#define TG3_RSS_INDIR_TBL_SIZE 128
/* Do not place this n-ring entries value into the tp struct itself,
* we really want to expose these constants to GCC so that modulo et
@@ -194,12 +193,13 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
#else
-#define TG3_RX_OFFSET(tp) 0
+#define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
#endif
/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
-#define TG3_TX_BD_DMA_MAX 4096
+#define TG3_TX_BD_DMA_MAX_2K 2048
+#define TG3_TX_BD_DMA_MAX_4K 4096
#define TG3_RAW_IP_ALIGN 2
@@ -1670,22 +1670,6 @@ static void tg3_link_report(struct tg3 *tp)
}
}
-static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
-{
- u16 miireg;
-
- if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
- miireg = ADVERTISE_PAUSE_CAP;
- else if (flow_ctrl & FLOW_CTRL_TX)
- miireg = ADVERTISE_PAUSE_ASYM;
- else if (flow_ctrl & FLOW_CTRL_RX)
- miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
- else
- miireg = 0;
-
- return miireg;
-}
-
static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
{
u16 miireg;
@@ -1706,18 +1690,12 @@ static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
{
u8 cap = 0;
- if (lcladv & ADVERTISE_1000XPAUSE) {
- if (lcladv & ADVERTISE_1000XPSE_ASYM) {
- if (rmtadv & LPA_1000XPAUSE)
- cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
- else if (rmtadv & LPA_1000XPAUSE_ASYM)
- cap = FLOW_CTRL_RX;
- } else {
- if (rmtadv & LPA_1000XPAUSE)
- cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
- }
- } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
- if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
+ if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
+ cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
+ } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
+ if (lcladv & ADVERTISE_1000XPAUSE)
+ cap = FLOW_CTRL_RX;
+ if (rmtadv & ADVERTISE_1000XPAUSE)
cap = FLOW_CTRL_TX;
}
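The rewritten tg3_resolve_flowctrl_1000X() above is the standard 802.3 pause resolution in compact form: symmetric pause when both ends advertise PAUSE, otherwise asymmetric pause only when both advertise ASYM, with the direction decided by which end also advertises PAUSE. A standalone truth-table check of that logic (bit values are stand-ins for ADVERTISE_1000XPAUSE / ADVERTISE_1000XPSE_ASYM):

#include <stdio.h>

#define PAUSE	0x1	/* stands in for ADVERTISE_1000XPAUSE */
#define ASYM	0x2	/* stands in for ADVERTISE_1000XPSE_ASYM */
#define FC_TX	0x1
#define FC_RX	0x2

static int resolve(int lcl, int rmt)
{
	int cap = 0;

	if (lcl & rmt & PAUSE) {
		cap = FC_TX | FC_RX;
	} else if (lcl & rmt & ASYM) {
		if (lcl & PAUSE)
			cap = FC_RX;
		if (rmt & PAUSE)
			cap = FC_TX;
	}
	return cap;
}

int main(void)
{
	int lcl, rmt;

	for (lcl = 0; lcl <= (PAUSE | ASYM); lcl++)
		for (rmt = 0; rmt <= (PAUSE | ASYM); rmt++)
			printf("lcl=%x rmt=%x -> cap=%x\n", lcl, rmt,
			       resolve(lcl, rmt));
	return 0;
}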
@@ -1792,7 +1770,7 @@ static void tg3_adjust_link(struct net_device *dev)
if (phydev->duplex == DUPLEX_HALF)
mac_mode |= MAC_MODE_HALF_DUPLEX;
else {
- lcl_adv = tg3_advert_flowctrl_1000T(
+ lcl_adv = mii_advertise_flowctrl(
tp->link_config.flowctrl);
if (phydev->pause)
@@ -2160,7 +2138,7 @@ static void tg3_phy_eee_enable(struct tg3 *tp)
if (tp->link_config.active_speed == SPEED_1000 &&
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
+ tg3_flag(tp, 57765_CLASS)) &&
!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
val = MII_TG3_DSP_TAP26_ALNOKO |
MII_TG3_DSP_TAP26_RMRXSTO;
@@ -2679,8 +2657,7 @@ static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
bool need_vaux = false;
/* The GPIOs do something completely different on 57765. */
- if (!tg3_flag(tp, IS_NIC) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
return;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
@@ -3594,37 +3571,24 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
u32 val, new_adv;
new_adv = ADVERTISE_CSMA;
- if (advertise & ADVERTISED_10baseT_Half)
- new_adv |= ADVERTISE_10HALF;
- if (advertise & ADVERTISED_10baseT_Full)
- new_adv |= ADVERTISE_10FULL;
- if (advertise & ADVERTISED_100baseT_Half)
- new_adv |= ADVERTISE_100HALF;
- if (advertise & ADVERTISED_100baseT_Full)
- new_adv |= ADVERTISE_100FULL;
-
- new_adv |= tg3_advert_flowctrl_1000T(flowctrl);
+ new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
+ new_adv |= mii_advertise_flowctrl(flowctrl);
err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
if (err)
goto done;
- if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
- goto done;
-
- new_adv = 0;
- if (advertise & ADVERTISED_1000baseT_Half)
- new_adv |= ADVERTISE_1000HALF;
- if (advertise & ADVERTISED_1000baseT_Full)
- new_adv |= ADVERTISE_1000FULL;
+ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+ new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
- if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
- tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
- new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
+ tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
+ new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
- err = tg3_writephy(tp, MII_CTRL1000, new_adv);
- if (err)
- goto done;
+ err = tg3_writephy(tp, MII_CTRL1000, new_adv);
+ if (err)
+ goto done;
+ }
if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
goto done;
@@ -3650,6 +3614,7 @@ static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
case ASIC_REV_5717:
case ASIC_REV_57765:
+ case ASIC_REV_57766:
case ASIC_REV_5719:
/* If we advertised any eee advertisements above... */
if (val)
@@ -3786,76 +3751,61 @@ static int tg3_init_5401phy_dsp(struct tg3 *tp)
return err;
}
-static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
+static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
{
- u32 adv_reg, all_mask = 0;
+ u32 advmsk, tgtadv, advertising;
- if (mask & ADVERTISED_10baseT_Half)
- all_mask |= ADVERTISE_10HALF;
- if (mask & ADVERTISED_10baseT_Full)
- all_mask |= ADVERTISE_10FULL;
- if (mask & ADVERTISED_100baseT_Half)
- all_mask |= ADVERTISE_100HALF;
- if (mask & ADVERTISED_100baseT_Full)
- all_mask |= ADVERTISE_100FULL;
+ advertising = tp->link_config.advertising;
+ tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
- if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
- return 0;
+ advmsk = ADVERTISE_ALL;
+ if (tp->link_config.active_duplex == DUPLEX_FULL) {
+ tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
+ advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+ }
- if ((adv_reg & ADVERTISE_ALL) != all_mask)
- return 0;
+ if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
+ return false;
+
+ if ((*lcladv & advmsk) != tgtadv)
+ return false;
if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
u32 tg3_ctrl;
- all_mask = 0;
- if (mask & ADVERTISED_1000baseT_Half)
- all_mask |= ADVERTISE_1000HALF;
- if (mask & ADVERTISED_1000baseT_Full)
- all_mask |= ADVERTISE_1000FULL;
+ tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
- return 0;
+ return false;
tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
- if (tg3_ctrl != all_mask)
- return 0;
+ if (tg3_ctrl != tgtadv)
+ return false;
}
- return 1;
+ return true;
}
-static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
+static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
{
- u32 curadv, reqadv;
-
- if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
- return 1;
+ u32 lpeth = 0;
- curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
- reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
+ if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
+ u32 val;
- if (tp->link_config.active_duplex == DUPLEX_FULL) {
- if (curadv != reqadv)
- return 0;
+ if (tg3_readphy(tp, MII_STAT1000, &val))
+ return false;
- if (tg3_flag(tp, PAUSE_AUTONEG))
- tg3_readphy(tp, MII_LPA, rmtadv);
- } else {
- /* Reprogram the advertisement register, even if it
- * does not affect the current link. If the link
- * gets renegotiated in the future, we can save an
- * additional renegotiation cycle by advertising
- * it correctly in the first place.
- */
- if (curadv != reqadv) {
- *lcladv &= ~(ADVERTISE_PAUSE_CAP |
- ADVERTISE_PAUSE_ASYM);
- tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
- }
+ lpeth = mii_stat1000_to_ethtool_lpa_t(val);
}
- return 1;
+ if (tg3_readphy(tp, MII_LPA, rmtadv))
+ return false;
+
+ lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
+ tp->link_config.rmt_adv = lpeth;
+
+ return true;
}
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
@@ -3961,6 +3911,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
current_link_up = 0;
current_speed = SPEED_INVALID;
current_duplex = DUPLEX_INVALID;
+ tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
+ tp->link_config.rmt_adv = 0;
if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
err = tg3_phy_auxctl_read(tp,
@@ -4016,12 +3968,9 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
if (tp->link_config.autoneg == AUTONEG_ENABLE) {
if ((bmcr & BMCR_ANENABLE) &&
- tg3_copper_is_advertising_all(tp,
- tp->link_config.advertising)) {
- if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
- &rmt_adv))
- current_link_up = 1;
- }
+ tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
+ tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
+ current_link_up = 1;
} else {
if (!(bmcr & BMCR_ANENABLE) &&
tp->link_config.speed == current_speed &&
@@ -4033,8 +3982,22 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
}
if (current_link_up == 1 &&
- tp->link_config.active_duplex == DUPLEX_FULL)
+ tp->link_config.active_duplex == DUPLEX_FULL) {
+ u32 reg, bit;
+
+ if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
+ reg = MII_TG3_FET_GEN_STAT;
+ bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
+ } else {
+ reg = MII_TG3_EXT_STAT;
+ bit = MII_TG3_EXT_STAT_MDIX;
+ }
+
+ if (!tg3_readphy(tp, reg, &val) && (val & bit))
+ tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
+
tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
+ }
}
relink:
@@ -4643,6 +4606,9 @@ restart_autoneg:
if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
remote_adv |= LPA_1000XPAUSE_ASYM;
+ tp->link_config.rmt_adv =
+ mii_adv_to_ethtool_adv_x(remote_adv);
+
tg3_setup_flow_control(tp, local_adv, remote_adv);
current_link_up = 1;
tp->serdes_counter = 0;
@@ -4714,6 +4680,9 @@ static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
if (rxflags & MR_LP_ADV_ASYM_PAUSE)
remote_adv |= LPA_1000XPAUSE_ASYM;
+ tp->link_config.rmt_adv =
+ mii_adv_to_ethtool_adv_x(remote_adv);
+
tg3_setup_flow_control(tp, local_adv, remote_adv);
current_link_up = 1;
@@ -4796,6 +4765,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
udelay(40);
current_link_up = 0;
+ tp->link_config.rmt_adv = 0;
mac_status = tr32(MAC_STATUS);
if (tg3_flag(tp, HW_AUTONEG))
@@ -4887,6 +4857,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
current_link_up = 0;
current_speed = SPEED_INVALID;
current_duplex = DUPLEX_INVALID;
+ tp->link_config.rmt_adv = 0;
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
err |= tg3_readphy(tp, MII_BMSR, &bmsr);
@@ -4903,23 +4874,19 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
(tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
/* do nothing, just check for link up at the end */
} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
- u32 adv, new_adv;
+ u32 adv, newadv;
err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
- new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
- ADVERTISE_1000XPAUSE |
- ADVERTISE_1000XPSE_ASYM |
- ADVERTISE_SLCT);
-
- new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+ newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
+ ADVERTISE_1000XPAUSE |
+ ADVERTISE_1000XPSE_ASYM |
+ ADVERTISE_SLCT);
- if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
- new_adv |= ADVERTISE_1000XHALF;
- if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
- new_adv |= ADVERTISE_1000XFULL;
+ newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
+ newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
- if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
- tg3_writephy(tp, MII_ADVERTISE, new_adv);
+ if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
+ tg3_writephy(tp, MII_ADVERTISE, newadv);
bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
tg3_writephy(tp, MII_BMCR, bmcr);
@@ -4997,6 +4964,9 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
current_duplex = DUPLEX_FULL;
else
current_duplex = DUPLEX_HALF;
+
+ tp->link_config.rmt_adv =
+ mii_adv_to_ethtool_adv_x(remote_adv);
} else if (!tg3_flag(tp, 5780_CLASS)) {
/* Link is up via parallel detect */
} else {
@@ -5320,6 +5290,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
u32 sw_idx = tnapi->tx_cons;
struct netdev_queue *txq;
int index = tnapi - tp->napi;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
if (tg3_flag(tp, ENABLE_TSS))
index--;
@@ -5370,6 +5341,9 @@ static void tg3_tx(struct tg3_napi *tnapi)
sw_idx = NEXT_TX(sw_idx);
}
+ pkts_compl++;
+ bytes_compl += skb->len;
+
dev_kfree_skb(skb);
if (unlikely(tx_bug)) {
@@ -5378,6 +5352,8 @@ static void tg3_tx(struct tg3_napi *tnapi)
}
}
+ netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
+
tnapi->tx_cons = sw_idx;
/* Need to make the tx_cons update visible to tg3_start_xmit()
@@ -5397,15 +5373,15 @@ static void tg3_tx(struct tg3_napi *tnapi)
}
}
-static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
+static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
{
- if (!ri->skb)
+ if (!ri->data)
return;
pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
map_sz, PCI_DMA_FROMDEVICE);
- dev_kfree_skb_any(ri->skb);
- ri->skb = NULL;
+ kfree(ri->data);
+ ri->data = NULL;
}
/* Returns size of skb allocated or < 0 on error.
@@ -5419,28 +5395,28 @@ static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
* buffers the cpu only reads the last cacheline of the RX descriptor
* (to fetch the error flags, vlan tag, checksum, and opaque cookie).
*/
-static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
+static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
u32 opaque_key, u32 dest_idx_unmasked)
{
struct tg3_rx_buffer_desc *desc;
struct ring_info *map;
- struct sk_buff *skb;
+ u8 *data;
dma_addr_t mapping;
- int skb_size, dest_idx;
+ int skb_size, data_size, dest_idx;
switch (opaque_key) {
case RXD_OPAQUE_RING_STD:
dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
desc = &tpr->rx_std[dest_idx];
map = &tpr->rx_std_buffers[dest_idx];
- skb_size = tp->rx_pkt_map_sz;
+ data_size = tp->rx_pkt_map_sz;
break;
case RXD_OPAQUE_RING_JUMBO:
dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
desc = &tpr->rx_jmb[dest_idx].std;
map = &tpr->rx_jmb_buffers[dest_idx];
- skb_size = TG3_RX_JMB_MAP_SZ;
+ data_size = TG3_RX_JMB_MAP_SZ;
break;
default:
@@ -5453,31 +5429,33 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
* Callers depend upon this behavior and assume that
* we leave everything unchanged if we fail.
*/
- skb = netdev_alloc_skb(tp->dev, skb_size + TG3_RX_OFFSET(tp));
- if (skb == NULL)
+ skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ data = kmalloc(skb_size, GFP_ATOMIC);
+ if (!data)
return -ENOMEM;
- skb_reserve(skb, TG3_RX_OFFSET(tp));
-
- mapping = pci_map_single(tp->pdev, skb->data, skb_size,
+ mapping = pci_map_single(tp->pdev,
+ data + TG3_RX_OFFSET(tp),
+ data_size,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(tp->pdev, mapping)) {
- dev_kfree_skb(skb);
+ kfree(data);
return -EIO;
}
- map->skb = skb;
+ map->data = data;
dma_unmap_addr_set(map, mapping, mapping);
desc->addr_hi = ((u64)mapping >> 32);
desc->addr_lo = ((u64)mapping & 0xffffffff);
- return skb_size;
+ return data_size;
}
/* We only need to move over in the address because the other
* members of the RX descriptor are invariant. See notes above
- * tg3_alloc_rx_skb for full details.
+ * tg3_alloc_rx_data for full details.
*/
static void tg3_recycle_rx(struct tg3_napi *tnapi,
struct tg3_rx_prodring_set *dpr,
@@ -5511,7 +5489,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
return;
}
- dest_map->skb = src_map->skb;
+ dest_map->data = src_map->data;
dma_unmap_addr_set(dest_map, mapping,
dma_unmap_addr(src_map, mapping));
dest_desc->addr_hi = src_desc->addr_hi;
@@ -5522,7 +5500,7 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
*/
smp_wmb();
- src_map->skb = NULL;
+ src_map->data = NULL;
}
/* The RX ring scheme is composed of multiple rings which post fresh
@@ -5576,19 +5554,20 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
struct sk_buff *skb;
dma_addr_t dma_addr;
u32 opaque_key, desc_idx, *post_ptr;
+ u8 *data;
desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
if (opaque_key == RXD_OPAQUE_RING_STD) {
ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
dma_addr = dma_unmap_addr(ri, mapping);
- skb = ri->skb;
+ data = ri->data;
post_ptr = &std_prod_idx;
rx_std_posted++;
} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
dma_addr = dma_unmap_addr(ri, mapping);
- skb = ri->skb;
+ data = ri->data;
post_ptr = &jmb_prod_idx;
} else
goto next_pkt_nopost;
@@ -5606,13 +5585,14 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
goto next_pkt;
}
+ prefetch(data + TG3_RX_OFFSET(tp));
len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
ETH_FCS_LEN;
if (len > TG3_RX_COPY_THRESH(tp)) {
int skb_size;
- skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
+ skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
*post_ptr);
if (skb_size < 0)
goto drop_it;
@@ -5620,35 +5600,37 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
pci_unmap_single(tp->pdev, dma_addr, skb_size,
PCI_DMA_FROMDEVICE);
- /* Ensure that the update to the skb happens
+ skb = build_skb(data);
+ if (!skb) {
+ kfree(data);
+ goto drop_it_no_recycle;
+ }
+ skb_reserve(skb, TG3_RX_OFFSET(tp));
+ /* Ensure that the update to the data happens
* after the usage of the old DMA mapping.
*/
smp_wmb();
- ri->skb = NULL;
+ ri->data = NULL;
- skb_put(skb, len);
} else {
- struct sk_buff *copy_skb;
-
tg3_recycle_rx(tnapi, tpr, opaque_key,
desc_idx, *post_ptr);
- copy_skb = netdev_alloc_skb(tp->dev, len +
- TG3_RAW_IP_ALIGN);
- if (copy_skb == NULL)
+ skb = netdev_alloc_skb(tp->dev,
+ len + TG3_RAW_IP_ALIGN);
+ if (skb == NULL)
goto drop_it_no_recycle;
- skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
- skb_put(copy_skb, len);
+ skb_reserve(skb, TG3_RAW_IP_ALIGN);
pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
- skb_copy_from_linear_data(skb, copy_skb->data, len);
+ memcpy(skb->data,
+ data + TG3_RX_OFFSET(tp),
+ len);
pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
-
- /* We'll reuse the original ring buffer. */
- skb = copy_skb;
}
+ skb_put(skb, len);
if ((tp->dev->features & NETIF_F_RXCSUM) &&
(desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
(((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
@@ -5787,7 +5769,7 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
di = dpr->rx_std_prod_idx;
for (i = di; i < di + cpycnt; i++) {
- if (dpr->rx_std_buffers[i].skb) {
+ if (dpr->rx_std_buffers[i].data) {
cpycnt = i - di;
err = -ENOSPC;
break;
@@ -5845,7 +5827,7 @@ static int tg3_rx_prodring_xfer(struct tg3 *tp,
di = dpr->rx_jmb_prod_idx;
for (i = di; i < di + cpycnt; i++) {
- if (dpr->rx_jmb_buffers[i].skb) {
+ if (dpr->rx_jmb_buffers[i].data) {
cpycnt = i - di;
err = -ENOSPC;
break;
@@ -6443,25 +6425,25 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
bool hwbug = false;
if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
- hwbug = 1;
+ hwbug = true;
if (tg3_4g_overflow_test(map, len))
- hwbug = 1;
+ hwbug = true;
if (tg3_40bit_overflow_test(tp, map, len))
- hwbug = 1;
+ hwbug = true;
- if (tg3_flag(tp, 4K_FIFO_LIMIT)) {
+ if (tp->dma_limit) {
u32 prvidx = *entry;
u32 tmp_flag = flags & ~TXD_FLAG_END;
- while (len > TG3_TX_BD_DMA_MAX && *budget) {
- u32 frag_len = TG3_TX_BD_DMA_MAX;
- len -= TG3_TX_BD_DMA_MAX;
+ while (len > tp->dma_limit && *budget) {
+ u32 frag_len = tp->dma_limit;
+ len -= tp->dma_limit;
/* Avoid the 8byte DMA problem */
if (len <= 8) {
- len += TG3_TX_BD_DMA_MAX / 2;
- frag_len = TG3_TX_BD_DMA_MAX / 2;
+ len += tp->dma_limit / 2;
+ frag_len = tp->dma_limit / 2;
}
tnapi->tx_buffers[*entry].fragmented = true;
@@ -6482,7 +6464,7 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
*budget -= 1;
*entry = NEXT_TX(*entry);
} else {
- hwbug = 1;
+ hwbug = true;
tnapi->tx_buffers[prvidx].fragmented = false;
}
}
@@ -6685,14 +6667,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
iph = ip_hdr(skb);
tcp_opt_len = tcp_optlen(skb);
- if (skb_is_gso_v6(skb)) {
- hdr_len = skb_headlen(skb) - ETH_HLEN;
- } else {
- u32 ip_tcp_len;
-
- ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
- hdr_len = ip_tcp_len + tcp_opt_len;
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
+ if (!skb_is_gso_v6(skb)) {
iph->check = 0;
iph->tot_len = htons(mss + hdr_len);
}
@@ -6816,6 +6793,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
skb_tx_timestamp(skb);
+ netdev_tx_sent_queue(txq, skb->len);
/* Packets are ready, update Tx producer idx local and on card. */
tw32_tx_mbox(tnapi->prodmbox, entry);
@@ -6968,7 +6946,7 @@ static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
return 0;
}
-static void tg3_set_loopback(struct net_device *dev, u32 features)
+static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
{
struct tg3 *tp = netdev_priv(dev);
@@ -6994,7 +6972,8 @@ static void tg3_set_loopback(struct net_device *dev, u32 features)
}
}
-static u32 tg3_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t tg3_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct tg3 *tp = netdev_priv(dev);
@@ -7004,9 +6983,9 @@ static u32 tg3_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int tg3_set_features(struct net_device *dev, u32 features)
+static int tg3_set_features(struct net_device *dev, netdev_features_t features)
{
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
tg3_set_loopback(dev, features);
@@ -7082,14 +7061,14 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
if (tpr != &tp->napi[0].prodring) {
for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
i = (i + 1) & tp->rx_std_ring_mask)
- tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+ tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
tp->rx_pkt_map_sz);
if (tg3_flag(tp, JUMBO_CAPABLE)) {
for (i = tpr->rx_jmb_cons_idx;
i != tpr->rx_jmb_prod_idx;
i = (i + 1) & tp->rx_jmb_ring_mask) {
- tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+ tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
TG3_RX_JMB_MAP_SZ);
}
}
@@ -7098,12 +7077,12 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
}
for (i = 0; i <= tp->rx_std_ring_mask; i++)
- tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+ tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
tp->rx_pkt_map_sz);
if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
- tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+ tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
TG3_RX_JMB_MAP_SZ);
}
}
@@ -7159,7 +7138,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
/* Now allocate fresh SKBs for each rx ring. */
for (i = 0; i < tp->rx_pending; i++) {
- if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
+ if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
netdev_warn(tp->dev,
"Using a smaller RX standard ring. Only "
"%d out of %d buffers were allocated "
@@ -7191,7 +7170,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
}
for (i = 0; i < tp->rx_jumbo_pending; i++) {
- if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
+ if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
netdev_warn(tp->dev,
"Using a smaller RX jumbo ring. Only %d "
"out of %d buffers were allocated "
@@ -7296,6 +7275,7 @@ static void tg3_free_rings(struct tg3 *tp)
dev_kfree_skb_any(skb);
}
+ netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
}
}
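The netdev_tx_sent_queue() / netdev_tx_completed_queue() / netdev_tx_reset_queue() calls added in the hunks above wire tg3 into byte queue limits (BQL): bytes are reported when descriptors are posted, credited back from the TX-completion path, and the accounting is cleared whenever the rings are torn down. A hedged sketch of the generic pattern (illustrative driver code, not tg3 itself):

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq =
		netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	/* ... map the skb and post it to the hardware ring ... */
	netdev_tx_sent_queue(txq, skb->len);
	return NETDEV_TX_OK;
}

static void example_tx_clean(struct net_device *dev, unsigned int pkts,
			     unsigned int bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	/* ... unmap and free completed skbs, accumulating pkts/bytes ... */
	netdev_tx_completed_queue(txq, pkts, bytes);
}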
@@ -7591,8 +7571,6 @@ static int tg3_abort_hw(struct tg3 *tp, int silent)
if (tnapi->hw_status)
memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
}
- if (tp->hw_stats)
- memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
return err;
}
@@ -7626,15 +7604,11 @@ static void tg3_restore_pci_state(struct tg3 *tp)
pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
- if (tg3_flag(tp, PCI_EXPRESS))
- pcie_set_readrq(tp->pdev, tp->pcie_readrq);
- else {
- pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
- tp->pci_cacheline_sz);
- pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
- tp->pci_lat_timer);
- }
+ if (!tg3_flag(tp, PCI_EXPRESS)) {
+ pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
+ tp->pci_cacheline_sz);
+ pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
+ tp->pci_lat_timer);
}
/* Make sure PCI-X relaxed ordering bit is clear. */
@@ -7819,8 +7793,6 @@ static int tg3_chip_reset(struct tg3 *tp)
pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
val16);
- pcie_set_readrq(tp->pdev, tp->pcie_readrq);
-
/* Clear error status */
pci_write_config_word(tp->pdev,
pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
@@ -7914,6 +7886,9 @@ static int tg3_chip_reset(struct tg3 *tp)
return 0;
}
+static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
+static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
+
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
@@ -7931,6 +7906,15 @@ static int tg3_halt(struct tg3 *tp, int kind, int silent)
tg3_write_sig_legacy(tp, kind);
tg3_write_sig_post_reset(tp, kind);
+ if (tp->hw_stats) {
+ /* Save the stats across chip resets... */
+ tg3_get_nstats(tp, &tp->net_stats_prev),
+ tg3_get_estats(tp, &tp->estats_prev);
+
+ /* And make sure the next sample is new data */
+ memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
+ }
+
if (err)
return err;
@@ -8074,7 +8058,7 @@ static void tg3_rings_reset(struct tg3 *tp)
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
else if (tg3_flag(tp, 5717_PLUS))
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ else if (tg3_flag(tp, 57765_CLASS))
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
else
limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
@@ -8091,7 +8075,7 @@ static void tg3_rings_reset(struct tg3 *tp)
else if (!tg3_flag(tp, 5705_PLUS))
limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tg3_flag(tp, 57765_CLASS))
limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
else
limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
@@ -8197,7 +8181,8 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
if (!tg3_flag(tp, 5750_PLUS) ||
tg3_flag(tp, 5780_CLASS) ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
+ tg3_flag(tp, 57765_PLUS))
bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
@@ -8217,10 +8202,7 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
return;
- if (!tg3_flag(tp, 5705_PLUS))
- bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
- else
- bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;
+ bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
@@ -8231,6 +8213,54 @@ static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
+static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
+{
+ int i;
+
+ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
+ tp->rss_ind_tbl[i] =
+ ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
+}
+
+static void tg3_rss_check_indir_tbl(struct tg3 *tp)
+{
+ int i;
+
+ if (!tg3_flag(tp, SUPPORT_MSIX))
+ return;
+
+ if (tp->irq_cnt <= 2) {
+ memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
+ return;
+ }
+
+ /* Validate table against current IRQ count */
+ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
+ if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
+ break;
+ }
+
+ if (i != TG3_RSS_INDIR_TBL_SIZE)
+ tg3_rss_init_dflt_indir_tbl(tp);
+}
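For example, if the interface previously ran with four RX rings and comes back up with only two (an irq_cnt of 3), any stale table entry of 2 or 3 fails the check above and the table is re-seeded with the default spread; with two or fewer vectors the table is simply zeroed.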
+
+static void tg3_rss_write_indir_tbl(struct tg3 *tp)
+{
+ int i = 0;
+ u32 reg = MAC_RSS_INDIR_TBL_0;
+
+ while (i < TG3_RSS_INDIR_TBL_SIZE) {
+ u32 val = tp->rss_ind_tbl[i];
+ i++;
+ for (; i % 8; i++) {
+ val <<= 4;
+ val |= tp->rss_ind_tbl[i];
+ }
+ tw32(reg, val);
+ reg += 4;
+ }
+}
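A minimal illustration, not part of the patch: tg3_rss_write_indir_tbl() above packs eight 4-bit indirection entries into each 32-bit register starting at MAC_RSS_INDIR_TBL_0, first entry in the most significant nibble, which is equivalent per register to this standalone helper:

static u32 rss_pack_one_reg(const u8 *tbl)	/* tbl points at 8 entries */
{
	u32 val = tbl[0] & 0xf;
	int i;

	/* fold the remaining seven entries in, one nibble at a time;
	 * entries are always < 16 in the driver, so the masking here
	 * is only defensive
	 */
	for (i = 1; i < 8; i++)
		val = (val << 4) | (tbl[i] & 0xf);

	return val;	/* the value the driver writes via tw32() */
}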
+
/* tp->lock is held. */
static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
{
@@ -8337,7 +8367,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(GRC_MODE, grc_mode);
}
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+ if (tg3_flag(tp, 57765_CLASS)) {
if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
u32 grc_mode = tr32(GRC_MODE);
@@ -8425,7 +8455,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
+ if (!tg3_flag(tp, 57765_CLASS) &&
GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
val |= DMA_RWCTRL_TAGGED_STAT_WA;
tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
@@ -8572,7 +8602,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
val | BDINFO_FLAGS_USE_EXT_RECV);
if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tg3_flag(tp, 57765_CLASS))
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
NIC_SRAM_RX_JUMBO_BUFFER_DESC);
} else {
@@ -8581,10 +8611,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
}
if (tg3_flag(tp, 57765_PLUS)) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
- val = TG3_RX_STD_MAX_SIZE_5700;
- else
- val = TG3_RX_STD_MAX_SIZE_5717;
+ val = TG3_RX_STD_RING_SIZE(tp);
val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
val |= (TG3_RX_STD_DMA_SZ << 2);
} else
@@ -8661,6 +8688,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
if (tg3_flag(tp, PCI_EXPRESS))
rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+ rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
+
if (tg3_flag(tp, HW_TSO_1) ||
tg3_flag(tp, HW_TSO_2) ||
tg3_flag(tp, HW_TSO_3))
@@ -8809,9 +8839,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
udelay(100);
- if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
+ if (tg3_flag(tp, USING_MSIX)) {
val = tr32(MSGINT_MODE);
- val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
+ val |= MSGINT_MODE_ENABLE;
+ if (tp->irq_cnt > 1)
+ val |= MSGINT_MODE_MULTIVEC_EN;
if (!tg3_flag(tp, 1SHOT_MSI))
val |= MSGINT_MODE_ONE_SHOT_DISABLE;
tw32(MSGINT_MODE, val);
@@ -8924,28 +8956,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
udelay(100);
if (tg3_flag(tp, ENABLE_RSS)) {
- int i = 0;
- u32 reg = MAC_RSS_INDIR_TBL_0;
-
- if (tp->irq_cnt == 2) {
- for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i += 8) {
- tw32(reg, 0x0);
- reg += 4;
- }
- } else {
- u32 val;
-
- while (i < TG3_RSS_INDIR_TBL_SIZE) {
- val = i % (tp->irq_cnt - 1);
- i++;
- for (; i % 8; i++) {
- val <<= 4;
- val |= (i % (tp->irq_cnt - 1));
- }
- tw32(reg, val);
- reg += 4;
- }
- }
+ tg3_rss_write_indir_tbl(tp);
/* Setup the "secret" hash key. */
tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
@@ -9002,7 +9013,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* Prevent chip from dropping frames when flow control
* is enabled.
*/
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ if (tg3_flag(tp, 57765_CLASS))
val = 1;
else
val = 2;
@@ -9217,7 +9228,7 @@ static void tg3_timer(unsigned long __opaque)
spin_lock(&tp->lock);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tg3_flag(tp, 57765_CLASS))
tg3_chk_missed_msi(tp);
if (!tg3_flag(tp, TAGGED_STATUS)) {
@@ -9532,19 +9543,18 @@ static int tg3_request_firmware(struct tg3 *tp)
static bool tg3_enable_msix(struct tg3 *tp)
{
- int i, rc, cpus = num_online_cpus();
+ int i, rc;
struct msix_entry msix_ent[tp->irq_max];
- if (cpus == 1)
- /* Just fallback to the simpler MSI mode. */
- return false;
-
- /*
- * We want as many rx rings enabled as there are cpus.
- * The first MSIX vector only deals with link interrupts, etc,
- * so we add one to the number of vectors we are requesting.
- */
- tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
+ tp->irq_cnt = num_online_cpus();
+ if (tp->irq_cnt > 1) {
+ /* We want as many rx rings enabled as there are cpus.
+ * In multiqueue MSI-X mode, the first MSI-X vector
+ * only deals with link interrupts, etc, so we add
+ * one to the number of vectors we are requesting.
+ */
+ tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
+ }
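For example, on a host with 8 online CPUs and a hypothetical tp->irq_max of 5, this requests min(8 + 1, 5) = 5 vectors: one for link and other non-RX events plus four RX rings. On a single-CPU host the adjustment is skipped and a single MSI-X vector is requested, instead of falling back to MSI as the old code did.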
for (i = 0; i < tp->irq_max; i++) {
msix_ent[i].entry = i;
@@ -9669,6 +9679,8 @@ static int tg3_open(struct net_device *dev)
*/
tg3_ints_init(tp);
+ tg3_rss_check_indir_tbl(tp);
+
/* The placement of this call is tied
* to the setup and use of Host TX descriptors.
*/
@@ -9700,8 +9712,8 @@ static int tg3_open(struct net_device *dev)
tg3_free_rings(tp);
} else {
if (tg3_flag(tp, TAGGED_STATUS) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765)
+ GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
+ !tg3_flag(tp, 57765_CLASS))
tp->timer_offset = HZ;
else
tp->timer_offset = HZ / 10;
@@ -9782,10 +9794,6 @@ err_out1:
return err;
}
-static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
- struct rtnl_link_stats64 *);
-static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
-
static int tg3_close(struct net_device *dev)
{
int i;
@@ -9817,10 +9825,9 @@ static int tg3_close(struct net_device *dev)
tg3_ints_fini(tp);
- tg3_get_stats64(tp->dev, &tp->net_stats_prev);
-
- memcpy(&tp->estats_prev, tg3_get_estats(tp),
- sizeof(tp->estats_prev));
+ /* Clear stats across close / open calls */
+ memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
+ memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
tg3_napi_fini(tp);
@@ -9838,7 +9845,7 @@ static inline u64 get_stat64(tg3_stat64_t *val)
return ((u64)val->high << 32) | ((u64)val->low);
}
-static u64 calc_crc_errors(struct tg3 *tp)
+static u64 tg3_calc_crc_errors(struct tg3 *tp)
{
struct tg3_hw_stats *hw_stats = tp->hw_stats;
@@ -9847,14 +9854,12 @@ static u64 calc_crc_errors(struct tg3 *tp)
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
u32 val;
- spin_lock_bh(&tp->lock);
if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
tg3_writephy(tp, MII_TG3_TEST1,
val | MII_TG3_TEST1_CRC_EN);
tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
} else
val = 0;
- spin_unlock_bh(&tp->lock);
tp->phy_crc_errors += val;
@@ -9868,14 +9873,13 @@ static u64 calc_crc_errors(struct tg3 *tp)
estats->member = old_estats->member + \
get_stat64(&hw_stats->member)
-static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
+static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
{
- struct tg3_ethtool_stats *estats = &tp->estats;
struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
struct tg3_hw_stats *hw_stats = tp->hw_stats;
if (!hw_stats)
- return old_estats;
+ return;
ESTAT_ADD(rx_octets);
ESTAT_ADD(rx_fragments);
@@ -9954,20 +9958,13 @@ static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
ESTAT_ADD(nic_tx_threshold_hit);
ESTAT_ADD(mbuf_lwm_thresh_hit);
-
- return estats;
}
-static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats)
+static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
- struct tg3 *tp = netdev_priv(dev);
struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
struct tg3_hw_stats *hw_stats = tp->hw_stats;
- if (!hw_stats)
- return old_stats;
-
stats->rx_packets = old_stats->rx_packets +
get_stat64(&hw_stats->rx_ucast_packets) +
get_stat64(&hw_stats->rx_mcast_packets) +
@@ -10010,15 +10007,13 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
get_stat64(&hw_stats->tx_carrier_sense_errors);
stats->rx_crc_errors = old_stats->rx_crc_errors +
- calc_crc_errors(tp);
+ tg3_calc_crc_errors(tp);
stats->rx_missed_errors = old_stats->rx_missed_errors +
get_stat64(&hw_stats->rx_discards);
stats->rx_dropped = tp->rx_dropped;
stats->tx_dropped = tp->tx_dropped;
-
- return stats;
}
static inline u32 calc_crc(unsigned char *buf, int len)
@@ -10318,12 +10313,20 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->advertising |= ADVERTISED_Asym_Pause;
}
}
- if (netif_running(dev)) {
+ if (netif_running(dev) && netif_carrier_ok(dev)) {
ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
cmd->duplex = tp->link_config.active_duplex;
+ cmd->lp_advertising = tp->link_config.rmt_adv;
+ if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
+ if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
+ cmd->eth_tp_mdix = ETH_TP_MDI_X;
+ else
+ cmd->eth_tp_mdix = ETH_TP_MDI;
+ }
} else {
ethtool_cmd_speed_set(cmd, SPEED_INVALID);
cmd->duplex = DUPLEX_INVALID;
+ cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
}
cmd->phy_address = tp->phy_addr;
cmd->transceiver = XCVR_INTERNAL;
@@ -10428,10 +10431,10 @@ static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct tg3 *tp = netdev_priv(dev);
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
- strcpy(info->fw_version, tp->fw_ver);
- strcpy(info->bus_info, pci_name(tp->pdev));
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
+ strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
}
static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -10590,12 +10593,12 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam
epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
- if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
+ if (tp->link_config.flowctrl & FLOW_CTRL_RX)
epause->rx_pause = 1;
else
epause->rx_pause = 0;
- if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
+ if (tp->link_config.flowctrl & FLOW_CTRL_TX)
epause->tx_pause = 1;
else
epause->tx_pause = 0;
@@ -10715,6 +10718,78 @@ static int tg3_get_sset_count(struct net_device *dev, int sset)
}
}
+static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rules __always_unused)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (!tg3_flag(tp, SUPPORT_MSIX))
+ return -EOPNOTSUPP;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ if (netif_running(tp->dev))
+ info->data = tp->irq_cnt;
+ else {
+ info->data = num_online_cpus();
+ if (info->data > TG3_IRQ_MAX_VECS_RSS)
+ info->data = TG3_IRQ_MAX_VECS_RSS;
+ }
+
+ /* The first interrupt vector only
+ * handles link interrupts.
+ */
+ info->data -= 1;
+ return 0;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
+{
+ u32 size = 0;
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (tg3_flag(tp, SUPPORT_MSIX))
+ size = TG3_RSS_INDIR_TBL_SIZE;
+
+ return size;
+}
+
+static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ int i;
+
+ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
+ indir[i] = tp->rss_ind_tbl[i];
+
+ return 0;
+}
+
+static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
+{
+ struct tg3 *tp = netdev_priv(dev);
+ size_t i;
+
+ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
+ tp->rss_ind_tbl[i] = indir[i];
+
+ if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
+ return 0;
+
+ /* It is legal to write the indirection
+ * table while the device is running.
+ */
+ tg3_full_lock(tp, 0);
+ tg3_rss_write_indir_tbl(tp);
+ tg3_full_unlock(tp);
+
+ return 0;
+}
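With these hooks wired into tg3_ethtool_ops below, the table is exposed through the standard ethtool RXFH indirection interface (ETHTOOL_GRXFHINDIR / ETHTOOL_SRXFHINDIR); assuming a userspace ethtool recent enough to support it, something like "ethtool -x ethN" dumps the 128-entry table and "ethtool -X ethN equal 3" spreads it evenly across three RX rings.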
+
static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
switch (stringset) {
@@ -10769,7 +10844,8 @@ static void tg3_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *estats, u64 *tmp_stats)
{
struct tg3 *tp = netdev_priv(dev);
- memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
+
+ tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
}
static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
@@ -11352,7 +11428,7 @@ static int tg3_test_memory(struct tg3 *tp)
if (tg3_flag(tp, 5717_PLUS))
mem_tbl = mem_tbl_5717;
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ else if (tg3_flag(tp, 57765_CLASS))
mem_tbl = mem_tbl_57765;
else if (tg3_flag(tp, 5755_PLUS))
mem_tbl = mem_tbl_5755;
@@ -11400,8 +11476,8 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
u32 budget;
- struct sk_buff *skb, *rx_skb;
- u8 *tx_data;
+ struct sk_buff *skb;
+ u8 *tx_data, *rx_data;
dma_addr_t map;
int num_pkts, tx_len, rx_len, i, err;
struct tg3_rx_buffer_desc *desc;
@@ -11569,11 +11645,11 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
}
if (opaque_key == RXD_OPAQUE_RING_STD) {
- rx_skb = tpr->rx_std_buffers[desc_idx].skb;
+ rx_data = tpr->rx_std_buffers[desc_idx].data;
map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
mapping);
} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
- rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
+ rx_data = tpr->rx_jmb_buffers[desc_idx].data;
map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
mapping);
} else
@@ -11582,15 +11658,16 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
PCI_DMA_FROMDEVICE);
+ rx_data += TG3_RX_OFFSET(tp);
for (i = data_off; i < rx_len; i++, val++) {
- if (*(rx_skb->data + i) != (u8) (val & 0xff))
+ if (*(rx_data + i) != (u8) (val & 0xff))
goto out;
}
}
err = 0;
- /* tg3_free_rings will unmap and free the rx_skb */
+ /* tg3_free_rings will unmap and free the rx_data */
out:
return err;
}
@@ -11943,6 +12020,10 @@ static const struct ethtool_ops tg3_ethtool_ops = {
.get_coalesce = tg3_get_coalesce,
.set_coalesce = tg3_set_coalesce,
.get_sset_count = tg3_get_sset_count,
+ .get_rxnfc = tg3_get_rxnfc,
+ .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
+ .get_rxfh_indir = tg3_get_rxfh_indir,
+ .set_rxfh_indir = tg3_set_rxfh_indir,
};
static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
@@ -12612,7 +12693,7 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
tg3_get_5906_nvram_info(tp);
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tg3_flag(tp, 57765_CLASS))
tg3_get_57780_nvram_info(tp);
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
@@ -13218,8 +13299,7 @@ static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
{
- u32 adv = ADVERTISED_Autoneg |
- ADVERTISED_Pause;
+ u32 adv = ADVERTISED_Autoneg;
if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
adv |= ADVERTISED_1000baseT_Half |
@@ -13322,7 +13402,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
!tg3_flag(tp, ENABLE_APE) &&
!tg3_flag(tp, ENABLE_ASF)) {
- u32 bmsr, mask;
+ u32 bmsr, dummy;
tg3_readphy(tp, MII_BMSR, &bmsr);
if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
@@ -13335,10 +13415,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
tg3_phy_set_wirespeed(tp);
- mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
- ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
- if (!tg3_copper_is_advertising_all(tp, mask)) {
+ if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
tp->link_config.flowctrl);
@@ -13460,6 +13537,17 @@ out_no_vpd:
strcpy(tp->board_part_number, "BCM57795");
else
goto nomatch;
+ } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
+ if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
+ strcpy(tp->board_part_number, "BCM57762");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
+ strcpy(tp->board_part_number, "BCM57766");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
+ strcpy(tp->board_part_number, "BCM57782");
+ else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
+ strcpy(tp->board_part_number, "BCM57786");
+ else
+ goto nomatch;
} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
strcpy(tp->board_part_number, "BCM95906");
} else {
@@ -13798,7 +13886,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
- tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
pci_read_config_dword(tp->pdev,
TG3PCI_GEN15_PRODID_ASICREV,
&prod_id_asic_rev);
@@ -13945,7 +14037,10 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tg3_flag_set(tp, 5717_PLUS);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
- tg3_flag(tp, 5717_PLUS))
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+ tg3_flag_set(tp, 57765_CLASS);
+
+ if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
tg3_flag_set(tp, 57765_PLUS);
/* Intentionally exclude ASIC_REV_5906 */
@@ -13997,9 +14092,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (tg3_flag(tp, HW_TSO_1) ||
tg3_flag(tp, HW_TSO_2) ||
tg3_flag(tp, HW_TSO_3) ||
- (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
+ tp->fw_needed) {
+ /* For firmware TSO, assume ASF is disabled.
+ * We'll disable TSO later if we discover ASF
+ * is enabled in tg3_get_eeprom_hw_cfg().
+ */
tg3_flag_set(tp, TSO_CAPABLE);
- else {
+ } else {
tg3_flag_clear(tp, TSO_CAPABLE);
tg3_flag_clear(tp, TSO_BUG);
tp->fw_needed = NULL;
@@ -14027,6 +14126,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (tg3_flag(tp, 57765_PLUS)) {
tg3_flag_set(tp, SUPPORT_MSIX);
tp->irq_max = TG3_IRQ_MAX_VECS;
+ tg3_rss_init_dflt_indir_tbl(tp);
}
}
@@ -14034,9 +14134,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tg3_flag_set(tp, SHORT_DMA_BUG);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
- tg3_flag_set(tp, 4K_FIFO_LIMIT);
+ tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
+ tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
- if (tg3_flag(tp, 5717_PLUS))
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
tg3_flag_set(tp, LRG_PROD_RING_CAP);
if (tg3_flag(tp, 57765_PLUS) &&
@@ -14056,12 +14160,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tg3_flag_set(tp, PCI_EXPRESS);
- tp->pcie_readrq = 4096;
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
- tp->pcie_readrq = 2048;
-
- pcie_set_readrq(tp->pdev, tp->pcie_readrq);
+ if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
+ int readrq = pcie_get_readrq(tp->pdev);
+ if (readrq > 2048)
+ pcie_set_readrq(tp->pdev, 2048);
+ }
pci_read_config_word(tp->pdev,
pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
@@ -14273,6 +14376,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
*/
tg3_get_eeprom_hw_cfg(tp);
+ if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
+ tg3_flag_clear(tp, TSO_CAPABLE);
+ tg3_flag_clear(tp, TSO_BUG);
+ tp->fw_needed = NULL;
+ }
+
if (tg3_flag(tp, ENABLE_APE)) {
/* Allow reads and writes to the
* APE register and memory space.
@@ -14311,7 +14420,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ tg3_flag(tp, 57765_CLASS))
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
@@ -14548,11 +14657,11 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
else
tg3_flag_clear(tp, POLL_SERDES);
- tp->rx_offset = NET_IP_ALIGN;
+ tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
tg3_flag(tp, PCIX_MODE)) {
- tp->rx_offset = 0;
+ tp->rx_offset = NET_SKB_PAD;
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
tp->rx_copy_thresh = ~(u16)0;
#endif
@@ -15286,6 +15395,21 @@ static void __devinit tg3_init_coal(struct tg3 *tp)
}
}
+static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct tg3 *tp = netdev_priv(dev);
+
+ if (!tp->hw_stats)
+ return &tp->net_stats_prev;
+
+ spin_lock_bh(&tp->lock);
+ tg3_get_nstats(tp, stats);
+ spin_unlock_bh(&tp->lock);
+
+ return stats;
+}
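The net effect of this split is that tg3_get_nstats() only folds the hardware counters into the caller's buffer, while the ndo wrapper above adds the locking and, when hw_stats is not allocated (device closed), simply reports the saved net_stats_prev. Combined with the new code in tg3_halt(), the reported totals now survive chip resets but are deliberately cleared across a close/open cycle.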
+
static const struct net_device_ops tg3_netdev_ops = {
.ndo_open = tg3_open,
.ndo_stop = tg3_close,
@@ -15313,7 +15437,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
u32 sndmbx, rcvmbx, intmbx;
char str[40];
u64 dma_mask, persist_dma_mask;
- u32 features = 0;
+ netdev_features_t features = 0;
printk_once(KERN_INFO "%s\n", version);
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 94b4bd0..aea8f72 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -31,6 +31,8 @@
#define TG3_RX_RET_MAX_SIZE_5705 512
#define TG3_RX_RET_MAX_SIZE_5717 4096
+#define TG3_RSS_INDIR_TBL_SIZE 128
+
/* First 256 bytes are a mirror of PCI config space. */
#define TG3PCI_VENDOR 0x00000000
#define TG3PCI_VENDOR_BROADCOM 0x14e4
@@ -57,6 +59,10 @@
#define TG3PCI_DEVICE_TIGON3_57795 0x16b6
#define TG3PCI_DEVICE_TIGON3_5719 0x1657
#define TG3PCI_DEVICE_TIGON3_5720 0x165f
+#define TG3PCI_DEVICE_TIGON3_57762 0x1682
+#define TG3PCI_DEVICE_TIGON3_57766 0x1686
+#define TG3PCI_DEVICE_TIGON3_57786 0x16b3
+#define TG3PCI_DEVICE_TIGON3_57782 0x16b7
/* 0x04 --> 0x2c unused */
#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM
#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644
@@ -168,6 +174,7 @@
#define ASIC_REV_57765 0x57785
#define ASIC_REV_5719 0x5719
#define ASIC_REV_5720 0x5720
+#define ASIC_REV_57766 0x57766
#define GET_CHIP_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 8)
#define CHIPREV_5700_AX 0x70
#define CHIPREV_5700_BX 0x71
@@ -1340,6 +1347,7 @@
#define RDMAC_MODE_MBUF_SBD_CRPT_ENAB 0x00002000
#define RDMAC_MODE_FIFO_SIZE_128 0x00020000
#define RDMAC_MODE_FIFO_LONG_BURST 0x00030000
+#define RDMAC_MODE_JMB_2K_MMRR 0x00800000
#define RDMAC_MODE_MULT_DMA_RD_DIS 0x01000000
#define RDMAC_MODE_IPV4_LSO_EN 0x08000000
#define RDMAC_MODE_IPV6_LSO_EN 0x10000000
@@ -2174,6 +2182,7 @@
#define MII_TG3_EXT_CTRL_TBI 0x8000
#define MII_TG3_EXT_STAT 0x11 /* Extended status register */
+#define MII_TG3_EXT_STAT_MDIX 0x2000
#define MII_TG3_EXT_STAT_LPASS 0x0100
#define MII_TG3_RXR_COUNTERS 0x14 /* Local/Remote Receiver Counts */
@@ -2277,6 +2286,9 @@
#define MII_TG3_FET_PTEST_FRC_TX_LINK 0x1000
#define MII_TG3_FET_PTEST_FRC_TX_LOCK 0x0800
+#define MII_TG3_FET_GEN_STAT 0x1c
+#define MII_TG3_FET_GEN_STAT_MDIXSTAT 0x2000
+
#define MII_TG3_FET_TEST 0x1f
#define MII_TG3_FET_SHADOW_EN 0x0080
@@ -2662,9 +2674,13 @@ struct tg3_hw_stats {
/* 'mapping' is superfluous as the chip does not write into
* the tx/rx post rings so we could just fetch it from there.
* But the cache behavior is better how we are doing it now.
+ *
+ * This driver uses the new build_skb() API:
+ * the RX ring buffer holds only a pointer to kmalloc()ed data;
+ * skbs are built only after the hardware has filled the frame.
*/
struct ring_info {
- struct sk_buff *skb;
+ u8 *data;
DEFINE_DMA_UNMAP_ADDR(mapping);
};
@@ -2690,6 +2706,7 @@ struct tg3_link_config {
#define DUPLEX_INVALID 0xff
#define AUTONEG_INVALID 0xff
u16 active_speed;
+ u32 rmt_adv;
/* When we go in and out of low power mode we need
* to swap with this state.
@@ -2865,6 +2882,8 @@ enum TG3_FLAGS {
TG3_FLAG_NVRAM_BUFFERED,
TG3_FLAG_SUPPORT_MSI,
TG3_FLAG_SUPPORT_MSIX,
+ TG3_FLAG_USING_MSI,
+ TG3_FLAG_USING_MSIX,
TG3_FLAG_PCIX_MODE,
TG3_FLAG_PCI_HIGH_SPEED,
TG3_FLAG_PCI_32BIT,
@@ -2880,7 +2899,6 @@ enum TG3_FLAGS {
TG3_FLAG_CHIP_RESETTING,
TG3_FLAG_INIT_COMPLETE,
TG3_FLAG_TSO_BUG,
- TG3_FLAG_IS_5788,
TG3_FLAG_MAX_RXPEND_64,
TG3_FLAG_TSO_CAPABLE,
TG3_FLAG_PCI_EXPRESS, /* BCM5785 + pci_is_pcie() */
@@ -2889,14 +2907,9 @@ enum TG3_FLAGS {
TG3_FLAG_IS_NIC,
TG3_FLAG_FLASH,
TG3_FLAG_HW_TSO_1,
- TG3_FLAG_5705_PLUS,
- TG3_FLAG_5750_PLUS,
+ TG3_FLAG_HW_TSO_2,
TG3_FLAG_HW_TSO_3,
- TG3_FLAG_USING_MSI,
- TG3_FLAG_USING_MSIX,
TG3_FLAG_ICH_WORKAROUND,
- TG3_FLAG_5780_CLASS,
- TG3_FLAG_HW_TSO_2,
TG3_FLAG_1SHOT_MSI,
TG3_FLAG_NO_FWARE_REPORTED,
TG3_FLAG_NO_NVRAM_ADDR_TRANS,
@@ -2910,18 +2923,23 @@ enum TG3_FLAGS {
TG3_FLAG_RGMII_EXT_IBND_RX_EN,
TG3_FLAG_RGMII_EXT_IBND_TX_EN,
TG3_FLAG_CLKREQ_BUG,
- TG3_FLAG_5755_PLUS,
TG3_FLAG_NO_NVRAM,
TG3_FLAG_ENABLE_RSS,
TG3_FLAG_ENABLE_TSS,
TG3_FLAG_SHORT_DMA_BUG,
TG3_FLAG_USE_JUMBO_BDFLAG,
TG3_FLAG_L1PLLPD_EN,
- TG3_FLAG_57765_PLUS,
TG3_FLAG_APE_HAS_NCSI,
- TG3_FLAG_5717_PLUS,
TG3_FLAG_4K_FIFO_LIMIT,
TG3_FLAG_RESET_TASK_PENDING,
+ TG3_FLAG_5705_PLUS,
+ TG3_FLAG_IS_5788,
+ TG3_FLAG_5750_PLUS,
+ TG3_FLAG_5780_CLASS,
+ TG3_FLAG_5755_PLUS,
+ TG3_FLAG_57765_PLUS,
+ TG3_FLAG_57765_CLASS,
+ TG3_FLAG_5717_PLUS,
/* Add new flags before this comment and TG3_FLAG_NUMBER_OF_FLAGS */
TG3_FLAG_NUMBER_OF_FLAGS, /* Last entry in enum TG3_FLAGS */
@@ -2985,6 +3003,7 @@ struct tg3 {
/* begin "tx thread" cacheline section */
void (*write32_tx_mbox) (struct tg3 *, u32,
u32);
+ u32 dma_limit;
/* begin "rx thread" cacheline section */
struct tg3_napi napi[TG3_IRQ_MAX_VECS];
@@ -3005,7 +3024,6 @@ struct tg3 {
unsigned long rx_dropped;
unsigned long tx_dropped;
struct rtnl_link_stats64 net_stats_prev;
- struct tg3_ethtool_stats estats;
struct tg3_ethtool_stats estats_prev;
DECLARE_BITMAP(tg3_flags, TG3_FLAG_NUMBER_OF_FLAGS);
@@ -3131,10 +3149,12 @@ struct tg3 {
#define TG3_PHYFLG_SERDES_PREEMPHASIS 0x00010000
#define TG3_PHYFLG_PARALLEL_DETECT 0x00020000
#define TG3_PHYFLG_EEE_CAP 0x00040000
+#define TG3_PHYFLG_MDIX_STATE 0x00200000
u32 led_ctrl;
u32 phy_otp;
u32 setlpicnt;
+ u8 rss_ind_tbl[TG3_RSS_INDIR_TBL_SIZE];
#define TG3_BPN_SIZE 24
char board_part_number[TG3_BPN_SIZE];
diff --git a/drivers/net/ethernet/brocade/bna/Makefile b/drivers/net/ethernet/brocade/bna/Makefile
index 74d3abc..6027302 100644
--- a/drivers/net/ethernet/brocade/bna/Makefile
+++ b/drivers/net/ethernet/brocade/bna/Makefile
@@ -5,7 +5,7 @@
obj-$(CONFIG_BNA) += bna.o
-bna-objs := bnad.o bnad_ethtool.o bna_enet.o bna_tx_rx.o
+bna-objs := bnad.o bnad_ethtool.o bnad_debugfs.o bna_enet.o bna_tx_rx.o
bna-objs += bfa_msgq.o bfa_ioc.o bfa_ioc_ct.o bfa_cee.o
bna-objs += cna_fwimg.o
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.c b/drivers/net/ethernet/brocade/bna/bfa_cee.c
index 8e62718..29f284f 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cee.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.c
@@ -185,6 +185,41 @@ bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva, u64 dma_pa)
}
/**
+ * bfa_cee_get_attr()
+ *
+ * @brief Send the request to the f/w to fetch CEE attributes.
+ *
+ * @param[in] cee Pointer to the CEE module data structure.
+ * @param[in] attr CEE attributes to be returned.
+ * @param[in] cbfn Callback function invoked when the f/w responds.
+ * @param[in] cbarg Argument passed back to the callback.
+ *
+ * @return Status
+ */
+enum bfa_status
+bfa_nw_cee_get_attr(struct bfa_cee *cee, struct bfa_cee_attr *attr,
+ bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
+{
+ struct bfi_cee_get_req *cmd;
+
+ BUG_ON(!((cee != NULL) && (cee->ioc != NULL)));
+ if (!bfa_nw_ioc_is_operational(cee->ioc))
+ return BFA_STATUS_IOC_FAILURE;
+
+ if (cee->get_attr_pending == true)
+ return BFA_STATUS_DEVBUSY;
+
+ cee->get_attr_pending = true;
+ cmd = (struct bfi_cee_get_req *) cee->get_cfg_mb.msg;
+ cee->attr = attr;
+ cee->cbfn.get_attr_cbfn = cbfn;
+ cee->cbfn.get_attr_cbarg = cbarg;
+ bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
+ bfa_ioc_portid(cee->ioc));
+ bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
+ bfa_nw_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb, NULL, NULL);
+
+ return BFA_STATUS_OK;
+}
+
+/**
* bfa_cee_isrs()
*
* @brief Handles Mail-box interrupts for CEE module.
diff --git a/drivers/net/ethernet/brocade/bna/bfa_cee.h b/drivers/net/ethernet/brocade/bna/bfa_cee.h
index 58d54e9..93fde63 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_cee.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_cee.h
@@ -59,5 +59,7 @@ u32 bfa_nw_cee_meminfo(void);
void bfa_nw_cee_mem_claim(struct bfa_cee *cee, u8 *dma_kva,
u64 dma_pa);
void bfa_nw_cee_attach(struct bfa_cee *cee, struct bfa_ioc *ioc, void *dev);
-
+enum bfa_status bfa_nw_cee_get_attr(struct bfa_cee *cee,
+ struct bfa_cee_attr *attr,
+ bfa_cee_get_attr_cbfn_t cbfn, void *cbarg);
#endif /* __BFA_CEE_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_defs.h b/drivers/net/ethernet/brocade/bna/bfa_defs.h
index 2f12d68..871c630 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_defs.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_defs.h
@@ -219,41 +219,39 @@ enum {
* All numerical fields are in big-endian format.
*/
struct bfa_mfg_block {
- u8 version; /*!< manufacturing block version */
- u8 mfg_sig[3]; /*!< characters 'M', 'F', 'G' */
- u16 mfgsize; /*!< mfg block size */
- u16 u16_chksum; /*!< old u16 checksum */
- char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
- char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
- u8 mfg_day; /*!< manufacturing day */
- u8 mfg_month; /*!< manufacturing month */
- u16 mfg_year; /*!< manufacturing year */
- u64 mfg_wwn; /*!< wwn base for this adapter */
- u8 num_wwn; /*!< number of wwns assigned */
- u8 mfg_speeds; /*!< speeds allowed for this adapter */
- u8 rsv[2];
- char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
- char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
- char
- supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
- char
- supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
- mac_t mfg_mac; /*!< mac address */
- u8 num_mac; /*!< number of mac addresses */
- u8 rsv2;
- u32 card_type; /*!< card type */
- char cap_nic; /*!< capability nic */
- char cap_cna; /*!< capability cna */
- char cap_hba; /*!< capability hba */
- char cap_fc16g; /*!< capability fc 16g */
- char cap_sriov; /*!< capability sriov */
- char cap_mezz; /*!< capability mezz */
- u8 rsv3;
- u8 mfg_nports; /*!< number of ports */
- char media[8]; /*!< xfi/xaui */
- char initial_mode[8];/*!< initial mode: hba/cna/nic */
- u8 rsv4[84];
- u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*!< md5 checksum */
+ u8 version; /* manufacturing block version */
+ u8 mfg_sig[3]; /* characters 'M', 'F', 'G' */
+ u16 mfgsize; /* mfg block size */
+ u16 u16_chksum; /* old u16 checksum */
+ char brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
+ char brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
+ u8 mfg_day; /* manufacturing day */
+ u8 mfg_month; /* manufacturing month */
+ u16 mfg_year; /* manufacturing year */
+ u64 mfg_wwn; /* wwn base for this adapter */
+ u8 num_wwn; /* number of wwns assigned */
+ u8 mfg_speeds; /* speeds allowed for this adapter */
+ u8 rsv[2];
+ char supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
+ char supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
+ char supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
+ char supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
+ mac_t mfg_mac; /* base mac address */
+ u8 num_mac; /* number of mac addresses */
+ u8 rsv2;
+ u32 card_type; /* card type */
+ char cap_nic; /* capability nic */
+ char cap_cna; /* capability cna */
+ char cap_hba; /* capability hba */
+ char cap_fc16g; /* capability fc 16g */
+ char cap_sriov; /* capability sriov */
+ char cap_mezz; /* capability mezz */
+ u8 rsv3;
+ u8 mfg_nports; /* number of ports */
+ char media[8]; /* xfi/xaui */
+ char initial_mode[8]; /* initial mode: hba/cna/nic */
+ u8 rsv4[84];
+ u8 md5_chksum[BFA_MFG_CHKSUM_SIZE]; /* md5 checksum */
};
#pragma pack()
@@ -293,4 +291,34 @@ enum bfa_mode {
BFA_MODE_NIC = 3
};
+/*
+ * Flash module specific
+ */
+#define BFA_FLASH_PART_ENTRY_SIZE 32 /* partition entry size */
+#define BFA_FLASH_PART_MAX 32 /* maximal # of partitions */
+#define BFA_TOTAL_FLASH_SIZE 0x400000
+#define BFA_FLASH_PART_MFG 7
+
+/*
+ * flash partition attributes
+ */
+struct bfa_flash_part_attr {
+ u32 part_type; /* partition type */
+ u32 part_instance; /* partition instance */
+ u32 part_off; /* partition offset */
+ u32 part_size; /* partition size */
+ u32 part_len; /* partition content length */
+ u32 part_status; /* partition status */
+ char rsv[BFA_FLASH_PART_ENTRY_SIZE - 24];
+};
+
+/*
+ * flash attributes
+ */
+struct bfa_flash_attr {
+ u32 status; /* flash overall status */
+ u32 npart; /* num of partitions */
+ struct bfa_flash_part_attr part[BFA_FLASH_PART_MAX];
+};
+
#endif /* __BFA_DEFS_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index b0307a0..abfad27 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -74,6 +74,7 @@ static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
+static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc);
static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
@@ -997,6 +998,7 @@ bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
static void
bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
{
+ bfa_nw_ioc_debug_save_ftrc(iocpf->ioc);
bfa_ioc_hw_sem_get(iocpf->ioc);
}
@@ -1743,6 +1745,114 @@ bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
bfa_q_deq(&mod->cmd_q, &cmd);
}
+/**
+ * Read data from SMEM to host through PCI memmap
+ *
+ * @param[in] ioc memory for IOC
+ * @param[in] tbuf app memory to store data from smem
+ * @param[in] soff smem offset
+ * @param[in] sz size of smem in bytes
+ */
+static int
+bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
+{
+ u32 pgnum, loff, r32;
+ int i, len;
+ u32 *buf = tbuf;
+
+ pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
+ loff = PSS_SMEM_PGOFF(soff);
+
+ /*
+ * Hold semaphore to serialize pll init and fwtrc.
+ */
+ if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg) == 0)
+ return 1;
+
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+
+ len = sz/sizeof(u32);
+ for (i = 0; i < len; i++) {
+ r32 = swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
+ buf[i] = be32_to_cpu(r32);
+ loff += sizeof(u32);
+
+ /**
+ * handle page offset wrap around
+ */
+ loff = PSS_SMEM_PGOFF(loff);
+ if (loff == 0) {
+ pgnum++;
+ writel(pgnum, ioc->ioc_regs.host_page_num_fn);
+ }
+ }
+
+ writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
+ ioc->ioc_regs.host_page_num_fn);
+
+ /*
+ * release semaphore
+ */
+ readl(ioc->ioc_regs.ioc_init_sem_reg);
+ writel(1, ioc->ioc_regs.ioc_init_sem_reg);
+ return 0;
+}
+
+/**
+ * Retrieve saved firmware trace from a prior IOC failure.
+ */
+int
+bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
+{
+ u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id;
+ int tlen, status = 0;
+
+ tlen = *trclen;
+ if (tlen > BNA_DBG_FWTRC_LEN)
+ tlen = BNA_DBG_FWTRC_LEN;
+
+ status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen);
+ *trclen = tlen;
+ return status;
+}
+
+/**
+ * Save firmware trace if configured.
+ */
+static void
+bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
+{
+ int tlen;
+
+ if (ioc->dbg_fwsave_once) {
+ ioc->dbg_fwsave_once = 0;
+ if (ioc->dbg_fwsave_len) {
+ tlen = ioc->dbg_fwsave_len;
+ bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
+ }
+ }
+}
+
+/**
+ * Retrieve saved firmware trace from a prior IOC failure.
+ */
+int
+bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
+{
+ int tlen;
+
+ if (ioc->dbg_fwsave_len == 0)
+ return BFA_STATUS_ENOFSAVE;
+
+ tlen = *trclen;
+ if (tlen > ioc->dbg_fwsave_len)
+ tlen = ioc->dbg_fwsave_len;
+
+ memcpy(trcdata, ioc->dbg_fwsave, tlen);
+ *trclen = tlen;
+ return BFA_STATUS_OK;
+}
+
static void
bfa_ioc_fail_notify(struct bfa_ioc *ioc)
{
@@ -1751,6 +1861,7 @@ bfa_ioc_fail_notify(struct bfa_ioc *ioc)
*/
ioc->cbfn->hbfail_cbfn(ioc->bfa);
bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
+ bfa_nw_ioc_debug_save_ftrc(ioc);
}
/**
@@ -2058,6 +2169,16 @@ bfa_nw_ioc_disable(struct bfa_ioc *ioc)
bfa_fsm_send_event(ioc, IOC_E_DISABLE);
}
+/**
+ * Initialize memory for saving firmware trace.
+ */
+void
+bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave)
+{
+ ioc->dbg_fwsave = dbg_fwsave;
+ ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? BNA_DBG_FWTRC_LEN : 0;
+}
+
static u32
bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
{
@@ -2172,6 +2293,15 @@ bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
}
/**
+ * return true if IOC is operational
+ */
+bool
+bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
+{
+ return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
+}
+
+/**
* Add to IOC heartbeat failure notification queue. To be used by common
* modules such as cee, port, diag.
*/
@@ -2471,3 +2601,366 @@ bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
msecs_to_jiffies(BFA_IOC_POLL_TOV));
}
}
+
+/*
+ * Flash module specific
+ */
+
+/*
+ * The FLASH DMA buffer should be big enough to hold both the MFG block and
+ * the ASIC block (64k) at the same time, and should also be 2k aligned so
+ * that no write segment crosses a sector boundary.
+ */
+#define BFA_FLASH_SEG_SZ 2048
+#define BFA_FLASH_DMA_BUF_SZ \
+ roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ)
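For a rough sense of scale, and purely as an illustration: if sizeof(struct bfa_mfg_block) came to 416 bytes (the real value depends on the STRSZ() string lengths), roundup(0x10000 + 416, 2048) evaluates to 0x10800, i.e. a 66 KiB buffer that holds the 64 KiB ASIC block plus the MFG block and is a whole number of 2 KiB segments.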
+
+static void
+bfa_flash_cb(struct bfa_flash *flash)
+{
+ flash->op_busy = 0;
+ if (flash->cbfn)
+ flash->cbfn(flash->cbarg, flash->status);
+}
+
+static void
+bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
+{
+ struct bfa_flash *flash = cbarg;
+
+ switch (event) {
+ case BFA_IOC_E_DISABLED:
+ case BFA_IOC_E_FAILED:
+ if (flash->op_busy) {
+ flash->status = BFA_STATUS_IOC_FAILURE;
+ flash->cbfn(flash->cbarg, flash->status);
+ flash->op_busy = 0;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * Send flash write request.
+ *
+ * @param[in] flash - flash structure
+ */
+static void
+bfa_flash_write_send(struct bfa_flash *flash)
+{
+ struct bfi_flash_write_req *msg =
+ (struct bfi_flash_write_req *) flash->mb.msg;
+ u32 len;
+
+ msg->type = be32_to_cpu(flash->type);
+ msg->instance = flash->instance;
+ msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
+ len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
+ flash->residue : BFA_FLASH_DMA_BUF_SZ;
+ msg->length = be32_to_cpu(len);
+
+ /* indicate if it's the last msg of the whole write operation */
+ msg->last = (len == flash->residue) ? 1 : 0;
+
+ bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
+ bfa_ioc_portid(flash->ioc));
+ bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
+ memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
+ bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
+
+ flash->residue -= len;
+ flash->offset += len;
+}
+
+/*
+ * Send flash read request.
+ *
+ * @param[in] cbarg - callback argument
+ */
+static void
+bfa_flash_read_send(void *cbarg)
+{
+ struct bfa_flash *flash = cbarg;
+ struct bfi_flash_read_req *msg =
+ (struct bfi_flash_read_req *) flash->mb.msg;
+ u32 len;
+
+ msg->type = be32_to_cpu(flash->type);
+ msg->instance = flash->instance;
+ msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
+ len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
+ flash->residue : BFA_FLASH_DMA_BUF_SZ;
+ msg->length = be32_to_cpu(len);
+ bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
+ bfa_ioc_portid(flash->ioc));
+ bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
+ bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
+}
+
+/*
+ * Process flash response messages upon receiving interrupts.
+ *
+ * @param[in] flasharg - flash structure
+ * @param[in] msg - message structure
+ */
+static void
+bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
+{
+ struct bfa_flash *flash = flasharg;
+ u32 status;
+
+ union {
+ struct bfi_flash_query_rsp *query;
+ struct bfi_flash_write_rsp *write;
+ struct bfi_flash_read_rsp *read;
+ struct bfi_mbmsg *msg;
+ } m;
+
+ m.msg = msg;
+
+ /* receiving response after ioc failure */
+ if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT)
+ return;
+
+ switch (msg->mh.msg_id) {
+ case BFI_FLASH_I2H_QUERY_RSP:
+ status = be32_to_cpu(m.query->status);
+ if (status == BFA_STATUS_OK) {
+ u32 i;
+ struct bfa_flash_attr *attr, *f;
+
+ attr = (struct bfa_flash_attr *) flash->ubuf;
+ f = (struct bfa_flash_attr *) flash->dbuf_kva;
+ attr->status = be32_to_cpu(f->status);
+ attr->npart = be32_to_cpu(f->npart);
+ for (i = 0; i < attr->npart; i++) {
+ attr->part[i].part_type =
+ be32_to_cpu(f->part[i].part_type);
+ attr->part[i].part_instance =
+ be32_to_cpu(f->part[i].part_instance);
+ attr->part[i].part_off =
+ be32_to_cpu(f->part[i].part_off);
+ attr->part[i].part_size =
+ be32_to_cpu(f->part[i].part_size);
+ attr->part[i].part_len =
+ be32_to_cpu(f->part[i].part_len);
+ attr->part[i].part_status =
+ be32_to_cpu(f->part[i].part_status);
+ }
+ }
+ flash->status = status;
+ bfa_flash_cb(flash);
+ break;
+ case BFI_FLASH_I2H_WRITE_RSP:
+ status = be32_to_cpu(m.write->status);
+ if (status != BFA_STATUS_OK || flash->residue == 0) {
+ flash->status = status;
+ bfa_flash_cb(flash);
+ } else
+ bfa_flash_write_send(flash);
+ break;
+ case BFI_FLASH_I2H_READ_RSP:
+ status = be32_to_cpu(m.read->status);
+ if (status != BFA_STATUS_OK) {
+ flash->status = status;
+ bfa_flash_cb(flash);
+ } else {
+ u32 len = be32_to_cpu(m.read->length);
+ memcpy(flash->ubuf + flash->offset,
+ flash->dbuf_kva, len);
+ flash->residue -= len;
+ flash->offset += len;
+ if (flash->residue == 0) {
+ flash->status = status;
+ bfa_flash_cb(flash);
+ } else
+ bfa_flash_read_send(flash);
+ }
+ break;
+ case BFI_FLASH_I2H_BOOT_VER_RSP:
+ case BFI_FLASH_I2H_EVENT:
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+/*
+ * Flash memory info API.
+ */
+u32
+bfa_nw_flash_meminfo(void)
+{
+ return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Flash attach API.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] ioc - ioc structure
+ * @param[in] dev - device structure
+ */
+void
+bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
+{
+ flash->ioc = ioc;
+ flash->cbfn = NULL;
+ flash->cbarg = NULL;
+ flash->op_busy = 0;
+
+ bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
+ bfa_q_qe_init(&flash->ioc_notify);
+ bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
+ list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
+}
+
+/*
+ * Claim memory for flash
+ *
+ * @param[in] flash - flash structure
+ * @param[in] dm_kva - pointer to virtual memory address
+ * @param[in] dm_pa - physical memory address
+ */
+void
+bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
+{
+ flash->dbuf_kva = dm_kva;
+ flash->dbuf_pa = dm_pa;
+ memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
+ dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+ dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
+}
+
+/*
+ * Get flash attribute.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] attr - flash attribute structure
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+enum bfa_status
+bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
+ bfa_cb_flash cbfn, void *cbarg)
+{
+ struct bfi_flash_query_req *msg =
+ (struct bfi_flash_query_req *) flash->mb.msg;
+
+ if (!bfa_nw_ioc_is_operational(flash->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ if (flash->op_busy)
+ return BFA_STATUS_DEVBUSY;
+
+ flash->op_busy = 1;
+ flash->cbfn = cbfn;
+ flash->cbarg = cbarg;
+ flash->ubuf = (u8 *) attr;
+
+ bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
+ bfa_ioc_portid(flash->ioc));
+ bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa);
+ bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Update flash partition.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] type - flash partition type
+ * @param[in] instance - flash partition instance
+ * @param[in] buf - update data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to the partition starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+enum bfa_status
+bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance,
+ void *buf, u32 len, u32 offset,
+ bfa_cb_flash cbfn, void *cbarg)
+{
+ if (!bfa_nw_ioc_is_operational(flash->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ /*
+ * 'len' must be on a word (4-byte) boundary
+ */
+ if (!len || (len & 0x03))
+ return BFA_STATUS_FLASH_BAD_LEN;
+
+ if (type == BFA_FLASH_PART_MFG)
+ return BFA_STATUS_EINVAL;
+
+ if (flash->op_busy)
+ return BFA_STATUS_DEVBUSY;
+
+ flash->op_busy = 1;
+ flash->cbfn = cbfn;
+ flash->cbarg = cbarg;
+ flash->type = type;
+ flash->instance = instance;
+ flash->residue = len;
+ flash->offset = 0;
+ flash->addr_off = offset;
+ flash->ubuf = buf;
+
+ bfa_flash_write_send(flash);
+
+ return BFA_STATUS_OK;
+}
+
+/*
+ * Read flash partition.
+ *
+ * @param[in] flash - flash structure
+ * @param[in] type - flash partition type
+ * @param[in] instance - flash partition instance
+ * @param[in] buf - read data buffer
+ * @param[in] len - data buffer length
+ * @param[in] offset - offset relative to the partition starting address
+ * @param[in] cbfn - callback function
+ * @param[in] cbarg - callback argument
+ *
+ * Return status.
+ */
+enum bfa_status
+bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance,
+ void *buf, u32 len, u32 offset,
+ bfa_cb_flash cbfn, void *cbarg)
+{
+ if (!bfa_nw_ioc_is_operational(flash->ioc))
+ return BFA_STATUS_IOC_NON_OP;
+
+ /*
+ * 'len' must be on a word (4-byte) boundary
+ */
+ if (!len || (len & 0x03))
+ return BFA_STATUS_FLASH_BAD_LEN;
+
+ if (flash->op_busy)
+ return BFA_STATUS_DEVBUSY;
+
+ flash->op_busy = 1;
+ flash->cbfn = cbfn;
+ flash->cbarg = cbarg;
+ flash->type = type;
+ flash->instance = instance;
+ flash->residue = len;
+ flash->offset = 0;
+ flash->addr_off = offset;
+ flash->ubuf = buf;
+
+ bfa_flash_read_send(flash);
+
+ return BFA_STATUS_OK;
+}
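A hypothetical caller sketch, for illustration only and not part of the patch (partition type BFA_FLASH_PART_MFG and instance 0 are placeholder choices, and real callers also serialize against other bna operations): the flash API is asynchronous, so a simple synchronous read can be built by parking on a completion that the bfa_cb_flash callback fires from the mailbox interrupt path.

static void my_flash_read_done(void *cbarg, enum bfa_status status)
{
	/* called from bfa_flash_intr() once the last chunk has arrived;
	 * a real caller would also record 'status' for the waiter
	 */
	complete((struct completion *)cbarg);
}

static enum bfa_status my_read_part(struct bna *bna, void *buf, u32 len)
{
	DECLARE_COMPLETION_ONSTACK(comp);
	enum bfa_status ret;

	/* len must be a multiple of 4, per the checks above */
	ret = bfa_nw_flash_read_part(&bna->flash, BFA_FLASH_PART_MFG, 0,
				     buf, len, 0, my_flash_read_done, &comp);
	if (ret == BFA_STATUS_OK)
		wait_for_completion(&comp);

	return ret;
}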
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.h b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
index ca158d1..3b4460f 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.h
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.h
@@ -27,6 +27,8 @@
#define BFA_IOC_HWSEM_TOV 500 /* msecs */
#define BFA_IOC_HB_TOV 500 /* msecs */
#define BFA_IOC_POLL_TOV 200 /* msecs */
+#define BNA_DBG_FWTRC_LEN (BFI_IOC_TRC_ENTS * BFI_IOC_TRC_ENT_SZ + \
+ BFI_IOC_TRC_HDR_SZ)
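Together with the BFI constants added below (256 trace entries of 16 bytes plus a 32-byte header), this works out to 4128 bytes of saved firmware trace per IOC.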
/**
* PCI device information required by IOC
@@ -68,6 +70,16 @@ __bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
dma_addr->a32.addr_hi = (u32) htonl(upper_32_bits(pa));
}
+#define bfa_alen_set(__alen, __len, __pa) \
+ __bfa_alen_set(__alen, __len, (u64)__pa)
+
+static inline void
+__bfa_alen_set(struct bfi_alen *alen, u32 len, u64 pa)
+{
+ alen->al_len = cpu_to_be32(len);
+ bfa_dma_be_addr_set(alen->al_addr, pa);
+}
+
struct bfa_ioc_regs {
void __iomem *hfn_mbox_cmd;
void __iomem *hfn_mbox;
@@ -296,6 +308,7 @@ void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
bool bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc);
+bool bfa_nw_ioc_is_operational(struct bfa_ioc *ioc);
void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
void bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
struct bfa_ioc_notify *notify);
@@ -307,6 +320,9 @@ void bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc,
bool bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc,
struct bfi_ioc_image_hdr *fwhdr);
mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc);
+void bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave);
+int bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen);
+int bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen);
/*
* Timeout APIs
@@ -322,4 +338,42 @@ void bfa_nw_iocpf_sem_timeout(void *ioc);
u32 *bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off);
u32 bfa_cb_image_get_size(enum bfi_asic_gen asic_gen);
+/*
+ * Flash module specific
+ */
+typedef void (*bfa_cb_flash) (void *cbarg, enum bfa_status status);
+
+struct bfa_flash {
+ struct bfa_ioc *ioc; /* back pointer to ioc */
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 rsv[3];
+ u32 op_busy; /* operation busy flag */
+ u32 residue; /* residual length */
+ u32 offset; /* offset */
+ enum bfa_status status; /* status */
+ u8 *dbuf_kva; /* dma buf virtual address */
+ u64 dbuf_pa; /* dma buf physical address */
+ bfa_cb_flash cbfn; /* user callback function */
+ void *cbarg; /* user callback arg */
+ u8 *ubuf; /* user supplied buffer */
+ u32 addr_off; /* partition address offset */
+ struct bfa_mbox_cmd mb; /* mailbox */
+ struct bfa_ioc_notify ioc_notify; /* ioc event notify */
+};
+
+enum bfa_status bfa_nw_flash_get_attr(struct bfa_flash *flash,
+ struct bfa_flash_attr *attr,
+ bfa_cb_flash cbfn, void *cbarg);
+enum bfa_status bfa_nw_flash_update_part(struct bfa_flash *flash,
+ u32 type, u8 instance, void *buf, u32 len, u32 offset,
+ bfa_cb_flash cbfn, void *cbarg);
+enum bfa_status bfa_nw_flash_read_part(struct bfa_flash *flash,
+ u32 type, u8 instance, void *buf, u32 len, u32 offset,
+ bfa_cb_flash cbfn, void *cbarg);
+u32 bfa_nw_flash_meminfo(void);
+void bfa_nw_flash_attach(struct bfa_flash *flash,
+ struct bfa_ioc *ioc, void *dev);
+void bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa);
+
#endif /* __BFA_IOC_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bfi.h b/drivers/net/ethernet/brocade/bna/bfi.h
index 7a1393a..0d9df69 100644
--- a/drivers/net/ethernet/brocade/bna/bfi.h
+++ b/drivers/net/ethernet/brocade/bna/bfi.h
@@ -83,6 +83,14 @@ union bfi_addr_u {
} a32;
};
+/**
+ * Generic DMA addr-len pair.
+ */
+struct bfi_alen {
+ union bfi_addr_u al_addr; /* DMA addr of buffer */
+ u32 al_len; /* length of buffer */
+};
+
/*
* Large Message structure - 128 Bytes size Msgs
*/
@@ -249,6 +257,8 @@ struct bfi_ioc_getattr_reply {
*/
#define BFI_IOC_TRC_OFF (0x4b00)
#define BFI_IOC_TRC_ENTS 256
+#define BFI_IOC_TRC_ENT_SZ 16
+#define BFI_IOC_TRC_HDR_SZ 32
#define BFI_IOC_FW_SIGNATURE (0xbfadbfad)
#define BFI_IOC_MD5SUM_SZ 4
@@ -476,6 +486,93 @@ struct bfi_msgq_i2h_cmdq_copy_req {
u16 len;
};
+/*
+ * FLASH module specific
+ */
+enum bfi_flash_h2i_msgs {
+ BFI_FLASH_H2I_QUERY_REQ = 1,
+ BFI_FLASH_H2I_ERASE_REQ = 2,
+ BFI_FLASH_H2I_WRITE_REQ = 3,
+ BFI_FLASH_H2I_READ_REQ = 4,
+ BFI_FLASH_H2I_BOOT_VER_REQ = 5,
+};
+
+enum bfi_flash_i2h_msgs {
+ BFI_FLASH_I2H_QUERY_RSP = BFA_I2HM(1),
+ BFI_FLASH_I2H_ERASE_RSP = BFA_I2HM(2),
+ BFI_FLASH_I2H_WRITE_RSP = BFA_I2HM(3),
+ BFI_FLASH_I2H_READ_RSP = BFA_I2HM(4),
+ BFI_FLASH_I2H_BOOT_VER_RSP = BFA_I2HM(5),
+ BFI_FLASH_I2H_EVENT = BFA_I2HM(127),
+};
+
+/*
+ * Flash query request
+ */
+struct bfi_flash_query_req {
+ struct bfi_mhdr mh; /* Common msg header */
+ struct bfi_alen alen;
+};
+
+/*
+ * Flash write request
+ */
+struct bfi_flash_write_req {
+ struct bfi_mhdr mh; /* Common msg header */
+ struct bfi_alen alen;
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 last;
+ u8 rsv[2];
+ u32 offset;
+ u32 length;
+};
+
+/*
+ * Flash read request
+ */
+struct bfi_flash_read_req {
+ struct bfi_mhdr mh; /* Common msg header */
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 rsv[3];
+ u32 offset;
+ u32 length;
+ struct bfi_alen alen;
+};
+
+/*
+ * Flash query response
+ */
+struct bfi_flash_query_rsp {
+ struct bfi_mhdr mh; /* Common msg header */
+ u32 status;
+};
+
+/*
+ * Flash read response
+ */
+struct bfi_flash_read_rsp {
+ struct bfi_mhdr mh; /* Common msg header */
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 rsv[3];
+ u32 status;
+ u32 length;
+};
+
+/*
+ * Flash write response
+ */
+struct bfi_flash_write_rsp {
+ struct bfi_mhdr mh; /* Common msg header */
+ u32 type; /* partition type */
+ u8 instance; /* partition instance */
+ u8 rsv[3];
+ u32 status;
+ u32 length;
+};
+
#pragma pack()
#endif /* __BFI_H__ */
diff --git a/drivers/net/ethernet/brocade/bna/bna_enet.c b/drivers/net/ethernet/brocade/bna/bna_enet.c
index 26f5c5a..9ccc586 100644
--- a/drivers/net/ethernet/brocade/bna/bna_enet.c
+++ b/drivers/net/ethernet/brocade/bna/bna_enet.c
@@ -1727,6 +1727,7 @@ bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);
kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
+ bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva);
/**
* Attach common modules (Diag, SFP, CEE, Port) and claim respective
@@ -1740,6 +1741,11 @@ bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
kva += bfa_nw_cee_meminfo();
dma += bfa_nw_cee_meminfo();
+ bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna);
+ bfa_nw_flash_memclaim(&bna->flash, kva, dma);
+ kva += bfa_nw_flash_meminfo();
+ dma += bfa_nw_flash_meminfo();
+
bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
bfa_msgq_memclaim(&bna->msgq, kva, dma);
bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
@@ -1892,7 +1898,8 @@ bna_res_req(struct bna_res_info *res_info)
res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
(bfa_nw_cee_meminfo() +
- bfa_msgq_meminfo()), PAGE_SIZE);
+ bfa_nw_flash_meminfo() +
+ bfa_msgq_meminfo()), PAGE_SIZE);
/* DMA memory for retrieving IOC attributes */
res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
@@ -1904,8 +1911,8 @@ bna_res_req(struct bna_res_info *res_info)
/* Virtual memory for retrieving fw_trc */
res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
- res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0;
- res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0;
+ res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1;
+ res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN;
/* DMA memory for retrieving stats */
res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h
index d090fbf..e8d3ab7 100644
--- a/drivers/net/ethernet/brocade/bna/bna_types.h
+++ b/drivers/net/ethernet/brocade/bna/bna_types.h
@@ -427,7 +427,7 @@ struct bna_ethport {
/* Doorbell structure */
struct bna_ib_dbell {
- void *__iomem doorbell_addr;
+ void __iomem *doorbell_addr;
u32 doorbell_ack;
};
@@ -463,7 +463,7 @@ struct bna_tcb {
u32 consumer_index;
volatile u32 *hw_consumer_index;
u32 q_depth;
- void *__iomem q_dbell;
+ void __iomem *q_dbell;
struct bna_ib_dbell *i_dbell;
int page_idx;
int page_count;
@@ -599,7 +599,7 @@ struct bna_rcb {
u32 producer_index;
u32 consumer_index;
u32 q_depth;
- void *__iomem q_dbell;
+ void __iomem *q_dbell;
int page_idx;
int page_count;
/* Control path */
@@ -966,6 +966,7 @@ struct bna {
struct bna_ioceth ioceth;
struct bfa_cee cee;
+ struct bfa_flash flash;
struct bfa_msgq msgq;
struct bna_ethport ethport;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 7f3091e..be7d91e 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -44,11 +44,18 @@ static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");
+static uint bna_debugfs_enable = 1;
+module_param(bna_debugfs_enable, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
+ " Range[false:0|true:1]");
+
/*
* Global variables
*/
u32 bnad_rxqs_per_cq = 2;
-
+static u32 bna_id;
+static struct mutex bnad_list_mutex;
+static LIST_HEAD(bnad_list);
static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
/*
@@ -75,6 +82,23 @@ do { \
#define BNAD_TXRX_SYNC_MDELAY 250 /* 250 msecs */
+static void
+bnad_add_to_list(struct bnad *bnad)
+{
+ mutex_lock(&bnad_list_mutex);
+ list_add_tail(&bnad->list_entry, &bnad_list);
+ bnad->id = bna_id++;
+ mutex_unlock(&bnad_list_mutex);
+}
+
+static void
+bnad_remove_from_list(struct bnad *bnad)
+{
+ mutex_lock(&bnad_list_mutex);
+ list_del(&bnad->list_entry);
+ mutex_unlock(&bnad_list_mutex);
+}
+
/*
* Reinitialize completions in CQ, once Rx is taken down
*/
@@ -723,7 +747,7 @@ void
bnad_cb_ethport_link_status(struct bnad *bnad,
enum bna_link_status link_status)
{
- bool link_up = 0;
+ bool link_up = false;
link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);
@@ -1084,6 +1108,16 @@ bnad_cb_enet_mtu_set(struct bnad *bnad)
complete(&bnad->bnad_completions.mtu_comp);
}
+void
+bnad_cb_completion(void *arg, enum bfa_status status)
+{
+ struct bnad_iocmd_comp *iocmd_comp =
+ (struct bnad_iocmd_comp *)arg;
+
+ iocmd_comp->comp_status = (u32) status;
+ complete(&iocmd_comp->comp);
+}
+
/* Resource allocation, free functions */
static void
@@ -2968,7 +3002,7 @@ bnad_change_mtu(struct net_device *netdev, int new_mtu)
return err;
}
-static void
+static int
bnad_vlan_rx_add_vid(struct net_device *netdev,
unsigned short vid)
{
@@ -2976,7 +3010,7 @@ bnad_vlan_rx_add_vid(struct net_device *netdev,
unsigned long flags;
if (!bnad->rx_info[0].rx)
- return;
+ return 0;
mutex_lock(&bnad->conf_mutex);
@@ -2986,9 +3020,11 @@ bnad_vlan_rx_add_vid(struct net_device *netdev,
spin_unlock_irqrestore(&bnad->bna_lock, flags);
mutex_unlock(&bnad->conf_mutex);
+
+ return 0;
}
-static void
+static int
bnad_vlan_rx_kill_vid(struct net_device *netdev,
unsigned short vid)
{
@@ -2996,7 +3032,7 @@ bnad_vlan_rx_kill_vid(struct net_device *netdev,
unsigned long flags;
if (!bnad->rx_info[0].rx)
- return;
+ return 0;
mutex_lock(&bnad->conf_mutex);
@@ -3006,6 +3042,8 @@ bnad_vlan_rx_kill_vid(struct net_device *netdev,
spin_unlock_irqrestore(&bnad->bna_lock, flags);
mutex_unlock(&bnad->conf_mutex);
+
+ return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -3163,12 +3201,14 @@ bnad_lock_init(struct bnad *bnad)
{
spin_lock_init(&bnad->bna_lock);
mutex_init(&bnad->conf_mutex);
+ mutex_init(&bnad_list_mutex);
}
static void
bnad_lock_uninit(struct bnad *bnad)
{
mutex_destroy(&bnad->conf_mutex);
+ mutex_destroy(&bnad_list_mutex);
}
/* PCI Initialization */
@@ -3186,7 +3226,7 @@ bnad_pci_init(struct bnad *bnad,
goto disable_device;
if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
!dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- *using_dac = 1;
+ *using_dac = true;
} else {
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
@@ -3195,7 +3235,7 @@ bnad_pci_init(struct bnad *bnad,
if (err)
goto release_regions;
}
- *using_dac = 0;
+ *using_dac = false;
}
pci_set_master(pdev);
return 0;
@@ -3249,8 +3289,8 @@ bnad_pci_probe(struct pci_dev *pdev,
return err;
}
bnad = netdev_priv(netdev);
-
bnad_lock_init(bnad);
+ bnad_add_to_list(bnad);
mutex_lock(&bnad->conf_mutex);
/*
@@ -3277,6 +3317,10 @@ bnad_pci_probe(struct pci_dev *pdev,
/* Set link to down state */
netif_carrier_off(netdev);
+ /* Set up the debugfs node for this bnad */
+ if (bna_debugfs_enable)
+ bnad_debugfs_init(bnad);
+
/* Get resource requirement form bna */
spin_lock_irqsave(&bnad->bna_lock, flags);
bna_res_req(&bnad->res_info[0]);
@@ -3398,11 +3442,15 @@ disable_ioceth:
res_free:
bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
drv_uninit:
+ /* Remove the debugfs node for this bnad */
+ kfree(bnad->regdata);
+ bnad_debugfs_uninit(bnad);
bnad_uninit(bnad);
pci_uninit:
bnad_pci_uninit(pdev);
unlock_mutex:
mutex_unlock(&bnad->conf_mutex);
+ bnad_remove_from_list(bnad);
bnad_lock_uninit(bnad);
free_netdev(netdev);
return err;
@@ -3441,7 +3489,11 @@ bnad_pci_remove(struct pci_dev *pdev)
bnad_disable_msix(bnad);
bnad_pci_uninit(pdev);
mutex_unlock(&bnad->conf_mutex);
+ bnad_remove_from_list(bnad);
bnad_lock_uninit(bnad);
+ /* Remove the debugfs node for this bnad */
+ kfree(bnad->regdata);
+ bnad_debugfs_uninit(bnad);
bnad_uninit(bnad);
free_netdev(netdev);
}
diff --git a/drivers/net/ethernet/brocade/bna/bnad.h b/drivers/net/ethernet/brocade/bna/bnad.h
index 5487ca4..55824d9 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.h
+++ b/drivers/net/ethernet/brocade/bna/bnad.h
@@ -124,6 +124,12 @@ enum bnad_link_state {
BNAD_LS_UP = 1
};
+struct bnad_iocmd_comp {
+ struct bnad *bnad;
+ struct completion comp;
+ int comp_status;
+};
+
struct bnad_completion {
struct completion ioc_comp;
struct completion ucast_comp;
@@ -251,6 +257,8 @@ struct bnad_unmap_q {
struct bnad {
struct net_device *netdev;
+ u32 id;
+ struct list_head list_entry;
/* Data path */
struct bnad_tx_info tx_info[BNAD_MAX_TX];
@@ -320,12 +328,26 @@ struct bnad {
char adapter_name[BNAD_NAME_LEN];
char port_name[BNAD_NAME_LEN];
char mbox_irq_name[BNAD_NAME_LEN];
+
+ /* debugfs specific data */
+ char *regdata;
+ u32 reglen;
+ struct dentry *bnad_dentry_files[5];
+ struct dentry *port_debugfs_root;
+};
+
+struct bnad_drvinfo {
+ struct bfa_ioc_attr ioc_attr;
+ struct bfa_cee_attr cee_attr;
+ struct bfa_flash_attr flash_attr;
+ u32 cee_status;
+ u32 flash_status;
};
/*
* EXTERN VARIABLES
*/
-extern struct firmware *bfi_fw;
+extern const struct firmware *bfi_fw;
extern u32 bnad_rxqs_per_cq;
/*
@@ -340,6 +362,7 @@ extern int bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr);
extern int bnad_enable_default_bcast(struct bnad *bnad);
extern void bnad_restore_vlans(struct bnad *bnad, u32 rx_id);
extern void bnad_set_ethtool_ops(struct net_device *netdev);
+extern void bnad_cb_completion(void *arg, enum bfa_status status);
/* Configuration & setup */
extern void bnad_tx_coalescing_timeo_set(struct bnad *bnad);
@@ -359,6 +382,10 @@ extern void bnad_netdev_qstats_fill(struct bnad *bnad,
extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
struct rtnl_link_stats64 *stats);
+/* Debugfs */
+void bnad_debugfs_init(struct bnad *bnad);
+void bnad_debugfs_uninit(struct bnad *bnad);
+
/**
* MACROS
*/
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
new file mode 100644
index 0000000..592ad39
--- /dev/null
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -0,0 +1,623 @@
+/*
+ * Linux network driver for Brocade Converged Network Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+/*
+ * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include "bnad.h"
+
+/*
+ * BNA debugfs interface
+ *
+ * To access the interface, the debugfs file system should be mounted
+ * (if it is not already) using:
+ * mount -t debugfs none /sys/kernel/debug
+ *
+ * BNA Hierarchy:
+ * - bna/pci_dev:<pci_name>
+ * where pci_name corresponds to the one under /sys/bus/pci/drivers/bna
+ *
+ * Debugging services available per pci_dev:
+ * fwtrc: To collect the current firmware trace.
+ * fwsave: To collect the last saved firmware trace after a firmware crash.
+ * regwr: To write one word to a chip register.
+ * regrd: To read one or more words from chip registers.
+ */
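+
+/*
+ * Illustrative usage (paths and values are examples only; the per-port
+ * directory name comes from the device's PCI address):
+ * # cat /sys/kernel/debug/bna/pci_dev:0000:03:00.0/fwtrc > fwtrc.bin
+ * # echo "0x0014:4" > /sys/kernel/debug/bna/pci_dev:0000:03:00.0/regrd
+ * # cat /sys/kernel/debug/bna/pci_dev:0000:03:00.0/regrd
+ * # echo "0x0014:0x1" > /sys/kernel/debug/bna/pci_dev:0000:03:00.0/regwr
+ * regrd and regwr both take hex "offset:count" / "offset:value" strings,
+ * matching the sscanf("%x:%x") parsing below.
+ */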
+
+struct bnad_debug_info {
+ char *debug_buffer;
+ void *i_private;
+ int buffer_len;
+};
+
+static int
+bnad_debugfs_open_fwtrc(struct inode *inode, struct file *file)
+{
+ struct bnad *bnad = inode->i_private;
+ struct bnad_debug_info *fw_debug;
+ unsigned long flags;
+ int rc;
+
+ fw_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
+ if (!fw_debug)
+ return -ENOMEM;
+
+ fw_debug->buffer_len = BNA_DBG_FWTRC_LEN;
+
+ fw_debug->debug_buffer = kzalloc(fw_debug->buffer_len, GFP_KERNEL);
+ if (!fw_debug->debug_buffer) {
+ kfree(fw_debug);
+ fw_debug = NULL;
+ pr_warn("bna %s: Failed to allocate fwtrc buffer\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ rc = bfa_nw_ioc_debug_fwtrc(&bnad->bna.ioceth.ioc,
+ fw_debug->debug_buffer,
+ &fw_debug->buffer_len);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (rc != BFA_STATUS_OK) {
+ kfree(fw_debug->debug_buffer);
+ fw_debug->debug_buffer = NULL;
+ kfree(fw_debug);
+ fw_debug = NULL;
+ pr_warn("bnad %s: Failed to collect fwtrc\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ file->private_data = fw_debug;
+
+ return 0;
+}
+
+static int
+bnad_debugfs_open_fwsave(struct inode *inode, struct file *file)
+{
+ struct bnad *bnad = inode->i_private;
+ struct bnad_debug_info *fw_debug;
+ unsigned long flags;
+ int rc;
+
+ fw_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
+ if (!fw_debug)
+ return -ENOMEM;
+
+ fw_debug->buffer_len = BNA_DBG_FWTRC_LEN;
+
+ fw_debug->debug_buffer = kzalloc(fw_debug->buffer_len, GFP_KERNEL);
+ if (!fw_debug->debug_buffer) {
+ kfree(fw_debug);
+ fw_debug = NULL;
+ pr_warn("bna %s: Failed to allocate fwsave buffer\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ rc = bfa_nw_ioc_debug_fwsave(&bnad->bna.ioceth.ioc,
+ fw_debug->debug_buffer,
+ &fw_debug->buffer_len);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ if (rc != BFA_STATUS_OK && rc != BFA_STATUS_ENOFSAVE) {
+ kfree(fw_debug->debug_buffer);
+ fw_debug->debug_buffer = NULL;
+ kfree(fw_debug);
+ fw_debug = NULL;
+ pr_warn("bna %s: Failed to collect fwsave\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ file->private_data = fw_debug;
+
+ return 0;
+}
+
+static int
+bnad_debugfs_open_reg(struct inode *inode, struct file *file)
+{
+ struct bnad_debug_info *reg_debug;
+
+ reg_debug = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
+ if (!reg_debug)
+ return -ENOMEM;
+
+ reg_debug->i_private = inode->i_private;
+
+ file->private_data = reg_debug;
+
+ return 0;
+}
+
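+/*
+ * Collect adapter information (IOC, CEE and flash attributes) into the
+ * caller-supplied buffer. The CEE and flash queries complete asynchronously
+ * through bnad_cb_completion, so wait on the embedded completion for each.
+ */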
+static int
+bnad_get_debug_drvinfo(struct bnad *bnad, void *buffer, u32 len)
+{
+ struct bnad_drvinfo *drvinfo = (struct bnad_drvinfo *) buffer;
+ struct bnad_iocmd_comp fcomp;
+ unsigned long flags = 0;
+ int ret = BFA_STATUS_FAILED;
+
+ /* Get IOC info */
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, &drvinfo->ioc_attr);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ /* Retrieve CEE related info */
+ fcomp.bnad = bnad;
+ fcomp.comp_status = 0;
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ ret = bfa_nw_cee_get_attr(&bnad->bna.cee, &drvinfo->cee_attr,
+ bnad_cb_completion, &fcomp);
+ if (ret != BFA_STATUS_OK) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ goto out;
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ wait_for_completion(&fcomp.comp);
+ drvinfo->cee_status = fcomp.comp_status;
+
+ /* Retrieve flash partition info */
+ fcomp.comp_status = 0;
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ ret = bfa_nw_flash_get_attr(&bnad->bna.flash, &drvinfo->flash_attr,
+ bnad_cb_completion, &fcomp);
+ if (ret != BFA_STATUS_OK) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ goto out;
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ wait_for_completion(&fcomp.comp);
+ drvinfo->flash_status = fcomp.comp_status;
+out:
+ return ret;
+}
+
+static int
+bnad_debugfs_open_drvinfo(struct inode *inode, struct file *file)
+{
+ struct bnad *bnad = inode->i_private;
+ struct bnad_debug_info *drv_info;
+ int rc;
+
+ drv_info = kzalloc(sizeof(struct bnad_debug_info), GFP_KERNEL);
+ if (!drv_info)
+ return -ENOMEM;
+
+ drv_info->buffer_len = sizeof(struct bnad_drvinfo);
+
+ drv_info->debug_buffer = kzalloc(drv_info->buffer_len, GFP_KERNEL);
+ if (!drv_info->debug_buffer) {
+ kfree(drv_info);
+ drv_info = NULL;
+ pr_warn("bna %s: Failed to allocate drv info buffer\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ mutex_lock(&bnad->conf_mutex);
+ rc = bnad_get_debug_drvinfo(bnad, drv_info->debug_buffer,
+ drv_info->buffer_len);
+ mutex_unlock(&bnad->conf_mutex);
+ if (rc != BFA_STATUS_OK) {
+ kfree(drv_info->debug_buffer);
+ drv_info->debug_buffer = NULL;
+ kfree(drv_info);
+ drv_info = NULL;
+ pr_warn("bna %s: Failed to collect drvinfo\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ file->private_data = drv_info;
+
+ return 0;
+}
+
+/* Changes the current file position */
+static loff_t
+bnad_debugfs_lseek(struct file *file, loff_t offset, int orig)
+{
+ loff_t pos = file->f_pos;
+ struct bnad_debug_info *debug = file->private_data;
+
+ if (!debug)
+ return -EINVAL;
+
+ switch (orig) {
+ case 0:
+ file->f_pos = offset;
+ break;
+ case 1:
+ file->f_pos += offset;
+ break;
+ case 2:
+ file->f_pos = debug->buffer_len - offset;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (file->f_pos < 0 || file->f_pos > debug->buffer_len) {
+ file->f_pos = pos;
+ return -EINVAL;
+ }
+
+ return file->f_pos;
+}
+
+static ssize_t
+bnad_debugfs_read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *pos)
+{
+ struct bnad_debug_info *debug = file->private_data;
+
+ if (!debug || !debug->debug_buffer)
+ return 0;
+
+ return simple_read_from_buffer(buf, nbytes, pos,
+ debug->debug_buffer, debug->buffer_len);
+}
+
+#define BFA_REG_CT_ADDRSZ (0x40000)
+#define BFA_REG_CB_ADDRSZ (0x20000)
+#define BFA_REG_ADDRSZ(__ioc) \
+ ((u32)(bfa_asic_id_ctc(bfa_ioc_devid(__ioc)) ? \
+ BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ))
+#define BFA_REG_ADDRMSK(__ioc) (BFA_REG_ADDRSZ(__ioc) - 1)
+
+/*
+ * Function to check if the register offset passed is valid.
+ */
+static int
+bna_reg_offset_check(struct bfa_ioc *ioc, u32 offset, u32 len)
+{
+ u8 area;
+
+ /* check [16:15] */
+ area = (offset >> 15) & 0x7;
+ if (area == 0) {
+ /* PCIe core register */
+ if ((offset + (len<<2)) > 0x8000) /* 8k dwords or 32KB */
+ return BFA_STATUS_EINVAL;
+ } else if (area == 0x1) {
+ /* CB 32 KB memory page */
+ if ((offset + (len<<2)) > 0x10000) /* 8k dwords or 32KB */
+ return BFA_STATUS_EINVAL;
+ } else {
+ /* CB register space 64KB */
+ if ((offset + (len<<2)) > BFA_REG_ADDRMSK(ioc))
+ return BFA_STATUS_EINVAL;
+ }
+ return BFA_STATUS_OK;
+}
+
+static ssize_t
+bnad_debugfs_read_regrd(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *pos)
+{
+ struct bnad_debug_info *regrd_debug = file->private_data;
+ struct bnad *bnad = (struct bnad *)regrd_debug->i_private;
+ ssize_t rc;
+
+ if (!bnad->regdata)
+ return 0;
+
+ rc = simple_read_from_buffer(buf, nbytes, pos,
+ bnad->regdata, bnad->reglen);
+
+ if ((*pos + nbytes) >= bnad->reglen) {
+ kfree(bnad->regdata);
+ bnad->regdata = NULL;
+ bnad->reglen = 0;
+ }
+
+ return rc;
+}
+
+static ssize_t
+bnad_debugfs_write_regrd(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct bnad_debug_info *regrd_debug = file->private_data;
+ struct bnad *bnad = (struct bnad *)regrd_debug->i_private;
+ struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc;
+ int addr, len, rc, i;
+ u32 *regbuf;
+ void __iomem *rb, *reg_addr;
+ unsigned long flags;
+ void *kern_buf;
+
+ /* Allocate memory to store the user space buf */
+ kern_buf = kzalloc(nbytes, GFP_KERNEL);
+ if (!kern_buf) {
+ pr_warn("bna %s: Failed to allocate user buffer\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
+ kfree(kern_buf);
+ return -ENOMEM;
+ }
+
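+ /* Input is expected as "<offset>:<count>" in hex; count is the
+ * number of 32-bit words to read.
+ */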
+ rc = sscanf(kern_buf, "%x:%x", &addr, &len);
+ if (rc < 2) {
+ pr_warn("bna %s: Failed to read user buffer\n",
+ pci_name(bnad->pcidev));
+ kfree(kern_buf);
+ return -EINVAL;
+ }
+
+ kfree(kern_buf);
+ kfree(bnad->regdata);
+ bnad->regdata = NULL;
+ bnad->reglen = 0;
+
+ bnad->regdata = kzalloc(len << 2, GFP_KERNEL);
+ if (!bnad->regdata) {
+ pr_warn("bna %s: Failed to allocate regrd buffer\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ bnad->reglen = len << 2;
+ rb = bfa_ioc_bar0(ioc);
+ addr &= BFA_REG_ADDRMSK(ioc);
+
+ /* offset and len sanity check */
+ rc = bna_reg_offset_check(ioc, addr, len);
+ if (rc) {
+ pr_warn("bna %s: Failed reg offset check\n",
+ pci_name(bnad->pcidev));
+ kfree(bnad->regdata);
+ bnad->regdata = NULL;
+ bnad->reglen = 0;
+ return -EINVAL;
+ }
+
+ reg_addr = rb + addr;
+ regbuf = (u32 *)bnad->regdata;
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ for (i = 0; i < len; i++) {
+ *regbuf = readl(reg_addr);
+ regbuf++;
+ reg_addr += sizeof(u32);
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ return nbytes;
+}
+
+static ssize_t
+bnad_debugfs_write_regwr(struct file *file, const char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ struct bnad_debug_info *debug = file->private_data;
+ struct bnad *bnad = (struct bnad *)debug->i_private;
+ struct bfa_ioc *ioc = &bnad->bna.ioceth.ioc;
+ int addr, val, rc;
+ void __iomem *reg_addr;
+ unsigned long flags;
+ void *kern_buf;
+
+ /* Allocate memory to store the user space buf */
+ kern_buf = kzalloc(nbytes, GFP_KERNEL);
+ if (!kern_buf) {
+ pr_warn("bna %s: Failed to allocate user buffer\n",
+ pci_name(bnad->pcidev));
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(kern_buf, (void __user *)buf, nbytes)) {
+ kfree(kern_buf);
+ return -ENOMEM;
+ }
+
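+ /* Input is expected as "<offset>:<value>" in hex; a single
+ * 32-bit word is written.
+ */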
+ rc = sscanf(kern_buf, "%x:%x", &addr, &val);
+ if (rc < 2) {
+ pr_warn("bna %s: Failed to read user buffer\n",
+ pci_name(bnad->pcidev));
+ kfree(kern_buf);
+ return -EINVAL;
+ }
+ kfree(kern_buf);
+
+ addr &= BFA_REG_ADDRMSK(ioc); /* offset only 17 bit and word align */
+
+ /* offset and len sanity check */
+ rc = bna_reg_offset_check(ioc, addr, 1);
+ if (rc) {
+ pr_warn("bna %s: Failed reg offset check\n",
+ pci_name(bnad->pcidev));
+ return -EINVAL;
+ }
+
+ reg_addr = (bfa_ioc_bar0(ioc)) + addr;
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ writel(val, reg_addr);
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+
+ return nbytes;
+}
+
+static int
+bnad_debugfs_release(struct inode *inode, struct file *file)
+{
+ struct bnad_debug_info *debug = file->private_data;
+
+ if (!debug)
+ return 0;
+
+ file->private_data = NULL;
+ kfree(debug);
+ return 0;
+}
+
+static int
+bnad_debugfs_buffer_release(struct inode *inode, struct file *file)
+{
+ struct bnad_debug_info *debug = file->private_data;
+
+ if (!debug)
+ return 0;
+
+ kfree(debug->debug_buffer);
+
+ file->private_data = NULL;
+ kfree(debug);
+ debug = NULL;
+ return 0;
+}
+
+static const struct file_operations bnad_debugfs_op_fwtrc = {
+ .owner = THIS_MODULE,
+ .open = bnad_debugfs_open_fwtrc,
+ .llseek = bnad_debugfs_lseek,
+ .read = bnad_debugfs_read,
+ .release = bnad_debugfs_buffer_release,
+};
+
+static const struct file_operations bnad_debugfs_op_fwsave = {
+ .owner = THIS_MODULE,
+ .open = bnad_debugfs_open_fwsave,
+ .llseek = bnad_debugfs_lseek,
+ .read = bnad_debugfs_read,
+ .release = bnad_debugfs_buffer_release,
+};
+
+static const struct file_operations bnad_debugfs_op_regrd = {
+ .owner = THIS_MODULE,
+ .open = bnad_debugfs_open_reg,
+ .llseek = bnad_debugfs_lseek,
+ .read = bnad_debugfs_read_regrd,
+ .write = bnad_debugfs_write_regrd,
+ .release = bnad_debugfs_release,
+};
+
+static const struct file_operations bnad_debugfs_op_regwr = {
+ .owner = THIS_MODULE,
+ .open = bnad_debugfs_open_reg,
+ .llseek = bnad_debugfs_lseek,
+ .write = bnad_debugfs_write_regwr,
+ .release = bnad_debugfs_release,
+};
+
+static const struct file_operations bnad_debugfs_op_drvinfo = {
+ .owner = THIS_MODULE,
+ .open = bnad_debugfs_open_drvinfo,
+ .llseek = bnad_debugfs_lseek,
+ .read = bnad_debugfs_read,
+ .release = bnad_debugfs_buffer_release,
+};
+
+struct bnad_debugfs_entry {
+ const char *name;
+ mode_t mode;
+ const struct file_operations *fops;
+};
+
+static const struct bnad_debugfs_entry bnad_debugfs_files[] = {
+ { "fwtrc", S_IFREG|S_IRUGO, &bnad_debugfs_op_fwtrc, },
+ { "fwsave", S_IFREG|S_IRUGO, &bnad_debugfs_op_fwsave, },
+ { "regrd", S_IFREG|S_IRUGO|S_IWUSR, &bnad_debugfs_op_regrd, },
+ { "regwr", S_IFREG|S_IWUSR, &bnad_debugfs_op_regwr, },
+ { "drvinfo", S_IFREG|S_IRUGO, &bnad_debugfs_op_drvinfo, },
+};
+
+static struct dentry *bna_debugfs_root;
+static atomic_t bna_debugfs_port_count;
+
+/* Initialize debugfs interface for BNA */
+void
+bnad_debugfs_init(struct bnad *bnad)
+{
+ const struct bnad_debugfs_entry *file;
+ char name[64];
+ int i;
+
+ /* Setup the BNA debugfs root directory*/
+ if (!bna_debugfs_root) {
+ bna_debugfs_root = debugfs_create_dir("bna", NULL);
+ atomic_set(&bna_debugfs_port_count, 0);
+ if (!bna_debugfs_root) {
+ pr_warn("BNA: debugfs root dir creation failed\n");
+ return;
+ }
+ }
+
+ /* Setup the pci_dev debugfs directory for the port */
+ snprintf(name, sizeof(name), "pci_dev:%s", pci_name(bnad->pcidev));
+ if (!bnad->port_debugfs_root) {
+ bnad->port_debugfs_root =
+ debugfs_create_dir(name, bna_debugfs_root);
+ if (!bnad->port_debugfs_root) {
+ pr_warn("bna pci_dev %s: root dir creation failed\n",
+ pci_name(bnad->pcidev));
+ return;
+ }
+
+ atomic_inc(&bna_debugfs_port_count);
+
+ for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) {
+ file = &bnad_debugfs_files[i];
+ bnad->bnad_dentry_files[i] =
+ debugfs_create_file(file->name,
+ file->mode,
+ bnad->port_debugfs_root,
+ bnad,
+ file->fops);
+ if (!bnad->bnad_dentry_files[i]) {
+ pr_warn(
+ "BNA pci_dev:%s: create %s entry failed\n",
+ pci_name(bnad->pcidev), file->name);
+ return;
+ }
+ }
+ }
+}
+
+/* Uninitialize debugfs interface for BNA */
+void
+bnad_debugfs_uninit(struct bnad *bnad)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bnad_debugfs_files); i++) {
+ if (bnad->bnad_dentry_files[i]) {
+ debugfs_remove(bnad->bnad_dentry_files[i]);
+ bnad->bnad_dentry_files[i] = NULL;
+ }
+ }
+
+ /* Remove the pci_dev debugfs directory for the port */
+ if (bnad->port_debugfs_root) {
+ debugfs_remove(bnad->port_debugfs_root);
+ bnad->port_debugfs_root = NULL;
+ atomic_dec(&bna_debugfs_port_count);
+ }
+
+ /* Remove the BNA debugfs root directory */
+ if (atomic_read(&bna_debugfs_port_count) == 0) {
+ debugfs_remove(bna_debugfs_root);
+ bna_debugfs_root = NULL;
+ }
+}
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index fd3dcc1..803ea32 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -38,7 +38,7 @@
sizeof(struct bnad_drv_stats) / sizeof(u64) + \
offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64))
-static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
+static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
"rx_packets",
"tx_packets",
"rx_bytes",
@@ -296,8 +296,8 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
struct bfa_ioc_attr *ioc_attr;
unsigned long flags;
- strcpy(drvinfo->driver, BNAD_NAME);
- strcpy(drvinfo->version, BNAD_VERSION);
+ strlcpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, BNAD_VERSION, sizeof(drvinfo->version));
ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
if (ioc_attr) {
@@ -305,12 +305,13 @@ bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
- strncpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
- sizeof(drvinfo->fw_version) - 1);
+ strlcpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
+ sizeof(drvinfo->fw_version));
kfree(ioc_attr);
}
- strncpy(drvinfo->bus_info, pci_name(bnad->pcidev), ETHTOOL_BUSINFO_LEN);
+ strlcpy(drvinfo->bus_info, pci_name(bnad->pcidev),
+ sizeof(drvinfo->bus_info));
}
static void
@@ -934,7 +935,142 @@ bnad_get_sset_count(struct net_device *netdev, int sset)
}
}
-static struct ethtool_ops bnad_ethtool_ops = {
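+/*
+ * Map an absolute flash offset to the partition containing it; returns the
+ * partition type and stores the partition start in *base_offset. Returns 0
+ * if the attribute query fails or no partition matches.
+ */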
+static u32
+bnad_get_flash_partition_by_offset(struct bnad *bnad, u32 offset,
+ u32 *base_offset)
+{
+ struct bfa_flash_attr *flash_attr;
+ struct bnad_iocmd_comp fcomp;
+ u32 i, flash_part = 0, ret;
+ unsigned long flags = 0;
+
+ flash_attr = kzalloc(sizeof(struct bfa_flash_attr), GFP_KERNEL);
+ if (!flash_attr)
+ return 0;
+
+ fcomp.bnad = bnad;
+ fcomp.comp_status = 0;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ ret = bfa_nw_flash_get_attr(&bnad->bna.flash, flash_attr,
+ bnad_cb_completion, &fcomp);
+ if (ret != BFA_STATUS_OK) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ kfree(flash_attr);
+ return 0;
+ }
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ wait_for_completion(&fcomp.comp);
+ ret = fcomp.comp_status;
+
+ /* Check for the flash type & base offset value */
+ if (ret == BFA_STATUS_OK) {
+ for (i = 0; i < flash_attr->npart; i++) {
+ if (offset >= flash_attr->part[i].part_off &&
+ offset < (flash_attr->part[i].part_off +
+ flash_attr->part[i].part_size)) {
+ flash_part = flash_attr->part[i].part_type;
+ *base_offset = flash_attr->part[i].part_off;
+ break;
+ }
+ }
+ }
+ kfree(flash_attr);
+ return flash_part;
+}
+
+static int
+bnad_get_eeprom_len(struct net_device *netdev)
+{
+ return BFA_TOTAL_FLASH_SIZE;
+}
+
+static int
+bnad_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+ u8 *bytes)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bnad_iocmd_comp fcomp;
+ u32 flash_part = 0, base_offset = 0;
+ unsigned long flags = 0;
+ int ret = 0;
+
+ /* Check if the flash read request is valid */
+ if (eeprom->magic != (bnad->pcidev->vendor |
+ (bnad->pcidev->device << 16)))
+ return -EFAULT;
+
+ /* Query the flash partition based on the offset */
+ flash_part = bnad_get_flash_partition_by_offset(bnad,
+ eeprom->offset, &base_offset);
+ if (flash_part == 0)
+ return -EFAULT;
+
+ fcomp.bnad = bnad;
+ fcomp.comp_status = 0;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ ret = bfa_nw_flash_read_part(&bnad->bna.flash, flash_part,
+ bnad->id, bytes, eeprom->len,
+ eeprom->offset - base_offset,
+ bnad_cb_completion, &fcomp);
+ if (ret != BFA_STATUS_OK) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ goto done;
+ }
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ wait_for_completion(&fcomp.comp);
+ ret = fcomp.comp_status;
+done:
+ return ret;
+}
+
+static int
+bnad_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+ u8 *bytes)
+{
+ struct bnad *bnad = netdev_priv(netdev);
+ struct bnad_iocmd_comp fcomp;
+ u32 flash_part = 0, base_offset = 0;
+ unsigned long flags = 0;
+ int ret = 0;
+
+ /* Check if the flash update request is valid */
+ if (eeprom->magic != (bnad->pcidev->vendor |
+ (bnad->pcidev->device << 16)))
+ return -EINVAL;
+
+ /* Query the flash partition based on the offset */
+ flash_part = bnad_get_flash_partition_by_offset(bnad,
+ eeprom->offset, &base_offset);
+ if (flash_part == 0)
+ return -EFAULT;
+
+ fcomp.bnad = bnad;
+ fcomp.comp_status = 0;
+
+ init_completion(&fcomp.comp);
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+ ret = bfa_nw_flash_update_part(&bnad->bna.flash, flash_part,
+ bnad->id, bytes, eeprom->len,
+ eeprom->offset - base_offset,
+ bnad_cb_completion, &fcomp);
+ if (ret != BFA_STATUS_OK) {
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ goto done;
+ }
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ wait_for_completion(&fcomp.comp);
+ ret = fcomp.comp_status;
+done:
+ return ret;
+}
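+
+/*
+ * These hooks back the standard ethtool EEPROM interface with the adapter
+ * flash. Illustrative usage (interface name and values are examples only):
+ * # ethtool -e eth0 offset 0 length 64
+ * # ethtool -E eth0 magic <id> offset 0 value 0xab
+ * where the magic word carries the PCI vendor ID in its low 16 bits and
+ * the device ID in its high 16 bits, as checked above.
+ */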
+
+static const struct ethtool_ops bnad_ethtool_ops = {
.get_settings = bnad_get_settings,
.set_settings = bnad_set_settings,
.get_drvinfo = bnad_get_drvinfo,
@@ -948,7 +1084,10 @@ static struct ethtool_ops bnad_ethtool_ops = {
.set_pauseparam = bnad_set_pauseparam,
.get_strings = bnad_get_strings,
.get_ethtool_stats = bnad_get_ethtool_stats,
- .get_sset_count = bnad_get_sset_count
+ .get_sset_count = bnad_get_sset_count,
+ .get_eeprom_len = bnad_get_eeprom_len,
+ .get_eeprom = bnad_get_eeprom,
+ .set_eeprom = bnad_set_eeprom,
};
void
diff --git a/drivers/net/ethernet/brocade/bna/cna.h b/drivers/net/ethernet/brocade/bna/cna.h
index 1b3e90d..32e8f17 100644
--- a/drivers/net/ethernet/brocade/bna/cna.h
+++ b/drivers/net/ethernet/brocade/bna/cna.h
@@ -43,8 +43,7 @@ extern char bfa_version[];
#pragma pack(1)
-#define MAC_ADDRLEN (6)
-typedef struct mac { u8 mac[MAC_ADDRLEN]; } mac_t;
+typedef struct mac { u8 mac[ETH_ALEN]; } mac_t;
#pragma pack()
diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
index 725b9ff..cfc22a6 100644
--- a/drivers/net/ethernet/brocade/bna/cna_fwimg.c
+++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
@@ -16,6 +16,7 @@
* www.brocade.com
*/
#include <linux/firmware.h>
+#include "bnad.h"
#include "bfi.h"
#include "cna.h"
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index b48378a..db93191 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -5,8 +5,8 @@
config HAVE_NET_MACB
bool
-config NET_ATMEL
- bool "Atmel devices"
+config NET_CADENCE
+ bool "Cadence devices"
default y
depends on HAVE_NET_MACB || (ARM && ARCH_AT91RM9200)
---help---
@@ -21,7 +21,7 @@ config NET_ATMEL
the remaining Atmel network card questions. If you say Y, you will be
asked for your specific card in the following questions.
-if NET_ATMEL
+if NET_CADENCE
config ARM_AT91_ETHER
tristate "AT91RM9200 Ethernet support"
@@ -33,14 +33,16 @@ config ARM_AT91_ETHER
ethernet support, then you should always answer Y to this.
config MACB
- tristate "Atmel MACB support"
+ tristate "Cadence MACB/GEM support"
depends on HAVE_NET_MACB
select PHYLIB
---help---
- The Atmel MACB ethernet interface is found on many AT32 and AT91
- parts. Say Y to include support for the MACB chip.
+ The Cadence MACB ethernet interface is found on many Atmel AT32 and
+ AT91 parts. This driver also supports the Cadence GEM (Gigabit
+ Ethernet MAC) found in some ARM SoC devices. Note: Gigabit mode
+ is not yet supported. Say Y to include support for the MACB/GEM chip.
To compile this driver as a module, choose M here: the module
will be called macb.
-endif # NET_ATMEL
+endif # NET_CADENCE
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 56624d3..1a5b6ef 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -26,6 +26,7 @@
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/ethtool.h>
+#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/gfp.h>
@@ -255,8 +256,7 @@ static void enable_phyirq(struct net_device *dev)
unsigned int dsintr, irq_number;
int status;
- irq_number = lp->board_data.phy_irq_pin;
- if (!irq_number) {
+ if (!gpio_is_valid(lp->board_data.phy_irq_pin)) {
/*
* PHY doesn't have an IRQ pin (RTL8201, DP83847, AC101L),
* or board does not have it connected.
@@ -265,6 +265,7 @@ static void enable_phyirq(struct net_device *dev)
return;
}
+ irq_number = lp->board_data.phy_irq_pin;
status = request_irq(irq_number, at91ether_phy_interrupt, 0, dev->name, dev);
if (status) {
printk(KERN_ERR "at91_ether: PHY IRQ %d request failed - status %d!\n", irq_number, status);
@@ -319,8 +320,7 @@ static void disable_phyirq(struct net_device *dev)
unsigned int dsintr;
unsigned int irq_number;
- irq_number = lp->board_data.phy_irq_pin;
- if (!irq_number) {
+ if (!gpio_is_valid(lp->board_data.phy_irq_pin)) {
del_timer_sync(&lp->check_timer);
return;
}
@@ -365,6 +365,7 @@ static void disable_phyirq(struct net_device *dev)
disable_mdi();
spin_unlock_irq(&lp->lock);
+ irq_number = lp->board_data.phy_irq_pin;
free_irq(irq_number, dev); /* Free interrupt handler */
}
@@ -984,7 +985,7 @@ static const struct net_device_ops at91ether_netdev_ops = {
static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_address,
struct platform_device *pdev, struct clk *ether_clk)
{
- struct at91_eth_data *board_data = pdev->dev.platform_data;
+ struct macb_platform_data *board_data = pdev->dev.platform_data;
struct net_device *dev;
struct at91_private *lp;
unsigned int val;
@@ -1077,7 +1078,7 @@ static int __init at91ether_setup(unsigned long phy_type, unsigned short phy_add
netif_carrier_off(dev); /* will be enabled in open() */
/* If board has no PHY IRQ, use a timer to poll the PHY */
- if (!lp->board_data.phy_irq_pin) {
+ if (!gpio_is_valid(lp->board_data.phy_irq_pin)) {
init_timer(&lp->check_timer);
lp->check_timer.data = (unsigned long)dev;
lp->check_timer.function = at91ether_check_link;
@@ -1169,7 +1170,8 @@ static int __devexit at91ether_remove(struct platform_device *pdev)
struct net_device *dev = platform_get_drvdata(pdev);
struct at91_private *lp = netdev_priv(dev);
- if (lp->board_data.phy_irq_pin >= 32)
+ if (gpio_is_valid(lp->board_data.phy_irq_pin) &&
+ lp->board_data.phy_irq_pin >= 32)
gpio_free(lp->board_data.phy_irq_pin);
unregister_netdev(dev);
@@ -1188,11 +1190,12 @@ static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
{
struct net_device *net_dev = platform_get_drvdata(pdev);
struct at91_private *lp = netdev_priv(net_dev);
- int phy_irq = lp->board_data.phy_irq_pin;
if (netif_running(net_dev)) {
- if (phy_irq)
+ if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
+ int phy_irq = lp->board_data.phy_irq_pin;
disable_irq(phy_irq);
+ }
netif_stop_queue(net_dev);
netif_device_detach(net_dev);
@@ -1206,7 +1209,6 @@ static int at91ether_resume(struct platform_device *pdev)
{
struct net_device *net_dev = platform_get_drvdata(pdev);
struct at91_private *lp = netdev_priv(net_dev);
- int phy_irq = lp->board_data.phy_irq_pin;
if (netif_running(net_dev)) {
clk_enable(lp->ether_clk);
@@ -1214,8 +1216,10 @@ static int at91ether_resume(struct platform_device *pdev)
netif_device_attach(net_dev);
netif_start_queue(net_dev);
- if (phy_irq)
+ if (gpio_is_valid(lp->board_data.phy_irq_pin)) {
+ int phy_irq = lp->board_data.phy_irq_pin;
enable_irq(phy_irq);
+ }
}
return 0;
}
diff --git a/drivers/net/ethernet/cadence/at91_ether.h b/drivers/net/ethernet/cadence/at91_ether.h
index 353f4da..3725fbb0 100644
--- a/drivers/net/ethernet/cadence/at91_ether.h
+++ b/drivers/net/ethernet/cadence/at91_ether.h
@@ -85,7 +85,9 @@ struct recv_desc_bufs
struct at91_private
{
struct mii_if_info mii; /* ethtool support */
- struct at91_eth_data board_data; /* board-specific configuration */
+ struct macb_platform_data board_data; /* board-specific
+ * configuration (shared with
+ * macb for common data) */
struct clk *ether_clk; /* clock */
/* PHY */
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index a437b46..2320068 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -1,5 +1,5 @@
/*
- * Atmel MACB Ethernet Controller driver
+ * Cadence MACB/GEM Ethernet Controller driver
*
* Copyright (C) 2004-2006 Atmel Corporation
*
@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -19,11 +20,12 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
+#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phy.h>
-
-#include <mach/board.h>
-#include <mach/cpu.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
#include "macb.h"
@@ -60,9 +62,9 @@ static void __macb_set_hwaddr(struct macb *bp)
u16 top;
bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
- macb_writel(bp, SA1B, bottom);
+ macb_or_gem_writel(bp, SA1B, bottom);
top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
- macb_writel(bp, SA1T, top);
+ macb_or_gem_writel(bp, SA1T, top);
}
static void __init macb_get_hwaddr(struct macb *bp)
@@ -71,8 +73,8 @@ static void __init macb_get_hwaddr(struct macb *bp)
u16 top;
u8 addr[6];
- bottom = macb_readl(bp, SA1B);
- top = macb_readl(bp, SA1T);
+ bottom = macb_or_gem_readl(bp, SA1B);
+ top = macb_or_gem_readl(bp, SA1T);
addr[0] = bottom & 0xff;
addr[1] = (bottom >> 8) & 0xff;
@@ -84,7 +86,7 @@ static void __init macb_get_hwaddr(struct macb *bp)
if (is_valid_ether_addr(addr)) {
memcpy(bp->dev->dev_addr, addr, sizeof(addr));
} else {
- dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
+ netdev_info(bp->dev, "invalid hw address, using random\n");
random_ether_addr(bp->dev->dev_addr);
}
}
@@ -178,11 +180,12 @@ static void macb_handle_link_change(struct net_device *dev)
if (status_change) {
if (phydev->link)
- printk(KERN_INFO "%s: link up (%d/%s)\n",
- dev->name, phydev->speed,
- DUPLEX_FULL == phydev->duplex ? "Full":"Half");
+ netdev_info(dev, "link up (%d/%s)\n",
+ phydev->speed,
+ phydev->duplex == DUPLEX_FULL ?
+ "Full" : "Half");
else
- printk(KERN_INFO "%s: link down\n", dev->name);
+ netdev_info(dev, "link down\n");
}
}
@@ -191,25 +194,21 @@ static int macb_mii_probe(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
struct phy_device *phydev;
- struct eth_platform_data *pdata;
int ret;
phydev = phy_find_first(bp->mii_bus);
if (!phydev) {
- printk (KERN_ERR "%s: no PHY found\n", dev->name);
+ netdev_err(dev, "no PHY found\n");
return -1;
}
- pdata = bp->pdev->dev.platform_data;
/* TODO : add pin_irq */
/* attach the mac to the phy */
ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 0,
- pdata && pdata->is_rmii ?
- PHY_INTERFACE_MODE_RMII :
- PHY_INTERFACE_MODE_MII);
+ bp->phy_interface);
if (ret) {
- printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+ netdev_err(dev, "Could not attach to PHY\n");
return ret;
}
@@ -228,7 +227,7 @@ static int macb_mii_probe(struct net_device *dev)
static int macb_mii_init(struct macb *bp)
{
- struct eth_platform_data *pdata;
+ struct macb_platform_data *pdata;
int err = -ENXIO, i;
/* Enable management port */
@@ -244,7 +243,8 @@ static int macb_mii_init(struct macb *bp)
bp->mii_bus->read = &macb_mdio_read;
bp->mii_bus->write = &macb_mdio_write;
bp->mii_bus->reset = &macb_mdio_reset;
- snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%x", bp->pdev->id);
+ snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ bp->pdev->name, bp->pdev->id);
bp->mii_bus->priv = bp;
bp->mii_bus->parent = &bp->dev->dev;
pdata = bp->pdev->dev.platform_data;
@@ -285,8 +285,8 @@ err_out:
static void macb_update_stats(struct macb *bp)
{
u32 __iomem *reg = bp->regs + MACB_PFR;
- u32 *p = &bp->hw_stats.rx_pause_frames;
- u32 *end = &bp->hw_stats.tx_pause_frames + 1;
+ u32 *p = &bp->hw_stats.macb.rx_pause_frames;
+ u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
@@ -303,14 +303,13 @@ static void macb_tx(struct macb *bp)
status = macb_readl(bp, TSR);
macb_writel(bp, TSR, status);
- dev_dbg(&bp->pdev->dev, "macb_tx status = %02lx\n",
- (unsigned long)status);
+ netdev_dbg(bp->dev, "macb_tx status = %02lx\n", (unsigned long)status);
if (status & (MACB_BIT(UND) | MACB_BIT(TSR_RLE))) {
int i;
- printk(KERN_ERR "%s: TX %s, resetting buffers\n",
- bp->dev->name, status & MACB_BIT(UND) ?
- "underrun" : "retry limit exceeded");
+ netdev_err(bp->dev, "TX %s, resetting buffers\n",
+ status & MACB_BIT(UND) ?
+ "underrun" : "retry limit exceeded");
/* Transfer ongoing, disable transmitter, to avoid confusion */
if (status & MACB_BIT(TGO))
@@ -369,8 +368,8 @@ static void macb_tx(struct macb *bp)
if (!(bufstat & MACB_BIT(TX_USED)))
break;
- dev_dbg(&bp->pdev->dev, "skb %u (data %p) TX complete\n",
- tail, skb->data);
+ netdev_dbg(bp->dev, "skb %u (data %p) TX complete\n",
+ tail, skb->data);
dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len,
DMA_TO_DEVICE);
bp->stats.tx_packets++;
@@ -395,8 +394,8 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl);
- dev_dbg(&bp->pdev->dev, "macb_rx_frame frags %u - %u (len %u)\n",
- first_frag, last_frag, len);
+ netdev_dbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
+ first_frag, last_frag, len);
skb = dev_alloc_skb(len + RX_OFFSET);
if (!skb) {
@@ -437,8 +436,8 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
bp->stats.rx_packets++;
bp->stats.rx_bytes += len;
- dev_dbg(&bp->pdev->dev, "received skb of length %u, csum: %08x\n",
- skb->len, skb->csum);
+ netdev_dbg(bp->dev, "received skb of length %u, csum: %08x\n",
+ skb->len, skb->csum);
netif_receive_skb(skb);
return 0;
@@ -515,8 +514,8 @@ static int macb_poll(struct napi_struct *napi, int budget)
work_done = 0;
- dev_dbg(&bp->pdev->dev, "poll: status = %08lx, budget = %d\n",
- (unsigned long)status, budget);
+ netdev_dbg(bp->dev, "poll: status = %08lx, budget = %d\n",
+ (unsigned long)status, budget);
work_done = macb_rx(bp, budget);
if (work_done < budget) {
@@ -565,8 +564,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
if (napi_schedule_prep(&bp->napi)) {
- dev_dbg(&bp->pdev->dev,
- "scheduling RX softirq\n");
+ netdev_dbg(bp->dev, "scheduling RX softirq\n");
__napi_schedule(&bp->napi);
}
}
@@ -582,16 +580,19 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
if (status & MACB_BIT(ISR_ROVR)) {
/* We missed at least one packet */
- bp->hw_stats.rx_overruns++;
+ if (macb_is_gem(bp))
+ bp->hw_stats.gem.rx_overruns++;
+ else
+ bp->hw_stats.macb.rx_overruns++;
}
if (status & MACB_BIT(HRESP)) {
/*
- * TODO: Reset the hardware, and maybe move the printk
- * to a lower-priority context as well (work queue?)
+ * TODO: Reset the hardware, and maybe move the
+ * netdev_err to a lower-priority context as well
+ * (work queue?)
*/
- printk(KERN_ERR "%s: DMA bus error: HRESP not OK\n",
- dev->name);
+ netdev_err(dev, "DMA bus error: HRESP not OK\n");
}
status = macb_readl(bp, ISR);
@@ -626,16 +627,12 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned long flags;
#ifdef DEBUG
- int i;
- dev_dbg(&bp->pdev->dev,
- "start_xmit: len %u head %p data %p tail %p end %p\n",
- skb->len, skb->head, skb->data,
- skb_tail_pointer(skb), skb_end_pointer(skb));
- dev_dbg(&bp->pdev->dev,
- "data:");
- for (i = 0; i < 16; i++)
- printk(" %02x", (unsigned int)skb->data[i]);
- printk("\n");
+ netdev_dbg(bp->dev,
+ "start_xmit: len %u head %p data %p tail %p end %p\n",
+ skb->len, skb->head, skb->data,
+ skb_tail_pointer(skb), skb_end_pointer(skb));
+ print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, 16, true);
#endif
len = skb->len;
@@ -645,21 +642,20 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (TX_BUFFS_AVAIL(bp) < 1) {
netif_stop_queue(dev);
spin_unlock_irqrestore(&bp->lock, flags);
- dev_err(&bp->pdev->dev,
- "BUG! Tx Ring full when queue awake!\n");
- dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n",
- bp->tx_head, bp->tx_tail);
+ netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n");
+ netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
+ bp->tx_head, bp->tx_tail);
return NETDEV_TX_BUSY;
}
entry = bp->tx_head;
- dev_dbg(&bp->pdev->dev, "Allocated ring entry %u\n", entry);
+ netdev_dbg(bp->dev, "Allocated ring entry %u\n", entry);
mapping = dma_map_single(&bp->pdev->dev, skb->data,
len, DMA_TO_DEVICE);
bp->tx_skb[entry].skb = skb;
bp->tx_skb[entry].mapping = mapping;
- dev_dbg(&bp->pdev->dev, "Mapped skb data %p to DMA addr %08lx\n",
- skb->data, (unsigned long)mapping);
+ netdev_dbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
+ skb->data, (unsigned long)mapping);
ctrl = MACB_BF(TX_FRMLEN, len);
ctrl |= MACB_BIT(TX_LAST);
@@ -723,27 +719,27 @@ static int macb_alloc_consistent(struct macb *bp)
&bp->rx_ring_dma, GFP_KERNEL);
if (!bp->rx_ring)
goto out_err;
- dev_dbg(&bp->pdev->dev,
- "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
+ netdev_dbg(bp->dev,
+ "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
+ size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
size = TX_RING_BYTES;
bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
&bp->tx_ring_dma, GFP_KERNEL);
if (!bp->tx_ring)
goto out_err;
- dev_dbg(&bp->pdev->dev,
- "Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);
+ netdev_dbg(bp->dev,
+ "Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
+ size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);
size = RX_RING_SIZE * RX_BUFFER_SIZE;
bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
&bp->rx_buffers_dma, GFP_KERNEL);
if (!bp->rx_buffers)
goto out_err;
- dev_dbg(&bp->pdev->dev,
- "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
- size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
+ netdev_dbg(bp->dev,
+ "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
+ size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
return 0;
@@ -797,6 +793,84 @@ static void macb_reset_hw(struct macb *bp)
macb_readl(bp, ISR);
}
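+
+/*
+ * Pick the MDC (MDIO management) clock divider: MDC should stay at or
+ * below 2.5 MHz per IEEE 802.3, so the smallest divider that keeps
+ * pclk / divider within that limit is chosen.
+ */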
+static u32 gem_mdc_clk_div(struct macb *bp)
+{
+ u32 config;
+ unsigned long pclk_hz = clk_get_rate(bp->pclk);
+
+ if (pclk_hz <= 20000000)
+ config = GEM_BF(CLK, GEM_CLK_DIV8);
+ else if (pclk_hz <= 40000000)
+ config = GEM_BF(CLK, GEM_CLK_DIV16);
+ else if (pclk_hz <= 80000000)
+ config = GEM_BF(CLK, GEM_CLK_DIV32);
+ else if (pclk_hz <= 120000000)
+ config = GEM_BF(CLK, GEM_CLK_DIV48);
+ else if (pclk_hz <= 160000000)
+ config = GEM_BF(CLK, GEM_CLK_DIV64);
+ else
+ config = GEM_BF(CLK, GEM_CLK_DIV96);
+
+ return config;
+}
+
+static u32 macb_mdc_clk_div(struct macb *bp)
+{
+ u32 config;
+ unsigned long pclk_hz;
+
+ if (macb_is_gem(bp))
+ return gem_mdc_clk_div(bp);
+
+ pclk_hz = clk_get_rate(bp->pclk);
+ if (pclk_hz <= 20000000)
+ config = MACB_BF(CLK, MACB_CLK_DIV8);
+ else if (pclk_hz <= 40000000)
+ config = MACB_BF(CLK, MACB_CLK_DIV16);
+ else if (pclk_hz <= 80000000)
+ config = MACB_BF(CLK, MACB_CLK_DIV32);
+ else
+ config = MACB_BF(CLK, MACB_CLK_DIV64);
+
+ return config;
+}
+
+/*
+ * Get the DMA bus width field of the network configuration register that we
+ * should program. We determine the width by decoding the design
+ * configuration register, which reports the maximum supported data bus width.
+ */
+static u32 macb_dbw(struct macb *bp)
+{
+ if (!macb_is_gem(bp))
+ return 0;
+
+ switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
+ case 4:
+ return GEM_BF(DBW, GEM_DBW128);
+ case 2:
+ return GEM_BF(DBW, GEM_DBW64);
+ case 1:
+ default:
+ return GEM_BF(DBW, GEM_DBW32);
+ }
+}
+
+/*
+ * Configure the receive DMA engine to use the correct receive buffer size.
+ * This is a configurable parameter for GEM.
+ */
+static void macb_configure_dma(struct macb *bp)
+{
+ u32 dmacfg;
+
+ if (macb_is_gem(bp)) {
+ dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
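+ /* RXBS is programmed in 64-byte units, hence the division below */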
+ dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64);
+ gem_writel(bp, DMACFG, dmacfg);
+ }
+}
+
static void macb_init_hw(struct macb *bp)
{
u32 config;
@@ -804,7 +878,7 @@ static void macb_init_hw(struct macb *bp)
macb_reset_hw(bp);
__macb_set_hwaddr(bp);
- config = macb_readl(bp, NCFGR) & MACB_BF(CLK, -1L);
+ config = macb_mdc_clk_div(bp);
config |= MACB_BIT(PAE); /* PAuse Enable */
config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
config |= MACB_BIT(BIG); /* Receive oversized frames */
@@ -812,8 +886,11 @@ static void macb_init_hw(struct macb *bp)
config |= MACB_BIT(CAF); /* Copy All Frames */
if (!(bp->dev->flags & IFF_BROADCAST))
config |= MACB_BIT(NBC); /* No BroadCast */
+ config |= macb_dbw(bp);
macb_writel(bp, NCFGR, config);
+ macb_configure_dma(bp);
+
/* Initialize TX and RX buffers */
macb_writel(bp, RBQP, bp->rx_ring_dma);
macb_writel(bp, TBQP, bp->tx_ring_dma);
@@ -909,8 +986,8 @@ static void macb_sethashtable(struct net_device *dev)
mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
}
- macb_writel(bp, HRB, mc_filter[0]);
- macb_writel(bp, HRT, mc_filter[1]);
+ macb_or_gem_writel(bp, HRB, mc_filter[0]);
+ macb_or_gem_writel(bp, HRT, mc_filter[1]);
}
/*
@@ -932,8 +1009,8 @@ static void macb_set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI) {
/* Enable all multicast mode */
- macb_writel(bp, HRB, -1);
- macb_writel(bp, HRT, -1);
+ macb_or_gem_writel(bp, HRB, -1);
+ macb_or_gem_writel(bp, HRT, -1);
cfg |= MACB_BIT(NCFGR_MTI);
} else if (!netdev_mc_empty(dev)) {
/* Enable specific multicasts */
@@ -941,8 +1018,8 @@ static void macb_set_rx_mode(struct net_device *dev)
cfg |= MACB_BIT(NCFGR_MTI);
} else if (dev->flags & (~IFF_ALLMULTI)) {
/* Disable all multicast mode */
- macb_writel(bp, HRB, 0);
- macb_writel(bp, HRT, 0);
+ macb_or_gem_writel(bp, HRB, 0);
+ macb_or_gem_writel(bp, HRT, 0);
cfg &= ~MACB_BIT(NCFGR_MTI);
}
@@ -954,7 +1031,7 @@ static int macb_open(struct net_device *dev)
struct macb *bp = netdev_priv(dev);
int err;
- dev_dbg(&bp->pdev->dev, "open\n");
+ netdev_dbg(bp->dev, "open\n");
/* if the phy is not yet register, retry later*/
if (!bp->phy_dev)
@@ -965,9 +1042,8 @@ static int macb_open(struct net_device *dev)
err = macb_alloc_consistent(bp);
if (err) {
- printk(KERN_ERR
- "%s: Unable to allocate DMA memory (error %d)\n",
- dev->name, err);
+ netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
+ err);
return err;
}
@@ -1005,11 +1081,62 @@ static int macb_close(struct net_device *dev)
return 0;
}
+static void gem_update_stats(struct macb *bp)
+{
+ u32 __iomem *reg = bp->regs + GEM_OTX;
+ u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
+ u32 *end = &bp->hw_stats.gem.rx_udp_checksum_errors + 1;
+
+ for (; p < end; p++, reg++)
+ *p += __raw_readl(reg);
+}
+
+static struct net_device_stats *gem_get_stats(struct macb *bp)
+{
+ struct gem_stats *hwstat = &bp->hw_stats.gem;
+ struct net_device_stats *nstat = &bp->stats;
+
+ gem_update_stats(bp);
+
+ nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
+ hwstat->rx_alignment_errors +
+ hwstat->rx_resource_errors +
+ hwstat->rx_overruns +
+ hwstat->rx_oversize_frames +
+ hwstat->rx_jabbers +
+ hwstat->rx_undersized_frames +
+ hwstat->rx_length_field_frame_errors);
+ nstat->tx_errors = (hwstat->tx_late_collisions +
+ hwstat->tx_excessive_collisions +
+ hwstat->tx_underrun +
+ hwstat->tx_carrier_sense_errors);
+ nstat->multicast = hwstat->rx_multicast_frames;
+ nstat->collisions = (hwstat->tx_single_collision_frames +
+ hwstat->tx_multiple_collision_frames +
+ hwstat->tx_excessive_collisions);
+ nstat->rx_length_errors = (hwstat->rx_oversize_frames +
+ hwstat->rx_jabbers +
+ hwstat->rx_undersized_frames +
+ hwstat->rx_length_field_frame_errors);
+ nstat->rx_over_errors = hwstat->rx_resource_errors;
+ nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
+ nstat->rx_frame_errors = hwstat->rx_alignment_errors;
+ nstat->rx_fifo_errors = hwstat->rx_overruns;
+ nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
+ nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
+ nstat->tx_fifo_errors = hwstat->tx_underrun;
+
+ return nstat;
+}
+
static struct net_device_stats *macb_get_stats(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
struct net_device_stats *nstat = &bp->stats;
- struct macb_stats *hwstat = &bp->hw_stats;
+ struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+ if (macb_is_gem(bp))
+ return gem_get_stats(bp);
/* read stats from hardware */
macb_update_stats(bp);
@@ -1117,14 +1244,59 @@ static const struct net_device_ops macb_netdev_ops = {
#endif
};
+#if defined(CONFIG_OF)
+static const struct of_device_id macb_dt_ids[] = {
+ { .compatible = "cdns,at32ap7000-macb" },
+ { .compatible = "cdns,at91sam9260-macb" },
+ { .compatible = "cdns,macb" },
+ { .compatible = "cdns,pc302-gem" },
+ { .compatible = "cdns,gem" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, macb_dt_ids);
+
+static int __devinit macb_get_phy_mode_dt(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+
+ if (np)
+ return of_get_phy_mode(np);
+
+ return -ENODEV;
+}
+
+static int __devinit macb_get_hwaddr_dt(struct macb *bp)
+{
+ struct device_node *np = bp->pdev->dev.of_node;
+ if (np) {
+ const char *mac = of_get_mac_address(np);
+ if (mac) {
+ memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}
+#else
+static int __devinit macb_get_phy_mode_dt(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+static int __devinit macb_get_hwaddr_dt(struct macb *bp)
+{
+ return -ENODEV;
+}
+#endif
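+
+/*
+ * Illustrative (not part of this patch) device tree node consumed by the
+ * DT helpers above; property names follow of_get_phy_mode() and
+ * of_get_mac_address():
+ *
+ * ethernet@fffc4000 {
+ * compatible = "cdns,at32ap7000-macb";
+ * reg = <0xfffc4000 0x4000>;
+ * phy-mode = "rmii";
+ * local-mac-address = [3a 0e 03 04 05 06];
+ * };
+ */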
+
static int __init macb_probe(struct platform_device *pdev)
{
- struct eth_platform_data *pdata;
+ struct macb_platform_data *pdata;
struct resource *regs;
struct net_device *dev;
struct macb *bp;
struct phy_device *phydev;
- unsigned long pclk_hz;
u32 config;
int err = -ENXIO;
@@ -1152,28 +1324,19 @@ static int __init macb_probe(struct platform_device *pdev)
spin_lock_init(&bp->lock);
-#if defined(CONFIG_ARCH_AT91)
- bp->pclk = clk_get(&pdev->dev, "macb_clk");
+ bp->pclk = clk_get(&pdev->dev, "pclk");
if (IS_ERR(bp->pclk)) {
dev_err(&pdev->dev, "failed to get macb_clk\n");
goto err_out_free_dev;
}
clk_enable(bp->pclk);
-#else
- bp->pclk = clk_get(&pdev->dev, "pclk");
- if (IS_ERR(bp->pclk)) {
- dev_err(&pdev->dev, "failed to get pclk\n");
- goto err_out_free_dev;
- }
+
bp->hclk = clk_get(&pdev->dev, "hclk");
if (IS_ERR(bp->hclk)) {
dev_err(&pdev->dev, "failed to get hclk\n");
goto err_out_put_pclk;
}
-
- clk_enable(bp->pclk);
clk_enable(bp->hclk);
-#endif
bp->regs = ioremap(regs->start, resource_size(regs));
if (!bp->regs) {
@@ -1185,9 +1348,8 @@ static int __init macb_probe(struct platform_device *pdev)
dev->irq = platform_get_irq(pdev, 0);
err = request_irq(dev->irq, macb_interrupt, 0, dev->name, dev);
if (err) {
- printk(KERN_ERR
- "%s: Unable to request IRQ %d (error %d)\n",
- dev->name, dev->irq, err);
+ dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
+ dev->irq, err);
goto err_out_iounmap;
}
@@ -1198,31 +1360,37 @@ static int __init macb_probe(struct platform_device *pdev)
dev->base_addr = regs->start;
/* Set MII management clock divider */
- pclk_hz = clk_get_rate(bp->pclk);
- if (pclk_hz <= 20000000)
- config = MACB_BF(CLK, MACB_CLK_DIV8);
- else if (pclk_hz <= 40000000)
- config = MACB_BF(CLK, MACB_CLK_DIV16);
- else if (pclk_hz <= 80000000)
- config = MACB_BF(CLK, MACB_CLK_DIV32);
- else
- config = MACB_BF(CLK, MACB_CLK_DIV64);
+ config = macb_mdc_clk_div(bp);
+ config |= macb_dbw(bp);
macb_writel(bp, NCFGR, config);
- macb_get_hwaddr(bp);
- pdata = pdev->dev.platform_data;
+ err = macb_get_hwaddr_dt(bp);
+ if (err < 0)
+ macb_get_hwaddr(bp);
+
+ err = macb_get_phy_mode_dt(pdev);
+ if (err < 0) {
+ pdata = pdev->dev.platform_data;
+ if (pdata && pdata->is_rmii)
+ bp->phy_interface = PHY_INTERFACE_MODE_RMII;
+ else
+ bp->phy_interface = PHY_INTERFACE_MODE_MII;
+ } else {
+ bp->phy_interface = err;
+ }
- if (pdata && pdata->is_rmii)
+ if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
#if defined(CONFIG_ARCH_AT91)
- macb_writel(bp, USRIO, (MACB_BIT(RMII) | MACB_BIT(CLKEN)) );
+ macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) |
+ MACB_BIT(CLKEN)));
#else
- macb_writel(bp, USRIO, 0);
+ macb_or_gem_writel(bp, USRIO, 0);
#endif
else
#if defined(CONFIG_ARCH_AT91)
- macb_writel(bp, USRIO, MACB_BIT(CLKEN));
+ macb_or_gem_writel(bp, USRIO, MACB_BIT(CLKEN));
#else
- macb_writel(bp, USRIO, MACB_BIT(MII));
+ macb_or_gem_writel(bp, USRIO, MACB_BIT(MII));
#endif
bp->tx_pending = DEF_TX_RING_PENDING;
@@ -1239,13 +1407,13 @@ static int __init macb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
- printk(KERN_INFO "%s: Atmel MACB at 0x%08lx irq %d (%pM)\n",
- dev->name, dev->base_addr, dev->irq, dev->dev_addr);
+ netdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n",
+ macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr,
+ dev->irq, dev->dev_addr);
phydev = bp->phy_dev;
- printk(KERN_INFO "%s: attached PHY driver [%s] "
- "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
- phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
+ netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+ phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
return 0;
@@ -1256,14 +1424,10 @@ err_out_free_irq:
err_out_iounmap:
iounmap(bp->regs);
err_out_disable_clocks:
-#ifndef CONFIG_ARCH_AT91
clk_disable(bp->hclk);
clk_put(bp->hclk);
-#endif
clk_disable(bp->pclk);
-#ifndef CONFIG_ARCH_AT91
err_out_put_pclk:
-#endif
clk_put(bp->pclk);
err_out_free_dev:
free_netdev(dev);
@@ -1289,10 +1453,8 @@ static int __exit macb_remove(struct platform_device *pdev)
unregister_netdev(dev);
free_irq(dev->irq, dev);
iounmap(bp->regs);
-#ifndef CONFIG_ARCH_AT91
clk_disable(bp->hclk);
clk_put(bp->hclk);
-#endif
clk_disable(bp->pclk);
clk_put(bp->pclk);
free_netdev(dev);
@@ -1310,9 +1472,7 @@ static int macb_suspend(struct platform_device *pdev, pm_message_t state)
netif_device_detach(netdev);
-#ifndef CONFIG_ARCH_AT91
clk_disable(bp->hclk);
-#endif
clk_disable(bp->pclk);
return 0;
@@ -1324,9 +1484,7 @@ static int macb_resume(struct platform_device *pdev)
struct macb *bp = netdev_priv(netdev);
clk_enable(bp->pclk);
-#ifndef CONFIG_ARCH_AT91
clk_enable(bp->hclk);
-#endif
netif_device_attach(netdev);
@@ -1344,6 +1502,7 @@ static struct platform_driver macb_driver = {
.driver = {
.name = "macb",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(macb_dt_ids),
},
};
@@ -1361,6 +1520,6 @@ module_init(macb_init);
module_exit(macb_exit);
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Atmel MACB Ethernet driver");
+MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index d3212f6..335e288 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -59,6 +59,24 @@
#define MACB_TPQ 0x00bc
#define MACB_USRIO 0x00c0
#define MACB_WOL 0x00c4
+#define MACB_MID 0x00fc
+
+/* GEM register offsets. */
+#define GEM_NCFGR 0x0004
+#define GEM_USRIO 0x000c
+#define GEM_DMACFG 0x0010
+#define GEM_HRB 0x0080
+#define GEM_HRT 0x0084
+#define GEM_SA1B 0x0088
+#define GEM_SA1T 0x008C
+#define GEM_OTX 0x0100
+#define GEM_DCFG1 0x0280
+#define GEM_DCFG2 0x0284
+#define GEM_DCFG3 0x0288
+#define GEM_DCFG4 0x028c
+#define GEM_DCFG5 0x0290
+#define GEM_DCFG6 0x0294
+#define GEM_DCFG7 0x0298
/* Bitfields in NCR */
#define MACB_LB_OFFSET 0
@@ -126,6 +144,21 @@
#define MACB_IRXFCS_OFFSET 19
#define MACB_IRXFCS_SIZE 1
+/* GEM specific NCFGR bitfields. */
+#define GEM_CLK_OFFSET 18
+#define GEM_CLK_SIZE 3
+#define GEM_DBW_OFFSET 21
+#define GEM_DBW_SIZE 2
+
+/* Constants for data bus width. */
+#define GEM_DBW32 0
+#define GEM_DBW64 1
+#define GEM_DBW128 2
+
+/* Bitfields in DMACFG. */
+#define GEM_RXBS_OFFSET 16
+#define GEM_RXBS_SIZE 8
+
/* Bitfields in NSR */
#define MACB_NSR_LINK_OFFSET 0
#define MACB_NSR_LINK_SIZE 1
@@ -228,12 +261,30 @@
#define MACB_WOL_MTI_OFFSET 19
#define MACB_WOL_MTI_SIZE 1
+/* Bitfields in MID */
+#define MACB_IDNUM_OFFSET 16
+#define MACB_IDNUM_SIZE 16
+#define MACB_REV_OFFSET 0
+#define MACB_REV_SIZE 16
+
+/* Bitfields in DCFG1. */
+#define GEM_DBWDEF_OFFSET 25
+#define GEM_DBWDEF_SIZE 3
+
/* Constants for CLK */
#define MACB_CLK_DIV8 0
#define MACB_CLK_DIV16 1
#define MACB_CLK_DIV32 2
#define MACB_CLK_DIV64 3
+/* GEM specific constants for CLK. */
+#define GEM_CLK_DIV8 0
+#define GEM_CLK_DIV16 1
+#define GEM_CLK_DIV32 2
+#define GEM_CLK_DIV48 3
+#define GEM_CLK_DIV64 4
+#define GEM_CLK_DIV96 5
+
/* Constants for MAN register */
#define MACB_MAN_SOF 1
#define MACB_MAN_WRITE 1
@@ -254,11 +305,52 @@
<< MACB_##name##_OFFSET)) \
| MACB_BF(name,value))
+#define GEM_BIT(name) \
+ (1 << GEM_##name##_OFFSET)
+#define GEM_BF(name, value) \
+ (((value) & ((1 << GEM_##name##_SIZE) - 1)) \
+ << GEM_##name##_OFFSET)
+#define GEM_BFEXT(name, value)\
+ (((value) >> GEM_##name##_OFFSET) \
+ & ((1 << GEM_##name##_SIZE) - 1))
+#define GEM_BFINS(name, value, old) \
+ (((old) & ~(((1 << GEM_##name##_SIZE) - 1) \
+ << GEM_##name##_OFFSET)) \
+ | GEM_BF(name, value))
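+
+/*
+ * Example: GEM_BF(CLK, GEM_CLK_DIV48) places the value 3 (GEM_CLK_DIV48)
+ * into NCFGR bits 20:18, i.e. (3 & 0x7) << 18 == 0x000c0000, and
+ * GEM_BFEXT(DBW, 0x00200000) extracts GEM_DBW64 (1) from bits 22:21.
+ */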
+
/* Register access macros */
#define macb_readl(port,reg) \
__raw_readl((port)->regs + MACB_##reg)
#define macb_writel(port,reg,value) \
__raw_writel((value), (port)->regs + MACB_##reg)
+#define gem_readl(port, reg) \
+ __raw_readl((port)->regs + GEM_##reg)
+#define gem_writel(port, reg, value) \
+ __raw_writel((value), (port)->regs + GEM_##reg)
+
+/*
+ * Conditional GEM/MACB macros. These perform the operation to the correct
+ * register dependent on whether the device is a GEM or a MACB. For registers
+ * and bitfields that are common across both devices, use macb_{read,write}l
+ * to avoid the cost of the conditional.
+ */
+#define macb_or_gem_writel(__bp, __reg, __value) \
+ ({ \
+ if (macb_is_gem((__bp))) \
+ gem_writel((__bp), __reg, __value); \
+ else \
+ macb_writel((__bp), __reg, __value); \
+ })
+
+#define macb_or_gem_readl(__bp, __reg) \
+ ({ \
+ u32 __v; \
+ if (macb_is_gem((__bp))) \
+ __v = gem_readl((__bp), __reg); \
+ else \
+ __v = macb_readl((__bp), __reg); \
+ __v; \
+ })
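+
+/*
+ * For example, macb_or_gem_writel(bp, SA1B, bottom) writes the bottom half
+ * of the station address to GEM_SA1B on GEM and to MACB_SA1B on MACB.
+ */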
struct dma_desc {
u32 addr;
@@ -358,6 +450,54 @@ struct macb_stats {
u32 tx_pause_frames;
};
+struct gem_stats {
+ u32 tx_octets_31_0;
+ u32 tx_octets_47_32;
+ u32 tx_frames;
+ u32 tx_broadcast_frames;
+ u32 tx_multicast_frames;
+ u32 tx_pause_frames;
+ u32 tx_64_byte_frames;
+ u32 tx_65_127_byte_frames;
+ u32 tx_128_255_byte_frames;
+ u32 tx_256_511_byte_frames;
+ u32 tx_512_1023_byte_frames;
+ u32 tx_1024_1518_byte_frames;
+ u32 tx_greater_than_1518_byte_frames;
+ u32 tx_underrun;
+ u32 tx_single_collision_frames;
+ u32 tx_multiple_collision_frames;
+ u32 tx_excessive_collisions;
+ u32 tx_late_collisions;
+ u32 tx_deferred_frames;
+ u32 tx_carrier_sense_errors;
+ u32 rx_octets_31_0;
+ u32 rx_octets_47_32;
+ u32 rx_frames;
+ u32 rx_broadcast_frames;
+ u32 rx_multicast_frames;
+ u32 rx_pause_frames;
+ u32 rx_64_byte_frames;
+ u32 rx_65_127_byte_frames;
+ u32 rx_128_255_byte_frames;
+ u32 rx_256_511_byte_frames;
+ u32 rx_512_1023_byte_frames;
+ u32 rx_1024_1518_byte_frames;
+ u32 rx_greater_than_1518_byte_frames;
+ u32 rx_undersized_frames;
+ u32 rx_oversize_frames;
+ u32 rx_jabbers;
+ u32 rx_frame_check_sequence_errors;
+ u32 rx_length_field_frame_errors;
+ u32 rx_symbol_errors;
+ u32 rx_alignment_errors;
+ u32 rx_resource_errors;
+ u32 rx_overruns;
+ u32 rx_ip_header_checksum_errors;
+ u32 rx_tcp_checksum_errors;
+ u32 rx_udp_checksum_errors;
+};
+
struct macb {
void __iomem *regs;
@@ -376,7 +516,10 @@ struct macb {
struct net_device *dev;
struct napi_struct napi;
struct net_device_stats stats;
- struct macb_stats hw_stats;
+ union {
+ struct macb_stats macb;
+ struct gem_stats gem;
+ } hw_stats;
dma_addr_t rx_ring_dma;
dma_addr_t tx_ring_dma;
@@ -389,6 +532,13 @@ struct macb {
unsigned int link;
unsigned int speed;
unsigned int duplex;
+
+ phy_interface_t phy_interface;
};
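+
+/* GEM variants report module ID 0x2 in the MID register (bits 31:16);
+ * anything else is treated as the original MACB.
+ */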
+static inline bool macb_is_gem(struct macb *bp)
+{
+ return MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2;
+}
+
#endif /* _MACB_H */
diff --git a/drivers/net/ethernet/calxeda/Kconfig b/drivers/net/ethernet/calxeda/Kconfig
new file mode 100644
index 0000000..aba435c
--- /dev/null
+++ b/drivers/net/ethernet/calxeda/Kconfig
@@ -0,0 +1,7 @@
+config NET_CALXEDA_XGMAC
+ tristate "Calxeda 1G/10G XGMAC Ethernet driver"
+ depends on HAS_IOMEM
+ select CRC32
+ help
+ This is the driver for the XGMAC Ethernet IP block found on Calxeda
+ Highbank platforms.
diff --git a/drivers/net/ethernet/calxeda/Makefile b/drivers/net/ethernet/calxeda/Makefile
new file mode 100644
index 0000000..f0ef080
--- /dev/null
+++ b/drivers/net/ethernet/calxeda/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_NET_CALXEDA_XGMAC) += xgmac.o
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
new file mode 100644
index 0000000..1fce186
--- /dev/null
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -0,0 +1,1928 @@
+/*
+ * Copyright 2010-2011 Calxeda, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/circ_buf.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/if.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+/* XGMAC Register definitions */
+#define XGMAC_CONTROL 0x00000000 /* MAC Configuration */
+#define XGMAC_FRAME_FILTER 0x00000004 /* MAC Frame Filter */
+#define XGMAC_FLOW_CTRL 0x00000018 /* MAC Flow Control */
+#define XGMAC_VLAN_TAG 0x0000001C /* VLAN Tags */
+#define XGMAC_VERSION 0x00000020 /* Version */
+#define XGMAC_VLAN_INCL 0x00000024 /* VLAN tag for tx frames */
+#define XGMAC_LPI_CTRL 0x00000028 /* LPI Control and Status */
+#define XGMAC_LPI_TIMER 0x0000002C /* LPI Timers Control */
+#define XGMAC_TX_PACE 0x00000030 /* Transmit Pace and Stretch */
+#define XGMAC_VLAN_HASH 0x00000034 /* VLAN Hash Table */
+#define XGMAC_DEBUG 0x00000038 /* Debug */
+#define XGMAC_INT_STAT 0x0000003C /* Interrupt and Control */
+#define XGMAC_ADDR_HIGH(reg) (0x00000040 + ((reg) * 8))
+#define XGMAC_ADDR_LOW(reg) (0x00000044 + ((reg) * 8))
+#define XGMAC_HASH(n) (0x00000300 + (n) * 4) /* HASH table regs */
+#define XGMAC_NUM_HASH 16
+#define XGMAC_OMR 0x00000400
+#define XGMAC_REMOTE_WAKE 0x00000700 /* Remote Wake-Up Frm Filter */
+#define XGMAC_PMT 0x00000704 /* PMT Control and Status */
+#define XGMAC_MMC_CTRL 0x00000800 /* XGMAC MMC Control */
+#define XGMAC_MMC_INTR_RX 0x00000804 /* Receive Interrupt */
+#define XGMAC_MMC_INTR_TX 0x00000808 /* Transmit Interrupt */
+#define XGMAC_MMC_INTR_MASK_RX 0x0000080c /* Receive Interrupt Mask */
+#define XGMAC_MMC_INTR_MASK_TX 0x00000810 /* Transmit Interrupt Mask */
+
+/* Hardware TX Statistics Counters */
+#define XGMAC_MMC_TXOCTET_GB_LO 0x00000814
+#define XGMAC_MMC_TXOCTET_GB_HI 0x00000818
+#define XGMAC_MMC_TXFRAME_GB_LO 0x0000081C
+#define XGMAC_MMC_TXFRAME_GB_HI 0x00000820
+#define XGMAC_MMC_TXBCFRAME_G 0x00000824
+#define XGMAC_MMC_TXMCFRAME_G 0x0000082C
+#define XGMAC_MMC_TXUCFRAME_GB 0x00000864
+#define XGMAC_MMC_TXMCFRAME_GB 0x0000086C
+#define XGMAC_MMC_TXBCFRAME_GB 0x00000874
+#define XGMAC_MMC_TXUNDERFLOW 0x0000087C
+#define XGMAC_MMC_TXOCTET_G_LO 0x00000884
+#define XGMAC_MMC_TXOCTET_G_HI 0x00000888
+#define XGMAC_MMC_TXFRAME_G_LO 0x0000088C
+#define XGMAC_MMC_TXFRAME_G_HI 0x00000890
+#define XGMAC_MMC_TXPAUSEFRAME 0x00000894
+#define XGMAC_MMC_TXVLANFRAME 0x0000089C
+
+/* Hardware RX Statistics Counters */
+#define XGMAC_MMC_RXFRAME_GB_LO 0x00000900
+#define XGMAC_MMC_RXFRAME_GB_HI 0x00000904
+#define XGMAC_MMC_RXOCTET_GB_LO 0x00000908
+#define XGMAC_MMC_RXOCTET_GB_HI 0x0000090C
+#define XGMAC_MMC_RXOCTET_G_LO 0x00000910
+#define XGMAC_MMC_RXOCTET_G_HI 0x00000914
+#define XGMAC_MMC_RXBCFRAME_G 0x00000918
+#define XGMAC_MMC_RXMCFRAME_G 0x00000920
+#define XGMAC_MMC_RXCRCERR 0x00000928
+#define XGMAC_MMC_RXRUNT 0x00000930
+#define XGMAC_MMC_RXJABBER 0x00000934
+#define XGMAC_MMC_RXUCFRAME_G 0x00000970
+#define XGMAC_MMC_RXLENGTHERR 0x00000978
+#define XGMAC_MMC_RXPAUSEFRAME 0x00000988
+#define XGMAC_MMC_RXOVERFLOW 0x00000990
+#define XGMAC_MMC_RXVLANFRAME 0x00000998
+#define XGMAC_MMC_RXWATCHDOG 0x000009a0
+
+/* DMA Control and Status Registers */
+#define XGMAC_DMA_BUS_MODE 0x00000f00 /* Bus Mode */
+#define XGMAC_DMA_TX_POLL 0x00000f04 /* Transmit Poll Demand */
+#define XGMAC_DMA_RX_POLL 0x00000f08 /* Received Poll Demand */
+#define XGMAC_DMA_RX_BASE_ADDR 0x00000f0c /* Receive List Base */
+#define XGMAC_DMA_TX_BASE_ADDR 0x00000f10 /* Transmit List Base */
+#define XGMAC_DMA_STATUS 0x00000f14 /* Status Register */
+#define XGMAC_DMA_CONTROL 0x00000f18 /* Ctrl (Operational Mode) */
+#define XGMAC_DMA_INTR_ENA 0x00000f1c /* Interrupt Enable */
+#define XGMAC_DMA_MISS_FRAME_CTR 0x00000f20 /* Missed Frame Counter */
+#define XGMAC_DMA_RI_WDOG_TIMER 0x00000f24 /* RX Intr Watchdog Timer */
+#define XGMAC_DMA_AXI_BUS 0x00000f28 /* AXI Bus Mode */
+#define XGMAC_DMA_AXI_STATUS 0x00000f2C /* AXI Status */
+#define XGMAC_DMA_HW_FEATURE 0x00000f58 /* Enabled Hardware Features */
+
+#define XGMAC_ADDR_AE 0x80000000
+#define XGMAC_MAX_FILTER_ADDR 31
+
+/* PMT Control and Status */
+#define XGMAC_PMT_POINTER_RESET 0x80000000
+#define XGMAC_PMT_GLBL_UNICAST 0x00000200
+#define XGMAC_PMT_WAKEUP_RX_FRM 0x00000040
+#define XGMAC_PMT_MAGIC_PKT 0x00000020
+#define XGMAC_PMT_WAKEUP_FRM_EN 0x00000004
+#define XGMAC_PMT_MAGIC_PKT_EN 0x00000002
+#define XGMAC_PMT_POWERDOWN 0x00000001
+
+#define XGMAC_CONTROL_SPD 0x40000000 /* Speed control */
+#define XGMAC_CONTROL_SPD_MASK 0x60000000
+#define XGMAC_CONTROL_SPD_1G 0x60000000
+#define XGMAC_CONTROL_SPD_2_5G 0x40000000
+#define XGMAC_CONTROL_SPD_10G 0x00000000
+#define XGMAC_CONTROL_SARC 0x10000000 /* Source Addr Insert/Replace */
+#define XGMAC_CONTROL_SARK_MASK 0x18000000
+#define XGMAC_CONTROL_CAR 0x04000000 /* CRC Addition/Replacement */
+#define XGMAC_CONTROL_CAR_MASK 0x06000000
+#define XGMAC_CONTROL_DP 0x01000000 /* Disable Padding */
+#define XGMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on rx */
+#define XGMAC_CONTROL_JD 0x00400000 /* Jabber disable */
+#define XGMAC_CONTROL_JE 0x00100000 /* Jumbo frame */
+#define XGMAC_CONTROL_LM 0x00001000 /* Loop-back mode */
+#define XGMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */
+#define XGMAC_CONTROL_ACS 0x00000080 /* Automatic Pad/FCS Strip */
+#define XGMAC_CONTROL_DDIC 0x00000010 /* Disable Deficit Idle Count */
+#define XGMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
+#define XGMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
+
+/* XGMAC Frame Filter defines */
+#define XGMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
+#define XGMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */
+#define XGMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */
+#define XGMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */
+#define XGMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */
+#define XGMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */
+#define XGMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */
+#define XGMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */
+#define XGMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */
+#define XGMAC_FRAME_FILTER_VHF 0x00000800 /* VLAN Hash Filter */
+#define XGMAC_FRAME_FILTER_VPF 0x00001000 /* VLAN Perfect Filter */
+#define XGMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */
+
+/* XGMAC FLOW CTRL defines */
+#define XGMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
+#define XGMAC_FLOW_CTRL_PT_SHIFT 16
+#define XGMAC_FLOW_CTRL_DZQP 0x00000080 /* Disable Zero-Quanta Phase */
+#define XGMAC_FLOW_CTRL_PLT 0x00000020 /* Pause Low Threshold */
+#define XGMAC_FLOW_CTRL_PLT_MASK 0x00000030 /* PLT MASK */
+#define XGMAC_FLOW_CTRL_UP 0x00000008 /* Unicast Pause Frame Detect */
+#define XGMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */
+#define XGMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */
+#define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */
+
+/* XGMAC_INT_STAT reg */
+#define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */
+#define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */
+
+/* DMA Bus Mode register defines */
+#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
+#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
+#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
+#define DMA_BUS_MODE_ATDS 0x00000080 /* Alternate Descriptor Size */
+
+/* Programmable burst length */
+#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
+#define DMA_BUS_MODE_PBL_SHIFT 8
+#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */
+#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */
+#define DMA_BUS_MODE_RPBL_SHIFT 17
+#define DMA_BUS_MODE_USP 0x00800000
+#define DMA_BUS_MODE_8PBL 0x01000000
+#define DMA_BUS_MODE_AAL 0x02000000
+
+/* DMA Bus Mode register defines */
+#define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */
+#define DMA_BUS_PR_RATIO_SHIFT 14
+#define DMA_BUS_FB 0x00010000 /* Fixed Burst */
+
+/* DMA Control register defines */
+#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
+#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
+#define DMA_CONTROL_DFF 0x01000000 /* Disable flush of rx frames */
+
+/* DMA Normal interrupt */
+#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
+#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
+#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
+#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
+#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
+#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
+#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
+#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
+#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
+#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
+#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
+#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavail */
+#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
+#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
+
+#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
+ DMA_INTR_ENA_TUE)
+
+#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
+ DMA_INTR_ENA_RWE | DMA_INTR_ENA_RSE | \
+ DMA_INTR_ENA_RUE | DMA_INTR_ENA_UNE | \
+ DMA_INTR_ENA_OVE | DMA_INTR_ENA_TJE | \
+ DMA_INTR_ENA_TSE)
+
+/* DMA default interrupt mask */
+#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
+
+/* DMA Status register defines */
+#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
+#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
+#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
+#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
+#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
+#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
+#define DMA_STATUS_TS_SHIFT 20
+#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
+#define DMA_STATUS_RS_SHIFT 17
+#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
+#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
+#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
+#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
+#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
+#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
+#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
+#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
+#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
+#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
+#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
+#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavail */
+#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
+#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
+
+/* Common MAC defines */
+#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
+#define MAC_ENABLE_RX 0x00000004 /* Receiver Enable */
+
+/* XGMAC Operation Mode Register */
+#define XGMAC_OMR_TSF 0x00200000 /* TX FIFO Store and Forward */
+#define XGMAC_OMR_FTF 0x00100000 /* Flush Transmit FIFO */
+#define XGMAC_OMR_TTC 0x00020000 /* Transmit Threshold Ctrl */
+#define XGMAC_OMR_TTC_MASK 0x00030000
+#define XGMAC_OMR_RFD 0x00006000 /* FC Deactivation Threshold */
+#define XGMAC_OMR_RFD_MASK 0x00007000 /* FC Deact Threshold MASK */
+#define XGMAC_OMR_RFA 0x00000600 /* FC Activation Threshold */
+#define XGMAC_OMR_RFA_MASK 0x00000E00 /* FC Act Threshold MASK */
+#define XGMAC_OMR_EFC 0x00000100 /* Enable Hardware FC */
+#define XGMAC_OMR_FEF 0x00000080 /* Forward Error Frames */
+#define XGMAC_OMR_DT 0x00000040 /* Drop TCP/IP csum Errors */
+#define XGMAC_OMR_RSF 0x00000020 /* RX FIFO Store and Forward */
+#define XGMAC_OMR_RTC 0x00000010 /* RX Threshold Ctrl */
+#define XGMAC_OMR_RTC_MASK 0x00000018 /* RX Threshold Ctrl MASK */
+
+/* XGMAC HW Features Register */
+#define DMA_HW_FEAT_TXCOESEL 0x00010000 /* TX Checksum offload */
+
+#define XGMAC_MMC_CTRL_CNT_FRZ 0x00000008
+
+/* XGMAC Descriptor Defines */
+#define MAX_DESC_BUF_SZ (0x2000 - 8)
+
+#define RXDESC_EXT_STATUS 0x00000001
+#define RXDESC_CRC_ERR 0x00000002
+#define RXDESC_RX_ERR 0x00000008
+#define RXDESC_RX_WDOG 0x00000010
+#define RXDESC_FRAME_TYPE 0x00000020
+#define RXDESC_GIANT_FRAME 0x00000080
+#define RXDESC_LAST_SEG 0x00000100
+#define RXDESC_FIRST_SEG 0x00000200
+#define RXDESC_VLAN_FRAME 0x00000400
+#define RXDESC_OVERFLOW_ERR 0x00000800
+#define RXDESC_LENGTH_ERR 0x00001000
+#define RXDESC_SA_FILTER_FAIL 0x00002000
+#define RXDESC_DESCRIPTOR_ERR 0x00004000
+#define RXDESC_ERROR_SUMMARY 0x00008000
+#define RXDESC_FRAME_LEN_OFFSET 16
+#define RXDESC_FRAME_LEN_MASK 0x3fff0000
+#define RXDESC_DA_FILTER_FAIL 0x40000000
+
+#define RXDESC1_END_RING 0x00008000
+
+#define RXDESC_IP_PAYLOAD_MASK 0x00000003
+#define RXDESC_IP_PAYLOAD_UDP 0x00000001
+#define RXDESC_IP_PAYLOAD_TCP 0x00000002
+#define RXDESC_IP_PAYLOAD_ICMP 0x00000003
+#define RXDESC_IP_HEADER_ERR 0x00000008
+#define RXDESC_IP_PAYLOAD_ERR 0x00000010
+#define RXDESC_IPV4_PACKET 0x00000040
+#define RXDESC_IPV6_PACKET 0x00000080
+#define TXDESC_UNDERFLOW_ERR 0x00000001
+#define TXDESC_JABBER_TIMEOUT 0x00000002
+#define TXDESC_LOCAL_FAULT 0x00000004
+#define TXDESC_REMOTE_FAULT 0x00000008
+#define TXDESC_VLAN_FRAME 0x00000010
+#define TXDESC_FRAME_FLUSHED 0x00000020
+#define TXDESC_IP_HEADER_ERR 0x00000040
+#define TXDESC_PAYLOAD_CSUM_ERR 0x00000080
+#define TXDESC_ERROR_SUMMARY 0x00008000
+#define TXDESC_SA_CTRL_INSERT 0x00040000
+#define TXDESC_SA_CTRL_REPLACE 0x00080000
+#define TXDESC_2ND_ADDR_CHAINED 0x00100000
+#define TXDESC_END_RING 0x00200000
+#define TXDESC_CSUM_IP 0x00400000
+#define TXDESC_CSUM_IP_PAYLD 0x00800000
+#define TXDESC_CSUM_ALL 0x00C00000
+#define TXDESC_CRC_EN_REPLACE 0x01000000
+#define TXDESC_CRC_EN_APPEND 0x02000000
+#define TXDESC_DISABLE_PAD 0x04000000
+#define TXDESC_FIRST_SEG 0x10000000
+#define TXDESC_LAST_SEG 0x20000000
+#define TXDESC_INTERRUPT 0x40000000
+
+#define DESC_OWN 0x80000000
+#define DESC_BUFFER1_SZ_MASK 0x00001fff
+#define DESC_BUFFER2_SZ_MASK 0x1fff0000
+#define DESC_BUFFER2_SZ_OFFSET 16
+
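+/* DMA descriptor, in the 8-word (32 byte) alternate layout selected via
+ * DMA_BUS_MODE_ATDS in xgmac_hw_init() below.
+ */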
+struct xgmac_dma_desc {
+ __le32 flags;
+ __le32 buf_size;
+ __le32 buf1_addr; /* Buffer 1 Address Pointer */
+ __le32 buf2_addr; /* Buffer 2 Address Pointer */
+ __le32 ext_status;
+ __le32 res[3];
+};
+
+struct xgmac_extra_stats {
+ /* Transmit errors */
+ unsigned long tx_jabber;
+ unsigned long tx_frame_flushed;
+ unsigned long tx_payload_error;
+ unsigned long tx_ip_header_error;
+ unsigned long tx_local_fault;
+ unsigned long tx_remote_fault;
+ /* Receive errors */
+ unsigned long rx_watchdog;
+ unsigned long rx_da_filter_fail;
+ unsigned long rx_sa_filter_fail;
+ unsigned long rx_payload_error;
+ unsigned long rx_ip_header_error;
+ /* Tx/Rx IRQ errors */
+ unsigned long tx_undeflow;
+ unsigned long tx_process_stopped;
+ unsigned long rx_buf_unav;
+ unsigned long rx_process_stopped;
+ unsigned long tx_early;
+ unsigned long fatal_bus_error;
+};
+
+struct xgmac_priv {
+ struct xgmac_dma_desc *dma_rx;
+ struct sk_buff **rx_skbuff;
+ unsigned int rx_tail;
+ unsigned int rx_head;
+
+ struct xgmac_dma_desc *dma_tx;
+ struct sk_buff **tx_skbuff;
+ unsigned int tx_head;
+ unsigned int tx_tail;
+
+ void __iomem *base;
+ struct sk_buff_head rx_recycle;
+ unsigned int dma_buf_sz;
+ dma_addr_t dma_rx_phy;
+ dma_addr_t dma_tx_phy;
+
+ struct net_device *dev;
+ struct device *device;
+ struct napi_struct napi;
+
+ struct xgmac_extra_stats xstats;
+
+ spinlock_t stats_lock;
+ int pmt_irq;
+ char rx_pause;
+ char tx_pause;
+ int wolopts;
+};
+
+/* XGMAC Configuration Settings */
+#define MAX_MTU 9000
+#define PAUSE_TIME 0x400
+
+#define DMA_RX_RING_SZ 256
+#define DMA_TX_RING_SZ 128
+/* minimum number of free TX descriptors required to wake up TX process */
+#define TX_THRESH (DMA_TX_RING_SZ/4)
+
+/* DMA descriptor ring helpers */
+#define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1))
+#define dma_ring_space(h, t, s) CIRC_SPACE(h, t, s)
+#define dma_ring_cnt(h, t, s) CIRC_CNT(h, t, s)
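+/*
+ * These helpers rely on power-of-two ring sizes (as DMA_RX_RING_SZ and
+ * DMA_TX_RING_SZ are). E.g. for a 128-entry ring with head == 5 and
+ * tail == 120, dma_ring_cnt() is (5 - 120) & 127 == 13 descriptors in
+ * flight and dma_ring_space() is 114 free slots.
+ */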
+
+/* XGMAC Descriptor Access Helpers */
+static inline void desc_set_buf_len(struct xgmac_dma_desc *p, u32 buf_sz)
+{
+ if (buf_sz > MAX_DESC_BUF_SZ)
+ p->buf_size = cpu_to_le32(MAX_DESC_BUF_SZ |
+ (buf_sz - MAX_DESC_BUF_SZ) << DESC_BUFFER2_SZ_OFFSET);
+ else
+ p->buf_size = cpu_to_le32(buf_sz);
+}
+
+static inline int desc_get_buf_len(struct xgmac_dma_desc *p)
+{
+ /* buffer lengths live in buf_size (see desc_set_buf_len) */
+ u32 len = le32_to_cpu(p->buf_size);
+ return (len & DESC_BUFFER1_SZ_MASK) +
+ ((len & DESC_BUFFER2_SZ_MASK) >> DESC_BUFFER2_SZ_OFFSET);
+}
+
+static inline void desc_init_rx_desc(struct xgmac_dma_desc *p, int ring_size,
+ int buf_sz)
+{
+ struct xgmac_dma_desc *end = p + ring_size - 1;
+
+ memset(p, 0, sizeof(*p) * ring_size);
+
+ for (; p <= end; p++)
+ desc_set_buf_len(p, buf_sz);
+
+ end->buf_size |= cpu_to_le32(RXDESC1_END_RING);
+}
+
+static inline void desc_init_tx_desc(struct xgmac_dma_desc *p, u32 ring_size)
+{
+ memset(p, 0, sizeof(*p) * ring_size);
+ p[ring_size - 1].flags = cpu_to_le32(TXDESC_END_RING);
+}
+
+static inline int desc_get_owner(struct xgmac_dma_desc *p)
+{
+ return le32_to_cpu(p->flags) & DESC_OWN;
+}
+
+static inline void desc_set_rx_owner(struct xgmac_dma_desc *p)
+{
+ /* Clear all fields and set the owner */
+ p->flags = cpu_to_le32(DESC_OWN);
+}
+
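+/* Hand a TX descriptor (back) to the DMA: only the TXDESC_END_RING marker is
+ * kept from the old flags; everything else is replaced by the caller's flags
+ * plus DESC_OWN.
+ */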
+static inline void desc_set_tx_owner(struct xgmac_dma_desc *p, u32 flags)
+{
+ u32 tmpflags = le32_to_cpu(p->flags);
+ tmpflags &= TXDESC_END_RING;
+ tmpflags |= flags | DESC_OWN;
+ p->flags = cpu_to_le32(tmpflags);
+}
+
+static inline int desc_get_tx_ls(struct xgmac_dma_desc *p)
+{
+ return le32_to_cpu(p->flags) & TXDESC_LAST_SEG;
+}
+
+static inline u32 desc_get_buf_addr(struct xgmac_dma_desc *p)
+{
+ return le32_to_cpu(p->buf1_addr);
+}
+
+static inline void desc_set_buf_addr(struct xgmac_dma_desc *p,
+ u32 paddr, int len)
+{
+ p->buf1_addr = cpu_to_le32(paddr);
+ if (len > MAX_DESC_BUF_SZ)
+ p->buf2_addr = cpu_to_le32(paddr + MAX_DESC_BUF_SZ);
+}
+
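+/* Example: with MAX_DESC_BUF_SZ == 8184, desc_set_buf_addr_and_size() splits
+ * a 9000 byte jumbo buffer across both descriptor buffers: 8184 bytes at
+ * paddr in buffer 1 and the remaining 816 bytes at paddr + 8184 in buffer 2.
+ */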
+static inline void desc_set_buf_addr_and_size(struct xgmac_dma_desc *p,
+ u32 paddr, int len)
+{
+ desc_set_buf_len(p, len);
+ desc_set_buf_addr(p, paddr, len);
+}
+
+static inline int desc_get_rx_frame_len(struct xgmac_dma_desc *p)
+{
+ u32 data = le32_to_cpu(p->flags);
+ u32 len = (data & RXDESC_FRAME_LEN_MASK) >> RXDESC_FRAME_LEN_OFFSET;
+ if (data & RXDESC_FRAME_TYPE)
+ len -= ETH_FCS_LEN;
+
+ return len;
+}
+
+static void xgmac_dma_flush_tx_fifo(void __iomem *ioaddr)
+{
+ int timeout = 1000;
+ u32 reg = readl(ioaddr + XGMAC_OMR);
+ writel(reg | XGMAC_OMR_FTF, ioaddr + XGMAC_OMR);
+
+ while ((timeout-- > 0) && readl(ioaddr + XGMAC_OMR) & XGMAC_OMR_FTF)
+ udelay(1);
+}
+
+static int desc_get_tx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
+{
+ struct xgmac_extra_stats *x = &priv->xstats;
+ u32 status = le32_to_cpu(p->flags);
+
+ if (!(status & TXDESC_ERROR_SUMMARY))
+ return 0;
+
+ netdev_dbg(priv->dev, "tx desc error = 0x%08x\n", status);
+ if (status & TXDESC_JABBER_TIMEOUT)
+ x->tx_jabber++;
+ if (status & TXDESC_FRAME_FLUSHED)
+ x->tx_frame_flushed++;
+ if (status & TXDESC_UNDERFLOW_ERR)
+ xgmac_dma_flush_tx_fifo(priv->base);
+ if (status & TXDESC_IP_HEADER_ERR)
+ x->tx_ip_header_error++;
+ if (status & TXDESC_LOCAL_FAULT)
+ x->tx_local_fault++;
+ if (status & TXDESC_REMOTE_FAULT)
+ x->tx_remote_fault++;
+ if (status & TXDESC_PAYLOAD_CSUM_ERR)
+ x->tx_payload_error++;
+
+ return -1;
+}
+
+static int desc_get_rx_status(struct xgmac_priv *priv, struct xgmac_dma_desc *p)
+{
+ struct xgmac_extra_stats *x = &priv->xstats;
+ int ret = CHECKSUM_UNNECESSARY;
+ u32 status = le32_to_cpu(p->flags);
+ u32 ext_status = le32_to_cpu(p->ext_status);
+
+ if (status & RXDESC_DA_FILTER_FAIL) {
+ netdev_dbg(priv->dev, "XGMAC RX : Dest Address filter fail\n");
+ x->rx_da_filter_fail++;
+ return -1;
+ }
+
+ /* Check if packet has checksum already */
+ if ((status & RXDESC_FRAME_TYPE) && (status & RXDESC_EXT_STATUS) &&
+ !(ext_status & RXDESC_IP_PAYLOAD_MASK))
+ ret = CHECKSUM_NONE;
+
+ netdev_dbg(priv->dev, "rx status - frame type=%d, csum = %d, ext stat %08x\n",
+ (status & RXDESC_FRAME_TYPE) ? 1 : 0, ret, ext_status);
+
+ if (!(status & RXDESC_ERROR_SUMMARY))
+ return ret;
+
+ /* Handle any errors */
+ if (status & (RXDESC_DESCRIPTOR_ERR | RXDESC_OVERFLOW_ERR |
+ RXDESC_GIANT_FRAME | RXDESC_LENGTH_ERR | RXDESC_CRC_ERR))
+ return -1;
+
+ if (status & RXDESC_EXT_STATUS) {
+ if (ext_status & RXDESC_IP_HEADER_ERR)
+ x->rx_ip_header_error++;
+ if (ext_status & RXDESC_IP_PAYLOAD_ERR)
+ x->rx_payload_error++;
+ netdev_dbg(priv->dev, "IP checksum error - stat %08x\n",
+ ext_status);
+ return CHECKSUM_NONE;
+ }
+
+ return ret;
+}
+
+static inline void xgmac_mac_enable(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + XGMAC_CONTROL);
+ value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
+ writel(value, ioaddr + XGMAC_CONTROL);
+
+ value = readl(ioaddr + XGMAC_DMA_CONTROL);
+ value |= DMA_CONTROL_ST | DMA_CONTROL_SR;
+ writel(value, ioaddr + XGMAC_DMA_CONTROL);
+}
+
+static inline void xgmac_mac_disable(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_CONTROL);
+ value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
+ writel(value, ioaddr + XGMAC_DMA_CONTROL);
+
+ value = readl(ioaddr + XGMAC_CONTROL);
+ value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
+ writel(value, ioaddr + XGMAC_CONTROL);
+}
+
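+/* Program a MAC address filter entry. For example, for 00:25:90:aa:bb:cc the
+ * low register ends up as 0xaa902500 and the high register as 0x0000ccbb
+ * (with XGMAC_ADDR_AE also set for entries other than 0).
+ */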
+static void xgmac_set_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+ int num)
+{
+ u32 data;
+
+ data = (addr[5] << 8) | addr[4] | (num ? XGMAC_ADDR_AE : 0);
+ writel(data, ioaddr + XGMAC_ADDR_HIGH(num));
+ data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ writel(data, ioaddr + XGMAC_ADDR_LOW(num));
+}
+
+static void xgmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+ int num)
+{
+ u32 hi_addr, lo_addr;
+
+ /* Read the MAC address from the hardware */
+ hi_addr = readl(ioaddr + XGMAC_ADDR_HIGH(num));
+ lo_addr = readl(ioaddr + XGMAC_ADDR_LOW(num));
+
+ /* Extract the MAC address from the high and low words */
+ addr[0] = lo_addr & 0xff;
+ addr[1] = (lo_addr >> 8) & 0xff;
+ addr[2] = (lo_addr >> 16) & 0xff;
+ addr[3] = (lo_addr >> 24) & 0xff;
+ addr[4] = hi_addr & 0xff;
+ addr[5] = (hi_addr >> 8) & 0xff;
+}
+
+static int xgmac_set_flow_ctrl(struct xgmac_priv *priv, int rx, int tx)
+{
+ u32 reg;
+ unsigned int flow = 0;
+
+ priv->rx_pause = rx;
+ priv->tx_pause = tx;
+
+ if (rx || tx) {
+ if (rx)
+ flow |= XGMAC_FLOW_CTRL_RFE;
+ if (tx)
+ flow |= XGMAC_FLOW_CTRL_TFE;
+
+ flow |= XGMAC_FLOW_CTRL_PLT | XGMAC_FLOW_CTRL_UP;
+ flow |= (PAUSE_TIME << XGMAC_FLOW_CTRL_PT_SHIFT);
+
+ writel(flow, priv->base + XGMAC_FLOW_CTRL);
+
+ reg = readl(priv->base + XGMAC_OMR);
+ reg |= XGMAC_OMR_EFC;
+ writel(reg, priv->base + XGMAC_OMR);
+ } else {
+ writel(0, priv->base + XGMAC_FLOW_CTRL);
+
+ reg = readl(priv->base + XGMAC_OMR);
+ reg &= ~XGMAC_OMR_EFC;
+ writel(reg, priv->base + XGMAC_OMR);
+ }
+
+ return 0;
+}
+
+static void xgmac_rx_refill(struct xgmac_priv *priv)
+{
+ struct xgmac_dma_desc *p;
+ dma_addr_t paddr;
+
+ while (dma_ring_space(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ) > 1) {
+ int entry = priv->rx_head;
+ struct sk_buff *skb;
+
+ p = priv->dma_rx + entry;
+
+ if (priv->rx_skbuff[entry] != NULL)
+ continue;
+
+ skb = __skb_dequeue(&priv->rx_recycle);
+ if (skb == NULL)
+ skb = netdev_alloc_skb(priv->dev, priv->dma_buf_sz);
+ if (unlikely(skb == NULL))
+ break;
+
+ priv->rx_skbuff[entry] = skb;
+ paddr = dma_map_single(priv->device, skb->data,
+ priv->dma_buf_sz, DMA_FROM_DEVICE);
+ desc_set_buf_addr(p, paddr, priv->dma_buf_sz);
+
+ netdev_dbg(priv->dev, "rx ring: head %d, tail %d\n",
+ priv->rx_head, priv->rx_tail);
+
+ priv->rx_head = dma_ring_incr(priv->rx_head, DMA_RX_RING_SZ);
+ /* Ensure descriptor is in memory before handing to h/w */
+ wmb();
+ desc_set_rx_owner(p);
+ }
+}
+
+/**
+ * xgmac_dma_desc_rings_init - init the RX/TX descriptor rings
+ * @dev: net device structure
+ * Description: this function initializes the DMA RX/TX descriptors
+ * and allocates the socket buffers.
+ */
+static int xgmac_dma_desc_rings_init(struct net_device *dev)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+ unsigned int bfsize;
+
+ /* Set the buffer size according to the MTU;
+ * for jumbo frames the buffer size needs to be bumped up.
+ */
+ bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN + 64,
+ 64);
+
+ netdev_dbg(priv->dev, "mtu [%d] bfsize [%d]\n", dev->mtu, bfsize);
+
+ priv->rx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_RX_RING_SZ,
+ GFP_KERNEL);
+ if (!priv->rx_skbuff)
+ return -ENOMEM;
+
+ priv->dma_rx = dma_alloc_coherent(priv->device,
+ DMA_RX_RING_SZ *
+ sizeof(struct xgmac_dma_desc),
+ &priv->dma_rx_phy,
+ GFP_KERNEL);
+ if (!priv->dma_rx)
+ goto err_dma_rx;
+
+ priv->tx_skbuff = kzalloc(sizeof(struct sk_buff *) * DMA_TX_RING_SZ,
+ GFP_KERNEL);
+ if (!priv->tx_skbuff)
+ goto err_tx_skb;
+
+ priv->dma_tx = dma_alloc_coherent(priv->device,
+ DMA_TX_RING_SZ *
+ sizeof(struct xgmac_dma_desc),
+ &priv->dma_tx_phy,
+ GFP_KERNEL);
+ if (!priv->dma_tx)
+ goto err_dma_tx;
+
+ netdev_dbg(priv->dev, "DMA desc rings: virt addr (Rx %p, "
+ "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",
+ priv->dma_rx, priv->dma_tx,
+ (unsigned int)priv->dma_rx_phy, (unsigned int)priv->dma_tx_phy);
+
+ priv->rx_tail = 0;
+ priv->rx_head = 0;
+ priv->dma_buf_sz = bfsize;
+ desc_init_rx_desc(priv->dma_rx, DMA_RX_RING_SZ, priv->dma_buf_sz);
+ xgmac_rx_refill(priv);
+
+ priv->tx_tail = 0;
+ priv->tx_head = 0;
+ desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
+
+ writel(priv->dma_tx_phy, priv->base + XGMAC_DMA_TX_BASE_ADDR);
+ writel(priv->dma_rx_phy, priv->base + XGMAC_DMA_RX_BASE_ADDR);
+
+ return 0;
+
+err_dma_tx:
+ kfree(priv->tx_skbuff);
+err_tx_skb:
+ dma_free_coherent(priv->device,
+ DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
+ priv->dma_rx, priv->dma_rx_phy);
+err_dma_rx:
+ kfree(priv->rx_skbuff);
+ return -ENOMEM;
+}
+
+static void xgmac_free_rx_skbufs(struct xgmac_priv *priv)
+{
+ int i;
+ struct xgmac_dma_desc *p;
+
+ if (!priv->rx_skbuff)
+ return;
+
+ for (i = 0; i < DMA_RX_RING_SZ; i++) {
+ if (priv->rx_skbuff[i] == NULL)
+ continue;
+
+ p = priv->dma_rx + i;
+ dma_unmap_single(priv->device, desc_get_buf_addr(p),
+ priv->dma_buf_sz, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(priv->rx_skbuff[i]);
+ priv->rx_skbuff[i] = NULL;
+ }
+}
+
+static void xgmac_free_tx_skbufs(struct xgmac_priv *priv)
+{
+ int i, f;
+ struct xgmac_dma_desc *p;
+
+ if (!priv->tx_skbuff)
+ return;
+
+ for (i = 0; i < DMA_TX_RING_SZ; i++) {
+ if (priv->tx_skbuff[i] == NULL)
+ continue;
+
+ p = priv->dma_tx + i;
+ dma_unmap_single(priv->device, desc_get_buf_addr(p),
+ desc_get_buf_len(p), DMA_TO_DEVICE);
+
+ for (f = 0; f < skb_shinfo(priv->tx_skbuff[i])->nr_frags; f++) {
+ p = priv->dma_tx + i++;
+ dma_unmap_page(priv->device, desc_get_buf_addr(p),
+ desc_get_buf_len(p), DMA_TO_DEVICE);
+ }
+
+ dev_kfree_skb_any(priv->tx_skbuff[i]);
+ priv->tx_skbuff[i] = NULL;
+ }
+}
+
+static void xgmac_free_dma_desc_rings(struct xgmac_priv *priv)
+{
+ /* Release the DMA TX/RX socket buffers */
+ xgmac_free_rx_skbufs(priv);
+ xgmac_free_tx_skbufs(priv);
+
+ /* Free the consistent memory allocated for descriptor rings */
+ if (priv->dma_tx) {
+ dma_free_coherent(priv->device,
+ DMA_TX_RING_SZ * sizeof(struct xgmac_dma_desc),
+ priv->dma_tx, priv->dma_tx_phy);
+ priv->dma_tx = NULL;
+ }
+ if (priv->dma_rx) {
+ dma_free_coherent(priv->device,
+ DMA_RX_RING_SZ * sizeof(struct xgmac_dma_desc),
+ priv->dma_rx, priv->dma_rx_phy);
+ priv->dma_rx = NULL;
+ }
+ kfree(priv->rx_skbuff);
+ priv->rx_skbuff = NULL;
+ kfree(priv->tx_skbuff);
+ priv->tx_skbuff = NULL;
+}
+
+/**
+ * xgmac_tx_complete:
+ * @priv: private driver structure
+ * Description: it reclaims resources after transmission completes.
+ */
+static void xgmac_tx_complete(struct xgmac_priv *priv)
+{
+ int i;
+ void __iomem *ioaddr = priv->base;
+
+ writel(DMA_STATUS_TU | DMA_STATUS_NIS, ioaddr + XGMAC_DMA_STATUS);
+
+ while (dma_ring_cnt(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ)) {
+ unsigned int entry = priv->tx_tail;
+ struct sk_buff *skb = priv->tx_skbuff[entry];
+ struct xgmac_dma_desc *p = priv->dma_tx + entry;
+
+ /* Check if the descriptor is owned by the DMA. */
+ if (desc_get_owner(p))
+ break;
+
+ /* Verify tx error by looking at the last segment */
+ if (desc_get_tx_ls(p))
+ desc_get_tx_status(priv, p);
+
+ netdev_dbg(priv->dev, "tx ring: curr %d, dirty %d\n",
+ priv->tx_head, priv->tx_tail);
+
+ dma_unmap_single(priv->device, desc_get_buf_addr(p),
+ desc_get_buf_len(p), DMA_TO_DEVICE);
+
+ priv->tx_skbuff[entry] = NULL;
+ priv->tx_tail = dma_ring_incr(entry, DMA_TX_RING_SZ);
+
+ if (!skb) {
+ continue;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ entry = priv->tx_tail = dma_ring_incr(priv->tx_tail,
+ DMA_TX_RING_SZ);
+ p = priv->dma_tx + priv->tx_tail;
+
+ dma_unmap_page(priv->device, desc_get_buf_addr(p),
+ desc_get_buf_len(p), DMA_TO_DEVICE);
+ }
+
+ /*
+ * If there's room in the queue (limit it to size)
+ * we add this skb back into the pool,
+ * if it's the right size.
+ */
+ if ((skb_queue_len(&priv->rx_recycle) <
+ DMA_RX_RING_SZ) &&
+ skb_recycle_check(skb, priv->dma_buf_sz))
+ __skb_queue_head(&priv->rx_recycle, skb);
+ else
+ dev_kfree_skb(skb);
+ }
+
+ if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) >
+ TX_THRESH)
+ netif_wake_queue(priv->dev);
+}
+
+/**
+ * xgmac_tx_err:
+ * @priv: pointer to the private device structure
+ * Description: it cleans the descriptors and restarts the transmission
+ * in case of errors.
+ */
+static void xgmac_tx_err(struct xgmac_priv *priv)
+{
+ u32 reg, value, inten;
+
+ netif_stop_queue(priv->dev);
+
+ inten = readl(priv->base + XGMAC_DMA_INTR_ENA);
+ writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+
+ reg = readl(priv->base + XGMAC_DMA_CONTROL);
+ writel(reg & ~DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
+ do {
+ value = readl(priv->base + XGMAC_DMA_STATUS) & 0x700000;
+ } while (value && (value != 0x600000));
+
+ xgmac_free_tx_skbufs(priv);
+ desc_init_tx_desc(priv->dma_tx, DMA_TX_RING_SZ);
+ priv->tx_tail = 0;
+ priv->tx_head = 0;
+ writel(reg | DMA_CONTROL_ST, priv->base + XGMAC_DMA_CONTROL);
+
+ writel(DMA_STATUS_TU | DMA_STATUS_TPS | DMA_STATUS_NIS | DMA_STATUS_AIS,
+ priv->base + XGMAC_DMA_STATUS);
+ writel(inten, priv->base + XGMAC_DMA_INTR_ENA);
+
+ netif_wake_queue(priv->dev);
+}
+
+static int xgmac_hw_init(struct net_device *dev)
+{
+ u32 value, ctrl;
+ int limit;
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->base;
+
+ /* Save the ctrl register value */
+ ctrl = readl(ioaddr + XGMAC_CONTROL) & XGMAC_CONTROL_SPD_MASK;
+
+ /* SW reset */
+ value = DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
+ limit = 15000;
+ while (limit-- &&
+ (readl(ioaddr + XGMAC_DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
+ cpu_relax();
+ if (limit < 0)
+ return -EBUSY;
+
+ value = (0x10 << DMA_BUS_MODE_PBL_SHIFT) |
+ (0x10 << DMA_BUS_MODE_RPBL_SHIFT) |
+ DMA_BUS_MODE_FB | DMA_BUS_MODE_ATDS | DMA_BUS_MODE_AAL;
+ writel(value, ioaddr + XGMAC_DMA_BUS_MODE);
+
+ /* Enable interrupts */
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+
+ /* XGMAC requires AXI bus init. This is a 'magic number' for now */
+ writel(0x000100E, ioaddr + XGMAC_DMA_AXI_BUS);
+
+ ctrl |= XGMAC_CONTROL_DDIC | XGMAC_CONTROL_JE | XGMAC_CONTROL_ACS |
+ XGMAC_CONTROL_CAR;
+ if (dev->features & NETIF_F_RXCSUM)
+ ctrl |= XGMAC_CONTROL_IPC;
+ writel(ctrl, ioaddr + XGMAC_CONTROL);
+
+ value = DMA_CONTROL_DFF;
+ writel(value, ioaddr + XGMAC_DMA_CONTROL);
+
+ /* Set the HW DMA mode and the COE */
+ writel(XGMAC_OMR_TSF | XGMAC_OMR_RSF | XGMAC_OMR_RFD | XGMAC_OMR_RFA,
+ ioaddr + XGMAC_OMR);
+
+ /* Reset the MMC counters */
+ writel(1, ioaddr + XGMAC_MMC_CTRL);
+ return 0;
+}
+
+/**
+ * xgmac_open - open entry point of the driver
+ * @dev : pointer to the device structure.
+ * Description:
+ * This function is the open entry point of the driver.
+ * Return value:
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.
+ */
+static int xgmac_open(struct net_device *dev)
+{
+ int ret;
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->base;
+
+ /* If the MAC address is not valid, fall back to a random one so
+ * that the device can still be brought up. A specific address can
+ * be set later with the usual linux command:
+ * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */
+ if (!is_valid_ether_addr(dev->dev_addr)) {
+ random_ether_addr(dev->dev_addr);
+ netdev_dbg(priv->dev, "generated random MAC address %pM\n",
+ dev->dev_addr);
+ }
+
+ skb_queue_head_init(&priv->rx_recycle);
+ memset(&priv->xstats, 0, sizeof(struct xgmac_extra_stats));
+
+ /* Initialize the XGMAC and descriptors */
+ xgmac_hw_init(dev);
+ xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
+ xgmac_set_flow_ctrl(priv, priv->rx_pause, priv->tx_pause);
+
+ ret = xgmac_dma_desc_rings_init(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Enable the MAC Rx/Tx */
+ xgmac_mac_enable(ioaddr);
+
+ napi_enable(&priv->napi);
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/**
+ * xgmac_stop - close entry point of the driver
+ * @dev : device pointer.
+ * Description:
+ * This is the stop entry point of the driver.
+ */
+static int xgmac_stop(struct net_device *dev)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+
+ netif_stop_queue(dev);
+
+ if (readl(priv->base + XGMAC_DMA_INTR_ENA))
+ napi_disable(&priv->napi);
+
+ writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+ skb_queue_purge(&priv->rx_recycle);
+
+ /* Disable the MAC core */
+ xgmac_mac_disable(priv->base);
+
+ /* Release and free the Rx/Tx resources */
+ xgmac_free_dma_desc_rings(priv);
+
+ return 0;
+}
+
+/**
+ * xgmac_xmit:
+ * @skb : the socket buffer
+ * @dev : device pointer
+ * Description : Tx entry point of the driver.
+ */
+static netdev_tx_t xgmac_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+ unsigned int entry;
+ int i;
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ struct xgmac_dma_desc *desc, *first;
+ unsigned int desc_flags;
+ unsigned int len;
+ dma_addr_t paddr;
+
+ if (dma_ring_space(priv->tx_head, priv->tx_tail, DMA_TX_RING_SZ) <
+ (nfrags + 1)) {
+ writel(DMA_INTR_DEFAULT_MASK | DMA_INTR_ENA_TIE,
+ priv->base + XGMAC_DMA_INTR_ENA);
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ desc_flags = (skb->ip_summed == CHECKSUM_PARTIAL) ?
+ TXDESC_CSUM_ALL : 0;
+ entry = priv->tx_head;
+ desc = priv->dma_tx + entry;
+ first = desc;
+
+ len = skb_headlen(skb);
+ paddr = dma_map_single(priv->device, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, paddr)) {
+ dev_kfree_skb(skb);
+ return -EIO;
+ }
+ priv->tx_skbuff[entry] = skb;
+ desc_set_buf_addr_and_size(desc, paddr, len);
+
+ for (i = 0; i < nfrags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ len = frag->size;
+
+ paddr = skb_frag_dma_map(priv->device, frag, 0, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, paddr)) {
+ dev_kfree_skb(skb);
+ return -EIO;
+ }
+
+ entry = dma_ring_incr(entry, DMA_TX_RING_SZ);
+ desc = priv->dma_tx + entry;
+ priv->tx_skbuff[entry] = NULL;
+
+ desc_set_buf_addr_and_size(desc, paddr, len);
+ if (i < (nfrags - 1))
+ desc_set_tx_owner(desc, desc_flags);
+ }
+
+ /* Interrupt on completion only for the last segment */
+ if (desc != first)
+ desc_set_tx_owner(desc, desc_flags |
+ TXDESC_LAST_SEG | TXDESC_INTERRUPT);
+ else
+ desc_flags |= TXDESC_LAST_SEG | TXDESC_INTERRUPT;
+
+ /* Set owner on first desc last to avoid race condition */
+ wmb();
+ desc_set_tx_owner(first, desc_flags | TXDESC_FIRST_SEG);
+
+ priv->tx_head = dma_ring_incr(entry, DMA_TX_RING_SZ);
+
+ writel(1, priv->base + XGMAC_DMA_TX_POLL);
+
+ return NETDEV_TX_OK;
+}
+
+static int xgmac_rx(struct xgmac_priv *priv, int limit)
+{
+ unsigned int entry;
+ unsigned int count = 0;
+ struct xgmac_dma_desc *p;
+
+ while (count < limit) {
+ int ip_checksum;
+ struct sk_buff *skb;
+ int frame_len;
+
+ writel(DMA_STATUS_RI | DMA_STATUS_NIS,
+ priv->base + XGMAC_DMA_STATUS);
+
+ entry = priv->rx_tail;
+ p = priv->dma_rx + entry;
+ if (desc_get_owner(p))
+ break;
+
+ count++;
+ priv->rx_tail = dma_ring_incr(priv->rx_tail, DMA_RX_RING_SZ);
+
+ /* read the status of the incoming frame */
+ ip_checksum = desc_get_rx_status(priv, p);
+ if (ip_checksum < 0)
+ continue;
+
+ skb = priv->rx_skbuff[entry];
+ if (unlikely(!skb)) {
+ netdev_err(priv->dev, "Inconsistent Rx descriptor chain\n");
+ break;
+ }
+ priv->rx_skbuff[entry] = NULL;
+
+ frame_len = desc_get_rx_frame_len(p);
+ netdev_dbg(priv->dev, "RX frame size %d, COE status: %d\n",
+ frame_len, ip_checksum);
+
+ skb_put(skb, frame_len);
+ dma_unmap_single(priv->device, desc_get_buf_addr(p),
+ frame_len, DMA_FROM_DEVICE);
+
+ skb->protocol = eth_type_trans(skb, priv->dev);
+ skb->ip_summed = ip_checksum;
+ if (ip_checksum == CHECKSUM_NONE)
+ netif_receive_skb(skb);
+ else
+ napi_gro_receive(&priv->napi, skb);
+ }
+
+ xgmac_rx_refill(priv);
+
+ writel(1, priv->base + XGMAC_DMA_RX_POLL);
+
+ return count;
+}
+
+/**
+ * xgmac_poll - xgmac poll method (NAPI)
+ * @napi : pointer to the napi structure.
+ * @budget : maximum number of packets that the current CPU can receive from
+ * all interfaces.
+ * Description :
+ * This function implements the reception process.
+ * Also it runs the TX completion thread
+ */
+static int xgmac_poll(struct napi_struct *napi, int budget)
+{
+ struct xgmac_priv *priv = container_of(napi,
+ struct xgmac_priv, napi);
+ int work_done = 0;
+
+ xgmac_tx_complete(priv);
+ work_done = xgmac_rx(priv, budget);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
+ }
+ return work_done;
+}
+
+/**
+ * xgmac_tx_timeout
+ * @dev : Pointer to net device structure
+ * Description: this function is called when a packet transmission fails to
+ * complete within a reasonable time. The driver will mark the error in the
+ * netdev structure and arrange for the device to be reset to a sane state
+ * in order to transmit a new packet.
+ */
+static void xgmac_tx_timeout(struct net_device *dev)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+
+ /* Clear Tx resources and restart transmitting again */
+ xgmac_tx_err(priv);
+}
+
+/**
+ * xgmac_set_rx_mode - entry point for multicast addressing
+ * @dev : pointer to the device structure
+ * Description:
+ * This function is a driver entry point which gets called by the kernel
+ * whenever multicast addresses must be enabled/disabled.
+ * Return value:
+ * void.
+ */
+static void xgmac_set_rx_mode(struct net_device *dev)
+{
+ int i;
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->base;
+ unsigned int value = 0;
+ u32 hash_filter[XGMAC_NUM_HASH];
+ int reg = 1;
+ struct netdev_hw_addr *ha;
+ bool use_hash = false;
+
+ netdev_dbg(priv->dev, "# mcasts %d, # unicast %d\n",
+ netdev_mc_count(dev), netdev_uc_count(dev));
+
+ if (dev->flags & IFF_PROMISC) {
+ writel(XGMAC_FRAME_FILTER_PR, ioaddr + XGMAC_FRAME_FILTER);
+ return;
+ }
+
+ memset(hash_filter, 0, sizeof(hash_filter));
+
+ if (netdev_uc_count(dev) > XGMAC_MAX_FILTER_ADDR) {
+ use_hash = true;
+ value |= XGMAC_FRAME_FILTER_HUC | XGMAC_FRAME_FILTER_HPF;
+ }
+ netdev_for_each_uc_addr(ha, dev) {
+ if (use_hash) {
+ u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;
+
+ /* The most significant 4 bits select which of the 16 hash
+ * registers to use, while the other 5 bits select the bit
+ * within that register. */
+ hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ } else {
+ xgmac_set_mac_addr(ioaddr, ha->addr, reg);
+ reg++;
+ }
+ }
+
+ if (dev->flags & IFF_ALLMULTI) {
+ value |= XGMAC_FRAME_FILTER_PM;
+ goto out;
+ }
+
+ if ((netdev_mc_count(dev) + reg - 1) > XGMAC_MAX_FILTER_ADDR) {
+ use_hash = true;
+ value |= XGMAC_FRAME_FILTER_HMC | XGMAC_FRAME_FILTER_HPF;
+ }
+ netdev_for_each_mc_addr(ha, dev) {
+ if (use_hash) {
+ u32 bit_nr = ~ether_crc(ETH_ALEN, ha->addr) >> 23;
+
+ /* The most significant 4 bits select which of the 16 hash
+ * registers to use, while the other 5 bits select the bit
+ * within that register. */
+ hash_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ } else {
+ xgmac_set_mac_addr(ioaddr, ha->addr, reg);
+ reg++;
+ }
+ }
+
+out:
+ for (i = 0; i < XGMAC_NUM_HASH; i++)
+ writel(hash_filter[i], ioaddr + XGMAC_HASH(i));
+
+ writel(value, ioaddr + XGMAC_FRAME_FILTER);
+}
+
+/**
+ * xgmac_change_mtu - entry point to change MTU size for the device.
+ * @dev : device pointer.
+ * @new_mtu : the new MTU size for the device.
+ * Description: the Maximum Transfer Unit (MTU) is used by the network layer
+ * to drive packet transmission. Ethernet has an MTU of 1500 octets
+ * (ETH_DATA_LEN). This value can be changed with ifconfig.
+ * Return value:
+ * 0 on success and an appropriate (-)ve integer as defined in errno.h
+ * file on failure.
+ */
+static int xgmac_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+ int old_mtu;
+
+ if ((new_mtu < 46) || (new_mtu > MAX_MTU)) {
+ netdev_err(priv->dev, "invalid MTU, max MTU is: %d\n", MAX_MTU);
+ return -EINVAL;
+ }
+
+ old_mtu = dev->mtu;
+ dev->mtu = new_mtu;
+
+ /* return early if the buffer sizes will not change */
+ if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
+ return 0;
+ if (old_mtu == new_mtu)
+ return 0;
+
+ /* Stop everything, get ready to change the MTU */
+ if (!netif_running(dev))
+ return 0;
+
+ /* Bring the interface down and then back up */
+ xgmac_stop(dev);
+ return xgmac_open(dev);
+}
+
+static irqreturn_t xgmac_pmt_interrupt(int irq, void *dev_id)
+{
+ u32 intr_status;
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->base;
+
+ intr_status = readl(ioaddr + XGMAC_INT_STAT);
+ if (intr_status & XGMAC_INT_STAT_PMT) {
+ netdev_dbg(priv->dev, "received Magic frame\n");
+ /* clear the PMT bits 5 and 6 by reading the PMT */
+ readl(ioaddr + XGMAC_PMT);
+ }
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t xgmac_interrupt(int irq, void *dev_id)
+{
+ u32 intr_status;
+ bool tx_err = false;
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct xgmac_priv *priv = netdev_priv(dev);
+ struct xgmac_extra_stats *x = &priv->xstats;
+
+ /* read the status register (CSR5) */
+ intr_status = readl(priv->base + XGMAC_DMA_STATUS);
+ intr_status &= readl(priv->base + XGMAC_DMA_INTR_ENA);
+ writel(intr_status, priv->base + XGMAC_DMA_STATUS);
+
+ /* It displays the DMA process states (CSR5 register) */
+ /* ABNORMAL interrupts */
+ if (unlikely(intr_status & DMA_STATUS_AIS)) {
+ if (intr_status & DMA_STATUS_TJT) {
+ netdev_err(priv->dev, "transmit jabber\n");
+ x->tx_jabber++;
+ }
+ if (intr_status & DMA_STATUS_RU)
+ x->rx_buf_unav++;
+ if (intr_status & DMA_STATUS_RPS) {
+ netdev_err(priv->dev, "receive process stopped\n");
+ x->rx_process_stopped++;
+ }
+ if (intr_status & DMA_STATUS_ETI) {
+ netdev_err(priv->dev, "transmit early interrupt\n");
+ x->tx_early++;
+ }
+ if (intr_status & DMA_STATUS_TPS) {
+ netdev_err(priv->dev, "transmit process stopped\n");
+ x->tx_process_stopped++;
+ tx_err = true;
+ }
+ if (intr_status & DMA_STATUS_FBI) {
+ netdev_err(priv->dev, "fatal bus error\n");
+ x->fatal_bus_error++;
+ tx_err = true;
+ }
+
+ if (tx_err)
+ xgmac_tx_err(priv);
+ }
+
+ /* TX/RX NORMAL interrupts */
+ if (intr_status & (DMA_STATUS_RI | DMA_STATUS_TU)) {
+ writel(DMA_INTR_ABNORMAL, priv->base + XGMAC_DMA_INTR_ENA);
+ napi_schedule(&priv->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling receive - used by NETCONSOLE and other diagnostic tools
+ * to allow network I/O with interrupts disabled. */
+static void xgmac_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ xgmac_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+static struct rtnl_link_stats64 *
+xgmac_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *base = priv->base;
+ u32 count;
+
+ spin_lock_bh(&priv->stats_lock);
+ writel(XGMAC_MMC_CTRL_CNT_FRZ, base + XGMAC_MMC_CTRL);
+
+ storage->rx_bytes = readl(base + XGMAC_MMC_RXOCTET_G_LO);
+ storage->rx_bytes |= (u64)(readl(base + XGMAC_MMC_RXOCTET_G_HI)) << 32;
+
+ storage->rx_packets = readl(base + XGMAC_MMC_RXFRAME_GB_LO);
+ storage->multicast = readl(base + XGMAC_MMC_RXMCFRAME_G);
+ storage->rx_crc_errors = readl(base + XGMAC_MMC_RXCRCERR);
+ storage->rx_length_errors = readl(base + XGMAC_MMC_RXLENGTHERR);
+ storage->rx_missed_errors = readl(base + XGMAC_MMC_RXOVERFLOW);
+
+ storage->tx_bytes = readl(base + XGMAC_MMC_TXOCTET_G_LO);
+ storage->tx_bytes |= (u64)(readl(base + XGMAC_MMC_TXOCTET_G_HI)) << 32;
+
+ count = readl(base + XGMAC_MMC_TXFRAME_GB_LO);
+ storage->tx_errors = count - readl(base + XGMAC_MMC_TXFRAME_G_LO);
+ storage->tx_packets = count;
+ storage->tx_fifo_errors = readl(base + XGMAC_MMC_TXUNDERFLOW);
+
+ writel(0, base + XGMAC_MMC_CTRL);
+ spin_unlock_bh(&priv->stats_lock);
+ return storage;
+}
+
+static int xgmac_set_mac_address(struct net_device *dev, void *p)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->base;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ xgmac_set_mac_addr(ioaddr, dev->dev_addr, 0);
+
+ return 0;
+}
+
+static int xgmac_set_features(struct net_device *dev, netdev_features_t features)
+{
+ u32 ctrl;
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void __iomem *ioaddr = priv->base;
+ u32 changed = dev->features ^ features;
+
+ if (!(changed & NETIF_F_RXCSUM))
+ return 0;
+
+ ctrl = readl(ioaddr + XGMAC_CONTROL);
+ if (features & NETIF_F_RXCSUM)
+ ctrl |= XGMAC_CONTROL_IPC;
+ else
+ ctrl &= ~XGMAC_CONTROL_IPC;
+ writel(ctrl, ioaddr + XGMAC_CONTROL);
+
+ return 0;
+}
+
+static const struct net_device_ops xgmac_netdev_ops = {
+ .ndo_open = xgmac_open,
+ .ndo_start_xmit = xgmac_xmit,
+ .ndo_stop = xgmac_stop,
+ .ndo_change_mtu = xgmac_change_mtu,
+ .ndo_set_rx_mode = xgmac_set_rx_mode,
+ .ndo_tx_timeout = xgmac_tx_timeout,
+ .ndo_get_stats64 = xgmac_get_stats64,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = xgmac_poll_controller,
+#endif
+ .ndo_set_mac_address = xgmac_set_mac_address,
+ .ndo_set_features = xgmac_set_features,
+};
+
+static int xgmac_ethtool_getsettings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ cmd->autoneg = 0;
+ cmd->duplex = DUPLEX_FULL;
+ ethtool_cmd_speed_set(cmd, 10000);
+ cmd->supported = 0;
+ cmd->advertising = 0;
+ cmd->transceiver = XCVR_INTERNAL;
+ return 0;
+}
+
+static void xgmac_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct xgmac_priv *priv = netdev_priv(netdev);
+
+ pause->rx_pause = priv->rx_pause;
+ pause->tx_pause = priv->tx_pause;
+}
+
+static int xgmac_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct xgmac_priv *priv = netdev_priv(netdev);
+
+ if (pause->autoneg)
+ return -EINVAL;
+
+ return xgmac_set_flow_ctrl(priv, pause->rx_pause, pause->tx_pause);
+}
+
+struct xgmac_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_offset;
+ bool is_reg;
+};
+
+#define XGMAC_STAT(m) \
+ { #m, offsetof(struct xgmac_priv, xstats.m), false }
+#define XGMAC_HW_STAT(m, reg_offset) \
+ { #m, reg_offset, true }
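+
+/*
+ * XGMAC_STAT entries are read from the in-memory xstats counters, while
+ * XGMAC_HW_STAT entries are read straight from the named MMC register in
+ * xgmac_get_ethtool_stats().
+ */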
+
+static const struct xgmac_stats xgmac_gstrings_stats[] = {
+ XGMAC_STAT(tx_frame_flushed),
+ XGMAC_STAT(tx_payload_error),
+ XGMAC_STAT(tx_ip_header_error),
+ XGMAC_STAT(tx_local_fault),
+ XGMAC_STAT(tx_remote_fault),
+ XGMAC_STAT(tx_early),
+ XGMAC_STAT(tx_process_stopped),
+ XGMAC_STAT(tx_jabber),
+ XGMAC_STAT(rx_buf_unav),
+ XGMAC_STAT(rx_process_stopped),
+ XGMAC_STAT(rx_payload_error),
+ XGMAC_STAT(rx_ip_header_error),
+ XGMAC_STAT(rx_da_filter_fail),
+ XGMAC_STAT(rx_sa_filter_fail),
+ XGMAC_STAT(fatal_bus_error),
+ XGMAC_HW_STAT(rx_watchdog, XGMAC_MMC_RXWATCHDOG),
+ XGMAC_HW_STAT(tx_vlan, XGMAC_MMC_TXVLANFRAME),
+ XGMAC_HW_STAT(rx_vlan, XGMAC_MMC_RXVLANFRAME),
+ XGMAC_HW_STAT(tx_pause, XGMAC_MMC_TXPAUSEFRAME),
+ XGMAC_HW_STAT(rx_pause, XGMAC_MMC_RXPAUSEFRAME),
+};
+#define XGMAC_STATS_LEN ARRAY_SIZE(xgmac_gstrings_stats)
+
+static void xgmac_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *dummy,
+ u64 *data)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+ void *p = priv;
+ int i;
+
+ for (i = 0; i < XGMAC_STATS_LEN; i++) {
+ if (xgmac_gstrings_stats[i].is_reg)
+ *data++ = readl(priv->base +
+ xgmac_gstrings_stats[i].stat_offset);
+ else
+ *data++ = *(u32 *)(p +
+ xgmac_gstrings_stats[i].stat_offset);
+ }
+}
+
+static int xgmac_get_sset_count(struct net_device *netdev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return XGMAC_STATS_LEN;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void xgmac_get_strings(struct net_device *dev, u32 stringset,
+ u8 *data)
+{
+ int i;
+ u8 *p = data;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < XGMAC_STATS_LEN; i++) {
+ memcpy(p, xgmac_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static void xgmac_get_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+
+ if (device_can_wakeup(priv->device)) {
+ wol->supported = WAKE_MAGIC | WAKE_UCAST;
+ wol->wolopts = priv->wolopts;
+ }
+}
+
+static int xgmac_set_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct xgmac_priv *priv = netdev_priv(dev);
+ u32 support = WAKE_MAGIC | WAKE_UCAST;
+
+ if (!device_can_wakeup(priv->device))
+ return -ENOTSUPP;
+
+ if (wol->wolopts & ~support)
+ return -EINVAL;
+
+ priv->wolopts = wol->wolopts;
+
+ if (wol->wolopts) {
+ device_set_wakeup_enable(priv->device, 1);
+ enable_irq_wake(dev->irq);
+ } else {
+ device_set_wakeup_enable(priv->device, 0);
+ disable_irq_wake(dev->irq);
+ }
+
+ return 0;
+}
+
+static const struct ethtool_ops xgmac_ethtool_ops = {
+ .get_settings = xgmac_ethtool_getsettings,
+ .get_link = ethtool_op_get_link,
+ .get_pauseparam = xgmac_get_pauseparam,
+ .set_pauseparam = xgmac_set_pauseparam,
+ .get_ethtool_stats = xgmac_get_ethtool_stats,
+ .get_strings = xgmac_get_strings,
+ .get_wol = xgmac_get_wol,
+ .set_wol = xgmac_set_wol,
+ .get_sset_count = xgmac_get_sset_count,
+};
+
+/**
+ * xgmac_probe
+ * @pdev: platform device pointer
+ * Description: initializes and registers an xgmac network device bound to @pdev.
+ */
+static int xgmac_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res;
+ struct net_device *ndev = NULL;
+ struct xgmac_priv *priv = NULL;
+ u32 uid;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ if (!request_mem_region(res->start, resource_size(res), pdev->name))
+ return -EBUSY;
+
+ ndev = alloc_etherdev(sizeof(struct xgmac_priv));
+ if (!ndev) {
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ priv = netdev_priv(ndev);
+ platform_set_drvdata(pdev, ndev);
+ ether_setup(ndev);
+ ndev->netdev_ops = &xgmac_netdev_ops;
+ SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
+ spin_lock_init(&priv->stats_lock);
+
+ priv->device = &pdev->dev;
+ priv->dev = ndev;
+ priv->rx_pause = 1;
+ priv->tx_pause = 1;
+
+ priv->base = ioremap(res->start, resource_size(res));
+ if (!priv->base) {
+ netdev_err(ndev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto err_io;
+ }
+
+ uid = readl(priv->base + XGMAC_VERSION);
+ netdev_info(ndev, "h/w version is 0x%x\n", uid);
+
+ writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq < 0) {
+ netdev_err(ndev, "No irq resource\n");
+ ret = ndev->irq;
+ goto err_irq;
+ }
+
+ ret = request_irq(ndev->irq, xgmac_interrupt, 0,
+ dev_name(&pdev->dev), ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "Could not request irq %d - ret %d)\n",
+ ndev->irq, ret);
+ goto err_irq;
+ }
+
+ priv->pmt_irq = platform_get_irq(pdev, 1);
+ if (priv->pmt_irq < 0) {
+ netdev_err(ndev, "No pmt irq resource\n");
+ ret = priv->pmt_irq;
+ goto err_pmt_irq;
+ }
+
+ ret = request_irq(priv->pmt_irq, xgmac_pmt_interrupt, 0,
+ dev_name(&pdev->dev), ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "Could not request irq %d - ret %d)\n",
+ priv->pmt_irq, ret);
+ goto err_pmt_irq;
+ }
+
+ device_set_wakeup_capable(&pdev->dev, 1);
+ if (device_can_wakeup(priv->device))
+ priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
+
+ ndev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;
+ if (readl(priv->base + XGMAC_DMA_HW_FEATURE) & DMA_HW_FEAT_TXCOESEL)
+ ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM;
+ ndev->features |= ndev->hw_features;
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* Get the MAC address */
+ xgmac_get_mac_addr(priv->base, ndev->dev_addr, 0);
+ if (!is_valid_ether_addr(ndev->dev_addr))
+ netdev_warn(ndev, "MAC address %pM not valid",
+ ndev->dev_addr);
+
+ netif_napi_add(ndev, &priv->napi, xgmac_poll, 64);
+ ret = register_netdev(ndev);
+ if (ret)
+ goto err_reg;
+
+ return 0;
+
+err_reg:
+ netif_napi_del(&priv->napi);
+ free_irq(priv->pmt_irq, ndev);
+err_pmt_irq:
+ free_irq(ndev->irq, ndev);
+err_irq:
+ iounmap(priv->base);
+err_io:
+ free_netdev(ndev);
+err_alloc:
+ release_mem_region(res->start, resource_size(res));
+ platform_set_drvdata(pdev, NULL);
+ return ret;
+}
+
+/**
+ * xgmac_remove
+ * @pdev: platform device pointer
+ * Description: this function disables the MAC, frees the IRQ lines,
+ * unregisters the network device and releases the mapped I/O and
+ * reserved memory resources.
+ */
+static int xgmac_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct xgmac_priv *priv = netdev_priv(ndev);
+ struct resource *res;
+
+ xgmac_mac_disable(priv->base);
+
+ /* Free the IRQ lines */
+ free_irq(ndev->irq, ndev);
+ free_irq(priv->pmt_irq, ndev);
+
+ platform_set_drvdata(pdev, NULL);
+ unregister_netdev(ndev);
+ netif_napi_del(&priv->napi);
+
+ iounmap(priv->base);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ free_netdev(ndev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
+{
+ unsigned int pmt = 0;
+
+ if (mode & WAKE_MAGIC)
+ pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT;
+ if (mode & WAKE_UCAST)
+ pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;
+
+ writel(pmt, ioaddr + XGMAC_PMT);
+}
+
+static int xgmac_suspend(struct device *dev)
+{
+ struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
+ struct xgmac_priv *priv = netdev_priv(ndev);
+ u32 value;
+
+ if (!ndev || !netif_running(ndev))
+ return 0;
+
+ netif_device_detach(ndev);
+ napi_disable(&priv->napi);
+ writel(0, priv->base + XGMAC_DMA_INTR_ENA);
+
+ if (device_may_wakeup(priv->device)) {
+ /* Stop TX/RX DMA Only */
+ value = readl(priv->base + XGMAC_DMA_CONTROL);
+ value &= ~(DMA_CONTROL_ST | DMA_CONTROL_SR);
+ writel(value, priv->base + XGMAC_DMA_CONTROL);
+
+ xgmac_pmt(priv->base, priv->wolopts);
+ } else
+ xgmac_mac_disable(priv->base);
+
+ return 0;
+}
+
+static int xgmac_resume(struct device *dev)
+{
+ struct net_device *ndev = platform_get_drvdata(to_platform_device(dev));
+ struct xgmac_priv *priv = netdev_priv(ndev);
+ void __iomem *ioaddr = priv->base;
+
+ if (!netif_running(ndev))
+ return 0;
+
+ xgmac_pmt(ioaddr, 0);
+
+ /* Enable the MAC and DMA */
+ xgmac_mac_enable(ioaddr);
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
+
+ netif_device_attach(ndev);
+ napi_enable(&priv->napi);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(xgmac_pm_ops, xgmac_suspend, xgmac_resume);
+#define XGMAC_PM_OPS (&xgmac_pm_ops)
+#else
+#define XGMAC_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct of_device_id xgmac_of_match[] = {
+ { .compatible = "calxeda,hb-xgmac", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, xgmac_of_match);
+
+static struct platform_driver xgmac_driver = {
+ .driver = {
+ .name = "calxedaxgmac",
+ .of_match_table = xgmac_of_match,
+ .pm = XGMAC_PM_OPS,
+ },
+ .probe = xgmac_probe,
+ .remove = xgmac_remove,
+};
+
+module_platform_driver(xgmac_driver);
+
+MODULE_AUTHOR("Calxeda, Inc.");
+MODULE_DESCRIPTION("Calxeda 10G XGMAC driver");
+MODULE_LICENSE("GPL v2");
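The software half of xgmac_gstrings_stats above uses the common offsetof()-based table idiom: each entry stores a counter name plus its byte offset inside struct xgmac_priv, so xgmac_get_ethtool_stats() walks one table instead of copying fields by hand. Below is a minimal stand-alone sketch of that idiom in plain C; the demo_* names are invented for the illustration and none of this is driver code.

#include <stddef.h>
#include <stdio.h>

struct demo_xstats {            /* stands in for the driver's xstats struct */
	unsigned int tx_jabber;
	unsigned int rx_buf_unav;
};

struct demo_priv {              /* stands in for struct xgmac_priv */
	struct demo_xstats xstats;
};

struct demo_stat {              /* mirrors the software half of struct xgmac_stats */
	const char *name;
	size_t offset;          /* byte offset inside struct demo_priv */
};

#define DEMO_STAT(m) { #m, offsetof(struct demo_priv, xstats.m) }

static const struct demo_stat demo_stats[] = {
	DEMO_STAT(tx_jabber),
	DEMO_STAT(rx_buf_unav),
};

int main(void)
{
	struct demo_priv priv = { .xstats = { .tx_jabber = 3, .rx_buf_unav = 7 } };
	const char *base = (const char *)&priv;
	size_t i;

	/* Same walk xgmac_get_ethtool_stats() does for its non-register stats */
	for (i = 0; i < sizeof(demo_stats) / sizeof(demo_stats[0]); i++)
		printf("%-12s %u\n", demo_stats[i].name,
		       *(const unsigned int *)(base + demo_stats[i].offset));
	return 0;
}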
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index ca26d97..1d17c92 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -434,10 +434,10 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct adapter *adapter = dev->ml_priv;
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "N/A");
- strcpy(info->bus_info, pci_name(adapter->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(adapter->pdev),
+ sizeof(info->bus_info));
}
static int get_sset_count(struct net_device *dev, int sset)
@@ -849,7 +849,8 @@ static int t1_set_mac_addr(struct net_device *dev, void *p)
return 0;
}
-static u32 t1_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t t1_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -863,9 +864,9 @@ static u32 t1_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int t1_set_features(struct net_device *dev, u32 features)
+static int t1_set_features(struct net_device *dev, netdev_features_t features)
{
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
struct adapter *adapter = dev->ml_priv;
if (changed & NETIF_F_HW_VLAN_RX)
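This and the later Chelsio, Davicom and Cisco hunks share one pattern: the feature mask widens from u32 to netdev_features_t, and each ndo_set_features handler XORs the old and new masks so that only toggled bits trigger hardware reprogramming. A minimal stand-alone sketch of that XOR idiom, with invented DEMO_F_* flag names and no kernel types:

#include <stdint.h>
#include <stdio.h>

/* 64-bit mask mirrors the width of netdev_features_t */
#define DEMO_F_RXCSUM      (UINT64_C(1) << 0)
#define DEMO_F_HW_VLAN_RX  (UINT64_C(1) << 1)

static void demo_set_features(uint64_t old, uint64_t new)
{
	uint64_t changed = old ^ new;   /* bits that were actually toggled */

	if (!(changed & DEMO_F_HW_VLAN_RX)) {
		printf("VLAN RX acceleration unchanged, nothing to do\n");
		return;
	}
	printf("VLAN RX acceleration turned %s, reprogram the NIC\n",
	       (new & DEMO_F_HW_VLAN_RX) ? "on" : "off");
}

int main(void)
{
	demo_set_features(DEMO_F_RXCSUM, DEMO_F_RXCSUM | DEMO_F_HW_VLAN_RX);
	demo_set_features(DEMO_F_RXCSUM, DEMO_F_RXCSUM);
	return 0;
}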
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index f9b6023..47a8435 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -742,7 +742,7 @@ static inline void setup_ring_params(struct adapter *adapter, u64 addr,
/*
* Enable/disable VLAN acceleration.
*/
-void t1_vlan_mode(struct adapter *adapter, u32 features)
+void t1_vlan_mode(struct adapter *adapter, netdev_features_t features)
{
struct sge *sge = adapter->sge;
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.h b/drivers/net/ethernet/chelsio/cxgb/sge.h
index e03980b..b9bf16b 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.h
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.h
@@ -79,7 +79,7 @@ irqreturn_t t1_interrupt(int irq, void *cookie);
int t1_poll(struct napi_struct *, int);
netdev_tx_t t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
-void t1_vlan_mode(struct adapter *adapter, u32 features);
+void t1_vlan_mode(struct adapter *adapter, netdev_features_t features);
void t1_sge_start(struct sge *);
void t1_sge_stop(struct sge *);
int t1_sge_intr_error_handler(struct sge *);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 4d15c8f..857cc25 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1576,12 +1576,11 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
t3_get_tp_version(adapter, &tp_vers);
spin_unlock(&adapter->stats_lock);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(adapter->pdev));
- if (!fw_vers)
- strcpy(info->fw_version, "N/A");
- else {
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(adapter->pdev),
+ sizeof(info->bus_info));
+ if (fw_vers)
snprintf(info->fw_version, sizeof(info->fw_version),
"%s %u.%u.%u TP %u.%u.%u",
G_FW_VERSION_TYPE(fw_vers) ? "T" : "N",
@@ -1591,7 +1590,6 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
G_TP_VERSION_MAJOR(tp_vers),
G_TP_VERSION_MINOR(tp_vers),
G_TP_VERSION_MICRO(tp_vers));
- }
}
static void get_strings(struct net_device *dev, u32 stringset, u8 * data)
@@ -2531,7 +2529,7 @@ static void t3_synchronize_rx(struct adapter *adap, const struct port_info *p)
}
}
-static void cxgb_vlan_mode(struct net_device *dev, u32 features)
+static void cxgb_vlan_mode(struct net_device *dev, netdev_features_t features)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
@@ -2552,7 +2550,8 @@ static void cxgb_vlan_mode(struct net_device *dev, u32 features)
t3_synchronize_rx(adapter, pi);
}
-static u32 cxgb_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t cxgb_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -2566,9 +2565,9 @@ static u32 cxgb_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int cxgb_set_features(struct net_device *dev, u32 features)
+static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
cxgb_vlan_mode(dev, features);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index 90ff131..65e4b28 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -969,7 +969,7 @@ static int nb_callback(struct notifier_block *self, unsigned long event,
case (NETEVENT_REDIRECT):{
struct netevent_redirect *nr = ctx;
cxgb_redirect(nr->old, nr->new);
- cxgb_neigh_update(dst_get_neighbour(nr->new));
+ cxgb_neigh_update(dst_get_neighbour_noref(nr->new));
break;
}
default:
@@ -1072,8 +1072,11 @@ static int is_offloading(struct net_device *dev)
static void cxgb_neigh_update(struct neighbour *neigh)
{
- struct net_device *dev = neigh->dev;
+ struct net_device *dev;
+ if (!neigh)
+ return;
+ dev = neigh->dev;
if (dev && (is_offloading(dev))) {
struct t3cdev *tdev = dev2t3cdev(dev);
@@ -1107,6 +1110,7 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
{
struct net_device *olddev, *newdev;
+ struct neighbour *n;
struct tid_info *ti;
struct t3cdev *tdev;
u32 tid;
@@ -1114,8 +1118,16 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
struct l2t_entry *e;
struct t3c_tid_entry *te;
- olddev = dst_get_neighbour(old)->dev;
- newdev = dst_get_neighbour(new)->dev;
+ n = dst_get_neighbour_noref(old);
+ if (!n)
+ return;
+ olddev = n->dev;
+
+ n = dst_get_neighbour_noref(new);
+ if (!n)
+ return;
+ newdev = n->dev;
+
if (!is_offloading(olddev))
return;
if (!is_offloading(newdev)) {
@@ -1132,7 +1144,7 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
}
/* Add new L2T entry */
- e = t3_l2t_get(tdev, dst_get_neighbour(new), newdev);
+ e = t3_l2t_get(tdev, new, newdev);
if (!e) {
printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n",
__func__);
@@ -1301,7 +1313,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
out_free_l2t:
t3_free_l2t(L2DATA(dev));
- rcu_assign_pointer(dev->l2opt, NULL);
+ RCU_INIT_POINTER(dev->l2opt, NULL);
out_free:
kfree(t);
return err;
@@ -1329,7 +1341,7 @@ void cxgb3_offload_deactivate(struct adapter *adapter)
rcu_read_lock();
d = L2DATA(tdev);
rcu_read_unlock();
- rcu_assign_pointer(tdev->l2opt, NULL);
+ RCU_INIT_POINTER(tdev->l2opt, NULL);
call_rcu(&d->rcu_head, clean_l2_data);
if (t->nofail_skb)
kfree_skb(t->nofail_skb);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
index 70fec8b..3fa3c88 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c
@@ -298,18 +298,31 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
spin_unlock(&e->lock);
}
-struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
+struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
struct net_device *dev)
{
struct l2t_entry *e = NULL;
+ struct neighbour *neigh;
+ struct port_info *p;
struct l2t_data *d;
int hash;
- u32 addr = *(u32 *) neigh->primary_key;
- int ifidx = neigh->dev->ifindex;
- struct port_info *p = netdev_priv(dev);
- int smt_idx = p->port_id;
+ u32 addr;
+ int ifidx;
+ int smt_idx;
rcu_read_lock();
+ neigh = dst_get_neighbour_noref(dst);
+ if (!neigh)
+ goto done_rcu;
+
+ addr = *(u32 *) neigh->primary_key;
+ ifidx = neigh->dev->ifindex;
+
+ if (!dev)
+ dev = neigh->dev;
+ p = netdev_priv(dev);
+ smt_idx = p->port_id;
+
d = L2DATA(cdev);
if (!d)
goto done_rcu;
@@ -323,7 +336,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
l2t_hold(d, e);
if (atomic_read(&e->refcnt) == 1)
reuse_entry(e, neigh);
- goto done;
+ goto done_unlock;
}
/* Need to allocate a new entry */
@@ -344,7 +357,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
e->vlan = VLAN_NONE;
spin_unlock(&e->lock);
}
-done:
+done_unlock:
write_unlock_bh(&d->lock);
done_rcu:
rcu_read_unlock();
diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
index c5f5479..c4e8643 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
@@ -109,7 +109,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb,
void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e);
void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh);
-struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh,
+struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
struct net_device *dev);
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
struct l2t_entry *e);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 4c8f42a..9d76e59 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -196,6 +196,8 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
CH_DEVICE(0x4408, 4),
CH_DEVICE(0x4409, 4),
CH_DEVICE(0x440a, 4),
+ CH_DEVICE(0x440d, 4),
+ CH_DEVICE(0x440e, 4),
{ 0, }
};
@@ -243,7 +245,7 @@ module_param_array(intr_cnt, uint, NULL, 0644);
MODULE_PARM_DESC(intr_cnt,
"thresholds 1..3 for queue interrupt packet counters");
-static int vf_acls;
+static bool vf_acls;
#ifdef CONFIG_PCI_IOV
module_param(vf_acls, bool, 0644);
@@ -1002,13 +1004,12 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct adapter *adapter = netdev2adap(dev);
- strcpy(info->driver, KBUILD_MODNAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(adapter->pdev));
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(adapter->pdev),
+ sizeof(info->bus_info));
- if (!adapter->params.fw_vers)
- strcpy(info->fw_version, "N/A");
- else
+ if (adapter->params.fw_vers)
snprintf(info->fw_version, sizeof(info->fw_version),
"%u.%u.%u.%u, TP %u.%u.%u.%u",
FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
@@ -1855,10 +1856,10 @@ static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return err;
}
-static int cxgb_set_features(struct net_device *dev, u32 features)
+static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
const struct port_info *pi = netdev_priv(dev);
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
int err;
if (!(changed & NETIF_F_HW_VLAN_RX))
@@ -1872,30 +1873,30 @@ static int cxgb_set_features(struct net_device *dev, u32 features)
return err;
}
-static int get_rss_table(struct net_device *dev, struct ethtool_rxfh_indir *p)
+static u32 get_rss_table_size(struct net_device *dev)
+{
+ const struct port_info *pi = netdev_priv(dev);
+
+ return pi->rss_size;
+}
+
+static int get_rss_table(struct net_device *dev, u32 *p)
{
const struct port_info *pi = netdev_priv(dev);
- unsigned int n = min_t(unsigned int, p->size, pi->rss_size);
+ unsigned int n = pi->rss_size;
- p->size = pi->rss_size;
while (n--)
- p->ring_index[n] = pi->rss[n];
+ p[n] = pi->rss[n];
return 0;
}
-static int set_rss_table(struct net_device *dev,
- const struct ethtool_rxfh_indir *p)
+static int set_rss_table(struct net_device *dev, const u32 *p)
{
unsigned int i;
struct port_info *pi = netdev_priv(dev);
- if (p->size != pi->rss_size)
- return -EINVAL;
- for (i = 0; i < p->size; i++)
- if (p->ring_index[i] >= pi->nqsets)
- return -EINVAL;
- for (i = 0; i < p->size; i++)
- pi->rss[i] = p->ring_index[i];
+ for (i = 0; i < pi->rss_size; i++)
+ pi->rss[i] = p[i];
if (pi->adapter->flags & FULL_INIT_DONE)
return write_rss(pi, pi->rss);
return 0;
@@ -1964,7 +1965,7 @@ static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
return -EOPNOTSUPP;
}
-static struct ethtool_ops cxgb_ethtool_ops = {
+static const struct ethtool_ops cxgb_ethtool_ops = {
.get_settings = get_settings,
.set_settings = set_settings,
.get_drvinfo = get_drvinfo,
@@ -1990,6 +1991,7 @@ static struct ethtool_ops cxgb_ethtool_ops = {
.get_wol = get_wol,
.set_wol = set_wol,
.get_rxnfc = get_rxnfc,
+ .get_rxfh_indir_size = get_rss_table_size,
.get_rxfh_indir = get_rss_table,
.set_rxfh_indir = set_rss_table,
.flash_device = set_flash,
@@ -3449,7 +3451,7 @@ static int __devinit init_rss(struct adapter *adap)
if (!pi->rss)
return -ENOMEM;
for (j = 0; j < pi->rss_size; j++)
- pi->rss[j] = j % pi->nqsets;
+ pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
}
return 0;
}
@@ -3537,7 +3539,7 @@ static int __devinit init_one(struct pci_dev *pdev,
{
int func, i, err;
struct port_info *pi;
- unsigned int highdma = 0;
+ bool highdma = false;
struct adapter *adapter = NULL;
printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
@@ -3563,7 +3565,7 @@ static int __devinit init_one(struct pci_dev *pdev,
}
if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- highdma = NETIF_F_HIGHDMA;
+ highdma = true;
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
@@ -3637,7 +3639,9 @@ static int __devinit init_one(struct pci_dev *pdev,
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_RXHASH |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
- netdev->features |= netdev->hw_features | highdma;
+ if (highdma)
+ netdev->hw_features |= NETIF_F_HIGHDMA;
+ netdev->features |= netdev->hw_features;
netdev->vlan_features = netdev->features & VLAN_FEAT;
netdev->priv_flags |= IFF_UNICAST_FLT;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 140254c..2dae795 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -491,7 +491,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
__be64 *d = &q->desc[q->pidx];
struct rx_sw_desc *sd = &q->sdesc[q->pidx];
- gfp |= __GFP_NOWARN; /* failures are expected */
+ gfp |= __GFP_NOWARN | __GFP_COLD;
#if FL_PG_ORDER > 0
/*
@@ -528,7 +528,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
#endif
while (n--) {
- pg = __netdev_alloc_page(adap->port[0], gfp);
+ pg = alloc_page(gfp);
if (unlikely(!pg)) {
q->alloc_failed++;
break;
@@ -537,7 +537,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
- netdev_free_page(adap->port[0], pg);
+ put_page(pg);
goto out;
}
*d++ = cpu_to_be64(mapping);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index da9072b..d963c1d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1092,7 +1092,8 @@ static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
return ret;
}
-static u32 cxgb4vf_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -1106,10 +1107,11 @@ static u32 cxgb4vf_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int cxgb4vf_set_features(struct net_device *dev, u32 features)
+static int cxgb4vf_set_features(struct net_device *dev,
+ netdev_features_t features)
{
struct port_info *pi = netdev_priv(dev);
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
@@ -1203,9 +1205,10 @@ static void cxgb4vf_get_drvinfo(struct net_device *dev,
{
struct adapter *adapter = netdev2adap(dev);
- strcpy(drvinfo->driver, KBUILD_MODNAME);
- strcpy(drvinfo->version, DRV_VERSION);
- strcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)));
+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
+ sizeof(drvinfo->bus_info));
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%u.%u.%u.%u, TP %u.%u.%u.%u",
FW_HDR_FW_VER_MAJOR_GET(adapter->params.dev.fwrev),
@@ -1561,7 +1564,7 @@ static void cxgb4vf_get_wol(struct net_device *dev,
*/
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
-static struct ethtool_ops cxgb4vf_ethtool_ops = {
+static const struct ethtool_ops cxgb4vf_ethtool_ops = {
.get_settings = cxgb4vf_get_settings,
.get_drvinfo = cxgb4vf_get_drvinfo,
.get_msglevel = cxgb4vf_get_msglevel,
@@ -2000,7 +2003,7 @@ static const struct file_operations interfaces_proc_fops = {
*/
struct cxgb4vf_debugfs_entry {
const char *name; /* name of debugfs node */
- mode_t mode; /* file system mode */
+ umode_t mode; /* file system mode */
const struct file_operations *fops;
};
@@ -2889,6 +2892,8 @@ static struct pci_device_id cxgb4vf_pci_tbl[] = {
CH_DEVICE(0x4808, 0), /* T420-cx */
CH_DEVICE(0x4809, 0), /* T420-bt */
CH_DEVICE(0x480a, 0), /* T404-bt */
+ CH_DEVICE(0x480d, 0), /* T480-cr */
+ CH_DEVICE(0x480e, 0), /* T440-lp-cr */
{ 0, }
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 8d5d55a..0bd585b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -653,8 +653,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
alloc_small_pages:
while (n--) {
- page = __netdev_alloc_page(adapter->port[0],
- gfp | __GFP_NOWARN);
+ page = alloc_page(gfp | __GFP_NOWARN | __GFP_COLD);
if (unlikely(!page)) {
fl->alloc_failed++;
break;
@@ -664,7 +663,7 @@ alloc_small_pages:
dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
- netdev_free_page(adapter->port[0], page);
+ put_page(page);
break;
}
*d++ = cpu_to_be64(dma_addr);
@@ -684,7 +683,7 @@ out:
/*
* Update our accounting state to incorporate the new Free List
* buffers, tell the hardware about them and return the number of
- * bufers which we were able to allocate.
+ * buffers which we were able to allocate.
*/
cred = fl->avail - cred;
fl->pend_cred += cred;
diff --git a/drivers/net/ethernet/cisco/enic/cq_enet_desc.h b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
index c2c0680..ac37cac 100644
--- a/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
+++ b/drivers/net/ethernet/cisco/enic/cq_enet_desc.h
@@ -157,7 +157,7 @@ static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
CQ_ENET_RQ_DESC_FCOE_FC_CRC_OK) ? 1 : 0;
*fcoe_enc_error = (desc->flags &
CQ_ENET_RQ_DESC_FCOE_ENC_ERROR) ? 1 : 0;
- *fcoe_eof = (u8)((desc->checksum_fcoe >>
+ *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >>
CQ_ENET_RQ_DESC_FCOE_EOF_SHIFT) &
CQ_ENET_RQ_DESC_FCOE_EOF_MASK);
*checksum = 0;
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index fe0c29a..c52295c 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -32,7 +32,7 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "2.1.1.28"
+#define DRV_VERSION "2.1.1.31"
#define DRV_COPYRIGHT "Copyright 2008-2011 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
@@ -94,7 +94,7 @@ struct enic {
u32 rx_coalesce_usecs;
u32 tx_coalesce_usecs;
#ifdef CONFIG_PCI_IOV
- u32 num_vfs;
+ u16 num_vfs;
#endif
struct enic_port_profile *pp;
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.c b/drivers/net/ethernet/cisco/enic/enic_dev.c
index fd6247b..bf0fc56 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.c
@@ -212,23 +212,29 @@ int enic_dev_deinit_done(struct enic *enic, int *status)
}
/* rtnl lock is held */
-void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct enic *enic = netdev_priv(netdev);
+ int err;
spin_lock(&enic->devcmd_lock);
- enic_add_vlan(enic, vid);
+ err = enic_add_vlan(enic, vid);
spin_unlock(&enic->devcmd_lock);
+
+ return err;
}
/* rtnl lock is held */
-void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct enic *enic = netdev_priv(netdev);
+ int err;
spin_lock(&enic->devcmd_lock);
- enic_del_vlan(enic, vid);
+ err = enic_del_vlan(enic, vid);
spin_unlock(&enic->devcmd_lock);
+
+ return err;
}
int enic_dev_enable2(struct enic *enic, int active)
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
index 1f83a47..da1cba3 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -46,8 +46,8 @@ int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
int broadcast, int promisc, int allmulti);
int enic_dev_add_addr(struct enic *enic, u8 *addr);
int enic_dev_del_addr(struct enic *enic, u8 *addr);
-void enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
-void enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+int enic_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+int enic_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
int enic_dev_notify_unset(struct enic *enic);
int enic_dev_hang_notify(struct enic *enic);
int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic);
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index c3786fd..0e4edd3 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -57,11 +57,13 @@
#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
#define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN 0x0044 /* enet dynamic vnic */
+#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF 0x0071 /* enet SRIOV VF */
/* Supported devices */
static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
+ { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
{ 0, } /* end of table */
};
@@ -132,6 +134,11 @@ int enic_sriov_enabled(struct enic *enic)
return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
}
+static int enic_is_sriov_vf(struct enic *enic)
+{
+ return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
+}
+
int enic_is_valid_vf(struct enic *enic, int vf)
{
#ifdef CONFIG_PCI_IOV
@@ -217,11 +224,11 @@ static void enic_get_drvinfo(struct net_device *netdev,
enic_dev_fw_info(enic, &fw_info);
- strncpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
- strncpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
- strncpy(drvinfo->fw_version, fw_info->fw_version,
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, fw_info->fw_version,
sizeof(drvinfo->fw_version));
- strncpy(drvinfo->bus_info, pci_name(enic->pdev),
+ strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
sizeof(drvinfo->bus_info));
}
@@ -437,7 +444,7 @@ static void enic_mtu_check(struct enic *enic)
if (mtu && mtu != enic->port_mtu) {
enic->port_mtu = mtu;
- if (enic_is_dynamic(enic)) {
+ if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
mtu = max_t(int, ENIC_MIN_MTU,
min_t(int, ENIC_MAX_MTU, mtu));
if (mtu != netdev->mtu)
@@ -849,7 +856,7 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
{
struct enic *enic = netdev_priv(netdev);
- if (enic_is_dynamic(enic)) {
+ if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
return -EADDRNOTAVAIL;
} else {
@@ -1608,7 +1615,7 @@ static int enic_open(struct net_device *netdev)
for (i = 0; i < enic->rq_count; i++)
vnic_rq_enable(&enic->rq[i]);
- if (!enic_is_dynamic(enic))
+ if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
enic_dev_add_station_addr(enic);
enic_set_rx_mode(netdev);
@@ -1659,7 +1666,7 @@ static int enic_stop(struct net_device *netdev)
netif_carrier_off(netdev);
netif_tx_disable(netdev);
- if (!enic_is_dynamic(enic))
+ if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
enic_dev_del_station_addr(enic);
for (i = 0; i < enic->wq_count; i++) {
@@ -1696,7 +1703,7 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu)
if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
return -EINVAL;
- if (enic_is_dynamic(enic))
+ if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
return -EOPNOTSUPP;
if (running)
@@ -2263,10 +2270,10 @@ static int __devinit enic_probe(struct pci_dev *pdev,
int using_dac = 0;
unsigned int i;
int err;
- int num_pps = 1;
#ifdef CONFIG_PCI_IOV
int pos = 0;
#endif
+ int num_pps = 1;
/* Allocate net device structure and initialize. Private
* instance data is initialized to zero.
@@ -2363,7 +2370,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
if (pos) {
pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
- (u16 *)&enic->num_vfs);
+ &enic->num_vfs);
if (enic->num_vfs) {
err = pci_enable_sriov(pdev, enic->num_vfs);
if (err) {
@@ -2376,14 +2383,14 @@ static int __devinit enic_probe(struct pci_dev *pdev,
num_pps = enic->num_vfs;
}
}
-
#endif
+
/* Allocate structure for port profiles */
- enic->pp = kzalloc(num_pps * sizeof(*enic->pp), GFP_KERNEL);
+ enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
if (!enic->pp) {
pr_err("port profile alloc failed, aborting\n");
err = -ENOMEM;
- goto err_out_disable_sriov;
+ goto err_out_disable_sriov_pp;
}
/* Issue device open to get device in known state
@@ -2392,7 +2399,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
err = enic_dev_open(enic);
if (err) {
dev_err(dev, "vNIC dev open failed, aborting\n");
- goto err_out_free_pp;
+ goto err_out_disable_sriov;
}
/* Setup devcmd lock
@@ -2426,7 +2433,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
* called later by an upper layer.
*/
- if (!enic_is_dynamic(enic)) {
+ if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) {
err = vnic_dev_init(enic->vdev, 0);
if (err) {
dev_err(dev, "vNIC dev init failed, aborting\n");
@@ -2460,8 +2467,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
(void)enic_change_mtu(netdev, enic->port_mtu);
#ifdef CONFIG_PCI_IOV
- if (enic_is_dynamic(enic) && pdev->is_virtfn &&
- is_zero_ether_addr(enic->mac_addr))
+ if (enic_is_sriov_vf(enic) && is_zero_ether_addr(enic->mac_addr))
random_ether_addr(enic->mac_addr);
#endif
@@ -2474,7 +2480,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
- if (enic_is_dynamic(enic))
+ if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
netdev->netdev_ops = &enic_netdev_dynamic_ops;
else
netdev->netdev_ops = &enic_netdev_ops;
@@ -2516,17 +2522,17 @@ err_out_dev_deinit:
enic_dev_deinit(enic);
err_out_dev_close:
vnic_dev_close(enic->vdev);
-err_out_free_pp:
- kfree(enic->pp);
err_out_disable_sriov:
+ kfree(enic->pp);
+err_out_disable_sriov_pp:
#ifdef CONFIG_PCI_IOV
if (enic_sriov_enabled(enic)) {
pci_disable_sriov(pdev);
enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
}
err_out_vnic_unregister:
- vnic_dev_unregister(enic->vdev);
#endif
+ vnic_dev_unregister(enic->vdev);
err_out_iounmap:
enic_iounmap(enic);
err_out_release_regions:
diff --git a/drivers/net/ethernet/cisco/enic/enic_pp.c b/drivers/net/ethernet/cisco/enic/enic_pp.c
index 22bf03a..c347b62 100644
--- a/drivers/net/ethernet/cisco/enic/enic_pp.c
+++ b/drivers/net/ethernet/cisco/enic/enic_pp.c
@@ -72,7 +72,7 @@ static int enic_set_port_profile(struct enic *enic, int vf)
struct enic_port_profile *pp;
struct vic_provinfo *vp;
const u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
- const u16 os_type = htons(VIC_GENERIC_PROV_OS_TYPE_LINUX);
+ const __be16 os_type = htons(VIC_GENERIC_PROV_OS_TYPE_LINUX);
char uuid_str[38];
char client_mac_str[18];
u8 *client_mac;
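The enic_pp.c hunk above only changes an annotation: htons() yields a big-endian value, and storing it in a __be16 instead of a plain u16 lets sparse catch accidental host-order use. As a small stand-alone reminder of what htons() does to the byte layout (userspace C, not driver code):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t host = 0x1234;
	uint16_t wire = htons(host);    /* big-endian, as sent on the wire */
	const unsigned char *b = (const unsigned char *)&wire;

	/* Regardless of host endianness, the stored bytes are 0x12 0x34 */
	printf("host 0x%04x -> wire bytes %02x %02x\n", (unsigned)host, b[0], b[1]);
	return 0;
}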
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 2a22f52..f801754 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -474,10 +474,11 @@ static int dm9000_nway_reset(struct net_device *dev)
return mii_nway_restart(&dm->mii);
}
-static int dm9000_set_features(struct net_device *dev, u32 features)
+static int dm9000_set_features(struct net_device *dev,
+ netdev_features_t features)
{
board_info_t *dm = to_dm9000_board(dev);
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
unsigned long flags;
if (!(changed & NETIF_F_RXCSUM))
diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c
index 1427739..1eb46a0 100644
--- a/drivers/net/ethernet/dec/tulip/de2104x.c
+++ b/drivers/net/ethernet/dec/tulip/de2104x.c
@@ -1598,9 +1598,9 @@ static void de_get_drvinfo (struct net_device *dev,struct ethtool_drvinfo *info)
{
struct de_private *de = netdev_priv(dev);
- strcpy (info->driver, DRV_NAME);
- strcpy (info->version, DRV_VERSION);
- strcpy (info->bus_info, pci_name(de->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(de->pdev), sizeof(info->bus_info));
info->eedump_len = DE_EEPROM_SIZE;
}
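The strcpy() to strlcpy() conversions in this series trade silent overflow for bounded copying: strlcpy() writes at most size - 1 bytes and always NUL-terminates, unlike strcpy() (unbounded) or strncpy() (which may leave the buffer unterminated). strlcpy() is a kernel/BSD helper rather than standard C, so the stand-alone sketch below carries its own demo_strlcpy(); it illustrates the semantics only and is not the kernel implementation:

#include <stdio.h>
#include <string.h>

/* Minimal strlcpy-like helper for the demo: copy at most size-1 bytes,
 * always NUL-terminate, return the length of the source string. */
static size_t demo_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = (len >= size) ? size - 1 : len;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char bus_info[8];

	demo_strlcpy(bus_info, "0000:03:00.0", sizeof(bus_info));
	/* Truncated but still a valid, NUL-terminated string: "0000:03" */
	printf("bus_info = \"%s\"\n", bus_info);
	return 0;
}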
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index 871bcaa..4d71f5a 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -2127,14 +2127,9 @@ srom_search(struct net_device *dev, struct pci_dev *pdev)
u_long iobase = 0; /* Clear upper 32 bits in Alphas */
int i, j;
struct de4x5_private *lp = netdev_priv(dev);
- struct list_head *walk;
-
- list_for_each(walk, &pdev->bus_list) {
- struct pci_dev *this_dev = pci_dev_b(walk);
-
- /* Skip the pci_bus list entry */
- if (list_entry(walk, struct pci_bus, devices) == pdev->bus) continue;
+ struct pci_dev *this_dev;
+ list_for_each_entry(this_dev, &pdev->bus->devices, bus_list) {
vendor = this_dev->vendor;
device = this_dev->device << 8;
if (!(is_DC21040 || is_DC21041 || is_DC21140 || is_DC2114x)) continue;
@@ -5196,7 +5191,7 @@ de4x5_parse_params(struct net_device *dev)
struct de4x5_private *lp = netdev_priv(dev);
char *p, *q, t;
- lp->params.fdx = 0;
+ lp->params.fdx = false;
lp->params.autosense = AUTO;
if (args == NULL) return;
@@ -5206,7 +5201,7 @@ de4x5_parse_params(struct net_device *dev)
t = *q;
*q = '\0';
- if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = 1;
+ if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true;
if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
if (strstr(p, "TP")) {
diff --git a/drivers/net/ethernet/dec/tulip/dmfe.c b/drivers/net/ethernet/dec/tulip/dmfe.c
index 17b11ee..51f7542 100644
--- a/drivers/net/ethernet/dec/tulip/dmfe.c
+++ b/drivers/net/ethernet/dec/tulip/dmfe.c
@@ -1085,10 +1085,11 @@ static void dmfe_ethtool_get_drvinfo(struct net_device *dev,
{
struct dmfe_board_info *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
if (np->pdev)
- strcpy(info->bus_info, pci_name(np->pdev));
+ strlcpy(info->bus_info, pci_name(np->pdev),
+ sizeof(info->bus_info));
else
sprintf(info->bus_info, "EISA 0x%lx %d",
dev->base_addr, dev->irq);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 9656dd0..4eb0d76 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -871,9 +871,9 @@ static struct net_device_stats *tulip_get_stats(struct net_device *dev)
static void tulip_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct tulip_private *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(np->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pdev), sizeof(info->bus_info));
}
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 7a44a7a..48b0b65 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -960,10 +960,11 @@ static void netdev_get_drvinfo(struct net_device *dev,
{
struct uli526x_board_info *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
if (np->pdev)
- strcpy(info->bus_info, pci_name(np->pdev));
+ strlcpy(info->bus_info, pci_name(np->pdev),
+ sizeof(info->bus_info));
else
sprintf(info->bus_info, "EISA 0x%lx %d",
dev->base_addr, dev->irq);
diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c
index 4d01219..52da7b2 100644
--- a/drivers/net/ethernet/dec/tulip/winbond-840.c
+++ b/drivers/net/ethernet/dec/tulip/winbond-840.c
@@ -1390,9 +1390,9 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
{
struct netdev_private *np = netdev_priv(dev);
- strcpy (info->driver, DRV_NAME);
- strcpy (info->version, DRV_VERSION);
- strcpy (info->bus_info, pci_name(np->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/dlink/de600.c b/drivers/net/ethernet/dlink/de600.c
index 23a6539..c24fab1 100644
--- a/drivers/net/ethernet/dlink/de600.c
+++ b/drivers/net/ethernet/dlink/de600.c
@@ -59,7 +59,7 @@ static const char version[] = "de600.c: $Revision: 1.41-2.5 $, Bjorn Ekwall (bj
#include "de600.h"
-static unsigned int check_lost = 1;
+static bool check_lost = true;
module_param(check_lost, bool, 0);
MODULE_PARM_DESC(check_lost, "If set then check for unplugged de600");
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index dcd7f7a..28a3a9b 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -1634,9 +1634,9 @@ static int check_if_running(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(np->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index c1063d1..925c9ba 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -325,7 +325,8 @@ static int dnet_mii_init(struct dnet *bp)
bp->mii_bus->write = &dnet_mdio_write;
bp->mii_bus->reset = &dnet_mdio_reset;
- snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
+ snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ bp->pdev->name, bp->pdev->id);
bp->mii_bus->priv = bp;
@@ -804,9 +805,9 @@ static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static void dnet_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, "0");
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, "0", sizeof(info->bus_info));
}
static const struct ethtool_ops dnet_ethtool_ops = {
@@ -977,18 +978,7 @@ static struct platform_driver dnet_driver = {
},
};
-static int __init dnet_init(void)
-{
- return platform_driver_register(&dnet_driver);
-}
-
-static void __exit dnet_exit(void)
-{
- platform_driver_unregister(&dnet_driver);
-}
-
-module_init(dnet_init);
-module_exit(dnet_exit);
+module_platform_driver(dnet_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Dave DNET Ethernet driver");
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 644e8fe..cbdec25 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -40,6 +40,7 @@
#define OC_NAME "Emulex OneConnect 10Gbps NIC"
#define OC_NAME_BE OC_NAME "(be3)"
#define OC_NAME_LANCER OC_NAME "(Lancer)"
+#define OC_NAME_SH OC_NAME "(Skyhawk)"
#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver"
#define BE_VENDOR_ID 0x19a2
@@ -50,6 +51,7 @@
#define OC_DEVICE_ID2 0x710 /* Device Id for BE3 cards */
#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */
#define OC_DEVICE_ID4 0xe228 /* Device id for VF in Lancer */
+#define OC_DEVICE_ID5 0x720 /* Device Id for Skyhawk cards */
static inline char *nic_name(struct pci_dev *pdev)
{
@@ -63,6 +65,8 @@ static inline char *nic_name(struct pci_dev *pdev)
return OC_NAME_LANCER;
case BE_DEVICE_ID2:
return BE3_NAME;
+ case OC_DEVICE_ID5:
+ return OC_NAME_SH;
default:
return BE_NAME;
}
@@ -288,14 +292,14 @@ struct be_drv_stats {
};
struct be_vf_cfg {
- unsigned char vf_mac_addr[ETH_ALEN];
- u32 vf_if_handle;
- u32 vf_pmac_id;
- u16 vf_vlan_tag;
- u32 vf_tx_rate;
+ unsigned char mac_addr[ETH_ALEN];
+ int if_handle;
+ int pmac_id;
+ u16 vlan_tag;
+ u32 tx_rate;
};
-#define BE_INVALID_PMAC_ID 0xffffffff
+#define BE_FLAGS_LINK_STATUS_INIT 1
struct be_adapter {
struct pci_dev *pdev;
@@ -345,13 +349,16 @@ struct be_adapter {
struct delayed_work work;
u16 work_counter;
+ u32 flags;
/* Ethtool knobs and info */
char fw_ver[FW_VER_LEN];
- u32 if_handle; /* Used to configure filtering */
+ int if_handle; /* Used to configure filtering */
u32 pmac_id; /* MAC addr handle used by BE card */
u32 beacon_state; /* for set_phys_id */
bool eeh_err;
+ bool ue_detected;
+ bool fw_timeout;
u32 port_num;
bool promiscuous;
bool wol;
@@ -359,7 +366,6 @@ struct be_adapter {
u32 function_caps;
u32 rx_fc; /* Rx flow control */
u32 tx_fc; /* Tx flow control */
- bool ue_detected;
bool stats_cmd_sent;
int link_speed;
u8 port_type;
@@ -369,16 +375,20 @@ struct be_adapter {
u32 flash_status;
struct completion flash_compl;
- bool be3_native;
- bool sriov_enabled;
- struct be_vf_cfg *vf_cfg;
+ u32 num_vfs;
u8 is_virtfn;
+ struct be_vf_cfg *vf_cfg;
+ bool be3_native;
u32 sli_family;
u8 hba_port_num;
u16 pvid;
};
#define be_physfn(adapter) (!adapter->is_virtfn)
+#define sriov_enabled(adapter) (adapter->num_vfs > 0)
+#define for_all_vfs(adapter, vf_cfg, i) \
+ for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
+ i++, vf_cfg++)
/* BladeEngine Generation numbers */
#define BE_GEN2 2
@@ -524,9 +534,14 @@ static inline bool be_multi_rxq(const struct be_adapter *adapter)
return adapter->num_rx_qs > 1;
}
+static inline bool be_error(struct be_adapter *adapter)
+{
+ return adapter->eeh_err || adapter->ue_detected || adapter->fw_timeout;
+}
+
extern void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm,
u16 num_popped);
-extern void be_link_status_update(struct be_adapter *adapter, u32 link_status);
+extern void be_link_status_update(struct be_adapter *adapter, u8 link_status);
extern void be_parse_stats(struct be_adapter *adapter);
extern int be_load_fw(struct be_adapter *adapter, u8 *func);
#endif /* BE_H */
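The for_all_vfs() helper added above folds the index increment and the element-pointer advance into a single for statement bounded by num_vfs. A stand-alone sketch of the same macro shape, using invented demo_* names:

#include <stdio.h>

struct demo_vf_cfg {
	int if_handle;
};

struct demo_adapter {
	unsigned int num_vfs;
	struct demo_vf_cfg vfs[4];
};

/* Walk index and element pointer together, bounded by num_vfs */
#define demo_for_all_vfs(adapter, vf_cfg, i)                        \
	for (i = 0, vf_cfg = &(adapter)->vfs[0];                    \
	     i < (adapter)->num_vfs; i++, vf_cfg++)

int main(void)
{
	struct demo_adapter adapter = {
		.num_vfs = 3,
		.vfs = { { 10 }, { 11 }, { 12 }, { -1 } },
	};
	struct demo_vf_cfg *vf_cfg;
	unsigned int i;

	demo_for_all_vfs(&adapter, vf_cfg, i)
		printf("vf %u: if_handle %d\n", i, vf_cfg->if_handle);
	return 0;
}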
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 2c7b366..0fcb456 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -31,11 +31,8 @@ static void be_mcc_notify(struct be_adapter *adapter)
struct be_queue_info *mccq = &adapter->mcc_obj.q;
u32 val = 0;
- if (adapter->eeh_err) {
- dev_info(&adapter->pdev->dev,
- "Error in Card Detected! Cannot issue commands\n");
+ if (be_error(adapter))
return;
- }
val |= mccq->id & DB_MCCQ_RING_ID_MASK;
val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
@@ -128,7 +125,14 @@ done:
static void be_async_link_state_process(struct be_adapter *adapter,
struct be_async_event_link_state *evt)
{
- be_link_status_update(adapter, evt->port_link_status);
+ /* When link status changes, link speed must be re-queried from FW */
+ adapter->link_speed = -1;
+
+ /* For the initial link status do not rely on the ASYNC event as
+ * it may not be received in some cases.
+ */
+ if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
+ be_link_status_update(adapter, evt->port_link_status);
}
/* Grp5 CoS Priority evt */
@@ -266,10 +270,10 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
int i, num, status = 0;
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
- if (adapter->eeh_err)
- return -EIO;
-
for (i = 0; i < mcc_timeout; i++) {
+ if (be_error(adapter))
+ return -EIO;
+
num = be_process_mcc(adapter, &status);
if (num)
be_cq_notify(adapter, mcc_obj->cq.id,
@@ -280,7 +284,8 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
udelay(100);
}
if (i == mcc_timeout) {
- dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
+ dev_err(&adapter->pdev->dev, "FW not responding\n");
+ adapter->fw_timeout = true;
return -1;
}
return status;
@@ -298,26 +303,21 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
int msecs = 0;
u32 ready;
- if (adapter->eeh_err) {
- dev_err(&adapter->pdev->dev,
- "Error detected in card.Cannot issue commands\n");
- return -EIO;
- }
-
do {
+ if (be_error(adapter))
+ return -EIO;
+
ready = ioread32(db);
- if (ready == 0xffffffff) {
- dev_err(&adapter->pdev->dev,
- "pci slot disconnected\n");
+ if (ready == 0xffffffff)
return -1;
- }
ready &= MPU_MAILBOX_DB_RDY_MASK;
if (ready)
break;
if (msecs > 4000) {
- dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
+ dev_err(&adapter->pdev->dev, "FW not responding\n");
+ adapter->fw_timeout = true;
be_detect_dump_ue(adapter);
return -1;
}
@@ -555,9 +555,6 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
u8 *wrb;
int status;
- if (adapter->eeh_err)
- return -EIO;
-
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -619,7 +616,7 @@ int be_cmd_eq_create(struct be_adapter *adapter,
/* Use MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
- u8 type, bool permanent, u32 if_handle)
+ u8 type, bool permanent, u32 if_handle, u32 pmac_id)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_mac_query *req;
@@ -641,6 +638,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
req->permanent = 1;
} else {
req->if_id = cpu_to_le16((u16) if_handle);
+ req->pmac_id = cpu_to_le32(pmac_id);
req->permanent = 0;
}
@@ -695,12 +693,15 @@ err:
}
/* Uses synchronous MCCQ */
-int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
+int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_pmac_del *req;
int status;
+ if (pmac_id == -1)
+ return 0;
+
spin_lock_bh(&adapter->mcc_lock);
wrb = wrb_from_mccq(adapter);
@@ -923,10 +924,14 @@ int be_cmd_txq_create(struct be_adapter *adapter,
void *ctxt;
int status;
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
- wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
ctxt = &req->context;
@@ -952,14 +957,15 @@ int be_cmd_txq_create(struct be_adapter *adapter,
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
- status = be_mbox_notify_wait(adapter);
+ status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
txq->id = le16_to_cpu(resp->cid);
txq->created = true;
}
- mutex_unlock(&adapter->mbox_lock);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -1018,9 +1024,6 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
u8 subsys = 0, opcode = 0;
int status;
- if (adapter->eeh_err)
- return -EIO;
-
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -1136,16 +1139,13 @@ err:
}
/* Uses MCCQ */
-int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
+int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_if_destroy *req;
int status;
- if (adapter->eeh_err)
- return -EIO;
-
- if (!interface_id)
+ if (interface_id == -1)
return 0;
spin_lock_bh(&adapter->mcc_lock);
@@ -1239,7 +1239,7 @@ err:
/* Uses synchronous mcc */
int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
- u16 *link_speed, u32 dom)
+ u16 *link_speed, u8 *link_status, u32 dom)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_link_status *req;
@@ -1247,6 +1247,9 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
spin_lock_bh(&adapter->mcc_lock);
+ if (link_status)
+ *link_status = LINK_DOWN;
+
wrb = wrb_from_mccq(adapter);
if (!wrb) {
status = -EBUSY;
@@ -1254,6 +1257,9 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
}
req = embedded_payload(wrb);
+ if (adapter->generation == BE_GEN3 || lancer_chip(adapter))
+ req->hdr.version = 1;
+
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
@@ -1261,10 +1267,13 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
if (!status) {
struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
- *link_speed = le16_to_cpu(resp->link_speed);
+ if (link_speed)
+ *link_speed = le16_to_cpu(resp->link_speed);
if (mac_speed)
*mac_speed = resp->mac_speed;
}
+ if (link_status)
+ *link_status = resp->logical_link_status;
}
err:
@@ -1673,8 +1682,9 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_rss_config *req;
- u32 myhash[10] = {0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF,
- 0x0123, 0x4567, 0x89AB, 0xCDEF, 0x01EF};
+ u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
+ 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
+ 0x3ea83c02, 0x4a110304};
int status;
if (mutex_lock_interruptible(&adapter->mbox_lock))
@@ -1836,6 +1846,53 @@ err_unlock:
return status;
}
+int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
+ u32 data_size, u32 data_offset, const char *obj_name,
+ u32 *data_read, u32 *eof, u8 *addn_status)
+{
+ struct be_mcc_wrb *wrb;
+ struct lancer_cmd_req_read_object *req;
+ struct lancer_cmd_resp_read_object *resp;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err_unlock;
+ }
+
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_READ_OBJECT,
+ sizeof(struct lancer_cmd_req_read_object), wrb,
+ NULL);
+
+ req->desired_read_len = cpu_to_le32(data_size);
+ req->read_offset = cpu_to_le32(data_offset);
+ strcpy(req->object_name, obj_name);
+ req->descriptor_count = cpu_to_le32(1);
+ req->buf_len = cpu_to_le32(data_size);
+ req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
+ req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
+
+ status = be_mcc_notify_wait(adapter);
+
+ resp = embedded_payload(wrb);
+ if (!status) {
+ *data_read = le32_to_cpu(resp->actual_read_len);
+ *eof = le32_to_cpu(resp->eof);
+ } else {
+ *addn_status = resp->additional_status;
+ }
+
+err_unlock:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
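/*
 * Illustrative sketch, not from the patch: the READ_OBJECT request above
 * hands the DMA buffer to the firmware as two 32-bit halves
 * (addr_low/addr_high), built with "dma & 0xFFFFFFFF" and
 * upper_32_bits().  upper_32_bits()/lower_32_bits() are kernel helpers;
 * local stand-ins are used here so the snippet builds on its own.
 */
#include <stdint.h>
#include <stdio.h>

#define lower_32_bits(n)	((uint32_t)((n) & 0xFFFFFFFFu))
#define upper_32_bits(n)	((uint32_t)((n) >> 32))

int main(void)
{
	uint64_t dma = 0x0000001234abcd00ULL;	/* example bus address */

	printf("addr_low=0x%08x addr_high=0x%08x\n",
	       (unsigned)lower_32_bits(dma), (unsigned)upper_32_bits(dma));
	return 0;
}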
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
u32 flash_type, u32 flash_opcode, u32 buf_size)
{
@@ -2238,3 +2295,99 @@ err:
mutex_unlock(&adapter->mbox_lock);
return status;
}
+
+/* Uses synchronous MCCQ */
+int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
+ u32 *pmac_id)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_get_mac_list *req;
+ int status;
+ int mac_count;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+ req = embedded_payload(wrb);
+
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
+ wrb, NULL);
+
+ req->hdr.domain = domain;
+
+ status = be_mcc_notify_wait(adapter);
+ if (!status) {
+ struct be_cmd_resp_get_mac_list *resp =
+ embedded_payload(wrb);
+ int i;
+ u8 *ctxt = &resp->context[0][0];
+ status = -EIO;
+ mac_count = resp->mac_count;
+ be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
+ for (i = 0; i < mac_count; i++) {
+ if (!AMAP_GET_BITS(struct amap_get_mac_list_context,
+ act, ctxt)) {
+ *pmac_id = AMAP_GET_BITS
+ (struct amap_get_mac_list_context,
+ macid, ctxt);
+ status = 0;
+ break;
+ }
+ ctxt += sizeof(struct amap_get_mac_list_context) / 8;
+ }
+ }
+
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
+
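/*
 * Illustrative sketch, not from the patch: in the AMAP convention used by
 * amap_get_mac_list_context above, struct members declare field widths in
 * bits (macid = 31 bits, act = 1 bit), so sizeof(struct)/8 is the record
 * size in bytes and AMAP_GET_BITS() extracts a field from the
 * little-endian words.  Assuming the fields are packed least-significant
 * bit first, the equivalent extraction by hand looks like this:
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t get_bits(uint32_t word, unsigned int offset, unsigned int width)
{
	return (word >> offset) & ((1u << width) - 1u);
}

int main(void)
{
	uint32_t record = 0x80000007u;	/* act bit set, macid = 7 */

	printf("macid=%u act=%u\n",
	       (unsigned)get_bits(record, 0, 31),
	       (unsigned)get_bits(record, 31, 1));
	return 0;
}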
+/* Uses synchronous MCCQ */
+int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
+ u8 mac_count, u32 domain)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_set_mac_list *req;
+ int status;
+ struct be_dma_mem cmd;
+
+ memset(&cmd, 0, sizeof(struct be_dma_mem));
+ cmd.size = sizeof(struct be_cmd_req_set_mac_list);
+ cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
+ &cmd.dma, GFP_KERNEL);
+ if (!cmd.va) {
+ dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
+
+ req = cmd.va;
+ be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
+ wrb, &cmd);
+
+ req->hdr.domain = domain;
+ req->mac_count = mac_count;
+ if (mac_count)
+ memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
+
+ status = be_mcc_notify_wait(adapter);
+
+err:
+ dma_free_coherent(&adapter->pdev->dev, cmd.size,
+ cmd.va, cmd.dma);
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index a35cd03..dca8924 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -189,6 +189,9 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_GET_PHY_DETAILS 102
#define OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP 103
#define OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES 121
+#define OPCODE_COMMON_GET_MAC_LIST 147
+#define OPCODE_COMMON_SET_MAC_LIST 148
+#define OPCODE_COMMON_READ_OBJECT 171
#define OPCODE_COMMON_WRITE_OBJECT 172
#define OPCODE_ETH_RSS_CONFIG 1
@@ -294,6 +297,7 @@ struct be_cmd_req_mac_query {
u8 type;
u8 permanent;
u16 if_id;
+ u32 pmac_id;
} __packed;
struct be_cmd_resp_mac_query {
@@ -956,7 +960,8 @@ struct be_cmd_resp_link_status {
u8 mgmt_mac_duplex;
u8 mgmt_mac_speed;
u16 link_speed;
- u32 rsvd0;
+ u8 logical_link_status;
+ u8 rsvd1[3];
} __packed;
/******************** Port Identification ***************************/
@@ -1161,6 +1166,38 @@ struct lancer_cmd_resp_write_object {
u32 actual_write_len;
};
+/************************ Lancer Read FW info **************/
+#define LANCER_READ_FILE_CHUNK (32*1024)
+#define LANCER_READ_FILE_EOF_MASK 0x80000000
+
+#define LANCER_FW_DUMP_FILE "/dbg/dump.bin"
+#define LANCER_VPD_PF_FILE "/vpd/ntr_pf.vpd"
+#define LANCER_VPD_VF_FILE "/vpd/ntr_vf.vpd"
+
+struct lancer_cmd_req_read_object {
+ struct be_cmd_req_hdr hdr;
+ u32 desired_read_len;
+ u32 read_offset;
+ u8 object_name[104];
+ u32 descriptor_count;
+ u32 buf_len;
+ u32 addr_low;
+ u32 addr_high;
+};
+
+struct lancer_cmd_resp_read_object {
+ u8 opcode;
+ u8 subsystem;
+ u8 rsvd1[2];
+ u8 status;
+ u8 additional_status;
+ u8 rsvd2[2];
+ u32 resp_len;
+ u32 actual_resp_len;
+ u32 actual_read_len;
+ u32 eof;
+};
+
/************************ WOL *******************************/
struct be_cmd_req_acpi_wol_magic_config{
struct be_cmd_req_hdr hdr;
@@ -1307,6 +1344,34 @@ struct be_cmd_resp_set_func_cap {
u8 rsvd[212];
};
+/******************** GET/SET_MACLIST **************************/
+#define BE_MAX_MAC 64
+struct amap_get_mac_list_context {
+ u8 macid[31];
+ u8 act;
+} __packed;
+
+struct be_cmd_req_get_mac_list {
+ struct be_cmd_req_hdr hdr;
+ u32 rsvd;
+} __packed;
+
+struct be_cmd_resp_get_mac_list {
+ struct be_cmd_resp_hdr hdr;
+ u8 mac_count;
+ u8 rsvd1;
+ u16 rsvd2;
+ u8 context[sizeof(struct amap_get_mac_list_context) / 8][BE_MAX_MAC];
+} __packed;
+
+struct be_cmd_req_set_mac_list {
+ struct be_cmd_req_hdr hdr;
+ u8 mac_count;
+ u8 rsvd1;
+ u16 rsvd2;
+ struct macaddr mac[BE_MAX_MAC];
+} __packed;
+
/*************** HW Stats Get v1 **********************************/
#define BE_TXP_SW_SZ 48
struct be_port_rxf_stats_v1 {
@@ -1413,15 +1478,15 @@ static inline void *be_erx_stats_from_cmd(struct be_adapter *adapter)
extern int be_pci_fnum_get(struct be_adapter *adapter);
extern int be_cmd_POST(struct be_adapter *adapter);
extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
- u8 type, bool permanent, u32 if_handle);
+ u8 type, bool permanent, u32 if_handle, u32 pmac_id);
extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
u32 if_id, u32 *pmac_id, u32 domain);
extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id,
- u32 pmac_id, u32 domain);
+ int pmac_id, u32 domain);
extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
u32 en_flags, u8 *mac, u32 *if_handle, u32 *pmac_id,
u32 domain);
-extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle,
+extern int be_cmd_if_destroy(struct be_adapter *adapter, int if_handle,
u32 domain);
extern int be_cmd_eq_create(struct be_adapter *adapter,
struct be_queue_info *eq, int eq_delay);
@@ -1443,8 +1508,8 @@ extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
int type);
extern int be_cmd_rxq_destroy(struct be_adapter *adapter,
struct be_queue_info *q);
-extern int be_cmd_link_status_query(struct be_adapter *adapter,
- u8 *mac_speed, u16 *link_speed, u32 dom);
+extern int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
+ u16 *link_speed, u8 *link_status, u32 dom);
extern int be_cmd_reset(struct be_adapter *adapter);
extern int be_cmd_get_stats(struct be_adapter *adapter,
struct be_dma_mem *nonemb_cmd);
@@ -1480,6 +1545,9 @@ extern int lancer_cmd_write_object(struct be_adapter *adapter,
u32 data_size, u32 data_offset,
const char *obj_name,
u32 *data_written, u8 *addn_status);
+int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
+ u32 data_size, u32 data_offset, const char *obj_name,
+ u32 *data_read, u32 *eof, u8 *addn_status);
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
int offset);
extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
@@ -1506,4 +1574,8 @@ extern int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
extern int be_cmd_req_native_mode(struct be_adapter *adapter);
extern int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size);
extern void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf);
+extern int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
+ u32 *pmac_id);
+extern int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
+ u8 mac_count, u32 domain);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index bf8153e..802e5dd 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -127,8 +127,8 @@ static void be_get_drvinfo(struct net_device *netdev,
memset(fw_on_flash, 0 , sizeof(fw_on_flash));
be_cmd_get_fw_ver(adapter, adapter->fw_ver, fw_on_flash);
- strcpy(drvinfo->driver, DRV_NAME);
- strcpy(drvinfo->version, DRV_VER);
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version));
strncpy(drvinfo->fw_version, adapter->fw_ver, FW_VER_LEN);
if (memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN) != 0) {
strcat(drvinfo->fw_version, " [");
@@ -136,21 +136,84 @@ static void be_get_drvinfo(struct net_device *netdev,
strcat(drvinfo->fw_version, "]");
}
- strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->testinfo_len = 0;
drvinfo->regdump_len = 0;
drvinfo->eedump_len = 0;
}
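/*
 * Illustrative sketch, not from the patch: the drvinfo hunk above replaces
 * unbounded strcpy() with strlcpy() sized by the destination buffer.
 * strlcpy() is a kernel/BSD helper, so this stand-alone version uses a
 * minimal local equivalent to show the truncate-and-terminate behaviour.
 */
#include <stdio.h>
#include <string.h>

static size_t bounded_copy(char *dst, const char *src, size_t dst_size)
{
	size_t len = strlen(src);

	if (dst_size) {
		size_t n = (len >= dst_size) ? dst_size - 1 : len;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;	/* like strlcpy: length the caller asked to copy */
}

int main(void)
{
	char driver[8];

	bounded_copy(driver, "a-very-long-driver-name", sizeof(driver));
	printf("'%s'\n", driver);	/* 'a-very-', always NUL terminated */
	return 0;
}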
+static u32
+lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
+{
+ u32 data_read = 0, eof;
+ u8 addn_status;
+ struct be_dma_mem data_len_cmd;
+ int status;
+
+ memset(&data_len_cmd, 0, sizeof(data_len_cmd));
+ /* data_offset and data_size should be 0 to get reg len */
+ status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0,
+ file_name, &data_read, &eof, &addn_status);
+
+ return data_read;
+}
+
+static int
+lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
+ u32 buf_len, void *buf)
+{
+ struct be_dma_mem read_cmd;
+ u32 read_len = 0, total_read_len = 0, chunk_size;
+ u32 eof = 0;
+ u8 addn_status;
+ int status = 0;
+
+ read_cmd.size = LANCER_READ_FILE_CHUNK;
+ read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
+ &read_cmd.dma);
+
+ if (!read_cmd.va) {
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failure while reading dump\n");
+ return -ENOMEM;
+ }
+
+ while ((total_read_len < buf_len) && !eof) {
+ chunk_size = min_t(u32, (buf_len - total_read_len),
+ LANCER_READ_FILE_CHUNK);
+ chunk_size = ALIGN(chunk_size, 4);
+ status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
+ total_read_len, file_name, &read_len,
+ &eof, &addn_status);
+ if (!status) {
+ memcpy(buf + total_read_len, read_cmd.va, read_len);
+ total_read_len += read_len;
+ eof &= LANCER_READ_FILE_EOF_MASK;
+ } else {
+ status = -EIO;
+ break;
+ }
+ }
+ pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
+ read_cmd.dma);
+
+ return status;
+}
+
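/*
 * Illustrative sketch, not from the patch: lancer_cmd_read_file() above
 * pulls the object in fixed-size chunks, clamping each request to the
 * bytes still wanted, rounding the request up to a 4-byte multiple, and
 * stopping when the firmware flags EOF.  The same loop shape against an
 * in-memory "object" (sizes chosen as multiples of 4, like the real
 * dump/VPD buffers):
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CHUNK		32	/* stands in for LANCER_READ_FILE_CHUNK */
#define ALIGN4(x)	(((x) + 3u) & ~3u)
#define MIN(a, b)	((a) < (b) ? (a) : (b))

/* pretend firmware object: copies what it can and reports EOF */
static uint32_t read_object(const uint8_t *obj, uint32_t obj_len, uint32_t off,
			    uint32_t want, uint8_t *dst, int *eof)
{
	uint32_t n = (off < obj_len) ? MIN(want, obj_len - off) : 0;

	memcpy(dst, obj + off, n);
	*eof = (off + n >= obj_len);
	return n;
}

int main(void)
{
	uint8_t obj[100], buf[100];
	uint32_t total = 0, want, got;
	int eof = 0;

	memset(obj, 0xab, sizeof(obj));
	while (total < sizeof(buf) && !eof) {
		want = ALIGN4(MIN((uint32_t)(sizeof(buf) - total), (uint32_t)CHUNK));
		got = read_object(obj, sizeof(obj), total, want,
				  buf + total, &eof);
		total += got;
	}
	printf("read %u bytes, eof=%d\n", (unsigned)total, eof);
	return 0;
}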
static int
be_get_reg_len(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
u32 log_size = 0;
- if (be_physfn(adapter))
- be_cmd_get_reg_len(adapter, &log_size);
-
+ if (be_physfn(adapter)) {
+ if (lancer_chip(adapter))
+ log_size = lancer_cmd_get_file_len(adapter,
+ LANCER_FW_DUMP_FILE);
+ else
+ be_cmd_get_reg_len(adapter, &log_size);
+ }
return log_size;
}
@@ -161,7 +224,11 @@ be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
if (be_physfn(adapter)) {
memset(buf, 0, regs->len);
- be_cmd_get_regs(adapter, regs->len, buf);
+ if (lancer_chip(adapter))
+ lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
+ regs->len, buf);
+ else
+ be_cmd_get_regs(adapter, regs->len, buf);
}
}
@@ -362,11 +429,14 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
struct be_phy_info phy_info;
u8 mac_speed = 0;
u16 link_speed = 0;
+ u8 link_status;
int status;
if ((adapter->link_speed < 0) || (!(netdev->flags & IFF_UP))) {
status = be_cmd_link_status_query(adapter, &mac_speed,
- &link_speed, 0);
+ &link_speed, &link_status, 0);
+ if (!status)
+ be_link_status_update(adapter, link_status);
/* link_speed is in units of 10 Mbps */
if (link_speed) {
@@ -453,16 +523,13 @@ static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
return 0;
}
-static void
-be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
+static void be_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
{
struct be_adapter *adapter = netdev_priv(netdev);
- ring->rx_max_pending = adapter->rx_obj[0].q.len;
- ring->tx_max_pending = adapter->tx_obj[0].q.len;
-
- ring->rx_pending = atomic_read(&adapter->rx_obj[0].q.used);
- ring->tx_pending = atomic_read(&adapter->tx_obj[0].q.used);
+ ring->rx_max_pending = ring->rx_pending = adapter->rx_obj[0].q.len;
+ ring->tx_max_pending = ring->tx_pending = adapter->tx_obj[0].q.len;
}
static void
@@ -636,7 +703,7 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
}
if (be_cmd_link_status_query(adapter, &mac_speed,
- &qos_link_speed, 0) != 0) {
+ &qos_link_speed, NULL, 0) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
data[4] = -1;
} else if (!mac_speed) {
@@ -649,18 +716,24 @@ static int
be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
{
struct be_adapter *adapter = netdev_priv(netdev);
- char file_name[ETHTOOL_FLASH_MAX_FILENAME];
- file_name[ETHTOOL_FLASH_MAX_FILENAME - 1] = 0;
- strcpy(file_name, efl->data);
-
- return be_load_fw(adapter, file_name);
+ return be_load_fw(adapter, efl->data);
}
static int
be_get_eeprom_len(struct net_device *netdev)
{
- return BE_READ_SEEPROM_LEN;
+ struct be_adapter *adapter = netdev_priv(netdev);
+ if (lancer_chip(adapter)) {
+ if (be_physfn(adapter))
+ return lancer_cmd_get_file_len(adapter,
+ LANCER_VPD_PF_FILE);
+ else
+ return lancer_cmd_get_file_len(adapter,
+ LANCER_VPD_VF_FILE);
+ } else {
+ return BE_READ_SEEPROM_LEN;
+ }
}
static int
@@ -675,6 +748,15 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
if (!eeprom->len)
return -EINVAL;
+ if (lancer_chip(adapter)) {
+ if (be_physfn(adapter))
+ return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE,
+ eeprom->len, data);
+ else
+ return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE,
+ eeprom->len, data);
+ }
+
eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index bf266a0..e703d64 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -27,13 +27,14 @@ MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
-static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
-module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
-MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
+static ushort rx_frag_size = 2048;
+module_param(rx_frag_size, ushort, S_IRUGO);
+MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
+
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
@@ -41,6 +42,7 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
+ { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -237,7 +239,8 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
return -EADDRNOTAVAIL;
status = be_cmd_mac_addr_query(adapter, current_mac,
- MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
+ MAC_ADDRESS_TYPE_NETWORK, false,
+ adapter->if_handle, 0);
if (status)
goto err;
@@ -315,6 +318,8 @@ static void populate_be3_stats(struct be_adapter *adapter)
struct be_drv_stats *drvs = &adapter->drv_stats;
be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
+ drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
+ drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
drvs->rx_pause_frames = port_stats->rx_pause_frames;
drvs->rx_crc_errors = port_stats->rx_crc_errors;
drvs->rx_control_frames = port_stats->rx_control_frames;
@@ -491,19 +496,19 @@ static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
return stats;
}
-void be_link_status_update(struct be_adapter *adapter, u32 link_status)
+void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
struct net_device *netdev = adapter->netdev;
- /* when link status changes, link speed must be re-queried from card */
- adapter->link_speed = -1;
- if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
- netif_carrier_on(netdev);
- dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
- } else {
+ if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
netif_carrier_off(netdev);
- dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
+ adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
}
+
+ if ((link_status & LINK_STATUS_MASK) == LINK_UP)
+ netif_carrier_on(netdev);
+ else
+ netif_carrier_off(netdev);
}
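/*
 * Illustrative sketch, not from the patch: the reworked
 * be_link_status_update() above forces the carrier off exactly once at
 * first use (guarded by BE_FLAGS_LINK_STATUS_INIT) and afterwards simply
 * follows the masked link state from the firmware.  The constants below
 * are local stand-ins, picked only so the snippet is self-contained.
 */
#include <stdbool.h>
#include <stdio.h>

#define LINK_STATUS_MASK	0x1
#define LINK_UP			0x1
#define FLAG_LINK_STATUS_INIT	0x1

struct demo_adapter {
	unsigned int flags;
	bool carrier_on;
};

static void link_status_update(struct demo_adapter *a, unsigned char status)
{
	if (!(a->flags & FLAG_LINK_STATUS_INIT)) {
		a->carrier_on = false;		/* start pessimistic, once */
		a->flags |= FLAG_LINK_STATUS_INIT;
	}
	a->carrier_on = ((status & LINK_STATUS_MASK) == LINK_UP);
}

int main(void)
{
	struct demo_adapter a = { 0, false };

	link_status_update(&a, LINK_UP);
	printf("carrier %s\n", a.carrier_on ? "on" : "off");
	return 0;
}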
static void be_tx_stats_update(struct be_tx_obj *txo,
@@ -549,11 +554,26 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}
+static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
+ struct sk_buff *skb)
+{
+ u8 vlan_prio;
+ u16 vlan_tag;
+
+ vlan_tag = vlan_tx_tag_get(skb);
+ vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+ /* If vlan priority provided by OS is NOT in available bmap */
+ if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
+ vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
+ adapter->recommended_prio;
+
+ return vlan_tag;
+}
+
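/*
 * Illustrative sketch, not from the patch: be_get_tx_vlan_tag() above
 * rewrites the 802.1p priority bits of the tag when the priority the
 * stack asked for is not in the adapter's allowed bitmap.  The
 * VLAN_PRIO_* values below follow the usual 802.1Q layout (PCP in bits
 * 15..13) and are defined locally so the snippet stands alone; the
 * recommended priority is passed in already shifted, as the driver
 * stores it.
 */
#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_MASK	0xe000
#define VLAN_PRIO_SHIFT	13

static uint16_t fixup_vlan_tag(uint16_t vlan_tag, uint8_t allowed_prio_bmap,
			       uint16_t recommended_prio_bits)
{
	uint8_t prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;

	if (!(allowed_prio_bmap & (1 << prio)))
		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) | recommended_prio_bits;
	return vlan_tag;
}

int main(void)
{
	/* priority 5 requested, but only priorities 0 and 3 are allowed */
	uint16_t tag = (5 << VLAN_PRIO_SHIFT) | 100;	/* VID 100 */
	uint16_t fixed = fixup_vlan_tag(tag, 0x09, 3 << VLAN_PRIO_SHIFT);

	printf("tag 0x%04x -> 0x%04x\n", tag, fixed);
	return 0;
}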
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
- u8 vlan_prio = 0;
- u16 vlan_tag = 0;
+ u16 vlan_tag;
memset(hdr, 0, sizeof(*hdr));
@@ -584,12 +604,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
if (vlan_tx_tag_present(skb)) {
AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
- vlan_tag = vlan_tx_tag_get(skb);
- vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
- /* If vlan priority provided by OS is NOT in available bmap */
- if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
- vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
- adapter->recommended_prio;
+ vlan_tag = be_get_tx_vlan_tag(adapter, skb);
AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
}
@@ -692,6 +707,25 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
u32 start = txq->head;
bool dummy_wrb, stopped = false;
+ /* For vlan tagged pkts, BE
+ * 1) calculates checksum even when CSO is not requested
+ * 2) calculates checksum wrongly for padded pkt less than
+ * 60 bytes long.
+ * As a workaround disable TX vlan offloading in such cases.
+ */
+ if (unlikely(vlan_tx_tag_present(skb) &&
+ (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (unlikely(!skb))
+ goto tx_drop;
+
+ skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
+ if (unlikely(!skb))
+ goto tx_drop;
+
+ skb->vlan_tci = 0;
+ }
+
wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
@@ -719,6 +753,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
txq->head = start;
dev_kfree_skb_any(skb);
}
+tx_drop:
return NETDEV_TX_OK;
}
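/*
 * Illustrative sketch, not from the patch: the be_xmit() workaround above
 * falls back to inserting the VLAN tag in software whenever hardware tag
 * insertion could leave the chip computing a bad checksum (no
 * partial-checksum request, or a short frame the chip would pad).  The
 * predicate, pulled out on its own with stand-in fields for the skb
 * members the driver checks:
 */
#include <stdbool.h>
#include <stdio.h>

struct pkt {
	bool has_vlan_tag;	/* vlan_tx_tag_present(skb) */
	bool csum_partial;	/* skb->ip_summed == CHECKSUM_PARTIAL */
	unsigned int len;	/* skb->len */
};

static bool needs_sw_vlan_tag(const struct pkt *p)
{
	return p->has_vlan_tag && (!p->csum_partial || p->len <= 60);
}

int main(void)
{
	struct pkt small = { true, true, 42 };
	struct pkt normal = { true, true, 1500 };

	printf("small: %d, normal: %d\n",
	       needs_sw_vlan_tag(&small), needs_sw_vlan_tag(&normal));
	return 0;
}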
@@ -746,15 +781,15 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
*/
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
u16 vtag[BE_NUM_VLANS_SUPPORTED];
u16 ntags = 0, i;
int status = 0;
- u32 if_handle;
if (vf) {
- if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
- vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
- status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
+ vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
+ status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
+ 1, 1, 0);
}
/* No need to further configure vids if in promiscuous mode */
@@ -779,31 +814,48 @@ static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
return status;
}
-static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
+static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ int status = 0;
- adapter->vlans_added++;
- if (!be_physfn(adapter))
- return;
+ if (!be_physfn(adapter)) {
+ status = -EINVAL;
+ goto ret;
+ }
adapter->vlan_tag[vid] = 1;
if (adapter->vlans_added <= (adapter->max_vlans + 1))
- be_vid_config(adapter, false, 0);
+ status = be_vid_config(adapter, false, 0);
+
+ if (!status)
+ adapter->vlans_added++;
+ else
+ adapter->vlan_tag[vid] = 0;
+ret:
+ return status;
}
-static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
+static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ int status = 0;
- adapter->vlans_added--;
-
- if (!be_physfn(adapter))
- return;
+ if (!be_physfn(adapter)) {
+ status = -EINVAL;
+ goto ret;
+ }
adapter->vlan_tag[vid] = 0;
if (adapter->vlans_added <= adapter->max_vlans)
- be_vid_config(adapter, false, 0);
+ status = be_vid_config(adapter, false, 0);
+
+ if (!status)
+ adapter->vlans_added--;
+ else
+ adapter->vlan_tag[vid] = 1;
+ret:
+ return status;
}
static void be_set_rx_mode(struct net_device *netdev)
@@ -840,28 +892,30 @@ done:
static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
int status;
- if (!adapter->sriov_enabled)
+ if (!sriov_enabled(adapter))
return -EPERM;
- if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
+ if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
return -EINVAL;
- if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
- status = be_cmd_pmac_del(adapter,
- adapter->vf_cfg[vf].vf_if_handle,
- adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
+ if (lancer_chip(adapter)) {
+ status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
+ } else {
+ status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
+ vf_cfg->pmac_id, vf + 1);
- status = be_cmd_pmac_add(adapter, mac,
- adapter->vf_cfg[vf].vf_if_handle,
- &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
+ status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
+ &vf_cfg->pmac_id, vf + 1);
+ }
if (status)
dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
mac, vf);
else
- memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
+ memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
return status;
}
@@ -870,18 +924,19 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
struct ifla_vf_info *vi)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
- if (!adapter->sriov_enabled)
+ if (!sriov_enabled(adapter))
return -EPERM;
- if (vf >= num_vfs)
+ if (vf >= adapter->num_vfs)
return -EINVAL;
vi->vf = vf;
- vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
- vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
+ vi->tx_rate = vf_cfg->tx_rate;
+ vi->vlan = vf_cfg->vlan_tag;
vi->qos = 0;
- memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);
+ memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
return 0;
}
@@ -892,17 +947,17 @@ static int be_set_vf_vlan(struct net_device *netdev,
struct be_adapter *adapter = netdev_priv(netdev);
int status = 0;
- if (!adapter->sriov_enabled)
+ if (!sriov_enabled(adapter))
return -EPERM;
- if ((vf >= num_vfs) || (vlan > 4095))
+ if (vf >= adapter->num_vfs || vlan > 4095)
return -EINVAL;
if (vlan) {
- adapter->vf_cfg[vf].vf_vlan_tag = vlan;
+ adapter->vf_cfg[vf].vlan_tag = vlan;
adapter->vlans_added++;
} else {
- adapter->vf_cfg[vf].vf_vlan_tag = 0;
+ adapter->vf_cfg[vf].vlan_tag = 0;
adapter->vlans_added--;
}
@@ -920,21 +975,25 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
struct be_adapter *adapter = netdev_priv(netdev);
int status = 0;
- if (!adapter->sriov_enabled)
+ if (!sriov_enabled(adapter))
return -EPERM;
- if ((vf >= num_vfs) || (rate < 0))
+ if (vf >= adapter->num_vfs)
return -EINVAL;
- if (rate > 10000)
- rate = 10000;
+ if (rate < 100 || rate > 10000) {
+ dev_err(&adapter->pdev->dev,
+ "tx rate must be between 100 and 10000 Mbps\n");
+ return -EINVAL;
+ }
- adapter->vf_cfg[vf].vf_tx_rate = rate;
status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
if (status)
- dev_info(&adapter->pdev->dev,
+ dev_err(&adapter->pdev->dev,
"tx rate %d on VF %d failed\n", rate, vf);
+ else
+ adapter->vf_cfg[vf].tx_rate = rate;
return status;
}
@@ -1645,8 +1704,7 @@ static void be_tx_queues_destroy(struct be_adapter *adapter)
static int be_num_txqs_want(struct be_adapter *adapter)
{
- if ((num_vfs && adapter->sriov_enabled) ||
- be_is_mc(adapter) ||
+ if (sriov_enabled(adapter) || be_is_mc(adapter) ||
lancer_chip(adapter) || !be_physfn(adapter) ||
adapter->generation == BE_GEN2)
return 1;
@@ -1662,9 +1720,12 @@ static int be_tx_queues_create(struct be_adapter *adapter)
u8 i;
adapter->num_tx_qs = be_num_txqs_want(adapter);
- if (adapter->num_tx_qs != MAX_TX_QS)
+ if (adapter->num_tx_qs != MAX_TX_QS) {
+ rtnl_lock();
netif_set_real_num_tx_queues(adapter->netdev,
adapter->num_tx_qs);
+ rtnl_unlock();
+ }
adapter->tx_eq.max_eqd = 0;
adapter->tx_eq.min_eqd = 0;
@@ -1693,9 +1754,6 @@ static int be_tx_queues_create(struct be_adapter *adapter)
if (be_queue_alloc(adapter, q, TX_Q_LEN,
sizeof(struct be_eth_wrb)))
goto err;
-
- if (be_cmd_txq_create(adapter, q, cq))
- goto err;
}
return 0;
@@ -1728,8 +1786,7 @@ static void be_rx_queues_destroy(struct be_adapter *adapter)
static u32 be_num_rxqs_want(struct be_adapter *adapter)
{
if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
- !adapter->sriov_enabled && be_physfn(adapter) &&
- !be_is_mc(adapter)) {
+ !sriov_enabled(adapter) && be_physfn(adapter)) {
return 1 + MAX_RSS_QS; /* one default non-RSS queue */
} else {
dev_warn(&adapter->pdev->dev,
@@ -1929,6 +1986,7 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
struct be_adapter *adapter =
container_of(tx_eq, struct be_adapter, tx_eq);
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
struct be_tx_obj *txo;
struct be_eth_tx_compl *txcp;
int tx_compl, mcc_compl, status = 0;
@@ -1965,12 +2023,19 @@ static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
mcc_compl = be_process_mcc(adapter, &status);
if (mcc_compl) {
- struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
}
napi_complete(napi);
+ /* Arm CQ again to regenerate EQEs for Lancer in INTx mode */
+ if (lancer_chip(adapter) && !msix_enabled(adapter)) {
+ for_all_tx_queues(adapter, txo, i)
+ be_cq_notify(adapter, txo->cq.id, true, 0);
+
+ be_cq_notify(adapter, mcc_obj->cq.id, true, 0);
+ }
+
be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
adapter->drv_stats.tx_events++;
return 1;
@@ -1982,6 +2047,9 @@ void be_detect_dump_ue(struct be_adapter *adapter)
u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
u32 i;
+ if (adapter->eeh_err || adapter->ue_detected)
+ return;
+
if (lancer_chip(adapter)) {
sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
@@ -2008,7 +2076,8 @@ void be_detect_dump_ue(struct be_adapter *adapter)
sliport_status & SLIPORT_STATUS_ERR_MASK) {
adapter->ue_detected = true;
adapter->eeh_err = true;
- dev_err(&adapter->pdev->dev, "UE Detected!!\n");
+ dev_err(&adapter->pdev->dev,
+ "Unrecoverable error in the card\n");
}
if (ue_lo) {
@@ -2036,53 +2105,6 @@ void be_detect_dump_ue(struct be_adapter *adapter)
}
}
-static void be_worker(struct work_struct *work)
-{
- struct be_adapter *adapter =
- container_of(work, struct be_adapter, work.work);
- struct be_rx_obj *rxo;
- int i;
-
- if (!adapter->ue_detected)
- be_detect_dump_ue(adapter);
-
- /* when interrupts are not yet enabled, just reap any pending
- * mcc completions */
- if (!netif_running(adapter->netdev)) {
- int mcc_compl, status = 0;
-
- mcc_compl = be_process_mcc(adapter, &status);
-
- if (mcc_compl) {
- struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
- be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
- }
-
- goto reschedule;
- }
-
- if (!adapter->stats_cmd_sent) {
- if (lancer_chip(adapter))
- lancer_cmd_get_pport_stats(adapter,
- &adapter->stats_cmd);
- else
- be_cmd_get_stats(adapter, &adapter->stats_cmd);
- }
-
- for_all_rx_queues(adapter, rxo, i) {
- be_rx_eqd_update(adapter, rxo);
-
- if (rxo->rx_post_starved) {
- rxo->rx_post_starved = false;
- be_post_rx_frags(rxo, GFP_KERNEL);
- }
- }
-
-reschedule:
- adapter->work_counter++;
- schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
-}
-
static void be_msix_disable(struct be_adapter *adapter)
{
if (msix_enabled(adapter)) {
@@ -2119,27 +2141,28 @@ done:
static int be_sriov_enable(struct be_adapter *adapter)
{
be_check_sriov_fn_type(adapter);
+
#ifdef CONFIG_PCI_IOV
if (be_physfn(adapter) && num_vfs) {
int status, pos;
- u16 nvfs;
+ u16 dev_vfs;
pos = pci_find_ext_capability(adapter->pdev,
PCI_EXT_CAP_ID_SRIOV);
pci_read_config_word(adapter->pdev,
- pos + PCI_SRIOV_TOTAL_VF, &nvfs);
+ pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);
- if (num_vfs > nvfs) {
+ adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
+ if (adapter->num_vfs != num_vfs)
dev_info(&adapter->pdev->dev,
- "Device supports %d VFs and not %d\n",
- nvfs, num_vfs);
- num_vfs = nvfs;
- }
+ "Device supports %d VFs and not %d\n",
+ adapter->num_vfs, num_vfs);
- status = pci_enable_sriov(adapter->pdev, num_vfs);
- adapter->sriov_enabled = status ? false : true;
+ status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+ if (status)
+ adapter->num_vfs = 0;
- if (adapter->sriov_enabled) {
+ if (adapter->num_vfs) {
adapter->vf_cfg = kcalloc(num_vfs,
sizeof(struct be_vf_cfg),
GFP_KERNEL);
@@ -2154,10 +2177,10 @@ static int be_sriov_enable(struct be_adapter *adapter)
static void be_sriov_disable(struct be_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
- if (adapter->sriov_enabled) {
+ if (sriov_enabled(adapter)) {
pci_disable_sriov(adapter->pdev);
kfree(adapter->vf_cfg);
- adapter->sriov_enabled = false;
+ adapter->num_vfs = 0;
}
#endif
}
@@ -2351,8 +2374,8 @@ static int be_close(struct net_device *netdev)
static int be_rx_queues_setup(struct be_adapter *adapter)
{
struct be_rx_obj *rxo;
- int rc, i;
- u8 rsstable[MAX_RSS_QS];
+ int rc, i, j;
+ u8 rsstable[128];
for_all_rx_queues(adapter, rxo, i) {
rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
@@ -2364,11 +2387,15 @@ static int be_rx_queues_setup(struct be_adapter *adapter)
}
if (be_multi_rxq(adapter)) {
- for_all_rss_queues(adapter, rxo, i)
- rsstable[i] = rxo->rss_id;
+ for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
+ for_all_rss_queues(adapter, rxo, i) {
+ if ((j + i) >= 128)
+ break;
+ rsstable[j + i] = rxo->rss_id;
+ }
+ }
+ rc = be_cmd_rss_config(adapter, rsstable, 128);
- rc = be_cmd_rss_config(adapter, rsstable,
- adapter->num_rx_qs - 1);
if (rc)
return rc;
}
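/*
 * Illustrative sketch, not from the patch: the hunk above programs all
 * 128 entries of the RSS indirection table by cycling through the RSS
 * queue ids, instead of filling only the first num_rx_qs - 1 slots.
 * Stand-alone version of the same fill with example queue ids:
 */
#include <stdio.h>

#define TABLE_SIZE	128

int main(void)
{
	unsigned char rsstable[TABLE_SIZE];
	unsigned char rss_ids[] = { 3, 4, 5, 6 };	/* example queue ids */
	unsigned int nqueues = sizeof(rss_ids);
	unsigned int i, j;

	for (j = 0; j < TABLE_SIZE; j += nqueues)
		for (i = 0; i < nqueues && (j + i) < TABLE_SIZE; i++)
			rsstable[j + i] = rss_ids[i];

	/* every slot now points at one of the RSS queues, round-robin */
	for (i = 0; i < TABLE_SIZE; i++)
		printf("%u%c", rsstable[i], (i % 16 == 15) ? '\n' : ' ');
	return 0;
}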
@@ -2386,6 +2413,7 @@ static int be_open(struct net_device *netdev)
struct be_adapter *adapter = netdev_priv(netdev);
struct be_eq_obj *tx_eq = &adapter->tx_eq;
struct be_rx_obj *rxo;
+ u8 link_status;
int status, i;
status = be_rx_queues_setup(adapter);
@@ -2409,6 +2437,11 @@ static int be_open(struct net_device *netdev)
/* Now that interrupts are on we can process async mcc */
be_async_mcc_enable(adapter);
+ status = be_cmd_link_status_query(adapter, NULL, NULL,
+ &link_status, 0);
+ if (!status)
+ be_link_status_update(adapter, link_status);
+
return 0;
err:
be_close(adapter->netdev);
@@ -2465,19 +2498,24 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
u32 vf;
int status = 0;
u8 mac[ETH_ALEN];
+ struct be_vf_cfg *vf_cfg;
be_vf_eth_addr_generate(adapter, mac);
- for (vf = 0; vf < num_vfs; vf++) {
- status = be_cmd_pmac_add(adapter, mac,
- adapter->vf_cfg[vf].vf_if_handle,
- &adapter->vf_cfg[vf].vf_pmac_id,
- vf + 1);
+ for_all_vfs(adapter, vf_cfg, vf) {
+ if (lancer_chip(adapter)) {
+ status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
+ } else {
+ status = be_cmd_pmac_add(adapter, mac,
+ vf_cfg->if_handle,
+ &vf_cfg->pmac_id, vf + 1);
+ }
+
if (status)
dev_err(&adapter->pdev->dev,
- "Mac address add failed for VF %d\n", vf);
+ "Mac address assignment failed for VF %d\n", vf);
else
- memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
+ memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
mac[5] += 1;
}
@@ -2486,24 +2524,23 @@ static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
static void be_vf_clear(struct be_adapter *adapter)
{
+ struct be_vf_cfg *vf_cfg;
u32 vf;
- for (vf = 0; vf < num_vfs; vf++) {
- if (adapter->vf_cfg[vf].vf_pmac_id != BE_INVALID_PMAC_ID)
- be_cmd_pmac_del(adapter,
- adapter->vf_cfg[vf].vf_if_handle,
- adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
- }
+ for_all_vfs(adapter, vf_cfg, vf) {
+ if (lancer_chip(adapter))
+ be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
+ else
+ be_cmd_pmac_del(adapter, vf_cfg->if_handle,
+ vf_cfg->pmac_id, vf + 1);
- for (vf = 0; vf < num_vfs; vf++)
- if (adapter->vf_cfg[vf].vf_if_handle)
- be_cmd_if_destroy(adapter,
- adapter->vf_cfg[vf].vf_if_handle, vf + 1);
+ be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
+ }
}
static int be_clear(struct be_adapter *adapter)
{
- if (be_physfn(adapter) && adapter->sriov_enabled)
+ if (sriov_enabled(adapter))
be_vf_clear(adapter);
be_cmd_if_destroy(adapter, adapter->if_handle, 0);
@@ -2511,61 +2548,94 @@ static int be_clear(struct be_adapter *adapter)
be_mcc_queues_destroy(adapter);
be_rx_queues_destroy(adapter);
be_tx_queues_destroy(adapter);
- adapter->eq_next_idx = 0;
-
- adapter->be3_native = false;
- adapter->promiscuous = false;
/* tell fw we're done with firing cmds */
be_cmd_fw_clean(adapter);
return 0;
}
+static void be_vf_setup_init(struct be_adapter *adapter)
+{
+ struct be_vf_cfg *vf_cfg;
+ int vf;
+
+ for_all_vfs(adapter, vf_cfg, vf) {
+ vf_cfg->if_handle = -1;
+ vf_cfg->pmac_id = -1;
+ }
+}
+
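/*
 * Illustrative sketch, not from the patch: be_vf_setup_init() above primes
 * if_handle/pmac_id with -1 so the teardown paths (and be_cmd_if_destroy(),
 * which now takes a signed handle and bails out on -1) can tell "never
 * created" apart from a real id.  The same idea in a tiny stand-alone form:
 */
#include <stdio.h>

struct vf_cfg {
	int if_handle;		/* -1 means "not created yet" */
};

static void destroy_iface(struct vf_cfg *cfg)
{
	if (cfg->if_handle == -1)
		return;		/* nothing was created, nothing to undo */
	printf("destroying interface %d\n", cfg->if_handle);
	cfg->if_handle = -1;
}

int main(void)
{
	struct vf_cfg vfs[2] = { { .if_handle = -1 }, { .if_handle = -1 } };

	vfs[0].if_handle = 7;	/* only VF 0 got an interface */
	destroy_iface(&vfs[0]);
	destroy_iface(&vfs[1]);	/* safe no-op for VF 1 */
	return 0;
}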
static int be_vf_setup(struct be_adapter *adapter)
{
+ struct be_vf_cfg *vf_cfg;
u32 cap_flags, en_flags, vf;
u16 lnk_speed;
int status;
- cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
- for (vf = 0; vf < num_vfs; vf++) {
+ be_vf_setup_init(adapter);
+
+ cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST;
+ for_all_vfs(adapter, vf_cfg, vf) {
status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
- &adapter->vf_cfg[vf].vf_if_handle,
- NULL, vf+1);
+ &vf_cfg->if_handle, NULL, vf + 1);
if (status)
goto err;
- adapter->vf_cfg[vf].vf_pmac_id = BE_INVALID_PMAC_ID;
}
- if (!lancer_chip(adapter)) {
- status = be_vf_eth_addr_config(adapter);
- if (status)
- goto err;
- }
+ status = be_vf_eth_addr_config(adapter);
+ if (status)
+ goto err;
- for (vf = 0; vf < num_vfs; vf++) {
+ for_all_vfs(adapter, vf_cfg, vf) {
status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
- vf + 1);
+ NULL, vf + 1);
if (status)
goto err;
- adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
+ vf_cfg->tx_rate = lnk_speed * 10;
}
return 0;
err:
return status;
}
+static void be_setup_init(struct be_adapter *adapter)
+{
+ adapter->vlan_prio_bmap = 0xff;
+ adapter->link_speed = -1;
+ adapter->if_handle = -1;
+ adapter->be3_native = false;
+ adapter->promiscuous = false;
+ adapter->eq_next_idx = 0;
+}
+
+static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
+{
+ u32 pmac_id;
+ int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
+ if (status != 0)
+ goto do_none;
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK,
+ false, adapter->if_handle, pmac_id);
+ if (status != 0)
+ goto do_none;
+ status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
+ &adapter->pmac_id, 0);
+do_none:
+ return status;
+}
+
static int be_setup(struct be_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
u32 cap_flags, en_flags;
u32 tx_fc, rx_fc;
- int status;
+ int status, i;
u8 mac[ETH_ALEN];
+ struct be_tx_obj *txo;
- /* Allow all priorities by default. A GRP5 evt may modify this */
- adapter->vlan_prio_bmap = 0xff;
- adapter->link_speed = -1;
+ be_setup_init(adapter);
be_cmd_req_native_mode(adapter);
@@ -2583,7 +2653,7 @@ static int be_setup(struct be_adapter *adapter)
memset(mac, 0, ETH_ALEN);
status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
- true /*permanent */, 0);
+ true /*permanent */, 0, 0);
if (status)
return status;
memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
@@ -2592,7 +2662,8 @@ static int be_setup(struct be_adapter *adapter)
en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
- BE_IF_FLAGS_PROMISCUOUS;
+ BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
+
if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
cap_flags |= BE_IF_FLAGS_RSS;
en_flags |= BE_IF_FLAGS_RSS;
@@ -2603,12 +2674,23 @@ static int be_setup(struct be_adapter *adapter)
if (status != 0)
goto err;
- /* For BEx, the VF's permanent mac queried from card is incorrect.
- * Query the mac configued by the PF using if_handle
- */
- if (!be_physfn(adapter) && !lancer_chip(adapter)) {
- status = be_cmd_mac_addr_query(adapter, mac,
- MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
+ for_all_tx_queues(adapter, txo, i) {
+ status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
+ if (status)
+ goto err;
+ }
+
+ /* The VF's permanent mac queried from card is incorrect.
+ * For BEx: Query the mac configured by the PF using if_handle
+ * For Lancer: Get and use mac_list to obtain mac address.
+ */
+ if (!be_physfn(adapter)) {
+ if (lancer_chip(adapter))
+ status = be_configure_mac_from_list(adapter, mac);
+ else
+ status = be_cmd_mac_addr_query(adapter, mac,
+ MAC_ADDRESS_TYPE_NETWORK, false,
+ adapter->if_handle, 0);
if (!status) {
memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
@@ -2624,18 +2706,21 @@ static int be_setup(struct be_adapter *adapter)
be_set_rx_mode(adapter->netdev);
status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
- if (status)
+ /* For Lancer: It is legal for this cmd to fail on VF */
+ if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
goto err;
+
if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
adapter->rx_fc);
- if (status)
+ /* For Lancer: It is legal for this cmd to fail on VF */
+ if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
goto err;
}
pcie_set_readrq(adapter->pdev, 4096);
- if (be_physfn(adapter) && adapter->sriov_enabled) {
+ if (sriov_enabled(adapter)) {
status = be_vf_setup(adapter);
if (status)
goto err;
@@ -2647,6 +2732,19 @@ err:
return status;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void be_netpoll(struct net_device *netdev)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_rx_obj *rxo;
+ int i;
+
+ event_handle(adapter, &adapter->tx_eq, false);
+ for_all_rx_queues(adapter, rxo, i)
+ event_handle(adapter, &rxo->rx_eq, true);
+}
+#endif
+
#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
static bool be_flash_redboot(struct be_adapter *adapter,
const u8 *p, u32 img_start, int image_size,
@@ -2981,7 +3079,7 @@ fw_exit:
return status;
}
-static struct net_device_ops be_netdev_ops = {
+static const struct net_device_ops be_netdev_ops = {
.ndo_open = be_open,
.ndo_stop = be_close,
.ndo_start_xmit = be_xmit,
@@ -2995,7 +3093,10 @@ static struct net_device_ops be_netdev_ops = {
.ndo_set_vf_mac = be_set_vf_mac,
.ndo_set_vf_vlan = be_set_vf_vlan,
.ndo_set_vf_tx_rate = be_set_vf_tx_rate,
- .ndo_get_vf_config = be_get_vf_config
+ .ndo_get_vf_config = be_get_vf_config,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = be_netpoll,
+#endif
};
static void be_netdev_init(struct net_device *netdev)
@@ -3242,6 +3343,7 @@ static int be_dev_family_check(struct be_adapter *adapter)
break;
case BE_DEVICE_ID2:
case OC_DEVICE_ID2:
+ case OC_DEVICE_ID5:
adapter->generation = BE_GEN3;
break;
case OC_DEVICE_ID3:
@@ -3267,7 +3369,7 @@ static int be_dev_family_check(struct be_adapter *adapter)
static int lancer_wait_ready(struct be_adapter *adapter)
{
-#define SLIPORT_READY_TIMEOUT 500
+#define SLIPORT_READY_TIMEOUT 30
u32 sliport_status;
int status = 0, i;
@@ -3276,7 +3378,7 @@ static int lancer_wait_ready(struct be_adapter *adapter)
if (sliport_status & SLIPORT_STATUS_RDY_MASK)
break;
- msleep(20);
+ msleep(1000);
}
if (i == SLIPORT_READY_TIMEOUT)
@@ -3313,6 +3415,104 @@ static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
return status;
}
+static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
+{
+ int status;
+ u32 sliport_status;
+
+ if (adapter->eeh_err || adapter->ue_detected)
+ return;
+
+ sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
+
+ if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
+ dev_err(&adapter->pdev->dev,
+ "Adapter in error state."
+ "Trying to recover.\n");
+
+ status = lancer_test_and_set_rdy_state(adapter);
+ if (status)
+ goto err;
+
+ netif_device_detach(adapter->netdev);
+
+ if (netif_running(adapter->netdev))
+ be_close(adapter->netdev);
+
+ be_clear(adapter);
+
+ adapter->fw_timeout = false;
+
+ status = be_setup(adapter);
+ if (status)
+ goto err;
+
+ if (netif_running(adapter->netdev)) {
+ status = be_open(adapter->netdev);
+ if (status)
+ goto err;
+ }
+
+ netif_device_attach(adapter->netdev);
+
+ dev_err(&adapter->pdev->dev,
+ "Adapter error recovery succeeded\n");
+ }
+ return;
+err:
+ dev_err(&adapter->pdev->dev,
+ "Adapter error recovery failed\n");
+}
+
+static void be_worker(struct work_struct *work)
+{
+ struct be_adapter *adapter =
+ container_of(work, struct be_adapter, work.work);
+ struct be_rx_obj *rxo;
+ int i;
+
+ if (lancer_chip(adapter))
+ lancer_test_and_recover_fn_err(adapter);
+
+ be_detect_dump_ue(adapter);
+
+ /* when interrupts are not yet enabled, just reap any pending
+ * mcc completions */
+ if (!netif_running(adapter->netdev)) {
+ int mcc_compl, status = 0;
+
+ mcc_compl = be_process_mcc(adapter, &status);
+
+ if (mcc_compl) {
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+ be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
+ }
+
+ goto reschedule;
+ }
+
+ if (!adapter->stats_cmd_sent) {
+ if (lancer_chip(adapter))
+ lancer_cmd_get_pport_stats(adapter,
+ &adapter->stats_cmd);
+ else
+ be_cmd_get_stats(adapter, &adapter->stats_cmd);
+ }
+
+ for_all_rx_queues(adapter, rxo, i) {
+ be_rx_eqd_update(adapter, rxo);
+
+ if (rxo->rx_post_starved) {
+ rxo->rx_post_starved = false;
+ be_post_rx_frags(rxo, GFP_KERNEL);
+ }
+ }
+
+reschedule:
+ adapter->work_counter++;
+ schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
+}
+
static int __devinit be_probe(struct pci_dev *pdev,
const struct pci_device_id *pdev_id)
{
@@ -3365,7 +3565,12 @@ static int __devinit be_probe(struct pci_dev *pdev,
goto disable_sriov;
if (lancer_chip(adapter)) {
- status = lancer_test_and_set_rdy_state(adapter);
+ status = lancer_wait_ready(adapter);
+ if (!status) {
+ iowrite32(SLI_PORT_CONTROL_IP_MASK,
+ adapter->db + SLIPORT_CONTROL_OFFSET);
+ status = lancer_test_and_set_rdy_state(adapter);
+ }
if (status) {
dev_err(&pdev->dev, "Adapter in non recoverable error\n");
goto ctrl_clean;
@@ -3559,6 +3764,8 @@ static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
dev_info(&adapter->pdev->dev, "EEH reset\n");
adapter->eeh_err = false;
+ adapter->ue_detected = false;
+ adapter->fw_timeout = false;
status = pci_enable_device(pdev);
if (status)
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 251b635..60f0e78 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -1185,18 +1185,7 @@ static struct platform_driver ethoc_driver = {
},
};
-static int __init ethoc_init(void)
-{
- return platform_driver_register(&ethoc_driver);
-}
-
-static void __exit ethoc_exit(void)
-{
- platform_driver_unregister(&ethoc_driver);
-}
-
-module_init(ethoc_init);
-module_exit(ethoc_exit);
+module_platform_driver(ethoc_driver);
MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("OpenCores Ethernet MAC driver");
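/*
 * Illustrative note, not from the patch: module_platform_driver() expands
 * to roughly the boilerplate this hunk deletes, i.e. something like
 *
 *	static int __init ethoc_driver_init(void)
 *	{
 *		return platform_driver_register(&ethoc_driver);
 *	}
 *	module_init(ethoc_driver_init);
 *
 *	static void __exit ethoc_driver_exit(void)
 *	{
 *		platform_driver_unregister(&ethoc_driver);
 *	}
 *	module_exit(ethoc_driver_exit);
 *
 * so this conversion (and the matching fs_enet/fsl_pq_mdio ones further
 * down) is a behaviour-preserving cleanup.
 */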
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index fb5579a..47f85c3 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -25,6 +25,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/netdevice.h>
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index a127cb2..bb336a0 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -25,6 +25,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/init.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mii.h>
#include <linux/module.h>
diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c
index 61d2bdd..c82d444 100644
--- a/drivers/net/ethernet/fealnx.c
+++ b/drivers/net/ethernet/fealnx.c
@@ -1818,9 +1818,9 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
{
struct netdev_private *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(np->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 9de3764..3574e14 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -21,7 +21,7 @@ config NET_VENDOR_FREESCALE
if NET_VENDOR_FREESCALE
config FEC
- bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
+ tristate "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
ARCH_MXC || SOC_IMX28)
default ARCH_MXC || SOC_IMX28 if ARM
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index c136230..e92ef1b 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -99,7 +99,7 @@ static struct platform_device_id fec_devtype[] = {
MODULE_DEVICE_TABLE(platform, fec_devtype);
enum imx_fec_type {
- IMX25_FEC = 1, /* runs on i.mx25/50/53 */
+ IMX25_FEC = 1, /* runs on i.mx25/50/53 */
IMX27_FEC, /* runs on i.mx27/35/51 */
IMX28_FEC,
IMX6Q_FEC,
@@ -132,7 +132,7 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#elif defined (CONFIG_M5272C3)
#define FEC_FLASHMAC (0xffe04000 + 4)
#elif defined(CONFIG_MOD5272)
-#define FEC_FLASHMAC 0xffc0406b
+#define FEC_FLASHMAC 0xffc0406b
#else
#define FEC_FLASHMAC 0
#endif
@@ -255,11 +255,13 @@ struct fec_enet_private {
#define FEC_MMFR_TA (2 << 16)
#define FEC_MMFR_DATA(v) (v & 0xffff)
-#define FEC_MII_TIMEOUT 1000 /* us */
+#define FEC_MII_TIMEOUT 30000 /* us */
/* Transmitter timeout */
#define TX_TIMEOUT (2 * HZ)
+static int mii_cnt;
+
static void *swap_buffer(void *bufaddr, int len)
{
int i;
@@ -474,6 +476,7 @@ fec_restart(struct net_device *ndev, int duplex)
} else {
#ifdef FEC_MIIGSK_ENR
if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) {
+ u32 cfgr;
/* disable the gasket and wait */
writel(0, fep->hwp + FEC_MIIGSK_ENR);
while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
@@ -484,9 +487,11 @@ fec_restart(struct net_device *ndev, int duplex)
* RMII, 50 MHz, no loopback, no echo
* MII, 25 MHz, no loopback, no echo
*/
- writel((fep->phy_interface == PHY_INTERFACE_MODE_RMII) ?
- 1 : 0, fep->hwp + FEC_MIIGSK_CFGR);
-
+ cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+ ? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
+ if (fep->phy_dev && fep->phy_dev->speed == SPEED_10)
+ cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
+ writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
/* re-enable the gasket */
writel(2, fep->hwp + FEC_MIIGSK_ENR);
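/*
 * Illustrative sketch, not from the patch: the gasket hunk above builds
 * the MIIGSK_CFGR value from the PHY interface mode and ORs in the
 * FRCONT_10M bit (presumably "force continuous" operation) whenever the
 * PHY negotiated 10 Mbit/s.  The BM_MIIGSK_CFGR_* values are the ones
 * this patch adds to fec.h:
 */
#include <stdint.h>
#include <stdio.h>

#define BM_MIIGSK_CFGR_MII		0x00
#define BM_MIIGSK_CFGR_RMII		0x01
#define BM_MIIGSK_CFGR_FRCONT_10M	0x40

static uint32_t miigsk_cfgr(int is_rmii, int speed_mbps)
{
	uint32_t cfgr = is_rmii ? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;

	if (speed_mbps == 10)
		cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
	return cfgr;
}

int main(void)
{
	printf("RMII@10M  -> 0x%02x\n", (unsigned)miigsk_cfgr(1, 10));
	printf("RMII@100M -> 0x%02x\n", (unsigned)miigsk_cfgr(1, 100));
	printf("MII@100M  -> 0x%02x\n", (unsigned)miigsk_cfgr(0, 100));
	return 0;
}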
@@ -516,6 +521,7 @@ fec_stop(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
+ u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
/* We cannot expect a graceful transmit stop without link !!! */
if (fep->link) {
@@ -532,8 +538,10 @@ fec_stop(struct net_device *ndev)
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
/* We have to keep ENET enabled to have MII interrupt stay working */
- if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
+ if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
writel(2, fep->hwp + FEC_ECNTRL);
+ writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
+ }
}
@@ -819,7 +827,7 @@ static void __inline__ fec_get_mac(struct net_device *ndev)
iap = (unsigned char *)FEC_FLASHMAC;
#else
if (pdata)
- memcpy(iap, pdata->mac, ETH_ALEN);
+ iap = (unsigned char *)&pdata->mac;
#endif
}
@@ -866,6 +874,8 @@ static void fec_enet_adjust_link(struct net_device *ndev)
if (phy_dev->link) {
if (fep->full_duplex != phy_dev->duplex) {
fec_restart(ndev, phy_dev->duplex);
+ /* prevent unnecessary second fec_restart() below */
+ fep->link = phy_dev->link;
status_change = 1;
}
}
@@ -973,13 +983,14 @@ static int fec_enet_mii_probe(struct net_device *ndev)
}
if (phy_id >= PHY_MAX_ADDR) {
- printk(KERN_INFO "%s: no PHY, assuming direct connection "
- "to switch\n", ndev->name);
- strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE);
+ printk(KERN_INFO
+ "%s: no PHY, assuming direct connection to switch\n",
+ ndev->name);
+ strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
phy_id = 0;
}
- snprintf(phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);
+ snprintf(phy_name, sizeof(phy_name), PHY_ID_FMT, mdio_bus_id, phy_id);
phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link, 0,
fep->phy_interface);
if (IS_ERR(phy_dev)) {
@@ -999,8 +1010,9 @@ static int fec_enet_mii_probe(struct net_device *ndev)
fep->link = 0;
fep->full_duplex = 0;
- printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
- "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
+ printk(KERN_INFO
+ "%s: Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+ ndev->name,
fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
fep->phy_dev->irq);
@@ -1034,8 +1046,12 @@ static int fec_enet_mii_init(struct platform_device *pdev)
*/
if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
/* fec1 uses fec0 mii_bus */
- fep->mii_bus = fec0_mii_bus;
- return 0;
+ if (mii_cnt && fec0_mii_bus) {
+ fep->mii_bus = fec0_mii_bus;
+ mii_cnt++;
+ return 0;
+ }
+ return -ENOENT;
}
fep->mii_timeout = 0;
@@ -1064,7 +1080,8 @@ static int fec_enet_mii_init(struct platform_device *pdev)
fep->mii_bus->read = fec_enet_mdio_read;
fep->mii_bus->write = fec_enet_mdio_write;
fep->mii_bus->reset = fec_enet_mdio_reset;
- snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", fep->dev_id + 1);
+ snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, fep->dev_id + 1);
fep->mii_bus->priv = fep;
fep->mii_bus->parent = &pdev->dev;
@@ -1080,6 +1097,8 @@ static int fec_enet_mii_init(struct platform_device *pdev)
if (mdiobus_register(fep->mii_bus))
goto err_out_free_mdio_irq;
+ mii_cnt++;
+
/* save fec0 mii_bus */
if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
fec0_mii_bus = fep->mii_bus;
@@ -1096,11 +1115,11 @@ err_out:
static void fec_enet_mii_remove(struct fec_enet_private *fep)
{
- if (fep->phy_dev)
- phy_disconnect(fep->phy_dev);
- mdiobus_unregister(fep->mii_bus);
- kfree(fep->mii_bus->irq);
- mdiobus_free(fep->mii_bus);
+ if (--mii_cnt == 0) {
+ mdiobus_unregister(fep->mii_bus);
+ kfree(fep->mii_bus->irq);
+ mdiobus_free(fep->mii_bus);
+ }
}
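/*
 * Illustrative sketch, not from the patch: the mii_cnt changes above let
 * the two FEC instances share one MDIO bus safely -- fec1 only borrows
 * fec0's bus if it was actually created, and the bus is unregistered only
 * when the last user goes away.  Minimal stand-alone model of that
 * refcounting:
 */
#include <stdio.h>

static int mii_cnt;		/* users of the shared bus */
static int bus_registered;

static int mii_init(int is_secondary)
{
	if (is_secondary) {
		if (mii_cnt && bus_registered) {	/* borrow fec0's bus */
			mii_cnt++;
			return 0;
		}
		return -1;			/* -ENOENT in the driver */
	}
	bus_registered = 1;			/* register the real bus */
	mii_cnt++;
	return 0;
}

static void mii_remove(void)
{
	if (--mii_cnt == 0) {
		bus_registered = 0;		/* unregister + free the bus */
		printf("bus torn down\n");
	}
}

int main(void)
{
	mii_init(0);			/* fec0 */
	mii_init(1);			/* fec1 reuses the bus */
	mii_remove();			/* fec1 goes away, bus stays */
	mii_remove();			/* last user: bus torn down */
	return 0;
}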
static int fec_enet_get_settings(struct net_device *ndev,
@@ -1137,7 +1156,7 @@ static void fec_enet_get_drvinfo(struct net_device *ndev,
strcpy(info->bus_info, dev_name(&ndev->dev));
}
-static struct ethtool_ops fec_enet_ethtool_ops = {
+static const struct ethtool_ops fec_enet_ethtool_ops = {
.get_settings = fec_enet_get_settings,
.set_settings = fec_enet_set_settings,
.get_drvinfo = fec_enet_get_drvinfo,
@@ -1574,8 +1593,12 @@ fec_probe(struct platform_device *pdev)
for (i = 0; i < FEC_IRQ_NUM; i++) {
irq = platform_get_irq(pdev, i);
- if (i && irq < 0)
- break;
+ if (irq < 0) {
+ if (i)
+ break;
+ ret = irq;
+ goto failed_irq;
+ }
ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
if (ret) {
while (--i >= 0) {
@@ -1586,12 +1609,12 @@ fec_probe(struct platform_device *pdev)
}
}
- fep->clk = clk_get(&pdev->dev, "fec_clk");
+ fep->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(fep->clk)) {
ret = PTR_ERR(fep->clk);
goto failed_clk;
}
- clk_enable(fep->clk);
+ clk_prepare_enable(fep->clk);
ret = fec_enet_init(ndev);
if (ret)
@@ -1614,7 +1637,7 @@ failed_register:
fec_enet_mii_remove(fep);
failed_mii_init:
failed_init:
- clk_disable(fep->clk);
+ clk_disable_unprepare(fep->clk);
clk_put(fep->clk);
failed_clk:
for (i = 0; i < FEC_IRQ_NUM; i++) {
@@ -1638,13 +1661,18 @@ fec_drv_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct fec_enet_private *fep = netdev_priv(ndev);
struct resource *r;
+ int i;
- fec_stop(ndev);
+ unregister_netdev(ndev);
fec_enet_mii_remove(fep);
- clk_disable(fep->clk);
+ for (i = 0; i < FEC_IRQ_NUM; i++) {
+ int irq = platform_get_irq(pdev, i);
+ if (irq > 0)
+ free_irq(irq, ndev);
+ }
+ clk_disable_unprepare(fep->clk);
clk_put(fep->clk);
iounmap(fep->hwp);
- unregister_netdev(ndev);
free_netdev(ndev);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1667,7 +1695,7 @@ fec_suspend(struct device *dev)
fec_stop(ndev);
netif_device_detach(ndev);
}
- clk_disable(fep->clk);
+ clk_disable_unprepare(fep->clk);
return 0;
}
@@ -1678,7 +1706,7 @@ fec_resume(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
- clk_enable(fep->clk);
+ clk_prepare_enable(fep->clk);
if (netif_running(ndev)) {
fec_restart(ndev, fep->full_duplex);
netif_device_attach(ndev);
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 8b2c6d7..8408c62 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -47,6 +47,10 @@
#define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */
#define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */
+#define BM_MIIGSK_CFGR_MII 0x00
+#define BM_MIIGSK_CFGR_RMII 0x01
+#define BM_MIIGSK_CFGR_FRCONT_10M 0x40
+
#else
#define FEC_ECNTRL 0x000 /* Ethernet control reg */
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 5bf5471..910a8e1 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -1171,16 +1171,6 @@ static struct platform_driver fs_enet_driver = {
.remove = fs_enet_remove,
};
-static int __init fs_init(void)
-{
- return platform_driver_register(&fs_enet_driver);
-}
-
-static void __exit fs_cleanup(void)
-{
- platform_driver_unregister(&fs_enet_driver);
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
static void fs_enet_netpoll(struct net_device *dev)
{
@@ -1190,7 +1180,4 @@ static void fs_enet_netpoll(struct net_device *dev)
}
#endif
-/**************************************************************************************/
-
-module_init(fs_init);
-module_exit(fs_cleanup);
+module_platform_driver(fs_enet_driver);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
index b09270b..0f2d1a7 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c
@@ -232,15 +232,4 @@ static struct platform_driver fs_enet_bb_mdio_driver = {
.remove = fs_enet_mdio_remove,
};
-static int fs_enet_mdio_bb_init(void)
-{
- return platform_driver_register(&fs_enet_bb_mdio_driver);
-}
-
-static void fs_enet_mdio_bb_exit(void)
-{
- platform_driver_unregister(&fs_enet_bb_mdio_driver);
-}
-
-module_init(fs_enet_mdio_bb_init);
-module_exit(fs_enet_mdio_bb_exit);
+module_platform_driver(fs_enet_bb_mdio_driver);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
index e0e9d6c..55bb867 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c
@@ -237,15 +237,4 @@ static struct platform_driver fs_enet_fec_mdio_driver = {
.remove = fs_enet_mdio_remove,
};
-static int fs_enet_mdio_fec_init(void)
-{
- return platform_driver_register(&fs_enet_fec_mdio_driver);
-}
-
-static void fs_enet_mdio_fec_exit(void)
-{
- platform_driver_unregister(&fs_enet_fec_mdio_driver);
-}
-
-module_init(fs_enet_mdio_fec_init);
-module_exit(fs_enet_mdio_fec_exit);
+module_platform_driver(fs_enet_fec_mdio_driver);
diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
index 4d9f84b..9eb8159 100644
--- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c
+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c
@@ -360,12 +360,11 @@ static int fsl_pq_mdio_probe(struct platform_device *ofdev)
if (tbiaddr == -1) {
err = -EBUSY;
-
goto err_free_irqs;
+ } else {
+ out_be32(tbipa, tbiaddr);
}
- out_be32(tbipa, tbiaddr);
-
err = of_mdiobus_register(new_bus, np);
if (err) {
printk (KERN_ERR "%s: Cannot register as MDIO bus\n",
@@ -443,15 +442,6 @@ static struct platform_driver fsl_pq_mdio_driver = {
.remove = fsl_pq_mdio_remove,
};
-int __init fsl_pq_mdio_init(void)
-{
- return platform_driver_register(&fsl_pq_mdio_driver);
-}
-module_init(fsl_pq_mdio_init);
+module_platform_driver(fsl_pq_mdio_driver);
-void fsl_pq_mdio_exit(void)
-{
- platform_driver_unregister(&fsl_pq_mdio_driver);
-}
-module_exit(fsl_pq_mdio_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 83199fd..39d160d 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -734,7 +734,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
mac_addr = of_get_mac_address(np);
if (mac_addr)
- memcpy(dev->dev_addr, mac_addr, MAC_ADDR_LEN);
+ memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
if (model && !strcasecmp(model, "TSEC"))
priv->device_flags =
@@ -1984,7 +1984,8 @@ static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
return fcb;
}
-static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
+static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
+ int fcb_length)
{
u8 flags = 0;
@@ -2006,7 +2007,7 @@ static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb)
* frame (skb->data) and the start of the IP hdr.
* l4os is the distance between the start of the
* l3 hdr and the l4 hdr */
- fcb->l3os = (u16)(skb_network_offset(skb) - GMAC_FCB_LEN);
+ fcb->l3os = (u16)(skb_network_offset(skb) - fcb_length);
fcb->l4os = skb_network_header_len(skb);
fcb->flags = flags;
@@ -2046,7 +2047,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
int i, rq = 0, do_tstamp = 0;
u32 bufaddr;
unsigned long flags;
- unsigned int nr_frags, nr_txbds, length;
+ unsigned int nr_frags, nr_txbds, length, fcb_length = GMAC_FCB_LEN;
/*
* TOE=1 frames larger than 2500 bytes may see excess delays
@@ -2070,22 +2071,28 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* check if time stamp should be generated */
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- priv->hwts_tx_en))
+ priv->hwts_tx_en)) {
do_tstamp = 1;
+ fcb_length = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
+ }
/* make space for additional header when fcb is needed */
if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
vlan_tx_tag_present(skb) ||
unlikely(do_tstamp)) &&
- (skb_headroom(skb) < GMAC_FCB_LEN)) {
+ (skb_headroom(skb) < fcb_length)) {
struct sk_buff *skb_new;
- skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
+ skb_new = skb_realloc_headroom(skb, fcb_length);
if (!skb_new) {
dev->stats.tx_errors++;
kfree_skb(skb);
return NETDEV_TX_OK;
}
+
+ /* Steal sock reference for processing TX time stamps */
+ swap(skb_new->sk, skb->sk);
+ swap(skb_new->destructor, skb->destructor);
kfree_skb(skb);
skb = skb_new;
}
@@ -2154,6 +2161,12 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
lstatus = txbdp_start->lstatus;
}
+ /* Add TxPAL between FCB and frame if required */
+ if (unlikely(do_tstamp)) {
+ skb_push(skb, GMAC_TXPAL_LEN);
+ memset(skb->data, 0, GMAC_TXPAL_LEN);
+ }
+
/* Set up checksumming */
if (CHECKSUM_PARTIAL == skb->ip_summed) {
fcb = gfar_add_fcb(skb);
@@ -2164,7 +2177,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb_checksum_help(skb);
} else {
lstatus |= BD_LFLAG(TXBD_TOE);
- gfar_tx_checksum(skb, fcb);
+ gfar_tx_checksum(skb, fcb, fcb_length);
}
}
@@ -2196,9 +2209,9 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
* the full frame length.
*/
if (unlikely(do_tstamp)) {
- txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN;
+ txbdp_tstamp->bufPtr = txbdp_start->bufPtr + fcb_length;
txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
- (skb_headlen(skb) - GMAC_FCB_LEN);
+ (skb_headlen(skb) - fcb_length);
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
} else {
lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
@@ -2306,7 +2319,7 @@ void gfar_check_rx_parser_mode(struct gfar_private *priv)
}
/* Enables and disables VLAN insertion/extraction */
-void gfar_vlan_mode(struct net_device *dev, u32 features)
+void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = NULL;
@@ -2490,7 +2503,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
next = next_txbd(bdp, base, tx_ring_size);
- buflen = next->length + GMAC_FCB_LEN;
+ buflen = next->length + GMAC_FCB_LEN + GMAC_TXPAL_LEN;
} else
buflen = bdp->length;
@@ -2502,6 +2515,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
memset(&shhwtstamps, 0, sizeof(shhwtstamps));
shhwtstamps.hwtstamp = ns_to_ktime(*ns);
+ skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
skb_tstamp_tx(skb, &shhwtstamps);
bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
bdp = next;
@@ -3114,7 +3128,7 @@ static void gfar_set_multi(struct net_device *dev)
static void gfar_clear_exact_match(struct net_device *dev)
{
int idx;
- static const u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
+ static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
gfar_set_mac_for_addr(dev, idx, zero_arr);
@@ -3137,7 +3151,7 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
u32 tempval;
struct gfar_private *priv = netdev_priv(dev);
- u32 result = ether_crc(MAC_ADDR_LEN, addr);
+ u32 result = ether_crc(ETH_ALEN, addr);
int width = priv->hash_width;
u8 whichbit = (result >> (32 - width)) & 0x1f;
u8 whichreg = result >> (32 - width + 5);
@@ -3158,7 +3172,7 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
int idx;
- char tmpbuf[MAC_ADDR_LEN];
+ char tmpbuf[ETH_ALEN];
u32 tempval;
u32 __iomem *macptr = &regs->macstnaddr1;
@@ -3166,8 +3180,8 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num,
/* Now copy it into the mac registers backwards, cuz */
/* little endian is silly */
- for (idx = 0; idx < MAC_ADDR_LEN; idx++)
- tmpbuf[MAC_ADDR_LEN - 1 - idx] = addr[idx];
+ for (idx = 0; idx < ETH_ALEN; idx++)
+ tmpbuf[ETH_ALEN - 1 - idx] = addr[idx];
gfar_write(macptr, *((u32 *) (tmpbuf)));
@@ -3281,16 +3295,4 @@ static struct platform_driver gfar_driver = {
.remove = gfar_remove,
};
-static int __init gfar_init(void)
-{
- return platform_driver_register(&gfar_driver);
-}
-
-static void __exit gfar_exit(void)
-{
- platform_driver_unregister(&gfar_driver);
-}
-
-module_init(gfar_init);
-module_exit(gfar_exit);
-
+module_platform_driver(gfar_driver);
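Editor's note: the gianfar timestamping hunks above grow the reserved header area from GMAC_FCB_LEN to GMAC_FCB_LEN + GMAC_TXPAL_LEN and push a zeroed pad ahead of the frame. A minimal sketch of that headroom pattern, assuming a hypothetical 16-byte pad rather than the driver's exact FCB/TxPAL layout:

#include <linux/skbuff.h>
#include <linux/string.h>

#define EXAMPLE_PAD_LEN 16	/* hypothetical pad size */

static struct sk_buff *example_push_pad(struct sk_buff *skb)
{
	if (skb_headroom(skb) < EXAMPLE_PAD_LEN) {
		struct sk_buff *nskb = skb_realloc_headroom(skb, EXAMPLE_PAD_LEN);

		if (!nskb) {
			kfree_skb(skb);
			return NULL;
		}
		kfree_skb(skb);	/* realloc made a copy; drop the original */
		skb = nskb;
	}

	skb_push(skb, EXAMPLE_PAD_LEN);	/* grow the data area at the front */
	memset(skb->data, 0, EXAMPLE_PAD_LEN);
	return skb;
}

The driver additionally preserves the socket reference across the reallocation (the swap() of sk and destructor above) so TX timestamps can still be delivered; that detail is omitted here.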
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 9aa4377..40c33a7 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -63,6 +63,9 @@ struct ethtool_rx_list {
/* Length for FCB */
#define GMAC_FCB_LEN 8
+/* Length for TxPAL */
+#define GMAC_TXPAL_LEN 16
+
/* Default padding amount */
#define DEFAULT_PADDING 2
@@ -74,9 +77,6 @@ struct ethtool_rx_list {
* will be the next highest multiple of 512 bytes. */
#define INCREMENTAL_BUFFER_SIZE 512
-
-#define MAC_ADDR_LEN 6
-
#define PHY_INIT_TIMEOUT 100000
#define GFAR_PHY_CHANGE_TIME 2
@@ -1179,9 +1179,9 @@ extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
extern void gfar_configure_coalescing(struct gfar_private *priv,
unsigned long tx_mask, unsigned long rx_mask);
void gfar_init_sysfs(struct net_device *dev);
-int gfar_set_features(struct net_device *dev, u32 features);
+int gfar_set_features(struct net_device *dev, netdev_features_t features);
extern void gfar_check_rx_parser_mode(struct gfar_private *priv);
-extern void gfar_vlan_mode(struct net_device *dev, u32 features);
+extern void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
extern const struct ethtool_ops gfar_ethtool_ops;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 212736b..5a3b2e5 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -519,12 +519,12 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
return err;
}
-int gfar_set_features(struct net_device *dev, u32 features)
+int gfar_set_features(struct net_device *dev, netdev_features_t features)
{
struct gfar_private *priv = netdev_priv(dev);
unsigned long flags;
int err = 0, i = 0;
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
if (changed & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
gfar_vlan_mode(dev, features);
@@ -1410,10 +1410,9 @@ static int gfar_optimize_filer_masks(struct filer_table *tab)
/* We need a copy of the filer table because
* we want to change its order */
- temp_table = kmalloc(sizeof(*temp_table), GFP_KERNEL);
+ temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
if (temp_table == NULL)
return -ENOMEM;
- memcpy(temp_table, tab, sizeof(*temp_table));
mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
sizeof(struct gfar_mask_entry), GFP_KERNEL);
@@ -1693,8 +1692,9 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
ret = gfar_set_hash_opts(priv, cmd);
break;
case ETHTOOL_SRXCLSRLINS:
- if (cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
- cmd->fs.ring_cookie >= priv->num_rx_queues) {
+ if ((cmd->fs.ring_cookie != RX_CLS_FLOW_DISC &&
+ cmd->fs.ring_cookie >= priv->num_rx_queues) ||
+ cmd->fs.location >= MAX_FILER_IDX) {
ret = -EINVAL;
break;
}
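Editor's note: the gianfar_ethtool.c hunk above folds a kmalloc() plus memcpy() into kmemdup(). A short sketch with a hypothetical structure:

#include <linux/slab.h>
#include <linux/types.h>

struct example_table {
	u32 entries[16];	/* hypothetical payload */
};

static struct example_table *example_table_copy(const struct example_table *src)
{
	/* allocates sizeof(*src) bytes and copies src; returns NULL on failure */
	return kmemdup(src, sizeof(*src), GFP_KERNEL);
}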
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index f67b8ae..83e0ed75 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -562,21 +562,7 @@ static struct platform_driver gianfar_ptp_driver = {
.remove = gianfar_ptp_remove,
};
-/* module operations */
-
-static int __init ptp_gianfar_init(void)
-{
- return platform_driver_register(&gianfar_ptp_driver);
-}
-
-module_init(ptp_gianfar_init);
-
-static void __exit ptp_gianfar_exit(void)
-{
- platform_driver_unregister(&gianfar_ptp_driver);
-}
-
-module_exit(ptp_gianfar_exit);
+module_platform_driver(gianfar_ptp_driver);
MODULE_AUTHOR("Richard Cochran <richard.cochran@omicron.at>");
MODULE_DESCRIPTION("PTP clock using the eTSEC");
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index b5dc027..ba2dc08 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -443,7 +443,7 @@ static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
static inline int compare_addr(u8 **addr1, u8 **addr2)
{
- return memcmp(addr1, addr2, ENET_NUM_OCTETS_PER_ADDRESS);
+ return memcmp(addr1, addr2, ETH_ALEN);
}
#ifdef DEBUG
diff --git a/drivers/net/ethernet/freescale/ucc_geth.h b/drivers/net/ethernet/freescale/ucc_geth.h
index d12fcad..2e395a2 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.h
+++ b/drivers/net/ethernet/freescale/ucc_geth.h
@@ -20,6 +20,7 @@
#include <linux/kernel.h>
#include <linux/list.h>
+#include <linux/if_ether.h>
#include <asm/immap_qe.h>
#include <asm/qe.h>
@@ -881,7 +882,6 @@ struct ucc_geth_hardware_statistics {
#define TX_RING_MOD_MASK(size) (size-1)
#define RX_RING_MOD_MASK(size) (size-1)
-#define ENET_NUM_OCTETS_PER_ADDRESS 6
#define ENET_GROUP_ADDR 0x01 /* Group address mask
for ethernet
addresses */
@@ -1051,7 +1051,7 @@ enum ucc_geth_num_of_station_addresses {
/* UCC GETH 82xx Ethernet Address Container */
struct enet_addr_container {
- u8 address[ENET_NUM_OCTETS_PER_ADDRESS]; /* ethernet address */
+ u8 address[ETH_ALEN]; /* ethernet address */
enum ucc_geth_enet_address_recognition_location location; /* location in
82xx address
recognition
@@ -1194,7 +1194,7 @@ struct ucc_geth_private {
u16 cpucount[NUM_TX_QUEUES];
u16 __iomem *p_cpucount[NUM_TX_QUEUES];
int indAddrRegUsed[NUM_OF_PADDRS];
- u8 paddr[NUM_OF_PADDRS][ENET_NUM_OCTETS_PER_ADDRESS]; /* ethernet address */
+ u8 paddr[NUM_OF_PADDRS][ETH_ALEN]; /* ethernet address */
u8 numGroupAddrInHash;
u8 numIndAddrInHash;
u8 numIndAddrInReg;
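Editor's note: several hunks above (gianfar, ucc_geth, and e1000_hw.h below) drop private MAC-length macros in favour of ETH_ALEN from <linux/if_ether.h>. A trivial sketch with illustrative names:

#include <linux/if_ether.h>
#include <linux/string.h>
#include <linux/types.h>

struct example_addr_entry {
	u8 addr[ETH_ALEN];	/* ETH_ALEN is 6 */
};

static void example_set_addr(struct example_addr_entry *e, const u8 *mac)
{
	memcpy(e->addr, mac, ETH_ALEN);
}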
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 1541675..ee84b47 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -1058,9 +1058,10 @@ static void fjn_rx(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "PCMCIA 0x%lx", dev->base_addr);
}
static const struct ethtool_ops netdev_ethtool_ops = {
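Editor's note: this get_drvinfo conversion, and the matching ones in the eepro, e100, e1000 and e1000e hunks below, replace unbounded strcpy()/sprintf() with strlcpy()/snprintf() sized to the fixed ethtool_drvinfo fields. A minimal sketch; the driver name, version and bus string are illustrative:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static void example_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	/* bounded copies cannot overrun the fixed 32-byte drvinfo fields */
	strlcpy(info->driver, "example", sizeof(info->driver));
	strlcpy(info->version, "1.0", sizeof(info->version));
	snprintf(info->bus_info, sizeof(info->bus_info),
		 "ISA 0x%lx", dev->base_addr);
}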
diff --git a/drivers/net/ethernet/i825xx/eepro.c b/drivers/net/ethernet/i825xx/eepro.c
index 067c460..114cda77 100644
--- a/drivers/net/ethernet/i825xx/eepro.c
+++ b/drivers/net/ethernet/i825xx/eepro.c
@@ -1726,9 +1726,10 @@ static int eepro_ethtool_get_settings(struct net_device *dev,
static void eepro_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strcpy(drvinfo->driver, DRV_NAME);
- strcpy(drvinfo->version, DRV_VERSION);
- sprintf(drvinfo->bus_info, "ISA 0x%lx", dev->base_addr);
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ snprintf(drvinfo->bus_info, sizeof(drvinfo->bus_info),
+ "ISA 0x%lx", dev->base_addr);
}
static const struct ethtool_ops eepro_ethtool_ops = {
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
index 05b7359..6bdd8e3 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
@@ -263,7 +263,7 @@ static void ehea_get_ethtool_stats(struct net_device *dev,
data[i++] = atomic_read(&port->port_res[k].swqe_avail);
}
-const struct ethtool_ops ehea_ethtool_ops = {
+static const struct ethtool_ops ehea_ethtool_ops = {
.get_settings = ehea_get_settings,
.get_drvinfo = ehea_get_drvinfo,
.get_msglevel = ehea_get_msglevel,
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index bfeccbf..e6893cd 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -94,8 +94,8 @@ static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
static DEFINE_MUTEX(dlpar_mem_lock);
-struct ehea_fw_handle_array ehea_fw_handles;
-struct ehea_bcmc_reg_array ehea_bcmc_regs;
+static struct ehea_fw_handle_array ehea_fw_handles;
+static struct ehea_bcmc_reg_array ehea_bcmc_regs;
static int __devinit ehea_probe_adapter(struct platform_device *dev,
@@ -133,7 +133,7 @@ void ehea_dump(void *adr, int len, char *msg)
}
}
-void ehea_schedule_port_reset(struct ehea_port *port)
+static void ehea_schedule_port_reset(struct ehea_port *port)
{
if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
schedule_work(&port->reset_task);
@@ -336,7 +336,9 @@ static struct rtnl_link_stats64 *ehea_get_stats64(struct net_device *dev,
stats->tx_bytes = tx_bytes;
stats->rx_packets = rx_packets;
- return &port->stats;
+ stats->multicast = port->stats.multicast;
+ stats->rx_errors = port->stats.rx_errors;
+ return stats;
}
static void ehea_update_stats(struct work_struct *work)
@@ -1404,7 +1406,7 @@ out:
return ret;
}
-int ehea_gen_smrs(struct ehea_port_res *pr)
+static int ehea_gen_smrs(struct ehea_port_res *pr)
{
int ret;
struct ehea_adapter *adapter = pr->port->adapter;
@@ -1426,7 +1428,7 @@ out:
return -EIO;
}
-int ehea_rem_smrs(struct ehea_port_res *pr)
+static int ehea_rem_smrs(struct ehea_port_res *pr)
{
if ((ehea_rem_mr(&pr->send_mr)) ||
(ehea_rem_mr(&pr->recv_mr)))
@@ -2114,17 +2116,19 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
-static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct ehea_port *port = netdev_priv(dev);
struct ehea_adapter *adapter = port->adapter;
struct hcp_ehea_port_cb1 *cb1;
int index;
u64 hret;
+ int err = 0;
cb1 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb1) {
pr_err("no mem for cb1\n");
+ err = -ENOMEM;
goto out;
}
@@ -2132,6 +2136,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
if (hret != H_SUCCESS) {
pr_err("query_ehea_port failed\n");
+ err = -EINVAL;
goto out;
}
@@ -2140,24 +2145,28 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
- if (hret != H_SUCCESS)
+ if (hret != H_SUCCESS) {
pr_err("modify_ehea_port failed\n");
+ err = -EINVAL;
+ }
out:
free_page((unsigned long)cb1);
- return;
+ return err;
}
-static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct ehea_port *port = netdev_priv(dev);
struct ehea_adapter *adapter = port->adapter;
struct hcp_ehea_port_cb1 *cb1;
int index;
u64 hret;
+ int err = 0;
cb1 = (void *)get_zeroed_page(GFP_KERNEL);
if (!cb1) {
pr_err("no mem for cb1\n");
+ err = -ENOMEM;
goto out;
}
@@ -2165,6 +2174,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
if (hret != H_SUCCESS) {
pr_err("query_ehea_port failed\n");
+ err = -EINVAL;
goto out;
}
@@ -2173,13 +2183,16 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
H_PORT_CB1, H_PORT_CB1_ALL, cb1);
- if (hret != H_SUCCESS)
+ if (hret != H_SUCCESS) {
pr_err("modify_ehea_port failed\n");
+ err = -EINVAL;
+ }
out:
free_page((unsigned long)cb1);
+ return err;
}
-int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
+static int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
{
int ret = -EIO;
u64 hret;
@@ -2520,7 +2533,7 @@ static void ehea_flush_sq(struct ehea_port *port)
}
}
-int ehea_stop_qps(struct net_device *dev)
+static int ehea_stop_qps(struct net_device *dev)
{
struct ehea_port *port = netdev_priv(dev);
struct ehea_adapter *adapter = port->adapter;
@@ -2589,7 +2602,7 @@ out:
return ret;
}
-void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
+static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
{
struct ehea_qp qp = *orig_qp;
struct ehea_qp_init_attr *init_attr = &qp.init_attr;
@@ -2622,7 +2635,7 @@ void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
}
}
-int ehea_restart_qps(struct net_device *dev)
+static int ehea_restart_qps(struct net_device *dev)
{
struct ehea_port *port = netdev_priv(dev);
struct ehea_adapter *adapter = port->adapter;
@@ -2813,7 +2826,7 @@ static void ehea_tx_watchdog(struct net_device *dev)
ehea_schedule_port_reset(port);
}
-int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
+static int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
{
struct hcp_query_ehea *cb;
u64 hret;
@@ -2841,7 +2854,7 @@ out:
return ret;
}
-int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
+static int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
{
struct hcp_ehea_port_cb4 *cb4;
u64 hret;
@@ -2955,7 +2968,7 @@ static const struct net_device_ops ehea_netdev_ops = {
.ndo_tx_timeout = ehea_tx_watchdog,
};
-struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
+static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
u32 logical_port_id,
struct device_node *dn)
{
@@ -3226,7 +3239,7 @@ static ssize_t ehea_remove_port(struct device *dev,
static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
-int ehea_create_device_sysfs(struct platform_device *dev)
+static int ehea_create_device_sysfs(struct platform_device *dev)
{
int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
if (ret)
@@ -3237,7 +3250,7 @@ out:
return ret;
}
-void ehea_remove_device_sysfs(struct platform_device *dev)
+static void ehea_remove_device_sysfs(struct platform_device *dev)
{
device_remove_file(&dev->dev, &dev_attr_probe_port);
device_remove_file(&dev->dev, &dev_attr_remove_port);
@@ -3368,7 +3381,7 @@ static int __devexit ehea_remove(struct platform_device *dev)
return 0;
}
-void ehea_crash_handler(void)
+static void ehea_crash_handler(void)
{
int i;
@@ -3480,7 +3493,7 @@ static ssize_t ehea_show_capabilities(struct device_driver *drv,
static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
ehea_show_capabilities, NULL);
-int __init ehea_module_init(void)
+static int __init ehea_module_init(void)
{
int ret;
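Editor's note: the ehea hunks above, and the e1000/e1000e hunks below, convert ndo_vlan_rx_add_vid()/ndo_vlan_rx_kill_vid() from void to int so filter-programming failures can be reported to the stack. A minimal sketch of the new shape; the bodies are illustrative only:

#include <linux/netdevice.h>

static int example_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	/* program the VLAN filter table here; return -errno on failure */
	return 0;
}

static int example_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	/* clear the VLAN filter entry here; return -errno on failure */
	return 0;
}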
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index 95b9f4f..c25b05b 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -34,9 +34,7 @@
#include "ehea_phyp.h"
#include "ehea_qmr.h"
-struct ehea_bmap *ehea_bmap = NULL;
-
-
+static struct ehea_bmap *ehea_bmap;
static void *hw_qpageit_get_inc(struct hw_queue *queue)
{
@@ -212,7 +210,7 @@ out_nomem:
return NULL;
}
-u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
+static u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
{
u64 hret;
u64 adapter_handle = cq->adapter->handle;
@@ -337,7 +335,7 @@ struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
return eqe;
}
-u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
+static u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
{
u64 hret;
unsigned long flags;
@@ -381,7 +379,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
/**
* allocates memory for a queue and registers pages in phyp
*/
-int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
+static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
int nr_pages, int wqe_size, int act_nr_sges,
struct ehea_adapter *adapter, int h_call_q_selector)
{
@@ -516,7 +514,7 @@ out_freemem:
return NULL;
}
-u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
+static u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
{
u64 hret;
struct ehea_qp_init_attr *qp_attr = &qp->init_attr;
@@ -976,7 +974,7 @@ int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
return 0;
}
-void print_error_data(u64 *data)
+static void print_error_data(u64 *data)
{
int length;
u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]);
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index ed79b2d..2abce96 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2924,6 +2924,9 @@ static int __devexit emac_remove(struct platform_device *ofdev)
if (emac_has_feature(dev, EMAC_FTR_HAS_ZMII))
zmii_detach(dev->zmii_dev, dev->zmii_port);
+ busy_phy_map &= ~(1 << dev->phy.address);
+ DBG(dev, "busy_phy_map now %#x" NL, busy_phy_map);
+
mal_unregister_commac(dev->mal, &dev->commac);
emac_put_deps(dev);
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index b1cd41b..e877371 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -735,7 +735,8 @@ static void netdev_get_drvinfo(struct net_device *dev,
sizeof(info->version) - 1);
}
-static u32 ibmveth_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t ibmveth_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
/*
* Since the ibmveth firmware interface does not have the
@@ -838,7 +839,8 @@ static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
return rc1 ? rc1 : rc2;
}
-static int ibmveth_set_features(struct net_device *dev, u32 features)
+static int ibmveth_set_features(struct net_device *dev,
+ netdev_features_t features)
{
struct ibmveth_adapter *adapter = netdev_priv(dev);
int rx_csum = !!(features & NETIF_F_RXCSUM);
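Editor's note: ibmveth here, and gianfar and e1000 elsewhere in this patch, switch their feature callbacks from u32 to netdev_features_t, the wider type used by ndo_fix_features/ndo_set_features. A sketch of the callback signatures with an illustrative body:

#include <linux/netdevice.h>
#include <linux/netdev_features.h>

static netdev_features_t example_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	/* keep TX VLAN acceleration tied to RX, as several drivers do */
	if (!(features & NETIF_F_HW_VLAN_RX))
		features &= ~NETIF_F_HW_VLAN_TX;
	return features;
}

static int example_set_features(struct net_device *dev,
				netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;

	if (changed & NETIF_F_RXCSUM) {
		/* reprogram receive checksum offload in hardware here */
	}
	return 0;
}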
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index 8fd80a0..075451d 100644
--- a/drivers/net/ethernet/icplus/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -371,16 +371,9 @@ static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val)
}
/* The last cycle is a tri-state, so read from the PHY. */
- for (j = 7; j < 8; j++) {
- for (i = 0; i < p[j].len; i++) {
- ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);
-
- p[j].field |= ((ipg_r8(PHY_CTRL) &
- IPG_PC_MGMTDATA) >> 1) << (p[j].len - 1 - i);
-
- ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
- }
- }
+ ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);
+ ipg_r8(PHY_CTRL);
+ ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
}
static void ipg_set_led_mode(struct net_device *dev)
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 5a2fdf7..9436397 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2376,10 +2376,10 @@ static void e100_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
struct nic *nic = netdev_priv(netdev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "N/A");
- strcpy(info->bus_info, pci_name(nic->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(nic->pdev),
+ sizeof(info->bus_info));
}
#define E100_PHY_REGS 0x1C
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 2b223ac..3103f0b 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -515,14 +515,14 @@ static void e1000_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- char firmware_version[32];
- strncpy(drvinfo->driver, e1000_driver_name, 32);
- strncpy(drvinfo->version, e1000_driver_version, 32);
+ strlcpy(drvinfo->driver, e1000_driver_name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, e1000_driver_version,
+ sizeof(drvinfo->version));
- sprintf(firmware_version, "N/A");
- strncpy(drvinfo->fw_version, firmware_version, 32);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->regdump_len = e1000_get_regs_len(netdev);
drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.h b/drivers/net/ethernet/intel/e1000/e1000_hw.h
index 5c9a840..f6c4d7e 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.h
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.h
@@ -448,7 +448,6 @@ void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
#define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E
#define NODE_ADDRESS_SIZE 6
-#define ETH_LENGTH_OF_ADDRESS 6
/* MAC decode size is 128K - This is the size of BAR0 */
#define MAC_DECODE_SIZE (128 * 1024)
@@ -813,8 +812,7 @@ struct e1000_ffvt_entry {
#define E1000_FLA 0x0001C /* Flash Access - RW */
#define E1000_MDIC 0x00020 /* MDI Control - RW */
-extern void __iomem *ce4100_gbe_mdio_base_virt;
-#define INTEL_CE_GBE_MDIO_RCOMP_BASE (ce4100_gbe_mdio_base_virt)
+#define INTEL_CE_GBE_MDIO_RCOMP_BASE (hw->ce4100_gbe_mdio_base_virt)
#define E1000_MDIO_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0)
#define E1000_MDIO_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4)
#define E1000_MDIO_DRV (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8)
@@ -1344,6 +1342,7 @@ struct e1000_hw_stats {
struct e1000_hw {
u8 __iomem *hw_addr;
u8 __iomem *flash_address;
+ void __iomem *ce4100_gbe_mdio_base_virt;
e1000_mac_type mac_type;
e1000_phy_type phy_type;
u32 phy_init_script;
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index cf480b5..d94d64b 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -33,11 +33,6 @@
#include <linux/bitops.h>
#include <linux/if_vlan.h>
-/* Intel Media SOC GbE MDIO physical base address */
-static unsigned long ce4100_gbe_mdio_base_phy;
-/* Intel Media SOC GbE MDIO virtual base address */
-void __iomem *ce4100_gbe_mdio_base_virt;
-
char e1000_driver_name[] = "e1000";
static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#define DRV_VERSION "7.3.21-k8-NAPI"
@@ -167,9 +162,10 @@ static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
struct sk_buff *skb);
static bool e1000_vlan_used(struct e1000_adapter *adapter);
-static void e1000_vlan_mode(struct net_device *netdev, u32 features);
-static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
-static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+static void e1000_vlan_mode(struct net_device *netdev,
+ netdev_features_t features);
+static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);
#ifdef CONFIG_PM
@@ -806,7 +802,8 @@ static int e1000_is_need_ioport(struct pci_dev *pdev)
}
}
-static u32 e1000_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t e1000_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -820,10 +817,11 @@ static u32 e1000_fix_features(struct net_device *netdev, u32 features)
return features;
}
-static int e1000_set_features(struct net_device *netdev, u32 features)
+static int e1000_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- u32 changed = features ^ netdev->features;
+ netdev_features_t changed = features ^ netdev->features;
if (changed & NETIF_F_HW_VLAN_RX)
e1000_vlan_mode(netdev, features);
@@ -1051,11 +1049,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
err = -EIO;
if (hw->mac_type == e1000_ce4100) {
- ce4100_gbe_mdio_base_phy = pci_resource_start(pdev, BAR_1);
- ce4100_gbe_mdio_base_virt = ioremap(ce4100_gbe_mdio_base_phy,
+ hw->ce4100_gbe_mdio_base_virt =
+ ioremap(pci_resource_start(pdev, BAR_1),
pci_resource_len(pdev, BAR_1));
- if (!ce4100_gbe_mdio_base_virt)
+ if (!hw->ce4100_gbe_mdio_base_virt)
goto err_mdio_ioremap;
}
@@ -1182,7 +1180,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
if (global_quad_port_a != 0)
adapter->eeprom_wol = 0;
else
- adapter->quad_port_a = 1;
+ adapter->quad_port_a = true;
/* Reset for multiple quad port adapters */
if (++global_quad_port_a == 4)
global_quad_port_a = 0;
@@ -1246,7 +1244,7 @@ err_eeprom:
err_dma:
err_sw_init:
err_mdio_ioremap:
- iounmap(ce4100_gbe_mdio_base_virt);
+ iounmap(hw->ce4100_gbe_mdio_base_virt);
iounmap(hw->hw_addr);
err_ioremap:
free_netdev(netdev);
@@ -1283,6 +1281,8 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
kfree(adapter->tx_ring);
kfree(adapter->rx_ring);
+ if (hw->mac_type == e1000_ce4100)
+ iounmap(hw->ce4100_gbe_mdio_base_virt);
iounmap(hw->hw_addr);
if (hw->flash_address)
iounmap(hw->flash_address);
@@ -1676,7 +1676,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
* need this to apply a workaround later in the send path. */
if (hw->mac_type == e1000_82544 &&
hw->bus_type == e1000_bus_type_pcix)
- adapter->pcix_82544 = 1;
+ adapter->pcix_82544 = true;
ew32(TCTL, tctl);
@@ -1999,7 +1999,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
- tx_ring->last_tx_tso = 0;
+ tx_ring->last_tx_tso = false;
writel(0, hw->hw_addr + tx_ring->tdh);
writel(0, hw->hw_addr + tx_ring->tdt);
@@ -2848,7 +2848,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
* DMA'd to the controller */
if (!skb->data_len && tx_ring->last_tx_tso &&
!skb_is_gso(skb)) {
- tx_ring->last_tx_tso = 0;
+ tx_ring->last_tx_tso = false;
size -= 4;
}
@@ -3216,7 +3216,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
if (likely(tso)) {
if (likely(hw->mac_type != e1000_82544))
- tx_ring->last_tx_tso = 1;
+ tx_ring->last_tx_tso = true;
tx_flags |= E1000_TX_FLAGS_TSO;
} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
tx_flags |= E1000_TX_FLAGS_CSUM;
@@ -4577,7 +4577,8 @@ static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
e1000_irq_enable(adapter);
}
-static void e1000_vlan_mode(struct net_device *netdev, u32 features)
+static void e1000_vlan_mode(struct net_device *netdev,
+ netdev_features_t features)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -4600,7 +4601,7 @@ static void e1000_vlan_mode(struct net_device *netdev, u32 features)
e1000_irq_enable(adapter);
}
-static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -4609,7 +4610,7 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
if ((hw->mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
(vid == adapter->mng_vlan_id))
- return;
+ return 0;
if (!e1000_vlan_used(adapter))
e1000_vlan_filter_on_off(adapter, true);
@@ -4621,9 +4622,11 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
e1000_write_vfta(hw, index, vfta);
set_bit(vid, adapter->active_vlans);
+
+ return 0;
}
-static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -4644,6 +4647,8 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
if (!e1000_vlan_used(adapter))
e1000_vlan_filter_on_off(adapter, false);
+
+ return 0;
}
static void e1000_restore_vlan(struct e1000_adapter *adapter)
@@ -4716,8 +4721,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
netif_device_detach(netdev);
- mutex_lock(&adapter->mutex);
-
if (netif_running(netdev)) {
WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
e1000_down(adapter);
@@ -4725,10 +4728,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
#ifdef CONFIG_PM
retval = pci_save_state(pdev);
- if (retval) {
- mutex_unlock(&adapter->mutex);
+ if (retval)
return retval;
- }
#endif
status = er32(STATUS);
@@ -4739,12 +4740,14 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
e1000_setup_rctl(adapter);
e1000_set_rx_mode(netdev);
+ rctl = er32(RCTL);
+
/* turn on all-multi mode if wake on multicast is enabled */
- if (wufc & E1000_WUFC_MC) {
- rctl = er32(RCTL);
+ if (wufc & E1000_WUFC_MC)
rctl |= E1000_RCTL_MPE;
- ew32(RCTL, rctl);
- }
+
+ /* enable receives in the hardware */
+ ew32(RCTL, rctl | E1000_RCTL_EN);
if (hw->mac_type >= e1000_82540) {
ctrl = er32(CTRL);
@@ -4783,8 +4786,6 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
if (netif_running(netdev))
e1000_free_irq(adapter);
- mutex_unlock(&adapter->mutex);
-
pci_disable_device(pdev);
return 0;
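Editor's note: the e1000 CE4100 hunks above move the MDIO mapping from a file-scope global into struct e1000_hw and unmap it on both the probe error path and remove. A sketch of the per-device mapping pattern; the structure and BAR index are illustrative:

#include <linux/pci.h>
#include <linux/io.h>

struct example_hw {
	void __iomem *mdio_base;	/* per-device state, not a global */
};

static int example_map_mdio(struct pci_dev *pdev, struct example_hw *hw)
{
	hw->mdio_base = ioremap(pci_resource_start(pdev, 1),
				pci_resource_len(pdev, 1));
	return hw->mdio_base ? 0 : -EIO;
}

static void example_unmap_mdio(struct example_hw *hw)
{
	if (hw->mdio_base)
		iounmap(hw->mdio_base);
}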
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 9fe18d1..f478a22 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -309,6 +309,7 @@ struct e1000_adapter {
u32 txd_cmd;
bool detect_tx_hung;
+ bool tx_hang_recheck;
u8 tx_timeout_factor;
u32 tx_int_delay;
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 69c9d21..fb2c28e 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -579,26 +579,24 @@ static void e1000_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- char firmware_version[32];
- strncpy(drvinfo->driver, e1000e_driver_name,
- sizeof(drvinfo->driver) - 1);
- strncpy(drvinfo->version, e1000e_driver_version,
- sizeof(drvinfo->version) - 1);
+ strlcpy(drvinfo->driver, e1000e_driver_name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, e1000e_driver_version,
+ sizeof(drvinfo->version));
/*
* EEPROM image version # is reported as firmware version # for
* PCI-E controllers
*/
- snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d-%d",
(adapter->eeprom_vers & 0xF000) >> 12,
(adapter->eeprom_vers & 0x0FF0) >> 4,
(adapter->eeprom_vers & 0x000F));
- strncpy(drvinfo->fw_version, firmware_version,
- sizeof(drvinfo->fw_version) - 1);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
- sizeof(drvinfo->bus_info) - 1);
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->regdump_len = e1000_get_regs_len(netdev);
drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index a855db1..3911401 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -163,16 +163,13 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
regs[n] = __er32(hw, E1000_TARC(n));
break;
default:
- printk(KERN_INFO "%-15s %08x\n",
- reginfo->name, __er32(hw, reginfo->ofs));
+ pr_info("%-15s %08x\n",
+ reginfo->name, __er32(hw, reginfo->ofs));
return;
}
snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
- printk(KERN_INFO "%-15s ", rname);
- for (n = 0; n < 2; n++)
- printk(KERN_CONT "%08x ", regs[n]);
- printk(KERN_CONT "\n");
+ pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
}
/*
@@ -208,16 +205,15 @@ static void e1000e_dump(struct e1000_adapter *adapter)
/* Print netdevice Info */
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
- printk(KERN_INFO "Device Name state "
- "trans_start last_rx\n");
- printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
- netdev->name, netdev->state, netdev->trans_start,
- netdev->last_rx);
+ pr_info("Device Name state trans_start last_rx\n");
+ pr_info("%-15s %016lX %016lX %016lX\n",
+ netdev->name, netdev->state, netdev->trans_start,
+ netdev->last_rx);
}
/* Print Registers */
dev_info(&adapter->pdev->dev, "Register Dump\n");
- printk(KERN_INFO " Register Name Value\n");
+ pr_info(" Register Name Value\n");
for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
reginfo->name; reginfo++) {
e1000_regdump(hw, reginfo);
@@ -228,15 +224,14 @@ static void e1000e_dump(struct e1000_adapter *adapter)
goto exit;
dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
- printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
- " leng ntw timestamp\n");
+ pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
- printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
- 0, tx_ring->next_to_use, tx_ring->next_to_clean,
- (unsigned long long)buffer_info->dma,
- buffer_info->length,
- buffer_info->next_to_watch,
- (unsigned long long)buffer_info->time_stamp);
+ pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
+ 0, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (unsigned long long)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (unsigned long long)buffer_info->time_stamp);
/* Print Tx Ring */
if (!netif_msg_tx_done(adapter))
@@ -271,37 +266,32 @@ static void e1000e_dump(struct e1000_adapter *adapter)
* +----------------------------------------------------------------+
* 63 48 47 40 39 36 35 32 31 24 23 20 19 0
*/
- printk(KERN_INFO "Tl[desc] [address 63:0 ] [SpeCssSCmCsLen]"
- " [bi->dma ] leng ntw timestamp bi->skb "
- "<-- Legacy format\n");
- printk(KERN_INFO "Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
- " [bi->dma ] leng ntw timestamp bi->skb "
- "<-- Ext Context format\n");
- printk(KERN_INFO "Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen]"
- " [bi->dma ] leng ntw timestamp bi->skb "
- "<-- Ext Data format\n");
+ pr_info("Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp bi->skb <-- Legacy format\n");
+ pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Context format\n");
+ pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Data format\n");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ const char *next_desc;
tx_desc = E1000_TX_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
u0 = (struct my_u0 *)tx_desc;
- printk(KERN_INFO "T%c[0x%03X] %016llX %016llX %016llX "
- "%04X %3X %016llX %p",
- (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
- ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i,
- (unsigned long long)le64_to_cpu(u0->a),
- (unsigned long long)le64_to_cpu(u0->b),
- (unsigned long long)buffer_info->dma,
- buffer_info->length, buffer_info->next_to_watch,
- (unsigned long long)buffer_info->time_stamp,
- buffer_info->skb);
if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
- printk(KERN_CONT " NTC/U\n");
+ next_desc = " NTC/U";
else if (i == tx_ring->next_to_use)
- printk(KERN_CONT " NTU\n");
+ next_desc = " NTU";
else if (i == tx_ring->next_to_clean)
- printk(KERN_CONT " NTC\n");
+ next_desc = " NTC";
else
- printk(KERN_CONT "\n");
+ next_desc = "";
+ pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n",
+ (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
+ ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
+ i,
+ (unsigned long long)le64_to_cpu(u0->a),
+ (unsigned long long)le64_to_cpu(u0->b),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->length, buffer_info->next_to_watch,
+ (unsigned long long)buffer_info->time_stamp,
+ buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
@@ -312,9 +302,9 @@ static void e1000e_dump(struct e1000_adapter *adapter)
/* Print Rx Ring Summary */
rx_ring_summary:
dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
- printk(KERN_INFO "Queue [NTU] [NTC]\n");
- printk(KERN_INFO " %5d %5X %5X\n", 0,
- rx_ring->next_to_use, rx_ring->next_to_clean);
+ pr_info("Queue [NTU] [NTC]\n");
+ pr_info(" %5d %5X %5X\n",
+ 0, rx_ring->next_to_use, rx_ring->next_to_clean);
/* Print Rx Ring */
if (!netif_msg_rx_status(adapter))
@@ -337,10 +327,7 @@ rx_ring_summary:
* 24 | Buffer Address 3 [63:0] |
* +-----------------------------------------------------+
*/
- printk(KERN_INFO "R [desc] [buffer 0 63:0 ] "
- "[buffer 1 63:0 ] "
- "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] "
- "[bi->skb] <-- Ext Pkt Split format\n");
+ pr_info("R [desc] [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] [bi->skb] <-- Ext Pkt Split format\n");
/* [Extended] Receive Descriptor (Write-Back) Format
*
* 63 48 47 32 31 13 12 8 7 4 3 0
@@ -352,35 +339,40 @@ rx_ring_summary:
* +------------------------------------------------------+
* 63 48 47 32 31 20 19 0
*/
- printk(KERN_INFO "RWB[desc] [ck ipid mrqhsh] "
- "[vl l0 ee es] "
- "[ l3 l2 l1 hs] [reserved ] ---------------- "
- "[bi->skb] <-- Ext Rx Write-Back format\n");
+ pr_info("RWB[desc] [ck ipid mrqhsh] [vl l0 ee es] [ l3 l2 l1 hs] [reserved ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
+ const char *next_desc;
buffer_info = &rx_ring->buffer_info[i];
rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
u1 = (struct my_u1 *)rx_desc_ps;
staterr =
le32_to_cpu(rx_desc_ps->wb.middle.status_error);
+
+ if (i == rx_ring->next_to_use)
+ next_desc = " NTU";
+ else if (i == rx_ring->next_to_clean)
+ next_desc = " NTC";
+ else
+ next_desc = "";
+
if (staterr & E1000_RXD_STAT_DD) {
/* Descriptor Done */
- printk(KERN_INFO "RWB[0x%03X] %016llX "
- "%016llX %016llX %016llX "
- "---------------- %p", i,
- (unsigned long long)le64_to_cpu(u1->a),
- (unsigned long long)le64_to_cpu(u1->b),
- (unsigned long long)le64_to_cpu(u1->c),
- (unsigned long long)le64_to_cpu(u1->d),
- buffer_info->skb);
+ pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n",
+ "RWB", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)le64_to_cpu(u1->c),
+ (unsigned long long)le64_to_cpu(u1->d),
+ buffer_info->skb, next_desc);
} else {
- printk(KERN_INFO "R [0x%03X] %016llX "
- "%016llX %016llX %016llX %016llX %p", i,
- (unsigned long long)le64_to_cpu(u1->a),
- (unsigned long long)le64_to_cpu(u1->b),
- (unsigned long long)le64_to_cpu(u1->c),
- (unsigned long long)le64_to_cpu(u1->d),
- (unsigned long long)buffer_info->dma,
- buffer_info->skb);
+ pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX %016llX %p%s\n",
+ "R ", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)le64_to_cpu(u1->c),
+ (unsigned long long)le64_to_cpu(u1->d),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter))
print_hex_dump(KERN_INFO, "",
@@ -388,13 +380,6 @@ rx_ring_summary:
phys_to_virt(buffer_info->dma),
adapter->rx_ps_bsize0, true);
}
-
- if (i == rx_ring->next_to_use)
- printk(KERN_CONT " NTU\n");
- else if (i == rx_ring->next_to_clean)
- printk(KERN_CONT " NTC\n");
- else
- printk(KERN_CONT "\n");
}
break;
default:
@@ -407,9 +392,7 @@ rx_ring_summary:
* 8 | Reserved |
* +-----------------------------------------------------+
*/
- printk(KERN_INFO "R [desc] [buf addr 63:0 ] "
- "[reserved 63:0 ] [bi->dma ] "
- "[bi->skb] <-- Ext (Read) format\n");
+ pr_info("R [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma ] [bi->skb] <-- Ext (Read) format\n");
/* Extended Receive Descriptor (Write-Back) Format
*
* 63 48 47 32 31 24 23 4 3 0
@@ -423,29 +406,37 @@ rx_ring_summary:
* +------------------------------------------------------+
* 63 48 47 32 31 20 19 0
*/
- printk(KERN_INFO "RWB[desc] [cs ipid mrq] "
- "[vt ln xe xs] "
- "[bi->skb] <-- Ext (Write-Back) format\n");
+ pr_info("RWB[desc] [cs ipid mrq] [vt ln xe xs] [bi->skb] <-- Ext (Write-Back) format\n");
for (i = 0; i < rx_ring->count; i++) {
+ const char *next_desc;
+
buffer_info = &rx_ring->buffer_info[i];
rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
u1 = (struct my_u1 *)rx_desc;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+ if (i == rx_ring->next_to_use)
+ next_desc = " NTU";
+ else if (i == rx_ring->next_to_clean)
+ next_desc = " NTC";
+ else
+ next_desc = "";
+
if (staterr & E1000_RXD_STAT_DD) {
/* Descriptor Done */
- printk(KERN_INFO "RWB[0x%03X] %016llX "
- "%016llX ---------------- %p", i,
- (unsigned long long)le64_to_cpu(u1->a),
- (unsigned long long)le64_to_cpu(u1->b),
- buffer_info->skb);
+ pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n",
+ "RWB", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ buffer_info->skb, next_desc);
} else {
- printk(KERN_INFO "R [0x%03X] %016llX "
- "%016llX %016llX %p", i,
- (unsigned long long)le64_to_cpu(u1->a),
- (unsigned long long)le64_to_cpu(u1->b),
- (unsigned long long)buffer_info->dma,
- buffer_info->skb);
+ pr_info("%s[0x%03X] %016llX %016llX %016llX %p%s\n",
+ "R ", i,
+ (unsigned long long)le64_to_cpu(u1->a),
+ (unsigned long long)le64_to_cpu(u1->b),
+ (unsigned long long)buffer_info->dma,
+ buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter))
print_hex_dump(KERN_INFO, "",
@@ -456,13 +447,6 @@ rx_ring_summary:
adapter->rx_buffer_len,
true);
}
-
- if (i == rx_ring->next_to_use)
- printk(KERN_CONT " NTU\n");
- else if (i == rx_ring->next_to_clean)
- printk(KERN_CONT " NTC\n");
- else
- printk(KERN_CONT "\n");
}
}
@@ -875,7 +859,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
u32 length, staterr;
unsigned int i;
int cleaned_count = 0;
- bool cleaned = 0;
+ bool cleaned = false;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
i = rx_ring->next_to_clean;
@@ -904,7 +888,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
next_buffer = &rx_ring->buffer_info[i];
- cleaned = 1;
+ cleaned = true;
cleaned_count++;
dma_unmap_single(&pdev->dev,
buffer_info->dma,
@@ -1030,6 +1014,7 @@ static void e1000_print_hw_hang(struct work_struct *work)
struct e1000_adapter *adapter = container_of(work,
struct e1000_adapter,
print_hang_task);
+ struct net_device *netdev = adapter->netdev;
struct e1000_ring *tx_ring = adapter->tx_ring;
unsigned int i = tx_ring->next_to_clean;
unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
@@ -1041,6 +1026,21 @@ static void e1000_print_hw_hang(struct work_struct *work)
if (test_bit(__E1000_DOWN, &adapter->state))
return;
+ if (!adapter->tx_hang_recheck &&
+ (adapter->flags2 & FLAG2_DMA_BURST)) {
+ /* The write-back may simply be delayed: flush pending descriptor
+ * writebacks to memory and then check for the hang again
+ */
+ ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+ /* execute the writes immediately */
+ e1e_flush();
+ adapter->tx_hang_recheck = true;
+ return;
+ }
+ /* Real hang detected */
+ adapter->tx_hang_recheck = false;
+ netif_stop_queue(netdev);
+
e1e_rphy(hw, PHY_STATUS, &phy_status);
e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
@@ -1095,6 +1095,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
unsigned int i, eop;
unsigned int count = 0;
unsigned int total_tx_bytes = 0, total_tx_packets = 0;
+ unsigned int bytes_compl = 0, pkts_compl = 0;
i = tx_ring->next_to_clean;
eop = tx_ring->buffer_info[i].next_to_watch;
@@ -1112,6 +1113,10 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
if (cleaned) {
total_tx_packets += buffer_info->segs;
total_tx_bytes += buffer_info->bytecount;
+ if (buffer_info->skb) {
+ bytes_compl += buffer_info->skb->len;
+ pkts_compl++;
+ }
}
e1000_put_txbuf(adapter, buffer_info);
@@ -1130,6 +1135,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
tx_ring->next_to_clean = i;
+ netdev_completed_queue(netdev, pkts_compl, bytes_compl);
+
#define TX_WAKE_THRESHOLD 32
if (count && netif_carrier_ok(netdev) &&
e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
@@ -1150,14 +1157,14 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
* Detect a transmit hang in hardware, this serializes the
* check with the clearing of time_stamp and movement of i
*/
- adapter->detect_tx_hung = 0;
+ adapter->detect_tx_hung = false;
if (tx_ring->buffer_info[i].time_stamp &&
time_after(jiffies, tx_ring->buffer_info[i].time_stamp
+ (adapter->tx_timeout_factor * HZ)) &&
- !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+ !(er32(STATUS) & E1000_STATUS_TXOFF))
schedule_work(&adapter->print_hang_task);
- netif_stop_queue(netdev);
- }
+ else
+ adapter->tx_hang_recheck = false;
}
adapter->total_tx_bytes += total_tx_bytes;
adapter->total_tx_packets += total_tx_packets;
@@ -1185,7 +1192,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
unsigned int i, j;
u32 length, staterr;
int cleaned_count = 0;
- bool cleaned = 0;
+ bool cleaned = false;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
i = rx_ring->next_to_clean;
@@ -1211,7 +1218,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
next_buffer = &rx_ring->buffer_info[i];
- cleaned = 1;
+ cleaned = true;
cleaned_count++;
dma_unmap_single(&pdev->dev, buffer_info->dma,
adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
@@ -1222,8 +1229,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
adapter->flags2 |= FLAG2_IS_DISCARDING;
if (adapter->flags2 & FLAG2_IS_DISCARDING) {
- e_dbg("Packet Split buffers didn't pick up the full "
- "packet\n");
+ e_dbg("Packet Split buffers didn't pick up the full packet\n");
dev_kfree_skb_irq(skb);
if (staterr & E1000_RXD_STAT_EOP)
adapter->flags2 &= ~FLAG2_IS_DISCARDING;
@@ -1238,8 +1244,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
length = le16_to_cpu(rx_desc->wb.middle.length0);
if (!length) {
- e_dbg("Last part of the packet spanning multiple "
- "descriptors\n");
+ e_dbg("Last part of the packet spanning multiple descriptors\n");
dev_kfree_skb_irq(skb);
goto next_desc;
}
@@ -1917,8 +1922,7 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
return;
}
/* MSI-X failed, so fall through and try MSI */
- e_err("Failed to initialize MSI-X interrupts. "
- "Falling back to MSI interrupts.\n");
+ e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n");
e1000e_reset_interrupt_capability(adapter);
}
adapter->int_mode = E1000E_INT_MODE_MSI;
@@ -1928,8 +1932,7 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
adapter->flags |= FLAG_MSI_ENABLED;
} else {
adapter->int_mode = E1000E_INT_MODE_LEGACY;
- e_err("Failed to initialize MSI interrupts. Falling "
- "back to legacy interrupts.\n");
+ e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n");
}
/* Fall through */
case E1000E_INT_MODE_LEGACY:
@@ -2260,6 +2263,7 @@ static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
e1000_put_txbuf(adapter, buffer_info);
}
+ netdev_reset_queue(adapter->netdev);
size = sizeof(struct e1000_buffer) * tx_ring->count;
memset(tx_ring->buffer_info, 0, size);
@@ -2518,7 +2522,7 @@ clean_rx:
return work_done;
}
-static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -2528,7 +2532,7 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
if ((adapter->hw.mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
(vid == adapter->mng_vlan_id))
- return;
+ return 0;
/* add VID to filter table */
if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
@@ -2539,9 +2543,11 @@ static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
}
set_bit(vid, adapter->active_vlans);
+
+ return 0;
}
-static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -2552,7 +2558,7 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
(vid == adapter->mng_vlan_id)) {
/* release control to f/w */
e1000e_release_hw_control(adapter);
- return;
+ return 0;
}
/* remove VID from filter table */
@@ -2564,6 +2570,8 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
}
clear_bit(vid, adapter->active_vlans);
+
+ return 0;
}
/**
@@ -3113,79 +3121,147 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
}
/**
- * e1000_update_mc_addr_list - Update Multicast addresses
- * @hw: pointer to the HW structure
- * @mc_addr_list: array of multicast addresses to program
- * @mc_addr_count: number of multicast addresses to program
+ * e1000e_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ * 0 on no addresses written
+ * X on writing X addresses to MTA
+ */
+static int e1000e_write_mc_addr_list(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ struct netdev_hw_addr *ha;
+ u8 *mta_list;
+ int i;
+
+ if (netdev_mc_empty(netdev)) {
+ /* nothing to program, so clear mc list */
+ hw->mac.ops.update_mc_addr_list(hw, NULL, 0);
+ return 0;
+ }
+
+ mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
+ if (!mta_list)
+ return -ENOMEM;
+
+ /* update_mc_addr_list expects a packed array of only addresses. */
+ i = 0;
+ netdev_for_each_mc_addr(ha, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
+
+ hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
+ kfree(mta_list);
+
+ return netdev_mc_count(netdev);
+}
+
+/**
+ * e1000e_write_uc_addr_list - write unicast addresses to RAR table
+ * @netdev: network interface device structure
*
- * Updates the Multicast Table Array.
- * The caller must have a packed mc_addr_list of multicast addresses.
+ * Writes unicast address list to the RAR table.
+ * Returns: -ENOMEM on failure/insufficient address space
+ * 0 on no addresses written
+ * X on writing X addresses to the RAR table
**/
-static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count)
+static int e1000e_write_uc_addr_list(struct net_device *netdev)
{
- hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+ struct e1000_hw *hw = &adapter->hw;
+ unsigned int rar_entries = hw->mac.rar_entry_count;
+ int count = 0;
+
+ /* save a rar entry for our hardware address */
+ rar_entries--;
+
+ /* save a rar entry for the LAA workaround */
+ if (adapter->flags & FLAG_RESET_OVERWRITES_LAA)
+ rar_entries--;
+
+ /* return ENOMEM indicating insufficient memory for addresses */
+ if (netdev_uc_count(netdev) > rar_entries)
+ return -ENOMEM;
+
+ if (!netdev_uc_empty(netdev) && rar_entries) {
+ struct netdev_hw_addr *ha;
+
+ /*
+ * write the addresses in reverse order to avoid write
+ * combining
+ */
+ netdev_for_each_uc_addr(ha, netdev) {
+ if (!rar_entries)
+ break;
+ e1000e_rar_set(hw, ha->addr, rar_entries--);
+ count++;
+ }
+ }
+
+ /* zero out the remaining RAR entries not used above */
+ for (; rar_entries > 0; rar_entries--) {
+ ew32(RAH(rar_entries), 0);
+ ew32(RAL(rar_entries), 0);
+ }
+ e1e_flush();
+
+ return count;
}
/**
- * e1000_set_multi - Multicast and Promiscuous mode set
+ * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
- * The set_multi entry point is called whenever the multicast address
- * list or the network interface flags are updated. This routine is
- * responsible for configuring the hardware for proper multicast,
+ * The ndo_set_rx_mode entry point is called whenever the unicast or multicast
+ * address list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper unicast, multicast,
* promiscuous mode, and all-multi behavior.
**/
-static void e1000_set_multi(struct net_device *netdev)
+static void e1000e_set_rx_mode(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct netdev_hw_addr *ha;
- u8 *mta_list;
u32 rctl;
/* Check for Promiscuous and All Multicast modes */
-
rctl = er32(RCTL);
+ /* clear the affected bits */
+ rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+
if (netdev->flags & IFF_PROMISC) {
rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
- rctl &= ~E1000_RCTL_VFE;
/* Do not hardware filter VLANs in promisc mode */
e1000e_vlan_filter_disable(adapter);
} else {
+ int count;
if (netdev->flags & IFF_ALLMULTI) {
rctl |= E1000_RCTL_MPE;
- rctl &= ~E1000_RCTL_UPE;
} else {
- rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+ /*
+ * Write addresses to the MTA, if the attempt fails
+ * then we should just turn on promiscuous mode so
+ * that we can at least receive multicast traffic
+ */
+ count = e1000e_write_mc_addr_list(netdev);
+ if (count < 0)
+ rctl |= E1000_RCTL_MPE;
}
e1000e_vlan_filter_enable(adapter);
- }
-
- ew32(RCTL, rctl);
-
- if (!netdev_mc_empty(netdev)) {
- int i = 0;
-
- mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
- if (!mta_list)
- return;
-
- /* prepare a packed array of only addresses. */
- netdev_for_each_mc_addr(ha, netdev)
- memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
-
- e1000_update_mc_addr_list(hw, mta_list, i);
- kfree(mta_list);
- } else {
/*
- * if we're called from probe, we might not have
- * anything to do here, so clear out the list
+ * Write addresses to available RAR registers, if there is not
+ * sufficient space to store all the addresses then enable
+ * unicast promiscuous mode
*/
- e1000_update_mc_addr_list(hw, NULL, 0);
+ count = e1000e_write_uc_addr_list(netdev);
+ if (count < 0)
+ rctl |= E1000_RCTL_UPE;
}
+ ew32(RCTL, rctl);
+
if (netdev->features & NETIF_F_HW_VLAN_RX)
e1000e_vlan_strip_enable(adapter);
else
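Editor's note: taken together, the three new functions in the hunk above replace e1000_set_multi with a full ndo_set_rx_mode implementation. IFF_PROMISC sets both UPE and MPE and disables hardware VLAN filtering; IFF_ALLMULTI sets MPE; otherwise the multicast list is packed back to back (ETH_ALEN bytes per entry) and written to the MTA, and the secondary unicast list is written to spare RAR slots, with each stage falling back to the matching promiscuous bit when the hardware tables cannot hold the list. The IFF_UNICAST_FLT flag added in the probe hunk further down is what tells the core the driver can filter unicast addresses in hardware. A condensed, purely illustrative sketch of that fallback policy (not the driver's actual code):

	static u32 rx_mode_to_rctl_bits(struct net_device *netdev,
					int mc_written, int uc_written)
	{
		u32 bits = 0;

		if (netdev->flags & IFF_PROMISC)
			return E1000_RCTL_UPE | E1000_RCTL_MPE;

		if ((netdev->flags & IFF_ALLMULTI) || mc_written < 0)
			bits |= E1000_RCTL_MPE;   /* all-multi requested or MTA write failed */

		if (uc_written < 0)
			bits |= E1000_RCTL_UPE;   /* more unicast entries than free RAR slots */

		return bits;
	}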
@@ -3198,7 +3274,7 @@ static void e1000_set_multi(struct net_device *netdev)
**/
static void e1000_configure(struct e1000_adapter *adapter)
{
- e1000_set_multi(adapter->netdev);
+ e1000e_set_rx_mode(adapter->netdev);
e1000_restore_vlan(adapter);
e1000_init_manageability_pt(adapter);
@@ -3444,7 +3520,6 @@ int e1000e_up(struct e1000_adapter *adapter)
clear_bit(__E1000_DOWN, &adapter->state);
- napi_enable(&adapter->napi);
if (adapter->msix_entries)
e1000_configure_msix(adapter);
e1000_irq_enable(adapter);
@@ -3506,7 +3581,6 @@ void e1000e_down(struct e1000_adapter *adapter)
e1e_flush();
usleep_range(10000, 20000);
- napi_disable(&adapter->napi);
e1000_irq_disable(adapter);
del_timer_sync(&adapter->watchdog_timer);
@@ -3782,6 +3856,7 @@ static int e1000_open(struct net_device *netdev)
e1000_irq_enable(adapter);
+ adapter->tx_hang_recheck = false;
netif_start_queue(netdev);
adapter->idle_check = true;
@@ -3828,6 +3903,8 @@ static int e1000_close(struct net_device *netdev)
pm_runtime_get_sync(&pdev->dev);
+ napi_disable(&adapter->napi);
+
if (!test_bit(__E1000_DOWN, &adapter->state)) {
e1000e_down(adapter);
e1000_free_irq(adapter);
@@ -4168,22 +4245,19 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
u32 ctrl = er32(CTRL);
/* Link status message must follow this format for user tools */
- printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, "
- "Flow Control: %s\n",
- adapter->netdev->name,
- adapter->link_speed,
- (adapter->link_duplex == FULL_DUPLEX) ?
- "Full Duplex" : "Half Duplex",
- ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
- "Rx/Tx" :
- ((ctrl & E1000_CTRL_RFCE) ? "Rx" :
- ((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None")));
+ printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
+ adapter->netdev->name,
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
+ (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
+ (ctrl & E1000_CTRL_RFCE) ? "Rx" :
+ (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
}
static bool e1000e_has_link(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- bool link_active = 0;
+ bool link_active = false;
s32 ret_val = 0;
/*
@@ -4198,7 +4272,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
ret_val = hw->mac.ops.check_for_link(hw);
link_active = !hw->mac.get_link_status;
} else {
- link_active = 1;
+ link_active = true;
}
break;
case e1000_media_type_fiber:
@@ -4297,7 +4371,7 @@ static void e1000_watchdog_task(struct work_struct *work)
if (link) {
if (!netif_carrier_ok(netdev)) {
- bool txb2b = 1;
+ bool txb2b = true;
/* Cancel scheduled suspend requests. */
pm_runtime_resume(netdev->dev.parent);
@@ -4323,21 +4397,18 @@ static void e1000_watchdog_task(struct work_struct *work)
e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
- e_info("Autonegotiated half duplex but"
- " link partner cannot autoneg. "
- " Try forcing full duplex if "
- "link gets many collisions.\n");
+ e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n");
}
/* adjust timeout factor according to speed/duplex */
adapter->tx_timeout_factor = 1;
switch (adapter->link_speed) {
case SPEED_10:
- txb2b = 0;
+ txb2b = false;
adapter->tx_timeout_factor = 16;
break;
case SPEED_100:
- txb2b = 0;
+ txb2b = false;
adapter->tx_timeout_factor = 10;
break;
}
@@ -4473,7 +4544,7 @@ link_up:
e1000e_flush_descriptors(adapter);
/* Force detection of hung controller every watchdog period */
- adapter->detect_tx_hung = 1;
+ adapter->detect_tx_hung = true;
/*
* With 82571 controllers, LAA may be overwritten due to controller
@@ -4985,6 +5056,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
/* if count is 0 then mapping error has occurred */
count = e1000_tx_map(adapter, skb, first, max_per_txd, nr_frags, mss);
if (count) {
+ netdev_sent_queue(netdev, skb->len);
e1000_tx_queue(adapter, tx_flags, count);
/* Make sure there is space in the ring for the next send. */
e1000_maybe_stop_tx(netdev, MAX_SKB_FRAGS + 2);
@@ -5110,8 +5182,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
if ((adapter->hw.mac.type == e1000_pch2lan) &&
!(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
(new_mtu > ETH_DATA_LEN)) {
- e_err("Jumbo Frames not supported on 82579 when CRC "
- "stripping is disabled.\n");
+ e_err("Jumbo Frames not supported on 82579 when CRC stripping is disabled.\n");
return -EINVAL;
}
@@ -5331,7 +5402,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
if (wufc) {
e1000_setup_rctl(adapter);
- e1000_set_multi(netdev);
+ e1000e_set_rx_mode(netdev);
/* turn on all-multi mode if wake on multicast is enabled */
if (wufc & E1000_WUFC_MC) {
@@ -5527,8 +5598,8 @@ static int __e1000_resume(struct pci_dev *pdev)
phy_data & E1000_WUS_MC ? "Multicast Packet" :
phy_data & E1000_WUS_BC ? "Broadcast Packet" :
phy_data & E1000_WUS_MAG ? "Magic Packet" :
- phy_data & E1000_WUS_LNKC ? "Link Status "
- " Change" : "other");
+ phy_data & E1000_WUS_LNKC ?
+ "Link Status Change" : "other");
}
e1e_wphy(&adapter->hw, BM_WUS, ~0);
} else {
@@ -5859,10 +5930,11 @@ static void e1000_eeprom_checks(struct e1000_adapter *adapter)
}
}
-static int e1000_set_features(struct net_device *netdev, u32 features)
+static int e1000_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
- u32 changed = features ^ netdev->features;
+ netdev_features_t changed = features ^ netdev->features;
if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
adapter->flags |= FLAG_TSO_FORCE;
@@ -5884,7 +5956,7 @@ static const struct net_device_ops e1000e_netdev_ops = {
.ndo_stop = e1000_close,
.ndo_start_xmit = e1000_xmit_frame,
.ndo_get_stats64 = e1000e_get_stats64,
- .ndo_set_rx_mode = e1000_set_multi,
+ .ndo_set_rx_mode = e1000e_set_rx_mode,
.ndo_set_mac_address = e1000_set_mac,
.ndo_change_mtu = e1000_change_mtu,
.ndo_do_ioctl = e1000_ioctl,
@@ -5949,8 +6021,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
err = dma_set_coherent_mask(&pdev->dev,
DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "No usable DMA "
- "configuration, aborting\n");
+ dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
goto err_dma;
}
}
@@ -6076,6 +6147,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
NETIF_F_TSO6 |
NETIF_F_HW_CSUM);
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
if (pci_using_dac) {
netdev->features |= NETIF_F_HIGHDMA;
netdev->vlan_features |= NETIF_F_HIGHDMA;
@@ -6135,7 +6208,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
/* Initialize link parameters. User can change them with ethtool */
adapter->hw.mac.autoneg = 1;
- adapter->fc_autoneg = 1;
+ adapter->fc_autoneg = true;
adapter->hw.fc.requested_mode = e1000_fc_default;
adapter->hw.fc.current_mode = e1000_fc_default;
adapter->hw.phy.autoneg_advertised = 0x2f;
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index c6e4621..6565c46 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel 82575 PCI-Express Ethernet Linux driver
-# Copyright(c) 1999 - 2011 Intel Corporation.
+# Copyright(c) 1999 - 2012 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 7881fb9..08bdc33 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2011 Intel Corporation.
+ Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -29,6 +29,8 @@
* e1000_82576
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/if_ether.h>
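Editor's note: the pr_fmt() definition added above is picked up by every pr_*() call in the file, which is why later hunks can drop the hand-written "igb: " prefixes. A minimal illustration of the expansion, using the message from the hunk below:

	pr_notice("The NVM size is not valid, defaulting to 32K\n");

	/* expands, via pr_fmt(), to */
	printk(KERN_NOTICE KBUILD_MODNAME ": " "The NVM size is not valid, defaulting to 32K\n");

	/* and is logged as: igb: The NVM size is not valid, defaulting to 32K */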
@@ -244,8 +246,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
* Check for invalid size
*/
if ((hw->mac.type == e1000_82576) && (size > 15)) {
- printk("igb: The NVM size is not valid, "
- "defaulting to 32K.\n");
+ pr_notice("The NVM size is not valid, defaulting to 32K\n");
size = 15;
}
nvm->word_size = 1 << size;
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 08a757e..b927d79 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2011 Intel Corporation.
+ Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index f5fc572..aed2174 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2011 Intel Corporation.
+ Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 4519a13..f67cbd3 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2011 Intel Corporation.
+ Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 73aac08..f57338a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2011 Intel Corporation.
+ Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -151,7 +151,7 @@ void igb_clear_vfta_i350(struct e1000_hw *hw)
* Writes value at the given offset in the register array which stores
* the VLAN filter table.
**/
-void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
+static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
{
int i;
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index e45996b..cbddc4e 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2011 Intel Corporation.
+ Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index 469d95e..5988b89 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2011 Intel Corporation.
+ Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index eddb0f8..dbcfa3d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2011 Intel Corporation.
+ Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index 4040712..fa2c6ba 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2011 Intel Corporation.
+ Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index a2a7ca9..825b022 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2011 Intel Corporation.
+ Copyright(c) 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index b17d7c2..789de5b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2011 Intel Corporation.
+ Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 8510797..4c32ac6 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2011 Intel Corporation.
+ Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 0a860bc..ccdf36d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2011 Intel Corporation.
+ Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index c69feeb..8e33bdd 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2011 Intel Corporation.
+ Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -447,4 +447,9 @@ static inline s32 igb_get_phy_info(struct e1000_hw *hw)
return 0;
}
+static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
+{
+ return netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
+}
+
#endif /* _IGB_H_ */
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 43873eb..aa399a8 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2011 Intel Corporation.
+ Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -36,6 +36,7 @@
#include <linux/ethtool.h>
#include <linux/sched.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include "igb.h"
@@ -148,7 +149,8 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
SUPPORTED_1000baseT_Full|
SUPPORTED_Autoneg |
SUPPORTED_TP);
- ecmd->advertising = ADVERTISED_TP;
+ ecmd->advertising = (ADVERTISED_TP |
+ ADVERTISED_Pause);
if (hw->mac.autoneg == 1) {
ecmd->advertising |= ADVERTISED_Autoneg;
@@ -165,7 +167,8 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->advertising = (ADVERTISED_1000baseT_Full |
ADVERTISED_FIBRE |
- ADVERTISED_Autoneg);
+ ADVERTISED_Autoneg |
+ ADVERTISED_Pause);
ecmd->port = PORT_FIBRE;
}
@@ -673,25 +676,22 @@ static void igb_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- char firmware_version[32];
u16 eeprom_data;
- strncpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver) - 1);
- strncpy(drvinfo->version, igb_driver_version,
- sizeof(drvinfo->version) - 1);
+ strlcpy(drvinfo->driver, igb_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, igb_driver_version, sizeof(drvinfo->version));
/* EEPROM image version # is reported as firmware version # for
* 82575 controllers */
adapter->hw.nvm.ops.read(&adapter->hw, 5, 1, &eeprom_data);
- sprintf(firmware_version, "%d.%d-%d",
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d-%d",
(eeprom_data & 0xF000) >> 12,
(eeprom_data & 0x0FF0) >> 4,
eeprom_data & 0x000F);
- strncpy(drvinfo->fw_version, firmware_version,
- sizeof(drvinfo->fw_version) - 1);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
- sizeof(drvinfo->bus_info) - 1);
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = IGB_STATS_LEN;
drvinfo->testinfo_len = IGB_TEST_LEN;
drvinfo->regdump_len = igb_get_regs_len(netdev);
@@ -2162,6 +2162,19 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
}
}
+static int igb_ethtool_begin(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ pm_runtime_get_sync(&adapter->pdev->dev);
+ return 0;
+}
+
+static void igb_ethtool_complete(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ pm_runtime_put(&adapter->pdev->dev);
+}
+
static const struct ethtool_ops igb_ethtool_ops = {
.get_settings = igb_get_settings,
.set_settings = igb_set_settings,
@@ -2188,6 +2201,8 @@ static const struct ethtool_ops igb_ethtool_ops = {
.get_ethtool_stats = igb_get_ethtool_stats,
.get_coalesce = igb_get_coalesce,
.set_coalesce = igb_set_coalesce,
+ .begin = igb_ethtool_begin,
+ .complete = igb_ethtool_complete,
};
void igb_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index ced5444..94be6c3 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2011 Intel Corporation.
+ Copyright(c) 2007-2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -25,6 +25,8 @@
*******************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -51,6 +53,7 @@
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
+#include <linux/pm_runtime.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
@@ -65,7 +68,7 @@ char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
"Intel(R) Gigabit Ethernet Network Driver";
-static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";
+static const char igb_copyright[] = "Copyright (c) 2007-2012 Intel Corporation.";
static const struct e1000_info *igb_info_tbl[] = {
[board_82575] = &e1000_82575_info,
@@ -145,9 +148,9 @@ static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
-static void igb_vlan_mode(struct net_device *netdev, u32 features);
-static void igb_vlan_rx_add_vid(struct net_device *, u16);
-static void igb_vlan_rx_kill_vid(struct net_device *, u16);
+static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
+static int igb_vlan_rx_add_vid(struct net_device *, u16);
+static int igb_vlan_rx_kill_vid(struct net_device *, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
static void igb_ping_all_vfs(struct igb_adapter *);
@@ -170,8 +173,18 @@ static int igb_check_vf_assignment(struct igb_adapter *adapter);
#endif
#ifdef CONFIG_PM
-static int igb_suspend(struct pci_dev *, pm_message_t);
-static int igb_resume(struct pci_dev *);
+static int igb_suspend(struct device *);
+static int igb_resume(struct device *);
+#ifdef CONFIG_PM_RUNTIME
+static int igb_runtime_suspend(struct device *dev);
+static int igb_runtime_resume(struct device *dev);
+static int igb_runtime_idle(struct device *dev);
+#endif
+static const struct dev_pm_ops igb_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
+ SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
+ igb_runtime_idle)
+};
#endif
static void igb_shutdown(struct pci_dev *);
#ifdef CONFIG_IGB_DCA
@@ -212,9 +225,7 @@ static struct pci_driver igb_driver = {
.probe = igb_probe,
.remove = __devexit_p(igb_remove),
#ifdef CONFIG_PM
- /* Power Management Hooks */
- .suspend = igb_suspend,
- .resume = igb_resume,
+ .driver.pm = &igb_pm_ops,
#endif
.shutdown = igb_shutdown,
.err_handler = &igb_err_handler
@@ -325,16 +336,13 @@ static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
regs[n] = rd32(E1000_TXDCTL(n));
break;
default:
- printk(KERN_INFO "%-15s %08x\n",
- reginfo->name, rd32(reginfo->ofs));
+ pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
return;
}
snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
- printk(KERN_INFO "%-15s ", rname);
- for (n = 0; n < 4; n++)
- printk(KERN_CONT "%08x ", regs[n]);
- printk(KERN_CONT "\n");
+ pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
+ regs[2], regs[3]);
}
/*
@@ -359,18 +367,15 @@ static void igb_dump(struct igb_adapter *adapter)
/* Print netdevice Info */
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
- printk(KERN_INFO "Device Name state "
- "trans_start last_rx\n");
- printk(KERN_INFO "%-15s %016lX %016lX %016lX\n",
- netdev->name,
- netdev->state,
- netdev->trans_start,
- netdev->last_rx);
+ pr_info("Device Name state trans_start "
+ "last_rx\n");
+ pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
+ netdev->state, netdev->trans_start, netdev->last_rx);
}
/* Print Registers */
dev_info(&adapter->pdev->dev, "Register Dump\n");
- printk(KERN_INFO " Register Name Value\n");
+ pr_info(" Register Name Value\n");
for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
reginfo->name; reginfo++) {
igb_regdump(hw, reginfo);
@@ -381,18 +386,17 @@ static void igb_dump(struct igb_adapter *adapter)
goto exit;
dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
- printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma ]"
- " leng ntw timestamp\n");
+ pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
for (n = 0; n < adapter->num_tx_queues; n++) {
struct igb_tx_buffer *buffer_info;
tx_ring = adapter->tx_ring[n];
buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
- printk(KERN_INFO " %5d %5X %5X %016llX %04X %p %016llX\n",
- n, tx_ring->next_to_use, tx_ring->next_to_clean,
- (u64)buffer_info->dma,
- buffer_info->length,
- buffer_info->next_to_watch,
- (u64)buffer_info->time_stamp);
+ pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
+ n, tx_ring->next_to_use, tx_ring->next_to_clean,
+ (u64)buffer_info->dma,
+ buffer_info->length,
+ buffer_info->next_to_watch,
+ (u64)buffer_info->time_stamp);
}
/* Print TX Rings */
@@ -414,36 +418,38 @@ static void igb_dump(struct igb_adapter *adapter)
for (n = 0; n < adapter->num_tx_queues; n++) {
tx_ring = adapter->tx_ring[n];
- printk(KERN_INFO "------------------------------------\n");
- printk(KERN_INFO "TX QUEUE INDEX = %d\n", tx_ring->queue_index);
- printk(KERN_INFO "------------------------------------\n");
- printk(KERN_INFO "T [desc] [address 63:0 ] "
- "[PlPOCIStDDM Ln] [bi->dma ] "
- "leng ntw timestamp bi->skb\n");
+ pr_info("------------------------------------\n");
+ pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+ pr_info("------------------------------------\n");
+ pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] "
+ "[bi->dma ] leng ntw timestamp "
+ "bi->skb\n");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+ const char *next_desc;
struct igb_tx_buffer *buffer_info;
tx_desc = IGB_TX_DESC(tx_ring, i);
buffer_info = &tx_ring->tx_buffer_info[i];
u0 = (struct my_u0 *)tx_desc;
- printk(KERN_INFO "T [0x%03X] %016llX %016llX %016llX"
- " %04X %p %016llX %p", i,
+ if (i == tx_ring->next_to_use &&
+ i == tx_ring->next_to_clean)
+ next_desc = " NTC/U";
+ else if (i == tx_ring->next_to_use)
+ next_desc = " NTU";
+ else if (i == tx_ring->next_to_clean)
+ next_desc = " NTC";
+ else
+ next_desc = "";
+
+ pr_info("T [0x%03X] %016llX %016llX %016llX"
+ " %04X %p %016llX %p%s\n", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)buffer_info->dma,
buffer_info->length,
buffer_info->next_to_watch,
(u64)buffer_info->time_stamp,
- buffer_info->skb);
- if (i == tx_ring->next_to_use &&
- i == tx_ring->next_to_clean)
- printk(KERN_CONT " NTC/U\n");
- else if (i == tx_ring->next_to_use)
- printk(KERN_CONT " NTU\n");
- else if (i == tx_ring->next_to_clean)
- printk(KERN_CONT " NTC\n");
- else
- printk(KERN_CONT "\n");
+ buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
print_hex_dump(KERN_INFO, "",
@@ -456,11 +462,11 @@ static void igb_dump(struct igb_adapter *adapter)
/* Print RX Rings Summary */
rx_ring_summary:
dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
- printk(KERN_INFO "Queue [NTU] [NTC]\n");
+ pr_info("Queue [NTU] [NTC]\n");
for (n = 0; n < adapter->num_rx_queues; n++) {
rx_ring = adapter->rx_ring[n];
- printk(KERN_INFO " %5d %5X %5X\n", n,
- rx_ring->next_to_use, rx_ring->next_to_clean);
+ pr_info(" %5d %5X %5X\n",
+ n, rx_ring->next_to_use, rx_ring->next_to_clean);
}
/* Print RX Rings */
@@ -492,36 +498,43 @@ rx_ring_summary:
for (n = 0; n < adapter->num_rx_queues; n++) {
rx_ring = adapter->rx_ring[n];
- printk(KERN_INFO "------------------------------------\n");
- printk(KERN_INFO "RX QUEUE INDEX = %d\n", rx_ring->queue_index);
- printk(KERN_INFO "------------------------------------\n");
- printk(KERN_INFO "R [desc] [ PktBuf A0] "
- "[ HeadBuf DD] [bi->dma ] [bi->skb] "
- "<-- Adv Rx Read format\n");
- printk(KERN_INFO "RWB[desc] [PcsmIpSHl PtRs] "
- "[vl er S cks ln] ---------------- [bi->skb] "
- "<-- Adv Rx Write-Back format\n");
+ pr_info("------------------------------------\n");
+ pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+ pr_info("------------------------------------\n");
+ pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] "
+ "[bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
+ pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] -----"
+ "----------- [bi->skb] <-- Adv Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
+ const char *next_desc;
struct igb_rx_buffer *buffer_info;
buffer_info = &rx_ring->rx_buffer_info[i];
rx_desc = IGB_RX_DESC(rx_ring, i);
u0 = (struct my_u0 *)rx_desc;
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+ if (i == rx_ring->next_to_use)
+ next_desc = " NTU";
+ else if (i == rx_ring->next_to_clean)
+ next_desc = " NTC";
+ else
+ next_desc = "";
+
if (staterr & E1000_RXD_STAT_DD) {
/* Descriptor Done */
- printk(KERN_INFO "RWB[0x%03X] %016llX "
- "%016llX ---------------- %p", i,
+ pr_info("%s[0x%03X] %016llX %016llX -------"
+ "--------- %p%s\n", "RWB", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
- buffer_info->skb);
+ buffer_info->skb, next_desc);
} else {
- printk(KERN_INFO "R [0x%03X] %016llX "
- "%016llX %016llX %p", i,
+ pr_info("%s[0x%03X] %016llX %016llX %016llX"
+ " %p%s\n", "R ", i,
le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)buffer_info->dma,
- buffer_info->skb);
+ buffer_info->skb, next_desc);
if (netif_msg_pktdata(adapter)) {
print_hex_dump(KERN_INFO, "",
@@ -538,14 +551,6 @@ rx_ring_summary:
PAGE_SIZE/2, true);
}
}
-
- if (i == rx_ring->next_to_use)
- printk(KERN_CONT " NTU\n");
- else if (i == rx_ring->next_to_clean)
- printk(KERN_CONT " NTC\n");
- else
- printk(KERN_CONT "\n");
-
}
}
@@ -599,10 +604,10 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
static int __init igb_init_module(void)
{
int ret;
- printk(KERN_INFO "%s - version %s\n",
+ pr_info("%s - version %s\n",
igb_driver_string, igb_driver_version);
- printk(KERN_INFO "%s\n", igb_copyright);
+ pr_info("%s\n", igb_copyright);
#ifdef CONFIG_IGB_DCA
dca_register_notify(&dca_notifier);
@@ -1502,6 +1507,7 @@ void igb_power_up_link(struct igb_adapter *adapter)
igb_power_up_phy_copper(&adapter->hw);
else
igb_power_up_serdes_link_82575(&adapter->hw);
+ igb_reset_phy(&adapter->hw);
}
/**
@@ -1742,7 +1748,8 @@ void igb_reset(struct igb_adapter *adapter)
igb_get_phy_info(hw);
}
-static u32 igb_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t igb_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -1756,9 +1763,10 @@ static u32 igb_fix_features(struct net_device *netdev, u32 features)
return features;
}
-static int igb_set_features(struct net_device *netdev, u32 features)
+static int igb_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
- u32 changed = netdev->features ^ features;
+ netdev_features_t changed = netdev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
igb_vlan_mode(netdev, features);
@@ -2113,6 +2121,8 @@ static int __devinit igb_probe(struct pci_dev *pdev,
default:
break;
}
+
+ pm_runtime_put_noidle(&pdev->dev);
return 0;
err_register:
@@ -2152,6 +2162,8 @@ static void __devexit igb_remove(struct pci_dev *pdev)
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ pm_runtime_get_noresume(&pdev->dev);
+
/*
* The watchdog timer may be rescheduled, so explicitly
* disable watchdog from being rescheduled.
@@ -2474,16 +2486,22 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
* handler is registered with the OS, the watchdog timer is started,
* and the stack is notified that the interface is ready.
**/
-static int igb_open(struct net_device *netdev)
+static int __igb_open(struct net_device *netdev, bool resuming)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
int err;
int i;
/* disallow open during test */
- if (test_bit(__IGB_TESTING, &adapter->state))
+ if (test_bit(__IGB_TESTING, &adapter->state)) {
+ WARN_ON(resuming);
return -EBUSY;
+ }
+
+ if (!resuming)
+ pm_runtime_get_sync(&pdev->dev);
netif_carrier_off(netdev);
@@ -2529,6 +2547,9 @@ static int igb_open(struct net_device *netdev)
netif_tx_start_all_queues(netdev);
+ if (!resuming)
+ pm_runtime_put(&pdev->dev);
+
/* start the watchdog. */
hw->mac.get_link_status = 1;
schedule_work(&adapter->watchdog_task);
@@ -2543,10 +2564,17 @@ err_setup_rx:
igb_free_all_tx_resources(adapter);
err_setup_tx:
igb_reset(adapter);
+ if (!resuming)
+ pm_runtime_put(&pdev->dev);
return err;
}
+static int igb_open(struct net_device *netdev)
+{
+ return __igb_open(netdev, false);
+}
+
/**
* igb_close - Disables a network interface
* @netdev: network interface device structure
@@ -2558,21 +2586,32 @@ err_setup_tx:
* needs to be disabled. A global MAC reset is issued to stop the
* hardware, and all transmit and receive resources are freed.
**/
-static int igb_close(struct net_device *netdev)
+static int __igb_close(struct net_device *netdev, bool suspending)
{
struct igb_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
- igb_down(adapter);
+ if (!suspending)
+ pm_runtime_get_sync(&pdev->dev);
+
+ igb_down(adapter);
igb_free_irq(adapter);
igb_free_all_tx_resources(adapter);
igb_free_all_rx_resources(adapter);
+ if (!suspending)
+ pm_runtime_put_sync(&pdev->dev);
return 0;
}
+static int igb_close(struct net_device *netdev)
+{
+ return __igb_close(netdev, false);
+}
+
/**
* igb_setup_tx_resources - allocate Tx resources (Descriptors)
* @tx_ring: tx descriptor ring (for a specific queue) to setup
@@ -3203,6 +3242,7 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
buffer_info = &tx_ring->tx_buffer_info[i];
igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
}
+ netdev_tx_reset_queue(txring_txq(tx_ring));
size = sizeof(struct igb_tx_buffer) * tx_ring->count;
memset(tx_ring->tx_buffer_info, 0, size);
@@ -3632,6 +3672,9 @@ static void igb_watchdog_task(struct work_struct *work)
link = igb_has_link(adapter);
if (link) {
+ /* Cancel scheduled suspend requests. */
+ pm_runtime_resume(netdev->dev.parent);
+
if (!netif_carrier_ok(netdev)) {
u32 ctrl;
hw->mac.ops.get_speed_and_duplex(hw,
@@ -3640,23 +3683,23 @@ static void igb_watchdog_task(struct work_struct *work)
ctrl = rd32(E1000_CTRL);
/* Link status message must follow this format */
- printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
- "Flow Control: %s\n",
+ printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
+ "Duplex, Flow Control: %s\n",
netdev->name,
adapter->link_speed,
adapter->link_duplex == FULL_DUPLEX ?
- "Full Duplex" : "Half Duplex",
- ((ctrl & E1000_CTRL_TFCE) &&
- (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
- ((ctrl & E1000_CTRL_RFCE) ? "RX" :
- ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
+ "Full" : "Half",
+ (ctrl & E1000_CTRL_TFCE) &&
+ (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
+ (ctrl & E1000_CTRL_RFCE) ? "RX" :
+ (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");
/* check for thermal sensor event */
- if (igb_thermal_sensor_event(hw, E1000_THSTAT_LINK_THROTTLE)) {
- printk(KERN_INFO "igb: %s The network adapter "
- "link speed was downshifted "
- "because it overheated.\n",
- netdev->name);
+ if (igb_thermal_sensor_event(hw,
+ E1000_THSTAT_LINK_THROTTLE)) {
+ netdev_info(netdev, "The network adapter link "
+ "speed was downshifted because it "
+ "overheated\n");
}
/* adjust timeout factor according to speed/duplex */
@@ -3686,11 +3729,10 @@ static void igb_watchdog_task(struct work_struct *work)
adapter->link_duplex = 0;
/* check for thermal sensor event */
- if (igb_thermal_sensor_event(hw, E1000_THSTAT_PWR_DOWN)) {
- printk(KERN_ERR "igb: %s The network adapter "
- "was stopped because it "
- "overheated.\n",
- netdev->name);
+ if (igb_thermal_sensor_event(hw,
+ E1000_THSTAT_PWR_DOWN)) {
+ netdev_err(netdev, "The network adapter was "
+ "stopped because it overheated\n");
}
/* Link status message must follow this format */
@@ -3704,6 +3746,9 @@ static void igb_watchdog_task(struct work_struct *work)
if (!test_bit(__IGB_DOWN, &adapter->state))
mod_timer(&adapter->phy_info_timer,
round_jiffies(jiffies + 2 * HZ));
+
+ pm_schedule_suspend(netdev->dev.parent,
+ MSEC_PER_SEC * 5);
}
}
@@ -3958,8 +4003,8 @@ set_itr_now:
}
}
-void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
- u32 type_tucmd, u32 mss_l4len_idx)
+static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
+ u32 type_tucmd, u32 mss_l4len_idx)
{
struct e1000_adv_tx_context_desc *context_desc;
u16 i = tx_ring->next_to_use;
@@ -4241,6 +4286,8 @@ static void igb_tx_map(struct igb_ring *tx_ring,
frag++;
}
+ netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
+
/* write last descriptor with RS and EOP bits */
cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
tx_desc->read.cmd_type_len = cmd_type;
@@ -4965,7 +5012,8 @@ static int igb_find_enabled_vfs(struct igb_adapter *adapter)
vf_devfn = pdev->devfn + 0x80;
pvfdev = pci_get_device(hw->vendor_id, device_id, NULL);
while (pvfdev) {
- if (pvfdev->devfn == vf_devfn)
+ if (pvfdev->devfn == vf_devfn &&
+ (pvfdev->bus->number >= pdev->bus->number))
vfs_found++;
vf_devfn += vf_stride;
pvfdev = pci_get_device(hw->vendor_id,
@@ -5576,7 +5624,7 @@ static irqreturn_t igb_intr(int irq, void *data)
return IRQ_HANDLED;
}
-void igb_ring_irq_enable(struct igb_q_vector *q_vector)
+static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
struct igb_adapter *adapter = q_vector->adapter;
struct e1000_hw *hw = &adapter->hw;
@@ -5780,6 +5828,8 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
}
}
+ netdev_tx_completed_queue(txring_txq(tx_ring),
+ total_packets, total_bytes);
i += tx_ring->count;
tx_ring->next_to_clean = i;
u64_stats_update_begin(&tx_ring->tx_syncp);
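Editor's note: the netdev_tx_* calls added to igb here (and the netdev_sent_queue/netdev_reset_queue pair added to e1000e earlier in this diff) hook the drivers into Byte Queue Limits: bytes are charged when descriptors are posted, credited back when the clean routine reaps them, and the accounting is cleared when a ring is torn down so stale counts cannot wedge the queue. The pattern, shown as annotated call sites with placeholder byte/packet counters rather than a full function:

	/* transmit path, after the descriptors are written */
	netdev_tx_sent_queue(txring_txq(tx_ring), bytes_posted);

	/* clean path, once the hardware has completed the work */
	netdev_tx_completed_queue(txring_txq(tx_ring), total_packets, total_bytes);

	/* ring reset/teardown: drop any outstanding accounting */
	netdev_tx_reset_queue(txring_txq(tx_ring));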
@@ -6138,7 +6188,7 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
return true;
if (!page) {
- page = netdev_alloc_page(rx_ring->netdev);
+ page = alloc_page(GFP_ATOMIC | __GFP_COLD);
bi->page = page;
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_failed++;
@@ -6467,7 +6517,7 @@ s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
return 0;
}
-static void igb_vlan_mode(struct net_device *netdev, u32 features)
+static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -6494,7 +6544,7 @@ static void igb_vlan_mode(struct net_device *netdev, u32 features)
igb_rlpml_set(adapter);
}
-static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -6507,9 +6557,11 @@ static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
igb_vfta_set(hw, vid, true);
set_bit(vid, adapter->active_vlans);
+
+ return 0;
}
-static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -6524,6 +6576,8 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
igb_vfta_set(hw, vid, false);
clear_bit(vid, adapter->active_vlans);
+
+ return 0;
}
static void igb_restore_vlan(struct igb_adapter *adapter)
@@ -6582,13 +6636,14 @@ err_inval:
return -EINVAL;
}
-static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
+static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
+ bool runtime)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, rctl, status;
- u32 wufc = adapter->wol;
+ u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
#ifdef CONFIG_PM
int retval = 0;
#endif
@@ -6596,7 +6651,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
netif_device_detach(netdev);
if (netif_running(netdev))
- igb_close(netdev);
+ __igb_close(netdev, true);
igb_clear_interrupt_scheme(adapter);
@@ -6655,12 +6710,13 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
}
#ifdef CONFIG_PM
-static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
+static int igb_suspend(struct device *dev)
{
int retval;
bool wake;
+ struct pci_dev *pdev = to_pci_dev(dev);
- retval = __igb_shutdown(pdev, &wake);
+ retval = __igb_shutdown(pdev, &wake, 0);
if (retval)
return retval;
@@ -6674,8 +6730,9 @@ static int igb_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
}
-static int igb_resume(struct pci_dev *pdev)
+static int igb_resume(struct device *dev)
{
+ struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -6696,7 +6753,18 @@ static int igb_resume(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
- if (igb_init_interrupt_scheme(adapter)) {
+ if (!rtnl_is_locked()) {
+ /*
+ * shut up ASSERT_RTNL() warning in
+ * netif_set_real_num_tx/rx_queues.
+ */
+ rtnl_lock();
+ err = igb_init_interrupt_scheme(adapter);
+ rtnl_unlock();
+ } else {
+ err = igb_init_interrupt_scheme(adapter);
+ }
+ if (err) {
dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
@@ -6709,23 +6777,61 @@ static int igb_resume(struct pci_dev *pdev)
wr32(E1000_WUS, ~0);
- if (netif_running(netdev)) {
- err = igb_open(netdev);
+ if (netdev->flags & IFF_UP) {
+ err = __igb_open(netdev, true);
if (err)
return err;
}
netif_device_attach(netdev);
+ return 0;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int igb_runtime_idle(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ if (!igb_has_link(adapter))
+ pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
+
+ return -EBUSY;
+}
+
+static int igb_runtime_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int retval;
+ bool wake;
+
+ retval = __igb_shutdown(pdev, &wake, 1);
+ if (retval)
+ return retval;
+
+ if (wake) {
+ pci_prepare_to_sleep(pdev);
+ } else {
+ pci_wake_from_d3(pdev, false);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
return 0;
}
+
+static int igb_runtime_resume(struct device *dev)
+{
+ return igb_resume(dev);
+}
+#endif /* CONFIG_PM_RUNTIME */
#endif
static void igb_shutdown(struct pci_dev *pdev)
{
bool wake;
- __igb_shutdown(pdev, &wake);
+ __igb_shutdown(pdev, &wake, 0);
if (system_state == SYSTEM_POWER_OFF) {
pci_wake_from_d3(pdev, wake);
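Editor's note: together with the probe/remove changes earlier (pm_runtime_put_noidle() after a successful probe, pm_runtime_get_noresume() in remove), the hunks above enable runtime power management for igb. The idle callback never suspends immediately (it returns -EBUSY) but schedules a delayed suspend when no link is detected, __igb_shutdown() is entered with runtime set so only link-change wake (E1000_WUFC_LNKC) is armed, and __igb_open/__igb_close take a runtime PM reference only for user-initiated open/close so the resume path is not double counted. Any other path that touches registers must hold a reference as well, which is what the new ethtool begin/complete hooks do; a minimal, illustrative sketch of that bracket:

	pm_runtime_get_sync(&adapter->pdev->dev);   /* resume the device if it is runtime-suspended */
	/* ... safe to read/write MMIO registers here ... */
	pm_runtime_put(&adapter->pdev->dev);        /* drop the reference; idle may schedule a suspend */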
@@ -7064,15 +7170,28 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
wr32(E1000_DMCTXTH, 0);
/*
- * DMA Coalescing high water mark needs to be higher
- * than the RX threshold. set hwm to PBA - 2 * max
- * frame size
+ * DMA Coalescing high water mark needs to be greater
+ * than the Rx threshold. Set hwm to PBA - max frame
+ * size in 16B units, capping it at PBA - 6KB.
*/
- hwm = pba - (2 * adapter->max_frame_size);
+ hwm = 64 * pba - adapter->max_frame_size / 16;
+ if (hwm < 64 * (pba - 6))
+ hwm = 64 * (pba - 6);
+ reg = rd32(E1000_FCRTC);
+ reg &= ~E1000_FCRTC_RTH_COAL_MASK;
+ reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
+ & E1000_FCRTC_RTH_COAL_MASK);
+ wr32(E1000_FCRTC, reg);
+
+ /*
+ * Set the DMA Coalescing Rx threshold to PBA - 2 * max
+ * frame size, capping it at PBA - 10KB.
+ */
+ dmac_thr = pba - adapter->max_frame_size / 512;
+ if (dmac_thr < pba - 10)
+ dmac_thr = pba - 10;
reg = rd32(E1000_DMACR);
reg &= ~E1000_DMACR_DMACTHR_MASK;
- dmac_thr = pba - 4;
-
reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
& E1000_DMACR_DMACTHR_MASK);
@@ -7088,7 +7207,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
* coalescing(smart fifo)-UTRESH=0
*/
wr32(E1000_DMCRTRH, 0);
- wr32(E1000_FCRTC, hwm);
reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
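Editor's note on the hwm/dmac_thr arithmetic above: the scale factors imply that pba is in KB, hwm is kept in 16-byte units (64 per KB), dmac_thr stays in KB, and max_frame_size is in bytes. A worked example under those assumptions, with pba = 34 chosen only for illustration and integer division truncating:

	hwm      = 64 * 34 - 1522 / 16 = 2176 - 95 = 2081   (>= 64 * (34 - 6) = 1792, so kept)
	dmac_thr = 34 - 1522 / 512     = 34 - 2    = 32     (>= 34 - 10 = 24, so kept)

Note that the "if (... < ...)" checks enforce a lower bound, so "capping" in the comments means the values are never allowed to drop below PBA - 6KB and PBA - 10KB respectively.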
diff --git a/drivers/net/ethernet/intel/igbvf/Makefile b/drivers/net/ethernet/intel/igbvf/Makefile
index 0fa3db3..044b0ad 100644
--- a/drivers/net/ethernet/intel/igbvf/Makefile
+++ b/drivers/net/ethernet/intel/igbvf/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel(R) 82576 Virtual Function Linux driver
-# Copyright(c) 2009 - 2010 Intel Corporation.
+# Copyright(c) 2009 - 2012 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igbvf/defines.h b/drivers/net/ethernet/intel/igbvf/defines.h
index 79f2604..33f40d3 100644
--- a/drivers/net/ethernet/intel/igbvf/defines.h
+++ b/drivers/net/ethernet/intel/igbvf/defines.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 2c25858..db7dce2 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2010 Intel Corporation.
+ Copyright(c) 2009 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -191,12 +191,12 @@ static void igbvf_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
- char firmware_version[32] = "N/A";
- strncpy(drvinfo->driver, igbvf_driver_name, 32);
- strncpy(drvinfo->version, igbvf_driver_version, 32);
- strncpy(drvinfo->fw_version, firmware_version, 32);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->driver, igbvf_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, igbvf_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->regdump_len = igbvf_get_regs_len(netdev);
drvinfo->eedump_len = igbvf_get_eeprom_len(netdev);
}
@@ -468,6 +468,5 @@ static const struct ethtool_ops igbvf_ethtool_ops = {
void igbvf_set_ethtool_ops(struct net_device *netdev)
{
- /* have to "undeclare" const on this struct to remove warnings */
- SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igbvf_ethtool_ops);
+ SET_ETHTOOL_OPS(netdev, &igbvf_ethtool_ops);
}
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index fd4a7b7..2c6d87e 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2010 Intel Corporation.
+ Copyright(c) 2009 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c
index 048aae2..b4b65bc 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.c
+++ b/drivers/net/ethernet/intel/igbvf/mbx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2010 Intel Corporation.
+ Copyright(c) 2009 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igbvf/mbx.h b/drivers/net/ethernet/intel/igbvf/mbx.h
index c2883c4..24370bc 100644
--- a/drivers/net/ethernet/intel/igbvf/mbx.h
+++ b/drivers/net/ethernet/intel/igbvf/mbx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index cca7812..4e9141c 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2010 Intel Corporation.
+ Copyright(c) 2009 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -25,6 +25,8 @@
*******************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
@@ -51,7 +53,7 @@ const char igbvf_driver_version[] = DRV_VERSION;
static const char igbvf_driver_string[] =
"Intel(R) Gigabit Virtual Function Network Driver";
static const char igbvf_copyright[] =
- "Copyright (c) 2009 - 2011 Intel Corporation.";
+ "Copyright (c) 2009 - 2012 Intel Corporation.";
static int igbvf_poll(struct napi_struct *napi, int budget);
static void igbvf_reset(struct igbvf_adapter *);
@@ -1174,32 +1176,31 @@ static void igbvf_set_rlpml(struct igbvf_adapter *adapter)
e1000_rlpml_set_vf(hw, max_frame_size);
}
-static void igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int igbvf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- if (hw->mac.ops.set_vfta(hw, vid, true))
+ if (hw->mac.ops.set_vfta(hw, vid, true)) {
dev_err(&adapter->pdev->dev, "Failed to add vlan id %d\n", vid);
- else
- set_bit(vid, adapter->active_vlans);
+ return -EINVAL;
+ }
+ set_bit(vid, adapter->active_vlans);
+ return 0;
}
-static void igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- igbvf_irq_disable(adapter);
-
- if (!test_bit(__IGBVF_DOWN, &adapter->state))
- igbvf_irq_enable(adapter);
-
- if (hw->mac.ops.set_vfta(hw, vid, false))
+ if (hw->mac.ops.set_vfta(hw, vid, false)) {
dev_err(&adapter->pdev->dev,
"Failed to remove vlan id %d\n", vid);
- else
- clear_bit(vid, adapter->active_vlans);
+ return -EINVAL;
+ }
+ clear_bit(vid, adapter->active_vlans);
+ return 0;
}
static void igbvf_restore_vlan(struct igbvf_adapter *adapter)
@@ -1746,10 +1747,9 @@ void igbvf_update_stats(struct igbvf_adapter *adapter)
static void igbvf_print_link_info(struct igbvf_adapter *adapter)
{
- dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s\n",
- adapter->link_speed,
- ((adapter->link_duplex == FULL_DUPLEX) ?
- "Full Duplex" : "Half Duplex"));
+ dev_info(&adapter->pdev->dev, "Link is Up %d Mbps %s Duplex\n",
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half");
}
static bool igbvf_has_link(struct igbvf_adapter *adapter)
@@ -2532,7 +2532,8 @@ static void igbvf_print_device_info(struct igbvf_adapter *adapter)
dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
}
-static int igbvf_set_features(struct net_device *netdev, u32 features)
+static int igbvf_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct igbvf_adapter *adapter = netdev_priv(netdev);
@@ -2842,9 +2843,8 @@ static struct pci_driver igbvf_driver = {
static int __init igbvf_init_module(void)
{
int ret;
- printk(KERN_INFO "%s - version %s\n",
- igbvf_driver_string, igbvf_driver_version);
- printk(KERN_INFO "%s\n", igbvf_copyright);
+ pr_info("%s - version %s\n", igbvf_driver_string, igbvf_driver_version);
+ pr_info("%s\n", igbvf_copyright);
ret = pci_register_driver(&igbvf_driver);
diff --git a/drivers/net/ethernet/intel/igbvf/regs.h b/drivers/net/ethernet/intel/igbvf/regs.h
index 77e18d3..7dc6341 100644
--- a/drivers/net/ethernet/intel/igbvf/regs.h
+++ b/drivers/net/ethernet/intel/igbvf/regs.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2010 Intel Corporation.
+ Copyright(c) 2009 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igbvf/vf.c b/drivers/net/ethernet/intel/igbvf/vf.c
index af3822f..1955197 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.c
+++ b/drivers/net/ethernet/intel/igbvf/vf.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2010 Intel Corporation.
+ Copyright(c) 2009 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/igbvf/vf.h b/drivers/net/ethernet/intel/igbvf/vf.h
index d7ed58f..57db3c6 100644
--- a/drivers/net/ethernet/intel/igbvf/vf.h
+++ b/drivers/net/ethernet/intel/igbvf/vf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) 82576 Virtual Function Linux driver
- Copyright(c) 2009 - 2010 Intel Corporation.
+ Copyright(c) 2009 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index 9dfce7d..dbb7dd2 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -473,10 +473,12 @@ ixgb_get_drvinfo(struct net_device *netdev,
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
- strncpy(drvinfo->driver, ixgb_driver_name, 32);
- strncpy(drvinfo->version, ixgb_driver_version, 32);
- strncpy(drvinfo->fw_version, "N/A", 32);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->driver, ixgb_driver_name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, ixgb_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = IXGB_STATS_LEN;
drvinfo->regdump_len = ixgb_get_regs_len(netdev);
drvinfo->eedump_len = ixgb_get_eeprom_len(netdev);
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index e21148f..9bd5faf 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -101,8 +101,8 @@ static void ixgb_tx_timeout_task(struct work_struct *work);
static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
-static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
-static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+static int ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+static int ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -228,7 +228,7 @@ ixgb_up(struct ixgb_adapter *adapter)
if (IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_PCIX_MODE) {
err = pci_enable_msi(adapter->pdev);
if (!err) {
- adapter->have_msi = 1;
+ adapter->have_msi = true;
irq_flags = 0;
}
/* proceed to try to request regular interrupt */
@@ -325,8 +325,8 @@ ixgb_reset(struct ixgb_adapter *adapter)
}
}
-static u32
-ixgb_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t
+ixgb_fix_features(struct net_device *netdev, netdev_features_t features)
{
/*
* Tx VLAN insertion does not work per HW design when Rx stripping is
@@ -339,10 +339,10 @@ ixgb_fix_features(struct net_device *netdev, u32 features)
}
static int
-ixgb_set_features(struct net_device *netdev, u32 features)
+ixgb_set_features(struct net_device *netdev, netdev_features_t features)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
- u32 changed = features ^ netdev->features;
+ netdev_features_t changed = features ^ netdev->features;
if (!(changed & (NETIF_F_RXCSUM|NETIF_F_HW_VLAN_RX)))
return 0;
@@ -2217,7 +2217,7 @@ ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
}
-static void
+static int
ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -2230,9 +2230,11 @@ ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
vfta |= (1 << (vid & 0x1F));
ixgb_write_vfta(&adapter->hw, index, vfta);
set_bit(vid, adapter->active_vlans);
+
+ return 0;
}
-static void
+static int
ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
@@ -2245,6 +2247,8 @@ ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
vfta &= ~(1 << (vid & 0x1F));
ixgb_write_vfta(&adapter->hw, index, vfta);
clear_bit(vid, adapter->active_vlans);
+
+ return 0;
}
static void
diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile
index 7d7387f..7a16177 100644
--- a/drivers/net/ethernet/intel/ixgbe/Makefile
+++ b/drivers/net/ethernet/intel/ixgbe/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel 10 Gigabit PCI Express Linux driver
-# Copyright(c) 1999 - 2010 Intel Corporation.
+# Copyright(c) 1999 - 2012 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index a8368d5..e6aeb64 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -560,6 +560,7 @@ extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
extern char ixgbe_driver_name[];
extern const char ixgbe_driver_version[];
+extern char ixgbe_default_device_descr[];
extern void ixgbe_up(struct ixgbe_adapter *adapter);
extern void ixgbe_down(struct ixgbe_adapter *adapter);
@@ -627,6 +628,8 @@ extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
#endif /* CONFIG_IXGBE_DCB */
extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
+extern int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
+ struct netdev_fcoe_hbainfo *info);
#endif /* IXGBE_FCOE */
#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index ef2afef..b406c36 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index 4ae26a7..4e59083 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -356,6 +356,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
case IXGBE_DEV_ID_82599_SFP_FCOE:
case IXGBE_DEV_ID_82599_SFP_EM:
case IXGBE_DEV_ID_82599_SFP_SF2:
+ case IXGBE_DEV_ID_82599_SFP_SF_QP:
case IXGBE_DEV_ID_82599EN_SFP:
media_type = ixgbe_media_type_fiber;
break;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index f1365fe..383b941 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -266,10 +266,10 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
if (hw->mac.type == ixgbe_mac_X540) {
if (hw->phy.id == 0)
hw->phy.ops.identify(hw);
- hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECL, &i);
- hw->phy.ops.read_reg(hw, 0x3, IXGBE_PCRC8ECH, &i);
- hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECL, &i);
- hw->phy.ops.read_reg(hw, 0x3, IXGBE_LDPCECH, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i);
+ hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, MDIO_MMD_PCS, &i);
}
return 0;
@@ -2599,7 +2599,7 @@ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
{
ixgbe_link_speed speed = 0;
- bool link_up = 0;
+ bool link_up = false;
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index 863f9c1..2c834c4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -75,7 +75,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
-s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packtetbuf_num);
+s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num);
s32 ixgbe_fc_autoneg(struct ixgbe_hw *hw);
s32 ixgbe_validate_mac_addr(u8 *mac_addr);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index 318caf4..8bfaaee 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
index e162775..24333b7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index fcd0e47..d3695ed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
index 2f31893..ba83570 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index 32cd97b..888a419 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
index a59d5dc..4dec47f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index 33b93ff..79a92fe 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -112,6 +112,8 @@ static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
{
u8 err = 0;
+ u8 prio_tc[MAX_USER_PRIORITY] = {0};
+ int i;
struct ixgbe_adapter *adapter = netdev_priv(netdev);
/* Fail command if not in CEE mode */
@@ -122,10 +124,15 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
if (!!state != !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
return err;
- if (state > 0)
+ if (state > 0) {
err = ixgbe_setup_tc(netdev, adapter->dcb_cfg.num_tcs.pg_tcs);
- else
+ ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
+ } else {
err = ixgbe_setup_tc(netdev, 0);
+ }
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ netdev_set_prio_tc_map(netdev, i, prio_tc[i]);
return err;
}
@@ -158,10 +165,6 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- /* Abort a bad configuration */
- if (ffs(up_map) > adapter->dcb_cfg.num_tcs.pg_tcs)
- return;
-
if (prio != DCB_ATTR_VALUE_UNDEFINED)
adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio;
if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
@@ -185,7 +188,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
if (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap !=
adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)
- adapter->dcb_set_bitmap |= BIT_PFC;
+ adapter->dcb_set_bitmap |= BIT_PFC | BIT_APP_UPCHG;
}
static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
@@ -206,10 +209,6 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- /* Abort bad configurations */
- if (ffs(up_map) > adapter->dcb_cfg.num_tcs.pg_tcs)
- return;
-
if (prio != DCB_ATTR_VALUE_UNDEFINED)
adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio;
if (bwg_id != DCB_ATTR_VALUE_UNDEFINED)
@@ -309,6 +308,27 @@ static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
*setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc;
}
+#ifdef IXGBE_FCOE
+static void ixgbe_dcbnl_devreset(struct net_device *dev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+
+ while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
+ usleep_range(1000, 2000);
+
+ if (netif_running(dev))
+ dev->netdev_ops->ndo_stop(dev);
+
+ ixgbe_clear_interrupt_scheme(adapter);
+ ixgbe_init_interrupt_scheme(adapter);
+
+ if (netif_running(dev))
+ dev->netdev_ops->ndo_open(dev);
+
+ clear_bit(__IXGBE_RESETTING, &adapter->state);
+}
+#endif
+
static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -338,27 +358,6 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
if (ret)
return DCB_NO_HW_CHG;
-#ifdef IXGBE_FCOE
- if (up && !(up & (1 << adapter->fcoe.up)))
- adapter->dcb_set_bitmap |= BIT_APP_UPCHG;
-
- /*
- * Only take down the adapter if an app change occurred. FCoE
- * may shuffle tx rings in this case and this can not be done
- * without a reset currently.
- */
- if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
- while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
- usleep_range(1000, 2000);
-
- adapter->fcoe.up = ffs(up) - 1;
-
- if (netif_running(netdev))
- netdev->netdev_ops->ndo_stop(netdev);
- ixgbe_clear_interrupt_scheme(adapter);
- }
-#endif
-
if (adapter->dcb_cfg.pfc_mode_enable) {
switch (adapter->hw.mac.type) {
case ixgbe_mac_82599EB:
@@ -385,15 +384,6 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
}
}
-#ifdef IXGBE_FCOE
- if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) {
- ixgbe_init_interrupt_scheme(adapter);
- if (netif_running(netdev))
- netdev->netdev_ops->ndo_open(netdev);
- ret = DCB_HW_CHG_RST;
- }
-#endif
-
if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS];
@@ -442,8 +432,19 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
if (adapter->dcb_cfg.pfc_mode_enable)
adapter->hw.fc.current_mode = ixgbe_fc_pfc;
- if (adapter->dcb_set_bitmap & BIT_APP_UPCHG)
- clear_bit(__IXGBE_RESETTING, &adapter->state);
+#ifdef IXGBE_FCOE
+ /* Reprogram FCoE hardware offloads when the traffic class
+ * that FCoE is using changes. This happens if the APP info
+ * changes or the up2tc mapping is updated.
+ */
+ if ((up && !(up & (1 << adapter->fcoe.up))) ||
+ (adapter->dcb_set_bitmap & BIT_APP_UPCHG)) {
+ adapter->fcoe.up = ffs(up) - 1;
+ ixgbe_dcbnl_devreset(netdev);
+ ret = DCB_HW_CHG_RST;
+ }
+#endif
+
adapter->dcb_set_bitmap = 0x00;
return ret;
}
@@ -661,22 +662,6 @@ static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev,
return ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en, prio_tc);
}
-#ifdef IXGBE_FCOE
-static void ixgbe_dcbnl_devreset(struct net_device *dev)
-{
- struct ixgbe_adapter *adapter = netdev_priv(dev);
-
- if (netif_running(dev))
- dev->netdev_ops->ndo_stop(dev);
-
- ixgbe_clear_interrupt_scheme(adapter);
- ixgbe_init_interrupt_scheme(adapter);
-
- if (netif_running(dev))
- dev->netdev_ops->ndo_open(dev);
-}
-#endif
-
static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
struct dcb_app *app)
{
@@ -761,7 +746,9 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
ixgbe_dcbnl_ieee_setets(dev, &ets);
ixgbe_dcbnl_ieee_setpfc(dev, &pfc);
} else if (mode & DCB_CAP_DCBX_VER_CEE) {
- adapter->dcb_set_bitmap |= (BIT_PFC & BIT_PG_TX & BIT_PG_RX);
+ u8 mask = BIT_PFC | BIT_PG_TX | BIT_PG_RX | BIT_APP_UPCHG;
+
+ adapter->dcb_set_bitmap |= mask;
ixgbe_dcbnl_set_all(dev);
} else {
/* Drop into single TC mode strict priority as this
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 70d58c3..a629754 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -58,7 +58,7 @@ struct ixgbe_stats {
sizeof(((struct rtnl_link_stats64 *)0)->m), \
offsetof(struct rtnl_link_stats64, m)
-static struct ixgbe_stats ixgbe_gstrings_stats[] = {
+static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
{"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
{"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
@@ -120,19 +120,23 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
#endif /* IXGBE_FCOE */
};
-#define IXGBE_QUEUE_STATS_LEN \
- ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_tx_queues + \
- ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) * \
+/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
+ * we define IXGBE_NUM_RX_QUEUES to evaluate to num_tx_queues. This is
+ * used because we do not have a good way to get the max number of
+ * rx queues with CONFIG_RPS disabled.
+ */
+#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues
+
+#define IXGBE_QUEUE_STATS_LEN ( \
+ (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
(sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBE_PB_STATS_LEN ( \
- (((struct ixgbe_adapter *)netdev_priv(netdev))->flags & \
- IXGBE_FLAG_DCB_ENABLED) ? \
- (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
- sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
- sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
- sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
- / sizeof(u64) : 0)
+ (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
+ sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
+ sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
+ sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
+ / sizeof(u64))
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
IXGBE_PB_STATS_LEN + \
IXGBE_QUEUE_STATS_LEN)
@@ -888,23 +892,19 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- char firmware_version[32];
u32 nvm_track_id;
- strncpy(drvinfo->driver, ixgbe_driver_name,
- sizeof(drvinfo->driver) - 1);
- strncpy(drvinfo->version, ixgbe_driver_version,
- sizeof(drvinfo->version) - 1);
+ strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, ixgbe_driver_version,
+ sizeof(drvinfo->version));
nvm_track_id = (adapter->eeprom_verh << 16) |
adapter->eeprom_verl;
- snprintf(firmware_version, sizeof(firmware_version), "0x%08x",
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
nvm_track_id);
- strncpy(drvinfo->fw_version, firmware_version,
- sizeof(drvinfo->fw_version) - 1);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
- sizeof(drvinfo->bus_info) - 1);
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = IXGBE_STATS_LEN;
drvinfo->testinfo_len = IXGBE_TEST_LEN;
drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
@@ -1082,8 +1082,15 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- for (j = 0; j < adapter->num_tx_queues; j++) {
+ for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
ring = adapter->tx_ring[j];
+ if (!ring) {
+ data[i] = 0;
+ data[i+1] = 0;
+ i += 2;
+ continue;
+ }
+
do {
start = u64_stats_fetch_begin_bh(&ring->syncp);
data[i] = ring->stats.packets;
@@ -1091,8 +1098,15 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
i += 2;
}
- for (j = 0; j < adapter->num_rx_queues; j++) {
+ for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
ring = adapter->rx_ring[j];
+ if (!ring) {
+ data[i] = 0;
+ data[i+1] = 0;
+ i += 2;
+ continue;
+ }
+
do {
start = u64_stats_fetch_begin_bh(&ring->syncp);
data[i] = ring->stats.packets;
@@ -1100,22 +1114,20 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
i += 2;
}
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- for (j = 0; j < MAX_TX_PACKET_BUFFERS; j++) {
- data[i++] = adapter->stats.pxontxc[j];
- data[i++] = adapter->stats.pxofftxc[j];
- }
- for (j = 0; j < MAX_RX_PACKET_BUFFERS; j++) {
- data[i++] = adapter->stats.pxonrxc[j];
- data[i++] = adapter->stats.pxoffrxc[j];
- }
+
+ for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
+ data[i++] = adapter->stats.pxontxc[j];
+ data[i++] = adapter->stats.pxofftxc[j];
+ }
+ for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
+ data[i++] = adapter->stats.pxonrxc[j];
+ data[i++] = adapter->stats.pxoffrxc[j];
}
}
static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
- struct ixgbe_adapter *adapter = netdev_priv(netdev);
char *p = (char *)data;
int i;
@@ -1130,31 +1142,29 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
- for (i = 0; i < adapter->num_tx_queues; i++) {
+ for (i = 0; i < netdev->num_tx_queues; i++) {
sprintf(p, "tx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "tx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
}
- for (i = 0; i < adapter->num_rx_queues; i++) {
+ for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
sprintf(p, "rx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
}
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
- sprintf(p, "tx_pb_%u_pxon", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "tx_pb_%u_pxoff", i);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < MAX_RX_PACKET_BUFFERS; i++) {
- sprintf(p, "rx_pb_%u_pxon", i);
- p += ETH_GSTRING_LEN;
- sprintf(p, "rx_pb_%u_pxoff", i);
- p += ETH_GSTRING_LEN;
- }
+ for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
+ sprintf(p, "tx_pb_%u_pxon", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_pb_%u_pxoff", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
+ sprintf(p, "rx_pb_%u_pxon", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_pb_%u_pxoff", i);
+ p += ETH_GSTRING_LEN;
}
/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
break;
@@ -1959,12 +1969,21 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
/* WOL not supported except for the following */
switch(hw->device_id) {
case IXGBE_DEV_ID_82599_SFP:
- /* Only this subdevice supports WOL */
- if (hw->subsystem_device_id != IXGBE_SUBDEV_ID_82599_SFP) {
+ /* Only these subdevices support WOL */
+ switch (hw->subsystem_device_id) {
+ case IXGBE_SUBDEV_ID_82599_560FLR:
+ /* only support first port */
+ if (hw->bus.func != 0) {
+ wol->supported = 0;
+ break;
+ }
+ case IXGBE_SUBDEV_ID_82599_SFP:
+ retval = 0;
+ break;
+ default:
wol->supported = 0;
break;
}
- retval = 0;
break;
case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
/* All except this subdevice support WOL */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
index df3b1be..4bc7942 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -855,3 +855,86 @@ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
}
return rc;
}
+
+/**
+ * ixgbe_fcoe_get_hbainfo - get FCoE HBA information
+ * @netdev: ixgbe adapter
+ * @info: HBA information
+ *
+ * Returns ixgbe HBA information
+ *
+ * Returns: 0 on success
+ */
+int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
+ struct netdev_fcoe_hbainfo *info)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, pos;
+ u8 buf[8];
+
+ if (!info)
+ return -EINVAL;
+
+ /* Don't return information on unsupported devices */
+ if (hw->mac.type != ixgbe_mac_82599EB &&
+ hw->mac.type != ixgbe_mac_X540)
+ return -EINVAL;
+
+ /* Manufacturer */
+ snprintf(info->manufacturer, sizeof(info->manufacturer),
+ "Intel Corporation");
+
+ /* Serial Number */
+
+ /* Get the PCI-e Device Serial Number Capability */
+ pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_DSN);
+ if (pos) {
+ pos += 4;
+ for (i = 0; i < 8; i++)
+ pci_read_config_byte(adapter->pdev, pos + i, &buf[i]);
+
+ snprintf(info->serial_number, sizeof(info->serial_number),
+ "%02X%02X%02X%02X%02X%02X%02X%02X",
+ buf[7], buf[6], buf[5], buf[4],
+ buf[3], buf[2], buf[1], buf[0]);
+ } else
+ snprintf(info->serial_number, sizeof(info->serial_number),
+ "Unknown");
+
+ /* Hardware Version */
+ snprintf(info->hardware_version,
+ sizeof(info->hardware_version),
+ "Rev %d", hw->revision_id);
+ /* Driver Name/Version */
+ snprintf(info->driver_version,
+ sizeof(info->driver_version),
+ "%s v%s",
+ ixgbe_driver_name,
+ ixgbe_driver_version);
+ /* Firmware Version */
+ snprintf(info->firmware_version,
+ sizeof(info->firmware_version),
+ "0x%08x",
+ (adapter->eeprom_verh << 16) |
+ adapter->eeprom_verl);
+
+ /* Model */
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ snprintf(info->model,
+ sizeof(info->model),
+ "Intel 82599");
+ } else {
+ snprintf(info->model,
+ sizeof(info->model),
+ "Intel X540");
+ }
+
+ /* Model Description */
+ snprintf(info->model_description,
+ sizeof(info->model_description),
+ "%s",
+ ixgbe_default_device_descr);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index 261fd62..1dbed17 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 8ef92d1..3dc6cef 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -55,6 +55,8 @@
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Network Driver";
+char ixgbe_default_device_descr[] =
+ "Intel(R) 10 Gigabit Network Connection";
#define MAJ 3
#define MIN 6
#define BUILD 7
@@ -62,7 +64,7 @@ static const char ixgbe_driver_string[] =
__stringify(BUILD) "-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
- "Copyright (c) 1999-2011 Intel Corporation.";
+ "Copyright (c) 1999-2012 Intel Corporation.";
static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_82598] = &ixgbe_82598_info,
@@ -106,6 +108,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 },
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 },
/* required last entry */
{0, }
};
@@ -146,7 +149,7 @@ static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
- /* flush memory to make sure state is correct before next watchog */
+ /* flush memory to make sure state is correct before next watchdog */
smp_mb__before_clear_bit();
clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
}
@@ -1140,7 +1143,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
if (ring_is_ps_enabled(rx_ring)) {
if (!bi->page) {
- bi->page = netdev_alloc_page(rx_ring->netdev);
+ bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
if (!bi->page) {
rx_ring->rx_stats.alloc_rx_page_failed++;
goto no_buffers;
@@ -2156,7 +2159,7 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
/* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read
- * therefore no explict interrupt disable is necessary */
+ * therefore no explicit interrupt disable is necessary */
eicr = IXGBE_READ_REG(hw, IXGBE_EICR);
if (!eicr) {
/*
@@ -2630,22 +2633,22 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
/*
* we must limit the number of descriptors so that the
* total size of max desc * buf_len is not greater
- * than 65535
+ * than 65536
*/
if (ring_is_ps_enabled(ring)) {
-#if (MAX_SKB_FRAGS > 16)
+#if (PAGE_SIZE < 8192)
rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
-#elif (MAX_SKB_FRAGS > 8)
+#elif (PAGE_SIZE < 16384)
rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
-#elif (MAX_SKB_FRAGS > 4)
+#elif (PAGE_SIZE < 32768)
rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
#else
rscctrl |= IXGBE_RSCCTL_MAXDESC_1;
#endif
} else {
- if (rx_buf_len < IXGBE_RXBUFFER_4K)
+ if (rx_buf_len <= IXGBE_RXBUFFER_4K)
rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
- else if (rx_buf_len < IXGBE_RXBUFFER_8K)
+ else if (rx_buf_len <= IXGBE_RXBUFFER_8K)
rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
else
rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
@@ -2827,7 +2830,7 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
vf_shift = adapter->num_vfs % 32;
- reg_offset = (adapter->num_vfs > 32) ? 1 : 0;
+ reg_offset = (adapter->num_vfs >= 32) ? 1 : 0;
/* Enable only the PF's pool for Tx/Rx */
IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
@@ -3044,7 +3047,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
hw->mac.ops.enable_rx_dma(hw, rxctrl);
}
-static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -3053,9 +3056,11 @@ static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
/* add VID to filter table */
hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
set_bit(vid, adapter->active_vlans);
+
+ return 0;
}
-static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -3064,6 +3069,8 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
/* remove VID from filter table */
hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
clear_bit(vid, adapter->active_vlans);
+
+ return 0;
}
/**
@@ -3602,7 +3609,7 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
{
/*
- * We are assuming the worst case scenerio here, and that
+ * We are assuming the worst case scenario here, and that
* is that an SFP was inserted/removed after the reset
* but before SFP detection was enabled. As such the best
* solution is to just start searching as soon as we start
@@ -3824,7 +3831,7 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
case IXGBE_ERR_EEPROM_VERSION:
/* We are running on a pre-production device, log a warning */
e_dev_warn("This device is a pre-production adapter/LOM. "
- "Please be aware there may be issuesassociated with "
+ "Please be aware there may be issues associated with "
"your hardware. If you are experiencing problems "
"please contact your Intel or hardware "
"representative who provided you with this "
@@ -4019,7 +4026,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
/* Mark all the VFs as inactive */
for (i = 0 ; i < adapter->num_vfs; i++)
- adapter->vfinfo[i].clear_to_send = 0;
+ adapter->vfinfo[i].clear_to_send = false;
/* ping all the active vfs to let them know we are going down */
ixgbe_ping_all_vfs(adapter);
@@ -4323,6 +4330,10 @@ static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
adapter->num_tx_queues = 1;
done:
+ if ((adapter->netdev->reg_state == NETREG_UNREGISTERED) ||
+ (adapter->netdev->reg_state == NETREG_UNREGISTERING))
+ return 0;
+
/* Notify the stack of the (possibly) reduced queue counts. */
netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
return netif_set_real_num_rx_queues(adapter->netdev,
@@ -5788,9 +5799,9 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
* @adapter - pointer to the device adapter structure
*
* This function serves two purposes. First it strobes the interrupt lines
- * in order to make certain interrupts are occuring. Secondly it sets the
+ * in order to make certain interrupts are occurring. Secondly it sets the
* bits needed to check for TX hangs. As a result we should immediately
- * determine if a hang has occured.
+ * determine if a hang has occurred.
*/
static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
{
@@ -7128,7 +7139,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
return -EINVAL;
/* Hardware has to reinitialize queues and interrupts to
- * match packet buffer alignment. Unfortunantly, the
+ * match packet buffer alignment. Unfortunately, the
* hardware is not flexible enough to do this dynamically.
*/
if (netif_running(dev))
@@ -7174,7 +7185,8 @@ void ixgbe_do_reset(struct net_device *netdev)
ixgbe_reset(adapter);
}
-static u32 ixgbe_fix_features(struct net_device *netdev, u32 data)
+static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
+ netdev_features_t data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -7204,7 +7216,8 @@ static u32 ixgbe_fix_features(struct net_device *netdev, u32 data)
return data;
}
-static int ixgbe_set_features(struct net_device *netdev, u32 data)
+static int ixgbe_set_features(struct net_device *netdev,
+ netdev_features_t data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
bool need_reset = false;
@@ -7286,6 +7299,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_fcoe_enable = ixgbe_fcoe_enable,
.ndo_fcoe_disable = ixgbe_fcoe_disable,
.ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
+ .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo,
#endif /* IXGBE_FCOE */
.ndo_set_features = ixgbe_set_features,
.ndo_fix_features = ixgbe_fix_features,
@@ -7598,9 +7612,16 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
adapter->wol = 0;
switch (pdev->device) {
case IXGBE_DEV_ID_82599_SFP:
- /* Only this subdevice supports WOL */
- if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP)
+ /* Only these subdevices support WOL */
+ switch (pdev->subsystem_device) {
+ case IXGBE_SUBDEV_ID_82599_560FLR:
+ /* only support first port */
+ if (hw->bus.func != 0)
+ break;
+ case IXGBE_SUBDEV_ID_82599_SFP:
adapter->wol = IXGBE_WUFC_MAG;
+ break;
+ }
break;
case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
/* All except this subdevice support WOL */
@@ -7708,7 +7729,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
/* add san mac addr to netdev */
ixgbe_add_sanmac_netdev(netdev);
- e_dev_info("Intel(R) 10 Gigabit Network Connection\n");
+ e_dev_info("%s\n", ixgbe_default_device_descr);
cards_found++;
return 0;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index 3f725d4..1f3e32b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index b239bda..310bdd9 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 9a56fd7..b917735 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -1214,7 +1214,7 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
u32 max_retry = 10;
u32 retry = 0;
u16 swfw_mask = 0;
- bool nack = 1;
+ bool nack = true;
*data = 0;
if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)
@@ -1421,7 +1421,7 @@ static void ixgbe_i2c_stop(struct ixgbe_hw *hw)
static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
{
s32 i;
- bool bit = 0;
+ bool bit = false;
for (i = 7; i >= 0; i--) {
ixgbe_clock_in_i2c_bit(hw, &bit);
@@ -1443,7 +1443,7 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
s32 status = 0;
s32 i;
u32 i2cctl;
- bool bit = 0;
+ bool bit = false;
for (i = 7; i >= 0; i--) {
bit = (data >> i) & 0x1;
@@ -1457,6 +1457,7 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
i2cctl |= IXGBE_I2C_DATA_OUT;
IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl);
+ IXGBE_WRITE_FLUSH(hw);
return status;
}
@@ -1473,7 +1474,7 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw)
u32 i = 0;
u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
u32 timeout = 10;
- bool ack = 1;
+ bool ack = true;
ixgbe_raise_i2c_clk(hw, &i2cctl);
@@ -1646,9 +1647,9 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl)
bool data;
if (*i2cctl & IXGBE_I2C_DATA_IN)
- data = 1;
+ data = true;
else
- data = 0;
+ data = false;
return data;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 197bdd1..cc18165 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 00fcd39..b01ecb4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -67,7 +67,8 @@ static int ixgbe_find_enabled_vfs(struct ixgbe_adapter *adapter)
vf_devfn = pdev->devfn + 0x80;
pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID, device_id, NULL);
while (pvfdev) {
- if (pvfdev->devfn == vf_devfn)
+ if (pvfdev->devfn == vf_devfn &&
+ (pvfdev->bus->number >= pdev->bus->number))
vfs_found++;
vf_devfn += 2;
pvfdev = pci_get_device(IXGBE_INTEL_VENDOR_ID,
@@ -572,7 +573,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
/* reply to reset with ack and vf mac address */
msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
- memcpy(new_mac, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ memcpy(new_mac, vf_mac, ETH_ALEN);
/*
* Piggyback the multicast filter type so VF can compute the
* correct vectors
@@ -646,6 +647,9 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false);
retval = ixgbe_set_vf_macvlan(adapter, vf, index,
(unsigned char *)(&msgbuf[1]));
+ if (retval == -ENOSPC)
+ e_warn(drv, "VF %d has requested a MACVLAN filter "
+ "but there is no space for it\n", vf);
break;
default:
e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index df04f1a..2ab38d5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -33,7 +33,6 @@ void ixgbe_msg_task(struct ixgbe_adapter *adapter);
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
-void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
u8 qos);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 6c5cca8..9b95bef 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -57,6 +57,7 @@
#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a
#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
+#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D
#define IXGBE_DEV_ID_82599EN_SFP 0x1557
@@ -65,6 +66,7 @@
#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
#define IXGBE_DEV_ID_82599_LS 0x154F
#define IXGBE_DEV_ID_X540T 0x1528
+#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A
/* VF Device IDs */
#define IXGBE_DEV_ID_82599_VF 0x10ED
@@ -159,19 +161,19 @@
/* Receive DMA Registers */
#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
- (0x0D000 + ((_i - 64) * 0x40)))
+ (0x0D000 + (((_i) - 64) * 0x40)))
#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
- (0x0D004 + ((_i - 64) * 0x40)))
+ (0x0D004 + (((_i) - 64) * 0x40)))
#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
- (0x0D008 + ((_i - 64) * 0x40)))
+ (0x0D008 + (((_i) - 64) * 0x40)))
#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
- (0x0D010 + ((_i - 64) * 0x40)))
+ (0x0D010 + (((_i) - 64) * 0x40)))
#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
- (0x0D018 + ((_i - 64) * 0x40)))
+ (0x0D018 + (((_i) - 64) * 0x40)))
#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
- (0x0D028 + ((_i - 64) * 0x40)))
+ (0x0D028 + (((_i) - 64) * 0x40)))
#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
- (0x0D02C + ((_i - 64) * 0x40)))
+ (0x0D02C + (((_i) - 64) * 0x40)))
#define IXGBE_RSCDBU 0x03028
#define IXGBE_RDDCC 0x02F20
#define IXGBE_RXMEMWRAP 0x03190
@@ -184,7 +186,7 @@
*/
#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
(((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
- (0x0D014 + ((_i - 64) * 0x40))))
+ (0x0D014 + (((_i) - 64) * 0x40))))
/*
* Rx DCA Control Register:
* 00-15 : 0x02200 + n*4
@@ -193,7 +195,7 @@
*/
#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
(((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
- (0x0D00C + ((_i - 64) * 0x40))))
+ (0x0D00C + (((_i) - 64) * 0x40))))
#define IXGBE_RDRXCTL 0x02F00
#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
/* 8 of these 0x03C00 - 0x03C1C */
@@ -342,9 +344,9 @@
#define IXGBE_WUPL 0x05900
#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
-#define IXGBE_FHFT(_n) (0x09000 + (_n * 0x100)) /* Flex host filter table */
-#define IXGBE_FHFT_EXT(_n) (0x09800 + (_n * 0x100)) /* Ext Flexible Host
- * Filter Table */
+#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */
+#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) /* Ext Flexible Host
+ * Filter Table */
#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4
#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2
@@ -1483,7 +1485,7 @@ enum {
#define IXGBE_LED_BLINK_BASE 0x00000080
#define IXGBE_LED_MODE_MASK_BASE 0x0000000F
#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i)))
-#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i))
+#define IXGBE_LED_MODE_SHIFT(_i) (8 * (_i))
#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i)
#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i)
#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i)
@@ -1710,8 +1712,6 @@ enum {
#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for write complete */
#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for read complete */
-#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
-
#define IXGBE_EEPROM_PAGE_SIZE_MAX 128
#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */
#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */
@@ -2068,9 +2068,9 @@ enum {
/* SR-IOV specific macros */
#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4)
-#define IXGBE_MBVFICR(_i) (0x00710 + (_i * 4))
-#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
-#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4))
+#define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4))
+#define IXGBE_VFLRE(_i) ((((_i) & 1) ? 0x001C0 : 0x00600))
+#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4))
enum ixgbe_fdir_pballoc_type {
IXGBE_FDIR_PBALLOC_NONE = 0,
@@ -2802,9 +2802,9 @@ struct ixgbe_eeprom_info {
struct ixgbe_mac_info {
struct ixgbe_mac_operations ops;
enum ixgbe_mac_type type;
- u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
- u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
- u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
+ u8 addr[ETH_ALEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 san_addr[ETH_ALEN];
/* prefix for World Wide Node Name (WWNN) */
u16 wwnn_prefix;
/* prefix for World Wide Port Name (WWPN) */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index e5101e9..f838a2b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2011 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -751,16 +751,20 @@ static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index)
{
u32 macc_reg;
u32 ledctl_reg;
+ ixgbe_link_speed speed;
+ bool link_up;
/*
- * In order for the blink bit in the LED control register
- * to work, link and speed must be forced in the MAC. We
- * will reverse this when we stop the blinking.
+ * Link should be up in order for the blink bit in the LED control
+ * register to work. Force link and speed in the MAC if link is down.
+ * This will be reversed when we stop the blinking.
*/
- macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
- macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
- IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
-
+ hw->mac.ops.check_link(hw, &speed, &link_up, false);
+ if (!link_up) {
+ macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC);
+ macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS;
+ IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg);
+ }
/* Set the LED to LINK_UP + BLINK. */
ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
ledctl_reg &= ~IXGBE_LED_MODE_MASK(index);
diff --git a/drivers/net/ethernet/intel/ixgbevf/Makefile b/drivers/net/ethernet/intel/ixgbevf/Makefile
index 1f35d22..4ce4c97 100644
--- a/drivers/net/ethernet/intel/ixgbevf/Makefile
+++ b/drivers/net/ethernet/intel/ixgbevf/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel 82599 Virtual Function driver
-# Copyright(c) 1999 - 2010 Intel Corporation.
+# Copyright(c) 1999 - 2012 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/ixgbevf/defines.h b/drivers/net/ethernet/intel/ixgbevf/defines.h
index 78abb6f..947b5c8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/defines.h
+++ b/drivers/net/ethernet/intel/ixgbevf/defines.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -35,7 +35,6 @@
#define IXGBE_VF_IRQ_CLEAR_MASK 7
#define IXGBE_VF_MAX_TX_QUEUES 1
#define IXGBE_VF_MAX_RX_QUEUES 1
-#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
/* Link speed */
typedef u32 ixgbe_link_speed;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index e29ba45..2bfe0d1 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2009 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -27,6 +27,8 @@
/* ethtool support for ixgbevf */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -54,7 +56,8 @@ struct ixgbe_stats {
offsetof(struct ixgbevf_adapter, m), \
offsetof(struct ixgbevf_adapter, b), \
offsetof(struct ixgbevf_adapter, r)
-static struct ixgbe_stats ixgbe_gstrings_stats[] = {
+
+static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
{"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
stats.saved_reset_vfgprc)},
{"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc,
@@ -265,11 +268,11 @@ static void ixgbevf_get_drvinfo(struct net_device *netdev,
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
- strlcpy(drvinfo->driver, ixgbevf_driver_name, 32);
- strlcpy(drvinfo->version, ixgbevf_driver_version, 32);
-
- strlcpy(drvinfo->fw_version, "N/A", 4);
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, ixgbevf_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
}
static void ixgbevf_get_ringparam(struct net_device *netdev,
@@ -549,8 +552,8 @@ static const u32 register_test_patterns[] = {
writel((W & M), (adapter->hw.hw_addr + R)); \
val = readl(adapter->hw.hw_addr + R); \
if ((W & M) != (val & M)) { \
- printk(KERN_ERR "set/check reg %04X test failed: got 0x%08X " \
- "expected 0x%08X\n", R, (val & M), (W & M)); \
+ pr_err("set/check reg %04X test failed: got 0x%08X expected " \
+ "0x%08X\n", R, (val & M), (W & M)); \
*data = R; \
writel(before, (adapter->hw.hw_addr + R)); \
return 1; \
@@ -669,7 +672,7 @@ static int ixgbevf_nway_reset(struct net_device *netdev)
return 0;
}
-static struct ethtool_ops ixgbevf_ethtool_ops = {
+static const struct ethtool_ops ixgbevf_ethtool_ops = {
.get_settings = ixgbevf_get_settings,
.get_drvinfo = ixgbevf_get_drvinfo,
.get_regs_len = ixgbevf_get_regs_len,
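The ethtool.c hunks above switch raw printk() calls to pr_err()/pr_warn() with a pr_fmt prefix, and size the strlcpy() copies by the destination buffer instead of a hard-coded 32. A minimal sketch of both idioms, assuming a hypothetical file (example_report and example_fill_drvinfo are illustrative names):

/* Must be defined before any include that pulls in printk.h */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/ethtool.h>

static void example_report(int reg)
{
	/* Every pr_err() is now automatically prefixed with the module name */
	pr_err("set/check reg %04X test failed\n", reg);
}

static void example_fill_drvinfo(struct ethtool_drvinfo *info)
{
	/* sizeof(dest) keeps the copy bounded even if the struct changes */
	strlcpy(info->driver, "example", sizeof(info->driver));
	strlcpy(info->version, "1.0", sizeof(info->version));
}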
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index e6c9d1a..dfed420 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -279,12 +279,12 @@ enum ixgbevf_boards {
board_X540_vf,
};
-extern struct ixgbevf_info ixgbevf_82599_vf_info;
-extern struct ixgbevf_info ixgbevf_X540_vf_info;
-extern struct ixgbe_mbx_operations ixgbevf_mbx_ops;
+extern const struct ixgbevf_info ixgbevf_82599_vf_info;
+extern const struct ixgbevf_info ixgbevf_X540_vf_info;
+extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
/* needed by ethtool.c */
-extern char ixgbevf_driver_name[];
+extern const char ixgbevf_driver_name[];
extern const char ixgbevf_driver_version[];
extern int ixgbevf_up(struct ixgbevf_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 4c8e199..e51d552 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -29,6 +29,9 @@
/******************************************************************************
Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/module.h>
@@ -50,14 +53,14 @@
#include "ixgbevf.h"
-char ixgbevf_driver_name[] = "ixgbevf";
+const char ixgbevf_driver_name[] = "ixgbevf";
static const char ixgbevf_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
#define DRV_VERSION "2.2.0-k"
const char ixgbevf_driver_version[] = DRV_VERSION;
static char ixgbevf_copyright[] =
- "Copyright (c) 2009 - 2010 Intel Corporation.";
+ "Copyright (c) 2009 - 2012 Intel Corporation.";
static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
[board_82599_vf] = &ixgbevf_82599_vf_info,
@@ -363,7 +366,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
if (!bi->page_dma &&
(adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
if (!bi->page) {
- bi->page = netdev_alloc_page(adapter->netdev);
+ bi->page = alloc_page(GFP_ATOMIC | __GFP_COLD);
if (!bi->page) {
adapter->alloc_rx_page_failed++;
goto no_buffers;
@@ -914,32 +917,39 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
struct ixgbe_hw *hw = &adapter->hw;
u32 eicr;
u32 msg;
+ bool got_ack = false;
eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
- if (!hw->mbx.ops.check_for_ack(hw)) {
+ if (!hw->mbx.ops.check_for_ack(hw))
+ got_ack = true;
+
+ if (!hw->mbx.ops.check_for_msg(hw)) {
+ hw->mbx.ops.read(hw, &msg, 1);
+
+ if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 1));
+
+ if (msg & IXGBE_VT_MSGTYPE_NACK)
+ pr_warn("Last Request of type %2.2x to PF Nacked\n",
+ msg & 0xFF);
/*
- * checking for the ack clears the PFACK bit. Place
- * it back in the v2p_mailbox cache so that anyone
- * polling for an ack will not miss it. Also
- * avoid the read below because the code to read
- * the mailbox will also clear the ack bit. This was
- * causing lost acks. Just cache the bit and exit
- * the IRQ handler.
+ * Restore the PFSTS bit in case someone is polling for a
+ * return message from the PF
*/
- hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
- goto out;
+ hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFSTS;
}
- /* Not an ack interrupt, go ahead and read the message */
- hw->mbx.ops.read(hw, &msg, 1);
-
- if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
- mod_timer(&adapter->watchdog_timer,
- round_jiffies(jiffies + 1));
+ /*
+ * checking for the ack clears the PFACK bit. Place
+ * it back in the v2p_mailbox cache so that anyone
+ * polling for an ack will not miss it
+ */
+ if (got_ack)
+ hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
-out:
return IRQ_HANDLED;
}
@@ -1400,7 +1410,7 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
}
}
-static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -1409,9 +1419,11 @@ static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
if (hw->mac.ops.set_vfta)
hw->mac.ops.set_vfta(hw, vid, 0, true);
set_bit(vid, adapter->active_vlans);
+
+ return 0;
}
-static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -1420,6 +1432,8 @@ static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
if (hw->mac.ops.set_vfta)
hw->mac.ops.set_vfta(hw, vid, 0, false);
clear_bit(vid, adapter->active_vlans);
+
+ return 0;
}
static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
@@ -1437,7 +1451,7 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
int count = 0;
if ((netdev_uc_count(netdev)) > 10) {
- printk(KERN_ERR "Too many unicast filters - No Space\n");
+ pr_err("Too many unicast filters - No Space\n");
return -ENOSPC;
}
@@ -2135,7 +2149,7 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
err = ixgbevf_alloc_queues(adapter);
if (err) {
- printk(KERN_ERR "Unable to allocate memory for queues\n");
+ pr_err("Unable to allocate memory for queues\n");
goto err_alloc_queues;
}
@@ -2189,7 +2203,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
} else {
err = hw->mac.ops.init_hw(hw);
if (err) {
- printk(KERN_ERR "init_shared_code failed: %d\n", err);
+ pr_err("init_shared_code failed: %d\n", err);
goto out;
}
}
@@ -2630,8 +2644,8 @@ static int ixgbevf_open(struct net_device *netdev)
* the vf can't start. */
if (hw->adapter_stopped) {
err = IXGBE_ERR_MBX;
- printk(KERN_ERR "Unable to start - perhaps the PF"
- " Driver isn't up yet\n");
+ pr_err("Unable to start - perhaps the PF Driver isn't "
+ "up yet\n");
goto err_setup_reset;
}
}
@@ -2842,10 +2856,8 @@ static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
break;
default:
if (unlikely(net_ratelimit())) {
- printk(KERN_WARNING
- "partial checksum but "
- "proto=%x!\n",
- skb->protocol);
+ pr_warn("partial checksum but "
+ "proto=%x!\n", skb->protocol);
}
break;
}
@@ -3249,7 +3261,8 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
return stats;
}
-static int ixgbevf_set_features(struct net_device *netdev, u32 features)
+static int ixgbevf_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct ixgbevf_adapter *adapter = netdev_priv(netdev);
@@ -3414,7 +3427,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->dev_addr)) {
- printk(KERN_ERR "invalid MAC address\n");
+ pr_err("invalid MAC address\n");
err = -EIO;
goto err_sw_init;
}
@@ -3535,10 +3548,10 @@ static struct pci_driver ixgbevf_driver = {
static int __init ixgbevf_init_module(void)
{
int ret;
- printk(KERN_INFO "ixgbevf: %s - version %s\n", ixgbevf_driver_string,
- ixgbevf_driver_version);
+ pr_info("%s - version %s\n", ixgbevf_driver_string,
+ ixgbevf_driver_version);
- printk(KERN_INFO "%s\n", ixgbevf_copyright);
+ pr_info("%s\n", ixgbevf_copyright);
ret = pci_register_driver(&ixgbevf_driver);
return ret;
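Among the ixgbevf_main.c changes above, the VLAN callbacks now return int, matching the ndo_vlan_rx_add_vid/ndo_vlan_rx_kill_vid prototypes of this kernel generation so that failures can be propagated. A minimal sketch under that assumption (example_vlan_rx_add_vid is a hypothetical driver callback):

#include <linux/netdevice.h>

static int example_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
	/* program the VLAN filter table here; return an errno on failure */
	return 0;
}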
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.c b/drivers/net/ethernet/intel/ixgbevf/mbx.c
index 930fa83..9c95590 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.c
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -26,6 +26,7 @@
*******************************************************************************/
#include "mbx.h"
+#include "ixgbevf.h"
/**
* ixgbevf_poll_for_msg - Wait for message notification
@@ -328,7 +329,7 @@ static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
return 0;
}
-struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
+const struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
.init_params = ixgbevf_init_mbx_params_vf,
.read = ixgbevf_read_mbx_vf,
.write = ixgbevf_write_mbx_vf,
diff --git a/drivers/net/ethernet/intel/ixgbevf/mbx.h b/drivers/net/ethernet/intel/ixgbevf/mbx.h
index ea393eb..cf9131c 100644
--- a/drivers/net/ethernet/intel/ixgbevf/mbx.h
+++ b/drivers/net/ethernet/intel/ixgbevf/mbx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -47,8 +47,8 @@
#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
-#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
-#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
+#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * (x)))
+#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * (vfn)))
#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
diff --git a/drivers/net/ethernet/intel/ixgbevf/regs.h b/drivers/net/ethernet/intel/ixgbevf/regs.h
index 189200e..debd8c0 100644
--- a/drivers/net/ethernet/intel/ixgbevf/regs.h
+++ b/drivers/net/ethernet/intel/ixgbevf/regs.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -39,29 +39,29 @@
#define IXGBE_VTEIMC 0x0010C
#define IXGBE_VTEIAC 0x00110
#define IXGBE_VTEIAM 0x00114
-#define IXGBE_VTEITR(x) (0x00820 + (4 * x))
-#define IXGBE_VTIVAR(x) (0x00120 + (4 * x))
+#define IXGBE_VTEITR(x) (0x00820 + (4 * (x)))
+#define IXGBE_VTIVAR(x) (0x00120 + (4 * (x)))
#define IXGBE_VTIVAR_MISC 0x00140
-#define IXGBE_VTRSCINT(x) (0x00180 + (4 * x))
-#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * x))
-#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * x))
-#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * x))
-#define IXGBE_VFRDH(x) (0x01010 + (0x40 * x))
-#define IXGBE_VFRDT(x) (0x01018 + (0x40 * x))
-#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * x))
-#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * x))
-#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * x))
+#define IXGBE_VTRSCINT(x) (0x00180 + (4 * (x)))
+#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * (x)))
+#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * (x)))
+#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * (x)))
+#define IXGBE_VFRDH(x) (0x01010 + (0x40 * (x)))
+#define IXGBE_VFRDT(x) (0x01018 + (0x40 * (x)))
+#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * (x)))
+#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * (x)))
+#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * (x)))
#define IXGBE_VFPSRTYPE 0x00300
-#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * x))
-#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * x))
-#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * x))
-#define IXGBE_VFTDH(x) (0x02010 + (0x40 * x))
-#define IXGBE_VFTDT(x) (0x02018 + (0x40 * x))
-#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * x))
-#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * x))
-#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * x))
-#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * x))
-#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * x))
+#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * (x)))
+#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * (x)))
+#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * (x)))
+#define IXGBE_VFTDH(x) (0x02010 + (0x40 * (x)))
+#define IXGBE_VFTDT(x) (0x02018 + (0x40 * (x)))
+#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * (x)))
+#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * (x)))
+#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * (x)))
+#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * (x)))
+#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * (x)))
#define IXGBE_VFGPRC 0x0101C
#define IXGBE_VFGPTC 0x0201C
#define IXGBE_VFGORC_LSB 0x01020
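The regs.h and mbx.h hunks above wrap every macro argument in parentheses. A minimal sketch of why that matters, using hypothetical BAD_REG/GOOD_REG names rather than the real registers:

/* Without parentheses the expansion is wrong for expression arguments: */
#define BAD_REG(x)	(0x01000 + (0x40 * x))
/* BAD_REG(i + 1)  -> (0x01000 + (0x40 * i + 1))   -- adds 1, not 0x40 */

#define GOOD_REG(x)	(0x01000 + (0x40 * (x)))
/* GOOD_REG(i + 1) -> (0x01000 + (0x40 * (i + 1))) -- steps by 0x40 as intended */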
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.c b/drivers/net/ethernet/intel/ixgbevf/vf.c
index aa3682e..74be741 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -26,6 +26,7 @@
*******************************************************************************/
#include "vf.h"
+#include "ixgbevf.h"
/**
* ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
@@ -108,7 +109,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
return IXGBE_ERR_INVALID_MAC_ADDR;
- memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ memcpy(hw->mac.perm_addr, addr, ETH_ALEN);
hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
return 0;
@@ -211,7 +212,7 @@ static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
**/
static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
{
- memcpy(mac_addr, hw->mac.perm_addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
return 0;
}
@@ -282,6 +283,17 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
return ret_val;
}
+static void ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw,
+ u32 *msg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 retmsg[IXGBE_VFMAILBOX_SIZE];
+ s32 retval = mbx->ops.write_posted(hw, msg, size);
+
+ if (!retval)
+ mbx->ops.read_posted(hw, retmsg, size);
+}
+
/**
* ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
* @hw: pointer to the HW structure
@@ -293,7 +305,6 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
struct net_device *netdev)
{
struct netdev_hw_addr *ha;
- struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
u16 *vector_list = (u16 *)&msgbuf[1];
u32 cnt, i;
@@ -320,7 +331,7 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
}
- mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
+ ixgbevf_write_msg_read_ack(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
return 0;
}
@@ -335,7 +346,6 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
u32 msgbuf[2];
msgbuf[0] = IXGBE_VF_SET_VLAN;
@@ -343,7 +353,9 @@ static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
- return mbx->ops.write_posted(hw, msgbuf, 2);
+ ixgbevf_write_msg_read_ack(hw, msgbuf, 2);
+
+ return 0;
}
/**
@@ -401,7 +413,7 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
return 0;
}
-static struct ixgbe_mac_operations ixgbevf_mac_ops = {
+static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
.init_hw = ixgbevf_init_hw_vf,
.reset_hw = ixgbevf_reset_hw_vf,
.start_hw = ixgbevf_start_hw_vf,
@@ -415,12 +427,12 @@ static struct ixgbe_mac_operations ixgbevf_mac_ops = {
.set_vfta = ixgbevf_set_vfta_vf,
};
-struct ixgbevf_info ixgbevf_82599_vf_info = {
+const struct ixgbevf_info ixgbevf_82599_vf_info = {
.mac = ixgbe_mac_82599_vf,
.mac_ops = &ixgbevf_mac_ops,
};
-struct ixgbevf_info ixgbevf_X540_vf_info = {
+const struct ixgbevf_info ixgbevf_X540_vf_info = {
.mac = ixgbe_mac_X540_vf,
.mac_ops = &ixgbevf_mac_ops,
};
diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h
index 10306b4..25c951d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/vf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/vf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 82599 Virtual Function driver
- Copyright(c) 1999 - 2010 Intel Corporation.
+ Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -167,7 +167,7 @@ struct ixgbevf_hw_stats {
struct ixgbevf_info {
enum ixgbe_mac_type mac;
- struct ixgbe_mac_operations *mac_ops;
+ const struct ixgbe_mac_operations *mac_ops;
};
#endif /* __IXGBE_VF_H__ */
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index 76b8457..55cbf65 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1990,7 +1990,7 @@ jme_fill_tx_map(struct pci_dev *pdev,
struct page *page,
u32 page_offset,
u32 len,
- u8 hidma)
+ bool hidma)
{
dma_addr_t dmaaddr;
@@ -2024,7 +2024,7 @@ jme_map_tx_skb(struct jme_adapter *jme, struct sk_buff *skb, int idx)
struct jme_ring *txring = &(jme->txring[0]);
struct txdesc *txdesc = txring->desc, *ctxdesc;
struct jme_buffer_info *txbi = txring->bufinf, *ctxbi;
- u8 hidma = jme->dev->features & NETIF_F_HIGHDMA;
+ bool hidma = jme->dev->features & NETIF_F_HIGHDMA;
int i, nr_frags = skb_shinfo(skb)->nr_frags;
int mask = jme->tx_ring_mask;
const struct skb_frag_struct *frag;
@@ -2328,19 +2328,11 @@ jme_change_mtu(struct net_device *netdev, int new_mtu)
((new_mtu) < IPV6_MIN_MTU))
return -EINVAL;
- if (new_mtu > 4000) {
- jme->reg_rxcs &= ~RXCS_FIFOTHNP;
- jme->reg_rxcs |= RXCS_FIFOTHNP_64QW;
- jme_restart_rx_engine(jme);
- } else {
- jme->reg_rxcs &= ~RXCS_FIFOTHNP;
- jme->reg_rxcs |= RXCS_FIFOTHNP_128QW;
- jme_restart_rx_engine(jme);
- }
netdev->mtu = new_mtu;
netdev_update_features(netdev);
+ jme_restart_rx_engine(jme);
jme_reset_link(jme);
return 0;
@@ -2399,9 +2391,9 @@ jme_get_drvinfo(struct net_device *netdev,
{
struct jme_adapter *jme = netdev_priv(netdev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(jme->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(jme->pdev), sizeof(info->bus_info));
}
static int
@@ -2727,8 +2719,8 @@ jme_set_msglevel(struct net_device *netdev, u32 value)
jme->msg_enable = value;
}
-static u32
-jme_fix_features(struct net_device *netdev, u32 features)
+static netdev_features_t
+jme_fix_features(struct net_device *netdev, netdev_features_t features)
{
if (netdev->mtu > 1900)
features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM);
@@ -2736,7 +2728,7 @@ jme_fix_features(struct net_device *netdev, u32 features)
}
static int
-jme_set_features(struct net_device *netdev, u32 features)
+jme_set_features(struct net_device *netdev, netdev_features_t features)
{
struct jme_adapter *jme = netdev_priv(netdev);
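The jme.c hunks above move the fix_features/set_features callbacks from u32 to netdev_features_t, the type used by the net_device_ops prototypes in this kernel. A minimal sketch of the callback pair under that assumption (example_* names and the RX-checksum helper are hypothetical):

#include <linux/netdevice.h>

static void example_hw_set_rx_csum(struct net_device *dev, bool on)
{
	/* hypothetical: program the RX checksum offload bit in hardware */
}

static netdev_features_t example_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	/* Large MTUs cannot use TSO/checksum offload on this hardware */
	if (dev->mtu > 1900)
		features &= ~(NETIF_F_ALL_TSO | NETIF_F_ALL_CSUM);
	return features;
}

static int example_set_features(struct net_device *dev,
				netdev_features_t features)
{
	netdev_features_t changed = dev->features ^ features;
	bool rx_csum = features & NETIF_F_RXCSUM;

	if (changed & NETIF_F_RXCSUM)
		example_hw_set_rx_csum(dev, rx_csum);
	return 0;
}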
diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
index 4304072..3efc897 100644
--- a/drivers/net/ethernet/jme.h
+++ b/drivers/net/ethernet/jme.h
@@ -730,7 +730,7 @@ enum jme_rxcs_values {
RXCS_RETRYCNT_60 = 0x00000F00,
RXCS_DEFAULT = RXCS_FIFOTHTP_128T |
- RXCS_FIFOTHNP_128QW |
+ RXCS_FIFOTHNP_16QW |
RXCS_DMAREQSZ_128B |
RXCS_RETRYGAP_256ns |
RXCS_RETRYCNT_32,
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index d8430f4..6ad094f 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -1230,18 +1230,7 @@ static struct platform_driver korina_driver = {
.remove = korina_remove,
};
-static int __init korina_init_module(void)
-{
- return platform_driver_register(&korina_driver);
-}
-
-static void korina_cleanup_module(void)
-{
- return platform_driver_unregister(&korina_driver);
-}
-
-module_init(korina_init_module);
-module_exit(korina_cleanup_module);
+module_platform_driver(korina_driver);
MODULE_AUTHOR("Philip Rischel <rischelp@idt.com>");
MODULE_AUTHOR("Felix Fietkau <nbd@openwrt.org>");
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 0b3567a..85e2c6c 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -98,6 +98,7 @@ struct ltq_etop_chan {
struct ltq_etop_priv {
struct net_device *netdev;
+ struct platform_device *pdev;
struct ltq_eth_data *pldata;
struct resource *res;
@@ -436,7 +437,8 @@ ltq_etop_mdio_init(struct net_device *dev)
priv->mii_bus->read = ltq_etop_mdio_rd;
priv->mii_bus->write = ltq_etop_mdio_wr;
priv->mii_bus->name = "ltq_mii";
- snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%x", 0);
+ snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ priv->pdev->name, priv->pdev->id);
priv->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
if (!priv->mii_bus->irq) {
err = -ENOMEM;
@@ -734,6 +736,7 @@ ltq_etop_probe(struct platform_device *pdev)
dev->ethtool_ops = &ltq_etop_ethtool_ops;
priv = netdev_priv(dev);
priv->res = res;
+ priv->pdev = pdev;
priv->pldata = dev_get_platdata(&pdev->dev);
priv->netdev = dev;
spin_lock_init(&priv->lock);
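The lantiq_etop hunks above keep a pointer to the platform_device so the MDIO bus id can be built from pdev->name and pdev->id, giving a globally unique bus id instead of a bare index. A minimal sketch of the registration step under that assumption (example_mdio_init is a hypothetical helper):

#include <linux/phy.h>
#include <linux/platform_device.h>

static int example_mdio_init(struct platform_device *pdev,
			     struct mii_bus *bus)
{
	/* "%s-%x" yields e.g. "ltq_mii-0" rather than the ambiguous "0" */
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, pdev->id);
	bus->parent = &pdev->dev;
	return mdiobus_register(bus);
}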
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 194a031..9edecfa 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -136,6 +136,8 @@ static char mv643xx_eth_driver_version[] = "1.4";
#define INT_MASK 0x0068
#define INT_MASK_EXT 0x006c
#define TX_FIFO_URGENT_THRESHOLD 0x0074
+#define RX_DISCARD_FRAME_CNT 0x0084
+#define RX_OVERRUN_FRAME_CNT 0x0088
#define TXQ_FIX_PRIO_CONF_MOVED 0x00dc
#define TX_BW_RATE_MOVED 0x00e0
#define TX_BW_MTU_MOVED 0x00e8
@@ -334,6 +336,9 @@ struct mib_counters {
u32 bad_crc_event;
u32 collision;
u32 late_collision;
+ /* Non MIB hardware counters */
+ u32 rx_discard;
+ u32 rx_overrun;
};
struct lro_counters {
@@ -1225,6 +1230,10 @@ static void mib_counters_clear(struct mv643xx_eth_private *mp)
for (i = 0; i < 0x80; i += 4)
mib_read(mp, i);
+
+ /* Clear non MIB hw counters also */
+ rdlp(mp, RX_DISCARD_FRAME_CNT);
+ rdlp(mp, RX_OVERRUN_FRAME_CNT);
}
static void mib_counters_update(struct mv643xx_eth_private *mp)
@@ -1262,6 +1271,9 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
p->bad_crc_event += mib_read(mp, 0x74);
p->collision += mib_read(mp, 0x78);
p->late_collision += mib_read(mp, 0x7c);
+ /* Non MIB hardware counters */
+ p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
+ p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
spin_unlock_bh(&mp->mib_counters_lock);
mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
@@ -1413,6 +1425,8 @@ static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
MIBSTAT(bad_crc_event),
MIBSTAT(collision),
MIBSTAT(late_collision),
+ MIBSTAT(rx_discard),
+ MIBSTAT(rx_overrun),
LROSTAT(lro_aggregated),
LROSTAT(lro_flushed),
LROSTAT(lro_no_desc),
@@ -1502,10 +1516,12 @@ mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static void mv643xx_eth_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *drvinfo)
{
- strncpy(drvinfo->driver, mv643xx_eth_driver_name, 32);
- strncpy(drvinfo->version, mv643xx_eth_driver_version, 32);
- strncpy(drvinfo->fw_version, "N/A", 32);
- strncpy(drvinfo->bus_info, "platform", 32);
+ strlcpy(drvinfo->driver, mv643xx_eth_driver_name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, mv643xx_eth_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info));
drvinfo->n_stats = ARRAY_SIZE(mv643xx_eth_stats);
}
@@ -1578,10 +1594,10 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
static int
-mv643xx_eth_set_features(struct net_device *dev, u32 features)
+mv643xx_eth_set_features(struct net_device *dev, netdev_features_t features)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
- u32 rx_csum = features & NETIF_F_RXCSUM;
+ bool rx_csum = features & NETIF_F_RXCSUM;
wrlp(mp, PORT_CONFIG, rx_csum ? 0x02000000 : 0x00000000);
@@ -2509,7 +2525,7 @@ static void mv643xx_eth_netpoll(struct net_device *dev)
/* platform glue ************************************************************/
static void
mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
- struct mbus_dram_target_info *dram)
+ const struct mbus_dram_target_info *dram)
{
void __iomem *base = msp->base;
u32 win_enable;
@@ -2527,7 +2543,7 @@ mv643xx_eth_conf_mbus_windows(struct mv643xx_eth_shared_private *msp,
win_protect = 0;
for (i = 0; i < dram->num_cs; i++) {
- struct mbus_dram_window *cs = dram->cs + i;
+ const struct mbus_dram_window *cs = dram->cs + i;
writel((cs->base & 0xffff0000) |
(cs->mbus_attr << 8) |
@@ -2577,6 +2593,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
static int mv643xx_eth_version_printed;
struct mv643xx_eth_shared_platform_data *pd = pdev->dev.platform_data;
struct mv643xx_eth_shared_private *msp;
+ const struct mbus_dram_target_info *dram;
struct resource *res;
int ret;
@@ -2610,7 +2627,8 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
msp->smi_bus->name = "mv643xx_eth smi";
msp->smi_bus->read = smi_bus_read;
msp->smi_bus->write = smi_bus_write,
- snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
+ snprintf(msp->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d",
+ pdev->name, pdev->id);
msp->smi_bus->parent = &pdev->dev;
msp->smi_bus->phy_mask = 0xffffffff;
if (mdiobus_register(msp->smi_bus) < 0)
@@ -2641,8 +2659,9 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
- if (pd != NULL && pd->dram != NULL)
- mv643xx_eth_conf_mbus_windows(msp, pd->dram);
+ dram = mv_mbus_dram_info();
+ if (dram)
+ mv643xx_eth_conf_mbus_windows(msp, dram);
/*
* Detect hardware parameters.
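The mv643xx_eth hunks above add the RX_DISCARD_FRAME_CNT (0x0084) and RX_OVERRUN_FRAME_CNT (0x0088) registers, which are read-to-clear: the driver reads them once when clearing statistics and then accumulates each periodic read into software counters. A minimal sketch of that pattern, assuming hypothetical names (example_read_clear, example_update_stats):

#include <linux/types.h>
#include <linux/io.h>

struct example_stats {
	u32 rx_discard;
	u32 rx_overrun;
};

static u32 example_read_clear(void __iomem *base, unsigned int off)
{
	/* reading the register returns the count and resets it to zero */
	return readl(base + off);
}

static void example_update_stats(void __iomem *base, struct example_stats *p)
{
	p->rx_discard += example_read_clear(base, 0x0084);
	p->rx_overrun += example_read_clear(base, 0x0088);
}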
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index d17d062..953ba58 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1552,7 +1552,8 @@ static int pxa168_eth_probe(struct platform_device *pdev)
pep->smi_bus->name = "pxa168_eth smi";
pep->smi_bus->read = pxa168_smi_read;
pep->smi_bus->write = pxa168_smi_write;
- snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%d", pdev->id);
+ snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d",
+ pdev->name, pdev->id);
pep->smi_bus->parent = &pdev->dev;
pep->smi_bus->phy_mask = 0xffffffff;
err = mdiobus_register(pep->smi_bus);
@@ -1645,18 +1646,7 @@ static struct platform_driver pxa168_eth_driver = {
},
};
-static int __init pxa168_init_module(void)
-{
- return platform_driver_register(&pxa168_eth_driver);
-}
-
-static void __exit pxa168_cleanup_module(void)
-{
- platform_driver_unregister(&pxa168_eth_driver);
-}
-
-module_init(pxa168_init_module);
-module_exit(pxa168_cleanup_module);
+module_platform_driver(pxa168_eth_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Ethernet driver for Marvell PXA168");
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index dea0cb4..33947ac 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -394,10 +394,10 @@ static void skge_get_drvinfo(struct net_device *dev,
{
struct skge_port *skge = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "N/A");
- strcpy(info->bus_info, pci_name(skge->hw->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(skge->hw->pdev),
+ sizeof(info->bus_info));
}
static const struct skge_stat {
@@ -2576,6 +2576,7 @@ static int skge_up(struct net_device *dev)
}
/* Initialize MAC */
+ netif_carrier_off(dev);
spin_lock_bh(&hw->phy_lock);
if (is_genesis(hw))
genesis_mac_init(hw, port);
@@ -2797,6 +2798,8 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
wmb();
+ netdev_sent_queue(dev, skb->len);
+
skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev,
@@ -2816,11 +2819,9 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
/* Free resources associated with this ring element */
-static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
- u32 control)
+static inline void skge_tx_unmap(struct pci_dev *pdev, struct skge_element *e,
+ u32 control)
{
- struct pci_dev *pdev = skge->hw->pdev;
-
/* skb header vs. fragment */
if (control & BMU_STF)
pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr),
@@ -2830,13 +2831,6 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr),
dma_unmap_len(e, maplen),
PCI_DMA_TODEVICE);
-
- if (control & BMU_EOF) {
- netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
- "tx done slot %td\n", e - skge->tx_ring.start);
-
- dev_kfree_skb(e->skb);
- }
}
/* Free all buffers in transmit ring */
@@ -2847,10 +2841,15 @@ static void skge_tx_clean(struct net_device *dev)
for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
struct skge_tx_desc *td = e->desc;
- skge_tx_free(skge, e, td->control);
+
+ skge_tx_unmap(skge->hw->pdev, e, td->control);
+
+ if (td->control & BMU_EOF)
+ dev_kfree_skb(e->skb);
td->control = 0;
}
+ netdev_reset_queue(dev);
skge->tx_ring.to_clean = e;
}
@@ -3111,6 +3110,7 @@ static void skge_tx_done(struct net_device *dev)
struct skge_port *skge = netdev_priv(dev);
struct skge_ring *ring = &skge->tx_ring;
struct skge_element *e;
+ unsigned int bytes_compl = 0, pkts_compl = 0;
skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
@@ -3120,8 +3120,20 @@ static void skge_tx_done(struct net_device *dev)
if (control & BMU_OWN)
break;
- skge_tx_free(skge, e, control);
+ skge_tx_unmap(skge->hw->pdev, e, control);
+
+ if (control & BMU_EOF) {
+ netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
+ "tx done slot %td\n",
+ e - skge->tx_ring.start);
+
+ pkts_compl++;
+ bytes_compl += e->skb->len;
+
+ dev_kfree_skb(e->skb);
+ }
}
+ netdev_completed_queue(dev, pkts_compl, bytes_compl);
skge->tx_ring.to_clean = e;
/* Can run lockless until we need to synchronize to restart queue. */
@@ -4042,7 +4054,7 @@ static void __devexit skge_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int skge_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -4104,7 +4116,7 @@ static SIMPLE_DEV_PM_OPS(skge_pm_ops, skge_suspend, skge_resume);
#else
#define SKGE_PM_OPS NULL
-#endif
+#endif /* CONFIG_PM_SLEEP */
static void skge_shutdown(struct pci_dev *pdev)
{
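The skge hunks above (and the sky2 hunks that follow) wire the transmit path into byte queue limits: netdev_sent_queue() when a frame is posted, netdev_completed_queue() after descriptors are cleaned, and netdev_reset_queue() when the ring is flushed. A minimal sketch of where the three calls sit, assuming a hypothetical driver (example_* names are illustrative):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Transmit path, after the descriptor has been handed to hardware */
static void example_xmit_post(struct net_device *dev, struct sk_buff *skb)
{
	netdev_sent_queue(dev, skb->len);
}

/* TX completion handler */
static void example_tx_done(struct net_device *dev)
{
	unsigned int bytes_compl = 0, pkts_compl = 0;

	/* ... walk completed descriptors, free skbs, accumulate counts ... */

	netdev_completed_queue(dev, pkts_compl, bytes_compl);
}

/* Ring teardown (e.g. interface down) */
static void example_tx_clean(struct net_device *dev)
{
	netdev_reset_queue(dev);
}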
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 7803efa..760c2b1 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -1110,6 +1110,7 @@ static void tx_init(struct sky2_port *sky2)
sky2->tx_prod = sky2->tx_cons = 0;
sky2->tx_tcpsum = 0;
sky2->tx_last_mss = 0;
+ netdev_reset_queue(sky2->netdev);
le = get_tx_le(sky2, &sky2->tx_prod);
le->addr = 0;
@@ -1284,7 +1285,7 @@ static const uint32_t rss_init_key[10] = {
};
/* Enable/disable receive hash calculation (RSS) */
-static void rx_set_rss(struct net_device *dev, u32 features)
+static void rx_set_rss(struct net_device *dev, netdev_features_t features)
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
@@ -1402,7 +1403,7 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
#define SKY2_VLAN_OFFLOADS (NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO)
-static void sky2_vlan_mode(struct net_device *dev, u32 features)
+static void sky2_vlan_mode(struct net_device *dev, netdev_features_t features)
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
@@ -1971,6 +1972,7 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
if (tx_avail(sky2) <= MAX_SKB_TX_LE)
netif_stop_queue(dev);
+ netdev_sent_queue(dev, skb->len);
sky2_put_idx(hw, txqaddr[sky2->port], sky2->tx_prod);
return NETDEV_TX_OK;
@@ -2002,7 +2004,8 @@ mapping_error:
static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
{
struct net_device *dev = sky2->netdev;
- unsigned idx;
+ u16 idx;
+ unsigned int bytes_compl = 0, pkts_compl = 0;
BUG_ON(done >= sky2->tx_ring_size);
@@ -2017,10 +2020,8 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
netif_printk(sky2, tx_done, KERN_DEBUG, dev,
"tx done %u\n", idx);
- u64_stats_update_begin(&sky2->tx_stats.syncp);
- ++sky2->tx_stats.packets;
- sky2->tx_stats.bytes += skb->len;
- u64_stats_update_end(&sky2->tx_stats.syncp);
+ pkts_compl++;
+ bytes_compl += skb->len;
re->skb = NULL;
dev_kfree_skb_any(skb);
@@ -2031,6 +2032,13 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
sky2->tx_cons = idx;
smp_mb();
+
+ netdev_completed_queue(dev, pkts_compl, bytes_compl);
+
+ u64_stats_update_begin(&sky2->tx_stats.syncp);
+ sky2->tx_stats.packets += pkts_compl;
+ sky2->tx_stats.bytes += bytes_compl;
+ u64_stats_update_end(&sky2->tx_stats.syncp);
}
static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
@@ -3643,10 +3651,10 @@ static void sky2_get_drvinfo(struct net_device *dev,
{
struct sky2_port *sky2 = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "N/A");
- strcpy(info->bus_info, pci_name(sky2->hw->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(sky2->hw->pdev),
+ sizeof(info->bus_info));
}
static const struct sky2_stat {
@@ -4311,7 +4319,8 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
return sky2_vpd_write(sky2->hw, cap, data, eeprom->offset, eeprom->len);
}
-static u32 sky2_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t sky2_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
const struct sky2_port *sky2 = netdev_priv(dev);
const struct sky2_hw *hw = sky2->hw;
@@ -4335,13 +4344,13 @@ static u32 sky2_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int sky2_set_features(struct net_device *dev, u32 features)
+static int sky2_set_features(struct net_device *dev, netdev_features_t features)
{
struct sky2_port *sky2 = netdev_priv(dev);
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
if (changed & NETIF_F_RXCSUM) {
- u32 on = features & NETIF_F_RXCSUM;
+ bool on = features & NETIF_F_RXCSUM;
sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/Makefile b/drivers/net/ethernet/mellanox/mlx4/Makefile
index d1aa45a..4a40ab9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx4/Makefile
@@ -1,7 +1,7 @@
obj-$(CONFIG_MLX4_CORE) += mlx4_core.o
mlx4_core-y := alloc.o catas.o cmd.o cq.o eq.o fw.o icm.o intf.o main.o mcg.o \
- mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o
+ mr.o pd.o port.o profile.o qp.o reset.o sense.o srq.o resource_tracker.o
obj-$(CONFIG_MLX4_EN) += mlx4_en.o
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 45aea9c..915e947 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -48,7 +48,8 @@ static struct work_struct catas_work;
static int internal_err_reset = 1;
module_param(internal_err_reset, int, 0644);
MODULE_PARM_DESC(internal_err_reset,
- "Reset device on internal errors if non-zero (default 1)");
+ "Reset device on internal errors if non-zero"
+ " (default 1, in SRIOV mode default is 0)");
static void dump_err_buf(struct mlx4_dev *dev)
{
@@ -116,6 +117,10 @@ void mlx4_start_catas_poll(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
phys_addr_t addr;
+ /* If we are in SRIOV, the default of the module param must be 0 */
+ if (mlx4_is_mfunc(dev))
+ internal_err_reset = 0;
+
INIT_LIST_HEAD(&priv->catas_err.list);
init_timer(&priv->catas_err.timer);
priv->catas_err.map = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 78f5a1a..eaf09d4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -39,12 +39,18 @@
#include <linux/errno.h>
#include <linux/mlx4/cmd.h>
+#include <linux/semaphore.h>
#include <asm/io.h>
#include "mlx4.h"
+#include "fw.h"
#define CMD_POLL_TOKEN 0xffff
+#define INBOX_MASK 0xffffffffffffff00ULL
+
+#define CMD_CHAN_VER 1
+#define CMD_CHAN_IF_REV 1
enum {
/* command completed successfully: */
@@ -110,8 +116,12 @@ struct mlx4_cmd_context {
int next;
u64 out_param;
u16 token;
+ u8 fw_status;
};
+static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr_cmd *in_vhcr);
+
static int mlx4_status_to_errno(u8 status)
{
static const int trans_table[] = {
@@ -142,6 +152,139 @@ static int mlx4_status_to_errno(u8 status)
return trans_table[status];
}
+static u8 mlx4_errno_to_status(int errno)
+{
+ switch (errno) {
+ case -EPERM:
+ return CMD_STAT_BAD_OP;
+ case -EINVAL:
+ return CMD_STAT_BAD_PARAM;
+ case -ENXIO:
+ return CMD_STAT_BAD_SYS_STATE;
+ case -EBUSY:
+ return CMD_STAT_RESOURCE_BUSY;
+ case -ENOMEM:
+ return CMD_STAT_EXCEED_LIM;
+ case -ENFILE:
+ return CMD_STAT_ICM_ERROR;
+ default:
+ return CMD_STAT_INTERNAL_ERR;
+ }
+}
+
+static int comm_pending(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ u32 status = readl(&priv->mfunc.comm->slave_read);
+
+ return (swab32(status) >> 31) != priv->cmd.comm_toggle;
+}
+
+static void mlx4_comm_cmd_post(struct mlx4_dev *dev, u8 cmd, u16 param)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ u32 val;
+
+ priv->cmd.comm_toggle ^= 1;
+ val = param | (cmd << 16) | (priv->cmd.comm_toggle << 31);
+ __raw_writel((__force u32) cpu_to_be32(val),
+ &priv->mfunc.comm->slave_write);
+ mmiowb();
+}
+
+static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
+ unsigned long timeout)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ unsigned long end;
+ int err = 0;
+ int ret_from_pending = 0;
+
+ /* First, verify that the master reports correct status */
+ if (comm_pending(dev)) {
+ mlx4_warn(dev, "Communication channel is not idle."
+ "my toggle is %d (cmd:0x%x)\n",
+ priv->cmd.comm_toggle, cmd);
+ return -EAGAIN;
+ }
+
+ /* Write command */
+ down(&priv->cmd.poll_sem);
+ mlx4_comm_cmd_post(dev, cmd, param);
+
+ end = msecs_to_jiffies(timeout) + jiffies;
+ while (comm_pending(dev) && time_before(jiffies, end))
+ cond_resched();
+ ret_from_pending = comm_pending(dev);
+ if (ret_from_pending) {
+ /* check if the slave is trying to boot in the middle of
+ * the FLR process. The only non-zero result in the RESET command
+ * is MLX4_DELAY_RESET_SLAVE */
+ if ((MLX4_COMM_CMD_RESET == cmd)) {
+ mlx4_warn(dev, "Got slave FLRed from Communication"
+ " channel (ret:0x%x)\n", ret_from_pending);
+ err = MLX4_DELAY_RESET_SLAVE;
+ } else {
+ mlx4_warn(dev, "Communication channel timed out\n");
+ err = -ETIMEDOUT;
+ }
+ }
+
+ up(&priv->cmd.poll_sem);
+ return err;
+}
+
+static int mlx4_comm_cmd_wait(struct mlx4_dev *dev, u8 op,
+ u16 param, unsigned long timeout)
+{
+ struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
+ struct mlx4_cmd_context *context;
+ int err = 0;
+
+ down(&cmd->event_sem);
+
+ spin_lock(&cmd->context_lock);
+ BUG_ON(cmd->free_head < 0);
+ context = &cmd->context[cmd->free_head];
+ context->token += cmd->token_mask + 1;
+ cmd->free_head = context->next;
+ spin_unlock(&cmd->context_lock);
+
+ init_completion(&context->done);
+
+ mlx4_comm_cmd_post(dev, op, param);
+
+ if (!wait_for_completion_timeout(&context->done,
+ msecs_to_jiffies(timeout))) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = context->result;
+ if (err && context->fw_status != CMD_STAT_MULTI_FUNC_REQ) {
+ mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
+ op, context->fw_status);
+ goto out;
+ }
+
+out:
+ spin_lock(&cmd->context_lock);
+ context->next = cmd->free_head;
+ cmd->free_head = context - cmd->context;
+ spin_unlock(&cmd->context_lock);
+
+ up(&cmd->event_sem);
+ return err;
+}
+
+int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
+ unsigned long timeout)
+{
+ if (mlx4_priv(dev)->cmd.use_events)
+ return mlx4_comm_cmd_wait(dev, cmd, param, timeout);
+ return mlx4_comm_cmd_poll(dev, cmd, param, timeout);
+}
+
static int cmd_pending(struct mlx4_dev *dev)
{
u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);
@@ -167,8 +310,10 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);
while (cmd_pending(dev)) {
- if (time_after_eq(jiffies, end))
+ if (time_after_eq(jiffies, end)) {
+ mlx4_err(dev, "%s:cmd_pending failed\n", __func__);
goto out;
+ }
cond_resched();
}
@@ -192,7 +337,7 @@ static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
(cmd->toggle << HCR_T_BIT) |
(event ? (1 << HCR_E_BIT) : 0) |
(op_modifier << HCR_OPMOD_SHIFT) |
- op), hcr + 6);
+ op), hcr + 6);
/*
* Make sure that our HCR writes don't get mixed in with
@@ -209,6 +354,62 @@ out:
return ret;
}
+static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
+ int out_is_imm, u32 in_modifier, u8 op_modifier,
+ u16 op, unsigned long timeout)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_vhcr_cmd *vhcr = priv->mfunc.vhcr;
+ int ret;
+
+ down(&priv->cmd.slave_sem);
+ vhcr->in_param = cpu_to_be64(in_param);
+ vhcr->out_param = out_param ? cpu_to_be64(*out_param) : 0;
+ vhcr->in_modifier = cpu_to_be32(in_modifier);
+ vhcr->opcode = cpu_to_be16((((u16) op_modifier) << 12) | (op & 0xfff));
+ vhcr->token = cpu_to_be16(CMD_POLL_TOKEN);
+ vhcr->status = 0;
+ vhcr->flags = !!(priv->cmd.use_events) << 6;
+ if (mlx4_is_master(dev)) {
+ ret = mlx4_master_process_vhcr(dev, dev->caps.function, vhcr);
+ if (!ret) {
+ if (out_is_imm) {
+ if (out_param)
+ *out_param =
+ be64_to_cpu(vhcr->out_param);
+ else {
+ mlx4_err(dev, "response expected while"
+ "output mailbox is NULL for "
+ "command 0x%x\n", op);
+ vhcr->status = CMD_STAT_BAD_PARAM;
+ }
+ }
+ ret = mlx4_status_to_errno(vhcr->status);
+ }
+ } else {
+ ret = mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_POST, 0,
+ MLX4_COMM_TIME + timeout);
+ if (!ret) {
+ if (out_is_imm) {
+ if (out_param)
+ *out_param =
+ be64_to_cpu(vhcr->out_param);
+ else {
+ mlx4_err(dev, "response expected while"
+ "output mailbox is NULL for "
+ "command 0x%x\n", op);
+ vhcr->status = CMD_STAT_BAD_PARAM;
+ }
+ }
+ ret = mlx4_status_to_errno(vhcr->status);
+ } else
+ mlx4_err(dev, "failed execution of VHCR_POST command"
+ "opcode 0x%x\n", op);
+ }
+ up(&priv->cmd.slave_sem);
+ return ret;
+}
+
static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
int out_is_imm, u32 in_modifier, u8 op_modifier,
u16 op, unsigned long timeout)
@@ -217,6 +418,7 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
void __iomem *hcr = priv->cmd.hcr;
int err = 0;
unsigned long end;
+ u32 stat;
down(&priv->cmd.poll_sem);
@@ -240,9 +442,12 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
__raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
(u64) be32_to_cpu((__force __be32)
__raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));
-
- err = mlx4_status_to_errno(be32_to_cpu((__force __be32)
- __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24);
+ stat = be32_to_cpu((__force __be32)
+ __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24;
+ err = mlx4_status_to_errno(stat);
+ if (err)
+ mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
+ op, stat);
out:
up(&priv->cmd.poll_sem);
@@ -259,6 +464,7 @@ void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
if (token != context->token)
return;
+ context->fw_status = status;
context->result = mlx4_status_to_errno(status);
context->out_param = out_param;
@@ -287,14 +493,18 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
in_modifier, op_modifier, op, context->token, 1);
- if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) {
+ if (!wait_for_completion_timeout(&context->done,
+ msecs_to_jiffies(timeout))) {
err = -EBUSY;
goto out;
}
err = context->result;
- if (err)
+ if (err) {
+ mlx4_err(dev, "command 0x%x failed: fw status = 0x%x\n",
+ op, context->fw_status);
goto out;
+ }
if (out_is_imm)
*out_param = context->out_param;
@@ -311,17 +521,1053 @@ out:
int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
int out_is_imm, u32 in_modifier, u8 op_modifier,
- u16 op, unsigned long timeout)
+ u16 op, unsigned long timeout, int native)
{
- if (mlx4_priv(dev)->cmd.use_events)
- return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
- in_modifier, op_modifier, op, timeout);
- else
- return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
- in_modifier, op_modifier, op, timeout);
+ if (!mlx4_is_mfunc(dev) || (native && mlx4_is_master(dev))) {
+ if (mlx4_priv(dev)->cmd.use_events)
+ return mlx4_cmd_wait(dev, in_param, out_param,
+ out_is_imm, in_modifier,
+ op_modifier, op, timeout);
+ else
+ return mlx4_cmd_poll(dev, in_param, out_param,
+ out_is_imm, in_modifier,
+ op_modifier, op, timeout);
+ }
+ return mlx4_slave_cmd(dev, in_param, out_param, out_is_imm,
+ in_modifier, op_modifier, op, timeout);
}
EXPORT_SYMBOL_GPL(__mlx4_cmd);
+
+static int mlx4_ARM_COMM_CHANNEL(struct mlx4_dev *dev)
+{
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_ARM_COMM_CHANNEL,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+}
+
+static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
+ int slave, u64 slave_addr,
+ int size, int is_read)
+{
+ u64 in_param;
+ u64 out_param;
+
+ if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
+ (slave & ~0x7f) | (size & 0xff)) {
+ mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
+ "master_addr:0x%llx slave_id:%d size:%d\n",
+ slave_addr, master_addr, slave, size);
+ return -EINVAL;
+ }
+
+ if (is_read) {
+ in_param = (u64) slave | slave_addr;
+ out_param = (u64) dev->caps.function | master_addr;
+ } else {
+ in_param = (u64) dev->caps.function | master_addr;
+ out_param = (u64) slave | slave_addr;
+ }
+
+ return mlx4_cmd_imm(dev, in_param, &out_param, size, 0,
+ MLX4_CMD_ACCESS_MEM,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+}
+
+int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ u64 in_param;
+ u64 out_param;
+ int err;
+
+ in_param = cmd->has_inbox ? (u64) inbox->dma : vhcr->in_param;
+ out_param = cmd->has_outbox ? (u64) outbox->dma : vhcr->out_param;
+ if (cmd->encode_slave_id) {
+ in_param &= 0xffffffffffffff00ll;
+ in_param |= slave;
+ }
+
+ err = __mlx4_cmd(dev, in_param, &out_param, cmd->out_is_imm,
+ vhcr->in_modifier, vhcr->op_modifier, vhcr->op,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+
+ if (cmd->out_is_imm)
+ vhcr->out_param = out_param;
+
+ return err;
+}
+
+static struct mlx4_cmd_info cmd_info[] = {
+ {
+ .opcode = MLX4_CMD_QUERY_FW,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_HCA,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_DEV_CAP,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_FUNC_CAP,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_FUNC_CAP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_ADAPTER,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_INIT_PORT,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_INIT_PORT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_CLOSE_PORT,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_CLOSE_PORT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_PORT,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_PORT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SET_PORT,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_SET_PORT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_MAP_EQ,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_MAP_EQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SW2HW_EQ,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_SW2HW_EQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_HW_HEALTH_CHECK,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_NOP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_ALLOC_RES,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = true,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_ALLOC_RES_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_FREE_RES,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_FREE_RES_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SW2HW_MPT,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_SW2HW_MPT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_MPT,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_MPT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_HW2SW_MPT,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_HW2SW_MPT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_READ_MTT,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_WRITE_MTT,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_WRITE_MTT_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SYNC_TPT,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+ {
+ .opcode = MLX4_CMD_HW2SW_EQ,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_HW2SW_EQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_EQ,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_EQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SW2HW_CQ,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_SW2HW_CQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_HW2SW_CQ,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_HW2SW_CQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_CQ,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_CQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_MODIFY_CQ,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = true,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_MODIFY_CQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SW2HW_SRQ,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_SW2HW_SRQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_HW2SW_SRQ,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_HW2SW_SRQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_SRQ,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_SRQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_ARM_SRQ,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_ARM_SRQ_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_RST2INIT_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = true,
+ .verify = NULL,
+ .wrapper = mlx4_RST2INIT_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_INIT2INIT_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_INIT2RTR_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_INIT2RTR_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_RTR2RTS_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_RTS2RTS_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SQERR2RTS_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_2ERR_QP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_RTS2SQD_QP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SQD2SQD_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SQD2RTS_QP,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_2RST_QP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_2RST_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_QP,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SUSPEND_QP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_UNSUSPEND_QP,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_GEN_QP_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_QUERY_IF_STAT,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QUERY_IF_STAT_wrapper
+ },
+ /* Native multicast commands are not available for guests */
+ {
+ .opcode = MLX4_CMD_QP_ATTACH,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_QP_ATTACH_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_PROMISC,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_PROMISC_wrapper
+ },
+ /* Ethernet specific commands */
+ {
+ .opcode = MLX4_CMD_SET_VLAN_FLTR,
+ .has_inbox = true,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_SET_VLAN_FLTR_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_SET_MCAST_FLTR,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_SET_MCAST_FLTR_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_DUMP_ETH_STATS,
+ .has_inbox = false,
+ .has_outbox = true,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = mlx4_DUMP_ETH_STATS_wrapper
+ },
+ {
+ .opcode = MLX4_CMD_INFORM_FLR_DONE,
+ .has_inbox = false,
+ .has_outbox = false,
+ .out_is_imm = false,
+ .encode_slave_id = false,
+ .verify = NULL,
+ .wrapper = NULL
+ },
+};
+
+static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr_cmd *in_vhcr)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cmd_info *cmd = NULL;
+ struct mlx4_vhcr_cmd *vhcr_cmd = in_vhcr ? in_vhcr : priv->mfunc.vhcr;
+ struct mlx4_vhcr *vhcr;
+ struct mlx4_cmd_mailbox *inbox = NULL;
+ struct mlx4_cmd_mailbox *outbox = NULL;
+ u64 in_param;
+ u64 out_param;
+ int ret = 0;
+ int i;
+ int err = 0;
+
+ /* Create sw representation of Virtual HCR */
+ vhcr = kzalloc(sizeof(struct mlx4_vhcr), GFP_KERNEL);
+ if (!vhcr)
+ return -ENOMEM;
+
+ /* DMA in the vHCR */
+ if (!in_vhcr) {
+ ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
+ priv->mfunc.master.slave_state[slave].vhcr_dma,
+ ALIGN(sizeof(struct mlx4_vhcr_cmd),
+ MLX4_ACCESS_MEM_ALIGN), 1);
+ if (ret) {
+ mlx4_err(dev, "%s:Failed reading vhcr"
+ "ret: 0x%x\n", __func__, ret);
+ kfree(vhcr);
+ return ret;
+ }
+ }
+
+ /* Fill SW VHCR fields */
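+ /* The 16-bit opcode word packs the command opcode in its low 12 bits
+ * and the op_modifier in its top 4 bits; bit 6 of the flags byte is
+ * the 'e' bit, which requests a completion EQE once the command is
+ * done (see the e_bit handling in the status write-back below). */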
+ vhcr->in_param = be64_to_cpu(vhcr_cmd->in_param);
+ vhcr->out_param = be64_to_cpu(vhcr_cmd->out_param);
+ vhcr->in_modifier = be32_to_cpu(vhcr_cmd->in_modifier);
+ vhcr->token = be16_to_cpu(vhcr_cmd->token);
+ vhcr->op = be16_to_cpu(vhcr_cmd->opcode) & 0xfff;
+ vhcr->op_modifier = (u8) (be16_to_cpu(vhcr_cmd->opcode) >> 12);
+ vhcr->e_bit = vhcr_cmd->flags & (1 << 6);
+
+ /* Lookup command */
+ for (i = 0; i < ARRAY_SIZE(cmd_info); ++i) {
+ if (vhcr->op == cmd_info[i].opcode) {
+ cmd = &cmd_info[i];
+ break;
+ }
+ }
+ if (!cmd) {
+ mlx4_err(dev, "Unknown command:0x%x accepted from slave:%d\n",
+ vhcr->op, slave);
+ vhcr_cmd->status = CMD_STAT_BAD_PARAM;
+ goto out_status;
+ }
+
+ /* Read inbox */
+ if (cmd->has_inbox) {
+ vhcr->in_param &= INBOX_MASK;
+ inbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(inbox)) {
+ vhcr_cmd->status = CMD_STAT_BAD_SIZE;
+ inbox = NULL;
+ goto out_status;
+ }
+
+ if (mlx4_ACCESS_MEM(dev, inbox->dma, slave,
+ vhcr->in_param,
+ MLX4_MAILBOX_SIZE, 1)) {
+ mlx4_err(dev, "%s: Failed reading inbox (cmd:0x%x)\n",
+ __func__, cmd->opcode);
+ vhcr_cmd->status = CMD_STAT_INTERNAL_ERR;
+ goto out_status;
+ }
+ }
+
+ /* Apply permission and bound checks if applicable */
+ if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
+ mlx4_warn(dev, "Command:0x%x from slave: %d failed protection "
+ "checks for resource_id:%d\n", vhcr->op, slave,
+ vhcr->in_modifier);
+ vhcr_cmd->status = CMD_STAT_BAD_OP;
+ goto out_status;
+ }
+
+ /* Allocate outbox */
+ if (cmd->has_outbox) {
+ outbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(outbox)) {
+ vhcr_cmd->status = CMD_STAT_BAD_SIZE;
+ outbox = NULL;
+ goto out_status;
+ }
+ }
+
+ /* Execute the command! */
+ if (cmd->wrapper) {
+ err = cmd->wrapper(dev, slave, vhcr, inbox, outbox,
+ cmd);
+ if (cmd->out_is_imm)
+ vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
+ } else {
+ in_param = cmd->has_inbox ? (u64) inbox->dma :
+ vhcr->in_param;
+ out_param = cmd->has_outbox ? (u64) outbox->dma :
+ vhcr->out_param;
+ err = __mlx4_cmd(dev, in_param, &out_param,
+ cmd->out_is_imm, vhcr->in_modifier,
+ vhcr->op_modifier, vhcr->op,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+
+ if (cmd->out_is_imm) {
+ vhcr->out_param = out_param;
+ vhcr_cmd->out_param = cpu_to_be64(vhcr->out_param);
+ }
+ }
+
+ if (err) {
+ mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with"
+ " error:%d, status %d\n",
+ vhcr->op, slave, vhcr->errno, err);
+ vhcr_cmd->status = mlx4_errno_to_status(err);
+ goto out_status;
+ }
+
+
+ /* Write outbox if command completed successfully */
+ if (cmd->has_outbox && !vhcr_cmd->status) {
+ ret = mlx4_ACCESS_MEM(dev, outbox->dma, slave,
+ vhcr->out_param,
+ MLX4_MAILBOX_SIZE, MLX4_CMD_WRAPPED);
+ if (ret) {
+ /* If we failed to write back the outbox after the
+ * command was successfully executed, we must fail this
+ * slave, as it is now in an undefined state */
+ mlx4_err(dev, "%s:Failed writing outbox\n", __func__);
+ goto out;
+ }
+ }
+
+out_status:
+ /* DMA back vhcr result */
+ if (!in_vhcr) {
+ ret = mlx4_ACCESS_MEM(dev, priv->mfunc.vhcr_dma, slave,
+ priv->mfunc.master.slave_state[slave].vhcr_dma,
+ ALIGN(sizeof(struct mlx4_vhcr),
+ MLX4_ACCESS_MEM_ALIGN),
+ MLX4_CMD_WRAPPED);
+ if (ret)
+ mlx4_err(dev, "%s:Failed writing vhcr result\n",
+ __func__);
+ else if (vhcr->e_bit &&
+ mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
+ mlx4_warn(dev, "Failed to generate command completion "
+ "eqe for slave %d\n", slave);
+ }
+
+out:
+ kfree(vhcr);
+ mlx4_free_cmd_mailbox(dev, inbox);
+ mlx4_free_cmd_mailbox(dev, outbox);
+ return ret;
+}
+
+static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
+ u16 param, u8 toggle)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+ u32 reply;
+ u32 slave_status = 0;
+ u8 is_going_down = 0;
+ int i;
+
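+ /* The master acknowledges a slave command by flipping the toggle bit
+ * (bit 31) it reports back in slave_read; a command whose toggle does
+ * not match the expected value means the channel lost sync. */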
+ slave_state[slave].comm_toggle ^= 1;
+ reply = (u32) slave_state[slave].comm_toggle << 31;
+ if (toggle != slave_state[slave].comm_toggle) {
+ mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER"
+ "STATE COMPROMISIED ***\n", toggle, slave);
+ goto reset_slave;
+ }
+ if (cmd == MLX4_COMM_CMD_RESET) {
+ mlx4_warn(dev, "Received reset from slave:%d\n", slave);
+ slave_state[slave].active = false;
+ for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
+ slave_state[slave].event_eq[i].eqn = -1;
+ slave_state[slave].event_eq[i].token = 0;
+ }
+ /* Check if we are in the middle of the FLR process;
+ * if so, return "retry" status to the slave */
+ if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
+ slave_status = MLX4_DELAY_RESET_SLAVE;
+ goto inform_slave_state;
+ }
+
+ /* write the version in the event field */
+ reply |= mlx4_comm_get_version();
+
+ goto reset_slave;
+ }
+ /* Command from slave in the middle of FLR */
+ if (cmd != MLX4_COMM_CMD_RESET &&
+ MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
+ mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) "
+ "in the middle of FLR\n", slave, cmd);
+ return;
+ }
+
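+ /* The slave hands over its 64-bit VHCR DMA address 16 bits at a time
+ * through VHCR0..VHCR_EN (highest word first) and then triggers
+ * command execution with VHCR_POST. */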
+ switch (cmd) {
+ case MLX4_COMM_CMD_VHCR0:
+ if (slave_state[slave].last_cmd != MLX4_COMM_CMD_RESET)
+ goto reset_slave;
+ slave_state[slave].vhcr_dma = ((u64) param) << 48;
+ priv->mfunc.master.slave_state[slave].cookie = 0;
+ mutex_init(&priv->mfunc.master.gen_eqe_mutex[slave]);
+ break;
+ case MLX4_COMM_CMD_VHCR1:
+ if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR0)
+ goto reset_slave;
+ slave_state[slave].vhcr_dma |= ((u64) param) << 32;
+ break;
+ case MLX4_COMM_CMD_VHCR2:
+ if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR1)
+ goto reset_slave;
+ slave_state[slave].vhcr_dma |= ((u64) param) << 16;
+ break;
+ case MLX4_COMM_CMD_VHCR_EN:
+ if (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR2)
+ goto reset_slave;
+ slave_state[slave].vhcr_dma |= param;
+ slave_state[slave].active = true;
+ break;
+ case MLX4_COMM_CMD_VHCR_POST:
+ if ((slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_EN) &&
+ (slave_state[slave].last_cmd != MLX4_COMM_CMD_VHCR_POST))
+ goto reset_slave;
+ down(&priv->cmd.slave_sem);
+ if (mlx4_master_process_vhcr(dev, slave, NULL)) {
+ mlx4_err(dev, "Failed processing vhcr for slave:%d,"
+ " reseting slave.\n", slave);
+ up(&priv->cmd.slave_sem);
+ goto reset_slave;
+ }
+ up(&priv->cmd.slave_sem);
+ break;
+ default:
+ mlx4_warn(dev, "Bad comm cmd:%d from slave:%d\n", cmd, slave);
+ goto reset_slave;
+ }
+ spin_lock(&priv->mfunc.master.slave_state_lock);
+ if (!slave_state[slave].is_slave_going_down)
+ slave_state[slave].last_cmd = cmd;
+ else
+ is_going_down = 1;
+ spin_unlock(&priv->mfunc.master.slave_state_lock);
+ if (is_going_down) {
+ mlx4_warn(dev, "Slave is going down aborting command(%d)"
+ " executing from slave:%d\n",
+ cmd, slave);
+ return;
+ }
+ __raw_writel((__force u32) cpu_to_be32(reply),
+ &priv->mfunc.comm[slave].slave_read);
+ mmiowb();
+
+ return;
+
+reset_slave:
+ /* cleanup any slave resources */
+ mlx4_delete_all_resources_for_slave(dev, slave);
+ spin_lock(&priv->mfunc.master.slave_state_lock);
+ if (!slave_state[slave].is_slave_going_down)
+ slave_state[slave].last_cmd = MLX4_COMM_CMD_RESET;
+ spin_unlock(&priv->mfunc.master.slave_state_lock);
+ /* With slave in the middle of FLR, no need to clean resources again */
+inform_slave_state:
+ memset(&slave_state[slave].event_eq, 0,
+ sizeof(struct mlx4_slave_event_eq_info));
+ __raw_writel((__force u32) cpu_to_be32(reply),
+ &priv->mfunc.comm[slave].slave_read);
+ wmb();
+}
+
+/* master command processing */
+void mlx4_master_comm_channel(struct work_struct *work)
+{
+ struct mlx4_mfunc_master_ctx *master =
+ container_of(work,
+ struct mlx4_mfunc_master_ctx,
+ comm_work);
+ struct mlx4_mfunc *mfunc =
+ container_of(master, struct mlx4_mfunc, master);
+ struct mlx4_priv *priv =
+ container_of(mfunc, struct mlx4_priv, mfunc);
+ struct mlx4_dev *dev = &priv->dev;
+ __be32 *bit_vec;
+ u32 comm_cmd;
+ u32 vec;
+ int i, j, slave;
+ int toggle;
+ int served = 0;
+ int reported = 0;
+ u32 slt;
+
+ bit_vec = master->comm_arm_bit_vector;
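+ /* Each set bit in the arm bit vector identifies a slave with a
+ * pending command; its slave_write word carries the toggle in bit 31,
+ * the command in bits 16-23 and the parameter in bits 0-15. */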
+ for (i = 0; i < COMM_CHANNEL_BIT_ARRAY_SIZE; i++) {
+ vec = be32_to_cpu(bit_vec[i]);
+ for (j = 0; j < 32; j++) {
+ if (!(vec & (1 << j)))
+ continue;
+ ++reported;
+ slave = (i * 32) + j;
+ comm_cmd = swab32(readl(
+ &mfunc->comm[slave].slave_write));
+ slt = swab32(readl(&mfunc->comm[slave].slave_read))
+ >> 31;
+ toggle = comm_cmd >> 31;
+ if (toggle != slt) {
+ if (master->slave_state[slave].comm_toggle
+ != slt) {
+ printk(KERN_INFO "slave %d out of sync."
+ " read toggle %d, state toggle %d. "
+ "Resynching.\n", slave, slt,
+ master->slave_state[slave].comm_toggle);
+ master->slave_state[slave].comm_toggle =
+ slt;
+ }
+ mlx4_master_do_cmd(dev, slave,
+ comm_cmd >> 16 & 0xff,
+ comm_cmd & 0xffff, toggle);
+ ++served;
+ }
+ }
+ }
+
+ if (reported && reported != served)
+ mlx4_warn(dev, "Got command event with bitmask from %d slaves"
+ " but %d were served\n",
+ reported, served);
+
+ if (mlx4_ARM_COMM_CHANNEL(dev))
+ mlx4_warn(dev, "Failed to arm comm channel events\n");
+}
+
+static int sync_toggles(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int wr_toggle;
+ int rd_toggle;
+ unsigned long end;
+
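+ /* The toggle lives in bit 31 of the comm channel words; wait up to
+ * five seconds for slave_read to catch up with slave_write so both
+ * sides start from a matching toggle. */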
+ wr_toggle = swab32(readl(&priv->mfunc.comm->slave_write)) >> 31;
+ end = jiffies + msecs_to_jiffies(5000);
+
+ while (time_before(jiffies, end)) {
+ rd_toggle = swab32(readl(&priv->mfunc.comm->slave_read)) >> 31;
+ if (rd_toggle == wr_toggle) {
+ priv->cmd.comm_toggle = rd_toggle;
+ return 0;
+ }
+
+ cond_resched();
+ }
+
+ /*
+ * We could reach here if, for example, the previous VM using this
+ * function misbehaved and left the channel in an unsynced state.
+ * Fix that here and give this VM a chance to use a properly
+ * synced channel.
+ */
+ mlx4_warn(dev, "recovering from previously mis-behaved VM\n");
+ __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_read);
+ __raw_writel((__force u32) 0, &priv->mfunc.comm->slave_write);
+ priv->cmd.comm_toggle = 0;
+
+ return 0;
+}
+
+int mlx4_multi_func_init(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *s_state;
+ int i, j, err, port;
+
+ priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
+ &priv->mfunc.vhcr_dma,
+ GFP_KERNEL);
+ if (!priv->mfunc.vhcr) {
+ mlx4_err(dev, "Couldn't allocate vhcr.\n");
+ return -ENOMEM;
+ }
+
+ if (mlx4_is_master(dev))
+ priv->mfunc.comm =
+ ioremap(pci_resource_start(dev->pdev, priv->fw.comm_bar) +
+ priv->fw.comm_base, MLX4_COMM_PAGESIZE);
+ else
+ priv->mfunc.comm =
+ ioremap(pci_resource_start(dev->pdev, 2) +
+ MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
+ if (!priv->mfunc.comm) {
+ mlx4_err(dev, "Couldn't map communication vector.\n");
+ goto err_vhcr;
+ }
+
+ if (mlx4_is_master(dev)) {
+ priv->mfunc.master.slave_state =
+ kzalloc(dev->num_slaves *
+ sizeof(struct mlx4_slave_state), GFP_KERNEL);
+ if (!priv->mfunc.master.slave_state)
+ goto err_comm;
+
+ for (i = 0; i < dev->num_slaves; ++i) {
+ s_state = &priv->mfunc.master.slave_state[i];
+ s_state->last_cmd = MLX4_COMM_CMD_RESET;
+ for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
+ s_state->event_eq[j].eqn = -1;
+ __raw_writel((__force u32) 0,
+ &priv->mfunc.comm[i].slave_write);
+ __raw_writel((__force u32) 0,
+ &priv->mfunc.comm[i].slave_read);
+ mmiowb();
+ for (port = 1; port <= MLX4_MAX_PORTS; port++) {
+ s_state->vlan_filter[port] =
+ kzalloc(sizeof(struct mlx4_vlan_fltr),
+ GFP_KERNEL);
+ if (!s_state->vlan_filter[port]) {
+ if (--port)
+ kfree(s_state->vlan_filter[port]);
+ goto err_slaves;
+ }
+ INIT_LIST_HEAD(&s_state->mcast_filters[port]);
+ }
+ spin_lock_init(&s_state->lock);
+ }
+
+ memset(&priv->mfunc.master.cmd_eqe, 0, sizeof(struct mlx4_eqe));
+ priv->mfunc.master.cmd_eqe.type = MLX4_EVENT_TYPE_CMD;
+ INIT_WORK(&priv->mfunc.master.comm_work,
+ mlx4_master_comm_channel);
+ INIT_WORK(&priv->mfunc.master.slave_event_work,
+ mlx4_gen_slave_eqe);
+ INIT_WORK(&priv->mfunc.master.slave_flr_event_work,
+ mlx4_master_handle_slave_flr);
+ spin_lock_init(&priv->mfunc.master.slave_state_lock);
+ priv->mfunc.master.comm_wq =
+ create_singlethread_workqueue("mlx4_comm");
+ if (!priv->mfunc.master.comm_wq)
+ goto err_slaves;
+
+ if (mlx4_init_resource_tracker(dev))
+ goto err_thread;
+
+ sema_init(&priv->cmd.slave_sem, 1);
+ err = mlx4_ARM_COMM_CHANNEL(dev);
+ if (err) {
+ mlx4_err(dev, " Failed to arm comm channel eq: %x\n",
+ err);
+ goto err_resource;
+ }
+
+ } else {
+ err = sync_toggles(dev);
+ if (err) {
+ mlx4_err(dev, "Couldn't sync toggles\n");
+ goto err_comm;
+ }
+
+ sema_init(&priv->cmd.slave_sem, 1);
+ }
+ return 0;
+
+err_resource:
+ mlx4_free_resource_tracker(dev);
+err_thread:
+ flush_workqueue(priv->mfunc.master.comm_wq);
+ destroy_workqueue(priv->mfunc.master.comm_wq);
+err_slaves:
+ while (--i) {
+ for (port = 1; port <= MLX4_MAX_PORTS; port++)
+ kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
+ }
+ kfree(priv->mfunc.master.slave_state);
+err_comm:
+ iounmap(priv->mfunc.comm);
+err_vhcr:
+ dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+ priv->mfunc.vhcr,
+ priv->mfunc.vhcr_dma);
+ priv->mfunc.vhcr = NULL;
+ return -ENOMEM;
+}
+
int mlx4_cmd_init(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -331,22 +1577,51 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
priv->cmd.use_events = 0;
priv->cmd.toggle = 1;
- priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE,
- MLX4_HCR_SIZE);
- if (!priv->cmd.hcr) {
- mlx4_err(dev, "Couldn't map command register.");
- return -ENOMEM;
+ priv->cmd.hcr = NULL;
+ priv->mfunc.vhcr = NULL;
+
+ if (!mlx4_is_slave(dev)) {
+ priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
+ MLX4_HCR_BASE, MLX4_HCR_SIZE);
+ if (!priv->cmd.hcr) {
+ mlx4_err(dev, "Couldn't map command register.\n");
+ return -ENOMEM;
+ }
}
priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
MLX4_MAILBOX_SIZE,
MLX4_MAILBOX_SIZE, 0);
- if (!priv->cmd.pool) {
+ if (!priv->cmd.pool)
+ goto err_hcr;
+
+ return 0;
+
+err_hcr:
+ if (!mlx4_is_slave(dev))
iounmap(priv->cmd.hcr);
- return -ENOMEM;
+ return -ENOMEM;
+}
+
+void mlx4_multi_func_cleanup(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i, port;
+
+ if (mlx4_is_master(dev)) {
+ flush_workqueue(priv->mfunc.master.comm_wq);
+ destroy_workqueue(priv->mfunc.master.comm_wq);
+ for (i = 0; i < dev->num_slaves; i++) {
+ for (port = 1; port <= MLX4_MAX_PORTS; port++)
+ kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
+ }
+ kfree(priv->mfunc.master.slave_state);
}
- return 0;
+ iounmap(priv->mfunc.comm);
+ dma_free_coherent(&(dev->pdev->dev), PAGE_SIZE,
+ priv->mfunc.vhcr, priv->mfunc.vhcr_dma);
+ priv->mfunc.vhcr = NULL;
}
void mlx4_cmd_cleanup(struct mlx4_dev *dev)
@@ -354,7 +1629,9 @@ void mlx4_cmd_cleanup(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
pci_pool_destroy(priv->cmd.pool);
- iounmap(priv->cmd.hcr);
+
+ if (!mlx4_is_slave(dev))
+ iounmap(priv->cmd.hcr);
}
/*
@@ -365,6 +1642,7 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
+ int err = 0;
priv->cmd.context = kmalloc(priv->cmd.max_cmds *
sizeof (struct mlx4_cmd_context),
@@ -389,11 +1667,10 @@ int mlx4_cmd_use_events(struct mlx4_dev *dev)
; /* nothing */
--priv->cmd.token_mask;
- priv->cmd.use_events = 1;
-
down(&priv->cmd.poll_sem);
+ priv->cmd.use_events = 1;
- return 0;
+ return err;
}
/*
@@ -433,7 +1710,8 @@ struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
}
EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
-void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox)
+void mlx4_free_cmd_mailbox(struct mlx4_dev *dev,
+ struct mlx4_cmd_mailbox *mailbox)
{
if (!mailbox)
return;
@@ -442,3 +1720,8 @@ void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbo
kfree(mailbox);
}
EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);
+
+u32 mlx4_comm_get_version(void)
+{
+ return ((u32) CMD_CHAN_IF_REV << 8) | (u32) CMD_CHAN_VER;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 499a516..7e64033 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -34,9 +34,9 @@
* SOFTWARE.
*/
+#include <linux/init.h>
#include <linux/hardirq.h>
#include <linux/export.h>
-#include <linux/gfp.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>
@@ -44,27 +44,6 @@
#include "mlx4.h"
#include "icm.h"
-struct mlx4_cq_context {
- __be32 flags;
- u16 reserved1[3];
- __be16 page_offset;
- __be32 logsize_usrpage;
- __be16 cq_period;
- __be16 cq_max_count;
- u8 reserved2[3];
- u8 comp_eqn;
- u8 log_page_size;
- u8 reserved3[2];
- u8 mtt_base_addr_h;
- __be32 mtt_base_addr_l;
- __be32 last_notified_index;
- __be32 solicit_producer_index;
- __be32 consumer_index;
- __be32 producer_index;
- u32 reserved4[2];
- __be64 db_rec_addr;
-};
-
#define MLX4_CQ_STATUS_OK ( 0 << 28)
#define MLX4_CQ_STATUS_OVERFLOW ( 9 << 28)
#define MLX4_CQ_STATUS_WRITE_FAIL (10 << 28)
@@ -81,7 +60,7 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
cqn & (dev->caps.num_cqs - 1));
if (!cq) {
- mlx4_warn(dev, "Completion event for bogus CQ %08x\n", cqn);
+ mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
return;
}
@@ -117,23 +96,24 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int cq_num)
{
- return mlx4_cmd(dev, mailbox->dma, cq_num, 0, MLX4_CMD_SW2HW_CQ,
- MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd(dev, mailbox->dma, cq_num, 0,
+ MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int cq_num, u32 opmod)
{
return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_MODIFY_CQ,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int cq_num)
{
- return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, cq_num,
- mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
- MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
+ cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
int mlx4_cq_modify(struct mlx4_dev *dev, struct mlx4_cq *cq,
@@ -188,6 +168,78 @@ int mlx4_cq_resize(struct mlx4_dev *dev, struct mlx4_cq *cq,
}
EXPORT_SYMBOL_GPL(mlx4_cq_resize);
+int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cq_table *cq_table = &priv->cq_table;
+ int err;
+
+ *cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
+ if (*cqn == -1)
+ return -ENOMEM;
+
+ err = mlx4_table_get(dev, &cq_table->table, *cqn);
+ if (err)
+ goto err_out;
+
+ err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);
+ if (err)
+ goto err_put;
+ return 0;
+
+err_put:
+ mlx4_table_put(dev, &cq_table->table, *cqn);
+
+err_out:
+ mlx4_bitmap_free(&cq_table->bitmap, *cqn);
+ return err;
+}
+
+static int mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)
+{
+ u64 out_param;
+ int err;
+
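+ /* In multi-function mode the CQ number must be reserved through the
+ * master via an ALLOC_RES command (the result comes back in the
+ * immediate out param); otherwise allocate it directly from the
+ * local bitmap/ICM. */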
+ if (mlx4_is_mfunc(dev)) {
+ err = mlx4_cmd_imm(dev, 0, &out_param, RES_CQ,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ return err;
+ else {
+ *cqn = get_param_l(&out_param);
+ return 0;
+ }
+ }
+ return __mlx4_cq_alloc_icm(dev, cqn);
+}
+
+void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_cq_table *cq_table = &priv->cq_table;
+
+ mlx4_table_put(dev, &cq_table->cmpt_table, cqn);
+ mlx4_table_put(dev, &cq_table->table, cqn);
+ mlx4_bitmap_free(&cq_table->bitmap, cqn);
+}
+
+static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)
+{
+ u64 in_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, cqn);
+ err = mlx4_cmd(dev, in_param, RES_CQ, RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ mlx4_warn(dev, "Failed freeing cq:%d\n", cqn);
+ } else
+ __mlx4_cq_free_icm(dev, cqn);
+}
+
int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq,
unsigned vector, int collapsed)
@@ -204,23 +256,15 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,
cq->vector = vector;
- cq->cqn = mlx4_bitmap_alloc(&cq_table->bitmap);
- if (cq->cqn == -1)
- return -ENOMEM;
-
- err = mlx4_table_get(dev, &cq_table->table, cq->cqn);
- if (err)
- goto err_out;
-
- err = mlx4_table_get(dev, &cq_table->cmpt_table, cq->cqn);
+ err = mlx4_cq_alloc_icm(dev, &cq->cqn);
if (err)
- goto err_put;
+ return err;
spin_lock_irq(&cq_table->lock);
err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
spin_unlock_irq(&cq_table->lock);
if (err)
- goto err_cmpt_put;
+ goto err_icm;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
@@ -259,14 +303,8 @@ err_radix:
radix_tree_delete(&cq_table->tree, cq->cqn);
spin_unlock_irq(&cq_table->lock);
-err_cmpt_put:
- mlx4_table_put(dev, &cq_table->cmpt_table, cq->cqn);
-
-err_put:
- mlx4_table_put(dev, &cq_table->table, cq->cqn);
-
-err_out:
- mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
+err_icm:
+ mlx4_cq_free_icm(dev, cq->cqn);
return err;
}
@@ -292,8 +330,7 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
complete(&cq->free);
wait_for_completion(&cq->free);
- mlx4_table_put(dev, &cq_table->table, cq->cqn);
- mlx4_bitmap_free(&cq_table->bitmap, cq->cqn);
+ mlx4_cq_free_icm(dev, cq->cqn);
}
EXPORT_SYMBOL_GPL(mlx4_cq_free);
@@ -304,6 +341,8 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
spin_lock_init(&cq_table->lock);
INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
+ if (mlx4_is_slave(dev))
+ return 0;
err = mlx4_bitmap_init(&cq_table->bitmap, dev->caps.num_cqs,
dev->caps.num_cqs - 1, dev->caps.reserved_cqs, 0);
@@ -315,6 +354,8 @@ int mlx4_init_cq_table(struct mlx4_dev *dev)
void mlx4_cleanup_cq_table(struct mlx4_dev *dev)
{
+ if (mlx4_is_slave(dev))
+ return;
/* Nothing to do to clean up radix_tree */
mlx4_bitmap_cleanup(&mlx4_priv(dev)->cq_table.bitmap);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 5829e0b..00b8127 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -51,10 +51,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
int err;
cq->size = entries;
- if (mode == RX)
- cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
- else
- cq->buf_size = sizeof(struct mlx4_cqe);
+ cq->buf_size = cq->size * sizeof(struct mlx4_cqe);
cq->ring = ring;
cq->is_tx = mode;
@@ -120,7 +117,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
cq->size = priv->rx_ring[cq->ring].actual_size;
err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,
- cq->wqres.db.dma, &cq->mcq, cq->vector, cq->is_tx);
+ cq->wqres.db.dma, &cq->mcq, cq->vector, 0);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 74e2a2a..70346fd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -45,13 +45,16 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- strncpy(drvinfo->driver, DRV_NAME, 32);
- strncpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")", 32);
- sprintf(drvinfo->fw_version, "%d.%d.%d",
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION " (" DRV_RELDATE ")",
+ sizeof(drvinfo->version));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d",
(u16) (mdev->dev->caps.fw_ver >> 32),
(u16) ((mdev->dev->caps.fw_ver >> 16) & 0xffff),
(u16) (mdev->dev->caps.fw_ver & 0xffff));
- strncpy(drvinfo->bus_info, pci_name(mdev->dev->pdev), 32);
+ strlcpy(drvinfo->bus_info, pci_name(mdev->dev->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = 0;
drvinfo->regdump_len = 0;
drvinfo->eedump_len = 0;
@@ -103,8 +106,17 @@ static void mlx4_en_get_wol(struct net_device *netdev,
struct mlx4_en_priv *priv = netdev_priv(netdev);
int err = 0;
u64 config = 0;
+ u64 mask;
- if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL)) {
+ if ((priv->port < 1) || (priv->port > 2)) {
+ en_err(priv, "Failed to get WoL information\n");
+ return;
+ }
+
+ mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
+ MLX4_DEV_CAP_FLAG_WOL_PORT2;
+
+ if (!(priv->mdev->dev->caps.flags & mask)) {
wol->supported = 0;
wol->wolopts = 0;
return;
@@ -133,8 +145,15 @@ static int mlx4_en_set_wol(struct net_device *netdev,
struct mlx4_en_priv *priv = netdev_priv(netdev);
u64 config = 0;
int err = 0;
+ u64 mask;
- if (!(priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_WOL))
+ if ((priv->port < 1) || (priv->port > 2))
+ return -EOPNOTSUPP;
+
+ mask = (priv->port == 1) ? MLX4_DEV_CAP_FLAG_WOL_PORT1 :
+ MLX4_DEV_CAP_FLAG_WOL_PORT2;
+
+ if (!(priv->mdev->dev->caps.flags & mask))
return -EOPNOTSUPP;
if (wol->supported & ~WAKE_MAGIC)
@@ -164,10 +183,11 @@ static int mlx4_en_set_wol(struct net_device *netdev,
static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
+ int bit_count = hweight64(priv->stats_bitmap);
switch (sset) {
case ETH_SS_STATS:
- return NUM_ALL_STATS +
+ return (priv->stats_bitmap ? bit_count : NUM_ALL_STATS) +
(priv->tx_ring_num + priv->rx_ring_num) * 2;
case ETH_SS_TEST:
return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
@@ -182,14 +202,34 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
int index = 0;
- int i;
+ int i, j = 0;
spin_lock_bh(&priv->stats_lock);
- for (i = 0; i < NUM_MAIN_STATS; i++)
- data[index++] = ((unsigned long *) &priv->stats)[i];
- for (i = 0; i < NUM_PORT_STATS; i++)
- data[index++] = ((unsigned long *) &priv->port_stats)[i];
+ if (!(priv->stats_bitmap)) {
+ for (i = 0; i < NUM_MAIN_STATS; i++)
+ data[index++] =
+ ((unsigned long *) &priv->stats)[i];
+ for (i = 0; i < NUM_PORT_STATS; i++)
+ data[index++] =
+ ((unsigned long *) &priv->port_stats)[i];
+ for (i = 0; i < NUM_PKT_STATS; i++)
+ data[index++] =
+ ((unsigned long *) &priv->pkstats)[i];
+ } else {
+ for (i = 0; i < NUM_MAIN_STATS; i++) {
+ if ((priv->stats_bitmap >> j) & 1)
+ data[index++] =
+ ((unsigned long *) &priv->stats)[i];
+ j++;
+ }
+ for (i = 0; i < NUM_PORT_STATS; i++) {
+ if ((priv->stats_bitmap >> j) & 1)
+ data[index++] =
+ ((unsigned long *) &priv->port_stats)[i];
+ j++;
+ }
+ }
for (i = 0; i < priv->tx_ring_num; i++) {
data[index++] = priv->tx_ring[i].packets;
data[index++] = priv->tx_ring[i].bytes;
@@ -198,8 +238,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
data[index++] = priv->rx_ring[i].packets;
data[index++] = priv->rx_ring[i].bytes;
}
- for (i = 0; i < NUM_PKT_STATS; i++)
- data[index++] = ((unsigned long *) &priv->pkstats)[i];
spin_unlock_bh(&priv->stats_lock);
}
@@ -228,11 +266,29 @@ static void mlx4_en_get_strings(struct net_device *dev,
case ETH_SS_STATS:
/* Add main counters */
- for (i = 0; i < NUM_MAIN_STATS; i++)
- strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]);
- for (i = 0; i< NUM_PORT_STATS; i++)
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[i + NUM_MAIN_STATS]);
+ if (!priv->stats_bitmap) {
+ for (i = 0; i < NUM_MAIN_STATS; i++)
+ strcpy(data + (index++) * ETH_GSTRING_LEN,
+ main_strings[i]);
+ for (i = 0; i < NUM_PORT_STATS; i++)
+ strcpy(data + (index++) * ETH_GSTRING_LEN,
+ main_strings[i +
+ NUM_MAIN_STATS]);
+ for (i = 0; i < NUM_PKT_STATS; i++)
+ strcpy(data + (index++) * ETH_GSTRING_LEN,
+ main_strings[i +
+ NUM_MAIN_STATS +
+ NUM_PORT_STATS]);
+ } else
+ for (i = 0; i < NUM_MAIN_STATS + NUM_PORT_STATS; i++) {
+ if ((priv->stats_bitmap >> i) & 1) {
+ strcpy(data +
+ (index++) * ETH_GSTRING_LEN,
+ main_strings[i]);
+ }
+ if (!(priv->stats_bitmap >> i))
+ break;
+ }
for (i = 0; i < priv->tx_ring_num; i++) {
sprintf(data + (index++) * ETH_GSTRING_LEN,
"tx%d_packets", i);
@@ -245,9 +301,6 @@ static void mlx4_en_get_strings(struct net_device *dev,
sprintf(data + (index++) * ETH_GSTRING_LEN,
"rx%d_bytes", i);
}
- for (i = 0; i< NUM_PKT_STATS; i++)
- strcpy(data + (index++) * ETH_GSTRING_LEN,
- main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]);
break;
}
}
@@ -460,6 +513,95 @@ static void mlx4_en_get_ringparam(struct net_device *dev,
param->tx_pending = priv->tx_ring[0].size;
}
+static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+
+ return priv->rx_ring_num;
+}
+
+static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_rss_map *rss_map = &priv->rss_map;
+ int rss_rings;
+ size_t n = priv->rx_ring_num;
+ int err = 0;
+
+ rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num;
+
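+ /* Entry n of the reported indirection table maps to ring
+ * (n % rss_rings). */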
+ while (n--) {
+ ring_index[n] = rss_map->qps[n % rss_rings].qpn -
+ rss_map->base_qpn;
+ }
+
+ return err;
+}
+
+static int mlx4_en_set_rxfh_indir(struct net_device *dev,
+ const u32 *ring_index)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int port_up = 0;
+ int err = 0;
+ int i;
+ int rss_rings = 0;
+
+ /* Calculate RSS table size and make sure flows are spread evenly
+ * between rings
+ */
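+ /* rss_rings is taken as the index of the first entry that wraps back
+ * to ring 0; every entry i must then equal i % rss_rings. */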
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ if (i > 0 && !ring_index[i] && !rss_rings)
+ rss_rings = i;
+
+ if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num)))
+ return -EINVAL;
+ }
+
+ if (!rss_rings)
+ rss_rings = priv->rx_ring_num;
+
+ /* RSS table size must be a power of 2 */
+ if (!is_power_of_2(rss_rings))
+ return -EINVAL;
+
+ mutex_lock(&mdev->state_lock);
+ if (priv->port_up) {
+ port_up = 1;
+ mlx4_en_stop_port(dev);
+ }
+
+ priv->prof->rss_rings = rss_rings;
+
+ if (port_up) {
+ err = mlx4_en_start_port(dev);
+ if (err)
+ en_err(priv, "Failed starting port\n");
+ }
+
+ mutex_unlock(&mdev->state_lock);
+ return err;
+}
+
+static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ int err = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = priv->rx_ring_num;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_drvinfo = mlx4_en_get_drvinfo,
.get_settings = mlx4_en_get_settings,
@@ -479,6 +621,10 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.set_pauseparam = mlx4_en_set_pauseparam,
.get_ringparam = mlx4_en_get_ringparam,
.set_ringparam = mlx4_en_set_ringparam,
+ .get_rxnfc = mlx4_en_get_rxnfc,
+ .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
+ .get_rxfh_indir = mlx4_en_get_rxfh_indir,
+ .set_rxfh_indir = mlx4_en_set_rxfh_indir,
};
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index a06096f..2097a7d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -62,10 +62,6 @@ static const char mlx4_en_version[] =
* Device scope module parameters
*/
-
-/* Enable RSS TCP traffic */
-MLX4_EN_PARM_INT(tcp_rss, 1,
- "Enable RSS for incomming TCP traffic or disabled (0)");
/* Enable RSS UDP traffic */
MLX4_EN_PARM_INT(udp_rss, 1,
"Enable RSS for incomming UDP traffic or disabled (0)");
@@ -104,7 +100,6 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
struct mlx4_en_profile *params = &mdev->profile;
int i;
- params->tcp_rss = tcp_rss;
params->udp_rss = udp_rss;
if (params->udp_rss && !(mdev->dev->caps.flags
& MLX4_DEV_CAP_FLAG_UDP_RSS)) {
@@ -120,6 +115,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS +
(!!pfcrx) * MLX4_EN_NUM_PPP_RINGS;
+ params->prof[i].rss_rings = 0;
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 78d776b..149e60d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -45,7 +45,7 @@
#include "mlx4_en.h"
#include "en_port.h"
-static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
@@ -67,9 +67,10 @@ static void mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
en_err(priv, "failed adding vlan %d\n", vid);
mutex_unlock(&mdev->state_lock);
+ return 0;
}
-static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
@@ -93,6 +94,8 @@ static void mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
en_err(priv, "Failed configuring VLAN filter\n");
}
mutex_unlock(&mdev->state_lock);
+
+ return 0;
}
u64 mlx4_en_mac_to_u64(u8 *addr)
@@ -133,7 +136,7 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
if (priv->port_up) {
/* Remove old MAC and insert the new one */
err = mlx4_replace_mac(mdev->dev, priv->port,
- priv->base_qpn, priv->mac, 0);
+ priv->base_qpn, priv->mac);
if (err)
en_err(priv, "Failed changing HW MAC address\n");
} else
@@ -148,6 +151,7 @@ static void mlx4_en_clear_list(struct net_device *dev)
struct mlx4_en_priv *priv = netdev_priv(dev);
kfree(priv->mc_addrs);
+ priv->mc_addrs = NULL;
priv->mc_addrs_cnt = 0;
}
@@ -167,6 +171,7 @@ static void mlx4_en_cache_mclist(struct net_device *dev)
i = 0;
netdev_for_each_mc_addr(ha, dev)
memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
+ mlx4_en_clear_list(dev);
priv->mc_addrs = mc_addrs;
priv->mc_addrs_cnt = mc_addrs_cnt;
}
@@ -204,6 +209,16 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
goto out;
}
+ if (!netif_carrier_ok(dev)) {
+ if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
+ if (priv->port_state.link_state) {
+ priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
+ netif_carrier_on(dev);
+ en_dbg(LINK, priv, "Link Up\n");
+ }
+ }
+ }
+
/*
* Promsicuous mode: disable all filters
*/
@@ -599,12 +614,12 @@ int mlx4_en_start_port(struct net_device *dev)
++rx_index;
}
- /* Set port mac number */
- en_dbg(DRV, priv, "Setting mac for port %d\n", priv->port);
- err = mlx4_register_mac(mdev->dev, priv->port,
- priv->mac, &priv->base_qpn, 0);
+ /* Set qp number */
+ en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
+ err = mlx4_get_eth_qp(mdev->dev, priv->port,
+ priv->mac, &priv->base_qpn);
if (err) {
- en_err(priv, "Failed setting port mac\n");
+ en_err(priv, "Failed getting eth qp\n");
goto cq_err;
}
mdev->mac_removed[priv->port] = 0;
@@ -687,6 +702,8 @@ int mlx4_en_start_port(struct net_device *dev)
/* Schedule multicast task to populate multicast list */
queue_work(mdev->workqueue, &priv->mcast_task);
+ mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
+
priv->port_up = true;
netif_tx_start_all_queues(dev);
return 0;
@@ -699,7 +716,7 @@ tx_err:
mlx4_en_release_rss_steer(priv);
mac_err:
- mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
+ mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
cq_err:
while (rx_index--)
mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
@@ -745,10 +762,6 @@ void mlx4_en_stop_port(struct net_device *dev)
/* Flush multicast filter */
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
- /* Unregister Mac address for the port */
- mlx4_unregister_mac(mdev->dev, priv->port, priv->base_qpn);
- mdev->mac_removed[priv->port] = 1;
-
/* Free TX Rings */
for (i = 0; i < priv->tx_ring_num; i++) {
mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
@@ -762,6 +775,10 @@ void mlx4_en_stop_port(struct net_device *dev)
/* Free RSS qps */
mlx4_en_release_rss_steer(priv);
+ /* Unregister Mac address for the port */
+ mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn);
+ mdev->mac_removed[priv->port] = 1;
+
/* Free RX Rings */
for (i = 0; i < priv->rx_ring_num; i++) {
mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
@@ -792,37 +809,49 @@ static void mlx4_en_restart(struct work_struct *work)
mutex_unlock(&mdev->state_lock);
}
-
-static int mlx4_en_open(struct net_device *dev)
+static void mlx4_en_clear_stats(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
int i;
- int err = 0;
-
- mutex_lock(&mdev->state_lock);
-
- if (!mdev->device_up) {
- en_err(priv, "Cannot open - device down/disabled\n");
- err = -EBUSY;
- goto out;
- }
- /* Reset HW statistics and performance counters */
if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
en_dbg(HW, priv, "Failed dumping statistics\n");
memset(&priv->stats, 0, sizeof(priv->stats));
memset(&priv->pstats, 0, sizeof(priv->pstats));
+ memset(&priv->pkstats, 0, sizeof(priv->pkstats));
+ memset(&priv->port_stats, 0, sizeof(priv->port_stats));
for (i = 0; i < priv->tx_ring_num; i++) {
priv->tx_ring[i].bytes = 0;
priv->tx_ring[i].packets = 0;
+ priv->tx_ring[i].tx_csum = 0;
}
for (i = 0; i < priv->rx_ring_num; i++) {
priv->rx_ring[i].bytes = 0;
priv->rx_ring[i].packets = 0;
+ priv->rx_ring[i].csum_ok = 0;
+ priv->rx_ring[i].csum_none = 0;
}
+}
+
+static int mlx4_en_open(struct net_device *dev)
+{
+ struct mlx4_en_priv *priv = netdev_priv(dev);
+ struct mlx4_en_dev *mdev = priv->mdev;
+ int err = 0;
+
+ mutex_lock(&mdev->state_lock);
+
+ if (!mdev->device_up) {
+ en_err(priv, "Cannot open - device down/disabled\n");
+ err = -EBUSY;
+ goto out;
+ }
+
+ /* Reset HW statistics and SW counters */
+ mlx4_en_clear_stats(dev);
err = mlx4_en_start_port(dev);
if (err)
@@ -863,7 +892,8 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
for (i = 0; i < priv->rx_ring_num; i++) {
if (priv->rx_ring[i].rx_info)
- mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i]);
+ mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
+ priv->prof->rx_ring_size, priv->stride);
if (priv->rx_cq[i].buf)
mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
}
@@ -974,6 +1004,21 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
+static int mlx4_en_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct mlx4_en_priv *priv = netdev_priv(netdev);
+
+ if (features & NETIF_F_LOOPBACK)
+ priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
+ else
+ priv->ctrl_flags &=
+ cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);
+
+ return 0;
+}
+
static const struct net_device_ops mlx4_netdev_ops = {
.ndo_open = mlx4_en_open,
.ndo_stop = mlx4_en_close,
@@ -990,6 +1035,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mlx4_en_netpoll,
#endif
+ .ndo_set_features = mlx4_en_set_features,
};
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -1022,6 +1068,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->port = port;
priv->port_up = false;
priv->flags = prof->flags;
+ priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
+ MLX4_WQE_CTRL_SOLICITED);
priv->tx_ring_num = prof->tx_ring_num;
priv->rx_ring_num = prof->rx_ring_num;
priv->mac_index = -1;
@@ -1088,6 +1136,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
dev->features = dev->hw_features | NETIF_F_HIGHDMA |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER;
+ dev->hw_features |= NETIF_F_LOOPBACK;
mdev->pndev[port] = dev;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 03c84cd..3317914 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -41,13 +41,6 @@
#include "mlx4_en.h"
-int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
- u64 mac, u64 clear, u8 mode)
-{
- return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
- MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B);
-}
-
int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -72,76 +65,7 @@ int mlx4_SET_VLAN_FLTR(struct mlx4_dev *dev, struct mlx4_en_priv *priv)
filter->entry[i] = cpu_to_be32(entry);
}
err = mlx4_cmd(dev, mailbox->dma, priv->port, 0, MLX4_CMD_SET_VLAN_FLTR,
- MLX4_CMD_TIME_CLASS_B);
- mlx4_free_cmd_mailbox(dev, mailbox);
- return err;
-}
-
-
-int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
- u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
-{
- struct mlx4_cmd_mailbox *mailbox;
- struct mlx4_set_port_general_context *context;
- int err;
- u32 in_mod;
-
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
- context = mailbox->buf;
- memset(context, 0, sizeof *context);
-
- context->flags = SET_PORT_GEN_ALL_VALID;
- context->mtu = cpu_to_be16(mtu);
- context->pptx = (pptx * (!pfctx)) << 7;
- context->pfctx = pfctx;
- context->pprx = (pprx * (!pfcrx)) << 7;
- context->pfcrx = pfcrx;
-
- in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
- err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
-
- mlx4_free_cmd_mailbox(dev, mailbox);
- return err;
-}
-
-int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
- u8 promisc)
-{
- struct mlx4_cmd_mailbox *mailbox;
- struct mlx4_set_port_rqp_calc_context *context;
- int err;
- u32 in_mod;
- u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
- MCAST_DIRECT : MCAST_DEFAULT;
-
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER &&
- dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
- return 0;
-
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
- context = mailbox->buf;
- memset(context, 0, sizeof *context);
-
- context->base_qpn = cpu_to_be32(base_qpn);
- context->n_mac = dev->caps.log_num_macs;
- context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
- base_qpn);
- context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
- base_qpn);
- context->intra_no_vlan = 0;
- context->no_vlan = MLX4_NO_VLAN_IDX;
- context->intra_vlan_miss = 0;
- context->vlan_miss = MLX4_VLAN_MISS_IDX;
-
- in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
- err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
-
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
@@ -159,7 +83,8 @@ int mlx4_en_QUERY_PORT(struct mlx4_en_dev *mdev, u8 port)
return PTR_ERR(mailbox);
memset(mailbox->buf, 0, sizeof(*qport_context));
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
- MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
if (err)
goto out;
qport_context = mailbox->buf;
@@ -204,7 +129,8 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
return PTR_ERR(mailbox);
memset(mailbox->buf, 0, sizeof(*mlx4_en_stats));
err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0,
- MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
if (err)
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h
index 19eb244..6934fd7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.h
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h
@@ -39,49 +39,6 @@
#define SET_PORT_PROMISC_SHIFT 31
#define SET_PORT_MC_PROMISC_SHIFT 30
-enum {
- MLX4_CMD_SET_VLAN_FLTR = 0x47,
- MLX4_CMD_SET_MCAST_FLTR = 0x48,
- MLX4_CMD_DUMP_ETH_STATS = 0x49,
-};
-
-enum {
- MCAST_DIRECT_ONLY = 0,
- MCAST_DIRECT = 1,
- MCAST_DEFAULT = 2
-};
-
-struct mlx4_set_port_general_context {
- u8 reserved[3];
- u8 flags;
- u16 reserved2;
- __be16 mtu;
- u8 pptx;
- u8 pfctx;
- u16 reserved3;
- u8 pprx;
- u8 pfcrx;
- u16 reserved4;
-};
-
-struct mlx4_set_port_rqp_calc_context {
- __be32 base_qpn;
- u8 rererved;
- u8 n_mac;
- u8 n_vlan;
- u8 n_prio;
- u8 reserved2[3];
- u8 mac_miss;
- u8 intra_no_vlan;
- u8 no_vlan;
- u8 intra_vlan_miss;
- u8 vlan_miss;
- u8 reserved3[3];
- u8 no_vlan_prio;
- __be32 promisc;
- __be32 mcast;
-};
-
#define VLAN_FLTR_SIZE 128
struct mlx4_set_vlan_fltr_mbox {
__be32 entry[VLAN_FLTR_SIZE];
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 0dfb4ec..bcbc54c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -44,7 +44,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
struct mlx4_en_dev *mdev = priv->mdev;
memset(context, 0, sizeof *context);
- context->flags = cpu_to_be32(7 << 16 | rss << 13);
+ context->flags = cpu_to_be32(7 << 16 | rss << MLX4_RSS_QPC_FLAG_OFFSET);
context->pd = cpu_to_be32(mdev->priv_pdn);
context->mtu_msgmax = 0xff;
if (!is_tx && !rss)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index c2df6c3..d4ad8c2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -168,8 +168,12 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
return 0;
err:
- while (i--)
+ while (i--) {
+ dma_addr_t dma = be64_to_cpu(rx_desc->data[i].addr);
+ pci_unmap_single(priv->mdev->pdev, dma, skb_frags[i].size,
+ PCI_DMA_FROMDEVICE);
put_page(skb_frags[i].page);
+ }
return -ENOMEM;
}
@@ -380,12 +384,12 @@ err_allocator:
}
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring)
+ struct mlx4_en_rx_ring *ring, u32 size, u16 stride)
{
struct mlx4_en_dev *mdev = priv->mdev;
mlx4_en_unmap_buffer(&ring->wqres.buf);
- mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size + TXBB_SIZE);
+ mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
vfree(ring->rx_info);
ring->rx_info = NULL;
}
@@ -541,6 +545,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
unsigned int length;
int polled = 0;
int ip_summed;
+ struct ethhdr *ethh;
+ u64 s_mac;
if (!priv->port_up)
return 0;
@@ -577,6 +583,19 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
goto next;
}
+ /* Get a pointer to the first fragment, since we don't have an skb
+ * yet, and cast it to an ethhdr struct */
+ ethh = (struct ethhdr *)(page_address(skb_frags[0].page) +
+ skb_frags[0].offset);
+ s_mac = mlx4_en_mac_to_u64(ethh->h_source);
+
+ /* Drop the packet if its source MAC equals our own MAC, unless we
+ * are running the loopback selftest with forced loopback enabled */
+ if (s_mac == priv->mac &&
+ (!(dev->features & NETIF_F_LOOPBACK) ||
+ !priv->validate_loopback))
+ goto next;
+
/*
* Packet is OK - process it.
*/
@@ -837,9 +856,11 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_en_rss_map *rss_map = &priv->rss_map;
struct mlx4_qp_context context;
- struct mlx4_en_rss_context *rss_context;
+ struct mlx4_rss_context *rss_context;
+ int rss_rings;
void *ptr;
- u8 rss_mask = 0x3f;
+ u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
+ MLX4_RSS_TCP_IPV6);
int i, qpn;
int err = 0;
int good_qps = 0;
@@ -877,18 +898,26 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
priv->rx_ring[0].cqn, &context);
- ptr = ((void *) &context) + 0x3c;
+ if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
+ rss_rings = priv->rx_ring_num;
+ else
+ rss_rings = priv->prof->rss_rings;
+
+ ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
+ + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
rss_context = ptr;
- rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 |
+ rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
(rss_map->base_qpn));
rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
+ if (priv->mdev->profile.udp_rss) {
+ rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
+ rss_context->base_qpn_udp = rss_context->default_qpn;
+ }
rss_context->flags = rss_mask;
- rss_context->hash_fn = 1;
+ rss_context->hash_fn = MLX4_RSS_HASH_TOP;
for (i = 0; i < 10; i++)
rss_context->rss_key[i] = rsskey[i];
- if (priv->mdev->profile.udp_rss)
- rss_context->base_qpn_udp = rss_context->default_qpn;
err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
&rss_map->indir_qp, &rss_map->indir_state);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index 9fdbcec..bf2e5d3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -43,7 +43,7 @@
static int mlx4_en_test_registers(struct mlx4_en_priv *priv)
{
return mlx4_cmd(priv->mdev->dev, 0, 0, 0, MLX4_CMD_HW_HEALTH_CHECK,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
static int mlx4_en_test_loopback_xmit(struct mlx4_en_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index d901b42..9ef9038 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -307,59 +307,60 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
return cnt;
}
-
static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_cq *mcq = &cq->mcq;
struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
- struct mlx4_cqe *cqe = cq->buf;
+ struct mlx4_cqe *cqe;
u16 index;
- u16 new_index;
+ u16 new_index, ring_index;
u32 txbbs_skipped = 0;
- u32 cq_last_sav;
-
- /* index always points to the first TXBB of the last polled descriptor */
- index = ring->cons & ring->size_mask;
- new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
- if (index == new_index)
- return;
+ u32 cons_index = mcq->cons_index;
+ int size = cq->size;
+ u32 size_mask = ring->size_mask;
+ struct mlx4_cqe *buf = cq->buf;
if (!priv->port_up)
return;
- /*
- * We use a two-stage loop:
- * - the first samples the HW-updated CQE
- * - the second frees TXBBs until the last sample
- * This lets us amortize CQE cache misses, while still polling the CQ
- * until is quiescent.
- */
- cq_last_sav = mcq->cons_index;
- do {
+ index = cons_index & size_mask;
+ cqe = &buf[index];
+ ring_index = ring->cons & size_mask;
+
+ /* Process all completed CQEs */
+ while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
+ cons_index & size)) {
+ /*
+ * make sure we read the CQE after we read the
+ * ownership bit
+ */
+ rmb();
+
+ /* Skip over last polled CQE */
+ new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
+
do {
- /* Skip over last polled CQE */
- index = (index + ring->last_nr_txbb) & ring->size_mask;
txbbs_skipped += ring->last_nr_txbb;
-
- /* Poll next CQE */
+ ring_index = (ring_index + ring->last_nr_txbb) & size_mask;
+ /* free next descriptor */
ring->last_nr_txbb = mlx4_en_free_tx_desc(
- priv, ring, index,
- !!((ring->cons + txbbs_skipped) &
- ring->size));
- ++mcq->cons_index;
-
- } while (index != new_index);
+ priv, ring, ring_index,
+ !!((ring->cons + txbbs_skipped) &
+ ring->size));
+ } while (ring_index != new_index);
+
+ ++cons_index;
+ index = cons_index & size_mask;
+ cqe = &buf[index];
+ }
- new_index = be16_to_cpu(cqe->wqe_index) & ring->size_mask;
- } while (index != new_index);
- AVG_PERF_COUNTER(priv->pstats.tx_coal_avg,
- (u32) (mcq->cons_index - cq_last_sav));
/*
* To prevent CQ overflow we first update CQ consumer and only then
* the ring consumer.
*/
+ mcq->cons_index = cons_index;
mlx4_cq_set_ci(mcq);
wmb();
ring->cons += txbbs_skipped;
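The rewritten polling loop above keys entirely on the CQE ownership bit rather than on wqe_index alone: a CQE belongs to software when its owner bit matches the phase implied by the consumer index. A small compile-only sketch of that test, with the XNOR macro reproduced as it is commonly defined in mlx4_en.h and a reduced CQE type used purely for illustration:

#include <stdbool.h>
#include <stdint.h>

#define MLX4_CQE_OWNER_MASK 0x80
#define XNOR(x, y) (!(x) == !(y))   /* true when both sides have equal truth value */

struct mini_cqe {
    uint8_t owner_sr_opcode;        /* bit 7 carries the ownership/phase bit */
};

/* A CQE at 'cons_index' is ready for software when its owner bit agrees
 * with the phase bit of the consumer index (cons_index & cq_size), which
 * is exactly the loop condition in mlx4_en_process_tx_cq() above. */
static bool cqe_is_sw_owned(const struct mini_cqe *cqe,
                            uint32_t cons_index, int cq_size)
{
    return XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
                cons_index & cq_size);
}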
@@ -565,7 +566,8 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
inl->byte_count = cpu_to_be32(1 << 31 | (skb->len - spc));
}
tx_desc->ctrl.vlan_tag = cpu_to_be16(*vlan_tag);
- tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!(*vlan_tag);
+ tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
+ (!!vlan_tx_tag_present(skb));
tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
}
@@ -676,27 +678,25 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
/* Prepare ctrl segment apart opcode+ownership, which depends on
* whether LSO is used */
tx_desc->ctrl.vlan_tag = cpu_to_be16(vlan_tag);
- tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN * !!vlan_tag;
+ tx_desc->ctrl.ins_vlan = MLX4_WQE_CTRL_INS_VLAN *
+ !!vlan_tx_tag_present(skb);
tx_desc->ctrl.fence_size = (real_size / 16) & 0x3f;
- tx_desc->ctrl.srcrb_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
- MLX4_WQE_CTRL_SOLICITED);
+ tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
MLX4_WQE_CTRL_TCP_UDP_CSUM);
ring->tx_csum++;
}
- if (unlikely(priv->validate_loopback)) {
- /* Copy dst mac address to wqe */
- skb_reset_mac_header(skb);
- ethh = eth_hdr(skb);
- if (ethh && ethh->h_dest) {
- mac = mlx4_en_mac_to_u64(ethh->h_dest);
- mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
- mac_l = (u32) (mac & 0xffffffff);
- tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
- tx_desc->ctrl.imm = cpu_to_be32(mac_l);
- }
+ /* Copy dst mac address to wqe */
+ skb_reset_mac_header(skb);
+ ethh = eth_hdr(skb);
+ if (ethh && ethh->h_dest) {
+ mac = mlx4_en_mac_to_u64(ethh->h_dest);
+ mac_h = (u32) ((mac & 0xffff00000000ULL) >> 16);
+ mac_l = (u32) (mac & 0xffffffff);
+ tx_desc->ctrl.srcrb_flags |= cpu_to_be32(mac_h);
+ tx_desc->ctrl.imm = cpu_to_be32(mac_l);
}
/* Handle LSO (TSO) packets */
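The block above (now run unconditionally rather than only under validate_loopback) folds the destination MAC into the WQE control segment: a 16-bit high part is OR-ed into srcrb_flags and the 32-bit low part goes into the immediate field. A hedged standalone sketch of that split; the helper name and sample address are illustrative.

#include <stdint.h>
#include <stdio.h>

/* Fold a 6-byte Ethernet address into a u64, then split it the way the
 * xmit path does: bits 47:32 feed the control-segment flags word, bits
 * 31:0 feed the immediate-data word. */
static uint64_t mac_to_u64(const uint8_t addr[6])
{
    uint64_t mac = 0;
    int i;

    for (i = 0; i < 6; i++)
        mac = (mac << 8) | addr[i];
    return mac;
}

int main(void)
{
    const uint8_t dst[6] = { 0x00, 0x02, 0xc9, 0x12, 0x34, 0x56 };
    uint64_t mac = mac_to_u64(dst);
    uint32_t mac_h = (uint32_t)((mac & 0xffff00000000ULL) >> 16);
    uint32_t mac_l = (uint32_t)(mac & 0xffffffff);

    printf("mac_h=0x%08x mac_l=0x%08x\n", mac_h, mac_l);
    return 0;
}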
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 24ee967..9129ace0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -31,6 +31,7 @@
* SOFTWARE.
*/
+#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/export.h>
@@ -52,30 +53,6 @@ enum {
MLX4_EQ_ENTRY_SIZE = 0x20
};
-/*
- * Must be packed because start is 64 bits but only aligned to 32 bits.
- */
-struct mlx4_eq_context {
- __be32 flags;
- u16 reserved1[3];
- __be16 page_offset;
- u8 log_eq_size;
- u8 reserved2[4];
- u8 eq_period;
- u8 reserved3;
- u8 eq_max_count;
- u8 reserved4[3];
- u8 intr;
- u8 log_page_size;
- u8 reserved5[2];
- u8 mtt_base_addr_h;
- __be32 mtt_base_addr_l;
- u32 reserved6[2];
- __be32 consumer_index;
- __be32 producer_index;
- u32 reserved7[4];
-};
-
#define MLX4_EQ_STATUS_OK ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28)
#define MLX4_EQ_OWNER_SW ( 0 << 24)
@@ -100,46 +77,9 @@ struct mlx4_eq_context {
(1ull << MLX4_EVENT_TYPE_SRQ_CATAS_ERROR) | \
(1ull << MLX4_EVENT_TYPE_SRQ_QP_LAST_WQE) | \
(1ull << MLX4_EVENT_TYPE_SRQ_LIMIT) | \
- (1ull << MLX4_EVENT_TYPE_CMD))
-
-struct mlx4_eqe {
- u8 reserved1;
- u8 type;
- u8 reserved2;
- u8 subtype;
- union {
- u32 raw[6];
- struct {
- __be32 cqn;
- } __packed comp;
- struct {
- u16 reserved1;
- __be16 token;
- u32 reserved2;
- u8 reserved3[3];
- u8 status;
- __be64 out_param;
- } __packed cmd;
- struct {
- __be32 qpn;
- } __packed qp;
- struct {
- __be32 srqn;
- } __packed srq;
- struct {
- __be32 cqn;
- u32 reserved1;
- u8 reserved2[3];
- u8 syndrome;
- } __packed cq_err;
- struct {
- u32 reserved1[2];
- __be32 port;
- } __packed port_change;
- } event;
- u8 reserved3[3];
- u8 owner;
-} __packed;
+ (1ull << MLX4_EVENT_TYPE_CMD) | \
+ (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL) | \
+ (1ull << MLX4_EVENT_TYPE_FLR_EVENT))
static void eq_set_ci(struct mlx4_eq *eq, int req_not)
{
@@ -162,13 +102,144 @@ static struct mlx4_eqe *next_eqe_sw(struct mlx4_eq *eq)
return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
}
+static struct mlx4_eqe *next_slave_event_eqe(struct mlx4_slave_event_eq *slave_eq)
+{
+ struct mlx4_eqe *eqe =
+ &slave_eq->event_eqe[slave_eq->cons & (SLAVE_EVENT_EQ_SIZE - 1)];
+ return (!!(eqe->owner & 0x80) ^
+ !!(slave_eq->cons & SLAVE_EVENT_EQ_SIZE)) ?
+ eqe : NULL;
+}
+
+void mlx4_gen_slave_eqe(struct work_struct *work)
+{
+ struct mlx4_mfunc_master_ctx *master =
+ container_of(work, struct mlx4_mfunc_master_ctx,
+ slave_event_work);
+ struct mlx4_mfunc *mfunc =
+ container_of(master, struct mlx4_mfunc, master);
+ struct mlx4_priv *priv = container_of(mfunc, struct mlx4_priv, mfunc);
+ struct mlx4_dev *dev = &priv->dev;
+ struct mlx4_slave_event_eq *slave_eq = &mfunc->master.slave_eq;
+ struct mlx4_eqe *eqe;
+ u8 slave;
+ int i;
+
+ for (eqe = next_slave_event_eqe(slave_eq); eqe;
+ eqe = next_slave_event_eqe(slave_eq)) {
+ slave = eqe->slave_id;
+
+ /* All active slaves need to receive the event */
+ if (slave == ALL_SLAVES) {
+ for (i = 0; i < dev->num_slaves; i++) {
+ if (i != dev->caps.function &&
+ master->slave_state[i].active)
+ if (mlx4_GEN_EQE(dev, i, eqe))
+ mlx4_warn(dev, "Failed to "
+ " generate event "
+ "for slave %d\n", i);
+ }
+ } else {
+ if (mlx4_GEN_EQE(dev, slave, eqe))
+ mlx4_warn(dev, "Failed to generate event "
+ "for slave %d\n", slave);
+ }
+ ++slave_eq->cons;
+ }
+}
+
+
+static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_event_eq *slave_eq = &priv->mfunc.master.slave_eq;
+ struct mlx4_eqe *s_eqe =
+ &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
+
+ if ((!!(s_eqe->owner & 0x80)) ^
+ (!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
+ mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. "
+ "No free EQE on slave events queue\n", slave);
+ return;
+ }
+
+ memcpy(s_eqe, eqe, sizeof(struct mlx4_eqe) - 1);
+ s_eqe->slave_id = slave;
+ /* ensure all information is written before setting the ownership bit */
+ wmb();
+ s_eqe->owner = !!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE) ? 0x0 : 0x80;
+ ++slave_eq->prod;
+
+ queue_work(priv->mfunc.master.comm_wq,
+ &priv->mfunc.master.slave_event_work);
+}
+
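slave_event() and next_slave_event_eqe() above form a classic producer/consumer ring whose entries are published by toggling an owner byte every time the index wraps. A condensed userspace model of that scheme, with the ring size and entry layout shrunk for illustration:

#include <stdbool.h>
#include <stdint.h>

#define RING_SIZE 8                       /* stands in for SLAVE_EVENT_EQ_SIZE */

struct mini_eqe {
    uint8_t owner;                        /* 0x80 bit encodes the phase */
    uint8_t payload;
};

struct mini_ring {
    struct mini_eqe eqe[RING_SIZE];
    uint32_t prod, cons;
};

/* Producer: an entry is free when its owner phase matches the phase
 * implied by the producer index; otherwise the ring is full. */
static bool ring_post(struct mini_ring *r, uint8_t payload)
{
    struct mini_eqe *e = &r->eqe[r->prod & (RING_SIZE - 1)];

    if ((!!(e->owner & 0x80)) ^ (!!(r->prod & RING_SIZE)))
        return false;                     /* no free entry */

    e->payload = payload;
    /* the real code issues wmb() here before publishing ownership */
    e->owner = (r->prod & RING_SIZE) ? 0x00 : 0x80;
    r->prod++;
    return true;
}

/* Consumer: an entry is valid when its phase disagrees with cons;
 * the caller advances r->cons after handling it, as the worker does. */
static struct mini_eqe *ring_poll(struct mini_ring *r)
{
    struct mini_eqe *e = &r->eqe[r->cons & (RING_SIZE - 1)];

    if ((!!(e->owner & 0x80)) ^ (!!(r->cons & RING_SIZE)))
        return e;
    return NULL;
}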
+static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
+ struct mlx4_eqe *eqe)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *s_slave =
+ &priv->mfunc.master.slave_state[slave];
+
+ if (!s_slave->active) {
+ /*mlx4_warn(dev, "Trying to pass event to inactive slave\n");*/
+ return;
+ }
+
+ slave_event(dev, slave, eqe);
+}
+
+void mlx4_master_handle_slave_flr(struct work_struct *work)
+{
+ struct mlx4_mfunc_master_ctx *master =
+ container_of(work, struct mlx4_mfunc_master_ctx,
+ slave_flr_event_work);
+ struct mlx4_mfunc *mfunc =
+ container_of(master, struct mlx4_mfunc, master);
+ struct mlx4_priv *priv =
+ container_of(mfunc, struct mlx4_priv, mfunc);
+ struct mlx4_dev *dev = &priv->dev;
+ struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
+ int i;
+ int err;
+
+ mlx4_dbg(dev, "mlx4_handle_slave_flr\n");
+
+ for (i = 0 ; i < dev->num_slaves; i++) {
+
+ if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
+ mlx4_dbg(dev, "mlx4_handle_slave_flr: "
+ "clean slave: %d\n", i);
+
+ mlx4_delete_all_resources_for_slave(dev, i);
+ /*return the slave to running mode*/
+ spin_lock(&priv->mfunc.master.slave_state_lock);
+ slave_state[i].last_cmd = MLX4_COMM_CMD_RESET;
+ slave_state[i].is_slave_going_down = 0;
+ spin_unlock(&priv->mfunc.master.slave_state_lock);
+ /*notify the FW:*/
+ err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ mlx4_warn(dev, "Failed to notify FW on "
+ "FLR done (slave:%d)\n", i);
+ }
+ }
+}
+
static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
{
+ struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_eqe *eqe;
int cqn;
int eqes_found = 0;
int set_ci = 0;
int port;
+ int slave = 0;
+ int ret;
+ u32 flr_slave;
+ u8 update_slave_state;
+ int i;
while ((eqe = next_eqe_sw(eq))) {
/*
@@ -191,14 +262,68 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
case MLX4_EVENT_TYPE_PATH_MIG_FAILED:
case MLX4_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
case MLX4_EVENT_TYPE_WQ_ACCESS_ERROR:
- mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) & 0xffffff,
- eqe->type);
+ mlx4_dbg(dev, "event %d arrived\n", eqe->type);
+ if (mlx4_is_master(dev)) {
+ /* forward only to slave owning the QP */
+ ret = mlx4_get_slave_from_resource_id(dev,
+ RES_QP,
+ be32_to_cpu(eqe->event.qp.qpn)
+ & 0xffffff, &slave);
+ if (ret && ret != -ENOENT) {
+ mlx4_dbg(dev, "QP event %02x(%02x) on "
+ "EQ %d at index %u: could "
+ "not get slave id (%d)\n",
+ eqe->type, eqe->subtype,
+ eq->eqn, eq->cons_index, ret);
+ break;
+ }
+
+ if (!ret && slave != dev->caps.function) {
+ mlx4_slave_event(dev, slave, eqe);
+ break;
+ }
+
+ }
+ mlx4_qp_event(dev, be32_to_cpu(eqe->event.qp.qpn) &
+ 0xffffff, eqe->type);
break;
case MLX4_EVENT_TYPE_SRQ_LIMIT:
+ mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
+ __func__);
case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
- mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) & 0xffffff,
- eqe->type);
+ if (mlx4_is_master(dev)) {
+ /* forward only to slave owning the SRQ */
+ ret = mlx4_get_slave_from_resource_id(dev,
+ RES_SRQ,
+ be32_to_cpu(eqe->event.srq.srqn)
+ & 0xffffff,
+ &slave);
+ if (ret && ret != -ENOENT) {
+ mlx4_warn(dev, "SRQ event %02x(%02x) "
+ "on EQ %d at index %u: could"
+ " not get slave id (%d)\n",
+ eqe->type, eqe->subtype,
+ eq->eqn, eq->cons_index, ret);
+ break;
+ }
+ mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x,"
+ " event: %02x(%02x)\n", __func__,
+ slave,
+ be32_to_cpu(eqe->event.srq.srqn),
+ eqe->type, eqe->subtype);
+
+ if (!ret && slave != dev->caps.function) {
+ mlx4_warn(dev, "%s: sending event "
+ "%02x(%02x) to slave:%d\n",
+ __func__, eqe->type,
+ eqe->subtype, slave);
+ mlx4_slave_event(dev, slave, eqe);
+ break;
+ }
+ }
+ mlx4_srq_event(dev, be32_to_cpu(eqe->event.srq.srqn) &
+ 0xffffff, eqe->type);
break;
case MLX4_EVENT_TYPE_CMD:
@@ -211,13 +336,35 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
case MLX4_EVENT_TYPE_PORT_CHANGE:
port = be32_to_cpu(eqe->event.port_change.port) >> 28;
if (eqe->subtype == MLX4_PORT_CHANGE_SUBTYPE_DOWN) {
- mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_DOWN,
+ mlx4_dispatch_event(dev,
+ MLX4_DEV_EVENT_PORT_DOWN,
port);
mlx4_priv(dev)->sense.do_sense_port[port] = 1;
+ if (mlx4_is_master(dev))
+ /* change the state of all slaves'
+ * ports to down */
+ for (i = 0; i < dev->num_slaves; i++) {
+ mlx4_dbg(dev, "%s: Sending "
+ "MLX4_PORT_CHANGE_SUBTYPE_DOWN"
+ " to slave: %d, port:%d\n",
+ __func__, i, port);
+ if (i == dev->caps.function)
+ continue;
+ mlx4_slave_event(dev, i, eqe);
+ }
} else {
- mlx4_dispatch_event(dev, MLX4_DEV_EVENT_PORT_UP,
+ mlx4_dispatch_event(dev,
+ MLX4_DEV_EVENT_PORT_UP,
port);
mlx4_priv(dev)->sense.do_sense_port[port] = 0;
+
+ if (mlx4_is_master(dev)) {
+ for (i = 0; i < dev->num_slaves; i++) {
+ if (i == dev->caps.function)
+ continue;
+ mlx4_slave_event(dev, i, eqe);
+ }
+ }
}
break;
@@ -226,7 +373,28 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
eqe->event.cq_err.syndrome == 1 ?
"overrun" : "access violation",
be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);
- mlx4_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),
+ if (mlx4_is_master(dev)) {
+ ret = mlx4_get_slave_from_resource_id(dev,
+ RES_CQ,
+ be32_to_cpu(eqe->event.cq_err.cqn)
+ & 0xffffff, &slave);
+ if (ret && ret != -ENOENT) {
+ mlx4_dbg(dev, "CQ event %02x(%02x) on "
+ "EQ %d at index %u: could "
+ "not get slave id (%d)\n",
+ eqe->type, eqe->subtype,
+ eq->eqn, eq->cons_index, ret);
+ break;
+ }
+
+ if (!ret && slave != dev->caps.function) {
+ mlx4_slave_event(dev, slave, eqe);
+ break;
+ }
+ }
+ mlx4_cq_event(dev,
+ be32_to_cpu(eqe->event.cq_err.cqn)
+ & 0xffffff,
eqe->type);
break;
@@ -234,13 +402,60 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
mlx4_warn(dev, "EQ overrun on EQN %d\n", eq->eqn);
break;
+ case MLX4_EVENT_TYPE_COMM_CHANNEL:
+ if (!mlx4_is_master(dev)) {
+ mlx4_warn(dev, "Received comm channel event "
+ "for non master device\n");
+ break;
+ }
+ memcpy(&priv->mfunc.master.comm_arm_bit_vector,
+ eqe->event.comm_channel_arm.bit_vec,
+ sizeof eqe->event.comm_channel_arm.bit_vec);
+ queue_work(priv->mfunc.master.comm_wq,
+ &priv->mfunc.master.comm_work);
+ break;
+
+ case MLX4_EVENT_TYPE_FLR_EVENT:
+ flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
+ if (!mlx4_is_master(dev)) {
+ mlx4_warn(dev, "Non-master function received"
+ "FLR event\n");
+ break;
+ }
+
+ mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);
+
+ if (flr_slave > dev->num_slaves) {
+ mlx4_warn(dev,
+ "Got FLR for unknown function: %d\n",
+ flr_slave);
+ update_slave_state = 0;
+ } else
+ update_slave_state = 1;
+
+ spin_lock(&priv->mfunc.master.slave_state_lock);
+ if (update_slave_state) {
+ priv->mfunc.master.slave_state[flr_slave].active = false;
+ priv->mfunc.master.slave_state[flr_slave].last_cmd = MLX4_COMM_CMD_FLR;
+ priv->mfunc.master.slave_state[flr_slave].is_slave_going_down = 1;
+ }
+ spin_unlock(&priv->mfunc.master.slave_state_lock);
+ queue_work(priv->mfunc.master.comm_wq,
+ &priv->mfunc.master.slave_flr_event_work);
+ break;
case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
case MLX4_EVENT_TYPE_ECC_DETECT:
default:
- mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u\n",
- eqe->type, eqe->subtype, eq->eqn, eq->cons_index);
+ mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at "
+ "index %u. owner=%x, nent=0x%x, slave=%x, "
+ "ownership=%s\n",
+ eqe->type, eqe->subtype, eq->eqn,
+ eq->cons_index, eqe->owner, eq->nent,
+ eqe->slave_id,
+ !!(eqe->owner & 0x80) ^
+ !!(eq->cons_index & eq->nent) ? "HW" : "SW");
break;
- }
+ };
++eq->cons_index;
eqes_found = 1;
@@ -290,25 +505,55 @@ static irqreturn_t mlx4_msi_x_interrupt(int irq, void *eq_ptr)
return IRQ_HANDLED;
}
+int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_event_eq_info *event_eq =
+ priv->mfunc.master.slave_state[slave].event_eq;
+ u32 in_modifier = vhcr->in_modifier;
+ u32 eqn = in_modifier & 0x1FF;
+ u64 in_param = vhcr->in_param;
+ int err = 0;
+ int i;
+
+ if (slave == dev->caps.function)
+ err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
+ 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+ if (!err)
+ for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i)
+ if (in_param & (1LL << i))
+ event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn;
+
+ return err;
+}
+
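mlx4_MAP_EQ_wrapper() above records, per slave, which EQ should receive each asynchronous event type: each set bit in the 64-bit mask selects one event type, the low bits of in_modifier carry the EQ number, and the top bit distinguishes map from unmap. A reduced model of that bookkeeping; the table-size macro and struct below are illustrative stand-ins.

#include <stdint.h>

#define EVENT_TYPES_NUM 64                /* stands in for MLX4_EVENT_TYPES_NUM */

struct slave_event_eq_info {
    int eqn;                              /* -1 means "not mapped" */
};

/* Apply one MAP_EQ request to a slave's per-event-type table. */
static void map_eq_for_slave(struct slave_event_eq_info table[EVENT_TYPES_NUM],
                             uint64_t event_mask, uint32_t in_modifier)
{
    uint32_t eqn   = in_modifier & 0x1FF;
    int      unmap = in_modifier >> 31;
    int      i;

    for (i = 0; i < EVENT_TYPES_NUM; i++)
        if (event_mask & (1ULL << i))
            table[i].eqn = unmap ? -1 : (int)eqn;
}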
static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
int eq_num)
{
return mlx4_cmd(dev, event_mask, (unmap << 31) | eq_num,
- 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B);
+ 0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int eq_num)
{
- return mlx4_cmd(dev, mailbox->dma, eq_num, 0, MLX4_CMD_SW2HW_EQ,
- MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd(dev, mailbox->dma, eq_num, 0,
+ MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int eq_num)
{
- return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num, 0, MLX4_CMD_HW2SW_EQ,
- MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num,
+ 0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_num_eq_uar(struct mlx4_dev *dev)
@@ -570,8 +815,9 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
int err;
int i;
- priv->eq_table.uar_map = kcalloc(sizeof *priv->eq_table.uar_map,
- mlx4_num_eq_uar(dev), GFP_KERNEL);
+ priv->eq_table.uar_map = kcalloc(mlx4_num_eq_uar(dev),
+ sizeof *priv->eq_table.uar_map,
+ GFP_KERNEL);
if (!priv->eq_table.uar_map) {
err = -ENOMEM;
goto err_out_free;
@@ -585,14 +831,16 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
priv->eq_table.uar_map[i] = NULL;
- err = mlx4_map_clr_int(dev);
- if (err)
- goto err_out_bitmap;
+ if (!mlx4_is_slave(dev)) {
+ err = mlx4_map_clr_int(dev);
+ if (err)
+ goto err_out_bitmap;
- priv->eq_table.clr_mask =
- swab32(1 << (priv->eq_table.inta_pin & 31));
- priv->eq_table.clr_int = priv->clr_base +
- (priv->eq_table.inta_pin < 32 ? 4 : 0);
+ priv->eq_table.clr_mask =
+ swab32(1 << (priv->eq_table.inta_pin & 31));
+ priv->eq_table.clr_int = priv->clr_base +
+ (priv->eq_table.inta_pin < 32 ? 4 : 0);
+ }
priv->eq_table.irq_names =
kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
@@ -700,7 +948,8 @@ err_out_unmap:
mlx4_free_eq(dev, &priv->eq_table.eq[i]);
--i;
}
- mlx4_unmap_clr_int(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_unmap_clr_int(dev);
mlx4_free_irqs(dev);
err_out_bitmap:
@@ -725,7 +974,8 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
mlx4_free_eq(dev, &priv->eq_table.eq[i]);
- mlx4_unmap_clr_int(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_unmap_clr_int(dev);
for (i = 0; i < mlx4_num_eq_uar(dev); ++i)
if (priv->eq_table.uar_map[i])
@@ -748,7 +998,7 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
err = mlx4_NOP(dev);
/* When not in MSI_X, there is only one irq to check */
- if (!(dev->flags & MLX4_FLAG_MSI_X))
+ if (!(dev->flags & MLX4_FLAG_MSI_X) || mlx4_is_slave(dev))
return err;
/* A loop over all completion vectors, for each vector we will check
@@ -786,7 +1036,7 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char* name, int * vector)
struct mlx4_priv *priv = mlx4_priv(dev);
int vec = 0, err = 0, i;
- spin_lock(&priv->msix_ctl.pool_lock);
+ mutex_lock(&priv->msix_ctl.pool_lock);
for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
if (~priv->msix_ctl.pool_bm & 1ULL << i) {
priv->msix_ctl.pool_bm |= 1ULL << i;
@@ -808,7 +1058,7 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char* name, int * vector)
eq_set_ci(&priv->eq_table.eq[vec], 1);
}
}
- spin_unlock(&priv->msix_ctl.pool_lock);
+ mutex_unlock(&priv->msix_ctl.pool_lock);
if (vec) {
*vector = vec;
@@ -829,13 +1079,13 @@ void mlx4_release_eq(struct mlx4_dev *dev, int vec)
if (likely(i >= 0)) {
/* sanity check, making sure we're not trying to free IRQs
belonging to a legacy EQ */
- spin_lock(&priv->msix_ctl.pool_lock);
+ mutex_lock(&priv->msix_ctl.pool_lock);
if (priv->msix_ctl.pool_bm & 1ULL << i) {
free_irq(priv->eq_table.eq[vec].irq,
&priv->eq_table.eq[vec]);
priv->msix_ctl.pool_bm &= ~(1ULL << i);
}
- spin_unlock(&priv->msix_ctl.pool_lock);
+ mutex_unlock(&priv->msix_ctl.pool_lock);
}
}
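The mlx4_assign_eq()/mlx4_release_eq() hunks above switch the completion-vector pool lock from a spinlock to a mutex (the pool path reaches sleeping calls such as request_irq/free_irq, so a sleeping lock is the safer choice); the bitmap bookkeeping itself is small enough to model standalone. The pthread mutex and names below are illustrative only.

#include <stdint.h>
#include <pthread.h>

struct vec_pool {
    pthread_mutex_t lock;                 /* stands in for msix_ctl.pool_lock */
    uint64_t bm;                          /* one bit per pooled EQ vector     */
    int size;                             /* number of pooled vectors (<= 64) */
};

/* Claim the first free vector, or return -1 if the pool is exhausted.
 * The real driver additionally requests the IRQ and arms the EQ. */
static int pool_assign(struct vec_pool *p)
{
    int i, vec = -1;

    pthread_mutex_lock(&p->lock);
    for (i = 0; i < p->size; i++) {
        if (~p->bm & (1ULL << i)) {
            p->bm |= 1ULL << i;
            vec = i;
            break;
        }
    }
    pthread_mutex_unlock(&p->lock);
    return vec;
}

static void pool_release(struct vec_pool *p, int vec)
{
    pthread_mutex_lock(&p->lock);
    if (p->bm & (1ULL << vec))
        p->bm &= ~(1ULL << vec);
    pthread_mutex_unlock(&p->lock);
}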
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 435ca6e..9ea7cab 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -32,6 +32,7 @@
* SOFTWARE.
*/
+#include <linux/etherdevice.h>
#include <linux/mlx4/cmd.h>
#include <linux/module.h>
#include <linux/cache.h>
@@ -48,7 +49,7 @@ enum {
extern void __buggy_use_of_MLX4_GET(void);
extern void __buggy_use_of_MLX4_PUT(void);
-static int enable_qos;
+static bool enable_qos;
module_param(enable_qos, bool, 0444);
MODULE_PARM_DESC(enable_qos, "Enable Quality of Service support in the HCA (default: off)");
@@ -139,12 +140,178 @@ int mlx4_MOD_STAT_CFG(struct mlx4_dev *dev, struct mlx4_mod_stat_cfg *cfg)
MLX4_PUT(inbox, cfg->log_pg_sz_m, MOD_STAT_CFG_PG_SZ_M_OFFSET);
err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_MOD_STAT_CFG,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
+int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ u8 field;
+ u32 size;
+ int err = 0;
+
+#define QUERY_FUNC_CAP_FLAGS_OFFSET 0x0
+#define QUERY_FUNC_CAP_NUM_PORTS_OFFSET 0x1
+#define QUERY_FUNC_CAP_PF_BHVR_OFFSET 0x4
+#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10
+#define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET 0x14
+#define QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET 0x18
+#define QUERY_FUNC_CAP_MPT_QUOTA_OFFSET 0x20
+#define QUERY_FUNC_CAP_MTT_QUOTA_OFFSET 0x24
+#define QUERY_FUNC_CAP_MCG_QUOTA_OFFSET 0x28
+#define QUERY_FUNC_CAP_MAX_EQ_OFFSET 0x2c
+#define QUERY_FUNC_CAP_RESERVED_EQ_OFFSET 0X30
+
+#define QUERY_FUNC_CAP_PHYS_PORT_OFFSET 0x3
+#define QUERY_FUNC_CAP_ETH_PROPS_OFFSET 0xc
+
+ if (vhcr->op_modifier == 1) {
+ field = vhcr->in_modifier;
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
+
+ field = 0; /* ensure fvl bit is not set */
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
+ } else if (vhcr->op_modifier == 0) {
+ field = 1 << 7; /* enable only ethernet interface */
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
+
+ field = dev->caps.num_ports;
+ MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
+
+ size = 0; /* no PF behaviour is set for now */
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
+
+ size = dev->caps.num_qps;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+
+ size = dev->caps.num_srqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+
+ size = dev->caps.num_cqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+
+ size = dev->caps.num_eqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
+
+ size = dev->caps.reserved_eqs;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+
+ size = dev->caps.num_mpts;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+
+ size = dev->caps.num_mtts;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+
+ size = dev->caps.num_mgms + dev->caps.num_amgms;
+ MLX4_PUT(outbox->buf, size, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+
+ } else
+ err = -EINVAL;
+
+ return err;
+}
+
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 *outbox;
+ u8 field;
+ u32 size;
+ int i;
+ int err = 0;
+
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FUNC_CAP,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ goto out;
+
+ outbox = mailbox->buf;
+
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS_OFFSET);
+ if (!(field & (1 << 7))) {
+ mlx4_err(dev, "The host doesn't support eth interface\n");
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
+ func_cap->num_ports = field;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_PF_BHVR_OFFSET);
+ func_cap->pf_context_behaviour = size;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_QP_QUOTA_OFFSET);
+ func_cap->qp_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_SRQ_QUOTA_OFFSET);
+ func_cap->srq_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_CQ_QUOTA_OFFSET);
+ func_cap->cq_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MAX_EQ_OFFSET);
+ func_cap->max_eq = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_RESERVED_EQ_OFFSET);
+ func_cap->reserved_eq = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MPT_QUOTA_OFFSET);
+ func_cap->mpt_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MTT_QUOTA_OFFSET);
+ func_cap->mtt_quota = size & 0xFFFFFF;
+
+ MLX4_GET(size, outbox, QUERY_FUNC_CAP_MCG_QUOTA_OFFSET);
+ func_cap->mcg_quota = size & 0xFFFFFF;
+
+ for (i = 1; i <= func_cap->num_ports; ++i) {
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 1,
+ MLX4_CMD_QUERY_FUNC_CAP,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ goto out;
+
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_ETH_PROPS_OFFSET);
+ if (field & (1 << 7)) {
+ mlx4_err(dev, "VLAN is enforced on this port\n");
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ if (field & (1 << 6)) {
+ mlx4_err(dev, "Force mac is enabled on this port\n");
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
+ func_cap->physical_port[i] = field;
+ }
+
+ /* All other resources are allocated by the master, but we still report
+ * 'num' and 'reserved' capabilities as follows:
+ * - num remains the maximum resource index
+ * - 'num - reserved' is the total available objects of a resource, but
+ * resource indices may be less than 'reserved'
+ * TODO: set per-resource quotas */
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return err;
+}
+
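mlx4_QUERY_FUNC_CAP() above repeatedly reads a 32-bit mailbox word and keeps only the low 24 bits as a quota. A minimal sketch of that pattern, assuming the mailbox words are big-endian as the MLX4_GET() accessors imply; the raw-buffer handling here is illustrative.

#include <stddef.h>
#include <stdint.h>

#define QUERY_FUNC_CAP_QP_QUOTA_OFFSET 0x10   /* same offset as in the wrapper */

/* Read a big-endian 32-bit word at 'offset' and keep the low 24 bits,
 * which is how the per-function quotas are encoded. */
static uint32_t get_quota(const uint8_t *outbox, size_t offset)
{
    uint32_t word = ((uint32_t)outbox[offset]     << 24) |
                    ((uint32_t)outbox[offset + 1] << 16) |
                    ((uint32_t)outbox[offset + 2] <<  8) |
                     (uint32_t)outbox[offset + 3];

    return word & 0xFFFFFF;
}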
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -229,7 +396,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
outbox = mailbox->buf;
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, !mlx4_is_slave(dev));
if (err)
goto out;
@@ -396,12 +563,15 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
for (i = 1; i <= dev_cap->num_ports; ++i) {
err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B,
+ !mlx4_is_slave(dev));
if (err)
goto out;
MLX4_GET(field, outbox, QUERY_PORT_SUPPORTED_TYPE_OFFSET);
dev_cap->supported_port_types[i] = field & 3;
+ dev_cap->suggested_type[i] = (field >> 3) & 1;
+ dev_cap->default_sense[i] = (field >> 4) & 1;
MLX4_GET(field, outbox, QUERY_PORT_MTU_OFFSET);
dev_cap->ib_mtu[i] = field & 0xf;
MLX4_GET(field, outbox, QUERY_PORT_WIDTH_OFFSET);
@@ -470,6 +640,61 @@ out:
return err;
}
+int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ u64 def_mac;
+ u8 port_type;
+ int err;
+
+#define MLX4_PORT_SUPPORT_IB (1 << 0)
+#define MLX4_PORT_SUGGEST_TYPE (1 << 3)
+#define MLX4_PORT_DEFAULT_SENSE (1 << 4)
+#define MLX4_VF_PORT_ETH_ONLY_MASK (0xff & ~MLX4_PORT_SUPPORT_IB & \
+ ~MLX4_PORT_SUGGEST_TYPE & \
+ ~MLX4_PORT_DEFAULT_SENSE)
+
+ err = mlx4_cmd_box(dev, 0, outbox->dma, vhcr->in_modifier, 0,
+ MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+
+ if (!err && dev->caps.function != slave) {
+ /* set slave default_mac address */
+ MLX4_GET(def_mac, outbox->buf, QUERY_PORT_MAC_OFFSET);
+ def_mac += slave << 8;
+ MLX4_PUT(outbox->buf, def_mac, QUERY_PORT_MAC_OFFSET);
+
+ /* get port type - currently only eth is enabled */
+ MLX4_GET(port_type, outbox->buf,
+ QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+
+ /* Allow only Eth port, no link sensing allowed */
+ port_type &= MLX4_VF_PORT_ETH_ONLY_MASK;
+
+ /* check eth is enabled for this port */
+ if (!(port_type & 2))
+ mlx4_dbg(dev, "QUERY PORT: eth not supported by host");
+
+ MLX4_PUT(outbox->buf, port_type,
+ QUERY_PORT_SUPPORTED_TYPE_OFFSET);
+ }
+
+ return err;
+}
+
+int mlx4_QUERY_PORT(struct mlx4_dev *dev, void *ptr, u8 port)
+{
+ struct mlx4_cmd_mailbox *outbox = ptr;
+
+ return mlx4_cmd_box(dev, 0, outbox->dma, port, 0,
+ MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
+}
+EXPORT_SYMBOL_GPL(mlx4_QUERY_PORT);
+
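The QUERY_PORT wrapper above makes two small per-VF adjustments that are easy to check in isolation: the slave's default MAC is derived from the PF default by adding slave << 8, and the supported-type byte is masked so a VF only ever sees Ethernet with no suggested type or link sensing. A hedged standalone sketch using the same mask definition; the sample values are made up.

#include <stdint.h>
#include <stdio.h>

#define MLX4_PORT_SUPPORT_IB    (1 << 0)
#define MLX4_PORT_SUGGEST_TYPE  (1 << 3)
#define MLX4_PORT_DEFAULT_SENSE (1 << 4)
#define MLX4_VF_PORT_ETH_ONLY_MASK (0xff & ~MLX4_PORT_SUPPORT_IB & \
                                    ~MLX4_PORT_SUGGEST_TYPE & \
                                    ~MLX4_PORT_DEFAULT_SENSE)

int main(void)
{
    uint64_t pf_mac = 0x0002c9123400ULL;  /* illustrative PF default MAC */
    int slave = 3;
    uint8_t port_type = 0x1b;             /* IB + ETH + suggest + sense  */

    uint64_t vf_mac = pf_mac + ((uint64_t)slave << 8);
    port_type &= MLX4_VF_PORT_ETH_ONLY_MASK;

    printf("vf mac=0x%012llx eth_supported=%d\n",
           (unsigned long long)vf_mac, !!(port_type & 2));
    return 0;
}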
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -519,7 +744,8 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
if (++nent == MLX4_MAILBOX_SIZE / 16) {
err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
if (err)
goto out;
nent = 0;
@@ -528,7 +754,8 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
}
if (nent)
- err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, MLX4_CMD_TIME_CLASS_B);
+ err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
if (err)
goto out;
@@ -557,13 +784,15 @@ int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm)
int mlx4_UNMAP_FA(struct mlx4_dev *dev)
{
- return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA, MLX4_CMD_TIME_CLASS_B);
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_FA,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
int mlx4_RUN_FW(struct mlx4_dev *dev)
{
- return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW, MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_RUN_FW,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
int mlx4_QUERY_FW(struct mlx4_dev *dev)
@@ -579,6 +808,7 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
#define QUERY_FW_OUT_SIZE 0x100
#define QUERY_FW_VER_OFFSET 0x00
+#define QUERY_FW_PPF_ID 0x09
#define QUERY_FW_CMD_IF_REV_OFFSET 0x0a
#define QUERY_FW_MAX_CMD_OFFSET 0x0f
#define QUERY_FW_ERR_START_OFFSET 0x30
@@ -589,13 +819,16 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
#define QUERY_FW_CLR_INT_BASE_OFFSET 0x20
#define QUERY_FW_CLR_INT_BAR_OFFSET 0x28
+#define QUERY_FW_COMM_BASE_OFFSET 0x40
+#define QUERY_FW_COMM_BAR_OFFSET 0x48
+
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
outbox = mailbox->buf;
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (err)
goto out;
@@ -608,6 +841,9 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
((fw_ver & 0xffff0000ull) >> 16) |
((fw_ver & 0x0000ffffull) << 16);
+ MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
+ dev->caps.function = lg;
+
MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
@@ -649,6 +885,11 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
MLX4_GET(fw->clr_int_bar, outbox, QUERY_FW_CLR_INT_BAR_OFFSET);
fw->clr_int_bar = (fw->clr_int_bar >> 6) * 2;
+ MLX4_GET(fw->comm_base, outbox, QUERY_FW_COMM_BASE_OFFSET);
+ MLX4_GET(fw->comm_bar, outbox, QUERY_FW_COMM_BAR_OFFSET);
+ fw->comm_bar = (fw->comm_bar >> 6) * 2;
+ mlx4_dbg(dev, "Communication vector bar:%d offset:0x%llx\n",
+ fw->comm_bar, fw->comm_base);
mlx4_dbg(dev, "FW size %d KB\n", fw->fw_pages >> 2);
/*
@@ -711,7 +952,7 @@ int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter)
outbox = mailbox->buf;
err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_ADAPTER,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (err)
goto out;
@@ -743,6 +984,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
#define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f)
#define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30)
#define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37)
+#define INIT_HCA_EQE_CQE_OFFSETS (INIT_HCA_QPC_OFFSET + 0x38)
#define INIT_HCA_ALTC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40)
#define INIT_HCA_AUXC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50)
#define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60)
@@ -831,10 +1073,11 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
/* UAR attributes */
- MLX4_PUT(inbox, (u8) (PAGE_SHIFT - 12), INIT_HCA_UAR_PAGE_SZ_OFFSET);
+ MLX4_PUT(inbox, param->uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET);
MLX4_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);
- err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000);
+ err = mlx4_cmd(dev, mailbox->dma, 0, 0, MLX4_CMD_INIT_HCA, 10000,
+ MLX4_CMD_NATIVE);
if (err)
mlx4_err(dev, "INIT_HCA returns %d\n", err);
@@ -843,6 +1086,101 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
return err;
}
+int mlx4_QUERY_HCA(struct mlx4_dev *dev,
+ struct mlx4_init_hca_param *param)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ __be32 *outbox;
+ int err;
+
+#define QUERY_HCA_GLOBAL_CAPS_OFFSET 0x04
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ outbox = mailbox->buf;
+
+ err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
+ MLX4_CMD_QUERY_HCA,
+ MLX4_CMD_TIME_CLASS_B,
+ !mlx4_is_slave(dev));
+ if (err)
+ goto out;
+
+ MLX4_GET(param->global_caps, outbox, QUERY_HCA_GLOBAL_CAPS_OFFSET);
+
+ /* QPC/EEC/CQC/EQC/RDMARC attributes */
+
+ MLX4_GET(param->qpc_base, outbox, INIT_HCA_QPC_BASE_OFFSET);
+ MLX4_GET(param->log_num_qps, outbox, INIT_HCA_LOG_QP_OFFSET);
+ MLX4_GET(param->srqc_base, outbox, INIT_HCA_SRQC_BASE_OFFSET);
+ MLX4_GET(param->log_num_srqs, outbox, INIT_HCA_LOG_SRQ_OFFSET);
+ MLX4_GET(param->cqc_base, outbox, INIT_HCA_CQC_BASE_OFFSET);
+ MLX4_GET(param->log_num_cqs, outbox, INIT_HCA_LOG_CQ_OFFSET);
+ MLX4_GET(param->altc_base, outbox, INIT_HCA_ALTC_BASE_OFFSET);
+ MLX4_GET(param->auxc_base, outbox, INIT_HCA_AUXC_BASE_OFFSET);
+ MLX4_GET(param->eqc_base, outbox, INIT_HCA_EQC_BASE_OFFSET);
+ MLX4_GET(param->log_num_eqs, outbox, INIT_HCA_LOG_EQ_OFFSET);
+ MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET);
+ MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET);
+
+ /* multicast attributes */
+
+ MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET);
+ MLX4_GET(param->log_mc_entry_sz, outbox,
+ INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET);
+ MLX4_GET(param->log_mc_hash_sz, outbox,
+ INIT_HCA_LOG_MC_HASH_SZ_OFFSET);
+ MLX4_GET(param->log_mc_table_sz, outbox,
+ INIT_HCA_LOG_MC_TABLE_SZ_OFFSET);
+
+ /* TPT attributes */
+
+ MLX4_GET(param->dmpt_base, outbox, INIT_HCA_DMPT_BASE_OFFSET);
+ MLX4_GET(param->log_mpt_sz, outbox, INIT_HCA_LOG_MPT_SZ_OFFSET);
+ MLX4_GET(param->mtt_base, outbox, INIT_HCA_MTT_BASE_OFFSET);
+ MLX4_GET(param->cmpt_base, outbox, INIT_HCA_CMPT_BASE_OFFSET);
+
+ /* UAR attributes */
+
+ MLX4_GET(param->uar_page_sz, outbox, INIT_HCA_UAR_PAGE_SZ_OFFSET);
+ MLX4_GET(param->log_uar_sz, outbox, INIT_HCA_LOG_UAR_SZ_OFFSET);
+
+out:
+ mlx4_free_cmd_mailbox(dev, mailbox);
+
+ return err;
+}
+
+int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int port = vhcr->in_modifier;
+ int err;
+
+ if (priv->mfunc.master.slave_state[slave].init_port_mask & (1 << port))
+ return 0;
+
+ if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB)
+ return -ENODEV;
+
+ /* Enable port only if it was previously disabled */
+ if (!priv->mfunc.master.init_port_ref[port]) {
+ err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+ priv->mfunc.master.slave_state[slave].init_port_mask |=
+ (1 << port);
+ }
+ ++priv->mfunc.master.init_port_ref[port];
+ return 0;
+}
+
int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -886,33 +1224,62 @@ int mlx4_INIT_PORT(struct mlx4_dev *dev, int port)
MLX4_PUT(inbox, field, INIT_PORT_MAX_PKEY_OFFSET);
err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_INIT_PORT,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
mlx4_free_cmd_mailbox(dev, mailbox);
} else
err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_INIT_PORT,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_INIT_PORT);
+int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int port = vhcr->in_modifier;
+ int err;
+
+ if (!(priv->mfunc.master.slave_state[slave].init_port_mask &
+ (1 << port)))
+ return 0;
+
+ if (dev->caps.port_mask[port] == MLX4_PORT_TYPE_IB)
+ return -ENODEV;
+ if (priv->mfunc.master.init_port_ref[port] == 1) {
+ err = mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
+ MLX4_CMD_NATIVE);
+ if (err)
+ return err;
+ }
+ priv->mfunc.master.slave_state[slave].init_port_mask &= ~(1 << port);
+ --priv->mfunc.master.init_port_ref[port];
+ return 0;
+}
+
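The INIT_PORT and CLOSE_PORT wrappers above keep a per-port reference count plus a per-slave mask, so firmware only sees the first INIT and the last CLOSE. A condensed model of that bookkeeping for a single port: the return value tells the caller whether the real firmware command would be issued, and all names are illustrative.

#include <stdbool.h>

struct port_state {
    int init_ref;              /* how many functions currently hold the port open */
    unsigned long slave_mask;  /* bit i set when slave i has issued INIT_PORT      */
};

/* Returns true when the caller must really issue INIT_PORT to firmware
 * (first opener); repeated INITs from the same slave are idempotent. */
static bool port_init(struct port_state *ps, int slave)
{
    bool do_hw_init = false;

    if (ps->slave_mask & (1ul << slave))
        return false;                      /* already counted for this slave */

    if (!ps->init_ref)
        do_hw_init = true;                 /* first user brings the port up  */

    ps->slave_mask |= 1ul << slave;
    ps->init_ref++;
    return do_hw_init;
}

/* Returns true when the last user goes away and CLOSE_PORT must reach FW. */
static bool port_close(struct port_state *ps, int slave)
{
    if (!(ps->slave_mask & (1ul << slave)))
        return false;

    ps->slave_mask &= ~(1ul << slave);
    return --ps->init_ref == 0;
}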
int mlx4_CLOSE_PORT(struct mlx4_dev *dev, int port)
{
- return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000);
+ return mlx4_cmd(dev, 0, port, 0, MLX4_CMD_CLOSE_PORT, 1000,
+ MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL_GPL(mlx4_CLOSE_PORT);
int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic)
{
- return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000);
+ return mlx4_cmd(dev, 0, 0, panic, MLX4_CMD_CLOSE_HCA, 1000,
+ MLX4_CMD_NATIVE);
}
int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
{
int ret = mlx4_cmd_imm(dev, icm_size, aux_pages, 0, 0,
MLX4_CMD_SET_ICM_SIZE,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
if (ret)
return ret;
@@ -929,7 +1296,7 @@ int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages)
int mlx4_NOP(struct mlx4_dev *dev)
{
/* Input modifier of 0x1f means "finish as soon as possible." */
- return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100);
+ return mlx4_cmd(dev, 0, 0x1f, 0, MLX4_CMD_NOP, 100, MLX4_CMD_NATIVE);
}
#define MLX4_WOL_SETUP_MODE (5 << 28)
@@ -938,7 +1305,8 @@ int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port)
u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
return mlx4_cmd_imm(dev, 0, config, in_mod, 0x3,
- MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_MOD_STAT_CFG, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_read);
@@ -947,6 +1315,6 @@ int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port)
u32 in_mod = MLX4_WOL_SETUP_MODE | port << 8;
return mlx4_cmd(dev, config, in_mod, 0x1, MLX4_CMD_MOD_STAT_CFG,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
EXPORT_SYMBOL_GPL(mlx4_wol_write);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h
index bf5ec22..e1a5fa5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.h
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.h
@@ -111,11 +111,29 @@ struct mlx4_dev_cap {
u64 max_icm_sz;
int max_gso_sz;
u8 supported_port_types[MLX4_MAX_PORTS + 1];
+ u8 suggested_type[MLX4_MAX_PORTS + 1];
+ u8 default_sense[MLX4_MAX_PORTS + 1];
u8 log_max_macs[MLX4_MAX_PORTS + 1];
u8 log_max_vlans[MLX4_MAX_PORTS + 1];
u32 max_counters;
};
+struct mlx4_func_cap {
+ u8 num_ports;
+ u8 flags;
+ u32 pf_context_behaviour;
+ int qp_quota;
+ int cq_quota;
+ int srq_quota;
+ int mpt_quota;
+ int mtt_quota;
+ int max_eq;
+ int reserved_eq;
+ int mcg_quota;
+ u8 physical_port[MLX4_MAX_PORTS + 1];
+ u8 port_flags[MLX4_MAX_PORTS + 1];
+};
+
struct mlx4_adapter {
char board_id[MLX4_BOARD_ID_LEN];
u8 inta_pin;
@@ -133,6 +151,7 @@ struct mlx4_init_hca_param {
u64 dmpt_base;
u64 cmpt_base;
u64 mtt_base;
+ u64 global_caps;
u16 log_mc_entry_sz;
u16 log_mc_hash_sz;
u8 log_num_qps;
@@ -143,6 +162,7 @@ struct mlx4_init_hca_param {
u8 log_mc_table_sz;
u8 log_mpt_sz;
u8 log_uar_sz;
+ u8 uar_page_sz; /* log pg sz in 4k chunks */
};
struct mlx4_init_ib_param {
@@ -167,12 +187,19 @@ struct mlx4_set_ib_param {
};
int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap);
+int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap);
+int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_MAP_FA(struct mlx4_dev *dev, struct mlx4_icm *icm);
int mlx4_UNMAP_FA(struct mlx4_dev *dev);
int mlx4_RUN_FW(struct mlx4_dev *dev);
int mlx4_QUERY_FW(struct mlx4_dev *dev);
int mlx4_QUERY_ADAPTER(struct mlx4_dev *dev, struct mlx4_adapter *adapter);
int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
+int mlx4_QUERY_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param);
int mlx4_CLOSE_HCA(struct mlx4_dev *dev, int panic);
int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt);
int mlx4_SET_ICM_SIZE(struct mlx4_dev *dev, u64 icm_size, u64 *aux_pages);
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 02393fd..a9ade1c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -213,7 +213,7 @@ static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
@@ -223,7 +223,8 @@ int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
- return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, MLX4_CMD_TIME_CLASS_B);
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index ca6feb5..b4e9f6f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -142,7 +142,8 @@ int mlx4_register_device(struct mlx4_dev *dev)
mlx4_add_device(intf, priv);
mutex_unlock(&intf_mutex);
- mlx4_start_catas_poll(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_start_catas_poll(dev);
return 0;
}
@@ -152,7 +153,8 @@ void mlx4_unregister_device(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_interface *intf;
- mlx4_stop_catas_poll(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_stop_catas_poll(dev);
mutex_lock(&intf_mutex);
list_for_each_entry(intf, &intf_list, list)
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 94bbc85..d498f04 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -40,6 +40,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
+#include <linux/delay.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
@@ -75,21 +76,42 @@ MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
#endif /* CONFIG_PCI_MSI */
+static int num_vfs;
+module_param(num_vfs, int, 0444);
+MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0");
+
+static int probe_vf;
+module_param(probe_vf, int, 0644);
+MODULE_PARM_DESC(probe_vf, "number of vfs to probe by pf driver (num_vfs > 0)");
+
+int mlx4_log_num_mgm_entry_size = 10;
+module_param_named(log_num_mgm_entry_size,
+ mlx4_log_num_mgm_entry_size, int, 0444);
+MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, which defines the"
+ " number of QPs per MCG; for example,"
+ " 10 gives 248. Range: 9 <="
+ " log_num_mgm_entry_size <= 12");
+
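The parameter description above says a log_num_mgm_entry_size of 10 yields 248 QPs per multicast group; that figure is consistent with each MGM entry reserving a small header and then packing 32-bit QPNs, i.e. roughly 4 * (entry_size / 16 - 2) QPs per group. A hedged sketch under that assumption:

#include <stdio.h>

/* Assumed relation between MGM entry size and QPs per multicast group;
 * it reproduces the "10 gives 248" example from the description above. */
static int qps_per_mcg(int log_num_mgm_entry_size)
{
    int entry_size = 1 << log_num_mgm_entry_size;

    return 4 * (entry_size / 16 - 2);
}

int main(void)
{
    int log_sz;

    for (log_sz = 9; log_sz <= 12; log_sz++)
        printf("log_num_mgm_entry_size=%d -> %d QPs per MCG\n",
               log_sz, qps_per_mcg(log_sz));
    return 0;
}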
+#define MLX4_VF (1 << 0)
+
+#define HCA_GLOBAL_CAP_MASK 0
+#define PF_CONTEXT_BEHAVIOUR_MASK 0
+
static char mlx4_version[] __devinitdata =
DRV_NAME ": Mellanox ConnectX core driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
static struct mlx4_profile default_profile = {
- .num_qp = 1 << 17,
+ .num_qp = 1 << 18,
.num_srq = 1 << 16,
.rdmarc_per_qp = 1 << 4,
.num_cq = 1 << 16,
.num_mcg = 1 << 13,
- .num_mpt = 1 << 17,
- .num_mtt = 1 << 20,
+ .num_mpt = 1 << 19,
+ .num_mtt = 1 << 20, /* It is really num mtt segements */
};
-static int log_num_mac = 2;
+static int log_num_mac = 7;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
@@ -99,15 +121,33 @@ MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
/* Log2 max number of VLANs per ETH port (0-7) */
#define MLX4_LOG_NUM_VLANS 7
-static int use_prio;
+static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
"(0/1, default 0)");
-static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
+int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
+static int port_type_array[2] = {MLX4_PORT_TYPE_NONE, MLX4_PORT_TYPE_NONE};
+static int arr_argc = 2;
+module_param_array(port_type_array, int, &arr_argc, 0444);
+MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default "
+ "1 for IB, 2 for Ethernet");
+
+struct mlx4_port_config {
+ struct list_head list;
+ enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1];
+ struct pci_dev *pdev;
+};
+
+static inline int mlx4_master_get_num_eqs(struct mlx4_dev *dev)
+{
+ return dev->caps.reserved_eqs +
+ MLX4_MFUNC_EQ_NUM * (dev->num_slaves + 1);
+}
+
int mlx4_check_port_params(struct mlx4_dev *dev,
enum mlx4_port_type *port_type)
{
@@ -140,10 +180,8 @@ static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
int i;
- dev->caps.port_mask = 0;
for (i = 1; i <= dev->caps.num_ports; ++i)
- if (dev->caps.port_type[i] == MLX4_PORT_TYPE_IB)
- dev->caps.port_mask |= 1 << (i - 1);
+ dev->caps.port_mask[i] = dev->caps.port_type[i];
}
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
@@ -188,12 +226,15 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.eth_mtu_cap[i] = dev_cap->eth_mtu[i];
dev->caps.def_mac[i] = dev_cap->def_mac[i];
dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
+ dev->caps.suggested_type[i] = dev_cap->suggested_type[i];
+ dev->caps.default_sense[i] = dev_cap->default_sense[i];
dev->caps.trans_type[i] = dev_cap->trans_type[i];
dev->caps.vendor_oui[i] = dev_cap->vendor_oui[i];
dev->caps.wavelength[i] = dev_cap->wavelength[i];
dev->caps.trans_code[i] = dev_cap->trans_code[i];
}
+ dev->caps.uar_page_size = PAGE_SIZE;
dev->caps.num_uars = dev_cap->uar_size / PAGE_SIZE;
dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
dev->caps.bf_reg_size = dev_cap->bf_reg_size;
@@ -207,7 +248,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.reserved_srqs = dev_cap->reserved_srqs;
dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz;
dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz;
- dev->caps.num_qp_per_mgm = MLX4_QP_PER_MGM;
+ dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
/*
* Subtract 1 from the limit because we need to allocate a
* spare CQE so the HCA HW can tell the difference between an
@@ -216,17 +257,18 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.max_cqes = dev_cap->max_cq_sz - 1;
dev->caps.reserved_cqs = dev_cap->reserved_cqs;
dev->caps.reserved_eqs = dev_cap->reserved_eqs;
- dev->caps.mtts_per_seg = 1 << log_mtts_per_seg;
- dev->caps.reserved_mtts = DIV_ROUND_UP(dev_cap->reserved_mtts,
- dev->caps.mtts_per_seg);
+ dev->caps.reserved_mtts = dev_cap->reserved_mtts;
dev->caps.reserved_mrws = dev_cap->reserved_mrws;
- dev->caps.reserved_uars = dev_cap->reserved_uars;
+
+ /* The first 128 UARs are used for EQ doorbells */
+ dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars);
dev->caps.reserved_pds = dev_cap->reserved_pds;
dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
dev_cap->reserved_xrcds : 0;
dev->caps.max_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
dev_cap->max_xrcds : 0;
- dev->caps.mtt_entry_sz = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
+ dev->caps.mtt_entry_sz = dev_cap->mtt_entry_sz;
+
dev->caps.max_msg_sz = dev_cap->max_msg_sz;
dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
dev->caps.flags = dev_cap->flags;
@@ -235,18 +277,70 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.stat_rate_support = dev_cap->stat_rate_support;
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
+ /* Sense port always allowed on supported devices for ConnectX1 and 2 */
+ if (dev->pdev->device != 0x1003)
+ dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
+
dev->caps.log_num_macs = log_num_mac;
dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
dev->caps.log_num_prios = use_prio ? 3 : 0;
for (i = 1; i <= dev->caps.num_ports; ++i) {
- if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH)
- dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
- else
- dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
- dev->caps.possible_type[i] = dev->caps.port_type[i];
+ dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
+ if (dev->caps.supported_type[i]) {
+ /* if only ETH is supported - assign ETH */
+ if (dev->caps.supported_type[i] == MLX4_PORT_TYPE_ETH)
+ dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+ /* if only IB is supported,
+ * assign IB only if SRIOV is off*/
+ else if (dev->caps.supported_type[i] ==
+ MLX4_PORT_TYPE_IB) {
+ if (dev->flags & MLX4_FLAG_SRIOV)
+ dev->caps.port_type[i] =
+ MLX4_PORT_TYPE_NONE;
+ else
+ dev->caps.port_type[i] =
+ MLX4_PORT_TYPE_IB;
+ /* if IB and ETH are supported,
+ * first of all check if SRIOV is on */
+ } else if (dev->flags & MLX4_FLAG_SRIOV)
+ dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
+ else {
+ /* In non-SRIOV mode, we set the port type
+ * according to user selection of port type,
+ * if user selected none, take the FW hint */
+ if (port_type_array[i-1] == MLX4_PORT_TYPE_NONE)
+ dev->caps.port_type[i] = dev->caps.suggested_type[i] ?
+ MLX4_PORT_TYPE_ETH : MLX4_PORT_TYPE_IB;
+ else
+ dev->caps.port_type[i] = port_type_array[i-1];
+ }
+ }
+ /*
+ * Link sensing is allowed on the port if 3 conditions are true:
+ * 1. Both protocols are supported on the port.
+ * 2. Different types are supported on the port
+ * 3. FW declared that it supports link sensing
+ */
mlx4_priv(dev)->sense.sense_allowed[i] =
- dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO;
+ ((dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO) &&
+ (dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
+ (dev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT));
+
+ /*
+ * If "default_sense" bit is set, we move the port to "AUTO" mode
+ * and perform sense_port FW command to try and set the correct
+ * port type from beginning
+ */
+ if (mlx4_priv(dev)->sense.sense_allowed[i] && dev->caps.default_sense[i]) {
+ enum mlx4_port_type sensed_port = MLX4_PORT_TYPE_NONE;
+ dev->caps.possible_type[i] = MLX4_PORT_TYPE_AUTO;
+ mlx4_SENSE_PORT(dev, i, &sensed_port);
+ if (sensed_port != MLX4_PORT_TYPE_NONE)
+ dev->caps.port_type[i] = sensed_port;
+ } else {
+ dev->caps.possible_type[i] = dev->caps.port_type[i];
+ }
if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
dev->caps.log_num_macs = dev_cap->log_max_macs[i];
@@ -262,8 +356,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
}
}
- mlx4_set_port_mask(dev);
-
dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
@@ -282,6 +374,148 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
return 0;
}
+/* The function checks whether there are live VFs and returns their number */
+static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *s_state;
+ int i;
+ int ret = 0;
+
+ for (i = 1/*the ppf is 0*/; i < dev->num_slaves; ++i) {
+ s_state = &priv->mfunc.master.slave_state[i];
+ if (s_state->active && s_state->last_cmd !=
+ MLX4_COMM_CMD_RESET) {
+ mlx4_warn(dev, "%s: slave: %d is still active\n",
+ __func__, i);
+ ret++;
+ }
+ }
+ return ret;
+}
+
+static int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_state *s_slave;
+
+ if (!mlx4_is_master(dev))
+ return 0;
+
+ s_slave = &priv->mfunc.master.slave_state[slave];
+ return !!s_slave->active;
+}
+EXPORT_SYMBOL(mlx4_is_slave_active);
+
+static int mlx4_slave_cap(struct mlx4_dev *dev)
+{
+ int err;
+ u32 page_size;
+ struct mlx4_dev_cap dev_cap;
+ struct mlx4_func_cap func_cap;
+ struct mlx4_init_hca_param hca_param;
+ int i;
+
+ memset(&hca_param, 0, sizeof(hca_param));
+ err = mlx4_QUERY_HCA(dev, &hca_param);
+ if (err) {
+ mlx4_err(dev, "QUERY_HCA command failed, aborting.\n");
+ return err;
+ }
+
+ /*fail if the hca has an unknown capability */
+ if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) !=
+ HCA_GLOBAL_CAP_MASK) {
+ mlx4_err(dev, "Unknown hca global capabilities\n");
+ return -ENOSYS;
+ }
+
+ mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
+
+ memset(&dev_cap, 0, sizeof(dev_cap));
+ err = mlx4_dev_cap(dev, &dev_cap);
+ if (err) {
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ return err;
+ }
+
+ page_size = ~dev->caps.page_size_cap + 1;
+ mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
+ if (page_size > PAGE_SIZE) {
+ mlx4_err(dev, "HCA minimum page size of %d bigger than "
+ "kernel PAGE_SIZE of %ld, aborting.\n",
+ page_size, PAGE_SIZE);
+ return -ENODEV;
+ }
+
+ /* slave gets uar page size from QUERY_HCA fw command */
+ dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);
+
+ /* TODO: relax this assumption */
+ if (dev->caps.uar_page_size != PAGE_SIZE) {
+ mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
+ dev->caps.uar_page_size, PAGE_SIZE);
+ return -ENODEV;
+ }
+
+ memset(&func_cap, 0, sizeof(func_cap));
+ err = mlx4_QUERY_FUNC_CAP(dev, &func_cap);
+ if (err) {
+ mlx4_err(dev, "QUERY_FUNC_CAP command failed, aborting.\n");
+ return err;
+ }
+
+ if ((func_cap.pf_context_behaviour | PF_CONTEXT_BEHAVIOUR_MASK) !=
+ PF_CONTEXT_BEHAVIOUR_MASK) {
+ mlx4_err(dev, "Unknown pf context behaviour\n");
+ return -ENOSYS;
+ }
+
+ dev->caps.num_ports = func_cap.num_ports;
+ dev->caps.num_qps = func_cap.qp_quota;
+ dev->caps.num_srqs = func_cap.srq_quota;
+ dev->caps.num_cqs = func_cap.cq_quota;
+ dev->caps.num_eqs = func_cap.max_eq;
+ dev->caps.reserved_eqs = func_cap.reserved_eq;
+ dev->caps.num_mpts = func_cap.mpt_quota;
+ dev->caps.num_mtts = func_cap.mtt_quota;
+ dev->caps.num_pds = MLX4_NUM_PDS;
+ dev->caps.num_mgms = 0;
+ dev->caps.num_amgms = 0;
+
+ for (i = 1; i <= dev->caps.num_ports; ++i)
+ dev->caps.port_mask[i] = dev->caps.port_type[i];
+
+ if (dev->caps.num_ports > MLX4_MAX_PORTS) {
+ mlx4_err(dev, "HCA has %d ports, but we only support %d, "
+ "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
+ return -ENODEV;
+ }
+
+ if (dev->caps.uar_page_size * (dev->caps.num_uars -
+ dev->caps.reserved_uars) >
+ pci_resource_len(dev->pdev, 2)) {
+ mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than "
+ "PCI resource 2 size of 0x%llx, aborting.\n",
+ dev->caps.uar_page_size * dev->caps.num_uars,
+ (unsigned long long) pci_resource_len(dev->pdev, 2));
+ return -ENODEV;
+ }
+
+#if 0
+ mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux);
+ mlx4_warn(dev, "num_uars:%d reserved_uars:%d uar region:0x%x bar2:0x%llx\n",
+ dev->caps.num_uars, dev->caps.reserved_uars,
+ dev->caps.uar_page_size * dev->caps.num_uars,
+ pci_resource_len(dev->pdev, 2));
+ mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs,
+ dev->caps.reserved_eqs);
+ mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n",
+ dev->caps.num_pds, dev->caps.reserved_pds,
+ dev->caps.slave_pd_shift, dev->caps.pd_base);
+#endif
+ return 0;
+}
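The two compatibility checks in mlx4_slave_cap() above use the same idiom: OR the value reported by the master with the mask of bits this driver understands, and reject the device if anything else was set (both masks are 0 in this patch, so no optional bits are tolerated yet). A tiny sketch of the test:

#include <stdbool.h>
#include <stdint.h>

/* 'known' is the set of capability bits this driver can cope with
 * (HCA_GLOBAL_CAP_MASK / PF_CONTEXT_BEHAVIOUR_MASK above). */
static bool caps_supported(uint64_t reported, uint64_t known)
{
    return (reported | known) == known;   /* fails if any unknown bit is set */
}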
/*
* Change the port configuration of the device.
@@ -297,15 +531,14 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
for (port = 0; port < dev->caps.num_ports; port++) {
/* Change the port type only if the new type is different
* from the current, and not set to Auto */
- if (port_types[port] != dev->caps.port_type[port + 1]) {
+ if (port_types[port] != dev->caps.port_type[port + 1])
change = 1;
- dev->caps.port_type[port + 1] = port_types[port];
- }
}
if (change) {
mlx4_unregister_device(dev);
for (port = 1; port <= dev->caps.num_ports; port++) {
mlx4_CLOSE_PORT(dev, port);
+ dev->caps.port_type[port] = port_types[port - 1];
err = mlx4_SET_PORT(dev, port);
if (err) {
mlx4_err(dev, "Failed to set port %d, "
@@ -377,7 +610,8 @@ static ssize_t set_port_type(struct device *dev,
types[i] = mdev->caps.port_type[i+1];
}
- if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
+ if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP) &&
+ !(mdev->caps.flags & MLX4_DEV_CAP_FLAG_SENSE_SUPPORT)) {
for (i = 1; i <= mdev->caps.num_ports; i++) {
if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
mdev->caps.possible_type[i] = mdev->caps.port_type[i];
@@ -451,6 +685,7 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
{
struct mlx4_priv *priv = mlx4_priv(dev);
int err;
+ int num_eqs;
err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
cmpt_base +
@@ -480,12 +715,14 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
if (err)
goto err_srq;
+ num_eqs = (mlx4_is_master(dev)) ?
+ roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
+ dev->caps.num_eqs;
err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
cmpt_base +
((u64) (MLX4_CMPT_TYPE_EQ *
cmpt_entry_sz) << MLX4_CMPT_SHIFT),
- cmpt_entry_sz,
- dev->caps.num_eqs, dev->caps.num_eqs, 0, 0);
+ cmpt_entry_sz, num_eqs, num_eqs, 0, 0);
if (err)
goto err_cq;
@@ -509,6 +746,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
{
struct mlx4_priv *priv = mlx4_priv(dev);
u64 aux_pages;
+ int num_eqs;
int err;
err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
@@ -540,10 +778,13 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
goto err_unmap_aux;
}
+
+ num_eqs = (mlx4_is_master(dev)) ?
+ roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) :
+ dev->caps.num_eqs;
err = mlx4_init_icm_table(dev, &priv->eq_table.table,
init_hca->eqc_base, dev_cap->eqc_entry_sz,
- dev->caps.num_eqs, dev->caps.num_eqs,
- 0, 0);
+ num_eqs, num_eqs, 0, 0);
if (err) {
mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
goto err_unmap_cmpt;
@@ -563,7 +804,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
init_hca->mtt_base,
dev->caps.mtt_entry_sz,
- dev->caps.num_mtt_segs,
+ dev->caps.num_mtts,
dev->caps.reserved_mtts, 1, 0);
if (err) {
mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
@@ -650,7 +891,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
* and it's a lot easier than trying to track ref counts.
*/
err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
- init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
+ init_hca->mc_base,
+ mlx4_get_mgm_entry_size(dev),
dev->caps.num_mgms + dev->caps.num_amgms,
dev->caps.num_mgms + dev->caps.num_amgms,
0, 0);
@@ -726,6 +968,16 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}
+static void mlx4_slave_exit(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ down(&priv->cmd.slave_sem);
+ if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
+ mlx4_warn(dev, "Failed to close slave function.\n");
+ up(&priv->cmd.slave_sem);
+}
+
static int map_bf_area(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -733,8 +985,13 @@ static int map_bf_area(struct mlx4_dev *dev)
resource_size_t bf_len;
int err = 0;
- bf_start = pci_resource_start(dev->pdev, 2) + (dev->caps.num_uars << PAGE_SHIFT);
- bf_len = pci_resource_len(dev->pdev, 2) - (dev->caps.num_uars << PAGE_SHIFT);
+ if (!dev->caps.bf_reg_size)
+ return -ENXIO;
+
+ bf_start = pci_resource_start(dev->pdev, 2) +
+ (dev->caps.num_uars << PAGE_SHIFT);
+ bf_len = pci_resource_len(dev->pdev, 2) -
+ (dev->caps.num_uars << PAGE_SHIFT);
priv->bf_mapping = io_mapping_create_wc(bf_start, bf_len);
if (!priv->bf_mapping)
err = -ENOMEM;
@@ -751,10 +1008,81 @@ static void unmap_bf_area(struct mlx4_dev *dev)
static void mlx4_close_hca(struct mlx4_dev *dev)
{
unmap_bf_area(dev);
- mlx4_CLOSE_HCA(dev, 0);
- mlx4_free_icms(dev);
- mlx4_UNMAP_FA(dev);
- mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
+ if (mlx4_is_slave(dev))
+ mlx4_slave_exit(dev);
+ else {
+ mlx4_CLOSE_HCA(dev, 0);
+ mlx4_free_icms(dev);
+ mlx4_UNMAP_FA(dev);
+ mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
+ }
+}
+
+static int mlx4_init_slave(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ u64 dma = (u64) priv->mfunc.vhcr_dma;
+ int num_of_reset_retries = NUM_OF_RESET_RETRIES;
+ int ret_from_reset = 0;
+ u32 slave_read;
+ u32 cmd_channel_ver;
+
+ down(&priv->cmd.slave_sem);
+ priv->cmd.max_cmds = 1;
+ mlx4_warn(dev, "Sending reset\n");
+ ret_from_reset = mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0,
+ MLX4_COMM_TIME);
+ /* If we are in the middle of FLR, the slave will retry
+ * up to NUM_OF_RESET_RETRIES times before giving up. */
+ if (ret_from_reset) {
+ if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
+ msleep(SLEEP_TIME_IN_RESET);
+ while (ret_from_reset && num_of_reset_retries) {
+ mlx4_warn(dev, "slave is currently in the"
+ "middle of FLR. retrying..."
+ "(try num:%d)\n",
+ (NUM_OF_RESET_RETRIES -
+ num_of_reset_retries + 1));
+ ret_from_reset =
+ mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET,
+ 0, MLX4_COMM_TIME);
+ num_of_reset_retries = num_of_reset_retries - 1;
+ }
+ } else
+ goto err;
+ }
+
+ /* check the driver version - the slave I/F revision
+ * must match the master's */
+ slave_read = swab32(readl(&priv->mfunc.comm->slave_read));
+ cmd_channel_ver = mlx4_comm_get_version();
+
+ if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
+ MLX4_COMM_GET_IF_REV(slave_read)) {
+ mlx4_err(dev, "slave driver version is not supported"
+ " by the master\n");
+ goto err;
+ }
+
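+ /* Hand the 64-bit VHCR DMA address to the master 16 bits at a
+ * time, most significant chunk first; the final VHCR_EN command
+ * carries the low 16 bits and enables the channel */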
+ mlx4_warn(dev, "Sending vhcr0\n");
+ if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR0, dma >> 48,
+ MLX4_COMM_TIME))
+ goto err;
+ if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR1, dma >> 32,
+ MLX4_COMM_TIME))
+ goto err;
+ if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR2, dma >> 16,
+ MLX4_COMM_TIME))
+ goto err;
+ if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_VHCR_EN, dma, MLX4_COMM_TIME))
+ goto err;
+ up(&priv->cmd.slave_sem);
+ return 0;
+
+err:
+ mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, 0);
+ up(&priv->cmd.slave_sem);
+ return -EIO;
}
static int mlx4_init_hca(struct mlx4_dev *dev)
@@ -768,56 +1096,76 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
u64 icm_size;
int err;
- err = mlx4_QUERY_FW(dev);
- if (err) {
- if (err == -EACCES)
- mlx4_info(dev, "non-primary physical function, skipping.\n");
- else
- mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
- return err;
- }
+ if (!mlx4_is_slave(dev)) {
+ err = mlx4_QUERY_FW(dev);
+ if (err) {
+ if (err == -EACCES)
+ mlx4_info(dev, "non-primary physical function, skipping.\n");
+ else
+ mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
+ goto unmap_bf;
+ }
- err = mlx4_load_fw(dev);
- if (err) {
- mlx4_err(dev, "Failed to start FW, aborting.\n");
- return err;
- }
+ err = mlx4_load_fw(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to start FW, aborting.\n");
+ goto unmap_bf;
+ }
- mlx4_cfg.log_pg_sz_m = 1;
- mlx4_cfg.log_pg_sz = 0;
- err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
- if (err)
- mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
+ mlx4_cfg.log_pg_sz_m = 1;
+ mlx4_cfg.log_pg_sz = 0;
+ err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
+ if (err)
+ mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
- err = mlx4_dev_cap(dev, &dev_cap);
- if (err) {
- mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
- goto err_stop_fw;
- }
+ err = mlx4_dev_cap(dev, &dev_cap);
+ if (err) {
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ goto err_stop_fw;
+ }
- profile = default_profile;
+ profile = default_profile;
- icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
- if ((long long) icm_size < 0) {
- err = icm_size;
- goto err_stop_fw;
- }
+ icm_size = mlx4_make_profile(dev, &profile, &dev_cap,
+ &init_hca);
+ if ((long long) icm_size < 0) {
+ err = icm_size;
+ goto err_stop_fw;
+ }
- if (map_bf_area(dev))
- mlx4_dbg(dev, "Failed to map blue flame area\n");
+ init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
+ init_hca.uar_page_sz = PAGE_SHIFT - 12;
- init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
+ err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
+ if (err)
+ goto err_stop_fw;
- err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
- if (err)
- goto err_stop_fw;
+ err = mlx4_INIT_HCA(dev, &init_hca);
+ if (err) {
+ mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
+ goto err_free_icm;
+ }
+ } else {
+ err = mlx4_init_slave(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize slave\n");
+ goto unmap_bf;
+ }
- err = mlx4_INIT_HCA(dev, &init_hca);
- if (err) {
- mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
- goto err_free_icm;
+ err = mlx4_slave_cap(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to obtain slave caps\n");
+ goto err_close;
+ }
}
+ if (map_bf_area(dev))
+ mlx4_dbg(dev, "Failed to map blue flame area\n");
+
+ /* Only the master sets the port types; all other functions get them from it. */
+ if (!mlx4_is_slave(dev))
+ mlx4_set_port_mask(dev);
+
err = mlx4_QUERY_ADAPTER(dev, &adapter);
if (err) {
mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
@@ -830,16 +1178,19 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
return 0;
err_close:
- mlx4_CLOSE_HCA(dev, 0);
+ mlx4_close_hca(dev);
err_free_icm:
- mlx4_free_icms(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_free_icms(dev);
err_stop_fw:
+ if (!mlx4_is_slave(dev)) {
+ mlx4_UNMAP_FA(dev);
+ mlx4_free_icm(dev, priv->fw.fw_icm, 0);
+ }
+unmap_bf:
unmap_bf_area(dev);
- mlx4_UNMAP_FA(dev);
- mlx4_free_icm(dev, priv->fw.fw_icm, 0);
-
return err;
}
@@ -986,55 +1337,56 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
goto err_srq_table_free;
}
- err = mlx4_init_mcg_table(dev);
- if (err) {
- mlx4_err(dev, "Failed to initialize "
- "multicast group table, aborting.\n");
- goto err_qp_table_free;
+ if (!mlx4_is_slave(dev)) {
+ err = mlx4_init_mcg_table(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to initialize "
+ "multicast group table, aborting.\n");
+ goto err_qp_table_free;
+ }
}
err = mlx4_init_counters_table(dev);
if (err && err != -ENOENT) {
mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
- goto err_counters_table_free;
+ goto err_mcg_table_free;
}
- for (port = 1; port <= dev->caps.num_ports; port++) {
- enum mlx4_port_type port_type = 0;
- mlx4_SENSE_PORT(dev, port, &port_type);
- if (port_type)
- dev->caps.port_type[port] = port_type;
- ib_port_default_caps = 0;
- err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
- if (err)
- mlx4_warn(dev, "failed to get port %d default "
- "ib capabilities (%d). Continuing with "
- "caps = 0\n", port, err);
- dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
-
- err = mlx4_check_ext_port_caps(dev, port);
- if (err)
- mlx4_warn(dev, "failed to get port %d extended "
- "port capabilities support info (%d)."
- " Assuming not supported\n", port, err);
+ if (!mlx4_is_slave(dev)) {
+ for (port = 1; port <= dev->caps.num_ports; port++) {
+ ib_port_default_caps = 0;
+ err = mlx4_get_port_ib_caps(dev, port,
+ &ib_port_default_caps);
+ if (err)
+ mlx4_warn(dev, "failed to get port %d default "
+ "ib capabilities (%d). Continuing "
+ "with caps = 0\n", port, err);
+ dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
+
+ err = mlx4_check_ext_port_caps(dev, port);
+ if (err)
+ mlx4_warn(dev, "failed to get port %d extended "
+ "port capabilities support info (%d)."
+ " Assuming not supported\n",
+ port, err);
- err = mlx4_SET_PORT(dev, port);
- if (err) {
- mlx4_err(dev, "Failed to set port %d, aborting\n",
- port);
- goto err_mcg_table_free;
+ err = mlx4_SET_PORT(dev, port);
+ if (err) {
+ mlx4_err(dev, "Failed to set port %d, aborting\n",
+ port);
+ goto err_counters_table_free;
+ }
}
}
- mlx4_set_port_mask(dev);
return 0;
-err_mcg_table_free:
- mlx4_cleanup_mcg_table(dev);
-
err_counters_table_free:
mlx4_cleanup_counters_table(dev);
+err_mcg_table_free:
+ mlx4_cleanup_mcg_table(dev);
+
err_qp_table_free:
mlx4_cleanup_qp_table(dev);
@@ -1081,8 +1433,16 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
int i;
if (msi_x) {
- nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
- nreq);
+ /* In multifunction mode each function gets 2 MSI-X vectors:
+ * one for data path completions and the other for async events
+ * or command completions */
+ if (mlx4_is_mfunc(dev)) {
+ nreq = 2;
+ } else {
+ nreq = min_t(int, dev->caps.num_eqs -
+ dev->caps.reserved_eqs, nreq);
+ }
+
entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
if (!entries)
goto no_msi;
@@ -1138,16 +1498,24 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
info->dev = dev;
info->port = port;
- mlx4_init_mac_table(dev, &info->mac_table);
- mlx4_init_vlan_table(dev, &info->vlan_table);
- info->base_qpn = dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
+ if (!mlx4_is_slave(dev)) {
+ INIT_RADIX_TREE(&info->mac_tree, GFP_KERNEL);
+ mlx4_init_mac_table(dev, &info->mac_table);
+ mlx4_init_vlan_table(dev, &info->vlan_table);
+ info->base_qpn =
+ dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
(port - 1) * (1 << log_num_mac);
+ }
sprintf(info->dev_name, "mlx4_port%d", port);
info->port_attr.attr.name = info->dev_name;
- info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
+ if (mlx4_is_mfunc(dev))
+ info->port_attr.attr.mode = S_IRUGO;
+ else {
+ info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
+ info->port_attr.store = set_port_type;
+ }
info->port_attr.show = show_port_type;
- info->port_attr.store = set_port_type;
sysfs_attr_init(&info->port_attr.attr);
err = device_create_file(&dev->pdev->dev, &info->port_attr);
@@ -1220,6 +1588,46 @@ static void mlx4_clear_steering(struct mlx4_dev *dev)
kfree(priv->steer);
}
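+/* Flatten the PCI (slot, function) pair into a single function index
+ * (8 functions per slot), used to match VFs against the probe_vf
+ * module parameter */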
+static int extended_func_num(struct pci_dev *pdev)
+{
+ return PCI_SLOT(pdev->devfn) * 8 + PCI_FUNC(pdev->devfn);
+}
+
+#define MLX4_OWNER_BASE 0x8069c
+#define MLX4_OWNER_SIZE 4
+
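+/* The dword at MLX4_OWNER_BASE acts as a device ownership semaphore:
+ * a read of zero means this function now owns the device, a non-zero
+ * read means another PF already holds it. mlx4_free_ownership()
+ * releases it by writing zero. */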
+static int mlx4_get_ownership(struct mlx4_dev *dev)
+{
+ void __iomem *owner;
+ u32 ret;
+
+ owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
+ MLX4_OWNER_SIZE);
+ if (!owner) {
+ mlx4_err(dev, "Failed to obtain ownership bit\n");
+ return -ENOMEM;
+ }
+
+ ret = readl(owner);
+ iounmap(owner);
+ return (int) !!ret;
+}
+
+static void mlx4_free_ownership(struct mlx4_dev *dev)
+{
+ void __iomem *owner;
+
+ owner = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_OWNER_BASE,
+ MLX4_OWNER_SIZE);
+ if (!owner) {
+ mlx4_err(dev, "Failed to obtain ownership bit\n");
+ return;
+ }
+ writel(0, owner);
+ msleep(1000);
+ iounmap(owner);
+}
+
static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct mlx4_priv *priv;
@@ -1235,13 +1643,20 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
"aborting.\n");
return err;
}
-
+ if (num_vfs > MLX4_MAX_NUM_VF) {
+ printk(KERN_ERR "There are more VF's (%d) than allowed(%d)\n",
+ num_vfs, MLX4_MAX_NUM_VF);
+ return -EINVAL;
+ }
/*
- * Check for BARs. We expect 0: 1MB
+ * Check for BARs.
*/
- if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
- pci_resource_len(pdev, 0) != 1 << 20) {
- dev_err(&pdev->dev, "Missing DCS, aborting.\n");
+ if (((id == NULL) || !(id->driver_data & MLX4_VF)) &&
+ !(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ dev_err(&pdev->dev, "Missing DCS, aborting."
+ "(id == 0X%p, id->driver_data: 0x%lx,"
+ " pci_resource_flags(pdev, 0):0x%lx)\n", id,
+ id ? id->driver_data : 0, pci_resource_flags(pdev, 0));
err = -ENODEV;
goto err_disable_pdev;
}
@@ -1305,42 +1720,132 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
mutex_init(&priv->bf_mutex);
dev->rev_id = pdev->revision;
+ /* Detect if this device is a virtual function */
+ if (id && id->driver_data & MLX4_VF) {
+ /* When acting as pf, we normally skip vfs unless explicitly
+ * requested to probe them. */
+ if (num_vfs && extended_func_num(pdev) > probe_vf) {
+ mlx4_warn(dev, "Skipping virtual function:%d\n",
+ extended_func_num(pdev));
+ err = -ENODEV;
+ goto err_free_dev;
+ }
+ mlx4_warn(dev, "Detected virtual function - running in slave mode\n");
+ dev->flags |= MLX4_FLAG_SLAVE;
+ } else {
+ /* We reset the device and enable SRIOV only for physical
+ * devices. Try to claim ownership on the device;
+ * if already taken, skip -- do not allow multiple PFs */
+ err = mlx4_get_ownership(dev);
+ if (err) {
+ if (err < 0)
+ goto err_free_dev;
+ else {
+ mlx4_warn(dev, "Multiple PFs not yet supported."
+ " Skipping PF.\n");
+ err = -EINVAL;
+ goto err_free_dev;
+ }
+ }
- /*
- * Now reset the HCA before we touch the PCI capabilities or
- * attempt a firmware command, since a boot ROM may have left
- * the HCA in an undefined state.
- */
- err = mlx4_reset(dev);
- if (err) {
- mlx4_err(dev, "Failed to reset HCA, aborting.\n");
- goto err_free_dev;
+ if (num_vfs) {
+ mlx4_warn(dev, "Enabling sriov with:%d vfs\n", num_vfs);
+ err = pci_enable_sriov(pdev, num_vfs);
+ if (err) {
+ mlx4_err(dev, "Failed to enable sriov,"
+ "continuing without sriov enabled"
+ " (err = %d).\n", err);
+ num_vfs = 0;
+ err = 0;
+ } else {
+ mlx4_warn(dev, "Running in master mode\n");
+ dev->flags |= MLX4_FLAG_SRIOV |
+ MLX4_FLAG_MASTER;
+ dev->num_vfs = num_vfs;
+ }
+ }
+
+ /*
+ * Now reset the HCA before we touch the PCI capabilities or
+ * attempt a firmware command, since a boot ROM may have left
+ * the HCA in an undefined state.
+ */
+ err = mlx4_reset(dev);
+ if (err) {
+ mlx4_err(dev, "Failed to reset HCA, aborting.\n");
+ goto err_rel_own;
+ }
}
+slave_start:
if (mlx4_cmd_init(dev)) {
mlx4_err(dev, "Failed to init command interface, aborting.\n");
- goto err_free_dev;
+ goto err_sriov;
+ }
+
+ /* In slave functions, the communication channel must be initialized
+ * before posting commands. Also, init num_slaves before calling
+ * mlx4_init_hca */
+ if (mlx4_is_mfunc(dev)) {
+ if (mlx4_is_master(dev))
+ dev->num_slaves = MLX4_MAX_NUM_SLAVES;
+ else {
+ dev->num_slaves = 0;
+ if (mlx4_multi_func_init(dev)) {
+ mlx4_err(dev, "Failed to init slave mfunc"
+ " interface, aborting.\n");
+ goto err_cmd;
+ }
+ }
}
err = mlx4_init_hca(dev);
- if (err)
- goto err_cmd;
+ if (err) {
+ if (err == -EACCES) {
+ /* Not primary Physical function
+ * Running in slave mode */
+ mlx4_cmd_cleanup(dev);
+ dev->flags |= MLX4_FLAG_SLAVE;
+ dev->flags &= ~MLX4_FLAG_MASTER;
+ goto slave_start;
+ } else
+ goto err_mfunc;
+ }
+
+ /* In master functions, the communication channel must be initialized
+ * after obtaining its address from fw */
+ if (mlx4_is_master(dev)) {
+ if (mlx4_multi_func_init(dev)) {
+ mlx4_err(dev, "Failed to init master mfunc"
+ "interface, aborting.\n");
+ goto err_close;
+ }
+ }
err = mlx4_alloc_eq_table(dev);
if (err)
- goto err_close;
+ goto err_master_mfunc;
priv->msix_ctl.pool_bm = 0;
- spin_lock_init(&priv->msix_ctl.pool_lock);
+ mutex_init(&priv->msix_ctl.pool_lock);
mlx4_enable_msi_x(dev);
-
- err = mlx4_init_steering(dev);
- if (err)
+ if ((mlx4_is_mfunc(dev)) &&
+ !(dev->flags & MLX4_FLAG_MSI_X)) {
+ mlx4_err(dev, "INTx is not supported in multi-function mode."
+ " aborting.\n");
goto err_free_eq;
+ }
+
+ if (!mlx4_is_slave(dev)) {
+ err = mlx4_init_steering(dev);
+ if (err)
+ goto err_free_eq;
+ }
err = mlx4_setup_hca(dev);
- if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
+ if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X) &&
+ !mlx4_is_mfunc(dev)) {
dev->flags &= ~MLX4_FLAG_MSI_X;
pci_disable_msix(pdev);
err = mlx4_setup_hca(dev);
@@ -1383,20 +1888,37 @@ err_port:
mlx4_cleanup_uar_table(dev);
err_steer:
- mlx4_clear_steering(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_clear_steering(dev);
err_free_eq:
mlx4_free_eq_table(dev);
+err_master_mfunc:
+ if (mlx4_is_master(dev))
+ mlx4_multi_func_cleanup(dev);
+
err_close:
if (dev->flags & MLX4_FLAG_MSI_X)
pci_disable_msix(pdev);
mlx4_close_hca(dev);
+err_mfunc:
+ if (mlx4_is_slave(dev))
+ mlx4_multi_func_cleanup(dev);
+
err_cmd:
mlx4_cmd_cleanup(dev);
+err_sriov:
+ if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV))
+ pci_disable_sriov(pdev);
+
+err_rel_own:
+ if (!mlx4_is_slave(dev))
+ mlx4_free_ownership(dev);
+
err_free_dev:
kfree(priv);
@@ -1424,6 +1946,12 @@ static void mlx4_remove_one(struct pci_dev *pdev)
int p;
if (dev) {
+ /* In SRIOV it is not allowed to unload the PF's
+ * driver while there are active VFs */
+ if (mlx4_is_master(dev)) {
+ if (mlx4_how_many_lives_vf(dev))
+ printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
+ }
mlx4_stop_sense(dev);
mlx4_unregister_device(dev);
@@ -1443,17 +1971,31 @@ static void mlx4_remove_one(struct pci_dev *pdev)
mlx4_cleanup_xrcd_table(dev);
mlx4_cleanup_pd_table(dev);
+ if (mlx4_is_master(dev))
+ mlx4_free_resource_tracker(dev);
+
iounmap(priv->kar);
mlx4_uar_free(dev, &priv->driver_uar);
mlx4_cleanup_uar_table(dev);
- mlx4_clear_steering(dev);
+ if (!mlx4_is_slave(dev))
+ mlx4_clear_steering(dev);
mlx4_free_eq_table(dev);
+ if (mlx4_is_master(dev))
+ mlx4_multi_func_cleanup(dev);
mlx4_close_hca(dev);
+ if (mlx4_is_slave(dev))
+ mlx4_multi_func_cleanup(dev);
mlx4_cmd_cleanup(dev);
if (dev->flags & MLX4_FLAG_MSI_X)
pci_disable_msix(pdev);
+ if (num_vfs && (dev->flags & MLX4_FLAG_SRIOV)) {
+ mlx4_warn(dev, "Disabling sriov\n");
+ pci_disable_sriov(pdev);
+ }
+ if (!mlx4_is_slave(dev))
+ mlx4_free_ownership(dev);
kfree(priv);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -1468,33 +2010,48 @@ int mlx4_restart_one(struct pci_dev *pdev)
}
static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
- { PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
- { PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
- { PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
- { PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
- { PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
- { PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
- { PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
- { PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */
- { PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */
- { PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */
- { PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
- { PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
- { PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
- { PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
- { PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
- { PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
- { PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
- { PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
- { PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
- { PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
- { PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
- { PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
+ /* MT25408 "Hermon" SDR */
+ { PCI_VDEVICE(MELLANOX, 0x6340), 0 },
+ /* MT25408 "Hermon" DDR */
+ { PCI_VDEVICE(MELLANOX, 0x634a), 0 },
+ /* MT25408 "Hermon" QDR */
+ { PCI_VDEVICE(MELLANOX, 0x6354), 0 },
+ /* MT25408 "Hermon" DDR PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x6732), 0 },
+ /* MT25408 "Hermon" QDR PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x673c), 0 },
+ /* MT25408 "Hermon" EN 10GigE */
+ { PCI_VDEVICE(MELLANOX, 0x6368), 0 },
+ /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x6750), 0 },
+ /* MT25458 ConnectX EN 10GBASE-T 10GigE */
+ { PCI_VDEVICE(MELLANOX, 0x6372), 0 },
+ /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
+ { PCI_VDEVICE(MELLANOX, 0x675a), 0 },
+ /* MT26468 ConnectX EN 10GigE PCIe gen2*/
+ { PCI_VDEVICE(MELLANOX, 0x6764), 0 },
+ /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
+ { PCI_VDEVICE(MELLANOX, 0x6746), 0 },
+ /* MT26478 ConnectX2 40GigE PCIe gen2 */
+ { PCI_VDEVICE(MELLANOX, 0x676e), 0 },
+ /* MT25400 Family [ConnectX-2 Virtual Function] */
+ { PCI_VDEVICE(MELLANOX, 0x1002), MLX4_VF },
+ /* MT27500 Family [ConnectX-3] */
+ { PCI_VDEVICE(MELLANOX, 0x1003), 0 },
+ /* MT27500 Family [ConnectX-3 Virtual Function] */
+ { PCI_VDEVICE(MELLANOX, 0x1004), MLX4_VF },
+ { PCI_VDEVICE(MELLANOX, 0x1005), 0 }, /* MT27510 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1006), 0 }, /* MT27511 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1007), 0 }, /* MT27520 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1008), 0 }, /* MT27521 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1009), 0 }, /* MT27530 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100a), 0 }, /* MT27531 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100b), 0 }, /* MT27540 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100c), 0 }, /* MT27541 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100d), 0 }, /* MT27550 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100e), 0 }, /* MT27551 Family */
+ { PCI_VDEVICE(MELLANOX, 0x100f), 0 }, /* MT27560 Family */
+ { PCI_VDEVICE(MELLANOX, 0x1010), 0 }, /* MT27561 Family */
{ 0, }
};
@@ -1523,6 +2080,12 @@ static int __init mlx4_verify_params(void)
return -1;
}
+ /* Check if module param for ports type has legal combination */
+ if (port_type_array[0] == false && port_type_array[1] == true) {
+ printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
+ port_type_array[0] = true;
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 978688c..ca574d8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -44,28 +44,47 @@
static const u8 zero_gid[16]; /* automatically initialized to 0 */
+struct mlx4_mgm {
+ __be32 next_gid_index;
+ __be32 members_count;
+ u32 reserved[2];
+ u8 gid[16];
+ __be32 qp[MLX4_MAX_QP_PER_MGM];
+};
+
+int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
+{
+ return min((1 << mlx4_log_num_mgm_entry_size), MLX4_MAX_MGM_ENTRY_SIZE);
+}
+
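+/* An MGM entry starts with a 32-byte header (next_gid_index,
+ * members_count, two reserved words and the 16-byte GID), i.e. two
+ * 16-byte lines; every following 16-byte line holds four QPNs, hence
+ * 4 * (entry_size / 16 - 2) QPs per entry. */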
+int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
+{
+ return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
+}
+
static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
struct mlx4_cmd_mailbox *mailbox)
{
return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
struct mlx4_cmd_mailbox *mailbox)
{
return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
-static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 vep_num, u8 port, u8 steer,
+static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer,
struct mlx4_cmd_mailbox *mailbox)
{
u32 in_mod;
- in_mod = (u32) vep_num << 24 | (u32) port << 16 | steer << 1;
+ in_mod = (u32) port << 16 | steer << 1;
return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
- MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
}
static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -75,7 +94,8 @@ static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int err;
err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
- MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
if (!err)
*hash = imm;
@@ -102,7 +122,7 @@ static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num,
* Add new entry to steering data structure.
* All promisc QPs should be added as well
*/
-static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int new_steering_entry(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
unsigned int index, u32 qpn)
{
@@ -115,10 +135,8 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
struct mlx4_promisc_qp *dqp = NULL;
u32 prot;
int err;
- u8 pf_num;
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+ s_steer = &mlx4_priv(dev)->steer[port - 1];
new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
if (!new_entry)
return -ENOMEM;
@@ -130,7 +148,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
/* If the given qpn is also a promisc qp,
* it should be inserted to duplicates list
*/
- pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+ pqp = get_promisc_qp(dev, 0, steer, qpn);
if (pqp) {
dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
if (!dqp) {
@@ -165,7 +183,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
/* don't add already existing qpn */
if (pqp->qpn == qpn)
continue;
- if (members_count == MLX4_QP_PER_MGM) {
+ if (members_count == dev->caps.num_qp_per_mgm) {
/* out of space */
err = -ENOMEM;
goto out_mailbox;
@@ -193,7 +211,7 @@ out_alloc:
}
/* update the data structures with existing steering entry */
-static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
unsigned int index, u32 qpn)
{
@@ -201,12 +219,10 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
struct mlx4_steer_index *tmp_entry, *entry = NULL;
struct mlx4_promisc_qp *pqp;
struct mlx4_promisc_qp *dqp;
- u8 pf_num;
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+ s_steer = &mlx4_priv(dev)->steer[port - 1];
- pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+ pqp = get_promisc_qp(dev, 0, steer, qpn);
if (!pqp)
return 0; /* nothing to do */
@@ -225,7 +241,7 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
* we need to add it as a duplicate to this entry
* for future references */
list_for_each_entry(dqp, &entry->duplicates, list) {
- if (qpn == dqp->qpn)
+ if (qpn == pqp->qpn)
return 0; /* qp is already duplicated */
}
@@ -241,20 +257,18 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
/* Check whether a qpn is a duplicate on steering entry
* If so, it should not be removed from mgm */
-static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
unsigned int index, u32 qpn)
{
struct mlx4_steer *s_steer;
struct mlx4_steer_index *tmp_entry, *entry = NULL;
struct mlx4_promisc_qp *dqp, *tmp_dqp;
- u8 pf_num;
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+ s_steer = &mlx4_priv(dev)->steer[port - 1];
/* if qp is not promisc, it cannot be duplicated */
- if (!get_promisc_qp(dev, pf_num, steer, qpn))
+ if (!get_promisc_qp(dev, 0, steer, qpn))
return false;
/* The qp is promisc qp so it is a duplicate on this index
@@ -279,7 +293,7 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
}
/* If a steering entry contains only promisc QPs, it can be removed. */
-static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer,
unsigned int index, u32 tqpn)
{
@@ -291,10 +305,8 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
u32 members_count;
bool ret = false;
int i;
- u8 pf_num;
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+ s_steer = &mlx4_priv(dev)->steer[port - 1];
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
@@ -306,7 +318,7 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
for (i = 0; i < members_count; i++) {
qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
- if (!get_promisc_qp(dev, pf_num, steer, qpn) && qpn != tqpn) {
+ if (!get_promisc_qp(dev, 0, steer, qpn) && qpn != tqpn) {
/* the qp is not promisc, the entry can't be removed */
goto out;
}
@@ -332,7 +344,7 @@ out:
return ret;
}
-static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer, u32 qpn)
{
struct mlx4_steer *s_steer;
@@ -347,14 +359,13 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
bool found;
int last_index;
int err;
- u8 pf_num;
struct mlx4_priv *priv = mlx4_priv(dev);
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+
+ s_steer = &mlx4_priv(dev)->steer[port - 1];
mutex_lock(&priv->mcg_table.mutex);
- if (get_promisc_qp(dev, pf_num, steer, qpn)) {
+ if (get_promisc_qp(dev, 0, steer, qpn)) {
err = 0; /* Nothing to do, already exists */
goto out_mutex;
}
@@ -397,7 +408,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
}
if (!found) {
/* Need to add the qpn to mgm */
- if (members_count == MLX4_QP_PER_MGM) {
+ if (members_count == dev->caps.num_qp_per_mgm) {
/* entry is full */
err = -ENOMEM;
goto out_mailbox;
@@ -420,7 +431,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
- err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
+ err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
if (err)
goto out_list;
@@ -439,7 +450,7 @@ out_mutex:
return err;
}
-static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
+static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
enum mlx4_steer_type steer, u32 qpn)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -454,13 +465,11 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
bool back_to_list = false;
int loc, i;
int err;
- u8 pf_num;
- pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
- s_steer = &mlx4_priv(dev)->steer[pf_num];
+ s_steer = &mlx4_priv(dev)->steer[port - 1];
mutex_lock(&priv->mcg_table.mutex);
- pqp = get_promisc_qp(dev, pf_num, steer, qpn);
+ pqp = get_promisc_qp(dev, 0, steer, qpn);
if (unlikely(!pqp)) {
mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
/* nothing to do */
@@ -479,12 +488,13 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
goto out_list;
}
mgm = mailbox->buf;
+ memset(mgm, 0, sizeof *mgm);
members_count = 0;
list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
- err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
+ err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
if (err)
goto out_mailbox;
@@ -649,12 +659,13 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
}
index += dev->caps.num_mgms;
+ new_entry = 1;
memset(mgm, 0, sizeof *mgm);
memcpy(mgm->gid, gid, 16);
}
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
- if (members_count == MLX4_QP_PER_MGM) {
+ if (members_count == dev->caps.num_qp_per_mgm) {
mlx4_err(dev, "MGM at index %x is full.\n", index);
err = -ENOMEM;
goto out;
@@ -696,9 +707,9 @@ out:
if (prot == MLX4_PROT_ETH) {
/* manage the steering entry for promisc mode */
if (new_entry)
- new_steering_entry(dev, 0, port, steer, index, qp->qpn);
+ new_steering_entry(dev, port, steer, index, qp->qpn);
else
- existing_steering_entry(dev, 0, port, steer,
+ existing_steering_entry(dev, port, steer,
index, qp->qpn);
}
if (err && link && index != -1) {
@@ -749,7 +760,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
/* if this qp is also a promisc qp, it shouldn't be removed */
if (prot == MLX4_PROT_ETH &&
- check_duplicate_entry(dev, 0, port, steer, index, qp->qpn))
+ check_duplicate_entry(dev, port, steer, index, qp->qpn))
goto out;
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
@@ -769,7 +780,8 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
mgm->qp[i - 1] = 0;
if (prot == MLX4_PROT_ETH)
- removed_entry = can_remove_steering_entry(dev, 0, port, steer, index, qp->qpn);
+ removed_entry = can_remove_steering_entry(dev, port, steer,
+ index, qp->qpn);
if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
err = mlx4_WRITE_ENTRY(dev, index, mailbox);
goto out;
@@ -828,6 +840,34 @@ out:
return err;
}
+static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
+ u8 gid[16], u8 attach, u8 block_loopback,
+ enum mlx4_protocol prot)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ int err = 0;
+ int qpn;
+
+ if (!mlx4_is_mfunc(dev))
+ return -EBADF;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memcpy(mailbox->buf, gid, 16);
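+ /* Pack the in_modifier: QP number in the low bits, protocol
+ * starting at bit 28 and, for attach, the block loopback flag
+ * in bit 31 */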
+ qpn = qp->qpn;
+ qpn |= (prot << 28);
+ if (attach && block_loopback)
+ qpn |= (1 << 31);
+
+ err = mlx4_cmd(dev, mailbox->dma, qpn, attach,
+ MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback, enum mlx4_protocol prot)
@@ -843,9 +883,12 @@ int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
if (prot == MLX4_PROT_ETH)
gid[7] |= (steer << 1);
- return mlx4_qp_attach_common(dev, qp, gid,
- block_mcast_loopback, prot,
- steer);
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 1,
+ block_mcast_loopback, prot);
+
+ return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
+ prot, steer);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
@@ -860,22 +903,90 @@ int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
- if (prot == MLX4_PROT_ETH) {
+ if (prot == MLX4_PROT_ETH)
gid[7] |= (steer << 1);
- }
+
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
return mlx4_qp_detach_common(dev, qp, gid, prot, steer);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
+int mlx4_unicast_attach(struct mlx4_dev *dev,
+ struct mlx4_qp *qp, u8 gid[16],
+ int block_mcast_loopback, enum mlx4_protocol prot)
+{
+ if (prot == MLX4_PROT_ETH &&
+ !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
+ return 0;
+
+ if (prot == MLX4_PROT_ETH)
+ gid[7] |= (MLX4_UC_STEER << 1);
+
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 1,
+ block_mcast_loopback, prot);
+
+ return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
+ prot, MLX4_UC_STEER);
+}
+EXPORT_SYMBOL_GPL(mlx4_unicast_attach);
+
+int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
+ u8 gid[16], enum mlx4_protocol prot)
+{
+ if (prot == MLX4_PROT_ETH &&
+ !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
+ return 0;
+
+ if (prot == MLX4_PROT_ETH)
+ gid[7] |= (MLX4_UC_STEER << 1);
+
+ if (mlx4_is_mfunc(dev))
+ return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
+
+ return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER);
+}
+EXPORT_SYMBOL_GPL(mlx4_unicast_detach);
+
+int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ u32 qpn = (u32) vhcr->in_param & 0xffffffff;
+ u8 port = vhcr->in_param >> 62;
+ enum mlx4_steer_type steer = vhcr->in_modifier;
+
+ /* Promiscuous unicast is not allowed in mfunc */
+ if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
+ return 0;
+
+ if (vhcr->op_modifier)
+ return add_promisc_qp(dev, port, steer, qpn);
+ else
+ return remove_promisc_qp(dev, port, steer, qpn);
+}
+
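+/* in_param packs the QPN in its low 32 bits and the port in bits
+ * 63:62; the steering type travels in the in_modifier and add/remove
+ * in the op_modifier (handled by mlx4_PROMISC_wrapper above). */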
+static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
+ enum mlx4_steer_type steer, u8 add, u8 port)
+{
+ return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add,
+ MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+}
int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
+ if (mlx4_is_mfunc(dev))
+ return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);
- return add_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
+ return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
@@ -884,27 +995,34 @@ int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
return 0;
+ if (mlx4_is_mfunc(dev))
+ return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);
- return remove_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
+ return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
return 0;
+ if (mlx4_is_mfunc(dev))
+ return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);
- return add_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
+ return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
return 0;
- return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
+ if (mlx4_is_mfunc(dev))
+ return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);
+
+ return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 5dfa68f..28f8251 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -46,21 +46,25 @@
#include <linux/mlx4/device.h>
#include <linux/mlx4/driver.h>
#include <linux/mlx4/doorbell.h>
+#include <linux/mlx4/cmd.h>
#define DRV_NAME "mlx4_core"
-#define DRV_VERSION "1.0"
-#define DRV_RELDATE "July 14, 2011"
+#define PFX DRV_NAME ": "
+#define DRV_VERSION "1.1"
+#define DRV_RELDATE "Dec, 2011"
enum {
MLX4_HCR_BASE = 0x80680,
MLX4_HCR_SIZE = 0x0001c,
- MLX4_CLR_INT_SIZE = 0x00008
+ MLX4_CLR_INT_SIZE = 0x00008,
+ MLX4_SLAVE_COMM_BASE = 0x0,
+ MLX4_COMM_PAGESIZE = 0x1000
};
enum {
- MLX4_MGM_ENTRY_SIZE = 0x100,
- MLX4_QP_PER_MGM = 4 * (MLX4_MGM_ENTRY_SIZE / 16 - 2),
- MLX4_MTT_ENTRY_PER_SEG = 8
+ MLX4_MAX_MGM_ENTRY_SIZE = 0x1000,
+ MLX4_MAX_QP_PER_MGM = 4 * (MLX4_MAX_MGM_ENTRY_SIZE / 16 - 2),
+ MLX4_MTT_ENTRY_PER_SEG = 8,
};
enum {
@@ -80,6 +84,94 @@ enum {
MLX4_NUM_CMPTS = MLX4_CMPT_NUM_TYPE << MLX4_CMPT_SHIFT
};
+enum mlx4_mr_state {
+ MLX4_MR_DISABLED = 0,
+ MLX4_MR_EN_HW,
+ MLX4_MR_EN_SW
+};
+
+#define MLX4_COMM_TIME 10000
+enum {
+ MLX4_COMM_CMD_RESET,
+ MLX4_COMM_CMD_VHCR0,
+ MLX4_COMM_CMD_VHCR1,
+ MLX4_COMM_CMD_VHCR2,
+ MLX4_COMM_CMD_VHCR_EN,
+ MLX4_COMM_CMD_VHCR_POST,
+ MLX4_COMM_CMD_FLR = 254
+};
+
+/* This flag indicates that the slave should delay the RESET command */
+#define MLX4_DELAY_RESET_SLAVE 0xbbbbbbb
+/* Indicates how many retries will be done if we are in the middle of FLR */
+#define NUM_OF_RESET_RETRIES 10
+#define SLEEP_TIME_IN_RESET (2 * 1000)
+enum mlx4_resource {
+ RES_QP,
+ RES_CQ,
+ RES_SRQ,
+ RES_XRCD,
+ RES_MPT,
+ RES_MTT,
+ RES_MAC,
+ RES_VLAN,
+ RES_EQ,
+ RES_COUNTER,
+ MLX4_NUM_OF_RESOURCE_TYPE
+};
+
+enum mlx4_alloc_mode {
+ RES_OP_RESERVE,
+ RES_OP_RESERVE_AND_MAP,
+ RES_OP_MAP_ICM,
+};
+
+
+/*
+ * Virtual HCR structures.
+ * mlx4_vhcr is the SW representation, in machine endianness.
+ *
+ * mlx4_vhcr_cmd is the formalized structure, the one that is passed
+ * to FW through the communication channel.
+ * It is big endian, and has the same layout as the physical HCR
+ * used by the command interface.
+ */
+struct mlx4_vhcr {
+ u64 in_param;
+ u64 out_param;
+ u32 in_modifier;
+ u32 errno;
+ u16 op;
+ u16 token;
+ u8 op_modifier;
+ u8 e_bit;
+};
+
+struct mlx4_vhcr_cmd {
+ __be64 in_param;
+ __be32 in_modifier;
+ __be64 out_param;
+ __be16 token;
+ u16 reserved;
+ u8 status;
+ u8 flags;
+ __be16 opcode;
+};
+
+struct mlx4_cmd_info {
+ u16 opcode;
+ bool has_inbox;
+ bool has_outbox;
+ bool out_is_imm;
+ bool encode_slave_id;
+ int (*verify)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox);
+ int (*wrapper)(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+};
+
#ifdef CONFIG_MLX4_DEBUG
extern int mlx4_debug_level;
#else /* CONFIG_MLX4_DEBUG */
@@ -99,6 +191,12 @@ do { \
#define mlx4_warn(mdev, format, arg...) \
dev_warn(&mdev->pdev->dev, format, ##arg)
+extern int mlx4_log_num_mgm_entry_size;
+extern int log_mtts_per_seg;
+
+#define MLX4_MAX_NUM_SLAVES (MLX4_MAX_NUM_PF + MLX4_MAX_NUM_VF)
+#define ALL_SLAVES 0xff
+
struct mlx4_bitmap {
u32 last;
u32 top;
@@ -130,6 +228,147 @@ struct mlx4_icm_table {
struct mlx4_icm **icm;
};
+/*
+ * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
+ */
+struct mlx4_mpt_entry {
+ __be32 flags;
+ __be32 qpn;
+ __be32 key;
+ __be32 pd_flags;
+ __be64 start;
+ __be64 length;
+ __be32 lkey;
+ __be32 win_cnt;
+ u8 reserved1[3];
+ u8 mtt_rep;
+ __be64 mtt_addr;
+ __be32 mtt_sz;
+ __be32 entity_size;
+ __be32 first_byte_offset;
+} __packed;
+
+/*
+ * Must be packed because start is 64 bits but only aligned to 32 bits.
+ */
+struct mlx4_eq_context {
+ __be32 flags;
+ u16 reserved1[3];
+ __be16 page_offset;
+ u8 log_eq_size;
+ u8 reserved2[4];
+ u8 eq_period;
+ u8 reserved3;
+ u8 eq_max_count;
+ u8 reserved4[3];
+ u8 intr;
+ u8 log_page_size;
+ u8 reserved5[2];
+ u8 mtt_base_addr_h;
+ __be32 mtt_base_addr_l;
+ u32 reserved6[2];
+ __be32 consumer_index;
+ __be32 producer_index;
+ u32 reserved7[4];
+};
+
+struct mlx4_cq_context {
+ __be32 flags;
+ u16 reserved1[3];
+ __be16 page_offset;
+ __be32 logsize_usrpage;
+ __be16 cq_period;
+ __be16 cq_max_count;
+ u8 reserved2[3];
+ u8 comp_eqn;
+ u8 log_page_size;
+ u8 reserved3[2];
+ u8 mtt_base_addr_h;
+ __be32 mtt_base_addr_l;
+ __be32 last_notified_index;
+ __be32 solicit_producer_index;
+ __be32 consumer_index;
+ __be32 producer_index;
+ u32 reserved4[2];
+ __be64 db_rec_addr;
+};
+
+struct mlx4_srq_context {
+ __be32 state_logsize_srqn;
+ u8 logstride;
+ u8 reserved1;
+ __be16 xrcd;
+ __be32 pg_offset_cqn;
+ u32 reserved2;
+ u8 log_page_size;
+ u8 reserved3[2];
+ u8 mtt_base_addr_h;
+ __be32 mtt_base_addr_l;
+ __be32 pd;
+ __be16 limit_watermark;
+ __be16 wqe_cnt;
+ u16 reserved4;
+ __be16 wqe_counter;
+ u32 reserved5;
+ __be64 db_rec_addr;
+};
+
+struct mlx4_eqe {
+ u8 reserved1;
+ u8 type;
+ u8 reserved2;
+ u8 subtype;
+ union {
+ u32 raw[6];
+ struct {
+ __be32 cqn;
+ } __packed comp;
+ struct {
+ u16 reserved1;
+ __be16 token;
+ u32 reserved2;
+ u8 reserved3[3];
+ u8 status;
+ __be64 out_param;
+ } __packed cmd;
+ struct {
+ __be32 qpn;
+ } __packed qp;
+ struct {
+ __be32 srqn;
+ } __packed srq;
+ struct {
+ __be32 cqn;
+ u32 reserved1;
+ u8 reserved2[3];
+ u8 syndrome;
+ } __packed cq_err;
+ struct {
+ u32 reserved1[2];
+ __be32 port;
+ } __packed port_change;
+ struct {
+ #define COMM_CHANNEL_BIT_ARRAY_SIZE 4
+ u32 reserved;
+ u32 bit_vec[COMM_CHANNEL_BIT_ARRAY_SIZE];
+ } __packed comm_channel_arm;
+ struct {
+ u8 port;
+ u8 reserved[3];
+ __be64 mac;
+ } __packed mac_update;
+ struct {
+ u8 port;
+ } __packed sw_event;
+ struct {
+ __be32 slave_id;
+ } __packed flr_event;
+ } event;
+ u8 slave_id;
+ u8 reserved3[2];
+ u8 owner;
+} __packed;
+
struct mlx4_eq {
struct mlx4_dev *dev;
void __iomem *doorbell;
@@ -142,6 +381,17 @@ struct mlx4_eq {
struct mlx4_mtt mtt;
};
+struct mlx4_slave_eqe {
+ u8 type;
+ u8 port;
+ u32 param;
+};
+
+struct mlx4_slave_event_eq_info {
+ int eqn;
+ u16 token;
+};
+
struct mlx4_profile {
int num_qp;
int rdmarc_per_qp;
@@ -155,16 +405,37 @@ struct mlx4_profile {
struct mlx4_fw {
u64 clr_int_base;
u64 catas_offset;
+ u64 comm_base;
struct mlx4_icm *fw_icm;
struct mlx4_icm *aux_icm;
u32 catas_size;
u16 fw_pages;
u8 clr_int_bar;
u8 catas_bar;
+ u8 comm_bar;
};
-#define MGM_QPN_MASK 0x00FFFFFF
-#define MGM_BLCK_LB_BIT 30
+struct mlx4_comm {
+ u32 slave_write;
+ u32 slave_read;
+};
+
+enum {
+ MLX4_MCAST_CONFIG = 0,
+ MLX4_MCAST_DISABLE = 1,
+ MLX4_MCAST_ENABLE = 2,
+};
+
+#define VLAN_FLTR_SIZE 128
+
+struct mlx4_vlan_fltr {
+ __be32 entry[VLAN_FLTR_SIZE];
+};
+
+struct mlx4_mcast_entry {
+ struct list_head list;
+ u64 addr;
+};
struct mlx4_promisc_qp {
struct list_head list;
@@ -177,19 +448,90 @@ struct mlx4_steer_index {
struct list_head duplicates;
};
-struct mlx4_mgm {
- __be32 next_gid_index;
- __be32 members_count;
- u32 reserved[2];
- u8 gid[16];
- __be32 qp[MLX4_QP_PER_MGM];
+#define MLX4_EVENT_TYPES_NUM 64
+
+struct mlx4_slave_state {
+ u8 comm_toggle;
+ u8 last_cmd;
+ u8 init_port_mask;
+ bool active;
+ u8 function;
+ dma_addr_t vhcr_dma;
+ u16 mtu[MLX4_MAX_PORTS + 1];
+ __be32 ib_cap_mask[MLX4_MAX_PORTS + 1];
+ struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES];
+ struct list_head mcast_filters[MLX4_MAX_PORTS + 1];
+ struct mlx4_vlan_fltr *vlan_filter[MLX4_MAX_PORTS + 1];
+ /* event type to eq number lookup */
+ struct mlx4_slave_event_eq_info event_eq[MLX4_EVENT_TYPES_NUM];
+ u16 eq_pi;
+ u16 eq_ci;
+ spinlock_t lock;
+ /* initialized to zero by kzalloc */
+ u8 is_slave_going_down;
+ u32 cookie;
+};
+
+struct slave_list {
+ struct mutex mutex;
+ struct list_head res_list[MLX4_NUM_OF_RESOURCE_TYPE];
+};
+
+struct mlx4_resource_tracker {
+ spinlock_t lock;
+ /* one tree per resource type */
+ struct radix_tree_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE];
+ /* num_slaves resource lists, one per slave */
+ struct slave_list *slave_list;
+};
+
+#define SLAVE_EVENT_EQ_SIZE 128
+struct mlx4_slave_event_eq {
+ u32 eqn;
+ u32 cons;
+ u32 prod;
+ struct mlx4_eqe event_eqe[SLAVE_EVENT_EQ_SIZE];
+};
+
+struct mlx4_master_qp0_state {
+ int proxy_qp0_active;
+ int qp0_active;
+ int port_active;
+};
+
+struct mlx4_mfunc_master_ctx {
+ struct mlx4_slave_state *slave_state;
+ struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
+ int init_port_ref[MLX4_MAX_PORTS + 1];
+ u16 max_mtu[MLX4_MAX_PORTS + 1];
+ int disable_mcast_ref[MLX4_MAX_PORTS + 1];
+ struct mlx4_resource_tracker res_tracker;
+ struct workqueue_struct *comm_wq;
+ struct work_struct comm_work;
+ struct work_struct slave_event_work;
+ struct work_struct slave_flr_event_work;
+ spinlock_t slave_state_lock;
+ __be32 comm_arm_bit_vector[4];
+ struct mlx4_eqe cmd_eqe;
+ struct mlx4_slave_event_eq slave_eq;
+ struct mutex gen_eqe_mutex[MLX4_MFUNC_MAX];
+};
+
+struct mlx4_mfunc {
+ struct mlx4_comm __iomem *comm;
+ struct mlx4_vhcr_cmd *vhcr;
+ dma_addr_t vhcr_dma;
+
+ struct mlx4_mfunc_master_ctx master;
};
+
struct mlx4_cmd {
struct pci_pool *pool;
void __iomem *hcr;
struct mutex hcr_mutex;
struct semaphore poll_sem;
struct semaphore event_sem;
+ struct semaphore slave_sem;
int max_cmds;
spinlock_t context_lock;
int free_head;
@@ -197,6 +539,7 @@ struct mlx4_cmd {
u16 token_mask;
u8 use_events;
u8 toggle;
+ u8 comm_toggle;
};
struct mlx4_uar_table {
@@ -287,6 +630,48 @@ struct mlx4_vlan_table {
int max;
};
+#define SET_PORT_GEN_ALL_VALID 0x7
+#define SET_PORT_PROMISC_SHIFT 31
+#define SET_PORT_MC_PROMISC_SHIFT 30
+
+enum {
+ MCAST_DIRECT_ONLY = 0,
+ MCAST_DIRECT = 1,
+ MCAST_DEFAULT = 2
+};
+
+
+struct mlx4_set_port_general_context {
+ u8 reserved[3];
+ u8 flags;
+ u16 reserved2;
+ __be16 mtu;
+ u8 pptx;
+ u8 pfctx;
+ u16 reserved3;
+ u8 pprx;
+ u8 pfcrx;
+ u16 reserved4;
+};
+
+struct mlx4_set_port_rqp_calc_context {
+ __be32 base_qpn;
+ u8 rererved;
+ u8 n_mac;
+ u8 n_vlan;
+ u8 n_prio;
+ u8 reserved2[3];
+ u8 mac_miss;
+ u8 intra_no_vlan;
+ u8 no_vlan;
+ u8 intra_vlan_miss;
+ u8 vlan_miss;
+ u8 reserved3[3];
+ u8 no_vlan_prio;
+ __be32 promisc;
+ __be32 mcast;
+};
+
struct mlx4_mac_entry {
u64 mac;
};
@@ -312,7 +697,7 @@ struct mlx4_sense {
struct mlx4_msix_ctl {
u64 pool_bm;
- spinlock_t pool_lock;
+ struct mutex pool_lock;
};
struct mlx4_steer {
@@ -333,6 +718,7 @@ struct mlx4_priv {
struct mlx4_fw fw;
struct mlx4_cmd cmd;
+ struct mlx4_mfunc mfunc;
struct mlx4_bitmap pd_bitmap;
struct mlx4_bitmap xrcd_bitmap;
@@ -359,6 +745,7 @@ struct mlx4_priv {
struct list_head bf_list;
struct mutex bf_mutex;
struct io_mapping *bf_mapping;
+ int reserved_mtts;
};
static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev)
@@ -403,6 +790,62 @@ void mlx4_cleanup_cq_table(struct mlx4_dev *dev);
void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn);
+void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn);
+int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn);
+void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn);
+int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn);
+void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn);
+int __mlx4_mr_reserve(struct mlx4_dev *dev);
+void __mlx4_mr_release(struct mlx4_dev *dev, u32 index);
+int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index);
+void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index);
+u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order);
+void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order);
+
+int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SYNC_TPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_DMA_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+ int *base);
+void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
+int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
+int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
+int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ int start_index, int npages, u64 *page_list);
void mlx4_start_catas_poll(struct mlx4_dev *dev);
void mlx4_stop_catas_poll(struct mlx4_dev *dev);
@@ -419,13 +862,113 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
struct mlx4_profile *request,
struct mlx4_dev_cap *dev_cap,
struct mlx4_init_hca_param *init_hca);
+void mlx4_master_comm_channel(struct work_struct *work);
+void mlx4_gen_slave_eqe(struct work_struct *work);
+void mlx4_master_handle_slave_flr(struct work_struct *work);
+
+int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_COMM_INT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+
+int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe);
int mlx4_cmd_init(struct mlx4_dev *dev);
void mlx4_cmd_cleanup(struct mlx4_dev *dev);
+int mlx4_multi_func_init(struct mlx4_dev *dev);
+void mlx4_multi_func_cleanup(struct mlx4_dev *dev);
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
int mlx4_cmd_use_events(struct mlx4_dev *dev);
void mlx4_cmd_use_polling(struct mlx4_dev *dev);
+int mlx4_comm_cmd(struct mlx4_dev *dev, u8 cmd, u16 param,
+ unsigned long timeout);
+
void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn);
void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type);
@@ -452,12 +995,113 @@ void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table);
void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table);
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port);
+/* resource tracker functions */
+int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
+ enum mlx4_resource resource_type,
+ int resource_id, int *slave);
+void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id);
+int mlx4_init_resource_tracker(struct mlx4_dev *dev);
+
+void mlx4_free_resource_tracker(struct mlx4_dev *dev);
+
+int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_INIT_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port);
+
+int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+
+int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
enum mlx4_protocol prot, enum mlx4_steer_type steer);
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
int block_mcast_loopback, enum mlx4_protocol prot,
enum mlx4_steer_type steer);
+int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_common_set_vlan_fltr(struct mlx4_dev *dev, int function,
+ int port, void *buf);
+int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave, u32 in_mod,
+ struct mlx4_cmd_mailbox *outbox);
+int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_PKEY_TABLE_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd);
+
+int mlx4_get_mgm_entry_size(struct mlx4_dev *dev);
+int mlx4_get_qp_per_mgm(struct mlx4_dev *dev);
+
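+/* Helpers used by the multifunction (SR-IOV) command path to pack and
+ * unpack the low and high 32-bit halves of the 64-bit immediate
+ * argument passed through mlx4_cmd()/mlx4_cmd_imm(). */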
+static inline void set_param_l(u64 *arg, u32 val)
+{
+ *((u32 *)arg) = val;
+}
+
+static inline void set_param_h(u64 *arg, u32 val)
+{
+ *arg = (*arg & 0xffffffff) | ((u64) val << 32);
+}
+
+static inline u32 get_param_l(u64 *arg)
+{
+ return (u32) (*arg & 0xffffffff);
+}
+
+static inline u32 get_param_h(u64 *arg)
+{
+ return (u32)(*arg >> 32);
+}
+
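+/* Lock protecting the master's resource tracker state. */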
+static inline spinlock_t *mlx4_tlock(struct mlx4_dev *dev)
+{
+ return &mlx4_priv(dev)->mfunc.master.res_tracker.lock;
+}
+
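+/* Number of low-order PD bits left unmasked when allocating PDs from the
+ * bitmap; see mlx4_init_pd_table() in pd.c. */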
+#define NOT_MASKED_PD_BITS 17
+
#endif /* MLX4_H */
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 207b5ad..d60335f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -51,8 +51,8 @@
#include "en_port.h"
#define DRV_NAME "mlx4_en"
-#define DRV_VERSION "1.5.4.2"
-#define DRV_RELDATE "October 2011"
+#define DRV_VERSION "2.0"
+#define DRV_RELDATE "Dec 2011"
#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
@@ -325,11 +325,11 @@ struct mlx4_en_port_profile {
u8 rx_ppp;
u8 tx_pause;
u8 tx_ppp;
+ int rss_rings;
};
struct mlx4_en_profile {
int rss_xor;
- int tcp_rss;
int udp_rss;
u8 rss_mask;
u32 active_ports;
@@ -366,16 +366,6 @@ struct mlx4_en_rss_map {
enum mlx4_qp_state indir_state;
};
-struct mlx4_en_rss_context {
- __be32 base_qpn;
- __be32 default_qpn;
- u16 reserved;
- u8 hash_fn;
- u8 flags;
- __be32 rss_key[10];
- __be32 base_qpn_udp;
-};
-
struct mlx4_en_port_state {
int link_state;
int link_speed;
@@ -463,6 +453,7 @@ struct mlx4_en_priv {
int base_qpn;
struct mlx4_en_rss_map rss_map;
+ u32 ctrl_flags;
u32 flags;
#define MLX4_EN_FLAG_PROMISC 0x1
#define MLX4_EN_FLAG_MC_PROMISC 0x2
@@ -485,6 +476,7 @@ struct mlx4_en_priv {
struct mlx4_en_perf_stats pstats;
struct mlx4_en_pkt_stats pkstats;
struct mlx4_en_port_stats port_stats;
+ u64 stats_bitmap;
char *mc_addrs;
int mc_addrs_cnt;
struct mlx4_en_stat_out_mbox hw_stats;
@@ -495,9 +487,9 @@ struct mlx4_en_priv {
enum mlx4_en_wol {
MLX4_EN_WOL_MAGIC = (1ULL << 61),
MLX4_EN_WOL_ENABLED = (1ULL << 62),
- MLX4_EN_WOL_DO_MODIFY = (1ULL << 63),
};
+#define MLX4_EN_WOL_DO_MODIFY (1ULL << 63)
void mlx4_en_destroy_netdev(struct net_device *dev);
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -536,7 +528,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring,
u32 size, u16 stride);
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
- struct mlx4_en_rx_ring *ring);
+ struct mlx4_en_rx_ring *ring,
+ u32 size, u16 stride);
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv);
void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring *ring);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index efa3e77..25a80d7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -32,35 +32,17 @@
* SOFTWARE.
*/
+#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/slab.h>
+#include <linux/kernel.h>
#include <linux/mlx4/cmd.h>
#include "mlx4.h"
#include "icm.h"
-/*
- * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
- */
-struct mlx4_mpt_entry {
- __be32 flags;
- __be32 qpn;
- __be32 key;
- __be32 pd_flags;
- __be64 start;
- __be64 length;
- __be32 lkey;
- __be32 win_cnt;
- u8 reserved1[3];
- u8 mtt_rep;
- __be64 mtt_seg;
- __be32 mtt_sz;
- __be32 entity_size;
- __be32 first_byte_offset;
-} __packed;
-
#define MLX4_MPT_FLAG_SW_OWNS (0xfUL << 28)
#define MLX4_MPT_FLAG_FREE (0x3UL << 28)
#define MLX4_MPT_FLAG_MIO (1 << 17)
@@ -180,22 +162,48 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy)
kfree(buddy->num_free);
}
-static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
+u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
{
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
u32 seg;
+ int seg_order;
+ u32 offset;
- seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, order);
+ seg_order = max_t(int, order - log_mtts_per_seg, 0);
+
+ seg = mlx4_buddy_alloc(&mr_table->mtt_buddy, seg_order);
if (seg == -1)
return -1;
- if (mlx4_table_get_range(dev, &mr_table->mtt_table, seg,
- seg + (1 << order) - 1)) {
- mlx4_buddy_free(&mr_table->mtt_buddy, seg, order);
+ offset = seg * (1 << log_mtts_per_seg);
+
+ if (mlx4_table_get_range(dev, &mr_table->mtt_table, offset,
+ offset + (1 << order) - 1)) {
+ mlx4_buddy_free(&mr_table->mtt_buddy, seg, seg_order);
return -1;
}
- return seg;
+ return offset;
+}
+
+static u32 mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order)
+{
+ u64 in_param;
+ u64 out_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, order);
+ err = mlx4_cmd_imm(dev, in_param, &out_param, RES_MTT,
+ RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+ if (err)
+ return -1;
+ return get_param_l(&out_param);
+ }
+ return __mlx4_alloc_mtt_range(dev, order);
}
int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
@@ -210,33 +218,63 @@ int mlx4_mtt_init(struct mlx4_dev *dev, int npages, int page_shift,
} else
mtt->page_shift = page_shift;
- for (mtt->order = 0, i = dev->caps.mtts_per_seg; i < npages; i <<= 1)
+ for (mtt->order = 0, i = 1; i < npages; i <<= 1)
++mtt->order;
- mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);
- if (mtt->first_seg == -1)
+ mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order);
+ if (mtt->offset == -1)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_init);
-void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
+void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
{
+ u32 first_seg;
+ int seg_order;
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+ seg_order = max_t(int, order - log_mtts_per_seg, 0);
+ first_seg = offset / (1 << log_mtts_per_seg);
+
+ mlx4_buddy_free(&mr_table->mtt_buddy, first_seg, seg_order);
+ mlx4_table_put_range(dev, &mr_table->mtt_table, offset,
+ offset + (1 << order) - 1);
+}
+
+static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
+{
+ u64 in_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, offset);
+ set_param_h(&in_param, order);
+ err = mlx4_cmd(dev, in_param, RES_MTT, RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+ if (err)
+ mlx4_warn(dev, "Failed to free mtt range at:"
+ "%d order:%d\n", offset, order);
+ return;
+ }
+ __mlx4_free_mtt_range(dev, offset, order);
+}
+
+void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
+{
if (mtt->order < 0)
return;
- mlx4_buddy_free(&mr_table->mtt_buddy, mtt->first_seg, mtt->order);
- mlx4_table_put_range(dev, &mr_table->mtt_table, mtt->first_seg,
- mtt->first_seg + (1 << mtt->order) - 1);
+ mlx4_free_mtt_range(dev, mtt->offset, mtt->order);
}
EXPORT_SYMBOL_GPL(mlx4_mtt_cleanup);
u64 mlx4_mtt_addr(struct mlx4_dev *dev, struct mlx4_mtt *mtt)
{
- return (u64) mtt->first_seg * dev->caps.mtt_entry_sz;
+ return (u64) mtt->offset * dev->caps.mtt_entry_sz;
}
EXPORT_SYMBOL_GPL(mlx4_mtt_addr);
@@ -253,69 +291,205 @@ static u32 key_to_hw_index(u32 key)
static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int mpt_index)
{
- return mlx4_cmd(dev, mailbox->dma, mpt_index, 0, MLX4_CMD_SW2HW_MPT,
- MLX4_CMD_TIME_CLASS_B);
+ return mlx4_cmd(dev, mailbox->dma, mpt_index,
+ 0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int mpt_index)
{
return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index,
- !mailbox, MLX4_CMD_HW2SW_MPT, MLX4_CMD_TIME_CLASS_B);
+ !mailbox, MLX4_CMD_HW2SW_MPT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}
-int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
- int npages, int page_shift, struct mlx4_mr *mr)
+int mlx4_mr_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+ u32 *base_mridx)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- u32 index;
- int err;
+ u32 mridx;
- index = mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
- if (index == -1)
+ mridx = mlx4_bitmap_alloc_range(&priv->mr_table.mpt_bitmap, cnt, align);
+ if (mridx == -1)
return -ENOMEM;
+ *base_mridx = mridx;
+ return 0;
+
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_reserve_range);
+
+void mlx4_mr_release_range(struct mlx4_dev *dev, u32 base_mridx, int cnt)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ mlx4_bitmap_free_range(&priv->mr_table.mpt_bitmap, base_mridx, cnt);
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_release_range);
+
+int mlx4_mr_alloc_reserved(struct mlx4_dev *dev, u32 mridx, u32 pd,
+ u64 iova, u64 size, u32 access, int npages,
+ int page_shift, struct mlx4_mr *mr)
+{
mr->iova = iova;
mr->size = size;
mr->pd = pd;
mr->access = access;
- mr->enabled = 0;
- mr->key = hw_index_to_key(index);
+ mr->enabled = MLX4_MR_DISABLED;
+ mr->key = hw_index_to_key(mridx);
+
+ return mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_alloc_reserved);
+
+static int mlx4_WRITE_MTT(struct mlx4_dev *dev,
+ struct mlx4_cmd_mailbox *mailbox,
+ int num_entries)
+{
+ return mlx4_cmd(dev, mailbox->dma, num_entries, 0, MLX4_CMD_WRITE_MTT,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+}
- err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
+int __mlx4_mr_reserve(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ return mlx4_bitmap_alloc(&priv->mr_table.mpt_bitmap);
+}
+
+static int mlx4_mr_reserve(struct mlx4_dev *dev)
+{
+ u64 out_param;
+
+ if (mlx4_is_mfunc(dev)) {
+ if (mlx4_cmd_imm(dev, 0, &out_param, RES_MPT, RES_OP_RESERVE,
+ MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+ return -1;
+ return get_param_l(&out_param);
+ }
+ return __mlx4_mr_reserve(dev);
+}
+
+void __mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+}
+
+static void mlx4_mr_release(struct mlx4_dev *dev, u32 index)
+{
+ u64 in_param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, index);
+ if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_RESERVE,
+ MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+ mlx4_warn(dev, "Failed to release mr index:%d\n",
+ index);
+ return;
+ }
+ __mlx4_mr_release(dev, index);
+}
+
+int __mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+{
+ struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+
+ return mlx4_table_get(dev, &mr_table->dmpt_table, index);
+}
+
+static int mlx4_mr_alloc_icm(struct mlx4_dev *dev, u32 index)
+{
+ u64 param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&param, index);
+ return mlx4_cmd_imm(dev, param, &param, RES_MPT, RES_OP_MAP_ICM,
+ MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+ }
+ return __mlx4_mr_alloc_icm(dev, index);
+}
+
+void __mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+{
+ struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+
+ mlx4_table_put(dev, &mr_table->dmpt_table, index);
+}
+
+static void mlx4_mr_free_icm(struct mlx4_dev *dev, u32 index)
+{
+ u64 in_param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, index);
+ if (mlx4_cmd(dev, in_param, RES_MPT, RES_OP_MAP_ICM,
+ MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED))
+ mlx4_warn(dev, "Failed to free icm of mr index:%d\n",
+ index);
+ return;
+ }
+ return __mlx4_mr_free_icm(dev, index);
+}
+
+int mlx4_mr_alloc(struct mlx4_dev *dev, u32 pd, u64 iova, u64 size, u32 access,
+ int npages, int page_shift, struct mlx4_mr *mr)
+{
+ u32 index;
+ int err;
+
+ index = mlx4_mr_reserve(dev);
+ if (index == -1)
+ return -ENOMEM;
+
+ err = mlx4_mr_alloc_reserved(dev, index, pd, iova, size,
+ access, npages, page_shift, mr);
if (err)
- mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, index);
+ mlx4_mr_release(dev, index);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_alloc);
-void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
+void mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
- struct mlx4_priv *priv = mlx4_priv(dev);
int err;
- if (mr->enabled) {
+ if (mr->enabled == MLX4_MR_EN_HW) {
err = mlx4_HW2SW_MPT(dev, NULL,
key_to_hw_index(mr->key) &
(dev->caps.num_mpts - 1));
if (err)
- mlx4_warn(dev, "HW2SW_MPT failed (%d)\n", err);
- }
+ mlx4_warn(dev, "xxx HW2SW_MPT failed (%d)\n", err);
+ mr->enabled = MLX4_MR_EN_SW;
+ }
mlx4_mtt_cleanup(dev, &mr->mtt);
- mlx4_bitmap_free(&priv->mr_table.mpt_bitmap, key_to_hw_index(mr->key));
+}
+EXPORT_SYMBOL_GPL(mlx4_mr_free_reserved);
+
+void mlx4_mr_free(struct mlx4_dev *dev, struct mlx4_mr *mr)
+{
+ mlx4_mr_free_reserved(dev, mr);
+ if (mr->enabled)
+ mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
+ mlx4_mr_release(dev, key_to_hw_index(mr->key));
}
EXPORT_SYMBOL_GPL(mlx4_mr_free);
int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
- struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
struct mlx4_cmd_mailbox *mailbox;
struct mlx4_mpt_entry *mpt_entry;
int err;
- err = mlx4_table_get(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
+ err = mlx4_mr_alloc_icm(dev, key_to_hw_index(mr->key));
if (err)
return err;
@@ -340,9 +514,10 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
if (mr->mtt.order < 0) {
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_PHYSICAL);
- mpt_entry->mtt_seg = 0;
+ mpt_entry->mtt_addr = 0;
} else {
- mpt_entry->mtt_seg = cpu_to_be64(mlx4_mtt_addr(dev, &mr->mtt));
+ mpt_entry->mtt_addr = cpu_to_be64(mlx4_mtt_addr(dev,
+ &mr->mtt));
}
if (mr->mtt.order >= 0 && mr->mtt.page_shift == 0) {
@@ -350,8 +525,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_FREE);
mpt_entry->pd_flags |= cpu_to_be32(MLX4_MPT_PD_FLAG_FAST_REG |
MLX4_MPT_PD_FLAG_RAE);
- mpt_entry->mtt_sz = cpu_to_be32((1 << mr->mtt.order) *
- dev->caps.mtts_per_seg);
+ mpt_entry->mtt_sz = cpu_to_be32(1 << mr->mtt.order);
} else {
mpt_entry->flags |= cpu_to_be32(MLX4_MPT_FLAG_SW_OWNS);
}
@@ -362,8 +536,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr)
mlx4_warn(dev, "SW2HW_MPT failed (%d)\n", err);
goto err_cmd;
}
-
- mr->enabled = 1;
+ mr->enabled = MLX4_MR_EN_HW;
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -373,7 +546,7 @@ err_cmd:
mlx4_free_cmd_mailbox(dev, mailbox);
err_table:
- mlx4_table_put(dev, &mr_table->dmpt_table, key_to_hw_index(mr->key));
+ mlx4_mr_free_icm(dev, key_to_hw_index(mr->key));
return err;
}
EXPORT_SYMBOL_GPL(mlx4_mr_enable);
@@ -385,18 +558,10 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
__be64 *mtts;
dma_addr_t dma_handle;
int i;
- int s = start_index * sizeof (u64);
- /* All MTTs must fit in the same page */
- if (start_index / (PAGE_SIZE / sizeof (u64)) !=
- (start_index + npages - 1) / (PAGE_SIZE / sizeof (u64)))
- return -EINVAL;
-
- if (start_index & (dev->caps.mtts_per_seg - 1))
- return -EINVAL;
+ mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->offset +
+ start_index, &dma_handle);
- mtts = mlx4_table_find(&priv->mr_table.mtt_table, mtt->first_seg +
- s / dev->caps.mtt_entry_sz, &dma_handle);
if (!mtts)
return -ENOMEM;
@@ -412,27 +577,75 @@ static int mlx4_write_mtt_chunk(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
return 0;
}
-int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
- int start_index, int npages, u64 *page_list)
+int __mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ int start_index, int npages, u64 *page_list)
{
+ int err = 0;
int chunk;
- int err;
+ int mtts_per_page;
+ int max_mtts_first_page;
- if (mtt->order < 0)
- return -EINVAL;
+ /* compute how many MTTs fit in the first page */
+ mtts_per_page = PAGE_SIZE / sizeof(u64);
+ max_mtts_first_page = mtts_per_page - (mtt->offset + start_index)
+ % mtts_per_page;
+
+ chunk = min_t(int, max_mtts_first_page, npages);
while (npages > 0) {
- chunk = min_t(int, PAGE_SIZE / sizeof(u64), npages);
err = mlx4_write_mtt_chunk(dev, mtt, start_index, chunk, page_list);
if (err)
return err;
-
npages -= chunk;
start_index += chunk;
page_list += chunk;
+
+ chunk = min_t(int, mtts_per_page, npages);
}
+ return err;
+}
- return 0;
+int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ int start_index, int npages, u64 *page_list)
+{
+ struct mlx4_cmd_mailbox *mailbox = NULL;
+ __be64 *inbox = NULL;
+ int chunk;
+ int err = 0;
+ int i;
+
+ if (mtt->order < 0)
+ return -EINVAL;
+
+ if (mlx4_is_mfunc(dev)) {
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ inbox = mailbox->buf;
+
+ while (npages > 0) {
+ chunk = min_t(int, MLX4_MAILBOX_SIZE / sizeof(u64) - 2,
+ npages);
+ inbox[0] = cpu_to_be64(mtt->offset + start_index);
+ inbox[1] = 0;
+ for (i = 0; i < chunk; ++i)
+ inbox[i + 2] = cpu_to_be64(page_list[i] |
+ MLX4_MTT_FLAG_PRESENT);
+ err = mlx4_WRITE_MTT(dev, mailbox, chunk);
+ if (err) {
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+ }
+
+ npages -= chunk;
+ start_index += chunk;
+ page_list += chunk;
+ }
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+ }
+
+ return __mlx4_write_mtt(dev, mtt, start_index, npages, page_list);
}
EXPORT_SYMBOL_GPL(mlx4_write_mtt);
@@ -462,21 +675,34 @@ EXPORT_SYMBOL_GPL(mlx4_buf_write_mtt);
int mlx4_init_mr_table(struct mlx4_dev *dev)
{
- struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_mr_table *mr_table = &priv->mr_table;
int err;
+ if (!is_power_of_2(dev->caps.num_mpts))
+ return -EINVAL;
+
+ /* Nothing to do for slaves - all MR handling is forwarded
+ * to the master */
+ if (mlx4_is_slave(dev))
+ return 0;
+
err = mlx4_bitmap_init(&mr_table->mpt_bitmap, dev->caps.num_mpts,
~0, dev->caps.reserved_mrws, 0);
if (err)
return err;
err = mlx4_buddy_init(&mr_table->mtt_buddy,
- ilog2(dev->caps.num_mtt_segs));
+ ilog2(dev->caps.num_mtts /
+ (1 << log_mtts_per_seg)));
if (err)
goto err_buddy;
if (dev->caps.reserved_mtts) {
- if (mlx4_alloc_mtt_range(dev, fls(dev->caps.reserved_mtts - 1)) == -1) {
+ priv->reserved_mtts =
+ mlx4_alloc_mtt_range(dev,
+ fls(dev->caps.reserved_mtts - 1));
+ if (priv->reserved_mtts < 0) {
mlx4_warn(dev, "MTT table of order %d is too small.\n",
mr_table->mtt_buddy.max_order);
err = -ENOMEM;
@@ -497,8 +723,14 @@ err_buddy:
void mlx4_cleanup_mr_table(struct mlx4_dev *dev)
{
- struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_mr_table *mr_table = &priv->mr_table;
+ if (mlx4_is_slave(dev))
+ return;
+ if (priv->reserved_mtts >= 0)
+ mlx4_free_mtt_range(dev, priv->reserved_mtts,
+ fls(dev->caps.reserved_mtts - 1));
mlx4_buddy_cleanup(&mr_table->mtt_buddy);
mlx4_bitmap_cleanup(&mr_table->mpt_bitmap);
}
@@ -581,7 +813,7 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- u64 mtt_seg;
+ u64 mtt_offset;
int err = -ENOMEM;
if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
@@ -601,11 +833,12 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
if (err)
return err;
- mtt_seg = fmr->mr.mtt.first_seg * dev->caps.mtt_entry_sz;
+ mtt_offset = fmr->mr.mtt.offset * dev->caps.mtt_entry_sz;
fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
- fmr->mr.mtt.first_seg,
+ fmr->mr.mtt.offset,
&fmr->dma_handle);
+
if (!fmr->mtts) {
err = -ENOMEM;
goto err_free;
@@ -619,6 +852,46 @@ err_free:
}
EXPORT_SYMBOL_GPL(mlx4_fmr_alloc);
+int mlx4_fmr_alloc_reserved(struct mlx4_dev *dev, u32 mridx,
+ u32 pd, u32 access, int max_pages,
+ int max_maps, u8 page_shift, struct mlx4_fmr *fmr)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int err = -ENOMEM;
+
+ if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
+ return -EINVAL;
+
+ /* All MTTs must fit in the same page */
+ if (max_pages * sizeof *fmr->mtts > PAGE_SIZE)
+ return -EINVAL;
+
+ fmr->page_shift = page_shift;
+ fmr->max_pages = max_pages;
+ fmr->max_maps = max_maps;
+ fmr->maps = 0;
+
+ err = mlx4_mr_alloc_reserved(dev, mridx, pd, 0, 0, access, max_pages,
+ page_shift, &fmr->mr);
+ if (err)
+ return err;
+
+ fmr->mtts = mlx4_table_find(&priv->mr_table.mtt_table,
+ fmr->mr.mtt.offset,
+ &fmr->dma_handle);
+ if (!fmr->mtts) {
+ err = -ENOMEM;
+ goto err_free;
+ }
+
+ return 0;
+
+err_free:
+ mlx4_mr_free_reserved(dev, &fmr->mr);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_alloc_reserved);
+
int mlx4_fmr_enable(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -640,12 +913,32 @@ EXPORT_SYMBOL_GPL(mlx4_fmr_enable);
void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
u32 *lkey, u32 *rkey)
{
+ struct mlx4_cmd_mailbox *mailbox;
+ int err;
+
if (!fmr->maps)
return;
fmr->maps = 0;
- *(u8 *) fmr->mpt = MLX4_MPT_STATUS_SW;
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
+ printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox"
+ " failed (%d)\n", err);
+ return;
+ }
+
+ err = mlx4_HW2SW_MPT(dev, NULL,
+ key_to_hw_index(fmr->mr.key) &
+ (dev->caps.num_mpts - 1));
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ if (err) {
+ printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n",
+ err);
+ return;
+ }
+ fmr->mr.enabled = MLX4_MR_EN_SW;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_unmap);
@@ -654,15 +947,28 @@ int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
if (fmr->maps)
return -EBUSY;
- fmr->mr.enabled = 0;
mlx4_mr_free(dev, &fmr->mr);
+ fmr->mr.enabled = MLX4_MR_DISABLED;
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_fmr_free);
+int mlx4_fmr_free_reserved(struct mlx4_dev *dev, struct mlx4_fmr *fmr)
+{
+ if (fmr->maps)
+ return -EBUSY;
+
+ mlx4_mr_free_reserved(dev, &fmr->mr);
+ fmr->mr.enabled = MLX4_MR_DISABLED;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx4_fmr_free_reserved);
+
int mlx4_SYNC_TPT(struct mlx4_dev *dev)
{
- return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000);
+ return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_SYNC_TPT, 1000,
+ MLX4_CMD_WRAPPED);
}
EXPORT_SYMBOL_GPL(mlx4_SYNC_TPT);
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 260ed25..db4746d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -31,6 +31,7 @@
* SOFTWARE.
*/
+#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/io-mapping.h>
@@ -85,7 +86,8 @@ int mlx4_init_pd_table(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
return mlx4_bitmap_init(&priv->pd_bitmap, dev->caps.num_pds,
- (1 << 24) - 1, dev->caps.reserved_pds, 0);
+ (1 << NOT_MASKED_PD_BITS) - 1,
+ dev->caps.reserved_pds, 0);
}
void mlx4_cleanup_pd_table(struct mlx4_dev *dev)
@@ -108,13 +110,19 @@ void mlx4_cleanup_xrcd_table(struct mlx4_dev *dev)
int mlx4_uar_alloc(struct mlx4_dev *dev, struct mlx4_uar *uar)
{
+ int offset;
+
uar->index = mlx4_bitmap_alloc(&mlx4_priv(dev)->uar_table.bitmap);
if (uar->index == -1)
return -ENOMEM;
- uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + uar->index;
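+ /* Slaves see only a window of the UAR BAR, so wrap the index
+ * into the locally visible page range. */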
+ if (mlx4_is_slave(dev))
+ offset = uar->index % ((int) pci_resource_len(dev->pdev, 2) /
+ dev->caps.uar_page_size);
+ else
+ offset = uar->index;
+ uar->pfn = (pci_resource_start(dev->pdev, 2) >> PAGE_SHIFT) + offset;
uar->map = NULL;
-
return 0;
}
EXPORT_SYMBOL_GPL(mlx4_uar_alloc);
@@ -232,7 +240,7 @@ int mlx4_init_uar_table(struct mlx4_dev *dev)
return mlx4_bitmap_init(&mlx4_priv(dev)->uar_table.bitmap,
dev->caps.num_uars, dev->caps.num_uars - 1,
- max(128, dev->caps.reserved_uars), 0);
+ dev->caps.reserved_uars, 0);
}
void mlx4_cleanup_uar_table(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index d942aea..f44ae55 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -44,6 +44,11 @@
#define MLX4_VLAN_VALID (1u << 31)
#define MLX4_VLAN_MASK 0xfff
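+/* Counter groups in the DUMP_ETH_STATS output; combined into the
+ * bitmap reported by mlx4_set_stats_bitmap() below. */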
+#define MLX4_STATS_TRAFFIC_COUNTERS_MASK 0xfULL
+#define MLX4_STATS_TRAFFIC_DROPS_MASK 0xc0ULL
+#define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL
+#define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL
+
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
{
int i;
@@ -70,41 +75,12 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table)
table->total = 0;
}
-static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
- __be64 *entries)
-{
- struct mlx4_cmd_mailbox *mailbox;
- u32 in_mod;
- int err;
-
- mailbox = mlx4_alloc_cmd_mailbox(dev);
- if (IS_ERR(mailbox))
- return PTR_ERR(mailbox);
-
- memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
-
- in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
- err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
-
- mlx4_free_cmd_mailbox(dev, mailbox);
- return err;
-}
-
-static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
- u64 mac, int *qpn, u8 reserve)
+static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
{
struct mlx4_qp qp;
u8 gid[16] = {0};
int err;
- if (reserve) {
- err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
- if (err) {
- mlx4_err(dev, "Failed to reserve qp for mac registration\n");
- return err;
- }
- }
qp.qpn = *qpn;
mac &= 0xffffffffffffULL;
@@ -113,16 +89,15 @@ static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port,
gid[5] = port;
gid[7] = MLX4_UC_STEER << 1;
- err = mlx4_qp_attach_common(dev, &qp, gid, 0,
- MLX4_PROT_ETH, MLX4_UC_STEER);
- if (err && reserve)
- mlx4_qp_release_range(dev, *qpn, 1);
+ err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
+ if (err)
+ mlx4_warn(dev, "Failed Attaching Unicast\n");
return err;
}
static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
- u64 mac, int qpn, u8 free)
+ u64 mac, int qpn)
{
struct mlx4_qp qp;
u8 gid[16] = {0};
@@ -134,60 +109,164 @@ static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port,
gid[5] = port;
gid[7] = MLX4_UC_STEER << 1;
- mlx4_qp_detach_common(dev, &qp, gid, MLX4_PROT_ETH, MLX4_UC_STEER);
- if (free)
- mlx4_qp_release_range(dev, qpn, 1);
+ mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
+}
+
+static int validate_index(struct mlx4_dev *dev,
+ struct mlx4_mac_table *table, int index)
+{
+ int err = 0;
+
+ if (index < 0 || index >= table->max || !table->entries[index]) {
+ mlx4_warn(dev, "No valid Mac entry for the given index\n");
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static int find_index(struct mlx4_dev *dev,
+ struct mlx4_mac_table *table, u64 mac)
+{
+ int i;
+
+ for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+ if ((mac & MLX4_MAC_MASK) ==
+ (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
+ return i;
+ }
+ /* Mac not found */
+ return -EINVAL;
}
-int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
+int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
- struct mlx4_mac_table *table = &info->mac_table;
struct mlx4_mac_entry *entry;
- int i, err = 0;
- int free = -1;
+ int index = 0;
+ int err = 0;
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
- err = mlx4_uc_steer_add(dev, port, mac, qpn, 1);
- if (err)
- return err;
+ mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n",
+ (unsigned long long) mac);
+ index = mlx4_register_mac(dev, port, mac);
+ if (index < 0) {
+ err = index;
+ mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
+ (unsigned long long) mac);
+ return err;
+ }
- entry = kmalloc(sizeof *entry, GFP_KERNEL);
- if (!entry) {
- mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
- return -ENOMEM;
- }
+ if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) {
+ *qpn = info->base_qpn + index;
+ return 0;
+ }
+
+ err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
+ mlx4_dbg(dev, "Reserved qp %d\n", *qpn);
+ if (err) {
+ mlx4_err(dev, "Failed to reserve qp for mac registration\n");
+ goto qp_err;
+ }
+
+ err = mlx4_uc_steer_add(dev, port, mac, qpn);
+ if (err)
+ goto steer_err;
+
+ entry = kmalloc(sizeof *entry, GFP_KERNEL);
+ if (!entry) {
+ err = -ENOMEM;
+ goto alloc_err;
+ }
+ entry->mac = mac;
+ err = radix_tree_insert(&info->mac_tree, *qpn, entry);
+ if (err)
+ goto insert_err;
+ return 0;
- entry->mac = mac;
- err = radix_tree_insert(&info->mac_tree, *qpn, entry);
- if (err) {
+insert_err:
+ kfree(entry);
+
+alloc_err:
+ mlx4_uc_steer_release(dev, port, mac, *qpn);
+
+steer_err:
+ mlx4_qp_release_range(dev, *qpn, 1);
+
+qp_err:
+ mlx4_unregister_mac(dev, port, mac);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx4_get_eth_qp);
+
+void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn)
+{
+ struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+ struct mlx4_mac_entry *entry;
+
+ mlx4_dbg(dev, "Registering MAC: 0x%llx for deleting\n",
+ (unsigned long long) mac);
+ mlx4_unregister_mac(dev, port, mac);
+
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
+ entry = radix_tree_lookup(&info->mac_tree, qpn);
+ if (entry) {
+ mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx,"
+ " qpn %d\n", port,
+ (unsigned long long) mac, qpn);
+ mlx4_uc_steer_release(dev, port, entry->mac, qpn);
+ mlx4_qp_release_range(dev, qpn, 1);
+ radix_tree_delete(&info->mac_tree, qpn);
kfree(entry);
- mlx4_uc_steer_release(dev, port, mac, *qpn, 1);
- return err;
}
}
+}
+EXPORT_SYMBOL_GPL(mlx4_put_eth_qp);
- mlx4_dbg(dev, "Registering MAC: 0x%llx\n", (unsigned long long) mac);
+static int mlx4_set_port_mac_table(struct mlx4_dev *dev, u8 port,
+ __be64 *entries)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 in_mod;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+
+ memcpy(mailbox->buf, entries, MLX4_MAC_TABLE_SIZE);
+
+ in_mod = MLX4_SET_PORT_MAC_TABLE << 8 | port;
+
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int __mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
+{
+ struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
+ struct mlx4_mac_table *table = &info->mac_table;
+ int i, err = 0;
+ int free = -1;
+
+ mlx4_dbg(dev, "Registering MAC: 0x%llx for port %d\n",
+ (unsigned long long) mac, port);
mutex_lock(&table->mutex);
- for (i = 0; i < MLX4_MAX_MAC_NUM - 1; i++) {
- if (free < 0 && !table->refs[i]) {
+ for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
+ if (free < 0 && !table->entries[i]) {
free = i;
continue;
}
if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i]))) {
- /* MAC already registered, increase references count */
- ++table->refs[i];
+ /* MAC already registered; must not have duplicates */
+ err = -EEXIST;
goto out;
}
}
- if (free < 0) {
- err = -ENOMEM;
- goto out;
- }
-
mlx4_dbg(dev, "Free MAC index is %d\n", free);
if (table->total == table->max) {
@@ -197,103 +276,103 @@ int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn, u8 wrap)
}
/* Register new MAC */
- table->refs[free] = 1;
table->entries[free] = cpu_to_be64(mac | MLX4_MAC_VALID);
err = mlx4_set_port_mac_table(dev, port, table->entries);
if (unlikely(err)) {
- mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) mac);
- table->refs[free] = 0;
+ mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
+ (unsigned long long) mac);
table->entries[free] = 0;
goto out;
}
- if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
- *qpn = info->base_qpn + free;
+ err = free;
++table->total;
out:
mutex_unlock(&table->mutex);
return err;
}
-EXPORT_SYMBOL_GPL(mlx4_register_mac);
+EXPORT_SYMBOL_GPL(__mlx4_register_mac);
-static int validate_index(struct mlx4_dev *dev,
- struct mlx4_mac_table *table, int index)
+int mlx4_register_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
- int err = 0;
+ u64 out_param;
+ int err;
- if (index < 0 || index >= table->max || !table->entries[index]) {
- mlx4_warn(dev, "No valid Mac entry for the given index\n");
- err = -EINVAL;
- }
- return err;
-}
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&out_param, port);
+ err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ return err;
-static int find_index(struct mlx4_dev *dev,
- struct mlx4_mac_table *table, u64 mac)
-{
- int i;
- for (i = 0; i < MLX4_MAX_MAC_NUM; i++) {
- if (mac == (MLX4_MAC_MASK & be64_to_cpu(table->entries[i])))
- return i;
+ return get_param_l(&out_param);
}
- /* Mac not found */
- return -EINVAL;
+ return __mlx4_register_mac(dev, port, mac);
}
+EXPORT_SYMBOL_GPL(mlx4_register_mac);
+
-void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, int qpn)
+void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
struct mlx4_mac_table *table = &info->mac_table;
- int index = qpn - info->base_qpn;
- struct mlx4_mac_entry *entry;
+ int index;
- if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
- entry = radix_tree_lookup(&info->mac_tree, qpn);
- if (entry) {
- mlx4_uc_steer_release(dev, port, entry->mac, qpn, 1);
- radix_tree_delete(&info->mac_tree, qpn);
- index = find_index(dev, table, entry->mac);
- kfree(entry);
- }
- }
+ index = find_index(dev, table, mac);
mutex_lock(&table->mutex);
if (validate_index(dev, table, index))
goto out;
- /* Check whether this address has reference count */
- if (!(--table->refs[index])) {
- table->entries[index] = 0;
- mlx4_set_port_mac_table(dev, port, table->entries);
- --table->total;
- }
+ table->entries[index] = 0;
+ mlx4_set_port_mac_table(dev, port, table->entries);
+ --table->total;
out:
mutex_unlock(&table->mutex);
}
+EXPORT_SYMBOL_GPL(__mlx4_unregister_mac);
+
+void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
+{
+ u64 out_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&out_param, port);
+ err = mlx4_cmd_imm(dev, mac, &out_param, RES_MAC,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ return;
+ }
+ __mlx4_unregister_mac(dev, port, mac);
+ return;
+}
EXPORT_SYMBOL_GPL(mlx4_unregister_mac);
-int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wrap)
+int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac)
{
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
struct mlx4_mac_table *table = &info->mac_table;
- int index = qpn - info->base_qpn;
struct mlx4_mac_entry *entry;
- int err;
+ int index = qpn - info->base_qpn;
+ int err = 0;
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) {
entry = radix_tree_lookup(&info->mac_tree, qpn);
if (!entry)
return -EINVAL;
- index = find_index(dev, table, entry->mac);
- mlx4_uc_steer_release(dev, port, entry->mac, qpn, 0);
+ mlx4_uc_steer_release(dev, port, entry->mac, qpn);
+ mlx4_unregister_mac(dev, port, entry->mac);
entry->mac = new_mac;
- err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn, 0);
- if (err || index < 0)
- return err;
+ mlx4_register_mac(dev, port, new_mac);
+ err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn);
+ return err;
}
+ /* CX1 doesn't support multi-functions */
mutex_lock(&table->mutex);
err = validate_index(dev, table, index);
@@ -304,7 +383,8 @@ int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac, u8 wra
err = mlx4_set_port_mac_table(dev, port, table->entries);
if (unlikely(err)) {
- mlx4_err(dev, "Failed adding MAC: 0x%llx\n", (unsigned long long) new_mac);
+ mlx4_err(dev, "Failed adding MAC: 0x%llx\n",
+ (unsigned long long) new_mac);
table->entries[index] = 0;
}
out:
@@ -312,6 +392,7 @@ out:
return err;
}
EXPORT_SYMBOL_GPL(mlx4_replace_mac);
+
static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
__be32 *entries)
{
@@ -326,7 +407,7 @@ static int mlx4_set_port_vlan_table(struct mlx4_dev *dev, u8 port,
memcpy(mailbox->buf, entries, MLX4_VLAN_TABLE_SIZE);
in_mod = MLX4_SET_PORT_VLAN_TABLE << 8 | port;
err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -352,7 +433,8 @@ int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx)
}
EXPORT_SYMBOL_GPL(mlx4_find_cached_vlan);
-int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
+static int __mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan,
+ int *index)
{
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
int i, err = 0;
@@ -387,7 +469,7 @@ int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
goto out;
}
- /* Register new MAC */
+ /* Register new VLAN */
table->refs[free] = 1;
table->entries[free] = cpu_to_be32(vlan | MLX4_VLAN_VALID);
@@ -405,9 +487,27 @@ out:
mutex_unlock(&table->mutex);
return err;
}
+
+int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index)
+{
+ u64 out_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&out_param, port);
+ err = mlx4_cmd_imm(dev, vlan, &out_param, RES_VLAN,
+ RES_OP_RESERVE_AND_MAP, MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (!err)
+ *index = get_param_l(&out_param);
+
+ return err;
+ }
+ return __mlx4_register_vlan(dev, port, vlan, index);
+}
EXPORT_SYMBOL_GPL(mlx4_register_vlan);
-void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+static void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
{
struct mlx4_vlan_table *table = &mlx4_priv(dev)->port[port].vlan_table;
@@ -432,6 +532,25 @@ void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
out:
mutex_unlock(&table->mutex);
}
+
+void mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, int index)
+{
+ u64 in_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, port);
+ err = mlx4_cmd(dev, in_param, RES_VLAN, RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+ if (err)
+ mlx4_warn(dev, "Failed freeing vlan at index:%d\n",
+ index);
+
+ return;
+ }
+ __mlx4_unregister_vlan(dev, port, index);
+}
EXPORT_SYMBOL_GPL(mlx4_unregister_vlan);
int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
@@ -462,7 +581,8 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
- MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
if (!err)
*caps = *(__be32 *) (outbuf + 84);
mlx4_free_cmd_mailbox(dev, inmailbox);
@@ -499,7 +619,8 @@ int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port)
*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
- MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C);
+ MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
+ MLX4_CMD_NATIVE);
packet_error = be16_to_cpu(*(__be16 *) (outbuf + 4));
@@ -512,6 +633,139 @@ int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port)
return err;
}
+static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
+ u8 op_mod, struct mlx4_cmd_mailbox *inbox)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_port_info *port_info;
+ struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
+ struct mlx4_slave_state *slave_st = &master->slave_state[slave];
+ struct mlx4_set_port_rqp_calc_context *qpn_context;
+ struct mlx4_set_port_general_context *gen_context;
+ int reset_qkey_viols;
+ int port;
+ int is_eth;
+ u32 in_modifier;
+ u32 promisc;
+ u16 mtu, prev_mtu;
+ int err;
+ int i;
+ __be32 agg_cap_mask;
+ __be32 slave_cap_mask;
+ __be32 new_cap_mask;
+
+ port = in_mod & 0xff;
+ in_modifier = in_mod >> 8;
+ is_eth = op_mod;
+ port_info = &priv->port[port];
+
+ /* Slaves cannot perform SET_PORT operations except changing MTU */
+ if (is_eth) {
+ if (slave != dev->caps.function &&
+ in_modifier != MLX4_SET_PORT_GENERAL) {
+ mlx4_warn(dev, "denying SET_PORT for slave:%d\n",
+ slave);
+ return -EINVAL;
+ }
+ switch (in_modifier) {
+ case MLX4_SET_PORT_RQP_CALC:
+ qpn_context = inbox->buf;
+ qpn_context->base_qpn =
+ cpu_to_be32(port_info->base_qpn);
+ qpn_context->n_mac = 0x7;
+ promisc = be32_to_cpu(qpn_context->promisc) >>
+ SET_PORT_PROMISC_SHIFT;
+ qpn_context->promisc = cpu_to_be32(
+ promisc << SET_PORT_PROMISC_SHIFT |
+ port_info->base_qpn);
+ promisc = be32_to_cpu(qpn_context->mcast) >>
+ SET_PORT_MC_PROMISC_SHIFT;
+ qpn_context->mcast = cpu_to_be32(
+ promisc << SET_PORT_MC_PROMISC_SHIFT |
+ port_info->base_qpn);
+ break;
+ case MLX4_SET_PORT_GENERAL:
+ gen_context = inbox->buf;
+ /* MTU is configured as the max MTU among all
+ * the functions on the port. */
+ mtu = be16_to_cpu(gen_context->mtu);
+ mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port]);
+ prev_mtu = slave_st->mtu[port];
+ slave_st->mtu[port] = mtu;
+ if (mtu > master->max_mtu[port])
+ master->max_mtu[port] = mtu;
+ if (mtu < prev_mtu && prev_mtu ==
+ master->max_mtu[port]) {
+ slave_st->mtu[port] = mtu;
+ master->max_mtu[port] = mtu;
+ for (i = 0; i < dev->num_slaves; i++) {
+ master->max_mtu[port] =
+ max(master->max_mtu[port],
+ master->slave_state[i].mtu[port]);
+ }
+ }
+
+ gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
+ break;
+ }
+ return mlx4_cmd(dev, inbox->dma, in_mod, op_mod,
+ MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+ }
+
+ /* For IB, we only consider:
+ * - The capability mask, which is set to the aggregate of all
+ * slave function capabilities
+ * - The QKey violation counter - reset according to each request.
+ */
+
+ if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
+ reset_qkey_viols = (*(u8 *) inbox->buf) & 0x40;
+ new_cap_mask = ((__be32 *) inbox->buf)[2];
+ } else {
+ reset_qkey_viols = ((u8 *) inbox->buf)[3] & 0x1;
+ new_cap_mask = ((__be32 *) inbox->buf)[1];
+ }
+
+ agg_cap_mask = 0;
+ slave_cap_mask =
+ priv->mfunc.master.slave_state[slave].ib_cap_mask[port];
+ priv->mfunc.master.slave_state[slave].ib_cap_mask[port] = new_cap_mask;
+ for (i = 0; i < dev->num_slaves; i++)
+ agg_cap_mask |=
+ priv->mfunc.master.slave_state[i].ib_cap_mask[port];
+
+ /* only clear mailbox for guests. Master may be setting
+ * MTU or PKEY table size
+ */
+ if (slave != dev->caps.function)
+ memset(inbox->buf, 0, 256);
+ if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
+ *(u8 *) inbox->buf = !!reset_qkey_viols << 6;
+ ((__be32 *) inbox->buf)[2] = agg_cap_mask;
+ } else {
+ ((u8 *) inbox->buf)[3] = !!reset_qkey_viols;
+ ((__be32 *) inbox->buf)[1] = agg_cap_mask;
+ }
+
+ err = mlx4_cmd(dev, inbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
+ if (err)
+ priv->mfunc.master.slave_state[slave].ib_cap_mask[port] =
+ slave_cap_mask;
+ return err;
+}
+
+int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ return mlx4_common_set_port(dev, slave, vhcr->in_modifier,
+ vhcr->op_modifier, inbox);
+}
+
int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -528,8 +782,145 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+
+int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
+ u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_general_context *context;
+ int err;
+ u32 in_mod;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ context = mailbox->buf;
+ memset(context, 0, sizeof *context);
+
+ context->flags = SET_PORT_GEN_ALL_VALID;
+ context->mtu = cpu_to_be16(mtu);
+ context->pptx = (pptx * (!pfctx)) << 7;
+ context->pfctx = pfctx;
+ context->pprx = (pprx * (!pfcrx)) << 7;
+ context->pfcrx = pfcrx;
+
+ in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
mlx4_free_cmd_mailbox(dev, mailbox);
return err;
}
+EXPORT_SYMBOL(mlx4_SET_PORT_general);
+
+int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
+ u8 promisc)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_rqp_calc_context *context;
+ int err;
+ u32 in_mod;
+ u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ?
+ MCAST_DIRECT : MCAST_DEFAULT;
+
+ if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER &&
+ dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)
+ return 0;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ context = mailbox->buf;
+ memset(context, 0, sizeof *context);
+
+ context->base_qpn = cpu_to_be32(base_qpn);
+ context->n_mac = dev->caps.log_num_macs;
+ context->promisc = cpu_to_be32(promisc << SET_PORT_PROMISC_SHIFT |
+ base_qpn);
+ context->mcast = cpu_to_be32(m_promisc << SET_PORT_MC_PROMISC_SHIFT |
+ base_qpn);
+ context->intra_no_vlan = 0;
+ context->no_vlan = MLX4_NO_VLAN_IDX;
+ context->intra_vlan_miss = 0;
+ context->vlan_miss = MLX4_VLAN_MISS_IDX;
+
+ in_mod = MLX4_SET_PORT_RQP_CALC << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
+
+int mlx4_SET_MCAST_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err = 0;
+
+ return err;
+}
+
+int mlx4_SET_MCAST_FLTR(struct mlx4_dev *dev, u8 port,
+ u64 mac, u64 clear, u8 mode)
+{
+ return mlx4_cmd(dev, (mac | (clear << 63)), port, mode,
+ MLX4_CMD_SET_MCAST_FLTR, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
+}
+EXPORT_SYMBOL(mlx4_SET_MCAST_FLTR);
+
+int mlx4_SET_VLAN_FLTR_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err = 0;
+
+ return err;
+}
+
+int mlx4_common_dump_eth_stats(struct mlx4_dev *dev, int slave,
+ u32 in_mod, struct mlx4_cmd_mailbox *outbox)
+{
+ return mlx4_cmd_box(dev, 0, outbox->dma, in_mod, 0,
+ MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+}
+
+int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ if (slave != dev->caps.function)
+ return 0;
+ return mlx4_common_dump_eth_stats(dev, slave,
+ vhcr->in_modifier, outbox);
+}
+
+void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap)
+{
+ if (!mlx4_is_mfunc(dev)) {
+ *stats_bitmap = 0;
+ return;
+ }
+
+ *stats_bitmap = (MLX4_STATS_TRAFFIC_COUNTERS_MASK |
+ MLX4_STATS_TRAFFIC_DROPS_MASK |
+ MLX4_STATS_PORT_COUNTERS_MASK);
+
+ if (mlx4_is_master(dev))
+ *stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK;
+}
+EXPORT_SYMBOL(mlx4_set_stats_bitmap);
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index b967647..1129677 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -98,8 +98,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
profile[MLX4_RES_EQ].size = dev_cap->eqc_entry_sz;
profile[MLX4_RES_DMPT].size = dev_cap->dmpt_entry_sz;
profile[MLX4_RES_CMPT].size = dev_cap->cmpt_entry_sz;
- profile[MLX4_RES_MTT].size = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
- profile[MLX4_RES_MCG].size = MLX4_MGM_ENTRY_SIZE;
+ profile[MLX4_RES_MTT].size = dev_cap->mtt_entry_sz;
+ profile[MLX4_RES_MCG].size = mlx4_get_mgm_entry_size(dev);
profile[MLX4_RES_QP].num = request->num_qp;
profile[MLX4_RES_RDMARC].num = request->num_qp * request->rdmarc_per_qp;
@@ -110,7 +110,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
profile[MLX4_RES_DMPT].num = request->num_mpt;
profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
- profile[MLX4_RES_MTT].num = request->num_mtt;
+ profile[MLX4_RES_MTT].num = request->num_mtt * (1 << log_mtts_per_seg);
profile[MLX4_RES_MCG].num = request->num_mcg;
for (i = 0; i < MLX4_RES_NUM; ++i) {
@@ -210,7 +210,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
init_hca->cmpt_base = profile[i].start;
break;
case MLX4_RES_MTT:
- dev->caps.num_mtt_segs = profile[i].num;
+ dev->caps.num_mtts = profile[i].num;
priv->mr_table.mtt_base = profile[i].start;
init_hca->mtt_base = profile[i].start;
break;
@@ -218,7 +218,8 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
dev->caps.num_mgms = profile[i].num >> 1;
dev->caps.num_amgms = profile[i].num >> 1;
init_hca->mc_base = profile[i].start;
- init_hca->log_mc_entry_sz = ilog2(MLX4_MGM_ENTRY_SIZE);
+ init_hca->log_mc_entry_sz =
+ ilog2(mlx4_get_mgm_entry_size(dev));
init_hca->log_mc_table_sz = profile[i].log_num;
init_hca->log_mc_hash_sz = profile[i].log_num - 1;
break;
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 15f870c..fb2b367 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -35,6 +35,8 @@
#include <linux/gfp.h>
#include <linux/export.h>
+#include <linux/init.h>
+
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
@@ -55,7 +57,7 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
spin_unlock(&qp_table->lock);
if (!qp) {
- mlx4_warn(dev, "Async event for bogus QP %08x\n", qpn);
+ mlx4_dbg(dev, "Async event for none existent QP %08x\n", qpn);
return;
}
@@ -65,10 +67,17 @@ void mlx4_qp_event(struct mlx4_dev *dev, u32 qpn, int event_type)
complete(&qp->free);
}
-int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
- enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
- struct mlx4_qp_context *context, enum mlx4_qp_optpar optpar,
- int sqd_event, struct mlx4_qp *qp)
+static int is_qp0(struct mlx4_dev *dev, struct mlx4_qp *qp)
+{
+ return qp->qpn >= dev->caps.sqp_start &&
+ qp->qpn <= dev->caps.sqp_start + 1;
+}
+
+static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
+ struct mlx4_qp_context *context,
+ enum mlx4_qp_optpar optpar,
+ int sqd_event, struct mlx4_qp *qp, int native)
{
static const u16 op[MLX4_QP_NUM_STATE][MLX4_QP_NUM_STATE] = {
[MLX4_QP_STATE_RST] = {
@@ -110,16 +119,26 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
}
};
+ struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_cmd_mailbox *mailbox;
int ret = 0;
+ u8 port;
if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
!op[cur_state][new_state])
return -EINVAL;
- if (op[cur_state][new_state] == MLX4_CMD_2RST_QP)
- return mlx4_cmd(dev, 0, qp->qpn, 2,
- MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A);
+ if (op[cur_state][new_state] == MLX4_CMD_2RST_QP) {
+ ret = mlx4_cmd(dev, 0, qp->qpn, 2,
+ MLX4_CMD_2RST_QP, MLX4_CMD_TIME_CLASS_A, native);
+ if (mlx4_is_master(dev) && cur_state != MLX4_QP_STATE_ERR &&
+ cur_state != MLX4_QP_STATE_RST &&
+ is_qp0(dev, qp)) {
+ port = (qp->qpn & 1) + 1;
+ priv->mfunc.master.qp0_state[port].qp0_active = 0;
+ }
+ return ret;
+ }
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox))
@@ -138,101 +157,207 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
cpu_to_be32(qp->qpn);
- ret = mlx4_cmd(dev, mailbox->dma, qp->qpn | (!!sqd_event << 31),
+ ret = mlx4_cmd(dev, mailbox->dma,
+ qp->qpn | (!!sqd_event << 31),
new_state == MLX4_QP_STATE_RST ? 2 : 0,
- op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C);
+ op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);
mlx4_free_cmd_mailbox(dev, mailbox);
return ret;
}
+
+int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
+ enum mlx4_qp_state cur_state, enum mlx4_qp_state new_state,
+ struct mlx4_qp_context *context,
+ enum mlx4_qp_optpar optpar,
+ int sqd_event, struct mlx4_qp *qp)
+{
+ return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context,
+ optpar, sqd_event, qp, 0);
+}
EXPORT_SYMBOL_GPL(mlx4_qp_modify);
-int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
+int __mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align,
+ int *base)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
- int qpn;
- qpn = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
- if (qpn == -1)
+ *base = mlx4_bitmap_alloc_range(&qp_table->bitmap, cnt, align);
+ if (*base == -1)
return -ENOMEM;
- *base = qpn;
return 0;
}
+
+int mlx4_qp_reserve_range(struct mlx4_dev *dev, int cnt, int align, int *base)
+{
+ u64 in_param;
+ u64 out_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, cnt);
+ set_param_h(&in_param, align);
+ err = mlx4_cmd_imm(dev, in_param, &out_param,
+ RES_QP, RES_OP_RESERVE,
+ MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err)
+ return err;
+
+ *base = get_param_l(&out_param);
+ return 0;
+ }
+ return __mlx4_qp_reserve_range(dev, cnt, align, base);
+}
EXPORT_SYMBOL_GPL(mlx4_qp_reserve_range);
-void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+void __mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
- if (base_qpn < dev->caps.sqp_start + 8)
- return;
+ if (mlx4_is_qp_reserved(dev, (u32) base_qpn))
+ return;
mlx4_bitmap_free_range(&qp_table->bitmap, base_qpn, cnt);
}
+
+void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
+{
+ u64 in_param;
+ int err;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, base_qpn);
+ set_param_h(&in_param, cnt);
+ err = mlx4_cmd(dev, in_param, RES_QP, RES_OP_RESERVE,
+ MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (err) {
+ mlx4_warn(dev, "Failed to release qp range"
+ " base:%d cnt:%d\n", base_qpn, cnt);
+ }
+ } else
+ __mlx4_qp_release_range(dev, base_qpn, cnt);
+}
EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
-int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
int err;
- if (!qpn)
- return -EINVAL;
-
- qp->qpn = qpn;
-
- err = mlx4_table_get(dev, &qp_table->qp_table, qp->qpn);
+ err = mlx4_table_get(dev, &qp_table->qp_table, qpn);
if (err)
goto err_out;
- err = mlx4_table_get(dev, &qp_table->auxc_table, qp->qpn);
+ err = mlx4_table_get(dev, &qp_table->auxc_table, qpn);
if (err)
goto err_put_qp;
- err = mlx4_table_get(dev, &qp_table->altc_table, qp->qpn);
+ err = mlx4_table_get(dev, &qp_table->altc_table, qpn);
if (err)
goto err_put_auxc;
- err = mlx4_table_get(dev, &qp_table->rdmarc_table, qp->qpn);
+ err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn);
if (err)
goto err_put_altc;
- err = mlx4_table_get(dev, &qp_table->cmpt_table, qp->qpn);
+ err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn);
if (err)
goto err_put_rdmarc;
- spin_lock_irq(&qp_table->lock);
- err = radix_tree_insert(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1), qp);
- spin_unlock_irq(&qp_table->lock);
- if (err)
- goto err_put_cmpt;
-
- atomic_set(&qp->refcount, 1);
- init_completion(&qp->free);
-
return 0;
-err_put_cmpt:
- mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
-
err_put_rdmarc:
- mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
+ mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
err_put_altc:
- mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
+ mlx4_table_put(dev, &qp_table->altc_table, qpn);
err_put_auxc:
- mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
+ mlx4_table_put(dev, &qp_table->auxc_table, qpn);
err_put_qp:
- mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
+ mlx4_table_put(dev, &qp_table->qp_table, qpn);
err_out:
return err;
}
+
+static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
+{
+ u64 param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&param, qpn);
+ return mlx4_cmd_imm(dev, param, &param, RES_QP, RES_OP_MAP_ICM,
+ MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
+ }
+ return __mlx4_qp_alloc_icm(dev, qpn);
+}
+
+void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_qp_table *qp_table = &priv->qp_table;
+
+ mlx4_table_put(dev, &qp_table->cmpt_table, qpn);
+ mlx4_table_put(dev, &qp_table->rdmarc_table, qpn);
+ mlx4_table_put(dev, &qp_table->altc_table, qpn);
+ mlx4_table_put(dev, &qp_table->auxc_table, qpn);
+ mlx4_table_put(dev, &qp_table->qp_table, qpn);
+}
+
+static void mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
+{
+ u64 in_param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, qpn);
+ if (mlx4_cmd(dev, in_param, RES_QP, RES_OP_MAP_ICM,
+ MLX4_CMD_FREE_RES, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED))
+ mlx4_warn(dev, "Failed to free icm of qp:%d\n", qpn);
+ } else
+ __mlx4_qp_free_icm(dev, qpn);
+}
+
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_qp_table *qp_table = &priv->qp_table;
+ int err;
+
+ if (!qpn)
+ return -EINVAL;
+
+ qp->qpn = qpn;
+
+ err = mlx4_qp_alloc_icm(dev, qpn);
+ if (err)
+ return err;
+
+ spin_lock_irq(&qp_table->lock);
+ err = radix_tree_insert(&dev->qp_table_tree, qp->qpn &
+ (dev->caps.num_qps - 1), qp);
+ spin_unlock_irq(&qp_table->lock);
+ if (err)
+ goto err_icm;
+
+ atomic_set(&qp->refcount, 1);
+ init_completion(&qp->free);
+
+ return 0;
+
+err_icm:
+ mlx4_qp_free_icm(dev, qpn);
+ return err;
+}
+
EXPORT_SYMBOL_GPL(mlx4_qp_alloc);
void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp)
@@ -248,24 +373,18 @@ EXPORT_SYMBOL_GPL(mlx4_qp_remove);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp)
{
- struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
-
if (atomic_dec_and_test(&qp->refcount))
complete(&qp->free);
wait_for_completion(&qp->free);
- mlx4_table_put(dev, &qp_table->cmpt_table, qp->qpn);
- mlx4_table_put(dev, &qp_table->rdmarc_table, qp->qpn);
- mlx4_table_put(dev, &qp_table->altc_table, qp->qpn);
- mlx4_table_put(dev, &qp_table->auxc_table, qp->qpn);
- mlx4_table_put(dev, &qp_table->qp_table, qp->qpn);
+ mlx4_qp_free_icm(dev, qp->qpn);
}
EXPORT_SYMBOL_GPL(mlx4_qp_free);
static int mlx4_CONF_SPECIAL_QP(struct mlx4_dev *dev, u32 base_qpn)
{
return mlx4_cmd(dev, 0, base_qpn, 0, MLX4_CMD_CONF_SPECIAL_QP,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
int mlx4_init_qp_table(struct mlx4_dev *dev)
@@ -276,6 +395,8 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
spin_lock_init(&qp_table->lock);
INIT_RADIX_TREE(&dev->qp_table_tree, GFP_ATOMIC);
+ if (mlx4_is_slave(dev))
+ return 0;
/*
* We reserve 2 extra QPs per port for the special QPs. The
@@ -327,6 +448,9 @@ int mlx4_init_qp_table(struct mlx4_dev *dev)
void mlx4_cleanup_qp_table(struct mlx4_dev *dev)
{
+ if (mlx4_is_slave(dev))
+ return;
+
mlx4_CONF_SPECIAL_QP(dev, 0);
mlx4_bitmap_cleanup(&mlx4_priv(dev)->qp_table.bitmap);
}
@@ -342,7 +466,8 @@ int mlx4_qp_query(struct mlx4_dev *dev, struct mlx4_qp *qp,
return PTR_ERR(mailbox);
err = mlx4_cmd_box(dev, 0, mailbox->dma, qp->qpn, 0,
- MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_QUERY_QP, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
if (!err)
memcpy(context, mailbox->buf + 8, sizeof *context);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
new file mode 100644
index 0000000..8752e6e
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -0,0 +1,3073 @@
+/*
+ * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
+ * All rights reserved.
+ * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/sched.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/mlx4/cmd.h>
+#include <linux/mlx4/qp.h>
+
+#include "mlx4.h"
+#include "fw.h"
+
+#define MLX4_MAC_VALID (1ull << 63)
+#define MLX4_MAC_MASK 0x7fffffffffffffffULL
+#define ETH_ALEN 6
+
+struct mac_res {
+ struct list_head list;
+ u64 mac;
+ u8 port;
+};
+
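+/*
+ * Common header embedded in every tracked resource. Each entry is linked
+ * into the owning slave's per-type list and indexed by res_id in the
+ * per-type radix tree; the state fields implement the per-resource state
+ * machine used while commands that touch the resource are in flight.
+ */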
+struct res_common {
+ struct list_head list;
+ u32 res_id;
+ int owner;
+ int state;
+ int from_state;
+ int to_state;
+ int removing;
+};
+
+enum {
+ RES_ANY_BUSY = 1
+};
+
+struct res_gid {
+ struct list_head list;
+ u8 gid[16];
+ enum mlx4_protocol prot;
+ enum mlx4_steer_type steer;
+};
+
+enum res_qp_states {
+ RES_QP_BUSY = RES_ANY_BUSY,
+
+ /* QP number was allocated */
+ RES_QP_RESERVED,
+
+ /* ICM memory for QP context was mapped */
+ RES_QP_MAPPED,
+
+ /* QP is in hw ownership */
+ RES_QP_HW
+};
+
+static inline const char *qp_states_str(enum res_qp_states state)
+{
+ switch (state) {
+ case RES_QP_BUSY: return "RES_QP_BUSY";
+ case RES_QP_RESERVED: return "RES_QP_RESERVED";
+ case RES_QP_MAPPED: return "RES_QP_MAPPED";
+ case RES_QP_HW: return "RES_QP_HW";
+ default: return "Unknown";
+ }
+}
+
+struct res_qp {
+ struct res_common com;
+ struct res_mtt *mtt;
+ struct res_cq *rcq;
+ struct res_cq *scq;
+ struct res_srq *srq;
+ struct list_head mcg_list;
+ spinlock_t mcg_spl;
+ int local_qpn;
+};
+
+enum res_mtt_states {
+ RES_MTT_BUSY = RES_ANY_BUSY,
+ RES_MTT_ALLOCATED,
+};
+
+static inline const char *mtt_states_str(enum res_mtt_states state)
+{
+ switch (state) {
+ case RES_MTT_BUSY: return "RES_MTT_BUSY";
+ case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
+ default: return "Unknown";
+ }
+}
+
+struct res_mtt {
+ struct res_common com;
+ int order;
+ atomic_t ref_count;
+};
+
+enum res_mpt_states {
+ RES_MPT_BUSY = RES_ANY_BUSY,
+ RES_MPT_RESERVED,
+ RES_MPT_MAPPED,
+ RES_MPT_HW,
+};
+
+struct res_mpt {
+ struct res_common com;
+ struct res_mtt *mtt;
+ int key;
+};
+
+enum res_eq_states {
+ RES_EQ_BUSY = RES_ANY_BUSY,
+ RES_EQ_RESERVED,
+ RES_EQ_HW,
+};
+
+struct res_eq {
+ struct res_common com;
+ struct res_mtt *mtt;
+};
+
+enum res_cq_states {
+ RES_CQ_BUSY = RES_ANY_BUSY,
+ RES_CQ_ALLOCATED,
+ RES_CQ_HW,
+};
+
+struct res_cq {
+ struct res_common com;
+ struct res_mtt *mtt;
+ atomic_t ref_count;
+};
+
+enum res_srq_states {
+ RES_SRQ_BUSY = RES_ANY_BUSY,
+ RES_SRQ_ALLOCATED,
+ RES_SRQ_HW,
+};
+
+static inline const char *srq_states_str(enum res_srq_states state)
+{
+ switch (state) {
+ case RES_SRQ_BUSY: return "RES_SRQ_BUSY";
+ case RES_SRQ_ALLOCATED: return "RES_SRQ_ALLOCATED";
+ case RES_SRQ_HW: return "RES_SRQ_HW";
+ default: return "Unknown";
+ }
+}
+
+struct res_srq {
+ struct res_common com;
+ struct res_mtt *mtt;
+ struct res_cq *cq;
+ atomic_t ref_count;
+};
+
+enum res_counter_states {
+ RES_COUNTER_BUSY = RES_ANY_BUSY,
+ RES_COUNTER_ALLOCATED,
+};
+
+static inline const char *counter_states_str(enum res_counter_states state)
+{
+ switch (state) {
+ case RES_COUNTER_BUSY: return "RES_COUNTER_BUSY";
+ case RES_COUNTER_ALLOCATED: return "RES_COUNTER_ALLOCATED";
+ default: return "Unknown";
+ }
+}
+
+struct res_counter {
+ struct res_common com;
+ int port;
+};
+
+/* For debugging purposes */
+static const char *ResourceType(enum mlx4_resource rt)
+{
+ switch (rt) {
+ case RES_QP: return "RES_QP";
+ case RES_CQ: return "RES_CQ";
+ case RES_SRQ: return "RES_SRQ";
+ case RES_MPT: return "RES_MPT";
+ case RES_MTT: return "RES_MTT";
+ case RES_MAC: return "RES_MAC";
+ case RES_EQ: return "RES_EQ";
+ case RES_COUNTER: return "RES_COUNTER";
+ default: return "Unknown resource type !!!";
+ }
+}
+
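+/*
+ * Set up the master's resource tracker: one list per slave and resource
+ * type plus one radix tree per resource type, all protected by the
+ * tracker spinlock.
+ */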
+int mlx4_init_resource_tracker(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i;
+ int t;
+
+ priv->mfunc.master.res_tracker.slave_list =
+ kzalloc(dev->num_slaves * sizeof(struct slave_list),
+ GFP_KERNEL);
+ if (!priv->mfunc.master.res_tracker.slave_list)
+ return -ENOMEM;
+
+ for (i = 0 ; i < dev->num_slaves; i++) {
+ for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
+ INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
+ slave_list[i].res_list[t]);
+ mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
+ }
+
+ mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
+ dev->num_slaves);
+ for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
+ INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i],
+ GFP_ATOMIC|__GFP_NOWARN);
+
+ spin_lock_init(&priv->mfunc.master.res_tracker.lock);
+ return 0;
+}
+
+void mlx4_free_resource_tracker(struct mlx4_dev *dev)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ int i;
+
+ if (priv->mfunc.master.res_tracker.slave_list) {
+ for (i = 0 ; i < dev->num_slaves; i++)
+ mlx4_delete_all_resources_for_slave(dev, i);
+
+ kfree(priv->mfunc.master.res_tracker.slave_list);
+ }
+}
+
+static void update_ud_gid(struct mlx4_dev *dev,
+ struct mlx4_qp_context *qp_ctx, u8 slave)
+{
+ u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
+
+ if (MLX4_QP_ST_UD == ts)
+ qp_ctx->pri_path.mgid_index = 0x80 | slave;
+
+ mlx4_dbg(dev, "slave %d, new gid index: 0x%x ",
+ slave, qp_ctx->pri_path.mgid_index);
+}
+
+static int mpt_mask(struct mlx4_dev *dev)
+{
+ return dev->caps.num_mpts - 1;
+}
+
+static void *find_res(struct mlx4_dev *dev, int res_id,
+ enum mlx4_resource type)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
+ res_id);
+}
+
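+/*
+ * Look up a resource, verify that it is owned by @slave and not busy,
+ * then mark it busy so no other flow can touch it until put_res()
+ * restores the saved state.
+ */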
+static int get_res(struct mlx4_dev *dev, int slave, int res_id,
+ enum mlx4_resource type,
+ void *res)
+{
+ struct res_common *r;
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = find_res(dev, res_id, type);
+ if (!r) {
+ err = -ENONET;
+ goto exit;
+ }
+
+ if (r->state == RES_ANY_BUSY) {
+ err = -EBUSY;
+ goto exit;
+ }
+
+ if (r->owner != slave) {
+ err = -EPERM;
+ goto exit;
+ }
+
+ r->from_state = r->state;
+ r->state = RES_ANY_BUSY;
+ mlx4_dbg(dev, "res %s id 0x%x to busy\n",
+ ResourceType(type), r->res_id);
+
+ if (res)
+ *((struct res_common **)res) = r;
+
+exit:
+ spin_unlock_irq(mlx4_tlock(dev));
+ return err;
+}
+
+int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
+ enum mlx4_resource type,
+ int res_id, int *slave)
+{
+
+ struct res_common *r;
+ int err = -ENOENT;
+ int id = res_id;
+
+ if (type == RES_QP)
+ id &= 0x7fffff;
+ spin_lock(mlx4_tlock(dev));
+
+ r = find_res(dev, id, type);
+ if (r) {
+ *slave = r->owner;
+ err = 0;
+ }
+ spin_unlock(mlx4_tlock(dev));
+
+ return err;
+}
+
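+/* Drop the busy state taken by get_res() and restore the saved state. */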
+static void put_res(struct mlx4_dev *dev, int slave, int res_id,
+ enum mlx4_resource type)
+{
+ struct res_common *r;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = find_res(dev, res_id, type);
+ if (r)
+ r->state = r->from_state;
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static struct res_common *alloc_qp_tr(int id)
+{
+ struct res_qp *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_QP_RESERVED;
+ ret->local_qpn = id;
+ INIT_LIST_HEAD(&ret->mcg_list);
+ spin_lock_init(&ret->mcg_spl);
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_mtt_tr(int id, int order)
+{
+ struct res_mtt *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->order = order;
+ ret->com.state = RES_MTT_ALLOCATED;
+ atomic_set(&ret->ref_count, 0);
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_mpt_tr(int id, int key)
+{
+ struct res_mpt *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_MPT_RESERVED;
+ ret->key = key;
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_eq_tr(int id)
+{
+ struct res_eq *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_EQ_RESERVED;
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_cq_tr(int id)
+{
+ struct res_cq *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_CQ_ALLOCATED;
+ atomic_set(&ret->ref_count, 0);
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_srq_tr(int id)
+{
+ struct res_srq *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_SRQ_ALLOCATED;
+ atomic_set(&ret->ref_count, 0);
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_counter_tr(int id)
+{
+ struct res_counter *ret;
+
+ ret = kzalloc(sizeof *ret, GFP_KERNEL);
+ if (!ret)
+ return NULL;
+
+ ret->com.res_id = id;
+ ret->com.state = RES_COUNTER_ALLOCATED;
+
+ return &ret->com;
+}
+
+static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave,
+ int extra)
+{
+ struct res_common *ret;
+
+ switch (type) {
+ case RES_QP:
+ ret = alloc_qp_tr(id);
+ break;
+ case RES_MPT:
+ ret = alloc_mpt_tr(id, extra);
+ break;
+ case RES_MTT:
+ ret = alloc_mtt_tr(id, extra);
+ break;
+ case RES_EQ:
+ ret = alloc_eq_tr(id);
+ break;
+ case RES_CQ:
+ ret = alloc_cq_tr(id);
+ break;
+ case RES_SRQ:
+ ret = alloc_srq_tr(id);
+ break;
+ case RES_MAC:
+ printk(KERN_ERR "implementation missing\n");
+ return NULL;
+ case RES_COUNTER:
+ ret = alloc_counter_tr(id);
+ break;
+
+ default:
+ return NULL;
+ }
+ if (ret)
+ ret->owner = slave;
+
+ return ret;
+}
+
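+/*
+ * Create tracker entries for the ids [base, base + count), insert them
+ * into the per-type radix tree and link them to @slave's resource list.
+ * The operation is all-or-nothing: any failure unwinds the entries that
+ * were already inserted.
+ */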
+static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count,
+ enum mlx4_resource type, int extra)
+{
+ int i;
+ int err;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct res_common **res_arr;
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct radix_tree_root *root = &tracker->res_tree[type];
+
+ res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
+ if (!res_arr)
+ return -ENOMEM;
+
+ for (i = 0; i < count; ++i) {
+ res_arr[i] = alloc_tr(base + i, type, slave, extra);
+ if (!res_arr[i]) {
+ for (--i; i >= 0; --i)
+ kfree(res_arr[i]);
+
+ kfree(res_arr);
+ return -ENOMEM;
+ }
+ }
+
+ spin_lock_irq(mlx4_tlock(dev));
+ for (i = 0; i < count; ++i) {
+ if (find_res(dev, base + i, type)) {
+ err = -EEXIST;
+ goto undo;
+ }
+ err = radix_tree_insert(root, base + i, res_arr[i]);
+ if (err)
+ goto undo;
+ list_add_tail(&res_arr[i]->list,
+ &tracker->slave_list[slave].res_list[type]);
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(res_arr);
+
+ return 0;
+
+undo:
+ for (--i; i >= 0; --i)
+ radix_tree_delete(&tracker->res_tree[type], base + i);
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ for (i = 0; i < count; ++i)
+ kfree(res_arr[i]);
+
+ kfree(res_arr);
+
+ return err;
+}
+
+static int remove_qp_ok(struct res_qp *res)
+{
+ if (res->com.state == RES_QP_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_QP_RESERVED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_mtt_ok(struct res_mtt *res, int order)
+{
+ if (res->com.state == RES_MTT_BUSY ||
+ atomic_read(&res->ref_count)) {
+ printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
+ __func__, __LINE__,
+ mtt_states_str(res->com.state),
+ atomic_read(&res->ref_count));
+ return -EBUSY;
+ } else if (res->com.state != RES_MTT_ALLOCATED)
+ return -EPERM;
+ else if (res->order != order)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int remove_mpt_ok(struct res_mpt *res)
+{
+ if (res->com.state == RES_MPT_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_MPT_RESERVED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_eq_ok(struct res_eq *res)
+{
+ if (res->com.state == RES_MPT_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_MPT_RESERVED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_counter_ok(struct res_counter *res)
+{
+ if (res->com.state == RES_COUNTER_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_COUNTER_ALLOCATED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_cq_ok(struct res_cq *res)
+{
+ if (res->com.state == RES_CQ_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_CQ_ALLOCATED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_srq_ok(struct res_srq *res)
+{
+ if (res->com.state == RES_SRQ_BUSY)
+ return -EBUSY;
+ else if (res->com.state != RES_SRQ_ALLOCATED)
+ return -EPERM;
+
+ return 0;
+}
+
+static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
+{
+ switch (type) {
+ case RES_QP:
+ return remove_qp_ok((struct res_qp *)res);
+ case RES_CQ:
+ return remove_cq_ok((struct res_cq *)res);
+ case RES_SRQ:
+ return remove_srq_ok((struct res_srq *)res);
+ case RES_MPT:
+ return remove_mpt_ok((struct res_mpt *)res);
+ case RES_MTT:
+ return remove_mtt_ok((struct res_mtt *)res, extra);
+ case RES_MAC:
+ return -ENOSYS;
+ case RES_EQ:
+ return remove_eq_ok((struct res_eq *)res);
+ case RES_COUNTER:
+ return remove_counter_ok((struct res_counter *)res);
+ default:
+ return -EINVAL;
+ }
+}
+
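+/*
+ * Counterpart of add_res_range(): first verify, under the tracker lock,
+ * that every id in the range exists, belongs to @slave and is in a
+ * removable state, then delete and free all of the entries.
+ */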
+static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count,
+ enum mlx4_resource type, int extra)
+{
+ int i;
+ int err;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_common *r;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ for (i = base; i < base + count; ++i) {
+ r = radix_tree_lookup(&tracker->res_tree[type], i);
+ if (!r) {
+ err = -ENOENT;
+ goto out;
+ }
+ if (r->owner != slave) {
+ err = -EPERM;
+ goto out;
+ }
+ err = remove_ok(r, type, extra);
+ if (err)
+ goto out;
+ }
+
+ for (i = base; i < base + count; ++i) {
+ r = radix_tree_lookup(&tracker->res_tree[type], i);
+ radix_tree_delete(&tracker->res_tree[type], i);
+ list_del(&r->list);
+ kfree(r);
+ }
+ err = 0;
+
+out:
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
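+/*
+ * The *_res_start_move_to() helpers begin a state transition: they check
+ * that the requested state is reachable from the current one, record the
+ * source and target states and mark the entry busy. The transition is
+ * committed with res_end_move() or rolled back with res_abort_move().
+ */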
+static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
+ enum res_qp_states state, struct res_qp **qp,
+ int alloc)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_qp *r;
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn);
+ if (!r)
+ err = -ENOENT;
+ else if (r->com.owner != slave)
+ err = -EPERM;
+ else {
+ switch (state) {
+ case RES_QP_BUSY:
+ mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n",
+ __func__, r->com.res_id);
+ err = -EBUSY;
+ break;
+
+ case RES_QP_RESERVED:
+ if (r->com.state == RES_QP_MAPPED && !alloc)
+ break;
+
+ mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id);
+ err = -EINVAL;
+ break;
+
+ case RES_QP_MAPPED:
+ if ((r->com.state == RES_QP_RESERVED && alloc) ||
+ r->com.state == RES_QP_HW)
+ break;
+ else {
+ mlx4_dbg(dev, "failed RES_QP, 0x%x\n",
+ r->com.res_id);
+ err = -EINVAL;
+ }
+
+ break;
+
+ case RES_QP_HW:
+ if (r->com.state != RES_QP_MAPPED)
+ err = -EINVAL;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_QP_BUSY;
+ if (qp)
+ *qp = (struct res_qp *)r;
+ }
+ }
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+ enum res_mpt_states state, struct res_mpt **mpt)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_mpt *r;
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index);
+ if (!r)
+ err = -ENOENT;
+ else if (r->com.owner != slave)
+ err = -EPERM;
+ else {
+ switch (state) {
+ case RES_MPT_BUSY:
+ err = -EINVAL;
+ break;
+
+ case RES_MPT_RESERVED:
+ if (r->com.state != RES_MPT_MAPPED)
+ err = -EINVAL;
+ break;
+
+ case RES_MPT_MAPPED:
+ if (r->com.state != RES_MPT_RESERVED &&
+ r->com.state != RES_MPT_HW)
+ err = -EINVAL;
+ break;
+
+ case RES_MPT_HW:
+ if (r->com.state != RES_MPT_MAPPED)
+ err = -EINVAL;
+ break;
+ default:
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_MPT_BUSY;
+ if (mpt)
+ *mpt = (struct res_mpt *)r;
+ }
+ }
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+ enum res_eq_states state, struct res_eq **eq)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_eq *r;
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index);
+ if (!r)
+ err = -ENOENT;
+ else if (r->com.owner != slave)
+ err = -EPERM;
+ else {
+ switch (state) {
+ case RES_EQ_BUSY:
+ err = -EINVAL;
+ break;
+
+ case RES_EQ_RESERVED:
+ if (r->com.state != RES_EQ_HW)
+ err = -EINVAL;
+ break;
+
+ case RES_EQ_HW:
+ if (r->com.state != RES_EQ_RESERVED)
+ err = -EINVAL;
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_EQ_BUSY;
+ if (eq)
+ *eq = r;
+ }
+ }
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
+ enum res_cq_states state, struct res_cq **cq)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_cq *r;
+ int err;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn);
+ if (!r)
+ err = -ENOENT;
+ else if (r->com.owner != slave)
+ err = -EPERM;
+ else {
+ switch (state) {
+ case RES_CQ_BUSY:
+ err = -EBUSY;
+ break;
+
+ case RES_CQ_ALLOCATED:
+ if (r->com.state != RES_CQ_HW)
+ err = -EINVAL;
+ else if (atomic_read(&r->ref_count))
+ err = -EBUSY;
+ else
+ err = 0;
+ break;
+
+ case RES_CQ_HW:
+ if (r->com.state != RES_CQ_ALLOCATED)
+ err = -EINVAL;
+ else
+ err = 0;
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_CQ_BUSY;
+ if (cq)
+ *cq = r;
+ }
+ }
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
+ enum res_cq_states state, struct res_srq **srq)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_srq *r;
+ int err = 0;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index);
+ if (!r)
+ err = -ENOENT;
+ else if (r->com.owner != slave)
+ err = -EPERM;
+ else {
+ switch (state) {
+ case RES_SRQ_BUSY:
+ err = -EINVAL;
+ break;
+
+ case RES_SRQ_ALLOCATED:
+ if (r->com.state != RES_SRQ_HW)
+ err = -EINVAL;
+ else if (atomic_read(&r->ref_count))
+ err = -EBUSY;
+ break;
+
+ case RES_SRQ_HW:
+ if (r->com.state != RES_SRQ_ALLOCATED)
+ err = -EINVAL;
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ if (!err) {
+ r->com.from_state = r->com.state;
+ r->com.to_state = state;
+ r->com.state = RES_SRQ_BUSY;
+ if (srq)
+ *srq = r;
+ }
+ }
+
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+static void res_abort_move(struct mlx4_dev *dev, int slave,
+ enum mlx4_resource type, int id)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_common *r;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[type], id);
+ if (r && (r->owner == slave))
+ r->state = r->from_state;
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void res_end_move(struct mlx4_dev *dev, int slave,
+ enum mlx4_resource type, int id)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_common *r;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ r = radix_tree_lookup(&tracker->res_tree[type], id);
+ if (r && (r->owner == slave))
+ r->state = r->to_state;
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
+{
+ return mlx4_is_qp_reserved(dev, qpn);
+}
+
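+/*
+ * ALLOC_RES handling for QPs: RES_OP_RESERVE reserves a range of QP
+ * numbers on behalf of the slave and records it in the tracker;
+ * RES_OP_MAP_ICM maps the ICM backing a single QPN (reserved QPs are
+ * added to the tracker on first use) and moves it to RES_QP_MAPPED.
+ */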
+static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int err;
+ int count;
+ int align;
+ int base;
+ int qpn;
+
+ switch (op) {
+ case RES_OP_RESERVE:
+ count = get_param_l(&in_param);
+ align = get_param_h(&in_param);
+ err = __mlx4_qp_reserve_range(dev, count, align, &base);
+ if (err)
+ return err;
+
+ err = add_res_range(dev, slave, base, count, RES_QP, 0);
+ if (err) {
+ __mlx4_qp_release_range(dev, base, count);
+ return err;
+ }
+ set_param_l(out_param, base);
+ break;
+ case RES_OP_MAP_ICM:
+ qpn = get_param_l(&in_param) & 0x7fffff;
+ if (valid_reserved(dev, slave, qpn)) {
+ err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
+ if (err)
+ return err;
+ }
+
+ err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
+ NULL, 1);
+ if (err)
+ return err;
+
+ if (!valid_reserved(dev, slave, qpn)) {
+ err = __mlx4_qp_alloc_icm(dev, qpn);
+ if (err) {
+ res_abort_move(dev, slave, RES_QP, qpn);
+ return err;
+ }
+ }
+
+ res_end_move(dev, slave, RES_QP, qpn);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int err = -EINVAL;
+ int base;
+ int order;
+
+ if (op != RES_OP_RESERVE_AND_MAP)
+ return err;
+
+ order = get_param_l(&in_param);
+ base = __mlx4_alloc_mtt_range(dev, order);
+ if (base == -1)
+ return -ENOMEM;
+
+ err = add_res_range(dev, slave, base, 1, RES_MTT, order);
+ if (err)
+ __mlx4_free_mtt_range(dev, base, order);
+ else
+ set_param_l(out_param, base);
+
+ return err;
+}
+
+static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int err = -EINVAL;
+ int index;
+ int id;
+ struct res_mpt *mpt;
+
+ switch (op) {
+ case RES_OP_RESERVE:
+ index = __mlx4_mr_reserve(dev);
+ if (index == -1)
+ break;
+ id = index & mpt_mask(dev);
+
+ err = add_res_range(dev, slave, id, 1, RES_MPT, index);
+ if (err) {
+ __mlx4_mr_release(dev, index);
+ break;
+ }
+ set_param_l(out_param, index);
+ break;
+ case RES_OP_MAP_ICM:
+ index = get_param_l(&in_param);
+ id = index & mpt_mask(dev);
+ err = mr_res_start_move_to(dev, slave, id,
+ RES_MPT_MAPPED, &mpt);
+ if (err)
+ return err;
+
+ err = __mlx4_mr_alloc_icm(dev, mpt->key);
+ if (err) {
+ res_abort_move(dev, slave, RES_MPT, id);
+ return err;
+ }
+
+ res_end_move(dev, slave, RES_MPT, id);
+ break;
+ }
+ return err;
+}
+
+static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int cqn;
+ int err;
+
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ err = __mlx4_cq_alloc_icm(dev, &cqn);
+ if (err)
+ break;
+
+ err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
+ if (err) {
+ __mlx4_cq_free_icm(dev, cqn);
+ break;
+ }
+
+ set_param_l(out_param, cqn);
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int srqn;
+ int err;
+
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ err = __mlx4_srq_alloc_icm(dev, &srqn);
+ if (err)
+ break;
+
+ err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
+ if (err) {
+ __mlx4_srq_free_icm(dev, srqn);
+ break;
+ }
+
+ set_param_l(out_param, srqn);
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct mac_res *res;
+
+ res = kzalloc(sizeof *res, GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+ res->mac = mac;
+ res->port = (u8) port;
+ list_add_tail(&res->list,
+ &tracker->slave_list[slave].res_list[RES_MAC]);
+ return 0;
+}
+
+static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
+ int port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *mac_list =
+ &tracker->slave_list[slave].res_list[RES_MAC];
+ struct mac_res *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, mac_list, list) {
+ if (res->mac == mac && res->port == (u8) port) {
+ list_del(&res->list);
+ kfree(res);
+ break;
+ }
+ }
+}
+
+static void rem_slave_macs(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *mac_list =
+ &tracker->slave_list[slave].res_list[RES_MAC];
+ struct mac_res *res, *tmp;
+
+ list_for_each_entry_safe(res, tmp, mac_list, list) {
+ list_del(&res->list);
+ __mlx4_unregister_mac(dev, res->port, res->mac);
+ kfree(res);
+ }
+}
+
+static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int err = -EINVAL;
+ int port;
+ u64 mac;
+
+ if (op != RES_OP_RESERVE_AND_MAP)
+ return err;
+
+ port = get_param_l(out_param);
+ mac = in_param;
+
+ err = __mlx4_register_mac(dev, port, mac);
+ if (err >= 0) {
+ set_param_l(out_param, err);
+ err = 0;
+ }
+
+ if (!err) {
+ err = mac_add_to_slave(dev, slave, mac, port);
+ if (err)
+ __mlx4_unregister_mac(dev, port, mac);
+ }
+ return err;
+}
+
+static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ return 0;
+}
+
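+/*
+ * Wrapper executed on the master for MLX4_CMD_ALLOC_RES issued by a
+ * slave; vhcr->in_modifier selects the resource type and the op modifier
+ * selects the allocation operation.
+ */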
+int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int alop = vhcr->op_modifier;
+
+ switch (vhcr->in_modifier) {
+ case RES_QP:
+ err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_MTT:
+ err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_MPT:
+ err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_CQ:
+ err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_SRQ:
+ err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_MAC:
+ err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_VLAN:
+ err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param)
+{
+ int err;
+ int count;
+ int base;
+ int qpn;
+
+ switch (op) {
+ case RES_OP_RESERVE:
+ base = get_param_l(&in_param) & 0x7fffff;
+ count = get_param_h(&in_param);
+ err = rem_res_range(dev, slave, base, count, RES_QP, 0);
+ if (err)
+ break;
+ __mlx4_qp_release_range(dev, base, count);
+ break;
+ case RES_OP_MAP_ICM:
+ qpn = get_param_l(&in_param) & 0x7fffff;
+ err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
+ NULL, 0);
+ if (err)
+ return err;
+
+ if (!valid_reserved(dev, slave, qpn))
+ __mlx4_qp_free_icm(dev, qpn);
+
+ res_end_move(dev, slave, RES_QP, qpn);
+
+ if (valid_reserved(dev, slave, qpn))
+ err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int err = -EINVAL;
+ int base;
+ int order;
+
+ if (op != RES_OP_RESERVE_AND_MAP)
+ return err;
+
+ base = get_param_l(&in_param);
+ order = get_param_h(&in_param);
+ err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
+ if (!err)
+ __mlx4_free_mtt_range(dev, base, order);
+ return err;
+}
+
+static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param)
+{
+ int err = -EINVAL;
+ int index;
+ int id;
+ struct res_mpt *mpt;
+
+ switch (op) {
+ case RES_OP_RESERVE:
+ index = get_param_l(&in_param);
+ id = index & mpt_mask(dev);
+ err = get_res(dev, slave, id, RES_MPT, &mpt);
+ if (err)
+ break;
+ index = mpt->key;
+ put_res(dev, slave, id, RES_MPT);
+
+ err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
+ if (err)
+ break;
+ __mlx4_mr_release(dev, index);
+ break;
+ case RES_OP_MAP_ICM:
+ index = get_param_l(&in_param);
+ id = index & mpt_mask(dev);
+ err = mr_res_start_move_to(dev, slave, id,
+ RES_MPT_RESERVED, &mpt);
+ if (err)
+ return err;
+
+ __mlx4_mr_free_icm(dev, mpt->key);
+ res_end_move(dev, slave, RES_MPT, id);
+ return err;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int cqn;
+ int err;
+
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ cqn = get_param_l(&in_param);
+ err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
+ if (err)
+ break;
+
+ __mlx4_cq_free_icm(dev, cqn);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int srqn;
+ int err;
+
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ srqn = get_param_l(&in_param);
+ err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
+ if (err)
+ break;
+
+ __mlx4_srq_free_icm(dev, srqn);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+}
+
+static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ int port;
+ int err = 0;
+
+ switch (op) {
+ case RES_OP_RESERVE_AND_MAP:
+ port = get_param_l(out_param);
+ mac_del_from_slave(dev, slave, in_param, port);
+ __mlx4_unregister_mac(dev, port, in_param);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ return err;
+
+}
+
+static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
+ u64 in_param, u64 *out_param)
+{
+ return 0;
+}
+
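+/*
+ * Wrapper executed on the master for MLX4_CMD_FREE_RES issued by a
+ * slave: dispatches on the resource type and releases the resource on
+ * the slave's behalf.
+ */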
+int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err = -EINVAL;
+ int alop = vhcr->op_modifier;
+
+ switch (vhcr->in_modifier) {
+ case RES_QP:
+ err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param);
+ break;
+
+ case RES_MTT:
+ err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_MPT:
+ err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param);
+ break;
+
+ case RES_CQ:
+ err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_SRQ:
+ err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_MAC:
+ err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ case RES_VLAN:
+ err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
+ vhcr->in_param, &vhcr->out_param);
+ break;
+
+ default:
+ break;
+ }
+ return err;
+}
+
+/* ugly but other choices are uglier */
+static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
+{
+ return (be32_to_cpu(mpt->flags) >> 9) & 1;
+}
+
+static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
+{
+ return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
+}
+
+static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
+{
+ return be32_to_cpu(mpt->mtt_sz);
+}
+
+static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
+{
+ return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
+{
+ return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
+}
+
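+/*
+ * Number of MTT pages referenced by a QP context: derive the send and
+ * receive queue sizes from the log sizes and strides in the context (the
+ * receive queue is absent for SRQ, RSS and XRC QPs), add the page offset
+ * and round the total up to a power-of-two number of pages.
+ */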
+static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
+{
+ int page_shift = (qpc->log_page_size & 0x3f) + 12;
+ int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
+ int log_sq_stride = qpc->sq_size_stride & 7;
+ int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
+ int log_rq_stride = qpc->rq_size_stride & 7;
+ int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
+ int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
+ int xrc = (be32_to_cpu(qpc->local_qpn) >> 23) & 1;
+ int sq_size;
+ int rq_size;
+ int total_pages;
+ int total_mem;
+ int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
+
+ sq_size = 1 << (log_sq_size + log_sq_stride + 4);
+ rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
+ total_mem = sq_size + rq_size;
+ total_pages =
+ roundup_pow_of_two((total_mem + (page_offset << 6)) >>
+ page_shift);
+
+ return total_pages;
+}
+
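+/* Check that [start, start + size) lies inside the MTT range @mtt. */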
+static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
+ int size, struct res_mtt *mtt)
+{
+ int res_start = mtt->com.res_id;
+ int res_size = (1 << mtt->order);
+
+ if (start < res_start || start + size > res_start + res_size)
+ return -EPERM;
+ return 0;
+}
+
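+/*
+ * Move the MPT to hardware ownership. For non-physical MRs the MTT range
+ * referenced by the entry must be owned by the slave and large enough; a
+ * reference is taken on it before the command is passed to firmware.
+ */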
+int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int index = vhcr->in_modifier;
+ struct res_mtt *mtt;
+ struct res_mpt *mpt;
+ int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
+ int phys;
+ int id;
+
+ id = index & mpt_mask(dev);
+ err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
+ if (err)
+ return err;
+
+ phys = mr_phys_mpt(inbox->buf);
+ if (!phys) {
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto ex_abort;
+
+ err = check_mtt_range(dev, slave, mtt_base,
+ mr_get_mtt_size(inbox->buf), mtt);
+ if (err)
+ goto ex_put;
+
+ mpt->mtt = mtt;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_put;
+
+ if (!phys) {
+ atomic_inc(&mtt->ref_count);
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ }
+
+ res_end_move(dev, slave, RES_MPT, id);
+ return 0;
+
+ex_put:
+ if (!phys)
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ex_abort:
+ res_abort_move(dev, slave, RES_MPT, id);
+
+ return err;
+}
+
+int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int index = vhcr->in_modifier;
+ struct res_mpt *mpt;
+ int id;
+
+ id = index & mpt_mask(dev);
+ err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
+ if (err)
+ return err;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_abort;
+
+ if (mpt->mtt)
+ atomic_dec(&mpt->mtt->ref_count);
+
+ res_end_move(dev, slave, RES_MPT, id);
+ return 0;
+
+ex_abort:
+ res_abort_move(dev, slave, RES_MPT, id);
+
+ return err;
+}
+
+int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int index = vhcr->in_modifier;
+ struct res_mpt *mpt;
+ int id;
+
+ id = index & mpt_mask(dev);
+ err = get_res(dev, slave, id, RES_MPT, &mpt);
+ if (err)
+ return err;
+
+ if (mpt->com.from_state != RES_MPT_HW) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+
+out:
+ put_res(dev, slave, id, RES_MPT);
+ return err;
+}
+
+static int qp_get_rcqn(struct mlx4_qp_context *qpc)
+{
+ return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
+}
+
+static int qp_get_scqn(struct mlx4_qp_context *qpc)
+{
+ return be32_to_cpu(qpc->cqn_send) & 0xffffff;
+}
+
+static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
+{
+ return be32_to_cpu(qpc->srqn) & 0x1ffffff;
+}
+
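+/*
+ * Move the QP to hardware ownership: validate the MTT range and take
+ * references on the MTT, the receive and send CQs and the optional SRQ
+ * named in the QP context, so none of them can be destroyed while the
+ * QP uses them.
+ */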
+int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int qpn = vhcr->in_modifier & 0x7fffff;
+ struct res_mtt *mtt;
+ struct res_qp *qp;
+ struct mlx4_qp_context *qpc = inbox->buf + 8;
+ int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
+ int mtt_size = qp_get_mtt_size(qpc);
+ struct res_cq *rcq;
+ struct res_cq *scq;
+ int rcqn = qp_get_rcqn(qpc);
+ int scqn = qp_get_scqn(qpc);
+ u32 srqn = qp_get_srqn(qpc) & 0xffffff;
+ int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
+ struct res_srq *srq;
+ int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
+
+ err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
+ if (err)
+ return err;
+ qp->local_qpn = local_qpn;
+
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto ex_abort;
+
+ err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
+ if (err)
+ goto ex_put_mtt;
+
+ err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
+ if (err)
+ goto ex_put_mtt;
+
+ if (scqn != rcqn) {
+ err = get_res(dev, slave, scqn, RES_CQ, &scq);
+ if (err)
+ goto ex_put_rcq;
+ } else
+ scq = rcq;
+
+ if (use_srq) {
+ err = get_res(dev, slave, srqn, RES_SRQ, &srq);
+ if (err)
+ goto ex_put_scq;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_put_srq;
+ atomic_inc(&mtt->ref_count);
+ qp->mtt = mtt;
+ atomic_inc(&rcq->ref_count);
+ qp->rcq = rcq;
+ atomic_inc(&scq->ref_count);
+ qp->scq = scq;
+
+ if (scqn != rcqn)
+ put_res(dev, slave, scqn, RES_CQ);
+
+ if (use_srq) {
+ atomic_inc(&srq->ref_count);
+ put_res(dev, slave, srqn, RES_SRQ);
+ qp->srq = srq;
+ }
+ put_res(dev, slave, rcqn, RES_CQ);
+ put_res(dev, slave, mtt_base, RES_MTT);
+ res_end_move(dev, slave, RES_QP, qpn);
+
+ return 0;
+
+ex_put_srq:
+ if (use_srq)
+ put_res(dev, slave, srqn, RES_SRQ);
+ex_put_scq:
+ if (scqn != rcqn)
+ put_res(dev, slave, scqn, RES_CQ);
+ex_put_rcq:
+ put_res(dev, slave, rcqn, RES_CQ);
+ex_put_mtt:
+ put_res(dev, slave, mtt_base, RES_MTT);
+ex_abort:
+ res_abort_move(dev, slave, RES_QP, qpn);
+
+ return err;
+}
+
+static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
+{
+ return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
+{
+ int log_eq_size = eqc->log_eq_size & 0x1f;
+ int page_shift = (eqc->log_page_size & 0x3f) + 12;
+
+ if (log_eq_size + 5 < page_shift)
+ return 1;
+
+ return 1 << (log_eq_size + 5 - page_shift);
+}
+
+static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
+{
+ return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
+}
+
+static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
+{
+ int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
+ int page_shift = (cqc->log_page_size & 0x3f) + 12;
+
+ if (log_cq_size + 5 < page_shift)
+ return 1;
+
+ return 1 << (log_cq_size + 5 - page_shift);
+}
+
+int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int eqn = vhcr->in_modifier;
+ int res_id = (slave << 8) | eqn;
+ struct mlx4_eq_context *eqc = inbox->buf;
+ int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
+ int mtt_size = eq_get_mtt_size(eqc);
+ struct res_eq *eq;
+ struct res_mtt *mtt;
+
+ err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
+ if (err)
+ return err;
+ err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
+ if (err)
+ goto out_add;
+
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto out_move;
+
+ err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
+ if (err)
+ goto out_put;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto out_put;
+
+ atomic_inc(&mtt->ref_count);
+ eq->mtt = mtt;
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ res_end_move(dev, slave, RES_EQ, res_id);
+ return 0;
+
+out_put:
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+out_move:
+ res_abort_move(dev, slave, RES_EQ, res_id);
+out_add:
+ rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
+ return err;
+}
+
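+/*
+ * Find an MTT range owned by @slave that contains [start, start + len)
+ * and mark it busy; the caller releases it with put_res().
+ */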
+static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
+ int len, struct res_mtt **res)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct res_mtt *mtt;
+ int err = -EINVAL;
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
+ com.list) {
+ if (!check_mtt_range(dev, slave, start, len, mtt)) {
+ *res = mtt;
+ mtt->com.from_state = mtt->com.state;
+ mtt->com.state = RES_MTT_BUSY;
+ err = 0;
+ break;
+ }
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return err;
+}
+
+int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_mtt mtt;
+ __be64 *page_list = inbox->buf;
+ u64 *pg_list = (u64 *)page_list;
+ int i;
+ struct res_mtt *rmtt = NULL;
+ int start = be64_to_cpu(page_list[0]);
+ int npages = vhcr->in_modifier;
+ int err;
+
+ err = get_containing_mtt(dev, slave, start, npages, &rmtt);
+ if (err)
+ return err;
+
+ /* Call the SW implementation of write_mtt:
+ * - Prepare a dummy mtt struct
+ * - Translate inbox contents to simple addresses in host endianness */
+ mtt.offset = 0; /* TBD this is broken but I don't handle it since
+ we don't really use it */
+ mtt.order = 0;
+ mtt.page_shift = 0;
+ for (i = 0; i < npages; ++i)
+ pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
+
+ err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
+ ((u64 *)page_list + 2));
+
+ if (rmtt)
+ put_res(dev, slave, rmtt->com.res_id, RES_MTT);
+
+ return err;
+}
+
+int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int eqn = vhcr->in_modifier;
+ int res_id = eqn | (slave << 8);
+ struct res_eq *eq;
+ int err;
+
+ err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
+ if (err)
+ return err;
+
+ err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
+ if (err)
+ goto ex_abort;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_put;
+
+ atomic_dec(&eq->mtt->ref_count);
+ put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
+ res_end_move(dev, slave, RES_EQ, res_id);
+ rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
+
+ return 0;
+
+ex_put:
+ put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
+ex_abort:
+ res_abort_move(dev, slave, RES_EQ, res_id);
+
+ return err;
+}
+
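+/*
+ * Generate an event on a slave's event queue. The event is posted only
+ * if the slave registered an EQ for this event type; the EQE is copied
+ * into a mailbox and delivered with the GEN_EQE firmware command.
+ */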
+int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_slave_event_eq_info *event_eq;
+ struct mlx4_cmd_mailbox *mailbox;
+ u32 in_modifier = 0;
+ int err;
+ int res_id;
+ struct res_eq *req;
+
+ if (!priv->mfunc.master.slave_state)
+ return -EINVAL;
+
+ event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
+
+ /* Create the event only if the slave is registered */
+ if (event_eq->eqn < 0)
+ return 0;
+
+ mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
+ res_id = (slave << 8) | event_eq->eqn;
+ err = get_res(dev, slave, res_id, RES_EQ, &req);
+ if (err)
+ goto unlock;
+
+ if (req->com.from_state != RES_EQ_HW) {
+ err = -EINVAL;
+ goto put;
+ }
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ err = PTR_ERR(mailbox);
+ goto put;
+ }
+
+ if (eqe->type == MLX4_EVENT_TYPE_CMD) {
+ ++event_eq->token;
+ eqe->event.cmd.token = cpu_to_be16(event_eq->token);
+ }
+
+ memcpy(mailbox->buf, (u8 *) eqe, 28);
+
+ in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
+
+ err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
+ MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_NATIVE);
+
+ put_res(dev, slave, res_id, RES_EQ);
+ mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+
+put:
+ put_res(dev, slave, res_id, RES_EQ);
+
+unlock:
+ mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
+ return err;
+}
+
+int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int eqn = vhcr->in_modifier;
+ int res_id = eqn | (slave << 8);
+ struct res_eq *eq;
+ int err;
+
+ err = get_res(dev, slave, res_id, RES_EQ, &eq);
+ if (err)
+ return err;
+
+ if (eq->com.from_state != RES_EQ_HW) {
+ err = -EINVAL;
+ goto ex_put;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+
+ex_put:
+ put_res(dev, slave, res_id, RES_EQ);
+ return err;
+}
+
+int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int cqn = vhcr->in_modifier;
+ struct mlx4_cq_context *cqc = inbox->buf;
+ int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
+ struct res_cq *cq;
+ struct res_mtt *mtt;
+
+ err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
+ if (err)
+ return err;
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto out_move;
+ err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
+ if (err)
+ goto out_put;
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto out_put;
+ atomic_inc(&mtt->ref_count);
+ cq->mtt = mtt;
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ res_end_move(dev, slave, RES_CQ, cqn);
+ return 0;
+
+out_put:
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+out_move:
+ res_abort_move(dev, slave, RES_CQ, cqn);
+ return err;
+}
+
+int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int cqn = vhcr->in_modifier;
+ struct res_cq *cq;
+
+ err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
+ if (err)
+ return err;
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto out_move;
+ atomic_dec(&cq->mtt->ref_count);
+ res_end_move(dev, slave, RES_CQ, cqn);
+ return 0;
+
+out_move:
+ res_abort_move(dev, slave, RES_CQ, cqn);
+ return err;
+}
+
+int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int cqn = vhcr->in_modifier;
+ struct res_cq *cq;
+ int err;
+
+ err = get_res(dev, slave, cqn, RES_CQ, &cq);
+ if (err)
+ return err;
+
+ if (cq->com.from_state != RES_CQ_HW)
+ goto ex_put;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ex_put:
+ put_res(dev, slave, cqn, RES_CQ);
+
+ return err;
+}
+
+static int handle_resize(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd,
+ struct res_cq *cq)
+{
+ int err;
+ struct res_mtt *orig_mtt;
+ struct res_mtt *mtt;
+ struct mlx4_cq_context *cqc = inbox->buf;
+ int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
+
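+ /* CQ resize: move the CQ's reference from its current MTT to the MTT described by the new context */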
+ err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
+ if (err)
+ return err;
+
+ if (orig_mtt != cq->mtt) {
+ err = -EINVAL;
+ goto ex_put;
+ }
+
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto ex_put;
+
+ err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
+ if (err)
+ goto ex_put1;
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_put1;
+ atomic_dec(&orig_mtt->ref_count);
+ put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
+ atomic_inc(&mtt->ref_count);
+ cq->mtt = mtt;
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ return 0;
+
+ex_put1:
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ex_put:
+ put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
+
+ return err;
+
+}
+
+int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int cqn = vhcr->in_modifier;
+ struct res_cq *cq;
+ int err;
+
+ err = get_res(dev, slave, cqn, RES_CQ, &cq);
+ if (err)
+ return err;
+
+ if (cq->com.from_state != RES_CQ_HW)
+ goto ex_put;
+
+ if (vhcr->op_modifier == 0) {
+ err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
+ goto ex_put;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ex_put:
+ put_res(dev, slave, cqn, RES_CQ);
+
+ return err;
+}
+
+static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
+{
+ int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
+ int log_rq_stride = srqc->logstride & 7;
+ int page_shift = (srqc->log_page_size & 0x3f) + 12;
+
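+ /* number of MTT entries spanned by the SRQ buffer; e.g. log_srq_size = 10,
+ * log_rq_stride = 2 and page_shift = 12 give 1 << (10 + 2 + 4 - 12) = 16 entries
+ */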
+ if (log_srq_size + log_rq_stride + 4 < page_shift)
+ return 1;
+
+ return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
+}
+
+int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int srqn = vhcr->in_modifier;
+ struct res_mtt *mtt;
+ struct res_srq *srq;
+ struct mlx4_srq_context *srqc = inbox->buf;
+ int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
+
+ if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
+ return -EINVAL;
+
+ err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
+ if (err)
+ return err;
+ err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
+ if (err)
+ goto ex_abort;
+ err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
+ mtt);
+ if (err)
+ goto ex_put_mtt;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_put_mtt;
+
+ atomic_inc(&mtt->ref_count);
+ srq->mtt = mtt;
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ res_end_move(dev, slave, RES_SRQ, srqn);
+ return 0;
+
+ex_put_mtt:
+ put_res(dev, slave, mtt->com.res_id, RES_MTT);
+ex_abort:
+ res_abort_move(dev, slave, RES_SRQ, srqn);
+
+ return err;
+}
+
+int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int srqn = vhcr->in_modifier;
+ struct res_srq *srq;
+
+ err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
+ if (err)
+ return err;
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_abort;
+ atomic_dec(&srq->mtt->ref_count);
+ if (srq->cq)
+ atomic_dec(&srq->cq->ref_count);
+ res_end_move(dev, slave, RES_SRQ, srqn);
+
+ return 0;
+
+ex_abort:
+ res_abort_move(dev, slave, RES_SRQ, srqn);
+
+ return err;
+}
+
+int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int srqn = vhcr->in_modifier;
+ struct res_srq *srq;
+
+ err = get_res(dev, slave, srqn, RES_SRQ, &srq);
+ if (err)
+ return err;
+ if (srq->com.from_state != RES_SRQ_HW) {
+ err = -EBUSY;
+ goto out;
+ }
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+out:
+ put_res(dev, slave, srqn, RES_SRQ);
+ return err;
+}
+
+int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int srqn = vhcr->in_modifier;
+ struct res_srq *srq;
+
+ err = get_res(dev, slave, srqn, RES_SRQ, &srq);
+ if (err)
+ return err;
+
+ if (srq->com.from_state != RES_SRQ_HW) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+out:
+ put_res(dev, slave, srqn, RES_SRQ);
+ return err;
+}
+
+int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int qpn = vhcr->in_modifier & 0x7fffff;
+ struct res_qp *qp;
+
+ err = get_res(dev, slave, qpn, RES_QP, &qp);
+ if (err)
+ return err;
+ if (qp->com.from_state != RES_QP_HW) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+out:
+ put_res(dev, slave, qpn, RES_QP);
+ return err;
+}
+
+int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_qp_context *qpc = inbox->buf + 8;
+
+ update_ud_gid(dev, qpc, (u8)slave);
+
+ return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+}
+
+int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int qpn = vhcr->in_modifier & 0x7fffff;
+ struct res_qp *qp;
+
+ err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
+ if (err)
+ return err;
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ if (err)
+ goto ex_abort;
+
+ atomic_dec(&qp->mtt->ref_count);
+ atomic_dec(&qp->rcq->ref_count);
+ atomic_dec(&qp->scq->ref_count);
+ if (qp->srq)
+ atomic_dec(&qp->srq->ref_count);
+ res_end_move(dev, slave, RES_QP, qpn);
+ return 0;
+
+ex_abort:
+ res_abort_move(dev, slave, RES_QP, qpn);
+
+ return err;
+}
+
+static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
+ struct res_qp *rqp, u8 *gid)
+{
+ struct res_gid *res;
+
+ list_for_each_entry(res, &rqp->mcg_list, list) {
+ if (!memcmp(res->gid, gid, 16))
+ return res;
+ }
+ return NULL;
+}
+
+static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
+ u8 *gid, enum mlx4_protocol prot,
+ enum mlx4_steer_type steer)
+{
+ struct res_gid *res;
+ int err;
+
+ res = kzalloc(sizeof *res, GFP_KERNEL);
+ if (!res)
+ return -ENOMEM;
+
+ spin_lock_irq(&rqp->mcg_spl);
+ if (find_gid(dev, slave, rqp, gid)) {
+ kfree(res);
+ err = -EEXIST;
+ } else {
+ memcpy(res->gid, gid, 16);
+ res->prot = prot;
+ res->steer = steer;
+ list_add_tail(&res->list, &rqp->mcg_list);
+ err = 0;
+ }
+ spin_unlock_irq(&rqp->mcg_spl);
+
+ return err;
+}
+
+static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
+ u8 *gid, enum mlx4_protocol prot,
+ enum mlx4_steer_type steer)
+{
+ struct res_gid *res;
+ int err;
+
+ spin_lock_irq(&rqp->mcg_spl);
+ res = find_gid(dev, slave, rqp, gid);
+ if (!res || res->prot != prot || res->steer != steer) {
+ err = -EINVAL;
+ } else {
+ list_del(&res->list);
+ kfree(res);
+ err = 0;
+ }
+ spin_unlock_irq(&rqp->mcg_spl);
+
+ return err;
+}
+
+int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ struct mlx4_qp qp; /* dummy for calling attach/detach */
+ u8 *gid = inbox->buf;
+ enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
+ int err, err1;
+ int qpn;
+ struct res_qp *rqp;
+ int attach = vhcr->op_modifier;
+ int block_loopback = vhcr->in_modifier >> 31;
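+ /* the steering type is encoded in bit 1 of byte 7 of the gid */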
+ u8 steer_type_mask = 2;
+ enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
+
+ qpn = vhcr->in_modifier & 0xffffff;
+ err = get_res(dev, slave, qpn, RES_QP, &rqp);
+ if (err)
+ return err;
+
+ qp.qpn = qpn;
+ if (attach) {
+ err = add_mcg_res(dev, slave, rqp, gid, prot, type);
+ if (err)
+ goto ex_put;
+
+ err = mlx4_qp_attach_common(dev, &qp, gid,
+ block_loopback, prot, type);
+ if (err)
+ goto ex_rem;
+ } else {
+ err = rem_mcg_res(dev, slave, rqp, gid, prot, type);
+ if (err)
+ goto ex_put;
+ err = mlx4_qp_detach_common(dev, &qp, gid, prot, type);
+ }
+
+ put_res(dev, slave, qpn, RES_QP);
+ return 0;
+
+ex_rem:
+ /* ignore error return below, already in error */
+ err1 = rem_mcg_res(dev, slave, rqp, gid, prot, type);
+ex_put:
+ put_res(dev, slave, qpn, RES_QP);
+
+ return err;
+}
+
+enum {
+ BUSY_MAX_RETRIES = 10
+};
+
+int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
+ struct mlx4_vhcr *vhcr,
+ struct mlx4_cmd_mailbox *inbox,
+ struct mlx4_cmd_mailbox *outbox,
+ struct mlx4_cmd_info *cmd)
+{
+ int err;
+ int index = vhcr->in_modifier & 0xffff;
+
+ err = get_res(dev, slave, index, RES_COUNTER, NULL);
+ if (err)
+ return err;
+
+ err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
+ put_res(dev, slave, index, RES_COUNTER);
+ return err;
+}
+
+static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
+{
+ struct res_gid *rgid;
+ struct res_gid *tmp;
+ int err;
+ struct mlx4_qp qp; /* dummy for calling attach/detach */
+
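+ /* drop every multicast attachment this QP still holds on behalf of the slave */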
+ list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
+ qp.qpn = rqp->local_qpn;
+ err = mlx4_qp_detach_common(dev, &qp, rgid->gid, rgid->prot,
+ rgid->steer);
+ list_del(&rgid->list);
+ kfree(rgid);
+ }
+}
+
+static int _move_all_busy(struct mlx4_dev *dev, int slave,
+ enum mlx4_resource type, int print)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker =
+ &priv->mfunc.master.res_tracker;
+ struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
+ struct res_common *r;
+ struct res_common *tmp;
+ int busy;
+
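+ /* mark this slave's idle resources busy and flag them for removal;
+ * count the ones that are already busy elsewhere
+ */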
+ busy = 0;
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(r, tmp, rlist, list) {
+ if (r->owner == slave) {
+ if (!r->removing) {
+ if (r->state == RES_ANY_BUSY) {
+ if (print)
+ mlx4_dbg(dev,
+ "%s id 0x%x is busy\n",
+ ResourceType(type),
+ r->res_id);
+ ++busy;
+ } else {
+ r->from_state = r->state;
+ r->state = RES_ANY_BUSY;
+ r->removing = 1;
+ }
+ }
+ }
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+
+ return busy;
+}
+
+static int move_all_busy(struct mlx4_dev *dev, int slave,
+ enum mlx4_resource type)
+{
+ unsigned long begin;
+ int busy;
+
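+ /* retry for up to five seconds, then do a final pass that prints whatever is still busy */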
+ begin = jiffies;
+ do {
+ busy = _move_all_busy(dev, slave, type, 0);
+ if (time_after(jiffies, begin + 5 * HZ))
+ break;
+ if (busy)
+ cond_resched();
+ } while (busy);
+
+ if (busy)
+ busy = _move_all_busy(dev, slave, type, 1);
+
+ return busy;
+}
+static void rem_slave_qps(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *qp_list =
+ &tracker->slave_list[slave].res_list[RES_QP];
+ struct res_qp *qp;
+ struct res_qp *tmp;
+ int state;
+ u64 in_param;
+ int qpn;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_QP);
+ if (err)
+ mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy"
+ "for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (qp->com.owner == slave) {
+ qpn = qp->com.res_id;
+ detach_qp(dev, slave, qp);
+ state = qp->com.from_state;
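+ /* unwind the QP one state at a time: HW -> MAPPED -> RESERVED -> freed */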
+ while (state != 0) {
+ switch (state) {
+ case RES_QP_RESERVED:
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_QP],
+ qp->com.res_id);
+ list_del(&qp->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(qp);
+ state = 0;
+ break;
+ case RES_QP_MAPPED:
+ if (!valid_reserved(dev, slave, qpn))
+ __mlx4_qp_free_icm(dev, qpn);
+ state = RES_QP_RESERVED;
+ break;
+ case RES_QP_HW:
+ in_param = slave;
+ err = mlx4_cmd(dev, in_param,
+ qp->local_qpn, 2,
+ MLX4_CMD_2RST_QP,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ mlx4_dbg(dev, "rem_slave_qps: failed"
+ " to move slave %d qpn %d to"
+ " reset\n", slave,
+ qp->local_qpn);
+ atomic_dec(&qp->rcq->ref_count);
+ atomic_dec(&qp->scq->ref_count);
+ atomic_dec(&qp->mtt->ref_count);
+ if (qp->srq)
+ atomic_dec(&qp->srq->ref_count);
+ state = RES_QP_MAPPED;
+ break;
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *srq_list =
+ &tracker->slave_list[slave].res_list[RES_SRQ];
+ struct res_srq *srq;
+ struct res_srq *tmp;
+ int state;
+ u64 in_param;
+ LIST_HEAD(tlist);
+ int srqn;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_SRQ);
+ if (err)
+ mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
+ "busy for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (srq->com.owner == slave) {
+ srqn = srq->com.res_id;
+ state = srq->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_SRQ_ALLOCATED:
+ __mlx4_srq_free_icm(dev, srqn);
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_SRQ],
+ srqn);
+ list_del(&srq->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(srq);
+ state = 0;
+ break;
+
+ case RES_SRQ_HW:
+ in_param = slave;
+ err = mlx4_cmd(dev, in_param, srqn, 1,
+ MLX4_CMD_HW2SW_SRQ,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ mlx4_dbg(dev, "rem_slave_srqs: failed"
+ " to move slave %d srq %d to"
+ " SW ownership\n",
+ slave, srqn);
+
+ atomic_dec(&srq->mtt->ref_count);
+ if (srq->cq)
+ atomic_dec(&srq->cq->ref_count);
+ state = RES_SRQ_ALLOCATED;
+ break;
+
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *cq_list =
+ &tracker->slave_list[slave].res_list[RES_CQ];
+ struct res_cq *cq;
+ struct res_cq *tmp;
+ int state;
+ u64 in_param;
+ LIST_HEAD(tlist);
+ int cqn;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_CQ);
+ if (err)
+ mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
+ "busy for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
+ cqn = cq->com.res_id;
+ state = cq->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_CQ_ALLOCATED:
+ __mlx4_cq_free_icm(dev, cqn);
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_CQ],
+ cqn);
+ list_del(&cq->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(cq);
+ state = 0;
+ break;
+
+ case RES_CQ_HW:
+ in_param = slave;
+ err = mlx4_cmd(dev, in_param, cqn, 1,
+ MLX4_CMD_HW2SW_CQ,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ mlx4_dbg(dev, "rem_slave_cqs: failed"
+ " to move slave %d cq %d to"
+ " SW ownership\n",
+ slave, cqn);
+ atomic_dec(&cq->mtt->ref_count);
+ state = RES_CQ_ALLOCATED;
+ break;
+
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *mpt_list =
+ &tracker->slave_list[slave].res_list[RES_MPT];
+ struct res_mpt *mpt;
+ struct res_mpt *tmp;
+ int state;
+ u64 in_param;
+ LIST_HEAD(tlist);
+ int mptn;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_MPT);
+ if (err)
+ mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
+ "busy for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (mpt->com.owner == slave) {
+ mptn = mpt->com.res_id;
+ state = mpt->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_MPT_RESERVED:
+ __mlx4_mr_release(dev, mpt->key);
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_MPT],
+ mptn);
+ list_del(&mpt->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(mpt);
+ state = 0;
+ break;
+
+ case RES_MPT_MAPPED:
+ __mlx4_mr_free_icm(dev, mpt->key);
+ state = RES_MPT_RESERVED;
+ break;
+
+ case RES_MPT_HW:
+ in_param = slave;
+ err = mlx4_cmd(dev, in_param, mptn, 0,
+ MLX4_CMD_HW2SW_MPT,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ mlx4_dbg(dev, "rem_slave_mrs: failed"
+ " to move slave %d mpt %d to"
+ " SW ownership\n",
+ slave, mptn);
+ if (mpt->mtt)
+ atomic_dec(&mpt->mtt->ref_count);
+ state = RES_MPT_MAPPED;
+ break;
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker =
+ &priv->mfunc.master.res_tracker;
+ struct list_head *mtt_list =
+ &tracker->slave_list[slave].res_list[RES_MTT];
+ struct res_mtt *mtt;
+ struct res_mtt *tmp;
+ int state;
+ LIST_HEAD(tlist);
+ int base;
+ int err;
+
+ err = move_all_busy(dev, slave, RES_MTT);
+ if (err)
+ mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
+ "busy for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (mtt->com.owner == slave) {
+ base = mtt->com.res_id;
+ state = mtt->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_MTT_ALLOCATED:
+ __mlx4_free_mtt_range(dev, base,
+ mtt->order);
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_MTT],
+ base);
+ list_del(&mtt->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(mtt);
+ state = 0;
+ break;
+
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
+ struct list_head *eq_list =
+ &tracker->slave_list[slave].res_list[RES_EQ];
+ struct res_eq *eq;
+ struct res_eq *tmp;
+ int err;
+ int state;
+ LIST_HEAD(tlist);
+ int eqn;
+ struct mlx4_cmd_mailbox *mailbox;
+
+ err = move_all_busy(dev, slave, RES_EQ);
+ if (err)
+ mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
+ "busy for slave %d\n", slave);
+
+ spin_lock_irq(mlx4_tlock(dev));
+ list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
+ spin_unlock_irq(mlx4_tlock(dev));
+ if (eq->com.owner == slave) {
+ eqn = eq->com.res_id;
+ state = eq->com.from_state;
+ while (state != 0) {
+ switch (state) {
+ case RES_EQ_RESERVED:
+ spin_lock_irq(mlx4_tlock(dev));
+ radix_tree_delete(&tracker->res_tree[RES_EQ],
+ eqn);
+ list_del(&eq->com.list);
+ spin_unlock_irq(mlx4_tlock(dev));
+ kfree(eq);
+ state = 0;
+ break;
+
+ case RES_EQ_HW:
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox)) {
+ cond_resched();
+ continue;
+ }
+ err = mlx4_cmd_box(dev, slave, 0,
+ eqn & 0xff, 0,
+ MLX4_CMD_HW2SW_EQ,
+ MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_NATIVE);
+ if (err)
+ mlx4_dbg(dev, "rem_slave_eqs: failed"
+ " to move slave %d eq %d to"
+ " SW ownership\n", slave, eqn);
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ if (!err) {
+ atomic_dec(&eq->mtt->ref_count);
+ state = RES_EQ_RESERVED;
+ }
+ break;
+
+ default:
+ state = 0;
+ }
+ }
+ }
+ spin_lock_irq(mlx4_tlock(dev));
+ }
+ spin_unlock_irq(mlx4_tlock(dev));
+}
+
+void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
+ /*VLAN*/
+ rem_slave_macs(dev, slave);
+ rem_slave_qps(dev, slave);
+ rem_slave_srqs(dev, slave);
+ rem_slave_cqs(dev, slave);
+ rem_slave_mrs(dev, slave);
+ rem_slave_eqs(dev, slave);
+ rem_slave_mtts(dev, slave);
+ mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx4/sense.c b/drivers/net/ethernet/mellanox/mlx4/sense.c
index e2337a7..8024982 100644
--- a/drivers/net/ethernet/mellanox/mlx4/sense.c
+++ b/drivers/net/ethernet/mellanox/mlx4/sense.c
@@ -45,7 +45,8 @@ int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
int err = 0;
err = mlx4_cmd_imm(dev, 0, &out_param, port, 0,
- MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_SENSE_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
if (err) {
mlx4_err(dev, "Sense command failed for port: %d\n", port);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index 9cbf3fc..feda6c0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -31,6 +31,8 @@
* SOFTWARE.
*/
+#include <linux/init.h>
+
#include <linux/mlx4/cmd.h>
#include <linux/export.h>
#include <linux/gfp.h>
@@ -38,26 +40,6 @@
#include "mlx4.h"
#include "icm.h"
-struct mlx4_srq_context {
- __be32 state_logsize_srqn;
- u8 logstride;
- u8 reserved1;
- __be16 xrcd;
- __be32 pg_offset_cqn;
- u32 reserved2;
- u8 log_page_size;
- u8 reserved3[2];
- u8 mtt_base_addr_h;
- __be32 mtt_base_addr_l;
- __be32 pd;
- __be16 limit_watermark;
- __be16 wqe_cnt;
- u16 reserved4;
- __be16 wqe_counter;
- u32 reserved5;
- __be64 db_rec_addr;
-};
-
void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
@@ -85,8 +67,9 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int srq_num)
{
- return mlx4_cmd(dev, mailbox->dma, srq_num, 0, MLX4_CMD_SW2HW_SRQ,
- MLX4_CMD_TIME_CLASS_A);
+ return mlx4_cmd(dev, mailbox->dma, srq_num, 0,
+ MLX4_CMD_SW2HW_SRQ, MLX4_CMD_TIME_CLASS_A,
+ MLX4_CMD_WRAPPED);
}
static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -94,48 +77,109 @@ static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
{
return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
mailbox ? 0 : 1, MLX4_CMD_HW2SW_SRQ,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num, int limit_watermark)
{
return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ,
- MLX4_CMD_TIME_CLASS_B);
+ MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
}
static int mlx4_QUERY_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
int srq_num)
{
return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0, MLX4_CMD_QUERY_SRQ,
- MLX4_CMD_TIME_CLASS_A);
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
}
-int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
- struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
+int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
{
struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
- struct mlx4_cmd_mailbox *mailbox;
- struct mlx4_srq_context *srq_context;
- u64 mtt_addr;
int err;
- srq->srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
- if (srq->srqn == -1)
+
+ *srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
+ if (*srqn == -1)
return -ENOMEM;
- err = mlx4_table_get(dev, &srq_table->table, srq->srqn);
+ err = mlx4_table_get(dev, &srq_table->table, *srqn);
if (err)
goto err_out;
- err = mlx4_table_get(dev, &srq_table->cmpt_table, srq->srqn);
+ err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn);
if (err)
goto err_put;
+ return 0;
+
+err_put:
+ mlx4_table_put(dev, &srq_table->table, *srqn);
+
+err_out:
+ mlx4_bitmap_free(&srq_table->bitmap, *srqn);
+ return err;
+}
+
+static int mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
+{
+ u64 out_param;
+ int err;
+
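+ /* in multi-function mode go through the wrapped ALLOC_RES command so the
+ * master's resource tracker accounts for the SRQ; otherwise allocate directly
+ */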
+ if (mlx4_is_mfunc(dev)) {
+ err = mlx4_cmd_imm(dev, 0, &out_param, RES_SRQ,
+ RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_ALLOC_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
+ if (!err)
+ *srqn = get_param_l(&out_param);
+
+ return err;
+ }
+ return __mlx4_srq_alloc_icm(dev, srqn);
+}
+
+void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
+{
+ struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+
+ mlx4_table_put(dev, &srq_table->cmpt_table, srqn);
+ mlx4_table_put(dev, &srq_table->table, srqn);
+ mlx4_bitmap_free(&srq_table->bitmap, srqn);
+}
+
+static void mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn)
+{
+ u64 in_param;
+
+ if (mlx4_is_mfunc(dev)) {
+ set_param_l(&in_param, srqn);
+ if (mlx4_cmd(dev, in_param, RES_SRQ, RES_OP_RESERVE_AND_MAP,
+ MLX4_CMD_FREE_RES,
+ MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED))
+ mlx4_warn(dev, "Failed freeing cq:%d\n", srqn);
+ return;
+ }
+ __mlx4_srq_free_icm(dev, srqn);
+}
+
+int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcd,
+ struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq)
+{
+ struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_srq_context *srq_context;
+ u64 mtt_addr;
+ int err;
+
+ err = mlx4_srq_alloc_icm(dev, &srq->srqn);
+ if (err)
+ return err;
spin_lock_irq(&srq_table->lock);
err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
spin_unlock_irq(&srq_table->lock);
if (err)
- goto err_cmpt_put;
+ goto err_icm;
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
@@ -174,15 +218,8 @@ err_radix:
radix_tree_delete(&srq_table->tree, srq->srqn);
spin_unlock_irq(&srq_table->lock);
-err_cmpt_put:
- mlx4_table_put(dev, &srq_table->cmpt_table, srq->srqn);
-
-err_put:
- mlx4_table_put(dev, &srq_table->table, srq->srqn);
-
-err_out:
- mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
-
+err_icm:
+ mlx4_srq_free_icm(dev, srq->srqn);
return err;
}
EXPORT_SYMBOL_GPL(mlx4_srq_alloc);
@@ -204,8 +241,7 @@ void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
complete(&srq->free);
wait_for_completion(&srq->free);
- mlx4_table_put(dev, &srq_table->table, srq->srqn);
- mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
+ mlx4_srq_free_icm(dev, srq->srqn);
}
EXPORT_SYMBOL_GPL(mlx4_srq_free);
@@ -245,6 +281,8 @@ int mlx4_init_srq_table(struct mlx4_dev *dev)
spin_lock_init(&srq_table->lock);
INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);
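+ /* slaves do not manage the SRQ bitmap; allocation is delegated to the master */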
+ if (mlx4_is_slave(dev))
+ return 0;
err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
dev->caps.num_srqs - 1, dev->caps.reserved_srqs, 0);
@@ -256,5 +294,7 @@ int mlx4_init_srq_table(struct mlx4_dev *dev)
void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
{
+ if (mlx4_is_slave(dev))
+ return;
mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
}
diff --git a/drivers/net/ethernet/micrel/Kconfig b/drivers/net/ethernet/micrel/Kconfig
index d10c2e1..fe42fc0 100644
--- a/drivers/net/ethernet/micrel/Kconfig
+++ b/drivers/net/ethernet/micrel/Kconfig
@@ -42,6 +42,7 @@ config KS8851
select NET_CORE
select MII
select CRC32
+ select EEPROM_93CX6
---help---
SPI driver for Micrel KS8851 SPI attached network chip.
diff --git a/drivers/net/ethernet/micrel/ks8842.c b/drivers/net/ethernet/micrel/ks8842.c
index 4a6ae05..0a85690 100644
--- a/drivers/net/ethernet/micrel/ks8842.c
+++ b/drivers/net/ethernet/micrel/ks8842.c
@@ -459,7 +459,7 @@ static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;
ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
- &ctl->sg, 1, DMA_TO_DEVICE,
+ &ctl->sg, 1, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
if (!ctl->adesc)
return NETDEV_TX_BUSY;
@@ -571,7 +571,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev)
sg_dma_len(sg) = DMA_BUFFER_SIZE;
ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
- sg, 1, DMA_FROM_DEVICE,
+ sg, 1, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
if (!ctl->adesc)
@@ -1264,18 +1264,7 @@ static struct platform_driver ks8842_platform_driver = {
.remove = ks8842_remove,
};
-static int __init ks8842_init(void)
-{
- return platform_driver_register(&ks8842_platform_driver);
-}
-
-static void __exit ks8842_exit(void)
-{
- platform_driver_unregister(&ks8842_platform_driver);
-}
-
-module_init(ks8842_init);
-module_exit(ks8842_exit);
+module_platform_driver(ks8842_platform_driver);
MODULE_DESCRIPTION("Timberdale KS8842 ethernet driver");
MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>");
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index f56743a..0c3e400 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -22,6 +22,7 @@
#include <linux/cache.h>
#include <linux/crc32.h>
#include <linux/mii.h>
+#include <linux/eeprom_93cx6.h>
#include <linux/spi/spi.h>
@@ -82,6 +83,7 @@ union ks8851_tx_hdr {
* @rc_ccr: Cached copy of KS_CCR.
* @rc_rxqcr: Cached copy of KS_RXQCR.
* @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
+ * @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
*
* The @lock ensures that the chip is protected when certain operations are
* in progress. When the read or write packet transfer is in progress, most
@@ -128,6 +130,8 @@ struct ks8851_net {
struct spi_message spi_msg2;
struct spi_transfer spi_xfer1;
struct spi_transfer spi_xfer2[2];
+
+ struct eeprom_93cx6 eeprom;
};
static int msg_enable;
@@ -343,6 +347,26 @@ static void ks8851_soft_reset(struct ks8851_net *ks, unsigned op)
}
/**
+ * ks8851_set_powermode - set power mode of the device
+ * @ks: The device state
+ * @pwrmode: The power mode value to write to KS_PMECR.
+ *
+ * Change the power mode of the chip.
+ */
+static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
+{
+ unsigned pmecr;
+
+ netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
+
+ pmecr = ks8851_rdreg16(ks, KS_PMECR);
+ pmecr &= ~PMECR_PM_MASK;
+ pmecr |= pwrmode;
+
+ ks8851_wrreg16(ks, KS_PMECR, pmecr);
+}
+
+/**
* ks8851_write_mac_addr - write mac address to device registers
* @dev: The network device
*
@@ -358,8 +382,15 @@ static int ks8851_write_mac_addr(struct net_device *dev)
mutex_lock(&ks->lock);
+ /*
+ * Wake up chip in case it was powered off when stopped; otherwise,
+ * the first write to the MAC address does not take effect.
+ */
+ ks8851_set_powermode(ks, PMECR_PM_NORMAL);
for (i = 0; i < ETH_ALEN; i++)
ks8851_wrreg8(ks, KS_MAR(i), dev->dev_addr[i]);
+ if (!netif_running(dev))
+ ks8851_set_powermode(ks, PMECR_PM_SOFTDOWN);
mutex_unlock(&ks->lock);
@@ -367,21 +398,47 @@ static int ks8851_write_mac_addr(struct net_device *dev)
}
/**
+ * ks8851_read_mac_addr - read mac address from device registers
+ * @dev: The network device
+ *
+ * Update our copy of the KS8851 MAC address from the registers of @dev.
+ */
+static void ks8851_read_mac_addr(struct net_device *dev)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ int i;
+
+ mutex_lock(&ks->lock);
+
+ for (i = 0; i < ETH_ALEN; i++)
+ dev->dev_addr[i] = ks8851_rdreg8(ks, KS_MAR(i));
+
+ mutex_unlock(&ks->lock);
+}
+
+/**
* ks8851_init_mac - initialise the mac address
* @ks: The device structure
*
* Get or create the initial mac address for the device and then set that
- * into the station address register. Currently we assume that the device
- * does not have a valid mac address in it, and so we use random_ether_addr()
+ * into the station address register. If there is an EEPROM present, then
+ * we try that. If no valid mac address is found we use random_ether_addr()
* to create a new one.
- *
- * In future, the driver should check to see if the device has an EEPROM
- * attached and whether that has a valid ethernet address in it.
*/
static void ks8851_init_mac(struct ks8851_net *ks)
{
struct net_device *dev = ks->netdev;
+ /* first, try reading what we've got already */
+ if (ks->rc_ccr & CCR_EEPROM) {
+ ks8851_read_mac_addr(dev);
+ if (is_valid_ether_addr(dev->dev_addr))
+ return;
+
+ netdev_err(ks->netdev, "invalid mac address read %pM\n",
+ dev->dev_addr);
+ }
+
random_ether_addr(dev->dev_addr);
ks8851_write_mac_addr(dev);
}
@@ -526,7 +583,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
ks8851_dbg_dumpkkt(ks, rxpkt);
skb->protocol = eth_type_trans(skb, ks->netdev);
- netif_rx(skb);
+ netif_rx_ni(skb);
ks->netdev->stats.rx_packets++;
ks->netdev->stats.rx_bytes += rxlen;
@@ -739,26 +796,6 @@ static void ks8851_tx_work(struct work_struct *work)
}
/**
- * ks8851_set_powermode - set power mode of the device
- * @ks: The device state
- * @pwrmode: The power mode value to write to KS_PMECR.
- *
- * Change the power mode of the chip.
- */
-static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
-{
- unsigned pmecr;
-
- netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
-
- pmecr = ks8851_rdreg16(ks, KS_PMECR);
- pmecr &= ~PMECR_PM_MASK;
- pmecr |= pwrmode;
-
- ks8851_wrreg16(ks, KS_PMECR, pmecr);
-}
-
-/**
* ks8851_net_open - open network device
* @dev: The network device being opened.
*
@@ -1038,234 +1075,6 @@ static const struct net_device_ops ks8851_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-/* Companion eeprom access */
-
-enum { /* EEPROM programming states */
- EEPROM_CONTROL,
- EEPROM_ADDRESS,
- EEPROM_DATA,
- EEPROM_COMPLETE
-};
-
-/**
- * ks8851_eeprom_read - read a 16bits word in ks8851 companion EEPROM
- * @dev: The network device the PHY is on.
- * @addr: EEPROM address to read
- *
- * eeprom_size: used to define the data coding length. Can be changed
- * through debug-fs.
- *
- * Programs a read on the EEPROM using ks8851 EEPROM SW access feature.
- * Warning: The READ feature is not supported on ks8851 revision 0.
- *
- * Rough programming model:
- * - on period start: set clock high and read value on bus
- * - on period / 2: set clock low and program value on bus
- * - start on period / 2
- */
-unsigned int ks8851_eeprom_read(struct net_device *dev, unsigned int addr)
-{
- struct ks8851_net *ks = netdev_priv(dev);
- int eepcr;
- int ctrl = EEPROM_OP_READ;
- int state = EEPROM_CONTROL;
- int bit_count = EEPROM_OP_LEN - 1;
- unsigned int data = 0;
- int dummy;
- unsigned int addr_len;
-
- addr_len = (ks->eeprom_size == 128) ? 6 : 8;
-
- /* start transaction: chip select high, authorize write */
- mutex_lock(&ks->lock);
- eepcr = EEPCR_EESA | EEPCR_EESRWA;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- eepcr |= EEPCR_EECS;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
- while (state != EEPROM_COMPLETE) {
- /* falling clock period starts... */
- /* set EED_IO pin for control and address */
- eepcr &= ~EEPCR_EEDO;
- switch (state) {
- case EEPROM_CONTROL:
- eepcr |= ((ctrl >> bit_count) & 1) << 2;
- if (bit_count-- <= 0) {
- bit_count = addr_len - 1;
- state = EEPROM_ADDRESS;
- }
- break;
- case EEPROM_ADDRESS:
- eepcr |= ((addr >> bit_count) & 1) << 2;
- bit_count--;
- break;
- case EEPROM_DATA:
- /* Change to receive mode */
- eepcr &= ~EEPCR_EESRWA;
- break;
- }
-
- /* lower clock */
- eepcr &= ~EEPCR_EESCK;
-
- mutex_lock(&ks->lock);
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
- /* waitread period / 2 */
- udelay(EEPROM_SK_PERIOD / 2);
-
- /* rising clock period starts... */
-
- /* raise clock */
- mutex_lock(&ks->lock);
- eepcr |= EEPCR_EESCK;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
- /* Manage read */
- switch (state) {
- case EEPROM_ADDRESS:
- if (bit_count < 0) {
- bit_count = EEPROM_DATA_LEN - 1;
- state = EEPROM_DATA;
- }
- break;
- case EEPROM_DATA:
- mutex_lock(&ks->lock);
- dummy = ks8851_rdreg16(ks, KS_EEPCR);
- mutex_unlock(&ks->lock);
- data |= ((dummy >> EEPCR_EESB_OFFSET) & 1) << bit_count;
- if (bit_count-- <= 0)
- state = EEPROM_COMPLETE;
- break;
- }
-
- /* wait period / 2 */
- udelay(EEPROM_SK_PERIOD / 2);
- }
-
- /* close transaction */
- mutex_lock(&ks->lock);
- eepcr &= ~EEPCR_EECS;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- eepcr = 0;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
- return data;
-}
-
-/**
- * ks8851_eeprom_write - write a 16bits word in ks8851 companion EEPROM
- * @dev: The network device the PHY is on.
- * @op: operand (can be WRITE, EWEN, EWDS)
- * @addr: EEPROM address to write
- * @data: data to write
- *
- * eeprom_size: used to define the data coding length. Can be changed
- * through debug-fs.
- *
- * Programs a write on the EEPROM using ks8851 EEPROM SW access feature.
- *
- * Note that a write enable is required before writing data.
- *
- * Rough programming model:
- * - on period start: set clock high
- * - on period / 2: set clock low and program value on bus
- * - start on period / 2
- */
-void ks8851_eeprom_write(struct net_device *dev, unsigned int op,
- unsigned int addr, unsigned int data)
-{
- struct ks8851_net *ks = netdev_priv(dev);
- int eepcr;
- int state = EEPROM_CONTROL;
- int bit_count = EEPROM_OP_LEN - 1;
- unsigned int addr_len;
-
- addr_len = (ks->eeprom_size == 128) ? 6 : 8;
-
- switch (op) {
- case EEPROM_OP_EWEN:
- addr = 0x30;
- break;
- case EEPROM_OP_EWDS:
- addr = 0;
- break;
- }
-
- /* start transaction: chip select high, authorize write */
- mutex_lock(&ks->lock);
- eepcr = EEPCR_EESA | EEPCR_EESRWA;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- eepcr |= EEPCR_EECS;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
- while (state != EEPROM_COMPLETE) {
- /* falling clock period starts... */
- /* set EED_IO pin for control and address */
- eepcr &= ~EEPCR_EEDO;
- switch (state) {
- case EEPROM_CONTROL:
- eepcr |= ((op >> bit_count) & 1) << 2;
- if (bit_count-- <= 0) {
- bit_count = addr_len - 1;
- state = EEPROM_ADDRESS;
- }
- break;
- case EEPROM_ADDRESS:
- eepcr |= ((addr >> bit_count) & 1) << 2;
- if (bit_count-- <= 0) {
- if (op == EEPROM_OP_WRITE) {
- bit_count = EEPROM_DATA_LEN - 1;
- state = EEPROM_DATA;
- } else {
- state = EEPROM_COMPLETE;
- }
- }
- break;
- case EEPROM_DATA:
- eepcr |= ((data >> bit_count) & 1) << 2;
- if (bit_count-- <= 0)
- state = EEPROM_COMPLETE;
- break;
- }
-
- /* lower clock */
- eepcr &= ~EEPCR_EESCK;
-
- mutex_lock(&ks->lock);
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
- /* wait period / 2 */
- udelay(EEPROM_SK_PERIOD / 2);
-
- /* rising clock period starts... */
-
- /* raise clock */
- eepcr |= EEPCR_EESCK;
- mutex_lock(&ks->lock);
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
- /* wait period / 2 */
- udelay(EEPROM_SK_PERIOD / 2);
- }
-
- /* close transaction */
- mutex_lock(&ks->lock);
- eepcr &= ~EEPCR_EECS;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- eepcr = 0;
- ks8851_wrreg16(ks, KS_EEPCR, eepcr);
- mutex_unlock(&ks->lock);
-
-}
-
/* ethtool support */
static void ks8851_get_drvinfo(struct net_device *dev,
@@ -1312,115 +1121,141 @@ static int ks8851_nway_reset(struct net_device *dev)
return mii_nway_restart(&ks->mii);
}
-static int ks8851_get_eeprom_len(struct net_device *dev)
-{
- struct ks8851_net *ks = netdev_priv(dev);
- return ks->eeprom_size;
-}
+/* EEPROM support */
-static int ks8851_get_eeprom(struct net_device *dev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
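+/* glue the generic eeprom_93cx6 bit-bang callbacks onto the KS8851 EEPCR register bits */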
+static void ks8851_eeprom_regread(struct eeprom_93cx6 *ee)
{
- struct ks8851_net *ks = netdev_priv(dev);
- u16 *eeprom_buff;
- int first_word;
- int last_word;
- int ret_val = 0;
- u16 i;
-
- if (eeprom->len == 0)
- return -EINVAL;
+ struct ks8851_net *ks = ee->data;
+ unsigned val;
- if (eeprom->len > ks->eeprom_size)
- return -EINVAL;
+ val = ks8851_rdreg16(ks, KS_EEPCR);
- eeprom->magic = ks8851_rdreg16(ks, KS_CIDER);
+ ee->reg_data_out = (val & EEPCR_EESB) ? 1 : 0;
+ ee->reg_data_clock = (val & EEPCR_EESCK) ? 1 : 0;
+ ee->reg_chip_select = (val & EEPCR_EECS) ? 1 : 0;
+}
- first_word = eeprom->offset >> 1;
- last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+static void ks8851_eeprom_regwrite(struct eeprom_93cx6 *ee)
+{
+ struct ks8851_net *ks = ee->data;
+ unsigned val = EEPCR_EESA; /* default - eeprom access on */
+
+ if (ee->drive_data)
+ val |= EEPCR_EESRWA;
+ if (ee->reg_data_in)
+ val |= EEPCR_EEDO;
+ if (ee->reg_data_clock)
+ val |= EEPCR_EESCK;
+ if (ee->reg_chip_select)
+ val |= EEPCR_EECS;
+
+ ks8851_wrreg16(ks, KS_EEPCR, val);
+}
- eeprom_buff = kmalloc(sizeof(u16) *
- (last_word - first_word + 1), GFP_KERNEL);
- if (!eeprom_buff)
- return -ENOMEM;
+/**
+ * ks8851_eeprom_claim - claim device EEPROM and activate the interface
+ * @ks: The network device state.
+ *
+ * Check for the presence of an EEPROM, and then activate software access
+ * to the device.
+ */
+static int ks8851_eeprom_claim(struct ks8851_net *ks)
+{
+ if (!(ks->rc_ccr & CCR_EEPROM))
+ return -ENOENT;
- for (i = 0; i < last_word - first_word + 1; i++)
- eeprom_buff[i] = ks8851_eeprom_read(dev, first_word + 1);
+ mutex_lock(&ks->lock);
- /* Device's eeprom is little-endian, word addressable */
- for (i = 0; i < last_word - first_word + 1; i++)
- le16_to_cpus(&eeprom_buff[i]);
+ /* start with clock low, cs high */
+ ks8851_wrreg16(ks, KS_EEPCR, EEPCR_EESA | EEPCR_EECS);
+ return 0;
+}
- memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
- kfree(eeprom_buff);
+/**
+ * ks8851_eeprom_release - release the EEPROM interface
+ * @ks: The device state
+ *
+ * Release the software access to the device EEPROM
+ */
+static void ks8851_eeprom_release(struct ks8851_net *ks)
+{
+ unsigned val = ks8851_rdreg16(ks, KS_EEPCR);
- return ret_val;
+ ks8851_wrreg16(ks, KS_EEPCR, val & ~EEPCR_EESA);
+ mutex_unlock(&ks->lock);
}
+#define KS_EEPROM_MAGIC (0x00008851)
+
static int ks8851_set_eeprom(struct net_device *dev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
+ struct ethtool_eeprom *ee, u8 *data)
{
struct ks8851_net *ks = netdev_priv(dev);
- u16 *eeprom_buff;
- void *ptr;
- int max_len;
- int first_word;
- int last_word;
- int ret_val = 0;
- u16 i;
-
- if (eeprom->len == 0)
- return -EOPNOTSUPP;
-
- if (eeprom->len > ks->eeprom_size)
+ int offset = ee->offset;
+ int len = ee->len;
+ u16 tmp;
+
+ /* currently only support byte writing */
+ if (len != 1)
return -EINVAL;
- if (eeprom->magic != ks8851_rdreg16(ks, KS_CIDER))
- return -EFAULT;
+ if (ee->magic != KS_EEPROM_MAGIC)
+ return -EINVAL;
- first_word = eeprom->offset >> 1;
- last_word = (eeprom->offset + eeprom->len - 1) >> 1;
- max_len = (last_word - first_word + 1) * 2;
- eeprom_buff = kmalloc(max_len, GFP_KERNEL);
- if (!eeprom_buff)
- return -ENOMEM;
+ if (ks8851_eeprom_claim(ks))
+ return -ENOENT;
- ptr = (void *)eeprom_buff;
+ eeprom_93cx6_wren(&ks->eeprom, true);
- if (eeprom->offset & 1) {
- /* need read/modify/write of first changed EEPROM word */
- /* only the second byte of the word is being modified */
- eeprom_buff[0] = ks8851_eeprom_read(dev, first_word);
- ptr++;
+ /* ethtool currently only supports writing bytes, which means
+ * we have to read/modify/write our 16bit EEPROMs */
+
+ eeprom_93cx6_read(&ks->eeprom, offset/2, &tmp);
+
+ if (offset & 1) {
+ tmp &= 0xff;
+ tmp |= *data << 8;
+ } else {
+ tmp &= 0xff00;
+ tmp |= *data;
}
- if ((eeprom->offset + eeprom->len) & 1)
- /* need read/modify/write of last changed EEPROM word */
- /* only the first byte of the word is being modified */
- eeprom_buff[last_word - first_word] =
- ks8851_eeprom_read(dev, last_word);
+ eeprom_93cx6_write(&ks->eeprom, offset/2, tmp);
+ eeprom_93cx6_wren(&ks->eeprom, false);
- /* Device's eeprom is little-endian, word addressable */
- le16_to_cpus(&eeprom_buff[0]);
- le16_to_cpus(&eeprom_buff[last_word - first_word]);
+ ks8851_eeprom_release(ks);
- memcpy(ptr, bytes, eeprom->len);
+ return 0;
+}
- for (i = 0; i < last_word - first_word + 1; i++)
- eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+static int ks8851_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+ int offset = ee->offset;
+ int len = ee->len;
- ks8851_eeprom_write(dev, EEPROM_OP_EWEN, 0, 0);
+ /* must be 2 byte aligned */
+ if (len & 1 || offset & 1)
+ return -EINVAL;
- for (i = 0; i < last_word - first_word + 1; i++) {
- ks8851_eeprom_write(dev, EEPROM_OP_WRITE, first_word + i,
- eeprom_buff[i]);
- mdelay(EEPROM_WRITE_TIME);
- }
+ if (ks8851_eeprom_claim(ks))
+ return -ENOENT;
+
+ ee->magic = KS_EEPROM_MAGIC;
- ks8851_eeprom_write(dev, EEPROM_OP_EWDS, 0, 0);
+ eeprom_93cx6_multiread(&ks->eeprom, offset/2, (__le16 *)data, len/2);
+ ks8851_eeprom_release(ks);
- kfree(eeprom_buff);
- return ret_val;
+ return 0;
+}
+
+static int ks8851_get_eeprom_len(struct net_device *dev)
+{
+ struct ks8851_net *ks = netdev_priv(dev);
+
+ /* currently, we assume a 93C46 is attached, so return 128 */
+ return ks->rc_ccr & CCR_EEPROM ? 128 : 0;
}
static const struct ethtool_ops ks8851_ethtool_ops = {
@@ -1613,6 +1448,13 @@ static int __devinit ks8851_probe(struct spi_device *spi)
spi_message_add_tail(&ks->spi_xfer2[0], &ks->spi_msg2);
spi_message_add_tail(&ks->spi_xfer2[1], &ks->spi_msg2);
+ /* setup EEPROM state */
+
+ ks->eeprom.data = ks;
+ ks->eeprom.width = PCI_EEPROM_WIDTH_93C46;
+ ks->eeprom.register_read = ks8851_eeprom_regread;
+ ks->eeprom.register_write = ks8851_eeprom_regwrite;
+
/* setup mii state */
ks->mii.dev = ndev;
ks->mii.phy_id = 1,
@@ -1674,9 +1516,10 @@ static int __devinit ks8851_probe(struct spi_device *spi)
goto err_netdev;
}
- netdev_info(ndev, "revision %d, MAC %pM, IRQ %d\n",
+ netdev_info(ndev, "revision %d, MAC %pM, IRQ %d, %s EEPROM\n",
CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)),
- ndev->dev_addr, ndev->irq);
+ ndev->dev_addr, ndev->irq,
+ ks->rc_ccr & CCR_EEPROM ? "has" : "no");
return 0;
diff --git a/drivers/net/ethernet/micrel/ks8851.h b/drivers/net/ethernet/micrel/ks8851.h
index 537fb06e..b0fae86 100644
--- a/drivers/net/ethernet/micrel/ks8851.h
+++ b/drivers/net/ethernet/micrel/ks8851.h
@@ -16,7 +16,7 @@
#define CCR_32PIN (1 << 0)
/* MAC address registers */
-#define KS_MAR(_m) 0x15 - (_m)
+#define KS_MAR(_m) (0x15 - (_m))
#define KS_MARL 0x10
#define KS_MARM 0x12
#define KS_MARH 0x14
@@ -27,22 +27,11 @@
#define KS_EEPCR 0x22
#define EEPCR_EESRWA (1 << 5)
#define EEPCR_EESA (1 << 4)
-#define EEPCR_EESB_OFFSET 3
-#define EEPCR_EESB (1 << EEPCR_EESB_OFFSET)
+#define EEPCR_EESB (1 << 3)
#define EEPCR_EEDO (1 << 2)
#define EEPCR_EESCK (1 << 1)
#define EEPCR_EECS (1 << 0)
-#define EEPROM_OP_LEN 3 /* bits:*/
-#define EEPROM_OP_READ 0x06
-#define EEPROM_OP_EWEN 0x04
-#define EEPROM_OP_WRITE 0x05
-#define EEPROM_OP_EWDS 0x14
-
-#define EEPROM_DATA_LEN 16 /* 16 bits EEPROM */
-#define EEPROM_WRITE_TIME 4 /* wrt ack time in ms */
-#define EEPROM_SK_PERIOD 400 /* in us */
-
#define KS_MBIR 0x24
#define MBIR_TXMBF (1 << 12)
#define MBIR_TXMBFA (1 << 11)
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index d19c849..2784bc7 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -394,7 +394,6 @@ union ks_tx_hdr {
* @msg_enable : The message flags controlling driver output (see ethtool).
* @frame_cnt : number of frames received.
* @bus_width : i/o bus width.
- * @irq : irq number assigned to this device.
* @rc_rxqcr : Cached copy of KS_RXQCR.
* @rc_txcr : Cached copy of KS_TXCR.
* @rc_ier : Cached copy of KS_IER.
@@ -441,7 +440,6 @@ struct ks_net {
u32 msg_enable;
u32 frame_cnt;
int bus_width;
- int irq;
u16 rc_rxqcr;
u16 rc_txcr;
@@ -907,10 +905,10 @@ static int ks_net_open(struct net_device *netdev)
netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__);
/* reset the HW */
- err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);
+ err = request_irq(netdev->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);
if (err) {
- pr_err("Failed to request IRQ: %d: %d\n", ks->irq, err);
+ pr_err("Failed to request IRQ: %d: %d\n", netdev->irq, err);
return err;
}
@@ -955,7 +953,7 @@ static int ks_net_stop(struct net_device *netdev)
/* set powermode to soft power down to save power */
ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
- free_irq(ks->irq, netdev);
+ free_irq(netdev->irq, netdev);
mutex_unlock(&ks->lock);
return 0;
}
@@ -1500,8 +1498,7 @@ static int ks_hw_init(struct ks_net *ks)
ks->all_mcast = 0;
ks->mcast_lst_size = 0;
- ks->frame_head_info = (struct type_frame_head *) \
- kmalloc(MHEADER_SIZE, GFP_KERNEL);
+ ks->frame_head_info = kmalloc(MHEADER_SIZE, GFP_KERNEL);
if (!ks->frame_head_info) {
pr_err("Error: Fail to allocate frame memory\n");
return false;
@@ -1546,10 +1543,10 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
if (!ks->hw_addr_cmd)
goto err_ioremap1;
- ks->irq = platform_get_irq(pdev, 0);
+ netdev->irq = platform_get_irq(pdev, 0);
- if (ks->irq < 0) {
- err = ks->irq;
+ if ((int)netdev->irq < 0) {
+ err = netdev->irq;
goto err_get_irq;
}
@@ -1659,18 +1656,7 @@ static struct platform_driver ks8851_platform_driver = {
.remove = __devexit_p(ks8851_remove),
};
-static int __init ks8851_init(void)
-{
- return platform_driver_register(&ks8851_platform_driver);
-}
-
-static void __exit ks8851_exit(void)
-{
- platform_driver_unregister(&ks8851_platform_driver);
-}
-
-module_init(ks8851_init);
-module_exit(ks8851_exit);
+module_platform_driver(ks8851_platform_driver);
MODULE_DESCRIPTION("KS8851 MLL Network driver");
MODULE_AUTHOR("David Choi <david.choi@micrel.com>");
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 7ece990..e52cd31 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -743,11 +743,10 @@
/* Change default LED mode. */
#define SET_DEFAULT_LED LED_SPEED_DUPLEX_ACT
-#define MAC_ADDR_LEN 6
-#define MAC_ADDR_ORDER(i) (MAC_ADDR_LEN - 1 - (i))
+#define MAC_ADDR_ORDER(i) (ETH_ALEN - 1 - (i))
#define MAX_ETHERNET_BODY_SIZE 1500
-#define ETHERNET_HEADER_SIZE 14
+#define ETHERNET_HEADER_SIZE (14 + VLAN_HLEN)
#define MAX_ETHERNET_PACKET_SIZE \
(MAX_ETHERNET_BODY_SIZE + ETHERNET_HEADER_SIZE)
@@ -1043,7 +1042,7 @@ enum {
* @valid: Valid setting indicating the entry is being used.
*/
struct ksz_mac_table {
- u8 mac_addr[MAC_ADDR_LEN];
+ u8 mac_addr[ETH_ALEN];
u16 vid;
u8 fid;
u8 ports;
@@ -1187,8 +1186,8 @@ struct ksz_switch {
u8 diffserv[DIFFSERV_ENTRIES];
u8 p_802_1p[PRIO_802_1P_ENTRIES];
- u8 br_addr[MAC_ADDR_LEN];
- u8 other_addr[MAC_ADDR_LEN];
+ u8 br_addr[ETH_ALEN];
+ u8 other_addr[ETH_ALEN];
u8 broad_per;
u8 member;
@@ -1292,14 +1291,14 @@ struct ksz_hw {
int tx_int_mask;
int tx_size;
- u8 perm_addr[MAC_ADDR_LEN];
- u8 override_addr[MAC_ADDR_LEN];
- u8 address[ADDITIONAL_ENTRIES][MAC_ADDR_LEN];
+ u8 perm_addr[ETH_ALEN];
+ u8 override_addr[ETH_ALEN];
+ u8 address[ADDITIONAL_ENTRIES][ETH_ALEN];
u8 addr_list_size;
u8 mac_override;
u8 promiscuous;
u8 all_multi;
- u8 multi_list[MAX_MULTICAST_LIST][MAC_ADDR_LEN];
+ u8 multi_list[MAX_MULTICAST_LIST][ETH_ALEN];
u8 multi_bits[HW_MULTICAST_SIZE];
u8 multi_list_size;
@@ -3654,7 +3653,7 @@ static void hw_add_wol_bcast(struct ksz_hw *hw)
static const u8 mask[] = { 0x3F };
static const u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
- hw_set_wol_frame(hw, 2, 1, mask, MAC_ADDR_LEN, pattern);
+ hw_set_wol_frame(hw, 2, 1, mask, ETH_ALEN, pattern);
}
/**
@@ -3689,7 +3688,7 @@ static void hw_add_wol_ucast(struct ksz_hw *hw)
{
static const u8 mask[] = { 0x3F };
- hw_set_wol_frame(hw, 0, 1, mask, MAC_ADDR_LEN, hw->override_addr);
+ hw_set_wol_frame(hw, 0, 1, mask, ETH_ALEN, hw->override_addr);
}
/**
@@ -4055,7 +4054,7 @@ static void hw_set_addr(struct ksz_hw *hw)
{
int i;
- for (i = 0; i < MAC_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
writeb(hw->override_addr[MAC_ADDR_ORDER(i)],
hw->io + KS884X_ADDR_0_OFFSET + i);
@@ -4072,17 +4071,16 @@ static void hw_read_addr(struct ksz_hw *hw)
{
int i;
- for (i = 0; i < MAC_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
hw->perm_addr[MAC_ADDR_ORDER(i)] = readb(hw->io +
KS884X_ADDR_0_OFFSET + i);
if (!hw->mac_override) {
- memcpy(hw->override_addr, hw->perm_addr, MAC_ADDR_LEN);
+ memcpy(hw->override_addr, hw->perm_addr, ETH_ALEN);
if (empty_addr(hw->override_addr)) {
- memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS,
- MAC_ADDR_LEN);
+ memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS, ETH_ALEN);
memcpy(hw->override_addr, DEFAULT_MAC_ADDRESS,
- MAC_ADDR_LEN);
+ ETH_ALEN);
hw->override_addr[5] += hw->id;
hw_set_addr(hw);
}
@@ -4130,16 +4128,16 @@ static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
int i;
int j = ADDITIONAL_ENTRIES;
- if (!memcmp(hw->override_addr, mac_addr, MAC_ADDR_LEN))
+ if (!memcmp(hw->override_addr, mac_addr, ETH_ALEN))
return 0;
for (i = 0; i < hw->addr_list_size; i++) {
- if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN))
+ if (!memcmp(hw->address[i], mac_addr, ETH_ALEN))
return 0;
if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i]))
j = i;
}
if (j < ADDITIONAL_ENTRIES) {
- memcpy(hw->address[j], mac_addr, MAC_ADDR_LEN);
+ memcpy(hw->address[j], mac_addr, ETH_ALEN);
hw_ena_add_addr(hw, j, hw->address[j]);
return 0;
}
@@ -4151,8 +4149,8 @@ static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
int i;
for (i = 0; i < hw->addr_list_size; i++) {
- if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN)) {
- memset(hw->address[i], 0, MAC_ADDR_LEN);
+ if (!memcmp(hw->address[i], mac_addr, ETH_ALEN)) {
+ memset(hw->address[i], 0, ETH_ALEN);
writel(0, hw->io + ADD_ADDR_INCR * i +
KS_ADD_ADDR_0_HI);
return 0;
@@ -4382,12 +4380,10 @@ static void ksz_update_timer(struct ksz_timer_info *info)
*/
static int ksz_alloc_soft_desc(struct ksz_desc_info *desc_info, int transmit)
{
- desc_info->ring = kmalloc(sizeof(struct ksz_desc) * desc_info->alloc,
- GFP_KERNEL);
+ desc_info->ring = kzalloc(sizeof(struct ksz_desc) * desc_info->alloc,
+ GFP_KERNEL);
if (!desc_info->ring)
return 1;
- memset((void *) desc_info->ring, 0,
- sizeof(struct ksz_desc) * desc_info->alloc);
hw_init_desc(desc_info, transmit);
return 0;
}
@@ -5676,7 +5672,7 @@ static int netdev_set_mac_address(struct net_device *dev, void *addr)
hw_del_addr(hw, dev->dev_addr);
else {
hw->mac_override = 1;
- memcpy(hw->override_addr, mac->sa_data, MAC_ADDR_LEN);
+ memcpy(hw->override_addr, mac->sa_data, ETH_ALEN);
}
memcpy(dev->dev_addr, mac->sa_data, MAX_ADDR_LEN);
@@ -5786,7 +5782,7 @@ static void netdev_set_rx_mode(struct net_device *dev)
netdev_for_each_mc_addr(ha, dev) {
if (i >= MAX_MULTICAST_LIST)
break;
- memcpy(hw->multi_list[i++], ha->addr, MAC_ADDR_LEN);
+ memcpy(hw->multi_list[i++], ha->addr, ETH_ALEN);
}
hw->multi_list_size = (u8) i;
hw_set_grp_addr(hw);
@@ -6093,9 +6089,10 @@ static void netdev_get_drvinfo(struct net_device *dev,
struct dev_priv *priv = netdev_priv(dev);
struct dev_info *hw_priv = priv->adapter;
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(hw_priv->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(hw_priv->pdev),
+ sizeof(info->bus_info));
}
/**
@@ -6587,7 +6584,8 @@ static void netdev_get_ethtool_stats(struct net_device *dev,
*
* Return 0 if successful; otherwise an error code.
*/
-static int netdev_set_features(struct net_device *dev, u32 features)
+static int netdev_set_features(struct net_device *dev,
+ netdev_features_t features)
{
struct dev_priv *priv = netdev_priv(dev);
struct dev_info *hw_priv = priv->adapter;
@@ -6609,7 +6607,7 @@ static int netdev_set_features(struct net_device *dev, u32 features)
return 0;
}
-static struct ethtool_ops netdev_ethtool_ops = {
+static const struct ethtool_ops netdev_ethtool_ops = {
.get_settings = netdev_get_settings,
.set_settings = netdev_set_settings,
.nway_reset = netdev_nway_reset,
@@ -6860,7 +6858,7 @@ static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
int num;
i = j = num = got_num = 0;
- while (j < MAC_ADDR_LEN) {
+ while (j < ETH_ALEN) {
if (macaddr[i]) {
int digit;
@@ -6891,7 +6889,7 @@ static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
}
i++;
}
- if (MAC_ADDR_LEN == j) {
+ if (ETH_ALEN == j) {
if (MAIN_PORT == port)
hw_priv->hw.mac_override = 1;
}
@@ -7058,7 +7056,7 @@ static int __devinit pcidev_init(struct pci_dev *pdev,
/* Multiple device interfaces mode requires a second MAC address. */
if (hw->dev_count > 1) {
- memcpy(sw->other_addr, hw->override_addr, MAC_ADDR_LEN);
+ memcpy(sw->other_addr, hw->override_addr, ETH_ALEN);
read_other_addr(hw);
if (mac1addr[0] != ':')
get_mac_addr(hw_priv, mac1addr, OTHER_PORT);
@@ -7108,12 +7106,11 @@ static int __devinit pcidev_init(struct pci_dev *pdev,
dev->irq = pdev->irq;
if (MAIN_PORT == i)
memcpy(dev->dev_addr, hw_priv->hw.override_addr,
- MAC_ADDR_LEN);
+ ETH_ALEN);
else {
- memcpy(dev->dev_addr, sw->other_addr,
- MAC_ADDR_LEN);
+ memcpy(dev->dev_addr, sw->other_addr, ETH_ALEN);
if (!memcmp(sw->other_addr, hw->override_addr,
- MAC_ADDR_LEN))
+ ETH_ALEN))
dev->dev_addr[5] += port->first_port;
}
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 0778edc..20b72ec 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1491,7 +1491,7 @@ myri10ge_clean_rx_done(struct myri10ge_slice_state *ss, int budget)
* access to avoid theoretical race condition with functions that
* change NETIF_F_LRO flag at runtime.
*/
- bool lro_enabled = ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO;
+ bool lro_enabled = !!(ACCESS_ONCE(mgp->dev->features) & NETIF_F_LRO);
while (rx_done->entry[idx].length != 0 && work_done < budget) {
length = ntohs(rx_done->entry[idx].length);
@@ -3149,7 +3149,8 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
return 0;
}
-static u32 myri10ge_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t myri10ge_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
if (!(features & NETIF_F_RXCSUM))
features &= ~NETIF_F_LRO;
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h
index 11be150..b7fc26c 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge_mcp.h
@@ -356,7 +356,7 @@ enum myri10ge_mcp_cmd_type {
MXGEFW_CMD_GET_DCA_OFFSET = 56,
/* offset of dca control for WDMAs */
- /* VMWare NetQueue commands */
+ /* VMware NetQueue commands */
MXGEFW_CMD_NETQ_GET_FILTERS_PER_QUEUE = 57,
MXGEFW_CMD_NETQ_ADD_FILTER = 58,
/* data0 = filter_id << 16 | queue << 8 | type */
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index fc7c6a9..5b89fd3 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -294,15 +294,4 @@ static struct platform_driver jazz_sonic_driver = {
},
};
-static int __init jazz_sonic_init_module(void)
-{
- return platform_driver_register(&jazz_sonic_driver);
-}
-
-static void __exit jazz_sonic_cleanup_module(void)
-{
- platform_driver_unregister(&jazz_sonic_driver);
-}
-
-module_init(jazz_sonic_init_module);
-module_exit(jazz_sonic_cleanup_module);
+module_platform_driver(jazz_sonic_driver);
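
For reference, the module_platform_driver() conversions here (and in the macsonic, xtsonic and w90p910 hunks below) generate the same registration boilerplate the patch deletes. A rough, illustrative expansion only — using a hypothetical foo_driver, not code from this patch; see include/linux/platform_device.h for the real macro:

	/* Roughly what module_platform_driver(foo_driver) expands to. */
	static int __init foo_driver_init(void)
	{
		return platform_driver_register(&foo_driver);
	}
	module_init(foo_driver_init);

	static void __exit foo_driver_exit(void)
	{
		platform_driver_unregister(&foo_driver);
	}
	module_exit(foo_driver_exit);
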
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index a2eacbf..f1b8556 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -142,8 +142,7 @@ static int macsonic_open(struct net_device* dev)
{
int retval;
- retval = request_irq(dev->irq, sonic_interrupt, IRQ_FLG_FAST,
- "sonic", dev);
+ retval = request_irq(dev->irq, sonic_interrupt, 0, "sonic", dev);
if (retval) {
printk(KERN_ERR "%s: unable to get IRQ %d.\n",
dev->name, dev->irq);
@@ -154,8 +153,8 @@ static int macsonic_open(struct net_device* dev)
* rupt as well, which must prevent re-entrance of the sonic handler.
*/
if (dev->irq == IRQ_AUTO_3) {
- retval = request_irq(IRQ_NUBUS_9, macsonic_interrupt,
- IRQ_FLG_FAST, "sonic", dev);
+ retval = request_irq(IRQ_NUBUS_9, macsonic_interrupt, 0,
+ "sonic", dev);
if (retval) {
printk(KERN_ERR "%s: unable to get IRQ %d.\n",
dev->name, IRQ_NUBUS_9);
@@ -643,15 +642,4 @@ static struct platform_driver mac_sonic_driver = {
},
};
-static int __init mac_sonic_init_module(void)
-{
- return platform_driver_register(&mac_sonic_driver);
-}
-
-static void __exit mac_sonic_cleanup_module(void)
-{
- platform_driver_unregister(&mac_sonic_driver);
-}
-
-module_init(mac_sonic_init_module);
-module_exit(mac_sonic_cleanup_module);
+module_platform_driver(mac_sonic_driver);
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 6ca047a..ac7b16b 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -2555,9 +2555,9 @@ static void set_rx_mode(struct net_device *dev)
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct netdev_private *np = netdev_priv(dev);
- strncpy(info->driver, DRV_NAME, ETHTOOL_BUSINFO_LEN);
- strncpy(info->version, DRV_VERSION, ETHTOOL_BUSINFO_LEN);
- strncpy(info->bus_info, pci_name(np->pci_dev), ETHTOOL_BUSINFO_LEN);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int get_regs_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index 2b8f64d..c24b46c 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -1364,9 +1364,9 @@ static int ns83820_set_settings(struct net_device *ndev,
static void ns83820_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *info)
{
struct ns83820 *dev = PRIV(ndev);
- strcpy(info->driver, "ns83820");
- strcpy(info->version, VERSION);
- strcpy(info->bus_info, pci_name(dev->pci_dev));
+ strlcpy(info->driver, "ns83820", sizeof(info->driver));
+ strlcpy(info->version, VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(dev->pci_dev), sizeof(info->bus_info));
}
static u32 ns83820_get_link(struct net_device *ndev)
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index ccf61b9..e01c0a0 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -319,15 +319,4 @@ static struct platform_driver xtsonic_driver = {
},
};
-static int __init xtsonic_init(void)
-{
- return platform_driver_register(&xtsonic_driver);
-}
-
-static void __exit xtsonic_cleanup(void)
-{
- platform_driver_unregister(&xtsonic_driver);
-}
-
-module_init(xtsonic_init);
-module_exit(xtsonic_cleanup);
+module_platform_driver(xtsonic_driver);
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index c27fb3d..97f63e1 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -5391,10 +5391,9 @@ static void s2io_ethtool_gdrvinfo(struct net_device *dev,
{
struct s2io_nic *sp = netdev_priv(dev);
- strncpy(info->driver, s2io_driver_name, sizeof(info->driver));
- strncpy(info->version, s2io_driver_version, sizeof(info->version));
- strncpy(info->fw_version, "", sizeof(info->fw_version));
- strncpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
+ strlcpy(info->driver, s2io_driver_name, sizeof(info->driver));
+ strlcpy(info->version, s2io_driver_version, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(sp->pdev), sizeof(info->bus_info));
info->regdump_len = XENA_REG_SPACE;
info->eedump_len = XENA_EEPROM_SPACE;
}
@@ -6616,10 +6615,10 @@ static void s2io_ethtool_get_strings(struct net_device *dev,
}
}
-static int s2io_set_features(struct net_device *dev, u32 features)
+static int s2io_set_features(struct net_device *dev, netdev_features_t features)
{
struct s2io_nic *sp = netdev_priv(dev);
- u32 changed = (features ^ dev->features) & NETIF_F_LRO;
+ netdev_features_t changed = (features ^ dev->features) & NETIF_F_LRO;
if (changed && netif_running(dev)) {
int rc;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index a83197d..ef76725 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -2662,9 +2662,10 @@ static void vxge_poll_vp_lockup(unsigned long data)
mod_timer(&vdev->vp_lockup_timer, jiffies + HZ / 1000);
}
-static u32 vxge_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t vxge_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
/* Enabling RTH requires some of the logic in vxge_device_register and a
* vpath reset. Due to these restrictions, only allow modification
@@ -2676,10 +2677,10 @@ static u32 vxge_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int vxge_set_features(struct net_device *dev, u32 features)
+static int vxge_set_features(struct net_device *dev, netdev_features_t features)
{
struct vxgedev *vdev = netdev_priv(dev);
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
if (!(changed & NETIF_F_RXHASH))
return 0;
@@ -3304,7 +3305,7 @@ static void vxge_tx_watchdog(struct net_device *dev)
*
* Add the vlan id to the devices vlan id table
*/
-static void
+static int
vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct vxgedev *vdev = netdev_priv(dev);
@@ -3319,6 +3320,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
vxge_hw_vpath_vid_add(vpath->handle, vid);
}
set_bit(vid, vdev->active_vlans);
+ return 0;
}
/**
@@ -3328,7 +3330,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
*
* Remove the vlan id from the device's vlan id table
*/
-static void
+static int
vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct vxgedev *vdev = netdev_priv(dev);
@@ -3347,6 +3349,7 @@ vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
vxge_debug_entryexit(VXGE_TRACE,
"%s:%d Exiting...", __func__, __LINE__);
clear_bit(vid, vdev->active_vlans);
+ return 0;
}
static const struct net_device_ops vxge_netdev_ops = {
diff --git a/drivers/net/ethernet/nuvoton/w90p910_ether.c b/drivers/net/ethernet/nuvoton/w90p910_ether.c
index f1bfb8f..b75a049 100644
--- a/drivers/net/ethernet/nuvoton/w90p910_ether.c
+++ b/drivers/net/ethernet/nuvoton/w90p910_ether.c
@@ -1103,18 +1103,7 @@ static struct platform_driver w90p910_ether_driver = {
},
};
-static int __init w90p910_ether_init(void)
-{
- return platform_driver_register(&w90p910_ether_driver);
-}
-
-static void __exit w90p910_ether_exit(void)
-{
- platform_driver_unregister(&w90p910_ether_driver);
-}
-
-module_init(w90p910_ether_init);
-module_exit(w90p910_ether_exit);
+module_platform_driver(w90p910_ether_driver);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("w90p910 MAC driver!");
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 1c61d36..4c4e7f4 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -65,7 +65,8 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/prefetch.h>
-#include <linux/io.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/io.h>
#include <asm/irq.h>
#include <asm/system.h>
@@ -736,6 +737,16 @@ struct nv_skb_map {
* - tx setup is lockless: it relies on netif_tx_lock. Actual submission
* needs netdev_priv(dev)->lock :-(
* - set_multicast_list: preparation lockless, relies on netif_tx_lock.
+ *
+ * Hardware stats updates are protected by hwstats_lock:
+ * - updated by nv_do_stats_poll (timer). This is meant to avoid
+ * integer wraparound in the NIC stats registers, at low frequency
+ * (0.1 Hz)
+ * - updated by nv_get_ethtool_stats + nv_get_stats64
+ *
+ * Software stats are accessed only through 64b synchronization points
+ * and are not subject to other synchronization techniques (single
+ * update thread on the TX or RX paths).
*/
/* in dev: base, irq */
@@ -745,9 +756,10 @@ struct fe_priv {
struct net_device *dev;
struct napi_struct napi;
- /* General data:
- * Locking: spin_lock(&np->lock); */
+ /* hardware stats are updated in syscall and timer */
+ spinlock_t hwstats_lock;
struct nv_ethtool_stats estats;
+
int in_shutdown;
u32 linkspeed;
int duplex;
@@ -798,6 +810,13 @@ struct fe_priv {
u32 nic_poll_irq;
int rx_ring_size;
+ /* RX software stats */
+ struct u64_stats_sync swstats_rx_syncp;
+ u64 stat_rx_packets;
+ u64 stat_rx_bytes; /* not always available in HW */
+ u64 stat_rx_missed_errors;
+ u64 stat_rx_dropped;
+
/* media detection workaround.
 * Locking: Within irq handler or disable_irq+spin_lock(&np->lock);
*/
@@ -820,6 +839,12 @@ struct fe_priv {
struct nv_skb_map *tx_end_flip;
int tx_stop;
+ /* TX software stats */
+ struct u64_stats_sync swstats_tx_syncp;
+ u64 stat_tx_packets; /* not always available in HW */
+ u64 stat_tx_bytes;
+ u64 stat_tx_dropped;
+
/* msi/msi-x fields */
u32 msi_flags;
struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
@@ -892,6 +917,11 @@ enum {
static int dma_64bit = NV_DMA_64BIT_ENABLED;
/*
+ * Debug output control for tx_timeout
+ */
+static bool debug_tx_timeout = false;
+
+/*
* Crossover Detection
* Realtek 8201 phy + some OEM boards do not work properly.
*/
@@ -1630,11 +1660,19 @@ static void nv_mac_reset(struct net_device *dev)
pci_push(base);
}
-static void nv_get_hw_stats(struct net_device *dev)
+/* Caller must appropriately lock netdev_priv(dev)->hwstats_lock */
+static void nv_update_stats(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
+ /* If it happens that this is run in top-half context, then
+ * replace the spin_lock of hwstats_lock with
+ * spin_lock_irqsave() in calling functions. */
+ WARN_ONCE(in_irq(), "forcedeth: estats spin_lock(_bh) from top-half");
+ assert_spin_locked(&np->hwstats_lock);
+
+ /* query hardware */
np->estats.tx_bytes += readl(base + NvRegTxCnt);
np->estats.tx_zero_rexmt += readl(base + NvRegTxZeroReXmt);
np->estats.tx_one_rexmt += readl(base + NvRegTxOneReXmt);
@@ -1693,40 +1731,73 @@ static void nv_get_hw_stats(struct net_device *dev)
}
/*
- * nv_get_stats: dev->get_stats function
+ * nv_get_stats64: dev->ndo_get_stats64 function
* Get latest stats value from the nic.
* Called with read_lock(&dev_base_lock) held for read -
* only synchronized against unregister_netdevice.
*/
-static struct net_device_stats *nv_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64*
+nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
+ __acquires(&netdev_priv(dev)->hwstats_lock)
+ __releases(&netdev_priv(dev)->hwstats_lock)
{
struct fe_priv *np = netdev_priv(dev);
+ unsigned int syncp_start;
+
+ /*
+ * Note: because HW stats are not always available and for
+ * consistency reasons, the following ifconfig stats are
+ * managed by software: rx_bytes, tx_bytes, rx_packets and
+ * tx_packets. The related hardware stats reported by ethtool
+ * should be equivalent to these ifconfig stats, with 4
+ * additional bytes per packet (Ethernet FCS CRC), except for
+ * tx_packets when TSO kicks in.
+ */
+
+ /* software stats */
+ do {
+ syncp_start = u64_stats_fetch_begin_bh(&np->swstats_rx_syncp);
+ storage->rx_packets = np->stat_rx_packets;
+ storage->rx_bytes = np->stat_rx_bytes;
+ storage->rx_dropped = np->stat_rx_dropped;
+ storage->rx_missed_errors = np->stat_rx_missed_errors;
+ } while (u64_stats_fetch_retry_bh(&np->swstats_rx_syncp, syncp_start));
+
+ do {
+ syncp_start = u64_stats_fetch_begin_bh(&np->swstats_tx_syncp);
+ storage->tx_packets = np->stat_tx_packets;
+ storage->tx_bytes = np->stat_tx_bytes;
+ storage->tx_dropped = np->stat_tx_dropped;
+ } while (u64_stats_fetch_retry_bh(&np->swstats_tx_syncp, syncp_start));
/* If the nic supports hw counters then retrieve latest values */
- if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2|DEV_HAS_STATISTICS_V3)) {
- nv_get_hw_stats(dev);
+ if (np->driver_data & DEV_HAS_STATISTICS_V123) {
+ spin_lock_bh(&np->hwstats_lock);
- /*
- * Note: because HW stats are not always available and
- * for consistency reasons, the following ifconfig
- * stats are managed by software: rx_bytes, tx_bytes,
- * rx_packets and tx_packets. The related hardware
- * stats reported by ethtool should be equivalent to
- * these ifconfig stats, with 4 additional bytes per
- * packet (Ethernet FCS CRC).
- */
+ nv_update_stats(dev);
+
+ /* generic stats */
+ storage->rx_errors = np->estats.rx_errors_total;
+ storage->tx_errors = np->estats.tx_errors_total;
- /* copy to net_device stats */
- dev->stats.tx_fifo_errors = np->estats.tx_fifo_errors;
- dev->stats.tx_carrier_errors = np->estats.tx_carrier_errors;
- dev->stats.rx_crc_errors = np->estats.rx_crc_errors;
- dev->stats.rx_over_errors = np->estats.rx_over_errors;
- dev->stats.rx_fifo_errors = np->estats.rx_drop_frame;
- dev->stats.rx_errors = np->estats.rx_errors_total;
- dev->stats.tx_errors = np->estats.tx_errors_total;
+ /* meaningful only when NIC supports stats v3 */
+ storage->multicast = np->estats.rx_multicast;
+
+ /* detailed rx_errors */
+ storage->rx_length_errors = np->estats.rx_length_error;
+ storage->rx_over_errors = np->estats.rx_over_errors;
+ storage->rx_crc_errors = np->estats.rx_crc_errors;
+ storage->rx_frame_errors = np->estats.rx_frame_align_error;
+ storage->rx_fifo_errors = np->estats.rx_drop_frame;
+
+ /* detailed tx_errors */
+ storage->tx_carrier_errors = np->estats.tx_carrier_errors;
+ storage->tx_fifo_errors = np->estats.tx_fifo_errors;
+
+ spin_unlock_bh(&np->hwstats_lock);
}
- return &dev->stats;
+ return storage;
}
/*
@@ -1759,8 +1830,12 @@ static int nv_alloc_rx(struct net_device *dev)
np->put_rx.orig = np->first_rx.orig;
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
np->put_rx_ctx = np->first_rx_ctx;
- } else
+ } else {
+ u64_stats_update_begin(&np->swstats_rx_syncp);
+ np->stat_rx_dropped++;
+ u64_stats_update_end(&np->swstats_rx_syncp);
return 1;
+ }
}
return 0;
}
@@ -1791,8 +1866,12 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
np->put_rx.ex = np->first_rx.ex;
if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
np->put_rx_ctx = np->first_rx_ctx;
- } else
+ } else {
+ u64_stats_update_begin(&np->swstats_rx_syncp);
+ np->stat_rx_dropped++;
+ u64_stats_update_end(&np->swstats_rx_syncp);
return 1;
+ }
}
return 0;
}
@@ -1849,6 +1928,7 @@ static void nv_init_tx(struct net_device *dev)
np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
np->get_tx_ctx = np->put_tx_ctx = np->first_tx_ctx = np->tx_skb;
np->last_tx_ctx = &np->tx_skb[np->tx_ring_size-1];
+ netdev_reset_queue(np->dev);
np->tx_pkts_in_progress = 0;
np->tx_change_owner = NULL;
np->tx_end_flip = NULL;
@@ -1927,8 +2007,11 @@ static void nv_drain_tx(struct net_device *dev)
np->tx_ring.ex[i].bufhigh = 0;
np->tx_ring.ex[i].buflow = 0;
}
- if (nv_release_txskb(np, &np->tx_skb[i]))
- dev->stats.tx_dropped++;
+ if (nv_release_txskb(np, &np->tx_skb[i])) {
+ u64_stats_update_begin(&np->swstats_tx_syncp);
+ np->stat_tx_dropped++;
+ u64_stats_update_end(&np->swstats_tx_syncp);
+ }
np->tx_skb[i].dma = 0;
np->tx_skb[i].dma_len = 0;
np->tx_skb[i].dma_single = 0;
@@ -2194,6 +2277,9 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* set tx flags */
start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
+
+ netdev_sent_queue(np->dev, skb->len);
+
np->put_tx.orig = put_tx;
spin_unlock_irqrestore(&np->lock, flags);
@@ -2338,6 +2424,9 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
/* set tx flags */
start_tx->flaglen |= cpu_to_le32(tx_flags | tx_flags_extra);
+
+ netdev_sent_queue(np->dev, skb->len);
+
np->put_tx.ex = put_tx;
spin_unlock_irqrestore(&np->lock, flags);
@@ -2375,6 +2464,7 @@ static int nv_tx_done(struct net_device *dev, int limit)
u32 flags;
int tx_work = 0;
struct ring_desc *orig_get_tx = np->get_tx.orig;
+ unsigned int bytes_compl = 0;
while ((np->get_tx.orig != np->put_tx.orig) &&
!((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
@@ -2385,12 +2475,16 @@ static int nv_tx_done(struct net_device *dev, int limit)
if (np->desc_ver == DESC_VER_1) {
if (flags & NV_TX_LASTPACKET) {
if (flags & NV_TX_ERROR) {
- if ((flags & NV_TX_RETRYERROR) && !(flags & NV_TX_RETRYCOUNT_MASK))
+ if ((flags & NV_TX_RETRYERROR)
+ && !(flags & NV_TX_RETRYCOUNT_MASK))
nv_legacybackoff_reseed(dev);
} else {
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
+ u64_stats_update_begin(&np->swstats_tx_syncp);
+ np->stat_tx_packets++;
+ np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+ u64_stats_update_end(&np->swstats_tx_syncp);
}
+ bytes_compl += np->get_tx_ctx->skb->len;
dev_kfree_skb_any(np->get_tx_ctx->skb);
np->get_tx_ctx->skb = NULL;
tx_work++;
@@ -2398,12 +2492,16 @@ static int nv_tx_done(struct net_device *dev, int limit)
} else {
if (flags & NV_TX2_LASTPACKET) {
if (flags & NV_TX2_ERROR) {
- if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK))
+ if ((flags & NV_TX2_RETRYERROR)
+ && !(flags & NV_TX2_RETRYCOUNT_MASK))
nv_legacybackoff_reseed(dev);
} else {
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
+ u64_stats_update_begin(&np->swstats_tx_syncp);
+ np->stat_tx_packets++;
+ np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+ u64_stats_update_end(&np->swstats_tx_syncp);
}
+ bytes_compl += np->get_tx_ctx->skb->len;
dev_kfree_skb_any(np->get_tx_ctx->skb);
np->get_tx_ctx->skb = NULL;
tx_work++;
@@ -2414,6 +2512,9 @@ static int nv_tx_done(struct net_device *dev, int limit)
if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
np->get_tx_ctx = np->first_tx_ctx;
}
+
+ netdev_completed_queue(np->dev, tx_work, bytes_compl);
+
if (unlikely((np->tx_stop == 1) && (np->get_tx.orig != orig_get_tx))) {
np->tx_stop = 0;
netif_wake_queue(dev);
@@ -2427,6 +2528,7 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
u32 flags;
int tx_work = 0;
struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
+ unsigned long bytes_cleaned = 0;
while ((np->get_tx.ex != np->put_tx.ex) &&
!((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
@@ -2436,17 +2538,21 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
if (flags & NV_TX2_LASTPACKET) {
if (flags & NV_TX2_ERROR) {
- if ((flags & NV_TX2_RETRYERROR) && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
+ if ((flags & NV_TX2_RETRYERROR)
+ && !(flags & NV_TX2_RETRYCOUNT_MASK)) {
if (np->driver_data & DEV_HAS_GEAR_MODE)
nv_gear_backoff_reseed(dev);
else
nv_legacybackoff_reseed(dev);
}
} else {
- dev->stats.tx_packets++;
- dev->stats.tx_bytes += np->get_tx_ctx->skb->len;
+ u64_stats_update_begin(&np->swstats_tx_syncp);
+ np->stat_tx_packets++;
+ np->stat_tx_bytes += np->get_tx_ctx->skb->len;
+ u64_stats_update_end(&np->swstats_tx_syncp);
}
+ bytes_cleaned += np->get_tx_ctx->skb->len;
dev_kfree_skb_any(np->get_tx_ctx->skb);
np->get_tx_ctx->skb = NULL;
tx_work++;
@@ -2454,11 +2560,15 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
if (np->tx_limit)
nv_tx_flip_ownership(dev);
}
+
if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
np->get_tx.ex = np->first_tx.ex;
if (unlikely(np->get_tx_ctx++ == np->last_tx_ctx))
np->get_tx_ctx = np->first_tx_ctx;
}
+
+ netdev_completed_queue(np->dev, tx_work, bytes_cleaned);
+
if (unlikely((np->tx_stop == 1) && (np->get_tx.ex != orig_get_tx))) {
np->tx_stop = 0;
netif_wake_queue(dev);
@@ -2477,56 +2587,64 @@ static void nv_tx_timeout(struct net_device *dev)
u32 status;
union ring_type put_tx;
int saved_tx_limit;
- int i;
if (np->msi_flags & NV_MSI_X_ENABLED)
status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
else
status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
- netdev_info(dev, "Got tx_timeout. irq: %08x\n", status);
+ netdev_warn(dev, "Got tx_timeout. irq status: %08x\n", status);
- netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
- netdev_info(dev, "Dumping tx registers\n");
- for (i = 0; i <= np->register_size; i += 32) {
- netdev_info(dev,
- "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
- i,
- readl(base + i + 0), readl(base + i + 4),
- readl(base + i + 8), readl(base + i + 12),
- readl(base + i + 16), readl(base + i + 20),
- readl(base + i + 24), readl(base + i + 28));
- }
- netdev_info(dev, "Dumping tx ring\n");
- for (i = 0; i < np->tx_ring_size; i += 4) {
- if (!nv_optimized(np)) {
- netdev_info(dev,
- "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
- i,
- le32_to_cpu(np->tx_ring.orig[i].buf),
- le32_to_cpu(np->tx_ring.orig[i].flaglen),
- le32_to_cpu(np->tx_ring.orig[i+1].buf),
- le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
- le32_to_cpu(np->tx_ring.orig[i+2].buf),
- le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
- le32_to_cpu(np->tx_ring.orig[i+3].buf),
- le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
- } else {
+ if (unlikely(debug_tx_timeout)) {
+ int i;
+
+ netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
+ netdev_info(dev, "Dumping tx registers\n");
+ for (i = 0; i <= np->register_size; i += 32) {
netdev_info(dev,
- "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
+ "%3x: %08x %08x %08x %08x "
+ "%08x %08x %08x %08x\n",
i,
- le32_to_cpu(np->tx_ring.ex[i].bufhigh),
- le32_to_cpu(np->tx_ring.ex[i].buflow),
- le32_to_cpu(np->tx_ring.ex[i].flaglen),
- le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
- le32_to_cpu(np->tx_ring.ex[i+1].buflow),
- le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
- le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
- le32_to_cpu(np->tx_ring.ex[i+2].buflow),
- le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
- le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
- le32_to_cpu(np->tx_ring.ex[i+3].buflow),
- le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
+ readl(base + i + 0), readl(base + i + 4),
+ readl(base + i + 8), readl(base + i + 12),
+ readl(base + i + 16), readl(base + i + 20),
+ readl(base + i + 24), readl(base + i + 28));
+ }
+ netdev_info(dev, "Dumping tx ring\n");
+ for (i = 0; i < np->tx_ring_size; i += 4) {
+ if (!nv_optimized(np)) {
+ netdev_info(dev,
+ "%03x: %08x %08x // %08x %08x "
+ "// %08x %08x // %08x %08x\n",
+ i,
+ le32_to_cpu(np->tx_ring.orig[i].buf),
+ le32_to_cpu(np->tx_ring.orig[i].flaglen),
+ le32_to_cpu(np->tx_ring.orig[i+1].buf),
+ le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
+ le32_to_cpu(np->tx_ring.orig[i+2].buf),
+ le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
+ le32_to_cpu(np->tx_ring.orig[i+3].buf),
+ le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
+ } else {
+ netdev_info(dev,
+ "%03x: %08x %08x %08x "
+ "// %08x %08x %08x "
+ "// %08x %08x %08x "
+ "// %08x %08x %08x\n",
+ i,
+ le32_to_cpu(np->tx_ring.ex[i].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i].buflow),
+ le32_to_cpu(np->tx_ring.ex[i].flaglen),
+ le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i+1].buflow),
+ le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
+ le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i+2].buflow),
+ le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
+ le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
+ le32_to_cpu(np->tx_ring.ex[i+3].buflow),
+ le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
+ }
}
}
@@ -2649,8 +2767,11 @@ static int nv_rx_process(struct net_device *dev, int limit)
}
/* the rest are hard errors */
else {
- if (flags & NV_RX_MISSEDFRAME)
- dev->stats.rx_missed_errors++;
+ if (flags & NV_RX_MISSEDFRAME) {
+ u64_stats_update_begin(&np->swstats_rx_syncp);
+ np->stat_rx_missed_errors++;
+ u64_stats_update_end(&np->swstats_rx_syncp);
+ }
dev_kfree_skb(skb);
goto next_pkt;
}
@@ -2693,8 +2814,10 @@ static int nv_rx_process(struct net_device *dev, int limit)
skb_put(skb, len);
skb->protocol = eth_type_trans(skb, dev);
napi_gro_receive(&np->napi, skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
+ u64_stats_update_begin(&np->swstats_rx_syncp);
+ np->stat_rx_packets++;
+ np->stat_rx_bytes += len;
+ u64_stats_update_end(&np->swstats_rx_syncp);
next_pkt:
if (unlikely(np->get_rx.orig++ == np->last_rx.orig))
np->get_rx.orig = np->first_rx.orig;
@@ -2777,8 +2900,10 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
__vlan_hwaccel_put_tag(skb, vid);
}
napi_gro_receive(&np->napi, skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += len;
+ u64_stats_update_begin(&np->swstats_rx_syncp);
+ np->stat_rx_packets++;
+ np->stat_rx_bytes += len;
+ u64_stats_update_end(&np->swstats_rx_syncp);
} else {
dev_kfree_skb(skb);
}
@@ -3021,6 +3146,73 @@ static void nv_update_pause(struct net_device *dev, u32 pause_flags)
}
}
+static void nv_force_linkspeed(struct net_device *dev, int speed, int duplex)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ u8 __iomem *base = get_hwbase(dev);
+ u32 phyreg, txreg;
+ int mii_status;
+
+ np->linkspeed = NVREG_LINKSPEED_FORCE|speed;
+ np->duplex = duplex;
+
+ /* see if gigabit phy */
+ mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
+ if (mii_status & PHY_GIGABIT) {
+ np->gigabit = PHY_GIGABIT;
+ phyreg = readl(base + NvRegSlotTime);
+ phyreg &= ~(0x3FF00);
+ if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_10)
+ phyreg |= NVREG_SLOTTIME_10_100_FULL;
+ else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_100)
+ phyreg |= NVREG_SLOTTIME_10_100_FULL;
+ else if ((np->linkspeed & 0xFFF) == NVREG_LINKSPEED_1000)
+ phyreg |= NVREG_SLOTTIME_1000_FULL;
+ writel(phyreg, base + NvRegSlotTime);
+ }
+
+ phyreg = readl(base + NvRegPhyInterface);
+ phyreg &= ~(PHY_HALF|PHY_100|PHY_1000);
+ if (np->duplex == 0)
+ phyreg |= PHY_HALF;
+ if ((np->linkspeed & NVREG_LINKSPEED_MASK) == NVREG_LINKSPEED_100)
+ phyreg |= PHY_100;
+ else if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
+ NVREG_LINKSPEED_1000)
+ phyreg |= PHY_1000;
+ writel(phyreg, base + NvRegPhyInterface);
+
+ if (phyreg & PHY_RGMII) {
+ if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
+ NVREG_LINKSPEED_1000)
+ txreg = NVREG_TX_DEFERRAL_RGMII_1000;
+ else
+ txreg = NVREG_TX_DEFERRAL_RGMII_10_100;
+ } else {
+ txreg = NVREG_TX_DEFERRAL_DEFAULT;
+ }
+ writel(txreg, base + NvRegTxDeferral);
+
+ if (np->desc_ver == DESC_VER_1) {
+ txreg = NVREG_TX_WM_DESC1_DEFAULT;
+ } else {
+ if ((np->linkspeed & NVREG_LINKSPEED_MASK) ==
+ NVREG_LINKSPEED_1000)
+ txreg = NVREG_TX_WM_DESC2_3_1000;
+ else
+ txreg = NVREG_TX_WM_DESC2_3_DEFAULT;
+ }
+ writel(txreg, base + NvRegTxWatermark);
+
+ writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
+ base + NvRegMisc1);
+ pci_push(base);
+ writel(np->linkspeed, base + NvRegLinkSpeed);
+ pci_push(base);
+
+ return;
+}
+
/**
* nv_update_linkspeed: Setup the MAC according to the link partner
* @dev: Network device to be configured
@@ -3042,11 +3234,25 @@ static int nv_update_linkspeed(struct net_device *dev)
int newls = np->linkspeed;
int newdup = np->duplex;
int mii_status;
+ u32 bmcr;
int retval = 0;
u32 control_1000, status_1000, phyreg, pause_flags, txreg;
u32 txrxFlags = 0;
u32 phy_exp;
+ /* If device loopback is enabled, set carrier on and enable max link
+ * speed.
+ */
+ bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ if (bmcr & BMCR_LOOPBACK) {
+ if (netif_running(dev)) {
+ nv_force_linkspeed(dev, NVREG_LINKSPEED_1000, 1);
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+ }
+ return 1;
+ }
+
/* BMSR_LSTATUS is latched, read it twice:
* we want the current value.
*/
@@ -3729,6 +3935,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
writel(0, base + NvRegMSIXMap0);
writel(0, base + NvRegMSIXMap1);
}
+ netdev_info(dev, "MSI-X enabled\n");
}
}
if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
@@ -3750,6 +3957,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
writel(0, base + NvRegMSIMap1);
/* enable msi vector 0 */
writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
+ netdev_info(dev, "MSI enabled\n");
}
}
if (ret != 0) {
@@ -3904,11 +4112,18 @@ static void nv_poll_controller(struct net_device *dev)
#endif
static void nv_do_stats_poll(unsigned long data)
+ __acquires(&netdev_priv(dev)->hwstats_lock)
+ __releases(&netdev_priv(dev)->hwstats_lock)
{
struct net_device *dev = (struct net_device *) data;
struct fe_priv *np = netdev_priv(dev);
- nv_get_hw_stats(dev);
+ /* If lock is currently taken, the stats are being refreshed
+ * and hence fresh enough */
+ if (spin_trylock(&np->hwstats_lock)) {
+ nv_update_stats(dev);
+ spin_unlock(&np->hwstats_lock);
+ }
if (!np->in_shutdown)
mod_timer(&np->stats_poll,
@@ -3918,9 +4133,9 @@ static void nv_do_stats_poll(unsigned long data)
static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct fe_priv *np = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, FORCEDETH_VERSION);
- strcpy(info->bus_info, pci_name(np->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, FORCEDETH_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
@@ -4473,7 +4688,63 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
return 0;
}
-static u32 nv_fix_features(struct net_device *dev, u32 features)
+static int nv_set_loopback(struct net_device *dev, netdev_features_t features)
+{
+ struct fe_priv *np = netdev_priv(dev);
+ unsigned long flags;
+ u32 miicontrol;
+ int err, retval = 0;
+
+ spin_lock_irqsave(&np->lock, flags);
+ miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
+ if (features & NETIF_F_LOOPBACK) {
+ if (miicontrol & BMCR_LOOPBACK) {
+ spin_unlock_irqrestore(&np->lock, flags);
+ netdev_info(dev, "Loopback already enabled\n");
+ return 0;
+ }
+ nv_disable_irq(dev);
+ /* Turn on loopback mode */
+ miicontrol |= BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
+ err = mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol);
+ if (err) {
+ retval = PHY_ERROR;
+ spin_unlock_irqrestore(&np->lock, flags);
+ phy_init(dev);
+ } else {
+ if (netif_running(dev)) {
+ /* Force 1000 Mbps full-duplex */
+ nv_force_linkspeed(dev, NVREG_LINKSPEED_1000,
+ 1);
+ /* Force link up */
+ netif_carrier_on(dev);
+ }
+ spin_unlock_irqrestore(&np->lock, flags);
+ netdev_info(dev,
+ "Internal PHY loopback mode enabled.\n");
+ }
+ } else {
+ if (!(miicontrol & BMCR_LOOPBACK)) {
+ spin_unlock_irqrestore(&np->lock, flags);
+ netdev_info(dev, "Loopback already disabled\n");
+ return 0;
+ }
+ nv_disable_irq(dev);
+ /* Turn off loopback */
+ spin_unlock_irqrestore(&np->lock, flags);
+ netdev_info(dev, "Internal PHY loopback mode disabled.\n");
+ phy_init(dev);
+ }
+ msleep(500);
+ spin_lock_irqsave(&np->lock, flags);
+ nv_enable_irq(dev);
+ spin_unlock_irqrestore(&np->lock, flags);
+
+ return retval;
+}
+
+static netdev_features_t nv_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
/* vlan is dependent on rx checksum offload */
if (features & (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX))
@@ -4482,7 +4753,7 @@ static u32 nv_fix_features(struct net_device *dev, u32 features)
return features;
}
-static void nv_vlan_mode(struct net_device *dev, u32 features)
+static void nv_vlan_mode(struct net_device *dev, netdev_features_t features)
{
struct fe_priv *np = get_nvpriv(dev);
@@ -4503,11 +4774,18 @@ static void nv_vlan_mode(struct net_device *dev, u32 features)
spin_unlock_irq(&np->lock);
}
-static int nv_set_features(struct net_device *dev, u32 features)
+static int nv_set_features(struct net_device *dev, netdev_features_t features)
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
- u32 changed = dev->features ^ features;
+ netdev_features_t changed = dev->features ^ features;
+ int retval;
+
+ if ((changed & NETIF_F_LOOPBACK) && netif_running(dev)) {
+ retval = nv_set_loopback(dev, features);
+ if (retval != 0)
+ return retval;
+ }
if (changed & NETIF_F_RXCSUM) {
spin_lock_irq(&np->lock);
@@ -4553,14 +4831,18 @@ static int nv_get_sset_count(struct net_device *dev, int sset)
}
}
-static void nv_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *buffer)
+static void nv_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *estats, u64 *buffer)
+ __acquires(&netdev_priv(dev)->hwstats_lock)
+ __releases(&netdev_priv(dev)->hwstats_lock)
{
struct fe_priv *np = netdev_priv(dev);
- /* update stats */
- nv_get_hw_stats(dev);
-
- memcpy(buffer, &np->estats, nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
+ spin_lock_bh(&np->hwstats_lock);
+ nv_update_stats(dev);
+ memcpy(buffer, &np->estats,
+ nv_get_sset_count(dev, ETH_SS_STATS)*sizeof(u64));
+ spin_unlock_bh(&np->hwstats_lock);
}
static int nv_link_test(struct net_device *dev)
@@ -5142,6 +5424,12 @@ static int nv_open(struct net_device *dev)
spin_unlock_irq(&np->lock);
+ /* If the loopback feature was set while the device was down, make sure
+ * that it's set correctly now.
+ */
+ if (dev->features & NETIF_F_LOOPBACK)
+ nv_set_loopback(dev, dev->features);
+
return 0;
out_drain:
nv_drain_rxtx(dev);
@@ -5198,7 +5486,7 @@ static int nv_close(struct net_device *dev)
static const struct net_device_ops nv_netdev_ops = {
.ndo_open = nv_open,
.ndo_stop = nv_close,
- .ndo_get_stats = nv_get_stats,
+ .ndo_get_stats64 = nv_get_stats64,
.ndo_start_xmit = nv_start_xmit,
.ndo_tx_timeout = nv_tx_timeout,
.ndo_change_mtu = nv_change_mtu,
@@ -5215,7 +5503,7 @@ static const struct net_device_ops nv_netdev_ops = {
static const struct net_device_ops nv_netdev_ops_optimized = {
.ndo_open = nv_open,
.ndo_stop = nv_close,
- .ndo_get_stats = nv_get_stats,
+ .ndo_get_stats64 = nv_get_stats64,
.ndo_start_xmit = nv_start_xmit_optimized,
.ndo_tx_timeout = nv_tx_timeout,
.ndo_change_mtu = nv_change_mtu,
@@ -5254,6 +5542,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
np->dev = dev;
np->pci_dev = pci_dev;
spin_lock_init(&np->lock);
+ spin_lock_init(&np->hwstats_lock);
SET_NETDEV_DEV(dev, &pci_dev->dev);
init_timer(&np->oom_kick);
@@ -5262,7 +5551,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
init_timer(&np->nic_poll);
np->nic_poll.data = (unsigned long) dev;
np->nic_poll.function = nv_do_nic_poll; /* timer handler */
- init_timer(&np->stats_poll);
+ init_timer_deferrable(&np->stats_poll);
np->stats_poll.data = (unsigned long) dev;
np->stats_poll.function = nv_do_stats_poll; /* timer handler */
@@ -5346,6 +5635,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
dev->features |= dev->hw_features;
+ /* Add loopback capability to the device. */
+ dev->hw_features |= NETIF_F_LOOPBACK;
+
np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE | NV_PAUSEFRAME_RX_REQ | NV_PAUSEFRAME_AUTONEG;
if ((id->driver_data & DEV_HAS_PAUSEFRAME_TX_V1) ||
(id->driver_data & DEV_HAS_PAUSEFRAME_TX_V2) ||
@@ -5621,12 +5913,14 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
- dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
+ dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
"csum " : "",
dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
"vlan " : "",
+ dev->features & (NETIF_F_LOOPBACK) ?
+ "loopback " : "",
id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
@@ -6000,6 +6294,9 @@ module_param(phy_cross, int, 0);
MODULE_PARM_DESC(phy_cross, "Phy crossover detection for Realtek 8201 phy is enabled by setting to 1 and disabled by setting to 0.");
module_param(phy_power_down, int, 0);
MODULE_PARM_DESC(phy_power_down, "Power down phy and disable link when interface is down (1), or leave phy powered up (0).");
+module_param(debug_tx_timeout, bool, 0);
+MODULE_PARM_DESC(debug_tx_timeout,
+ "Dump tx related registers and ring when tx_timeout happens");
MODULE_AUTHOR("Manfred Spraul <manfred@colorfullife.com>");
MODULE_DESCRIPTION("Reverse Engineered nForce ethernet driver");
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index 212f43b..cd827ff 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -670,7 +670,7 @@ static void octeon_mgmt_adjust_link(struct net_device *netdev)
static int octeon_mgmt_init_phy(struct net_device *netdev)
{
struct octeon_mgmt *p = netdev_priv(netdev);
- char phy_id[20];
+ char phy_id[MII_BUS_ID_SIZE + 3];
if (octeon_is_simulation()) {
/* No PHYs in the simulator. */
@@ -678,7 +678,7 @@ static int octeon_mgmt_init_phy(struct net_device *netdev)
return 0;
}
- snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "0", p->port);
+ snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT, "mdio-octeon-0", p->port);
p->phydev = phy_connect(netdev, phy_id, octeon_mgmt_adjust_link, 0,
PHY_INTERFACE_MODE_MII);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 8c80271..ac4e72d 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -161,10 +161,10 @@ static void pch_gbe_get_drvinfo(struct net_device *netdev,
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
- strcpy(drvinfo->driver, KBUILD_MODNAME);
- strcpy(drvinfo->version, pch_driver_version);
- strcpy(drvinfo->fw_version, "N/A");
- strcpy(drvinfo->bus_info, pci_name(adapter->pdev));
+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, pch_driver_version, sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->regdump_len = pch_gbe_get_regs_len(netdev);
}
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index 48406ca..3ead111 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -1745,6 +1745,12 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
int err;
+ /* Ensure we have a valid MAC */
+ if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
+ pr_err("Error: Invalid MAC address\n");
+ return -EINVAL;
+ }
+
/* hardware has been reset, we need to reload some things */
pch_gbe_set_multi(netdev);
@@ -2109,10 +2115,11 @@ static int pch_gbe_change_mtu(struct net_device *netdev, int new_mtu)
* Returns
* 0: HW state updated successfully
*/
-static int pch_gbe_set_features(struct net_device *netdev, u32 features)
+static int pch_gbe_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct pch_gbe_adapter *adapter = netdev_priv(netdev);
- u32 changed = features ^ netdev->features;
+ netdev_features_t changed = features ^ netdev->features;
if (!(changed & NETIF_F_RXCSUM))
return 0;
@@ -2467,9 +2474,14 @@ static int pch_gbe_probe(struct pci_dev *pdev,
memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->dev_addr)) {
- dev_err(&pdev->dev, "Invalid MAC Address\n");
- ret = -EIO;
- goto err_free_adapter;
+ /*
+ * If the MAC is invalid (or just missing), display a warning
+ * but do not abort setting up the device. pch_gbe_up will
+ * prevent the interface from being brought up until a valid MAC
+ * is set.
+ */
+ dev_err(&pdev->dev, "Invalid MAC address, "
+ "interface disabled.\n");
}
setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
(unsigned long)adapter);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
index 9cb5f91..29e23be 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
@@ -321,10 +321,10 @@ static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter)
pr_debug("AutoNeg specified along with Speed or Duplex, AutoNeg parameter ignored\n");
hw->phy.autoneg_advertised = opt.def;
} else {
- hw->phy.autoneg_advertised = AutoNeg;
- pch_gbe_validate_option(
- (int *)(&hw->phy.autoneg_advertised),
- &opt, adapter);
+ int tmp = AutoNeg;
+
+ pch_gbe_validate_option(&tmp, &opt, adapter);
+ hw->phy.autoneg_advertised = tmp;
}
}
@@ -495,9 +495,10 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
.arg = { .l = { .nr = (int)ARRAY_SIZE(fc_list),
.p = fc_list } }
};
- hw->mac.fc = FlowControl;
- pch_gbe_validate_option((int *)(&hw->mac.fc),
- &opt, adapter);
+ int tmp = FlowControl;
+
+ pch_gbe_validate_option(&tmp, &opt, adapter);
+ hw->mac.fc = tmp;
}
pch_gbe_check_copper_options(adapter);
diff --git a/drivers/net/ethernet/packetengines/Kconfig b/drivers/net/ethernet/packetengines/Kconfig
index b97132d..8f29feb 100644
--- a/drivers/net/ethernet/packetengines/Kconfig
+++ b/drivers/net/ethernet/packetengines/Kconfig
@@ -4,6 +4,7 @@
config NET_PACKET_ENGINE
bool "Packet Engine devices"
+ default y
depends on PCI
---help---
If you have a network (Ethernet) card belonging to this class, say Y
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
index e09ea83..8a37198 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c
@@ -83,14 +83,18 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
u32 fw_minor = 0;
u32 fw_build = 0;
- strncpy(drvinfo->driver, netxen_nic_driver_name, 32);
- strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32);
+ strlcpy(drvinfo->driver, netxen_nic_driver_name,
+ sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID,
+ sizeof(drvinfo->version));
fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
- sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d", fw_major, fw_minor, fw_build);
- strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->regdump_len = NETXEN_NIC_REGS_LEN;
drvinfo->eedump_len = netxen_nic_get_eeprom_len(dev);
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 8cf3173..7dd9a4b 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -544,7 +544,8 @@ static void netxen_set_multicast_list(struct net_device *dev)
adapter->set_multi(dev);
}
-static u32 netxen_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t netxen_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
if (!(features & NETIF_F_RXCSUM)) {
netdev_info(dev, "disabling LRO as RXCSUM is off\n");
@@ -555,7 +556,8 @@ static u32 netxen_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int netxen_set_features(struct net_device *dev, u32 features)
+static int netxen_set_features(struct net_device *dev,
+ netdev_features_t features)
{
struct netxen_adapter *adapter = netdev_priv(dev);
int hw_lro;
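
Many of the remaining hunks in this section are the same mechanical conversion: feature masks move from u32 to netdev_features_t in the ndo_fix_features/ndo_set_features callbacks. A generic sketch of the converted pair, with hypothetical foo_* names (foo_enable_lro stands in for a driver-specific hook and is not taken from any driver here):

	#include <linux/netdevice.h>

	/* Hypothetical hardware hook; a real driver would program the NIC. */
	static void foo_enable_lro(struct net_device *dev, bool enable)
	{
		netdev_info(dev, "LRO %s\n", enable ? "enabled" : "disabled");
	}

	static netdev_features_t foo_fix_features(struct net_device *dev,
						  netdev_features_t features)
	{
		/* LRO depends on RX checksum offload */
		if (!(features & NETIF_F_RXCSUM))
			features &= ~NETIF_F_LRO;
		return features;
	}

	static int foo_set_features(struct net_device *dev,
				    netdev_features_t features)
	{
		netdev_features_t changed = dev->features ^ features;

		if (changed & NETIF_F_LRO)
			foo_enable_lro(dev, !!(features & NETIF_F_LRO));
		return 0;
	}
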
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index a4bdff4..e61560e 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -1735,10 +1735,11 @@ static void ql_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *drvinfo)
{
struct ql3_adapter *qdev = netdev_priv(ndev);
- strncpy(drvinfo->driver, ql3xxx_driver_name, 32);
- strncpy(drvinfo->version, ql3xxx_driver_version, 32);
- strncpy(drvinfo->fw_version, "N/A", 32);
- strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
+ strlcpy(drvinfo->driver, ql3xxx_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, ql3xxx_driver_version,
+ sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->regdump_len = 0;
drvinfo->eedump_len = 0;
}
@@ -3016,7 +3017,6 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
(void __iomem *)port_regs;
u32 delay = 10;
int status = 0;
- unsigned long hw_flags = 0;
if (ql_mii_setup(qdev))
return -1;
@@ -3227,9 +3227,9 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
value = ql_read_page0_reg(qdev, &port_regs->portStatus);
if (value & PORT_STATUS_IC)
break;
- spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+ spin_unlock_irq(&qdev->hw_lock);
msleep(500);
- spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+ spin_lock_irq(&qdev->hw_lock);
} while (--delay);
if (delay == 0) {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 7ed53db..60976fc 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1466,8 +1466,9 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
-u32 qlcnic_fix_features(struct net_device *netdev, u32 features);
-int qlcnic_set_features(struct net_device *netdev, u32 features);
+netdev_features_t qlcnic_fix_features(struct net_device *netdev,
+ netdev_features_t features);
+int qlcnic_set_features(struct net_device *netdev, netdev_features_t features);
int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 8aa1c6e..cc228cf 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -140,11 +140,14 @@ qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
- sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
-
- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
- strlcpy(drvinfo->driver, qlcnic_driver_name, 32);
- strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID, 32);
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d", fw_major, fw_minor, fw_build);
+
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
+ sizeof(drvinfo->bus_info));
+ strlcpy(drvinfo->driver, qlcnic_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID,
+ sizeof(drvinfo->version));
}
static int
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index bcb81e4..b528e52 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -817,12 +817,13 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu)
}
-u32 qlcnic_fix_features(struct net_device *netdev, u32 features)
+netdev_features_t qlcnic_fix_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
if ((adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
- u32 changed = features ^ netdev->features;
+ netdev_features_t changed = features ^ netdev->features;
features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
}
@@ -833,10 +834,10 @@ u32 qlcnic_fix_features(struct net_device *netdev, u32 features)
}
-int qlcnic_set_features(struct net_device *netdev, u32 features)
+int qlcnic_set_features(struct net_device *netdev, netdev_features_t features)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- u32 changed = netdev->features ^ features;
+ netdev_features_t changed = netdev->features ^ features;
int hw_lro = (features & NETIF_F_LRO) ? QLCNIC_LRO_ENABLED : 0;
if (!(changed & NETIF_F_LRO))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 0bd1638..69b8e4e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -97,8 +97,8 @@ static int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
-static void qlcnic_vlan_rx_add(struct net_device *, u16);
-static void qlcnic_vlan_rx_del(struct net_device *, u16);
+static int qlcnic_vlan_rx_add(struct net_device *, u16);
+static int qlcnic_vlan_rx_del(struct net_device *, u16);
/* PCI Device ID Table */
#define ENTRY(device) \
@@ -735,20 +735,22 @@ qlcnic_set_vlan_config(struct qlcnic_adapter *adapter,
adapter->pvid = 0;
}
-static void
+static int
qlcnic_vlan_rx_add(struct net_device *netdev, u16 vid)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
set_bit(vid, adapter->vlans);
+ return 0;
}
-static void
+static int
qlcnic_vlan_rx_del(struct net_device *netdev, u16 vid)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
qlcnic_restore_indev_addr(netdev, NETDEV_DOWN);
clear_bit(vid, adapter->vlans);
+ return 0;
}
static void
@@ -792,7 +794,7 @@ qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
struct qlcnic_esw_func_cfg *esw_cfg)
{
struct net_device *netdev = adapter->netdev;
- unsigned long features, vlan_features;
+ netdev_features_t features, vlan_features;
features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_IPV6_CSUM | NETIF_F_GRO);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 9b67bfe..8e2c2a7 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -366,13 +366,16 @@ static void ql_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *drvinfo)
{
struct ql_adapter *qdev = netdev_priv(ndev);
- strncpy(drvinfo->driver, qlge_driver_name, 32);
- strncpy(drvinfo->version, qlge_driver_version, 32);
- snprintf(drvinfo->fw_version, 32, "v%d.%d.%d",
+ strlcpy(drvinfo->driver, qlge_driver_name, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, qlge_driver_version,
+ sizeof(drvinfo->version));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "v%d.%d.%d",
(qdev->fw_rev_id & 0x00ff0000) >> 16,
(qdev->fw_rev_id & 0x0000ff00) >> 8,
(qdev->fw_rev_id & 0x000000ff));
- strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
+ strlcpy(drvinfo->bus_info, pci_name(qdev->pdev),
+ sizeof(drvinfo->bus_info));
drvinfo->n_stats = 0;
drvinfo->testinfo_len = 0;
if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index c92afcd..b548987 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2307,7 +2307,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
return work_done;
}
-static void qlge_vlan_mode(struct net_device *ndev, u32 features)
+static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
{
struct ql_adapter *qdev = netdev_priv(ndev);
@@ -2323,7 +2323,8 @@ static void qlge_vlan_mode(struct net_device *ndev, u32 features)
}
}
-static u32 qlge_fix_features(struct net_device *ndev, u32 features)
+static netdev_features_t qlge_fix_features(struct net_device *ndev,
+ netdev_features_t features)
{
/*
* Since there is no support for separate rx/tx vlan accel
@@ -2337,9 +2338,10 @@ static u32 qlge_fix_features(struct net_device *ndev, u32 features)
return features;
}
-static int qlge_set_features(struct net_device *ndev, u32 features)
+static int qlge_set_features(struct net_device *ndev,
+ netdev_features_t features)
{
- u32 changed = ndev->features ^ features;
+ netdev_features_t changed = ndev->features ^ features;
if (changed & NETIF_F_HW_VLAN_RX)
qlge_vlan_mode(ndev, features);
@@ -2347,56 +2349,66 @@ static int qlge_set_features(struct net_device *ndev, u32 features)
return 0;
}
-static void __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
+static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
{
u32 enable_bit = MAC_ADDR_E;
+ int err;
- if (ql_set_mac_addr_reg
- (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
+ err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
+ MAC_ADDR_TYPE_VLAN, vid);
+ if (err)
netif_err(qdev, ifup, qdev->ndev,
"Failed to init vlan address.\n");
- }
+ return err;
}
-static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
+static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
{
struct ql_adapter *qdev = netdev_priv(ndev);
int status;
+ int err;
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
- return;
+ return status;
- __qlge_vlan_rx_add_vid(qdev, vid);
+ err = __qlge_vlan_rx_add_vid(qdev, vid);
set_bit(vid, qdev->active_vlans);
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+
+ return err;
}
-static void __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
+static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
{
u32 enable_bit = 0;
+ int err;
- if (ql_set_mac_addr_reg
- (qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
+ err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
+ MAC_ADDR_TYPE_VLAN, vid);
+ if (err)
netif_err(qdev, ifup, qdev->ndev,
"Failed to clear vlan address.\n");
- }
+ return err;
}
-static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
+static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
{
struct ql_adapter *qdev = netdev_priv(ndev);
int status;
+ int err;
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
- return;
+ return status;
- __qlge_vlan_rx_kill_vid(qdev, vid);
+ err = __qlge_vlan_rx_kill_vid(qdev, vid);
clear_bit(vid, qdev->active_vlans);
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
+
+ return err;
}
static void qlge_restore_vlan(struct ql_adapter *qdev)
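netdev_features_t, a 64-bit bitmask type, replaces u32/unsigned long wherever feature masks are handled, so ndo_fix_features/ndo_set_features must use it end to end. A sketch of the shape (not qlge code, hardware reprogramming elided):

#include <linux/netdevice.h>

static netdev_features_t example_fix_features(struct net_device *ndev,
					      netdev_features_t features)
{
	/* Keep RX and TX VLAN acceleration in step, as qlge does above. */
	if (features & NETIF_F_HW_VLAN_RX)
		features |= NETIF_F_HW_VLAN_TX;
	else
		features &= ~NETIF_F_HW_VLAN_TX;
	return features;
}

static int example_set_features(struct net_device *ndev,
				netdev_features_t features)
{
	netdev_features_t changed = ndev->features ^ features;

	if (changed & NETIF_F_HW_VLAN_RX) {
		/* reprogram hardware VLAN stripping here */
	}
	return 0;
}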
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 4bf68cf..cb0eca8 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -52,12 +52,6 @@
#define DRV_VERSION "0.28"
#define DRV_RELDATE "07Oct2011"
-/* PHY CHIP Address */
-#define PHY1_ADDR 1 /* For MAC1 */
-#define PHY2_ADDR 3 /* For MAC2 */
-#define PHY_MODE 0x3100 /* PHY CHIP Register 0 */
-#define PHY_CAP 0x01E1 /* PHY CHIP Register 4 */
-
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (6000 * HZ / 1000)
@@ -69,8 +63,11 @@
/* MAC registers */
#define MCR0 0x00 /* Control register 0 */
+#define MCR0_RCVEN 0x0002 /* Receive enable */
#define MCR0_PROMISC 0x0020 /* Promiscuous mode */
#define MCR0_HASH_EN 0x0100 /* Enable multicast hash table function */
+#define MCR0_XMTEN 0x1000 /* Transmission enable */
+#define MCR0_FD 0x8000 /* Full/Half duplex */
#define MCR1 0x04 /* Control register 1 */
#define MAC_RST 0x0001 /* Reset the MAC */
#define MBCR 0x08 /* Bus control */
@@ -129,6 +126,7 @@
#define PHY_CC 0x88 /* PHY status change configuration register */
#define PHY_ST 0x8A /* PHY status register */
#define MAC_SM 0xAC /* MAC status machine */
+#define MAC_SM_RST 0x0002 /* MAC status machine reset */
#define MAC_ID 0xBE /* Identifier register */
#define TX_DCNT 0x80 /* TX descriptor count */
@@ -154,9 +152,6 @@
#define DSC_RX_MIDH_HIT 0x0004 /* RX MID table hit (no error) */
#define DSC_RX_IDX_MID_MASK 3 /* RX mask for the index of matched MIDx */
-/* PHY settings */
-#define ICPLUS_PHY_ID 0x0243
-
MODULE_AUTHOR("Sten Wang <sten.wang@rdc.com.tw>,"
"Daniel Gimpelevich <daniel@gimpelevich.san-francisco.ca.us>,"
"Florian Fainelli <florian@openwrt.org>");
@@ -178,7 +173,7 @@ struct r6040_descriptor {
struct r6040_descriptor *vndescp; /* 14-17 */
struct sk_buff *skb_ptr; /* 18-1B */
u32 rev2; /* 1C-1F */
-} __attribute__((aligned(32)));
+} __aligned(32);
struct r6040_private {
spinlock_t lock; /* driver lock */
@@ -191,7 +186,7 @@ struct r6040_private {
struct r6040_descriptor *tx_ring;
dma_addr_t rx_ring_dma;
dma_addr_t tx_ring_dma;
- u16 tx_free_desc, phy_addr;
+ u16 tx_free_desc;
u16 mcr0, mcr1;
struct net_device *dev;
struct mii_bus *mii_bus;
@@ -206,8 +201,6 @@ static char version[] __devinitdata = DRV_NAME
": RDC R6040 NAPI net driver,"
"version "DRV_VERSION " (" DRV_RELDATE ")";
-static int phy_table[] = { PHY1_ADDR, PHY2_ADDR };
-
/* Read a word data from PHY Chip */
static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
{
@@ -379,11 +372,11 @@ static void r6040_init_mac_regs(struct net_device *dev)
iowrite16(MAC_RST, ioaddr + MCR1);
while (limit--) {
cmd = ioread16(ioaddr + MCR1);
- if (cmd & 0x1)
+ if (cmd & MAC_RST)
break;
}
/* Reset internal state machine */
- iowrite16(2, ioaddr + MAC_SM);
+ iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
iowrite16(0, ioaddr + MAC_SM);
mdelay(5);
@@ -409,7 +402,7 @@ static void r6040_init_mac_regs(struct net_device *dev)
iowrite16(INT_MASK, ioaddr + MIER);
/* Enable TX and RX */
- iowrite16(lp->mcr0 | 0x0002, ioaddr);
+ iowrite16(lp->mcr0 | MCR0_RCVEN, ioaddr);
/* Let TX poll the descriptors
* we may get called by r6040_tx_timeout which has left
* we may get called by r6040_tx_timeout which has left
@@ -461,7 +454,7 @@ static void r6040_down(struct net_device *dev)
iowrite16(MAC_RST, ioaddr + MCR1); /* Reset RDC MAC */
while (limit--) {
cmd = ioread16(ioaddr + MCR1);
- if (cmd & 0x1)
+ if (cmd & MAC_RST)
break;
}
@@ -742,9 +735,10 @@ static void r6040_mac_address(struct net_device *dev)
void __iomem *ioaddr = lp->base;
u16 *adrp;
- /* MAC operation register */
- iowrite16(0x01, ioaddr + MCR1); /* Reset MAC */
- iowrite16(2, ioaddr + MAC_SM); /* Reset internal state machine */
+ /* Reset MAC */
+ iowrite16(MAC_RST, ioaddr + MCR1);
+ /* Reset internal state machine */
+ iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
iowrite16(0, ioaddr + MAC_SM);
mdelay(5);
@@ -1013,7 +1007,7 @@ static void r6040_adjust_link(struct net_device *dev)
/* reflect duplex change */
if (phydev->link && (lp->old_duplex != phydev->duplex)) {
- lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? 0x8000 : 0);
+ lp->mcr0 |= (phydev->duplex == DUPLEX_FULL ? MCR0_FD : 0);
iowrite16(lp->mcr0, ioaddr);
status_changed = 1;
@@ -1166,8 +1160,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
lp->dev = dev;
/* Init RDC private data */
- lp->mcr0 = 0x1002;
- lp->phy_addr = phy_table[card_idx];
+ lp->mcr0 = MCR0_XMTEN | MCR0_RCVEN;
/* The RDC-specific entries in the device structure. */
dev->netdev_ops = &r6040_netdev_ops;
@@ -1188,7 +1181,8 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
lp->mii_bus->write = r6040_mdiobus_write;
lp->mii_bus->reset = r6040_mdiobus_reset;
lp->mii_bus->name = "r6040_eth_mii";
- snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%x", card_idx);
+ snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ dev_name(&pdev->dev), card_idx);
lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
if (!lp->mii_bus->irq) {
dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
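The r6040 changes trade magic numbers for named register bits and give the MII bus a globally unique id (device name plus instance). A sketch of the reset/poll idiom using the named bits, with the register offsets taken from the hunk above:

#include <linux/io.h>

#define MCR1		0x04	/* Control register 1 */
#define MAC_RST		0x0001	/* Reset the MAC */
#define MAC_SM		0xAC	/* MAC status machine */
#define MAC_SM_RST	0x0002	/* MAC status machine reset */

static void example_mac_reset(void __iomem *ioaddr)
{
	int limit = 2048;

	iowrite16(MAC_RST, ioaddr + MCR1);
	while (limit--) {
		if (ioread16(ioaddr + MCR1) & MAC_RST)
			break;
	}

	/* Reset the internal state machine, then release it. */
	iowrite16(MAC_SM_RST, ioaddr + MAC_SM);
	iowrite16(0, ioaddr + MAC_SM);
}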
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index ee5da92..abc7907 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -563,6 +563,7 @@ rx_next:
if (cpr16(IntrStatus) & cp_rx_intr_mask)
goto rx_status_loop;
+ napi_gro_flush(napi);
spin_lock_irqsave(&cp->lock, flags);
__napi_complete(napi);
cpw16_f(IntrMask, cp_intr_mask);
@@ -859,7 +860,6 @@ static void __cp_set_rx_mode (struct net_device *dev)
struct cp_private *cp = netdev_priv(dev);
u32 mc_filter[2]; /* Multicast hash filter */
int rx_mode;
- u32 tmp;
/* Note: do not reorder, GCC is clever about common statements. */
if (dev->flags & IFF_PROMISC) {
@@ -886,11 +886,9 @@ static void __cp_set_rx_mode (struct net_device *dev)
}
/* We can safely update without stopping the chip. */
- tmp = cp_rx_config | rx_mode;
- if (cp->rx_config != tmp) {
- cpw32_f (RxConfig, tmp);
- cp->rx_config = tmp;
- }
+ cp->rx_config = cp_rx_config | rx_mode;
+ cpw32_f(RxConfig, cp->rx_config);
+
cpw32_f (MAR0 + 0, mc_filter[0]);
cpw32_f (MAR0 + 4, mc_filter[1]);
}
@@ -1319,9 +1317,9 @@ static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info
{
struct cp_private *cp = netdev_priv(dev);
- strcpy (info->driver, DRV_NAME);
- strcpy (info->version, DRV_VERSION);
- strcpy (info->bus_info, pci_name(cp->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
}
static void cp_get_ringparam(struct net_device *dev,
@@ -1392,7 +1390,7 @@ static void cp_set_msglevel(struct net_device *dev, u32 value)
cp->msg_enable = value;
}
-static int cp_set_features(struct net_device *dev, u32 features)
+static int cp_set_features(struct net_device *dev, netdev_features_t features)
{
struct cp_private *cp = netdev_priv(dev);
unsigned long flags;
@@ -1589,7 +1587,7 @@ static int cp_set_mac_address(struct net_device *dev, void *p)
No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
*/
-#define eeprom_delay() readl(ee_addr)
+#define eeprom_delay() readb(ee_addr)
/* The EEPROM commands include the always-set leading bit. */
#define EE_EXTEND_CMD (4)
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 4d6b254..a8779be 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1122,7 +1122,7 @@ static void __devexit rtl8139_remove_one (struct pci_dev *pdev)
No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
*/
-#define eeprom_delay() (void)RTL_R32(Cfg9346)
+#define eeprom_delay() (void)RTL_R8(Cfg9346)
/* The EEPROM commands include the always-set leading bit. */
#define EE_WRITE_CMD (5)
@@ -2330,9 +2330,9 @@ static int rtl8139_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static void rtl8139_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct rtl8139_private *tp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(tp->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
info->regdump_len = tp->regs_len;
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index c8f47f1..bbacb37 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -69,9 +69,6 @@
The RTL chips use a 64 element hash table based on the Ethernet CRC. */
static const int multicast_filter_limit = 32;
-/* MAC address length */
-#define MAC_ADDR_LEN 6
-
#define MAX_READ_REQUEST_SHIFT 12
#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
@@ -1406,12 +1403,13 @@ static void rtl8169_get_drvinfo(struct net_device *dev,
struct rtl8169_private *tp = netdev_priv(dev);
struct rtl_fw *rtl_fw = tp->rtl_fw;
- strcpy(info->driver, MODULENAME);
- strcpy(info->version, RTL8169_VERSION);
- strcpy(info->bus_info, pci_name(tp->pci_dev));
+ strlcpy(info->driver, MODULENAME, sizeof(info->driver));
+ strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
- strcpy(info->fw_version, IS_ERR_OR_NULL(rtl_fw) ? "N/A" :
- rtl_fw->version);
+ if (!IS_ERR_OR_NULL(rtl_fw))
+ strlcpy(info->fw_version, rtl_fw->version,
+ sizeof(info->fw_version));
}
static int rtl8169_get_regs_len(struct net_device *dev)
@@ -1555,7 +1553,8 @@ static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
return ret;
}
-static u32 rtl8169_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t rtl8169_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct rtl8169_private *tp = netdev_priv(dev);
@@ -1569,7 +1568,8 @@ static u32 rtl8169_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int rtl8169_set_features(struct net_device *dev, u32 features)
+static int rtl8169_set_features(struct net_device *dev,
+ netdev_features_t features)
{
struct rtl8169_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
@@ -3781,12 +3781,20 @@ static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
rtl_generic_op(tp, tp->jumbo_ops.enable);
+ RTL_W8(Cfg9346, Cfg9346_Lock);
}
static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
{
+ void __iomem *ioaddr = tp->mmio_addr;
+
+ RTL_W8(Cfg9346, Cfg9346_Unlock);
rtl_generic_op(tp, tp->jumbo_ops.disable);
+ RTL_W8(Cfg9346, Cfg9346_Lock);
}
static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
@@ -4101,7 +4109,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
spin_lock_init(&tp->lock);
/* Get MAC address */
- for (i = 0; i < MAC_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = RTL_R8(MAC0 + i);
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
@@ -6186,6 +6194,9 @@ static void rtl_shutdown(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
+ struct device *d = &pdev->dev;
+
+ pm_runtime_get_sync(d);
rtl8169_net_suspend(dev);
@@ -6207,6 +6218,8 @@ static void rtl_shutdown(struct pci_dev *pdev)
pci_wake_from_d3(pdev, true);
pci_set_power_state(pdev, PCI_D3hot);
}
+
+ pm_runtime_put_noidle(d);
}
static struct pci_driver rtl8169_pci_driver = {
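The r8169 shutdown path now brackets its work with a runtime-PM reference, so the device is guaranteed to be resumed while it is quiesced, and the reference is dropped without triggering another suspend. In sketch form (device-specific teardown elided):

#include <linux/pci.h>
#include <linux/pm_runtime.h>

static void example_shutdown(struct pci_dev *pdev)
{
	struct device *d = &pdev->dev;

	pm_runtime_get_sync(d);		/* resume the device if it was suspended */

	/* ... stop DMA, arm Wake-on-LAN, move to D3 as needed ... */

	pm_runtime_put_noidle(d);	/* drop the reference without re-suspending */
}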
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 9b23074..87b6501 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -38,6 +38,7 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
#include <linux/sh_eth.h>
#include "sh_eth.h"
@@ -817,7 +818,8 @@ static int sh_eth_dev_init(struct net_device *ndev)
sh_eth_write(ndev, 0, TRIMD);
/* Recv frame limit set register */
- sh_eth_write(ndev, RFLR_VALUE, RFLR);
+ sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
+ RFLR);
sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
@@ -1369,13 +1371,13 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
}
}
-static struct ethtool_ops sh_eth_ethtool_ops = {
+static const struct ethtool_ops sh_eth_ethtool_ops = {
.get_settings = sh_eth_get_settings,
.set_settings = sh_eth_set_settings,
- .nway_reset = sh_eth_nway_reset,
+ .nway_reset = sh_eth_nway_reset,
.get_msglevel = sh_eth_get_msglevel,
.set_msglevel = sh_eth_set_msglevel,
- .get_link = ethtool_op_get_link,
+ .get_link = ethtool_op_get_link,
.get_strings = sh_eth_get_strings,
.get_ethtool_stats = sh_eth_get_ethtool_stats,
.get_sset_count = sh_eth_get_sset_count,
@@ -1702,7 +1704,8 @@ static int sh_mdio_init(struct net_device *ndev, int id,
/* Hook up MII support for ethtool */
mdp->mii_bus->name = "sh_mii";
mdp->mii_bus->parent = &ndev->dev;
- snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%x", id);
+ snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ mdp->pdev->name, id);
/* PHY IRQ */
mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
@@ -1957,18 +1960,7 @@ static struct platform_driver sh_eth_driver = {
},
};
-static int __init sh_eth_init(void)
-{
- return platform_driver_register(&sh_eth_driver);
-}
-
-static void __exit sh_eth_cleanup(void)
-{
- platform_driver_unregister(&sh_eth_driver);
-}
-
-module_init(sh_eth_init);
-module_exit(sh_eth_cleanup);
+module_platform_driver(sh_eth_driver);
MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 47877b1..cdbd844 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -575,9 +575,6 @@ enum RPADIR_BIT {
RPADIR_PADR = 0x0003f,
};
-/* RFLR */
-#define RFLR_VALUE 0x1000
-
/* FDR */
#define DEFAULT_FDR_INIT 0x00000707
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
index a7ff8ea..22e9c01 100644
--- a/drivers/net/ethernet/s6gmac.c
+++ b/drivers/net/ethernet/s6gmac.c
@@ -1004,7 +1004,7 @@ static int __devinit s6gmac_probe(struct platform_device *pdev)
mb->write = s6mii_write;
mb->reset = s6mii_reset;
mb->priv = pd;
- snprintf(mb->id, MII_BUS_ID_SIZE, "0");
+ snprintf(mb->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, pdev->id);
mb->phy_mask = ~(1 << 0);
mb->irq = &pd->mii.irq[0];
for (i = 0; i < PHY_MAX_ADDR; i++) {
diff --git a/drivers/net/ethernet/seeq/sgiseeq.c b/drivers/net/ethernet/seeq/sgiseeq.c
index c3673f1..f955a19 100644
--- a/drivers/net/ethernet/seeq/sgiseeq.c
+++ b/drivers/net/ethernet/seeq/sgiseeq.c
@@ -834,23 +834,7 @@ static struct platform_driver sgiseeq_driver = {
}
};
-static int __init sgiseeq_module_init(void)
-{
- if (platform_driver_register(&sgiseeq_driver)) {
- printk(KERN_ERR "Driver registration failed\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
-static void __exit sgiseeq_module_exit(void)
-{
- platform_driver_unregister(&sgiseeq_driver);
-}
-
-module_init(sgiseeq_module_init);
-module_exit(sgiseeq_module_exit);
+module_platform_driver(sgiseeq_driver);
MODULE_DESCRIPTION("SGI Seeq 8003 driver");
MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index d5731f1..e43702f 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1336,7 +1336,8 @@ static int efx_probe_nic(struct efx_nic *efx)
if (efx->n_channels > 1)
get_random_bytes(&efx->rx_hash_key, sizeof(efx->rx_hash_key));
for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
- efx->rx_indir_table[i] = i % efx->n_rx_channels;
+ efx->rx_indir_table[i] =
+ ethtool_rxfh_indir_default(i, efx->n_rx_channels);
efx_set_channels(efx);
netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
@@ -1900,7 +1901,7 @@ static void efx_set_multicast_list(struct net_device *net_dev)
/* Otherwise efx_start_port() will do this */
}
-static int efx_set_features(struct net_device *net_dev, u32 data)
+static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -2235,9 +2236,9 @@ static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
{PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE,
PCI_DEVICE_ID_SOLARFLARE_SFC4000B),
.driver_data = (unsigned long) &falcon_b0_nic_type},
- {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, BETHPAGE_A_P_DEVID),
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0803), /* SFC9020 */
.driver_data = (unsigned long) &siena_a0_nic_type},
- {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, SIENA_A_P_DEVID),
+ {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0813), /* SFL9021 */
.driver_data = (unsigned long) &siena_a0_nic_type},
{0} /* end of list */
};
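ethtool_rxfh_indir_default() is the helper now used for the default RSS spread; it is simply index modulo the number of RX rings, so the open-coded i % efx->n_rx_channels above keeps its behaviour. Sketch:

#include <linux/ethtool.h>

static void example_fill_indir_table(u32 *table, unsigned int entries,
				     unsigned int n_rx_rings)
{
	unsigned int i;

	/* ethtool_rxfh_indir_default(i, n) == i % n */
	for (i = 0; i < entries; i++)
		table[i] = ethtool_rxfh_indir_default(i, n_rx_rings);
}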
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index 4764793..a3541ac 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -14,10 +14,6 @@
#include "net_driver.h"
#include "filter.h"
-/* PCI IDs */
-#define BETHPAGE_A_P_DEVID 0x0803
-#define SIENA_A_P_DEVID 0x0813
-
/* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
#define EFX_MEM_BAR 2
@@ -65,13 +61,23 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
extern int efx_probe_filters(struct efx_nic *efx);
extern void efx_restore_filters(struct efx_nic *efx);
extern void efx_remove_filters(struct efx_nic *efx);
-extern int efx_filter_insert_filter(struct efx_nic *efx,
+extern s32 efx_filter_insert_filter(struct efx_nic *efx,
struct efx_filter_spec *spec,
bool replace);
-extern int efx_filter_remove_filter(struct efx_nic *efx,
- struct efx_filter_spec *spec);
+extern int efx_filter_remove_id_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id);
+extern int efx_filter_get_filter_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *);
extern void efx_filter_clear_rx(struct efx_nic *efx,
enum efx_filter_priority priority);
+extern u32 efx_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority);
+extern u32 efx_filter_get_rx_id_limit(struct efx_nic *efx);
+extern s32 efx_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size);
#ifdef CONFIG_RFS_ACCEL
extern int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index f3cd96d..29b2ebf 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -818,9 +818,58 @@ static int efx_ethtool_reset(struct net_device *net_dev, u32 *flags)
return efx_reset(efx, rc);
}
+static int efx_ethtool_get_class_rule(struct efx_nic *efx,
+ struct ethtool_rx_flow_spec *rule)
+{
+ struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
+ struct efx_filter_spec spec;
+ u16 vid;
+ u8 proto;
+ int rc;
+
+ rc = efx_filter_get_filter_safe(efx, EFX_FILTER_PRI_MANUAL,
+ rule->location, &spec);
+ if (rc)
+ return rc;
+
+ if (spec.dmaq_id == 0xfff)
+ rule->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ rule->ring_cookie = spec.dmaq_id;
+
+ rc = efx_filter_get_eth_local(&spec, &vid,
+ rule->h_u.ether_spec.h_dest);
+ if (rc == 0) {
+ rule->flow_type = ETHER_FLOW;
+ memset(rule->m_u.ether_spec.h_dest, ~0, ETH_ALEN);
+ if (vid != EFX_FILTER_VID_UNSPEC) {
+ rule->flow_type |= FLOW_EXT;
+ rule->h_ext.vlan_tci = htons(vid);
+ rule->m_ext.vlan_tci = htons(0xfff);
+ }
+ return 0;
+ }
+
+ rc = efx_filter_get_ipv4_local(&spec, &proto,
+ &ip_entry->ip4dst, &ip_entry->pdst);
+ if (rc != 0) {
+ rc = efx_filter_get_ipv4_full(
+ &spec, &proto, &ip_entry->ip4src, &ip_entry->psrc,
+ &ip_entry->ip4dst, &ip_entry->pdst);
+ EFX_WARN_ON_PARANOID(rc);
+ ip_mask->ip4src = ~0;
+ ip_mask->psrc = ~0;
+ }
+ rule->flow_type = (proto == IPPROTO_TCP) ? TCP_V4_FLOW : UDP_V4_FLOW;
+ ip_mask->ip4dst = ~0;
+ ip_mask->pdst = ~0;
+ return rc;
+}
+
static int
efx_ethtool_get_rxnfc(struct net_device *net_dev,
- struct ethtool_rxnfc *info, u32 *rules __always_unused)
+ struct ethtool_rxnfc *info, u32 *rule_locs)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -862,42 +911,80 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,
return 0;
}
+ case ETHTOOL_GRXCLSRLCNT:
+ info->data = efx_filter_get_rx_id_limit(efx);
+ if (info->data == 0)
+ return -EOPNOTSUPP;
+ info->data |= RX_CLS_LOC_SPECIAL;
+ info->rule_cnt =
+ efx_filter_count_rx_used(efx, EFX_FILTER_PRI_MANUAL);
+ return 0;
+
+ case ETHTOOL_GRXCLSRULE:
+ if (efx_filter_get_rx_id_limit(efx) == 0)
+ return -EOPNOTSUPP;
+ return efx_ethtool_get_class_rule(efx, &info->fs);
+
+ case ETHTOOL_GRXCLSRLALL: {
+ s32 rc;
+ info->data = efx_filter_get_rx_id_limit(efx);
+ if (info->data == 0)
+ return -EOPNOTSUPP;
+ rc = efx_filter_get_rx_ids(efx, EFX_FILTER_PRI_MANUAL,
+ rule_locs, info->rule_cnt);
+ if (rc < 0)
+ return rc;
+ info->rule_cnt = rc;
+ return 0;
+ }
+
default:
return -EOPNOTSUPP;
}
}
-static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
- struct ethtool_rx_ntuple *ntuple)
+static int efx_ethtool_set_class_rule(struct efx_nic *efx,
+ struct ethtool_rx_flow_spec *rule)
{
- struct efx_nic *efx = netdev_priv(net_dev);
- struct ethtool_tcpip4_spec *ip_entry = &ntuple->fs.h_u.tcp_ip4_spec;
- struct ethtool_tcpip4_spec *ip_mask = &ntuple->fs.m_u.tcp_ip4_spec;
- struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec;
- struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec;
- struct efx_filter_spec filter;
+ struct ethtool_tcpip4_spec *ip_entry = &rule->h_u.tcp_ip4_spec;
+ struct ethtool_tcpip4_spec *ip_mask = &rule->m_u.tcp_ip4_spec;
+ struct ethhdr *mac_entry = &rule->h_u.ether_spec;
+ struct ethhdr *mac_mask = &rule->m_u.ether_spec;
+ struct efx_filter_spec spec;
int rc;
- /* Range-check action */
- if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR ||
- ntuple->fs.action >= (s32)efx->n_rx_channels)
+ /* Check that user wants us to choose the location */
+ if (rule->location != RX_CLS_LOC_ANY &&
+ rule->location != RX_CLS_LOC_FIRST &&
+ rule->location != RX_CLS_LOC_LAST)
+ return -EINVAL;
+
+ /* Range-check ring_cookie */
+ if (rule->ring_cookie >= efx->n_rx_channels &&
+ rule->ring_cookie != RX_CLS_FLOW_DISC)
return -EINVAL;
- if (~ntuple->fs.data_mask)
+ /* Check for unsupported extensions */
+ if ((rule->flow_type & FLOW_EXT) &&
+ (rule->m_ext.vlan_etype | rule->m_ext.data[0] |
+ rule->m_ext.data[1]))
return -EINVAL;
- efx_filter_init_rx(&filter, EFX_FILTER_PRI_MANUAL, 0,
- (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) ?
- 0xfff : ntuple->fs.action);
+ efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
+ (rule->location == RX_CLS_LOC_FIRST) ?
+ EFX_FILTER_FLAG_RX_OVERRIDE_IP : 0,
+ (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
+ 0xfff : rule->ring_cookie);
- switch (ntuple->fs.flow_type) {
+ switch (rule->flow_type) {
case TCP_V4_FLOW:
case UDP_V4_FLOW: {
- u8 proto = (ntuple->fs.flow_type == TCP_V4_FLOW ?
+ u8 proto = (rule->flow_type == TCP_V4_FLOW ?
IPPROTO_TCP : IPPROTO_UDP);
/* Must match all of destination, */
- if (ip_mask->ip4dst | ip_mask->pdst)
+ if ((__force u32)~ip_mask->ip4dst |
+ (__force u16)~ip_mask->pdst)
return -EINVAL;
/* all or none of source, */
if ((ip_mask->ip4src | ip_mask->psrc) &&
@@ -905,17 +992,17 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
(__force u16)~ip_mask->psrc))
return -EINVAL;
/* and nothing else */
- if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask)
+ if (ip_mask->tos | rule->m_ext.vlan_tci)
return -EINVAL;
- if (!ip_mask->ip4src)
- rc = efx_filter_set_ipv4_full(&filter, proto,
+ if (ip_mask->ip4src)
+ rc = efx_filter_set_ipv4_full(&spec, proto,
ip_entry->ip4dst,
ip_entry->pdst,
ip_entry->ip4src,
ip_entry->psrc);
else
- rc = efx_filter_set_ipv4_local(&filter, proto,
+ rc = efx_filter_set_ipv4_local(&spec, proto,
ip_entry->ip4dst,
ip_entry->pdst);
if (rc)
@@ -923,23 +1010,24 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
break;
}
- case ETHER_FLOW:
- /* Must match all of destination, */
- if (!is_zero_ether_addr(mac_mask->h_dest))
+ case ETHER_FLOW | FLOW_EXT:
+ /* Must match all or none of VID */
+ if (rule->m_ext.vlan_tci != htons(0xfff) &&
+ rule->m_ext.vlan_tci != 0)
return -EINVAL;
- /* all or none of VID, */
- if (ntuple->fs.vlan_tag_mask != 0xf000 &&
- ntuple->fs.vlan_tag_mask != 0xffff)
+ case ETHER_FLOW:
+ /* Must match all of destination */
+ if (!is_broadcast_ether_addr(mac_mask->h_dest))
return -EINVAL;
/* and nothing else */
- if (!is_broadcast_ether_addr(mac_mask->h_source) ||
- mac_mask->h_proto != htons(0xffff))
+ if (!is_zero_ether_addr(mac_mask->h_source) ||
+ mac_mask->h_proto)
return -EINVAL;
rc = efx_filter_set_eth_local(
- &filter,
- (ntuple->fs.vlan_tag_mask == 0xf000) ?
- ntuple->fs.vlan_tag : EFX_FILTER_VID_UNSPEC,
+ &spec,
+ (rule->flow_type & FLOW_EXT && rule->m_ext.vlan_tci) ?
+ ntohs(rule->h_ext.vlan_tci) : EFX_FILTER_VID_UNSPEC,
mac_entry->h_dest);
if (rc)
return rc;
@@ -949,47 +1037,57 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
return -EINVAL;
}
- if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR)
- return efx_filter_remove_filter(efx, &filter);
+ rc = efx_filter_insert_filter(efx, &spec, true);
+ if (rc < 0)
+ return rc;
- rc = efx_filter_insert_filter(efx, &filter, true);
- return rc < 0 ? rc : 0;
+ rule->location = rc;
+ return 0;
}
-static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,
- struct ethtool_rxfh_indir *indir)
+static int efx_ethtool_set_rxnfc(struct net_device *net_dev,
+ struct ethtool_rxnfc *info)
{
struct efx_nic *efx = netdev_priv(net_dev);
- size_t copy_size =
- min_t(size_t, indir->size, ARRAY_SIZE(efx->rx_indir_table));
- if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
+ if (efx_filter_get_rx_id_limit(efx) == 0)
return -EOPNOTSUPP;
- indir->size = ARRAY_SIZE(efx->rx_indir_table);
- memcpy(indir->ring_index, efx->rx_indir_table,
- copy_size * sizeof(indir->ring_index[0]));
- return 0;
+ switch (info->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ return efx_ethtool_set_class_rule(efx, &info->fs);
+
+ case ETHTOOL_SRXCLSRLDEL:
+ return efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_MANUAL,
+ info->fs.location);
+
+ default:
+ return -EOPNOTSUPP;
+ }
}
-static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
- const struct ethtool_rxfh_indir *indir)
+static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- size_t i;
- if (efx_nic_rev(efx) < EFX_REV_FALCON_B0)
- return -EOPNOTSUPP;
+ return (efx_nic_rev(efx) < EFX_REV_FALCON_B0 ?
+ 0 : ARRAY_SIZE(efx->rx_indir_table));
+}
- /* Validate size and indices */
- if (indir->size != ARRAY_SIZE(efx->rx_indir_table))
- return -EINVAL;
- for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
- if (indir->ring_index[i] >= efx->n_rx_channels)
- return -EINVAL;
+static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, u32 *indir)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table));
+ return 0;
+}
+
+static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
+ const u32 *indir)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
- memcpy(efx->rx_indir_table, indir->ring_index,
- sizeof(efx->rx_indir_table));
+ memcpy(efx->rx_indir_table, indir, sizeof(efx->rx_indir_table));
efx_nic_push_rx_indir_table(efx);
return 0;
}
@@ -1019,7 +1117,8 @@ const struct ethtool_ops efx_ethtool_ops = {
.set_wol = efx_ethtool_set_wol,
.reset = efx_ethtool_reset,
.get_rxnfc = efx_ethtool_get_rxnfc,
- .set_rx_ntuple = efx_ethtool_set_rx_ntuple,
+ .set_rxnfc = efx_ethtool_set_rxnfc,
+ .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
.get_rxfh_indir = efx_ethtool_get_rxfh_indir,
.set_rxfh_indir = efx_ethtool_set_rxfh_indir,
};
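With the reworked indirection-table ethtool API used above, the core first asks the driver for the table size via .get_rxfh_indir_size and then passes a full-sized u32 array to .get/.set_rxfh_indir, so the driver no longer validates sizes itself. A sketch with a hypothetical private structure and a fixed 128-entry table (not sfc code):

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

#define EXAMPLE_INDIR_ENTRIES 128	/* hypothetical table size */

/* Hypothetical private state, for illustration only. */
struct example_nic {
	u32 rx_indir_table[EXAMPLE_INDIR_ENTRIES];
};

static u32 example_get_rxfh_indir_size(struct net_device *net_dev)
{
	return EXAMPLE_INDIR_ENTRIES;	/* 0 would mean "not supported" */
}

static int example_get_rxfh_indir(struct net_device *net_dev, u32 *indir)
{
	struct example_nic *nic = netdev_priv(net_dev);

	memcpy(indir, nic->rx_indir_table, sizeof(nic->rx_indir_table));
	return 0;
}

static int example_set_rxfh_indir(struct net_device *net_dev, const u32 *indir)
{
	struct example_nic *nic = netdev_priv(net_dev);

	memcpy(nic->rx_indir_table, indir, sizeof(nic->rx_indir_table));
	/* push nic->rx_indir_table to hardware here */
	return 0;
}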
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 97b606b..8ae1ebd 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -610,7 +610,7 @@ static void falcon_stats_complete(struct efx_nic *efx)
if (!nic_data->stats_pending)
return;
- nic_data->stats_pending = 0;
+ nic_data->stats_pending = false;
if (*nic_data->stats_dma_done == FALCON_STATS_DONE) {
rmb(); /* read the done flag before the stats */
efx->mac_op->update_stats(efx);
diff --git a/drivers/net/ethernet/sfc/filter.c b/drivers/net/ethernet/sfc/filter.c
index 2b9636f..1fbbbee 100644
--- a/drivers/net/ethernet/sfc/filter.c
+++ b/drivers/net/ethernet/sfc/filter.c
@@ -155,6 +155,16 @@ static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
spec->data[2] = ntohl(host2);
}
+static inline void __efx_filter_get_ipv4(const struct efx_filter_spec *spec,
+ __be32 *host1, __be16 *port1,
+ __be32 *host2, __be16 *port2)
+{
+ *host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
+ *port1 = htons(spec->data[0]);
+ *host2 = htonl(spec->data[2]);
+ *port2 = htons(spec->data[1] >> 16);
+}
+
/**
* efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
* @spec: Specification to initialise
@@ -205,6 +215,26 @@ int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
return 0;
}
+int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
+ u8 *proto, __be32 *host, __be16 *port)
+{
+ __be32 host1;
+ __be16 port1;
+
+ switch (spec->type) {
+ case EFX_FILTER_TCP_WILD:
+ *proto = IPPROTO_TCP;
+ __efx_filter_get_ipv4(spec, &host1, &port1, host, port);
+ return 0;
+ case EFX_FILTER_UDP_WILD:
+ *proto = IPPROTO_UDP;
+ __efx_filter_get_ipv4(spec, &host1, port, host, &port1);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
/**
* efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
* @spec: Specification to initialise
@@ -242,6 +272,25 @@ int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
return 0;
}
+int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
+ u8 *proto, __be32 *host, __be16 *port,
+ __be32 *rhost, __be16 *rport)
+{
+ switch (spec->type) {
+ case EFX_FILTER_TCP_FULL:
+ *proto = IPPROTO_TCP;
+ break;
+ case EFX_FILTER_UDP_FULL:
+ *proto = IPPROTO_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ __efx_filter_get_ipv4(spec, rhost, rport, host, port);
+ return 0;
+}
+
/**
* efx_filter_set_eth_local - specify local Ethernet address and optional VID
* @spec: Specification to initialise
@@ -270,6 +319,29 @@ int efx_filter_set_eth_local(struct efx_filter_spec *spec,
return 0;
}
+int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
+ u16 *vid, u8 *addr)
+{
+ switch (spec->type) {
+ case EFX_FILTER_MAC_WILD:
+ *vid = EFX_FILTER_VID_UNSPEC;
+ break;
+ case EFX_FILTER_MAC_FULL:
+ *vid = spec->data[0];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ addr[0] = spec->data[2] >> 8;
+ addr[1] = spec->data[2];
+ addr[2] = spec->data[1] >> 24;
+ addr[3] = spec->data[1] >> 16;
+ addr[4] = spec->data[1] >> 8;
+ addr[5] = spec->data[1];
+ return 0;
+}
+
/* Build a filter entry and return its n-tuple key. */
static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
{
@@ -332,7 +404,7 @@ static bool efx_filter_equal(const struct efx_filter_spec *left,
static int efx_filter_search(struct efx_filter_table *table,
struct efx_filter_spec *spec, u32 key,
- bool for_insert, int *depth_required)
+ bool for_insert, unsigned int *depth_required)
{
unsigned hash, incr, filter_idx, depth, depth_max;
@@ -366,12 +438,59 @@ static int efx_filter_search(struct efx_filter_table *table,
}
}
-/* Construct/deconstruct external filter IDs */
+/*
+ * Construct/deconstruct external filter IDs. These must be ordered
+ * by matching priority, for RX NFC semantics.
+ *
+ * Each RX MAC filter entry has a flag for whether it can override an
+ * RX IP filter that also matches. So we assign locations for MAC
+ * filters with overriding behaviour, then for IP filters, then for
+ * MAC filters without overriding behaviour.
+ */
+
+#define EFX_FILTER_INDEX_WIDTH 13
+#define EFX_FILTER_INDEX_MASK ((1 << EFX_FILTER_INDEX_WIDTH) - 1)
+
+static inline u32 efx_filter_make_id(enum efx_filter_table_id table_id,
+ unsigned int index, u8 flags)
+{
+ return (table_id == EFX_FILTER_TABLE_RX_MAC &&
+ flags & EFX_FILTER_FLAG_RX_OVERRIDE_IP) ?
+ index :
+ (table_id + 1) << EFX_FILTER_INDEX_WIDTH | index;
+}
+
+static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id)
+{
+ return (id <= EFX_FILTER_INDEX_MASK) ?
+ EFX_FILTER_TABLE_RX_MAC :
+ (id >> EFX_FILTER_INDEX_WIDTH) - 1;
+}
+
+static inline unsigned int efx_filter_id_index(u32 id)
+{
+ return id & EFX_FILTER_INDEX_MASK;
+}
-static inline int
-efx_filter_make_id(enum efx_filter_table_id table_id, unsigned index)
+static inline u8 efx_filter_id_flags(u32 id)
{
- return table_id << 16 | index;
+ return (id <= EFX_FILTER_INDEX_MASK) ?
+ EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_OVERRIDE_IP :
+ EFX_FILTER_FLAG_RX;
+}
+
+u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
+{
+ struct efx_filter_state *state = efx->filter_state;
+
+ if (state->table[EFX_FILTER_TABLE_RX_MAC].size != 0)
+ return ((EFX_FILTER_TABLE_RX_MAC + 1) << EFX_FILTER_INDEX_WIDTH)
+ + state->table[EFX_FILTER_TABLE_RX_MAC].size;
+ else if (state->table[EFX_FILTER_TABLE_RX_IP].size != 0)
+ return ((EFX_FILTER_TABLE_RX_IP + 1) << EFX_FILTER_INDEX_WIDTH)
+ + state->table[EFX_FILTER_TABLE_RX_IP].size;
+ else
+ return 0;
}
/**
@@ -384,14 +503,14 @@ efx_filter_make_id(enum efx_filter_table_id table_id, unsigned index)
* On success, return the filter ID.
* On failure, return a negative error code.
*/
-int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
+s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
bool replace)
{
struct efx_filter_state *state = efx->filter_state;
struct efx_filter_table *table = efx_filter_spec_table(state, spec);
struct efx_filter_spec *saved_spec;
efx_oword_t filter;
- int filter_idx, depth;
+ unsigned int filter_idx, depth;
u32 key;
int rc;
@@ -439,7 +558,7 @@ int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
netif_vdbg(efx, hw, efx->net_dev,
"%s: filter type %d index %d rxq %u set",
__func__, spec->type, filter_idx, spec->dmaq_id);
- rc = efx_filter_make_id(table->id, filter_idx);
+ rc = efx_filter_make_id(table->id, filter_idx, spec->flags);
out:
spin_unlock_bh(&state->lock);
@@ -448,7 +567,7 @@ out:
static void efx_filter_table_clear_entry(struct efx_nic *efx,
struct efx_filter_table *table,
- int filter_idx)
+ unsigned int filter_idx)
{
static efx_oword_t filter;
@@ -463,48 +582,101 @@ static void efx_filter_table_clear_entry(struct efx_nic *efx,
}
/**
- * efx_filter_remove_filter - remove a filter by specification
+ * efx_filter_remove_id_safe - remove a filter by ID, carefully
* @efx: NIC from which to remove the filter
- * @spec: Specification for the filter
+ * @priority: Priority of filter, as passed to @efx_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
*
- * On success, return zero.
- * On failure, return a negative error code.
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
*/
-int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec)
+int efx_filter_remove_id_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id)
{
struct efx_filter_state *state = efx->filter_state;
- struct efx_filter_table *table = efx_filter_spec_table(state, spec);
- struct efx_filter_spec *saved_spec;
- efx_oword_t filter;
- int filter_idx, depth;
- u32 key;
+ enum efx_filter_table_id table_id;
+ struct efx_filter_table *table;
+ unsigned int filter_idx;
+ struct efx_filter_spec *spec;
+ u8 filter_flags;
int rc;
- if (!table)
- return -EINVAL;
+ table_id = efx_filter_id_table_id(filter_id);
+ if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
+ return -ENOENT;
+ table = &state->table[table_id];
- key = efx_filter_build(&filter, spec);
+ filter_idx = efx_filter_id_index(filter_id);
+ if (filter_idx >= table->size)
+ return -ENOENT;
+ spec = &table->spec[filter_idx];
- spin_lock_bh(&state->lock);
+ filter_flags = efx_filter_id_flags(filter_id);
- rc = efx_filter_search(table, spec, key, false, &depth);
- if (rc < 0)
- goto out;
- filter_idx = rc;
- saved_spec = &table->spec[filter_idx];
+ spin_lock_bh(&state->lock);
- if (spec->priority < saved_spec->priority) {
- rc = -EPERM;
- goto out;
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ spec->priority == priority && spec->flags == filter_flags) {
+ efx_filter_table_clear_entry(efx, table, filter_idx);
+ if (table->used == 0)
+ efx_filter_table_reset_search_depth(table);
+ rc = 0;
+ } else {
+ rc = -ENOENT;
}
- efx_filter_table_clear_entry(efx, table, filter_idx);
- if (table->used == 0)
- efx_filter_table_reset_search_depth(table);
- rc = 0;
+ spin_unlock_bh(&state->lock);
+
+ return rc;
+}
+
+/**
+ * efx_filter_get_filter_safe - retrieve a filter by ID, carefully
+ * @efx: NIC from which to retrieve the filter
+ * @priority: Priority of filter, as passed to @efx_filter_insert_filter
+ * @filter_id: ID of filter, as returned by @efx_filter_insert_filter
+ * @spec: Buffer in which to store filter specification
+ *
+ * This function will range-check @filter_id, so it is safe to call
+ * with a value passed from userland.
+ */
+int efx_filter_get_filter_safe(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 filter_id, struct efx_filter_spec *spec_buf)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ enum efx_filter_table_id table_id;
+ struct efx_filter_table *table;
+ struct efx_filter_spec *spec;
+ unsigned int filter_idx;
+ u8 filter_flags;
+ int rc;
+
+ table_id = efx_filter_id_table_id(filter_id);
+ if ((unsigned int)table_id >= EFX_FILTER_TABLE_COUNT)
+ return -ENOENT;
+ table = &state->table[table_id];
+
+ filter_idx = efx_filter_id_index(filter_id);
+ if (filter_idx >= table->size)
+ return -ENOENT;
+ spec = &table->spec[filter_idx];
+
+ filter_flags = efx_filter_id_flags(filter_id);
+
+ spin_lock_bh(&state->lock);
+
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ spec->priority == priority && spec->flags == filter_flags) {
+ *spec_buf = *spec;
+ rc = 0;
+ } else {
+ rc = -ENOENT;
+ }
-out:
spin_unlock_bh(&state->lock);
+
return rc;
}
@@ -514,7 +686,7 @@ static void efx_filter_table_clear(struct efx_nic *efx,
{
struct efx_filter_state *state = efx->filter_state;
struct efx_filter_table *table = &state->table[table_id];
- int filter_idx;
+ unsigned int filter_idx;
spin_lock_bh(&state->lock);
@@ -538,6 +710,68 @@ void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority)
efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority);
}
+u32 efx_filter_count_rx_used(struct efx_nic *efx,
+ enum efx_filter_priority priority)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ enum efx_filter_table_id table_id;
+ struct efx_filter_table *table;
+ unsigned int filter_idx;
+ u32 count = 0;
+
+ spin_lock_bh(&state->lock);
+
+ for (table_id = EFX_FILTER_TABLE_RX_IP;
+ table_id <= EFX_FILTER_TABLE_RX_MAC;
+ table_id++) {
+ table = &state->table[table_id];
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ table->spec[filter_idx].priority == priority)
+ ++count;
+ }
+ }
+
+ spin_unlock_bh(&state->lock);
+
+ return count;
+}
+
+s32 efx_filter_get_rx_ids(struct efx_nic *efx,
+ enum efx_filter_priority priority,
+ u32 *buf, u32 size)
+{
+ struct efx_filter_state *state = efx->filter_state;
+ enum efx_filter_table_id table_id;
+ struct efx_filter_table *table;
+ unsigned int filter_idx;
+ s32 count = 0;
+
+ spin_lock_bh(&state->lock);
+
+ for (table_id = EFX_FILTER_TABLE_RX_IP;
+ table_id <= EFX_FILTER_TABLE_RX_MAC;
+ table_id++) {
+ table = &state->table[table_id];
+ for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
+ if (test_bit(filter_idx, table->used_bitmap) &&
+ table->spec[filter_idx].priority == priority) {
+ if (count == size) {
+ count = -EMSGSIZE;
+ goto out;
+ }
+ buf[count++] = efx_filter_make_id(
+ table_id, filter_idx,
+ table->spec[filter_idx].flags);
+ }
+ }
+ }
+out:
+ spin_unlock_bh(&state->lock);
+
+ return count;
+}
+
/* Restore filter state after reset */
void efx_restore_filters(struct efx_nic *efx)
{
@@ -545,7 +779,7 @@ void efx_restore_filters(struct efx_nic *efx)
enum efx_filter_table_id table_id;
struct efx_filter_table *table;
efx_oword_t filter;
- int filter_idx;
+ unsigned int filter_idx;
spin_lock_bh(&state->lock);
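To make the external filter ID layout above concrete, here is a small standalone example of the encoding. Only the 13-bit index field comes from the hunk; the table numbers are hypothetical. MAC filters carrying the override-IP flag occupy the low ID range, so IDs sorted numerically follow the hardware matching priority, as RX NFC expects.

#include <stdio.h>

#define INDEX_WIDTH 13

/* Hypothetical table numbers, for illustration only. */
enum { TABLE_RX_IP = 0, TABLE_RX_MAC = 1 };

static unsigned int make_id(int table, unsigned int index, int override_ip)
{
	if (table == TABLE_RX_MAC && override_ip)
		return index;
	return (unsigned int)(table + 1) << INDEX_WIDTH | index;
}

int main(void)
{
	printf("%#x\n", make_id(TABLE_RX_MAC, 5, 1));	/* 0x5    */
	printf("%#x\n", make_id(TABLE_RX_IP,  5, 0));	/* 0x2005 */
	printf("%#x\n", make_id(TABLE_RX_MAC, 5, 0));	/* 0x4005 */
	return 0;
}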
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index 872f213..3d4108c 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -78,6 +78,11 @@ enum efx_filter_flags {
*
* Use the efx_filter_set_*() functions to initialise the @type and
* @data fields.
+ *
+ * The @priority field is used by software to determine whether a new
+ * filter may replace an old one. The hardware priority of a filter
+ * depends on the filter type and %EFX_FILTER_FLAG_RX_OVERRIDE_IP
+ * flag.
*/
struct efx_filter_spec {
u8 type:4;
@@ -100,11 +105,18 @@ static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
__be32 host, __be16 port);
+extern int efx_filter_get_ipv4_local(const struct efx_filter_spec *spec,
+ u8 *proto, __be32 *host, __be16 *port);
extern int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
__be32 host, __be16 port,
__be32 rhost, __be16 rport);
+extern int efx_filter_get_ipv4_full(const struct efx_filter_spec *spec,
+ u8 *proto, __be32 *host, __be16 *port,
+ __be32 *rhost, __be16 *rport);
extern int efx_filter_set_eth_local(struct efx_filter_spec *spec,
u16 vid, const u8 *addr);
+extern int efx_filter_get_eth_local(const struct efx_filter_spec *spec,
+ u16 *vid, u8 *addr);
enum {
EFX_FILTER_VID_UNSPEC = 0xffff,
};
diff --git a/drivers/net/ethernet/sfc/mtd.c b/drivers/net/ethernet/sfc/mtd.c
index b630448..bc9dcd6 100644
--- a/drivers/net/ethernet/sfc/mtd.c
+++ b/drivers/net/ethernet/sfc/mtd.c
@@ -496,7 +496,7 @@ static int siena_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
if (rc)
goto out;
- part->mcdi.updating = 1;
+ part->mcdi.updating = true;
}
/* The MCDI interface can in fact do multiple erase blocks at once;
@@ -528,7 +528,7 @@ static int siena_mtd_write(struct mtd_info *mtd, loff_t start,
rc = efx_mcdi_nvram_update_start(efx, part->mcdi.nvram_type);
if (rc)
goto out;
- part->mcdi.updating = 1;
+ part->mcdi.updating = true;
}
while (offset < end) {
@@ -553,7 +553,7 @@ static int siena_mtd_sync(struct mtd_info *mtd)
int rc = 0;
if (part->mcdi.updating) {
- part->mcdi.updating = 0;
+ part->mcdi.updating = false;
rc = efx_mcdi_nvram_update_finish(efx, part->mcdi.nvram_type);
}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index b8e251a..c49502b 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -908,7 +908,7 @@ struct efx_nic_type {
unsigned int phys_addr_channels;
unsigned int tx_dc_base;
unsigned int rx_dc_base;
- u32 offload_features;
+ netdev_features_t offload_features;
};
/**************************************************************************
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 752d521..fc52fca 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -156,11 +156,10 @@ static int efx_init_rx_buffers_skb(struct efx_rx_queue *rx_queue)
if (unlikely(!skb))
return -ENOMEM;
- /* Adjust the SKB for padding and checksum */
+ /* Adjust the SKB for padding */
skb_reserve(skb, NET_IP_ALIGN);
rx_buf->len = skb_len - NET_IP_ALIGN;
rx_buf->is_page = false;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
rx_buf->dma_addr = pci_map_single(efx->pci_dev,
skb->data, rx_buf->len,
@@ -479,11 +478,8 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
if (efx->net_dev->features & NETIF_F_RXHASH)
skb->rxhash = efx_rx_buf_hash(eh);
- skb_frag_set_page(skb, 0, page);
- skb_shinfo(skb)->frags[0].page_offset =
- efx_rx_buf_offset(efx, rx_buf);
- skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx_buf->len);
- skb_shinfo(skb)->nr_frags = 1;
+ skb_fill_page_desc(skb, 0, page,
+ efx_rx_buf_offset(efx, rx_buf), rx_buf->len);
skb->len = rx_buf->len;
skb->data_len = rx_buf->len;
@@ -499,6 +495,7 @@ static void efx_rx_packet_gro(struct efx_channel *channel,
EFX_BUG_ON_PARANOID(!checksummed);
rx_buf->u.skb = NULL;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
gro_result = napi_gro_receive(napi, skb);
}
@@ -669,7 +666,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
rx_queue->ptr_mask);
/* Allocate RX buffers */
- rx_queue->buffer = kzalloc(entries * sizeof(*rx_queue->buffer),
+ rx_queue->buffer = kcalloc(entries, sizeof(*rx_queue->buffer),
GFP_KERNEL);
if (!rx_queue->buffer)
return -ENOMEM;
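skb_fill_page_desc() replaces the open-coded frag setup in the GRO path above: it fills page, offset and size for one fragment and bumps nr_frags, but the caller still accounts the length itself. A sketch, assuming a fresh frag-only skb and with truesize accounting omitted (names illustrative):

#include <linux/skbuff.h>

static void example_attach_page(struct sk_buff *skb, struct page *page,
				unsigned int offset, unsigned int len)
{
	skb_fill_page_desc(skb, 0, page, offset, len);

	/* skb_fill_page_desc() does not touch the length fields. */
	skb->len = len;
	skb->data_len = len;

	/* The hardware verified the checksum for this packet. */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}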
diff --git a/drivers/net/ethernet/sfc/selftest.c b/drivers/net/ethernet/sfc/selftest.c
index 822f6c2..52edd24 100644
--- a/drivers/net/ethernet/sfc/selftest.c
+++ b/drivers/net/ethernet/sfc/selftest.c
@@ -503,8 +503,8 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
/* Determine how many packets to send */
state->packet_count = efx->txq_entries / 3;
state->packet_count = min(1 << (i << 2), state->packet_count);
- state->skbs = kzalloc(sizeof(state->skbs[0]) *
- state->packet_count, GFP_KERNEL);
+ state->skbs = kcalloc(state->packet_count,
+ sizeof(state->skbs[0]), GFP_KERNEL);
if (!state->skbs)
return -ENOMEM;
state->flush = false;
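kcalloc() is used here, and in the rx/tx ring allocations above, because it checks the n * size multiplication for overflow before zero-allocating the array. A minimal sketch:

#include <linux/slab.h>

static struct sk_buff **example_alloc_skb_array(unsigned int count)
{
	/* Fails with NULL (rather than returning a short buffer) if
	 * count * size would overflow. */
	return kcalloc(count, sizeof(struct sk_buff *), GFP_KERNEL);
}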
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index cc2549c..4d5d619 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -232,7 +232,7 @@ static int siena_probe_nvconfig(struct efx_nic *efx)
static int siena_probe_nic(struct efx_nic *efx)
{
struct siena_nic_data *nic_data;
- bool already_attached = 0;
+ bool already_attached = false;
efx_oword_t reg;
int rc;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index df88c543..72f0fbc 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -31,7 +31,9 @@
#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
- struct efx_tx_buffer *buffer)
+ struct efx_tx_buffer *buffer,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
{
if (buffer->unmap_len) {
struct pci_dev *pci_dev = tx_queue->efx->pci_dev;
@@ -48,6 +50,8 @@ static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
}
if (buffer->skb) {
+ (*pkts_compl)++;
+ (*bytes_compl) += buffer->skb->len;
dev_kfree_skb_any((struct sk_buff *) buffer->skb);
buffer->skb = NULL;
netif_vdbg(tx_queue->efx, tx_done, tx_queue->efx->net_dev,
@@ -250,6 +254,8 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
buffer->skb = skb;
buffer->continuation = false;
+ netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
+
/* Pass off to hardware */
efx_nic_push_buffers(tx_queue);
@@ -267,10 +273,11 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
unwind:
/* Work backwards until we hit the original insert pointer value */
while (tx_queue->insert_count != tx_queue->write_count) {
+ unsigned int pkts_compl = 0, bytes_compl = 0;
--tx_queue->insert_count;
insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
buffer = &tx_queue->buffer[insert_ptr];
- efx_dequeue_buffer(tx_queue, buffer);
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
buffer->len = 0;
}
@@ -293,7 +300,9 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
* specified index.
*/
static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
- unsigned int index)
+ unsigned int index,
+ unsigned int *pkts_compl,
+ unsigned int *bytes_compl)
{
struct efx_nic *efx = tx_queue->efx;
unsigned int stop_index, read_ptr;
@@ -311,7 +320,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
return;
}
- efx_dequeue_buffer(tx_queue, buffer);
+ efx_dequeue_buffer(tx_queue, buffer, pkts_compl, bytes_compl);
buffer->continuation = true;
buffer->len = 0;
@@ -422,10 +431,12 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
{
unsigned fill_level;
struct efx_nic *efx = tx_queue->efx;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
- efx_dequeue_buffers(tx_queue, index);
+ efx_dequeue_buffers(tx_queue, index, &pkts_compl, &bytes_compl);
+ netdev_tx_completed_queue(tx_queue->core_txq, pkts_compl, bytes_compl);
/* See if we need to restart the netif queue. This barrier
* separates the update of read_count from the test of the
@@ -468,7 +479,7 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->queue, efx->txq_entries, tx_queue->ptr_mask);
/* Allocate software ring */
- tx_queue->buffer = kzalloc(entries * sizeof(*tx_queue->buffer),
+ tx_queue->buffer = kcalloc(entries, sizeof(*tx_queue->buffer),
GFP_KERNEL);
if (!tx_queue->buffer)
return -ENOMEM;
@@ -515,13 +526,15 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
/* Free any buffers left in the ring */
while (tx_queue->read_count != tx_queue->write_count) {
+ unsigned int pkts_compl = 0, bytes_compl = 0;
buffer = &tx_queue->buffer[tx_queue->read_count & tx_queue->ptr_mask];
- efx_dequeue_buffer(tx_queue, buffer);
+ efx_dequeue_buffer(tx_queue, buffer, &pkts_compl, &bytes_compl);
buffer->continuation = true;
buffer->len = 0;
++tx_queue->read_count;
}
+ netdev_tx_reset_queue(tx_queue->core_txq);
}
void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
@@ -1160,6 +1173,8 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
goto mem_err;
}
+ netdev_tx_sent_queue(tx_queue->core_txq, skb->len);
+
/* Pass off to hardware */
efx_nic_push_buffers(tx_queue);
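The tx.c changes wire the queue into Byte Queue Limits: bytes are reported when descriptors are queued, packets and bytes when they complete, and the accounting is reset when the ring is freed. The shape, in sketch form (queue setup and descriptor handling omitted):

#include <linux/netdevice.h>

static void example_tx_enqueue(struct netdev_queue *txq, struct sk_buff *skb)
{
	/* ... map the skb and write descriptors ... */
	netdev_tx_sent_queue(txq, skb->len);
}

static void example_tx_complete(struct netdev_queue *txq,
				unsigned int pkts, unsigned int bytes)
{
	/* Totals accumulated while walking the completed descriptors. */
	netdev_tx_completed_queue(txq, pkts, bytes);
}

static void example_tx_ring_free(struct netdev_queue *txq)
{
	netdev_tx_reset_queue(txq);
}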
diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c
index 60135aa..53efe7c 100644
--- a/drivers/net/ethernet/sgi/meth.c
+++ b/drivers/net/ethernet/sgi/meth.c
@@ -28,6 +28,7 @@
#include <linux/tcp.h> /* struct tcphdr */
#include <linux/skbuff.h>
#include <linux/mii.h> /* MII definitions */
+#include <linux/crc32.h>
#include <asm/ip32/mace.h>
#include <asm/ip32/ip32_ints.h>
@@ -58,12 +59,19 @@ static int timeout = TX_TIMEOUT;
module_param(timeout, int, 0);
/*
+ * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+ * MACE Ethernet uses a 64 element hash table based on the Ethernet CRC.
+ */
+#define METH_MCF_LIMIT 32
+
+/*
* This structure is private to each device. It is used to pass
* packets in and out, so there is place for a packet
*/
struct meth_private {
/* in-memory copy of MAC Control register */
- unsigned long mac_ctrl;
+ u64 mac_ctrl;
+
/* in-memory copy of DMA Control register */
unsigned long dma_ctrl;
/* address of PHY, used by mdio_* functions, initialized in mdio_probe */
@@ -79,6 +87,9 @@ struct meth_private {
struct sk_buff *rx_skbs[RX_RING_ENTRIES];
unsigned long rx_write;
+ /* Multicast filter. */
+ u64 mcast_filter;
+
spinlock_t meth_lock;
};
@@ -765,6 +776,40 @@ static int meth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
}
+static void meth_set_rx_mode(struct net_device *dev)
+{
+ struct meth_private *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ netif_stop_queue(dev);
+ spin_lock_irqsave(&priv->meth_lock, flags);
+ priv->mac_ctrl &= ~METH_PROMISC;
+
+ if (dev->flags & IFF_PROMISC) {
+ priv->mac_ctrl |= METH_PROMISC;
+ priv->mcast_filter = 0xffffffffffffffffUL;
+ } else if ((netdev_mc_count(dev) > METH_MCF_LIMIT) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ priv->mac_ctrl |= METH_ACCEPT_AMCAST;
+ priv->mcast_filter = 0xffffffffffffffffUL;
+ } else {
+ struct netdev_hw_addr *ha;
+ priv->mac_ctrl |= METH_ACCEPT_MCAST;
+
+ netdev_for_each_mc_addr(ha, dev)
+ set_bit((ether_crc(ETH_ALEN, ha->addr) >> 26),
+ (volatile unsigned long *)&priv->mcast_filter);
+ }
+
+ /* Write the changes to the chip registers. */
+ mace->eth.mac_ctrl = priv->mac_ctrl;
+ mace->eth.mcast_filter = priv->mcast_filter;
+
+ /* Done! */
+ spin_unlock_irqrestore(&priv->meth_lock, flags);
+ netif_wake_queue(dev);
+}
+
static const struct net_device_ops meth_netdev_ops = {
.ndo_open = meth_open,
.ndo_stop = meth_release,
@@ -774,6 +819,7 @@ static const struct net_device_ops meth_netdev_ops = {
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_rx_mode = meth_set_rx_mode,
};
/*
@@ -830,24 +876,7 @@ static struct platform_driver meth_driver = {
}
};
-static int __init meth_init_module(void)
-{
- int err;
-
- err = platform_driver_register(&meth_driver);
- if (err)
- printk(KERN_ERR "Driver registration failed\n");
-
- return err;
-}
-
-static void __exit meth_exit_module(void)
-{
- platform_driver_unregister(&meth_driver);
-}
-
-module_init(meth_init_module);
-module_exit(meth_exit_module);
+module_platform_driver(meth_driver);
MODULE_AUTHOR("Ilya Volynets <ilya@theIlya.com>");
MODULE_DESCRIPTION("SGI O2 Builtin Fast Ethernet driver");
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index 1b4658c..5b118cd 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -47,8 +47,6 @@
#define sis190_rx_skb netif_rx
#define sis190_rx_quota(count, quota) count
-#define MAC_ADDR_LEN 6
-
#define NUM_TX_DESC 64 /* [8..1024] */
#define NUM_RX_DESC 64 /* [8..8192] */
#define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
@@ -1601,7 +1599,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
}
/* Get MAC address from EEPROM */
- for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
+ for (i = 0; i < ETH_ALEN / 2; i++) {
u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w);
@@ -1653,7 +1651,7 @@ static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
udelay(50);
pci_read_config_byte(isa_bridge, 0x48, &reg);
- for (i = 0; i < MAC_ADDR_LEN; i++) {
+ for (i = 0; i < ETH_ALEN; i++) {
outb(0x9 + i, 0x78);
dev->dev_addr[i] = inb(0x79);
}
@@ -1692,7 +1690,7 @@ static inline void sis190_init_rxfilter(struct net_device *dev)
*/
SIS_W16(RxMacControl, ctl & ~0x0f00);
- for (i = 0; i < MAC_ADDR_LEN; i++)
+ for (i = 0; i < ETH_ALEN; i++)
SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
SIS_W16(RxMacControl, ctl);
@@ -1760,9 +1758,10 @@ static void sis190_get_drvinfo(struct net_device *dev,
{
struct sis190_private *tp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(tp->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(tp->pci_dev),
+ sizeof(info->bus_info));
}
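The strcpy() to strlcpy() conversions repeated across sis190, sis900, epic100, smc91c92, smsc9420 and cassini in this series all follow the same pattern; a minimal sketch of the idea (the field sizes are those of struct ethtool_drvinfo, and example_fill_drvinfo() is an illustrative helper, not driver code):

    #include <linux/ethtool.h>
    #include <linux/string.h>

    /* Sketch: bounding the copy by sizeof() the destination field means an
     * over-long source (e.g. a long bus_info string) is truncated and still
     * NUL-terminated, instead of overflowing the fixed-size ethtool buffers. */
    static void example_fill_drvinfo(struct ethtool_drvinfo *info,
                                     const char *name, const char *bus)
    {
            strlcpy(info->driver, name, sizeof(info->driver));
            strlcpy(info->bus_info, bus, sizeof(info->bus_info));
    }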
static int sis190_get_regs_len(struct net_device *dev)
diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c
index a184abc..c8efc70 100644
--- a/drivers/net/ethernet/sis/sis900.c
+++ b/drivers/net/ethernet/sis/sis900.c
@@ -1991,9 +1991,10 @@ static void sis900_get_drvinfo(struct net_device *net_dev,
{
struct sis900_private *sis_priv = netdev_priv(net_dev);
- strcpy (info->driver, SIS900_MODULE_NAME);
- strcpy (info->version, SIS900_DRV_VERSION);
- strcpy (info->bus_info, pci_name(sis_priv->pci_dev));
+ strlcpy(info->driver, SIS900_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, SIS900_DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(sis_priv->pci_dev),
+ sizeof(info->bus_info));
}
static u32 sis900_get_msglevel(struct net_device *net_dev)
diff --git a/drivers/net/ethernet/sis/sis900.h b/drivers/net/ethernet/sis/sis900.h
index 150511a..1341f33 100644
--- a/drivers/net/ethernet/sis/sis900.h
+++ b/drivers/net/ethernet/sis/sis900.h
@@ -205,7 +205,7 @@ enum sis900_tx_buffer_status {
EXCCOLL = 0x00100000, COLCNT = 0x000F0000
};
-enum sis900_rx_bufer_status {
+enum sis900_rx_buffer_status {
OVERRUN = 0x02000000, DEST = 0x00800000, BCAST = 0x01800000,
MCAST = 0x01000000, UNIMATCH = 0x00800000, TOOLONG = 0x00400000,
RUNT = 0x00200000, RXISERR = 0x00100000, CRCERR = 0x00080000,
diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c
index 0a5dfb8..2c077ce 100644
--- a/drivers/net/ethernet/smsc/epic100.c
+++ b/drivers/net/ethernet/smsc/epic100.c
@@ -1414,9 +1414,9 @@ static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *
{
struct epic_private *np = netdev_priv(dev);
- strcpy (info->driver, DRV_NAME);
- strcpy (info->version, DRV_VERSION);
- strcpy (info->bus_info, pci_name(np->pci_dev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c
index 8f61fe9..313ba3b 100644
--- a/drivers/net/ethernet/smsc/smc911x.c
+++ b/drivers/net/ethernet/smsc/smc911x.c
@@ -2196,15 +2196,4 @@ static struct platform_driver smc911x_driver = {
},
};
-static int __init smc911x_init(void)
-{
- return platform_driver_register(&smc911x_driver);
-}
-
-static void __exit smc911x_cleanup(void)
-{
- platform_driver_unregister(&smc911x_driver);
-}
-
-module_init(smc911x_init);
-module_exit(smc911x_cleanup);
+module_platform_driver(smc911x_driver);
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index cbfa981..ada927a 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -1909,8 +1909,8 @@ static int check_if_running(struct net_device *dev)
static void smc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static int smc_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index f47f81e..64ad3ed 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2417,15 +2417,4 @@ static struct platform_driver smc_driver = {
},
};
-static int __init smc_init(void)
-{
- return platform_driver_register(&smc_driver);
-}
-
-static void __exit smc_cleanup(void)
-{
- platform_driver_unregister(&smc_driver);
-}
-
-module_init(smc_init);
-module_exit(smc_cleanup);
+module_platform_driver(smc_driver);
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index 8843071..24d2df0 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -44,6 +44,7 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/bug.h>
@@ -88,6 +89,8 @@ struct smsc911x_ops {
unsigned int *buf, unsigned int wordcount);
};
+#define SMSC911X_NUM_SUPPLIES 2
+
struct smsc911x_data {
void __iomem *ioaddr;
@@ -138,6 +141,9 @@ struct smsc911x_data {
/* register access functions */
const struct smsc911x_ops *ops;
+
+ /* regulators */
+ struct regulator_bulk_data supplies[SMSC911X_NUM_SUPPLIES];
};
/* Easy access to information */
@@ -362,6 +368,76 @@ out:
spin_unlock_irqrestore(&pdata->dev_lock, flags);
}
+/*
+ * enable resources, currently just regulators.
+ */
+static int smsc911x_enable_resources(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct smsc911x_data *pdata = netdev_priv(ndev);
+ int ret = 0;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(pdata->supplies),
+ pdata->supplies);
+ if (ret)
+ netdev_err(ndev, "failed to enable regulators %d\n",
+ ret);
+ return ret;
+}
+
+/*
+ * disable resources, currently just regulators.
+ */
+static int smsc911x_disable_resources(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct smsc911x_data *pdata = netdev_priv(ndev);
+ int ret = 0;
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(pdata->supplies),
+ pdata->supplies);
+ return ret;
+}
+
+/*
+ * Request resources, currently just regulators.
+ *
+ * The SMSC911x has two power pins: vddvario and vdd33a. In designs where
+ * these are not always-on, we need to request the regulators to be turned on
+ * before we can try to access the device registers.
+ */
+static int smsc911x_request_resources(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct smsc911x_data *pdata = netdev_priv(ndev);
+ int ret = 0;
+
+ /* Request regulators */
+ pdata->supplies[0].supply = "vdd33a";
+ pdata->supplies[1].supply = "vddvario";
+ ret = regulator_bulk_get(&pdev->dev,
+ ARRAY_SIZE(pdata->supplies),
+ pdata->supplies);
+ if (ret)
+ netdev_err(ndev, "couldn't get regulators %d\n",
+ ret);
+ return ret;
+}
+
+/*
+ * Free resources, currently just regulators.
+ */
+static void smsc911x_free_resources(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct smsc911x_data *pdata = netdev_priv(ndev);
+
+ /* Free regulators */
+ regulator_bulk_free(ARRAY_SIZE(pdata->supplies),
+ pdata->supplies);
+}
+
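A condensed sketch of the regulator_bulk_* lifecycle the four helpers above wrap (get at probe, enable before register access, disable/free on the error and remove paths). The supply names are the ones smsc911x_request_resources() asks for; example_power_sequence() itself is illustrative only:

    #include <linux/device.h>
    #include <linux/kernel.h>
    #include <linux/regulator/consumer.h>

    /* Sketch of the call ordering wrapped by the helpers above. */
    static int example_power_sequence(struct device *dev)
    {
            struct regulator_bulk_data supplies[] = {
                    { .supply = "vdd33a" },
                    { .supply = "vddvario" },
            };
            int ret;

            ret = regulator_bulk_get(dev, ARRAY_SIZE(supplies), supplies);
            if (ret)
                    return ret;

            ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
            if (ret)
                    goto out_free;

            /* ... the device registers may be accessed here ... */

            regulator_bulk_disable(ARRAY_SIZE(supplies), supplies);
    out_free:
            regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
            return ret;
    }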
/* waits for MAC not busy, with timeout. Only called by smsc911x_mac_read
* and smsc911x_mac_write, so assumes mac_lock is held */
static int smsc911x_mac_complete(struct smsc911x_data *pdata)
@@ -968,7 +1044,8 @@ static int __devinit smsc911x_mii_init(struct platform_device *pdev,
}
pdata->mii_bus->name = SMSC_MDIONAME;
- snprintf(pdata->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+ snprintf(pdata->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, pdev->id);
pdata->mii_bus->priv = pdata;
pdata->mii_bus->read = smsc911x_mii_read;
pdata->mii_bus->write = smsc911x_mii_write;
@@ -1243,10 +1320,92 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata)
spin_unlock(&pdata->mac_lock);
}
+static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
+{
+ int rc = 0;
+
+ if (!pdata->phy_dev)
+ return rc;
+
+ rc = phy_read(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS);
+
+ if (rc < 0) {
+ SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
+ return rc;
+ }
+
+ /*
+ * If energy is detected, the PHY is already awake, so it is not
+ * necessary to disable the energy-detect power-down mode.
+ */
+ if ((rc & MII_LAN83C185_EDPWRDOWN) &&
+ !(rc & MII_LAN83C185_ENERGYON)) {
+ /* Disable energy-detect mode for this SMSC transceiver */
+ rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
+ rc & (~MII_LAN83C185_EDPWRDOWN));
+
+ if (rc < 0) {
+ SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
+ return rc;
+ }
+
+ mdelay(1);
+ }
+
+ return 0;
+}
+
+static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
+{
+ int rc = 0;
+
+ if (!pdata->phy_dev)
+ return rc;
+
+ rc = phy_read(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS);
+
+ if (rc < 0) {
+ SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
+ return rc;
+ }
+
+ /* Only enable if energy detect mode is already disabled */
+ if (!(rc & MII_LAN83C185_EDPWRDOWN)) {
+ mdelay(100);
+ /* Enable energy-detect mode for this SMSC transceiver */
+ rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
+ rc | MII_LAN83C185_EDPWRDOWN);
+
+ if (rc < 0) {
+ SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
+ return rc;
+ }
+
+ mdelay(1);
+ }
+ return 0;
+}
+
static int smsc911x_soft_reset(struct smsc911x_data *pdata)
{
unsigned int timeout;
unsigned int temp;
+ int ret;
+
+ /*
+ * LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that
+ * is initialized in an Energy Detect Power-Down mode that prevents
+ * the MAC chip from being software reset. So we have to wake up the
+ * PHY first.
+ */
+ if (pdata->generation == 4) {
+ ret = smsc911x_phy_disable_energy_detect(pdata);
+
+ if (ret) {
+ SMSC_WARN(pdata, drv, "Failed to wakeup the PHY chip");
+ return ret;
+ }
+ }
/* Reset the LAN911x */
smsc911x_reg_write(pdata, HW_CFG, HW_CFG_SRST_);
@@ -1260,6 +1419,16 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata)
SMSC_WARN(pdata, drv, "Failed to complete reset");
return -EIO;
}
+
+ if (pdata->generation == 4) {
+ ret = smsc911x_phy_enable_energy_detect(pdata);
+
+ if (ret) {
+ SMSC_WARN(pdata, drv, "Failed to wakeup the PHY chip");
+ return ret;
+ }
+ }
+
return 0;
}
@@ -2092,6 +2261,9 @@ static int __devexit smsc911x_drv_remove(struct platform_device *pdev)
iounmap(pdata->ioaddr);
+ (void)smsc911x_disable_resources(pdev);
+ smsc911x_free_resources(pdev);
+
free_netdev(dev);
return 0;
@@ -2218,10 +2390,20 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
pdata->dev = dev;
pdata->msg_enable = ((1 << debug) - 1);
+ platform_set_drvdata(pdev, dev);
+
+ retval = smsc911x_request_resources(pdev);
+ if (retval)
+ goto out_return_resources;
+
+ retval = smsc911x_enable_resources(pdev);
+ if (retval)
+ goto out_disable_resources;
+
if (pdata->ioaddr == NULL) {
SMSC_WARN(pdata, probe, "Error smsc911x base address invalid");
retval = -ENOMEM;
- goto out_free_netdev_2;
+ goto out_disable_resources;
}
retval = smsc911x_probe_config_dt(&pdata->config, np);
@@ -2233,7 +2415,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
if (retval) {
SMSC_WARN(pdata, probe, "Error smsc911x config not found");
- goto out_unmap_io_3;
+ goto out_disable_resources;
}
/* assume standard, non-shifted, access to HW registers */
@@ -2244,7 +2426,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
retval = smsc911x_init(dev);
if (retval < 0)
- goto out_unmap_io_3;
+ goto out_disable_resources;
/* configure irq polarity and type before connecting isr */
if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH)
@@ -2264,15 +2446,13 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
if (retval) {
SMSC_WARN(pdata, probe,
"Unable to claim requested irq: %d", dev->irq);
- goto out_unmap_io_3;
+ goto out_free_irq;
}
- platform_set_drvdata(pdev, dev);
-
retval = register_netdev(dev);
if (retval) {
SMSC_WARN(pdata, probe, "Error %i registering device", retval);
- goto out_unset_drvdata_4;
+ goto out_free_irq;
} else {
SMSC_TRACE(pdata, probe,
"Network interface: \"%s\"", dev->name);
@@ -2321,12 +2501,14 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
out_unregister_netdev_5:
unregister_netdev(dev);
-out_unset_drvdata_4:
- platform_set_drvdata(pdev, NULL);
+out_free_irq:
free_irq(dev->irq, dev);
-out_unmap_io_3:
+out_disable_resources:
+ (void)smsc911x_disable_resources(pdev);
+out_return_resources:
+ smsc911x_free_resources(pdev);
+ platform_set_drvdata(pdev, NULL);
iounmap(pdata->ioaddr);
-out_free_netdev_2:
free_netdev(dev);
out_release_io_1:
release_mem_region(res->start, resource_size(res));
diff --git a/drivers/net/ethernet/smsc/smsc911x.h b/drivers/net/ethernet/smsc/smsc911x.h
index 8d67aac..9ad5e5d 100644
--- a/drivers/net/ethernet/smsc/smsc911x.h
+++ b/drivers/net/ethernet/smsc/smsc911x.h
@@ -401,4 +401,6 @@
#include <asm/smsc911x.h>
#endif
+#include <linux/smscphy.h>
+
#endif /* __SMSC911X_H__ */
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index edb24b0..a9efbdf 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -279,9 +279,10 @@ static void smsc9420_ethtool_get_drvinfo(struct net_device *netdev,
{
struct smsc9420_pdata *pd = netdev_priv(netdev);
- strcpy(drvinfo->driver, DRV_NAME);
- strcpy(drvinfo->bus_info, pci_name(pd->pdev));
- strcpy(drvinfo->version, DRV_VERSION);
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->bus_info, pci_name(pd->pdev),
+ sizeof(drvinfo->bus_info));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
}
static u32 smsc9420_ethtool_get_msglevel(struct net_device *netdev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 22745d7..0364283 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -12,11 +12,36 @@ config STMMAC_ETH
if STMMAC_ETH
+config STMMAC_PLATFORM
+ tristate "STMMAC platform bus support"
+ depends on STMMAC_ETH
+ default y
+ ---help---
+ This selects the platform specific bus support for
+ the stmmac device driver. This is the driver used
+ on many embedded STM platforms based on ARM and SuperH
+ processors.
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
+config STMMAC_PCI
+ tristate "STMMAC support on PCI bus (EXPERIMENTAL)"
+ depends on STMMAC_ETH && PCI && EXPERIMENTAL
+ ---help---
+ This is to select the Synopsys DWMAC available on PCI devices.
+ If you have a controller with this interface, say Y or M here.
+
+ This PCI support is tested on the XILINX XC2V3000 FF1152AMT0221
+ D1215994A VIRTEX FPGA board.
+
+ If unsure, say N.
+
config STMMAC_DEBUG_FS
bool "Enable monitoring via sysFS "
default n
depends on STMMAC_ETH && DEBUG_FS
- -- help
+ ---help---
The stmmac entry in /sys reports DMA TX/RX rings
or (if supported) the HW cap register.
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index d7c4516..bc965ac 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -2,6 +2,8 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o
stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
stmmac-$(CONFIG_STMMAC_RING) += ring_mode.o
stmmac-$(CONFIG_STMMAC_CHAINED) += chain_mode.o
+stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
+stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 2cc1192..0319d64 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -22,7 +22,11 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#include <linux/etherdevice.h>
#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/module.h>
+#include <linux/init.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define STMMAC_VLAN_TAG_USED
#include <linux/if_vlan.h>
@@ -63,6 +67,7 @@ struct stmmac_extra_stats {
unsigned long ipc_csum_error;
unsigned long rx_collision;
unsigned long rx_crc;
+ unsigned long dribbling_bit;
unsigned long rx_length;
unsigned long rx_mii;
unsigned long rx_multicast;
@@ -315,5 +320,8 @@ extern void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
unsigned int high, unsigned int low);
extern void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int high, unsigned int low);
+
+extern void stmmac_set_mac(void __iomem *ioaddr, bool enable);
+
extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
extern const struct stmmac_ring_mode_ops ring_mode_ops;
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index e250935..f20aa12 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -238,6 +238,19 @@ void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
writel(data, ioaddr + low);
}
+/* Enable/disable the MAC RX/TX */
+void stmmac_set_mac(void __iomem *ioaddr, bool enable)
+{
+ u32 value = readl(ioaddr + MAC_CTRL_REG);
+
+ if (enable)
+ value |= MAC_RNABLE_RX | MAC_ENABLE_TX;
+ else
+ value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX);
+
+ writel(value, ioaddr + MAC_CTRL_REG);
+}
+
void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
unsigned int high, unsigned int low)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index d879763..ad1b627 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -201,7 +201,7 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(p->des01.erx.dribbling)) {
CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n");
- ret = discard_frame;
+ x->dribbling_bit++;
}
if (unlikely(p->des01.erx.sa_filter_fail)) {
CHIP_DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 41e6b33..c07cfe9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -22,6 +22,7 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#include <linux/kernel.h>
#include <linux/io.h>
#include "mmc.h"
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index fda5d2b..25953bb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -104,7 +104,7 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
ret = discard_frame;
}
if (unlikely(p->des01.rx.dribbling))
- ret = discard_frame;
+ x->dribbling_bit++;
if (unlikely(p->des01.rx.length_error)) {
x->rx_length++;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index a140a8f..b4b095f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -20,7 +20,8 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#define DRV_MODULE_VERSION "Oct_2011"
+#define STMMAC_RESOURCE_NAME "stmmaceth"
+#define DRV_MODULE_VERSION "Feb_2012"
#include <linux/stmmac.h>
#include <linux/phy.h>
#include "common.h"
@@ -82,8 +83,19 @@ struct stmmac_priv {
int hw_cap_support;
};
+extern int phyaddr;
+
extern int stmmac_mdio_unregister(struct net_device *ndev);
extern int stmmac_mdio_register(struct net_device *ndev);
extern void stmmac_set_ethtool_ops(struct net_device *netdev);
extern const struct stmmac_desc_ops enh_desc_ops;
extern const struct stmmac_desc_ops ndesc_ops;
+
+int stmmac_freeze(struct net_device *ndev);
+int stmmac_restore(struct net_device *ndev);
+int stmmac_resume(struct net_device *ndev);
+int stmmac_suspend(struct net_device *ndev);
+int stmmac_dvr_remove(struct net_device *ndev);
+struct stmmac_priv *stmmac_dvr_probe(struct device *device,
+ struct plat_stmmacenet_data *plat_dat,
+ void __iomem *addr);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 0395f9e..f98e151 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -47,23 +47,25 @@ struct stmmac_stats {
offsetof(struct stmmac_priv, xstats.m)}
static const struct stmmac_stats stmmac_gstrings_stats[] = {
+ /* Transmit errors */
STMMAC_STAT(tx_underflow),
STMMAC_STAT(tx_carrier),
STMMAC_STAT(tx_losscarrier),
STMMAC_STAT(vlan_tag),
STMMAC_STAT(tx_deferred),
STMMAC_STAT(tx_vlan),
- STMMAC_STAT(rx_vlan),
STMMAC_STAT(tx_jabber),
STMMAC_STAT(tx_frame_flushed),
STMMAC_STAT(tx_payload_error),
STMMAC_STAT(tx_ip_header_error),
+ /* Receive errors */
STMMAC_STAT(rx_desc),
STMMAC_STAT(sa_filter_fail),
STMMAC_STAT(overflow_error),
STMMAC_STAT(ipc_csum_error),
STMMAC_STAT(rx_collision),
STMMAC_STAT(rx_crc),
+ STMMAC_STAT(dribbling_bit),
STMMAC_STAT(rx_length),
STMMAC_STAT(rx_mii),
STMMAC_STAT(rx_multicast),
@@ -73,6 +75,8 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
STMMAC_STAT(sa_rx_filter_fail),
STMMAC_STAT(rx_missed_cntr),
STMMAC_STAT(rx_overflow_cntr),
+ STMMAC_STAT(rx_vlan),
+ /* Tx/Rx IRQ errors */
STMMAC_STAT(tx_undeflow_irq),
STMMAC_STAT(tx_process_stopped_irq),
STMMAC_STAT(tx_jabber_irq),
@@ -82,6 +86,7 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
STMMAC_STAT(rx_watchdog_irq),
STMMAC_STAT(tx_early_irq),
STMMAC_STAT(fatal_bus_error_irq),
+ /* Extra info */
STMMAC_STAT(threshold),
STMMAC_STAT(tx_pkt_n),
STMMAC_STAT(rx_pkt_n),
@@ -185,9 +190,10 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
struct stmmac_priv *priv = netdev_priv(dev);
if (priv->plat->has_gmac)
- strcpy(info->driver, GMAC_ETHTOOL_NAME);
+ strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
else
- strcpy(info->driver, MAC100_ETHTOOL_NAME);
+ strlcpy(info->driver, MAC100_ETHTOOL_NAME,
+ sizeof(info->driver));
strcpy(info->version, DRV_MODULE_VERSION);
info->fw_version[0] = '\0';
@@ -458,7 +464,7 @@ static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return 0;
}
-static struct ethtool_ops stmmac_ethtool_ops = {
+static const struct ethtool_ops stmmac_ethtool_ops = {
.begin = stmmac_check_if_running,
.get_drvinfo = stmmac_ethtool_getdrvinfo,
.get_settings = stmmac_ethtool_getsettings,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 72cd190..6ee593a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -28,12 +28,8 @@
https://bugzilla.stlinux.com/
*******************************************************************************/
-#include <linux/module.h>
-#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
-#include <linux/etherdevice.h>
-#include <linux/platform_device.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
@@ -52,8 +48,6 @@
#endif
#include "stmmac.h"
-#define STMMAC_RESOURCE_NAME "stmmaceth"
-
#undef STMMAC_DEBUG
/*#define STMMAC_DEBUG*/
#ifdef STMMAC_DEBUG
@@ -93,7 +87,7 @@ static int debug = -1; /* -1: default, 0: no output, 16: all */
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (0: no output, 16: all)");
-static int phyaddr = -1;
+int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");
@@ -141,6 +135,11 @@ static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
+#ifdef CONFIG_STMMAC_DEBUG_FS
+static int stmmac_init_fs(struct net_device *dev);
+static void stmmac_exit_fs(void);
+#endif
+
/**
* stmmac_verify_args - verify the driver parameters.
* Description: it verifies if some wrong parameter is passed to the driver.
@@ -242,7 +241,7 @@ static void stmmac_adjust_link(struct net_device *dev)
case 1000:
if (likely(priv->plat->has_gmac))
ctrl &= ~priv->hw->link.port;
- stmmac_hw_fix_mac_speed(priv);
+ stmmac_hw_fix_mac_speed(priv);
break;
case 100:
case 10:
@@ -308,7 +307,7 @@ static int stmmac_init_phy(struct net_device *dev)
priv->speed = 0;
priv->oldduplex = -1;
- snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id);
+ snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x", priv->plat->bus_id);
snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
priv->plat->phy_addr);
pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
@@ -345,22 +344,6 @@ static int stmmac_init_phy(struct net_device *dev)
return 0;
}
-static inline void stmmac_enable_mac(void __iomem *ioaddr)
-{
- u32 value = readl(ioaddr + MAC_CTRL_REG);
-
- value |= MAC_RNABLE_RX | MAC_ENABLE_TX;
- writel(value, ioaddr + MAC_CTRL_REG);
-}
-
-static inline void stmmac_disable_mac(void __iomem *ioaddr)
-{
- u32 value = readl(ioaddr + MAC_CTRL_REG);
-
- value &= ~(MAC_ENABLE_TX | MAC_RNABLE_RX);
- writel(value, ioaddr + MAC_CTRL_REG);
-}
-
/**
* display_ring
* @p: pointer to the ring.
@@ -789,7 +772,7 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv)
dwmac_mmc_ctrl(priv->ioaddr, mode);
memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
} else
- pr_info(" No MAC Management Counters available");
+ pr_info(" No MAC Management Counters available\n");
}
static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
@@ -802,7 +785,7 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
u32 uid = ((hwid & 0x0000ff00) >> 8);
u32 synid = (hwid & 0x000000ff);
- pr_info("STMMAC - user ID: 0x%x, Synopsys ID: 0x%x\n",
+ pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
uid, synid);
return synid;
@@ -886,6 +869,21 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
return hw_cap;
}
+static void stmmac_check_ether_addr(struct stmmac_priv *priv)
+{
+ /* Verify that the MAC address is valid; in case of failure, a
+ * random MAC address is generated */
+ if (!is_valid_ether_addr(priv->dev->dev_addr)) {
+ priv->hw->mac->get_umac_addr((void __iomem *)
+ priv->dev->base_addr,
+ priv->dev->dev_addr, 0);
+ if (!is_valid_ether_addr(priv->dev->dev_addr))
+ random_ether_addr(priv->dev->dev_addr);
+ }
+ pr_warning("%s: device MAC address %pM\n", priv->dev->name,
+ priv->dev->dev_addr);
+}
+
/**
* stmmac_open - open entry point of the driver
* @dev : pointer to the device structure.
@@ -900,17 +898,15 @@ static int stmmac_open(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
- /* Check that the MAC address is valid. If its not, refuse
- * to bring the device up. The user must specify an
- * address using the following linux command:
- * ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx */
- if (!is_valid_ether_addr(dev->dev_addr)) {
- random_ether_addr(dev->dev_addr);
- pr_warning("%s: generated random MAC address %pM\n", dev->name,
- dev->dev_addr);
- }
+ stmmac_check_ether_addr(priv);
- stmmac_verify_args();
+ /* MDIO bus Registration */
+ ret = stmmac_mdio_register(dev);
+ if (ret < 0) {
+ pr_debug("%s: MDIO bus (id: %d) registration failed",
+ __func__, priv->plat->bus_id);
+ return ret;
+ }
#ifdef CONFIG_STMMAC_TIMER
priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
@@ -936,44 +932,6 @@ static int stmmac_open(struct net_device *dev)
goto open_error;
}
- stmmac_get_synopsys_id(priv);
-
- priv->hw_cap_support = stmmac_get_hw_features(priv);
-
- if (priv->hw_cap_support) {
- pr_info(" Support DMA HW capability register");
-
- /* We can override some gmac/dma configuration fields: e.g.
- * enh_desc, tx_coe (e.g. that are passed through the
- * platform) with the values from the HW capability
- * register (if supported).
- */
- priv->plat->enh_desc = priv->dma_cap.enh_desc;
- priv->plat->tx_coe = priv->dma_cap.tx_coe;
- priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
-
- /* By default disable wol on magic frame if not supported */
- if (!priv->dma_cap.pmt_magic_frame)
- priv->wolopts &= ~WAKE_MAGIC;
-
- } else
- pr_info(" No HW DMA feature register supported");
-
- /* Select the enhnaced/normal descriptor structures */
- stmmac_selec_desc_mode(priv);
-
- /* PMT module is not integrated in all the MAC devices. */
- if (priv->plat->pmt) {
- pr_info(" Remote wake-up capable\n");
- device_set_wakeup_capable(priv->device, 1);
- }
-
- priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
- if (priv->rx_coe)
- pr_info(" Checksum Offload Engine supported\n");
- if (priv->plat->tx_coe)
- pr_info(" Checksum insertion supported\n");
-
/* Create and initialize the TX/RX descriptors chains. */
priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
@@ -990,14 +948,14 @@ static int stmmac_open(struct net_device *dev)
/* Copy the MAC addr into the HW */
priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
+
/* If required, perform hw setup of the bus. */
if (priv->plat->bus_setup)
priv->plat->bus_setup(priv->ioaddr);
+
/* Initialize the MAC Core */
priv->hw->mac->core_init(priv->ioaddr);
- netdev_update_features(dev);
-
/* Request the IRQ lines */
ret = request_irq(dev->irq, stmmac_interrupt,
IRQF_SHARED, dev->name, dev);
@@ -1007,8 +965,19 @@ static int stmmac_open(struct net_device *dev)
goto open_error;
}
+ /* Request the Wake IRQ in case another line is used for WoL */
+ if (priv->wol_irq != dev->irq) {
+ ret = request_irq(priv->wol_irq, stmmac_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ pr_err("%s: ERROR: allocating the ext WoL IRQ %d "
+ "(error: %d)\n", __func__, priv->wol_irq, ret);
+ goto open_error_wolirq;
+ }
+ }
+
/* Enable the MAC Rx/Tx */
- stmmac_enable_mac(priv->ioaddr);
+ stmmac_set_mac(priv->ioaddr, true);
/* Set the HW DMA mode and the COE */
stmmac_dma_operation_mode(priv);
@@ -1019,6 +988,11 @@ static int stmmac_open(struct net_device *dev)
stmmac_mmc_setup(priv);
+#ifdef CONFIG_STMMAC_DEBUG_FS
+ ret = stmmac_init_fs(dev);
+ if (ret < 0)
+ pr_warning("%s: failed debugFS registration\n", __func__);
+#endif
/* Start the ball rolling... */
DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
priv->hw->dma->start_tx(priv->ioaddr);
@@ -1027,6 +1001,7 @@ static int stmmac_open(struct net_device *dev)
#ifdef CONFIG_STMMAC_TIMER
priv->tm->timer_start(tmrate);
#endif
+
/* Dump DMA/MAC registers */
if (netif_msg_hw(priv)) {
priv->hw->mac->dump_regs(priv->ioaddr);
@@ -1042,6 +1017,9 @@ static int stmmac_open(struct net_device *dev)
return 0;
+open_error_wolirq:
+ free_irq(dev->irq, dev);
+
open_error:
#ifdef CONFIG_STMMAC_TIMER
kfree(priv->tm);
@@ -1082,6 +1060,8 @@ static int stmmac_release(struct net_device *dev)
/* Free the IRQ lines */
free_irq(dev->irq, dev);
+ if (priv->wol_irq != dev->irq)
+ free_irq(priv->wol_irq, dev);
/* Stop TX/RX DMA and clear the descriptors */
priv->hw->dma->stop_tx(priv->ioaddr);
@@ -1091,10 +1071,15 @@ static int stmmac_release(struct net_device *dev)
free_dma_desc_resources(priv);
/* Disable the MAC Rx/Tx */
- stmmac_disable_mac(priv->ioaddr);
+ stmmac_set_mac(priv->ioaddr, false);
netif_carrier_off(dev);
+#ifdef CONFIG_STMMAC_DEBUG_FS
+ stmmac_exit_fs();
+#endif
+ stmmac_mdio_unregister(dev);
+
return 0;
}
@@ -1470,7 +1455,8 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static u32 stmmac_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t stmmac_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct stmmac_priv *priv = netdev_priv(dev);
@@ -1738,277 +1724,176 @@ static const struct net_device_ops stmmac_netdev_ops = {
};
/**
- * stmmac_probe - Initialization of the adapter .
- * @dev : device pointer
- * Description: The function initializes the network device structure for
- * the STMMAC driver. It also calls the low level routines
- * in order to init the HW (i.e. the DMA engine)
+ * stmmac_hw_init - Init the MAC device
+ * @priv : pointer to the private device structure.
+ * Description: this function detects which MAC device
+ * (GMAC/MAC10-100) has to attached, checks the HW capability
+ * (if supported) and sets the driver's features (for example
+ * to use the ring or chaine mode or support the normal/enh
+ * descriptor structure).
*/
-static int stmmac_probe(struct net_device *dev)
+static int stmmac_hw_init(struct stmmac_priv *priv)
{
int ret = 0;
- struct stmmac_priv *priv = netdev_priv(dev);
+ struct mac_device_info *mac;
- ether_setup(dev);
+ /* Identify the MAC HW device */
+ if (priv->plat->has_gmac)
+ mac = dwmac1000_setup(priv->ioaddr);
+ else
+ mac = dwmac100_setup(priv->ioaddr);
+ if (!mac)
+ return -ENOMEM;
- dev->netdev_ops = &stmmac_netdev_ops;
- stmmac_set_ethtool_ops(dev);
+ priv->hw = mac;
- dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
- dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
- dev->watchdog_timeo = msecs_to_jiffies(watchdog);
-#ifdef STMMAC_VLAN_TAG_USED
- /* Both mac100 and gmac support receive VLAN tag detection */
- dev->features |= NETIF_F_HW_VLAN_RX;
-#endif
- priv->msg_enable = netif_msg_init(debug, default_msg_level);
+ /* To use the chained or ring mode */
+ priv->hw->ring = &ring_mode_ops;
- if (flow_ctrl)
- priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
+ /* Get and dump the chip ID */
+ stmmac_get_synopsys_id(priv);
- priv->pause = pause;
- netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
+ /* Get the HW capability (new GMAC newer than 3.50a) */
+ priv->hw_cap_support = stmmac_get_hw_features(priv);
+ if (priv->hw_cap_support) {
+ pr_info(" DMA HW capability register supported");
- /* Get the MAC address */
- priv->hw->mac->get_umac_addr((void __iomem *) dev->base_addr,
- dev->dev_addr, 0);
+ /* We can override some gmac/dma configuration fields: e.g.
+ * enh_desc, tx_coe (e.g. that are passed through the
+ * platform) with the values from the HW capability
+ * register (if supported).
+ */
+ priv->plat->enh_desc = priv->dma_cap.enh_desc;
+ priv->plat->tx_coe = priv->dma_cap.tx_coe;
+ priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;
+ } else
+ pr_info(" No HW DMA feature register supported");
- if (!is_valid_ether_addr(dev->dev_addr))
- pr_warning("\tno valid MAC address;"
- "please, use ifconfig or nwhwconfig!\n");
+ /* Select the enhanced/normal descriptor structures */
+ stmmac_selec_desc_mode(priv);
- spin_lock_init(&priv->lock);
- spin_lock_init(&priv->tx_lock);
+ priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
+ if (priv->rx_coe)
+ pr_info(" RX Checksum Offload Engine supported\n");
+ if (priv->plat->tx_coe)
+ pr_info(" TX Checksum insertion supported\n");
- ret = register_netdev(dev);
- if (ret) {
- pr_err("%s: ERROR %i registering the device\n",
- __func__, ret);
- return -ENODEV;
+ if (priv->plat->pmt) {
+ pr_info(" Wake-Up On Lan supported\n");
+ device_set_wakeup_capable(priv->device, 1);
}
- DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n",
- dev->name, (dev->features & NETIF_F_SG) ? "on" : "off",
- (dev->features & NETIF_F_IP_CSUM) ? "on" : "off");
-
return ret;
}
/**
- * stmmac_mac_device_setup
- * @dev : device pointer
- * Description: select and initialise the mac device (mac100 or Gmac).
- */
-static int stmmac_mac_device_setup(struct net_device *dev)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
-
- struct mac_device_info *device;
-
- if (priv->plat->has_gmac) {
- dev->priv_flags |= IFF_UNICAST_FLT;
- device = dwmac1000_setup(priv->ioaddr);
- } else {
- device = dwmac100_setup(priv->ioaddr);
- }
-
- if (!device)
- return -ENOMEM;
-
- priv->hw = device;
- priv->hw->ring = &ring_mode_ops;
-
- if (device_can_wakeup(priv->device)) {
- priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
- enable_irq_wake(priv->wol_irq);
- }
-
- return 0;
-}
-
-/**
* stmmac_dvr_probe
- * @pdev: platform device pointer
- * Description: the driver is initialized through platform_device.
+ * @device: device pointer
+ * Description: this is the main probe function: it allocates the
+ * network device, initializes the private structure and registers it.
*/
-static int stmmac_dvr_probe(struct platform_device *pdev)
+struct stmmac_priv *stmmac_dvr_probe(struct device *device,
+ struct plat_stmmacenet_data *plat_dat,
+ void __iomem *addr)
{
int ret = 0;
- struct resource *res;
- void __iomem *addr = NULL;
struct net_device *ndev = NULL;
- struct stmmac_priv *priv = NULL;
- struct plat_stmmacenet_data *plat_dat;
-
- pr_info("STMMAC driver:\n\tplatform registration... ");
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res)
- return -ENODEV;
- pr_info("\tdone!\n");
-
- if (!request_mem_region(res->start, resource_size(res),
- pdev->name)) {
- pr_err("%s: ERROR: memory allocation failed"
- "cannot get the I/O addr 0x%x\n",
- __func__, (unsigned int)res->start);
- return -EBUSY;
- }
-
- addr = ioremap(res->start, resource_size(res));
- if (!addr) {
- pr_err("%s: ERROR: memory mapping failed\n", __func__);
- ret = -ENOMEM;
- goto out_release_region;
- }
+ struct stmmac_priv *priv;
ndev = alloc_etherdev(sizeof(struct stmmac_priv));
if (!ndev) {
pr_err("%s: ERROR: allocating the device\n", __func__);
- ret = -ENOMEM;
- goto out_unmap;
+ return NULL;
}
- SET_NETDEV_DEV(ndev, &pdev->dev);
-
- /* Get the MAC information */
- ndev->irq = platform_get_irq_byname(pdev, "macirq");
- if (ndev->irq == -ENXIO) {
- pr_err("%s: ERROR: MAC IRQ configuration "
- "information not found\n", __func__);
- ret = -ENXIO;
- goto out_free_ndev;
- }
+ SET_NETDEV_DEV(ndev, device);
priv = netdev_priv(ndev);
- priv->device = &(pdev->dev);
+ priv->device = device;
priv->dev = ndev;
- plat_dat = pdev->dev.platform_data;
- priv->plat = plat_dat;
+ ether_setup(ndev);
+ stmmac_set_ethtool_ops(ndev);
+ priv->pause = pause;
+ priv->plat = plat_dat;
priv->ioaddr = addr;
+ priv->dev->base_addr = (unsigned long)addr;
- /*
- * On some platforms e.g. SPEAr the wake up irq differs from the mac irq
- * The external wake up irq can be passed through the platform code
- * named as "eth_wake_irq"
- *
- * In case the wake up interrupt is not passed from the platform
- * so the driver will continue to use the mac irq (ndev->irq)
- */
- priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
- if (priv->wol_irq == -ENXIO)
- priv->wol_irq = ndev->irq;
-
- platform_set_drvdata(pdev, ndev);
-
- /* Set the I/O base addr */
- ndev->base_addr = (unsigned long)addr;
-
- /* Custom initialisation */
- if (priv->plat->init) {
- ret = priv->plat->init(pdev);
- if (unlikely(ret))
- goto out_free_ndev;
- }
-
- /* MAC HW device detection */
- ret = stmmac_mac_device_setup(ndev);
- if (ret < 0)
- goto out_plat_exit;
-
- /* Network Device Registration */
- ret = stmmac_probe(ndev);
- if (ret < 0)
- goto out_plat_exit;
+ /* Verify driver arguments */
+ stmmac_verify_args();
/* Override with kernel parameters if supplied XXX CRS XXX
* this needs to have multiple instances */
if ((phyaddr >= 0) && (phyaddr <= 31))
priv->plat->phy_addr = phyaddr;
- pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
- "\tIO base addr: 0x%p)\n", ndev->name, pdev->name,
- pdev->id, ndev->irq, addr);
+ /* Init MAC and get the capabilities */
+ stmmac_hw_init(priv);
- /* MDIO bus Registration */
- pr_debug("\tMDIO bus (id: %d)...", priv->plat->bus_id);
- ret = stmmac_mdio_register(ndev);
- if (ret < 0)
- goto out_unregister;
- pr_debug("registered!\n");
+ ndev->netdev_ops = &stmmac_netdev_ops;
-#ifdef CONFIG_STMMAC_DEBUG_FS
- ret = stmmac_init_fs(ndev);
- if (ret < 0)
- pr_warning("\tFailed debugFS registration");
+ ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM;
+ ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
+ ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
+#ifdef STMMAC_VLAN_TAG_USED
+ /* Both mac100 and gmac support receive VLAN tag detection */
+ ndev->features |= NETIF_F_HW_VLAN_RX;
#endif
+ priv->msg_enable = netif_msg_init(debug, default_msg_level);
- return 0;
+ if (flow_ctrl)
+ priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
+
+ netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
+
+ spin_lock_init(&priv->lock);
+ spin_lock_init(&priv->tx_lock);
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ pr_err("%s: ERROR %i registering the device\n", __func__, ret);
+ goto error;
+ }
+
+ return priv;
+
+error:
+ netif_napi_del(&priv->napi);
-out_unregister:
unregister_netdev(ndev);
-out_plat_exit:
- if (priv->plat->exit)
- priv->plat->exit(pdev);
-out_free_ndev:
free_netdev(ndev);
- platform_set_drvdata(pdev, NULL);
-out_unmap:
- iounmap(addr);
-out_release_region:
- release_mem_region(res->start, resource_size(res));
- return ret;
+ return NULL;
}
/**
* stmmac_dvr_remove
- * @pdev: platform device pointer
+ * @ndev: net device pointer
* Description: this function resets the TX/RX processes, disables the MAC RX/TX
- * changes the link status, releases the DMA descriptor rings,
- * unregisters the MDIO bus and unmaps the allocated memory.
+ * changes the link status, releases the DMA descriptor rings.
*/
-static int stmmac_dvr_remove(struct platform_device *pdev)
+int stmmac_dvr_remove(struct net_device *ndev)
{
- struct net_device *ndev = platform_get_drvdata(pdev);
struct stmmac_priv *priv = netdev_priv(ndev);
- struct resource *res;
pr_info("%s:\n\tremoving driver", __func__);
priv->hw->dma->stop_rx(priv->ioaddr);
priv->hw->dma->stop_tx(priv->ioaddr);
- stmmac_disable_mac(priv->ioaddr);
-
+ stmmac_set_mac(priv->ioaddr, false);
netif_carrier_off(ndev);
-
- stmmac_mdio_unregister(ndev);
-
- if (priv->plat->exit)
- priv->plat->exit(pdev);
-
- platform_set_drvdata(pdev, NULL);
unregister_netdev(ndev);
-
- iounmap((void *)priv->ioaddr);
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
-#ifdef CONFIG_STMMAC_DEBUG_FS
- stmmac_exit_fs();
-#endif
-
free_netdev(ndev);
return 0;
}
#ifdef CONFIG_PM
-static int stmmac_suspend(struct device *dev)
+int stmmac_suspend(struct net_device *ndev)
{
- struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
int dis_ic = 0;
@@ -2042,15 +1927,14 @@ static int stmmac_suspend(struct device *dev)
if (device_may_wakeup(priv->device))
priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
else
- stmmac_disable_mac(priv->ioaddr);
+ stmmac_set_mac(priv->ioaddr, false);
spin_unlock(&priv->lock);
return 0;
}
-static int stmmac_resume(struct device *dev)
+int stmmac_resume(struct net_device *ndev)
{
- struct net_device *ndev = dev_get_drvdata(dev);
struct stmmac_priv *priv = netdev_priv(ndev);
if (!netif_running(ndev))
@@ -2069,7 +1953,7 @@ static int stmmac_resume(struct device *dev)
netif_device_attach(ndev);
/* Enable the MAC and DMA */
- stmmac_enable_mac(priv->ioaddr);
+ stmmac_set_mac(priv->ioaddr, true);
priv->hw->dma->start_tx(priv->ioaddr);
priv->hw->dma->start_rx(priv->ioaddr);
@@ -2089,68 +1973,23 @@ static int stmmac_resume(struct device *dev)
return 0;
}
-static int stmmac_freeze(struct device *dev)
+int stmmac_freeze(struct net_device *ndev)
{
- struct net_device *ndev = dev_get_drvdata(dev);
-
if (!ndev || !netif_running(ndev))
return 0;
return stmmac_release(ndev);
}
-static int stmmac_restore(struct device *dev)
+int stmmac_restore(struct net_device *ndev)
{
- struct net_device *ndev = dev_get_drvdata(dev);
-
if (!ndev || !netif_running(ndev))
return 0;
return stmmac_open(ndev);
}
-
-static const struct dev_pm_ops stmmac_pm_ops = {
- .suspend = stmmac_suspend,
- .resume = stmmac_resume,
- .freeze = stmmac_freeze,
- .thaw = stmmac_restore,
- .restore = stmmac_restore,
-};
-#else
-static const struct dev_pm_ops stmmac_pm_ops;
#endif /* CONFIG_PM */
-static struct platform_driver stmmac_driver = {
- .probe = stmmac_dvr_probe,
- .remove = stmmac_dvr_remove,
- .driver = {
- .name = STMMAC_RESOURCE_NAME,
- .owner = THIS_MODULE,
- .pm = &stmmac_pm_ops,
- },
-};
-
-/**
- * stmmac_init_module - Entry point for the driver
- * Description: This function is the entry point for the driver.
- */
-static int __init stmmac_init_module(void)
-{
- int ret;
-
- ret = platform_driver_register(&stmmac_driver);
- return ret;
-}
-
-/**
- * stmmac_cleanup_module - Cleanup routine for the driver
- * Description: This function is the cleanup routine for the driver.
- */
-static void __exit stmmac_cleanup_module(void)
-{
- platform_driver_unregister(&stmmac_driver);
-}
-
#ifndef MODULE
static int __init stmmac_cmdline_opt(char *str)
{
@@ -2210,9 +2049,6 @@ err:
__setup("stmmaceth=", stmmac_cmdline_opt);
#endif
-module_init(stmmac_init_module);
-module_exit(stmmac_cleanup_module);
-
-MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet driver");
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index 9c3b9d5..7319532 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -109,6 +109,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
*/
static int stmmac_mdio_reset(struct mii_bus *bus)
{
+#if defined(CONFIG_STMMAC_PLATFORM)
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned int mii_address = priv->hw->mii.addr;
@@ -123,7 +124,7 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
* on MDC, so perform a dummy mdio read.
*/
writel(0, priv->ioaddr + mii_address);
-
+#endif
return 0;
}
@@ -153,11 +154,12 @@ int stmmac_mdio_register(struct net_device *ndev)
else
irqlist = priv->mii_irq;
- new_bus->name = "STMMAC MII Bus";
+ new_bus->name = "stmmac";
new_bus->read = &stmmac_mdio_read;
new_bus->write = &stmmac_mdio_write;
new_bus->reset = &stmmac_mdio_reset;
- snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", mdio_bus_data->bus_id);
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ new_bus->name, mdio_bus_data->bus_id);
new_bus->priv = ndev;
new_bus->irq = irqlist;
new_bus->phy_mask = mdio_bus_data->phy_mask;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
new file mode 100644
index 0000000..50ad5b8
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -0,0 +1,219 @@
+/*******************************************************************************
+ This contains the functions to handle the pci driver.
+
+ Copyright (C) 2011-2012 Vayavya Labs Pvt Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include "stmmac.h"
+
+struct plat_stmmacenet_data plat_dat;
+struct stmmac_mdio_bus_data mdio_data;
+
+static void stmmac_default_data(void)
+{
+ memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data));
+ plat_dat.bus_id = 1;
+ plat_dat.phy_addr = 0;
+ plat_dat.interface = PHY_INTERFACE_MODE_GMII;
+ plat_dat.pbl = 32;
+ plat_dat.clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ plat_dat.has_gmac = 1;
+ plat_dat.force_sf_dma_mode = 1;
+
+ mdio_data.bus_id = 1;
+ mdio_data.phy_reset = NULL;
+ mdio_data.phy_mask = 0;
+ plat_dat.mdio_bus_data = &mdio_data;
+}
+
+/**
+ * stmmac_pci_probe
+ *
+ * @pdev: pci device pointer
+ * @id: pointer to table of device id/id's.
+ *
+ * Description: This probing function gets called for all PCI devices which
+ * match the ID table and are not "owned" by another driver yet. This function
+ * gets passed a "struct pci_dev *" for each device whose entry in the ID table
+ * matches the device. The probe function returns zero when the driver chooses
+ * to take "ownership" of the device or a negative error code otherwise.
+ */
+static int __devinit stmmac_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int ret = 0;
+ void __iomem *addr = NULL;
+ struct stmmac_priv *priv = NULL;
+ int i;
+
+ /* Enable pci device */
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ pr_err("%s : ERROR: failed to enable %s device\n", __func__,
+ pci_name(pdev));
+ return ret;
+ }
+ if (pci_request_regions(pdev, STMMAC_RESOURCE_NAME)) {
+ pr_err("%s: ERROR: failed to get PCI region\n", __func__);
+ ret = -ENODEV;
+ goto err_out_req_reg_failed;
+ }
+
+ /* Get the base address of device */
+ for (i = 0; i <= 5; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ addr = pci_iomap(pdev, i, 0);
+ if (addr == NULL) {
+ pr_err("%s: ERROR: cannot map regiser memory, aborting",
+ __func__);
+ ret = -EIO;
+ goto err_out_map_failed;
+ }
+ break;
+ }
+ pci_set_master(pdev);
+
+ stmmac_default_data();
+
+ priv = stmmac_dvr_probe(&(pdev->dev), &plat_dat, addr);
+ if (!priv) {
+ pr_err("%s: main driver probe failed", __func__);
+ goto err_out;
+ }
+ priv->dev->irq = pdev->irq;
+ priv->wol_irq = pdev->irq;
+
+ pci_set_drvdata(pdev, priv->dev);
+
+ pr_debug("STMMAC platform driver registration completed");
+
+ return 0;
+
+err_out:
+ pci_clear_master(pdev);
+err_out_map_failed:
+ pci_release_regions(pdev);
+err_out_req_reg_failed:
+ pci_disable_device(pdev);
+
+ return ret;
+}
+
+/**
+ * stmmac_pci_remove
+ *
+ * @pdev: pci device pointer
+ * Description: this function calls the main driver remove function to free
+ * the net resources and then releases the PCI resources.
+ */
+static void __devexit stmmac_pci_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ stmmac_dvr_remove(ndev);
+
+ pci_set_drvdata(pdev, NULL);
+ pci_iounmap(pdev, priv->ioaddr);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+static int stmmac_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+ int ret;
+
+ ret = stmmac_suspend(ndev);
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return ret;
+}
+
+static int stmmac_pci_resume(struct pci_dev *pdev)
+{
+ struct net_device *ndev = pci_get_drvdata(pdev);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ return stmmac_resume(ndev);
+}
+#endif
+
+#define STMMAC_VENDOR_ID 0x700
+#define STMMAC_DEVICE_ID 0x1108
+
+static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = {
+ {PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)},
+ {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_MAC)},
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, stmmac_id_table);
+
+static struct pci_driver stmmac_driver = {
+ .name = STMMAC_RESOURCE_NAME,
+ .id_table = stmmac_id_table,
+ .probe = stmmac_pci_probe,
+ .remove = __devexit_p(stmmac_pci_remove),
+#ifdef CONFIG_PM
+ .suspend = stmmac_pci_suspend,
+ .resume = stmmac_pci_resume,
+#endif
+};
+
+/**
+ * stmmac_init_module - Entry point for the driver
+ * Description: This function is the entry point for the driver.
+ */
+static int __init stmmac_init_module(void)
+{
+ int ret;
+
+ ret = pci_register_driver(&stmmac_driver);
+ if (ret < 0)
+ pr_err("%s: ERROR: driver registration failed\n", __func__);
+
+ return ret;
+}
+
+/**
+ * stmmac_cleanup_module - Cleanup routine for the driver
+ * Description: This function is the cleanup routine for the driver.
+ */
+static void __exit stmmac_cleanup_module(void)
+{
+ pci_unregister_driver(&stmmac_driver);
+}
+
+module_init(stmmac_init_module);
+module_exit(stmmac_cleanup_module);
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver");
+MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
new file mode 100644
index 0000000..3aad981
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -0,0 +1,195 @@
+/*******************************************************************************
+ This contains the functions to handle the platform driver.
+
+ Copyright (C) 2007-2011 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include "stmmac.h"
+
+/**
+ * stmmac_pltfr_probe
+ * @pdev: platform device pointer
+ * Description: platform_device probe function. It allocates
+ * the necessary platform resources and invokes the main driver
+ * probe to init the net device, register the mdio bus, etc.
+ */
+static int stmmac_pltfr_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct resource *res;
+ void __iomem *addr = NULL;
+ struct stmmac_priv *priv = NULL;
+ struct plat_stmmacenet_data *plat_dat;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+
+ if (!request_mem_region(res->start, resource_size(res), pdev->name)) {
+ pr_err("%s: ERROR: memory allocation failed"
+ "cannot get the I/O addr 0x%x\n",
+ __func__, (unsigned int)res->start);
+ return -EBUSY;
+ }
+
+ addr = ioremap(res->start, resource_size(res));
+ if (!addr) {
+ pr_err("%s: ERROR: memory mapping failed", __func__);
+ ret = -ENOMEM;
+ goto out_release_region;
+ }
+ plat_dat = pdev->dev.platform_data;
+
+ /* Custom initialisation (if needed) */
+ if (plat_dat->init) {
+ ret = plat_dat->init(pdev);
+ if (unlikely(ret))
+ goto out_unmap;
+ }
+
+ priv = stmmac_dvr_probe(&(pdev->dev), plat_dat, addr);
+ if (!priv) {
+ pr_err("%s: main driver probe failed", __func__);
+ goto out_unmap;
+ }
+
+ /* Get the MAC information */
+ priv->dev->irq = platform_get_irq_byname(pdev, "macirq");
+ if (priv->dev->irq == -ENXIO) {
+ pr_err("%s: ERROR: MAC IRQ configuration "
+ "information not found\n", __func__);
+ ret = -ENXIO;
+ goto out_unmap;
+ }
+
+ /*
+ * On some platforms, e.g. SPEAr, the wake-up irq differs from the mac irq.
+ * The external wake-up irq can be passed through the platform code
+ * using the resource named "eth_wake_irq".
+ *
+ * If the wake-up interrupt is not passed from the platform,
+ * the driver will continue to use the mac irq (ndev->irq).
+ */
+ priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
+ if (priv->wol_irq == -ENXIO)
+ priv->wol_irq = priv->dev->irq;
+
+ platform_set_drvdata(pdev, priv->dev);
+
+ pr_debug("STMMAC platform driver registration completed");
+
+ return 0;
+
+out_unmap:
+ iounmap(addr);
+ platform_set_drvdata(pdev, NULL);
+
+out_release_region:
+ release_mem_region(res->start, resource_size(res));
+
+ return ret;
+}
+
+/**
+ * stmmac_pltfr_remove
+ * @pdev: platform device pointer
+ * Description: this function calls the main driver remove function to free
+ * the net resources, then calls the platform's exit hook and releases the
+ * resources (e.g. mem).
+ */
+static int stmmac_pltfr_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct resource *res;
+ int ret = stmmac_dvr_remove(ndev);
+
+ if (priv->plat->exit)
+ priv->plat->exit(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ iounmap((void *)priv->ioaddr);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ release_mem_region(res->start, resource_size(res));
+
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int stmmac_pltfr_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ return stmmac_suspend(ndev);
+}
+
+static int stmmac_pltfr_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ return stmmac_resume(ndev);
+}
+
+int stmmac_pltfr_freeze(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ return stmmac_freeze(ndev);
+}
+
+int stmmac_pltfr_restore(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ return stmmac_restore(ndev);
+}
+
+static const struct dev_pm_ops stmmac_pltfr_pm_ops = {
+ .suspend = stmmac_pltfr_suspend,
+ .resume = stmmac_pltfr_resume,
+ .freeze = stmmac_pltfr_freeze,
+ .thaw = stmmac_pltfr_restore,
+ .restore = stmmac_pltfr_restore,
+};
+#else
+static const struct dev_pm_ops stmmac_pltfr_pm_ops;
+#endif /* CONFIG_PM */
+
+static struct platform_driver stmmac_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = STMMAC_RESOURCE_NAME,
+ .owner = THIS_MODULE,
+ .pm = &stmmac_pltfr_pm_ops,
+ },
+};
+
+module_platform_driver(stmmac_driver);
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
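For context, stmmac_pltfr_probe() above consumes dev.platform_data of type struct plat_stmmacenet_data and looks its IRQs up by resource name. The following board-file fragment is an illustrative sketch only: the my_board_* names, MMIO address and IRQ numbers are hypothetical, and only the init/exit callbacks actually dereferenced above are filled in (the real structure carries more fields, e.g. bus and PHY settings).

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/stmmac.h>

/* Hypothetical platform hooks matching the plat_dat->init/exit calls above. */
static int my_board_stmmac_init(struct platform_device *pdev)
{
	/* e.g. enable clocks, configure pad muxing */
	return 0;
}

static void my_board_stmmac_exit(struct platform_device *pdev)
{
	/* undo whatever init did */
}

static struct plat_stmmacenet_data my_board_stmmac_pdata = {
	.init	= my_board_stmmac_init,
	.exit	= my_board_stmmac_exit,
};

static struct resource my_board_stmmac_resources[] = {
	{
		.start	= 0xe0800000,		/* made-up MMIO base */
		.end	= 0xe0807fff,
		.flags	= IORESOURCE_MEM,
	}, {
		.name	= "macirq",		/* looked up by name in probe */
		.start	= 24,			/* made-up IRQ number */
		.end	= 24,
		.flags	= IORESOURCE_IRQ,
	}, {
		.name	= "eth_wake_irq",	/* optional; probe falls back to macirq */
		.start	= 25,
		.end	= 25,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device my_board_stmmac_device = {
	.name		= "stmmaceth",		/* assumed to match STMMAC_RESOURCE_NAME */
	.id		= 0,
	.resource	= my_board_stmmac_resources,
	.num_resources	= ARRAY_SIZE(my_board_stmmac_resources),
	.dev		= {
		.platform_data = &my_board_stmmac_pdata,
	},
};

A board would register my_board_stmmac_device before this driver probes; the "eth_wake_irq" entry can simply be omitted on platforms where the MAC irq doubles as the wake-up source.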
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index fd40988..f10665f 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -4532,10 +4532,9 @@ static void cas_set_multicast(struct net_device *dev)
static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct cas *cp = netdev_priv(dev);
- strncpy(info->driver, DRV_MODULE_NAME, ETHTOOL_BUSINFO_LEN);
- strncpy(info->version, DRV_MODULE_VERSION, ETHTOOL_BUSINFO_LEN);
- info->fw_version[0] = '\0';
- strncpy(info->bus_info, pci_name(cp->pdev), ETHTOOL_BUSINFO_LEN);
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
info->regdump_len = cp->casreg_len < CAS_MAX_REGS ?
cp->casreg_len : CAS_MAX_REGS;
info->n_stats = CAS_NUM_STAT_KEYS;
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 73c7081..cf43393 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -1151,19 +1151,8 @@ static int link_status_mii(struct niu *np, int *link_up_p)
supported |= SUPPORTED_1000baseT_Full;
lp->supported = supported;
- advertising = 0;
- if (advert & ADVERTISE_10HALF)
- advertising |= ADVERTISED_10baseT_Half;
- if (advert & ADVERTISE_10FULL)
- advertising |= ADVERTISED_10baseT_Full;
- if (advert & ADVERTISE_100HALF)
- advertising |= ADVERTISED_100baseT_Half;
- if (advert & ADVERTISE_100FULL)
- advertising |= ADVERTISED_100baseT_Full;
- if (ctrl1000 & ADVERTISE_1000HALF)
- advertising |= ADVERTISED_1000baseT_Half;
- if (ctrl1000 & ADVERTISE_1000FULL)
- advertising |= ADVERTISED_1000baseT_Full;
+ advertising = mii_adv_to_ethtool_adv_t(advert);
+ advertising |= mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
if (bmcr & BMCR_ANENABLE) {
int neg, neg1000;
@@ -3609,6 +3598,7 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
{
struct netdev_queue *txq;
+ unsigned int tx_bytes;
u16 pkt_cnt, tmp;
int cons, index;
u64 cs;
@@ -3631,12 +3621,18 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
netif_printk(np, tx_done, KERN_DEBUG, np->dev,
"%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
- while (pkt_cnt--)
+ tx_bytes = 0;
+ tmp = pkt_cnt;
+ while (tmp--) {
+ tx_bytes += rp->tx_buffs[cons].skb->len;
cons = release_tx_packet(np, rp, cons);
+ }
rp->cons = cons;
smp_mb();
+ netdev_tx_completed_queue(txq, pkt_cnt, tx_bytes);
+
out:
if (unlikely(netif_tx_queue_stopped(txq) &&
(niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
@@ -4337,6 +4333,7 @@ static void niu_free_channels(struct niu *np)
struct tx_ring_info *rp = &np->tx_rings[i];
niu_free_tx_ring_info(np, rp);
+ netdev_tx_reset_queue(netdev_get_tx_queue(np->dev, i));
}
kfree(np->tx_rings);
np->tx_rings = NULL;
@@ -6742,6 +6739,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
prod = NEXT_TX(rp, prod);
}
+ netdev_tx_sent_queue(txq, skb->len);
+
if (prod < rp->prod)
rp->wrap_bit ^= TX_RING_KICK_WRAP;
rp->prod = prod;
@@ -6823,12 +6822,13 @@ static void niu_get_drvinfo(struct net_device *dev,
struct niu *np = netdev_priv(dev);
struct niu_vpd *vpd = &np->vpd;
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
- sprintf(info->fw_version, "%d.%d",
+ strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
+ snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d",
vpd->fcode_major, vpd->fcode_minor);
if (np->parent->plat_type != PLAT_TYPE_NIU)
- strcpy(info->bus_info, pci_name(np->pdev));
+ strlcpy(info->bus_info, pci_name(np->pdev),
+ sizeof(info->bus_info));
}
static int niu_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -8589,9 +8589,11 @@ static int __devinit phy_record(struct niu_parent *parent,
if (dev_id_1 < 0 || dev_id_2 < 0)
return 0;
if (type == PHY_TYPE_PMA_PMD || type == PHY_TYPE_PCS) {
+ /* Because of the NIU_PHY_ID_MASK being applied, the 8704
+ * test covers the 8706 as well.
+ */
if (((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8704) &&
- ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011) &&
- ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM8706))
+ ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_MRVL88X2011))
return 0;
} else {
if ((id & NIU_PHY_ID_MASK) != NIU_PHY_ID_BCM5464R)
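The niu hunks above wire the driver into the byte queue limits (BQL) machinery: bytes are reported when descriptors are posted, packet/byte completions are reported from the Tx cleanup path, and the queue state is reset when a ring is torn down. A generic sketch of that pattern follows; the example_* names are hypothetical and the hardware-specific parts are elided.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Transmit path: account the bytes once the descriptors are posted. */
static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	/* ... map buffers, fill descriptors, kick the hardware ... */
	netdev_tx_sent_queue(txq, skb->len);
	return NETDEV_TX_OK;
}

/* Completion path: report how many packets and bytes the hardware finished. */
static void example_tx_clean(struct net_device *dev, unsigned int ring,
			     unsigned int pkts, unsigned int bytes)
{
	/* ... unmap and free the completed skbs first ... */
	netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring), pkts, bytes);
}

/* Teardown/reset path: clear the BQL state along with the ring. */
static void example_tx_ring_free(struct net_device *dev, unsigned int ring)
{
	netdev_tx_reset_queue(netdev_get_tx_queue(dev, ring));
}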
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c
index 0d8cfd9..220f724 100644
--- a/drivers/net/ethernet/sun/sunbmac.c
+++ b/drivers/net/ethernet/sun/sunbmac.c
@@ -1293,15 +1293,4 @@ static struct platform_driver bigmac_sbus_driver = {
.remove = __devexit_p(bigmac_sbus_remove),
};
-static int __init bigmac_init(void)
-{
- return platform_driver_register(&bigmac_sbus_driver);
-}
-
-static void __exit bigmac_exit(void)
-{
- platform_driver_unregister(&bigmac_sbus_driver);
-}
-
-module_init(bigmac_init);
-module_exit(bigmac_exit);
+module_platform_driver(bigmac_sbus_driver);
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index ceab215..31441a8 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -2517,9 +2517,9 @@ static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct gem *gp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(gp->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info));
}
static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index cf14ab9..09c5186 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -2457,11 +2457,11 @@ static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct happy_meal *hp = netdev_priv(dev);
- strcpy(info->driver, "sunhme");
- strcpy(info->version, "2.02");
+ strlcpy(info->driver, "sunhme", sizeof(info->driver));
+ strlcpy(info->version, "2.02", sizeof(info->version));
if (hp->happy_flags & HFLAG_PCI) {
struct pci_dev *pdev = hp->happy_dev;
- strcpy(info->bus_info, pci_name(pdev));
+ strlcpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info));
}
#ifdef CONFIG_SBUS
else {
@@ -2469,7 +2469,8 @@ static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
struct platform_device *op = hp->happy_dev;
regs = of_get_property(op->dev.of_node, "regs", NULL);
if (regs)
- sprintf(info->bus_info, "SBUS:%d",
+ snprintf(info->bus_info, sizeof(info->bus_info),
+ "SBUS:%d",
regs->which_io);
}
#endif
@@ -2849,7 +2850,7 @@ err_out:
static int is_quattro_p(struct pci_dev *pdev)
{
struct pci_dev *busdev = pdev->bus->self;
- struct list_head *tmp;
+ struct pci_dev *this_pdev;
int n_hmes;
if (busdev == NULL ||
@@ -2858,15 +2859,10 @@ static int is_quattro_p(struct pci_dev *pdev)
return 0;
n_hmes = 0;
- tmp = pdev->bus->devices.next;
- while (tmp != &pdev->bus->devices) {
- struct pci_dev *this_pdev = pci_dev_b(tmp);
-
+ list_for_each_entry(this_pdev, &pdev->bus->devices, bus_list) {
if (this_pdev->vendor == PCI_VENDOR_ID_SUN &&
this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL)
n_hmes++;
-
- tmp = tmp->next;
}
if (n_hmes != 4)
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 3a90af6..4b19e9b 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -727,9 +727,10 @@ static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
* @ndev network device
* @vid VLAN vid to add
*/
-static void bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
+static int bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
{
__bdx_vlan_rx_vid(ndev, vid, 1);
+ return 0;
}
/*
@@ -737,9 +738,10 @@ static void bdx_vlan_rx_add_vid(struct net_device *ndev, uint16_t vid)
* @ndev network device
* @vid VLAN vid to kill
*/
-static void bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
+static int bdx_vlan_rx_kill_vid(struct net_device *ndev, unsigned short vid)
{
__bdx_vlan_rx_vid(ndev, vid, 0);
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index aaac0c7..cbc8df7 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -1122,7 +1122,7 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
pdata = pdev->dev.platform_data;
if (external_switch || dumb_switch) {
- strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); /* fixed phys bus */
+ strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
phy_id = pdev->id;
} else {
for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
@@ -1138,7 +1138,7 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
if (phy_id == PHY_MAX_ADDR) {
dev_err(&pdev->dev, "no PHY present, falling back "
"to switch on MDIO bus 0\n");
- strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); /* fixed phys bus */
+ strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
phy_id = pdev->id;
}
@@ -1269,7 +1269,7 @@ int __devinit cpmac_init(void)
}
cpmac_mii->phy_mask = ~(mask | 0x80000000);
- snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "1");
+ snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "cpmac-1");
res = mdiobus_register(cpmac_mii);
if (res)
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 815c797..4b2f545 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -115,6 +115,7 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_DEF_TX_CH (0) /* Default 0th channel */
#define EMAC_DEF_RX_CH (0) /* Default 0th channel */
#define EMAC_DEF_RX_NUM_DESC (128)
+#define EMAC_DEF_TX_NUM_DESC (128)
#define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */
#define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */
#define EMAC_POLL_WEIGHT (64) /* Default NAPI poll weight */
@@ -336,6 +337,7 @@ struct emac_priv {
u32 mac_hash2;
u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
u32 rx_addr_type;
+ atomic_t cur_tx;
const char *phy_id;
struct phy_device *phydev;
spinlock_t lock;
@@ -1007,7 +1009,7 @@ static void emac_rx_handler(void *token, int len, int status)
int ret;
/* free and bail if we are shutting down */
- if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) {
+ if (unlikely(!netif_running(ndev))) {
dev_kfree_skb_any(skb);
return;
}
@@ -1036,7 +1038,9 @@ static void emac_rx_handler(void *token, int len, int status)
recycle:
ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
skb_tailroom(skb), GFP_KERNEL);
- if (WARN_ON(ret < 0))
+
+ WARN_ON(ret == -ENOMEM);
+ if (unlikely(ret < 0))
dev_kfree_skb_any(skb);
}
@@ -1044,6 +1048,9 @@ static void emac_tx_handler(void *token, int len, int status)
{
struct sk_buff *skb = token;
struct net_device *ndev = skb->dev;
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ atomic_dec(&priv->cur_tx);
if (unlikely(netif_queue_stopped(ndev)))
netif_start_queue(ndev);
@@ -1092,6 +1099,9 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
goto fail_tx;
}
+ if (atomic_inc_return(&priv->cur_tx) >= EMAC_DEF_TX_NUM_DESC)
+ netif_stop_queue(ndev);
+
return NETDEV_TX_OK;
fail_tx:
@@ -1592,8 +1602,9 @@ static int emac_dev_open(struct net_device *ndev)
if (IS_ERR(priv->phydev)) {
dev_err(emac_dev, "could not connect to phy %s\n",
priv->phy_id);
+ ret = PTR_ERR(priv->phydev);
priv->phydev = NULL;
- return PTR_ERR(priv->phydev);
+ return ret;
}
priv->link = 0;
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 7615040..af8b8fc 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -313,13 +313,14 @@ static int __devinit davinci_mdio_probe(struct platform_device *pdev)
data->bus->reset = davinci_mdio_reset,
data->bus->parent = dev;
data->bus->priv = data;
- snprintf(data->bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+ snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, pdev->id);
data->clk = clk_get(dev, NULL);
if (IS_ERR(data->clk)) {
- data->clk = NULL;
dev_err(dev, "failed to get device clock\n");
ret = PTR_ERR(data->clk);
+ data->clk = NULL;
goto bail_out;
}
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 1187a11..d9951af 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -1256,7 +1256,7 @@ static void tile_net_stop_aux(struct net_device *dev)
sizeof(dummy), NETIO_IPP_STOP_SHIM_OFF) < 0)
panic("Failed to stop LIPP/LEPP!\n");
- priv->partly_opened = 0;
+ priv->partly_opened = false;
}
@@ -1507,7 +1507,7 @@ static int tile_net_open(struct net_device *dev)
priv->network_cpus_count, priv->network_cpus_credits);
#endif
- priv->partly_opened = 1;
+ priv->partly_opened = true;
} else {
/* FIXME: Is this possible? */
@@ -2260,8 +2260,7 @@ static int tile_net_get_mac(struct net_device *dev)
return 0;
}
-
-static struct net_device_ops tile_net_ops = {
+static const struct net_device_ops tile_net_ops = {
.ndo_open = tile_net_open,
.ndo_stop = tile_net_stop,
.ndo_start_xmit = tile_net_tx,
diff --git a/drivers/net/ethernet/toshiba/Kconfig b/drivers/net/ethernet/toshiba/Kconfig
index 0517647..74acb5c 100644
--- a/drivers/net/ethernet/toshiba/Kconfig
+++ b/drivers/net/ethernet/toshiba/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_TOSHIBA
bool "Toshiba devices"
default y
- depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB) || PPC_PS3
+ depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB || MIPS) || PPC_PS3
---help---
If you have a network (Ethernet) card belonging to this class, say Y
and read the Ethernet-HOWTO, available from
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 7bf1e20..5ee82a7 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -640,7 +640,7 @@ static inline void gelic_card_disable_rxdmac(struct gelic_card *card)
int status;
/* this hvc blocks until the DMA in progress really stopped */
- status = lv1_net_stop_rx_dma(bus_id(card), dev_id(card), 0);
+ status = lv1_net_stop_rx_dma(bus_id(card), dev_id(card));
if (status)
dev_err(ctodev(card),
"lv1_net_stop_rx_dma failed, %d\n", status);
@@ -658,7 +658,7 @@ static inline void gelic_card_disable_txdmac(struct gelic_card *card)
int status;
/* this hvc blocks until the DMA in progress really stopped */
- status = lv1_net_stop_tx_dma(bus_id(card), dev_id(card), 0);
+ status = lv1_net_stop_tx_dma(bus_id(card), dev_id(card));
if (status)
dev_err(ctodev(card),
"lv1_net_stop_tx_dma failed, status=%d\n", status);
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index a8df7ec..164fb77 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -1604,7 +1604,7 @@ tsi108_init_one(struct platform_device *pdev)
data->phyregs = ioremap(einfo->phyregs, 0x400);
if (NULL == data->phyregs) {
err = -ENOMEM;
- goto regs_fail;
+ goto phyregs_fail;
}
/* MII setup */
data->mii_if.dev = dev;
@@ -1663,9 +1663,11 @@ tsi108_init_one(struct platform_device *pdev)
return 0;
register_fail:
- iounmap(data->regs);
iounmap(data->phyregs);
+phyregs_fail:
+ iounmap(data->regs);
+
regs_fail:
free_netdev(dev);
return err;
@@ -1688,18 +1690,6 @@ static void tsi108_timed_checker(unsigned long dev_ptr)
mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL);
}
-static int tsi108_ether_init(void)
-{
- int ret;
- ret = platform_driver_register (&tsi_eth_driver);
- if (ret < 0){
- printk("tsi108_ether_init: error initializing ethernet "
- "device\n");
- return ret;
- }
- return 0;
-}
-
static int tsi108_ether_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
@@ -1714,13 +1704,7 @@ static int tsi108_ether_remove(struct platform_device *pdev)
return 0;
}
-static void tsi108_ether_exit(void)
-{
- platform_driver_unregister(&tsi_eth_driver);
-}
-
-module_init(tsi108_ether_init);
-module_exit(tsi108_ether_exit);
+module_platform_driver(tsi_eth_driver);
MODULE_AUTHOR("Tundra Semiconductor Corporation");
MODULE_DESCRIPTION("Tsi108 Gigabit Ethernet driver");
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index f34dd99..10b18eb 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -35,13 +35,13 @@
#define DRV_VERSION "1.5.0"
#define DRV_RELDATE "2010-10-09"
+#include <linux/types.h>
/* A few user-configurable values.
These may be modified when a driver module is loaded. */
-
-#define DEBUG
-static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
-static int max_interrupt_work = 20;
+static int debug = 0;
+#define RHINE_MSG_DEFAULT \
+ (0x0000)
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
Setting to > 1518 effectively disables this feature. */
@@ -55,7 +55,7 @@ static int rx_copybreak;
/* Work-around for broken BIOSes: they are unable to get the chip back out of
power state D3 so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
-static int avoid_D3;
+static bool avoid_D3;
/*
* In case you are looking for 'options[]' or 'full_duplex[]', they
@@ -127,12 +127,10 @@ MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");
-module_param(max_interrupt_work, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(avoid_D3, bool, 0);
-MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
-MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
+MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
@@ -350,16 +348,25 @@ static const int mmio_verify_registers[] = {
/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
- IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
- IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
- IntrPCIErr=0x0040,
- IntrStatsMax=0x0080, IntrRxEarly=0x0100,
- IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
- IntrTxAborted=0x2000, IntrLinkChange=0x4000,
- IntrRxWakeUp=0x8000,
- IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
- IntrTxDescRace=0x080000, /* mapped from IntrStatus2 */
- IntrTxErrSummary=0x082218,
+ IntrRxDone = 0x0001,
+ IntrTxDone = 0x0002,
+ IntrRxErr = 0x0004,
+ IntrTxError = 0x0008,
+ IntrRxEmpty = 0x0020,
+ IntrPCIErr = 0x0040,
+ IntrStatsMax = 0x0080,
+ IntrRxEarly = 0x0100,
+ IntrTxUnderrun = 0x0210,
+ IntrRxOverflow = 0x0400,
+ IntrRxDropped = 0x0800,
+ IntrRxNoBuf = 0x1000,
+ IntrTxAborted = 0x2000,
+ IntrLinkChange = 0x4000,
+ IntrRxWakeUp = 0x8000,
+ IntrTxDescRace = 0x080000, /* mapped from IntrStatus2 */
+ IntrNormalSummary = IntrRxDone | IntrTxDone,
+ IntrTxErrSummary = IntrTxDescRace | IntrTxAborted | IntrTxError |
+ IntrTxUnderrun,
};
/* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
@@ -438,8 +445,13 @@ struct rhine_private {
struct net_device *dev;
struct napi_struct napi;
spinlock_t lock;
+ struct mutex task_lock;
+ bool task_enable;
+ struct work_struct slow_event_task;
struct work_struct reset_task;
+ u32 msg_enable;
+
/* Frequently used values: keep some adjacent for cache effect. */
u32 quirks;
struct rx_desc *rx_head_desc;
@@ -475,41 +487,50 @@ static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int rhine_open(struct net_device *dev);
static void rhine_reset_task(struct work_struct *work);
+static void rhine_slow_event_task(struct work_struct *work);
static void rhine_tx_timeout(struct net_device *dev);
static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
struct net_device *dev);
static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
static void rhine_tx(struct net_device *dev);
static int rhine_rx(struct net_device *dev, int limit);
-static void rhine_error(struct net_device *dev, int intr_status);
static void rhine_set_rx_mode(struct net_device *dev);
static struct net_device_stats *rhine_get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int rhine_close(struct net_device *dev);
-static void rhine_shutdown (struct pci_dev *pdev);
-static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
-static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
-static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr);
-static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr);
-static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask);
-static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask);
-static void rhine_init_cam_filter(struct net_device *dev);
-static void rhine_update_vcam(struct net_device *dev);
-
-#define RHINE_WAIT_FOR(condition) \
-do { \
- int i = 1024; \
- while (!(condition) && --i) \
- ; \
- if (debug > 1 && i < 512) \
- pr_info("%4d cycles used @ %s:%d\n", \
- 1024 - i, __func__, __LINE__); \
-} while (0)
-
-static inline u32 get_intr_status(struct net_device *dev)
+static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
+static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
+static void rhine_restart_tx(struct net_device *dev);
+
+static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool high)
+{
+ void __iomem *ioaddr = rp->base;
+ int i;
+
+ for (i = 0; i < 1024; i++) {
+ if (high ^ !!(ioread8(ioaddr + reg) & mask))
+ break;
+ udelay(10);
+ }
+ if (i > 64) {
+ netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle "
+ "count: %04d\n", high ? "high" : "low", reg, mask, i);
+ }
+}
+
+static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
+{
+ rhine_wait_bit(rp, reg, mask, true);
+}
+
+static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
+{
+ rhine_wait_bit(rp, reg, mask, false);
+}
+
+static u32 rhine_get_events(struct rhine_private *rp)
{
- struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
u32 intr_status;
@@ -520,6 +541,16 @@ static inline u32 get_intr_status(struct net_device *dev)
return intr_status;
}
+static void rhine_ack_events(struct rhine_private *rp, u32 mask)
+{
+ void __iomem *ioaddr = rp->base;
+
+ if (rp->quirks & rqStatusWBRace)
+ iowrite8(mask >> 16, ioaddr + IntrStatus2);
+ iowrite16(mask, ioaddr + IntrStatus);
+ mmiowb();
+}
+
/*
* Get power related registers into sane state.
* Notify user about past WOL event.
@@ -584,6 +615,7 @@ static void rhine_chip_reset(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
+ u8 cmd1;
iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
IOSYNC;
@@ -596,13 +628,12 @@ static void rhine_chip_reset(struct net_device *dev)
iowrite8(0x40, ioaddr + MiscCmd);
/* Reset can take somewhat longer (rare) */
- RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
+ rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
}
- if (debug > 1)
- netdev_info(dev, "Reset %s\n",
- (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
- "failed" : "succeeded");
+ cmd1 = ioread8(ioaddr + ChipCmd1);
+ netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
+ "failed" : "succeeded");
}
#ifdef USE_MMIO
@@ -628,9 +659,15 @@ static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
+ int i;
outb(0x20, pioaddr + MACRegEEcsr);
- RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));
+ for (i = 0; i < 1024; i++) {
+ if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
+ break;
+ }
+ if (i > 512)
+ pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
#ifdef USE_MMIO
/*
@@ -656,23 +693,127 @@ static void rhine_poll(struct net_device *dev)
}
#endif
+static void rhine_kick_tx_threshold(struct rhine_private *rp)
+{
+ if (rp->tx_thresh < 0xe0) {
+ void __iomem *ioaddr = rp->base;
+
+ rp->tx_thresh += 0x20;
+ BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
+ }
+}
+
+static void rhine_tx_err(struct rhine_private *rp, u32 status)
+{
+ struct net_device *dev = rp->dev;
+
+ if (status & IntrTxAborted) {
+ netif_info(rp, tx_err, dev,
+ "Abort %08x, frame dropped\n", status);
+ }
+
+ if (status & IntrTxUnderrun) {
+ rhine_kick_tx_threshold(rp);
+ netif_info(rp, tx_err, dev, "Transmitter underrun, "
+ "Tx threshold now %02x\n", rp->tx_thresh);
+ }
+
+ if (status & IntrTxDescRace)
+ netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
+
+ if ((status & IntrTxError) &&
+ (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
+ rhine_kick_tx_threshold(rp);
+ netif_info(rp, tx_err, dev, "Unspecified error. "
+ "Tx threshold now %02x\n", rp->tx_thresh);
+ }
+
+ rhine_restart_tx(dev);
+}
+
+static void rhine_update_rx_crc_and_missed_errord(struct rhine_private *rp)
+{
+ void __iomem *ioaddr = rp->base;
+ struct net_device_stats *stats = &rp->dev->stats;
+
+ stats->rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
+ stats->rx_missed_errors += ioread16(ioaddr + RxMissed);
+
+ /*
+ * Clears the "tally counters" for CRC errors and missed frames(?).
+ * It has been reported that some chips need a write of 0 to clear
+ * these, for others the counters are set to 1 when written to and
+ * instead cleared when read. So we clear them both ways ...
+ */
+ iowrite32(0, ioaddr + RxMissed);
+ ioread16(ioaddr + RxCRCErrs);
+ ioread16(ioaddr + RxMissed);
+}
+
+#define RHINE_EVENT_NAPI_RX (IntrRxDone | \
+ IntrRxErr | \
+ IntrRxEmpty | \
+ IntrRxOverflow | \
+ IntrRxDropped | \
+ IntrRxNoBuf | \
+ IntrRxWakeUp)
+
+#define RHINE_EVENT_NAPI_TX_ERR (IntrTxError | \
+ IntrTxAborted | \
+ IntrTxUnderrun | \
+ IntrTxDescRace)
+#define RHINE_EVENT_NAPI_TX (IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)
+
+#define RHINE_EVENT_NAPI (RHINE_EVENT_NAPI_RX | \
+ RHINE_EVENT_NAPI_TX | \
+ IntrStatsMax)
+#define RHINE_EVENT_SLOW (IntrPCIErr | IntrLinkChange)
+#define RHINE_EVENT (RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
+
static int rhine_napipoll(struct napi_struct *napi, int budget)
{
struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
struct net_device *dev = rp->dev;
void __iomem *ioaddr = rp->base;
- int work_done;
+ u16 enable_mask = RHINE_EVENT & 0xffff;
+ int work_done = 0;
+ u32 status;
+
+ status = rhine_get_events(rp);
+ rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
+
+ if (status & RHINE_EVENT_NAPI_RX)
+ work_done += rhine_rx(dev, budget);
+
+ if (status & RHINE_EVENT_NAPI_TX) {
+ if (status & RHINE_EVENT_NAPI_TX_ERR) {
+ /* Avoid scavenging before Tx engine turned off */
+ rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
+ if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
+ netif_warn(rp, tx_err, dev, "Tx still on\n");
+ }
- work_done = rhine_rx(dev, budget);
+ rhine_tx(dev);
+
+ if (status & RHINE_EVENT_NAPI_TX_ERR)
+ rhine_tx_err(rp, status);
+ }
+
+ if (status & IntrStatsMax) {
+ spin_lock(&rp->lock);
+ rhine_update_rx_crc_and_missed_errord(rp);
+ spin_unlock(&rp->lock);
+ }
+
+ if (status & RHINE_EVENT_SLOW) {
+ enable_mask &= ~RHINE_EVENT_SLOW;
+ schedule_work(&rp->slow_event_task);
+ }
if (work_done < budget) {
napi_complete(napi);
-
- iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
- IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
- IntrTxDone | IntrTxError | IntrTxUnderrun |
- IntrPCIErr | IntrStatsMax | IntrLinkChange,
- ioaddr + IntrEnable);
+ iowrite16(enable_mask, ioaddr + IntrEnable);
+ mmiowb();
}
return work_done;
}
@@ -796,6 +937,7 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
rp->quirks = quirks;
rp->pioaddr = pioaddr;
rp->pdev = pdev;
+ rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
rc = pci_request_regions(pdev, DRV_NAME);
if (rc)
@@ -855,7 +997,9 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
dev->irq = pdev->irq;
spin_lock_init(&rp->lock);
+ mutex_init(&rp->task_lock);
INIT_WORK(&rp->reset_task, rhine_reset_task);
+ INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
rp->mii_if.dev = dev;
rp->mii_if.mdio_read = mdio_read;
@@ -915,8 +1059,8 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
}
}
rp->mii_if.phy_id = phy_id;
- if (debug > 1 && avoid_D3)
- netdev_info(dev, "No D3 power state at shutdown\n");
+ if (avoid_D3)
+ netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
return 0;
@@ -1092,7 +1236,7 @@ static void rhine_check_media(struct net_device *dev, unsigned int init_media)
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
- mii_check_media(&rp->mii_if, debug, init_media);
+ mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
if (rp->mii_if.full_duplex)
iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
@@ -1100,24 +1244,26 @@ static void rhine_check_media(struct net_device *dev, unsigned int init_media)
else
iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
ioaddr + ChipCmd1);
- if (debug > 1)
- netdev_info(dev, "force_media %d, carrier %d\n",
- rp->mii_if.force_media, netif_carrier_ok(dev));
+
+ netif_info(rp, link, dev, "force_media %d, carrier %d\n",
+ rp->mii_if.force_media, netif_carrier_ok(dev));
}
/* Called after status of force_media possibly changed */
static void rhine_set_carrier(struct mii_if_info *mii)
{
+ struct net_device *dev = mii->dev;
+ struct rhine_private *rp = netdev_priv(dev);
+
if (mii->force_media) {
/* autoneg is off: Link is always assumed to be up */
- if (!netif_carrier_ok(mii->dev))
- netif_carrier_on(mii->dev);
- }
- else /* Let MMI library update carrier status */
- rhine_check_media(mii->dev, 0);
- if (debug > 1)
- netdev_info(mii->dev, "force_media %d, carrier %d\n",
- mii->force_media, netif_carrier_ok(mii->dev));
+ if (!netif_carrier_ok(dev))
+ netif_carrier_on(dev);
+ } else /* Let MMI library update carrier status */
+ rhine_check_media(dev, 0);
+
+ netif_info(rp, link, dev, "force_media %d, carrier %d\n",
+ mii->force_media, netif_carrier_ok(dev));
}
/**
@@ -1261,24 +1407,26 @@ static void rhine_update_vcam(struct net_device *dev)
rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
}
-static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct rhine_private *rp = netdev_priv(dev);
- spin_lock_irq(&rp->lock);
+ spin_lock_bh(&rp->lock);
set_bit(vid, rp->active_vlans);
rhine_update_vcam(dev);
- spin_unlock_irq(&rp->lock);
+ spin_unlock_bh(&rp->lock);
+ return 0;
}
-static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct rhine_private *rp = netdev_priv(dev);
- spin_lock_irq(&rp->lock);
+ spin_lock_bh(&rp->lock);
clear_bit(vid, rp->active_vlans);
rhine_update_vcam(dev);
- spin_unlock_irq(&rp->lock);
+ spin_unlock_bh(&rp->lock);
+ return 0;
}
static void init_registers(struct net_device *dev)
@@ -1307,12 +1455,7 @@ static void init_registers(struct net_device *dev)
napi_enable(&rp->napi);
- /* Enable interrupts by setting the interrupt mask. */
- iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
- IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
- IntrTxDone | IntrTxError | IntrTxUnderrun |
- IntrPCIErr | IntrStatsMax | IntrLinkChange,
- ioaddr + IntrEnable);
+ iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
ioaddr + ChipCmd);
@@ -1320,23 +1463,27 @@ static void init_registers(struct net_device *dev)
}
/* Enable MII link status auto-polling (required for IntrLinkChange) */
-static void rhine_enable_linkmon(void __iomem *ioaddr)
+static void rhine_enable_linkmon(struct rhine_private *rp)
{
+ void __iomem *ioaddr = rp->base;
+
iowrite8(0, ioaddr + MIICmd);
iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
iowrite8(0x80, ioaddr + MIICmd);
- RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));
+ rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
}
/* Disable MII link status auto-polling (required for MDIO access) */
-static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
+static void rhine_disable_linkmon(struct rhine_private *rp)
{
+ void __iomem *ioaddr = rp->base;
+
iowrite8(0, ioaddr + MIICmd);
- if (quirks & rqRhineI) {
+ if (rp->quirks & rqRhineI) {
iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR
/* Can be called from ISR. Evil. */
@@ -1345,13 +1492,13 @@ static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
/* 0x80 must be set immediately before turning it off */
iowrite8(0x80, ioaddr + MIICmd);
- RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);
+ rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
/* Heh. Now clear 0x80 again. */
iowrite8(0, ioaddr + MIICmd);
}
else
- RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
+ rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
}
/* Read and write over the MII Management Data I/O (MDIO) interface. */
@@ -1362,16 +1509,16 @@ static int mdio_read(struct net_device *dev, int phy_id, int regnum)
void __iomem *ioaddr = rp->base;
int result;
- rhine_disable_linkmon(ioaddr, rp->quirks);
+ rhine_disable_linkmon(rp);
/* rhine_disable_linkmon already cleared MIICmd */
iowrite8(phy_id, ioaddr + MIIPhyAddr);
iowrite8(regnum, ioaddr + MIIRegAddr);
iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */
- RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
+ rhine_wait_bit_low(rp, MIICmd, 0x40);
result = ioread16(ioaddr + MIIData);
- rhine_enable_linkmon(ioaddr);
+ rhine_enable_linkmon(rp);
return result;
}
@@ -1380,16 +1527,33 @@ static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
- rhine_disable_linkmon(ioaddr, rp->quirks);
+ rhine_disable_linkmon(rp);
/* rhine_disable_linkmon already cleared MIICmd */
iowrite8(phy_id, ioaddr + MIIPhyAddr);
iowrite8(regnum, ioaddr + MIIRegAddr);
iowrite16(value, ioaddr + MIIData);
iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */
- RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));
+ rhine_wait_bit_low(rp, MIICmd, 0x20);
- rhine_enable_linkmon(ioaddr);
+ rhine_enable_linkmon(rp);
+}
+
+static void rhine_task_disable(struct rhine_private *rp)
+{
+ mutex_lock(&rp->task_lock);
+ rp->task_enable = false;
+ mutex_unlock(&rp->task_lock);
+
+ cancel_work_sync(&rp->slow_event_task);
+ cancel_work_sync(&rp->reset_task);
+}
+
+static void rhine_task_enable(struct rhine_private *rp)
+{
+ mutex_lock(&rp->task_lock);
+ rp->task_enable = true;
+ mutex_unlock(&rp->task_lock);
}
static int rhine_open(struct net_device *dev)
@@ -1403,8 +1567,7 @@ static int rhine_open(struct net_device *dev)
if (rc)
return rc;
- if (debug > 1)
- netdev_dbg(dev, "%s() irq %d\n", __func__, rp->pdev->irq);
+ netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->pdev->irq);
rc = alloc_ring(dev);
if (rc) {
@@ -1414,11 +1577,12 @@ static int rhine_open(struct net_device *dev)
alloc_rbufs(dev);
alloc_tbufs(dev);
rhine_chip_reset(dev);
+ rhine_task_enable(rp);
init_registers(dev);
- if (debug > 2)
- netdev_dbg(dev, "%s() Done - status %04x MII status: %04x\n",
- __func__, ioread16(ioaddr + ChipCmd),
- mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
+
+ netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
+ __func__, ioread16(ioaddr + ChipCmd),
+ mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
netif_start_queue(dev);
@@ -1431,11 +1595,12 @@ static void rhine_reset_task(struct work_struct *work)
reset_task);
struct net_device *dev = rp->dev;
- /* protect against concurrent rx interrupts */
- disable_irq(rp->pdev->irq);
+ mutex_lock(&rp->task_lock);
- napi_disable(&rp->napi);
+ if (!rp->task_enable)
+ goto out_unlock;
+ napi_disable(&rp->napi);
spin_lock_bh(&rp->lock);
/* clear all descriptors */
@@ -1449,11 +1614,13 @@ static void rhine_reset_task(struct work_struct *work)
init_registers(dev);
spin_unlock_bh(&rp->lock);
- enable_irq(rp->pdev->irq);
dev->trans_start = jiffies; /* prevent tx timeout */
dev->stats.tx_errors++;
netif_wake_queue(dev);
+
+out_unlock:
+ mutex_unlock(&rp->task_lock);
}
static void rhine_tx_timeout(struct net_device *dev)
@@ -1474,7 +1641,6 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
unsigned entry;
- unsigned long flags;
/* Caution: the write order is important here, set the field
with the "ownership" bits last. */
@@ -1526,7 +1692,6 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
rp->tx_ring[entry].tx_status = 0;
/* lock eth irq */
- spin_lock_irqsave(&rp->lock, flags);
wmb();
rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
wmb();
@@ -1547,78 +1712,43 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
netif_stop_queue(dev);
- spin_unlock_irqrestore(&rp->lock, flags);
+ netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
+ rp->cur_tx - 1, entry);
- if (debug > 4) {
- netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n",
- rp->cur_tx-1, entry);
- }
return NETDEV_TX_OK;
}
+static void rhine_irq_disable(struct rhine_private *rp)
+{
+ iowrite16(0x0000, rp->base + IntrEnable);
+ mmiowb();
+}
+
/* The interrupt handler does all of the Rx thread work and cleans up
after the Tx thread. */
static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
{
struct net_device *dev = dev_instance;
struct rhine_private *rp = netdev_priv(dev);
- void __iomem *ioaddr = rp->base;
- u32 intr_status;
- int boguscnt = max_interrupt_work;
+ u32 status;
int handled = 0;
- while ((intr_status = get_intr_status(dev))) {
- handled = 1;
-
- /* Acknowledge all of the current interrupt sources ASAP. */
- if (intr_status & IntrTxDescRace)
- iowrite8(0x08, ioaddr + IntrStatus2);
- iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
- IOSYNC;
-
- if (debug > 4)
- netdev_dbg(dev, "Interrupt, status %08x\n",
- intr_status);
+ status = rhine_get_events(rp);
- if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
- IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf)) {
- iowrite16(IntrTxAborted |
- IntrTxDone | IntrTxError | IntrTxUnderrun |
- IntrPCIErr | IntrStatsMax | IntrLinkChange,
- ioaddr + IntrEnable);
+ netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
- napi_schedule(&rp->napi);
- }
-
- if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
- if (intr_status & IntrTxErrSummary) {
- /* Avoid scavenging before Tx engine turned off */
- RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
- if (debug > 2 &&
- ioread8(ioaddr+ChipCmd) & CmdTxOn)
- netdev_warn(dev,
- "%s: Tx engine still on\n",
- __func__);
- }
- rhine_tx(dev);
- }
+ if (status & RHINE_EVENT) {
+ handled = 1;
- /* Abnormal error summary/uncommon events handlers. */
- if (intr_status & (IntrPCIErr | IntrLinkChange |
- IntrStatsMax | IntrTxError | IntrTxAborted |
- IntrTxUnderrun | IntrTxDescRace))
- rhine_error(dev, intr_status);
+ rhine_irq_disable(rp);
+ napi_schedule(&rp->napi);
+ }
- if (--boguscnt < 0) {
- netdev_warn(dev, "Too much work at interrupt, status=%#08x\n",
- intr_status);
- break;
- }
+ if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
+ netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
+ status);
}
- if (debug > 3)
- netdev_dbg(dev, "exiting interrupt, status=%08x\n",
- ioread16(ioaddr + IntrStatus));
return IRQ_RETVAL(handled);
}
@@ -1629,20 +1759,16 @@ static void rhine_tx(struct net_device *dev)
struct rhine_private *rp = netdev_priv(dev);
int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
- spin_lock(&rp->lock);
-
/* find and cleanup dirty tx descriptors */
while (rp->dirty_tx != rp->cur_tx) {
txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
- if (debug > 6)
- netdev_dbg(dev, "Tx scavenge %d status %08x\n",
- entry, txstatus);
+ netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
+ entry, txstatus);
if (txstatus & DescOwn)
break;
if (txstatus & 0x8000) {
- if (debug > 1)
- netdev_dbg(dev, "Transmit error, Tx status %08x\n",
- txstatus);
+ netif_dbg(rp, tx_done, dev,
+ "Transmit error, Tx status %08x\n", txstatus);
dev->stats.tx_errors++;
if (txstatus & 0x0400)
dev->stats.tx_carrier_errors++;
@@ -1664,10 +1790,8 @@ static void rhine_tx(struct net_device *dev)
dev->stats.collisions += (txstatus >> 3) & 0x0F;
else
dev->stats.collisions += txstatus & 0x0F;
- if (debug > 6)
- netdev_dbg(dev, "collisions: %1.1x:%1.1x\n",
- (txstatus >> 3) & 0xF,
- txstatus & 0xF);
+ netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
+ (txstatus >> 3) & 0xF, txstatus & 0xF);
dev->stats.tx_bytes += rp->tx_skbuff[entry]->len;
dev->stats.tx_packets++;
}
@@ -1684,8 +1808,6 @@ static void rhine_tx(struct net_device *dev)
}
if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
netif_wake_queue(dev);
-
- spin_unlock(&rp->lock);
}
/**
@@ -1710,11 +1832,8 @@ static int rhine_rx(struct net_device *dev, int limit)
int count;
int entry = rp->cur_rx % RX_RING_SIZE;
- if (debug > 4) {
- netdev_dbg(dev, "%s(), entry %d status %08x\n",
- __func__, entry,
- le32_to_cpu(rp->rx_head_desc->rx_status));
- }
+ netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
+ entry, le32_to_cpu(rp->rx_head_desc->rx_status));
/* If EOP is set on the next entry, it's a new packet. Send it up. */
for (count = 0; count < limit; ++count) {
@@ -1726,9 +1845,8 @@ static int rhine_rx(struct net_device *dev, int limit)
if (desc_status & DescOwn)
break;
- if (debug > 4)
- netdev_dbg(dev, "%s() status is %08x\n",
- __func__, desc_status);
+ netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
+ desc_status);
if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
if ((desc_status & RxWholePkt) != RxWholePkt) {
@@ -1744,9 +1862,9 @@ static int rhine_rx(struct net_device *dev, int limit)
dev->stats.rx_length_errors++;
} else if (desc_status & RxErr) {
/* There was a error. */
- if (debug > 2)
- netdev_dbg(dev, "%s() Rx error was %08x\n",
- __func__, desc_status);
+ netif_dbg(rp, rx_err, dev,
+ "%s() Rx error %08x\n", __func__,
+ desc_status);
dev->stats.rx_errors++;
if (desc_status & 0x0030)
dev->stats.rx_length_errors++;
@@ -1836,19 +1954,6 @@ static int rhine_rx(struct net_device *dev, int limit)
return count;
}
-/*
- * Clears the "tally counters" for CRC errors and missed frames(?).
- * It has been reported that some chips need a write of 0 to clear
- * these, for others the counters are set to 1 when written to and
- * instead cleared when read. So we clear them both ways ...
- */
-static inline void clear_tally_counters(void __iomem *ioaddr)
-{
- iowrite32(0, ioaddr + RxMissed);
- ioread16(ioaddr + RxCRCErrs);
- ioread16(ioaddr + RxMissed);
-}
-
static void rhine_restart_tx(struct net_device *dev) {
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
@@ -1859,7 +1964,7 @@ static void rhine_restart_tx(struct net_device *dev) {
* If new errors occurred, we need to sort them out before doing Tx.
* In that case the ISR will be back here RSN anyway.
*/
- intr_status = get_intr_status(dev);
+ intr_status = rhine_get_events(rp);
if ((intr_status & IntrTxErrSummary) == 0) {
@@ -1880,79 +1985,50 @@ static void rhine_restart_tx(struct net_device *dev) {
}
else {
/* This should never happen */
- if (debug > 1)
- netdev_warn(dev, "%s() Another error occurred %08x\n",
- __func__, intr_status);
+ netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
+ intr_status);
}
}
-static void rhine_error(struct net_device *dev, int intr_status)
+static void rhine_slow_event_task(struct work_struct *work)
{
- struct rhine_private *rp = netdev_priv(dev);
- void __iomem *ioaddr = rp->base;
+ struct rhine_private *rp =
+ container_of(work, struct rhine_private, slow_event_task);
+ struct net_device *dev = rp->dev;
+ u32 intr_status;
- spin_lock(&rp->lock);
+ mutex_lock(&rp->task_lock);
+
+ if (!rp->task_enable)
+ goto out_unlock;
+
+ intr_status = rhine_get_events(rp);
+ rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
if (intr_status & IntrLinkChange)
rhine_check_media(dev, 0);
- if (intr_status & IntrStatsMax) {
- dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
- dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
- clear_tally_counters(ioaddr);
- }
- if (intr_status & IntrTxAborted) {
- if (debug > 1)
- netdev_info(dev, "Abort %08x, frame dropped\n",
- intr_status);
- }
- if (intr_status & IntrTxUnderrun) {
- if (rp->tx_thresh < 0xE0)
- BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
- if (debug > 1)
- netdev_info(dev, "Transmitter underrun, Tx threshold now %02x\n",
- rp->tx_thresh);
- }
- if (intr_status & IntrTxDescRace) {
- if (debug > 2)
- netdev_info(dev, "Tx descriptor write-back race\n");
- }
- if ((intr_status & IntrTxError) &&
- (intr_status & (IntrTxAborted |
- IntrTxUnderrun | IntrTxDescRace)) == 0) {
- if (rp->tx_thresh < 0xE0) {
- BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
- }
- if (debug > 1)
- netdev_info(dev, "Unspecified error. Tx threshold now %02x\n",
- rp->tx_thresh);
- }
- if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
- IntrTxError))
- rhine_restart_tx(dev);
-
- if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
- IntrTxError | IntrTxAborted | IntrNormalSummary |
- IntrTxDescRace)) {
- if (debug > 1)
- netdev_err(dev, "Something Wicked happened! %08x\n",
- intr_status);
- }
- spin_unlock(&rp->lock);
+ if (intr_status & IntrPCIErr)
+ netif_warn(rp, hw, dev, "PCI error\n");
+
+ napi_disable(&rp->napi);
+ rhine_irq_disable(rp);
+ /* Slow and safe. Consider __napi_schedule as a replacement ? */
+ napi_enable(&rp->napi);
+ napi_schedule(&rp->napi);
+
+out_unlock:
+ mutex_unlock(&rp->task_lock);
}
static struct net_device_stats *rhine_get_stats(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
- void __iomem *ioaddr = rp->base;
- unsigned long flags;
- spin_lock_irqsave(&rp->lock, flags);
- dev->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
- dev->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
- clear_tally_counters(ioaddr);
- spin_unlock_irqrestore(&rp->lock, flags);
+ spin_lock_bh(&rp->lock);
+ rhine_update_rx_crc_and_missed_errord(rp);
+ spin_unlock_bh(&rp->lock);
return &dev->stats;
}
@@ -2009,9 +2085,9 @@ static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *i
{
struct rhine_private *rp = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->bus_info, pci_name(rp->pdev));
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -2019,9 +2095,9 @@ static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
struct rhine_private *rp = netdev_priv(dev);
int rc;
- spin_lock_irq(&rp->lock);
+ mutex_lock(&rp->task_lock);
rc = mii_ethtool_gset(&rp->mii_if, cmd);
- spin_unlock_irq(&rp->lock);
+ mutex_unlock(&rp->task_lock);
return rc;
}
@@ -2031,10 +2107,10 @@ static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
struct rhine_private *rp = netdev_priv(dev);
int rc;
- spin_lock_irq(&rp->lock);
+ mutex_lock(&rp->task_lock);
rc = mii_ethtool_sset(&rp->mii_if, cmd);
- spin_unlock_irq(&rp->lock);
rhine_set_carrier(&rp->mii_if);
+ mutex_unlock(&rp->task_lock);
return rc;
}
@@ -2055,12 +2131,16 @@ static u32 netdev_get_link(struct net_device *dev)
static u32 netdev_get_msglevel(struct net_device *dev)
{
- return debug;
+ struct rhine_private *rp = netdev_priv(dev);
+
+ return rp->msg_enable;
}
static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
- debug = value;
+ struct rhine_private *rp = netdev_priv(dev);
+
+ rp->msg_enable = value;
}
static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -2116,10 +2196,10 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!netif_running(dev))
return -EINVAL;
- spin_lock_irq(&rp->lock);
+ mutex_lock(&rp->task_lock);
rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
- spin_unlock_irq(&rp->lock);
rhine_set_carrier(&rp->mii_if);
+ mutex_unlock(&rp->task_lock);
return rc;
}
@@ -2129,27 +2209,21 @@ static int rhine_close(struct net_device *dev)
struct rhine_private *rp = netdev_priv(dev);
void __iomem *ioaddr = rp->base;
+ rhine_task_disable(rp);
napi_disable(&rp->napi);
- cancel_work_sync(&rp->reset_task);
netif_stop_queue(dev);
- spin_lock_irq(&rp->lock);
-
- if (debug > 1)
- netdev_dbg(dev, "Shutting down ethercard, status was %04x\n",
- ioread16(ioaddr + ChipCmd));
+ netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
+ ioread16(ioaddr + ChipCmd));
/* Switch to loopback mode to avoid hardware races. */
iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
- /* Disable interrupts by clearing the interrupt mask. */
- iowrite16(0x0000, ioaddr + IntrEnable);
+ rhine_irq_disable(rp);
/* Stop the chip's Tx and Rx processes. */
iowrite16(CmdStop, ioaddr + ChipCmd);
- spin_unlock_irq(&rp->lock);
-
free_irq(rp->pdev->irq, dev);
free_rbufs(dev);
free_tbufs(dev);
@@ -2189,6 +2263,8 @@ static void rhine_shutdown (struct pci_dev *pdev)
if (rp->quirks & rq6patterns)
iowrite8(0x04, ioaddr + WOLcgClr);
+ spin_lock(&rp->lock);
+
if (rp->wolopts & WAKE_MAGIC) {
iowrite8(WOLmagic, ioaddr + WOLcrSet);
/*
@@ -2213,58 +2289,46 @@ static void rhine_shutdown (struct pci_dev *pdev)
iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
}
- /* Hit power state D3 (sleep) */
- if (!avoid_D3)
- iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
+ spin_unlock(&rp->lock);
- /* TODO: Check use of pci_enable_wake() */
+ if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
+ iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
+ pci_wake_from_d3(pdev, true);
+ pci_set_power_state(pdev, PCI_D3hot);
+ }
}
-#ifdef CONFIG_PM
-static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int rhine_suspend(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
struct rhine_private *rp = netdev_priv(dev);
- unsigned long flags;
if (!netif_running(dev))
return 0;
+ rhine_task_disable(rp);
+ rhine_irq_disable(rp);
napi_disable(&rp->napi);
netif_device_detach(dev);
- pci_save_state(pdev);
- spin_lock_irqsave(&rp->lock, flags);
rhine_shutdown(pdev);
- spin_unlock_irqrestore(&rp->lock, flags);
- free_irq(dev->irq, dev);
return 0;
}
-static int rhine_resume(struct pci_dev *pdev)
+static int rhine_resume(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
struct rhine_private *rp = netdev_priv(dev);
- unsigned long flags;
- int ret;
if (!netif_running(dev))
return 0;
- if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
- netdev_err(dev, "request_irq failed\n");
-
- ret = pci_set_power_state(pdev, PCI_D0);
- if (debug > 1)
- netdev_info(dev, "Entering power state D0 %s (%d)\n",
- ret ? "failed" : "succeeded", ret);
-
- pci_restore_state(pdev);
-
- spin_lock_irqsave(&rp->lock, flags);
#ifdef USE_MMIO
enable_mmio(rp->pioaddr, rp->quirks);
#endif
@@ -2273,25 +2337,32 @@ static int rhine_resume(struct pci_dev *pdev)
free_rbufs(dev);
alloc_tbufs(dev);
alloc_rbufs(dev);
+ rhine_task_enable(rp);
+ spin_lock_bh(&rp->lock);
init_registers(dev);
- spin_unlock_irqrestore(&rp->lock, flags);
+ spin_unlock_bh(&rp->lock);
netif_device_attach(dev);
return 0;
}
-#endif /* CONFIG_PM */
+
+static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
+#define RHINE_PM_OPS (&rhine_pm_ops)
+
+#else
+
+#define RHINE_PM_OPS NULL
+
+#endif /* !CONFIG_PM_SLEEP */
static struct pci_driver rhine_driver = {
.name = DRV_NAME,
.id_table = rhine_pci_tbl,
.probe = rhine_init_one,
.remove = __devexit_p(rhine_remove_one),
-#ifdef CONFIG_PM
- .suspend = rhine_suspend,
- .resume = rhine_resume,
-#endif /* CONFIG_PM */
- .shutdown = rhine_shutdown,
+ .shutdown = rhine_shutdown,
+ .driver.pm = RHINE_PM_OPS,
};
static struct dmi_system_id __initdata rhine_dmi_table[] = {
@@ -2320,7 +2391,7 @@ static int __init rhine_init(void)
#endif
if (dmi_check_system(rhine_dmi_table)) {
/* these BIOSes fail at PXE boot if chip is in D3 */
- avoid_D3 = 1;
+ avoid_D3 = true;
pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
}
else if (avoid_D3)
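Besides the msg_enable and slow-event rework, the via-rhine changes above also drop the legacy pci_driver .suspend/.resume callbacks (and the explicit pci_save_state/pci_restore_state calls) in favour of dev_pm_ops. A minimal sketch of that conversion pattern, with hypothetical example_* names and the device quiescing elided:

#include <linux/pci.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int example_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);

	/* quiesce the device; the PCI core saves config space and
	 * chooses the low-power state, unlike the legacy callbacks */
	dev_dbg(&pdev->dev, "suspending\n");
	return 0;
}

static int example_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);

	/* reprogram the hardware; no pci_restore_state() needed here */
	dev_dbg(&pdev->dev, "resuming\n");
	return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);
#define EXAMPLE_PM_OPS	(&example_pm_ops)
#else
#define EXAMPLE_PM_OPS	NULL
#endif

static struct pci_driver example_driver = {
	.name		= "example",
	/* .id_table, .probe, .remove as usual */
	.driver.pm	= EXAMPLE_PM_OPS,
};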
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 4535d7c..cb35b14 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -522,7 +522,7 @@ static void velocity_init_cam_filter(struct velocity_info *vptr)
mac_set_vlan_cam_mask(regs, vptr->vCAMmask);
}
-static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct velocity_info *vptr = netdev_priv(dev);
@@ -530,9 +530,10 @@ static void velocity_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
set_bit(vid, vptr->active_vlans);
velocity_init_cam_filter(vptr);
spin_unlock_irq(&vptr->lock);
+ return 0;
}
-static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct velocity_info *vptr = netdev_priv(dev);
@@ -540,6 +541,7 @@ static void velocity_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid
clear_bit(vid, vptr->active_vlans);
velocity_init_cam_filter(vptr);
spin_unlock_irq(&vptr->lock);
+ return 0;
}
static void velocity_init_rx_ring_indexes(struct velocity_info *vptr)
@@ -2489,9 +2491,6 @@ static int velocity_close(struct net_device *dev)
if (dev->irq != 0)
free_irq(dev->irq, dev);
- /* Power down the chip */
- pci_set_power_state(vptr->pdev, PCI_D3hot);
-
velocity_free_rings(vptr);
vptr->flags &= (~VELOCITY_FLAGS_OPENED);
@@ -3270,9 +3269,9 @@ static int velocity_set_settings(struct net_device *dev,
static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct velocity_info *vptr = netdev_priv(dev);
- strcpy(info->driver, VELOCITY_NAME);
- strcpy(info->version, VELOCITY_VERSION);
- strcpy(info->bus_info, pci_name(vptr->pdev));
+ strlcpy(info->driver, VELOCITY_NAME, sizeof(info->driver));
+ strlcpy(info->version, VELOCITY_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(vptr->pdev), sizeof(info->bus_info));
}
static void velocity_ethtool_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 2681b53..f21addb 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -237,7 +237,7 @@ static int temac_dma_bd_init(struct net_device *ndev)
struct sk_buff *skb;
int i;
- lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL);
+ lp->rx_skb = kcalloc(RX_BD_NUM, sizeof(*lp->rx_skb), GFP_KERNEL);
if (!lp->rx_skb) {
dev_err(&ndev->dev,
"can't allocate memory for DMA RX buffer\n");
@@ -920,12 +920,26 @@ temac_poll_controller(struct net_device *ndev)
}
#endif
+static int temac_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+ struct temac_local *lp = netdev_priv(ndev);
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ if (!lp->phy_dev)
+ return -EINVAL;
+
+ return phy_mii_ioctl(lp->phy_dev, rq, cmd);
+}
+
static const struct net_device_ops temac_netdev_ops = {
.ndo_open = temac_open,
.ndo_stop = temac_stop,
.ndo_start_xmit = temac_start_xmit,
.ndo_set_mac_address = netdev_set_mac_address,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = temac_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = temac_poll_controller,
#endif
@@ -1077,7 +1091,7 @@ static int __devinit temac_of_probe(struct platform_device *op)
of_node_put(np); /* Finished with the DMA node; drop the reference */
- if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
+ if (!lp->rx_irq || !lp->tx_irq) {
dev_err(&op->dev, "could not determine irqs\n");
rc = -ENOMEM;
goto err_iounmap_2;
@@ -1167,17 +1181,7 @@ static struct platform_driver temac_of_driver = {
},
};
-static int __init temac_init(void)
-{
- return platform_driver_register(&temac_of_driver);
-}
-module_init(temac_init);
-
-static void __exit temac_exit(void)
-{
- platform_driver_unregister(&temac_of_driver);
-}
-module_exit(temac_exit);
+module_platform_driver(temac_of_driver);
MODULE_DESCRIPTION("Xilinx LL_TEMAC Ethernet driver");
MODULE_AUTHOR("Yoshio Kashiwagi");
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 8018d7d..79013e5 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -662,7 +662,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
*/
static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
{
- bool tx_complete = 0;
+ bool tx_complete = false;
struct net_device *dev = dev_id;
struct net_local *lp = netdev_priv(dev);
void __iomem *base_addr = lp->base_addr;
@@ -683,7 +683,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
out_be32(base_addr + XEL_TSR_OFFSET, tx_status);
- tx_complete = 1;
+ tx_complete = true;
}
/* Check if the Transmission for the second buffer is completed */
@@ -695,7 +695,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
out_be32(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET,
tx_status);
- tx_complete = 1;
+ tx_complete = true;
}
/* If there was a Tx interrupt, call the Tx Handler */
@@ -1129,7 +1129,7 @@ static int __devinit xemaclite_of_probe(struct platform_device *ofdev)
/* Get IRQ for the device */
rc = of_irq_to_resource(ofdev->dev.of_node, 0, &r_irq);
- if (rc == NO_IRQ) {
+ if (!rc) {
dev_err(dev, "no IRQ found\n");
return rc;
}
@@ -1303,27 +1303,7 @@ static struct platform_driver xemaclite_of_driver = {
.remove = __devexit_p(xemaclite_of_remove),
};
-/**
- * xgpiopss_init - Initial driver registration call
- *
- * Return: 0 upon success, or a negative error upon failure.
- */
-static int __init xemaclite_init(void)
-{
- /* No kernel boot options used, we just need to register the driver */
- return platform_driver_register(&xemaclite_of_driver);
-}
-
-/**
- * xemaclite_cleanup - Driver un-registration call
- */
-static void __exit xemaclite_cleanup(void)
-{
- platform_driver_unregister(&xemaclite_of_driver);
-}
-
-module_init(xemaclite_init);
-module_exit(xemaclite_cleanup);
+module_platform_driver(xemaclite_of_driver);
MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx Ethernet MAC Lite driver");
diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c
index bbe8b7d..33979c3 100644
--- a/drivers/net/ethernet/xircom/xirc2ps_cs.c
+++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c
@@ -1411,7 +1411,7 @@ do_open(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, "xirc2ps_cs");
+ strlcpy(info->driver, "xirc2ps_cs", sizeof(info->driver));
sprintf(info->bus_info, "PCMCIA 0x%lx", dev->base_addr);
}
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c
index f45c85a..41a8b5a 100644
--- a/drivers/net/ethernet/xscale/ixp4xx_eth.c
+++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c
@@ -529,7 +529,7 @@ static int ixp4xx_mdio_register(void)
mdio_bus->name = "IXP4xx MII Bus";
mdio_bus->read = &ixp4xx_mdio_read;
mdio_bus->write = &ixp4xx_mdio_write;
- strcpy(mdio_bus->id, "0");
+ snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "ixp4xx-eth-0");
if ((err = mdiobus_register(mdio_bus)))
mdiobus_free(mdio_bus);
@@ -1416,7 +1416,8 @@ static int __devinit eth_init_one(struct platform_device *pdev)
__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
udelay(50);
- snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, "0", plat->phy);
+ snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
+ mdio_bus->id, plat->phy);
port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0,
PHY_INTERFACE_MODE_MII);
if (IS_ERR(port->phydev)) {
diff --git a/drivers/net/hyperv/Kconfig b/drivers/net/hyperv/Kconfig
new file mode 100644
index 0000000..936968d
--- /dev/null
+++ b/drivers/net/hyperv/Kconfig
@@ -0,0 +1,5 @@
+config HYPERV_NET
+ tristate "Microsoft Hyper-V virtual network driver"
+ depends on HYPERV
+ help
+ Select this option to enable the Hyper-V virtual network driver.
diff --git a/drivers/net/hyperv/Makefile b/drivers/net/hyperv/Makefile
new file mode 100644
index 0000000..c8a6682
--- /dev/null
+++ b/drivers/net/hyperv/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_HYPERV_NET) += hv_netvsc.o
+
+hv_netvsc-y := netvsc_drv.o netvsc.o rndis_filter.o
diff --git a/drivers/staging/hv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index ac1ec84..dec5836 100644
--- a/drivers/staging/hv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -39,9 +39,6 @@ struct xferpage_packet {
u32 count;
};
-/* The number of pages which are enough to cover jumbo frame buffer. */
-#define NETVSC_PACKET_MAXPAGE 4
-
/*
* Represent netvsc packet which contains 1 RNDIS and 1 ethernet frame
* within the RNDIS
@@ -77,8 +74,9 @@ struct hv_netvsc_packet {
u32 total_data_buflen;
/* Points to the send/receive buffer where the ethernet frame is */
+ void *data;
u32 page_buf_cnt;
- struct hv_page_buffer page_buf[NETVSC_PACKET_MAXPAGE];
+ struct hv_page_buffer page_buf[0];
};
struct netvsc_device_info {
@@ -87,6 +85,27 @@ struct netvsc_device_info {
int ring_size;
};
+enum rndis_device_state {
+ RNDIS_DEV_UNINITIALIZED = 0,
+ RNDIS_DEV_INITIALIZING,
+ RNDIS_DEV_INITIALIZED,
+ RNDIS_DEV_DATAINITIALIZED,
+};
+
+struct rndis_device {
+ struct netvsc_device *net_dev;
+
+ enum rndis_device_state state;
+ bool link_state;
+ atomic_t new_req_id;
+
+ spinlock_t request_lock;
+ struct list_head req_list;
+
+ unsigned char hw_mac_adr[ETH_ALEN];
+};
+
+
/* Interface */
int netvsc_device_add(struct hv_device *device, void *additional_info);
int netvsc_device_remove(struct hv_device *device);
@@ -109,11 +128,13 @@ int rndis_filter_receive(struct hv_device *dev,
int rndis_filter_send(struct hv_device *dev,
struct hv_netvsc_packet *pkt);
+int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
+
+
#define NVSP_INVALID_PROTOCOL_VERSION ((u32)0xFFFFFFFF)
#define NVSP_PROTOCOL_VERSION_1 2
-#define NVSP_MIN_PROTOCOL_VERSION NVSP_PROTOCOL_VERSION_1
-#define NVSP_MAX_PROTOCOL_VERSION NVSP_PROTOCOL_VERSION_1
+#define NVSP_PROTOCOL_VERSION_2 0x30002
enum {
NVSP_MSG_TYPE_NONE = 0,
@@ -138,11 +159,36 @@ enum {
NVSP_MSG1_TYPE_SEND_RNDIS_PKT,
NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE,
- /*
- * This should be set to the number of messages for the version with
- * the maximum number of messages.
- */
- NVSP_NUM_MSG_PER_VERSION = 9,
+ /* Version 2 messages */
+ NVSP_MSG2_TYPE_SEND_CHIMNEY_DELEGATED_BUF,
+ NVSP_MSG2_TYPE_SEND_CHIMNEY_DELEGATED_BUF_COMP,
+ NVSP_MSG2_TYPE_REVOKE_CHIMNEY_DELEGATED_BUF,
+
+ NVSP_MSG2_TYPE_RESUME_CHIMNEY_RX_INDICATION,
+
+ NVSP_MSG2_TYPE_TERMINATE_CHIMNEY,
+ NVSP_MSG2_TYPE_TERMINATE_CHIMNEY_COMP,
+
+ NVSP_MSG2_TYPE_INDICATE_CHIMNEY_EVENT,
+
+ NVSP_MSG2_TYPE_SEND_CHIMNEY_PKT,
+ NVSP_MSG2_TYPE_SEND_CHIMNEY_PKT_COMP,
+
+ NVSP_MSG2_TYPE_POST_CHIMNEY_RECV_REQ,
+ NVSP_MSG2_TYPE_POST_CHIMNEY_RECV_REQ_COMP,
+
+ NVSP_MSG2_TYPE_ALLOC_RXBUF,
+ NVSP_MSG2_TYPE_ALLOC_RXBUF_COMP,
+
+ NVSP_MSG2_TYPE_FREE_RXBUF,
+
+ NVSP_MSG2_TYPE_SEND_VMQ_RNDIS_PKT,
+ NVSP_MSG2_TYPE_SEND_VMQ_RNDIS_PKT_COMP,
+
+ NVSP_MSG2_TYPE_SEND_NDIS_CONFIG,
+
+ NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE,
+ NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE_COMP,
};
enum {
@@ -153,6 +199,7 @@ enum {
NVSP_STAT_PROTOCOL_TOO_OLD,
NVSP_STAT_INVALID_RNDIS_PKT,
NVSP_STAT_BUSY,
+ NVSP_STAT_PROTOCOL_UNSUPPORTED,
NVSP_STAT_MAX,
};
@@ -337,9 +384,69 @@ union nvsp_1_message_uber {
send_rndis_pkt_complete;
} __packed;
+
+/*
+ * Network VSP protocol version 2 messages:
+ */
+struct nvsp_2_vsc_capability {
+ union {
+ u64 data;
+ struct {
+ u64 vmq:1;
+ u64 chimney:1;
+ u64 sriov:1;
+ u64 ieee8021q:1;
+ u64 correlation_id:1;
+ };
+ };
+} __packed;
+
+struct nvsp_2_send_ndis_config {
+ u32 mtu;
+ u32 reserved;
+ struct nvsp_2_vsc_capability capability;
+} __packed;
+
+/* Allocate receive buffer */
+struct nvsp_2_alloc_rxbuf {
+ /* Allocation ID to match the allocation request and response */
+ u32 alloc_id;
+
+ /* Length of the VM shared memory receive buffer that needs to
+ * be allocated
+ */
+ u32 len;
+} __packed;
+
+/* Allocate receive buffer complete */
+struct nvsp_2_alloc_rxbuf_comp {
+ /* The NDIS_STATUS code for buffer allocation */
+ u32 status;
+
+ u32 alloc_id;
+
+ /* GPADL handle for the allocated receive buffer */
+ u32 gpadl_handle;
+
+ /* Receive buffer ID */
+ u64 recv_buf_id;
+} __packed;
+
+struct nvsp_2_free_rxbuf {
+ u64 recv_buf_id;
+} __packed;
+
+union nvsp_2_message_uber {
+ struct nvsp_2_send_ndis_config send_ndis_config;
+ struct nvsp_2_alloc_rxbuf alloc_rxbuf;
+ struct nvsp_2_alloc_rxbuf_comp alloc_rxbuf_comp;
+ struct nvsp_2_free_rxbuf free_rxbuf;
+} __packed;
+
union nvsp_all_messages {
union nvsp_message_init_uber init_msg;
union nvsp_1_message_uber v1_msg;
+ union nvsp_2_message_uber v2_msg;
} __packed;
/* ALL Messages */
@@ -349,12 +456,9 @@ struct nvsp_message {
} __packed;
+#define NETVSC_MTU 65536
-
-/* #define NVSC_MIN_PROTOCOL_VERSION 1 */
-/* #define NVSC_MAX_PROTOCOL_VERSION 1 */
-
-#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024) /* 1MB */
+#define NETVSC_RECEIVE_BUFFER_SIZE (1024*1024*2) /* 2MB */
#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
@@ -369,7 +473,10 @@ struct nvsp_message {
struct netvsc_device {
struct hv_device *dev;
+ u32 nvsp_version;
+
atomic_t num_outstanding_sends;
+ bool start_remove;
bool destroy;
/*
* List of free preallocated hv_netvsc_packet to represent receive
diff --git a/drivers/staging/hv/netvsc.c b/drivers/net/hyperv/netvsc.c
index b902579..8965b45 100644
--- a/drivers/staging/hv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -28,6 +28,7 @@
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
+#include <linux/if_ether.h>
#include "hyperv_net.h"
@@ -41,7 +42,7 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
if (!net_device)
return NULL;
-
+ net_device->start_remove = false;
net_device->destroy = false;
net_device->dev = device;
net_device->ndev = ndev;
@@ -230,19 +231,16 @@ static int netvsc_init_recv_buf(struct hv_device *device)
net_device->recv_section_cnt = init_packet->msg.
v1_msg.send_recv_buf_complete.num_sections;
- net_device->recv_section = kmalloc(net_device->recv_section_cnt
- * sizeof(struct nvsp_1_receive_buffer_section), GFP_KERNEL);
+ net_device->recv_section = kmemdup(
+ init_packet->msg.v1_msg.send_recv_buf_complete.sections,
+ net_device->recv_section_cnt *
+ sizeof(struct nvsp_1_receive_buffer_section),
+ GFP_KERNEL);
if (net_device->recv_section == NULL) {
ret = -EINVAL;
goto cleanup;
}
- memcpy(net_device->recv_section,
- init_packet->msg.v1_msg.
- send_recv_buf_complete.sections,
- net_device->recv_section_cnt *
- sizeof(struct nvsp_1_receive_buffer_section));
-
/*
* For 1st release, there should only be 1 section that represents the
* entire receive buffer
@@ -263,27 +261,18 @@ exit:
}
-static int netvsc_connect_vsp(struct hv_device *device)
+/* Negotiate NVSP protocol version */
+static int negotiate_nvsp_ver(struct hv_device *device,
+ struct netvsc_device *net_device,
+ struct nvsp_message *init_packet,
+ u32 nvsp_ver)
{
int ret, t;
- struct netvsc_device *net_device;
- struct nvsp_message *init_packet;
- int ndis_version;
- struct net_device *ndev;
-
- net_device = get_outbound_net_device(device);
- if (!net_device)
- return -ENODEV;
- ndev = net_device->ndev;
-
- init_packet = &net_device->channel_init_pkt;
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
- init_packet->msg.init_msg.init.min_protocol_ver =
- NVSP_MIN_PROTOCOL_VERSION;
- init_packet->msg.init_msg.init.max_protocol_ver =
- NVSP_MAX_PROTOCOL_VERSION;
+ init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
+ init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
/* Send the init request */
ret = vmbus_sendpacket(device->channel, init_packet,
@@ -293,26 +282,62 @@ static int netvsc_connect_vsp(struct hv_device *device)
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret != 0)
- goto cleanup;
+ return ret;
t = wait_for_completion_timeout(&net_device->channel_init_wait, 5*HZ);
- if (t == 0) {
- ret = -ETIMEDOUT;
- goto cleanup;
- }
+ if (t == 0)
+ return -ETIMEDOUT;
if (init_packet->msg.init_msg.init_complete.status !=
- NVSP_STAT_SUCCESS) {
- ret = -EINVAL;
- goto cleanup;
- }
+ NVSP_STAT_SUCCESS)
+ return -EINVAL;
- if (init_packet->msg.init_msg.init_complete.
- negotiated_protocol_ver != NVSP_PROTOCOL_VERSION_1) {
+ if (nvsp_ver != NVSP_PROTOCOL_VERSION_2)
+ return 0;
+
+ /* NVSPv2 only: Send NDIS config */
+ memset(init_packet, 0, sizeof(struct nvsp_message));
+ init_packet->hdr.msg_type = NVSP_MSG2_TYPE_SEND_NDIS_CONFIG;
+ init_packet->msg.v2_msg.send_ndis_config.mtu = net_device->ndev->mtu;
+
+ ret = vmbus_sendpacket(device->channel, init_packet,
+ sizeof(struct nvsp_message),
+ (unsigned long)init_packet,
+ VM_PKT_DATA_INBAND, 0);
+
+ return ret;
+}
+
+static int netvsc_connect_vsp(struct hv_device *device)
+{
+ int ret;
+ struct netvsc_device *net_device;
+ struct nvsp_message *init_packet;
+ int ndis_version;
+ struct net_device *ndev;
+
+ net_device = get_outbound_net_device(device);
+ if (!net_device)
+ return -ENODEV;
+ ndev = net_device->ndev;
+
+ init_packet = &net_device->channel_init_pkt;
+
+ /* Negotiate the latest NVSP protocol supported */
+ if (negotiate_nvsp_ver(device, net_device, init_packet,
+ NVSP_PROTOCOL_VERSION_2) == 0) {
+ net_device->nvsp_version = NVSP_PROTOCOL_VERSION_2;
+ } else if (negotiate_nvsp_ver(device, net_device, init_packet,
+ NVSP_PROTOCOL_VERSION_1) == 0) {
+ net_device->nvsp_version = NVSP_PROTOCOL_VERSION_1;
+ } else {
ret = -EPROTO;
goto cleanup;
}
+
+ pr_debug("Negotiated NVSP version:%x\n", net_device->nvsp_version);
+
/* Send the ndis version */
memset(init_packet, 0, sizeof(struct nvsp_message));
@@ -438,6 +463,9 @@ static void netvsc_send_completion(struct hv_device *device,
nvsc_packet->completion.send.send_completion_ctx);
atomic_dec(&net_device->num_outstanding_sends);
+
+ if (netif_queue_stopped(ndev) && !net_device->start_remove)
+ netif_wake_queue(ndev);
} else {
netdev_err(ndev, "Unknown send completion packet type- "
"%d received!!\n", nvsp_packet->hdr.msg_type);
@@ -488,11 +516,16 @@ int netvsc_send(struct hv_device *device,
}
- if (ret != 0)
+ if (ret == 0) {
+ atomic_inc(&net_device->num_outstanding_sends);
+ } else if (ret == -EAGAIN) {
+ netif_stop_queue(ndev);
+ if (atomic_read(&net_device->num_outstanding_sends) < 1)
+ netif_wake_queue(ndev);
+ } else {
netdev_err(ndev, "Unable to send packet %p ret %d\n",
packet, ret);
- else
- atomic_inc(&net_device->num_outstanding_sends);
+ }
return ret;
}
@@ -598,12 +631,10 @@ static void netvsc_receive(struct hv_device *device,
struct vmtransfer_page_packet_header *vmxferpage_packet;
struct nvsp_message *nvsp_packet;
struct hv_netvsc_packet *netvsc_packet = NULL;
- unsigned long start;
- unsigned long end, end_virtual;
/* struct netvsc_driver *netvscDriver; */
struct xferpage_packet *xferpage_packet = NULL;
- int i, j;
- int count = 0, bytes_remain = 0;
+ int i;
+ int count = 0;
unsigned long flags;
struct net_device *ndev;
@@ -712,53 +743,10 @@ static void netvsc_receive(struct hv_device *device,
netvsc_packet->completion.recv.recv_completion_tid =
vmxferpage_packet->d.trans_id;
+ netvsc_packet->data = (void *)((unsigned long)net_device->
+ recv_buf + vmxferpage_packet->ranges[i].byte_offset);
netvsc_packet->total_data_buflen =
vmxferpage_packet->ranges[i].byte_count;
- netvsc_packet->page_buf_cnt = 1;
-
- netvsc_packet->page_buf[0].len =
- vmxferpage_packet->ranges[i].byte_count;
-
- start = virt_to_phys((void *)((unsigned long)net_device->
- recv_buf + vmxferpage_packet->ranges[i].byte_offset));
-
- netvsc_packet->page_buf[0].pfn = start >> PAGE_SHIFT;
- end_virtual = (unsigned long)net_device->recv_buf
- + vmxferpage_packet->ranges[i].byte_offset
- + vmxferpage_packet->ranges[i].byte_count - 1;
- end = virt_to_phys((void *)end_virtual);
-
- /* Calculate the page relative offset */
- netvsc_packet->page_buf[0].offset =
- vmxferpage_packet->ranges[i].byte_offset &
- (PAGE_SIZE - 1);
- if ((end >> PAGE_SHIFT) != (start >> PAGE_SHIFT)) {
- /* Handle frame across multiple pages: */
- netvsc_packet->page_buf[0].len =
- (netvsc_packet->page_buf[0].pfn <<
- PAGE_SHIFT)
- + PAGE_SIZE - start;
- bytes_remain = netvsc_packet->total_data_buflen -
- netvsc_packet->page_buf[0].len;
- for (j = 1; j < NETVSC_PACKET_MAXPAGE; j++) {
- netvsc_packet->page_buf[j].offset = 0;
- if (bytes_remain <= PAGE_SIZE) {
- netvsc_packet->page_buf[j].len =
- bytes_remain;
- bytes_remain = 0;
- } else {
- netvsc_packet->page_buf[j].len =
- PAGE_SIZE;
- bytes_remain -= PAGE_SIZE;
- }
- netvsc_packet->page_buf[j].pfn =
- virt_to_phys((void *)(end_virtual -
- bytes_remain)) >> PAGE_SHIFT;
- netvsc_packet->page_buf_cnt++;
- if (bytes_remain == 0)
- break;
- }
- }
/* Pass it to the upper layer */
rndis_filter_receive(device, netvsc_packet);
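netvsc_connect_vsp() above tries NVSP version 2 first and falls back to version 1 only if the host rejects it. A standalone sketch of that negotiation order; try_version() is a hypothetical stand-in for negotiate_nvsp_ver():

#include <stdio.h>

#define NVSP_PROTOCOL_VERSION_1	2
#define NVSP_PROTOCOL_VERSION_2	0x30002

/* Hypothetical host that only understands version 1, so version 2
 * is rejected and the loop falls through to the older version. */
static int try_version(unsigned int ver)
{
	return (ver == NVSP_PROTOCOL_VERSION_1) ? 0 : -1;
}

int main(void)
{
	const unsigned int vers[] = {
		NVSP_PROTOCOL_VERSION_2,	/* newest first */
		NVSP_PROTOCOL_VERSION_1,	/* fallback */
	};
	unsigned int negotiated = 0;
	unsigned int i;

	for (i = 0; i < sizeof(vers) / sizeof(vers[0]); i++) {
		if (try_version(vers[i]) == 0) {
			negotiated = vers[i];
			break;
		}
	}

	if (!negotiated)
		return 1;	/* the driver returns -EPROTO here */

	printf("negotiated NVSP version 0x%x\n", negotiated);
	return 0;
}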
diff --git a/drivers/staging/hv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 93b0e91..bf01841 100644
--- a/drivers/staging/hv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -43,24 +43,60 @@
struct net_device_context {
/* point back to our device context */
struct hv_device *device_ctx;
- atomic_t avail;
struct delayed_work dwork;
};
-#define PACKET_PAGES_LOWATER 8
-/* Need this many pages to handle worst case fragmented packet */
-#define PACKET_PAGES_HIWATER (MAX_SKB_FRAGS + 2)
-
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
-/* no-op so the netdev core doesn't return -EINVAL when modifying the the
- * multicast address list in SIOCADDMULTI. hv is setup to get all multicast
- * when it calls RndisFilterOnOpen() */
+struct set_multicast_work {
+ struct work_struct work;
+ struct net_device *net;
+};
+
+static void do_set_multicast(struct work_struct *w)
+{
+ struct set_multicast_work *swk =
+ container_of(w, struct set_multicast_work, work);
+ struct net_device *net = swk->net;
+
+ struct net_device_context *ndevctx = netdev_priv(net);
+ struct netvsc_device *nvdev;
+ struct rndis_device *rdev;
+
+ nvdev = hv_get_drvdata(ndevctx->device_ctx);
+ if (nvdev == NULL)
+ goto out;
+
+ rdev = nvdev->extension;
+ if (rdev == NULL)
+ goto out;
+
+ if (net->flags & IFF_PROMISC)
+ rndis_filter_set_packet_filter(rdev,
+ NDIS_PACKET_TYPE_PROMISCUOUS);
+ else
+ rndis_filter_set_packet_filter(rdev,
+ NDIS_PACKET_TYPE_BROADCAST |
+ NDIS_PACKET_TYPE_ALL_MULTICAST |
+ NDIS_PACKET_TYPE_DIRECTED);
+
+out:
+ kfree(w);
+}
+
static void netvsc_set_multicast_list(struct net_device *net)
{
+ struct set_multicast_work *swk =
+ kmalloc(sizeof(struct set_multicast_work), GFP_ATOMIC);
+ if (swk == NULL)
+ return;
+
+ swk->net = net;
+ INIT_WORK(&swk->work, do_set_multicast);
+ schedule_work(&swk->work);
}
static int netvsc_open(struct net_device *net)
@@ -87,7 +123,7 @@ static int netvsc_close(struct net_device *net)
struct hv_device *device_obj = net_device_ctx->device_ctx;
int ret;
- netif_stop_queue(net);
+ netif_tx_disable(net);
ret = rndis_filter_close(device_obj);
if (ret != 0)
@@ -104,18 +140,8 @@ static void netvsc_xmit_completion(void *context)
kfree(packet);
- if (skb) {
- struct net_device *net = skb->dev;
- struct net_device_context *net_device_ctx = netdev_priv(net);
- unsigned int num_pages = skb_shinfo(skb)->nr_frags + 2;
-
+ if (skb)
dev_kfree_skb_any(skb);
-
- atomic_add(num_pages, &net_device_ctx->avail);
- if (atomic_read(&net_device_ctx->avail) >=
- PACKET_PAGES_HIWATER)
- netif_wake_queue(net);
- }
}
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
@@ -123,12 +149,12 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_netvsc_packet *packet;
int ret;
- unsigned int i, num_pages;
+ unsigned int i, num_pages, npg_data;
- /* Add 1 for skb->data and additional one for RNDIS */
- num_pages = skb_shinfo(skb)->nr_frags + 1 + 1;
- if (num_pages > atomic_read(&net_device_ctx->avail))
- return NETDEV_TX_BUSY;
+ /* Count the pages spanned by skb->data, plus 2 more for the RNDIS msg */
+ npg_data = (((unsigned long)skb->data + skb_headlen(skb) - 1)
+ >> PAGE_SHIFT) - ((unsigned long)skb->data >> PAGE_SHIFT) + 1;
+ num_pages = skb_shinfo(skb)->nr_frags + npg_data + 2;
/* Allocate a netvsc packet based on # of frags. */
packet = kzalloc(sizeof(struct hv_netvsc_packet) +
@@ -147,25 +173,40 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
sizeof(struct hv_netvsc_packet) +
(num_pages * sizeof(struct hv_page_buffer));
- /* Setup the rndis header */
- packet->page_buf_cnt = num_pages;
+ /* If the rndis msg goes beyond 1 page, we will add 1 later */
+ packet->page_buf_cnt = num_pages - 1;
/* Initialize it from the skb */
- packet->total_data_buflen = skb->len;
+ packet->total_data_buflen = skb->len;
/* Start filling in the page buffers starting after RNDIS buffer. */
packet->page_buf[1].pfn = virt_to_phys(skb->data) >> PAGE_SHIFT;
packet->page_buf[1].offset
= (unsigned long)skb->data & (PAGE_SIZE - 1);
- packet->page_buf[1].len = skb_headlen(skb);
+ if (npg_data == 1)
+ packet->page_buf[1].len = skb_headlen(skb);
+ else
+ packet->page_buf[1].len = PAGE_SIZE
+ - packet->page_buf[1].offset;
+
+ for (i = 2; i <= npg_data; i++) {
+ packet->page_buf[i].pfn = virt_to_phys(skb->data
+ + PAGE_SIZE * (i-1)) >> PAGE_SHIFT;
+ packet->page_buf[i].offset = 0;
+ packet->page_buf[i].len = PAGE_SIZE;
+ }
+ if (npg_data > 1)
+ packet->page_buf[npg_data].len = (((unsigned long)skb->data
+ + skb_headlen(skb) - 1) & (PAGE_SIZE - 1)) + 1;
/* Additional fragments are after SKB data */
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
const skb_frag_t *f = &skb_shinfo(skb)->frags[i];
- packet->page_buf[i+2].pfn = page_to_pfn(skb_frag_page(f));
- packet->page_buf[i+2].offset = f->page_offset;
- packet->page_buf[i+2].len = skb_frag_size(f);
+ packet->page_buf[i+npg_data+1].pfn =
+ page_to_pfn(skb_frag_page(f));
+ packet->page_buf[i+npg_data+1].offset = f->page_offset;
+ packet->page_buf[i+npg_data+1].len = skb_frag_size(f);
}
/* Set the completion routine */
@@ -178,10 +219,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
if (ret == 0) {
net->stats.tx_bytes += skb->len;
net->stats.tx_packets++;
-
- atomic_sub(num_pages, &net_device_ctx->avail);
- if (atomic_read(&net_device_ctx->avail) < PACKET_PAGES_LOWATER)
- netif_stop_queue(net);
} else {
/* we are shutting down or bus overloaded, just drop packet */
net->stats.tx_dropped++;
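The npg_data expression in the transmit hunk above counts how many pages the linear skb->data region touches, because data that crosses a page boundary needs one page_buf entry per page. A small standalone sketch of that arithmetic, assuming a 4 KB page size:

#include <stdio.h>

#define PAGE_SHIFT	12

/* Pages touched by a buffer of 'len' bytes starting at address 'addr';
 * this mirrors the npg_data expression in netvsc_start_xmit(). */
static unsigned int pages_spanned(unsigned long addr, unsigned long len)
{
	return (unsigned int)(((addr + len - 1) >> PAGE_SHIFT) -
			      (addr >> PAGE_SHIFT) + 1);
}

int main(void)
{
	/* 100 bytes starting 64 bytes before a page boundary span 2 pages. */
	printf("%u\n", pages_spanned(0x1fc0, 100));	/* prints 2 */
	/* A buffer wholly inside one page spans 1 page. */
	printf("%u\n", pages_spanned(0x2000, 64));	/* prints 1 */
	return 0;
}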
@@ -219,7 +256,7 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
} else {
netif_carrier_off(net);
- netif_stop_queue(net);
+ netif_tx_disable(net);
}
}
@@ -232,9 +269,6 @@ int netvsc_recv_callback(struct hv_device *device_obj,
{
struct net_device *net = dev_get_drvdata(&device_obj->device);
struct sk_buff *skb;
- void *data;
- int i;
- unsigned long flags;
struct netvsc_device *net_device;
net_device = hv_get_drvdata(device_obj);
@@ -253,33 +287,18 @@ int netvsc_recv_callback(struct hv_device *device_obj,
return 0;
}
- /* for kmap_atomic */
- local_irq_save(flags);
-
/*
* Copy to skb. This copy is needed here since the memory pointed by
* hv_netvsc_packet cannot be deallocated
*/
- for (i = 0; i < packet->page_buf_cnt; i++) {
- data = kmap_atomic(pfn_to_page(packet->page_buf[i].pfn),
- KM_IRQ1);
- data = (void *)(unsigned long)data +
- packet->page_buf[i].offset;
-
- memcpy(skb_put(skb, packet->page_buf[i].len), data,
- packet->page_buf[i].len);
-
- kunmap_atomic((void *)((unsigned long)data -
- packet->page_buf[i].offset), KM_IRQ1);
- }
-
- local_irq_restore(flags);
+ memcpy(skb_put(skb, packet->total_data_buflen), packet->data,
+ packet->total_data_buflen);
skb->protocol = eth_type_trans(skb, net);
skb->ip_summed = CHECKSUM_NONE;
net->stats.rx_packets++;
- net->stats.rx_bytes += skb->len;
+ net->stats.rx_bytes += packet->total_data_buflen;
/*
* Pass the skb back up. Network stack will deallocate the skb when it
@@ -294,11 +313,44 @@ int netvsc_recv_callback(struct hv_device *device_obj,
static void netvsc_get_drvinfo(struct net_device *net,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, "hv_netvsc");
+ strcpy(info->driver, KBUILD_MODNAME);
strcpy(info->version, HV_DRV_VERSION);
strcpy(info->fw_version, "N/A");
}
+static int netvsc_change_mtu(struct net_device *ndev, int mtu)
+{
+ struct net_device_context *ndevctx = netdev_priv(ndev);
+ struct hv_device *hdev = ndevctx->device_ctx;
+ struct netvsc_device *nvdev = hv_get_drvdata(hdev);
+ struct netvsc_device_info device_info;
+ int limit = ETH_DATA_LEN;
+
+ if (nvdev == NULL || nvdev->destroy)
+ return -ENODEV;
+
+ if (nvdev->nvsp_version == NVSP_PROTOCOL_VERSION_2)
+ limit = NETVSC_MTU;
+
+ if (mtu < 68 || mtu > limit)
+ return -EINVAL;
+
+ nvdev->start_remove = true;
+ cancel_delayed_work_sync(&ndevctx->dwork);
+ netif_tx_disable(ndev);
+ rndis_filter_device_remove(hdev);
+
+ ndev->mtu = mtu;
+
+ ndevctx->device_ctx = hdev;
+ hv_set_drvdata(hdev, ndev);
+ device_info.ring_size = ring_size;
+ rndis_filter_device_add(hdev, &device_info);
+ netif_wake_queue(ndev);
+
+ return 0;
+}
+
static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = netvsc_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -309,7 +361,7 @@ static const struct net_device_ops device_ops = {
.ndo_stop = netvsc_close,
.ndo_start_xmit = netvsc_start_xmit,
.ndo_set_rx_mode = netvsc_set_multicast_list,
- .ndo_change_mtu = eth_change_mtu,
+ .ndo_change_mtu = netvsc_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
};
@@ -351,7 +403,6 @@ static int netvsc_probe(struct hv_device *dev,
net_device_ctx = netdev_priv(net);
net_device_ctx->device_ctx = dev;
- atomic_set(&net_device_ctx->avail, ring_size);
hv_set_drvdata(dev, net);
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
@@ -403,11 +454,13 @@ static int netvsc_remove(struct hv_device *dev)
return 0;
}
+ net_device->start_remove = true;
+
ndev_ctx = netdev_priv(net);
cancel_delayed_work_sync(&ndev_ctx->dwork);
/* Stop outbound asap */
- netif_stop_queue(net);
+ netif_tx_disable(net);
unregister_netdev(net);
@@ -432,7 +485,7 @@ MODULE_DEVICE_TABLE(vmbus, id_table);
/* The one and only one */
static struct hv_driver netvsc_drv = {
- .name = "netvsc",
+ .name = KBUILD_MODNAME,
.id_table = id_table,
.probe = netvsc_probe,
.remove = netvsc_remove,
diff --git a/drivers/staging/hv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index bafccb3..133b7fb 100644
--- a/drivers/staging/hv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -30,26 +30,6 @@
#include "hyperv_net.h"
-enum rndis_device_state {
- RNDIS_DEV_UNINITIALIZED = 0,
- RNDIS_DEV_INITIALIZING,
- RNDIS_DEV_INITIALIZED,
- RNDIS_DEV_DATAINITIALIZED,
-};
-
-struct rndis_device {
- struct netvsc_device *net_dev;
-
- enum rndis_device_state state;
- bool link_state;
- atomic_t new_req_id;
-
- spinlock_t request_lock;
- struct list_head req_list;
-
- unsigned char hw_mac_adr[ETH_ALEN];
-};
-
struct rndis_request {
struct list_head list_ent;
struct completion wait_event;
@@ -329,7 +309,6 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
{
struct rndis_packet *rndis_pkt;
u32 data_offset;
- int i;
rndis_pkt = &msg->msg.pkt;
@@ -342,18 +321,27 @@ static void rndis_filter_receive_data(struct rndis_device *dev,
data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
pkt->total_data_buflen -= data_offset;
- pkt->page_buf[0].offset += data_offset;
- pkt->page_buf[0].len -= data_offset;
-
- /* Drop the 0th page, if rndis data go beyond page boundary */
- if (pkt->page_buf[0].offset >= PAGE_SIZE) {
- pkt->page_buf[1].offset = pkt->page_buf[0].offset - PAGE_SIZE;
- pkt->page_buf[1].len -= pkt->page_buf[1].offset;
- pkt->page_buf_cnt--;
- for (i = 0; i < pkt->page_buf_cnt; i++)
- pkt->page_buf[i] = pkt->page_buf[i+1];
+
+ /*
+ * Make sure we got a valid RNDIS message; at this point total_data_buflen
+ * should be the data packet size plus the trailer padding size.
+ */
+ if (pkt->total_data_buflen < rndis_pkt->data_len) {
+ netdev_err(dev->net_dev->ndev, "rndis message buffer "
+ "overflow detected (got %u, min %u)"
+ "...dropping this message!\n",
+ pkt->total_data_buflen, rndis_pkt->data_len);
+ return;
}
+ /*
+ * Remove the rndis trailer padding from the rndis packet message.
+ * rndis_pkt->data_len tells us the real data length; we only copy
+ * the data packet to the stack, without the rndis trailer padding
+ */
+ pkt->total_data_buflen = rndis_pkt->data_len;
+ pkt->data = (void *)((unsigned long)pkt->data + data_offset);
+
pkt->is_data_pkt = true;
netvsc_recv_callback(dev->net_dev->dev, pkt);
@@ -387,11 +375,7 @@ int rndis_filter_receive(struct hv_device *dev,
return -ENODEV;
}
- rndis_hdr = (struct rndis_message *)kmap_atomic(
- pfn_to_page(pkt->page_buf[0].pfn), KM_IRQ0);
-
- rndis_hdr = (void *)((unsigned long)rndis_hdr +
- pkt->page_buf[0].offset);
+ rndis_hdr = pkt->data;
/* Make sure we got a valid rndis message */
if ((rndis_hdr->ndis_msg_type != REMOTE_NDIS_PACKET_MSG) &&
@@ -407,8 +391,6 @@ int rndis_filter_receive(struct hv_device *dev,
sizeof(struct rndis_message) :
rndis_hdr->msg_len);
- kunmap_atomic(rndis_hdr - pkt->page_buf[0].offset, KM_IRQ0);
-
dump_rndis_message(dev, &rndis_msg);
switch (rndis_msg.ndis_msg_type) {
@@ -522,8 +504,7 @@ static int rndis_filter_query_device_link_status(struct rndis_device *dev)
return ret;
}
-static int rndis_filter_set_packet_filter(struct rndis_device *dev,
- u32 new_filter)
+int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
{
struct rndis_request *request;
struct rndis_set_request *set;
@@ -816,6 +797,19 @@ int rndis_filter_send(struct hv_device *dev,
(unsigned long)rndisMessage & (PAGE_SIZE-1);
pkt->page_buf[0].len = rndisMessageSize;
+ /* Add one page_buf if the rndis msg goes beyond page boundary */
+ if (pkt->page_buf[0].offset + rndisMessageSize > PAGE_SIZE) {
+ int i;
+ for (i = pkt->page_buf_cnt; i > 1; i--)
+ pkt->page_buf[i] = pkt->page_buf[i-1];
+ pkt->page_buf_cnt++;
+ pkt->page_buf[0].len = PAGE_SIZE - pkt->page_buf[0].offset;
+ pkt->page_buf[1].pfn = virt_to_phys((void *)((ulong)
+ rndisMessage + pkt->page_buf[0].len)) >> PAGE_SHIFT;
+ pkt->page_buf[1].offset = 0;
+ pkt->page_buf[1].len = rndisMessageSize - pkt->page_buf[0].len;
+ }
+
/* Save the packet send completion and context */
filterPacket->completion = pkt->completion.send.send_completion;
filterPacket->completion_ctx =
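rndis_filter_send() above splits the RNDIS message into a second page_buf entry when it straddles a page boundary. A minimal standalone sketch of that split, assuming the message is smaller than one page (as the RNDIS header is here):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Split a buffer at 'offset' within a page, 'len' bytes long, into at
 * most two per-page segments, as the hunk above does when the RNDIS
 * message crosses a page boundary. Returns the segment count. */
static int split_at_page(unsigned long offset, unsigned long len,
			 unsigned long seg_len[2])
{
	if (offset + len <= PAGE_SIZE) {
		seg_len[0] = len;
		return 1;
	}
	seg_len[0] = PAGE_SIZE - offset;
	seg_len[1] = len - seg_len[0];
	return 2;
}

int main(void)
{
	unsigned long seg[2] = { 0, 0 };
	int n = split_at_page(4000, 200, seg);	/* crosses the boundary */

	printf("%d segments: %lu + %lu\n", n, seg[0], seg[1]);
	/* prints "2 segments: 96 + 104" */
	return 0;
}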
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 46b5f5f..e05b645 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -164,7 +164,7 @@ static const struct net_device_ops ifb_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
-#define IFB_FEATURES (NETIF_F_NO_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
+#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6 | \
NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX)
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index d423d18..e535137 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -313,8 +313,12 @@ config TOSHIBA_FIR
donauboe.
config AU1000_FIR
- tristate "Alchemy Au1000 SIR/FIR"
+ tristate "Alchemy IrDA SIR/FIR"
depends on IRDA && MIPS_ALCHEMY
+ help
+ Say Y/M here to build support for the IrDA peripheral on the
+ Alchemy Au1000 and Au1100 SoCs.
+ Say M to build a module; it will be called au1k_ir.ko
config SMC_IRCC_FIR
tristate "SMSC IrCC (EXPERIMENTAL)"
diff --git a/drivers/net/irda/au1000_ircc.h b/drivers/net/irda/au1000_ircc.h
deleted file mode 100644
index c072c09..0000000
--- a/drivers/net/irda/au1000_ircc.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- *
- * BRIEF MODULE DESCRIPTION
- * Au1000 IrDA driver.
- *
- * Copyright 2001 MontaVista Software Inc.
- * Author: MontaVista Software, Inc.
- * ppopov@mvista.com or source@mvista.com
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef AU1000_IRCC_H
-#define AU1000_IRCC_H
-
-#include <linux/time.h>
-
-#include <linux/spinlock.h>
-#include <linux/pm.h>
-#include <asm/io.h>
-
-#define NUM_IR_IFF 1
-#define NUM_IR_DESC 64
-#define RING_SIZE_4 0x0
-#define RING_SIZE_16 0x3
-#define RING_SIZE_64 0xF
-#define MAX_NUM_IR_DESC 64
-#define MAX_BUF_SIZE 2048
-
-#define BPS_115200 0
-#define BPS_57600 1
-#define BPS_38400 2
-#define BPS_19200 5
-#define BPS_9600 11
-#define BPS_2400 47
-
-/* Ring descriptor flags */
-#define AU_OWN (1<<7) /* tx,rx */
-
-#define IR_DIS_CRC (1<<6) /* tx */
-#define IR_BAD_CRC (1<<5) /* tx */
-#define IR_NEED_PULSE (1<<4) /* tx */
-#define IR_FORCE_UNDER (1<<3) /* tx */
-#define IR_DISABLE_TX (1<<2) /* tx */
-#define IR_HW_UNDER (1<<0) /* tx */
-#define IR_TX_ERROR (IR_DIS_CRC|IR_BAD_CRC|IR_HW_UNDER)
-
-#define IR_PHY_ERROR (1<<6) /* rx */
-#define IR_CRC_ERROR (1<<5) /* rx */
-#define IR_MAX_LEN (1<<4) /* rx */
-#define IR_FIFO_OVER (1<<3) /* rx */
-#define IR_SIR_ERROR (1<<2) /* rx */
-#define IR_RX_ERROR (IR_PHY_ERROR|IR_CRC_ERROR| \
- IR_MAX_LEN|IR_FIFO_OVER|IR_SIR_ERROR)
-
-typedef struct db_dest {
- struct db_dest *pnext;
- volatile u32 *vaddr;
- dma_addr_t dma_addr;
-} db_dest_t;
-
-
-typedef struct ring_desc {
- u8 count_0; /* 7:0 */
- u8 count_1; /* 12:8 */
- u8 reserved;
- u8 flags;
- u8 addr_0; /* 7:0 */
- u8 addr_1; /* 15:8 */
- u8 addr_2; /* 23:16 */
- u8 addr_3; /* 31:24 */
-} ring_dest_t;
-
-
-/* Private data for each instance */
-struct au1k_private {
-
- db_dest_t *pDBfree;
- db_dest_t db[2*NUM_IR_DESC];
- volatile ring_dest_t *rx_ring[NUM_IR_DESC];
- volatile ring_dest_t *tx_ring[NUM_IR_DESC];
- db_dest_t *rx_db_inuse[NUM_IR_DESC];
- db_dest_t *tx_db_inuse[NUM_IR_DESC];
- u32 rx_head;
- u32 tx_head;
- u32 tx_tail;
- u32 tx_full;
-
- iobuff_t rx_buff;
-
- struct net_device *netdev;
-
- struct timeval stamp;
- struct timeval now;
- struct qos_info qos;
- struct irlap_cb *irlap;
-
- u8 open;
- u32 speed;
- u32 newspeed;
-
- u32 intr_work_done; /* number of Rx and Tx pkts processed in the isr */
- struct timer_list timer;
-
- spinlock_t lock; /* For serializing operations */
-};
-#endif /* AU1000_IRCC_H */
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index a3d696a..fc503aa 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -18,104 +18,220 @@
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*/
-#include <linux/module.h>
-#include <linux/types.h>
+
#include <linux/init.h>
-#include <linux/errno.h>
+#include <linux/module.h>
#include <linux/netdevice.h>
-#include <linux/slab.h>
-#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
-#include <linux/pm.h>
-#include <linux/bitops.h>
-
-#include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/au1000.h>
-#if defined(CONFIG_MIPS_PB1000) || defined(CONFIG_MIPS_PB1100)
-#include <asm/pb1000.h>
-#elif defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
-#include <asm/db1x00.h>
-#include <asm/mach-db1x00/bcsr.h>
-#else
-#error au1k_ir: unsupported board
-#endif
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/types.h>
#include <net/irda/irda.h>
#include <net/irda/irmod.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>
-#include "au1000_ircc.h"
+#include <asm/mach-au1x00/au1000.h>
+
+/* registers */
+#define IR_RING_PTR_STATUS 0x00
+#define IR_RING_BASE_ADDR_H 0x04
+#define IR_RING_BASE_ADDR_L 0x08
+#define IR_RING_SIZE 0x0C
+#define IR_RING_PROMPT 0x10
+#define IR_RING_ADDR_CMPR 0x14
+#define IR_INT_CLEAR 0x18
+#define IR_CONFIG_1 0x20
+#define IR_SIR_FLAGS 0x24
+#define IR_STATUS 0x28
+#define IR_READ_PHY_CONFIG 0x2C
+#define IR_WRITE_PHY_CONFIG 0x30
+#define IR_MAX_PKT_LEN 0x34
+#define IR_RX_BYTE_CNT 0x38
+#define IR_CONFIG_2 0x3C
+#define IR_ENABLE 0x40
+
+/* Config1 */
+#define IR_RX_INVERT_LED (1 << 0)
+#define IR_TX_INVERT_LED (1 << 1)
+#define IR_ST (1 << 2)
+#define IR_SF (1 << 3)
+#define IR_SIR (1 << 4)
+#define IR_MIR (1 << 5)
+#define IR_FIR (1 << 6)
+#define IR_16CRC (1 << 7)
+#define IR_TD (1 << 8)
+#define IR_RX_ALL (1 << 9)
+#define IR_DMA_ENABLE (1 << 10)
+#define IR_RX_ENABLE (1 << 11)
+#define IR_TX_ENABLE (1 << 12)
+#define IR_LOOPBACK (1 << 14)
+#define IR_SIR_MODE (IR_SIR | IR_DMA_ENABLE | \
+ IR_RX_ALL | IR_RX_ENABLE | IR_SF | \
+ IR_16CRC)
+
+/* ir_status */
+#define IR_RX_STATUS (1 << 9)
+#define IR_TX_STATUS (1 << 10)
+#define IR_PHYEN (1 << 15)
+
+/* ir_write_phy_config */
+#define IR_BR(x) (((x) & 0x3f) << 10) /* baud rate */
+#define IR_PW(x) (((x) & 0x1f) << 5) /* pulse width */
+#define IR_P(x) ((x) & 0x1f) /* preamble bits */
+
+/* Config2 */
+#define IR_MODE_INV (1 << 0)
+#define IR_ONE_PIN (1 << 1)
+#define IR_PHYCLK_40MHZ (0 << 2)
+#define IR_PHYCLK_48MHZ (1 << 2)
+#define IR_PHYCLK_56MHZ (2 << 2)
+#define IR_PHYCLK_64MHZ (3 << 2)
+#define IR_DP (1 << 4)
+#define IR_DA (1 << 5)
+#define IR_FLT_HIGH (0 << 6)
+#define IR_FLT_MEDHI (1 << 6)
+#define IR_FLT_MEDLO (2 << 6)
+#define IR_FLT_LO (3 << 6)
+#define IR_IEN (1 << 8)
+
+/* ir_enable */
+#define IR_HC (1 << 3) /* divide SBUS clock by 2 */
+#define IR_CE (1 << 2) /* clock enable */
+#define IR_C (1 << 1) /* coherency bit */
+#define IR_BE (1 << 0) /* set in big endian mode */
+
+#define NUM_IR_DESC 64
+#define RING_SIZE_4 0x0
+#define RING_SIZE_16 0x3
+#define RING_SIZE_64 0xF
+#define MAX_NUM_IR_DESC 64
+#define MAX_BUF_SIZE 2048
+
+/* Ring descriptor flags */
+#define AU_OWN (1 << 7) /* tx,rx */
+#define IR_DIS_CRC (1 << 6) /* tx */
+#define IR_BAD_CRC (1 << 5) /* tx */
+#define IR_NEED_PULSE (1 << 4) /* tx */
+#define IR_FORCE_UNDER (1 << 3) /* tx */
+#define IR_DISABLE_TX (1 << 2) /* tx */
+#define IR_HW_UNDER (1 << 0) /* tx */
+#define IR_TX_ERROR (IR_DIS_CRC | IR_BAD_CRC | IR_HW_UNDER)
+
+#define IR_PHY_ERROR (1 << 6) /* rx */
+#define IR_CRC_ERROR (1 << 5) /* rx */
+#define IR_MAX_LEN (1 << 4) /* rx */
+#define IR_FIFO_OVER (1 << 3) /* rx */
+#define IR_SIR_ERROR (1 << 2) /* rx */
+#define IR_RX_ERROR (IR_PHY_ERROR | IR_CRC_ERROR | \
+ IR_MAX_LEN | IR_FIFO_OVER | IR_SIR_ERROR)
+
+struct db_dest {
+ struct db_dest *pnext;
+ volatile u32 *vaddr;
+ dma_addr_t dma_addr;
+};
-static int au1k_irda_net_init(struct net_device *);
-static int au1k_irda_start(struct net_device *);
-static int au1k_irda_stop(struct net_device *dev);
-static int au1k_irda_hard_xmit(struct sk_buff *, struct net_device *);
-static int au1k_irda_rx(struct net_device *);
-static void au1k_irda_interrupt(int, void *);
-static void au1k_tx_timeout(struct net_device *);
-static int au1k_irda_ioctl(struct net_device *, struct ifreq *, int);
-static int au1k_irda_set_speed(struct net_device *dev, int speed);
+struct ring_dest {
+ u8 count_0; /* 7:0 */
+ u8 count_1; /* 12:8 */
+ u8 reserved;
+ u8 flags;
+ u8 addr_0; /* 7:0 */
+ u8 addr_1; /* 15:8 */
+ u8 addr_2; /* 23:16 */
+ u8 addr_3; /* 31:24 */
+};
-static void *dma_alloc(size_t, dma_addr_t *);
-static void dma_free(void *, size_t);
+/* Private data for each instance */
+struct au1k_private {
+ void __iomem *iobase;
+ int irq_rx, irq_tx;
+
+ struct db_dest *pDBfree;
+ struct db_dest db[2 * NUM_IR_DESC];
+ volatile struct ring_dest *rx_ring[NUM_IR_DESC];
+ volatile struct ring_dest *tx_ring[NUM_IR_DESC];
+ struct db_dest *rx_db_inuse[NUM_IR_DESC];
+ struct db_dest *tx_db_inuse[NUM_IR_DESC];
+ u32 rx_head;
+ u32 tx_head;
+ u32 tx_tail;
+ u32 tx_full;
+
+ iobuff_t rx_buff;
+
+ struct net_device *netdev;
+ struct timeval stamp;
+ struct timeval now;
+ struct qos_info qos;
+ struct irlap_cb *irlap;
+
+ u8 open;
+ u32 speed;
+ u32 newspeed;
+
+ struct timer_list timer;
+
+ struct resource *ioarea;
+ struct au1k_irda_platform_data *platdata;
+};
static int qos_mtt_bits = 0x07; /* 1 ms or more */
-static struct net_device *ir_devs[NUM_IR_IFF];
-static char version[] __devinitdata =
- "au1k_ircc:1.2 ppopov@mvista.com\n";
#define RUN_AT(x) (jiffies + (x))
-static DEFINE_SPINLOCK(ir_lock);
+static void au1k_irda_plat_set_phy_mode(struct au1k_private *p, int mode)
+{
+ if (p->platdata && p->platdata->set_phy_mode)
+ p->platdata->set_phy_mode(mode);
+}
-/*
- * IrDA peripheral bug. You have to read the register
- * twice to get the right value.
- */
-u32 read_ir_reg(u32 addr)
-{
- readl(addr);
- return readl(addr);
+static inline unsigned long irda_read(struct au1k_private *p,
+ unsigned long ofs)
+{
+ /*
+ * IrDA peripheral bug. You have to read the register
+ * twice to get the right value.
+ */
+ (void)__raw_readl(p->iobase + ofs);
+ return __raw_readl(p->iobase + ofs);
}
+static inline void irda_write(struct au1k_private *p, unsigned long ofs,
+ unsigned long val)
+{
+ __raw_writel(val, p->iobase + ofs);
+ wmb();
+}
/*
* Buffer allocation/deallocation routines. The buffer descriptor returned
- * has the virtual and dma address of a buffer suitable for
+ * has the virtual and dma address of a buffer suitable for
* both, receive and transmit operations.
*/
-static db_dest_t *GetFreeDB(struct au1k_private *aup)
+static struct db_dest *GetFreeDB(struct au1k_private *aup)
{
- db_dest_t *pDB;
- pDB = aup->pDBfree;
-
- if (pDB) {
- aup->pDBfree = pDB->pnext;
- }
- return pDB;
-}
+ struct db_dest *db;
+ db = aup->pDBfree;
-static void ReleaseDB(struct au1k_private *aup, db_dest_t *pDB)
-{
- db_dest_t *pDBfree = aup->pDBfree;
- if (pDBfree)
- pDBfree->pnext = pDB;
- aup->pDBfree = pDB;
+ if (db)
+ aup->pDBfree = db->pnext;
+ return db;
}
-
/*
DMA memory allocation, derived from pci_alloc_consistent.
However, the Au1000 data cache is coherent (when programmed
so), therefore we return KSEG0 address, not KSEG1.
*/
-static void *dma_alloc(size_t size, dma_addr_t * dma_handle)
+static void *dma_alloc(size_t size, dma_addr_t *dma_handle)
{
void *ret;
int gfp = GFP_ATOMIC | GFP_DMA;
- ret = (void *) __get_free_pages(gfp, get_order(size));
+ ret = (void *)__get_free_pages(gfp, get_order(size));
if (ret != NULL) {
memset(ret, 0, size);
@@ -125,7 +241,6 @@ static void *dma_alloc(size_t size, dma_addr_t * dma_handle)
return ret;
}
-
static void dma_free(void *vaddr, size_t size)
{
vaddr = (void *)KSEG0ADDR(vaddr);
@@ -133,206 +248,306 @@ static void dma_free(void *vaddr, size_t size)
}
-static void
-setup_hw_rings(struct au1k_private *aup, u32 rx_base, u32 tx_base)
+static void setup_hw_rings(struct au1k_private *aup, u32 rx_base, u32 tx_base)
{
int i;
- for (i=0; i<NUM_IR_DESC; i++) {
- aup->rx_ring[i] = (volatile ring_dest_t *)
- (rx_base + sizeof(ring_dest_t)*i);
+ for (i = 0; i < NUM_IR_DESC; i++) {
+ aup->rx_ring[i] = (volatile struct ring_dest *)
+ (rx_base + sizeof(struct ring_dest) * i);
}
- for (i=0; i<NUM_IR_DESC; i++) {
- aup->tx_ring[i] = (volatile ring_dest_t *)
- (tx_base + sizeof(ring_dest_t)*i);
+ for (i = 0; i < NUM_IR_DESC; i++) {
+ aup->tx_ring[i] = (volatile struct ring_dest *)
+ (tx_base + sizeof(struct ring_dest) * i);
}
}
-static int au1k_irda_init(void)
-{
- static unsigned version_printed = 0;
- struct au1k_private *aup;
- struct net_device *dev;
- int err;
-
- if (version_printed++ == 0) printk(version);
-
- dev = alloc_irdadev(sizeof(struct au1k_private));
- if (!dev)
- return -ENOMEM;
-
- dev->irq = AU1000_IRDA_RX_INT; /* TX has its own interrupt */
- err = au1k_irda_net_init(dev);
- if (err)
- goto out;
- err = register_netdev(dev);
- if (err)
- goto out1;
- ir_devs[0] = dev;
- printk(KERN_INFO "IrDA: Registered device %s\n", dev->name);
- return 0;
-
-out1:
- aup = netdev_priv(dev);
- dma_free((void *)aup->db[0].vaddr,
- MAX_BUF_SIZE * 2*NUM_IR_DESC);
- dma_free((void *)aup->rx_ring[0],
- 2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t)));
- kfree(aup->rx_buff.head);
-out:
- free_netdev(dev);
- return err;
-}
-
static int au1k_irda_init_iobuf(iobuff_t *io, int size)
{
io->head = kmalloc(size, GFP_KERNEL);
if (io->head != NULL) {
- io->truesize = size;
- io->in_frame = FALSE;
- io->state = OUTSIDE_FRAME;
- io->data = io->head;
+ io->truesize = size;
+ io->in_frame = FALSE;
+ io->state = OUTSIDE_FRAME;
+ io->data = io->head;
}
return io->head ? 0 : -ENOMEM;
}
-static const struct net_device_ops au1k_irda_netdev_ops = {
- .ndo_open = au1k_irda_start,
- .ndo_stop = au1k_irda_stop,
- .ndo_start_xmit = au1k_irda_hard_xmit,
- .ndo_tx_timeout = au1k_tx_timeout,
- .ndo_do_ioctl = au1k_irda_ioctl,
-};
-
-static int au1k_irda_net_init(struct net_device *dev)
+/*
+ * Set the IrDA communications speed.
+ */
+static int au1k_irda_set_speed(struct net_device *dev, int speed)
{
struct au1k_private *aup = netdev_priv(dev);
- int i, retval = 0, err;
- db_dest_t *pDB, *pDBfree;
- dma_addr_t temp;
+ volatile struct ring_dest *ptxd;
+ unsigned long control;
+ int ret = 0, timeout = 10, i;
- err = au1k_irda_init_iobuf(&aup->rx_buff, 14384);
- if (err)
- goto out1;
+ if (speed == aup->speed)
+ return ret;
- dev->netdev_ops = &au1k_irda_netdev_ops;
+ /* disable PHY first */
+ au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);
+ irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) & ~IR_PHYEN);
- irda_init_max_qos_capabilies(&aup->qos);
+ /* disable RX/TX */
+ irda_write(aup, IR_CONFIG_1,
+ irda_read(aup, IR_CONFIG_1) & ~(IR_RX_ENABLE | IR_TX_ENABLE));
+ msleep(20);
+ while (irda_read(aup, IR_STATUS) & (IR_RX_STATUS | IR_TX_STATUS)) {
+ msleep(20);
+ if (!timeout--) {
+ printk(KERN_ERR "%s: rx/tx disable timeout\n",
+ dev->name);
+ break;
+ }
+ }
- /* The only value we must override it the baudrate */
- aup->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600|
- IR_115200|IR_576000 |(IR_4000000 << 8);
-
- aup->qos.min_turn_time.bits = qos_mtt_bits;
- irda_qos_bits_to_value(&aup->qos);
+ /* disable DMA */
+ irda_write(aup, IR_CONFIG_1,
+ irda_read(aup, IR_CONFIG_1) & ~IR_DMA_ENABLE);
+ msleep(20);
- retval = -ENOMEM;
+ /* After we disable tx/rx. the index pointers go back to zero. */
+ aup->tx_head = aup->tx_tail = aup->rx_head = 0;
+ for (i = 0; i < NUM_IR_DESC; i++) {
+ ptxd = aup->tx_ring[i];
+ ptxd->flags = 0;
+ ptxd->count_0 = 0;
+ ptxd->count_1 = 0;
+ }
- /* Tx ring follows rx ring + 512 bytes */
- /* we need a 1k aligned buffer */
- aup->rx_ring[0] = (ring_dest_t *)
- dma_alloc(2*MAX_NUM_IR_DESC*(sizeof(ring_dest_t)), &temp);
- if (!aup->rx_ring[0])
- goto out2;
+ for (i = 0; i < NUM_IR_DESC; i++) {
+ ptxd = aup->rx_ring[i];
+ ptxd->count_0 = 0;
+ ptxd->count_1 = 0;
+ ptxd->flags = AU_OWN;
+ }
- /* allocate the data buffers */
- aup->db[0].vaddr =
- (void *)dma_alloc(MAX_BUF_SIZE * 2*NUM_IR_DESC, &temp);
- if (!aup->db[0].vaddr)
- goto out3;
+ if (speed == 4000000)
+ au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_FIR);
+ else
+ au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_SIR);
- setup_hw_rings(aup, (u32)aup->rx_ring[0], (u32)aup->rx_ring[0] + 512);
+ switch (speed) {
+ case 9600:
+ irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(11) | IR_PW(12));
+ irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
+ break;
+ case 19200:
+ irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(5) | IR_PW(12));
+ irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
+ break;
+ case 38400:
+ irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(2) | IR_PW(12));
+ irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
+ break;
+ case 57600:
+ irda_write(aup, IR_WRITE_PHY_CONFIG, IR_BR(1) | IR_PW(12));
+ irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
+ break;
+ case 115200:
+ irda_write(aup, IR_WRITE_PHY_CONFIG, IR_PW(12));
+ irda_write(aup, IR_CONFIG_1, IR_SIR_MODE);
+ break;
+ case 4000000:
+ irda_write(aup, IR_WRITE_PHY_CONFIG, IR_P(15));
+ irda_write(aup, IR_CONFIG_1, IR_FIR | IR_DMA_ENABLE |
+ IR_RX_ENABLE);
+ break;
+ default:
+ printk(KERN_ERR "%s unsupported speed %x\n", dev->name, speed);
+ ret = -EINVAL;
+ break;
+ }
- pDBfree = NULL;
- pDB = aup->db;
- for (i=0; i<(2*NUM_IR_DESC); i++) {
- pDB->pnext = pDBfree;
- pDBfree = pDB;
- pDB->vaddr =
- (u32 *)((unsigned)aup->db[0].vaddr + MAX_BUF_SIZE*i);
- pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
- pDB++;
+ aup->speed = speed;
+ irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) | IR_PHYEN);
+
+ control = irda_read(aup, IR_STATUS);
+ irda_write(aup, IR_RING_PROMPT, 0);
+
+ if (control & (1 << 14)) {
+ printk(KERN_ERR "%s: configuration error\n", dev->name);
+ } else {
+ if (control & (1 << 11))
+ printk(KERN_DEBUG "%s Valid SIR config\n", dev->name);
+ if (control & (1 << 12))
+ printk(KERN_DEBUG "%s Valid MIR config\n", dev->name);
+ if (control & (1 << 13))
+ printk(KERN_DEBUG "%s Valid FIR config\n", dev->name);
+ if (control & (1 << 10))
+ printk(KERN_DEBUG "%s TX enabled\n", dev->name);
+ if (control & (1 << 9))
+ printk(KERN_DEBUG "%s RX enabled\n", dev->name);
}
- aup->pDBfree = pDBfree;
- /* attach a data buffer to each descriptor */
- for (i=0; i<NUM_IR_DESC; i++) {
- pDB = GetFreeDB(aup);
- if (!pDB) goto out;
- aup->rx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
- aup->rx_ring[i]->addr_1 = (u8)((pDB->dma_addr>>8) & 0xff);
- aup->rx_ring[i]->addr_2 = (u8)((pDB->dma_addr>>16) & 0xff);
- aup->rx_ring[i]->addr_3 = (u8)((pDB->dma_addr>>24) & 0xff);
- aup->rx_db_inuse[i] = pDB;
+ return ret;
+}
+
+static void update_rx_stats(struct net_device *dev, u32 status, u32 count)
+{
+ struct net_device_stats *ps = &dev->stats;
+
+ ps->rx_packets++;
+
+ if (status & IR_RX_ERROR) {
+ ps->rx_errors++;
+ if (status & (IR_PHY_ERROR | IR_FIFO_OVER))
+ ps->rx_missed_errors++;
+ if (status & IR_MAX_LEN)
+ ps->rx_length_errors++;
+ if (status & IR_CRC_ERROR)
+ ps->rx_crc_errors++;
+ } else
+ ps->rx_bytes += count;
+}
+
+static void update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len)
+{
+ struct net_device_stats *ps = &dev->stats;
+
+ ps->tx_packets++;
+ ps->tx_bytes += pkt_len;
+
+ if (status & IR_TX_ERROR) {
+ ps->tx_errors++;
+ ps->tx_aborted_errors++;
}
- for (i=0; i<NUM_IR_DESC; i++) {
- pDB = GetFreeDB(aup);
- if (!pDB) goto out;
- aup->tx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
- aup->tx_ring[i]->addr_1 = (u8)((pDB->dma_addr>>8) & 0xff);
- aup->tx_ring[i]->addr_2 = (u8)((pDB->dma_addr>>16) & 0xff);
- aup->tx_ring[i]->addr_3 = (u8)((pDB->dma_addr>>24) & 0xff);
- aup->tx_ring[i]->count_0 = 0;
- aup->tx_ring[i]->count_1 = 0;
- aup->tx_ring[i]->flags = 0;
- aup->tx_db_inuse[i] = pDB;
+}
+
+static void au1k_tx_ack(struct net_device *dev)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+ volatile struct ring_dest *ptxd;
+
+ ptxd = aup->tx_ring[aup->tx_tail];
+ while (!(ptxd->flags & AU_OWN) && (aup->tx_tail != aup->tx_head)) {
+ update_tx_stats(dev, ptxd->flags,
+ (ptxd->count_1 << 8) | ptxd->count_0);
+ ptxd->count_0 = 0;
+ ptxd->count_1 = 0;
+ wmb();
+ aup->tx_tail = (aup->tx_tail + 1) & (NUM_IR_DESC - 1);
+ ptxd = aup->tx_ring[aup->tx_tail];
+
+ if (aup->tx_full) {
+ aup->tx_full = 0;
+ netif_wake_queue(dev);
+ }
}
-#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
- /* power on */
- bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK,
- BCSR_RESETS_IRDA_MODE_FULL);
-#endif
+ if (aup->tx_tail == aup->tx_head) {
+ if (aup->newspeed) {
+ au1k_irda_set_speed(dev, aup->newspeed);
+ aup->newspeed = 0;
+ } else {
+ irda_write(aup, IR_CONFIG_1,
+ irda_read(aup, IR_CONFIG_1) & ~IR_TX_ENABLE);
+ irda_write(aup, IR_CONFIG_1,
+ irda_read(aup, IR_CONFIG_1) | IR_RX_ENABLE);
+ irda_write(aup, IR_RING_PROMPT, 0);
+ }
+ }
+}
- return 0;
+static int au1k_irda_rx(struct net_device *dev)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+ volatile struct ring_dest *prxd;
+ struct sk_buff *skb;
+ struct db_dest *pDB;
+ u32 flags, count;
-out3:
- dma_free((void *)aup->rx_ring[0],
- 2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t)));
-out2:
- kfree(aup->rx_buff.head);
-out1:
- printk(KERN_ERR "au1k_init_module failed. Returns %d\n", retval);
- return retval;
+ prxd = aup->rx_ring[aup->rx_head];
+ flags = prxd->flags;
+
+ while (!(flags & AU_OWN)) {
+ pDB = aup->rx_db_inuse[aup->rx_head];
+ count = (prxd->count_1 << 8) | prxd->count_0;
+ if (!(flags & IR_RX_ERROR)) {
+ /* good frame */
+ update_rx_stats(dev, flags, count);
+ skb = alloc_skb(count + 1, GFP_ATOMIC);
+ if (skb == NULL) {
+ dev->stats.rx_dropped++;
+ continue;
+ }
+ skb_reserve(skb, 1);
+ if (aup->speed == 4000000)
+ skb_put(skb, count);
+ else
+ skb_put(skb, count - 2);
+ skb_copy_to_linear_data(skb, (void *)pDB->vaddr,
+ count - 2);
+ skb->dev = dev;
+ skb_reset_mac_header(skb);
+ skb->protocol = htons(ETH_P_IRDA);
+ netif_rx(skb);
+ prxd->count_0 = 0;
+ prxd->count_1 = 0;
+ }
+ prxd->flags |= AU_OWN;
+ aup->rx_head = (aup->rx_head + 1) & (NUM_IR_DESC - 1);
+ irda_write(aup, IR_RING_PROMPT, 0);
+
+ /* next descriptor */
+ prxd = aup->rx_ring[aup->rx_head];
+ flags = prxd->flags;
+
+ }
+ return 0;
}
+static irqreturn_t au1k_irda_interrupt(int dummy, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct au1k_private *aup = netdev_priv(dev);
+
+ irda_write(aup, IR_INT_CLEAR, 0); /* ack irda interrupts */
+
+ au1k_irda_rx(dev);
+ au1k_tx_ack(dev);
+
+ return IRQ_HANDLED;
+}
static int au1k_init(struct net_device *dev)
{
struct au1k_private *aup = netdev_priv(dev);
+ u32 enable, ring_address;
int i;
- u32 control;
- u32 ring_address;
- /* bring the device out of reset */
- control = 0xe; /* coherent, clock enable, one half system clock */
-
+ enable = IR_HC | IR_CE | IR_C;
#ifndef CONFIG_CPU_LITTLE_ENDIAN
- control |= 1;
+ enable |= IR_BE;
#endif
aup->tx_head = 0;
aup->tx_tail = 0;
aup->rx_head = 0;
- for (i=0; i<NUM_IR_DESC; i++) {
+ for (i = 0; i < NUM_IR_DESC; i++)
aup->rx_ring[i]->flags = AU_OWN;
- }
- writel(control, IR_INTERFACE_CONFIG);
- au_sync_delay(10);
+ irda_write(aup, IR_ENABLE, enable);
+ msleep(20);
- writel(read_ir_reg(IR_ENABLE) & ~0x8000, IR_ENABLE); /* disable PHY */
- au_sync_delay(1);
+ /* disable PHY */
+ au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);
+ irda_write(aup, IR_STATUS, irda_read(aup, IR_STATUS) & ~IR_PHYEN);
+ msleep(20);
- writel(MAX_BUF_SIZE, IR_MAX_PKT_LEN);
+ irda_write(aup, IR_MAX_PKT_LEN, MAX_BUF_SIZE);
ring_address = (u32)virt_to_phys((void *)aup->rx_ring[0]);
- writel(ring_address >> 26, IR_RING_BASE_ADDR_H);
- writel((ring_address >> 10) & 0xffff, IR_RING_BASE_ADDR_L);
+ irda_write(aup, IR_RING_BASE_ADDR_H, ring_address >> 26);
+ irda_write(aup, IR_RING_BASE_ADDR_L, (ring_address >> 10) & 0xffff);
- writel(RING_SIZE_64<<8 | RING_SIZE_64<<12, IR_RING_SIZE);
+ irda_write(aup, IR_RING_SIZE,
+ (RING_SIZE_64 << 8) | (RING_SIZE_64 << 12));
- writel(1<<2 | IR_ONE_PIN, IR_CONFIG_2); /* 48MHz */
- writel(0, IR_RING_ADDR_CMPR);
+ irda_write(aup, IR_CONFIG_2, IR_PHYCLK_48MHZ | IR_ONE_PIN);
+ irda_write(aup, IR_RING_ADDR_CMPR, 0);
au1k_irda_set_speed(dev, 9600);
return 0;
@@ -340,25 +555,28 @@ static int au1k_init(struct net_device *dev)
static int au1k_irda_start(struct net_device *dev)
{
- int retval;
- char hwname[32];
struct au1k_private *aup = netdev_priv(dev);
+ char hwname[32];
+ int retval;
- if ((retval = au1k_init(dev))) {
+ retval = au1k_init(dev);
+ if (retval) {
printk(KERN_ERR "%s: error in au1k_init\n", dev->name);
return retval;
}
- if ((retval = request_irq(AU1000_IRDA_TX_INT, au1k_irda_interrupt,
- 0, dev->name, dev))) {
- printk(KERN_ERR "%s: unable to get IRQ %d\n",
+ retval = request_irq(aup->irq_tx, &au1k_irda_interrupt, 0,
+ dev->name, dev);
+ if (retval) {
+ printk(KERN_ERR "%s: unable to get IRQ %d\n",
dev->name, dev->irq);
return retval;
}
- if ((retval = request_irq(AU1000_IRDA_RX_INT, au1k_irda_interrupt,
- 0, dev->name, dev))) {
- free_irq(AU1000_IRDA_TX_INT, dev);
- printk(KERN_ERR "%s: unable to get IRQ %d\n",
+ retval = request_irq(aup->irq_rx, &au1k_irda_interrupt, 0,
+ dev->name, dev);
+ if (retval) {
+ free_irq(aup->irq_tx, dev);
+ printk(KERN_ERR "%s: unable to get IRQ %d\n",
dev->name, dev->irq);
return retval;
}
@@ -368,9 +586,13 @@ static int au1k_irda_start(struct net_device *dev)
aup->irlap = irlap_open(dev, &aup->qos, hwname);
netif_start_queue(dev);
- writel(read_ir_reg(IR_CONFIG_2) | 1<<8, IR_CONFIG_2); /* int enable */
+ /* int enable */
+ irda_write(aup, IR_CONFIG_2, irda_read(aup, IR_CONFIG_2) | IR_IEN);
+
+ /* power up */
+ au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_SIR);
- aup->timer.expires = RUN_AT((3*HZ));
+ aup->timer.expires = RUN_AT((3 * HZ));
aup->timer.data = (unsigned long)dev;
return 0;
}
@@ -379,11 +601,12 @@ static int au1k_irda_stop(struct net_device *dev)
{
struct au1k_private *aup = netdev_priv(dev);
+ au1k_irda_plat_set_phy_mode(aup, AU1000_IRDA_PHY_MODE_OFF);
+
/* disable interrupts */
- writel(read_ir_reg(IR_CONFIG_2) & ~(1<<8), IR_CONFIG_2);
- writel(0, IR_CONFIG_1);
- writel(0, IR_INTERFACE_CONFIG); /* disable clock */
- au_sync();
+ irda_write(aup, IR_CONFIG_2, irda_read(aup, IR_CONFIG_2) & ~IR_IEN);
+ irda_write(aup, IR_CONFIG_1, 0);
+ irda_write(aup, IR_ENABLE, 0); /* disable clock */
if (aup->irlap) {
irlap_close(aup->irlap);
@@ -394,83 +617,12 @@ static int au1k_irda_stop(struct net_device *dev)
del_timer(&aup->timer);
/* disable the interrupt */
- free_irq(AU1000_IRDA_TX_INT, dev);
- free_irq(AU1000_IRDA_RX_INT, dev);
- return 0;
-}
-
-static void __exit au1k_irda_exit(void)
-{
- struct net_device *dev = ir_devs[0];
- struct au1k_private *aup = netdev_priv(dev);
+ free_irq(aup->irq_tx, dev);
+ free_irq(aup->irq_rx, dev);
- unregister_netdev(dev);
-
- dma_free((void *)aup->db[0].vaddr,
- MAX_BUF_SIZE * 2*NUM_IR_DESC);
- dma_free((void *)aup->rx_ring[0],
- 2 * MAX_NUM_IR_DESC*(sizeof(ring_dest_t)));
- kfree(aup->rx_buff.head);
- free_netdev(dev);
-}
-
-
-static inline void
-update_tx_stats(struct net_device *dev, u32 status, u32 pkt_len)
-{
- struct au1k_private *aup = netdev_priv(dev);
- struct net_device_stats *ps = &aup->stats;
-
- ps->tx_packets++;
- ps->tx_bytes += pkt_len;
-
- if (status & IR_TX_ERROR) {
- ps->tx_errors++;
- ps->tx_aborted_errors++;
- }
-}
-
-
-static void au1k_tx_ack(struct net_device *dev)
-{
- struct au1k_private *aup = netdev_priv(dev);
- volatile ring_dest_t *ptxd;
-
- ptxd = aup->tx_ring[aup->tx_tail];
- while (!(ptxd->flags & AU_OWN) && (aup->tx_tail != aup->tx_head)) {
- update_tx_stats(dev, ptxd->flags,
- ptxd->count_1<<8 | ptxd->count_0);
- ptxd->count_0 = 0;
- ptxd->count_1 = 0;
- au_sync();
-
- aup->tx_tail = (aup->tx_tail + 1) & (NUM_IR_DESC - 1);
- ptxd = aup->tx_ring[aup->tx_tail];
-
- if (aup->tx_full) {
- aup->tx_full = 0;
- netif_wake_queue(dev);
- }
- }
-
- if (aup->tx_tail == aup->tx_head) {
- if (aup->newspeed) {
- au1k_irda_set_speed(dev, aup->newspeed);
- aup->newspeed = 0;
- }
- else {
- writel(read_ir_reg(IR_CONFIG_1) & ~IR_TX_ENABLE,
- IR_CONFIG_1);
- au_sync();
- writel(read_ir_reg(IR_CONFIG_1) | IR_RX_ENABLE,
- IR_CONFIG_1);
- writel(0, IR_RING_PROMPT);
- au_sync();
- }
- }
+ return 0;
}
-
/*
* Au1000 transmit routine.
*/
@@ -478,15 +630,12 @@ static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct au1k_private *aup = netdev_priv(dev);
int speed = irda_get_next_speed(skb);
- volatile ring_dest_t *ptxd;
- u32 len;
-
- u32 flags;
- db_dest_t *pDB;
+ volatile struct ring_dest *ptxd;
+ struct db_dest *pDB;
+ u32 len, flags;
- if (speed != aup->speed && speed != -1) {
+ if (speed != aup->speed && speed != -1)
aup->newspeed = speed;
- }
if ((skb->len == 0) && (aup->newspeed)) {
if (aup->tx_tail == aup->tx_head) {
@@ -504,138 +653,47 @@ static int au1k_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
printk(KERN_DEBUG "%s: tx_full\n", dev->name);
netif_stop_queue(dev);
aup->tx_full = 1;
- return NETDEV_TX_BUSY;
- }
- else if (((aup->tx_head + 1) & (NUM_IR_DESC - 1)) == aup->tx_tail) {
+ return 1;
+ } else if (((aup->tx_head + 1) & (NUM_IR_DESC - 1)) == aup->tx_tail) {
printk(KERN_DEBUG "%s: tx_full\n", dev->name);
netif_stop_queue(dev);
aup->tx_full = 1;
- return NETDEV_TX_BUSY;
+ return 1;
}
pDB = aup->tx_db_inuse[aup->tx_head];
#if 0
- if (read_ir_reg(IR_RX_BYTE_CNT) != 0) {
- printk("tx warning: rx byte cnt %x\n",
- read_ir_reg(IR_RX_BYTE_CNT));
+ if (irda_read(aup, IR_RX_BYTE_CNT) != 0) {
+ printk(KERN_DEBUG "tx warning: rx byte cnt %x\n",
+ irda_read(aup, IR_RX_BYTE_CNT));
}
#endif
-
+
if (aup->speed == 4000000) {
/* FIR */
- skb_copy_from_linear_data(skb, pDB->vaddr, skb->len);
+ skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
ptxd->count_0 = skb->len & 0xff;
ptxd->count_1 = (skb->len >> 8) & 0xff;
-
- }
- else {
+ } else {
/* SIR */
len = async_wrap_skb(skb, (u8 *)pDB->vaddr, MAX_BUF_SIZE);
ptxd->count_0 = len & 0xff;
ptxd->count_1 = (len >> 8) & 0xff;
ptxd->flags |= IR_DIS_CRC;
- au_writel(au_readl(0xae00000c) & ~(1<<13), 0xae00000c);
}
ptxd->flags |= AU_OWN;
- au_sync();
+ wmb();
- writel(read_ir_reg(IR_CONFIG_1) | IR_TX_ENABLE, IR_CONFIG_1);
- writel(0, IR_RING_PROMPT);
- au_sync();
+ irda_write(aup, IR_CONFIG_1,
+ irda_read(aup, IR_CONFIG_1) | IR_TX_ENABLE);
+ irda_write(aup, IR_RING_PROMPT, 0);
dev_kfree_skb(skb);
aup->tx_head = (aup->tx_head + 1) & (NUM_IR_DESC - 1);
return NETDEV_TX_OK;
}
-
-static inline void
-update_rx_stats(struct net_device *dev, u32 status, u32 count)
-{
- struct au1k_private *aup = netdev_priv(dev);
- struct net_device_stats *ps = &aup->stats;
-
- ps->rx_packets++;
-
- if (status & IR_RX_ERROR) {
- ps->rx_errors++;
- if (status & (IR_PHY_ERROR|IR_FIFO_OVER))
- ps->rx_missed_errors++;
- if (status & IR_MAX_LEN)
- ps->rx_length_errors++;
- if (status & IR_CRC_ERROR)
- ps->rx_crc_errors++;
- }
- else
- ps->rx_bytes += count;
-}
-
-/*
- * Au1000 receive routine.
- */
-static int au1k_irda_rx(struct net_device *dev)
-{
- struct au1k_private *aup = netdev_priv(dev);
- struct sk_buff *skb;
- volatile ring_dest_t *prxd;
- u32 flags, count;
- db_dest_t *pDB;
-
- prxd = aup->rx_ring[aup->rx_head];
- flags = prxd->flags;
-
- while (!(flags & AU_OWN)) {
- pDB = aup->rx_db_inuse[aup->rx_head];
- count = prxd->count_1<<8 | prxd->count_0;
- if (!(flags & IR_RX_ERROR)) {
- /* good frame */
- update_rx_stats(dev, flags, count);
- skb=alloc_skb(count+1,GFP_ATOMIC);
- if (skb == NULL) {
- aup->netdev->stats.rx_dropped++;
- continue;
- }
- skb_reserve(skb, 1);
- if (aup->speed == 4000000)
- skb_put(skb, count);
- else
- skb_put(skb, count-2);
- skb_copy_to_linear_data(skb, pDB->vaddr, count - 2);
- skb->dev = dev;
- skb_reset_mac_header(skb);
- skb->protocol = htons(ETH_P_IRDA);
- netif_rx(skb);
- prxd->count_0 = 0;
- prxd->count_1 = 0;
- }
- prxd->flags |= AU_OWN;
- aup->rx_head = (aup->rx_head + 1) & (NUM_IR_DESC - 1);
- writel(0, IR_RING_PROMPT);
- au_sync();
-
- /* next descriptor */
- prxd = aup->rx_ring[aup->rx_head];
- flags = prxd->flags;
-
- }
- return 0;
-}
-
-
-static irqreturn_t au1k_irda_interrupt(int dummy, void *dev_id)
-{
- struct net_device *dev = dev_id;
-
- writel(0, IR_INT_CLEAR); /* ack irda interrupts */
-
- au1k_irda_rx(dev);
- au1k_tx_ack(dev);
-
- return IRQ_HANDLED;
-}
-
-
/*
* The Tx ring has been full longer than the watchdog timeout
* value. The transmitter must be hung?
@@ -653,142 +711,7 @@ static void au1k_tx_timeout(struct net_device *dev)
netif_wake_queue(dev);
}
-
-/*
- * Set the IrDA communications speed.
- */
-static int
-au1k_irda_set_speed(struct net_device *dev, int speed)
-{
- unsigned long flags;
- struct au1k_private *aup = netdev_priv(dev);
- u32 control;
- int ret = 0, timeout = 10, i;
- volatile ring_dest_t *ptxd;
-#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
- unsigned long irda_resets;
-#endif
-
- if (speed == aup->speed)
- return ret;
-
- spin_lock_irqsave(&ir_lock, flags);
-
- /* disable PHY first */
- writel(read_ir_reg(IR_ENABLE) & ~0x8000, IR_ENABLE);
-
- /* disable RX/TX */
- writel(read_ir_reg(IR_CONFIG_1) & ~(IR_RX_ENABLE|IR_TX_ENABLE),
- IR_CONFIG_1);
- au_sync_delay(1);
- while (read_ir_reg(IR_ENABLE) & (IR_RX_STATUS | IR_TX_STATUS)) {
- mdelay(1);
- if (!timeout--) {
- printk(KERN_ERR "%s: rx/tx disable timeout\n",
- dev->name);
- break;
- }
- }
-
- /* disable DMA */
- writel(read_ir_reg(IR_CONFIG_1) & ~IR_DMA_ENABLE, IR_CONFIG_1);
- au_sync_delay(1);
-
- /*
- * After we disable tx/rx. the index pointers
- * go back to zero.
- */
- aup->tx_head = aup->tx_tail = aup->rx_head = 0;
- for (i=0; i<NUM_IR_DESC; i++) {
- ptxd = aup->tx_ring[i];
- ptxd->flags = 0;
- ptxd->count_0 = 0;
- ptxd->count_1 = 0;
- }
-
- for (i=0; i<NUM_IR_DESC; i++) {
- ptxd = aup->rx_ring[i];
- ptxd->count_0 = 0;
- ptxd->count_1 = 0;
- ptxd->flags = AU_OWN;
- }
-
- if (speed == 4000000) {
-#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
- bcsr_mod(BCSR_RESETS, 0, BCSR_RESETS_FIR_SEL);
-#else /* Pb1000 and Pb1100 */
- writel(1<<13, CPLD_AUX1);
-#endif
- }
- else {
-#if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100)
- bcsr_mod(BCSR_RESETS, BCSR_RESETS_FIR_SEL, 0);
-#else /* Pb1000 and Pb1100 */
- writel(readl(CPLD_AUX1) & ~(1<<13), CPLD_AUX1);
-#endif
- }
-
- switch (speed) {
- case 9600:
- writel(11<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
- writel(IR_SIR_MODE, IR_CONFIG_1);
- break;
- case 19200:
- writel(5<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
- writel(IR_SIR_MODE, IR_CONFIG_1);
- break;
- case 38400:
- writel(2<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
- writel(IR_SIR_MODE, IR_CONFIG_1);
- break;
- case 57600:
- writel(1<<10 | 12<<5, IR_WRITE_PHY_CONFIG);
- writel(IR_SIR_MODE, IR_CONFIG_1);
- break;
- case 115200:
- writel(12<<5, IR_WRITE_PHY_CONFIG);
- writel(IR_SIR_MODE, IR_CONFIG_1);
- break;
- case 4000000:
- writel(0xF, IR_WRITE_PHY_CONFIG);
- writel(IR_FIR|IR_DMA_ENABLE|IR_RX_ENABLE, IR_CONFIG_1);
- break;
- default:
- printk(KERN_ERR "%s unsupported speed %x\n", dev->name, speed);
- ret = -EINVAL;
- break;
- }
-
- aup->speed = speed;
- writel(read_ir_reg(IR_ENABLE) | 0x8000, IR_ENABLE);
- au_sync();
-
- control = read_ir_reg(IR_ENABLE);
- writel(0, IR_RING_PROMPT);
- au_sync();
-
- if (control & (1<<14)) {
- printk(KERN_ERR "%s: configuration error\n", dev->name);
- }
- else {
- if (control & (1<<11))
- printk(KERN_DEBUG "%s Valid SIR config\n", dev->name);
- if (control & (1<<12))
- printk(KERN_DEBUG "%s Valid MIR config\n", dev->name);
- if (control & (1<<13))
- printk(KERN_DEBUG "%s Valid FIR config\n", dev->name);
- if (control & (1<<10))
- printk(KERN_DEBUG "%s TX enabled\n", dev->name);
- if (control & (1<<9))
- printk(KERN_DEBUG "%s RX enabled\n", dev->name);
- }
-
- spin_unlock_irqrestore(&ir_lock, flags);
- return ret;
-}
-
-static int
-au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
+static int au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
{
struct if_irda_req *rq = (struct if_irda_req *)ifreq;
struct au1k_private *aup = netdev_priv(dev);
@@ -829,8 +752,218 @@ au1k_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
return ret;
}
+static const struct net_device_ops au1k_irda_netdev_ops = {
+ .ndo_open = au1k_irda_start,
+ .ndo_stop = au1k_irda_stop,
+ .ndo_start_xmit = au1k_irda_hard_xmit,
+ .ndo_tx_timeout = au1k_tx_timeout,
+ .ndo_do_ioctl = au1k_irda_ioctl,
+};
+
+static int __devinit au1k_irda_net_init(struct net_device *dev)
+{
+ struct au1k_private *aup = netdev_priv(dev);
+ struct db_dest *pDB, *pDBfree;
+ int i, err, retval = 0;
+ dma_addr_t temp;
+
+ err = au1k_irda_init_iobuf(&aup->rx_buff, 14384);
+ if (err)
+ goto out1;
+
+ dev->netdev_ops = &au1k_irda_netdev_ops;
+
+ irda_init_max_qos_capabilies(&aup->qos);
+
+	/* The only value we must override is the baudrate */
+ aup->qos.baud_rate.bits = IR_9600 | IR_19200 | IR_38400 |
+ IR_57600 | IR_115200 | IR_576000 | (IR_4000000 << 8);
+
+ aup->qos.min_turn_time.bits = qos_mtt_bits;
+ irda_qos_bits_to_value(&aup->qos);
+
+ retval = -ENOMEM;
+
+ /* Tx ring follows rx ring + 512 bytes */
+ /* we need a 1k aligned buffer */
+ aup->rx_ring[0] = (struct ring_dest *)
+ dma_alloc(2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)),
+ &temp);
+ if (!aup->rx_ring[0])
+ goto out2;
+
+ /* allocate the data buffers */
+ aup->db[0].vaddr =
+ (void *)dma_alloc(MAX_BUF_SIZE * 2 * NUM_IR_DESC, &temp);
+ if (!aup->db[0].vaddr)
+ goto out3;
+
+ setup_hw_rings(aup, (u32)aup->rx_ring[0], (u32)aup->rx_ring[0] + 512);
+
+ pDBfree = NULL;
+ pDB = aup->db;
+ for (i = 0; i < (2 * NUM_IR_DESC); i++) {
+ pDB->pnext = pDBfree;
+ pDBfree = pDB;
+ pDB->vaddr =
+ (u32 *)((unsigned)aup->db[0].vaddr + (MAX_BUF_SIZE * i));
+ pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr);
+ pDB++;
+ }
+ aup->pDBfree = pDBfree;
+
+ /* attach a data buffer to each descriptor */
+ for (i = 0; i < NUM_IR_DESC; i++) {
+ pDB = GetFreeDB(aup);
+ if (!pDB)
+ goto out3;
+ aup->rx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
+ aup->rx_ring[i]->addr_1 = (u8)((pDB->dma_addr >> 8) & 0xff);
+ aup->rx_ring[i]->addr_2 = (u8)((pDB->dma_addr >> 16) & 0xff);
+ aup->rx_ring[i]->addr_3 = (u8)((pDB->dma_addr >> 24) & 0xff);
+ aup->rx_db_inuse[i] = pDB;
+ }
+ for (i = 0; i < NUM_IR_DESC; i++) {
+ pDB = GetFreeDB(aup);
+ if (!pDB)
+ goto out3;
+ aup->tx_ring[i]->addr_0 = (u8)(pDB->dma_addr & 0xff);
+ aup->tx_ring[i]->addr_1 = (u8)((pDB->dma_addr >> 8) & 0xff);
+ aup->tx_ring[i]->addr_2 = (u8)((pDB->dma_addr >> 16) & 0xff);
+ aup->tx_ring[i]->addr_3 = (u8)((pDB->dma_addr >> 24) & 0xff);
+ aup->tx_ring[i]->count_0 = 0;
+ aup->tx_ring[i]->count_1 = 0;
+ aup->tx_ring[i]->flags = 0;
+ aup->tx_db_inuse[i] = pDB;
+ }
+
+ return 0;
+
+out3:
+ dma_free((void *)aup->rx_ring[0],
+ 2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
+out2:
+ kfree(aup->rx_buff.head);
+out1:
+ printk(KERN_ERR "au1k_irda_net_init() failed. Returns %d\n", retval);
+ return retval;
+}
+
+static int __devinit au1k_irda_probe(struct platform_device *pdev)
+{
+ struct au1k_private *aup;
+ struct net_device *dev;
+ struct resource *r;
+ int err;
+
+ dev = alloc_irdadev(sizeof(struct au1k_private));
+ if (!dev)
+ return -ENOMEM;
+
+ aup = netdev_priv(dev);
+
+ aup->platdata = pdev->dev.platform_data;
+
+ err = -EINVAL;
+ r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!r)
+ goto out;
+
+ aup->irq_tx = r->start;
+
+ r = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+ if (!r)
+ goto out;
+
+ aup->irq_rx = r->start;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ goto out;
+
+ err = -EBUSY;
+ aup->ioarea = request_mem_region(r->start, r->end - r->start + 1,
+ pdev->name);
+ if (!aup->ioarea)
+ goto out;
+
+ aup->iobase = ioremap_nocache(r->start, r->end - r->start + 1);
+ if (!aup->iobase)
+ goto out2;
+
+ dev->irq = aup->irq_rx;
+
+ err = au1k_irda_net_init(dev);
+ if (err)
+ goto out3;
+ err = register_netdev(dev);
+ if (err)
+ goto out4;
+
+ platform_set_drvdata(pdev, dev);
+
+ printk(KERN_INFO "IrDA: Registered device %s\n", dev->name);
+ return 0;
+
+out4:
+ dma_free((void *)aup->db[0].vaddr,
+ MAX_BUF_SIZE * 2 * NUM_IR_DESC);
+ dma_free((void *)aup->rx_ring[0],
+ 2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
+ kfree(aup->rx_buff.head);
+out3:
+ iounmap(aup->iobase);
+out2:
+ release_resource(aup->ioarea);
+ kfree(aup->ioarea);
+out:
+ free_netdev(dev);
+ return err;
+}
+
+static int __devexit au1k_irda_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct au1k_private *aup = netdev_priv(dev);
+
+ unregister_netdev(dev);
+
+ dma_free((void *)aup->db[0].vaddr,
+ MAX_BUF_SIZE * 2 * NUM_IR_DESC);
+ dma_free((void *)aup->rx_ring[0],
+ 2 * MAX_NUM_IR_DESC * (sizeof(struct ring_dest)));
+ kfree(aup->rx_buff.head);
+
+ iounmap(aup->iobase);
+ release_resource(aup->ioarea);
+ kfree(aup->ioarea);
+
+ free_netdev(dev);
+
+ return 0;
+}
+
+static struct platform_driver au1k_irda_driver = {
+ .driver = {
+ .name = "au1000-irda",
+ .owner = THIS_MODULE,
+ },
+ .probe = au1k_irda_probe,
+ .remove = __devexit_p(au1k_irda_remove),
+};
+
+static int __init au1k_irda_load(void)
+{
+ return platform_driver_register(&au1k_irda_driver);
+}
+
+static void __exit au1k_irda_unload(void)
+{
+ return platform_driver_unregister(&au1k_irda_driver);
+}
+
MODULE_AUTHOR("Pete Popov <ppopov@mvista.com>");
MODULE_DESCRIPTION("Au1000 IrDA Device Driver");
-module_init(au1k_irda_init);
-module_exit(au1k_irda_exit);
+module_init(au1k_irda_load);
+module_exit(au1k_irda_unload);
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index 9d4ce1a..a561ae4 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -806,18 +806,7 @@ static struct platform_driver bfin_ir_driver = {
},
};
-static int __init bfin_sir_init(void)
-{
- return platform_driver_register(&bfin_ir_driver);
-}
-
-static void __exit bfin_sir_exit(void)
-{
- platform_driver_unregister(&bfin_ir_driver);
-}
-
-module_init(bfin_sir_init);
-module_exit(bfin_sir_exit);
+module_platform_driver(bfin_ir_driver);
module_param(max_rate, int, 0);
MODULE_PARM_DESC(max_rate, "Maximum baud rate (115200, 57600, 38400, 19200, 9600)");
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index b45b2cc..64f403d 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -197,7 +197,7 @@ static char *driver_name = DRIVER_NAME;
static int max_baud = 4000000;
#ifdef USE_PROBE
-static int do_probe = 0;
+static bool do_probe = false;
#endif
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index d9267cb..72f687b 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1914,41 +1914,8 @@ static struct usb_driver irda_driver = {
#endif
};
-/************************* MODULE CALLBACKS *************************/
-/*
- * Deal with module insertion/removal
- * Mostly tell USB about our existence
- */
-
-/*------------------------------------------------------------------*/
-/*
- * Module insertion
- */
-static int __init usb_irda_init(void)
-{
- int ret;
-
- ret = usb_register(&irda_driver);
- if (ret < 0)
- return ret;
-
- IRDA_MESSAGE("USB IrDA support registered\n");
- return 0;
-}
-module_init(usb_irda_init);
+module_usb_driver(irda_driver);
-/*------------------------------------------------------------------*/
-/*
- * Module removal
- */
-static void __exit usb_irda_cleanup(void)
-{
- /* Deregister the driver and remove all pending instances */
- usb_deregister(&irda_driver);
-}
-module_exit(usb_irda_cleanup);
-
-/*------------------------------------------------------------------*/
/*
* Module parameters
*/
diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c
index cb90d64..79aebee 100644
--- a/drivers/net/irda/kingsun-sir.c
+++ b/drivers/net/irda/kingsun-sir.c
@@ -621,24 +621,7 @@ static struct usb_driver irda_driver = {
#endif
};
-/*
- * Module insertion
- */
-static int __init kingsun_init(void)
-{
- return usb_register(&irda_driver);
-}
-module_init(kingsun_init);
-
-/*
- * Module removal
- */
-static void __exit kingsun_cleanup(void)
-{
- /* Deregister the driver and remove all pending instances */
- usb_deregister(&irda_driver);
-}
-module_exit(kingsun_cleanup);
+module_usb_driver(irda_driver);
MODULE_AUTHOR("Alex Villacís Lasso <a_villacis@palosanto.com>");
MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun/DonShine");
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index 1046014..abe689d 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -901,26 +901,7 @@ static struct usb_driver irda_driver = {
#endif
};
-/*
- * Module insertion
- */
-static int __init ks959_init(void)
-{
- return usb_register(&irda_driver);
-}
-
-module_init(ks959_init);
-
-/*
- * Module removal
- */
-static void __exit ks959_cleanup(void)
-{
- /* Deregister the driver and remove all pending instances */
- usb_deregister(&irda_driver);
-}
-
-module_exit(ks959_cleanup);
+module_usb_driver(irda_driver);
MODULE_AUTHOR("Alex Villacís Lasso <a_villacis@palosanto.com>");
MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun KS-959");
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c
index 9cc142f..f8c0108 100644
--- a/drivers/net/irda/ksdazzle-sir.c
+++ b/drivers/net/irda/ksdazzle-sir.c
@@ -796,26 +796,7 @@ static struct usb_driver irda_driver = {
#endif
};
-/*
- * Module insertion
- */
-static int __init ksdazzle_init(void)
-{
- return usb_register(&irda_driver);
-}
-
-module_init(ksdazzle_init);
-
-/*
- * Module removal
- */
-static void __exit ksdazzle_cleanup(void)
-{
- /* Deregister the driver and remove all pending instances */
- usb_deregister(&irda_driver);
-}
-
-module_exit(ksdazzle_cleanup);
+module_usb_driver(irda_driver);
MODULE_AUTHOR("Alex Villacís Lasso <a_villacis@palosanto.com>");
MODULE_DESCRIPTION("IrDA-USB Dongle Driver for KingSun Dazzle");
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index be52bfe..1a00b59 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -968,25 +968,4 @@ static void mcs_disconnect(struct usb_interface *intf)
IRDA_DEBUG(0, "MCS7780 now disconnected.\n");
}
-/* Module insertion */
-static int __init mcs_init(void)
-{
- int result;
-
- /* register this driver with the USB subsystem */
- result = usb_register(&mcs_driver);
- if (result)
- IRDA_ERROR("usb_register failed. Error number %d\n", result);
-
- return result;
-}
-module_init(mcs_init);
-
-/* Module removal */
-static void __exit mcs_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&mcs_driver);
-}
-module_exit(mcs_exit);
-
+module_usb_driver(mcs_driver);
diff --git a/drivers/net/irda/nsc-ircc.c b/drivers/net/irda/nsc-ircc.c
index b56636d..2a4f2f1 100644
--- a/drivers/net/irda/nsc-ircc.c
+++ b/drivers/net/irda/nsc-ircc.c
@@ -1664,7 +1664,7 @@ static int nsc_ircc_dma_xmit_complete(struct nsc_ircc_cb *self)
switch_bank(iobase, BANK0);
outb(inb(iobase+MCR) & ~MCR_DMA_EN, iobase+MCR);
- /* Check for underrrun! */
+ /* Check for underrun! */
if (inb(iobase+ASCR) & ASCR_TXUR) {
self->netdev->stats.tx_errors++;
self->netdev->stats.tx_fifo_errors++;
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index d0851df..81d5275 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -966,18 +966,7 @@ static struct platform_driver pxa_ir_driver = {
.resume = pxa_irda_resume,
};
-static int __init pxa_irda_init(void)
-{
- return platform_driver_register(&pxa_ir_driver);
-}
-
-static void __exit pxa_irda_exit(void)
-{
- platform_driver_unregister(&pxa_ir_driver);
-}
-
-module_init(pxa_irda_init);
-module_exit(pxa_irda_exit);
+module_platform_driver(pxa_ir_driver);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-ir");
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
index d275e27..725d6b3 100644
--- a/drivers/net/irda/sh_irda.c
+++ b/drivers/net/irda/sh_irda.c
@@ -873,18 +873,7 @@ static struct platform_driver sh_irda_driver = {
},
};
-static int __init sh_irda_init(void)
-{
- return platform_driver_register(&sh_irda_driver);
-}
-
-static void __exit sh_irda_exit(void)
-{
- platform_driver_unregister(&sh_irda_driver);
-}
-
-module_init(sh_irda_init);
-module_exit(sh_irda_exit);
+module_platform_driver(sh_irda_driver);
MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
MODULE_DESCRIPTION("SuperH IrDA driver");
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index ed7d7d6..e6661b5 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -808,18 +808,7 @@ static struct platform_driver sh_sir_driver = {
},
};
-static int __init sh_sir_init(void)
-{
- return platform_driver_register(&sh_sir_driver);
-}
-
-static void __exit sh_sir_exit(void)
-{
- platform_driver_unregister(&sh_sir_driver);
-}
-
-module_init(sh_sir_init);
-module_exit(sh_sir_exit);
+module_platform_driver(sh_sir_driver);
MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
MODULE_DESCRIPTION("SuperH IrDA driver");
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 8b1c348..6c95d40 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -79,7 +79,7 @@ MODULE_AUTHOR("Daniele Peri <peri@csai.unipa.it>");
MODULE_DESCRIPTION("SMC IrCC SIR/FIR controller driver");
MODULE_LICENSE("GPL");
-static int smsc_nopnp = 1;
+static bool smsc_nopnp = true;
module_param_named(nopnp, smsc_nopnp, bool, 0);
MODULE_PARM_DESC(nopnp, "Do not use PNP to detect controller settings, defaults to true");
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c
index 41c96b3..e6e59a0 100644
--- a/drivers/net/irda/stir4200.c
+++ b/drivers/net/irda/stir4200.c
@@ -750,7 +750,7 @@ static int stir_transmit_thread(void *arg)
write_reg(stir, REG_CTRL1, CTRL1_TXPWD|CTRL1_RXPWD);
- refrigerator();
+ try_to_freeze();
if (change_speed(stir, stir->speed))
break;
@@ -1133,21 +1133,4 @@ static struct usb_driver irda_driver = {
#endif
};
-/*
- * Module insertion
- */
-static int __init stir_init(void)
-{
- return usb_register(&irda_driver);
-}
-module_init(stir_init);
-
-/*
- * Module removal
- */
-static void __exit stir_cleanup(void)
-{
- /* Deregister the driver and remove all pending instances */
- usb_deregister(&irda_driver);
-}
-module_exit(stir_cleanup);
+module_usb_driver(irda_driver);
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index 6d64790..2d456dd 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -942,14 +942,14 @@ static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self)
iobase = self->io.fir_base;
/* Disable DMA */
// DisableDmaChannel(self->io.dma);
- /* Check for underrrun! */
+ /* Check for underrun! */
/* Clear bit, by writing 1 into it */
Tx_status = GetTXStatus(iobase);
if (Tx_status & 0x08) {
self->netdev->stats.tx_errors++;
self->netdev->stats.tx_fifo_errors++;
hwreset(self);
-// how to clear underrrun ?
+ /* how to clear underrun? */
} else {
self->netdev->stats.tx_packets++;
ResetChip(iobase, 3);
diff --git a/drivers/net/irda/w83977af_ir.c b/drivers/net/irda/w83977af_ir.c
index c436660..7d43506 100644
--- a/drivers/net/irda/w83977af_ir.c
+++ b/drivers/net/irda/w83977af_ir.c
@@ -677,7 +677,7 @@ static void w83977af_dma_xmit_complete(struct w83977af_ir *self)
switch_bank(iobase, SET0);
outb(inb(iobase+HCR) & ~HCR_EN_DMA, iobase+HCR);
- /* Check for underrrun! */
+ /* Check for underrun! */
if (inb(iobase+AUDR) & AUDR_UNDR) {
IRDA_DEBUG(0, "%s(), Transmit underrun!\n", __func__ );
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 4ce9e5f..b71998d 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -169,7 +169,7 @@ static void loopback_setup(struct net_device *dev)
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
| NETIF_F_ALL_TSO
| NETIF_F_UFO
- | NETIF_F_NO_CSUM
+ | NETIF_F_HW_CSUM
| NETIF_F_RXCSUM
| NETIF_F_HIGHDMA
| NETIF_F_LLTX
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 7413497..9ea9921 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -26,6 +26,7 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
#include <linux/if_link.h>
#include <linux/if_macvlan.h>
#include <net/rtnetlink.h>
@@ -172,6 +173,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
if (!skb)
return RX_HANDLER_CONSUMED;
+ eth = eth_hdr(skb);
src = macvlan_hash_lookup(port, eth->h_source);
if (!src)
/* frame comes from an external address */
@@ -520,26 +522,23 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
return stats;
}
-static void macvlan_vlan_rx_add_vid(struct net_device *dev,
+static int macvlan_vlan_rx_add_vid(struct net_device *dev,
unsigned short vid)
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
- const struct net_device_ops *ops = lowerdev->netdev_ops;
- if (ops->ndo_vlan_rx_add_vid)
- ops->ndo_vlan_rx_add_vid(lowerdev, vid);
+ return vlan_vid_add(lowerdev, vid);
}
-static void macvlan_vlan_rx_kill_vid(struct net_device *dev,
+static int macvlan_vlan_rx_kill_vid(struct net_device *dev,
unsigned short vid)
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct net_device *lowerdev = vlan->lowerdev;
- const struct net_device_ops *ops = lowerdev->netdev_ops;
- if (ops->ndo_vlan_rx_kill_vid)
- ops->ndo_vlan_rx_kill_vid(lowerdev, vid);
+ vlan_vid_del(lowerdev, vid);
+ return 0;
}
static void macvlan_ethtool_get_drvinfo(struct net_device *dev,
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 1b7082d..58dc117 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -145,8 +145,8 @@ static void macvtap_put_queue(struct macvtap_queue *q)
if (vlan) {
int index = get_slot(vlan, q);
- rcu_assign_pointer(vlan->taps[index], NULL);
- rcu_assign_pointer(q->vlan, NULL);
+ RCU_INIT_POINTER(vlan->taps[index], NULL);
+ RCU_INIT_POINTER(q->vlan, NULL);
sock_put(&q->sk);
--vlan->numvtaps;
}
@@ -175,6 +175,14 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
if (!numvtaps)
goto out;
+ /* Check if we can use flow to select a queue */
+ rxq = skb_get_rxhash(skb);
+ if (rxq) {
+ tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
+ if (tap)
+ goto out;
+ }
+
if (likely(skb_rx_queue_recorded(skb))) {
rxq = skb_get_rx_queue(skb);
@@ -186,14 +194,6 @@ static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
goto out;
}
- /* Check if we can use flow to select a queue */
- rxq = skb_get_rxhash(skb);
- if (rxq) {
- tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
- if (tap)
- goto out;
- }
-
/* Everything failed - find first available queue */
for (rxq = 0; rxq < MAX_MACVTAP_QUEUES; rxq++) {
tap = rcu_dereference(vlan->taps[rxq]);
@@ -223,8 +223,8 @@ static void macvtap_del_queues(struct net_device *dev)
lockdep_is_held(&macvtap_lock));
if (q) {
qlist[j++] = q;
- rcu_assign_pointer(vlan->taps[i], NULL);
- rcu_assign_pointer(q->vlan, NULL);
+ RCU_INIT_POINTER(vlan->taps[i], NULL);
+ RCU_INIT_POINTER(q->vlan, NULL);
vlan->numvtaps--;
}
}
diff --git a/drivers/net/mii.c b/drivers/net/mii.c
index c62e781..c70c233 100644
--- a/drivers/net/mii.c
+++ b/drivers/net/mii.c
@@ -35,26 +35,11 @@
static u32 mii_get_an(struct mii_if_info *mii, u16 addr)
{
- u32 result = 0;
int advert;
advert = mii->mdio_read(mii->dev, mii->phy_id, addr);
- if (advert & LPA_LPACK)
- result |= ADVERTISED_Autoneg;
- if (advert & ADVERTISE_10HALF)
- result |= ADVERTISED_10baseT_Half;
- if (advert & ADVERTISE_10FULL)
- result |= ADVERTISED_10baseT_Full;
- if (advert & ADVERTISE_100HALF)
- result |= ADVERTISED_100baseT_Half;
- if (advert & ADVERTISE_100FULL)
- result |= ADVERTISED_100baseT_Full;
- if (advert & ADVERTISE_PAUSE_CAP)
- result |= ADVERTISED_Pause;
- if (advert & ADVERTISE_PAUSE_ASYM)
- result |= ADVERTISED_Asym_Pause;
-
- return result;
+
+ return mii_lpa_to_ethtool_lpa_t(advert);
}
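
    For illustration (an editorial sketch, not part of the patch): mii_lpa_to_ethtool_lpa_t()
    performs the same bit translation the removed open-coded block did by hand, so for a
    typical 10/100 full-duplex link partner:

    	u32 lpa = LPA_LPACK | LPA_10FULL | LPA_100FULL;	/* MII_LPA value */
    	u32 adv = mii_lpa_to_ethtool_lpa_t(lpa);
    	/* adv == ADVERTISED_Autoneg | ADVERTISED_10baseT_Full |
    	 *        ADVERTISED_100baseT_Full, matching the removed if-chain */
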
/**
@@ -104,19 +89,14 @@ int mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
ecmd->autoneg = AUTONEG_ENABLE;
ecmd->advertising |= mii_get_an(mii, MII_ADVERTISE);
- if (ctrl1000 & ADVERTISE_1000HALF)
- ecmd->advertising |= ADVERTISED_1000baseT_Half;
- if (ctrl1000 & ADVERTISE_1000FULL)
- ecmd->advertising |= ADVERTISED_1000baseT_Full;
+ if (mii->supports_gmii)
+ ecmd->advertising |=
+ mii_ctrl1000_to_ethtool_adv_t(ctrl1000);
if (bmsr & BMSR_ANEGCOMPLETE) {
ecmd->lp_advertising = mii_get_an(mii, MII_LPA);
- if (stat1000 & LPA_1000HALF)
- ecmd->lp_advertising |=
- ADVERTISED_1000baseT_Half;
- if (stat1000 & LPA_1000FULL)
- ecmd->lp_advertising |=
- ADVERTISED_1000baseT_Full;
+ ecmd->lp_advertising |=
+ mii_stat1000_to_ethtool_lpa_t(stat1000);
} else {
ecmd->lp_advertising = 0;
}
@@ -204,20 +184,11 @@ int mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd)
advert2 = mii->mdio_read(dev, mii->phy_id, MII_CTRL1000);
tmp2 = advert2 & ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
}
- if (ecmd->advertising & ADVERTISED_10baseT_Half)
- tmp |= ADVERTISE_10HALF;
- if (ecmd->advertising & ADVERTISED_10baseT_Full)
- tmp |= ADVERTISE_10FULL;
- if (ecmd->advertising & ADVERTISED_100baseT_Half)
- tmp |= ADVERTISE_100HALF;
- if (ecmd->advertising & ADVERTISED_100baseT_Full)
- tmp |= ADVERTISE_100FULL;
- if (mii->supports_gmii) {
- if (ecmd->advertising & ADVERTISED_1000baseT_Half)
- tmp2 |= ADVERTISE_1000HALF;
- if (ecmd->advertising & ADVERTISED_1000baseT_Full)
- tmp2 |= ADVERTISE_1000FULL;
- }
+ tmp |= ethtool_adv_to_mii_adv_t(ecmd->advertising);
+
+ if (mii->supports_gmii)
+ tmp2 |=
+ ethtool_adv_to_mii_ctrl1000_t(ecmd->advertising);
if (advert != tmp) {
mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp);
mii->advertising = tmp;
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index a702443..fbdcdf8 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -131,3 +131,7 @@ config MDIO_OCTEON
If in doubt, say Y.
endif # PHYLIB
+
+config MICREL_KS8995MA
+ tristate "Micrel KS8995MA 5-ports 10/100 managed Ethernet switch"
+ depends on SPI
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 2333215..e15c83f 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -23,3 +23,4 @@ obj-$(CONFIG_DP83640_PHY) += dp83640.o
obj-$(CONFIG_STE10XP) += ste10Xp.o
obj-$(CONFIG_MICREL_PHY) += micrel.o
obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
+obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 9663e0b..ba3c591 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1159,7 +1159,7 @@ static void rx_timestamp_work(struct work_struct *work)
}
}
spin_unlock_irqrestore(&dp83640->rx_lock, flags);
- netif_rx(skb);
+ netif_rx_ni(skb);
}
/* Clear out expired time stamps. */
diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c
index 1fa4d73..633680d 100644
--- a/drivers/net/phy/fixed.c
+++ b/drivers/net/phy/fixed.c
@@ -220,7 +220,7 @@ static int __init fixed_mdio_bus_init(void)
goto err_mdiobus_reg;
}
- snprintf(fmb->mii_bus->id, MII_BUS_ID_SIZE, "0");
+ snprintf(fmb->mii_bus->id, MII_BUS_ID_SIZE, "fixed-0");
fmb->mii_bus->name = "Fixed MDIO Bus";
fmb->mii_bus->priv = fmb;
fmb->mii_bus->parent = &pdev->dev;
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index c81f136..0856e1b 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -30,16 +30,16 @@
#include <asm/irq.h>
#include <asm/uaccess.h>
-MODULE_DESCRIPTION("ICPlus IP175C/IP101A/IC1001 PHY drivers");
+MODULE_DESCRIPTION("ICPlus IP175C/IP101A/IP101G/IC1001 PHY drivers");
MODULE_AUTHOR("Michael Barkowski");
MODULE_LICENSE("GPL");
-/* IP101A/IP1001 */
-#define IP10XX_SPEC_CTRL_STATUS 16 /* Spec. Control Register */
-#define IP1001_SPEC_CTRL_STATUS_2 20 /* IP1001 Spec. Control Reg 2 */
-#define IP1001_PHASE_SEL_MASK 3 /* IP1001 RX/TXPHASE_SEL */
-#define IP1001_APS_ON 11 /* IP1001 APS Mode bit */
-#define IP101A_APS_ON 2 /* IP101A APS Mode bit */
+/* IP101A/G - IP1001 */
+#define IP10XX_SPEC_CTRL_STATUS 16 /* Spec. Control Register */
+#define IP1001_SPEC_CTRL_STATUS_2 20 /* IP1001 Spec. Control Reg 2 */
+#define IP1001_PHASE_SEL_MASK 3 /* IP1001 RX/TXPHASE_SEL */
+#define IP1001_APS_ON 11 /* IP1001 APS Mode bit */
+#define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */
static int ip175c_config_init(struct phy_device *phydev)
{
@@ -98,20 +98,24 @@ static int ip175c_config_init(struct phy_device *phydev)
static int ip1xx_reset(struct phy_device *phydev)
{
- int err, bmcr;
+ int bmcr;
/* Software Reset PHY */
bmcr = phy_read(phydev, MII_BMCR);
+ if (bmcr < 0)
+ return bmcr;
bmcr |= BMCR_RESET;
- err = phy_write(phydev, MII_BMCR, bmcr);
- if (err < 0)
- return err;
+ bmcr = phy_write(phydev, MII_BMCR, bmcr);
+ if (bmcr < 0)
+ return bmcr;
do {
bmcr = phy_read(phydev, MII_BMCR);
+ if (bmcr < 0)
+ return bmcr;
} while (bmcr & BMCR_RESET);
- return err;
+ return 0;
}
static int ip1001_config_init(struct phy_device *phydev)
@@ -124,7 +128,10 @@ static int ip1001_config_init(struct phy_device *phydev)
/* Enable Auto Power Saving mode */
c = phy_read(phydev, IP1001_SPEC_CTRL_STATUS_2);
+ if (c < 0)
+ return c;
c |= IP1001_APS_ON;
+ c = phy_write(phydev, IP1001_SPEC_CTRL_STATUS_2, c);
if (c < 0)
return c;
@@ -132,14 +139,19 @@ static int ip1001_config_init(struct phy_device *phydev)
/* Additional delay (2ns) used to adjust RX clock phase
* at RGMII interface */
c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
+ if (c < 0)
+ return c;
+
c |= IP1001_PHASE_SEL_MASK;
c = phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c);
+ if (c < 0)
+ return c;
}
- return c;
+ return 0;
}
-static int ip101a_config_init(struct phy_device *phydev)
+static int ip101a_g_config_init(struct phy_device *phydev)
{
int c;
@@ -149,7 +161,7 @@ static int ip101a_config_init(struct phy_device *phydev)
/* Enable Auto Power Saving mode */
c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
- c |= IP101A_APS_ON;
+ c |= IP101A_G_APS_ON;
return c;
}
@@ -191,6 +203,7 @@ static struct phy_driver ip1001_driver = {
.phy_id_mask = 0x0ffffff0,
.features = PHY_GBIT_FEATURES | SUPPORTED_Pause |
SUPPORTED_Asym_Pause,
+ .flags = PHY_HAS_INTERRUPT,
.config_init = &ip1001_config_init,
.config_aneg = &genphy_config_aneg,
.read_status = &genphy_read_status,
@@ -199,13 +212,14 @@ static struct phy_driver ip1001_driver = {
.driver = { .owner = THIS_MODULE,},
};
-static struct phy_driver ip101a_driver = {
+static struct phy_driver ip101a_g_driver = {
.phy_id = 0x02430c54,
- .name = "ICPlus IP101A",
+ .name = "ICPlus IP101A/G",
.phy_id_mask = 0x0ffffff0,
.features = PHY_BASIC_FEATURES | SUPPORTED_Pause |
SUPPORTED_Asym_Pause,
- .config_init = &ip101a_config_init,
+ .flags = PHY_HAS_INTERRUPT,
+ .config_init = &ip101a_g_config_init,
.config_aneg = &genphy_config_aneg,
.read_status = &genphy_read_status,
.suspend = genphy_suspend,
@@ -221,7 +235,7 @@ static int __init icplus_init(void)
if (ret < 0)
return -ENODEV;
- ret = phy_driver_register(&ip101a_driver);
+ ret = phy_driver_register(&ip101a_g_driver);
if (ret < 0)
return -ENODEV;
@@ -231,7 +245,7 @@ static int __init icplus_init(void)
static void __exit icplus_exit(void)
{
phy_driver_unregister(&ip1001_driver);
- phy_driver_unregister(&ip101a_driver);
+ phy_driver_unregister(&ip101a_g_driver);
phy_driver_unregister(&ip175c_driver);
}
@@ -241,6 +255,7 @@ module_exit(icplus_exit);
static struct mdio_device_id __maybe_unused icplus_tbl[] = {
{ 0x02430d80, 0x0ffffff0 },
{ 0x02430d90, 0x0ffffff0 },
+ { 0x02430c54, 0x0ffffff0 },
{ }
};
diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
index 6539189..daec9b0 100644
--- a/drivers/net/phy/mdio-bitbang.c
+++ b/drivers/net/phy/mdio-bitbang.c
@@ -202,6 +202,14 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
return 0;
}
+static int mdiobb_reset(struct mii_bus *bus)
+{
+ struct mdiobb_ctrl *ctrl = bus->priv;
+ if (ctrl->reset)
+ ctrl->reset(bus);
+ return 0;
+}
+
struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
{
struct mii_bus *bus;
@@ -214,6 +222,7 @@ struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl)
bus->read = mdiobb_read;
bus->write = mdiobb_write;
+ bus->reset = mdiobb_reset;
bus->priv = ctrl;
return bus;
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 2843c90..50e8e5e 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -95,6 +95,7 @@ static struct mii_bus * __devinit mdio_gpio_bus_init(struct device *dev,
goto out;
bitbang->ctrl.ops = &mdio_gpio_ops;
+ bitbang->ctrl.reset = pdata->reset;
bitbang->mdc = pdata->mdc;
bitbang->mdio = pdata->mdio;
@@ -115,7 +116,7 @@ static struct mii_bus * __devinit mdio_gpio_bus_init(struct device *dev,
if (!new_bus->irq[i])
new_bus->irq[i] = PHY_POLL;
- snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", bus_id);
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "gpio-%x", bus_id);
if (gpio_request(bitbang->mdc, "mdc"))
goto out_free_bus;
diff --git a/drivers/net/phy/mdio-octeon.c b/drivers/net/phy/mdio-octeon.c
index bd12ba9..826d961 100644
--- a/drivers/net/phy/mdio-octeon.c
+++ b/drivers/net/phy/mdio-octeon.c
@@ -118,7 +118,8 @@ static int __devinit octeon_mdiobus_probe(struct platform_device *pdev)
bus->mii_bus->priv = bus;
bus->mii_bus->irq = bus->phy_irq;
bus->mii_bus->name = "mdio-octeon";
- snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%x", bus->unit);
+ snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ bus->mii_bus->name, bus->unit);
bus->mii_bus->parent = &pdev->dev;
bus->mii_bus->read = octeon_mdiobus_read;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 6c58da2..8985cc6 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -37,22 +37,35 @@
#include <asm/uaccess.h>
/**
- * mdiobus_alloc - allocate a mii_bus structure
+ * mdiobus_alloc_size - allocate a mii_bus structure
+ * @size: extra amount of memory to allocate for private storage.
+ * If non-zero, then bus->priv points to that memory.
*
* Description: called by a bus driver to allocate an mii_bus
* structure to fill in.
*/
-struct mii_bus *mdiobus_alloc(void)
+struct mii_bus *mdiobus_alloc_size(size_t size)
{
struct mii_bus *bus;
+ size_t aligned_size = ALIGN(sizeof(*bus), NETDEV_ALIGN);
+ size_t alloc_size;
- bus = kzalloc(sizeof(*bus), GFP_KERNEL);
- if (bus != NULL)
+ /* If we alloc extra space, it should be aligned */
+ if (size)
+ alloc_size = aligned_size + size;
+ else
+ alloc_size = sizeof(*bus);
+
+ bus = kzalloc(alloc_size, GFP_KERNEL);
+ if (bus) {
bus->state = MDIOBUS_ALLOCATED;
+ if (size)
+ bus->priv = (void *)bus + aligned_size;
+ }
return bus;
}
-EXPORT_SYMBOL(mdiobus_alloc);
+EXPORT_SYMBOL(mdiobus_alloc_size);
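
    A minimal usage sketch (editorial note, not part of the patch) of the new
    mdiobus_alloc_size() helper; the private struct and function names below are
    hypothetical:

    	#include <linux/phy.h>		/* mdiobus_alloc_size(), struct mii_bus */

    	/* hypothetical driver-private state */
    	struct my_mdio_priv {
    		void __iomem *regs;
    	};

    	static struct mii_bus *my_mdio_alloc(void)
    	{
    		struct mii_bus *bus;
    		struct my_mdio_priv *priv;

    		/* one allocation covers the mii_bus plus the private struct */
    		bus = mdiobus_alloc_size(sizeof(*priv));
    		if (!bus)
    			return NULL;

    		/* bus->priv points just past the NETDEV_ALIGN-aligned mii_bus */
    		priv = bus->priv;
    		priv->regs = NULL;	/* would be set to the mapped registers */

    		/* the caller would then fill in bus->name, bus->read, bus->write
    		 * and call mdiobus_register(bus) */
    		return bus;
    	}
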
/**
* mdiobus_release - mii_bus device release callback
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 83a5a5a..f320f46 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -563,20 +563,9 @@ static int genphy_config_advert(struct phy_device *phydev)
if (adv < 0)
return adv;
- adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP |
+ adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP |
ADVERTISE_PAUSE_ASYM);
- if (advertise & ADVERTISED_10baseT_Half)
- adv |= ADVERTISE_10HALF;
- if (advertise & ADVERTISED_10baseT_Full)
- adv |= ADVERTISE_10FULL;
- if (advertise & ADVERTISED_100baseT_Half)
- adv |= ADVERTISE_100HALF;
- if (advertise & ADVERTISED_100baseT_Full)
- adv |= ADVERTISE_100FULL;
- if (advertise & ADVERTISED_Pause)
- adv |= ADVERTISE_PAUSE_CAP;
- if (advertise & ADVERTISED_Asym_Pause)
- adv |= ADVERTISE_PAUSE_ASYM;
+ adv |= ethtool_adv_to_mii_adv_t(advertise);
if (adv != oldadv) {
err = phy_write(phydev, MII_ADVERTISE, adv);
@@ -595,10 +584,7 @@ static int genphy_config_advert(struct phy_device *phydev)
return adv;
adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
- if (advertise & SUPPORTED_1000baseT_Half)
- adv |= ADVERTISE_1000HALF;
- if (advertise & SUPPORTED_1000baseT_Full)
- adv |= ADVERTISE_1000FULL;
+ adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
if (adv != oldadv) {
err = phy_write(phydev, MII_CTRL1000, adv);
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 342505c..fc3e7e9 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -22,26 +22,7 @@
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
-
-#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */
-#define MII_LAN83C185_IM 30 /* Interrupt Mask */
-#define MII_LAN83C185_CTRL_STATUS 17 /* Mode/Status Register */
-
-#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */
-#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */
-#define MII_LAN83C185_ISF_INT3 (1<<3) /* Auto-Negotiation LP Ack */
-#define MII_LAN83C185_ISF_INT4 (1<<4) /* Link Down */
-#define MII_LAN83C185_ISF_INT5 (1<<5) /* Remote Fault Detected */
-#define MII_LAN83C185_ISF_INT6 (1<<6) /* Auto-Negotiation complete */
-#define MII_LAN83C185_ISF_INT7 (1<<7) /* ENERGYON */
-
-#define MII_LAN83C185_ISF_INT_ALL (0x0e)
-
-#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \
- (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4 | \
- MII_LAN83C185_ISF_INT7)
-
-#define MII_LAN83C185_EDPWRDOWN (1 << 13) /* EDPWRDOWN */
+#include <linux/smscphy.h>
static int smsc_phy_config_intr(struct phy_device *phydev)
{
diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
new file mode 100644
index 0000000..116a2dd
--- /dev/null
+++ b/drivers/net/phy/spi_ks8995.c
@@ -0,0 +1,375 @@
+/*
+ * SPI driver for Micrel/Kendin KS8995M ethernet switch
+ *
+ * Copyright (C) 2008 Gabor Juhos <juhosg at openwrt.org>
+ *
+ * This file was based on: drivers/spi/at25.c
+ * Copyright (C) 2006 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+
+#include <linux/spi/spi.h>
+
+#define DRV_VERSION "0.1.1"
+#define DRV_DESC "Micrel KS8995 Ethernet switch SPI driver"
+
+/* ------------------------------------------------------------------------ */
+
+#define KS8995_REG_ID0 0x00 /* Chip ID0 */
+#define KS8995_REG_ID1 0x01 /* Chip ID1 */
+
+#define KS8995_REG_GC0 0x02 /* Global Control 0 */
+#define KS8995_REG_GC1 0x03 /* Global Control 1 */
+#define KS8995_REG_GC2 0x04 /* Global Control 2 */
+#define KS8995_REG_GC3 0x05 /* Global Control 3 */
+#define KS8995_REG_GC4 0x06 /* Global Control 4 */
+#define KS8995_REG_GC5 0x07 /* Global Control 5 */
+#define KS8995_REG_GC6 0x08 /* Global Control 6 */
+#define KS8995_REG_GC7 0x09 /* Global Control 7 */
+#define KS8995_REG_GC8 0x0a /* Global Control 8 */
+#define KS8995_REG_GC9 0x0b /* Global Control 9 */
+
+#define KS8995_REG_PC(p, r) ((0x10 * p) + r) /* Port Control */
+#define KS8995_REG_PS(p, r) ((0x10 * p) + r + 0xe) /* Port Status */
+
+#define KS8995_REG_TPC0 0x60 /* TOS Priority Control 0 */
+#define KS8995_REG_TPC1 0x61 /* TOS Priority Control 1 */
+#define KS8995_REG_TPC2 0x62 /* TOS Priority Control 2 */
+#define KS8995_REG_TPC3 0x63 /* TOS Priority Control 3 */
+#define KS8995_REG_TPC4 0x64 /* TOS Priority Control 4 */
+#define KS8995_REG_TPC5 0x65 /* TOS Priority Control 5 */
+#define KS8995_REG_TPC6 0x66 /* TOS Priority Control 6 */
+#define KS8995_REG_TPC7 0x67 /* TOS Priority Control 7 */
+
+#define KS8995_REG_MAC0 0x68 /* MAC address 0 */
+#define KS8995_REG_MAC1 0x69 /* MAC address 1 */
+#define KS8995_REG_MAC2 0x6a /* MAC address 2 */
+#define KS8995_REG_MAC3 0x6b /* MAC address 3 */
+#define KS8995_REG_MAC4 0x6c /* MAC address 4 */
+#define KS8995_REG_MAC5 0x6d /* MAC address 5 */
+
+#define KS8995_REG_IAC0 0x6e /* Indirect Access Control 0 */
+#define KS8995_REG_IAC1 0x6f /* Indirect Access Control 1 */
+#define KS8995_REG_IAD7 0x70 /* Indirect Access Data 7 */
+#define KS8995_REG_IAD6 0x71 /* Indirect Access Data 6 */
+#define KS8995_REG_IAD5 0x72 /* Indirect Access Data 5 */
+#define KS8995_REG_IAD4 0x73 /* Indirect Access Data 4 */
+#define KS8995_REG_IAD3 0x74 /* Indirect Access Data 3 */
+#define KS8995_REG_IAD2 0x75 /* Indirect Access Data 2 */
+#define KS8995_REG_IAD1 0x76 /* Indirect Access Data 1 */
+#define KS8995_REG_IAD0 0x77 /* Indirect Access Data 0 */
+
+#define KS8995_REGS_SIZE 0x80
+
+#define ID1_CHIPID_M 0xf
+#define ID1_CHIPID_S 4
+#define ID1_REVISION_M 0x7
+#define ID1_REVISION_S 1
+#define ID1_START_SW 1 /* start the switch */
+
+#define FAMILY_KS8995 0x95
+#define CHIPID_M 0
+
+#define KS8995_CMD_WRITE 0x02U
+#define KS8995_CMD_READ 0x03U
+
+#define KS8995_RESET_DELAY 10 /* usec */
+
+struct ks8995_pdata {
+ /* not yet implemented */
+};
+
+struct ks8995_switch {
+ struct spi_device *spi;
+ struct mutex lock;
+ struct ks8995_pdata *pdata;
+};
+
+static inline u8 get_chip_id(u8 val)
+{
+ return (val >> ID1_CHIPID_S) & ID1_CHIPID_M;
+}
+
+static inline u8 get_chip_rev(u8 val)
+{
+ return (val >> ID1_REVISION_S) & ID1_REVISION_M;
+}
+
+/* ------------------------------------------------------------------------ */
+static int ks8995_read(struct ks8995_switch *ks, char *buf,
+ unsigned offset, size_t count)
+{
+ u8 cmd[2];
+ struct spi_transfer t[2];
+ struct spi_message m;
+ int err;
+
+ spi_message_init(&m);
+
+ memset(&t, 0, sizeof(t));
+
+ t[0].tx_buf = cmd;
+ t[0].len = sizeof(cmd);
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].rx_buf = buf;
+ t[1].len = count;
+ spi_message_add_tail(&t[1], &m);
+
+ cmd[0] = KS8995_CMD_READ;
+ cmd[1] = offset;
+
+ mutex_lock(&ks->lock);
+ err = spi_sync(ks->spi, &m);
+ mutex_unlock(&ks->lock);
+
+ return err ? err : count;
+}
+
+
+static int ks8995_write(struct ks8995_switch *ks, char *buf,
+ unsigned offset, size_t count)
+{
+ u8 cmd[2];
+ struct spi_transfer t[2];
+ struct spi_message m;
+ int err;
+
+ spi_message_init(&m);
+
+ memset(&t, 0, sizeof(t));
+
+ t[0].tx_buf = cmd;
+ t[0].len = sizeof(cmd);
+ spi_message_add_tail(&t[0], &m);
+
+ t[1].tx_buf = buf;
+ t[1].len = count;
+ spi_message_add_tail(&t[1], &m);
+
+ cmd[0] = KS8995_CMD_WRITE;
+ cmd[1] = offset;
+
+ mutex_lock(&ks->lock);
+ err = spi_sync(ks->spi, &m);
+ mutex_unlock(&ks->lock);
+
+ return err ? err : count;
+}
+
+static inline int ks8995_read_reg(struct ks8995_switch *ks, u8 addr, u8 *buf)
+{
+ return (ks8995_read(ks, buf, addr, 1) != 1);
+}
+
+static inline int ks8995_write_reg(struct ks8995_switch *ks, u8 addr, u8 val)
+{
+ char buf = val;
+
+ return (ks8995_write(ks, &buf, addr, 1) != 1);
+}
+
+/* ------------------------------------------------------------------------ */
+
+static int ks8995_stop(struct ks8995_switch *ks)
+{
+ return ks8995_write_reg(ks, KS8995_REG_ID1, 0);
+}
+
+static int ks8995_start(struct ks8995_switch *ks)
+{
+ return ks8995_write_reg(ks, KS8995_REG_ID1, 1);
+}
+
+static int ks8995_reset(struct ks8995_switch *ks)
+{
+ int err;
+
+ err = ks8995_stop(ks);
+ if (err)
+ return err;
+
+ udelay(KS8995_RESET_DELAY);
+
+ return ks8995_start(ks);
+}
+
+/* ------------------------------------------------------------------------ */
+
+static ssize_t ks8995_registers_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
+{
+ struct device *dev;
+ struct ks8995_switch *ks8995;
+
+ dev = container_of(kobj, struct device, kobj);
+ ks8995 = dev_get_drvdata(dev);
+
+ if (unlikely(off > KS8995_REGS_SIZE))
+ return 0;
+
+ if ((off + count) > KS8995_REGS_SIZE)
+ count = KS8995_REGS_SIZE - off;
+
+ if (unlikely(!count))
+ return count;
+
+ return ks8995_read(ks8995, buf, off, count);
+}
+
+
+static ssize_t ks8995_registers_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count)
+{
+ struct device *dev;
+ struct ks8995_switch *ks8995;
+
+ dev = container_of(kobj, struct device, kobj);
+ ks8995 = dev_get_drvdata(dev);
+
+ if (unlikely(off >= KS8995_REGS_SIZE))
+ return -EFBIG;
+
+ if ((off + count) > KS8995_REGS_SIZE)
+ count = KS8995_REGS_SIZE - off;
+
+ if (unlikely(!count))
+ return count;
+
+ return ks8995_write(ks8995, buf, off, count);
+}
+
+
+static struct bin_attribute ks8995_registers_attr = {
+ .attr = {
+ .name = "registers",
+ .mode = S_IRUSR | S_IWUSR,
+ },
+ .size = KS8995_REGS_SIZE,
+ .read = ks8995_registers_read,
+ .write = ks8995_registers_write,
+};
+
+/* ------------------------------------------------------------------------ */
+
+static int __devinit ks8995_probe(struct spi_device *spi)
+{
+ struct ks8995_switch *ks;
+ struct ks8995_pdata *pdata;
+ u8 ids[2];
+ int err;
+
+ /* Chip description */
+ pdata = spi->dev.platform_data;
+
+ ks = kzalloc(sizeof(*ks), GFP_KERNEL);
+ if (!ks) {
+ dev_err(&spi->dev, "no memory for private data\n");
+ return -ENOMEM;
+ }
+
+ mutex_init(&ks->lock);
+ ks->pdata = pdata;
+ ks->spi = spi_dev_get(spi);
+ dev_set_drvdata(&spi->dev, ks);
+
+ spi->mode = SPI_MODE_0;
+ spi->bits_per_word = 8;
+ err = spi_setup(spi);
+ if (err) {
+ dev_err(&spi->dev, "spi_setup failed, err=%d\n", err);
+ goto err_drvdata;
+ }
+
+ err = ks8995_read(ks, ids, KS8995_REG_ID0, sizeof(ids));
+ if (err < 0) {
+ dev_err(&spi->dev, "unable to read id registers, err=%d\n",
+ err);
+ goto err_drvdata;
+ }
+
+ switch (ids[0]) {
+ case FAMILY_KS8995:
+ break;
+ default:
+ dev_err(&spi->dev, "unknown family id:%02x\n", ids[0]);
+ err = -ENODEV;
+ goto err_drvdata;
+ }
+
+ err = ks8995_reset(ks);
+ if (err)
+ goto err_drvdata;
+
+ err = sysfs_create_bin_file(&spi->dev.kobj, &ks8995_registers_attr);
+ if (err) {
+ dev_err(&spi->dev, "unable to create sysfs file, err=%d\n",
+ err);
+ goto err_drvdata;
+ }
+
+ dev_info(&spi->dev, "KS89%02X device found, Chip ID:%01x, "
+ "Revision:%01x\n", ids[0],
+ get_chip_id(ids[1]), get_chip_rev(ids[1]));
+
+ return 0;
+
+err_drvdata:
+ dev_set_drvdata(&spi->dev, NULL);
+ kfree(ks);
+ return err;
+}
+
+static int __devexit ks8995_remove(struct spi_device *spi)
+{
+ struct ks8995_switch *ks8995;
+
+ ks8995 = dev_get_drvdata(&spi->dev);
+ sysfs_remove_bin_file(&spi->dev.kobj, &ks8995_registers_attr);
+
+ dev_set_drvdata(&spi->dev, NULL);
+ kfree(ks8995);
+
+ return 0;
+}
+
+/* ------------------------------------------------------------------------ */
+
+static struct spi_driver ks8995_driver = {
+ .driver = {
+ .name = "spi-ks8995",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = ks8995_probe,
+ .remove = __devexit_p(ks8995_remove),
+};
+
+static int __init ks8995_init(void)
+{
+ printk(KERN_INFO DRV_DESC " version " DRV_VERSION "\n");
+
+ return spi_register_driver(&ks8995_driver);
+}
+module_init(ks8995_init);
+
+static void __exit ks8995_exit(void)
+{
+ spi_unregister_driver(&ks8995_driver);
+}
+module_exit(ks8995_exit);
+
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR("Gabor Juhos <juhosg at openwrt.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index edfa15d..486b404 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -2024,14 +2024,22 @@ ppp_mp_reconstruct(struct ppp *ppp)
continue;
}
if (PPP_MP_CB(p)->sequence != seq) {
+ u32 oldseq;
/* Fragment `seq' is missing. If it is after
minseq, it might arrive later, so stop here. */
if (seq_after(seq, minseq))
break;
/* Fragment `seq' is lost, keep going. */
lost = 1;
+ oldseq = seq;
seq = seq_before(minseq, PPP_MP_CB(p)->sequence)?
minseq + 1: PPP_MP_CB(p)->sequence;
+
+ if (ppp->debug & 1)
+ netdev_printk(KERN_DEBUG, ppp->dev,
+ "lost frag %u..%u\n",
+ oldseq, seq-1);
+
goto again;
}
@@ -2076,6 +2084,10 @@ ppp_mp_reconstruct(struct ppp *ppp)
struct sk_buff *tmp2;
skb_queue_reverse_walk_from_safe(list, p, tmp2) {
+ if (ppp->debug & 1)
+ netdev_printk(KERN_DEBUG, ppp->dev,
+ "discarding frag %u\n",
+ PPP_MP_CB(p)->sequence);
__skb_unlink(p, list);
kfree_skb(p);
}
@@ -2091,6 +2103,17 @@ ppp_mp_reconstruct(struct ppp *ppp)
/* If we have discarded any fragments,
signal a receive error. */
if (PPP_MP_CB(head)->sequence != ppp->nextseq) {
+ skb_queue_walk_safe(list, p, tmp) {
+ if (p == head)
+ break;
+ if (ppp->debug & 1)
+ netdev_printk(KERN_DEBUG, ppp->dev,
+ "discarding frag %u\n",
+ PPP_MP_CB(p)->sequence);
+ __skb_unlink(p, list);
+ kfree_skb(p);
+ }
+
if (ppp->debug & 1)
netdev_printk(KERN_DEBUG, ppp->dev,
" missed pkts %u..%u\n",
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index f8a6853..df884dd 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -162,7 +162,7 @@ static void del_chan(struct pppox_sock *sock)
{
spin_lock(&chan_lock);
clear_bit(sock->proto.pptp.src_addr.call_id, callid_bitmap);
- rcu_assign_pointer(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
+ RCU_INIT_POINTER(callid_sock[sock->proto.pptp.src_addr.call_id], NULL);
spin_unlock(&chan_lock);
synchronize_rcu();
}
@@ -585,8 +585,8 @@ static int pptp_create(struct net *net, struct socket *sock)
po = pppox_sk(sk);
opt = &po->proto.pptp;
- opt->seq_sent = 0; opt->seq_recv = 0;
- opt->ack_recv = 0; opt->ack_sent = 0;
+ opt->seq_sent = 0; opt->seq_recv = 0xffffffff;
+ opt->ack_recv = 0; opt->ack_sent = 0xffffffff;
error = 0;
out:
diff --git a/drivers/net/team/Kconfig b/drivers/net/team/Kconfig
new file mode 100644
index 0000000..248a144
--- /dev/null
+++ b/drivers/net/team/Kconfig
@@ -0,0 +1,43 @@
+menuconfig NET_TEAM
+ tristate "Ethernet team driver support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ ---help---
+ This allows one to create virtual interfaces that team together
+ multiple Ethernet devices.
+
+ Team devices can be added using the "ip" command from the
+ iproute2 package:
+
+ "ip link add link [ address MAC ] [ NAME ] type team"
+
+ To compile this driver as a module, choose M here: the module
+ will be called team.
+
+if NET_TEAM
+
+config NET_TEAM_MODE_ROUNDROBIN
+ tristate "Round-robin mode support"
+ depends on NET_TEAM
+ ---help---
+ Basic mode in which the port used for transmitting packets is
+ selected in round-robin fashion using a packet counter.
+
+ All added ports are set up to use the team device's MAC address.
+
+ To compile this team mode as a module, choose M here: the module
+ will be called team_mode_roundrobin.
+
+config NET_TEAM_MODE_ACTIVEBACKUP
+ tristate "Active-backup mode support"
+ depends on NET_TEAM
+ ---help---
+ Only one port is active at a time and the rest of the ports are
+ used for backup.
+
+ MAC addresses of the ports are not modified. Userspace is
+ responsible for doing so.
+
+ To compile this team mode as a module, choose M here: the module
+ will be called team_mode_activebackup.
+
+endif # NET_TEAM
diff --git a/drivers/net/team/Makefile b/drivers/net/team/Makefile
new file mode 100644
index 0000000..85f2028
--- /dev/null
+++ b/drivers/net/team/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the network team driver
+#
+
+obj-$(CONFIG_NET_TEAM) += team.o
+obj-$(CONFIG_NET_TEAM_MODE_ROUNDROBIN) += team_mode_roundrobin.o
+obj-$(CONFIG_NET_TEAM_MODE_ACTIVEBACKUP) += team_mode_activebackup.o
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
new file mode 100644
index 0000000..6b678f3
--- /dev/null
+++ b/drivers/net/team/team.c
@@ -0,0 +1,1728 @@
+/*
+ * net/drivers/team/team.c - Network team device driver
+ * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
+#include <linux/errno.h>
+#include <linux/ctype.h>
+#include <linux/notifier.h>
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/if_arp.h>
+#include <linux/socket.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <net/rtnetlink.h>
+#include <net/genetlink.h>
+#include <net/netlink.h>
+#include <linux/if_team.h>
+
+#define DRV_NAME "team"
+
+
+/**********
+ * Helpers
+ **********/
+
+#define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)
+
+static struct team_port *team_port_get_rcu(const struct net_device *dev)
+{
+ struct team_port *port = rcu_dereference(dev->rx_handler_data);
+
+ return team_port_exists(dev) ? port : NULL;
+}
+
+static struct team_port *team_port_get_rtnl(const struct net_device *dev)
+{
+ struct team_port *port = rtnl_dereference(dev->rx_handler_data);
+
+ return team_port_exists(dev) ? port : NULL;
+}
+
+/*
+ * Since the ability to change the MAC address of an open port device is
+ * tested in team_port_add, this function can be called without checking
+ * the return value.
+ */
+static int __set_port_mac(struct net_device *port_dev,
+ const unsigned char *dev_addr)
+{
+ struct sockaddr addr;
+
+ memcpy(addr.sa_data, dev_addr, ETH_ALEN);
+ addr.sa_family = ARPHRD_ETHER;
+ return dev_set_mac_address(port_dev, &addr);
+}
+
+int team_port_set_orig_mac(struct team_port *port)
+{
+ return __set_port_mac(port->dev, port->orig.dev_addr);
+}
+
+int team_port_set_team_mac(struct team_port *port)
+{
+ return __set_port_mac(port->dev, port->team->dev->dev_addr);
+}
+EXPORT_SYMBOL(team_port_set_team_mac);
+
+
+/*******************
+ * Options handling
+ *******************/
+
+struct team_option *__team_find_option(struct team *team, const char *opt_name)
+{
+ struct team_option *option;
+
+ list_for_each_entry(option, &team->option_list, list) {
+ if (strcmp(option->name, opt_name) == 0)
+ return option;
+ }
+ return NULL;
+}
+
+int __team_options_register(struct team *team,
+ const struct team_option *option,
+ size_t option_count)
+{
+ int i;
+ struct team_option **dst_opts;
+ int err;
+
+ dst_opts = kzalloc(sizeof(struct team_option *) * option_count,
+ GFP_KERNEL);
+ if (!dst_opts)
+ return -ENOMEM;
+ for (i = 0; i < option_count; i++, option++) {
+ if (__team_find_option(team, option->name)) {
+ err = -EEXIST;
+ goto rollback;
+ }
+ dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
+ if (!dst_opts[i]) {
+ err = -ENOMEM;
+ goto rollback;
+ }
+ }
+
+ for (i = 0; i < option_count; i++) {
+ dst_opts[i]->changed = true;
+ dst_opts[i]->removed = false;
+ list_add_tail(&dst_opts[i]->list, &team->option_list);
+ }
+
+ kfree(dst_opts);
+ return 0;
+
+rollback:
+ for (i = 0; i < option_count; i++)
+ kfree(dst_opts[i]);
+
+ kfree(dst_opts);
+ return err;
+}
+
+static void __team_options_mark_removed(struct team *team,
+ const struct team_option *option,
+ size_t option_count)
+{
+ int i;
+
+ for (i = 0; i < option_count; i++, option++) {
+ struct team_option *del_opt;
+
+ del_opt = __team_find_option(team, option->name);
+ if (del_opt) {
+ del_opt->changed = true;
+ del_opt->removed = true;
+ }
+ }
+}
+
+static void __team_options_unregister(struct team *team,
+ const struct team_option *option,
+ size_t option_count)
+{
+ int i;
+
+ for (i = 0; i < option_count; i++, option++) {
+ struct team_option *del_opt;
+
+ del_opt = __team_find_option(team, option->name);
+ if (del_opt) {
+ list_del(&del_opt->list);
+ kfree(del_opt);
+ }
+ }
+}
+
+static void __team_options_change_check(struct team *team);
+
+int team_options_register(struct team *team,
+ const struct team_option *option,
+ size_t option_count)
+{
+ int err;
+
+ err = __team_options_register(team, option, option_count);
+ if (err)
+ return err;
+ __team_options_change_check(team);
+ return 0;
+}
+EXPORT_SYMBOL(team_options_register);
+
+void team_options_unregister(struct team *team,
+ const struct team_option *option,
+ size_t option_count)
+{
+ __team_options_mark_removed(team, option, option_count);
+ __team_options_change_check(team);
+ __team_options_unregister(team, option, option_count);
+}
+EXPORT_SYMBOL(team_options_unregister);
+
+static int team_option_get(struct team *team, struct team_option *option,
+ void *arg)
+{
+ return option->getter(team, arg);
+}
+
+static int team_option_set(struct team *team, struct team_option *option,
+ void *arg)
+{
+ int err;
+
+ err = option->setter(team, arg);
+ if (err)
+ return err;
+
+ option->changed = true;
+ __team_options_change_check(team);
+ return err;
+}
+
+/****************
+ * Mode handling
+ ****************/
+
+static LIST_HEAD(mode_list);
+static DEFINE_SPINLOCK(mode_list_lock);
+
+static struct team_mode *__find_mode(const char *kind)
+{
+ struct team_mode *mode;
+
+ list_for_each_entry(mode, &mode_list, list) {
+ if (strcmp(mode->kind, kind) == 0)
+ return mode;
+ }
+ return NULL;
+}
+
+static bool is_good_mode_name(const char *name)
+{
+ while (*name != '\0') {
+ if (!isalpha(*name) && !isdigit(*name) && *name != '_')
+ return false;
+ name++;
+ }
+ return true;
+}
+
+int team_mode_register(struct team_mode *mode)
+{
+ int err = 0;
+
+ if (!is_good_mode_name(mode->kind) ||
+ mode->priv_size > TEAM_MODE_PRIV_SIZE)
+ return -EINVAL;
+ spin_lock(&mode_list_lock);
+ if (__find_mode(mode->kind)) {
+ err = -EEXIST;
+ goto unlock;
+ }
+ list_add_tail(&mode->list, &mode_list);
+unlock:
+ spin_unlock(&mode_list_lock);
+ return err;
+}
+EXPORT_SYMBOL(team_mode_register);
+
+int team_mode_unregister(struct team_mode *mode)
+{
+ spin_lock(&mode_list_lock);
+ list_del_init(&mode->list);
+ spin_unlock(&mode_list_lock);
+ return 0;
+}
+EXPORT_SYMBOL(team_mode_unregister);
+
+static struct team_mode *team_mode_get(const char *kind)
+{
+ struct team_mode *mode;
+
+ spin_lock(&mode_list_lock);
+ mode = __find_mode(kind);
+ if (!mode) {
+ spin_unlock(&mode_list_lock);
+ request_module("team-mode-%s", kind);
+ spin_lock(&mode_list_lock);
+ mode = __find_mode(kind);
+ }
+ if (mode)
+ if (!try_module_get(mode->owner))
+ mode = NULL;
+
+ spin_unlock(&mode_list_lock);
+ return mode;
+}
+
+static void team_mode_put(const struct team_mode *mode)
+{
+ module_put(mode->owner);
+}
+
+static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+ return false;
+}
+
+rx_handler_result_t team_dummy_receive(struct team *team,
+ struct team_port *port,
+ struct sk_buff *skb)
+{
+ return RX_HANDLER_ANOTHER;
+}
+
+static void team_adjust_ops(struct team *team)
+{
+ /*
+ * To avoid checks in rx/tx skb paths, ensure here that non-null and
+ * correct ops are always set.
+ */
+
+ if (list_empty(&team->port_list) ||
+ !team->mode || !team->mode->ops->transmit)
+ team->ops.transmit = team_dummy_transmit;
+ else
+ team->ops.transmit = team->mode->ops->transmit;
+
+ if (list_empty(&team->port_list) ||
+ !team->mode || !team->mode->ops->receive)
+ team->ops.receive = team_dummy_receive;
+ else
+ team->ops.receive = team->mode->ops->receive;
+}
+
+/*
+ * We can benefit from the fact that no port is present at the time of the
+ * mode change. Therefore no packets are in flight, so there's no need to
+ * set the mode operations in any special way.
+ */
+static int __team_change_mode(struct team *team,
+ const struct team_mode *new_mode)
+{
+ /* Check if mode was previously set and do cleanup if so */
+ if (team->mode) {
+ void (*exit_op)(struct team *team) = team->ops.exit;
+
+ /* Clear ops area so no callback is called any longer */
+ memset(&team->ops, 0, sizeof(struct team_mode_ops));
+ team_adjust_ops(team);
+
+ if (exit_op)
+ exit_op(team);
+ team_mode_put(team->mode);
+ team->mode = NULL;
+ /* zero private data area */
+ memset(&team->mode_priv, 0,
+ sizeof(struct team) - offsetof(struct team, mode_priv));
+ }
+
+ if (!new_mode)
+ return 0;
+
+ if (new_mode->ops->init) {
+ int err;
+
+ err = new_mode->ops->init(team);
+ if (err)
+ return err;
+ }
+
+ team->mode = new_mode;
+ memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
+ team_adjust_ops(team);
+
+ return 0;
+}
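
The two comments above capture the design: team_adjust_ops() keeps the transmit/receive hooks always valid by falling back to dummy handlers, and __team_change_mode() only runs with no ports attached, so no packets are in flight while the ops are swapped. A minimal userspace sketch of the always-valid-ops idea follows; the names are hypothetical and it is not part of the patch.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct frame { int len; };

struct mode_ops {
        bool (*transmit)(struct frame *f);      /* a mode may leave this NULL */
};

struct tdev {
        const struct mode_ops *mode_ops;        /* NULL when no mode is set */
        struct mode_ops ops;                    /* what the hot path calls */
};

static bool dummy_transmit(struct frame *f)
{
        (void)f;                                /* silently drop */
        return false;
}

/* Mirror of the adjust-ops idea: install the real handler or the dummy. */
static void adjust_ops(struct tdev *d)
{
        if (!d->mode_ops || !d->mode_ops->transmit)
                d->ops.transmit = dummy_transmit;
        else
                d->ops.transmit = d->mode_ops->transmit;
}

static bool real_transmit(struct frame *f)
{
        printf("sent %d bytes\n", f->len);
        return true;
}

static const struct mode_ops real_mode = { .transmit = real_transmit };

int main(void)
{
        struct tdev d = { .mode_ops = NULL };
        struct frame f = { .len = 64 };

        adjust_ops(&d);
        d.ops.transmit(&f);     /* no NULL check needed: dummy drops the frame */

        d.mode_ops = &real_mode;
        adjust_ops(&d);
        d.ops.transmit(&f);     /* now the real handler runs */
        return 0;
}
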
+
+static int team_change_mode(struct team *team, const char *kind)
+{
+ struct team_mode *new_mode;
+ struct net_device *dev = team->dev;
+ int err;
+
+ if (!list_empty(&team->port_list)) {
+ netdev_err(dev, "No ports can be present during mode change\n");
+ return -EBUSY;
+ }
+
+ if (team->mode && strcmp(team->mode->kind, kind) == 0) {
+ netdev_err(dev, "Unable to change to the same mode the team is in\n");
+ return -EINVAL;
+ }
+
+ new_mode = team_mode_get(kind);
+ if (!new_mode) {
+ netdev_err(dev, "Mode \"%s\" not found\n", kind);
+ return -EINVAL;
+ }
+
+ err = __team_change_mode(team, new_mode);
+ if (err) {
+ netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
+ team_mode_put(new_mode);
+ return err;
+ }
+
+ netdev_info(dev, "Mode changed to \"%s\"\n", kind);
+ return 0;
+}
+
+
+/************************
+ * Rx path frame handler
+ ************************/
+
+/* note: already called with rcu_read_lock */
+static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
+{
+ struct sk_buff *skb = *pskb;
+ struct team_port *port;
+ struct team *team;
+ rx_handler_result_t res;
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ return RX_HANDLER_CONSUMED;
+
+ *pskb = skb;
+
+ port = team_port_get_rcu(skb->dev);
+ team = port->team;
+
+ res = team->ops.receive(team, port, skb);
+ if (res == RX_HANDLER_ANOTHER) {
+ struct team_pcpu_stats *pcpu_stats;
+
+ pcpu_stats = this_cpu_ptr(team->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->rx_packets++;
+ pcpu_stats->rx_bytes += skb->len;
+ if (skb->pkt_type == PACKET_MULTICAST)
+ pcpu_stats->rx_multicast++;
+ u64_stats_update_end(&pcpu_stats->syncp);
+
+ skb->dev = team->dev;
+ } else {
+ this_cpu_inc(team->pcpu_stats->rx_dropped);
+ }
+
+ return res;
+}
+
+
+/****************
+ * Port handling
+ ****************/
+
+static bool team_port_find(const struct team *team,
+ const struct team_port *port)
+{
+ struct team_port *cur;
+
+ list_for_each_entry(cur, &team->port_list, list)
+ if (cur == port)
+ return true;
+ return false;
+}
+
+/*
+ * Add/delete port to the team port list. Write guarded by rtnl_lock.
+ * Takes care of correct port->index setup (might be racy).
+ */
+static void team_port_list_add_port(struct team *team,
+ struct team_port *port)
+{
+ port->index = team->port_count++;
+ hlist_add_head_rcu(&port->hlist,
+ team_port_index_hash(team, port->index));
+ list_add_tail_rcu(&port->list, &team->port_list);
+}
+
+static void __reconstruct_port_hlist(struct team *team, int rm_index)
+{
+ int i;
+ struct team_port *port;
+
+ for (i = rm_index + 1; i < team->port_count; i++) {
+ port = team_get_port_by_index(team, i);
+ hlist_del_rcu(&port->hlist);
+ port->index--;
+ hlist_add_head_rcu(&port->hlist,
+ team_port_index_hash(team, port->index));
+ }
+}
+
+static void team_port_list_del_port(struct team *team,
+ struct team_port *port)
+{
+ int rm_index = port->index;
+
+ hlist_del_rcu(&port->hlist);
+ list_del_rcu(&port->list);
+ __reconstruct_port_hlist(team, rm_index);
+ team->port_count--;
+}
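
team_port_list_del_port() together with __reconstruct_port_hlist() keeps port indexes dense: when a port is removed, every later port shifts down by one. A minimal sketch of that re-indexing on a plain array follows; the names are hypothetical and it stands in for the driver's RCU hash/list.

#include <stdio.h>

#define MAX_PORTS 8

struct port { int index; const char *name; };

static struct port *ports[MAX_PORTS];
static int port_count;

static void port_add(struct port *p)
{
        p->index = port_count;
        ports[port_count++] = p;
}

static void port_del(int rm_index)
{
        for (int i = rm_index + 1; i < port_count; i++) {
                ports[i]->index--;      /* same fix-up as __reconstruct_port_hlist() */
                ports[i - 1] = ports[i];
        }
        port_count--;
}

int main(void)
{
        struct port a = { .name = "a" }, b = { .name = "b" }, c = { .name = "c" };

        port_add(&a);
        port_add(&b);
        port_add(&c);
        port_del(a.index);              /* remove "a"; "b" and "c" slide down */

        for (int i = 0; i < port_count; i++)
                printf("%s -> index %d\n", ports[i]->name, ports[i]->index);
        return 0;
}
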
+
+#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
+ NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
+ NETIF_F_HIGHDMA | NETIF_F_LRO)
+
+static void __team_compute_features(struct team *team)
+{
+ struct team_port *port;
+ u32 vlan_features = TEAM_VLAN_FEATURES;
+ unsigned short max_hard_header_len = ETH_HLEN;
+
+ list_for_each_entry(port, &team->port_list, list) {
+ vlan_features = netdev_increment_features(vlan_features,
+ port->dev->vlan_features,
+ TEAM_VLAN_FEATURES);
+
+ if (port->dev->hard_header_len > max_hard_header_len)
+ max_hard_header_len = port->dev->hard_header_len;
+ }
+
+ team->dev->vlan_features = vlan_features;
+ team->dev->hard_header_len = max_hard_header_len;
+
+ netdev_change_features(team->dev);
+}
+
+static void team_compute_features(struct team *team)
+{
+ mutex_lock(&team->lock);
+ __team_compute_features(team);
+ mutex_unlock(&team->lock);
+}
+
+static int team_port_enter(struct team *team, struct team_port *port)
+{
+ int err = 0;
+
+ dev_hold(team->dev);
+ port->dev->priv_flags |= IFF_TEAM_PORT;
+ if (team->ops.port_enter) {
+ err = team->ops.port_enter(team, port);
+ if (err) {
+ netdev_err(team->dev, "Device %s failed to enter team mode\n",
+ port->dev->name);
+ goto err_port_enter;
+ }
+ }
+
+ return 0;
+
+err_port_enter:
+ port->dev->priv_flags &= ~IFF_TEAM_PORT;
+ dev_put(team->dev);
+
+ return err;
+}
+
+static void team_port_leave(struct team *team, struct team_port *port)
+{
+ if (team->ops.port_leave)
+ team->ops.port_leave(team, port);
+ port->dev->priv_flags &= ~IFF_TEAM_PORT;
+ dev_put(team->dev);
+}
+
+static void __team_port_change_check(struct team_port *port, bool linkup);
+
+static int team_port_add(struct team *team, struct net_device *port_dev)
+{
+ struct net_device *dev = team->dev;
+ struct team_port *port;
+ char *portname = port_dev->name;
+ int err;
+
+ if (port_dev->flags & IFF_LOOPBACK ||
+ port_dev->type != ARPHRD_ETHER) {
+ netdev_err(dev, "Device %s is of an unsupported type\n",
+ portname);
+ return -EINVAL;
+ }
+
+ if (team_port_exists(port_dev)) {
+ netdev_err(dev, "Device %s is already a port "
+ "of a team device\n", portname);
+ return -EBUSY;
+ }
+
+ if (port_dev->flags & IFF_UP) {
+ netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
+ portname);
+ return -EBUSY;
+ }
+
+ port = kzalloc(sizeof(struct team_port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->dev = port_dev;
+ port->team = team;
+
+ port->orig.mtu = port_dev->mtu;
+ err = dev_set_mtu(port_dev, dev->mtu);
+ if (err) {
+ netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
+ goto err_set_mtu;
+ }
+
+ memcpy(port->orig.dev_addr, port_dev->dev_addr, ETH_ALEN);
+
+ err = team_port_enter(team, port);
+ if (err) {
+ netdev_err(dev, "Device %s failed to enter team mode\n",
+ portname);
+ goto err_port_enter;
+ }
+
+ err = dev_open(port_dev);
+ if (err) {
+ netdev_dbg(dev, "Device %s opening failed\n",
+ portname);
+ goto err_dev_open;
+ }
+
+ err = vlan_vids_add_by_dev(port_dev, dev);
+ if (err) {
+ netdev_err(dev, "Failed to add vlan ids to device %s\n",
+ portname);
+ goto err_vids_add;
+ }
+
+ err = netdev_set_master(port_dev, dev);
+ if (err) {
+ netdev_err(dev, "Device %s failed to set master\n", portname);
+ goto err_set_master;
+ }
+
+ err = netdev_rx_handler_register(port_dev, team_handle_frame,
+ port);
+ if (err) {
+ netdev_err(dev, "Device %s failed to register rx_handler\n",
+ portname);
+ goto err_handler_register;
+ }
+
+ team_port_list_add_port(team, port);
+ team_adjust_ops(team);
+ __team_compute_features(team);
+ __team_port_change_check(port, !!netif_carrier_ok(port_dev));
+
+ netdev_info(dev, "Port device %s added\n", portname);
+
+ return 0;
+
+err_handler_register:
+ netdev_set_master(port_dev, NULL);
+
+err_set_master:
+ vlan_vids_del_by_dev(port_dev, dev);
+
+err_vids_add:
+ dev_close(port_dev);
+
+err_dev_open:
+ team_port_leave(team, port);
+ team_port_set_orig_mac(port);
+
+err_port_enter:
+ dev_set_mtu(port_dev, port->orig.mtu);
+
+err_set_mtu:
+ kfree(port);
+
+ return err;
+}
+
+static int team_port_del(struct team *team, struct net_device *port_dev)
+{
+ struct net_device *dev = team->dev;
+ struct team_port *port;
+ char *portname = port_dev->name;
+
+ port = team_port_get_rtnl(port_dev);
+ if (!port || !team_port_find(team, port)) {
+ netdev_err(dev, "Device %s does not act as a port of this team\n",
+ portname);
+ return -ENOENT;
+ }
+
+ port->removed = true;
+ __team_port_change_check(port, false);
+ team_port_list_del_port(team, port);
+ team_adjust_ops(team);
+ netdev_rx_handler_unregister(port_dev);
+ netdev_set_master(port_dev, NULL);
+ vlan_vids_del_by_dev(port_dev, dev);
+ dev_close(port_dev);
+ team_port_leave(team, port);
+ team_port_set_orig_mac(port);
+ dev_set_mtu(port_dev, port->orig.mtu);
+ synchronize_rcu();
+ kfree(port);
+ netdev_info(dev, "Port device %s removed\n", portname);
+ __team_compute_features(team);
+
+ return 0;
+}
+
+
+/*****************
+ * Net device ops
+ *****************/
+
+static const char team_no_mode_kind[] = "*NOMODE*";
+
+static int team_mode_option_get(struct team *team, void *arg)
+{
+ const char **str = arg;
+
+ *str = team->mode ? team->mode->kind : team_no_mode_kind;
+ return 0;
+}
+
+static int team_mode_option_set(struct team *team, void *arg)
+{
+ const char **str = arg;
+
+ return team_change_mode(team, *str);
+}
+
+static const struct team_option team_options[] = {
+ {
+ .name = "mode",
+ .type = TEAM_OPTION_TYPE_STRING,
+ .getter = team_mode_option_get,
+ .setter = team_mode_option_set,
+ },
+};
+
+static int team_init(struct net_device *dev)
+{
+ struct team *team = netdev_priv(dev);
+ int i;
+ int err;
+
+ team->dev = dev;
+ mutex_init(&team->lock);
+
+ team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
+ if (!team->pcpu_stats)
+ return -ENOMEM;
+
+ for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
+ INIT_HLIST_HEAD(&team->port_hlist[i]);
+ INIT_LIST_HEAD(&team->port_list);
+
+ team_adjust_ops(team);
+
+ INIT_LIST_HEAD(&team->option_list);
+ err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
+ if (err)
+ goto err_options_register;
+ netif_carrier_off(dev);
+
+ return 0;
+
+err_options_register:
+ free_percpu(team->pcpu_stats);
+
+ return err;
+}
+
+static void team_uninit(struct net_device *dev)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+ struct team_port *tmp;
+
+ mutex_lock(&team->lock);
+ list_for_each_entry_safe(port, tmp, &team->port_list, list)
+ team_port_del(team, port->dev);
+
+ __team_change_mode(team, NULL); /* cleanup */
+ __team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
+ mutex_unlock(&team->lock);
+}
+
+static void team_destructor(struct net_device *dev)
+{
+ struct team *team = netdev_priv(dev);
+
+ free_percpu(team->pcpu_stats);
+ free_netdev(dev);
+}
+
+static int team_open(struct net_device *dev)
+{
+ netif_carrier_on(dev);
+ return 0;
+}
+
+static int team_close(struct net_device *dev)
+{
+ netif_carrier_off(dev);
+ return 0;
+}
+
+/*
+ * note: already called with rcu_read_lock
+ */
+static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct team *team = netdev_priv(dev);
+ bool tx_success = false;
+ unsigned int len = skb->len;
+
+ tx_success = team->ops.transmit(team, skb);
+ if (tx_success) {
+ struct team_pcpu_stats *pcpu_stats;
+
+ pcpu_stats = this_cpu_ptr(team->pcpu_stats);
+ u64_stats_update_begin(&pcpu_stats->syncp);
+ pcpu_stats->tx_packets++;
+ pcpu_stats->tx_bytes += len;
+ u64_stats_update_end(&pcpu_stats->syncp);
+ } else {
+ this_cpu_inc(team->pcpu_stats->tx_dropped);
+ }
+
+ return NETDEV_TX_OK;
+}
+
+static void team_change_rx_flags(struct net_device *dev, int change)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+ int inc;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list) {
+ if (change & IFF_PROMISC) {
+ inc = dev->flags & IFF_PROMISC ? 1 : -1;
+ dev_set_promiscuity(port->dev, inc);
+ }
+ if (change & IFF_ALLMULTI) {
+ inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
+ dev_set_allmulti(port->dev, inc);
+ }
+ }
+ rcu_read_unlock();
+}
+
+static void team_set_rx_mode(struct net_device *dev)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list) {
+ dev_uc_sync(port->dev, dev);
+ dev_mc_sync(port->dev, dev);
+ }
+ rcu_read_unlock();
+}
+
+static int team_set_mac_address(struct net_device *dev, void *p)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+ struct sockaddr *addr = p;
+
+ memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list)
+ if (team->ops.port_change_mac)
+ team->ops.port_change_mac(team, port);
+ rcu_read_unlock();
+ return 0;
+}
+
+static int team_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+ int err;
+
+ /*
+ * Although this is a reader, it's guarded by the team lock. It's not
+ * possible to traverse the list in reverse under rcu_read_lock.
+ */
+ mutex_lock(&team->lock);
+ list_for_each_entry(port, &team->port_list, list) {
+ err = dev_set_mtu(port->dev, new_mtu);
+ if (err) {
+ netdev_err(dev, "Device %s failed to change mtu",
+ port->dev->name);
+ goto unwind;
+ }
+ }
+ mutex_unlock(&team->lock);
+
+ dev->mtu = new_mtu;
+
+ return 0;
+
+unwind:
+ list_for_each_entry_continue_reverse(port, &team->port_list, list)
+ dev_set_mtu(port->dev, dev->mtu);
+ mutex_unlock(&team->lock);
+
+ return err;
+}
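
team_change_mtu() applies the new MTU port by port and, if one port refuses, walks back over the already-changed ports in reverse to restore the old value; the team lock is taken because that reverse walk cannot be done under rcu_read_lock. A minimal sketch of the apply-then-unwind pattern follows, with hypothetical names and a simulated failure in place of dev_set_mtu() failing.

#include <stdio.h>

#define NPORTS 4

static int mtu[NPORTS] = { 1500, 1500, 1500, 1500 };

static int set_mtu(int port, int new_mtu)
{
        if (port == 2)                  /* pretend port 2 rejects the change */
                return -1;
        mtu[port] = new_mtu;
        return 0;
}

static int set_mtu_all(int new_mtu, int old_mtu)
{
        int i;

        for (i = 0; i < NPORTS; i++) {
                if (set_mtu(i, new_mtu) < 0)
                        goto unwind;
        }
        return 0;

unwind:
        while (--i >= 0)                /* restore only the ports already changed */
                set_mtu(i, old_mtu);
        return -1;
}

int main(void)
{
        int err = set_mtu_all(9000, 1500);

        for (int i = 0; i < NPORTS; i++)
                printf("port %d mtu %d\n", i, mtu[i]);
        return err ? 1 : 0;
}
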
+
+static struct rtnl_link_stats64 *
+team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_pcpu_stats *p;
+ u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
+ u32 rx_dropped = 0, tx_dropped = 0;
+ unsigned int start;
+ int i;
+
+ for_each_possible_cpu(i) {
+ p = per_cpu_ptr(team->pcpu_stats, i);
+ do {
+ start = u64_stats_fetch_begin_bh(&p->syncp);
+ rx_packets = p->rx_packets;
+ rx_bytes = p->rx_bytes;
+ rx_multicast = p->rx_multicast;
+ tx_packets = p->tx_packets;
+ tx_bytes = p->tx_bytes;
+ } while (u64_stats_fetch_retry_bh(&p->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->multicast += rx_multicast;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ /*
+ * rx_dropped & tx_dropped are u32, updated
+ * without syncp protection.
+ */
+ rx_dropped += p->rx_dropped;
+ tx_dropped += p->tx_dropped;
+ }
+ stats->rx_dropped = rx_dropped;
+ stats->tx_dropped = tx_dropped;
+ return stats;
+}
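
team_get_stats64() sums the per-CPU counters into one set of totals, re-reading each 64-bit group if its seqcount changed mid-read, while the u32 drop counters are added without that protection. A simplified sketch of the aggregation step follows; the u64_stats retry loop is elided, the names are hypothetical, and it runs single-threaded.

#include <stdint.h>
#include <stdio.h>

#define NCPUS 4

struct pcpu_stats {
        uint64_t rx_packets, rx_bytes, tx_packets, tx_bytes;
        uint32_t rx_dropped, tx_dropped;        /* u32, no seqcount in the driver either */
};

struct totals {
        uint64_t rx_packets, rx_bytes, tx_packets, tx_bytes;
        uint32_t rx_dropped, tx_dropped;
};

static void get_stats(const struct pcpu_stats *per_cpu, struct totals *t)
{
        for (int cpu = 0; cpu < NCPUS; cpu++) {
                const struct pcpu_stats *p = &per_cpu[cpu];

                /* In the kernel each 64-bit group would be read inside a
                 * u64_stats_fetch_begin_bh()/retry loop; skipped here. */
                t->rx_packets += p->rx_packets;
                t->rx_bytes   += p->rx_bytes;
                t->tx_packets += p->tx_packets;
                t->tx_bytes   += p->tx_bytes;
                t->rx_dropped += p->rx_dropped;
                t->tx_dropped += p->tx_dropped;
        }
}

int main(void)
{
        struct pcpu_stats per_cpu[NCPUS] = {
                { .rx_packets = 10, .rx_bytes = 1000 },
                { .tx_packets = 5,  .tx_bytes = 500, .tx_dropped = 1 },
        };
        struct totals t = { 0 };

        get_stats(per_cpu, &t);
        printf("rx %llu pkts, tx %llu pkts, %u tx dropped\n",
               (unsigned long long)t.rx_packets,
               (unsigned long long)t.tx_packets, t.tx_dropped);
        return 0;
}
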
+
+static int team_vlan_rx_add_vid(struct net_device *dev, uint16_t vid)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+ int err;
+
+ /*
+ * Although this is a reader, it's guarded by the team lock. It's not
+ * possible to traverse the list in reverse under rcu_read_lock.
+ */
+ mutex_lock(&team->lock);
+ list_for_each_entry(port, &team->port_list, list) {
+ err = vlan_vid_add(port->dev, vid);
+ if (err)
+ goto unwind;
+ }
+ mutex_unlock(&team->lock);
+
+ return 0;
+
+unwind:
+ list_for_each_entry_continue_reverse(port, &team->port_list, list)
+ vlan_vid_del(port->dev, vid);
+ mutex_unlock(&team->lock);
+
+ return err;
+}
+
+static int team_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
+{
+ struct team *team = netdev_priv(dev);
+ struct team_port *port;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list)
+ vlan_vid_del(port->dev, vid);
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
+{
+ struct team *team = netdev_priv(dev);
+ int err;
+
+ mutex_lock(&team->lock);
+ err = team_port_add(team, port_dev);
+ mutex_unlock(&team->lock);
+ return err;
+}
+
+static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
+{
+ struct team *team = netdev_priv(dev);
+ int err;
+
+ mutex_lock(&team->lock);
+ err = team_port_del(team, port_dev);
+ mutex_unlock(&team->lock);
+ return err;
+}
+
+static netdev_features_t team_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct team_port *port;
+ struct team *team = netdev_priv(dev);
+ netdev_features_t mask;
+
+ mask = features;
+ features &= ~NETIF_F_ONE_FOR_ALL;
+ features |= NETIF_F_ALL_FOR_ALL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(port, &team->port_list, list) {
+ features = netdev_increment_features(features,
+ port->dev->features,
+ mask);
+ }
+ rcu_read_unlock();
+ return features;
+}
+
+static const struct net_device_ops team_netdev_ops = {
+ .ndo_init = team_init,
+ .ndo_uninit = team_uninit,
+ .ndo_open = team_open,
+ .ndo_stop = team_close,
+ .ndo_start_xmit = team_xmit,
+ .ndo_change_rx_flags = team_change_rx_flags,
+ .ndo_set_rx_mode = team_set_rx_mode,
+ .ndo_set_mac_address = team_set_mac_address,
+ .ndo_change_mtu = team_change_mtu,
+ .ndo_get_stats64 = team_get_stats64,
+ .ndo_vlan_rx_add_vid = team_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = team_vlan_rx_kill_vid,
+ .ndo_add_slave = team_add_slave,
+ .ndo_del_slave = team_del_slave,
+ .ndo_fix_features = team_fix_features,
+};
+
+
+/***********************
+ * rt netlink interface
+ ***********************/
+
+static void team_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+
+ dev->netdev_ops = &team_netdev_ops;
+ dev->destructor = team_destructor;
+ dev->tx_queue_len = 0;
+ dev->flags |= IFF_MULTICAST;
+ dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
+
+ /*
+ * Indicate that we support unicast address filtering. That way the core
+ * won't put us into promiscuous mode when a unicast address is added.
+ * Leave this up to the underlying drivers.
+ */
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
+ dev->features |= NETIF_F_LLTX;
+ dev->features |= NETIF_F_GRO;
+ dev->hw_features = NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+
+ dev->features |= dev->hw_features;
+}
+
+static int team_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ int err;
+
+ if (tb[IFLA_ADDRESS] == NULL)
+ random_ether_addr(dev->dev_addr);
+
+ err = register_netdevice(dev);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int team_validate(struct nlattr *tb[], struct nlattr *data[])
+{
+ if (tb[IFLA_ADDRESS]) {
+ if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
+ return -EINVAL;
+ if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
+ return -EADDRNOTAVAIL;
+ }
+ return 0;
+}
+
+static struct rtnl_link_ops team_link_ops __read_mostly = {
+ .kind = DRV_NAME,
+ .priv_size = sizeof(struct team),
+ .setup = team_setup,
+ .newlink = team_newlink,
+ .validate = team_validate,
+};
+
+
+/***********************************
+ * Generic netlink custom interface
+ ***********************************/
+
+static struct genl_family team_nl_family = {
+ .id = GENL_ID_GENERATE,
+ .name = TEAM_GENL_NAME,
+ .version = TEAM_GENL_VERSION,
+ .maxattr = TEAM_ATTR_MAX,
+ .netnsok = true,
+};
+
+static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
+ [TEAM_ATTR_UNSPEC] = { .type = NLA_UNSPEC, },
+ [TEAM_ATTR_TEAM_IFINDEX] = { .type = NLA_U32 },
+ [TEAM_ATTR_LIST_OPTION] = { .type = NLA_NESTED },
+ [TEAM_ATTR_LIST_PORT] = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy
+team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
+ [TEAM_ATTR_OPTION_UNSPEC] = { .type = NLA_UNSPEC, },
+ [TEAM_ATTR_OPTION_NAME] = {
+ .type = NLA_STRING,
+ .len = TEAM_STRING_MAX_LEN,
+ },
+ [TEAM_ATTR_OPTION_CHANGED] = { .type = NLA_FLAG },
+ [TEAM_ATTR_OPTION_TYPE] = { .type = NLA_U8 },
+ [TEAM_ATTR_OPTION_DATA] = {
+ .type = NLA_BINARY,
+ .len = TEAM_STRING_MAX_LEN,
+ },
+};
+
+static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
+{
+ struct sk_buff *msg;
+ void *hdr;
+ int err;
+
+ msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
+ &team_nl_family, 0, TEAM_CMD_NOOP);
+ if (IS_ERR(hdr)) {
+ err = PTR_ERR(hdr);
+ goto err_msg_put;
+ }
+
+ genlmsg_end(msg, hdr);
+
+ return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
+
+err_msg_put:
+ nlmsg_free(msg);
+
+ return err;
+}
+
+/*
+ * Netlink cmd functions should be locked by the following two functions.
+ * Since the dev gets held here, it is ensured that the dev won't disappear
+ * in between.
+ */
+static struct team *team_nl_team_get(struct genl_info *info)
+{
+ struct net *net = genl_info_net(info);
+ int ifindex;
+ struct net_device *dev;
+ struct team *team;
+
+ if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
+ return NULL;
+
+ ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
+ dev = dev_get_by_index(net, ifindex);
+ if (!dev || dev->netdev_ops != &team_netdev_ops) {
+ if (dev)
+ dev_put(dev);
+ return NULL;
+ }
+
+ team = netdev_priv(dev);
+ mutex_lock(&team->lock);
+ return team;
+}
+
+static void team_nl_team_put(struct team *team)
+{
+ mutex_unlock(&team->lock);
+ dev_put(team->dev);
+}
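
The comment above describes the convention: each netlink command handler brackets its work with team_nl_team_get()/team_nl_team_put(), which take a device reference plus the team lock and release both when the command is done. A minimal sketch of that get/put pairing follows, using a pthread mutex and a plain refcount with hypothetical names.

#include <pthread.h>
#include <stdio.h>

struct ctx {
        pthread_mutex_t lock;
        int refcnt;
};

static struct ctx *ctx_get(struct ctx *c)
{
        c->refcnt++;                    /* stands in for dev_get_by_index() */
        pthread_mutex_lock(&c->lock);
        return c;
}

static void ctx_put(struct ctx *c)
{
        pthread_mutex_unlock(&c->lock);
        c->refcnt--;                    /* stands in for dev_put() */
}

static void some_cmd(struct ctx *c)
{
        struct ctx *held = ctx_get(c);

        printf("running command, refcnt=%d\n", held->refcnt);
        ctx_put(held);
}

int main(void)
{
        struct ctx c = { .lock = PTHREAD_MUTEX_INITIALIZER, .refcnt = 1 };

        some_cmd(&c);
        printf("refcnt back to %d\n", c.refcnt);
        return 0;
}
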
+
+static int team_nl_send_generic(struct genl_info *info, struct team *team,
+ int (*fill_func)(struct sk_buff *skb,
+ struct genl_info *info,
+ int flags, struct team *team))
+{
+ struct sk_buff *skb;
+ int err;
+
+ skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ err = fill_func(skb, info, NLM_F_ACK, team);
+ if (err < 0)
+ goto err_fill;
+
+ err = genlmsg_unicast(genl_info_net(info), skb, info->snd_pid);
+ return err;
+
+err_fill:
+ nlmsg_free(skb);
+ return err;
+}
+
+static int team_nl_fill_options_get(struct sk_buff *skb,
+ u32 pid, u32 seq, int flags,
+ struct team *team, bool fillall)
+{
+ struct nlattr *option_list;
+ void *hdr;
+ struct team_option *option;
+
+ hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
+ TEAM_CMD_OPTIONS_GET);
+ if (IS_ERR(hdr))
+ return PTR_ERR(hdr);
+
+ NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex);
+ option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
+ if (!option_list)
+ return -EMSGSIZE;
+
+ list_for_each_entry(option, &team->option_list, list) {
+ struct nlattr *option_item;
+ long arg;
+
+ /* Include only changed options if fill all mode is not on */
+ if (!fillall && !option->changed)
+ continue;
+ option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
+ if (!option_item)
+ goto nla_put_failure;
+ NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_NAME, option->name);
+ if (option->changed) {
+ NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_CHANGED);
+ option->changed = false;
+ }
+ if (option->removed)
+ NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_REMOVED);
+ switch (option->type) {
+ case TEAM_OPTION_TYPE_U32:
+ NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32);
+ team_option_get(team, option, &arg);
+ NLA_PUT_U32(skb, TEAM_ATTR_OPTION_DATA, arg);
+ break;
+ case TEAM_OPTION_TYPE_STRING:
+ NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING);
+ team_option_get(team, option, &arg);
+ NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_DATA,
+ (char *) arg);
+ break;
+ default:
+ BUG();
+ }
+ nla_nest_end(skb, option_item);
+ }
+
+ nla_nest_end(skb, option_list);
+ return genlmsg_end(skb, hdr);
+
+nla_put_failure:
+ genlmsg_cancel(skb, hdr);
+ return -EMSGSIZE;
+}
+
+static int team_nl_fill_options_get_all(struct sk_buff *skb,
+ struct genl_info *info, int flags,
+ struct team *team)
+{
+ return team_nl_fill_options_get(skb, info->snd_pid,
+ info->snd_seq, NLM_F_ACK,
+ team, true);
+}
+
+static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
+{
+ struct team *team;
+ int err;
+
+ team = team_nl_team_get(info);
+ if (!team)
+ return -EINVAL;
+
+ err = team_nl_send_generic(info, team, team_nl_fill_options_get_all);
+
+ team_nl_team_put(team);
+
+ return err;
+}
+
+static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
+{
+ struct team *team;
+ int err = 0;
+ int i;
+ struct nlattr *nl_option;
+
+ team = team_nl_team_get(info);
+ if (!team)
+ return -EINVAL;
+
+ err = -EINVAL;
+ if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
+ err = -EINVAL;
+ goto team_put;
+ }
+
+ nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
+ struct nlattr *mode_attrs[TEAM_ATTR_OPTION_MAX + 1];
+ enum team_option_type opt_type;
+ struct team_option *option;
+ char *opt_name;
+ bool opt_found = false;
+
+ if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
+ err = -EINVAL;
+ goto team_put;
+ }
+ err = nla_parse_nested(mode_attrs, TEAM_ATTR_OPTION_MAX,
+ nl_option, team_nl_option_policy);
+ if (err)
+ goto team_put;
+ if (!mode_attrs[TEAM_ATTR_OPTION_NAME] ||
+ !mode_attrs[TEAM_ATTR_OPTION_TYPE] ||
+ !mode_attrs[TEAM_ATTR_OPTION_DATA]) {
+ err = -EINVAL;
+ goto team_put;
+ }
+ switch (nla_get_u8(mode_attrs[TEAM_ATTR_OPTION_TYPE])) {
+ case NLA_U32:
+ opt_type = TEAM_OPTION_TYPE_U32;
+ break;
+ case NLA_STRING:
+ opt_type = TEAM_OPTION_TYPE_STRING;
+ break;
+ default:
+ goto team_put;
+ }
+
+ opt_name = nla_data(mode_attrs[TEAM_ATTR_OPTION_NAME]);
+ list_for_each_entry(option, &team->option_list, list) {
+ long arg;
+ struct nlattr *opt_data_attr;
+
+ if (option->type != opt_type ||
+ strcmp(option->name, opt_name))
+ continue;
+ opt_found = true;
+ opt_data_attr = mode_attrs[TEAM_ATTR_OPTION_DATA];
+ switch (opt_type) {
+ case TEAM_OPTION_TYPE_U32:
+ arg = nla_get_u32(opt_data_attr);
+ break;
+ case TEAM_OPTION_TYPE_STRING:
+ arg = (long) nla_data(opt_data_attr);
+ break;
+ default:
+ BUG();
+ }
+ err = team_option_set(team, option, &arg);
+ if (err)
+ goto team_put;
+ }
+ if (!opt_found) {
+ err = -ENOENT;
+ goto team_put;
+ }
+ }
+
+team_put:
+ team_nl_team_put(team);
+
+ return err;
+}
+
+static int team_nl_fill_port_list_get(struct sk_buff *skb,
+ u32 pid, u32 seq, int flags,
+ struct team *team,
+ bool fillall)
+{
+ struct nlattr *port_list;
+ void *hdr;
+ struct team_port *port;
+
+ hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
+ TEAM_CMD_PORT_LIST_GET);
+ if (IS_ERR(hdr))
+ return PTR_ERR(hdr);
+
+ NLA_PUT_U32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex);
+ port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
+ if (!port_list)
+ return -EMSGSIZE;
+
+ list_for_each_entry(port, &team->port_list, list) {
+ struct nlattr *port_item;
+
+ /* Include only changed ports if fill all mode is not on */
+ if (!fillall && !port->changed)
+ continue;
+ port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
+ if (!port_item)
+ goto nla_put_failure;
+ NLA_PUT_U32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex);
+ if (port->changed) {
+ NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_CHANGED);
+ port->changed = false;
+ }
+ if (port->removed)
+ NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_REMOVED);
+ if (port->linkup)
+ NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_LINKUP);
+ NLA_PUT_U32(skb, TEAM_ATTR_PORT_SPEED, port->speed);
+ NLA_PUT_U8(skb, TEAM_ATTR_PORT_DUPLEX, port->duplex);
+ nla_nest_end(skb, port_item);
+ }
+
+ nla_nest_end(skb, port_list);
+ return genlmsg_end(skb, hdr);
+
+nla_put_failure:
+ genlmsg_cancel(skb, hdr);
+ return -EMSGSIZE;
+}
+
+static int team_nl_fill_port_list_get_all(struct sk_buff *skb,
+ struct genl_info *info, int flags,
+ struct team *team)
+{
+ return team_nl_fill_port_list_get(skb, info->snd_pid,
+ info->snd_seq, NLM_F_ACK,
+ team, true);
+}
+
+static int team_nl_cmd_port_list_get(struct sk_buff *skb,
+ struct genl_info *info)
+{
+ struct team *team;
+ int err;
+
+ team = team_nl_team_get(info);
+ if (!team)
+ return -EINVAL;
+
+ err = team_nl_send_generic(info, team, team_nl_fill_port_list_get_all);
+
+ team_nl_team_put(team);
+
+ return err;
+}
+
+static struct genl_ops team_nl_ops[] = {
+ {
+ .cmd = TEAM_CMD_NOOP,
+ .doit = team_nl_cmd_noop,
+ .policy = team_nl_policy,
+ },
+ {
+ .cmd = TEAM_CMD_OPTIONS_SET,
+ .doit = team_nl_cmd_options_set,
+ .policy = team_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TEAM_CMD_OPTIONS_GET,
+ .doit = team_nl_cmd_options_get,
+ .policy = team_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+ {
+ .cmd = TEAM_CMD_PORT_LIST_GET,
+ .doit = team_nl_cmd_port_list_get,
+ .policy = team_nl_policy,
+ .flags = GENL_ADMIN_PERM,
+ },
+};
+
+static struct genl_multicast_group team_change_event_mcgrp = {
+ .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
+};
+
+static int team_nl_send_event_options_get(struct team *team)
+{
+ struct sk_buff *skb;
+ int err;
+ struct net *net = dev_net(team->dev);
+
+ skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ err = team_nl_fill_options_get(skb, 0, 0, 0, team, false);
+ if (err < 0)
+ goto err_fill;
+
+ err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
+ GFP_KERNEL);
+ return err;
+
+err_fill:
+ nlmsg_free(skb);
+ return err;
+}
+
+static int team_nl_send_event_port_list_get(struct team *team)
+{
+ struct sk_buff *skb;
+ int err;
+ struct net *net = dev_net(team->dev);
+
+ skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ err = team_nl_fill_port_list_get(skb, 0, 0, 0, team, false);
+ if (err < 0)
+ goto err_fill;
+
+ err = genlmsg_multicast_netns(net, skb, 0, team_change_event_mcgrp.id,
+ GFP_KERNEL);
+ return err;
+
+err_fill:
+ nlmsg_free(skb);
+ return err;
+}
+
+static int team_nl_init(void)
+{
+ int err;
+
+ err = genl_register_family_with_ops(&team_nl_family, team_nl_ops,
+ ARRAY_SIZE(team_nl_ops));
+ if (err)
+ return err;
+
+ err = genl_register_mc_group(&team_nl_family, &team_change_event_mcgrp);
+ if (err)
+ goto err_change_event_grp_reg;
+
+ return 0;
+
+err_change_event_grp_reg:
+ genl_unregister_family(&team_nl_family);
+
+ return err;
+}
+
+static void team_nl_fini(void)
+{
+ genl_unregister_family(&team_nl_family);
+}
+
+
+/******************
+ * Change checkers
+ ******************/
+
+static void __team_options_change_check(struct team *team)
+{
+ int err;
+
+ err = team_nl_send_event_options_get(team);
+ if (err)
+ netdev_warn(team->dev, "Failed to send options change via netlink\n");
+}
+
+/* rtnl lock is held */
+static void __team_port_change_check(struct team_port *port, bool linkup)
+{
+ int err;
+
+ if (!port->removed && port->linkup == linkup)
+ return;
+
+ port->changed = true;
+ port->linkup = linkup;
+ if (linkup) {
+ struct ethtool_cmd ecmd;
+
+ err = __ethtool_get_settings(port->dev, &ecmd);
+ if (!err) {
+ port->speed = ethtool_cmd_speed(&ecmd);
+ port->duplex = ecmd.duplex;
+ goto send_event;
+ }
+ }
+ port->speed = 0;
+ port->duplex = 0;
+
+send_event:
+ err = team_nl_send_event_port_list_get(port->team);
+ if (err)
+ netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n",
+ port->dev->name);
+
+}
+
+static void team_port_change_check(struct team_port *port, bool linkup)
+{
+ struct team *team = port->team;
+
+ mutex_lock(&team->lock);
+ __team_port_change_check(port, linkup);
+ mutex_unlock(&team->lock);
+}
+
+/************************************
+ * Net device notifier event handler
+ ************************************/
+
+static int team_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = (struct net_device *) ptr;
+ struct team_port *port;
+
+ port = team_port_get_rtnl(dev);
+ if (!port)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_UP:
+ if (netif_carrier_ok(dev))
+ team_port_change_check(port, true);
+ case NETDEV_DOWN:
+ team_port_change_check(port, false);
+ case NETDEV_CHANGE:
+ if (netif_running(port->dev))
+ team_port_change_check(port,
+ !!netif_carrier_ok(port->dev));
+ break;
+ case NETDEV_UNREGISTER:
+ team_del_slave(port->team->dev, dev);
+ break;
+ case NETDEV_FEAT_CHANGE:
+ team_compute_features(port->team);
+ break;
+ case NETDEV_CHANGEMTU:
+ /* Forbid changing the MTU of the underlying device */
+ return NOTIFY_BAD;
+ case NETDEV_PRE_TYPE_CHANGE:
+ /* Forbid changing the type of the underlying device */
+ return NOTIFY_BAD;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block team_notifier_block __read_mostly = {
+ .notifier_call = team_device_event,
+};
+
+
+/***********************
+ * Module init and exit
+ ***********************/
+
+static int __init team_module_init(void)
+{
+ int err;
+
+ register_netdevice_notifier(&team_notifier_block);
+
+ err = rtnl_link_register(&team_link_ops);
+ if (err)
+ goto err_rtnl_reg;
+
+ err = team_nl_init();
+ if (err)
+ goto err_nl_init;
+
+ return 0;
+
+err_nl_init:
+ rtnl_link_unregister(&team_link_ops);
+
+err_rtnl_reg:
+ unregister_netdevice_notifier(&team_notifier_block);
+
+ return err;
+}
+
+static void __exit team_module_exit(void)
+{
+ team_nl_fini();
+ rtnl_link_unregister(&team_link_ops);
+ unregister_netdevice_notifier(&team_notifier_block);
+}
+
+module_init(team_module_init);
+module_exit(team_module_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
+MODULE_DESCRIPTION("Ethernet team device driver");
+MODULE_ALIAS_RTNL_LINK(DRV_NAME);
diff --git a/drivers/net/team/team_mode_activebackup.c b/drivers/net/team/team_mode_activebackup.c
new file mode 100644
index 0000000..f4d960e
--- /dev/null
+++ b/drivers/net/team/team_mode_activebackup.c
@@ -0,0 +1,136 @@
+/*
+ * net/drivers/team/team_mode_activebackup.c - Active-backup mode for team
+ * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <net/rtnetlink.h>
+#include <linux/if_team.h>
+
+struct ab_priv {
+ struct team_port __rcu *active_port;
+};
+
+static struct ab_priv *ab_priv(struct team *team)
+{
+ return (struct ab_priv *) &team->mode_priv;
+}
+
+static rx_handler_result_t ab_receive(struct team *team, struct team_port *port,
+ struct sk_buff *skb)
+{
+ struct team_port *active_port;
+
+ active_port = rcu_dereference(ab_priv(team)->active_port);
+ if (active_port != port)
+ return RX_HANDLER_EXACT;
+ return RX_HANDLER_ANOTHER;
+}
+
+static bool ab_transmit(struct team *team, struct sk_buff *skb)
+{
+ struct team_port *active_port;
+
+ active_port = rcu_dereference(ab_priv(team)->active_port);
+ if (unlikely(!active_port))
+ goto drop;
+ skb->dev = active_port->dev;
+ if (dev_queue_xmit(skb))
+ return false;
+ return true;
+
+drop:
+ dev_kfree_skb_any(skb);
+ return false;
+}
+
+static void ab_port_leave(struct team *team, struct team_port *port)
+{
+ if (ab_priv(team)->active_port == port)
+ RCU_INIT_POINTER(ab_priv(team)->active_port, NULL);
+}
+
+static int ab_active_port_get(struct team *team, void *arg)
+{
+ u32 *ifindex = arg;
+
+ *ifindex = 0;
+ if (ab_priv(team)->active_port)
+ *ifindex = ab_priv(team)->active_port->dev->ifindex;
+ return 0;
+}
+
+static int ab_active_port_set(struct team *team, void *arg)
+{
+ u32 *ifindex = arg;
+ struct team_port *port;
+
+ list_for_each_entry_rcu(port, &team->port_list, list) {
+ if (port->dev->ifindex == *ifindex) {
+ rcu_assign_pointer(ab_priv(team)->active_port, port);
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+static const struct team_option ab_options[] = {
+ {
+ .name = "activeport",
+ .type = TEAM_OPTION_TYPE_U32,
+ .getter = ab_active_port_get,
+ .setter = ab_active_port_set,
+ },
+};
+
+int ab_init(struct team *team)
+{
+ return team_options_register(team, ab_options, ARRAY_SIZE(ab_options));
+}
+
+void ab_exit(struct team *team)
+{
+ team_options_unregister(team, ab_options, ARRAY_SIZE(ab_options));
+}
+
+static const struct team_mode_ops ab_mode_ops = {
+ .init = ab_init,
+ .exit = ab_exit,
+ .receive = ab_receive,
+ .transmit = ab_transmit,
+ .port_leave = ab_port_leave,
+};
+
+static struct team_mode ab_mode = {
+ .kind = "activebackup",
+ .owner = THIS_MODULE,
+ .priv_size = sizeof(struct ab_priv),
+ .ops = &ab_mode_ops,
+};
+
+static int __init ab_init_module(void)
+{
+ return team_mode_register(&ab_mode);
+}
+
+static void __exit ab_cleanup_module(void)
+{
+ team_mode_unregister(&ab_mode);
+}
+
+module_init(ab_init_module);
+module_exit(ab_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
+MODULE_DESCRIPTION("Active-backup mode for team");
+MODULE_ALIAS("team-mode-activebackup");
diff --git a/drivers/net/team/team_mode_roundrobin.c b/drivers/net/team/team_mode_roundrobin.c
new file mode 100644
index 0000000..a0e8f80
--- /dev/null
+++ b/drivers/net/team/team_mode_roundrobin.c
@@ -0,0 +1,107 @@
+/*
+ * net/drivers/team/team_mode_roundrobin.c - Round-robin mode for team
+ * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/if_team.h>
+
+struct rr_priv {
+ unsigned int sent_packets;
+};
+
+static struct rr_priv *rr_priv(struct team *team)
+{
+ return (struct rr_priv *) &team->mode_priv;
+}
+
+static struct team_port *__get_first_port_up(struct team *team,
+ struct team_port *port)
+{
+ struct team_port *cur;
+
+ if (port->linkup)
+ return port;
+ cur = port;
+ list_for_each_entry_continue_rcu(cur, &team->port_list, list)
+ if (cur->linkup)
+ return cur;
+ list_for_each_entry_rcu(cur, &team->port_list, list) {
+ if (cur == port)
+ break;
+ if (cur->linkup)
+ return cur;
+ }
+ return NULL;
+}
+
+static bool rr_transmit(struct team *team, struct sk_buff *skb)
+{
+ struct team_port *port;
+ int port_index;
+
+ port_index = rr_priv(team)->sent_packets++ % team->port_count;
+ port = team_get_port_by_index_rcu(team, port_index);
+ port = __get_first_port_up(team, port);
+ if (unlikely(!port))
+ goto drop;
+ skb->dev = port->dev;
+ if (dev_queue_xmit(skb))
+ return false;
+ return true;
+
+drop:
+ dev_kfree_skb_any(skb);
+ return false;
+}
+
+static int rr_port_enter(struct team *team, struct team_port *port)
+{
+ return team_port_set_team_mac(port);
+}
+
+static void rr_port_change_mac(struct team *team, struct team_port *port)
+{
+ team_port_set_team_mac(port);
+}
+
+static const struct team_mode_ops rr_mode_ops = {
+ .transmit = rr_transmit,
+ .port_enter = rr_port_enter,
+ .port_change_mac = rr_port_change_mac,
+};
+
+static struct team_mode rr_mode = {
+ .kind = "roundrobin",
+ .owner = THIS_MODULE,
+ .priv_size = sizeof(struct rr_priv),
+ .ops = &rr_mode_ops,
+};
+
+static int __init rr_init_module(void)
+{
+ return team_mode_register(&rr_mode);
+}
+
+static void __exit rr_cleanup_module(void)
+{
+ team_mode_unregister(&rr_mode);
+}
+
+module_init(rr_init_module);
+module_exit(rr_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
+MODULE_DESCRIPTION("Round-robin mode for team");
+MODULE_ALIAS("team-mode-roundrobin");
diff --git a/drivers/net/tokenring/Kconfig b/drivers/net/tokenring/Kconfig
index c7e0149..45550d4 100644
--- a/drivers/net/tokenring/Kconfig
+++ b/drivers/net/tokenring/Kconfig
@@ -7,7 +7,6 @@ menuconfig TR
bool "Token Ring driver support"
depends on NETDEVICES && !UML
depends on (PCI || ISA || MCA || CCW || PCMCIA)
- select LLC
help
Token Ring is IBM's way of communication on a local network; the
rest of the world uses Ethernet. To participate on a Token Ring
@@ -20,6 +19,10 @@ menuconfig TR
if TR
+config WANT_LLC
+ def_bool y
+ select LLC
+
config PCMCIA_IBMTR
tristate "IBM PCMCIA tokenring adapter support"
depends on IBMTR!=y && PCMCIA
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7bea9c6..2d7601d 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -123,7 +123,7 @@ struct tun_struct {
gid_t group;
struct net_device *dev;
- u32 set_features;
+ netdev_features_t set_features;
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
NETIF_F_TSO6|NETIF_F_UFO)
struct fasync_struct *fasync;
@@ -359,7 +359,7 @@ static void tun_free_netdev(struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
- sock_put(tun->socket.sk);
+ sk_release_kernel(tun->socket.sk);
}
/* Net device open. */
@@ -454,7 +454,8 @@ tun_net_change_mtu(struct net_device *dev, int new_mtu)
return 0;
}
-static u32 tun_net_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t tun_net_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct tun_struct *tun = netdev_priv(dev);
@@ -979,10 +980,18 @@ static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
return ret;
}
+static int tun_release(struct socket *sock)
+{
+ if (sock->sk)
+ sock_put(sock->sk);
+ return 0;
+}
+
/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops tun_socket_ops = {
.sendmsg = tun_sendmsg,
.recvmsg = tun_recvmsg,
+ .release = tun_release,
};
static struct proto tun_proto = {
@@ -1109,10 +1118,11 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
err = -ENOMEM;
- sk = sk_alloc(net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
+ sk = sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
if (!sk)
goto err_free_dev;
+ sk_change_net(sk, net);
tun->socket.wq = &tun->wq;
init_waitqueue_head(&tun->wq.wait);
tun->socket.ops = &tun_socket_ops;
@@ -1173,7 +1183,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
return 0;
err_free_sk:
- sock_put(sk);
+ tun_free_netdev(dev);
err_free_dev:
free_netdev(dev);
failed:
@@ -1196,7 +1206,7 @@ static int tun_get_iff(struct net *net, struct tun_struct *tun,
* privs required. */
static int set_offload(struct tun_struct *tun, unsigned long arg)
{
- u32 features = 0;
+ netdev_features_t features = 0;
if (arg & TUN_F_CSUM) {
features |= NETIF_F_HW_CSUM;
@@ -1589,16 +1599,15 @@ static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info
{
struct tun_struct *tun = netdev_priv(dev);
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "N/A");
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
switch (tun->flags & TUN_TYPE_MASK) {
case TUN_TUN_DEV:
- strcpy(info->bus_info, "tun");
+ strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
break;
case TUN_TAP_DEV:
- strcpy(info->bus_info, "tap");
+ strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
break;
}
}
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index e95f0e6..d6da5ee 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -36,7 +36,7 @@
#include <linux/usb/usbnet.h>
#include <linux/slab.h>
-#define DRIVER_VERSION "08-Nov-2011"
+#define DRIVER_VERSION "22-Dec-2011"
#define DRIVER_NAME "asix"
/* ASIX AX8817X based USB 2.0 Ethernet Devices */
@@ -376,7 +376,7 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb_pull(skb, (size + 1) & 0xfffe);
- if (skb->len == 0)
+ if (skb->len < sizeof(header))
break;
head = (u8 *) skb->data;
@@ -689,6 +689,10 @@ asix_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
}
wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
wolinfo->wolopts = 0;
+ if (opt & AX_MONITOR_LINK)
+ wolinfo->wolopts |= WAKE_PHY;
+ if (opt & AX_MONITOR_MAGIC)
+ wolinfo->wolopts |= WAKE_MAGIC;
}
static int
@@ -974,6 +978,7 @@ static int ax88772_link_reset(struct usbnet *dev)
static int ax88772_reset(struct usbnet *dev)
{
+ struct asix_data *data = (struct asix_data *)&dev->data;
int ret, embd_phy;
u16 rx_ctl;
@@ -1051,6 +1056,13 @@ static int ax88772_reset(struct usbnet *dev)
goto out;
}
+ /* Rewrite MAC address */
+ memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
+ data->mac_addr);
+ if (ret < 0)
+ goto out;
+
/* Set RX_CTL to default values with 2k buffer, and enable cactus */
ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL);
if (ret < 0)
@@ -1148,7 +1160,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
return 0;
}
-static struct ethtool_ops ax88178_ethtool_ops = {
+static const struct ethtool_ops ax88178_ethtool_ops = {
.get_drvinfo = asix_get_drvinfo,
.get_link = asix_get_link,
.get_msglevel = usbnet_get_msglevel,
@@ -1316,6 +1328,13 @@ static int ax88178_reset(struct usbnet *dev)
if (ret < 0)
return ret;
+ /* Rewrite MAC address */
+ memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
+ ret = asix_write_cmd(dev, AX_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
+ data->mac_addr);
+ if (ret < 0)
+ return ret;
+
ret = asix_write_rx_ctl(dev, AX_DEFAULT_RX_CTL);
if (ret < 0)
return ret;
@@ -1580,6 +1599,10 @@ static const struct usb_device_id products [] = {
USB_DEVICE (0x6189, 0x182d),
.driver_info = (unsigned long) &ax8817x_info,
}, {
+ // Sitecom LN-031 "USB 2.0 10/100/1000 Ethernet adapter"
+ USB_DEVICE (0x0df6, 0x0056),
+ .driver_info = (unsigned long) &ax88178_info,
+}, {
// corega FEther USB2-TX
USB_DEVICE (0x07aa, 0x0017),
.driver_info = (unsigned long) &ax8817x_info,
@@ -1674,17 +1697,7 @@ static struct usb_driver asix_driver = {
.supports_autosuspend = 1,
};
-static int __init asix_init(void)
-{
- return usb_register(&asix_driver);
-}
-module_init(asix_init);
-
-static void __exit asix_exit(void)
-{
- usb_deregister(&asix_driver);
-}
-module_exit(asix_exit);
+module_usb_driver(asix_driver);
MODULE_AUTHOR("David Hollis");
MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index a68272c..182cfb4 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -949,19 +949,4 @@ static struct usb_driver catc_driver = {
.id_table = catc_id_table,
};
-static int __init catc_init(void)
-{
- int result = usb_register(&catc_driver);
- if (result == 0)
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
- return result;
-}
-
-static void __exit catc_exit(void)
-{
- usb_deregister(&catc_driver);
-}
-
-module_init(catc_init);
-module_exit(catc_exit);
+module_usb_driver(catc_driver);
diff --git a/drivers/net/usb/cdc-phonet.c b/drivers/net/usb/cdc-phonet.c
index a60d006..790cbde 100644
--- a/drivers/net/usb/cdc-phonet.c
+++ b/drivers/net/usb/cdc-phonet.c
@@ -130,7 +130,7 @@ static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags)
struct page *page;
int err;
- page = __netdev_alloc_page(dev, gfp_flags);
+ page = alloc_page(gfp_flags);
if (!page)
return -ENOMEM;
@@ -140,7 +140,7 @@ static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags)
err = usb_submit_urb(req, gfp_flags);
if (unlikely(err)) {
dev_dbg(&dev->dev, "RX submit error (%d)\n", err);
- netdev_free_page(dev, page);
+ put_page(page);
}
return err;
}
@@ -208,9 +208,9 @@ static void rx_complete(struct urb *req)
dev->stats.rx_errors++;
resubmit:
if (page)
- netdev_free_page(dev, page);
+ put_page(page);
if (req)
- rx_submit(pnd, req, GFP_ATOMIC);
+ rx_submit(pnd, req, GFP_ATOMIC | __GFP_COLD);
}
static int usbpn_close(struct net_device *dev);
@@ -229,7 +229,7 @@ static int usbpn_open(struct net_device *dev)
for (i = 0; i < rxq_size; i++) {
struct urb *req = usb_alloc_urb(0, GFP_KERNEL);
- if (!req || rx_submit(pnd, req, GFP_KERNEL)) {
+ if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) {
usbpn_close(dev);
return -ENOMEM;
}
@@ -457,18 +457,7 @@ static struct usb_driver usbpn_driver = {
.id_table = usbpn_ids,
};
-static int __init usbpn_init(void)
-{
- return usb_register(&usbpn_driver);
-}
-
-static void __exit usbpn_exit(void)
-{
- usb_deregister(&usbpn_driver);
-}
-
-module_init(usbpn_init);
-module_exit(usbpn_exit);
+module_usb_driver(usbpn_driver);
MODULE_AUTHOR("Remi Denis-Courmont");
MODULE_DESCRIPTION("USB CDC Phonet host interface");
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index 882f53f..439690b 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -369,18 +369,7 @@ static struct usb_driver eem_driver = {
.resume = usbnet_resume,
};
-
-static int __init eem_init(void)
-{
- return usb_register(&eem_driver);
-}
-module_init(eem_init);
-
-static void __exit eem_exit(void)
-{
- usb_deregister(&eem_driver);
-}
-module_exit(eem_exit);
+module_usb_driver(eem_driver);
MODULE_AUTHOR("Omar Laazimani <omar.oberthur@gmail.com>");
MODULE_DESCRIPTION("USB CDC EEM");
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 99ed6eb..90a3002 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -425,6 +425,9 @@ int usbnet_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
int status;
struct cdc_state *info = (void *) &dev->data;
+ BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data)
+ < sizeof(struct cdc_state)));
+
status = usbnet_generic_cdc_bind(dev, intf);
if (status < 0)
return status;
@@ -570,6 +573,13 @@ static const struct usb_device_id products [] = {
.driver_info = 0,
},
+/* Logitech Harmony 900 - uses the pseudo-MDLM (BLAN) driver */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(0x046d, 0xc11f, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/*
* WHITELIST!!!
*
@@ -615,21 +625,7 @@ static struct usb_driver cdc_driver = {
.supports_autosuspend = 1,
};
-
-static int __init cdc_init(void)
-{
- BUILD_BUG_ON((sizeof(((struct usbnet *)0)->data)
- < sizeof(struct cdc_state)));
-
- return usb_register(&cdc_driver);
-}
-module_init(cdc_init);
-
-static void __exit cdc_exit(void)
-{
- usb_deregister(&cdc_driver);
-}
-module_exit(cdc_exit);
+module_usb_driver(cdc_driver);
MODULE_AUTHOR("David Brownell");
MODULE_DESCRIPTION("USB CDC Ethernet devices");
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index f06fb78..3a539a9 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -138,7 +138,7 @@ struct cdc_ncm_ctx {
static void cdc_ncm_tx_timeout(unsigned long arg);
static const struct driver_info cdc_ncm_info;
static struct usb_driver cdc_ncm_driver;
-static struct ethtool_ops cdc_ncm_ethtool_ops;
+static const struct ethtool_ops cdc_ncm_ethtool_ops;
static const struct usb_device_id cdc_devs[] = {
{ USB_INTERFACE_INFO(USB_CLASS_COMM,
@@ -465,12 +465,10 @@ static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
int temp;
u8 iface_no;
- ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx == NULL)
return -ENODEV;
- memset(ctx, 0, sizeof(*ctx));
-
init_timer(&ctx->tx_timer);
spin_lock_init(&ctx->mtx);
ctx->netdev = dev->net;
@@ -1222,7 +1220,7 @@ static struct usb_driver cdc_ncm_driver = {
.supports_autosuspend = 1,
};
-static struct ethtool_ops cdc_ncm_ethtool_ops = {
+static const struct ethtool_ops cdc_ncm_ethtool_ops = {
.get_drvinfo = cdc_ncm_get_drvinfo,
.get_link = usbnet_get_link,
.get_msglevel = usbnet_get_msglevel,
@@ -1232,20 +1230,7 @@ static struct ethtool_ops cdc_ncm_ethtool_ops = {
.nway_reset = usbnet_nway_reset,
};
-static int __init cdc_ncm_init(void)
-{
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION "\n");
- return usb_register(&cdc_ncm_driver);
-}
-
-module_init(cdc_ncm_init);
-
-static void __exit cdc_ncm_exit(void)
-{
- usb_deregister(&cdc_ncm_driver);
-}
-
-module_exit(cdc_ncm_exit);
+module_usb_driver(cdc_ncm_driver);
MODULE_AUTHOR("Hans Petter Selasky");
MODULE_DESCRIPTION("USB CDC NCM host driver");
diff --git a/drivers/net/usb/cdc_subset.c b/drivers/net/usb/cdc_subset.c
index fc5f13d..b403d93 100644
--- a/drivers/net/usb/cdc_subset.c
+++ b/drivers/net/usb/cdc_subset.c
@@ -338,17 +338,7 @@ static struct usb_driver cdc_subset_driver = {
.id_table = products,
};
-static int __init cdc_subset_init(void)
-{
- return usb_register(&cdc_subset_driver);
-}
-module_init(cdc_subset_init);
-
-static void __exit cdc_subset_exit(void)
-{
- usb_deregister(&cdc_subset_driver);
-}
-module_exit(cdc_subset_exit);
+module_usb_driver(cdc_subset_driver);
MODULE_AUTHOR("David Brownell");
MODULE_DESCRIPTION("Simple 'CDC Subset' USB networking links");
diff --git a/drivers/net/usb/cx82310_eth.c b/drivers/net/usb/cx82310_eth.c
index 8969f12..0e05313 100644
--- a/drivers/net/usb/cx82310_eth.c
+++ b/drivers/net/usb/cx82310_eth.c
@@ -329,17 +329,7 @@ static struct usb_driver cx82310_driver = {
.resume = usbnet_resume,
};
-static int __init cx82310_init(void)
-{
- return usb_register(&cx82310_driver);
-}
-module_init(cx82310_init);
-
-static void __exit cx82310_exit(void)
-{
- usb_deregister(&cx82310_driver);
-}
-module_exit(cx82310_exit);
+module_usb_driver(cx82310_driver);
MODULE_AUTHOR("Ondrej Zary");
MODULE_DESCRIPTION("Conexant CX82310-based ADSL router USB ethernet driver");
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index fbc0e4d..b972263 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -672,18 +672,7 @@ static struct usb_driver dm9601_driver = {
.resume = usbnet_resume,
};
-static int __init dm9601_init(void)
-{
- return usb_register(&dm9601_driver);
-}
-
-static void __exit dm9601_exit(void)
-{
- usb_deregister(&dm9601_driver);
-}
-
-module_init(dm9601_init);
-module_exit(dm9601_exit);
+module_usb_driver(dm9601_driver);
MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
MODULE_DESCRIPTION("Davicom DM9601 USB 1.1 ethernet devices");
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index c4cfd1d..38266bd 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -227,17 +227,7 @@ static struct usb_driver gl620a_driver = {
.resume = usbnet_resume,
};
-static int __init usbnet_init(void)
-{
- return usb_register(&gl620a_driver);
-}
-module_init(usbnet_init);
-
-static void __exit usbnet_exit(void)
-{
- usb_deregister(&gl620a_driver);
-}
-module_exit(usbnet_exit);
+module_usb_driver(gl620a_driver);
MODULE_AUTHOR("Jiun-Jie Huang");
MODULE_DESCRIPTION("GL620-USB-A Host-to-Host Link cables");
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 304fe78..e1324b4 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1632,7 +1632,7 @@ static int hso_get_count(struct tty_struct *tty,
struct hso_serial *serial = get_serial_by_tty(tty);
struct hso_tiocmget *tiocmget = serial->tiocmget;
- memset(&icount, 0, sizeof(struct serial_icounter_struct));
+ memset(icount, 0, sizeof(struct serial_icounter_struct));
if (!tiocmget)
return -ENOENT;
diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c
index 131ac6c..12a22a4 100644
--- a/drivers/net/usb/int51x1.c
+++ b/drivers/net/usb/int51x1.c
@@ -238,17 +238,7 @@ static struct usb_driver int51x1_driver = {
.resume = usbnet_resume,
};
-static int __init int51x1_init(void)
-{
- return usb_register(&int51x1_driver);
-}
-module_init(int51x1_init);
-
-static void __exit int51x1_exit(void)
-{
- usb_deregister(&int51x1_driver);
-}
-module_exit(int51x1_exit);
+module_usb_driver(int51x1_driver);
MODULE_AUTHOR("Peter Holik");
MODULE_DESCRIPTION("Intellon usb powerline adapter");
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index 13c1f04..dd78c4c 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -60,6 +60,7 @@
#define USB_PRODUCT_IPHONE_3GS 0x1294
#define USB_PRODUCT_IPHONE_4 0x1297
#define USB_PRODUCT_IPHONE_4_VZW 0x129c
+#define USB_PRODUCT_IPHONE_4S 0x12a0
#define IPHETH_USBINTF_CLASS 255
#define IPHETH_USBINTF_SUBCLASS 253
@@ -103,6 +104,10 @@ static struct usb_device_id ipheth_table[] = {
USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW,
IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
IPHETH_USBINTF_PROTO) },
+ { USB_DEVICE_AND_INTERFACE_INFO(
+ USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4S,
+ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS,
+ IPHETH_USBINTF_PROTO) },
{ }
};
MODULE_DEVICE_TABLE(usb, ipheth_table);
@@ -420,7 +425,7 @@ static u32 ipheth_ethtool_op_get_link(struct net_device *net)
return netif_carrier_ok(dev->net);
}
-static struct ethtool_ops ops = {
+static const struct ethtool_ops ops = {
.get_link = ipheth_ethtool_op_get_link
};
@@ -543,25 +548,7 @@ static struct usb_driver ipheth_driver = {
.id_table = ipheth_table,
};
-static int __init ipheth_init(void)
-{
- int retval;
-
- retval = usb_register(&ipheth_driver);
- if (retval) {
- err("usb_register failed: %d", retval);
- return retval;
- }
- return 0;
-}
-
-static void __exit ipheth_exit(void)
-{
- usb_deregister(&ipheth_driver);
-}
-
-module_init(ipheth_init);
-module_exit(ipheth_exit);
+module_usb_driver(ipheth_driver);
MODULE_AUTHOR("Diego Giagio <diego@giagio.com>");
MODULE_DESCRIPTION("Apple iPhone USB Ethernet driver");
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index 5a6d0f8..7562649 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -375,17 +375,7 @@ static struct usb_driver kalmia_driver = {
.resume = usbnet_resume
};
-static int __init kalmia_init(void)
-{
- return usb_register(&kalmia_driver);
-}
-module_init( kalmia_init);
-
-static void __exit kalmia_exit(void)
-{
- usb_deregister(&kalmia_driver);
-}
-module_exit( kalmia_exit);
+module_usb_driver(kalmia_driver);
MODULE_AUTHOR("Marius Bjoernstad Kotsbak <marius@kotsbak.com>");
MODULE_DESCRIPTION("Samsung Kalmia USB network driver");
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 582ca2d..d034d9c 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -1324,32 +1324,4 @@ static int kaweth_internal_control_msg(struct usb_device *usb_dev,
}
}
-
-/****************************************************************
- * kaweth_init
- ****************************************************************/
-static int __init kaweth_init(void)
-{
- dbg("Driver loading");
- return usb_register(&kaweth_driver);
-}
-
-/****************************************************************
- * kaweth_exit
- ****************************************************************/
-static void __exit kaweth_exit(void)
-{
- usb_deregister(&kaweth_driver);
-}
-
-module_init(kaweth_init);
-module_exit(kaweth_exit);
-
-
-
-
-
-
-
-
-
+module_usb_driver(kaweth_driver);
diff --git a/drivers/net/usb/lg-vl600.c b/drivers/net/usb/lg-vl600.c
index 9c26c63..45a981f 100644
--- a/drivers/net/usb/lg-vl600.c
+++ b/drivers/net/usb/lg-vl600.c
@@ -346,17 +346,7 @@ static struct usb_driver lg_vl600_driver = {
.resume = usbnet_resume,
};
-static int __init vl600_init(void)
-{
- return usb_register(&lg_vl600_driver);
-}
-module_init(vl600_init);
-
-static void __exit vl600_exit(void)
-{
- usb_deregister(&lg_vl600_driver);
-}
-module_exit(vl600_exit);
+module_usb_driver(lg_vl600_driver);
MODULE_AUTHOR("Anrzej Zaborowski");
MODULE_DESCRIPTION("LG-VL600 modem's ethernet link");
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index db2cb74..a29aa9c 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -692,17 +692,7 @@ static struct usb_driver mcs7830_driver = {
.reset_resume = mcs7830_reset_resume,
};
-static int __init mcs7830_init(void)
-{
- return usb_register(&mcs7830_driver);
-}
-module_init(mcs7830_init);
-
-static void __exit mcs7830_exit(void)
-{
- usb_deregister(&mcs7830_driver);
-}
-module_exit(mcs7830_exit);
+module_usb_driver(mcs7830_driver);
MODULE_DESCRIPTION("USB to network adapter MCS7830)");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 01db460..83f965c 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -589,17 +589,7 @@ static struct usb_driver net1080_driver = {
.resume = usbnet_resume,
};
-static int __init net1080_init(void)
-{
- return usb_register(&net1080_driver);
-}
-module_init(net1080_init);
-
-static void __exit net1080_exit(void)
-{
- usb_deregister(&net1080_driver);
-}
-module_exit(net1080_exit);
+module_usb_driver(net1080_driver);
MODULE_AUTHOR("David Brownell");
MODULE_DESCRIPTION("NetChip 1080 based USB Host-to-Host Links");
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 769f509..5d99b8c 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -55,8 +55,8 @@ static const char driver_name[] = "pegasus";
#define BMSR_MEDIA (BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | \
BMSR_100FULL | BMSR_ANEGCAPABLE)
-static int loopback;
-static int mii_mode;
+static bool loopback;
+static bool mii_mode;
static char *devid;
static struct usb_eth_dev usb_dev_id[] = {
@@ -517,7 +517,7 @@ static inline int reset_mac(pegasus_t *pegasus)
for (i = 0; i < REG_TIMEOUT; i++) {
get_registers(pegasus, EthCtrl1, 1, &data);
if (~data & 0x08) {
- if (loopback & 1)
+ if (loopback)
break;
if (mii_mode && (pegasus->features & HAS_HOME_PNA))
set_register(pegasus, Gpio1, 0x34);
@@ -561,7 +561,7 @@ static int enable_net_traffic(struct net_device *dev, struct usb_device *usb)
data[1] |= 0x10; /* set 100 Mbps */
if (mii_mode)
data[1] = 0;
- data[2] = (loopback & 1) ? 0x09 : 0x01;
+ data[2] = loopback ? 0x09 : 0x01;
memcpy(pegasus->eth_regs, data, sizeof(data));
ret = set_registers(pegasus, EthCtrl0, 3, data);
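Several hunks in this series (pegasus, smsc75xx/smsc95xx, virtio_net, sbni, sealevel) convert variables registered with module_param(..., bool, ...) from int to bool, matching the declared parameter type. A minimal sketch of the idiom, with a hypothetical description string:

#include <linux/module.h>

static bool loopback;			/* must really be bool for the "bool" param type */
module_param(loopback, bool, 0);
MODULE_PARM_DESC(loopback, "Enable MAC loopback (hypothetical description)");

Testing the flag then becomes a plain truth test, if (loopback), rather than bit masking such as loopback & 1.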
diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c
index 217aec8..b2b035e 100644
--- a/drivers/net/usb/plusb.c
+++ b/drivers/net/usb/plusb.c
@@ -154,17 +154,7 @@ static struct usb_driver plusb_driver = {
.resume = usbnet_resume,
};
-static int __init plusb_init(void)
-{
- return usb_register(&plusb_driver);
-}
-module_init(plusb_init);
-
-static void __exit plusb_exit(void)
-{
- usb_deregister(&plusb_driver);
-}
-module_exit(plusb_exit);
+module_usb_driver(plusb_driver);
MODULE_AUTHOR("David Brownell");
MODULE_DESCRIPTION("Prolific PL-2301/2302/25A1 USB Host to Host Link Driver");
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 255d6a4..c8f1b5b 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -635,17 +635,7 @@ static struct usb_driver rndis_driver = {
.resume = usbnet_resume,
};
-static int __init rndis_init(void)
-{
- return usb_register(&rndis_driver);
-}
-module_init(rndis_init);
-
-static void __exit rndis_exit(void)
-{
- usb_deregister(&rndis_driver);
-}
-module_exit(rndis_exit);
+module_usb_driver(rndis_driver);
MODULE_AUTHOR("David Brownell");
MODULE_DESCRIPTION("USB Host side RNDIS driver");
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index bf8c84d..0710b4c 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -978,20 +978,7 @@ static struct usb_driver rtl8150_driver = {
.resume = rtl8150_resume
};
-static int __init usb_rtl8150_init(void)
-{
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
- return usb_register(&rtl8150_driver);
-}
-
-static void __exit usb_rtl8150_exit(void)
-{
- usb_deregister(&rtl8150_driver);
-}
-
-module_init(usb_rtl8150_init);
-module_exit(usb_rtl8150_exit);
+module_usb_driver(rtl8150_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index ed1b432..b59cf20 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -618,7 +618,7 @@ static u32 sierra_net_get_link(struct net_device *net)
return sierra_net_get_private(dev)->link_up && netif_running(net);
}
-static struct ethtool_ops sierra_net_ethtool_ops = {
+static const struct ethtool_ops sierra_net_ethtool_ops = {
.get_drvinfo = sierra_net_get_drvinfo,
.get_link = sierra_net_get_link,
.get_msglevel = usbnet_get_msglevel,
@@ -900,6 +900,9 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
u16 len;
bool need_tail;
+ BUILD_BUG_ON(FIELD_SIZEOF(struct usbnet, data)
+ < sizeof(struct cdc_state));
+
dev_dbg(&dev->udev->dev, "%s", __func__);
if (priv->link_up && check_ethip_packet(skb, dev) && is_ip(skb)) {
/* enough head room as is? */
@@ -981,21 +984,7 @@ static struct usb_driver sierra_net_driver = {
.no_dynamic_id = 1,
};
-static int __init sierra_net_init(void)
-{
- BUILD_BUG_ON(FIELD_SIZEOF(struct usbnet, data)
- < sizeof(struct cdc_state));
-
- return usb_register(&sierra_net_driver);
-}
-
-static void __exit sierra_net_exit(void)
-{
- usb_deregister(&sierra_net_driver);
-}
-
-module_exit(sierra_net_exit);
-module_init(sierra_net_init);
+module_usb_driver(sierra_net_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index a5b9b12..3b017bb 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -76,7 +76,7 @@ struct usb_context {
struct usbnet *dev;
};
-static int turbo_mode = true;
+static bool turbo_mode = true;
module_param(turbo_mode, bool, 0644);
MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
@@ -728,7 +728,8 @@ static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu)
}
/* Enable or disable Rx checksum offload engine */
-static int smsc75xx_set_features(struct net_device *netdev, u32 features)
+static int smsc75xx_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct usbnet *dev = netdev_priv(netdev);
struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
@@ -1237,17 +1238,7 @@ static struct usb_driver smsc75xx_driver = {
.disconnect = usbnet_disconnect,
};
-static int __init smsc75xx_init(void)
-{
- return usb_register(&smsc75xx_driver);
-}
-module_init(smsc75xx_init);
-
-static void __exit smsc75xx_exit(void)
-{
- usb_deregister(&smsc75xx_driver);
-}
-module_exit(smsc75xx_exit);
+module_usb_driver(smsc75xx_driver);
MODULE_AUTHOR("Nancy Lin");
MODULE_AUTHOR("Steve Glendinning <steve.glendinning@smsc.com>");
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index eff6767..d45520e 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -59,7 +59,7 @@ struct usb_context {
struct usbnet *dev;
};
-static int turbo_mode = true;
+static bool turbo_mode = true;
module_param(turbo_mode, bool, 0644);
MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
@@ -516,7 +516,8 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
}
/* Enable or disable Tx & Rx checksum offload engines */
-static int smsc95xx_set_features(struct net_device *netdev, u32 features)
+static int smsc95xx_set_features(struct net_device *netdev,
+ netdev_features_t features)
{
struct usbnet *dev = netdev_priv(netdev);
u32 read_buf;
@@ -1297,17 +1298,7 @@ static struct usb_driver smsc95xx_driver = {
.disconnect = usbnet_disconnect,
};
-static int __init smsc95xx_init(void)
-{
- return usb_register(&smsc95xx_driver);
-}
-module_init(smsc95xx_init);
-
-static void __exit smsc95xx_exit(void)
-{
- usb_deregister(&smsc95xx_driver);
-}
-module_exit(smsc95xx_exit);
+module_usb_driver(smsc95xx_driver);
MODULE_AUTHOR("Nancy Lin");
MODULE_AUTHOR("Steve Glendinning <steve.glendinning@smsc.com>");
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index fae0fbd..81b96e3 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -589,6 +589,7 @@ static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
entry = (struct skb_data *) skb->cb;
urb = entry->urb;
+ spin_unlock_irqrestore(&q->lock, flags);
// during some PM-driven resume scenarios,
// these (async) unlinks complete immediately
retval = usb_unlink_urb (urb);
@@ -596,6 +597,7 @@ static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
else
count++;
+ spin_lock_irqsave(&q->lock, flags);
}
spin_unlock_irqrestore (&q->lock, flags);
return count;
diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c
index 1a2234c..c3197ce 100644
--- a/drivers/net/usb/zaurus.c
+++ b/drivers/net/usb/zaurus.c
@@ -316,6 +316,11 @@ static const struct usb_device_id products [] = {
ZAURUS_MASTER_INTERFACE,
.driver_info = ZAURUS_PXA_INFO,
}, {
+ /* C-750/C-760/C-860/SL-C3000 PDA in MDLM mode */
+ USB_DEVICE_AND_INTERFACE_INFO(0x04DD, 0x9031, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &bogus_mdlm_info,
+}, {
.match_flags = USB_DEVICE_ID_MATCH_INT_INFO
| USB_DEVICE_ID_MATCH_DEVICE,
.idVendor = 0x04DD,
@@ -349,6 +354,13 @@ static const struct usb_device_id products [] = {
ZAURUS_MASTER_INTERFACE,
.driver_info = OLYMPUS_MXL_INFO,
},
+
+/* Logitech Harmony 900 - uses the pseudo-MDLM (BLAN) driver */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(0x046d, 0xc11f, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long) &bogus_mdlm_info,
+},
{ }, // END
};
MODULE_DEVICE_TABLE(usb, products);
@@ -362,17 +374,7 @@ static struct usb_driver zaurus_driver = {
.resume = usbnet_resume,
};
-static int __init zaurus_init(void)
-{
- return usb_register(&zaurus_driver);
-}
-module_init(zaurus_init);
-
-static void __exit zaurus_exit(void)
-{
- usb_deregister(&zaurus_driver);
-}
-module_exit(zaurus_exit);
+module_usb_driver(zaurus_driver);
MODULE_AUTHOR("Pavel Machek, David Brownell");
MODULE_DESCRIPTION("Sharp Zaurus PDA, and compatible products");
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index ef883e9..4a34028 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -27,8 +27,8 @@
struct veth_net_stats {
u64 rx_packets;
- u64 tx_packets;
u64 rx_bytes;
+ u64 tx_packets;
u64 tx_bytes;
u64 rx_dropped;
struct u64_stats_sync syncp;
@@ -66,9 +66,8 @@ static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
- strcpy(info->fw_version, "N/A");
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -271,7 +270,7 @@ static void veth_setup(struct net_device *dev)
dev->features |= NETIF_F_LLTX;
dev->destructor = veth_dev_free;
- dev->hw_features = NETIF_F_NO_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
+ dev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
}
/*
@@ -423,7 +422,9 @@ static void veth_dellink(struct net_device *dev, struct list_head *head)
unregister_netdevice_queue(peer, head);
}
-static const struct nla_policy veth_policy[VETH_INFO_MAX + 1];
+static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = {
+ [VETH_INFO_PEER] = { .len = sizeof(struct ifinfomsg) },
+};
static struct rtnl_link_ops veth_link_ops = {
.kind = DRV_NAME,
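veth_get_drvinfo() above, like the vmxnet3 and virtio_net drvinfo hunks further down, switches from unbounded strcpy() to strlcpy() so the copy can never overrun the fixed-size ethtool_drvinfo fields. A minimal sketch with hypothetical name and version strings:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static void example_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	/* sizeof() the destination bounds each copy */
	strlcpy(info->driver, "example", sizeof(info->driver));
	strlcpy(info->version, "1.0", sizeof(info->version));
}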
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 6ee8410..4880aa8 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -30,7 +30,7 @@
static int napi_weight = 128;
module_param(napi_weight, int, 0444);
-static int csum = 1, gso = 1;
+static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
@@ -39,6 +39,7 @@ module_param(gso, bool, 0444);
#define GOOD_COPY_LEN 128
#define VIRTNET_SEND_COMMAND_SG_MAX 2
+#define VIRTNET_DRIVER_VERSION "1.0.0"
struct virtnet_stats {
struct u64_stats_sync syncp;
@@ -155,6 +156,7 @@ static void set_skb_frag(struct sk_buff *skb, struct page *page,
*len -= size;
}
+/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
struct page *page, unsigned int len)
{
@@ -357,7 +359,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
struct skb_vnet_hdr *hdr;
int err;
- skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
+ skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp);
if (unlikely(!skb))
return -ENOMEM;
@@ -368,7 +370,7 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);
- err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 2, skb, gfp);
+ err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb, gfp);
if (err < 0)
dev_kfree_skb(skb);
@@ -413,8 +415,8 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
/* chain first in list head */
first->private = (unsigned long)list;
- err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
- first, gfp);
+ err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
+ first, gfp);
if (err < 0)
give_pages(vi, first);
@@ -432,14 +434,20 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);
- err = virtqueue_add_buf_gfp(vi->rvq, vi->rx_sg, 0, 1, page, gfp);
+ err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page, gfp);
if (err < 0)
give_pages(vi, page);
return err;
}
-/* Returns false if we couldn't fill entirely (OOM). */
+/*
+ * Returns false if we couldn't fill entirely (OOM).
+ *
+ * Normally run in the receive path, but can also be run from ndo_open
+ * before we're receiving packets, or from refill_work which is
+ * careful to disable receiving (using napi_disable).
+ */
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
int err;
@@ -501,7 +509,7 @@ static void refill_work(struct work_struct *work)
/* In theory, this can happen: if we don't get any buffers in
* we will *never* try to fill again. */
if (still_empty)
- schedule_delayed_work(&vi->refill, HZ/2);
+ queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2);
}
static int virtnet_poll(struct napi_struct *napi, int budget)
@@ -520,7 +528,7 @@ again:
if (vi->num < vi->max / 2) {
if (!try_fill_recv(vi, GFP_ATOMIC))
- schedule_delayed_work(&vi->refill, 0);
+ queue_delayed_work(system_nrt_wq, &vi->refill, 0);
}
/* Out of packets? */
@@ -601,7 +609,7 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg,
- 0, skb);
+ 0, skb, GFP_ATOMIC);
}
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -699,6 +707,7 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
}
tot->tx_dropped = dev->stats.tx_dropped;
+ tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
tot->rx_dropped = dev->stats.rx_dropped;
tot->rx_length_errors = dev->stats.rx_length_errors;
tot->rx_frame_errors = dev->stats.rx_frame_errors;
@@ -719,6 +728,10 @@ static int virtnet_open(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
+ /* Make sure we have some buffers: if oom use wq. */
+ if (!try_fill_recv(vi, GFP_KERNEL))
+ queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+
virtnet_napi_enable(vi);
return 0;
}
@@ -754,7 +767,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
- BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi) < 0);
+ BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0);
virtqueue_kick(vi->cvq);
@@ -772,6 +785,8 @@ static int virtnet_close(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
+ /* Make sure refill_work doesn't re-enable napi! */
+ cancel_delayed_work_sync(&vi->refill);
napi_disable(&vi->napi);
return 0;
@@ -853,7 +868,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
kfree(buf);
}
-static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
+static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
struct virtnet_info *vi = netdev_priv(dev);
struct scatterlist sg;
@@ -863,9 +878,10 @@ static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
+ return 0;
}
-static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
+static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
struct virtnet_info *vi = netdev_priv(dev);
struct scatterlist sg;
@@ -875,6 +891,7 @@ static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
+ return 0;
}
static void virtnet_get_ringparam(struct net_device *dev,
@@ -889,7 +906,21 @@ static void virtnet_get_ringparam(struct net_device *dev,
}
+
+static void virtnet_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct virtio_device *vdev = vi->vdev;
+
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
+
+}
+
static const struct ethtool_ops virtnet_ethtool_ops = {
+ .get_drvinfo = virtnet_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_ringparam = virtnet_get_ringparam,
};
@@ -954,15 +985,38 @@ static void virtnet_config_changed(struct virtio_device *vdev)
virtnet_update_status(vi);
}
+static int init_vqs(struct virtnet_info *vi)
+{
+ struct virtqueue *vqs[3];
+ vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
+ const char *names[] = { "input", "output", "control" };
+ int nvqs, err;
+
+ /* We expect two virtqueues, receive then send,
+ * and optionally control. */
+ nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;
+
+ err = vi->vdev->config->find_vqs(vi->vdev, nvqs, vqs, callbacks, names);
+ if (err)
+ return err;
+
+ vi->rvq = vqs[0];
+ vi->svq = vqs[1];
+
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
+ vi->cvq = vqs[2];
+
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
+ vi->dev->features |= NETIF_F_HW_VLAN_FILTER;
+ }
+ return 0;
+}
+
static int virtnet_probe(struct virtio_device *vdev)
{
int err;
struct net_device *dev;
struct virtnet_info *vi;
- struct virtqueue *vqs[3];
- vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
- const char *names[] = { "input", "output", "control" };
- int nvqs;
/* Allocate ourselves a network device with room for our info */
dev = alloc_etherdev(sizeof(struct virtnet_info));
@@ -1034,24 +1088,10 @@ static int virtnet_probe(struct virtio_device *vdev)
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
vi->mergeable_rx_bufs = true;
- /* We expect two virtqueues, receive then send,
- * and optionally control. */
- nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;
-
- err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
+ err = init_vqs(vi);
if (err)
goto free_stats;
- vi->rvq = vqs[0];
- vi->svq = vqs[1];
-
- if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
- vi->cvq = vqs[2];
-
- if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
- dev->features |= NETIF_F_HW_VLAN_FILTER;
- }
-
err = register_netdev(dev);
if (err) {
pr_debug("virtio_net: registering device failed\n");
@@ -1082,7 +1122,6 @@ static int virtnet_probe(struct virtio_device *vdev)
unregister:
unregister_netdev(dev);
- cancel_delayed_work_sync(&vi->refill);
free_vqs:
vdev->config->del_vqs(vdev);
free_stats:
@@ -1114,29 +1153,73 @@ static void free_unused_bufs(struct virtnet_info *vi)
BUG_ON(vi->num != 0);
}
-static void __devexit virtnet_remove(struct virtio_device *vdev)
+static void remove_vq_common(struct virtnet_info *vi)
{
- struct virtnet_info *vi = vdev->priv;
-
- /* Stop all the virtqueues. */
- vdev->config->reset(vdev);
-
-
- unregister_netdev(vi->dev);
- cancel_delayed_work_sync(&vi->refill);
+ vi->vdev->config->reset(vi->vdev);
/* Free unused buffers in both send and recv, if any. */
free_unused_bufs(vi);
- vdev->config->del_vqs(vi->vdev);
+ vi->vdev->config->del_vqs(vi->vdev);
while (vi->pages)
__free_pages(get_a_page(vi, GFP_KERNEL), 0);
+}
+
+static void __devexit virtnet_remove(struct virtio_device *vdev)
+{
+ struct virtnet_info *vi = vdev->priv;
+
+ unregister_netdev(vi->dev);
+
+ remove_vq_common(vi);
free_percpu(vi->stats);
free_netdev(vi->dev);
}
+#ifdef CONFIG_PM
+static int virtnet_freeze(struct virtio_device *vdev)
+{
+ struct virtnet_info *vi = vdev->priv;
+
+ virtqueue_disable_cb(vi->rvq);
+ virtqueue_disable_cb(vi->svq);
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
+ virtqueue_disable_cb(vi->cvq);
+
+ netif_device_detach(vi->dev);
+ cancel_delayed_work_sync(&vi->refill);
+
+ if (netif_running(vi->dev))
+ napi_disable(&vi->napi);
+
+ remove_vq_common(vi);
+
+ return 0;
+}
+
+static int virtnet_restore(struct virtio_device *vdev)
+{
+ struct virtnet_info *vi = vdev->priv;
+ int err;
+
+ err = init_vqs(vi);
+ if (err)
+ return err;
+
+ if (netif_running(vi->dev))
+ virtnet_napi_enable(vi);
+
+ netif_device_attach(vi->dev);
+
+ if (!try_fill_recv(vi, GFP_KERNEL))
+ queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+
+ return 0;
+}
+#endif
+
static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
{ 0 },
@@ -1161,6 +1244,10 @@ static struct virtio_driver virtio_net_driver = {
.probe = virtnet_probe,
.remove = __devexit_p(virtnet_remove),
.config_changed = virtnet_config_changed,
+#ifdef CONFIG_PM
+ .freeze = virtnet_freeze,
+ .restore = virtnet_restore,
+#endif
};
static int __init init(void)
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index d96bfb1..756c0f5 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -830,21 +830,16 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
ctx->l4_hdr_size = ((struct tcphdr *)
skb_transport_header(skb))->doff * 4;
else if (iph->protocol == IPPROTO_UDP)
- /*
- * Use tcp header size so that bytes to
- * be copied are more than required by
- * the device.
- */
ctx->l4_hdr_size =
- sizeof(struct tcphdr);
+ sizeof(struct udphdr);
else
ctx->l4_hdr_size = 0;
} else {
/* for simplicity, don't copy L4 headers */
ctx->l4_hdr_size = 0;
}
- ctx->copy_size = ctx->eth_ip_hdr_size +
- ctx->l4_hdr_size;
+ ctx->copy_size = min(ctx->eth_ip_hdr_size +
+ ctx->l4_hdr_size, skb->len);
} else {
ctx->eth_ip_hdr_size = 0;
ctx->l4_hdr_size = 0;
@@ -1926,7 +1921,7 @@ vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
}
-static void
+static int
vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -1943,10 +1938,12 @@ vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
}
set_bit(vid, adapter->active_vlans);
+
+ return 0;
}
-static void
+static int
vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
@@ -1963,6 +1960,8 @@ vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
}
clear_bit(vid, adapter->active_vlans);
+
+ return 0;
}
@@ -2163,7 +2162,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize);
for (i = 0; i < rssConf->indTableSize; i++)
- rssConf->indTable[i] = i % adapter->num_rx_queues;
+ rssConf->indTable[i] = ethtool_rxfh_indir_default(
+ i, adapter->num_rx_queues);
devRead->rssConfDesc.confVer = 1;
devRead->rssConfDesc.confLen = sizeof(*rssConf);
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index e662cbc..587a218 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -202,14 +202,9 @@ vmxnet3_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
strlcpy(drvinfo->driver, vmxnet3_driver_name, sizeof(drvinfo->driver));
- drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';
strlcpy(drvinfo->version, VMXNET3_DRIVER_VERSION_REPORT,
sizeof(drvinfo->version));
- drvinfo->driver[sizeof(drvinfo->version) - 1] = '\0';
-
- strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
- drvinfo->fw_version[sizeof(drvinfo->fw_version) - 1] = '\0';
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
ETHTOOL_BUSINFO_LEN);
@@ -262,11 +257,11 @@ vmxnet3_get_strings(struct net_device *netdev, u32 stringset, u8 *buf)
}
}
-int vmxnet3_set_features(struct net_device *netdev, u32 features)
+int vmxnet3_set_features(struct net_device *netdev, netdev_features_t features)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
unsigned long flags;
- u32 changed = features ^ netdev->features;
+ netdev_features_t changed = features ^ netdev->features;
if (changed & (NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_RX)) {
if (features & NETIF_F_RXCSUM)
@@ -570,44 +565,38 @@ vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
}
#ifdef VMXNET3_RSS
+static u32
+vmxnet3_get_rss_indir_size(struct net_device *netdev)
+{
+ struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+ struct UPT1_RSSConf *rssConf = adapter->rss_conf;
+
+ return rssConf->indTableSize;
+}
+
static int
-vmxnet3_get_rss_indir(struct net_device *netdev,
- struct ethtool_rxfh_indir *p)
+vmxnet3_get_rss_indir(struct net_device *netdev, u32 *p)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
- unsigned int n = min_t(unsigned int, p->size, rssConf->indTableSize);
+ unsigned int n = rssConf->indTableSize;
- p->size = rssConf->indTableSize;
while (n--)
- p->ring_index[n] = rssConf->indTable[n];
+ p[n] = rssConf->indTable[n];
return 0;
}
static int
-vmxnet3_set_rss_indir(struct net_device *netdev,
- const struct ethtool_rxfh_indir *p)
+vmxnet3_set_rss_indir(struct net_device *netdev, const u32 *p)
{
unsigned int i;
unsigned long flags;
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
struct UPT1_RSSConf *rssConf = adapter->rss_conf;
- if (p->size != rssConf->indTableSize)
- return -EINVAL;
- for (i = 0; i < rssConf->indTableSize; i++) {
- /*
- * Return with error code if any of the queue indices
- * is out of range
- */
- if (p->ring_index[i] < 0 ||
- p->ring_index[i] >= adapter->num_rx_queues)
- return -EINVAL;
- }
-
for (i = 0; i < rssConf->indTableSize; i++)
- rssConf->indTable[i] = p->ring_index[i];
+ rssConf->indTable[i] = p[i];
spin_lock_irqsave(&adapter->cmd_lock, flags);
VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
@@ -619,7 +608,7 @@ vmxnet3_set_rss_indir(struct net_device *netdev,
}
#endif
-static struct ethtool_ops vmxnet3_ethtool_ops = {
+static const struct ethtool_ops vmxnet3_ethtool_ops = {
.get_settings = vmxnet3_get_settings,
.get_drvinfo = vmxnet3_get_drvinfo,
.get_regs_len = vmxnet3_get_regs_len,
@@ -634,6 +623,7 @@ static struct ethtool_ops vmxnet3_ethtool_ops = {
.set_ringparam = vmxnet3_set_ringparam,
.get_rxnfc = vmxnet3_get_rxnfc,
#ifdef VMXNET3_RSS
+ .get_rxfh_indir_size = vmxnet3_get_rss_indir_size,
.get_rxfh_indir = vmxnet3_get_rss_indir,
.set_rxfh_indir = vmxnet3_set_rss_indir,
#endif
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index b18eac1..fc46a81 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -70,10 +70,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.1.18.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.1.29.0-k"
/* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01011200
+#define VMXNET3_DRIVER_VERSION_NUM 0x01011D00
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
@@ -401,7 +401,7 @@ void
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);
int
-vmxnet3_set_features(struct net_device *netdev, u32 features);
+vmxnet3_set_features(struct net_device *netdev, netdev_features_t features);
int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
diff --git a/drivers/net/wan/sbni.c b/drivers/net/wan/sbni.c
index 783168c..d43f4ef 100644
--- a/drivers/net/wan/sbni.c
+++ b/drivers/net/wan/sbni.c
@@ -155,7 +155,7 @@ static int emancipate( struct net_device * );
static const char version[] =
"Granch SBNI12 driver ver 5.0.1 Jun 22 2001 Denis I.Timofeev.\n";
-static int skip_pci_probe __initdata = 0;
+static bool skip_pci_probe __initdata = false;
static int scandone __initdata = 0;
static int num __initdata = 0;
diff --git a/drivers/net/wan/sealevel.c b/drivers/net/wan/sealevel.c
index 0b4fd05..4f77484 100644
--- a/drivers/net/wan/sealevel.c
+++ b/drivers/net/wan/sealevel.c
@@ -362,7 +362,7 @@ static int io=0x238;
static int txdma=1;
static int rxdma=3;
static int irq=5;
-static int slow=0;
+static bool slow=false;
module_param(io, int, 0);
MODULE_PARM_DESC(io, "The I/O base of the Sealevel card");
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index c421a61..c806d45 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -75,7 +75,7 @@
* device is up and running or shutdown (through ifconfig up /
* down). Bus-generic only.
*
- * - control ops: control.c - implements various commmands for
+ * - control ops: control.c - implements various commands for
* controlling the device. bus-generic only.
*
* - device model glue: driver.c - implements helpers for the
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
index 4b9ecb2..f20886a 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -562,7 +562,7 @@ void i2400m_tx_new(struct i2400m *i2400m)
{
struct device *dev = i2400m_dev(i2400m);
struct i2400m_msg_hdr *tx_msg;
- bool try_head = 0;
+ bool try_head = false;
BUG_ON(i2400m->tx_msg != NULL);
/*
* In certain situations, TX queue might have enough space to
@@ -580,7 +580,7 @@ try_head:
else if (tx_msg == TAIL_FULL) {
i2400m_tx_skip_tail(i2400m);
d_printf(2, dev, "new TX message: tail full, trying head\n");
- try_head = 1;
+ try_head = true;
goto try_head;
}
memset(tx_msg, 0, I2400M_TX_PLD_SIZE);
@@ -720,7 +720,7 @@ int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
unsigned long flags;
size_t padded_len;
void *ptr;
- bool try_head = 0;
+ bool try_head = false;
unsigned is_singleton = pl_type == I2400M_PT_RESET_WARM
|| pl_type == I2400M_PT_RESET_COLD;
@@ -771,7 +771,7 @@ try_new:
d_printf(2, dev, "pl append: tail full\n");
i2400m_tx_close(i2400m);
i2400m_tx_skip_tail(i2400m);
- try_head = 1;
+ try_head = true;
goto try_new;
} else if (ptr == NULL) { /* All full */
result = -ENOSPC;
diff --git a/drivers/net/wimax/i2400m/usb-tx.c b/drivers/net/wimax/i2400m/usb-tx.c
index ac357ac..99ef81b 100644
--- a/drivers/net/wimax/i2400m/usb-tx.c
+++ b/drivers/net/wimax/i2400m/usb-tx.c
@@ -177,7 +177,6 @@ retry:
static
int i2400mu_txd(void *_i2400mu)
{
- int result = 0;
struct i2400mu *i2400mu = _i2400mu;
struct i2400m *i2400m = &i2400mu->i2400m;
struct device *dev = &i2400mu->usb_iface->dev;
@@ -208,16 +207,14 @@ int i2400mu_txd(void *_i2400mu)
/* Yeah, we ignore errors ... not much we can do */
i2400mu_tx(i2400mu, tx_msg, tx_msg_size);
i2400m_tx_msg_sent(i2400m); /* ack it, advance the FIFO */
- if (result < 0)
- break;
}
spin_lock_irqsave(&i2400m->tx_lock, flags);
i2400mu->tx_kthread = NULL;
spin_unlock_irqrestore(&i2400m->tx_lock, flags);
- d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
- return result;
+ d_fnend(4, dev, "(i2400mu %p)\n", i2400mu);
+ return 0;
}
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 0a304b0..98db761 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -42,7 +42,7 @@ obj-$(CONFIG_ADM8211) += adm8211.o
obj-$(CONFIG_MWL8K) += mwl8k.o
obj-$(CONFIG_IWLWIFI) += iwlwifi/
-obj-$(CONFIG_IWLWIFI_LEGACY) += iwlegacy/
+obj-$(CONFIG_IWLEGACY) += iwlegacy/
obj-$(CONFIG_RT2X00) += rt2x00/
obj-$(CONFIG_P54_COMMON) += p54/
@@ -58,6 +58,6 @@ obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx/
obj-$(CONFIG_IWM) += iwmc3200wifi/
obj-$(CONFIG_MWIFIEX) += mwifiex/
-obj-$(CONFIG_BRCMFMAC) += brcm80211/
-obj-$(CONFIG_BRCMUMAC) += brcm80211/
-obj-$(CONFIG_BRCMSMAC) += brcm80211/
+
+obj-$(CONFIG_BRCMFMAC) += brcm80211/
+obj-$(CONFIG_BRCMSMAC) += brcm80211/
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index ac1176a..1c008c6 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -1418,7 +1418,7 @@ static int encapsulate(struct airo_info *ai ,etherHead *frame, MICBuffer *mic, i
emmh32_update(&context->seed,frame->da,ETH_ALEN * 2); // DA,SA
emmh32_update(&context->seed,(u8*)&mic->typelen,10); // Type/Length and Snap
emmh32_update(&context->seed,(u8*)&mic->seq,sizeof(mic->seq)); //SEQ
- emmh32_update(&context->seed,frame->da + ETH_ALEN * 2,payLen); //payload
+ emmh32_update(&context->seed,(u8*)(frame + 1),payLen); //payload
emmh32_final(&context->seed, (u8*)&mic->mic);
/* New Type/length ?????????? */
@@ -1506,7 +1506,7 @@ static int decapsulate(struct airo_info *ai, MICBuffer *mic, etherHead *eth, u16
emmh32_update(&context->seed, eth->da, ETH_ALEN*2);
emmh32_update(&context->seed, (u8 *)&mic->typelen, sizeof(mic->typelen)+sizeof(mic->u.snap));
emmh32_update(&context->seed, (u8 *)&mic->seq,sizeof(mic->seq));
- emmh32_update(&context->seed, eth->da + ETH_ALEN*2,payLen);
+ emmh32_update(&context->seed, (u8 *)(eth + 1),payLen);
//Calculate MIC
emmh32_final(&context->seed, digest);
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index d121469..d716b74 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -11,3 +11,4 @@ ath-objs := main.o \
key.o
ath-$(CONFIG_ATH_DEBUG) += debug.o
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 0f9ee46..efc0111 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -152,6 +152,7 @@ struct ath_common {
struct ath_cycle_counters cc_survey;
struct ath_regulatory regulatory;
+ struct ath_regulatory reg_world_copy;
const struct ath_ops *ops;
const struct ath_bus_ops *bus_ops;
@@ -214,6 +215,10 @@ do { \
* @ATH_DBG_HWTIMER: hardware timer handling
* @ATH_DBG_BTCOEX: bluetooth coexistance
* @ATH_DBG_BSTUCK: stuck beacons
+ * @ATH_DBG_MCI: Message Coexistence Interface, a private protocol
+ * used exclusively for WLAN-BT coexistence starting from
+ * AR9462.
+ * @ATH_DBG_DFS: radar detection
* @ATH_DBG_ANY: enable all debugging
*
* The debug level is used to control the amount and type of debugging output
@@ -239,6 +244,8 @@ enum ATH_DEBUG {
ATH_DBG_BTCOEX = 0x00002000,
ATH_DBG_WMI = 0x00004000,
ATH_DBG_BSTUCK = 0x00008000,
+ ATH_DBG_MCI = 0x00010000,
+ ATH_DBG_DFS = 0x00020000,
ATH_DBG_ANY = 0xffffffff
};
@@ -248,7 +255,7 @@ enum ATH_DEBUG {
#define ath_dbg(common, dbg_mask, fmt, ...) \
do { \
- if ((common)->debug_mask & dbg_mask) \
+ if ((common)->debug_mask & ATH_DBG_##dbg_mask) \
_ath_printk(KERN_DEBUG, common, fmt, ##__VA_ARGS__); \
} while (0)
@@ -258,10 +265,13 @@ do { \
#else
static inline __attribute__ ((format (printf, 3, 4)))
-void ath_dbg(struct ath_common *common, enum ATH_DEBUG dbg_mask,
+void _ath_dbg(struct ath_common *common, enum ATH_DEBUG dbg_mask,
const char *fmt, ...)
{
}
+#define ath_dbg(common, dbg_mask, fmt, ...) \
+ _ath_dbg(common, ATH_DBG_##dbg_mask, fmt, ##__VA_ARGS__)
+
#define ATH_DBG_WARN(foo, arg...) do {} while (0)
#define ATH_DBG_WARN_ON_ONCE(foo) ({ \
int __ret_warn_once = !!(foo); \
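With the change above, ath_dbg() callers name the debug category without the ATH_DBG_ prefix and the macro pastes it back on, so the same call compiles in both the debug and non-debug configurations. A hypothetical call site:

/* With CONFIG_ATH_DEBUG this expands to roughly:
 *	if (common->debug_mask & ATH_DBG_BTCOEX)
 *		_ath_printk(KERN_DEBUG, common, "weight %d\n", weight);
 */
ath_dbg(common, BTCOEX, "weight %d\n", weight);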
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
index e5be7e70..ee7ea57 100644
--- a/drivers/net/wireless/ath/ath5k/ahb.c
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -166,7 +166,9 @@ static int ath_ahb_probe(struct platform_device *pdev)
if (to_platform_device(ah->dev)->id == 0 &&
(bcfg->config->flags & (BD_WLAN0 | BD_WLAN1)) ==
(BD_WLAN1 | BD_WLAN0))
- __set_bit(ATH_STAT_2G_DISABLED, ah->status);
+ ah->ah_capabilities.cap_needs_2GHz_ovr = true;
+ else
+ ah->ah_capabilities.cap_needs_2GHz_ovr = false;
}
ret = ath5k_init_ah(ah, &ath_ahb_bus_ops);
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index bea90e6..bf67416 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -27,15 +27,21 @@
* or reducing sensitivity as necessary.
*
* The parameters are:
+ *
* - "noise immunity"
+ *
* - "spur immunity"
+ *
* - "firstep level"
+ *
* - "OFDM weak signal detection"
+ *
* - "CCK weak signal detection"
*
* Basically we look at the amount of ODFM and CCK timing errors we get and then
* raise or lower immunity accordingly by setting one or more of these
* parameters.
+ *
* Newer chipsets have PHY error counters in hardware which will generate a MIB
* interrupt when they overflow. Older hardware has to enable PHY error frames
* by setting a RX flag and then count every single PHY error. When a specified
@@ -45,11 +51,13 @@
*/
-/*** ANI parameter control ***/
+/***********************\
+* ANI parameter control *
+\***********************/
/**
* ath5k_ani_set_noise_immunity_level() - Set noise immunity level
- *
+ * @ah: The &struct ath5k_hw
* @level: level between 0 and @ATH5K_ANI_MAX_NOISE_IMM_LVL
*/
void
@@ -91,12 +99,11 @@ ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
}
-
/**
* ath5k_ani_set_spur_immunity_level() - Set spur immunity level
- *
+ * @ah: The &struct ath5k_hw
* @level: level between 0 and @max_spur_level (the maximum level is dependent
- * on the chip revision).
+ * on the chip revision).
*/
void
ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
@@ -117,10 +124,9 @@ ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
}
-
/**
* ath5k_ani_set_firstep_level() - Set "firstep" level
- *
+ * @ah: The &struct ath5k_hw
* @level: level between 0 and @ATH5K_ANI_MAX_FIRSTEP_LVL
*/
void
@@ -140,11 +146,9 @@ ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
}
-
/**
- * ath5k_ani_set_ofdm_weak_signal_detection() - Control OFDM weak signal
- * detection
- *
+ * ath5k_ani_set_ofdm_weak_signal_detection() - Set OFDM weak signal detection
+ * @ah: The &struct ath5k_hw
* @on: turn on or off
*/
void
@@ -182,10 +186,9 @@ ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on)
on ? "on" : "off");
}
-
/**
- * ath5k_ani_set_cck_weak_signal_detection() - control CCK weak signal detection
- *
+ * ath5k_ani_set_cck_weak_signal_detection() - Set CCK weak signal detection
+ * @ah: The &struct ath5k_hw
* @on: turn on or off
*/
void
@@ -200,13 +203,16 @@ ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on)
}
-/*** ANI algorithm ***/
+/***************\
+* ANI algorithm *
+\***************/
/**
* ath5k_ani_raise_immunity() - Increase noise immunity
- *
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
* @ofdm_trigger: If this is true we are called because of too many OFDM errors,
- * the algorithm will tune more parameters then.
+ * the algorithm will tune more parameters then.
*
* Try to raise noise immunity (=decrease sensitivity) in several steps
* depending on the average RSSI of the beacons we received.
@@ -290,9 +296,10 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
*/
}
-
/**
* ath5k_ani_lower_immunity() - Decrease noise immunity
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
*
* Try to lower noise immunity (=increase sensitivity) in several steps
* depending on the average RSSI of the beacons we received.
@@ -352,9 +359,10 @@ ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
}
}
-
/**
* ath5k_hw_ani_get_listen_time() - Update counters and return listening time
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
*
* Return an approximation of the time spent "listening" in milliseconds (ms)
* since the last call of this function.
@@ -379,9 +387,10 @@ ath5k_hw_ani_get_listen_time(struct ath5k_hw *ah, struct ath5k_ani_state *as)
return listen;
}
-
/**
* ath5k_ani_save_and_clear_phy_errors() - Clear and save PHY error counters
+ * @ah: The &struct ath5k_hw
+ * @as: The &struct ath5k_ani_state
*
* Clear the PHY error counters as soon as possible, since this might be called
* from a MIB interrupt and we want to make sure we don't get interrupted again.
@@ -429,14 +438,14 @@ ath5k_ani_save_and_clear_phy_errors(struct ath5k_hw *ah,
return 1;
}
-
/**
* ath5k_ani_period_restart() - Restart ANI period
+ * @as: The &struct ath5k_ani_state
*
* Just reset counters, so they are clear for the next "ani period".
*/
static void
-ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as)
+ath5k_ani_period_restart(struct ath5k_ani_state *as)
{
/* keep last values for debugging */
as->last_ofdm_errors = as->ofdm_errors;
@@ -448,9 +457,9 @@ ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as)
as->listen_time = 0;
}
-
/**
* ath5k_ani_calibration() - The main ANI calibration function
+ * @ah: The &struct ath5k_hw
*
* We count OFDM and CCK errors relative to the time where we did not send or
* receive ("listen" time) and raise or lower immunity accordingly.
@@ -492,7 +501,7 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
/* too many PHY errors - we have to raise immunity */
bool ofdm_flag = as->ofdm_errors > ofdm_high ? true : false;
ath5k_ani_raise_immunity(ah, as, ofdm_flag);
- ath5k_ani_period_restart(ah, as);
+ ath5k_ani_period_restart(as);
} else if (as->listen_time > 5 * ATH5K_ANI_LISTEN_PERIOD) {
/* If more than 5 (TODO: why 5?) periods have passed and we got
@@ -504,15 +513,18 @@ ath5k_ani_calibration(struct ath5k_hw *ah)
if (as->ofdm_errors <= ofdm_low && as->cck_errors <= cck_low)
ath5k_ani_lower_immunity(ah, as);
- ath5k_ani_period_restart(ah, as);
+ ath5k_ani_period_restart(as);
}
}
-/*** INTERRUPT HANDLER ***/
+/*******************\
+* Interrupt handler *
+\*******************/
/**
* ath5k_ani_mib_intr() - Interrupt handler for ANI MIB counters
+ * @ah: The &struct ath5k_hw
*
* Just read & reset the registers quickly, so they don't generate more
* interrupts, save the counters and schedule the tasklet to decide whether
@@ -549,9 +561,11 @@ ath5k_ani_mib_intr(struct ath5k_hw *ah)
tasklet_schedule(&ah->ani_tasklet);
}
-
/**
- * ath5k_ani_phy_error_report() - Used by older HW to report PHY errors
+ * ath5k_ani_phy_error_report - Used by older HW to report PHY errors
+ *
+ * @ah: The &struct ath5k_hw
+ * @phyerr: One of enum ath5k_phy_error_code
*
* This is used by hardware without PHY error counters to report PHY errors
* on a frame-by-frame basis, instead of the interrupt.
@@ -574,10 +588,13 @@ ath5k_ani_phy_error_report(struct ath5k_hw *ah,
}
-/*** INIT ***/
+/****************\
+* Initialization *
+\****************/
/**
* ath5k_enable_phy_err_counters() - Enable PHY error counters
+ * @ah: The &struct ath5k_hw
*
* Enable PHY error counters for OFDM and CCK timing errors.
*/
@@ -596,9 +613,9 @@ ath5k_enable_phy_err_counters(struct ath5k_hw *ah)
ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
}
-
/**
* ath5k_disable_phy_err_counters() - Disable PHY error counters
+ * @ah: The &struct ath5k_hw
*
* Disable PHY error counters for OFDM and CCK timing errors.
*/
@@ -615,10 +632,10 @@ ath5k_disable_phy_err_counters(struct ath5k_hw *ah)
ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
}
-
/**
* ath5k_ani_init() - Initialize ANI
- * @mode: Which mode to use (auto, manual high, manual low, off)
+ * @ah: The &struct ath5k_hw
+ * @mode: One of enum ath5k_ani_mode
*
* Initialize ANI according to mode.
*/
@@ -695,10 +712,18 @@ ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode)
}
-/*** DEBUG ***/
+/**************\
+* Debug output *
+\**************/
#ifdef CONFIG_ATH5K_DEBUG
+/**
+ * ath5k_ani_print_counters() - Print ANI counters
+ * @ah: The &struct ath5k_hw
+ *
+ * Used for debugging ANI
+ */
void
ath5k_ani_print_counters(struct ath5k_hw *ah)
{
diff --git a/drivers/net/wireless/ath/ath5k/ani.h b/drivers/net/wireless/ath/ath5k/ani.h
index 7358b6c..21aa355 100644
--- a/drivers/net/wireless/ath/ath5k/ani.h
+++ b/drivers/net/wireless/ath/ath5k/ani.h
@@ -40,13 +40,13 @@ enum ath5k_phy_error_code;
* enum ath5k_ani_mode - mode for ANI / noise sensitivity
*
* @ATH5K_ANI_MODE_OFF: Turn ANI off. This can be useful to just stop the ANI
- * algorithm after it has been on auto mode.
- * ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low,
- * maximizing sensitivity. ANI will not run.
- * ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high,
- * minimizing sensitivity. ANI will not run.
- * ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the
- * amount of OFDM and CCK frame errors (default).
+ * algorithm after it has been on auto mode.
+ * @ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low,
+ * maximizing sensitivity. ANI will not run.
+ * @ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high,
+ * minimizing sensitivity. ANI will not run.
+ * @ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the
+ * amount of OFDM and CCK frame errors (default).
*/
enum ath5k_ani_mode {
ATH5K_ANI_MODE_OFF = 0,
@@ -58,8 +58,22 @@ enum ath5k_ani_mode {
/**
* struct ath5k_ani_state - ANI state and associated counters
- *
- * @max_spur_level: the maximum spur level is chip dependent
+ * @ani_mode: One of enum ath5k_ani_mode
+ * @noise_imm_level: Noise immunity level
+ * @spur_level: Spur immunity level
+ * @firstep_level: FIRstep level
+ * @ofdm_weak_sig: OFDM weak signal detection state (on/off)
+ * @cck_weak_sig: CCK weak signal detection state (on/off)
+ * @max_spur_level: Max spur immunity level (chip specific)
+ * @listen_time: Listen time
+ * @ofdm_errors: OFDM timing error count
+ * @cck_errors: CCK timing error count
+ * @last_cc: The &struct ath_cycle_counters (for stats)
+ * @last_listen: Listen time from previous run (for stats)
+ * @last_ofdm_errors: OFDM timing error count from previous run (for stats)
+ * @last_cck_errors: CCK timing error count from previous run (for stats)
+ * @sum_ofdm_errors: Sum of OFDM timing errors (for stats)
+ * @sum_cck_errors: Sum of all CCK timing errors (for stats)
*/
struct ath5k_ani_state {
enum ath5k_ani_mode ani_mode;
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index fecbcd9..c2b2518 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -187,10 +187,9 @@
#define AR5K_TUNE_MAX_TXPOWER 63
#define AR5K_TUNE_DEFAULT_TXPOWER 25
#define AR5K_TUNE_TPC_TXPOWER false
-#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL 10000 /* 10 sec */
+#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL 60000 /* 60 sec */
+#define ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT 10000 /* 10 sec */
#define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI 1000 /* 1 sec */
-#define ATH5K_TUNE_CALIBRATION_INTERVAL_NF 60000 /* 60 sec */
-
#define ATH5K_TX_COMPLETE_POLL_INT 3000 /* 3 sec */
#define AR5K_INIT_CARR_SENSE_EN 1
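(For reference, the way these intervals are consumed is unchanged: each deadline is simply jiffies plus the interval. A minimal sketch of re-arming them, mirroring the reset path later in this patch; the helper name is made up:)

/* Hypothetical helper: re-arm the calibration deadlines after a reset,
 * mirroring the assignments done in ath5k_reset() below. */
static void ath5k_arm_cal_deadlines(struct ath5k_hw *ah)
{
	ah->ah_cal_next_full = jiffies +
		msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
	ah->ah_cal_next_short = jiffies +
		msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
	ah->ah_cal_next_ani = jiffies +
		msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
}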
@@ -262,16 +261,34 @@
#define AR5K_AGC_SETTLING_TURBO 37
-/* GENERIC CHIPSET DEFINITIONS */
-/* MAC Chips */
+/*****************************\
+* GENERIC CHIPSET DEFINITIONS *
+\*****************************/
+
+/**
+ * enum ath5k_version - MAC Chips
+ * @AR5K_AR5210: AR5210 (Crete)
+ * @AR5K_AR5211: AR5211 (Oahu/Maui)
+ * @AR5K_AR5212: AR5212 (Venice) and newer
+ */
enum ath5k_version {
AR5K_AR5210 = 0,
AR5K_AR5211 = 1,
AR5K_AR5212 = 2,
};
-/* PHY Chips */
+/**
+ * enum ath5k_radio - PHY Chips
+ * @AR5K_RF5110: RF5110 (Fez)
+ * @AR5K_RF5111: RF5111 (Sombrero)
+ * @AR5K_RF5112: RF2112/5112(A) (Derby/Derby2)
+ * @AR5K_RF2413: RF2413/2414 (Griffin/Griffin-Lite)
+ * @AR5K_RF5413: RF5413/5414/5424 (Eagle/Condor)
+ * @AR5K_RF2316: RF2315/2316 (Cobra SoC)
+ * @AR5K_RF2317: RF2317 (Spider SoC)
+ * @AR5K_RF2425: RF2425/2417 (Swan/Nalla)
+ */
enum ath5k_radio {
AR5K_RF5110 = 0,
AR5K_RF5111 = 1,
@@ -303,11 +320,11 @@ enum ath5k_radio {
#define AR5K_SREV_AR5213A 0x59 /* Hainan */
#define AR5K_SREV_AR2413 0x78 /* Griffin lite */
#define AR5K_SREV_AR2414 0x70 /* Griffin */
-#define AR5K_SREV_AR2315_R6 0x86 /* AP51-Light */
-#define AR5K_SREV_AR2315_R7 0x87 /* AP51-Full */
+#define AR5K_SREV_AR2315_R6 0x86 /* AP51-Light */
+#define AR5K_SREV_AR2315_R7 0x87 /* AP51-Full */
#define AR5K_SREV_AR5424 0x90 /* Condor */
-#define AR5K_SREV_AR2317_R1 0x90 /* AP61-Light */
-#define AR5K_SREV_AR2317_R2 0x91 /* AP61-Full */
+#define AR5K_SREV_AR2317_R1 0x90 /* AP61-Light */
+#define AR5K_SREV_AR2317_R2 0x91 /* AP61-Full */
#define AR5K_SREV_AR5413 0xa4 /* Eagle lite */
#define AR5K_SREV_AR5414 0xa0 /* Eagle */
#define AR5K_SREV_AR2415 0xb0 /* Talon */
@@ -344,32 +361,40 @@ enum ath5k_radio {
/* TODO add support to mac80211 for vendor-specific rates and modes */
-/*
+/**
+ * DOC: Atheros XR
+ *
* Some of this information is based on Documentation from:
*
* http://madwifi-project.org/wiki/ChipsetFeatures/SuperAG
*
- * Modulation for Atheros' eXtended Range - range enhancing extension that is
- * supposed to double the distance an Atheros client device can keep a
- * connection with an Atheros access point. This is achieved by increasing
- * the receiver sensitivity up to, -105dBm, which is about 20dB above what
- * the 802.11 specifications demand. In addition, new (proprietary) data rates
- * are introduced: 3, 2, 1, 0.5 and 0.25 MBit/s.
+ * Atheros' eXtended Range - range enhancing extension is a modulation scheme
+ * that is supposed to double the link distance between an Atheros XR-enabled
+ * client device and an Atheros XR-enabled access point. This is achieved
+ * by increasing the receiver sensitivity up to -105dBm, which is about 20dB
+ * above what the 802.11 specifications demand. In addition, new (proprietary)
+ * data rates are introduced: 3, 2, 1, 0.5 and 0.25 MBit/s.
*
* Please note that you can either use XR or TURBO but you cannot use both;
* they are mutually exclusive.
*
+ * Also note that we do not plan to support XR mode at least for now. You can
+ * get a mode similar to XR by using 5MHz bwmode.
*/
-#define MODULATION_XR 0x00000200
-/*
- * Modulation for Atheros' Turbo G and Turbo A, its supposed to provide a
- * throughput transmission speed up to 40Mbit/s-60Mbit/s at a 108Mbit/s
- * signaling rate achieved through the bonding of two 54Mbit/s 802.11g
- * channels. To use this feature your Access Point must also support it.
+
+
+/**
+ * DOC: Atheros SuperAG
+ *
+ * In addition to XR we have another modulation scheme called TURBO mode
+ * that is supposed to provide throughput of up to 40Mbit/s-60Mbit/s at a
+ * 108Mbit/s signaling rate, achieved through the bonding of two
+ * 54Mbit/s 802.11g channels. To use this feature both ends must support it.
* There is also a distinction between "static" and "dynamic" turbo modes:
*
* - Static: is the dumb version: devices set to this mode stick to it until
* the mode is turned off.
+ *
* - Dynamic: is the intelligent version, the network decides itself if it
* is ok to use turbo. As soon as traffic is detected on adjacent channels
* (which would get used in turbo mode), or when a non-turbo station joins
@@ -383,24 +408,39 @@ enum ath5k_radio {
*
* http://www.pcworld.com/article/id,113428-page,1/article.html
*
- * The channel bonding seems to be driver specific though. In addition to
- * deciding what channels will be used, these "Turbo" modes are accomplished
- * by also enabling the following features:
+ * The channel bonding seems to be driver specific though.
+ *
+ * In addition to TURBO modes we also have the following features for even
+ * greater speed-up:
*
* - Bursting: allows multiple frames to be sent at once, rather than pausing
* after each frame. Bursting is a standards-compliant feature that can be
* used with any Access Point.
+ *
* - Fast frames: increases the amount of information that can be sent per
* frame, also resulting in a reduction of transmission overhead. It is a
* proprietary feature that needs to be supported by the Access Point.
+ *
* - Compression: data frames are compressed in real time using a Lempel Ziv
* algorithm. This is done transparently. Once this feature is enabled,
* compression and decompression takes place inside the chipset, without
* putting additional load on the host CPU.
*
+ * As with XR we also don't plan to support SuperAG features for now. You can
+ * get a mode similar to TURBO by using 40MHz bwmode.
*/
-#define MODULATION_TURBO 0x00000080
+
+/**
+ * enum ath5k_driver_mode - PHY operation mode
+ * @AR5K_MODE_11A: 802.11a
+ * @AR5K_MODE_11B: 802.11b
+ * @AR5K_MODE_11G: 802.11g
+ * @AR5K_MODE_MAX: Used for boundary checks
+ *
+ * Do not change the order here; we use these as
+ * array indices and they also map to EEPROM structures.
+ */
enum ath5k_driver_mode {
AR5K_MODE_11A = 0,
AR5K_MODE_11B = 1,
@@ -408,30 +448,64 @@ enum ath5k_driver_mode {
AR5K_MODE_MAX = 3
};
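As noted above, these values double as array indices; a hypothetical illustration (the table and helper below are not part of the driver):

/* Hypothetical per-mode table, indexed by enum ath5k_driver_mode;
 * AR5K_MODE_MAX is used for the boundary check. */
static const char * const ath5k_mode_names[AR5K_MODE_MAX] = {
	[AR5K_MODE_11A] = "802.11a",
	[AR5K_MODE_11B] = "802.11b",
	[AR5K_MODE_11G] = "802.11g",
};

static const char *ath5k_mode_name(enum ath5k_driver_mode mode)
{
	return (mode < AR5K_MODE_MAX) ? ath5k_mode_names[mode] : "unknown";
}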
+/**
+ * enum ath5k_ant_mode - Antenna operation mode
+ * @AR5K_ANTMODE_DEFAULT: Default antenna setup
+ * @AR5K_ANTMODE_FIXED_A: Only antenna A is present
+ * @AR5K_ANTMODE_FIXED_B: Only antenna B is present
+ * @AR5K_ANTMODE_SINGLE_AP: STA locked on a single ap
+ * @AR5K_ANTMODE_SECTOR_AP: AP with tx antenna set on tx desc
+ * @AR5K_ANTMODE_SECTOR_STA: STA with tx antenna set on tx desc
+ * @AR5K_ANTMODE_DEBUG: Debug mode (A -> Rx, B -> Tx)
+ * @AR5K_ANTMODE_MAX: Used for boundary checks
+ *
+ * For more info on antenna control check out phy.c
+ */
enum ath5k_ant_mode {
- AR5K_ANTMODE_DEFAULT = 0, /* default antenna setup */
- AR5K_ANTMODE_FIXED_A = 1, /* only antenna A is present */
- AR5K_ANTMODE_FIXED_B = 2, /* only antenna B is present */
- AR5K_ANTMODE_SINGLE_AP = 3, /* sta locked on a single ap */
- AR5K_ANTMODE_SECTOR_AP = 4, /* AP with tx antenna set on tx desc */
- AR5K_ANTMODE_SECTOR_STA = 5, /* STA with tx antenna set on tx desc */
- AR5K_ANTMODE_DEBUG = 6, /* Debug mode -A -> Rx, B-> Tx- */
+ AR5K_ANTMODE_DEFAULT = 0,
+ AR5K_ANTMODE_FIXED_A = 1,
+ AR5K_ANTMODE_FIXED_B = 2,
+ AR5K_ANTMODE_SINGLE_AP = 3,
+ AR5K_ANTMODE_SECTOR_AP = 4,
+ AR5K_ANTMODE_SECTOR_STA = 5,
+ AR5K_ANTMODE_DEBUG = 6,
AR5K_ANTMODE_MAX,
};
+/**
+ * enum ath5k_bw_mode - Bandwidth operation mode
+ * @AR5K_BWMODE_DEFAULT: 20MHz, default operation
+ * @AR5K_BWMODE_5MHZ: Quarter rate
+ * @AR5K_BWMODE_10MHZ: Half rate
+ * @AR5K_BWMODE_40MHZ: Turbo
+ */
enum ath5k_bw_mode {
- AR5K_BWMODE_DEFAULT = 0, /* 20MHz, default operation */
- AR5K_BWMODE_5MHZ = 1, /* Quarter rate */
- AR5K_BWMODE_10MHZ = 2, /* Half rate */
- AR5K_BWMODE_40MHZ = 3 /* Turbo */
+ AR5K_BWMODE_DEFAULT = 0,
+ AR5K_BWMODE_5MHZ = 1,
+ AR5K_BWMODE_10MHZ = 2,
+ AR5K_BWMODE_40MHZ = 3
};
+
+
/****************\
TX DEFINITIONS
\****************/
-/*
- * TX Status descriptor
+/**
+ * struct ath5k_tx_status - TX Status descriptor
+ * @ts_seqnum: Sequence number
+ * @ts_tstamp: Timestamp
+ * @ts_status: Status code
+ * @ts_final_idx: Final transmission series index
+ * @ts_final_retry: Final retry count
+ * @ts_rssi: RSSI for received ACK
+ * @ts_shortretry: Short retry count
+ * @ts_virtcol: Virtual collision count
+ * @ts_antenna: Antenna used
+ *
+ * TX status descriptor gets filled by the hw
+ * on each transmission attempt.
*/
struct ath5k_tx_status {
u16 ts_seqnum;
@@ -454,7 +528,6 @@ struct ath5k_tx_status {
* enum ath5k_tx_queue - Queue types used to classify tx queues.
* @AR5K_TX_QUEUE_INACTIVE: q is unused -- see ath5k_hw_release_tx_queue
* @AR5K_TX_QUEUE_DATA: A normal data queue
- * @AR5K_TX_QUEUE_XR_DATA: An XR-data queue
* @AR5K_TX_QUEUE_BEACON: The beacon queue
* @AR5K_TX_QUEUE_CAB: The after-beacon queue
* @AR5K_TX_QUEUE_UAPSD: Unscheduled Automatic Power Save Delivery queue
@@ -462,7 +535,6 @@ struct ath5k_tx_status {
enum ath5k_tx_queue {
AR5K_TX_QUEUE_INACTIVE = 0,
AR5K_TX_QUEUE_DATA,
- AR5K_TX_QUEUE_XR_DATA,
AR5K_TX_QUEUE_BEACON,
AR5K_TX_QUEUE_CAB,
AR5K_TX_QUEUE_UAPSD,
@@ -471,36 +543,46 @@ enum ath5k_tx_queue {
#define AR5K_NUM_TX_QUEUES 10
#define AR5K_NUM_TX_QUEUES_NOQCU 2
-/*
- * Queue syb-types to classify normal data queues.
+/**
+ * enum ath5k_tx_queue_subtype - Queue sub-types to classify normal data queues
+ * @AR5K_WME_AC_BK: Background traffic
+ * @AR5K_WME_AC_BE: Best-effort (normal) traffic
+ * @AR5K_WME_AC_VI: Video traffic
+ * @AR5K_WME_AC_VO: Voice traffic
+ *
* These are the 4 Access Categories as defined in
* WME spec. 0 is the lowest priority and 4 is the
* highest. Normal data that hasn't been classified
* goes to the Best Effort AC.
*/
enum ath5k_tx_queue_subtype {
- AR5K_WME_AC_BK = 0, /*Background traffic*/
- AR5K_WME_AC_BE, /*Best-effort (normal) traffic*/
- AR5K_WME_AC_VI, /*Video traffic*/
- AR5K_WME_AC_VO, /*Voice traffic*/
+ AR5K_WME_AC_BK = 0,
+ AR5K_WME_AC_BE,
+ AR5K_WME_AC_VI,
+ AR5K_WME_AC_VO,
};
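These sub-types line up with mac80211's access categories; a hedged sketch of a mapping helper (the IEEE80211_AC_* constants come from mac80211, the helper itself is made up):

/* Hypothetical helper: map a mac80211 access category to the
 * corresponding ath5k queue sub-type; unclassified traffic -> BE. */
static enum ath5k_tx_queue_subtype ath5k_ac_to_subtype(int ac)
{
	switch (ac) {
	case IEEE80211_AC_VO:
		return AR5K_WME_AC_VO;
	case IEEE80211_AC_VI:
		return AR5K_WME_AC_VI;
	case IEEE80211_AC_BK:
		return AR5K_WME_AC_BK;
	case IEEE80211_AC_BE:
	default:
		return AR5K_WME_AC_BE;
	}
}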
-/*
- * Queue ID numbers as returned by the hw functions, each number
- * represents a hw queue. If hw does not support hw queues
- * (eg 5210) all data goes in one queue. These match
- * d80211 definitions (net80211/MadWiFi don't use them).
+/**
+ * enum ath5k_tx_queue_id - Queue ID numbers as returned by the hw functions
+ * @AR5K_TX_QUEUE_ID_NOQCU_DATA: Data queue on AR5210 (no QCU available)
+ * @AR5K_TX_QUEUE_ID_NOQCU_BEACON: Beacon queue on AR5210 (no QCU available)
+ * @AR5K_TX_QUEUE_ID_DATA_MIN: Data queue min index
+ * @AR5K_TX_QUEUE_ID_DATA_MAX: Data queue max index
+ * @AR5K_TX_QUEUE_ID_CAB: Content after beacon queue
+ * @AR5K_TX_QUEUE_ID_BEACON: Beacon queue
+ * @AR5K_TX_QUEUE_ID_UAPSD: Unscheduled Automatic Power Save Delivery queue
+ *
+ * Each number represents a hw queue. If hw does not support hw queues
+ * (eg 5210) all data goes in one queue.
*/
enum ath5k_tx_queue_id {
AR5K_TX_QUEUE_ID_NOQCU_DATA = 0,
AR5K_TX_QUEUE_ID_NOQCU_BEACON = 1,
- AR5K_TX_QUEUE_ID_DATA_MIN = 0, /*IEEE80211_TX_QUEUE_DATA0*/
- AR5K_TX_QUEUE_ID_DATA_MAX = 3, /*IEEE80211_TX_QUEUE_DATA3*/
- AR5K_TX_QUEUE_ID_DATA_SVP = 5, /*IEEE80211_TX_QUEUE_SVP - Spectralink Voice Protocol*/
- AR5K_TX_QUEUE_ID_CAB = 6, /*IEEE80211_TX_QUEUE_AFTER_BEACON*/
- AR5K_TX_QUEUE_ID_BEACON = 7, /*IEEE80211_TX_QUEUE_BEACON*/
- AR5K_TX_QUEUE_ID_UAPSD = 8,
- AR5K_TX_QUEUE_ID_XR_DATA = 9,
+ AR5K_TX_QUEUE_ID_DATA_MIN = 0,
+ AR5K_TX_QUEUE_ID_DATA_MAX = 3,
+ AR5K_TX_QUEUE_ID_UAPSD = 7,
+ AR5K_TX_QUEUE_ID_CAB = 8,
+ AR5K_TX_QUEUE_ID_BEACON = 9,
};
/*
@@ -521,46 +603,70 @@ enum ath5k_tx_queue_id {
#define AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS 0x1000 /* Disable backoff while bursting */
#define AR5K_TXQ_FLAG_COMPRESSION_ENABLE 0x2000 /* Enable hw compression -not implemented-*/
-/*
- * Data transmit queue state. One of these exists for each
- * hardware transmit queue. Packets sent to us from above
- * are assigned to queues based on their priority. Not all
- * devices support a complete set of hardware transmit queues.
- * For those devices the array sc_ac2q will map multiple
- * priorities to fewer hardware queues (typically all to one
- * hardware queue).
+/**
+ * struct ath5k_txq - Transmit queue state
+ * @qnum: Hardware q number
+ * @link: Link ptr in last TX desc
+ * @q: Transmit queue (&struct list_head)
+ * @lock: Lock on q and link
+ * @setup: Is the queue configured
+ * @txq_len: Number of queued buffers
+ * @txq_max: Max allowed num of queued buffers
+ * @txq_poll_mark: Used to check if queue got stuck
+ * @txq_stuck: Queue stuck counter
+ *
+ * One of these exists for each hardware transmit queue.
+ * Packets sent to us from above are assigned to queues based
+ * on their priority. Not all devices support a complete set
+ * of hardware transmit queues. For those devices the array
+ * sc_ac2q will map multiple priorities to fewer hardware queues
+ * (typically all to one hardware queue).
*/
struct ath5k_txq {
- unsigned int qnum; /* hardware q number */
- u32 *link; /* link ptr in last TX desc */
- struct list_head q; /* transmit queue */
- spinlock_t lock; /* lock on q and link */
+ unsigned int qnum;
+ u32 *link;
+ struct list_head q;
+ spinlock_t lock;
bool setup;
- int txq_len; /* number of queued buffers */
- int txq_max; /* max allowed num of queued buffers */
+ int txq_len;
+ int txq_max;
bool txq_poll_mark;
- unsigned int txq_stuck; /* informational counter */
+ unsigned int txq_stuck;
};
-/*
- * A struct to hold tx queue's parameters
+/**
+ * struct ath5k_txq_info - A struct to hold TX queue's parameters
+ * @tqi_type: One of enum ath5k_tx_queue
+ * @tqi_subtype: One of enum ath5k_tx_queue_subtype
+ * @tqi_flags: TX queue flags (see above)
+ * @tqi_aifs: Arbitrated Inter-frame Space
+ * @tqi_cw_min: Minimum Contention Window
+ * @tqi_cw_max: Maximum Contention Window
+ * @tqi_cbr_period: Constant bit rate period
+ * @tqi_ready_time: Time queue waits after an event when RDYTIME is enabled
*/
struct ath5k_txq_info {
enum ath5k_tx_queue tqi_type;
enum ath5k_tx_queue_subtype tqi_subtype;
- u16 tqi_flags; /* Tx queue flags (see above) */
- u8 tqi_aifs; /* Arbitrated Interframe Space */
- u16 tqi_cw_min; /* Minimum Contention Window */
- u16 tqi_cw_max; /* Maximum Contention Window */
- u32 tqi_cbr_period; /* Constant bit rate period */
+ u16 tqi_flags;
+ u8 tqi_aifs;
+ u16 tqi_cw_min;
+ u16 tqi_cw_max;
+ u32 tqi_cbr_period;
u32 tqi_cbr_overflow_limit;
u32 tqi_burst_time;
- u32 tqi_ready_time; /* Time queue waits after an event */
+ u32 tqi_ready_time;
};
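To show how the fields fit together, a purely illustrative queue description for best-effort data (the AIFS/CW numbers below are examples, not the driver defaults):

/* Illustrative only: describe a best-effort data queue.
 * AIFS/CW values are examples, not taken from the driver. */
struct ath5k_txq_info qi = {
	.tqi_type	= AR5K_TX_QUEUE_DATA,
	.tqi_subtype	= AR5K_WME_AC_BE,
	.tqi_aifs	= 2,
	.tqi_cw_min	= 15,
	.tqi_cw_max	= 1023,
};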
-/*
- * Transmit packet types.
- * used on tx control descriptor
+/**
+ * enum ath5k_pkt_type - Transmit packet types
+ * @AR5K_PKT_TYPE_NORMAL: Normal data
+ * @AR5K_PKT_TYPE_ATIM: ATIM
+ * @AR5K_PKT_TYPE_PSPOLL: PS-Poll
+ * @AR5K_PKT_TYPE_BEACON: Beacon
+ * @AR5K_PKT_TYPE_PROBE_RESP: Probe response
+ * @AR5K_PKT_TYPE_PIFS: PIFS
+ * Used on tx control descriptor
*/
enum ath5k_pkt_type {
AR5K_PKT_TYPE_NORMAL = 0,
@@ -583,27 +689,23 @@ enum ath5k_pkt_type {
(ah->ah_txpower.txp_rates_power_table[(_r)] & 0x3f) << (_v) \
)
-/*
- * DMA size definitions (2^(n+2))
- */
-enum ath5k_dmasize {
- AR5K_DMASIZE_4B = 0,
- AR5K_DMASIZE_8B,
- AR5K_DMASIZE_16B,
- AR5K_DMASIZE_32B,
- AR5K_DMASIZE_64B,
- AR5K_DMASIZE_128B,
- AR5K_DMASIZE_256B,
- AR5K_DMASIZE_512B
-};
/****************\
RX DEFINITIONS
\****************/
-/*
- * RX Status descriptor
+/**
+ * struct ath5k_rx_status - RX Status descriptor
+ * @rs_datalen: Data length
+ * @rs_tstamp: Timestamp
+ * @rs_status: Status code
+ * @rs_phyerr: PHY error mask
+ * @rs_rssi: RSSI in 0.5dbm units
+ * @rs_keyix: Index to the key used for decrypting
+ * @rs_rate: Rate used to decode the frame
+ * @rs_antenna: Antenna used to receive the frame
+ * @rs_more: Indicates this is a frame fragment (Fast frames)
*/
struct ath5k_rx_status {
u16 rs_datalen;
@@ -645,10 +747,18 @@ struct ath5k_rx_status {
#define TSF_TO_TU(_tsf) (u32)((_tsf) >> 10)
+
/*******************************\
GAIN OPTIMIZATION DEFINITIONS
\*******************************/
+/**
+ * enum ath5k_rfgain - RF Gain optimization engine state
+ * @AR5K_RFGAIN_INACTIVE: Engine disabled
+ * @AR5K_RFGAIN_ACTIVE: Probe active
+ * @AR5K_RFGAIN_READ_REQUESTED: Probe requested
+ * @AR5K_RFGAIN_NEED_CHANGE: Gain_F needs change
+ */
enum ath5k_rfgain {
AR5K_RFGAIN_INACTIVE = 0,
AR5K_RFGAIN_ACTIVE,
@@ -656,6 +766,16 @@ enum ath5k_rfgain {
AR5K_RFGAIN_NEED_CHANGE,
};
+/**
+ * struct ath5k_gain - RF Gain optimization engine state data
+ * @g_step_idx: Current step index
+ * @g_current: Current gain
+ * @g_target: Target gain
+ * @g_low: Low gain boundary
+ * @g_high: High gain boundary
+ * @g_f_corr: Gain_F correction
+ * @g_state: One of enum ath5k_rfgain
+ */
struct ath5k_gain {
u8 g_step_idx;
u8 g_current;
@@ -666,6 +786,8 @@ struct ath5k_gain {
u8 g_state;
};
+
+
/********************\
COMMON DEFINITIONS
\********************/
@@ -674,9 +796,14 @@ struct ath5k_gain {
#define AR5K_SLOT_TIME_20 880
#define AR5K_SLOT_TIME_MAX 0xffff
-/*
- * The following structure is used to map 2GHz channels to
- * 5GHz Atheros channels.
+/**
+ * struct ath5k_athchan_2ghz - 2GHz to 5GHZ map for RF5111
+ * @a2_flags: Channel flags (internal)
+ * @a2_athchan: HW channel number (internal)
+ *
+ * This structure is used to map 2GHz channels to
+ * 5GHz Atheros channels on 2111 frequency converter
+ * that comes together with RF5111
* TODO: Clean up
*/
struct ath5k_athchan_2ghz {
@@ -684,36 +811,80 @@ struct ath5k_athchan_2ghz {
u16 a2_athchan;
};
+/**
+ * enum ath5k_dmasize - DMA size definitions (2^(n+2))
+ * @AR5K_DMASIZE_4B: 4Bytes
+ * @AR5K_DMASIZE_8B: 8Bytes
+ * @AR5K_DMASIZE_16B: 16Bytes
+ * @AR5K_DMASIZE_32B: 32Bytes
+ * @AR5K_DMASIZE_64B: 64Bytes (Default)
+ * @AR5K_DMASIZE_128B: 128Bytes
+ * @AR5K_DMASIZE_256B: 256Bytes
+ * @AR5K_DMASIZE_512B: 512Bytes
+ *
+ * These are used to set DMA burst size on hw
+ *
+ * Note: Some platforms can't handle more than 4Bytes;
+ * be careful on embedded boards.
+ */
+enum ath5k_dmasize {
+ AR5K_DMASIZE_4B = 0,
+ AR5K_DMASIZE_8B,
+ AR5K_DMASIZE_16B,
+ AR5K_DMASIZE_32B,
+ AR5K_DMASIZE_64B,
+ AR5K_DMASIZE_128B,
+ AR5K_DMASIZE_256B,
+ AR5K_DMASIZE_512B
+};
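Since the encoding is 2^(n+2), converting an enum value to a byte count is a single shift; a small hypothetical helper:

/* Hypothetical helper: enum ath5k_dmasize encodes 2^(n+2) bytes,
 * e.g. AR5K_DMASIZE_4B -> 4, AR5K_DMASIZE_64B -> 64. */
static inline unsigned int ath5k_dmasize_to_bytes(enum ath5k_dmasize size)
{
	return 4U << size;
}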
+
+
/******************\
RATE DEFINITIONS
\******************/
/**
+ * DOC: Rate codes
+ *
* Seems the ar5xxx hardware supports up to 32 rates, indexed by 1-32.
*
* The rate code is used to get the RX rate or set the TX rate on the
* hardware descriptors. It is also used for internal modulation control
* and settings.
*
- * This is the hardware rate map we are aware of:
- *
- * rate_code 0x01 0x02 0x03 0x04 0x05 0x06 0x07 0x08
- * rate_kbps 3000 1000 ? ? ? 2000 500 48000
- *
- * rate_code 0x09 0x0A 0x0B 0x0C 0x0D 0x0E 0x0F 0x10
- * rate_kbps 24000 12000 6000 54000 36000 18000 9000 ?
+ * This is the hardware rate map we are aware of (html unfriendly):
*
- * rate_code 17 18 19 20 21 22 23 24
- * rate_kbps ? ? ? ? ? ? ? 11000
+ * Rate code Rate (Kbps)
+ * --------- -----------
+ * 0x01 3000 (XR)
+ * 0x02 1000 (XR)
+ * 0x03 250 (XR)
+ * 0x04 - 05 -Reserved-
+ * 0x06 2000 (XR)
+ * 0x07 500 (XR)
+ * 0x08 48000 (OFDM)
+ * 0x09 24000 (OFDM)
+ * 0x0A 12000 (OFDM)
+ * 0x0B 6000 (OFDM)
+ * 0x0C 54000 (OFDM)
+ * 0x0D 36000 (OFDM)
+ * 0x0E 18000 (OFDM)
+ * 0x0F 9000 (OFDM)
+ * 0x10 - 17 -Reserved-
+ * 0x18 11000L (CCK)
+ * 0x19 5500L (CCK)
+ * 0x1A 2000L (CCK)
+ * 0x1B 1000L (CCK)
+ * 0x1C 11000S (CCK)
+ * 0x1D 5500S (CCK)
+ * 0x1E 2000S (CCK)
+ * 0x1F -Reserved-
*
- * rate_code 25 26 27 28 29 30 31 32
- * rate_kbps 5500 2000 1000 11000S 5500S 2000S ? ?
- *
- * "S" indicates CCK rates with short preamble.
+ * "S" indicates CCK rates with short preamble and "L" with long preamble.
*
* AR5211 has different rate codes for CCK (802.11B) rates. It only uses the
- * lowest 4 bits, so they are the same as below with a 0xF mask.
+ * lowest 4 bits, so they are the same as above with a 0xF mask.
* (0xB, 0xA, 0x9 and 0x8 for 1M, 2M, 5.5M and 11M).
* We handle this in ath5k_setup_bands().
*/
@@ -733,13 +904,9 @@ struct ath5k_athchan_2ghz {
#define ATH5K_RATE_CODE_36M 0x0D
#define ATH5K_RATE_CODE_48M 0x08
#define ATH5K_RATE_CODE_54M 0x0C
-/* XR */
-#define ATH5K_RATE_CODE_XR_500K 0x07
-#define ATH5K_RATE_CODE_XR_1M 0x02
-#define ATH5K_RATE_CODE_XR_2M 0x06
-#define ATH5K_RATE_CODE_XR_3M 0x01
-/* adding this flag to rate_code enables short preamble */
+/* Adding this flag to rate_code on B rates
+ * enables short preamble */
#define AR5K_SET_SHORT_PREAMBLE 0x04
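Per the rate table above, ORing this flag into a CCK rate code selects the short-preamble variant (e.g. 0x18 | 0x04 = 0x1C, 11M short). A hedged helper (name made up; only meaningful for the 2/5.5/11M CCK codes, since 1M has no short-preamble form):

/* Hypothetical helper: select the short-preamble variant of a CCK (B)
 * rate code, e.g. 0x18 (11M long) -> 0x1C (11M short). */
static inline u8 ath5k_cck_short_preamble(u8 cck_rate_code)
{
	return cck_rate_code | AR5K_SET_SHORT_PREAMBLE;
}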
/*
@@ -747,7 +914,7 @@ struct ath5k_athchan_2ghz {
*/
#define AR5K_KEYCACHE_SIZE 8
-extern int ath5k_modparam_nohwcrypt;
+extern bool ath5k_modparam_nohwcrypt;
/***********************\
HW RELATED DEFINITIONS
@@ -769,49 +936,65 @@ extern int ath5k_modparam_nohwcrypt;
/**
* enum ath5k_int - Hardware interrupt masks helpers
+ * @AR5K_INT_RXOK: Frame successfully received
+ * @AR5K_INT_RXDESC: Request RX descriptor/Read RX descriptor
+ * @AR5K_INT_RXERR: Frame reception failed
+ * @AR5K_INT_RXNOFRM: No frame received within a specified time period
+ * @AR5K_INT_RXEOL: Reached "End Of List", means we need more RX descriptors
+ * @AR5K_INT_RXORN: Indicates we got RX FIFO overrun. Note that Rx overrun is
+ * not always fatal; on some chips we can continue
+ * without resetting the card, that's why %AR5K_INT_FATAL is not
+ * common for all chips.
+ * @AR5K_INT_RX_ALL: Mask to identify all RX related interrupts
+ *
+ * @AR5K_INT_TXOK: Frame transmission success
+ * @AR5K_INT_TXDESC: Request TX descriptor/Read TX status descriptor
+ * @AR5K_INT_TXERR: Frame transmission failure
+ * @AR5K_INT_TXEOL: Received End Of List for VEOL (Virtual End Of List). The
+ * Queue Control Unit (QCU) signals an EOL interrupt only if a
+ * descriptor's LinkPtr is NULL. For more details, refer to:
+ * "http://www.freepatentsonline.com/20030225739.html"
+ * @AR5K_INT_TXNOFRM: No frame was transmitted within a specified time period
+ * @AR5K_INT_TXURN: Indicates we got TX FIFO underrun. In such a case we should
+ * increase the TX trigger threshold.
+ * @AR5K_INT_TX_ALL: Mask to identify all TX related interrupts
*
- * @AR5K_INT_RX: mask to identify received frame interrupts, of type
- * AR5K_ISR_RXOK or AR5K_ISR_RXERR
- * @AR5K_INT_RXDESC: Request RX descriptor/Read RX descriptor (?)
- * @AR5K_INT_RXNOFRM: No frame received (?)
- * @AR5K_INT_RXEOL: received End Of List for VEOL (Virtual End Of List). The
- * Queue Control Unit (QCU) signals an EOL interrupt only if a descriptor's
- * LinkPtr is NULL. For more details, refer to:
- * http://www.freepatentsonline.com/20030225739.html
- * @AR5K_INT_RXORN: Indicates we got RX overrun (eg. no more descriptors).
- * Note that Rx overrun is not always fatal, on some chips we can continue
- * operation without resetting the card, that's why int_fatal is not
- * common for all chips.
- * @AR5K_INT_TX: mask to identify received frame interrupts, of type
- * AR5K_ISR_TXOK or AR5K_ISR_TXERR
- * @AR5K_INT_TXDESC: Request TX descriptor/Read TX status descriptor (?)
- * @AR5K_INT_TXURN: received when we should increase the TX trigger threshold
- * We currently do increments on interrupt by
- * (AR5K_TUNE_MAX_TX_FIFO_THRES - current_trigger_level) / 2
* @AR5K_INT_MIB: Indicates that either the Management Information Base counters or
- * one of the PHY error counters reached the maximum value and should be
- * read and cleared.
+ * one of the PHY error counters reached the maximum value and
+ * should be read and cleared.
+ * @AR5K_INT_SWI: Software triggered interrupt.
* @AR5K_INT_RXPHY: RX PHY Error
* @AR5K_INT_RXKCM: RX Key cache miss
* @AR5K_INT_SWBA: SoftWare Beacon Alert - indicates its time to send a
- * beacon that must be handled in software. The alternative is if you
- * have VEOL support, in that case you let the hardware deal with things.
+ * beacon that must be handled in software. The alternative is if
+ * you have VEOL support, in that case you let the hardware deal
+ * with things.
+ * @AR5K_INT_BRSSI: Beacon received with an RSSI value below our threshold
* @AR5K_INT_BMISS: If in STA mode this indicates we have stopped seeing
- * beacons from the AP have associated with, we should probably try to
- * reassociate. When in IBSS mode this might mean we have not received
- * any beacons from any local stations. Note that every station in an
- * IBSS schedules to send beacons at the Target Beacon Transmission Time
- * (TBTT) with a random backoff.
- * @AR5K_INT_BNR: Beacon Not Ready interrupt - ??
- * @AR5K_INT_GPIO: GPIO interrupt is used for RF Kill, disabled for now
- * until properly handled
- * @AR5K_INT_FATAL: Fatal errors were encountered, typically caused by DMA
- * errors. These types of errors we can enable seem to be of type
- * AR5K_SIMR2_MCABT, AR5K_SIMR2_SSERR and AR5K_SIMR2_DPERR.
+ * beacons from the AP have associated with, we should probably
+ * try to reassociate. When in IBSS mode this might mean we have
+ * not received any beacons from any local stations. Note that
+ * every station in an IBSS schedules to send beacons at the
+ * Target Beacon Transmission Time (TBTT) with a random backoff.
+ * @AR5K_INT_BNR: Beacon queue got triggered (DMA beacon alert) while empty.
+ * @AR5K_INT_TIM: Beacon with local station's TIM bit set
+ * @AR5K_INT_DTIM: Beacon with DTIM bit and zero DTIM count received
+ * @AR5K_INT_DTIM_SYNC: DTIM sync lost
+ * @AR5K_INT_GPIO: GPIO interrupt is used for RF Kill switches connected to
+ * our GPIO pins.
+ * @AR5K_INT_BCN_TIMEOUT: Beacon timeout, we waited after TBTT but got nothing
+ * @AR5K_INT_CAB_TIMEOUT: We waited for CAB traffic after the beacon but got
+ * nothing or an incomplete CAB frame sequence.
+ * @AR5K_INT_QCBRORN: A queue got its CBR counter expired
+ * @AR5K_INT_QCBRURN: A queue got triggered while empty
+ * @AR5K_INT_QTRIG: A queue got triggered
+ *
+ * @AR5K_INT_FATAL: Fatal errors were encountered, typically caused by bus/DMA
+ * errors. Indicates we need to reset the card.
* @AR5K_INT_GLOBAL: Used to clear and set the IER
- * @AR5K_INT_NOCARD: signals the card has been removed
- * @AR5K_INT_COMMON: common interrupts shared among MACs with the same
- * bit value
+ * @AR5K_INT_NOCARD: Signals the card has been removed
+ * @AR5K_INT_COMMON: Common interrupts shared among MACs with the same
+ * bit value
*
* These are mapped to take advantage of some common bits
* between the MACs, to be able to set intr properties
@@ -847,15 +1030,15 @@ enum ath5k_int {
AR5K_INT_GPIO = 0x01000000,
AR5K_INT_BCN_TIMEOUT = 0x02000000, /* Non common */
AR5K_INT_CAB_TIMEOUT = 0x04000000, /* Non common */
- AR5K_INT_RX_DOPPLER = 0x08000000, /* Non common */
- AR5K_INT_QCBRORN = 0x10000000, /* Non common */
- AR5K_INT_QCBRURN = 0x20000000, /* Non common */
- AR5K_INT_QTRIG = 0x40000000, /* Non common */
+ AR5K_INT_QCBRORN = 0x08000000, /* Non common */
+ AR5K_INT_QCBRURN = 0x10000000, /* Non common */
+ AR5K_INT_QTRIG = 0x20000000, /* Non common */
AR5K_INT_GLOBAL = 0x80000000,
AR5K_INT_TX_ALL = AR5K_INT_TXOK
| AR5K_INT_TXDESC
| AR5K_INT_TXERR
+ | AR5K_INT_TXNOFRM
| AR5K_INT_TXEOL
| AR5K_INT_TXURN,
@@ -891,15 +1074,32 @@ enum ath5k_int {
AR5K_INT_NOCARD = 0xffffffff
};
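The *_ALL masks let callers test a whole interrupt group at once; a hedged sketch of the pattern (the actual handler in the base.c hunks below checks individual bits before scheduling):

/* Sketch: coarse classification of an ISR status word using the
 * group masks defined above. */
if (status & AR5K_INT_RX_ALL)
	ath5k_schedule_rx(ah);
if (status & AR5K_INT_TX_ALL)
	ath5k_schedule_tx(ah);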
-/* mask which calibration is active at the moment */
+/**
+ * enum ath5k_calibration_mask - Mask which calibration is active at the moment
+ * @AR5K_CALIBRATION_FULL: Full calibration (AGC + SHORT)
+ * @AR5K_CALIBRATION_SHORT: Short calibration (NF + I/Q)
+ * @AR5K_CALIBRATION_NF: Noise Floor calibration
+ * @AR5K_CALIBRATION_ANI: Adaptive Noise Immunity
+ */
enum ath5k_calibration_mask {
AR5K_CALIBRATION_FULL = 0x01,
AR5K_CALIBRATION_SHORT = 0x02,
- AR5K_CALIBRATION_ANI = 0x04,
+ AR5K_CALIBRATION_NF = 0x04,
+ AR5K_CALIBRATION_ANI = 0x08,
};
-/*
- * Power management
+/**
+ * enum ath5k_power_mode - Power management modes
+ * @AR5K_PM_UNDEFINED: Undefined
+ * @AR5K_PM_AUTO: Allow card to sleep if possible
+ * @AR5K_PM_AWAKE: Force card to wake up
+ * @AR5K_PM_FULL_SLEEP: Force card to full sleep (DANGEROUS)
+ * @AR5K_PM_NETWORK_SLEEP: Allow the card to sleep for a specified duration
+ *
+ * Currently only PM_AWAKE is used; FULL_SLEEP and NETWORK_SLEEP/AUTO
+ * are known to have problems on some cards. This is not a big
+ * problem though, because we can get almost the same effect as
+ * FULL_SLEEP by putting the card on warm reset (it's almost powered down).
*/
enum ath5k_power_mode {
AR5K_PM_UNDEFINED = 0,
@@ -957,6 +1157,8 @@ struct ath5k_capabilities {
} cap_queues;
bool cap_has_phyerr_counters;
+ bool cap_has_mrr_support;
+ bool cap_needs_2GHz_ovr;
};
/* size of noise floor history (keep it a power of two) */
@@ -1072,13 +1274,11 @@ struct ath5k_hw {
dma_addr_t desc_daddr; /* DMA (physical) address */
size_t desc_len; /* size of TX/RX descriptors */
- DECLARE_BITMAP(status, 6);
+ DECLARE_BITMAP(status, 4);
#define ATH_STAT_INVALID 0 /* disable hardware accesses */
-#define ATH_STAT_MRRETRY 1 /* multi-rate retry support */
-#define ATH_STAT_PROMISC 2
-#define ATH_STAT_LEDSOFT 3 /* enable LED gpio status */
-#define ATH_STAT_STARTED 4 /* opened & irqs enabled */
-#define ATH_STAT_2G_DISABLED 5 /* multiband radio without 2G */
+#define ATH_STAT_PROMISC 1
+#define ATH_STAT_LEDSOFT 2 /* enable LED gpio status */
+#define ATH_STAT_STARTED 3 /* opened & irqs enabled */
unsigned int filter_flags; /* HW flags, AR5K_RX_FILTER_* */
struct ieee80211_channel *curchan; /* current h/w channel */
@@ -1097,6 +1297,7 @@ struct ath5k_hw {
led_on; /* pin setting for LED on */
struct work_struct reset_work; /* deferred chip reset */
+ struct work_struct calib_work; /* deferred phy calibration */
struct list_head rxbuf; /* receive buffer */
spinlock_t rxbuflock;
@@ -1113,8 +1314,6 @@ struct ath5k_hw {
struct ath5k_rfkill rf_kill;
- struct tasklet_struct calib; /* calibration tasklet */
-
spinlock_t block; /* protects beacon */
struct tasklet_struct beacontq; /* beacon intr tasklet */
struct list_head bcbuf; /* beacon buffer */
@@ -1144,7 +1343,7 @@ struct ath5k_hw {
enum ath5k_int ah_imr;
struct ieee80211_channel *ah_current_channel;
- bool ah_calibration;
+ bool ah_iq_cal_needed;
bool ah_single_chip;
enum ath5k_version ah_version;
@@ -1187,7 +1386,13 @@ struct ath5k_hw {
u32 ah_txq_imr_cbrurn;
u32 ah_txq_imr_qtrig;
u32 ah_txq_imr_nofrm;
- u32 ah_txq_isr;
+
+ u32 ah_txq_isr_txok_all;
+ u32 ah_txq_isr_txurn;
+ u32 ah_txq_isr_qcborn;
+ u32 ah_txq_isr_qcburn;
+ u32 ah_txq_isr_qtrig;
+
u32 *ah_rf_banks;
size_t ah_rf_banks_size;
size_t ah_rf_regs_count;
@@ -1228,8 +1433,8 @@ struct ath5k_hw {
/* Calibration timestamp */
unsigned long ah_cal_next_full;
+ unsigned long ah_cal_next_short;
unsigned long ah_cal_next_ani;
- unsigned long ah_cal_next_nf;
/* Calibration mask */
u8 ah_cal_mask;
@@ -1338,11 +1543,11 @@ void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah);
void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64);
void ath5k_hw_reset_tsf(struct ath5k_hw *ah);
-void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval);
+void ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon,
+ u32 interval);
bool ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval);
/* Init function */
-void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
- u8 mode);
+void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode);
/* Queue Control Unit, DFS Control Unit Functions */
int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index 91627dd..d7114c7 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -27,8 +27,7 @@
#include "debug.h"
/**
- * ath5k_hw_post - Power On Self Test helper function
- *
+ * ath5k_hw_post() - Power On Self Test helper function
* @ah: The &struct ath5k_hw
*/
static int ath5k_hw_post(struct ath5k_hw *ah)
@@ -92,8 +91,7 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_init - Check if hw is supported and init the needed structs
- *
+ * ath5k_hw_init() - Check if hw is supported and init the needed structs
* @ah: The &struct ath5k_hw associated with the device
*
* Check if the device is supported, perform a POST and initialize the needed
@@ -298,7 +296,7 @@ int ath5k_hw_init(struct ath5k_hw *ah)
/* Reset SERDES to load new settings */
ath5k_hw_reg_write(ah, 0x00000000, AR5K_PCIE_SERDES_RESET);
- mdelay(1);
+ usleep_range(1000, 1500);
}
/* Get misc capabilities */
@@ -308,11 +306,6 @@ int ath5k_hw_init(struct ath5k_hw *ah)
goto err;
}
- if (test_bit(ATH_STAT_2G_DISABLED, ah->status)) {
- __clear_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode);
- __clear_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode);
- }
-
/* Crypto settings */
common->keymax = (ah->ah_version == AR5K_AR5210 ?
AR5K_KEYTABLE_SIZE_5210 : AR5K_KEYTABLE_SIZE_5211);
@@ -349,8 +342,7 @@ err:
}
/**
- * ath5k_hw_deinit - Free the ath5k_hw struct
- *
+ * ath5k_hw_deinit() - Free the &struct ath5k_hw
* @ah: The &struct ath5k_hw
*/
void ath5k_hw_deinit(struct ath5k_hw *ah)
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index b346d04..d366dad 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -68,18 +68,23 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
-int ath5k_modparam_nohwcrypt;
+bool ath5k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
-static int modparam_all_channels;
+static bool modparam_all_channels;
module_param_named(all_channels, modparam_all_channels, bool, S_IRUGO);
MODULE_PARM_DESC(all_channels, "Expose all channels the device can use.");
-static int modparam_fastchanswitch;
+static bool modparam_fastchanswitch;
module_param_named(fastchanswitch, modparam_fastchanswitch, bool, S_IRUGO);
MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");
+static bool ath5k_modparam_no_hw_rfkill_switch;
+module_param_named(no_hw_rfkill_switch, ath5k_modparam_no_hw_rfkill_switch,
+ bool, S_IRUGO);
+MODULE_PARM_DESC(no_hw_rfkill_switch, "Ignore the GPIO RFKill switch state");
+
/* Module info */
MODULE_AUTHOR("Jiri Slaby");
@@ -183,7 +188,6 @@ static const struct ieee80211_rate ath5k_rates[] = {
{ .bitrate = 540,
.hw_value = ATH5K_RATE_CODE_54M,
.flags = 0 },
- /* XR missing */
};
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
@@ -721,21 +725,24 @@ ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
if (ret)
goto err_unmap;
- memset(mrr_rate, 0, sizeof(mrr_rate));
- memset(mrr_tries, 0, sizeof(mrr_tries));
- for (i = 0; i < 3; i++) {
- rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
- if (!rate)
- break;
+ /* Set up MRR descriptor */
+ if (ah->ah_capabilities.cap_has_mrr_support) {
+ memset(mrr_rate, 0, sizeof(mrr_rate));
+ memset(mrr_tries, 0, sizeof(mrr_tries));
+ for (i = 0; i < 3; i++) {
+ rate = ieee80211_get_alt_retry_rate(ah->hw, info, i);
+ if (!rate)
+ break;
- mrr_rate[i] = rate->hw_value;
- mrr_tries[i] = info->control.rates[i + 1].count;
- }
+ mrr_rate[i] = rate->hw_value;
+ mrr_tries[i] = info->control.rates[i + 1].count;
+ }
- ath5k_hw_setup_mrr_tx_desc(ah, ds,
- mrr_rate[0], mrr_tries[0],
- mrr_rate[1], mrr_tries[1],
- mrr_rate[2], mrr_tries[2]);
+ ath5k_hw_setup_mrr_tx_desc(ah, ds,
+ mrr_rate[0], mrr_tries[0],
+ mrr_rate[1], mrr_tries[1],
+ mrr_rate[2], mrr_tries[2]);
+ }
ds->ds_link = 0;
ds->ds_data = bf->skbaddr;
@@ -1689,7 +1696,7 @@ ath5k_tasklet_tx(unsigned long data)
struct ath5k_hw *ah = (void *)data;
for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
- if (ah->txqs[i].setup && (ah->ah_txq_isr & BIT(i)))
+ if (ah->txqs[i].setup && (ah->ah_txq_isr_txok_all & BIT(i)))
ath5k_tx_processq(ah, &ah->txqs[i]);
ah->tx_pending = false;
@@ -2005,7 +2012,7 @@ ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf)
ah->nexttbtt = nexttbtt;
intval |= AR5K_BEACON_ENA;
- ath5k_hw_init_beacon(ah, nexttbtt, intval);
+ ath5k_hw_init_beacon_timers(ah, nexttbtt, intval);
/*
* debugging output last in order to preserve the time critical aspect
@@ -2112,16 +2119,29 @@ static void
ath5k_intr_calibration_poll(struct ath5k_hw *ah)
{
if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
- !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) {
- /* run ANI only when full calibration is not active */
+ !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
+ !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {
+
+ /* Run ANI only when calibration is not active */
+
ah->ah_cal_next_ani = jiffies +
msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
tasklet_schedule(&ah->ani_tasklet);
- } else if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
- ah->ah_cal_next_full = jiffies +
- msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
- tasklet_schedule(&ah->calib);
+ } else if (time_is_before_eq_jiffies(ah->ah_cal_next_short) &&
+ !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
+ !(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {
+
+ /* Run calibration only when another calibration
+ * is not running.
+ *
+ * Note: This is for both full/short calibration,
+ * if it's time for a full one, ath5k_calibrate_work will deal
+ * with it. */
+
+ ah->ah_cal_next_short = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
+ ieee80211_queue_work(ah->hw, &ah->calib_work);
}
/* we could use SWI to generate enough interrupts to meet our
* calibration interval requirements, if necessary:
@@ -2149,69 +2169,110 @@ ath5k_intr(int irq, void *dev_id)
enum ath5k_int status;
unsigned int counter = 1000;
+
+ /*
+ * If hw is not ready (or detached) and we get an
+ * interrupt, or if we have no interrupts pending
+ * (that means it's not for us) skip it.
+ *
+ * NOTE: Group 0/1 PCI interface registers are not
+ * supported on WiSOCs, so we can't check for pending
+ * interrupts (ISR belongs to another register group
+ * so we are ok).
+ */
if (unlikely(test_bit(ATH_STAT_INVALID, ah->status) ||
- ((ath5k_get_bus_type(ah) != ATH_AHB) &&
- !ath5k_hw_is_intr_pending(ah))))
+ ((ath5k_get_bus_type(ah) != ATH_AHB) &&
+ !ath5k_hw_is_intr_pending(ah))))
return IRQ_NONE;
+ /** Main loop **/
do {
- ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */
+ ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */
+
ATH5K_DBG(ah, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
status, ah->imask);
+
+ /*
+ * Fatal hw error -> Log and reset
+ *
+ * Fatal errors are unrecoverable so we have to
+ * reset the card. These errors include bus and
+ * dma errors.
+ */
if (unlikely(status & AR5K_INT_FATAL)) {
- /*
- * Fatal errors are unrecoverable.
- * Typically these are caused by DMA errors.
- */
+
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"fatal int, resetting\n");
ieee80211_queue_work(ah->hw, &ah->reset_work);
+
+ /*
+ * RX Overrun -> Count and reset if needed
+ *
+ * Receive buffers are full. Either the bus is busy or
+ * the CPU is not fast enough to process all received
+ * frames.
+ */
} else if (unlikely(status & AR5K_INT_RXORN)) {
+
/*
- * Receive buffers are full. Either the bus is busy or
- * the CPU is not fast enough to process all received
- * frames.
* Older chipsets need a reset to come out of this
* condition, but we treat it as RX for newer chips.
- * We don't know exactly which versions need a reset -
+ * We don't know exactly which versions need a reset
* this guess is copied from the HAL.
*/
ah->stats.rxorn_intr++;
+
if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"rx overrun, resetting\n");
ieee80211_queue_work(ah->hw, &ah->reset_work);
} else
ath5k_schedule_rx(ah);
+
} else {
+
+ /* Software Beacon Alert -> Schedule beacon tasklet */
if (status & AR5K_INT_SWBA)
tasklet_hi_schedule(&ah->beacontq);
- if (status & AR5K_INT_RXEOL) {
- /*
- * NB: the hardware should re-read the link when
- * RXE bit is written, but it doesn't work at
- * least on older hardware revs.
- */
+ /*
+ * No more RX descriptors -> Just count
+ *
+ * NB: the hardware should re-read the link when
+ * RXE bit is written, but it doesn't work at
+ * least on older hardware revs.
+ */
+ if (status & AR5K_INT_RXEOL)
ah->stats.rxeol_intr++;
- }
- if (status & AR5K_INT_TXURN) {
- /* bump tx trigger level */
+
+
+ /* TX Underrun -> Bump tx trigger level */
+ if (status & AR5K_INT_TXURN)
ath5k_hw_update_tx_triglevel(ah, true);
- }
+
+ /* RX -> Schedule rx tasklet */
if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
ath5k_schedule_rx(ah);
- if (status & (AR5K_INT_TXOK | AR5K_INT_TXDESC
- | AR5K_INT_TXERR | AR5K_INT_TXEOL))
+
+ /* TX -> Schedule tx tasklet */
+ if (status & (AR5K_INT_TXOK
+ | AR5K_INT_TXDESC
+ | AR5K_INT_TXERR
+ | AR5K_INT_TXEOL))
ath5k_schedule_tx(ah);
- if (status & AR5K_INT_BMISS) {
- /* TODO */
- }
+
+ /* Missed beacon -> TODO
+ if (status & AR5K_INT_BMISS)
+ */
+
+ /* MIB event -> Update counters and notify ANI */
if (status & AR5K_INT_MIB) {
ah->stats.mib_intr++;
ath5k_hw_update_mib_counters(ah);
ath5k_ani_mib_intr(ah);
}
+
+ /* GPIO -> Notify RFKill layer */
if (status & AR5K_INT_GPIO)
tasklet_schedule(&ah->rf_kill.toggleq);
@@ -2222,12 +2283,19 @@ ath5k_intr(int irq, void *dev_id)
} while (ath5k_hw_is_intr_pending(ah) && --counter > 0);
+ /*
+ * Until we handle rx/tx interrupts mask them on IMR
+ *
+ * NOTE: ah->(rx/tx)_pending are set when scheduling the tasklets
+ * and unset after we've handled the interrupts.
+ */
if (ah->rx_pending || ah->tx_pending)
ath5k_set_current_imask(ah);
if (unlikely(!counter))
ATH5K_WARN(ah, "too many interrupts, giving up for now\n");
+ /* Fire up calibration poll */
ath5k_intr_calibration_poll(ah);
return IRQ_HANDLED;
@@ -2238,41 +2306,58 @@ ath5k_intr(int irq, void *dev_id)
* for temperature/environment changes.
*/
static void
-ath5k_tasklet_calibrate(unsigned long data)
+ath5k_calibrate_work(struct work_struct *work)
{
- struct ath5k_hw *ah = (void *)data;
+ struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
+ calib_work);
+
+ /* Should we run a full calibration ? */
+ if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
+
+ ah->ah_cal_next_full = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
+ ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
+
+ ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
+ "running full calibration\n");
+
+ if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
+ /*
+ * Rfgain is out of bounds, reset the chip
+ * to load new gain values.
+ */
+ ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
+ "got new rfgain, resetting\n");
+ ieee80211_queue_work(ah->hw, &ah->reset_work);
+ }
+
+ /* TODO: On full calibration we should stop TX here,
+ * so that it doesn't interfere (mostly due to gain_f
+ * calibration that messes with tx packets -see phy.c).
+ *
+ * NOTE: Stopping the queues from above is not enough
+ * to stop TX but saves us from disconnecting (at least
+ * we don't lose packets). */
+ ieee80211_stop_queues(ah->hw);
+ } else
+ ah->ah_cal_mask |= AR5K_CALIBRATION_SHORT;
- /* Only full calibration for now */
- ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
ieee80211_frequency_to_channel(ah->curchan->center_freq),
ah->curchan->hw_value);
- if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
- /*
- * Rfgain is out of bounds, reset the chip
- * to load new gain values.
- */
- ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "calibration, resetting\n");
- ieee80211_queue_work(ah->hw, &ah->reset_work);
- }
if (ath5k_hw_phy_calibrate(ah, ah->curchan))
ATH5K_ERR(ah, "calibration of channel %u failed\n",
ieee80211_frequency_to_channel(
ah->curchan->center_freq));
- /* Noise floor calibration interrupts rx/tx path while I/Q calibration
- * doesn't.
- * TODO: We should stop TX here, so that it doesn't interfere.
- * Note that stopping the queues is not enough to stop TX! */
- if (time_is_before_eq_jiffies(ah->ah_cal_next_nf)) {
- ah->ah_cal_next_nf = jiffies +
- msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_NF);
- ath5k_hw_update_noise_floor(ah);
- }
-
- ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
+ /* Clear calibration flags */
+ if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL) {
+ ieee80211_wake_queues(ah->hw);
+ ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
+ } else if (ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)
+ ah->ah_cal_mask &= ~AR5K_CALIBRATION_SHORT;
}
@@ -2407,8 +2492,8 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
if (ret)
goto err_irq;
- /* set up multi-rate retry capabilities */
- if (ah->ah_version == AR5K_AR5212) {
+ /* Set up multi-rate retry capabilities */
+ if (ah->ah_capabilities.cap_has_mrr_support) {
hw->max_rates = 4;
hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
AR5K_INIT_RETRY_LONG);
@@ -2544,15 +2629,22 @@ int ath5k_start(struct ieee80211_hw *hw)
* and then setup of the interrupt mask.
*/
ah->curchan = ah->hw->conf.channel;
- ah->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
- AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
- AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
+ ah->imask = AR5K_INT_RXOK
+ | AR5K_INT_RXERR
+ | AR5K_INT_RXEOL
+ | AR5K_INT_RXORN
+ | AR5K_INT_TXDESC
+ | AR5K_INT_TXEOL
+ | AR5K_INT_FATAL
+ | AR5K_INT_GLOBAL
+ | AR5K_INT_MIB;
ret = ath5k_reset(ah, NULL, false);
if (ret)
goto done;
- ath5k_rfkill_hw_start(ah);
+ if (!ath5k_modparam_no_hw_rfkill_switch)
+ ath5k_rfkill_hw_start(ah);
/*
* Reset the key cache since some parts do not reset the
@@ -2585,7 +2677,6 @@ static void ath5k_stop_tasklets(struct ath5k_hw *ah)
ah->tx_pending = false;
tasklet_kill(&ah->rxtq);
tasklet_kill(&ah->txtq);
- tasklet_kill(&ah->calib);
tasklet_kill(&ah->beacontq);
tasklet_kill(&ah->ani_tasklet);
}
@@ -2637,7 +2728,8 @@ void ath5k_stop(struct ieee80211_hw *hw)
cancel_delayed_work_sync(&ah->tx_complete_work);
- ath5k_rfkill_hw_stop(ah);
+ if (!ath5k_modparam_no_hw_rfkill_switch)
+ ath5k_rfkill_hw_stop(ah);
}
/*
@@ -2689,9 +2781,24 @@ ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
ath5k_ani_init(ah, ani_mode);
- ah->ah_cal_next_full = jiffies + msecs_to_jiffies(100);
- ah->ah_cal_next_ani = jiffies;
- ah->ah_cal_next_nf = jiffies;
+ /*
+ * Set calibration intervals
+ *
+ * Note: We don't need to run calibration immediately
+ * since some initial calibration is done on reset
+ * even for fast channel switching. Also on scanning
+ * this will get set again and again and it won't get
+ * executed unless we connect somewhere and spend some
+ * time on the channel (that's what calibration needs
+ * anyway to be accurate).
+ */
+ ah->ah_cal_next_full = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
+ ah->ah_cal_next_ani = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
+ ah->ah_cal_next_short = jiffies +
+ msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
+
ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);
/* clear survey data and cycle counters */
@@ -2745,20 +2852,6 @@ ath5k_init(struct ieee80211_hw *hw)
/*
- * Check if the MAC has multi-rate retry support.
- * We do this by trying to setup a fake extended
- * descriptor. MACs that don't have support will
- * return false w/o doing anything. MACs that do
- * support it will return true w/o doing anything.
- */
- ret = ath5k_hw_setup_mrr_tx_desc(ah, NULL, 0, 0, 0, 0, 0, 0);
-
- if (ret < 0)
- goto err;
- if (ret > 0)
- __set_bit(ATH_STAT_MRRETRY, ah->status);
-
- /*
* Collect the channel list. The 802.11 layer
* is responsible for filtering this list based
* on settings like the phy mode and regulatory
@@ -2841,11 +2934,11 @@ ath5k_init(struct ieee80211_hw *hw)
tasklet_init(&ah->rxtq, ath5k_tasklet_rx, (unsigned long)ah);
tasklet_init(&ah->txtq, ath5k_tasklet_tx, (unsigned long)ah);
- tasklet_init(&ah->calib, ath5k_tasklet_calibrate, (unsigned long)ah);
tasklet_init(&ah->beacontq, ath5k_tasklet_beacon, (unsigned long)ah);
tasklet_init(&ah->ani_tasklet, ath5k_tasklet_ani, (unsigned long)ah);
INIT_WORK(&ah->reset_work, ath5k_reset_work);
+ INIT_WORK(&ah->calib_work, ath5k_calibrate_work);
INIT_DELAYED_WORK(&ah->tx_complete_work, ath5k_tx_complete_poll_work);
ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac);
diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c
index 810fba9..994169a 100644
--- a/drivers/net/wireless/ath/ath5k/caps.c
+++ b/drivers/net/wireless/ath/ath5k/caps.c
@@ -85,12 +85,19 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
caps->cap_range.range_2ghz_min = 2412;
caps->cap_range.range_2ghz_max = 2732;
- if (AR5K_EEPROM_HDR_11B(ee_header))
- __set_bit(AR5K_MODE_11B, caps->cap_mode);
-
- if (AR5K_EEPROM_HDR_11G(ee_header) &&
- ah->ah_version != AR5K_AR5211)
- __set_bit(AR5K_MODE_11G, caps->cap_mode);
+ /* Override 2GHz modes on SoCs that need it
+ * NOTE: cap_needs_2GHz_ovr gets set from
+ * ath_ahb_probe */
+ if (!caps->cap_needs_2GHz_ovr) {
+ if (AR5K_EEPROM_HDR_11B(ee_header))
+ __set_bit(AR5K_MODE_11B,
+ caps->cap_mode);
+
+ if (AR5K_EEPROM_HDR_11G(ee_header) &&
+ ah->ah_version != AR5K_AR5211)
+ __set_bit(AR5K_MODE_11G,
+ caps->cap_mode);
+ }
}
}
@@ -103,12 +110,18 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
else
caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
- /* newer hardware has PHY error counters */
+ /* Newer hardware has PHY error counters */
if (ah->ah_mac_srev >= AR5K_SREV_AR5213A)
caps->cap_has_phyerr_counters = true;
else
caps->cap_has_phyerr_counters = false;
+ /* MACs since AR5212 have MRR support */
+ if (ah->ah_version == AR5K_AR5212)
+ caps->cap_has_mrr_support = true;
+ else
+ caps->cap_has_mrr_support = false;
+
return 0;
}
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index 7e88dda..f8bfa3a 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -26,20 +26,61 @@
#include "debug.h"
+/**
+ * DOC: Hardware descriptor functions
+ *
+ * Here we handle the processing of the low-level hw descriptors
+ * that hw reads and writes via DMA for each TX and RX attempt (that means
+ * we can also have descriptors for failed TX/RX tries). We have two kinds of
+ * descriptors for RX and TX: control descriptors, which tell the hw how to send
+ * or receive a packet and where to read/write it from/to, and status descriptors,
+ * which contain information about how the packet was sent or received (errors
+ * included).
+ *
+ * The descriptor format is not exactly the same for each MAC chip version, so we
+ * have function pointers on &struct ath5k_hw that we initialize at runtime based
+ * on the chip used.
+ */
+
+
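The run-time selection mentioned above works by hanging the chip-specific helpers off &struct ath5k_hw; a hedged sketch (the ah_setup_tx_desc / ah_proc_tx_desc member names are recalled from the driver and are not shown in this hunk):

/* Sketch: pick 2-word vs 4-word TX descriptor helpers at attach time,
 * based on the MAC version (pointer member names assumed, see above). */
if (ah->ah_version == AR5K_AR5212) {
	ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
	ah->ah_proc_tx_desc = ath5k_hw_proc_4word_tx_status;
} else {
	ah->ah_setup_tx_desc = ath5k_hw_setup_2word_tx_desc;
	ah->ah_proc_tx_desc = ath5k_hw_proc_2word_tx_status;
}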
/************************\
* TX Control descriptors *
\************************/
-/*
- * Initialize the 2-word tx control descriptor on 5210/5211
+/**
+ * ath5k_hw_setup_2word_tx_desc() - Initialize a 2-word tx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @pkt_len: Frame length in bytes
+ * @hdr_len: Header length in bytes (only used on AR5210)
+ * @padsize: Any padding we've added to the frame length
+ * @type: One of enum ath5k_pkt_type
+ * @tx_power: Tx power in 0.5dB steps
+ * @tx_rate0: HW idx for transmission rate
+ * @tx_tries0: Max number of retransmissions
+ * @key_index: Index on key table to use for encryption
+ * @antenna_mode: Which antenna to use (0 for auto)
+ * @flags: One of AR5K_TXDESC_* flags (desc.h)
+ * @rtscts_rate: HW idx for RTS/CTS transmission rate
+ * @rtscts_duration: What to put on duration field on the header of RTS/CTS
+ *
+ * Internal function to initialize a 2-Word TX control descriptor
+ * found on AR5210 and AR5211 MACs chips.
+ *
+ * Returns 0 on success or -EINVAL on invalid input
*/
static int
-ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
- unsigned int pkt_len, unsigned int hdr_len, int padsize,
- enum ath5k_pkt_type type,
- unsigned int tx_power, unsigned int tx_rate0, unsigned int tx_tries0,
- unsigned int key_index, unsigned int antenna_mode, unsigned int flags,
- unsigned int rtscts_rate, unsigned int rtscts_duration)
+ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ unsigned int pkt_len, unsigned int hdr_len,
+ int padsize,
+ enum ath5k_pkt_type type,
+ unsigned int tx_power,
+ unsigned int tx_rate0, unsigned int tx_tries0,
+ unsigned int key_index,
+ unsigned int antenna_mode,
+ unsigned int flags,
+ unsigned int rtscts_rate, unsigned int rtscts_duration)
{
u32 frame_type;
struct ath5k_hw_2w_tx_ctl *tx_ctl;
@@ -172,17 +213,40 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
return 0;
}
-/*
- * Initialize the 4-word tx control descriptor on 5212
+/**
+ * ath5k_hw_setup_4word_tx_desc() - Initialize a 4-word tx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @pkt_len: Frame length in bytes
+ * @hdr_len: Header length in bytes (only used on AR5210)
+ * @padsize: Any padding we've added to the frame length
+ * @type: One of enum ath5k_pkt_type
+ * @tx_power: Tx power in 0.5dB steps
+ * @tx_rate0: HW idx for transmission rate
+ * @tx_tries0: Max number of retransmissions
+ * @key_index: Index on key table to use for encryption
+ * @antenna_mode: Which antenna to use (0 for auto)
+ * @flags: One of AR5K_TXDESC_* flags (desc.h)
+ * @rtscts_rate: HW idx for RTS/CTS transmission rate
+ * @rtscts_duration: What to put on duration field on the header of RTS/CTS
+ *
+ * Internal function to initialize a 4-Word TX control descriptor
+ * found on AR5212 and later MACs chips.
+ *
+ * Returns 0 on success or -EINVAL on invalid input
*/
-static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
- struct ath5k_desc *desc, unsigned int pkt_len, unsigned int hdr_len,
- int padsize,
- enum ath5k_pkt_type type, unsigned int tx_power, unsigned int tx_rate0,
- unsigned int tx_tries0, unsigned int key_index,
- unsigned int antenna_mode, unsigned int flags,
- unsigned int rtscts_rate,
- unsigned int rtscts_duration)
+static int
+ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ unsigned int pkt_len, unsigned int hdr_len,
+ int padsize,
+ enum ath5k_pkt_type type,
+ unsigned int tx_power,
+ unsigned int tx_rate0, unsigned int tx_tries0,
+ unsigned int key_index,
+ unsigned int antenna_mode,
+ unsigned int flags,
+ unsigned int rtscts_rate, unsigned int rtscts_duration)
{
struct ath5k_hw_4w_tx_ctl *tx_ctl;
unsigned int frame_len;
@@ -292,13 +356,29 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
return 0;
}
-/*
- * Initialize a 4-word multi rate retry tx control descriptor on 5212
+/**
+ * ath5k_hw_setup_mrr_tx_desc() - Initialize an MRR tx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @tx_rate1: HW idx for rate used on transmission series 1
+ * @tx_tries1: Max number of retransmissions for transmission series 1
+ * @tx_rate2: HW idx for rate used on transmission series 2
+ * @tx_tries2: Max number of retransmissions for transmission series 2
+ * @tx_rate3: HW idx for rate used on transmission series 3
+ * @tx_tries3: Max number of retransmissions for transmission series 3
+ *
+ * Multi rate retry (MRR) tx control descriptors are available only on AR5212
+ * MACs; they are part of the normal 4-word tx control descriptor (see above),
+ * but we handle them through a separate function for better abstraction.
+ *
+ * Returns 0 on success or -EINVAL on invalid input
*/
int
-ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
- unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2,
- u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3)
+ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ u_int tx_rate1, u_int tx_tries1,
+ u_int tx_rate2, u_int tx_tries2,
+ u_int tx_rate3, u_int tx_tries3)
{
struct ath5k_hw_4w_tx_ctl *tx_ctl;
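/*
 * Editor's illustrative sketch, not part of this patch: how a caller combines
 * the regular tx descriptor setup hook with the MRR call above. Lengths, tx
 * power, rate indices and retry counts are made-up placeholders, and
 * AR5K_PKT_TYPE_NORMAL / AR5K_TXKEYIX_INVALID are assumed to be the usual
 * ath5k constants; only the function signatures come from this file.
 */
static int example_fill_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc)
{
	int ret;

	/* Series 0 goes through the regular setup hook (4-word on AR5212) */
	ret = ah->ah_setup_tx_desc(ah, desc, 1500, 24, 0,
				   AR5K_PKT_TYPE_NORMAL, 10, 12, 4,
				   AR5K_TXKEYIX_INVALID, 0, 0, 0, 0);
	if (ret)
		return ret;

	/* Fallback series 1 - 3, only effective on AR5212+ MACs */
	return ath5k_hw_setup_mrr_tx_desc(ah, desc, 11, 2, 10, 2, 9, 4);
}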
@@ -350,11 +430,16 @@ ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
* TX Status descriptors *
\***********************/
-/*
- * Process the tx status descriptor on 5210/5211
+/**
+ * ath5k_hw_proc_2word_tx_status() - Process a tx status descriptor on 5210/1
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @ts: The &struct ath5k_tx_status
*/
-static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
- struct ath5k_desc *desc, struct ath5k_tx_status *ts)
+static int
+ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ struct ath5k_tx_status *ts)
{
struct ath5k_hw_2w_tx_ctl *tx_ctl;
struct ath5k_hw_tx_status *tx_status;
@@ -399,11 +484,16 @@ static int ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
return 0;
}
-/*
- * Process a tx status descriptor on 5212
+/**
+ * ath5k_hw_proc_4word_tx_status() - Process a tx status descriptor on 5212
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @ts: The &struct ath5k_tx_status
*/
-static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
- struct ath5k_desc *desc, struct ath5k_tx_status *ts)
+static int
+ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ struct ath5k_tx_status *ts)
{
struct ath5k_hw_4w_tx_ctl *tx_ctl;
struct ath5k_hw_tx_status *tx_status;
@@ -460,11 +550,17 @@ static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
* RX Descriptors *
\****************/
-/*
- * Initialize an rx control descriptor
+/**
+ * ath5k_hw_setup_rx_desc() - Initialize an rx control descriptor
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @size: RX buffer length in bytes
+ * @flags: One of AR5K_RXDESC_* flags
*/
-int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
- u32 size, unsigned int flags)
+int
+ath5k_hw_setup_rx_desc(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ u32 size, unsigned int flags)
{
struct ath5k_hw_rx_ctl *rx_ctl;
@@ -491,11 +587,22 @@ int ath5k_hw_setup_rx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
return 0;
}
-/*
- * Process the rx status descriptor on 5210/5211
+/**
+ * ath5k_hw_proc_5210_rx_status() - Process the rx status descriptor on 5210/1
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @rs: The &struct ath5k_rx_status
+ *
+ * Internal function used to process an RX status descriptor
+ * on AR5210/5211 MACs.
+ *
+ * Returns 0 on success or -EINPROGRESS in case we haven't received the whole
+ * frame yet.
*/
-static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
- struct ath5k_desc *desc, struct ath5k_rx_status *rs)
+static int
+ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ struct ath5k_rx_status *rs)
{
struct ath5k_hw_rx_status *rx_status;
@@ -574,12 +681,22 @@ static int ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
return 0;
}
-/*
- * Process the rx status descriptor on 5212
+/**
+ * ath5k_hw_proc_5212_rx_status() - Process the rx status descriptor on 5212
+ * @ah: The &struct ath5k_hw
+ * @desc: The &struct ath5k_desc
+ * @rs: The &struct ath5k_rx_status
+ *
+ * Internal function used to process an RX status descriptor
+ * on AR5212 and later MACs.
+ *
+ * Returns 0 on success or -EINPROGRESS in case we haven't received the whole
+ * frame yet.
*/
-static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
- struct ath5k_desc *desc,
- struct ath5k_rx_status *rs)
+static int
+ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
+ struct ath5k_desc *desc,
+ struct ath5k_rx_status *rs)
{
struct ath5k_hw_rx_status *rx_status;
u32 rxstat0, rxstat1;
@@ -646,10 +763,16 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
* Attach *
\********/
-/*
- * Init function pointers inside ath5k_hw struct
+/**
+ * ath5k_hw_init_desc_functions() - Init function pointers inside ah
+ * @ah: The &struct ath5k_hw
+ *
+ * Maps the internal descriptor functions to the function pointers on ah, so
+ * that the layer above can use them. This acts as an abstraction layer to
+ * handle the various chips the same way.
*/
-int ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
+int
+ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
{
if (ah->ah_version == AR5K_AR5212) {
ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
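/*
 * Editor's illustrative sketch, not part of this patch: once the hooks are
 * set here, callers go through them instead of branching on ah->ah_version
 * themselves. This assumes an ah_proc_tx_desc hook set alongside
 * ah_setup_tx_desc in the same attach path.
 */
static int example_proc_tx(struct ath5k_hw *ah, struct ath5k_desc *desc,
			   struct ath5k_tx_status *ts)
{
	/* Resolves to the 2-word or 4-word variant chosen at attach time */
	return ah->ah_proc_tx_desc(ah, desc, ts);
}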
diff --git a/drivers/net/wireless/ath/ath5k/desc.h b/drivers/net/wireless/ath/ath5k/desc.h
index cfd529b5..8d6c01a 100644
--- a/drivers/net/wireless/ath/ath5k/desc.h
+++ b/drivers/net/wireless/ath/ath5k/desc.h
@@ -20,25 +20,30 @@
* RX/TX descriptor structures
*/
-/*
- * Common hardware RX control descriptor
+/**
+ * struct ath5k_hw_rx_ctl - Common hardware RX control descriptor
+ * @rx_control_0: RX control word 0
+ * @rx_control_1: RX control word 1
*/
struct ath5k_hw_rx_ctl {
- u32 rx_control_0; /* RX control word 0 */
- u32 rx_control_1; /* RX control word 1 */
+ u32 rx_control_0;
+ u32 rx_control_1;
} __packed __aligned(4);
/* RX control word 1 fields/flags */
#define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff /* data buffer length */
#define AR5K_DESC_RX_CTL1_INTREQ 0x00002000 /* RX interrupt request */
-/*
- * Common hardware RX status descriptor
+/**
+ * struct ath5k_hw_rx_status - Common hardware RX status descriptor
+ * @rx_status_0: RX status word 0
+ * @rx_status_1: RX status word 1
+ *
* 5210, 5211 and 5212 differ only in the fields and flags defined below
*/
struct ath5k_hw_rx_status {
- u32 rx_status_0; /* RX status word 0 */
- u32 rx_status_1; /* RX status word 1 */
+ u32 rx_status_0;
+ u32 rx_status_1;
} __packed __aligned(4);
/* 5210/5211 */
@@ -98,17 +103,36 @@ struct ath5k_hw_rx_status {
/**
* enum ath5k_phy_error_code - PHY Error codes
+ * @AR5K_RX_PHY_ERROR_UNDERRUN: Transmit underrun, [5210] No error
+ * @AR5K_RX_PHY_ERROR_TIMING: Timing error
+ * @AR5K_RX_PHY_ERROR_PARITY: Illegal parity
+ * @AR5K_RX_PHY_ERROR_RATE: Illegal rate
+ * @AR5K_RX_PHY_ERROR_LENGTH: Illegal length
+ * @AR5K_RX_PHY_ERROR_RADAR: Radar detect, [5210] 64 QAM rate
+ * @AR5K_RX_PHY_ERROR_SERVICE: Illegal service
+ * @AR5K_RX_PHY_ERROR_TOR: Transmit override receive
+ * @AR5K_RX_PHY_ERROR_OFDM_TIMING: OFDM Timing error [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY: OFDM Signal parity error [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL: OFDM Illegal rate [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_LENGTH_ILLEGAL: OFDM Illegal length [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_POWER_DROP: OFDM Power drop [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_SERVICE: OFDM Service (?) [5212+]
+ * @AR5K_RX_PHY_ERROR_OFDM_RESTART: OFDM Restart (?) [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_TIMING: CCK Timing error [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_HEADER_CRC: Header CRC error [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_RATE_ILLEGAL: Illegal rate [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_SERVICE: CCK Service (?) [5212+]
+ * @AR5K_RX_PHY_ERROR_CCK_RESTART: CCK Restart (?) [5212+]
*/
enum ath5k_phy_error_code {
- AR5K_RX_PHY_ERROR_UNDERRUN = 0, /* Transmit underrun, [5210] No error */
- AR5K_RX_PHY_ERROR_TIMING = 1, /* Timing error */
- AR5K_RX_PHY_ERROR_PARITY = 2, /* Illegal parity */
- AR5K_RX_PHY_ERROR_RATE = 3, /* Illegal rate */
- AR5K_RX_PHY_ERROR_LENGTH = 4, /* Illegal length */
- AR5K_RX_PHY_ERROR_RADAR = 5, /* Radar detect, [5210] 64 QAM rate */
- AR5K_RX_PHY_ERROR_SERVICE = 6, /* Illegal service */
- AR5K_RX_PHY_ERROR_TOR = 7, /* Transmit override receive */
- /* these are specific to the 5212 */
+ AR5K_RX_PHY_ERROR_UNDERRUN = 0,
+ AR5K_RX_PHY_ERROR_TIMING = 1,
+ AR5K_RX_PHY_ERROR_PARITY = 2,
+ AR5K_RX_PHY_ERROR_RATE = 3,
+ AR5K_RX_PHY_ERROR_LENGTH = 4,
+ AR5K_RX_PHY_ERROR_RADAR = 5,
+ AR5K_RX_PHY_ERROR_SERVICE = 6,
+ AR5K_RX_PHY_ERROR_TOR = 7,
AR5K_RX_PHY_ERROR_OFDM_TIMING = 17,
AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY = 18,
AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL = 19,
@@ -123,12 +147,14 @@ enum ath5k_phy_error_code {
AR5K_RX_PHY_ERROR_CCK_RESTART = 31,
};
-/*
- * 5210/5211 hardware 2-word TX control descriptor
+/**
+ * struct ath5k_hw_2w_tx_ctl - 5210/5211 hardware 2-word TX control descriptor
+ * @tx_control_0: TX control word 0
+ * @tx_control_1: TX control word 1
*/
struct ath5k_hw_2w_tx_ctl {
- u32 tx_control_0; /* TX control word 0 */
- u32 tx_control_1; /* TX control word 1 */
+ u32 tx_control_0;
+ u32 tx_control_1;
} __packed __aligned(4);
/* TX control word 0 fields/flags */
@@ -177,14 +203,18 @@ struct ath5k_hw_2w_tx_ctl {
#define AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS 4
#define AR5K_AR5211_TX_DESC_FRAME_TYPE_PRESP 4
-/*
- * 5212 hardware 4-word TX control descriptor
+/**
+ * struct ath5k_hw_4w_tx_ctl - 5212 hardware 4-word TX control descriptor
+ * @tx_control_0: TX control word 0
+ * @tx_control_1: TX control word 1
+ * @tx_control_2: TX control word 2
+ * @tx_control_3: TX control word 3
*/
struct ath5k_hw_4w_tx_ctl {
- u32 tx_control_0; /* TX control word 0 */
- u32 tx_control_1; /* TX control word 1 */
- u32 tx_control_2; /* TX control word 2 */
- u32 tx_control_3; /* TX control word 3 */
+ u32 tx_control_0;
+ u32 tx_control_1;
+ u32 tx_control_2;
+ u32 tx_control_3;
} __packed __aligned(4);
/* TX control word 0 fields/flags */
@@ -238,12 +268,14 @@ struct ath5k_hw_4w_tx_ctl {
#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE 0x01f00000 /* RTS or CTS rate */
#define AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE_S 20
-/*
- * Common TX status descriptor
+/**
+ * struct ath5k_hw_tx_status - Common TX status descriptor
+ * @tx_status_0: TX status word 0
+ * @tx_status_1: TX status word 1
*/
struct ath5k_hw_tx_status {
- u32 tx_status_0; /* TX status word 0 */
- u32 tx_status_1; /* TX status word 1 */
+ u32 tx_status_0;
+ u32 tx_status_1;
} __packed __aligned(4);
/* TX status word 0 fields/flags */
@@ -276,37 +308,47 @@ struct ath5k_hw_tx_status {
#define AR5K_DESC_TX_STATUS1_COMP_SUCCESS_5212 0x00800000 /* [5212] compression status */
#define AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212 0x01000000 /* [5212] transmit antenna */
-/*
- * 5210/5211 hardware TX descriptor
+/**
+ * struct ath5k_hw_5210_tx_desc - 5210/5211 hardware TX descriptor
+ * @tx_ctl: The &struct ath5k_hw_2w_tx_ctl
+ * @tx_stat: The &struct ath5k_hw_tx_status
*/
struct ath5k_hw_5210_tx_desc {
struct ath5k_hw_2w_tx_ctl tx_ctl;
struct ath5k_hw_tx_status tx_stat;
} __packed __aligned(4);
-/*
- * 5212 hardware TX descriptor
+/**
+ * struct ath5k_hw_5212_tx_desc - 5212 hardware TX descriptor
+ * @tx_ctl: The &struct ath5k_hw_4w_tx_ctl
+ * @tx_stat: The &struct ath5k_hw_tx_status
*/
struct ath5k_hw_5212_tx_desc {
struct ath5k_hw_4w_tx_ctl tx_ctl;
struct ath5k_hw_tx_status tx_stat;
} __packed __aligned(4);
-/*
- * Common hardware RX descriptor
+/**
+ * struct ath5k_hw_all_rx_desc - Common hardware RX descriptor
+ * @rx_ctl: The &struct ath5k_hw_rx_ctl
+ * @rx_stat: The &struct ath5k_hw_rx_status
*/
struct ath5k_hw_all_rx_desc {
struct ath5k_hw_rx_ctl rx_ctl;
struct ath5k_hw_rx_status rx_stat;
} __packed __aligned(4);
-/*
- * Atheros hardware DMA descriptor
+/**
+ * struct ath5k_desc - Atheros hardware DMA descriptor
+ * @ds_link: Physical address of the next descriptor
+ * @ds_data: Physical address of data buffer (skb)
+ * @ud: Union containing hw_5xxx_tx_desc structs and hw_all_rx_desc
+ *
* This is read and written to by the hardware
*/
struct ath5k_desc {
- u32 ds_link; /* physical address of the next descriptor */
- u32 ds_data; /* physical address of data buffer (skb) */
+ u32 ds_link;
+ u32 ds_data;
union {
struct ath5k_hw_5210_tx_desc ds_tx5210;
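/*
 * Editor's illustrative sketch appended after the desc.h hunk, not part of
 * this patch: ds_link and ds_data hold bus addresses (e.g. from
 * dma_map_single()), so chaining descriptors is just pointing ds_link at the
 * DMA address of the next one. The argument names are made up for the example.
 */
static void example_chain_descs(struct ath5k_desc *d, dma_addr_t buf_daddr,
				dma_addr_t next_desc_daddr)
{
	d->ds_data = buf_daddr;		/* frame buffer for this descriptor */
	d->ds_link = next_desc_daddr;	/* hw fetches this descriptor next */
}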
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index 2481f9c..5cc9aa8 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -20,16 +20,13 @@
* DMA and interrupt masking functions *
\*************************************/
-/*
- * dma.c - DMA and interrupt masking functions
+/**
+ * DOC: DMA and interrupt masking functions
*
* Here we setup descriptor pointers (rxdp/txdp) start/stop dma engine and
* handle queue setup for 5210 chipset (rest are handled on qcu.c).
* Also we setup interrupt mask register (IMR) and read the various interrupt
* status registers (ISR).
- *
- * TODO: Handle SISR on 5211+ and introduce a function to return the queue
- * number that resulted the interrupt.
*/
#include "ath5k.h"
@@ -42,22 +39,22 @@
\*********/
/**
- * ath5k_hw_start_rx_dma - Start DMA receive
- *
+ * ath5k_hw_start_rx_dma() - Start DMA receive
* @ah: The &struct ath5k_hw
*/
-void ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
+void
+ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
{
ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
ath5k_hw_reg_read(ah, AR5K_CR);
}
/**
- * ath5k_hw_stop_rx_dma - Stop DMA receive
- *
+ * ath5k_hw_stop_rx_dma() - Stop DMA receive
* @ah: The &struct ath5k_hw
*/
-static int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
+static int
+ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
{
unsigned int i;
@@ -79,24 +76,24 @@ static int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_get_rxdp - Get RX Descriptor's address
- *
+ * ath5k_hw_get_rxdp() - Get RX Descriptor's address
* @ah: The &struct ath5k_hw
*/
-u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah)
+u32
+ath5k_hw_get_rxdp(struct ath5k_hw *ah)
{
return ath5k_hw_reg_read(ah, AR5K_RXDP);
}
/**
- * ath5k_hw_set_rxdp - Set RX Descriptor's address
- *
+ * ath5k_hw_set_rxdp() - Set RX Descriptor's address
* @ah: The &struct ath5k_hw
* @phys_addr: RX descriptor address
*
* Returns -EIO if rx is active
*/
-int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
+int
+ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
{
if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) {
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
@@ -114,8 +111,7 @@ int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
\**********/
/**
- * ath5k_hw_start_tx_dma - Start DMA transmit for a specific queue
- *
+ * ath5k_hw_start_tx_dma() - Start DMA transmit for a specific queue
* @ah: The &struct ath5k_hw
* @queue: The hw queue number
*
@@ -128,7 +124,8 @@ int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
* NOTE: Must be called after setting up tx control descriptor for that
* queue (see below).
*/
-int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
+int
+ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
u32 tx_queue;
@@ -177,17 +174,16 @@ int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
}
/**
- * ath5k_hw_stop_tx_dma - Stop DMA transmit on a specific queue
- *
+ * ath5k_hw_stop_tx_dma() - Stop DMA transmit on a specific queue
* @ah: The &struct ath5k_hw
* @queue: The hw queue number
*
* Stop DMA transmit on a specific hw queue and drain queue so we don't
* have any pending frames. Returns -EBUSY if we still have pending frames,
* -EINVAL if queue number is out of range or inactive.
- *
*/
-static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
+static int
+ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
unsigned int i = 40;
u32 tx_queue, pending;
@@ -320,14 +316,14 @@ static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
}
/**
- * ath5k_hw_stop_beacon_queue - Stop beacon queue
- *
- * @ah The &struct ath5k_hw
- * @queue The queue number
+ * ath5k_hw_stop_beacon_queue() - Stop beacon queue
+ * @ah: The &struct ath5k_hw
+ * @queue: The queue number
*
* Returns -EIO if queue didn't stop
*/
-int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
+int
+ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
{
int ret;
ret = ath5k_hw_stop_tx_dma(ah, queue);
@@ -340,8 +336,7 @@ int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
}
/**
- * ath5k_hw_get_txdp - Get TX Descriptor's address for a specific queue
- *
+ * ath5k_hw_get_txdp() - Get TX Descriptor's address for a specific queue
* @ah: The &struct ath5k_hw
* @queue: The hw queue number
*
@@ -352,7 +347,8 @@ int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
*
* XXX: Is TXDP read and clear ?
*/
-u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
+u32
+ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
{
u16 tx_reg;
@@ -382,10 +378,10 @@ u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
}
/**
- * ath5k_hw_set_txdp - Set TX Descriptor's address for a specific queue
- *
+ * ath5k_hw_set_txdp() - Set TX Descriptor's address for a specific queue
* @ah: The &struct ath5k_hw
* @queue: The hw queue number
+ * @phys_addr: The physical address
*
* Set TX descriptor's address for a specific queue. For 5210 we ignore
* the queue number and we use tx queue type since we only have 2 queues
@@ -394,7 +390,8 @@ u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
* Returns -EINVAL if queue type is invalid for 5210 and -EIO if queue is still
* active.
*/
-int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
+int
+ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
{
u16 tx_reg;
@@ -435,8 +432,7 @@ int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
}
/**
- * ath5k_hw_update_tx_triglevel - Update tx trigger level
- *
+ * ath5k_hw_update_tx_triglevel() - Update tx trigger level
* @ah: The &struct ath5k_hw
* @increase: Flag to force increase of trigger level
*
@@ -444,15 +440,15 @@ int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
* buffer (aka FIFO threshold) that is used to indicate when PCU flushes
* the buffer and transmits its data. Lowering this results sending small
* frames more quickly but can lead to tx underruns, raising it a lot can
- * result other problems (i think bmiss is related). Right now we start with
- * the lowest possible (64Bytes) and if we get tx underrun we increase it using
- * the increase flag. Returns -EIO if we have reached maximum/minimum.
+ * result in other problems. Right now we start with the lowest possible
+ * value (64 bytes) and if we get a tx underrun we increase it using the
+ * increase flag. Returns -EIO if we have reached the maximum/minimum.
*
* XXX: Link this with tx DMA size ?
- * XXX: Use it to save interrupts ?
- * TODO: Needs testing, i think it's related to bmiss...
+ * XXX2: Use it to save interrupts ?
*/
-int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
+int
+ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
{
u32 trigger_level, imr;
int ret = -EIO;
@@ -498,21 +494,20 @@ done:
\*******************/
/**
- * ath5k_hw_is_intr_pending - Check if we have pending interrupts
- *
+ * ath5k_hw_is_intr_pending() - Check if we have pending interrupts
* @ah: The &struct ath5k_hw
*
* Check if we have pending interrupts to process. Returns 1 if we
* have pending interrupts and 0 if we haven't.
*/
-bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
+bool
+ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
{
return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
}
/**
- * ath5k_hw_get_isr - Get interrupt status
- *
+ * ath5k_hw_get_isr() - Get interrupt status
* @ah: The @struct ath5k_hw
* @interrupt_mask: Driver's interrupt mask used to filter out
* interrupts in sw.
@@ -523,62 +518,162 @@ bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
* being mapped on some standard non hw-specific positions
* (check out &ath5k_int).
*
- * NOTE: We use read-and-clear register, so after this function is called ISR
- * is zeroed.
+ * NOTE: We do write-to-clear, so the active PISR/SISR bits at the time this
+ * function gets called are cleared on return.
*/
-int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
+int
+ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
{
- u32 data;
+ u32 data = 0;
/*
- * Read interrupt status from the Interrupt Status register
- * on 5210
+ * Read interrupt status from Primary Interrupt
+ * Register.
+ *
+ * Note: PISR/SISR not available on 5210
*/
if (ah->ah_version == AR5K_AR5210) {
- data = ath5k_hw_reg_read(ah, AR5K_ISR);
- if (unlikely(data == AR5K_INT_NOCARD)) {
- *interrupt_mask = data;
+ u32 isr = 0;
+ isr = ath5k_hw_reg_read(ah, AR5K_ISR);
+ if (unlikely(isr == AR5K_INT_NOCARD)) {
+ *interrupt_mask = isr;
return -ENODEV;
}
- } else {
+
/*
- * Read interrupt status from Interrupt
- * Status Register shadow copy (Read And Clear)
- *
- * Note: PISR/SISR Not available on 5210
+ * Filter out the non-common bits from the interrupt
+ * status.
*/
- data = ath5k_hw_reg_read(ah, AR5K_RAC_PISR);
- if (unlikely(data == AR5K_INT_NOCARD)) {
- *interrupt_mask = data;
+ *interrupt_mask = (isr & AR5K_INT_COMMON) & ah->ah_imr;
+
+ /* Handle INT_FATAL */
+ if (unlikely(isr & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
+ | AR5K_ISR_DPERR)))
+ *interrupt_mask |= AR5K_INT_FATAL;
+
+ /*
+ * XXX: BMISS interrupts may occur after association.
+ * I found this on 5210 code but it needs testing. If this is
+ * true we should disable them before assoc and re-enable them
+ * after a successful assoc + some jiffies.
+ interrupt_mask &= ~AR5K_INT_BMISS;
+ */
+
+ data = isr;
+ } else {
+ u32 pisr = 0;
+ u32 pisr_clear = 0;
+ u32 sisr0 = 0;
+ u32 sisr1 = 0;
+ u32 sisr2 = 0;
+ u32 sisr3 = 0;
+ u32 sisr4 = 0;
+
+ /* Read PISR and SISRs... */
+ pisr = ath5k_hw_reg_read(ah, AR5K_PISR);
+ if (unlikely(pisr == AR5K_INT_NOCARD)) {
+ *interrupt_mask = pisr;
return -ENODEV;
}
- }
- /*
- * Get abstract interrupt mask (driver-compatible)
- */
- *interrupt_mask = (data & AR5K_INT_COMMON) & ah->ah_imr;
+ sisr0 = ath5k_hw_reg_read(ah, AR5K_SISR0);
+ sisr1 = ath5k_hw_reg_read(ah, AR5K_SISR1);
+ sisr2 = ath5k_hw_reg_read(ah, AR5K_SISR2);
+ sisr3 = ath5k_hw_reg_read(ah, AR5K_SISR3);
+ sisr4 = ath5k_hw_reg_read(ah, AR5K_SISR4);
- if (ah->ah_version != AR5K_AR5210) {
- u32 sisr2 = ath5k_hw_reg_read(ah, AR5K_RAC_SISR2);
+ /*
+ * PISR holds the logical OR of interrupt bits
+ * from SISR registers:
+ *
+ * TXOK and TXDESC -> Logical OR of TXOK and TXDESC
+ * per-queue bits on SISR0
+ *
+ * TXERR and TXEOL -> Logical OR of TXERR and TXEOL
+ * per-queue bits on SISR1
+ *
+ * TXURN -> Logical OR of TXURN per-queue bits on SISR2
+ *
+ * HIUERR -> Logical OR of MCABT, SSERR and DPER bits on SISR2
+ *
+ * BCNMISC -> Logical OR of TIM, CAB_END, DTIM_SYNC,
+ * BCN_TIMEOUT, CAB_TIMEOUT and DTIM
+ * (and TSFOOR ?) bits on SISR2
+ *
+ * QCBRORN and QCBRURN -> Logical OR of QCBRORN and
+ * QCBRURN per-queue bits on SISR3
+ * QTRIG -> Logical OR of QTRIG per-queue bits on SISR4
+ *
+ * If we clear these bits on PISR we'll also clear all the
+ * related bits from the SISRs, e.g. if we write the TXOK bit on
+ * PISR we'll clear all TXOK bits from SISR0, so if a new TXOK
+ * interrupt fired for another queue while we were reading
+ * the interrupt registers and we write back the TXOK bit on
+ * PISR we'll lose it. So make sure that we don't write back
+ * to PISR any bits that come from the SISRs. Clearing them from
+ * the SISRs will also clear PISR, so there is no need to worry here.
+ */
- /*HIU = Host Interface Unit (PCI etc)*/
- if (unlikely(data & (AR5K_ISR_HIUERR)))
- *interrupt_mask |= AR5K_INT_FATAL;
+ pisr_clear = pisr & ~AR5K_ISR_BITS_FROM_SISRS;
- /*Beacon Not Ready*/
- if (unlikely(data & (AR5K_ISR_BNR)))
- *interrupt_mask |= AR5K_INT_BNR;
+ /*
+ * Write to clear them...
+ * Note: This means that each bit we write back
+ * to the registers will get cleared, leaving the
+ * rest unaffected. So this won't affect new interrupts
+ * we didn't catch while reading/processing; we'll get
+ * them the next time get_isr is called.
+ */
+ ath5k_hw_reg_write(ah, sisr0, AR5K_SISR0);
+ ath5k_hw_reg_write(ah, sisr1, AR5K_SISR1);
+ ath5k_hw_reg_write(ah, sisr2, AR5K_SISR2);
+ ath5k_hw_reg_write(ah, sisr3, AR5K_SISR3);
+ ath5k_hw_reg_write(ah, sisr4, AR5K_SISR4);
+ ath5k_hw_reg_write(ah, pisr_clear, AR5K_PISR);
+ /* Flush previous write */
+ ath5k_hw_reg_read(ah, AR5K_PISR);
- if (unlikely(sisr2 & (AR5K_SISR2_SSERR |
- AR5K_SISR2_DPERR |
- AR5K_SISR2_MCABT)))
- *interrupt_mask |= AR5K_INT_FATAL;
+ /*
+ * Filter out the non-common bits from the interrupt
+ * status.
+ */
+ *interrupt_mask = (pisr & AR5K_INT_COMMON) & ah->ah_imr;
+
+
+ /* We treat TXOK, TXDESC, TXERR and TXEOL
+ * the same way (schedule the tx tasklet)
+ * so we track them all together per queue */
+ if (pisr & AR5K_ISR_TXOK)
+ ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
+ AR5K_SISR0_QCU_TXOK);
- if (data & AR5K_ISR_TIM)
+ if (pisr & AR5K_ISR_TXDESC)
+ ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
+ AR5K_SISR0_QCU_TXDESC);
+
+ if (pisr & AR5K_ISR_TXERR)
+ ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
+ AR5K_SISR1_QCU_TXERR);
+
+ if (pisr & AR5K_ISR_TXEOL)
+ ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
+ AR5K_SISR1_QCU_TXEOL);
+
+ /* Currently this is not very useful since we treat
+ * all queues the same way if we get a TXURN (update
+ * tx trigger level), but we might need it later on */
+ if (pisr & AR5K_ISR_TXURN)
+ ah->ah_txq_isr_txurn |= AR5K_REG_MS(sisr2,
+ AR5K_SISR2_QCU_TXURN);
+
+ /* Misc Beacon related interrupts */
+
+ /* For AR5211 */
+ if (pisr & AR5K_ISR_TIM)
*interrupt_mask |= AR5K_INT_TIM;
- if (data & AR5K_ISR_BCNMISC) {
+ /* For AR5212+ */
+ if (pisr & AR5K_ISR_BCNMISC) {
if (sisr2 & AR5K_SISR2_TIM)
*interrupt_mask |= AR5K_INT_TIM;
if (sisr2 & AR5K_SISR2_DTIM)
@@ -591,63 +686,39 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
*interrupt_mask |= AR5K_INT_CAB_TIMEOUT;
}
- if (data & AR5K_ISR_RXDOPPLER)
- *interrupt_mask |= AR5K_INT_RX_DOPPLER;
- if (data & AR5K_ISR_QCBRORN) {
+ /* Below interrupts are unlikely to happen */
+
+ /* HIU = Host Interface Unit (PCI etc)
+ * Can be one of MCABT, SSERR, DPERR from SISR2 */
+ if (unlikely(pisr & (AR5K_ISR_HIUERR)))
+ *interrupt_mask |= AR5K_INT_FATAL;
+
+ /*Beacon Not Ready*/
+ if (unlikely(pisr & (AR5K_ISR_BNR)))
+ *interrupt_mask |= AR5K_INT_BNR;
+
+ /* A queue got CBR overrun */
+ if (unlikely(pisr & (AR5K_ISR_QCBRORN))) {
*interrupt_mask |= AR5K_INT_QCBRORN;
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
- AR5K_SISR3_QCBRORN);
+ ah->ah_txq_isr_qcborn |= AR5K_REG_MS(sisr3,
+ AR5K_SISR3_QCBRORN);
}
- if (data & AR5K_ISR_QCBRURN) {
+
+ /* A queue got CBR underrun */
+ if (unlikely(pisr & (AR5K_ISR_QCBRURN))) {
*interrupt_mask |= AR5K_INT_QCBRURN;
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR3),
- AR5K_SISR3_QCBRURN);
+ ah->ah_txq_isr_qcburn |= AR5K_REG_MS(sisr3,
+ AR5K_SISR3_QCBRURN);
}
- if (data & AR5K_ISR_QTRIG) {
+
+ /* A queue got triggered */
+ if (unlikely(pisr & (AR5K_ISR_QTRIG))) {
*interrupt_mask |= AR5K_INT_QTRIG;
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR4),
- AR5K_SISR4_QTRIG);
+ ah->ah_txq_isr_qtrig |= AR5K_REG_MS(sisr4,
+ AR5K_SISR4_QTRIG);
}
- if (data & AR5K_ISR_TXOK)
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
- AR5K_SISR0_QCU_TXOK);
-
- if (data & AR5K_ISR_TXDESC)
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR0),
- AR5K_SISR0_QCU_TXDESC);
-
- if (data & AR5K_ISR_TXERR)
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
- AR5K_SISR1_QCU_TXERR);
-
- if (data & AR5K_ISR_TXEOL)
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR1),
- AR5K_SISR1_QCU_TXEOL);
-
- if (data & AR5K_ISR_TXURN)
- ah->ah_txq_isr |= AR5K_REG_MS(
- ath5k_hw_reg_read(ah, AR5K_RAC_SISR2),
- AR5K_SISR2_QCU_TXURN);
- } else {
- if (unlikely(data & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
- | AR5K_ISR_HIUERR | AR5K_ISR_DPERR)))
- *interrupt_mask |= AR5K_INT_FATAL;
-
- /*
- * XXX: BMISS interrupts may occur after association.
- * I found this on 5210 code but it needs testing. If this is
- * true we should disable them before assoc and re-enable them
- * after a successful assoc + some jiffies.
- interrupt_mask &= ~AR5K_INT_BMISS;
- */
+ data = pisr;
}
/*
@@ -661,8 +732,7 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
}
/**
- * ath5k_hw_set_imr - Set interrupt mask
- *
+ * ath5k_hw_set_imr() - Set interrupt mask
* @ah: The &struct ath5k_hw
* @new_mask: The new interrupt mask to be set
*
@@ -670,7 +740,8 @@ int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
* ath5k_int bits to hw-specific bits to remove abstraction and writing
* Interrupt Mask Register.
*/
-enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
+enum ath5k_int
+ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
{
enum ath5k_int old_mask, int_mask;
@@ -697,16 +768,14 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2)
& AR5K_SIMR2_QCU_TXURN;
+ /* Fatal interrupt abstraction for 5211+ */
if (new_mask & AR5K_INT_FATAL) {
int_mask |= AR5K_IMR_HIUERR;
simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR
| AR5K_SIMR2_DPERR);
}
- /*Beacon Not Ready*/
- if (new_mask & AR5K_INT_BNR)
- int_mask |= AR5K_INT_BNR;
-
+ /* Misc beacon related interrupts */
if (new_mask & AR5K_INT_TIM)
int_mask |= AR5K_IMR_TIM;
@@ -721,8 +790,9 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
if (new_mask & AR5K_INT_CAB_TIMEOUT)
simr2 |= AR5K_SISR2_CAB_TIMEOUT;
- if (new_mask & AR5K_INT_RX_DOPPLER)
- int_mask |= AR5K_IMR_RXDOPPLER;
+ /*Beacon Not Ready*/
+ if (new_mask & AR5K_INT_BNR)
+ int_mask |= AR5K_INT_BNR;
/* Note: Per queue interrupt masks
* are set via ath5k_hw_reset_tx_queue() (qcu.c) */
@@ -730,10 +800,12 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);
} else {
+ /* Fatal interrupt abstraction for 5210 */
if (new_mask & AR5K_INT_FATAL)
int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT
| AR5K_IMR_HIUERR | AR5K_IMR_DPERR);
+ /* Only common interrupts left for 5210 (no SIMRs) */
ath5k_hw_reg_write(ah, int_mask, AR5K_IMR);
}
@@ -760,8 +832,7 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
\********************/
/**
- * ath5k_hw_dma_init - Initialize DMA unit
- *
+ * ath5k_hw_dma_init() - Initialize DMA unit
* @ah: The &struct ath5k_hw
*
* Set DMA size and pre-enable interrupts
@@ -770,7 +841,8 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
*
* XXX: Save/restore RXDP/TXDP registers ?
*/
-void ath5k_hw_dma_init(struct ath5k_hw *ah)
+void
+ath5k_hw_dma_init(struct ath5k_hw *ah)
{
/*
* Set Rx/Tx DMA Configuration
@@ -799,8 +871,7 @@ void ath5k_hw_dma_init(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_dma_stop - stop DMA unit
- *
+ * ath5k_hw_dma_stop() - stop DMA unit
* @ah: The &struct ath5k_hw
*
* Stop tx/rx DMA and interrupts. Returns
@@ -810,7 +881,8 @@ void ath5k_hw_dma_init(struct ath5k_hw *ah)
* stuck frames on tx queues, only a reset
* can fix that.
*/
-int ath5k_hw_dma_stop(struct ath5k_hw *ah)
+int
+ath5k_hw_dma_stop(struct ath5k_hw *ah)
{
int i, qmax, err;
err = 0;
diff --git a/drivers/net/wireless/ath/ath5k/gpio.c b/drivers/net/wireless/ath/ath5k/gpio.c
index 8592978..73d3dd8 100644
--- a/drivers/net/wireless/ath/ath5k/gpio.c
+++ b/drivers/net/wireless/ath/ath5k/gpio.c
@@ -24,10 +24,33 @@
#include "reg.h"
#include "debug.h"
-/*
- * Set led state
+
+/**
+ * DOC: GPIO/LED functions
+ *
+ * Here we control the 6 bidirectional GPIO pins provided by the hw.
+ * We can set a GPIO pin to be an input or an output pin via the GPIO control
+ * register and then read or set its status from the GPIO data input/output
+ * registers.
+ *
+ * We also control the two LED pins provided by the hw: LED_0 is our
+ * "power" LED and LED_1 is our "network activity" LED, but many scenarios
+ * are available from hw. Vendors might also provide LEDs connected to the
+ * GPIO pins; we handle them through the LED subsystem in led.c.
+ */
+
+
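/*
 * Editor's illustrative sketch, not part of this patch: driving a vendor LED
 * wired to a GPIO pin with the helpers below. The pin number is a made-up
 * placeholder; real boards report it via EEPROM.
 */
static int example_gpio_led_on(struct ath5k_hw *ah)
{
	u32 pin = 1;	/* placeholder GPIO pin */
	int ret;

	ret = ath5k_hw_set_gpio_output(ah, pin);
	if (ret)
		return ret;

	return ath5k_hw_set_gpio(ah, pin, 1);
}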
+/**
+ * ath5k_hw_set_ledstate() - Set led state
+ * @ah: The &struct ath5k_hw
+ * @state: One of AR5K_LED_*
+ *
+ * Used to set the LED blinking state. This only
+ * works for the LED connected to the LED_0, LED_1 pins,
+ * not the GPIO-based ones.
*/
-void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
+void
+ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
{
u32 led;
/*5210 has different led mode handling*/
@@ -74,10 +97,13 @@ void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led_5210);
}
-/*
- * Set GPIO inputs
+/**
+ * ath5k_hw_set_gpio_input() - Set GPIO inputs
+ * @ah: The &struct ath5k_hw
+ * @gpio: GPIO pin to set as input
*/
-int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
+int
+ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
{
if (gpio >= AR5K_NUM_GPIO)
return -EINVAL;
@@ -89,10 +115,13 @@ int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
return 0;
}
-/*
- * Set GPIO outputs
+/**
+ * ath5k_hw_set_gpio_output() - Set GPIO outputs
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to set as output
*/
-int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
+int
+ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
{
if (gpio >= AR5K_NUM_GPIO)
return -EINVAL;
@@ -104,10 +133,13 @@ int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
return 0;
}
-/*
- * Get GPIO state
+/**
+ * ath5k_hw_get_gpio() - Get GPIO state
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to read
*/
-u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
+u32
+ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
{
if (gpio >= AR5K_NUM_GPIO)
return 0xffffffff;
@@ -117,10 +149,14 @@ u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
0x1;
}
-/*
- * Set GPIO state
+/**
+ * ath5k_hw_set_gpio() - Set GPIO state
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to set
+ * @val: Value to set (boolean)
*/
-int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
+int
+ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
{
u32 data;
@@ -138,10 +174,19 @@ int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
return 0;
}
-/*
- * Initialize the GPIO interrupt (RFKill switch)
+/**
+ * ath5k_hw_set_gpio_intr() - Initialize the GPIO interrupt (RFKill switch)
+ * @ah: The &struct ath5k_hw
+ * @gpio: The GPIO pin to use
+ * @interrupt_level: True to generate interrupt on active pin (high)
+ *
+ * This function is used to set up the GPIO interrupt for the hw RFKill switch.
+ * That switch is connected to a GPIO pin and its number is stored in the EEPROM.
+ * It can either open or close the circuit to indicate that we should disable
+ * RF/Wireless to save power (we also get that from EEPROM).
*/
-void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
+void
+ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
u32 interrupt_level)
{
u32 data;
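/*
 * Editor's illustrative sketch, not part of this patch: arming the RFKill
 * GPIO interrupt described above. Pin number and polarity are placeholders
 * standing in for the values normally read from EEPROM.
 */
static void example_arm_rfkill(struct ath5k_hw *ah)
{
	unsigned int rfkill_pin = 0;	/* placeholder, from EEPROM on real hw */
	u32 rfkill_active_high = 1;	/* placeholder polarity, also from EEPROM */

	if (ath5k_hw_set_gpio_input(ah, rfkill_pin))
		return;

	ath5k_hw_set_gpio_intr(ah, rfkill_pin, rfkill_active_high);
}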
diff --git a/drivers/net/wireless/ath/ath5k/initvals.c b/drivers/net/wireless/ath/ath5k/initvals.c
index 1ffecc0..a1ea78e 100644
--- a/drivers/net/wireless/ath/ath5k/initvals.c
+++ b/drivers/net/wireless/ath/ath5k/initvals.c
@@ -23,24 +23,27 @@
#include "reg.h"
#include "debug.h"
-/*
- * Mode-independent initial register writes
+/**
+ * struct ath5k_ini - Mode-independent initial register writes
+ * @ini_register: Register address
+ * @ini_value: Default value
+ * @ini_mode: 0 to write, 1 to read (and clear)
*/
-
struct ath5k_ini {
u16 ini_register;
u32 ini_value;
enum {
AR5K_INI_WRITE = 0, /* Default */
- AR5K_INI_READ = 1, /* Cleared on read */
+ AR5K_INI_READ = 1,
} ini_mode;
};
-/*
- * Mode specific initial register values
+/**
+ * struct ath5k_ini_mode - Mode specific initial register values
+ * @mode_register: Register address
+ * @mode_value: Set of values for each enum ath5k_driver_mode
*/
-
struct ath5k_ini_mode {
u16 mode_register;
u32 mode_value[3];
@@ -386,11 +389,10 @@ static const struct ath5k_ini ar5211_ini[] = {
/* Initial mode-specific settings for AR5211
* 5211 supports OFDM-only g (draft g) but we
- * need to test it !
- */
+ * need to test it ! */
static const struct ath5k_ini_mode ar5211_ini_mode[] = {
{ AR5K_TXCFG,
- /* A/XR B G */
+ /* A B G */
{ 0x00000015, 0x0000001d, 0x00000015 } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(0),
{ 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
@@ -460,7 +462,7 @@ static const struct ath5k_ini_mode ar5211_ini_mode[] = {
{ 0x00000010, 0x00000010, 0x00000010 } },
};
-/* Initial register settings for AR5212 */
+/* Initial register settings for AR5212 and newer chips */
static const struct ath5k_ini ar5212_ini_common_start[] = {
{ AR5K_RXDP, 0x00000000 },
{ AR5K_RXCFG, 0x00000005 },
@@ -724,7 +726,8 @@ static const struct ath5k_ini_mode ar5212_ini_mode_start[] = {
{ 0x00000000, 0x00000000, 0x00000108 } },
};
-/* Initial mode-specific settings for AR5212 + RF5111 (Written after ar5212_ini) */
+/* Initial mode-specific settings for AR5212 + RF5111
+ * (Written after ar5212_ini) */
static const struct ath5k_ini_mode rf5111_ini_mode_end[] = {
{ AR5K_TXCFG,
/* A/XR B G */
@@ -757,6 +760,7 @@ static const struct ath5k_ini_mode rf5111_ini_mode_end[] = {
{ 0x1883800a, 0x1873800a, 0x1883800a } },
};
+/* Common for all modes */
static const struct ath5k_ini rf5111_ini_common_end[] = {
{ AR5K_DCU_FP, 0x00000000 },
{ AR5K_PHY_AGC, 0x00000000 },
@@ -774,7 +778,9 @@ static const struct ath5k_ini rf5111_ini_common_end[] = {
{ 0xa23c, 0x13c889af },
};
-/* Initial mode-specific settings for AR5212 + RF5112 (Written after ar5212_ini) */
+
+/* Initial mode-specific settings for AR5212 + RF5112
+ * (Written after ar5212_ini) */
static const struct ath5k_ini_mode rf5112_ini_mode_end[] = {
{ AR5K_TXCFG,
/* A/XR B G */
@@ -825,7 +831,9 @@ static const struct ath5k_ini rf5112_ini_common_end[] = {
{ 0xa23c, 0x13c889af },
};
-/* Initial mode-specific settings for RF5413/5414 (Written after ar5212_ini) */
+
+/* Initial mode-specific settings for RF5413/5414
+ * (Written after ar5212_ini) */
static const struct ath5k_ini_mode rf5413_ini_mode_end[] = {
{ AR5K_TXCFG,
/* A/XR B G */
@@ -963,7 +971,8 @@ static const struct ath5k_ini rf5413_ini_common_end[] = {
{ 0xa384, 0xf3307ff0 },
};
-/* Initial mode-specific settings for RF2413/2414 (Written after ar5212_ini) */
+/* Initial mode-specific settings for RF2413/2414
+ * (Written after ar5212_ini) */
/* XXX: a mode ? */
static const struct ath5k_ini_mode rf2413_ini_mode_end[] = {
{ AR5K_TXCFG,
@@ -1085,7 +1094,8 @@ static const struct ath5k_ini rf2413_ini_common_end[] = {
{ 0xa384, 0xf3307ff0 },
};
-/* Initial mode-specific settings for RF2425 (Written after ar5212_ini) */
+/* Initial mode-specific settings for RF2425
+ * (Written after ar5212_ini) */
/* XXX: a mode ? */
static const struct ath5k_ini_mode rf2425_ini_mode_end[] = {
{ AR5K_TXCFG,
@@ -1357,10 +1367,15 @@ static const struct ath5k_ini rf5112_ini_bbgain[] = {
};
-/*
- * Write initial register dump
+/**
+ * ath5k_hw_ini_registers() - Write initial register dump common for all modes
+ * @ah: The &struct ath5k_hw
+ * @size: Dump size
+ * @ini_regs: The array of &struct ath5k_ini
+ * @skip_pcu: Skip PCU registers
*/
-static void ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
+static void
+ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
const struct ath5k_ini *ini_regs, bool skip_pcu)
{
unsigned int i;
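/*
 * Editor's illustrative sketch, not part of this patch: conceptually, each
 * &struct ath5k_ini entry expands to one register access, with AR5K_INI_READ
 * entries read (and thus cleared) instead of written. This is a simplified
 * view; the real loop below also honours skip_pcu.
 */
static void example_apply_ini(struct ath5k_hw *ah,
			      const struct ath5k_ini *ini, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (ini[i].ini_mode == AR5K_INI_READ)
			ath5k_hw_reg_read(ah, ini[i].ini_register);
		else
			ath5k_hw_reg_write(ah, ini[i].ini_value,
					   ini[i].ini_register);
	}
}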
@@ -1388,7 +1403,15 @@ static void ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
}
}
-static void ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
+/**
+ * ath5k_hw_ini_mode_registers() - Write initial mode-specific register dump
+ * @ah: The &struct ath5k_hw
+ * @size: Dump size
+ * @ini_mode: The array of &struct ath5k_ini_mode
+ * @mode: One of enum ath5k_driver_mode
+ */
+static void
+ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
unsigned int size, const struct ath5k_ini_mode *ini_mode,
u8 mode)
{
@@ -1402,7 +1425,17 @@ static void ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
}
-int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
+/**
+ * ath5k_hw_write_initvals() - Write initial chip-specific register dump
+ * @ah: The &struct ath5k_hw
+ * @mode: One of enum ath5k_driver_mode
+ * @skip_pcu: Skip PCU registers
+ *
+ * Write initial chip-specific register dump, to get the chipset on a
+ * clean and ready-to-work state after warm reset.
+ */
+int
+ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
{
/*
* Write initial register settings
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
index dfa48eb..849fa06 100644
--- a/drivers/net/wireless/ath/ath5k/pci.c
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -98,7 +98,7 @@ ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
0xffff);
return true;
}
- udelay(15);
+ usleep_range(15, 20);
}
return false;
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index a7eafa3..cebfd6f 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -30,11 +30,47 @@
#include "reg.h"
#include "debug.h"
-/*
+/**
+ * DOC: Protocol Control Unit (PCU) functions
+ *
+ * The Protocol Control Unit is responsible for maintaining various protocol
+ * properties before a frame is sent to and after a frame is received from
+ * the baseband. To be more specific, the PCU handles:
+ *
+ * - Buffering of RX and TX frames (after QCU/DCUs)
+ *
+ * - Encrypting and decrypting (using the built-in engine)
+ *
+ * - Generating ACKs, RTS/CTS frames
+ *
+ * - Maintaining TSF
+ *
+ * - FCS
+ *
+ * - Updating beacon data (with TSF etc)
+ *
+ * - Generating virtual CCA
+ *
+ * - RX/Multicast filtering
+ *
+ * - BSSID filtering
+ *
+ * - Various statistics
+ *
+ * - Different operating modes: AP, STA, IBSS
+ *
+ * Note: Most of these functions can be tweaked/bypassed so you can do
+ * them in sw above for debugging or research. For more info check out the
+ * PCU registers in reg.h.
+ */
+
+/**
+ * DOC: ACK rates
+ *
* AR5212+ can use higher rates for ack transmission
* based on current tx rate instead of the base rate.
* It does this to better utilize channel usage.
- * This is a mapping between G rates (that cover both
+ * There is a mapping between G rates (that cover both
* CCK and OFDM) and ack rates that we use when setting
* rate -> duration table. This mapping is hw-based so
* don't change anything.
@@ -63,17 +99,18 @@ static const unsigned int ack_rates_high[] =
\*******************/
/**
- * ath5k_hw_get_frame_duration - Get tx time of a frame
- *
+ * ath5k_hw_get_frame_duration() - Get tx time of a frame
* @ah: The &struct ath5k_hw
* @len: Frame's length in bytes
* @rate: The @struct ieee80211_rate
+ * @shortpre: Indicates a short preamble
*
* Calculate tx duration of a frame given it's rate and length
* It extends ieee80211_generic_frame_duration for non standard
* bwmodes.
*/
-int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
+int
+ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
int len, struct ieee80211_rate *rate, bool shortpre)
{
int sifs, preamble, plcp_bits, sym_time;
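/*
 * Editor's illustrative sketch, not part of this patch: the plain 802.11a/g
 * OFDM arithmetic behind the calculation below, ignoring the non-standard
 * bwmodes the real function also handles. For 1500 bytes at 54 Mbit/s
 * (216 data bits per symbol) this gives 20 + 4 * 56 = 244 usec.
 */
static unsigned int example_ofdm_tx_time(unsigned int len,
					 unsigned int bits_per_symbol)
{
	/* SERVICE (16) + payload + tail (6) bits, rounded up to full symbols */
	unsigned int symbols = DIV_ROUND_UP(16 + 8 * len + 6, bits_per_symbol);

	return 20 + 4 * symbols;	/* 20us preamble/header + 4us per symbol */
}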
@@ -129,11 +166,11 @@ int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
}
/**
- * ath5k_hw_get_default_slottime - Get the default slot time for current mode
- *
+ * ath5k_hw_get_default_slottime() - Get the default slot time for current mode
* @ah: The &struct ath5k_hw
*/
-unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
+unsigned int
+ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
unsigned int slot_time;
@@ -160,11 +197,11 @@ unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_get_default_sifs - Get the default SIFS for current mode
- *
+ * ath5k_hw_get_default_sifs() - Get the default SIFS for current mode
* @ah: The &struct ath5k_hw
*/
-unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
+unsigned int
+ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
unsigned int sifs;
@@ -191,17 +228,17 @@ unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_update_mib_counters - Update MIB counters (mac layer statistics)
- *
+ * ath5k_hw_update_mib_counters() - Update MIB counters (mac layer statistics)
* @ah: The &struct ath5k_hw
*
* Reads MIB counters from PCU and updates sw statistics. Is called after a
* MIB interrupt, because one of these counters might have reached their maximum
* and triggered the MIB interrupt, to let us read and clear the counter.
*
- * Is called in interrupt context!
+ * NOTE: Is called in interrupt context!
*/
-void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
+void
+ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
{
struct ath5k_statistics *stats = &ah->stats;
@@ -219,10 +256,8 @@ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
\******************/
/**
- * ath5k_hw_write_rate_duration - fill rate code to duration table
- *
- * @ah: the &struct ath5k_hw
- * @mode: one of enum ath5k_driver_mode
+ * ath5k_hw_write_rate_duration() - Fill rate code to duration table
+ * @ah: The &struct ath5k_hw
*
* Write the rate code to duration table upon hw reset. This is a helper for
* ath5k_hw_pcu_init(). It seems all this is doing is setting an ACK timeout on
@@ -236,7 +271,8 @@ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
* that include all OFDM and CCK rates.
*
*/
-static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
+static inline void
+ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
{
struct ieee80211_rate *rate;
unsigned int i;
@@ -280,12 +316,12 @@ static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_set_ack_timeout - Set ACK timeout on PCU
- *
+ * ath5k_hw_set_ack_timeout() - Set ACK timeout on PCU
* @ah: The &struct ath5k_hw
* @timeout: Timeout in usec
*/
-static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
+static int
+ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK))
<= timeout)
@@ -298,12 +334,12 @@ static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
}
/**
- * ath5k_hw_set_cts_timeout - Set CTS timeout on PCU
- *
+ * ath5k_hw_set_cts_timeout() - Set CTS timeout on PCU
* @ah: The &struct ath5k_hw
* @timeout: Timeout in usec
*/
-static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
+static int
+ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS))
<= timeout)
@@ -321,14 +357,14 @@ static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
\*******************/
/**
- * ath5k_hw_set_lladdr - Set station id
- *
+ * ath5k_hw_set_lladdr() - Set station id
* @ah: The &struct ath5k_hw
- * @mac: The card's mac address
+ * @mac: The card's mac address (array of octets)
*
* Set station id on hw using the provided mac address
*/
-int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
+int
+ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
{
struct ath_common *common = ath5k_hw_common(ah);
u32 low_id, high_id;
@@ -349,14 +385,14 @@ int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
}
/**
- * ath5k_hw_set_bssid - Set current BSSID on hw
- *
+ * ath5k_hw_set_bssid() - Set current BSSID on hw
* @ah: The &struct ath5k_hw
*
* Sets the current BSSID and BSSID mask we have from the
* common struct into the hardware
*/
-void ath5k_hw_set_bssid(struct ath5k_hw *ah)
+void
+ath5k_hw_set_bssid(struct ath5k_hw *ah)
{
struct ath_common *common = ath5k_hw_common(ah);
u16 tim_offset = 0;
@@ -389,7 +425,23 @@ void ath5k_hw_set_bssid(struct ath5k_hw *ah)
ath5k_hw_enable_pspoll(ah, NULL, 0);
}
-void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
+/**
+ * ath5k_hw_set_bssid_mask() - Set which BSSIDs we listen to
+ * @ah: The &struct ath5k_hw
+ * @mask: The BSSID mask to set (array of octets)
+ *
+ * BSSID masking is a method used by AR5212 and newer hardware to inform PCU
+ * which bits of the interface's MAC address should be looked at when trying
+ * to decide which packets to ACK. In station mode and AP mode with a single
+ * BSS every bit matters since we lock to only one BSS. In AP mode with
+ * multiple BSSes (virtual interfaces) not every bit matters because hw must
+ * accept frames for all BSSes and so we tweak some bits of our mac address
+ * in order to have multiple BSSes.
+ *
+ * For more information check out ../hw.c of the common ath module.
+ */
+void
+ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
{
struct ath_common *common = ath5k_hw_common(ah);
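/*
 * Editor's illustrative sketch, not part of this patch: the mask handed to
 * this function is typically built by clearing every bit on which the active
 * interface addresses differ, so only the bits common to all of them must
 * match. Two-address case shown; see ../hw.c for the real helper.
 */
static void example_build_bssid_mask(const u8 *addr_a, const u8 *addr_b,
				     u8 mask[ETH_ALEN])
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		mask[i] = ~(addr_a[i] ^ addr_b[i]);
}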
@@ -400,18 +452,21 @@ void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
ath_hw_setbssidmask(common);
}
-/*
- * Set multicast filter
+/**
+ * ath5k_hw_set_mcast_filter() - Set multicast filter
+ * @ah: The &struct ath5k_hw
+ * @filter0: Lower 32 bits of the multicast filter
+ * @filter1: Higher 16 bits of the multicast filter
*/
-void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
+void
+ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
{
ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0);
ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
}
/**
- * ath5k_hw_get_rx_filter - Get current rx filter
- *
+ * ath5k_hw_get_rx_filter() - Get current rx filter
* @ah: The &struct ath5k_hw
*
* Returns the RX filter by reading rx filter and
@@ -420,7 +475,8 @@ void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
* and pass to the driver. For a list of frame types
* check out reg.h.
*/
-u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
+u32
+ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
{
u32 data, filter = 0;
@@ -440,8 +496,7 @@ u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
}
/**
- * ath5k_hw_set_rx_filter - Set rx filter
- *
+ * ath5k_hw_set_rx_filter() - Set rx filter
* @ah: The &struct ath5k_hw
* @filter: RX filter mask (see reg.h)
*
@@ -449,7 +504,8 @@ u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
* register on 5212 and newer chips so that we have proper PHY
* error reporting.
*/
-void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
+void
+ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
{
u32 data = 0;
@@ -493,13 +549,13 @@ void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
#define ATH5K_MAX_TSF_READ 10
/**
- * ath5k_hw_get_tsf64 - Get the full 64bit TSF
- *
+ * ath5k_hw_get_tsf64() - Get the full 64bit TSF
* @ah: The &struct ath5k_hw
*
* Returns the current TSF
*/
-u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
+u64
+ath5k_hw_get_tsf64(struct ath5k_hw *ah)
{
u32 tsf_lower, tsf_upper1, tsf_upper2;
int i;
@@ -536,28 +592,30 @@ u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah)
return ((u64)tsf_upper1 << 32) | tsf_lower;
}
+#undef ATH5K_MAX_TSF_READ
+
/**
- * ath5k_hw_set_tsf64 - Set a new 64bit TSF
- *
+ * ath5k_hw_set_tsf64() - Set a new 64bit TSF
* @ah: The &struct ath5k_hw
* @tsf64: The new 64bit TSF
*
* Sets the new TSF
*/
-void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64)
+void
+ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64)
{
ath5k_hw_reg_write(ah, tsf64 & 0xffffffff, AR5K_TSF_L32);
ath5k_hw_reg_write(ah, (tsf64 >> 32) & 0xffffffff, AR5K_TSF_U32);
}
/**
- * ath5k_hw_reset_tsf - Force a TSF reset
- *
+ * ath5k_hw_reset_tsf() - Force a TSF reset
* @ah: The &struct ath5k_hw
*
* Forces a TSF reset on PCU
*/
-void ath5k_hw_reset_tsf(struct ath5k_hw *ah)
+void
+ath5k_hw_reset_tsf(struct ath5k_hw *ah)
{
u32 val;
@@ -573,10 +631,17 @@ void ath5k_hw_reset_tsf(struct ath5k_hw *ah)
ath5k_hw_reg_write(ah, val, AR5K_BEACON);
}
-/*
- * Initialize beacon timers
+/**
+ * ath5k_hw_init_beacon_timers() - Initialize beacon timers
+ * @ah: The &struct ath5k_hw
+ * @next_beacon: Next TBTT
+ * @interval: Current beacon interval
+ *
+ * This function is used to initialize beacon timers based on current
+ * operation mode and settings.
*/
-void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
+void
+ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
{
u32 timer1, timer2, timer3;
@@ -655,8 +720,7 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
}
/**
- * ath5k_check_timer_win - Check if timer B is timer A + window
- *
+ * ath5k_check_timer_win() - Check if timer B is timer A + window
* @a: timer a (before b)
* @b: timer b (after a)
* @window: difference between a and b
@@ -686,12 +750,11 @@ ath5k_check_timer_win(int a, int b, int window, int intval)
}
/**
- * ath5k_hw_check_beacon_timers - Check if the beacon timers are correct
- *
+ * ath5k_hw_check_beacon_timers() - Check if the beacon timers are correct
* @ah: The &struct ath5k_hw
* @intval: beacon interval
*
- * This is a workaround for IBSS mode:
+ * This is a workaround for IBSS mode
*
* The need for this function arises from the fact that we have 4 separate
* HW timer registers (TIMER0 - TIMER3), which are closely related to the
@@ -746,14 +809,14 @@ ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval)
}
/**
- * ath5k_hw_set_coverage_class - Set IEEE 802.11 coverage class
- *
+ * ath5k_hw_set_coverage_class() - Set IEEE 802.11 coverage class
* @ah: The &struct ath5k_hw
* @coverage_class: IEEE 802.11 coverage class number
*
* Sets IFS intervals and ACK/CTS timeouts for given coverage class.
*/
-void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
+void
+ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
{
/* As defined by IEEE 802.11-2007 17.3.8.6 */
int slot_time = ath5k_hw_get_default_slottime(ah) + 3 * coverage_class;
@@ -772,8 +835,7 @@ void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
\***************************/
/**
- * ath5k_hw_start_rx_pcu - Start RX engine
- *
+ * ath5k_hw_start_rx_pcu() - Start RX engine
* @ah: The &struct ath5k_hw
*
* Starts RX engine on PCU so that hw can process RXed frames
@@ -781,32 +843,33 @@ void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
*
* NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma
*/
-void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
+void
+ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
{
AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
}
/**
- * at5k_hw_stop_rx_pcu - Stop RX engine
- *
+ * at5k_hw_stop_rx_pcu() - Stop RX engine
* @ah: The &struct ath5k_hw
*
* Stops RX engine on PCU
*/
-void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
+void
+ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
{
AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
}
/**
- * ath5k_hw_set_opmode - Set PCU operating mode
- *
+ * ath5k_hw_set_opmode() - Set PCU operating mode
* @ah: The &struct ath5k_hw
- * @op_mode: &enum nl80211_iftype operating mode
+ * @op_mode: One of enum nl80211_iftype
*
* Configure PCU for the various operating modes (AP/STA etc)
*/
-int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
+int
+ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
{
struct ath_common *common = ath5k_hw_common(ah);
u32 pcu_reg, beacon_reg, low_id, high_id;
@@ -873,8 +936,17 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
return 0;
}
-void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
- u8 mode)
+/**
+ * ath5k_hw_pcu_init() - Initialize PCU
+ * @ah: The &struct ath5k_hw
+ * @op_mode: One of enum nl80211_iftype
+ *
+ * This function is used to initialize PCU by setting current
+ * operation mode and various other settings.
+ */
+void
+ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
{
/* Set bssid and bssid mask */
ath5k_hw_set_bssid(ah);
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 01cb72d..e1f8613 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -1,6 +1,4 @@
/*
- * PHY functions
- *
* Copyright (c) 2004-2007 Reyk Floeter <reyk@openbsd.org>
* Copyright (c) 2006-2009 Nick Kossifidis <mickflemm@gmail.com>
* Copyright (c) 2007-2008 Jiri Slaby <jirislaby@gmail.com>
@@ -20,6 +18,10 @@
*
*/
+/***********************\
+* PHY related functions *
+\***********************/
+
#include <linux/delay.h>
#include <linux/slab.h>
#include <asm/unaligned.h>
@@ -31,14 +33,53 @@
#include "../regd.h"
+/**
+ * DOC: PHY related functions
+ *
+ * Here we handle the low-level functions related to baseband
+ * and analog frontend (RF) parts. This is by far the most complex
+ * part of the hw code so make sure you know what you are doing.
+ *
+ * Here is a list of what this is all about:
+ *
+ * - Channel setting/switching
+ *
+ * - Automatic Gain Control (AGC) calibration
+ *
+ * - Noise Floor calibration
+ *
+ * - I/Q imbalance calibration (QAM correction)
+ *
+ * - Calibration due to thermal changes (gain_F)
+ *
+ * - Spur noise mitigation
+ *
+ * - RF/PHY initialization for the various operating modes and bwmodes
+ *
+ * - Antenna control
+ *
+ * - TX power control per channel/rate/packet type
+ *
+ * Also keep in mind that we never got documentation for most of these
+ * functions; what we have comes mostly from Atheros's code, reverse
+ * engineering and patent docs/presentations etc.
+ */
+
+
/******************\
* Helper functions *
\******************/
-/*
- * Get the PHY Chip revision
+/**
+ * ath5k_hw_radio_revision() - Get the PHY Chip revision
+ * @ah: The &struct ath5k_hw
+ * @band: One of enum ieee80211_band
+ *
+ * Returns the revision number of a 2GHz, 5GHz or single chip
+ * radio.
*/
-u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
+u16
+ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
{
unsigned int i;
u32 srev;
@@ -58,7 +99,7 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
return 0;
}
- mdelay(2);
+ usleep_range(2000, 2500);
/* ...wait until PHY is ready and read the selected radio revision */
ath5k_hw_reg_write(ah, 0x00001c16, AR5K_PHY(0x34));
@@ -81,10 +122,16 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
return ret;
}
-/*
- * Check if a channel is supported
+/**
+ * ath5k_channel_ok() - Check if a channel is supported by the hw
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Note: We don't do any regulatory domain checks here, it's just
+ * a sanity check.
*/
-bool ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
+bool
+ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
{
u16 freq = channel->center_freq;
@@ -101,7 +148,13 @@ bool ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
return false;
}
-bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
+/**
+ * ath5k_hw_chan_has_spur_noise() - Check if channel is sensitive to spur noise
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ */
+bool
+ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
u8 refclk_freq;
@@ -122,11 +175,20 @@ bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
return false;
}
-/*
- * Used to modify RF Banks before writing them to AR5K_RF_BUFFER
+/**
+ * ath5k_hw_rfb_op() - Perform an operation on the given RF Buffer
+ * @ah: The &struct ath5k_hw
+ * @rf_regs: The struct ath5k_rf_reg
+ * @val: New value
+ * @reg_id: RF register ID
+ * @set: Indicate we need to swap data
+ *
+ * This is an internal function used to modify RF Banks before
+ * writing them to AR5K_RF_BUFFER. Check out rfbuffer.h for more
+ * infos.
*/
-static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah,
- const struct ath5k_rf_reg *rf_regs,
+static unsigned int
+ath5k_hw_rfb_op(struct ath5k_hw *ah, const struct ath5k_rf_reg *rf_regs,
u32 val, u8 reg_id, bool set)
{
const struct ath5k_rf_reg *rfreg = NULL;
@@ -204,8 +266,7 @@ static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah,
}
/**
- * ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212
- *
+ * ath5k_hw_write_ofdm_timings() - set OFDM timings on AR5212
* @ah: the &struct ath5k_hw
* @channel: the currently set channel upon reset
*
@@ -216,10 +277,11 @@ static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah,
* mantissa and provide these values on hw.
*
* For more infos i think this patent is related
- * http://www.freepatentsonline.com/7184495.html
+ * "http://www.freepatentsonline.com/7184495.html"
*/
-static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
- struct ieee80211_channel *channel)
+static inline int
+ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel)
{
/* Get exponent and mantissa and set it */
u32 coef_scaled, coef_exp, coef_man,
@@ -278,6 +340,10 @@ static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
return 0;
}
+/**
+ * ath5k_hw_phy_disable() - Disable PHY
+ * @ah: The &struct ath5k_hw
+ */
int ath5k_hw_phy_disable(struct ath5k_hw *ah)
{
/*Just a try M.F.*/
@@ -286,10 +352,13 @@ int ath5k_hw_phy_disable(struct ath5k_hw *ah)
return 0;
}
-/*
- * Wait for synth to settle
+/**
+ * ath5k_hw_wait_for_synth() - Wait for synth to settle
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
*/
-static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
+static void
+ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
/*
@@ -308,9 +377,9 @@ static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
delay = delay << 2;
/* XXX: /2 on turbo ? Let's be safe
* for now */
- udelay(100 + delay);
+ usleep_range(100 + delay, 100 + (2 * delay));
} else {
- mdelay(1);
+ usleep_range(1000, 1500);
}
}
@@ -319,7 +388,9 @@ static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
* RF Gain optimization *
\**********************/
-/*
+/**
+ * DOC: RF Gain optimization
+ *
* This code is used to optimize RF gain on different environments
* (temperature mostly) based on feedback from a power detector.
*
@@ -328,22 +399,22 @@ static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
* no gain optimization ladder-.
*
* For more infos check out this patent doc
- * http://www.freepatentsonline.com/7400691.html
+ * "http://www.freepatentsonline.com/7400691.html"
*
* This paper describes power drops as seen on the receiver due to
* probe packets
- * http://www.cnri.dit.ie/publications/ICT08%20-%20Practical%20Issues
- * %20of%20Power%20Control.pdf
+ * "http://www.cnri.dit.ie/publications/ICT08%20-%20Practical%20Issues
+ * %20of%20Power%20Control.pdf"
*
* And this is the MadWiFi bug entry related to the above
- * http://madwifi-project.org/ticket/1659
+ * "http://madwifi-project.org/ticket/1659"
* with various measurements and diagrams
- *
- * TODO: Deal with power drops due to probes by setting an appropriate
- * tx power on the probe packets ! Make this part of the calibration process.
*/
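For orientation, here is a minimal sketch of how a driver-side loop might consume the gain engine described above. ath5k_hw_gainf_calibrate() is the entry point documented below; example_schedule_reset() is hypothetical and the exact AR5K_RFGAIN_* code checked here is an assumption, not something this patch spells out.

	/* Hypothetical periodic worker (sketch only) */
	static void example_poll_rfgain(struct ath5k_hw *ah)
	{
		/* Ask the gain engine for its current state */
		enum ath5k_rfgain state = ath5k_hw_gainf_calibrate(ah);

		/* New RF gain settings only take effect after a reset,
		 * so if the engine asks for a change, schedule one. */
		if (state == AR5K_RFGAIN_NEED_CHANGE)
			example_schedule_reset(ah);
	}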
-/* Initialize ah_gain during attach */
+/**
+ * ath5k_hw_rfgain_opt_init() - Initialize ah_gain during attach
+ * @ah: The &struct ath5k_hw
+ */
int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah)
{
/* Initialize the gain optimization values */
@@ -367,17 +438,21 @@ int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah)
return 0;
}
-/* Schedule a gain probe check on the next transmitted packet.
+/**
+ * ath5k_hw_request_rfgain_probe() - Request a PAPD probe packet
+ * @ah: The &struct ath5k_hw
+ *
+ * Schedules a gain probe check on the next transmitted packet.
* That means our next packet is going to be sent with lower
* tx power and a Peak to Average Power Detector (PAPD) will try
* to measure the gain.
*
- * XXX: How about forcing a tx packet (bypassing PCU arbitrator etc)
+ * TODO: Force a tx packet (bypassing PCU arbitrator etc)
* just after we enable the probe so that we don't mess with
- * standard traffic ? Maybe it's time to use sw interrupts and
- * a probe tasklet !!!
+ * standard traffic.
*/
-static void ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
+static void
+ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
{
/* Skip if gain calibration is inactive or
@@ -395,9 +470,15 @@ static void ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
}
-/* Calculate gain_F measurement correction
- * based on the current step for RF5112 rev. 2 */
-static u32 ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_rf_gainf_corr() - Calculate Gain_F measurement correction
+ * @ah: The &struct ath5k_hw
+ *
+ * Calculate Gain_F measurement correction
+ * based on the current step for RF5112 rev. 2
+ */
+static u32
+ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
{
u32 mix, step;
u32 *rf;
@@ -450,11 +531,19 @@ static u32 ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
return ah->ah_gain.g_f_corr;
}
-/* Check if current gain_F measurement is in the range of our
+/**
+ * ath5k_hw_rf_check_gainf_readback() - Validate Gain_F feedback from detector
+ * @ah: The &struct ath5k_hw
+ *
+ * Check if current gain_F measurement is in the range of our
* power detector windows. If we get a measurement outside range
* we know it's not accurate (detectors can't measure anything outside
- * their detection window) so we must ignore it */
-static bool ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
+ * their detection window) so we must ignore it.
+ *
+ * Returns true if readback was O.K. or false on failure
+ */
+static bool
+ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
{
const struct ath5k_rf_reg *rf_regs;
u32 step, mix_ovr, level[4];
@@ -506,9 +595,15 @@ static bool ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
ah->ah_gain.g_current <= level[3]);
}
-/* Perform gain_F adjustment by choosing the right set
- * of parameters from RF gain optimization ladder */
-static s8 ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_rf_gainf_adjust() - Perform Gain_F adjustment
+ * @ah: The &struct ath5k_hw
+ *
+ * Choose the right target gain based on current gain
+ * and RF gain optimization ladder
+ */
+static s8
+ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah)
{
const struct ath5k_gain_opt *go;
const struct ath5k_gain_opt_step *g_step;
@@ -572,13 +667,18 @@ done:
return ret;
}
-/* Main callback for thermal RF gain calibration engine
+/**
+ * ath5k_hw_gainf_calibrate() - Do a gain_F calibration
+ * @ah: The &struct ath5k_hw
+ *
+ * Main callback for thermal RF gain calibration engine
* Check for a new gain reading and schedule an adjustment
* if needed.
*
- * TODO: Use sw interrupt to schedule reset if gain_F needs
- * adjustment */
-enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah)
+ * Returns one of enum ath5k_rfgain codes
+ */
+enum ath5k_rfgain
+ath5k_hw_gainf_calibrate(struct ath5k_hw *ah)
{
u32 data, type;
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
@@ -638,10 +738,18 @@ done:
return ah->ah_gain.g_state;
}
-/* Write initial RF gain table to set the RF sensitivity
- * this one works on all RF chips and has nothing to do
- * with gain_F calibration */
-static int ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
+/**
+ * ath5k_hw_rfgain_init() - Write initial RF gain settings to hw
+ * @ah: The &struct ath5k_hw
+ * @band: One of enum ieee80211_band
+ *
+ * Write initial RF gain table to set the RF sensitivity.
+ *
+ * NOTE: This one works on all RF chips and has nothing to do
+ * with Gain_F calibration
+ */
+static int
+ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
{
const struct ath5k_ini_rfgain *ath5k_rfg;
unsigned int i, size, index;
@@ -688,16 +796,23 @@ static int ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
}
-
/********************\
* RF Registers setup *
\********************/
-/*
- * Setup RF registers by writing RF buffer on hw
+/**
+ * ath5k_hw_rfregs_init() - Initialize RF register settings
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @mode: One of enum ath5k_driver_mode
+ *
+ * Setup RF registers by writing RF buffer on hw. For
+ * more infos on this, check out rfbuffer.h
*/
-static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
- struct ieee80211_channel *channel, unsigned int mode)
+static int
+ath5k_hw_rfregs_init(struct ath5k_hw *ah,
+ struct ieee80211_channel *channel,
+ unsigned int mode)
{
const struct ath5k_rf_reg *rf_regs;
const struct ath5k_ini_rfbuffer *ini_rfb;
@@ -1055,19 +1170,18 @@ static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
PHY/RF channel functions
\**************************/
-/*
- * Conversion needed for RF5110
+/**
+ * ath5k_hw_rf5110_chan2athchan() - Convert channel freq on RF5110
+ * @channel: The &struct ieee80211_channel
+ *
+ * Map channel frequency to IEEE channel number and convert it
+ * to an internal channel value used by the RF5110 chipset.
*/
-static u32 ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel)
+static u32
+ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel)
{
u32 athchan;
- /*
- * Convert IEEE channel/MHz to an internal channel value used
- * by the AR5210 chipset. This has not been verified with
- * newer chipsets like the AR5212A who have a completely
- * different RF/PHY part.
- */
athchan = (ath5k_hw_bitswap(
(ieee80211_frequency_to_channel(
channel->center_freq) - 24) / 2, 5)
@@ -1075,10 +1189,13 @@ static u32 ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel)
return athchan;
}
-/*
- * Set channel on RF5110
+/**
+ * ath5k_hw_rf5110_channel() - Set channel frequency on RF5110
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
*/
-static int ath5k_hw_rf5110_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf5110_channel(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
u32 data;
@@ -1089,15 +1206,23 @@ static int ath5k_hw_rf5110_channel(struct ath5k_hw *ah,
data = ath5k_hw_rf5110_chan2athchan(channel);
ath5k_hw_reg_write(ah, data, AR5K_RF_BUFFER);
ath5k_hw_reg_write(ah, 0, AR5K_RF_BUFFER_CONTROL_0);
- mdelay(1);
+ usleep_range(1000, 1500);
return 0;
}
-/*
- * Conversion needed for 5111
+/**
+ * ath5k_hw_rf5111_chan2athchan() - Handle 2GHz channels on RF5111/2111
+ * @ieee: IEEE channel number
+ * @athchan: The &struct ath5k_athchan_2ghz
+ *
+ * In order to enable the RF2111 frequency converter on RF5111/2111 setups
+ * we need to add some offsets and extra flags to the data values we pass
+ * on to the PHY. So for every 2GHz channel this function gets called
+ * to do the conversion.
*/
-static int ath5k_hw_rf5111_chan2athchan(unsigned int ieee,
+static int
+ath5k_hw_rf5111_chan2athchan(unsigned int ieee,
struct ath5k_athchan_2ghz *athchan)
{
int channel;
@@ -1123,10 +1248,13 @@ static int ath5k_hw_rf5111_chan2athchan(unsigned int ieee,
return 0;
}
-/*
- * Set channel on 5111
+/**
+ * ath5k_hw_rf5111_channel() - Set channel frequency on RF5111/2111
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
*/
-static int ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
struct ath5k_athchan_2ghz ath5k_channel_2ghz;
@@ -1171,10 +1299,20 @@ static int ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
return 0;
}
-/*
- * Set channel on 5112 and newer
+/**
+ * ath5k_hw_rf5112_channel() - Set channel frequency on 5112 and newer
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * On RF5112/2112 and newer we don't need to do any conversion.
+ * We pass the frequency value after a few modifications to the
+ * chip directly.
+ *
+ * NOTE: Make sure channel frequency given is within our range or else
+ * we might damage the chip ! Use ath5k_channel_ok before calling this one.
*/
-static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
u32 data, data0, data1, data2;
@@ -1183,17 +1321,37 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
data = data0 = data1 = data2 = 0;
c = channel->center_freq;
+ /* My guess based on code:
+ * 2GHz RF has 2 synth modes, one with a Local Oscillator
+ * at 2224MHz and one with a LO at 2192MHz. IF is 1520MHz
+ * (3040/2). data0 is used to set the PLL divider and data1
+ * selects synth mode. */
if (c < 4800) {
+ /* Channel 14 and all frequencies with 5MHz spacing
+ * below/above it (non-standard channels) */
if (!((c - 2224) % 5)) {
+ /* Same as (c - 2224) / 5 */
data0 = ((2 * (c - 704)) - 3040) / 10;
data1 = 1;
+ /* Channel 1 and all frequencies with 5MHz spacing
+ * below/above (standard channels without channel 14) */
} else if (!((c - 2192) % 5)) {
+ /* Same as (c - 2192) / 5 */
data0 = ((2 * (c - 672)) - 3040) / 10;
data1 = 0;
} else
return -EINVAL;
data0 = ath5k_hw_bitswap((data0 << 2) & 0xff, 8);
+ /* This is more complex: we have a single synthesizer with
+ * 4 reference clock settings (?) based on frequency spacing
+ * and set using data2. LO is at 4800MHz and data0 is again used
+ * to set some divider.
+ *
+ * NOTE: There is an old atheros presentation at Stanford
+ * that mentions a method called dual direct conversion
+ * with 1GHz sliding IF for RF5110. Maybe that's what we
+ * have here, or an updated version. */
} else if ((c % 5) != 2 || c > 5435) {
if (!(c % 20) && c >= 5120) {
data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
@@ -1219,10 +1377,16 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
return 0;
}
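To make the divider arithmetic above concrete, here are two values worked out purely from the expressions in this hunk (before the final bitswap/shift):

	Channel 1,  2412 MHz: (2412 - 2192) % 5 == 0 -> data1 = 0,
	                      data0 = ((2 * (2412 - 672)) - 3040) / 10 = 44
	Channel 14, 2484 MHz: (2484 - 2224) % 5 == 0 -> data1 = 1,
	                      data0 = ((2 * (2484 - 704)) - 3040) / 10 = 52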
-/*
- * Set the channel on the RF2425
+/**
+ * ath5k_hw_rf2425_channel() - Set channel frequency on RF2425
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * AR2425/2417 have a different 2GHz RF so the code differs
+ * a little from the RF5112 path.
*/
-static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
u32 data, data0, data2;
@@ -1258,10 +1422,16 @@ static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
return 0;
}
-/*
- * Set a channel on the radio chip
+/**
+ * ath5k_hw_channel() - Set a channel on the radio chip
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * This is the main function called to set a channel on the
+ * radio chip based on the radio chip version.
*/
-static int ath5k_hw_channel(struct ath5k_hw *ah,
+static int
+ath5k_hw_channel(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
int ret;
@@ -1313,11 +1483,46 @@ static int ath5k_hw_channel(struct ath5k_hw *ah,
return 0;
}
+
/*****************\
PHY calibration
\*****************/
-static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
+/**
+ * DOC: PHY Calibration routines
+ *
+ * Noise floor calibration: When we tell the hardware to
+ * perform a noise floor calibration by setting the
+ * AR5K_PHY_AGCCTL_NF bit on AR5K_PHY_AGCCTL, it will periodically
+ * sample-and-hold the minimum noise level seen at the antennas.
+ * This value is then stored in a ring buffer of recently measured
+ * noise floor values so we have a moving window of the last few
+ * samples. The median of the values in the history is then loaded
+ * into the hardware for its own use for RSSI and CCA measurements.
+ * This type of calibration doesn't interfere with traffic.
+ *
+ * AGC calibration: When we tell the hardware to perform
+ * an AGC (Automatic Gain Control) calibration by setting the
+ * AR5K_PHY_AGCCTL_CAL bit, hw disconnects the antennas and does
+ * a calibration on the DC offsets of ADCs. During this period
+ * rx/tx gets disabled so we have to deal with it on the driver
+ * part.
+ *
+ * I/Q calibration: When we tell the hardware to perform
+ * an I/Q calibration, it tries to correct I/Q imbalance and
+ * fix QAM constellation by sampling data from rxed frames.
+ * It doesn't interfere with traffic.
+ *
+ * For more infos on AGC and I/Q calibration check out patent doc
+ * #03/094463.
+ */
+
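A compressed sketch of the history/median mechanism the noise floor paragraph above describes; the buffer size and names here are illustrative only, the driver's own versions follow right below.

	#define NF_HIST_MAX	8

	/* Store a new reading in a small ring buffer, then report the
	 * median of the window (sketch, not the driver's code). */
	static s16 example_nf_update(s16 *hist, unsigned int *index, s16 nf)
	{
		s16 sorted[NF_HIST_MAX];
		int i, j;

		*index = (*index + 1) % NF_HIST_MAX;
		hist[*index] = nf;

		memcpy(sorted, hist, sizeof(sorted));
		/* tiny insertion sort, the window is small */
		for (i = 1; i < NF_HIST_MAX; i++)
			for (j = i; j > 0 && sorted[j] < sorted[j - 1]; j--)
				swap(sorted[j], sorted[j - 1]);

		return sorted[(NF_HIST_MAX - 1) / 2];
	}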
+/**
+ * ath5k_hw_read_measured_noise_floor() - Read measured NF from hw
+ * @ah: The &struct ath5k_hw
+ */
+static s32
+ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
{
s32 val;
@@ -1325,7 +1530,12 @@ static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
return sign_extend32(AR5K_REG_MS(val, AR5K_PHY_NF_MINCCA_PWR), 8);
}
-void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_init_nfcal_hist() - Initialize NF calibration history buffer
+ * @ah: The &struct ath5k_hw
+ */
+void
+ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
{
int i;
@@ -1334,6 +1544,11 @@ void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
ah->ah_nfcal_hist.nfval[i] = AR5K_TUNE_CCA_MAX_GOOD_VALUE;
}
+/**
+ * ath5k_hw_update_nfcal_hist() - Update NF calibration history buffer
+ * @ah: The &struct ath5k_hw
+ * @noise_floor: The NF we got from hw
+ */
static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor)
{
struct ath5k_nfcal_hist *hist = &ah->ah_nfcal_hist;
@@ -1341,7 +1556,12 @@ static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor)
hist->nfval[hist->index] = noise_floor;
}
-static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_get_median_noise_floor() - Get median NF from history buffer
+ * @ah: The &struct ath5k_hw
+ */
+static s16
+ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
{
s16 sort[ATH5K_NF_CAL_HIST_MAX];
s16 tmp;
@@ -1364,18 +1584,16 @@ static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
return sort[(ATH5K_NF_CAL_HIST_MAX - 1) / 2];
}
-/*
- * When we tell the hardware to perform a noise floor calibration
- * by setting the AR5K_PHY_AGCCTL_NF bit, it will periodically
- * sample-and-hold the minimum noise level seen at the antennas.
- * This value is then stored in a ring buffer of recently measured
- * noise floor values so we have a moving window of the last few
- * samples.
+/**
+ * ath5k_hw_update_noise_floor() - Update NF on hardware
+ * @ah: The &struct ath5k_hw
*
- * The median of the values in the history is then loaded into the
- * hardware for its own use for RSSI and CCA measurements.
+ * This is the main function we call to perform a NF calibration,
+ * it reads NF from hardware, calculates the median and updates
+ * NF on hw.
*/
-void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
+void
+ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 val;
@@ -1390,6 +1608,8 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
return;
}
+ ah->ah_cal_mask |= AR5K_CALIBRATION_NF;
+
ee_mode = ath5k_eeprom_mode_from_channel(ah->ah_current_channel);
/* completed NF calibration, test threshold */
@@ -1434,20 +1654,29 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
ah->ah_noise_floor = nf;
+ ah->ah_cal_mask &= ~AR5K_CALIBRATION_NF;
+
ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
"noise floor calibrated: %d\n", nf);
}
-/*
- * Perform a PHY calibration on RF5110
- * -Fix BPSK/QAM Constellation (I/Q correction)
+/**
+ * ath5k_hw_rf5110_calibrate() - Perform a PHY calibration on RF5110
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Do a complete PHY calibration (AGC + NF + I/Q) on RF5110
*/
-static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
+static int
+ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
u32 phy_sig, phy_agc, phy_sat, beacon;
int ret;
+ if (!(ah->ah_cal_mask & AR5K_CALIBRATION_FULL))
+ return 0;
+
/*
* Disable beacons and RX/TX queues, wait
*/
@@ -1456,7 +1685,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
beacon = ath5k_hw_reg_read(ah, AR5K_BEACON_5210);
ath5k_hw_reg_write(ah, beacon & ~AR5K_BEACON_ENABLE, AR5K_BEACON_5210);
- mdelay(2);
+ usleep_range(2000, 2500);
/*
* Set the channel (with AGC turned off)
@@ -1469,7 +1698,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
* Activate PHY and wait
*/
ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
- mdelay(1);
+ usleep_range(1000, 1500);
AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE);
@@ -1506,7 +1735,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
ath5k_hw_reg_write(ah, AR5K_PHY_RFSTG_DISABLE, AR5K_PHY_RFSTG);
AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE);
- mdelay(1);
+ usleep_range(1000, 1500);
/*
* Enable calibration and wait until completion
@@ -1537,8 +1766,9 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
return 0;
}
-/*
- * Perform I/Q calibration on RF5111/5112 and newer chips
+/**
+ * ath5k_hw_rf511x_iq_calibrate() - Perform I/Q calibration on RF5111 and newer
+ * @ah: The &struct ath5k_hw
*/
static int
ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
@@ -1547,12 +1777,19 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
s32 iq_corr, i_coff, i_coffd, q_coff, q_coffd;
int i;
- if (!ah->ah_calibration ||
- ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN)
- return 0;
+ /* Skip if I/Q calibration is not needed or if it's still running */
+ if (!ah->ah_iq_cal_needed)
+ return -EINVAL;
+ else if (ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN) {
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
+ "I/Q calibration still running");
+ return -EBUSY;
+ }
/* Calibration has finished, get the results and re-run */
- /* work around empty results which can apparently happen on 5212 */
+
+ /* Workaround for empty results which can apparently happen on 5212:
+ * Read registers up to 10 times until we get both i_pwr and q_pwr */
for (i = 0; i <= 10; i++) {
iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR);
i_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_I);
@@ -1570,9 +1807,13 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
else
q_coffd = q_pwr >> 7;
- /* protect against divide by 0 and loss of sign bits */
+ /* In case i_coffd became zero, cancel calibration:
+ * not only is it too small, it'll also result in a
+ * divide by zero later on. */
if (i_coffd == 0 || q_coffd < 2)
- return 0;
+ return -ECANCELED;
+
+ /* Protect against loss of sign bits */
i_coff = (-iq_corr) / i_coffd;
i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */
@@ -1601,10 +1842,17 @@ ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
return 0;
}
-/*
- * Perform a PHY calibration
+/**
+ * ath5k_hw_phy_calibrate() - Perform a PHY calibration
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * The main function we call from above to perform
+ * a short or full PHY calibration based on RF chip
+ * and current channel
*/
-int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
+int
+ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
int ret;
@@ -1613,10 +1861,43 @@ int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
return ath5k_hw_rf5110_calibrate(ah, channel);
ret = ath5k_hw_rf511x_iq_calibrate(ah);
+ if (ret) {
+ ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
+ "No I/Q correction performed (%uMHz)\n",
+ channel->center_freq);
+
+ /* Happens all the time if there is not much
+ * traffic, consider it normal behaviour. */
+ ret = 0;
+ }
+
+ /* On full calibration do an AGC calibration and
+ * request a PAPD probe for gainf calibration if
+ * needed */
+ if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL) {
- if ((ah->ah_radio == AR5K_RF5111 || ah->ah_radio == AR5K_RF5112) &&
- (channel->hw_value != AR5K_MODE_11B))
- ath5k_hw_request_rfgain_probe(ah);
+ AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
+ AR5K_PHY_AGCCTL_CAL);
+
+ ret = ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
+ AR5K_PHY_AGCCTL_CAL | AR5K_PHY_AGCCTL_NF,
+ 0, false);
+ if (ret) {
+ ATH5K_ERR(ah,
+ "gain calibration timeout (%uMHz)\n",
+ channel->center_freq);
+ }
+
+ if ((ah->ah_radio == AR5K_RF5111 ||
+ ah->ah_radio == AR5K_RF5112)
+ && (channel->hw_value != AR5K_MODE_11B))
+ ath5k_hw_request_rfgain_probe(ah);
+ }
+
+ /* Update noise floor
+ * XXX: Only do this after AGC calibration */
+ if (!(ah->ah_cal_mask & AR5K_CALIBRATION_NF))
+ ath5k_hw_update_noise_floor(ah);
return ret;
}
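Putting the new ah_cal_mask handling together, here is a rough sketch of how a full calibration round appears to be meant to be requested from the driver side. Only the flag, field and function names come from this patch; the scheduling policy itself is my assumption.

	/* Hypothetical caller (sketch only) */
	static void example_full_calibration(struct ath5k_hw *ah)
	{
		/* Mark this round as "full" so ath5k_hw_phy_calibrate()
		 * also runs the AGC step and the PAPD gain probe. */
		ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;

		ath5k_hw_phy_calibrate(ah, ah->ah_current_channel);

		ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
	}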
@@ -1626,6 +1907,16 @@ int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
* Spur mitigation functions *
\***************************/
+/**
+ * ath5k_hw_set_spur_mitigation_filter() - Configure SPUR filter
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * This function gets called during PHY initialization to
+ * configure the spur filter for the given channel. Spur is noise
+ * generated due to "reflection" effects, for more information on this
+ * method check out patent US7643810
+ */
static void
ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
@@ -1865,15 +2156,73 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
* Antenna control *
\*****************/
-static void /*TODO:Boundary check*/
+/**
+ * DOC: Antenna control
+ *
+ * Hw supports up to 14 antennas ! I haven't found any card that implements
+ * that. The most antennas I've seen on a card is 4 (2 for 2GHz and 2
+ * for 5GHz). Antenna 1 (MAIN) should be omnidirectional, 2 (AUX)
+ * omnidirectional or sectorial and antennas 3-14 sectorial (or directional).
+ *
+ * We can have a single antenna for RX and multiple antennas for TX.
+ * RX antenna is our "default" antenna (usually antenna 1) set on
+ * DEFAULT_ANTENNA register and TX antenna is set on each TX control descriptor
+ * (0 for automatic selection, 1 - 14 antenna number).
+ *
+ * We can let hw do all the work doing fast antenna diversity for both
+ * tx and rx or we can do things manually. Here are the options we have
+ * (all are bits of STA_ID1 register):
+ *
+ * AR5K_STA_ID1_DEFAULT_ANTENNA -> When 0 is set as the TX antenna on TX
+ * control descriptor, use the default antenna to transmit or else use the last
+ * antenna on which we received an ACK.
+ *
+ * AR5K_STA_ID1_DESC_ANTENNA -> Update default antenna after each TX frame to
+ * the antenna on which we got the ACK for that frame.
+ *
+ * AR5K_STA_ID1_RTS_DEF_ANTENNA -> Use default antenna for RTS or else use the
+ * one on the TX descriptor.
+ *
+ * AR5K_STA_ID1_SELFGEN_DEF_ANT -> Use default antenna for self generated frames
+ * (ACKs etc), or else use current antenna (the one we just used for TX).
+ *
+ * Using the above we support the following scenarios:
+ *
+ * AR5K_ANTMODE_DEFAULT -> Hw handles antenna diversity etc automatically
+ *
+ * AR5K_ANTMODE_FIXED_A -> Only antenna A (MAIN) is present
+ *
+ * AR5K_ANTMODE_FIXED_B -> Only antenna B (AUX) is present
+ *
+ * AR5K_ANTMODE_SINGLE_AP -> Sta locked on a single ap
+ *
+ * AR5K_ANTMODE_SECTOR_AP -> AP with tx antenna set on tx desc
+ *
+ * AR5K_ANTMODE_SECTOR_STA -> STA with tx antenna set on tx desc
+ *
+ * AR5K_ANTMODE_DEBUG -> Debug mode: A -> Rx, B -> Tx
+ *
+ * Also note that when the tx antenna on the tx descriptor is set to F,
+ * the card inverts the current tx antenna.
+ */
+
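As a usage sketch of the modes listed above (the wrapper function is illustrative; the mode names and ath5k_hw_set_antenna_mode() are from this file):

	static void example_set_ant(struct ath5k_hw *ah, bool debug)
	{
		/* Debug setup fixes antenna A for RX and B for TX,
		 * otherwise let the hw handle diversity on its own. */
		if (debug)
			ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEBUG);
		else
			ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT);
	}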
+/**
+ * ath5k_hw_set_def_antenna() - Set default rx antenna on AR5211/5212 and newer
+ * @ah: The &struct ath5k_hw
+ * @ant: Antenna number
+ */
+static void
ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant)
{
if (ah->ah_version != AR5K_AR5210)
ath5k_hw_reg_write(ah, ant & 0x7, AR5K_DEFAULT_ANTENNA);
}
-/*
- * Enable/disable fast rx antenna diversity
+/**
+ * ath5k_hw_set_fast_div() - Enable/disable fast rx antenna diversity
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ * @enable: True to enable, false to disable
*/
static void
ath5k_hw_set_fast_div(struct ath5k_hw *ah, u8 ee_mode, bool enable)
@@ -1913,6 +2262,14 @@ ath5k_hw_set_fast_div(struct ath5k_hw *ah, u8 ee_mode, bool enable)
}
}
+/**
+ * ath5k_hw_set_antenna_switch() - Set up antenna switch table
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ *
+ * Switch table comes from EEPROM and includes information on controlling
+ * the 2 antenna RX attenuators
+ */
void
ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode)
{
@@ -1944,8 +2301,10 @@ ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode)
AR5K_PHY_ANT_SWITCH_TABLE_1);
}
-/*
- * Set antenna operating mode
+/**
+ * ath5k_hw_set_antenna_mode() - Set antenna operating mode
+ * @ah: The &struct ath5k_hw
+ * @ant_mode: One of enum ath5k_ant_mode
*/
void
ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
@@ -2068,8 +2427,13 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
* Helper functions
*/
-/*
- * Do linear interpolation between two given (x, y) points
+/**
+ * ath5k_get_interpolated_value() - Get interpolated Y val between two points
+ * @target: X value of the middle point
+ * @x_left: X value of the left point
+ * @x_right: X value of the right point
+ * @y_left: Y value of the left point
+ * @y_right: Y value of the right point
*/
static s16
ath5k_get_interpolated_value(s16 target, s16 x_left, s16 x_right,
@@ -2096,13 +2460,18 @@ ath5k_get_interpolated_value(s16 target, s16 x_left, s16 x_right,
return result;
}
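The helper is plain linear interpolation; spelling it out (my formulation, not copied from the code):

	y = y_left + (target - x_left) * (y_right - y_left) / (x_right - x_left)

	e.g. between (x, y) = (10, 20) and (20, 40), target = 15 gives y = 30.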
-/*
- * Find vertical boundary (min pwr) for the linear PCDAC curve.
+/**
+ * ath5k_get_linear_pcdac_min() - Find vertical boundary (min pwr) for the
+ * linear PCDAC curve
+ * @stepL: Left array with y values (pcdac steps)
+ * @stepR: Right array with y values (pcdac steps)
+ * @pwrL: Left array with x values (power steps)
+ * @pwrR: Right array with x values (power steps)
*
* Since we have the top of the curve and we draw the line below
* until we reach 1 (1 pcdac step) we need to know which point
- * (x value) that is so that we don't go below y axis and have negative
- * pcdac values when creating the curve, or fill the table with zeroes.
+ * (x value) that is so that we don't go below x axis and have negative
+ * pcdac values when creating the curve, or fill the table with zeros.
*/
static s16
ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR,
@@ -2148,7 +2517,16 @@ ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR,
return max(min_pwrL, min_pwrR);
}
-/*
+/**
+ * ath5k_create_power_curve() - Create a Power to PDADC or PCDAC curve
+ * @pmin: Minimum power value (xmin)
+ * @pmax: Maximum power value (xmax)
+ * @pwr: Array of power steps (x values)
+ * @vpd: Array of matching PCDAC/PDADC steps (y values)
+ * @num_points: Number of provided points
+ * @vpd_table: Array to fill with the full PCDAC/PDADC values (y values)
+ * @type: One of enum ath5k_powertable_type (eeprom.h)
+ *
* Interpolate (pwr,vpd) points to create a Power to PDADC or a
* Power to PCDAC curve.
*
@@ -2206,7 +2584,14 @@ ath5k_create_power_curve(s16 pmin, s16 pmax,
}
}
-/*
+/**
+ * ath5k_get_chan_pcal_surrounding_piers() - Get surrounding calibration piers
+ * for a given channel.
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @pcinfo_l: The &struct ath5k_chan_pcal_info to put the left cal. pier
+ * @pcinfo_r: The &struct ath5k_chan_pcal_info to put the right cal. pier
+ *
* Get the surrounding per-channel power calibration piers
* for a given frequency so that we can interpolate between
* them and come up with an appropriate dataset for our current
@@ -2289,11 +2674,17 @@ done:
*pcinfo_r = &pcinfo[idx_r];
}
-/*
+/**
+ * ath5k_get_rate_pcal_data() - Get the interpolated per-rate power
+ * calibration data
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @rates: The &struct ath5k_rate_pcal_info to fill
+ *
* Get the surrounding per-rate power calibration data
* for a given frequency and interpolate between power
* values to set max target power supported by hw for
- * each rate.
+ * each rate on this frequency.
*/
static void
ath5k_get_rate_pcal_data(struct ath5k_hw *ah,
@@ -2381,7 +2772,11 @@ done:
rpinfo[idx_r].target_power_54);
}
-/*
+/**
+ * ath5k_get_max_ctl_power() - Get max edge power for a given frequency
+ * @ah: the &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
* Get the max edge power for this channel if
* we have such data from EEPROM's Conformance Test
* Limits (CTL), and limit max power if needed.
@@ -2461,8 +2856,39 @@ ath5k_get_max_ctl_power(struct ath5k_hw *ah,
* Power to PCDAC table functions
*/
-/*
- * Fill Power to PCDAC table on RF5111
+/**
+ * DOC: Power to PCDAC table functions
+ *
+ * For RF5111 we have an XPD -eXternal Power Detector- curve
+ * for each calibrated channel. Each curve has 0.5dB power steps
+ * on x axis and PCDAC steps (offsets) on y axis and looks like an
+ * exponential function. To recreate the curve we read 11 points
+ * from eeprom (eeprom.c) and interpolate here.
+ *
+ * For RF5112 we have 4 XPD -eXternal Power Detector- curves
+ * for each calibrated channel on 0, -6, -12 and -18dBm but we only
+ * use the higher (3) and the lower (0) curves. Each curve again has 0.5dB
+ * power steps on x axis and PCDAC steps on y axis and looks like a
+ * linear function. To recreate the curve and pass the power values
+ * on hw, we get 4 points for xpd 0 (lower gain -> max power)
+ * and 3 points for xpd 3 (higher gain -> lower power) from eeprom (eeprom.c)
+ * and interpolate here.
+ *
+ * For a given channel we use its own calibrated points (piers) if we
+ * have them, or -if we don't have calibration data for this specific
+ * channel- we derive them by linearly interpolating between the
+ * surrounding channels we do have calibration data for. Then, once we
+ * have the calibrated points for this channel, we do another linear
+ * interpolation between them to get the whole curve.
+ *
+ * We finally write the Y values of the curve(s) (the PCDAC values) on hw
+ */
+
+/**
+ * ath5k_fill_pwr_to_pcdac_table() - Fill Power to PCDAC table on RF5111
+ * @ah: The &struct ath5k_hw
+ * @table_min: Minimum power (x min)
+ * @table_max: Maximum power (x max)
*
* No further processing is needed for RF5111, the only thing we have to
* do is fill the values below and above calibration range since eeprom data
@@ -2503,10 +2929,14 @@ ath5k_fill_pwr_to_pcdac_table(struct ath5k_hw *ah, s16* table_min,
}
-/*
- * Combine available XPD Curves and fill Linear Power to PCDAC table
- * on RF5112
+/**
+ * ath5k_combine_linear_pcdac_curves() - Combine available PCDAC Curves
+ * @ah: The &struct ath5k_hw
+ * @table_min: Minimum power (x min)
+ * @table_max: Maximum power (x max)
+ * @pdcurves: Number of pd curves
*
+ * Combine available XPD Curves and fill Linear Power to PCDAC table on RF5112
* RFX112 can have up to 2 curves (one for low txpower range and one for
* higher txpower range). We need to put them both on pcdac_out and place
* them in the correct location. In case we only have one curve available
@@ -2608,7 +3038,10 @@ ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min,
}
}
-/* Write PCDAC values on hw */
+/**
+ * ath5k_write_pcdac_table() - Write the PCDAC values on hw
+ * @ah: The &struct ath5k_hw
+ */
static void
ath5k_write_pcdac_table(struct ath5k_hw *ah)
{
@@ -2631,9 +3064,32 @@ ath5k_write_pcdac_table(struct ath5k_hw *ah)
* Power to PDADC table functions
*/
-/*
- * Set the gain boundaries and create final Power to PDADC table
+/**
+ * DOC: Power to PDADC table functions
+ *
+ * For RF2413 and later we have a Power to PDADC table (Power Detector)
+ * instead of a PCDAC (Power Control) and 4 pd gain curves for each
+ * calibrated channel. Each curve has power on x axis in 0.5dB steps and
+ * PDADC steps on y axis and looks like an exponential function like the
+ * RF5111 curve.
+ *
+ * To recreate the curves we read the points from eeprom (eeprom.c)
+ * and interpolate here. Note that in most cases only 2 (higher and lower)
+ * curves are used (like RF5112) but vendors have the opportunity to include
+ * all 4 curves on eeprom. The final curve (higher power) has an extra
+ * point for better accuracy like RF5112.
*
+ * The process is similar to what we do above for RF5111/5112
+ */
+
+/**
+ * ath5k_combine_pwr_to_pdadc_curves() - Combine the various PDADC curves
+ * @ah: The &struct ath5k_hw
+ * @pwr_min: Minimum power (x min)
+ * @pwr_max: Maximum power (x max)
+ * @pdcurves: Number of available curves
+ *
+ * Combine the various pd curves and create the final Power to PDADC table
* We can have up to 4 pd curves, we need to do a similar process
* as we do for RF5112. This time we don't have an edge_flag but we
* set the gain boundaries on a separate register.
@@ -2757,7 +3213,11 @@ ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah,
}
-/* Write PDADC values on hw */
+/**
+ * ath5k_write_pwr_to_pdadc_table() - Write the PDADC values on hw
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ */
static void
ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode)
{
@@ -2814,7 +3274,13 @@ ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode)
* Common code for PCDAC/PDADC tables
*/
-/*
+/**
+ * ath5k_setup_channel_powertable() - Set up power table for this channel
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @ee_mode: One of enum ath5k_driver_mode
+ * @type: One of enum ath5k_powertable_type (eeprom.h)
+ *
* This is the main function that uses all of the above
* to set PCDAC/PDADC table on hw for the current channel.
* This table is used for tx power calibration on the baseband,
@@ -3012,7 +3478,12 @@ ath5k_setup_channel_powertable(struct ath5k_hw *ah,
return 0;
}
-/* Write power table for current channel to hw */
+/**
+ * ath5k_write_channel_powertable() - Set power table for current channel on hw
+ * @ah: The &struct ath5k_hw
+ * @ee_mode: One of enum ath5k_driver_mode
+ * @type: One of enum ath5k_powertable_type (eeprom.h)
+ */
static void
ath5k_write_channel_powertable(struct ath5k_hw *ah, u8 ee_mode, u8 type)
{
@@ -3022,28 +3493,36 @@ ath5k_write_channel_powertable(struct ath5k_hw *ah, u8 ee_mode, u8 type)
ath5k_write_pcdac_table(ah);
}
-/*
- * Per-rate tx power setting
+
+/**
+ * DOC: Per-rate tx power setting
*
- * This is the code that sets the desired tx power (below
+ * This is the code that sets the desired tx power limit (below
* maximum) on hw for each rate (we also have TPC that sets
- * power per packet). We do that by providing an index on the
- * PCDAC/PDADC table we set up.
- */
-
-/*
- * Set rate power table
+ * power per packet type). We do that by providing an index on the
+ * PCDAC/PDADC table we set up above, for each rate.
*
* For now we only limit txpower based on maximum tx power
- * supported by hw (what's inside rate_info). We need to limit
- * this even more, based on regulatory domain etc.
+ * supported by hw (what's inside rate_info) + conformance test
+ * limits. We need to limit this even more, based on regulatory domain
+ * etc to be safe. Normally this is done from above so we don't care
+ * here; all we care about is that the tx power we set will be O.K.
+ * for the hw (e.g. won't create noise on PA etc).
*
- * Rate power table contains indices to PCDAC/PDADC table (0.5dB steps)
- * and is indexed as follows:
+ * Rate power table contains indices to PCDAC/PDADC table (0.5dB steps -
+ * x values) and is indexed as follows:
* rates[0] - rates[7] -> OFDM rates
* rates[8] - rates[14] -> CCK rates
* rates[15] -> XR rates (they all have the same power)
*/
+
+/**
+ * ath5k_setup_rate_powertable() - Set up rate power table for a given tx power
+ * @ah: The &struct ath5k_hw
+ * @max_pwr: The maximum tx power requested in 0.5dB steps
+ * @rate_info: The &struct ath5k_rate_pcal_info to fill
+ * @ee_mode: One of enum ath5k_driver_mode
+ */
static void
ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
struct ath5k_rate_pcal_info *rate_info,
@@ -3114,8 +3593,14 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
}
-/*
- * Set transmission power
+/**
+ * ath5k_hw_txpower() - Set transmission power limit for a given channel
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @txpower: Requested tx power in 0.5dB steps
+ *
+ * Combines all of the above to set the requested tx power limit
+ * on hw.
*/
static int
ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
@@ -3233,7 +3718,16 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
return 0;
}
-int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
+/**
+ * ath5k_hw_set_txpower_limit() - Set txpower limit for the current channel
+ * @ah: The &struct ath5k_hw
+ * @txpower: The requested tx power limit in 0.5dB steps
+ *
+ * This function provides access to ath5k_hw_txpower to the driver in
+ * case the user or an application changes it while PHY is running.
+ */
+int
+ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
{
ATH5K_DBG(ah, ATH5K_DEBUG_TXPOWER,
"changing txpower to %d\n", txpower);
@@ -3241,11 +3735,26 @@ int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
return ath5k_hw_txpower(ah, ah->ah_current_channel, txpower);
}
+
/*************\
Init function
\*************/
-int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
+/**
+ * ath5k_hw_phy_init() - Initialize PHY
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ * @mode: One of enum ath5k_driver_mode
+ * @fast: Try a fast channel switch instead
+ *
+ * This is the main function used during reset to initialize PHY
+ * or do a fast channel change if possible.
+ *
+ * NOTE: Do not call this one from the driver, it assumes PHY is in a
+ * warm reset state !
+ */
+int
+ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
u8 mode, bool fast)
{
struct ieee80211_channel *curr_channel;
@@ -3355,7 +3864,7 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
if (ret)
return ret;
- mdelay(1);
+ usleep_range(1000, 1500);
/*
* Write RF buffer
@@ -3376,10 +3885,10 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
}
} else if (ah->ah_version == AR5K_AR5210) {
- mdelay(1);
+ usleep_range(1000, 1500);
/* Disable phy and wait */
ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
- mdelay(1);
+ usleep_range(1000, 1500);
}
/* Set channel on PHY */
@@ -3405,7 +3914,7 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
for (i = 0; i <= 20; i++) {
if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
break;
- udelay(200);
+ usleep_range(200, 250);
}
ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
@@ -3433,9 +3942,9 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
/* At the same time start I/Q calibration for QAM constellation
* -no need for CCK- */
- ah->ah_calibration = false;
+ ah->ah_iq_cal_needed = false;
if (!(mode == AR5K_MODE_11B)) {
- ah->ah_calibration = true;
+ ah->ah_iq_cal_needed = true;
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 7766542..30b50f9 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -17,23 +17,48 @@
*/
/********************************************\
-Queue Control Unit, DFS Control Unit Functions
+Queue Control Unit, DCF Control Unit Functions
\********************************************/
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
+#include <linux/log2.h>
+
+/**
+ * DOC: Queue Control Unit (QCU)/DCF Control Unit (DCU) functions
+ *
+ * Here we setup parameters for the 12 available TX queues. Note that
+ * on the various registers we can usually only map the first 10 of them so
+ * basically we have 10 queues to play with. Each queue has a matching
+ * QCU that controls when the queue will get triggered and multiple QCUs
+ * can be mapped to a single DCU that controls the various DCF parameters
+ * for the various queues. In our setup we have a 1:1 mapping between QCUs
+ * and DCUs, allowing us to have different DCF settings for each queue.
+ *
+ * When a frame goes into a TX queue, QCU decides when it'll trigger a
+ * transmission based on various criteria (such as how much data we have
+ * inside its buffer or -if it's a beacon queue- if it's time to fire up
+ * the queue based on TSF etc), DCU adds backoff, IFSes etc and then a
+ * scheduler (arbitrator) decides the priority of each QCU based on its
+ * configuration
+ * (e.g. beacons are always transmitted when they leave DCU bypassing all other
+ * frames from other queues waiting to be transmitted). After a frame leaves
+ * the DCU it goes to PCU for further processing and then to PHY for
+ * the actual transmission.
+ */
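To tie the pieces below together, here is a rough sketch of the setup path a driver would follow: allocate a queue, tune its DCF parameters, then program the QCU/DCU. The ath5k_txq_info field names and the exact return convention of ath5k_hw_setup_tx_queue() are assumptions from context, not verified here.

	static int example_setup_data_queue(struct ath5k_hw *ah)
	{
		struct ath5k_txq_info qi = {
			.tqi_aifs   = 2,	/* assumed field names */
			.tqi_cw_min = 15,
			.tqi_cw_max = 1023,
		};
		int q;

		/* Assuming this hands back the allocated queue number
		 * (or a negative errno) */
		q = ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_DATA, &qi);
		if (q < 0)
			return q;

		ath5k_hw_set_tx_queueprops(ah, q, &qi);
		return ath5k_hw_reset_tx_queue(ah, q);
	}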
/******************\
* Helper functions *
\******************/
-/*
- * Get number of pending frames
- * for a specific queue [5211+]
+/**
+ * ath5k_hw_num_tx_pending() - Get number of pending frames for a given queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
*/
-u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
+u32
+ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
{
u32 pending;
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
@@ -58,10 +83,13 @@ u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
return pending;
}
-/*
- * Set a transmit queue inactive
+/**
+ * ath5k_hw_release_tx_queue() - Set a transmit queue inactive
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
*/
-void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
+void
+ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
return;
@@ -72,34 +100,56 @@ void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
}
-/*
+/**
+ * ath5k_cw_validate() - Make sure the given cw is valid
+ * @cw_req: The contention window value to check
+ *
* Make sure cw is a power of 2 minus 1 and smaller than 1024
*/
-static u16 ath5k_cw_validate(u16 cw_req)
+static u16
+ath5k_cw_validate(u16 cw_req)
{
- u32 cw = 1;
cw_req = min(cw_req, (u16)1023);
- while (cw < cw_req)
- cw = (cw << 1) | 1;
+ /* Check if cw_req + 1 is a power of 2 */
+ if (is_power_of_2(cw_req + 1))
+ return cw_req;
- return cw;
+ /* Check if cw_req is a power of 2 */
+ if (is_power_of_2(cw_req))
+ return cw_req - 1;
+
+ /* If none of the above is correct
+ * find the closest power of 2 */
+ cw_req = (u16) roundup_pow_of_two(cw_req) - 1;
+
+ return cw_req;
}
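Some values worked out from the rounding above, for reference:

	ath5k_cw_validate(15)   -> 15    (15 + 1 is a power of two)
	ath5k_cw_validate(16)   -> 15    (16 itself is a power of two)
	ath5k_cw_validate(100)  -> 127   (rounded up to 128, minus one)
	ath5k_cw_validate(5000) -> 1023  (clamped to 1023 first)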
-/*
- * Get properties for a transmit queue
+/**
+ * ath5k_hw_get_tx_queueprops() - Get properties for a transmit queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ * @queue_info: The &struct ath5k_txq_info to fill
*/
-int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
+int
+ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
struct ath5k_txq_info *queue_info)
{
memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
return 0;
}
-/*
- * Set properties for a transmit queue
+/**
+ * ath5k_hw_set_tx_queueprops() - Set properties for a transmit queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ * @qinfo: The &struct ath5k_txq_info to use
+ *
+ * Returns 0 on success or -EIO if queue is inactive
*/
-int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
+int
+ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
const struct ath5k_txq_info *qinfo)
{
struct ath5k_txq_info *qi;
@@ -139,10 +189,16 @@ int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
return 0;
}
-/*
- * Initialize a transmit queue
+/**
+ * ath5k_hw_setup_tx_queue() - Initialize a transmit queue
+ * @ah: The &struct ath5k_hw
+ * @queue_type: One of enum ath5k_tx_queue
+ * @queue_info: The &struct ath5k_txq_info to use
+ *
+ * Returns 0 on success, -EINVAL on invalid arguments
*/
-int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
+int
+ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
struct ath5k_txq_info *queue_info)
{
unsigned int queue;
@@ -217,10 +273,16 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
* Single QCU/DCU initialization *
\*******************************/
-/*
- * Set tx retry limits on DCU
+/**
+ * ath5k_hw_set_tx_retry_limits() - Set tx retry limits on DCU
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
+ *
+ * This function is used when initializing a queue, to set
+ * retry limits based on ah->ah_retry_* and the chipset used.
*/
-void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
+void
+ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
unsigned int queue)
{
/* Single data queue on AR5210 */
@@ -255,15 +317,15 @@ void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
}
/**
- * ath5k_hw_reset_tx_queue - Initialize a single hw queue
+ * ath5k_hw_reset_tx_queue() - Initialize a single hw queue
+ * @ah: The &struct ath5k_hw
+ * @queue: One of enum ath5k_tx_queue_id
*
- * @ah The &struct ath5k_hw
- * @queue The hw queue number
- *
- * Set DFS properties for the given transmit queue on DCU
+ * Set DCF properties for the given transmit queue on DCU
* and configures all queue-specific parameters.
*/
-int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
+int
+ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
struct ath5k_txq_info *tq = &ah->ah_txq[queue];
@@ -491,10 +553,9 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
\**************************/
/**
- * ath5k_hw_set_ifs_intervals - Set global inter-frame spaces on DCU
- *
- * @ah The &struct ath5k_hw
- * @slot_time Slot time in us
+ * ath5k_hw_set_ifs_intervals() - Set global inter-frame spaces on DCU
+ * @ah: The &struct ath5k_hw
+ * @slot_time: Slot time in us
*
* Sets the global IFS intervals on DCU (also works on AR5210) for
* the given slot time and the current bwmode.
@@ -597,7 +658,15 @@ int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
}
-int ath5k_hw_init_queues(struct ath5k_hw *ah)
+/**
+ * ath5k_hw_init_queues() - Initialize tx queues
+ * @ah: The &struct ath5k_hw
+ *
+ * Initializes all tx queues based on the information in
+ * ah->ah_txq* set by the driver
+ */
+int
+ath5k_hw_init_queues(struct ath5k_hw *ah)
{
int i, ret;
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index f5c1000..0ea1608 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -280,6 +280,10 @@
* 5211/5212 we have one primary and 4 secondary registers.
* So we have AR5K_ISR for 5210 and AR5K_PISR /SISRx for 5211/5212.
* Most of these bits are common for all chipsets.
+ *
+ * NOTE: On 5211+ TXOK, TXDESC, TXERR, TXEOL and TXURN contain
+ * the logical OR from per-queue interrupt bits found on SISR registers
+ * (see below).
*/
#define AR5K_ISR 0x001c /* Register Address [5210] */
#define AR5K_PISR 0x0080 /* Register Address [5211+] */
@@ -292,7 +296,10 @@
#define AR5K_ISR_TXOK 0x00000040 /* Frame successfully transmitted */
#define AR5K_ISR_TXDESC 0x00000080 /* TX descriptor request */
#define AR5K_ISR_TXERR 0x00000100 /* Transmit error */
-#define AR5K_ISR_TXNOFRM 0x00000200 /* No frame transmitted (transmit timeout) */
+#define AR5K_ISR_TXNOFRM 0x00000200 /* No frame transmitted (transmit timeout)
+ * NOTE: We don't have per-queue info for this
+ * one, but we can enable it per-queue through
+ * TXNOFRM_QCU field on TXNOFRM register */
#define AR5K_ISR_TXEOL 0x00000400 /* Empty TX descriptor */
#define AR5K_ISR_TXURN 0x00000800 /* Transmit FIFO underrun */
#define AR5K_ISR_MIB 0x00001000 /* Update MIB counters */
@@ -302,21 +309,29 @@
#define AR5K_ISR_SWBA 0x00010000 /* Software beacon alert */
#define AR5K_ISR_BRSSI 0x00020000 /* Beacon rssi below threshold (?) */
#define AR5K_ISR_BMISS 0x00040000 /* Beacon missed */
-#define AR5K_ISR_HIUERR 0x00080000 /* Host Interface Unit error [5211+] */
+#define AR5K_ISR_HIUERR 0x00080000 /* Host Interface Unit error [5211+]
+ * 'or' of MCABT, SSERR, DPERR from SISR2 */
#define AR5K_ISR_BNR 0x00100000 /* Beacon not ready [5211+] */
#define AR5K_ISR_MCABT 0x00100000 /* Master Cycle Abort [5210] */
#define AR5K_ISR_RXCHIRP 0x00200000 /* CHIRP Received [5212+] */
#define AR5K_ISR_SSERR 0x00200000 /* Signaled System Error [5210] */
-#define AR5K_ISR_DPERR 0x00400000 /* Det par Error (?) [5210] */
+#define AR5K_ISR_DPERR 0x00400000 /* Bus parity error [5210] */
#define AR5K_ISR_RXDOPPLER 0x00400000 /* Doppler chirp received [5212+] */
#define AR5K_ISR_TIM 0x00800000 /* [5211+] */
-#define AR5K_ISR_BCNMISC 0x00800000 /* 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT,
- CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */
+#define AR5K_ISR_BCNMISC 0x00800000 /* Misc beacon related interrupt
+ * 'or' of TIM, CAB_END, DTIM_SYNC, BCN_TIMEOUT,
+ * CAB_TIMEOUT and DTIM bits from SISR2 [5212+] */
#define AR5K_ISR_GPIO 0x01000000 /* GPIO (rf kill) */
#define AR5K_ISR_QCBRORN 0x02000000 /* QCU CBR overrun [5211+] */
#define AR5K_ISR_QCBRURN 0x04000000 /* QCU CBR underrun [5211+] */
#define AR5K_ISR_QTRIG 0x08000000 /* QCU scheduling trigger [5211+] */
+#define AR5K_ISR_BITS_FROM_SISRS (AR5K_ISR_TXOK | AR5K_ISR_TXDESC |\
+ AR5K_ISR_TXERR | AR5K_ISR_TXEOL |\
+ AR5K_ISR_TXURN | AR5K_ISR_HIUERR |\
+ AR5K_ISR_BCNMISC | AR5K_ISR_QCBRORN |\
+ AR5K_ISR_QCBRURN | AR5K_ISR_QTRIG)
+
/*
* Secondary status registers [5211+] (0 - 4)
*
@@ -347,7 +362,7 @@
#define AR5K_SISR2_BCN_TIMEOUT 0x08000000 /* Beacon Timeout [5212+] */
#define AR5K_SISR2_CAB_TIMEOUT 0x10000000 /* CAB Timeout [5212+] */
#define AR5K_SISR2_DTIM 0x20000000 /* [5212+] */
-#define AR5K_SISR2_TSFOOR 0x80000000 /* TSF OOR (?) */
+#define AR5K_SISR2_TSFOOR 0x80000000 /* TSF Out of range */
#define AR5K_SISR3 0x0090 /* Register Address [5211+] */
#define AR5K_SISR3_QCBRORN 0x000003ff /* Mask for QCBRORN */
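For orientation, here is a minimal userspace sketch of how the summary bits in PISR relate to the per-queue bits in the SISR registers, as described in the NOTE above. It is illustrative only: the register accessors are stubs, and the 10-bit per-queue mask is an assumption modeled on the SISR3_QCBRORN layout shown here.

    #include <stdint.h>
    #include <stdio.h>

    #define ISR_TXOK        0x00000040u  /* summary bit (logical OR of queues) */
    #define SISR_QCU_MASK   0x000003ffu  /* per-queue bits, queues 0-9 (assumed) */

    static uint32_t read_pisr(void)  { return ISR_TXOK; }    /* stub */
    static uint32_t read_sisr0(void) { return 0x00000005u; } /* stub: queues 0, 2 */

    int main(void)
    {
            uint32_t pisr = read_pisr();

            if (pisr & ISR_TXOK) {
                    /* The summary bit only says "some queue completed a frame";
                     * the secondary register tells us which ones. */
                    uint32_t qmask = read_sisr0() & SISR_QCU_MASK;

                    for (int q = 0; q < 10; q++)
                            if (qmask & (1u << q))
                                    printf("TX ok on queue %d\n", q);
            }
            return 0;
    }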
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 2abac25..250db40 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -19,9 +19,9 @@
*
*/
-/*****************************\
- Reset functions and helpers
-\*****************************/
+/****************************\
+ Reset function and helpers
+\****************************/
#include <asm/unaligned.h>
@@ -33,14 +33,36 @@
#include "debug.h"
+/**
+ * DOC: Reset function and helpers
+ *
+ * Here we implement the main reset routine, used to bring the card
+ * to a working state, ready to receive. We also handle routines
+ * that don't fit in other places, such as clock, sleep and power control
+ */
+
+
/******************\
* Helper functions *
\******************/
-/*
- * Check if a register write has been completed
+/**
+ * ath5k_hw_register_timeout() - Poll a register for a flag/field change
+ * @ah: The &struct ath5k_hw
+ * @reg: The register to read
+ * @flag: The flag/field to check on the register
+ * @val: The field value we expect (if we check a field)
+ * @is_set: Instead of checking if the flag got cleared, check if it got set
+ *
+ * Some registers contain flags that indicate that an operation is
+ * running. We use this function to poll these registers and check
+ * if these flags get cleared. We also use it to poll a register
+ * field (containing multiple flags) until it gets a specific value.
+ *
+ * Returns -EAGAIN if we exceeded AR5K_TUNE_REGISTER_TIMEOUT * 15us or 0
*/
-int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
+int
+ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
bool is_set)
{
int i;
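As a reference, here is a compact userspace model of the polling pattern documented above. It is a sketch, not the driver code: the register read is a stub, the timeout constant is assumed, and the "check a field for a specific value" branch is simplified.

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <unistd.h>

    #define TUNE_REGISTER_TIMEOUT 20000   /* iterations, 15 us each (assumed value) */

    static uint32_t reg_read(uint32_t reg) { (void)reg; return 0; } /* stub */

    static int register_timeout(uint32_t reg, uint32_t flag, uint32_t val,
                                bool is_set)
    {
            for (int i = TUNE_REGISTER_TIMEOUT; i > 0; i--) {
                    uint32_t data = reg_read(reg);

                    if (is_set && (data & flag))          /* wait for flag to set */
                            return 0;
                    if (!is_set && (data & flag) == val)  /* wait for field == val
                                                           * (val = 0: wait for clear) */
                            return 0;
                    usleep(15);
            }
            return -EAGAIN;                               /* as documented above */
    }

    int main(void) { return register_timeout(0x001c, 0x1, 0, false); }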
@@ -64,35 +86,48 @@ int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
\*************************/
/**
- * ath5k_hw_htoclock - Translate usec to hw clock units
- *
+ * ath5k_hw_htoclock() - Translate usec to hw clock units
* @ah: The &struct ath5k_hw
* @usec: value in microseconds
+ *
+ * Translate usecs to hw clock units based on the current
+ * hw clock rate.
+ *
+ * Returns number of clock units
*/
-unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
+unsigned int
+ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
{
struct ath_common *common = ath5k_hw_common(ah);
return usec * common->clockrate;
}
/**
- * ath5k_hw_clocktoh - Translate hw clock units to usec
+ * ath5k_hw_clocktoh() - Translate hw clock units to usec
+ * @ah: The &struct ath5k_hw
* @clock: value in hw clock units
+ *
+ * Translate hw clock units to usecs based on the current
+ * hw clock rate.
+ *
+ * Returns number of usecs
*/
-unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
+unsigned int
+ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
{
struct ath_common *common = ath5k_hw_common(ah);
return clock / common->clockrate;
}
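A tiny worked example of the two conversions documented above, assuming a 40 MHz core clock (40 clock units per microsecond; the real rate depends on the current bwmode):

    #include <stdio.h>

    int main(void)
    {
            unsigned int clockrate = 40;                 /* clocks per usec (assumed) */
            unsigned int slot_us   = 9;

            unsigned int clocks = slot_us * clockrate;   /* htoclock: 9 us -> 360 */
            unsigned int usecs  = clocks / clockrate;    /* clocktoh: 360 -> 9 us */

            printf("%u us = %u clock units (back to %u us)\n",
                   slot_us, clocks, usecs);
            return 0;
    }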
/**
- * ath5k_hw_init_core_clock - Initialize core clock
- *
- * @ah The &struct ath5k_hw
+ * ath5k_hw_init_core_clock() - Initialize core clock
+ * @ah: The &struct ath5k_hw
*
- * Initialize core clock parameters (usec, usec32, latencies etc).
+ * Initialize core clock parameters (usec, usec32, latencies etc),
+ * based on current bwmode and chipset properties.
*/
-static void ath5k_hw_init_core_clock(struct ath5k_hw *ah)
+static void
+ath5k_hw_init_core_clock(struct ath5k_hw *ah)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
struct ath_common *common = ath5k_hw_common(ah);
@@ -227,16 +262,21 @@ static void ath5k_hw_init_core_clock(struct ath5k_hw *ah)
}
}
-/*
+/**
+ * ath5k_hw_set_sleep_clock() - Setup sleep clock operation
+ * @ah: The &struct ath5k_hw
+ * @enable: Enable sleep clock operation (false to disable)
+ *
* If there is an external 32KHz crystal available, use it
* as ref. clock instead of 32/40MHz clock and baseband clocks
* to save power during sleep or restore normal 32/40MHz
* operation.
*
- * XXX: When operating on 32KHz certain PHY registers (27 - 31,
- * 123 - 127) require delay on access.
+ * NOTE: When operating on 32KHz certain PHY registers (27 - 31,
+ * 123 - 127) require delay on access.
*/
-static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
+static void
+ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 scal, spending, sclock;
@@ -340,10 +380,19 @@ static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
* Reset/Sleep control *
\*********************/
-/*
- * Reset chipset
+/**
+ * ath5k_hw_nic_reset() - Reset the various chipset units
+ * @ah: The &struct ath5k_hw
+ * @val: Mask to indicate what units to reset
+ *
+ * To reset the various chipset units we need to write
+ * the mask to AR5K_RESET_CTL and poll the register until
+ * all flags are cleared.
+ *
+ * Returns 0 if we are O.K. or -EAGAIN (from ath5k_hw_register_timeout)
*/
-static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
+static int
+ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
{
int ret;
u32 mask = val ? val : ~0U;
@@ -357,7 +406,7 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
ath5k_hw_reg_write(ah, val, AR5K_RESET_CTL);
/* Wait at least 128 PCI clocks */
- udelay(15);
+ usleep_range(15, 20);
if (ah->ah_version == AR5K_AR5210) {
val &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_DMA
@@ -382,12 +431,17 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
return ret;
}
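A userspace sketch of the sequence described in the ath5k_hw_nic_reset() kernel-doc: write the unit mask to the reset-control register, give the hardware time, then poll until the requested bits read back as cleared. Register offsets and the fake register file are stand-ins for illustration.

    #include <errno.h>
    #include <stdint.h>
    #include <unistd.h>

    #define RESET_CTL 0x4000            /* stand-in register offset */

    static uint32_t regs[0x10000 / 4];  /* fake register file */

    static void     reg_write(uint32_t reg, uint32_t v) { regs[reg / 4] = v; }
    static uint32_t reg_read(uint32_t reg)              { return regs[reg / 4]; }

    static int nic_reset(uint32_t mask)
    {
            reg_write(RESET_CTL, mask);
            usleep(15);                         /* >= 128 PCI clocks, as in the patch */

            reg_write(RESET_CTL, 0);            /* stub: hardware finished the reset */

            for (int i = 0; i < 20000; i++) {   /* poll until the bits clear */
                    if ((reg_read(RESET_CTL) & mask) == 0)
                            return 0;
                    usleep(15);
            }
            return -EAGAIN;
    }

    int main(void) { return nic_reset(0x3); }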
-/*
- * Reset AHB chipset
- * AR5K_RESET_CTL_PCU flag resets WMAC
- * AR5K_RESET_CTL_BASEBAND flag resets WBB
+/**
+ * ath5k_hw_wisoc_reset() - Reset AHB chipset
+ * @ah: The &struct ath5k_hw
+ * @flags: Mask to indicate what units to reset
+ *
+ * Same as ath5k_hw_nic_reset but for AHB based devices
+ *
+ * Returns 0 if we are O.K. or -EAGAIN (from ath5k_hw_register_timeout)
*/
-static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
+static int
+ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
{
u32 mask = flags ? flags : ~0U;
u32 __iomem *reg;
@@ -422,7 +476,7 @@ static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
regval = __raw_readl(reg);
__raw_writel(regval | val, reg);
regval = __raw_readl(reg);
- udelay(100);
+ usleep_range(100, 150);
/* Bring BB/MAC out of reset */
__raw_writel(regval & ~val, reg);
@@ -439,11 +493,23 @@ static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
return 0;
}
-
-/*
- * Sleep control
+/**
+ * ath5k_hw_set_power_mode() - Set power mode
+ * @ah: The &struct ath5k_hw
+ * @mode: One of enum ath5k_power_mode
+ * @set_chip: Set to true to write sleep control register
+ * @sleep_duration: How much time the device is allowed to sleep
+ * when sleep logic is enabled (in 128 microsecond increments).
+ *
+ * This function is used to configure sleep policy and allowed
+ * sleep modes. For more information check out the sleep control
+ * register in reg.h and STA_ID1.
+ *
+ * Returns 0 on success, -EIO if chip didn't wake up or -EINVAL if an invalid
+ * mode is requested.
*/
-static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
+static int
+ath5k_hw_set_power_mode(struct ath5k_hw *ah, enum ath5k_power_mode mode,
bool set_chip, u16 sleep_duration)
{
unsigned int i;
@@ -493,7 +559,7 @@ static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE,
AR5K_SLEEP_CTL);
- udelay(15);
+ usleep_range(15, 20);
for (i = 200; i > 0; i--) {
/* Check if the chip did wake up */
@@ -502,7 +568,7 @@ static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
break;
/* Wait a bit and retry */
- udelay(50);
+ usleep_range(50, 75);
ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE,
AR5K_SLEEP_CTL);
}
@@ -523,17 +589,20 @@ commit:
return 0;
}
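The hunk above shows the wake-up retry loop; a self-contained model of that shape (request wake, poll up to 200 times with short pauses, give up with -EIO) looks roughly like the following. The "is the chip awake" check is a stub.

    #include <errno.h>
    #include <stdbool.h>
    #include <unistd.h>

    static int wake_attempts;

    static void request_wake(void)  { wake_attempts++; }           /* stub */
    static bool chip_is_awake(void) { return wake_attempts >= 3; } /* stub */

    static int power_mode_awake(void)
    {
            request_wake();
            usleep(15);

            for (int i = 200; i > 0; i--) {
                    if (chip_is_awake())
                            return 0;

                    /* Wait a bit and retry, mirroring the loop in the hunk above. */
                    usleep(50);
                    request_wake();
            }
            return -EIO;    /* chip didn't wake up, as documented */
    }

    int main(void) { return power_mode_awake(); }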
-/*
- * Put device on hold
+/**
+ * ath5k_hw_on_hold() - Put device on hold
+ * @ah: The &struct ath5k_hw
*
- * Put MAC and Baseband on warm reset and
- * keep that state (don't clean sleep control
- * register). After this MAC and Baseband are
- * disabled and a full reset is needed to come
- * back. This way we save as much power as possible
+ * Put MAC and Baseband on warm reset and keep that state
+ * (don't clear the sleep control register). After this MAC
+ * and Baseband are disabled and a full reset is needed
+ * to come back. This way we save as much power as possible
* without putting the card on full sleep.
+ *
+ * Returns 0 on success or -EIO on error
*/
-int ath5k_hw_on_hold(struct ath5k_hw *ah)
+int
+ath5k_hw_on_hold(struct ath5k_hw *ah)
{
struct pci_dev *pdev = ah->pdev;
u32 bus_flags;
@@ -543,7 +612,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
return 0;
/* Make sure device is awake */
- ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+ ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n");
return ret;
@@ -563,7 +632,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
- mdelay(2);
+ usleep_range(2000, 2500);
} else {
ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
AR5K_RESET_CTL_BASEBAND | bus_flags);
@@ -575,7 +644,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
}
/* ...wakeup again!*/
- ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+ ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
ATH5K_ERR(ah, "failed to put device on hold\n");
return ret;
@@ -584,11 +653,18 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
return ret;
}
-/*
+/**
+ * ath5k_hw_nic_wakeup() - Force card out of sleep
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
* Bring up MAC + PHY Chips and program PLL
- * Channel is NULL for the initial wakeup.
+ * NOTE: Channel is NULL for the initial wakeup.
+ *
+ * Returns 0 on success, -EIO on hw failure or -EINVAL on invalid channel info
*/
-int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
+int
+ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
{
struct pci_dev *pdev = ah->pdev;
u32 turbo, mode, clock, bus_flags;
@@ -600,7 +676,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
if ((ath5k_get_bus_type(ah) != ATH_AHB) || channel) {
/* Wakeup the device */
- ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+ ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n");
return ret;
@@ -621,7 +697,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
- mdelay(2);
+ usleep_range(2000, 2500);
} else {
if (ath5k_get_bus_type(ah) == ATH_AHB)
ret = ath5k_hw_wisoc_reset(ah, AR5K_RESET_CTL_PCU |
@@ -637,7 +713,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
}
/* ...wakeup again!...*/
- ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
+ ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
ATH5K_ERR(ah, "failed to resume the MAC Chip\n");
return ret;
@@ -739,7 +815,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
/* ...update PLL if needed */
if (ath5k_hw_reg_read(ah, AR5K_PHY_PLL) != clock) {
ath5k_hw_reg_write(ah, clock, AR5K_PHY_PLL);
- udelay(300);
+ usleep_range(300, 350);
}
/* ...set the PHY operating mode */
@@ -755,8 +831,19 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
* Post-initvals register modifications *
\**************************************/
-/* TODO: Half/Quarter rate */
-static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
+/**
+ * ath5k_hw_tweak_initval_settings() - Tweak initial settings
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Some settings are not handled in initvals, e.g. bwmode
+ * settings, some PHY settings, workarounds etc. that in general
+ * don't fit anywhere else or are too small to introduce a separate
+ * function for each one. So we have this function to handle
+ * them all during reset and complete the card's initialization.
+ */
+static void
+ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
if (ah->ah_version == AR5K_AR5212 &&
@@ -875,7 +962,16 @@ static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
}
}
-static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
+/**
+ * ath5k_hw_commit_eeprom_settings() - Commit settings from EEPROM
+ * @ah: The &struct ath5k_hw
+ * @channel: The &struct ieee80211_channel
+ *
+ * Use settings stored in EEPROM to properly initialize the card
+ * based on the available information and per-mode calibration data.
+ */
+static void
+ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
@@ -1029,7 +1125,23 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
* Main reset function *
\*********************/
-int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
+/**
+ * ath5k_hw_reset() - The main reset function
+ * @ah: The &struct ath5k_hw
+ * @op_mode: One of enum nl80211_iftype
+ * @channel: The &struct ieee80211_channel
+ * @fast: Enable fast channel switching
+ * @skip_pcu: Skip pcu initialization
+ *
+ * This is the function we call each time we want to (re)initialize the
+ * card and pass new settings to hw. We also call it when hw runs into
+ * trouble to make it come back to a working state.
+ *
+ * Returns 0 on success, -EINVAL on invalid op_mode or channel info, or -EIO
+ * on failure.
+ */
+int
+ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
struct ieee80211_channel *channel, bool fast, bool skip_pcu)
{
u32 s_seq[10], s_led[3], tsf_up, tsf_lo;
@@ -1047,7 +1159,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
*/
if (fast && (ah->ah_radio != AR5K_RF2413) &&
(ah->ah_radio != AR5K_RF5413))
- fast = 0;
+ fast = false;
/* Disable sleep clock operation
* to avoid register access delay on certain
@@ -1073,7 +1185,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
if (ret && fast) {
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"DMA didn't stop, falling back to normal reset\n");
- fast = 0;
+ fast = false;
/* Non fatal, just continue with
* normal reset */
ret = 0;
@@ -1242,7 +1354,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
/*
* Initialize PCU
*/
- ath5k_hw_pcu_init(ah, op_mode, mode);
+ ath5k_hw_pcu_init(ah, op_mode);
/*
* Initialize PHY
diff --git a/drivers/net/wireless/ath/ath5k/rfbuffer.h b/drivers/net/wireless/ath/ath5k/rfbuffer.h
index 5d11c23..aed34d9 100644
--- a/drivers/net/wireless/ath/ath5k/rfbuffer.h
+++ b/drivers/net/wireless/ath/ath5k/rfbuffer.h
@@ -18,7 +18,9 @@
*/
-/*
+/**
+ * DOC: RF Buffer registers
+ *
* There are some special registers on the RF chip
* that control various operation settings related mostly to
* the analog parts (channel, gain adjustment etc).
@@ -44,40 +46,63 @@
*/
-/*
+/**
+ * struct ath5k_ini_rfbuffer - Initial RF Buffer settings
+ * @rfb_bank: RF Bank number
+ * @rfb_ctrl_register: RF Buffer control register
+ * @rfb_mode_data: RF Buffer data for each mode
+ *
* Struct to hold default mode specific RF
- * register values (RF Banks)
+ * register values (RF Banks) for each chip.
*/
struct ath5k_ini_rfbuffer {
- u8 rfb_bank; /* RF Bank number */
- u16 rfb_ctrl_register; /* RF Buffer control register */
- u32 rfb_mode_data[3]; /* RF Buffer data for each mode */
+ u8 rfb_bank;
+ u16 rfb_ctrl_register;
+ u32 rfb_mode_data[3];
};
-/*
+/**
+ * struct ath5k_rfb_field - An RF Buffer field (register/value)
+ * @len: Field length
+ * @pos: Offset on the raw packet
+ * @col: Used for shifting
+ *
* Struct to hold RF Buffer field
* infos used to access certain RF
* analog registers
*/
struct ath5k_rfb_field {
- u8 len; /* Field length */
- u16 pos; /* Offset on the raw packet */
- u8 col; /* Column -used for shifting */
+ u8 len;
+ u16 pos;
+ u8 col;
};
-/*
- * RF analog register definition
+/**
+ * struct ath5k_rf_reg - RF analog register definition
+ * @bank: RF Buffer Bank number
+ * @index: Register's index in &enum ath5k_rf_regs_idx
+ * @field: The &struct ath5k_rfb_field
+ *
+ * We use this struct to define the set of RF registers
+ * on each chip that we want to tweak. Some RF registers
+ * are common between different chip versions so this saves
+ * us space and complexity because we can refer to an RF
+ * register by its index no matter what chip we work with
+ * as long as it has that register.
*/
struct ath5k_rf_reg {
- u8 bank; /* RF Buffer Bank number */
- u8 index; /* Register's index on rf_regs_idx */
- struct ath5k_rfb_field field; /* RF Buffer field for this register */
+ u8 bank;
+ u8 index;
+ struct ath5k_rfb_field field;
};
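To make the @len/@pos fields above concrete, here is a minimal bit-field extractor over a bank of 32-bit RF buffer words. It is only a model: the real accessor in the driver also handles the @col shift and the exact bit ordering of the RF banks, which are omitted here.

    #include <stdint.h>
    #include <stdio.h>

    struct rfb_field { uint8_t len; uint16_t pos; uint8_t col; };

    /* Read 'len' bits starting at absolute bit offset 'pos' from a bank of
     * 32-bit words (simplified; real RF bank bit ordering not modeled). */
    static uint32_t rfb_get(const uint32_t *bank, const struct rfb_field *f)
    {
            uint32_t out = 0;

            for (unsigned int i = 0; i < f->len; i++) {
                    unsigned int bit = f->pos + i;
                    out |= ((bank[bit / 32] >> (bit % 32)) & 1u) << i;
            }
            return out;
    }

    int main(void)
    {
            uint32_t bank[4] = { 0x000000f0, 0, 0, 0 };
            struct rfb_field f = { .len = 4, .pos = 4, .col = 0 };

            printf("field value: 0x%x\n", rfb_get(bank, &f));  /* prints 0xf */
            return 0;
    }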
-/* Map RF registers to indexes
+/**
+ * enum ath5k_rf_regs_idx - Map RF registers to indexes
+ *
* We do this to handle common bits and make our
* life easier by using an index for each register
- * instead of a full rfb_field */
+ * instead of a full rfb_field
+ */
enum ath5k_rf_regs_idx {
/* BANK 2 */
AR5K_RF_TURBO = 0,
diff --git a/drivers/net/wireless/ath/ath5k/rfgain.h b/drivers/net/wireless/ath/ath5k/rfgain.h
index ebfae05..4d21df0 100644
--- a/drivers/net/wireless/ath/ath5k/rfgain.h
+++ b/drivers/net/wireless/ath/ath5k/rfgain.h
@@ -18,13 +18,17 @@
*
*/
-/*
+/**
+ * struct ath5k_ini_rfgain - RF Gain table
+ * @rfg_register: RF Gain register address
+ * @rfg_value: Register value for 5 and 2GHz
+ *
* Mode-specific RF Gain table (64bytes) for RF5111/5112
* (RF5110 only comes with AR5210 and only supports a/turbo a mode so initial
* RF Gain values are included in AR5K_AR5210_INI)
*/
struct ath5k_ini_rfgain {
- u16 rfg_register; /* RF Gain register address */
+ u16 rfg_register;
u32 rfg_value[2]; /* [freq (see below)] */
};
@@ -455,18 +459,31 @@ static const struct ath5k_ini_rfgain rfgain_2425[] = {
#define AR5K_GAIN_CHECK_ADJUST(_g) \
((_g)->g_current <= (_g)->g_low || (_g)->g_current >= (_g)->g_high)
+/**
+ * struct ath5k_gain_opt_step - An RF gain optimization step
+ * @gos_param: Set of parameters
+ * @gos_gain: Gain
+ */
struct ath5k_gain_opt_step {
s8 gos_param[AR5K_GAIN_CRN_MAX_FIX_BITS];
s8 gos_gain;
};
+/**
+ * struct ath5k_gain_opt - RF Gain optimization ladder
+ * @go_default: The default step
+ * @go_steps_count: How many optimization steps
+ * @go_step: Array of &struct ath5k_gain_opt_step
+ */
struct ath5k_gain_opt {
u8 go_default;
u8 go_steps_count;
const struct ath5k_gain_opt_step go_step[AR5K_GAIN_STEP_COUNT];
};
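A small sketch of how the AR5K_GAIN_CHECK_ADJUST condition above can drive movement along the optimization ladder; the step-selection policy (move up when gain is too low, down when too high) is an assumption for illustration, not the driver's algorithm.

    #include <stdio.h>

    struct gain_state { int g_current, g_low, g_high, step_idx, steps_count; };

    static int needs_adjust(const struct gain_state *g)
    {
            /* Same condition as AR5K_GAIN_CHECK_ADJUST above. */
            return g->g_current <= g->g_low || g->g_current >= g->g_high;
    }

    int main(void)
    {
            struct gain_state g = {
                    .g_current = 2, .g_low = 5, .g_high = 60,
                    .step_idx = 3, .steps_count = 8,
            };

            if (needs_adjust(&g)) {
                    if (g.g_current <= g.g_low && g.step_idx < g.steps_count - 1)
                            g.step_idx++;   /* assumed: pick a higher-gain step */
                    else if (g.step_idx > 0)
                            g.step_idx--;   /* assumed: pick a lower-gain step */
            }
            printf("using optimization step %d\n", g.step_idx);
            return 0;
    }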
+
/*
+ * RF5111
* Parameters on gos_param:
* 1) Tx clip PHY register
* 2) PWD 90 RF register
@@ -490,6 +507,7 @@ static const struct ath5k_gain_opt rfgain_opt_5111 = {
};
/*
+ * RF5112
* Parameters on gos_param:
* 1) Mixgain ovr RF register
* 2) PWD 138 RF register
diff --git a/drivers/net/wireless/ath/ath5k/trace.h b/drivers/net/wireless/ath/ath5k/trace.h
index 39f002e..00f0158 100644
--- a/drivers/net/wireless/ath/ath5k/trace.h
+++ b/drivers/net/wireless/ath/ath5k/trace.h
@@ -3,7 +3,8 @@
#include <linux/tracepoint.h>
-#ifndef CONFIG_ATH5K_TRACER
+
+#if !defined(CONFIG_ATH5K_TRACER) || defined(__CHECKER__)
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, ...) \
static inline void trace_ ## name(proto) {}
@@ -93,7 +94,7 @@ TRACE_EVENT(ath5k_tx_complete,
#endif /* __TRACE_ATH5K_H */
-#ifdef CONFIG_ATH5K_TRACER
+#if defined(CONFIG_ATH5K_TRACER) && !defined(__CHECKER__)
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../drivers/net/wireless/ath/ath5k
diff --git a/drivers/net/wireless/ath/ath6kl/Makefile b/drivers/net/wireless/ath/ath6kl/Makefile
index 8f7a0d1..7070693 100644
--- a/drivers/net/wireless/ath/ath6kl/Makefile
+++ b/drivers/net/wireless/ath/ath6kl/Makefile
@@ -23,7 +23,7 @@
obj-$(CONFIG_ATH6KL) := ath6kl.o
ath6kl-y += debug.o
-ath6kl-y += htc_hif.o
+ath6kl-y += hif.o
ath6kl-y += htc.o
ath6kl-y += bmi.o
ath6kl-y += cfg80211.o
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c
index c5d11cc..bce3575 100644
--- a/drivers/net/wireless/ath/ath6kl/bmi.c
+++ b/drivers/net/wireless/ath/ath6kl/bmi.c
@@ -19,165 +19,6 @@
#include "target.h"
#include "debug.h"
-static int ath6kl_get_bmi_cmd_credits(struct ath6kl *ar)
-{
- u32 addr;
- unsigned long timeout;
- int ret;
-
- ar->bmi.cmd_credits = 0;
-
- /* Read the counter register to get the command credits */
- addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;
-
- timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
- while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
-
- /*
- * Hit the credit counter with a 4-byte access, the first byte
- * read will hit the counter and cause a decrement, while the
- * remaining 3 bytes has no effect. The rationale behind this
- * is to make all HIF accesses 4-byte aligned.
- */
- ret = hif_read_write_sync(ar, addr,
- (u8 *)&ar->bmi.cmd_credits, 4,
- HIF_RD_SYNC_BYTE_INC);
- if (ret) {
- ath6kl_err("Unable to decrement the command credit count register: %d\n",
- ret);
- return ret;
- }
-
- /* The counter is only 8 bits.
- * Ignore anything in the upper 3 bytes
- */
- ar->bmi.cmd_credits &= 0xFF;
- }
-
- if (!ar->bmi.cmd_credits) {
- ath6kl_err("bmi communication timeout\n");
- return -ETIMEDOUT;
- }
-
- return 0;
-}
-
-static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar)
-{
- unsigned long timeout;
- u32 rx_word = 0;
- int ret = 0;
-
- timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
- while (time_before(jiffies, timeout) && !rx_word) {
- ret = hif_read_write_sync(ar, RX_LOOKAHEAD_VALID_ADDRESS,
- (u8 *)&rx_word, sizeof(rx_word),
- HIF_RD_SYNC_BYTE_INC);
- if (ret) {
- ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
- return ret;
- }
-
- /* all we really want is one bit */
- rx_word &= (1 << ENDPOINT1);
- }
-
- if (!rx_word) {
- ath6kl_err("bmi_recv_buf FIFO empty\n");
- return -EINVAL;
- }
-
- return ret;
-}
-
-static int ath6kl_bmi_send_buf(struct ath6kl *ar, u8 *buf, u32 len)
-{
- int ret;
- u32 addr;
-
- ret = ath6kl_get_bmi_cmd_credits(ar);
- if (ret)
- return ret;
-
- addr = ar->mbox_info.htc_addr;
-
- ret = hif_read_write_sync(ar, addr, buf, len,
- HIF_WR_SYNC_BYTE_INC);
- if (ret)
- ath6kl_err("unable to send the bmi data to the device\n");
-
- return ret;
-}
-
-static int ath6kl_bmi_recv_buf(struct ath6kl *ar, u8 *buf, u32 len)
-{
- int ret;
- u32 addr;
-
- /*
- * During normal bootup, small reads may be required.
- * Rather than issue an HIF Read and then wait as the Target
- * adds successive bytes to the FIFO, we wait here until
- * we know that response data is available.
- *
- * This allows us to cleanly timeout on an unexpected
- * Target failure rather than risk problems at the HIF level.
- * In particular, this avoids SDIO timeouts and possibly garbage
- * data on some host controllers. And on an interconnect
- * such as Compact Flash (as well as some SDIO masters) which
- * does not provide any indication on data timeout, it avoids
- * a potential hang or garbage response.
- *
- * Synchronization is more difficult for reads larger than the
- * size of the MBOX FIFO (128B), because the Target is unable
- * to push the 129th byte of data until AFTER the Host posts an
- * HIF Read and removes some FIFO data. So for large reads the
- * Host proceeds to post an HIF Read BEFORE all the data is
- * actually available to read. Fortunately, large BMI reads do
- * not occur in practice -- they're supported for debug/development.
- *
- * So Host/Target BMI synchronization is divided into these cases:
- * CASE 1: length < 4
- * Should not happen
- *
- * CASE 2: 4 <= length <= 128
- * Wait for first 4 bytes to be in FIFO
- * If CONSERVATIVE_BMI_READ is enabled, also wait for
- * a BMI command credit, which indicates that the ENTIRE
- * response is available in the the FIFO
- *
- * CASE 3: length > 128
- * Wait for the first 4 bytes to be in FIFO
- *
- * For most uses, a small timeout should be sufficient and we will
- * usually see a response quickly; but there may be some unusual
- * (debug) cases of BMI_EXECUTE where we want an larger timeout.
- * For now, we use an unbounded busy loop while waiting for
- * BMI_EXECUTE.
- *
- * If BMI_EXECUTE ever needs to support longer-latency execution,
- * especially in production, this code needs to be enhanced to sleep
- * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
- * a function of Host processor speed.
- */
- if (len >= 4) { /* NB: Currently, always true */
- ret = ath6kl_bmi_get_rx_lkahd(ar);
- if (ret)
- return ret;
- }
-
- addr = ar->mbox_info.htc_addr;
- ret = hif_read_write_sync(ar, addr, buf, len,
- HIF_RD_SYNC_BYTE_INC);
- if (ret) {
- ath6kl_err("Unable to read the bmi data from the device: %d\n",
- ret);
- return ret;
- }
-
- return 0;
-}
-
int ath6kl_bmi_done(struct ath6kl *ar)
{
int ret;
@@ -190,14 +31,12 @@ int ath6kl_bmi_done(struct ath6kl *ar)
ar->bmi.done_sent = true;
- ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid));
+ ret = ath6kl_hif_bmi_write(ar, (u8 *)&cid, sizeof(cid));
if (ret) {
ath6kl_err("Unable to send bmi done: %d\n", ret);
return ret;
}
- ath6kl_bmi_cleanup(ar);
-
return 0;
}
@@ -212,13 +51,13 @@ int ath6kl_bmi_get_target_info(struct ath6kl *ar,
return -EACCES;
}
- ret = ath6kl_bmi_send_buf(ar, (u8 *)&cid, sizeof(cid));
+ ret = ath6kl_hif_bmi_write(ar, (u8 *)&cid, sizeof(cid));
if (ret) {
ath6kl_err("Unable to send get target info: %d\n", ret);
return ret;
}
- ret = ath6kl_bmi_recv_buf(ar, (u8 *)&targ_info->version,
+ ret = ath6kl_hif_bmi_read(ar, (u8 *)&targ_info->version,
sizeof(targ_info->version));
if (ret) {
ath6kl_err("Unable to recv target info: %d\n", ret);
@@ -227,7 +66,7 @@ int ath6kl_bmi_get_target_info(struct ath6kl *ar,
if (le32_to_cpu(targ_info->version) == TARGET_VERSION_SENTINAL) {
/* Determine how many bytes are in the Target's targ_info */
- ret = ath6kl_bmi_recv_buf(ar,
+ ret = ath6kl_hif_bmi_read(ar,
(u8 *)&targ_info->byte_count,
sizeof(targ_info->byte_count));
if (ret) {
@@ -246,7 +85,7 @@ int ath6kl_bmi_get_target_info(struct ath6kl *ar,
}
/* Read the remainder of the targ_info */
- ret = ath6kl_bmi_recv_buf(ar,
+ ret = ath6kl_hif_bmi_read(ar,
((u8 *)targ_info) +
sizeof(targ_info->byte_count),
sizeof(*targ_info) -
@@ -278,8 +117,8 @@ int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
return -EACCES;
}
- size = BMI_DATASZ_MAX + sizeof(cid) + sizeof(addr) + sizeof(len);
- if (size > MAX_BMI_CMDBUF_SZ) {
+ size = ar->bmi.max_data_size + sizeof(cid) + sizeof(addr) + sizeof(len);
+ if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
@@ -292,8 +131,8 @@ int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
len_remain = len;
while (len_remain) {
- rx_len = (len_remain < BMI_DATASZ_MAX) ?
- len_remain : BMI_DATASZ_MAX;
+ rx_len = (len_remain < ar->bmi.max_data_size) ?
+ len_remain : ar->bmi.max_data_size;
offset = 0;
memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
offset += sizeof(cid);
@@ -302,13 +141,13 @@ int ath6kl_bmi_read(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
memcpy(&(ar->bmi.cmd_buf[offset]), &rx_len, sizeof(rx_len));
offset += sizeof(len);
- ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to write to the device: %d\n",
ret);
return ret;
}
- ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, rx_len);
+ ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, rx_len);
if (ret) {
ath6kl_err("Unable to read from the device: %d\n",
ret);
@@ -328,7 +167,7 @@ int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
u32 offset;
u32 len_remain, tx_len;
const u32 header = sizeof(cid) + sizeof(addr) + sizeof(len);
- u8 aligned_buf[BMI_DATASZ_MAX];
+ u8 aligned_buf[400];
u8 *src;
if (ar->bmi.done_sent) {
@@ -336,12 +175,15 @@ int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
return -EACCES;
}
- if ((BMI_DATASZ_MAX + header) > MAX_BMI_CMDBUF_SZ) {
+ if ((ar->bmi.max_data_size + header) > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
- memset(ar->bmi.cmd_buf, 0, BMI_DATASZ_MAX + header);
+ if (WARN_ON(ar->bmi.max_data_size > sizeof(aligned_buf)))
+ return -E2BIG;
+
+ memset(ar->bmi.cmd_buf, 0, ar->bmi.max_data_size + header);
ath6kl_dbg(ATH6KL_DBG_BMI,
"bmi write memory: addr: 0x%x, len: %d\n", addr, len);
@@ -350,7 +192,7 @@ int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
while (len_remain) {
src = &buf[len - len_remain];
- if (len_remain < (BMI_DATASZ_MAX - header)) {
+ if (len_remain < (ar->bmi.max_data_size - header)) {
if (len_remain & 3) {
/* align it with 4 bytes */
len_remain = len_remain +
@@ -360,7 +202,7 @@ int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
}
tx_len = len_remain;
} else {
- tx_len = (BMI_DATASZ_MAX - header);
+ tx_len = (ar->bmi.max_data_size - header);
}
offset = 0;
@@ -373,7 +215,7 @@ int ath6kl_bmi_write(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
memcpy(&(ar->bmi.cmd_buf[offset]), src, tx_len);
offset += tx_len;
- ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to write to the device: %d\n",
ret);
@@ -398,7 +240,7 @@ int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param)
}
size = sizeof(cid) + sizeof(addr) + sizeof(param);
- if (size > MAX_BMI_CMDBUF_SZ) {
+ if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
@@ -415,13 +257,13 @@ int ath6kl_bmi_execute(struct ath6kl *ar, u32 addr, u32 *param)
memcpy(&(ar->bmi.cmd_buf[offset]), param, sizeof(*param));
offset += sizeof(*param);
- ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to write to the device: %d\n", ret);
return ret;
}
- ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param));
+ ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, sizeof(*param));
if (ret) {
ath6kl_err("Unable to read from the device: %d\n", ret);
return ret;
@@ -445,7 +287,7 @@ int ath6kl_bmi_set_app_start(struct ath6kl *ar, u32 addr)
}
size = sizeof(cid) + sizeof(addr);
- if (size > MAX_BMI_CMDBUF_SZ) {
+ if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
@@ -459,7 +301,7 @@ int ath6kl_bmi_set_app_start(struct ath6kl *ar, u32 addr)
memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
offset += sizeof(addr);
- ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to write to the device: %d\n", ret);
return ret;
@@ -481,7 +323,7 @@ int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param)
}
size = sizeof(cid) + sizeof(addr);
- if (size > MAX_BMI_CMDBUF_SZ) {
+ if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
@@ -495,13 +337,13 @@ int ath6kl_bmi_reg_read(struct ath6kl *ar, u32 addr, u32 *param)
memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
offset += sizeof(addr);
- ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to write to the device: %d\n", ret);
return ret;
}
- ret = ath6kl_bmi_recv_buf(ar, ar->bmi.cmd_buf, sizeof(*param));
+ ret = ath6kl_hif_bmi_read(ar, ar->bmi.cmd_buf, sizeof(*param));
if (ret) {
ath6kl_err("Unable to read from the device: %d\n", ret);
return ret;
@@ -524,7 +366,7 @@ int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param)
}
size = sizeof(cid) + sizeof(addr) + sizeof(param);
- if (size > MAX_BMI_CMDBUF_SZ) {
+ if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
@@ -542,7 +384,7 @@ int ath6kl_bmi_reg_write(struct ath6kl *ar, u32 addr, u32 param)
memcpy(&(ar->bmi.cmd_buf[offset]), &param, sizeof(param));
offset += sizeof(param);
- ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to write to the device: %d\n", ret);
return ret;
@@ -565,8 +407,8 @@ int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len)
return -EACCES;
}
- size = BMI_DATASZ_MAX + header;
- if (size > MAX_BMI_CMDBUF_SZ) {
+ size = ar->bmi.max_data_size + header;
+ if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
@@ -577,8 +419,8 @@ int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len)
len_remain = len;
while (len_remain) {
- tx_len = (len_remain < (BMI_DATASZ_MAX - header)) ?
- len_remain : (BMI_DATASZ_MAX - header);
+ tx_len = (len_remain < (ar->bmi.max_data_size - header)) ?
+ len_remain : (ar->bmi.max_data_size - header);
offset = 0;
memcpy(&(ar->bmi.cmd_buf[offset]), &cid, sizeof(cid));
@@ -589,7 +431,7 @@ int ath6kl_bmi_lz_data(struct ath6kl *ar, u8 *buf, u32 len)
tx_len);
offset += tx_len;
- ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to write to the device: %d\n",
ret);
@@ -615,7 +457,7 @@ int ath6kl_bmi_lz_stream_start(struct ath6kl *ar, u32 addr)
}
size = sizeof(cid) + sizeof(addr);
- if (size > MAX_BMI_CMDBUF_SZ) {
+ if (size > ar->bmi.max_cmd_size) {
WARN_ON(1);
return -EINVAL;
}
@@ -631,7 +473,7 @@ int ath6kl_bmi_lz_stream_start(struct ath6kl *ar, u32 addr)
memcpy(&(ar->bmi.cmd_buf[offset]), &addr, sizeof(addr));
offset += sizeof(addr);
- ret = ath6kl_bmi_send_buf(ar, ar->bmi.cmd_buf, offset);
+ ret = ath6kl_hif_bmi_write(ar, ar->bmi.cmd_buf, offset);
if (ret) {
ath6kl_err("Unable to start LZ stream to the device: %d\n",
ret);
@@ -672,10 +514,20 @@ int ath6kl_bmi_fast_download(struct ath6kl *ar, u32 addr, u8 *buf, u32 len)
return ret;
}
+void ath6kl_bmi_reset(struct ath6kl *ar)
+{
+ ar->bmi.done_sent = false;
+}
+
int ath6kl_bmi_init(struct ath6kl *ar)
{
- ar->bmi.cmd_buf = kzalloc(MAX_BMI_CMDBUF_SZ, GFP_ATOMIC);
+ if (WARN_ON(ar->bmi.max_data_size == 0))
+ return -EINVAL;
+
+ /* cmd + addr + len + data_size */
+ ar->bmi.max_cmd_size = ar->bmi.max_data_size + (sizeof(u32) * 3);
+ ar->bmi.cmd_buf = kzalloc(ar->bmi.max_cmd_size, GFP_ATOMIC);
if (!ar->bmi.cmd_buf)
return -ENOMEM;
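The sizing rule introduced here (command buffer = HIF-reported max data size plus room for cmd + addr + len) and the chunking used by the read/write paths above can be illustrated with a short standalone model; the numeric limits are example values only, not the real HIF limits.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t max_data_size = 256;                   /* example HIF limit */
            uint32_t header        = 3 * sizeof(uint32_t);  /* cmd + addr + len */
            uint32_t max_cmd_size  = max_data_size + header;

            uint32_t len_remain = 700;                      /* example payload */

            printf("cmd buffer: %u bytes\n", max_cmd_size);
            while (len_remain) {
                    uint32_t tx_len = len_remain < (max_data_size - header) ?
                                      len_remain : (max_data_size - header);

                    printf("  write chunk of %u bytes\n", tx_len);
                    len_remain -= tx_len;
            }
            return 0;
    }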
diff --git a/drivers/net/wireless/ath/ath6kl/bmi.h b/drivers/net/wireless/ath/ath6kl/bmi.h
index 96851d5..f1ca681 100644
--- a/drivers/net/wireless/ath/ath6kl/bmi.h
+++ b/drivers/net/wireless/ath/ath6kl/bmi.h
@@ -44,12 +44,6 @@
* BMI handles all required Target-side cache flushing.
*/
-#define MAX_BMI_CMDBUF_SZ (BMI_DATASZ_MAX + \
- (sizeof(u32) * 3 /* cmd + addr + len */))
-
-/* Maximum data size used for BMI transfers */
-#define BMI_DATASZ_MAX 256
-
/* BMI Commands */
#define BMI_NO_COMMAND 0
@@ -230,6 +224,8 @@ struct ath6kl_bmi_target_info {
int ath6kl_bmi_init(struct ath6kl *ar);
void ath6kl_bmi_cleanup(struct ath6kl *ar);
+void ath6kl_bmi_reset(struct ath6kl *ar);
+
int ath6kl_bmi_done(struct ath6kl *ar);
int ath6kl_bmi_get_target_info(struct ath6kl *ar,
struct ath6kl_bmi_target_info *targ_info);
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index f517eb8..6c59a21 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -123,17 +123,50 @@ static struct ieee80211_supported_band ath6kl_band_5ghz = {
.bitrates = ath6kl_a_rates,
};
-static int ath6kl_set_wpa_version(struct ath6kl *ar,
+#define CCKM_KRK_CIPHER_SUITE 0x004096ff /* use for KRK */
+
+/* returns true if scheduled scan was stopped */
+static bool __ath6kl_cfg80211_sscan_stop(struct ath6kl_vif *vif)
+{
+ struct ath6kl *ar = vif->ar;
+
+ if (ar->state != ATH6KL_STATE_SCHED_SCAN)
+ return false;
+
+ del_timer_sync(&vif->sched_scan_timer);
+
+ ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
+ ATH6KL_HOST_MODE_AWAKE);
+
+ ar->state = ATH6KL_STATE_ON;
+
+ return true;
+}
+
+static void ath6kl_cfg80211_sscan_disable(struct ath6kl_vif *vif)
+{
+ struct ath6kl *ar = vif->ar;
+ bool stopped;
+
+ stopped = __ath6kl_cfg80211_sscan_stop(vif);
+
+ if (!stopped)
+ return;
+
+ cfg80211_sched_scan_stopped(ar->wiphy);
+}
+
+static int ath6kl_set_wpa_version(struct ath6kl_vif *vif,
enum nl80211_wpa_versions wpa_version)
{
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: %u\n", __func__, wpa_version);
if (!wpa_version) {
- ar->auth_mode = NONE_AUTH;
+ vif->auth_mode = NONE_AUTH;
} else if (wpa_version & NL80211_WPA_VERSION_2) {
- ar->auth_mode = WPA2_AUTH;
+ vif->auth_mode = WPA2_AUTH;
} else if (wpa_version & NL80211_WPA_VERSION_1) {
- ar->auth_mode = WPA_AUTH;
+ vif->auth_mode = WPA_AUTH;
} else {
ath6kl_err("%s: %u not supported\n", __func__, wpa_version);
return -ENOTSUPP;
@@ -142,25 +175,24 @@ static int ath6kl_set_wpa_version(struct ath6kl *ar,
return 0;
}
-static int ath6kl_set_auth_type(struct ath6kl *ar,
+static int ath6kl_set_auth_type(struct ath6kl_vif *vif,
enum nl80211_auth_type auth_type)
{
-
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, auth_type);
switch (auth_type) {
case NL80211_AUTHTYPE_OPEN_SYSTEM:
- ar->dot11_auth_mode = OPEN_AUTH;
+ vif->dot11_auth_mode = OPEN_AUTH;
break;
case NL80211_AUTHTYPE_SHARED_KEY:
- ar->dot11_auth_mode = SHARED_AUTH;
+ vif->dot11_auth_mode = SHARED_AUTH;
break;
case NL80211_AUTHTYPE_NETWORK_EAP:
- ar->dot11_auth_mode = LEAP_AUTH;
+ vif->dot11_auth_mode = LEAP_AUTH;
break;
case NL80211_AUTHTYPE_AUTOMATIC:
- ar->dot11_auth_mode = OPEN_AUTH | SHARED_AUTH;
+ vif->dot11_auth_mode = OPEN_AUTH | SHARED_AUTH;
break;
default:
@@ -171,11 +203,11 @@ static int ath6kl_set_auth_type(struct ath6kl *ar,
return 0;
}
-static int ath6kl_set_cipher(struct ath6kl *ar, u32 cipher, bool ucast)
+static int ath6kl_set_cipher(struct ath6kl_vif *vif, u32 cipher, bool ucast)
{
- u8 *ar_cipher = ucast ? &ar->prwise_crypto : &ar->grp_crypto;
- u8 *ar_cipher_len = ucast ? &ar->prwise_crypto_len :
- &ar->grp_crypto_len;
+ u8 *ar_cipher = ucast ? &vif->prwise_crypto : &vif->grp_crypto;
+ u8 *ar_cipher_len = ucast ? &vif->prwise_crypto_len :
+ &vif->grp_crypto_len;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: cipher 0x%x, ucast %u\n",
__func__, cipher, ucast);
@@ -202,6 +234,10 @@ static int ath6kl_set_cipher(struct ath6kl *ar, u32 cipher, bool ucast)
*ar_cipher = AES_CRYPT;
*ar_cipher_len = 0;
break;
+ case WLAN_CIPHER_SUITE_SMS4:
+ *ar_cipher = WAPI_CRYPT;
+ *ar_cipher_len = 0;
+ break;
default:
ath6kl_err("cipher 0x%x not supported\n", cipher);
return -ENOTSUPP;
@@ -210,28 +246,35 @@ static int ath6kl_set_cipher(struct ath6kl *ar, u32 cipher, bool ucast)
return 0;
}
-static void ath6kl_set_key_mgmt(struct ath6kl *ar, u32 key_mgmt)
+static void ath6kl_set_key_mgmt(struct ath6kl_vif *vif, u32 key_mgmt)
{
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: 0x%x\n", __func__, key_mgmt);
if (key_mgmt == WLAN_AKM_SUITE_PSK) {
- if (ar->auth_mode == WPA_AUTH)
- ar->auth_mode = WPA_PSK_AUTH;
- else if (ar->auth_mode == WPA2_AUTH)
- ar->auth_mode = WPA2_PSK_AUTH;
+ if (vif->auth_mode == WPA_AUTH)
+ vif->auth_mode = WPA_PSK_AUTH;
+ else if (vif->auth_mode == WPA2_AUTH)
+ vif->auth_mode = WPA2_PSK_AUTH;
+ } else if (key_mgmt == 0x00409600) {
+ if (vif->auth_mode == WPA_AUTH)
+ vif->auth_mode = WPA_AUTH_CCKM;
+ else if (vif->auth_mode == WPA2_AUTH)
+ vif->auth_mode = WPA2_AUTH_CCKM;
} else if (key_mgmt != WLAN_AKM_SUITE_8021X) {
- ar->auth_mode = NONE_AUTH;
+ vif->auth_mode = NONE_AUTH;
}
}
-static bool ath6kl_cfg80211_ready(struct ath6kl *ar)
+static bool ath6kl_cfg80211_ready(struct ath6kl_vif *vif)
{
+ struct ath6kl *ar = vif->ar;
+
if (!test_bit(WMI_READY, &ar->flag)) {
ath6kl_err("wmi is not ready\n");
return false;
}
- if (!test_bit(WLAN_ENABLED, &ar->flag)) {
+ if (!test_bit(WLAN_ENABLED, &vif->flags)) {
ath6kl_err("wlan disabled\n");
return false;
}
@@ -239,15 +282,146 @@ static bool ath6kl_cfg80211_ready(struct ath6kl *ar)
return true;
}
+static bool ath6kl_is_wpa_ie(const u8 *pos)
+{
+ return pos[0] == WLAN_EID_WPA && pos[1] >= 4 &&
+ pos[2] == 0x00 && pos[3] == 0x50 &&
+ pos[4] == 0xf2 && pos[5] == 0x01;
+}
+
+static bool ath6kl_is_rsn_ie(const u8 *pos)
+{
+ return pos[0] == WLAN_EID_RSN;
+}
+
+static bool ath6kl_is_wps_ie(const u8 *pos)
+{
+ return (pos[0] == WLAN_EID_VENDOR_SPECIFIC &&
+ pos[1] >= 4 &&
+ pos[2] == 0x00 && pos[3] == 0x50 && pos[4] == 0xf2 &&
+ pos[5] == 0x04);
+}
+
+static int ath6kl_set_assoc_req_ies(struct ath6kl_vif *vif, const u8 *ies,
+ size_t ies_len)
+{
+ struct ath6kl *ar = vif->ar;
+ const u8 *pos;
+ u8 *buf = NULL;
+ size_t len = 0;
+ int ret;
+
+ /*
+ * Clear previously set flag
+ */
+
+ ar->connect_ctrl_flags &= ~CONNECT_WPS_FLAG;
+
+ /*
+ * Filter out RSN/WPA IE(s)
+ */
+
+ if (ies && ies_len) {
+ buf = kmalloc(ies_len, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+ pos = ies;
+
+ while (pos + 1 < ies + ies_len) {
+ if (pos + 2 + pos[1] > ies + ies_len)
+ break;
+ if (!(ath6kl_is_wpa_ie(pos) || ath6kl_is_rsn_ie(pos))) {
+ memcpy(buf + len, pos, 2 + pos[1]);
+ len += 2 + pos[1];
+ }
+
+ if (ath6kl_is_wps_ie(pos))
+ ar->connect_ctrl_flags |= CONNECT_WPS_FLAG;
+
+ pos += 2 + pos[1];
+ }
+ }
+
+ ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_FRAME_ASSOC_REQ, buf, len);
+ kfree(buf);
+ return ret;
+}
+
+static int ath6kl_nliftype_to_drv_iftype(enum nl80211_iftype type, u8 *nw_type)
+{
+ switch (type) {
+ case NL80211_IFTYPE_STATION:
+ *nw_type = INFRA_NETWORK;
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ *nw_type = ADHOC_NETWORK;
+ break;
+ case NL80211_IFTYPE_AP:
+ *nw_type = AP_NETWORK;
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ *nw_type = INFRA_NETWORK;
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ *nw_type = AP_NETWORK;
+ break;
+ default:
+ ath6kl_err("invalid interface type %u\n", type);
+ return -ENOTSUPP;
+ }
+
+ return 0;
+}
+
+static bool ath6kl_is_valid_iftype(struct ath6kl *ar, enum nl80211_iftype type,
+ u8 *if_idx, u8 *nw_type)
+{
+ int i;
+
+ if (ath6kl_nliftype_to_drv_iftype(type, nw_type))
+ return false;
+
+ if (ar->ibss_if_active || ((type == NL80211_IFTYPE_ADHOC) &&
+ ar->num_vif))
+ return false;
+
+ if (type == NL80211_IFTYPE_STATION ||
+ type == NL80211_IFTYPE_AP || type == NL80211_IFTYPE_ADHOC) {
+ for (i = 0; i < ar->vif_max; i++) {
+ if ((ar->avail_idx_map >> i) & BIT(0)) {
+ *if_idx = i;
+ return true;
+ }
+ }
+ }
+
+ if (type == NL80211_IFTYPE_P2P_CLIENT ||
+ type == NL80211_IFTYPE_P2P_GO) {
+ for (i = ar->max_norm_iface; i < ar->vif_max; i++) {
+ if ((ar->avail_idx_map >> i) & BIT(0)) {
+ *if_idx = i;
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_connect_params *sme)
{
struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
int status;
+ u8 nw_subtype = (ar->p2p) ? SUBTYPE_P2PDEV : SUBTYPE_NONE;
- ar->sme_state = SME_CONNECTING;
+ ath6kl_cfg80211_sscan_disable(vif);
- if (!ath6kl_cfg80211_ready(ar))
+ vif->sme_state = SME_CONNECTING;
+
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
@@ -287,12 +461,22 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
}
}
- if (test_bit(CONNECTED, &ar->flag) &&
- ar->ssid_len == sme->ssid_len &&
- !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) {
- ar->reconnect_flag = true;
- status = ath6kl_wmi_reconnect_cmd(ar->wmi, ar->req_bssid,
- ar->ch_hint);
+ if (sme->ie && (sme->ie_len > 0)) {
+ status = ath6kl_set_assoc_req_ies(vif, sme->ie, sme->ie_len);
+ if (status) {
+ up(&ar->sem);
+ return status;
+ }
+ } else
+ ar->connect_ctrl_flags &= ~CONNECT_WPS_FLAG;
+
+ if (test_bit(CONNECTED, &vif->flags) &&
+ vif->ssid_len == sme->ssid_len &&
+ !memcmp(vif->ssid, sme->ssid, vif->ssid_len)) {
+ vif->reconnect_flag = true;
+ status = ath6kl_wmi_reconnect_cmd(ar->wmi, vif->fw_vif_idx,
+ vif->req_bssid,
+ vif->ch_hint);
up(&ar->sem);
if (status) {
@@ -300,42 +484,43 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
return -EIO;
}
return 0;
- } else if (ar->ssid_len == sme->ssid_len &&
- !memcmp(ar->ssid, sme->ssid, ar->ssid_len)) {
- ath6kl_disconnect(ar);
+ } else if (vif->ssid_len == sme->ssid_len &&
+ !memcmp(vif->ssid, sme->ssid, vif->ssid_len)) {
+ ath6kl_disconnect(vif);
}
- memset(ar->ssid, 0, sizeof(ar->ssid));
- ar->ssid_len = sme->ssid_len;
- memcpy(ar->ssid, sme->ssid, sme->ssid_len);
+ memset(vif->ssid, 0, sizeof(vif->ssid));
+ vif->ssid_len = sme->ssid_len;
+ memcpy(vif->ssid, sme->ssid, sme->ssid_len);
if (sme->channel)
- ar->ch_hint = sme->channel->center_freq;
+ vif->ch_hint = sme->channel->center_freq;
- memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+ memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
if (sme->bssid && !is_broadcast_ether_addr(sme->bssid))
- memcpy(ar->req_bssid, sme->bssid, sizeof(ar->req_bssid));
+ memcpy(vif->req_bssid, sme->bssid, sizeof(vif->req_bssid));
- ath6kl_set_wpa_version(ar, sme->crypto.wpa_versions);
+ ath6kl_set_wpa_version(vif, sme->crypto.wpa_versions);
- status = ath6kl_set_auth_type(ar, sme->auth_type);
+ status = ath6kl_set_auth_type(vif, sme->auth_type);
if (status) {
up(&ar->sem);
return status;
}
if (sme->crypto.n_ciphers_pairwise)
- ath6kl_set_cipher(ar, sme->crypto.ciphers_pairwise[0], true);
+ ath6kl_set_cipher(vif, sme->crypto.ciphers_pairwise[0], true);
else
- ath6kl_set_cipher(ar, 0, true);
+ ath6kl_set_cipher(vif, 0, true);
- ath6kl_set_cipher(ar, sme->crypto.cipher_group, false);
+ ath6kl_set_cipher(vif, sme->crypto.cipher_group, false);
if (sme->crypto.n_akm_suites)
- ath6kl_set_key_mgmt(ar, sme->crypto.akm_suites[0]);
+ ath6kl_set_key_mgmt(vif, sme->crypto.akm_suites[0]);
if ((sme->key_len) &&
- (ar->auth_mode == NONE_AUTH) && (ar->prwise_crypto == WEP_CRYPT)) {
+ (vif->auth_mode == NONE_AUTH) &&
+ (vif->prwise_crypto == WEP_CRYPT)) {
struct ath6kl_key *key = NULL;
if (sme->key_idx < WMI_MIN_KEY_INDEX ||
@@ -346,56 +531,60 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
return -ENOENT;
}
- key = &ar->keys[sme->key_idx];
+ key = &vif->keys[sme->key_idx];
key->key_len = sme->key_len;
memcpy(key->key, sme->key, key->key_len);
- key->cipher = ar->prwise_crypto;
- ar->def_txkey_index = sme->key_idx;
+ key->cipher = vif->prwise_crypto;
+ vif->def_txkey_index = sme->key_idx;
- ath6kl_wmi_addkey_cmd(ar->wmi, sme->key_idx,
- ar->prwise_crypto,
+ ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx, sme->key_idx,
+ vif->prwise_crypto,
GROUP_USAGE | TX_USAGE,
key->key_len,
- NULL,
+ NULL, 0,
key->key, KEY_OP_INIT_VAL, NULL,
NO_SYNC_WMIFLAG);
}
if (!ar->usr_bss_filter) {
- clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
- if (ath6kl_wmi_bssfilter_cmd(ar->wmi, ALL_BSS_FILTER, 0) != 0) {
+ clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
+ if (ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ ALL_BSS_FILTER, 0) != 0) {
ath6kl_err("couldn't set bss filtering\n");
up(&ar->sem);
return -EIO;
}
}
- ar->nw_type = ar->next_mode;
+ vif->nw_type = vif->next_mode;
+
+ if (vif->wdev.iftype == NL80211_IFTYPE_P2P_CLIENT)
+ nw_subtype = SUBTYPE_P2PCLIENT;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: connect called with authmode %d dot11 auth %d"
" PW crypto %d PW crypto len %d GRP crypto %d"
" GRP crypto len %d channel hint %u\n",
__func__,
- ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto,
- ar->prwise_crypto_len, ar->grp_crypto,
- ar->grp_crypto_len, ar->ch_hint);
-
- ar->reconnect_flag = 0;
- status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type,
- ar->dot11_auth_mode, ar->auth_mode,
- ar->prwise_crypto,
- ar->prwise_crypto_len,
- ar->grp_crypto, ar->grp_crypto_len,
- ar->ssid_len, ar->ssid,
- ar->req_bssid, ar->ch_hint,
- ar->connect_ctrl_flags);
+ vif->auth_mode, vif->dot11_auth_mode, vif->prwise_crypto,
+ vif->prwise_crypto_len, vif->grp_crypto,
+ vif->grp_crypto_len, vif->ch_hint);
+
+ vif->reconnect_flag = 0;
+ status = ath6kl_wmi_connect_cmd(ar->wmi, vif->fw_vif_idx, vif->nw_type,
+ vif->dot11_auth_mode, vif->auth_mode,
+ vif->prwise_crypto,
+ vif->prwise_crypto_len,
+ vif->grp_crypto, vif->grp_crypto_len,
+ vif->ssid_len, vif->ssid,
+ vif->req_bssid, vif->ch_hint,
+ ar->connect_ctrl_flags, nw_subtype);
up(&ar->sem);
if (status == -EINVAL) {
- memset(ar->ssid, 0, sizeof(ar->ssid));
- ar->ssid_len = 0;
+ memset(vif->ssid, 0, sizeof(vif->ssid));
+ vif->ssid_len = 0;
ath6kl_err("invalid request\n");
return -ENOENT;
} else if (status) {
@@ -404,28 +593,40 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
}
if ((!(ar->connect_ctrl_flags & CONNECT_DO_WPA_OFFLOAD)) &&
- ((ar->auth_mode == WPA_PSK_AUTH)
- || (ar->auth_mode == WPA2_PSK_AUTH))) {
- mod_timer(&ar->disconnect_timer,
+ ((vif->auth_mode == WPA_PSK_AUTH)
+ || (vif->auth_mode == WPA2_PSK_AUTH))) {
+ mod_timer(&vif->disconnect_timer,
jiffies + msecs_to_jiffies(DISCON_TIMER_INTVAL));
}
ar->connect_ctrl_flags &= ~CONNECT_DO_WPA_OFFLOAD;
- set_bit(CONNECT_PEND, &ar->flag);
+ set_bit(CONNECT_PEND, &vif->flags);
return 0;
}
-static int ath6kl_add_bss_if_needed(struct ath6kl *ar, const u8 *bssid,
+static int ath6kl_add_bss_if_needed(struct ath6kl_vif *vif,
+ enum network_type nw_type,
+ const u8 *bssid,
struct ieee80211_channel *chan,
const u8 *beacon_ie, size_t beacon_ie_len)
{
+ struct ath6kl *ar = vif->ar;
struct cfg80211_bss *bss;
+ u16 cap_mask, cap_val;
u8 *ie;
- bss = cfg80211_get_bss(ar->wdev->wiphy, chan, bssid,
- ar->ssid, ar->ssid_len, WLAN_CAPABILITY_ESS,
- WLAN_CAPABILITY_ESS);
+ if (nw_type & ADHOC_NETWORK) {
+ cap_mask = WLAN_CAPABILITY_IBSS;
+ cap_val = WLAN_CAPABILITY_IBSS;
+ } else {
+ cap_mask = WLAN_CAPABILITY_ESS;
+ cap_val = WLAN_CAPABILITY_ESS;
+ }
+
+ bss = cfg80211_get_bss(ar->wiphy, chan, bssid,
+ vif->ssid, vif->ssid_len,
+ cap_mask, cap_val);
if (bss == NULL) {
/*
* Since cfg80211 may not yet know about the BSS,
@@ -435,21 +636,20 @@ static int ath6kl_add_bss_if_needed(struct ath6kl *ar, const u8 *bssid,
* Prepend SSID element since it is not included in the Beacon
* IEs from the target.
*/
- ie = kmalloc(2 + ar->ssid_len + beacon_ie_len, GFP_KERNEL);
+ ie = kmalloc(2 + vif->ssid_len + beacon_ie_len, GFP_KERNEL);
if (ie == NULL)
return -ENOMEM;
ie[0] = WLAN_EID_SSID;
- ie[1] = ar->ssid_len;
- memcpy(ie + 2, ar->ssid, ar->ssid_len);
- memcpy(ie + 2 + ar->ssid_len, beacon_ie, beacon_ie_len);
- bss = cfg80211_inform_bss(ar->wdev->wiphy, chan,
- bssid, 0, WLAN_CAPABILITY_ESS, 100,
- ie, 2 + ar->ssid_len + beacon_ie_len,
+ ie[1] = vif->ssid_len;
+ memcpy(ie + 2, vif->ssid, vif->ssid_len);
+ memcpy(ie + 2 + vif->ssid_len, beacon_ie, beacon_ie_len);
+ bss = cfg80211_inform_bss(ar->wiphy, chan,
+ bssid, 0, cap_val, 100,
+ ie, 2 + vif->ssid_len + beacon_ie_len,
0, GFP_KERNEL);
if (bss)
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "added dummy bss for "
- "%pM prior to indicating connect/roamed "
- "event\n", bssid);
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "added bss %pM to "
+ "cfg80211\n", bssid);
kfree(ie);
} else
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "cfg80211 already has a bss "
@@ -463,7 +663,7 @@ static int ath6kl_add_bss_if_needed(struct ath6kl *ar, const u8 *bssid,
return 0;
}
-void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
+void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
u8 *bssid, u16 listen_intvl,
u16 beacon_intvl,
enum network_type nw_type,
@@ -471,6 +671,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
u8 assoc_resp_len, u8 *assoc_info)
{
struct ieee80211_channel *chan;
+ struct ath6kl *ar = vif->ar;
/* capinfo + listen interval */
u8 assoc_req_ie_offset = sizeof(u16) + sizeof(u16);
@@ -489,11 +690,11 @@ void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
* Store Beacon interval here; DTIM period will be available only once
* a Beacon frame from the AP is seen.
*/
- ar->assoc_bss_beacon_int = beacon_intvl;
- clear_bit(DTIM_PERIOD_AVAIL, &ar->flag);
+ vif->assoc_bss_beacon_int = beacon_intvl;
+ clear_bit(DTIM_PERIOD_AVAIL, &vif->flags);
if (nw_type & ADHOC_NETWORK) {
- if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) {
+ if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: ath6k not in ibss mode\n", __func__);
return;
@@ -501,39 +702,39 @@ void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
}
if (nw_type & INFRA_NETWORK) {
- if (ar->wdev->iftype != NL80211_IFTYPE_STATION &&
- ar->wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) {
+ if (vif->wdev.iftype != NL80211_IFTYPE_STATION &&
+ vif->wdev.iftype != NL80211_IFTYPE_P2P_CLIENT) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: ath6k not in station mode\n", __func__);
return;
}
}
- chan = ieee80211_get_channel(ar->wdev->wiphy, (int) channel);
-
+ chan = ieee80211_get_channel(ar->wiphy, (int) channel);
- if (nw_type & ADHOC_NETWORK) {
- cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL);
+ if (ath6kl_add_bss_if_needed(vif, nw_type, bssid, chan, assoc_info,
+ beacon_ie_len) < 0) {
+ ath6kl_err("could not add cfg80211 bss entry\n");
return;
}
- if (ath6kl_add_bss_if_needed(ar, bssid, chan, assoc_info,
- beacon_ie_len) < 0) {
- ath6kl_err("could not add cfg80211 bss entry for "
- "connect/roamed notification\n");
+ if (nw_type & ADHOC_NETWORK) {
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "ad-hoc %s selected\n",
+ nw_type & ADHOC_CREATOR ? "creator" : "joiner");
+ cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
return;
}
- if (ar->sme_state == SME_CONNECTING) {
+ if (vif->sme_state == SME_CONNECTING) {
/* inform connect result to cfg80211 */
- ar->sme_state = SME_CONNECTED;
- cfg80211_connect_result(ar->net_dev, bssid,
+ vif->sme_state = SME_CONNECTED;
+ cfg80211_connect_result(vif->ndev, bssid,
assoc_req_ie, assoc_req_len,
assoc_resp_ie, assoc_resp_len,
WLAN_STATUS_SUCCESS, GFP_KERNEL);
- } else if (ar->sme_state == SME_CONNECTED) {
+ } else if (vif->sme_state == SME_CONNECTED) {
/* inform roam event to cfg80211 */
- cfg80211_roamed(ar->net_dev, chan, bssid,
+ cfg80211_roamed(vif->ndev, chan, bssid,
assoc_req_ie, assoc_req_len,
assoc_resp_ie, assoc_resp_len, GFP_KERNEL);
}
@@ -542,12 +743,15 @@ void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
static int ath6kl_cfg80211_disconnect(struct wiphy *wiphy,
struct net_device *dev, u16 reason_code)
{
- struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev);
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: reason=%u\n", __func__,
reason_code);
- if (!ath6kl_cfg80211_ready(ar))
+ ath6kl_cfg80211_sscan_disable(vif);
+
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
if (test_bit(DESTROY_IN_PROGRESS, &ar->flag)) {
@@ -560,44 +764,46 @@ static int ath6kl_cfg80211_disconnect(struct wiphy *wiphy,
return -ERESTARTSYS;
}
- ar->reconnect_flag = 0;
- ath6kl_disconnect(ar);
- memset(ar->ssid, 0, sizeof(ar->ssid));
- ar->ssid_len = 0;
+ vif->reconnect_flag = 0;
+ ath6kl_disconnect(vif);
+ memset(vif->ssid, 0, sizeof(vif->ssid));
+ vif->ssid_len = 0;
if (!test_bit(SKIP_SCAN, &ar->flag))
- memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+ memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
up(&ar->sem);
- ar->sme_state = SME_DISCONNECTED;
+ vif->sme_state = SME_DISCONNECTED;
return 0;
}
-void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason,
+void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
u8 *bssid, u8 assoc_resp_len,
u8 *assoc_info, u16 proto_reason)
{
- if (ar->scan_req) {
- cfg80211_scan_done(ar->scan_req, true);
- ar->scan_req = NULL;
+ struct ath6kl *ar = vif->ar;
+
+ if (vif->scan_req) {
+ cfg80211_scan_done(vif->scan_req, true);
+ vif->scan_req = NULL;
}
- if (ar->nw_type & ADHOC_NETWORK) {
- if (ar->wdev->iftype != NL80211_IFTYPE_ADHOC) {
+ if (vif->nw_type & ADHOC_NETWORK) {
+ if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: ath6k not in ibss mode\n", __func__);
return;
}
memset(bssid, 0, ETH_ALEN);
- cfg80211_ibss_joined(ar->net_dev, bssid, GFP_KERNEL);
+ cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
return;
}
- if (ar->nw_type & INFRA_NETWORK) {
- if (ar->wdev->iftype != NL80211_IFTYPE_STATION &&
- ar->wdev->iftype != NL80211_IFTYPE_P2P_CLIENT) {
+ if (vif->nw_type & INFRA_NETWORK) {
+ if (vif->wdev.iftype != NL80211_IFTYPE_STATION &&
+ vif->wdev.iftype != NL80211_IFTYPE_P2P_CLIENT) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: ath6k not in station mode\n", __func__);
return;
@@ -614,42 +820,46 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason,
*/
if (reason != DISCONNECT_CMD) {
- ath6kl_wmi_disconnect_cmd(ar->wmi);
+ ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
return;
}
- clear_bit(CONNECT_PEND, &ar->flag);
+ clear_bit(CONNECT_PEND, &vif->flags);
- if (ar->sme_state == SME_CONNECTING) {
- cfg80211_connect_result(ar->net_dev,
+ if (vif->sme_state == SME_CONNECTING) {
+ cfg80211_connect_result(vif->ndev,
bssid, NULL, 0,
NULL, 0,
WLAN_STATUS_UNSPECIFIED_FAILURE,
GFP_KERNEL);
- } else if (ar->sme_state == SME_CONNECTED) {
- cfg80211_disconnected(ar->net_dev, reason,
+ } else if (vif->sme_state == SME_CONNECTED) {
+ cfg80211_disconnected(vif->ndev, reason,
NULL, 0, GFP_KERNEL);
}
- ar->sme_state = SME_DISCONNECTED;
+ vif->sme_state = SME_DISCONNECTED;
}
static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
struct cfg80211_scan_request *request)
{
- struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ struct ath6kl *ar = ath6kl_priv(ndev);
+ struct ath6kl_vif *vif = netdev_priv(ndev);
s8 n_channels = 0;
u16 *channels = NULL;
int ret = 0;
+ u32 force_fg_scan = 0;
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
+ ath6kl_cfg80211_sscan_disable(vif);
+
if (!ar->usr_bss_filter) {
- clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
+ clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
ret = ath6kl_wmi_bssfilter_cmd(
- ar->wmi,
- (test_bit(CONNECTED, &ar->flag) ?
+ ar->wmi, vif->fw_vif_idx,
+ (test_bit(CONNECTED, &vif->flags) ?
ALL_BUT_BSS_FILTER : ALL_BSS_FILTER), 0);
if (ret) {
ath6kl_err("couldn't set bss filtering\n");
@@ -664,14 +874,19 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
request->n_ssids = MAX_PROBED_SSID_INDEX - 1;
for (i = 0; i < request->n_ssids; i++)
- ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1,
- SPECIFIC_SSID_FLAG,
+ ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
+ i + 1, SPECIFIC_SSID_FLAG,
request->ssids[i].ssid_len,
request->ssids[i].ssid);
}
+ /*
+ * FIXME: we should clear the IE in fw if it's not set so just
+ * remove the check altogether
+ */
if (request->ie) {
- ret = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_PROBE_REQ,
+ ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_FRAME_PROBE_REQ,
request->ie, request->ie_len);
if (ret) {
ath6kl_err("failed to set Probe Request appie for "
@@ -702,44 +917,63 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
channels[i] = request->channels[i]->center_freq;
}
- ret = ath6kl_wmi_startscan_cmd(ar->wmi, WMI_LONG_SCAN, 0,
- false, 0, 0, n_channels, channels);
+ if (test_bit(CONNECTED, &vif->flags))
+ force_fg_scan = 1;
+
+ if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
+ ar->fw_capabilities)) {
+ /*
+ * If capable of doing P2P mgmt operations using
+ * station interface, send additional information like
+ * supported rates to advertise and xmit rates for
+ * probe requests
+ */
+ ret = ath6kl_wmi_beginscan_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_LONG_SCAN, force_fg_scan,
+ false, 0, 0, n_channels,
+ channels, request->no_cck,
+ request->rates);
+ } else {
+ ret = ath6kl_wmi_startscan_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_LONG_SCAN, force_fg_scan,
+ false, 0, 0, n_channels,
+ channels);
+ }
if (ret)
ath6kl_err("wmi_startscan_cmd failed\n");
else
- ar->scan_req = request;
+ vif->scan_req = request;
kfree(channels);
return ret;
}
-void ath6kl_cfg80211_scan_complete_event(struct ath6kl *ar, int status)
+void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted)
{
+ struct ath6kl *ar = vif->ar;
int i;
- ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status %d\n", __func__, status);
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: status%s\n", __func__,
+ aborted ? " aborted" : "");
- if (!ar->scan_req)
+ if (!vif->scan_req)
return;
- if ((status == -ECANCELED) || (status == -EBUSY)) {
- cfg80211_scan_done(ar->scan_req, true);
+ if (aborted)
goto out;
- }
- cfg80211_scan_done(ar->scan_req, false);
-
- if (ar->scan_req->n_ssids && ar->scan_req->ssids[0].ssid_len) {
- for (i = 0; i < ar->scan_req->n_ssids; i++) {
- ath6kl_wmi_probedssid_cmd(ar->wmi, i + 1,
- DISABLE_SSID_FLAG,
+ if (vif->scan_req->n_ssids && vif->scan_req->ssids[0].ssid_len) {
+ for (i = 0; i < vif->scan_req->n_ssids; i++) {
+ ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
+ i + 1, DISABLE_SSID_FLAG,
0, NULL);
}
}
out:
- ar->scan_req = NULL;
+ cfg80211_scan_done(vif->scan_req, aborted);
+ vif->scan_req = NULL;
}
static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
@@ -747,15 +981,22 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
const u8 *mac_addr,
struct key_params *params)
{
- struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ struct ath6kl *ar = ath6kl_priv(ndev);
+ struct ath6kl_vif *vif = netdev_priv(ndev);
struct ath6kl_key *key = NULL;
u8 key_usage;
u8 key_type;
- int status = 0;
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
+ if (params->cipher == CCKM_KRK_CIPHER_SUITE) {
+ if (params->key_len != WMI_KRK_LEN)
+ return -EINVAL;
+ return ath6kl_wmi_add_krk_cmd(ar->wmi, vif->fw_vif_idx,
+ params->key);
+ }
+
if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: key index %d out of bounds\n", __func__,
@@ -763,7 +1004,7 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
return -ENOENT;
}
- key = &ar->keys[key_index];
+ key = &vif->keys[key_index];
memset(key, 0, sizeof(struct ath6kl_key));
if (pairwise)
@@ -772,13 +1013,19 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
key_usage = GROUP_USAGE;
if (params) {
+ int seq_len = params->seq_len;
+ if (params->cipher == WLAN_CIPHER_SUITE_SMS4 &&
+ seq_len > ATH6KL_KEY_SEQ_LEN) {
+ /* Only first half of the WPI PN is configured */
+ seq_len = ATH6KL_KEY_SEQ_LEN;
+ }
if (params->key_len > WLAN_MAX_KEY_LEN ||
- params->seq_len > sizeof(key->seq))
+ seq_len > sizeof(key->seq))
return -EINVAL;
key->key_len = params->key_len;
memcpy(key->key, params->key, key->key_len);
- key->seq_len = params->seq_len;
+ key->seq_len = seq_len;
memcpy(key->seq, params->seq, key->seq_len);
key->cipher = params->cipher;
}
@@ -796,31 +1043,33 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
case WLAN_CIPHER_SUITE_CCMP:
key_type = AES_CRYPT;
break;
+ case WLAN_CIPHER_SUITE_SMS4:
+ key_type = WAPI_CRYPT;
+ break;
default:
return -ENOTSUPP;
}
- if (((ar->auth_mode == WPA_PSK_AUTH)
- || (ar->auth_mode == WPA2_PSK_AUTH))
+ if (((vif->auth_mode == WPA_PSK_AUTH)
+ || (vif->auth_mode == WPA2_PSK_AUTH))
&& (key_usage & GROUP_USAGE))
- del_timer(&ar->disconnect_timer);
+ del_timer(&vif->disconnect_timer);
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: index %d, key_len %d, key_type 0x%x, key_usage 0x%x, seq_len %d\n",
__func__, key_index, key->key_len, key_type,
key_usage, key->seq_len);
- ar->def_txkey_index = key_index;
-
- if (ar->nw_type == AP_NETWORK && !pairwise &&
- (key_type == TKIP_CRYPT || key_type == AES_CRYPT) && params) {
+ if (vif->nw_type == AP_NETWORK && !pairwise &&
+ (key_type == TKIP_CRYPT || key_type == AES_CRYPT ||
+ key_type == WAPI_CRYPT) && params) {
ar->ap_mode_bkey.valid = true;
ar->ap_mode_bkey.key_index = key_index;
ar->ap_mode_bkey.key_type = key_type;
ar->ap_mode_bkey.key_len = key->key_len;
memcpy(ar->ap_mode_bkey.key, key->key, key->key_len);
- if (!test_bit(CONNECTED, &ar->flag)) {
+ if (!test_bit(CONNECTED, &vif->flags)) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay initial group "
"key configuration until AP mode has been "
"started\n");
@@ -832,8 +1081,8 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
}
}
- if (ar->next_mode == AP_NETWORK && key_type == WEP_CRYPT &&
- !test_bit(CONNECTED, &ar->flag)) {
+ if (vif->next_mode == AP_NETWORK && key_type == WEP_CRYPT &&
+ !test_bit(CONNECTED, &vif->flags)) {
/*
* Store the key locally so that it can be re-configured after
* the AP mode has properly started
@@ -841,31 +1090,29 @@ static int ath6kl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
*/
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delay WEP key configuration "
"until AP mode has been started\n");
- ar->wep_key_list[key_index].key_len = key->key_len;
- memcpy(ar->wep_key_list[key_index].key, key->key, key->key_len);
+ vif->wep_key_list[key_index].key_len = key->key_len;
+ memcpy(vif->wep_key_list[key_index].key, key->key,
+ key->key_len);
return 0;
}
- status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index,
- key_type, key_usage, key->key_len,
- key->seq, key->key, KEY_OP_INIT_VAL,
- (u8 *) mac_addr, SYNC_BOTH_WMIFLAG);
-
- if (status)
- return -EIO;
-
- return 0;
+ return ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx, key_index,
+ key_type, key_usage, key->key_len,
+ key->seq, key->seq_len, key->key,
+ KEY_OP_INIT_VAL,
+ (u8 *) mac_addr, SYNC_BOTH_WMIFLAG);
}
static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
u8 key_index, bool pairwise,
const u8 *mac_addr)
{
- struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ struct ath6kl *ar = ath6kl_priv(ndev);
+ struct ath6kl_vif *vif = netdev_priv(ndev);
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
@@ -875,15 +1122,15 @@ static int ath6kl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
return -ENOENT;
}
- if (!ar->keys[key_index].key_len) {
+ if (!vif->keys[key_index].key_len) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: index %d is empty\n", __func__, key_index);
return 0;
}
- ar->keys[key_index].key_len = 0;
+ vif->keys[key_index].key_len = 0;
- return ath6kl_wmi_deletekey_cmd(ar->wmi, key_index);
+ return ath6kl_wmi_deletekey_cmd(ar->wmi, vif->fw_vif_idx, key_index);
}
static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
@@ -892,13 +1139,13 @@ static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
void (*callback) (void *cookie,
struct key_params *))
{
- struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ struct ath6kl_vif *vif = netdev_priv(ndev);
struct ath6kl_key *key = NULL;
struct key_params params;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
@@ -908,7 +1155,7 @@ static int ath6kl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
return -ENOENT;
}
- key = &ar->keys[key_index];
+ key = &vif->keys[key_index];
memset(&params, 0, sizeof(params));
params.cipher = key->cipher;
params.key_len = key->key_len;
@@ -926,15 +1173,15 @@ static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy,
u8 key_index, bool unicast,
bool multicast)
{
- struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(ndev);
+ struct ath6kl *ar = ath6kl_priv(ndev);
+ struct ath6kl_vif *vif = netdev_priv(ndev);
struct ath6kl_key *key = NULL;
- int status = 0;
u8 key_usage;
enum crypto_type key_type = NONE_CRYPT;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: index %d\n", __func__, key_index);
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
if (key_index < WMI_MIN_KEY_INDEX || key_index > WMI_MAX_KEY_INDEX) {
@@ -944,43 +1191,41 @@ static int ath6kl_cfg80211_set_default_key(struct wiphy *wiphy,
return -ENOENT;
}
- if (!ar->keys[key_index].key_len) {
+ if (!vif->keys[key_index].key_len) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: invalid key index %d\n",
__func__, key_index);
return -EINVAL;
}
- ar->def_txkey_index = key_index;
- key = &ar->keys[ar->def_txkey_index];
+ vif->def_txkey_index = key_index;
+ key = &vif->keys[vif->def_txkey_index];
key_usage = GROUP_USAGE;
- if (ar->prwise_crypto == WEP_CRYPT)
+ if (vif->prwise_crypto == WEP_CRYPT)
key_usage |= TX_USAGE;
if (unicast)
- key_type = ar->prwise_crypto;
+ key_type = vif->prwise_crypto;
if (multicast)
- key_type = ar->grp_crypto;
+ key_type = vif->grp_crypto;
- if (ar->next_mode == AP_NETWORK && !test_bit(CONNECTED, &ar->flag))
+ if (vif->next_mode == AP_NETWORK && !test_bit(CONNECTED, &vif->flags))
return 0; /* Delay until AP mode has been started */
- status = ath6kl_wmi_addkey_cmd(ar->wmi, ar->def_txkey_index,
- key_type, key_usage,
- key->key_len, key->seq, key->key,
- KEY_OP_INIT_VAL, NULL,
- SYNC_BOTH_WMIFLAG);
- if (status)
- return -EIO;
-
- return 0;
+ return ath6kl_wmi_addkey_cmd(ar->wmi, vif->fw_vif_idx,
+ vif->def_txkey_index,
+ key_type, key_usage,
+ key->key_len, key->seq, key->seq_len,
+ key->key,
+ KEY_OP_INIT_VAL, NULL,
+ SYNC_BOTH_WMIFLAG);
}
-void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid,
+void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid,
bool ismcast)
{
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: keyid %d, ismcast %d\n", __func__, keyid, ismcast);
- cfg80211_michael_mic_failure(ar->net_dev, ar->bssid,
+ cfg80211_michael_mic_failure(vif->ndev, vif->bssid,
(ismcast ? NL80211_KEYTYPE_GROUP :
NL80211_KEYTYPE_PAIRWISE), keyid, NULL,
GFP_KERNEL);
@@ -989,12 +1234,17 @@ void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid,
static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
{
struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+ struct ath6kl_vif *vif;
int ret;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: changed 0x%x\n", __func__,
changed);
- if (!ath6kl_cfg80211_ready(ar))
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
@@ -1014,15 +1264,21 @@ static int ath6kl_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
*/
static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
enum nl80211_tx_power_setting type,
- int dbm)
+ int mbm)
{
struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+ struct ath6kl_vif *vif;
u8 ath6kl_dbm;
+ int dbm = MBM_TO_DBM(mbm);
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type 0x%x, dbm %d\n", __func__,
type, dbm);
- if (!ath6kl_cfg80211_ready(ar))
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
switch (type) {
@@ -1037,7 +1293,7 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
return -EOPNOTSUPP;
}
- ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, ath6kl_dbm);
+ ath6kl_wmi_set_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx, ath6kl_dbm);
return 0;
}
@@ -1045,14 +1301,19 @@ static int ath6kl_cfg80211_set_txpower(struct wiphy *wiphy,
static int ath6kl_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm)
{
struct ath6kl *ar = (struct ath6kl *)wiphy_priv(wiphy);
+ struct ath6kl_vif *vif;
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
- if (test_bit(CONNECTED, &ar->flag)) {
+ if (test_bit(CONNECTED, &vif->flags)) {
ar->tx_pwr = 0;
- if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi) != 0) {
+ if (ath6kl_wmi_get_tx_pwr_cmd(ar->wmi, vif->fw_vif_idx) != 0) {
ath6kl_err("ath6kl_wmi_get_tx_pwr_cmd failed\n");
return -EIO;
}
@@ -1076,11 +1337,12 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
{
struct ath6kl *ar = ath6kl_priv(dev);
struct wmi_power_mode_cmd mode;
+ struct ath6kl_vif *vif = netdev_priv(dev);
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: pmgmt %d, timeout %d\n",
__func__, pmgmt, timeout);
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
if (pmgmt) {
@@ -1091,7 +1353,8 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
mode.pwr_mode = MAX_PERF_POWER;
}
- if (ath6kl_wmi_powermode_cmd(ar->wmi, mode.pwr_mode) != 0) {
+ if (ath6kl_wmi_powermode_cmd(ar->wmi, vif->fw_vif_idx,
+ mode.pwr_mode) != 0) {
ath6kl_err("wmi_powermode_cmd failed\n");
return -EIO;
}
@@ -1099,41 +1362,83 @@ static int ath6kl_cfg80211_set_power_mgmt(struct wiphy *wiphy,
return 0;
}
+static struct net_device *ath6kl_cfg80211_add_iface(struct wiphy *wiphy,
+ char *name,
+ enum nl80211_iftype type,
+ u32 *flags,
+ struct vif_params *params)
+{
+ struct ath6kl *ar = wiphy_priv(wiphy);
+ struct net_device *ndev;
+ u8 if_idx, nw_type;
+
+ if (ar->num_vif == ar->vif_max) {
+ ath6kl_err("Reached maximum number of supported vif\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!ath6kl_is_valid_iftype(ar, type, &if_idx, &nw_type)) {
+ ath6kl_err("Not a supported interface type\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ ndev = ath6kl_interface_add(ar, name, type, if_idx, nw_type);
+ if (!ndev)
+ return ERR_PTR(-ENOMEM);
+
+ ar->num_vif++;
+
+ return ndev;
+}
+
+static int ath6kl_cfg80211_del_iface(struct wiphy *wiphy,
+ struct net_device *ndev)
+{
+ struct ath6kl *ar = wiphy_priv(wiphy);
+ struct ath6kl_vif *vif = netdev_priv(ndev);
+
+ spin_lock_bh(&ar->list_lock);
+ list_del(&vif->list);
+ spin_unlock_bh(&ar->list_lock);
+
+ ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag));
+
+ ath6kl_deinit_if_data(vif);
+
+ return 0;
+}
+
static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy,
struct net_device *ndev,
enum nl80211_iftype type, u32 *flags,
struct vif_params *params)
{
- struct ath6kl *ar = ath6kl_priv(ndev);
- struct wireless_dev *wdev = ar->wdev;
+ struct ath6kl_vif *vif = netdev_priv(ndev);
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type %u\n", __func__, type);
- if (!ath6kl_cfg80211_ready(ar))
- return -EIO;
-
switch (type) {
case NL80211_IFTYPE_STATION:
- ar->next_mode = INFRA_NETWORK;
+ vif->next_mode = INFRA_NETWORK;
break;
case NL80211_IFTYPE_ADHOC:
- ar->next_mode = ADHOC_NETWORK;
+ vif->next_mode = ADHOC_NETWORK;
break;
case NL80211_IFTYPE_AP:
- ar->next_mode = AP_NETWORK;
+ vif->next_mode = AP_NETWORK;
break;
case NL80211_IFTYPE_P2P_CLIENT:
- ar->next_mode = INFRA_NETWORK;
+ vif->next_mode = INFRA_NETWORK;
break;
case NL80211_IFTYPE_P2P_GO:
- ar->next_mode = AP_NETWORK;
+ vif->next_mode = AP_NETWORK;
break;
default:
ath6kl_err("invalid interface type %u\n", type);
return -EOPNOTSUPP;
}
- wdev->iftype = type;
+ vif->wdev.iftype = type;
return 0;
}
@@ -1143,16 +1448,17 @@ static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy,
struct cfg80211_ibss_params *ibss_param)
{
struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
int status;
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
- ar->ssid_len = ibss_param->ssid_len;
- memcpy(ar->ssid, ibss_param->ssid, ar->ssid_len);
+ vif->ssid_len = ibss_param->ssid_len;
+ memcpy(vif->ssid, ibss_param->ssid, vif->ssid_len);
if (ibss_param->channel)
- ar->ch_hint = ibss_param->channel->center_freq;
+ vif->ch_hint = ibss_param->channel->center_freq;
if (ibss_param->channel_fixed) {
/*
@@ -1164,44 +1470,45 @@ static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy,
return -EOPNOTSUPP;
}
- memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
+ memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
if (ibss_param->bssid && !is_broadcast_ether_addr(ibss_param->bssid))
- memcpy(ar->req_bssid, ibss_param->bssid, sizeof(ar->req_bssid));
+ memcpy(vif->req_bssid, ibss_param->bssid,
+ sizeof(vif->req_bssid));
- ath6kl_set_wpa_version(ar, 0);
+ ath6kl_set_wpa_version(vif, 0);
- status = ath6kl_set_auth_type(ar, NL80211_AUTHTYPE_OPEN_SYSTEM);
+ status = ath6kl_set_auth_type(vif, NL80211_AUTHTYPE_OPEN_SYSTEM);
if (status)
return status;
if (ibss_param->privacy) {
- ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, true);
- ath6kl_set_cipher(ar, WLAN_CIPHER_SUITE_WEP40, false);
+ ath6kl_set_cipher(vif, WLAN_CIPHER_SUITE_WEP40, true);
+ ath6kl_set_cipher(vif, WLAN_CIPHER_SUITE_WEP40, false);
} else {
- ath6kl_set_cipher(ar, 0, true);
- ath6kl_set_cipher(ar, 0, false);
+ ath6kl_set_cipher(vif, 0, true);
+ ath6kl_set_cipher(vif, 0, false);
}
- ar->nw_type = ar->next_mode;
+ vif->nw_type = vif->next_mode;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: connect called with authmode %d dot11 auth %d"
" PW crypto %d PW crypto len %d GRP crypto %d"
" GRP crypto len %d channel hint %u\n",
__func__,
- ar->auth_mode, ar->dot11_auth_mode, ar->prwise_crypto,
- ar->prwise_crypto_len, ar->grp_crypto,
- ar->grp_crypto_len, ar->ch_hint);
-
- status = ath6kl_wmi_connect_cmd(ar->wmi, ar->nw_type,
- ar->dot11_auth_mode, ar->auth_mode,
- ar->prwise_crypto,
- ar->prwise_crypto_len,
- ar->grp_crypto, ar->grp_crypto_len,
- ar->ssid_len, ar->ssid,
- ar->req_bssid, ar->ch_hint,
- ar->connect_ctrl_flags);
- set_bit(CONNECT_PEND, &ar->flag);
+ vif->auth_mode, vif->dot11_auth_mode, vif->prwise_crypto,
+ vif->prwise_crypto_len, vif->grp_crypto,
+ vif->grp_crypto_len, vif->ch_hint);
+
+ status = ath6kl_wmi_connect_cmd(ar->wmi, vif->fw_vif_idx, vif->nw_type,
+ vif->dot11_auth_mode, vif->auth_mode,
+ vif->prwise_crypto,
+ vif->prwise_crypto_len,
+ vif->grp_crypto, vif->grp_crypto_len,
+ vif->ssid_len, vif->ssid,
+ vif->req_bssid, vif->ch_hint,
+ ar->connect_ctrl_flags, SUBTYPE_NONE);
+ set_bit(CONNECT_PEND, &vif->flags);
return 0;
}
@@ -1209,14 +1516,14 @@ static int ath6kl_cfg80211_join_ibss(struct wiphy *wiphy,
static int ath6kl_cfg80211_leave_ibss(struct wiphy *wiphy,
struct net_device *dev)
{
- struct ath6kl *ar = (struct ath6kl *)ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
- ath6kl_disconnect(ar);
- memset(ar->ssid, 0, sizeof(ar->ssid));
- ar->ssid_len = 0;
+ ath6kl_disconnect(vif);
+ memset(vif->ssid, 0, sizeof(vif->ssid));
+ vif->ssid_len = 0;
return 0;
}
@@ -1226,6 +1533,8 @@ static const u32 cipher_suites[] = {
WLAN_CIPHER_SUITE_WEP104,
WLAN_CIPHER_SUITE_TKIP,
WLAN_CIPHER_SUITE_CCMP,
+ CCKM_KRK_CIPHER_SUITE,
+ WLAN_CIPHER_SUITE_SMS4,
};
static bool is_rate_legacy(s32 rate)
@@ -1293,21 +1602,22 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
u8 *mac, struct station_info *sinfo)
{
struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
long left;
bool sgi;
s32 rate;
int ret;
u8 mcs;
- if (memcmp(mac, ar->bssid, ETH_ALEN) != 0)
+ if (memcmp(mac, vif->bssid, ETH_ALEN) != 0)
return -ENOENT;
if (down_interruptible(&ar->sem))
return -EBUSY;
- set_bit(STATS_UPDATE_PEND, &ar->flag);
+ set_bit(STATS_UPDATE_PEND, &vif->flags);
- ret = ath6kl_wmi_get_stats_cmd(ar->wmi);
+ ret = ath6kl_wmi_get_stats_cmd(ar->wmi, vif->fw_vif_idx);
if (ret != 0) {
up(&ar->sem);
@@ -1316,7 +1626,7 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
left = wait_event_interruptible_timeout(ar->event_wq,
!test_bit(STATS_UPDATE_PEND,
- &ar->flag),
+ &vif->flags),
WMI_TIMEOUT);
up(&ar->sem);
@@ -1326,24 +1636,24 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
else if (left < 0)
return left;
- if (ar->target_stats.rx_byte) {
- sinfo->rx_bytes = ar->target_stats.rx_byte;
+ if (vif->target_stats.rx_byte) {
+ sinfo->rx_bytes = vif->target_stats.rx_byte;
sinfo->filled |= STATION_INFO_RX_BYTES;
- sinfo->rx_packets = ar->target_stats.rx_pkt;
+ sinfo->rx_packets = vif->target_stats.rx_pkt;
sinfo->filled |= STATION_INFO_RX_PACKETS;
}
- if (ar->target_stats.tx_byte) {
- sinfo->tx_bytes = ar->target_stats.tx_byte;
+ if (vif->target_stats.tx_byte) {
+ sinfo->tx_bytes = vif->target_stats.tx_byte;
sinfo->filled |= STATION_INFO_TX_BYTES;
- sinfo->tx_packets = ar->target_stats.tx_pkt;
+ sinfo->tx_packets = vif->target_stats.tx_pkt;
sinfo->filled |= STATION_INFO_TX_PACKETS;
}
- sinfo->signal = ar->target_stats.cs_rssi;
+ sinfo->signal = vif->target_stats.cs_rssi;
sinfo->filled |= STATION_INFO_SIGNAL;
- rate = ar->target_stats.tx_ucast_rate;
+ rate = vif->target_stats.tx_ucast_rate;
if (is_rate_legacy(rate)) {
sinfo->txrate.legacy = rate / 100;
@@ -1375,13 +1685,13 @@ static int ath6kl_get_station(struct wiphy *wiphy, struct net_device *dev,
sinfo->filled |= STATION_INFO_TX_BITRATE;
- if (test_bit(CONNECTED, &ar->flag) &&
- test_bit(DTIM_PERIOD_AVAIL, &ar->flag) &&
- ar->nw_type == INFRA_NETWORK) {
+ if (test_bit(CONNECTED, &vif->flags) &&
+ test_bit(DTIM_PERIOD_AVAIL, &vif->flags) &&
+ vif->nw_type == INFRA_NETWORK) {
sinfo->filled |= STATION_INFO_BSS_PARAM;
sinfo->bss_param.flags = 0;
- sinfo->bss_param.dtim_period = ar->assoc_bss_dtim_period;
- sinfo->bss_param.beacon_interval = ar->assoc_bss_beacon_int;
+ sinfo->bss_param.dtim_period = vif->assoc_bss_dtim_period;
+ sinfo->bss_param.beacon_interval = vif->assoc_bss_beacon_int;
}
return 0;
@@ -1391,7 +1701,9 @@ static int ath6kl_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
struct cfg80211_pmksa *pmksa)
{
struct ath6kl *ar = ath6kl_priv(netdev);
- return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid,
+ struct ath6kl_vif *vif = netdev_priv(netdev);
+
+ return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx, pmksa->bssid,
pmksa->pmkid, true);
}
@@ -1399,25 +1711,302 @@ static int ath6kl_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
struct cfg80211_pmksa *pmksa)
{
struct ath6kl *ar = ath6kl_priv(netdev);
- return ath6kl_wmi_setpmkid_cmd(ar->wmi, pmksa->bssid,
+ struct ath6kl_vif *vif = netdev_priv(netdev);
+
+ return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx, pmksa->bssid,
pmksa->pmkid, false);
}
static int ath6kl_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
{
struct ath6kl *ar = ath6kl_priv(netdev);
- if (test_bit(CONNECTED, &ar->flag))
- return ath6kl_wmi_setpmkid_cmd(ar->wmi, ar->bssid, NULL, false);
+ struct ath6kl_vif *vif = netdev_priv(netdev);
+
+ if (test_bit(CONNECTED, &vif->flags))
+ return ath6kl_wmi_setpmkid_cmd(ar->wmi, vif->fw_vif_idx,
+ vif->bssid, NULL, false);
+ return 0;
+}
+
+static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
+{
+ struct ath6kl_vif *vif;
+ int ret, pos, left;
+ u32 filter = 0;
+ u16 i;
+ u8 mask[WOW_MASK_SIZE];
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ if (!ath6kl_cfg80211_ready(vif))
+ return -EIO;
+
+ if (!test_bit(CONNECTED, &vif->flags))
+ return -EINVAL;
+
+ /* Clear existing WOW patterns */
+ for (i = 0; i < WOW_MAX_FILTERS_PER_LIST; i++)
+ ath6kl_wmi_del_wow_pattern_cmd(ar->wmi, vif->fw_vif_idx,
+ WOW_LIST_ID, i);
+ /* Configure new WOW patterns */
+ for (i = 0; i < wow->n_patterns; i++) {
+
+ /*
+ * Convert the given nl80211-specific mask value to the equivalent
+ * driver-specific mask value and send it to the chip along with
+ * the patterns. For example, if the mask value defined in
+ * struct cfg80211_wowlan is 0xA (binary 1010), then the
+ * equivalent driver-specific mask value is
+ * "0xFF 0x00 0xFF 0x00".
+ */
+ memset(&mask, 0, sizeof(mask));
+ for (pos = 0; pos < wow->patterns[i].pattern_len; pos++) {
+ if (wow->patterns[i].mask[pos / 8] & (0x1 << (pos % 8)))
+ mask[pos] = 0xFF;
+ }
+ /*
+ * Note: The pattern's offset is not passed as part of the wowlan
+ * parameter from the CFG layer, so it is always passed as ZERO
+ * to the firmware. This means the given WOW patterns are always
+ * matched from the first byte of the received packet in the
+ * firmware.
+ */
+ ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi,
+ vif->fw_vif_idx, WOW_LIST_ID,
+ wow->patterns[i].pattern_len,
+ 0 /* pattern offset */,
+ wow->patterns[i].pattern, mask);
+ if (ret)
+ return ret;
+ }
+
+ if (wow->disconnect)
+ filter |= WOW_FILTER_OPTION_NWK_DISASSOC;
+
+ if (wow->magic_pkt)
+ filter |= WOW_FILTER_OPTION_MAGIC_PACKET;
+
+ if (wow->gtk_rekey_failure)
+ filter |= WOW_FILTER_OPTION_GTK_ERROR;
+
+ if (wow->eap_identity_req)
+ filter |= WOW_FILTER_OPTION_EAP_REQ;
+
+ if (wow->four_way_handshake)
+ filter |= WOW_FILTER_OPTION_8021X_4WAYHS;
+
+ ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx,
+ ATH6KL_WOW_MODE_ENABLE,
+ filter,
+ WOW_HOST_REQ_DELAY);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
+ ATH6KL_HOST_MODE_ASLEEP);
+ if (ret)
+ return ret;
+
+ if (ar->tx_pending[ar->ctrl_ep]) {
+ left = wait_event_interruptible_timeout(ar->event_wq,
+ ar->tx_pending[ar->ctrl_ep] == 0, WMI_TIMEOUT);
+ if (left == 0) {
+ ath6kl_warn("clear wmi ctrl data timeout\n");
+ ret = -ETIMEDOUT;
+ } else if (left < 0) {
+ ath6kl_warn("clear wmi ctrl data failed: %d\n", left);
+ ret = left;
+ }
+ }
+
+ return ret;
+}
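
The per-byte mask expansion in ath6kl_wow_suspend() above can be checked in isolation: each bit of the nl80211 pattern mask says whether the corresponding pattern byte has to match, and the firmware wants one mask byte (0xFF or 0x00) per pattern byte. A minimal standalone sketch of that expansion (userspace C, same LSB-first bit indexing as the loop above; WOW_MASK_SIZE is assumed to be the driver's 64-byte per-pattern mask buffer):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define WOW_MASK_SIZE 64	/* assumed size of the driver's per-pattern mask */

	/* Expand an nl80211-style bitmask (one bit per pattern byte, LSB first)
	 * into the 0xFF/0x00 per-byte mask the firmware expects. */
	static void wow_expand_mask(const uint8_t *bitmask, size_t pattern_len,
				    uint8_t mask[WOW_MASK_SIZE])
	{
		size_t pos;

		memset(mask, 0, WOW_MASK_SIZE);
		for (pos = 0; pos < pattern_len && pos < WOW_MASK_SIZE; pos++)
			if (bitmask[pos / 8] & (1u << (pos % 8)))
				mask[pos] = 0xFF;
	}

	int main(void)
	{
		uint8_t bitmask[] = { 0x05 };	/* bits 0 and 2: pattern bytes 0 and 2 must match */
		uint8_t mask[WOW_MASK_SIZE];
		size_t i;

		wow_expand_mask(bitmask, 4, mask);
		for (i = 0; i < 4; i++)
			printf("0x%02X ", mask[i]);	/* prints: 0xFF 0x00 0xFF 0x00 */
		printf("\n");
		return 0;
	}
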
+
+static int ath6kl_wow_resume(struct ath6kl *ar)
+{
+ struct ath6kl_vif *vif;
+ int ret;
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
+ ATH6KL_HOST_MODE_AWAKE);
+ return ret;
+}
+
+int ath6kl_cfg80211_suspend(struct ath6kl *ar,
+ enum ath6kl_cfg_suspend_mode mode,
+ struct cfg80211_wowlan *wow)
+{
+ int ret;
+
+ switch (mode) {
+ case ATH6KL_CFG_SUSPEND_WOW:
+
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "wow mode suspend\n");
+
+ /* Flush all non control pkts in TX path */
+ ath6kl_tx_data_cleanup(ar);
+
+ ret = ath6kl_wow_suspend(ar, wow);
+ if (ret) {
+ ath6kl_err("wow suspend failed: %d\n", ret);
+ return ret;
+ }
+ ar->state = ATH6KL_STATE_WOW;
+ break;
+
+ case ATH6KL_CFG_SUSPEND_DEEPSLEEP:
+
+ ath6kl_cfg80211_stop_all(ar);
+
+ /* save the current power mode before enabling power save */
+ ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
+
+ ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER);
+ if (ret) {
+ ath6kl_warn("wmi powermode command failed during suspend: %d\n",
+ ret);
+ }
+
+ ar->state = ATH6KL_STATE_DEEPSLEEP;
+
+ break;
+
+ case ATH6KL_CFG_SUSPEND_CUTPOWER:
+
+ ath6kl_cfg80211_stop_all(ar);
+
+ if (ar->state == ATH6KL_STATE_OFF) {
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND,
+ "suspend hw off, no action for cutpower\n");
+ break;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "suspend cutting power\n");
+
+ ret = ath6kl_init_hw_stop(ar);
+ if (ret) {
+ ath6kl_warn("failed to stop hw during suspend: %d\n",
+ ret);
+ }
+
+ ar->state = ATH6KL_STATE_CUTPOWER;
+
+ break;
+
+ case ATH6KL_CFG_SUSPEND_SCHED_SCAN:
+ /*
+ * Nothing needed for scheduled scan; the firmware is already
+ * in wow mode and sleeping most of the time.
+ */
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
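
ath6kl_cfg80211_suspend() above only executes the mode it is given; per the comment near __ath6kl_cfg80211_suspend() further down, the hif layer decides which mode applies. A hypothetical sketch of how an SDIO hif could pick the mode from the host controller's PM capabilities — the function name and the exact policy are illustrative assumptions, not part of this patch (assumes <linux/mmc/sdio_func.h> and <linux/mmc/pm.h>):

	/* Hypothetical: derive the cfg80211 suspend mode from SDIO host PM caps. */
	static int example_hif_sdio_suspend(struct ath6kl *ar, struct sdio_func *func,
					    struct cfg80211_wowlan *wow)
	{
		mmc_pm_flag_t flags = sdio_get_host_pm_caps(func);

		if (wow && (flags & MMC_PM_KEEP_POWER) &&
		    (flags & MMC_PM_WAKE_SDIO_IRQ))
			/* card stays powered and can wake the host: WOW is usable */
			return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow);

		if (flags & MMC_PM_KEEP_POWER)
			/* powered but no wakeup source: put the firmware to deep sleep */
			return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP,
						       NULL);

		/* power will be cut; the firmware has to be rebooted on resume */
		return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER, NULL);
	}
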
+
+int ath6kl_cfg80211_resume(struct ath6kl *ar)
+{
+ int ret;
+
+ switch (ar->state) {
+ case ATH6KL_STATE_WOW:
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "wow mode resume\n");
+
+ ret = ath6kl_wow_resume(ar);
+ if (ret) {
+ ath6kl_warn("wow mode resume failed: %d\n", ret);
+ return ret;
+ }
+
+ ar->state = ATH6KL_STATE_ON;
+ break;
+
+ case ATH6KL_STATE_DEEPSLEEP:
+ if (ar->wmi->pwr_mode != ar->wmi->saved_pwr_mode) {
+ ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0,
+ ar->wmi->saved_pwr_mode);
+ if (ret) {
+ ath6kl_warn("wmi powermode command failed during resume: %d\n",
+ ret);
+ }
+ }
+
+ ar->state = ATH6KL_STATE_ON;
+
+ break;
+
+ case ATH6KL_STATE_CUTPOWER:
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "resume restoring power\n");
+
+ ret = ath6kl_init_hw_start(ar);
+ if (ret) {
+ ath6kl_warn("Failed to boot hw in resume: %d\n", ret);
+ return ret;
+ }
+ break;
+
+ case ATH6KL_STATE_SCHED_SCAN:
+ break;
+
+ default:
+ break;
+ }
+
return 0;
}
#ifdef CONFIG_PM
-static int ar6k_cfg80211_suspend(struct wiphy *wiphy,
+
+/* hif layer decides what suspend mode to use */
+static int __ath6kl_cfg80211_suspend(struct wiphy *wiphy,
struct cfg80211_wowlan *wow)
{
struct ath6kl *ar = wiphy_priv(wiphy);
- return ath6kl_hif_suspend(ar);
+ return ath6kl_hif_suspend(ar, wow);
+}
+
+static int __ath6kl_cfg80211_resume(struct wiphy *wiphy)
+{
+ struct ath6kl *ar = wiphy_priv(wiphy);
+
+ return ath6kl_hif_resume(ar);
+}
+
+/*
+ * FIXME: WOW suspend mode is selected if the host sdio controller supports
+ * both sdio irq wakeup and keep-power. The target pulls the sdio data line
+ * to wake up the host when a WOW pattern matches, which causes the sdio irq
+ * handler to be called on the host side and internally hits ath6kl's RX path.
+ *
+ * Since the sdio interrupt is not disabled, the RX path executes even before
+ * the host performs the actual resume operation from the PM module.
+ *
+ * In the current scenario, WOW resume should happen before we start
+ * processing any data from the target, so WOW resume has to be performed in
+ * the RX path. Ideally we should perform WOW resume only in the actual
+ * platform resume path; this area needs a bit of rework to avoid WOW resume
+ * in the RX path.
+ *
+ * ath6kl_check_wow_status() is called from ath6kl_rx().
+ */
+void ath6kl_check_wow_status(struct ath6kl *ar)
+{
+ if (ar->state == ATH6KL_STATE_WOW)
+ ath6kl_cfg80211_resume(ar);
+}
+
+#else
+
+void ath6kl_check_wow_status(struct ath6kl *ar)
+{
}
#endif
@@ -1425,14 +2014,14 @@ static int ath6kl_set_channel(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type)
{
- struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: center_freq=%u hw_value=%u\n",
__func__, chan->center_freq, chan->hw_value);
- ar->next_chan = chan->center_freq;
+ vif->next_chan = chan->center_freq;
return 0;
}
@@ -1444,9 +2033,10 @@ static bool ath6kl_is_p2p_ie(const u8 *pos)
pos[4] == 0x9a && pos[5] == 0x09;
}
-static int ath6kl_set_ap_probe_resp_ies(struct ath6kl *ar, const u8 *ies,
- size_t ies_len)
+static int ath6kl_set_ap_probe_resp_ies(struct ath6kl_vif *vif,
+ const u8 *ies, size_t ies_len)
{
+ struct ath6kl *ar = vif->ar;
const u8 *pos;
u8 *buf = NULL;
size_t len = 0;
@@ -1473,8 +2063,8 @@ static int ath6kl_set_ap_probe_resp_ies(struct ath6kl *ar, const u8 *ies,
}
}
- ret = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_PROBE_RESP,
- buf, len);
+ ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_FRAME_PROBE_RESP, buf, len);
kfree(buf);
return ret;
}
@@ -1483,36 +2073,39 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
struct beacon_parameters *info, bool add)
{
struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
struct ieee80211_mgmt *mgmt;
u8 *ies;
int ies_len;
struct wmi_connect_cmd p;
int res;
- int i;
+ int i, ret;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: add=%d\n", __func__, add);
- if (!ath6kl_cfg80211_ready(ar))
+ if (!ath6kl_cfg80211_ready(vif))
return -EIO;
- if (ar->next_mode != AP_NETWORK)
+ if (vif->next_mode != AP_NETWORK)
return -EOPNOTSUPP;
if (info->beacon_ies) {
- res = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_BEACON,
+ res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_FRAME_BEACON,
info->beacon_ies,
info->beacon_ies_len);
if (res)
return res;
}
if (info->proberesp_ies) {
- res = ath6kl_set_ap_probe_resp_ies(ar, info->proberesp_ies,
+ res = ath6kl_set_ap_probe_resp_ies(vif, info->proberesp_ies,
info->proberesp_ies_len);
if (res)
return res;
}
if (info->assocresp_ies) {
- res = ath6kl_wmi_set_appie_cmd(ar->wmi, WMI_FRAME_ASSOC_RESP,
+ res = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_FRAME_ASSOC_RESP,
info->assocresp_ies,
info->assocresp_ies_len);
if (res)
@@ -1539,12 +2132,14 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
if (info->ssid == NULL)
return -EINVAL;
- memcpy(ar->ssid, info->ssid, info->ssid_len);
- ar->ssid_len = info->ssid_len;
+ memcpy(vif->ssid, info->ssid, info->ssid_len);
+ vif->ssid_len = info->ssid_len;
if (info->hidden_ssid != NL80211_HIDDEN_SSID_NOT_IN_USE)
return -EOPNOTSUPP; /* TODO */
- ar->dot11_auth_mode = OPEN_AUTH;
+ ret = ath6kl_set_auth_type(vif, info->auth_type);
+ if (ret)
+ return ret;
memset(&p, 0, sizeof(p));
@@ -1566,7 +2161,7 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
}
if (p.auth_mode == 0)
p.auth_mode = NONE_AUTH;
- ar->auth_mode = p.auth_mode;
+ vif->auth_mode = p.auth_mode;
for (i = 0; i < info->crypto.n_ciphers_pairwise; i++) {
switch (info->crypto.ciphers_pairwise[i]) {
@@ -1580,13 +2175,16 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
case WLAN_CIPHER_SUITE_CCMP:
p.prwise_crypto_type |= AES_CRYPT;
break;
+ case WLAN_CIPHER_SUITE_SMS4:
+ p.prwise_crypto_type |= WAPI_CRYPT;
+ break;
}
}
if (p.prwise_crypto_type == 0) {
p.prwise_crypto_type = NONE_CRYPT;
- ath6kl_set_cipher(ar, 0, true);
+ ath6kl_set_cipher(vif, 0, true);
} else if (info->crypto.n_ciphers_pairwise == 1)
- ath6kl_set_cipher(ar, info->crypto.ciphers_pairwise[0], true);
+ ath6kl_set_cipher(vif, info->crypto.ciphers_pairwise[0], true);
switch (info->crypto.cipher_group) {
case WLAN_CIPHER_SUITE_WEP40:
@@ -1599,21 +2197,34 @@ static int ath6kl_ap_beacon(struct wiphy *wiphy, struct net_device *dev,
case WLAN_CIPHER_SUITE_CCMP:
p.grp_crypto_type = AES_CRYPT;
break;
+ case WLAN_CIPHER_SUITE_SMS4:
+ p.grp_crypto_type = WAPI_CRYPT;
+ break;
default:
p.grp_crypto_type = NONE_CRYPT;
break;
}
- ath6kl_set_cipher(ar, info->crypto.cipher_group, false);
+ ath6kl_set_cipher(vif, info->crypto.cipher_group, false);
p.nw_type = AP_NETWORK;
- ar->nw_type = ar->next_mode;
+ vif->nw_type = vif->next_mode;
- p.ssid_len = ar->ssid_len;
- memcpy(p.ssid, ar->ssid, ar->ssid_len);
- p.dot11_auth_mode = ar->dot11_auth_mode;
- p.ch = cpu_to_le16(ar->next_chan);
+ p.ssid_len = vif->ssid_len;
+ memcpy(p.ssid, vif->ssid, vif->ssid_len);
+ p.dot11_auth_mode = vif->dot11_auth_mode;
+ p.ch = cpu_to_le16(vif->next_chan);
- res = ath6kl_wmi_ap_profile_commit(ar->wmi, &p);
+ if (vif->wdev.iftype == NL80211_IFTYPE_P2P_GO) {
+ p.nw_subtype = SUBTYPE_P2PGO;
+ } else {
+ /*
+ * Due to a firmware limitation, it is not possible to
+ * do P2P mgmt operations in AP mode.
+ */
+ p.nw_subtype = SUBTYPE_NONE;
+ }
+
+ res = ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx, &p);
if (res < 0)
return res;
@@ -1635,14 +2246,15 @@ static int ath6kl_set_beacon(struct wiphy *wiphy, struct net_device *dev,
static int ath6kl_del_beacon(struct wiphy *wiphy, struct net_device *dev)
{
struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
- if (ar->nw_type != AP_NETWORK)
+ if (vif->nw_type != AP_NETWORK)
return -EOPNOTSUPP;
- if (!test_bit(CONNECTED, &ar->flag))
+ if (!test_bit(CONNECTED, &vif->flags))
return -ENOTCONN;
- ath6kl_wmi_disconnect_cmd(ar->wmi);
- clear_bit(CONNECTED, &ar->flag);
+ ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
+ clear_bit(CONNECTED, &vif->flags);
return 0;
}
@@ -1651,8 +2263,9 @@ static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
u8 *mac, struct station_parameters *params)
{
struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
- if (ar->nw_type != AP_NETWORK)
+ if (vif->nw_type != AP_NETWORK)
return -EOPNOTSUPP;
/* Use this only for authorizing/unauthorizing a station */
@@ -1660,10 +2273,10 @@ static int ath6kl_change_station(struct wiphy *wiphy, struct net_device *dev,
return -EOPNOTSUPP;
if (params->sta_flags_set & BIT(NL80211_STA_FLAG_AUTHORIZED))
- return ath6kl_wmi_ap_set_mlme(ar->wmi, WMI_AP_MLME_AUTHORIZE,
- mac, 0);
- return ath6kl_wmi_ap_set_mlme(ar->wmi, WMI_AP_MLME_UNAUTHORIZE, mac,
- 0);
+ return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx,
+ WMI_AP_MLME_AUTHORIZE, mac, 0);
+ return ath6kl_wmi_ap_set_mlme(ar->wmi, vif->fw_vif_idx,
+ WMI_AP_MLME_UNAUTHORIZE, mac, 0);
}
static int ath6kl_remain_on_channel(struct wiphy *wiphy,
@@ -1674,13 +2287,20 @@ static int ath6kl_remain_on_channel(struct wiphy *wiphy,
u64 *cookie)
{
struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
+ u32 id;
/* TODO: if already pending or ongoing remain-on-channel,
* return -EBUSY */
- *cookie = 1; /* only a single pending request is supported */
+ id = ++vif->last_roc_id;
+ if (id == 0) {
+ /* Do not use 0 as the cookie value */
+ id = ++vif->last_roc_id;
+ }
+ *cookie = id;
- return ath6kl_wmi_remain_on_chnl_cmd(ar->wmi, chan->center_freq,
- duration);
+ return ath6kl_wmi_remain_on_chnl_cmd(ar->wmi, vif->fw_vif_idx,
+ chan->center_freq, duration);
}
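
The remain-on-channel cookie above and send_action_id in ath6kl_mgmt_tx() further down rely on the same trick: 0 is a reserved value in the WMI command, so when the free-running counter wraps to 0 it is bumped once more. A standalone sketch of the pattern:

	#include <stdint.h>
	#include <stdio.h>

	/* Return the next non-zero id from a free-running 32-bit counter. */
	static uint32_t next_nonzero_id(uint32_t *counter)
	{
		uint32_t id = ++(*counter);

		if (id == 0)		/* 0 is reserved; skip it on wrap-around */
			id = ++(*counter);
		return id;
	}

	int main(void)
	{
		uint32_t ctr = 0xFFFFFFFE;

		printf("%u\n", next_nonzero_id(&ctr));	/* 4294967295 */
		printf("%u\n", next_nonzero_id(&ctr));	/* wraps past 0, prints 1 */
		return 0;
	}
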
static int ath6kl_cancel_remain_on_channel(struct wiphy *wiphy,
@@ -1688,16 +2308,20 @@ static int ath6kl_cancel_remain_on_channel(struct wiphy *wiphy,
u64 cookie)
{
struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
- if (cookie != 1)
+ if (cookie != vif->last_roc_id)
return -ENOENT;
+ vif->last_cancel_roc_id = cookie;
- return ath6kl_wmi_cancel_remain_on_chnl_cmd(ar->wmi);
+ return ath6kl_wmi_cancel_remain_on_chnl_cmd(ar->wmi, vif->fw_vif_idx);
}
-static int ath6kl_send_go_probe_resp(struct ath6kl *ar, const u8 *buf,
- size_t len, unsigned int freq)
+static int ath6kl_send_go_probe_resp(struct ath6kl_vif *vif,
+ const u8 *buf, size_t len,
+ unsigned int freq)
{
+ struct ath6kl *ar = vif->ar;
const u8 *pos;
u8 *p2p;
int p2p_len;
@@ -1724,8 +2348,8 @@ static int ath6kl_send_go_probe_resp(struct ath6kl *ar, const u8 *buf,
pos += 2 + pos[1];
}
- ret = ath6kl_wmi_send_probe_response_cmd(ar->wmi, freq, mgmt->da,
- p2p, p2p_len);
+ ret = ath6kl_wmi_send_probe_response_cmd(ar->wmi, vif->fw_vif_idx, freq,
+ mgmt->da, p2p, p2p_len);
kfree(p2p);
return ret;
}
@@ -1734,44 +2358,61 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan, bool offchan,
enum nl80211_channel_type channel_type,
bool channel_type_valid, unsigned int wait,
- const u8 *buf, size_t len, bool no_cck, u64 *cookie)
+ const u8 *buf, size_t len, bool no_cck,
+ bool dont_wait_for_ack, u64 *cookie)
{
struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
u32 id;
const struct ieee80211_mgmt *mgmt;
mgmt = (const struct ieee80211_mgmt *) buf;
if (buf + len >= mgmt->u.probe_resp.variable &&
- ar->nw_type == AP_NETWORK && test_bit(CONNECTED, &ar->flag) &&
+ vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) &&
ieee80211_is_probe_resp(mgmt->frame_control)) {
/*
* Send Probe Response frame in AP mode using a separate WMI
* command to allow the target to fill in the generic IEs.
*/
*cookie = 0; /* TX status not supported */
- return ath6kl_send_go_probe_resp(ar, buf, len,
+ return ath6kl_send_go_probe_resp(vif, buf, len,
chan->center_freq);
}
- id = ar->send_action_id++;
+ id = vif->send_action_id++;
if (id == 0) {
/*
* 0 is a reserved value in the WMI command and shall not be
* used for the command.
*/
- id = ar->send_action_id++;
+ id = vif->send_action_id++;
}
*cookie = id;
- return ath6kl_wmi_send_action_cmd(ar->wmi, id, chan->center_freq, wait,
- buf, len);
+
+ if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
+ ar->fw_capabilities)) {
+ /*
+ * If the firmware is capable of doing P2P mgmt operations
+ * using the station interface, send additional information
+ * like the supported rates to advertise and the xmit rates
+ * for probe requests.
+ */
+ return ath6kl_wmi_send_mgmt_cmd(ar->wmi, vif->fw_vif_idx, id,
+ chan->center_freq, wait,
+ buf, len, no_cck);
+ } else {
+ return ath6kl_wmi_send_action_cmd(ar->wmi, vif->fw_vif_idx, id,
+ chan->center_freq, wait,
+ buf, len);
+ }
}
static void ath6kl_mgmt_frame_register(struct wiphy *wiphy,
struct net_device *dev,
u16 frame_type, bool reg)
{
- struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: frame_type=0x%x reg=%d\n",
__func__, frame_type, reg);
@@ -1781,8 +2422,92 @@ static void ath6kl_mgmt_frame_register(struct wiphy *wiphy,
* we cannot send WMI_PROBE_REQ_REPORT_CMD here. Instead, we
* hardcode target to report Probe Request frames all the time.
*/
- ar->probe_req_report = reg;
+ vif->probe_req_report = reg;
+ }
+}
+
+static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
+ struct net_device *dev,
+ struct cfg80211_sched_scan_request *request)
+{
+ struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
+ u16 interval;
+ int ret;
+ u8 i;
+
+ if (ar->state != ATH6KL_STATE_ON)
+ return -EIO;
+
+ if (vif->sme_state != SME_DISCONNECTED)
+ return -EBUSY;
+
+ for (i = 0; i < ar->wiphy->max_sched_scan_ssids; i++) {
+ ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
+ i, DISABLE_SSID_FLAG,
+ 0, NULL);
}
+
+ /* fw uses seconds; also make sure that it's > 0 */
+ interval = max_t(u16, 1, request->interval / 1000);
+
+ ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx,
+ interval, interval,
+ 10, 0, 0, 0, 3, 0, 0, 0);
+
+ if (request->n_ssids && request->ssids[0].ssid_len) {
+ for (i = 0; i < request->n_ssids; i++) {
+ ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
+ i, SPECIFIC_SSID_FLAG,
+ request->ssids[i].ssid_len,
+ request->ssids[i].ssid);
+ }
+ }
+
+ ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx,
+ ATH6KL_WOW_MODE_ENABLE,
+ WOW_FILTER_SSID,
+ WOW_HOST_REQ_DELAY);
+ if (ret) {
+ ath6kl_warn("Failed to enable wow with ssid filter: %d\n", ret);
+ return ret;
+ }
+
+ /* this also clears IE in fw if it's not set */
+ ret = ath6kl_wmi_set_appie_cmd(ar->wmi, vif->fw_vif_idx,
+ WMI_FRAME_PROBE_REQ,
+ request->ie, request->ie_len);
+ if (ret) {
+ ath6kl_warn("Failed to set probe request IE for scheduled scan: %d",
+ ret);
+ return ret;
+ }
+
+ ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
+ ATH6KL_HOST_MODE_ASLEEP);
+ if (ret) {
+ ath6kl_warn("Failed to enable host sleep mode for sched scan: %d\n",
+ ret);
+ return ret;
+ }
+
+ ar->state = ATH6KL_STATE_SCHED_SCAN;
+
+ return ret;
+}
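
The interval handling in ath6kl_cfg80211_sscan_start() above turns the cfg80211 scheduled-scan interval (milliseconds) into the whole seconds the firmware expects and never lets it drop to 0. The same arithmetic as max_t(u16, 1, ...) in isolation:

	#include <stdint.h>
	#include <stdio.h>

	/* cfg80211 passes the sched scan interval in ms; the firmware wants whole
	 * seconds and treats 0 as invalid, so truncate to u16 and clamp to >= 1. */
	static uint16_t sscan_interval_secs(uint32_t interval_ms)
	{
		uint16_t secs = (uint16_t)(interval_ms / 1000);

		return secs ? secs : 1;
	}

	int main(void)
	{
		printf("%u\n", sscan_interval_secs(500));	/* 1  */
		printf("%u\n", sscan_interval_secs(10000));	/* 10 */
		return 0;
	}
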
+
+static int ath6kl_cfg80211_sscan_stop(struct wiphy *wiphy,
+ struct net_device *dev)
+{
+ struct ath6kl_vif *vif = netdev_priv(dev);
+ bool stopped;
+
+ stopped = __ath6kl_cfg80211_sscan_stop(vif);
+
+ if (!stopped)
+ return -EIO;
+
+ return 0;
}
static const struct ieee80211_txrx_stypes
@@ -1808,6 +2533,8 @@ ath6kl_mgmt_stypes[NUM_NL80211_IFTYPES] = {
};
static struct cfg80211_ops ath6kl_cfg80211_ops = {
+ .add_virtual_intf = ath6kl_cfg80211_add_iface,
+ .del_virtual_intf = ath6kl_cfg80211_del_iface,
.change_virtual_intf = ath6kl_cfg80211_change_iface,
.scan = ath6kl_cfg80211_scan,
.connect = ath6kl_cfg80211_connect,
@@ -1828,7 +2555,8 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = {
.flush_pmksa = ath6kl_flush_pmksa,
CFG80211_TESTMODE_CMD(ath6kl_tm_cmd)
#ifdef CONFIG_PM
- .suspend = ar6k_cfg80211_suspend,
+ .suspend = __ath6kl_cfg80211_suspend,
+ .resume = __ath6kl_cfg80211_resume,
#endif
.set_channel = ath6kl_set_channel,
.add_beacon = ath6kl_add_beacon,
@@ -1839,78 +2567,277 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = {
.cancel_remain_on_channel = ath6kl_cancel_remain_on_channel,
.mgmt_tx = ath6kl_mgmt_tx,
.mgmt_frame_register = ath6kl_mgmt_frame_register,
+ .sched_scan_start = ath6kl_cfg80211_sscan_start,
+ .sched_scan_stop = ath6kl_cfg80211_sscan_stop,
};
-struct wireless_dev *ath6kl_cfg80211_init(struct device *dev)
+void ath6kl_cfg80211_stop(struct ath6kl_vif *vif)
{
- int ret = 0;
- struct wireless_dev *wdev;
- struct ath6kl *ar;
+ ath6kl_cfg80211_sscan_disable(vif);
- wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
- if (!wdev) {
- ath6kl_err("couldn't allocate wireless device\n");
- return NULL;
+ switch (vif->sme_state) {
+ case SME_DISCONNECTED:
+ break;
+ case SME_CONNECTING:
+ cfg80211_connect_result(vif->ndev, vif->bssid, NULL, 0,
+ NULL, 0,
+ WLAN_STATUS_UNSPECIFIED_FAILURE,
+ GFP_KERNEL);
+ break;
+ case SME_CONNECTED:
+ cfg80211_disconnected(vif->ndev, 0, NULL, 0, GFP_KERNEL);
+ break;
+ }
+
+ if (test_bit(CONNECTED, &vif->flags) ||
+ test_bit(CONNECT_PEND, &vif->flags))
+ ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx);
+
+ vif->sme_state = SME_DISCONNECTED;
+ clear_bit(CONNECTED, &vif->flags);
+ clear_bit(CONNECT_PEND, &vif->flags);
+
+ /* disable scanning */
+ if (ath6kl_wmi_scanparams_cmd(vif->ar->wmi, vif->fw_vif_idx, 0xFFFF,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0) != 0)
+ ath6kl_warn("failed to disable scan during stop\n");
+
+ ath6kl_cfg80211_scan_complete_event(vif, true);
+}
+
+void ath6kl_cfg80211_stop_all(struct ath6kl *ar)
+{
+ struct ath6kl_vif *vif;
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif) {
+ /* save the current power mode before enabling power save */
+ ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
+
+ if (ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER) != 0)
+ ath6kl_warn("ath6kl_deep_sleep_enable: "
+ "wmi_powermode_cmd failed\n");
+ return;
}
+ /*
+ * FIXME: we should take ar->list_lock to protect changes in the
+ * vif_list, but that's not trivial to do as ath6kl_cfg80211_stop()
+ * sleeps.
+ */
+ list_for_each_entry(vif, &ar->vif_list, list)
+ ath6kl_cfg80211_stop(vif);
+}
+
+struct ath6kl *ath6kl_core_alloc(struct device *dev)
+{
+ struct ath6kl *ar;
+ struct wiphy *wiphy;
+ u8 ctr;
+
/* create a new wiphy for use with cfg80211 */
- wdev->wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl));
- if (!wdev->wiphy) {
+ wiphy = wiphy_new(&ath6kl_cfg80211_ops, sizeof(struct ath6kl));
+
+ if (!wiphy) {
ath6kl_err("couldn't allocate wiphy device\n");
- kfree(wdev);
return NULL;
}
- ar = wiphy_priv(wdev->wiphy);
+ ar = wiphy_priv(wiphy);
ar->p2p = !!ath6kl_p2p;
+ ar->wiphy = wiphy;
+ ar->dev = dev;
- wdev->wiphy->mgmt_stypes = ath6kl_mgmt_stypes;
+ ar->vif_max = 1;
- wdev->wiphy->max_remain_on_channel_duration = 5000;
+ ar->max_norm_iface = 1;
+
+ spin_lock_init(&ar->lock);
+ spin_lock_init(&ar->mcastpsq_lock);
+ spin_lock_init(&ar->list_lock);
+
+ init_waitqueue_head(&ar->event_wq);
+ sema_init(&ar->sem, 1);
+
+ INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue);
+ INIT_LIST_HEAD(&ar->vif_list);
+
+ clear_bit(WMI_ENABLED, &ar->flag);
+ clear_bit(SKIP_SCAN, &ar->flag);
+ clear_bit(DESTROY_IN_PROGRESS, &ar->flag);
+
+ ar->listen_intvl_t = A_DEFAULT_LISTEN_INTERVAL;
+ ar->listen_intvl_b = 0;
+ ar->tx_pwr = 0;
+
+ ar->intra_bss = 1;
+ ar->lrssi_roam_threshold = DEF_LRSSI_ROAM_THRESHOLD;
+
+ ar->state = ATH6KL_STATE_OFF;
+
+ memset((u8 *)ar->sta_list, 0,
+ AP_MAX_NUM_STA * sizeof(struct ath6kl_sta));
+
+ /* Init the PS queues */
+ for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
+ spin_lock_init(&ar->sta_list[ctr].psq_lock);
+ skb_queue_head_init(&ar->sta_list[ctr].psq);
+ }
+
+ skb_queue_head_init(&ar->mcastpsq);
+
+ memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3);
+
+ return ar;
+}
+
+int ath6kl_register_ieee80211_hw(struct ath6kl *ar)
+{
+ struct wiphy *wiphy = ar->wiphy;
+ int ret;
+
+ wiphy->mgmt_stypes = ath6kl_mgmt_stypes;
+
+ wiphy->max_remain_on_channel_duration = 5000;
/* set device pointer for wiphy */
- set_wiphy_dev(wdev->wiphy, dev);
+ set_wiphy_dev(wiphy, ar->dev);
- wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP);
+ wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_AP);
if (ar->p2p) {
- wdev->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_P2P_CLIENT);
+ wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT);
}
- /* max num of ssids that can be probed during scanning */
- wdev->wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX;
- wdev->wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */
- wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
- wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
- wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
-
- wdev->wiphy->cipher_suites = cipher_suites;
- wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
- ret = wiphy_register(wdev->wiphy);
+ /* max num of ssids that can be probed during scanning */
+ wiphy->max_scan_ssids = MAX_PROBED_SSID_INDEX;
+ wiphy->max_scan_ie_len = 1000; /* FIX: what is correct limit? */
+ wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
+ wiphy->bands[IEEE80211_BAND_5GHZ] = &ath6kl_band_5ghz;
+ wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
+
+ wiphy->cipher_suites = cipher_suites;
+ wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
+
+ wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE |
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_EAP_IDENTITY_REQ |
+ WIPHY_WOWLAN_4WAY_HANDSHAKE;
+ wiphy->wowlan.n_patterns = WOW_MAX_FILTERS_PER_LIST;
+ wiphy->wowlan.pattern_min_len = 1;
+ wiphy->wowlan.pattern_max_len = WOW_PATTERN_SIZE;
+
+ wiphy->max_sched_scan_ssids = 10;
+
+ ret = wiphy_register(wiphy);
if (ret < 0) {
ath6kl_err("couldn't register wiphy device\n");
- wiphy_free(wdev->wiphy);
- kfree(wdev);
- return NULL;
+ return ret;
}
- return wdev;
+ return 0;
}
-void ath6kl_cfg80211_deinit(struct ath6kl *ar)
+static int ath6kl_init_if_data(struct ath6kl_vif *vif)
{
- struct wireless_dev *wdev = ar->wdev;
-
- if (ar->scan_req) {
- cfg80211_scan_done(ar->scan_req, true);
- ar->scan_req = NULL;
+ vif->aggr_cntxt = aggr_init(vif->ndev);
+ if (!vif->aggr_cntxt) {
+ ath6kl_err("failed to initialize aggr\n");
+ return -ENOMEM;
}
- if (!wdev)
- return;
+ setup_timer(&vif->disconnect_timer, disconnect_timer_handler,
+ (unsigned long) vif->ndev);
+ setup_timer(&vif->sched_scan_timer, ath6kl_wmi_sscan_timer,
+ (unsigned long) vif);
+
+ set_bit(WMM_ENABLED, &vif->flags);
+ spin_lock_init(&vif->if_lock);
- wiphy_unregister(wdev->wiphy);
- wiphy_free(wdev->wiphy);
- kfree(wdev);
+ return 0;
+}
+
+void ath6kl_deinit_if_data(struct ath6kl_vif *vif)
+{
+ struct ath6kl *ar = vif->ar;
+
+ aggr_module_destroy(vif->aggr_cntxt);
+
+ ar->avail_idx_map |= BIT(vif->fw_vif_idx);
+
+ if (vif->nw_type == ADHOC_NETWORK)
+ ar->ibss_if_active = false;
+
+ unregister_netdevice(vif->ndev);
+
+ ar->num_vif--;
+}
+
+struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
+ enum nl80211_iftype type, u8 fw_vif_idx,
+ u8 nw_type)
+{
+ struct net_device *ndev;
+ struct ath6kl_vif *vif;
+
+ ndev = alloc_netdev(sizeof(*vif), name, ether_setup);
+ if (!ndev)
+ return NULL;
+
+ vif = netdev_priv(ndev);
+ ndev->ieee80211_ptr = &vif->wdev;
+ vif->wdev.wiphy = ar->wiphy;
+ vif->ar = ar;
+ vif->ndev = ndev;
+ SET_NETDEV_DEV(ndev, wiphy_dev(vif->wdev.wiphy));
+ vif->wdev.netdev = ndev;
+ vif->wdev.iftype = type;
+ vif->fw_vif_idx = fw_vif_idx;
+ vif->nw_type = vif->next_mode = nw_type;
+
+ memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
+ if (fw_vif_idx != 0)
+ ndev->dev_addr[0] = (ndev->dev_addr[0] ^ (1 << fw_vif_idx)) |
+ 0x2;
+
+ init_netdev(ndev);
+
+ ath6kl_init_control_info(vif);
+
+ /* TODO: Pass interface specific pointer instead of ar */
+ if (ath6kl_init_if_data(vif))
+ goto err;
+
+ if (register_netdevice(ndev))
+ goto err;
+
+ ar->avail_idx_map &= ~BIT(fw_vif_idx);
+ vif->sme_state = SME_DISCONNECTED;
+ set_bit(WLAN_ENABLED, &vif->flags);
+ ar->wlan_pwr_state = WLAN_POWER_STATE_ON;
+ set_bit(NETDEV_REGISTERED, &vif->flags);
+
+ if (type == NL80211_IFTYPE_ADHOC)
+ ar->ibss_if_active = true;
+
+ spin_lock_bh(&ar->list_lock);
+ list_add_tail(&vif->list, &ar->vif_list);
+ spin_unlock_bh(&ar->list_lock);
+
+ return ndev;
+
+err:
+ aggr_module_destroy(vif->aggr_cntxt);
+ free_netdev(ndev);
+ return NULL;
+}
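
ath6kl_interface_add() above gives every additional vif its own MAC address by flipping bit fw_vif_idx in the first octet of the hardware MAC and setting the locally administered bit (0x02); index 0 keeps the original address. A standalone sketch of that derivation (ETH_ALEN is redefined here only to keep the example self-contained):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define ETH_ALEN 6

	/* Derive the MAC address for firmware vif index idx from the base MAC. */
	static void derive_vif_mac(const uint8_t base[ETH_ALEN], unsigned int idx,
				   uint8_t out[ETH_ALEN])
	{
		memcpy(out, base, ETH_ALEN);
		if (idx != 0)
			out[0] = (out[0] ^ (1 << idx)) | 0x02;
	}

	int main(void)
	{
		uint8_t base[ETH_ALEN] = { 0x00, 0x03, 0x7f, 0x11, 0x22, 0x33 };
		uint8_t mac[ETH_ALEN];
		int i;

		derive_vif_mac(base, 1, mac);
		for (i = 0; i < ETH_ALEN; i++)
			printf("%02x%s", mac[i], i == ETH_ALEN - 1 ? "\n" : ":");
		/* prints 02:03:7f:11:22:33 */
		return 0;
	}
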
+
+void ath6kl_deinit_ieee80211_hw(struct ath6kl *ar)
+{
+ wiphy_unregister(ar->wiphy);
+ wiphy_free(ar->wiphy);
}
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.h b/drivers/net/wireless/ath/ath6kl/cfg80211.h
index a84adc2..81f20a5 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.h
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.h
@@ -17,23 +17,43 @@
#ifndef ATH6KL_CFG80211_H
#define ATH6KL_CFG80211_H
-struct wireless_dev *ath6kl_cfg80211_init(struct device *dev);
-void ath6kl_cfg80211_deinit(struct ath6kl *ar);
+enum ath6kl_cfg_suspend_mode {
+ ATH6KL_CFG_SUSPEND_DEEPSLEEP,
+ ATH6KL_CFG_SUSPEND_CUTPOWER,
+ ATH6KL_CFG_SUSPEND_WOW,
+ ATH6KL_CFG_SUSPEND_SCHED_SCAN,
+};
-void ath6kl_cfg80211_scan_complete_event(struct ath6kl *ar, int status);
+struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
+ enum nl80211_iftype type,
+ u8 fw_vif_idx, u8 nw_type);
+int ath6kl_register_ieee80211_hw(struct ath6kl *ar);
+struct ath6kl *ath6kl_core_alloc(struct device *dev);
+void ath6kl_deinit_ieee80211_hw(struct ath6kl *ar);
-void ath6kl_cfg80211_connect_event(struct ath6kl *ar, u16 channel,
+void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted);
+
+void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
u8 *bssid, u16 listen_intvl,
u16 beacon_intvl,
enum network_type nw_type,
u8 beacon_ie_len, u8 assoc_req_len,
u8 assoc_resp_len, u8 *assoc_info);
-void ath6kl_cfg80211_disconnect_event(struct ath6kl *ar, u8 reason,
+void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
u8 *bssid, u8 assoc_resp_len,
u8 *assoc_info, u16 proto_reason);
-void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl *ar, u8 keyid,
+void ath6kl_cfg80211_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid,
bool ismcast);
+int ath6kl_cfg80211_suspend(struct ath6kl *ar,
+ enum ath6kl_cfg_suspend_mode mode,
+ struct cfg80211_wowlan *wow);
+
+int ath6kl_cfg80211_resume(struct ath6kl *ar);
+
+void ath6kl_cfg80211_stop(struct ath6kl_vif *vif);
+void ath6kl_cfg80211_stop_all(struct ath6kl *ar);
+
#endif /* ATH6KL_CFG80211_H */
diff --git a/drivers/net/wireless/ath/ath6kl/common.h b/drivers/net/wireless/ath/ath6kl/common.h
index b92f0e5..bfd6597 100644
--- a/drivers/net/wireless/ath/ath6kl/common.h
+++ b/drivers/net/wireless/ath/ath6kl/common.h
@@ -23,8 +23,6 @@
extern int ath6kl_printk(const char *level, const char *fmt, ...);
-#define A_CACHE_LINE_PAD 128
-
/*
* Reflects the version of binary interface exposed by ATH6KL target
* firmware. Needs to be incremented by 1 for any change in the firmware
@@ -73,25 +71,16 @@ enum crypto_type {
WEP_CRYPT = 0x02,
TKIP_CRYPT = 0x04,
AES_CRYPT = 0x08,
+ WAPI_CRYPT = 0x10,
};
struct htc_endpoint_credit_dist;
struct ath6kl;
enum htc_credit_dist_reason;
-struct htc_credit_state_info;
+struct ath6kl_htc_credit_info;
-int ath6k_setup_credit_dist(void *htc_handle,
- struct htc_credit_state_info *cred_info);
-void ath6k_credit_distribute(struct htc_credit_state_info *cred_inf,
- struct list_head *epdist_list,
- enum htc_credit_dist_reason reason);
-void ath6k_credit_init(struct htc_credit_state_info *cred_inf,
- struct list_head *ep_list,
- int tot_credits);
-void ath6k_seek_credits(struct htc_credit_state_info *cred_inf,
- struct htc_endpoint_credit_dist *ep_dist);
struct ath6kl *ath6kl_core_alloc(struct device *sdev);
int ath6kl_core_init(struct ath6kl *ar);
-int ath6kl_unavail_ev(struct ath6kl *ar);
+void ath6kl_core_cleanup(struct ath6kl *ar);
struct sk_buff *ath6kl_buf_alloc(int size);
#endif /* COMMON_H */
diff --git a/drivers/net/wireless/ath/ath6kl/core.h b/drivers/net/wireless/ath/ath6kl/core.h
index 6d8a484..c863a28 100644
--- a/drivers/net/wireless/ath/ath6kl/core.h
+++ b/drivers/net/wireless/ath/ath6kl/core.h
@@ -70,10 +70,20 @@ enum ath6kl_fw_ie_type {
ATH6KL_FW_IE_RESERVED_RAM_SIZE = 5,
ATH6KL_FW_IE_CAPABILITIES = 6,
ATH6KL_FW_IE_PATCH_ADDR = 7,
+ ATH6KL_FW_IE_BOARD_ADDR = 8,
+ ATH6KL_FW_IE_VIF_MAX = 9,
};
enum ath6kl_fw_capability {
ATH6KL_FW_CAPABILITY_HOST_P2P = 0,
+ ATH6KL_FW_CAPABILITY_SCHED_SCAN = 1,
+
+ /*
+ * Firmware is capable of supporting P2P mgmt operations on a
+ * station interface. After group formation, the station
+ * interface will become a P2P client/GO interface as the case may be
+ */
+ ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
/* this needs to be last */
ATH6KL_FW_CAPABILITY_MAX,
@@ -88,37 +98,47 @@ struct ath6kl_fw_ie {
};
/* AR6003 1.0 definitions */
-#define AR6003_REV1_VERSION 0x300002ba
+#define AR6003_HW_1_0_VERSION 0x300002ba
/* AR6003 2.0 definitions */
-#define AR6003_REV2_VERSION 0x30000384
-#define AR6003_REV2_PATCH_DOWNLOAD_ADDRESS 0x57e910
-#define AR6003_REV2_OTP_FILE "ath6k/AR6003/hw2.0/otp.bin.z77"
-#define AR6003_REV2_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athwlan.bin.z77"
-#define AR6003_REV2_TCMD_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athtcmd_ram.bin"
-#define AR6003_REV2_PATCH_FILE "ath6k/AR6003/hw2.0/data.patch.bin"
-#define AR6003_REV2_FIRMWARE_2_FILE "ath6k/AR6003/hw2.0/fw-2.bin"
-#define AR6003_REV2_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.bin"
-#define AR6003_REV2_DEFAULT_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.SD31.bin"
+#define AR6003_HW_2_0_VERSION 0x30000384
+#define AR6003_HW_2_0_PATCH_DOWNLOAD_ADDRESS 0x57e910
+#define AR6003_HW_2_0_OTP_FILE "ath6k/AR6003/hw2.0/otp.bin.z77"
+#define AR6003_HW_2_0_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athwlan.bin.z77"
+#define AR6003_HW_2_0_TCMD_FIRMWARE_FILE "ath6k/AR6003/hw2.0/athtcmd_ram.bin"
+#define AR6003_HW_2_0_PATCH_FILE "ath6k/AR6003/hw2.0/data.patch.bin"
+#define AR6003_HW_2_0_FIRMWARE_2_FILE "ath6k/AR6003/hw2.0/fw-2.bin"
+#define AR6003_HW_2_0_BOARD_DATA_FILE "ath6k/AR6003/hw2.0/bdata.bin"
+#define AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE \
+ "ath6k/AR6003/hw2.0/bdata.SD31.bin"
/* AR6003 3.0 definitions */
-#define AR6003_REV3_VERSION 0x30000582
-#define AR6003_REV3_OTP_FILE "ath6k/AR6003/hw2.1.1/otp.bin"
-#define AR6003_REV3_FIRMWARE_FILE "ath6k/AR6003/hw2.1.1/athwlan.bin"
-#define AR6003_REV3_TCMD_FIRMWARE_FILE "ath6k/AR6003/hw2.1.1/athtcmd_ram.bin"
-#define AR6003_REV3_PATCH_FILE "ath6k/AR6003/hw2.1.1/data.patch.bin"
-#define AR6003_REV3_FIRMWARE_2_FILE "ath6k/AR6003/hw2.1.1/fw-2.bin"
-#define AR6003_REV3_BOARD_DATA_FILE "ath6k/AR6003/hw2.1.1/bdata.bin"
-#define AR6003_REV3_DEFAULT_BOARD_DATA_FILE \
- "ath6k/AR6003/hw2.1.1/bdata.SD31.bin"
+#define AR6003_HW_2_1_1_VERSION 0x30000582
+#define AR6003_HW_2_1_1_OTP_FILE "ath6k/AR6003/hw2.1.1/otp.bin"
+#define AR6003_HW_2_1_1_FIRMWARE_FILE "ath6k/AR6003/hw2.1.1/athwlan.bin"
+#define AR6003_HW_2_1_1_TCMD_FIRMWARE_FILE \
+ "ath6k/AR6003/hw2.1.1/athtcmd_ram.bin"
+#define AR6003_HW_2_1_1_PATCH_FILE "ath6k/AR6003/hw2.1.1/data.patch.bin"
+#define AR6003_HW_2_1_1_FIRMWARE_2_FILE "ath6k/AR6003/hw2.1.1/fw-2.bin"
+#define AR6003_HW_2_1_1_BOARD_DATA_FILE "ath6k/AR6003/hw2.1.1/bdata.bin"
+#define AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE \
+ "ath6k/AR6003/hw2.1.1/bdata.SD31.bin"
/* AR6004 1.0 definitions */
-#define AR6004_REV1_VERSION 0x30000623
-#define AR6004_REV1_FIRMWARE_FILE "ath6k/AR6004/hw6.1/fw.ram.bin"
-#define AR6004_REV1_FIRMWARE_2_FILE "ath6k/AR6004/hw6.1/fw-2.bin"
-#define AR6004_REV1_BOARD_DATA_FILE "ath6k/AR6004/hw6.1/bdata.bin"
-#define AR6004_REV1_DEFAULT_BOARD_DATA_FILE "ath6k/AR6004/hw6.1/bdata.DB132.bin"
-#define AR6004_REV1_EPPING_FIRMWARE_FILE "ath6k/AR6004/hw6.1/endpointping.bin"
+#define AR6004_HW_1_0_VERSION 0x30000623
+#define AR6004_HW_1_0_FIRMWARE_2_FILE "ath6k/AR6004/hw1.0/fw-2.bin"
+#define AR6004_HW_1_0_FIRMWARE_FILE "ath6k/AR6004/hw1.0/fw.ram.bin"
+#define AR6004_HW_1_0_BOARD_DATA_FILE "ath6k/AR6004/hw1.0/bdata.bin"
+#define AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE \
+ "ath6k/AR6004/hw1.0/bdata.DB132.bin"
+
+/* AR6004 1.1 definitions */
+#define AR6004_HW_1_1_VERSION 0x30000001
+#define AR6004_HW_1_1_FIRMWARE_2_FILE "ath6k/AR6004/hw1.1/fw-2.bin"
+#define AR6004_HW_1_1_FIRMWARE_FILE "ath6k/AR6004/hw1.1/fw.ram.bin"
+#define AR6004_HW_1_1_BOARD_DATA_FILE "ath6k/AR6004/hw1.1/bdata.bin"
+#define AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE \
+ "ath6k/AR6004/hw1.1/bdata.DB132.bin"
/* Per STA data, used in AP mode */
#define STA_PS_AWAKE BIT(0)
@@ -166,6 +186,7 @@ struct ath6kl_fw_ie {
#define ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN BIT(1)
#define ATH6KL_CONF_ENABLE_11N BIT(2)
#define ATH6KL_CONF_ENABLE_TX_BURST BIT(3)
+#define ATH6KL_CONF_SUSPEND_CUTPOWER BIT(4)
enum wlan_low_pwr_state {
WLAN_POWER_STATE_ON,
@@ -271,6 +292,8 @@ struct ath6kl_bmi {
u32 cmd_credits;
bool done_sent;
u8 *cmd_buf;
+ u32 max_data_size;
+ u32 max_cmd_size;
};
struct target_stats {
@@ -380,40 +403,42 @@ struct ath6kl_req_key {
u8 key_len;
};
-/* Flag info */
-#define WMI_ENABLED 0
-#define WMI_READY 1
-#define CONNECTED 2
-#define STATS_UPDATE_PEND 3
-#define CONNECT_PEND 4
-#define WMM_ENABLED 5
-#define NETQ_STOPPED 6
-#define WMI_CTRL_EP_FULL 7
-#define DTIM_EXPIRED 8
-#define DESTROY_IN_PROGRESS 9
-#define NETDEV_REGISTERED 10
-#define SKIP_SCAN 11
-#define WLAN_ENABLED 12
-#define TESTMODE 13
-#define CLEAR_BSSFILTER_ON_BEACON 14
-#define DTIM_PERIOD_AVAIL 15
+enum ath6kl_hif_type {
+ ATH6KL_HIF_TYPE_SDIO,
+ ATH6KL_HIF_TYPE_USB,
+};
-struct ath6kl {
- struct device *dev;
- struct net_device *net_dev;
- struct ath6kl_bmi bmi;
- const struct ath6kl_hif_ops *hif_ops;
- struct wmi *wmi;
- int tx_pending[ENDPOINT_MAX];
- int total_tx_data_pend;
- struct htc_target *htc_target;
- void *hif_priv;
- spinlock_t lock;
- struct semaphore sem;
+/*
+ * Driver's maximum limit, note that some firmwares support only one vif
+ * and the runtime (current) limit must be checked from ar->vif_max.
+ */
+#define ATH6KL_VIF_MAX 3
+
+/* vif flags info */
+enum ath6kl_vif_state {
+ CONNECTED,
+ CONNECT_PEND,
+ WMM_ENABLED,
+ NETQ_STOPPED,
+ DTIM_EXPIRED,
+ NETDEV_REGISTERED,
+ CLEAR_BSSFILTER_ON_BEACON,
+ DTIM_PERIOD_AVAIL,
+ WLAN_ENABLED,
+ STATS_UPDATE_PEND,
+};
+
+struct ath6kl_vif {
+ struct list_head list;
+ struct wireless_dev wdev;
+ struct net_device *ndev;
+ struct ath6kl *ar;
+ /* Lock to protect vif specific net_stats and flags */
+ spinlock_t if_lock;
+ u8 fw_vif_idx;
+ unsigned long flags;
int ssid_len;
u8 ssid[IEEE80211_MAX_SSID_LEN];
- u8 next_mode;
- u8 nw_type;
u8 dot11_auth_mode;
u8 auth_mode;
u8 prwise_crypto;
@@ -421,21 +446,91 @@ struct ath6kl {
u8 grp_crypto;
u8 grp_crypto_len;
u8 def_txkey_index;
- struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
+ u8 next_mode;
+ u8 nw_type;
u8 bssid[ETH_ALEN];
u8 req_bssid[ETH_ALEN];
u16 ch_hint;
u16 bss_ch;
+ struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
+ struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
+ struct aggr_info *aggr_cntxt;
+
+ struct timer_list disconnect_timer;
+ struct timer_list sched_scan_timer;
+
+ struct cfg80211_scan_request *scan_req;
+ enum sme_state sme_state;
+ int reconnect_flag;
+ u32 last_roc_id;
+ u32 last_cancel_roc_id;
+ u32 send_action_id;
+ bool probe_req_report;
+ u16 next_chan;
+ u16 assoc_bss_beacon_int;
+ u8 assoc_bss_dtim_period;
+ struct net_device_stats net_stats;
+ struct target_stats target_stats;
+};
+
+#define WOW_LIST_ID 0
+#define WOW_HOST_REQ_DELAY 500 /* ms */
+
+#define ATH6KL_SCHED_SCAN_RESULT_DELAY 5000 /* ms */
+
+/* Flag info */
+enum ath6kl_dev_state {
+ WMI_ENABLED,
+ WMI_READY,
+ WMI_CTRL_EP_FULL,
+ TESTMODE,
+ DESTROY_IN_PROGRESS,
+ SKIP_SCAN,
+ ROAM_TBL_PEND,
+ FIRST_BOOT,
+};
+
+enum ath6kl_state {
+ ATH6KL_STATE_OFF,
+ ATH6KL_STATE_ON,
+ ATH6KL_STATE_DEEPSLEEP,
+ ATH6KL_STATE_CUTPOWER,
+ ATH6KL_STATE_WOW,
+ ATH6KL_STATE_SCHED_SCAN,
+};
+
+struct ath6kl {
+ struct device *dev;
+ struct wiphy *wiphy;
+
+ enum ath6kl_state state;
+
+ struct ath6kl_bmi bmi;
+ const struct ath6kl_hif_ops *hif_ops;
+ struct wmi *wmi;
+ int tx_pending[ENDPOINT_MAX];
+ int total_tx_data_pend;
+ struct htc_target *htc_target;
+ enum ath6kl_hif_type hif_type;
+ void *hif_priv;
+ struct list_head vif_list;
+ /* Lock to avoid race in vif_list entries among add/del/traverse */
+ spinlock_t list_lock;
+ u8 num_vif;
+ unsigned int vif_max;
+ u8 max_norm_iface;
+ u8 avail_idx_map;
+ spinlock_t lock;
+ struct semaphore sem;
u16 listen_intvl_b;
u16 listen_intvl_t;
u8 lrssi_roam_threshold;
struct ath6kl_version version;
u32 target_type;
u8 tx_pwr;
- struct net_device_stats net_stats;
- struct target_stats target_stats;
struct ath6kl_node_mapping node_map[MAX_NODE_NUM];
u8 ibss_ps_enable;
+ bool ibss_if_active;
u8 node_num;
u8 next_ep_id;
struct ath6kl_cookie *cookie_list;
@@ -446,7 +541,7 @@ struct ath6kl {
u8 hiac_stream_active_pri;
u8 ep2ac_map[ENDPOINT_MAX];
enum htc_endpoint_id ctrl_ep;
- struct htc_credit_state_info credit_state_info;
+ struct ath6kl_htc_credit_info credit_state_info;
u32 connect_ctrl_flags;
u32 user_key_ctrl;
u8 usr_bss_filter;
@@ -456,30 +551,37 @@ struct ath6kl {
struct sk_buff_head mcastpsq;
spinlock_t mcastpsq_lock;
u8 intra_bss;
- struct aggr_info *aggr_cntxt;
struct wmi_ap_mode_stat ap_stats;
u8 ap_country_code[3];
struct list_head amsdu_rx_buffer_queue;
- struct timer_list disconnect_timer;
u8 rx_meta_ver;
- struct wireless_dev *wdev;
- struct cfg80211_scan_request *scan_req;
- struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
- enum sme_state sme_state;
enum wlan_low_pwr_state wlan_pwr_state;
- struct wmi_scan_params_cmd sc_params;
+ u8 mac_addr[ETH_ALEN];
#define AR_MCAST_FILTER_MAC_ADDR_SIZE 4
struct {
void *rx_report;
size_t rx_report_len;
} tm;
- struct {
+ struct ath6kl_hw {
+ u32 id;
+ const char *name;
u32 dataset_patch_addr;
u32 app_load_addr;
u32 app_start_override_addr;
u32 board_ext_data_addr;
u32 reserved_ram_size;
+ u32 board_addr;
+ u32 refclk_hz;
+ u32 uarttx_pin;
+
+ const char *fw_otp;
+ const char *fw;
+ const char *fw_tcmd;
+ const char *fw_patch;
+ const char *fw_api2;
+ const char *fw_board;
+ const char *fw_default_board;
} hw;
u16 conf_flags;
@@ -487,7 +589,6 @@ struct ath6kl {
struct ath6kl_mbox_info mbox_info;
struct ath6kl_cookie cookie_mem[MAX_COOKIE_NUM];
- int reconnect_flag;
unsigned long flag;
u8 *fw_board;
@@ -508,13 +609,7 @@ struct ath6kl {
struct dentry *debugfs_phy;
- u32 send_action_id;
- bool probe_req_report;
- u16 next_chan;
-
bool p2p;
- u16 assoc_bss_beacon_int;
- u8 assoc_bss_dtim_period;
#ifdef CONFIG_ATH6KL_DEBUG
struct {
@@ -529,23 +624,19 @@ struct ath6kl {
struct {
unsigned int invalid_rate;
} war_stats;
+
+ u8 *roam_tbl;
+ unsigned int roam_tbl_len;
+
+ u8 keepalive;
+ u8 disc_timeout;
} debug;
#endif /* CONFIG_ATH6KL_DEBUG */
};
-static inline void *ath6kl_priv(struct net_device *dev)
+static inline struct ath6kl *ath6kl_priv(struct net_device *dev)
{
- return wdev_priv(dev->ieee80211_ptr);
-}
-
-static inline void ath6kl_deposit_credit_to_ep(struct htc_credit_state_info
- *cred_info,
- struct htc_endpoint_credit_dist
- *ep_dist, int credits)
-{
- ep_dist->credits += credits;
- ep_dist->cred_assngd += credits;
- cred_info->cur_free_credits -= credits;
+ return ((struct ath6kl_vif *) netdev_priv(dev))->ar;
}
static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar,
@@ -561,7 +652,6 @@ static inline u32 ath6kl_get_hi_item_addr(struct ath6kl *ar,
return addr;
}
-void ath6kl_destroy(struct net_device *dev, unsigned int unregister);
int ath6kl_configure_target(struct ath6kl *ar);
void ath6kl_detect_error(unsigned long ptr);
void disconnect_timer_handler(unsigned long ptr);
@@ -579,10 +669,8 @@ int ath6kl_diag_write(struct ath6kl *ar, u32 address, void *data, u32 length);
int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value);
int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length);
int ath6kl_read_fwlogs(struct ath6kl *ar);
-void ath6kl_init_profile_info(struct ath6kl *ar);
+void ath6kl_init_profile_info(struct ath6kl_vif *vif);
void ath6kl_tx_data_cleanup(struct ath6kl *ar);
-void ath6kl_stop_endpoint(struct net_device *dev, bool keep_profile,
- bool get_dbglogs);
struct ath6kl_cookie *ath6kl_alloc_cookie(struct ath6kl *ar);
void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie);
@@ -598,40 +686,49 @@ struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
void aggr_module_destroy(struct aggr_info *aggr_info);
void aggr_reset_state(struct aggr_info *aggr_info);
-struct ath6kl_sta *ath6kl_find_sta(struct ath6kl *ar, u8 * node_addr);
+struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 * node_addr);
struct ath6kl_sta *ath6kl_find_sta_by_aid(struct ath6kl *ar, u8 aid);
void ath6kl_ready_event(void *devt, u8 * datap, u32 sw_ver, u32 abi_ver);
int ath6kl_control_tx(void *devt, struct sk_buff *skb,
enum htc_endpoint_id eid);
-void ath6kl_connect_event(struct ath6kl *ar, u16 channel,
+void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel,
u8 *bssid, u16 listen_int,
u16 beacon_int, enum network_type net_type,
u8 beacon_ie_len, u8 assoc_req_len,
u8 assoc_resp_len, u8 *assoc_info);
-void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel);
-void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr,
+void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel);
+void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
u8 keymgmt, u8 ucipher, u8 auth,
u8 assoc_req_len, u8 *assoc_info);
-void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason,
+void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason,
u8 *bssid, u8 assoc_resp_len,
u8 *assoc_info, u16 prot_reason_status);
-void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast);
+void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast);
void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr);
-void ath6kl_scan_complete_evt(struct ath6kl *ar, int status);
-void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len);
+void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status);
+void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len);
void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active);
enum htc_endpoint_id ath6kl_ac2_endpoint_id(void *devt, u8 ac);
-void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid);
+void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid);
-void ath6kl_dtimexpiry_event(struct ath6kl *ar);
-void ath6kl_disconnect(struct ath6kl *ar);
-void ath6kl_deep_sleep_enable(struct ath6kl *ar);
-void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid);
-void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no,
+void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif);
+void ath6kl_disconnect(struct ath6kl_vif *vif);
+void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid);
+void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no,
u8 win_sz);
void ath6kl_wakeup_event(void *dev);
-void ath6kl_target_failure(struct ath6kl *ar);
+
+void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
+ bool wait_fot_compltn, bool cold_reset);
+void ath6kl_init_control_info(struct ath6kl_vif *vif);
+void ath6kl_deinit_if_data(struct ath6kl_vif *vif);
+void ath6kl_core_free(struct ath6kl *ar);
+struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar);
+void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready);
+int ath6kl_init_hw_start(struct ath6kl *ar);
+int ath6kl_init_hw_stop(struct ath6kl *ar);
+void ath6kl_check_wow_status(struct ath6kl *ar);
#endif /* CORE_H */
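
core.h now splits the old flat flag #defines into a per-vif word (enum ath6kl_vif_state on vif->flags) and a device-wide word (enum ath6kl_dev_state on ar->flag), both driven by the standard set_bit/clear_bit/test_bit helpers. A minimal sketch of the intended usage, assuming only the structures declared above and core.h being included; the function itself is hypothetical, not taken from the driver.

/* Illustrative only: shows how the per-vif and per-device flag words are
 * meant to be used after this patch; not a function from the driver. */
static bool ath6kl_example_vif_ready(struct ath6kl *ar, struct ath6kl_vif *vif)
{
        /* device-wide state lives in ar->flag (enum ath6kl_dev_state) */
        if (!test_bit(WMI_READY, &ar->flag))
                return false;

        /* interface state lives in vif->flags (enum ath6kl_vif_state) */
        if (!test_bit(WLAN_ENABLED, &vif->flags))
                return false;

        return test_bit(CONNECTED, &vif->flags);
}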
diff --git a/drivers/net/wireless/ath/ath6kl/debug.c b/drivers/net/wireless/ath/ath6kl/debug.c
index 7879b53..eb808b4 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.c
+++ b/drivers/net/wireless/ath/ath6kl/debug.c
@@ -143,49 +143,48 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
static void dump_cred_dist(struct htc_endpoint_credit_dist *ep_dist)
{
- ath6kl_dbg(ATH6KL_DBG_ANY,
+ ath6kl_dbg(ATH6KL_DBG_CREDIT,
"--- endpoint: %d svc_id: 0x%X ---\n",
ep_dist->endpoint, ep_dist->svc_id);
- ath6kl_dbg(ATH6KL_DBG_ANY, " dist_flags : 0x%X\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " dist_flags : 0x%X\n",
ep_dist->dist_flags);
- ath6kl_dbg(ATH6KL_DBG_ANY, " cred_norm : %d\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_norm : %d\n",
ep_dist->cred_norm);
- ath6kl_dbg(ATH6KL_DBG_ANY, " cred_min : %d\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_min : %d\n",
ep_dist->cred_min);
- ath6kl_dbg(ATH6KL_DBG_ANY, " credits : %d\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " credits : %d\n",
ep_dist->credits);
- ath6kl_dbg(ATH6KL_DBG_ANY, " cred_assngd : %d\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_assngd : %d\n",
ep_dist->cred_assngd);
- ath6kl_dbg(ATH6KL_DBG_ANY, " seek_cred : %d\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " seek_cred : %d\n",
ep_dist->seek_cred);
- ath6kl_dbg(ATH6KL_DBG_ANY, " cred_sz : %d\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_sz : %d\n",
ep_dist->cred_sz);
- ath6kl_dbg(ATH6KL_DBG_ANY, " cred_per_msg : %d\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_per_msg : %d\n",
ep_dist->cred_per_msg);
- ath6kl_dbg(ATH6KL_DBG_ANY, " cred_to_dist : %d\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " cred_to_dist : %d\n",
ep_dist->cred_to_dist);
- ath6kl_dbg(ATH6KL_DBG_ANY, " txq_depth : %d\n",
- get_queue_depth(&((struct htc_endpoint *)
- ep_dist->htc_rsvd)->txq));
- ath6kl_dbg(ATH6KL_DBG_ANY,
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, " txq_depth : %d\n",
+ get_queue_depth(&ep_dist->htc_ep->txq));
+ ath6kl_dbg(ATH6KL_DBG_CREDIT,
"----------------------------------\n");
}
+/* FIXME: move to htc.c */
void dump_cred_dist_stats(struct htc_target *target)
{
struct htc_endpoint_credit_dist *ep_list;
- if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_TRC))
+ if (!AR_DBG_LVL_CHECK(ATH6KL_DBG_CREDIT))
return;
list_for_each_entry(ep_list, &target->cred_dist_list, list)
dump_cred_dist(ep_list);
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:%p dist:%p\n",
- target->cred_dist_cntxt, NULL);
- ath6kl_dbg(ATH6KL_DBG_TRC, "credit distribution, total : %d, free : %d\n",
- target->cred_dist_cntxt->total_avail_credits,
- target->cred_dist_cntxt->cur_free_credits);
+ ath6kl_dbg(ATH6KL_DBG_CREDIT,
+ "credit distribution total %d free %d\n",
+ target->credit_info->total_avail_credits,
+ target->credit_info->cur_free_credits);
}
static int ath6kl_debugfs_open(struct inode *inode, struct file *file)
@@ -397,13 +396,20 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath6kl *ar = file->private_data;
- struct target_stats *tgt_stats = &ar->target_stats;
+ struct ath6kl_vif *vif;
+ struct target_stats *tgt_stats;
char *buf;
unsigned int len = 0, buf_len = 1500;
int i;
long left;
ssize_t ret_cnt;
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ tgt_stats = &vif->target_stats;
+
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
@@ -413,9 +419,9 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
return -EBUSY;
}
- set_bit(STATS_UPDATE_PEND, &ar->flag);
+ set_bit(STATS_UPDATE_PEND, &vif->flags);
- if (ath6kl_wmi_get_stats_cmd(ar->wmi)) {
+ if (ath6kl_wmi_get_stats_cmd(ar->wmi, 0)) {
up(&ar->sem);
kfree(buf);
return -EIO;
@@ -423,7 +429,7 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
left = wait_event_interruptible_timeout(ar->event_wq,
!test_bit(STATS_UPDATE_PEND,
- &ar->flag), WMI_TIMEOUT);
+ &vif->flags), WMI_TIMEOUT);
up(&ar->sem);
@@ -555,10 +561,10 @@ static ssize_t read_file_credit_dist_stats(struct file *file,
len += scnprintf(buf + len, buf_len - len, "%25s%5d\n",
"Total Avail Credits: ",
- target->cred_dist_cntxt->total_avail_credits);
+ target->credit_info->total_avail_credits);
len += scnprintf(buf + len, buf_len - len, "%25s%5d\n",
"Free credits :",
- target->cred_dist_cntxt->cur_free_credits);
+ target->credit_info->cur_free_credits);
len += scnprintf(buf + len, buf_len - len,
" Epid Flags Cred_norm Cred_min Credits Cred_assngd"
@@ -577,8 +583,7 @@ static ssize_t read_file_credit_dist_stats(struct file *file,
print_credit_info("%9d", cred_per_msg);
print_credit_info("%14d", cred_to_dist);
len += scnprintf(buf + len, buf_len - len, "%12d\n",
- get_queue_depth(&((struct htc_endpoint *)
- ep_list->htc_rsvd)->txq));
+ get_queue_depth(&ep_list->htc_ep->txq));
}
if (len > buf_len)
@@ -596,6 +601,107 @@ static const struct file_operations fops_credit_dist_stats = {
.llseek = default_llseek,
};
+static unsigned int print_endpoint_stat(struct htc_target *target, char *buf,
+ unsigned int buf_len, unsigned int len,
+ int offset, const char *name)
+{
+ int i;
+ struct htc_endpoint_stats *ep_st;
+ u32 *counter;
+
+ len += scnprintf(buf + len, buf_len - len, "%s:", name);
+ for (i = 0; i < ENDPOINT_MAX; i++) {
+ ep_st = &target->endpoint[i].ep_st;
+ counter = ((u32 *) ep_st) + (offset / 4);
+ len += scnprintf(buf + len, buf_len - len, " %u", *counter);
+ }
+ len += scnprintf(buf + len, buf_len - len, "\n");
+
+ return len;
+}
+
+static ssize_t ath6kl_endpoint_stats_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ struct htc_target *target = ar->htc_target;
+ char *buf;
+ unsigned int buf_len, len = 0;
+ ssize_t ret_cnt;
+
+ buf_len = sizeof(struct htc_endpoint_stats) / sizeof(u32) *
+ (25 + ENDPOINT_MAX * 11);
+ buf = kmalloc(buf_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+#define EPSTAT(name) \
+ len = print_endpoint_stat(target, buf, buf_len, len, \
+ offsetof(struct htc_endpoint_stats, name), \
+ #name)
+ EPSTAT(cred_low_indicate);
+ EPSTAT(tx_issued);
+ EPSTAT(tx_pkt_bundled);
+ EPSTAT(tx_bundles);
+ EPSTAT(tx_dropped);
+ EPSTAT(tx_cred_rpt);
+ EPSTAT(cred_rpt_from_rx);
+ EPSTAT(cred_rpt_from_other);
+ EPSTAT(cred_rpt_ep0);
+ EPSTAT(cred_from_rx);
+ EPSTAT(cred_from_other);
+ EPSTAT(cred_from_ep0);
+ EPSTAT(cred_cosumd);
+ EPSTAT(cred_retnd);
+ EPSTAT(rx_pkts);
+ EPSTAT(rx_lkahds);
+ EPSTAT(rx_bundl);
+ EPSTAT(rx_bundle_lkahd);
+ EPSTAT(rx_bundle_from_hdr);
+ EPSTAT(rx_alloc_thresh_hit);
+ EPSTAT(rxalloc_thresh_byte);
+#undef EPSTAT
+
+ if (len > buf_len)
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+ return ret_cnt;
+}
+
+static ssize_t ath6kl_endpoint_stats_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ struct htc_target *target = ar->htc_target;
+ int ret, i;
+ u32 val;
+ struct htc_endpoint_stats *ep_st;
+
+ ret = kstrtou32_from_user(user_buf, count, 0, &val);
+ if (ret)
+ return ret;
+ if (val == 0) {
+ for (i = 0; i < ENDPOINT_MAX; i++) {
+ ep_st = &target->endpoint[i].ep_st;
+ memset(ep_st, 0, sizeof(*ep_st));
+ }
+ }
+
+ return count;
+}
+
+static const struct file_operations fops_endpoint_stats = {
+ .open = ath6kl_debugfs_open,
+ .read = ath6kl_endpoint_stats_read,
+ .write = ath6kl_endpoint_stats_write,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
static unsigned long ath6kl_get_num_reg(void)
{
int i;
@@ -868,6 +974,660 @@ static const struct file_operations fops_diag_reg_write = {
.llseek = default_llseek,
};
+int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf,
+ size_t len)
+{
+ const struct wmi_target_roam_tbl *tbl;
+ u16 num_entries;
+
+ if (len < sizeof(*tbl))
+ return -EINVAL;
+
+ tbl = (const struct wmi_target_roam_tbl *) buf;
+ num_entries = le16_to_cpu(tbl->num_entries);
+ if (sizeof(*tbl) + num_entries * sizeof(struct wmi_bss_roam_info) >
+ len)
+ return -EINVAL;
+
+ if (ar->debug.roam_tbl == NULL ||
+ ar->debug.roam_tbl_len < (unsigned int) len) {
+ kfree(ar->debug.roam_tbl);
+ ar->debug.roam_tbl = kmalloc(len, GFP_ATOMIC);
+ if (ar->debug.roam_tbl == NULL)
+ return -ENOMEM;
+ }
+
+ memcpy(ar->debug.roam_tbl, buf, len);
+ ar->debug.roam_tbl_len = len;
+
+ if (test_bit(ROAM_TBL_PEND, &ar->flag)) {
+ clear_bit(ROAM_TBL_PEND, &ar->flag);
+ wake_up(&ar->event_wq);
+ }
+
+ return 0;
+}
+
+static ssize_t ath6kl_roam_table_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ int ret;
+ long left;
+ struct wmi_target_roam_tbl *tbl;
+ u16 num_entries, i;
+ char *buf;
+ unsigned int len, buf_len;
+ ssize_t ret_cnt;
+
+ if (down_interruptible(&ar->sem))
+ return -EBUSY;
+
+ set_bit(ROAM_TBL_PEND, &ar->flag);
+
+ ret = ath6kl_wmi_get_roam_tbl_cmd(ar->wmi);
+ if (ret) {
+ up(&ar->sem);
+ return ret;
+ }
+
+ left = wait_event_interruptible_timeout(
+ ar->event_wq, !test_bit(ROAM_TBL_PEND, &ar->flag), WMI_TIMEOUT);
+ up(&ar->sem);
+
+ if (left <= 0)
+ return -ETIMEDOUT;
+
+ if (ar->debug.roam_tbl == NULL)
+ return -ENOMEM;
+
+ tbl = (struct wmi_target_roam_tbl *) ar->debug.roam_tbl;
+ num_entries = le16_to_cpu(tbl->num_entries);
+
+ buf_len = 100 + num_entries * 100;
+ buf = kzalloc(buf_len, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+ len = 0;
+ len += scnprintf(buf + len, buf_len - len,
+ "roam_mode=%u\n\n"
+ "# roam_util bssid rssi rssidt last_rssi util bias\n",
+ le16_to_cpu(tbl->roam_mode));
+
+ for (i = 0; i < num_entries; i++) {
+ struct wmi_bss_roam_info *info = &tbl->info[i];
+ len += scnprintf(buf + len, buf_len - len,
+ "%d %pM %d %d %d %d %d\n",
+ a_sle32_to_cpu(info->roam_util), info->bssid,
+ info->rssi, info->rssidt, info->last_rssi,
+ info->util, info->bias);
+ }
+
+ if (len > buf_len)
+ len = buf_len;
+
+ ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+ kfree(buf);
+ return ret_cnt;
+}
+
+static const struct file_operations fops_roam_table = {
+ .read = ath6kl_roam_table_read,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_force_roam_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ int ret;
+ char buf[20];
+ size_t len;
+ u8 bssid[ETH_ALEN];
+ int i;
+ int addr[ETH_ALEN];
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+
+ if (sscanf(buf, "%02x:%02x:%02x:%02x:%02x:%02x",
+ &addr[0], &addr[1], &addr[2], &addr[3], &addr[4], &addr[5])
+ != ETH_ALEN)
+ return -EINVAL;
+ for (i = 0; i < ETH_ALEN; i++)
+ bssid[i] = addr[i];
+
+ ret = ath6kl_wmi_force_roam_cmd(ar->wmi, bssid);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_force_roam = {
+ .write = ath6kl_force_roam_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_roam_mode_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ int ret;
+ char buf[20];
+ size_t len;
+ enum wmi_roam_mode mode;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+ if (len > 0 && buf[len - 1] == '\n')
+ buf[len - 1] = '\0';
+
+ if (strcasecmp(buf, "default") == 0)
+ mode = WMI_DEFAULT_ROAM_MODE;
+ else if (strcasecmp(buf, "bssbias") == 0)
+ mode = WMI_HOST_BIAS_ROAM_MODE;
+ else if (strcasecmp(buf, "lock") == 0)
+ mode = WMI_LOCK_BSS_MODE;
+ else
+ return -EINVAL;
+
+ ret = ath6kl_wmi_set_roam_mode_cmd(ar->wmi, mode);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_roam_mode = {
+ .write = ath6kl_roam_mode_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive)
+{
+ ar->debug.keepalive = keepalive;
+}
+
+static ssize_t ath6kl_keepalive_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ char buf[16];
+ int len;
+
+ len = snprintf(buf, sizeof(buf), "%u\n", ar->debug.keepalive);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath6kl_keepalive_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ int ret;
+ u8 val;
+
+ ret = kstrtou8_from_user(user_buf, count, 0, &val);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_wmi_set_keepalive_cmd(ar->wmi, 0, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_keepalive = {
+ .open = ath6kl_debugfs_open,
+ .read = ath6kl_keepalive_read,
+ .write = ath6kl_keepalive_write,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout)
+{
+ ar->debug.disc_timeout = timeout;
+}
+
+static ssize_t ath6kl_disconnect_timeout_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ char buf[16];
+ int len;
+
+ len = snprintf(buf, sizeof(buf), "%u\n", ar->debug.disc_timeout);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t ath6kl_disconnect_timeout_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ int ret;
+ u8 val;
+
+ ret = kstrtou8_from_user(user_buf, count, 0, &val);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_wmi_disctimeout_cmd(ar->wmi, 0, val);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static const struct file_operations fops_disconnect_timeout = {
+ .open = ath6kl_debugfs_open,
+ .read = ath6kl_disconnect_timeout_read,
+ .write = ath6kl_disconnect_timeout_write,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_create_qos_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+
+ struct ath6kl *ar = file->private_data;
+ struct ath6kl_vif *vif;
+ char buf[200];
+ ssize_t len;
+ char *sptr, *token;
+ struct wmi_create_pstream_cmd pstream;
+ u32 val32;
+ u16 val16;
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+ sptr = buf;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &pstream.user_pri))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &pstream.traffic_direc))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &pstream.traffic_class))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &pstream.traffic_type))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &pstream.voice_psc_cap))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.min_service_int = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.max_service_int = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.inactivity_int = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.suspension_int = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.service_start_time = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &pstream.tsid))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou16(token, 0, &val16))
+ return -EINVAL;
+ pstream.nominal_msdu = cpu_to_le16(val16);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou16(token, 0, &val16))
+ return -EINVAL;
+ pstream.max_msdu = cpu_to_le16(val16);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.min_data_rate = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.mean_data_rate = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.peak_data_rate = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.max_burst_size = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.delay_bound = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.min_phy_rate = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.sba = cpu_to_le32(val32);
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou32(token, 0, &val32))
+ return -EINVAL;
+ pstream.medium_time = cpu_to_le32(val32);
+
+ ath6kl_wmi_create_pstream_cmd(ar->wmi, vif->fw_vif_idx, &pstream);
+
+ return count;
+}
+
+static const struct file_operations fops_create_qos = {
+ .write = ath6kl_create_qos_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_delete_qos_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+
+ struct ath6kl *ar = file->private_data;
+ struct ath6kl_vif *vif;
+ char buf[100];
+ ssize_t len;
+ char *sptr, *token;
+ u8 traffic_class;
+ u8 tsid;
+
+ vif = ath6kl_vif_first(ar);
+ if (!vif)
+ return -EIO;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+ sptr = buf;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &traffic_class))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou8(token, 0, &tsid))
+ return -EINVAL;
+
+ ath6kl_wmi_delete_pstream_cmd(ar->wmi, vif->fw_vif_idx,
+ traffic_class, tsid);
+
+ return count;
+}
+
+static const struct file_operations fops_delete_qos = {
+ .write = ath6kl_delete_qos_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_bgscan_int_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ u16 bgscan_int;
+ char buf[32];
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ if (kstrtou16(buf, 0, &bgscan_int))
+ return -EINVAL;
+
+ if (bgscan_int == 0)
+ bgscan_int = 0xffff;
+
+ ath6kl_wmi_scanparams_cmd(ar->wmi, 0, 0, 0, bgscan_int, 0, 0, 0, 3,
+ 0, 0, 0);
+
+ return count;
+}
+
+static const struct file_operations fops_bgscan_int = {
+ .write = ath6kl_bgscan_int_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_listen_int_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ u16 listen_int_t, listen_int_b;
+ char buf[32];
+ char *sptr, *token;
+ ssize_t len;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+
+ buf[len] = '\0';
+ sptr = buf;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+
+ if (kstrtou16(token, 0, &listen_int_t))
+ return -EINVAL;
+
+ if (kstrtou16(sptr, 0, &listen_int_b))
+ return -EINVAL;
+
+ if ((listen_int_t < 15) || (listen_int_t > 5000))
+ return -EINVAL;
+
+ if ((listen_int_b < 1) || (listen_int_b > 50))
+ return -EINVAL;
+
+ ar->listen_intvl_t = listen_int_t;
+ ar->listen_intvl_b = listen_int_b;
+
+ ath6kl_wmi_listeninterval_cmd(ar->wmi, 0, ar->listen_intvl_t,
+ ar->listen_intvl_b);
+
+ return count;
+}
+
+static ssize_t ath6kl_listen_int_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ char buf[32];
+ int len;
+
+ len = scnprintf(buf, sizeof(buf), "%u %u\n", ar->listen_intvl_t,
+ ar->listen_intvl_b);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_listen_int = {
+ .read = ath6kl_listen_int_read,
+ .write = ath6kl_listen_int_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+static ssize_t ath6kl_power_params_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath6kl *ar = file->private_data;
+ u8 buf[100];
+ unsigned int len = 0;
+ char *sptr, *token;
+ u16 idle_period, ps_poll_num, dtim,
+ tx_wakeup, num_tx;
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len))
+ return -EFAULT;
+ buf[len] = '\0';
+ sptr = buf;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou16(token, 0, &idle_period))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou16(token, 0, &ps_poll_num))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou16(token, 0, &dtim))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou16(token, 0, &tx_wakeup))
+ return -EINVAL;
+
+ token = strsep(&sptr, " ");
+ if (!token)
+ return -EINVAL;
+ if (kstrtou16(token, 0, &num_tx))
+ return -EINVAL;
+
+ ath6kl_wmi_pmparams_cmd(ar->wmi, 0, idle_period, ps_poll_num,
+ dtim, tx_wakeup, num_tx, 0);
+
+ return count;
+}
+
+static const struct file_operations fops_power_params = {
+ .write = ath6kl_power_params_write,
+ .open = ath6kl_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
int ath6kl_debug_init(struct ath6kl *ar)
{
ar->debug.fwlog_buf.buf = vmalloc(ATH6KL_FWLOG_SIZE);
@@ -889,7 +1649,7 @@ int ath6kl_debug_init(struct ath6kl *ar)
ar->debug.fwlog_mask = 0;
ar->debugfs_phy = debugfs_create_dir("ath6kl",
- ar->wdev->wiphy->debugfsdir);
+ ar->wiphy->debugfsdir);
if (!ar->debugfs_phy) {
vfree(ar->debug.fwlog_buf.buf);
kfree(ar->debug.fwlog_tmp);
@@ -902,6 +1662,9 @@ int ath6kl_debug_init(struct ath6kl *ar)
debugfs_create_file("credit_dist_stats", S_IRUSR, ar->debugfs_phy, ar,
&fops_credit_dist_stats);
+ debugfs_create_file("endpoint_stats", S_IRUSR | S_IWUSR,
+ ar->debugfs_phy, ar, &fops_endpoint_stats);
+
debugfs_create_file("fwlog", S_IRUSR, ar->debugfs_phy, ar,
&fops_fwlog);
@@ -923,6 +1686,33 @@ int ath6kl_debug_init(struct ath6kl *ar)
debugfs_create_file("war_stats", S_IRUSR, ar->debugfs_phy, ar,
&fops_war_stats);
+ debugfs_create_file("roam_table", S_IRUSR, ar->debugfs_phy, ar,
+ &fops_roam_table);
+
+ debugfs_create_file("force_roam", S_IWUSR, ar->debugfs_phy, ar,
+ &fops_force_roam);
+
+ debugfs_create_file("roam_mode", S_IWUSR, ar->debugfs_phy, ar,
+ &fops_roam_mode);
+
+ debugfs_create_file("keepalive", S_IRUSR | S_IWUSR, ar->debugfs_phy, ar,
+ &fops_keepalive);
+
+ debugfs_create_file("disconnect_timeout", S_IRUSR | S_IWUSR,
+ ar->debugfs_phy, ar, &fops_disconnect_timeout);
+
+ debugfs_create_file("create_qos", S_IWUSR, ar->debugfs_phy, ar,
+ &fops_create_qos);
+
+ debugfs_create_file("delete_qos", S_IWUSR, ar->debugfs_phy, ar,
+ &fops_delete_qos);
+
+ debugfs_create_file("bgscan_interval", S_IWUSR,
+ ar->debugfs_phy, ar, &fops_bgscan_int);
+
+ debugfs_create_file("power_params", S_IWUSR, ar->debugfs_phy, ar,
+ &fops_power_params);
+
return 0;
}
@@ -930,6 +1720,7 @@ void ath6kl_debug_cleanup(struct ath6kl *ar)
{
vfree(ar->debug.fwlog_buf.buf);
kfree(ar->debug.fwlog_tmp);
+ kfree(ar->debug.roam_tbl);
}
#endif
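
ath6kl_roam_table_read() and ath6kl_debug_roam_tbl_event() above form a simple request/response handshake over ar->event_wq: the debugfs reader sets ROAM_TBL_PEND, fires the WMI query and sleeps; the WMI event handler caches the table, clears the bit and wakes the waiter. A condensed sketch of that handshake, with hypothetical example_* names, locking and error handling trimmed, and core.h assumed included:

/* Requester side (condensed from ath6kl_roam_table_read) */
static int example_request_roam_tbl(struct ath6kl *ar)
{
        long left;

        set_bit(ROAM_TBL_PEND, &ar->flag);

        if (ath6kl_wmi_get_roam_tbl_cmd(ar->wmi))
                return -EIO;

        /* sleep until the WMI event handler clears the bit or we time out */
        left = wait_event_interruptible_timeout(ar->event_wq,
                                                !test_bit(ROAM_TBL_PEND,
                                                          &ar->flag),
                                                WMI_TIMEOUT);
        return left <= 0 ? -ETIMEDOUT : 0;
}

/* Responder side (condensed from ath6kl_debug_roam_tbl_event) */
static void example_roam_tbl_arrived(struct ath6kl *ar)
{
        if (test_bit(ROAM_TBL_PEND, &ar->flag)) {
                clear_bit(ROAM_TBL_PEND, &ar->flag);
                wake_up(&ar->event_wq);
        }
}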
diff --git a/drivers/net/wireless/ath/ath6kl/debug.h b/drivers/net/wireless/ath/ath6kl/debug.h
index 7b7675f..9853c9c 100644
--- a/drivers/net/wireless/ath/ath6kl/debug.h
+++ b/drivers/net/wireless/ath/ath6kl/debug.h
@@ -17,19 +17,19 @@
#ifndef DEBUG_H
#define DEBUG_H
-#include "htc_hif.h"
+#include "hif.h"
enum ATH6K_DEBUG_MASK {
- ATH6KL_DBG_WLAN_CONNECT = BIT(0), /* wlan connect */
- ATH6KL_DBG_WLAN_SCAN = BIT(1), /* wlan scan */
+ ATH6KL_DBG_CREDIT = BIT(0),
+ /* hole */
ATH6KL_DBG_WLAN_TX = BIT(2), /* wlan tx */
ATH6KL_DBG_WLAN_RX = BIT(3), /* wlan rx */
ATH6KL_DBG_BMI = BIT(4), /* bmi tracing */
- ATH6KL_DBG_HTC_SEND = BIT(5), /* htc send */
- ATH6KL_DBG_HTC_RECV = BIT(6), /* htc recv */
+ ATH6KL_DBG_HTC = BIT(5),
+ ATH6KL_DBG_HIF = BIT(6),
ATH6KL_DBG_IRQ = BIT(7), /* interrupt processing */
- ATH6KL_DBG_PM = BIT(8), /* power management */
- ATH6KL_DBG_WLAN_NODE = BIT(9), /* general wlan node tracing */
+ /* hole */
+ /* hole */
ATH6KL_DBG_WMI = BIT(10), /* wmi tracing */
ATH6KL_DBG_TRC = BIT(11), /* generic func tracing */
ATH6KL_DBG_SCATTER = BIT(12), /* hif scatter tracing */
@@ -40,6 +40,7 @@ enum ATH6K_DEBUG_MASK {
ATH6KL_DBG_SDIO_DUMP = BIT(17),
ATH6KL_DBG_BOOT = BIT(18), /* driver init and fw boot */
ATH6KL_DBG_WMI_DUMP = BIT(19),
+ ATH6KL_DBG_SUSPEND = BIT(20),
ATH6KL_DBG_ANY = 0xffffffff /* enable all logs */
};
@@ -90,6 +91,10 @@ void ath6kl_dump_registers(struct ath6kl_device *dev,
void dump_cred_dist_stats(struct htc_target *target);
void ath6kl_debug_fwlog_event(struct ath6kl *ar, const void *buf, size_t len);
void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war);
+int ath6kl_debug_roam_tbl_event(struct ath6kl *ar, const void *buf,
+ size_t len);
+void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive);
+void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar, u8 timeout);
int ath6kl_debug_init(struct ath6kl *ar);
void ath6kl_debug_cleanup(struct ath6kl *ar);
@@ -125,6 +130,21 @@ static inline void ath6kl_debug_war(struct ath6kl *ar, enum ath6kl_war war)
{
}
+static inline int ath6kl_debug_roam_tbl_event(struct ath6kl *ar,
+ const void *buf, size_t len)
+{
+ return 0;
+}
+
+static inline void ath6kl_debug_set_keepalive(struct ath6kl *ar, u8 keepalive)
+{
+}
+
+static inline void ath6kl_debug_set_disconnect_timeout(struct ath6kl *ar,
+ u8 timeout)
+{
+}
+
static inline int ath6kl_debug_init(struct ath6kl *ar)
{
return 0;
diff --git a/drivers/net/wireless/ath/ath6kl/hif-ops.h b/drivers/net/wireless/ath/ath6kl/hif-ops.h
index d6c898f..2fe1dad 100644
--- a/drivers/net/wireless/ath/ath6kl/hif-ops.h
+++ b/drivers/net/wireless/ath/ath6kl/hif-ops.h
@@ -18,10 +18,16 @@
#define HIF_OPS_H
#include "hif.h"
+#include "debug.h"
static inline int hif_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
u32 len, u32 request)
{
+ ath6kl_dbg(ATH6KL_DBG_HIF,
+ "hif %s sync addr 0x%x buf 0x%p len %d request 0x%x\n",
+ (request & HIF_WRITE) ? "write" : "read",
+ addr, buf, len, request);
+
return ar->hif_ops->read_write_sync(ar, addr, buf, len, request);
}
@@ -29,16 +35,24 @@ static inline int hif_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
u32 length, u32 request,
struct htc_packet *packet)
{
+ ath6kl_dbg(ATH6KL_DBG_HIF,
+ "hif write async addr 0x%x buf 0x%p len %d request 0x%x\n",
+ address, buffer, length, request);
+
return ar->hif_ops->write_async(ar, address, buffer, length,
request, packet);
}
static inline void ath6kl_hif_irq_enable(struct ath6kl *ar)
{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif irq enable\n");
+
return ar->hif_ops->irq_enable(ar);
}
static inline void ath6kl_hif_irq_disable(struct ath6kl *ar)
{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif irq disable\n");
+
return ar->hif_ops->irq_disable(ar);
}
@@ -69,9 +83,70 @@ static inline void ath6kl_hif_cleanup_scatter(struct ath6kl *ar)
return ar->hif_ops->cleanup_scatter(ar);
}
-static inline int ath6kl_hif_suspend(struct ath6kl *ar)
+static inline int ath6kl_hif_suspend(struct ath6kl *ar,
+ struct cfg80211_wowlan *wow)
+{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif suspend\n");
+
+ return ar->hif_ops->suspend(ar, wow);
+}
+
+/*
+ * Read from the ATH6KL through its diagnostic window. No cooperation from
+ * the Target is required for this.
+ */
+static inline int ath6kl_hif_diag_read32(struct ath6kl *ar, u32 address,
+ u32 *value)
{
- return ar->hif_ops->suspend(ar);
+ return ar->hif_ops->diag_read32(ar, address, value);
+}
+
+/*
+ * Write to the ATH6KL through its diagnostic window. No cooperation from
+ * the Target is required for this.
+ */
+static inline int ath6kl_hif_diag_write32(struct ath6kl *ar, u32 address,
+ __le32 value)
+{
+ return ar->hif_ops->diag_write32(ar, address, value);
+}
+
+static inline int ath6kl_hif_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
+{
+ return ar->hif_ops->bmi_read(ar, buf, len);
+}
+
+static inline int ath6kl_hif_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
+{
+ return ar->hif_ops->bmi_write(ar, buf, len);
+}
+
+static inline int ath6kl_hif_resume(struct ath6kl *ar)
+{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif resume\n");
+
+ return ar->hif_ops->resume(ar);
+}
+
+static inline int ath6kl_hif_power_on(struct ath6kl *ar)
+{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif power on\n");
+
+ return ar->hif_ops->power_on(ar);
+}
+
+static inline int ath6kl_hif_power_off(struct ath6kl *ar)
+{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif power off\n");
+
+ return ar->hif_ops->power_off(ar);
+}
+
+static inline void ath6kl_hif_stop(struct ath6kl *ar)
+{
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif stop\n");
+
+ ar->hif_ops->stop(ar);
}
#endif
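
The wrappers above all follow one shape: trace under ATH6KL_DBG_HIF, then dispatch through ar->hif_ops, so a bus backend only has to populate struct ath6kl_hif_ops. A sketch of what such a table might look like, using hypothetical mybus_* callbacks; the real SDIO/USB tables are defined in their own files, not in this hunk.

/* Hypothetical bus backend: only the table wiring is shown; real callbacks
 * would talk to the underlying bus driver. */
static int mybus_power_on(struct ath6kl *ar)  { return 0; }
static int mybus_power_off(struct ath6kl *ar) { return 0; }
static void mybus_stop(struct ath6kl *ar)     { }
static int mybus_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
{
        return 0;
}
static int mybus_resume(struct ath6kl *ar)    { return 0; }

static const struct ath6kl_hif_ops mybus_hif_ops = {
        .power_on  = mybus_power_on,
        .power_off = mybus_power_off,
        .stop      = mybus_stop,
        .suspend   = mybus_suspend,
        .resume    = mybus_resume,
        /* read_write_sync, diag_read32, bmi_read, ... filled in likewise */
};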
diff --git a/drivers/net/wireless/ath/ath6kl/htc_hif.c b/drivers/net/wireless/ath/ath6kl/hif.c
index 86b1cc7..e57da35 100644
--- a/drivers/net/wireless/ath/ath6kl/htc_hif.c
+++ b/drivers/net/wireless/ath/ath6kl/hif.c
@@ -13,18 +13,19 @@
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#include "hif.h"
#include "core.h"
#include "target.h"
#include "hif-ops.h"
-#include "htc_hif.h"
#include "debug.h"
#define MAILBOX_FOR_BLOCK_SIZE 1
#define ATH6KL_TIME_QUANTUM 10 /* in ms */
-static int ath6kldev_cp_scat_dma_buf(struct hif_scatter_req *req, bool from_dma)
+static int ath6kl_hif_cp_scat_dma_buf(struct hif_scatter_req *req,
+ bool from_dma)
{
u8 *buf;
int i;
@@ -46,12 +47,11 @@ static int ath6kldev_cp_scat_dma_buf(struct hif_scatter_req *req, bool from_dma)
return 0;
}
-int ath6kldev_rw_comp_handler(void *context, int status)
+int ath6kl_hif_rw_comp_handler(void *context, int status)
{
struct htc_packet *packet = context;
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "ath6kldev_rw_comp_handler (pkt:0x%p , status: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif rw completion pkt 0x%p status %d\n",
packet, status);
packet->status = status;
@@ -59,30 +59,83 @@ int ath6kldev_rw_comp_handler(void *context, int status)
return 0;
}
+#define REG_DUMP_COUNT_AR6003 60
+#define REGISTER_DUMP_LEN_MAX 60
-static int ath6kldev_proc_dbg_intr(struct ath6kl_device *dev)
+static void ath6kl_hif_dump_fw_crash(struct ath6kl *ar)
{
- u32 dummy;
- int status;
+ __le32 regdump_val[REGISTER_DUMP_LEN_MAX];
+ u32 i, address, regdump_addr = 0;
+ int ret;
+
+ if (ar->target_type != TARGET_TYPE_AR6003)
+ return;
+
+ /* the reg dump pointer is copied to the host interest area */
+ address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state));
+ address = TARG_VTOP(ar->target_type, address);
+
+ /* read RAM location through diagnostic window */
+ ret = ath6kl_diag_read32(ar, address, &regdump_addr);
+
+ if (ret || !regdump_addr) {
+ ath6kl_warn("failed to get ptr to register dump area: %d\n",
+ ret);
+ return;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_IRQ, "register dump data address 0x%x\n",
+ regdump_addr);
+ regdump_addr = TARG_VTOP(ar->target_type, regdump_addr);
+
+ /* fetch register dump data */
+ ret = ath6kl_diag_read(ar, regdump_addr, (u8 *)&regdump_val[0],
+ REG_DUMP_COUNT_AR6003 * (sizeof(u32)));
+ if (ret) {
+ ath6kl_warn("failed to get register dump: %d\n", ret);
+ return;
+ }
+
+ ath6kl_info("crash dump:\n");
+ ath6kl_info("hw 0x%x fw %s\n", ar->wiphy->hw_version,
+ ar->wiphy->fw_version);
+
+ BUILD_BUG_ON(REG_DUMP_COUNT_AR6003 % 4);
- ath6kl_err("target debug interrupt\n");
+ for (i = 0; i < REG_DUMP_COUNT_AR6003 / 4; i++) {
+ ath6kl_info("%d: 0x%8.8x 0x%8.8x 0x%8.8x 0x%8.8x\n",
+ 4 * i,
+ le32_to_cpu(regdump_val[i]),
+ le32_to_cpu(regdump_val[i + 1]),
+ le32_to_cpu(regdump_val[i + 2]),
+ le32_to_cpu(regdump_val[i + 3]));
+ }
+
+}
+
+static int ath6kl_hif_proc_dbg_intr(struct ath6kl_device *dev)
+{
+ u32 dummy;
+ int ret;
- ath6kl_target_failure(dev->ar);
+ ath6kl_warn("firmware crashed\n");
/*
* read counter to clear the interrupt, the debug error interrupt is
* counter 0.
*/
- status = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS,
+ ret = hif_read_write_sync(dev->ar, COUNT_DEC_ADDRESS,
(u8 *)&dummy, 4, HIF_RD_SYNC_BYTE_INC);
- if (status)
- WARN_ON(1);
+ if (ret)
+ ath6kl_warn("Failed to clear debug interrupt: %d\n", ret);
- return status;
+ ath6kl_hif_dump_fw_crash(dev->ar);
+
+ return ret;
}
/* mailbox recv message polling */
-int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
+int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
int timeout)
{
struct ath6kl_irq_proc_registers *rg;
@@ -118,7 +171,7 @@ int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
/* delay a little */
mdelay(ATH6KL_TIME_QUANTUM);
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "retry mbox poll : %d\n", i);
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif retry mbox poll try %d\n", i);
}
if (i == 0) {
@@ -131,7 +184,7 @@ int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
* Target failure handler will be called in case of
* an assert.
*/
- ath6kldev_proc_dbg_intr(dev);
+ ath6kl_hif_proc_dbg_intr(dev);
}
return status;
@@ -141,11 +194,14 @@ int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev, u32 *lk_ahd,
* Disable packet reception (used in case the host runs out of buffers)
* using the interrupt enable registers through the host I/F
*/
-int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx)
+int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx)
{
struct ath6kl_irq_enable_reg regs;
int status = 0;
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif rx %s\n",
+ enable_rx ? "enable" : "disable");
+
/* take the lock to protect interrupt enable shadows */
spin_lock_bh(&dev->lock);
@@ -168,7 +224,7 @@ int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx)
return status;
}
-int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
+int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev,
struct hif_scatter_req *scat_req, bool read)
{
int status = 0;
@@ -185,14 +241,14 @@ int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
dev->ar->mbox_info.htc_addr;
}
- ath6kl_dbg((ATH6KL_DBG_HTC_RECV | ATH6KL_DBG_HTC_SEND),
- "ath6kldev_submit_scat_req, entries: %d, total len: %d mbox:0x%X (mode: %s : %s)\n",
+ ath6kl_dbg(ATH6KL_DBG_HIF,
+ "hif submit scatter request entries %d len %d mbox 0x%x %s %s\n",
scat_req->scat_entries, scat_req->len,
scat_req->addr, !read ? "async" : "sync",
(read) ? "rd" : "wr");
if (!read && scat_req->virt_scat) {
- status = ath6kldev_cp_scat_dma_buf(scat_req, false);
+ status = ath6kl_hif_cp_scat_dma_buf(scat_req, false);
if (status) {
scat_req->status = status;
scat_req->complete(dev->ar->htc_target, scat_req);
@@ -207,13 +263,13 @@ int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
scat_req->status = status;
if (!status && scat_req->virt_scat)
scat_req->status =
- ath6kldev_cp_scat_dma_buf(scat_req, true);
+ ath6kl_hif_cp_scat_dma_buf(scat_req, true);
}
return status;
}
-static int ath6kldev_proc_counter_intr(struct ath6kl_device *dev)
+static int ath6kl_hif_proc_counter_intr(struct ath6kl_device *dev)
{
u8 counter_int_status;
@@ -232,12 +288,12 @@ static int ath6kldev_proc_counter_intr(struct ath6kl_device *dev)
* the debug assertion counter interrupt.
*/
if (counter_int_status & ATH6KL_TARGET_DEBUG_INTR_MASK)
- return ath6kldev_proc_dbg_intr(dev);
+ return ath6kl_hif_proc_dbg_intr(dev);
return 0;
}
-static int ath6kldev_proc_err_intr(struct ath6kl_device *dev)
+static int ath6kl_hif_proc_err_intr(struct ath6kl_device *dev)
{
int status;
u8 error_int_status;
@@ -282,7 +338,7 @@ static int ath6kldev_proc_err_intr(struct ath6kl_device *dev)
return status;
}
-static int ath6kldev_proc_cpu_intr(struct ath6kl_device *dev)
+static int ath6kl_hif_proc_cpu_intr(struct ath6kl_device *dev)
{
int status;
u8 cpu_int_status;
@@ -417,7 +473,7 @@ static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
* we rapidly pull packets.
*/
status = ath6kl_htc_rxmsg_pending_handler(dev->htc_cnxt,
- &lk_ahd, &fetched);
+ lk_ahd, &fetched);
if (status)
goto out;
@@ -436,21 +492,21 @@ static int proc_pending_irqs(struct ath6kl_device *dev, bool *done)
if (MS(HOST_INT_STATUS_CPU, host_int_status)) {
/* CPU Interrupt */
- status = ath6kldev_proc_cpu_intr(dev);
+ status = ath6kl_hif_proc_cpu_intr(dev);
if (status)
goto out;
}
if (MS(HOST_INT_STATUS_ERROR, host_int_status)) {
/* Error Interrupt */
- status = ath6kldev_proc_err_intr(dev);
+ status = ath6kl_hif_proc_err_intr(dev);
if (status)
goto out;
}
if (MS(HOST_INT_STATUS_COUNTER, host_int_status))
/* Counter Interrupt */
- status = ath6kldev_proc_counter_intr(dev);
+ status = ath6kl_hif_proc_counter_intr(dev);
out:
/*
@@ -479,9 +535,10 @@ out:
}
/* interrupt handler, kicks off all interrupt processing */
-int ath6kldev_intr_bh_handler(struct ath6kl *ar)
+int ath6kl_hif_intr_bh_handler(struct ath6kl *ar)
{
struct ath6kl_device *dev = ar->htc_target->dev;
+ unsigned long timeout;
int status = 0;
bool done = false;
@@ -495,7 +552,8 @@ int ath6kldev_intr_bh_handler(struct ath6kl *ar)
* IRQ processing is synchronous, interrupt status registers can be
* re-read.
*/
- while (!done) {
+ timeout = jiffies + msecs_to_jiffies(ATH6KL_HIF_COMMUNICATION_TIMEOUT);
+ while (time_before(jiffies, timeout) && !done) {
status = proc_pending_irqs(dev, &done);
if (status)
break;
@@ -504,7 +562,7 @@ int ath6kldev_intr_bh_handler(struct ath6kl *ar)
return status;
}
-static int ath6kldev_enable_intrs(struct ath6kl_device *dev)
+static int ath6kl_hif_enable_intrs(struct ath6kl_device *dev)
{
struct ath6kl_irq_enable_reg regs;
int status;
@@ -552,7 +610,7 @@ static int ath6kldev_enable_intrs(struct ath6kl_device *dev)
return status;
}
-int ath6kldev_disable_intrs(struct ath6kl_device *dev)
+int ath6kl_hif_disable_intrs(struct ath6kl_device *dev)
{
struct ath6kl_irq_enable_reg regs;
@@ -571,7 +629,7 @@ int ath6kldev_disable_intrs(struct ath6kl_device *dev)
}
/* enable device interrupts */
-int ath6kldev_unmask_intrs(struct ath6kl_device *dev)
+int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev)
{
int status = 0;
@@ -583,29 +641,29 @@ int ath6kldev_unmask_intrs(struct ath6kl_device *dev)
* target "soft" resets. The ATH6KL interrupt enables reset back to an
* "enabled" state when this happens.
*/
- ath6kldev_disable_intrs(dev);
+ ath6kl_hif_disable_intrs(dev);
/* unmask the host controller interrupts */
ath6kl_hif_irq_enable(dev->ar);
- status = ath6kldev_enable_intrs(dev);
+ status = ath6kl_hif_enable_intrs(dev);
return status;
}
/* disable all device interrupts */
-int ath6kldev_mask_intrs(struct ath6kl_device *dev)
+int ath6kl_hif_mask_intrs(struct ath6kl_device *dev)
{
/*
* Mask the interrupt at the HIF layer to avoid any stray interrupt
* taken while we zero out our shadow registers in
- * ath6kldev_disable_intrs().
+ * ath6kl_hif_disable_intrs().
*/
ath6kl_hif_irq_disable(dev->ar);
- return ath6kldev_disable_intrs(dev);
+ return ath6kl_hif_disable_intrs(dev);
}
-int ath6kldev_setup(struct ath6kl_device *dev)
+int ath6kl_hif_setup(struct ath6kl_device *dev)
{
int status = 0;
@@ -621,19 +679,17 @@ int ath6kldev_setup(struct ath6kl_device *dev)
/* must be a power of 2 */
if ((dev->htc_cnxt->block_sz & (dev->htc_cnxt->block_sz - 1)) != 0) {
WARN_ON(1);
+ status = -EINVAL;
goto fail_setup;
}
/* assemble mask, used for padding to a block */
dev->htc_cnxt->block_mask = dev->htc_cnxt->block_sz - 1;
- ath6kl_dbg(ATH6KL_DBG_TRC, "block size: %d, mbox addr:0x%X\n",
+ ath6kl_dbg(ATH6KL_DBG_HIF, "hif block size %d mbox addr 0x%x\n",
dev->htc_cnxt->block_sz, dev->ar->mbox_info.htc_addr);
- ath6kl_dbg(ATH6KL_DBG_TRC,
- "hif interrupt processing is sync only\n");
-
- status = ath6kldev_disable_intrs(dev);
+ status = ath6kl_hif_disable_intrs(dev);
fail_setup:
return status;
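
For reference, the power-of-two check and block_mask assignment in ath6kl_hif_setup() above exist so that padding a transfer to the mailbox block size stays a pure mask operation (CALC_TXRX_PADDED_LEN in htc.c relies on the same mask). A minimal standalone sketch, assuming the usual kernel u32 type; pad_to_block() is an illustrative name, not a driver function:

static inline u32 pad_to_block(u32 len, u32 block_sz)
{
        /* a power of two has exactly one bit set, so x & (x - 1) == 0 */
        if (block_sz & (block_sz - 1))
                return 0;       /* caller should treat this as invalid */

        /* round len up to the next multiple of block_sz */
        return (len + (block_sz - 1)) & ~(block_sz - 1);
}

For example, pad_to_block(300, 128) returns 384, i.e. three full 128-byte blocks.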
diff --git a/drivers/net/wireless/ath/ath6kl/hif.h b/drivers/net/wireless/ath/ath6kl/hif.h
index 797e2d1..699a036 100644
--- a/drivers/net/wireless/ath/ath6kl/hif.h
+++ b/drivers/net/wireless/ath/ath6kl/hif.h
@@ -35,6 +35,7 @@
#define MAX_SCATTER_REQ_TRANSFER_SIZE (32 * 1024)
#define MANUFACTURER_ID_AR6003_BASE 0x300
+#define MANUFACTURER_ID_AR6004_BASE 0x400
/* SDIO manufacturer ID and Codes */
#define MANUFACTURER_ID_ATH6KL_BASE_MASK 0xFF00
#define MANUFACTURER_CODE 0x271 /* Atheros */
@@ -59,6 +60,18 @@
/* mode to enable special 4-bit interrupt assertion without clock */
#define SDIO_IRQ_MODE_ASYNC_4BIT_IRQ (1 << 0)
+/* HTC runs over mailbox 0 */
+#define HTC_MAILBOX 0
+
+#define ATH6KL_TARGET_DEBUG_INTR_MASK 0x01
+
+/* FIXME: are these duplicates of the MAX_SCATTER_ values in hif.h? */
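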
+#define ATH6KL_SCATTER_ENTRIES_PER_REQ 16
+#define ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER (16 * 1024)
+#define ATH6KL_SCATTER_REQS 4
+
+#define ATH6KL_HIF_COMMUNICATION_TIMEOUT 1000
+
struct bus_request {
struct list_head list;
@@ -186,6 +199,34 @@ struct hif_scatter_req {
struct hif_scatter_item scat_list[1];
};
+struct ath6kl_irq_proc_registers {
+ u8 host_int_status;
+ u8 cpu_int_status;
+ u8 error_int_status;
+ u8 counter_int_status;
+ u8 mbox_frame;
+ u8 rx_lkahd_valid;
+ u8 host_int_status2;
+ u8 gmbox_rx_avail;
+ __le32 rx_lkahd[2];
+ __le32 rx_gmbox_lkahd_alias[2];
+} __packed;
+
+struct ath6kl_irq_enable_reg {
+ u8 int_status_en;
+ u8 cpu_int_status_en;
+ u8 err_int_status_en;
+ u8 cntr_int_status_en;
+} __packed;
+
+struct ath6kl_device {
+ spinlock_t lock;
+ struct ath6kl_irq_proc_registers irq_proc_reg;
+ struct ath6kl_irq_enable_reg irq_en_reg;
+ struct htc_target *htc_cnxt;
+ struct ath6kl *ar;
+};
+
struct ath6kl_hif_ops {
int (*read_write_sync)(struct ath6kl *ar, u32 addr, u8 *buf,
u32 len, u32 request);
@@ -202,7 +243,30 @@ struct ath6kl_hif_ops {
int (*scat_req_rw) (struct ath6kl *ar,
struct hif_scatter_req *scat_req);
void (*cleanup_scatter)(struct ath6kl *ar);
- int (*suspend)(struct ath6kl *ar);
+ int (*suspend)(struct ath6kl *ar, struct cfg80211_wowlan *wow);
+ int (*resume)(struct ath6kl *ar);
+ int (*diag_read32)(struct ath6kl *ar, u32 address, u32 *value);
+ int (*diag_write32)(struct ath6kl *ar, u32 address, __le32 value);
+ int (*bmi_read)(struct ath6kl *ar, u8 *buf, u32 len);
+ int (*bmi_write)(struct ath6kl *ar, u8 *buf, u32 len);
+ int (*power_on)(struct ath6kl *ar);
+ int (*power_off)(struct ath6kl *ar);
+ void (*stop)(struct ath6kl *ar);
};
+int ath6kl_hif_setup(struct ath6kl_device *dev);
+int ath6kl_hif_unmask_intrs(struct ath6kl_device *dev);
+int ath6kl_hif_mask_intrs(struct ath6kl_device *dev);
+int ath6kl_hif_poll_mboxmsg_rx(struct ath6kl_device *dev,
+ u32 *lk_ahd, int timeout);
+int ath6kl_hif_rx_control(struct ath6kl_device *dev, bool enable_rx);
+int ath6kl_hif_disable_intrs(struct ath6kl_device *dev);
+
+int ath6kl_hif_rw_comp_handler(void *context, int status);
+int ath6kl_hif_intr_bh_handler(struct ath6kl *ar);
+
+/* Scatter Function and Definitions */
+int ath6kl_hif_submit_scat_req(struct ath6kl_device *dev,
+ struct hif_scatter_req *scat_req, bool read);
+
#endif
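
The ATH6KL_HIF_COMMUNICATION_TIMEOUT constant added above (1000, interpreted as milliseconds by the bottom-half handler) bounds the interrupt processing loop in hif.c. A minimal sketch of that pattern, assuming <linux/jiffies.h>; proc_pending_irqs() is the real static helper in hif.c, the wrapper name here is illustrative:

static int bounded_intr_loop(struct ath6kl_device *dev)
{
        unsigned long timeout;
        bool done = false;
        int status = 0;

        timeout = jiffies + msecs_to_jiffies(ATH6KL_HIF_COMMUNICATION_TIMEOUT);

        /* keep draining pending interrupts until the target reports none
         * are left, but never spin past the communication timeout;
         * time_before() is safe across jiffies wraparound
         */
        while (time_before(jiffies, timeout) && !done) {
                status = proc_pending_irqs(dev, &done);
                if (status)
                        break;
        }

        return status;
}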
diff --git a/drivers/net/wireless/ath/ath6kl/htc.c b/drivers/net/wireless/ath/ath6kl/htc.c
index f88a7c9..f3b63ca 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.c
+++ b/drivers/net/wireless/ath/ath6kl/htc.c
@@ -15,13 +15,321 @@
*/
#include "core.h"
-#include "htc_hif.h"
+#include "hif.h"
#include "debug.h"
#include "hif-ops.h"
#include <asm/unaligned.h>
#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))
+/* Functions for Tx credit handling */
+static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info,
+ struct htc_endpoint_credit_dist *ep_dist,
+ int credits)
+{
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit deposit ep %d credits %d\n",
+ ep_dist->endpoint, credits);
+
+ ep_dist->credits += credits;
+ ep_dist->cred_assngd += credits;
+ cred_info->cur_free_credits -= credits;
+}
+
+static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
+ struct list_head *ep_list,
+ int tot_credits)
+{
+ struct htc_endpoint_credit_dist *cur_ep_dist;
+ int count;
+
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit init total %d\n", tot_credits);
+
+ cred_info->cur_free_credits = tot_credits;
+ cred_info->total_avail_credits = tot_credits;
+
+ list_for_each_entry(cur_ep_dist, ep_list, list) {
+ if (cur_ep_dist->endpoint == ENDPOINT_0)
+ continue;
+
+ cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;
+
+ if (tot_credits > 4) {
+ if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
+ (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
+ ath6kl_credit_deposit(cred_info,
+ cur_ep_dist,
+ cur_ep_dist->cred_min);
+ cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
+ }
+ }
+
+ if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
+ ath6kl_credit_deposit(cred_info, cur_ep_dist,
+ cur_ep_dist->cred_min);
+ /*
+ * Control service is always marked active; it
+ * never goes inactive.
+ */
+ cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
+ } else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC)
+ /* this is the lowest priority data endpoint */
+ /* FIXME: this looks fishy, check */
+ cred_info->lowestpri_ep_dist = cur_ep_dist->list;
+
+ /*
+ * Streams have to be created (explicit | implicit) for all
+ * kinds of traffic. BE endpoints are also inactive in the
+ * beginning. When BE traffic starts, it creates implicit
+ * streams that redistribute credits.
+ *
+ * Note: all other endpoints have minimums set but are
+ * initially given NO credits. Credits will be distributed
+ * as traffic activity demands.
+ */
+ }
+
+ WARN_ON(cred_info->cur_free_credits <= 0);
+
+ list_for_each_entry(cur_ep_dist, ep_list, list) {
+ if (cur_ep_dist->endpoint == ENDPOINT_0)
+ continue;
+
+ if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
+ cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
+ else {
+ /*
+ * For the remaining data endpoints, we assume that
+ * cred_per_msg is the same for each. We use a simple
+ * calculation here: take the remaining credits,
+ * determine how many maximum-sized messages they can
+ * cover, and then set each endpoint's normal value
+ * equal to 3/4 of that amount.
+ */
+ count = (cred_info->cur_free_credits /
+ cur_ep_dist->cred_per_msg)
+ * cur_ep_dist->cred_per_msg;
+ count = (count * 3) >> 2;
+ count = max(count, cur_ep_dist->cred_per_msg);
+ cur_ep_dist->cred_norm = count;
+
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_CREDIT,
+ "credit ep %d svc_id %d credits %d per_msg %d norm %d min %d\n",
+ cur_ep_dist->endpoint,
+ cur_ep_dist->svc_id,
+ cur_ep_dist->credits,
+ cur_ep_dist->cred_per_msg,
+ cur_ep_dist->cred_norm,
+ cur_ep_dist->cred_min);
+ }
+}
+
+/* initialize and setup credit distribution */
+int ath6kl_credit_setup(void *htc_handle,
+ struct ath6kl_htc_credit_info *cred_info)
+{
+ u16 servicepriority[5];
+
+ memset(cred_info, 0, sizeof(struct ath6kl_htc_credit_info));
+
+ servicepriority[0] = WMI_CONTROL_SVC; /* highest */
+ servicepriority[1] = WMI_DATA_VO_SVC;
+ servicepriority[2] = WMI_DATA_VI_SVC;
+ servicepriority[3] = WMI_DATA_BE_SVC;
+ servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
+
+ /* set priority list */
+ ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5);
+
+ return 0;
+}
+
+/* reduce an ep's credits back to a set limit */
+static void ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info,
+ struct htc_endpoint_credit_dist *ep_dist,
+ int limit)
+{
+ int credits;
+
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit reduce ep %d limit %d\n",
+ ep_dist->endpoint, limit);
+
+ ep_dist->cred_assngd = limit;
+
+ if (ep_dist->credits <= limit)
+ return;
+
+ credits = ep_dist->credits - limit;
+ ep_dist->credits -= credits;
+ cred_info->cur_free_credits += credits;
+}
+
+static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info,
+ struct list_head *epdist_list)
+{
+ struct htc_endpoint_credit_dist *cur_dist_list;
+
+ list_for_each_entry(cur_dist_list, epdist_list, list) {
+ if (cur_dist_list->endpoint == ENDPOINT_0)
+ continue;
+
+ if (cur_dist_list->cred_to_dist > 0) {
+ cur_dist_list->credits +=
+ cur_dist_list->cred_to_dist;
+ cur_dist_list->cred_to_dist = 0;
+ if (cur_dist_list->credits >
+ cur_dist_list->cred_assngd)
+ ath6kl_credit_reduce(cred_info,
+ cur_dist_list,
+ cur_dist_list->cred_assngd);
+
+ if (cur_dist_list->credits >
+ cur_dist_list->cred_norm)
+ ath6kl_credit_reduce(cred_info, cur_dist_list,
+ cur_dist_list->cred_norm);
+
+ if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
+ if (cur_dist_list->txq_depth == 0)
+ ath6kl_credit_reduce(cred_info,
+ cur_dist_list, 0);
+ }
+ }
+ }
+}
+
+/*
+ * HTC has an endpoint that needs credits; ep_dist is the endpoint in
+ * question.
+ */
+static void ath6kl_credit_seek(struct ath6kl_htc_credit_info *cred_info,
+ struct htc_endpoint_credit_dist *ep_dist)
+{
+ struct htc_endpoint_credit_dist *curdist_list;
+ int credits = 0;
+ int need;
+
+ if (ep_dist->svc_id == WMI_CONTROL_SVC)
+ goto out;
+
+ if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
+ (ep_dist->svc_id == WMI_DATA_VO_SVC))
+ if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
+ goto out;
+
+ /*
+ * For all other services, we follow a simple algorithm of:
+ *
+ * 1. checking the free pool for credits
+ * 2. checking lower priority endpoints for credits to take
+ */
+
+ credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
+
+ if (credits >= ep_dist->seek_cred)
+ goto out;
+
+ /*
+ * We don't have enough in the free pool, so try taking away from
+ * lower priority services. The rules for taking away credits:
+ *
+ * 1. Only take from lower priority endpoints
+ * 2. Only take what is allocated above the minimum (never
+ * starve an endpoint completely)
+ * 3. Only take what you need.
+ */
+
+ list_for_each_entry_reverse(curdist_list,
+ &cred_info->lowestpri_ep_dist,
+ list) {
+ if (curdist_list == ep_dist)
+ break;
+
+ need = ep_dist->seek_cred - cred_info->cur_free_credits;
+
+ if ((curdist_list->cred_assngd - need) >=
+ curdist_list->cred_min) {
+ /*
+ * The current one has been allocated more than
+ * its minimum and has enough credits assigned
+ * above that minimum to fulfill our need, so
+ * take away just enough to cover it.
+ */
+ ath6kl_credit_reduce(cred_info, curdist_list,
+ curdist_list->cred_assngd - need);
+
+ if (cred_info->cur_free_credits >=
+ ep_dist->seek_cred)
+ break;
+ }
+
+ if (curdist_list->endpoint == ENDPOINT_0)
+ break;
+ }
+
+ credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
+
+out:
+ /* did we find some credits? */
+ if (credits)
+ ath6kl_credit_deposit(cred_info, ep_dist, credits);
+
+ ep_dist->seek_cred = 0;
+}
+
+/* redistribute credits based on activity change */
+static void ath6kl_credit_redistribute(struct ath6kl_htc_credit_info *info,
+ struct list_head *ep_dist_list)
+{
+ struct htc_endpoint_credit_dist *curdist_list;
+
+ list_for_each_entry(curdist_list, ep_dist_list, list) {
+ if (curdist_list->endpoint == ENDPOINT_0)
+ continue;
+
+ if ((curdist_list->svc_id == WMI_DATA_BK_SVC) ||
+ (curdist_list->svc_id == WMI_DATA_BE_SVC))
+ curdist_list->dist_flags |= HTC_EP_ACTIVE;
+
+ if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
+ !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
+ if (curdist_list->txq_depth == 0)
+ ath6kl_credit_reduce(info, curdist_list, 0);
+ else
+ ath6kl_credit_reduce(info,
+ curdist_list,
+ curdist_list->cred_min);
+ }
+ }
+}
+
+/*
+ * This function is invoked whenever endpoints require credit
+ * distributions. A lock is held while this function is invoked; it
+ * must NOT block. The ep_dist_list is a list of distribution
+ * structures in prioritized order, as defined by the call to the
+ * htc_set_credit_dist() API.
+ */
+static void ath6kl_credit_distribute(struct ath6kl_htc_credit_info *cred_info,
+ struct list_head *ep_dist_list,
+ enum htc_credit_dist_reason reason)
+{
+ switch (reason) {
+ case HTC_CREDIT_DIST_SEND_COMPLETE:
+ ath6kl_credit_update(cred_info, ep_dist_list);
+ break;
+ case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
+ ath6kl_credit_redistribute(cred_info, ep_dist_list);
+ break;
+ default:
+ break;
+ }
+
+ WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
+ WARN_ON(cred_info->cur_free_credits < 0);
+}
+
static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len)
{
u8 *align_addr;
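
The cred_norm computation in ath6kl_credit_init() above is easier to follow pulled out on its own. A sketch with illustrative names and numbers (credit_norm() is not a driver function):

static int credit_norm(int free_credits, int cred_per_msg)
{
        int count;

        /* round the free credits down to a whole number of messages */
        count = (free_credits / cred_per_msg) * cred_per_msg;

        /* hand out 3/4 of that, but never less than one message's worth */
        count = (count * 3) >> 2;
        return count > cred_per_msg ? count : cred_per_msg;
}

/* e.g. credit_norm(26, 5): (26 / 5) * 5 = 25, (25 * 3) >> 2 = 18,
 * so the endpoint's normal credit level ends up at 18
 */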
@@ -102,12 +410,12 @@ static void htc_tx_comp_update(struct htc_target *target,
packet->info.tx.cred_used;
endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
- target->cred_dist_cntxt, &target->cred_dist_list);
+ ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx ctxt 0x%p dist 0x%p\n",
+ target->credit_info, &target->cred_dist_list);
- ath6k_credit_distribute(target->cred_dist_cntxt,
- &target->cred_dist_list,
- HTC_CREDIT_DIST_SEND_COMPLETE);
+ ath6kl_credit_distribute(target->credit_info,
+ &target->cred_dist_list,
+ HTC_CREDIT_DIST_SEND_COMPLETE);
spin_unlock_bh(&target->tx_lock);
}
@@ -118,8 +426,8 @@ static void htc_tx_complete(struct htc_endpoint *endpoint,
if (list_empty(txq))
return;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "send complete ep %d, (%d pkts)\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx complete ep %d pkts %d\n",
endpoint->eid, get_queue_depth(txq));
ath6kl_tx_complete(endpoint->target->dev->ar, txq);
@@ -131,6 +439,9 @@ static void htc_tx_comp_handler(struct htc_target *target,
struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
struct list_head container;
+ ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx complete seqno %d\n",
+ packet->info.tx.seqno);
+
htc_tx_comp_update(target, endpoint, packet);
INIT_LIST_HEAD(&container);
list_add_tail(&packet->list, &container);
@@ -148,8 +459,8 @@ static void htc_async_tx_scat_complete(struct htc_target *target,
INIT_LIST_HEAD(&tx_compq);
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "htc_async_tx_scat_complete total len: %d entries: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx scat complete len %d entries %d\n",
scat_req->len, scat_req->scat_entries);
if (scat_req->status)
@@ -190,16 +501,13 @@ static int ath6kl_htc_tx_issue(struct htc_target *target,
send_len = packet->act_len + HTC_HDR_LENGTH;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s: transmit len : %d (%s)\n",
- __func__, send_len, sync ? "sync" : "async");
-
padded_len = CALC_TXRX_PADDED_LEN(target, send_len);
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "DevSendPacket, padded len: %d mbox:0x%X (mode:%s)\n",
- padded_len,
- target->dev->ar->mbox_info.htc_addr,
- sync ? "sync" : "async");
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx issue len %d seqno %d padded_len %d mbox 0x%X %s\n",
+ send_len, packet->info.tx.seqno, padded_len,
+ target->dev->ar->mbox_info.htc_addr,
+ sync ? "sync" : "async");
if (sync) {
status = hif_read_write_sync(target->dev->ar,
@@ -227,7 +535,7 @@ static int htc_check_credits(struct htc_target *target,
*req_cred = (len > target->tgt_cred_sz) ?
DIV_ROUND_UP(len, target->tgt_cred_sz) : 1;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "creds required:%d got:%d\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit check need %d got %d\n",
*req_cred, ep->cred_dist.credits);
if (ep->cred_dist.credits < *req_cred) {
@@ -237,16 +545,13 @@ static int htc_check_credits(struct htc_target *target,
/* Seek more credits */
ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
- target->cred_dist_cntxt, &ep->cred_dist);
-
- ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);
+ ath6kl_credit_seek(target->credit_info, &ep->cred_dist);
ep->cred_dist.seek_cred = 0;
if (ep->cred_dist.credits < *req_cred) {
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "not enough credits for ep %d - leaving packet in queue\n",
+ ath6kl_dbg(ATH6KL_DBG_CREDIT,
+ "credit not found for ep %d\n",
eid);
return -EINVAL;
}
@@ -260,17 +565,15 @@ static int htc_check_credits(struct htc_target *target,
ep->cred_dist.seek_cred =
ep->cred_dist.cred_per_msg - ep->cred_dist.credits;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
- target->cred_dist_cntxt, &ep->cred_dist);
-
- ath6k_seek_credits(target->cred_dist_cntxt, &ep->cred_dist);
+ ath6kl_credit_seek(target->credit_info, &ep->cred_dist);
/* see if we were successful in getting more */
if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) {
/* tell the target we need credits ASAP! */
*flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;
ep->ep_st.cred_low_indicate += 1;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "host needs credits\n");
+ ath6kl_dbg(ATH6KL_DBG_CREDIT,
+ "credit we need credits asap\n");
}
}
@@ -295,8 +598,8 @@ static void ath6kl_htc_tx_pkts_get(struct htc_target *target,
packet = list_first_entry(&endpoint->txq, struct htc_packet,
list);
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "got head pkt:0x%p , queue depth: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx got packet 0x%p queue depth %d\n",
packet, get_queue_depth(&endpoint->txq));
len = CALC_TXRX_PADDED_LEN(target,
@@ -404,9 +707,9 @@ static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
scat_req->len += len;
scat_req->scat_entries++;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "%d, adding pkt : 0x%p len:%d (remaining space:%d)\n",
- i, packet, len, rem_scat);
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx adding (%d) pkt 0x%p seqno %d len %d remaining %d\n",
+ i, packet, packet->info.tx.seqno, len, rem_scat);
}
/* Roll back scatter setup in case of any failure */
@@ -455,12 +758,12 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
if (!scat_req) {
/* no scatter resources */
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "no more scatter resources\n");
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx no more scatter resources\n");
break;
}
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "pkts to scatter: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
n_scat);
scat_req->len = 0;
@@ -479,10 +782,10 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
n_sent_bundle++;
tot_pkts_bundle += scat_req->scat_entries;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "send scatter total bytes: %d , entries: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx scatter bytes %d entries %d\n",
scat_req->len, scat_req->scat_entries);
- ath6kldev_submit_scat_req(target->dev, scat_req, false);
+ ath6kl_hif_submit_scat_req(target->dev, scat_req, false);
if (status)
break;
@@ -490,8 +793,8 @@ static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
*sent_bundle = n_sent_bundle;
*n_bundle_pkts = tot_pkts_bundle;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "%s (sent:%d)\n",
- __func__, n_sent_bundle);
+ ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx bundle sent %d pkts\n",
+ n_sent_bundle);
return;
}
@@ -510,7 +813,7 @@ static void ath6kl_htc_tx_from_queue(struct htc_target *target,
if (endpoint->tx_proc_cnt > 1) {
endpoint->tx_proc_cnt--;
spin_unlock_bh(&target->tx_lock);
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "htc_try_send (busy)\n");
+ ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx busy\n");
return;
}
@@ -588,15 +891,12 @@ static bool ath6kl_htc_tx_try(struct htc_target *target,
overflow = true;
if (overflow)
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "ep %d, tx queue will overflow :%d , tx depth:%d, max:%d\n",
- endpoint->eid, overflow, txq_depth,
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx overflow ep %d depth %d max %d\n",
+ endpoint->eid, txq_depth,
endpoint->max_txq_depth);
if (overflow && ep_cb.tx_full) {
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "indicating overflowed tx packet: 0x%p\n", tx_pkt);
-
if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
HTC_SEND_FULL_DROP) {
endpoint->ep_st.tx_dropped += 1;
@@ -625,12 +925,12 @@ static void htc_chk_ep_txq(struct htc_target *target)
* are not modifying any state.
*/
list_for_each_entry(cred_dist, &target->cred_dist_list, list) {
- endpoint = (struct htc_endpoint *)cred_dist->htc_rsvd;
+ endpoint = cred_dist->htc_ep;
spin_lock_bh(&target->tx_lock);
if (!list_empty(&endpoint->txq)) {
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "ep %d has %d credits and %d packets in tx queue\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc creds ep %d credits %d pkts %d\n",
cred_dist->endpoint,
endpoint->cred_dist.credits,
get_queue_depth(&endpoint->txq));
@@ -704,13 +1004,13 @@ static int htc_setup_tx_complete(struct htc_target *target)
}
void ath6kl_htc_set_credit_dist(struct htc_target *target,
- struct htc_credit_state_info *cred_dist_cntxt,
+ struct ath6kl_htc_credit_info *credit_info,
u16 srvc_pri_order[], int list_len)
{
struct htc_endpoint *endpoint;
int i, ep;
- target->cred_dist_cntxt = cred_dist_cntxt;
+ target->credit_info = credit_info;
list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list,
&target->cred_dist_list);
@@ -736,8 +1036,8 @@ int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet)
struct htc_endpoint *endpoint;
struct list_head queue;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "htc_tx: ep id: %d, buf: 0x%p, len: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx ep id %d buf 0x%p len %d\n",
packet->endpoint, packet->buf, packet->act_len);
if (packet->endpoint >= ENDPOINT_MAX) {
@@ -787,8 +1087,8 @@ void ath6kl_htc_flush_txep(struct htc_target *target,
list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) {
packet->status = -ECANCELED;
list_del(&packet->list);
- ath6kl_dbg(ATH6KL_DBG_TRC,
- "flushing tx pkt:0x%p, len:%d, ep:%d tag:0x%X\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx flushing pkt 0x%p len %d ep %d tag 0x%x\n",
packet, packet->act_len,
packet->endpoint, packet->info.tx.tag);
@@ -844,12 +1144,13 @@ void ath6kl_htc_indicate_activity_change(struct htc_target *target,
endpoint->cred_dist.txq_depth =
get_queue_depth(&endpoint->txq);
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
- target->cred_dist_cntxt, &target->cred_dist_list);
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc tx activity ctxt 0x%p dist 0x%p\n",
+ target->credit_info, &target->cred_dist_list);
- ath6k_credit_distribute(target->cred_dist_cntxt,
- &target->cred_dist_list,
- HTC_CREDIT_DIST_ACTIVITY_CHANGE);
+ ath6kl_credit_distribute(target->credit_info,
+ &target->cred_dist_list,
+ HTC_CREDIT_DIST_ACTIVITY_CHANGE);
}
spin_unlock_bh(&target->tx_lock);
@@ -919,15 +1220,15 @@ static int ath6kl_htc_rx_packet(struct htc_target *target,
padded_len = CALC_TXRX_PADDED_LEN(target, rx_len);
if (padded_len > packet->buf_len) {
- ath6kl_err("not enough receive space for packet - padlen:%d recvlen:%d bufferlen:%d\n",
+ ath6kl_err("not enough receive space for packet - padlen %d recvlen %d bufferlen %d\n",
padded_len, rx_len, packet->buf_len);
return -ENOMEM;
}
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "dev_rx_pkt (0x%p : hdr:0x%X) padded len: %d mbox:0x%X (mode:%s)\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx 0x%p hdr x%x len %d mbox 0x%x\n",
packet, packet->info.rx.exp_hdr,
- padded_len, dev->ar->mbox_info.htc_addr, "sync");
+ padded_len, dev->ar->mbox_info.htc_addr);
status = hif_read_write_sync(dev->ar,
dev->ar->mbox_info.htc_addr,
@@ -1137,8 +1438,8 @@ static int ath6kl_htc_rx_alloc(struct htc_target *target,
}
endpoint->ep_st.rx_bundle_from_hdr += 1;
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "htc hdr indicates :%d msg can be fetched as a bundle\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx bundle pkts %d\n",
n_msg);
} else
/* HTC header only indicates 1 message to fetch */
@@ -1191,8 +1492,8 @@ static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
packets->act_len + HTC_HDR_LENGTH);
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
- "Unexpected ENDPOINT 0 Message", "",
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC,
+ "htc rx unexpected endpoint 0 message", "",
packets->buf - HTC_HDR_LENGTH,
packets->act_len + HTC_HDR_LENGTH);
}
@@ -1209,9 +1510,6 @@ static void htc_proc_cred_rpt(struct htc_target *target,
int tot_credits = 0, i;
bool dist = false;
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "htc_proc_cred_rpt, credit report entries:%d\n", n_entries);
-
spin_lock_bh(&target->tx_lock);
for (i = 0; i < n_entries; i++, rpt++) {
@@ -1223,8 +1521,9 @@ static void htc_proc_cred_rpt(struct htc_target *target,
endpoint = &target->endpoint[rpt->eid];
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, " ep %d got %d credits\n",
- rpt->eid, rpt->credits);
+ ath6kl_dbg(ATH6KL_DBG_CREDIT,
+ "credit report ep %d credits %d\n",
+ rpt->eid, rpt->credits);
endpoint->ep_st.tx_cred_rpt += 1;
endpoint->ep_st.cred_retnd += rpt->credits;
@@ -1264,21 +1563,14 @@ static void htc_proc_cred_rpt(struct htc_target *target,
tot_credits += rpt->credits;
}
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND,
- "report indicated %d credits to distribute\n",
- tot_credits);
-
if (dist) {
/*
 * This was a credit return based on a completed send
 * operation. Note: this is done with the lock held.
*/
- ath6kl_dbg(ATH6KL_DBG_HTC_SEND, "ctxt:0x%p dist:0x%p\n",
- target->cred_dist_cntxt, &target->cred_dist_list);
-
- ath6k_credit_distribute(target->cred_dist_cntxt,
- &target->cred_dist_list,
- HTC_CREDIT_DIST_SEND_COMPLETE);
+ ath6kl_credit_distribute(target->credit_info,
+ &target->cred_dist_list,
+ HTC_CREDIT_DIST_SEND_COMPLETE);
}
spin_unlock_bh(&target->tx_lock);
@@ -1320,14 +1612,15 @@ static int htc_parse_trailer(struct htc_target *target,
if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF))
&& next_lk_ahds) {
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "lk_ahd report found (pre valid:0x%X, post valid:0x%X)\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n",
lk_ahd->pre_valid, lk_ahd->post_valid);
/* look ahead bytes are valid, copy them over */
memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4);
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Next Look Ahead",
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC,
+ "htc rx next look ahead",
"", next_lk_ahds, 4);
*n_lk_ahds = 1;
@@ -1346,7 +1639,7 @@ static int htc_parse_trailer(struct htc_target *target,
bundle_lkahd_rpt =
(struct htc_bundle_lkahd_rpt *) record_buf;
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Bundle lk_ahd",
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bundle lk_ahd",
"", record_buf, record->len);
for (i = 0; i < len; i++) {
@@ -1378,10 +1671,8 @@ static int htc_proc_trailer(struct htc_target *target,
u8 *record_buf;
u8 *orig_buf;
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV, "+htc_proc_trailer (len:%d)\n", len);
-
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Recv Trailer", "",
- buf, len);
+ ath6kl_dbg(ATH6KL_DBG_HTC, "htc rx trailer len %d\n", len);
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC, NULL, "", buf, len);
orig_buf = buf;
orig_len = len;
@@ -1418,7 +1709,7 @@ static int htc_proc_trailer(struct htc_target *target,
}
if (status)
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD Recv Trailer",
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad trailer",
"", orig_buf, orig_len);
return status;
@@ -1436,9 +1727,6 @@ static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
if (n_lkahds != NULL)
*n_lkahds = 0;
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "HTC Recv PKT", "htc ",
- packet->buf, packet->act_len);
-
/*
* NOTE: we cannot assume the alignment of buf, so we use the safe
* macros to retrieve 16 bit fields.
@@ -1480,9 +1768,9 @@ static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
if (lk_ahd != packet->info.rx.exp_hdr) {
ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
__func__, packet, packet->info.rx.rx_flags);
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Expected Message lk_ahd",
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx expected lk_ahd",
"", &packet->info.rx.exp_hdr, 4);
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "Current Frame Header",
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx current header",
"", (u8 *)&lk_ahd, sizeof(lk_ahd));
status = -ENOMEM;
goto fail_rx;
@@ -1518,15 +1806,8 @@ static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
fail_rx:
if (status)
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, "BAD HTC Recv PKT",
- "", packet->buf,
- packet->act_len < 256 ? packet->act_len : 256);
- else {
- if (packet->act_len > 0)
- ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES,
- "HTC - Application Msg", "",
- packet->buf, packet->act_len);
- }
+ ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad packet",
+ "", packet->buf, packet->act_len);
return status;
}
@@ -1534,8 +1815,8 @@ fail_rx:
static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
struct htc_packet *packet)
{
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "htc calling ep %d recv callback on packet 0x%p\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx complete ep %d packet 0x%p\n",
endpoint->eid, packet);
endpoint->ep_cb.rx(endpoint->target, packet);
}
@@ -1571,9 +1852,9 @@ static int ath6kl_htc_rx_bundle(struct htc_target *target,
len = 0;
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "%s(): (numpackets: %d , actual : %d)\n",
- __func__, get_queue_depth(rxq), n_scat_pkt);
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx bundle depth %d pkts %d\n",
+ get_queue_depth(rxq), n_scat_pkt);
scat_req = hif_scatter_req_get(target->dev->ar);
@@ -1620,7 +1901,7 @@ static int ath6kl_htc_rx_bundle(struct htc_target *target,
scat_req->len = len;
scat_req->scat_entries = i;
- status = ath6kldev_submit_scat_req(target->dev, scat_req, true);
+ status = ath6kl_hif_submit_scat_req(target->dev, scat_req, true);
if (!status)
*n_pkt_fetched = i;
@@ -1643,7 +1924,6 @@ static int ath6kl_htc_rx_process_packets(struct htc_target *target,
int status = 0;
list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
- list_del(&packet->list);
ep = &target->endpoint[packet->endpoint];
/* process header for each of the recv packet */
@@ -1652,6 +1932,8 @@ static int ath6kl_htc_rx_process_packets(struct htc_target *target,
if (status)
return status;
+ list_del(&packet->list);
+
if (list_empty(comp_pktq)) {
/*
* Last packet's more packet flag is set
@@ -1686,11 +1968,15 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target,
int fetched_pkts;
bool part_bundle = false;
int status = 0;
+ struct list_head tmp_rxq;
+ struct htc_packet *packet, *tmp_pkt;
/* now go fetch the list of HTC packets */
while (!list_empty(rx_pktq)) {
fetched_pkts = 0;
+ INIT_LIST_HEAD(&tmp_rxq);
+
if (target->rx_bndl_enable && (get_queue_depth(rx_pktq) > 1)) {
/*
* There are enough packets to attempt a
@@ -1698,28 +1984,27 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target,
* allowed.
*/
status = ath6kl_htc_rx_bundle(target, rx_pktq,
- comp_pktq,
+ &tmp_rxq,
&fetched_pkts,
part_bundle);
if (status)
- return status;
+ goto fail_rx;
if (!list_empty(rx_pktq))
part_bundle = true;
+
+ list_splice_tail_init(&tmp_rxq, comp_pktq);
}
if (!fetched_pkts) {
- struct htc_packet *packet;
packet = list_first_entry(rx_pktq, struct htc_packet,
list);
- list_del(&packet->list);
-
/* fully synchronous */
packet->completion = NULL;
- if (!list_empty(rx_pktq))
+ if (!list_is_singular(rx_pktq))
/*
* look_aheads in all packet
* except the last one in the
@@ -1731,18 +2016,42 @@ static int ath6kl_htc_rx_fetch(struct htc_target *target,
/* go fetch the packet */
status = ath6kl_htc_rx_packet(target, packet,
packet->act_len);
+
+ list_move_tail(&packet->list, &tmp_rxq);
+
if (status)
- return status;
+ goto fail_rx;
- list_add_tail(&packet->list, comp_pktq);
+ list_splice_tail_init(&tmp_rxq, comp_pktq);
}
}
+ return 0;
+
+fail_rx:
+
+ /*
+ * Cleanup any packets we allocated but didn't use to
+ * actually fetch any packets.
+ */
+
+ list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) {
+ list_del(&packet->list);
+ htc_reclaim_rxbuf(target, packet,
+ &target->endpoint[packet->endpoint]);
+ }
+
+ list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) {
+ list_del(&packet->list);
+ htc_reclaim_rxbuf(target, packet,
+ &target->endpoint[packet->endpoint]);
+ }
+
return status;
}
int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
- u32 msg_look_ahead[], int *num_pkts)
+ u32 msg_look_ahead, int *num_pkts)
{
struct htc_packet *packets, *tmp_pkt;
struct htc_endpoint *endpoint;
@@ -1759,7 +2068,7 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
* On first entry copy the look_aheads into our temp array for
* processing
*/
- memcpy(look_aheads, msg_look_ahead, sizeof(look_aheads));
+ look_aheads[0] = msg_look_ahead;
while (true) {
@@ -1827,15 +2136,6 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
if (status) {
ath6kl_err("failed to get pending recv messages: %d\n",
status);
- /*
- * Cleanup any packets we allocated but didn't use to
- * actually fetch any packets.
- */
- list_for_each_entry_safe(packets, tmp_pkt, &rx_pktq, list) {
- list_del(&packets->list);
- htc_reclaim_rxbuf(target, packets,
- &target->endpoint[packets->endpoint]);
- }
/* cleanup any packets in sync completion queue */
list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
@@ -1846,7 +2146,7 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
if (target->htc_flags & HTC_OP_STATE_STOPPING) {
ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
- ath6kldev_rx_control(target->dev, false);
+ ath6kl_hif_rx_control(target->dev, false);
}
}
@@ -1856,7 +2156,7 @@ int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
*/
if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
- ath6kldev_rx_control(target->dev, false);
+ ath6kl_hif_rx_control(target->dev, false);
}
*num_pkts = n_fetched;
@@ -1874,12 +2174,12 @@ static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
struct htc_frame_hdr *htc_hdr;
u32 look_ahead;
- if (ath6kldev_poll_mboxmsg_rx(target->dev, &look_ahead,
+ if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead,
HTC_TARGET_RESPONSE_TIMEOUT))
return NULL;
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "htc_wait_for_ctrl_msg: look_ahead : 0x%X\n", look_ahead);
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx wait ctrl look_ahead 0x%X\n", look_ahead);
htc_hdr = (struct htc_frame_hdr *)&look_ahead;
@@ -1943,8 +2243,8 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
depth = get_queue_depth(pkt_queue);
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "htc_add_rxbuf_multiple: ep id: %d, cnt:%d, len: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx add multiple ep id %d cnt %d len %d\n",
first_pkt->endpoint, depth, first_pkt->buf_len);
endpoint = &target->endpoint[first_pkt->endpoint];
@@ -1969,8 +2269,8 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
/* check if we are blocked waiting for a new buffer */
if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
if (target->ep_waiting == first_pkt->endpoint) {
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "receiver was blocked on ep:%d, unblocking.\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx blocked on ep %d, unblocking\n",
target->ep_waiting);
target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
target->ep_waiting = ENDPOINT_MAX;
@@ -1982,7 +2282,7 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
/* TODO : implement a buffer threshold count? */
- ath6kldev_rx_control(target->dev, true);
+ ath6kl_hif_rx_control(target->dev, true);
return status;
}
@@ -2004,8 +2304,8 @@ void ath6kl_htc_flush_rx_buf(struct htc_target *target)
&endpoint->rx_bufq, list) {
list_del(&packet->list);
spin_unlock_bh(&target->rx_lock);
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "flushing rx pkt:0x%p, len:%d, ep:%d\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc rx flush pkt 0x%p len %d ep %d\n",
packet, packet->buf_len,
packet->endpoint);
dev_kfree_skb(packet->pkt_cntxt);
@@ -2028,8 +2328,8 @@ int ath6kl_htc_conn_service(struct htc_target *target,
unsigned int max_msg_sz = 0;
int status = 0;
- ath6kl_dbg(ATH6KL_DBG_TRC,
- "htc_conn_service, target:0x%p service id:0x%X\n",
+ ath6kl_dbg(ATH6KL_DBG_HTC,
+ "htc connect service target 0x%p service id 0x%x\n",
target, conn_req->svc_id);
if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
@@ -2115,7 +2415,7 @@ int ath6kl_htc_conn_service(struct htc_target *target,
endpoint->len_max = max_msg_sz;
endpoint->ep_cb = conn_req->ep_cb;
endpoint->cred_dist.svc_id = conn_req->svc_id;
- endpoint->cred_dist.htc_rsvd = endpoint;
+ endpoint->cred_dist.htc_ep = endpoint;
endpoint->cred_dist.endpoint = assigned_ep;
endpoint->cred_dist.cred_sz = target->tgt_cred_sz;
@@ -2172,6 +2472,7 @@ static void reset_ep_state(struct htc_target *target)
}
/* reset distribution list */
+ /* FIXME: free existing entries */
INIT_LIST_HEAD(&target->cred_dist_list);
}
@@ -2201,8 +2502,8 @@ static void htc_setup_msg_bndl(struct htc_target *target)
target->msg_per_bndl_max = min(target->max_scat_entries,
target->msg_per_bndl_max);
- ath6kl_dbg(ATH6KL_DBG_TRC,
- "htc bundling allowed. max msg per htc bundle: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "htc bundling allowed msg_per_bndl_max %d\n",
target->msg_per_bndl_max);
/* Max rx bundle size is limited by the max tx bundle size */
@@ -2211,7 +2512,7 @@ static void htc_setup_msg_bndl(struct htc_target *target)
target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
target->max_xfer_szper_scatreq);
- ath6kl_dbg(ATH6KL_DBG_ANY, "max recv: %d max send: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "htc max_rx_bndl_sz %d max_tx_bndl_sz %d\n",
target->max_rx_bndl_sz, target->max_tx_bndl_sz);
if (target->max_tx_bndl_sz)
@@ -2265,8 +2566,8 @@ int ath6kl_htc_wait_target(struct htc_target *target)
target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);
- ath6kl_dbg(ATH6KL_DBG_HTC_RECV,
- "target ready: credits: %d credit size: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "htc target ready credits %d size %d\n",
target->tgt_creds, target->tgt_cred_sz);
/* check if this is an extended ready message */
@@ -2280,7 +2581,7 @@ int ath6kl_htc_wait_target(struct htc_target *target)
target->msg_per_bndl_max = 0;
}
- ath6kl_dbg(ATH6KL_DBG_TRC, "using htc protocol version : %s (%d)\n",
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "htc using protocol %s (%d)\n",
(target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
target->htc_tgt_ver);
@@ -2300,6 +2601,10 @@ int ath6kl_htc_wait_target(struct htc_target *target)
status = ath6kl_htc_conn_service((void *)target, &connect, &resp);
if (status)
+ /*
+ * FIXME: this call doesn't make sense; the caller should
+ * call ath6kl_htc_cleanup() when it wants to remove htc
+ */
ath6kl_hif_cleanup_scatter(target->dev->ar);
fail_wait_target:
@@ -2320,8 +2625,11 @@ int ath6kl_htc_start(struct htc_target *target)
struct htc_packet *packet;
int status;
+ memset(&target->dev->irq_proc_reg, 0,
+ sizeof(target->dev->irq_proc_reg));
+
/* Disable interrupts at the chip level */
- ath6kldev_disable_intrs(target->dev);
+ ath6kl_hif_disable_intrs(target->dev);
target->htc_flags = 0;
target->rx_st_flags = 0;
@@ -2334,8 +2642,8 @@ int ath6kl_htc_start(struct htc_target *target)
}
/* NOTE: the first entry in the distribution list is ENDPOINT_0 */
- ath6k_credit_init(target->cred_dist_cntxt, &target->cred_dist_list,
- target->tgt_creds);
+ ath6kl_credit_init(target->credit_info, &target->cred_dist_list,
+ target->tgt_creds);
dump_cred_dist_stats(target);
@@ -2346,7 +2654,7 @@ int ath6kl_htc_start(struct htc_target *target)
return status;
/* unmask interrupts */
- status = ath6kldev_unmask_intrs(target->dev);
+ status = ath6kl_hif_unmask_intrs(target->dev);
if (status)
ath6kl_htc_stop(target);
@@ -2354,6 +2662,44 @@ int ath6kl_htc_start(struct htc_target *target)
return status;
}
+static int ath6kl_htc_reset(struct htc_target *target)
+{
+ u32 block_size, ctrl_bufsz;
+ struct htc_packet *packet;
+ int i;
+
+ reset_ep_state(target);
+
+ block_size = target->dev->ar->mbox_info.block_size;
+
+ ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
+ (block_size + HTC_HDR_LENGTH) :
+ (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);
+
+ for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
+ packet = kzalloc(sizeof(*packet), GFP_KERNEL);
+ if (!packet)
+ return -ENOMEM;
+
+ packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
+ if (!packet->buf_start) {
+ kfree(packet);
+ return -ENOMEM;
+ }
+
+ packet->buf_len = ctrl_bufsz;
+ if (i < NUM_CONTROL_RX_BUFFERS) {
+ packet->act_len = 0;
+ packet->buf = packet->buf_start;
+ packet->endpoint = ENDPOINT_0;
+ list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
+ } else
+ list_add_tail(&packet->list, &target->free_ctrl_txbuf);
+ }
+
+ return 0;
+}
+
/* htc_stop: stop interrupt reception, and flush all queued buffers */
void ath6kl_htc_stop(struct htc_target *target)
{
@@ -2366,21 +2712,19 @@ void ath6kl_htc_stop(struct htc_target *target)
* function returns all pending HIF I/O has completed, we can
* safely flush the queues.
*/
- ath6kldev_mask_intrs(target->dev);
+ ath6kl_hif_mask_intrs(target->dev);
ath6kl_htc_flush_txep_all(target);
ath6kl_htc_flush_rx_buf(target);
- reset_ep_state(target);
+ ath6kl_htc_reset(target);
}
void *ath6kl_htc_create(struct ath6kl *ar)
{
struct htc_target *target = NULL;
- struct htc_packet *packet;
- int status = 0, i = 0;
- u32 block_size, ctrl_bufsz;
+ int status = 0;
target = kzalloc(sizeof(*target), GFP_KERNEL);
if (!target) {
@@ -2392,7 +2736,7 @@ void *ath6kl_htc_create(struct ath6kl *ar)
if (!target->dev) {
ath6kl_err("unable to allocate memory\n");
status = -ENOMEM;
- goto fail_create_htc;
+ goto err_htc_cleanup;
}
spin_lock_init(&target->htc_lock);
@@ -2407,49 +2751,20 @@ void *ath6kl_htc_create(struct ath6kl *ar)
target->dev->htc_cnxt = target;
target->ep_waiting = ENDPOINT_MAX;
- reset_ep_state(target);
-
- status = ath6kldev_setup(target->dev);
-
+ status = ath6kl_hif_setup(target->dev);
if (status)
- goto fail_create_htc;
-
- block_size = ar->mbox_info.block_size;
-
- ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ?
- (block_size + HTC_HDR_LENGTH) :
- (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH);
-
- for (i = 0; i < NUM_CONTROL_BUFFERS; i++) {
- packet = kzalloc(sizeof(*packet), GFP_KERNEL);
- if (!packet)
- break;
+ goto err_htc_cleanup;
- packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL);
- if (!packet->buf_start) {
- kfree(packet);
- break;
- }
+ status = ath6kl_htc_reset(target);
+ if (status)
+ goto err_htc_cleanup;
- packet->buf_len = ctrl_bufsz;
- if (i < NUM_CONTROL_RX_BUFFERS) {
- packet->act_len = 0;
- packet->buf = packet->buf_start;
- packet->endpoint = ENDPOINT_0;
- list_add_tail(&packet->list, &target->free_ctrl_rxbuf);
- } else
- list_add_tail(&packet->list, &target->free_ctrl_txbuf);
- }
+ return target;
-fail_create_htc:
- if (i != NUM_CONTROL_BUFFERS || status) {
- if (target) {
- ath6kl_htc_cleanup(target);
- target = NULL;
- }
- }
+err_htc_cleanup:
+ ath6kl_htc_cleanup(target);
- return target;
+ return NULL;
}
/* cleanup the HTC instance */
diff --git a/drivers/net/wireless/ath/ath6kl/htc.h b/drivers/net/wireless/ath/ath6kl/htc.h
index 8ce0c2c..57672e1 100644
--- a/drivers/net/wireless/ath/ath6kl/htc.h
+++ b/drivers/net/wireless/ath/ath6kl/htc.h
@@ -393,7 +393,7 @@ struct htc_endpoint_credit_dist {
int cred_per_msg;
/* reserved for HTC use */
- void *htc_rsvd;
+ struct htc_endpoint *htc_ep;
/*
 * current depth of TX queue, i.e. messages waiting for credits
@@ -414,9 +414,11 @@ enum htc_credit_dist_reason {
HTC_CREDIT_DIST_SEEK_CREDITS,
};
-struct htc_credit_state_info {
+struct ath6kl_htc_credit_info {
int total_avail_credits;
int cur_free_credits;
+
+ /* list of lowest priority endpoints */
struct list_head lowestpri_ep_dist;
};
@@ -508,10 +510,13 @@ struct ath6kl_device;
/* our HTC target state */
struct htc_target {
struct htc_endpoint endpoint[ENDPOINT_MAX];
+
+ /* contains struct htc_endpoint_credit_dist */
struct list_head cred_dist_list;
+
struct list_head free_ctrl_txbuf;
struct list_head free_ctrl_rxbuf;
- struct htc_credit_state_info *cred_dist_cntxt;
+ struct ath6kl_htc_credit_info *credit_info;
int tgt_creds;
unsigned int tgt_cred_sz;
spinlock_t htc_lock;
@@ -542,7 +547,7 @@ struct htc_target {
void *ath6kl_htc_create(struct ath6kl *ar);
void ath6kl_htc_set_credit_dist(struct htc_target *target,
- struct htc_credit_state_info *cred_info,
+ struct ath6kl_htc_credit_info *cred_info,
u16 svc_pri_order[], int len);
int ath6kl_htc_wait_target(struct htc_target *target);
int ath6kl_htc_start(struct htc_target *target);
@@ -563,7 +568,10 @@ int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
struct list_head *pktq);
int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
- u32 msg_look_ahead[], int *n_pkts);
+ u32 msg_look_ahead, int *n_pkts);
+
+int ath6kl_credit_setup(void *htc_handle,
+ struct ath6kl_htc_credit_info *cred_info);
static inline void set_htc_pkt_info(struct htc_packet *packet, void *context,
u8 *buf, unsigned int len,
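
The split between ath6kl_credit_setup() and ath6kl_htc_set_credit_dist() declared above means the caller only has to own an ath6kl_htc_credit_info and hand it over once; HTC then drives ath6kl_credit_init() and ath6kl_credit_distribute() internally. A minimal usage sketch, assuming the usual kernel headers; how the structure is allocated and freed here is illustrative, not taken from the driver:

static int example_credit_wiring(struct ath6kl *ar)
{
        struct ath6kl_htc_credit_info *credit_info;

        credit_info = kzalloc(sizeof(*credit_info), GFP_KERNEL);
        if (!credit_info)
                return -ENOMEM;

        /*
         * Sets the WMI_CONTROL > VO > VI > BE > BK priority order and
         * stores credit_info in target->credit_info via
         * ath6kl_htc_set_credit_dist(); freeing credit_info on teardown
         * stays the caller's responsibility and is omitted here.
         */
        return ath6kl_credit_setup(ar->htc_target, credit_info);
}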
diff --git a/drivers/net/wireless/ath/ath6kl/htc_hif.h b/drivers/net/wireless/ath/ath6kl/htc_hif.h
deleted file mode 100644
index 171ad63..0000000
--- a/drivers/net/wireless/ath/ath6kl/htc_hif.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/*
- * Copyright (c) 2007-2011 Atheros Communications Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef HTC_HIF_H
-#define HTC_HIF_H
-
-#include "htc.h"
-#include "hif.h"
-
-#define ATH6KL_MAILBOXES 4
-
-/* HTC runs over mailbox 0 */
-#define HTC_MAILBOX 0
-
-#define ATH6KL_TARGET_DEBUG_INTR_MASK 0x01
-
-#define OTHER_INTS_ENABLED (INT_STATUS_ENABLE_ERROR_MASK | \
- INT_STATUS_ENABLE_CPU_MASK | \
- INT_STATUS_ENABLE_COUNTER_MASK)
-
-#define ATH6KL_REG_IO_BUFFER_SIZE 32
-#define ATH6KL_MAX_REG_IO_BUFFERS 8
-#define ATH6KL_SCATTER_ENTRIES_PER_REQ 16
-#define ATH6KL_MAX_TRANSFER_SIZE_PER_SCATTER (16 * 1024)
-#define ATH6KL_SCATTER_REQS 4
-
-#ifndef A_CACHE_LINE_PAD
-#define A_CACHE_LINE_PAD 128
-#endif
-#define ATH6KL_MIN_SCATTER_ENTRIES_PER_REQ 2
-#define ATH6KL_MIN_TRANSFER_SIZE_PER_SCATTER (4 * 1024)
-
-struct ath6kl_irq_proc_registers {
- u8 host_int_status;
- u8 cpu_int_status;
- u8 error_int_status;
- u8 counter_int_status;
- u8 mbox_frame;
- u8 rx_lkahd_valid;
- u8 host_int_status2;
- u8 gmbox_rx_avail;
- __le32 rx_lkahd[2];
- __le32 rx_gmbox_lkahd_alias[2];
-} __packed;
-
-struct ath6kl_irq_enable_reg {
- u8 int_status_en;
- u8 cpu_int_status_en;
- u8 err_int_status_en;
- u8 cntr_int_status_en;
-} __packed;
-
-struct ath6kl_device {
- spinlock_t lock;
- u8 pad1[A_CACHE_LINE_PAD];
- struct ath6kl_irq_proc_registers irq_proc_reg;
- u8 pad2[A_CACHE_LINE_PAD];
- struct ath6kl_irq_enable_reg irq_en_reg;
- u8 pad3[A_CACHE_LINE_PAD];
- struct htc_target *htc_cnxt;
- struct ath6kl *ar;
-};
-
-int ath6kldev_setup(struct ath6kl_device *dev);
-int ath6kldev_unmask_intrs(struct ath6kl_device *dev);
-int ath6kldev_mask_intrs(struct ath6kl_device *dev);
-int ath6kldev_poll_mboxmsg_rx(struct ath6kl_device *dev,
- u32 *lk_ahd, int timeout);
-int ath6kldev_rx_control(struct ath6kl_device *dev, bool enable_rx);
-int ath6kldev_disable_intrs(struct ath6kl_device *dev);
-
-int ath6kldev_rw_comp_handler(void *context, int status);
-int ath6kldev_intr_bh_handler(struct ath6kl *ar);
-
-/* Scatter Function and Definitions */
-int ath6kldev_submit_scat_req(struct ath6kl_device *dev,
- struct hif_scatter_req *scat_req, bool read);
-
-#endif /*ATH6KL_H_ */
diff --git a/drivers/net/wireless/ath/ath6kl/init.c b/drivers/net/wireless/ath/ath6kl/init.c
index c1d2366..7f55be3 100644
--- a/drivers/net/wireless/ath/ath6kl/init.c
+++ b/drivers/net/wireless/ath/ath6kl/init.c
@@ -16,6 +16,7 @@
*/
#include <linux/moduleparam.h>
+#include <linux/errno.h>
#include <linux/of.h>
#include <linux/mmc/sdio_func.h>
#include "core.h"
@@ -26,9 +27,85 @@
unsigned int debug_mask;
static unsigned int testmode;
+static bool suspend_cutpower;
module_param(debug_mask, uint, 0644);
module_param(testmode, uint, 0644);
+module_param(suspend_cutpower, bool, 0444);
+
+static const struct ath6kl_hw hw_list[] = {
+ {
+ .id = AR6003_HW_2_0_VERSION,
+ .name = "ar6003 hw 2.0",
+ .dataset_patch_addr = 0x57e884,
+ .app_load_addr = 0x543180,
+ .board_ext_data_addr = 0x57e500,
+ .reserved_ram_size = 6912,
+ .refclk_hz = 26000000,
+ .uarttx_pin = 8,
+
+ /* hw2.0 needs override address hardcoded */
+ .app_start_override_addr = 0x944C00,
+
+ .fw_otp = AR6003_HW_2_0_OTP_FILE,
+ .fw = AR6003_HW_2_0_FIRMWARE_FILE,
+ .fw_tcmd = AR6003_HW_2_0_TCMD_FIRMWARE_FILE,
+ .fw_patch = AR6003_HW_2_0_PATCH_FILE,
+ .fw_api2 = AR6003_HW_2_0_FIRMWARE_2_FILE,
+ .fw_board = AR6003_HW_2_0_BOARD_DATA_FILE,
+ .fw_default_board = AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE,
+ },
+ {
+ .id = AR6003_HW_2_1_1_VERSION,
+ .name = "ar6003 hw 2.1.1",
+ .dataset_patch_addr = 0x57ff74,
+ .app_load_addr = 0x1234,
+ .board_ext_data_addr = 0x542330,
+ .reserved_ram_size = 512,
+ .refclk_hz = 26000000,
+ .uarttx_pin = 8,
+
+ .fw_otp = AR6003_HW_2_1_1_OTP_FILE,
+ .fw = AR6003_HW_2_1_1_FIRMWARE_FILE,
+ .fw_tcmd = AR6003_HW_2_1_1_TCMD_FIRMWARE_FILE,
+ .fw_patch = AR6003_HW_2_1_1_PATCH_FILE,
+ .fw_api2 = AR6003_HW_2_1_1_FIRMWARE_2_FILE,
+ .fw_board = AR6003_HW_2_1_1_BOARD_DATA_FILE,
+ .fw_default_board = AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE,
+ },
+ {
+ .id = AR6004_HW_1_0_VERSION,
+ .name = "ar6004 hw 1.0",
+ .dataset_patch_addr = 0x57e884,
+ .app_load_addr = 0x1234,
+ .board_ext_data_addr = 0x437000,
+ .reserved_ram_size = 19456,
+ .board_addr = 0x433900,
+ .refclk_hz = 26000000,
+ .uarttx_pin = 11,
+
+ .fw = AR6004_HW_1_0_FIRMWARE_FILE,
+ .fw_api2 = AR6004_HW_1_0_FIRMWARE_2_FILE,
+ .fw_board = AR6004_HW_1_0_BOARD_DATA_FILE,
+ .fw_default_board = AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE,
+ },
+ {
+ .id = AR6004_HW_1_1_VERSION,
+ .name = "ar6004 hw 1.1",
+ .dataset_patch_addr = 0x57e884,
+ .app_load_addr = 0x1234,
+ .board_ext_data_addr = 0x437000,
+ .reserved_ram_size = 11264,
+ .board_addr = 0x43d400,
+ .refclk_hz = 40000000,
+ .uarttx_pin = 11,
+
+ .fw = AR6004_HW_1_1_FIRMWARE_FILE,
+ .fw_api2 = AR6004_HW_1_1_FIRMWARE_2_FILE,
+ .fw_board = AR6004_HW_1_1_BOARD_DATA_FILE,
+ .fw_default_board = AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE,
+ },
+};
/*
* Include definitions here that can be used to tune the WLAN module
@@ -55,7 +132,6 @@ module_param(testmode, uint, 0644);
*/
#define WLAN_CONFIG_DISCONNECT_TIMEOUT 10
-#define CONFIG_AR600x_DEBUG_UART_TX_PIN 8
#define ATH6KL_DATA_OFFSET 64
struct sk_buff *ath6kl_buf_alloc(int size)
@@ -73,37 +149,21 @@ struct sk_buff *ath6kl_buf_alloc(int size)
return skb;
}
-void ath6kl_init_profile_info(struct ath6kl *ar)
+void ath6kl_init_profile_info(struct ath6kl_vif *vif)
{
- ar->ssid_len = 0;
- memset(ar->ssid, 0, sizeof(ar->ssid));
-
- ar->dot11_auth_mode = OPEN_AUTH;
- ar->auth_mode = NONE_AUTH;
- ar->prwise_crypto = NONE_CRYPT;
- ar->prwise_crypto_len = 0;
- ar->grp_crypto = NONE_CRYPT;
- ar->grp_crypto_len = 0;
- memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list));
- memset(ar->req_bssid, 0, sizeof(ar->req_bssid));
- memset(ar->bssid, 0, sizeof(ar->bssid));
- ar->bss_ch = 0;
- ar->nw_type = ar->next_mode = INFRA_NETWORK;
-}
-
-static u8 ath6kl_get_fw_iftype(struct ath6kl *ar)
-{
- switch (ar->nw_type) {
- case INFRA_NETWORK:
- return HI_OPTION_FW_MODE_BSS_STA;
- case ADHOC_NETWORK:
- return HI_OPTION_FW_MODE_IBSS;
- case AP_NETWORK:
- return HI_OPTION_FW_MODE_AP;
- default:
- ath6kl_err("Unsupported interface type :%d\n", ar->nw_type);
- return 0xff;
- }
+ vif->ssid_len = 0;
+ memset(vif->ssid, 0, sizeof(vif->ssid));
+
+ vif->dot11_auth_mode = OPEN_AUTH;
+ vif->auth_mode = NONE_AUTH;
+ vif->prwise_crypto = NONE_CRYPT;
+ vif->prwise_crypto_len = 0;
+ vif->grp_crypto = NONE_CRYPT;
+ vif->grp_crypto_len = 0;
+ memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list));
+ memset(vif->req_bssid, 0, sizeof(vif->req_bssid));
+ memset(vif->bssid, 0, sizeof(vif->bssid));
+ vif->bss_ch = 0;
}
static int ath6kl_set_host_app_area(struct ath6kl *ar)
@@ -120,7 +180,7 @@ static int ath6kl_set_host_app_area(struct ath6kl *ar)
return -EIO;
address = TARG_VTOP(ar->target_type, data);
- host_app_area.wmi_protocol_ver = WMI_PROTOCOL_VERSION;
+ host_app_area.wmi_protocol_ver = cpu_to_le32(WMI_PROTOCOL_VERSION);
if (ath6kl_diag_write(ar, address, (u8 *) &host_app_area,
sizeof(struct host_app_area)))
return -EIO;
@@ -258,40 +318,12 @@ static int ath6kl_init_service_ep(struct ath6kl *ar)
return 0;
}
-static void ath6kl_init_control_info(struct ath6kl *ar)
+void ath6kl_init_control_info(struct ath6kl_vif *vif)
{
- u8 ctr;
-
- clear_bit(WMI_ENABLED, &ar->flag);
- ath6kl_init_profile_info(ar);
- ar->def_txkey_index = 0;
- memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list));
- ar->ch_hint = 0;
- ar->listen_intvl_t = A_DEFAULT_LISTEN_INTERVAL;
- ar->listen_intvl_b = 0;
- ar->tx_pwr = 0;
- clear_bit(SKIP_SCAN, &ar->flag);
- set_bit(WMM_ENABLED, &ar->flag);
- ar->intra_bss = 1;
- memset(&ar->sc_params, 0, sizeof(ar->sc_params));
- ar->sc_params.short_scan_ratio = WMI_SHORTSCANRATIO_DEFAULT;
- ar->sc_params.scan_ctrl_flags = DEFAULT_SCAN_CTRL_FLAGS;
- ar->lrssi_roam_threshold = DEF_LRSSI_ROAM_THRESHOLD;
-
- memset((u8 *)ar->sta_list, 0,
- AP_MAX_NUM_STA * sizeof(struct ath6kl_sta));
-
- spin_lock_init(&ar->mcastpsq_lock);
-
- /* Init the PS queues */
- for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
- spin_lock_init(&ar->sta_list[ctr].psq_lock);
- skb_queue_head_init(&ar->sta_list[ctr].psq);
- }
-
- skb_queue_head_init(&ar->mcastpsq);
-
- memcpy(ar->ap_country_code, DEF_AP_COUNTRY_CODE, 3);
+ ath6kl_init_profile_info(vif);
+ vif->def_txkey_index = 0;
+ memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list));
+ vif->ch_hint = 0;
}
/*
@@ -341,62 +373,7 @@ out:
return status;
}
-#define REG_DUMP_COUNT_AR6003 60
-#define REGISTER_DUMP_LEN_MAX 60
-
-static void ath6kl_dump_target_assert_info(struct ath6kl *ar)
-{
- u32 address;
- u32 regdump_loc = 0;
- int status;
- u32 regdump_val[REGISTER_DUMP_LEN_MAX];
- u32 i;
-
- if (ar->target_type != TARGET_TYPE_AR6003)
- return;
-
- /* the reg dump pointer is copied to the host interest area */
- address = ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_failure_state));
- address = TARG_VTOP(ar->target_type, address);
-
- /* read RAM location through diagnostic window */
- status = ath6kl_diag_read32(ar, address, &regdump_loc);
-
- if (status || !regdump_loc) {
- ath6kl_err("failed to get ptr to register dump area\n");
- return;
- }
-
- ath6kl_dbg(ATH6KL_DBG_TRC, "location of register dump data: 0x%X\n",
- regdump_loc);
- regdump_loc = TARG_VTOP(ar->target_type, regdump_loc);
-
- /* fetch register dump data */
- status = ath6kl_diag_read(ar, regdump_loc, (u8 *)&regdump_val[0],
- REG_DUMP_COUNT_AR6003 * (sizeof(u32)));
-
- if (status) {
- ath6kl_err("failed to get register dump\n");
- return;
- }
- ath6kl_dbg(ATH6KL_DBG_TRC, "Register Dump:\n");
-
- for (i = 0; i < REG_DUMP_COUNT_AR6003; i++)
- ath6kl_dbg(ATH6KL_DBG_TRC, " %d : 0x%8.8X\n",
- i, regdump_val[i]);
-
-}
-
-void ath6kl_target_failure(struct ath6kl *ar)
-{
- ath6kl_err("target asserted\n");
-
- /* try dumping target assertion information (if any) */
- ath6kl_dump_target_assert_info(ar);
-
-}
-
-static int ath6kl_target_config_wlan_params(struct ath6kl *ar)
+static int ath6kl_target_config_wlan_params(struct ath6kl *ar, int idx)
{
int status = 0;
int ret;
@@ -406,46 +383,46 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar)
* default values. Required if checksum offload is needed. Set
* RxMetaVersion to 2.
*/
- if (ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi,
+ if (ath6kl_wmi_set_rx_frame_format_cmd(ar->wmi, idx,
ar->rx_meta_ver, 0, 0)) {
ath6kl_err("unable to set the rx frame format\n");
status = -EIO;
}
if (ar->conf_flags & ATH6KL_CONF_IGNORE_PS_FAIL_EVT_IN_SCAN)
- if ((ath6kl_wmi_pmparams_cmd(ar->wmi, 0, 1, 0, 0, 1,
+ if ((ath6kl_wmi_pmparams_cmd(ar->wmi, idx, 0, 1, 0, 0, 1,
IGNORE_POWER_SAVE_FAIL_EVENT_DURING_SCAN)) != 0) {
ath6kl_err("unable to set power save fail event policy\n");
status = -EIO;
}
if (!(ar->conf_flags & ATH6KL_CONF_IGNORE_ERP_BARKER))
- if ((ath6kl_wmi_set_lpreamble_cmd(ar->wmi, 0,
+ if ((ath6kl_wmi_set_lpreamble_cmd(ar->wmi, idx, 0,
WMI_DONOT_IGNORE_BARKER_IN_ERP)) != 0) {
ath6kl_err("unable to set barker preamble policy\n");
status = -EIO;
}
- if (ath6kl_wmi_set_keepalive_cmd(ar->wmi,
+ if (ath6kl_wmi_set_keepalive_cmd(ar->wmi, idx,
WLAN_CONFIG_KEEP_ALIVE_INTERVAL)) {
ath6kl_err("unable to set keep alive interval\n");
status = -EIO;
}
- if (ath6kl_wmi_disctimeout_cmd(ar->wmi,
+ if (ath6kl_wmi_disctimeout_cmd(ar->wmi, idx,
WLAN_CONFIG_DISCONNECT_TIMEOUT)) {
ath6kl_err("unable to set disconnect timeout\n");
status = -EIO;
}
if (!(ar->conf_flags & ATH6KL_CONF_ENABLE_TX_BURST))
- if (ath6kl_wmi_set_wmm_txop(ar->wmi, WMI_TXOP_DISABLED)) {
+ if (ath6kl_wmi_set_wmm_txop(ar->wmi, idx, WMI_TXOP_DISABLED)) {
ath6kl_err("unable to set txop bursting\n");
status = -EIO;
}
- if (ar->p2p) {
- ret = ath6kl_wmi_info_req_cmd(ar->wmi,
+ if (ar->p2p && (ar->vif_max == 1 || idx)) {
+ ret = ath6kl_wmi_info_req_cmd(ar->wmi, idx,
P2P_FLAG_CAPABILITIES_REQ |
P2P_FLAG_MACADDR_REQ |
P2P_FLAG_HMODEL_REQ);
@@ -453,13 +430,13 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar)
ath6kl_dbg(ATH6KL_DBG_TRC, "failed to request P2P "
"capabilities (%d) - assuming P2P not "
"supported\n", ret);
- ar->p2p = 0;
+ ar->p2p = false;
}
}
- if (ar->p2p) {
+ if (ar->p2p && (ar->vif_max == 1 || idx)) {
/* Enable Probe Request reporting for P2P */
- ret = ath6kl_wmi_probe_report_req_cmd(ar->wmi, true);
+ ret = ath6kl_wmi_probe_report_req_cmd(ar->wmi, idx, true);
if (ret) {
ath6kl_dbg(ATH6KL_DBG_TRC, "failed to enable Probe "
"Request reporting (%d)\n", ret);
@@ -472,13 +449,40 @@ static int ath6kl_target_config_wlan_params(struct ath6kl *ar)
int ath6kl_configure_target(struct ath6kl *ar)
{
u32 param, ram_reserved_size;
- u8 fw_iftype;
+ u8 fw_iftype, fw_mode = 0, fw_submode = 0;
+ int i, status;
- fw_iftype = ath6kl_get_fw_iftype(ar);
- if (fw_iftype == 0xff)
- return -EINVAL;
+ /*
+ * Note: Even though the firmware interface type is
+	 * chosen as BSS_STA for all three interfaces, they can
+	 * be configured as IBSS/AP as long as the fw submode
+	 * remains normal mode (0 - AP, STA and IBSS). But
+	 * due to a target assert in the firmware, only one interface is
+ * configured for now.
+ */
+ fw_iftype = HI_OPTION_FW_MODE_BSS_STA;
+
+ for (i = 0; i < ar->vif_max; i++)
+ fw_mode |= fw_iftype << (i * HI_OPTION_FW_MODE_BITS);
+
+ /*
+	 * By default, submodes:
+ * vif[0] - AP/STA/IBSS
+ * vif[1] - "P2P dev"/"P2P GO"/"P2P Client"
+ * vif[2] - "P2P dev"/"P2P GO"/"P2P Client"
+ */
+
+ for (i = 0; i < ar->max_norm_iface; i++)
+ fw_submode |= HI_OPTION_FW_SUBMODE_NONE <<
+ (i * HI_OPTION_FW_SUBMODE_BITS);
+
+ for (i = ar->max_norm_iface; i < ar->vif_max; i++)
+ fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV <<
+ (i * HI_OPTION_FW_SUBMODE_BITS);
+
+ if (ar->p2p && ar->vif_max == 1)
+ fw_submode = HI_OPTION_FW_SUBMODE_P2PDEV;
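The two loops above pack one mode value and one submode value per virtual interface into the u8 fw_mode / fw_submode words, shifting by HI_OPTION_FW_MODE_BITS / HI_OPTION_FW_SUBMODE_BITS per interface; the field widths themselves are defined outside this diff. A minimal standalone sketch of that packing, with the per-field width passed in as an assumption:

#include <stdint.h>

/* pack nvif small values, each 'bits' wide, LSB-first into one byte */
static uint8_t pack_per_vif(const uint8_t *vals, int nvif, int bits)
{
	uint8_t packed = 0;
	int i;

	for (i = 0; i < nvif; i++)
		packed |= (uint8_t)(vals[i] << (i * bits));

	return packed;
}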
- /* Tell target which HTC version it is used*/
param = HTC_PROTOCOL_VERSION;
if (ath6kl_bmi_write(ar,
ath6kl_get_hi_item_addr(ar,
@@ -499,12 +503,10 @@ int ath6kl_configure_target(struct ath6kl *ar)
return -EIO;
}
- param |= (1 << HI_OPTION_NUM_DEV_SHIFT);
- param |= (fw_iftype << HI_OPTION_FW_MODE_SHIFT);
- if (ar->p2p && fw_iftype == HI_OPTION_FW_MODE_BSS_STA) {
- param |= HI_OPTION_FW_SUBMODE_P2PDEV <<
- HI_OPTION_FW_SUBMODE_SHIFT;
- }
+ param |= (ar->vif_max << HI_OPTION_NUM_DEV_SHIFT);
+ param |= fw_mode << HI_OPTION_FW_MODE_SHIFT;
+ param |= fw_submode << HI_OPTION_FW_SUBMODE_SHIFT;
+
param |= (0 << HI_OPTION_MAC_ADDR_METHOD_SHIFT);
param |= (0 << HI_OPTION_FW_BRIDGE_SHIFT);
@@ -550,71 +552,55 @@ int ath6kl_configure_target(struct ath6kl *ar)
/* use default number of control buffers */
return -EIO;
+ /* Configure GPIO AR600x UART */
+ param = ar->hw.uarttx_pin;
+ status = ath6kl_bmi_write(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_dbg_uart_txpin)),
+ (u8 *)&param, 4);
+ if (status)
+ return status;
+
+ /* Configure target refclk_hz */
+ param = ar->hw.refclk_hz;
+ status = ath6kl_bmi_write(ar,
+ ath6kl_get_hi_item_addr(ar,
+ HI_ITEM(hi_refclk_hz)),
+ (u8 *)&param, 4);
+ if (status)
+ return status;
+
return 0;
}
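The uarttx_pin and refclk_hz writes above both follow the same pattern: store a 32-bit value into a host-interest item through BMI. A hypothetical helper, sketched only to show the shape of the idiom (ath6kl_bmi_write()'s prototype is assumed from its uses in this function, not quoted from a header):

static int ath6kl_bmi_write_hi32(struct ath6kl *ar, u32 item_addr, u32 value)
{
	u32 param = value;	/* BMI takes a byte pointer plus a length */

	return ath6kl_bmi_write(ar, item_addr, (u8 *)&param, 4);
}

/*
 * usage, equivalent to the refclk_hz block above:
 *	status = ath6kl_bmi_write_hi32(ar,
 *			ath6kl_get_hi_item_addr(ar, HI_ITEM(hi_refclk_hz)),
 *			ar->hw.refclk_hz);
 */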
-struct ath6kl *ath6kl_core_alloc(struct device *sdev)
+void ath6kl_core_free(struct ath6kl *ar)
{
- struct net_device *dev;
- struct ath6kl *ar;
- struct wireless_dev *wdev;
-
- wdev = ath6kl_cfg80211_init(sdev);
- if (!wdev) {
- ath6kl_err("ath6kl_cfg80211_init failed\n");
- return NULL;
- }
-
- ar = wdev_priv(wdev);
- ar->dev = sdev;
- ar->wdev = wdev;
- wdev->iftype = NL80211_IFTYPE_STATION;
-
- if (ath6kl_debug_init(ar)) {
- ath6kl_err("Failed to initialize debugfs\n");
- ath6kl_cfg80211_deinit(ar);
- return NULL;
- }
-
- dev = alloc_netdev(0, "wlan%d", ether_setup);
- if (!dev) {
- ath6kl_err("no memory for network device instance\n");
- ath6kl_cfg80211_deinit(ar);
- return NULL;
- }
-
- dev->ieee80211_ptr = wdev;
- SET_NETDEV_DEV(dev, wiphy_dev(wdev->wiphy));
- wdev->netdev = dev;
- ar->sme_state = SME_DISCONNECTED;
-
- init_netdev(dev);
+ wiphy_free(ar->wiphy);
+}
- ar->net_dev = dev;
- set_bit(WLAN_ENABLED, &ar->flag);
+void ath6kl_core_cleanup(struct ath6kl *ar)
+{
+ ath6kl_hif_power_off(ar);
- ar->wlan_pwr_state = WLAN_POWER_STATE_ON;
+ destroy_workqueue(ar->ath6kl_wq);
- spin_lock_init(&ar->lock);
+ if (ar->htc_target)
+ ath6kl_htc_cleanup(ar->htc_target);
- ath6kl_init_control_info(ar);
- init_waitqueue_head(&ar->event_wq);
- sema_init(&ar->sem, 1);
- clear_bit(DESTROY_IN_PROGRESS, &ar->flag);
+ ath6kl_cookie_cleanup(ar);
- INIT_LIST_HEAD(&ar->amsdu_rx_buffer_queue);
+ ath6kl_cleanup_amsdu_rxbufs(ar);
- setup_timer(&ar->disconnect_timer, disconnect_timer_handler,
- (unsigned long) dev);
+ ath6kl_bmi_cleanup(ar);
- return ar;
-}
+ ath6kl_debug_cleanup(ar);
-int ath6kl_unavail_ev(struct ath6kl *ar)
-{
- ath6kl_destroy(ar->net_dev, 1);
+ kfree(ar->fw_board);
+ kfree(ar->fw_otp);
+ kfree(ar->fw);
+ kfree(ar->fw_patch);
- return 0;
+ ath6kl_deinit_ieee80211_hw(ar);
}
/* firmware upload */
@@ -643,11 +629,11 @@ static int ath6kl_get_fw(struct ath6kl *ar, const char *filename,
static const char *get_target_ver_dir(const struct ath6kl *ar)
{
switch (ar->version.target_ver) {
- case AR6003_REV1_VERSION:
+ case AR6003_HW_1_0_VERSION:
return "ath6k/AR6003/hw1.0";
- case AR6003_REV2_VERSION:
+ case AR6003_HW_2_0_VERSION:
return "ath6k/AR6003/hw2.0";
- case AR6003_REV3_VERSION:
+ case AR6003_HW_2_1_1_VERSION:
return "ath6k/AR6003/hw2.1.1";
}
ath6kl_warn("%s: unsupported target version 0x%x.\n", __func__,
@@ -705,17 +691,10 @@ static int ath6kl_fetch_board_file(struct ath6kl *ar)
if (ar->fw_board != NULL)
return 0;
- switch (ar->version.target_ver) {
- case AR6003_REV2_VERSION:
- filename = AR6003_REV2_BOARD_DATA_FILE;
- break;
- case AR6004_REV1_VERSION:
- filename = AR6004_REV1_BOARD_DATA_FILE;
- break;
- default:
- filename = AR6003_REV3_BOARD_DATA_FILE;
- break;
- }
+ if (WARN_ON(ar->hw.fw_board == NULL))
+ return -EINVAL;
+
+ filename = ar->hw.fw_board;
ret = ath6kl_get_fw(ar, filename, &ar->fw_board,
&ar->fw_board_len);
@@ -733,17 +712,7 @@ static int ath6kl_fetch_board_file(struct ath6kl *ar)
ath6kl_warn("Failed to get board file %s (%d), trying to find default board file.\n",
filename, ret);
- switch (ar->version.target_ver) {
- case AR6003_REV2_VERSION:
- filename = AR6003_REV2_DEFAULT_BOARD_DATA_FILE;
- break;
- case AR6004_REV1_VERSION:
- filename = AR6004_REV1_DEFAULT_BOARD_DATA_FILE;
- break;
- default:
- filename = AR6003_REV3_DEFAULT_BOARD_DATA_FILE;
- break;
- }
+ filename = ar->hw.fw_default_board;
ret = ath6kl_get_fw(ar, filename, &ar->fw_board,
&ar->fw_board_len);
@@ -767,19 +736,14 @@ static int ath6kl_fetch_otp_file(struct ath6kl *ar)
if (ar->fw_otp != NULL)
return 0;
- switch (ar->version.target_ver) {
- case AR6003_REV2_VERSION:
- filename = AR6003_REV2_OTP_FILE;
- break;
- case AR6004_REV1_VERSION:
- ath6kl_dbg(ATH6KL_DBG_TRC, "AR6004 doesn't need OTP file\n");
+ if (ar->hw.fw_otp == NULL) {
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "no OTP file configured for this hw\n");
return 0;
- break;
- default:
- filename = AR6003_REV3_OTP_FILE;
- break;
}
+ filename = ar->hw.fw_otp;
+
ret = ath6kl_get_fw(ar, filename, &ar->fw_otp,
&ar->fw_otp_len);
if (ret) {
@@ -800,38 +764,22 @@ static int ath6kl_fetch_fw_file(struct ath6kl *ar)
return 0;
if (testmode) {
- switch (ar->version.target_ver) {
- case AR6003_REV2_VERSION:
- filename = AR6003_REV2_TCMD_FIRMWARE_FILE;
- break;
- case AR6003_REV3_VERSION:
- filename = AR6003_REV3_TCMD_FIRMWARE_FILE;
- break;
- case AR6004_REV1_VERSION:
- ath6kl_warn("testmode not supported with ar6004\n");
+ if (ar->hw.fw_tcmd == NULL) {
+ ath6kl_warn("testmode not supported\n");
return -EOPNOTSUPP;
- default:
- ath6kl_warn("unknown target version: 0x%x\n",
- ar->version.target_ver);
- return -EINVAL;
}
+ filename = ar->hw.fw_tcmd;
+
set_bit(TESTMODE, &ar->flag);
goto get_fw;
}
- switch (ar->version.target_ver) {
- case AR6003_REV2_VERSION:
- filename = AR6003_REV2_FIRMWARE_FILE;
- break;
- case AR6004_REV1_VERSION:
- filename = AR6004_REV1_FIRMWARE_FILE;
- break;
- default:
- filename = AR6003_REV3_FIRMWARE_FILE;
- break;
- }
+ if (WARN_ON(ar->hw.fw == NULL))
+ return -EINVAL;
+
+ filename = ar->hw.fw;
get_fw:
ret = ath6kl_get_fw(ar, filename, &ar->fw, &ar->fw_len);
@@ -849,27 +797,20 @@ static int ath6kl_fetch_patch_file(struct ath6kl *ar)
const char *filename;
int ret;
- switch (ar->version.target_ver) {
- case AR6003_REV2_VERSION:
- filename = AR6003_REV2_PATCH_FILE;
- break;
- case AR6004_REV1_VERSION:
- /* FIXME: implement for AR6004 */
+ if (ar->fw_patch != NULL)
return 0;
- break;
- default:
- filename = AR6003_REV3_PATCH_FILE;
- break;
- }
- if (ar->fw_patch == NULL) {
- ret = ath6kl_get_fw(ar, filename, &ar->fw_patch,
- &ar->fw_patch_len);
- if (ret) {
- ath6kl_err("Failed to get patch file %s: %d\n",
- filename, ret);
- return ret;
- }
+ if (ar->hw.fw_patch == NULL)
+ return 0;
+
+ filename = ar->hw.fw_patch;
+
+ ret = ath6kl_get_fw(ar, filename, &ar->fw_patch,
+ &ar->fw_patch_len);
+ if (ret) {
+ ath6kl_err("Failed to get patch file %s: %d\n",
+ filename, ret);
+ return ret;
}
return 0;
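The OTP and patch fetchers now share the same shape: skip if the image is already cached, skip if the hw table names no file, otherwise fetch. A hypothetical common helper, assuming ath6kl_get_fw() takes a buffer pointer and length exactly as in the calls above (the length type here is a guess):

static int ath6kl_fetch_optional_fw(struct ath6kl *ar, const char *filename,
				    u8 **fw, size_t *fw_len)
{
	if (*fw != NULL)	/* already fetched */
		return 0;

	if (filename == NULL)	/* not used on this hardware */
		return 0;

	return ath6kl_get_fw(ar, filename, fw, fw_len);
}

/* e.g. ath6kl_fetch_optional_fw(ar, ar->hw.fw_patch,
 *				 &ar->fw_patch, &ar->fw_patch_len); */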
@@ -904,19 +845,10 @@ static int ath6kl_fetch_fw_api2(struct ath6kl *ar)
int ret, ie_id, i, index, bit;
__le32 *val;
- switch (ar->version.target_ver) {
- case AR6003_REV2_VERSION:
- filename = AR6003_REV2_FIRMWARE_2_FILE;
- break;
- case AR6003_REV3_VERSION:
- filename = AR6003_REV3_FIRMWARE_2_FILE;
- break;
- case AR6004_REV1_VERSION:
- filename = AR6004_REV1_FIRMWARE_2_FILE;
- break;
- default:
+ if (ar->hw.fw_api2 == NULL)
return -EOPNOTSUPP;
- }
+
+ filename = ar->hw.fw_api2;
ret = request_firmware(&fw, filename, ar->dev);
if (ret)
@@ -1006,12 +938,15 @@ static int ath6kl_fetch_fw_api2(struct ath6kl *ar)
ar->hw.reserved_ram_size);
break;
case ATH6KL_FW_IE_CAPABILITIES:
+ if (ie_len < DIV_ROUND_UP(ATH6KL_FW_CAPABILITY_MAX, 8))
+ break;
+
ath6kl_dbg(ATH6KL_DBG_BOOT,
"found firmware capabilities ie (%zd B)\n",
ie_len);
for (i = 0; i < ATH6KL_FW_CAPABILITY_MAX; i++) {
- index = ALIGN(i, 8) / 8;
+ index = i / 8;
bit = i % 8;
if (data[index] & (1 << bit))
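The indexing fix just above matters because ALIGN(i, 8) rounds up: for any capability number that is not a multiple of 8, ALIGN(i, 8) / 8 landed one byte too far into the bitmap. A standalone sketch of the intended LSB-first lookup:

#include <stdbool.h>
#include <stdint.h>

/* capability bit i lives in byte i / 8, bit position i % 8 */
static bool fw_cap_supported(const uint8_t *caps, unsigned int i)
{
	return caps[i / 8] & (1u << (i % 8));
}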
@@ -1030,9 +965,34 @@ static int ath6kl_fetch_fw_api2(struct ath6kl *ar)
ar->hw.dataset_patch_addr = le32_to_cpup(val);
ath6kl_dbg(ATH6KL_DBG_BOOT,
- "found patch address ie 0x%d\n",
+ "found patch address ie 0x%x\n",
ar->hw.dataset_patch_addr);
break;
+ case ATH6KL_FW_IE_BOARD_ADDR:
+ if (ie_len != sizeof(*val))
+ break;
+
+ val = (__le32 *) data;
+ ar->hw.board_addr = le32_to_cpup(val);
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "found board address ie 0x%x\n",
+ ar->hw.board_addr);
+ break;
+ case ATH6KL_FW_IE_VIF_MAX:
+ if (ie_len != sizeof(*val))
+ break;
+
+ val = (__le32 *) data;
+ ar->vif_max = min_t(unsigned int, le32_to_cpup(val),
+ ATH6KL_VIF_MAX);
+
+ if (ar->vif_max > 1 && !ar->p2p)
+ ar->max_norm_iface = 2;
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "found vif max ie %d\n", ar->vif_max);
+ break;
default:
ath6kl_dbg(ATH6KL_DBG_BOOT, "Unknown fw ie: %u\n",
le32_to_cpup(&hdr->id));
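For context, the switch cases above sit inside a loop that walks length-prefixed IEs in the API-2 firmware image. The 8-byte header layout below (32-bit id plus 32-bit byte count, payload following) is inferred from the le32_to_cpup() reads on hdr->id and from ie_len, not quoted from the source; byte-order handling is omitted for brevity:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct fw_ie_hdr {
	uint32_t id;	/* an ATH6KL_FW_IE_* value */
	uint32_t len;	/* number of payload bytes that follow */
};

static void walk_fw_ies(const uint8_t *buf, size_t buf_len,
			void (*handle)(uint32_t id, const uint8_t *data,
				       uint32_t len))
{
	size_t off = 0;

	while (buf_len - off >= sizeof(struct fw_ie_hdr)) {
		struct fw_ie_hdr hdr;

		memcpy(&hdr, buf + off, sizeof(hdr));
		off += sizeof(hdr);

		if (hdr.len > buf_len - off)
			break;	/* truncated element, stop */

		handle(hdr.id, buf + off, hdr.len);
		off += hdr.len;
	}
}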
@@ -1087,8 +1047,8 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
	 * For AR6004, the host determines the Target RAM address for
* writing board data.
*/
- if (ar->target_type == TARGET_TYPE_AR6004) {
- board_address = AR6004_REV1_BOARD_DATA_ADDRESS;
+ if (ar->hw.board_addr != 0) {
+ board_address = ar->hw.board_addr;
ath6kl_bmi_write(ar,
ath6kl_get_hi_item_addr(ar,
HI_ITEM(hi_board_data)),
@@ -1106,7 +1066,8 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
HI_ITEM(hi_board_ext_data)),
(u8 *) &board_ext_address, 4);
- if (board_ext_address == 0) {
+ if (ar->target_type == TARGET_TYPE_AR6003 &&
+ board_ext_address == 0) {
ath6kl_err("Failed to get board file target address.\n");
return -EINVAL;
}
@@ -1126,8 +1087,8 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
break;
}
- if (ar->fw_board_len == (board_data_size +
- board_ext_data_size)) {
+ if (board_ext_address &&
+ ar->fw_board_len == (board_data_size + board_ext_data_size)) {
/* write extended board data */
ath6kl_dbg(ATH6KL_DBG_BOOT,
@@ -1182,10 +1143,11 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
static int ath6kl_upload_otp(struct ath6kl *ar)
{
u32 address, param;
+ bool from_hw = false;
int ret;
- if (WARN_ON(ar->fw_otp == NULL))
- return -ENOENT;
+ if (ar->fw_otp == NULL)
+ return 0;
address = ar->hw.app_load_addr;
@@ -1210,15 +1172,20 @@ static int ath6kl_upload_otp(struct ath6kl *ar)
return ret;
}
- ar->hw.app_start_override_addr = address;
+ if (ar->hw.app_start_override_addr == 0) {
+ ar->hw.app_start_override_addr = address;
+ from_hw = true;
+ }
- ath6kl_dbg(ATH6KL_DBG_BOOT, "app_start_override_addr 0x%x\n",
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "app_start_override_addr%s 0x%x\n",
+ from_hw ? " (from hw)" : "",
ar->hw.app_start_override_addr);
/* execute the OTP code */
- ath6kl_dbg(ATH6KL_DBG_BOOT, "executing OTP at 0x%x\n", address);
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "executing OTP at 0x%x\n",
+ ar->hw.app_start_override_addr);
param = 0;
- ath6kl_bmi_execute(ar, address, &param);
+ ath6kl_bmi_execute(ar, ar->hw.app_start_override_addr, &param);
return ret;
}
@@ -1229,7 +1196,7 @@ static int ath6kl_upload_firmware(struct ath6kl *ar)
int ret;
if (WARN_ON(ar->fw == NULL))
- return -ENOENT;
+ return 0;
address = ar->hw.app_load_addr;
@@ -1259,8 +1226,8 @@ static int ath6kl_upload_patch(struct ath6kl *ar)
u32 address, param;
int ret;
- if (WARN_ON(ar->fw_patch == NULL))
- return -ENOENT;
+ if (ar->fw_patch == NULL)
+ return 0;
address = ar->hw.dataset_patch_addr;
@@ -1345,7 +1312,7 @@ static int ath6kl_init_upload(struct ath6kl *ar)
return status;
/* WAR to avoid SDIO CRC err */
- if (ar->version.target_ver == AR6003_REV2_VERSION) {
+ if (ar->version.target_ver == AR6003_HW_2_0_VERSION) {
ath6kl_err("temporary war to avoid sdio crc error\n");
param = 0x20;
@@ -1402,43 +1369,29 @@ static int ath6kl_init_upload(struct ath6kl *ar)
if (status)
return status;
- /* Configure GPIO AR6003 UART */
- param = CONFIG_AR600x_DEBUG_UART_TX_PIN;
- status = ath6kl_bmi_write(ar,
- ath6kl_get_hi_item_addr(ar,
- HI_ITEM(hi_dbg_uart_txpin)),
- (u8 *)&param, 4);
-
return status;
}
static int ath6kl_init_hw_params(struct ath6kl *ar)
{
- switch (ar->version.target_ver) {
- case AR6003_REV2_VERSION:
- ar->hw.dataset_patch_addr = AR6003_REV2_DATASET_PATCH_ADDRESS;
- ar->hw.app_load_addr = AR6003_REV2_APP_LOAD_ADDRESS;
- ar->hw.board_ext_data_addr = AR6003_REV2_BOARD_EXT_DATA_ADDRESS;
- ar->hw.reserved_ram_size = AR6003_REV2_RAM_RESERVE_SIZE;
- break;
- case AR6003_REV3_VERSION:
- ar->hw.dataset_patch_addr = AR6003_REV3_DATASET_PATCH_ADDRESS;
- ar->hw.app_load_addr = 0x1234;
- ar->hw.board_ext_data_addr = AR6003_REV3_BOARD_EXT_DATA_ADDRESS;
- ar->hw.reserved_ram_size = AR6003_REV3_RAM_RESERVE_SIZE;
- break;
- case AR6004_REV1_VERSION:
- ar->hw.dataset_patch_addr = AR6003_REV2_DATASET_PATCH_ADDRESS;
- ar->hw.app_load_addr = AR6003_REV3_APP_LOAD_ADDRESS;
- ar->hw.board_ext_data_addr = AR6004_REV1_BOARD_EXT_DATA_ADDRESS;
- ar->hw.reserved_ram_size = AR6004_REV1_RAM_RESERVE_SIZE;
- break;
- default:
+ const struct ath6kl_hw *hw;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+ hw = &hw_list[i];
+
+ if (hw->id == ar->version.target_ver)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(hw_list)) {
ath6kl_err("Unsupported hardware version: 0x%x\n",
ar->version.target_ver);
return -EINVAL;
}
+ ar->hw = *hw;
+
ath6kl_dbg(ATH6KL_DBG_BOOT,
"target_ver 0x%x target_type 0x%x dataset_patch 0x%x app_load_addr 0x%x\n",
ar->version.target_ver, ar->target_type,
@@ -1447,75 +1400,75 @@ static int ath6kl_init_hw_params(struct ath6kl *ar)
"app_start_override_addr 0x%x board_ext_data_addr 0x%x reserved_ram_size 0x%x",
ar->hw.app_start_override_addr, ar->hw.board_ext_data_addr,
ar->hw.reserved_ram_size);
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "refclk_hz %d uarttx_pin %d",
+ ar->hw.refclk_hz, ar->hw.uarttx_pin);
return 0;
}
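hw_list[] itself is not visible in these hunks; judging from the ar->hw.* fields used throughout this patch, each entry presumably carries roughly the following (types and layout are assumptions, and some fields such as dataset_patch_addr and board_addr may instead be filled in later from firmware IEs):

struct ath6kl_hw {
	u32 id;				/* matched against ar->version.target_ver */
	const char *name;		/* printed at first boot */

	u32 dataset_patch_addr;
	u32 app_load_addr;
	u32 app_start_override_addr;
	u32 board_ext_data_addr;
	u32 reserved_ram_size;
	u32 refclk_hz;
	u32 uarttx_pin;
	u32 board_addr;

	/* firmware file names; NULL means "not used on this chip" */
	const char *fw;
	const char *fw_api2;
	const char *fw_board;
	const char *fw_default_board;
	const char *fw_otp;
	const char *fw_tcmd;
	const char *fw_patch;
};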
-static int ath6kl_init(struct net_device *dev)
+static const char *ath6kl_init_get_hif_name(enum ath6kl_hif_type type)
{
- struct ath6kl *ar = ath6kl_priv(dev);
- int status = 0;
- s32 timeleft;
+ switch (type) {
+ case ATH6KL_HIF_TYPE_SDIO:
+ return "sdio";
+ case ATH6KL_HIF_TYPE_USB:
+ return "usb";
+ }
- if (!ar)
- return -EIO;
+ return NULL;
+}
+
+int ath6kl_init_hw_start(struct ath6kl *ar)
+{
+ long timeleft;
+ int ret, i;
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "hw start\n");
+
+ ret = ath6kl_hif_power_on(ar);
+ if (ret)
+ return ret;
+
+ ret = ath6kl_configure_target(ar);
+ if (ret)
+ goto err_power_off;
+
+ ret = ath6kl_init_upload(ar);
+ if (ret)
+ goto err_power_off;
/* Do we need to finish the BMI phase */
+ /* FIXME: return error from ath6kl_bmi_done() */
if (ath6kl_bmi_done(ar)) {
- status = -EIO;
- goto ath6kl_init_done;
- }
-
- /* Indicate that WMI is enabled (although not ready yet) */
- set_bit(WMI_ENABLED, &ar->flag);
- ar->wmi = ath6kl_wmi_init(ar);
- if (!ar->wmi) {
- ath6kl_err("failed to initialize wmi\n");
- status = -EIO;
- goto ath6kl_init_done;
+ ret = -EIO;
+ goto err_power_off;
}
- ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi);
-
/*
* The reason we have to wait for the target here is that the
* driver layer has to init BMI in order to set the host block
* size.
*/
if (ath6kl_htc_wait_target(ar->htc_target)) {
- status = -EIO;
- goto err_node_cleanup;
+ ret = -EIO;
+ goto err_power_off;
}
if (ath6kl_init_service_ep(ar)) {
- status = -EIO;
+ ret = -EIO;
goto err_cleanup_scatter;
}
- /* setup access class priority mappings */
- ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest */
- ar->ac_stream_pri_map[WMM_AC_BE] = 1;
- ar->ac_stream_pri_map[WMM_AC_VI] = 2;
- ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */
-
- /* give our connected endpoints some buffers */
- ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep);
- ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]);
-
- /* allocate some buffers that handle larger AMSDU frames */
- ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS);
-
/* setup credit distribution */
- ath6k_setup_credit_dist(ar->htc_target, &ar->credit_state_info);
-
- ath6kl_cookie_init(ar);
+ ath6kl_credit_setup(ar->htc_target, &ar->credit_state_info);
/* start HTC */
- status = ath6kl_htc_start(ar->htc_target);
-
- if (status) {
+ ret = ath6kl_htc_start(ar->htc_target);
+ if (ret) {
+ /* FIXME: call this */
ath6kl_cookie_cleanup(ar);
- goto err_rxbuf_cleanup;
+ goto err_cleanup_scatter;
}
/* Wait for Wmi event to be ready */
@@ -1526,54 +1479,81 @@ static int ath6kl_init(struct net_device *dev)
ath6kl_dbg(ATH6KL_DBG_BOOT, "firmware booted\n");
+
+ if (test_and_clear_bit(FIRST_BOOT, &ar->flag)) {
+ ath6kl_info("%s %s fw %s%s\n",
+ ar->hw.name,
+ ath6kl_init_get_hif_name(ar->hif_type),
+ ar->wiphy->fw_version,
+ test_bit(TESTMODE, &ar->flag) ? " testmode" : "");
+ }
+
if (ar->version.abi_ver != ATH6KL_ABI_VERSION) {
ath6kl_err("abi version mismatch: host(0x%x), target(0x%x)\n",
ATH6KL_ABI_VERSION, ar->version.abi_ver);
- status = -EIO;
+ ret = -EIO;
goto err_htc_stop;
}
if (!timeleft || signal_pending(current)) {
ath6kl_err("wmi is not ready or wait was interrupted\n");
- status = -EIO;
+ ret = -EIO;
goto err_htc_stop;
}
ath6kl_dbg(ATH6KL_DBG_TRC, "%s: wmi is ready\n", __func__);
	/* communicate the wmi protocol version to the target */
+ /* FIXME: return error */
if ((ath6kl_set_host_app_area(ar)) != 0)
ath6kl_err("unable to set the host app area\n");
- ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER |
- ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST;
+ for (i = 0; i < ar->vif_max; i++) {
+ ret = ath6kl_target_config_wlan_params(ar, i);
+ if (ret)
+ goto err_htc_stop;
+ }
- ar->wdev->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM;
+ ar->state = ATH6KL_STATE_ON;
- status = ath6kl_target_config_wlan_params(ar);
- if (!status)
- goto ath6kl_init_done;
+ return 0;
err_htc_stop:
ath6kl_htc_stop(ar->htc_target);
-err_rxbuf_cleanup:
- ath6kl_htc_flush_rx_buf(ar->htc_target);
- ath6kl_cleanup_amsdu_rxbufs(ar);
err_cleanup_scatter:
ath6kl_hif_cleanup_scatter(ar);
-err_node_cleanup:
- ath6kl_wmi_shutdown(ar->wmi);
- clear_bit(WMI_ENABLED, &ar->flag);
- ar->wmi = NULL;
+err_power_off:
+ ath6kl_hif_power_off(ar);
-ath6kl_init_done:
- return status;
+ return ret;
+}
+
+int ath6kl_init_hw_stop(struct ath6kl *ar)
+{
+ int ret;
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "hw stop\n");
+
+ ath6kl_htc_stop(ar->htc_target);
+
+ ath6kl_hif_stop(ar);
+
+ ath6kl_bmi_reset(ar);
+
+ ret = ath6kl_hif_power_off(ar);
+ if (ret)
+ ath6kl_warn("failed to power off hif: %d\n", ret);
+
+ ar->state = ATH6KL_STATE_OFF;
+
+ return 0;
}
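Splitting the boot path into ath6kl_init_hw_start() and ath6kl_init_hw_stop() lets the core power-cycle the target without tearing down the cfg80211/netdev state. A purely hypothetical caller, shown only to illustrate the intended pairing with ar->state (none of this is in the patch):

static int ath6kl_cutpower_suspend(struct ath6kl *ar)
{
	if (ar->state != ATH6KL_STATE_ON)
		return 0;	/* nothing running */

	return ath6kl_init_hw_stop(ar);
}

static int ath6kl_cutpower_resume(struct ath6kl *ar)
{
	if (ar->state != ATH6KL_STATE_OFF)
		return 0;

	return ath6kl_init_hw_start(ar);
}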
int ath6kl_core_init(struct ath6kl *ar)
{
- int ret = 0;
struct ath6kl_bmi_target_info targ_info;
+ struct net_device *ndev;
+ int ret = 0, i;
ar->ath6kl_wq = create_singlethread_workqueue("ath6kl");
if (!ar->ath6kl_wq)
@@ -1583,145 +1563,236 @@ int ath6kl_core_init(struct ath6kl *ar)
if (ret)
goto err_wq;
- ret = ath6kl_bmi_get_target_info(ar, &targ_info);
+ /*
+ * Turn on power to get hardware (target) version and leave power
+	 * on deliberately as we will boot the hardware anyway within a few
+ * seconds.
+ */
+ ret = ath6kl_hif_power_on(ar);
if (ret)
goto err_bmi_cleanup;
+ ret = ath6kl_bmi_get_target_info(ar, &targ_info);
+ if (ret)
+ goto err_power_off;
+
ar->version.target_ver = le32_to_cpu(targ_info.version);
ar->target_type = le32_to_cpu(targ_info.type);
- ar->wdev->wiphy->hw_version = le32_to_cpu(targ_info.version);
+ ar->wiphy->hw_version = le32_to_cpu(targ_info.version);
ret = ath6kl_init_hw_params(ar);
if (ret)
- goto err_bmi_cleanup;
-
- ret = ath6kl_configure_target(ar);
- if (ret)
- goto err_bmi_cleanup;
+ goto err_power_off;
ar->htc_target = ath6kl_htc_create(ar);
if (!ar->htc_target) {
ret = -ENOMEM;
- goto err_bmi_cleanup;
- }
-
- ar->aggr_cntxt = aggr_init(ar->net_dev);
- if (!ar->aggr_cntxt) {
- ath6kl_err("failed to initialize aggr\n");
- ret = -ENOMEM;
- goto err_htc_cleanup;
+ goto err_power_off;
}
ret = ath6kl_fetch_firmwares(ar);
if (ret)
goto err_htc_cleanup;
- ret = ath6kl_init_upload(ar);
- if (ret)
+ /* FIXME: we should free all firmwares in the error cases below */
+
+ /* Indicate that WMI is enabled (although not ready yet) */
+ set_bit(WMI_ENABLED, &ar->flag);
+ ar->wmi = ath6kl_wmi_init(ar);
+ if (!ar->wmi) {
+ ath6kl_err("failed to initialize wmi\n");
+ ret = -EIO;
goto err_htc_cleanup;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_TRC, "%s: got wmi @ 0x%p.\n", __func__, ar->wmi);
- ret = ath6kl_init(ar->net_dev);
+ ret = ath6kl_register_ieee80211_hw(ar);
if (ret)
- goto err_htc_cleanup;
+ goto err_node_cleanup;
- /* This runs the init function if registered */
- ret = register_netdev(ar->net_dev);
+ ret = ath6kl_debug_init(ar);
if (ret) {
- ath6kl_err("register_netdev failed\n");
- ath6kl_destroy(ar->net_dev, 0);
- return ret;
+ wiphy_unregister(ar->wiphy);
+ goto err_node_cleanup;
+ }
+
+ for (i = 0; i < ar->vif_max; i++)
+ ar->avail_idx_map |= BIT(i);
+
+ rtnl_lock();
+
+ /* Add an initial station interface */
+ ndev = ath6kl_interface_add(ar, "wlan%d", NL80211_IFTYPE_STATION, 0,
+ INFRA_NETWORK);
+
+ rtnl_unlock();
+
+ if (!ndev) {
+ ath6kl_err("Failed to instantiate a network device\n");
+ ret = -ENOMEM;
+ wiphy_unregister(ar->wiphy);
+ goto err_debug_init;
}
- set_bit(NETDEV_REGISTERED, &ar->flag);
ath6kl_dbg(ATH6KL_DBG_TRC, "%s: name=%s dev=0x%p, ar=0x%p\n",
- __func__, ar->net_dev->name, ar->net_dev, ar);
+ __func__, ndev->name, ndev, ar);
+
+ /* setup access class priority mappings */
+ ar->ac_stream_pri_map[WMM_AC_BK] = 0; /* lowest */
+ ar->ac_stream_pri_map[WMM_AC_BE] = 1;
+ ar->ac_stream_pri_map[WMM_AC_VI] = 2;
+ ar->ac_stream_pri_map[WMM_AC_VO] = 3; /* highest */
+
+ /* give our connected endpoints some buffers */
+ ath6kl_rx_refill(ar->htc_target, ar->ctrl_ep);
+ ath6kl_rx_refill(ar->htc_target, ar->ac2ep_map[WMM_AC_BE]);
+
+ /* allocate some buffers that handle larger AMSDU frames */
+ ath6kl_refill_amsdu_rxbufs(ar, ATH6KL_MAX_AMSDU_RX_BUFFERS);
+
+ ath6kl_cookie_init(ar);
+
+ ar->conf_flags = ATH6KL_CONF_IGNORE_ERP_BARKER |
+ ATH6KL_CONF_ENABLE_11N | ATH6KL_CONF_ENABLE_TX_BURST;
+
+ if (suspend_cutpower)
+ ar->conf_flags |= ATH6KL_CONF_SUSPEND_CUTPOWER;
+
+ ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_FW_ROAM |
+ WIPHY_FLAG_HAVE_AP_SME |
+ WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
+ WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+
+ if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN, ar->fw_capabilities))
+ ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
+
+ ar->wiphy->probe_resp_offload =
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_80211U;
+
+ set_bit(FIRST_BOOT, &ar->flag);
+
+ ret = ath6kl_init_hw_start(ar);
+ if (ret) {
+ ath6kl_err("Failed to start hardware: %d\n", ret);
+ goto err_rxbuf_cleanup;
+ }
+
+ /*
+	 * Set the MAC address which is received in the ready event
+ * FIXME: Move to ath6kl_interface_add()
+ */
+ memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
return ret;
+err_rxbuf_cleanup:
+ ath6kl_htc_flush_rx_buf(ar->htc_target);
+ ath6kl_cleanup_amsdu_rxbufs(ar);
+ rtnl_lock();
+ ath6kl_deinit_if_data(netdev_priv(ndev));
+ rtnl_unlock();
+ wiphy_unregister(ar->wiphy);
+err_debug_init:
+ ath6kl_debug_cleanup(ar);
+err_node_cleanup:
+ ath6kl_wmi_shutdown(ar->wmi);
+ clear_bit(WMI_ENABLED, &ar->flag);
+ ar->wmi = NULL;
err_htc_cleanup:
ath6kl_htc_cleanup(ar->htc_target);
+err_power_off:
+ ath6kl_hif_power_off(ar);
err_bmi_cleanup:
ath6kl_bmi_cleanup(ar);
err_wq:
destroy_workqueue(ar->ath6kl_wq);
+
return ret;
}
-void ath6kl_stop_txrx(struct ath6kl *ar)
+void ath6kl_cleanup_vif(struct ath6kl_vif *vif, bool wmi_ready)
{
- struct net_device *ndev = ar->net_dev;
+ static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ bool discon_issued;
- if (!ndev)
- return;
+ netif_stop_queue(vif->ndev);
- set_bit(DESTROY_IN_PROGRESS, &ar->flag);
+ clear_bit(WLAN_ENABLED, &vif->flags);
- if (down_interruptible(&ar->sem)) {
- ath6kl_err("down_interruptible failed\n");
- return;
- }
+ if (wmi_ready) {
+ discon_issued = test_bit(CONNECTED, &vif->flags) ||
+ test_bit(CONNECT_PEND, &vif->flags);
+ ath6kl_disconnect(vif);
+ del_timer(&vif->disconnect_timer);
- if (ar->wlan_pwr_state != WLAN_POWER_STATE_CUT_PWR)
- ath6kl_stop_endpoint(ndev, false, true);
+ if (discon_issued)
+ ath6kl_disconnect_event(vif, DISCONNECT_CMD,
+ (vif->nw_type & AP_NETWORK) ?
+ bcast_mac : vif->bssid,
+ 0, NULL, 0);
+ }
- clear_bit(WLAN_ENABLED, &ar->flag);
+ if (vif->scan_req) {
+ cfg80211_scan_done(vif->scan_req, true);
+ vif->scan_req = NULL;
+ }
}
-/*
- * We need to differentiate between the surprise and planned removal of the
- * device because of the following consideration:
- *
- * - In case of surprise removal, the hcd already frees up the pending
- * for the device and hence there is no need to unregister the function
- * driver inorder to get these requests. For planned removal, the function
- * driver has to explicitly unregister itself to have the hcd return all the
- * pending requests before the data structures for the devices are freed up.
- * Note that as per the current implementation, the function driver will
- * end up releasing all the devices since there is no API to selectively
- * release a particular device.
- *
- * - Certain commands issued to the target can be skipped for surprise
- * removal since they will anyway not go through.
- */
-void ath6kl_destroy(struct net_device *dev, unsigned int unregister)
+void ath6kl_stop_txrx(struct ath6kl *ar)
{
- struct ath6kl *ar;
+ struct ath6kl_vif *vif, *tmp_vif;
- if (!dev || !ath6kl_priv(dev)) {
- ath6kl_err("failed to get device structure\n");
+ set_bit(DESTROY_IN_PROGRESS, &ar->flag);
+
+ if (down_interruptible(&ar->sem)) {
+ ath6kl_err("down_interruptible failed\n");
return;
}
- ar = ath6kl_priv(dev);
-
- destroy_workqueue(ar->ath6kl_wq);
-
- if (ar->htc_target)
- ath6kl_htc_cleanup(ar->htc_target);
-
- aggr_module_destroy(ar->aggr_cntxt);
-
- ath6kl_cookie_cleanup(ar);
-
- ath6kl_cleanup_amsdu_rxbufs(ar);
+ spin_lock_bh(&ar->list_lock);
+ list_for_each_entry_safe(vif, tmp_vif, &ar->vif_list, list) {
+ list_del(&vif->list);
+ spin_unlock_bh(&ar->list_lock);
+ ath6kl_cleanup_vif(vif, test_bit(WMI_READY, &ar->flag));
+ rtnl_lock();
+ ath6kl_deinit_if_data(vif);
+ rtnl_unlock();
+ spin_lock_bh(&ar->list_lock);
+ }
+ spin_unlock_bh(&ar->list_lock);
- ath6kl_bmi_cleanup(ar);
+ clear_bit(WMI_READY, &ar->flag);
- ath6kl_debug_cleanup(ar);
+ /*
+	 * After wmi_shutdown all WMI events will be dropped. We
+	 * need to clean up the buffers allocated in AP mode and
+	 * give a disconnect notification to the stack, which usually
+ * happens in the disconnect_event. Simulate the disconnect
+ * event by calling the function directly. Sometimes
+ * disconnect_event will be received when the debug logs
+ * are collected.
+ */
+ ath6kl_wmi_shutdown(ar->wmi);
- if (unregister && test_bit(NETDEV_REGISTERED, &ar->flag)) {
- unregister_netdev(dev);
- clear_bit(NETDEV_REGISTERED, &ar->flag);
+ clear_bit(WMI_ENABLED, &ar->flag);
+ if (ar->htc_target) {
+ ath6kl_dbg(ATH6KL_DBG_TRC, "%s: shut down htc\n", __func__);
+ ath6kl_htc_stop(ar->htc_target);
}
- free_netdev(dev);
-
- kfree(ar->fw_board);
- kfree(ar->fw_otp);
- kfree(ar->fw);
- kfree(ar->fw_patch);
+ /*
+ * Try to reset the device if we can. The driver may have been
+	 * configured NOT to reset the target during a debug session.
+ */
+ ath6kl_dbg(ATH6KL_DBG_TRC,
+ "attempting to reset target on instance destroy\n");
+ ath6kl_reset_device(ar, ar->target_type, true, true);
- ath6kl_cfg80211_deinit(ar);
+ clear_bit(WLAN_ENABLED, &ar->flag);
}
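In ath6kl_stop_txrx() above, ar->list_lock is a spinlock that only protects the vif list, so it is dropped around the per-vif teardown, which can sleep (rtnl_lock() is a mutex). The bare shape of that unlock-while-cleaning loop, with the real work reduced to a placeholder comment:

	spin_lock_bh(&ar->list_lock);
	list_for_each_entry_safe(vif, tmp_vif, &ar->vif_list, list) {
		list_del(&vif->list);
		spin_unlock_bh(&ar->list_lock);

		/* per-vif cleanup that may sleep goes here */

		spin_lock_bh(&ar->list_lock);
	}
	spin_unlock_bh(&ar->list_lock);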
diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c
index 30b5a53..eea3c74 100644
--- a/drivers/net/wireless/ath/ath6kl/main.c
+++ b/drivers/net/wireless/ath/ath6kl/main.c
@@ -20,12 +20,13 @@
#include "target.h"
#include "debug.h"
-struct ath6kl_sta *ath6kl_find_sta(struct ath6kl *ar, u8 *node_addr)
+struct ath6kl_sta *ath6kl_find_sta(struct ath6kl_vif *vif, u8 *node_addr)
{
+ struct ath6kl *ar = vif->ar;
struct ath6kl_sta *conn = NULL;
u8 i, max_conn;
- max_conn = (ar->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0;
+ max_conn = (vif->nw_type == AP_NETWORK) ? AP_MAX_NUM_STA : 0;
for (i = 0; i < max_conn; i++) {
if (memcmp(node_addr, ar->sta_list[i].mac, ETH_ALEN) == 0) {
@@ -174,64 +175,6 @@ void ath6kl_free_cookie(struct ath6kl *ar, struct ath6kl_cookie *cookie)
ar->cookie_count++;
}
-/* set the window address register (using 4-byte register access ). */
-static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr)
-{
- int status;
- s32 i;
- __le32 addr_val;
-
- /*
- * Write bytes 1,2,3 of the register to set the upper address bytes,
- * the LSB is written last to initiate the access cycle
- */
-
- for (i = 1; i <= 3; i++) {
- /*
- * Fill the buffer with the address byte value we want to
- * hit 4 times. No need to worry about endianness as the
- * same byte is copied to all four bytes of addr_val at
- * any time.
- */
- memset((u8 *)&addr_val, ((u8 *)&addr)[i], 4);
-
- /*
- * Hit each byte of the register address with a 4-byte
- * write operation to the same address, this is a harmless
- * operation.
- */
- status = hif_read_write_sync(ar, reg_addr + i, (u8 *)&addr_val,
- 4, HIF_WR_SYNC_BYTE_FIX);
- if (status)
- break;
- }
-
- if (status) {
- ath6kl_err("failed to write initial bytes of 0x%x to window reg: 0x%X\n",
- addr, reg_addr);
- return status;
- }
-
- /*
- * Write the address register again, this time write the whole
- * 4-byte value. The effect here is that the LSB write causes the
- * cycle to start, the extra 3 byte write to bytes 1,2,3 has no
- * effect since we are writing the same values again
- */
- addr_val = cpu_to_le32(addr);
- status = hif_read_write_sync(ar, reg_addr,
- (u8 *)&(addr_val),
- 4, HIF_WR_SYNC_BYTE_INC);
-
- if (status) {
- ath6kl_err("failed to write 0x%x to window reg: 0x%X\n",
- addr, reg_addr);
- return status;
- }
-
- return 0;
-}
-
/*
* Read from the hardware through its diagnostic window. No cooperation
* from the firmware is required for this.
@@ -240,14 +183,7 @@ int ath6kl_diag_read32(struct ath6kl *ar, u32 address, u32 *value)
{
int ret;
- /* set window register to start read cycle */
- ret = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS, address);
- if (ret)
- return ret;
-
- /* read the data */
- ret = hif_read_write_sync(ar, WINDOW_DATA_ADDRESS, (u8 *) value,
- sizeof(*value), HIF_RD_SYNC_BYTE_INC);
+ ret = ath6kl_hif_diag_read32(ar, address, value);
if (ret) {
ath6kl_warn("failed to read32 through diagnose window: %d\n",
ret);
@@ -265,18 +201,15 @@ int ath6kl_diag_write32(struct ath6kl *ar, u32 address, __le32 value)
{
int ret;
- /* set write data */
- ret = hif_read_write_sync(ar, WINDOW_DATA_ADDRESS, (u8 *) &value,
- sizeof(value), HIF_WR_SYNC_BYTE_INC);
+ ret = ath6kl_hif_diag_write32(ar, address, value);
+
if (ret) {
ath6kl_err("failed to write 0x%x during diagnose window to 0x%d\n",
address, value);
return ret;
}
- /* set window register, which starts the write cycle */
- return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS,
- address);
+ return 0;
}
int ath6kl_diag_read(struct ath6kl *ar, u32 address, void *data, u32 length)
@@ -393,8 +326,8 @@ out:
#define AR6003_RESET_CONTROL_ADDRESS 0x00004000
#define AR6004_RESET_CONTROL_ADDRESS 0x00004000
-static void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
- bool wait_fot_compltn, bool cold_reset)
+void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
+ bool wait_fot_compltn, bool cold_reset)
{
int status = 0;
u32 address;
@@ -425,102 +358,33 @@ static void ath6kl_reset_device(struct ath6kl *ar, u32 target_type,
ath6kl_err("failed to reset target\n");
}
-void ath6kl_stop_endpoint(struct net_device *dev, bool keep_profile,
- bool get_dbglogs)
-{
- struct ath6kl *ar = ath6kl_priv(dev);
- static u8 bcast_mac[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
- bool discon_issued;
-
- netif_stop_queue(dev);
-
- /* disable the target and the interrupts associated with it */
- if (test_bit(WMI_READY, &ar->flag)) {
- discon_issued = (test_bit(CONNECTED, &ar->flag) ||
- test_bit(CONNECT_PEND, &ar->flag));
- ath6kl_disconnect(ar);
- if (!keep_profile)
- ath6kl_init_profile_info(ar);
-
- del_timer(&ar->disconnect_timer);
-
- clear_bit(WMI_READY, &ar->flag);
- ath6kl_wmi_shutdown(ar->wmi);
- clear_bit(WMI_ENABLED, &ar->flag);
- ar->wmi = NULL;
-
- /*
- * After wmi_shudown all WMI events will be dropped. We
- * need to cleanup the buffers allocated in AP mode and
- * give disconnect notification to stack, which usually
- * happens in the disconnect_event. Simulate the disconnect
- * event by calling the function directly. Sometimes
- * disconnect_event will be received when the debug logs
- * are collected.
- */
- if (discon_issued)
- ath6kl_disconnect_event(ar, DISCONNECT_CMD,
- (ar->nw_type & AP_NETWORK) ?
- bcast_mac : ar->bssid,
- 0, NULL, 0);
-
- ar->user_key_ctrl = 0;
-
- } else {
- ath6kl_dbg(ATH6KL_DBG_TRC,
- "%s: wmi is not ready 0x%p 0x%p\n",
- __func__, ar, ar->wmi);
-
- /* Shut down WMI if we have started it */
- if (test_bit(WMI_ENABLED, &ar->flag)) {
- ath6kl_dbg(ATH6KL_DBG_TRC,
- "%s: shut down wmi\n", __func__);
- ath6kl_wmi_shutdown(ar->wmi);
- clear_bit(WMI_ENABLED, &ar->flag);
- ar->wmi = NULL;
- }
- }
-
- if (ar->htc_target) {
- ath6kl_dbg(ATH6KL_DBG_TRC, "%s: shut down htc\n", __func__);
- ath6kl_htc_stop(ar->htc_target);
- }
-
- /*
- * Try to reset the device if we can. The driver may have been
- * configure NOT to reset the target during a debug session.
- */
- ath6kl_dbg(ATH6KL_DBG_TRC,
- "attempting to reset target on instance destroy\n");
- ath6kl_reset_device(ar, ar->target_type, true, true);
-}
-
-static void ath6kl_install_static_wep_keys(struct ath6kl *ar)
+static void ath6kl_install_static_wep_keys(struct ath6kl_vif *vif)
{
u8 index;
u8 keyusage;
for (index = WMI_MIN_KEY_INDEX; index <= WMI_MAX_KEY_INDEX; index++) {
- if (ar->wep_key_list[index].key_len) {
+ if (vif->wep_key_list[index].key_len) {
keyusage = GROUP_USAGE;
- if (index == ar->def_txkey_index)
+ if (index == vif->def_txkey_index)
keyusage |= TX_USAGE;
- ath6kl_wmi_addkey_cmd(ar->wmi,
+ ath6kl_wmi_addkey_cmd(vif->ar->wmi, vif->fw_vif_idx,
index,
WEP_CRYPT,
keyusage,
- ar->wep_key_list[index].key_len,
- NULL,
- ar->wep_key_list[index].key,
+ vif->wep_key_list[index].key_len,
+ NULL, 0,
+ vif->wep_key_list[index].key,
KEY_OP_INIT_VAL, NULL,
NO_SYNC_WMIFLAG);
}
}
}
-void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel)
+void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel)
{
+ struct ath6kl *ar = vif->ar;
struct ath6kl_req_key *ik;
int res;
u8 key_rsc[ATH6KL_KEY_SEQ_LEN];
@@ -529,11 +393,13 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel)
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "AP mode started on %u MHz\n", channel);
- switch (ar->auth_mode) {
+ switch (vif->auth_mode) {
case NONE_AUTH:
- if (ar->prwise_crypto == WEP_CRYPT)
- ath6kl_install_static_wep_keys(ar);
- break;
+ if (vif->prwise_crypto == WEP_CRYPT)
+ ath6kl_install_static_wep_keys(vif);
+ if (!ik->valid || ik->key_type != WAPI_CRYPT)
+ break;
+		/* for WAPI, we need to set the delayed group key; fall through */
case WPA_PSK_AUTH:
case WPA2_PSK_AUTH:
case (WPA_PSK_AUTH | WPA2_PSK_AUTH):
@@ -544,8 +410,9 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel)
"the initial group key for AP mode\n");
memset(key_rsc, 0, sizeof(key_rsc));
res = ath6kl_wmi_addkey_cmd(
- ar->wmi, ik->key_index, ik->key_type,
- GROUP_USAGE, ik->key_len, key_rsc, ik->key,
+ ar->wmi, vif->fw_vif_idx, ik->key_index, ik->key_type,
+ GROUP_USAGE, ik->key_len, key_rsc, ATH6KL_KEY_SEQ_LEN,
+ ik->key,
KEY_OP_INIT_VAL, NULL, SYNC_BOTH_WMIFLAG);
if (res) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "Delayed "
@@ -554,15 +421,16 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl *ar, u16 channel)
break;
}
- ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
- set_bit(CONNECTED, &ar->flag);
- netif_carrier_on(ar->net_dev);
+ ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx, NONE_BSS_FILTER, 0);
+ set_bit(CONNECTED, &vif->flags);
+ netif_carrier_on(vif->ndev);
}
-void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr,
+void ath6kl_connect_ap_mode_sta(struct ath6kl_vif *vif, u16 aid, u8 *mac_addr,
u8 keymgmt, u8 ucipher, u8 auth,
u8 assoc_req_len, u8 *assoc_info)
{
+ struct ath6kl *ar = vif->ar;
u8 *ies = NULL, *wpa_ie = NULL, *pos;
size_t ies_len = 0;
struct station_info sinfo;
@@ -600,6 +468,18 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr,
wpa_ie = pos; /* WPS IE */
break; /* overrides WPA/RSN IE */
}
+ } else if (pos[0] == 0x44 && wpa_ie == NULL) {
+ /*
+			/*
+			 * Note: the WAPI Parameter Set IE re-uses the Element
+			 * ID that was officially allocated for BSS AC Access
+			 * Delay. As such, we need to be a bit more careful when
+			 * parsing the frame. However, the BSS AC Access Delay
+ * element is not supposed to be included in
+ * (Re)Association Request frames, so this should not
+ * cause problems.
+ */
+ wpa_ie = pos; /* WAPI IE */
+ break;
}
pos += 2 + pos[1];
}
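The hunk above is part of a loop over the (Re)Association Request IEs, which use the standard 802.11 encoding: a 1-byte element ID, a 1-byte length, then the payload, advancing by 2 + pos[1] per element. A standalone, bounds-checked sketch of that walk (hypothetical helper, not taken from the driver):

#include <stddef.h>
#include <stdint.h>

static const uint8_t *find_ie(const uint8_t *ies, size_t len, uint8_t eid)
{
	const uint8_t *pos = ies;
	const uint8_t *end = ies + len;

	while (end - pos >= 2) {
		if (pos[1] > end - (pos + 2))
			return NULL;		/* malformed element */
		if (pos[0] == eid)
			return pos;
		pos += 2 + pos[1];
	}

	return NULL;
}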
@@ -617,380 +497,49 @@ void ath6kl_connect_ap_mode_sta(struct ath6kl *ar, u16 aid, u8 *mac_addr,
sinfo.assoc_req_ies_len = ies_len;
sinfo.filled |= STATION_INFO_ASSOC_REQ_IES;
- cfg80211_new_sta(ar->net_dev, mac_addr, &sinfo, GFP_KERNEL);
-
- netif_wake_queue(ar->net_dev);
-}
-
-/* Functions for Tx credit handling */
-void ath6k_credit_init(struct htc_credit_state_info *cred_info,
- struct list_head *ep_list,
- int tot_credits)
-{
- struct htc_endpoint_credit_dist *cur_ep_dist;
- int count;
-
- cred_info->cur_free_credits = tot_credits;
- cred_info->total_avail_credits = tot_credits;
-
- list_for_each_entry(cur_ep_dist, ep_list, list) {
- if (cur_ep_dist->endpoint == ENDPOINT_0)
- continue;
-
- cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg;
-
- if (tot_credits > 4)
- if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) ||
- (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) {
- ath6kl_deposit_credit_to_ep(cred_info,
- cur_ep_dist,
- cur_ep_dist->cred_min);
- cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
- }
-
- if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) {
- ath6kl_deposit_credit_to_ep(cred_info, cur_ep_dist,
- cur_ep_dist->cred_min);
- /*
- * Control service is always marked active, it
- * never goes inactive EVER.
- */
- cur_ep_dist->dist_flags |= HTC_EP_ACTIVE;
- } else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC)
- /* this is the lowest priority data endpoint */
- cred_info->lowestpri_ep_dist = cur_ep_dist->list;
-
- /*
- * Streams have to be created (explicit | implicit) for all
- * kinds of traffic. BE endpoints are also inactive in the
- * beginning. When BE traffic starts it creates implicit
- * streams that redistributes credits.
- *
- * Note: all other endpoints have minimums set but are
- * initially given NO credits. credits will be distributed
- * as traffic activity demands
- */
- }
-
- WARN_ON(cred_info->cur_free_credits <= 0);
-
- list_for_each_entry(cur_ep_dist, ep_list, list) {
- if (cur_ep_dist->endpoint == ENDPOINT_0)
- continue;
-
- if (cur_ep_dist->svc_id == WMI_CONTROL_SVC)
- cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg;
- else {
- /*
- * For the remaining data endpoints, we assume that
- * each cred_per_msg are the same. We use a simple
- * calculation here, we take the remaining credits
- * and determine how many max messages this can
- * cover and then set each endpoint's normal value
- * equal to 3/4 this amount.
- */
- count = (cred_info->cur_free_credits /
- cur_ep_dist->cred_per_msg)
- * cur_ep_dist->cred_per_msg;
- count = (count * 3) >> 2;
- count = max(count, cur_ep_dist->cred_per_msg);
- cur_ep_dist->cred_norm = count;
-
- }
- }
-}
-
-/* initialize and setup credit distribution */
-int ath6k_setup_credit_dist(void *htc_handle,
- struct htc_credit_state_info *cred_info)
-{
- u16 servicepriority[5];
-
- memset(cred_info, 0, sizeof(struct htc_credit_state_info));
-
- servicepriority[0] = WMI_CONTROL_SVC; /* highest */
- servicepriority[1] = WMI_DATA_VO_SVC;
- servicepriority[2] = WMI_DATA_VI_SVC;
- servicepriority[3] = WMI_DATA_BE_SVC;
- servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
-
- /* set priority list */
- ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5);
-
- return 0;
-}
-
-/* reduce an ep's credits back to a set limit */
-static void ath6k_reduce_credits(struct htc_credit_state_info *cred_info,
- struct htc_endpoint_credit_dist *ep_dist,
- int limit)
-{
- int credits;
-
- ep_dist->cred_assngd = limit;
-
- if (ep_dist->credits <= limit)
- return;
+ cfg80211_new_sta(vif->ndev, mac_addr, &sinfo, GFP_KERNEL);
- credits = ep_dist->credits - limit;
- ep_dist->credits -= credits;
- cred_info->cur_free_credits += credits;
-}
-
-static void ath6k_credit_update(struct htc_credit_state_info *cred_info,
- struct list_head *epdist_list)
-{
- struct htc_endpoint_credit_dist *cur_dist_list;
-
- list_for_each_entry(cur_dist_list, epdist_list, list) {
- if (cur_dist_list->endpoint == ENDPOINT_0)
- continue;
-
- if (cur_dist_list->cred_to_dist > 0) {
- cur_dist_list->credits +=
- cur_dist_list->cred_to_dist;
- cur_dist_list->cred_to_dist = 0;
- if (cur_dist_list->credits >
- cur_dist_list->cred_assngd)
- ath6k_reduce_credits(cred_info,
- cur_dist_list,
- cur_dist_list->cred_assngd);
-
- if (cur_dist_list->credits >
- cur_dist_list->cred_norm)
- ath6k_reduce_credits(cred_info, cur_dist_list,
- cur_dist_list->cred_norm);
-
- if (!(cur_dist_list->dist_flags & HTC_EP_ACTIVE)) {
- if (cur_dist_list->txq_depth == 0)
- ath6k_reduce_credits(cred_info,
- cur_dist_list, 0);
- }
- }
- }
-}
-
-/*
- * HTC has an endpoint that needs credits, ep_dist is the endpoint in
- * question.
- */
-void ath6k_seek_credits(struct htc_credit_state_info *cred_info,
- struct htc_endpoint_credit_dist *ep_dist)
-{
- struct htc_endpoint_credit_dist *curdist_list;
- int credits = 0;
- int need;
-
- if (ep_dist->svc_id == WMI_CONTROL_SVC)
- goto out;
-
- if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
- (ep_dist->svc_id == WMI_DATA_VO_SVC))
- if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
- goto out;
-
- /*
- * For all other services, we follow a simple algorithm of:
- *
- * 1. checking the free pool for credits
- * 2. checking lower priority endpoints for credits to take
- */
-
- credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
-
- if (credits >= ep_dist->seek_cred)
- goto out;
-
- /*
- * We don't have enough in the free pool, try taking away from
- * lower priority services The rule for taking away credits:
- *
- * 1. Only take from lower priority endpoints
- * 2. Only take what is allocated above the minimum (never
- * starve an endpoint completely)
- * 3. Only take what you need.
- */
-
- list_for_each_entry_reverse(curdist_list,
- &cred_info->lowestpri_ep_dist,
- list) {
- if (curdist_list == ep_dist)
- break;
-
- need = ep_dist->seek_cred - cred_info->cur_free_credits;
-
- if ((curdist_list->cred_assngd - need) >=
- curdist_list->cred_min) {
- /*
- * The current one has been allocated more than
- * it's minimum and it has enough credits assigned
- * above it's minimum to fulfill our need try to
- * take away just enough to fulfill our need.
- */
- ath6k_reduce_credits(cred_info, curdist_list,
- curdist_list->cred_assngd - need);
-
- if (cred_info->cur_free_credits >=
- ep_dist->seek_cred)
- break;
- }
-
- if (curdist_list->endpoint == ENDPOINT_0)
- break;
- }
-
- credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);
-
-out:
- /* did we find some credits? */
- if (credits)
- ath6kl_deposit_credit_to_ep(cred_info, ep_dist, credits);
-
- ep_dist->seek_cred = 0;
-}
-
-/* redistribute credits based on activity change */
-static void ath6k_redistribute_credits(struct htc_credit_state_info *info,
- struct list_head *ep_dist_list)
-{
- struct htc_endpoint_credit_dist *curdist_list;
-
- list_for_each_entry(curdist_list, ep_dist_list, list) {
- if (curdist_list->endpoint == ENDPOINT_0)
- continue;
-
- if ((curdist_list->svc_id == WMI_DATA_BK_SVC) ||
- (curdist_list->svc_id == WMI_DATA_BE_SVC))
- curdist_list->dist_flags |= HTC_EP_ACTIVE;
-
- if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
- !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
- if (curdist_list->txq_depth == 0)
- ath6k_reduce_credits(info,
- curdist_list, 0);
- else
- ath6k_reduce_credits(info,
- curdist_list,
- curdist_list->cred_min);
- }
- }
-}
-
-/*
- *
- * This function is invoked whenever endpoints require credit
- * distributions. A lock is held while this function is invoked, this
- * function shall NOT block. The ep_dist_list is a list of distribution
- * structures in prioritized order as defined by the call to the
- * htc_set_credit_dist() api.
- */
-void ath6k_credit_distribute(struct htc_credit_state_info *cred_info,
- struct list_head *ep_dist_list,
- enum htc_credit_dist_reason reason)
-{
- switch (reason) {
- case HTC_CREDIT_DIST_SEND_COMPLETE:
- ath6k_credit_update(cred_info, ep_dist_list);
- break;
- case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
- ath6k_redistribute_credits(cred_info, ep_dist_list);
- break;
- default:
- break;
- }
-
- WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
- WARN_ON(cred_info->cur_free_credits < 0);
+ netif_wake_queue(vif->ndev);
}
void disconnect_timer_handler(unsigned long ptr)
{
struct net_device *dev = (struct net_device *)ptr;
- struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
- ath6kl_init_profile_info(ar);
- ath6kl_disconnect(ar);
+ ath6kl_init_profile_info(vif);
+ ath6kl_disconnect(vif);
}
-void ath6kl_disconnect(struct ath6kl *ar)
+void ath6kl_disconnect(struct ath6kl_vif *vif)
{
- if (test_bit(CONNECTED, &ar->flag) ||
- test_bit(CONNECT_PEND, &ar->flag)) {
- ath6kl_wmi_disconnect_cmd(ar->wmi);
+ if (test_bit(CONNECTED, &vif->flags) ||
+ test_bit(CONNECT_PEND, &vif->flags)) {
+ ath6kl_wmi_disconnect_cmd(vif->ar->wmi, vif->fw_vif_idx);
/*
* Disconnect command is issued, clear the connect pending
* flag. The connected flag will be cleared in
* disconnect event notification.
*/
- clear_bit(CONNECT_PEND, &ar->flag);
+ clear_bit(CONNECT_PEND, &vif->flags);
}
}
-void ath6kl_deep_sleep_enable(struct ath6kl *ar)
-{
- switch (ar->sme_state) {
- case SME_CONNECTING:
- cfg80211_connect_result(ar->net_dev, ar->bssid, NULL, 0,
- NULL, 0,
- WLAN_STATUS_UNSPECIFIED_FAILURE,
- GFP_KERNEL);
- break;
- case SME_CONNECTED:
- default:
- /*
- * FIXME: oddly enough smeState is in DISCONNECTED during
- * suspend, why? Need to send disconnected event in that
- * state.
- */
- cfg80211_disconnected(ar->net_dev, 0, NULL, 0, GFP_KERNEL);
- break;
- }
-
- if (test_bit(CONNECTED, &ar->flag) ||
- test_bit(CONNECT_PEND, &ar->flag))
- ath6kl_wmi_disconnect_cmd(ar->wmi);
-
- ar->sme_state = SME_DISCONNECTED;
-
- /* disable scanning */
- if (ath6kl_wmi_scanparams_cmd(ar->wmi, 0xFFFF, 0, 0, 0, 0, 0, 0, 0,
- 0, 0) != 0)
- printk(KERN_WARNING "ath6kl: failed to disable scan "
- "during suspend\n");
-
- ath6kl_cfg80211_scan_complete_event(ar, -ECANCELED);
-}
-
/* WMI Event handlers */
-static const char *get_hw_id_string(u32 id)
-{
- switch (id) {
- case AR6003_REV1_VERSION:
- return "1.0";
- case AR6003_REV2_VERSION:
- return "2.0";
- case AR6003_REV3_VERSION:
- return "2.1.1";
- default:
- return "unknown";
- }
-}
-
void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver)
{
struct ath6kl *ar = devt;
- struct net_device *dev = ar->net_dev;
- memcpy(dev->dev_addr, datap, ETH_ALEN);
+ memcpy(ar->mac_addr, datap, ETH_ALEN);
ath6kl_dbg(ATH6KL_DBG_TRC, "%s: mac addr = %pM\n",
- __func__, dev->dev_addr);
+ __func__, ar->mac_addr);
ar->version.wlan_ver = sw_ver;
ar->version.abi_ver = abi_ver;
- snprintf(ar->wdev->wiphy->fw_version,
- sizeof(ar->wdev->wiphy->fw_version),
+ snprintf(ar->wiphy->fw_version,
+ sizeof(ar->wiphy->fw_version),
"%u.%u.%u.%u",
(ar->version.wlan_ver & 0xf0000000) >> 28,
(ar->version.wlan_ver & 0x0f000000) >> 24,
@@ -1000,79 +549,85 @@ void ath6kl_ready_event(void *devt, u8 *datap, u32 sw_ver, u32 abi_ver)
/* indicate to the waiting thread that the ready event was received */
set_bit(WMI_READY, &ar->flag);
wake_up(&ar->event_wq);
-
- ath6kl_info("hw %s fw %s%s\n",
- get_hw_id_string(ar->wdev->wiphy->hw_version),
- ar->wdev->wiphy->fw_version,
- test_bit(TESTMODE, &ar->flag) ? " testmode" : "");
}
-void ath6kl_scan_complete_evt(struct ath6kl *ar, int status)
+void ath6kl_scan_complete_evt(struct ath6kl_vif *vif, int status)
{
- ath6kl_cfg80211_scan_complete_event(ar, status);
+ struct ath6kl *ar = vif->ar;
+ bool aborted = false;
+
+ if (status != WMI_SCAN_STATUS_SUCCESS)
+ aborted = true;
+
+ ath6kl_cfg80211_scan_complete_event(vif, aborted);
if (!ar->usr_bss_filter) {
- clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
- ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
+ clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
+ ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ NONE_BSS_FILTER, 0);
}
- ath6kl_dbg(ATH6KL_DBG_WLAN_SCAN, "scan complete: %d\n", status);
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "scan complete: %d\n", status);
}
-void ath6kl_connect_event(struct ath6kl *ar, u16 channel, u8 *bssid,
+void ath6kl_connect_event(struct ath6kl_vif *vif, u16 channel, u8 *bssid,
u16 listen_int, u16 beacon_int,
enum network_type net_type, u8 beacon_ie_len,
u8 assoc_req_len, u8 assoc_resp_len,
u8 *assoc_info)
{
- unsigned long flags;
+ struct ath6kl *ar = vif->ar;
- ath6kl_cfg80211_connect_event(ar, channel, bssid,
+ ath6kl_cfg80211_connect_event(vif, channel, bssid,
listen_int, beacon_int,
net_type, beacon_ie_len,
assoc_req_len, assoc_resp_len,
assoc_info);
- memcpy(ar->bssid, bssid, sizeof(ar->bssid));
- ar->bss_ch = channel;
+ memcpy(vif->bssid, bssid, sizeof(vif->bssid));
+ vif->bss_ch = channel;
- if ((ar->nw_type == INFRA_NETWORK))
- ath6kl_wmi_listeninterval_cmd(ar->wmi, ar->listen_intvl_t,
+ if ((vif->nw_type == INFRA_NETWORK))
+ ath6kl_wmi_listeninterval_cmd(ar->wmi, vif->fw_vif_idx,
+ ar->listen_intvl_t,
ar->listen_intvl_b);
- netif_wake_queue(ar->net_dev);
+ netif_wake_queue(vif->ndev);
/* Update connect & link status atomically */
- spin_lock_irqsave(&ar->lock, flags);
- set_bit(CONNECTED, &ar->flag);
- clear_bit(CONNECT_PEND, &ar->flag);
- netif_carrier_on(ar->net_dev);
- spin_unlock_irqrestore(&ar->lock, flags);
+ spin_lock_bh(&vif->if_lock);
+ set_bit(CONNECTED, &vif->flags);
+ clear_bit(CONNECT_PEND, &vif->flags);
+ netif_carrier_on(vif->ndev);
+ spin_unlock_bh(&vif->if_lock);
- aggr_reset_state(ar->aggr_cntxt);
- ar->reconnect_flag = 0;
+ aggr_reset_state(vif->aggr_cntxt);
+ vif->reconnect_flag = 0;
- if ((ar->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) {
+ if ((vif->nw_type == ADHOC_NETWORK) && ar->ibss_ps_enable) {
memset(ar->node_map, 0, sizeof(ar->node_map));
ar->node_num = 0;
ar->next_ep_id = ENDPOINT_2;
}
if (!ar->usr_bss_filter) {
- set_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
- ath6kl_wmi_bssfilter_cmd(ar->wmi, CURRENT_BSS_FILTER, 0);
+ set_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
+ ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ CURRENT_BSS_FILTER, 0);
}
}
-void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast)
+void ath6kl_tkip_micerr_event(struct ath6kl_vif *vif, u8 keyid, bool ismcast)
{
struct ath6kl_sta *sta;
+ struct ath6kl *ar = vif->ar;
u8 tsc[6];
+
/*
* For AP case, keyid will have aid of STA which sent pkt with
* MIC error. Use this aid to get MAC & send it to hostapd.
*/
- if (ar->nw_type == AP_NETWORK) {
+ if (vif->nw_type == AP_NETWORK) {
sta = ath6kl_find_sta_by_aid(ar, (keyid >> 2));
if (!sta)
return;
@@ -1081,19 +636,20 @@ void ath6kl_tkip_micerr_event(struct ath6kl *ar, u8 keyid, bool ismcast)
"ap tkip mic error received from aid=%d\n", keyid);
memset(tsc, 0, sizeof(tsc)); /* FIX: get correct TSC */
- cfg80211_michael_mic_failure(ar->net_dev, sta->mac,
+ cfg80211_michael_mic_failure(vif->ndev, sta->mac,
NL80211_KEYTYPE_PAIRWISE, keyid,
tsc, GFP_KERNEL);
} else
- ath6kl_cfg80211_tkip_micerr_event(ar, keyid, ismcast);
+ ath6kl_cfg80211_tkip_micerr_event(vif, keyid, ismcast);
}
-static void ath6kl_update_target_stats(struct ath6kl *ar, u8 *ptr, u32 len)
+static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len)
{
struct wmi_target_stats *tgt_stats =
(struct wmi_target_stats *) ptr;
- struct target_stats *stats = &ar->target_stats;
+ struct ath6kl *ar = vif->ar;
+ struct target_stats *stats = &vif->target_stats;
struct tkip_ccmp_stats *ccmp_stats;
u8 ac;
@@ -1189,8 +745,8 @@ static void ath6kl_update_target_stats(struct ath6kl *ar, u8 *ptr, u32 len)
stats->wow_evt_discarded +=
le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded);
- if (test_bit(STATS_UPDATE_PEND, &ar->flag)) {
- clear_bit(STATS_UPDATE_PEND, &ar->flag);
+ if (test_bit(STATS_UPDATE_PEND, &vif->flags)) {
+ clear_bit(STATS_UPDATE_PEND, &vif->flags);
wake_up(&ar->event_wq);
}
}
@@ -1200,14 +756,15 @@ static void ath6kl_add_le32(__le32 *var, __le32 val)
*var = cpu_to_le32(le32_to_cpu(*var) + le32_to_cpu(val));
}
-void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len)
+void ath6kl_tgt_stats_event(struct ath6kl_vif *vif, u8 *ptr, u32 len)
{
struct wmi_ap_mode_stat *p = (struct wmi_ap_mode_stat *) ptr;
+ struct ath6kl *ar = vif->ar;
struct wmi_ap_mode_stat *ap = &ar->ap_stats;
struct wmi_per_sta_stat *st_ap, *st_p;
u8 ac;
- if (ar->nw_type == AP_NETWORK) {
+ if (vif->nw_type == AP_NETWORK) {
if (len < sizeof(*p))
return;
@@ -1226,7 +783,7 @@ void ath6kl_tgt_stats_event(struct ath6kl *ar, u8 *ptr, u32 len)
}
} else {
- ath6kl_update_target_stats(ar, ptr, len);
+ ath6kl_update_target_stats(vif, ptr, len);
}
}
@@ -1245,11 +802,12 @@ void ath6kl_txpwr_rx_evt(void *devt, u8 tx_pwr)
wake_up(&ar->event_wq);
}
-void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid)
+void ath6kl_pspoll_event(struct ath6kl_vif *vif, u8 aid)
{
struct ath6kl_sta *conn;
struct sk_buff *skb;
bool psq_empty = false;
+ struct ath6kl *ar = vif->ar;
conn = ath6kl_find_sta_by_aid(ar, aid);
@@ -1272,7 +830,7 @@ void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid)
spin_unlock_bh(&conn->psq_lock);
conn->sta_flags |= STA_PS_POLLED;
- ath6kl_data_tx(skb, ar->net_dev);
+ ath6kl_data_tx(skb, vif->ndev);
conn->sta_flags &= ~STA_PS_POLLED;
spin_lock_bh(&conn->psq_lock);
@@ -1280,13 +838,14 @@ void ath6kl_pspoll_event(struct ath6kl *ar, u8 aid)
spin_unlock_bh(&conn->psq_lock);
if (psq_empty)
- ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0);
+ ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, conn->aid, 0);
}
-void ath6kl_dtimexpiry_event(struct ath6kl *ar)
+void ath6kl_dtimexpiry_event(struct ath6kl_vif *vif)
{
bool mcastq_empty = false;
struct sk_buff *skb;
+ struct ath6kl *ar = vif->ar;
/*
* If there are no associated STAs, ignore the DTIM expiry event.
@@ -1308,31 +867,31 @@ void ath6kl_dtimexpiry_event(struct ath6kl *ar)
return;
/* set the STA flag to dtim_expired for the frame to go out */
- set_bit(DTIM_EXPIRED, &ar->flag);
+ set_bit(DTIM_EXPIRED, &vif->flags);
spin_lock_bh(&ar->mcastpsq_lock);
while ((skb = skb_dequeue(&ar->mcastpsq)) != NULL) {
spin_unlock_bh(&ar->mcastpsq_lock);
- ath6kl_data_tx(skb, ar->net_dev);
+ ath6kl_data_tx(skb, vif->ndev);
spin_lock_bh(&ar->mcastpsq_lock);
}
spin_unlock_bh(&ar->mcastpsq_lock);
- clear_bit(DTIM_EXPIRED, &ar->flag);
+ clear_bit(DTIM_EXPIRED, &vif->flags);
/* clear the LSB of the BitMapCtl field of the TIM IE */
- ath6kl_wmi_set_pvb_cmd(ar->wmi, MCAST_AID, 0);
+ ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx, MCAST_AID, 0);
}
-void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid,
+void ath6kl_disconnect_event(struct ath6kl_vif *vif, u8 reason, u8 *bssid,
u8 assoc_resp_len, u8 *assoc_info,
u16 prot_reason_status)
{
- unsigned long flags;
+ struct ath6kl *ar = vif->ar;
- if (ar->nw_type == AP_NETWORK) {
+ if (vif->nw_type == AP_NETWORK) {
if (!ath6kl_remove_sta(ar, bssid, prot_reason_status))
return;
@@ -1344,31 +903,31 @@ void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid,
/* clear the LSB of the TIM IE's BitMapCtl field */
if (test_bit(WMI_READY, &ar->flag))
- ath6kl_wmi_set_pvb_cmd(ar->wmi, MCAST_AID, 0);
+ ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
+ MCAST_AID, 0);
}
if (!is_broadcast_ether_addr(bssid)) {
/* send event to application */
- cfg80211_del_sta(ar->net_dev, bssid, GFP_KERNEL);
+ cfg80211_del_sta(vif->ndev, bssid, GFP_KERNEL);
}
- if (memcmp(ar->net_dev->dev_addr, bssid, ETH_ALEN) == 0) {
- memset(ar->wep_key_list, 0, sizeof(ar->wep_key_list));
- clear_bit(CONNECTED, &ar->flag);
+ if (memcmp(vif->ndev->dev_addr, bssid, ETH_ALEN) == 0) {
+ memset(vif->wep_key_list, 0, sizeof(vif->wep_key_list));
+ clear_bit(CONNECTED, &vif->flags);
}
return;
}
- ath6kl_cfg80211_disconnect_event(ar, reason, bssid,
+ ath6kl_cfg80211_disconnect_event(vif, reason, bssid,
assoc_resp_len, assoc_info,
prot_reason_status);
- aggr_reset_state(ar->aggr_cntxt);
+ aggr_reset_state(vif->aggr_cntxt);
- del_timer(&ar->disconnect_timer);
+ del_timer(&vif->disconnect_timer);
- ath6kl_dbg(ATH6KL_DBG_WLAN_CONNECT,
- "disconnect reason is %d\n", reason);
+ ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "disconnect reason is %d\n", reason);
/*
* If the event is due to disconnect cmd from the host, only they
@@ -1377,83 +936,88 @@ void ath6kl_disconnect_event(struct ath6kl *ar, u8 reason, u8 *bssid,
*/
if (reason == DISCONNECT_CMD) {
if (!ar->usr_bss_filter && test_bit(WMI_READY, &ar->flag))
- ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
+ ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ NONE_BSS_FILTER, 0);
} else {
- set_bit(CONNECT_PEND, &ar->flag);
+ set_bit(CONNECT_PEND, &vif->flags);
if (((reason == ASSOC_FAILED) &&
(prot_reason_status == 0x11)) ||
((reason == ASSOC_FAILED) && (prot_reason_status == 0x0)
- && (ar->reconnect_flag == 1))) {
- set_bit(CONNECTED, &ar->flag);
+ && (vif->reconnect_flag == 1))) {
+ set_bit(CONNECTED, &vif->flags);
return;
}
}
/* update connect & link status atomically */
- spin_lock_irqsave(&ar->lock, flags);
- clear_bit(CONNECTED, &ar->flag);
- netif_carrier_off(ar->net_dev);
- spin_unlock_irqrestore(&ar->lock, flags);
+ spin_lock_bh(&vif->if_lock);
+ clear_bit(CONNECTED, &vif->flags);
+ netif_carrier_off(vif->ndev);
+ spin_unlock_bh(&vif->if_lock);
- if ((reason != CSERV_DISCONNECT) || (ar->reconnect_flag != 1))
- ar->reconnect_flag = 0;
+ if ((reason != CSERV_DISCONNECT) || (vif->reconnect_flag != 1))
+ vif->reconnect_flag = 0;
if (reason != CSERV_DISCONNECT)
ar->user_key_ctrl = 0;
- netif_stop_queue(ar->net_dev);
- memset(ar->bssid, 0, sizeof(ar->bssid));
- ar->bss_ch = 0;
+ netif_stop_queue(vif->ndev);
+ memset(vif->bssid, 0, sizeof(vif->bssid));
+ vif->bss_ch = 0;
ath6kl_tx_data_cleanup(ar);
}
-static int ath6kl_open(struct net_device *dev)
+struct ath6kl_vif *ath6kl_vif_first(struct ath6kl *ar)
{
- struct ath6kl *ar = ath6kl_priv(dev);
- unsigned long flags;
+ struct ath6kl_vif *vif;
- spin_lock_irqsave(&ar->lock, flags);
+ spin_lock_bh(&ar->list_lock);
+ if (list_empty(&ar->vif_list)) {
+ spin_unlock_bh(&ar->list_lock);
+ return NULL;
+ }
- set_bit(WLAN_ENABLED, &ar->flag);
+ vif = list_first_entry(&ar->vif_list, struct ath6kl_vif, list);
- if (test_bit(CONNECTED, &ar->flag)) {
+ spin_unlock_bh(&ar->list_lock);
+
+ return vif;
+}
+
+static int ath6kl_open(struct net_device *dev)
+{
+ struct ath6kl_vif *vif = netdev_priv(dev);
+
+ set_bit(WLAN_ENABLED, &vif->flags);
+
+ if (test_bit(CONNECTED, &vif->flags)) {
netif_carrier_on(dev);
netif_wake_queue(dev);
} else
netif_carrier_off(dev);
- spin_unlock_irqrestore(&ar->lock, flags);
-
return 0;
}
static int ath6kl_close(struct net_device *dev)
{
- struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
netif_stop_queue(dev);
- ath6kl_disconnect(ar);
-
- if (test_bit(WMI_READY, &ar->flag)) {
- if (ath6kl_wmi_scanparams_cmd(ar->wmi, 0xFFFF, 0, 0, 0, 0, 0, 0,
- 0, 0, 0))
- return -EIO;
-
- clear_bit(WLAN_ENABLED, &ar->flag);
- }
+ ath6kl_cfg80211_stop(vif);
- ath6kl_cfg80211_scan_complete_event(ar, -ECANCELED);
+ clear_bit(WLAN_ENABLED, &vif->flags);
return 0;
}
static struct net_device_stats *ath6kl_get_stats(struct net_device *dev)
{
- struct ath6kl *ar = ath6kl_priv(dev);
+ struct ath6kl_vif *vif = netdev_priv(dev);
- return &ar->net_stats;
+ return &vif->net_stats;
}
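
Throughout main.c the per-interface state moves from ath6kl_priv(dev) (the driver-global struct ath6kl) to netdev_priv(dev), i.e. a struct ath6kl_vif kept in the net_device's trailing private area. A minimal userspace sketch of that accessor pattern, with invented struct names and without the alignment the real netdev_priv() applies:

#include <stdio.h>
#include <stdlib.h>

struct fake_netdev {			/* stands in for struct net_device */
	char name[16];
	/* private area follows the header */
};

struct fake_vif {			/* stands in for struct ath6kl_vif */
	unsigned long flags;
	unsigned long tx_dropped;
};

static void *fake_netdev_priv(struct fake_netdev *dev)
{
	/* the real netdev_priv() also aligns this; omitted for brevity */
	return (char *)dev + sizeof(*dev);
}

static struct fake_netdev *fake_alloc_netdev(size_t priv_sz, const char *name)
{
	struct fake_netdev *dev = calloc(1, sizeof(*dev) + priv_sz);

	if (dev)
		snprintf(dev->name, sizeof(dev->name), "%s", name);
	return dev;
}

int main(void)
{
	struct fake_netdev *dev = fake_alloc_netdev(sizeof(struct fake_vif), "wlan0");
	struct fake_vif *vif = fake_netdev_priv(dev);

	vif->tx_dropped++;
	printf("%s tx_dropped=%lu\n", dev->name, vif->tx_dropped);
	free(dev);
	return 0;
}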
static struct net_device_ops ath6kl_netdev_ops = {
@@ -1466,6 +1030,7 @@ static struct net_device_ops ath6kl_netdev_ops = {
void init_netdev(struct net_device *dev)
{
dev->netdev_ops = &ath6kl_netdev_ops;
+ dev->destructor = free_netdev;
dev->watchdog_timeo = ATH6KL_TX_TIMEOUT;
dev->needed_headroom = ETH_HLEN;
diff --git a/drivers/net/wireless/ath/ath6kl/sdio.c b/drivers/net/wireless/ath/ath6kl/sdio.c
index 066d4f8..9475e2d 100644
--- a/drivers/net/wireless/ath/ath6kl/sdio.c
+++ b/drivers/net/wireless/ath/ath6kl/sdio.c
@@ -22,7 +22,7 @@
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sd.h>
-#include "htc_hif.h"
+#include "hif.h"
#include "hif-ops.h"
#include "target.h"
#include "debug.h"
@@ -40,12 +40,18 @@ struct ath6kl_sdio {
struct bus_request bus_req[BUS_REQUEST_MAX_NUM];
struct ath6kl *ar;
+
u8 *dma_buffer;
+ /* protects access to dma_buffer */
+ struct mutex dma_buffer_mutex;
+
/* scatter request list head */
struct list_head scat_req;
spinlock_t scat_lock;
+ bool scatter_enabled;
+
bool is_disabled;
atomic_t irq_handling;
const struct sdio_device_id *id;
@@ -135,6 +141,8 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
{
int ret = 0;
+ sdio_claim_host(func);
+
if (request & HIF_WRITE) {
/* FIXME: looks like ugly workaround for something */
if (addr >= HIF_MBOX_BASE_ADDR &&
@@ -156,6 +164,8 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
ret = sdio_memcpy_fromio(func, buf, addr, len);
}
+ sdio_release_host(func);
+
ath6kl_dbg(ATH6KL_DBG_SDIO, "%s addr 0x%x%s buf 0x%p len %d\n",
request & HIF_WRITE ? "wr" : "rd", addr,
request & HIF_FIXED_ADDRESS ? " (fixed)" : "", buf, len);
@@ -167,12 +177,11 @@ static int ath6kl_sdio_io(struct sdio_func *func, u32 request, u32 addr,
static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
{
struct bus_request *bus_req;
- unsigned long flag;
- spin_lock_irqsave(&ar_sdio->lock, flag);
+ spin_lock_bh(&ar_sdio->lock);
if (list_empty(&ar_sdio->bus_req_freeq)) {
- spin_unlock_irqrestore(&ar_sdio->lock, flag);
+ spin_unlock_bh(&ar_sdio->lock);
return NULL;
}
@@ -180,7 +189,7 @@ static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
struct bus_request, list);
list_del(&bus_req->list);
- spin_unlock_irqrestore(&ar_sdio->lock, flag);
+ spin_unlock_bh(&ar_sdio->lock);
ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
__func__, bus_req);
@@ -190,14 +199,12 @@ static struct bus_request *ath6kl_sdio_alloc_busreq(struct ath6kl_sdio *ar_sdio)
static void ath6kl_sdio_free_bus_req(struct ath6kl_sdio *ar_sdio,
struct bus_request *bus_req)
{
- unsigned long flag;
-
ath6kl_dbg(ATH6KL_DBG_SCATTER, "%s: bus request 0x%p\n",
__func__, bus_req);
- spin_lock_irqsave(&ar_sdio->lock, flag);
+ spin_lock_bh(&ar_sdio->lock);
list_add_tail(&bus_req->list, &ar_sdio->bus_req_freeq);
- spin_unlock_irqrestore(&ar_sdio->lock, flag);
+ spin_unlock_bh(&ar_sdio->lock);
}
static void ath6kl_sdio_setup_scat_data(struct hif_scatter_req *scat_req,
@@ -291,10 +298,14 @@ static int ath6kl_sdio_scat_rw(struct ath6kl_sdio *ar_sdio,
mmc_req.cmd = &cmd;
mmc_req.data = &data;
+ sdio_claim_host(ar_sdio->func);
+
mmc_set_data_timeout(&data, ar_sdio->func->card);
/* synchronous call to process request */
mmc_wait_for_req(ar_sdio->func->card->host, &mmc_req);
+ sdio_release_host(ar_sdio->func);
+
status = cmd.error ? cmd.error : data.error;
scat_complete:
@@ -389,17 +400,19 @@ static int ath6kl_sdio_read_write_sync(struct ath6kl *ar, u32 addr, u8 *buf,
if (buf_needs_bounce(buf)) {
if (!ar_sdio->dma_buffer)
return -ENOMEM;
+ mutex_lock(&ar_sdio->dma_buffer_mutex);
tbuf = ar_sdio->dma_buffer;
memcpy(tbuf, buf, len);
bounced = true;
} else
tbuf = buf;
- sdio_claim_host(ar_sdio->func);
ret = ath6kl_sdio_io(ar_sdio->func, request, addr, tbuf, len);
if ((request & HIF_READ) && bounced)
memcpy(buf, tbuf, len);
- sdio_release_host(ar_sdio->func);
+
+ if (bounced)
+ mutex_unlock(&ar_sdio->dma_buffer_mutex);
return ret;
}
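
The hunk above drops the sdio_claim_host()/sdio_release_host() pair around the transfer (host claiming now happens inside ath6kl_sdio_io()) and instead takes a dedicated dma_buffer_mutex, held only while the shared DMA-safe buffer is actually in use. A standalone sketch of that bounce-buffer discipline, using a pthread mutex and a stand-in alignment check rather than the driver's buf_needs_bounce():

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DMA_BUF_SZ 2048

static uint8_t dma_buffer[DMA_BUF_SZ];
static pthread_mutex_t dma_buffer_mutex = PTHREAD_MUTEX_INITIALIZER;

/* stand-in check: pretend only 4-byte aligned buffers are DMA safe */
static bool buf_needs_bounce(const uint8_t *buf)
{
	return ((uintptr_t)buf & 0x3) != 0;
}

/* pretend device transfer: fill the buffer on a "read" */
static int do_io(bool read, uint8_t *buf, size_t len)
{
	if (read)
		memset(buf, 0xab, len);
	return 0;
}

static int read_write_sync(bool read, uint8_t *buf, size_t len)
{
	uint8_t *tbuf;
	bool bounced = false;
	int ret;

	if (buf_needs_bounce(buf)) {
		pthread_mutex_lock(&dma_buffer_mutex);
		tbuf = dma_buffer;
		memcpy(tbuf, buf, len);		/* driver copies for either direction */
		bounced = true;
	} else {
		tbuf = buf;
	}

	ret = do_io(read, tbuf, len);

	if (read && bounced)
		memcpy(buf, tbuf, len);		/* hand bounced read data back */

	if (bounced)
		pthread_mutex_unlock(&dma_buffer_mutex);

	return ret;
}

int main(void)
{
	uint8_t raw[32] = { 0 };
	uint8_t *unaligned = raw + 1;
	int ret = read_write_sync(true, unaligned, 16);

	printf("ret=%d first byte=0x%02x\n", ret, unaligned[0]);
	return 0;
}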
@@ -418,29 +431,25 @@ static void __ath6kl_sdio_write_async(struct ath6kl_sdio *ar_sdio,
req->request);
context = req->packet;
ath6kl_sdio_free_bus_req(ar_sdio, req);
- ath6kldev_rw_comp_handler(context, status);
+ ath6kl_hif_rw_comp_handler(context, status);
}
}
static void ath6kl_sdio_write_async_work(struct work_struct *work)
{
struct ath6kl_sdio *ar_sdio;
- unsigned long flags;
struct bus_request *req, *tmp_req;
ar_sdio = container_of(work, struct ath6kl_sdio, wr_async_work);
- sdio_claim_host(ar_sdio->func);
- spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ spin_lock_bh(&ar_sdio->wr_async_lock);
list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
list_del(&req->list);
- spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
__ath6kl_sdio_write_async(ar_sdio, req);
- spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ spin_lock_bh(&ar_sdio->wr_async_lock);
}
- spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
-
- sdio_release_host(ar_sdio->func);
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
}
static void ath6kl_sdio_irq_handler(struct sdio_func *func)
@@ -459,20 +468,23 @@ static void ath6kl_sdio_irq_handler(struct sdio_func *func)
*/
sdio_release_host(ar_sdio->func);
- status = ath6kldev_intr_bh_handler(ar_sdio->ar);
+ status = ath6kl_hif_intr_bh_handler(ar_sdio->ar);
sdio_claim_host(ar_sdio->func);
atomic_set(&ar_sdio->irq_handling, 0);
WARN_ON(status && status != -ECANCELED);
}
-static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio)
+static int ath6kl_sdio_power_on(struct ath6kl *ar)
{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct sdio_func *func = ar_sdio->func;
int ret = 0;
if (!ar_sdio->is_disabled)
return 0;
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power on\n");
+
sdio_claim_host(func);
ret = sdio_enable_func(func);
@@ -495,13 +507,16 @@ static int ath6kl_sdio_power_on(struct ath6kl_sdio *ar_sdio)
return ret;
}
-static int ath6kl_sdio_power_off(struct ath6kl_sdio *ar_sdio)
+static int ath6kl_sdio_power_off(struct ath6kl *ar)
{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
int ret;
if (ar_sdio->is_disabled)
return 0;
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "sdio power off\n");
+
/* Disable the card */
sdio_claim_host(ar_sdio->func);
ret = sdio_disable_func(ar_sdio->func);
@@ -521,7 +536,6 @@ static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct bus_request *bus_req;
- unsigned long flags;
bus_req = ath6kl_sdio_alloc_busreq(ar_sdio);
@@ -534,9 +548,9 @@ static int ath6kl_sdio_write_async(struct ath6kl *ar, u32 address, u8 *buffer,
bus_req->request = request;
bus_req->packet = packet;
- spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ spin_lock_bh(&ar_sdio->wr_async_lock);
list_add_tail(&bus_req->list, &ar_sdio->wr_asyncq);
- spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
return 0;
@@ -582,9 +596,8 @@ static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct hif_scatter_req *node = NULL;
- unsigned long flag;
- spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+ spin_lock_bh(&ar_sdio->scat_lock);
if (!list_empty(&ar_sdio->scat_req)) {
node = list_first_entry(&ar_sdio->scat_req,
@@ -592,7 +605,7 @@ static struct hif_scatter_req *ath6kl_sdio_scatter_req_get(struct ath6kl *ar)
list_del(&node->list);
}
- spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+ spin_unlock_bh(&ar_sdio->scat_lock);
return node;
}
@@ -601,13 +614,12 @@ static void ath6kl_sdio_scatter_req_add(struct ath6kl *ar,
struct hif_scatter_req *s_req)
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
- unsigned long flag;
- spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+ spin_lock_bh(&ar_sdio->scat_lock);
list_add_tail(&s_req->list, &ar_sdio->scat_req);
- spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+ spin_unlock_bh(&ar_sdio->scat_lock);
}
@@ -618,7 +630,6 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
u32 request = scat_req->req;
int status = 0;
- unsigned long flags;
if (!scat_req->len)
return -EINVAL;
@@ -627,14 +638,12 @@ static int ath6kl_sdio_async_rw_scatter(struct ath6kl *ar,
"hif-scatter: total len: %d scatter entries: %d\n",
scat_req->len, scat_req->scat_entries);
- if (request & HIF_SYNCHRONOUS) {
- sdio_claim_host(ar_sdio->func);
+ if (request & HIF_SYNCHRONOUS)
status = ath6kl_sdio_scat_rw(ar_sdio, scat_req->busrequest);
- sdio_release_host(ar_sdio->func);
- } else {
- spin_lock_irqsave(&ar_sdio->wr_async_lock, flags);
+ else {
+ spin_lock_bh(&ar_sdio->wr_async_lock);
list_add_tail(&scat_req->busrequest->list, &ar_sdio->wr_asyncq);
- spin_unlock_irqrestore(&ar_sdio->wr_async_lock, flags);
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
queue_work(ar->ath6kl_wq, &ar_sdio->wr_async_work);
}
@@ -646,23 +655,27 @@ static void ath6kl_sdio_cleanup_scatter(struct ath6kl *ar)
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct hif_scatter_req *s_req, *tmp_req;
- unsigned long flag;
/* empty the free list */
- spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+ spin_lock_bh(&ar_sdio->scat_lock);
list_for_each_entry_safe(s_req, tmp_req, &ar_sdio->scat_req, list) {
list_del(&s_req->list);
- spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+ spin_unlock_bh(&ar_sdio->scat_lock);
+ /*
+ * FIXME: should we also call completion handler with
+ * ath6kl_hif_rw_comp_handler() with status -ECANCELED so
+ * that the packet is properly freed?
+ */
if (s_req->busrequest)
ath6kl_sdio_free_bus_req(ar_sdio, s_req->busrequest);
kfree(s_req->virt_dma_buf);
kfree(s_req->sgentries);
kfree(s_req);
- spin_lock_irqsave(&ar_sdio->scat_lock, flag);
+ spin_lock_bh(&ar_sdio->scat_lock);
}
- spin_unlock_irqrestore(&ar_sdio->scat_lock, flag);
+ spin_unlock_bh(&ar_sdio->scat_lock);
}
/* setup of HIF scatter resources */
@@ -673,6 +686,11 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
int ret;
bool virt_scat = false;
+ if (ar_sdio->scatter_enabled)
+ return 0;
+
+ ar_sdio->scatter_enabled = true;
+
/* check if host supports scatter and it meets our requirements */
if (ar_sdio->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
ath6kl_err("host only supports scatter of :%d entries, need: %d\n",
@@ -687,8 +705,8 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
MAX_SCATTER_REQUESTS, virt_scat);
if (!ret) {
- ath6kl_dbg(ATH6KL_DBG_SCATTER,
- "hif-scatter enabled: max scatter req : %d entries: %d\n",
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "hif-scatter enabled requests %d entries %d\n",
MAX_SCATTER_REQUESTS,
MAX_SCATTER_ENTRIES_PER_REQ);
@@ -712,8 +730,8 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
return ret;
}
- ath6kl_dbg(ATH6KL_DBG_SCATTER,
- "Vitual scatter enabled, max_scat_req:%d, entries:%d\n",
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "virtual scatter enabled requests %d entries %d\n",
ATH6KL_SCATTER_REQS, ATH6KL_SCATTER_ENTRIES_PER_REQ);
target->max_scat_entries = ATH6KL_SCATTER_ENTRIES_PER_REQ;
@@ -724,7 +742,47 @@ static int ath6kl_sdio_enable_scatter(struct ath6kl *ar)
return 0;
}
-static int ath6kl_sdio_suspend(struct ath6kl *ar)
+static int ath6kl_sdio_config(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct sdio_func *func = ar_sdio->func;
+ int ret;
+
+ sdio_claim_host(func);
+
+ if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
+ MANUFACTURER_ID_AR6003_BASE) {
+ /* enable 4-bit ASYNC interrupt on AR6003 or later */
+ ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
+ CCCR_SDIO_IRQ_MODE_REG,
+ SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
+ if (ret) {
+ ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
+ ret);
+ goto out;
+ }
+
+ ath6kl_dbg(ATH6KL_DBG_BOOT, "4-bit async irq mode enabled\n");
+ }
+
+ /* give us some time to enable, in ms */
+ func->enable_timeout = 100;
+
+ ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
+ if (ret) {
+ ath6kl_err("Set sdio block size %d failed: %d)\n",
+ HIF_MBOX_BLOCK_SIZE, ret);
+ sdio_release_host(func);
+ goto out;
+ }
+
+out:
+ sdio_release_host(func);
+
+ return ret;
+}
+
+static int ath6kl_sdio_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
{
struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
struct sdio_func *func = ar_sdio->func;
@@ -733,12 +791,14 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar)
flags = sdio_get_host_pm_caps(func);
- if (!(flags & MMC_PM_KEEP_POWER))
- /* as host doesn't support keep power we need to bail out */
- ath6kl_dbg(ATH6KL_DBG_SDIO,
- "func %d doesn't support MMC_PM_KEEP_POWER\n",
- func->num);
- return -EINVAL;
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio suspend pm_caps 0x%x\n", flags);
+
+ if (!(flags & MMC_PM_KEEP_POWER) ||
+ (ar->conf_flags & ATH6KL_CONF_SUSPEND_CUTPOWER)) {
+ /* as host doesn't support keep power we need to cut power */
+ return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_CUTPOWER,
+ NULL);
+ }
ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
if (ret) {
@@ -747,11 +807,367 @@ static int ath6kl_sdio_suspend(struct ath6kl *ar)
return ret;
}
- ath6kl_deep_sleep_enable(ar);
+ if (!(flags & MMC_PM_WAKE_SDIO_IRQ))
+ goto deepsleep;
+
+ /* sdio irq wakes up host */
+
+ if (ar->state == ATH6KL_STATE_SCHED_SCAN) {
+ ret = ath6kl_cfg80211_suspend(ar,
+ ATH6KL_CFG_SUSPEND_SCHED_SCAN,
+ NULL);
+ if (ret) {
+ ath6kl_warn("Schedule scan suspend failed: %d", ret);
+ return ret;
+ }
+
+ ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
+ if (ret)
+ ath6kl_warn("set sdio wake irq flag failed: %d\n", ret);
+
+ return ret;
+ }
+
+ if (wow) {
+ /*
+ * The host sdio controller is capable of keep power and
+ * sdio irq wake up at this point. It's fine to continue
+ * wow suspend operation.
+ */
+ ret = ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_WOW, wow);
+ if (ret)
+ return ret;
+
+ ret = sdio_set_host_pm_flags(func, MMC_PM_WAKE_SDIO_IRQ);
+ if (ret)
+ ath6kl_err("set sdio wake irq flag failed: %d\n", ret);
+
+ return ret;
+ }
+
+deepsleep:
+ return ath6kl_cfg80211_suspend(ar, ATH6KL_CFG_SUSPEND_DEEPSLEEP, NULL);
+}
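
ath6kl_sdio_suspend() now picks one of four cfg80211 suspend modes from the host controller's PM capabilities, the ATH6KL_CONF_SUSPEND_CUTPOWER configuration, the scheduled-scan state, and whether WoW triggers were passed in. A reduced, compilable model of that decision order (the enum and flag names are invented; only the ordering of the checks mirrors the code above):

#include <stdbool.h>
#include <stdio.h>

enum suspend_mode { SUSPEND_CUTPOWER, SUSPEND_SCHED_SCAN, SUSPEND_WOW, SUSPEND_DEEPSLEEP };

static enum suspend_mode pick_suspend_mode(bool host_keep_power,
					   bool host_wake_sdio_irq,
					   bool conf_cutpower,
					   bool sched_scan_active,
					   bool wow_requested)
{
	/* no keep-power (or the user asked for it): power must be cut */
	if (!host_keep_power || conf_cutpower)
		return SUSPEND_CUTPOWER;

	/* keep-power but no SDIO IRQ wakeup: deep sleep is the best option left */
	if (!host_wake_sdio_irq)
		return SUSPEND_DEEPSLEEP;

	/* the SDIO IRQ can wake the host */
	if (sched_scan_active)
		return SUSPEND_SCHED_SCAN;
	if (wow_requested)
		return SUSPEND_WOW;

	return SUSPEND_DEEPSLEEP;
}

int main(void)
{
	/* keep-power + wake irq + wow triggers -> SUSPEND_WOW */
	printf("%d\n", pick_suspend_mode(true, true, false, false, true));
	return 0;
}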
+
+static int ath6kl_sdio_resume(struct ath6kl *ar)
+{
+ switch (ar->state) {
+ case ATH6KL_STATE_OFF:
+ case ATH6KL_STATE_CUTPOWER:
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND,
+ "sdio resume configuring sdio\n");
+
+ /* need to set sdio settings after power is cut from sdio */
+ ath6kl_sdio_config(ar);
+ break;
+
+ case ATH6KL_STATE_ON:
+ break;
+
+ case ATH6KL_STATE_DEEPSLEEP:
+ break;
+
+ case ATH6KL_STATE_WOW:
+ break;
+ case ATH6KL_STATE_SCHED_SCAN:
+ break;
+ }
+
+ ath6kl_cfg80211_resume(ar);
+
+ return 0;
+}
+
+/* set the window address register (using 4-byte register access ). */
+static int ath6kl_set_addrwin_reg(struct ath6kl *ar, u32 reg_addr, u32 addr)
+{
+ int status;
+ u8 addr_val[4];
+ s32 i;
+
+ /*
+ * Write bytes 1,2,3 of the register to set the upper address bytes,
+ * the LSB is written last to initiate the access cycle
+ */
+
+ for (i = 1; i <= 3; i++) {
+ /*
+ * Fill the buffer with the address byte value we want to
+ * hit 4 times.
+ */
+ memset(addr_val, ((u8 *)&addr)[i], 4);
+
+ /*
+ * Hit each byte of the register address with a 4-byte
+ * write operation to the same address, this is a harmless
+ * operation.
+ */
+ status = ath6kl_sdio_read_write_sync(ar, reg_addr + i, addr_val,
+ 4, HIF_WR_SYNC_BYTE_FIX);
+ if (status)
+ break;
+ }
+
+ if (status) {
+ ath6kl_err("%s: failed to write initial bytes of 0x%x "
+ "to window reg: 0x%X\n", __func__,
+ addr, reg_addr);
+ return status;
+ }
+
+ /*
+ * Write the address register again, this time write the whole
+ * 4-byte value. The effect here is that the LSB write causes the
+ * cycle to start, the extra 3 byte write to bytes 1,2,3 has no
+ * effect since we are writing the same values again
+ */
+ status = ath6kl_sdio_read_write_sync(ar, reg_addr, (u8 *)(&addr),
+ 4, HIF_WR_SYNC_BYTE_INC);
+
+ if (status) {
+ ath6kl_err("%s: failed to write 0x%x to window reg: 0x%X\n",
+ __func__, addr, reg_addr);
+ return status;
+ }
+
+ return 0;
+}
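
The comments in ath6kl_set_addrwin_reg() describe the access protocol: bytes 1..3 of the target address are staged first with byte-fixed 4-byte writes, and the LSB goes last as part of a full 4-byte write because that write launches the access cycle. A standalone sketch of the sequencing, with a plain array standing in for the SDIO window register and assuming a little-endian host, which is the same assumption the ((u8 *)&addr)[i] indexing above makes:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t window_reg[4];	/* mock WINDOW_*_ADDR register, byte addressed */

static void mock_write_fixed(unsigned int reg_off, const uint8_t *val, int len)
{
	/* byte-fixed write: all four bytes land on the same register byte */
	window_reg[reg_off] = val[len - 1];
	printf("fixed write  off=%u val=0x%02x\n", reg_off, val[len - 1]);
}

static void mock_write_incr(unsigned int reg_off, const uint8_t *val, int len)
{
	memcpy(&window_reg[reg_off], val, len);
	printf("incr write   off=%u (4 bytes, LSB triggers the cycle)\n", reg_off);
}

static void set_addrwin_reg(uint32_t addr)	/* little-endian host assumed */
{
	uint8_t addr_val[4];
	int i;

	for (i = 1; i <= 3; i++) {
		/* repeat address byte i four times, hit register byte i */
		memset(addr_val, ((uint8_t *)&addr)[i], 4);
		mock_write_fixed(i, addr_val, 4);
	}

	/* whole 4-byte value last: rewriting bytes 1..3 is harmless,
	 * writing byte 0 (the LSB) starts the access */
	mock_write_incr(0, (uint8_t *)&addr, 4);
}

int main(void)
{
	set_addrwin_reg(0x0057e884);
	printf("window reg now: %02x %02x %02x %02x\n",
	       window_reg[0], window_reg[1], window_reg[2], window_reg[3]);
	return 0;
}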
+
+static int ath6kl_sdio_diag_read32(struct ath6kl *ar, u32 address, u32 *data)
+{
+ int status;
+
+ /* set window register to start read cycle */
+ status = ath6kl_set_addrwin_reg(ar, WINDOW_READ_ADDR_ADDRESS,
+ address);
+
+ if (status)
+ return status;
+
+ /* read the data */
+ status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
+ (u8 *)data, sizeof(u32), HIF_RD_SYNC_BYTE_INC);
+ if (status) {
+ ath6kl_err("%s: failed to read from window data addr\n",
+ __func__);
+ return status;
+ }
+
+ return status;
+}
+
+static int ath6kl_sdio_diag_write32(struct ath6kl *ar, u32 address,
+ __le32 data)
+{
+ int status;
+ u32 val = (__force u32) data;
+
+ /* set write data */
+ status = ath6kl_sdio_read_write_sync(ar, WINDOW_DATA_ADDRESS,
+ (u8 *) &val, sizeof(u32), HIF_WR_SYNC_BYTE_INC);
+ if (status) {
+ ath6kl_err("%s: failed to write 0x%x to window data addr\n",
+ __func__, data);
+ return status;
+ }
+
+ /* set window register, which starts the write cycle */
+ return ath6kl_set_addrwin_reg(ar, WINDOW_WRITE_ADDR_ADDRESS,
+ address);
+}
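
Note the mirrored ordering between the two diag helpers: a read programs the read-window address first and then pulls WINDOW_DATA, while a write stages WINDOW_DATA first and only then programs the write-window address, so the cycle starts with the data already latched. A tiny mock illustrating the two orderings (the helpers are stand-ins, not the driver's HIF calls):

#include <stdint.h>
#include <stdio.h>

static uint32_t mock_window_data;	/* stands in for WINDOW_DATA */

static void mock_set_window(const char *which, uint32_t addr)
{
	printf("set %s window to 0x%08x (starts the cycle)\n", which, addr);
}

static uint32_t diag_read32(uint32_t addr)
{
	mock_set_window("read", addr);		/* 1: address write starts the fetch */
	return mock_window_data;		/* 2: data register now holds the word */
}

static void diag_write32(uint32_t addr, uint32_t data)
{
	mock_window_data = data;		/* 1: stage the data */
	mock_set_window("write", addr);		/* 2: address write commits it */
}

int main(void)
{
	diag_write32(0x540000, 0x12345678);
	printf("read back 0x%08x\n", diag_read32(0x540000));
	return 0;
}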
+
+static int ath6kl_sdio_bmi_credits(struct ath6kl *ar)
+{
+ u32 addr;
+ unsigned long timeout;
+ int ret;
+
+ ar->bmi.cmd_credits = 0;
+
+ /* Read the counter register to get the command credits */
+ addr = COUNT_DEC_ADDRESS + (HTC_MAILBOX_NUM_MAX + ENDPOINT1) * 4;
+
+ timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
+ while (time_before(jiffies, timeout) && !ar->bmi.cmd_credits) {
+
+ /*
+ * Hit the credit counter with a 4-byte access, the first byte
+ * read will hit the counter and cause a decrement, while the
+ * remaining 3 bytes has no effect. The rationale behind this
+ * is to make all HIF accesses 4-byte aligned.
+ */
+ ret = ath6kl_sdio_read_write_sync(ar, addr,
+ (u8 *)&ar->bmi.cmd_credits, 4,
+ HIF_RD_SYNC_BYTE_INC);
+ if (ret) {
+ ath6kl_err("Unable to decrement the command credit "
+ "count register: %d\n", ret);
+ return ret;
+ }
+
+ /* The counter is only 8 bits.
+ * Ignore anything in the upper 3 bytes
+ */
+ ar->bmi.cmd_credits &= 0xFF;
+ }
+
+ if (!ar->bmi.cmd_credits) {
+ ath6kl_err("bmi communication timeout\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
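
ath6kl_sdio_bmi_credits() is a bounded poll: it keeps issuing 4-byte reads of the credit counter (each read decrements it) until the low byte is non-zero or BMI_COMMUNICATION_TIMEOUT expires. A userspace analogue of that loop, with a simulated register and the timeout assumed to be 1000 ms for the example:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define BMI_TIMEOUT_MS 1000	/* assumed value for the example */

static int polls;

/* simulated counter register: a credit shows up after a few polls; the
 * upper three bytes carry unrelated data, as on the real 4-byte read */
static uint32_t read_credit_count_reg(void)
{
	return (++polls >= 5) ? 0xdeadbe01 : 0xdeadbe00;
}

static int wait_for_cmd_credits(uint32_t *credits)
{
	struct timespec now, deadline;

	clock_gettime(CLOCK_MONOTONIC, &deadline);
	deadline.tv_sec += BMI_TIMEOUT_MS / 1000;	/* sub-second part ignored here */

	*credits = 0;
	while (!*credits) {
		*credits = read_credit_count_reg() & 0xFF; /* counter is only 8 bits */
		if (*credits)
			break;
		clock_gettime(CLOCK_MONOTONIC, &now);
		if (now.tv_sec > deadline.tv_sec ||
		    (now.tv_sec == deadline.tv_sec && now.tv_nsec >= deadline.tv_nsec))
			return -1;	/* mirrors the -ETIMEDOUT path */
	}

	return 0;
}

int main(void)
{
	uint32_t credits;
	int ret = wait_for_cmd_credits(&credits);

	printf("ret=%d credits=%u after %d polls\n", ret, credits, polls);
	return 0;
}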
+
+static int ath6kl_bmi_get_rx_lkahd(struct ath6kl *ar)
+{
+ unsigned long timeout;
+ u32 rx_word = 0;
+ int ret = 0;
+
+ timeout = jiffies + msecs_to_jiffies(BMI_COMMUNICATION_TIMEOUT);
+ while ((time_before(jiffies, timeout)) && !rx_word) {
+ ret = ath6kl_sdio_read_write_sync(ar,
+ RX_LOOKAHEAD_VALID_ADDRESS,
+ (u8 *)&rx_word, sizeof(rx_word),
+ HIF_RD_SYNC_BYTE_INC);
+ if (ret) {
+ ath6kl_err("unable to read RX_LOOKAHEAD_VALID\n");
+ return ret;
+ }
+
+ /* all we really want is one bit */
+ rx_word &= (1 << ENDPOINT1);
+ }
+
+ if (!rx_word) {
+ ath6kl_err("bmi_recv_buf FIFO empty\n");
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static int ath6kl_sdio_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
+{
+ int ret;
+ u32 addr;
+
+ ret = ath6kl_sdio_bmi_credits(ar);
+ if (ret)
+ return ret;
+
+ addr = ar->mbox_info.htc_addr;
+
+ ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
+ HIF_WR_SYNC_BYTE_INC);
+ if (ret)
+ ath6kl_err("unable to send the bmi data to the device\n");
+
+ return ret;
+}
+
+static int ath6kl_sdio_bmi_read(struct ath6kl *ar, u8 *buf, u32 len)
+{
+ int ret;
+ u32 addr;
+
+ /*
+ * During normal bootup, small reads may be required.
+ * Rather than issue an HIF Read and then wait as the Target
+ * adds successive bytes to the FIFO, we wait here until
+ * we know that response data is available.
+ *
+ * This allows us to cleanly timeout on an unexpected
+ * Target failure rather than risk problems at the HIF level.
+ * In particular, this avoids SDIO timeouts and possibly garbage
+ * data on some host controllers. And on an interconnect
+ * such as Compact Flash (as well as some SDIO masters) which
+ * does not provide any indication on data timeout, it avoids
+ * a potential hang or garbage response.
+ *
+ * Synchronization is more difficult for reads larger than the
+ * size of the MBOX FIFO (128B), because the Target is unable
+ * to push the 129th byte of data until AFTER the Host posts an
+ * HIF Read and removes some FIFO data. So for large reads the
+ * Host proceeds to post an HIF Read BEFORE all the data is
+ * actually available to read. Fortunately, large BMI reads do
+ * not occur in practice -- they're supported for debug/development.
+ *
+ * So Host/Target BMI synchronization is divided into these cases:
+ * CASE 1: length < 4
+ * Should not happen
+ *
+ * CASE 2: 4 <= length <= 128
+ * Wait for first 4 bytes to be in FIFO
+ * If CONSERVATIVE_BMI_READ is enabled, also wait for
+ * a BMI command credit, which indicates that the ENTIRE
+ * response is available in the FIFO
+ *
+ * CASE 3: length > 128
+ * Wait for the first 4 bytes to be in FIFO
+ *
+ * For most uses, a small timeout should be sufficient and we will
+ * usually see a response quickly; but there may be some unusual
+ * (debug) cases of BMI_EXECUTE where we want a larger timeout.
+ * For now, we use an unbounded busy loop while waiting for
+ * BMI_EXECUTE.
+ *
+ * If BMI_EXECUTE ever needs to support longer-latency execution,
+ * especially in production, this code needs to be enhanced to sleep
+ * and yield. Also note that BMI_COMMUNICATION_TIMEOUT is currently
+ * a function of Host processor speed.
+ */
+ if (len >= 4) { /* NB: Currently, always true */
+ ret = ath6kl_bmi_get_rx_lkahd(ar);
+ if (ret)
+ return ret;
+ }
+
+ addr = ar->mbox_info.htc_addr;
+ ret = ath6kl_sdio_read_write_sync(ar, addr, buf, len,
+ HIF_RD_SYNC_BYTE_INC);
+ if (ret) {
+ ath6kl_err("Unable to read the bmi data from the device: %d\n",
+ ret);
+ return ret;
+ }
return 0;
}
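
The long comment above spells out how BMI reads are synchronized against the 128-byte mailbox FIFO: any read of 4 bytes or more first waits for RX_LOOKAHEAD_VALID, and reads larger than the FIFO can only rely on the first 4 bytes being present before the host starts draining. A compact classifier over those cases (the FIFO size is taken from the comment, not from driver headers):

#include <stdbool.h>
#include <stdio.h>

#define MBOX_FIFO_SIZE 128	/* from the comment above */

/* returns true when the caller should poll RX_LOOKAHEAD_VALID first */
static bool bmi_read_needs_lookahead_wait(unsigned int len, const char **why)
{
	if (len < 4) {			/* CASE 1: should not happen */
		*why = "short read, no wait";
		return false;
	}
	if (len <= MBOX_FIFO_SIZE) {	/* CASE 2: response fits the FIFO */
		*why = "wait for first 4 bytes; whole response may already be queued";
		return true;
	}
	/* CASE 3: larger than the FIFO; only the first 4 bytes are
	 * guaranteed before the host starts draining */
	*why = "wait for first 4 bytes, target refills as the host drains";
	return true;
}

int main(void)
{
	unsigned int lens[] = { 2, 64, 512 };
	const char *why;
	int i;

	for (i = 0; i < 3; i++) {
		bool wait = bmi_read_needs_lookahead_wait(lens[i], &why);

		printf("len=%u wait=%d (%s)\n", lens[i], wait, why);
	}
	return 0;
}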
+static void ath6kl_sdio_stop(struct ath6kl *ar)
+{
+ struct ath6kl_sdio *ar_sdio = ath6kl_sdio_priv(ar);
+ struct bus_request *req, *tmp_req;
+ void *context;
+
+ /* FIXME: make sure that wq is not queued again */
+
+ cancel_work_sync(&ar_sdio->wr_async_work);
+
+ spin_lock_bh(&ar_sdio->wr_async_lock);
+
+ list_for_each_entry_safe(req, tmp_req, &ar_sdio->wr_asyncq, list) {
+ list_del(&req->list);
+
+ if (req->scat_req) {
+ /* this is a scatter gather request */
+ req->scat_req->status = -ECANCELED;
+ req->scat_req->complete(ar_sdio->ar->htc_target,
+ req->scat_req);
+ } else {
+ context = req->packet;
+ ath6kl_sdio_free_bus_req(ar_sdio, req);
+ ath6kl_hif_rw_comp_handler(context, -ECANCELED);
+ }
+ }
+
+ spin_unlock_bh(&ar_sdio->wr_async_lock);
+
+ WARN_ON(get_queue_depth(&ar_sdio->scat_req) != 4);
+}
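
ath6kl_sdio_stop() first cancels the async-write worker, then walks wr_asyncq under its lock and completes every pending request with -ECANCELED (scatter requests via their completion callback, plain requests via ath6kl_hif_rw_comp_handler()), so nothing queued at shutdown is leaked. A userspace sketch of that drain, with an invented request list and callback:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct bus_req {
	struct bus_req *next;
	int id;
	void (*complete)(struct bus_req *req, int status);
};

static struct bus_req *wr_asyncq;
static pthread_mutex_t wr_async_lock = PTHREAD_MUTEX_INITIALIZER;

static void req_complete(struct bus_req *req, int status)
{
	printf("req %d completed with status %d\n", req->id, status);
	free(req);	/* hand the request back instead of leaking it */
}

static void queue_req(int id)
{
	struct bus_req *req = calloc(1, sizeof(*req));

	if (!req)
		return;
	req->id = id;
	req->complete = req_complete;
	pthread_mutex_lock(&wr_async_lock);
	req->next = wr_asyncq;
	wr_asyncq = req;
	pthread_mutex_unlock(&wr_async_lock);
}

static void stop_and_flush(void)
{
	struct bus_req *req, *next;

	/* the driver calls cancel_work_sync() first so no new IO starts */
	pthread_mutex_lock(&wr_async_lock);
	for (req = wr_asyncq; req; req = next) {
		next = req->next;
		req->complete(req, -ECANCELED);
	}
	wr_asyncq = NULL;
	pthread_mutex_unlock(&wr_async_lock);
}

int main(void)
{
	queue_req(1);
	queue_req(2);
	stop_and_flush();
	return 0;
}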
+
static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
.read_write_sync = ath6kl_sdio_read_write_sync,
.write_async = ath6kl_sdio_write_async,
@@ -763,8 +1179,47 @@ static const struct ath6kl_hif_ops ath6kl_sdio_ops = {
.scat_req_rw = ath6kl_sdio_async_rw_scatter,
.cleanup_scatter = ath6kl_sdio_cleanup_scatter,
.suspend = ath6kl_sdio_suspend,
+ .resume = ath6kl_sdio_resume,
+ .diag_read32 = ath6kl_sdio_diag_read32,
+ .diag_write32 = ath6kl_sdio_diag_write32,
+ .bmi_read = ath6kl_sdio_bmi_read,
+ .bmi_write = ath6kl_sdio_bmi_write,
+ .power_on = ath6kl_sdio_power_on,
+ .power_off = ath6kl_sdio_power_off,
+ .stop = ath6kl_sdio_stop,
};
+#ifdef CONFIG_PM_SLEEP
+
+/*
+ * Empty handlers so that mmc subsystem doesn't remove us entirely during
+ * suspend. We instead follow cfg80211 suspend/resume handlers.
+ */
+static int ath6kl_sdio_pm_suspend(struct device *device)
+{
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm suspend\n");
+
+ return 0;
+}
+
+static int ath6kl_sdio_pm_resume(struct device *device)
+{
+ ath6kl_dbg(ATH6KL_DBG_SUSPEND, "sdio pm resume\n");
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(ath6kl_sdio_pm_ops, ath6kl_sdio_pm_suspend,
+ ath6kl_sdio_pm_resume);
+
+#define ATH6KL_SDIO_PM_OPS (&ath6kl_sdio_pm_ops)
+
+#else
+
+#define ATH6KL_SDIO_PM_OPS NULL
+
+#endif /* CONFIG_PM_SLEEP */
+
static int ath6kl_sdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
@@ -773,8 +1228,8 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
struct ath6kl *ar;
int count;
- ath6kl_dbg(ATH6KL_DBG_SDIO,
- "new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "sdio new func %d vendor 0x%x device 0x%x block 0x%x/0x%x\n",
func->num, func->vendor, func->device,
func->max_blksize, func->cur_blksize);
@@ -797,6 +1252,7 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
spin_lock_init(&ar_sdio->lock);
spin_lock_init(&ar_sdio->scat_lock);
spin_lock_init(&ar_sdio->wr_async_lock);
+ mutex_init(&ar_sdio->dma_buffer_mutex);
INIT_LIST_HEAD(&ar_sdio->scat_req);
INIT_LIST_HEAD(&ar_sdio->bus_req_freeq);
@@ -815,62 +1271,29 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
}
ar_sdio->ar = ar;
+ ar->hif_type = ATH6KL_HIF_TYPE_SDIO;
ar->hif_priv = ar_sdio;
ar->hif_ops = &ath6kl_sdio_ops;
+ ar->bmi.max_data_size = 256;
ath6kl_sdio_set_mbox_info(ar);
- sdio_claim_host(func);
-
- if ((ar_sdio->id->device & MANUFACTURER_ID_ATH6KL_BASE_MASK) >=
- MANUFACTURER_ID_AR6003_BASE) {
- /* enable 4-bit ASYNC interrupt on AR6003 or later */
- ret = ath6kl_sdio_func0_cmd52_wr_byte(func->card,
- CCCR_SDIO_IRQ_MODE_REG,
- SDIO_IRQ_MODE_ASYNC_4BIT_IRQ);
- if (ret) {
- ath6kl_err("Failed to enable 4-bit async irq mode %d\n",
- ret);
- sdio_release_host(func);
- goto err_cfg80211;
- }
-
- ath6kl_dbg(ATH6KL_DBG_SDIO, "4-bit async irq mode enabled\n");
- }
-
- /* give us some time to enable, in ms */
- func->enable_timeout = 100;
-
- sdio_release_host(func);
-
- ret = ath6kl_sdio_power_on(ar_sdio);
- if (ret)
- goto err_cfg80211;
-
- sdio_claim_host(func);
-
- ret = sdio_set_block_size(func, HIF_MBOX_BLOCK_SIZE);
+ ret = ath6kl_sdio_config(ar);
if (ret) {
- ath6kl_err("Set sdio block size %d failed: %d)\n",
- HIF_MBOX_BLOCK_SIZE, ret);
- sdio_release_host(func);
- goto err_off;
+ ath6kl_err("Failed to config sdio: %d\n", ret);
+ goto err_core_alloc;
}
- sdio_release_host(func);
-
ret = ath6kl_core_init(ar);
if (ret) {
ath6kl_err("Failed to init ath6kl core\n");
- goto err_off;
+ goto err_core_alloc;
}
return ret;
-err_off:
- ath6kl_sdio_power_off(ar_sdio);
-err_cfg80211:
- ath6kl_cfg80211_deinit(ar_sdio->ar);
+err_core_alloc:
+ ath6kl_core_free(ar_sdio->ar);
err_dma:
kfree(ar_sdio->dma_buffer);
err_hif:
@@ -883,8 +1306,8 @@ static void ath6kl_sdio_remove(struct sdio_func *func)
{
struct ath6kl_sdio *ar_sdio;
- ath6kl_dbg(ATH6KL_DBG_SDIO,
- "removed func %d vendor 0x%x device 0x%x\n",
+ ath6kl_dbg(ATH6KL_DBG_BOOT,
+ "sdio removed func %d vendor 0x%x device 0x%x\n",
func->num, func->vendor, func->device);
ar_sdio = sdio_get_drvdata(func);
@@ -892,9 +1315,7 @@ static void ath6kl_sdio_remove(struct sdio_func *func)
ath6kl_stop_txrx(ar_sdio->ar);
cancel_work_sync(&ar_sdio->wr_async_work);
- ath6kl_unavail_ev(ar_sdio->ar);
-
- ath6kl_sdio_power_off(ar_sdio);
+ ath6kl_core_cleanup(ar_sdio->ar);
kfree(ar_sdio->dma_buffer);
kfree(ar_sdio);
@@ -903,16 +1324,19 @@ static void ath6kl_sdio_remove(struct sdio_func *func)
static const struct sdio_device_id ath6kl_sdio_devices[] = {
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x0))},
{SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6003_BASE | 0x1))},
+ {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x0))},
+ {SDIO_DEVICE(MANUFACTURER_CODE, (MANUFACTURER_ID_AR6004_BASE | 0x1))},
{},
};
MODULE_DEVICE_TABLE(sdio, ath6kl_sdio_devices);
static struct sdio_driver ath6kl_sdio_driver = {
- .name = "ath6kl_sdio",
+ .name = "ath6kl",
.id_table = ath6kl_sdio_devices,
.probe = ath6kl_sdio_probe,
.remove = ath6kl_sdio_remove,
+ .drv.pm = ATH6KL_SDIO_PM_OPS,
};
static int __init ath6kl_sdio_init(void)
@@ -938,13 +1362,19 @@ MODULE_AUTHOR("Atheros Communications, Inc.");
MODULE_DESCRIPTION("Driver support for Atheros AR600x SDIO devices");
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_FIRMWARE(AR6003_REV2_OTP_FILE);
-MODULE_FIRMWARE(AR6003_REV2_FIRMWARE_FILE);
-MODULE_FIRMWARE(AR6003_REV2_PATCH_FILE);
-MODULE_FIRMWARE(AR6003_REV2_BOARD_DATA_FILE);
-MODULE_FIRMWARE(AR6003_REV2_DEFAULT_BOARD_DATA_FILE);
-MODULE_FIRMWARE(AR6003_REV3_OTP_FILE);
-MODULE_FIRMWARE(AR6003_REV3_FIRMWARE_FILE);
-MODULE_FIRMWARE(AR6003_REV3_PATCH_FILE);
-MODULE_FIRMWARE(AR6003_REV3_BOARD_DATA_FILE);
-MODULE_FIRMWARE(AR6003_REV3_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_0_OTP_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_0_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_0_PATCH_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_0_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_1_1_OTP_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_1_1_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_1_1_PATCH_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_1_1_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6003_HW_2_1_1_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_0_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_0_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_0_DEFAULT_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_1_FIRMWARE_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_1_BOARD_DATA_FILE);
+MODULE_FIRMWARE(AR6004_HW_1_1_DEFAULT_BOARD_DATA_FILE);
diff --git a/drivers/net/wireless/ath/ath6kl/target.h b/drivers/net/wireless/ath/ath6kl/target.h
index c9a7605..108a723 100644
--- a/drivers/net/wireless/ath/ath6kl/target.h
+++ b/drivers/net/wireless/ath/ath6kl/target.h
@@ -20,7 +20,7 @@
#define AR6003_BOARD_DATA_SZ 1024
#define AR6003_BOARD_EXT_DATA_SZ 768
-#define AR6004_BOARD_DATA_SZ 7168
+#define AR6004_BOARD_DATA_SZ 6144
#define AR6004_BOARD_EXT_DATA_SZ 0
#define RESET_CONTROL_ADDRESS 0x00000000
@@ -320,7 +320,10 @@ struct host_interest {
| (2) | (2) | (2) | (2) | (2) | (2) | (2) | (2)
|------------------------------------------------------------------------------|
*/
+#define HI_OPTION_FW_MODE_BITS 0x2
#define HI_OPTION_FW_MODE_SHIFT 0xC
+
+#define HI_OPTION_FW_SUBMODE_BITS 0x2
#define HI_OPTION_FW_SUBMODE_SHIFT 0x14
/* Convert a Target virtual address into a Target physical address */
@@ -331,20 +334,6 @@ struct host_interest {
(((target_type) == TARGET_TYPE_AR6003) ? AR6003_VTOP(vaddr) : \
(((target_type) == TARGET_TYPE_AR6004) ? AR6004_VTOP(vaddr) : 0))
-#define AR6003_REV2_APP_LOAD_ADDRESS 0x543180
-#define AR6003_REV2_BOARD_EXT_DATA_ADDRESS 0x57E500
-#define AR6003_REV2_DATASET_PATCH_ADDRESS 0x57e884
-#define AR6003_REV2_RAM_RESERVE_SIZE 6912
-
-#define AR6003_REV3_APP_LOAD_ADDRESS 0x545000
-#define AR6003_REV3_BOARD_EXT_DATA_ADDRESS 0x542330
-#define AR6003_REV3_DATASET_PATCH_ADDRESS 0x57FF74
-#define AR6003_REV3_RAM_RESERVE_SIZE 512
-
-#define AR6004_REV1_BOARD_DATA_ADDRESS 0x435400
-#define AR6004_REV1_BOARD_EXT_DATA_ADDRESS 0x437000
-#define AR6004_REV1_RAM_RESERVE_SIZE 11264
-
#define ATH6KL_FWLOG_PAYLOAD_SIZE 1500
struct ath6kl_dbglog_buf {
diff --git a/drivers/net/wireless/ath/ath6kl/txrx.c b/drivers/net/wireless/ath/ath6kl/txrx.c
index a711707..506a303 100644
--- a/drivers/net/wireless/ath/ath6kl/txrx.c
+++ b/drivers/net/wireless/ath/ath6kl/txrx.c
@@ -77,12 +77,13 @@ static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
return ar->node_map[ep_map].ep_id;
}
-static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
+static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
bool *more_data)
{
struct ethhdr *datap = (struct ethhdr *) skb->data;
struct ath6kl_sta *conn = NULL;
bool ps_queued = false, is_psq_empty = false;
+ struct ath6kl *ar = vif->ar;
if (is_multicast_ether_addr(datap->h_dest)) {
u8 ctr = 0;
@@ -100,7 +101,7 @@ static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
* If this transmit is not because of a Dtim Expiry
* q it.
*/
- if (!test_bit(DTIM_EXPIRED, &ar->flag)) {
+ if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
bool is_mcastq_empty = false;
spin_lock_bh(&ar->mcastpsq_lock);
@@ -116,6 +117,7 @@ static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
*/
if (is_mcastq_empty)
ath6kl_wmi_set_pvb_cmd(ar->wmi,
+ vif->fw_vif_idx,
MCAST_AID, 1);
ps_queued = true;
@@ -131,7 +133,7 @@ static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
}
}
} else {
- conn = ath6kl_find_sta(ar, datap->h_dest);
+ conn = ath6kl_find_sta(vif, datap->h_dest);
if (!conn) {
dev_kfree_skb(skb);
@@ -154,6 +156,7 @@ static bool ath6kl_powersave_ap(struct ath6kl *ar, struct sk_buff *skb,
*/
if (is_psq_empty)
ath6kl_wmi_set_pvb_cmd(ar->wmi,
+ vif->fw_vif_idx,
conn->aid, 1);
ps_queued = true;
@@ -235,6 +238,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
struct ath6kl *ar = ath6kl_priv(dev);
struct ath6kl_cookie *cookie = NULL;
enum htc_endpoint_id eid = ENDPOINT_UNUSED;
+ struct ath6kl_vif *vif = netdev_priv(dev);
u32 map_no = 0;
u16 htc_tag = ATH6KL_DATA_PKT_TAG;
u8 ac = 99 ; /* initialize to unmapped ac */
@@ -246,7 +250,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
skb, skb->data, skb->len);
/* If target is not associated */
- if (!test_bit(CONNECTED, &ar->flag)) {
+ if (!test_bit(CONNECTED, &vif->flags)) {
dev_kfree_skb(skb);
return 0;
}
@@ -255,15 +259,21 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
goto fail_tx;
/* AP mode Power saving processing */
- if (ar->nw_type == AP_NETWORK) {
- if (ath6kl_powersave_ap(ar, skb, &more_data))
+ if (vif->nw_type == AP_NETWORK) {
+ if (ath6kl_powersave_ap(vif, skb, &more_data))
return 0;
}
if (test_bit(WMI_ENABLED, &ar->flag)) {
if (skb_headroom(skb) < dev->needed_headroom) {
- WARN_ON(1);
- goto fail_tx;
+ struct sk_buff *tmp_skb = skb;
+
+ skb = skb_realloc_headroom(skb, dev->needed_headroom);
+ kfree_skb(tmp_skb);
+ if (skb == NULL) {
+ vif->net_stats.tx_dropped++;
+ return 0;
+ }
}
if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
@@ -272,18 +282,20 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
}
if (ath6kl_wmi_data_hdr_add(ar->wmi, skb, DATA_MSGTYPE,
- more_data, 0, 0, NULL)) {
+ more_data, 0, 0, NULL,
+ vif->fw_vif_idx)) {
ath6kl_err("wmi_data_hdr_add failed\n");
goto fail_tx;
}
- if ((ar->nw_type == ADHOC_NETWORK) &&
- ar->ibss_ps_enable && test_bit(CONNECTED, &ar->flag))
+ if ((vif->nw_type == ADHOC_NETWORK) &&
+ ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
chk_adhoc_ps_mapping = true;
else {
/* get the stream mapping */
- ret = ath6kl_wmi_implicit_create_pstream(ar->wmi, skb,
- 0, test_bit(WMM_ENABLED, &ar->flag), &ac);
+ ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
+ vif->fw_vif_idx, skb,
+ 0, test_bit(WMM_ENABLED, &vif->flags), &ac);
if (ret)
goto fail_tx;
}
@@ -354,8 +366,8 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
fail_tx:
dev_kfree_skb(skb);
- ar->net_stats.tx_dropped++;
- ar->net_stats.tx_aborted_errors++;
+ vif->net_stats.tx_dropped++;
+ vif->net_stats.tx_aborted_errors++;
return 0;
}
@@ -426,7 +438,9 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
struct htc_packet *packet)
{
struct ath6kl *ar = target->dev->ar;
+ struct ath6kl_vif *vif;
enum htc_endpoint_id endpoint = packet->endpoint;
+ enum htc_send_full_action action = HTC_SEND_FULL_KEEP;
if (endpoint == ar->ctrl_ep) {
/*
@@ -439,19 +453,11 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
set_bit(WMI_CTRL_EP_FULL, &ar->flag);
spin_unlock_bh(&ar->lock);
ath6kl_err("wmi ctrl ep is full\n");
- return HTC_SEND_FULL_KEEP;
+ return action;
}
if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
- return HTC_SEND_FULL_KEEP;
-
- if (ar->nw_type == ADHOC_NETWORK)
- /*
- * In adhoc mode, we cannot differentiate traffic
- * priorities so there is no need to continue, however we
- * should stop the network.
- */
- goto stop_net_queues;
+ return action;
/*
* The last MAX_HI_COOKIE_NUM "batch" of cookies are reserved for
@@ -464,24 +470,36 @@ enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
* Give preference to the highest priority stream by
* dropping the packets which overflowed.
*/
- return HTC_SEND_FULL_DROP;
+ action = HTC_SEND_FULL_DROP;
-stop_net_queues:
- spin_lock_bh(&ar->lock);
- set_bit(NETQ_STOPPED, &ar->flag);
- spin_unlock_bh(&ar->lock);
- netif_stop_queue(ar->net_dev);
+ /* FIXME: Locking */
+ spin_lock_bh(&ar->list_lock);
+ list_for_each_entry(vif, &ar->vif_list, list) {
+ if (vif->nw_type == ADHOC_NETWORK ||
+ action != HTC_SEND_FULL_DROP) {
+ spin_unlock_bh(&ar->list_lock);
+
+ spin_lock_bh(&vif->if_lock);
+ set_bit(NETQ_STOPPED, &vif->flags);
+ spin_unlock_bh(&vif->if_lock);
+ netif_stop_queue(vif->ndev);
- return HTC_SEND_FULL_KEEP;
+ return action;
+ }
+ }
+ spin_unlock_bh(&ar->list_lock);
+
+ return action;
}
/* TODO this needs to be looked at */
-static void ath6kl_tx_clear_node_map(struct ath6kl *ar,
+static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
enum htc_endpoint_id eid, u32 map_no)
{
+ struct ath6kl *ar = vif->ar;
u32 i;
- if (ar->nw_type != ADHOC_NETWORK)
+ if (vif->nw_type != ADHOC_NETWORK)
return;
if (!ar->ibss_ps_enable)
@@ -523,7 +541,9 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
int status;
enum htc_endpoint_id eid;
bool wake_event = false;
- bool flushing = false;
+ bool flushing[ATH6KL_VIF_MAX] = {false};
+ u8 if_idx;
+ struct ath6kl_vif *vif;
skb_queue_head_init(&skb_queue);
@@ -549,8 +569,6 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
if (!skb || !skb->data)
goto fatal;
- packet->buf = skb->data;
-
__skb_queue_tail(&skb_queue, skb);
if (!status && (packet->act_len != skb->len))
@@ -569,15 +587,30 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
wake_event = true;
}
+ if (eid == ar->ctrl_ep) {
+ if_idx = wmi_cmd_hdr_get_if_idx(
+ (struct wmi_cmd_hdr *) packet->buf);
+ } else {
+ if_idx = wmi_data_hdr_get_if_idx(
+ (struct wmi_data_hdr *) packet->buf);
+ }
+
+ vif = ath6kl_get_vif_by_index(ar, if_idx);
+ if (!vif) {
+ ath6kl_free_cookie(ar, ath6kl_cookie);
+ continue;
+ }
+
if (status) {
if (status == -ECANCELED)
/* a packet was flushed */
- flushing = true;
+ flushing[if_idx] = true;
+
+ vif->net_stats.tx_errors++;
- ar->net_stats.tx_errors++;
+ if (status != -ENOSPC && status != -ECANCELED)
+ ath6kl_warn("tx complete error: %d\n", status);
- if (status != -ENOSPC)
- ath6kl_err("tx error, status: 0x%x\n", status);
ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
"%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
__func__, skb, packet->buf, packet->act_len,
@@ -588,27 +621,34 @@ void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
__func__, skb, packet->buf, packet->act_len,
eid, "OK");
- flushing = false;
- ar->net_stats.tx_packets++;
- ar->net_stats.tx_bytes += skb->len;
+ flushing[if_idx] = false;
+ vif->net_stats.tx_packets++;
+ vif->net_stats.tx_bytes += skb->len;
}
- ath6kl_tx_clear_node_map(ar, eid, map_no);
+ ath6kl_tx_clear_node_map(vif, eid, map_no);
ath6kl_free_cookie(ar, ath6kl_cookie);
- if (test_bit(NETQ_STOPPED, &ar->flag))
- clear_bit(NETQ_STOPPED, &ar->flag);
+ if (test_bit(NETQ_STOPPED, &vif->flags))
+ clear_bit(NETQ_STOPPED, &vif->flags);
}
spin_unlock_bh(&ar->lock);
__skb_queue_purge(&skb_queue);
- if (test_bit(CONNECTED, &ar->flag)) {
- if (!flushing)
- netif_wake_queue(ar->net_dev);
+ /* FIXME: Locking */
+ spin_lock_bh(&ar->list_lock);
+ list_for_each_entry(vif, &ar->vif_list, list) {
+ if (test_bit(CONNECTED, &vif->flags) &&
+ !flushing[vif->fw_vif_idx]) {
+ spin_unlock_bh(&ar->list_lock);
+ netif_wake_queue(vif->ndev);
+ spin_lock_bh(&ar->list_lock);
+ }
}
+ spin_unlock_bh(&ar->list_lock);
if (wake_event)
wake_up(&ar->event_wq);
@@ -1041,8 +1081,9 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
struct ath6kl_sta *conn = NULL;
struct sk_buff *skb1 = NULL;
struct ethhdr *datap = NULL;
+ struct ath6kl_vif *vif;
u16 seq_no, offset;
- u8 tid;
+ u8 tid, if_idx;
ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
"%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
@@ -1050,7 +1091,23 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
packet->act_len, status);
if (status || !(skb->data + HTC_HDR_LENGTH)) {
- ar->net_stats.rx_errors++;
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
+ skb_pull(skb, HTC_HDR_LENGTH);
+
+ if (ept == ar->ctrl_ep) {
+ if_idx =
+ wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
+ } else {
+ if_idx =
+ wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
+ }
+
+ vif = ath6kl_get_vif_by_index(ar, if_idx);
+ if (!vif) {
dev_kfree_skb(skb);
return;
}
@@ -1059,28 +1116,28 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
* Take lock to protect buffer counts and adaptive power throughput
* state.
*/
- spin_lock_bh(&ar->lock);
+ spin_lock_bh(&vif->if_lock);
- ar->net_stats.rx_packets++;
- ar->net_stats.rx_bytes += packet->act_len;
+ vif->net_stats.rx_packets++;
+ vif->net_stats.rx_bytes += packet->act_len;
- spin_unlock_bh(&ar->lock);
+ spin_unlock_bh(&vif->if_lock);
- skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
- skb_pull(skb, HTC_HDR_LENGTH);
ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
skb->data, skb->len);
- skb->dev = ar->net_dev;
+ skb->dev = vif->ndev;
if (!test_bit(WMI_ENABLED, &ar->flag)) {
if (EPPING_ALIGNMENT_PAD > 0)
skb_pull(skb, EPPING_ALIGNMENT_PAD);
- ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
+ ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
return;
}
+ ath6kl_check_wow_status(ar);
+
if (ept == ar->ctrl_ep) {
ath6kl_wmi_control_rx(ar->wmi, skb);
return;
@@ -1096,18 +1153,18 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
* that do not have LLC hdr. They are 16 bytes in size.
* Allow these frames in the AP mode.
*/
- if (ar->nw_type != AP_NETWORK &&
+ if (vif->nw_type != AP_NETWORK &&
((packet->act_len < min_hdr_len) ||
(packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
ath6kl_info("frame len is too short or too long\n");
- ar->net_stats.rx_errors++;
- ar->net_stats.rx_length_errors++;
+ vif->net_stats.rx_errors++;
+ vif->net_stats.rx_length_errors++;
dev_kfree_skb(skb);
return;
}
/* Get the Power save state of the STA */
- if (ar->nw_type == AP_NETWORK) {
+ if (vif->nw_type == AP_NETWORK) {
meta_type = wmi_data_hdr_get_meta(dhdr);
ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
@@ -1129,7 +1186,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
}
datap = (struct ethhdr *) (skb->data + offset);
- conn = ath6kl_find_sta(ar, datap->h_source);
+ conn = ath6kl_find_sta(vif, datap->h_source);
if (!conn) {
dev_kfree_skb(skb);
@@ -1160,12 +1217,13 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
while ((skbuff = skb_dequeue(&conn->psq))
!= NULL) {
spin_unlock_bh(&conn->psq_lock);
- ath6kl_data_tx(skbuff, ar->net_dev);
+ ath6kl_data_tx(skbuff, vif->ndev);
spin_lock_bh(&conn->psq_lock);
}
spin_unlock_bh(&conn->psq_lock);
/* Clear the PVB for this STA */
- ath6kl_wmi_set_pvb_cmd(ar->wmi, conn->aid, 0);
+ ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
+ conn->aid, 0);
}
}
@@ -1215,12 +1273,12 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
return;
}
- if (!(ar->net_dev->flags & IFF_UP)) {
+ if (!(vif->ndev->flags & IFF_UP)) {
dev_kfree_skb(skb);
return;
}
- if (ar->nw_type == AP_NETWORK) {
+ if (vif->nw_type == AP_NETWORK) {
datap = (struct ethhdr *) skb->data;
if (is_multicast_ether_addr(datap->h_dest))
/*
@@ -1235,8 +1293,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
* frame to it on the air else send the
* frame up the stack.
*/
- struct ath6kl_sta *conn = NULL;
- conn = ath6kl_find_sta(ar, datap->h_dest);
+ conn = ath6kl_find_sta(vif, datap->h_dest);
if (conn && ar->intra_bss) {
skb1 = skb;
@@ -1247,18 +1304,23 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
}
}
if (skb1)
- ath6kl_data_tx(skb1, ar->net_dev);
+ ath6kl_data_tx(skb1, vif->ndev);
+
+ if (skb == NULL) {
+ /* nothing to deliver up the stack */
+ return;
+ }
}
datap = (struct ethhdr *) skb->data;
if (is_unicast_ether_addr(datap->h_dest) &&
- aggr_process_recv_frm(ar->aggr_cntxt, tid, seq_no,
+ aggr_process_recv_frm(vif->aggr_cntxt, tid, seq_no,
is_amsdu, skb))
/* aggregation code will handle the skb */
return;
- ath6kl_deliver_frames_to_nw_stack(ar->net_dev, skb);
+ ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}
static void aggr_timeout(unsigned long arg)
@@ -1336,9 +1398,10 @@ static void aggr_delete_tid_state(struct aggr_info *p_aggr, u8 tid)
memset(stats, 0, sizeof(struct rxtid_stats));
}
-void aggr_recv_addba_req_evt(struct ath6kl *ar, u8 tid, u16 seq_no, u8 win_sz)
+void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid, u16 seq_no,
+ u8 win_sz)
{
- struct aggr_info *p_aggr = ar->aggr_cntxt;
+ struct aggr_info *p_aggr = vif->aggr_cntxt;
struct rxtid *rxtid;
struct rxtid_stats *stats;
u16 hold_q_size;
@@ -1405,9 +1468,9 @@ struct aggr_info *aggr_init(struct net_device *dev)
return p_aggr;
}
-void aggr_recv_delba_req_evt(struct ath6kl *ar, u8 tid)
+void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid)
{
- struct aggr_info *p_aggr = ar->aggr_cntxt;
+ struct aggr_info *p_aggr = vif->aggr_cntxt;
struct rxtid *rxtid;
if (!p_aggr)
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.c b/drivers/net/wireless/ath/ath6kl/wmi.c
index a7de23c..f6f2aa2 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.c
+++ b/drivers/net/wireless/ath/ath6kl/wmi.c
@@ -21,7 +21,7 @@
#include "../regd.h"
#include "../regd_common.h"
-static int ath6kl_wmi_sync_point(struct wmi *wmi);
+static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx);
static const s32 wmi_rate_tbl[][2] = {
/* {W/O SGI, with SGI} */
@@ -81,6 +81,26 @@ enum htc_endpoint_id ath6kl_wmi_get_control_ep(struct wmi *wmi)
return wmi->ep_id;
}
+struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx)
+{
+ struct ath6kl_vif *vif, *found = NULL;
+
+ if (WARN_ON(if_idx > (ar->vif_max - 1)))
+ return NULL;
+
+ /* FIXME: Locking */
+ spin_lock_bh(&ar->list_lock);
+ list_for_each_entry(vif, &ar->vif_list, list) {
+ if (vif->fw_vif_idx == if_idx) {
+ found = vif;
+ break;
+ }
+ }
+ spin_unlock_bh(&ar->list_lock);
+
+ return found;
+}
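
ath6kl_get_vif_by_index() is the glue that lets WMI events be routed to the right interface: a bounds check against vif_max, then a walk of ar->vif_list under list_lock looking for a matching fw_vif_idx. A plain-C sketch of the same lookup (structures and the max count here are invented):

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

#define VIF_MAX 3

struct vif {
	struct vif *next;
	unsigned char fw_vif_idx;
};

static struct vif vifs[2] = { { &vifs[1], 0 }, { NULL, 1 } };
static struct vif *vif_list = &vifs[0];
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct vif *get_vif_by_index(unsigned char if_idx)
{
	struct vif *v, *found = NULL;

	if (if_idx > VIF_MAX - 1)	/* mirrors the WARN_ON() bounds check */
		return NULL;

	pthread_mutex_lock(&list_lock);
	for (v = vif_list; v; v = v->next) {
		if (v->fw_vif_idx == if_idx) {
			found = v;
			break;
		}
	}
	pthread_mutex_unlock(&list_lock);

	return found;
}

int main(void)
{
	printf("idx 1 -> %p\n", (void *)get_vif_by_index(1));
	printf("idx 7 -> %p\n", (void *)get_vif_by_index(7));
	return 0;
}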
+
/* Performs DIX to 802.3 encapsulation for transmit packets.
 * Assumes the entire DIX header is contiguous and that there is
 * enough room in the buffer for an 802.3 mac header and LLC+SNAP headers.
@@ -162,12 +182,12 @@ static int ath6kl_wmi_meta_add(struct wmi *wmi, struct sk_buff *skb,
int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
u8 msg_type, bool more_data,
enum wmi_data_hdr_data_type data_type,
- u8 meta_ver, void *tx_meta_info)
+ u8 meta_ver, void *tx_meta_info, u8 if_idx)
{
struct wmi_data_hdr *data_hdr;
int ret;
- if (WARN_ON(skb == NULL))
+ if (WARN_ON(skb == NULL || (if_idx > wmi->parent_dev->vif_max - 1)))
return -EINVAL;
if (tx_meta_info) {
@@ -189,7 +209,7 @@ int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
WMI_DATA_HDR_MORE_MASK << WMI_DATA_HDR_MORE_SHIFT;
data_hdr->info2 = cpu_to_le16(meta_ver << WMI_DATA_HDR_META_SHIFT);
- data_hdr->info3 = 0;
+ data_hdr->info3 = cpu_to_le16(if_idx & WMI_DATA_HDR_IF_IDX_MASK);
return 0;
}
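
With multiple vifs, the transmit header has to carry which interface a frame belongs to, so ath6kl_wmi_data_hdr_add() now stores if_idx in the low bits of the little-endian info3 field; the rx and tx-complete paths read it back with wmi_data_hdr_get_if_idx(). A self-contained sketch of that round trip; the 0xF mask and the struct layout are assumptions for the example, not quoted from the driver headers:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define IF_IDX_MASK 0xF		/* assumed field width for the example */

struct data_hdr {		/* illustrative layout, not struct wmi_data_hdr */
	uint8_t info;
	uint8_t rsvd;
	uint16_t info2;
	uint16_t info3;		/* stored little-endian on the wire */
};

static void data_hdr_set_if_idx(struct data_hdr *hdr, uint8_t if_idx)
{
	hdr->info3 = htole16(if_idx & IF_IDX_MASK);
}

static uint8_t data_hdr_get_if_idx(const struct data_hdr *hdr)
{
	return le16toh(hdr->info3) & IF_IDX_MASK;
}

int main(void)
{
	struct data_hdr hdr = { 0 };

	data_hdr_set_if_idx(&hdr, 2);
	printf("if_idx read back: %u\n", data_hdr_get_if_idx(&hdr));
	return 0;
}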
@@ -216,7 +236,8 @@ static u8 ath6kl_wmi_determine_user_priority(u8 *pkt, u32 layer2_pri)
return ip_pri;
}
-int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb,
+int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
+ struct sk_buff *skb,
u32 layer2_priority, bool wmm_enabled,
u8 *ac)
{
@@ -262,7 +283,12 @@ int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb,
usr_pri = layer2_priority & 0x7;
}
- /* workaround for WMM S5 */
+ /*
+ * workaround for WMM S5
+ *
+ * FIXME: wmi->traffic_class is always 100 so this test doesn't
+ * make sense
+ */
if ((wmi->traffic_class == WMM_AC_VI) &&
((usr_pri == 5) || (usr_pri == 4)))
usr_pri = 1;
@@ -284,7 +310,7 @@ int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb,
cpu_to_le32(WMI_IMPLICIT_PSTREAM_INACTIVITY_INT);
/* Implicit streams are created with TSID 0xFF */
cmd.tsid = WMI_IMPLICIT_PSTREAM;
- ath6kl_wmi_create_pstream_cmd(wmi, &cmd);
+ ath6kl_wmi_create_pstream_cmd(wmi, if_idx, &cmd);
}
*ac = traffic_class;
@@ -410,13 +436,14 @@ static int ath6kl_wmi_tx_complete_event_rx(u8 *datap, int len)
}
static int ath6kl_wmi_remain_on_chnl_event_rx(struct wmi *wmi, u8 *datap,
- int len)
+ int len, struct ath6kl_vif *vif)
{
struct wmi_remain_on_chnl_event *ev;
u32 freq;
u32 dur;
struct ieee80211_channel *chan;
struct ath6kl *ar = wmi->parent_dev;
+ u32 id;
if (len < sizeof(*ev))
return -EINVAL;
@@ -426,26 +453,29 @@ static int ath6kl_wmi_remain_on_chnl_event_rx(struct wmi *wmi, u8 *datap,
dur = le32_to_cpu(ev->duration);
ath6kl_dbg(ATH6KL_DBG_WMI, "remain_on_chnl: freq=%u dur=%u\n",
freq, dur);
- chan = ieee80211_get_channel(ar->wdev->wiphy, freq);
+ chan = ieee80211_get_channel(ar->wiphy, freq);
if (!chan) {
ath6kl_dbg(ATH6KL_DBG_WMI, "remain_on_chnl: Unknown channel "
"(freq=%u)\n", freq);
return -EINVAL;
}
- cfg80211_ready_on_channel(ar->net_dev, 1, chan, NL80211_CHAN_NO_HT,
+ id = vif->last_roc_id;
+ cfg80211_ready_on_channel(vif->ndev, id, chan, NL80211_CHAN_NO_HT,
dur, GFP_ATOMIC);
return 0;
}
static int ath6kl_wmi_cancel_remain_on_chnl_event_rx(struct wmi *wmi,
- u8 *datap, int len)
+ u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_cancel_remain_on_chnl_event *ev;
u32 freq;
u32 dur;
struct ieee80211_channel *chan;
struct ath6kl *ar = wmi->parent_dev;
+ u32 id;
if (len < sizeof(*ev))
return -EINVAL;
@@ -455,23 +485,29 @@ static int ath6kl_wmi_cancel_remain_on_chnl_event_rx(struct wmi *wmi,
dur = le32_to_cpu(ev->duration);
ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl: freq=%u dur=%u "
"status=%u\n", freq, dur, ev->status);
- chan = ieee80211_get_channel(ar->wdev->wiphy, freq);
+ chan = ieee80211_get_channel(ar->wiphy, freq);
if (!chan) {
ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl: Unknown "
"channel (freq=%u)\n", freq);
return -EINVAL;
}
- cfg80211_remain_on_channel_expired(ar->net_dev, 1, chan,
+ if (vif->last_cancel_roc_id &&
+ vif->last_cancel_roc_id + 1 == vif->last_roc_id)
+ id = vif->last_cancel_roc_id; /* event for cancel command */
+ else
+ id = vif->last_roc_id; /* timeout on uncanceled r-o-c */
+ vif->last_cancel_roc_id = 0;
+ cfg80211_remain_on_channel_expired(vif->ndev, id, chan,
NL80211_CHAN_NO_HT, GFP_ATOMIC);
return 0;
}
-static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_tx_status_event *ev;
u32 id;
- struct ath6kl *ar = wmi->parent_dev;
if (len < sizeof(*ev))
return -EINVAL;
@@ -481,7 +517,7 @@ static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len)
ath6kl_dbg(ATH6KL_DBG_WMI, "tx_status: id=%x ack_status=%u\n",
id, ev->ack_status);
if (wmi->last_mgmt_tx_frame) {
- cfg80211_mgmt_tx_status(ar->net_dev, id,
+ cfg80211_mgmt_tx_status(vif->ndev, id,
wmi->last_mgmt_tx_frame,
wmi->last_mgmt_tx_frame_len,
!!ev->ack_status, GFP_ATOMIC);
@@ -493,12 +529,12 @@ static int ath6kl_wmi_tx_status_event_rx(struct wmi *wmi, u8 *datap, int len)
return 0;
}
-static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_p2p_rx_probe_req_event *ev;
u32 freq;
u16 dlen;
- struct ath6kl *ar = wmi->parent_dev;
if (len < sizeof(*ev))
return -EINVAL;
@@ -513,10 +549,10 @@ static int ath6kl_wmi_rx_probe_req_event_rx(struct wmi *wmi, u8 *datap, int len)
}
ath6kl_dbg(ATH6KL_DBG_WMI, "rx_probe_req: len=%u freq=%u "
"probe_req_report=%d\n",
- dlen, freq, ar->probe_req_report);
+ dlen, freq, vif->probe_req_report);
- if (ar->probe_req_report || ar->nw_type == AP_NETWORK)
- cfg80211_rx_mgmt(ar->net_dev, freq, ev->data, dlen, GFP_ATOMIC);
+ if (vif->probe_req_report || vif->nw_type == AP_NETWORK)
+ cfg80211_rx_mgmt(vif->ndev, freq, ev->data, dlen, GFP_ATOMIC);
return 0;
}
@@ -536,12 +572,12 @@ static int ath6kl_wmi_p2p_capabilities_event_rx(u8 *datap, int len)
return 0;
}
-static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_rx_action_event *ev;
u32 freq;
u16 dlen;
- struct ath6kl *ar = wmi->parent_dev;
if (len < sizeof(*ev))
return -EINVAL;
@@ -555,7 +591,7 @@ static int ath6kl_wmi_rx_action_event_rx(struct wmi *wmi, u8 *datap, int len)
return -EINVAL;
}
ath6kl_dbg(ATH6KL_DBG_WMI, "rx_action: len=%u freq=%u\n", dlen, freq);
- cfg80211_rx_mgmt(ar->net_dev, freq, ev->data, dlen, GFP_ATOMIC);
+ cfg80211_rx_mgmt(vif->ndev, freq, ev->data, dlen, GFP_ATOMIC);
return 0;
}
@@ -620,7 +656,8 @@ static inline struct sk_buff *ath6kl_wmi_get_new_buf(u32 size)
}
/* Send a "simple" wmi command -- one with no arguments */
-static int ath6kl_wmi_simple_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id)
+static int ath6kl_wmi_simple_cmd(struct wmi *wmi, u8 if_idx,
+ enum wmi_cmd_id cmd_id)
{
struct sk_buff *skb;
int ret;
@@ -629,7 +666,7 @@ static int ath6kl_wmi_simple_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id)
if (!skb)
return -ENOMEM;
- ret = ath6kl_wmi_cmd_send(wmi, skb, cmd_id, NO_SYNC_WMIFLAG);
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, cmd_id, NO_SYNC_WMIFLAG);
return ret;
}
@@ -641,7 +678,6 @@ static int ath6kl_wmi_ready_event_rx(struct wmi *wmi, u8 *datap, int len)
if (len < sizeof(struct wmi_ready_event_2))
return -EINVAL;
- wmi->ready = true;
ath6kl_ready_event(wmi->parent_dev, ev->mac_addr,
le32_to_cpu(ev->sw_version),
le32_to_cpu(ev->abi_version));
@@ -673,32 +709,73 @@ int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi)
cmd->info.params.roam_rssi_floor = DEF_LRSSI_ROAM_FLOOR;
cmd->roam_ctrl = WMI_SET_LRSSI_SCAN_PARAMS;
- ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_ROAM_CTRL_CMDID, NO_SYNC_WMIFLAG);
+ ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
+ NO_SYNC_WMIFLAG);
return 0;
}
-static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len)
+int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid)
+{
+ struct sk_buff *skb;
+ struct roam_ctrl_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct roam_ctrl_cmd *) skb->data;
+ memset(cmd, 0, sizeof(*cmd));
+
+ memcpy(cmd->info.bssid, bssid, ETH_ALEN);
+ cmd->roam_ctrl = WMI_FORCE_ROAM;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "force roam to %pM\n", bssid);
+ return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode)
+{
+ struct sk_buff *skb;
+ struct roam_ctrl_cmd *cmd;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct roam_ctrl_cmd *) skb->data;
+ memset(cmd, 0, sizeof(*cmd));
+
+ cmd->info.roam_mode = mode;
+ cmd->roam_ctrl = WMI_SET_ROAM_MODE;
+
+ ath6kl_dbg(ATH6KL_DBG_WMI, "set roam mode %d\n", mode);
+ return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_ROAM_CTRL_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_connect_event *ev;
u8 *pie, *peie;
- struct ath6kl *ar = wmi->parent_dev;
if (len < sizeof(struct wmi_connect_event))
return -EINVAL;
ev = (struct wmi_connect_event *) datap;
- if (ar->nw_type == AP_NETWORK) {
+ if (vif->nw_type == AP_NETWORK) {
/* AP mode start/STA connected event */
- struct net_device *dev = ar->net_dev;
+ struct net_device *dev = vif->ndev;
if (memcmp(dev->dev_addr, ev->u.ap_bss.bssid, ETH_ALEN) == 0) {
ath6kl_dbg(ATH6KL_DBG_WMI, "%s: freq %d bssid %pM "
"(AP started)\n",
__func__, le16_to_cpu(ev->u.ap_bss.ch),
ev->u.ap_bss.bssid);
ath6kl_connect_ap_mode_bss(
- ar, le16_to_cpu(ev->u.ap_bss.ch));
+ vif, le16_to_cpu(ev->u.ap_bss.ch));
} else {
ath6kl_dbg(ATH6KL_DBG_WMI, "%s: aid %u mac_addr %pM "
"auth=%u keymgmt=%u cipher=%u apsd_info=%u "
@@ -710,7 +787,7 @@ static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len)
le16_to_cpu(ev->u.ap_sta.cipher),
ev->u.ap_sta.apsd_info);
ath6kl_connect_ap_mode_sta(
- ar, ev->u.ap_sta.aid, ev->u.ap_sta.mac_addr,
+ vif, ev->u.ap_sta.aid, ev->u.ap_sta.mac_addr,
ev->u.ap_sta.keymgmt,
le16_to_cpu(ev->u.ap_sta.cipher),
ev->u.ap_sta.auth, ev->assoc_req_len,
@@ -755,7 +832,7 @@ static int ath6kl_wmi_connect_event_rx(struct wmi *wmi, u8 *datap, int len)
pie += pie[1] + 2;
}
- ath6kl_connect_event(wmi->parent_dev, le16_to_cpu(ev->u.sta.ch),
+ ath6kl_connect_event(vif, le16_to_cpu(ev->u.sta.ch),
ev->u.sta.bssid,
le16_to_cpu(ev->u.sta.listen_intvl),
le16_to_cpu(ev->u.sta.beacon_intvl),
@@ -834,14 +911,15 @@ static void ath6kl_wmi_regdomain_event(struct wmi *wmi, u8 *datap, int len)
alpha2[0] = country->isoName[0];
alpha2[1] = country->isoName[1];
- regulatory_hint(wmi->parent_dev->wdev->wiphy, alpha2);
+ regulatory_hint(wmi->parent_dev->wiphy, alpha2);
ath6kl_dbg(ATH6KL_DBG_WMI, "Country alpha2 being used: %c%c\n",
alpha2[0], alpha2[1]);
}
}
-static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_disconnect_event *ev;
wmi->traffic_class = 100;
@@ -857,10 +935,8 @@ static int ath6kl_wmi_disconnect_event_rx(struct wmi *wmi, u8 *datap, int len)
ev->disconn_reason, ev->assoc_resp_len);
wmi->is_wmm_enabled = false;
- wmi->pair_crypto_type = NONE_CRYPT;
- wmi->grp_crypto_type = NONE_CRYPT;
- ath6kl_disconnect_event(wmi->parent_dev, ev->disconn_reason,
+ ath6kl_disconnect_event(vif, ev->disconn_reason,
ev->bssid, ev->assoc_resp_len, ev->assoc_info,
le16_to_cpu(ev->proto_reason_status));
@@ -886,7 +962,8 @@ static int ath6kl_wmi_peer_node_event_rx(struct wmi *wmi, u8 *datap, int len)
return 0;
}
-static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_tkip_micerr_event *ev;
@@ -895,12 +972,20 @@ static int ath6kl_wmi_tkip_micerr_event_rx(struct wmi *wmi, u8 *datap, int len)
ev = (struct wmi_tkip_micerr_event *) datap;
- ath6kl_tkip_micerr_event(wmi->parent_dev, ev->key_id, ev->is_mcast);
+ ath6kl_tkip_micerr_event(vif, ev->key_id, ev->is_mcast);
return 0;
}
-static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len)
+void ath6kl_wmi_sscan_timer(unsigned long ptr)
+{
+ struct ath6kl_vif *vif = (struct ath6kl_vif *) ptr;
+
+ cfg80211_sched_scan_results(vif->ar->wiphy);
+}
+
+static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_bss_info_hdr2 *bih;
u8 *buf;
@@ -927,26 +1012,27 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len)
return 0; /* Only update BSS table for now */
if (bih->frame_type == BEACON_FTYPE &&
- test_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag)) {
- clear_bit(CLEAR_BSSFILTER_ON_BEACON, &ar->flag);
- ath6kl_wmi_bssfilter_cmd(ar->wmi, NONE_BSS_FILTER, 0);
+ test_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags)) {
+ clear_bit(CLEAR_BSSFILTER_ON_BEACON, &vif->flags);
+ ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
+ NONE_BSS_FILTER, 0);
}
- channel = ieee80211_get_channel(ar->wdev->wiphy, le16_to_cpu(bih->ch));
+ channel = ieee80211_get_channel(ar->wiphy, le16_to_cpu(bih->ch));
if (channel == NULL)
return -EINVAL;
if (len < 8 + 2 + 2)
return -EINVAL;
- if (bih->frame_type == BEACON_FTYPE && test_bit(CONNECTED, &ar->flag) &&
- memcmp(bih->bssid, ar->bssid, ETH_ALEN) == 0) {
+ if (bih->frame_type == BEACON_FTYPE && test_bit(CONNECTED, &vif->flags)
+ && memcmp(bih->bssid, vif->bssid, ETH_ALEN) == 0) {
const u8 *tim;
tim = cfg80211_find_ie(WLAN_EID_TIM, buf + 8 + 2 + 2,
len - 8 - 2 - 2);
if (tim && tim[1] >= 2) {
- ar->assoc_bss_dtim_period = tim[3];
- set_bit(DTIM_PERIOD_AVAIL, &ar->flag);
+ vif->assoc_bss_dtim_period = tim[3];
+ set_bit(DTIM_PERIOD_AVAIL, &vif->flags);
}
}
@@ -966,7 +1052,7 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len)
IEEE80211_STYPE_BEACON);
memset(mgmt->da, 0xff, ETH_ALEN);
} else {
- struct net_device *dev = ar->net_dev;
+ struct net_device *dev = vif->ndev;
mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_PROBE_RESP);
@@ -979,7 +1065,7 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len)
memcpy(&mgmt->u.beacon, buf, len);
- bss = cfg80211_inform_bss_frame(ar->wdev->wiphy, channel, mgmt,
+ bss = cfg80211_inform_bss_frame(ar->wiphy, channel, mgmt,
24 + len, (bih->snr - 95) * 100,
GFP_ATOMIC);
kfree(mgmt);
@@ -987,6 +1073,21 @@ static int ath6kl_wmi_bssinfo_event_rx(struct wmi *wmi, u8 *datap, int len)
return -ENOMEM;
cfg80211_put_bss(bss);
+ /*
+	 * The firmware doesn't send any event when a scheduled scan has
+	 * finished, so we need to use a timer to find out when there are
+	 * no more results.
+	 *
+	 * The timer is started from the first bss info received; otherwise
+	 * it would never fire if the scan interval is short enough.
+ */
+ if (ar->state == ATH6KL_STATE_SCHED_SCAN &&
+ !timer_pending(&vif->sched_scan_timer)) {
+ mod_timer(&vif->sched_scan_timer, jiffies +
+ msecs_to_jiffies(ATH6KL_SCHED_SCAN_RESULT_DELAY));
+ }
+
return 0;
}
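For context, a minimal sketch (not part of the patch) of how the scheduled-scan timer used above might be armed; the actual call site lives outside this hunk, so the placement shown here is an assumption:

	/* Illustrative sketch: arm the per-vif timer whose callback,
	 * ath6kl_wmi_sscan_timer() above, reports scheduled-scan results
	 * once bss info events stop arriving. */
	setup_timer(&vif->sched_scan_timer, ath6kl_wmi_sscan_timer,
		    (unsigned long) vif);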
@@ -1094,20 +1195,21 @@ static int ath6kl_wmi_keepalive_reply_rx(struct wmi *wmi, u8 *datap, int len)
return 0;
}
-static int ath6kl_wmi_scan_complete_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_scan_complete_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_scan_complete_event *ev;
ev = (struct wmi_scan_complete_event *) datap;
- ath6kl_scan_complete_evt(wmi->parent_dev, a_sle32_to_cpu(ev->status));
+ ath6kl_scan_complete_evt(vif, a_sle32_to_cpu(ev->status));
wmi->is_probe_ssid = false;
return 0;
}
static int ath6kl_wmi_neighbor_report_event_rx(struct wmi *wmi, u8 *datap,
- int len)
+ int len, struct ath6kl_vif *vif)
{
struct wmi_neighbor_report_event *ev;
u8 i;
@@ -1125,7 +1227,7 @@ static int ath6kl_wmi_neighbor_report_event_rx(struct wmi *wmi, u8 *datap,
ath6kl_dbg(ATH6KL_DBG_WMI, "neighbor %d/%d - %pM 0x%x\n",
i + 1, ev->num_neighbors, ev->neighbor[i].bssid,
ev->neighbor[i].bss_flags);
- cfg80211_pmksa_candidate_notify(wmi->parent_dev->net_dev, i,
+ cfg80211_pmksa_candidate_notify(vif->ndev, i,
ev->neighbor[i].bssid,
!!(ev->neighbor[i].bss_flags &
WMI_PREAUTH_CAPABLE_BSS),
@@ -1166,9 +1268,10 @@ static int ath6kl_wmi_error_event_rx(struct wmi *wmi, u8 *datap, int len)
return 0;
}
-static int ath6kl_wmi_stats_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_stats_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
- ath6kl_tgt_stats_event(wmi->parent_dev, datap, len);
+ ath6kl_tgt_stats_event(vif, datap, len);
return 0;
}
@@ -1222,7 +1325,7 @@ static int ath6kl_wmi_send_rssi_threshold_params(struct wmi *wmi,
cmd = (struct wmi_rssi_threshold_params_cmd *) skb->data;
memcpy(cmd, rssi_cmd, sizeof(struct wmi_rssi_threshold_params_cmd));
- return ath6kl_wmi_cmd_send(wmi, skb, WMI_RSSI_THRESHOLD_PARAMS_CMDID,
+ return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_RSSI_THRESHOLD_PARAMS_CMDID,
NO_SYNC_WMIFLAG);
}
@@ -1322,7 +1425,8 @@ static int ath6kl_wmi_rssi_threshold_event_rx(struct wmi *wmi, u8 *datap,
return 0;
}
-static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_cac_event *reply;
struct ieee80211_tspec_ie *ts;
@@ -1343,7 +1447,8 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len)
tsid = (tsinfo >> IEEE80211_WMM_IE_TSPEC_TID_SHIFT) &
IEEE80211_WMM_IE_TSPEC_TID_MASK;
- ath6kl_wmi_delete_pstream_cmd(wmi, reply->ac, tsid);
+ ath6kl_wmi_delete_pstream_cmd(wmi, vif->fw_vif_idx,
+ reply->ac, tsid);
} else if (reply->cac_indication == CAC_INDICATION_NO_RESP) {
/*
* Following assumes that there is only one outstanding
@@ -1358,7 +1463,8 @@ static int ath6kl_wmi_cac_event_rx(struct wmi *wmi, u8 *datap, int len)
break;
}
if (index < (sizeof(active_tsids) * 8))
- ath6kl_wmi_delete_pstream_cmd(wmi, reply->ac, index);
+ ath6kl_wmi_delete_pstream_cmd(wmi, vif->fw_vif_idx,
+ reply->ac, index);
}
/*
@@ -1403,7 +1509,7 @@ static int ath6kl_wmi_send_snr_threshold_params(struct wmi *wmi,
cmd = (struct wmi_snr_threshold_params_cmd *) skb->data;
memcpy(cmd, snr_cmd, sizeof(struct wmi_snr_threshold_params_cmd));
- return ath6kl_wmi_cmd_send(wmi, skb, WMI_SNR_THRESHOLD_PARAMS_CMDID,
+ return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SNR_THRESHOLD_PARAMS_CMDID,
NO_SYNC_WMIFLAG);
}
@@ -1528,14 +1634,15 @@ static int ath6kl_wmi_aplist_event_rx(struct wmi *wmi, u8 *datap, int len)
return 0;
}
-int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb,
+int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb,
enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag)
{
struct wmi_cmd_hdr *cmd_hdr;
enum htc_endpoint_id ep_id = wmi->ep_id;
int ret;
+ u16 info1;
- if (WARN_ON(skb == NULL))
+ if (WARN_ON(skb == NULL || (if_idx > (wmi->parent_dev->vif_max - 1))))
return -EINVAL;
ath6kl_dbg(ATH6KL_DBG_WMI, "wmi tx id %d len %d flag %d\n",
@@ -1554,19 +1661,20 @@ int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb,
* Make sure all data currently queued is transmitted before
* the cmd execution. Establish a new sync point.
*/
- ath6kl_wmi_sync_point(wmi);
+ ath6kl_wmi_sync_point(wmi, if_idx);
}
skb_push(skb, sizeof(struct wmi_cmd_hdr));
cmd_hdr = (struct wmi_cmd_hdr *) skb->data;
cmd_hdr->cmd_id = cpu_to_le16(cmd_id);
- cmd_hdr->info1 = 0; /* added for virtual interface */
+ info1 = if_idx & WMI_CMD_HDR_IF_ID_MASK;
+ cmd_hdr->info1 = cpu_to_le16(info1);
/* Only for OPT_TX_CMD, use BE endpoint. */
if (cmd_id == WMI_OPT_TX_FRAME_CMDID) {
ret = ath6kl_wmi_data_hdr_add(wmi, skb, OPT_MSGTYPE,
- false, false, 0, NULL);
+ false, false, 0, NULL, if_idx);
if (ret) {
dev_kfree_skb(skb);
return ret;
@@ -1582,20 +1690,22 @@ int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb,
* Make sure all new data queued waits for the command to
* execute. Establish a new sync point.
*/
- ath6kl_wmi_sync_point(wmi);
+ ath6kl_wmi_sync_point(wmi, if_idx);
}
return 0;
}
-int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type,
+int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx,
+ enum network_type nw_type,
enum dot11_auth_mode dot11_auth_mode,
enum auth_mode auth_mode,
enum crypto_type pairwise_crypto,
u8 pairwise_crypto_len,
enum crypto_type group_crypto,
u8 group_crypto_len, int ssid_len, u8 *ssid,
- u8 *bssid, u16 channel, u32 ctrl_flags)
+ u8 *bssid, u16 channel, u32 ctrl_flags,
+ u8 nw_subtype)
{
struct sk_buff *skb;
struct wmi_connect_cmd *cc;
@@ -1635,19 +1745,19 @@ int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type,
cc->grp_crypto_len = group_crypto_len;
cc->ch = cpu_to_le16(channel);
cc->ctrl_flags = cpu_to_le32(ctrl_flags);
+ cc->nw_subtype = nw_subtype;
if (bssid != NULL)
memcpy(cc->bssid, bssid, ETH_ALEN);
- wmi->pair_crypto_type = pairwise_crypto;
- wmi->grp_crypto_type = group_crypto;
-
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_CONNECT_CMDID, NO_SYNC_WMIFLAG);
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_CONNECT_CMDID,
+ NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel)
+int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 if_idx, u8 *bssid,
+ u16 channel)
{
struct sk_buff *skb;
struct wmi_reconnect_cmd *cc;
@@ -1668,13 +1778,13 @@ int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel)
if (bssid != NULL)
memcpy(cc->bssid, bssid, ETH_ALEN);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_RECONNECT_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_RECONNECT_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_disconnect_cmd(struct wmi *wmi)
+int ath6kl_wmi_disconnect_cmd(struct wmi *wmi, u8 if_idx)
{
int ret;
@@ -1683,12 +1793,79 @@ int ath6kl_wmi_disconnect_cmd(struct wmi *wmi)
wmi->traffic_class = 100;
/* Disconnect command does not need to do a SYNC before. */
- ret = ath6kl_wmi_simple_cmd(wmi, WMI_DISCONNECT_CMDID);
+ ret = ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_DISCONNECT_CMDID);
+
+ return ret;
+}
+
+int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
+ enum wmi_scan_type scan_type,
+ u32 force_fgscan, u32 is_legacy,
+ u32 home_dwell_time, u32 force_scan_interval,
+ s8 num_chan, u16 *ch_list, u32 no_cck, u32 *rates)
+{
+ struct sk_buff *skb;
+ struct wmi_begin_scan_cmd *sc;
+ s8 size;
+ int i, band, ret;
+ struct ath6kl *ar = wmi->parent_dev;
+ int num_rates;
+
+ size = sizeof(struct wmi_begin_scan_cmd);
+
+ if ((scan_type != WMI_LONG_SCAN) && (scan_type != WMI_SHORT_SCAN))
+ return -EINVAL;
+
+ if (num_chan > WMI_MAX_CHANNELS)
+ return -EINVAL;
+
+ if (num_chan)
+ size += sizeof(u16) * (num_chan - 1);
+
+ skb = ath6kl_wmi_get_new_buf(size);
+ if (!skb)
+ return -ENOMEM;
+
+ sc = (struct wmi_begin_scan_cmd *) skb->data;
+ sc->scan_type = scan_type;
+ sc->force_fg_scan = cpu_to_le32(force_fgscan);
+ sc->is_legacy = cpu_to_le32(is_legacy);
+ sc->home_dwell_time = cpu_to_le32(home_dwell_time);
+ sc->force_scan_intvl = cpu_to_le32(force_scan_interval);
+ sc->no_cck = cpu_to_le32(no_cck);
+ sc->num_ch = num_chan;
+
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ struct ieee80211_supported_band *sband =
+ ar->wiphy->bands[band];
+ u32 ratemask = rates[band];
+ u8 *supp_rates = sc->supp_rates[band].rates;
+ num_rates = 0;
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if ((BIT(i) & ratemask) == 0)
+ continue; /* skip rate */
+ supp_rates[num_rates++] =
+ (u8) (sband->bitrates[i].bitrate / 5);
+ }
+ sc->supp_rates[band].nrates = num_rates;
+ }
+
+ for (i = 0; i < num_chan; i++)
+ sc->ch_list[i] = cpu_to_le16(ch_list[i]);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_BEGIN_SCAN_CMDID,
+ NO_SYNC_WMIFLAG);
return ret;
}
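As a usage illustration, a hypothetical scan handler could forward cfg80211's per-band rate bitmaps straight into ath6kl_wmi_beginscan_cmd(); the names request, channels and n_channels are assumed for the example and the cfg80211 request fields are not defined by this patch:

	/* Sketch: pass the per-band rate bitmaps and the no-CCK flag from
	 * the cfg80211 scan request through to the firmware scan command. */
	ret = ath6kl_wmi_beginscan_cmd(ar->wmi, vif->fw_vif_idx,
				       WMI_LONG_SCAN, false, false, 0, 0,
				       n_channels, channels,
				       request->no_cck, request->rates);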
-int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type,
+/* ath6kl_wmi_startscan_cmd is to be deprecated. Use
+ * ath6kl_wmi_beginscan_cmd instead. The new function supports P2P
+ * mgmt operations using the station interface.
+ */
+int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx,
+ enum wmi_scan_type scan_type,
u32 force_fgscan, u32 is_legacy,
u32 home_dwell_time, u32 force_scan_interval,
s8 num_chan, u16 *ch_list)
@@ -1724,13 +1901,14 @@ int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type,
for (i = 0; i < num_chan; i++)
sc->ch_list[i] = cpu_to_le16(ch_list[i]);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_START_SCAN_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_START_SCAN_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec,
+int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u8 if_idx,
+ u16 fg_start_sec,
u16 fg_end_sec, u16 bg_sec,
u16 minact_chdw_msec, u16 maxact_chdw_msec,
u16 pas_chdw_msec, u8 short_scan_ratio,
@@ -1757,12 +1935,12 @@ int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec,
sc->max_dfsch_act_time = cpu_to_le32(max_dfsch_act_time);
sc->maxact_scan_per_ssid = cpu_to_le16(maxact_scan_per_ssid);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_SCAN_PARAMS_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_SCAN_PARAMS_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask)
+int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 if_idx, u8 filter, u32 ie_mask)
{
struct sk_buff *skb;
struct wmi_bss_filter_cmd *cmd;
@@ -1779,12 +1957,12 @@ int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask)
cmd->bss_filter = filter;
cmd->ie_mask = cpu_to_le32(ie_mask);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_BSS_FILTER_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_BSS_FILTER_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag,
+int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag,
u8 ssid_len, u8 *ssid)
{
struct sk_buff *skb;
@@ -1816,12 +1994,13 @@ int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag,
cmd->ssid_len = ssid_len;
memcpy(cmd->ssid, ssid, ssid_len);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_PROBED_SSID_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_PROBED_SSID_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval,
+int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u8 if_idx,
+ u16 listen_interval,
u16 listen_beacons)
{
struct sk_buff *skb;
@@ -1836,12 +2015,12 @@ int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval,
cmd->listen_intvl = cpu_to_le16(listen_interval);
cmd->num_beacons = cpu_to_le16(listen_beacons);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_LISTEN_INT_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_LISTEN_INT_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode)
+int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 if_idx, u8 pwr_mode)
{
struct sk_buff *skb;
struct wmi_power_mode_cmd *cmd;
@@ -1855,12 +2034,12 @@ int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode)
cmd->pwr_mode = pwr_mode;
wmi->pwr_mode = pwr_mode;
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_POWER_MODE_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_POWER_MODE_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period,
+int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u8 if_idx, u16 idle_period,
u16 ps_poll_num, u16 dtim_policy,
u16 tx_wakeup_policy, u16 num_tx_to_wakeup,
u16 ps_fail_event_policy)
@@ -1881,12 +2060,12 @@ int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period,
pm->num_tx_to_wakeup = cpu_to_le16(num_tx_to_wakeup);
pm->ps_fail_event_policy = cpu_to_le16(ps_fail_event_policy);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_POWER_PARAMS_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_POWER_PARAMS_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout)
+int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 if_idx, u8 timeout)
{
struct sk_buff *skb;
struct wmi_disc_timeout_cmd *cmd;
@@ -1899,15 +2078,20 @@ int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout)
cmd = (struct wmi_disc_timeout_cmd *) skb->data;
cmd->discon_timeout = timeout;
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_DISC_TIMEOUT_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_DISC_TIMEOUT_CMDID,
NO_SYNC_WMIFLAG);
+
+ if (ret == 0)
+ ath6kl_debug_set_disconnect_timeout(wmi->parent_dev, timeout);
+
return ret;
}
-int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index,
+int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
enum crypto_type key_type,
u8 key_usage, u8 key_len,
- u8 *key_rsc, u8 *key_material,
+ u8 *key_rsc, unsigned int key_rsc_len,
+ u8 *key_material,
u8 key_op_ctrl, u8 *mac_addr,
enum wmi_sync_flag sync_flag)
{
@@ -1920,7 +2104,7 @@ int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index,
key_index, key_type, key_usage, key_len, key_op_ctrl);
if ((key_index > WMI_MAX_KEY_INDEX) || (key_len > WMI_MAX_KEY_LEN) ||
- (key_material == NULL))
+ (key_material == NULL) || key_rsc_len > 8)
return -EINVAL;
if ((WEP_CRYPT != key_type) && (NULL == key_rsc))
@@ -1938,20 +2122,20 @@ int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index,
memcpy(cmd->key, key_material, key_len);
if (key_rsc != NULL)
- memcpy(cmd->key_rsc, key_rsc, sizeof(cmd->key_rsc));
+ memcpy(cmd->key_rsc, key_rsc, key_rsc_len);
cmd->key_op_ctrl = key_op_ctrl;
if (mac_addr)
memcpy(cmd->key_mac_addr, mac_addr, ETH_ALEN);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_ADD_CIPHER_KEY_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_CIPHER_KEY_CMDID,
sync_flag);
return ret;
}
-int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk)
+int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, u8 *krk)
{
struct sk_buff *skb;
struct wmi_add_krk_cmd *cmd;
@@ -1964,12 +2148,13 @@ int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk)
cmd = (struct wmi_add_krk_cmd *) skb->data;
memcpy(cmd->krk, krk, WMI_KRK_LEN);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_ADD_KRK_CMDID, NO_SYNC_WMIFLAG);
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_KRK_CMDID,
+ NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index)
+int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index)
{
struct sk_buff *skb;
struct wmi_delete_cipher_key_cmd *cmd;
@@ -1985,13 +2170,13 @@ int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index)
cmd = (struct wmi_delete_cipher_key_cmd *) skb->data;
cmd->key_index = key_index;
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_DELETE_CIPHER_KEY_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DELETE_CIPHER_KEY_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid,
+int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, u8 if_idx, const u8 *bssid,
const u8 *pmkid, bool set)
{
struct sk_buff *skb;
@@ -2018,14 +2203,14 @@ int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid,
cmd->enable = PMKID_DISABLE;
}
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_PMKID_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_PMKID_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
static int ath6kl_wmi_data_sync_send(struct wmi *wmi, struct sk_buff *skb,
- enum htc_endpoint_id ep_id)
+ enum htc_endpoint_id ep_id, u8 if_idx)
{
struct wmi_data_hdr *data_hdr;
int ret;
@@ -2037,14 +2222,14 @@ static int ath6kl_wmi_data_sync_send(struct wmi *wmi, struct sk_buff *skb,
data_hdr = (struct wmi_data_hdr *) skb->data;
data_hdr->info = SYNC_MSGTYPE << WMI_DATA_HDR_MSG_TYPE_SHIFT;
- data_hdr->info3 = 0;
+ data_hdr->info3 = cpu_to_le16(if_idx & WMI_DATA_HDR_IF_IDX_MASK);
ret = ath6kl_control_tx(wmi->parent_dev, skb, ep_id);
return ret;
}
-static int ath6kl_wmi_sync_point(struct wmi *wmi)
+static int ath6kl_wmi_sync_point(struct wmi *wmi, u8 if_idx)
{
struct sk_buff *skb;
struct wmi_sync_cmd *cmd;
@@ -2100,7 +2285,7 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi)
* Send sync cmd followed by sync data messages on all
* endpoints being used
*/
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SYNCHRONIZE_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SYNCHRONIZE_CMDID,
NO_SYNC_WMIFLAG);
if (ret)
@@ -2119,7 +2304,7 @@ static int ath6kl_wmi_sync_point(struct wmi *wmi)
traffic_class);
ret =
ath6kl_wmi_data_sync_send(wmi, data_sync_bufs[index].skb,
- ep_id);
+ ep_id, if_idx);
if (ret)
break;
@@ -2142,7 +2327,7 @@ free_skb:
return ret;
}
-int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi,
+int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, u8 if_idx,
struct wmi_create_pstream_cmd *params)
{
struct sk_buff *skb;
@@ -2231,12 +2416,13 @@ int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi,
ath6kl_indicate_tx_activity(wmi->parent_dev,
params->traffic_class, true);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_CREATE_PSTREAM_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_CREATE_PSTREAM_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid)
+int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
+ u8 tsid)
{
struct sk_buff *skb;
struct wmi_delete_pstream_cmd *cmd;
@@ -2272,7 +2458,7 @@ int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid)
"sending delete_pstream_cmd: traffic class: %d tsid=%d\n",
traffic_class, tsid);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_DELETE_PSTREAM_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DELETE_PSTREAM_CMDID,
SYNC_BEFORE_WMIFLAG);
spin_lock_bh(&wmi->lock);
@@ -2311,17 +2497,173 @@ int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd)
cmd = (struct wmi_set_ip_cmd *) skb->data;
memcpy(cmd, ip_cmd, sizeof(struct wmi_set_ip_cmd));
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_IP_CMDID, NO_SYNC_WMIFLAG);
+ ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_IP_CMDID,
+ NO_SYNC_WMIFLAG);
return ret;
}
-static int ath6kl_wmi_get_wow_list_event_rx(struct wmi *wmi, u8 * datap,
- int len)
+static void ath6kl_wmi_relinquish_implicit_pstream_credits(struct wmi *wmi)
{
- if (len < sizeof(struct wmi_get_wow_list_reply))
+ u16 active_tsids;
+ u8 stream_exist;
+ int i;
+
+ /*
+ * Relinquish credits from all implicitly created pstreams when we
+ * go to sleep. If user-created explicit thin streams exist within
+ * a fatpipe, leave them intact for the user to delete.
+ */
+ spin_lock_bh(&wmi->lock);
+ stream_exist = wmi->fat_pipe_exist;
+ spin_unlock_bh(&wmi->lock);
+
+ for (i = 0; i < WMM_NUM_AC; i++) {
+ if (stream_exist & (1 << i)) {
+
+ /*
+ * FIXME: Is this lock & unlock inside the
+ * for loop correct? May need rework.
+ */
+ spin_lock_bh(&wmi->lock);
+ active_tsids = wmi->stream_exist_for_ac[i];
+ spin_unlock_bh(&wmi->lock);
+
+ /*
+ * If there are no user-created thin streams,
+ * delete the fatpipe.
+ */
+ if (!active_tsids) {
+ stream_exist &= ~(1 << i);
+ /*
+ * Indicate inactivity to driver layer for
+ * this fatpipe (pstream)
+ */
+ ath6kl_indicate_tx_activity(wmi->parent_dev,
+ i, false);
+ }
+ }
+ }
+
+ /* FIXME: Can we do this assignment without locking? */
+ spin_lock_bh(&wmi->lock);
+ wmi->fat_pipe_exist = stream_exist;
+ spin_unlock_bh(&wmi->lock);
+}
+
+int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
+ enum ath6kl_host_mode host_mode)
+{
+ struct sk_buff *skb;
+ struct wmi_set_host_sleep_mode_cmd *cmd;
+ int ret;
+
+ if ((host_mode != ATH6KL_HOST_MODE_ASLEEP) &&
+ (host_mode != ATH6KL_HOST_MODE_AWAKE)) {
+ ath6kl_err("invalid host sleep mode: %d\n", host_mode);
return -EINVAL;
+ }
- return 0;
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_host_sleep_mode_cmd *) skb->data;
+
+ if (host_mode == ATH6KL_HOST_MODE_ASLEEP) {
+ ath6kl_wmi_relinquish_implicit_pstream_credits(wmi);
+ cmd->asleep = cpu_to_le32(1);
+	} else {
+		cmd->awake = cpu_to_le32(1);
+	}
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ WMI_SET_HOST_SLEEP_MODE_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
+ enum ath6kl_wow_mode wow_mode,
+ u32 filter, u16 host_req_delay)
+{
+ struct sk_buff *skb;
+ struct wmi_set_wow_mode_cmd *cmd;
+ int ret;
+
+	if ((wow_mode != ATH6KL_WOW_MODE_ENABLE) &&
+	    (wow_mode != ATH6KL_WOW_MODE_DISABLE)) {
+ ath6kl_err("invalid wow mode: %d\n", wow_mode);
+ return -EINVAL;
+ }
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_set_wow_mode_cmd *) skb->data;
+ cmd->enable_wow = cpu_to_le32(wow_mode);
+ cmd->filter = cpu_to_le32(filter);
+ cmd->host_req_delay = cpu_to_le16(host_req_delay);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_WOW_MODE_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
+}
+
+int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
+ u8 list_id, u8 filter_size,
+ u8 filter_offset, u8 *filter, u8 *mask)
+{
+ struct sk_buff *skb;
+ struct wmi_add_wow_pattern_cmd *cmd;
+ u16 size;
+ u8 *filter_mask;
+ int ret;
+
+ /*
+ * Allocate additional memory in the buffer to hold the
+ * filter and mask values, i.e. twice filter_size.
+ */
+ size = sizeof(*cmd) + (2 * filter_size);
+
+ skb = ath6kl_wmi_get_new_buf(size);
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_add_wow_pattern_cmd *) skb->data;
+ cmd->filter_list_id = list_id;
+ cmd->filter_size = filter_size;
+ cmd->filter_offset = filter_offset;
+
+ memcpy(cmd->filter, filter, filter_size);
+
+ filter_mask = (u8 *) (cmd->filter + filter_size);
+ memcpy(filter_mask, mask, filter_size);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_ADD_WOW_PATTERN_CMDID,
+ NO_SYNC_WMIFLAG);
+
+ return ret;
+}
+
+int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
+ u16 list_id, u16 filter_id)
+{
+ struct sk_buff *skb;
+ struct wmi_del_wow_pattern_cmd *cmd;
+ int ret;
+
+ skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ cmd = (struct wmi_del_wow_pattern_cmd *) skb->data;
+ cmd->filter_list_id = cpu_to_le16(list_id);
+ cmd->filter_id = cpu_to_le16(filter_id);
+
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_DEL_WOW_PATTERN_CMDID,
+ NO_SYNC_WMIFLAG);
+ return ret;
}
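To illustrate the buffer layout handled above, a caller might install a two-byte wake pattern as follows; the list id, offset and pattern bytes are made-up example values, and how the target interprets filter_offset is firmware-defined:

	/* Example sketch: wake on frames carrying 0x08 0x06 (ARP ethertype)
	 * at offset 12; the mask selects which filter bytes are compared. */
	static const u8 pattern[] = { 0x08, 0x06 };
	static const u8 mask[]    = { 0xff, 0xff };

	ret = ath6kl_wmi_add_wow_pattern_cmd(ar->wmi, vif->fw_vif_idx, 0,
					     sizeof(pattern), 12,
					     (u8 *) pattern, (u8 *) mask);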
static int ath6kl_wmi_cmd_send_xtnd(struct wmi *wmi, struct sk_buff *skb,
@@ -2336,7 +2678,7 @@ static int ath6kl_wmi_cmd_send_xtnd(struct wmi *wmi, struct sk_buff *skb,
cmd_hdr = (struct wmix_cmd_hdr *) skb->data;
cmd_hdr->cmd_id = cpu_to_le32(cmd_id);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_EXTENSION_CMDID, sync_flag);
+ ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_EXTENSION_CMDID, sync_flag);
return ret;
}
@@ -2379,12 +2721,12 @@ int ath6kl_wmi_config_debug_module_cmd(struct wmi *wmi, u32 valid, u32 config)
return ret;
}
-int ath6kl_wmi_get_stats_cmd(struct wmi *wmi)
+int ath6kl_wmi_get_stats_cmd(struct wmi *wmi, u8 if_idx)
{
- return ath6kl_wmi_simple_cmd(wmi, WMI_GET_STATISTICS_CMDID);
+ return ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_GET_STATISTICS_CMDID);
}
-int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM)
+int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 if_idx, u8 dbM)
{
struct sk_buff *skb;
struct wmi_set_tx_pwr_cmd *cmd;
@@ -2397,18 +2739,24 @@ int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM)
cmd = (struct wmi_set_tx_pwr_cmd *) skb->data;
cmd->dbM = dbM;
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_TX_PWR_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_TX_PWR_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi)
+int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi, u8 if_idx)
+{
+ return ath6kl_wmi_simple_cmd(wmi, if_idx, WMI_GET_TX_PWR_CMDID);
+}
+
+int ath6kl_wmi_get_roam_tbl_cmd(struct wmi *wmi)
{
- return ath6kl_wmi_simple_cmd(wmi, WMI_GET_TX_PWR_CMDID);
+ return ath6kl_wmi_simple_cmd(wmi, 0, WMI_GET_ROAM_TBL_CMDID);
}
-int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status, u8 preamble_policy)
+int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 if_idx, u8 status,
+ u8 preamble_policy)
{
struct sk_buff *skb;
struct wmi_set_lpreamble_cmd *cmd;
@@ -2422,7 +2770,7 @@ int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status, u8 preamble_policy)
cmd->status = status;
cmd->preamble_policy = preamble_policy;
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_LPREAMBLE_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_LPREAMBLE_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
@@ -2440,11 +2788,12 @@ int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold)
cmd = (struct wmi_set_rts_cmd *) skb->data;
cmd->threshold = cpu_to_le16(threshold);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_RTS_CMDID, NO_SYNC_WMIFLAG);
+ ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_SET_RTS_CMDID,
+ NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg)
+int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg)
{
struct sk_buff *skb;
struct wmi_set_wmm_txop_cmd *cmd;
@@ -2460,12 +2809,13 @@ int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg)
cmd = (struct wmi_set_wmm_txop_cmd *) skb->data;
cmd->txop_enable = cfg;
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_WMM_TXOP_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_WMM_TXOP_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl)
+int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
+ u8 keep_alive_intvl)
{
struct sk_buff *skb;
struct wmi_set_keepalive_cmd *cmd;
@@ -2477,10 +2827,13 @@ int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl)
cmd = (struct wmi_set_keepalive_cmd *) skb->data;
cmd->keep_alive_intvl = keep_alive_intvl;
- wmi->keep_alive_intvl = keep_alive_intvl;
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_KEEPALIVE_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_KEEPALIVE_CMDID,
NO_SYNC_WMIFLAG);
+
+ if (ret == 0)
+ ath6kl_debug_set_keepalive(wmi->parent_dev, keep_alive_intvl);
+
return ret;
}
@@ -2495,7 +2848,7 @@ int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len)
memcpy(skb->data, buf, len);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_TEST_CMDID, NO_SYNC_WMIFLAG);
+ ret = ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_TEST_CMDID, NO_SYNC_WMIFLAG);
return ret;
}
@@ -2528,28 +2881,31 @@ static int ath6kl_wmi_get_pmkid_list_event_rx(struct wmi *wmi, u8 *datap,
return 0;
}
-static int ath6kl_wmi_addba_req_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_addba_req_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_addba_req_event *cmd = (struct wmi_addba_req_event *) datap;
- aggr_recv_addba_req_evt(wmi->parent_dev, cmd->tid,
+ aggr_recv_addba_req_evt(vif, cmd->tid,
le16_to_cpu(cmd->st_seq_no), cmd->win_sz);
return 0;
}
-static int ath6kl_wmi_delba_req_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_delba_req_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_delba_event *cmd = (struct wmi_delba_event *) datap;
- aggr_recv_delba_req_evt(wmi->parent_dev, cmd->tid);
+ aggr_recv_delba_req_evt(vif, cmd->tid);
return 0;
}
/* AP mode functions */
-int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p)
+int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx,
+ struct wmi_connect_cmd *p)
{
struct sk_buff *skb;
struct wmi_connect_cmd *cm;
@@ -2562,7 +2918,7 @@ int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p)
cm = (struct wmi_connect_cmd *) skb->data;
memcpy(cm, p, sizeof(*cm));
- res = ath6kl_wmi_cmd_send(wmip, skb, WMI_AP_CONFIG_COMMIT_CMDID,
+ res = ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_CONFIG_COMMIT_CMDID,
NO_SYNC_WMIFLAG);
ath6kl_dbg(ATH6KL_DBG_WMI, "%s: nw_type=%u auth_mode=%u ch=%u "
"ctrl_flags=0x%x-> res=%d\n",
@@ -2571,7 +2927,8 @@ int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p)
return res;
}
-int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 cmd, const u8 *mac, u16 reason)
+int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd, const u8 *mac,
+ u16 reason)
{
struct sk_buff *skb;
struct wmi_ap_set_mlme_cmd *cm;
@@ -2585,11 +2942,12 @@ int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 cmd, const u8 *mac, u16 reason)
cm->reason = cpu_to_le16(reason);
cm->cmd = cmd;
- return ath6kl_wmi_cmd_send(wmip, skb, WMI_AP_SET_MLME_CMDID,
+ return ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_SET_MLME_CMDID,
NO_SYNC_WMIFLAG);
}
-static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
struct wmi_pspoll_event *ev;
@@ -2598,19 +2956,21 @@ static int ath6kl_wmi_pspoll_event_rx(struct wmi *wmi, u8 *datap, int len)
ev = (struct wmi_pspoll_event *) datap;
- ath6kl_pspoll_event(wmi->parent_dev, le16_to_cpu(ev->aid));
+ ath6kl_pspoll_event(vif, le16_to_cpu(ev->aid));
return 0;
}
-static int ath6kl_wmi_dtimexpiry_event_rx(struct wmi *wmi, u8 *datap, int len)
+static int ath6kl_wmi_dtimexpiry_event_rx(struct wmi *wmi, u8 *datap, int len,
+ struct ath6kl_vif *vif)
{
- ath6kl_dtimexpiry_event(wmi->parent_dev);
+ ath6kl_dtimexpiry_event(vif);
return 0;
}
-int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag)
+int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u8 if_idx, u16 aid,
+ bool flag)
{
struct sk_buff *skb;
struct wmi_ap_set_pvb_cmd *cmd;
@@ -2625,13 +2985,14 @@ int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag)
cmd->rsvd = cpu_to_le16(0);
cmd->flag = cpu_to_le32(flag);
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_AP_SET_PVB_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_SET_PVB_CMDID,
NO_SYNC_WMIFLAG);
return 0;
}
-int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_ver,
+int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx,
+ u8 rx_meta_ver,
bool rx_dot11_hdr, bool defrag_on_host)
{
struct sk_buff *skb;
@@ -2648,14 +3009,14 @@ int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_ver,
cmd->meta_ver = rx_meta_ver;
/* Delete the local aggr state, on host */
- ret = ath6kl_wmi_cmd_send(wmi, skb, WMI_RX_FRAME_FORMAT_CMDID,
+ ret = ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_RX_FRAME_FORMAT_CMDID,
NO_SYNC_WMIFLAG);
return ret;
}
-int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie,
- u8 ie_len)
+int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
+ const u8 *ie, u8 ie_len)
{
struct sk_buff *skb;
struct wmi_set_appie_cmd *p;
@@ -2669,8 +3030,11 @@ int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie,
p = (struct wmi_set_appie_cmd *) skb->data;
p->mgmt_frm_type = mgmt_frm_type;
p->ie_len = ie_len;
- memcpy(p->ie_info, ie, ie_len);
- return ath6kl_wmi_cmd_send(wmi, skb, WMI_SET_APPIE_CMDID,
+
+ if (ie != NULL && ie_len > 0)
+ memcpy(p->ie_info, ie, ie_len);
+
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_APPIE_CMDID,
NO_SYNC_WMIFLAG);
}
@@ -2688,11 +3052,11 @@ int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable)
cmd = (struct wmi_disable_11b_rates_cmd *) skb->data;
cmd->disable = disable ? 1 : 0;
- return ath6kl_wmi_cmd_send(wmi, skb, WMI_DISABLE_11B_RATES_CMDID,
+ return ath6kl_wmi_cmd_send(wmi, 0, skb, WMI_DISABLE_11B_RATES_CMDID,
NO_SYNC_WMIFLAG);
}
-int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u32 freq, u32 dur)
+int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx, u32 freq, u32 dur)
{
struct sk_buff *skb;
struct wmi_remain_on_chnl_cmd *p;
@@ -2706,12 +3070,16 @@ int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u32 freq, u32 dur)
p = (struct wmi_remain_on_chnl_cmd *) skb->data;
p->freq = cpu_to_le32(freq);
p->duration = cpu_to_le32(dur);
- return ath6kl_wmi_cmd_send(wmi, skb, WMI_REMAIN_ON_CHNL_CMDID,
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_REMAIN_ON_CHNL_CMDID,
NO_SYNC_WMIFLAG);
}
-int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait,
- const u8 *data, u16 data_len)
+/* ath6kl_wmi_send_action_cmd is to be deprecated. Use
+ * ath6kl_wmi_send_mgmt_cmd instead. The new function supports P2P
+ * mgmt operations using the station interface.
+ */
+int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
+ u32 wait, const u8 *data, u16 data_len)
{
struct sk_buff *skb;
struct wmi_send_action_cmd *p;
@@ -2731,6 +3099,7 @@ int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait,
}
kfree(wmi->last_mgmt_tx_frame);
+ memcpy(buf, data, data_len);
wmi->last_mgmt_tx_frame = buf;
wmi->last_mgmt_tx_frame_len = data_len;
@@ -2742,18 +3111,61 @@ int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait,
p->wait = cpu_to_le32(wait);
p->len = cpu_to_le16(data_len);
memcpy(p->data, data, data_len);
- return ath6kl_wmi_cmd_send(wmi, skb, WMI_SEND_ACTION_CMDID,
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SEND_ACTION_CMDID,
NO_SYNC_WMIFLAG);
}
-int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u32 freq,
- const u8 *dst,
- const u8 *data, u16 data_len)
+int ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
+ u32 wait, const u8 *data, u16 data_len,
+ u32 no_cck)
{
struct sk_buff *skb;
- struct wmi_p2p_probe_response_cmd *p;
+ struct wmi_send_mgmt_cmd *p;
+ u8 *buf;
+
+ if (wait)
+ return -EINVAL; /* Offload for wait not supported */
+
+ buf = kmalloc(data_len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
skb = ath6kl_wmi_get_new_buf(sizeof(*p) + data_len);
+ if (!skb) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ kfree(wmi->last_mgmt_tx_frame);
+ memcpy(buf, data, data_len);
+ wmi->last_mgmt_tx_frame = buf;
+ wmi->last_mgmt_tx_frame_len = data_len;
+
+	ath6kl_dbg(ATH6KL_DBG_WMI, "send_mgmt_cmd: id=%u freq=%u wait=%u "
+ "len=%u\n", id, freq, wait, data_len);
+ p = (struct wmi_send_mgmt_cmd *) skb->data;
+ p->id = cpu_to_le32(id);
+ p->freq = cpu_to_le32(freq);
+ p->wait = cpu_to_le32(wait);
+ p->no_cck = cpu_to_le32(no_cck);
+ p->len = cpu_to_le16(data_len);
+ memcpy(p->data, data, data_len);
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SEND_MGMT_CMDID,
+ NO_SYNC_WMIFLAG);
+}
+
+int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
+ const u8 *dst, const u8 *data,
+ u16 data_len)
+{
+ struct sk_buff *skb;
+ struct wmi_p2p_probe_response_cmd *p;
+ size_t cmd_len = sizeof(*p) + data_len;
+
+ if (data_len == 0)
+ cmd_len++; /* work around target minimum length requirement */
+
+ skb = ath6kl_wmi_get_new_buf(cmd_len);
if (!skb)
return -ENOMEM;
@@ -2764,11 +3176,12 @@ int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u32 freq,
memcpy(p->destination_addr, dst, ETH_ALEN);
p->len = cpu_to_le16(data_len);
memcpy(p->data, data, data_len);
- return ath6kl_wmi_cmd_send(wmi, skb, WMI_SEND_PROBE_RESPONSE_CMDID,
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb,
+ WMI_SEND_PROBE_RESPONSE_CMDID,
NO_SYNC_WMIFLAG);
}
-int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, bool enable)
+int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, u8 if_idx, bool enable)
{
struct sk_buff *skb;
struct wmi_probe_req_report_cmd *p;
@@ -2781,11 +3194,11 @@ int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, bool enable)
enable);
p = (struct wmi_probe_req_report_cmd *) skb->data;
p->enable = enable ? 1 : 0;
- return ath6kl_wmi_cmd_send(wmi, skb, WMI_PROBE_REQ_REPORT_CMDID,
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_PROBE_REQ_REPORT_CMDID,
NO_SYNC_WMIFLAG);
}
-int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u32 info_req_flags)
+int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u8 if_idx, u32 info_req_flags)
{
struct sk_buff *skb;
struct wmi_get_p2p_info *p;
@@ -2798,14 +3211,15 @@ int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u32 info_req_flags)
info_req_flags);
p = (struct wmi_get_p2p_info *) skb->data;
p->info_req_flags = cpu_to_le32(info_req_flags);
- return ath6kl_wmi_cmd_send(wmi, skb, WMI_GET_P2P_INFO_CMDID,
+ return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_GET_P2P_INFO_CMDID,
NO_SYNC_WMIFLAG);
}
-int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi)
+int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx)
{
ath6kl_dbg(ATH6KL_DBG_WMI, "cancel_remain_on_chnl_cmd\n");
- return ath6kl_wmi_simple_cmd(wmi, WMI_CANCEL_REMAIN_ON_CHNL_CMDID);
+ return ath6kl_wmi_simple_cmd(wmi, if_idx,
+ WMI_CANCEL_REMAIN_ON_CHNL_CMDID);
}
static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
@@ -2818,7 +3232,6 @@ static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
if (skb->len < sizeof(struct wmix_cmd_hdr)) {
ath6kl_err("bad packet 1\n");
- wmi->stat.cmd_len_err++;
return -EINVAL;
}
@@ -2840,7 +3253,6 @@ static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
break;
default:
ath6kl_warn("unknown cmd id 0x%x\n", id);
- wmi->stat.cmd_id_err++;
ret = -EINVAL;
break;
}
@@ -2848,12 +3260,19 @@ static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
return ret;
}
+static int ath6kl_wmi_roam_tbl_event_rx(struct wmi *wmi, u8 *datap, int len)
+{
+ return ath6kl_debug_roam_tbl_event(wmi->parent_dev, datap, len);
+}
+
/* Control Path */
int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd;
+ struct ath6kl_vif *vif;
u32 len;
u16 id;
+ u8 if_idx;
u8 *datap;
int ret = 0;
@@ -2863,12 +3282,12 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
if (skb->len < sizeof(struct wmi_cmd_hdr)) {
ath6kl_err("bad packet 1\n");
dev_kfree_skb(skb);
- wmi->stat.cmd_len_err++;
return -EINVAL;
}
cmd = (struct wmi_cmd_hdr *) skb->data;
id = le16_to_cpu(cmd->cmd_id);
+ if_idx = le16_to_cpu(cmd->info1) & WMI_CMD_HDR_IF_ID_MASK;
skb_pull(skb, sizeof(struct wmi_cmd_hdr));
@@ -2879,6 +3298,15 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
ath6kl_dbg_dump(ATH6KL_DBG_WMI_DUMP, NULL, "wmi rx ",
datap, len);
+ vif = ath6kl_get_vif_by_index(wmi->parent_dev, if_idx);
+ if (!vif) {
+ ath6kl_dbg(ATH6KL_DBG_WMI,
+			   "WMI event for unavailable vif, vif_index:%d\n",
+ if_idx);
+ dev_kfree_skb(skb);
+ return -EINVAL;
+ }
+
switch (id) {
case WMI_GET_BITRATE_CMDID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_BITRATE_CMDID\n");
@@ -2898,11 +3326,11 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_CONNECT_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CONNECT_EVENTID\n");
- ret = ath6kl_wmi_connect_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_connect_event_rx(wmi, datap, len, vif);
break;
case WMI_DISCONNECT_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DISCONNECT_EVENTID\n");
- ret = ath6kl_wmi_disconnect_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_disconnect_event_rx(wmi, datap, len, vif);
break;
case WMI_PEER_NODE_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PEER_NODE_EVENTID\n");
@@ -2910,11 +3338,11 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_TKIP_MICERR_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TKIP_MICERR_EVENTID\n");
- ret = ath6kl_wmi_tkip_micerr_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_tkip_micerr_event_rx(wmi, datap, len, vif);
break;
case WMI_BSSINFO_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_BSSINFO_EVENTID\n");
- ret = ath6kl_wmi_bssinfo_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_bssinfo_event_rx(wmi, datap, len, vif);
break;
case WMI_REGDOMAIN_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REGDOMAIN_EVENTID\n");
@@ -2926,11 +3354,12 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_NEIGHBOR_REPORT_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_NEIGHBOR_REPORT_EVENTID\n");
- ret = ath6kl_wmi_neighbor_report_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_neighbor_report_event_rx(wmi, datap, len,
+ vif);
break;
case WMI_SCAN_COMPLETE_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SCAN_COMPLETE_EVENTID\n");
- ret = ath6kl_wmi_scan_complete_rx(wmi, datap, len);
+ ret = ath6kl_wmi_scan_complete_rx(wmi, datap, len, vif);
break;
case WMI_CMDERROR_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CMDERROR_EVENTID\n");
@@ -2938,7 +3367,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_REPORT_STATISTICS_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_STATISTICS_EVENTID\n");
- ret = ath6kl_wmi_stats_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_stats_event_rx(wmi, datap, len, vif);
break;
case WMI_RSSI_THRESHOLD_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RSSI_THRESHOLD_EVENTID\n");
@@ -2953,6 +3382,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_REPORT_ROAM_TBL_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REPORT_ROAM_TBL_EVENTID\n");
+ ret = ath6kl_wmi_roam_tbl_event_rx(wmi, datap, len);
break;
case WMI_EXTENSION_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_EXTENSION_EVENTID\n");
@@ -2960,7 +3390,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_CAC_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CAC_EVENTID\n");
- ret = ath6kl_wmi_cac_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_cac_event_rx(wmi, datap, len, vif);
break;
case WMI_CHANNEL_CHANGE_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_CHANNEL_CHANGE_EVENTID\n");
@@ -2996,7 +3426,6 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_GET_WOW_LIST_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_WOW_LIST_EVENTID\n");
- ret = ath6kl_wmi_get_wow_list_event_rx(wmi, datap, len);
break;
case WMI_GET_PMKID_LIST_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_GET_PMKID_LIST_EVENTID\n");
@@ -3004,25 +3433,25 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_PSPOLL_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_PSPOLL_EVENTID\n");
- ret = ath6kl_wmi_pspoll_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_pspoll_event_rx(wmi, datap, len, vif);
break;
case WMI_DTIMEXPIRY_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DTIMEXPIRY_EVENTID\n");
- ret = ath6kl_wmi_dtimexpiry_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_dtimexpiry_event_rx(wmi, datap, len, vif);
break;
case WMI_SET_PARAMS_REPLY_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_SET_PARAMS_REPLY_EVENTID\n");
break;
case WMI_ADDBA_REQ_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_REQ_EVENTID\n");
- ret = ath6kl_wmi_addba_req_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_addba_req_event_rx(wmi, datap, len, vif);
break;
case WMI_ADDBA_RESP_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_ADDBA_RESP_EVENTID\n");
break;
case WMI_DELBA_REQ_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_DELBA_REQ_EVENTID\n");
- ret = ath6kl_wmi_delba_req_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_delba_req_event_rx(wmi, datap, len, vif);
break;
case WMI_REPORT_BTCOEX_CONFIG_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI,
@@ -3038,21 +3467,21 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_REMAIN_ON_CHNL_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_REMAIN_ON_CHNL_EVENTID\n");
- ret = ath6kl_wmi_remain_on_chnl_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_remain_on_chnl_event_rx(wmi, datap, len, vif);
break;
case WMI_CANCEL_REMAIN_ON_CHNL_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI,
"WMI_CANCEL_REMAIN_ON_CHNL_EVENTID\n");
ret = ath6kl_wmi_cancel_remain_on_chnl_event_rx(wmi, datap,
- len);
+ len, vif);
break;
case WMI_TX_STATUS_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_TX_STATUS_EVENTID\n");
- ret = ath6kl_wmi_tx_status_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_tx_status_event_rx(wmi, datap, len, vif);
break;
case WMI_RX_PROBE_REQ_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_PROBE_REQ_EVENTID\n");
- ret = ath6kl_wmi_rx_probe_req_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_rx_probe_req_event_rx(wmi, datap, len, vif);
break;
case WMI_P2P_CAPABILITIES_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_P2P_CAPABILITIES_EVENTID\n");
@@ -3060,7 +3489,7 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
case WMI_RX_ACTION_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_RX_ACTION_EVENTID\n");
- ret = ath6kl_wmi_rx_action_event_rx(wmi, datap, len);
+ ret = ath6kl_wmi_rx_action_event_rx(wmi, datap, len, vif);
break;
case WMI_P2P_INFO_EVENTID:
ath6kl_dbg(ATH6KL_DBG_WMI, "WMI_P2P_INFO_EVENTID\n");
@@ -3068,7 +3497,6 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
break;
default:
ath6kl_dbg(ATH6KL_DBG_WMI, "unknown cmd id 0x%x\n", id);
- wmi->stat.cmd_id_err++;
ret = -EINVAL;
break;
}
@@ -3078,11 +3506,8 @@ int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb)
return ret;
}
-static void ath6kl_wmi_qos_state_init(struct wmi *wmi)
+void ath6kl_wmi_reset(struct wmi *wmi)
{
- if (!wmi)
- return;
-
spin_lock_bh(&wmi->lock);
wmi->fat_pipe_exist = 0;
@@ -3103,16 +3528,9 @@ void *ath6kl_wmi_init(struct ath6kl *dev)
wmi->parent_dev = dev;
- ath6kl_wmi_qos_state_init(wmi);
-
wmi->pwr_mode = REC_POWER;
- wmi->phy_mode = WMI_11G_MODE;
-
- wmi->pair_crypto_type = NONE_CRYPT;
- wmi->grp_crypto_type = NONE_CRYPT;
- wmi->ht_allowed[A_BAND_24GHZ] = 1;
- wmi->ht_allowed[A_BAND_5GHZ] = 1;
+ ath6kl_wmi_reset(wmi);
return wmi;
}
diff --git a/drivers/net/wireless/ath/ath6kl/wmi.h b/drivers/net/wireless/ath/ath6kl/wmi.h
index f8e644d..42ac311 100644
--- a/drivers/net/wireless/ath/ath6kl/wmi.h
+++ b/drivers/net/wireless/ath/ath6kl/wmi.h
@@ -93,11 +93,6 @@ struct sq_threshold_params {
u8 last_rssi_poll_event;
};
-struct wmi_stats {
- u32 cmd_len_err;
- u32 cmd_id_err;
-};
-
struct wmi_data_sync_bufs {
u8 traffic_class;
struct sk_buff *skb;
@@ -111,32 +106,26 @@ struct wmi_data_sync_bufs {
#define WMM_AC_VO 3 /* voice */
struct wmi {
- bool ready;
u16 stream_exist_for_ac[WMM_NUM_AC];
u8 fat_pipe_exist;
struct ath6kl *parent_dev;
- struct wmi_stats stat;
u8 pwr_mode;
- u8 phy_mode;
- u8 keep_alive_intvl;
spinlock_t lock;
enum htc_endpoint_id ep_id;
struct sq_threshold_params
sq_threshld[SIGNAL_QUALITY_METRICS_NUM_MAX];
- enum crypto_type pair_crypto_type;
- enum crypto_type grp_crypto_type;
bool is_wmm_enabled;
- u8 ht_allowed[A_NUM_BANDS];
u8 traffic_class;
bool is_probe_ssid;
u8 *last_mgmt_tx_frame;
size_t last_mgmt_tx_frame_len;
+ u8 saved_pwr_mode;
};
struct host_app_area {
- u32 wmi_protocol_ver;
-};
+ __le32 wmi_protocol_ver;
+} __packed;
enum wmi_msg_type {
DATA_MSGTYPE = 0x0,
@@ -184,6 +173,8 @@ enum wmi_data_hdr_data_type {
#define WMI_DATA_HDR_META_MASK 0x7
#define WMI_DATA_HDR_META_SHIFT 13
+#define WMI_DATA_HDR_IF_IDX_MASK 0xF
+
struct wmi_data_hdr {
s8 rssi;
@@ -208,6 +199,12 @@ struct wmi_data_hdr {
* b15:b13 - META_DATA_VERSION 0 - 7
*/
__le16 info2;
+
+ /*
+ * usage of info3, 16-bit:
+ * b3:b0 - Interface index
+ * b15:b4 - Reserved
+ */
__le16 info3;
} __packed;
@@ -250,6 +247,11 @@ static inline u8 wmi_data_hdr_get_meta(struct wmi_data_hdr *dhdr)
WMI_DATA_HDR_META_MASK;
}
+static inline u8 wmi_data_hdr_get_if_idx(struct wmi_data_hdr *dhdr)
+{
+ return le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_IF_IDX_MASK;
+}
+
/* Tx meta version definitions */
#define WMI_MAX_TX_META_SZ 12
#define WMI_META_VERSION_1 0x01
@@ -299,6 +301,8 @@ struct wmi_rx_meta_v2 {
u8 csum_flags;
} __packed;
+#define WMI_CMD_HDR_IF_ID_MASK 0xF
+
/* Control Path */
struct wmi_cmd_hdr {
__le16 cmd_id;
@@ -312,6 +316,11 @@ struct wmi_cmd_hdr {
__le16 reserved;
} __packed;
+static inline u8 wmi_cmd_hdr_get_if_idx(struct wmi_cmd_hdr *chdr)
+{
+ return le16_to_cpu(chdr->info1) & WMI_CMD_HDR_IF_ID_MASK;
+}
+
/* List of WMI commands */
enum wmi_cmd_id {
WMI_CONNECT_CMDID = 0x0001,
@@ -320,6 +329,10 @@ enum wmi_cmd_id {
WMI_SYNCHRONIZE_CMDID,
WMI_CREATE_PSTREAM_CMDID,
WMI_DELETE_PSTREAM_CMDID,
+ /* WMI_START_SCAN_CMDID is to be deprecated. Use
+ * WMI_BEGIN_SCAN_CMDID instead. The new cmd supports P2P mgmt
+ * operations using station interface.
+ */
WMI_START_SCAN_CMDID,
WMI_SET_SCAN_PARAMS_CMDID,
WMI_SET_BSS_FILTER_CMDID,
@@ -533,12 +546,61 @@ enum wmi_cmd_id {
WMI_GTK_OFFLOAD_OP_CMDID,
WMI_REMAIN_ON_CHNL_CMDID,
WMI_CANCEL_REMAIN_ON_CHNL_CMDID,
+ /* WMI_SEND_ACTION_CMDID is to be deprecated. Use
+ * WMI_SEND_MGMT_CMDID instead. The new cmd supports P2P mgmt
+ * operations using station interface.
+ */
WMI_SEND_ACTION_CMDID,
WMI_PROBE_REQ_REPORT_CMDID,
WMI_DISABLE_11B_RATES_CMDID,
WMI_SEND_PROBE_RESPONSE_CMDID,
WMI_GET_P2P_INFO_CMDID,
WMI_AP_JOIN_BSS_CMDID,
+
+ WMI_SMPS_ENABLE_CMDID,
+ WMI_SMPS_CONFIG_CMDID,
+ WMI_SET_RATECTRL_PARM_CMDID,
+ /* LPL specific commands*/
+ WMI_LPL_FORCE_ENABLE_CMDID,
+ WMI_LPL_SET_POLICY_CMDID,
+ WMI_LPL_GET_POLICY_CMDID,
+ WMI_LPL_GET_HWSTATE_CMDID,
+ WMI_LPL_SET_PARAMS_CMDID,
+ WMI_LPL_GET_PARAMS_CMDID,
+
+ WMI_SET_BUNDLE_PARAM_CMDID,
+
+ /*GreenTx specific commands*/
+
+ WMI_GREENTX_PARAMS_CMDID,
+
+ WMI_RTT_MEASREQ_CMDID,
+ WMI_RTT_CAPREQ_CMDID,
+ WMI_RTT_STATUSREQ_CMDID,
+
+ /* WPS Commands */
+ WMI_WPS_START_CMDID,
+ WMI_GET_WPS_STATUS_CMDID,
+
+ /* More P2P commands */
+ WMI_SET_NOA_CMDID,
+ WMI_GET_NOA_CMDID,
+ WMI_SET_OPPPS_CMDID,
+ WMI_GET_OPPPS_CMDID,
+ WMI_ADD_PORT_CMDID,
+ WMI_DEL_PORT_CMDID,
+
+ /* 802.11w cmd */
+ WMI_SET_RSN_CAP_CMDID,
+ WMI_GET_RSN_CAP_CMDID,
+ WMI_SET_IGTK_CMDID,
+
+ WMI_RX_FILTER_COALESCE_FILTER_OP_CMDID,
+ WMI_RX_FILTER_SET_FRAME_TEST_LIST_CMDID,
+
+ WMI_SEND_MGMT_CMDID,
+ WMI_BEGIN_SCAN_CMDID,
+
};
enum wmi_mgmt_frame_type {
@@ -558,6 +620,14 @@ enum network_type {
AP_NETWORK = 0x10,
};
+enum network_subtype {
+ SUBTYPE_NONE,
+ SUBTYPE_BT,
+ SUBTYPE_P2PDEV,
+ SUBTYPE_P2PCLIENT,
+ SUBTYPE_P2PGO,
+};
+
enum dot11_auth_mode {
OPEN_AUTH = 0x01,
SHARED_AUTH = 0x02,
@@ -576,9 +646,6 @@ enum auth_mode {
WPA2_AUTH_CCKM = 0x40,
};
-#define WMI_MIN_CRYPTO_TYPE NONE_CRYPT
-#define WMI_MAX_CRYPTO_TYPE (AES_CRYPT + 1)
-
#define WMI_MIN_KEY_INDEX 0
#define WMI_MAX_KEY_INDEX 3
@@ -617,6 +684,7 @@ enum wmi_connect_ctrl_flags_bits {
CONNECT_CSA_FOLLOW_BSS = 0x0020,
CONNECT_DO_WPA_OFFLOAD = 0x0040,
CONNECT_DO_NOT_DEAUTH = 0x0080,
+ CONNECT_WPS_FLAG = 0x0100,
};
struct wmi_connect_cmd {
@@ -632,6 +700,7 @@ struct wmi_connect_cmd {
__le16 ch;
u8 bssid[ETH_ALEN];
__le32 ctrl_flags;
+ u8 nw_subtype;
} __packed;
/* WMI_RECONNECT_CMDID */
@@ -719,7 +788,12 @@ enum wmi_scan_type {
WMI_SHORT_SCAN = 1,
};
-struct wmi_start_scan_cmd {
+struct wmi_supp_rates {
+ u8 nrates;
+ u8 rates[ATH6KL_RATE_MAXSIZE];
+};
+
+struct wmi_begin_scan_cmd {
__le32 force_fg_scan;
/* for legacy cisco AP compatibility */
@@ -731,9 +805,15 @@ struct wmi_start_scan_cmd {
/* time interval between scans (msec) */
__le32 force_scan_intvl;
+ /* no CCK rates */
+ __le32 no_cck;
+
/* enum wmi_scan_type */
u8 scan_type;
+ /* Supported rates to advertise in the probe request frames */
+ struct wmi_supp_rates supp_rates[IEEE80211_NUM_BANDS];
+
/* how many channels follow */
u8 num_ch;
@@ -741,8 +821,31 @@ struct wmi_start_scan_cmd {
__le16 ch_list[1];
} __packed;
-/* WMI_SET_SCAN_PARAMS_CMDID */
-#define WMI_SHORTSCANRATIO_DEFAULT 3
+/* wmi_start_scan_cmd is to be deprecated. Use
+ * wmi_begin_scan_cmd instead. The new structure supports P2P mgmt
+ * operations using station interface.
+ */
+struct wmi_start_scan_cmd {
+ __le32 force_fg_scan;
+
+ /* for legacy cisco AP compatibility */
+ __le32 is_legacy;
+
+ /* max duration in the home channel(msec) */
+ __le32 home_dwell_time;
+
+ /* time interval between scans (msec) */
+ __le32 force_scan_intvl;
+
+ /* enum wmi_scan_type */
+ u8 scan_type;
+
+ /* how many channels follow */
+ u8 num_ch;
+
+ /* channels in Mhz */
+ __le16 ch_list[1];
+} __packed;
/*
* Warning: scan control flag value of 0xFF is used to disable
@@ -776,13 +879,6 @@ enum wmi_scan_ctrl_flags_bits {
ENABLE_SCAN_ABORT_EVENT = 0x40
};
-#define DEFAULT_SCAN_CTRL_FLAGS \
- (CONNECT_SCAN_CTRL_FLAGS | \
- SCAN_CONNECTED_CTRL_FLAGS | \
- ACTIVE_SCAN_CTRL_FLAGS | \
- ROAM_SCAN_CTRL_FLAGS | \
- ENABLE_AUTO_CTRL_FLAGS)
-
struct wmi_scan_params_cmd {
/* sec */
__le16 fg_start_period;
@@ -1365,14 +1461,20 @@ enum wmi_roam_ctrl {
WMI_SET_LRSSI_SCAN_PARAMS,
};
+enum wmi_roam_mode {
+ WMI_DEFAULT_ROAM_MODE = 1, /* RSSI based roam */
+ WMI_HOST_BIAS_ROAM_MODE = 2, /* Host bias based roam */
+ WMI_LOCK_BSS_MODE = 3, /* Lock to the current BSS */
+};
+
struct bss_bias {
u8 bssid[ETH_ALEN];
- u8 bias;
+ s8 bias;
} __packed;
struct bss_bias_info {
u8 num_bss;
- struct bss_bias bss_bias[1];
+ struct bss_bias bss_bias[0];
} __packed;
struct low_rssi_scan_params {
@@ -1385,10 +1487,11 @@ struct low_rssi_scan_params {
struct roam_ctrl_cmd {
union {
- u8 bssid[ETH_ALEN];
- u8 roam_mode;
- struct bss_bias_info bss;
- struct low_rssi_scan_params params;
+ u8 bssid[ETH_ALEN]; /* WMI_FORCE_ROAM */
+ u8 roam_mode; /* WMI_SET_ROAM_MODE */
+ struct bss_bias_info bss; /* WMI_SET_HOST_BIAS */
+ struct low_rssi_scan_params params; /* WMI_SET_LRSSI_SCAN_PARAMS
+ */
} __packed info;
u8 roam_ctrl;
} __packed;
@@ -1455,6 +1558,10 @@ struct wmi_tkip_micerr_event {
u8 is_mcast;
} __packed;
+enum wmi_scan_status {
+ WMI_SCAN_STATUS_SUCCESS = 0,
+};
+
/* WMI_SCAN_COMPLETE_EVENTID */
struct wmi_scan_complete_event {
a_sle32 status;
@@ -1635,6 +1742,12 @@ struct wmi_bss_roam_info {
u8 reserved;
} __packed;
+struct wmi_target_roam_tbl {
+ __le16 roam_mode;
+ __le16 num_entries;
+ struct wmi_bss_roam_info info[];
+} __packed;
+
/* WMI_CAC_EVENTID */
enum cac_indication {
CAC_INDICATION_ADMISSION = 0x00,
@@ -1771,7 +1884,6 @@ struct wmi_set_appie_cmd {
#define WSC_REG_ACTIVE 1
#define WSC_REG_INACTIVE 0
-#define WOW_MAX_FILTER_LISTS 1
#define WOW_MAX_FILTERS_PER_LIST 4
#define WOW_PATTERN_SIZE 64
#define WOW_MASK_SIZE 64
@@ -1794,17 +1906,52 @@ struct wmi_set_ip_cmd {
__le32 ips[MAX_IP_ADDRS];
} __packed;
-/* WMI_GET_WOW_LIST_CMD reply */
-struct wmi_get_wow_list_reply {
- /* number of patterns in reply */
- u8 num_filters;
+enum ath6kl_wow_filters {
+ WOW_FILTER_SSID = BIT(1),
+ WOW_FILTER_OPTION_MAGIC_PACKET = BIT(2),
+ WOW_FILTER_OPTION_EAP_REQ = BIT(3),
+ WOW_FILTER_OPTION_PATTERNS = BIT(4),
+ WOW_FILTER_OPTION_OFFLOAD_ARP = BIT(5),
+ WOW_FILTER_OPTION_OFFLOAD_NS = BIT(6),
+ WOW_FILTER_OPTION_OFFLOAD_GTK = BIT(7),
+ WOW_FILTER_OPTION_8021X_4WAYHS = BIT(8),
+ WOW_FILTER_OPTION_NLO_DISCVRY = BIT(9),
+ WOW_FILTER_OPTION_NWK_DISASSOC = BIT(10),
+ WOW_FILTER_OPTION_GTK_ERROR = BIT(11),
+ WOW_FILTER_OPTION_TEST_MODE = BIT(15),
+};
+
+enum ath6kl_host_mode {
+ ATH6KL_HOST_MODE_AWAKE,
+ ATH6KL_HOST_MODE_ASLEEP,
+};
+
+struct wmi_set_host_sleep_mode_cmd {
+ __le32 awake;
+ __le32 asleep;
+} __packed;
+
+enum ath6kl_wow_mode {
+ ATH6KL_WOW_MODE_DISABLE,
+ ATH6KL_WOW_MODE_ENABLE,
+};
+
+struct wmi_set_wow_mode_cmd {
+ __le32 enable_wow;
+ __le32 filter;
+ __le16 host_req_delay;
+} __packed;
- /* this is filter # x of total num_filters */
- u8 this_filter_num;
+struct wmi_add_wow_pattern_cmd {
+ u8 filter_list_id;
+ u8 filter_size;
+ u8 filter_offset;
+ u8 filter[0];
+} __packed;
- u8 wow_mode;
- u8 host_mode;
- struct wow_filter wow_filters[1];
+struct wmi_del_wow_pattern_cmd {
+ __le16 filter_list_id;
+ __le16 filter_id;
} __packed;
/* WMI_SET_AKMP_PARAMS_CMD */
@@ -1905,7 +2052,7 @@ struct wmi_tx_complete_event {
* !!! Warning !!!
* -Changing the following values needs compilation of both driver and firmware
*/
-#define AP_MAX_NUM_STA 8
+#define AP_MAX_NUM_STA 10
/* Spl. AID used to set DTIM flag in the beacons */
#define MCAST_AID 0xFF
@@ -1988,6 +2135,10 @@ struct wmi_remain_on_chnl_cmd {
__le32 duration;
} __packed;
+/* wmi_send_action_cmd is to be deprecated. Use
+ * wmi_send_mgmt_cmd instead. The new structure supports P2P mgmt
+ * operations using station interface.
+ */
struct wmi_send_action_cmd {
__le32 id;
__le32 freq;
@@ -1996,6 +2147,15 @@ struct wmi_send_action_cmd {
u8 data[0];
} __packed;
+struct wmi_send_mgmt_cmd {
+ __le32 id;
+ __le32 freq;
+ __le32 wait;
+ __le32 no_cck;
+ __le16 len;
+ u8 data[0];
+} __packed;
+
struct wmi_tx_status_event {
__le32 id;
u8 ack_status;
@@ -2163,120 +2323,162 @@ int ath6kl_wmi_dix_2_dot3(struct wmi *wmi, struct sk_buff *skb);
int ath6kl_wmi_data_hdr_add(struct wmi *wmi, struct sk_buff *skb,
u8 msg_type, bool more_data,
enum wmi_data_hdr_data_type data_type,
- u8 meta_ver, void *tx_meta_info);
+ u8 meta_ver, void *tx_meta_info, u8 if_idx);
int ath6kl_wmi_dot11_hdr_remove(struct wmi *wmi, struct sk_buff *skb);
int ath6kl_wmi_dot3_2_dix(struct sk_buff *skb);
-int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, struct sk_buff *skb,
- u32 layer2_priority, bool wmm_enabled,
- u8 *ac);
+int ath6kl_wmi_implicit_create_pstream(struct wmi *wmi, u8 if_idx,
+ struct sk_buff *skb, u32 layer2_priority,
+ bool wmm_enabled, u8 *ac);
int ath6kl_wmi_control_rx(struct wmi *wmi, struct sk_buff *skb);
-int ath6kl_wmi_cmd_send(struct wmi *wmi, struct sk_buff *skb,
+int ath6kl_wmi_cmd_send(struct wmi *wmi, u8 if_idx, struct sk_buff *skb,
enum wmi_cmd_id cmd_id, enum wmi_sync_flag sync_flag);
-int ath6kl_wmi_connect_cmd(struct wmi *wmi, enum network_type nw_type,
+int ath6kl_wmi_connect_cmd(struct wmi *wmi, u8 if_idx,
+ enum network_type nw_type,
enum dot11_auth_mode dot11_auth_mode,
enum auth_mode auth_mode,
enum crypto_type pairwise_crypto,
u8 pairwise_crypto_len,
enum crypto_type group_crypto,
u8 group_crypto_len, int ssid_len, u8 *ssid,
- u8 *bssid, u16 channel, u32 ctrl_flags);
-
-int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 *bssid, u16 channel);
-int ath6kl_wmi_disconnect_cmd(struct wmi *wmi);
-int ath6kl_wmi_startscan_cmd(struct wmi *wmi, enum wmi_scan_type scan_type,
+ u8 *bssid, u16 channel, u32 ctrl_flags,
+ u8 nw_subtype);
+
+int ath6kl_wmi_reconnect_cmd(struct wmi *wmi, u8 if_idx, u8 *bssid,
+ u16 channel);
+int ath6kl_wmi_disconnect_cmd(struct wmi *wmi, u8 if_idx);
+int ath6kl_wmi_startscan_cmd(struct wmi *wmi, u8 if_idx,
+ enum wmi_scan_type scan_type,
u32 force_fgscan, u32 is_legacy,
u32 home_dwell_time, u32 force_scan_interval,
s8 num_chan, u16 *ch_list);
-int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u16 fg_start_sec,
+
+int ath6kl_wmi_beginscan_cmd(struct wmi *wmi, u8 if_idx,
+ enum wmi_scan_type scan_type,
+ u32 force_fgscan, u32 is_legacy,
+ u32 home_dwell_time, u32 force_scan_interval,
+ s8 num_chan, u16 *ch_list, u32 no_cck,
+ u32 *rates);
+
+int ath6kl_wmi_scanparams_cmd(struct wmi *wmi, u8 if_idx, u16 fg_start_sec,
u16 fg_end_sec, u16 bg_sec,
u16 minact_chdw_msec, u16 maxact_chdw_msec,
u16 pas_chdw_msec, u8 short_scan_ratio,
u8 scan_ctrl_flag, u32 max_dfsch_act_time,
u16 maxact_scan_per_ssid);
-int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 filter, u32 ie_mask);
-int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 index, u8 flag,
+int ath6kl_wmi_bssfilter_cmd(struct wmi *wmi, u8 if_idx, u8 filter,
+ u32 ie_mask);
+int ath6kl_wmi_probedssid_cmd(struct wmi *wmi, u8 if_idx, u8 index, u8 flag,
u8 ssid_len, u8 *ssid);
-int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u16 listen_interval,
+int ath6kl_wmi_listeninterval_cmd(struct wmi *wmi, u8 if_idx,
+ u16 listen_interval,
u16 listen_beacons);
-int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 pwr_mode);
-int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u16 idle_period,
+int ath6kl_wmi_powermode_cmd(struct wmi *wmi, u8 if_idx, u8 pwr_mode);
+int ath6kl_wmi_pmparams_cmd(struct wmi *wmi, u8 if_idx, u16 idle_period,
u16 ps_poll_num, u16 dtim_policy,
u16 tx_wakup_policy, u16 num_tx_to_wakeup,
u16 ps_fail_event_policy);
-int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 timeout);
-int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi,
+int ath6kl_wmi_create_pstream_cmd(struct wmi *wmi, u8 if_idx,
struct wmi_create_pstream_cmd *pstream);
-int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 traffic_class, u8 tsid);
+int ath6kl_wmi_delete_pstream_cmd(struct wmi *wmi, u8 if_idx, u8 traffic_class,
+ u8 tsid);
+int ath6kl_wmi_disctimeout_cmd(struct wmi *wmi, u8 if_idx, u8 timeout);
int ath6kl_wmi_set_rts_cmd(struct wmi *wmi, u16 threshold);
-int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 status,
+int ath6kl_wmi_set_lpreamble_cmd(struct wmi *wmi, u8 if_idx, u8 status,
u8 preamble_policy);
int ath6kl_wmi_get_challenge_resp_cmd(struct wmi *wmi, u32 cookie, u32 source);
int ath6kl_wmi_config_debug_module_cmd(struct wmi *wmi, u32 valid, u32 config);
-int ath6kl_wmi_get_stats_cmd(struct wmi *wmi);
-int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 key_index,
+int ath6kl_wmi_get_stats_cmd(struct wmi *wmi, u8 if_idx);
+int ath6kl_wmi_addkey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index,
enum crypto_type key_type,
u8 key_usage, u8 key_len,
- u8 *key_rsc, u8 *key_material,
+ u8 *key_rsc, unsigned int key_rsc_len,
+ u8 *key_material,
u8 key_op_ctrl, u8 *mac_addr,
enum wmi_sync_flag sync_flag);
-int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 *krk);
-int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 key_index);
-int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, const u8 *bssid,
+int ath6kl_wmi_add_krk_cmd(struct wmi *wmi, u8 if_idx, u8 *krk);
+int ath6kl_wmi_deletekey_cmd(struct wmi *wmi, u8 if_idx, u8 key_index);
+int ath6kl_wmi_setpmkid_cmd(struct wmi *wmi, u8 if_idx, const u8 *bssid,
const u8 *pmkid, bool set);
-int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 dbM);
-int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi);
+int ath6kl_wmi_set_tx_pwr_cmd(struct wmi *wmi, u8 if_idx, u8 dbM);
+int ath6kl_wmi_get_tx_pwr_cmd(struct wmi *wmi, u8 if_idx);
+int ath6kl_wmi_get_roam_tbl_cmd(struct wmi *wmi);
-int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, enum wmi_txop_cfg cfg);
-int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 keep_alive_intvl);
+int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg);
+int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
+ u8 keep_alive_intvl);
int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len);
s32 ath6kl_wmi_get_rate(s8 rate_index);
int ath6kl_wmi_set_ip_cmd(struct wmi *wmi, struct wmi_set_ip_cmd *ip_cmd);
+int ath6kl_wmi_set_host_sleep_mode_cmd(struct wmi *wmi, u8 if_idx,
+ enum ath6kl_host_mode host_mode);
+int ath6kl_wmi_set_wow_mode_cmd(struct wmi *wmi, u8 if_idx,
+ enum ath6kl_wow_mode wow_mode,
+ u32 filter, u16 host_req_delay);
+int ath6kl_wmi_add_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
+ u8 list_id, u8 filter_size,
+ u8 filter_offset, u8 *filter, u8 *mask);
+int ath6kl_wmi_del_wow_pattern_cmd(struct wmi *wmi, u8 if_idx,
+ u16 list_id, u16 filter_id);
int ath6kl_wmi_set_roam_lrssi_cmd(struct wmi *wmi, u8 lrssi);
+int ath6kl_wmi_force_roam_cmd(struct wmi *wmi, const u8 *bssid);
+int ath6kl_wmi_set_roam_mode_cmd(struct wmi *wmi, enum wmi_roam_mode mode);
/* AP mode */
-int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, struct wmi_connect_cmd *p);
+int ath6kl_wmi_ap_profile_commit(struct wmi *wmip, u8 if_idx,
+ struct wmi_connect_cmd *p);
-int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 cmd, const u8 *mac, u16 reason);
+int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd,
+ const u8 *mac, u16 reason);
-int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u16 aid, bool flag);
+int ath6kl_wmi_set_pvb_cmd(struct wmi *wmi, u8 if_idx, u16 aid, bool flag);
-int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 rx_meta_version,
+int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx,
+ u8 rx_meta_version,
bool rx_dot11_hdr, bool defrag_on_host);
-int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie,
- u8 ie_len);
+int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
+ const u8 *ie, u8 ie_len);
/* P2P */
int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable);
-int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u32 freq, u32 dur);
+int ath6kl_wmi_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
+ u32 dur);
+
+int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
+ u32 wait, const u8 *data, u16 data_len);
+
+int ath6kl_wmi_send_mgmt_cmd(struct wmi *wmi, u8 if_idx, u32 id, u32 freq,
+ u32 wait, const u8 *data, u16 data_len,
+ u32 no_cck);
-int ath6kl_wmi_send_action_cmd(struct wmi *wmi, u32 id, u32 freq, u32 wait,
- const u8 *data, u16 data_len);
+int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u8 if_idx, u32 freq,
+ const u8 *dst, const u8 *data,
+ u16 data_len);
-int ath6kl_wmi_send_probe_response_cmd(struct wmi *wmi, u32 freq,
- const u8 *dst,
- const u8 *data, u16 data_len);
+int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, u8 if_idx, bool enable);
-int ath6kl_wmi_probe_report_req_cmd(struct wmi *wmi, bool enable);
+int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u8 if_idx, u32 info_req_flags);
-int ath6kl_wmi_info_req_cmd(struct wmi *wmi, u32 info_req_flags);
+int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx);
-int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi);
+int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
+ const u8 *ie, u8 ie_len);
-int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 mgmt_frm_type, const u8 *ie,
- u8 ie_len);
+void ath6kl_wmi_sscan_timer(unsigned long ptr);
+struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx);
void *ath6kl_wmi_init(struct ath6kl *devt);
void ath6kl_wmi_shutdown(struct wmi *wmi);
+void ath6kl_wmi_reset(struct wmi *wmi);
#endif /* WMI_H */
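The wmi.h changes above encode the target interface index in the low four bits of the data header's info3 and the command header's info1. The user-space snippet below mirrors the two new inline accessors with plain stdint types so the masking can be compiled and tried outside the kernel; it assumes the 16-bit fields have already been converted to host byte order (the kernel helpers do that with le16_to_cpu()).

#include <stdint.h>
#include <stdio.h>

#define WMI_DATA_HDR_IF_IDX_MASK 0xF	/* b3:b0 of wmi_data_hdr info3 */
#define WMI_CMD_HDR_IF_ID_MASK   0xF	/* b3:b0 of wmi_cmd_hdr info1 */

static uint8_t data_hdr_if_idx(uint16_t info3)
{
	return info3 & WMI_DATA_HDR_IF_IDX_MASK;
}

static uint8_t cmd_hdr_if_idx(uint16_t info1)
{
	return info1 & WMI_CMD_HDR_IF_ID_MASK;
}

int main(void)
{
	/* 0x0032: reserved bits set plus interface index 2 */
	printf("data if_idx=%u cmd if_idx=%u\n",
	       (unsigned int) data_hdr_if_idx(0x0032),
	       (unsigned int) cmd_hdr_if_idx(0x0001));
	return 0;
}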
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index d9c08c6..dc6be4a 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -2,6 +2,9 @@ config ATH9K_HW
tristate
config ATH9K_COMMON
tristate
+config ATH9K_DFS_DEBUGFS
+ def_bool y
+ depends on ATH9K_DEBUGFS && ATH9K_DFS_CERTIFIED
config ATH9K
tristate "Atheros 802.11n wireless cards support"
@@ -25,6 +28,7 @@ config ATH9K
config ATH9K_PCI
bool "Atheros ath9k PCI/PCIe bus support"
+ default y
depends on ATH9K && PCI
---help---
This option enables the PCI bus support in ath9k.
@@ -50,6 +54,25 @@ config ATH9K_DEBUGFS
Also required for changing debug message flags at run time.
+config ATH9K_DFS_CERTIFIED
+ bool "Atheros DFS support for certified platforms"
+ depends on ATH9K && EXPERT
+ default n
+ ---help---
+ This option enables DFS support for initiating radiation on
+ ath9k. There is no way to dynamically detect if a card was DFS
+ certified and as such this is left as a build time option. This
+ option should only be enabled by system integrators that can
+ guarantee that all the platforms that their kernel will run on
+ have obtained appropriate regulatory body certification for a
+ respective Atheros card by using ath9k on the target shipping
+ platforms.
+
+ This is currently only a placeholder for future DFS support,
+ as DFS support requires more components that still need to be
+ developed. At this point enabling this option won't do anything
+ except increase code size.
+
config ATH9K_RATE_CONTROL
bool "Atheros ath9k rate control"
depends on ATH9K
@@ -58,6 +81,14 @@ config ATH9K_RATE_CONTROL
Say Y, if you want to use the ath9k specific rate control
module instead of minstrel_ht.
+config ATH9K_BTCOEX_SUPPORT
+ bool "Atheros ath9k bluetooth coexistence support"
+ depends on ATH9K
+ default y
+ ---help---
+ Say Y, if you want to use the ath9k radios together with
+ Bluetooth modules in the same system.
+
config ATH9K_HTC
tristate "Atheros HTC based wireless cards support"
depends on USB && MAC80211
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 36ed3c4..da02242 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -4,11 +4,14 @@ ath9k-y += beacon.o \
main.o \
recv.o \
xmit.o \
+ mci.o \
ath9k-$(CONFIG_ATH9K_RATE_CONTROL) += rc.o
ath9k-$(CONFIG_ATH9K_PCI) += pci.o
ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
ath9k-$(CONFIG_ATH9K_DEBUGFS) += debug.o
+ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
+ath9k-$(CONFIG_ATH9K_DFS_CERTIFIED) += dfs.o
obj-$(CONFIG_ATH9K) += ath9k.o
@@ -33,7 +36,8 @@ ath9k_hw-y:= \
ar9002_mac.o \
ar9003_mac.o \
ar9003_eeprom.o \
- ar9003_paprd.o
+ ar9003_paprd.o \
+ ar9003_mci.o
obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index a639b94..bc56f57 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -136,8 +136,8 @@ static void ath9k_ani_restart(struct ath_hw *ah)
cck_base = AR_PHY_COUNTMAX - ah->config.cck_trig_high;
}
- ath_dbg(common, ATH_DBG_ANI,
- "Writing ofdmbase=%u cckbase=%u\n", ofdm_base, cck_base);
+ ath_dbg(common, ANI, "Writing ofdmbase=%u cckbase=%u\n",
+ ofdm_base, cck_base);
ENABLE_REGWRITE_BUFFER(ah);
@@ -268,8 +268,7 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel)
aniState->noiseFloor = BEACON_RSSI(ah);
- ath_dbg(common, ATH_DBG_ANI,
- "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
+ ath_dbg(common, ANI, "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
aniState->ofdmNoiseImmunityLevel,
immunityLevel, aniState->noiseFloor,
aniState->rssiThrLow, aniState->rssiThrHigh);
@@ -336,8 +335,7 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel)
const struct ani_cck_level_entry *entry_cck;
aniState->noiseFloor = BEACON_RSSI(ah);
- ath_dbg(common, ATH_DBG_ANI,
- "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
+ ath_dbg(common, ANI, "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
aniState->cckNoiseImmunityLevel, immunityLevel,
aniState->noiseFloor, aniState->rssiThrLow,
aniState->rssiThrHigh);
@@ -481,8 +479,7 @@ static void ath9k_ani_reset_old(struct ath_hw *ah, bool is_scanning)
if (ah->opmode != NL80211_IFTYPE_STATION
&& ah->opmode != NL80211_IFTYPE_ADHOC) {
- ath_dbg(common, ATH_DBG_ANI,
- "Reset ANI state opmode %u\n", ah->opmode);
+ ath_dbg(common, ANI, "Reset ANI state opmode %u\n", ah->opmode);
ah->stats.ast_ani_reset++;
if (ah->opmode == NL80211_IFTYPE_AP) {
@@ -582,7 +579,7 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
ATH9K_ANI_OFDM_DEF_LEVEL ||
aniState->cckNoiseImmunityLevel !=
ATH9K_ANI_CCK_DEF_LEVEL) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"Restore defaults: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n",
ah->opmode,
chan->channel,
@@ -599,7 +596,7 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
/*
* restore historical levels for this channel
*/
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"Restore history: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n",
ah->opmode,
chan->channel,
@@ -662,7 +659,7 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
if (!use_new_ani(ah) && (phyCnt1 < ofdm_base || phyCnt2 < cck_base)) {
if (phyCnt1 < ofdm_base) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"phyCnt1 0x%x, resetting counter value to 0x%x\n",
phyCnt1, ofdm_base);
REG_WRITE(ah, AR_PHY_ERR_1, ofdm_base);
@@ -670,7 +667,7 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
AR_PHY_ERR_OFDM_TIMING);
}
if (phyCnt2 < cck_base) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"phyCnt2 0x%x, resetting counter value to 0x%x\n",
phyCnt2, cck_base);
REG_WRITE(ah, AR_PHY_ERR_2, cck_base);
@@ -713,7 +710,7 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan)
cckPhyErrRate = aniState->cckPhyErrCount * 1000 /
aniState->listenTime;
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"listenTime=%d OFDM:%d errs=%d/s CCK:%d errs=%d/s ofdm_turn=%d\n",
aniState->listenTime,
aniState->ofdmNoiseImmunityLevel,
@@ -748,7 +745,7 @@ void ath9k_enable_mib_counters(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- ath_dbg(common, ATH_DBG_ANI, "Enable MIB counters\n");
+ ath_dbg(common, ANI, "Enable MIB counters\n");
ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
@@ -770,7 +767,7 @@ void ath9k_hw_disable_mib_counters(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
- ath_dbg(common, ATH_DBG_ANI, "Disable MIB counters\n");
+ ath_dbg(common, ANI, "Disable MIB counters\n");
REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC);
ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
@@ -845,7 +842,7 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
int i;
- ath_dbg(common, ATH_DBG_ANI, "Initialize ANI\n");
+ ath_dbg(common, ANI, "Initialize ANI\n");
if (use_new_ani(ah)) {
ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_NEW;
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index f199e9e..86a891f 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -158,7 +158,7 @@ static void ar5008_hw_force_bias(struct ath_hw *ah, u16 synth_freq)
/* pre-reverse this field */
tmp_reg = ath9k_hw_reverse_bits(new_bias, 3);
- ath_dbg(common, ATH_DBG_CONFIG, "Force rf_pwd_icsyndiv to %1d on %4d\n",
+ ath_dbg(common, CONFIG, "Force rf_pwd_icsyndiv to %1d on %4d\n",
new_bias, synth_freq);
/* swizzle rf_pwd_icsyndiv */
@@ -489,8 +489,6 @@ static int ar5008_hw_rf_alloc_ext_banks(struct ath_hw *ah)
ATH_ALLOC_BANK(ah->analogBank6Data, ah->iniBank6.ia_rows);
ATH_ALLOC_BANK(ah->analogBank6TPCData, ah->iniBank6TPC.ia_rows);
ATH_ALLOC_BANK(ah->analogBank7Data, ah->iniBank7.ia_rows);
- ATH_ALLOC_BANK(ah->addac5416_21,
- ah->iniAddac.ia_rows * ah->iniAddac.ia_columns);
ATH_ALLOC_BANK(ah->bank6Temp, ah->iniBank6.ia_rows);
return 0;
@@ -519,7 +517,6 @@ static void ar5008_hw_rf_free_ext_banks(struct ath_hw *ah)
ATH_FREE_BANK(ah->analogBank6Data);
ATH_FREE_BANK(ah->analogBank6TPCData);
ATH_FREE_BANK(ah->analogBank7Data);
- ATH_FREE_BANK(ah->addac5416_21);
ATH_FREE_BANK(ah->bank6Temp);
#undef ATH_FREE_BANK
@@ -805,27 +802,7 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
if (ah->eep_ops->set_addac)
ah->eep_ops->set_addac(ah, chan);
- if (AR_SREV_5416_22_OR_LATER(ah)) {
- REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites);
- } else {
- struct ar5416IniArray temp;
- u32 addacSize =
- sizeof(u32) * ah->iniAddac.ia_rows *
- ah->iniAddac.ia_columns;
-
- /* For AR5416 2.0/2.1 */
- memcpy(ah->addac5416_21,
- ah->iniAddac.ia_array, addacSize);
-
- /* override CLKDRV value at [row, column] = [31, 1] */
- (ah->addac5416_21)[31 * ah->iniAddac.ia_columns + 1] = 0;
-
- temp.ia_array = ah->addac5416_21;
- temp.ia_columns = ah->iniAddac.ia_columns;
- temp.ia_rows = ah->iniAddac.ia_rows;
- REG_WRITE_ARRAY(&temp, 1, regWrites);
- }
-
+ REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites);
REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC);
ENABLE_REGWRITE_BUFFER(ah);
@@ -1053,8 +1030,7 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
u32 level = param;
if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
- ath_dbg(common, ATH_DBG_ANI,
- "level out of range (%u > %zu)\n",
+ ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
level, ARRAY_SIZE(ah->totalSizeDesired));
return false;
}
@@ -1157,8 +1133,7 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
u32 level = param;
if (level >= ARRAY_SIZE(firstep)) {
- ath_dbg(common, ATH_DBG_ANI,
- "level out of range (%u > %zu)\n",
+ ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
level, ARRAY_SIZE(firstep));
return false;
}
@@ -1177,8 +1152,7 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
u32 level = param;
if (level >= ARRAY_SIZE(cycpwrThr1)) {
- ath_dbg(common, ATH_DBG_ANI,
- "level out of range (%u > %zu)\n",
+ ath_dbg(common, ANI, "level out of range (%u > %zu)\n",
level, ARRAY_SIZE(cycpwrThr1));
return false;
}
@@ -1195,23 +1169,22 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
case ATH9K_ANI_PRESENT:
break;
default:
- ath_dbg(common, ATH_DBG_ANI, "invalid cmd %u\n", cmd);
+ ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
return false;
}
- ath_dbg(common, ATH_DBG_ANI, "ANI parameters:\n");
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI, "ANI parameters:\n");
+ ath_dbg(common, ANI,
"noiseImmunityLevel=%d, spurImmunityLevel=%d, ofdmWeakSigDetectOff=%d\n",
aniState->noiseImmunityLevel,
aniState->spurImmunityLevel,
!aniState->ofdmWeakSigDetectOff);
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"cckWeakSigThreshold=%d, firstepLevel=%d, listenTime=%d\n",
aniState->cckWeakSigThreshold,
aniState->firstepLevel,
aniState->listenTime);
- ath_dbg(common, ATH_DBG_ANI,
- "ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
+ ath_dbg(common, ANI, "ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
aniState->ofdmPhyErrCount,
aniState->cckPhyErrCount);
@@ -1295,7 +1268,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
if (!on != aniState->ofdmWeakSigDetectOff) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: ofdm weak signal: %s=>%s\n",
chan->channel,
!aniState->ofdmWeakSigDetectOff ?
@@ -1313,7 +1286,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
u32 level = param;
if (level >= ARRAY_SIZE(firstep_table)) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"ATH9K_ANI_FIRSTEP_LEVEL: level out of range (%u > %zu)\n",
level, ARRAY_SIZE(firstep_table));
return false;
@@ -1350,7 +1323,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
AR_PHY_FIND_SIG_FIRSTEP_LOW, value2);
if (level != aniState->firstepLevel) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] firstep[level]=%d ini=%d\n",
chan->channel,
aniState->firstepLevel,
@@ -1358,7 +1331,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
ATH9K_ANI_FIRSTEP_LVL_NEW,
value,
aniState->iniDef.firstep);
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] firstep_low[level]=%d ini=%d\n",
chan->channel,
aniState->firstepLevel,
@@ -1378,7 +1351,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
u32 level = param;
if (level >= ARRAY_SIZE(cycpwrThr1_table)) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level out of range (%u > %zu)\n",
level, ARRAY_SIZE(cycpwrThr1_table));
return false;
@@ -1414,7 +1387,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
AR_PHY_EXT_TIMING5_CYCPWR_THR1, value2);
if (level != aniState->spurImmunityLevel) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] cycpwrThr1[level]=%d ini=%d\n",
chan->channel,
aniState->spurImmunityLevel,
@@ -1422,7 +1395,7 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
value,
aniState->iniDef.cycpwrThr1);
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] cycpwrThr1Ext[level]=%d ini=%d\n",
chan->channel,
aniState->spurImmunityLevel,
@@ -1448,11 +1421,11 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
case ATH9K_ANI_PRESENT:
break;
default:
- ath_dbg(common, ATH_DBG_ANI, "invalid cmd %u\n", cmd);
+ ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
return false;
}
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n",
aniState->spurImmunityLevel,
!aniState->ofdmWeakSigDetectOff ? "on" : "off",
@@ -1506,7 +1479,7 @@ static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
iniDef = &aniState->iniDef;
- ath_dbg(common, ATH_DBG_ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
+ ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
ah->hw_version.macVersion,
ah->hw_version.macRev,
ah->opmode,
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 88279e3..c55e5bb 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -61,18 +61,16 @@ static void ar9002_hw_setup_calibration(struct ath_hw *ah,
switch (currCal->calData->calType) {
case IQ_MISMATCH_CAL:
REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"starting IQ Mismatch Calibration\n");
break;
case ADC_GAIN_CAL:
REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "starting ADC Gain Calibration\n");
+ ath_dbg(common, CALIBRATE, "starting ADC Gain Calibration\n");
break;
case ADC_DC_CAL:
REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "starting ADC DC Calibration\n");
+ ath_dbg(common, CALIBRATE, "starting ADC DC Calibration\n");
break;
}
@@ -129,7 +127,7 @@ static void ar9002_hw_iqcal_collect(struct ath_hw *ah)
REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
ah->totalIqCorrMeas[i] +=
(int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
"%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
ah->cal_samples, i, ah->totalPowerMeasI[i],
ah->totalPowerMeasQ[i],
@@ -151,7 +149,7 @@ static void ar9002_hw_adc_gaincal_collect(struct ath_hw *ah)
ah->totalAdcQEvenPhase[i] +=
REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
"%d: Chn %d oddi=0x%08x; eveni=0x%08x; oddq=0x%08x; evenq=0x%08x;\n",
ah->cal_samples, i,
ah->totalAdcIOddPhase[i],
@@ -175,7 +173,7 @@ static void ar9002_hw_adc_dccal_collect(struct ath_hw *ah)
ah->totalAdcDcOffsetQEvenPhase[i] +=
(int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
"%d: Chn %d oddi=0x%08x; eveni=0x%08x; oddq=0x%08x; evenq=0x%08x;\n",
ah->cal_samples, i,
ah->totalAdcDcOffsetIOddPhase[i],
@@ -198,12 +196,12 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
powerMeasQ = ah->totalPowerMeasQ[i];
iqCorrMeas = ah->totalIqCorrMeas[i];
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Starting IQ Cal and Correction for Chain %d\n",
i);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Orignal: Chn %diq_corr_meas = 0x%08x\n",
+ ath_dbg(common, CALIBRATE,
+ "Original: Chn %d iq_corr_meas = 0x%08x\n",
i, ah->totalIqCorrMeas[i]);
iqCorrNeg = 0;
@@ -213,12 +211,11 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
iqCorrNeg = 1;
}
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
- ath_dbg(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
- iqCorrNeg);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_i = 0x%08x\n",
+ i, powerMeasI);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_q = 0x%08x\n",
+ i, powerMeasQ);
+ ath_dbg(common, CALIBRATE, "iqCorrNeg is 0x%08x\n", iqCorrNeg);
iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128;
qCoffDenom = powerMeasQ / 64;
@@ -227,13 +224,13 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
(qCoffDenom != 0)) {
iCoff = iqCorrMeas / iCoffDenom;
qCoff = powerMeasI / qCoffDenom - 64;
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d iCoff = 0x%08x\n", i, iCoff);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d qCoff = 0x%08x\n", i, qCoff);
+ ath_dbg(common, CALIBRATE, "Chn %d iCoff = 0x%08x\n",
+ i, iCoff);
+ ath_dbg(common, CALIBRATE, "Chn %d qCoff = 0x%08x\n",
+ i, qCoff);
iCoff = iCoff & 0x3f;
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"New: Chn %d iCoff = 0x%08x\n", i, iCoff);
if (iqCorrNeg == 0x0)
iCoff = 0x40 - iCoff;
@@ -243,7 +240,7 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
else if (qCoff <= -16)
qCoff = -16;
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
i, iCoff, qCoff);
@@ -253,7 +250,7 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
qCoff);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"IQ Cal and Correction done for Chain %d\n",
i);
}
@@ -275,21 +272,17 @@ static void ar9002_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
qOddMeasOffset = ah->totalAdcQOddPhase[i];
qEvenMeasOffset = ah->totalAdcQEvenPhase[i];
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Starting ADC Gain Cal for Chain %d\n", i);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_i = 0x%08x\n", i,
- iOddMeasOffset);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_i = 0x%08x\n", i,
- iEvenMeasOffset);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_q = 0x%08x\n", i,
- qOddMeasOffset);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_q = 0x%08x\n", i,
- qEvenMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_odd_i = 0x%08x\n",
+ i, iOddMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_even_i = 0x%08x\n",
+ i, iEvenMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_odd_q = 0x%08x\n",
+ i, qOddMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_even_q = 0x%08x\n",
+ i, qEvenMeasOffset);
if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
iGainMismatch =
@@ -299,19 +292,19 @@ static void ar9002_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
((qOddMeasOffset * 32) /
qEvenMeasOffset) & 0x3f;
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d gain_mismatch_i = 0x%08x\n", i,
- iGainMismatch);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d gain_mismatch_q = 0x%08x\n", i,
- qGainMismatch);
+ ath_dbg(common, CALIBRATE,
+ "Chn %d gain_mismatch_i = 0x%08x\n",
+ i, iGainMismatch);
+ ath_dbg(common, CALIBRATE,
+ "Chn %d gain_mismatch_q = 0x%08x\n",
+ i, qGainMismatch);
val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
val &= 0xfffff000;
val |= (qGainMismatch) | (iGainMismatch << 6);
REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"ADC Gain Cal done for Chain %d\n", i);
}
}
@@ -337,40 +330,36 @@ static void ar9002_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i];
qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i];
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Starting ADC DC Offset Cal for Chain %d\n", i);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_i = %d\n", i,
- iOddMeasOffset);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_i = %d\n", i,
- iEvenMeasOffset);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_odd_q = %d\n", i,
- qOddMeasOffset);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_even_q = %d\n", i,
- qEvenMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_odd_i = %d\n",
+ i, iOddMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_even_i = %d\n",
+ i, iEvenMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_odd_q = %d\n",
+ i, qOddMeasOffset);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_even_q = %d\n",
+ i, qEvenMeasOffset);
iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) /
numSamples) & 0x1ff;
qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) /
numSamples) & 0x1ff;
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
- iDcMismatch);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
- qDcMismatch);
+ ath_dbg(common, CALIBRATE,
+ "Chn %d dc_offset_mismatch_i = 0x%08x\n",
+ i, iDcMismatch);
+ ath_dbg(common, CALIBRATE,
+ "Chn %d dc_offset_mismatch_q = 0x%08x\n",
+ i, qDcMismatch);
val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
val &= 0xc0000fff;
val |= (qDcMismatch << 12) | (iDcMismatch << 21);
REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"ADC DC Offset Cal done for Chain %d\n", i);
}
@@ -560,7 +549,7 @@ static inline void ar9285_hw_pa_cal(struct ath_hw *ah, bool is_reset)
{ 0x7838, 0 },
};
- ath_dbg(common, ATH_DBG_CALIBRATE, "Running PA Calibration\n");
+ ath_dbg(common, CALIBRATE, "Running PA Calibration\n");
/* PA CAL is not needed for high power solution */
if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) ==
@@ -741,7 +730,7 @@ static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan)
REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"offset calibration failed to complete in 1ms; noisy environment?\n");
return false;
}
@@ -755,7 +744,7 @@ static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan)
REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"offset calibration failed to complete in 1ms; noisy environment?\n");
return false;
}
@@ -851,7 +840,7 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"offset calibration failed to complete in 1ms; noisy environment?\n");
return false;
}
@@ -886,22 +875,21 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
if (ar9002_hw_is_cal_supported(ah, chan, ADC_GAIN_CAL)) {
INIT_CAL(&ah->adcgain_caldata);
INSERT_CAL(ah, &ah->adcgain_caldata);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "enabling ADC Gain Calibration.\n");
+ ath_dbg(common, CALIBRATE,
+ "enabling ADC Gain Calibration\n");
}
if (ar9002_hw_is_cal_supported(ah, chan, ADC_DC_CAL)) {
INIT_CAL(&ah->adcdc_caldata);
INSERT_CAL(ah, &ah->adcdc_caldata);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "enabling ADC DC Calibration.\n");
+ ath_dbg(common, CALIBRATE,
+ "enabling ADC DC Calibration\n");
}
if (ar9002_hw_is_cal_supported(ah, chan, IQ_MISMATCH_CAL)) {
INIT_CAL(&ah->iq_caldata);
INSERT_CAL(ah, &ah->iq_caldata);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "enabling IQ Calibration.\n");
+ ath_dbg(common, CALIBRATE, "enabling IQ Calibration\n");
}
ah->cal_list_curr = ah->cal_list;
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index 11f192a..d190411 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -180,6 +180,25 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac,
ARRAY_SIZE(ar5416Addac), 2);
}
+
+ /* iniAddac needs to be modified for these chips */
+ if (AR_SREV_9160(ah) || !AR_SREV_5416_22_OR_LATER(ah)) {
+ struct ar5416IniArray *addac = &ah->iniAddac;
+ u32 size = sizeof(u32) * addac->ia_rows * addac->ia_columns;
+ u32 *data;
+
+ data = kmalloc(size, GFP_KERNEL);
+ if (!data)
+ return;
+
+ memcpy(data, addac->ia_array, size);
+ addac->ia_array = data;
+
+ if (!AR_SREV_5416_22_OR_LATER(ah)) {
+ /* override CLKDRV value */
+ INI_RA(addac, 31, 1) = 0;
+ }
+ }
}
/* Support for Japan ch.14 (2484) spread */
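The ar9002_hw.c hunk above duplicates the ADDAC initvals into a writable buffer and then clears the CLKDRV entry with INI_RA(addac, 31, 1). The following user-space sketch shows the flat row/column addressing involved; the layout (element = ia_array[row * ia_columns + column]) is inferred from the open-coded version removed from ar5008_phy.c earlier in this patch, and the structure and macro names here are illustrative stand-ins rather than the ath9k definitions.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct ini_array_sketch {
	uint32_t *ia_array;		/* flat row-major table */
	unsigned int ia_rows;
	unsigned int ia_columns;
};

/* Assumed addressing, matching the removed open-coded write */
#define INI_RA_SKETCH(t, row, col) \
	((t)->ia_array[(row) * (t)->ia_columns + (col)])

static int override_clkdrv(struct ini_array_sketch *addac)
{
	size_t size = sizeof(uint32_t) * addac->ia_rows * addac->ia_columns;
	uint32_t *copy;

	/* Copy first so the original initval table stays untouched. */
	copy = malloc(size);
	if (!copy)
		return -1;
	memcpy(copy, addac->ia_array, size);
	addac->ia_array = copy;

	INI_RA_SKETCH(addac, 31, 1) = 0;	/* clear CLKDRV at [31, 1] */
	return 0;
}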
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index b592016..7b6417b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -107,7 +107,7 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
}
if (isr & AR_ISR_RXORN) {
- ath_dbg(common, ATH_DBG_INTERRUPT,
+ ath_dbg(common, INTERRUPT,
"receive FIFO overrun interrupt\n");
}
@@ -143,24 +143,24 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
if (fatal_int) {
if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
- ath_dbg(common, ATH_DBG_ANY,
+ ath_dbg(common, ANY,
"received PCI FATAL interrupt\n");
}
if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
- ath_dbg(common, ATH_DBG_ANY,
+ ath_dbg(common, ANY,
"received PCI PERR interrupt\n");
}
*masked |= ATH9K_INT_FATAL;
}
if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
- ath_dbg(common, ATH_DBG_INTERRUPT,
+ ath_dbg(common, INTERRUPT,
"AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
REG_WRITE(ah, AR_RC, 0);
*masked |= ATH9K_INT_FATAL;
}
if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
- ath_dbg(common, ATH_DBG_INTERRUPT,
+ ath_dbg(common, INTERRUPT,
"AR_INTR_SYNC_LOCAL_TIMEOUT\n");
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 12a730d..8e70f0b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -18,6 +18,7 @@
#include "hw-ops.h"
#include "ar9003_phy.h"
#include "ar9003_rtt.h"
+#include "ar9003_mci.h"
#define MAX_MEASUREMENT MAX_IQCAL_MEASUREMENT
#define MAX_MAG_DELTA 11
@@ -51,7 +52,7 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah,
currCal->calData->calCountMax);
REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"starting IQ Mismatch Calibration\n");
/* Kick-off cal */
@@ -63,7 +64,7 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_THERM,
AR_PHY_65NM_CH0_THERM_START, 1);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"starting Temperature Compensation Calibration\n");
break;
}
@@ -193,7 +194,7 @@ static void ar9003_hw_iqcal_collect(struct ath_hw *ah)
REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
ah->totalIqCorrMeas[i] +=
(int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
"%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
ah->cal_samples, i, ah->totalPowerMeasI[i],
ah->totalPowerMeasQ[i],
@@ -220,12 +221,11 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
powerMeasQ = ah->totalPowerMeasQ[i];
iqCorrMeas = ah->totalIqCorrMeas[i];
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Starting IQ Cal and Correction for Chain %d\n",
- i);
+ ath_dbg(common, CALIBRATE,
+ "Starting IQ Cal and Correction for Chain %d\n", i);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Orignal: Chn %diq_corr_meas = 0x%08x\n",
+ ath_dbg(common, CALIBRATE,
+ "Original: Chn %d iq_corr_meas = 0x%08x\n",
i, ah->totalIqCorrMeas[i]);
iqCorrNeg = 0;
@@ -235,12 +235,11 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
iqCorrNeg = 1;
}
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
- ath_dbg(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
- iqCorrNeg);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_i = 0x%08x\n",
+ i, powerMeasI);
+ ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_q = 0x%08x\n",
+ i, powerMeasQ);
+ ath_dbg(common, CALIBRATE, "iqCorrNeg is 0x%08x\n", iqCorrNeg);
iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 256;
qCoffDenom = powerMeasQ / 64;
@@ -248,10 +247,10 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
if ((iCoffDenom != 0) && (qCoffDenom != 0)) {
iCoff = iqCorrMeas / iCoffDenom;
qCoff = powerMeasI / qCoffDenom - 64;
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d iCoff = 0x%08x\n", i, iCoff);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Chn %d qCoff = 0x%08x\n", i, qCoff);
+ ath_dbg(common, CALIBRATE, "Chn %d iCoff = 0x%08x\n",
+ i, iCoff);
+ ath_dbg(common, CALIBRATE, "Chn %d qCoff = 0x%08x\n",
+ i, qCoff);
/* Force bounds on iCoff */
if (iCoff >= 63)
@@ -272,10 +271,10 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
iCoff = iCoff & 0x7f;
qCoff = qCoff & 0x7f;
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
i, iCoff, qCoff);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Register offset (0x%04x) before update = 0x%x\n",
offset_array[i],
REG_READ(ah, offset_array[i]));
@@ -286,25 +285,25 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
REG_RMW_FIELD(ah, offset_array[i],
AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF,
qCoff);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Register offset (0x%04x) QI COFF (bitfields 0x%08x) after update = 0x%x\n",
offset_array[i],
AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF,
REG_READ(ah, offset_array[i]));
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Register offset (0x%04x) QQ COFF (bitfields 0x%08x) after update = 0x%x\n",
offset_array[i],
AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF,
REG_READ(ah, offset_array[i]));
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"IQ Cal and Correction done for Chain %d\n", i);
}
}
REG_SET_BIT(ah, AR_PHY_RX_IQCAL_CORR_B0,
AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"IQ Cal and Correction (offset 0x%04x) enabled (bit position 0x%08x). New Value 0x%08x\n",
(unsigned) (AR_PHY_RX_IQCAL_CORR_B0),
AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE,
@@ -348,7 +347,7 @@ static bool ar9003_hw_solve_iq_cal(struct ath_hw *ah,
f2 = (f1 * f1 + f3 * f3) / result_shift;
if (!f2) {
- ath_dbg(common, ATH_DBG_CALIBRATE, "Divide by 0\n");
+ ath_dbg(common, CALIBRATE, "Divide by 0\n");
return false;
}
@@ -469,7 +468,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
if ((i2_p_q2_a0_d0 == 0) || (i2_p_q2_a0_d1 == 0) ||
(i2_p_q2_a1_d0 == 0) || (i2_p_q2_a1_d1 == 0)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Divide by 0:\n"
"a0_d0=%d\n"
"a0_d1=%d\n"
@@ -509,8 +508,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
mag2 = ar9003_hw_find_mag_approx(ah, cos_2phi_2, sin_2phi_2);
if ((mag1 == 0) || (mag2 == 0)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Divide by 0: mag1=%d, mag2=%d\n",
+ ath_dbg(common, CALIBRATE, "Divide by 0: mag1=%d, mag2=%d\n",
mag1, mag2);
return false;
}
@@ -528,8 +526,8 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
mag_a0_d0, phs_a0_d0,
mag_a1_d0,
phs_a1_d0, solved_eq)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Call to ar9003_hw_solve_iq_cal() failed.\n");
+ ath_dbg(common, CALIBRATE,
+ "Call to ar9003_hw_solve_iq_cal() failed\n");
return false;
}
@@ -538,12 +536,12 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
mag_rx = solved_eq[2];
phs_rx = solved_eq[3];
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"chain %d: mag mismatch=%d phase mismatch=%d\n",
chain_idx, mag_tx/res_scale, phs_tx/res_scale);
if (res_scale == mag_tx) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Divide by 0: mag_tx=%d, res_scale=%d\n",
mag_tx, res_scale);
return false;
@@ -556,8 +554,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
q_q_coff = (mag_corr_tx * 128 / res_scale);
q_i_coff = (phs_corr_tx * 256 / res_scale);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "tx chain %d: mag corr=%d phase corr=%d\n",
+ ath_dbg(common, CALIBRATE, "tx chain %d: mag corr=%d phase corr=%d\n",
chain_idx, q_q_coff, q_i_coff);
if (q_i_coff < -63)
@@ -571,12 +568,11 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
iqc_coeff[0] = (q_q_coff * 128) + q_i_coff;
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "tx chain %d: iq corr coeff=%x\n",
+ ath_dbg(common, CALIBRATE, "tx chain %d: iq corr coeff=%x\n",
chain_idx, iqc_coeff[0]);
if (-mag_rx == res_scale) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Divide by 0: mag_rx=%d, res_scale=%d\n",
mag_rx, res_scale);
return false;
@@ -589,8 +585,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
q_q_coff = (mag_corr_rx * 128 / res_scale);
q_i_coff = (phs_corr_rx * 256 / res_scale);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "rx chain %d: mag corr=%d phase corr=%d\n",
+ ath_dbg(common, CALIBRATE, "rx chain %d: mag corr=%d phase corr=%d\n",
chain_idx, q_q_coff, q_i_coff);
if (q_i_coff < -63)
@@ -604,8 +599,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
iqc_coeff[1] = (q_q_coff * 128) + q_i_coff;
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "rx chain %d: iq corr coeff=%x\n",
+ ath_dbg(common, CALIBRATE, "rx chain %d: iq corr coeff=%x\n",
chain_idx, iqc_coeff[1]);
return true;
@@ -752,8 +746,7 @@ static bool ar9003_hw_tx_iq_cal_run(struct ath_hw *ah)
if (!ath9k_hw_wait(ah, AR_PHY_TX_IQCAL_START,
AR_PHY_TX_IQCAL_START_DO_CAL, 0,
AH_WAIT_TIMEOUT)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Tx IQ Cal is not completed.\n");
+ ath_dbg(common, CALIBRATE, "Tx IQ Cal is not completed\n");
return false;
}
return true;
@@ -791,13 +784,13 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
nmeasurement = MAX_MEASUREMENT;
for (im = 0; im < nmeasurement; im++) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Doing Tx IQ Cal for chain %d.\n", i);
+ ath_dbg(common, CALIBRATE,
+ "Doing Tx IQ Cal for chain %d\n", i);
if (REG_READ(ah, txiqcal_status[i]) &
AR_PHY_TX_IQCAL_STATUS_FAILED) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Tx IQ Cal failed for chain %d.\n", i);
+ ath_dbg(common, CALIBRATE,
+ "Tx IQ Cal failed for chain %d\n", i);
goto tx_iqcal_fail;
}
@@ -823,18 +816,16 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
iq_res[idx + 1] = 0xffff & REG_READ(ah,
chan_info_tab[i] + offset);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "IQ RES[%d]=0x%x"
- "IQ_RES[%d]=0x%x\n",
+ ath_dbg(common, CALIBRATE,
+ "IQ_RES[%d]=0x%x IQ_RES[%d]=0x%x\n",
idx, iq_res[idx], idx + 1,
iq_res[idx + 1]);
}
if (!ar9003_hw_calc_iq_corr(ah, i, iq_res,
coeff.iqc_coeff)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Failed in calculation of \
- IQ correction.\n");
+ ath_dbg(common, CALIBRATE,
+ "Failed in calculation of IQ correction\n");
goto tx_iqcal_fail;
}
@@ -854,7 +845,7 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
return;
tx_iqcal_fail:
- ath_dbg(common, ATH_DBG_CALIBRATE, "Tx IQ Cal failed\n");
+ ath_dbg(common, CALIBRATE, "Tx IQ Cal failed\n");
return;
}
@@ -934,10 +925,12 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_cal_data *caldata = ah->caldata;
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
bool txiqcal_done = false, txclcal_done = false;
bool is_reusable = true, status = true;
bool run_rtt_cal = false, run_agc_cal;
bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
+ bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
AR_PHY_AGC_CONTROL_FLTR_CAL |
AR_PHY_AGC_CONTROL_PKDET_CAL;
@@ -950,7 +943,7 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
if (!ar9003_hw_rtt_restore(ah, chan))
run_rtt_cal = true;
- ath_dbg(common, ATH_DBG_CALIBRATE, "RTT restore %s\n",
+ ath_dbg(common, CALIBRATE, "RTT restore %s\n",
run_rtt_cal ? "failed" : "succeed");
}
run_agc_cal = run_rtt_cal;
@@ -1005,6 +998,31 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
} else if (caldata && !caldata->done_txiqcal_once)
run_agc_cal = true;
+ if (mci && IS_CHAN_2GHZ(chan) &&
+ (mci_hw->bt_state == MCI_BT_AWAKE) &&
+ run_agc_cal &&
+ !(mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL)) {
+
+ u32 pld[4] = {0, 0, 0, 0};
+
+ /* send CAL_REQ only when BT is AWAKE. */
+ ath_dbg(common, MCI, "MCI send WLAN_CAL_REQ 0x%x\n",
+ mci_hw->wlan_cal_seq);
+ MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_REQ);
+ pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_seq++;
+ ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
+
+ /* Wait BT_CAL_GRANT for 50ms */
+ ath_dbg(common, MCI, "MCI wait for BT_CAL_GRANT\n");
+
+ if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_GRANT, 0, 50000))
+ ath_dbg(common, MCI, "MCI got BT_CAL_GRANT\n");
+ else {
+ is_reusable = false;
+ ath_dbg(common, MCI, "\nMCI BT is not responding\n");
+ }
+ }
+
txiqcal_done = ar9003_hw_tx_iq_cal_run(ah);
REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
udelay(5);
@@ -1022,6 +1040,21 @@ skip_tx_iqcal:
AR_PHY_AGC_CONTROL_CAL,
0, AH_WAIT_TIMEOUT);
}
+
+ if (mci && IS_CHAN_2GHZ(chan) &&
+ (mci_hw->bt_state == MCI_BT_AWAKE) &&
+ run_agc_cal &&
+ !(mci_hw->config & ATH_MCI_CONFIG_DISABLE_MCI_CAL)) {
+
+ u32 pld[4] = {0, 0, 0, 0};
+
+ ath_dbg(common, MCI, "MCI Send WLAN_CAL_DONE 0x%x\n",
+ mci_hw->wlan_cal_done);
+ MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_DONE);
+ pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_done++;
+ ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
+ }
+
if (rtt && !run_rtt_cal) {
agc_ctrl |= agc_supp_cals;
REG_WRITE(ah, AR_PHY_AGC_CONTROL, agc_ctrl);
@@ -1031,9 +1064,8 @@ skip_tx_iqcal:
if (run_rtt_cal)
ar9003_hw_rtt_disable(ah);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "offset calibration failed to complete in 1ms;"
- "noisy environment?\n");
+ ath_dbg(common, CALIBRATE,
+ "offset calibration failed to complete in 1ms; noisy environment?\n");
return false;
}
@@ -1092,15 +1124,14 @@ skip_tx_iqcal:
if (ah->supp_cals & IQ_MISMATCH_CAL) {
INIT_CAL(&ah->iq_caldata);
INSERT_CAL(ah, &ah->iq_caldata);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "enabling IQ Calibration.\n");
+ ath_dbg(common, CALIBRATE, "enabling IQ Calibration\n");
}
if (ah->supp_cals & TEMP_COMP_CAL) {
INIT_CAL(&ah->tempCompCalData);
INSERT_CAL(ah, &ah->tempCompCalData);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "enabling Temperature Compensation Calibration.\n");
+ ath_dbg(common, CALIBRATE,
+ "enabling Temperature Compensation Calibration\n");
}
/* Initialize current pointer to first element in list */
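For reference, the MCI calibration handshake added in the hunks above, condensed into a single illustrative sketch (not part of the patch): the wrapper function is hypothetical, while the helpers, macros and the 50 ms grant timeout are the ones used in the hunks above.

/* Sketch only: condensed form of the WLAN/BT calibration handshake. */
static void mci_cal_handshake_sketch(struct ath_hw *ah,
				     struct ath9k_hw_mci *mci_hw)
{
	u32 pld[4] = {0, 0, 0, 0};

	/* 1. Ask BT for permission to calibrate (WLAN_CAL_REQ). */
	MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_REQ);
	pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_seq++;
	ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);

	/* 2. Wait up to 50 ms for BT_CAL_GRANT before calibrating. */
	if (!ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_GRANT, 0, 50000))
		return;	/* BT not responding; caldata marked not reusable */

	/* ... the AGC/Tx IQ calibration runs here ... */

	/* 3. Tell BT that the calibration has finished (WLAN_CAL_DONE). */
	MCI_GPM_SET_CAL_TYPE(pld, MCI_GPM_WLAN_CAL_DONE);
	pld[MCI_GPM_WLAN_CAL_W_SEQUENCE] = mci_hw->wlan_cal_done++;
	ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16, true, false);
}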
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 3b262ba..9fbcbdd 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -121,10 +121,8 @@ static const struct ar9300_eeprom ar9300_default = {
* if the register is per chain
*/
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {1, 1, 1},/* 3 chain */
- .db_stage2 = {1, 1, 1}, /* 3 chain */
- .db_stage3 = {0, 0, 0},
- .db_stage4 = {0, 0, 0},
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -144,7 +142,7 @@ static const struct ar9300_eeprom ar9300_default = {
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
FREQ2FBIN(2412, 1),
@@ -323,10 +321,8 @@ static const struct ar9300_eeprom ar9300_default = {
.spurChans = {0, 0, 0, 0, 0},
/* noiseFloorThreshCh Check if the register is per chain */
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {3, 3, 3}, /* 3 chain */
- .db_stage2 = {3, 3, 3}, /* 3 chain */
- .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
- .db_stage4 = {3, 3, 3}, /* don't exist for 2G */
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -698,10 +694,8 @@ static const struct ar9300_eeprom ar9300_x113 = {
* if the register is per chain
*/
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {1, 1, 1},/* 3 chain */
- .db_stage2 = {1, 1, 1}, /* 3 chain */
- .db_stage3 = {0, 0, 0},
- .db_stage4 = {0, 0, 0},
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -721,7 +715,7 @@ static const struct ar9300_eeprom ar9300_x113 = {
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
FREQ2FBIN(2412, 1),
@@ -900,10 +894,8 @@ static const struct ar9300_eeprom ar9300_x113 = {
.spurChans = {FREQ2FBIN(5500, 0), 0, 0, 0, 0},
/* noiseFloorThreshCh Check if the register is per chain */
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {3, 3, 3}, /* 3 chain */
- .db_stage2 = {3, 3, 3}, /* 3 chain */
- .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
- .db_stage4 = {3, 3, 3}, /* don't exist for 2G */
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0xf,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -1276,10 +1268,8 @@ static const struct ar9300_eeprom ar9300_h112 = {
* if the register is per chain
*/
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {1, 1, 1},/* 3 chain */
- .db_stage2 = {1, 1, 1}, /* 3 chain */
- .db_stage3 = {0, 0, 0},
- .db_stage4 = {0, 0, 0},
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -1291,20 +1281,20 @@ static const struct ar9300_eeprom ar9300_h112 = {
.txEndToRxOn = 0x2,
.txFrameToXpaOn = 0xe,
.thresh62 = 28,
- .papdRateMaskHt20 = LE32(0x80c080),
- .papdRateMaskHt40 = LE32(0x80c080),
+ .papdRateMaskHt20 = LE32(0x0c80c080),
+ .papdRateMaskHt40 = LE32(0x0080c080),
.futureModal = {
0, 0, 0, 0, 0, 0, 0, 0,
},
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
FREQ2FBIN(2412, 1),
FREQ2FBIN(2437, 1),
- FREQ2FBIN(2472, 1),
+ FREQ2FBIN(2462, 1),
},
/* ar9300_cal_data_per_freq_op_loop 2g */
.calPierData2G = {
@@ -1314,7 +1304,7 @@ static const struct ar9300_eeprom ar9300_h112 = {
},
.calTarget_freqbin_Cck = {
FREQ2FBIN(2412, 1),
- FREQ2FBIN(2484, 1),
+ FREQ2FBIN(2472, 1),
},
.calTarget_freqbin_2G = {
FREQ2FBIN(2412, 1),
@@ -1478,10 +1468,8 @@ static const struct ar9300_eeprom ar9300_h112 = {
.spurChans = {0, 0, 0, 0, 0},
/* noiseFloorThreshCh Check if the register is per chain */
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {3, 3, 3}, /* 3 chain */
- .db_stage2 = {3, 3, 3}, /* 3 chain */
- .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
- .db_stage4 = {3, 3, 3}, /* don't exist for 2G */
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -1515,7 +1503,7 @@ static const struct ar9300_eeprom ar9300_h112 = {
FREQ2FBIN(5500, 0),
FREQ2FBIN(5600, 0),
FREQ2FBIN(5700, 0),
- FREQ2FBIN(5825, 0)
+ FREQ2FBIN(5785, 0)
},
.calPierData5G = {
{
@@ -1854,10 +1842,8 @@ static const struct ar9300_eeprom ar9300_x112 = {
* if the register is per chain
*/
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {1, 1, 1},/* 3 chain */
- .db_stage2 = {1, 1, 1}, /* 3 chain */
- .db_stage3 = {0, 0, 0},
- .db_stage4 = {0, 0, 0},
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -1877,7 +1863,7 @@ static const struct ar9300_eeprom ar9300_x112 = {
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
FREQ2FBIN(2412, 1),
@@ -2056,10 +2042,8 @@ static const struct ar9300_eeprom ar9300_x112 = {
.spurChans = {0, 0, 0, 0, 0},
/* noiseFloorThreshch check if the register is per chain */
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {3, 3, 3}, /* 3 chain */
- .db_stage2 = {3, 3, 3}, /* 3 chain */
- .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
- .db_stage4 = {3, 3, 3}, /* don't exist for 2G */
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -2431,10 +2415,8 @@ static const struct ar9300_eeprom ar9300_h116 = {
* if the register is per chain
*/
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {1, 1, 1},/* 3 chain */
- .db_stage2 = {1, 1, 1}, /* 3 chain */
- .db_stage3 = {0, 0, 0},
- .db_stage4 = {0, 0, 0},
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -2454,12 +2436,12 @@ static const struct ar9300_eeprom ar9300_h116 = {
},
.base_ext1 = {
.ant_div_control = 0,
- .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+ .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
},
.calFreqPier2G = {
FREQ2FBIN(2412, 1),
FREQ2FBIN(2437, 1),
- FREQ2FBIN(2472, 1),
+ FREQ2FBIN(2462, 1),
},
/* ar9300_cal_data_per_freq_op_loop 2g */
.calPierData2G = {
@@ -2633,10 +2615,8 @@ static const struct ar9300_eeprom ar9300_h116 = {
.spurChans = {0, 0, 0, 0, 0},
/* noiseFloorThreshCh Check if the register is per chain */
.noiseFloorThreshCh = {-1, 0, 0},
- .ob = {3, 3, 3}, /* 3 chain */
- .db_stage2 = {3, 3, 3}, /* 3 chain */
- .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
- .db_stage4 = {3, 3, 3}, /* don't exist for 2G */
+ .reserved = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+ .quick_drop = 0,
.xpaBiasLvl = 0,
.txFrameToDataStart = 0x0e,
.txFrameToPaOn = 0x0e,
@@ -2663,7 +2643,7 @@ static const struct ar9300_eeprom ar9300_h116 = {
.xatten1MarginHigh = {0, 0, 0}
},
.calFreqPier5G = {
- FREQ2FBIN(5180, 0),
+ FREQ2FBIN(5160, 0),
FREQ2FBIN(5220, 0),
FREQ2FBIN(5320, 0),
FREQ2FBIN(5400, 0),
@@ -3023,6 +3003,8 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
return eep->modalHeader5G.antennaGain;
case EEP_ANTENNA_GAIN_2G:
return eep->modalHeader2G.antennaGain;
+ case EEP_QUICK_DROP:
+ return pBase->miscConfiguration & BIT(1);
default:
return 0;
}
@@ -3061,8 +3043,7 @@ static bool ar9300_read_eeprom(struct ath_hw *ah, int address, u8 *buffer,
int i;
if ((address < 0) || ((address + count) / 2 > AR9300_EEPROM_SIZE - 1)) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "eeprom address not in range\n");
+ ath_dbg(common, EEPROM, "eeprom address not in range\n");
return false;
}
@@ -3093,8 +3074,8 @@ static bool ar9300_read_eeprom(struct ath_hw *ah, int address, u8 *buffer,
return true;
error:
- ath_dbg(common, ATH_DBG_EEPROM,
- "unable to read eeprom region at offset %d\n", address);
+ ath_dbg(common, EEPROM, "unable to read eeprom region at offset %d\n",
+ address);
return false;
}
@@ -3178,13 +3159,13 @@ static bool ar9300_uncompress_block(struct ath_hw *ah,
length &= 0xff;
if (length > 0 && spot >= 0 && spot+length <= mdataSize) {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"Restore at %d: spot=%d offset=%d length=%d\n",
it, spot, offset, length);
memcpy(&mptr[spot], &block[it+2], length);
spot += length;
} else if (length > 0) {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"Bad restore at %d: spot=%d offset=%d length=%d\n",
it, spot, offset, length);
return false;
@@ -3206,13 +3187,13 @@ static int ar9300_compress_decision(struct ath_hw *ah,
switch (code) {
case _CompressNone:
if (length != mdata_size) {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"EEPROM structure size mismatch memory=%d eeprom=%d\n",
mdata_size, length);
return -1;
}
memcpy(mptr, (u8 *) (word + COMP_HDR_LEN), length);
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"restored eeprom %d: uncompressed, length %d\n",
it, length);
break;
@@ -3221,22 +3202,21 @@ static int ar9300_compress_decision(struct ath_hw *ah,
} else {
eep = ar9003_eeprom_struct_find_by_id(reference);
if (eep == NULL) {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"can't find reference eeprom struct %d\n",
reference);
return -1;
}
memcpy(mptr, eep, mdata_size);
}
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"restore eeprom %d: block, reference %d, length %d\n",
it, reference, length);
ar9300_uncompress_block(ah, mptr, mdata_size,
(u8 *) (word + COMP_HDR_LEN), length);
break;
default:
- ath_dbg(common, ATH_DBG_EEPROM,
- "unknown compression code %d\n", code);
+ ath_dbg(common, EEPROM, "unknown compression code %d\n", code);
return -1;
}
return 0;
@@ -3312,34 +3292,32 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
cptr = AR9300_BASE_ADDR_512;
else
cptr = AR9300_BASE_ADDR;
- ath_dbg(common, ATH_DBG_EEPROM,
- "Trying EEPROM access at Address 0x%04x\n", cptr);
+ ath_dbg(common, EEPROM, "Trying EEPROM access at Address 0x%04x\n",
+ cptr);
if (ar9300_check_eeprom_header(ah, read, cptr))
goto found;
cptr = AR9300_BASE_ADDR_512;
- ath_dbg(common, ATH_DBG_EEPROM,
- "Trying EEPROM access at Address 0x%04x\n", cptr);
+ ath_dbg(common, EEPROM, "Trying EEPROM access at Address 0x%04x\n",
+ cptr);
if (ar9300_check_eeprom_header(ah, read, cptr))
goto found;
read = ar9300_read_otp;
cptr = AR9300_BASE_ADDR;
- ath_dbg(common, ATH_DBG_EEPROM,
- "Trying OTP access at Address 0x%04x\n", cptr);
+ ath_dbg(common, EEPROM, "Trying OTP access at Address 0x%04x\n", cptr);
if (ar9300_check_eeprom_header(ah, read, cptr))
goto found;
cptr = AR9300_BASE_ADDR_512;
- ath_dbg(common, ATH_DBG_EEPROM,
- "Trying OTP access at Address 0x%04x\n", cptr);
+ ath_dbg(common, EEPROM, "Trying OTP access at Address 0x%04x\n", cptr);
if (ar9300_check_eeprom_header(ah, read, cptr))
goto found;
goto fail;
found:
- ath_dbg(common, ATH_DBG_EEPROM, "Found valid EEPROM data\n");
+ ath_dbg(common, EEPROM, "Found valid EEPROM data\n");
for (it = 0; it < MSTATE; it++) {
if (!read(ah, cptr, word, COMP_HDR_LEN))
@@ -3350,13 +3328,12 @@ found:
ar9300_comp_hdr_unpack(word, &code, &reference,
&length, &major, &minor);
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"Found block at %x: code=%d ref=%d length=%d major=%d minor=%d\n",
cptr, code, reference, length, major, minor);
if ((!AR_SREV_9485(ah) && length >= 1024) ||
(AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485)) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "Skipping bad header\n");
+ ath_dbg(common, EEPROM, "Skipping bad header\n");
cptr -= COMP_HDR_LEN;
continue;
}
@@ -3365,13 +3342,13 @@ found:
read(ah, cptr, word, COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
checksum = ar9300_comp_cksum(&word[COMP_HDR_LEN], length);
mchecksum = get_unaligned_le16(&word[COMP_HDR_LEN + osize]);
- ath_dbg(common, ATH_DBG_EEPROM,
- "checksum %x %x\n", checksum, mchecksum);
+ ath_dbg(common, EEPROM, "checksum %x %x\n",
+ checksum, mchecksum);
if (checksum == mchecksum) {
ar9300_compress_decision(ah, it, code, reference, mptr,
word, length, mdata_size);
} else {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"skipping block with bad checksum\n");
}
cptr -= (COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
@@ -3428,25 +3405,14 @@ static u32 ar9003_dump_modal_eeprom(char *buf, u32 len, u32 size,
PR_EEP("Chain0 NF Threshold", modal_hdr->noiseFloorThreshCh[0]);
PR_EEP("Chain1 NF Threshold", modal_hdr->noiseFloorThreshCh[1]);
PR_EEP("Chain2 NF Threshold", modal_hdr->noiseFloorThreshCh[2]);
+ PR_EEP("Quick Drop", modal_hdr->quick_drop);
+ PR_EEP("txEndToXpaOff", modal_hdr->txEndToXpaOff);
PR_EEP("xPA Bias Level", modal_hdr->xpaBiasLvl);
PR_EEP("txFrameToDataStart", modal_hdr->txFrameToDataStart);
PR_EEP("txFrameToPaOn", modal_hdr->txFrameToPaOn);
PR_EEP("txFrameToXpaOn", modal_hdr->txFrameToXpaOn);
PR_EEP("txClip", modal_hdr->txClip);
PR_EEP("ADC Desired size", modal_hdr->adcDesiredSize);
- PR_EEP("Chain0 ob", modal_hdr->ob[0]);
- PR_EEP("Chain1 ob", modal_hdr->ob[1]);
- PR_EEP("Chain2 ob", modal_hdr->ob[2]);
-
- PR_EEP("Chain0 db_stage2", modal_hdr->db_stage2[0]);
- PR_EEP("Chain1 db_stage2", modal_hdr->db_stage2[1]);
- PR_EEP("Chain2 db_stage2", modal_hdr->db_stage2[2]);
- PR_EEP("Chain0 db_stage3", modal_hdr->db_stage3[0]);
- PR_EEP("Chain1 db_stage3", modal_hdr->db_stage3[1]);
- PR_EEP("Chain2 db_stage3", modal_hdr->db_stage3[2]);
- PR_EEP("Chain0 db_stage4", modal_hdr->db_stage4[0]);
- PR_EEP("Chain1 db_stage4", modal_hdr->db_stage4[1]);
- PR_EEP("Chain2 db_stage4", modal_hdr->db_stage4[2]);
return len;
}
@@ -3503,6 +3469,7 @@ static u32 ath9k_hw_ar9003_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr,
PR_EEP("Internal regulator", !!(pBase->featureEnable & BIT(4)));
PR_EEP("Enable Paprd", !!(pBase->featureEnable & BIT(5)));
PR_EEP("Driver Strength", !!(pBase->miscConfiguration & BIT(0)));
+ PR_EEP("Quick Drop", !!(pBase->miscConfiguration & BIT(1)));
PR_EEP("Chain mask Reduce", (pBase->miscConfiguration >> 0x3) & 0x1);
PR_EEP("Write enable Gpio", pBase->eepromWriteEnableGpio);
PR_EEP("WLAN Disable Gpio", pBase->wlanDisableGpio);
@@ -3571,13 +3538,13 @@ static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
static u16 ar9003_switch_com_spdt_get(struct ath_hw *ah, bool is_2ghz)
{
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
- __le32 val;
+ __le16 val;
if (is_2ghz)
val = eep->modalHeader2G.switchcomspdt;
else
val = eep->modalHeader5G.switchcomspdt;
- return le32_to_cpu(val);
+ return le16_to_cpu(val);
}
@@ -3965,6 +3932,40 @@ static void ar9003_hw_apply_tuning_caps(struct ath_hw *ah)
}
}
+static void ar9003_hw_quick_drop_apply(struct ath_hw *ah, u16 freq)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ int quick_drop = ath9k_hw_ar9300_get_eeprom(ah, EEP_QUICK_DROP);
+ s32 t[3], f[3] = {5180, 5500, 5785};
+
+ if (!quick_drop)
+ return;
+
+ if (freq < 4000)
+ quick_drop = eep->modalHeader2G.quick_drop;
+ else {
+ t[0] = eep->base_ext1.quick_drop_low;
+ t[1] = eep->modalHeader5G.quick_drop;
+ t[2] = eep->base_ext1.quick_drop_high;
+ quick_drop = ar9003_hw_power_interpolate(freq, f, t, 3);
+ }
+ REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop);
+}
+
+static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, u16 freq)
+{
+ struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
+ u32 value;
+
+ value = (freq < 4000) ? eep->modalHeader2G.txEndToXpaOff :
+ eep->modalHeader5G.txEndToXpaOff;
+
+ REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL,
+ AR_PHY_XPA_TIMING_CTL_TX_END_XPAB_OFF, value);
+ REG_RMW_FIELD(ah, AR_PHY_XPA_TIMING_CTL,
+ AR_PHY_XPA_TIMING_CTL_TX_END_XPAA_OFF, value);
+}
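The quick_drop path above leans on ar9003_hw_power_interpolate(), which is not shown in this diff. As an illustration only, assuming that helper performs piecewise-linear interpolation between the three (frequency, value) anchors, the 5 GHz quick_drop value would be derived roughly as in this hypothetical sketch:

/* Sketch only: piecewise-linear interpolation between the anchors
 * used by ar9003_hw_quick_drop_apply() above. */
static s32 quick_drop_interp_sketch(u16 freq, s32 low, s32 mid, s32 high)
{
	const s32 f[3] = { 5180, 5500, 5785 };	/* anchor frequencies (MHz) */
	s32 t[3] = { low, mid, high };		/* quick_drop_low/5G/high */
	int i;

	if (freq <= f[0])
		return t[0];
	if (freq >= f[2])
		return t[2];

	for (i = 0; i < 2; i++)
		if (freq <= f[i + 1])
			return t[i] + ((t[i + 1] - t[i]) *
				       (s32)(freq - f[i])) / (f[i + 1] - f[i]);
	return t[2];
}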
+
static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
struct ath9k_channel *chan)
{
@@ -3972,10 +3973,12 @@ static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
ar9003_hw_ant_ctrl_apply(ah, IS_CHAN_2GHZ(chan));
ar9003_hw_drive_strength_apply(ah);
ar9003_hw_atten_apply(ah, chan);
+ ar9003_hw_quick_drop_apply(ah, chan->channel);
if (!AR_SREV_9330(ah) && !AR_SREV_9340(ah))
ar9003_hw_internal_regulator_apply(ah);
if (AR_SREV_9485(ah) || AR_SREV_9330(ah) || AR_SREV_9340(ah))
ar9003_hw_apply_tuning_caps(ah);
+ ar9003_hw_txend_to_xpa_off_apply(ah, chan->channel);
}
static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah,
@@ -4416,8 +4419,8 @@ static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq,
is2GHz) + ht40PowerIncForPdadc;
for (i = 0; i < ar9300RateSize; i++) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]);
+ ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n",
+ i, targetPowerValT2[i]);
}
}
@@ -4436,7 +4439,7 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
struct ath_common *common = ath9k_hw_common(ah);
if (ichain >= AR9300_MAX_CHAINS) {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"Invalid chain index, must be less than %d\n",
AR9300_MAX_CHAINS);
return -1;
@@ -4444,7 +4447,7 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
if (mode) { /* 5GHz */
if (ipier >= AR9300_NUM_5G_CAL_PIERS) {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"Invalid 5GHz cal pier index, must be less than %d\n",
AR9300_NUM_5G_CAL_PIERS);
return -1;
@@ -4454,7 +4457,7 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
is2GHz = 0;
} else {
if (ipier >= AR9300_NUM_2G_CAL_PIERS) {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"Invalid 2GHz cal pier index, must be less than %d\n",
AR9300_NUM_2G_CAL_PIERS);
return -1;
@@ -4616,8 +4619,7 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
/* interpolate */
for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "ch=%d f=%d low=%d %d h=%d %d\n",
+ ath_dbg(common, EEPROM, "ch=%d f=%d low=%d %d h=%d %d\n",
ichain, frequency, lfrequency[ichain],
lcorrection[ichain], hfrequency[ichain],
hcorrection[ichain]);
@@ -4672,7 +4674,7 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
ar9003_hw_power_control_override(ah, frequency, correction, voltage,
temperature);
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"for frequency=%d, calibration correction = %d %d %d\n",
frequency, correction[0], correction[1], correction[2]);
@@ -4771,7 +4773,7 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
{
struct ath_common *common = ath9k_hw_common(ah);
struct ar9300_eeprom *pEepData = &ah->eeprom.ar9300_eep;
- u16 twiceMaxEdgePower = MAX_RATE_POWER;
+ u16 twiceMaxEdgePower;
int i;
u16 scaledPower = 0, minCtlPower;
static const u16 ctlModesFor11a[] = {
@@ -4858,7 +4860,7 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
else
freq = centers.ctl_center;
- ath_dbg(common, ATH_DBG_REGULATORY,
+ ath_dbg(common, REGULATORY,
"LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, EXT_ADDITIVE %d\n",
ctlMode, numCtlModes, isHt40CtlMode,
(pCtlMode[ctlMode] & EXT_ADDITIVE));
@@ -4872,8 +4874,9 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
ctlNum = AR9300_NUM_CTLS_5G;
}
+ twiceMaxEdgePower = MAX_RATE_POWER;
for (i = 0; (i < ctlNum) && ctlIndex[i]; i++) {
- ath_dbg(common, ATH_DBG_REGULATORY,
+ ath_dbg(common, REGULATORY,
"LOOP-Ctlidx %d: cfgCtl 0x%2.2x pCtlMode 0x%2.2x ctlIndex 0x%2.2x chan %d\n",
i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i],
chan->channel);
@@ -4915,7 +4918,7 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
- ath_dbg(common, ATH_DBG_REGULATORY,
+ ath_dbg(common, REGULATORY,
"SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n",
ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
scaledPower, minCtlPower);
@@ -5039,7 +5042,7 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
target_power_val_t2_eep[i]) >
paprd_scale_factor)) {
ah->paprd_ratemask &= ~(1 << i);
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"paprd disabled for mcs %d\n", i);
}
}
@@ -5051,12 +5054,14 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
regulatory->max_power_level = targetPowerValT2[i];
}
+ ath9k_hw_update_regulatory_maxpower(ah);
+
if (test)
return;
for (i = 0; i < ar9300RateSize; i++) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]);
+ ath_dbg(common, EEPROM, "TPC[%02d] 0x%08x\n",
+ i, targetPowerValT2[i]);
}
ah->txpower_limit = regulatory->max_power_level;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 6335a86..bb223fe 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -216,10 +216,8 @@ struct ar9300_modal_eep_header {
u8 spurChans[AR_EEPROM_MODAL_SPURS];
/* 3 Check if the register is per chain */
int8_t noiseFloorThreshCh[AR9300_MAX_CHAINS];
- u8 ob[AR9300_MAX_CHAINS];
- u8 db_stage2[AR9300_MAX_CHAINS];
- u8 db_stage3[AR9300_MAX_CHAINS];
- u8 db_stage4[AR9300_MAX_CHAINS];
+ u8 reserved[11];
+ int8_t quick_drop;
u8 xpaBiasLvl;
u8 txFrameToDataStart;
u8 txFrameToPaOn;
@@ -269,7 +267,9 @@ struct cal_ctl_data_5g {
struct ar9300_BaseExtension_1 {
u8 ant_div_control;
- u8 future[13];
+ u8 future[11];
+ int8_t quick_drop_low;
+ int8_t quick_drop_high;
} __packed;
struct ar9300_BaseExtension_2 {
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index ccde784..09b8c9d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -175,20 +175,24 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
u32 isr = 0;
u32 mask2 = 0;
struct ath9k_hw_capabilities *pCap = &ah->caps;
- u32 sync_cause = 0;
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 sync_cause = 0, async_cause;
- if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
+ async_cause = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
+
+ if (async_cause & (AR_INTR_MAC_IRQ | AR_INTR_ASYNC_MASK_MCI)) {
if ((REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M)
== AR_RTC_STATUS_ON)
isr = REG_READ(ah, AR_ISR);
}
+
sync_cause = REG_READ(ah, AR_INTR_SYNC_CAUSE) & AR_INTR_SYNC_DEFAULT;
*masked = 0;
- if (!isr && !sync_cause)
+ if (!isr && !sync_cause && !async_cause)
return false;
if (isr) {
@@ -294,6 +298,33 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
ar9003_hw_bb_watchdog_read(ah);
}
+ if (async_cause & AR_INTR_ASYNC_MASK_MCI) {
+ u32 raw_intr, rx_msg_intr;
+
+ rx_msg_intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
+ raw_intr = REG_READ(ah, AR_MCI_INTERRUPT_RAW);
+
+ if ((raw_intr == 0xdeadbeef) || (rx_msg_intr == 0xdeadbeef))
+ ath_dbg(common, MCI,
+ "MCI gets 0xdeadbeef during MCI int processing new raw_intr=0x%08x, new rx_msg_raw=0x%08x, raw_intr=0x%08x, rx_msg_raw=0x%08x\n",
+ raw_intr, rx_msg_intr, mci->raw_intr,
+ mci->rx_msg_intr);
+ else {
+ mci->rx_msg_intr |= rx_msg_intr;
+ mci->raw_intr |= raw_intr;
+ *masked |= ATH9K_INT_MCI;
+
+ if (rx_msg_intr & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO)
+ mci->cont_status =
+ REG_READ(ah, AR_MCI_CONT_STATUS);
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW, rx_msg_intr);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW, raw_intr);
+ ath_dbg(common, MCI, "AR_INTR_SYNC_MCI\n");
+
+ }
+ }
+
if (sync_cause) {
if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
@@ -302,7 +333,7 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
}
if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT)
- ath_dbg(common, ATH_DBG_INTERRUPT,
+ ath_dbg(common, INTERRUPT,
"AR_INTR_SYNC_LOCAL_TIMEOUT\n");
REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
@@ -333,7 +364,7 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
if ((MS(ads->ds_info, AR_DescId) != ATHEROS_VENDOR_ID) ||
(MS(ads->ds_info, AR_TxRxDesc) != 1)) {
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_XMIT,
+ ath_dbg(ath9k_hw_common(ah), XMIT,
"Tx Descriptor error %x\n", ads->ds_info);
memset(ads, 0, sizeof(*ads));
return -EIO;
@@ -526,10 +557,11 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
rxs->rs_status |= ATH9K_RXERR_DECRYPT;
else if (rxsp->status11 & AR_MichaelErr)
rxs->rs_status |= ATH9K_RXERR_MIC;
- if (rxsp->status11 & AR_KeyMiss)
- rxs->rs_status |= ATH9K_RXERR_KEYMISS;
}
+ if (rxsp->status11 & AR_KeyMiss)
+ rxs->rs_status |= ATH9K_RXERR_KEYMISS;
+
return 0;
}
EXPORT_SYMBOL(ath9k_hw_process_rxdesc_edma);
@@ -541,7 +573,7 @@ void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah)
memset((void *) ah->ts_ring, 0,
ah->ts_size * sizeof(struct ar9003_txs));
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_XMIT,
+ ath_dbg(ath9k_hw_common(ah), XMIT,
"TS Start 0x%x End 0x%x Virt %p, Size %d\n",
ah->ts_paddr_start, ah->ts_paddr_end,
ah->ts_ring, ah->ts_size);
@@ -552,7 +584,7 @@ void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah)
void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start,
u32 ts_paddr_start,
- u8 size)
+ u16 size)
{
ah->ts_paddr_start = ts_paddr_start;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.h b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
index c504493..e203b51 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
@@ -118,5 +118,5 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah,
void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah);
void ath9k_hw_setup_statusring(struct ath_hw *ah, void *ts_start,
u32 ts_paddr_start,
- u8 size);
+ u16 size);
#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
new file mode 100644
index 0000000..709520c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c
@@ -0,0 +1,1493 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include "hw.h"
+#include "ar9003_phy.h"
+#include "ar9003_mci.h"
+
+static void ar9003_mci_reset_req_wakeup(struct ath_hw *ah)
+{
+ if (!AR_SREV_9462_20(ah))
+ return;
+
+ REG_RMW_FIELD(ah, AR_MCI_COMMAND2,
+ AR_MCI_COMMAND2_RESET_REQ_WAKEUP, 1);
+ udelay(1);
+ REG_RMW_FIELD(ah, AR_MCI_COMMAND2,
+ AR_MCI_COMMAND2_RESET_REQ_WAKEUP, 0);
+}
+
+static int ar9003_mci_wait_for_interrupt(struct ath_hw *ah, u32 address,
+ u32 bit_position, int time_out)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ while (time_out) {
+
+ if (REG_READ(ah, address) & bit_position) {
+
+ REG_WRITE(ah, address, bit_position);
+
+ if (address == AR_MCI_INTERRUPT_RX_MSG_RAW) {
+
+ if (bit_position &
+ AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
+ ar9003_mci_reset_req_wakeup(ah);
+
+ if (bit_position &
+ (AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING |
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING))
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ AR_MCI_INTERRUPT_RX_MSG);
+ }
+ break;
+ }
+
+ udelay(10);
+ time_out -= 10;
+
+ if (time_out < 0)
+ break;
+ }
+
+ if (time_out <= 0) {
+ ath_dbg(common, MCI,
+ "MCI Wait for Reg 0x%08x = 0x%08x timeout\n",
+ address, bit_position);
+ ath_dbg(common, MCI,
+ "MCI INT_RAW = 0x%08x, RX_MSG_RAW = 0x%08x\n",
+ REG_READ(ah, AR_MCI_INTERRUPT_RAW),
+ REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW));
+ time_out = 0;
+ }
+
+ return time_out;
+}
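As a usage illustration (a hypothetical caller, not part of the patch), this mirrors how ar9003_mci_prep_interface() uses the helper further down, with the same 500 us budget polled in 10 us steps:

/* Sketch only: wait for the SYS_WAKING raw bit with a ~500 us budget. */
static bool mci_wait_sys_waking_sketch(struct ath_hw *ah)
{
	return ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
					     AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING,
					     500) != 0;
}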
+
+void ar9003_mci_remote_reset(struct ath_hw *ah, bool wait_done)
+{
+ u32 payload[4] = { 0xffffffff, 0xffffffff, 0xffffffff, 0xffffff00};
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ return;
+
+ ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0, payload, 16,
+ wait_done, false);
+ udelay(5);
+}
+
+void ar9003_mci_send_lna_transfer(struct ath_hw *ah, bool wait_done)
+{
+ u32 payload = 0x00000000;
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ return;
+
+ ar9003_mci_send_message(ah, MCI_LNA_TRANS, 0, &payload, 1,
+ wait_done, false);
+}
+
+static void ar9003_mci_send_req_wake(struct ath_hw *ah, bool wait_done)
+{
+ ar9003_mci_send_message(ah, MCI_REQ_WAKE, MCI_FLAG_DISABLE_TIMESTAMP,
+ NULL, 0, wait_done, false);
+ udelay(5);
+}
+
+void ar9003_mci_send_sys_waking(struct ath_hw *ah, bool wait_done)
+{
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ return;
+
+ ar9003_mci_send_message(ah, MCI_SYS_WAKING, MCI_FLAG_DISABLE_TIMESTAMP,
+ NULL, 0, wait_done, false);
+}
+
+static void ar9003_mci_send_lna_take(struct ath_hw *ah, bool wait_done)
+{
+ u32 payload = 0x70000000;
+
+ ar9003_mci_send_message(ah, MCI_LNA_TAKE, 0, &payload, 1,
+ wait_done, false);
+}
+
+static void ar9003_mci_send_sys_sleeping(struct ath_hw *ah, bool wait_done)
+{
+ ar9003_mci_send_message(ah, MCI_SYS_SLEEPING,
+ MCI_FLAG_DISABLE_TIMESTAMP,
+ NULL, 0, wait_done, false);
+}
+
+static void ar9003_mci_send_coex_version_query(struct ath_hw *ah,
+ bool wait_done)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 payload[4] = {0, 0, 0, 0};
+
+ if (!mci->bt_version_known &&
+ (mci->bt_state != MCI_BT_SLEEP)) {
+ ath_dbg(common, MCI, "MCI Send Coex version query\n");
+ MCI_GPM_SET_TYPE_OPCODE(payload,
+ MCI_GPM_COEX_AGENT, MCI_GPM_COEX_VERSION_QUERY);
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+ wait_done, true);
+ }
+}
+
+static void ar9003_mci_send_coex_version_response(struct ath_hw *ah,
+ bool wait_done)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 payload[4] = {0, 0, 0, 0};
+
+ ath_dbg(common, MCI, "MCI Send Coex version response\n");
+ MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
+ MCI_GPM_COEX_VERSION_RESPONSE);
+ *(((u8 *)payload) + MCI_GPM_COEX_B_MAJOR_VERSION) =
+ mci->wlan_ver_major;
+ *(((u8 *)payload) + MCI_GPM_COEX_B_MINOR_VERSION) =
+ mci->wlan_ver_minor;
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
+}
+
+static void ar9003_mci_send_coex_wlan_channels(struct ath_hw *ah,
+ bool wait_done)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 *payload = &mci->wlan_channels[0];
+
+ if ((mci->wlan_channels_update == true) &&
+ (mci->bt_state != MCI_BT_SLEEP)) {
+ MCI_GPM_SET_TYPE_OPCODE(payload,
+ MCI_GPM_COEX_AGENT, MCI_GPM_COEX_WLAN_CHANNELS);
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+ wait_done, true);
+ MCI_GPM_SET_TYPE_OPCODE(payload, 0xff, 0xff);
+ }
+}
+
+static void ar9003_mci_send_coex_bt_status_query(struct ath_hw *ah,
+ bool wait_done, u8 query_type)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 payload[4] = {0, 0, 0, 0};
+ bool query_btinfo = !!(query_type & (MCI_GPM_COEX_QUERY_BT_ALL_INFO |
+ MCI_GPM_COEX_QUERY_BT_TOPOLOGY));
+
+ if (mci->bt_state != MCI_BT_SLEEP) {
+
+ ath_dbg(common, MCI, "MCI Send Coex BT Status Query 0x%02X\n",
+ query_type);
+
+ MCI_GPM_SET_TYPE_OPCODE(payload,
+ MCI_GPM_COEX_AGENT, MCI_GPM_COEX_STATUS_QUERY);
+
+ *(((u8 *)payload) + MCI_GPM_COEX_B_BT_BITMAP) = query_type;
+ /*
+ * If bt_status_query message is not sent successfully,
+ * then need_flush_btinfo should be set again.
+ */
+ if (!ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+ wait_done, true)) {
+ if (query_btinfo) {
+ mci->need_flush_btinfo = true;
+
+ ath_dbg(common, MCI,
+ "MCI send bt_status_query fail, set flush flag again\n");
+ }
+ }
+
+ if (query_btinfo)
+ mci->query_bt = false;
+ }
+}
+
+void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt,
+ bool wait_done)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 payload[4] = {0, 0, 0, 0};
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ return;
+
+ ath_dbg(common, MCI, "MCI Send Coex %s BT GPM\n",
+ (halt) ? "halt" : "unhalt");
+
+ MCI_GPM_SET_TYPE_OPCODE(payload,
+ MCI_GPM_COEX_AGENT, MCI_GPM_COEX_HALT_BT_GPM);
+
+ if (halt) {
+ mci->query_bt = true;
+ /* Send the next unhalt regardless of whether the halt was sent */
+ mci->unhalt_bt_gpm = true;
+ mci->need_flush_btinfo = true;
+ *(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) =
+ MCI_GPM_COEX_BT_GPM_HALT;
+ } else
+ *(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) =
+ MCI_GPM_COEX_BT_GPM_UNHALT;
+
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16, wait_done, true);
+}
+
+
+static void ar9003_mci_prep_interface(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 saved_mci_int_en;
+ u32 mci_timeout = 150;
+
+ mci->bt_state = MCI_BT_SLEEP;
+ saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN);
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW));
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ REG_READ(ah, AR_MCI_INTERRUPT_RAW));
+
+ /* Remote Reset */
+ ath_dbg(common, MCI, "MCI Reset sequence start\n");
+ ath_dbg(common, MCI, "MCI send REMOTE_RESET\n");
+ ar9003_mci_remote_reset(ah, true);
+
+ /*
+ * This delay is required to cover the worst-case reset delay
+ * value (255) in the MCI_COMMAND2 register.
+ */
+
+ if (AR_SREV_9462_10(ah))
+ udelay(252);
+
+ ath_dbg(common, MCI, "MCI Send REQ_WAKE to remote (BT)\n");
+ ar9003_mci_send_req_wake(ah, true);
+
+ if (ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING, 500)) {
+
+ ath_dbg(common, MCI, "MCI SYS_WAKING from remote(BT)\n");
+ mci->bt_state = MCI_BT_AWAKE;
+
+ if (AR_SREV_9462_10(ah))
+ udelay(10);
+ /*
+ * We don't need to send another remote_reset at this point.
+ * If BT received the first remote_reset, its HW has been
+ * cleaned up and is able to receive req_wake, and BT HW
+ * responds with sys_waking. In this case, WLAN receives
+ * BT's HW sys_waking.
+ * Otherwise, if BT SW missed the initial remote_reset, that
+ * remote_reset still cleans up BT MCI RX, the req_wake wakes
+ * BT up, and BT SW responds to the req_wake with a
+ * remote_reset and sys_waking. In this case, WLAN receives
+ * BT's SW sys_waking.
+ * In either case, BT's RX is cleaned up, so we don't need to
+ * reply to BT's remote_reset now, if any. Similarly, whenever
+ * WLAN can receive BT's sys_waking, WLAN's RX is also fine.
+ */
+
+ /* Send SYS_WAKING to BT */
+
+ ath_dbg(common, MCI, "MCI send SW SYS_WAKING to remote BT\n");
+
+ ar9003_mci_send_sys_waking(ah, true);
+ udelay(10);
+
+ /*
+ * Set BT priority interrupt value to be 0xff to
+ * avoid having too many BT PRIORITY interrupts.
+ */
+
+ REG_WRITE(ah, AR_MCI_BT_PRI0, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_MCI_BT_PRI1, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_MCI_BT_PRI2, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_MCI_BT_PRI3, 0xFFFFFFFF);
+ REG_WRITE(ah, AR_MCI_BT_PRI, 0X000000FF);
+
+ /*
+ * A contention reset will be received after sending out
+ * sys_waking, and the BT priority interrupt bits will be set.
+ * Clear those bits before the next step.
+ */
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_CONT_RST);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ AR_MCI_INTERRUPT_BT_PRI);
+
+ if (AR_SREV_9462_10(ah) || mci->is_2g) {
+ /* Send LNA_TRANS */
+ ath_dbg(common, MCI, "MCI send LNA_TRANS to BT\n");
+ ar9003_mci_send_lna_transfer(ah, true);
+ udelay(5);
+ }
+
+ if (AR_SREV_9462_10(ah) || (mci->is_2g &&
+ !mci->update_2g5g)) {
+ if (ar9003_mci_wait_for_interrupt(ah,
+ AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_LNA_INFO,
+ mci_timeout))
+ ath_dbg(common, MCI,
+ "MCI WLAN has control over the LNA & BT obeys it\n");
+ else
+ ath_dbg(common, MCI,
+ "MCI BT didn't respond to LNA_TRANS\n");
+ }
+
+ if (AR_SREV_9462_10(ah)) {
+ /* Send another remote_reset to deassert BT clk_req. */
+ ath_dbg(common, MCI,
+ "MCI another remote_reset to deassert clk_req\n");
+ ar9003_mci_remote_reset(ah, true);
+ udelay(252);
+ }
+ }
+
+ /* Clear the extra redundant SYS_WAKING from BT */
+ if ((mci->bt_state == MCI_BT_AWAKE) &&
+ (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING)) &&
+ (REG_READ_FIELD(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) == 0)) {
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE);
+ }
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en);
+}
+
+void ar9003_mci_disable_interrupt(struct ath_hw *ah)
+{
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ return;
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0);
+}
+
+void ar9003_mci_enable_interrupt(struct ath_hw *ah)
+{
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ return;
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, AR_MCI_INTERRUPT_DEFAULT);
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN,
+ AR_MCI_INTERRUPT_RX_MSG_DEFAULT);
+}
+
+bool ar9003_mci_check_int(struct ath_hw *ah, u32 ints)
+{
+ u32 intr;
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ return false;
+
+ intr = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_RAW);
+ return ((intr & ints) == ints);
+}
+
+void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
+ u32 *rx_msg_intr)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ return;
+
+ *raw_intr = mci->raw_intr;
+ *rx_msg_intr = mci->rx_msg_intr;
+
+ /* Clear the interrupt bits after the values are read. */
+ mci->raw_intr = 0;
+ mci->rx_msg_intr = 0;
+}
+EXPORT_SYMBOL(ar9003_mci_get_interrupt);
+
+void ar9003_mci_2g5g_changed(struct ath_hw *ah, bool is_2g)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ return;
+
+ if (!mci->update_2g5g &&
+ (mci->is_2g != is_2g))
+ mci->update_2g5g = true;
+
+ mci->is_2g = is_2g;
+}
+
+static bool ar9003_mci_is_gpm_valid(struct ath_hw *ah, u32 msg_index)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 *payload;
+ u32 recv_type, offset;
+
+ if (msg_index == MCI_GPM_INVALID)
+ return false;
+
+ offset = msg_index << 4;
+
+ payload = (u32 *)(mci->gpm_buf + offset);
+ recv_type = MCI_GPM_TYPE(payload);
+
+ if (recv_type == MCI_GPM_RSVD_PATTERN) {
+ ath_dbg(common, MCI, "MCI Skip RSVD GPM\n");
+ return false;
+ }
+
+ return true;
+}
+
+static void ar9003_mci_observation_set_up(struct ath_hw *ah)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ if (mci->config & ATH_MCI_CONFIG_MCI_OBS_MCI) {
+
+ ath9k_hw_cfg_output(ah, 3,
+ AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA);
+ ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK);
+ ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
+ ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
+
+ } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_TXRX) {
+
+ ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX);
+ ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX);
+ ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
+ ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
+ ath9k_hw_cfg_output(ah, 5, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+
+ } else if (mci->config & ATH_MCI_CONFIG_MCI_OBS_BT) {
+
+ ath9k_hw_cfg_output(ah, 3, AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX);
+ ath9k_hw_cfg_output(ah, 2, AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX);
+ ath9k_hw_cfg_output(ah, 1, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA);
+ ath9k_hw_cfg_output(ah, 0, AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK);
+
+ } else
+ return;
+
+ REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
+
+ if (AR_SREV_9462_20_OR_LATER(ah)) {
+ REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
+ AR_GLB_DS_JTAG_DISABLE, 1);
+ REG_RMW_FIELD(ah, AR_PHY_GLB_CONTROL,
+ AR_GLB_WLAN_UART_INTF_EN, 0);
+ REG_SET_BIT(ah, AR_GLB_GPIO_CONTROL,
+ ATH_MCI_CONFIG_MCI_OBS_GPIO);
+ }
+
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_GPIO_OBS_SEL, 0);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL, 1);
+ REG_WRITE(ah, AR_OBS, 0x4b);
+ REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL1, 0x03);
+ REG_RMW_FIELD(ah, AR_DIAG_SW, AR_DIAG_OBS_PT_SEL2, 0x01);
+ REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_LSB, 0x02);
+ REG_RMW_FIELD(ah, AR_MACMISC, AR_MACMISC_MISC_OBS_BUS_MSB, 0x03);
+ REG_RMW_FIELD(ah, AR_PHY_TEST_CTL_STATUS,
+ AR_PHY_TEST_CTL_DEBUGPORT_SEL, 0x07);
+}
+
+static bool ar9003_mci_send_coex_bt_flags(struct ath_hw *ah, bool wait_done,
+ u8 opcode, u32 bt_flags)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 pld[4] = {0, 0, 0, 0};
+
+ MCI_GPM_SET_TYPE_OPCODE(pld,
+ MCI_GPM_COEX_AGENT, MCI_GPM_COEX_BT_UPDATE_FLAGS);
+
+ *(((u8 *)pld) + MCI_GPM_COEX_B_BT_FLAGS_OP) = opcode;
+ *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 0) = bt_flags & 0xFF;
+ *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 1) = (bt_flags >> 8) & 0xFF;
+ *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 2) = (bt_flags >> 16) & 0xFF;
+ *(((u8 *)pld) + MCI_GPM_COEX_W_BT_FLAGS + 3) = (bt_flags >> 24) & 0xFF;
+
+ ath_dbg(common, MCI,
+ "MCI BT_MCI_FLAGS: Send Coex BT Update Flags %s 0x%08x\n",
+ opcode == MCI_GPM_COEX_BT_FLAGS_READ ? "READ" :
+ opcode == MCI_GPM_COEX_BT_FLAGS_SET ? "SET" : "CLEAR",
+ bt_flags);
+
+ return ar9003_mci_send_message(ah, MCI_GPM, 0, pld, 16,
+ wait_done, true);
+}
+
+void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
+ bool is_full_sleep)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 regval, thresh;
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ return;
+
+ ath_dbg(common, MCI, "MCI full_sleep = %d, is_2g = %d\n",
+ is_full_sleep, is_2g);
+
+ /*
+ * Bail out if the GPM buffer and the scheduling message buffer
+ * have not been allocated.
+ */
+
+ if (!mci->gpm_addr && !mci->sched_addr) {
+ ath_dbg(common, MCI,
+ "MCI GPM and schedule buffers are not allocated\n");
+ return;
+ }
+
+ if (REG_READ(ah, AR_BTCOEX_CTRL) == 0xdeadbeef) {
+ ath_dbg(common, MCI, "MCI it's deadbeef, quit mci_reset\n");
+ return;
+ }
+
+ /* Program MCI DMA related registers */
+ REG_WRITE(ah, AR_MCI_GPM_0, mci->gpm_addr);
+ REG_WRITE(ah, AR_MCI_GPM_1, mci->gpm_len);
+ REG_WRITE(ah, AR_MCI_SCHD_TABLE_0, mci->sched_addr);
+
+ /*
+ * To avoid the MCI state machine being affected by incoming
+ * remote MCI messages, MCI mode is enabled later, right before
+ * resetting the MCI TX and RX paths.
+ */
+
+ regval = SM(1, AR_BTCOEX_CTRL_AR9462_MODE) |
+ SM(1, AR_BTCOEX_CTRL_WBTIMER_EN) |
+ SM(1, AR_BTCOEX_CTRL_PA_SHARED) |
+ SM(1, AR_BTCOEX_CTRL_LNA_SHARED) |
+ SM(2, AR_BTCOEX_CTRL_NUM_ANTENNAS) |
+ SM(3, AR_BTCOEX_CTRL_RX_CHAIN_MASK) |
+ SM(0, AR_BTCOEX_CTRL_1_CHAIN_ACK) |
+ SM(0, AR_BTCOEX_CTRL_1_CHAIN_BCN) |
+ SM(0, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+
+ if (is_2g && (AR_SREV_9462_20(ah)) &&
+ !(mci->config & ATH_MCI_CONFIG_DISABLE_OSLA)) {
+
+ regval |= SM(1, AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+ ath_dbg(common, MCI, "MCI sched one step look ahead\n");
+
+ if (!(mci->config &
+ ATH_MCI_CONFIG_DISABLE_AGGR_THRESH)) {
+
+ thresh = MS(mci->config,
+ ATH_MCI_CONFIG_AGGR_THRESH);
+ thresh &= 7;
+ regval |= SM(1,
+ AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN);
+ regval |= SM(thresh, AR_BTCOEX_CTRL_AGGR_THRESH);
+
+ REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
+ AR_MCI_SCHD_TABLE_2_HW_BASED, 1);
+ REG_RMW_FIELD(ah, AR_MCI_SCHD_TABLE_2,
+ AR_MCI_SCHD_TABLE_2_MEM_BASED, 1);
+
+ } else
+ ath_dbg(common, MCI, "MCI sched aggr thresh: off\n");
+ } else
+ ath_dbg(common, MCI, "MCI SCHED one step look ahead off\n");
+
+ if (AR_SREV_9462_10(ah))
+ regval |= SM(1, AR_BTCOEX_CTRL_SPDT_ENABLE_10);
+
+ REG_WRITE(ah, AR_BTCOEX_CTRL, regval);
+
+ if (AR_SREV_9462_20(ah)) {
+ REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
+ AR_BTCOEX_CTRL_SPDT_ENABLE);
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL3,
+ AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT, 20);
+ }
+
+ REG_RMW_FIELD(ah, AR_BTCOEX_CTRL2, AR_BTCOEX_CTRL2_RX_DEWEIGHT, 1);
+ REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
+
+ thresh = MS(mci->config, ATH_MCI_CONFIG_CLK_DIV);
+ REG_RMW_FIELD(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_CLK_DIV, thresh);
+ REG_SET_BIT(ah, AR_BTCOEX_CTRL, AR_BTCOEX_CTRL_MCI_MODE_EN);
+
+ /* Resetting the Rx and Tx paths of MCI */
+ regval = REG_READ(ah, AR_MCI_COMMAND2);
+ regval |= SM(1, AR_MCI_COMMAND2_RESET_TX);
+ REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+
+ udelay(1);
+
+ regval &= ~SM(1, AR_MCI_COMMAND2_RESET_TX);
+ REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+
+ if (is_full_sleep) {
+ ar9003_mci_mute_bt(ah);
+ udelay(100);
+ }
+
+ regval |= SM(1, AR_MCI_COMMAND2_RESET_RX);
+ REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+ udelay(1);
+ regval &= ~SM(1, AR_MCI_COMMAND2_RESET_RX);
+ REG_WRITE(ah, AR_MCI_COMMAND2, regval);
+
+ ar9003_mci_state(ah, MCI_STATE_INIT_GPM_OFFSET, NULL);
+ REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE,
+ (SM(0xe801, AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR) |
+ SM(0x0000, AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM)));
+
+ REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
+ AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+ if (AR_SREV_9462_20_OR_LATER(ah))
+ ar9003_mci_observation_set_up(ah);
+
+ mci->ready = true;
+ ar9003_mci_prep_interface(ah);
+
+ if (en_int)
+ ar9003_mci_enable_interrupt(ah);
+}
+
+void ar9003_mci_mute_bt(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ return;
+
+ /* disable all MCI messages */
+ REG_WRITE(ah, AR_MCI_MSG_ATTRIBUTES_TABLE, 0xffff0000);
+ REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS0, 0xffffffff);
+ REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS1, 0xffffffff);
+ REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS2, 0xffffffff);
+ REG_WRITE(ah, AR_BTCOEX_WL_WEIGHTS3, 0xffffffff);
+ REG_SET_BIT(ah, AR_MCI_TX_CTRL, AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+ /* wait for pending HW messages to flush out */
+ udelay(10);
+
+ /*
+ * Send LNA_TAKE and SYS_SLEEPING when:
+ * 1. the reset does not follow a resume from full sleep
+ * 2. before resetting MCI RX, to quiet BT and avoid MCI RX
+ * misalignment
+ */
+
+ ath_dbg(common, MCI, "MCI Send LNA take\n");
+ ar9003_mci_send_lna_take(ah, true);
+
+ udelay(5);
+
+ ath_dbg(common, MCI, "MCI Send sys sleeping\n");
+ ar9003_mci_send_sys_sleeping(ah, true);
+}
+
+void ar9003_mci_sync_bt_state(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 cur_bt_state;
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ return;
+
+ cur_bt_state = ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL);
+
+ if (mci->bt_state != cur_bt_state) {
+ ath_dbg(common, MCI,
+ "MCI BT state mismatches. old: %d, new: %d\n",
+ mci->bt_state, cur_bt_state);
+ mci->bt_state = cur_bt_state;
+ }
+
+ if (mci->bt_state != MCI_BT_SLEEP) {
+
+ ar9003_mci_send_coex_version_query(ah, true);
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+
+ if (mci->unhalt_bt_gpm == true) {
+ ath_dbg(common, MCI, "MCI unhalt BT GPM\n");
+ ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);
+ }
+ }
+}
+
+static void ar9003_mci_send_2g5g_status(struct ath_hw *ah, bool wait_done)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 new_flags, to_set, to_clear;
+
+ if (AR_SREV_9462_20(ah) &&
+ mci->update_2g5g &&
+ (mci->bt_state != MCI_BT_SLEEP)) {
+
+ if (mci->is_2g) {
+ new_flags = MCI_2G_FLAGS;
+ to_clear = MCI_2G_FLAGS_CLEAR_MASK;
+ to_set = MCI_2G_FLAGS_SET_MASK;
+ } else {
+ new_flags = MCI_5G_FLAGS;
+ to_clear = MCI_5G_FLAGS_CLEAR_MASK;
+ to_set = MCI_5G_FLAGS_SET_MASK;
+ }
+
+ ath_dbg(common, MCI,
+ "MCI BT_MCI_FLAGS: %s 0x%08x clr=0x%08x, set=0x%08x\n",
+ mci->is_2g ? "2G" : "5G", new_flags, to_clear, to_set);
+
+ if (to_clear)
+ ar9003_mci_send_coex_bt_flags(ah, wait_done,
+ MCI_GPM_COEX_BT_FLAGS_CLEAR, to_clear);
+
+ if (to_set)
+ ar9003_mci_send_coex_bt_flags(ah, wait_done,
+ MCI_GPM_COEX_BT_FLAGS_SET, to_set);
+ }
+
+ if (AR_SREV_9462_10(ah) && (mci->bt_state != MCI_BT_SLEEP))
+ mci->update_2g5g = false;
+}
+
+static void ar9003_mci_queue_unsent_gpm(struct ath_hw *ah, u8 header,
+ u32 *payload, bool queue)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u8 type, opcode;
+
+ if (queue) {
+
+ if (payload)
+ ath_dbg(common, MCI,
+ "MCI ERROR: Send fail: %02x: %02x %02x %02x\n",
+ header,
+ *(((u8 *)payload) + 4),
+ *(((u8 *)payload) + 5),
+ *(((u8 *)payload) + 6));
+ else
+ ath_dbg(common, MCI, "MCI ERROR: Send fail: %02x\n",
+ header);
+ }
+
+ /* check if the message is to be queued */
+ if (header != MCI_GPM)
+ return;
+
+ type = MCI_GPM_TYPE(payload);
+ opcode = MCI_GPM_OPCODE(payload);
+
+ if (type != MCI_GPM_COEX_AGENT)
+ return;
+
+ switch (opcode) {
+ case MCI_GPM_COEX_BT_UPDATE_FLAGS:
+
+ if (AR_SREV_9462_10(ah))
+ break;
+
+ if (*(((u8 *)payload) + MCI_GPM_COEX_B_BT_FLAGS_OP) ==
+ MCI_GPM_COEX_BT_FLAGS_READ)
+ break;
+
+ mci->update_2g5g = queue;
+
+ if (queue)
+ ath_dbg(common, MCI,
+ "MCI BT_MCI_FLAGS: 2G5G status <queued> %s\n",
+ mci->is_2g ? "2G" : "5G");
+ else
+ ath_dbg(common, MCI,
+ "MCI BT_MCI_FLAGS: 2G5G status <sent> %s\n",
+ mci->is_2g ? "2G" : "5G");
+
+ break;
+
+ case MCI_GPM_COEX_WLAN_CHANNELS:
+
+ mci->wlan_channels_update = queue;
+ if (queue)
+ ath_dbg(common, MCI, "MCI WLAN channel map <queued>\n");
+ else
+ ath_dbg(common, MCI, "MCI WLAN channel map <sent>\n");
+ break;
+
+ case MCI_GPM_COEX_HALT_BT_GPM:
+
+ if (*(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) ==
+ MCI_GPM_COEX_BT_GPM_UNHALT) {
+
+ mci->unhalt_bt_gpm = queue;
+
+ if (queue)
+ ath_dbg(common, MCI,
+ "MCI UNHALT BT GPM <queued>\n");
+ else {
+ mci->halted_bt_gpm = false;
+ ath_dbg(common, MCI,
+ "MCI UNHALT BT GPM <sent>\n");
+ }
+ }
+
+ if (*(((u8 *)payload) + MCI_GPM_COEX_B_HALT_STATE) ==
+ MCI_GPM_COEX_BT_GPM_HALT) {
+
+ mci->halted_bt_gpm = !queue;
+
+ if (queue)
+ ath_dbg(common, MCI,
+ "MCI HALT BT GPM <not sent>\n");
+ else
+ ath_dbg(common, MCI,
+ "MCI HALT BT GPM <sent>\n");
+ }
+
+ break;
+ default:
+ break;
+ }
+}
+
+void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ return;
+
+ if (mci->update_2g5g) {
+ if (mci->is_2g) {
+
+ ar9003_mci_send_2g5g_status(ah, true);
+ ath_dbg(common, MCI, "MCI Send LNA trans\n");
+ ar9003_mci_send_lna_transfer(ah, true);
+ udelay(5);
+
+ REG_CLR_BIT(ah, AR_MCI_TX_CTRL,
+ AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+ if (AR_SREV_9462_20(ah)) {
+ REG_CLR_BIT(ah, AR_PHY_GLB_CONTROL,
+ AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
+ if (!(mci->config &
+ ATH_MCI_CONFIG_DISABLE_OSLA)) {
+ REG_SET_BIT(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+ }
+ }
+ } else {
+ ath_dbg(common, MCI, "MCI Send LNA take\n");
+ ar9003_mci_send_lna_take(ah, true);
+ udelay(5);
+
+ REG_SET_BIT(ah, AR_MCI_TX_CTRL,
+ AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE);
+
+ if (AR_SREV_9462_20(ah)) {
+ REG_SET_BIT(ah, AR_PHY_GLB_CONTROL,
+ AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL);
+ REG_CLR_BIT(ah, AR_BTCOEX_CTRL,
+ AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN);
+ }
+
+ ar9003_mci_send_2g5g_status(ah, true);
+ }
+ }
+}
+
+bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
+ u32 *payload, u8 len, bool wait_done,
+ bool check_bt)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ bool msg_sent = false;
+ u32 regval;
+ u32 saved_mci_int_en;
+ int i;
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ return false;
+
+ saved_mci_int_en = REG_READ(ah, AR_MCI_INTERRUPT_EN);
+ regval = REG_READ(ah, AR_BTCOEX_CTRL);
+
+ if ((regval == 0xdeadbeef) || !(regval & AR_BTCOEX_CTRL_MCI_MODE_EN)) {
+
+ ath_dbg(common, MCI,
+ "MCI Not sending 0x%x. MCI is not enabled. full_sleep = %d\n",
+ header,
+ (ah->power_mode == ATH9K_PM_FULL_SLEEP) ? 1 : 0);
+
+ ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
+ return false;
+
+ } else if (check_bt && (mci->bt_state == MCI_BT_SLEEP)) {
+
+ ath_dbg(common, MCI,
+ "MCI Don't send message 0x%x. BT is in sleep state\n",
+ header);
+
+ ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
+ return false;
+ }
+
+ if (wait_done)
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, 0);
+
+ /* Need to clear the SW_MSG_DONE raw bit before waiting */
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RAW,
+ (AR_MCI_INTERRUPT_SW_MSG_DONE |
+ AR_MCI_INTERRUPT_MSG_FAIL_MASK));
+
+ if (payload) {
+ for (i = 0; (i * 4) < len; i++)
+ REG_WRITE(ah, (AR_MCI_TX_PAYLOAD0 + i * 4),
+ *(payload + i));
+ }
+
+ REG_WRITE(ah, AR_MCI_COMMAND0,
+ (SM((flag & MCI_FLAG_DISABLE_TIMESTAMP),
+ AR_MCI_COMMAND0_DISABLE_TIMESTAMP) |
+ SM(len, AR_MCI_COMMAND0_LEN) |
+ SM(header, AR_MCI_COMMAND0_HEADER)));
+
+ if (wait_done &&
+ !(ar9003_mci_wait_for_interrupt(ah, AR_MCI_INTERRUPT_RAW,
+ AR_MCI_INTERRUPT_SW_MSG_DONE, 500)))
+ ar9003_mci_queue_unsent_gpm(ah, header, payload, true);
+ else {
+ ar9003_mci_queue_unsent_gpm(ah, header, payload, false);
+ msg_sent = true;
+ }
+
+ if (wait_done)
+ REG_WRITE(ah, AR_MCI_INTERRUPT_EN, saved_mci_int_en);
+
+ return msg_sent;
+}
+EXPORT_SYMBOL(ar9003_mci_send_message);
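A typical GPM send then looks like the sketch below (a hypothetical wrapper, not part of the patch; the macros, opcodes and 16-byte payload size are those used by the coex helpers earlier in this file). With wait_done the call blocks until the SW_MSG_DONE interrupt; with check_bt the message is queued instead of sent while BT is in MCI_BT_SLEEP.

/* Sketch only: build and send a coex status-query GPM. */
static bool mci_send_status_query_sketch(struct ath_hw *ah)
{
	u32 payload[4] = {0, 0, 0, 0};

	MCI_GPM_SET_TYPE_OPCODE(payload, MCI_GPM_COEX_AGENT,
				MCI_GPM_COEX_STATUS_QUERY);
	*(((u8 *)payload) + MCI_GPM_COEX_B_BT_BITMAP) =
		MCI_GPM_COEX_QUERY_BT_ALL_INFO;

	return ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
				       true, true);
}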
+
+void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
+ u16 len, u32 sched_addr)
+{
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ void *sched_buf = (void *)((char *) gpm_buf + (sched_addr - gpm_addr));
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ return;
+
+ mci->gpm_addr = gpm_addr;
+ mci->gpm_buf = gpm_buf;
+ mci->gpm_len = len;
+ mci->sched_addr = sched_addr;
+ mci->sched_buf = sched_buf;
+
+ ar9003_mci_reset(ah, true, true, true);
+}
+EXPORT_SYMBOL(ar9003_mci_setup);
+
+void ar9003_mci_cleanup(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ return;
+
+ /* Turn off MCI and Jupiter mode. */
+ REG_WRITE(ah, AR_BTCOEX_CTRL, 0x00);
+ ath_dbg(common, MCI, "MCI ar9003_mci_cleanup\n");
+ ar9003_mci_disable_interrupt(ah);
+}
+EXPORT_SYMBOL(ar9003_mci_cleanup);
+
+static void ar9003_mci_process_gpm_extra(struct ath_hw *ah, u8 gpm_type,
+ u8 gpm_opcode, u32 *p_gpm)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u8 *p_data = (u8 *) p_gpm;
+
+ if (gpm_type != MCI_GPM_COEX_AGENT)
+ return;
+
+ switch (gpm_opcode) {
+ case MCI_GPM_COEX_VERSION_QUERY:
+ ath_dbg(common, MCI, "MCI Recv GPM COEX Version Query\n");
+ ar9003_mci_send_coex_version_response(ah, true);
+ break;
+ case MCI_GPM_COEX_VERSION_RESPONSE:
+ ath_dbg(common, MCI, "MCI Recv GPM COEX Version Response\n");
+ mci->bt_ver_major =
+ *(p_data + MCI_GPM_COEX_B_MAJOR_VERSION);
+ mci->bt_ver_minor =
+ *(p_data + MCI_GPM_COEX_B_MINOR_VERSION);
+ mci->bt_version_known = true;
+ ath_dbg(common, MCI, "MCI BT Coex version: %d.%d\n",
+ mci->bt_ver_major, mci->bt_ver_minor);
+ break;
+ case MCI_GPM_COEX_STATUS_QUERY:
+ ath_dbg(common, MCI,
+ "MCI Recv GPM COEX Status Query = 0x%02X\n",
+ *(p_data + MCI_GPM_COEX_B_WLAN_BITMAP));
+ mci->wlan_channels_update = true;
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+ break;
+ case MCI_GPM_COEX_BT_PROFILE_INFO:
+ mci->query_bt = true;
+ ath_dbg(common, MCI, "MCI Recv GPM COEX BT_Profile_Info\n");
+ break;
+ case MCI_GPM_COEX_BT_STATUS_UPDATE:
+ mci->query_bt = true;
+ ath_dbg(common, MCI,
+ "MCI Recv GPM COEX BT_Status_Update SEQ=%d (drop&query)\n",
+ *(p_gpm + 3));
+ break;
+ default:
+ break;
+ }
+}
+
+u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
+ u8 gpm_opcode, int time_out)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 *p_gpm = NULL, mismatch = 0, more_data;
+ u32 offset;
+ u8 recv_type = 0, recv_opcode = 0;
+ bool b_is_bt_cal_done = (gpm_type == MCI_GPM_BT_CAL_DONE);
+
+	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ return 0;
+
+ more_data = time_out ? MCI_GPM_NOMORE : MCI_GPM_MORE;
+
+ while (time_out > 0) {
+ if (p_gpm) {
+ MCI_GPM_RECYCLE(p_gpm);
+ p_gpm = NULL;
+ }
+
+ if (more_data != MCI_GPM_MORE)
+ time_out = ar9003_mci_wait_for_interrupt(ah,
+ AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_GPM,
+ time_out);
+
+ if (!time_out)
+ break;
+
+ offset = ar9003_mci_state(ah,
+ MCI_STATE_NEXT_GPM_OFFSET, &more_data);
+
+ if (offset == MCI_GPM_INVALID)
+ continue;
+
+ p_gpm = (u32 *) (mci->gpm_buf + offset);
+ recv_type = MCI_GPM_TYPE(p_gpm);
+ recv_opcode = MCI_GPM_OPCODE(p_gpm);
+
+ if (MCI_GPM_IS_CAL_TYPE(recv_type)) {
+
+ if (recv_type == gpm_type) {
+
+ if ((gpm_type == MCI_GPM_BT_CAL_DONE) &&
+ !b_is_bt_cal_done) {
+ gpm_type = MCI_GPM_BT_CAL_GRANT;
+ ath_dbg(common, MCI,
+ "MCI Recv BT_CAL_DONE wait BT_CAL_GRANT\n");
+ continue;
+ }
+
+ break;
+ }
+ } else if ((recv_type == gpm_type) &&
+ (recv_opcode == gpm_opcode))
+ break;
+
+ /* not expected message */
+
+ /*
+ * Check if it's cal_grant.
+ *
+ * While we're waiting for cal_grant in the reset routine,
+ * it's possible that BT sends out cal_request at the
+ * same time. Since BT's calibration doesn't happen
+ * that often, we let BT complete its calibration and
+ * then continue to wait for cal_grant from BT.
+ * Original: wait for BT_CAL_GRANT.
+ * New: receive BT_CAL_REQ -> send WLAN_CAL_GRANT -> wait for
+ * BT_CAL_DONE -> wait for BT_CAL_GRANT.
+ */
+
+ if ((gpm_type == MCI_GPM_BT_CAL_GRANT) &&
+ (recv_type == MCI_GPM_BT_CAL_REQ)) {
+
+ u32 payload[4] = {0, 0, 0, 0};
+
+ gpm_type = MCI_GPM_BT_CAL_DONE;
+ ath_dbg(common, MCI,
+ "MCI Rcv BT_CAL_REQ, send WLAN_CAL_GRANT\n");
+
+ MCI_GPM_SET_CAL_TYPE(payload,
+ MCI_GPM_WLAN_CAL_GRANT);
+
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
+ false, false);
+
+ ath_dbg(common, MCI, "MCI now wait for BT_CAL_DONE\n");
+
+ continue;
+ } else {
+ ath_dbg(common, MCI, "MCI GPM subtype not match 0x%x\n",
+ *(p_gpm + 1));
+ mismatch++;
+ ar9003_mci_process_gpm_extra(ah, recv_type,
+ recv_opcode, p_gpm);
+ }
+ }
+ if (p_gpm) {
+ MCI_GPM_RECYCLE(p_gpm);
+ p_gpm = NULL;
+ }
+
+ if (time_out <= 0) {
+ time_out = 0;
+ ath_dbg(common, MCI,
+ "MCI GPM received timeout, mismatch = %d\n", mismatch);
+ } else
+ ath_dbg(common, MCI, "MCI Receive GPM type=0x%x, code=0x%x\n",
+ gpm_type, gpm_opcode);
+
+ while (more_data == MCI_GPM_MORE) {
+
+ ath_dbg(common, MCI, "MCI discard remaining GPM\n");
+ offset = ar9003_mci_state(ah, MCI_STATE_NEXT_GPM_OFFSET,
+ &more_data);
+
+ if (offset == MCI_GPM_INVALID)
+ break;
+
+ p_gpm = (u32 *) (mci->gpm_buf + offset);
+ recv_type = MCI_GPM_TYPE(p_gpm);
+ recv_opcode = MCI_GPM_OPCODE(p_gpm);
+
+ if (!MCI_GPM_IS_CAL_TYPE(recv_type))
+ ar9003_mci_process_gpm_extra(ah, recv_type,
+ recv_opcode, p_gpm);
+
+ MCI_GPM_RECYCLE(p_gpm);
+ }
+
+ return time_out;
+}
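+
+/*
+ * Illustrative example only (the timeout and the error helper below are
+ * placeholders, not taken from the driver):
+ *
+ *	if (!ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_GRANT, 0, 50000))
+ *		handle_missing_cal_grant(ah);
+ *
+ * A zero return means the expected GPM was not received before the
+ * timeout expired.
+ */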
+
+u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
+ u32 value = 0, more_gpm = 0, gpm_ptr;
+ u8 query_type;
+
+	if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ return 0;
+
+ switch (state_type) {
+ case MCI_STATE_ENABLE:
+ if (mci->ready) {
+
+ value = REG_READ(ah, AR_BTCOEX_CTRL);
+
+ if ((value == 0xdeadbeef) || (value == 0xffffffff))
+ value = 0;
+ }
+ value &= AR_BTCOEX_CTRL_MCI_MODE_EN;
+ break;
+ case MCI_STATE_INIT_GPM_OFFSET:
+ value = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+ ath_dbg(common, MCI, "MCI GPM initial WRITE_PTR=%d\n", value);
+ mci->gpm_idx = value;
+ break;
+ case MCI_STATE_NEXT_GPM_OFFSET:
+ case MCI_STATE_LAST_GPM_OFFSET:
+ /*
+ * This could be useful to avoid a new GPM message interrupt which
+ * may lead to a spurious interrupt after power sleep, or to multiple
+ * entries into ath_mci_intr().
+ * Adding an empty-GPM check by returning MCI_GPM_INVALID can
+ * alleviate this effect, but clearing the GPM RX interrupt bit is
+ * safe, because whether this is called from hw or driver code,
+ * an interrupt bit must have been set/triggered initially.
+ */
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_GPM);
+
+ gpm_ptr = MS(REG_READ(ah, AR_MCI_GPM_1), AR_MCI_GPM_WRITE_PTR);
+ value = gpm_ptr;
+
+ if (value == 0)
+ value = mci->gpm_len - 1;
+ else if (value >= mci->gpm_len) {
+ if (value != 0xFFFF) {
+ value = 0;
+ ath_dbg(common, MCI,
+ "MCI GPM offset out of range\n");
+ }
+ } else
+ value--;
+
+ if (value == 0xFFFF) {
+ value = MCI_GPM_INVALID;
+ more_gpm = MCI_GPM_NOMORE;
+ ath_dbg(common, MCI,
+ "MCI GPM ptr invalid @ptr=%d, offset=%d, more=GPM_NOMORE\n",
+ gpm_ptr, value);
+ } else if (state_type == MCI_STATE_NEXT_GPM_OFFSET) {
+
+ if (gpm_ptr == mci->gpm_idx) {
+ value = MCI_GPM_INVALID;
+ more_gpm = MCI_GPM_NOMORE;
+
+ ath_dbg(common, MCI,
+ "MCI GPM message not available @ptr=%d, @offset=%d, more=GPM_NOMORE\n",
+ gpm_ptr, value);
+ } else {
+ for (;;) {
+
+ u32 temp_index;
+
+ /* skip reserved GPM if any */
+
+ if (value != mci->gpm_idx)
+ more_gpm = MCI_GPM_MORE;
+ else
+ more_gpm = MCI_GPM_NOMORE;
+
+ temp_index = mci->gpm_idx;
+ mci->gpm_idx++;
+
+ if (mci->gpm_idx >=
+ mci->gpm_len)
+ mci->gpm_idx = 0;
+
+ ath_dbg(common, MCI,
+ "MCI GPM message got ptr=%d, @offset=%d, more=%d\n",
+ gpm_ptr, temp_index,
+ (more_gpm == MCI_GPM_MORE));
+
+ if (ar9003_mci_is_gpm_valid(ah,
+ temp_index)) {
+ value = temp_index;
+ break;
+ }
+
+ if (more_gpm == MCI_GPM_NOMORE) {
+ value = MCI_GPM_INVALID;
+ break;
+ }
+ }
+ }
+ if (p_data)
+ *p_data = more_gpm;
+ }
+
+ if (value != MCI_GPM_INVALID)
+ value <<= 4;
+
+ break;
+ case MCI_STATE_LAST_SCHD_MSG_OFFSET:
+ value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
+ AR_MCI_RX_LAST_SCHD_MSG_INDEX);
+ /* Make it in bytes */
+ value <<= 4;
+ break;
+
+ case MCI_STATE_REMOTE_SLEEP:
+ value = MS(REG_READ(ah, AR_MCI_RX_STATUS),
+ AR_MCI_RX_REMOTE_SLEEP) ?
+ MCI_BT_SLEEP : MCI_BT_AWAKE;
+ break;
+
+ case MCI_STATE_CONT_RSSI_POWER:
+ value = MS(mci->cont_status, AR_MCI_CONT_RSSI_POWER);
+ break;
+
+ case MCI_STATE_CONT_PRIORITY:
+ value = MS(mci->cont_status, AR_MCI_CONT_RRIORITY);
+ break;
+
+ case MCI_STATE_CONT_TXRX:
+ value = MS(mci->cont_status, AR_MCI_CONT_TXRX);
+ break;
+
+ case MCI_STATE_BT:
+ value = mci->bt_state;
+ break;
+
+ case MCI_STATE_SET_BT_SLEEP:
+ mci->bt_state = MCI_BT_SLEEP;
+ break;
+
+ case MCI_STATE_SET_BT_AWAKE:
+ mci->bt_state = MCI_BT_AWAKE;
+ ar9003_mci_send_coex_version_query(ah, true);
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+
+ if (mci->unhalt_bt_gpm) {
+
+ ath_dbg(common, MCI, "MCI unhalt BT GPM\n");
+ ar9003_mci_send_coex_halt_bt_gpm(ah, false, true);
+ }
+
+ ar9003_mci_2g5g_switch(ah, true);
+ break;
+
+ case MCI_STATE_SET_BT_CAL_START:
+ mci->bt_state = MCI_BT_CAL_START;
+ break;
+
+ case MCI_STATE_SET_BT_CAL:
+ mci->bt_state = MCI_BT_CAL;
+ break;
+
+ case MCI_STATE_RESET_REQ_WAKE:
+ ar9003_mci_reset_req_wakeup(ah);
+ mci->update_2g5g = true;
+
+ if ((AR_SREV_9462_20_OR_LATER(ah)) &&
+ (mci->config & ATH_MCI_CONFIG_MCI_OBS_MASK)) {
+ /* Check if we still have control of the GPIOs */
+ if ((REG_READ(ah, AR_GLB_GPIO_CONTROL) &
+ ATH_MCI_CONFIG_MCI_OBS_GPIO) !=
+ ATH_MCI_CONFIG_MCI_OBS_GPIO) {
+
+ ath_dbg(common, MCI,
+ "MCI reconfigure observation\n");
+ ar9003_mci_observation_set_up(ah);
+ }
+ }
+ break;
+
+ case MCI_STATE_SEND_WLAN_COEX_VERSION:
+ ar9003_mci_send_coex_version_response(ah, true);
+ break;
+
+ case MCI_STATE_SET_BT_COEX_VERSION:
+
+ if (!p_data)
+ ath_dbg(common, MCI,
+ "MCI Set BT Coex version with NULL data!!\n");
+ else {
+ mci->bt_ver_major = (*p_data >> 8) & 0xff;
+ mci->bt_ver_minor = (*p_data) & 0xff;
+ mci->bt_version_known = true;
+ ath_dbg(common, MCI, "MCI BT version set: %d.%d\n",
+ mci->bt_ver_major, mci->bt_ver_minor);
+ }
+ break;
+
+ case MCI_STATE_SEND_WLAN_CHANNELS:
+ if (p_data) {
+ if (((mci->wlan_channels[1] & 0xffff0000) ==
+ (*(p_data + 1) & 0xffff0000)) &&
+ (mci->wlan_channels[2] == *(p_data + 2)) &&
+ (mci->wlan_channels[3] == *(p_data + 3)))
+ break;
+
+ mci->wlan_channels[0] = *p_data++;
+ mci->wlan_channels[1] = *p_data++;
+ mci->wlan_channels[2] = *p_data++;
+ mci->wlan_channels[3] = *p_data++;
+ }
+ mci->wlan_channels_update = true;
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+ break;
+
+ case MCI_STATE_SEND_VERSION_QUERY:
+ ar9003_mci_send_coex_version_query(ah, true);
+ break;
+
+ case MCI_STATE_SEND_STATUS_QUERY:
+ query_type = (AR_SREV_9462_10(ah)) ?
+ MCI_GPM_COEX_QUERY_BT_ALL_INFO :
+ MCI_GPM_COEX_QUERY_BT_TOPOLOGY;
+
+ ar9003_mci_send_coex_bt_status_query(ah, true, query_type);
+ break;
+
+ case MCI_STATE_NEED_FLUSH_BT_INFO:
+ /*
+ * btcoex_hw.mci.unhalt_bt_gpm indicates whether an
+ * UNHALT message needs to be sent. It is set whenever
+ * there is a request to send a HALT message.
+ * halted_bt_gpm indicates whether the HALT message was
+ * sent out successfully.
+ *
+ * Checking (unhalt_bt_gpm == false) instead of
+ * (halted_bt_gpm == false) makes sure we are currently
+ * in UNHALT-ed mode and that BT can respond to the
+ * status query.
+ */
+ value = (!mci->unhalt_bt_gpm &&
+ mci->need_flush_btinfo) ? 1 : 0;
+ if (p_data)
+ mci->need_flush_btinfo =
+ (*p_data != 0) ? true : false;
+ break;
+
+ case MCI_STATE_RECOVER_RX:
+
+ ath_dbg(common, MCI, "MCI hw RECOVER_RX\n");
+ ar9003_mci_prep_interface(ah);
+ mci->query_bt = true;
+ mci->need_flush_btinfo = true;
+ ar9003_mci_send_coex_wlan_channels(ah, true);
+ ar9003_mci_2g5g_switch(ah, true);
+ break;
+
+ case MCI_STATE_NEED_FTP_STOMP:
+ value = !(mci->config & ATH_MCI_CONFIG_DISABLE_FTP_STOMP);
+ break;
+
+ case MCI_STATE_NEED_TUNING:
+ value = !(mci->config & ATH_MCI_CONFIG_DISABLE_TUNING);
+ break;
+
+ default:
+ break;
+
+ }
+
+ return value;
+}
+EXPORT_SYMBOL(ar9003_mci_state);
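+
+/*
+ * Illustrative usage sketch: higher layers can both query and update MCI
+ * bookkeeping through this single entry point, e.g.
+ *
+ *	if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE)
+ *		ar9003_mci_state(ah, MCI_STATE_SEND_STATUS_QUERY, NULL);
+ *
+ * p_data is only consulted by the state types that pass data in or out.
+ */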
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.h b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
new file mode 100644
index 0000000..798da11
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef AR9003_MCI_H
+#define AR9003_MCI_H
+
+#define MCI_FLAG_DISABLE_TIMESTAMP 0x00000001 /* Disable time stamp */
+
+/* Default remote BT device MCI COEX version */
+#define MCI_GPM_COEX_MAJOR_VERSION_DEFAULT 3
+#define MCI_GPM_COEX_MINOR_VERSION_DEFAULT 0
+
+/* Local WLAN MCI COEX version */
+#define MCI_GPM_COEX_MAJOR_VERSION_WLAN 3
+#define MCI_GPM_COEX_MINOR_VERSION_WLAN 0
+
+enum mci_gpm_coex_query_type {
+ MCI_GPM_COEX_QUERY_BT_ALL_INFO = BIT(0),
+ MCI_GPM_COEX_QUERY_BT_TOPOLOGY = BIT(1),
+ MCI_GPM_COEX_QUERY_BT_DEBUG = BIT(2),
+};
+
+enum mci_gpm_coex_halt_bt_gpm {
+ MCI_GPM_COEX_BT_GPM_UNHALT,
+ MCI_GPM_COEX_BT_GPM_HALT
+};
+
+enum mci_gpm_coex_bt_update_flags_op {
+ MCI_GPM_COEX_BT_FLAGS_READ,
+ MCI_GPM_COEX_BT_FLAGS_SET,
+ MCI_GPM_COEX_BT_FLAGS_CLEAR
+};
+
+#define MCI_NUM_BT_CHANNELS 79
+
+#define MCI_BT_MCI_FLAGS_UPDATE_CORR 0x00000002
+#define MCI_BT_MCI_FLAGS_UPDATE_HDR 0x00000004
+#define MCI_BT_MCI_FLAGS_UPDATE_PLD 0x00000008
+#define MCI_BT_MCI_FLAGS_LNA_CTRL 0x00000010
+#define MCI_BT_MCI_FLAGS_DEBUG 0x00000020
+#define MCI_BT_MCI_FLAGS_SCHED_MSG 0x00000040
+#define MCI_BT_MCI_FLAGS_CONT_MSG 0x00000080
+#define MCI_BT_MCI_FLAGS_COEX_GPM 0x00000100
+#define MCI_BT_MCI_FLAGS_CPU_INT_MSG 0x00000200
+#define MCI_BT_MCI_FLAGS_MCI_MODE 0x00000400
+#define MCI_BT_MCI_FLAGS_AR9462_MODE 0x00001000
+#define MCI_BT_MCI_FLAGS_OTHER 0x00010000
+
+#define MCI_DEFAULT_BT_MCI_FLAGS 0x00011dde
+
+#define MCI_TOGGLE_BT_MCI_FLAGS (MCI_BT_MCI_FLAGS_UPDATE_CORR | \
+ MCI_BT_MCI_FLAGS_UPDATE_HDR | \
+ MCI_BT_MCI_FLAGS_UPDATE_PLD | \
+ MCI_BT_MCI_FLAGS_MCI_MODE)
+
+#define MCI_2G_FLAGS_CLEAR_MASK 0x00000000
+#define MCI_2G_FLAGS_SET_MASK MCI_TOGGLE_BT_MCI_FLAGS
+#define MCI_2G_FLAGS MCI_DEFAULT_BT_MCI_FLAGS
+
+#define MCI_5G_FLAGS_CLEAR_MASK MCI_TOGGLE_BT_MCI_FLAGS
+#define MCI_5G_FLAGS_SET_MASK 0x00000000
+#define MCI_5G_FLAGS (MCI_DEFAULT_BT_MCI_FLAGS & \
+ ~MCI_TOGGLE_BT_MCI_FLAGS)
+
+/*
+ * Default value for AR9462 is 0x00002201
+ */
+#define ATH_MCI_CONFIG_CONCUR_TX 0x00000003
+#define ATH_MCI_CONFIG_MCI_OBS_MCI 0x00000004
+#define ATH_MCI_CONFIG_MCI_OBS_TXRX 0x00000008
+#define ATH_MCI_CONFIG_MCI_OBS_BT 0x00000010
+#define ATH_MCI_CONFIG_DISABLE_MCI_CAL 0x00000020
+#define ATH_MCI_CONFIG_DISABLE_OSLA 0x00000040
+#define ATH_MCI_CONFIG_DISABLE_FTP_STOMP 0x00000080
+#define ATH_MCI_CONFIG_AGGR_THRESH 0x00000700
+#define ATH_MCI_CONFIG_AGGR_THRESH_S 8
+#define ATH_MCI_CONFIG_DISABLE_AGGR_THRESH 0x00000800
+#define ATH_MCI_CONFIG_CLK_DIV 0x00003000
+#define ATH_MCI_CONFIG_CLK_DIV_S 12
+#define ATH_MCI_CONFIG_DISABLE_TUNING 0x00004000
+#define ATH_MCI_CONFIG_MCI_WEIGHT_DBG 0x40000000
+#define ATH_MCI_CONFIG_DISABLE_MCI 0x80000000
+
+#define ATH_MCI_CONFIG_MCI_OBS_MASK (ATH_MCI_CONFIG_MCI_OBS_MCI | \
+ ATH_MCI_CONFIG_MCI_OBS_TXRX | \
+ ATH_MCI_CONFIG_MCI_OBS_BT)
+#define ATH_MCI_CONFIG_MCI_OBS_GPIO 0x0000002F
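+
+/*
+ * Config fields that also have an _S shift define (e.g. AGGR_THRESH and
+ * CLK_DIV above) follow the usual MS()/SM() accessor convention, so an
+ * illustrative read/update would be:
+ *
+ *	aggr = MS(mci->config, ATH_MCI_CONFIG_AGGR_THRESH);
+ *	mci->config |= SM(1, ATH_MCI_CONFIG_CLK_DIV);
+ */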
+
+#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index a4450cb..59647a3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -119,8 +119,8 @@ static int ar9003_get_training_power_5g(struct ath_hw *ah)
break;
default:
delta = 0;
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Invalid tx-chainmask: %u\n", ah->txchainmask);
+ ath_dbg(common, CALIBRATE, "Invalid tx-chainmask: %u\n",
+ ah->txchainmask);
}
power += delta;
@@ -148,13 +148,12 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
else
training_power = ar9003_get_training_power_5g(ah);
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Training power: %d, Target power: %d\n",
+ ath_dbg(common, CALIBRATE, "Training power: %d, Target power: %d\n",
training_power, ah->paprd_target_power);
if (training_power < 0) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "PAPRD target power delta out of range");
+ ath_dbg(common, CALIBRATE,
+ "PAPRD target power delta out of range\n");
return -ERANGE;
}
ah->paprd_training_power = training_power;
@@ -311,8 +310,8 @@ static unsigned int ar9003_get_desired_gain(struct ath_hw *ah, int chain,
reg_cl_gain = AR_PHY_CL_TAB_2;
break;
default:
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
- "Invalid chainmask: %d\n", chain);
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
+ "Invalid chainmask: %d\n", chain);
break;
}
@@ -850,7 +849,7 @@ bool ar9003_paprd_is_done(struct ath_hw *ah)
agc2_pwr = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_STAT1,
AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_AGC2_PWR);
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
+ ath_dbg(ath9k_hw_common(ah), CALIBRATE,
"AGC2_PWR = 0x%x training done = 0x%x\n",
agc2_pwr, paprd_done);
/*
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 2330e7e..2b0bfb8 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -46,7 +46,7 @@ static const int m2ThreshExt_off = 127;
* @chan:
*
* This is the function to change channel on single-chip devices, that is
- * all devices after ar9280.
+ * the AR9300 family of chipsets.
*
* This function takes the channel value in MHz and sets
* hardware channel value. Assumes writes have been enabled to analog bus.
@@ -199,12 +199,14 @@ static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
synth_freq = chan->channel;
}
} else {
- range = 10;
+ range = AR_SREV_9462(ah) ? 5 : 10;
max_spur_cnts = 4;
synth_freq = chan->channel;
}
for (i = 0; i < max_spur_cnts; i++) {
+ if (AR_SREV_9462(ah) && (i == 0 || i == 3))
+ continue;
negative = 0;
if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah))
cur_bb_spur = FBIN2FREQ(spur_fbin_ptr[i],
@@ -880,7 +882,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
if (!on != aniState->ofdmWeakSigDetectOff) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: ofdm weak signal: %s=>%s\n",
chan->channel,
!aniState->ofdmWeakSigDetectOff ?
@@ -898,7 +900,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
u32 level = param;
if (level >= ARRAY_SIZE(firstep_table)) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"ATH9K_ANI_FIRSTEP_LEVEL: level out of range (%u > %zu)\n",
level, ARRAY_SIZE(firstep_table));
return false;
@@ -935,7 +937,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW, value2);
if (level != aniState->firstepLevel) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] firstep[level]=%d ini=%d\n",
chan->channel,
aniState->firstepLevel,
@@ -943,7 +945,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
ATH9K_ANI_FIRSTEP_LVL_NEW,
value,
aniState->iniDef.firstep);
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] firstep_low[level]=%d ini=%d\n",
chan->channel,
aniState->firstepLevel,
@@ -963,7 +965,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
u32 level = param;
if (level >= ARRAY_SIZE(cycpwrThr1_table)) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level out of range (%u > %zu)\n",
level, ARRAY_SIZE(cycpwrThr1_table));
return false;
@@ -999,7 +1001,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
AR_PHY_EXT_CYCPWR_THR1, value2);
if (level != aniState->spurImmunityLevel) {
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] cycpwrThr1[level]=%d ini=%d\n",
chan->channel,
aniState->spurImmunityLevel,
@@ -1007,7 +1009,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
value,
aniState->iniDef.cycpwrThr1);
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"** ch %d: level %d=>%d[def:%d] cycpwrThr1Ext[level]=%d ini=%d\n",
chan->channel,
aniState->spurImmunityLevel,
@@ -1034,8 +1036,7 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
AR_PHY_MRC_CCK_MUX_REG, is_on);
if (!is_on != aniState->mrcCCKOff) {
- ath_dbg(common, ATH_DBG_ANI,
- "** ch %d: MRC CCK: %s=>%s\n",
+ ath_dbg(common, ANI, "** ch %d: MRC CCK: %s=>%s\n",
chan->channel,
!aniState->mrcCCKOff ? "on" : "off",
is_on ? "on" : "off");
@@ -1050,11 +1051,11 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
case ATH9K_ANI_PRESENT:
break;
default:
- ath_dbg(common, ATH_DBG_ANI, "invalid cmd %u\n", cmd);
+ ath_dbg(common, ANI, "invalid cmd %u\n", cmd);
return false;
}
- ath_dbg(common, ATH_DBG_ANI,
+ ath_dbg(common, ANI,
"ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n",
aniState->spurImmunityLevel,
!aniState->ofdmWeakSigDetectOff ? "on" : "off",
@@ -1123,8 +1124,7 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
aniState = &ah->curchan->ani;
iniDef = &aniState->iniDef;
- ath_dbg(common, ATH_DBG_ANI,
- "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
+ ath_dbg(common, ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
ah->hw_version.macVersion,
ah->hw_version.macRev,
ah->opmode,
@@ -1386,7 +1386,7 @@ void ar9003_hw_bb_watchdog_config(struct ath_hw *ah)
~(AR_PHY_WATCHDOG_NON_IDLE_ENABLE |
AR_PHY_WATCHDOG_IDLE_ENABLE));
- ath_dbg(common, ATH_DBG_RESET, "Disabled BB Watchdog\n");
+ ath_dbg(common, RESET, "Disabled BB Watchdog\n");
return;
}
@@ -1422,8 +1422,7 @@ void ar9003_hw_bb_watchdog_config(struct ath_hw *ah)
AR_PHY_WATCHDOG_IDLE_MASK |
(AR_PHY_WATCHDOG_NON_IDLE_MASK & (idle_count << 2)));
- ath_dbg(common, ATH_DBG_RESET,
- "Enabled BB Watchdog timeout (%u ms)\n",
+ ath_dbg(common, RESET, "Enabled BB Watchdog timeout (%u ms)\n",
idle_tmo_ms);
}
@@ -1452,9 +1451,9 @@ void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah)
return;
status = ah->bb_watchdog_last_status;
- ath_dbg(common, ATH_DBG_RESET,
+ ath_dbg(common, RESET,
"\n==== BB update: BB status=0x%08x ====\n", status);
- ath_dbg(common, ATH_DBG_RESET,
+ ath_dbg(common, RESET,
"** BB state: wd=%u det=%u rdar=%u rOFDM=%d rCCK=%u tOFDM=%u tCCK=%u agc=%u src=%u **\n",
MS(status, AR_PHY_WATCHDOG_INFO),
MS(status, AR_PHY_WATCHDOG_DET_HANG),
@@ -1466,22 +1465,19 @@ void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah)
MS(status, AR_PHY_WATCHDOG_AGC_SM),
MS(status, AR_PHY_WATCHDOG_SRCH_SM));
- ath_dbg(common, ATH_DBG_RESET,
- "** BB WD cntl: cntl1=0x%08x cntl2=0x%08x **\n",
+ ath_dbg(common, RESET, "** BB WD cntl: cntl1=0x%08x cntl2=0x%08x **\n",
REG_READ(ah, AR_PHY_WATCHDOG_CTL_1),
REG_READ(ah, AR_PHY_WATCHDOG_CTL_2));
- ath_dbg(common, ATH_DBG_RESET,
- "** BB mode: BB_gen_controls=0x%08x **\n",
+ ath_dbg(common, RESET, "** BB mode: BB_gen_controls=0x%08x **\n",
REG_READ(ah, AR_PHY_GEN_CTRL));
#define PCT(_field) (common->cc_survey._field * 100 / common->cc_survey.cycles)
if (common->cc_survey.cycles)
- ath_dbg(common, ATH_DBG_RESET,
+ ath_dbg(common, RESET,
"** BB busy times: rx_clear=%d%%, rx_frame=%d%%, tx_frame=%d%% **\n",
PCT(rx_busy), PCT(rx_frame), PCT(tx_frame));
- ath_dbg(common, ATH_DBG_RESET,
- "==== BB update: done ====\n\n");
+ ath_dbg(common, RESET, "==== BB update: done ====\n\n");
}
EXPORT_SYMBOL(ar9003_hw_bb_watchdog_dbg_info);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 4114fe7..ed64114 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -389,6 +389,8 @@
#define AR_PHY_DAG_CTRLCCK_RSSI_THR_S 10
#define AR_PHY_RIFS_INIT_DELAY 0x3ff0000
+#define AR_PHY_AGC_QUICK_DROP 0x03c00000
+#define AR_PHY_AGC_QUICK_DROP_S 22
#define AR_PHY_AGC_COARSE_LOW 0x00007F80
#define AR_PHY_AGC_COARSE_LOW_S 7
#define AR_PHY_AGC_COARSE_HIGH 0x003F8000
@@ -488,6 +490,8 @@
#define AR_PHY_TEST_CTL_TSTADC_EN_S 8
#define AR_PHY_TEST_CTL_RX_OBS_SEL 0x3C00
#define AR_PHY_TEST_CTL_RX_OBS_SEL_S 10
+#define AR_PHY_TEST_CTL_DEBUGPORT_SEL 0xe0000000
+#define AR_PHY_TEST_CTL_DEBUGPORT_SEL_S 29
#define AR_PHY_TSTDAC (AR_SM_BASE + 0x168)
@@ -999,6 +1003,7 @@
/* GLB Registers */
#define AR_GLB_BASE 0x20000
+#define AR_GLB_GPIO_CONTROL (AR_GLB_BASE)
#define AR_PHY_GLB_CONTROL (AR_GLB_BASE + 0x44)
#define AR_GLB_SCRATCH(_ah) (AR_GLB_BASE + \
(AR_SREV_9462_20(_ah) ? 0x4c : 0x50))
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
index 48803ee..458bedf 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_rtt.c
@@ -16,6 +16,7 @@
#include "hw.h"
#include "ar9003_phy.h"
+#include "ar9003_rtt.h"
#define RTT_RESTORE_TIMEOUT 1000
#define RTT_ACCESS_TIMEOUT 100
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 9c51b39..dc2054f 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -41,24 +41,24 @@ static const u32 ar9462_pciephy_clkreq_enable_L1_2p0[][2] = {
static const u32 ar9462_2p0_baseband_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a8011},
- {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e},
- {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
- {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
+ {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8011, 0xd00a800d},
+ {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae},
+ {0x00009824, 0x5ac640de, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
+ {0x00009828, 0x0796be89, 0x0696b081, 0x0696b881, 0x09143e81},
{0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
{0x00009830, 0x0000059c, 0x0000059c, 0x0000119c, 0x0000119c},
{0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
{0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
{0x00009e04, 0x001c2020, 0x001c2020, 0x001c2020, 0x001c2020},
- {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
- {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
- {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3039605e, 0x33795d5e},
+ {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8},
+ {0x00009e10, 0x92c88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e},
+ {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3376605e, 0x33795d5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
- {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c782},
- {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
+ {0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
+ {0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
{0x0000a204, 0x013187c0, 0x013187c4, 0x013187c4, 0x013187c0},
@@ -81,6 +81,15 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
{0x0000a2d0, 0x00041981, 0x00041981, 0x00041981, 0x00041982},
{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
{0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
+ {0x0000a3a4, 0x00000010, 0x00000010, 0x00000000, 0x00000000},
+ {0x0000a3a8, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa},
+ {0x0000a3ac, 0xaaaaaa00, 0xaaaaaa30, 0xaaaaaa00, 0xaaaaaa00},
+ {0x0000a41c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+ {0x0000a420, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
+ {0x0000a424, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+ {0x0000a428, 0x000001ce, 0x000001ce, 0x000001ce, 0x000001ce},
+ {0x0000a42c, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
+ {0x0000a430, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce, 0x1ce739ce},
{0x0000a830, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
{0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
{0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -688,8 +697,8 @@ static const u32 ar9462_2p0_mac_postamble_emulation[][5] = {
static const u32 ar9462_2p0_radio_postamble_sys3ant[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808},
- {0x00016140, 0x10804008, 0x10804008, 0x90804008, 0x90804008},
- {0x00016540, 0x10804008, 0x10804008, 0x90804008, 0x90804008},
+ {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+ {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
};
static const u32 ar9462_2p0_baseband_postamble_emulation[][5] = {
@@ -717,8 +726,8 @@ static const u32 ar9462_2p0_baseband_postamble_emulation[][5] = {
static const u32 ar9462_2p0_radio_postamble_sys2ant[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
{0x000160ac, 0xa4646c08, 0xa4646c08, 0x24645808, 0x24645808},
- {0x00016140, 0x10804008, 0x10804008, 0x90804008, 0x90804008},
- {0x00016540, 0x10804008, 0x10804008, 0x90804008, 0x90804008},
+ {0x00016140, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
+ {0x00016540, 0x10804008, 0x10804008, 0x50804008, 0x50804008},
};
static const u32 ar9462_common_wo_xlna_rx_gain_table_2p0[][2] = {
@@ -1059,7 +1068,7 @@ static const u32 ar9462_modes_low_ob_db_tx_gain_table_2p0[][5] = {
static const u32 ar9462_2p0_soc_postamble[][5] = {
/* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
- {0x00007010, 0x00002233, 0x00002233, 0x00002233, 0x00002233},
+ {0x00007010, 0x00000033, 0x00000033, 0x00000033, 0x00000033},
};
static const u32 ar9462_2p0_baseband_core[][2] = {
@@ -1107,11 +1116,11 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
{0x00009e30, 0x06336f77},
{0x00009e34, 0x6af6532f},
{0x00009e38, 0x0cc80c00},
- {0x00009e40, 0x0d261820},
+ {0x00009e40, 0x15262820},
{0x00009e4c, 0x00001004},
{0x00009e50, 0x00ff03f1},
- {0x00009e54, 0xe4c355c7},
- {0x00009e58, 0xfd897735},
+ {0x00009e54, 0xe4c555c2},
+ {0x00009e58, 0xfd857722},
{0x00009e5c, 0xe9198724},
{0x00009fc0, 0x803e4788},
{0x00009fc4, 0x0001efb5},
@@ -1142,9 +1151,6 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
{0x0000a398, 0x001f0e0f},
{0x0000a39c, 0x0075393f},
{0x0000a3a0, 0xb79f6427},
- {0x0000a3a4, 0x00000000},
- {0x0000a3a8, 0xaaaaaaaa},
- {0x0000a3ac, 0x3c466478},
{0x0000a3c0, 0x20202020},
{0x0000a3c4, 0x22222220},
{0x0000a3c8, 0x20200020},
@@ -1167,12 +1173,6 @@ static const u32 ar9462_2p0_baseband_core[][2] = {
{0x0000a40c, 0x00820820},
{0x0000a414, 0x1ce739ce},
{0x0000a418, 0x2d001dce},
- {0x0000a41c, 0x1ce739ce},
- {0x0000a420, 0x000001ce},
- {0x0000a424, 0x1ce739ce},
- {0x0000a428, 0x000001ce},
- {0x0000a42c, 0x1ce739ce},
- {0x0000a430, 0x1ce739ce},
{0x0000a434, 0x00000000},
{0x0000a438, 0x00001801},
{0x0000a43c, 0x00100000},
@@ -1257,8 +1257,8 @@ static const u32 ar9462_modes_high_ob_db_tx_gain_table_2p0[][5] = {
{0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
{0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
{0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
- {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
- {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
+ {0x0000a54c, 0x59025eb6, 0x59025eb6, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001c84, 0x44001c84},
{0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
{0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
{0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
@@ -1850,8 +1850,8 @@ static const u32 ar9462_modes_green_ob_db_tx_gain_table_2p0[][5] = {
{0x0000a540, 0x48025e6c, 0x48025e6c, 0x38001660, 0x38001660},
{0x0000a544, 0x4e025e8e, 0x4e025e8e, 0x3b001861, 0x3b001861},
{0x0000a548, 0x53025eb2, 0x53025eb2, 0x3e001a81, 0x3e001a81},
- {0x0000a54c, 0x59025eb2, 0x59025eb2, 0x42001a83, 0x42001a83},
- {0x0000a550, 0x5f025ef6, 0x5f025ef6, 0x44001c84, 0x44001c84},
+ {0x0000a54c, 0x59025eb6, 0x59025eb6, 0x42001a83, 0x42001a83},
+ {0x0000a550, 0x5d025ef6, 0x5d025ef6, 0x44001c84, 0x44001c84},
{0x0000a554, 0x62025f56, 0x62025f56, 0x48001ce3, 0x48001ce3},
{0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
{0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 1c269f5..171ccf7 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -25,6 +25,7 @@
#include "debug.h"
#include "common.h"
+#include "mci.h"
/*
* Header for the ath9k.ko driver core *only* -- hw code nor any other driver
@@ -96,7 +97,7 @@ enum buffer_type {
#define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU)
#define bf_isaggr(bf) (bf->bf_state.bf_type & BUF_AGGR)
-#define ATH_TXSTATUS_RING_SIZE 64
+#define ATH_TXSTATUS_RING_SIZE 512
#define DS2PHYS(_dd, _ds) \
((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
@@ -158,6 +159,9 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
/* return block-ack bitmap index given sequence and starting sequence */
#define ATH_BA_INDEX(_st, _seq) (((_seq) - (_st)) & (IEEE80211_SEQ_MAX - 1))
+/* return the seqno for _start + _offset */
+#define ATH_BA_INDEX2SEQ(_seq, _offset) (((_seq) + (_offset)) & (IEEE80211_SEQ_MAX - 1))
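+/* e.g. _seq 4090 + _offset 10 wraps to seqno 4 (IEEE80211_SEQ_MAX is 4096) */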
+
/* returns delimiter padding required given the packet length */
#define ATH_AGGR_GET_NDELIM(_len) \
(((_len) >= ATH_AGGR_MINPLEN) ? 0 : \
@@ -192,6 +196,7 @@ struct ath_txq {
u8 txq_headidx;
u8 txq_tailidx;
int pending_frames;
+ struct sk_buff_head complete_q;
};
struct ath_atx_ac {
@@ -237,6 +242,7 @@ struct ath_atx_tid {
struct ath_node *an;
struct ath_atx_ac *ac;
unsigned long tx_buf[BITS_TO_LONGS(ATH_TID_MAX_BUFS)];
+ int bar_index;
u16 seq_start;
u16 seq_next;
u16 baw_size;
@@ -251,8 +257,9 @@ struct ath_atx_tid {
struct ath_node {
#ifdef CONFIG_ATH9K_DEBUGFS
struct list_head list; /* for sc->nodes */
- struct ieee80211_sta *sta; /* station struct we're part of */
#endif
+ struct ieee80211_sta *sta; /* station struct we're part of */
+ struct ieee80211_vif *vif; /* interface with which we're associated */
struct ath_atx_tid tid[WME_NUM_TID];
struct ath_atx_ac ac[WME_NUM_AC];
int ps_key;
@@ -274,7 +281,6 @@ struct ath_tx_control {
};
#define ATH_TX_ERROR 0x01
-#define ATH_TX_BAR 0x02
/**
* @txq_map: Index is mac80211 queue number. This is
@@ -443,7 +449,9 @@ struct ath_btcoex {
u32 btcoex_no_stomp; /* in usec */
u32 btcoex_period; /* in usec */
u32 btscan_no_stomp; /* in usec */
+ u32 duty_cycle;
struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */
+ struct ath_mci_profile mci;
};
int ath_init_btcoex_timer(struct ath_softc *sc);
@@ -458,7 +466,7 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc);
#define ATH_LED_PIN_9287 8
#define ATH_LED_PIN_9300 10
#define ATH_LED_PIN_9485 6
-#define ATH_LED_PIN_9462 0
+#define ATH_LED_PIN_9462 4
#ifdef CONFIG_MAC80211_LEDS
void ath_init_leds(struct ath_softc *sc);
@@ -538,7 +546,7 @@ struct ath_ant_comb {
#define DEFAULT_CACHELINE 32
#define ATH_REGCLASSIDS_MAX 10
#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */
-#define ATH_MAX_SW_RETRIES 10
+#define ATH_MAX_SW_RETRIES 30
#define ATH_CHAN_MAX 255
#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
@@ -643,6 +651,7 @@ struct ath_softc {
struct delayed_work tx_complete_work;
struct delayed_work hw_pll_work;
struct ath_btcoex btcoex;
+ struct ath_mci_coex mci_coex;
struct ath_descdma txsdma;
@@ -670,7 +679,6 @@ void ath9k_deinit_device(struct ath_softc *sc);
void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
void ath9k_reload_chainmask_settings(struct ath_softc *sc);
-void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw);
bool ath9k_uses_beacons(int type);
#ifdef CONFIG_ATH9K_PCI
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index a13cabb..b8967e4 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -117,11 +117,10 @@ static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
memset(&txctl, 0, sizeof(struct ath_tx_control));
txctl.txq = sc->beacon.cabq;
- ath_dbg(common, ATH_DBG_XMIT,
- "transmitting CABQ packet, skb: %p\n", skb);
+ ath_dbg(common, XMIT, "transmitting CABQ packet, skb: %p\n", skb);
if (ath_tx_start(hw, skb, &txctl) != 0) {
- ath_dbg(common, ATH_DBG_XMIT, "CABQ TX failed\n");
+ ath_dbg(common, XMIT, "CABQ TX failed\n");
dev_kfree_skb_any(skb);
}
}
@@ -204,7 +203,7 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
if (skb && cabq_depth) {
if (sc->nvifs > 1) {
- ath_dbg(common, ATH_DBG_BEACON,
+ ath_dbg(common, BEACON,
"Flushing previous cabq traffic\n");
ath_draintxq(sc, cabq, false);
}
@@ -297,7 +296,7 @@ int ath_beacon_alloc(struct ath_softc *sc, struct ieee80211_vif *vif)
tsfadjust = TU_TO_USEC(intval * avp->av_bslot) / ATH_BCBUF;
avp->tsf_adjust = cpu_to_le64(tsfadjust);
- ath_dbg(common, ATH_DBG_BEACON,
+ ath_dbg(common, BEACON,
"stagger beacons, bslot %d intval %u tsfadjust %llu\n",
avp->av_bslot, intval, (unsigned long long)tsfadjust);
@@ -357,6 +356,7 @@ void ath_beacon_tasklet(unsigned long data)
struct ath_buf *bf = NULL;
struct ieee80211_vif *vif;
struct ath_tx_status ts;
+ bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
int slot;
u32 bfaddr, bc = 0;
@@ -371,15 +371,14 @@ void ath_beacon_tasklet(unsigned long data)
sc->beacon.bmisscnt++;
if (sc->beacon.bmisscnt < BSTUCK_THRESH * sc->nbcnvifs) {
- ath_dbg(common, ATH_DBG_BSTUCK,
+ ath_dbg(common, BSTUCK,
"missed %u consecutive beacons\n",
sc->beacon.bmisscnt);
ath9k_hw_stop_dma_queue(ah, sc->beacon.beaconq);
if (sc->beacon.bmisscnt > 3)
ath9k_hw_bstuck_nfcal(ah);
} else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
- ath_dbg(common, ATH_DBG_BSTUCK,
- "beacon is officially stuck\n");
+ ath_dbg(common, BSTUCK, "beacon is officially stuck\n");
sc->sc_flags |= SC_OP_TSF_RESET;
ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
}
@@ -406,7 +405,7 @@ void ath_beacon_tasklet(unsigned long data)
slot = (tsftu % (intval * ATH_BCBUF)) / intval;
vif = sc->beacon.bslot[slot];
- ath_dbg(common, ATH_DBG_BEACON,
+ ath_dbg(common, BEACON,
"slot %d [tsf %llu tsftu %u intval %u] vif %p\n",
slot, tsf, tsftu / ATH_BCBUF, intval, vif);
} else {
@@ -424,7 +423,7 @@ void ath_beacon_tasklet(unsigned long data)
}
if (sc->beacon.bmisscnt != 0) {
- ath_dbg(common, ATH_DBG_BSTUCK,
+ ath_dbg(common, BSTUCK,
"resume beacon xmit after %u misses\n",
sc->beacon.bmisscnt);
sc->beacon.bmisscnt = 0;
@@ -458,10 +457,12 @@ void ath_beacon_tasklet(unsigned long data)
if (bfaddr != 0) {
/* NB: cabq traffic should already be queued and primed */
ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bfaddr);
- ath9k_hw_txstart(ah, sc->beacon.beaconq);
+
+ if (!edma)
+ ath9k_hw_txstart(ah, sc->beacon.beaconq);
sc->beacon.ast_be_xmit += bc; /* XXX per-vif? */
- if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
+ if (edma) {
spin_lock_bh(&sc->sc_pcu_lock);
ath9k_hw_txprocdesc(ah, bf->bf_desc, (void *)&ts);
spin_unlock_bh(&sc->sc_pcu_lock);
@@ -541,7 +542,7 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
/* No need to configure beacon if we are not associated */
if (!common->curaid) {
- ath_dbg(common, ATH_DBG_BEACON,
+ ath_dbg(common, BEACON,
"STA is not yet associated..skipping beacon config\n");
return;
}
@@ -631,8 +632,8 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
/* TSF out of range threshold fixed at 1 second */
bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
- ath_dbg(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
- ath_dbg(common, ATH_DBG_BEACON,
+ ath_dbg(common, BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
+ ath_dbg(common, BEACON,
"bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
bs.bs_bmissthreshold, bs.bs_sleepduration,
bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
@@ -660,8 +661,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
tsf = roundup(ath9k_hw_gettsf32(ah) + TU_TO_USEC(FUDGE), intval);
nexttbtt = tsf + intval;
- ath_dbg(common, ATH_DBG_BEACON,
- "IBSS nexttbtt %u intval %u (%u)\n",
+ ath_dbg(common, BEACON, "IBSS nexttbtt %u intval %u (%u)\n",
nexttbtt, intval, conf->beacon_interval);
/*
@@ -699,9 +699,8 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
(sc->nbcnvifs > 1) &&
(vif->type == NL80211_IFTYPE_AP) &&
(cur_conf->beacon_interval != bss_conf->beacon_int)) {
- ath_dbg(common, ATH_DBG_CONFIG,
- "Changing beacon interval of multiple \
- AP interfaces !\n");
+ ath_dbg(common, CONFIG,
+ "Changing beacon interval of multiple AP interfaces !\n");
return false;
}
/*
@@ -710,7 +709,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
*/
if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
(vif->type != NL80211_IFTYPE_AP)) {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"STA vif's beacon not allowed on AP mode\n");
return false;
}
@@ -722,7 +721,7 @@ static bool ath9k_allow_beacon_config(struct ath_softc *sc,
(vif->type == NL80211_IFTYPE_STATION) &&
(sc->sc_flags & SC_OP_BEACONS) &&
!avp->primary_sta_vif) {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Beacon already configured for a station interface\n");
return false;
}
@@ -802,8 +801,7 @@ void ath_set_beacon(struct ath_softc *sc)
ath_beacon_config_sta(sc, cur_conf);
break;
default:
- ath_dbg(common, ATH_DBG_CONFIG,
- "Unsupported beaconing mode\n");
+ ath_dbg(common, CONFIG, "Unsupported beaconing mode\n");
return;
}
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 0122639..a6712a9 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -21,7 +21,7 @@ enum ath_bt_mode {
ATH_BT_COEX_MODE_LEGACY, /* legacy rx_clear mode */
ATH_BT_COEX_MODE_UNSLOTTED, /* untimed/unslotted mode */
ATH_BT_COEX_MODE_SLOTTED, /* slotted mode */
- ATH_BT_COEX_MODE_DISALBED, /* coexistence disabled */
+ ATH_BT_COEX_MODE_DISABLED, /* coexistence disabled */
};
struct ath_btcoex_config {
@@ -36,6 +36,20 @@ struct ath_btcoex_config {
bool bt_hold_rx_clear;
};
+static const u32 ar9003_wlan_weights[ATH_BTCOEX_STOMP_MAX]
+ [AR9300_NUM_WLAN_WEIGHTS] = {
+ { 0xfffffff0, 0xfffffff0, 0xfffffff0, 0xfffffff0 }, /* STOMP_ALL */
+ { 0x88888880, 0x88888880, 0x88888880, 0x88888880 }, /* STOMP_LOW */
+ { 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, /* STOMP_NONE */
+};
+
+static const u32 ar9462_wlan_weights[ATH_BTCOEX_STOMP_MAX]
+ [AR9300_NUM_WLAN_WEIGHTS] = {
+ { 0x01017d01, 0x41414101, 0x41414101, 0x41414141 }, /* STOMP_ALL */
+ { 0x01017d01, 0x3b3b3b01, 0x3b3b3b01, 0x3b3b3b3b }, /* STOMP_LOW */
+ { 0x01017d01, 0x01010101, 0x01010101, 0x01010101 }, /* STOMP_NONE */
+ { 0x01017d01, 0x013b0101, 0x3b3b0101, 0x3b3b013b }, /* STOMP_LOW_FTP */
+};
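+
+/*
+ * Both tables are indexed as [stomp type][weight register index]; e.g.
+ * ar9462_wlan_weights[ATH_BTCOEX_STOMP_LOW][0] is the first WLAN weight
+ * word programmed for the STOMP_LOW policy on AR9462.
+ */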
void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
{
@@ -54,6 +68,9 @@ void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
u32 i, idx;
bool rxclear_polarity = ath_bt_config.bt_rxclear_polarity;
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
if (AR_SREV_9300_20_OR_LATER(ah))
rxclear_polarity = !ath_bt_config.bt_rxclear_polarity;
@@ -85,6 +102,9 @@ void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah)
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
/* connect bt_active to baseband */
REG_CLR_BIT(ah, AR_GPIO_INPUT_EN_VAL,
(AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF |
@@ -107,6 +127,9 @@ void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah)
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
/* btcoex 3-wire */
REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
(AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB |
@@ -133,6 +156,9 @@ static void ath9k_hw_btcoex_enable_2wire(struct ath_hw *ah)
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
/* Configure the desired GPIO port for TX_FRAME output */
ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
AR_GPIO_OUTPUT_MUX_AS_TX_FRAME);
@@ -144,6 +170,9 @@ void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
btcoex_hw->bt_coex_weights = SM(bt_weight, AR_BTCOEX_BT_WGHT) |
SM(wlan_weight, AR_BTCOEX_WL_WGHT);
}
@@ -152,27 +181,26 @@ EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight);
static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
{
- struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
u32 val;
+ int i;
/*
* Program coex mode and weight registers to
* enable coex 3-wire
*/
- REG_WRITE(ah, AR_BT_COEX_MODE, btcoex_hw->bt_coex_mode);
- REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex_hw->bt_coex_mode2);
+ REG_WRITE(ah, AR_BT_COEX_MODE, btcoex->bt_coex_mode);
+ REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex->bt_coex_mode2);
if (AR_SREV_9300_20_OR_LATER(ah)) {
- REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, ah->bt_coex_wlan_weight[0]);
- REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, ah->bt_coex_wlan_weight[1]);
- REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS0, ah->bt_coex_bt_weight[0]);
- REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS1, ah->bt_coex_bt_weight[1]);
- REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS2, ah->bt_coex_bt_weight[2]);
- REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS3, ah->bt_coex_bt_weight[3]);
-
+ REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, btcoex->wlan_weight[0]);
+ REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, btcoex->wlan_weight[1]);
+ for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+ REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS(i),
+ btcoex->bt_weight[i]);
} else
- REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex_hw->bt_coex_weights);
+ REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex->bt_coex_weights);
@@ -185,23 +213,39 @@ static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
- ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
+ ath9k_hw_cfg_output(ah, btcoex->wlanactive_gpio,
AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL);
}
+static void ath9k_hw_btcoex_enable_mci(struct ath_hw *ah)
+{
+ struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
+ int i;
+
+ for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+ REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
+ btcoex->wlan_weight[i]);
+
+ REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
+ btcoex->enabled = true;
+}
+
void ath9k_hw_btcoex_enable(struct ath_hw *ah)
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
- switch (btcoex_hw->scheme) {
+ switch (ath9k_hw_get_btcoex_scheme(ah)) {
case ATH_BTCOEX_CFG_NONE:
- break;
+ return;
case ATH_BTCOEX_CFG_2WIRE:
ath9k_hw_btcoex_enable_2wire(ah);
break;
case ATH_BTCOEX_CFG_3WIRE:
ath9k_hw_btcoex_enable_3wire(ah);
break;
+ case ATH_BTCOEX_CFG_MCI:
+ ath9k_hw_btcoex_enable_mci(ah);
+ return;
}
REG_RMW(ah, AR_GPIO_PDPU,
@@ -215,7 +259,18 @@ EXPORT_SYMBOL(ath9k_hw_btcoex_enable);
void ath9k_hw_btcoex_disable(struct ath_hw *ah)
{
struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
+ int i;
+
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+ btcoex_hw->enabled = false;
+ if (btcoex_hw->scheme == ATH_BTCOEX_CFG_MCI) {
+ ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
+ for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+ REG_WRITE(ah, AR_MCI_COEX_WL_WEIGHTS(i),
+ btcoex_hw->wlan_weight[i]);
+ }
ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0);
ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
@@ -228,49 +283,27 @@ void ath9k_hw_btcoex_disable(struct ath_hw *ah)
if (AR_SREV_9300_20_OR_LATER(ah)) {
REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS0, 0);
REG_WRITE(ah, AR_BT_COEX_WL_WEIGHTS1, 0);
- REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS0, 0);
- REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS1, 0);
- REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS2, 0);
- REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS3, 0);
+ for (i = 0; i < AR9300_NUM_BT_WEIGHTS; i++)
+ REG_WRITE(ah, AR_BT_COEX_BT_WEIGHTS(i), 0);
} else
REG_WRITE(ah, AR_BT_COEX_WEIGHT, 0);
}
-
- ah->btcoex_hw.enabled = false;
}
EXPORT_SYMBOL(ath9k_hw_btcoex_disable);
static void ar9003_btcoex_bt_stomp(struct ath_hw *ah,
enum ath_stomp_type stomp_type)
{
- ah->bt_coex_bt_weight[0] = AR9300_BT_WGHT;
- ah->bt_coex_bt_weight[1] = AR9300_BT_WGHT;
- ah->bt_coex_bt_weight[2] = AR9300_BT_WGHT;
- ah->bt_coex_bt_weight[3] = AR9300_BT_WGHT;
-
-
- switch (stomp_type) {
- case ATH_BTCOEX_STOMP_ALL:
- ah->bt_coex_wlan_weight[0] = AR9300_STOMP_ALL_WLAN_WGHT0;
- ah->bt_coex_wlan_weight[1] = AR9300_STOMP_ALL_WLAN_WGHT1;
- break;
- case ATH_BTCOEX_STOMP_LOW:
- ah->bt_coex_wlan_weight[0] = AR9300_STOMP_LOW_WLAN_WGHT0;
- ah->bt_coex_wlan_weight[1] = AR9300_STOMP_LOW_WLAN_WGHT1;
- break;
- case ATH_BTCOEX_STOMP_NONE:
- ah->bt_coex_wlan_weight[0] = AR9300_STOMP_NONE_WLAN_WGHT0;
- ah->bt_coex_wlan_weight[1] = AR9300_STOMP_NONE_WLAN_WGHT1;
- break;
-
- default:
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "Invalid Stomptype\n");
- break;
+ struct ath_btcoex_hw *btcoex = &ah->btcoex_hw;
+	const u32 *weight = AR_SREV_9462(ah) ? ar9462_wlan_weights[stomp_type] :
+					       ar9003_wlan_weights[stomp_type];
+ int i;
+
+ for (i = 0; i < AR9300_NUM_WLAN_WEIGHTS; i++) {
+ btcoex->bt_weight[i] = AR9300_BT_WGHT;
+ btcoex->wlan_weight[i] = weight[i];
}
-
- ath9k_hw_btcoex_enable(ah);
}
/*
@@ -279,6 +312,9 @@ static void ar9003_btcoex_bt_stomp(struct ath_hw *ah,
void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
enum ath_stomp_type stomp_type)
{
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
if (AR_SREV_9300_20_OR_LATER(ah)) {
ar9003_btcoex_bt_stomp(ah, stomp_type);
return;
@@ -298,11 +334,8 @@ void ath9k_hw_btcoex_bt_stomp(struct ath_hw *ah,
AR_STOMP_NONE_WLAN_WGHT);
break;
default:
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "Invalid Stomptype\n");
+ ath_dbg(ath9k_hw_common(ah), BTCOEX, "Invalid Stomptype\n");
break;
}
-
- ath9k_hw_btcoex_enable(ah);
}
EXPORT_SYMBOL(ath9k_hw_btcoex_bt_stomp);
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 234f776..278361c 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -36,22 +36,57 @@
#define ATH_BT_CNT_THRESHOLD 3
#define ATH_BT_CNT_SCAN_THRESHOLD 15
+#define AR9300_NUM_BT_WEIGHTS 4
+#define AR9300_NUM_WLAN_WEIGHTS 4
/* Defines the BT AR_BT_COEX_WGHT used */
enum ath_stomp_type {
- ATH_BTCOEX_NO_STOMP,
ATH_BTCOEX_STOMP_ALL,
ATH_BTCOEX_STOMP_LOW,
- ATH_BTCOEX_STOMP_NONE
+ ATH_BTCOEX_STOMP_NONE,
+ ATH_BTCOEX_STOMP_LOW_FTP,
+ ATH_BTCOEX_STOMP_MAX
};
enum ath_btcoex_scheme {
ATH_BTCOEX_CFG_NONE,
ATH_BTCOEX_CFG_2WIRE,
ATH_BTCOEX_CFG_3WIRE,
+ ATH_BTCOEX_CFG_MCI,
+};
+
+struct ath9k_hw_mci {
+ u32 raw_intr;
+ u32 rx_msg_intr;
+ u32 cont_status;
+ u32 gpm_addr;
+ u32 gpm_len;
+ u32 gpm_idx;
+ u32 sched_addr;
+ u32 wlan_channels[4];
+ u32 wlan_cal_seq;
+ u32 wlan_cal_done;
+ u32 config;
+ u8 *gpm_buf;
+ u8 *sched_buf;
+ bool ready;
+ bool update_2g5g;
+ bool is_2g;
+ bool query_bt;
+ bool unhalt_bt_gpm; /* need to send UNHALT */
+ bool halted_bt_gpm; /* HALT sent */
+ bool need_flush_btinfo;
+ bool bt_version_known;
+ bool wlan_channels_update;
+ u8 wlan_ver_major;
+ u8 wlan_ver_minor;
+ u8 bt_ver_major;
+ u8 bt_ver_minor;
+ u8 bt_state;
};
struct ath_btcoex_hw {
enum ath_btcoex_scheme scheme;
+ struct ath9k_hw_mci mci;
bool enabled;
u8 wlanactive_gpio;
u8 btactive_gpio;
@@ -59,6 +94,8 @@ struct ath_btcoex_hw {
u32 bt_coex_mode; /* Register setting for AR_BT_COEX_MODE */
u32 bt_coex_weights; /* Register setting for AR_BT_COEX_WEIGHT */
u32 bt_coex_mode2; /* Register setting for AR_BT_COEX_MODE2 */
+ u32 bt_weight[AR9300_NUM_BT_WEIGHTS];
+ u32 wlan_weight[AR9300_NUM_WLAN_WEIGHTS];
};
void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah);
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 9953881..2f4b48e 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -116,7 +116,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
if (h[i].privNF > limit->max) {
high_nf_mid = true;
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"NFmid[%d] (%d) > MAX (%d), %s\n",
i, h[i].privNF, limit->max,
(cal->nfcal_interference ?
@@ -199,8 +199,7 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
return true;
if (currCal->calState != CAL_DONE) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Calibration state incorrect, %d\n",
+ ath_dbg(common, CALIBRATE, "Calibration state incorrect, %d\n",
currCal->calState);
return true;
}
@@ -208,8 +207,7 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
if (!(ah->supp_cals & currCal->calData->calType))
return true;
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Resetting Cal %d state for channel %u\n",
+ ath_dbg(common, CALIBRATE, "Resetting Cal %d state for channel %u\n",
currCal->calData->calType, conf->channel->center_freq);
ah->caldata->CalValid &= ~currCal->calData->calType;
@@ -302,7 +300,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
* noisefloor until the next calibration timer.
*/
if (j == 10000) {
- ath_dbg(common, ATH_DBG_ANY,
+ ath_dbg(common, ANY,
"Timeout while waiting for nf to load: AR_PHY_AGC_CONTROL=0x%x\n",
REG_READ(ah, AR_PHY_AGC_CONTROL));
return;
@@ -344,17 +342,17 @@ static void ath9k_hw_nf_sanitize(struct ath_hw *ah, s16 *nf)
if (!nf[i])
continue;
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"NF calibrated [%s] [chain %d] is %d\n",
(i >= 3 ? "ext" : "ctl"), i % 3, nf[i]);
if (nf[i] > ATH9K_NF_TOO_HIGH) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"NF[%d] (%d) > MAX (%d), correcting to MAX\n",
i, nf[i], ATH9K_NF_TOO_HIGH);
nf[i] = limit->max;
} else if (nf[i] < limit->min) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"NF[%d] (%d) < MIN (%d), correcting to NOM\n",
i, nf[i], limit->min);
nf[i] = limit->nominal;
@@ -373,7 +371,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
chan->channelFlags &= (~CHANNEL_CW_INT);
if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"NF did not complete in calibration window\n");
return false;
}
@@ -383,7 +381,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
nf = nfarray[0];
if (ath9k_hw_get_nf_thresh(ah, c->band, &nfThresh)
&& nf > nfThresh) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"noise floor failed detected; detected %d, threshold %d\n",
nf, nfThresh);
chan->channelFlags |= CHANNEL_CW_INT;
@@ -402,6 +400,7 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
ah->noise = ath9k_hw_getchan_noise(ah, chan);
return true;
}
+EXPORT_SYMBOL(ath9k_hw_getnf);
void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
struct ath9k_channel *chan)
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index 05b9dbf..3b33996d 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -19,7 +19,6 @@
#include "hw.h"
-#define AR_PHY_CCA_FILTERWINDOW_LENGTH_INIT 3
#define AR_PHY_CCA_FILTERWINDOW_LENGTH 5
#define NUM_NF_READINGS 6
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 2741203..68d972b 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -709,24 +709,29 @@ static ssize_t read_file_stations(struct file *file, char __user *user_buf,
len += snprintf(buf + len, size - len,
"Stations:\n"
- " tid: addr sched paused buf_q-empty an ac\n"
+ " tid: addr sched paused buf_q-empty an ac baw\n"
" ac: addr sched tid_q-empty txq\n");
spin_lock(&sc->nodes_lock);
list_for_each_entry(an, &sc->nodes, list) {
+ unsigned short ma = an->maxampdu;
+ if (ma == 0)
+ ma = 65535; /* see ath_lookup_rate */
len += snprintf(buf + len, size - len,
- "%pM\n", an->sta->addr);
+ "iface: %pM sta: %pM max-ampdu: %hu mpdu-density: %uus\n",
+ an->vif->addr, an->sta->addr, ma,
+ (unsigned int)(an->mpdudensity));
if (len >= size)
goto done;
for (q = 0; q < WME_NUM_TID; q++) {
struct ath_atx_tid *tid = &(an->tid[q]);
len += snprintf(buf + len, size - len,
- " tid: %p %s %s %i %p %p\n",
+ " tid: %p %s %s %i %p %p %hu\n",
tid, tid->sched ? "sched" : "idle",
tid->paused ? "paused" : "running",
skb_queue_empty(&tid->buf_q),
- tid->an, tid->ac);
+ tid->an, tid->ac, tid->baw_size);
if (len >= size)
goto done;
}
@@ -851,7 +856,7 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len;
if (bf_isampdu(bf)) {
- if (flags & ATH_TX_BAR)
+ if (flags & ATH_TX_ERROR)
TX_STAT_INC(qnum, a_xretries);
else
TX_STAT_INC(qnum, a_completed);
@@ -1625,6 +1630,9 @@ int ath9k_init_debug(struct ath_hw *ah)
debugfs_create_file("debug", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy,
sc, &fops_debug);
#endif
+
+ ath9k_dfs_init_debug(sc);
+
debugfs_create_file("dma", S_IRUSR, sc->debug.debugfs_phy, sc,
&fops_dma);
debugfs_create_file("interrupt", S_IRUSR, sc->debug.debugfs_phy, sc,
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 356352a..776a24a 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -19,6 +19,7 @@
#include "hw.h"
#include "rc.h"
+#include "dfs_debug.h"
struct ath_txq;
struct ath_buf;
@@ -187,6 +188,7 @@ struct ath_stats {
struct ath_interrupt_stats istats;
struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES];
struct ath_rx_stats rxstats;
+ struct ath_dfs_stats dfs_stats;
u32 reset[__RESET_TYPE_MAX];
};
diff --git a/drivers/net/wireless/ath/ath9k/dfs.c b/drivers/net/wireless/ath/ath9k/dfs.c
new file mode 100644
index 0000000..f4f56af
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "hw.h"
+#include "hw-ops.h"
+#include "ath9k.h"
+#include "dfs.h"
+#include "dfs_debug.h"
+
+/*
+ * TODO: move into or synchronize this with generic header
+ * as soon as IF is defined
+ */
+struct dfs_radar_pulse {
+ u16 freq;
+ u64 ts;
+ u32 width;
+ u8 rssi;
+};
+
+/* internal struct to pass radar data */
+struct ath_radar_data {
+ u8 pulse_bw_info;
+ u8 rssi;
+ u8 ext_rssi;
+ u8 pulse_length_ext;
+ u8 pulse_length_pri;
+};
+
+/* convert pulse duration to usecs, considering clock mode */
+static u32 dur_to_usecs(struct ath_hw *ah, u32 dur)
+{
+ const u32 AR93X_NSECS_PER_DUR = 800;
+ const u32 AR93X_NSECS_PER_DUR_FAST = (8000 / 11);
+ u32 nsecs;
+
+ if (IS_CHAN_A_FAST_CLOCK(ah, ah->curchan))
+ nsecs = dur * AR93X_NSECS_PER_DUR_FAST;
+ else
+ nsecs = dur * AR93X_NSECS_PER_DUR;
+
+ return (nsecs + 500) / 1000;
+}
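+/*
+ * Illustrative arithmetic (comment only, not used by the driver): with
+ * the regular clock one duration count is 800 ns, so dur = 10 maps to
+ * 8000 ns and rounds to 8 us; with the fast clock one count is
+ * 8000 / 11 = 727 ns, so dur = 10 maps to 7270 ns and rounds to 7 us.
+ */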
+
+#define PRI_CH_RADAR_FOUND 0x01
+#define EXT_CH_RADAR_FOUND 0x02
+static bool
+ath9k_postprocess_radar_event(struct ath_softc *sc,
+ struct ath_radar_data *are,
+ struct dfs_radar_pulse *drp)
+{
+ u8 rssi;
+ u16 dur;
+
+ ath_dbg(ath9k_hw_common(sc->sc_ah), DFS,
+ "pulse_bw_info=0x%x, pri,ext len/rssi=(%u/%u, %u/%u)\n",
+ are->pulse_bw_info,
+ are->pulse_length_pri, are->rssi,
+ are->pulse_length_ext, are->ext_rssi);
+
+ /*
+ * Only the last 2 bits of the BW info are relevant, they indicate
+ * which channel the radar was detected in.
+ */
+ are->pulse_bw_info &= 0x03;
+
+ switch (are->pulse_bw_info) {
+ case PRI_CH_RADAR_FOUND:
+ /* radar in ctrl channel */
+ dur = are->pulse_length_pri;
+ DFS_STAT_INC(sc, pri_phy_errors);
+ /*
+ * cannot use ctrl channel RSSI
+ * if extension channel is stronger
+ */
+ rssi = (are->ext_rssi >= (are->rssi + 3)) ? 0 : are->rssi;
+ break;
+ case EXT_CH_RADAR_FOUND:
+ /* radar in extension channel */
+ dur = are->pulse_length_ext;
+ DFS_STAT_INC(sc, ext_phy_errors);
+ /*
+ * cannot use extension channel RSSI
+ * if control channel is stronger
+ */
+ rssi = (are->rssi >= (are->ext_rssi + 12)) ? 0 : are->ext_rssi;
+ break;
+ case (PRI_CH_RADAR_FOUND | EXT_CH_RADAR_FOUND):
+ /*
+ * Conducted testing, when pulse is on DC, both pri and ext
+ * durations are reported to be the same
+ *
+ * Radiated testing, when pulse is on DC, different pri and
+ * ext durations are reported, so take the larger of the two
+ */
+ if (are->pulse_length_ext >= are->pulse_length_pri)
+ dur = are->pulse_length_ext;
+ else
+ dur = are->pulse_length_pri;
+ DFS_STAT_INC(sc, dc_phy_errors);
+
+ /* when both are present use stronger one */
+ rssi = (are->rssi < are->ext_rssi) ? are->ext_rssi : are->rssi;
+ break;
+ default:
+ /*
+ * Bogus bandwidth info was received in descriptor,
+ * so ignore this PHY error
+ */
+ DFS_STAT_INC(sc, bwinfo_discards);
+ return false;
+ }
+
+ if (rssi == 0) {
+ DFS_STAT_INC(sc, rssi_discards);
+ return false;
+ }
+
+ /*
+ * TODO: check chirping pulses
+ * checks for chirping are dependent on the DFS regulatory domain
+ * used, which is yet TBD
+ */
+
+ /* convert duration to usecs */
+ drp->width = dur_to_usecs(sc->sc_ah, dur);
+ drp->rssi = rssi;
+
+ DFS_STAT_INC(sc, pulses_detected);
+ return true;
+}
+#undef PRI_CH_RADAR_FOUND
+#undef EXT_CH_RADAR_FOUND
+
+/*
+ * DFS: check PHY-error for radar pulse and feed the detector
+ */
+void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
+ struct ath_rx_status *rs, u64 mactime)
+{
+ struct ath_radar_data ard;
+ u16 datalen;
+ char *vdata_end;
+ struct dfs_radar_pulse drp;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+
+ if ((rs->rs_phyerr != ATH9K_PHYERR_RADAR) &&
+     (rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT)) {
+ ath_dbg(common, DFS,
+ "Error: rs_phyer=0x%x not a radar error\n",
+ rs->rs_phyerr);
+ return;
+ }
+
+ datalen = rs->rs_datalen;
+ if (datalen == 0) {
+ DFS_STAT_INC(sc, datalen_discards);
+ return;
+ }
+
+ ard.rssi = rs->rs_rssi_ctl0;
+ ard.ext_rssi = rs->rs_rssi_ext0;
+
+ /*
+ * The hardware stores these as 8 bit signed values;
+ * cap them at 0 if they are negative.
+ */
+ if (ard.rssi & 0x80)
+ ard.rssi = 0;
+ if (ard.ext_rssi & 0x80)
+ ard.ext_rssi = 0;
+
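+ /*
+  * The radar summary report is appended to the frame payload;
+  * reading backwards from the end, the last three bytes hold
+  * bw_info, the extension channel pulse length and the primary
+  * channel pulse length.
+  */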
+ vdata_end = (char *)data + datalen;
+ ard.pulse_bw_info = vdata_end[-1];
+ ard.pulse_length_ext = vdata_end[-2];
+ ard.pulse_length_pri = vdata_end[-3];
+
+ ath_dbg(common, DFS,
+ "bw_info=%d, length_pri=%d, length_ext=%d, "
+ "rssi_pri=%d, rssi_ext=%d\n",
+ ard.pulse_bw_info, ard.pulse_length_pri, ard.pulse_length_ext,
+ ard.rssi, ard.ext_rssi);
+
+ drp.freq = ah->curchan->channel;
+ drp.ts = mactime;
+ if (ath9k_postprocess_radar_event(sc, &ard, &drp)) {
+ static u64 last_ts;
+ ath_dbg(common, DFS,
+ "ath9k_dfs_process_phyerr: channel=%d, ts=%llu, "
+ "width=%d, rssi=%d, delta_ts=%llu\n",
+ drp.freq, drp.ts, drp.width, drp.rssi, drp.ts-last_ts);
+ last_ts = drp.ts;
+ /*
+ * TODO: forward pulse to pattern detector
+ *
+ * ieee80211_add_radar_pulse(drp.freq, drp.ts,
+ * drp.width, drp.rssi);
+ */
+ }
+}
diff --git a/drivers/net/wireless/ath/ath9k/dfs.h b/drivers/net/wireless/ath/ath9k/dfs.h
new file mode 100644
index 0000000..c241285
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ATH9K_DFS_H
+#define ATH9K_DFS_H
+
+#if defined(CONFIG_ATH9K_DFS_CERTIFIED)
+/**
+ * ath9k_dfs_process_phyerr - process radar PHY error
+ * @sc: ath_softc
+ * @data: RX payload data
+ * @rs: RX status after processing descriptor
+ * @mactime: receive time
+ *
+ * This function is called whenever the HW DFS module detects a radar
+ * pulse and reports it as a PHY error.
+ *
+ * The radar information provided as raw payload data is validated and
+ * filtered for false pulses. Events passing all tests are forwarded to
+ * the upper layer for pattern detection.
+ */
+void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
+ struct ath_rx_status *rs, u64 mactime);
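+/*
+ * Sketch of a possible call site in the RX path (illustrative only,
+ * the real hook-up is done by the driver's RX handling code):
+ *
+ *	if ((rs->rs_status & ATH9K_RXERR_PHY) &&
+ *	    (rs->rs_phyerr == ATH9K_PHYERR_RADAR ||
+ *	     rs->rs_phyerr == ATH9K_PHYERR_FALSE_RADAR_EXT))
+ *		ath9k_dfs_process_phyerr(sc, data, rs, mactime);
+ */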
+#else
+static inline void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
+ struct ath_rx_status *rs, u64 mactime) { }
+#endif
+
+#endif /* ATH9K_DFS_H */
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.c b/drivers/net/wireless/ath/ath9k/dfs_debug.c
new file mode 100644
index 0000000..106d031
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/export.h>
+
+#include "ath9k.h"
+#include "dfs_debug.h"
+
+#define ATH9K_DFS_STAT(s, p) \
+ len += snprintf(buf + len, size - len, "%28s : %10u\n", s, \
+ sc->debug.stats.dfs_stats.p);
+
+static ssize_t read_file_dfs(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ath_softc *sc = file->private_data;
+ struct ath9k_hw_version *hw_ver = &sc->sc_ah->hw_version;
+ char *buf;
+ unsigned int len = 0, size = 8000;
+ ssize_t retval = 0;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ len += snprintf(buf + len, size - len, "DFS support for "
+ "macVersion = 0x%x, macRev = 0x%x: %s\n",
+ hw_ver->macVersion, hw_ver->macRev,
+ (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_DFS) ?
+ "enabled" : "disabled");
+ ATH9K_DFS_STAT("DFS pulses detected ", pulses_detected);
+ ATH9K_DFS_STAT("Datalen discards ", datalen_discards);
+ ATH9K_DFS_STAT("RSSI discards ", rssi_discards);
+ ATH9K_DFS_STAT("BW info discards ", bwinfo_discards);
+ ATH9K_DFS_STAT("Primary channel pulses ", pri_phy_errors);
+ ATH9K_DFS_STAT("Secondary channel pulses", ext_phy_errors);
+ ATH9K_DFS_STAT("Dual channel pulses ", dc_phy_errors);
+
+ if (len > size)
+ len = size;
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+}
+
+static int ath9k_dfs_debugfs_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return 0;
+}
+
+static const struct file_operations fops_dfs_stats = {
+ .read = read_file_dfs,
+ .open = ath9k_dfs_debugfs_open,
+ .owner = THIS_MODULE,
+ .llseek = default_llseek,
+};
+
+void ath9k_dfs_init_debug(struct ath_softc *sc)
+{
+ debugfs_create_file("dfs_stats", S_IRUSR,
+ sc->debug.debugfs_phy, sc, &fops_dfs_stats);
+}
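+
+/*
+ * The counters can then be read from user space, e.g. (exact path
+ * depends on the debugfs mount point and the wiphy index):
+ *
+ *	cat /sys/kernel/debug/ieee80211/phy0/ath9k/dfs_stats
+ */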
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.h b/drivers/net/wireless/ath/ath9k/dfs_debug.h
new file mode 100644
index 0000000..4911724
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (c) 2008-2011 Atheros Communications Inc.
+ * Copyright (c) 2011 Neratec Solutions AG
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+
+#ifndef ATH9K_DFS_DEBUG_H
+#define ATH9K_DFS_DEBUG_H
+
+#include "hw.h"
+
+/**
+ * struct ath_dfs_stats - DFS Statistics
+ *
+ * @pulses_detected: No. of pulses detected so far
+ * @datalen_discards: No. of pulses discarded due to invalid datalen
+ * @rssi_discards: No. of pulses discarded due to invalid RSSI
+ * @bwinfo_discards: No. of pulses discarded due to invalid BW info
+ * @pri_phy_errors: No. of pulses reported for primary channel
+ * @ext_phy_errors: No. of pulses reported for extension channel
+ * @dc_phy_errors: No. of pulses reported for primary + extension channel
+ */
+struct ath_dfs_stats {
+ u32 pulses_detected;
+ u32 datalen_discards;
+ u32 rssi_discards;
+ u32 bwinfo_discards;
+ u32 pri_phy_errors;
+ u32 ext_phy_errors;
+ u32 dc_phy_errors;
+};
+
+#if defined(CONFIG_ATH9K_DFS_DEBUGFS)
+
+#define DFS_STAT_INC(sc, c) (sc->debug.stats.dfs_stats.c++)
+void ath9k_dfs_init_debug(struct ath_softc *sc);
+
+#else
+
+#define DFS_STAT_INC(sc, c) do { } while (0)
+static inline void ath9k_dfs_init_debug(struct ath_softc *sc) { }
+
+#endif /* CONFIG_ATH9K_DFS_DEBUGFS */
+
+#endif /* ATH9K_DFS_DEBUG_H */
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index e46f751..c435232 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -305,8 +305,7 @@ void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah)
regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
break;
default:
- ath_dbg(common, ATH_DBG_EEPROM,
- "Invalid chainmask configuration\n");
+ ath_dbg(common, EEPROM, "Invalid chainmask configuration\n");
break;
}
}
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 49abd34..5ff7ab9 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -249,7 +249,8 @@ enum eeprom_param {
EEP_ANT_DIV_CTL1,
EEP_CHAIN_MASK_REDUCE,
EEP_ANTENNA_GAIN_2G,
- EEP_ANTENNA_GAIN_5G
+ EEP_ANTENNA_GAIN_5G,
+ EEP_QUICK_DROP
};
enum ar5416_rates {
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 9a7520f..4322ac8 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -38,7 +38,7 @@ static bool __ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"Unable to read eeprom region\n");
return false;
}
@@ -62,8 +62,7 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
if (!ath9k_hw_use_flash(ah)) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "Reading from EEPROM, not flash\n");
+ ath_dbg(common, EEPROM, "Reading from EEPROM, not flash\n");
}
if (common->bus_ops->ath_bus_type == ATH_USB)
@@ -204,8 +203,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
return false;
}
- ath_dbg(common, ATH_DBG_EEPROM,
- "Read Magic = 0x%04X\n", magic);
+ ath_dbg(common, EEPROM, "Read Magic = 0x%04X\n", magic);
if (magic != AR5416_EEPROM_MAGIC) {
magic2 = swab16(magic);
@@ -227,7 +225,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
}
}
- ath_dbg(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
+ ath_dbg(common, EEPROM, "need_swap = %s\n",
need_swap ? "True" : "False");
if (need_swap)
@@ -249,7 +247,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
u32 integer;
u16 word;
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"EEPROM Endianness is not native.. Changing\n");
word = swab16(eep->baseEepHeader.length);
@@ -435,11 +433,11 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
reg32 = get_unaligned_le32(&pdadcValues[4 * j]);
REG_WRITE(ah, regOffset, reg32);
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"PDADC (%d,%4x): %4.4x %8.8x\n",
i, regChainOffset, regOffset,
reg32);
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"PDADC: Chain %d | "
"PDADC %3d Value %3d | "
"PDADC %3d Value %3d | "
@@ -473,7 +471,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
int i;
u16 twiceMinEdgePower;
- u16 twiceMaxEdgePower = MAX_RATE_POWER;
+ u16 twiceMaxEdgePower;
u16 scaledPower = 0, minCtlPower;
u16 numCtlModes;
const u16 *pCtlMode;
@@ -542,9 +540,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
else
freq = centers.ctl_center;
- if (ah->eep_ops->get_eeprom_ver(ah) == 14 &&
- ah->eep_ops->get_eeprom_rev(ah) <= 2)
- twiceMaxEdgePower = MAX_RATE_POWER;
+ twiceMaxEdgePower = MAX_RATE_POWER;
for (i = 0; (i < AR5416_EEP4K_NUM_CTLS) &&
pEepData->ctlIndex[i]; i++) {
@@ -1081,8 +1077,7 @@ static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
u16 spur_val = AR_NO_SPUR;
- ath_dbg(common, ATH_DBG_ANI,
- "Getting spur idx:%d is2Ghz:%d val:%x\n",
+ ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n",
i, is2GHz, ah->config.spurchans[i][is2GHz]);
switch (ah->config.spurmode) {
@@ -1090,8 +1085,8 @@ static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
break;
case SPUR_ENABLE_IOCTL:
spur_val = ah->config.spurchans[i][is2GHz];
- ath_dbg(common, ATH_DBG_ANI,
- "Getting spur val from new loc. %d\n", spur_val);
+ ath_dbg(common, ANI, "Getting spur val from new loc. %d\n",
+ spur_val);
break;
case SPUR_ENABLE_EEPROM:
spur_val = EEP_MAP4K_SPURCHAN;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 4f5c50a..f272236 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -41,7 +41,7 @@ static bool __ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
for (addr = 0; addr < SIZE_EEPROM_AR9287; addr++) {
if (!ath9k_hw_nvram_read(common, addr + eep_start_loc,
eep_data)) {
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"Unable to read eeprom region\n");
return false;
}
@@ -66,8 +66,7 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
if (!ath9k_hw_use_flash(ah)) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "Reading from EEPROM, not flash\n");
+ ath_dbg(common, EEPROM, "Reading from EEPROM, not flash\n");
}
if (common->bus_ops->ath_bus_type == ATH_USB)
@@ -197,8 +196,7 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
return false;
}
- ath_dbg(common, ATH_DBG_EEPROM,
- "Read Magic = 0x%04X\n", magic);
+ ath_dbg(common, EEPROM, "Read Magic = 0x%04X\n", magic);
if (magic != AR5416_EEPROM_MAGIC) {
magic2 = swab16(magic);
@@ -220,7 +218,7 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
}
}
- ath_dbg(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
+ ath_dbg(common, EEPROM, "need_swap = %s\n",
need_swap ? "True" : "False");
if (need_swap)
@@ -569,7 +567,7 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
#define REDUCE_SCALED_POWER_BY_TWO_CHAIN 6
#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 10
- u16 twiceMaxEdgePower = MAX_RATE_POWER;
+ u16 twiceMaxEdgePower;
int i;
struct cal_ctl_data_ar9287 *rep;
struct cal_target_power_leg targetPowerOfdm = {0, {0, 0, 0, 0} },
@@ -669,6 +667,7 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
else
freq = centers.ctl_center;
+ twiceMaxEdgePower = MAX_RATE_POWER;
/* Walk through the CTL indices stored in EEPROM */
for (i = 0; (i < AR9287_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
struct cal_ctl_edges *pRdEdgesPower;
@@ -1040,8 +1039,7 @@ static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah,
struct ath_common *common = ath9k_hw_common(ah);
u16 spur_val = AR_NO_SPUR;
- ath_dbg(common, ATH_DBG_ANI,
- "Getting spur idx:%d is2Ghz:%d val:%x\n",
+ ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n",
i, is2GHz, ah->config.spurchans[i][is2GHz]);
switch (ah->config.spurmode) {
@@ -1049,8 +1047,8 @@ static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah,
break;
case SPUR_ENABLE_IOCTL:
spur_val = ah->config.spurchans[i][is2GHz];
- ath_dbg(common, ATH_DBG_ANI,
- "Getting spur val from new loc. %d\n", spur_val);
+ ath_dbg(common, ANI, "Getting spur val from new loc. %d\n",
+ spur_val);
break;
case SPUR_ENABLE_EEPROM:
spur_val = EEP_MAP9287_SPURCHAN;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 81e6296..619b95d 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -121,8 +121,7 @@ static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
if (!ath9k_hw_use_flash(ah)) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "Reading from EEPROM, not flash\n");
+ ath_dbg(common, EEPROM, "Reading from EEPROM, not flash\n");
}
if (common->bus_ops->ath_bus_type == ATH_USB)
@@ -279,8 +278,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
}
if (!ath9k_hw_use_flash(ah)) {
- ath_dbg(common, ATH_DBG_EEPROM,
- "Read Magic = 0x%04X\n", magic);
+ ath_dbg(common, EEPROM, "Read Magic = 0x%04X\n", magic);
if (magic != AR5416_EEPROM_MAGIC) {
magic2 = swab16(magic);
@@ -303,7 +301,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
}
}
- ath_dbg(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
+ ath_dbg(common, EEPROM, "need_swap = %s\n",
need_swap ? "True" : "False");
if (need_swap)
@@ -325,7 +323,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
u32 integer, j;
u16 word;
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"EEPROM Endianness is not native.. Changing.\n");
word = swab16(eep->baseEepHeader.length);
@@ -385,7 +383,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
if ((ah->hw_version.devid == AR9280_DEVID_PCI) &&
((eep->baseEepHeader.version & 0xff) > 0x0a) &&
(eep->baseEepHeader.pwdclkind == 0))
- ah->need_an_top2_fixup = 1;
+ ah->need_an_top2_fixup = true;
if ((common->bus_ops->ath_bus_type == ATH_USB) &&
(AR_SREV_9280(ah)))
@@ -965,15 +963,12 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
reg32 = get_unaligned_le32(&pdadcValues[4 * j]);
REG_WRITE(ah, regOffset, reg32);
- ath_dbg(common, ATH_DBG_EEPROM,
+ ath_dbg(common, EEPROM,
"PDADC (%d,%4x): %4.4x %8.8x\n",
i, regChainOffset, regOffset,
reg32);
- ath_dbg(common, ATH_DBG_EEPROM,
- "PDADC: Chain %d | PDADC %3d "
- "Value %3d | PDADC %3d Value %3d | "
- "PDADC %3d Value %3d | PDADC %3d "
- "Value %3d |\n",
+ ath_dbg(common, EEPROM,
+ "PDADC: Chain %d | PDADC %3d Value %3d | PDADC %3d Value %3d | PDADC %3d Value %3d | PDADC %3d Value %3d |\n",
i, 4 * j, pdadcValues[4 * j],
4 * j + 1, pdadcValues[4 * j + 1],
4 * j + 2, pdadcValues[4 * j + 2],
@@ -1000,7 +995,7 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 9 /* 10*log10(3)*2 */
struct ar5416_eeprom_def *pEepData = &ah->eeprom.def;
- u16 twiceMaxEdgePower = MAX_RATE_POWER;
+ u16 twiceMaxEdgePower;
int i;
struct cal_ctl_data *rep;
struct cal_target_power_leg targetPowerOfdm, targetPowerCck = {
@@ -1121,9 +1116,7 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
else
freq = centers.ctl_center;
- if (ah->eep_ops->get_eeprom_ver(ah) == 14 &&
- ah->eep_ops->get_eeprom_rev(ah) <= 2)
- twiceMaxEdgePower = MAX_RATE_POWER;
+ twiceMaxEdgePower = MAX_RATE_POWER;
for (i = 0; (i < AR5416_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
if ((((cfgCtl & ~CTL_MODE_M) |
@@ -1280,7 +1273,7 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
break;
default:
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_EEPROM,
+ ath_dbg(ath9k_hw_common(ah), EEPROM,
"Invalid chainmask configuration\n");
break;
}
@@ -1398,8 +1391,7 @@ static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
u16 spur_val = AR_NO_SPUR;
- ath_dbg(common, ATH_DBG_ANI,
- "Getting spur idx:%d is2Ghz:%d val:%x\n",
+ ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n",
i, is2GHz, ah->config.spurchans[i][is2GHz]);
switch (ah->config.spurmode) {
@@ -1407,8 +1399,8 @@ static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
break;
case SPUR_ENABLE_IOCTL:
spur_val = ah->config.spurchans[i][is2GHz];
- ath_dbg(common, ATH_DBG_ANI,
- "Getting spur val from new loc. %d\n", spur_val);
+ ath_dbg(common, ANI, "Getting spur val from new loc. %d\n",
+ spur_val);
break;
case SPUR_ENABLE_EEPROM:
spur_val = EEP_DEF_SPURCHAN;
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 655576c..597c84e 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -130,12 +130,12 @@ static void ath_detect_bt_priority(struct ath_softc *sc)
sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN);
/* Detect if colocated bt started scanning */
if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
- ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
+ ath_dbg(ath9k_hw_common(sc->sc_ah), BTCOEX,
"BT scan detected\n");
sc->sc_flags |= (SC_OP_BT_SCAN |
SC_OP_BT_PRIORITY_DETECTED);
} else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
- ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
+ ath_dbg(ath9k_hw_common(sc->sc_ah), BTCOEX,
"BT priority traffic detected\n");
sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
}
@@ -189,8 +189,8 @@ static void ath_btcoex_period_timer(unsigned long data)
bool is_btscan;
ath9k_ps_wakeup(sc);
- ath_detect_bt_priority(sc);
-
+ if (!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ ath_detect_bt_priority(sc);
is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
spin_lock_bh(&btcoex->btcoex_lock);
@@ -198,6 +198,7 @@ static void ath_btcoex_period_timer(unsigned long data)
ath9k_hw_btcoex_bt_stomp(ah, is_btscan ? ATH_BTCOEX_STOMP_ALL :
btcoex->bt_stomp_type);
+ ath9k_hw_btcoex_enable(ah);
spin_unlock_bh(&btcoex->btcoex_lock);
if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
@@ -212,8 +213,9 @@ static void ath_btcoex_period_timer(unsigned long data)
}
ath9k_ps_restore(sc);
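+ /* btcoex_period is kept in microseconds (see ath_init_btcoex_timer);
+  * convert it to ms for the timer */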
+ timer_period = btcoex->btcoex_period / 1000;
mod_timer(&btcoex->period_timer, jiffies +
- msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
+ msecs_to_jiffies(timer_period));
}
/*
@@ -228,8 +230,7 @@ static void ath_btcoex_no_stomp_timer(void *arg)
struct ath_common *common = ath9k_hw_common(ah);
bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
- ath_dbg(common, ATH_DBG_BTCOEX,
- "no stomp timer running\n");
+ ath_dbg(common, BTCOEX, "no stomp timer running\n");
ath9k_ps_wakeup(sc);
spin_lock_bh(&btcoex->btcoex_lock);
@@ -239,6 +240,7 @@ static void ath_btcoex_no_stomp_timer(void *arg)
else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
+ ath9k_hw_btcoex_enable(ah);
spin_unlock_bh(&btcoex->btcoex_lock);
ath9k_ps_restore(sc);
}
@@ -247,6 +249,9 @@ int ath_init_btcoex_timer(struct ath_softc *sc)
{
struct ath_btcoex *btcoex = &sc->btcoex;
+ if (ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_NONE)
+ return 0;
+
btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
btcoex->btcoex_period / 100;
@@ -277,8 +282,10 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
struct ath_btcoex *btcoex = &sc->btcoex;
struct ath_hw *ah = sc->sc_ah;
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "Starting btcoex timers\n");
+ ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex timers\n");
+
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
/* make sure duty cycle timer is also stopped when resuming */
if (btcoex->hw_timer_enabled)
@@ -300,6 +307,9 @@ void ath9k_btcoex_timer_pause(struct ath_softc *sc)
struct ath_btcoex *btcoex = &sc->btcoex;
struct ath_hw *ah = sc->sc_ah;
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
del_timer_sync(&btcoex->period_timer);
if (btcoex->hw_timer_enabled)
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 57fe22b..2eadffb 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -167,9 +167,9 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
/* TSF out of range threshold fixed at 1 second */
bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
- ath_dbg(common, ATH_DBG_CONFIG, "intval: %u tsf: %llu tsftu: %u\n",
+ ath_dbg(common, CONFIG, "intval: %u tsf: %llu tsftu: %u\n",
intval, tsf, tsftu);
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
bs.bs_bmissthreshold, bs.bs_sleepduration,
bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
@@ -224,9 +224,8 @@ static void ath9k_htc_beacon_config_ap(struct ath9k_htc_priv *priv,
if (priv->op_flags & OP_ENABLE_BEACON)
imask |= ATH9K_INT_SWBA;
- ath_dbg(common, ATH_DBG_CONFIG,
- "AP Beacon config, intval: %d, nexttbtt: %u, resp_time: %d "
- "imask: 0x%x\n",
+ ath_dbg(common, CONFIG,
+ "AP Beacon config, intval: %d, nexttbtt: %u, resp_time: %d imask: 0x%x\n",
bss_conf->beacon_interval, nexttbtt,
priv->ah->config.sw_beacon_response_time, imask);
@@ -273,9 +272,8 @@ static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
if (priv->op_flags & OP_ENABLE_BEACON)
imask |= ATH9K_INT_SWBA;
- ath_dbg(common, ATH_DBG_CONFIG,
- "IBSS Beacon config, intval: %d, nexttbtt: %u, "
- "resp_time: %d, imask: 0x%x\n",
+ ath_dbg(common, CONFIG,
+ "IBSS Beacon config, intval: %d, nexttbtt: %u, resp_time: %d, imask: 0x%x\n",
bss_conf->beacon_interval, nexttbtt,
priv->ah->config.sw_beacon_response_time, imask);
@@ -323,7 +321,7 @@ static void ath9k_htc_send_buffered(struct ath9k_htc_priv *priv,
tx_slot = ath9k_htc_tx_get_slot(priv);
if (tx_slot < 0) {
- ath_dbg(common, ATH_DBG_XMIT, "No free CAB slot\n");
+ ath_dbg(common, XMIT, "No free CAB slot\n");
dev_kfree_skb_any(skb);
goto next;
}
@@ -333,8 +331,7 @@ static void ath9k_htc_send_buffered(struct ath9k_htc_priv *priv,
ath9k_htc_tx_clear_slot(priv, tx_slot);
dev_kfree_skb_any(skb);
- ath_dbg(common, ATH_DBG_XMIT,
- "Failed to send CAB frame\n");
+ ath_dbg(common, XMIT, "Failed to send CAB frame\n");
} else {
spin_lock_bh(&priv->tx.tx_lock);
priv->tx.queued_cnt++;
@@ -409,7 +406,7 @@ static void ath9k_htc_send_beacon(struct ath9k_htc_priv *priv,
ret = htc_send(priv->htc, beacon);
if (ret != 0) {
if (ret == -ENOMEM) {
- ath_dbg(common, ATH_DBG_BSTUCK,
+ ath_dbg(common, BSTUCK,
"Failed to send beacon, no free TX buffer\n");
}
dev_kfree_skb_any(beacon);
@@ -434,7 +431,7 @@ static int ath9k_htc_choose_bslot(struct ath9k_htc_priv *priv,
slot = ((tsftu % intval) * ATH9K_HTC_MAX_BCN_VIF) / intval;
slot = ATH9K_HTC_MAX_BCN_VIF - slot - 1;
- ath_dbg(common, ATH_DBG_BEACON,
+ ath_dbg(common, BEACON,
"Choose slot: %d, tsf: %llu, tsftu: %u, intval: %u\n",
slot, tsf, tsftu, intval);
@@ -450,8 +447,7 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv,
if (swba->beacon_pending != 0) {
priv->cur_beacon_conf.bmiss_cnt++;
if (priv->cur_beacon_conf.bmiss_cnt > BSTUCK_THRESHOLD) {
- ath_dbg(common, ATH_DBG_BSTUCK,
- "Beacon stuck, HW reset\n");
+ ath_dbg(common, BSTUCK, "Beacon stuck, HW reset\n");
ieee80211_queue_work(priv->hw,
&priv->fatal_work);
}
@@ -459,7 +455,7 @@ void ath9k_htc_swba(struct ath9k_htc_priv *priv,
}
if (priv->cur_beacon_conf.bmiss_cnt) {
- ath_dbg(common, ATH_DBG_BSTUCK,
+ ath_dbg(common, BSTUCK,
"Resuming beacon xmit after %u misses\n",
priv->cur_beacon_conf.bmiss_cnt);
priv->cur_beacon_conf.bmiss_cnt = 0;
@@ -495,8 +491,8 @@ void ath9k_htc_assign_bslot(struct ath9k_htc_priv *priv,
priv->cur_beacon_conf.bslot[avp->bslot] = vif;
spin_unlock_bh(&priv->beacon_lock);
- ath_dbg(common, ATH_DBG_CONFIG,
- "Added interface at beacon slot: %d\n", avp->bslot);
+ ath_dbg(common, CONFIG, "Added interface at beacon slot: %d\n",
+ avp->bslot);
}
void ath9k_htc_remove_bslot(struct ath9k_htc_priv *priv,
@@ -509,8 +505,8 @@ void ath9k_htc_remove_bslot(struct ath9k_htc_priv *priv,
priv->cur_beacon_conf.bslot[avp->bslot] = NULL;
spin_unlock_bh(&priv->beacon_lock);
- ath_dbg(common, ATH_DBG_CONFIG,
- "Removed interface at beacon slot: %d\n", avp->bslot);
+ ath_dbg(common, CONFIG, "Removed interface at beacon slot: %d\n",
+ avp->bslot);
}
/*
@@ -536,8 +532,7 @@ void ath9k_htc_set_tsfadjust(struct ath9k_htc_priv *priv,
tsfadjust = cur_conf->beacon_interval * avp->bslot / ATH9K_HTC_MAX_BCN_VIF;
avp->tsfadjust = cpu_to_le64(TU_TO_USEC(tsfadjust));
- ath_dbg(common, ATH_DBG_CONFIG,
- "tsfadjust is: %llu for bslot: %d\n",
+ ath_dbg(common, CONFIG, "tsfadjust is: %llu for bslot: %d\n",
(unsigned long long)tsfadjust, avp->bslot);
}
@@ -568,7 +563,7 @@ static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv,
(priv->num_ap_vif > 1) &&
(vif->type == NL80211_IFTYPE_AP) &&
(cur_conf->beacon_interval != bss_conf->beacon_int)) {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Changing beacon interval of multiple AP interfaces !\n");
return false;
}
@@ -579,7 +574,7 @@ static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv,
*/
if (priv->num_ap_vif &&
(vif->type != NL80211_IFTYPE_AP)) {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"HW in AP mode, cannot set STA beacon parameters\n");
return false;
}
@@ -597,7 +592,7 @@ static bool ath9k_htc_check_beacon_config(struct ath9k_htc_priv *priv,
&beacon_configured);
if (beacon_configured) {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Beacon already configured for a station interface\n");
return false;
}
@@ -637,8 +632,7 @@ void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
ath9k_htc_beacon_config_ap(priv, cur_conf);
break;
default:
- ath_dbg(common, ATH_DBG_CONFIG,
- "Unsupported beaconing mode\n");
+ ath_dbg(common, CONFIG, "Unsupported beaconing mode\n");
return;
}
}
@@ -659,8 +653,7 @@ void ath9k_htc_beacon_reconfig(struct ath9k_htc_priv *priv)
ath9k_htc_beacon_config_ap(priv, cur_conf);
break;
default:
- ath_dbg(common, ATH_DBG_CONFIG,
- "Unsupported beaconing mode\n");
+ ath_dbg(common, CONFIG, "Unsupported beaconing mode\n");
return;
}
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index e3a02eb..6506e1f 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -36,12 +36,12 @@ static void ath_detect_bt_priority(struct ath9k_htc_priv *priv)
priv->op_flags &= ~(OP_BT_PRIORITY_DETECTED | OP_BT_SCAN);
/* Detect if colocated bt started scanning */
if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+ ath_dbg(ath9k_hw_common(ah), BTCOEX,
"BT scan detected\n");
priv->op_flags |= (OP_BT_SCAN |
OP_BT_PRIORITY_DETECTED);
} else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+ ath_dbg(ath9k_hw_common(ah), BTCOEX,
"BT priority traffic detected\n");
priv->op_flags |= OP_BT_PRIORITY_DETECTED;
}
@@ -80,6 +80,7 @@ static void ath_btcoex_period_work(struct work_struct *work)
ath9k_hw_btcoex_bt_stomp(priv->ah, is_btscan ? ATH_BTCOEX_STOMP_ALL :
btcoex->bt_stomp_type);
+ ath9k_hw_btcoex_enable(priv->ah);
timer_period = is_btscan ? btcoex->btscan_no_stomp :
btcoex->btcoex_no_stomp;
ieee80211_queue_delayed_work(priv->hw, &priv->duty_cycle_work,
@@ -101,19 +102,22 @@ static void ath_btcoex_duty_cycle_work(struct work_struct *work)
struct ath_common *common = ath9k_hw_common(ah);
bool is_btscan = priv->op_flags & OP_BT_SCAN;
- ath_dbg(common, ATH_DBG_BTCOEX,
- "time slice work for bt and wlan\n");
+ ath_dbg(common, BTCOEX, "time slice work for bt and wlan\n");
if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan)
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_NONE);
else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
ath9k_hw_btcoex_bt_stomp(ah, ATH_BTCOEX_STOMP_LOW);
+ ath9k_hw_btcoex_enable(priv->ah);
}
void ath_htc_init_btcoex_work(struct ath9k_htc_priv *priv)
{
struct ath_btcoex *btcoex = &priv->btcoex;
+ if (ath9k_hw_get_btcoex_scheme(priv->ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD;
btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
btcoex->btcoex_period / 100;
@@ -132,7 +136,10 @@ void ath_htc_resume_btcoex_work(struct ath9k_htc_priv *priv)
struct ath_btcoex *btcoex = &priv->btcoex;
struct ath_hw *ah = priv->ah;
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX, "Starting btcoex work\n");
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
+ ath_dbg(ath9k_hw_common(ah), BTCOEX, "Starting btcoex work\n");
btcoex->bt_priority_cnt = 0;
btcoex->bt_priority_time = jiffies;
@@ -146,6 +153,9 @@ void ath_htc_resume_btcoex_work(struct ath9k_htc_priv *priv)
*/
void ath_htc_cancel_btcoex_work(struct ath9k_htc_priv *priv)
{
+ if (ath9k_hw_get_btcoex_scheme(priv->ah) == ATH_BTCOEX_CFG_NONE)
+ return;
+
cancel_delayed_work_sync(&priv->coex_period_work);
cancel_delayed_work_sync(&priv->duty_cycle_work);
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 966661c..9be10a2 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -299,8 +299,7 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
(u8 *) &val, sizeof(val),
100);
if (unlikely(r)) {
- ath_dbg(common, ATH_DBG_WMI,
- "REGISTER READ FAILED: (0x%04x, %d)\n",
+ ath_dbg(common, WMI, "REGISTER READ FAILED: (0x%04x, %d)\n",
reg_offset, r);
return -EIO;
}
@@ -327,7 +326,7 @@ static void ath9k_multi_regread(void *hw_priv, u32 *addr,
(u8 *)tmpval, sizeof(u32) * count,
100);
if (unlikely(ret)) {
- ath_dbg(common, ATH_DBG_WMI,
+ ath_dbg(common, WMI,
"Multiple REGISTER READ FAILED (count: %d)\n", count);
}
@@ -352,8 +351,7 @@ static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset)
(u8 *) &val, sizeof(val),
100);
if (unlikely(r)) {
- ath_dbg(common, ATH_DBG_WMI,
- "REGISTER WRITE FAILED:(0x%04x, %d)\n",
+ ath_dbg(common, WMI, "REGISTER WRITE FAILED:(0x%04x, %d)\n",
reg_offset, r);
}
}
@@ -384,7 +382,7 @@ static void ath9k_regwrite_buffer(void *hw_priv, u32 val, u32 reg_offset)
(u8 *) &rsp_status, sizeof(rsp_status),
100);
if (unlikely(r)) {
- ath_dbg(common, ATH_DBG_WMI,
+ ath_dbg(common, WMI,
"REGISTER WRITE FAILED, multi len: %d\n",
priv->wmi->multi_write_idx);
}
@@ -434,7 +432,7 @@ static void ath9k_regwrite_flush(void *hw_priv)
(u8 *) &rsp_status, sizeof(rsp_status),
100);
if (unlikely(r)) {
- ath_dbg(common, ATH_DBG_WMI,
+ ath_dbg(common, WMI,
"REGISTER WRITE FAILED, multi len: %d\n",
priv->wmi->multi_write_idx);
}
@@ -512,8 +510,7 @@ static void setup_ht_cap(struct ath9k_htc_priv *priv,
tx_streams = ath9k_cmn_count_streams(priv->ah->txchainmask, 2);
rx_streams = ath9k_cmn_count_streams(priv->ah->rxchainmask, 2);
- ath_dbg(common, ATH_DBG_CONFIG,
- "TX streams %d, RX streams: %d\n",
+ ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
tx_streams, rx_streams);
if (tx_streams != rx_streams) {
@@ -610,7 +607,7 @@ static void ath9k_init_btcoex(struct ath9k_htc_priv *priv)
{
int qnum;
- switch (priv->ah->btcoex_hw.scheme) {
+ switch (ath9k_hw_get_btcoex_scheme(priv->ah)) {
case ATH_BTCOEX_CFG_NONE:
break;
case ATH_BTCOEX_CFG_3WIRE:
@@ -704,7 +701,8 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
if (product && strncmp(product, ATH_HTC_BTCOEX_PRODUCT_ID, 5) == 0) {
ah->btcoex_hw.scheme = ATH_BTCOEX_CFG_3WIRE;
- ath9k_init_btcoex(priv);
+ if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE)
+ ath9k_init_btcoex(priv);
}
return 0;
@@ -876,9 +874,8 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
goto err_world;
}
- ath_dbg(common, ATH_DBG_CONFIG,
- "WMI:%d, BCN:%d, CAB:%d, UAPSD:%d, MGMT:%d, "
- "BE:%d, BK:%d, VI:%d, VO:%d\n",
+ ath_dbg(common, CONFIG,
+ "WMI:%d, BCN:%d, CAB:%d, UAPSD:%d, MGMT:%d, BE:%d, BK:%d, VI:%d, VO:%d\n",
priv->wmi_cmd_ep,
priv->beacon_ep,
priv->cab_ep,
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 0b9a0e8..ef4c606 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -266,7 +266,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
ath9k_wmi_event_drain(priv);
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"(%u MHz) -> (%u MHz), HT: %d, HT40: %d fastcc: %d\n",
priv->ah->curchan->channel,
channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf),
@@ -415,7 +415,7 @@ static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
priv->vif_sta_pos[priv->mon_vif_idx] = sta_idx;
priv->ah->is_monitoring = true;
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Attached a monitor interface at idx: %d, sta idx: %d\n",
priv->mon_vif_idx, sta_idx);
@@ -427,7 +427,7 @@ err_sta:
*/
__ath9k_htc_remove_monitor_interface(priv);
err_vif:
- ath_dbg(common, ATH_DBG_FATAL, "Unable to attach a monitor interface\n");
+ ath_dbg(common, FATAL, "Unable to attach a monitor interface\n");
return ret;
}
@@ -452,7 +452,7 @@ static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
priv->nstations--;
priv->ah->is_monitoring = false;
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Removed a monitor interface at idx: %d, sta idx: %d\n",
priv->mon_vif_idx, sta_idx);
@@ -512,11 +512,11 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
}
if (sta) {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Added a station entry for: %pM (idx: %d)\n",
sta->addr, tsta.sta_index);
} else {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Added a station entry for VIF %d (idx: %d)\n",
avp->index, tsta.sta_index);
}
@@ -556,11 +556,11 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
}
if (sta) {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Removed a station entry for: %pM (idx: %d)\n",
sta->addr, sta_idx);
} else {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Removed a station entry for VIF %d (idx: %d)\n",
avp->index, sta_idx);
}
@@ -665,7 +665,7 @@ static void ath9k_htc_init_rate(struct ath9k_htc_priv *priv,
ath9k_htc_setup_rate(priv, sta, &trate);
ret = ath9k_htc_send_rate_cmd(priv, &trate);
if (!ret)
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Updated target sta: %pM, rate caps: 0x%X\n",
sta->addr, be32_to_cpu(trate.capflags));
}
@@ -692,7 +692,7 @@ static void ath9k_htc_update_rate(struct ath9k_htc_priv *priv,
ret = ath9k_htc_send_rate_cmd(priv, &trate);
if (!ret)
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Updated target sta: %pM, rate caps: 0x%X\n",
bss_conf->bssid, be32_to_cpu(trate.capflags));
}
@@ -721,11 +721,11 @@ static int ath9k_htc_tx_aggr_oper(struct ath9k_htc_priv *priv,
WMI_CMD_BUF(WMI_TX_AGGR_ENABLE_CMDID, &aggr);
if (ret)
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Unable to %s TX aggregation for (%pM, %d)\n",
(aggr.aggr_enable) ? "start" : "stop", sta->addr, tid);
else
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"%s TX aggregation for (%pM, %d)\n",
(aggr.aggr_enable) ? "Starting" : "Stopping",
sta->addr, tid);
@@ -784,7 +784,7 @@ void ath9k_htc_ani_work(struct work_struct *work)
/* Long calibration runs independently of short calibration. */
if ((timestamp - common->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) {
longcal = true;
- ath_dbg(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
+ ath_dbg(common, ANI, "longcal @%lu\n", jiffies);
common->ani.longcal_timer = timestamp;
}
@@ -793,8 +793,7 @@ void ath9k_htc_ani_work(struct work_struct *work)
if ((timestamp - common->ani.shortcal_timer) >=
short_cal_interval) {
shortcal = true;
- ath_dbg(common, ATH_DBG_ANI,
- "shortcal @%lu\n", jiffies);
+ ath_dbg(common, ANI, "shortcal @%lu\n", jiffies);
common->ani.shortcal_timer = timestamp;
common->ani.resetcal_timer = timestamp;
}
@@ -808,7 +807,8 @@ void ath9k_htc_ani_work(struct work_struct *work)
}
/* Verify whether we must check ANI */
- if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
+ if (ah->config.enable_ani &&
+ (timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
aniflag = true;
common->ani.checkani_timer = timestamp;
}
@@ -838,7 +838,7 @@ set_timer:
* short calibration and long calibration.
*/
cal_interval = ATH_LONG_CALINTERVAL;
- if (priv->ah->config.enable_ani)
+ if (ah->config.enable_ani)
cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
if (!common->ani.caldone)
cal_interval = min(cal_interval, (u32)short_cal_interval);
@@ -865,7 +865,7 @@ static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
padsize = padpos & 3;
if (padsize && skb->len > padpos) {
if (skb_headroom(skb) < padsize) {
- ath_dbg(common, ATH_DBG_XMIT, "No room for padding\n");
+ ath_dbg(common, XMIT, "No room for padding\n");
goto fail_tx;
}
skb_push(skb, padsize);
@@ -874,13 +874,13 @@ static void ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
slot = ath9k_htc_tx_get_slot(priv);
if (slot < 0) {
- ath_dbg(common, ATH_DBG_XMIT, "No free TX slot\n");
+ ath_dbg(common, XMIT, "No free TX slot\n");
goto fail_tx;
}
ret = ath9k_htc_tx_start(priv, skb, slot, false);
if (ret != 0) {
- ath_dbg(common, ATH_DBG_XMIT, "Tx failed\n");
+ ath_dbg(common, XMIT, "Tx failed\n");
goto clear_slot;
}
@@ -908,7 +908,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
mutex_lock(&priv->mutex);
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Starting driver with initial channel: %d MHz\n",
curchan->center_freq);
@@ -942,7 +942,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
ret = ath9k_htc_update_cap_target(priv, 0);
if (ret)
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Failed to update capability in target\n");
priv->op_flags &= ~OP_INVALID;
@@ -957,7 +957,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
mod_timer(&priv->tx.cleanup_timer,
jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL));
- if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) {
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE) {
ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
AR_STOMP_LOW_WLAN_WGHT);
ath9k_hw_btcoex_enable(ah);
@@ -979,7 +979,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
mutex_lock(&priv->mutex);
if (priv->op_flags & OP_INVALID) {
- ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
+ ath_dbg(common, ANY, "Device not present\n");
mutex_unlock(&priv->mutex);
return;
}
@@ -1009,7 +1009,8 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
mutex_lock(&priv->mutex);
- if (ah->btcoex_hw.enabled) {
+ if (ah->btcoex_hw.enabled &&
+ ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
ath9k_hw_btcoex_disable(ah);
if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
ath_htc_cancel_btcoex_work(priv);
@@ -1026,7 +1027,7 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
priv->op_flags |= OP_INVALID;
- ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n");
+ ath_dbg(common, CONFIG, "Driver halt\n");
mutex_unlock(&priv->mutex);
}
@@ -1119,8 +1120,8 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
ath9k_htc_start_ani(priv);
}
- ath_dbg(common, ATH_DBG_CONFIG,
- "Attach a VIF of type: %d at idx: %d\n", vif->type, avp->index);
+ ath_dbg(common, CONFIG, "Attach a VIF of type: %d at idx: %d\n",
+ vif->type, avp->index);
out:
ath9k_htc_ps_restore(priv);
@@ -1176,7 +1177,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
ath9k_htc_stop_ani(priv);
}
- ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface at idx: %d\n", avp->index);
+ ath_dbg(common, CONFIG, "Detach Interface at idx: %d\n", avp->index);
ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
@@ -1201,8 +1202,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
mutex_unlock(&priv->htc_pm_lock);
if (enable_radio) {
- ath_dbg(common, ATH_DBG_CONFIG,
- "not-idle: enabling radio\n");
+ ath_dbg(common, CONFIG, "not-idle: enabling radio\n");
ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
ath9k_htc_radio_enable(hw);
}
@@ -1224,7 +1224,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
struct ieee80211_channel *curchan = hw->conf.channel;
int pos = curchan->hw_value;
- ath_dbg(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
+ ath_dbg(common, CONFIG, "Set channel: %d MHz\n",
curchan->center_freq);
ath9k_cmn_update_ichannel(&priv->ah->channels[pos],
@@ -1264,8 +1264,7 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
}
mutex_unlock(&priv->htc_pm_lock);
- ath_dbg(common, ATH_DBG_CONFIG,
- "idle: disabling radio\n");
+ ath_dbg(common, CONFIG, "idle: disabling radio\n");
ath9k_htc_radio_disable(hw);
}
@@ -1297,7 +1296,7 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
*total_flags &= SUPPORTED_FILTERS;
if (priv->op_flags & OP_INVALID) {
- ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_ANY,
+ ath_dbg(ath9k_hw_common(priv->ah), ANY,
"Unable to configure filter on invalid state\n");
mutex_unlock(&priv->mutex);
return;
@@ -1308,8 +1307,8 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
rfilt = ath9k_htc_calcrxfilter(priv);
ath9k_hw_setrxfilter(priv->ah, rfilt);
- ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_CONFIG,
- "Set HW RX filter: 0x%x\n", rfilt);
+ ath_dbg(ath9k_hw_common(priv->ah), CONFIG, "Set HW RX filter: 0x%x\n",
+ rfilt);
ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
@@ -1376,7 +1375,7 @@ static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
qnum = get_hw_qnum(queue, priv->hwq_map);
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Configure tx [queue/hwq] [%d/%d], aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
queue, qnum, params->aifs, params->cw_min,
params->cw_max, params->txop);
@@ -1411,7 +1410,7 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw,
return -ENOSPC;
mutex_lock(&priv->mutex);
- ath_dbg(common, ATH_DBG_CONFIG, "Set HW Key\n");
+ ath_dbg(common, CONFIG, "Set HW Key\n");
ath9k_htc_ps_wakeup(priv);
switch (cmd) {
@@ -1447,8 +1446,7 @@ static void ath9k_htc_set_bssid(struct ath9k_htc_priv *priv)
struct ath_common *common = ath9k_hw_common(priv->ah);
ath9k_hw_write_associd(priv->ah);
- ath_dbg(common, ATH_DBG_CONFIG,
- "BSSID: %pM aid: 0x%x\n",
+ ath_dbg(common, CONFIG, "BSSID: %pM aid: 0x%x\n",
common->curbssid, common->curaid);
}
@@ -1486,7 +1484,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
ath9k_htc_ps_wakeup(priv);
if (changed & BSS_CHANGED_ASSOC) {
- ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
+ ath_dbg(common, CONFIG, "BSS Changed ASSOC %d\n",
bss_conf->assoc);
bss_conf->assoc ?
@@ -1511,8 +1509,8 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
}
if ((changed & BSS_CHANGED_BEACON_ENABLED) && bss_conf->enable_beacon) {
- ath_dbg(common, ATH_DBG_CONFIG,
- "Beacon enabled for BSS: %pM\n", bss_conf->bssid);
+ ath_dbg(common, CONFIG, "Beacon enabled for BSS: %pM\n",
+ bss_conf->bssid);
ath9k_htc_set_tsfadjust(priv, vif);
priv->op_flags |= OP_ENABLE_BEACON;
ath9k_htc_beacon_config(priv, vif);
@@ -1524,7 +1522,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
* AP/IBSS interfaces.
*/
if ((priv->num_ap_vif <= 1) || priv->num_ibss_vif) {
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Beacon disabled for BSS: %pM\n",
bss_conf->bssid);
priv->op_flags &= ~OP_ENABLE_BEACON;
@@ -1542,7 +1540,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
(vif->type == NL80211_IFTYPE_AP)) {
priv->op_flags |= OP_TSF_RESET;
}
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Beacon interval changed for BSS: %pM\n",
bss_conf->bssid);
ath9k_htc_beacon_config(priv, vif);
@@ -1732,8 +1730,7 @@ static int ath9k_htc_set_bitrate_mask(struct ieee80211_hw *hw,
goto out;
}
- ath_dbg(common, ATH_DBG_CONFIG,
- "Set bitrate masks: 0x%x, 0x%x\n",
+ ath_dbg(common, CONFIG, "Set bitrate masks: 0x%x, 0x%x\n",
mask->control[IEEE80211_BAND_2GHZ].legacy,
mask->control[IEEE80211_BAND_5GHZ].legacy);
out:
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 2d81c70..3e40a64 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -355,7 +355,7 @@ int ath9k_htc_tx_start(struct ath9k_htc_priv *priv,
vif_idx = avp->index;
} else {
if (!priv->ah->is_monitoring) {
- ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
+ ath_dbg(ath9k_hw_common(priv->ah), XMIT,
"VIF is null, but no monitor interface !\n");
return -EINVAL;
}
@@ -620,8 +620,7 @@ static struct sk_buff* ath9k_htc_tx_get_packet(struct ath9k_htc_priv *priv,
}
spin_unlock_irqrestore(&epid_queue->lock, flags);
- ath_dbg(common, ATH_DBG_XMIT,
- "No matching packet for cookie: %d, epid: %d\n",
+ ath_dbg(common, XMIT, "No matching packet for cookie: %d, epid: %d\n",
txs->cookie, epid);
return NULL;
@@ -705,8 +704,7 @@ static inline bool check_packet(struct ath9k_htc_priv *priv, struct sk_buff *skb
if (time_after(jiffies,
tx_ctl->timestamp +
msecs_to_jiffies(ATH9K_HTC_TX_TIMEOUT_INTERVAL))) {
- ath_dbg(common, ATH_DBG_XMIT,
- "Dropping a packet due to TX timeout\n");
+ ath_dbg(common, XMIT, "Dropping a packet due to TX timeout\n");
return true;
}
@@ -753,7 +751,7 @@ void ath9k_htc_tx_cleanup_timer(unsigned long data)
skb = ath9k_htc_tx_get_packet(priv, &event->txs);
if (skb) {
- ath_dbg(common, ATH_DBG_XMIT,
+ ath_dbg(common, XMIT,
"Found packet for cookie: %d, epid: %d\n",
event->txs.cookie,
MS(event->txs.ts_rate, ATH9K_HTC_TXSTAT_EPID));
@@ -1167,8 +1165,7 @@ void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
spin_unlock(&priv->rx.rxbuflock);
if (rxbuf == NULL) {
- ath_dbg(common, ATH_DBG_ANY,
- "No free RX buffer\n");
+ ath_dbg(common, ANY, "No free RX buffer\n");
goto err;
}
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index e74c233..c4ad0b0 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -212,4 +212,13 @@ static inline int ath9k_hw_fast_chan_change(struct ath_hw *ah,
return ath9k_hw_private_ops(ah)->fast_chan_change(ah, chan,
ini_reloaded);
}
+
+static inline void ath9k_hw_set_radar_params(struct ath_hw *ah)
+{
+ if (!ath9k_hw_private_ops(ah)->set_radar_params)
+ return;
+
+ ath9k_hw_private_ops(ah)->set_radar_params(ah, &ah->radar_conf);
+}
+
#endif /* ATH9K_HW_OPS_H */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 8873c6e..87db1ee 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -133,7 +133,7 @@ bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout)
udelay(AH_TIME_QUANTUM);
}
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_ANY,
+ ath_dbg(ath9k_hw_common(ah), ANY,
"timeout (%d us) on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n",
timeout, reg, REG_READ(ah, reg), mask, val);
@@ -491,8 +491,7 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
if (ecode != 0)
return ecode;
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_CONFIG,
- "Eeprom VER: %d, REV: %d\n",
+ ath_dbg(ath9k_hw_common(ah), CONFIG, "Eeprom VER: %d, REV: %d\n",
ah->eep_ops->get_eeprom_ver(ah),
ah->eep_ops->get_eeprom_rev(ah));
@@ -504,7 +503,7 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
return ecode;
}
- if (!AR_SREV_9100(ah) && !AR_SREV_9340(ah)) {
+ if (ah->config.enable_ani) {
ath9k_hw_ani_setup(ah);
ath9k_hw_ani_init(ah);
}
@@ -567,7 +566,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
}
}
- ath_dbg(common, ATH_DBG_RESET, "serialize_regmode is %d\n",
+ ath_dbg(common, RESET, "serialize_regmode is %d\n",
ah->config.serialize_regmode);
if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
@@ -610,6 +609,10 @@ static int __ath9k_hw_init(struct ath_hw *ah)
if (!AR_SREV_9300_20_OR_LATER(ah))
ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
+ /* disable ANI for 9340 */
+ if (AR_SREV_9340(ah))
+ ah->config.enable_ani = false;
+
ath9k_hw_init_mode_regs(ah);
if (!ah->is_pciexpress)
@@ -954,8 +957,8 @@ static void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us)
static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
{
if (tu > 0xFFFF) {
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_XMIT,
- "bad global tx timeout %u\n", tu);
+ ath_dbg(ath9k_hw_common(ah), XMIT, "bad global tx timeout %u\n",
+ tu);
ah->globaltxtimeout = (u32) -1;
return false;
} else {
@@ -976,7 +979,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
int rx_lat = 0, tx_lat = 0, eifs = 0;
u32 reg;
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET, "ah->misc_mode 0x%x\n",
+ ath_dbg(ath9k_hw_common(ah), RESET, "ah->misc_mode 0x%x\n",
ah->misc_mode);
if (!chan)
@@ -1034,13 +1037,16 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
/*
* Workaround for early ACK timeouts, add an offset to match the
- * initval's 64us ack timeout value.
+ * initval's 64us ack timeout value. Use 48us for the CTS timeout.
* This was initially only meant to work around an issue with delayed
* BA frames in some implementations, but it has been found to fix ACK
* timeout issues in other cases as well.
*/
- if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ)
+ if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ) {
acktimeout += 64 - sifstime - ah->slottime;
+ ctstimeout += 48 - sifstime - ah->slottime;
+ }
+
ath9k_hw_set_sifs_time(ah, sifstime);
ath9k_hw_setslottime(ah, slottime);
@@ -1271,7 +1277,7 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
(npend || type == ATH9K_RESET_COLD)) {
int reset_err = 0;
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET,
+ ath_dbg(ath9k_hw_common(ah), RESET,
"reset MAC via external reset\n");
reset_err = ah->external_reset();
@@ -1294,8 +1300,7 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
REG_WRITE(ah, AR_RTC_RC, 0);
if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) {
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET,
- "RTC stuck in MAC reset\n");
+ ath_dbg(ath9k_hw_common(ah), RESET, "RTC stuck in MAC reset\n");
return false;
}
@@ -1340,8 +1345,7 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
AR_RTC_STATUS_M,
AR_RTC_STATUS_ON,
AH_WAIT_TIMEOUT)) {
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET,
- "RTC not waking up\n");
+ ath_dbg(ath9k_hw_common(ah), RESET, "RTC not waking up\n");
return false;
}
@@ -1350,6 +1354,7 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
{
+ bool ret = false;
if (AR_SREV_9300_20_OR_LATER(ah)) {
REG_WRITE(ah, AR_WA, ah->WARegVal);
@@ -1361,13 +1366,20 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
switch (type) {
case ATH9K_RESET_POWER_ON:
- return ath9k_hw_set_reset_power_on(ah);
+ ret = ath9k_hw_set_reset_power_on(ah);
+ break;
case ATH9K_RESET_WARM:
case ATH9K_RESET_COLD:
- return ath9k_hw_set_reset(ah, type);
+ ret = ath9k_hw_set_reset(ah, type);
+ break;
default:
- return false;
+ break;
}
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+ REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
+ return ret;
}
static bool ath9k_hw_chip_reset(struct ath_hw *ah,
@@ -1406,7 +1418,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
if (ath9k_hw_numtxpending(ah, qnum)) {
- ath_dbg(common, ATH_DBG_QUEUE,
+ ath_dbg(common, QUEUE,
"Transmit frames pending on queue %d\n", qnum);
return false;
}
@@ -1506,6 +1518,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
struct ath9k_hw_cal_data *caldata, bool bChannelChange)
{
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci_hw = &ah->btcoex_hw.mci;
u32 saveLedState;
struct ath9k_channel *curchan = ah->curchan;
u32 saveDefAntenna;
@@ -1513,6 +1526,52 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
u64 tsf = 0;
int i, r;
bool allow_fbs = false;
+ bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
+ bool save_fullsleep = ah->chip_fullsleep;
+
+ if (mci) {
+
+ ar9003_mci_2g5g_changed(ah, IS_CHAN_2GHZ(chan));
+
+ if (mci_hw->bt_state == MCI_BT_CAL_START) {
+ u32 payload[4] = {0, 0, 0, 0};
+
+ ath_dbg(common, MCI, "MCI stop rx for BT CAL\n");
+
+ mci_hw->bt_state = MCI_BT_CAL;
+
+ /*
+			 * MCI FIX: disable the MCI interrupt here to prevent the
+			 * SW_MSG_DONE or RX_MSG bits from triggering MCI_INT and
+			 * causing mci_intr reentry.
+ */
+
+ ar9003_mci_disable_interrupt(ah);
+
+ ath_dbg(common, MCI, "send WLAN_CAL_GRANT\n");
+ MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_GRANT);
+ ar9003_mci_send_message(ah, MCI_GPM, 0, payload,
+ 16, true, false);
+
+ ath_dbg(common, MCI, "\nMCI BT is calibrating\n");
+
+			/* Wait up to 25 ms for BT calibration to complete */
+
+ if (ar9003_mci_wait_for_gpm(ah, MCI_GPM_BT_CAL_DONE,
+ 0, 25000))
+ ath_dbg(common, MCI,
+ "MCI got BT_CAL_DONE\n");
+ else
+ ath_dbg(common, MCI,
+					"MCI ### BT cal takes too long, force bt_state to be bt_awake\n");
+ mci_hw->bt_state = MCI_BT_AWAKE;
+ /* MCI FIX: enable mci interrupt here */
+ ar9003_mci_enable_interrupt(ah);
+
+			return 0;
+ }
+ }
+
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
return -EIO;
@@ -1550,12 +1609,29 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (ath9k_hw_channel_change(ah, chan)) {
ath9k_hw_loadnf(ah, ah->curchan);
ath9k_hw_start_nfcal(ah, true);
+ if (mci && mci_hw->ready)
+ ar9003_mci_2g5g_switch(ah, true);
+
if (AR_SREV_9271(ah))
ar9002_hw_load_ani_reg(ah, chan);
return 0;
}
}
+ if (mci) {
+ ar9003_mci_disable_interrupt(ah);
+
+ if (mci_hw->ready && !save_fullsleep) {
+ ar9003_mci_mute_bt(ah);
+ udelay(20);
+ REG_WRITE(ah, AR_BTCOEX_CTRL, 0);
+ }
+
+ mci_hw->bt_state = MCI_BT_SLEEP;
+ mci_hw->ready = false;
+ }
+
saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA);
if (saveDefAntenna == 0)
saveDefAntenna = 1;
@@ -1611,6 +1687,9 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (r)
return r;
+ if (mci)
+ ar9003_mci_reset(ah, false, IS_CHAN_2GHZ(chan), save_fullsleep);
+
/*
* Some AR91xx SoC devices frequently fail to accept TSF writes
* right after the chip reset. When that happens, write a new
@@ -1728,6 +1807,54 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
ath9k_hw_loadnf(ah, chan);
ath9k_hw_start_nfcal(ah, true);
+ if (mci && mci_hw->ready) {
+
+ if (IS_CHAN_2GHZ(chan) &&
+ (mci_hw->bt_state == MCI_BT_SLEEP)) {
+
+ if (ar9003_mci_check_int(ah,
+ AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET) ||
+ ar9003_mci_check_int(ah,
+ AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)) {
+
+ /*
+				 * BT is sleeping. If BT wakes up during
+				 * WLAN calibration, go through all the
+				 * message exchanges again and recalibrate.
+ */
+
+ ath_dbg(common, MCI,
+ "MCI BT wakes up during WLAN calibration\n");
+
+ REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
+ AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET |
+ AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE);
+ ath_dbg(common, MCI, "MCI send REMOTE_RESET\n");
+ ar9003_mci_remote_reset(ah, true);
+ ar9003_mci_send_sys_waking(ah, true);
+ udelay(1);
+ if (IS_CHAN_2GHZ(chan))
+ ar9003_mci_send_lna_transfer(ah, true);
+
+ mci_hw->bt_state = MCI_BT_AWAKE;
+
+ ath_dbg(common, MCI, "MCI re-cal\n");
+
+ if (caldata) {
+ caldata->done_txiqcal_once = false;
+ caldata->done_txclcal_once = false;
+ caldata->rtt_hist.num_readings = 0;
+ }
+
+ if (!ath9k_hw_init_cal(ah, chan))
+ return -EIO;
+
+ }
+ }
+ ar9003_mci_enable_interrupt(ah);
+ }
+
ENABLE_REGWRITE_BUFFER(ah);
ath9k_hw_restore_chainmask(ah);
@@ -1742,14 +1869,14 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
u32 mask;
mask = REG_READ(ah, AR_CFG);
if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) {
- ath_dbg(common, ATH_DBG_RESET,
- "CFG Byte Swap Set 0x%x\n", mask);
+ ath_dbg(common, RESET, "CFG Byte Swap Set 0x%x\n",
+ mask);
} else {
mask =
INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB;
REG_WRITE(ah, AR_CFG, mask);
- ath_dbg(common, ATH_DBG_RESET,
- "Setting CFG 0x%x\n", REG_READ(ah, AR_CFG));
+ ath_dbg(common, RESET, "Setting CFG 0x%x\n",
+ REG_READ(ah, AR_CFG));
}
} else {
if (common->bus_ops->ath_bus_type == ATH_USB) {
@@ -1767,9 +1894,25 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
#endif
}
- if (ah->btcoex_hw.enabled)
+ if (ah->btcoex_hw.enabled &&
+ ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE)
ath9k_hw_btcoex_enable(ah);
+ if (mci && mci_hw->ready) {
+ /*
+		 * Check the BT state again to make
+		 * sure it has not changed.
+ */
+
+ ar9003_mci_sync_bt_state(ah);
+ ar9003_mci_2g5g_switch(ah, true);
+
+ if ((mci_hw->bt_state == MCI_BT_AWAKE) &&
+ (mci_hw->query_bt == true)) {
+ mci_hw->need_flush_btinfo = true;
+ }
+ }
+
if (AR_SREV_9300_20_OR_LATER(ah)) {
ar9003_hw_bb_watchdog_config(ah);
@@ -1934,6 +2077,7 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
{
struct ath_common *common = ath9k_hw_common(ah);
+ struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci;
int status = true, setChip = true;
static const char *modes[] = {
"AWAKE",
@@ -1945,18 +2089,41 @@ bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
if (ah->power_mode == mode)
return status;
- ath_dbg(common, ATH_DBG_RESET, "%s -> %s\n",
+ ath_dbg(common, RESET, "%s -> %s\n",
modes[ah->power_mode], modes[mode]);
switch (mode) {
case ATH9K_PM_AWAKE:
status = ath9k_hw_set_power_awake(ah, setChip);
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+ REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
break;
case ATH9K_PM_FULL_SLEEP:
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
+ if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) &&
+ (mci->bt_state != MCI_BT_SLEEP) &&
+ !mci->halted_bt_gpm) {
+ ath_dbg(common, MCI,
+ "MCI halt BT GPM (full_sleep)\n");
+ ar9003_mci_send_coex_halt_bt_gpm(ah,
+ true, true);
+ }
+
+ mci->ready = false;
+ REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+ }
+
ath9k_set_power_sleep(ah, setChip);
ah->chip_fullsleep = true;
break;
case ATH9K_PM_NETWORK_SLEEP:
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+ REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
+
ath9k_set_power_network_sleep(ah, setChip);
break;
default:
@@ -2006,9 +2173,8 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
break;
default:
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_BEACON,
- "%s: unsupported opmode: %d\n",
- __func__, ah->opmode);
+ ath_dbg(ath9k_hw_common(ah), BEACON,
+ "%s: unsupported opmode: %d\n", __func__, ah->opmode);
return;
break;
}
@@ -2059,10 +2225,10 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
else
nextTbtt = bs->bs_nexttbtt;
- ath_dbg(common, ATH_DBG_BEACON, "next DTIM %d\n", bs->bs_nextdtim);
- ath_dbg(common, ATH_DBG_BEACON, "next beacon %d\n", nextTbtt);
- ath_dbg(common, ATH_DBG_BEACON, "beacon period %d\n", beaconintval);
- ath_dbg(common, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod);
+ ath_dbg(common, BEACON, "next DTIM %d\n", bs->bs_nextdtim);
+ ath_dbg(common, BEACON, "next beacon %d\n", nextTbtt);
+ ath_dbg(common, BEACON, "beacon period %d\n", beaconintval);
+ ath_dbg(common, BEACON, "DTIM period %d\n", dtimperiod);
ENABLE_REGWRITE_BUFFER(ah);
@@ -2109,6 +2275,30 @@ static u8 fixup_chainmask(u8 chip_chainmask, u8 eeprom_chainmask)
return chip_chainmask;
}
+/**
+ * ath9k_hw_dfs_tested - checks if DFS has been tested with the chipset in use
+ * @ah: the atheros hardware data structure
+ *
+ * We enable DFS support upstream only on chipsets which have passed a
+ * series of tests. The required tests are documented at:
+ *
+ * http://wireless.kernel.org/en/users/Drivers/ath9k/dfs
+ *
+ * Once a new chipset has been properly tested, an individual commit can be
+ * used to document the DFS testing for that chipset.
+ */
+static bool ath9k_hw_dfs_tested(struct ath_hw *ah)
+{
+
+ switch (ah->hw_version.macVersion) {
+ /* AR9580 will likely be our first target to get testing on */
+ case AR_SREV_VERSION_9580:
+ default:
+ return false;
+ }
+}
+
int ath9k_hw_fill_cap_info(struct ath_hw *ah)
{
struct ath9k_hw_capabilities *pCap = &ah->caps;
@@ -2130,8 +2320,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
regulatory->current_rd += 5;
else if (regulatory->current_rd == 0x41)
regulatory->current_rd = 0x43;
- ath_dbg(common, ATH_DBG_REGULATORY,
- "regdomain mapped to 0x%x\n", regulatory->current_rd);
+ ath_dbg(common, REGULATORY, "regdomain mapped to 0x%x\n",
+ regulatory->current_rd);
}
eeval = ah->eep_ops->get_eeprom(ah, EEP_OP_MODE);
@@ -2149,6 +2339,8 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
if (AR_SREV_9485(ah) || AR_SREV_9285(ah) || AR_SREV_9330(ah))
chip_chainmask = 1;
+ else if (AR_SREV_9462(ah))
+ chip_chainmask = 3;
else if (!AR_SREV_9280_20_OR_LATER(ah))
chip_chainmask = 7;
else if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9340(ah))
@@ -2205,12 +2397,10 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
else
pCap->num_gpio_pins = AR_NUM_GPIO;
- if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah)) {
- pCap->hw_caps |= ATH9K_HW_CAP_CST;
+ if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah))
pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
- } else {
+ else
pCap->rts_aggr_limit = (8 * 1024);
- }
#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT);
@@ -2234,7 +2424,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS;
if (common->btcoex_enabled) {
- if (AR_SREV_9300_20_OR_LATER(ah)) {
+ if (AR_SREV_9462(ah))
+ btcoex_hw->scheme = ATH_BTCOEX_CFG_MCI;
+ else if (AR_SREV_9300_20_OR_LATER(ah)) {
btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO_9300;
btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO_9300;
@@ -2318,6 +2510,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
pCap->pcie_lcr_offset = 0x80;
}
+ if (ath9k_hw_dfs_tested(ah))
+ pCap->hw_caps |= ATH9K_HW_CAP_DFS;
+
tx_chainmask = pCap->tx_chainmask;
rx_chainmask = pCap->rx_chainmask;
while (tx_chainmask || rx_chainmask) {
@@ -2332,11 +2527,11 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
if (AR_SREV_9300_20_OR_LATER(ah)) {
ah->enabled_cals |= TX_IQ_CAL;
- if (!AR_SREV_9330(ah))
+ if (AR_SREV_9485_OR_LATER(ah))
ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
}
if (AR_SREV_9462(ah))
- pCap->hw_caps |= ATH9K_HW_CAP_RTT;
+ pCap->hw_caps |= ATH9K_HW_CAP_RTT | ATH9K_HW_CAP_MCI;
return 0;
}
@@ -2584,7 +2779,7 @@ void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test)
struct ath9k_channel *chan = ah->curchan;
struct ieee80211_channel *channel = chan->chan;
- reg->power_limit = min_t(int, limit, MAX_RATE_POWER);
+ reg->power_limit = min_t(u32, limit, MAX_RATE_POWER);
if (test)
channel->max_power = MAX_RATE_POWER / 2;
@@ -2651,7 +2846,7 @@ void ath9k_hw_reset_tsf(struct ath_hw *ah)
{
if (!ath9k_hw_wait(ah, AR_SLP32_MODE, AR_SLP32_TSF_WRITE_STATUS, 0,
AH_TSF_WRITE_TIMEOUT))
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET,
+ ath_dbg(ath9k_hw_common(ah), RESET,
"AR_SLP32_TSF_WRITE_STATUS limit exceeded\n");
REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE);
@@ -2776,7 +2971,7 @@ void ath9k_hw_gen_timer_start(struct ath_hw *ah,
timer_next = tsf + trig_timeout;
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_HWTIMER,
+ ath_dbg(ath9k_hw_common(ah), HWTIMER,
"current tsf %x period %x timer_next %x\n",
tsf, timer_period, timer_next);
@@ -2865,8 +3060,8 @@ void ath_gen_timer_isr(struct ath_hw *ah)
index = rightmost_index(timer_table, &thresh_mask);
timer = timer_table->timers[index];
BUG_ON(!timer);
- ath_dbg(common, ATH_DBG_HWTIMER,
- "TSF overflow for Gen timer %d\n", index);
+ ath_dbg(common, HWTIMER, "TSF overflow for Gen timer %d\n",
+ index);
timer->overflow(timer->arg);
}
@@ -2874,7 +3069,7 @@ void ath_gen_timer_isr(struct ath_hw *ah)
index = rightmost_index(timer_table, &trigger_mask);
timer = timer_table->timers[index];
BUG_ON(!timer);
- ath_dbg(common, ATH_DBG_HWTIMER,
+ ath_dbg(common, HWTIMER,
"Gen timer[%d] trigger\n", index);
timer->trigger(timer->arg);
}
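
A quick worked example for the 2 GHz ACK/CTS timeout hunk above in
ath9k_hw_init_global_settings(), assuming the usual 2 GHz OFDM timing of a
10 us SIFS and a 9 us short slot (these numbers are illustrative and not
taken from the patch):

	acktimeout += 64 - sifstime - ah->slottime;	/* 64 - 10 - 9 = +45 us */
	ctstimeout += 48 - sifstime - ah->slottime;	/* 48 - 10 - 9 = +29 us */
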
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index f389b3c..c8261d4 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -59,9 +59,6 @@
#define AT9285_COEX3WIRE_SA_SUBSYSID 0x30aa
#define AT9285_COEX3WIRE_DA_SUBSYSID 0x30ab
-#define AR9300_NUM_BT_WEIGHTS 4
-#define AR9300_NUM_WLAN_WEIGHTS 4
-
#define ATH_AMPDU_LIMIT_MAX (64 * 1024 - 1)
#define ATH_DEFAULT_NOISE_FLOOR -95
@@ -129,6 +126,16 @@
#define AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL 4
#define AR_GPIO_OUTPUT_MUX_AS_MAC_NETWORK_LED 5
#define AR_GPIO_OUTPUT_MUX_AS_MAC_POWER_LED 6
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_DATA 0x16
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_WLAN_CLK 0x17
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_BT_DATA 0x18
+#define AR_GPIO_OUTPUT_MUX_AS_MCI_BT_CLK 0x19
+#define AR_GPIO_OUTPUT_MUX_AS_WL_IN_TX 0x14
+#define AR_GPIO_OUTPUT_MUX_AS_WL_IN_RX 0x13
+#define AR_GPIO_OUTPUT_MUX_AS_BT_IN_TX 9
+#define AR_GPIO_OUTPUT_MUX_AS_BT_IN_RX 8
+#define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_STROBE 0x1d
+#define AR_GPIO_OUTPUT_MUX_AS_RUCKUS_DATA 0x1e
#define AR_GPIOD_MASK 0x00001FFF
#define AR_GPIO_BIT(_gpio) (1 << (_gpio))
@@ -189,20 +196,25 @@ enum ath_ini_subsys {
enum ath9k_hw_caps {
ATH9K_HW_CAP_HT = BIT(0),
ATH9K_HW_CAP_RFSILENT = BIT(1),
- ATH9K_HW_CAP_CST = BIT(2),
- ATH9K_HW_CAP_AUTOSLEEP = BIT(4),
- ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(5),
- ATH9K_HW_CAP_EDMA = BIT(6),
- ATH9K_HW_CAP_RAC_SUPPORTED = BIT(7),
- ATH9K_HW_CAP_LDPC = BIT(8),
- ATH9K_HW_CAP_FASTCLOCK = BIT(9),
- ATH9K_HW_CAP_SGI_20 = BIT(10),
- ATH9K_HW_CAP_PAPRD = BIT(11),
- ATH9K_HW_CAP_ANT_DIV_COMB = BIT(12),
- ATH9K_HW_CAP_2GHZ = BIT(13),
- ATH9K_HW_CAP_5GHZ = BIT(14),
- ATH9K_HW_CAP_APM = BIT(15),
- ATH9K_HW_CAP_RTT = BIT(16),
+ ATH9K_HW_CAP_AUTOSLEEP = BIT(2),
+ ATH9K_HW_CAP_4KB_SPLITTRANS = BIT(3),
+ ATH9K_HW_CAP_EDMA = BIT(4),
+ ATH9K_HW_CAP_RAC_SUPPORTED = BIT(5),
+ ATH9K_HW_CAP_LDPC = BIT(6),
+ ATH9K_HW_CAP_FASTCLOCK = BIT(7),
+ ATH9K_HW_CAP_SGI_20 = BIT(8),
+ ATH9K_HW_CAP_PAPRD = BIT(9),
+ ATH9K_HW_CAP_ANT_DIV_COMB = BIT(10),
+ ATH9K_HW_CAP_2GHZ = BIT(11),
+ ATH9K_HW_CAP_5GHZ = BIT(12),
+ ATH9K_HW_CAP_APM = BIT(13),
+ ATH9K_HW_CAP_RTT = BIT(14),
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+ ATH9K_HW_CAP_MCI = BIT(15),
+#else
+ ATH9K_HW_CAP_MCI = 0,
+#endif
+ ATH9K_HW_CAP_DFS = BIT(16),
};
struct ath9k_hw_capabilities {
@@ -268,6 +280,7 @@ enum ath9k_int {
ATH9K_INT_TX = 0x00000040,
ATH9K_INT_TXDESC = 0x00000080,
ATH9K_INT_TIM_TIMER = 0x00000100,
+ ATH9K_INT_MCI = 0x00000200,
ATH9K_INT_BB_WATCHDOG = 0x00000400,
ATH9K_INT_TXURN = 0x00000800,
ATH9K_INT_MIB = 0x00001000,
@@ -419,6 +432,161 @@ enum ath9k_rx_qtype {
ATH9K_RX_QUEUE_MAX,
};
+enum mci_message_header { /* length of payload */
+ MCI_LNA_CTRL = 0x10, /* len = 0 */
+ MCI_CONT_NACK = 0x20, /* len = 0 */
+ MCI_CONT_INFO = 0x30, /* len = 4 */
+ MCI_CONT_RST = 0x40, /* len = 0 */
+ MCI_SCHD_INFO = 0x50, /* len = 16 */
+ MCI_CPU_INT = 0x60, /* len = 4 */
+ MCI_SYS_WAKING = 0x70, /* len = 0 */
+ MCI_GPM = 0x80, /* len = 16 */
+ MCI_LNA_INFO = 0x90, /* len = 1 */
+ MCI_LNA_STATE = 0x94,
+ MCI_LNA_TAKE = 0x98,
+ MCI_LNA_TRANS = 0x9c,
+ MCI_SYS_SLEEPING = 0xa0, /* len = 0 */
+ MCI_REQ_WAKE = 0xc0, /* len = 0 */
+ MCI_DEBUG_16 = 0xfe, /* len = 2 */
+ MCI_REMOTE_RESET = 0xff /* len = 16 */
+};
+
+enum ath_mci_gpm_coex_profile_type {
+ MCI_GPM_COEX_PROFILE_UNKNOWN,
+ MCI_GPM_COEX_PROFILE_RFCOMM,
+ MCI_GPM_COEX_PROFILE_A2DP,
+ MCI_GPM_COEX_PROFILE_HID,
+ MCI_GPM_COEX_PROFILE_BNEP,
+ MCI_GPM_COEX_PROFILE_VOICE,
+ MCI_GPM_COEX_PROFILE_MAX
+};
+
+/* MCI GPM/Coex opcode/type definitions */
+enum {
+ MCI_GPM_COEX_W_GPM_PAYLOAD = 1,
+ MCI_GPM_COEX_B_GPM_TYPE = 4,
+ MCI_GPM_COEX_B_GPM_OPCODE = 5,
+ /* MCI_GPM_WLAN_CAL_REQ, MCI_GPM_WLAN_CAL_DONE */
+ MCI_GPM_WLAN_CAL_W_SEQUENCE = 2,
+
+ /* MCI_GPM_COEX_VERSION_QUERY */
+ /* MCI_GPM_COEX_VERSION_RESPONSE */
+ MCI_GPM_COEX_B_MAJOR_VERSION = 6,
+ MCI_GPM_COEX_B_MINOR_VERSION = 7,
+ /* MCI_GPM_COEX_STATUS_QUERY */
+ MCI_GPM_COEX_B_BT_BITMAP = 6,
+ MCI_GPM_COEX_B_WLAN_BITMAP = 7,
+ /* MCI_GPM_COEX_HALT_BT_GPM */
+ MCI_GPM_COEX_B_HALT_STATE = 6,
+ /* MCI_GPM_COEX_WLAN_CHANNELS */
+ MCI_GPM_COEX_B_CHANNEL_MAP = 6,
+ /* MCI_GPM_COEX_BT_PROFILE_INFO */
+ MCI_GPM_COEX_B_PROFILE_TYPE = 6,
+ MCI_GPM_COEX_B_PROFILE_LINKID = 7,
+ MCI_GPM_COEX_B_PROFILE_STATE = 8,
+ MCI_GPM_COEX_B_PROFILE_ROLE = 9,
+ MCI_GPM_COEX_B_PROFILE_RATE = 10,
+ MCI_GPM_COEX_B_PROFILE_VOTYPE = 11,
+ MCI_GPM_COEX_H_PROFILE_T = 12,
+ MCI_GPM_COEX_B_PROFILE_W = 14,
+ MCI_GPM_COEX_B_PROFILE_A = 15,
+ /* MCI_GPM_COEX_BT_STATUS_UPDATE */
+ MCI_GPM_COEX_B_STATUS_TYPE = 6,
+ MCI_GPM_COEX_B_STATUS_LINKID = 7,
+ MCI_GPM_COEX_B_STATUS_STATE = 8,
+ /* MCI_GPM_COEX_BT_UPDATE_FLAGS */
+ MCI_GPM_COEX_W_BT_FLAGS = 6,
+ MCI_GPM_COEX_B_BT_FLAGS_OP = 10
+};
+
+enum mci_gpm_subtype {
+ MCI_GPM_BT_CAL_REQ = 0,
+ MCI_GPM_BT_CAL_GRANT = 1,
+ MCI_GPM_BT_CAL_DONE = 2,
+ MCI_GPM_WLAN_CAL_REQ = 3,
+ MCI_GPM_WLAN_CAL_GRANT = 4,
+ MCI_GPM_WLAN_CAL_DONE = 5,
+ MCI_GPM_COEX_AGENT = 0x0c,
+ MCI_GPM_RSVD_PATTERN = 0xfe,
+ MCI_GPM_RSVD_PATTERN32 = 0xfefefefe,
+ MCI_GPM_BT_DEBUG = 0xff
+};
+
+enum mci_bt_state {
+ MCI_BT_SLEEP,
+ MCI_BT_AWAKE,
+ MCI_BT_CAL_START,
+ MCI_BT_CAL
+};
+
+/* Type of state query */
+enum mci_state_type {
+ MCI_STATE_ENABLE,
+ MCI_STATE_INIT_GPM_OFFSET,
+ MCI_STATE_NEXT_GPM_OFFSET,
+ MCI_STATE_LAST_GPM_OFFSET,
+ MCI_STATE_BT,
+ MCI_STATE_SET_BT_SLEEP,
+ MCI_STATE_SET_BT_AWAKE,
+ MCI_STATE_SET_BT_CAL_START,
+ MCI_STATE_SET_BT_CAL,
+ MCI_STATE_LAST_SCHD_MSG_OFFSET,
+ MCI_STATE_REMOTE_SLEEP,
+ MCI_STATE_CONT_RSSI_POWER,
+ MCI_STATE_CONT_PRIORITY,
+ MCI_STATE_CONT_TXRX,
+ MCI_STATE_RESET_REQ_WAKE,
+ MCI_STATE_SEND_WLAN_COEX_VERSION,
+ MCI_STATE_SET_BT_COEX_VERSION,
+ MCI_STATE_SEND_WLAN_CHANNELS,
+ MCI_STATE_SEND_VERSION_QUERY,
+ MCI_STATE_SEND_STATUS_QUERY,
+ MCI_STATE_NEED_FLUSH_BT_INFO,
+ MCI_STATE_SET_CONCUR_TX_PRI,
+ MCI_STATE_RECOVER_RX,
+ MCI_STATE_NEED_FTP_STOMP,
+ MCI_STATE_NEED_TUNING,
+ MCI_STATE_DEBUG,
+ MCI_STATE_MAX
+};
+
+enum mci_gpm_coex_opcode {
+ MCI_GPM_COEX_VERSION_QUERY,
+ MCI_GPM_COEX_VERSION_RESPONSE,
+ MCI_GPM_COEX_STATUS_QUERY,
+ MCI_GPM_COEX_HALT_BT_GPM,
+ MCI_GPM_COEX_WLAN_CHANNELS,
+ MCI_GPM_COEX_BT_PROFILE_INFO,
+ MCI_GPM_COEX_BT_STATUS_UPDATE,
+ MCI_GPM_COEX_BT_UPDATE_FLAGS
+};
+
+#define MCI_GPM_NOMORE 0
+#define MCI_GPM_MORE 1
+#define MCI_GPM_INVALID 0xffffffff
+
+#define MCI_GPM_RECYCLE(_p_gpm) do { \
+ *(((u32 *)_p_gpm) + MCI_GPM_COEX_W_GPM_PAYLOAD) = \
+ MCI_GPM_RSVD_PATTERN32; \
+} while (0)
+
+#define MCI_GPM_TYPE(_p_gpm) \
+ (*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) & 0xff)
+
+#define MCI_GPM_OPCODE(_p_gpm) \
+ (*(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_OPCODE) & 0xff)
+
+#define MCI_GPM_SET_CAL_TYPE(_p_gpm, _cal_type) do { \
+ *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) = (_cal_type) & 0xff;\
+} while (0)
+
+#define MCI_GPM_SET_TYPE_OPCODE(_p_gpm, _type, _opcode) do { \
+ *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_TYPE) = (_type) & 0xff; \
+ *(((u8 *)(_p_gpm)) + MCI_GPM_COEX_B_GPM_OPCODE) = (_opcode) & 0xff;\
+} while (0)
+
+#define MCI_GPM_IS_CAL_TYPE(_type) ((_type) <= MCI_GPM_WLAN_CAL_DONE)
+
struct ath9k_beacon_state {
u32 bs_nexttbtt;
u32 bs_nextdtim;
@@ -772,7 +940,6 @@ struct ath_hw {
u32 *analogBank6Data;
u32 *analogBank6TPCData;
u32 *analogBank7Data;
- u32 *addac5416_21;
u32 *bank6Temp;
u8 txpower_limit;
@@ -791,8 +958,6 @@ struct ath_hw {
/* Bluetooth coexistance */
struct ath_btcoex_hw btcoex_hw;
- u32 bt_coex_bt_weight[AR9300_NUM_BT_WEIGHTS];
- u32 bt_coex_wlan_weight[AR9300_NUM_WLAN_WEIGHTS];
u32 intr_txqs;
u8 txchainmask;
@@ -850,7 +1015,7 @@ struct ath_hw {
u32 ts_paddr_start;
u32 ts_paddr_end;
u16 ts_tail;
- u8 ts_size;
+ u16 ts_size;
u32 bb_watchdog_last_status;
u32 bb_watchdog_timeout_ms; /* in ms, 0 to disable */
@@ -948,7 +1113,6 @@ bool ath9k_hw_disable(struct ath_hw *ah);
void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test);
void ath9k_hw_setopmode(struct ath_hw *ah);
void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1);
-void ath9k_hw_setbssidmask(struct ath_hw *ah);
void ath9k_hw_write_associd(struct ath_hw *ah);
u32 ath9k_hw_gettsf32(struct ath_hw *ah);
u64 ath9k_hw_gettsf64(struct ath_hw *ah);
@@ -1041,6 +1205,42 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning);
void ath9k_hw_proc_mib_event(struct ath_hw *ah);
void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan);
+bool ar9003_mci_send_message(struct ath_hw *ah, u8 header, u32 flag,
+ u32 *payload, u8 len, bool wait_done,
+ bool check_bt);
+void ar9003_mci_mute_bt(struct ath_hw *ah);
+u32 ar9003_mci_state(struct ath_hw *ah, u32 state_type, u32 *p_data);
+void ar9003_mci_setup(struct ath_hw *ah, u32 gpm_addr, void *gpm_buf,
+ u16 len, u32 sched_addr);
+void ar9003_mci_cleanup(struct ath_hw *ah);
+void ar9003_mci_send_coex_halt_bt_gpm(struct ath_hw *ah, bool halt,
+ bool wait_done);
+u32 ar9003_mci_wait_for_gpm(struct ath_hw *ah, u8 gpm_type,
+ u8 gpm_opcode, int time_out);
+void ar9003_mci_2g5g_changed(struct ath_hw *ah, bool is_2g);
+void ar9003_mci_disable_interrupt(struct ath_hw *ah);
+void ar9003_mci_enable_interrupt(struct ath_hw *ah);
+void ar9003_mci_2g5g_switch(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_reset(struct ath_hw *ah, bool en_int, bool is_2g,
+ bool is_full_sleep);
+bool ar9003_mci_check_int(struct ath_hw *ah, u32 ints);
+void ar9003_mci_remote_reset(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_send_sys_waking(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_send_lna_transfer(struct ath_hw *ah, bool wait_done);
+void ar9003_mci_sync_bt_state(struct ath_hw *ah);
+void ar9003_mci_get_interrupt(struct ath_hw *ah, u32 *raw_intr,
+ u32 *rx_msg_intr);
+
+#ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
+static inline enum ath_btcoex_scheme
+ath9k_hw_get_btcoex_scheme(struct ath_hw *ah)
+{
+ return ah->btcoex_hw.scheme;
+}
+#else
+#define ath9k_hw_get_btcoex_scheme(...) ATH_BTCOEX_CFG_NONE
+#endif
+
#define ATH9K_CLOCK_RATE_CCK 22
#define ATH9K_CLOCK_RATE_5GHZ_OFDM 40
#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44
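
A minimal sketch of how the GPM payload macros added above are meant to fit
together, assuming hw.h from this patch is included; the buffer and the
function name example_gpm_grant() are illustrative only and not part of the
patch:

	/* Illustrative only: build and send a WLAN_CAL_GRANT GPM message. */
	static void example_gpm_grant(struct ath_hw *ah)
	{
		u32 payload[4] = { 0, 0, 0, 0 };	/* one 16-byte GPM message */

		/* Byte 4 of the payload carries the GPM (cal) type. */
		MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_GRANT);

		/* The CAL_* subtypes all satisfy MCI_GPM_IS_CAL_TYPE(). */
		if (MCI_GPM_IS_CAL_TYPE(MCI_GPM_TYPE(payload)))
			ar9003_mci_send_message(ah, MCI_GPM, 0, payload, 16,
						true, false);
	}
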
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index d4c909f..53a005d 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -258,6 +258,8 @@ static void setup_ht_cap(struct ath_softc *sc,
if (AR_SREV_9330(ah) || AR_SREV_9485(ah))
max_streams = 1;
+ else if (AR_SREV_9462(ah))
+ max_streams = 2;
else if (AR_SREV_9300_20_OR_LATER(ah))
max_streams = 3;
else
@@ -274,8 +276,7 @@ static void setup_ht_cap(struct ath_softc *sc,
tx_streams = ath9k_cmn_count_streams(ah->txchainmask, max_streams);
rx_streams = ath9k_cmn_count_streams(ah->rxchainmask, max_streams);
- ath_dbg(common, ATH_DBG_CONFIG,
- "TX streams %d, RX streams: %d\n",
+ ath_dbg(common, CONFIG, "TX streams %d, RX streams: %d\n",
tx_streams, rx_streams);
if (tx_streams != rx_streams) {
@@ -295,9 +296,22 @@ static int ath9k_reg_notifier(struct wiphy *wiphy,
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct ath_softc *sc = hw->priv;
- struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
+ int ret;
+
+ ret = ath_reg_notifier_apply(wiphy, request, reg);
+
+ /* Set tx power */
+ if (ah->curchan) {
+ sc->config.txpowlimit = 2 * ah->curchan->chan->max_power;
+ ath9k_ps_wakeup(sc);
+ ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
+ sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
+ ath9k_ps_restore(sc);
+ }
- return ath_reg_notifier_apply(wiphy, request, reg);
+ return ret;
}
/*
@@ -314,7 +328,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
struct ath_buf *bf;
int i, bsize, error, desc_len;
- ath_dbg(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
+ ath_dbg(common, CONFIG, "%s DMA: %u buffers %u desc/buf\n",
name, nbuf, ndesc);
INIT_LIST_HEAD(head);
@@ -360,7 +374,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
goto fail;
}
ds = (u8 *) dd->dd_desc;
- ath_dbg(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
+ ath_dbg(common, CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
name, ds, (u32) dd->dd_desc_len,
ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
@@ -408,9 +422,10 @@ fail:
static int ath9k_init_btcoex(struct ath_softc *sc)
{
struct ath_txq *txq;
+ struct ath_hw *ah = sc->sc_ah;
int r;
- switch (sc->sc_ah->btcoex_hw.scheme) {
+ switch (ath9k_hw_get_btcoex_scheme(sc->sc_ah)) {
case ATH_BTCOEX_CFG_NONE:
break;
case ATH_BTCOEX_CFG_2WIRE:
@@ -425,6 +440,37 @@ static int ath9k_init_btcoex(struct ath_softc *sc)
ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
break;
+ case ATH_BTCOEX_CFG_MCI:
+ sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
+ sc->btcoex.duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
+ INIT_LIST_HEAD(&sc->btcoex.mci.info);
+
+ r = ath_mci_setup(sc);
+ if (r)
+ return r;
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) {
+ ah->btcoex_hw.mci.ready = false;
+ ah->btcoex_hw.mci.bt_state = 0;
+ ah->btcoex_hw.mci.bt_ver_major = 3;
+ ah->btcoex_hw.mci.bt_ver_minor = 0;
+ ah->btcoex_hw.mci.bt_version_known = false;
+ ah->btcoex_hw.mci.update_2g5g = true;
+ ah->btcoex_hw.mci.is_2g = true;
+ ah->btcoex_hw.mci.wlan_channels_update = false;
+ ah->btcoex_hw.mci.wlan_channels[0] = 0x00000000;
+ ah->btcoex_hw.mci.wlan_channels[1] = 0xffffffff;
+ ah->btcoex_hw.mci.wlan_channels[2] = 0xffffffff;
+ ah->btcoex_hw.mci.wlan_channels[3] = 0x7fffffff;
+ ah->btcoex_hw.mci.query_bt = true;
+ ah->btcoex_hw.mci.unhalt_bt_gpm = true;
+ ah->btcoex_hw.mci.halted_bt_gpm = false;
+ ah->btcoex_hw.mci.need_flush_btinfo = false;
+ ah->btcoex_hw.mci.wlan_cal_seq = 0;
+ ah->btcoex_hw.mci.wlan_cal_done = 0;
+ ah->btcoex_hw.mci.config = 0x2201;
+ }
+ break;
default:
WARN_ON(1);
break;
@@ -695,6 +741,7 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
hw->queues = 4;
hw->max_rates = 4;
@@ -775,6 +822,11 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
ARRAY_SIZE(ath9k_tpt_blink));
#endif
+ INIT_WORK(&sc->hw_reset_work, ath_reset_work);
+ INIT_WORK(&sc->hw_check_work, ath_hw_check);
+ INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
+ INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
+
/* Register with mac80211 */
error = ieee80211_register_hw(hw);
if (error)
@@ -793,10 +845,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
goto error_world;
}
- INIT_WORK(&sc->hw_reset_work, ath_reset_work);
- INIT_WORK(&sc->hw_check_work, ath_hw_check);
- INIT_WORK(&sc->paprd_work, ath_paprd_calibrate);
- INIT_DELAYED_WORK(&sc->hw_pll_work, ath_hw_pll_work);
sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
ath_init_leds(sc);
@@ -833,9 +881,12 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
kfree(sc->sbands[IEEE80211_BAND_5GHZ].channels);
if ((sc->btcoex.no_stomp_timer) &&
- sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
+ ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_3WIRE)
ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
+ if (ath9k_hw_get_btcoex_scheme(sc->sc_ah) == ATH_BTCOEX_CFG_MCI)
+ ath_mci_cleanup(sc);
+
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
if (ATH_TXQ_SETUP(sc, i))
ath_tx_cleanupq(sc, &sc->tx.txq[i]);
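
A note on the txpowlimit arithmetic in the ath9k_reg_notifier() hunk above:
the driver appears to track power limits in half-dBm steps, which is why
max_power is doubled there (the same 2 * conf->power_level and
MAX_RATE_POWER / 2 conversions appear elsewhere in this patch). For example,
a regulatory max_power of 20 dBm would give:

	sc->config.txpowlimit = 2 * 20 = 40	/* units of 0.5 dBm */
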
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index ecdb6fd..e196aba 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -21,7 +21,7 @@
static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
struct ath9k_tx_queue_info *qi)
{
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_INTERRUPT,
+ ath_dbg(ath9k_hw_common(ah), INTERRUPT,
"tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
@@ -57,8 +57,7 @@ EXPORT_SYMBOL(ath9k_hw_puttxbuf);
void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
{
- ath_dbg(ath9k_hw_common(ah), ATH_DBG_QUEUE,
- "Enable TXE on queue: %u\n", q);
+ ath_dbg(ath9k_hw_common(ah), QUEUE, "Enable TXE on queue: %u\n", q);
REG_WRITE(ah, AR_Q_TXE, 1 << q);
}
EXPORT_SYMBOL(ath9k_hw_txstart);
@@ -202,12 +201,12 @@ bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
qi = &ah->txq[q];
if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
- ath_dbg(common, ATH_DBG_QUEUE,
+ ath_dbg(common, QUEUE,
"Set TXQ properties, inactive queue: %u\n", q);
return false;
}
- ath_dbg(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);
+ ath_dbg(common, QUEUE, "Set queue properties for: %u\n", q);
qi->tqi_ver = qinfo->tqi_ver;
qi->tqi_subtype = qinfo->tqi_subtype;
@@ -266,7 +265,7 @@ bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
qi = &ah->txq[q];
if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
- ath_dbg(common, ATH_DBG_QUEUE,
+ ath_dbg(common, QUEUE,
"Get TXQ properties, inactive queue: %u\n", q);
return false;
}
@@ -325,7 +324,7 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
return -1;
}
- ath_dbg(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);
+ ath_dbg(common, QUEUE, "Setup TX queue: %u\n", q);
qi = &ah->txq[q];
if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
@@ -348,12 +347,11 @@ bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
qi = &ah->txq[q];
if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
- ath_dbg(common, ATH_DBG_QUEUE,
- "Release TXQ, inactive queue: %u\n", q);
+ ath_dbg(common, QUEUE, "Release TXQ, inactive queue: %u\n", q);
return false;
}
- ath_dbg(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);
+ ath_dbg(common, QUEUE, "Release TX queue: %u\n", q);
qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
ah->txok_interrupt_mask &= ~(1 << q);
@@ -376,12 +374,11 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
qi = &ah->txq[q];
if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
- ath_dbg(common, ATH_DBG_QUEUE,
- "Reset TXQ, inactive queue: %u\n", q);
+ ath_dbg(common, QUEUE, "Reset TXQ, inactive queue: %u\n", q);
return true;
}
- ath_dbg(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);
+ ath_dbg(common, QUEUE, "Reset TX queue: %u\n", q);
if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
if (chan && IS_CHAN_B(chan))
@@ -621,10 +618,11 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
rs->rs_status |= ATH9K_RXERR_DECRYPT;
else if (ads.ds_rxstatus8 & AR_MichaelErr)
rs->rs_status |= ATH9K_RXERR_MIC;
- if (ads.ds_rxstatus8 & AR_KeyMiss)
- rs->rs_status |= ATH9K_RXERR_KEYMISS;
}
+ if (ads.ds_rxstatus8 & AR_KeyMiss)
+ rs->rs_status |= ATH9K_RXERR_KEYMISS;
+
return 0;
}
EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
@@ -760,7 +758,10 @@ bool ath9k_hw_intrpend(struct ath_hw *ah)
return true;
host_isr = REG_READ(ah, AR_INTR_ASYNC_CAUSE);
- if ((host_isr & AR_INTR_MAC_IRQ) && (host_isr != AR_INTR_SPURIOUS))
+
+ if (((host_isr & AR_INTR_MAC_IRQ) ||
+ (host_isr & AR_INTR_ASYNC_MASK_MCI)) &&
+ (host_isr != AR_INTR_SPURIOUS))
return true;
host_isr = REG_READ(ah, AR_INTR_SYNC_CAUSE);
@@ -781,7 +782,7 @@ void ath9k_hw_disable_interrupts(struct ath_hw *ah)
else
atomic_dec(&ah->intr_ref_cnt);
- ath_dbg(common, ATH_DBG_INTERRUPT, "disable IER\n");
+ ath_dbg(common, INTERRUPT, "disable IER\n");
REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
(void) REG_READ(ah, AR_IER);
if (!AR_SREV_9100(ah)) {
@@ -798,13 +799,13 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
u32 sync_default = AR_INTR_SYNC_DEFAULT;
+ u32 async_mask;
if (!(ah->imask & ATH9K_INT_GLOBAL))
return;
if (!atomic_inc_and_test(&ah->intr_ref_cnt)) {
- ath_dbg(common, ATH_DBG_INTERRUPT,
- "Do not enable IER ref count %d\n",
+ ath_dbg(common, INTERRUPT, "Do not enable IER ref count %d\n",
atomic_read(&ah->intr_ref_cnt));
return;
}
@@ -812,18 +813,21 @@ void ath9k_hw_enable_interrupts(struct ath_hw *ah)
if (AR_SREV_9340(ah))
sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
- ath_dbg(common, ATH_DBG_INTERRUPT, "enable IER\n");
+ async_mask = AR_INTR_MAC_IRQ;
+
+ if (ah->imask & ATH9K_INT_MCI)
+ async_mask |= AR_INTR_ASYNC_MASK_MCI;
+
+ ath_dbg(common, INTERRUPT, "enable IER\n");
REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
if (!AR_SREV_9100(ah)) {
- REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
- AR_INTR_MAC_IRQ);
- REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
-
+ REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, async_mask);
+ REG_WRITE(ah, AR_INTR_ASYNC_MASK, async_mask);
REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
REG_WRITE(ah, AR_INTR_SYNC_MASK, sync_default);
}
- ath_dbg(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
+ ath_dbg(common, INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
}
EXPORT_SYMBOL(ath9k_hw_enable_interrupts);
@@ -838,7 +842,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah)
if (!(ints & ATH9K_INT_GLOBAL))
ath9k_hw_disable_interrupts(ah);
- ath_dbg(common, ATH_DBG_INTERRUPT, "New interrupt mask 0x%x\n", ints);
+ ath_dbg(common, INTERRUPT, "New interrupt mask 0x%x\n", ints);
mask = ints & ATH9K_INT_COMMON;
mask2 = 0;
@@ -901,7 +905,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah)
mask2 |= AR_IMR_S2_CST;
}
- ath_dbg(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
+ ath_dbg(common, INTERRUPT, "new IMR 0x%x\n", mask);
REG_WRITE(ah, AR_IMR, mask);
ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index a9c5ae7..4a00806 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -118,7 +118,7 @@ void ath9k_ps_restore(struct ath_softc *sc)
if (--sc->ps_usecount != 0)
goto unlock;
- if (sc->ps_idle)
+ if (sc->ps_idle && (sc->ps_flags & PS_WAIT_FOR_TX_ACK))
mode = ATH9K_PM_FULL_SLEEP;
else if (sc->ps_enabled &&
!(sc->ps_flags & (PS_WAIT_FOR_BEACON |
@@ -332,14 +332,14 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan,
hchan = ah->curchan;
}
- if (fastcc && !ath9k_hw_check_alive(ah))
+ if (fastcc && (ah->chip_fullsleep ||
+ !ath9k_hw_check_alive(ah)))
fastcc = false;
if (!ath_prepare_reset(sc, retry_tx, flush))
fastcc = false;
- ath_dbg(common, ATH_DBG_CONFIG,
- "Reset to %u MHz, HT40: %d fastcc: %d\n",
+ ath_dbg(common, CONFIG, "Reset to %u MHz, HT40: %d fastcc: %d\n",
hchan->channel, !!(hchan->channelFlags & (CHANNEL_HT40MINUS |
CHANNEL_HT40PLUS)),
fastcc);
@@ -428,7 +428,7 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
txctl.paprd = BIT(chain);
if (ath_tx_start(hw, skb, &txctl) != 0) {
- ath_dbg(common, ATH_DBG_CALIBRATE, "PAPRD TX failed\n");
+ ath_dbg(common, CALIBRATE, "PAPRD TX failed\n");
dev_kfree_skb_any(skb);
return false;
}
@@ -437,7 +437,7 @@ static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int
msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
if (!time_left)
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Timeout waiting for paprd training on TX chain %d\n",
chain);
@@ -486,27 +486,27 @@ void ath_paprd_calibrate(struct work_struct *work)
chain_ok = 0;
- ath_dbg(common, ATH_DBG_CALIBRATE,
- "Sending PAPRD frame for thermal measurement "
- "on chain %d\n", chain);
+ ath_dbg(common, CALIBRATE,
+ "Sending PAPRD frame for thermal measurement on chain %d\n",
+ chain);
if (!ath_paprd_send_frame(sc, skb, chain))
goto fail_paprd;
ar9003_paprd_setup_gain_table(ah, chain);
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"Sending PAPRD training frame on chain %d\n", chain);
if (!ath_paprd_send_frame(sc, skb, chain))
goto fail_paprd;
if (!ar9003_paprd_is_done(ah)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"PAPRD not yet done on chain %d\n", chain);
break;
}
if (ar9003_paprd_create_curve(ah, caldata, chain)) {
- ath_dbg(common, ATH_DBG_CALIBRATE,
+ ath_dbg(common, CALIBRATE,
"PAPRD create curve failed on chain %d\n",
chain);
break;
@@ -561,7 +561,6 @@ void ath_ani_calibrate(unsigned long data)
/* Long calibration runs independently of short calibration. */
if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) {
longcal = true;
- ath_dbg(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
common->ani.longcal_timer = timestamp;
}
@@ -569,8 +568,6 @@ void ath_ani_calibrate(unsigned long data)
if (!common->ani.caldone) {
if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) {
shortcal = true;
- ath_dbg(common, ATH_DBG_ANI,
- "shortcal @%lu\n", jiffies);
common->ani.shortcal_timer = timestamp;
common->ani.resetcal_timer = timestamp;
}
@@ -584,8 +581,9 @@ void ath_ani_calibrate(unsigned long data)
}
/* Verify whether we must check ANI */
- if ((timestamp - common->ani.checkani_timer) >=
- ah->config.ani_poll_interval) {
+	if (ah->config.enable_ani &&
+	    (timestamp - common->ani.checkani_timer) >=
+	    ah->config.ani_poll_interval) {
aniflag = true;
common->ani.checkani_timer = timestamp;
}
@@ -605,6 +603,12 @@ void ath_ani_calibrate(unsigned long data)
ah->rxchainmask, longcal);
}
+ ath_dbg(common, ANI,
+ "Calibration @%lu finished: %s %s %s, caldone: %s\n",
+ jiffies,
+ longcal ? "long" : "", shortcal ? "short" : "",
+ aniflag ? "ani" : "", common->ani.caldone ? "true" : "false");
+
ath9k_ps_restore(sc);
set_timer:
@@ -630,7 +634,8 @@ set_timer:
}
}
-static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
+static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta,
+ struct ieee80211_vif *vif)
{
struct ath_node *an;
an = (struct ath_node *)sta->drv_priv;
@@ -639,8 +644,9 @@ static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
spin_lock(&sc->nodes_lock);
list_add(&an->list, &sc->nodes);
spin_unlock(&sc->nodes_lock);
- an->sta = sta;
#endif
+ an->sta = sta;
+ an->vif = vif;
if (sc->sc_flags & SC_OP_TXAGGR) {
ath_tx_node_init(sc, an);
an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
@@ -709,8 +715,7 @@ void ath9k_tasklet(unsigned long data)
* TSF sync does not look correct; remain awake to sync with
* the next Beacon.
*/
- ath_dbg(common, ATH_DBG_PS,
- "TSFOOR - Sync with next Beacon\n");
+ ath_dbg(common, PS, "TSFOOR - Sync with next Beacon\n");
sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC;
}
@@ -736,10 +741,13 @@ void ath9k_tasklet(unsigned long data)
ath_tx_tasklet(sc);
}
- if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
if (status & ATH9K_INT_GENTIMER)
ath_gen_timer_isr(sc->sc_ah);
+ if ((status & ATH9K_INT_MCI) && ATH9K_HW_CAP_MCI)
+ ath_mci_intr(sc);
+
out:
/* re-enable hardware interrupt */
ath9k_hw_enable_interrupts(ah);
@@ -762,7 +770,8 @@ irqreturn_t ath_isr(int irq, void *dev)
ATH9K_INT_BMISS | \
ATH9K_INT_CST | \
ATH9K_INT_TSFOOR | \
- ATH9K_INT_GENTIMER)
+ ATH9K_INT_GENTIMER | \
+ ATH9K_INT_MCI)
struct ath_softc *sc = dev;
struct ath_hw *ah = sc->sc_ah;
@@ -880,82 +889,6 @@ chip_reset:
#undef SCHED_INTR
}
-static void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ieee80211_channel *channel = hw->conf.channel;
- int r;
-
- ath9k_ps_wakeup(sc);
- spin_lock_bh(&sc->sc_pcu_lock);
- atomic_set(&ah->intr_ref_cnt, -1);
-
- ath9k_hw_configpcipowersave(ah, false);
-
- if (!ah->curchan)
- ah->curchan = ath9k_cmn_get_curchannel(sc->hw, ah);
-
- r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
- if (r) {
- ath_err(common,
- "Unable to reset channel (%u MHz), reset status %d\n",
- channel->center_freq, r);
- }
-
- ath_complete_reset(sc, true);
-
- /* Enable LED */
- ath9k_hw_cfg_output(ah, ah->led_pin,
- AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
- ath9k_hw_set_gpio(ah, ah->led_pin, 0);
-
- spin_unlock_bh(&sc->sc_pcu_lock);
-
- ath9k_ps_restore(sc);
-}
-
-void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ieee80211_channel *channel = hw->conf.channel;
- int r;
-
- ath9k_ps_wakeup(sc);
-
- ath_cancel_work(sc);
-
- spin_lock_bh(&sc->sc_pcu_lock);
-
- /*
- * Keep the LED on when the radio is disabled
- * during idle unassociated state.
- */
- if (!sc->ps_idle) {
- ath9k_hw_set_gpio(ah, ah->led_pin, 1);
- ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
- }
-
- ath_prepare_reset(sc, false, true);
-
- if (!ah->curchan)
- ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
-
- r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
- if (r) {
- ath_err(ath9k_hw_common(sc->sc_ah),
- "Unable to reset channel (%u MHz), reset status %d\n",
- channel->center_freq, r);
- }
-
- ath9k_hw_phy_disable(ah);
-
- ath9k_hw_configpcipowersave(ah, true);
-
- spin_unlock_bh(&sc->sc_pcu_lock);
- ath9k_ps_restore(sc);
-}
-
static int ath_reset(struct ath_softc *sc, bool retry_tx)
{
int r;
@@ -1002,8 +935,8 @@ void ath_hw_check(struct work_struct *work)
busy = ath_update_survey_stats(sc);
spin_unlock_irqrestore(&common->cc_lock, flags);
- ath_dbg(common, ATH_DBG_RESET, "Possible baseband hang, "
- "busy=%d (try %d)\n", busy, sc->hw_busy_count + 1);
+ ath_dbg(common, RESET, "Possible baseband hang, busy=%d (try %d)\n",
+ busy, sc->hw_busy_count + 1);
if (busy >= 99) {
if (++sc->hw_busy_count >= 3) {
RESET_STAT_INC(sc, RESET_TYPE_BB_HANG);
@@ -1026,8 +959,7 @@ static void ath_hw_pll_rx_hang_check(struct ath_softc *sc, u32 pll_sqsum)
count++;
if (count == 3) {
/* Rx is hung for more than 500ms. Reset it */
- ath_dbg(common, ATH_DBG_RESET,
- "Possible RX hang, resetting");
+ ath_dbg(common, RESET, "Possible RX hang, resetting\n");
RESET_STAT_INC(sc, RESET_TYPE_PLL_HANG);
ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
count = 0;
@@ -1067,7 +999,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
struct ath9k_channel *init_channel;
int r;
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Starting driver with initial channel: %d MHz\n",
curchan->center_freq);
@@ -1091,6 +1023,9 @@ static int ath9k_start(struct ieee80211_hw *hw)
* and then setup of the interrupt mask.
*/
spin_lock_bh(&sc->sc_pcu_lock);
+
+ atomic_set(&ah->intr_ref_cnt, -1);
+
r = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
if (r) {
ath_err(common,
@@ -1117,6 +1052,9 @@ static int ath9k_start(struct ieee80211_hw *hw)
if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
ah->imask |= ATH9K_INT_CST;
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
+ ah->imask |= ATH9K_INT_MCI;
+
sc->sc_flags &= ~SC_OP_INVALID;
sc->sc_ah->is_monitoring = false;
@@ -1129,15 +1067,28 @@ static int ath9k_start(struct ieee80211_hw *hw)
goto mutex_unlock;
}
+ if (ah->led_pin >= 0) {
+ ath9k_hw_cfg_output(ah, ah->led_pin,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ ath9k_hw_set_gpio(ah, ah->led_pin, 0);
+ }
+
+ /*
+ * Reset key cache to sane defaults (all entries cleared) instead of
+ * semi-random values after suspend/resume.
+ */
+ ath9k_cmn_init_crypto(sc->sc_ah);
+
spin_unlock_bh(&sc->sc_pcu_lock);
- if ((ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) &&
+ if ((ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) &&
!ah->btcoex_hw.enabled) {
- ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
- AR_STOMP_LOW_WLAN_WGHT);
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI))
+ ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+ AR_STOMP_LOW_WLAN_WGHT);
ath9k_hw_btcoex_enable(ah);
- if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
ath9k_btcoex_timer_resume(sc);
}
@@ -1167,12 +1118,19 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (ieee80211_is_data(hdr->frame_control) &&
!ieee80211_is_nullfunc(hdr->frame_control) &&
!ieee80211_has_pm(hdr->frame_control)) {
- ath_dbg(common, ATH_DBG_PS,
+ ath_dbg(common, PS,
"Add PM=1 for a TX frame while in PS mode\n");
hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
}
}
+ /*
+	 * Cannot tx while the hardware is in full sleep; it first needs a
+	 * full chip reset to recover from that.
+ */
+ if (unlikely(sc->sc_ah->power_mode == ATH9K_PM_FULL_SLEEP))
+ goto exit;
+
if (unlikely(sc->sc_ah->power_mode != ATH9K_PM_AWAKE)) {
/*
* We are using PS-Poll and mac80211 can request TX while in
@@ -1183,12 +1141,11 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
ath9k_hw_setrxabort(sc->sc_ah, 0);
if (ieee80211_is_pspoll(hdr->frame_control)) {
- ath_dbg(common, ATH_DBG_PS,
+ ath_dbg(common, PS,
"Sending PS-Poll to pick a buffered frame\n");
sc->ps_flags |= PS_WAIT_FOR_PSPOLL_DATA;
} else {
- ath_dbg(common, ATH_DBG_PS,
- "Wake up to complete TX\n");
+ ath_dbg(common, PS, "Wake up to complete TX\n");
sc->ps_flags |= PS_WAIT_FOR_TX_ACK;
}
/*
@@ -1202,10 +1159,10 @@ static void ath9k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
memset(&txctl, 0, sizeof(struct ath_tx_control));
txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
- ath_dbg(common, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);
+ ath_dbg(common, XMIT, "transmitting packet, skb: %p\n", skb);
if (ath_tx_start(hw, skb, &txctl) != 0) {
- ath_dbg(common, ATH_DBG_XMIT, "TX failed\n");
+ ath_dbg(common, XMIT, "TX failed\n");
goto exit;
}
@@ -1219,13 +1176,14 @@ static void ath9k_stop(struct ieee80211_hw *hw)
struct ath_softc *sc = hw->priv;
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
+ bool prev_idle;
mutex_lock(&sc->mutex);
ath_cancel_work(sc);
if (sc->sc_flags & SC_OP_INVALID) {
- ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
+ ath_dbg(common, ANY, "Device not present\n");
mutex_unlock(&sc->mutex);
return;
}
@@ -1233,10 +1191,12 @@ static void ath9k_stop(struct ieee80211_hw *hw)
/* Ensure HW is awake when we try to shut it down. */
ath9k_ps_wakeup(sc);
- if (ah->btcoex_hw.enabled) {
+ if (ah->btcoex_hw.enabled &&
+ ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
ath9k_hw_btcoex_disable(ah);
- if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
+ if (ath9k_hw_get_btcoex_scheme(ah) == ATH_BTCOEX_CFG_3WIRE)
ath9k_btcoex_timer_pause(sc);
+ ath_mci_flush_profile(&sc->btcoex.mci);
}
spin_lock_bh(&sc->sc_pcu_lock);
@@ -1248,39 +1208,49 @@ static void ath9k_stop(struct ieee80211_hw *hw)
* before setting the invalid flag. */
ath9k_hw_disable_interrupts(ah);
- if (!(sc->sc_flags & SC_OP_INVALID)) {
- ath_drain_all_txq(sc, false);
- ath_stoprecv(sc);
- ath9k_hw_phy_disable(ah);
- } else
- sc->rx.rxlink = NULL;
+ spin_unlock_bh(&sc->sc_pcu_lock);
+
+	/* We can now sync irq and kill any running tasklets, since we have
+	 * already disabled interrupts and are not holding a spin lock. */
+ synchronize_irq(sc->irq);
+ tasklet_kill(&sc->intr_tq);
+ tasklet_kill(&sc->bcon_tasklet);
+
+ prev_idle = sc->ps_idle;
+ sc->ps_idle = true;
+
+ spin_lock_bh(&sc->sc_pcu_lock);
+
+ if (ah->led_pin >= 0) {
+ ath9k_hw_set_gpio(ah, ah->led_pin, 1);
+ ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
+ }
+
+ ath_prepare_reset(sc, false, true);
if (sc->rx.frag) {
dev_kfree_skb_any(sc->rx.frag);
sc->rx.frag = NULL;
}
- /* disable HAL and put h/w to sleep */
- ath9k_hw_disable(ah);
+ if (!ah->curchan)
+ ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
+
+ ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
+ ath9k_hw_phy_disable(ah);
- spin_unlock_bh(&sc->sc_pcu_lock);
+ ath9k_hw_configpcipowersave(ah, true);
- /* we can now sync irq and kill any running tasklets, since we already
- * disabled interrupts and not holding a spin lock */
- synchronize_irq(sc->irq);
- tasklet_kill(&sc->intr_tq);
- tasklet_kill(&sc->bcon_tasklet);
+ spin_unlock_bh(&sc->sc_pcu_lock);
ath9k_ps_restore(sc);
- sc->ps_idle = true;
- ath_radio_disable(sc, hw);
-
sc->sc_flags |= SC_OP_INVALID;
+ sc->ps_idle = prev_idle;
mutex_unlock(&sc->mutex);
- ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n");
+ ath_dbg(common, CONFIG, "Driver halt\n");
}
bool ath9k_uses_beacons(int type)
@@ -1495,8 +1465,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
goto out;
}
- ath_dbg(common, ATH_DBG_CONFIG,
- "Attach a VIF of type: %d\n", vif->type);
+ ath_dbg(common, CONFIG, "Attach a VIF of type: %d\n", vif->type);
sc->nvifs++;
@@ -1516,7 +1485,7 @@ static int ath9k_change_interface(struct ieee80211_hw *hw,
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
int ret = 0;
- ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n");
+ ath_dbg(common, CONFIG, "Change Interface\n");
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
@@ -1559,7 +1528,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
struct ath_softc *sc = hw->priv;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");
+ ath_dbg(common, CONFIG, "Detach Interface\n");
ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
@@ -1616,8 +1585,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_conf *conf = &hw->conf;
- bool disable_radio = false;
+ ath9k_ps_wakeup(sc);
mutex_lock(&sc->mutex);
/*
@@ -1628,13 +1597,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
*/
if (changed & IEEE80211_CONF_CHANGE_IDLE) {
sc->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
- if (!sc->ps_idle) {
- ath_radio_enable(sc, hw);
- ath_dbg(common, ATH_DBG_CONFIG,
- "not-idle: enabling radio\n");
- } else {
- disable_radio = true;
- }
+ if (sc->ps_idle)
+ ath_cancel_work(sc);
}
/*
@@ -1655,19 +1619,16 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
if (conf->flags & IEEE80211_CONF_MONITOR) {
- ath_dbg(common, ATH_DBG_CONFIG,
- "Monitor mode is enabled\n");
+ ath_dbg(common, CONFIG, "Monitor mode is enabled\n");
sc->sc_ah->is_monitoring = true;
} else {
- ath_dbg(common, ATH_DBG_CONFIG,
- "Monitor mode is disabled\n");
+ ath_dbg(common, CONFIG, "Monitor mode is disabled\n");
sc->sc_ah->is_monitoring = false;
}
}
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
struct ieee80211_channel *curchan = hw->conf.channel;
- struct ath9k_channel old_chan;
int pos = curchan->hw_value;
int old_pos = -1;
unsigned long flags;
@@ -1680,8 +1641,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
else
sc->sc_flags &= ~SC_OP_OFFCHANNEL;
- ath_dbg(common, ATH_DBG_CONFIG,
- "Set channel: %d MHz type: %d\n",
+ ath_dbg(common, CONFIG, "Set channel: %d MHz type: %d\n",
curchan->center_freq, conf->channel_type);
/* update survey stats for the old channel before switching */
@@ -1693,11 +1653,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
* Preserve the current channel values, before updating
* the same channel
*/
- if (old_pos == pos) {
- memcpy(&old_chan, &sc->sc_ah->channels[pos],
- sizeof(struct ath9k_channel));
- ah->curchan = &old_chan;
- }
+ if (ah->curchan && (old_pos == pos))
+ ath9k_hw_getnf(ah, ah->curchan);
ath9k_cmn_update_ichannel(&sc->sc_ah->channels[pos],
curchan, conf->channel_type);
@@ -1738,21 +1695,14 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
- ath_dbg(common, ATH_DBG_CONFIG,
- "Set power: %d\n", conf->power_level);
+ ath_dbg(common, CONFIG, "Set power: %d\n", conf->power_level);
sc->config.txpowlimit = 2 * conf->power_level;
- ath9k_ps_wakeup(sc);
ath9k_cmn_update_txpow(ah, sc->curtxpow,
sc->config.txpowlimit, &sc->curtxpow);
- ath9k_ps_restore(sc);
- }
-
- if (disable_radio) {
- ath_dbg(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
- ath_radio_disable(sc, hw);
}
mutex_unlock(&sc->mutex);
+ ath9k_ps_restore(sc);
return 0;
}
@@ -1785,8 +1735,8 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
ath9k_ps_restore(sc);
- ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
- "Set HW RX filter: 0x%x\n", rfilt);
+ ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG, "Set HW RX filter: 0x%x\n",
+ rfilt);
}
static int ath9k_sta_add(struct ieee80211_hw *hw,
@@ -1798,7 +1748,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
struct ath_node *an = (struct ath_node *) sta->drv_priv;
struct ieee80211_key_conf ps_key = { };
- ath_node_attach(sc, sta);
+ ath_node_attach(sc, sta, vif);
if (vif->type != NL80211_IFTYPE_AP &&
vif->type != NL80211_IFTYPE_AP_VLAN)
@@ -1883,7 +1833,7 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw,
qi.tqi_cwmax = params->cw_max;
qi.tqi_burstTime = params->txop;
- ath_dbg(common, ATH_DBG_CONFIG,
+ ath_dbg(common, CONFIG,
"Configure tx [queue/halq] [%d/%d], aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
queue, txq->axq_qnum, params->aifs, params->cw_min,
params->cw_max, params->txop);
@@ -1915,7 +1865,8 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
if (ath9k_modparam_nohwcrypt)
return -ENOSPC;
- if (vif->type == NL80211_IFTYPE_ADHOC &&
+ if ((vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MESH_POINT) &&
(key->cipher == WLAN_CIPHER_SUITE_TKIP ||
key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
@@ -1931,7 +1882,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
mutex_lock(&sc->mutex);
ath9k_ps_wakeup(sc);
- ath_dbg(common, ATH_DBG_CONFIG, "Set HW Key\n");
+ ath_dbg(common, CONFIG, "Set HW Key\n");
switch (cmd) {
case SET_KEY:
@@ -1983,9 +1934,8 @@ static void ath9k_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
common->curaid = bss_conf->aid;
ath9k_hw_write_associd(sc->sc_ah);
- ath_dbg(common, ATH_DBG_CONFIG,
- "Bss Info ASSOC %d, bssid: %pM\n",
- bss_conf->aid, common->curbssid);
+ ath_dbg(common, CONFIG, "Bss Info ASSOC %d, bssid: %pM\n",
+ bss_conf->aid, common->curbssid);
ath_beacon_config(sc, vif);
/*
* Request a re-configuration of Beacon related timers
@@ -2016,8 +1966,7 @@ static void ath9k_config_bss(struct ath_softc *sc, struct ieee80211_vif *vif)
/* Reconfigure bss info */
if (avp->primary_sta_vif && !bss_conf->assoc) {
- ath_dbg(common, ATH_DBG_CONFIG,
- "Bss Info DISASSOC %d, bssid %pM\n",
+ ath_dbg(common, CONFIG, "Bss Info DISASSOC %d, bssid %pM\n",
common->curaid, common->curbssid);
sc->sc_flags &= ~(SC_OP_PRIM_STA_VIF | SC_OP_BEACONS);
avp->primary_sta_vif = false;
@@ -2059,7 +2008,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BSSID) {
ath9k_config_bss(sc, vif);
- ath_dbg(common, ATH_DBG_CONFIG, "BSSID: %pM aid: 0x%x\n",
+ ath_dbg(common, CONFIG, "BSSID: %pM aid: 0x%x\n",
common->curbssid, common->curaid);
}
@@ -2137,7 +2086,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
- ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
+ ath_dbg(common, CONFIG, "BSS Changed PREAMBLE %d\n",
bss_conf->use_short_preamble);
if (bss_conf->use_short_preamble)
sc->sc_flags |= SC_OP_PREAMBLE_SHORT;
@@ -2146,7 +2095,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
}
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
- ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
+ ath_dbg(common, CONFIG, "BSS Changed CTS PROT %d\n",
bss_conf->use_cts_prot);
if (bss_conf->use_cts_prot &&
hw->conf.channel->band != IEEE80211_BAND_5GHZ)
@@ -2312,20 +2261,17 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
cancel_delayed_work_sync(&sc->tx_complete_work);
if (ah->ah_flags & AH_UNPLUGGED) {
- ath_dbg(common, ATH_DBG_ANY, "Device has been unplugged!\n");
+ ath_dbg(common, ANY, "Device has been unplugged!\n");
mutex_unlock(&sc->mutex);
return;
}
if (sc->sc_flags & SC_OP_INVALID) {
- ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
+ ath_dbg(common, ANY, "Device not present\n");
mutex_unlock(&sc->mutex);
return;
}
- if (drop)
- timeout = 1;
-
for (j = 0; j < timeout; j++) {
bool npend = false;
@@ -2343,21 +2289,22 @@ static void ath9k_flush(struct ieee80211_hw *hw, bool drop)
}
if (!npend)
- goto out;
+ break;
}
- ath9k_ps_wakeup(sc);
- spin_lock_bh(&sc->sc_pcu_lock);
- drain_txq = ath_drain_all_txq(sc, false);
- spin_unlock_bh(&sc->sc_pcu_lock);
+ if (drop) {
+ ath9k_ps_wakeup(sc);
+ spin_lock_bh(&sc->sc_pcu_lock);
+ drain_txq = ath_drain_all_txq(sc, false);
+ spin_unlock_bh(&sc->sc_pcu_lock);
- if (!drain_txq)
- ath_reset(sc, false);
+ if (!drain_txq)
+ ath_reset(sc, false);
- ath9k_ps_restore(sc);
- ieee80211_wake_queues(hw);
+ ath9k_ps_restore(sc);
+ ieee80211_wake_queues(hw);
+ }
-out:
ieee80211_queue_delayed_work(hw, &sc->tx_complete_work, 0);
mutex_unlock(&sc->mutex);
}
diff --git a/drivers/net/wireless/ath/ath9k/mci.c b/drivers/net/wireless/ath/ath9k/mci.c
new file mode 100644
index 0000000..05c23ea
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/mci.c
@@ -0,0 +1,668 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+
+#include "ath9k.h"
+#include "mci.h"
+
+static const u8 ath_mci_duty_cycle[] = { 0, 50, 60, 70, 80, 85, 90, 95, 98 };
+
+static struct ath_mci_profile_info*
+ath_mci_find_profile(struct ath_mci_profile *mci,
+ struct ath_mci_profile_info *info)
+{
+ struct ath_mci_profile_info *entry;
+
+ list_for_each_entry(entry, &mci->info, list) {
+ if (entry->conn_handle == info->conn_handle)
+ break;
+ }
+ return entry;
+}
+
+static bool ath_mci_add_profile(struct ath_common *common,
+ struct ath_mci_profile *mci,
+ struct ath_mci_profile_info *info)
+{
+ struct ath_mci_profile_info *entry;
+
+ if ((mci->num_sco == ATH_MCI_MAX_SCO_PROFILE) &&
+ (info->type == MCI_GPM_COEX_PROFILE_VOICE)) {
+ ath_dbg(common, MCI,
+ "Too many SCO profile, failed to add new profile\n");
+ return false;
+ }
+
+ if (((NUM_PROF(mci) - mci->num_sco) == ATH_MCI_MAX_ACL_PROFILE) &&
+ (info->type != MCI_GPM_COEX_PROFILE_VOICE)) {
+ ath_dbg(common, MCI,
+ "Too many ACL profile, failed to add new profile\n");
+ return false;
+ }
+
+ entry = ath_mci_find_profile(mci, info);
+
+ if (entry)
+ memcpy(entry, info, 10);
+ else {
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return false;
+
+ memcpy(entry, info, 10);
+ INC_PROF(mci, info);
+ list_add_tail(&info->list, &mci->info);
+ }
+ return true;
+}
+
+static void ath_mci_del_profile(struct ath_common *common,
+ struct ath_mci_profile *mci,
+ struct ath_mci_profile_info *info)
+{
+ struct ath_mci_profile_info *entry;
+
+ entry = ath_mci_find_profile(mci, info);
+
+ if (!entry) {
+ ath_dbg(common, MCI, "Profile to be deleted not found\n");
+ return;
+ }
+ DEC_PROF(mci, entry);
+ list_del(&entry->list);
+ kfree(entry);
+}
+
+void ath_mci_flush_profile(struct ath_mci_profile *mci)
+{
+ struct ath_mci_profile_info *info, *tinfo;
+
+ list_for_each_entry_safe(info, tinfo, &mci->info, list) {
+ list_del(&info->list);
+ DEC_PROF(mci, info);
+ kfree(info);
+ }
+ mci->aggr_limit = 0;
+}
+
+static void ath_mci_adjust_aggr_limit(struct ath_btcoex *btcoex)
+{
+ struct ath_mci_profile *mci = &btcoex->mci;
+ u32 wlan_airtime = btcoex->btcoex_period *
+ (100 - btcoex->duty_cycle) / 100;
+
+ /*
+ * Scale: wlan_airtime is in ms, aggr_limit is in 0.25 ms units.
+ * When wlan_airtime is less than 4 ms, the aggregation limit has to be
+ * adjusted to half of wlan_airtime so that the aggregate can fit
+ * without colliding with BT traffic.
+ */
+ if ((wlan_airtime <= 4) &&
+ (!mci->aggr_limit || (mci->aggr_limit > (2 * wlan_airtime))))
+ mci->aggr_limit = 2 * wlan_airtime;
+}
+
+static void ath_mci_update_scheme(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_mci_profile *mci = &btcoex->mci;
+ struct ath_mci_profile_info *info;
+ u32 num_profile = NUM_PROF(mci);
+
+ if (num_profile == 1) {
+ info = list_first_entry(&mci->info,
+ struct ath_mci_profile_info,
+ list);
+ if (mci->num_sco && info->T == 12) {
+ mci->aggr_limit = 8;
+ ath_dbg(common, MCI,
+ "Single SCO, aggregation limit 2 ms\n");
+ } else if ((info->type == MCI_GPM_COEX_PROFILE_BNEP) &&
+ !info->master) {
+ btcoex->btcoex_period = 60;
+ ath_dbg(common, MCI,
+ "Single slave PAN/FTP, bt period 60 ms\n");
+ } else if ((info->type == MCI_GPM_COEX_PROFILE_HID) &&
+ (info->T > 0 && info->T < 50) &&
+ (info->A > 1 || info->W > 1)) {
+ btcoex->duty_cycle = 30;
+ mci->aggr_limit = 8;
+ ath_dbg(common, MCI,
+ "Multiple attempt/timeout single HID "
+ "aggregation limit 2 ms dutycycle 30%%\n");
+ }
+ } else if ((num_profile == 2) && (mci->num_hid == 2)) {
+ btcoex->duty_cycle = 30;
+ mci->aggr_limit = 8;
+ ath_dbg(common, MCI,
+ "Two HIDs aggregation limit 2 ms dutycycle 30%%\n");
+ } else if (num_profile > 3) {
+ mci->aggr_limit = 6;
+ ath_dbg(common, MCI,
+ "Three or more profiles aggregation limit 1.5 ms\n");
+ }
+
+ if (IS_CHAN_2GHZ(sc->sc_ah->curchan)) {
+ if (IS_CHAN_HT(sc->sc_ah->curchan))
+ ath_mci_adjust_aggr_limit(btcoex);
+ else
+ btcoex->btcoex_period >>= 1;
+ }
+
+ ath9k_hw_btcoex_disable(sc->sc_ah);
+ ath9k_btcoex_timer_pause(sc);
+
+ if (IS_CHAN_5GHZ(sc->sc_ah->curchan))
+ return;
+
+ btcoex->duty_cycle += (mci->num_bdr ? ATH_MCI_MAX_DUTY_CYCLE : 0);
+ if (btcoex->duty_cycle > ATH_MCI_MAX_DUTY_CYCLE)
+ btcoex->duty_cycle = ATH_MCI_MAX_DUTY_CYCLE;
+
+ btcoex->btcoex_period *= 1000;
+ btcoex->btcoex_no_stomp = btcoex->btcoex_period *
+ (100 - btcoex->duty_cycle) / 100;
+
+ ath9k_hw_btcoex_enable(sc->sc_ah);
+ ath9k_btcoex_timer_resume(sc);
+}
+
+
+static void ath_mci_cal_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 payload[4] = {0, 0, 0, 0};
+
+ switch (opcode) {
+ case MCI_GPM_BT_CAL_REQ:
+
+ ath_dbg(common, MCI, "MCI received BT_CAL_REQ\n");
+
+ if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) {
+ ar9003_mci_state(ah, MCI_STATE_SET_BT_CAL_START, NULL);
+ ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
+ } else
+ ath_dbg(common, MCI, "MCI State mismatches: %d\n",
+ ar9003_mci_state(ah, MCI_STATE_BT, NULL));
+
+ break;
+
+ case MCI_GPM_BT_CAL_DONE:
+
+ ath_dbg(common, MCI, "MCI received BT_CAL_DONE\n");
+
+ if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_CAL)
+ ath_dbg(common, MCI, "MCI error illegal!\n");
+ else
+ ath_dbg(common, MCI, "MCI BT not in CAL state\n");
+
+ break;
+
+ case MCI_GPM_BT_CAL_GRANT:
+
+ ath_dbg(common, MCI, "MCI received BT_CAL_GRANT\n");
+
+ /* Send WLAN_CAL_DONE for now */
+ ath_dbg(common, MCI, "MCI send WLAN_CAL_DONE\n");
+ MCI_GPM_SET_CAL_TYPE(payload, MCI_GPM_WLAN_CAL_DONE);
+ ar9003_mci_send_message(sc->sc_ah, MCI_GPM, 0, payload,
+ 16, false, true);
+ break;
+
+ default:
+ ath_dbg(common, MCI, "MCI Unknown GPM CAL message\n");
+ break;
+ }
+}
+
+static void ath_mci_process_profile(struct ath_softc *sc,
+ struct ath_mci_profile_info *info)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_mci_profile *mci = &btcoex->mci;
+
+ if (info->start) {
+ if (!ath_mci_add_profile(common, mci, info))
+ return;
+ } else
+ ath_mci_del_profile(common, mci, info);
+
+ btcoex->btcoex_period = ATH_MCI_DEF_BT_PERIOD;
+ mci->aggr_limit = mci->num_sco ? 6 : 0;
+ if (NUM_PROF(mci)) {
+ btcoex->bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
+ btcoex->duty_cycle = ath_mci_duty_cycle[NUM_PROF(mci)];
+ } else {
+ btcoex->bt_stomp_type = mci->num_mgmt ? ATH_BTCOEX_STOMP_ALL :
+ ATH_BTCOEX_STOMP_LOW;
+ btcoex->duty_cycle = ATH_BTCOEX_DEF_DUTY_CYCLE;
+ }
+
+ ath_mci_update_scheme(sc);
+}
+
+static void ath_mci_process_status(struct ath_softc *sc,
+ struct ath_mci_profile_status *status)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_mci_profile *mci = &btcoex->mci;
+ struct ath_mci_profile_info info;
+ int i = 0, old_num_mgmt = mci->num_mgmt;
+
+ /* Link status types are not handled */
+ if (status->is_link) {
+ ath_dbg(common, MCI, "Skip link type status update\n");
+ return;
+ }
+
+ memset(&info, 0, sizeof(struct ath_mci_profile_info));
+
+ info.conn_handle = status->conn_handle;
+ if (ath_mci_find_profile(mci, &info)) {
+ ath_dbg(common, MCI,
+ "Skip non link state update for existing profile %d\n",
+ status->conn_handle);
+ return;
+ }
+ if (status->conn_handle >= ATH_MCI_MAX_PROFILE) {
+ ath_dbg(common, MCI, "Ignore too many non-link update\n");
+ return;
+ }
+ if (status->is_critical)
+ __set_bit(status->conn_handle, mci->status);
+ else
+ __clear_bit(status->conn_handle, mci->status);
+
+ mci->num_mgmt = 0;
+ do {
+ if (test_bit(i, mci->status))
+ mci->num_mgmt++;
+ } while (++i < ATH_MCI_MAX_PROFILE);
+
+ if (old_num_mgmt != mci->num_mgmt)
+ ath_mci_update_scheme(sc);
+}
+
+static void ath_mci_msg(struct ath_softc *sc, u8 opcode, u8 *rx_payload)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_mci_profile_info profile_info;
+ struct ath_mci_profile_status profile_status;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ u32 version;
+ u8 major;
+ u8 minor;
+ u32 seq_num;
+
+ switch (opcode) {
+
+ case MCI_GPM_COEX_VERSION_QUERY:
+ ath_dbg(common, MCI, "MCI Recv GPM COEX Version Query\n");
+ version = ar9003_mci_state(ah,
+ MCI_STATE_SEND_WLAN_COEX_VERSION, NULL);
+ break;
+
+ case MCI_GPM_COEX_VERSION_RESPONSE:
+ ath_dbg(common, MCI, "MCI Recv GPM COEX Version Response\n");
+ major = *(rx_payload + MCI_GPM_COEX_B_MAJOR_VERSION);
+ minor = *(rx_payload + MCI_GPM_COEX_B_MINOR_VERSION);
+ ath_dbg(common, MCI, "MCI BT Coex version: %d.%d\n",
+ major, minor);
+ version = (major << 8) + minor;
+ version = ar9003_mci_state(ah,
+ MCI_STATE_SET_BT_COEX_VERSION, &version);
+ break;
+
+ case MCI_GPM_COEX_STATUS_QUERY:
+ ath_dbg(common, MCI,
+ "MCI Recv GPM COEX Status Query = 0x%02x\n",
+ *(rx_payload + MCI_GPM_COEX_B_WLAN_BITMAP));
+ ar9003_mci_state(ah,
+ MCI_STATE_SEND_WLAN_CHANNELS, NULL);
+ break;
+
+ case MCI_GPM_COEX_BT_PROFILE_INFO:
+ ath_dbg(common, MCI, "MCI Recv GPM Coex BT profile info\n");
+ memcpy(&profile_info,
+ (rx_payload + MCI_GPM_COEX_B_PROFILE_TYPE), 10);
+
+ if ((profile_info.type == MCI_GPM_COEX_PROFILE_UNKNOWN)
+ || (profile_info.type >=
+ MCI_GPM_COEX_PROFILE_MAX)) {
+
+ ath_dbg(common, MCI,
+ "illegal profile type = %d, state = %d\n",
+ profile_info.type,
+ profile_info.start);
+ break;
+ }
+
+ ath_mci_process_profile(sc, &profile_info);
+ break;
+
+ case MCI_GPM_COEX_BT_STATUS_UPDATE:
+ profile_status.is_link = *(rx_payload +
+ MCI_GPM_COEX_B_STATUS_TYPE);
+ profile_status.conn_handle = *(rx_payload +
+ MCI_GPM_COEX_B_STATUS_LINKID);
+ profile_status.is_critical = *(rx_payload +
+ MCI_GPM_COEX_B_STATUS_STATE);
+
+ seq_num = *((u32 *)(rx_payload + 12));
+ ath_dbg(common, MCI,
+ "MCI Recv GPM COEX BT_Status_Update: is_link=%d, linkId=%d, state=%d, SEQ=%d\n",
+ profile_status.is_link, profile_status.conn_handle,
+ profile_status.is_critical, seq_num);
+
+ ath_mci_process_status(sc, &profile_status);
+ break;
+
+ default:
+ ath_dbg(common, MCI, "MCI Unknown GPM COEX message = 0x%02x\n",
+ opcode);
+ break;
+ }
+}
+
+static int ath_mci_buf_alloc(struct ath_softc *sc, struct ath_mci_buf *buf)
+{
+ int error = 0;
+
+ buf->bf_addr = dma_alloc_coherent(sc->dev, buf->bf_len,
+ &buf->bf_paddr, GFP_KERNEL);
+
+ if (buf->bf_addr == NULL) {
+ error = -ENOMEM;
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ memset(buf, 0, sizeof(*buf));
+ return error;
+}
+
+static void ath_mci_buf_free(struct ath_softc *sc, struct ath_mci_buf *buf)
+{
+ if (buf->bf_addr) {
+ dma_free_coherent(sc->dev, buf->bf_len, buf->bf_addr,
+ buf->bf_paddr);
+ memset(buf, 0, sizeof(*buf));
+ }
+}
+
+int ath_mci_setup(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_mci_coex *mci = &sc->mci_coex;
+ int error = 0;
+
+ if (!ATH9K_HW_CAP_MCI)
+ return 0;
+
+ mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE + ATH_MCI_GPM_BUF_SIZE;
+
+ if (ath_mci_buf_alloc(sc, &mci->sched_buf)) {
+ ath_dbg(common, FATAL, "MCI buffer alloc failed\n");
+ error = -ENOMEM;
+ goto fail;
+ }
+
+ mci->sched_buf.bf_len = ATH_MCI_SCHED_BUF_SIZE;
+
+ memset(mci->sched_buf.bf_addr, MCI_GPM_RSVD_PATTERN,
+ mci->sched_buf.bf_len);
+
+ mci->gpm_buf.bf_len = ATH_MCI_GPM_BUF_SIZE;
+ mci->gpm_buf.bf_addr = (u8 *)mci->sched_buf.bf_addr +
+ mci->sched_buf.bf_len;
+ mci->gpm_buf.bf_paddr = mci->sched_buf.bf_paddr + mci->sched_buf.bf_len;
+
+ /* initialize the buffer */
+ memset(mci->gpm_buf.bf_addr, MCI_GPM_RSVD_PATTERN, mci->gpm_buf.bf_len);
+
+ ar9003_mci_setup(sc->sc_ah, mci->gpm_buf.bf_paddr,
+ mci->gpm_buf.bf_addr, (mci->gpm_buf.bf_len >> 4),
+ mci->sched_buf.bf_paddr);
+fail:
+ return error;
+}
+
+void ath_mci_cleanup(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_mci_coex *mci = &sc->mci_coex;
+
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ /*
+ * Both the schedule and GPM buffers are released; they were allocated as a single region.
+ */
+ ath_mci_buf_free(sc, &mci->sched_buf);
+ ar9003_mci_cleanup(ah);
+}
+
+void ath_mci_intr(struct ath_softc *sc)
+{
+ struct ath_mci_coex *mci = &sc->mci_coex;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_common *common = ath9k_hw_common(ah);
+ u32 mci_int, mci_int_rxmsg;
+ u32 offset, subtype, opcode;
+ u32 *pgpm;
+ u32 more_data = MCI_GPM_MORE;
+ bool skip_gpm = false;
+
+ if (!ATH9K_HW_CAP_MCI)
+ return;
+
+ ar9003_mci_get_interrupt(sc->sc_ah, &mci_int, &mci_int_rxmsg);
+
+ if (ar9003_mci_state(ah, MCI_STATE_ENABLE, NULL) == 0) {
+
+ ar9003_mci_state(sc->sc_ah, MCI_STATE_INIT_GPM_OFFSET, NULL);
+ ath_dbg(common, MCI, "MCI interrupt but MCI disabled\n");
+
+ ath_dbg(common, MCI,
+ "MCI interrupt: intr = 0x%x, intr_rxmsg = 0x%x\n",
+ mci_int, mci_int_rxmsg);
+ return;
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE) {
+ u32 payload[4] = { 0xffffffff, 0xffffffff,
+ 0xffffffff, 0xffffff00};
+
+ /*
+ * The following REMOTE_RESET and SYS_WAKING used to be sent
+ * only when BT woke up. Now they are always sent, as a
+ * recovery method to reset BT MCI's RX alignment.
+ */
+ ath_dbg(common, MCI, "MCI interrupt send REMOTE_RESET\n");
+
+ ar9003_mci_send_message(ah, MCI_REMOTE_RESET, 0,
+ payload, 16, true, false);
+ ath_dbg(common, MCI, "MCI interrupt send SYS_WAKING\n");
+ ar9003_mci_send_message(ah, MCI_SYS_WAKING, 0,
+ NULL, 0, true, false);
+
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE;
+ ar9003_mci_state(ah, MCI_STATE_RESET_REQ_WAKE, NULL);
+
+ /*
+ * Always do this for recovery, 2G/5G toggling and LNA_TRANS.
+ */
+ ath_dbg(common, MCI, "MCI Set BT state to AWAKE\n");
+ ar9003_mci_state(ah, MCI_STATE_SET_BT_AWAKE, NULL);
+ }
+
+ /* Processing SYS_WAKING/SYS_SLEEPING */
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING) {
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING;
+
+ if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_SLEEP) {
+
+ if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL)
+ == MCI_BT_SLEEP)
+ ath_dbg(common, MCI,
+ "MCI BT stays in sleep mode\n");
+ else {
+ ath_dbg(common, MCI,
+ "MCI Set BT state to AWAKE\n");
+ ar9003_mci_state(ah,
+ MCI_STATE_SET_BT_AWAKE, NULL);
+ }
+ } else
+ ath_dbg(common, MCI, "MCI BT stays in AWAKE mode\n");
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING) {
+
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING;
+
+ if (ar9003_mci_state(ah, MCI_STATE_BT, NULL) == MCI_BT_AWAKE) {
+
+ if (ar9003_mci_state(ah, MCI_STATE_REMOTE_SLEEP, NULL)
+ == MCI_BT_AWAKE)
+ ath_dbg(common, MCI,
+ "MCI BT stays in AWAKE mode\n");
+ else {
+ ath_dbg(common, MCI,
+ "MCI SetBT state to SLEEP\n");
+ ar9003_mci_state(ah, MCI_STATE_SET_BT_SLEEP,
+ NULL);
+ }
+ } else
+ ath_dbg(common, MCI, "MCI BT stays in SLEEP mode\n");
+ }
+
+ if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
+ (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)) {
+
+ ath_dbg(common, MCI, "MCI RX broken, skip GPM msgs\n");
+ ar9003_mci_state(ah, MCI_STATE_RECOVER_RX, NULL);
+ skip_gpm = true;
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO) {
+
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO;
+ offset = ar9003_mci_state(ah, MCI_STATE_LAST_SCHD_MSG_OFFSET,
+ NULL);
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_GPM) {
+
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_GPM;
+
+ while (more_data == MCI_GPM_MORE) {
+
+ pgpm = mci->gpm_buf.bf_addr;
+ offset = ar9003_mci_state(ah,
+ MCI_STATE_NEXT_GPM_OFFSET, &more_data);
+
+ if (offset == MCI_GPM_INVALID)
+ break;
+
+ pgpm += (offset >> 2);
+
+ /*
+ * The first dword holds the timer value; the real data
+ * starts from the 2nd dword.
+ */
+
+ subtype = MCI_GPM_TYPE(pgpm);
+ opcode = MCI_GPM_OPCODE(pgpm);
+
+ if (!skip_gpm) {
+
+ if (MCI_GPM_IS_CAL_TYPE(subtype))
+ ath_mci_cal_msg(sc, subtype,
+ (u8 *) pgpm);
+ else {
+ switch (subtype) {
+ case MCI_GPM_COEX_AGENT:
+ ath_mci_msg(sc, opcode,
+ (u8 *) pgpm);
+ break;
+ default:
+ break;
+ }
+ }
+ }
+ MCI_GPM_RECYCLE(pgpm);
+ }
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_HW_MSG_MASK) {
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL)
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL;
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_LNA_INFO) {
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_LNA_INFO;
+ ath_dbg(common, MCI, "MCI LNA_INFO\n");
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_INFO) {
+
+ int value_dbm = ar9003_mci_state(ah,
+ MCI_STATE_CONT_RSSI_POWER, NULL);
+
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_INFO;
+
+ if (ar9003_mci_state(ah, MCI_STATE_CONT_TXRX, NULL))
+ ath_dbg(common, MCI,
+ "MCI CONT_INFO: (tx) pri = %d, pwr = %d dBm\n",
+ ar9003_mci_state(ah,
+ MCI_STATE_CONT_PRIORITY, NULL),
+ value_dbm);
+ else
+ ath_dbg(common, MCI,
+ "MCI CONT_INFO: (rx) pri = %d,pwr = %d dBm\n",
+ ar9003_mci_state(ah,
+ MCI_STATE_CONT_PRIORITY, NULL),
+ value_dbm);
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_NACK) {
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_NACK;
+ ath_dbg(common, MCI, "MCI CONT_NACK\n");
+ }
+
+ if (mci_int_rxmsg & AR_MCI_INTERRUPT_RX_MSG_CONT_RST) {
+ mci_int_rxmsg &= ~AR_MCI_INTERRUPT_RX_MSG_CONT_RST;
+ ath_dbg(common, MCI, "MCI CONT_RST\n");
+ }
+ }
+
+ if ((mci_int & AR_MCI_INTERRUPT_RX_INVALID_HDR) ||
+ (mci_int & AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT))
+ mci_int &= ~(AR_MCI_INTERRUPT_RX_INVALID_HDR |
+ AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT);
+
+ if (mci_int_rxmsg & 0xfffffffe)
+ ath_dbg(common, MCI, "MCI not processed mci_int_rxmsg = 0x%x\n",
+ mci_int_rxmsg);
+}
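For reference, a minimal standalone sketch (not part of the patch; the numbers are made up) of the unit conventions used by ath_mci_adjust_aggr_limit() and ath_mci_update_scheme() above: btcoex_period and duty_cycle give the WLAN airtime in ms, while aggr_limit counts 0.25 ms steps, so the value 8 logged above corresponds to the quoted 2 ms.

#include <stdio.h>

/* WLAN airtime left over in one coex period, mirroring the
 * expression in ath_mci_adjust_aggr_limit():
 * period * (100 - duty_cycle) / 100. */
static unsigned int mci_wlan_airtime(unsigned int period_ms,
                                     unsigned int duty_cycle_pct)
{
        return period_ms * (100 - duty_cycle_pct) / 100;
}

int main(void)
{
        unsigned int airtime = mci_wlan_airtime(40, 55); /* 18 ms for WLAN */
        unsigned int aggr_limit = 8;                     /* 8 * 0.25 ms = 2 ms */

        printf("airtime=%u ms, aggr limit=%u.%02u ms\n",
               airtime, aggr_limit / 4, (aggr_limit % 4) * 25);
        return 0;
}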
diff --git a/drivers/net/wireless/ath/ath9k/mci.h b/drivers/net/wireless/ath/ath9k/mci.h
new file mode 100644
index 0000000..29e3e51
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/mci.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (c) 2010-2011 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef MCI_H
+#define MCI_H
+
+#define ATH_MCI_SCHED_BUF_SIZE (16 * 16) /* 16 entries, 4 dwords each */
+#define ATH_MCI_GPM_MAX_ENTRY 16
+#define ATH_MCI_GPM_BUF_SIZE (ATH_MCI_GPM_MAX_ENTRY * 16)
+#define ATH_MCI_DEF_BT_PERIOD 40
+#define ATH_MCI_BDR_DUTY_CYCLE 20
+#define ATH_MCI_MAX_DUTY_CYCLE 90
+
+#define ATH_MCI_DEF_AGGR_LIMIT 6 /* in 0.25 ms units */
+#define ATH_MCI_MAX_ACL_PROFILE 7
+#define ATH_MCI_MAX_SCO_PROFILE 1
+#define ATH_MCI_MAX_PROFILE (ATH_MCI_MAX_ACL_PROFILE +\
+ ATH_MCI_MAX_SCO_PROFILE)
+
+#define INC_PROF(_mci, _info) do { \
+ switch (_info->type) { \
+ case MCI_GPM_COEX_PROFILE_RFCOMM:\
+ _mci->num_other_acl++; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_A2DP: \
+ _mci->num_a2dp++; \
+ if (!_info->edr) \
+ _mci->num_bdr++; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_HID: \
+ _mci->num_hid++; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_BNEP: \
+ _mci->num_pan++; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_VOICE: \
+ _mci->num_sco++; \
+ break; \
+ default: \
+ break; \
+ } \
+ } while (0)
+
+#define DEC_PROF(_mci, _info) do { \
+ switch (_info->type) { \
+ case MCI_GPM_COEX_PROFILE_RFCOMM:\
+ _mci->num_other_acl--; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_A2DP: \
+ _mci->num_a2dp--; \
+ if (!_info->edr) \
+ _mci->num_bdr--; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_HID: \
+ _mci->num_hid--; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_BNEP: \
+ _mci->num_pan--; \
+ break; \
+ case MCI_GPM_COEX_PROFILE_VOICE: \
+ _mci->num_sco--; \
+ break; \
+ default: \
+ break; \
+ } \
+ } while (0)
+
+#define NUM_PROF(_mci) (_mci->num_other_acl + _mci->num_a2dp + \
+ _mci->num_hid + _mci->num_pan + _mci->num_sco)
+
+struct ath_mci_profile_info {
+ u8 type;
+ u8 conn_handle;
+ bool start;
+ bool master;
+ bool edr;
+ u8 voice_type;
+ u16 T; /* Voice: Tvoice, HID: Tsniff, in slots */
+ u8 W; /* Voice: Wvoice, HID: Sniff timeout, in slots */
+ u8 A; /* HID: Sniff attempt, in slots */
+ struct list_head list;
+};
+
+struct ath_mci_profile_status {
+ bool is_critical;
+ bool is_link;
+ u8 conn_handle;
+};
+
+struct ath_mci_profile {
+ struct list_head info;
+ DECLARE_BITMAP(status, ATH_MCI_MAX_PROFILE);
+ u16 aggr_limit;
+ u8 num_mgmt;
+ u8 num_sco;
+ u8 num_a2dp;
+ u8 num_hid;
+ u8 num_pan;
+ u8 num_other_acl;
+ u8 num_bdr;
+};
+
+
+struct ath_mci_buf {
+ void *bf_addr; /* virtual addr of buffer */
+ dma_addr_t bf_paddr; /* physical addr of buffer */
+ u32 bf_len; /* len of data */
+};
+
+struct ath_mci_coex {
+ atomic_t mci_cal_flag;
+ struct ath_mci_buf sched_buf;
+ struct ath_mci_buf gpm_buf;
+ u32 bt_cal_start;
+};
+
+void ath_mci_flush_profile(struct ath_mci_profile *mci);
+int ath_mci_setup(struct ath_softc *sc);
+void ath_mci_cleanup(struct ath_softc *sc);
+void ath_mci_intr(struct ath_softc *sc);
+#endif
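A simplified, self-contained illustration of how the per-profile counters feed NUM_PROF(); the struct below is only a stand-in for the counter fields of struct ath_mci_profile, and the values are invented.

#include <stdio.h>

/* Stand-in for the counter fields of struct ath_mci_profile. */
struct mci_counts {
        unsigned int num_sco, num_a2dp, num_hid, num_pan, num_other_acl;
};

/* Same shape as the NUM_PROF() macro in the header above. */
#define NUM_PROF(_mci) ((_mci)->num_other_acl + (_mci)->num_a2dp + \
                        (_mci)->num_hid + (_mci)->num_pan + (_mci)->num_sco)

int main(void)
{
        /* e.g. one SCO (voice) link and two HID devices reported by BT */
        struct mci_counts mci = { .num_sco = 1, .num_hid = 2 };

        printf("active BT profiles: %u\n", NUM_PROF(&mci)); /* prints 3 */
        return 0;
}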
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 2dcdf63..77dc327 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -121,7 +121,7 @@ static void ath_pci_aspm_init(struct ath_common *common)
if (!parent)
return;
- if (ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) {
+ if (ath9k_hw_get_btcoex_scheme(ah) != ATH_BTCOEX_CFG_NONE) {
/* Bluetooth coexistence requires disabling ASPM. */
pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &aspm);
aspm &= ~(PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
@@ -307,12 +307,11 @@ static int ath_pci_suspend(struct device *device)
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath_softc *sc = hw->priv;
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
-
/* The device has to be moved to FULLSLEEP forcibly.
* Otherwise the chip never moves to full sleep
* when no interface is up.
*/
+ ath9k_hw_disable(sc->sc_ah);
ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
return 0;
@@ -321,8 +320,6 @@ static int ath_pci_suspend(struct device *device)
static int ath_pci_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
- struct ieee80211_hw *hw = pci_get_drvdata(pdev);
- struct ath_softc *sc = hw->priv;
u32 val;
/*
@@ -334,22 +331,6 @@ static int ath_pci_resume(struct device *device)
if ((val & 0x0000ff00) != 0)
pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
- ath9k_ps_wakeup(sc);
- /* Enable LED */
- ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
- AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
-
- /*
- * Reset key cache to sane defaults (all entries cleared) instead of
- * semi-random values after suspend/resume.
- */
- ath9k_cmn_init_crypto(sc->sc_ah);
- ath9k_ps_restore(sc);
-
- sc->ps_idle = true;
- ath_radio_disable(sc, hw);
-
return 0;
}
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 528d5f3..a427a16 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -694,7 +694,7 @@ static u8 ath_rc_get_highest_rix(struct ath_softc *sc,
return rate;
/* This should not happen */
- WARN_ON(1);
+ WARN_ON_ONCE(1);
rate = ath_rc_priv->valid_rate_index[0];
@@ -1199,7 +1199,7 @@ struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
return &ar5416_11na_ratetable;
return &ar5416_11a_ratetable;
default:
- ath_dbg(common, ATH_DBG_CONFIG, "Invalid band\n");
+ ath_dbg(common, CONFIG, "Invalid band\n");
return NULL;
}
}
@@ -1276,8 +1276,7 @@ static void ath_rc_init(struct ath_softc *sc,
ath_rc_priv->valid_rate_index[k-1];
ath_rc_priv->rate_table = rate_table;
- ath_dbg(common, ATH_DBG_CONFIG,
- "RC Initialized with capabilities: 0x%x\n",
+ ath_dbg(common, CONFIG, "RC Initialized with capabilities: 0x%x\n",
ath_rc_priv->ht_cap);
}
@@ -1347,7 +1346,7 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
fc = hdr->frame_control;
for (i = 0; i < sc->hw->max_rates; i++) {
struct ieee80211_tx_rate *rate = &tx_info->status.rates[i];
- if (!rate->count)
+ if (rate->idx < 0 || !rate->count)
break;
final_ts_idx = i;
@@ -1474,7 +1473,7 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
oper_cw40, oper_sgi);
ath_rc_init(sc, priv_sta, sband, sta, rate_table);
- ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
+ ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
"Operating HT Bandwidth changed to: %d\n",
sc->hw->conf.channel_type);
}
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 67b862c..7e1a91a 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -172,7 +172,7 @@ static void ath_rx_addbuffer_edma(struct ath_softc *sc,
u32 nbuf = 0;
if (list_empty(&sc->rx.rxbuf)) {
- ath_dbg(common, ATH_DBG_QUEUE, "No free rx buf available\n");
+ ath_dbg(common, QUEUE, "No free rx buf available\n");
return;
}
@@ -337,7 +337,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
return ath_rx_edma_init(sc, nbufs);
} else {
- ath_dbg(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
+ ath_dbg(common, CONFIG, "cachelsz %u rxbufsize %u\n",
common->cachelsz, common->rx_bufsize);
/* Initialize rx descriptors */
@@ -475,7 +475,6 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
return rfilt;
-#undef RX_FILTER_PRESERVE
}
int ath_startrecv(struct ath_softc *sc)
@@ -592,7 +591,7 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
if (sc->ps_flags & PS_BEACON_SYNC) {
sc->ps_flags &= ~PS_BEACON_SYNC;
- ath_dbg(common, ATH_DBG_PS,
+ ath_dbg(common, PS,
"Reconfigure Beacon timers based on timestamp from the AP\n");
ath_set_beacon(sc);
}
@@ -605,7 +604,7 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
* a backup trigger for returning into NETWORK SLEEP state,
* so we are waiting for it as well.
*/
- ath_dbg(common, ATH_DBG_PS,
+ ath_dbg(common, PS,
"Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
return;
@@ -618,8 +617,7 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
* been delivered.
*/
sc->ps_flags &= ~PS_WAIT_FOR_CAB;
- ath_dbg(common, ATH_DBG_PS,
- "PS wait for CAB frames timed out\n");
+ ath_dbg(common, PS, "PS wait for CAB frames timed out\n");
}
}
@@ -644,13 +642,13 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb, bool mybeacon)
* point.
*/
sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
- ath_dbg(common, ATH_DBG_PS,
+ ath_dbg(common, PS,
"All PS CAB frames received, back to sleep\n");
} else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
!is_multicast_ether_addr(hdr->addr1) &&
!ieee80211_has_morefrags(hdr->frame_control)) {
sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
- ath_dbg(common, ATH_DBG_PS,
+ ath_dbg(common, PS,
"Going back to sleep after having received PS-Poll data (0x%lx)\n",
sc->ps_flags & (PS_WAIT_FOR_BEACON |
PS_WAIT_FOR_CAB |
@@ -824,6 +822,14 @@ static bool ath9k_rx_accept(struct ath_common *common,
(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
ATH9K_RXERR_KEYMISS));
+ /*
+ * Key miss events are only relevant for pairwise keys where the
+ * descriptor does contain a valid key index. This has been observed
+ * mostly with CCMP encryption.
+ */
+ if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID)
+ rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
+
if (!rx_stats->rs_datalen)
return false;
/*
@@ -933,7 +939,7 @@ static int ath9k_process_rate(struct ath_common *common,
* No valid hardware bitrate found -- we should not get here
* because hardware has already validated this frame as OK.
*/
- ath_dbg(common, ATH_DBG_ANY,
+ ath_dbg(common, ANY,
"unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
rx_stats->rs_rate);
@@ -1824,6 +1830,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
hdr = (struct ieee80211_hdr *) (hdr_skb->data + rx_status_len);
rxs = IEEE80211_SKB_RXCB(hdr_skb);
if (ieee80211_is_beacon(hdr->frame_control) &&
+ !is_zero_ether_addr(common->curbssid) &&
!compare_ether_addr(hdr->addr3, common->curbssid))
rs.is_mybeacon = true;
else
@@ -1838,11 +1845,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
if (sc->sc_flags & SC_OP_RXFLUSH)
goto requeue_drop_frag;
- retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
- rxs, &decrypt_error);
- if (retval)
- goto requeue_drop_frag;
-
rxs->mactime = (tsf & ~0xffffffffULL) | rs.rs_tstamp;
if (rs.rs_tstamp > tsf_lower &&
unlikely(rs.rs_tstamp - tsf_lower > 0x10000000))
@@ -1852,6 +1854,11 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
unlikely(tsf_lower - rs.rs_tstamp > 0x10000000))
rxs->mactime += 0x100000000ULL;
+ retval = ath9k_rx_skb_preprocess(common, hw, hdr, &rs,
+ rxs, &decrypt_error);
+ if (retval)
+ goto requeue_drop_frag;
+
/* Ensure we always have an skb to requeue once we are done
* processing the current buffer's skb */
requeue_skb = ath_rxbuf_alloc(common, common->rx_bufsize, GFP_ATOMIC);
@@ -1923,15 +1930,20 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
skb = hdr_skb;
}
- /*
- * change the default rx antenna if rx diversity chooses the
- * other antenna 3 times in a row.
- */
- if (sc->rx.defant != rs.rs_antenna) {
- if (++sc->rx.rxotherant >= 3)
- ath_setdefantenna(sc, rs.rs_antenna);
- } else {
- sc->rx.rxotherant = 0;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) {
+
+ /*
+ * change the default rx antenna if rx diversity
+ * chooses the other antenna 3 times in a row.
+ */
+ if (sc->rx.defant != rs.rs_antenna) {
+ if (++sc->rx.rxotherant >= 3)
+ ath_setdefantenna(sc, rs.rs_antenna);
+ } else {
+ sc->rx.rxotherant = 0;
+ }
+
}
if (rxs->flag & RX_FLAG_MMIC_STRIPPED)
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 8fcb7e9..6e2f188 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -1006,6 +1006,8 @@ enum {
#define AR_INTR_ASYNC_MASK (AR_SREV_9340(ah) ? 0x4018 : 0x4030)
#define AR_INTR_ASYNC_MASK_GPIO 0xFFFC0000
#define AR_INTR_ASYNC_MASK_GPIO_S 18
+#define AR_INTR_ASYNC_MASK_MCI 0x00000080
+#define AR_INTR_ASYNC_MASK_MCI_S 7
#define AR_INTR_SYNC_MASK (AR_SREV_9340(ah) ? 0x401c : 0x4034)
#define AR_INTR_SYNC_MASK_GPIO 0xFFFC0000
@@ -1013,6 +1015,14 @@ enum {
#define AR_INTR_ASYNC_CAUSE_CLR (AR_SREV_9340(ah) ? 0x4020 : 0x4038)
#define AR_INTR_ASYNC_CAUSE (AR_SREV_9340(ah) ? 0x4020 : 0x4038)
+#define AR_INTR_ASYNC_CAUSE_MCI 0x00000080
+#define AR_INTR_ASYNC_USED (AR_INTR_MAC_IRQ | \
+ AR_INTR_ASYNC_CAUSE_MCI)
+
+/* Asynchronous Interrupt Enable Register */
+#define AR_INTR_ASYNC_ENABLE_MCI 0x00000080
+#define AR_INTR_ASYNC_ENABLE_MCI_S 7
+
#define AR_INTR_ASYNC_ENABLE (AR_SREV_9340(ah) ? 0x4024 : 0x403c)
#define AR_INTR_ASYNC_ENABLE_GPIO 0xFFFC0000
@@ -1269,6 +1279,8 @@ enum {
#define AR_RTC_INTR_MASK \
((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0058) : 0x7058)
+#define AR_RTC_KEEP_AWAKE 0x7034
+
/* RTC_DERIVED_* - only for AR9100 */
#define AR_RTC_DERIVED_CLK \
@@ -1555,6 +1567,8 @@ enum {
#define AR_DIAG_FRAME_NV0 0x00020000
#define AR_DIAG_OBS_PT_SEL1 0x000C0000
#define AR_DIAG_OBS_PT_SEL1_S 18
+#define AR_DIAG_OBS_PT_SEL2 0x08000000
+#define AR_DIAG_OBS_PT_SEL2_S 27
#define AR_DIAG_FORCE_RX_CLEAR 0x00100000 /* force rx_clear high */
#define AR_DIAG_IGNORE_VIRT_CS 0x00200000
#define AR_DIAG_FORCE_CH_IDLE_HIGH 0x00400000
@@ -1752,19 +1766,10 @@ enum {
#define AR_BT_COEX_WL_WEIGHTS0 0x8174
#define AR_BT_COEX_WL_WEIGHTS1 0x81c4
+#define AR_MCI_COEX_WL_WEIGHTS(_i) (0x18b0 + (_i << 2))
+#define AR_BT_COEX_BT_WEIGHTS(_i) (0x83ac + (_i << 2))
-#define AR_BT_COEX_BT_WEIGHTS0 0x83ac
-#define AR_BT_COEX_BT_WEIGHTS1 0x83b0
-#define AR_BT_COEX_BT_WEIGHTS2 0x83b4
-#define AR_BT_COEX_BT_WEIGHTS3 0x83b8
-
-#define AR9300_BT_WGHT 0xcccc4444
-#define AR9300_STOMP_ALL_WLAN_WGHT0 0xfffffff0
-#define AR9300_STOMP_ALL_WLAN_WGHT1 0xfffffff0
-#define AR9300_STOMP_LOW_WLAN_WGHT0 0x88888880
-#define AR9300_STOMP_LOW_WLAN_WGHT1 0x88888880
-#define AR9300_STOMP_NONE_WLAN_WGHT0 0x00000000
-#define AR9300_STOMP_NONE_WLAN_WGHT1 0x00000000
+#define AR9300_BT_WGHT 0xcccc4444
#define AR_BT_COEX_MODE2 0x817c
#define AR_BT_BCN_MISS_THRESH 0x000000ff
@@ -1938,37 +1943,277 @@ enum {
#define AR_PHY_AGC_CONTROL_YCOK_MAX_S 6
/* MCI Registers */
-#define AR_MCI_INTERRUPT_RX_MSG_EN 0x183c
-#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET 0x00000001
-#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S 0
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL 0x00000002
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S 1
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK 0x00000004
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S 2
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO 0x00000008
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S 3
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST 0x00000010
-#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S 4
-#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO 0x00000020
-#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S 5
-#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT 0x00000040
-#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S 6
-#define AR_MCI_INTERRUPT_RX_MSG_GPM 0x00000100
-#define AR_MCI_INTERRUPT_RX_MSG_GPM_S 8
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO 0x00000200
-#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S 9
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING 0x00000400
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S 10
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING 0x00000800
-#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S 11
-#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE 0x00001000
-#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S 12
-#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \
+
+#define AR_MCI_COMMAND0 0x1800
+#define AR_MCI_COMMAND0_HEADER 0xFF
+#define AR_MCI_COMMAND0_HEADER_S 0
+#define AR_MCI_COMMAND0_LEN 0x1f00
+#define AR_MCI_COMMAND0_LEN_S 8
+#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP 0x2000
+#define AR_MCI_COMMAND0_DISABLE_TIMESTAMP_S 13
+
+#define AR_MCI_COMMAND1 0x1804
+
+#define AR_MCI_COMMAND2 0x1808
+#define AR_MCI_COMMAND2_RESET_TX 0x01
+#define AR_MCI_COMMAND2_RESET_TX_S 0
+#define AR_MCI_COMMAND2_RESET_RX 0x02
+#define AR_MCI_COMMAND2_RESET_RX_S 1
+#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES 0x3FC
+#define AR_MCI_COMMAND2_RESET_RX_NUM_CYCLES_S 2
+#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP 0x400
+#define AR_MCI_COMMAND2_RESET_REQ_WAKEUP_S 10
+
+#define AR_MCI_RX_CTRL 0x180c
+
+#define AR_MCI_TX_CTRL 0x1810
+/* 0 = no division, 1 = divide by 2, 2 = divide by 4, 3 = divide by 8 */
+#define AR_MCI_TX_CTRL_CLK_DIV 0x03
+#define AR_MCI_TX_CTRL_CLK_DIV_S 0
+#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE 0x04
+#define AR_MCI_TX_CTRL_DISABLE_LNA_UPDATE_S 2
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ 0xFFFFF8
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_FREQ_S 3
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM 0xF000000
+#define AR_MCI_TX_CTRL_GAIN_UPDATE_NUM_S 24
+
+#define AR_MCI_MSG_ATTRIBUTES_TABLE 0x1814
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM 0xFFFF
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_CHECKSUM_S 0
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR 0xFFFF0000
+#define AR_MCI_MSG_ATTRIBUTES_TABLE_INVALID_HDR_S 16
+
+#define AR_MCI_SCHD_TABLE_0 0x1818
+#define AR_MCI_SCHD_TABLE_1 0x181c
+#define AR_MCI_GPM_0 0x1820
+#define AR_MCI_GPM_1 0x1824
+#define AR_MCI_GPM_WRITE_PTR 0xFFFF0000
+#define AR_MCI_GPM_WRITE_PTR_S 16
+#define AR_MCI_GPM_BUF_LEN 0x0000FFFF
+#define AR_MCI_GPM_BUF_LEN_S 0
+
+#define AR_MCI_INTERRUPT_RAW 0x1828
+#define AR_MCI_INTERRUPT_EN 0x182c
+#define AR_MCI_INTERRUPT_SW_MSG_DONE 0x00000001
+#define AR_MCI_INTERRUPT_SW_MSG_DONE_S 0
+#define AR_MCI_INTERRUPT_CPU_INT_MSG 0x00000002
+#define AR_MCI_INTERRUPT_CPU_INT_MSG_S 1
+#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL 0x00000004
+#define AR_MCI_INTERRUPT_RX_CKSUM_FAIL_S 2
+#define AR_MCI_INTERRUPT_RX_INVALID_HDR 0x00000008
+#define AR_MCI_INTERRUPT_RX_INVALID_HDR_S 3
+#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL 0x00000010
+#define AR_MCI_INTERRUPT_RX_HW_MSG_FAIL_S 4
+#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL 0x00000020
+#define AR_MCI_INTERRUPT_RX_SW_MSG_FAIL_S 5
+#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL 0x00000080
+#define AR_MCI_INTERRUPT_TX_HW_MSG_FAIL_S 7
+#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL 0x00000100
+#define AR_MCI_INTERRUPT_TX_SW_MSG_FAIL_S 8
+#define AR_MCI_INTERRUPT_RX_MSG 0x00000200
+#define AR_MCI_INTERRUPT_RX_MSG_S 9
+#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE 0x00000400
+#define AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE_S 10
+#define AR_MCI_INTERRUPT_BT_PRI 0x07fff800
+#define AR_MCI_INTERRUPT_BT_PRI_S 11
+#define AR_MCI_INTERRUPT_BT_PRI_THRESH 0x08000000
+#define AR_MCI_INTERRUPT_BT_PRI_THRESH_S 27
+#define AR_MCI_INTERRUPT_BT_FREQ 0x10000000
+#define AR_MCI_INTERRUPT_BT_FREQ_S 28
+#define AR_MCI_INTERRUPT_BT_STOMP 0x20000000
+#define AR_MCI_INTERRUPT_BT_STOMP_S 29
+#define AR_MCI_INTERRUPT_BB_AIC_IRQ 0x40000000
+#define AR_MCI_INTERRUPT_BB_AIC_IRQ_S 30
+#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT 0x80000000
+#define AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT_S 31
+
+#define AR_MCI_INTERRUPT_DEFAULT (AR_MCI_INTERRUPT_SW_MSG_DONE | \
+ AR_MCI_INTERRUPT_RX_INVALID_HDR | \
+ AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_TX_SW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_RX_MSG | \
+ AR_MCI_INTERRUPT_REMOTE_SLEEP_UPDATE | \
+ AR_MCI_INTERRUPT_CONT_INFO_TIMEOUT)
+
+#define AR_MCI_INTERRUPT_MSG_FAIL_MASK (AR_MCI_INTERRUPT_RX_HW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_RX_SW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_TX_HW_MSG_FAIL | \
+ AR_MCI_INTERRUPT_TX_SW_MSG_FAIL)
+
+#define AR_MCI_REMOTE_CPU_INT 0x1830
+#define AR_MCI_REMOTE_CPU_INT_EN 0x1834
+#define AR_MCI_INTERRUPT_RX_MSG_RAW 0x1838
+#define AR_MCI_INTERRUPT_RX_MSG_EN 0x183c
+#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET 0x00000001
+#define AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET_S 0
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL 0x00000002
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL_S 1
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK 0x00000004
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_NACK_S 2
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO 0x00000008
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_INFO_S 3
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST 0x00000010
+#define AR_MCI_INTERRUPT_RX_MSG_CONT_RST_S 4
+#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO 0x00000020
+#define AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO_S 5
+#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT 0x00000040
+#define AR_MCI_INTERRUPT_RX_MSG_CPU_INT_S 6
+#define AR_MCI_INTERRUPT_RX_MSG_GPM 0x00000100
+#define AR_MCI_INTERRUPT_RX_MSG_GPM_S 8
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO 0x00000200
+#define AR_MCI_INTERRUPT_RX_MSG_LNA_INFO_S 9
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING 0x00000400
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING_S 10
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING 0x00000800
+#define AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING_S 11
+#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE 0x00001000
+#define AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE_S 12
+#define AR_MCI_INTERRUPT_RX_HW_MSG_MASK (AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \
AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL| \
AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \
AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \
AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \
AR_MCI_INTERRUPT_RX_MSG_CONT_RST)
+#define AR_MCI_INTERRUPT_RX_MSG_DEFAULT (AR_MCI_INTERRUPT_RX_MSG_GPM | \
+ AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET| \
+ AR_MCI_INTERRUPT_RX_MSG_SYS_WAKING | \
+ AR_MCI_INTERRUPT_RX_MSG_SYS_SLEEPING| \
+ AR_MCI_INTERRUPT_RX_MSG_SCHD_INFO | \
+ AR_MCI_INTERRUPT_RX_MSG_LNA_CONTROL | \
+ AR_MCI_INTERRUPT_RX_MSG_LNA_INFO | \
+ AR_MCI_INTERRUPT_RX_MSG_CONT_NACK | \
+ AR_MCI_INTERRUPT_RX_MSG_CONT_INFO | \
+ AR_MCI_INTERRUPT_RX_MSG_CONT_RST | \
+ AR_MCI_INTERRUPT_RX_MSG_REQ_WAKE)
+
+#define AR_MCI_CPU_INT 0x1840
+
+#define AR_MCI_RX_STATUS 0x1844
+#define AR_MCI_RX_LAST_SCHD_MSG_INDEX 0x00000F00
+#define AR_MCI_RX_LAST_SCHD_MSG_INDEX_S 8
+#define AR_MCI_RX_REMOTE_SLEEP 0x00001000
+#define AR_MCI_RX_REMOTE_SLEEP_S 12
+#define AR_MCI_RX_MCI_CLK_REQ 0x00002000
+#define AR_MCI_RX_MCI_CLK_REQ_S 13
+
+#define AR_MCI_CONT_STATUS 0x1848
+#define AR_MCI_CONT_RSSI_POWER 0x000000FF
+#define AR_MCI_CONT_RSSI_POWER_S 0
+#define AR_MCI_CONT_RRIORITY 0x0000FF00
+#define AR_MCI_CONT_RRIORITY_S 8
+#define AR_MCI_CONT_TXRX 0x00010000
+#define AR_MCI_CONT_TXRX_S 16
+
+#define AR_MCI_BT_PRI0 0x184c
+#define AR_MCI_BT_PRI1 0x1850
+#define AR_MCI_BT_PRI2 0x1854
+#define AR_MCI_BT_PRI3 0x1858
+#define AR_MCI_BT_PRI 0x185c
+#define AR_MCI_WL_FREQ0 0x1860
+#define AR_MCI_WL_FREQ1 0x1864
+#define AR_MCI_WL_FREQ2 0x1868
+#define AR_MCI_GAIN 0x186c
+#define AR_MCI_WBTIMER1 0x1870
+#define AR_MCI_WBTIMER2 0x1874
+#define AR_MCI_WBTIMER3 0x1878
+#define AR_MCI_WBTIMER4 0x187c
+#define AR_MCI_MAXGAIN 0x1880
+#define AR_MCI_HW_SCHD_TBL_CTL 0x1884
+#define AR_MCI_HW_SCHD_TBL_D0 0x1888
+#define AR_MCI_HW_SCHD_TBL_D1 0x188c
+#define AR_MCI_HW_SCHD_TBL_D2 0x1890
+#define AR_MCI_HW_SCHD_TBL_D3 0x1894
+#define AR_MCI_TX_PAYLOAD0 0x1898
+#define AR_MCI_TX_PAYLOAD1 0x189c
+#define AR_MCI_TX_PAYLOAD2 0x18a0
+#define AR_MCI_TX_PAYLOAD3 0x18a4
+#define AR_BTCOEX_WBTIMER 0x18a8
+
+#define AR_BTCOEX_CTRL 0x18ac
+#define AR_BTCOEX_CTRL_AR9462_MODE 0x00000001
+#define AR_BTCOEX_CTRL_AR9462_MODE_S 0
+#define AR_BTCOEX_CTRL_WBTIMER_EN 0x00000002
+#define AR_BTCOEX_CTRL_WBTIMER_EN_S 1
+#define AR_BTCOEX_CTRL_MCI_MODE_EN 0x00000004
+#define AR_BTCOEX_CTRL_MCI_MODE_EN_S 2
+#define AR_BTCOEX_CTRL_LNA_SHARED 0x00000008
+#define AR_BTCOEX_CTRL_LNA_SHARED_S 3
+#define AR_BTCOEX_CTRL_PA_SHARED 0x00000010
+#define AR_BTCOEX_CTRL_PA_SHARED_S 4
+#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN 0x00000020
+#define AR_BTCOEX_CTRL_ONE_STEP_LOOK_AHEAD_EN_S 5
+#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN 0x00000040
+#define AR_BTCOEX_CTRL_TIME_TO_NEXT_BT_THRESH_EN_S 6
+#define AR_BTCOEX_CTRL_NUM_ANTENNAS 0x00000180
+#define AR_BTCOEX_CTRL_NUM_ANTENNAS_S 7
+#define AR_BTCOEX_CTRL_RX_CHAIN_MASK 0x00000E00
+#define AR_BTCOEX_CTRL_RX_CHAIN_MASK_S 9
+#define AR_BTCOEX_CTRL_AGGR_THRESH 0x00007000
+#define AR_BTCOEX_CTRL_AGGR_THRESH_S 12
+#define AR_BTCOEX_CTRL_1_CHAIN_BCN 0x00080000
+#define AR_BTCOEX_CTRL_1_CHAIN_BCN_S 19
+#define AR_BTCOEX_CTRL_1_CHAIN_ACK 0x00100000
+#define AR_BTCOEX_CTRL_1_CHAIN_ACK_S 20
+#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN 0x1FE00000
+#define AR_BTCOEX_CTRL_WAIT_BA_MARGIN_S 28
+#define AR_BTCOEX_CTRL_REDUCE_TXPWR 0x20000000
+#define AR_BTCOEX_CTRL_REDUCE_TXPWR_S 29
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_10 0x40000000
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_10_S 30
+#define AR_BTCOEX_CTRL_SPDT_POLARITY 0x80000000
+#define AR_BTCOEX_CTRL_SPDT_POLARITY_S 31
+
+#define AR_BTCOEX_WL_WEIGHTS0 0x18b0
+#define AR_BTCOEX_WL_WEIGHTS1 0x18b4
+#define AR_BTCOEX_WL_WEIGHTS2 0x18b8
+#define AR_BTCOEX_WL_WEIGHTS3 0x18bc
+#define AR_BTCOEX_MAX_TXPWR(_x) (0x18c0 + ((_x) << 2))
+#define AR_BTCOEX_WL_LNA 0x1940
+#define AR_BTCOEX_RFGAIN_CTRL 0x1944
+
+#define AR_BTCOEX_CTRL2 0x1948
+#define AR_BTCOEX_CTRL2_TXPWR_THRESH 0x0007F800
+#define AR_BTCOEX_CTRL2_TXPWR_THRESH_S 11
+#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK 0x00380000
+#define AR_BTCOEX_CTRL2_TX_CHAIN_MASK_S 19
+#define AR_BTCOEX_CTRL2_RX_DEWEIGHT 0x00400000
+#define AR_BTCOEX_CTRL2_RX_DEWEIGHT_S 22
+#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL 0x00800000
+#define AR_BTCOEX_CTRL2_GPIO_OBS_SEL_S 23
+#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL 0x01000000
+#define AR_BTCOEX_CTRL2_MAC_BB_OBS_SEL_S 24
+#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE 0x02000000
+#define AR_BTCOEX_CTRL2_DESC_BASED_TXPWR_ENABLE_S 25
+
+#define AR_BTCOEX_CTRL_SPDT_ENABLE 0x00000001
+#define AR_BTCOEX_CTRL_SPDT_ENABLE_S 0
+#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL 0x00000002
+#define AR_BTCOEX_CTRL_BT_OWN_SPDT_CTRL_S 1
+#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT 0x00000004
+#define AR_BTCOEX_CTRL_USE_LATCHED_BT_ANT_S 2
+#define AR_GLB_WLAN_UART_INTF_EN 0x00020000
+#define AR_GLB_WLAN_UART_INTF_EN_S 17
+#define AR_GLB_DS_JTAG_DISABLE 0x00040000
+#define AR_GLB_DS_JTAG_DISABLE_S 18
+
+#define AR_BTCOEX_RC 0x194c
+#define AR_BTCOEX_MAX_RFGAIN(_x) (0x1950 + ((_x) << 2))
+#define AR_BTCOEX_DBG 0x1a50
+#define AR_MCI_LAST_HW_MSG_HDR 0x1a54
+#define AR_MCI_LAST_HW_MSG_BDY 0x1a58
+
+#define AR_MCI_SCHD_TABLE_2 0x1a5c
+#define AR_MCI_SCHD_TABLE_2_MEM_BASED 0x00000001
+#define AR_MCI_SCHD_TABLE_2_MEM_BASED_S 0
+#define AR_MCI_SCHD_TABLE_2_HW_BASED 0x00000002
+#define AR_MCI_SCHD_TABLE_2_HW_BASED_S 1
+
+#define AR_BTCOEX_CTRL3 0x1a60
+#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT 0x00000fff
+#define AR_BTCOEX_CTRL3_CONT_INFO_TIMEOUT_S 0
+
#endif
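A small self-contained sketch of how the paired mask/_S definitions added above are typically consumed; FIELD_GET/FIELD_SET are illustrative stand-ins rather than the driver's own accessors, and AR_MCI_COMMAND0_LEN is copied from the list above.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins: extract/insert a field given its mask and
 * the matching _S shift definition. */
#define FIELD_GET(_reg, _f)  (((_reg) & (_f)) >> (_f##_S))
#define FIELD_SET(_val, _f)  (((uint32_t)(_val) << (_f##_S)) & (_f))

/* Copied from the register definitions above. */
#define AR_MCI_COMMAND0_LEN    0x1f00
#define AR_MCI_COMMAND0_LEN_S  8

int main(void)
{
        uint32_t reg = 0;

        reg |= FIELD_SET(12, AR_MCI_COMMAND0_LEN);
        printf("reg=0x%08x len=%u\n", (unsigned int)reg,
               (unsigned int)FIELD_GET(reg, AR_MCI_COMMAND0_LEN));
        return 0;
}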
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index 35422fc..65c8894 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -187,7 +187,7 @@ void ath9k_fatal_work(struct work_struct *work)
fatal_work);
struct ath_common *common = ath9k_hw_common(priv->ah);
- ath_dbg(common, ATH_DBG_FATAL, "FATAL Event received, resetting device\n");
+ ath_dbg(common, FATAL, "FATAL Event received, resetting device\n");
ath9k_htc_reset(priv);
}
@@ -330,8 +330,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
time_left = wait_for_completion_timeout(&wmi->cmd_wait, timeout);
if (!time_left) {
- ath_dbg(common, ATH_DBG_WMI,
- "Timeout waiting for WMI command: %s\n",
+ ath_dbg(common, WMI, "Timeout waiting for WMI command: %s\n",
wmi_cmd_to_name(cmd_id));
mutex_unlock(&wmi->op_mutex);
return -ETIMEDOUT;
@@ -342,8 +341,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
return 0;
out:
- ath_dbg(common, ATH_DBG_WMI,
- "WMI failure for: %s\n", wmi_cmd_to_name(cmd_id));
+ ath_dbg(common, WMI, "WMI failure for: %s\n", wmi_cmd_to_name(cmd_id));
mutex_unlock(&wmi->op_mutex);
kfree_skb(skb);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 03b0a65..3182408 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -53,7 +53,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
int tx_flags, struct ath_txq *txq);
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
struct ath_txq *txq, struct list_head *bf_q,
- struct ath_tx_status *ts, int txok, int sendbar);
+ struct ath_tx_status *ts, int txok);
static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
struct list_head *head, bool internal);
static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
@@ -104,6 +104,32 @@ static int ath_max_4ms_framelen[4][32] = {
/* Aggregation logic */
/*********************/
+static void ath_txq_lock(struct ath_softc *sc, struct ath_txq *txq)
+ __acquires(&txq->axq_lock)
+{
+ spin_lock_bh(&txq->axq_lock);
+}
+
+static void ath_txq_unlock(struct ath_softc *sc, struct ath_txq *txq)
+ __releases(&txq->axq_lock)
+{
+ spin_unlock_bh(&txq->axq_lock);
+}
+
+static void ath_txq_unlock_complete(struct ath_softc *sc, struct ath_txq *txq)
+ __releases(&txq->axq_lock)
+{
+ struct sk_buff_head q;
+ struct sk_buff *skb;
+
+ __skb_queue_head_init(&q);
+ skb_queue_splice_init(&txq->complete_q, &q);
+ spin_unlock_bh(&txq->axq_lock);
+
+ while ((skb = __skb_dequeue(&q)))
+ ieee80211_tx_status(sc->hw, skb);
+}
+
static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
{
struct ath_atx_ac *ac = tid->ac;
@@ -130,7 +156,7 @@ static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
WARN_ON(!tid->paused);
- spin_lock_bh(&txq->axq_lock);
+ ath_txq_lock(sc, txq);
tid->paused = false;
if (skb_queue_empty(&tid->buf_q))
@@ -139,7 +165,7 @@ static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
ath_tx_queue_tid(txq, tid);
ath_txq_schedule(sc, txq);
unlock:
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock_complete(sc, txq);
}
static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
@@ -150,6 +176,12 @@ static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
}
+static void ath_send_bar(struct ath_atx_tid *tid, u16 seqno)
+{
+ ieee80211_send_bar(tid->an->vif, tid->an->sta->addr, tid->tidno,
+ seqno << IEEE80211_SEQ_SEQ_SHIFT);
+}
+
static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
{
struct ath_txq *txq = tid->ac->txq;
@@ -158,28 +190,36 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
struct list_head bf_head;
struct ath_tx_status ts;
struct ath_frame_info *fi;
+ bool sendbar = false;
INIT_LIST_HEAD(&bf_head);
memset(&ts, 0, sizeof(ts));
- spin_lock_bh(&txq->axq_lock);
while ((skb = __skb_dequeue(&tid->buf_q))) {
fi = get_frame_info(skb);
bf = fi->bf;
- spin_unlock_bh(&txq->axq_lock);
if (bf && fi->retries) {
list_add_tail(&bf->list, &bf_head);
ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 1);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+ sendbar = true;
} else {
ath_tx_send_normal(sc, txq, NULL, skb);
}
- spin_lock_bh(&txq->axq_lock);
}
- spin_unlock_bh(&txq->axq_lock);
+ if (tid->baw_head == tid->baw_tail) {
+ tid->state &= ~AGGR_ADDBA_COMPLETE;
+ tid->state &= ~AGGR_CLEANUP;
+ }
+
+ if (sendbar) {
+ ath_txq_unlock(sc, txq);
+ ath_send_bar(tid, tid->seq_start);
+ ath_txq_lock(sc, txq);
+ }
}
static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
@@ -195,6 +235,8 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
while (tid->baw_head != tid->baw_tail && !test_bit(tid->baw_head, tid->tx_buf)) {
INCR(tid->seq_start, IEEE80211_SEQ_MAX);
INCR(tid->baw_head, ATH_TID_MAX_BUFS);
+ if (tid->bar_index >= 0)
+ tid->bar_index--;
}
}
@@ -238,9 +280,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
bf = fi->bf;
if (!bf) {
- spin_unlock(&txq->axq_lock);
ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
- spin_lock(&txq->axq_lock);
continue;
}
@@ -249,24 +289,26 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
if (fi->retries)
ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
- spin_unlock(&txq->axq_lock);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
- spin_lock(&txq->axq_lock);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
}
tid->seq_next = tid->seq_start;
tid->baw_tail = tid->baw_head;
+ tid->bar_index = -1;
}
static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
- struct sk_buff *skb)
+ struct sk_buff *skb, int count)
{
struct ath_frame_info *fi = get_frame_info(skb);
struct ath_buf *bf = fi->bf;
struct ieee80211_hdr *hdr;
+ int prev = fi->retries;
TX_STAT_INC(txq->axq_qnum, a_retries);
- if (fi->retries++ > 0)
+ fi->retries += count;
+
+ if (prev > 0)
return;
hdr = (struct ieee80211_hdr *)skb->data;
@@ -365,7 +407,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
struct list_head bf_head;
struct sk_buff_head bf_pending;
- u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
+ u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0, seq_first;
u32 ba[WME_BA_BMP_SIZE >> 5];
int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
bool rc_update = true;
@@ -374,6 +416,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
int nframes;
u8 tidno;
bool flush = !!(ts->ts_status & ATH9K_TX_FLUSH);
+ int i, retries;
+ int bar_index = -1;
skb = bf->bf_mpdu;
hdr = (struct ieee80211_hdr *)skb->data;
@@ -382,6 +426,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
memcpy(rates, tx_info->control.rates, sizeof(rates));
+ retries = ts->ts_longretry + 1;
+ for (i = 0; i < ts->ts_rateindex; i++)
+ retries += rates[i].count;
+
rcu_read_lock();
sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
@@ -395,8 +443,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (!bf->bf_stale || bf_next != NULL)
list_move_tail(&bf->list, &bf_head);
- ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
- 0, 0);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 0);
bf = bf_next;
}
@@ -406,6 +453,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
an = (struct ath_node *)sta->drv_priv;
tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
tid = ATH_AN_2_TID(an, tidno);
+ seq_first = tid->seq_start;
/*
* The hardware occasionally sends a tx status for the wrong TID.
@@ -455,25 +503,25 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
} else if (!isaggr && txok) {
/* transmit completion */
acked_cnt++;
+ } else if ((tid->state & AGGR_CLEANUP) || !retry) {
+ /*
+ * cleanup in progress, just fail
+ * the un-acked sub-frames
+ */
+ txfail = 1;
+ } else if (flush) {
+ txpending = 1;
+ } else if (fi->retries < ATH_MAX_SW_RETRIES) {
+ if (txok || !an->sleeping)
+ ath_tx_set_retry(sc, txq, bf->bf_mpdu,
+ retries);
+
+ txpending = 1;
} else {
- if ((tid->state & AGGR_CLEANUP) || !retry) {
- /*
- * cleanup in progress, just fail
- * the un-acked sub-frames
- */
- txfail = 1;
- } else if (flush) {
- txpending = 1;
- } else if (fi->retries < ATH_MAX_SW_RETRIES) {
- if (txok || !an->sleeping)
- ath_tx_set_retry(sc, txq, bf->bf_mpdu);
-
- txpending = 1;
- } else {
- txfail = 1;
- sendbar = 1;
- txfail_cnt++;
- }
+ txfail = 1;
+ txfail_cnt++;
+ bar_index = max_t(int, bar_index,
+ ATH_BA_INDEX(seq_first, seqno));
}
/*
@@ -490,9 +538,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
* complete the acked-ones/xretried ones; update
* block-ack window
*/
- spin_lock_bh(&txq->axq_lock);
ath_tx_update_baw(sc, tid, seqno);
- spin_unlock_bh(&txq->axq_lock);
if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
memcpy(tx_info->control.rates, rates, sizeof(rates));
@@ -501,33 +547,30 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
}
ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
- !txfail, sendbar);
+ !txfail);
} else {
/* retry the un-acked ones */
- if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)) {
- if (bf->bf_next == NULL && bf_last->bf_stale) {
- struct ath_buf *tbf;
-
- tbf = ath_clone_txbuf(sc, bf_last);
- /*
- * Update tx baw and complete the
- * frame with failed status if we
- * run out of tx buf.
- */
- if (!tbf) {
- spin_lock_bh(&txq->axq_lock);
- ath_tx_update_baw(sc, tid, seqno);
- spin_unlock_bh(&txq->axq_lock);
-
- ath_tx_complete_buf(sc, bf, txq,
- &bf_head,
- ts, 0,
- !flush);
- break;
- }
-
- fi->bf = tbf;
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
+ bf->bf_next == NULL && bf_last->bf_stale) {
+ struct ath_buf *tbf;
+
+ tbf = ath_clone_txbuf(sc, bf_last);
+ /*
+ * Update tx baw and complete the
+ * frame with failed status if we
+ * run out of tx buf.
+ */
+ if (!tbf) {
+ ath_tx_update_baw(sc, tid, seqno);
+
+ ath_tx_complete_buf(sc, bf, txq,
+ &bf_head, ts, 0);
+ bar_index = max_t(int, bar_index,
+ ATH_BA_INDEX(seq_first, seqno));
+ break;
}
+
+ fi->bf = tbf;
}
/*
@@ -545,7 +588,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (an->sleeping)
ieee80211_sta_set_buffered(sta, tid->tidno, true);
- spin_lock_bh(&txq->axq_lock);
skb_queue_splice(&bf_pending, &tid->buf_q);
if (!an->sleeping) {
ath_tx_queue_tid(txq, tid);
@@ -553,18 +595,22 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
if (ts->ts_status & ATH9K_TXERR_FILT)
tid->ac->clear_ps_filter = true;
}
- spin_unlock_bh(&txq->axq_lock);
}
- if (tid->state & AGGR_CLEANUP) {
- ath_tx_flush_tid(sc, tid);
+ if (bar_index >= 0) {
+ u16 bar_seq = ATH_BA_INDEX2SEQ(seq_first, bar_index);
- if (tid->baw_head == tid->baw_tail) {
- tid->state &= ~AGGR_ADDBA_COMPLETE;
- tid->state &= ~AGGR_CLEANUP;
- }
+ if (BAW_WITHIN(tid->seq_start, tid->baw_size, bar_seq))
+ tid->bar_index = ATH_BA_INDEX(tid->seq_start, bar_seq);
+
+ ath_txq_unlock(sc, txq);
+ ath_send_bar(tid, ATH_BA_INDEX2SEQ(seq_first, bar_index + 1));
+ ath_txq_lock(sc, txq);
}
+ if (tid->state & AGGR_CLEANUP)
+ ath_tx_flush_tid(sc, tid);
+
rcu_read_unlock();
if (needreset) {
@@ -601,6 +647,7 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
struct sk_buff *skb;
struct ieee80211_tx_info *tx_info;
struct ieee80211_tx_rate *rates;
+ struct ath_mci_profile *mci = &sc->btcoex.mci;
u32 max_4ms_framelen, frmlen;
u16 aggr_limit, legacy = 0;
int i;
@@ -617,24 +664,26 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
for (i = 0; i < 4; i++) {
- if (rates[i].count) {
- int modeidx;
- if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
- legacy = 1;
- break;
- }
-
- if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
- modeidx = MCS_HT40;
- else
- modeidx = MCS_HT20;
+ int modeidx;
- if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
- modeidx++;
+ if (!rates[i].count)
+ continue;
- frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
- max_4ms_framelen = min(max_4ms_framelen, frmlen);
+ if (!(rates[i].flags & IEEE80211_TX_RC_MCS)) {
+ legacy = 1;
+ break;
}
+
+ if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
+ modeidx = MCS_HT40;
+ else
+ modeidx = MCS_HT20;
+
+ if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
+ modeidx++;
+
+ frmlen = ath_max_4ms_framelen[modeidx][rates[i].idx];
+ max_4ms_framelen = min(max_4ms_framelen, frmlen);
}
/*
@@ -645,7 +694,9 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
if (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE || legacy)
return 0;
- if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
+ if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && mci->aggr_limit)
+ aggr_limit = (max_4ms_framelen * mci->aggr_limit) >> 4;
+ else if (sc->sc_flags & SC_OP_BT_PRIORITY_DETECTED)
aggr_limit = min((max_4ms_framelen * 3) / 8,
(u32)ATH_AMPDU_LIMIT_MAX);
else
@@ -768,8 +819,6 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
bf->bf_state.bf_type = BUF_AMPDU | BUF_AGGR;
seqno = bf->bf_state.seqno;
- if (!bf_first)
- bf_first = bf;
/* do not step over block-ack window */
if (!BAW_WITHIN(tid->seq_start, tid->baw_size, seqno)) {
@@ -777,6 +826,21 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
break;
}
+ if (tid->bar_index > ATH_BA_INDEX(tid->seq_start, seqno)) {
+ struct ath_tx_status ts = {};
+ struct list_head bf_head;
+
+ INIT_LIST_HEAD(&bf_head);
+ list_add(&bf->list, &bf_head);
+ __skb_unlink(skb, &tid->buf_q);
+ ath_tx_update_baw(sc, tid, seqno);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
+ continue;
+ }
+
+ if (!bf_first)
+ bf_first = bf;
+
if (!rl) {
aggr_limit = ath_lookup_rate(sc, bf, tid);
rl = 1;
@@ -1119,6 +1183,7 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
txtid->state |= AGGR_ADDBA_PROGRESS;
txtid->paused = true;
*ssn = txtid->seq_start = txtid->seq_next;
+ txtid->bar_index = -1;
memset(txtid->tx_buf, 0, sizeof(txtid->tx_buf));
txtid->baw_head = txtid->baw_tail = 0;
@@ -1140,7 +1205,7 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
return;
}
- spin_lock_bh(&txq->axq_lock);
+ ath_txq_lock(sc, txq);
txtid->paused = true;
/*
@@ -1153,9 +1218,9 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
txtid->state |= AGGR_CLEANUP;
else
txtid->state &= ~AGGR_ADDBA_COMPLETE;
- spin_unlock_bh(&txq->axq_lock);
ath_tx_flush_tid(sc, txtid);
+ ath_txq_unlock_complete(sc, txq);
}
void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
@@ -1176,7 +1241,7 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
ac = tid->ac;
txq = ac->txq;
- spin_lock_bh(&txq->axq_lock);
+ ath_txq_lock(sc, txq);
buffered = !skb_queue_empty(&tid->buf_q);
@@ -1188,7 +1253,7 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
list_del(&ac->list);
}
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock(sc, txq);
ieee80211_sta_set_buffered(sta, tidno, buffered);
}
@@ -1207,7 +1272,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
ac = tid->ac;
txq = ac->txq;
- spin_lock_bh(&txq->axq_lock);
+ ath_txq_lock(sc, txq);
ac->clear_ps_filter = true;
if (!skb_queue_empty(&tid->buf_q) && !tid->paused) {
@@ -1215,7 +1280,7 @@ void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an)
ath_txq_schedule(sc, txq);
}
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock_complete(sc, txq);
}
}
@@ -1315,6 +1380,7 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
txq->axq_qnum = axq_qnum;
txq->mac80211_qnum = -1;
txq->axq_link = NULL;
+ __skb_queue_head_init(&txq->complete_q);
INIT_LIST_HEAD(&txq->axq_q);
INIT_LIST_HEAD(&txq->axq_acq);
spin_lock_init(&txq->axq_lock);
@@ -1397,8 +1463,6 @@ static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
struct list_head *list, bool retry_tx)
- __releases(txq->axq_lock)
- __acquires(txq->axq_lock)
{
struct ath_buf *bf, *lastbf;
struct list_head bf_head;
@@ -1425,13 +1489,11 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
if (bf_is_ampdu_not_probing(bf))
txq->axq_ampdu_depth--;
- spin_unlock_bh(&txq->axq_lock);
if (bf_isampdu(bf))
ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
retry_tx);
else
- ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
- spin_lock_bh(&txq->axq_lock);
+ ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
}
}
@@ -1443,7 +1505,8 @@ static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
*/
void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
{
- spin_lock_bh(&txq->axq_lock);
+ ath_txq_lock(sc, txq);
+
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
int idx = txq->txq_tailidx;
@@ -1464,7 +1527,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
if ((sc->sc_flags & SC_OP_TXAGGR) && !retry_tx)
ath_txq_drain_pending_buffers(sc, txq);
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock_complete(sc, txq);
}
bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
@@ -1558,11 +1621,9 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
break;
}
- if (!list_empty(&ac->tid_q)) {
- if (!ac->sched) {
- ac->sched = true;
- list_add_tail(&ac->list, &txq->axq_acq);
- }
+ if (!list_empty(&ac->tid_q) && !ac->sched) {
+ ac->sched = true;
+ list_add_tail(&ac->list, &txq->axq_acq);
}
if (ac == last_ac ||
@@ -1600,8 +1661,8 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
bf = list_first_entry(head, struct ath_buf, list);
bf_last = list_entry(head->prev, struct ath_buf, list);
- ath_dbg(common, ATH_DBG_QUEUE,
- "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
+ ath_dbg(common, QUEUE, "qnum: %d, txq depth: %d\n",
+ txq->axq_qnum, txq->axq_depth);
if (edma && list_empty(&txq->txq_fifo[txq->txq_headidx])) {
list_splice_tail_init(head, &txq->txq_fifo[txq->txq_headidx]);
@@ -1612,8 +1673,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
if (txq->axq_link) {
ath9k_hw_set_desc_link(ah, txq->axq_link, bf->bf_daddr);
- ath_dbg(common, ATH_DBG_XMIT,
- "link[%u] (%p)=%llx (%p)\n",
+ ath_dbg(common, XMIT, "link[%u] (%p)=%llx (%p)\n",
txq->axq_qnum, txq->axq_link,
ito64(bf->bf_daddr), bf->bf_desc);
} else if (!edma)
@@ -1625,7 +1685,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
if (puttxbuf) {
TX_STAT_INC(txq->axq_qnum, puttxbuf);
ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
- ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
+ ath_dbg(common, XMIT, "TXDP[%u] = %llx (%p)\n",
txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
}
@@ -1705,10 +1765,6 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
list_add_tail(&bf->list, &bf_head);
bf->bf_state.bf_type = 0;
- /* update starting sequence number for subsequent ADDBA request */
- if (tid)
- INCR(tid->seq_start, IEEE80211_SEQ_MAX);
-
bf->bf_lastbf = bf;
ath_tx_fill_desc(sc, bf, txq, fi->framelen);
ath_tx_txqaddbuf(sc, txq, &bf_head, false);
@@ -1771,7 +1827,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
bf = ath_tx_get_buffer(sc);
if (!bf) {
- ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
+ ath_dbg(common, XMIT, "TX buffers are full\n");
goto error;
}
@@ -1816,7 +1872,6 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
struct ath_buf *bf;
u8 tidno;
- spin_lock_bh(&txctl->txq->axq_lock);
if ((sc->sc_flags & SC_OP_TXAGGR) && txctl->an &&
ieee80211_is_data_qos(hdr->frame_control)) {
tidno = ieee80211_get_qos_ctl(hdr)[0] &
@@ -1835,7 +1890,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
} else {
bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
if (!bf)
- goto out;
+ return;
bf->bf_state.bfs_paprd = txctl->paprd;
@@ -1844,9 +1899,6 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
ath_tx_send_normal(sc, txctl->txq, tid, skb);
}
-
-out:
- spin_unlock_bh(&txctl->txq->axq_lock);
}
/* Upon failure caller should free skb */
@@ -1907,15 +1959,18 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
*/
q = skb_get_queue_mapping(skb);
- spin_lock_bh(&txq->axq_lock);
+
+ ath_txq_lock(sc, txq);
if (txq == sc->tx.txq_map[q] &&
++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
ieee80211_stop_queue(sc->hw, q);
- txq->stopped = 1;
+ txq->stopped = true;
}
- spin_unlock_bh(&txq->axq_lock);
ath_tx_start_dma(sc, skb, txctl);
+
+ ath_txq_unlock(sc, txq);
+
return 0;
}
@@ -1926,16 +1981,12 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
int tx_flags, struct ath_txq *txq)
{
- struct ieee80211_hw *hw = sc->hw;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
int q, padpos, padsize;
- ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
-
- if (tx_flags & ATH_TX_BAR)
- tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+ ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb);
if (!(tx_flags & ATH_TX_ERROR))
/* Frame was ACKed */
@@ -1952,9 +2003,9 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
skb_pull(skb, padsize);
}
- if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
+ if ((sc->ps_flags & PS_WAIT_FOR_TX_ACK) && !txq->axq_depth) {
sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
- ath_dbg(common, ATH_DBG_PS,
+ ath_dbg(common, PS,
"Going back to sleep after having received TX status (0x%lx)\n",
sc->ps_flags & (PS_WAIT_FOR_BEACON |
PS_WAIT_FOR_CAB |
@@ -1964,32 +2015,27 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
q = skb_get_queue_mapping(skb);
if (txq == sc->tx.txq_map[q]) {
- spin_lock_bh(&txq->axq_lock);
if (WARN_ON(--txq->pending_frames < 0))
txq->pending_frames = 0;
if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
ieee80211_wake_queue(sc->hw, q);
- txq->stopped = 0;
+ txq->stopped = false;
}
- spin_unlock_bh(&txq->axq_lock);
}
- ieee80211_tx_status(hw, skb);
+ __skb_queue_tail(&txq->complete_q, skb);
}
static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
struct ath_txq *txq, struct list_head *bf_q,
- struct ath_tx_status *ts, int txok, int sendbar)
+ struct ath_tx_status *ts, int txok)
{
struct sk_buff *skb = bf->bf_mpdu;
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
unsigned long flags;
int tx_flags = 0;
- if (sendbar)
- tx_flags = ATH_TX_BAR;
-
if (!txok)
tx_flags |= ATH_TX_ERROR;
@@ -2081,8 +2127,6 @@ static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
struct ath_tx_status *ts, struct ath_buf *bf,
struct list_head *bf_head)
- __releases(txq->axq_lock)
- __acquires(txq->axq_lock)
{
int txok;
@@ -2092,16 +2136,12 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
if (bf_is_ampdu_not_probing(bf))
txq->axq_ampdu_depth--;
- spin_unlock_bh(&txq->axq_lock);
-
if (!bf_isampdu(bf)) {
ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
- ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok, 0);
+ ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
} else
ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
- spin_lock_bh(&txq->axq_lock);
-
if (sc->sc_flags & SC_OP_TXAGGR)
ath_txq_schedule(sc, txq);
}
@@ -2116,11 +2156,11 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
struct ath_tx_status ts;
int status;
- ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
+ ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n",
txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
txq->axq_link);
- spin_lock_bh(&txq->axq_lock);
+ ath_txq_lock(sc, txq);
for (;;) {
if (work_pending(&sc->hw_reset_work))
break;
@@ -2179,7 +2219,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
}
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock_complete(sc, txq);
}
static void ath_tx_complete_poll_work(struct work_struct *work)
@@ -2196,21 +2236,21 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
if (ATH_TXQ_SETUP(sc, i)) {
txq = &sc->tx.txq[i];
- spin_lock_bh(&txq->axq_lock);
+ ath_txq_lock(sc, txq);
if (txq->axq_depth) {
if (txq->axq_tx_inprogress) {
needreset = true;
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock(sc, txq);
break;
} else {
txq->axq_tx_inprogress = true;
}
}
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock_complete(sc, txq);
}
if (needreset) {
- ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
+ ath_dbg(ath9k_hw_common(sc->sc_ah), RESET,
"tx hung, resetting the chip\n");
RESET_STAT_INC(sc, RESET_TYPE_TX_HANG);
ieee80211_queue_work(sc->hw, &sc->hw_reset_work);
@@ -2253,8 +2293,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
if (status == -EINPROGRESS)
break;
if (status == -EIO) {
- ath_dbg(common, ATH_DBG_XMIT,
- "Error processing tx status\n");
+ ath_dbg(common, XMIT, "Error processing tx status\n");
break;
}
@@ -2264,10 +2303,10 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
txq = &sc->tx.txq[ts.qid];
- spin_lock_bh(&txq->axq_lock);
+ ath_txq_lock(sc, txq);
if (list_empty(&txq->txq_fifo[txq->txq_tailidx])) {
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock(sc, txq);
return;
}
@@ -2293,7 +2332,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
}
ath_tx_process_buffer(sc, txq, &ts, bf, &bf_head);
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock_complete(sc, txq);
}
}
@@ -2431,7 +2470,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
ac = tid->ac;
txq = ac->txq;
- spin_lock_bh(&txq->axq_lock);
+ ath_txq_lock(sc, txq);
if (tid->sched) {
list_del(&tid->list);
@@ -2447,6 +2486,6 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
tid->state &= ~AGGR_ADDBA_COMPLETE;
tid->state &= ~AGGR_CLEANUP;
- spin_unlock_bh(&txq->axq_lock);
+ ath_txq_unlock(sc, txq);
}
}
diff --git a/drivers/net/wireless/ath/carl9170/debug.c b/drivers/net/wireless/ath/carl9170/debug.c
index de57f90..3c16422 100644
--- a/drivers/net/wireless/ath/carl9170/debug.c
+++ b/drivers/net/wireless/ath/carl9170/debug.c
@@ -56,7 +56,7 @@ static int carl9170_debugfs_open(struct inode *inode, struct file *file)
struct carl9170_debugfs_fops {
unsigned int read_bufsize;
- mode_t attr;
+ umode_t attr;
char *(*read)(struct ar9170 *ar, char *buf, size_t bufsize,
ssize_t *len);
ssize_t (*write)(struct ar9170 *aru, const char *buf, size_t size);
diff --git a/drivers/net/wireless/ath/carl9170/fw.c b/drivers/net/wireless/ath/carl9170/fw.c
index cba9d04..3de61ad 100644
--- a/drivers/net/wireless/ath/carl9170/fw.c
+++ b/drivers/net/wireless/ath/carl9170/fw.c
@@ -146,13 +146,15 @@ static bool valid_cpu_addr(const u32 address)
return false;
}
-static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
+static int carl9170_fw_checksum(struct ar9170 *ar, const __u8 *data,
+ size_t len)
{
const struct carl9170fw_otus_desc *otus_desc;
- const struct carl9170fw_chk_desc *chk_desc;
const struct carl9170fw_last_desc *last_desc;
- const struct carl9170fw_txsq_desc *txsq_desc;
- u16 if_comb_types;
+ const struct carl9170fw_chk_desc *chk_desc;
+ unsigned long fin, diff;
+ unsigned int dsc_len;
+ u32 crc32;
last_desc = carl9170_fw_find_desc(ar, LAST_MAGIC,
sizeof(*last_desc), CARL9170FW_LAST_DESC_CUR_VER);
@@ -170,36 +172,68 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
chk_desc = carl9170_fw_find_desc(ar, CHK_MAGIC,
sizeof(*chk_desc), CARL9170FW_CHK_DESC_CUR_VER);
- if (chk_desc) {
- unsigned long fin, diff;
- unsigned int dsc_len;
- u32 crc32;
+ if (!chk_desc) {
+ dev_warn(&ar->udev->dev, "Unprotected firmware image.\n");
+ return 0;
+ }
- dsc_len = min_t(unsigned int, len,
+ dsc_len = min_t(unsigned int, len,
(unsigned long)chk_desc - (unsigned long)otus_desc);
- fin = (unsigned long) last_desc + sizeof(*last_desc);
- diff = fin - (unsigned long) otus_desc;
+ fin = (unsigned long) last_desc + sizeof(*last_desc);
+ diff = fin - (unsigned long) otus_desc;
- if (diff < len)
- len -= diff;
+ if (diff < len)
+ len -= diff;
- if (len < 256)
- return -EIO;
+ if (len < 256)
+ return -EIO;
- crc32 = crc32_le(~0, data, len);
- if (cpu_to_le32(crc32) != chk_desc->fw_crc32) {
- dev_err(&ar->udev->dev, "fw checksum test failed.\n");
- return -ENOEXEC;
- }
+ crc32 = crc32_le(~0, data, len);
+ if (cpu_to_le32(crc32) != chk_desc->fw_crc32) {
+ dev_err(&ar->udev->dev, "fw checksum test failed.\n");
+ return -ENOEXEC;
+ }
+
+ crc32 = crc32_le(crc32, (void *)otus_desc, dsc_len);
+ if (cpu_to_le32(crc32) != chk_desc->hdr_crc32) {
+ dev_err(&ar->udev->dev, "descriptor check failed.\n");
+ return -EINVAL;
+ }
+ return 0;
+}
- crc32 = crc32_le(crc32, (void *)otus_desc, dsc_len);
- if (cpu_to_le32(crc32) != chk_desc->hdr_crc32) {
- dev_err(&ar->udev->dev, "descriptor check failed.\n");
+static int carl9170_fw_tx_sequence(struct ar9170 *ar)
+{
+ const struct carl9170fw_txsq_desc *txsq_desc;
+
+ txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC, sizeof(*txsq_desc),
+ CARL9170FW_TXSQ_DESC_CUR_VER);
+ if (txsq_desc) {
+ ar->fw.tx_seq_table = le32_to_cpu(txsq_desc->seq_table_addr);
+ if (!valid_cpu_addr(ar->fw.tx_seq_table))
return -EINVAL;
- }
} else {
- dev_warn(&ar->udev->dev, "Unprotected firmware image.\n");
+ ar->fw.tx_seq_table = 0;
+ }
+
+ return 0;
+}
+
+static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
+{
+ const struct carl9170fw_otus_desc *otus_desc;
+ int err;
+ u16 if_comb_types;
+
+ err = carl9170_fw_checksum(ar, data, len);
+ if (err)
+ return err;
+
+ otus_desc = carl9170_fw_find_desc(ar, OTUS_MAGIC,
+ sizeof(*otus_desc), CARL9170FW_OTUS_DESC_CUR_VER);
+ if (!otus_desc) {
+ return -ENODATA;
}
#define SUPP(feat) \
@@ -321,19 +355,8 @@ static int carl9170_fw(struct ar9170 *ar, const __u8 *data, size_t len)
ar->hw->wiphy->interface_modes |= if_comb_types;
- txsq_desc = carl9170_fw_find_desc(ar, TXSQ_MAGIC,
- sizeof(*txsq_desc), CARL9170FW_TXSQ_DESC_CUR_VER);
-
- if (txsq_desc) {
- ar->fw.tx_seq_table = le32_to_cpu(txsq_desc->seq_table_addr);
- if (!valid_cpu_addr(ar->fw.tx_seq_table))
- return -EINVAL;
- } else {
- ar->fw.tx_seq_table = 0;
- }
-
#undef SUPPORTED
- return 0;
+ return carl9170_fw_tx_sequence(ar);
}
static struct carl9170fw_desc_head *
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index f06e069..db77421 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -48,7 +48,7 @@
#include "carl9170.h"
#include "cmd.h"
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware crypto offload.");
@@ -446,7 +446,7 @@ static void carl9170_op_stop(struct ieee80211_hw *hw)
mutex_lock(&ar->mutex);
if (IS_ACCEPTING_CMD(ar)) {
- rcu_assign_pointer(ar->beacon_iter, NULL);
+ RCU_INIT_POINTER(ar->beacon_iter, NULL);
carl9170_led_set_state(ar, 0);
@@ -678,7 +678,7 @@ unlock:
vif_priv->active = false;
bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
ar->vifs--;
- rcu_assign_pointer(ar->vif_priv[vif_id].vif, NULL);
+ RCU_INIT_POINTER(ar->vif_priv[vif_id].vif, NULL);
list_del_rcu(&vif_priv->list);
mutex_unlock(&ar->mutex);
synchronize_rcu();
@@ -716,7 +716,7 @@ static void carl9170_op_remove_interface(struct ieee80211_hw *hw,
WARN_ON(vif_priv->enable_beacon);
vif_priv->enable_beacon = false;
list_del_rcu(&vif_priv->list);
- rcu_assign_pointer(ar->vif_priv[id].vif, NULL);
+ RCU_INIT_POINTER(ar->vif_priv[id].vif, NULL);
if (vif == main_vif) {
rcu_read_unlock();
@@ -1258,7 +1258,7 @@ static int carl9170_op_sta_add(struct ieee80211_hw *hw,
}
for (i = 0; i < CARL9170_NUM_TID; i++)
- rcu_assign_pointer(sta_info->agg[i], NULL);
+ RCU_INIT_POINTER(sta_info->agg[i], NULL);
sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
sta_info->ht_sta = true;
@@ -1285,7 +1285,7 @@ static int carl9170_op_sta_remove(struct ieee80211_hw *hw,
struct carl9170_sta_tid *tid_info;
tid_info = rcu_dereference(sta_info->agg[i]);
- rcu_assign_pointer(sta_info->agg[i], NULL);
+ RCU_INIT_POINTER(sta_info->agg[i], NULL);
if (!tid_info)
continue;
@@ -1398,7 +1398,7 @@ static int carl9170_op_ampdu_action(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->tx_ampdu_list_lock);
}
- rcu_assign_pointer(sta_info->agg[tid], NULL);
+ RCU_INIT_POINTER(sta_info->agg[tid], NULL);
rcu_read_unlock();
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 59472e1..bbc813d 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -314,7 +314,7 @@ static void carl9170_tx_release(struct kref *ref)
* feedback either [CTL_REQ_TX_STATUS not set]
*/
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(ar->hw, skb);
return;
} else {
/*
@@ -1234,6 +1234,7 @@ static bool carl9170_tx_ps_drop(struct ar9170 *ar, struct sk_buff *skb)
{
struct ieee80211_sta *sta;
struct carl9170_sta_info *sta_info;
+ struct ieee80211_tx_info *tx_info;
rcu_read_lock();
sta = __carl9170_get_tx_sta(ar, skb);
@@ -1241,16 +1242,18 @@ static bool carl9170_tx_ps_drop(struct ar9170 *ar, struct sk_buff *skb)
goto out_rcu;
sta_info = (void *) sta->drv_priv;
- if (unlikely(sta_info->sleeping)) {
- struct ieee80211_tx_info *tx_info;
+ tx_info = IEEE80211_SKB_CB(skb);
+ if (unlikely(sta_info->sleeping) &&
+ !(tx_info->flags & (IEEE80211_TX_CTL_POLL_RESPONSE |
+ IEEE80211_TX_CTL_CLEAR_PS_FILT))) {
rcu_read_unlock();
- tx_info = IEEE80211_SKB_CB(skb);
if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
atomic_dec(&ar->tx_ampdu_upload);
tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
+ carl9170_release_dev_space(ar, skb);
carl9170_tx_status(ar, skb, false);
return true;
}
@@ -1432,7 +1435,7 @@ void carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
err_free:
ar->tx_dropped++;
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(ar->hw, skb);
}
void carl9170_tx_scheduler(struct ar9170 *ar)
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 333b69e..89821e4 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -1161,15 +1161,4 @@ static struct usb_driver carl9170_driver = {
#endif /* CONFIG_PM */
};
-static int __init carl9170_usb_init(void)
-{
- return usb_register(&carl9170_driver);
-}
-
-static void __exit carl9170_usb_exit(void)
-{
- usb_deregister(&carl9170_driver);
-}
-
-module_init(carl9170_usb_init);
-module_exit(carl9170_usb_exit);
+module_usb_driver(carl9170_driver);
diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
index 4cf7c5e..0e81904 100644
--- a/drivers/net/wireless/ath/key.c
+++ b/drivers/net/wireless/ath/key.c
@@ -143,7 +143,7 @@ static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
break;
case ATH_CIPHER_AES_CCM:
if (!(common->crypt_caps & ATH_CRYPT_CAP_CIPHER_AESCCM)) {
- ath_dbg(common, ATH_DBG_ANY,
+ ath_dbg(common, ANY,
"AES-CCM not supported by this mac rev\n");
return false;
}
@@ -152,15 +152,15 @@ static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
case ATH_CIPHER_TKIP:
keyType = AR_KEYTABLE_TYPE_TKIP;
if (entry + 64 >= common->keymax) {
- ath_dbg(common, ATH_DBG_ANY,
+ ath_dbg(common, ANY,
"entry %u inappropriate for TKIP\n", entry);
return false;
}
break;
case ATH_CIPHER_WEP:
if (k->kv_len < WLAN_KEY_LEN_WEP40) {
- ath_dbg(common, ATH_DBG_ANY,
- "WEP key length %u too small\n", k->kv_len);
+ ath_dbg(common, ANY, "WEP key length %u too small\n",
+ k->kv_len);
return false;
}
if (k->kv_len <= WLAN_KEY_LEN_WEP40)
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 65ecb5b..10dea37 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -21,6 +21,8 @@
#include "regd.h"
#include "regd_common.h"
+static int __ath_regd_init(struct ath_regulatory *reg);
+
/*
* This is a set of common rules used by our world regulatory domains.
* We have 12 world regulatory domains. To save space we consolidate
@@ -347,10 +349,26 @@ static void ath_reg_apply_world_flags(struct wiphy *wiphy,
}
}
+static u16 ath_regd_find_country_by_name(char *alpha2)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
+ if (!memcmp(allCountries[i].isoName, alpha2, 2))
+ return allCountries[i].countryCode;
+ }
+
+ return -1;
+}
+
int ath_reg_notifier_apply(struct wiphy *wiphy,
struct regulatory_request *request,
struct ath_regulatory *reg)
{
+ struct ath_common *common = container_of(reg, struct ath_common,
+ regulatory);
+ u16 country_code;
+
/* We always apply this */
ath_reg_apply_radar_flags(wiphy);
@@ -363,14 +381,37 @@ int ath_reg_notifier_apply(struct wiphy *wiphy,
return 0;
switch (request->initiator) {
- case NL80211_REGDOM_SET_BY_DRIVER:
case NL80211_REGDOM_SET_BY_CORE:
+ /*
+ * If common->reg_world_copy is world roaming it means we *were*
+ * world roaming... so we now have to restore that data.
+ */
+ if (!ath_is_world_regd(&common->reg_world_copy))
+ break;
+
+ memcpy(reg, &common->reg_world_copy,
+ sizeof(struct ath_regulatory));
+ break;
+ case NL80211_REGDOM_SET_BY_DRIVER:
case NL80211_REGDOM_SET_BY_USER:
break;
case NL80211_REGDOM_SET_BY_COUNTRY_IE:
- if (ath_is_world_regd(reg))
- ath_reg_apply_world_flags(wiphy, request->initiator,
- reg);
+ if (!ath_is_world_regd(reg))
+ break;
+
+ country_code = ath_regd_find_country_by_name(request->alpha2);
+ if (country_code == (u16) -1)
+ break;
+
+ reg->current_rd = COUNTRY_ERD_FLAG;
+ reg->current_rd |= country_code;
+
+ printk(KERN_DEBUG "ath: regdomain 0x%0x updated by CountryIE\n",
+ reg->current_rd);
+ __ath_regd_init(reg);
+
+ ath_reg_apply_world_flags(wiphy, request->initiator, reg);
+
break;
}
@@ -508,11 +549,7 @@ static void ath_regd_sanitize(struct ath_regulatory *reg)
reg->current_rd = 0x64;
}
-int
-ath_regd_init(struct ath_regulatory *reg,
- struct wiphy *wiphy,
- int (*reg_notifier)(struct wiphy *wiphy,
- struct regulatory_request *request))
+static int __ath_regd_init(struct ath_regulatory *reg)
{
struct country_code_to_enum_rd *country = NULL;
u16 regdmn;
@@ -583,7 +620,29 @@ ath_regd_init(struct ath_regulatory *reg,
printk(KERN_DEBUG "ath: Regpair used: 0x%0x\n",
reg->regpair->regDmnEnum);
+ return 0;
+}
+
+int
+ath_regd_init(struct ath_regulatory *reg,
+ struct wiphy *wiphy,
+ int (*reg_notifier)(struct wiphy *wiphy,
+ struct regulatory_request *request))
+{
+ struct ath_common *common = container_of(reg, struct ath_common,
+ regulatory);
+ int r;
+
+ r = __ath_regd_init(reg);
+ if (r)
+ return r;
+
+ if (ath_is_world_regd(reg))
+ memcpy(&common->reg_world_copy, reg,
+ sizeof(struct ath_regulatory));
+
ath_regd_init_wiphy(reg, wiphy, reg_notifier);
+
return 0;
}
EXPORT_SYMBOL(ath_regd_init);
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index b97a40e..3876c7e 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -31,6 +31,12 @@ config B43_BCMA
depends on B43 && BCMA
default y
+config B43_BCMA_EXTRA
+ bool "Hardware support that overlaps with the brcmsmac driver"
+ depends on B43_BCMA
+ default n if BRCMSMAC || BRCMSMAC_MODULE
+ default y
+
config B43_SSB
bool
depends on B43 && SSB
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 37110df..16e8f80 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -191,6 +191,9 @@
#define B43_BFH_BUCKBOOST 0x0020 /* has buck/booster */
#define B43_BFH_FEM_BT 0x0040 /* has FEM and switch to share antenna
* with bluetooth */
+#define B43_BFH_NOCBUCK 0x0080
+#define B43_BFH_PALDO 0x0200
+#define B43_BFH_EXTLNA_5GHZ 0x1000 /* has an external LNA (5GHz mode) */
/* SPROM boardflags2_lo values */
#define B43_BFL2_RXBB_INT_REG_DIS 0x0001 /* external RX BB regulator present */
@@ -204,6 +207,14 @@
#define B43_BFL2_SKWRKFEM_BRD 0x0100 /* 4321mcm93 uses Skyworks FEM */
#define B43_BFL2_SPUR_WAR 0x0200 /* has a workaround for clock-harmonic spurs */
#define B43_BFL2_GPLL_WAR 0x0400 /* altenative G-band PLL settings implemented */
+#define B43_BFL2_SINGLEANT_CCK 0x1000
+#define B43_BFL2_2G_SPUR_WAR 0x2000
+
+/* SPROM boardflags2_hi values */
+#define B43_BFH2_GPLL_WAR2 0x0001
+#define B43_BFH2_IPALVLSHIFT_3P3 0x0002
+#define B43_BFH2_INTERNDET_TXIQCAL 0x0004
+#define B43_BFH2_XTALBUFOUTEN 0x0008
/* GPIO register offset, in both ChipCommon and PCI core. */
#define B43_GPIO_CONTROL 0x6c
@@ -667,6 +678,7 @@ struct b43_key {
};
/* SHM offsets to the QOS data structures for the 4 different queues. */
+#define B43_QOS_QUEUE_NUM 4
#define B43_QOS_PARAMS(queue) (B43_SHM_SH_EDCFQ + \
(B43_NR_QOSPARAMS * sizeof(u16) * (queue)))
#define B43_QOS_BACKGROUND B43_QOS_PARAMS(0)
@@ -904,7 +916,7 @@ struct b43_wl {
struct work_struct beacon_update_trigger;
/* The current QOS parameters for the 4 queues. */
- struct b43_qos_params qos_params[4];
+ struct b43_qos_params qos_params[B43_QOS_QUEUE_NUM];
/* Work for adjustment of the transmission power.
* This is scheduled when we determine that the actual TX output
@@ -913,8 +925,12 @@ struct b43_wl {
/* Packet transmit work */
struct work_struct tx_work;
+
/* Queue of packets to be transmitted. */
- struct sk_buff_head tx_queue;
+ struct sk_buff_head tx_queue[B43_QOS_QUEUE_NUM];
+
+ /* Flags that implement the stopping of the queues. */
+ bool tx_queue_stopped[B43_QOS_QUEUE_NUM];
/* The device LEDs. */
struct b43_leds leds;
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 5e45604..b5f1b91 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -890,7 +890,7 @@ struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
else
ring->ops = &dma32_ops;
if (for_tx) {
- ring->tx = 1;
+ ring->tx = true;
ring->current_slot = -1;
} else {
if (ring->index == 0) {
@@ -1061,7 +1061,7 @@ void b43_dma_free(struct b43_wldev *dev)
static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
u64 orig_mask = mask;
- bool fallback = 0;
+ bool fallback = false;
int err;
/* Try to set the DMA mask. If it fails, try falling back to a
@@ -1075,12 +1075,12 @@ static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
}
if (mask == DMA_BIT_MASK(64)) {
mask = DMA_BIT_MASK(32);
- fallback = 1;
+ fallback = true;
continue;
}
if (mask == DMA_BIT_MASK(32)) {
mask = DMA_BIT_MASK(30);
- fallback = 1;
+ fallback = true;
continue;
}
b43err(dev->wl, "The machine/kernel does not support "
@@ -1307,7 +1307,7 @@ static int dma_tx_fragment(struct b43_dmaring *ring,
memset(meta, 0, sizeof(*meta));
meta->skb = skb;
- meta->is_last_fragment = 1;
+ meta->is_last_fragment = true;
priv_info->bouncebuffer = NULL;
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
@@ -1465,8 +1465,10 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
should_inject_overflow(ring)) {
/* This TX ring is full. */
- ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
- ring->stopped = 1;
+ unsigned int skb_mapping = skb_get_queue_mapping(skb);
+ ieee80211_stop_queue(dev->wl->hw, skb_mapping);
+ dev->wl->tx_queue_stopped[skb_mapping] = 1;
+ ring->stopped = true;
if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
}
@@ -1584,12 +1586,21 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
}
if (ring->stopped) {
B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
+ ring->stopped = false;
+ }
+
+ if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
+ dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
+ } else {
+ /* If the driver queue is running, wake the corresponding
+ * mac80211 queue. */
ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
- ring->stopped = 0;
if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
}
}
+ /* Add work to the queue. */
+ ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}
static void dma_rx(struct b43_dmaring *ring, int *slot)
diff --git a/drivers/net/wireless/b43/leds.c b/drivers/net/wireless/b43/leds.c
index a38c1c6..d79ab2a 100644
--- a/drivers/net/wireless/b43/leds.c
+++ b/drivers/net/wireless/b43/leds.c
@@ -74,7 +74,7 @@ static void b43_led_update(struct b43_wldev *dev,
if (radio_enabled)
turn_on = atomic_read(&led->state) != LED_OFF;
else
- turn_on = 0;
+ turn_on = false;
if (turn_on == led->hw_state)
return;
led->hw_state = turn_on;
@@ -225,11 +225,11 @@ static void b43_led_get_sprominfo(struct b43_wldev *dev,
if (sprom[led_index] == 0xFF) {
/* There is no LED information in the SPROM
* for this LED. Hardcode it here. */
- *activelow = 0;
+ *activelow = false;
switch (led_index) {
case 0:
*behaviour = B43_LED_ACTIVITY;
- *activelow = 1;
+ *activelow = true;
if (dev->dev->board_vendor == PCI_VENDOR_ID_COMPAQ)
*behaviour = B43_LED_RADIO_ALL;
break;
@@ -267,11 +267,11 @@ void b43_leds_init(struct b43_wldev *dev)
if (led->wl) {
if (dev->phy.radio_on && b43_is_hw_radio_enabled(dev)) {
b43_led_turn_on(dev, led->index, led->activelow);
- led->hw_state = 1;
+ led->hw_state = true;
atomic_set(&led->state, 1);
} else {
b43_led_turn_off(dev, led->index, led->activelow);
- led->hw_state = 0;
+ led->hw_state = false;
atomic_set(&led->state, 0);
}
}
@@ -280,19 +280,19 @@ void b43_leds_init(struct b43_wldev *dev)
led = &dev->wl->leds.led_tx;
if (led->wl) {
b43_led_turn_off(dev, led->index, led->activelow);
- led->hw_state = 0;
+ led->hw_state = false;
atomic_set(&led->state, 0);
}
led = &dev->wl->leds.led_rx;
if (led->wl) {
b43_led_turn_off(dev, led->index, led->activelow);
- led->hw_state = 0;
+ led->hw_state = false;
atomic_set(&led->state, 0);
}
led = &dev->wl->leds.led_assoc;
if (led->wl) {
b43_led_turn_off(dev, led->index, led->activelow);
- led->hw_state = 0;
+ led->hw_state = false;
atomic_set(&led->state, 0);
}
diff --git a/drivers/net/wireless/b43/lo.c b/drivers/net/wireless/b43/lo.c
index 4c82d58..916123a 100644
--- a/drivers/net/wireless/b43/lo.c
+++ b/drivers/net/wireless/b43/lo.c
@@ -826,7 +826,7 @@ void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all)
const struct b43_rfatt *rfatt;
const struct b43_bbatt *bbatt;
u64 power_vector;
- bool table_changed = 0;
+ bool table_changed = false;
BUILD_BUG_ON(B43_DC_LT_SIZE != 32);
B43_WARN_ON(lo->rfatt_list.len * lo->bbatt_list.len > 64);
@@ -876,7 +876,7 @@ void b43_gphy_dc_lt_init(struct b43_wldev *dev, bool update_all)
lo->dc_lt[idx] = (lo->dc_lt[idx] & 0xFF00)
| (val & 0x00FF);
}
- table_changed = 1;
+ table_changed = true;
}
if (table_changed) {
/* The table changed in memory. Update the hardware table. */
@@ -938,7 +938,7 @@ void b43_lo_g_maintanance_work(struct b43_wldev *dev)
unsigned long now;
unsigned long expire;
struct b43_lo_calib *cal, *tmp;
- bool current_item_expired = 0;
+ bool current_item_expired = false;
bool hwpctl;
if (!lo)
@@ -968,7 +968,7 @@ void b43_lo_g_maintanance_work(struct b43_wldev *dev)
if (b43_compare_bbatt(&cal->bbatt, &gphy->bbatt) &&
b43_compare_rfatt(&cal->rfatt, &gphy->rfatt)) {
B43_WARN_ON(current_item_expired);
- current_item_expired = 1;
+ current_item_expired = true;
}
if (b43_debug(dev, B43_DBG_LO)) {
b43dbg(dev->wl, "LO: Item BB(%u), RF(%u,%u), "
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 5634d9a..23ffb1b 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -116,8 +116,10 @@ MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO");
#ifdef CONFIG_B43_BCMA
static const struct bcma_device_id b43_bcma_tbl[] = {
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x11, BCMA_ANY_CLASS),
+#ifdef CONFIG_B43_BCMA_EXTRA
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x17, BCMA_ANY_CLASS),
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x18, BCMA_ANY_CLASS),
+#endif
BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x1D, BCMA_ANY_CLASS),
BCMA_CORETABLE_END
};
@@ -1122,17 +1124,17 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags)
B43_WARN_ON((ps_flags & B43_PS_AWAKE) && (ps_flags & B43_PS_ASLEEP));
if (ps_flags & B43_PS_ENABLED) {
- hwps = 1;
+ hwps = true;
} else if (ps_flags & B43_PS_DISABLED) {
- hwps = 0;
+ hwps = false;
} else {
//TODO: If powersave is not off and FIXME is not set and we are not in adhoc
// and thus is not an AP and we are associated, set bit 25
}
if (ps_flags & B43_PS_AWAKE) {
- awake = 1;
+ awake = true;
} else if (ps_flags & B43_PS_ASLEEP) {
- awake = 0;
+ awake = false;
} else {
//TODO: If the device is awake or this is an AP, or we are scanning, or FIXME,
// or we are associated, or FIXME, or the latest PS-Poll packet sent was
@@ -1140,8 +1142,8 @@ void b43_power_saving_ctl_bits(struct b43_wldev *dev, unsigned int ps_flags)
}
/* FIXME: For now we force awake-on and hwps-off */
- hwps = 0;
- awake = 1;
+ hwps = false;
+ awake = true;
macctl = b43_read32(dev, B43_MMIO_MACCTL);
if (hwps)
@@ -1339,7 +1341,7 @@ static void b43_calculate_link_quality(struct b43_wldev *dev)
return;
if (dev->noisecalc.calculation_running)
return;
- dev->noisecalc.calculation_running = 1;
+ dev->noisecalc.calculation_running = true;
dev->noisecalc.nr_samples = 0;
b43_generate_noise_sample(dev);
@@ -1408,7 +1410,7 @@ static void handle_irq_noise(struct b43_wldev *dev)
average -= 48;
dev->stats.link_noise = average;
- dev->noisecalc.calculation_running = 0;
+ dev->noisecalc.calculation_running = false;
return;
}
generate_new:
@@ -1424,7 +1426,7 @@ static void handle_irq_tbtt_indication(struct b43_wldev *dev)
b43_power_saving_ctl_bits(dev, 0);
}
if (b43_is_mode(dev->wl, NL80211_IFTYPE_ADHOC))
- dev->dfq_valid = 1;
+ dev->dfq_valid = true;
}
static void handle_irq_atim_end(struct b43_wldev *dev)
@@ -1433,7 +1435,7 @@ static void handle_irq_atim_end(struct b43_wldev *dev)
b43_write32(dev, B43_MMIO_MACCMD,
b43_read32(dev, B43_MMIO_MACCMD)
| B43_MACCMD_DFQ_VALID);
- dev->dfq_valid = 0;
+ dev->dfq_valid = false;
}
}
@@ -1539,7 +1541,7 @@ static void b43_write_beacon_template(struct b43_wldev *dev,
unsigned int i, len, variable_len;
const struct ieee80211_mgmt *bcn;
const u8 *ie;
- bool tim_found = 0;
+ bool tim_found = false;
unsigned int rate;
u16 ctl;
int antenna;
@@ -1588,7 +1590,7 @@ static void b43_write_beacon_template(struct b43_wldev *dev,
/* A valid TIM is at least 4 bytes long. */
if (ie_len < 4)
break;
- tim_found = 1;
+ tim_found = true;
tim_position = sizeof(struct b43_plcp_hdr6);
tim_position += offsetof(struct ieee80211_mgmt, u.beacon.variable);
@@ -1625,7 +1627,7 @@ static void b43_upload_beacon0(struct b43_wldev *dev)
if (wl->beacon0_uploaded)
return;
b43_write_beacon_template(dev, 0x68, 0x18);
- wl->beacon0_uploaded = 1;
+ wl->beacon0_uploaded = true;
}
static void b43_upload_beacon1(struct b43_wldev *dev)
@@ -1635,7 +1637,7 @@ static void b43_upload_beacon1(struct b43_wldev *dev)
if (wl->beacon1_uploaded)
return;
b43_write_beacon_template(dev, 0x468, 0x1A);
- wl->beacon1_uploaded = 1;
+ wl->beacon1_uploaded = true;
}
static void handle_irq_beacon(struct b43_wldev *dev)
@@ -1667,7 +1669,7 @@ static void handle_irq_beacon(struct b43_wldev *dev)
if (unlikely(wl->beacon_templates_virgin)) {
/* We never uploaded a beacon before.
* Upload both templates now, but only mark one valid. */
- wl->beacon_templates_virgin = 0;
+ wl->beacon_templates_virgin = false;
b43_upload_beacon0(dev);
b43_upload_beacon1(dev);
cmd = b43_read32(dev, B43_MMIO_MACCMD);
@@ -1755,8 +1757,8 @@ static void b43_update_templates(struct b43_wl *wl)
if (wl->current_beacon)
dev_kfree_skb_any(wl->current_beacon);
wl->current_beacon = beacon;
- wl->beacon0_uploaded = 0;
- wl->beacon1_uploaded = 0;
+ wl->beacon0_uploaded = false;
+ wl->beacon1_uploaded = false;
ieee80211_queue_work(wl->hw, &wl->beacon_update_trigger);
}
@@ -1913,7 +1915,7 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
b43err(dev->wl, "This device does not support DMA "
"on your system. It will now be switched to PIO.\n");
/* Fall back to PIO transfers if we get fatal DMA errors! */
- dev->use_pio = 1;
+ dev->use_pio = true;
b43_controller_restart(dev, "DMA error");
return;
}
@@ -2240,12 +2242,12 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
filename = NULL;
else
goto err_no_pcm;
- fw->pcm_request_failed = 0;
+ fw->pcm_request_failed = false;
err = b43_do_request_fw(ctx, filename, &fw->pcm);
if (err == -ENOENT) {
/* We did not find a PCM file? Not fatal, but
* core rev <= 10 must do without hwcrypto then. */
- fw->pcm_request_failed = 1;
+ fw->pcm_request_failed = true;
} else if (err)
goto err_load;
@@ -2535,7 +2537,7 @@ static int b43_upload_microcode(struct b43_wldev *dev)
dev->wl->hw->queues = dev->wl->mac80211_initially_registered_queues;
dev->qos_enabled = !!modparam_qos;
/* Default to firmware/hardware crypto acceleration. */
- dev->hwcrypto_enabled = 1;
+ dev->hwcrypto_enabled = true;
if (dev->fw.opensource) {
u16 fwcapa;
@@ -2549,7 +2551,7 @@ static int b43_upload_microcode(struct b43_wldev *dev)
if (!(fwcapa & B43_FWCAPA_HWCRYPTO) || dev->fw.pcm_request_failed) {
b43info(dev->wl, "Hardware crypto acceleration not supported by firmware\n");
/* Disable hardware crypto and fall back to software crypto. */
- dev->hwcrypto_enabled = 0;
+ dev->hwcrypto_enabled = false;
}
if (!(fwcapa & B43_FWCAPA_QOS)) {
b43info(dev->wl, "QoS not supported by firmware\n");
@@ -2557,7 +2559,7 @@ static int b43_upload_microcode(struct b43_wldev *dev)
* ieee80211_unregister to make sure the networking core can
* properly free possible resources. */
dev->wl->hw->queues = 1;
- dev->qos_enabled = 0;
+ dev->qos_enabled = false;
}
} else {
b43info(dev->wl, "Loading firmware version %u.%u "
@@ -3361,10 +3363,10 @@ static int b43_rng_init(struct b43_wl *wl)
wl->rng.name = wl->rng_name;
wl->rng.data_read = b43_rng_read;
wl->rng.priv = (unsigned long)wl;
- wl->rng_initialized = 1;
+ wl->rng_initialized = true;
err = hwrng_register(&wl->rng);
if (err) {
- wl->rng_initialized = 0;
+ wl->rng_initialized = false;
b43err(wl, "Failed to register the random "
"number generator (%d)\n", err);
}
@@ -3378,6 +3380,7 @@ static void b43_tx_work(struct work_struct *work)
struct b43_wl *wl = container_of(work, struct b43_wl, tx_work);
struct b43_wldev *dev;
struct sk_buff *skb;
+ int queue_num;
int err = 0;
mutex_lock(&wl->mutex);
@@ -3387,15 +3390,26 @@ static void b43_tx_work(struct work_struct *work)
return;
}
- while (skb_queue_len(&wl->tx_queue)) {
- skb = skb_dequeue(&wl->tx_queue);
+ for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) {
+ while (skb_queue_len(&wl->tx_queue[queue_num])) {
+ skb = skb_dequeue(&wl->tx_queue[queue_num]);
+ if (b43_using_pio_transfers(dev))
+ err = b43_pio_tx(dev, skb);
+ else
+ err = b43_dma_tx(dev, skb);
+ if (err == -ENOSPC) {
+ wl->tx_queue_stopped[queue_num] = 1;
+ ieee80211_stop_queue(wl->hw, queue_num);
+ skb_queue_head(&wl->tx_queue[queue_num], skb);
+ break;
+ }
+ if (unlikely(err))
+ dev_kfree_skb(skb); /* Drop it */
+ err = 0;
+ }
- if (b43_using_pio_transfers(dev))
- err = b43_pio_tx(dev, skb);
- else
- err = b43_dma_tx(dev, skb);
- if (unlikely(err))
- dev_kfree_skb(skb); /* Drop it */
+ if (!err)
+ wl->tx_queue_stopped[queue_num] = 0;
}
#if B43_DEBUG
@@ -3416,8 +3430,12 @@ static void b43_op_tx(struct ieee80211_hw *hw,
}
B43_WARN_ON(skb_shinfo(skb)->nr_frags);
- skb_queue_tail(&wl->tx_queue, skb);
- ieee80211_queue_work(wl->hw, &wl->tx_work);
+ skb_queue_tail(&wl->tx_queue[skb->queue_mapping], skb);
+ if (!wl->tx_queue_stopped[skb->queue_mapping]) {
+ ieee80211_queue_work(wl->hw, &wl->tx_work);
+ } else {
+ ieee80211_stop_queue(wl->hw, skb->queue_mapping);
+ }
}
static void b43_qos_params_upload(struct b43_wldev *dev,
@@ -3702,13 +3720,13 @@ static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan)
case IEEE80211_BAND_5GHZ:
if (d->phy.supports_5ghz) {
up_dev = d;
- gmode = 0;
+ gmode = false;
}
break;
case IEEE80211_BAND_2GHZ:
if (d->phy.supports_2ghz) {
up_dev = d;
- gmode = 1;
+ gmode = true;
}
break;
default:
@@ -4147,6 +4165,7 @@ static struct b43_wldev * b43_wireless_core_stop(struct b43_wldev *dev)
struct b43_wl *wl;
struct b43_wldev *orig_dev;
u32 mask;
+ int queue_num;
if (!dev)
return NULL;
@@ -4199,9 +4218,11 @@ redo:
mask = b43_read32(dev, B43_MMIO_GEN_IRQ_MASK);
B43_WARN_ON(mask != 0xFFFFFFFF && mask);
- /* Drain the TX queue */
- while (skb_queue_len(&wl->tx_queue))
- dev_kfree_skb(skb_dequeue(&wl->tx_queue));
+ /* Drain all TX queues. */
+ for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) {
+ while (skb_queue_len(&wl->tx_queue[queue_num]))
+ dev_kfree_skb(skb_dequeue(&wl->tx_queue[queue_num]));
+ }
b43_mac_suspend(dev);
b43_leds_exit(dev);
@@ -4425,18 +4446,18 @@ static void setup_struct_phy_for_init(struct b43_wldev *dev,
atomic_set(&phy->txerr_cnt, B43_PHY_TX_BADNESS_LIMIT);
#if B43_DEBUG
- phy->phy_locked = 0;
- phy->radio_locked = 0;
+ phy->phy_locked = false;
+ phy->radio_locked = false;
#endif
}
static void setup_struct_wldev_for_init(struct b43_wldev *dev)
{
- dev->dfq_valid = 0;
+ dev->dfq_valid = false;
/* Assume the radio is enabled. If it's not enabled, the state will
* immediately get fixed on the first periodic work run. */
- dev->radio_hw_enable = 1;
+ dev->radio_hw_enable = true;
/* Stats */
memset(&dev->stats, 0, sizeof(dev->stats));
@@ -4670,16 +4691,16 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
if (b43_bus_host_is_pcmcia(dev->dev) ||
b43_bus_host_is_sdio(dev->dev)) {
- dev->__using_pio_transfers = 1;
+ dev->__using_pio_transfers = true;
err = b43_pio_init(dev);
} else if (dev->use_pio) {
b43warn(dev->wl, "Forced PIO by use_pio module parameter. "
"This should not be needed and will result in lower "
"performance.\n");
- dev->__using_pio_transfers = 1;
+ dev->__using_pio_transfers = true;
err = b43_pio_init(dev);
} else {
- dev->__using_pio_transfers = 0;
+ dev->__using_pio_transfers = false;
err = b43_dma_init(dev);
}
if (err)
@@ -4733,7 +4754,7 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
b43dbg(wl, "Adding Interface type %d\n", vif->type);
dev = wl->current_dev;
- wl->operating = 1;
+ wl->operating = true;
wl->vif = vif;
wl->if_type = vif->type;
memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
@@ -4767,7 +4788,7 @@ static void b43_op_remove_interface(struct ieee80211_hw *hw,
B43_WARN_ON(wl->vif != vif);
wl->vif = NULL;
- wl->operating = 0;
+ wl->operating = false;
b43_adjust_opmode(dev);
memset(wl->mac_addr, 0, ETH_ALEN);
@@ -4789,12 +4810,12 @@ static int b43_op_start(struct ieee80211_hw *hw)
memset(wl->bssid, 0, ETH_ALEN);
memset(wl->mac_addr, 0, ETH_ALEN);
wl->filter_flags = 0;
- wl->radiotap_enabled = 0;
+ wl->radiotap_enabled = false;
b43_qos_clear(wl);
- wl->beacon0_uploaded = 0;
- wl->beacon1_uploaded = 0;
- wl->beacon_templates_virgin = 1;
- wl->radio_enabled = 1;
+ wl->beacon0_uploaded = false;
+ wl->beacon1_uploaded = false;
+ wl->beacon_templates_virgin = true;
+ wl->radio_enabled = true;
mutex_lock(&wl->mutex);
@@ -4833,6 +4854,9 @@ static void b43_op_stop(struct ieee80211_hw *hw)
cancel_work_sync(&(wl->beacon_update_trigger));
+ if (!dev)
+ goto out;
+
mutex_lock(&wl->mutex);
if (b43_status(dev) >= B43_STAT_STARTED) {
dev = b43_wireless_core_stop(dev);
@@ -4840,11 +4864,11 @@ static void b43_op_stop(struct ieee80211_hw *hw)
goto out_unlock;
}
b43_wireless_core_exit(dev);
- wl->radio_enabled = 0;
+ wl->radio_enabled = false;
out_unlock:
mutex_unlock(&wl->mutex);
-
+out:
cancel_work_sync(&(wl->txpower_adjust_work));
}
@@ -5028,7 +5052,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
struct pci_dev *pdev = NULL;
int err;
u32 tmp;
- bool have_2ghz_phy = 0, have_5ghz_phy = 0;
+ bool have_2ghz_phy = false, have_5ghz_phy = false;
/* Do NOT do any device initialization here.
* Do it in wireless_core_init() instead.
@@ -5071,7 +5095,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
}
dev->phy.gmode = have_2ghz_phy;
- dev->phy.radio_on = 1;
+ dev->phy.radio_on = true;
b43_wireless_core_reset(dev, dev->phy.gmode);
err = b43_phy_versioning(dev);
@@ -5082,11 +5106,11 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
(pdev->device != 0x4312 &&
pdev->device != 0x4319 && pdev->device != 0x4324)) {
/* No multiband support. */
- have_2ghz_phy = 0;
- have_5ghz_phy = 0;
+ have_2ghz_phy = false;
+ have_5ghz_phy = false;
switch (dev->phy.type) {
case B43_PHYTYPE_A:
- have_5ghz_phy = 1;
+ have_5ghz_phy = true;
break;
case B43_PHYTYPE_LP: //FIXME not always!
#if 0 //FIXME enabling 5GHz causes a NULL pointer dereference
@@ -5096,7 +5120,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
case B43_PHYTYPE_N:
case B43_PHYTYPE_HT:
case B43_PHYTYPE_LCN:
- have_2ghz_phy = 1;
+ have_2ghz_phy = true;
break;
default:
B43_WARN_ON(1);
@@ -5112,8 +5136,8 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
/* FIXME: For now we disable the A-PHY on multi-PHY devices. */
if (dev->phy.type != B43_PHYTYPE_N &&
dev->phy.type != B43_PHYTYPE_LP) {
- have_2ghz_phy = 1;
- have_5ghz_phy = 0;
+ have_2ghz_phy = true;
+ have_5ghz_phy = false;
}
}
@@ -5245,6 +5269,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
struct ieee80211_hw *hw;
struct b43_wl *wl;
char chip_name[6];
+ int queue_num;
hw = ieee80211_alloc_hw(sizeof(*wl), &b43_hw_ops);
if (!hw) {
@@ -5264,7 +5289,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
BIT(NL80211_IFTYPE_WDS) |
BIT(NL80211_IFTYPE_ADHOC);
- hw->queues = modparam_qos ? 4 : 1;
+ hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1;
wl->mac80211_initially_registered_queues = hw->queues;
hw->max_rates = 2;
SET_IEEE80211_DEV(hw, dev->dev);
@@ -5281,7 +5306,12 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
INIT_WORK(&wl->beacon_update_trigger, b43_beacon_update_trigger_work);
INIT_WORK(&wl->txpower_adjust_work, b43_phy_txpower_adjust_work);
INIT_WORK(&wl->tx_work, b43_tx_work);
- skb_queue_head_init(&wl->tx_queue);
+
+ /* Initialize queues and flags. */
+ for (queue_num = 0; queue_num < B43_QOS_QUEUE_NUM; queue_num++) {
+ skb_queue_head_init(&wl->tx_queue[queue_num]);
+ wl->tx_queue_stopped[queue_num] = 0;
+ }
snprintf(chip_name, ARRAY_SIZE(chip_name),
(dev->chip_id > 0x9999) ? "%d" : "%04X", dev->chip_id);
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index 3ea44bb..3f8883b 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -145,7 +145,7 @@ void b43_radio_lock(struct b43_wldev *dev)
#if B43_DEBUG
B43_WARN_ON(dev->phy.radio_locked);
- dev->phy.radio_locked = 1;
+ dev->phy.radio_locked = true;
#endif
macctl = b43_read32(dev, B43_MMIO_MACCTL);
@@ -163,7 +163,7 @@ void b43_radio_unlock(struct b43_wldev *dev)
#if B43_DEBUG
B43_WARN_ON(!dev->phy.radio_locked);
- dev->phy.radio_locked = 0;
+ dev->phy.radio_locked = false;
#endif
/* Commit any write */
@@ -178,7 +178,7 @@ void b43_phy_lock(struct b43_wldev *dev)
{
#if B43_DEBUG
B43_WARN_ON(dev->phy.phy_locked);
- dev->phy.phy_locked = 1;
+ dev->phy.phy_locked = true;
#endif
B43_WARN_ON(dev->dev->core_rev < 3);
@@ -190,7 +190,7 @@ void b43_phy_unlock(struct b43_wldev *dev)
{
#if B43_DEBUG
B43_WARN_ON(!dev->phy.phy_locked);
- dev->phy.phy_locked = 0;
+ dev->phy.phy_locked = false;
#endif
B43_WARN_ON(dev->dev->core_rev < 3);
diff --git a/drivers/net/wireless/b43/phy_g.c b/drivers/net/wireless/b43/phy_g.c
index 8e157bc..12f467b 100644
--- a/drivers/net/wireless/b43/phy_g.c
+++ b/drivers/net/wireless/b43/phy_g.c
@@ -897,7 +897,7 @@ b43_radio_interference_mitigation_enable(struct b43_wldev *dev, int mode)
if (b43_phy_read(dev, 0x0033) & 0x0800)
break;
- gphy->aci_enable = 1;
+ gphy->aci_enable = true;
phy_stacksave(B43_PHY_RADIO_BITFIELD);
phy_stacksave(B43_PHY_G_CRS);
@@ -1038,7 +1038,7 @@ b43_radio_interference_mitigation_disable(struct b43_wldev *dev, int mode)
if (!(b43_phy_read(dev, 0x0033) & 0x0800))
break;
- gphy->aci_enable = 0;
+ gphy->aci_enable = false;
phy_stackrestore(B43_PHY_RADIO_BITFIELD);
phy_stackrestore(B43_PHY_G_CRS);
@@ -1956,10 +1956,10 @@ static void b43_phy_init_pctl(struct b43_wldev *dev)
bbatt.att = 11;
if (phy->radio_rev == 8) {
rfatt.att = 15;
- rfatt.with_padmix = 1;
+ rfatt.with_padmix = true;
} else {
rfatt.att = 9;
- rfatt.with_padmix = 0;
+ rfatt.with_padmix = false;
}
b43_set_txpower_g(dev, &bbatt, &rfatt, 0);
}
@@ -2137,7 +2137,7 @@ static void default_radio_attenuation(struct b43_wldev *dev,
struct b43_bus_dev *bdev = dev->dev;
struct b43_phy *phy = &dev->phy;
- rf->with_padmix = 0;
+ rf->with_padmix = false;
if (dev->dev->board_vendor == SSB_BOARDVENDOR_BCM &&
dev->dev->board_type == SSB_BOARD_BCM4309G) {
@@ -2221,7 +2221,7 @@ static void default_radio_attenuation(struct b43_wldev *dev,
return;
case 8:
rf->att = 0xA;
- rf->with_padmix = 1;
+ rf->with_padmix = true;
return;
case 9:
default:
@@ -2389,7 +2389,7 @@ static int b43_gphy_init_tssi2dbm_table(struct b43_wldev *dev)
B43_WARN_ON((dev->dev->chip_id == 0x4301) &&
(phy->radio_ver != 0x2050)); /* Not supported anymore */
- gphy->dyn_tssi_tbl = 0;
+ gphy->dyn_tssi_tbl = false;
if (pab0 != 0 && pab1 != 0 && pab2 != 0 &&
pab0 != -1 && pab1 != -1 && pab2 != -1) {
@@ -2404,7 +2404,7 @@ static int b43_gphy_init_tssi2dbm_table(struct b43_wldev *dev)
pab1, pab2);
if (!gphy->tssi2dbm)
return -ENOMEM;
- gphy->dyn_tssi_tbl = 1;
+ gphy->dyn_tssi_tbl = true;
} else {
/* pabX values not set in SPROM. */
gphy->tgt_idle_tssi = 52;
@@ -2504,7 +2504,7 @@ static void b43_gphy_op_free(struct b43_wldev *dev)
if (gphy->dyn_tssi_tbl)
kfree(gphy->tssi2dbm);
- gphy->dyn_tssi_tbl = 0;
+ gphy->dyn_tssi_tbl = false;
gphy->tssi2dbm = NULL;
kfree(gphy);
@@ -2531,10 +2531,10 @@ static int b43_gphy_op_prepare_hardware(struct b43_wldev *dev)
if (phy->rev == 1) {
/* Workaround: Temporarly disable gmode through the early init
* phase, as the gmode stuff is not needed for phy rev 1 */
- phy->gmode = 0;
+ phy->gmode = false;
b43_wireless_core_reset(dev, 0);
b43_phy_initg(dev);
- phy->gmode = 1;
+ phy->gmode = true;
b43_wireless_core_reset(dev, 1);
}
@@ -2613,7 +2613,7 @@ static void b43_gphy_op_software_rfkill(struct b43_wldev *dev,
gphy->radio_off_context.rfover);
b43_phy_write(dev, B43_PHY_RFOVERVAL,
gphy->radio_off_context.rfoverval);
- gphy->radio_off_context.valid = 0;
+ gphy->radio_off_context.valid = false;
}
channel = phy->channel;
b43_gphy_channel_switch(dev, 6, 1);
@@ -2626,7 +2626,7 @@ static void b43_gphy_op_software_rfkill(struct b43_wldev *dev,
rfoverval = b43_phy_read(dev, B43_PHY_RFOVERVAL);
gphy->radio_off_context.rfover = rfover;
gphy->radio_off_context.rfoverval = rfoverval;
- gphy->radio_off_context.valid = 1;
+ gphy->radio_off_context.valid = true;
b43_phy_write(dev, B43_PHY_RFOVER, rfover | 0x008C);
b43_phy_write(dev, B43_PHY_RFOVERVAL, rfoverval & 0xFF73);
}
@@ -2711,10 +2711,10 @@ static int b43_gphy_op_interf_mitigation(struct b43_wldev *dev,
if ((phy->rev == 0) || (!phy->gmode))
return -ENODEV;
- gphy->aci_wlan_automatic = 0;
+ gphy->aci_wlan_automatic = false;
switch (mode) {
case B43_INTERFMODE_AUTOWLAN:
- gphy->aci_wlan_automatic = 1;
+ gphy->aci_wlan_automatic = true;
if (gphy->aci_enable)
mode = B43_INTERFMODE_MANUALWLAN;
else
@@ -2735,8 +2735,8 @@ static int b43_gphy_op_interf_mitigation(struct b43_wldev *dev,
b43_radio_interference_mitigation_disable(dev, currentmode);
if (mode == B43_INTERFMODE_NONE) {
- gphy->aci_enable = 0;
- gphy->aci_hw_rssi = 0;
+ gphy->aci_enable = false;
+ gphy->aci_hw_rssi = false;
} else
b43_radio_interference_mitigation_enable(dev, mode);
gphy->interfmode = mode;
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
index f93d66b..3ae2856 100644
--- a/drivers/net/wireless/b43/phy_lp.c
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -736,9 +736,9 @@ static void lpphy_set_deaf(struct b43_wldev *dev, bool user)
struct b43_phy_lp *lpphy = dev->phy.lp;
if (user)
- lpphy->crs_usr_disable = 1;
+ lpphy->crs_usr_disable = true;
else
- lpphy->crs_sys_disable = 1;
+ lpphy->crs_sys_disable = true;
b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFF1F, 0x80);
}
@@ -747,9 +747,9 @@ static void lpphy_clear_deaf(struct b43_wldev *dev, bool user)
struct b43_phy_lp *lpphy = dev->phy.lp;
if (user)
- lpphy->crs_usr_disable = 0;
+ lpphy->crs_usr_disable = false;
else
- lpphy->crs_sys_disable = 0;
+ lpphy->crs_sys_disable = false;
if (!lpphy->crs_usr_disable && !lpphy->crs_sys_disable) {
if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index b17d9b6..bf5a438 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -78,19 +78,6 @@ enum b43_nphy_rssi_type {
B43_NPHY_RSSI_TBD,
};
-/* TODO: reorder functions */
-static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev,
- bool enable);
-static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
- u8 *events, u8 *delays, u8 length);
-static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
- enum b43_nphy_rf_sequence seq);
-static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
- u16 value, u8 core, bool off);
-static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
- u16 value, u8 core);
-static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev);
-
static inline bool b43_nphy_ipa(struct b43_wldev *dev)
{
enum ieee80211_band band = b43_current_band(dev->wl);
@@ -98,57 +85,394 @@ static inline bool b43_nphy_ipa(struct b43_wldev *dev)
(dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ));
}
-void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
-{//TODO
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */
+static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
+{
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (dev->phy.rev >= 6) {
+ if (dev->dev->chip_id == 47162)
+ return txpwrctrl_tx_gain_ipa_rev5;
+ return txpwrctrl_tx_gain_ipa_rev6;
+ } else if (dev->phy.rev >= 5) {
+ return txpwrctrl_tx_gain_ipa_rev5;
+ } else {
+ return txpwrctrl_tx_gain_ipa;
+ }
+ } else {
+ return txpwrctrl_tx_gain_ipa_5g;
+ }
}
-static void b43_nphy_op_adjust_txpower(struct b43_wldev *dev)
-{//TODO
+/**************************************************
+ * RF (just without b43_nphy_rf_control_intc_override)
+ **************************************************/
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */
+static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
+ enum b43_nphy_rf_sequence seq)
+{
+ static const u16 trigger[] = {
+ [B43_RFSEQ_RX2TX] = B43_NPHY_RFSEQTR_RX2TX,
+ [B43_RFSEQ_TX2RX] = B43_NPHY_RFSEQTR_TX2RX,
+ [B43_RFSEQ_RESET2RX] = B43_NPHY_RFSEQTR_RST2RX,
+ [B43_RFSEQ_UPDATE_GAINH] = B43_NPHY_RFSEQTR_UPGH,
+ [B43_RFSEQ_UPDATE_GAINL] = B43_NPHY_RFSEQTR_UPGL,
+ [B43_RFSEQ_UPDATE_GAINU] = B43_NPHY_RFSEQTR_UPGU,
+ };
+ int i;
+ u16 seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE);
+
+ B43_WARN_ON(seq >= ARRAY_SIZE(trigger));
+
+ b43_phy_set(dev, B43_NPHY_RFSEQMODE,
+ B43_NPHY_RFSEQMODE_CAOVER | B43_NPHY_RFSEQMODE_TROVER);
+ b43_phy_set(dev, B43_NPHY_RFSEQTR, trigger[seq]);
+ for (i = 0; i < 200; i++) {
+ if (!(b43_phy_read(dev, B43_NPHY_RFSEQST) & trigger[seq]))
+ goto ok;
+ msleep(1);
+ }
+ b43err(dev->wl, "RF sequence status timeout\n");
+ok:
+ b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
}
-static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev,
- bool ignore_tssi)
-{//TODO
- return B43_TXPWR_RES_DONE;
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */
+static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
+ u16 value, u8 core, bool off)
+{
+ int i;
+ u8 index = fls(field);
+ u8 addr, en_addr, val_addr;
+ /* we expect only one bit set */
+ B43_WARN_ON(field & (~(1 << (index - 1))));
+
+ if (dev->phy.rev >= 3) {
+ const struct nphy_rf_control_override_rev3 *rf_ctrl;
+ for (i = 0; i < 2; i++) {
+ if (index == 0 || index == 16) {
+ b43err(dev->wl,
+ "Unsupported RF Ctrl Override call\n");
+ return;
+ }
+
+ rf_ctrl = &tbl_rf_control_override_rev3[index - 1];
+ en_addr = B43_PHY_N((i == 0) ?
+ rf_ctrl->en_addr0 : rf_ctrl->en_addr1);
+ val_addr = B43_PHY_N((i == 0) ?
+ rf_ctrl->val_addr0 : rf_ctrl->val_addr1);
+
+ if (off) {
+ b43_phy_mask(dev, en_addr, ~(field));
+ b43_phy_mask(dev, val_addr,
+ ~(rf_ctrl->val_mask));
+ } else {
+ if (core == 0 || ((1 << i) & core)) {
+ b43_phy_set(dev, en_addr, field);
+ b43_phy_maskset(dev, val_addr,
+ ~(rf_ctrl->val_mask),
+ (value << rf_ctrl->val_shift));
+ }
+ }
+ }
+ } else {
+ const struct nphy_rf_control_override_rev2 *rf_ctrl;
+ if (off) {
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~(field));
+ value = 0;
+ } else {
+ b43_phy_set(dev, B43_NPHY_RFCTL_OVER, field);
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (index <= 1 || index == 16) {
+ b43err(dev->wl,
+ "Unsupported RF Ctrl Override call\n");
+ return;
+ }
+
+ if (index == 2 || index == 10 ||
+ (index >= 13 && index <= 15)) {
+ core = 1;
+ }
+
+ rf_ctrl = &tbl_rf_control_override_rev2[index - 2];
+ addr = B43_PHY_N((i == 0) ?
+ rf_ctrl->addr0 : rf_ctrl->addr1);
+
+ if ((1 << i) & core)
+ b43_phy_maskset(dev, addr, ~(rf_ctrl->bmask),
+ (value << rf_ctrl->shift));
+
+ b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_START);
+ udelay(1);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, 0xFFFE);
+ }
+ }
}
-static void b43_chantab_radio_upload(struct b43_wldev *dev,
- const struct b43_nphy_channeltab_entry_rev2 *e)
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */
+static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
+ u16 value, u8 core)
{
- b43_radio_write(dev, B2055_PLL_REF, e->radio_pll_ref);
- b43_radio_write(dev, B2055_RF_PLLMOD0, e->radio_rf_pllmod0);
- b43_radio_write(dev, B2055_RF_PLLMOD1, e->radio_rf_pllmod1);
- b43_radio_write(dev, B2055_VCO_CAPTAIL, e->radio_vco_captail);
- b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+ u8 i, j;
+ u16 reg, tmp, val;
- b43_radio_write(dev, B2055_VCO_CAL1, e->radio_vco_cal1);
- b43_radio_write(dev, B2055_VCO_CAL2, e->radio_vco_cal2);
- b43_radio_write(dev, B2055_PLL_LFC1, e->radio_pll_lfc1);
- b43_radio_write(dev, B2055_PLL_LFR1, e->radio_pll_lfr1);
- b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+ B43_WARN_ON(dev->phy.rev < 3);
+ B43_WARN_ON(field > 4);
- b43_radio_write(dev, B2055_PLL_LFC2, e->radio_pll_lfc2);
- b43_radio_write(dev, B2055_LGBUF_CENBUF, e->radio_lgbuf_cenbuf);
- b43_radio_write(dev, B2055_LGEN_TUNE1, e->radio_lgen_tune1);
- b43_radio_write(dev, B2055_LGEN_TUNE2, e->radio_lgen_tune2);
- b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+ for (i = 0; i < 2; i++) {
+ if ((core == 1 && i == 1) || (core == 2 && !i))
+ continue;
- b43_radio_write(dev, B2055_C1_LGBUF_ATUNE, e->radio_c1_lgbuf_atune);
- b43_radio_write(dev, B2055_C1_LGBUF_GTUNE, e->radio_c1_lgbuf_gtune);
- b43_radio_write(dev, B2055_C1_RX_RFR1, e->radio_c1_rx_rfr1);
- b43_radio_write(dev, B2055_C1_TX_PGAPADTN, e->radio_c1_tx_pgapadtn);
- b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+ reg = (i == 0) ?
+ B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2;
+ b43_phy_mask(dev, reg, 0xFBFF);
- b43_radio_write(dev, B2055_C1_TX_MXBGTRIM, e->radio_c1_tx_mxbgtrim);
- b43_radio_write(dev, B2055_C2_LGBUF_ATUNE, e->radio_c2_lgbuf_atune);
- b43_radio_write(dev, B2055_C2_LGBUF_GTUNE, e->radio_c2_lgbuf_gtune);
- b43_radio_write(dev, B2055_C2_RX_RFR1, e->radio_c2_rx_rfr1);
- b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+ switch (field) {
+ case 0:
+ b43_phy_write(dev, reg, 0);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+ break;
+ case 1:
+ if (!i) {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC1,
+ 0xFC3F, (value << 6));
+ b43_phy_maskset(dev, B43_NPHY_TXF_40CO_B1S1,
+ 0xFFFE, 1);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_START);
+ for (j = 0; j < 100; j++) {
+ if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_START) {
+ j = 0;
+ break;
+ }
+ udelay(10);
+ }
+ if (j)
+ b43err(dev->wl,
+ "intc override timeout\n");
+ b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1,
+ 0xFFFE);
+ } else {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC2,
+ 0xFC3F, (value << 6));
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER,
+ 0xFFFE, 1);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_RXTX);
+ for (j = 0; j < 100; j++) {
+ if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_RXTX) {
+ j = 0;
+ break;
+ }
+ udelay(10);
+ }
+ if (j)
+ b43err(dev->wl,
+ "intc override timeout\n");
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER,
+ 0xFFFE);
+ }
+ break;
+ case 2:
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ tmp = 0x0020;
+ val = value << 5;
+ } else {
+ tmp = 0x0010;
+ val = value << 4;
+ }
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ break;
+ case 3:
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ tmp = 0x0001;
+ val = value;
+ } else {
+ tmp = 0x0004;
+ val = value << 2;
+ }
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ break;
+ case 4:
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ tmp = 0x0002;
+ val = value << 1;
+ } else {
+ tmp = 0x0008;
+ val = value << 3;
+ }
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ break;
+ }
+ }
+}
- b43_radio_write(dev, B2055_C2_TX_PGAPADTN, e->radio_c2_tx_pgapadtn);
- b43_radio_write(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim);
+/**************************************************
+ * Various PHY ops
+ **************************************************/
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
+static void b43_nphy_write_clip_detection(struct b43_wldev *dev,
+ const u16 *clip_st)
+{
+ b43_phy_write(dev, B43_NPHY_C1_CLIP1THRES, clip_st[0]);
+ b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]);
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
+static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st)
+{
+ clip_st[0] = b43_phy_read(dev, B43_NPHY_C1_CLIP1THRES);
+ clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */
+static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val)
+{
+ u16 tmp;
+
+ if (dev->dev->core_rev == 16)
+ b43_mac_suspend(dev);
+
+ tmp = b43_phy_read(dev, B43_NPHY_CLASSCTL);
+ tmp &= (B43_NPHY_CLASSCTL_CCKEN | B43_NPHY_CLASSCTL_OFDMEN |
+ B43_NPHY_CLASSCTL_WAITEDEN);
+ tmp &= ~mask;
+ tmp |= (val & mask);
+ b43_phy_maskset(dev, B43_NPHY_CLASSCTL, 0xFFF8, tmp);
+
+ if (dev->dev->core_rev == 16)
+ b43_mac_enable(dev);
+
+ return tmp;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */
+static void b43_nphy_reset_cca(struct b43_wldev *dev)
+{
+ u16 bbcfg;
+
+ b43_phy_force_clock(dev, 1);
+ bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG);
+ b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg | B43_NPHY_BBCFG_RSTCCA);
+ udelay(1);
+ b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg & ~B43_NPHY_BBCFG_RSTCCA);
+ b43_phy_force_clock(dev, 0);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */
+static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
+{
+ struct b43_phy *phy = &dev->phy;
+ struct b43_phy_n *nphy = phy->n;
+
+ if (enable) {
+ static const u16 clip[] = { 0xFFFF, 0xFFFF };
+ if (nphy->deaf_count++ == 0) {
+ nphy->classifier_state = b43_nphy_classifier(dev, 0, 0);
+ b43_nphy_classifier(dev, 0x7, 0);
+ b43_nphy_read_clip_detection(dev, nphy->clip_state);
+ b43_nphy_write_clip_detection(dev, clip);
+ }
+ b43_nphy_reset_cca(dev);
+ } else {
+ if (--nphy->deaf_count == 0) {
+ b43_nphy_classifier(dev, 0x7, nphy->classifier_state);
+ b43_nphy_write_clip_detection(dev, nphy->clip_state);
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/AdjustLnaGainTbl */
+static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u8 i;
+ s16 tmp;
+ u16 data[4];
+ s16 gain[2];
+ u16 minmax[2];
+ static const u16 lna_gain[4] = { -2, 10, 19, 25 };
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ if (nphy->gain_boost) {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ gain[0] = 6;
+ gain[1] = 6;
+ } else {
+ tmp = 40370 - 315 * dev->phy.channel;
+ gain[0] = ((tmp >> 13) + ((tmp >> 12) & 1));
+ tmp = 23242 - 224 * dev->phy.channel;
+ gain[1] = ((tmp >> 13) + ((tmp >> 12) & 1));
+ }
+ } else {
+ gain[0] = 0;
+ gain[1] = 0;
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (nphy->elna_gain_config) {
+ data[0] = 19 + gain[i];
+ data[1] = 25 + gain[i];
+ data[2] = 25 + gain[i];
+ data[3] = 25 + gain[i];
+ } else {
+ data[0] = lna_gain[0] + gain[i];
+ data[1] = lna_gain[1] + gain[i];
+ data[2] = lna_gain[2] + gain[i];
+ data[3] = lna_gain[3] + gain[i];
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB16(i, 8), 4, data);
+
+ minmax[i] = 23 + gain[i];
+ }
+
+ b43_phy_maskset(dev, B43_NPHY_C1_MINMAX_GAIN, ~B43_NPHY_C1_MINGAIN,
+ minmax[0] << B43_NPHY_C1_MINGAIN_SHIFT);
+ b43_phy_maskset(dev, B43_NPHY_C2_MINMAX_GAIN, ~B43_NPHY_C2_MINGAIN,
+ minmax[1] << B43_NPHY_C2_MINGAIN_SHIFT);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRfSeq */
+static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
+ u8 *events, u8 *delays, u8 length)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u8 i;
+ u8 end = (dev->phy.rev >= 3) ? 0x1F : 0x0F;
+ u16 offset1 = cmd << 4;
+ u16 offset2 = offset1 + 0x80;
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, true);
+
+ b43_ntab_write_bulk(dev, B43_NTAB8(7, offset1), length, events);
+ b43_ntab_write_bulk(dev, B43_NTAB8(7, offset2), length, delays);
+
+ for (i = length; i < 16; i++) {
+ b43_ntab_write(dev, B43_NTAB8(7, offset1 + i), end);
+ b43_ntab_write(dev, B43_NTAB8(7, offset2 + i), 1);
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, false);
+}
+
+/**************************************************
+ * Radio 0x2056
+ **************************************************/
+
static void b43_chantab_radio_2056_upload(struct b43_wldev *dev,
const struct b43_nphy_channeltab_entry_rev3 *e)
{
@@ -228,10 +552,98 @@ static void b43_chantab_radio_2056_upload(struct b43_wldev *dev,
static void b43_radio_2056_setup(struct b43_wldev *dev,
const struct b43_nphy_channeltab_entry_rev3 *e)
{
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
+ enum ieee80211_band band = b43_current_band(dev->wl);
+ u16 offset;
+ u8 i;
+ u16 bias, cbias, pag_boost, pgag_boost, mixg_boost, padg_boost;
+
B43_WARN_ON(dev->phy.rev < 3);
b43_chantab_radio_2056_upload(dev, e);
- /* TODO */
+ b2056_upload_syn_pll_cp2(dev, band == IEEE80211_BAND_5GHZ);
+
+ if (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
+ if (dev->dev->chip_id == 0x4716) {
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x14);
+ b43_radio_write(dev, B2056_SYN_PLL_CP2, 0);
+ } else {
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x0B);
+ b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x14);
+ }
+ }
+ if (sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1, 0x1F);
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2, 0x1F);
+ b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4, 0x05);
+ b43_radio_write(dev, B2056_SYN_PLL_CP2, 0x0C);
+ }
+
+ if (dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) {
+ for (i = 0; i < 2; i++) {
+ offset = i ? B2056_TX1 : B2056_TX0;
+ if (dev->phy.rev >= 5) {
+ b43_radio_write(dev,
+ offset | B2056_TX_PADG_IDAC, 0xcc);
+
+ if (dev->dev->chip_id == 0x4716) {
+ bias = 0x40;
+ cbias = 0x45;
+ pag_boost = 0x5;
+ pgag_boost = 0x33;
+ mixg_boost = 0x55;
+ } else {
+ bias = 0x25;
+ cbias = 0x20;
+ pag_boost = 0x4;
+ pgag_boost = 0x03;
+ mixg_boost = 0x65;
+ }
+ padg_boost = 0x77;
+
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAG_IMAIN_STAT,
+ bias);
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAG_IAUX_STAT,
+ bias);
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAG_CASCBIAS,
+ cbias);
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAG_BOOST_TUNE,
+ pag_boost);
+ b43_radio_write(dev,
+ offset | B2056_TX_PGAG_BOOST_TUNE,
+ pgag_boost);
+ b43_radio_write(dev,
+ offset | B2056_TX_PADG_BOOST_TUNE,
+ padg_boost);
+ b43_radio_write(dev,
+ offset | B2056_TX_MIXG_BOOST_TUNE,
+ mixg_boost);
+ } else {
+ bias = dev->phy.is_40mhz ? 0x40 : 0x20;
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAG_IMAIN_STAT,
+ bias);
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAG_IAUX_STAT,
+ bias);
+ b43_radio_write(dev,
+ offset | B2056_TX_INTPAG_CASCBIAS,
+ 0x30);
+ }
+ b43_radio_write(dev, offset | B2056_TX_PA_SPARE1, 0xee);
+ }
+ } else if (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ) {
+ /* TODO */
+ }
+
udelay(50);
/* VCO calibration */
b43_radio_write(dev, B2056_SYN_PLL_VCOCAL12, 0x00);
@@ -242,285 +654,84 @@ static void b43_radio_2056_setup(struct b43_wldev *dev,
udelay(300);
}
-static void b43_chantab_phy_upload(struct b43_wldev *dev,
- const struct b43_phy_n_sfo_cfg *e)
-{
- b43_phy_write(dev, B43_NPHY_BW1A, e->phy_bw1a);
- b43_phy_write(dev, B43_NPHY_BW2, e->phy_bw2);
- b43_phy_write(dev, B43_NPHY_BW3, e->phy_bw3);
- b43_phy_write(dev, B43_NPHY_BW4, e->phy_bw4);
- b43_phy_write(dev, B43_NPHY_BW5, e->phy_bw5);
- b43_phy_write(dev, B43_NPHY_BW6, e->phy_bw6);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlEnable */
-static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
+static void b43_radio_init2056_pre(struct b43_wldev *dev)
{
- struct b43_phy_n *nphy = dev->phy.n;
- u8 i;
- u16 bmask, val, tmp;
- enum ieee80211_band band = b43_current_band(dev->wl);
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 1);
-
- nphy->txpwrctrl = enable;
- if (!enable) {
- if (dev->phy.rev >= 3 &&
- (b43_phy_read(dev, B43_NPHY_TXPCTL_CMD) &
- (B43_NPHY_TXPCTL_CMD_COEFF |
- B43_NPHY_TXPCTL_CMD_HWPCTLEN |
- B43_NPHY_TXPCTL_CMD_PCTLEN))) {
- /* We disable enabled TX pwr ctl, save it's state */
- nphy->tx_pwr_idx[0] = b43_phy_read(dev,
- B43_NPHY_C1_TXPCTL_STAT) & 0x7f;
- nphy->tx_pwr_idx[1] = b43_phy_read(dev,
- B43_NPHY_C2_TXPCTL_STAT) & 0x7f;
- }
-
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6840);
- for (i = 0; i < 84; i++)
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0);
-
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6C40);
- for (i = 0; i < 84; i++)
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0);
-
- tmp = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN;
- if (dev->phy.rev >= 3)
- tmp |= B43_NPHY_TXPCTL_CMD_PCTLEN;
- b43_phy_mask(dev, B43_NPHY_TXPCTL_CMD, ~tmp);
-
- if (dev->phy.rev >= 3) {
- b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100);
- b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100);
- } else {
- b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000);
- }
-
- if (dev->phy.rev == 2)
- b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
- ~B43_NPHY_BPHY_CTL3_SCALE, 0x53);
- else if (dev->phy.rev < 2)
- b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
- ~B43_NPHY_BPHY_CTL3_SCALE, 0x5A);
-
- if (dev->phy.rev < 2 && dev->phy.is_40mhz)
- b43_hf_write(dev, b43_hf_read(dev) | B43_HF_TSSIRPSMW);
- } else {
- b43_ntab_write_bulk(dev, B43_NTAB16(26, 64), 84,
- nphy->adj_pwr_tbl);
- b43_ntab_write_bulk(dev, B43_NTAB16(27, 64), 84,
- nphy->adj_pwr_tbl);
-
- bmask = B43_NPHY_TXPCTL_CMD_COEFF |
- B43_NPHY_TXPCTL_CMD_HWPCTLEN;
- /* wl does useless check for "enable" param here */
- val = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN;
- if (dev->phy.rev >= 3) {
- bmask |= B43_NPHY_TXPCTL_CMD_PCTLEN;
- if (val)
- val |= B43_NPHY_TXPCTL_CMD_PCTLEN;
- }
- b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, ~(bmask), val);
-
- if (band == IEEE80211_BAND_5GHZ) {
- b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
- ~B43_NPHY_TXPCTL_CMD_INIT, 0x64);
- if (dev->phy.rev > 1)
- b43_phy_maskset(dev, B43_NPHY_TXPCTL_INIT,
- ~B43_NPHY_TXPCTL_INIT_PIDXI1,
- 0x64);
- }
-
- if (dev->phy.rev >= 3) {
- if (nphy->tx_pwr_idx[0] != 128 &&
- nphy->tx_pwr_idx[1] != 128) {
- /* Recover TX pwr ctl state */
- b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
- ~B43_NPHY_TXPCTL_CMD_INIT,
- nphy->tx_pwr_idx[0]);
- if (dev->phy.rev > 1)
- b43_phy_maskset(dev,
- B43_NPHY_TXPCTL_INIT,
- ~0xff, nphy->tx_pwr_idx[1]);
- }
- }
-
- if (dev->phy.rev >= 3) {
- b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, ~0x100);
- b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x100);
- } else {
- b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x4000);
- }
-
- if (dev->phy.rev == 2)
- b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x3b);
- else if (dev->phy.rev < 2)
- b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x40);
-
- if (dev->phy.rev < 2 && dev->phy.is_40mhz)
- b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_TSSIRPSMW);
-
- if (b43_nphy_ipa(dev)) {
- b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x4);
- b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x4);
- }
- }
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 0);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
+ ~B43_NPHY_RFCTL_CMD_CHIP0PU);
+ /* Maybe wl meant to reset and set (order?) RFCTL_CMD_OEPORFORCE? */
+ b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_OEPORFORCE);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ ~B43_NPHY_RFCTL_CMD_OEPORFORCE);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_CHIP0PU);
}
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrFix */
-static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
+static void b43_radio_init2056_post(struct b43_wldev *dev)
{
- struct b43_phy_n *nphy = dev->phy.n;
- struct ssb_sprom *sprom = dev->dev->bus_sprom;
-
- u8 txpi[2], bbmult, i;
- u16 tmp, radio_gain, dac_gain;
- u16 freq = dev->phy.channel_freq;
- u32 txgain;
- /* u32 gaintbl; rev3+ */
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 1);
-
- if (dev->phy.rev >= 3) {
- txpi[0] = 40;
- txpi[1] = 40;
- } else if (sprom->revision < 4) {
- txpi[0] = 72;
- txpi[1] = 72;
- } else {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
- txpi[0] = sprom->txpid2g[0];
- txpi[1] = sprom->txpid2g[1];
- } else if (freq >= 4900 && freq < 5100) {
- txpi[0] = sprom->txpid5gl[0];
- txpi[1] = sprom->txpid5gl[1];
- } else if (freq >= 5100 && freq < 5500) {
- txpi[0] = sprom->txpid5g[0];
- txpi[1] = sprom->txpid5g[1];
- } else if (freq >= 5500) {
- txpi[0] = sprom->txpid5gh[0];
- txpi[1] = sprom->txpid5gh[1];
- } else {
- txpi[0] = 91;
- txpi[1] = 91;
- }
- }
-
+ b43_radio_set(dev, B2056_SYN_COM_CTRL, 0xB);
+ b43_radio_set(dev, B2056_SYN_COM_PU, 0x2);
+ b43_radio_set(dev, B2056_SYN_COM_RESET, 0x2);
+ msleep(1);
+ b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2);
+ b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC);
+ b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1);
/*
- for (i = 0; i < 2; i++) {
- nphy->txpwrindex[i].index_internal = txpi[i];
- nphy->txpwrindex[i].index_internal_save = txpi[i];
- }
+ if (nphy->init_por)
+ Call Radio 2056 Recalibrate
*/
+}
- for (i = 0; i < 2; i++) {
- if (dev->phy.rev >= 3) {
- /* FIXME: support 5GHz */
- txgain = b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]];
- radio_gain = (txgain >> 16) & 0x1FFFF;
- } else {
- txgain = b43_ntab_tx_gain_rev0_1_2[txpi[i]];
- radio_gain = (txgain >> 16) & 0x1FFF;
- }
-
- dac_gain = (txgain >> 8) & 0x3F;
- bbmult = txgain & 0xFF;
-
- if (dev->phy.rev >= 3) {
- if (i == 0)
- b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100);
- else
- b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100);
- } else {
- b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000);
- }
-
- if (i == 0)
- b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN1, dac_gain);
- else
- b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN2, dac_gain);
-
- b43_ntab_write(dev, B43_NTAB16(0x7, 0x110 + i), radio_gain);
-
- tmp = b43_ntab_read(dev, B43_NTAB16(0xF, 0x57));
- if (i == 0)
- tmp = (tmp & 0x00FF) | (bbmult << 8);
- else
- tmp = (tmp & 0xFF00) | bbmult;
- b43_ntab_write(dev, B43_NTAB16(0xF, 0x57), tmp);
-
- if (b43_nphy_ipa(dev)) {
- u32 tmp32;
- u16 reg = (i == 0) ?
- B43_NPHY_PAPD_EN0 : B43_NPHY_PAPD_EN1;
- tmp32 = b43_ntab_read(dev, B43_NTAB32(26 + i, txpi[i]));
- b43_phy_maskset(dev, reg, 0xE00F, (u32) tmp32 << 4);
- b43_phy_set(dev, reg, 0x4);
- }
- }
-
- b43_phy_mask(dev, B43_NPHY_BPHY_CTL2, ~B43_NPHY_BPHY_CTL2_LUT);
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 0);
+/*
+ * Initialize a Broadcom 2056 N-radio
+ * http://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init
+ */
+static void b43_radio_init2056(struct b43_wldev *dev)
+{
+ b43_radio_init2056_pre(dev);
+ b2056_upload_inittabs(dev, 0, 0);
+ b43_radio_init2056_post(dev);
}
-static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev)
+/**************************************************
+ * Radio 0x2055
+ **************************************************/
+
+static void b43_chantab_radio_upload(struct b43_wldev *dev,
+ const struct b43_nphy_channeltab_entry_rev2 *e)
{
- struct b43_phy *phy = &dev->phy;
+ b43_radio_write(dev, B2055_PLL_REF, e->radio_pll_ref);
+ b43_radio_write(dev, B2055_RF_PLLMOD0, e->radio_rf_pllmod0);
+ b43_radio_write(dev, B2055_RF_PLLMOD1, e->radio_rf_pllmod1);
+ b43_radio_write(dev, B2055_VCO_CAPTAIL, e->radio_vco_captail);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
- const u32 *table = NULL;
-#if 0
- TODO: b43_ntab_papd_pga_gain_delta_ipa_2*
- u32 rfpwr_offset;
- u8 pga_gain;
- int i;
-#endif
+ b43_radio_write(dev, B2055_VCO_CAL1, e->radio_vco_cal1);
+ b43_radio_write(dev, B2055_VCO_CAL2, e->radio_vco_cal2);
+ b43_radio_write(dev, B2055_PLL_LFC1, e->radio_pll_lfc1);
+ b43_radio_write(dev, B2055_PLL_LFR1, e->radio_pll_lfr1);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
- if (phy->rev >= 3) {
- if (b43_nphy_ipa(dev)) {
- table = b43_nphy_get_ipa_gain_table(dev);
- } else {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
- if (phy->rev == 3)
- table = b43_ntab_tx_gain_rev3_5ghz;
- if (phy->rev == 4)
- table = b43_ntab_tx_gain_rev4_5ghz;
- else
- table = b43_ntab_tx_gain_rev5plus_5ghz;
- } else {
- table = b43_ntab_tx_gain_rev3plus_2ghz;
- }
- }
- } else {
- table = b43_ntab_tx_gain_rev0_1_2;
- }
- b43_ntab_write_bulk(dev, B43_NTAB32(26, 192), 128, table);
- b43_ntab_write_bulk(dev, B43_NTAB32(27, 192), 128, table);
+ b43_radio_write(dev, B2055_PLL_LFC2, e->radio_pll_lfc2);
+ b43_radio_write(dev, B2055_LGBUF_CENBUF, e->radio_lgbuf_cenbuf);
+ b43_radio_write(dev, B2055_LGEN_TUNE1, e->radio_lgen_tune1);
+ b43_radio_write(dev, B2055_LGEN_TUNE2, e->radio_lgen_tune2);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
- if (phy->rev >= 3) {
-#if 0
- nphy->gmval = (table[0] >> 16) & 0x7000;
+ b43_radio_write(dev, B2055_C1_LGBUF_ATUNE, e->radio_c1_lgbuf_atune);
+ b43_radio_write(dev, B2055_C1_LGBUF_GTUNE, e->radio_c1_lgbuf_gtune);
+ b43_radio_write(dev, B2055_C1_RX_RFR1, e->radio_c1_rx_rfr1);
+ b43_radio_write(dev, B2055_C1_TX_PGAPADTN, e->radio_c1_tx_pgapadtn);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
- for (i = 0; i < 128; i++) {
- pga_gain = (table[i] >> 24) & 0xF;
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
- rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_2g[pga_gain];
- else
- rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_5g[pga_gain];
- b43_ntab_write(dev, B43_NTAB32(26, 576 + i),
- rfpwr_offset);
- b43_ntab_write(dev, B43_NTAB32(27, 576 + i),
- rfpwr_offset);
- }
-#endif
- }
+ b43_radio_write(dev, B2055_C1_TX_MXBGTRIM, e->radio_c1_tx_mxbgtrim);
+ b43_radio_write(dev, B2055_C2_LGBUF_ATUNE, e->radio_c2_lgbuf_atune);
+ b43_radio_write(dev, B2055_C2_LGBUF_GTUNE, e->radio_c2_lgbuf_gtune);
+ b43_radio_write(dev, B2055_C2_RX_RFR1, e->radio_c2_rx_rfr1);
+ b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
+
+ b43_radio_write(dev, B2055_C2_TX_PGAPADTN, e->radio_c2_tx_pgapadtn);
+ b43_radio_write(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim);
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2055Setup */
@@ -622,1089 +833,9 @@ static void b43_radio_init2055(struct b43_wldev *dev)
b43_radio_init2055_post(dev);
}
-static void b43_radio_init2056_pre(struct b43_wldev *dev)
-{
- b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
- ~B43_NPHY_RFCTL_CMD_CHIP0PU);
- /* Maybe wl meant to reset and set (order?) RFCTL_CMD_OEPORFORCE? */
- b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
- B43_NPHY_RFCTL_CMD_OEPORFORCE);
- b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
- ~B43_NPHY_RFCTL_CMD_OEPORFORCE);
- b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
- B43_NPHY_RFCTL_CMD_CHIP0PU);
-}
-
-static void b43_radio_init2056_post(struct b43_wldev *dev)
-{
- b43_radio_set(dev, B2056_SYN_COM_CTRL, 0xB);
- b43_radio_set(dev, B2056_SYN_COM_PU, 0x2);
- b43_radio_set(dev, B2056_SYN_COM_RESET, 0x2);
- msleep(1);
- b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2);
- b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC);
- b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1);
- /*
- if (nphy->init_por)
- Call Radio 2056 Recalibrate
- */
-}
-
-/*
- * Initialize a Broadcom 2056 N-radio
- * http://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init
- */
-static void b43_radio_init2056(struct b43_wldev *dev)
-{
- b43_radio_init2056_pre(dev);
- b2056_upload_inittabs(dev, 0, 0);
- b43_radio_init2056_post(dev);
-}
-
-/*
- * Upload the N-PHY tables.
- * http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables
- */
-static void b43_nphy_tables_init(struct b43_wldev *dev)
-{
- if (dev->phy.rev < 3)
- b43_nphy_rev0_1_2_tables_init(dev);
- else
- b43_nphy_rev3plus_tables_init(dev);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */
-static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
-{
- struct b43_phy_n *nphy = dev->phy.n;
- enum ieee80211_band band;
- u16 tmp;
-
- if (!enable) {
- nphy->rfctrl_intc1_save = b43_phy_read(dev,
- B43_NPHY_RFCTL_INTC1);
- nphy->rfctrl_intc2_save = b43_phy_read(dev,
- B43_NPHY_RFCTL_INTC2);
- band = b43_current_band(dev->wl);
- if (dev->phy.rev >= 3) {
- if (band == IEEE80211_BAND_5GHZ)
- tmp = 0x600;
- else
- tmp = 0x480;
- } else {
- if (band == IEEE80211_BAND_5GHZ)
- tmp = 0x180;
- else
- tmp = 0x120;
- }
- b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp);
- b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp);
- } else {
- b43_phy_write(dev, B43_NPHY_RFCTL_INTC1,
- nphy->rfctrl_intc1_save);
- b43_phy_write(dev, B43_NPHY_RFCTL_INTC2,
- nphy->rfctrl_intc2_save);
- }
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw */
-static void b43_nphy_tx_lp_fbw(struct b43_wldev *dev)
-{
- u16 tmp;
-
- if (dev->phy.rev >= 3) {
- if (b43_nphy_ipa(dev)) {
- tmp = 4;
- b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S2,
- (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
- }
-
- tmp = 1;
- b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S2,
- (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
- }
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */
-static void b43_nphy_reset_cca(struct b43_wldev *dev)
-{
- u16 bbcfg;
-
- b43_phy_force_clock(dev, 1);
- bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG);
- b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg | B43_NPHY_BBCFG_RSTCCA);
- udelay(1);
- b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg & ~B43_NPHY_BBCFG_RSTCCA);
- b43_phy_force_clock(dev, 0);
- b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */
-static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble)
-{
- u16 mimocfg = b43_phy_read(dev, B43_NPHY_MIMOCFG);
-
- mimocfg |= B43_NPHY_MIMOCFG_AUTO;
- if (preamble == 1)
- mimocfg |= B43_NPHY_MIMOCFG_GFMIX;
- else
- mimocfg &= ~B43_NPHY_MIMOCFG_GFMIX;
-
- b43_phy_write(dev, B43_NPHY_MIMOCFG, mimocfg);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Chains */
-static void b43_nphy_update_txrx_chain(struct b43_wldev *dev)
-{
- struct b43_phy_n *nphy = dev->phy.n;
-
- bool override = false;
- u16 chain = 0x33;
-
- if (nphy->txrx_chain == 0) {
- chain = 0x11;
- override = true;
- } else if (nphy->txrx_chain == 1) {
- chain = 0x22;
- override = true;
- }
-
- b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
- ~(B43_NPHY_RFSEQCA_TXEN | B43_NPHY_RFSEQCA_RXEN),
- chain);
-
- if (override)
- b43_phy_set(dev, B43_NPHY_RFSEQMODE,
- B43_NPHY_RFSEQMODE_CAOVER);
- else
- b43_phy_mask(dev, B43_NPHY_RFSEQMODE,
- ~B43_NPHY_RFSEQMODE_CAOVER);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */
-static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est,
- u16 samps, u8 time, bool wait)
-{
- int i;
- u16 tmp;
-
- b43_phy_write(dev, B43_NPHY_IQEST_SAMCNT, samps);
- b43_phy_maskset(dev, B43_NPHY_IQEST_WT, ~B43_NPHY_IQEST_WT_VAL, time);
- if (wait)
- b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_MODE);
- else
- b43_phy_mask(dev, B43_NPHY_IQEST_CMD, ~B43_NPHY_IQEST_CMD_MODE);
-
- b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_START);
-
- for (i = 1000; i; i--) {
- tmp = b43_phy_read(dev, B43_NPHY_IQEST_CMD);
- if (!(tmp & B43_NPHY_IQEST_CMD_START)) {
- est->i0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI0) << 16) |
- b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO0);
- est->q0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI0) << 16) |
- b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO0);
- est->iq0_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI0) << 16) |
- b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO0);
-
- est->i1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI1) << 16) |
- b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO1);
- est->q1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI1) << 16) |
- b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO1);
- est->iq1_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI1) << 16) |
- b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO1);
- return;
- }
- udelay(10);
- }
- memset(est, 0, sizeof(*est));
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */
-static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write,
- struct b43_phy_n_iq_comp *pcomp)
-{
- if (write) {
- b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPA0, pcomp->a0);
- b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPB0, pcomp->b0);
- b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPA1, pcomp->a1);
- b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPB1, pcomp->b1);
- } else {
- pcomp->a0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPA0);
- pcomp->b0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPB0);
- pcomp->a1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPA1);
- pcomp->b1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPB1);
- }
-}
-
-#if 0
-/* Ready but not used anywhere */
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */
-static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core)
-{
- u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
-
- b43_phy_write(dev, B43_NPHY_RFSEQCA, regs[0]);
- if (core == 0) {
- b43_phy_write(dev, B43_NPHY_AFECTL_C1, regs[1]);
- b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, regs[2]);
- } else {
- b43_phy_write(dev, B43_NPHY_AFECTL_C2, regs[1]);
- b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[2]);
- }
- b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[3]);
- b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[4]);
- b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO1, regs[5]);
- b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO2, regs[6]);
- b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, regs[7]);
- b43_phy_write(dev, B43_NPHY_RFCTL_OVER, regs[8]);
- b43_phy_write(dev, B43_NPHY_PAPD_EN0, regs[9]);
- b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhySetup */
-static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core)
-{
- u8 rxval, txval;
- u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
-
- regs[0] = b43_phy_read(dev, B43_NPHY_RFSEQCA);
- if (core == 0) {
- regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
- regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
- } else {
- regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
- regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
- }
- regs[3] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
- regs[4] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
- regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1);
- regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2);
- regs[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S1);
- regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER);
- regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0);
- regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1);
-
- b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001);
- b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001);
-
- b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
- ~B43_NPHY_RFSEQCA_RXDIS & 0xFFFF,
- ((1 - core) << B43_NPHY_RFSEQCA_RXDIS_SHIFT));
- b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN,
- ((1 - core) << B43_NPHY_RFSEQCA_TXEN_SHIFT));
- b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_RXEN,
- (core << B43_NPHY_RFSEQCA_RXEN_SHIFT));
- b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXDIS,
- (core << B43_NPHY_RFSEQCA_TXDIS_SHIFT));
-
- if (core == 0) {
- b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x0007);
- b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0007);
- } else {
- b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x0007);
- b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0007);
- }
-
- b43_nphy_rf_control_intc_override(dev, 2, 0, 3);
- b43_nphy_rf_control_override(dev, 8, 0, 3, false);
- b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX);
-
- if (core == 0) {
- rxval = 1;
- txval = 8;
- } else {
- rxval = 4;
- txval = 2;
- }
- b43_nphy_rf_control_intc_override(dev, 1, rxval, (core + 1));
- b43_nphy_rf_control_intc_override(dev, 1, txval, (2 - core));
-}
-#endif
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */
-static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask)
-{
- int i;
- s32 iq;
- u32 ii;
- u32 qq;
- int iq_nbits, qq_nbits;
- int arsh, brsh;
- u16 tmp, a, b;
-
- struct nphy_iq_est est;
- struct b43_phy_n_iq_comp old;
- struct b43_phy_n_iq_comp new = { };
- bool error = false;
-
- if (mask == 0)
- return;
-
- b43_nphy_rx_iq_coeffs(dev, false, &old);
- b43_nphy_rx_iq_coeffs(dev, true, &new);
- b43_nphy_rx_iq_est(dev, &est, 0x4000, 32, false);
- new = old;
-
- for (i = 0; i < 2; i++) {
- if (i == 0 && (mask & 1)) {
- iq = est.iq0_prod;
- ii = est.i0_pwr;
- qq = est.q0_pwr;
- } else if (i == 1 && (mask & 2)) {
- iq = est.iq1_prod;
- ii = est.i1_pwr;
- qq = est.q1_pwr;
- } else {
- continue;
- }
-
- if (ii + qq < 2) {
- error = true;
- break;
- }
-
- iq_nbits = fls(abs(iq));
- qq_nbits = fls(qq);
-
- arsh = iq_nbits - 20;
- if (arsh >= 0) {
- a = -((iq << (30 - iq_nbits)) + (ii >> (1 + arsh)));
- tmp = ii >> arsh;
- } else {
- a = -((iq << (30 - iq_nbits)) + (ii << (-1 - arsh)));
- tmp = ii << -arsh;
- }
- if (tmp == 0) {
- error = true;
- break;
- }
- a /= tmp;
-
- brsh = qq_nbits - 11;
- if (brsh >= 0) {
- b = (qq << (31 - qq_nbits));
- tmp = ii >> brsh;
- } else {
- b = (qq << (31 - qq_nbits));
- tmp = ii << -brsh;
- }
- if (tmp == 0) {
- error = true;
- break;
- }
- b = int_sqrt(b / tmp - a * a) - (1 << 10);
-
- if (i == 0 && (mask & 0x1)) {
- if (dev->phy.rev >= 3) {
- new.a0 = a & 0x3FF;
- new.b0 = b & 0x3FF;
- } else {
- new.a0 = b & 0x3FF;
- new.b0 = a & 0x3FF;
- }
- } else if (i == 1 && (mask & 0x2)) {
- if (dev->phy.rev >= 3) {
- new.a1 = a & 0x3FF;
- new.b1 = b & 0x3FF;
- } else {
- new.a1 = b & 0x3FF;
- new.b1 = a & 0x3FF;
- }
- }
- }
-
- if (error)
- new = old;
-
- b43_nphy_rx_iq_coeffs(dev, true, &new);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */
-static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev)
-{
- u16 array[4];
- b43_ntab_read_bulk(dev, B43_NTAB16(0xF, 0x50), 4, array);
-
- b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW0, array[0]);
- b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW1, array[1]);
- b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW2, array[2]);
- b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW3, array[3]);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
-static void b43_nphy_write_clip_detection(struct b43_wldev *dev,
- const u16 *clip_st)
-{
- b43_phy_write(dev, B43_NPHY_C1_CLIP1THRES, clip_st[0]);
- b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
-static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st)
-{
- clip_st[0] = b43_phy_read(dev, B43_NPHY_C1_CLIP1THRES);
- clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SuperSwitchInit */
-static void b43_nphy_superswitch_init(struct b43_wldev *dev, bool init)
-{
- if (dev->phy.rev >= 3) {
- if (!init)
- return;
- if (0 /* FIXME */) {
- b43_ntab_write(dev, B43_NTAB16(9, 2), 0x211);
- b43_ntab_write(dev, B43_NTAB16(9, 3), 0x222);
- b43_ntab_write(dev, B43_NTAB16(9, 8), 0x144);
- b43_ntab_write(dev, B43_NTAB16(9, 12), 0x188);
- }
- } else {
- b43_phy_write(dev, B43_NPHY_GPIO_LOOEN, 0);
- b43_phy_write(dev, B43_NPHY_GPIO_HIOEN, 0);
-
- switch (dev->dev->bus_type) {
-#ifdef CONFIG_B43_BCMA
- case B43_BUS_BCMA:
- bcma_chipco_gpio_control(&dev->dev->bdev->bus->drv_cc,
- 0xFC00, 0xFC00);
- break;
-#endif
-#ifdef CONFIG_B43_SSB
- case B43_BUS_SSB:
- ssb_chipco_gpio_control(&dev->dev->sdev->bus->chipco,
- 0xFC00, 0xFC00);
- break;
-#endif
- }
-
- b43_write32(dev, B43_MMIO_MACCTL,
- b43_read32(dev, B43_MMIO_MACCTL) &
- ~B43_MACCTL_GPOUTSMSK);
- b43_write16(dev, B43_MMIO_GPIO_MASK,
- b43_read16(dev, B43_MMIO_GPIO_MASK) | 0xFC00);
- b43_write16(dev, B43_MMIO_GPIO_CONTROL,
- b43_read16(dev, B43_MMIO_GPIO_CONTROL) & ~0xFC00);
-
- if (init) {
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
- }
- }
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */
-static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val)
-{
- u16 tmp;
-
- if (dev->dev->core_rev == 16)
- b43_mac_suspend(dev);
-
- tmp = b43_phy_read(dev, B43_NPHY_CLASSCTL);
- tmp &= (B43_NPHY_CLASSCTL_CCKEN | B43_NPHY_CLASSCTL_OFDMEN |
- B43_NPHY_CLASSCTL_WAITEDEN);
- tmp &= ~mask;
- tmp |= (val & mask);
- b43_phy_maskset(dev, B43_NPHY_CLASSCTL, 0xFFF8, tmp);
-
- if (dev->dev->core_rev == 16)
- b43_mac_enable(dev);
-
- return tmp;
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */
-static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
-{
- struct b43_phy *phy = &dev->phy;
- struct b43_phy_n *nphy = phy->n;
-
- if (enable) {
- static const u16 clip[] = { 0xFFFF, 0xFFFF };
- if (nphy->deaf_count++ == 0) {
- nphy->classifier_state = b43_nphy_classifier(dev, 0, 0);
- b43_nphy_classifier(dev, 0x7, 0);
- b43_nphy_read_clip_detection(dev, nphy->clip_state);
- b43_nphy_write_clip_detection(dev, clip);
- }
- b43_nphy_reset_cca(dev);
- } else {
- if (--nphy->deaf_count == 0) {
- b43_nphy_classifier(dev, 0x7, nphy->classifier_state);
- b43_nphy_write_clip_detection(dev, nphy->clip_state);
- }
- }
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */
-static void b43_nphy_stop_playback(struct b43_wldev *dev)
-{
- struct b43_phy_n *nphy = dev->phy.n;
- u16 tmp;
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 1);
-
- tmp = b43_phy_read(dev, B43_NPHY_SAMP_STAT);
- if (tmp & 0x1)
- b43_phy_set(dev, B43_NPHY_SAMP_CMD, B43_NPHY_SAMP_CMD_STOP);
- else if (tmp & 0x2)
- b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x7FFF);
-
- b43_phy_mask(dev, B43_NPHY_SAMP_CMD, ~0x0004);
-
- if (nphy->bb_mult_save & 0x80000000) {
- tmp = nphy->bb_mult_save & 0xFFFF;
- b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
- nphy->bb_mult_save = 0;
- }
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 0);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SpurWar */
-static void b43_nphy_spur_workaround(struct b43_wldev *dev)
-{
- struct b43_phy_n *nphy = dev->phy.n;
-
- u8 channel = dev->phy.channel;
- int tone[2] = { 57, 58 };
- u32 noise[2] = { 0x3FF, 0x3FF };
-
- B43_WARN_ON(dev->phy.rev < 3);
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 1);
-
- if (nphy->gband_spurwar_en) {
- /* TODO: N PHY Adjust Analog Pfbw (7) */
- if (channel == 11 && dev->phy.is_40mhz)
- ; /* TODO: N PHY Adjust Min Noise Var(2, tone, noise)*/
- else
- ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
- /* TODO: N PHY Adjust CRS Min Power (0x1E) */
- }
-
- if (nphy->aband_spurwar_en) {
- if (channel == 54) {
- tone[0] = 0x20;
- noise[0] = 0x25F;
- } else if (channel == 38 || channel == 102 || channel == 118) {
- if (0 /* FIXME */) {
- tone[0] = 0x20;
- noise[0] = 0x21F;
- } else {
- tone[0] = 0;
- noise[0] = 0;
- }
- } else if (channel == 134) {
- tone[0] = 0x20;
- noise[0] = 0x21F;
- } else if (channel == 151) {
- tone[0] = 0x10;
- noise[0] = 0x23F;
- } else if (channel == 153 || channel == 161) {
- tone[0] = 0x30;
- noise[0] = 0x23F;
- } else {
- tone[0] = 0;
- noise[0] = 0;
- }
-
- if (!tone[0] && !noise[0])
- ; /* TODO: N PHY Adjust Min Noise Var(1, tone, noise)*/
- else
- ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
- }
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 0);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/AdjustLnaGainTbl */
-static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
-{
- struct b43_phy_n *nphy = dev->phy.n;
-
- u8 i;
- s16 tmp;
- u16 data[4];
- s16 gain[2];
- u16 minmax[2];
- static const u16 lna_gain[4] = { -2, 10, 19, 25 };
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 1);
-
- if (nphy->gain_boost) {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
- gain[0] = 6;
- gain[1] = 6;
- } else {
- tmp = 40370 - 315 * dev->phy.channel;
- gain[0] = ((tmp >> 13) + ((tmp >> 12) & 1));
- tmp = 23242 - 224 * dev->phy.channel;
- gain[1] = ((tmp >> 13) + ((tmp >> 12) & 1));
- }
- } else {
- gain[0] = 0;
- gain[1] = 0;
- }
-
- for (i = 0; i < 2; i++) {
- if (nphy->elna_gain_config) {
- data[0] = 19 + gain[i];
- data[1] = 25 + gain[i];
- data[2] = 25 + gain[i];
- data[3] = 25 + gain[i];
- } else {
- data[0] = lna_gain[0] + gain[i];
- data[1] = lna_gain[1] + gain[i];
- data[2] = lna_gain[2] + gain[i];
- data[3] = lna_gain[3] + gain[i];
- }
- b43_ntab_write_bulk(dev, B43_NTAB16(i, 8), 4, data);
-
- minmax[i] = 23 + gain[i];
- }
-
- b43_phy_maskset(dev, B43_NPHY_C1_MINMAX_GAIN, ~B43_NPHY_C1_MINGAIN,
- minmax[0] << B43_NPHY_C1_MINGAIN_SHIFT);
- b43_phy_maskset(dev, B43_NPHY_C2_MINMAX_GAIN, ~B43_NPHY_C2_MINGAIN,
- minmax[1] << B43_NPHY_C2_MINGAIN_SHIFT);
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 0);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
-static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
-{
- struct b43_phy_n *nphy = dev->phy.n;
- struct ssb_sprom *sprom = dev->dev->bus_sprom;
-
- /* PHY rev 0, 1, 2 */
- u8 i, j;
- u8 code;
- u16 tmp;
- u8 rfseq_events[3] = { 6, 8, 7 };
- u8 rfseq_delays[3] = { 10, 30, 1 };
-
- /* PHY rev >= 3 */
- bool ghz5;
- bool ext_lna;
- u16 rssi_gain;
- struct nphy_gain_ctl_workaround_entry *e;
- u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 };
- u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 };
-
- if (dev->phy.rev >= 3) {
- /* Prepare values */
- ghz5 = b43_phy_read(dev, B43_NPHY_BANDCTL)
- & B43_NPHY_BANDCTL_5GHZ;
- ext_lna = sprom->boardflags_lo & B43_BFL_EXTLNA;
- e = b43_nphy_get_gain_ctl_workaround_ent(dev, ghz5, ext_lna);
- if (ghz5 && dev->phy.rev >= 5)
- rssi_gain = 0x90;
- else
- rssi_gain = 0x50;
-
- b43_phy_set(dev, B43_NPHY_RXCTL, 0x0040);
-
- /* Set Clip 2 detect */
- b43_phy_set(dev, B43_NPHY_C1_CGAINI,
- B43_NPHY_C1_CGAINI_CL2DETECT);
- b43_phy_set(dev, B43_NPHY_C2_CGAINI,
- B43_NPHY_C2_CGAINI_CL2DETECT);
-
- b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAG1_IDAC,
- 0x17);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAG1_IDAC,
- 0x17);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAG2_IDAC, 0xF0);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAG2_IDAC, 0xF0);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_POLE, 0x00);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_POLE, 0x00);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_GAIN,
- rssi_gain);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_GAIN,
- rssi_gain);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAA1_IDAC,
- 0x17);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAA1_IDAC,
- 0x17);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAA2_IDAC, 0xFF);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAA2_IDAC, 0xFF);
-
- b43_ntab_write_bulk(dev, B43_NTAB8(0, 8), 4, e->lna1_gain);
- b43_ntab_write_bulk(dev, B43_NTAB8(1, 8), 4, e->lna1_gain);
- b43_ntab_write_bulk(dev, B43_NTAB8(0, 16), 4, e->lna2_gain);
- b43_ntab_write_bulk(dev, B43_NTAB8(1, 16), 4, e->lna2_gain);
- b43_ntab_write_bulk(dev, B43_NTAB8(0, 32), 10, e->gain_db);
- b43_ntab_write_bulk(dev, B43_NTAB8(1, 32), 10, e->gain_db);
- b43_ntab_write_bulk(dev, B43_NTAB8(2, 32), 10, e->gain_bits);
- b43_ntab_write_bulk(dev, B43_NTAB8(3, 32), 10, e->gain_bits);
- b43_ntab_write_bulk(dev, B43_NTAB8(0, 0x40), 6, lpf_gain);
- b43_ntab_write_bulk(dev, B43_NTAB8(1, 0x40), 6, lpf_gain);
- b43_ntab_write_bulk(dev, B43_NTAB8(2, 0x40), 6, lpf_bits);
- b43_ntab_write_bulk(dev, B43_NTAB8(3, 0x40), 6, lpf_bits);
-
- b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain);
- b43_phy_write(dev, 0x2A7, e->init_gain);
- b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x106), 2,
- e->rfseq_init);
- b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain);
-
- /* TODO: check defines. Do not match variables names */
- b43_phy_write(dev, B43_NPHY_C1_CLIP1_MEDGAIN, e->cliphi_gain);
- b43_phy_write(dev, 0x2A9, e->cliphi_gain);
- b43_phy_write(dev, B43_NPHY_C1_CLIP2_GAIN, e->clipmd_gain);
- b43_phy_write(dev, 0x2AB, e->clipmd_gain);
- b43_phy_write(dev, B43_NPHY_C2_CLIP1_HIGAIN, e->cliplo_gain);
- b43_phy_write(dev, 0x2AD, e->cliplo_gain);
-
- b43_phy_maskset(dev, 0x27D, 0xFF00, e->crsmin);
- b43_phy_maskset(dev, 0x280, 0xFF00, e->crsminl);
- b43_phy_maskset(dev, 0x283, 0xFF00, e->crsminu);
- b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, e->nbclip);
- b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, e->nbclip);
- b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
- ~B43_NPHY_C1_CLIPWBTHRES_CLIP2, e->wlclip);
- b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
- ~B43_NPHY_C2_CLIPWBTHRES_CLIP2, e->wlclip);
- b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
- } else {
- /* Set Clip 2 detect */
- b43_phy_set(dev, B43_NPHY_C1_CGAINI,
- B43_NPHY_C1_CGAINI_CL2DETECT);
- b43_phy_set(dev, B43_NPHY_C2_CGAINI,
- B43_NPHY_C2_CGAINI_CL2DETECT);
-
- /* Set narrowband clip threshold */
- b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, 0x84);
- b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, 0x84);
-
- if (!dev->phy.is_40mhz) {
- /* Set dwell lengths */
- b43_phy_write(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 0x002B);
- b43_phy_write(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 0x002B);
- b43_phy_write(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 0x0009);
- b43_phy_write(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 0x0009);
- }
-
- /* Set wideband clip 2 threshold */
- b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
- ~B43_NPHY_C1_CLIPWBTHRES_CLIP2,
- 21);
- b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
- ~B43_NPHY_C2_CLIPWBTHRES_CLIP2,
- 21);
-
- if (!dev->phy.is_40mhz) {
- b43_phy_maskset(dev, B43_NPHY_C1_CGAINI,
- ~B43_NPHY_C1_CGAINI_GAINBKOFF, 0x1);
- b43_phy_maskset(dev, B43_NPHY_C2_CGAINI,
- ~B43_NPHY_C2_CGAINI_GAINBKOFF, 0x1);
- b43_phy_maskset(dev, B43_NPHY_C1_CCK_CGAINI,
- ~B43_NPHY_C1_CCK_CGAINI_GAINBKOFF, 0x1);
- b43_phy_maskset(dev, B43_NPHY_C2_CCK_CGAINI,
- ~B43_NPHY_C2_CCK_CGAINI_GAINBKOFF, 0x1);
- }
-
- b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
-
- if (nphy->gain_boost) {
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ &&
- dev->phy.is_40mhz)
- code = 4;
- else
- code = 5;
- } else {
- code = dev->phy.is_40mhz ? 6 : 7;
- }
-
- /* Set HPVGA2 index */
- b43_phy_maskset(dev, B43_NPHY_C1_INITGAIN,
- ~B43_NPHY_C1_INITGAIN_HPVGA2,
- code << B43_NPHY_C1_INITGAIN_HPVGA2_SHIFT);
- b43_phy_maskset(dev, B43_NPHY_C2_INITGAIN,
- ~B43_NPHY_C2_INITGAIN_HPVGA2,
- code << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT);
-
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
- /* specs say about 2 loops, but wl does 4 */
- for (i = 0; i < 4; i++)
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
- (code << 8 | 0x7C));
-
- b43_nphy_adjust_lna_gain_table(dev);
-
- if (nphy->elna_gain_config) {
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0808);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
-
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0C08);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
-
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
- /* specs say about 2 loops, but wl does 4 */
- for (i = 0; i < 4; i++)
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
- (code << 8 | 0x74));
- }
-
- if (dev->phy.rev == 2) {
- for (i = 0; i < 4; i++) {
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
- (0x0400 * i) + 0x0020);
- for (j = 0; j < 21; j++) {
- tmp = j * (i < 2 ? 3 : 1);
- b43_phy_write(dev,
- B43_NPHY_TABLE_DATALO, tmp);
- }
- }
- }
-
- b43_nphy_set_rf_sequence(dev, 5,
- rfseq_events, rfseq_delays, 3);
- b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1,
- ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF,
- 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
-
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
- b43_phy_maskset(dev, B43_PHY_N(0xC5D),
- 0xFF80, 4);
- }
-}
-
-static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
-{
- struct b43_phy_n *nphy = dev->phy.n;
- struct ssb_sprom *sprom = dev->dev->bus_sprom;
-
- /* TX to RX */
- u8 tx2rx_events[9] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F };
- u8 tx2rx_delays[9] = { 8, 4, 2, 2, 4, 4, 6, 1 };
- /* RX to TX */
- u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
- 0x1F };
- u8 rx2tx_delays_ipa[9] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
- u8 rx2tx_events[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0x3, 0x4, 0x1F };
- u8 rx2tx_delays[9] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
-
- u16 tmp16;
- u32 tmp32;
-
- tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
- tmp32 &= 0xffffff;
- b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);
-
- b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x0125);
- b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x01B3);
- b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x0105);
- b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x016E);
- b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0x00CD);
- b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x0020);
-
- b43_phy_write(dev, B43_NPHY_C2_CLIP1_MEDGAIN, 0x000C);
- b43_phy_write(dev, 0x2AE, 0x000C);
-
- /* TX to RX */
- b43_nphy_set_rf_sequence(dev, 1, tx2rx_events, tx2rx_delays, 9);
-
- /* RX to TX */
- if (b43_nphy_ipa(dev))
- b43_nphy_set_rf_sequence(dev, 1, rx2tx_events_ipa,
- rx2tx_delays_ipa, 9);
- if (nphy->hw_phyrxchain != 3 &&
- nphy->hw_phyrxchain != nphy->hw_phytxchain) {
- if (b43_nphy_ipa(dev)) {
- rx2tx_delays[5] = 59;
- rx2tx_delays[6] = 1;
- rx2tx_events[7] = 0x1F;
- }
- b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays, 9);
- }
-
- tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ?
- 0x2 : 0x9C40;
- b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16);
-
- b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700);
-
- b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
- b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);
-
- b43_nphy_gain_ctrl_workarounds(dev);
-
- b43_ntab_write(dev, B43_NTAB32(8, 0), 2);
- b43_ntab_write(dev, B43_NTAB32(8, 16), 2);
-
- /* TODO */
-
- b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_AUX, 0x07);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88);
- b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
- b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
-
- /* N PHY WAR TX Chain Update with hw_phytxchain as argument */
-
- if ((sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
- b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
- (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
- b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
- tmp32 = 0x00088888;
- else
- tmp32 = 0x88888888;
- b43_ntab_write(dev, B43_NTAB32(30, 1), tmp32);
- b43_ntab_write(dev, B43_NTAB32(30, 2), tmp32);
- b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);
-
- if (dev->phy.rev == 4 &&
- b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
- b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
- 0x70);
- b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
- 0x70);
- }
-
- b43_phy_write(dev, 0x224, 0x039C);
- b43_phy_write(dev, 0x225, 0x0357);
- b43_phy_write(dev, 0x226, 0x0317);
- b43_phy_write(dev, 0x227, 0x02D7);
- b43_phy_write(dev, 0x228, 0x039C);
- b43_phy_write(dev, 0x229, 0x0357);
- b43_phy_write(dev, 0x22A, 0x0317);
- b43_phy_write(dev, 0x22B, 0x02D7);
- b43_phy_write(dev, 0x22C, 0x039C);
- b43_phy_write(dev, 0x22D, 0x0357);
- b43_phy_write(dev, 0x22E, 0x0317);
- b43_phy_write(dev, 0x22F, 0x02D7);
-}
-
-static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
-{
- struct ssb_sprom *sprom = dev->dev->bus_sprom;
- struct b43_phy *phy = &dev->phy;
- struct b43_phy_n *nphy = phy->n;
-
- u8 events1[7] = { 0x0, 0x1, 0x2, 0x8, 0x4, 0x5, 0x3 };
- u8 delays1[7] = { 0x8, 0x6, 0x6, 0x2, 0x4, 0x3C, 0x1 };
-
- u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
- u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
-
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
- nphy->band5g_pwrgain) {
- b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
- b43_radio_mask(dev, B2055_C2_TX_RF_SPARE, ~0x8);
- } else {
- b43_radio_set(dev, B2055_C1_TX_RF_SPARE, 0x8);
- b43_radio_set(dev, B2055_C2_TX_RF_SPARE, 0x8);
- }
-
- b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0x000A);
- b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0x000A);
- b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
- b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);
-
- if (dev->phy.rev < 2) {
- b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0x0000);
- b43_ntab_write(dev, B43_NTAB16(8, 0x18), 0x0000);
- b43_ntab_write(dev, B43_NTAB16(8, 0x07), 0x7AAB);
- b43_ntab_write(dev, B43_NTAB16(8, 0x17), 0x7AAB);
- b43_ntab_write(dev, B43_NTAB16(8, 0x06), 0x0800);
- b43_ntab_write(dev, B43_NTAB16(8, 0x16), 0x0800);
- }
-
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
-
- if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD &&
- dev->dev->board_type == 0x8B) {
- delays1[0] = 0x1;
- delays1[5] = 0x14;
- }
- b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7);
- b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7);
-
- b43_nphy_gain_ctrl_workarounds(dev);
-
- if (dev->phy.rev < 2) {
- if (b43_phy_read(dev, B43_NPHY_RXCTL) & 0x2)
- b43_hf_write(dev, b43_hf_read(dev) |
- B43_HF_MLADVW);
- } else if (dev->phy.rev == 2) {
- b43_phy_write(dev, B43_NPHY_CRSCHECK2, 0);
- b43_phy_write(dev, B43_NPHY_CRSCHECK3, 0);
- }
-
- if (dev->phy.rev < 2)
- b43_phy_mask(dev, B43_NPHY_SCRAM_SIGCTL,
- ~B43_NPHY_SCRAM_SIGCTL_SCM);
-
- /* Set phase track alpha and beta */
- b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x125);
- b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x1B3);
- b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x105);
- b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x16E);
- b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
- b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
-
- b43_phy_mask(dev, B43_NPHY_PIL_DW1,
- ~B43_NPHY_PIL_DW_64QAM & 0xFFFF);
- b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
- b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
- b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);
-
- if (dev->phy.rev == 2)
- b43_phy_set(dev, B43_NPHY_FINERX2_CGC,
- B43_NPHY_FINERX2_CGC_DECGC);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */
-static void b43_nphy_workarounds(struct b43_wldev *dev)
-{
- struct b43_phy *phy = &dev->phy;
- struct b43_phy_n *nphy = phy->n;
-
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
- b43_nphy_classifier(dev, 1, 0);
- else
- b43_nphy_classifier(dev, 1, 1);
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 1);
-
- b43_phy_set(dev, B43_NPHY_IQFLIP,
- B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
-
- if (dev->phy.rev >= 3)
- b43_nphy_workarounds_rev3plus(dev);
- else
- b43_nphy_workarounds_rev1_2(dev);
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, 0);
-}
+/**************************************************
+ * Samples
+ **************************************************/
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/LoadSampleTable */
static int b43_nphy_load_samples(struct b43_wldev *dev,
@@ -1825,7 +956,7 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
b43_phy_write(dev, B43_NPHY_SAMP_CMD, 1);
}
for (i = 0; i < 100; i++) {
- if (b43_phy_read(dev, B43_NPHY_RFSEQST) & 1) {
+ if (!(b43_phy_read(dev, B43_NPHY_RFSEQST) & 1)) {
i = 0;
break;
}
@@ -1837,333 +968,9 @@ static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
}
-/*
- * Transmits a known value for LO calibration
- * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone
- */
-static int b43_nphy_tx_tone(struct b43_wldev *dev, u32 freq, u16 max_val,
- bool iqmode, bool dac_test)
-{
- u16 samp = b43_nphy_gen_load_samples(dev, freq, max_val, dac_test);
- if (samp == 0)
- return -1;
- b43_nphy_run_samples(dev, samp, 0xFFFF, 0, iqmode, dac_test);
- return 0;
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */
-static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev)
-{
- struct b43_phy_n *nphy = dev->phy.n;
- int i, j;
- u32 tmp;
- u32 cur_real, cur_imag, real_part, imag_part;
-
- u16 buffer[7];
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, true);
-
- b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer);
-
- for (i = 0; i < 2; i++) {
- tmp = ((buffer[i * 2] & 0x3FF) << 10) |
- (buffer[i * 2 + 1] & 0x3FF);
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
- (((i + 26) << 10) | 320));
- for (j = 0; j < 128; j++) {
- b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
- ((tmp >> 16) & 0xFFFF));
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
- (tmp & 0xFFFF));
- }
- }
-
- for (i = 0; i < 2; i++) {
- tmp = buffer[5 + i];
- real_part = (tmp >> 8) & 0xFF;
- imag_part = (tmp & 0xFF);
- b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
- (((i + 26) << 10) | 448));
-
- if (dev->phy.rev >= 3) {
- cur_real = real_part;
- cur_imag = imag_part;
- tmp = ((cur_real & 0xFF) << 8) | (cur_imag & 0xFF);
- }
-
- for (j = 0; j < 128; j++) {
- if (dev->phy.rev < 3) {
- cur_real = (real_part * loscale[j] + 128) >> 8;
- cur_imag = (imag_part * loscale[j] + 128) >> 8;
- tmp = ((cur_real & 0xFF) << 8) |
- (cur_imag & 0xFF);
- }
- b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
- ((tmp >> 16) & 0xFFFF));
- b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
- (tmp & 0xFFFF));
- }
- }
-
- if (dev->phy.rev >= 3) {
- b43_shm_write16(dev, B43_SHM_SHARED,
- B43_SHM_SH_NPHY_TXPWR_INDX0, 0xFFFF);
- b43_shm_write16(dev, B43_SHM_SHARED,
- B43_SHM_SH_NPHY_TXPWR_INDX1, 0xFFFF);
- }
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, false);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRfSeq */
-static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
- u8 *events, u8 *delays, u8 length)
-{
- struct b43_phy_n *nphy = dev->phy.n;
- u8 i;
- u8 end = (dev->phy.rev >= 3) ? 0x1F : 0x0F;
- u16 offset1 = cmd << 4;
- u16 offset2 = offset1 + 0x80;
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, true);
-
- b43_ntab_write_bulk(dev, B43_NTAB8(7, offset1), length, events);
- b43_ntab_write_bulk(dev, B43_NTAB8(7, offset2), length, delays);
-
- for (i = length; i < 16; i++) {
- b43_ntab_write(dev, B43_NTAB8(7, offset1 + i), end);
- b43_ntab_write(dev, B43_NTAB8(7, offset2 + i), 1);
- }
-
- if (nphy->hang_avoid)
- b43_nphy_stay_in_carrier_search(dev, false);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */
-static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
- enum b43_nphy_rf_sequence seq)
-{
- static const u16 trigger[] = {
- [B43_RFSEQ_RX2TX] = B43_NPHY_RFSEQTR_RX2TX,
- [B43_RFSEQ_TX2RX] = B43_NPHY_RFSEQTR_TX2RX,
- [B43_RFSEQ_RESET2RX] = B43_NPHY_RFSEQTR_RST2RX,
- [B43_RFSEQ_UPDATE_GAINH] = B43_NPHY_RFSEQTR_UPGH,
- [B43_RFSEQ_UPDATE_GAINL] = B43_NPHY_RFSEQTR_UPGL,
- [B43_RFSEQ_UPDATE_GAINU] = B43_NPHY_RFSEQTR_UPGU,
- };
- int i;
- u16 seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE);
-
- B43_WARN_ON(seq >= ARRAY_SIZE(trigger));
-
- b43_phy_set(dev, B43_NPHY_RFSEQMODE,
- B43_NPHY_RFSEQMODE_CAOVER | B43_NPHY_RFSEQMODE_TROVER);
- b43_phy_set(dev, B43_NPHY_RFSEQTR, trigger[seq]);
- for (i = 0; i < 200; i++) {
- if (!(b43_phy_read(dev, B43_NPHY_RFSEQST) & trigger[seq]))
- goto ok;
- msleep(1);
- }
- b43err(dev->wl, "RF sequence status timeout\n");
-ok:
- b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */
-static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
- u16 value, u8 core, bool off)
-{
- int i;
- u8 index = fls(field);
- u8 addr, en_addr, val_addr;
- /* we expect only one bit set */
- B43_WARN_ON(field & (~(1 << (index - 1))));
-
- if (dev->phy.rev >= 3) {
- const struct nphy_rf_control_override_rev3 *rf_ctrl;
- for (i = 0; i < 2; i++) {
- if (index == 0 || index == 16) {
- b43err(dev->wl,
- "Unsupported RF Ctrl Override call\n");
- return;
- }
-
- rf_ctrl = &tbl_rf_control_override_rev3[index - 1];
- en_addr = B43_PHY_N((i == 0) ?
- rf_ctrl->en_addr0 : rf_ctrl->en_addr1);
- val_addr = B43_PHY_N((i == 0) ?
- rf_ctrl->val_addr0 : rf_ctrl->val_addr1);
-
- if (off) {
- b43_phy_mask(dev, en_addr, ~(field));
- b43_phy_mask(dev, val_addr,
- ~(rf_ctrl->val_mask));
- } else {
- if (core == 0 || ((1 << core) & i) != 0) {
- b43_phy_set(dev, en_addr, field);
- b43_phy_maskset(dev, val_addr,
- ~(rf_ctrl->val_mask),
- (value << rf_ctrl->val_shift));
- }
- }
- }
- } else {
- const struct nphy_rf_control_override_rev2 *rf_ctrl;
- if (off) {
- b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~(field));
- value = 0;
- } else {
- b43_phy_set(dev, B43_NPHY_RFCTL_OVER, field);
- }
-
- for (i = 0; i < 2; i++) {
- if (index <= 1 || index == 16) {
- b43err(dev->wl,
- "Unsupported RF Ctrl Override call\n");
- return;
- }
-
- if (index == 2 || index == 10 ||
- (index >= 13 && index <= 15)) {
- core = 1;
- }
-
- rf_ctrl = &tbl_rf_control_override_rev2[index - 2];
- addr = B43_PHY_N((i == 0) ?
- rf_ctrl->addr0 : rf_ctrl->addr1);
-
- if ((core & (1 << i)) != 0)
- b43_phy_maskset(dev, addr, ~(rf_ctrl->bmask),
- (value << rf_ctrl->shift));
-
- b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1);
- b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
- B43_NPHY_RFCTL_CMD_START);
- udelay(1);
- b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, 0xFFFE);
- }
- }
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */
-static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
- u16 value, u8 core)
-{
- u8 i, j;
- u16 reg, tmp, val;
-
- B43_WARN_ON(dev->phy.rev < 3);
- B43_WARN_ON(field > 4);
-
- for (i = 0; i < 2; i++) {
- if ((core == 1 && i == 1) || (core == 2 && !i))
- continue;
-
- reg = (i == 0) ?
- B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2;
- b43_phy_mask(dev, reg, 0xFBFF);
-
- switch (field) {
- case 0:
- b43_phy_write(dev, reg, 0);
- b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
- break;
- case 1:
- if (!i) {
- b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC1,
- 0xFC3F, (value << 6));
- b43_phy_maskset(dev, B43_NPHY_TXF_40CO_B1S1,
- 0xFFFE, 1);
- b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
- B43_NPHY_RFCTL_CMD_START);
- for (j = 0; j < 100; j++) {
- if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_START) {
- j = 0;
- break;
- }
- udelay(10);
- }
- if (j)
- b43err(dev->wl,
- "intc override timeout\n");
- b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1,
- 0xFFFE);
- } else {
- b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC2,
- 0xFC3F, (value << 6));
- b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER,
- 0xFFFE, 1);
- b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
- B43_NPHY_RFCTL_CMD_RXTX);
- for (j = 0; j < 100; j++) {
- if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_RXTX) {
- j = 0;
- break;
- }
- udelay(10);
- }
- if (j)
- b43err(dev->wl,
- "intc override timeout\n");
- b43_phy_mask(dev, B43_NPHY_RFCTL_OVER,
- 0xFFFE);
- }
- break;
- case 2:
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
- tmp = 0x0020;
- val = value << 5;
- } else {
- tmp = 0x0010;
- val = value << 4;
- }
- b43_phy_maskset(dev, reg, ~tmp, val);
- break;
- case 3:
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
- tmp = 0x0001;
- val = value;
- } else {
- tmp = 0x0004;
- val = value << 2;
- }
- b43_phy_maskset(dev, reg, ~tmp, val);
- break;
- case 4:
- if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
- tmp = 0x0002;
- val = value << 1;
- } else {
- tmp = 0x0008;
- val = value << 3;
- }
- b43_phy_maskset(dev, reg, ~tmp, val);
- break;
- }
- }
-}
-
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BPHYInit */
-static void b43_nphy_bphy_init(struct b43_wldev *dev)
-{
- unsigned int i;
- u16 val;
-
- val = 0x1E1F;
- for (i = 0; i < 16; i++) {
- b43_phy_write(dev, B43_PHY_N_BMODE(0x88 + i), val);
- val -= 0x202;
- }
- val = 0x3E3F;
- for (i = 0; i < 16; i++) {
- b43_phy_write(dev, B43_PHY_N_BMODE(0x98 + i), val);
- val -= 0x202;
- }
- b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668);
-}
+/**************************************************
+ * RSSI
+ **************************************************/
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi */
static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale,
@@ -2233,67 +1040,6 @@ static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale,
b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TSSI, tmp);
}
-static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
-{
- u16 val;
-
- if (type < 3)
- val = 0;
- else if (type == 6)
- val = 1;
- else if (type == 3)
- val = 2;
- else
- val = 3;
-
- val = (val << 12) | (val << 14);
- b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, val);
- b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, val);
-
- if (type < 3) {
- b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO1, 0xFFCF,
- (type + 1) << 4);
- b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO2, 0xFFCF,
- (type + 1) << 4);
- }
-
- if (code == 0) {
- b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x3000);
- if (type < 3) {
- b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
- ~(B43_NPHY_RFCTL_CMD_RXEN |
- B43_NPHY_RFCTL_CMD_CORESEL));
- b43_phy_mask(dev, B43_NPHY_RFCTL_OVER,
- ~(0x1 << 12 |
- 0x1 << 5 |
- 0x1 << 1 |
- 0x1));
- b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
- ~B43_NPHY_RFCTL_CMD_START);
- udelay(20);
- b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1);
- }
- } else {
- b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x3000);
- if (type < 3) {
- b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD,
- ~(B43_NPHY_RFCTL_CMD_RXEN |
- B43_NPHY_RFCTL_CMD_CORESEL),
- (B43_NPHY_RFCTL_CMD_RXEN |
- code << B43_NPHY_RFCTL_CMD_CORESEL_SHIFT));
- b43_phy_set(dev, B43_NPHY_RFCTL_OVER,
- (0x1 << 12 |
- 0x1 << 5 |
- 0x1 << 1 |
- 0x1));
- b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
- B43_NPHY_RFCTL_CMD_START);
- udelay(20);
- b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1);
- }
- }
-}
-
static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
{
u8 i;
@@ -2377,6 +1123,67 @@ static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
}
}
+static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
+{
+ u16 val;
+
+ if (type < 3)
+ val = 0;
+ else if (type == 6)
+ val = 1;
+ else if (type == 3)
+ val = 2;
+ else
+ val = 3;
+
+ val = (val << 12) | (val << 14);
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, val);
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, val);
+
+ if (type < 3) {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO1, 0xFFCF,
+ (type + 1) << 4);
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO2, 0xFFCF,
+ (type + 1) << 4);
+ }
+
+ if (code == 0) {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x3000);
+ if (type < 3) {
+ b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
+ ~(B43_NPHY_RFCTL_CMD_RXEN |
+ B43_NPHY_RFCTL_CMD_CORESEL));
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER,
+ ~(0x1 << 12 |
+ 0x1 << 5 |
+ 0x1 << 1 |
+ 0x1));
+ b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
+ ~B43_NPHY_RFCTL_CMD_START);
+ udelay(20);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1);
+ }
+ } else {
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x3000);
+ if (type < 3) {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD,
+ ~(B43_NPHY_RFCTL_CMD_RXEN |
+ B43_NPHY_RFCTL_CMD_CORESEL),
+ (B43_NPHY_RFCTL_CMD_RXEN |
+ code << B43_NPHY_RFCTL_CMD_CORESEL_SHIFT));
+ b43_phy_set(dev, B43_NPHY_RFCTL_OVER,
+ (0x1 << 12 |
+ 0x1 << 5 |
+ 0x1 << 1 |
+ 0x1));
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_START);
+ udelay(20);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1);
+ }
+ }
+}
+
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSISel */
static void b43_nphy_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
{
@@ -2686,6 +1493,1413 @@ static void b43_nphy_rssi_cal(struct b43_wldev *dev)
}
}
+/**************************************************
+ * Workarounds
+ **************************************************/
+
+static void b43_nphy_gain_ctl_workarounds_rev3plus(struct b43_wldev *dev)
+{
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
+
+ bool ghz5;
+ bool ext_lna;
+ u16 rssi_gain;
+ struct nphy_gain_ctl_workaround_entry *e;
+ u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 };
+ u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 };
+
+ /* Prepare values */
+ ghz5 = b43_phy_read(dev, B43_NPHY_BANDCTL)
+ & B43_NPHY_BANDCTL_5GHZ;
+ ext_lna = ghz5 ? sprom->boardflags_hi & B43_BFH_EXTLNA_5GHZ :
+ sprom->boardflags_lo & B43_BFL_EXTLNA;
+ e = b43_nphy_get_gain_ctl_workaround_ent(dev, ghz5, ext_lna);
+ if (ghz5 && dev->phy.rev >= 5)
+ rssi_gain = 0x90;
+ else
+ rssi_gain = 0x50;
+
+ b43_phy_set(dev, B43_NPHY_RXCTL, 0x0040);
+
+ /* Set Clip 2 detect */
+ b43_phy_set(dev, B43_NPHY_C1_CGAINI,
+ B43_NPHY_C1_CGAINI_CL2DETECT);
+ b43_phy_set(dev, B43_NPHY_C2_CGAINI,
+ B43_NPHY_C2_CGAINI_CL2DETECT);
+
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAG1_IDAC,
+ 0x17);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAG1_IDAC,
+ 0x17);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAG2_IDAC, 0xF0);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAG2_IDAC, 0xF0);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_POLE, 0x00);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_POLE, 0x00);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_RSSI_GAIN,
+ rssi_gain);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_RSSI_GAIN,
+ rssi_gain);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_BIASPOLE_LNAA1_IDAC,
+ 0x17);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_BIASPOLE_LNAA1_IDAC,
+ 0x17);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAA2_IDAC, 0xFF);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAA2_IDAC, 0xFF);
+
+ b43_ntab_write_bulk(dev, B43_NTAB8(0, 8), 4, e->lna1_gain);
+ b43_ntab_write_bulk(dev, B43_NTAB8(1, 8), 4, e->lna1_gain);
+ b43_ntab_write_bulk(dev, B43_NTAB8(0, 16), 4, e->lna2_gain);
+ b43_ntab_write_bulk(dev, B43_NTAB8(1, 16), 4, e->lna2_gain);
+ b43_ntab_write_bulk(dev, B43_NTAB8(0, 32), 10, e->gain_db);
+ b43_ntab_write_bulk(dev, B43_NTAB8(1, 32), 10, e->gain_db);
+ b43_ntab_write_bulk(dev, B43_NTAB8(2, 32), 10, e->gain_bits);
+ b43_ntab_write_bulk(dev, B43_NTAB8(3, 32), 10, e->gain_bits);
+ b43_ntab_write_bulk(dev, B43_NTAB8(0, 0x40), 6, lpf_gain);
+ b43_ntab_write_bulk(dev, B43_NTAB8(1, 0x40), 6, lpf_gain);
+ b43_ntab_write_bulk(dev, B43_NTAB8(2, 0x40), 6, lpf_bits);
+ b43_ntab_write_bulk(dev, B43_NTAB8(3, 0x40), 6, lpf_bits);
+
+ b43_phy_write(dev, B43_NPHY_C1_INITGAIN, e->init_gain);
+ b43_phy_write(dev, 0x2A7, e->init_gain);
+ b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x106), 2,
+ e->rfseq_init);
+
+	/* TODO: check the defines; they do not match the variable names */
+ b43_phy_write(dev, B43_NPHY_C1_CLIP1_MEDGAIN, e->cliphi_gain);
+ b43_phy_write(dev, 0x2A9, e->cliphi_gain);
+ b43_phy_write(dev, B43_NPHY_C1_CLIP2_GAIN, e->clipmd_gain);
+ b43_phy_write(dev, 0x2AB, e->clipmd_gain);
+ b43_phy_write(dev, B43_NPHY_C2_CLIP1_HIGAIN, e->cliplo_gain);
+ b43_phy_write(dev, 0x2AD, e->cliplo_gain);
+
+ b43_phy_maskset(dev, 0x27D, 0xFF00, e->crsmin);
+ b43_phy_maskset(dev, 0x280, 0xFF00, e->crsminl);
+ b43_phy_maskset(dev, 0x283, 0xFF00, e->crsminu);
+ b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, e->nbclip);
+ b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, e->nbclip);
+ b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
+ ~B43_NPHY_C1_CLIPWBTHRES_CLIP2, e->wlclip);
+ b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
+ ~B43_NPHY_C2_CLIPWBTHRES_CLIP2, e->wlclip);
+ b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
+}
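
Nearly every line of this workaround goes through the same three register helpers, b43_phy_set(), b43_phy_mask() and b43_phy_maskset(), which are plain read-modify-write operations on a 16-bit PHY register. The standalone sketch below mirrors that semantics on a toy register array; the toy register file and function names are illustrative assumptions, not the driver's real I/O path.

#include <stdio.h>
#include <stdint.h>

/* Toy 16-bit register file standing in for the PHY register space. */
static uint16_t phy_regs[0x400];

static uint16_t phy_read(unsigned reg)            { return phy_regs[reg]; }
static void phy_write(unsigned reg, uint16_t val) { phy_regs[reg] = val; }

/* Read-modify-write, matching the b43_phy_maskset(dev, reg, mask, set) pattern. */
static void phy_maskset(unsigned reg, uint16_t mask, uint16_t set)
{
        phy_write(reg, (phy_read(reg) & mask) | set);
}
static void phy_set(unsigned reg, uint16_t bits)  { phy_maskset(reg, 0xFFFF, bits); }
static void phy_mask(unsigned reg, uint16_t mask) { phy_maskset(reg, mask, 0); }

int main(void)
{
        phy_write(0x27D, 0x12AB);
        phy_maskset(0x27D, 0xFF00, 0x003C); /* keep the high byte, replace the low byte */
        printf("0x%04X\n", (unsigned)phy_read(0x27D)); /* prints 0x123C */
        return 0;
}
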
+
+static void b43_nphy_gain_ctl_workarounds_rev1_2(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u8 i, j;
+ u8 code;
+ u16 tmp;
+ u8 rfseq_events[3] = { 6, 8, 7 };
+ u8 rfseq_delays[3] = { 10, 30, 1 };
+
+ /* Set Clip 2 detect */
+ b43_phy_set(dev, B43_NPHY_C1_CGAINI, B43_NPHY_C1_CGAINI_CL2DETECT);
+ b43_phy_set(dev, B43_NPHY_C2_CGAINI, B43_NPHY_C2_CGAINI_CL2DETECT);
+
+ /* Set narrowband clip threshold */
+ b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, 0x84);
+ b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, 0x84);
+
+ if (!dev->phy.is_40mhz) {
+ /* Set dwell lengths */
+ b43_phy_write(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 0x002B);
+ b43_phy_write(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 0x002B);
+ b43_phy_write(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 0x0009);
+ b43_phy_write(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 0x0009);
+ }
+
+ /* Set wideband clip 2 threshold */
+ b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
+ ~B43_NPHY_C1_CLIPWBTHRES_CLIP2, 21);
+ b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
+ ~B43_NPHY_C2_CLIPWBTHRES_CLIP2, 21);
+
+ if (!dev->phy.is_40mhz) {
+ b43_phy_maskset(dev, B43_NPHY_C1_CGAINI,
+ ~B43_NPHY_C1_CGAINI_GAINBKOFF, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_C2_CGAINI,
+ ~B43_NPHY_C2_CGAINI_GAINBKOFF, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_C1_CCK_CGAINI,
+ ~B43_NPHY_C1_CCK_CGAINI_GAINBKOFF, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_C2_CCK_CGAINI,
+ ~B43_NPHY_C2_CCK_CGAINI_GAINBKOFF, 0x1);
+ }
+
+ b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
+
+ if (nphy->gain_boost) {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ &&
+ dev->phy.is_40mhz)
+ code = 4;
+ else
+ code = 5;
+ } else {
+ code = dev->phy.is_40mhz ? 6 : 7;
+ }
+
+ /* Set HPVGA2 index */
+ b43_phy_maskset(dev, B43_NPHY_C1_INITGAIN, ~B43_NPHY_C1_INITGAIN_HPVGA2,
+ code << B43_NPHY_C1_INITGAIN_HPVGA2_SHIFT);
+ b43_phy_maskset(dev, B43_NPHY_C2_INITGAIN, ~B43_NPHY_C2_INITGAIN_HPVGA2,
+ code << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
+ /* specs say about 2 loops, but wl does 4 */
+ for (i = 0; i < 4; i++)
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, (code << 8 | 0x7C));
+
+ b43_nphy_adjust_lna_gain_table(dev);
+
+ if (nphy->elna_gain_config) {
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0808);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0C08);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
+ /* specs say about 2 loops, but wl does 4 */
+ for (i = 0; i < 4; i++)
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (code << 8 | 0x74));
+ }
+
+ if (dev->phy.rev == 2) {
+ for (i = 0; i < 4; i++) {
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
+ (0x0400 * i) + 0x0020);
+ for (j = 0; j < 21; j++) {
+ tmp = j * (i < 2 ? 3 : 1);
+ b43_phy_write(dev,
+ B43_NPHY_TABLE_DATALO, tmp);
+ }
+ }
+ }
+
+ b43_nphy_set_rf_sequence(dev, 5, rfseq_events, rfseq_delays, 3);
+ b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1,
+ ~B43_NPHY_OVER_DGAIN_CCKDGECV & 0xFFFF,
+ 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ b43_phy_maskset(dev, B43_PHY_N(0xC5D), 0xFF80, 4);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
+static void b43_nphy_gain_ctl_workarounds(struct b43_wldev *dev)
+{
+ if (dev->phy.rev >= 3)
+ b43_nphy_gain_ctl_workarounds_rev3plus(dev);
+ else
+ b43_nphy_gain_ctl_workarounds_rev1_2(dev);
+}
+
+static void b43_nphy_workarounds_rev3plus(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
+
+ /* TX to RX */
+ u8 tx2rx_events[8] = { 0x4, 0x3, 0x6, 0x5, 0x2, 0x1, 0x8, 0x1F };
+ u8 tx2rx_delays[8] = { 8, 4, 2, 2, 4, 4, 6, 1 };
+ /* RX to TX */
+ u8 rx2tx_events_ipa[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0xF, 0x3,
+ 0x1F };
+ u8 rx2tx_delays_ipa[9] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 };
+ u8 rx2tx_events[9] = { 0x0, 0x1, 0x2, 0x8, 0x5, 0x6, 0x3, 0x4, 0x1F };
+ u8 rx2tx_delays[9] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 };
+
+ u16 tmp16;
+ u32 tmp32;
+
+ b43_phy_write(dev, 0x23f, 0x1f8);
+ b43_phy_write(dev, 0x240, 0x1f8);
+
+ tmp32 = b43_ntab_read(dev, B43_NTAB32(30, 0));
+ tmp32 &= 0xffffff;
+ b43_ntab_write(dev, B43_NTAB32(30, 0), tmp32);
+
+ b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x0125);
+ b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x01B3);
+ b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x0105);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x016E);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0x00CD);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x0020);
+
+ b43_phy_write(dev, B43_NPHY_C2_CLIP1_MEDGAIN, 0x000C);
+ b43_phy_write(dev, 0x2AE, 0x000C);
+
+ /* TX to RX */
+ b43_nphy_set_rf_sequence(dev, 1, tx2rx_events, tx2rx_delays,
+ ARRAY_SIZE(tx2rx_events));
+
+ /* RX to TX */
+ if (b43_nphy_ipa(dev))
+ b43_nphy_set_rf_sequence(dev, 0, rx2tx_events_ipa,
+ rx2tx_delays_ipa, ARRAY_SIZE(rx2tx_events_ipa));
+ if (nphy->hw_phyrxchain != 3 &&
+ nphy->hw_phyrxchain != nphy->hw_phytxchain) {
+ if (b43_nphy_ipa(dev)) {
+ rx2tx_delays[5] = 59;
+ rx2tx_delays[6] = 1;
+ rx2tx_events[7] = 0x1F;
+ }
+ b43_nphy_set_rf_sequence(dev, 1, rx2tx_events, rx2tx_delays,
+ ARRAY_SIZE(rx2tx_events));
+ }
+
+ tmp16 = (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ?
+ 0x2 : 0x9C40;
+ b43_phy_write(dev, B43_NPHY_ENDROP_TLEN, tmp16);
+
+ b43_phy_maskset(dev, 0x294, 0xF0FF, 0x0700);
+
+ b43_ntab_write(dev, B43_NTAB32(16, 3), 0x18D);
+ b43_ntab_write(dev, B43_NTAB32(16, 127), 0x18D);
+
+ b43_nphy_gain_ctl_workarounds(dev);
+
+ b43_ntab_write(dev, B43_NTAB16(8, 0), 2);
+ b43_ntab_write(dev, B43_NTAB16(8, 16), 2);
+
+ /* TODO */
+
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_MAST_BIAS, 0x00);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_MAST_BIAS, 0x00);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_MAIN, 0x06);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_BIAS_AUX, 0x07);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_BIAS_AUX, 0x07);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_LOB_BIAS, 0x88);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_LOB_BIAS, 0x88);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXA_CMFB_IDAC, 0x00);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXA_CMFB_IDAC, 0x00);
+ b43_radio_write(dev, B2056_RX0 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
+ b43_radio_write(dev, B2056_RX1 | B2056_RX_MIXG_CMFB_IDAC, 0x00);
+
+ /* N PHY WAR TX Chain Update with hw_phytxchain as argument */
+
+ if ((sprom->boardflags2_lo & B43_BFL2_APLL_WAR &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
+ (sprom->boardflags2_lo & B43_BFL2_GPLL_WAR &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ))
+ tmp32 = 0x00088888;
+ else
+ tmp32 = 0x88888888;
+ b43_ntab_write(dev, B43_NTAB32(30, 1), tmp32);
+ b43_ntab_write(dev, B43_NTAB32(30, 2), tmp32);
+ b43_ntab_write(dev, B43_NTAB32(30, 3), tmp32);
+
+ if (dev->phy.rev == 4 &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ b43_radio_write(dev, B2056_TX0 | B2056_TX_GMBB_IDAC,
+ 0x70);
+ b43_radio_write(dev, B2056_TX1 | B2056_TX_GMBB_IDAC,
+ 0x70);
+ }
+
+ b43_phy_write(dev, 0x224, 0x03eb);
+ b43_phy_write(dev, 0x225, 0x03eb);
+ b43_phy_write(dev, 0x226, 0x0341);
+ b43_phy_write(dev, 0x227, 0x0341);
+ b43_phy_write(dev, 0x228, 0x042b);
+ b43_phy_write(dev, 0x229, 0x042b);
+ b43_phy_write(dev, 0x22a, 0x0381);
+ b43_phy_write(dev, 0x22b, 0x0381);
+ b43_phy_write(dev, 0x22c, 0x042b);
+ b43_phy_write(dev, 0x22d, 0x042b);
+ b43_phy_write(dev, 0x22e, 0x0381);
+ b43_phy_write(dev, 0x22f, 0x0381);
+}
+
+static void b43_nphy_workarounds_rev1_2(struct b43_wldev *dev)
+{
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
+ struct b43_phy *phy = &dev->phy;
+ struct b43_phy_n *nphy = phy->n;
+
+ u8 events1[7] = { 0x0, 0x1, 0x2, 0x8, 0x4, 0x5, 0x3 };
+ u8 delays1[7] = { 0x8, 0x6, 0x6, 0x2, 0x4, 0x3C, 0x1 };
+
+ u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
+ u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
+ nphy->band5g_pwrgain) {
+ b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
+ b43_radio_mask(dev, B2055_C2_TX_RF_SPARE, ~0x8);
+ } else {
+ b43_radio_set(dev, B2055_C1_TX_RF_SPARE, 0x8);
+ b43_radio_set(dev, B2055_C2_TX_RF_SPARE, 0x8);
+ }
+
+ b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0x000A);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0x000A);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);
+
+ if (dev->phy.rev < 2) {
+ b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0x0000);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x18), 0x0000);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x07), 0x7AAB);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x17), 0x7AAB);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x06), 0x0800);
+ b43_ntab_write(dev, B43_NTAB16(8, 0x16), 0x0800);
+ }
+
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
+
+ if (sprom->boardflags2_lo & B43_BFL2_SKWRKFEM_BRD &&
+ dev->dev->board_type == 0x8B) {
+ delays1[0] = 0x1;
+ delays1[5] = 0x14;
+ }
+ b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7);
+ b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7);
+
+ b43_nphy_gain_ctl_workarounds(dev);
+
+ if (dev->phy.rev < 2) {
+ if (b43_phy_read(dev, B43_NPHY_RXCTL) & 0x2)
+ b43_hf_write(dev, b43_hf_read(dev) |
+ B43_HF_MLADVW);
+ } else if (dev->phy.rev == 2) {
+ b43_phy_write(dev, B43_NPHY_CRSCHECK2, 0);
+ b43_phy_write(dev, B43_NPHY_CRSCHECK3, 0);
+ }
+
+ if (dev->phy.rev < 2)
+ b43_phy_mask(dev, B43_NPHY_SCRAM_SIGCTL,
+ ~B43_NPHY_SCRAM_SIGCTL_SCM);
+
+ /* Set phase track alpha and beta */
+ b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x125);
+ b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x1B3);
+ b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x105);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x16E);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
+
+ b43_phy_mask(dev, B43_NPHY_PIL_DW1,
+ ~B43_NPHY_PIL_DW_64QAM & 0xFFFF);
+ b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
+ b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
+ b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);
+
+ if (dev->phy.rev == 2)
+ b43_phy_set(dev, B43_NPHY_FINERX2_CGC,
+ B43_NPHY_FINERX2_CGC_DECGC);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */
+static void b43_nphy_workarounds(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+ struct b43_phy_n *nphy = phy->n;
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ b43_nphy_classifier(dev, 1, 0);
+ else
+ b43_nphy_classifier(dev, 1, 1);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ b43_phy_set(dev, B43_NPHY_IQFLIP,
+ B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
+
+ if (dev->phy.rev >= 3)
+ b43_nphy_workarounds_rev3plus(dev);
+ else
+ b43_nphy_workarounds_rev1_2(dev);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+/**************************************************
+ * Tx/Rx common
+ **************************************************/
+
+/*
+ * Transmits a known value for LO calibration
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone
+ */
+static int b43_nphy_tx_tone(struct b43_wldev *dev, u32 freq, u16 max_val,
+ bool iqmode, bool dac_test)
+{
+ u16 samp = b43_nphy_gen_load_samples(dev, freq, max_val, dac_test);
+ if (samp == 0)
+ return -1;
+ b43_nphy_run_samples(dev, samp, 0xFFFF, 0, iqmode, dac_test);
+ return 0;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Chains */
+static void b43_nphy_update_txrx_chain(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ bool override = false;
+ u16 chain = 0x33;
+
+ if (nphy->txrx_chain == 0) {
+ chain = 0x11;
+ override = true;
+ } else if (nphy->txrx_chain == 1) {
+ chain = 0x22;
+ override = true;
+ }
+
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
+ ~(B43_NPHY_RFSEQCA_TXEN | B43_NPHY_RFSEQCA_RXEN),
+ chain);
+
+ if (override)
+ b43_phy_set(dev, B43_NPHY_RFSEQMODE,
+ B43_NPHY_RFSEQMODE_CAOVER);
+ else
+ b43_phy_mask(dev, B43_NPHY_RFSEQMODE,
+ ~B43_NPHY_RFSEQMODE_CAOVER);
+}
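
The chain value written into B43_NPHY_RFSEQCA packs two per-core enable bitmasks into adjacent 4-bit fields, which is why 0x11 selects core 0 only, 0x22 core 1 only and 0x33 both cores. The exact field positions are inferred from the constants above, so treat the layout in this sketch as an assumption.

#include <stdio.h>

/* Assumed layout: low nibble = TX core mask, next nibble = RX core mask. */
static unsigned rfseqca_chain(unsigned tx_cores, unsigned rx_cores)
{
        return ((rx_cores & 0xF) << 4) | (tx_cores & 0xF);
}

int main(void)
{
        printf("core 0 only: 0x%02X\n", rfseqca_chain(0x1, 0x1)); /* 0x11 */
        printf("core 1 only: 0x%02X\n", rfseqca_chain(0x2, 0x2)); /* 0x22 */
        printf("both cores:  0x%02X\n", rfseqca_chain(0x3, 0x3)); /* 0x33 */
        return 0;
}
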
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */
+static void b43_nphy_stop_playback(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u16 tmp;
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ tmp = b43_phy_read(dev, B43_NPHY_SAMP_STAT);
+ if (tmp & 0x1)
+ b43_phy_set(dev, B43_NPHY_SAMP_CMD, B43_NPHY_SAMP_CMD_STOP);
+ else if (tmp & 0x2)
+ b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x7FFF);
+
+ b43_phy_mask(dev, B43_NPHY_SAMP_CMD, ~0x0004);
+
+ if (nphy->bb_mult_save & 0x80000000) {
+ tmp = nphy->bb_mult_save & 0xFFFF;
+ b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
+ nphy->bb_mult_save = 0;
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */
+static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core,
+ struct nphy_txgains target,
+ struct nphy_iqcal_params *params)
+{
+ int i, j, indx;
+ u16 gain;
+
+ if (dev->phy.rev >= 3) {
+ params->txgm = target.txgm[core];
+ params->pga = target.pga[core];
+ params->pad = target.pad[core];
+ params->ipa = target.ipa[core];
+ params->cal_gain = (params->txgm << 12) | (params->pga << 8) |
+ (params->pad << 4) | (params->ipa);
+ for (j = 0; j < 5; j++)
+ params->ncorr[j] = 0x79;
+ } else {
+ gain = (target.pad[core]) | (target.pga[core] << 4) |
+ (target.txgm[core] << 8);
+
+ indx = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ?
+ 1 : 0;
+ for (i = 0; i < 9; i++)
+ if (tbl_iqcal_gainparams[indx][i][0] == gain)
+ break;
+ i = min(i, 8);
+
+ params->txgm = tbl_iqcal_gainparams[indx][i][1];
+ params->pga = tbl_iqcal_gainparams[indx][i][2];
+ params->pad = tbl_iqcal_gainparams[indx][i][3];
+ params->cal_gain = (params->txgm << 7) | (params->pga << 4) |
+ (params->pad << 2);
+ for (j = 0; j < 4; j++)
+ params->ncorr[j] = tbl_iqcal_gainparams[indx][i][4 + j];
+ }
+}
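
On rev3+ the composite cal_gain is four gain indexes packed into one 16-bit word (TX GM in bits 15:12, PGA in 11:8, PAD in 7:4, iPA in 3:0); rev1/2 uses the narrower (txgm << 7) | (pga << 4) | (pad << 2) packing instead. A self-contained sketch of the rev3+ pack/unpack, with made-up field values:

#include <stdio.h>

struct txgains { unsigned txgm, pga, pad, ipa; };

/* rev3+ packing, as computed for params->cal_gain above. */
static unsigned pack_cal_gain(const struct txgains *g)
{
        return (g->txgm << 12) | (g->pga << 8) | (g->pad << 4) | g->ipa;
}

static struct txgains unpack_cal_gain(unsigned cal)
{
        struct txgains g = {
                .txgm = (cal >> 12) & 0xF,
                .pga  = (cal >> 8) & 0xF,
                .pad  = (cal >> 4) & 0xF,
                .ipa  = cal & 0xF,
        };
        return g;
}

int main(void)
{
        struct txgains g = { .txgm = 0x3, .pga = 0xA, .pad = 0x7, .ipa = 0x2 };
        unsigned cal = pack_cal_gain(&g);
        struct txgains back = unpack_cal_gain(cal);

        printf("cal_gain = 0x%04X (txgm=%u pga=%u pad=%u ipa=%u)\n",
               cal, back.txgm, back.pga, back.pad, back.ipa); /* 0x3A72 */
        return 0;
}
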
+
+/**************************************************
+ * Tx and Rx
+ **************************************************/
+
+void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
+{//TODO
+}
+
+static void b43_nphy_op_adjust_txpower(struct b43_wldev *dev)
+{//TODO
+}
+
+static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev,
+ bool ignore_tssi)
+{//TODO
+ return B43_TXPWR_RES_DONE;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlEnable */
+static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u8 i;
+ u16 bmask, val, tmp;
+ enum ieee80211_band band = b43_current_band(dev->wl);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ nphy->txpwrctrl = enable;
+ if (!enable) {
+ if (dev->phy.rev >= 3 &&
+ (b43_phy_read(dev, B43_NPHY_TXPCTL_CMD) &
+ (B43_NPHY_TXPCTL_CMD_COEFF |
+ B43_NPHY_TXPCTL_CMD_HWPCTLEN |
+ B43_NPHY_TXPCTL_CMD_PCTLEN))) {
+			/* We disable enabled TX pwr ctl, save its state */
+ nphy->tx_pwr_idx[0] = b43_phy_read(dev,
+ B43_NPHY_C1_TXPCTL_STAT) & 0x7f;
+ nphy->tx_pwr_idx[1] = b43_phy_read(dev,
+ B43_NPHY_C2_TXPCTL_STAT) & 0x7f;
+ }
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6840);
+ for (i = 0; i < 84; i++)
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6C40);
+ for (i = 0; i < 84; i++)
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0);
+
+ tmp = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN;
+ if (dev->phy.rev >= 3)
+ tmp |= B43_NPHY_TXPCTL_CMD_PCTLEN;
+ b43_phy_mask(dev, B43_NPHY_TXPCTL_CMD, ~tmp);
+
+ if (dev->phy.rev >= 3) {
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100);
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100);
+ } else {
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000);
+ }
+
+ if (dev->phy.rev == 2)
+ b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
+ ~B43_NPHY_BPHY_CTL3_SCALE, 0x53);
+ else if (dev->phy.rev < 2)
+ b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
+ ~B43_NPHY_BPHY_CTL3_SCALE, 0x5A);
+
+ if (dev->phy.rev < 2 && dev->phy.is_40mhz)
+ b43_hf_write(dev, b43_hf_read(dev) | B43_HF_TSSIRPSMW);
+ } else {
+ b43_ntab_write_bulk(dev, B43_NTAB16(26, 64), 84,
+ nphy->adj_pwr_tbl);
+ b43_ntab_write_bulk(dev, B43_NTAB16(27, 64), 84,
+ nphy->adj_pwr_tbl);
+
+ bmask = B43_NPHY_TXPCTL_CMD_COEFF |
+ B43_NPHY_TXPCTL_CMD_HWPCTLEN;
+		/* wl does a useless check of the "enable" param here */
+ val = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN;
+ if (dev->phy.rev >= 3) {
+ bmask |= B43_NPHY_TXPCTL_CMD_PCTLEN;
+ if (val)
+ val |= B43_NPHY_TXPCTL_CMD_PCTLEN;
+ }
+ b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD, ~(bmask), val);
+
+ if (band == IEEE80211_BAND_5GHZ) {
+ b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
+ ~B43_NPHY_TXPCTL_CMD_INIT, 0x64);
+ if (dev->phy.rev > 1)
+ b43_phy_maskset(dev, B43_NPHY_TXPCTL_INIT,
+ ~B43_NPHY_TXPCTL_INIT_PIDXI1,
+ 0x64);
+ }
+
+ if (dev->phy.rev >= 3) {
+ if (nphy->tx_pwr_idx[0] != 128 &&
+ nphy->tx_pwr_idx[1] != 128) {
+ /* Recover TX pwr ctl state */
+ b43_phy_maskset(dev, B43_NPHY_TXPCTL_CMD,
+ ~B43_NPHY_TXPCTL_CMD_INIT,
+ nphy->tx_pwr_idx[0]);
+ if (dev->phy.rev > 1)
+ b43_phy_maskset(dev,
+ B43_NPHY_TXPCTL_INIT,
+ ~0xff, nphy->tx_pwr_idx[1]);
+ }
+ }
+
+ if (dev->phy.rev >= 3) {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, ~0x100);
+ b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x100);
+ } else {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x4000);
+ }
+
+ if (dev->phy.rev == 2)
+ b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x3b);
+ else if (dev->phy.rev < 2)
+ b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3, ~0xFF, 0x40);
+
+ if (dev->phy.rev < 2 && dev->phy.is_40mhz)
+ b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_TSSIRPSMW);
+
+ if (b43_nphy_ipa(dev)) {
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x4);
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x4);
+ }
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
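
The disable path above clears the two 84-entry adjusted-power tables by writing B43_NPHY_TABLE_ADDR once (0x6840, then 0x6C40) and streaming zeros through B43_NPHY_TABLE_DATALO, which relies on the table data port auto-incrementing after each write. A toy model of that access pattern; the flat array below is an illustrative simplification, the real port splits the address into a table ID and an offset.

#include <stdio.h>

/* Toy auto-incrementing table port. */
static unsigned short table[0x8000];
static unsigned table_addr;

static void write_table_addr(unsigned addr)        { table_addr = addr; }
static void write_table_datalo(unsigned short val) { table[table_addr++] = val; }

int main(void)
{
        int i;

        /* Mirrors the "zero 84 entries starting at 0x6840" loop. */
        write_table_addr(0x6840);
        for (i = 0; i < 84; i++)
                write_table_datalo(0);

        printf("first=%u last=%u next addr=0x%04X\n",
               table[0x6840], table[0x6840 + 83], table_addr); /* 0 0 0x6894 */
        return 0;
}
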
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrFix */
+static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
+
+ u8 txpi[2], bbmult, i;
+ u16 tmp, radio_gain, dac_gain;
+ u16 freq = dev->phy.channel_freq;
+ u32 txgain;
+ /* u32 gaintbl; rev3+ */
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ if (dev->phy.rev >= 7) {
+ txpi[0] = txpi[1] = 30;
+ } else if (dev->phy.rev >= 3) {
+ txpi[0] = 40;
+ txpi[1] = 40;
+ } else if (sprom->revision < 4) {
+ txpi[0] = 72;
+ txpi[1] = 72;
+ } else {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ txpi[0] = sprom->txpid2g[0];
+ txpi[1] = sprom->txpid2g[1];
+ } else if (freq >= 4900 && freq < 5100) {
+ txpi[0] = sprom->txpid5gl[0];
+ txpi[1] = sprom->txpid5gl[1];
+ } else if (freq >= 5100 && freq < 5500) {
+ txpi[0] = sprom->txpid5g[0];
+ txpi[1] = sprom->txpid5g[1];
+ } else if (freq >= 5500) {
+ txpi[0] = sprom->txpid5gh[0];
+ txpi[1] = sprom->txpid5gh[1];
+ } else {
+ txpi[0] = 91;
+ txpi[1] = 91;
+ }
+ }
+ if (dev->phy.rev < 7 &&
+ (txpi[0] < 40 || txpi[0] > 100 || txpi[1] < 40 || txpi[1] > 100))
+ txpi[0] = txpi[1] = 91;
+
+ /*
+ for (i = 0; i < 2; i++) {
+ nphy->txpwrindex[i].index_internal = txpi[i];
+ nphy->txpwrindex[i].index_internal_save = txpi[i];
+ }
+ */
+
+ for (i = 0; i < 2; i++) {
+ if (dev->phy.rev >= 3) {
+ if (b43_nphy_ipa(dev)) {
+ txgain = *(b43_nphy_get_ipa_gain_table(dev) +
+ txpi[i]);
+ } else if (b43_current_band(dev->wl) ==
+ IEEE80211_BAND_5GHZ) {
+ /* FIXME: use 5GHz tables */
+ txgain =
+ b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]];
+ } else {
+ if (dev->phy.rev >= 5 &&
+ sprom->fem.ghz5.extpa_gain == 3)
+ ; /* FIXME: 5GHz_txgain_HiPwrEPA */
+ txgain =
+ b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]];
+ }
+ radio_gain = (txgain >> 16) & 0x1FFFF;
+ } else {
+ txgain = b43_ntab_tx_gain_rev0_1_2[txpi[i]];
+ radio_gain = (txgain >> 16) & 0x1FFF;
+ }
+
+ if (dev->phy.rev >= 7)
+ dac_gain = (txgain >> 8) & 0x7;
+ else
+ dac_gain = (txgain >> 8) & 0x3F;
+ bbmult = txgain & 0xFF;
+
+ if (dev->phy.rev >= 3) {
+ if (i == 0)
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100);
+ else
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100);
+ } else {
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000);
+ }
+
+ if (i == 0)
+ b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN1, dac_gain);
+ else
+ b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN2, dac_gain);
+
+ b43_ntab_write(dev, B43_NTAB16(0x7, 0x110 + i), radio_gain);
+
+ tmp = b43_ntab_read(dev, B43_NTAB16(0xF, 0x57));
+ if (i == 0)
+ tmp = (tmp & 0x00FF) | (bbmult << 8);
+ else
+ tmp = (tmp & 0xFF00) | bbmult;
+ b43_ntab_write(dev, B43_NTAB16(0xF, 0x57), tmp);
+
+ if (b43_nphy_ipa(dev)) {
+ u32 tmp32;
+ u16 reg = (i == 0) ?
+ B43_NPHY_PAPD_EN0 : B43_NPHY_PAPD_EN1;
+ tmp32 = b43_ntab_read(dev, B43_NTAB32(26 + i,
+ 576 + txpi[i]));
+ b43_phy_maskset(dev, reg, 0xE00F, (u32) tmp32 << 4);
+ b43_phy_set(dev, reg, 0x4);
+ }
+ }
+
+ b43_phy_mask(dev, B43_NPHY_BPHY_CTL2, ~B43_NPHY_BPHY_CTL2_LUT);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
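
Every TX gain table entry is a packed 32-bit word that the loop above takes apart: radio gain in the upper half (bits 16 and up), DAC gain in bits 13:8 (narrowed to 3 bits on rev7+) and the baseband multiplier in the low byte. A standalone decode of the rev3..6 layout, using a made-up table entry:

#include <stdio.h>

struct txgain_fields { unsigned radio_gain, dac_gain, bbmult; };

/* Field extraction for rev3..6 entries, as in b43_nphy_tx_power_fix(). */
static struct txgain_fields decode_txgain(unsigned txgain)
{
        struct txgain_fields f = {
                .radio_gain = (txgain >> 16) & 0x1FFFF,
                .dac_gain   = (txgain >> 8) & 0x3F,
                .bbmult     = txgain & 0xFF,
        };
        return f;
}

int main(void)
{
        unsigned sample = 0x117F1F5C; /* hypothetical table entry */
        struct txgain_fields f = decode_txgain(sample);

        printf("radio_gain=0x%04X dac_gain=%u bbmult=%u\n",
               f.radio_gain, f.dac_gain, f.bbmult); /* 0x117F 31 92 */
        return 0;
}
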
+
+static void b43_nphy_ipa_internal_tssi_setup(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+
+ u8 core;
+ u16 r; /* routing */
+
+ if (phy->rev >= 7) {
+ for (core = 0; core < 2; core++) {
+ r = core ? 0x190 : 0x170;
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ b43_radio_write(dev, r + 0x5, 0x5);
+ b43_radio_write(dev, r + 0x9, 0xE);
+ if (phy->rev != 5)
+ b43_radio_write(dev, r + 0xA, 0);
+ if (phy->rev != 7)
+ b43_radio_write(dev, r + 0xB, 1);
+ else
+ b43_radio_write(dev, r + 0xB, 0x31);
+ } else {
+ b43_radio_write(dev, r + 0x5, 0x9);
+ b43_radio_write(dev, r + 0x9, 0xC);
+ b43_radio_write(dev, r + 0xB, 0x0);
+ if (phy->rev != 5)
+ b43_radio_write(dev, r + 0xA, 1);
+ else
+ b43_radio_write(dev, r + 0xA, 0x31);
+ }
+ b43_radio_write(dev, r + 0x6, 0);
+ b43_radio_write(dev, r + 0x7, 0);
+ b43_radio_write(dev, r + 0x8, 3);
+ b43_radio_write(dev, r + 0xC, 0);
+ }
+ } else {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x128);
+ else
+ b43_radio_write(dev, B2056_SYN_RESERVED_ADDR31, 0x80);
+ b43_radio_write(dev, B2056_SYN_RESERVED_ADDR30, 0);
+ b43_radio_write(dev, B2056_SYN_GPIO_MASTER1, 0x29);
+
+ for (core = 0; core < 2; core++) {
+ r = core ? B2056_TX1 : B2056_TX0;
+
+ b43_radio_write(dev, r | B2056_TX_IQCAL_VCM_HG, 0);
+ b43_radio_write(dev, r | B2056_TX_IQCAL_IDAC, 0);
+ b43_radio_write(dev, r | B2056_TX_TSSI_VCM, 3);
+ b43_radio_write(dev, r | B2056_TX_TX_AMP_DET, 0);
+ b43_radio_write(dev, r | B2056_TX_TSSI_MISC1, 8);
+ b43_radio_write(dev, r | B2056_TX_TSSI_MISC2, 0);
+ b43_radio_write(dev, r | B2056_TX_TSSI_MISC3, 0);
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ b43_radio_write(dev, r | B2056_TX_TX_SSI_MASTER,
+ 0x5);
+ if (phy->rev != 5)
+ b43_radio_write(dev, r | B2056_TX_TSSIA,
+ 0x00);
+ if (phy->rev >= 5)
+ b43_radio_write(dev, r | B2056_TX_TSSIG,
+ 0x31);
+ else
+ b43_radio_write(dev, r | B2056_TX_TSSIG,
+ 0x11);
+ b43_radio_write(dev, r | B2056_TX_TX_SSI_MUX,
+ 0xE);
+ } else {
+ b43_radio_write(dev, r | B2056_TX_TX_SSI_MASTER,
+ 0x9);
+ b43_radio_write(dev, r | B2056_TX_TSSIA, 0x31);
+ b43_radio_write(dev, r | B2056_TX_TSSIG, 0x0);
+ b43_radio_write(dev, r | B2056_TX_TX_SSI_MUX,
+ 0xC);
+ }
+ }
+ }
+}
+
+/*
+ * Stop radio and transmit known signal. Then check received signal strength to
+ * get TSSI (Transmit Signal Strength Indicator).
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlIdleTssi
+ */
+static void b43_nphy_tx_power_ctl_idle_tssi(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u32 tmp;
+ s32 rssi[4] = { };
+
+ /* TODO: check if we can transmit */
+
+ if (b43_nphy_ipa(dev))
+ b43_nphy_ipa_internal_tssi_setup(dev);
+
+ if (phy->rev >= 7)
+ ; /* TODO: Override Rev7 with 0x2000, 0, 3, 0, 0 as arguments */
+ else if (phy->rev >= 3)
+ b43_nphy_rf_control_override(dev, 0x2000, 0, 3, false);
+
+ b43_nphy_stop_playback(dev);
+ b43_nphy_tx_tone(dev, 0xFA0, 0, false, false);
+ udelay(20);
+ tmp = b43_nphy_poll_rssi(dev, 4, rssi, 1);
+ b43_nphy_stop_playback(dev);
+ b43_nphy_rssi_select(dev, 0, 0);
+
+ if (phy->rev >= 7)
+ ; /* TODO: Override Rev7 with 0x2000, 0, 3, 1, 0 as arguments */
+ else if (phy->rev >= 3)
+ b43_nphy_rf_control_override(dev, 0x2000, 0, 3, true);
+
+ if (phy->rev >= 3) {
+ nphy->pwr_ctl_info[0].idle_tssi_5g = (tmp >> 24) & 0xFF;
+ nphy->pwr_ctl_info[1].idle_tssi_5g = (tmp >> 8) & 0xFF;
+ } else {
+ nphy->pwr_ctl_info[0].idle_tssi_5g = (tmp >> 16) & 0xFF;
+ nphy->pwr_ctl_info[1].idle_tssi_5g = tmp & 0xFF;
+ }
+ nphy->pwr_ctl_info[0].idle_tssi_2g = (tmp >> 24) & 0xFF;
+ nphy->pwr_ctl_info[1].idle_tssi_2g = (tmp >> 8) & 0xFF;
+}
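
b43_nphy_poll_rssi() hands back four polled values packed one per byte, and the assignments above just extract the per-core idle TSSI: the 5 GHz values come from bytes 3 and 1 on rev3+ (bytes 2 and 0 on earlier revs) and the 2 GHz values always from bytes 3 and 1. A minimal sketch of that unpacking, with a hypothetical poll result:

#include <stdio.h>

/* Byte positions taken from the shifts used above. */
static void idle_tssi_from_poll(unsigned polled, int rev3plus,
                                unsigned *core0, unsigned *core1)
{
        if (rev3plus) {
                *core0 = (polled >> 24) & 0xFF;
                *core1 = (polled >> 8) & 0xFF;
        } else {
                *core0 = (polled >> 16) & 0xFF;
                *core1 = polled & 0xFF;
        }
}

int main(void)
{
        unsigned polled = 0x4A003B00; /* hypothetical poll result */
        unsigned c0, c1;

        idle_tssi_from_poll(polled, 1, &c0, &c1);
        printf("core0=0x%02X core1=0x%02X\n", c0, c1); /* 0x4A 0x3B */
        return 0;
}
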
+
+static void b43_nphy_tx_gain_table_upload(struct b43_wldev *dev)
+{
+ struct b43_phy *phy = &dev->phy;
+
+ const u32 *table = NULL;
+#if 0
+ TODO: b43_ntab_papd_pga_gain_delta_ipa_2*
+ u32 rfpwr_offset;
+ u8 pga_gain;
+ int i;
+#endif
+
+ if (phy->rev >= 3) {
+ if (b43_nphy_ipa(dev)) {
+ table = b43_nphy_get_ipa_gain_table(dev);
+ } else {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ if (phy->rev == 3)
+ table = b43_ntab_tx_gain_rev3_5ghz;
+				else if (phy->rev == 4)
+ table = b43_ntab_tx_gain_rev4_5ghz;
+ else
+ table = b43_ntab_tx_gain_rev5plus_5ghz;
+ } else {
+ table = b43_ntab_tx_gain_rev3plus_2ghz;
+ }
+ }
+ } else {
+ table = b43_ntab_tx_gain_rev0_1_2;
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB32(26, 192), 128, table);
+ b43_ntab_write_bulk(dev, B43_NTAB32(27, 192), 128, table);
+
+ if (phy->rev >= 3) {
+#if 0
+ nphy->gmval = (table[0] >> 16) & 0x7000;
+
+ for (i = 0; i < 128; i++) {
+ pga_gain = (table[i] >> 24) & 0xF;
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_2g[pga_gain];
+ else
+ rfpwr_offset = b43_ntab_papd_pga_gain_delta_ipa_5g[pga_gain];
+ b43_ntab_write(dev, B43_NTAB32(26, 576 + i),
+ rfpwr_offset);
+ b43_ntab_write(dev, B43_NTAB32(27, 576 + i),
+ rfpwr_offset);
+ }
+#endif
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */
+static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ enum ieee80211_band band;
+ u16 tmp;
+
+ if (!enable) {
+ nphy->rfctrl_intc1_save = b43_phy_read(dev,
+ B43_NPHY_RFCTL_INTC1);
+ nphy->rfctrl_intc2_save = b43_phy_read(dev,
+ B43_NPHY_RFCTL_INTC2);
+ band = b43_current_band(dev->wl);
+ if (dev->phy.rev >= 3) {
+ if (band == IEEE80211_BAND_5GHZ)
+ tmp = 0x600;
+ else
+ tmp = 0x480;
+ } else {
+ if (band == IEEE80211_BAND_5GHZ)
+ tmp = 0x180;
+ else
+ tmp = 0x120;
+ }
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp);
+ } else {
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1,
+ nphy->rfctrl_intc1_save);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2,
+ nphy->rfctrl_intc2_save);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw */
+static void b43_nphy_tx_lp_fbw(struct b43_wldev *dev)
+{
+ u16 tmp;
+
+ if (dev->phy.rev >= 3) {
+ if (b43_nphy_ipa(dev)) {
+ tmp = 4;
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S2,
+ (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
+ }
+
+ tmp = 1;
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S2,
+ (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
+ }
+}
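
The shift expression used twice above just replicates a 3-bit value into four consecutive 3-bit register fields, so tmp = 4 produces 0x924 and tmp = 1 produces 0x249. A quick standalone check:

#include <stdio.h>

/* Replicate a 3-bit value into four consecutive 3-bit fields. */
static unsigned replicate3(unsigned tmp)
{
        return (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp;
}

int main(void)
{
        printf("tmp=4 -> 0x%03X\n", replicate3(4)); /* 0x924 */
        printf("tmp=1 -> 0x%03X\n", replicate3(1)); /* 0x249 */
        return 0;
}
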
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */
+static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est,
+ u16 samps, u8 time, bool wait)
+{
+ int i;
+ u16 tmp;
+
+ b43_phy_write(dev, B43_NPHY_IQEST_SAMCNT, samps);
+ b43_phy_maskset(dev, B43_NPHY_IQEST_WT, ~B43_NPHY_IQEST_WT_VAL, time);
+ if (wait)
+ b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_MODE);
+ else
+ b43_phy_mask(dev, B43_NPHY_IQEST_CMD, ~B43_NPHY_IQEST_CMD_MODE);
+
+ b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_START);
+
+ for (i = 1000; i; i--) {
+ tmp = b43_phy_read(dev, B43_NPHY_IQEST_CMD);
+ if (!(tmp & B43_NPHY_IQEST_CMD_START)) {
+ est->i0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI0) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO0);
+ est->q0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI0) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO0);
+ est->iq0_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI0) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO0);
+
+ est->i1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI1) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO1);
+ est->q1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI1) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO1);
+ est->iq1_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI1) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO1);
+ return;
+ }
+ udelay(10);
+ }
+ memset(est, 0, sizeof(*est));
+}
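
Each accumulator is exposed as a HI/LO pair of 16-bit PHY registers, so every field is rebuilt as (HI << 16) | LO, and the surrounding loop polls the START bit for at most 1000 iterations of 10 us before giving up and zeroing the result. The reassembly itself is just:

#include <stdio.h>

/* Rebuild a 32-bit accumulator from its 16-bit HI/LO halves. */
static unsigned acc32(unsigned hi, unsigned lo)
{
        return (hi << 16) | (lo & 0xFFFF);
}

int main(void)
{
        /* hypothetical register reads */
        unsigned ipacc_hi0 = 0x0012, ipacc_lo0 = 0x34AB;

        printf("i0_pwr = 0x%08X\n", acc32(ipacc_hi0, ipacc_lo0)); /* 0x001234AB */
        return 0;
}
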
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */
+static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write,
+ struct b43_phy_n_iq_comp *pcomp)
+{
+ if (write) {
+ b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPA0, pcomp->a0);
+ b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPB0, pcomp->b0);
+ b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPA1, pcomp->a1);
+ b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPB1, pcomp->b1);
+ } else {
+ pcomp->a0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPA0);
+ pcomp->b0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPB0);
+ pcomp->a1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPA1);
+ pcomp->b1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPB1);
+ }
+}
+
+#if 0
+/* Ready but not used anywhere */
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */
+static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core)
+{
+ u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
+
+ b43_phy_write(dev, B43_NPHY_RFSEQCA, regs[0]);
+ if (core == 0) {
+ b43_phy_write(dev, B43_NPHY_AFECTL_C1, regs[1]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, regs[2]);
+ } else {
+ b43_phy_write(dev, B43_NPHY_AFECTL_C2, regs[1]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[2]);
+ }
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[3]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[4]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO1, regs[5]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO2, regs[6]);
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, regs[7]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_OVER, regs[8]);
+ b43_phy_write(dev, B43_NPHY_PAPD_EN0, regs[9]);
+ b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhySetup */
+static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core)
+{
+ u8 rxval, txval;
+ u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
+
+ regs[0] = b43_phy_read(dev, B43_NPHY_RFSEQCA);
+ if (core == 0) {
+ regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
+ regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
+ } else {
+ regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
+ regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
+ }
+ regs[3] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
+ regs[4] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
+ regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1);
+ regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2);
+ regs[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S1);
+ regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER);
+ regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0);
+ regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1);
+
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001);
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001);
+
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
+ ~B43_NPHY_RFSEQCA_RXDIS & 0xFFFF,
+ ((1 - core) << B43_NPHY_RFSEQCA_RXDIS_SHIFT));
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN,
+ ((1 - core) << B43_NPHY_RFSEQCA_TXEN_SHIFT));
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_RXEN,
+ (core << B43_NPHY_RFSEQCA_RXEN_SHIFT));
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXDIS,
+ (core << B43_NPHY_RFSEQCA_TXDIS_SHIFT));
+
+ if (core == 0) {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x0007);
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0007);
+ } else {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x0007);
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0007);
+ }
+
+ b43_nphy_rf_control_intc_override(dev, 2, 0, 3);
+ b43_nphy_rf_control_override(dev, 8, 0, 3, false);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX);
+
+ if (core == 0) {
+ rxval = 1;
+ txval = 8;
+ } else {
+ rxval = 4;
+ txval = 2;
+ }
+ b43_nphy_rf_control_intc_override(dev, 1, rxval, (core + 1));
+ b43_nphy_rf_control_intc_override(dev, 1, txval, (2 - core));
+}
+#endif
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */
+static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask)
+{
+ int i;
+ s32 iq;
+ u32 ii;
+ u32 qq;
+ int iq_nbits, qq_nbits;
+ int arsh, brsh;
+ u16 tmp, a, b;
+
+ struct nphy_iq_est est;
+ struct b43_phy_n_iq_comp old;
+ struct b43_phy_n_iq_comp new = { };
+ bool error = false;
+
+ if (mask == 0)
+ return;
+
+ b43_nphy_rx_iq_coeffs(dev, false, &old);
+ b43_nphy_rx_iq_coeffs(dev, true, &new);
+ b43_nphy_rx_iq_est(dev, &est, 0x4000, 32, false);
+ new = old;
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0 && (mask & 1)) {
+ iq = est.iq0_prod;
+ ii = est.i0_pwr;
+ qq = est.q0_pwr;
+ } else if (i == 1 && (mask & 2)) {
+ iq = est.iq1_prod;
+ ii = est.i1_pwr;
+ qq = est.q1_pwr;
+ } else {
+ continue;
+ }
+
+ if (ii + qq < 2) {
+ error = true;
+ break;
+ }
+
+ iq_nbits = fls(abs(iq));
+ qq_nbits = fls(qq);
+
+ arsh = iq_nbits - 20;
+ if (arsh >= 0) {
+ a = -((iq << (30 - iq_nbits)) + (ii >> (1 + arsh)));
+ tmp = ii >> arsh;
+ } else {
+ a = -((iq << (30 - iq_nbits)) + (ii << (-1 - arsh)));
+ tmp = ii << -arsh;
+ }
+ if (tmp == 0) {
+ error = true;
+ break;
+ }
+ a /= tmp;
+
+ brsh = qq_nbits - 11;
+ if (brsh >= 0) {
+ b = (qq << (31 - qq_nbits));
+ tmp = ii >> brsh;
+ } else {
+ b = (qq << (31 - qq_nbits));
+ tmp = ii << -brsh;
+ }
+ if (tmp == 0) {
+ error = true;
+ break;
+ }
+ b = int_sqrt(b / tmp - a * a) - (1 << 10);
+
+ if (i == 0 && (mask & 0x1)) {
+ if (dev->phy.rev >= 3) {
+ new.a0 = a & 0x3FF;
+ new.b0 = b & 0x3FF;
+ } else {
+ new.a0 = b & 0x3FF;
+ new.b0 = a & 0x3FF;
+ }
+ } else if (i == 1 && (mask & 0x2)) {
+ if (dev->phy.rev >= 3) {
+ new.a1 = a & 0x3FF;
+ new.b1 = b & 0x3FF;
+ } else {
+ new.a1 = b & 0x3FF;
+ new.b1 = a & 0x3FF;
+ }
+ }
+ }
+
+ if (error)
+ new = old;
+
+ b43_nphy_rx_iq_coeffs(dev, true, &new);
+}
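
Stripped of the fls()-based rescaling, which only exists to keep the intermediate products within 32 bits, the compensation coefficients are Q10 fixed-point values: a is roughly -(iq/ii) * 2^10 and b is roughly sqrt((qq/ii) * 2^20 - a^2) - 2^10. The floating-point cross-check below is illustrative only; the driver's integer path also rounds, so small differences against it are expected.

#include <math.h>
#include <stdio.h>

/* Floating-point reading of the Q10 coefficients computed above. */
static void iq_comp(double iq, double ii, double qq, double *a, double *b)
{
        *a = -(iq / ii) * 1024.0;                                  /* -(iq/ii) in Q10 */
        *b = sqrt((qq / ii) * 1024.0 * 1024.0 - *a * *a) - 1024.0;
}

int main(void)
{
        double a, b;

        /* hypothetical accumulator readings */
        iq_comp(-3500.0, 250000.0, 262000.0, &a, &b);
        printf("a = %.1f  b = %.1f\n", a, b);
        return 0;
}
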
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */
+static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev)
+{
+ u16 array[4];
+ b43_ntab_read_bulk(dev, B43_NTAB16(0xF, 0x50), 4, array);
+
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW0, array[0]);
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW1, array[1]);
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW2, array[2]);
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW3, array[3]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SpurWar */
+static void b43_nphy_spur_workaround(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u8 channel = dev->phy.channel;
+ int tone[2] = { 57, 58 };
+ u32 noise[2] = { 0x3FF, 0x3FF };
+
+ B43_WARN_ON(dev->phy.rev < 3);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ if (nphy->gband_spurwar_en) {
+ /* TODO: N PHY Adjust Analog Pfbw (7) */
+ if (channel == 11 && dev->phy.is_40mhz)
+ ; /* TODO: N PHY Adjust Min Noise Var(2, tone, noise)*/
+ else
+ ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
+ /* TODO: N PHY Adjust CRS Min Power (0x1E) */
+ }
+
+ if (nphy->aband_spurwar_en) {
+ if (channel == 54) {
+ tone[0] = 0x20;
+ noise[0] = 0x25F;
+ } else if (channel == 38 || channel == 102 || channel == 118) {
+ if (0 /* FIXME */) {
+ tone[0] = 0x20;
+ noise[0] = 0x21F;
+ } else {
+ tone[0] = 0;
+ noise[0] = 0;
+ }
+ } else if (channel == 134) {
+ tone[0] = 0x20;
+ noise[0] = 0x21F;
+ } else if (channel == 151) {
+ tone[0] = 0x10;
+ noise[0] = 0x23F;
+ } else if (channel == 153 || channel == 161) {
+ tone[0] = 0x30;
+ noise[0] = 0x23F;
+ } else {
+ tone[0] = 0;
+ noise[0] = 0;
+ }
+
+ if (!tone[0] && !noise[0])
+ ; /* TODO: N PHY Adjust Min Noise Var(1, tone, noise)*/
+ else
+ ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */
+static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ int i, j;
+ u32 tmp;
+ u32 cur_real, cur_imag, real_part, imag_part;
+
+ u16 buffer[7];
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, true);
+
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer);
+
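+ /* Replicate each core's coefficient pair from the buffer across all
+ * 128 entries of table 26/27 at offset 320. */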
+ for (i = 0; i < 2; i++) {
+ tmp = ((buffer[i * 2] & 0x3FF) << 10) |
+ (buffer[i * 2 + 1] & 0x3FF);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
+ (((i + 26) << 10) | 320));
+ for (j = 0; j < 128; j++) {
+ b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
+ ((tmp >> 16) & 0xFFFF));
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (tmp & 0xFFFF));
+ }
+ }
+
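+ /* Same for the entries at offset 448; pre-rev3 PHYs scale the real and
+ * imaginary parts of each of the 128 entries by the loscale table. */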
+ for (i = 0; i < 2; i++) {
+ tmp = buffer[5 + i];
+ real_part = (tmp >> 8) & 0xFF;
+ imag_part = (tmp & 0xFF);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
+ (((i + 26) << 10) | 448));
+
+ if (dev->phy.rev >= 3) {
+ cur_real = real_part;
+ cur_imag = imag_part;
+ tmp = ((cur_real & 0xFF) << 8) | (cur_imag & 0xFF);
+ }
+
+ for (j = 0; j < 128; j++) {
+ if (dev->phy.rev < 3) {
+ cur_real = (real_part * loscale[j] + 128) >> 8;
+ cur_imag = (imag_part * loscale[j] + 128) >> 8;
+ tmp = ((cur_real & 0xFF) << 8) |
+ (cur_imag & 0xFF);
+ }
+ b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
+ ((tmp >> 16) & 0xFFFF));
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (tmp & 0xFFFF));
+ }
+ }
+
+ if (dev->phy.rev >= 3) {
+ b43_shm_write16(dev, B43_SHM_SHARED,
+ B43_SHM_SH_NPHY_TXPWR_INDX0, 0xFFFF);
+ b43_shm_write16(dev, B43_SHM_SHARED,
+ B43_SHM_SH_NPHY_TXPWR_INDX1, 0xFFFF);
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, false);
+}
+
/*
* Restore RSSI Calibration
* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreRssiCal
@@ -2729,24 +2943,6 @@ static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev)
b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, rssical_phy_regs[11]);
}
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */
-static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
-{
- if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
- if (dev->phy.rev >= 6) {
- if (dev->dev->chip_id == 47162)
- return txpwrctrl_tx_gain_ipa_rev5;
- return txpwrctrl_tx_gain_ipa_rev6;
- } else if (dev->phy.rev >= 5) {
- return txpwrctrl_tx_gain_ipa_rev5;
- } else {
- return txpwrctrl_tx_gain_ipa;
- }
- } else {
- return txpwrctrl_tx_gain_ipa_5g;
- }
-}
-
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalRadioSetup */
static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev)
{
@@ -2841,44 +3037,6 @@ static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev)
}
}
-/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */
-static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core,
- struct nphy_txgains target,
- struct nphy_iqcal_params *params)
-{
- int i, j, indx;
- u16 gain;
-
- if (dev->phy.rev >= 3) {
- params->txgm = target.txgm[core];
- params->pga = target.pga[core];
- params->pad = target.pad[core];
- params->ipa = target.ipa[core];
- params->cal_gain = (params->txgm << 12) | (params->pga << 8) |
- (params->pad << 4) | (params->ipa);
- for (j = 0; j < 5; j++)
- params->ncorr[j] = 0x79;
- } else {
- gain = (target.pad[core]) | (target.pga[core] << 4) |
- (target.txgm[core] << 8);
-
- indx = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ?
- 1 : 0;
- for (i = 0; i < 9; i++)
- if (tbl_iqcal_gainparams[indx][i][0] == gain)
- break;
- i = min(i, 8);
-
- params->txgm = tbl_iqcal_gainparams[indx][i][1];
- params->pga = tbl_iqcal_gainparams[indx][i][2];
- params->pad = tbl_iqcal_gainparams[indx][i][3];
- params->cal_gain = (params->txgm << 7) | (params->pga << 4) |
- (params->pad << 2);
- for (j = 0; j < 4; j++)
- params->ncorr[j] = tbl_iqcal_gainparams[indx][i][4 + j];
- }
-}
-
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/UpdateTxCalLadder */
static void b43_nphy_update_tx_cal_ladder(struct b43_wldev *dev, u16 core)
{
@@ -3258,7 +3416,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
if (dev->phy.rev >= 4) {
avoid = nphy->hang_avoid;
- nphy->hang_avoid = 0;
+ nphy->hang_avoid = false;
}
b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, save);
@@ -3368,7 +3526,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
if (phy6or5x && updated[core] == 0) {
b43_nphy_update_tx_cal_ladder(dev, core);
- updated[core] = 1;
+ updated[core] = true;
}
tmp = (params[core].ncorr[type] << 8) | 0x66;
@@ -3730,10 +3888,104 @@ static void b43_nphy_set_rx_core_state(struct b43_wldev *dev, u8 mask)
b43_mac_enable(dev);
}
+/**************************************************
+ * N-PHY init
+ **************************************************/
+
/*
- * Init N-PHY
- * http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N
+ * Upload the N-PHY tables.
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables
*/
+static void b43_nphy_tables_init(struct b43_wldev *dev)
+{
+ if (dev->phy.rev < 3)
+ b43_nphy_rev0_1_2_tables_init(dev);
+ else
+ b43_nphy_rev3plus_tables_init(dev);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */
+static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble)
+{
+ u16 mimocfg = b43_phy_read(dev, B43_NPHY_MIMOCFG);
+
+ mimocfg |= B43_NPHY_MIMOCFG_AUTO;
+ if (preamble == 1)
+ mimocfg |= B43_NPHY_MIMOCFG_GFMIX;
+ else
+ mimocfg &= ~B43_NPHY_MIMOCFG_GFMIX;
+
+ b43_phy_write(dev, B43_NPHY_MIMOCFG, mimocfg);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BPHYInit */
+static void b43_nphy_bphy_init(struct b43_wldev *dev)
+{
+ unsigned int i;
+ u16 val;
+
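+ /* Fill B-mode PHY registers 0x88-0x97 and 0x98-0xA7 with descending
+ * values (step 0x202), then program register 0x38. */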
+ val = 0x1E1F;
+ for (i = 0; i < 16; i++) {
+ b43_phy_write(dev, B43_PHY_N_BMODE(0x88 + i), val);
+ val -= 0x202;
+ }
+ val = 0x3E3F;
+ for (i = 0; i < 16; i++) {
+ b43_phy_write(dev, B43_PHY_N_BMODE(0x98 + i), val);
+ val -= 0x202;
+ }
+ b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SuperSwitchInit */
+static void b43_nphy_superswitch_init(struct b43_wldev *dev, bool init)
+{
+ if (dev->phy.rev >= 3) {
+ if (!init)
+ return;
+ if (0 /* FIXME */) {
+ b43_ntab_write(dev, B43_NTAB16(9, 2), 0x211);
+ b43_ntab_write(dev, B43_NTAB16(9, 3), 0x222);
+ b43_ntab_write(dev, B43_NTAB16(9, 8), 0x144);
+ b43_ntab_write(dev, B43_NTAB16(9, 12), 0x188);
+ }
+ } else {
+ b43_phy_write(dev, B43_NPHY_GPIO_LOOEN, 0);
+ b43_phy_write(dev, B43_NPHY_GPIO_HIOEN, 0);
+
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ bcma_chipco_gpio_control(&dev->dev->bdev->bus->drv_cc,
+ 0xFC00, 0xFC00);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ ssb_chipco_gpio_control(&dev->dev->sdev->bus->chipco,
+ 0xFC00, 0xFC00);
+ break;
+#endif
+ }
+
+ b43_write32(dev, B43_MMIO_MACCTL,
+ b43_read32(dev, B43_MMIO_MACCTL) &
+ ~B43_MACCTL_GPOUTSMSK);
+ b43_write16(dev, B43_MMIO_GPIO_MASK,
+ b43_read16(dev, B43_MMIO_GPIO_MASK) | 0xFC00);
+ b43_write16(dev, B43_MMIO_GPIO_CONTROL,
+ b43_read16(dev, B43_MMIO_GPIO_CONTROL) & ~0xFC00);
+
+ if (init) {
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N */
int b43_phy_initn(struct b43_wldev *dev)
{
struct ssb_sprom *sprom = dev->dev->bus_sprom;
@@ -3857,7 +4109,7 @@ int b43_phy_initn(struct b43_wldev *dev)
tx_pwr_state = nphy->txpwrctrl;
b43_nphy_tx_power_ctrl(dev, false);
b43_nphy_tx_power_fix(dev);
- /* TODO N PHY TX Power Control Idle TSSI */
+ b43_nphy_tx_power_ctl_idle_tssi(dev);
/* TODO N PHY TX Power Control Setup */
b43_nphy_tx_gain_table_upload(dev);
@@ -3928,6 +4180,91 @@ int b43_phy_initn(struct b43_wldev *dev)
return 0;
}
+/**************************************************
+ * Channel switching ops.
+ **************************************************/
+
+static void b43_chantab_phy_upload(struct b43_wldev *dev,
+ const struct b43_phy_n_sfo_cfg *e)
+{
+ b43_phy_write(dev, B43_NPHY_BW1A, e->phy_bw1a);
+ b43_phy_write(dev, B43_NPHY_BW2, e->phy_bw2);
+ b43_phy_write(dev, B43_NPHY_BW3, e->phy_bw3);
+ b43_phy_write(dev, B43_NPHY_BW4, e->phy_bw4);
+ b43_phy_write(dev, B43_NPHY_BW5, e->phy_bw5);
+ b43_phy_write(dev, B43_NPHY_BW6, e->phy_bw6);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PmuSpurAvoid */
+static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid)
+{
+ struct bcma_drv_cc __maybe_unused *cc;
+ u32 __maybe_unused pmu_ctl;
+
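+ /* Reprogram the PMU PLL with a per-chip divider set (presumably to move
+ * the clock away from the spur frequency) and latch it via PMU control. */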
+ switch (dev->dev->bus_type) {
+#ifdef CONFIG_B43_BCMA
+ case B43_BUS_BCMA:
+ cc = &dev->dev->bdev->bus->drv_cc;
+ if (dev->dev->chip_id == 43224 || dev->dev->chip_id == 43225) {
+ if (avoid) {
+ bcma_chipco_pll_write(cc, 0x0, 0x11500010);
+ bcma_chipco_pll_write(cc, 0x1, 0x000C0C06);
+ bcma_chipco_pll_write(cc, 0x2, 0x0F600a08);
+ bcma_chipco_pll_write(cc, 0x3, 0x00000000);
+ bcma_chipco_pll_write(cc, 0x4, 0x2001E920);
+ bcma_chipco_pll_write(cc, 0x5, 0x88888815);
+ } else {
+ bcma_chipco_pll_write(cc, 0x0, 0x11100010);
+ bcma_chipco_pll_write(cc, 0x1, 0x000c0c06);
+ bcma_chipco_pll_write(cc, 0x2, 0x03000a08);
+ bcma_chipco_pll_write(cc, 0x3, 0x00000000);
+ bcma_chipco_pll_write(cc, 0x4, 0x200005c0);
+ bcma_chipco_pll_write(cc, 0x5, 0x88888815);
+ }
+ pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD;
+ } else if (dev->dev->chip_id == 0x4716) {
+ if (avoid) {
+ bcma_chipco_pll_write(cc, 0x0, 0x11500060);
+ bcma_chipco_pll_write(cc, 0x1, 0x080C0C06);
+ bcma_chipco_pll_write(cc, 0x2, 0x0F600000);
+ bcma_chipco_pll_write(cc, 0x3, 0x00000000);
+ bcma_chipco_pll_write(cc, 0x4, 0x2001E924);
+ bcma_chipco_pll_write(cc, 0x5, 0x88888815);
+ } else {
+ bcma_chipco_pll_write(cc, 0x0, 0x11100060);
+ bcma_chipco_pll_write(cc, 0x1, 0x080c0c06);
+ bcma_chipco_pll_write(cc, 0x2, 0x03000000);
+ bcma_chipco_pll_write(cc, 0x3, 0x00000000);
+ bcma_chipco_pll_write(cc, 0x4, 0x200005c0);
+ bcma_chipco_pll_write(cc, 0x5, 0x88888815);
+ }
+ pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD |
+ BCMA_CC_PMU_CTL_NOILPONW;
+ } else if (dev->dev->chip_id == 0x4322 ||
+ dev->dev->chip_id == 0x4340 ||
+ dev->dev->chip_id == 0x4341) {
+ bcma_chipco_pll_write(cc, 0x0, 0x11100070);
+ bcma_chipco_pll_write(cc, 0x1, 0x1014140a);
+ bcma_chipco_pll_write(cc, 0x5, 0x88888854);
+ if (avoid)
+ bcma_chipco_pll_write(cc, 0x2, 0x05201828);
+ else
+ bcma_chipco_pll_write(cc, 0x2, 0x05001828);
+ pmu_ctl = BCMA_CC_PMU_CTL_PLL_UPD;
+ } else {
+ return;
+ }
+ bcma_cc_set32(cc, BCMA_CC_PMU_CTL, pmu_ctl);
+ break;
+#endif
+#ifdef CONFIG_B43_SSB
+ case B43_BUS_SSB:
+ /* FIXME */
+ break;
+#endif
+ }
+}
+
/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */
static void b43_nphy_channel_setup(struct b43_wldev *dev,
const struct b43_phy_n_sfo_cfg *e,
@@ -3935,6 +4272,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
{
struct b43_phy *phy = &dev->phy;
struct b43_phy_n *nphy = dev->phy.n;
+ int ch = new_channel->hw_value;
u16 old_band_5ghz;
u32 tmp32;
@@ -3974,8 +4312,41 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
b43_nphy_tx_lp_fbw(dev);
- if (dev->phy.rev >= 3 && 0) {
- /* TODO */
+ if (dev->phy.rev >= 3 &&
+ dev->phy.n->spur_avoid != B43_SPUR_AVOID_DISABLE) {
+ bool avoid = false;
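+ /* Decide whether spur avoidance is needed: when forced, on 20 MHz
+ * channels 5-8, 13 and 14, or - with the a-band spur workaround
+ * enabled - on the 40 MHz spur channels (38/102/118) of the 0x4716. */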
+ if (dev->phy.n->spur_avoid == B43_SPUR_AVOID_FORCE) {
+ avoid = true;
+ } else if (!b43_channel_type_is_40mhz(phy->channel_type)) {
+ if ((ch >= 5 && ch <= 8) || ch == 13 || ch == 14)
+ avoid = true;
+ } else { /* 40MHz */
+ if (nphy->aband_spurwar_en &&
+ (ch == 38 || ch == 102 || ch == 118))
+ avoid = dev->dev->chip_id == 0x4716;
+ }
+
+ b43_nphy_pmu_spur_avoid(dev, avoid);
+
+ if (dev->dev->chip_id == 43222 || dev->dev->chip_id == 43224 ||
+ dev->dev->chip_id == 43225) {
+ b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_LOW,
+ avoid ? 0x5341 : 0x8889);
+ b43_write16(dev, B43_MMIO_TSF_CLK_FRAC_HIGH, 0x8);
+ }
+
+ if (dev->phy.rev == 3 || dev->phy.rev == 4)
+ ; /* TODO: reset PLL */
+
+ if (avoid)
+ b43_phy_set(dev, B43_NPHY_BBCFG, B43_NPHY_BBCFG_RSTRX);
+ else
+ b43_phy_mask(dev, B43_NPHY_BBCFG,
+ ~B43_NPHY_BBCFG_RSTRX & 0xFFFF);
+
+ b43_nphy_reset_cca(dev);
+
+ /* wl sets useless phy_isspuravoid here */
}
b43_phy_write(dev, B43_NPHY_NDATAT_DUP40, 0x3830);
@@ -4039,6 +4410,10 @@ static int b43_nphy_set_channel(struct b43_wldev *dev,
return 0;
}
+/**************************************************
+ * Basic PHY ops.
+ **************************************************/
+
static int b43_nphy_op_allocate(struct b43_wldev *dev)
{
struct b43_phy_n *nphy;
@@ -4055,10 +4430,13 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
{
struct b43_phy *phy = &dev->phy;
struct b43_phy_n *nphy = phy->n;
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
memset(nphy, 0, sizeof(*nphy));
nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4);
+ nphy->spur_avoid = (phy->rev >= 3) ?
+ B43_SPUR_AVOID_AUTO : B43_SPUR_AVOID_DISABLE;
nphy->gain_boost = true; /* this way we follow wl, assume it is true */
nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */
nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */
@@ -4067,6 +4445,38 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
* 0x7f == 127 and we check for 128 when restoring TX pwr ctl. */
nphy->tx_pwr_idx[0] = 128;
nphy->tx_pwr_idx[1] = 128;
+
+ /* Hardware TX power control and 5GHz power gain */
+ nphy->txpwrctrl = false;
+ nphy->pwg_gain_5ghz = false;
+ if (dev->phy.rev >= 3 ||
+ (dev->dev->board_vendor == PCI_VENDOR_ID_APPLE &&
+ (dev->dev->core_rev == 11 || dev->dev->core_rev == 12))) {
+ nphy->txpwrctrl = true;
+ nphy->pwg_gain_5ghz = true;
+ } else if (sprom->revision >= 4) {
+ if (dev->phy.rev >= 2 &&
+ (sprom->boardflags2_lo & B43_BFL2_TXPWRCTRL_EN)) {
+ nphy->txpwrctrl = true;
+#ifdef CONFIG_B43_SSB
+ if (dev->dev->bus_type == B43_BUS_SSB &&
+ dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI) {
+ struct pci_dev *pdev =
+ dev->dev->sdev->bus->host_pci;
+ if (pdev->device == 0x4328 ||
+ pdev->device == 0x432a)
+ nphy->pwg_gain_5ghz = true;
+ }
+#endif
+ } else if (sprom->boardflags2_lo & B43_BFL2_5G_PWRGAIN) {
+ nphy->pwg_gain_5ghz = true;
+ }
+ }
+
+ if (dev->phy.rev >= 3) {
+ nphy->ipa2g_on = sprom->fem.ghz2.extpa_gain == 2;
+ nphy->ipa5g_on = sprom->fem.ghz5.extpa_gain == 2;
+ }
}
static void b43_nphy_op_free(struct b43_wldev *dev)
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index fbf5202..5de8f74 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -716,6 +716,12 @@
struct b43_wldev;
+enum b43_nphy_spur_avoid {
+ B43_SPUR_AVOID_DISABLE,
+ B43_SPUR_AVOID_AUTO,
+ B43_SPUR_AVOID_FORCE,
+};
+
struct b43_chanspec {
u16 center_freq;
enum nl80211_channel_type channel_type;
@@ -759,6 +765,11 @@ struct b43_phy_n_txpwrindex {
u16 locomp;
};
+struct b43_phy_n_pwr_ctl_info {
+ u8 idle_tssi_2g;
+ u8 idle_tssi_5g;
+};
+
struct b43_phy_n {
u8 antsel_type;
u8 cal_orig_pwr_idx[2];
@@ -785,12 +796,14 @@ struct b43_phy_n {
u16 mphase_txcal_bestcoeffs[11];
bool txpwrctrl;
+ bool pwg_gain_5ghz;
u8 tx_pwr_idx[2];
u16 adj_pwr_tbl[84];
u16 txcal_bbmult;
u16 txiqlocal_bestc[11];
bool txiqlocal_coeffsvalid;
struct b43_phy_n_txpwrindex txpwrindex[2];
+ struct b43_phy_n_pwr_ctl_info pwr_ctl_info[2];
struct b43_chanspec txiqlocal_chanspec;
u8 txrx_chain;
@@ -803,6 +816,7 @@ struct b43_phy_n {
u16 classifier_state;
u16 clip_state[2];
+ enum b43_nphy_spur_avoid spur_avoid;
bool aband_spurwar_en;
bool gband_spurwar_en;
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index 279a53e..3533ab8 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -539,7 +539,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
/* Not enough memory on the queue. */
err = -EBUSY;
ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
- q->stopped = 1;
+ q->stopped = true;
goto out;
}
@@ -566,7 +566,7 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
(q->free_packet_slots == 0)) {
/* The queue is full. */
ieee80211_stop_queue(dev->wl->hw, skb_get_queue_mapping(skb));
- q->stopped = 1;
+ q->stopped = true;
}
out:
@@ -601,7 +601,7 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
if (q->stopped) {
ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
- q->stopped = 0;
+ q->stopped = false;
}
}
diff --git a/drivers/net/wireless/b43/radio_2056.c b/drivers/net/wireless/b43/radio_2056.c
index a01f776..ce037fb 100644
--- a/drivers/net/wireless/b43/radio_2056.c
+++ b/drivers/net/wireless/b43/radio_2056.c
@@ -1572,14 +1572,14 @@ static const struct b2056_inittab_entry b2056_inittab_rev6_syn[] = {
[B2056_SYN_PLL_XTAL5] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
[B2056_SYN_PLL_XTAL6] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
[B2056_SYN_PLL_REFDIV] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
- [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
+ [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
[B2056_SYN_PLL_CP1] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
- [B2056_SYN_PLL_CP2] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
+ [B2056_SYN_PLL_CP2] = { .ghz5 = 0x003f, .ghz2 = 0x003f, UPLOAD, },
[B2056_SYN_PLL_CP3] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
- [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
- [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x0006, .ghz2 = 0x0006, UPLOAD, },
[B2056_SYN_PLL_LOOPFILTER3] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
- [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
+ [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x002b, .ghz2 = 0x002b, UPLOAD, },
[B2056_SYN_PLL_LOOPFILTER5] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
[B2056_SYN_PLL_MMD1] = { .ghz5 = 0x001c, .ghz2 = 0x001c, NOUPLOAD, },
[B2056_SYN_PLL_MMD2] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
@@ -9055,6 +9055,21 @@ void b2056_upload_inittabs(struct b43_wldev *dev,
B2056_RX1, pts->rx, pts->rx_length);
}
+void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5)
+{
+ struct b2056_inittabs_pts *pts;
+ const struct b2056_inittab_entry *e;
+
+ if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) {
+ B43_WARN_ON(1);
+ return;
+ }
+ pts = &b2056_inittabs[dev->phy.rev];
+ e = &pts->syn[B2056_SYN_PLL_CP2];
+
+ b43_radio_write(dev, B2056_SYN_PLL_CP2, ghz5 ? e->ghz5 : e->ghz2);
+}
+
const struct b43_nphy_channeltab_entry_rev3 *
b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq)
{
diff --git a/drivers/net/wireless/b43/radio_2056.h b/drivers/net/wireless/b43/radio_2056.h
index a7159d8..5b86673 100644
--- a/drivers/net/wireless/b43/radio_2056.h
+++ b/drivers/net/wireless/b43/radio_2056.h
@@ -1090,6 +1090,7 @@ struct b43_nphy_channeltab_entry_rev3 {
void b2056_upload_inittabs(struct b43_wldev *dev,
bool ghz5, bool ignore_uploadflag);
+void b2056_upload_syn_pll_cp2(struct b43_wldev *dev, bool ghz5);
/* Get the NPHY Channel Switch Table entry for a channel.
* Returns NULL on failure to find an entry. */
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 7b326f2..f7def135 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -2171,6 +2171,48 @@ static const u16 b43_ntab_loftlt1_r3[] = {
0x0000, 0x0000,
};
+/* volatile tables, PHY revision >= 3 */
+
+/* indexed by antswctl2g */
+static const u16 b43_ntab_antswctl2g_r3[4][32] = {
+ {
+ 0x0082, 0x0082, 0x0211, 0x0222, 0x0328,
+ 0x0000, 0x0000, 0x0000, 0x0144, 0x0000,
+ 0x0000, 0x0000, 0x0188, 0x0000, 0x0000,
+ 0x0000, 0x0082, 0x0082, 0x0211, 0x0222,
+ 0x0328, 0x0000, 0x0000, 0x0000, 0x0144,
+ 0x0000, 0x0000, 0x0000, 0x0188, 0x0000,
+ 0x0000, 0x0000,
+ },
+ {
+ 0x0022, 0x0022, 0x0011, 0x0022, 0x0022,
+ 0x0000, 0x0000, 0x0000, 0x0011, 0x0000,
+ 0x0000, 0x0000, 0x0022, 0x0000, 0x0000,
+ 0x0000, 0x0022, 0x0022, 0x0011, 0x0022,
+ 0x0022, 0x0000, 0x0000, 0x0000, 0x0011,
+ 0x0000, 0x0000, 0x0000, 0x0022, 0x0000,
+ 0x0000, 0x0000,
+ },
+ {
+ 0x0088, 0x0088, 0x0044, 0x0088, 0x0088,
+ 0x0000, 0x0000, 0x0000, 0x0044, 0x0000,
+ 0x0000, 0x0000, 0x0088, 0x0000, 0x0000,
+ 0x0000, 0x0088, 0x0088, 0x0044, 0x0088,
+ 0x0088, 0x0000, 0x0000, 0x0000, 0x0044,
+ 0x0000, 0x0000, 0x0000, 0x0088, 0x0000,
+ 0x0000, 0x0000,
+ },
+ {
+ 0x0022, 0x0022, 0x0011, 0x0022, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x0011, 0x0000,
+ 0x0000, 0x0000, 0x0022, 0x0000, 0x0000,
+ 0x03cc, 0x0022, 0x0022, 0x0011, 0x0022,
+ 0x0000, 0x0000, 0x0000, 0x0000, 0x0011,
+ 0x0000, 0x0000, 0x0000, 0x0022, 0x0000,
+ 0x0000, 0x03cc,
+ }
+};
+
/* TX gain tables */
const u32 b43_ntab_tx_gain_rev0_1_2[] = {
0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42,
@@ -2652,7 +2694,7 @@ const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
const s16 tbl_tx_filter_coef_rev4[7][15] = {
{ -377, 137, -407, 208, -1527,
956, 93, 186, 93, 230,
- -44, 230, 20, -191, 201 },
+ -44, 230, 201, -191, 201 },
{ -77, 20, -98, 49, -93,
60, 56, 111, 56, 26,
-5, 26, 34, -32, 34 },
@@ -2710,7 +2752,18 @@ const struct nphy_rf_control_override_rev3 tbl_rf_control_override_rev3[] = {
{ 0x00C0, 6, 0xE7, 0xF9, 0xEC, 0xFB } /* field == 0x4000 (fls 15) */
};
-struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][3] = {
+struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_wa_phy6_radio11_ghz2 = {
+ { 10, 14, 19, 27 },
+ { -5, 6, 10, 15 },
+ { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
+ { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
+ 0x427E,
+ { 0x413F, 0x413F, 0x413F, 0x413F },
+ 0x007E, 0x0066, 0x1074,
+ 0x18, 0x18, 0x18,
+ 0x01D0, 0x5,
+};
+struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][4] = {
{ /* 2GHz */
{ /* PHY rev 3 */
{ 7, 11, 16, 23 },
@@ -2734,15 +2787,26 @@ struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][3] = {
0x18, 0x18, 0x18,
0x01A1, 0x5,
},
- { /* PHY rev 5+ */
+ { /* PHY rev 5 */
{ 9, 13, 18, 26 },
{ -3, 7, 11, 16 },
{ 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
{ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
0x427E, /* invalid for external LNA! */
{ 0x413F, 0x413F, 0x413F, 0x413F }, /* invalid for external LNA! */
- 0x1076, 0x0066, 0x106A,
- 0xC, 0xC, 0xC,
+ 0x1076, 0x0066, 0x0000, /* low is invalid (the last one) */
+ 0x18, 0x18, 0x18,
+ 0x01D0, 0x9,
+ },
+ { /* PHY rev 6+ */
+ { 8, 13, 18, 25 },
+ { -5, 6, 10, 14 },
+ { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA },
+ { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 },
+ 0x527E, /* invalid for external LNA! */
+ { 0x513F, 0x513F, 0x513F, 0x513F }, /* invalid for external LNA! */
+ 0x1076, 0x0066, 0x0000, /* low is invalid (the last one) */
+ 0x18, 0x18, 0x18,
0x01D0, 0x5,
},
},
@@ -2769,7 +2833,7 @@ struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][3] = {
0x24, 0x24, 0x24,
0x0107, 25,
},
- { /* PHY rev 5+ */
+ { /* PHY rev 5 */
{ 6, 10, 16, 21 },
{ -7, 0, 4, 8 },
{ 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD },
@@ -2780,6 +2844,17 @@ struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][3] = {
0x24, 0x24, 0x24,
0x00A9, 25,
},
+ { /* PHY rev 6+ */
+ { 6, 10, 16, 21 },
+ { -7, 0, 4, 8 },
+ { 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD },
+ { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 },
+ 0x729E,
+ { 0x714F, 0x714F, 0x714F, 0x714F },
+ 0x029E, 0x2084, 0x2086,
+ 0x24, 0x24, 0x24, /* low is invalid for radio rev 11! */
+ 0x00F0, 25,
+ },
},
};
@@ -2838,9 +2913,8 @@ u32 b43_ntab_read(struct b43_wldev *dev, u32 offset)
break;
case B43_NTAB_32BIT:
b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
- value = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI);
- value <<= 16;
- value |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ value = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ value |= b43_phy_read(dev, B43_NPHY_TABLE_DATAHI) << 16;
break;
default:
B43_WARN_ON(1);
@@ -2864,6 +2938,12 @@ void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset,
b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
for (i = 0; i < nr_elements; i++) {
+ /* Auto increment broken + caching issue on BCM43224? */
+ if (dev->dev->chip_id == 43224 && dev->dev->chip_rev == 1) {
+ b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset + i);
+ }
+
switch (type) {
case B43_NTAB_8BIT:
*data = b43_phy_read(dev, B43_NPHY_TABLE_DATALO) & 0xFF;
@@ -2874,9 +2954,10 @@ void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset,
data += 2;
break;
case B43_NTAB_32BIT:
- *((u32 *)data) = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI);
- *((u32 *)data) <<= 16;
- *((u32 *)data) |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ *((u32 *)data) =
+ b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ *((u32 *)data) |=
+ b43_phy_read(dev, B43_NPHY_TABLE_DATAHI) << 16;
data += 4;
break;
default:
@@ -2932,6 +3013,13 @@ void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
for (i = 0; i < nr_elements; i++) {
+ /* Auto increment broken + caching issue on BCM43224? */
+ if ((offset >> 10) == 9 && dev->dev->chip_id == 43224 &&
+ dev->dev->chip_rev == 1) {
+ b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset + i);
+ }
+
switch (type) {
case B43_NTAB_8BIT:
value = *data;
@@ -2999,6 +3087,8 @@ void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
} while (0)
void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
{
+ struct ssb_sprom *sprom = dev->dev->bus_sprom;
+
/* Static tables */
ntab_upload_r3(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3);
ntab_upload_r3(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3);
@@ -3029,7 +3119,11 @@ void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
ntab_upload_r3(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3);
/* Volatile tables */
- /* TODO */
+ if (sprom->fem.ghz2.antswlut < ARRAY_SIZE(b43_ntab_antswctl2g_r3))
+ ntab_upload_r3(dev, B43_NTAB_ANT_SW_CTL_R3,
+ b43_ntab_antswctl2g_r3[sprom->fem.ghz2.antswlut]);
+ else
+ B43_WARN_ON(1);
}
struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
@@ -3037,26 +3131,67 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
{
struct nphy_gain_ctl_workaround_entry *e;
u8 phy_idx;
+ u8 tr_iso = ghz5 ? dev->dev->bus_sprom->fem.ghz5.tr_iso :
+ dev->dev->bus_sprom->fem.ghz2.tr_iso;
+
+ if (!ghz5 && dev->phy.rev >= 6 && dev->phy.radio_rev == 11)
+ return &nphy_gain_ctl_wa_phy6_radio11_ghz2;
B43_WARN_ON(dev->phy.rev < 3);
- if (dev->phy.rev >= 5)
+ if (dev->phy.rev >= 6)
+ phy_idx = 3;
+ else if (dev->phy.rev == 5)
phy_idx = 2;
else if (dev->phy.rev == 4)
phy_idx = 1;
else
phy_idx = 0;
-
e = &nphy_gain_ctl_workaround[ghz5][phy_idx];
- /* Only one entry differs for external LNA, so instead making whole
- * table 2 times bigger, hack is here
- */
- if (!ghz5 && dev->phy.rev >= 5 && ext_lna) {
- e->rfseq_init[0] &= 0x0FFF;
- e->rfseq_init[1] &= 0x0FFF;
- e->rfseq_init[2] &= 0x0FFF;
- e->rfseq_init[3] &= 0x0FFF;
- e->init_gain &= 0x0FFF;
+ /* Some workarounds to the workarounds... */
+ if (ghz5 && dev->phy.rev >= 6) {
+ if (dev->phy.radio_rev == 11 &&
+ !b43_channel_type_is_40mhz(dev->phy.channel_type))
+ e->cliplo_gain = 0x2d;
+ } else if (!ghz5 && dev->phy.rev >= 5) {
+ if (ext_lna) {
+ e->rfseq_init[0] &= ~0x4000;
+ e->rfseq_init[1] &= ~0x4000;
+ e->rfseq_init[2] &= ~0x4000;
+ e->rfseq_init[3] &= ~0x4000;
+ e->init_gain &= ~0x4000;
+ }
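+ /* Pick the clip-low gain according to the TR switch isolation
+ * value from the SPROM. */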
+ switch (tr_iso) {
+ case 0:
+ e->cliplo_gain = 0x0062;
+ break;
+ case 1:
+ e->cliplo_gain = 0x0064;
+ break;
+ case 2:
+ e->cliplo_gain = 0x006a;
+ break;
+ case 3:
+ e->cliplo_gain = 0x106a;
+ break;
+ case 4:
+ e->cliplo_gain = 0x106c;
+ break;
+ case 5:
+ e->cliplo_gain = 0x1074;
+ break;
+ case 6:
+ e->cliplo_gain = 0x107c;
+ break;
+ case 7:
+ e->cliplo_gain = 0x207c;
+ break;
+ default:
+ e->cliplo_gain = 0x106a;
+ }
+ } else if (ghz5 && dev->phy.rev == 4 && ext_lna) {
+ e->rfseq_init[0] &= ~0x4000;
+ e->rfseq_init[1] &= ~0x4000;
+ e->rfseq_init[2] &= ~0x4000;
+ e->rfseq_init[3] &= ~0x4000;
+ e->init_gain &= ~0x4000;
+ e->rfseq_init[0] |= 0x1000;
+ e->rfseq_init[1] |= 0x1000;
+ e->rfseq_init[2] |= 0x1000;
+ e->rfseq_init[3] |= 0x1000;
+ e->init_gain |= 0x1000;
}
return e;
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index a81696b..97038c4 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -126,26 +126,29 @@ struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent(
#define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */
#define B43_NTAB_C1_LOFEEDTH_SIZE 128
+/* Volatile N-PHY tables, PHY revision >= 3 */
+#define B43_NTAB_ANT_SW_CTL_R3 B43_NTAB16( 9, 0) /* antenna software control */
+
/* Static N-PHY tables, PHY revision >= 3 */
-#define B43_NTAB_FRAMESTRUCT_R3 B43_NTAB32(10, 000) /* frame struct */
-#define B43_NTAB_PILOT_R3 B43_NTAB16(11, 000) /* pilot */
-#define B43_NTAB_TMAP_R3 B43_NTAB32(12, 000) /* TM AP */
-#define B43_NTAB_INTLEVEL_R3 B43_NTAB32(13, 000) /* INT LV */
-#define B43_NTAB_TDTRN_R3 B43_NTAB32(14, 000) /* TD TRN */
-#define B43_NTAB_NOISEVAR0_R3 B43_NTAB32(16, 000) /* noise variance 0 */
+#define B43_NTAB_FRAMESTRUCT_R3 B43_NTAB32(10, 0) /* frame struct */
+#define B43_NTAB_PILOT_R3 B43_NTAB16(11, 0) /* pilot */
+#define B43_NTAB_TMAP_R3 B43_NTAB32(12, 0) /* TM AP */
+#define B43_NTAB_INTLEVEL_R3 B43_NTAB32(13, 0) /* INT LV */
+#define B43_NTAB_TDTRN_R3 B43_NTAB32(14, 0) /* TD TRN */
+#define B43_NTAB_NOISEVAR0_R3 B43_NTAB32(16, 0) /* noise variance 0 */
#define B43_NTAB_NOISEVAR1_R3 B43_NTAB32(16, 128) /* noise variance 1 */
-#define B43_NTAB_MCS_R3 B43_NTAB16(18, 000) /* MCS */
+#define B43_NTAB_MCS_R3 B43_NTAB16(18, 0) /* MCS */
#define B43_NTAB_TDI20A0_R3 B43_NTAB32(19, 128) /* TDI 20/0 */
#define B43_NTAB_TDI20A1_R3 B43_NTAB32(19, 256) /* TDI 20/1 */
#define B43_NTAB_TDI40A0_R3 B43_NTAB32(19, 640) /* TDI 40/0 */
#define B43_NTAB_TDI40A1_R3 B43_NTAB32(19, 768) /* TDI 40/1 */
-#define B43_NTAB_PILOTLT_R3 B43_NTAB32(20, 000) /* PLT lookup */
-#define B43_NTAB_CHANEST_R3 B43_NTAB32(22, 000) /* channel estimate */
-#define B43_NTAB_FRAMELT_R3 B43_NTAB8 (24, 000) /* frame lookup */
-#define B43_NTAB_C0_ESTPLT_R3 B43_NTAB8 (26, 000) /* estimated power lookup 0 */
-#define B43_NTAB_C1_ESTPLT_R3 B43_NTAB8 (27, 000) /* estimated power lookup 1 */
-#define B43_NTAB_C0_ADJPLT_R3 B43_NTAB8 (26, 064) /* adjusted power lookup 0 */
-#define B43_NTAB_C1_ADJPLT_R3 B43_NTAB8 (27, 064) /* adjusted power lookup 1 */
+#define B43_NTAB_PILOTLT_R3 B43_NTAB32(20, 0) /* PLT lookup */
+#define B43_NTAB_CHANEST_R3 B43_NTAB32(22, 0) /* channel estimate */
+#define B43_NTAB_FRAMELT_R3 B43_NTAB8(24, 0) /* frame lookup */
+#define B43_NTAB_C0_ESTPLT_R3 B43_NTAB8(26, 0) /* estimated power lookup 0 */
+#define B43_NTAB_C1_ESTPLT_R3 B43_NTAB8(27, 0) /* estimated power lookup 1 */
+#define B43_NTAB_C0_ADJPLT_R3 B43_NTAB8(26, 64) /* adjusted power lookup 0 */
+#define B43_NTAB_C1_ADJPLT_R3 B43_NTAB8(27, 64) /* adjusted power lookup 1 */
#define B43_NTAB_C0_GAINCTL_R3 B43_NTAB32(26, 192) /* gain control lookup 0 */
#define B43_NTAB_C1_GAINCTL_R3 B43_NTAB32(27, 192) /* gain control lookup 1 */
#define B43_NTAB_C0_IQLT_R3 B43_NTAB32(26, 320) /* I/Q lookup 0 */
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 5f77cbe..2c53678 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -874,7 +874,7 @@ bool b43_fill_txstatus_report(struct b43_wldev *dev,
struct ieee80211_tx_info *report,
const struct b43_txstatus *status)
{
- bool frame_success = 1;
+ bool frame_success = true;
int retry_limit;
/* preserve the configured retry limit before clearing the status
@@ -890,7 +890,7 @@ bool b43_fill_txstatus_report(struct b43_wldev *dev,
/* The frame was not ACKed... */
if (!(report->flags & IEEE80211_TX_CTL_NO_ACK)) {
/* ...but we expected an ACK. */
- frame_success = 0;
+ frame_success = false;
}
}
if (status->frame_count == 0) {
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index 1d4fc9d..98e3d44 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -560,8 +560,16 @@ struct b43legacy_key {
u8 algorithm;
};
+#define B43legacy_QOS_QUEUE_NUM 4
+
struct b43legacy_wldev;
+/* QOS parameters for a queue. */
+struct b43legacy_qos_params {
+ /* The QOS parameters */
+ struct ieee80211_tx_queue_params p;
+};
+
/* Data structure for the WLAN parts (802.11 cores) of the b43legacy chip. */
struct b43legacy_wl {
/* Pointer to the active wireless device on this chip */
@@ -611,6 +619,18 @@ struct b43legacy_wl {
bool beacon1_uploaded;
bool beacon_templates_virgin; /* Never wrote the templates? */
struct work_struct beacon_update_trigger;
+ /* The current QOS parameters for the 4 queues. */
+ struct b43legacy_qos_params qos_params[B43legacy_QOS_QUEUE_NUM];
+
+ /* Packet transmit work */
+ struct work_struct tx_work;
+
+ /* Queue of packets to be transmitted. */
+ struct sk_buff_head tx_queue[B43legacy_QOS_QUEUE_NUM];
+
+ /* Per-queue flags that implement the queue stopping. */
+ bool tx_queue_stopped[B43legacy_QOS_QUEUE_NUM];
+
};
/* Pointers to the firmware data and meta information about it. */
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index c5535ad..f1f8bd0 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -715,7 +715,7 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
ring->index = controller_index;
if (for_tx) {
- ring->tx = 1;
+ ring->tx = true;
ring->current_slot = -1;
} else {
if (ring->index == 0) {
@@ -727,7 +727,6 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
} else
B43legacy_WARN_ON(1);
}
- spin_lock_init(&ring->lock);
#ifdef CONFIG_B43LEGACY_DEBUG
ring->last_injected_overflow = jiffies;
#endif
@@ -806,7 +805,7 @@ void b43legacy_dma_free(struct b43legacy_wldev *dev)
static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
{
u64 orig_mask = mask;
- bool fallback = 0;
+ bool fallback = false;
int err;
/* Try to set the DMA mask. If it fails, try falling back to a
@@ -820,12 +819,12 @@ static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
}
if (mask == DMA_BIT_MASK(64)) {
mask = DMA_BIT_MASK(32);
- fallback = 1;
+ fallback = true;
continue;
}
if (mask == DMA_BIT_MASK(32)) {
mask = DMA_BIT_MASK(30);
- fallback = 1;
+ fallback = true;
continue;
}
b43legacyerr(dev->wl, "The machine/kernel does not support "
@@ -858,7 +857,7 @@ int b43legacy_dma_init(struct b43legacy_wldev *dev)
#ifdef CONFIG_B43LEGACY_PIO
b43legacywarn(dev->wl, "DMA for this device not supported. "
"Falling back to PIO\n");
- dev->__using_pio = 1;
+ dev->__using_pio = true;
return -EAGAIN;
#else
b43legacyerr(dev->wl, "DMA for this device not supported and "
@@ -1068,7 +1067,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
memset(meta, 0, sizeof(*meta));
meta->skb = skb;
- meta->is_last_fragment = 1;
+ meta->is_last_fragment = true;
meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
/* create a bounce buffer in zone_dma on mapping failure. */
@@ -1144,10 +1143,8 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
{
struct b43legacy_dmaring *ring;
int err = 0;
- unsigned long flags;
ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
- spin_lock_irqsave(&ring->lock, flags);
B43legacy_WARN_ON(!ring->tx);
if (unlikely(ring->stopped)) {
@@ -1157,16 +1154,14 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
* For now, just refuse the transmit. */
if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
b43legacyerr(dev->wl, "Packet after queue stopped\n");
- err = -ENOSPC;
- goto out_unlock;
+ return -ENOSPC;
}
if (unlikely(WARN_ON(free_slots(ring) < SLOTS_PER_PACKET))) {
/* If we get here, we have a real error with the queue
* full, but queues not stopped. */
b43legacyerr(dev->wl, "DMA queue overflow\n");
- err = -ENOSPC;
- goto out_unlock;
+ return -ENOSPC;
}
/* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing
@@ -1176,25 +1171,23 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
/* Drop this packet, as we don't have the encryption key
* anymore and must not transmit it unencrypted. */
dev_kfree_skb_any(skb);
- err = 0;
- goto out_unlock;
+ return 0;
}
if (unlikely(err)) {
b43legacyerr(dev->wl, "DMA tx mapping failure\n");
- goto out_unlock;
+ return err;
}
if ((free_slots(ring) < SLOTS_PER_PACKET) ||
should_inject_overflow(ring)) {
/* This TX ring is full. */
- ieee80211_stop_queue(dev->wl->hw, txring_to_priority(ring));
- ring->stopped = 1;
+ unsigned int skb_mapping = skb_get_queue_mapping(skb);
+ ieee80211_stop_queue(dev->wl->hw, skb_mapping);
+ dev->wl->tx_queue_stopped[skb_mapping] = 1;
+ ring->stopped = true;
if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
b43legacydbg(dev->wl, "Stopped TX ring %d\n",
ring->index);
}
-out_unlock:
- spin_unlock_irqrestore(&ring->lock, flags);
-
return err;
}
@@ -1205,14 +1198,29 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
struct b43legacy_dmadesc_meta *meta;
int retry_limit;
int slot;
+ int firstused;
ring = parse_cookie(dev, status->cookie, &slot);
if (unlikely(!ring))
return;
- B43legacy_WARN_ON(!irqs_disabled());
- spin_lock(&ring->lock);
-
B43legacy_WARN_ON(!ring->tx);
+
+ /* Sanity check: TX packets are processed in-order on one ring.
+ * Check if the slot deduced from the cookie really is the first
+ * used slot. */
+ firstused = ring->current_slot - ring->used_slots + 1;
+ if (firstused < 0)
+ firstused = ring->nr_slots + firstused;
+ if (unlikely(slot != firstused)) {
+ /* This possibly is a firmware bug and will result in
+ * malfunction, memory leaks and/or stall of DMA functionality.
+ */
+ b43legacydbg(dev->wl, "Out of order TX status report on DMA "
+ "ring %d. Expected %d, but got %d\n",
+ ring->index, firstused, slot);
+ return;
+ }
+
while (1) {
B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
op32_idx2desc(ring, slot, &meta);
@@ -1285,14 +1293,21 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
dev->stats.last_tx = jiffies;
if (ring->stopped) {
B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
- ieee80211_wake_queue(dev->wl->hw, txring_to_priority(ring));
- ring->stopped = 0;
+ ring->stopped = false;
+ }
+
+ if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
+ dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
+ } else {
+ /* If the driver queue is running, wake the corresponding
+ * mac80211 queue. */
+ ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
b43legacydbg(dev->wl, "Woke up TX ring %d\n",
- ring->index);
+ ring->index);
}
-
- spin_unlock(&ring->lock);
+ /* Add work to the queue. */
+ ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}
static void dma_rx(struct b43legacy_dmaring *ring,
@@ -1415,22 +1430,14 @@ void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ring->lock, flags);
B43legacy_WARN_ON(!ring->tx);
op32_tx_suspend(ring);
- spin_unlock_irqrestore(&ring->lock, flags);
}
static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ring->lock, flags);
B43legacy_WARN_ON(!ring->tx);
op32_tx_resume(ring);
- spin_unlock_irqrestore(&ring->lock, flags);
}
void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev)
diff --git a/drivers/net/wireless/b43legacy/dma.h b/drivers/net/wireless/b43legacy/dma.h
index 504a587..c3282f9 100644
--- a/drivers/net/wireless/b43legacy/dma.h
+++ b/drivers/net/wireless/b43legacy/dma.h
@@ -150,8 +150,9 @@ struct b43legacy_dmaring {
enum b43legacy_dmatype type;
/* Boolean. Is this ring stopped at ieee80211 level? */
bool stopped;
- /* Lock, only used for TX. */
- spinlock_t lock;
+ /* The QOS priority assigned to this ring. Only used for TX rings.
+ * This is the mac80211 "queue" value. */
+ u8 queue_prio;
struct b43legacy_wldev *dev;
#ifdef CONFIG_B43LEGACY_DEBUG
/* Maximum number of used slots. */
diff --git a/drivers/net/wireless/b43legacy/leds.c b/drivers/net/wireless/b43legacy/leds.c
index 2f1bfdc..fd45653 100644
--- a/drivers/net/wireless/b43legacy/leds.c
+++ b/drivers/net/wireless/b43legacy/leds.c
@@ -203,11 +203,11 @@ void b43legacy_leds_init(struct b43legacy_wldev *dev)
if (sprom[i] == 0xFF) {
/* There is no LED information in the SPROM
* for this LED. Hardcode it here. */
- activelow = 0;
+ activelow = false;
switch (i) {
case 0:
behaviour = B43legacy_LED_ACTIVITY;
- activelow = 1;
+ activelow = true;
if (bus->boardinfo.vendor == PCI_VENDOR_ID_COMPAQ)
behaviour = B43legacy_LED_RADIO_ALL;
break;
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 20f0243..75e70bc 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -722,9 +722,9 @@ void b43legacy_wireless_core_reset(struct b43legacy_wldev *dev, u32 flags)
macctl &= ~B43legacy_MACCTL_GMODE;
if (flags & B43legacy_TMSLOW_GMODE) {
macctl |= B43legacy_MACCTL_GMODE;
- dev->phy.gmode = 1;
+ dev->phy.gmode = true;
} else
- dev->phy.gmode = 0;
+ dev->phy.gmode = false;
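+ /* Hand the upper GPIO lines (mask 0xFC00) over to the chipcommon core
+ * and mask them out of the MAC's GPIO output control. */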
macctl |= B43legacy_MACCTL_IHR_ENABLED;
b43legacy_write32(dev, B43legacy_MMIO_MACCTL, macctl);
}
@@ -811,7 +811,7 @@ static void b43legacy_calculate_link_quality(struct b43legacy_wldev *dev)
if (dev->noisecalc.calculation_running)
return;
dev->noisecalc.channel_at_start = dev->phy.channel;
- dev->noisecalc.calculation_running = 1;
+ dev->noisecalc.calculation_running = true;
dev->noisecalc.nr_samples = 0;
b43legacy_generate_noise_sample(dev);
@@ -873,7 +873,7 @@ static void handle_irq_noise(struct b43legacy_wldev *dev)
dev->stats.link_noise = average;
drop_calculation:
- dev->noisecalc.calculation_running = 0;
+ dev->noisecalc.calculation_running = false;
return;
}
generate_new:
@@ -889,7 +889,7 @@ static void handle_irq_tbtt_indication(struct b43legacy_wldev *dev)
b43legacy_power_saving_ctl_bits(dev, -1, -1);
}
if (b43legacy_is_mode(dev->wl, NL80211_IFTYPE_ADHOC))
- dev->dfq_valid = 1;
+ dev->dfq_valid = true;
}
static void handle_irq_atim_end(struct b43legacy_wldev *dev)
@@ -898,7 +898,7 @@ static void handle_irq_atim_end(struct b43legacy_wldev *dev)
b43legacy_write32(dev, B43legacy_MMIO_MACCMD,
b43legacy_read32(dev, B43legacy_MMIO_MACCMD)
| B43legacy_MACCMD_DFQ_VALID);
- dev->dfq_valid = 0;
+ dev->dfq_valid = false;
}
}
@@ -971,7 +971,7 @@ static void b43legacy_write_beacon_template(struct b43legacy_wldev *dev,
unsigned int i, len, variable_len;
const struct ieee80211_mgmt *bcn;
const u8 *ie;
- bool tim_found = 0;
+ bool tim_found = false;
unsigned int rate;
u16 ctl;
int antenna;
@@ -1019,7 +1019,7 @@ static void b43legacy_write_beacon_template(struct b43legacy_wldev *dev,
/* A valid TIM is at least 4 bytes long. */
if (ie_len < 4)
break;
- tim_found = 1;
+ tim_found = true;
tim_position = sizeof(struct b43legacy_plcp_hdr6);
tim_position += offsetof(struct ieee80211_mgmt,
@@ -1172,7 +1172,7 @@ static void b43legacy_upload_beacon0(struct b43legacy_wldev *dev)
* but we don't use that feature anyway. */
b43legacy_write_probe_resp_template(dev, 0x268, 0x4A,
&__b43legacy_ratetable[3]);
- wl->beacon0_uploaded = 1;
+ wl->beacon0_uploaded = true;
}
static void b43legacy_upload_beacon1(struct b43legacy_wldev *dev)
@@ -1182,7 +1182,7 @@ static void b43legacy_upload_beacon1(struct b43legacy_wldev *dev)
if (wl->beacon1_uploaded)
return;
b43legacy_write_beacon_template(dev, 0x468, 0x1A);
- wl->beacon1_uploaded = 1;
+ wl->beacon1_uploaded = true;
}
static void handle_irq_beacon(struct b43legacy_wldev *dev)
@@ -1212,7 +1212,7 @@ static void handle_irq_beacon(struct b43legacy_wldev *dev)
if (unlikely(wl->beacon_templates_virgin)) {
/* We never uploaded a beacon before.
* Upload both templates now, but only mark one valid. */
- wl->beacon_templates_virgin = 0;
+ wl->beacon_templates_virgin = false;
b43legacy_upload_beacon0(dev);
b43legacy_upload_beacon1(dev);
cmd = b43legacy_read32(dev, B43legacy_MMIO_MACCMD);
@@ -1275,8 +1275,8 @@ static void b43legacy_update_templates(struct b43legacy_wl *wl)
if (wl->current_beacon)
dev_kfree_skb_any(wl->current_beacon);
wl->current_beacon = beacon;
- wl->beacon0_uploaded = 0;
- wl->beacon1_uploaded = 0;
+ wl->beacon0_uploaded = false;
+ wl->beacon1_uploaded = false;
ieee80211_queue_work(wl->hw, &wl->beacon_update_trigger);
}
@@ -2440,30 +2440,64 @@ static int b43legacy_rng_init(struct b43legacy_wl *wl)
return err;
}
+static void b43legacy_tx_work(struct work_struct *work)
+{
+ struct b43legacy_wl *wl = container_of(work, struct b43legacy_wl,
+ tx_work);
+ struct b43legacy_wldev *dev;
+ struct sk_buff *skb;
+ int queue_num;
+ int err = 0;
+
+ mutex_lock(&wl->mutex);
+ dev = wl->current_dev;
+ if (unlikely(!dev || b43legacy_status(dev) < B43legacy_STAT_STARTED)) {
+ mutex_unlock(&wl->mutex);
+ return;
+ }
+
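+ /* Drain each QOS queue; on -ENOSPC stop the mac80211 queue, requeue
+ * the packet and leave it for the next run of this worker. */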
+ for (queue_num = 0; queue_num < B43legacy_QOS_QUEUE_NUM; queue_num++) {
+ while (skb_queue_len(&wl->tx_queue[queue_num])) {
+ skb = skb_dequeue(&wl->tx_queue[queue_num]);
+ if (b43legacy_using_pio(dev))
+ err = b43legacy_pio_tx(dev, skb);
+ else
+ err = b43legacy_dma_tx(dev, skb);
+ if (err == -ENOSPC) {
+ wl->tx_queue_stopped[queue_num] = 1;
+ ieee80211_stop_queue(wl->hw, queue_num);
+ skb_queue_head(&wl->tx_queue[queue_num], skb);
+ break;
+ }
+ if (unlikely(err))
+ dev_kfree_skb(skb); /* Drop it */
+ err = 0;
+ }
+
+ if (!err)
+ wl->tx_queue_stopped[queue_num] = 0;
+ }
+
+ mutex_unlock(&wl->mutex);
+}
+
static void b43legacy_op_tx(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
- struct b43legacy_wldev *dev = wl->current_dev;
- int err = -ENODEV;
- unsigned long flags;
- if (unlikely(!dev))
- goto out;
- if (unlikely(b43legacy_status(dev) < B43legacy_STAT_STARTED))
- goto out;
- /* DMA-TX is done without a global lock. */
- if (b43legacy_using_pio(dev)) {
- spin_lock_irqsave(&wl->irq_lock, flags);
- err = b43legacy_pio_tx(dev, skb);
- spin_unlock_irqrestore(&wl->irq_lock, flags);
- } else
- err = b43legacy_dma_tx(dev, skb);
-out:
- if (unlikely(err)) {
- /* Drop the packet. */
+ if (unlikely(skb->len < 2 + 2 + 6)) {
+ /* Too short, this can't be a valid frame. */
dev_kfree_skb_any(skb);
+ return;
}
+ B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags);
+
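+ /* Defer the transmission to the tx_work worker: queue the frame and
+ * only kick the worker if the queue is not stopped. */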
+ skb_queue_tail(&wl->tx_queue[skb->queue_mapping], skb);
+ if (!wl->tx_queue_stopped[skb->queue_mapping])
+ ieee80211_queue_work(wl->hw, &wl->tx_work);
+ else
+ ieee80211_stop_queue(wl->hw, skb->queue_mapping);
}
static int b43legacy_op_conf_tx(struct ieee80211_hw *hw,
@@ -2510,7 +2544,7 @@ static int find_wldev_for_phymode(struct b43legacy_wl *wl,
if (d->phy.possible_phymodes & phymode) {
/* Ok, this device supports the PHY-mode.
* Set the gmode bit. */
- *gmode = 1;
+ *gmode = true;
*dev = d;
return 0;
@@ -2546,7 +2580,7 @@ static int b43legacy_switch_phymode(struct b43legacy_wl *wl,
struct b43legacy_wldev *uninitialized_var(up_dev);
struct b43legacy_wldev *down_dev;
int err;
- bool gmode = 0;
+ bool gmode = false;
int prev_status;
err = find_wldev_for_phymode(wl, new_mode, &up_dev, &gmode);
@@ -2879,6 +2913,7 @@ static void b43legacy_wireless_core_stop(struct b43legacy_wldev *dev)
{
struct b43legacy_wl *wl = dev->wl;
unsigned long flags;
+ int queue_num;
if (b43legacy_status(dev) < B43legacy_STAT_STARTED)
return;
@@ -2898,11 +2933,16 @@ static void b43legacy_wireless_core_stop(struct b43legacy_wldev *dev)
/* Must unlock as it would otherwise deadlock. No races here.
* Cancel the possibly running self-rearming periodic work. */
cancel_delayed_work_sync(&dev->periodic_work);
+ cancel_work_sync(&wl->tx_work);
mutex_lock(&wl->mutex);
- ieee80211_stop_queues(wl->hw); /* FIXME this could cause a deadlock */
+ /* Drain all TX queues. */
+ for (queue_num = 0; queue_num < B43legacy_QOS_QUEUE_NUM; queue_num++) {
+ while (skb_queue_len(&wl->tx_queue[queue_num]))
+ dev_kfree_skb(skb_dequeue(&wl->tx_queue[queue_num]));
+ }
- b43legacy_mac_suspend(dev);
+ b43legacy_mac_suspend(dev);
free_irq(dev->dev->irq, dev);
b43legacydbg(wl, "Wireless interface stopped\n");
}
@@ -3044,12 +3084,12 @@ static void setup_struct_phy_for_init(struct b43legacy_wldev *dev,
/* Assume the radio is enabled. If it's not enabled, the state will
* immediately get fixed on the first periodic work run. */
- dev->radio_hw_enable = 1;
+ dev->radio_hw_enable = true;
phy->savedpctlreg = 0xFFFF;
- phy->aci_enable = 0;
- phy->aci_wlan_automatic = 0;
- phy->aci_hw_rssi = 0;
+ phy->aci_enable = false;
+ phy->aci_wlan_automatic = false;
+ phy->aci_hw_rssi = false;
lo = phy->_lo_pairs;
if (lo)
@@ -3081,7 +3121,7 @@ static void setup_struct_phy_for_init(struct b43legacy_wldev *dev,
static void setup_struct_wldev_for_init(struct b43legacy_wldev *dev)
{
/* Flags */
- dev->dfq_valid = 0;
+ dev->dfq_valid = false;
/* Stats */
memset(&dev->stats, 0, sizeof(dev->stats));
@@ -3187,9 +3227,9 @@ static void prepare_phy_data_for_init(struct b43legacy_wldev *dev)
phy->lofcal = 0xFFFF;
phy->initval = 0xFFFF;
- phy->aci_enable = 0;
- phy->aci_wlan_automatic = 0;
- phy->aci_hw_rssi = 0;
+ phy->aci_enable = false;
+ phy->aci_wlan_automatic = false;
+ phy->aci_hw_rssi = false;
phy->antenna_diversity = 0xFFFF;
memset(phy->minlowsig, 0xFF, sizeof(phy->minlowsig));
@@ -3355,7 +3395,7 @@ static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
b43legacydbg(wl, "Adding Interface type %d\n", vif->type);
dev = wl->current_dev;
- wl->operating = 1;
+ wl->operating = true;
wl->vif = vif;
wl->if_type = vif->type;
memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
@@ -3389,7 +3429,7 @@ static void b43legacy_op_remove_interface(struct ieee80211_hw *hw,
B43legacy_WARN_ON(wl->vif != vif);
wl->vif = NULL;
- wl->operating = 0;
+ wl->operating = false;
spin_lock_irqsave(&wl->irq_lock, flags);
b43legacy_adjust_opmode(dev);
@@ -3413,10 +3453,10 @@ static int b43legacy_op_start(struct ieee80211_hw *hw)
memset(wl->bssid, 0, ETH_ALEN);
memset(wl->mac_addr, 0, ETH_ALEN);
wl->filter_flags = 0;
- wl->beacon0_uploaded = 0;
- wl->beacon1_uploaded = 0;
- wl->beacon_templates_virgin = 1;
- wl->radio_enabled = 1;
+ wl->beacon0_uploaded = false;
+ wl->beacon1_uploaded = false;
+ wl->beacon_templates_virgin = true;
+ wl->radio_enabled = true;
mutex_lock(&wl->mutex);
@@ -3455,7 +3495,7 @@ static void b43legacy_op_stop(struct ieee80211_hw *hw)
if (b43legacy_status(dev) >= B43legacy_STAT_STARTED)
b43legacy_wireless_core_stop(dev);
b43legacy_wireless_core_exit(dev);
- wl->radio_enabled = 0;
+ wl->radio_enabled = false;
mutex_unlock(&wl->mutex);
}
@@ -3614,7 +3654,7 @@ static int b43legacy_wireless_core_attach(struct b43legacy_wldev *dev)
have_bphy = 1;
dev->phy.gmode = (have_gphy || have_bphy);
- dev->phy.radio_on = 1;
+ dev->phy.radio_on = true;
tmp = dev->phy.gmode ? B43legacy_TMSLOW_GMODE : 0;
b43legacy_wireless_core_reset(dev, tmp);
@@ -3705,7 +3745,7 @@ static int b43legacy_one_core_attach(struct ssb_device *dev,
(void (*)(unsigned long))b43legacy_interrupt_tasklet,
(unsigned long)wldev);
if (modparam_pio)
- wldev->__using_pio = 1;
+ wldev->__using_pio = true;
INIT_LIST_HEAD(&wldev->list);
err = b43legacy_wireless_core_attach(wldev);
@@ -3748,6 +3788,7 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
struct ieee80211_hw *hw;
struct b43legacy_wl *wl;
int err = -ENOMEM;
+ int queue_num;
b43legacy_sprom_fixup(dev->bus);
@@ -3782,6 +3823,13 @@ static int b43legacy_wireless_init(struct ssb_device *dev)
mutex_init(&wl->mutex);
INIT_LIST_HEAD(&wl->devlist);
INIT_WORK(&wl->beacon_update_trigger, b43legacy_beacon_update_trigger_work);
+ INIT_WORK(&wl->tx_work, b43legacy_tx_work);
+
+ /* Initialize queues and flags. */
+ for (queue_num = 0; queue_num < B43legacy_QOS_QUEUE_NUM; queue_num++) {
+ skb_queue_head_init(&wl->tx_queue[queue_num]);
+ wl->tx_queue_stopped[queue_num] = 0;
+ }
ssb_set_devtypedata(dev, wl);
b43legacyinfo(wl, "Broadcom %04X WLAN found (core revision %u)\n",
diff --git a/drivers/net/wireless/b43legacy/radio.c b/drivers/net/wireless/b43legacy/radio.c
index 475eb14..fcbafcd 100644
--- a/drivers/net/wireless/b43legacy/radio.c
+++ b/drivers/net/wireless/b43legacy/radio.c
@@ -1067,7 +1067,7 @@ b43legacy_radio_interference_mitigation_enable(struct b43legacy_wldev *dev,
if (b43legacy_phy_read(dev, 0x0033) & 0x0800)
break;
- phy->aci_enable = 1;
+ phy->aci_enable = true;
phy_stacksave(B43legacy_PHY_RADIO_BITFIELD);
phy_stacksave(B43legacy_PHY_G_CRS);
@@ -1279,7 +1279,7 @@ b43legacy_radio_interference_mitigation_disable(struct b43legacy_wldev *dev,
if (!(b43legacy_phy_read(dev, 0x0033) & 0x0800))
break;
- phy->aci_enable = 0;
+ phy->aci_enable = false;
phy_stackrestore(B43legacy_PHY_RADIO_BITFIELD);
phy_stackrestore(B43legacy_PHY_G_CRS);
@@ -1346,10 +1346,10 @@ int b43legacy_radio_set_interference_mitigation(struct b43legacy_wldev *dev,
(phy->rev == 0) || (!phy->gmode))
return -ENODEV;
- phy->aci_wlan_automatic = 0;
+ phy->aci_wlan_automatic = false;
switch (mode) {
case B43legacy_RADIO_INTERFMODE_AUTOWLAN:
- phy->aci_wlan_automatic = 1;
+ phy->aci_wlan_automatic = true;
if (phy->aci_enable)
mode = B43legacy_RADIO_INTERFMODE_MANUALWLAN;
else
@@ -1371,8 +1371,8 @@ int b43legacy_radio_set_interference_mitigation(struct b43legacy_wldev *dev,
currentmode);
if (mode == B43legacy_RADIO_INTERFMODE_NONE) {
- phy->aci_enable = 0;
- phy->aci_hw_rssi = 0;
+ phy->aci_enable = false;
+ phy->aci_hw_rssi = false;
} else
b43legacy_radio_interference_mitigation_enable(dev, mode);
phy->interfmode = mode;
@@ -2102,7 +2102,7 @@ void b43legacy_radio_turn_on(struct b43legacy_wldev *dev)
phy->radio_off_context.rfover);
b43legacy_phy_write(dev, B43legacy_PHY_RFOVERVAL,
phy->radio_off_context.rfoverval);
- phy->radio_off_context.valid = 0;
+ phy->radio_off_context.valid = false;
}
channel = phy->channel;
err = b43legacy_radio_selectchannel(dev,
@@ -2113,7 +2113,7 @@ void b43legacy_radio_turn_on(struct b43legacy_wldev *dev)
default:
B43legacy_BUG_ON(1);
}
- phy->radio_on = 1;
+ phy->radio_on = true;
}
void b43legacy_radio_turn_off(struct b43legacy_wldev *dev, bool force)
@@ -2131,14 +2131,14 @@ void b43legacy_radio_turn_off(struct b43legacy_wldev *dev, bool force)
if (!force) {
phy->radio_off_context.rfover = rfover;
phy->radio_off_context.rfoverval = rfoverval;
- phy->radio_off_context.valid = 1;
+ phy->radio_off_context.valid = true;
}
b43legacy_phy_write(dev, B43legacy_PHY_RFOVER, rfover | 0x008C);
b43legacy_phy_write(dev, B43legacy_PHY_RFOVERVAL,
rfoverval & 0xFF73);
} else
b43legacy_phy_write(dev, 0x0015, 0xAA00);
- phy->radio_on = 0;
+ phy->radio_on = false;
b43legacydbg(dev->wl, "Radio initialized\n");
}
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index 2069fc8..cd6375d 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -3,9 +3,8 @@ config BRCMUTIL
config BRCMSMAC
tristate "Broadcom IEEE802.11n PCIe SoftMAC WLAN driver"
- depends on PCI
depends on MAC80211
- depends on BCMA=n
+ depends on BCMA
select BRCMUTIL
select FW_LOADER
select CRC_CCITT
@@ -18,16 +17,26 @@ config BRCMSMAC
config BRCMFMAC
tristate "Broadcom IEEE802.11n embedded FullMAC WLAN driver"
- depends on MMC
depends on CFG80211
select BRCMUTIL
- select FW_LOADER
---help---
This module adds support for embedded wireless adapters based on
- Broadcom IEEE802.11n FullMAC chipsets. This driver uses the kernel's
- wireless extensions subsystem. If you choose to build a module,
+ Broadcom IEEE802.11n FullMAC chipsets. It must be used with at least
+ one of the supported bus interfaces. If you choose to build a module,
it'll be called brcmfmac.ko.
+config BRCMFMAC_SDIO
+ bool "SDIO bus interface support for FullMAC"
+ depends on MMC
+ depends on BRCMFMAC
+ select FW_LOADER
+ default y
+ ---help---
+ This option enables SDIO bus interface support for the Broadcom
+ FullMAC WLAN driver.
+ Say Y if you want to use brcmfmac with a compatible SDIO interface
+ wireless card.
+
config BRCMDBG
bool "Broadcom driver debug functions"
depends on BRCMSMAC || BRCMFMAC
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index b44e309..9ca9ea1 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -19,15 +19,16 @@ ccflags-y += \
-Idrivers/net/wireless/brcm80211/brcmfmac \
-Idrivers/net/wireless/brcm80211/include
-DHDOFILES = \
- wl_cfg80211.o \
- dhd_cdc.o \
- dhd_common.o \
- dhd_sdio.o \
- dhd_linux.o \
- bcmsdh.o \
- bcmsdh_sdmmc.o
-
obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
-brcmfmac-objs += $(DHDOFILES)
+brcmfmac-objs += \
+ wl_cfg80211.o \
+ dhd_cdc.o \
+ dhd_common.o \
+ dhd_linux.o
+brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
+ dhd_sdio.o \
+ bcmsdh.o \
+ bcmsdh_sdmmc.o \
+ sdio_chip.o
+
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h b/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h
deleted file mode 100644
index d7d3afd..0000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmchip.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2011 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _bcmchip_h_
-#define _bcmchip_h_
-
-/* bcm4329 */
-/* SDIO device core, ID 0x829 */
-#define BCM4329_CORE_BUS_BASE 0x18011000
-/* internal memory core, ID 0x80e */
-#define BCM4329_CORE_SOCRAM_BASE 0x18003000
-/* ARM Cortex M3 core, ID 0x82a */
-#define BCM4329_CORE_ARM_BASE 0x18002000
-#define BCM4329_RAMSIZE 0x48000
-/* firmware name */
-#define BCM4329_FW_NAME "brcm/bcm4329-fullmac-4.bin"
-#define BCM4329_NV_NAME "brcm/bcm4329-fullmac-4.txt"
-
-#endif /* _bcmchip_h_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 89ff94d..4bc8d25 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -31,7 +31,6 @@
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include <soc.h>
-#include "dhd.h"
#include "dhd_bus.h"
#include "dhd_dbg.h"
#include "sdio_host.h"
@@ -51,12 +50,18 @@ static void brcmf_sdioh_irqhandler(struct sdio_func *func)
sdio_claim_host(func);
}
+/* dummy handler for SDIO function 2 interrupt */
+static void brcmf_sdioh_dummy_irq_handler(struct sdio_func *func)
+{
+}
+
int brcmf_sdcard_intr_reg(struct brcmf_sdio_dev *sdiodev)
{
brcmf_dbg(TRACE, "Entering\n");
sdio_claim_host(sdiodev->func[1]);
sdio_claim_irq(sdiodev->func[1], brcmf_sdioh_irqhandler);
+ sdio_claim_irq(sdiodev->func[2], brcmf_sdioh_dummy_irq_handler);
sdio_release_host(sdiodev->func[1]);
return 0;
@@ -67,6 +72,7 @@ int brcmf_sdcard_intr_dereg(struct brcmf_sdio_dev *sdiodev)
brcmf_dbg(TRACE, "Entering\n");
sdio_claim_host(sdiodev->func[1]);
+ sdio_release_irq(sdiodev->func[2]);
sdio_release_irq(sdiodev->func[1]);
sdio_release_host(sdiodev->func[1]);
@@ -222,19 +228,12 @@ bool brcmf_sdcard_regfail(struct brcmf_sdio_dev *sdiodev)
return sdiodev->regfail;
}
-int
-brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags,
- u8 *buf, uint nbytes, struct sk_buff *pkt)
+static int brcmf_sdcard_recv_prepare(struct brcmf_sdio_dev *sdiodev, uint fn,
+ uint flags, uint width, u32 *addr)
{
- int status;
- uint incr_fix;
- uint width;
- uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
+ uint bar0 = *addr & ~SBSDIO_SB_OFT_ADDR_MASK;
int err = 0;
- brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", fn, addr, nbytes);
-
/* Async not implemented yet */
if (flags & SDIO_REQ_ASYNC)
return -ENOTSUPP;
@@ -247,29 +246,114 @@ brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
sdiodev->sbwad = bar0;
}
- addr &= SBSDIO_SB_OFT_ADDR_MASK;
+ *addr &= SBSDIO_SB_OFT_ADDR_MASK;
+
+ if (width == 4)
+ *addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+
+ return 0;
+}
+
+int
+brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, u8 *buf, uint nbytes)
+{
+ struct sk_buff *mypkt;
+ int err;
+
+ mypkt = brcmu_pkt_buf_get_skb(nbytes);
+ if (!mypkt) {
+ brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
+ nbytes);
+ return -EIO;
+ }
+
+ err = brcmf_sdcard_recv_pkt(sdiodev, addr, fn, flags, mypkt);
+ if (!err)
+ memcpy(buf, mypkt->data, nbytes);
+
+ brcmu_pkt_buf_free_skb(mypkt);
+ return err;
+}
+
+int
+brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff *pkt)
+{
+ uint incr_fix;
+ uint width;
+ int err = 0;
+
+ brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
+ fn, addr, pkt->len);
+
+ width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
+ err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
+ if (err)
+ return err;
incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+ err = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ,
+ fn, addr, pkt);
+
+ return err;
+}
+
+int brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff_head *pktq)
+{
+ uint incr_fix;
+ uint width;
+ int err = 0;
+
+ brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
+ fn, addr, pktq->qlen);
+
width = (flags & SDIO_REQ_4BYTE) ? 4 : 2;
- if (width == 4)
- addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
+ err = brcmf_sdcard_recv_prepare(sdiodev, fn, flags, width, &addr);
+ if (err)
+ return err;
- status = brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_READ,
- fn, addr, width, nbytes, buf, pkt);
+ incr_fix = (flags & SDIO_REQ_FIXED) ? SDIOH_DATA_FIX : SDIOH_DATA_INC;
+ err = brcmf_sdioh_request_chain(sdiodev, incr_fix, SDIOH_READ, fn, addr,
+ pktq);
- return status;
+ return err;
}
int
brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt)
+ uint flags, u8 *buf, uint nbytes)
+{
+ struct sk_buff *mypkt;
+ int err;
+
+ mypkt = brcmu_pkt_buf_get_skb(nbytes);
+ if (!mypkt) {
+ brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
+ nbytes);
+ return -EIO;
+ }
+
+ memcpy(mypkt->data, buf, nbytes);
+ err = brcmf_sdcard_send_pkt(sdiodev, addr, fn, flags, mypkt);
+
+ brcmu_pkt_buf_free_skb(mypkt);
+ return err;
+
+}
+
+int
+brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff *pkt)
{
uint incr_fix;
uint width;
uint bar0 = addr & ~SBSDIO_SB_OFT_ADDR_MASK;
int err = 0;
- brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n", fn, addr, nbytes);
+ brcmf_dbg(INFO, "fun = %d, addr = 0x%x, size = %d\n",
+ fn, addr, pkt->len);
/* Async not implemented yet */
if (flags & SDIO_REQ_ASYNC)
@@ -291,18 +375,39 @@ brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
return brcmf_sdioh_request_buffer(sdiodev, incr_fix, SDIOH_WRITE, fn,
- addr, width, nbytes, buf, pkt);
+ addr, pkt);
}
int brcmf_sdcard_rwdata(struct brcmf_sdio_dev *sdiodev, uint rw, u32 addr,
u8 *buf, uint nbytes)
{
+ struct sk_buff *mypkt;
+ bool write = rw ? SDIOH_WRITE : SDIOH_READ;
+ int err;
+
addr &= SBSDIO_SB_OFT_ADDR_MASK;
addr |= SBSDIO_SB_ACCESS_2_4B_FLAG;
- return brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC,
- (rw ? SDIOH_WRITE : SDIOH_READ), SDIO_FUNC_1,
- addr, 4, nbytes, buf, NULL);
+ mypkt = brcmu_pkt_buf_get_skb(nbytes);
+ if (!mypkt) {
+ brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
+ nbytes);
+ return -EIO;
+ }
+
+ /* For a write, copy the buffer data into the packet. */
+ if (write)
+ memcpy(mypkt->data, buf, nbytes);
+
+ err = brcmf_sdioh_request_buffer(sdiodev, SDIOH_DATA_INC, write,
+ SDIO_FUNC_1, addr, mypkt);
+
+ /* For a read, copy the packet data back to the buffer. */
+ if (!err && !write)
+ memcpy(buf, mypkt->data, nbytes);
+
+ brcmu_pkt_buf_free_skb(mypkt);
+ return err;
}
int brcmf_sdcard_abort(struct brcmf_sdio_dev *sdiodev, uint fn)
@@ -333,7 +438,7 @@ int brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
sdiodev->sbwad = SI_ENUM_BASE;
/* try to attach to the target device */
- sdiodev->bus = brcmf_sdbrcm_probe(0, 0, 0, 0, regs, sdiodev);
+ sdiodev->bus = brcmf_sdbrcm_probe(regs, sdiodev);
if (!sdiodev->bus) {
brcmf_dbg(ERROR, "device attach failed\n");
ret = -ENODEV;
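
The hunk above splits the old buffer-based receive path into three entry points: brcmf_sdcard_recv_buf() for callers that only have a flat buffer, brcmf_sdcard_recv_pkt() for a single pre-allocated sk_buff, and brcmf_sdcard_recv_chain() for a whole packet queue. The send side mirrors this, with brcmf_sdcard_send_buf() copying into a temporary skb and delegating to brcmf_sdcard_send_pkt(). A minimal caller-side sketch follows; the surrounding function, the register address and the use of SDIO_FUNC_2 are placeholders for illustration, not part of the patch.

/* Hypothetical caller: read from SDIO function 2 via both new paths.
 * sdiodev, addr and the flag/function constants are assumed to come
 * from sdio_host.h; this wrapper itself is illustrative only.
 */
static int example_read(struct brcmf_sdio_dev *sdiodev, u32 addr, uint len)
{
	struct sk_buff *pkt;
	u8 tmp[64];
	int err;

	/* Flat-buffer path: an skb is allocated and freed internally. */
	err = brcmf_sdcard_recv_buf(sdiodev, addr, SDIO_FUNC_2,
				    SDIO_REQ_4BYTE, tmp, sizeof(tmp));
	if (err)
		return err;

	/* Packet path: the caller owns the DMA-able, aligned skb. */
	pkt = brcmu_pkt_buf_get_skb(len);
	if (!pkt)
		return -ENOMEM;
	err = brcmf_sdcard_recv_pkt(sdiodev, addr, SDIO_FUNC_2,
				    SDIO_REQ_4BYTE, pkt);
	brcmu_pkt_buf_free_skb(pkt);
	return err;
}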
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index bbaeb2d..9b8c0ed 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -31,15 +31,15 @@
#include <brcmu_utils.h>
#include <brcmu_wifi.h>
#include "sdio_host.h"
-#include "dhd.h"
#include "dhd_dbg.h"
-#include "wl_cfg80211.h"
+#include "dhd_bus.h"
#define SDIO_VENDOR_ID_BROADCOM 0x02d0
#define DMA_ALIGN_MASK 0x03
#define SDIO_DEVICE_ID_BROADCOM_4329 0x4329
+#define SDIO_DEVICE_ID_BROADCOM_4330 0x4330
#define SDIO_FUNC1_BLOCKSIZE 64
#define SDIO_FUNC2_BLOCKSIZE 512
@@ -47,6 +47,7 @@
/* devices we support, null terminated */
static const struct sdio_device_id brcmf_sdmmc_ids[] = {
{SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4329)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_BROADCOM, SDIO_DEVICE_ID_BROADCOM_4330)},
{ /* end: all zeroes */ },
};
MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids);
@@ -204,62 +205,75 @@ int brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
return err_ret;
}
+/* precondition: host controller is claimed */
static int
-brcmf_sdioh_request_packet(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
- uint write, uint func, uint addr,
- struct sk_buff *pkt)
+brcmf_sdioh_request_data(struct brcmf_sdio_dev *sdiodev, uint write, bool fifo,
+ uint func, uint addr, struct sk_buff *pkt, uint pktlen)
+{
+ int err_ret = 0;
+
+ if ((write) && (!fifo)) {
+ err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
+ ((u8 *) (pkt->data)), pktlen);
+ } else if (write) {
+ err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
+ ((u8 *) (pkt->data)), pktlen);
+ } else if (fifo) {
+ err_ret = sdio_readsb(sdiodev->func[func],
+ ((u8 *) (pkt->data)), addr, pktlen);
+ } else {
+ err_ret = sdio_memcpy_fromio(sdiodev->func[func],
+ ((u8 *) (pkt->data)),
+ addr, pktlen);
+ }
+
+ return err_ret;
+}
+
+/*
+ * This function takes a queue of packets. The packets on the queue
+ * are assumed to be properly aligned by the caller.
+ */
+int
+brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
+ uint write, uint func, uint addr,
+ struct sk_buff_head *pktq)
{
bool fifo = (fix_inc == SDIOH_DATA_FIX);
u32 SGCount = 0;
int err_ret = 0;
- struct sk_buff *pnext;
+ struct sk_buff *pkt;
brcmf_dbg(TRACE, "Enter\n");
- brcmf_pm_resume_wait(sdiodev, &sdiodev->request_packet_wait);
+ brcmf_pm_resume_wait(sdiodev, &sdiodev->request_chain_wait);
if (brcmf_pm_resume_error(sdiodev))
return -EIO;
/* Claim host controller */
sdio_claim_host(sdiodev->func[func]);
- for (pnext = pkt; pnext; pnext = pnext->next) {
- uint pkt_len = pnext->len;
+
+ skb_queue_walk(pktq, pkt) {
+ uint pkt_len = pkt->len;
pkt_len += 3;
pkt_len &= 0xFFFFFFFC;
- if ((write) && (!fifo)) {
- err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
- ((u8 *) (pnext->data)),
- pkt_len);
- } else if (write) {
- err_ret = sdio_memcpy_toio(sdiodev->func[func], addr,
- ((u8 *) (pnext->data)),
- pkt_len);
- } else if (fifo) {
- err_ret = sdio_readsb(sdiodev->func[func],
- ((u8 *) (pnext->data)),
- addr, pkt_len);
- } else {
- err_ret = sdio_memcpy_fromio(sdiodev->func[func],
- ((u8 *) (pnext->data)),
- addr, pkt_len);
- }
-
+ err_ret = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
+ addr, pkt, pkt_len);
if (err_ret) {
brcmf_dbg(ERROR, "%s FAILED %p[%d], addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
- write ? "TX" : "RX", pnext, SGCount, addr,
+ write ? "TX" : "RX", pkt, SGCount, addr,
pkt_len, err_ret);
} else {
brcmf_dbg(TRACE, "%s xfr'd %p[%d], addr=0x%05x, len=%d\n",
- write ? "TX" : "RX", pnext, SGCount, addr,
+ write ? "TX" : "RX", pkt, SGCount, addr,
pkt_len);
}
-
if (!fifo)
addr += pkt_len;
- SGCount++;
+ SGCount++;
}
/* Release host controller */
@@ -270,91 +284,45 @@ brcmf_sdioh_request_packet(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
}
/*
- * This function takes a buffer or packet, and fixes everything up
- * so that in the end, a DMA-able packet is created.
- *
- * A buffer does not have an associated packet pointer,
- * and may or may not be aligned.
- * A packet may consist of a single packet, or a packet chain.
- * If it is a packet chain, then all the packets in the chain
- * must be properly aligned.
- *
- * If the packet data is not aligned, then there may only be
- * one packet, and in this case, it is copied to a new
- * aligned packet.
- *
+ * This function takes a single DMA-able packet.
*/
int brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
uint fix_inc, uint write, uint func, uint addr,
- uint reg_width, uint buflen_u, u8 *buffer,
struct sk_buff *pkt)
{
- int Status;
- struct sk_buff *mypkt = NULL;
+ int status;
+ uint pkt_len;
+ bool fifo = (fix_inc == SDIOH_DATA_FIX);
brcmf_dbg(TRACE, "Enter\n");
+ if (pkt == NULL)
+ return -EINVAL;
+ pkt_len = pkt->len;
brcmf_pm_resume_wait(sdiodev, &sdiodev->request_buffer_wait);
if (brcmf_pm_resume_error(sdiodev))
return -EIO;
- /* Case 1: we don't have a packet. */
- if (pkt == NULL) {
- brcmf_dbg(DATA, "Creating new %s Packet, len=%d\n",
- write ? "TX" : "RX", buflen_u);
- mypkt = brcmu_pkt_buf_get_skb(buflen_u);
- if (!mypkt) {
- brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
- buflen_u);
- return -EIO;
- }
-
- /* For a write, copy the buffer data into the packet. */
- if (write)
- memcpy(mypkt->data, buffer, buflen_u);
-
- Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
- func, addr, mypkt);
-
- /* For a read, copy the packet data back to the buffer. */
- if (!write)
- memcpy(buffer, mypkt->data, buflen_u);
-
- brcmu_pkt_buf_free_skb(mypkt);
- } else if (((ulong) (pkt->data) & DMA_ALIGN_MASK) != 0) {
- /*
- * Case 2: We have a packet, but it is unaligned.
- * In this case, we cannot have a chain (pkt->next == NULL)
- */
- brcmf_dbg(DATA, "Creating aligned %s Packet, len=%d\n",
- write ? "TX" : "RX", pkt->len);
- mypkt = brcmu_pkt_buf_get_skb(pkt->len);
- if (!mypkt) {
- brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: len %d\n",
- pkt->len);
- return -EIO;
- }
- /* For a write, copy the buffer data into the packet. */
- if (write)
- memcpy(mypkt->data, pkt->data, pkt->len);
-
- Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
- func, addr, mypkt);
+ /* Claim host controller */
+ sdio_claim_host(sdiodev->func[func]);
- /* For a read, copy the packet data back to the buffer. */
- if (!write)
- memcpy(pkt->data, mypkt->data, mypkt->len);
+ pkt_len += 3;
+ pkt_len &= (uint)~3;
- brcmu_pkt_buf_free_skb(mypkt);
- } else { /* case 3: We have a packet and
- it is aligned. */
- brcmf_dbg(DATA, "Aligned %s Packet, direct DMA\n",
- write ? "Tx" : "Rx");
- Status = brcmf_sdioh_request_packet(sdiodev, fix_inc, write,
- func, addr, pkt);
+ status = brcmf_sdioh_request_data(sdiodev, write, fifo, func,
+ addr, pkt, pkt_len);
+ if (status) {
+ brcmf_dbg(ERROR, "%s FAILED %p, addr=0x%05x, pkt_len=%d, ERR=0x%08x\n",
+ write ? "TX" : "RX", pkt, addr, pkt_len, status);
+ } else {
+ brcmf_dbg(TRACE, "%s xfr'd %p, addr=0x%05x, len=%d\n",
+ write ? "TX" : "RX", pkt, addr, pkt_len);
}
- return Status;
+ /* Release host controller */
+ sdio_release_host(sdiodev->func[func]);
+
+ return status;
}
/* Read client card reg */
@@ -494,6 +462,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
{
int ret = 0;
struct brcmf_sdio_dev *sdiodev;
+ struct brcmf_bus *bus_if;
brcmf_dbg(TRACE, "Enter\n");
brcmf_dbg(TRACE, "func->class=%x\n", func->class);
brcmf_dbg(TRACE, "sdio_vendor: 0x%04x\n", func->vendor);
@@ -505,17 +474,26 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
brcmf_dbg(ERROR, "card private drvdata occupied\n");
return -ENXIO;
}
+ bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL);
+ if (!bus_if)
+ return -ENOMEM;
sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL);
- if (!sdiodev)
+ if (!sdiodev) {
+ kfree(bus_if);
return -ENOMEM;
+ }
sdiodev->func[0] = func->card->sdio_func[0];
sdiodev->func[1] = func;
+ sdiodev->bus_if = bus_if;
+ bus_if->bus_priv = sdiodev;
+ bus_if->type = SDIO_BUS;
+ bus_if->align = BRCMF_SDALIGN;
dev_set_drvdata(&func->card->dev, sdiodev);
atomic_set(&sdiodev->suspend, false);
init_waitqueue_head(&sdiodev->request_byte_wait);
init_waitqueue_head(&sdiodev->request_word_wait);
- init_waitqueue_head(&sdiodev->request_packet_wait);
+ init_waitqueue_head(&sdiodev->request_chain_wait);
init_waitqueue_head(&sdiodev->request_buffer_wait);
}
@@ -525,6 +503,10 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
return -ENODEV;
sdiodev->func[2] = func;
+ bus_if = sdiodev->bus_if;
+ sdiodev->dev = &func->dev;
+ dev_set_drvdata(&func->dev, bus_if);
+
brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_probe...\n");
ret = brcmf_sdio_probe(sdiodev);
}
@@ -534,6 +516,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
static void brcmf_ops_sdio_remove(struct sdio_func *func)
{
+ struct brcmf_bus *bus_if;
struct brcmf_sdio_dev *sdiodev;
brcmf_dbg(TRACE, "Enter\n");
brcmf_dbg(INFO, "func->class=%x\n", func->class);
@@ -542,10 +525,13 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
brcmf_dbg(INFO, "Function#: 0x%04x\n", func->num);
if (func->num == 2) {
- sdiodev = dev_get_drvdata(&func->card->dev);
+ bus_if = dev_get_drvdata(&func->dev);
+ sdiodev = bus_if->bus_priv;
brcmf_dbg(TRACE, "F2 found, calling brcmf_sdio_remove...\n");
brcmf_sdio_remove(sdiodev);
dev_set_drvdata(&func->card->dev, NULL);
+ dev_set_drvdata(&func->dev, NULL);
+ kfree(bus_if);
kfree(sdiodev);
}
}
@@ -554,14 +540,12 @@ static void brcmf_ops_sdio_remove(struct sdio_func *func)
static int brcmf_sdio_suspend(struct device *dev)
{
mmc_pm_flag_t sdio_flags;
- struct brcmf_sdio_dev *sdiodev;
struct sdio_func *func = dev_to_sdio_func(dev);
+ struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
int ret = 0;
brcmf_dbg(TRACE, "\n");
- sdiodev = dev_get_drvdata(&func->card->dev);
-
atomic_set(&sdiodev->suspend, true);
sdio_flags = sdio_get_host_pm_caps(sdiodev->func[1]);
@@ -583,10 +567,9 @@ static int brcmf_sdio_suspend(struct device *dev)
static int brcmf_sdio_resume(struct device *dev)
{
- struct brcmf_sdio_dev *sdiodev;
struct sdio_func *func = dev_to_sdio_func(dev);
+ struct brcmf_sdio_dev *sdiodev = dev_get_drvdata(&func->card->dev);
- sdiodev = dev_get_drvdata(&func->card->dev);
brcmf_sdio_wdtmr_enable(sdiodev, true);
atomic_set(&sdiodev->suspend, false);
return 0;
@@ -610,17 +593,26 @@ static struct sdio_driver brcmf_sdmmc_driver = {
#endif /* CONFIG_PM_SLEEP */
};
-/* bus register interface */
-int brcmf_bus_register(void)
+static void __exit brcmf_sdio_exit(void)
{
brcmf_dbg(TRACE, "Enter\n");
- return sdio_register_driver(&brcmf_sdmmc_driver);
+ sdio_unregister_driver(&brcmf_sdmmc_driver);
}
-void brcmf_bus_unregister(void)
+static int __init brcmf_sdio_init(void)
{
+ int ret;
+
brcmf_dbg(TRACE, "Enter\n");
- sdio_unregister_driver(&brcmf_sdmmc_driver);
+ ret = sdio_register_driver(&brcmf_sdmmc_driver);
+
+ if (ret)
+ brcmf_dbg(ERROR, "sdio_register_driver failed: %d\n", ret);
+
+ return ret;
}
+
+module_init(brcmf_sdio_init);
+module_exit(brcmf_sdio_exit);
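
brcmf_sdioh_request_chain() above walks an skb queue with skb_queue_walk() and rounds each packet length up to a 4-byte multiple before issuing the SDIO transfer. The sketch below shows how a caller might assemble such a queue; the packet sizes, the FIFO read and the SDIO_FUNC_2 constant are made up for illustration, and every packet is assumed to be already DMA-aligned, as the comment in the patch requires.

/* Illustrative only: build a two-packet chain and hand it to the
 * SDIO host layer for a fixed-address (FIFO) read.
 */
static int example_chain_read(struct brcmf_sdio_dev *sdiodev, uint addr)
{
	struct sk_buff_head pktq;
	struct sk_buff *pkt;
	int i, err;

	skb_queue_head_init(&pktq);
	for (i = 0; i < 2; i++) {
		pkt = brcmu_pkt_buf_get_skb(512);	/* allocator aligns the data */
		if (!pkt) {
			err = -ENOMEM;
			goto cleanup;
		}
		skb_queue_tail(&pktq, pkt);
	}

	err = brcmf_sdioh_request_chain(sdiodev, SDIOH_DATA_FIX, SDIOH_READ,
					SDIO_FUNC_2, addr, &pktq);

cleanup:
	/* A real caller would consume the received data before freeing. */
	while ((pkt = skb_dequeue(&pktq)) != NULL)
		brcmu_pkt_buf_free_skb(pkt);
	return err;
}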
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
index 4645766..e58ea40 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd.h
@@ -87,7 +87,7 @@
#define TOE_TX_CSUM_OL 0x00000001
#define TOE_RX_CSUM_OL 0x00000002
-#define BRCMF_BSS_INFO_VERSION 108 /* current ver of brcmf_bss_info struct */
+#define BRCMF_BSS_INFO_VERSION 109 /* curr ver of brcmf_bss_info_le struct */
/* size of brcmf_scan_params not including variable length array */
#define BRCMF_SCAN_PARAMS_FIXED_SIZE 64
@@ -122,8 +122,6 @@
/* For supporting multiple interfaces */
#define BRCMF_MAX_IFS 16
-#define BRCMF_DEL_IF -0xe
-#define BRCMF_BAD_IF -0xf
#define DOT11_BSSTYPE_ANY 2
#define DOT11_MAX_DEFAULT_KEYS 4
@@ -158,18 +156,6 @@ struct brcmf_event {
struct brcmf_event_msg msg;
} __packed;
-struct dngl_stats {
- unsigned long rx_packets; /* total packets received */
- unsigned long tx_packets; /* total packets transmitted */
- unsigned long rx_bytes; /* total bytes received */
- unsigned long tx_bytes; /* total bytes transmitted */
- unsigned long rx_errors; /* bad packets received */
- unsigned long tx_errors; /* packet transmit problems */
- unsigned long rx_dropped; /* packets dropped by dongle */
- unsigned long tx_dropped; /* packets dropped by dongle */
- unsigned long multicast; /* multicast packets received */
-};
-
/* event codes sent by the dongle to this driver */
#define BRCMF_E_SET_SSID 0
#define BRCMF_E_JOIN 1
@@ -319,13 +305,6 @@ struct dngl_stats {
#define BRCMF_E_LINK_ASSOC_REC 3
#define BRCMF_E_LINK_BSSCFG_DIS 4
-/* The level of bus communication with the dongle */
-enum brcmf_bus_state {
- BRCMF_BUS_DOWN, /* Not ready for frame transfers */
- BRCMF_BUS_LOAD, /* Download access only (CPU reset) */
- BRCMF_BUS_DATA /* Ready for frame transfers */
-};
-
/* Pattern matching filter. Specifies an offset within received packets to
* start matching, the pattern to match, the size of the pattern, and a bitmask
* that indicates which bits within the pattern should be matched.
@@ -365,7 +344,7 @@ struct brcmf_pkt_filter_enable_le {
* Applications MUST CHECK ie_offset field and length field to access IEs and
* next bss_info structure in a vector (in struct brcmf_scan_results)
*/
-struct brcmf_bss_info {
+struct brcmf_bss_info_le {
__le32 version; /* version field */
__le32 length; /* byte length of data in this record,
* starting at version and including IEs
@@ -466,14 +445,13 @@ struct brcmf_scan_results {
u32 buflen;
u32 version;
u32 count;
- struct brcmf_bss_info bss_info[1];
+ struct brcmf_bss_info_le bss_info_le[];
};
struct brcmf_scan_results_le {
__le32 buflen;
__le32 version;
__le32 count;
- struct brcmf_bss_info bss_info[1];
};
/* used for association with a specific BSSID and chanspec list */
@@ -493,10 +471,6 @@ struct brcmf_join_params {
struct brcmf_assoc_params_le params_le;
};
-/* size of brcmf_scan_results not including variable length array */
-#define BRCMF_SCAN_RESULTS_FIXED_SIZE \
- (sizeof(struct brcmf_scan_results) - sizeof(struct brcmf_bss_info))
-
/* incremental scan results struct */
struct brcmf_iscan_results {
union {
@@ -511,7 +485,7 @@ struct brcmf_iscan_results {
/* size of brcmf_iscan_results not including variable length array */
#define BRCMF_ISCAN_RESULTS_FIXED_SIZE \
- (BRCMF_SCAN_RESULTS_FIXED_SIZE + \
+ (sizeof(struct brcmf_scan_results) + \
offsetof(struct brcmf_iscan_results, results))
struct brcmf_wsec_key {
@@ -579,25 +553,19 @@ struct brcmf_dcmd {
};
/* Forward decls for struct brcmf_pub (see below) */
-struct brcmf_bus; /* device bus info */
struct brcmf_proto; /* device communication protocol info */
-struct brcmf_info; /* device driver info */
struct brcmf_cfg80211_dev; /* cfg80211 device info */
/* Common structure for module and instance linkage */
struct brcmf_pub {
/* Linkage ponters */
- struct brcmf_bus *bus;
+ struct brcmf_bus *bus_if;
struct brcmf_proto *prot;
- struct brcmf_info *info;
struct brcmf_cfg80211_dev *config;
+ struct device *dev; /* fullmac dongle device pointer */
/* Internal brcmf items */
- bool up; /* Driver up/down (to OS) */
- bool txoff; /* Transmit flow-controlled */
- enum brcmf_bus_state busstate;
uint hdrlen; /* Total BRCMF header length (proto + bus) */
- uint maxctl; /* Max size rxctl request from proto to bus */
uint rxsz; /* Rx buffer size bus module should use */
u8 wme_dp; /* wme discard priority */
@@ -605,48 +573,21 @@ struct brcmf_pub {
bool iswl; /* Dongle-resident driver is wl */
unsigned long drv_version; /* Version of dongle-resident driver */
u8 mac[ETH_ALEN]; /* MAC address obtained from dongle */
- struct dngl_stats dstats; /* Stats for dongle-based data */
/* Additional stats for the bus level */
- /* Data packets sent to dongle */
- unsigned long tx_packets;
/* Multicast data packets sent to dongle */
unsigned long tx_multicast;
- /* Errors in sending data to dongle */
- unsigned long tx_errors;
- /* Control packets sent to dongle */
- unsigned long tx_ctlpkts;
- /* Errors sending control frames to dongle */
- unsigned long tx_ctlerrs;
- /* Packets sent up the network interface */
- unsigned long rx_packets;
- /* Multicast packets sent up the network interface */
- unsigned long rx_multicast;
- /* Errors processing rx data packets */
- unsigned long rx_errors;
- /* Control frames processed from dongle */
- unsigned long rx_ctlpkts;
-
- /* Errors in processing rx control frames */
- unsigned long rx_ctlerrs;
- /* Packets dropped locally (no memory) */
- unsigned long rx_dropped;
/* Packets flushed due to unscheduled sendup thread */
unsigned long rx_flushed;
/* Number of times dpc scheduled by watchdog timer */
unsigned long wd_dpc_sched;
- /* Number of packets where header read-ahead was used. */
- unsigned long rx_readahead_cnt;
- /* Number of tx packets we had to realloc for headroom */
- unsigned long tx_realloc;
/* Number of flow control pkts recvd */
unsigned long fc_packets;
/* Last error return */
int bcmerror;
- uint tickcnt;
/* Last error from dongle */
int dongle_error;
@@ -664,6 +605,14 @@ struct brcmf_pub {
u8 country_code[BRCM_CNTRY_BUF_SZ];
char eventmask[BRCMF_EVENTING_MASK_LEN];
+ struct brcmf_if *iflist[BRCMF_MAX_IFS];
+
+ struct mutex proto_block;
+
+ struct work_struct setmacaddr_work;
+ struct work_struct multicast_work;
+ u8 macvalue[ETH_ALEN];
+ atomic_t pend_8021x_cnt;
};
struct brcmf_if_event {
@@ -683,67 +632,33 @@ extern const struct bcmevent_name bcmevent_names[];
extern uint brcmf_c_mkiovar(char *name, char *data, uint datalen,
char *buf, uint len);
-/* Indication from bus module regarding presence/insertion of dongle.
- * Return struct brcmf_pub pointer, used as handle to OS module in later calls.
- * Returned structure should have bus and prot pointers filled in.
- * bus_hdrlen specifies required headroom for bus module header.
- */
-extern struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus,
- uint bus_hdrlen);
extern int brcmf_net_attach(struct brcmf_pub *drvr, int idx);
extern int brcmf_netdev_wait_pend8021x(struct net_device *ndev);
extern s32 brcmf_exec_dcmd(struct net_device *dev, u32 cmd, void *arg, u32 len);
-/* Indication from bus module regarding removal/absence of dongle */
-extern void brcmf_detach(struct brcmf_pub *drvr);
-
-/* Indication from bus module to change flow-control state */
-extern void brcmf_txflowcontrol(struct brcmf_pub *drvr, int ifidx, bool on);
-
-extern bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q,
- struct sk_buff *pkt, int prec);
-
-/* Receive frame for delivery to OS. Callee disposes of rxp. */
-extern void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx,
- struct sk_buff *rxp, int numpkt);
-
/* Return pointer to interface name */
extern char *brcmf_ifname(struct brcmf_pub *drvr, int idx);
-/* Notify tx completion */
-extern void brcmf_txcomplete(struct brcmf_pub *drvr, struct sk_buff *txp,
- bool success);
-
/* Query dongle */
extern int brcmf_proto_cdc_query_dcmd(struct brcmf_pub *drvr, int ifidx,
uint cmd, void *buf, uint len);
-/* OS independent layer functions */
-extern int brcmf_os_proto_block(struct brcmf_pub *drvr);
-extern int brcmf_os_proto_unblock(struct brcmf_pub *drvr);
#ifdef BCMDBG
extern int brcmf_write_to_file(struct brcmf_pub *drvr, const u8 *buf, int size);
#endif /* BCMDBG */
-extern int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name);
-extern int brcmf_c_host_event(struct brcmf_info *drvr_priv, int *idx,
+extern int brcmf_ifname2idx(struct brcmf_pub *drvr, char *name);
+extern int brcmf_c_host_event(struct brcmf_pub *drvr, int *idx,
void *pktdata, struct brcmf_event_msg *,
void **data_ptr);
-extern void brcmf_c_init(void);
-
-extern int brcmf_add_if(struct brcmf_info *drvr_priv, int ifidx,
- struct net_device *ndev, char *name, u8 *mac_addr,
- u32 flags, u8 bssidx);
-extern void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx);
+extern void brcmf_del_if(struct brcmf_pub *drvr, int ifidx);
/* Send packet to dongle via data channel */
extern int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx,\
struct sk_buff *pkt);
-extern int brcmf_bus_start(struct brcmf_pub *drvr);
-
extern void brcmf_c_pktfilter_offload_set(struct brcmf_pub *drvr, char *arg);
extern void brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg,
int enable, int master_mode);
@@ -752,25 +667,4 @@ extern void brcmf_c_pktfilter_offload_enable(struct brcmf_pub *drvr, char *arg,
#define BRCMF_DCMD_MEDLEN 1536 /* "med" cmd buffer required */
#define BRCMF_DCMD_MAXLEN 8192 /* max length cmd buffer required */
-/* message levels */
-#define BRCMF_ERROR_VAL 0x0001
-#define BRCMF_TRACE_VAL 0x0002
-#define BRCMF_INFO_VAL 0x0004
-#define BRCMF_DATA_VAL 0x0008
-#define BRCMF_CTL_VAL 0x0010
-#define BRCMF_TIMER_VAL 0x0020
-#define BRCMF_HDRS_VAL 0x0040
-#define BRCMF_BYTES_VAL 0x0080
-#define BRCMF_INTR_VAL 0x0100
-#define BRCMF_GLOM_VAL 0x0400
-#define BRCMF_EVENT_VAL 0x0800
-#define BRCMF_BTA_VAL 0x1000
-#define BRCMF_ISCAN_VAL 0x2000
-
-/* Enter idle immediately (no timeout) */
-#define BRCMF_IDLE_IMMEDIATE (-1)
-#define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change
- when idle */
-#define BRCMF_IDLE_INTERVAL 1
-
#endif /* _BRCMF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
index a249407..ad9be24 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_bus.h
@@ -17,41 +17,89 @@
#ifndef _BRCMF_BUS_H_
#define _BRCMF_BUS_H_
-/* Packet alignment for most efficient SDIO (can change based on platform) */
-#define BRCMF_SDALIGN (1 << 6)
+/* The level of bus communication with the dongle */
+enum brcmf_bus_state {
+ BRCMF_BUS_DOWN, /* Not ready for frame transfers */
+ BRCMF_BUS_LOAD, /* Download access only (CPU reset) */
+ BRCMF_BUS_DATA /* Ready for frame transfers */
+};
-/* watchdog polling interval in ms */
-#define BRCMF_WD_POLL_MS 10
+struct dngl_stats {
+ unsigned long rx_packets; /* total packets received */
+ unsigned long tx_packets; /* total packets transmitted */
+ unsigned long rx_bytes; /* total bytes received */
+ unsigned long tx_bytes; /* total bytes transmitted */
+ unsigned long rx_errors; /* bad packets received */
+ unsigned long tx_errors; /* packet transmit problems */
+ unsigned long rx_dropped; /* packets dropped by dongle */
+ unsigned long tx_dropped; /* packets dropped by dongle */
+ unsigned long multicast; /* multicast packets received */
+};
+
+/* interface structure between common and bus layer */
+struct brcmf_bus {
+ u8 type; /* bus type */
+ void *bus_priv; /* pointer to bus private structure */
+ void *drvr; /* pointer to driver pub structure brcmf_pub */
+ enum brcmf_bus_state state;
+ uint maxctl; /* Max size rxctl request from proto to bus */
+ bool drvr_up; /* Status flag of driver up/down */
+ unsigned long tx_realloc; /* Tx packets realloced for headroom */
+ struct dngl_stats dstats; /* Stats for dongle-based data */
+ u8 align; /* bus alignment requirement */
+
+ /* interface functions pointers */
+ /* Stop bus module: clear pending frames, disable data flow */
+ void (*brcmf_bus_stop)(struct device *);
+ /* Initialize bus module: prepare for communication w/dongle */
+ int (*brcmf_bus_init)(struct device *);
+ /* Send a data frame to the dongle. Callee disposes of txp. */
+ int (*brcmf_bus_txdata)(struct device *, struct sk_buff *);
+ /* Send/receive a control message to/from the dongle.
+ * Expects caller to enforce a single outstanding transaction.
+ */
+ int (*brcmf_bus_txctl)(struct device *, unsigned char *, uint);
+ int (*brcmf_bus_rxctl)(struct device *, unsigned char *, uint);
+};
/*
- * Exported from brcmf bus module (brcmf_usb, brcmf_sdio)
+ * interface functions from common layer
*/
-/* Indicate (dis)interest in finding dongles. */
-extern int brcmf_bus_register(void);
-extern void brcmf_bus_unregister(void);
+/* Remove any protocol-specific data header. */
+extern int brcmf_proto_hdrpull(struct device *dev, int *ifidx,
+ struct sk_buff *rxp);
-/* obtain linux device object providing bus function */
-extern struct device *brcmf_bus_get_device(struct brcmf_bus *bus);
+extern bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
+ struct sk_buff *pkt, int prec);
-/* Stop bus module: clear pending frames, disable data flow */
-extern void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus);
+/* Receive frame for delivery to OS. Callee disposes of rxp. */
+extern void brcmf_rx_frame(struct device *dev, int ifidx,
+ struct sk_buff_head *rxlist);
+static inline void brcmf_rx_packet(struct device *dev, int ifidx,
+ struct sk_buff *pkt)
+{
+ struct sk_buff_head q;
-/* Initialize bus module: prepare for communication w/dongle */
-extern int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr);
+ skb_queue_head_init(&q);
+ skb_queue_tail(&q, pkt);
+ brcmf_rx_frame(dev, ifidx, &q);
+}
-/* Send a data frame to the dongle. Callee disposes of txp. */
-extern int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *txp);
+/* Indication from bus module regarding presence/insertion of dongle. */
+extern int brcmf_attach(uint bus_hdrlen, struct device *dev);
+/* Indication from bus module regarding removal/absence of dongle */
+extern void brcmf_detach(struct device *dev);
-/* Send/receive a control message to/from the dongle.
- * Expects caller to enforce a single outstanding transaction.
- */
-extern int
-brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen);
+/* Indication from bus module to change flow-control state */
+extern void brcmf_txflowcontrol(struct device *dev, int ifidx, bool on);
-extern int
-brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen);
+/* Notify tx completion */
+extern void brcmf_txcomplete(struct device *dev, struct sk_buff *txp,
+ bool success);
-extern void brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick);
+extern int brcmf_bus_start(struct device *dev);
+extern int brcmf_add_if(struct device *dev, int ifidx,
+ char *name, u8 *mac_addr);
#endif /* _BRCMF_BUS_H_ */
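
The new struct brcmf_bus turns the former brcmf_sdbrcm_* exports into per-bus function pointers that the common layer reaches through the device's drvdata. A rough sketch of the wiring is shown below, assuming a hypothetical bus back-end; the example_bus_* names and the zero bus_hdrlen are not part of the patch.

/* Hypothetical bus back-end registering its ops with the common layer. */
static int example_bus_init(struct device *dev) { return 0; }
static void example_bus_stop(struct device *dev) { }
static int example_bus_txdata(struct device *dev, struct sk_buff *skb)
{
	dev_kfree_skb(skb);	/* callee disposes of the tx packet */
	return 0;
}

static int example_bus_attach(struct device *dev, void *bus_priv)
{
	struct brcmf_bus *bus_if;

	bus_if = kzalloc(sizeof(*bus_if), GFP_KERNEL);
	if (!bus_if)
		return -ENOMEM;

	bus_if->type = SDIO_BUS;
	bus_if->bus_priv = bus_priv;
	bus_if->align = BRCMF_SDALIGN;	/* alignment constant from the SDIO headers */
	bus_if->brcmf_bus_init = example_bus_init;
	bus_if->brcmf_bus_stop = example_bus_stop;
	bus_if->brcmf_bus_txdata = example_bus_txdata;

	/* The common layer later finds bus_if via dev_get_drvdata(dev). */
	dev_set_drvdata(dev, bus_if);

	return brcmf_attach(0, dev);	/* bus_hdrlen 0 is a placeholder */
}

The txctl/rxctl pointers are omitted here for brevity; a functional bus module would fill those in as well before calling brcmf_attach().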
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
index e34c5c3..ac8d1f4 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_cdc.c
@@ -58,7 +58,7 @@ struct brcmf_proto_cdc_dcmd {
* Used on data packets to convey priority across USB.
*/
#define BDC_HEADER_LEN 4
-#define BDC_PROTO_VER 1 /* Protocol version */
+#define BDC_PROTO_VER 2 /* Protocol version */
#define BDC_FLAG_VER_MASK 0xf0 /* Protocol version mask */
#define BDC_FLAG_VER_SHIFT 4 /* Protocol version shift */
#define BDC_FLAG_SUM_GOOD 0x04 /* Good RX checksums */
@@ -77,18 +77,19 @@ struct brcmf_proto_bdc_header {
u8 flags;
u8 priority; /* 802.1d Priority, 4:7 flow control info for usb */
u8 flags2;
- u8 rssi;
+ u8 data_offset;
};
#define RETRIES 2 /* # of retries to retrieve matching dcmd response */
-#define BUS_HEADER_LEN (16+BRCMF_SDALIGN) /* Must be atleast SDPCM_RESERVE
+#define BUS_HEADER_LEN (16+64) /* Must be at least SDPCM_RESERVE
 * (amount of header that might be added)
* plus any space that might be needed
- * for alignment padding.
+ * for bus alignment padding.
*/
-#define ROUND_UP_MARGIN 2048 /* Biggest SDIO block size possible for
+#define ROUND_UP_MARGIN 2048 /* Biggest bus block size possible for
* round off at the end of buffer
+ * Currently is SDIO
*/
struct brcmf_proto {
@@ -116,8 +117,9 @@ static int brcmf_proto_cdc_msg(struct brcmf_pub *drvr)
len = CDC_MAX_MSG_SIZE;
/* Send request */
- return brcmf_sdbrcm_bus_txctl(drvr->bus, (unsigned char *)&prot->msg,
- len);
+ return drvr->bus_if->brcmf_bus_txctl(drvr->dev,
+ (unsigned char *)&prot->msg,
+ len);
}
static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
@@ -128,7 +130,7 @@ static int brcmf_proto_cdc_cmplt(struct brcmf_pub *drvr, u32 id, u32 len)
brcmf_dbg(TRACE, "Enter\n");
do {
- ret = brcmf_sdbrcm_bus_rxctl(drvr->bus,
+ ret = drvr->bus_if->brcmf_bus_rxctl(drvr->dev,
(unsigned char *)&prot->msg,
len + sizeof(struct brcmf_proto_cdc_dcmd));
if (ret < 0)
@@ -280,11 +282,11 @@ brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx, struct brcmf_dcmd *dcmd,
struct brcmf_proto *prot = drvr->prot;
int ret = -1;
- if (drvr->busstate == BRCMF_BUS_DOWN) {
+ if (drvr->bus_if->state == BRCMF_BUS_DOWN) {
brcmf_dbg(ERROR, "bus is down. we have nothing to do.\n");
return ret;
}
- brcmf_os_proto_block(drvr);
+ mutex_lock(&drvr->proto_block);
brcmf_dbg(TRACE, "Enter\n");
@@ -338,7 +340,7 @@ brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx, struct brcmf_dcmd *dcmd,
prot->pending = false;
done:
- brcmf_os_proto_unblock(drvr);
+ mutex_unlock(&drvr->proto_block);
return ret;
}
@@ -372,14 +374,16 @@ void brcmf_proto_hdrpush(struct brcmf_pub *drvr, int ifidx,
h->priority = (pktbuf->priority & BDC_PRIORITY_MASK);
h->flags2 = 0;
- h->rssi = 0;
+ h->data_offset = 0;
BDC_SET_IF_IDX(h, ifidx);
}
-int brcmf_proto_hdrpull(struct brcmf_pub *drvr, int *ifidx,
+int brcmf_proto_hdrpull(struct device *dev, int *ifidx,
struct sk_buff *pktbuf)
{
struct brcmf_proto_bdc_header *h;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
brcmf_dbg(TRACE, "Enter\n");
@@ -435,7 +439,7 @@ int brcmf_proto_attach(struct brcmf_pub *drvr)
drvr->prot = cdc;
drvr->hdrlen += BDC_HEADER_LEN;
- drvr->maxctl = BRCMF_DCMD_MAXLEN +
+ drvr->bus_if->maxctl = BRCMF_DCMD_MAXLEN +
sizeof(struct brcmf_proto_cdc_dcmd) + ROUND_UP_MARGIN;
return 0;
@@ -451,18 +455,6 @@ void brcmf_proto_detach(struct brcmf_pub *drvr)
drvr->prot = NULL;
}
-void brcmf_proto_dstats(struct brcmf_pub *drvr)
-{
- /* No stats from dongle added yet, copy bus stats */
- drvr->dstats.tx_packets = drvr->tx_packets;
- drvr->dstats.tx_errors = drvr->tx_errors;
- drvr->dstats.rx_packets = drvr->rx_packets;
- drvr->dstats.rx_errors = drvr->rx_errors;
- drvr->dstats.rx_dropped = drvr->rx_dropped;
- drvr->dstats.multicast = drvr->rx_multicast;
- return;
-}
-
int brcmf_proto_init(struct brcmf_pub *drvr)
{
int ret = 0;
@@ -470,19 +462,19 @@ int brcmf_proto_init(struct brcmf_pub *drvr)
brcmf_dbg(TRACE, "Enter\n");
- brcmf_os_proto_block(drvr);
+ mutex_lock(&drvr->proto_block);
/* Get the device MAC address */
strcpy(buf, "cur_etheraddr");
ret = brcmf_proto_cdc_query_dcmd(drvr, 0, BRCMF_C_GET_VAR,
buf, sizeof(buf));
if (ret < 0) {
- brcmf_os_proto_unblock(drvr);
+ mutex_unlock(&drvr->proto_block);
return ret;
}
memcpy(drvr->mac, buf, ETH_ALEN);
- brcmf_os_proto_unblock(drvr);
+ mutex_unlock(&drvr->proto_block);
ret = brcmf_c_preinit_dcmds(drvr);
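
The protocol layer now serializes control transactions with the proto_block mutex embedded in struct brcmf_pub instead of the removed brcmf_os_proto_block() wrappers. A compressed sketch of the pattern, assuming mutex_init(&drvr->proto_block) already happened in brcmf_attach(); the helper name and buffer size are illustrative only.

/* Illustrative query under the new locking scheme; "cur_etheraddr"
 * mirrors brcmf_proto_init() above.
 */
static int example_query_mac(struct brcmf_pub *drvr, u8 *mac)
{
	char buf[32];
	int ret;

	mutex_lock(&drvr->proto_block);

	strcpy(buf, "cur_etheraddr");
	ret = brcmf_proto_cdc_query_dcmd(drvr, 0, BRCMF_C_GET_VAR,
					 buf, sizeof(buf));
	if (ret >= 0)
		memcpy(mac, buf, ETH_ALEN);

	mutex_unlock(&drvr->proto_block);
	return ret;
}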
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
index 8918261..a51d8f5 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c
@@ -32,8 +32,6 @@
#define PKTFILTER_BUF_SIZE 2048
#define BRCMF_ARPOL_MODE 0xb /* agent|snoop|peer_autoreply */
-int brcmf_msg_level;
-
#define MSGTRACE_VERSION 1
#define BRCMF_PKT_FILTER_FIXED_LEN offsetof(struct brcmf_pkt_filter_le, u)
@@ -85,25 +83,14 @@ brcmf_c_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen)
return len;
}
-void brcmf_c_init(void)
-{
- /* Init global variables at run-time, not as part of the declaration.
- * This is required to support init/de-init of the driver.
- * Initialization
- * of globals as part of the declaration results in non-deterministic
- * behaviour since the value of the globals may be different on the
- * first time that the driver is initialized vs subsequent
- * initializations.
- */
- brcmf_msg_level = BRCMF_ERROR_VAL;
-}
-
-bool brcmf_c_prec_enq(struct brcmf_pub *drvr, struct pktq *q,
+bool brcmf_c_prec_enq(struct device *dev, struct pktq *q,
struct sk_buff *pkt, int prec)
{
struct sk_buff *p;
int eprec = -1; /* precedence to evict from */
bool discard_oldest;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
/* Fast case, precedence queue is not full and we are also not
* exceeding total queue length
@@ -446,7 +433,7 @@ brcmf_c_show_host_event(struct brcmf_event_msg *event, void *event_data)
#endif /* BCMDBG */
int
-brcmf_c_host_event(struct brcmf_info *drvr_priv, int *ifidx, void *pktdata,
+brcmf_c_host_event(struct brcmf_pub *drvr, int *ifidx, void *pktdata,
struct brcmf_event_msg *event, void **data_ptr)
{
/* check whether packet is a BRCM event pkt */
@@ -488,19 +475,18 @@ brcmf_c_host_event(struct brcmf_info *drvr_priv, int *ifidx, void *pktdata,
if (ifevent->ifidx > 0 && ifevent->ifidx < BRCMF_MAX_IFS) {
if (ifevent->action == BRCMF_E_IF_ADD)
- brcmf_add_if(drvr_priv, ifevent->ifidx, NULL,
+ brcmf_add_if(drvr->dev, ifevent->ifidx,
event->ifname,
- pvt_data->eth.h_dest,
- ifevent->flags, ifevent->bssidx);
+ pvt_data->eth.h_dest);
else
- brcmf_del_if(drvr_priv, ifevent->ifidx);
+ brcmf_del_if(drvr, ifevent->ifidx);
} else {
brcmf_dbg(ERROR, "Invalid ifidx %d for %s\n",
ifevent->ifidx, event->ifname);
}
/* send up the if event: btamp user needs it */
- *ifidx = brcmf_ifname2idx(drvr_priv, event->ifname);
+ *ifidx = brcmf_ifname2idx(drvr, event->ifname);
break;
/* These are what external supplicant/authenticator wants */
@@ -512,7 +498,7 @@ brcmf_c_host_event(struct brcmf_info *drvr_priv, int *ifidx, void *pktdata,
default:
/* Fall through: this should get _everything_ */
- *ifidx = brcmf_ifname2idx(drvr_priv, event->ifname);
+ *ifidx = brcmf_ifname2idx(drvr, event->ifname);
brcmf_dbg(TRACE, "MAC event %d, flags %x, status %x\n",
type, flags, status);
@@ -812,7 +798,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
"event_msgs" + '\0' + bitvec */
uint up = 0;
char buf[128], *ptr;
- u32 dongle_align = BRCMF_SDALIGN;
+ u32 dongle_align = drvr->bus_if->align;
u32 glom = 0;
u32 roaming = 1;
uint bcn_timeout = 3;
@@ -820,7 +806,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
int scan_unassoc_time = 40;
int i;
- brcmf_os_proto_block(drvr);
+ mutex_lock(&drvr->proto_block);
/* Set Country code */
if (drvr->country_code[0] != 0) {
@@ -889,7 +875,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr)
0, true);
}
- brcmf_os_proto_unblock(drvr);
+ mutex_unlock(&drvr->proto_block);
return 0;
}
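
Several of the routines above follow the same iovar pattern: brcmf_c_mkiovar() packs a name plus payload into a buffer, which is then pushed to the dongle through brcmf_proto_dcmd(). The sketch below compresses that pattern into one helper; the iovar name passed by the caller, the buffer size, and the cmd/buf members of struct brcmf_dcmd are assumptions based on how the structure is used elsewhere in the driver, not additions made by this patch.

/* Illustrative "set one u32 iovar" helper in the style of
 * _brcmf_set_multicast_list() above.
 */
static int example_set_u32_iovar(struct brcmf_pub *drvr, char *name, u32 val)
{
	struct brcmf_dcmd dcmd;
	char buf[64];
	__le32 val_le = cpu_to_le32(val);

	if (!brcmf_c_mkiovar(name, (char *)&val_le, sizeof(val_le),
			     buf, sizeof(buf)))
		return -EINVAL;

	memset(&dcmd, 0, sizeof(dcmd));
	dcmd.cmd = BRCMF_C_SET_VAR;
	dcmd.buf = buf;
	dcmd.len = sizeof(buf);
	dcmd.set = true;

	return brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
}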
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
index 7467922..bb26ee3 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_dbg.h
@@ -17,6 +17,21 @@
#ifndef _BRCMF_DBG_H_
#define _BRCMF_DBG_H_
+/* message levels */
+#define BRCMF_ERROR_VAL 0x0001
+#define BRCMF_TRACE_VAL 0x0002
+#define BRCMF_INFO_VAL 0x0004
+#define BRCMF_DATA_VAL 0x0008
+#define BRCMF_CTL_VAL 0x0010
+#define BRCMF_TIMER_VAL 0x0020
+#define BRCMF_HDRS_VAL 0x0040
+#define BRCMF_BYTES_VAL 0x0080
+#define BRCMF_INTR_VAL 0x0100
+#define BRCMF_GLOM_VAL 0x0400
+#define BRCMF_EVENT_VAL 0x0800
+#define BRCMF_BTA_VAL 0x1000
+#define BRCMF_ISCAN_VAL 0x2000
+
#if defined(BCMDBG)
#define brcmf_dbg(level, fmt, ...) \
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index 4acbac5..eb9eb76 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -43,7 +43,6 @@
#include "dhd_proto.h"
#include "dhd_dbg.h"
#include "wl_cfg80211.h"
-#include "bcmchip.h"
MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN fullmac driver.");
@@ -53,48 +52,19 @@ MODULE_LICENSE("Dual BSD/GPL");
/* Interface control information */
struct brcmf_if {
- struct brcmf_info *info; /* back pointer to brcmf_info */
+ struct brcmf_pub *drvr; /* back pointer to brcmf_pub */
/* OS/stack specifics */
struct net_device *ndev;
struct net_device_stats stats;
int idx; /* iface idx in dongle */
- int state; /* interface state */
u8 mac_addr[ETH_ALEN]; /* assigned MAC address */
};
-/* Local private structure (extension of pub) */
-struct brcmf_info {
- struct brcmf_pub pub;
-
- /* OS/stack specifics */
- struct brcmf_if *iflist[BRCMF_MAX_IFS];
-
- struct mutex proto_block;
-
- struct work_struct setmacaddr_work;
- struct work_struct multicast_work;
- u8 macvalue[ETH_ALEN];
- atomic_t pend_8021x_cnt;
-};
-
/* Error bits */
+int brcmf_msg_level = BRCMF_ERROR_VAL;
module_param(brcmf_msg_level, int, 0);
-
-static int brcmf_net2idx(struct brcmf_info *drvr_priv, struct net_device *ndev)
-{
- int i = 0;
-
- while (i < BRCMF_MAX_IFS) {
- if (drvr_priv->iflist[i] && drvr_priv->iflist[i]->ndev == ndev)
- return i;
- i++;
- }
-
- return BRCMF_BAD_IF;
-}
-
-int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name)
+int brcmf_ifname2idx(struct brcmf_pub *drvr, char *name)
{
int i = BRCMF_MAX_IFS;
struct brcmf_if *ifp;
@@ -103,7 +73,7 @@ int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name)
return 0;
while (--i > 0) {
- ifp = drvr_priv->iflist[i];
+ ifp = drvr->iflist[i];
if (ifp && !strncmp(ifp->ndev->name, name, IFNAMSIZ))
break;
}
@@ -115,20 +85,18 @@ int brcmf_ifname2idx(struct brcmf_info *drvr_priv, char *name)
char *brcmf_ifname(struct brcmf_pub *drvr, int ifidx)
{
- struct brcmf_info *drvr_priv = drvr->info;
-
if (ifidx < 0 || ifidx >= BRCMF_MAX_IFS) {
brcmf_dbg(ERROR, "ifidx %d out of range\n", ifidx);
return "<if_bad>";
}
- if (drvr_priv->iflist[ifidx] == NULL) {
+ if (drvr->iflist[ifidx] == NULL) {
brcmf_dbg(ERROR, "null i/f %d\n", ifidx);
return "<if_null>";
}
- if (drvr_priv->iflist[ifidx]->ndev)
- return drvr_priv->iflist[ifidx]->ndev->name;
+ if (drvr->iflist[ifidx]->ndev)
+ return drvr->iflist[ifidx]->ndev->name;
return "<if_none>";
}
@@ -146,10 +114,10 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
uint buflen;
int ret;
- struct brcmf_info *drvr_priv = container_of(work, struct brcmf_info,
+ struct brcmf_pub *drvr = container_of(work, struct brcmf_pub,
multicast_work);
- ndev = drvr_priv->iflist[0]->ndev;
+ ndev = drvr->iflist[0]->ndev;
cnt = netdev_mc_count(ndev);
/* Determine initial value of allmulti flag */
@@ -183,10 +151,10 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
dcmd.len = buflen;
dcmd.set = true;
- ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len);
+ ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
if (ret < 0) {
brcmf_dbg(ERROR, "%s: set mcast_list failed, cnt %d\n",
- brcmf_ifname(&drvr_priv->pub, 0), cnt);
+ brcmf_ifname(drvr, 0), cnt);
dcmd_value = cnt ? true : dcmd_value;
}
@@ -208,7 +176,7 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
("allmulti", (void *)&dcmd_le_value,
sizeof(dcmd_le_value), buf, buflen)) {
brcmf_dbg(ERROR, "%s: mkiovar failed for allmulti, datalen %d buflen %u\n",
- brcmf_ifname(&drvr_priv->pub, 0),
+ brcmf_ifname(drvr, 0),
(int)sizeof(dcmd_value), buflen);
kfree(buf);
return;
@@ -220,10 +188,10 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
dcmd.len = buflen;
dcmd.set = true;
- ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len);
+ ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
if (ret < 0) {
brcmf_dbg(ERROR, "%s: set allmulti %d failed\n",
- brcmf_ifname(&drvr_priv->pub, 0),
+ brcmf_ifname(drvr, 0),
le32_to_cpu(dcmd_le_value));
}
@@ -241,10 +209,10 @@ static void _brcmf_set_multicast_list(struct work_struct *work)
dcmd.len = sizeof(dcmd_le_value);
dcmd.set = true;
- ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len);
+ ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
if (ret < 0) {
brcmf_dbg(ERROR, "%s: set promisc %d failed\n",
- brcmf_ifname(&drvr_priv->pub, 0),
+ brcmf_ifname(drvr, 0),
le32_to_cpu(dcmd_le_value));
}
}
@@ -256,14 +224,14 @@ _brcmf_set_mac_address(struct work_struct *work)
struct brcmf_dcmd dcmd;
int ret;
- struct brcmf_info *drvr_priv = container_of(work, struct brcmf_info,
+ struct brcmf_pub *drvr = container_of(work, struct brcmf_pub,
setmacaddr_work);
brcmf_dbg(TRACE, "enter\n");
- if (!brcmf_c_mkiovar("cur_etheraddr", (char *)drvr_priv->macvalue,
+ if (!brcmf_c_mkiovar("cur_etheraddr", (char *)drvr->macvalue,
ETH_ALEN, buf, 32)) {
brcmf_dbg(ERROR, "%s: mkiovar failed for cur_etheraddr\n",
- brcmf_ifname(&drvr_priv->pub, 0));
+ brcmf_ifname(drvr, 0));
return;
}
memset(&dcmd, 0, sizeof(dcmd));
@@ -272,52 +240,40 @@ _brcmf_set_mac_address(struct work_struct *work)
dcmd.len = 32;
dcmd.set = true;
- ret = brcmf_proto_dcmd(&drvr_priv->pub, 0, &dcmd, dcmd.len);
+ ret = brcmf_proto_dcmd(drvr, 0, &dcmd, dcmd.len);
if (ret < 0)
brcmf_dbg(ERROR, "%s: set cur_etheraddr failed\n",
- brcmf_ifname(&drvr_priv->pub, 0));
+ brcmf_ifname(drvr, 0));
else
- memcpy(drvr_priv->iflist[0]->ndev->dev_addr,
- drvr_priv->macvalue, ETH_ALEN);
+ memcpy(drvr->iflist[0]->ndev->dev_addr,
+ drvr->macvalue, ETH_ALEN);
return;
}
static int brcmf_netdev_set_mac_address(struct net_device *ndev, void *addr)
{
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)
- netdev_priv(ndev);
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
struct sockaddr *sa = (struct sockaddr *)addr;
- int ifidx;
-
- ifidx = brcmf_net2idx(drvr_priv, ndev);
- if (ifidx == BRCMF_BAD_IF)
- return -1;
- memcpy(&drvr_priv->macvalue, sa->sa_data, ETH_ALEN);
- schedule_work(&drvr_priv->setmacaddr_work);
+ memcpy(&drvr->macvalue, sa->sa_data, ETH_ALEN);
+ schedule_work(&drvr->setmacaddr_work);
return 0;
}
static void brcmf_netdev_set_multicast_list(struct net_device *ndev)
{
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)
- netdev_priv(ndev);
- int ifidx;
-
- ifidx = brcmf_net2idx(drvr_priv, ndev);
- if (ifidx == BRCMF_BAD_IF)
- return;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
- schedule_work(&drvr_priv->multicast_work);
+ schedule_work(&drvr->multicast_work);
}
int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf)
{
- struct brcmf_info *drvr_priv = drvr->info;
-
/* Reject if down */
- if (!drvr->up || (drvr->busstate == BRCMF_BUS_DOWN))
+ if (!drvr->bus_if->drvr_up || (drvr->bus_if->state == BRCMF_BUS_DOWN))
return -ENODEV;
/* Update multicast statistic */
@@ -328,122 +284,118 @@ int brcmf_sendpkt(struct brcmf_pub *drvr, int ifidx, struct sk_buff *pktbuf)
if (is_multicast_ether_addr(eh->h_dest))
drvr->tx_multicast++;
if (ntohs(eh->h_proto) == ETH_P_PAE)
- atomic_inc(&drvr_priv->pend_8021x_cnt);
+ atomic_inc(&drvr->pend_8021x_cnt);
}
/* If the protocol uses a data header, apply it */
brcmf_proto_hdrpush(drvr, ifidx, pktbuf);
/* Use bus module to send data frame */
- return brcmf_sdbrcm_bus_txdata(drvr->bus, pktbuf);
+ return drvr->bus_if->brcmf_bus_txdata(drvr->dev, pktbuf);
}
static int brcmf_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
int ret;
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)
- netdev_priv(ndev);
- int ifidx;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
brcmf_dbg(TRACE, "Enter\n");
/* Reject if down */
- if (!drvr_priv->pub.up || (drvr_priv->pub.busstate == BRCMF_BUS_DOWN)) {
- brcmf_dbg(ERROR, "xmit rejected pub.up=%d busstate=%d\n",
- drvr_priv->pub.up, drvr_priv->pub.busstate);
+ if (!drvr->bus_if->drvr_up ||
+ (drvr->bus_if->state == BRCMF_BUS_DOWN)) {
+ brcmf_dbg(ERROR, "xmit rejected drvup=%d state=%d\n",
+ drvr->bus_if->drvr_up,
+ drvr->bus_if->state);
netif_stop_queue(ndev);
return -ENODEV;
}
- ifidx = brcmf_net2idx(drvr_priv, ndev);
- if (ifidx == BRCMF_BAD_IF) {
- brcmf_dbg(ERROR, "bad ifidx %d\n", ifidx);
+ if (!drvr->iflist[ifp->idx]) {
+ brcmf_dbg(ERROR, "bad ifidx %d\n", ifp->idx);
netif_stop_queue(ndev);
return -ENODEV;
}
/* Make sure there's enough room for any header */
- if (skb_headroom(skb) < drvr_priv->pub.hdrlen) {
+ if (skb_headroom(skb) < drvr->hdrlen) {
struct sk_buff *skb2;
brcmf_dbg(INFO, "%s: insufficient headroom\n",
- brcmf_ifname(&drvr_priv->pub, ifidx));
- drvr_priv->pub.tx_realloc++;
- skb2 = skb_realloc_headroom(skb, drvr_priv->pub.hdrlen);
+ brcmf_ifname(drvr, ifp->idx));
+ drvr->bus_if->tx_realloc++;
+ skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
dev_kfree_skb(skb);
skb = skb2;
if (skb == NULL) {
brcmf_dbg(ERROR, "%s: skb_realloc_headroom failed\n",
- brcmf_ifname(&drvr_priv->pub, ifidx));
+ brcmf_ifname(drvr, ifp->idx));
ret = -ENOMEM;
goto done;
}
}
- ret = brcmf_sendpkt(&drvr_priv->pub, ifidx, skb);
+ ret = brcmf_sendpkt(drvr, ifp->idx, skb);
done:
if (ret)
- drvr_priv->pub.dstats.tx_dropped++;
+ drvr->bus_if->dstats.tx_dropped++;
else
- drvr_priv->pub.tx_packets++;
+ drvr->bus_if->dstats.tx_packets++;
/* Return ok: we always eat the packet */
return 0;
}
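
The netdev callbacks above now recover their context with netdev_priv() instead of the old double pointer into brcmf_info. For that to work, each net_device must be allocated with struct brcmf_if as its private area and the ->drvr back pointer filled in. A sketch of the assumed setup follows; the real allocation lives in brcmf_add_if(), which is outside this hunk, and the "wlan%d" name and three-argument alloc_netdev() form (matching kernels of this era) are illustrative.

/* Assumed interface setup so that netdev_priv() yields a brcmf_if
 * whose ->drvr back pointer reaches the common structure.
 */
static struct net_device *example_alloc_if(struct brcmf_pub *drvr, int idx)
{
	struct net_device *ndev;
	struct brcmf_if *ifp;

	ndev = alloc_netdev(sizeof(struct brcmf_if), "wlan%d", ether_setup);
	if (!ndev)
		return NULL;

	ifp = netdev_priv(ndev);
	ifp->ndev = ndev;
	ifp->drvr = drvr;	/* back pointer used by the callbacks above */
	ifp->idx = idx;
	drvr->iflist[idx] = ifp;

	return ndev;
}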
-void brcmf_txflowcontrol(struct brcmf_pub *drvr, int ifidx, bool state)
+void brcmf_txflowcontrol(struct device *dev, int ifidx, bool state)
{
struct net_device *ndev;
- struct brcmf_info *drvr_priv = drvr->info;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
brcmf_dbg(TRACE, "Enter\n");
- drvr->txoff = state;
- ndev = drvr_priv->iflist[ifidx]->ndev;
+ ndev = drvr->iflist[ifidx]->ndev;
if (state == ON)
netif_stop_queue(ndev);
else
netif_wake_queue(ndev);
}
-static int brcmf_host_event(struct brcmf_info *drvr_priv, int *ifidx,
+static int brcmf_host_event(struct brcmf_pub *drvr, int *ifidx,
void *pktdata, struct brcmf_event_msg *event,
void **data)
{
int bcmerror = 0;
- bcmerror = brcmf_c_host_event(drvr_priv, ifidx, pktdata, event, data);
+ bcmerror = brcmf_c_host_event(drvr, ifidx, pktdata, event, data);
if (bcmerror != 0)
return bcmerror;
- if (drvr_priv->iflist[*ifidx]->ndev)
- brcmf_cfg80211_event(drvr_priv->iflist[*ifidx]->ndev,
+ if (drvr->iflist[*ifidx]->ndev)
+ brcmf_cfg80211_event(drvr->iflist[*ifidx]->ndev,
event, *data);
return bcmerror;
}
-void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb,
- int numpkt)
+void brcmf_rx_frame(struct device *dev, int ifidx,
+ struct sk_buff_head *skb_list)
{
- struct brcmf_info *drvr_priv = drvr->info;
unsigned char *eth;
uint len;
void *data;
- struct sk_buff *pnext, *save_pktbuf;
- int i;
+ struct sk_buff *skb, *pnext;
struct brcmf_if *ifp;
struct brcmf_event_msg event;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
brcmf_dbg(TRACE, "Enter\n");
- save_pktbuf = skb;
-
- for (i = 0; skb && i < numpkt; i++, skb = pnext) {
-
- pnext = skb->next;
- skb->next = NULL;
+ skb_queue_walk_safe(skb_list, skb, pnext) {
+ skb_unlink(skb, skb_list);
/* Get the protocol, maintain skb around eth_type_trans()
* The main reason for this hack is for the limitation of
@@ -460,15 +412,21 @@ void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb,
eth = skb->data;
len = skb->len;
- ifp = drvr_priv->iflist[ifidx];
+ ifp = drvr->iflist[ifidx];
if (ifp == NULL)
- ifp = drvr_priv->iflist[0];
+ ifp = drvr->iflist[0];
+
+ if (!ifp || !ifp->ndev ||
+ ifp->ndev->reg_state != NETREG_REGISTERED) {
+ brcmu_pkt_buf_free_skb(skb);
+ continue;
+ }
skb->dev = ifp->ndev;
skb->protocol = eth_type_trans(skb, skb->dev);
if (skb->pkt_type == PACKET_MULTICAST)
- drvr_priv->pub.rx_multicast++;
+ bus_if->dstats.multicast++;
skb->data = eth;
skb->len = len;
@@ -478,19 +436,17 @@ void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb,
/* Process special event packets and then discard them */
if (ntohs(skb->protocol) == ETH_P_LINK_CTL)
- brcmf_host_event(drvr_priv, &ifidx,
+ brcmf_host_event(drvr, &ifidx,
skb_mac_header(skb),
&event, &data);
- if (drvr_priv->iflist[ifidx] &&
- !drvr_priv->iflist[ifidx]->state)
- ifp = drvr_priv->iflist[ifidx];
-
- if (ifp->ndev)
+ if (drvr->iflist[ifidx]) {
+ ifp = drvr->iflist[ifidx];
ifp->ndev->last_rx = jiffies;
+ }
- drvr->dstats.rx_bytes += skb->len;
- drvr->rx_packets++; /* Local count */
+ bus_if->dstats.rx_bytes += skb->len;
+ bus_if->dstats.rx_packets++; /* Local count */
if (in_interrupt())
netif_rx(skb);
@@ -505,59 +461,48 @@ void brcmf_rx_frame(struct brcmf_pub *drvr, int ifidx, struct sk_buff *skb,
}
}
-void brcmf_txcomplete(struct brcmf_pub *drvr, struct sk_buff *txp, bool success)
+void brcmf_txcomplete(struct device *dev, struct sk_buff *txp, bool success)
{
uint ifidx;
- struct brcmf_info *drvr_priv = drvr->info;
struct ethhdr *eh;
u16 type;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
- brcmf_proto_hdrpull(drvr, &ifidx, txp);
+ brcmf_proto_hdrpull(dev, &ifidx, txp);
eh = (struct ethhdr *)(txp->data);
type = ntohs(eh->h_proto);
if (type == ETH_P_PAE)
- atomic_dec(&drvr_priv->pend_8021x_cnt);
+ atomic_dec(&drvr->pend_8021x_cnt);
}
static struct net_device_stats *brcmf_netdev_get_stats(struct net_device *ndev)
{
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)
- netdev_priv(ndev);
- struct brcmf_if *ifp;
- int ifidx;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_bus *bus_if = ifp->drvr->bus_if;
brcmf_dbg(TRACE, "Enter\n");
- ifidx = brcmf_net2idx(drvr_priv, ndev);
- if (ifidx == BRCMF_BAD_IF)
- return NULL;
-
- ifp = drvr_priv->iflist[ifidx];
-
- if (drvr_priv->pub.up)
- /* Use the protocol to get dongle stats */
- brcmf_proto_dstats(&drvr_priv->pub);
-
/* Copy dongle stats to net device stats */
- ifp->stats.rx_packets = drvr_priv->pub.dstats.rx_packets;
- ifp->stats.tx_packets = drvr_priv->pub.dstats.tx_packets;
- ifp->stats.rx_bytes = drvr_priv->pub.dstats.rx_bytes;
- ifp->stats.tx_bytes = drvr_priv->pub.dstats.tx_bytes;
- ifp->stats.rx_errors = drvr_priv->pub.dstats.rx_errors;
- ifp->stats.tx_errors = drvr_priv->pub.dstats.tx_errors;
- ifp->stats.rx_dropped = drvr_priv->pub.dstats.rx_dropped;
- ifp->stats.tx_dropped = drvr_priv->pub.dstats.tx_dropped;
- ifp->stats.multicast = drvr_priv->pub.dstats.multicast;
+ ifp->stats.rx_packets = bus_if->dstats.rx_packets;
+ ifp->stats.tx_packets = bus_if->dstats.tx_packets;
+ ifp->stats.rx_bytes = bus_if->dstats.rx_bytes;
+ ifp->stats.tx_bytes = bus_if->dstats.tx_bytes;
+ ifp->stats.rx_errors = bus_if->dstats.rx_errors;
+ ifp->stats.tx_errors = bus_if->dstats.tx_errors;
+ ifp->stats.rx_dropped = bus_if->dstats.rx_dropped;
+ ifp->stats.tx_dropped = bus_if->dstats.tx_dropped;
+ ifp->stats.multicast = bus_if->dstats.multicast;
return &ifp->stats;
}
/* Retrieve current toe component enables, which are kept
as a bitmap in toe_ol iovar */
-static int brcmf_toe_get(struct brcmf_info *drvr_priv, int ifidx, u32 *toe_ol)
+static int brcmf_toe_get(struct brcmf_pub *drvr, int ifidx, u32 *toe_ol)
{
struct brcmf_dcmd dcmd;
__le32 toe_le;
@@ -572,17 +517,17 @@ static int brcmf_toe_get(struct brcmf_info *drvr_priv, int ifidx, u32 *toe_ol)
dcmd.set = false;
strcpy(buf, "toe_ol");
- ret = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, dcmd.len);
+ ret = brcmf_proto_dcmd(drvr, ifidx, &dcmd, dcmd.len);
if (ret < 0) {
/* Check for older dongle image that doesn't support toe_ol */
if (ret == -EIO) {
brcmf_dbg(ERROR, "%s: toe not supported by device\n",
- brcmf_ifname(&drvr_priv->pub, ifidx));
+ brcmf_ifname(drvr, ifidx));
return -EOPNOTSUPP;
}
brcmf_dbg(INFO, "%s: could not get toe_ol: ret=%d\n",
- brcmf_ifname(&drvr_priv->pub, ifidx), ret);
+ brcmf_ifname(drvr, ifidx), ret);
return ret;
}
@@ -593,7 +538,7 @@ static int brcmf_toe_get(struct brcmf_info *drvr_priv, int ifidx, u32 *toe_ol)
/* Set current toe component enables in toe_ol iovar,
and set toe global enable iovar */
-static int brcmf_toe_set(struct brcmf_info *drvr_priv, int ifidx, u32 toe_ol)
+static int brcmf_toe_set(struct brcmf_pub *drvr, int ifidx, u32 toe_ol)
{
struct brcmf_dcmd dcmd;
char buf[32];
@@ -611,10 +556,10 @@ static int brcmf_toe_set(struct brcmf_info *drvr_priv, int ifidx, u32 toe_ol)
strcpy(buf, "toe_ol");
memcpy(&buf[sizeof("toe_ol")], &toe_le, sizeof(u32));
- ret = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, dcmd.len);
+ ret = brcmf_proto_dcmd(drvr, ifidx, &dcmd, dcmd.len);
if (ret < 0) {
brcmf_dbg(ERROR, "%s: could not set toe_ol: ret=%d\n",
- brcmf_ifname(&drvr_priv->pub, ifidx), ret);
+ brcmf_ifname(drvr, ifidx), ret);
return ret;
}
@@ -624,10 +569,10 @@ static int brcmf_toe_set(struct brcmf_info *drvr_priv, int ifidx, u32 toe_ol)
strcpy(buf, "toe");
memcpy(&buf[sizeof("toe")], &toe_le, sizeof(u32));
- ret = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, dcmd.len);
+ ret = brcmf_proto_dcmd(drvr, ifidx, &dcmd, dcmd.len);
if (ret < 0) {
brcmf_dbg(ERROR, "%s: could not set toe: ret=%d\n",
- brcmf_ifname(&drvr_priv->pub, ifidx), ret);
+ brcmf_ifname(drvr, ifidx), ret);
return ret;
}
@@ -637,21 +582,19 @@ static int brcmf_toe_set(struct brcmf_info *drvr_priv, int ifidx, u32 toe_ol)
static void brcmf_ethtool_get_drvinfo(struct net_device *ndev,
struct ethtool_drvinfo *info)
{
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)
- netdev_priv(ndev);
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
sprintf(info->driver, KBUILD_MODNAME);
- sprintf(info->version, "%lu", drvr_priv->pub.drv_version);
- sprintf(info->fw_version, "%s", BCM4329_FW_NAME);
- sprintf(info->bus_info, "%s",
- dev_name(brcmf_bus_get_device(drvr_priv->pub.bus)));
+ sprintf(info->version, "%lu", drvr->drv_version);
+ sprintf(info->bus_info, "%s", dev_name(drvr->dev));
}
static struct ethtool_ops brcmf_ethtool_ops = {
.get_drvinfo = brcmf_ethtool_get_drvinfo
};
-static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr)
+static int brcmf_ethtool(struct brcmf_pub *drvr, void __user *uaddr)
{
struct ethtool_drvinfo info;
char drvname[sizeof(info.driver)];
@@ -685,18 +628,18 @@ static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr)
}
/* otherwise, require dongle to be up */
- else if (!drvr_priv->pub.up) {
+ else if (!drvr->bus_if->drvr_up) {
brcmf_dbg(ERROR, "dongle is not up\n");
return -ENODEV;
}
/* finally, report dongle driver type */
- else if (drvr_priv->pub.iswl)
+ else if (drvr->iswl)
sprintf(info.driver, "wl");
else
sprintf(info.driver, "xx");
- sprintf(info.version, "%lu", drvr_priv->pub.drv_version);
+ sprintf(info.version, "%lu", drvr->drv_version);
if (copy_to_user(uaddr, &info, sizeof(info)))
return -EFAULT;
brcmf_dbg(CTL, "given %*s, returning %s\n",
@@ -706,7 +649,7 @@ static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr)
/* Get toe offload components from dongle */
case ETHTOOL_GRXCSUM:
case ETHTOOL_GTXCSUM:
- ret = brcmf_toe_get(drvr_priv, 0, &toe_cmpnt);
+ ret = brcmf_toe_get(drvr, 0, &toe_cmpnt);
if (ret < 0)
return ret;
@@ -727,7 +670,7 @@ static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr)
return -EFAULT;
/* Read the current settings, update and write back */
- ret = brcmf_toe_get(drvr_priv, 0, &toe_cmpnt);
+ ret = brcmf_toe_get(drvr, 0, &toe_cmpnt);
if (ret < 0)
return ret;
@@ -739,17 +682,17 @@ static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr)
else
toe_cmpnt &= ~csum_dir;
- ret = brcmf_toe_set(drvr_priv, 0, toe_cmpnt);
+ ret = brcmf_toe_set(drvr, 0, toe_cmpnt);
if (ret < 0)
return ret;
/* If setting TX checksum mode, tell Linux the new mode */
if (cmd == ETHTOOL_STXCSUM) {
if (edata.data)
- drvr_priv->iflist[0]->ndev->features |=
+ drvr->iflist[0]->ndev->features |=
NETIF_F_IP_CSUM;
else
- drvr_priv->iflist[0]->ndev->features &=
+ drvr->iflist[0]->ndev->features &=
~NETIF_F_IP_CSUM;
}
@@ -765,18 +708,16 @@ static int brcmf_ethtool(struct brcmf_info *drvr_priv, void __user *uaddr)
static int brcmf_netdev_ioctl_entry(struct net_device *ndev, struct ifreq *ifr,
int cmd)
{
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)
- netdev_priv(ndev);
- int ifidx;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
- ifidx = brcmf_net2idx(drvr_priv, ndev);
- brcmf_dbg(TRACE, "ifidx %d, cmd 0x%04x\n", ifidx, cmd);
+ brcmf_dbg(TRACE, "ifidx %d, cmd 0x%04x\n", ifp->idx, cmd);
- if (ifidx == BRCMF_BAD_IF)
+ if (!drvr->iflist[ifp->idx])
return -1;
if (cmd == SIOCETHTOOL)
- return brcmf_ethtool(drvr_priv, ifr->ifr_data);
+ return brcmf_ethtool(drvr, ifr->ifr_data);
return -EOPNOTSUPP;
}
@@ -788,28 +729,25 @@ s32 brcmf_exec_dcmd(struct net_device *ndev, u32 cmd, void *arg, u32 len)
s32 err = 0;
int buflen = 0;
bool is_set_key_cmd;
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)
- netdev_priv(ndev);
- int ifidx;
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
memset(&dcmd, 0, sizeof(dcmd));
dcmd.cmd = cmd;
dcmd.buf = arg;
dcmd.len = len;
- ifidx = brcmf_net2idx(drvr_priv, ndev);
-
if (dcmd.buf != NULL)
buflen = min_t(uint, dcmd.len, BRCMF_DCMD_MAXLEN);
/* send to dongle (must be up, and wl) */
- if ((drvr_priv->pub.busstate != BRCMF_BUS_DATA)) {
+ if ((drvr->bus_if->state != BRCMF_BUS_DATA)) {
brcmf_dbg(ERROR, "DONGLE_DOWN\n");
err = -EIO;
goto done;
}
- if (!drvr_priv->pub.iswl) {
+ if (!drvr->iswl) {
err = -EIO;
goto done;
}
@@ -826,7 +764,7 @@ s32 brcmf_exec_dcmd(struct net_device *ndev, u32 cmd, void *arg, u32 len)
if (is_set_key_cmd)
brcmf_netdev_wait_pend8021x(ndev);
- err = brcmf_proto_dcmd(&drvr_priv->pub, ifidx, &dcmd, buflen);
+ err = brcmf_proto_dcmd(drvr, ifp->idx, &dcmd, buflen);
done:
if (err > 0)
@@ -837,15 +775,16 @@ done:
static int brcmf_netdev_stop(struct net_device *ndev)
{
- struct brcmf_pub *drvr = *(struct brcmf_pub **) netdev_priv(ndev);
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
brcmf_dbg(TRACE, "Enter\n");
brcmf_cfg80211_down(drvr->config);
- if (drvr->up == 0)
+ if (drvr->bus_if->drvr_up == 0)
return 0;
/* Set state and stop OS transmissions */
- drvr->up = 0;
+ drvr->bus_if->drvr_up = false;
netif_stop_queue(ndev);
return 0;
@@ -853,39 +792,37 @@ static int brcmf_netdev_stop(struct net_device *ndev)
static int brcmf_netdev_open(struct net_device *ndev)
{
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)
- netdev_priv(ndev);
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
u32 toe_ol;
- int ifidx = brcmf_net2idx(drvr_priv, ndev);
s32 ret = 0;
- brcmf_dbg(TRACE, "ifidx %d\n", ifidx);
-
- if (ifidx == 0) { /* do it only for primary eth0 */
+ brcmf_dbg(TRACE, "ifidx %d\n", ifp->idx);
+ if (ifp->idx == 0) { /* do it only for primary eth0 */
/* try to bring up bus */
- ret = brcmf_bus_start(&drvr_priv->pub);
+ ret = brcmf_bus_start(drvr->dev);
if (ret != 0) {
brcmf_dbg(ERROR, "failed with code %d\n", ret);
return -1;
}
- atomic_set(&drvr_priv->pend_8021x_cnt, 0);
+ atomic_set(&drvr->pend_8021x_cnt, 0);
- memcpy(ndev->dev_addr, drvr_priv->pub.mac, ETH_ALEN);
+ memcpy(ndev->dev_addr, drvr->mac, ETH_ALEN);
/* Get current TOE mode from dongle */
- if (brcmf_toe_get(drvr_priv, ifidx, &toe_ol) >= 0
+ if (brcmf_toe_get(drvr, ifp->idx, &toe_ol) >= 0
&& (toe_ol & TOE_TX_CSUM_OL) != 0)
- drvr_priv->iflist[ifidx]->ndev->features |=
+ drvr->iflist[ifp->idx]->ndev->features |=
NETIF_F_IP_CSUM;
else
- drvr_priv->iflist[ifidx]->ndev->features &=
+ drvr->iflist[ifp->idx]->ndev->features &=
~NETIF_F_IP_CSUM;
}
/* Allow transmit calls */
netif_start_queue(ndev);
- drvr_priv->pub.up = 1;
- if (brcmf_cfg80211_up(drvr_priv->pub.config)) {
+ drvr->bus_if->drvr_up = true;
+ if (brcmf_cfg80211_up(drvr->config)) {
brcmf_dbg(ERROR, "failed to bring up cfg80211\n");
return -1;
}
@@ -893,193 +830,155 @@ static int brcmf_netdev_open(struct net_device *ndev)
return ret;
}
+static const struct net_device_ops brcmf_netdev_ops_pri = {
+ .ndo_open = brcmf_netdev_open,
+ .ndo_stop = brcmf_netdev_stop,
+ .ndo_get_stats = brcmf_netdev_get_stats,
+ .ndo_do_ioctl = brcmf_netdev_ioctl_entry,
+ .ndo_start_xmit = brcmf_netdev_start_xmit,
+ .ndo_set_mac_address = brcmf_netdev_set_mac_address,
+ .ndo_set_rx_mode = brcmf_netdev_set_multicast_list
+};
+
int
-brcmf_add_if(struct brcmf_info *drvr_priv, int ifidx, struct net_device *ndev,
- char *name, u8 *mac_addr, u32 flags, u8 bssidx)
+brcmf_add_if(struct device *dev, int ifidx, char *name, u8 *mac_addr)
{
struct brcmf_if *ifp;
- int ret = 0, err = 0;
+ struct net_device *ndev;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
- brcmf_dbg(TRACE, "idx %d, handle->%p\n", ifidx, ndev);
+ brcmf_dbg(TRACE, "idx %d\n", ifidx);
- ifp = drvr_priv->iflist[ifidx];
- if (!ifp) {
- ifp = kmalloc(sizeof(struct brcmf_if), GFP_ATOMIC);
- if (!ifp)
- return -ENOMEM;
+ ifp = drvr->iflist[ifidx];
+ /*
+ * Delete the existing interface before overwriting it
+ * in case we missed the BRCMF_E_IF_DEL event.
+ */
+ if (ifp) {
+ brcmf_dbg(ERROR, "ERROR: netdev:%s already exists, try free & unregister\n",
+ ifp->ndev->name);
+ netif_stop_queue(ifp->ndev);
+ unregister_netdev(ifp->ndev);
+ free_netdev(ifp->ndev);
+ drvr->iflist[ifidx] = NULL;
}
- memset(ifp, 0, sizeof(struct brcmf_if));
- ifp->info = drvr_priv;
- drvr_priv->iflist[ifidx] = ifp;
+ /* Allocate netdev, including space for private structure */
+ ndev = alloc_netdev(sizeof(struct brcmf_if), name, ether_setup);
+ if (!ndev) {
+ brcmf_dbg(ERROR, "OOM - alloc_netdev\n");
+ return -ENOMEM;
+ }
+
+ ifp = netdev_priv(ndev);
+ ifp->ndev = ndev;
+ ifp->drvr = drvr;
+ drvr->iflist[ifidx] = ifp;
+ ifp->idx = ifidx;
if (mac_addr != NULL)
memcpy(&ifp->mac_addr, mac_addr, ETH_ALEN);
- if (ndev == NULL) {
- ifp->state = BRCMF_E_IF_ADD;
- ifp->idx = ifidx;
- /*
- * Delete the existing interface before overwriting it
- * in case we missed the BRCMF_E_IF_DEL event.
- */
- if (ifp->ndev != NULL) {
- brcmf_dbg(ERROR, "ERROR: netdev:%s already exists, try free & unregister\n",
- ifp->ndev->name);
- netif_stop_queue(ifp->ndev);
- unregister_netdev(ifp->ndev);
- free_netdev(ifp->ndev);
- }
-
- /* Allocate netdev, including space for private structure */
- ifp->ndev = alloc_netdev(sizeof(drvr_priv), "wlan%d",
- ether_setup);
- if (!ifp->ndev) {
- brcmf_dbg(ERROR, "OOM - alloc_netdev\n");
- ret = -ENOMEM;
- }
-
- if (ret == 0) {
- memcpy(netdev_priv(ifp->ndev), &drvr_priv,
- sizeof(drvr_priv));
- err = brcmf_net_attach(&drvr_priv->pub, ifp->idx);
- if (err != 0) {
- brcmf_dbg(ERROR, "brcmf_net_attach failed, err %d\n",
- err);
- ret = -EOPNOTSUPP;
- } else {
- brcmf_dbg(TRACE, " ==== pid:%x, net_device for if:%s created ===\n",
- current->pid, ifp->ndev->name);
- ifp->state = 0;
- }
- }
-
- if (ret < 0) {
- if (ifp->ndev)
- free_netdev(ifp->ndev);
+ if (brcmf_net_attach(drvr, ifp->idx)) {
+ brcmf_dbg(ERROR, "brcmf_net_attach failed");
+ free_netdev(ifp->ndev);
+ drvr->iflist[ifidx] = NULL;
+ return -EOPNOTSUPP;
+ }
- drvr_priv->iflist[ifp->idx] = NULL;
- kfree(ifp);
- }
- } else
- ifp->ndev = ndev;
+ brcmf_dbg(TRACE, " ==== pid:%x, net_device for if:%s created ===\n",
+ current->pid, ifp->ndev->name);
return 0;
}
-void brcmf_del_if(struct brcmf_info *drvr_priv, int ifidx)
+void brcmf_del_if(struct brcmf_pub *drvr, int ifidx)
{
struct brcmf_if *ifp;
brcmf_dbg(TRACE, "idx %d\n", ifidx);
- ifp = drvr_priv->iflist[ifidx];
+ ifp = drvr->iflist[ifidx];
if (!ifp) {
brcmf_dbg(ERROR, "Null interface\n");
return;
}
+ if (ifp->ndev) {
+ if (ifidx == 0) {
+ if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
+ rtnl_lock();
+ brcmf_netdev_stop(ifp->ndev);
+ rtnl_unlock();
+ }
+ } else {
+ netif_stop_queue(ifp->ndev);
+ }
- ifp->state = BRCMF_E_IF_DEL;
- ifp->idx = ifidx;
- if (ifp->ndev != NULL) {
- netif_stop_queue(ifp->ndev);
unregister_netdev(ifp->ndev);
+ drvr->iflist[ifidx] = NULL;
+ if (ifidx == 0)
+ brcmf_cfg80211_detach(drvr->config);
free_netdev(ifp->ndev);
- drvr_priv->iflist[ifidx] = NULL;
- kfree(ifp);
}
}
-struct brcmf_pub *brcmf_attach(struct brcmf_bus *bus, uint bus_hdrlen)
+int brcmf_attach(uint bus_hdrlen, struct device *dev)
{
- struct brcmf_info *drvr_priv = NULL;
- struct net_device *ndev;
+ struct brcmf_pub *drvr = NULL;
+ int ret = 0;
brcmf_dbg(TRACE, "Enter\n");
- /* Allocate netdev, including space for private structure */
- ndev = alloc_netdev(sizeof(drvr_priv), "wlan%d", ether_setup);
- if (!ndev) {
- brcmf_dbg(ERROR, "OOM - alloc_netdev\n");
- goto fail;
- }
-
/* Allocate primary brcmf_info */
- drvr_priv = kzalloc(sizeof(struct brcmf_info), GFP_ATOMIC);
- if (!drvr_priv)
- goto fail;
-
- /*
- * Save the brcmf_info into the priv
- */
- memcpy(netdev_priv(ndev), &drvr_priv, sizeof(drvr_priv));
-
- if (brcmf_add_if(drvr_priv, 0, ndev, ndev->name, NULL, 0, 0) ==
- BRCMF_BAD_IF)
- goto fail;
-
- ndev->netdev_ops = NULL;
- mutex_init(&drvr_priv->proto_block);
+ drvr = kzalloc(sizeof(struct brcmf_pub), GFP_ATOMIC);
+ if (!drvr)
+ return -ENOMEM;
- /* Link to info module */
- drvr_priv->pub.info = drvr_priv;
+ mutex_init(&drvr->proto_block);
/* Link to bus module */
- drvr_priv->pub.bus = bus;
- drvr_priv->pub.hdrlen = bus_hdrlen;
+ drvr->hdrlen = bus_hdrlen;
+ drvr->bus_if = dev_get_drvdata(dev);
+ drvr->bus_if->drvr = drvr;
+ drvr->dev = dev;
/* Attach and link in the protocol */
- if (brcmf_proto_attach(&drvr_priv->pub) != 0) {
+ ret = brcmf_proto_attach(drvr);
+ if (ret != 0) {
brcmf_dbg(ERROR, "brcmf_prot_attach failed\n");
goto fail;
}
- /* Attach and link in the cfg80211 */
- drvr_priv->pub.config =
- brcmf_cfg80211_attach(ndev,
- brcmf_bus_get_device(bus),
- &drvr_priv->pub);
- if (drvr_priv->pub.config == NULL) {
- brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n");
- goto fail;
- }
-
- INIT_WORK(&drvr_priv->setmacaddr_work, _brcmf_set_mac_address);
- INIT_WORK(&drvr_priv->multicast_work, _brcmf_set_multicast_list);
+ INIT_WORK(&drvr->setmacaddr_work, _brcmf_set_mac_address);
+ INIT_WORK(&drvr->multicast_work, _brcmf_set_multicast_list);
- /*
- * Save the brcmf_info into the priv
- */
- memcpy(netdev_priv(ndev), &drvr_priv, sizeof(drvr_priv));
-
- return &drvr_priv->pub;
+ return ret;
fail:
- if (ndev)
- free_netdev(ndev);
- if (drvr_priv)
- brcmf_detach(&drvr_priv->pub);
+ brcmf_detach(dev);
- return NULL;
+ return ret;
}
-int brcmf_bus_start(struct brcmf_pub *drvr)
+int brcmf_bus_start(struct device *dev)
{
int ret = -1;
- struct brcmf_info *drvr_priv = drvr->info;
/* Room for "event_msgs" + '\0' + bitvec */
char iovbuf[BRCMF_EVENTING_MASK_LEN + 12];
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
brcmf_dbg(TRACE, "\n");
/* Bring up the bus */
- ret = brcmf_sdbrcm_bus_init(&drvr_priv->pub);
+ ret = bus_if->brcmf_bus_init(dev);
if (ret != 0) {
brcmf_dbg(ERROR, "brcmf_sdbrcm_bus_init failed %d\n", ret);
return ret;
}
/* If bus is not ready, can't come up */
- if (drvr_priv->pub.busstate != BRCMF_BUS_DATA) {
+ if (bus_if->state != BRCMF_BUS_DATA) {
brcmf_dbg(ERROR, "failed bus is not ready\n");
return -ENODEV;
}
@@ -1116,33 +1015,22 @@ int brcmf_bus_start(struct brcmf_pub *drvr)
drvr->pktfilter[0] = "100 0 0 0 0x01 0x00";
/* Bus is ready, do any protocol initialization */
- ret = brcmf_proto_init(&drvr_priv->pub);
+ ret = brcmf_proto_init(drvr);
if (ret < 0)
return ret;
return 0;
}
-static struct net_device_ops brcmf_netdev_ops_pri = {
- .ndo_open = brcmf_netdev_open,
- .ndo_stop = brcmf_netdev_stop,
- .ndo_get_stats = brcmf_netdev_get_stats,
- .ndo_do_ioctl = brcmf_netdev_ioctl_entry,
- .ndo_start_xmit = brcmf_netdev_start_xmit,
- .ndo_set_mac_address = brcmf_netdev_set_mac_address,
- .ndo_set_rx_mode = brcmf_netdev_set_multicast_list
-};
-
int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx)
{
- struct brcmf_info *drvr_priv = drvr->info;
struct net_device *ndev;
u8 temp_addr[ETH_ALEN] = {
0x00, 0x90, 0x4c, 0x11, 0x22, 0x33};
brcmf_dbg(TRACE, "ifidx %d\n", ifidx);
- ndev = drvr_priv->iflist[ifidx]->ndev;
+ ndev = drvr->iflist[ifidx]->ndev;
ndev->netdev_ops = &brcmf_netdev_ops_pri;
/*
@@ -1150,7 +1038,7 @@ int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx)
*/
if (ifidx != 0) {
/* for virtual interfaces use the primary MAC */
- memcpy(temp_addr, drvr_priv->pub.mac, ETH_ALEN);
+ memcpy(temp_addr, drvr->mac, ETH_ALEN);
}
@@ -1161,14 +1049,23 @@ int brcmf_net_attach(struct brcmf_pub *drvr, int ifidx)
- Locally Administered address */
}
- ndev->hard_header_len = ETH_HLEN + drvr_priv->pub.hdrlen;
+ ndev->hard_header_len = ETH_HLEN + drvr->hdrlen;
ndev->ethtool_ops = &brcmf_ethtool_ops;
- drvr_priv->pub.rxsz = ndev->mtu + ndev->hard_header_len +
- drvr_priv->pub.hdrlen;
+ drvr->rxsz = ndev->mtu + ndev->hard_header_len +
+ drvr->hdrlen;
memcpy(ndev->dev_addr, temp_addr, ETH_ALEN);
+ /* attach to cfg80211 for primary interface */
+ if (!ifidx) {
+ drvr->config = brcmf_cfg80211_attach(ndev, drvr->dev, drvr);
+ if (drvr->config == NULL) {
+ brcmf_dbg(ERROR, "wl_cfg80211_attach failed\n");
+ goto fail;
+ }
+ }
+
if (register_netdev(ndev) != 0) {
brcmf_dbg(ERROR, "couldn't register the net device\n");
goto fail;
@@ -1185,127 +1082,57 @@ fail:
static void brcmf_bus_detach(struct brcmf_pub *drvr)
{
- struct brcmf_info *drvr_priv;
-
- brcmf_dbg(TRACE, "Enter\n");
-
- if (drvr) {
- drvr_priv = drvr->info;
- if (drvr_priv) {
- /* Stop the protocol module */
- brcmf_proto_stop(&drvr_priv->pub);
-
- /* Stop the bus module */
- brcmf_sdbrcm_bus_stop(drvr_priv->pub.bus);
- }
- }
-}
-
-void brcmf_detach(struct brcmf_pub *drvr)
-{
- struct brcmf_info *drvr_priv;
-
brcmf_dbg(TRACE, "Enter\n");
if (drvr) {
- drvr_priv = drvr->info;
- if (drvr_priv) {
- struct brcmf_if *ifp;
- int i;
-
- for (i = 1; i < BRCMF_MAX_IFS; i++)
- if (drvr_priv->iflist[i])
- brcmf_del_if(drvr_priv, i);
-
- ifp = drvr_priv->iflist[0];
- if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
- rtnl_lock();
- brcmf_netdev_stop(ifp->ndev);
- rtnl_unlock();
- unregister_netdev(ifp->ndev);
- }
-
- cancel_work_sync(&drvr_priv->setmacaddr_work);
- cancel_work_sync(&drvr_priv->multicast_work);
-
- brcmf_bus_detach(drvr);
-
- if (drvr->prot)
- brcmf_proto_detach(drvr);
-
- brcmf_cfg80211_detach(drvr->config);
+ /* Stop the protocol module */
+ brcmf_proto_stop(drvr);
- free_netdev(ifp->ndev);
- kfree(ifp);
- kfree(drvr_priv);
- }
+ /* Stop the bus module */
+ drvr->bus_if->brcmf_bus_stop(drvr->dev);
}
}
-static void __exit brcmf_module_cleanup(void)
-{
- brcmf_dbg(TRACE, "Enter\n");
-
- brcmf_bus_unregister();
-}
-
-static int __init brcmf_module_init(void)
+void brcmf_detach(struct device *dev)
{
- int error;
+ int i;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_pub *drvr = bus_if->drvr;
brcmf_dbg(TRACE, "Enter\n");
- error = brcmf_bus_register();
-
- if (error) {
- brcmf_dbg(ERROR, "brcmf_bus_register failed\n");
- goto failed;
- }
- return 0;
-
-failed:
- return -EINVAL;
-}
-module_init(brcmf_module_init);
-module_exit(brcmf_module_cleanup);
+ /* make sure primary interface removed last */
+ for (i = BRCMF_MAX_IFS-1; i > -1; i--)
+ if (drvr->iflist[i])
+ brcmf_del_if(drvr, i);
-int brcmf_os_proto_block(struct brcmf_pub *drvr)
-{
- struct brcmf_info *drvr_priv = drvr->info;
-
- if (drvr_priv) {
- mutex_lock(&drvr_priv->proto_block);
- return 1;
- }
- return 0;
-}
+ cancel_work_sync(&drvr->setmacaddr_work);
+ cancel_work_sync(&drvr->multicast_work);
-int brcmf_os_proto_unblock(struct brcmf_pub *drvr)
-{
- struct brcmf_info *drvr_priv = drvr->info;
+ brcmf_bus_detach(drvr);
- if (drvr_priv) {
- mutex_unlock(&drvr_priv->proto_block);
- return 1;
- }
+ if (drvr->prot)
+ brcmf_proto_detach(drvr);
- return 0;
+ bus_if->drvr = NULL;
+ kfree(drvr);
}
-static int brcmf_get_pend_8021x_cnt(struct brcmf_info *drvr_priv)
+static int brcmf_get_pend_8021x_cnt(struct brcmf_pub *drvr)
{
- return atomic_read(&drvr_priv->pend_8021x_cnt);
+ return atomic_read(&drvr->pend_8021x_cnt);
}
#define MAX_WAIT_FOR_8021X_TX 10
int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
{
- struct brcmf_info *drvr_priv = *(struct brcmf_info **)netdev_priv(ndev);
+ struct brcmf_if *ifp = netdev_priv(ndev);
+ struct brcmf_pub *drvr = ifp->drvr;
int timeout = 10 * HZ / 1000;
int ntimes = MAX_WAIT_FOR_8021X_TX;
- int pend = brcmf_get_pend_8021x_cnt(drvr_priv);
+ int pend = brcmf_get_pend_8021x_cnt(drvr);
while (ntimes && pend) {
if (pend) {
@@ -1314,7 +1141,7 @@ int brcmf_netdev_wait_pend8021x(struct net_device *ndev)
set_current_state(TASK_RUNNING);
ntimes--;
}
- pend = brcmf_get_pend_8021x_cnt(drvr_priv);
+ pend = brcmf_get_pend_8021x_cnt(drvr);
}
return pend;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
index 4ee1ea8..6bc4425 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_proto.h
@@ -41,17 +41,10 @@ extern void brcmf_proto_stop(struct brcmf_pub *drvr);
extern void brcmf_proto_hdrpush(struct brcmf_pub *, int ifidx,
struct sk_buff *txp);
-/* Remove any protocol-specific data header. */
-extern int brcmf_proto_hdrpull(struct brcmf_pub *, int *ifidx,
- struct sk_buff *rxp);
-
/* Use protocol to issue command to dongle */
extern int brcmf_proto_dcmd(struct brcmf_pub *drvr, int ifidx,
struct brcmf_dcmd *dcmd, int len);
-/* Update local copy of dongle statistics */
-extern void brcmf_proto_dstats(struct brcmf_pub *drvr);
-
extern int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr);
extern int brcmf_proto_cdc_set_dcmd(struct brcmf_pub *drvr, int ifidx,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 313b8bf..f7eeee1 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -28,6 +28,7 @@
#include <linux/semaphore.h>
#include <linux/firmware.h>
#include <linux/module.h>
+#include <linux/bcma/bcma.h>
#include <asm/unaligned.h>
#include <defs.h>
#include <brcmu_wifi.h>
@@ -35,6 +36,7 @@
#include <brcm_hw_ids.h>
#include <soc.h>
#include "sdio_host.h"
+#include "sdio_chip.h"
#define DCMD_RESP_TIMEOUT 2000 /* In milliseconds */
@@ -85,11 +87,8 @@ struct rte_console {
#endif /* BCMDBG */
#include <chipcommon.h>
-#include "dhd.h"
#include "dhd_bus.h"
-#include "dhd_proto.h"
#include "dhd_dbg.h"
-#include <bcmchip.h>
#define TXQLEN 2048 /* bulk tx queue length */
#define TXHI (TXQLEN - 256) /* turn on flow control above TXHI */
@@ -134,33 +133,6 @@ struct rte_console {
/* Force no backplane reset */
#define SBSDIO_DEVCTL_RST_NOBPRESET 0x20
-/* SBSDIO_FUNC1_CHIPCLKCSR */
-
-/* Force ALP request to backplane */
-#define SBSDIO_FORCE_ALP 0x01
-/* Force HT request to backplane */
-#define SBSDIO_FORCE_HT 0x02
-/* Force ILP request to backplane */
-#define SBSDIO_FORCE_ILP 0x04
-/* Make ALP ready (power up xtal) */
-#define SBSDIO_ALP_AVAIL_REQ 0x08
-/* Make HT ready (power up PLL) */
-#define SBSDIO_HT_AVAIL_REQ 0x10
-/* Squelch clock requests from HW */
-#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20
-/* Status: ALP is ready */
-#define SBSDIO_ALP_AVAIL 0x40
-/* Status: HT is ready */
-#define SBSDIO_HT_AVAIL 0x80
-
-#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
-#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
-#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
-#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
-
-#define SBSDIO_CLKAV(regval, alponly) \
- (SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
-
/* direct(mapped) cis space */
/* MAPPED common CIS address */
@@ -335,49 +307,16 @@ struct rte_console {
/* Flags for SDH calls */
#define F2SYNC (SDIO_REQ_4BYTE | SDIO_REQ_FIXED)
-/* sbimstate */
-#define SBIM_IBE 0x20000 /* inbanderror */
-#define SBIM_TO 0x40000 /* timeout */
-#define SBIM_BY 0x01800000 /* busy (sonics >= 2.3) */
-#define SBIM_RJ 0x02000000 /* reject (sonics >= 2.3) */
-
-/* sbtmstatelow */
-
-/* reset */
-#define SBTML_RESET 0x0001
-/* reject field */
-#define SBTML_REJ_MASK 0x0006
-/* reject */
-#define SBTML_REJ 0x0002
-/* temporary reject, for error recovery */
-#define SBTML_TMPREJ 0x0004
-
-/* Shift to locate the SI control flags in sbtml */
-#define SBTML_SICF_SHIFT 16
-
-/* sbtmstatehigh */
-#define SBTMH_SERR 0x0001 /* serror */
-#define SBTMH_INT 0x0002 /* interrupt */
-#define SBTMH_BUSY 0x0004 /* busy */
-#define SBTMH_TO 0x0020 /* timeout (sonics >= 2.3) */
-
-/* Shift to locate the SI status flags in sbtmh */
-#define SBTMH_SISF_SHIFT 16
-
-/* sbidlow */
-#define SBIDL_INIT 0x80 /* initiator */
-
-/* sbidhigh */
-#define SBIDH_RC_MASK 0x000f /* revision code */
-#define SBIDH_RCE_MASK 0x7000 /* revision code extension field */
-#define SBIDH_RCE_SHIFT 8
-#define SBCOREREV(sbidh) \
- ((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | \
- ((sbidh) & SBIDH_RC_MASK))
-#define SBIDH_CC_MASK 0x8ff0 /* core code */
-#define SBIDH_CC_SHIFT 4
-#define SBIDH_VC_MASK 0xffff0000 /* vendor code */
-#define SBIDH_VC_SHIFT 16
+#define BRCMFMAC_FW_NAME "brcm/brcmfmac.bin"
+#define BRCMFMAC_NV_NAME "brcm/brcmfmac.txt"
+MODULE_FIRMWARE(BRCMFMAC_FW_NAME);
+MODULE_FIRMWARE(BRCMFMAC_NV_NAME);
+
+#define BRCMF_IDLE_IMMEDIATE (-1) /* Enter idle immediately */
+#define BRCMF_IDLE_ACTIVE 0 /* Do not request any SD clock change
+ * when idle
+ */
+#define BRCMF_IDLE_INTERVAL 1
/*
* Conversion of 802.1D priority to precedence level
@@ -388,17 +327,6 @@ static uint prio2prec(u32 prio)
(prio^2) : prio;
}
-/*
- * Core reg address translation.
- * Both macro's returns a 32 bits byte address on the backplane bus.
- */
-#define CORE_CC_REG(base, field) \
- (base + offsetof(struct chipcregs, field))
-#define CORE_BUS_REG(base, field) \
- (base + offsetof(struct sdpcmd_regs, field))
-#define CORE_SB(base, field) \
- (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
-
/* core registers */
struct sdpcmd_regs {
u32 corecontrol; /* 0x00, rev8 */
@@ -524,25 +452,8 @@ struct sdpcm_shared_le {
/* misc chip info needed by some of the routines */
-struct chip_info {
- u32 chip;
- u32 chiprev;
- u32 cccorebase;
- u32 ccrev;
- u32 cccaps;
- u32 buscorebase; /* 32 bits backplane bus address */
- u32 buscorerev;
- u32 buscoretype;
- u32 ramcorebase;
- u32 armcorebase;
- u32 pmurev;
- u32 ramsize;
-};
-
/* Private data for SDIO bus interaction */
-struct brcmf_bus {
- struct brcmf_pub *drvr;
-
+struct brcmf_sdio {
struct brcmf_sdio_dev *sdiodev; /* sdio device handler */
struct chip_info *ci; /* Chip info struct */
char *vars; /* Variables (from CIS and/or other) */
@@ -574,7 +485,7 @@ struct brcmf_bus {
uint txminmax;
struct sk_buff *glomd; /* Packet containing glomming descriptor */
- struct sk_buff *glom; /* Packet chain for glommed superframe */
+ struct sk_buff_head glom; /* Packet list for glommed superframe */
uint glomerr; /* Glom packet read errors */
u8 *rxbuf; /* Buffer for receiving control packets */
@@ -637,6 +548,13 @@ struct brcmf_bus {
uint f2rxdata; /* Number of frame data reads */
uint f2txdata; /* Number of f2 frame writes */
uint f1regdata; /* Number of f1 register accesses */
+ uint tickcnt; /* Number of times the watchdog has been scheduled */
+ unsigned long tx_ctlerrs; /* Err of sending ctrl frames */
+ unsigned long tx_ctlpkts; /* Ctrl frames sent to dongle */
+ unsigned long rx_ctlerrs; /* Err of processing rx ctrl frames */
+ unsigned long rx_ctlpkts; /* Ctrl frames processed from dongle */
+ unsigned long rx_readahead_cnt; /* Number of packets where header
+ * read-ahead was used. */
u8 *ctrl_frame_buf;
u32 ctrl_frame_len;
@@ -657,50 +575,10 @@ struct brcmf_bus {
struct semaphore sdsem;
- const char *fw_name;
const struct firmware *firmware;
- const char *nv_name;
u32 fw_ptr;
-};
-struct sbconfig {
- u32 PAD[2];
- u32 sbipsflag; /* initiator port ocp slave flag */
- u32 PAD[3];
- u32 sbtpsflag; /* target port ocp slave flag */
- u32 PAD[11];
- u32 sbtmerrloga; /* (sonics >= 2.3) */
- u32 PAD;
- u32 sbtmerrlog; /* (sonics >= 2.3) */
- u32 PAD[3];
- u32 sbadmatch3; /* address match3 */
- u32 PAD;
- u32 sbadmatch2; /* address match2 */
- u32 PAD;
- u32 sbadmatch1; /* address match1 */
- u32 PAD[7];
- u32 sbimstate; /* initiator agent state */
- u32 sbintvec; /* interrupt mask */
- u32 sbtmstatelow; /* target state */
- u32 sbtmstatehigh; /* target state */
- u32 sbbwa0; /* bandwidth allocation table0 */
- u32 PAD;
- u32 sbimconfiglow; /* initiator configuration */
- u32 sbimconfighigh; /* initiator configuration */
- u32 sbadmatch0; /* address match0 */
- u32 PAD;
- u32 sbtmconfiglow; /* target configuration */
- u32 sbtmconfighigh; /* target configuration */
- u32 sbbconfig; /* broadcast configuration */
- u32 PAD;
- u32 sbbstate; /* broadcast state */
- u32 PAD[3];
- u32 sbactcnfg; /* activate configuration */
- u32 PAD[3];
- u32 sbflagst; /* current sbflags */
- u32 PAD[3];
- u32 sbidlow; /* identification */
- u32 sbidhigh; /* identification */
+ bool txoff; /* Transmit flow-controlled */
};
/* clkstate */
@@ -737,7 +615,7 @@ static void pkt_align(struct sk_buff *p, int len, int align)
}
/* To check if there's window offered */
-static bool data_ok(struct brcmf_bus *bus)
+static bool data_ok(struct brcmf_sdio *bus)
{
return (u8)(bus->tx_max - bus->tx_seq) != 0 &&
((u8)(bus->tx_max - bus->tx_seq) & 0x80) == 0;
@@ -748,12 +626,14 @@ static bool data_ok(struct brcmf_bus *bus)
* addresses on the 32-bit backplane bus.
*/
static void
-r_sdreg32(struct brcmf_bus *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
+r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
{
+ u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
*retryvar = 0;
do {
*regvar = brcmf_sdcard_reg_read(bus->sdiodev,
- bus->ci->buscorebase + reg_offset, sizeof(u32));
+ bus->ci->c_inf[idx].base + reg_offset,
+ sizeof(u32));
} while (brcmf_sdcard_regfail(bus->sdiodev) &&
(++(*retryvar) <= retry_limit));
if (*retryvar) {
@@ -766,12 +646,13 @@ r_sdreg32(struct brcmf_bus *bus, u32 *regvar, u32 reg_offset, u32 *retryvar)
}
static void
-w_sdreg32(struct brcmf_bus *bus, u32 regval, u32 reg_offset, u32 *retryvar)
+w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset, u32 *retryvar)
{
+ u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
*retryvar = 0;
do {
brcmf_sdcard_reg_write(bus->sdiodev,
- bus->ci->buscorebase + reg_offset,
+ bus->ci->c_inf[idx].base + reg_offset,
sizeof(u32), regval);
} while (brcmf_sdcard_regfail(bus->sdiodev) &&
(++(*retryvar) <= retry_limit));
@@ -790,14 +671,14 @@ w_sdreg32(struct brcmf_bus *bus, u32 regval, u32 reg_offset, u32 *retryvar)
/* Packet free applicable unconditionally for sdio and sdspi.
* Conditional if bufpool was present for gspi bus.
*/
-static void brcmf_sdbrcm_pktfree2(struct brcmf_bus *bus, struct sk_buff *pkt)
+static void brcmf_sdbrcm_pktfree2(struct brcmf_sdio *bus, struct sk_buff *pkt)
{
if (bus->usebufpool)
brcmu_pkt_buf_free_skb(pkt);
}
/* Turn backplane clock on or off */
-static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
+static int brcmf_sdbrcm_htclk(struct brcmf_sdio *bus, bool on, bool pendok)
{
int err;
u8 clkctl, clkreq, devctl;
@@ -812,10 +693,6 @@ static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
clkreq =
bus->alp_only ? SBSDIO_ALP_AVAIL_REQ : SBSDIO_HT_AVAIL_REQ;
- if ((bus->ci->chip == BCM4329_CHIP_ID)
- && (bus->ci->chiprev == 0))
- clkreq |= SBSDIO_FORCE_ALP;
-
brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
SBSDIO_FUNC1_CHIPCLKCSR, clkreq, &err);
if (err) {
@@ -823,14 +700,6 @@ static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
return -EBADE;
}
- if (pendok && ((bus->ci->buscoretype == PCMCIA_CORE_ID)
- && (bus->ci->buscorerev == 9))) {
- u32 dummy, retries;
- r_sdreg32(bus, &dummy,
- offsetof(struct sdpcmd_regs, clockctlstatus),
- &retries);
- }
-
/* Check current status */
clkctl = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
SBSDIO_FUNC1_CHIPCLKCSR, &err);
@@ -930,7 +799,7 @@ static int brcmf_sdbrcm_htclk(struct brcmf_bus *bus, bool on, bool pendok)
}
/* Change idle/active SD state */
-static int brcmf_sdbrcm_sdclk(struct brcmf_bus *bus, bool on)
+static int brcmf_sdbrcm_sdclk(struct brcmf_sdio *bus, bool on)
{
brcmf_dbg(TRACE, "Enter\n");
@@ -943,7 +812,7 @@ static int brcmf_sdbrcm_sdclk(struct brcmf_bus *bus, bool on)
}
/* Transition SD and backplane clock readiness */
-static int brcmf_sdbrcm_clkctl(struct brcmf_bus *bus, uint target, bool pendok)
+static int brcmf_sdbrcm_clkctl(struct brcmf_sdio *bus, uint target, bool pendok)
{
#ifdef BCMDBG
uint oldstate = bus->clkstate;
@@ -999,7 +868,7 @@ static int brcmf_sdbrcm_clkctl(struct brcmf_bus *bus, uint target, bool pendok)
return 0;
}
-static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
+static int brcmf_sdbrcm_bussleep(struct brcmf_sdio *bus, bool sleep)
{
uint retries = 0;
@@ -1034,11 +903,9 @@ static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
SBSDIO_FORCE_HW_CLKREQ_OFF, NULL);
/* Isolate the bus */
- if (bus->ci->chip != BCM4329_CHIP_ID) {
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL,
- SBSDIO_DEVCTL_PADS_ISO, NULL);
- }
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_DEVICE_CTL,
+ SBSDIO_DEVCTL_PADS_ISO, NULL);
/* Change state */
bus->sleeping = true;
@@ -1049,13 +916,6 @@ static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
- /* Force pad isolation off if possible
- (in case power never toggled) */
- if ((bus->ci->buscoretype == PCMCIA_CORE_ID)
- && (bus->ci->buscorerev >= 10))
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_DEVICE_CTL, 0, NULL);
-
/* Make sure the controller has the bus up */
brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
@@ -1080,13 +940,13 @@ static int brcmf_sdbrcm_bussleep(struct brcmf_bus *bus, bool sleep)
return 0;
}
-static void bus_wake(struct brcmf_bus *bus)
+static void bus_wake(struct brcmf_sdio *bus)
{
if (bus->sleeping)
brcmf_sdbrcm_bussleep(bus, false);
}
-static u32 brcmf_sdbrcm_hostmail(struct brcmf_bus *bus)
+static u32 brcmf_sdbrcm_hostmail(struct brcmf_sdio *bus)
{
u32 intstatus = 0;
u32 hmb_data;
@@ -1162,7 +1022,7 @@ static u32 brcmf_sdbrcm_hostmail(struct brcmf_bus *bus)
return intstatus;
}
-static void brcmf_sdbrcm_rxfail(struct brcmf_bus *bus, bool abort, bool rtx)
+static void brcmf_sdbrcm_rxfail(struct brcmf_sdio *bus, bool abort, bool rtx)
{
uint retries = 0;
u16 lastrbc;
@@ -1219,16 +1079,61 @@ static void brcmf_sdbrcm_rxfail(struct brcmf_bus *bus, bool abort, bool rtx)
/* If we can't reach the device, signal failure */
if (err || brcmf_sdcard_regfail(bus->sdiodev))
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
+}
+
+/* copy a buffer into a pkt buffer chain */
+static uint brcmf_sdbrcm_glom_from_buf(struct brcmf_sdio *bus, uint len)
+{
+ uint n, ret = 0;
+ struct sk_buff *p;
+ u8 *buf;
+
+ buf = bus->dataptr;
+
+ /* copy the data */
+ skb_queue_walk(&bus->glom, p) {
+ n = min_t(uint, p->len, len);
+ memcpy(p->data, buf, n);
+ buf += n;
+ len -= n;
+ ret += n;
+ if (!len)
+ break;
+ }
+
+ return ret;
+}
+
+/* return total length of buffer chain */
+static uint brcmf_sdbrcm_glom_len(struct brcmf_sdio *bus)
+{
+ struct sk_buff *p;
+ uint total;
+
+ total = 0;
+ skb_queue_walk(&bus->glom, p)
+ total += p->len;
+ return total;
}
-static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
+static void brcmf_sdbrcm_free_glom(struct brcmf_sdio *bus)
+{
+ struct sk_buff *cur, *next;
+
+ skb_queue_walk_safe(&bus->glom, cur, next) {
+ skb_unlink(cur, &bus->glom);
+ brcmu_pkt_buf_free_skb(cur);
+ }
+}
+
+static u8 brcmf_sdbrcm_rxglom(struct brcmf_sdio *bus, u8 rxseq)
{
u16 dlen, totlen;
u8 *dptr, num = 0;
u16 sublen, check;
- struct sk_buff *pfirst, *plast, *pnext, *save_pfirst;
+ struct sk_buff *pfirst, *pnext;
int errcode;
u8 chan, seq, doff, sfdoff;
@@ -1240,11 +1145,12 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
/* If packets, issue read(s) and send up packet chain */
/* Return sequence numbers consumed? */
- brcmf_dbg(TRACE, "start: glomd %p glom %p\n", bus->glomd, bus->glom);
+ brcmf_dbg(TRACE, "start: glomd %p glom %p\n",
+ bus->glomd, skb_peek(&bus->glom));
/* If there's a descriptor, generate the packet chain */
if (bus->glomd) {
- pfirst = plast = pnext = NULL;
+ pfirst = pnext = NULL;
dlen = (u16) (bus->glomd->len);
dptr = bus->glomd->data;
if (!dlen || (dlen & 1)) {
@@ -1287,12 +1193,7 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
num, sublen);
break;
}
- if (!pfirst) {
- pfirst = plast = pnext;
- } else {
- plast->next = pnext;
- plast = pnext;
- }
+ skb_queue_tail(&bus->glom, pnext);
/* Adhere to start alignment requirements */
pkt_align(pnext, sublen, BRCMF_SDALIGN);
@@ -1308,12 +1209,9 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
brcmf_dbg(GLOM, "glomdesc mismatch: nextlen %d glomdesc %d rxseq %d\n",
bus->nextlen, totlen, rxseq);
}
- bus->glom = pfirst;
pfirst = pnext = NULL;
} else {
- if (pfirst)
- brcmu_pkt_buf_free_skb(pfirst);
- bus->glom = NULL;
+ brcmf_sdbrcm_free_glom(bus);
num = 0;
}
@@ -1325,37 +1223,33 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
/* Ok -- either we just generated a packet chain,
or had one from before */
- if (bus->glom) {
+ if (!skb_queue_empty(&bus->glom)) {
if (BRCMF_GLOM_ON()) {
brcmf_dbg(GLOM, "try superframe read, packet chain:\n");
- for (pnext = bus->glom; pnext; pnext = pnext->next) {
+ skb_queue_walk(&bus->glom, pnext) {
brcmf_dbg(GLOM, " %p: %p len 0x%04x (%d)\n",
pnext, (u8 *) (pnext->data),
pnext->len, pnext->len);
}
}
- pfirst = bus->glom;
- dlen = (u16) brcmu_pkttotlen(pfirst);
+ pfirst = skb_peek(&bus->glom);
+ dlen = (u16) brcmf_sdbrcm_glom_len(bus);
/* Do an SDIO read for the superframe. Configurable iovar to
* read directly into the chained packet, or allocate a large
* packet and copy into the chain.
*/
if (usechain) {
- errcode = brcmf_sdcard_recv_buf(bus->sdiodev,
+ errcode = brcmf_sdcard_recv_chain(bus->sdiodev,
bus->sdiodev->sbwad,
- SDIO_FUNC_2,
- F2SYNC, (u8 *) pfirst->data, dlen,
- pfirst);
+ SDIO_FUNC_2, F2SYNC, &bus->glom);
} else if (bus->dataptr) {
errcode = brcmf_sdcard_recv_buf(bus->sdiodev,
bus->sdiodev->sbwad,
- SDIO_FUNC_2,
- F2SYNC, bus->dataptr, dlen,
- NULL);
- sublen = (u16) brcmu_pktfrombuf(pfirst, 0, dlen,
- bus->dataptr);
+ SDIO_FUNC_2, F2SYNC,
+ bus->dataptr, dlen);
+ sublen = (u16) brcmf_sdbrcm_glom_from_buf(bus, dlen);
if (sublen != dlen) {
brcmf_dbg(ERROR, "FAILED TO COPY, dlen %d sublen %d\n",
dlen, sublen);
@@ -1373,16 +1267,15 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
if (errcode < 0) {
brcmf_dbg(ERROR, "glom read of %d bytes failed: %d\n",
dlen, errcode);
- bus->drvr->rx_errors++;
+ bus->sdiodev->bus_if->dstats.rx_errors++;
if (bus->glomerr++ < 3) {
brcmf_sdbrcm_rxfail(bus, true, true);
} else {
bus->glomerr = 0;
brcmf_sdbrcm_rxfail(bus, true, false);
- brcmu_pkt_buf_free_skb(bus->glom);
bus->rxglomfail++;
- bus->glom = NULL;
+ brcmf_sdbrcm_free_glom(bus);
}
return 0;
}
@@ -1455,10 +1348,14 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
/* Remove superframe header, remember offset */
skb_pull(pfirst, doff);
sfdoff = doff;
+ num = 0;
/* Validate all the subframe headers */
- for (num = 0, pnext = pfirst; pnext && !errcode;
- num++, pnext = pnext->next) {
+ skb_queue_walk(&bus->glom, pnext) {
+ /* leave when invalid subframe is found */
+ if (errcode)
+ break;
+
dptr = (u8 *) (pnext->data);
dlen = (u16) (pnext->len);
sublen = get_unaligned_le16(dptr);
@@ -1491,6 +1388,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
num, doff, sublen, SDPCM_HDRLEN);
errcode = -1;
}
+ /* increase the subframe count */
+ num++;
}
if (errcode) {
@@ -1503,23 +1402,16 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
} else {
bus->glomerr = 0;
brcmf_sdbrcm_rxfail(bus, true, false);
- brcmu_pkt_buf_free_skb(bus->glom);
bus->rxglomfail++;
- bus->glom = NULL;
+ brcmf_sdbrcm_free_glom(bus);
}
bus->nextlen = 0;
return 0;
}
/* Basic SD framing looks ok - process each packet (header) */
- save_pfirst = pfirst;
- bus->glom = NULL;
- plast = NULL;
-
- for (num = 0; pfirst; rxseq++, pfirst = pnext) {
- pnext = pfirst->next;
- pfirst->next = NULL;
+ skb_queue_walk_safe(&bus->glom, pfirst, pnext) {
dptr = (u8 *) (pfirst->data);
sublen = get_unaligned_le16(dptr);
chan = SDPCM_PACKET_CHANNEL(&dptr[SDPCM_FRAMETAG_LEN]);
@@ -1539,6 +1431,8 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
bus->rx_badseq++;
rxseq = seq;
}
+ rxseq++;
+
#ifdef BCMDBG
if (BRCMF_BYTES_ON() && BRCMF_DATA_ON()) {
printk(KERN_DEBUG "Rx Subframe Data:\n");
@@ -1551,36 +1445,22 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
skb_pull(pfirst, doff);
if (pfirst->len == 0) {
+ skb_unlink(pfirst, &bus->glom);
brcmu_pkt_buf_free_skb(pfirst);
- if (plast)
- plast->next = pnext;
- else
- save_pfirst = pnext;
-
continue;
- } else if (brcmf_proto_hdrpull(bus->drvr, &ifidx,
- pfirst) != 0) {
+ } else if (brcmf_proto_hdrpull(bus->sdiodev->dev,
+ &ifidx, pfirst) != 0) {
brcmf_dbg(ERROR, "rx protocol error\n");
- bus->drvr->rx_errors++;
+ bus->sdiodev->bus_if->dstats.rx_errors++;
+ skb_unlink(pfirst, &bus->glom);
brcmu_pkt_buf_free_skb(pfirst);
- if (plast)
- plast->next = pnext;
- else
- save_pfirst = pnext;
-
continue;
}
- /* this packet will go up, link back into
- chain and count it */
- pfirst->next = pnext;
- plast = pfirst;
- num++;
-
#ifdef BCMDBG
if (BRCMF_GLOM_ON()) {
brcmf_dbg(GLOM, "subframe %d to stack, %p (%p/%d) nxt/lnk %p/%p\n",
- num, pfirst, pfirst->data,
+ bus->glom.qlen, pfirst, pfirst->data,
pfirst->len, pfirst->next,
pfirst->prev);
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET,
@@ -1589,19 +1469,20 @@ static u8 brcmf_sdbrcm_rxglom(struct brcmf_bus *bus, u8 rxseq)
}
#endif /* BCMDBG */
}
- if (num) {
+ /* send any remaining packets up */
+ if (bus->glom.qlen) {
up(&bus->sdsem);
- brcmf_rx_frame(bus->drvr, ifidx, save_pfirst, num);
+ brcmf_rx_frame(bus->sdiodev->dev, ifidx, &bus->glom);
down(&bus->sdsem);
}
bus->rxglomframes++;
- bus->rxglompkts += num;
+ bus->rxglompkts += bus->glom.qlen;
}
return num;
}
-static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_bus *bus, uint *condition,
+static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_sdio *bus, uint *condition,
bool *pending)
{
DECLARE_WAITQUEUE(wait, current);
@@ -1623,7 +1504,7 @@ static int brcmf_sdbrcm_dcmd_resp_wait(struct brcmf_bus *bus, uint *condition,
return timeout;
}
-static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_sdio *bus)
{
if (waitqueue_active(&bus->dcmd_resp_wait))
wake_up_interruptible(&bus->dcmd_resp_wait);
@@ -1631,7 +1512,7 @@ static int brcmf_sdbrcm_dcmd_resp_wake(struct brcmf_bus *bus)
return 0;
}
static void
-brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff)
+brcmf_sdbrcm_read_control(struct brcmf_sdio *bus, u8 *hdr, uint len, uint doff)
{
uint rdlen, pad;
@@ -1657,7 +1538,7 @@ brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff)
if (bus->roundup && bus->blocksize && (rdlen > bus->blocksize)) {
pad = bus->blocksize - (rdlen % bus->blocksize);
if ((pad <= bus->roundup) && (pad < bus->blocksize) &&
- ((len + pad) < bus->drvr->maxctl))
+ ((len + pad) < bus->sdiodev->bus_if->maxctl))
rdlen += pad;
} else if (rdlen % BRCMF_SDALIGN) {
rdlen += BRCMF_SDALIGN - (rdlen % BRCMF_SDALIGN);
@@ -1668,18 +1549,18 @@ brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff)
rdlen = roundup(rdlen, ALIGNMENT);
/* Drop if the read is too big or it exceeds our maximum */
- if ((rdlen + BRCMF_FIRSTREAD) > bus->drvr->maxctl) {
+ if ((rdlen + BRCMF_FIRSTREAD) > bus->sdiodev->bus_if->maxctl) {
brcmf_dbg(ERROR, "%d-byte control read exceeds %d-byte buffer\n",
- rdlen, bus->drvr->maxctl);
- bus->drvr->rx_errors++;
+ rdlen, bus->sdiodev->bus_if->maxctl);
+ bus->sdiodev->bus_if->dstats.rx_errors++;
brcmf_sdbrcm_rxfail(bus, false, false);
goto done;
}
- if ((len - doff) > bus->drvr->maxctl) {
+ if ((len - doff) > bus->sdiodev->bus_if->maxctl) {
brcmf_dbg(ERROR, "%d-byte ctl frame (%d-byte ctl data) exceeds %d-byte limit\n",
- len, len - doff, bus->drvr->maxctl);
- bus->drvr->rx_errors++;
+ len, len - doff, bus->sdiodev->bus_if->maxctl);
+ bus->sdiodev->bus_if->dstats.rx_errors++;
bus->rx_toolong++;
brcmf_sdbrcm_rxfail(bus, false, false);
goto done;
@@ -1689,8 +1570,7 @@ brcmf_sdbrcm_read_control(struct brcmf_bus *bus, u8 *hdr, uint len, uint doff)
sdret = brcmf_sdcard_recv_buf(bus->sdiodev,
bus->sdiodev->sbwad,
SDIO_FUNC_2,
- F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen,
- NULL);
+ F2SYNC, (bus->rxctl + BRCMF_FIRSTREAD), rdlen);
bus->f2rxdata++;
/* Control frame failures need retransmission */
@@ -1721,7 +1601,7 @@ done:
}
/* Pad read to blocksize for efficiency */
-static void brcmf_pad(struct brcmf_bus *bus, u16 *pad, u16 *rdlen)
+static void brcmf_pad(struct brcmf_sdio *bus, u16 *pad, u16 *rdlen)
{
if (bus->roundup && bus->blocksize && *rdlen > bus->blocksize) {
*pad = bus->blocksize - (*rdlen % bus->blocksize);
@@ -1734,7 +1614,7 @@ static void brcmf_pad(struct brcmf_bus *bus, u16 *pad, u16 *rdlen)
}
static void
-brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen,
+brcmf_alloc_pkt_and_read(struct brcmf_sdio *bus, u16 rdlen,
struct sk_buff **pkt, u8 **rxbuf)
{
int sdret; /* Return code from calls */
@@ -1746,16 +1626,15 @@ brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen,
pkt_align(*pkt, rdlen, BRCMF_SDALIGN);
*rxbuf = (u8 *) ((*pkt)->data);
/* Read the entire frame */
- sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC,
- *rxbuf, rdlen, *pkt);
+ sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC, *pkt);
bus->f2rxdata++;
if (sdret < 0) {
brcmf_dbg(ERROR, "(nextlen): read %d bytes failed: %d\n",
rdlen, sdret);
brcmu_pkt_buf_free_skb(*pkt);
- bus->drvr->rx_errors++;
+ bus->sdiodev->bus_if->dstats.rx_errors++;
/* Force retry w/normal header read.
* Don't attempt NAK for
* gSPI
@@ -1767,7 +1646,7 @@ brcmf_alloc_pkt_and_read(struct brcmf_bus *bus, u16 rdlen,
/* Checks the header */
static int
-brcmf_check_rxbuf(struct brcmf_bus *bus, struct sk_buff *pkt, u8 *rxbuf,
+brcmf_check_rxbuf(struct brcmf_sdio *bus, struct sk_buff *pkt, u8 *rxbuf,
u8 rxseq, u16 nextlen, u16 *len)
{
u16 check;
@@ -1823,7 +1702,7 @@ fail:
/* Return true if there may be more frames to read */
static uint
-brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
+brcmf_sdbrcm_readframes(struct brcmf_sdio *bus, uint maxframes, bool *finished)
{
u16 len, check; /* Extracted hardware header fields */
u8 chan, seq, doff; /* Extracted software header fields */
@@ -1846,14 +1725,15 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
*finished = false;
for (rxseq = bus->rx_seq, rxleft = maxframes;
- !bus->rxskip && rxleft && bus->drvr->busstate != BRCMF_BUS_DOWN;
+ !bus->rxskip && rxleft &&
+ bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN;
rxseq++, rxleft--) {
/* Handle glomming separately */
- if (bus->glom || bus->glomd) {
+ if (bus->glomd || !skb_queue_empty(&bus->glom)) {
u8 cnt;
brcmf_dbg(GLOM, "calling rxglom: glomd %p, glom %p\n",
- bus->glomd, bus->glom);
+ bus->glomd, skb_peek(&bus->glom));
cnt = brcmf_sdbrcm_rxglom(bus, rxseq);
brcmf_dbg(GLOM, "rxglom returned %d\n", cnt);
rxseq += cnt - 1;
@@ -1905,7 +1785,7 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
bus->nextlen = 0;
}
- bus->drvr->rx_readahead_cnt++;
+ bus->rx_readahead_cnt++;
/* Handle Flow Control */
fcbits = SDPCM_FCMASK_VALUE(
@@ -1976,7 +1856,7 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
/* Read frame header (hardware and software) */
sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, bus->rxhdr,
- BRCMF_FIRSTREAD, NULL);
+ BRCMF_FIRSTREAD);
bus->f2rxhdrs++;
if (sdret < 0) {
@@ -2103,7 +1983,7 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
/* Too long -- skip this frame */
brcmf_dbg(ERROR, "too long: len %d rdlen %d\n",
len, rdlen);
- bus->drvr->rx_errors++;
+ bus->sdiodev->bus_if->dstats.rx_errors++;
bus->rx_toolong++;
brcmf_sdbrcm_rxfail(bus, false, false);
continue;
@@ -2115,7 +1995,7 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
/* Give up on data, request rtx of events */
brcmf_dbg(ERROR, "brcmu_pkt_buf_get_skb failed: rdlen %d chan %d\n",
rdlen, chan);
- bus->drvr->rx_dropped++;
+ bus->sdiodev->bus_if->dstats.rx_dropped++;
brcmf_sdbrcm_rxfail(bus, false, RETRYCHAN(chan));
continue;
}
@@ -2125,9 +2005,8 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
pkt_align(pkt, rdlen, BRCMF_SDALIGN);
/* Read the remaining frame data */
- sdret = brcmf_sdcard_recv_buf(bus->sdiodev, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, ((u8 *) (pkt->data)),
- rdlen, pkt);
+ sdret = brcmf_sdcard_recv_pkt(bus->sdiodev, bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC, pkt);
bus->f2rxdata++;
if (sdret < 0) {
@@ -2136,7 +2015,7 @@ brcmf_sdbrcm_readframes(struct brcmf_bus *bus, uint maxframes, bool *finished)
: ((chan == SDPCM_DATA_CHANNEL) ? "data"
: "test")), sdret);
brcmu_pkt_buf_free_skb(pkt);
- bus->drvr->rx_errors++;
+ bus->sdiodev->bus_if->dstats.rx_errors++;
brcmf_sdbrcm_rxfail(bus, true, RETRYCHAN(chan));
continue;
}
@@ -2185,16 +2064,17 @@ deliver:
if (pkt->len == 0) {
brcmu_pkt_buf_free_skb(pkt);
continue;
- } else if (brcmf_proto_hdrpull(bus->drvr, &ifidx, pkt) != 0) {
+ } else if (brcmf_proto_hdrpull(bus->sdiodev->dev, &ifidx,
+ pkt) != 0) {
brcmf_dbg(ERROR, "rx protocol error\n");
brcmu_pkt_buf_free_skb(pkt);
- bus->drvr->rx_errors++;
+ bus->sdiodev->bus_if->dstats.rx_errors++;
continue;
}
/* Unlock during rx call */
up(&bus->sdsem);
- brcmf_rx_frame(bus->drvr, ifidx, pkt, 1);
+ brcmf_rx_packet(bus->sdiodev->dev, ifidx, pkt);
down(&bus->sdsem);
}
rxcount = maxframes - rxleft;
@@ -2214,16 +2094,8 @@ deliver:
return rxcount;
}
-static int
-brcmf_sdbrcm_send_buf(struct brcmf_bus *bus, u32 addr, uint fn, uint flags,
- u8 *buf, uint nbytes, struct sk_buff *pkt)
-{
- return brcmf_sdcard_send_buf
- (bus->sdiodev, addr, fn, flags, buf, nbytes, pkt);
-}
-
static void
-brcmf_sdbrcm_wait_for_event(struct brcmf_bus *bus, bool *lockvar)
+brcmf_sdbrcm_wait_for_event(struct brcmf_sdio *bus, bool *lockvar)
{
up(&bus->sdsem);
wait_event_interruptible_timeout(bus->ctrl_wait,
@@ -2233,7 +2105,7 @@ brcmf_sdbrcm_wait_for_event(struct brcmf_bus *bus, bool *lockvar)
}
static void
-brcmf_sdbrcm_wait_event_wakeup(struct brcmf_bus *bus)
+brcmf_sdbrcm_wait_event_wakeup(struct brcmf_sdio *bus)
{
if (waitqueue_active(&bus->ctrl_wait))
wake_up_interruptible(&bus->ctrl_wait);
@@ -2242,7 +2114,7 @@ brcmf_sdbrcm_wait_event_wakeup(struct brcmf_bus *bus)
/* Writes a HW/SW header into the packet and sends it. */
/* Assumes: (a) header space already there, (b) caller holds lock */
-static int brcmf_sdbrcm_txpkt(struct brcmf_bus *bus, struct sk_buff *pkt,
+static int brcmf_sdbrcm_txpkt(struct brcmf_sdio *bus, struct sk_buff *pkt,
uint chan, bool free_pkt)
{
int ret;
@@ -2262,7 +2134,7 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_bus *bus, struct sk_buff *pkt,
if (skb_headroom(pkt) < pad) {
brcmf_dbg(INFO, "insufficient headroom %d for %d pad\n",
skb_headroom(pkt), pad);
- bus->drvr->tx_realloc++;
+ bus->sdiodev->bus_if->tx_realloc++;
new = brcmu_pkt_buf_get_skb(pkt->len + BRCMF_SDALIGN);
if (!new) {
brcmf_dbg(ERROR, "couldn't allocate new %d-byte packet\n",
@@ -2331,9 +2203,8 @@ static int brcmf_sdbrcm_txpkt(struct brcmf_bus *bus, struct sk_buff *pkt,
if (len & (ALIGNMENT - 1))
len = roundup(len, ALIGNMENT);
- ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, frame,
- len, pkt);
+ ret = brcmf_sdcard_send_pkt(bus->sdiodev, bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC, pkt);
bus->f2txdata++;
if (ret < 0) {
@@ -2371,7 +2242,7 @@ done:
/* restore pkt buffer pointer before calling tx complete routine */
skb_pull(pkt, SDPCM_HDRLEN + pad);
up(&bus->sdsem);
- brcmf_txcomplete(bus->drvr, pkt, ret != 0);
+ brcmf_txcomplete(bus->sdiodev->dev, pkt, ret != 0);
down(&bus->sdsem);
if (free_pkt)
@@ -2380,7 +2251,7 @@ done:
return ret;
}
-static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes)
+static uint brcmf_sdbrcm_sendfromq(struct brcmf_sdio *bus, uint maxframes)
{
struct sk_buff *pkt;
u32 intstatus = 0;
@@ -2390,8 +2261,6 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes)
uint datalen;
u8 tx_prec_map;
- struct brcmf_pub *drvr = bus->drvr;
-
brcmf_dbg(TRACE, "Enter\n");
tx_prec_map = ~bus->flowcontrol;
@@ -2409,9 +2278,9 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes)
ret = brcmf_sdbrcm_txpkt(bus, pkt, SDPCM_DATA_CHANNEL, true);
if (ret)
- bus->drvr->tx_errors++;
+ bus->sdiodev->bus_if->dstats.tx_errors++;
else
- bus->drvr->dstats.tx_bytes += datalen;
+ bus->sdiodev->bus_if->dstats.tx_bytes += datalen;
/* In poll mode, need to check for other events */
if (!bus->intr && cnt) {
@@ -2428,14 +2297,98 @@ static uint brcmf_sdbrcm_sendfromq(struct brcmf_bus *bus, uint maxframes)
}
/* Deflow-control stack if needed */
- if (drvr->up && (drvr->busstate == BRCMF_BUS_DATA) &&
- drvr->txoff && (pktq_len(&bus->txq) < TXLOW))
- brcmf_txflowcontrol(drvr, 0, OFF);
+ if (bus->sdiodev->bus_if->drvr_up &&
+ (bus->sdiodev->bus_if->state == BRCMF_BUS_DATA) &&
+ bus->txoff && (pktq_len(&bus->txq) < TXLOW)) {
+ bus->txoff = OFF;
+ brcmf_txflowcontrol(bus->sdiodev->dev, 0, OFF);
+ }
return cnt;
}
-static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
+static void brcmf_sdbrcm_bus_stop(struct device *dev)
+{
+ u32 local_hostintmask;
+ u8 saveclk;
+ uint retries;
+ int err;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+ struct brcmf_sdio *bus = sdiodev->bus;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ if (bus->watchdog_tsk) {
+ send_sig(SIGTERM, bus->watchdog_tsk, 1);
+ kthread_stop(bus->watchdog_tsk);
+ bus->watchdog_tsk = NULL;
+ }
+
+ if (bus->dpc_tsk && bus->dpc_tsk != current) {
+ send_sig(SIGTERM, bus->dpc_tsk, 1);
+ kthread_stop(bus->dpc_tsk);
+ bus->dpc_tsk = NULL;
+ }
+
+ down(&bus->sdsem);
+
+ bus_wake(bus);
+
+ /* Enable clock for device interrupts */
+ brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
+
+ /* Disable and clear interrupts at the chip level also */
+ w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask), &retries);
+ local_hostintmask = bus->hostintmask;
+ bus->hostintmask = 0;
+
+ /* Change our idea of bus state */
+ bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
+
+ /* Force clocks on backplane to be sure F2 interrupt propagates */
+ saveclk = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, &err);
+ if (!err) {
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR,
+ (saveclk | SBSDIO_FORCE_HT), &err);
+ }
+ if (err)
+ brcmf_dbg(ERROR, "Failed to force clock for F2: err %d\n", err);
+
+ /* Turn off the bus (F2), free any pending packets */
+ brcmf_dbg(INTR, "disable SDIO interrupts\n");
+ brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
+ SDIO_FUNC_ENABLE_1, NULL);
+
+ /* Clear any pending interrupts now that F2 is disabled */
+ w_sdreg32(bus, local_hostintmask,
+ offsetof(struct sdpcmd_regs, intstatus), &retries);
+
+ /* Turn off the backplane clock (only) */
+ brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
+
+ /* Clear the data packet queues */
+ brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
+
+ /* Clear any held glomming stuff */
+ if (bus->glomd)
+ brcmu_pkt_buf_free_skb(bus->glomd);
+ brcmf_sdbrcm_free_glom(bus);
+
+ /* Clear rx control and wake any waiters */
+ bus->rxlen = 0;
+ brcmf_sdbrcm_dcmd_resp_wake(bus);
+
+ /* Reset some F2 state stuff */
+ bus->rxskip = false;
+ bus->tx_seq = bus->rx_seq = 0;
+
+ up(&bus->sdsem);
+}
+
+static bool brcmf_sdbrcm_dpc(struct brcmf_sdio *bus)
{
u32 intstatus, newstatus = 0;
uint retries = 0;
@@ -2463,7 +2416,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
SBSDIO_DEVICE_CTL, &err);
if (err) {
brcmf_dbg(ERROR, "error reading DEVCTL: %d\n", err);
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
#endif /* BCMDBG */
@@ -2473,7 +2426,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
if (err) {
brcmf_dbg(ERROR, "error reading CSR: %d\n",
err);
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
brcmf_dbg(INFO, "DPC: PENDING, devctl 0x%02x clkctl 0x%02x\n",
@@ -2486,7 +2439,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
if (err) {
brcmf_dbg(ERROR, "error reading DEVCTL: %d\n",
err);
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
devctl &= ~SBSDIO_DEVCTL_CA_INT_ONLY;
brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
@@ -2494,7 +2447,7 @@ static bool brcmf_sdbrcm_dpc(struct brcmf_bus *bus)
if (err) {
brcmf_dbg(ERROR, "error writing DEVCTL: %d\n",
err);
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
}
bus->clkstate = CLK_AVAIL;
} else {
@@ -2596,9 +2549,9 @@ clkwait:
(bus->clkstate == CLK_AVAIL)) {
int ret, i;
- ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
+ ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
SDIO_FUNC_2, F2SYNC, (u8 *) bus->ctrl_frame_buf,
- (u32) bus->ctrl_frame_len, NULL);
+ (u32) bus->ctrl_frame_len);
if (ret < 0) {
/* On failure, abort the command and
@@ -2650,11 +2603,11 @@ clkwait:
else await next interrupt */
/* On failed register access, all bets are off:
no resched or interrupts */
- if ((bus->drvr->busstate == BRCMF_BUS_DOWN) ||
+ if ((bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) ||
brcmf_sdcard_regfail(bus->sdiodev)) {
brcmf_dbg(ERROR, "failed backplane access over SDIO, halting operation %d\n",
brcmf_sdcard_regfail(bus->sdiodev));
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
bus->intstatus = 0;
} else if (bus->clkstate == CLK_PENDING) {
brcmf_dbg(INFO, "rescheduled due to CLK_PENDING awaiting I_CHIPACTIVE interrupt\n");
@@ -2681,7 +2634,7 @@ clkwait:
static int brcmf_sdbrcm_dpc_thread(void *data)
{
- struct brcmf_bus *bus = (struct brcmf_bus *) data;
+ struct brcmf_sdio *bus = (struct brcmf_sdio *) data;
allow_signal(SIGTERM);
/* Run until signal received */
@@ -2691,12 +2644,12 @@ static int brcmf_sdbrcm_dpc_thread(void *data)
if (!wait_for_completion_interruptible(&bus->dpc_wait)) {
/* Call bus dpc unless it indicated down
(then clean stop) */
- if (bus->drvr->busstate != BRCMF_BUS_DOWN) {
+ if (bus->sdiodev->bus_if->state != BRCMF_BUS_DOWN) {
if (brcmf_sdbrcm_dpc(bus))
complete(&bus->dpc_wait);
} else {
/* after stopping the bus, exit thread */
- brcmf_sdbrcm_bus_stop(bus);
+ brcmf_sdbrcm_bus_stop(bus->sdiodev->dev);
bus->dpc_tsk = NULL;
break;
}
@@ -2706,10 +2659,13 @@ static int brcmf_sdbrcm_dpc_thread(void *data)
return 0;
}
-int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt)
+static int brcmf_sdbrcm_bus_txdata(struct device *dev, struct sk_buff *pkt)
{
int ret = -EBADE;
uint datalen, prec;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+ struct brcmf_sdio *bus = sdiodev->bus;
brcmf_dbg(TRACE, "Enter\n");
@@ -2728,9 +2684,10 @@ int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt)
/* Priority based enq */
spin_lock_bh(&bus->txqlock);
- if (brcmf_c_prec_enq(bus->drvr, &bus->txq, pkt, prec) == false) {
+ if (brcmf_c_prec_enq(bus->sdiodev->dev, &bus->txq, pkt, prec) ==
+ false) {
skb_pull(pkt, SDPCM_HDRLEN);
- brcmf_txcomplete(bus->drvr, pkt, false);
+ brcmf_txcomplete(bus->sdiodev->dev, pkt, false);
brcmu_pkt_buf_free_skb(pkt);
brcmf_dbg(ERROR, "out of bus->txq !!!\n");
ret = -ENOSR;
@@ -2739,8 +2696,10 @@ int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt)
}
spin_unlock_bh(&bus->txqlock);
- if (pktq_len(&bus->txq) >= TXHI)
- brcmf_txflowcontrol(bus->drvr, 0, ON);
+ if (pktq_len(&bus->txq) >= TXHI) {
+ bus->txoff = ON;
+ brcmf_txflowcontrol(bus->sdiodev->dev, 0, ON);
+ }
#ifdef BCMDBG
if (pktq_plen(&bus->txq, prec) > qcount[prec])
@@ -2757,7 +2716,7 @@ int brcmf_sdbrcm_bus_txdata(struct brcmf_bus *bus, struct sk_buff *pkt)
}
static int
-brcmf_sdbrcm_membytes(struct brcmf_bus *bus, bool write, u32 address, u8 *data,
+brcmf_sdbrcm_membytes(struct brcmf_sdio *bus, bool write, u32 address, u8 *data,
uint size)
{
int bcmerror = 0;
@@ -2818,7 +2777,7 @@ xfer_done:
#ifdef BCMDBG
#define CONSOLE_LINE_MAX 192
-static int brcmf_sdbrcm_readconsole(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_readconsole(struct brcmf_sdio *bus)
{
struct brcmf_console *c = &bus->console;
u8 line[CONSOLE_LINE_MAX], ch;
@@ -2895,14 +2854,14 @@ break2:
}
#endif /* BCMDBG */
-static int brcmf_tx_frame(struct brcmf_bus *bus, u8 *frame, u16 len)
+static int brcmf_tx_frame(struct brcmf_sdio *bus, u8 *frame, u16 len)
{
int i;
int ret;
bus->ctrl_frame_stat = false;
- ret = brcmf_sdbrcm_send_buf(bus, bus->sdiodev->sbwad,
- SDIO_FUNC_2, F2SYNC, frame, len, NULL);
+ ret = brcmf_sdcard_send_buf(bus->sdiodev, bus->sdiodev->sbwad,
+ SDIO_FUNC_2, F2SYNC, frame, len);
if (ret < 0) {
/* On failure, abort the command and terminate the frame */
@@ -2937,8 +2896,8 @@ static int brcmf_tx_frame(struct brcmf_bus *bus, u8 *frame, u16 len)
return ret;
}
-int
-brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
+static int
+brcmf_sdbrcm_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
{
u8 *frame;
u16 len;
@@ -2946,6 +2905,9 @@ brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
uint retries = 0;
u8 doff = 0;
int ret = -1;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+ struct brcmf_sdio *bus = sdiodev->bus;
brcmf_dbg(TRACE, "Enter\n");
@@ -3045,19 +3007,22 @@ brcmf_sdbrcm_bus_txctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
up(&bus->sdsem);
if (ret)
- bus->drvr->tx_ctlerrs++;
+ bus->tx_ctlerrs++;
else
- bus->drvr->tx_ctlpkts++;
+ bus->tx_ctlpkts++;
return ret ? -EIO : 0;
}
-int
-brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
+static int
+brcmf_sdbrcm_bus_rxctl(struct device *dev, unsigned char *msg, uint msglen)
{
int timeleft;
uint rxlen = 0;
bool pending;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+ struct brcmf_sdio *bus = sdiodev->bus;
brcmf_dbg(TRACE, "Enter\n");
@@ -3083,21 +3048,21 @@ brcmf_sdbrcm_bus_rxctl(struct brcmf_bus *bus, unsigned char *msg, uint msglen)
}
if (rxlen)
- bus->drvr->rx_ctlpkts++;
+ bus->rx_ctlpkts++;
else
- bus->drvr->rx_ctlerrs++;
+ bus->rx_ctlerrs++;
return rxlen ? (int)rxlen : -ETIMEDOUT;
}
-static int brcmf_sdbrcm_downloadvars(struct brcmf_bus *bus, void *arg, int len)
+static int brcmf_sdbrcm_downloadvars(struct brcmf_sdio *bus, void *arg, int len)
{
int bcmerror = 0;
brcmf_dbg(TRACE, "Enter\n");
/* Basic sanity checks */
- if (bus->drvr->up) {
+ if (bus->sdiodev->bus_if->drvr_up) {
bcmerror = -EISCONN;
goto err;
}
@@ -3123,7 +3088,7 @@ err:
return bcmerror;
}
-static int brcmf_sdbrcm_write_vars(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_write_vars(struct brcmf_sdio *bus)
{
int bcmerror = 0;
u32 varsize;
@@ -3154,8 +3119,10 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_bus *bus)
/* Verify NVRAM bytes */
brcmf_dbg(INFO, "Compare NVRAM dl & ul; varsize=%d\n", varsize);
nvram_ularray = kmalloc(varsize, GFP_ATOMIC);
- if (!nvram_ularray)
+ if (!nvram_ularray) {
+ kfree(vbuffer);
return -ENOMEM;
+ }
/* Upload image to verify downloaded contents. */
memset(nvram_ularray, 0xaa, varsize);
@@ -3210,135 +3177,11 @@ static int brcmf_sdbrcm_write_vars(struct brcmf_bus *bus)
return bcmerror;
}
-static void
-brcmf_sdbrcm_chip_disablecore(struct brcmf_sdio_dev *sdiodev, u32 corebase)
-{
- u32 regdata;
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbtmstatelow), 4);
- if (regdata & SBTML_RESET)
- return;
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbtmstatelow), 4);
- if ((regdata & (SICF_CLOCK_EN << SBTML_SICF_SHIFT)) != 0) {
- /*
- * set target reject and spin until busy is clear
- * (preserve core-specific bits)
- */
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbtmstatelow), 4);
- brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow),
- 4, regdata | SBTML_REJ);
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbtmstatelow), 4);
- udelay(1);
- SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbtmstatehigh), 4) &
- SBTMH_BUSY), 100000);
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbtmstatehigh), 4);
- if (regdata & SBTMH_BUSY)
- brcmf_dbg(ERROR, "ARM core still busy\n");
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbidlow), 4);
- if (regdata & SBIDL_INIT) {
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbimstate), 4) |
- SBIM_RJ;
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(corebase, sbimstate), 4,
- regdata);
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbimstate), 4);
- udelay(1);
- SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbimstate), 4) &
- SBIM_BY), 100000);
- }
-
- /* set reset and reject while enabling the clocks */
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(corebase, sbtmstatelow), 4,
- (((SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
- SBTML_REJ | SBTML_RESET));
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbtmstatelow), 4);
- udelay(10);
-
- /* clear the initiator reject bit */
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbidlow), 4);
- if (regdata & SBIDL_INIT) {
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbimstate), 4) &
- ~SBIM_RJ;
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(corebase, sbimstate), 4,
- regdata);
- }
- }
-
- /* leave reset and reject asserted */
- brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
- (SBTML_REJ | SBTML_RESET));
- udelay(1);
-}
-
-static void
-brcmf_sdbrcm_chip_resetcore(struct brcmf_sdio_dev *sdiodev, u32 corebase)
-{
- u32 regdata;
-
- /*
- * Must do the disable sequence first to work for
- * arbitrary current core state.
- */
- brcmf_sdbrcm_chip_disablecore(sdiodev, corebase);
-
- /*
- * Now do the initialization sequence.
- * set reset while enabling the clock and
- * forcing them on throughout the core
- */
- brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
- ((SICF_FGC | SICF_CLOCK_EN) << SBTML_SICF_SHIFT) |
- SBTML_RESET);
- udelay(1);
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbtmstatehigh), 4);
- if (regdata & SBTMH_SERR)
- brcmf_sdcard_reg_write(sdiodev,
- CORE_SB(corebase, sbtmstatehigh), 4, 0);
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(corebase, sbimstate), 4);
- if (regdata & (SBIM_IBE | SBIM_TO))
- brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbimstate), 4,
- regdata & ~(SBIM_IBE | SBIM_TO));
-
- /* clear reset and allow it to propagate throughout the core */
- brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
- (SICF_FGC << SBTML_SICF_SHIFT) |
- (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
- udelay(1);
-
- /* leave clock enabled */
- brcmf_sdcard_reg_write(sdiodev, CORE_SB(corebase, sbtmstatelow), 4,
- (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
- udelay(1);
-}
-
-static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
+static int brcmf_sdbrcm_download_state(struct brcmf_sdio *bus, bool enter)
{
uint retries;
- u32 regdata;
int bcmerror = 0;
+ struct chip_info *ci = bus->ci;
/* To enter download state, disable ARM and reset SOCRAM.
* To exit download state, simply reset ARM (default is RAM boot).
@@ -3346,10 +3189,9 @@ static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
if (enter) {
bus->alp_only = true;
- brcmf_sdbrcm_chip_disablecore(bus->sdiodev,
- bus->ci->armcorebase);
+ ci->coredisable(bus->sdiodev, ci, BCMA_CORE_ARM_CM3);
- brcmf_sdbrcm_chip_resetcore(bus->sdiodev, bus->ci->ramcorebase);
+ ci->resetcore(bus->sdiodev, ci, BCMA_CORE_INTERNAL_MEM);
/* Clear the top bit of memory */
if (bus->ramsize) {
@@ -3358,11 +3200,7 @@ static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
(u8 *)&zeros, 4);
}
} else {
- regdata = brcmf_sdcard_reg_read(bus->sdiodev,
- CORE_SB(bus->ci->ramcorebase, sbtmstatelow), 4);
- regdata &= (SBTML_RESET | SBTML_REJ_MASK |
- (SICF_CLOCK_EN << SBTML_SICF_SHIFT));
- if ((SICF_CLOCK_EN << SBTML_SICF_SHIFT) != regdata) {
+ if (!ci->iscoreup(bus->sdiodev, ci, BCMA_CORE_INTERNAL_MEM)) {
brcmf_dbg(ERROR, "SOCRAM core is down after reset?\n");
bcmerror = -EBADE;
goto fail;
@@ -3377,18 +3215,18 @@ static int brcmf_sdbrcm_download_state(struct brcmf_bus *bus, bool enter)
w_sdreg32(bus, 0xFFFFFFFF,
offsetof(struct sdpcmd_regs, intstatus), &retries);
- brcmf_sdbrcm_chip_resetcore(bus->sdiodev, bus->ci->armcorebase);
+ ci->resetcore(bus->sdiodev, ci, BCMA_CORE_ARM_CM3);
/* Allow HT Clock now that the ARM is running. */
bus->alp_only = false;
- bus->drvr->busstate = BRCMF_BUS_LOAD;
+ bus->sdiodev->bus_if->state = BRCMF_BUS_LOAD;
}
fail:
return bcmerror;
}
-static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_bus *bus)
+static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_sdio *bus)
{
if (bus->firmware->size < bus->fw_ptr + len)
len = bus->firmware->size - bus->fw_ptr;
@@ -3398,10 +3236,7 @@ static int brcmf_sdbrcm_get_image(char *buf, int len, struct brcmf_bus *bus)
return len;
}
-MODULE_FIRMWARE(BCM4329_FW_NAME);
-MODULE_FIRMWARE(BCM4329_NV_NAME);
-
-static int brcmf_sdbrcm_download_code_file(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_download_code_file(struct brcmf_sdio *bus)
{
int offset = 0;
uint len;
@@ -3410,8 +3245,7 @@ static int brcmf_sdbrcm_download_code_file(struct brcmf_bus *bus)
brcmf_dbg(INFO, "Enter\n");
- bus->fw_name = BCM4329_FW_NAME;
- ret = request_firmware(&bus->firmware, bus->fw_name,
+ ret = request_firmware(&bus->firmware, BRCMFMAC_FW_NAME,
&bus->sdiodev->func[2]->dev);
if (ret) {
brcmf_dbg(ERROR, "Fail to request firmware %d\n", ret);
@@ -3501,15 +3335,14 @@ static uint brcmf_process_nvram_vars(char *varbuf, uint len)
return buf_len;
}
-static int brcmf_sdbrcm_download_nvram(struct brcmf_bus *bus)
+static int brcmf_sdbrcm_download_nvram(struct brcmf_sdio *bus)
{
uint len;
char *memblock = NULL;
char *bufp;
int ret;
- bus->nv_name = BCM4329_NV_NAME;
- ret = request_firmware(&bus->firmware, bus->nv_name,
+ ret = request_firmware(&bus->firmware, BRCMFMAC_NV_NAME,
&bus->sdiodev->func[2]->dev);
if (ret) {
brcmf_dbg(ERROR, "Fail to request nvram %d\n", ret);
@@ -3549,7 +3382,7 @@ err:
return ret;
}
-static int _brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
+static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
{
int bcmerror = -1;
@@ -3582,7 +3415,7 @@ err:
}
static bool
-brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
+brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
{
bool ret;
@@ -3596,91 +3429,11 @@ brcmf_sdbrcm_download_firmware(struct brcmf_bus *bus)
return ret;
}
-void brcmf_sdbrcm_bus_stop(struct brcmf_bus *bus)
-{
- u32 local_hostintmask;
- u8 saveclk;
- uint retries;
- int err;
-
- brcmf_dbg(TRACE, "Enter\n");
-
- if (bus->watchdog_tsk) {
- send_sig(SIGTERM, bus->watchdog_tsk, 1);
- kthread_stop(bus->watchdog_tsk);
- bus->watchdog_tsk = NULL;
- }
-
- if (bus->dpc_tsk && bus->dpc_tsk != current) {
- send_sig(SIGTERM, bus->dpc_tsk, 1);
- kthread_stop(bus->dpc_tsk);
- bus->dpc_tsk = NULL;
- }
-
- down(&bus->sdsem);
-
- bus_wake(bus);
-
- /* Enable clock for device interrupts */
- brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
-
- /* Disable and clear interrupts at the chip level also */
- w_sdreg32(bus, 0, offsetof(struct sdpcmd_regs, hostintmask), &retries);
- local_hostintmask = bus->hostintmask;
- bus->hostintmask = 0;
-
- /* Change our idea of bus state */
- bus->drvr->busstate = BRCMF_BUS_DOWN;
-
- /* Force clocks on backplane to be sure F2 interrupt propagates */
- saveclk = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, &err);
- if (!err) {
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR,
- (saveclk | SBSDIO_FORCE_HT), &err);
- }
- if (err)
- brcmf_dbg(ERROR, "Failed to force clock for F2: err %d\n", err);
-
- /* Turn off the bus (F2), free any pending packets */
- brcmf_dbg(INTR, "disable SDIO interrupts\n");
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
- SDIO_FUNC_ENABLE_1, NULL);
-
- /* Clear any pending interrupts now that F2 is disabled */
- w_sdreg32(bus, local_hostintmask,
- offsetof(struct sdpcmd_regs, intstatus), &retries);
-
- /* Turn off the backplane clock (only) */
- brcmf_sdbrcm_clkctl(bus, CLK_SDONLY, false);
-
- /* Clear the data packet queues */
- brcmu_pktq_flush(&bus->txq, true, NULL, NULL);
-
- /* Clear any held glomming stuff */
- if (bus->glomd)
- brcmu_pkt_buf_free_skb(bus->glomd);
-
- if (bus->glom)
- brcmu_pkt_buf_free_skb(bus->glom);
-
- bus->glom = bus->glomd = NULL;
-
- /* Clear rx control and wake any waiters */
- bus->rxlen = 0;
- brcmf_sdbrcm_dcmd_resp_wake(bus);
-
- /* Reset some F2 state stuff */
- bus->rxskip = false;
- bus->tx_seq = bus->rx_seq = 0;
-
- up(&bus->sdsem);
-}
-
-int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
+static int brcmf_sdbrcm_bus_init(struct device *dev)
{
- struct brcmf_bus *bus = drvr->bus;
+ struct brcmf_bus *bus_if = dev_get_drvdata(dev);
+ struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv;
+ struct brcmf_sdio *bus = sdiodev->bus;
unsigned long timeout;
uint retries = 0;
u8 ready, enable;
@@ -3690,16 +3443,16 @@ int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
brcmf_dbg(TRACE, "Enter\n");
/* try to download image and nvram to the dongle */
- if (drvr->busstate == BRCMF_BUS_DOWN) {
+ if (bus_if->state == BRCMF_BUS_DOWN) {
if (!(brcmf_sdbrcm_download_firmware(bus)))
return -1;
}
- if (!bus->drvr)
+ if (!bus->sdiodev->bus_if->drvr)
return 0;
/* Start the watchdog timer */
- bus->drvr->tickcnt = 0;
+ bus->tickcnt = 0;
brcmf_sdbrcm_wd_timer(bus, BRCMF_WD_POLL_MS);
down(&bus->sdsem);
@@ -3756,7 +3509,7 @@ int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
SBSDIO_WATERMARK, 8, &err);
/* Set bus state according to enable result */
- drvr->busstate = BRCMF_BUS_DATA;
+ bus_if->state = BRCMF_BUS_DATA;
}
else {
@@ -3771,7 +3524,7 @@ int brcmf_sdbrcm_bus_init(struct brcmf_pub *drvr)
SBSDIO_FUNC1_CHIPCLKCSR, saveclk, &err);
/* If we didn't come up, turn off backplane clock */
- if (drvr->busstate != BRCMF_BUS_DATA)
+ if (bus_if->state != BRCMF_BUS_DATA)
brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
exit:
@@ -3782,7 +3535,7 @@ exit:
void brcmf_sdbrcm_isr(void *arg)
{
- struct brcmf_bus *bus = (struct brcmf_bus *) arg;
+ struct brcmf_sdio *bus = (struct brcmf_sdio *) arg;
brcmf_dbg(TRACE, "Enter\n");
@@ -3791,7 +3544,7 @@ void brcmf_sdbrcm_isr(void *arg)
return;
}
- if (bus->drvr->busstate == BRCMF_BUS_DOWN) {
+ if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
brcmf_dbg(ERROR, "bus is down. we have nothing to do\n");
return;
}
@@ -3814,14 +3567,14 @@ void brcmf_sdbrcm_isr(void *arg)
complete(&bus->dpc_wait);
}
-static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_pub *drvr)
+static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_sdio *bus)
{
- struct brcmf_bus *bus;
+#ifdef BCMDBG
+ struct brcmf_bus *bus_if = dev_get_drvdata(bus->sdiodev->dev);
+#endif /* BCMDBG */
brcmf_dbg(TIMER, "Enter\n");
- bus = drvr->bus;
-
/* Ignore the timer if simulating bus down */
if (bus->sleeping)
return false;
@@ -3865,7 +3618,8 @@ static bool brcmf_sdbrcm_bus_watchdog(struct brcmf_pub *drvr)
}
#ifdef BCMDBG
/* Poll for console output periodically */
- if (drvr->busstate == BRCMF_BUS_DATA && bus->console_interval != 0) {
+ if (bus_if->state == BRCMF_BUS_DATA &&
+ bus->console_interval != 0) {
bus->console.count += BRCMF_WD_POLL_MS;
if (bus->console.count >= bus->console_interval) {
bus->console.count -= bus->console_interval;
@@ -3900,10 +3654,12 @@ static bool brcmf_sdbrcm_chipmatch(u16 chipid)
{
if (chipid == BCM4329_CHIP_ID)
return true;
+ if (chipid == BCM4330_CHIP_ID)
+ return true;
return false;
}
-static void brcmf_sdbrcm_release_malloc(struct brcmf_bus *bus)
+static void brcmf_sdbrcm_release_malloc(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
@@ -3915,13 +3671,13 @@ static void brcmf_sdbrcm_release_malloc(struct brcmf_bus *bus)
bus->databuf = NULL;
}
-static bool brcmf_sdbrcm_probe_malloc(struct brcmf_bus *bus)
+static bool brcmf_sdbrcm_probe_malloc(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
- if (bus->drvr->maxctl) {
+ if (bus->sdiodev->bus_if->maxctl) {
bus->rxblen =
- roundup((bus->drvr->maxctl + SDPCM_HDRLEN),
+ roundup((bus->sdiodev->bus_if->maxctl + SDPCM_HDRLEN),
ALIGNMENT) + BRCMF_SDALIGN;
bus->rxbuf = kmalloc(bus->rxblen, GFP_ATOMIC);
if (!(bus->rxbuf))
@@ -3950,276 +3706,14 @@ fail:
return false;
}
-/* SDIO Pad drive strength to select value mappings */
-struct sdiod_drive_str {
- u8 strength; /* Pad Drive Strength in mA */
- u8 sel; /* Chip-specific select value */
-};
-
-/* SDIO Drive Strength to sel value table for PMU Rev 1 */
-static const struct sdiod_drive_str sdiod_drive_strength_tab1[] = {
- {
- 4, 0x2}, {
- 2, 0x3}, {
- 1, 0x0}, {
- 0, 0x0}
- };
-
-/* SDIO Drive Strength to sel value table for PMU Rev 2, 3 */
-static const struct sdiod_drive_str sdiod_drive_strength_tab2[] = {
- {
- 12, 0x7}, {
- 10, 0x6}, {
- 8, 0x5}, {
- 6, 0x4}, {
- 4, 0x2}, {
- 2, 0x1}, {
- 0, 0x0}
- };
-
-/* SDIO Drive Strength to sel value table for PMU Rev 8 (1.8V) */
-static const struct sdiod_drive_str sdiod_drive_strength_tab3[] = {
- {
- 32, 0x7}, {
- 26, 0x6}, {
- 22, 0x5}, {
- 16, 0x4}, {
- 12, 0x3}, {
- 8, 0x2}, {
- 4, 0x1}, {
- 0, 0x0}
- };
-
-#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
-
-static char *brcmf_chipname(uint chipid, char *buf, uint len)
-{
- const char *fmt;
-
- fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
- snprintf(buf, len, fmt, chipid);
- return buf;
-}
-
-static void brcmf_sdbrcm_sdiod_drive_strength_init(struct brcmf_bus *bus,
- u32 drivestrength) {
- struct sdiod_drive_str *str_tab = NULL;
- u32 str_mask = 0;
- u32 str_shift = 0;
- char chn[8];
-
- if (!(bus->ci->cccaps & CC_CAP_PMU))
- return;
-
- switch (SDIOD_DRVSTR_KEY(bus->ci->chip, bus->ci->pmurev)) {
- case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 1):
- str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab1;
- str_mask = 0x30000000;
- str_shift = 28;
- break;
- case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 2):
- case SDIOD_DRVSTR_KEY(BCM4325_CHIP_ID, 3):
- str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab2;
- str_mask = 0x00003800;
- str_shift = 11;
- break;
- case SDIOD_DRVSTR_KEY(BCM4336_CHIP_ID, 8):
- str_tab = (struct sdiod_drive_str *)&sdiod_drive_strength_tab3;
- str_mask = 0x00003800;
- str_shift = 11;
- break;
- default:
- brcmf_dbg(ERROR, "No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
- brcmf_chipname(bus->ci->chip, chn, 8),
- bus->ci->chiprev, bus->ci->pmurev);
- break;
- }
-
- if (str_tab != NULL) {
- u32 drivestrength_sel = 0;
- u32 cc_data_temp;
- int i;
-
- for (i = 0; str_tab[i].strength != 0; i++) {
- if (drivestrength >= str_tab[i].strength) {
- drivestrength_sel = str_tab[i].sel;
- break;
- }
- }
-
- brcmf_sdcard_reg_write(bus->sdiodev,
- CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr),
- 4, 1);
- cc_data_temp = brcmf_sdcard_reg_read(bus->sdiodev,
- CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr), 4);
- cc_data_temp &= ~str_mask;
- drivestrength_sel <<= str_shift;
- cc_data_temp |= drivestrength_sel;
- brcmf_sdcard_reg_write(bus->sdiodev,
- CORE_CC_REG(bus->ci->cccorebase, chipcontrol_addr),
- 4, cc_data_temp);
-
- brcmf_dbg(INFO, "SDIO: %dmA drive strength selected, set to 0x%08x\n",
- drivestrength, cc_data_temp);
- }
-}
-
-static int
-brcmf_sdbrcm_chip_recognition(struct brcmf_sdio_dev *sdiodev,
- struct chip_info *ci, u32 regs)
-{
- u32 regdata;
-
- /*
- * Get CC core rev
- * Chipid is assume to be at offset 0 from regs arg
- * For different chiptypes or old sdio hosts w/o chipcommon,
- * other ways of recognition should be added here.
- */
- ci->cccorebase = regs;
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_CC_REG(ci->cccorebase, chipid), 4);
- ci->chip = regdata & CID_ID_MASK;
- ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
-
- brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev);
-
- /* Address of cores for new chips should be added here */
- switch (ci->chip) {
- case BCM4329_CHIP_ID:
- ci->buscorebase = BCM4329_CORE_BUS_BASE;
- ci->ramcorebase = BCM4329_CORE_SOCRAM_BASE;
- ci->armcorebase = BCM4329_CORE_ARM_BASE;
- ci->ramsize = BCM4329_RAMSIZE;
- break;
- default:
- brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip);
- return -ENODEV;
- }
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->cccorebase, sbidhigh), 4);
- ci->ccrev = SBCOREREV(regdata);
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_CC_REG(ci->cccorebase, pmucapabilities), 4);
- ci->pmurev = regdata & PCAP_REV_MASK;
-
- regdata = brcmf_sdcard_reg_read(sdiodev,
- CORE_SB(ci->buscorebase, sbidhigh), 4);
- ci->buscorerev = SBCOREREV(regdata);
- ci->buscoretype = (regdata & SBIDH_CC_MASK) >> SBIDH_CC_SHIFT;
-
- brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n",
- ci->ccrev, ci->pmurev, ci->buscorerev, ci->buscoretype);
-
- /* get chipcommon capabilites */
- ci->cccaps = brcmf_sdcard_reg_read(sdiodev,
- CORE_CC_REG(ci->cccorebase, capabilities), 4);
-
- return 0;
-}
-
-static int
-brcmf_sdbrcm_chip_attach(struct brcmf_bus *bus, u32 regs)
-{
- struct chip_info *ci;
- int err;
- u8 clkval, clkset;
-
- brcmf_dbg(TRACE, "Enter\n");
-
- /* alloc chip_info_t */
- ci = kzalloc(sizeof(struct chip_info), GFP_ATOMIC);
- if (NULL == ci)
- return -ENOMEM;
-
- /* bus/core/clk setup for register access */
- /* Try forcing SDIO core to do ALPAvail request only */
- clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
- if (err) {
- brcmf_dbg(ERROR, "error writing for HT off\n");
- goto fail;
- }
-
- /* If register supported, wait for ALPAvail and then force ALP */
- /* This may take up to 15 milliseconds */
- clkval = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, NULL);
- if ((clkval & ~SBSDIO_AVBITS) == clkset) {
- SPINWAIT(((clkval =
- brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR,
- NULL)),
- !SBSDIO_ALPAV(clkval)),
- PMU_MAX_TRANSITION_DLY);
- if (!SBSDIO_ALPAV(clkval)) {
- brcmf_dbg(ERROR, "timeout on ALPAV wait, clkval 0x%02x\n",
- clkval);
- err = -EBUSY;
- goto fail;
- }
- clkset = SBSDIO_FORCE_HW_CLKREQ_OFF |
- SBSDIO_FORCE_ALP;
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR,
- clkset, &err);
- udelay(65);
- } else {
- brcmf_dbg(ERROR, "ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
- clkset, clkval);
- err = -EACCES;
- goto fail;
- }
-
- /* Also, disable the extra SDIO pull-ups */
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
-
- err = brcmf_sdbrcm_chip_recognition(bus->sdiodev, ci, regs);
- if (err)
- goto fail;
-
- /*
- * Make sure any on-chip ARM is off (in case strapping is wrong),
- * or downloaded code was already running.
- */
- brcmf_sdbrcm_chip_disablecore(bus->sdiodev, ci->armcorebase);
-
- brcmf_sdcard_reg_write(bus->sdiodev,
- CORE_CC_REG(ci->cccorebase, gpiopullup), 4, 0);
- brcmf_sdcard_reg_write(bus->sdiodev,
- CORE_CC_REG(ci->cccorebase, gpiopulldown), 4, 0);
-
- /* Disable F2 to clear any intermediate frame state on the dongle */
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
- SDIO_FUNC_ENABLE_1, NULL);
-
- /* WAR: cmd52 backplane read so core HW will drop ALPReq */
- clkval = brcmf_sdcard_cfg_read(bus->sdiodev, SDIO_FUNC_1,
- 0, NULL);
-
- /* Done with backplane-dependent accesses, can drop clock... */
- brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_1,
- SBSDIO_FUNC1_CHIPCLKCSR, 0, NULL);
-
- bus->ci = ci;
- return 0;
-fail:
- bus->ci = NULL;
- kfree(ci);
- return err;
-}
-
static bool
-brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva)
+brcmf_sdbrcm_probe_attach(struct brcmf_sdio *bus, u32 regsva)
{
u8 clkctl = 0;
int err = 0;
int reg_addr;
u32 reg_val;
+ u8 idx;
bus->alp_only = true;
@@ -4234,7 +3728,7 @@ brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva)
#endif /* BCMDBG */
/*
- * Force PLL off until brcmf_sdbrcm_chip_attach()
+ * Force PLL off until brcmf_sdio_chip_attach()
* programs PLL control regs
*/
@@ -4252,8 +3746,8 @@ brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva)
goto fail;
}
- if (brcmf_sdbrcm_chip_attach(bus, regsva)) {
- brcmf_dbg(ERROR, "brcmf_sdbrcm_chip_attach failed!\n");
+ if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci, regsva)) {
+ brcmf_dbg(ERROR, "brcmf_sdio_chip_attach failed!\n");
goto fail;
}
@@ -4262,11 +3756,10 @@ brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva)
goto fail;
}
- brcmf_sdbrcm_sdiod_drive_strength_init(bus, SDIO_DRIVE_STRENGTH);
+ brcmf_sdio_chip_drivestrengthinit(bus->sdiodev, bus->ci,
+ SDIO_DRIVE_STRENGTH);
- /* Get info on the ARM and SOCRAM cores... */
- brcmf_sdcard_reg_read(bus->sdiodev,
- CORE_SB(bus->ci->armcorebase, sbidhigh), 4);
+ /* Get info on the SOCRAM cores... */
bus->ramsize = bus->ci->ramsize;
if (!(bus->ramsize)) {
brcmf_dbg(ERROR, "failed to find SOCRAM memory!\n");
@@ -4274,7 +3767,8 @@ brcmf_sdbrcm_probe_attach(struct brcmf_bus *bus, u32 regsva)
}
/* Set core control so an SDIO reset does a backplane reset */
- reg_addr = bus->ci->buscorebase +
+ idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
+ reg_addr = bus->ci->c_inf[idx].base +
offsetof(struct sdpcmd_regs, corecontrol);
reg_val = brcmf_sdcard_reg_read(bus->sdiodev, reg_addr, sizeof(u32));
brcmf_sdcard_reg_write(bus->sdiodev, reg_addr, sizeof(u32),
@@ -4298,7 +3792,7 @@ fail:
return false;
}
-static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus)
+static bool brcmf_sdbrcm_probe_init(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
@@ -4306,7 +3800,7 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus)
brcmf_sdcard_cfg_write(bus->sdiodev, SDIO_FUNC_0, SDIO_CCCR_IOEx,
SDIO_FUNC_ENABLE_1, NULL);
- bus->drvr->busstate = BRCMF_BUS_DOWN;
+ bus->sdiodev->bus_if->state = BRCMF_BUS_DOWN;
bus->sleeping = false;
bus->rxflow = false;
@@ -4333,7 +3827,7 @@ static bool brcmf_sdbrcm_probe_init(struct brcmf_bus *bus)
static int
brcmf_sdbrcm_watchdog_thread(void *data)
{
- struct brcmf_bus *bus = (struct brcmf_bus *)data;
+ struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
allow_signal(SIGTERM);
/* Run until signal received */
@@ -4341,9 +3835,9 @@ brcmf_sdbrcm_watchdog_thread(void *data)
if (kthread_should_stop())
break;
if (!wait_for_completion_interruptible(&bus->watchdog_wait)) {
- brcmf_sdbrcm_bus_watchdog(bus->drvr);
+ brcmf_sdbrcm_bus_watchdog(bus);
/* Count the tick for reference */
- bus->drvr->tickcnt++;
+ bus->tickcnt++;
} else
break;
}
@@ -4353,7 +3847,7 @@ brcmf_sdbrcm_watchdog_thread(void *data)
static void
brcmf_sdbrcm_watchdog(unsigned long data)
{
- struct brcmf_bus *bus = (struct brcmf_bus *)data;
+ struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
if (bus->watchdog_tsk) {
complete(&bus->watchdog_wait);
@@ -4364,23 +3858,14 @@ brcmf_sdbrcm_watchdog(unsigned long data)
}
}
-static void
-brcmf_sdbrcm_chip_detach(struct brcmf_bus *bus)
-{
- brcmf_dbg(TRACE, "Enter\n");
-
- kfree(bus->ci);
- bus->ci = NULL;
-}
-
-static void brcmf_sdbrcm_release_dongle(struct brcmf_bus *bus)
+static void brcmf_sdbrcm_release_dongle(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
if (bus->ci) {
brcmf_sdbrcm_clkctl(bus, CLK_AVAIL, false);
brcmf_sdbrcm_clkctl(bus, CLK_NONE, false);
- brcmf_sdbrcm_chip_detach(bus);
+ brcmf_sdio_chip_detach(&bus->ci);
if (bus->vars && bus->varsz)
kfree(bus->vars);
bus->vars = NULL;
@@ -4390,7 +3875,7 @@ static void brcmf_sdbrcm_release_dongle(struct brcmf_bus *bus)
}
/* Detach and free everything */
-static void brcmf_sdbrcm_release(struct brcmf_bus *bus)
+static void brcmf_sdbrcm_release(struct brcmf_sdio *bus)
{
brcmf_dbg(TRACE, "Enter\n");
@@ -4398,10 +3883,9 @@ static void brcmf_sdbrcm_release(struct brcmf_bus *bus)
/* De-register interrupt handler */
brcmf_sdcard_intr_dereg(bus->sdiodev);
- if (bus->drvr) {
- brcmf_detach(bus->drvr);
+ if (bus->sdiodev->bus_if->drvr) {
+ brcmf_detach(bus->sdiodev->dev);
brcmf_sdbrcm_release_dongle(bus);
- bus->drvr = NULL;
}
brcmf_sdbrcm_release_malloc(bus);
@@ -4412,21 +3896,10 @@ static void brcmf_sdbrcm_release(struct brcmf_bus *bus)
brcmf_dbg(TRACE, "Disconnected\n");
}
-void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
- u32 regsva, struct brcmf_sdio_dev *sdiodev)
+void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev)
{
int ret;
- struct brcmf_bus *bus;
-
- /* Init global variables at run-time, not as part of the declaration.
- * This is required to support init/de-init of the driver.
- * Initialization
- * of globals as part of the declaration results in non-deterministic
- * behavior since the value of the globals may be different on the
- * first time that the driver is initialized vs subsequent
- * initializations.
- */
- brcmf_c_init();
+ struct brcmf_sdio *bus;
brcmf_dbg(TRACE, "Enter\n");
@@ -4434,12 +3907,13 @@ void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
* regsva == SI_ENUM_BASE*/
/* Allocate private bus interface state */
- bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC);
+ bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC);
if (!bus)
goto fail;
bus->sdiodev = sdiodev;
sdiodev->bus = bus;
+ skb_queue_head_init(&bus->glom);
bus->txbound = BRCMF_TXBOUND;
bus->rxbound = BRCMF_RXBOUND;
bus->txminmax = BRCMF_TXMINMAX;
@@ -4484,9 +3958,15 @@ void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
bus->dpc_tsk = NULL;
}
+	/* Assign bus interface callbacks */
+ bus->sdiodev->bus_if->brcmf_bus_stop = brcmf_sdbrcm_bus_stop;
+ bus->sdiodev->bus_if->brcmf_bus_init = brcmf_sdbrcm_bus_init;
+ bus->sdiodev->bus_if->brcmf_bus_txdata = brcmf_sdbrcm_bus_txdata;
+ bus->sdiodev->bus_if->brcmf_bus_txctl = brcmf_sdbrcm_bus_txctl;
+ bus->sdiodev->bus_if->brcmf_bus_rxctl = brcmf_sdbrcm_bus_rxctl;
/* Attach to the brcmf/OS/network interface */
- bus->drvr = brcmf_attach(bus, SDPCM_RESERVE);
- if (!bus->drvr) {
+ ret = brcmf_attach(SDPCM_RESERVE, bus->sdiodev->dev);
+ if (ret != 0) {
brcmf_dbg(ERROR, "brcmf_attach failed\n");
goto fail;
}
@@ -4514,16 +3994,17 @@ void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
brcmf_dbg(INFO, "completed!!\n");
/* if firmware path present try to download and bring up bus */
- ret = brcmf_bus_start(bus->drvr);
+ ret = brcmf_bus_start(bus->sdiodev->dev);
if (ret != 0) {
if (ret == -ENOLINK) {
brcmf_dbg(ERROR, "dongle is not responding\n");
goto fail;
}
}
- /* Ok, have the per-port tell the stack we're open for business */
- if (brcmf_net_attach(bus->drvr, 0) != 0) {
- brcmf_dbg(ERROR, "Net attach failed!!\n");
+
+ /* add interface and open for business */
+ if (brcmf_add_if(bus->sdiodev->dev, 0, "wlan%d", NULL)) {
+ brcmf_dbg(ERROR, "Add primary net device interface failed!!\n");
goto fail;
}
@@ -4536,7 +4017,7 @@ fail:
void brcmf_sdbrcm_disconnect(void *ptr)
{
- struct brcmf_bus *bus = (struct brcmf_bus *)ptr;
+ struct brcmf_sdio *bus = (struct brcmf_sdio *)ptr;
brcmf_dbg(TRACE, "Enter\n");
@@ -4546,18 +4027,9 @@ void brcmf_sdbrcm_disconnect(void *ptr)
brcmf_dbg(TRACE, "Disconnected\n");
}
-struct device *brcmf_bus_get_device(struct brcmf_bus *bus)
-{
- return &bus->sdiodev->func[2]->dev;
-}
-
void
-brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick)
+brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick)
{
- /* don't start the wd until fw is loaded */
- if (bus->drvr->busstate == BRCMF_BUS_DOWN)
- return;
-
/* Totally stop the timer */
if (!wdtick && bus->wd_timer_valid == true) {
del_timer_sync(&bus->timer);
@@ -4566,6 +4038,10 @@ brcmf_sdbrcm_wd_timer(struct brcmf_bus *bus, uint wdtick)
return;
}
+ /* don't start the wd until fw is loaded */
+ if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN)
+ return;
+
if (wdtick) {
if (bus->save_ms != BRCMF_WD_POLL_MS) {
if (bus->wd_timer_valid == true)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
new file mode 100644
index 0000000..11b2d7c
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
@@ -0,0 +1,607 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/* ***** SDIO interface chip backplane handling functions ***** */
+
+#include <linux/types.h>
+#include <linux/netdevice.h>
+#include <linux/mmc/card.h>
+#include <linux/ssb/ssb_regs.h>
+#include <linux/bcma/bcma.h>
+
+#include <chipcommon.h>
+#include <brcm_hw_ids.h>
+#include <brcmu_wifi.h>
+#include <brcmu_utils.h>
+#include <soc.h>
+#include "dhd_dbg.h"
+#include "sdio_host.h"
+#include "sdio_chip.h"
+
+/* chip core base & ramsize */
+/* bcm4329 */
+/* SDIO device core, ID 0x829 */
+#define BCM4329_CORE_BUS_BASE 0x18011000
+/* internal memory core, ID 0x80e */
+#define BCM4329_CORE_SOCRAM_BASE 0x18003000
+/* ARM Cortex M3 core, ID 0x82a */
+#define BCM4329_CORE_ARM_BASE 0x18002000
+#define BCM4329_RAMSIZE 0x48000
+
+#define SBCOREREV(sbidh) \
+ ((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
+ ((sbidh) & SSB_IDHIGH_RCLO))
+
+/* SOC Interconnect types (aka chip types) */
+#define SOCI_SB 0
+#define SOCI_AI 1
+
+/* EROM CompIdentB */
+#define CIB_REV_MASK 0xff000000
+#define CIB_REV_SHIFT 24
+
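+/* pack chip id and PMU rev into one lookup key for drive-strength setup */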
+#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
+/* SDIO Pad drive strength to select value mappings */
+struct sdiod_drive_str {
+ u8 strength; /* Pad Drive Strength in mA */
+ u8 sel; /* Chip-specific select value */
+};
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
+static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
+ {32, 0x6},
+ {26, 0x7},
+ {22, 0x4},
+ {16, 0x5},
+ {12, 0x2},
+ {8, 0x3},
+ {4, 0x0},
+ {0, 0x1}
+};
+
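+/* map a backplane core id to its slot in ci->c_inf[]; returns BRCMF_MAX_CORENUM if not found */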
+u8
+brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid)
+{
+ u8 idx;
+
+ for (idx = 0; idx < BRCMF_MAX_CORENUM; idx++)
+ if (coreid == ci->c_inf[idx].id)
+ return idx;
+
+ return BRCMF_MAX_CORENUM;
+}
+
+static u32
+brcmf_sdio_sb_corerev(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid)
+{
+ u32 regdata;
+ u8 idx;
+
+ idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbidhigh), 4);
+ return SBCOREREV(regdata);
+}
+
+static u32
+brcmf_sdio_ai_corerev(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid)
+{
+ u8 idx;
+
+ idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+ return (ci->c_inf[idx].cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
+}
+
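+/* Sonics backplane (SB): core is up when its clock is enabled and reset/reject are clear */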
+static bool
+brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid)
+{
+ u32 regdata;
+ u8 idx;
+
+ idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
+ SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
+ return (SSB_TMSLOW_CLOCK == regdata);
+}
+
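+/* AI interconnect: core is up when its clock is on, force-gating is off and reset is deasserted */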
+static bool
+brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid)
+{
+ u32 regdata;
+ u8 idx;
+ bool ret;
+
+ idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
+ ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+ 4);
+ ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
+
+ return ret;
+}
+
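+/* put an SB core into reset: set target reject, wait until busy clears, then leave reset and reject asserted */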
+static void
+brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid)
+{
+ u32 regdata;
+ u8 idx;
+
+ idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ if (regdata & SSB_TMSLOW_RESET)
+ return;
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ if ((regdata & SSB_TMSLOW_CLOCK) != 0) {
+ /*
+ * set target reject and spin until busy is clear
+ * (preserve core-specific bits)
+ */
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ 4, regdata | SSB_TMSLOW_REJECT);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ udelay(1);
+ SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4) &
+ SSB_TMSHIGH_BUSY), 100000);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4);
+ if (regdata & SSB_TMSHIGH_BUSY)
+ brcmf_dbg(ERROR, "core state still busy\n");
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbidlow), 4);
+ if (regdata & SSB_IDLOW_INITIATOR) {
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate), 4) |
+ SSB_IMSTATE_REJECT;
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate), 4,
+ regdata);
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate), 4);
+ udelay(1);
+ SPINWAIT((brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate), 4) &
+ SSB_IMSTATE_BUSY), 100000);
+ }
+
+ /* set reset and reject while enabling the clocks */
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
+ (SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
+ SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ udelay(10);
+
+ /* clear the initiator reject bit */
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbidlow), 4);
+ if (regdata & SSB_IDLOW_INITIATOR) {
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate), 4) &
+ ~SSB_IMSTATE_REJECT;
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate), 4,
+ regdata);
+ }
+ }
+
+ /* leave reset and reject asserted */
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
+ (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
+ udelay(1);
+}
+
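+/* put an AI core into reset through its wrapper ioctl/reset registers */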
+static void
+brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid)
+{
+ u8 idx;
+ u32 regdata;
+
+ idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+ /* if core is already in reset, just return */
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+ 4);
+ if ((regdata & BCMA_RESET_CTL_RESET) != 0)
+ return;
+
+ brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+ 4, 0);
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
+ udelay(10);
+
+ brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+ 4, BCMA_RESET_CTL_RESET);
+ udelay(1);
+}
+
+static void
+brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid)
+{
+ u32 regdata;
+ u8 idx;
+
+ idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+ /*
+ * Must do the disable sequence first to work for
+ * arbitrary current core state.
+ */
+ brcmf_sdio_sb_coredisable(sdiodev, ci, coreid);
+
+ /*
+ * Now do the initialization sequence.
+ * set reset while enabling the clock and
+ * forcing them on throughout the core
+ */
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
+ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET);
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ udelay(1);
+
+ /* clear any serror */
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4);
+ if (regdata & SSB_TMSHIGH_SERR)
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatehigh), 4, 0);
+
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate), 4);
+ if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO))
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbimstate), 4,
+ regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO));
+
+ /* clear reset and allow it to propagate throughout the core */
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4,
+ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK);
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ udelay(1);
+
+ /* leave clock enabled */
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
+ 4, SSB_TMSLOW_CLOCK);
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_SB(ci->c_inf[idx].base, sbtmstatelow), 4);
+ udelay(1);
+}
+
+static void
+brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid)
+{
+ u8 idx;
+ u32 regdata;
+
+ idx = brcmf_sdio_chip_getinfidx(ci, coreid);
+
+ /* must disable first to work for arbitrary current core state */
+ brcmf_sdio_ai_coredisable(sdiodev, ci, coreid);
+
+ /* now do initialization sequence */
+ brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+ 4, BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
+ brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
+ 4, 0);
+ udelay(1);
+
+ brcmf_sdcard_reg_write(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
+ 4, BCMA_IOCTL_CLK);
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ ci->c_inf[idx].wrapbase+BCMA_IOCTL, 4);
+ udelay(1);
+}
+
+static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u32 regs)
+{
+ u32 regdata;
+
+ /*
+ * Get CC core rev
+	 * Chipid is assumed to be at offset 0 from the regs arg
+ * For different chiptypes or old sdio hosts w/o chipcommon,
+ * other ways of recognition should be added here.
+ */
+ ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
+ ci->c_inf[0].base = regs;
+ regdata = brcmf_sdcard_reg_read(sdiodev,
+ CORE_CC_REG(ci->c_inf[0].base, chipid), 4);
+ ci->chip = regdata & CID_ID_MASK;
+ ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
+ ci->socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+
+ brcmf_dbg(INFO, "chipid=0x%x chiprev=%d\n", ci->chip, ci->chiprev);
+
+ /* Address of cores for new chips should be added here */
+ switch (ci->chip) {
+ case BCM4329_CHIP_ID:
+ ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+ ci->c_inf[1].base = BCM4329_CORE_BUS_BASE;
+ ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+ ci->c_inf[2].base = BCM4329_CORE_SOCRAM_BASE;
+ ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+ ci->c_inf[3].base = BCM4329_CORE_ARM_BASE;
+ ci->ramsize = BCM4329_RAMSIZE;
+ break;
+ case BCM4330_CHIP_ID:
+ ci->c_inf[0].wrapbase = 0x18100000;
+ ci->c_inf[0].cib = 0x27004211;
+ ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
+ ci->c_inf[1].base = 0x18002000;
+ ci->c_inf[1].wrapbase = 0x18102000;
+ ci->c_inf[1].cib = 0x07004211;
+ ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
+ ci->c_inf[2].base = 0x18004000;
+ ci->c_inf[2].wrapbase = 0x18104000;
+ ci->c_inf[2].cib = 0x0d080401;
+ ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
+ ci->c_inf[3].base = 0x18003000;
+ ci->c_inf[3].wrapbase = 0x18103000;
+ ci->c_inf[3].cib = 0x03004211;
+ ci->ramsize = 0x48000;
+ break;
+ default:
+ brcmf_dbg(ERROR, "chipid 0x%x is not supported\n", ci->chip);
+ return -ENODEV;
+ }
+
+ switch (ci->socitype) {
+ case SOCI_SB:
+ ci->iscoreup = brcmf_sdio_sb_iscoreup;
+ ci->corerev = brcmf_sdio_sb_corerev;
+ ci->coredisable = brcmf_sdio_sb_coredisable;
+ ci->resetcore = brcmf_sdio_sb_resetcore;
+ break;
+ case SOCI_AI:
+ ci->iscoreup = brcmf_sdio_ai_iscoreup;
+ ci->corerev = brcmf_sdio_ai_corerev;
+ ci->coredisable = brcmf_sdio_ai_coredisable;
+ ci->resetcore = brcmf_sdio_ai_resetcore;
+ break;
+ default:
+ brcmf_dbg(ERROR, "socitype %u not supported\n", ci->socitype);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
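+/* prepare the bus core for backplane access: force ALP-only clocking and disable the extra SDIO pull-ups */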
+static int
+brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
+{
+ int err = 0;
+ u8 clkval, clkset;
+
+ /* Try forcing SDIO core to do ALPAvail request only */
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
+ brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ if (err) {
+ brcmf_dbg(ERROR, "error writing for HT off\n");
+ return err;
+ }
+
+ /* If register supported, wait for ALPAvail and then force ALP */
+ /* This may take up to 15 milliseconds */
+ clkval = brcmf_sdcard_cfg_read(sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+
+ if ((clkval & ~SBSDIO_AVBITS) != clkset) {
+ brcmf_dbg(ERROR, "ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
+ clkset, clkval);
+ return -EACCES;
+ }
+
+ SPINWAIT(((clkval = brcmf_sdcard_cfg_read(sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+ !SBSDIO_ALPAV(clkval)),
+ PMU_MAX_TRANSITION_DLY);
+ if (!SBSDIO_ALPAV(clkval)) {
+ brcmf_dbg(ERROR, "timeout on ALPAV wait, clkval 0x%02x\n",
+ clkval);
+ return -EBUSY;
+ }
+
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
+ brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ udelay(65);
+
+ /* Also, disable the extra SDIO pull-ups */
+ brcmf_sdcard_cfg_write(sdiodev, SDIO_FUNC_1,
+ SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+
+ return 0;
+}
+
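+/* read chipcommon and bus core revisions/capabilities, then make sure the on-chip ARM is disabled */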
+static void
+brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci)
+{
+ /* get chipcommon rev */
+ ci->c_inf[0].rev = ci->corerev(sdiodev, ci, ci->c_inf[0].id);
+
+	/* get chipcommon capabilities */
+ ci->c_inf[0].caps =
+ brcmf_sdcard_reg_read(sdiodev,
+ CORE_CC_REG(ci->c_inf[0].base, capabilities), 4);
+
+ /* get pmu caps & rev */
+ if (ci->c_inf[0].caps & CC_CAP_PMU) {
+ ci->pmucaps = brcmf_sdcard_reg_read(sdiodev,
+ CORE_CC_REG(ci->c_inf[0].base, pmucapabilities), 4);
+ ci->pmurev = ci->pmucaps & PCAP_REV_MASK;
+ }
+
+ ci->c_inf[1].rev = ci->corerev(sdiodev, ci, ci->c_inf[1].id);
+
+ brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n",
+ ci->c_inf[0].rev, ci->pmurev,
+ ci->c_inf[1].rev, ci->c_inf[1].id);
+
+ /*
+ * Make sure any on-chip ARM is off (in case strapping is wrong),
+ * or downloaded code was already running.
+ */
+ ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3);
+}
+
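+/* allocate and initialise the chip handle returned through ci_ptr; freed by brcmf_sdio_chip_detach() */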
+int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info **ci_ptr, u32 regs)
+{
+ int ret;
+ struct chip_info *ci;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* alloc chip_info_t */
+ ci = kzalloc(sizeof(struct chip_info), GFP_ATOMIC);
+ if (!ci)
+ return -ENOMEM;
+
+ ret = brcmf_sdio_chip_buscoreprep(sdiodev);
+ if (ret != 0)
+ goto err;
+
+ ret = brcmf_sdio_chip_recognition(sdiodev, ci, regs);
+ if (ret != 0)
+ goto err;
+
+ brcmf_sdio_chip_buscoresetup(sdiodev, ci);
+
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_CC_REG(ci->c_inf[0].base, gpiopullup), 4, 0);
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_CC_REG(ci->c_inf[0].base, gpiopulldown), 4, 0);
+
+ *ci_ptr = ci;
+ return 0;
+
+err:
+ kfree(ci);
+ return ret;
+}
+
+void
+brcmf_sdio_chip_detach(struct chip_info **ci_ptr)
+{
+ brcmf_dbg(TRACE, "Enter\n");
+
+ kfree(*ci_ptr);
+ *ci_ptr = NULL;
+}
+
+static char *brcmf_sdio_chip_name(uint chipid, char *buf, uint len)
+{
+ const char *fmt;
+
+ fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+ snprintf(buf, len, fmt, chipid);
+ return buf;
+}
+
+void
+brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u32 drivestrength)
+{
+ struct sdiod_drive_str *str_tab = NULL;
+ u32 str_mask = 0;
+ u32 str_shift = 0;
+ char chn[8];
+
+ if (!(ci->c_inf[0].caps & CC_CAP_PMU))
+ return;
+
+ switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
+ case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
+ str_tab = (struct sdiod_drive_str *)&sdiod_drvstr_tab1_1v8;
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
+ default:
+ brcmf_dbg(ERROR, "No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
+ brcmf_sdio_chip_name(ci->chip, chn, 8),
+ ci->chiprev, ci->pmurev);
+ break;
+ }
+
+ if (str_tab != NULL) {
+ u32 drivestrength_sel = 0;
+ u32 cc_data_temp;
+ int i;
+
+ for (i = 0; str_tab[i].strength != 0; i++) {
+ if (drivestrength >= str_tab[i].strength) {
+ drivestrength_sel = str_tab[i].sel;
+ break;
+ }
+ }
+
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_CC_REG(ci->c_inf[0].base, chipcontrol_addr),
+ 4, 1);
+ cc_data_temp = brcmf_sdcard_reg_read(sdiodev,
+ CORE_CC_REG(ci->c_inf[0].base, chipcontrol_addr), 4);
+ cc_data_temp &= ~str_mask;
+ drivestrength_sel <<= str_shift;
+ cc_data_temp |= drivestrength_sel;
+ brcmf_sdcard_reg_write(sdiodev,
+ CORE_CC_REG(ci->c_inf[0].base, chipcontrol_addr),
+ 4, cc_data_temp);
+
+ brcmf_dbg(INFO, "SDIO: %dmA drive strength selected, set to 0x%08x\n",
+ drivestrength, cc_data_temp);
+ }
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
new file mode 100644
index 0000000..ce974d7
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2011 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _BRCMFMAC_SDIO_CHIP_H_
+#define _BRCMFMAC_SDIO_CHIP_H_
+
+/*
+ * Core reg address translation.
+ * These macros return a 32-bit byte address on the backplane bus.
+ */
+#define CORE_CC_REG(base, field) \
+ (base + offsetof(struct chipcregs, field))
+#define CORE_BUS_REG(base, field) \
+ (base + offsetof(struct sdpcmd_regs, field))
+#define CORE_SB(base, field) \
+ (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
+
+/* SDIO function 1 register CHIPCLKCSR */
+/* Force ALP request to backplane */
+#define SBSDIO_FORCE_ALP 0x01
+/* Force HT request to backplane */
+#define SBSDIO_FORCE_HT 0x02
+/* Force ILP request to backplane */
+#define SBSDIO_FORCE_ILP 0x04
+/* Make ALP ready (power up xtal) */
+#define SBSDIO_ALP_AVAIL_REQ 0x08
+/* Make HT ready (power up PLL) */
+#define SBSDIO_HT_AVAIL_REQ 0x10
+/* Squelch clock requests from HW */
+#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20
+/* Status: ALP is ready */
+#define SBSDIO_ALP_AVAIL 0x40
+/* Status: HT is ready */
+#define SBSDIO_HT_AVAIL 0x80
+#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
+#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
+#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
+#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
+#define SBSDIO_CLKAV(regval, alponly) \
+ (SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
+
+#define BRCMF_MAX_CORENUM 6
+
+struct chip_core_info {
+ u16 id;
+ u16 rev;
+ u32 base;
+ u32 wrapbase;
+ u32 caps;
+ u32 cib;
+};
+
+struct chip_info {
+ u32 chip;
+ u32 chiprev;
+ u32 socitype;
+ /* core info */
+ /* always put chipcommon core at 0, bus core at 1 */
+ struct chip_core_info c_inf[BRCMF_MAX_CORENUM];
+ u32 pmurev;
+ u32 pmucaps;
+ u32 ramsize;
+
+ bool (*iscoreup)(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
+ u16 coreid);
+ u32 (*corerev)(struct brcmf_sdio_dev *sdiodev, struct chip_info *ci,
+ u16 coreid);
+ void (*coredisable)(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid);
+ void (*resetcore)(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci, u16 coreid);
+};
+
+struct sbconfig {
+ u32 PAD[2];
+ u32 sbipsflag; /* initiator port ocp slave flag */
+ u32 PAD[3];
+ u32 sbtpsflag; /* target port ocp slave flag */
+ u32 PAD[11];
+ u32 sbtmerrloga; /* (sonics >= 2.3) */
+ u32 PAD;
+ u32 sbtmerrlog; /* (sonics >= 2.3) */
+ u32 PAD[3];
+ u32 sbadmatch3; /* address match3 */
+ u32 PAD;
+ u32 sbadmatch2; /* address match2 */
+ u32 PAD;
+ u32 sbadmatch1; /* address match1 */
+ u32 PAD[7];
+ u32 sbimstate; /* initiator agent state */
+ u32 sbintvec; /* interrupt mask */
+ u32 sbtmstatelow; /* target state */
+ u32 sbtmstatehigh; /* target state */
+ u32 sbbwa0; /* bandwidth allocation table0 */
+ u32 PAD;
+ u32 sbimconfiglow; /* initiator configuration */
+ u32 sbimconfighigh; /* initiator configuration */
+ u32 sbadmatch0; /* address match0 */
+ u32 PAD;
+ u32 sbtmconfiglow; /* target configuration */
+ u32 sbtmconfighigh; /* target configuration */
+ u32 sbbconfig; /* broadcast configuration */
+ u32 PAD;
+ u32 sbbstate; /* broadcast state */
+ u32 PAD[3];
+ u32 sbactcnfg; /* activate configuration */
+ u32 PAD[3];
+ u32 sbflagst; /* current sbflags */
+ u32 PAD[3];
+ u32 sbidlow; /* identification */
+ u32 sbidhigh; /* identification */
+};
+
+extern int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info **ci_ptr, u32 regs);
+extern void brcmf_sdio_chip_detach(struct chip_info **ci_ptr);
+extern void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
+ struct chip_info *ci,
+ u32 drivestrength);
+extern u8 brcmf_sdio_chip_getinfidx(struct chip_info *ci, u16 coreid);
+
+
+#endif /* _BRCMFMAC_SDIO_CHIP_H_ */
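The CORE_CC_REG()/CORE_BUS_REG()/CORE_SB() macros above do nothing more than add the byte offset of a named register field to a core's backplane base address. A rough standalone illustration (the register layout below is made up; the real struct chipcregs differs):

#include <stddef.h>

/* Hypothetical register block, for illustration only. */
struct fake_chipcregs {
	unsigned int chipid;		/* offset 0x00 */
	unsigned int capabilities;	/* offset 0x04 */
	unsigned int corecontrol;	/* offset 0x08 */
};

#define FAKE_CC_REG(base, field) \
	((base) + offsetof(struct fake_chipcregs, field))

/* FAKE_CC_REG(0x18000000, capabilities) evaluates to 0x18000004. */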
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 726fa89..0281d20 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -116,12 +116,20 @@
#define SUCCESS 0
#define ERROR 1
+/* Packet alignment for most efficient SDIO (can change based on platform) */
+#define BRCMF_SDALIGN (1 << 6)
+
+/* watchdog polling interval in ms */
+#define BRCMF_WD_POLL_MS 10
+
struct brcmf_sdreg {
int func;
int offset;
int value;
};
+struct brcmf_sdio;
+
struct brcmf_sdio_dev {
struct sdio_func *func[SDIO_MAX_FUNCS];
u8 num_funcs; /* Supported funcs on client */
@@ -132,9 +140,10 @@ struct brcmf_sdio_dev {
atomic_t suspend; /* suspend flag */
wait_queue_head_t request_byte_wait;
wait_queue_head_t request_word_wait;
- wait_queue_head_t request_packet_wait;
+ wait_queue_head_t request_chain_wait;
wait_queue_head_t request_buffer_wait;
-
+ struct device *dev;
+ struct brcmf_bus *bus_if;
};
/* Register/deregister device interrupt handler. */
@@ -182,11 +191,21 @@ extern bool brcmf_sdcard_regfail(struct brcmf_sdio_dev *sdiodev);
* NOTE: Async operation is not currently supported.
*/
extern int
+brcmf_sdcard_send_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff *pkt);
+extern int
brcmf_sdcard_send_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt);
+ uint flags, u8 *buf, uint nbytes);
+
+extern int
+brcmf_sdcard_recv_pkt(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff *pkt);
extern int
brcmf_sdcard_recv_buf(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
- uint flags, u8 *buf, uint nbytes, struct sk_buff *pkt);
+ uint flags, u8 *buf, uint nbytes);
+extern int
+brcmf_sdcard_recv_chain(struct brcmf_sdio_dev *sdiodev, u32 addr, uint fn,
+ uint flags, struct sk_buff_head *pktq);
/* Flags bits */
@@ -237,16 +256,20 @@ brcmf_sdioh_request_word(struct brcmf_sdio_dev *sdiodev,
/* read or write any buffer using cmd53 */
extern int
brcmf_sdioh_request_buffer(struct brcmf_sdio_dev *sdiodev,
- uint fix_inc, uint rw, uint fnc_num,
- u32 addr, uint regwidth,
- u32 buflen, u8 *buffer, struct sk_buff *pkt);
+ uint fix_inc, uint rw, uint fnc_num, u32 addr,
+ struct sk_buff *pkt);
+extern int
+brcmf_sdioh_request_chain(struct brcmf_sdio_dev *sdiodev, uint fix_inc,
+ uint write, uint func, uint addr,
+ struct sk_buff_head *pktq);
/* Watchdog timer interface for pm ops */
extern void brcmf_sdio_wdtmr_enable(struct brcmf_sdio_dev *sdiodev,
bool enable);
-extern void *brcmf_sdbrcm_probe(u16 bus_no, u16 slot, u16 func, uint bustype,
- u32 regsva, struct brcmf_sdio_dev *sdiodev);
+extern void *brcmf_sdbrcm_probe(u32 regsva, struct brcmf_sdio_dev *sdiodev);
extern void brcmf_sdbrcm_disconnect(void *ptr);
extern void brcmf_sdbrcm_isr(void *arg);
+
+extern void brcmf_sdbrcm_wd_timer(struct brcmf_sdio *bus, uint wdtick);
#endif /* _BRCM_SDH_H_ */
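The split into _buf/_pkt/_chain variants above means chained receives now take a pre-built queue of sk_buffs instead of a flat buffer. A hedged sketch of how a caller might assemble such a queue before handing it to brcmf_sdcard_recv_chain() (the chain length and buffer size are arbitrary examples, not values from the driver):

#include <linux/skbuff.h>

static int example_build_chain(struct sk_buff_head *pktq)
{
	struct sk_buff *skb;
	int i;

	skb_queue_head_init(pktq);
	for (i = 0; i < 4; i++) {		/* arbitrary chain length */
		skb = dev_alloc_skb(512);	/* arbitrary buffer size */
		if (!skb) {
			skb_queue_purge(pktq);
			return -ENOMEM;
		}
		__skb_queue_tail(pktq, skb);
	}
	return 0;
}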
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 5eddabe..bf11850 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -1429,7 +1429,7 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
static s32
brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
- enum nl80211_tx_power_setting type, s32 dbm)
+ enum nl80211_tx_power_setting type, s32 mbm)
{
struct brcmf_cfg80211_priv *cfg_priv = wiphy_to_cfg(wiphy);
@@ -1437,6 +1437,7 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
u16 txpwrmw;
s32 err = 0;
s32 disable = 0;
+ s32 dbm = MBM_TO_DBM(mbm);
WL_TRACE("Enter\n");
if (!check_sys_up(wiphy))
@@ -1446,12 +1447,6 @@ brcmf_cfg80211_set_tx_power(struct wiphy *wiphy,
case NL80211_TX_POWER_AUTOMATIC:
break;
case NL80211_TX_POWER_LIMITED:
- if (dbm < 0) {
- WL_ERR("TX_POWER_LIMITED - dbm is negative\n");
- err = -EINVAL;
- goto done;
- }
- break;
case NL80211_TX_POWER_FIXED:
if (dbm < 0) {
WL_ERR("TX_POWER_FIXED - dbm is negative\n");
@@ -1997,7 +1992,7 @@ done:
}
static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv,
- struct brcmf_bss_info *bi)
+ struct brcmf_bss_info_le *bi)
{
struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
struct ieee80211_channel *notify_channel;
@@ -2049,18 +2044,27 @@ static s32 brcmf_inform_single_bss(struct brcmf_cfg80211_priv *cfg_priv,
notify_timestamp, notify_capability, notify_interval, notify_ie,
notify_ielen, notify_signal, GFP_KERNEL);
- if (!bss) {
- WL_ERR("cfg80211_inform_bss_frame error\n");
- return -EINVAL;
- }
+ if (!bss)
+ return -ENOMEM;
+
+ cfg80211_put_bss(bss);
return err;
}
+static struct brcmf_bss_info_le *
+next_bss_le(struct brcmf_scan_results *list, struct brcmf_bss_info_le *bss)
+{
+ if (bss == NULL)
+ return list->bss_info_le;
+ return (struct brcmf_bss_info_le *)((unsigned long)bss +
+ le32_to_cpu(bss->length));
+}
+
static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv)
{
struct brcmf_scan_results *bss_list;
- struct brcmf_bss_info *bi = NULL; /* must be initialized */
+ struct brcmf_bss_info_le *bi = NULL; /* must be initialized */
s32 err = 0;
int i;
@@ -2072,7 +2076,7 @@ static s32 brcmf_inform_bss(struct brcmf_cfg80211_priv *cfg_priv)
}
WL_SCAN("scanned AP count (%d)\n", bss_list->count);
for (i = 0; i < bss_list->count && i < WL_AP_MAX; i++) {
- bi = next_bss(bss_list, bi);
+ bi = next_bss_le(bss_list, bi);
err = brcmf_inform_single_bss(cfg_priv, bi);
if (err)
break;
@@ -2085,8 +2089,9 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv,
{
struct wiphy *wiphy = cfg_to_wiphy(cfg_priv);
struct ieee80211_channel *notify_channel;
- struct brcmf_bss_info *bi = NULL;
+ struct brcmf_bss_info_le *bi = NULL;
struct ieee80211_supported_band *band;
+ struct cfg80211_bss *bss;
u8 *buf = NULL;
s32 err = 0;
u16 channel;
@@ -2114,7 +2119,7 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv,
goto CleanUp;
}
- bi = (struct brcmf_bss_info *)(buf + 4);
+ bi = (struct brcmf_bss_info_le *)(buf + 4);
channel = bi->ctl_ch ? bi->ctl_ch :
CHSPEC_CHANNEL(le16_to_cpu(bi->chanspec));
@@ -2140,10 +2145,17 @@ static s32 wl_inform_ibss(struct brcmf_cfg80211_priv *cfg_priv,
WL_CONN("signal: %d\n", notify_signal);
WL_CONN("notify_timestamp: %#018llx\n", notify_timestamp);
- cfg80211_inform_bss(wiphy, notify_channel, bssid,
+ bss = cfg80211_inform_bss(wiphy, notify_channel, bssid,
notify_timestamp, notify_capability, notify_interval,
notify_ie, notify_ielen, notify_signal, GFP_KERNEL);
+ if (!bss) {
+ err = -ENOMEM;
+ goto CleanUp;
+ }
+
+ cfg80211_put_bss(bss);
+
CleanUp:
kfree(buf);
@@ -2188,7 +2200,7 @@ static struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
{
- struct brcmf_bss_info *bi;
+ struct brcmf_bss_info_le *bi;
struct brcmf_ssid *ssid;
struct brcmf_tlv *tim;
u16 beacon_interval;
@@ -2211,7 +2223,7 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_priv *cfg_priv)
goto update_bss_info_out;
}
- bi = (struct brcmf_bss_info *)(cfg_priv->extra_buf + 4);
+ bi = (struct brcmf_bss_info_le *)(cfg_priv->extra_buf + 4);
err = brcmf_inform_single_bss(cfg_priv, bi);
if (err)
goto update_bss_info_out;
@@ -2463,7 +2475,7 @@ static s32 brcmf_init_iscan(struct brcmf_cfg80211_priv *cfg_priv)
return err;
}
-static void brcmf_delay(u32 ms)
+static __always_inline void brcmf_delay(u32 ms)
{
if (ms < 1000 / HZ) {
cond_resched();
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
index 62dc461..a613b49 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
@@ -352,15 +352,6 @@ brcmf_cfg80211_connect_info *cfg_to_conn(struct brcmf_cfg80211_priv *cfg)
return &cfg->conn_info;
}
-static inline struct brcmf_bss_info *next_bss(struct brcmf_scan_results *list,
- struct brcmf_bss_info *bss)
-{
- return bss = bss ?
- (struct brcmf_bss_info *)((unsigned long)bss +
- le32_to_cpu(bss->length)) :
- list->bss_info;
-}
-
extern struct brcmf_cfg80211_dev *brcmf_cfg80211_attach(struct net_device *ndev,
struct device *busdev,
void *data);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
index 025fa0e..ab9bb11 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.c
@@ -16,6 +16,8 @@
* File contents: support functions for PCI/PCIe
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/delay.h>
#include <linux/pci.h>
@@ -316,51 +318,24 @@
#define BADIDX (SI_MAXCORES + 1)
-/* Newer chips can access PCI/PCIE and CC core without requiring to change
- * PCI BAR0 WIN
- */
-#define SI_FAST(si) (((si)->pub.buscoretype == PCIE_CORE_ID) || \
- (((si)->pub.buscoretype == PCI_CORE_ID) && \
- (si)->pub.buscorerev >= 13))
-
-#define CCREGS_FAST(si) (((char __iomem *)((si)->curmap) + \
- PCI_16KB0_CCREGS_OFFSET))
-
#define IS_SIM(chippkg) \
((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID))
-/*
- * Macros to disable/restore function core(D11, ENET, ILINE20, etc) interrupts
- * before after core switching to avoid invalid register accesss inside ISR.
- */
-#define INTR_OFF(si, intr_val) \
- if ((si)->intrsoff_fn && \
- (si)->coreid[(si)->curidx] == (si)->dev_coreid) \
- intr_val = (*(si)->intrsoff_fn)((si)->intr_arg)
+#define PCI(sih) (ai_get_buscoretype(sih) == PCI_CORE_ID)
+#define PCIE(sih) (ai_get_buscoretype(sih) == PCIE_CORE_ID)
-#define INTR_RESTORE(si, intr_val) \
- if ((si)->intrsrestore_fn && \
- (si)->coreid[(si)->curidx] == (si)->dev_coreid) \
- (*(si)->intrsrestore_fn)((si)->intr_arg, intr_val)
-
-#define PCI(si) ((si)->pub.buscoretype == PCI_CORE_ID)
-#define PCIE(si) ((si)->pub.buscoretype == PCIE_CORE_ID)
-
-#define PCI_FORCEHT(si) (PCIE(si) && (si->pub.chip == BCM4716_CHIP_ID))
+#define PCI_FORCEHT(sih) (PCIE(sih) && (ai_get_chip_id(sih) == BCM4716_CHIP_ID))
#ifdef BCMDBG
-#define SI_MSG(args) printk args
+#define SI_MSG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
#else
-#define SI_MSG(args)
+#define SI_MSG(fmt, ...) no_printk(fmt, ##__VA_ARGS__)
#endif /* BCMDBG */
#define GOODCOREADDR(x, b) \
(((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \
IS_ALIGNED((x), SI_CORE_SIZE))
-#define PCIEREGS(si) ((__iomem char *)((si)->curmap) + \
- PCI_16KB0_PCIREGS_OFFSET)
-
struct aidmp {
u32 oobselina30; /* 0x000 */
u32 oobselina74; /* 0x004 */
@@ -479,406 +454,13 @@ struct aidmp {
u32 componentid3; /* 0xffc */
};
-/* EROM parsing */
-
-static u32
-get_erom_ent(struct si_pub *sih, u32 __iomem **eromptr, u32 mask, u32 match)
-{
- u32 ent;
- uint inv = 0, nom = 0;
-
- while (true) {
- ent = R_REG(*eromptr);
- (*eromptr)++;
-
- if (mask == 0)
- break;
-
- if ((ent & ER_VALID) == 0) {
- inv++;
- continue;
- }
-
- if (ent == (ER_END | ER_VALID))
- break;
-
- if ((ent & mask) == match)
- break;
-
- nom++;
- }
-
- return ent;
-}
-
-static u32
-get_asd(struct si_pub *sih, u32 __iomem **eromptr, uint sp, uint ad, uint st,
- u32 *addrl, u32 *addrh, u32 *sizel, u32 *sizeh)
-{
- u32 asd, sz, szd;
-
- asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID);
- if (((asd & ER_TAG1) != ER_ADD) ||
- (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) ||
- ((asd & AD_ST_MASK) != st)) {
- /* This is not what we want, "push" it back */
- (*eromptr)--;
- return 0;
- }
- *addrl = asd & AD_ADDR_MASK;
- if (asd & AD_AG32)
- *addrh = get_erom_ent(sih, eromptr, 0, 0);
- else
- *addrh = 0;
- *sizeh = 0;
- sz = asd & AD_SZ_MASK;
- if (sz == AD_SZ_SZD) {
- szd = get_erom_ent(sih, eromptr, 0, 0);
- *sizel = szd & SD_SZ_MASK;
- if (szd & SD_SG32)
- *sizeh = get_erom_ent(sih, eromptr, 0, 0);
- } else
- *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT);
-
- return asd;
-}
-
-static void ai_hwfixup(struct si_info *sii)
-{
-}
-
-/* parse the enumeration rom to identify all cores */
-static void ai_scan(struct si_pub *sih, struct chipcregs __iomem *cc)
-{
- struct si_info *sii = (struct si_info *)sih;
-
- u32 erombase;
- u32 __iomem *eromptr, *eromlim;
- void __iomem *regs = cc;
-
- erombase = R_REG(&cc->eromptr);
-
- /* Set wrappers address */
- sii->curwrap = (void *)((unsigned long)cc + SI_CORE_SIZE);
-
- /* Now point the window at the erom */
- pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, erombase);
- eromptr = regs;
- eromlim = eromptr + (ER_REMAPCONTROL / sizeof(u32));
-
- while (eromptr < eromlim) {
- u32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp;
- u32 mpd, asd, addrl, addrh, sizel, sizeh;
- u32 __iomem *base;
- uint i, j, idx;
- bool br;
-
- br = false;
-
- /* Grok a component */
- cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI);
- if (cia == (ER_END | ER_VALID)) {
- /* Found END of erom */
- ai_hwfixup(sii);
- return;
- }
- base = eromptr - 1;
- cib = get_erom_ent(sih, &eromptr, 0, 0);
-
- if ((cib & ER_TAG) != ER_CI) {
- /* CIA not followed by CIB */
- goto error;
- }
-
- cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT;
- mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
- crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
- nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT;
- nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT;
- nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT;
- nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT;
-
- if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0))
- continue;
- if ((nmw + nsw == 0)) {
- /* A component which is not a core */
- if (cid == OOB_ROUTER_CORE_ID) {
- asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE,
- &addrl, &addrh, &sizel, &sizeh);
- if (asd != 0)
- sii->oob_router = addrl;
- }
- continue;
- }
-
- idx = sii->numcores;
-/* sii->eromptr[idx] = base; */
- sii->cia[idx] = cia;
- sii->cib[idx] = cib;
- sii->coreid[idx] = cid;
-
- for (i = 0; i < nmp; i++) {
- mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID);
- if ((mpd & ER_TAG) != ER_MP) {
- /* Not enough MP entries for component */
- goto error;
- }
- }
-
- /* First Slave Address Descriptor should be port 0:
- * the main register space for the core
- */
- asd =
- get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh,
- &sizel, &sizeh);
- if (asd == 0) {
- /* Try again to see if it is a bridge */
- asd =
- get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl,
- &addrh, &sizel, &sizeh);
- if (asd != 0)
- br = true;
- else if ((addrh != 0) || (sizeh != 0)
- || (sizel != SI_CORE_SIZE)) {
- /* First Slave ASD for core malformed */
- goto error;
- }
- }
- sii->coresba[idx] = addrl;
- sii->coresba_size[idx] = sizel;
- /* Get any more ASDs in port 0 */
- j = 1;
- do {
- asd =
- get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl,
- &addrh, &sizel, &sizeh);
- if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) {
- sii->coresba2[idx] = addrl;
- sii->coresba2_size[idx] = sizel;
- }
- j++;
- } while (asd != 0);
-
- /* Go through the ASDs for other slave ports */
- for (i = 1; i < nsp; i++) {
- j = 0;
- do {
- asd =
- get_asd(sih, &eromptr, i, j++, AD_ST_SLAVE,
- &addrl, &addrh, &sizel, &sizeh);
- } while (asd != 0);
- if (j == 0) {
- /* SP has no address descriptors */
- goto error;
- }
- }
-
- /* Now get master wrappers */
- for (i = 0; i < nmw; i++) {
- asd =
- get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl,
- &addrh, &sizel, &sizeh);
- if (asd == 0) {
- /* Missing descriptor for MW */
- goto error;
- }
- if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
- /* Master wrapper %d is not 4KB */
- goto error;
- }
- if (i == 0)
- sii->wrapba[idx] = addrl;
- }
-
- /* And finally slave wrappers */
- for (i = 0; i < nsw; i++) {
- uint fwp = (nsp == 1) ? 0 : 1;
- asd =
- get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP,
- &addrl, &addrh, &sizel, &sizeh);
- if (asd == 0) {
- /* Missing descriptor for SW */
- goto error;
- }
- if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) {
- /* Slave wrapper is not 4KB */
- goto error;
- }
- if ((nmw == 0) && (i == 0))
- sii->wrapba[idx] = addrl;
- }
-
- /* Don't record bridges */
- if (br)
- continue;
-
- /* Done with core */
- sii->numcores++;
- }
-
- error:
- /* Reached end of erom without finding END */
- sii->numcores = 0;
- return;
-}
-
-/*
- * This function changes the logical "focus" to the indicated core.
- * Return the current core's virtual address. Since each core starts with the
- * same set of registers (BIST, clock control, etc), the returned address
- * contains the first register of this 'common' register block (not to be
- * confused with 'common core').
- */
-void __iomem *ai_setcoreidx(struct si_pub *sih, uint coreidx)
-{
- struct si_info *sii = (struct si_info *)sih;
- u32 addr = sii->coresba[coreidx];
- u32 wrap = sii->wrapba[coreidx];
-
- if (coreidx >= sii->numcores)
- return NULL;
-
- /* point bar0 window */
- pci_write_config_dword(sii->pbus, PCI_BAR0_WIN, addr);
- /* point bar0 2nd 4KB window */
- pci_write_config_dword(sii->pbus, PCI_BAR0_WIN2, wrap);
- sii->curidx = coreidx;
-
- return sii->curmap;
-}
-
-/* Return the number of address spaces in current core */
-int ai_numaddrspaces(struct si_pub *sih)
-{
- return 2;
-}
-
-/* Return the address of the nth address space in the current core */
-u32 ai_addrspace(struct si_pub *sih, uint asidx)
-{
- struct si_info *sii;
- uint cidx;
-
- sii = (struct si_info *)sih;
- cidx = sii->curidx;
-
- if (asidx == 0)
- return sii->coresba[cidx];
- else if (asidx == 1)
- return sii->coresba2[cidx];
- else {
- /* Need to parse the erom again to find addr space */
- return 0;
- }
-}
-
-/* Return the size of the nth address space in the current core */
-u32 ai_addrspacesize(struct si_pub *sih, uint asidx)
-{
- struct si_info *sii;
- uint cidx;
-
- sii = (struct si_info *)sih;
- cidx = sii->curidx;
-
- if (asidx == 0)
- return sii->coresba_size[cidx];
- else if (asidx == 1)
- return sii->coresba2_size[cidx];
- else {
- /* Need to parse the erom again to find addr */
- return 0;
- }
-}
-
-uint ai_flag(struct si_pub *sih)
-{
- struct si_info *sii;
- struct aidmp *ai;
-
- sii = (struct si_info *)sih;
- ai = sii->curwrap;
-
- return R_REG(&ai->oobselouta30) & 0x1f;
-}
-
-void ai_setint(struct si_pub *sih, int siflag)
-{
-}
-
-uint ai_corevendor(struct si_pub *sih)
-{
- struct si_info *sii;
- u32 cia;
-
- sii = (struct si_info *)sih;
- cia = sii->cia[sii->curidx];
- return (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT;
-}
-
-uint ai_corerev(struct si_pub *sih)
-{
- struct si_info *sii;
- u32 cib;
-
- sii = (struct si_info *)sih;
- cib = sii->cib[sii->curidx];
- return (cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
-}
-
-bool ai_iscoreup(struct si_pub *sih)
-{
- struct si_info *sii;
- struct aidmp *ai;
-
- sii = (struct si_info *)sih;
- ai = sii->curwrap;
-
- return (((R_REG(&ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) ==
- SICF_CLOCK_EN)
- && ((R_REG(&ai->resetctrl) & AIRC_RESET) == 0));
-}
-
-void ai_core_cflags_wo(struct si_pub *sih, u32 mask, u32 val)
-{
- struct si_info *sii;
- struct aidmp *ai;
- u32 w;
-
- sii = (struct si_info *)sih;
-
- ai = sii->curwrap;
-
- if (mask || val) {
- w = ((R_REG(&ai->ioctrl) & ~mask) | val);
- W_REG(&ai->ioctrl, w);
- }
-}
-
-u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val)
-{
- struct si_info *sii;
- struct aidmp *ai;
- u32 w;
-
- sii = (struct si_info *)sih;
- ai = sii->curwrap;
-
- if (mask || val) {
- w = ((R_REG(&ai->ioctrl) & ~mask) | val);
- W_REG(&ai->ioctrl, w);
- }
-
- return R_REG(&ai->ioctrl);
-}
-
/* return true if PCIE capability exists in the pci config space */
static bool ai_ispcie(struct si_info *sii)
{
u8 cap_ptr;
cap_ptr =
- pcicore_find_pci_capability(sii->pbus, PCI_CAP_ID_EXP, NULL,
+ pcicore_find_pci_capability(sii->pcibus, PCI_CAP_ID_EXP, NULL,
NULL);
if (!cap_ptr)
return false;
@@ -894,117 +476,69 @@ static bool ai_buscore_prep(struct si_info *sii)
return true;
}
-u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val)
-{
- struct si_info *sii;
- struct aidmp *ai;
- u32 w;
-
- sii = (struct si_info *)sih;
- ai = sii->curwrap;
-
- if (mask || val) {
- w = ((R_REG(&ai->iostatus) & ~mask) | val);
- W_REG(&ai->iostatus, w);
- }
-
- return R_REG(&ai->iostatus);
-}
-
static bool
-ai_buscore_setup(struct si_info *sii, u32 savewin, uint *origidx)
+ai_buscore_setup(struct si_info *sii, struct bcma_device *cc)
{
- bool pci, pcie;
- uint i;
- uint pciidx, pcieidx, pcirev, pcierev;
- struct chipcregs __iomem *cc;
+ struct bcma_device *pci = NULL;
+ struct bcma_device *pcie = NULL;
+ struct bcma_device *core;
- cc = ai_setcoreidx(&sii->pub, SI_CC_IDX);
+
+ /* no cores found, bail out */
+ if (cc->bus->nr_cores == 0)
+ return false;
/* get chipcommon rev */
- sii->pub.ccrev = (int)ai_corerev(&sii->pub);
+ sii->pub.ccrev = cc->id.rev;
/* get chipcommon chipstatus */
- if (sii->pub.ccrev >= 11)
- sii->pub.chipst = R_REG(&cc->chipstatus);
+ if (ai_get_ccrev(&sii->pub) >= 11)
+ sii->chipst = bcma_read32(cc, CHIPCREGOFFS(chipstatus));
 /* get chipcommon capabilities */
- sii->pub.cccaps = R_REG(&cc->capabilities);
- /* get chipcommon extended capabilities */
-
- if (sii->pub.ccrev >= 35)
- sii->pub.cccaps_ext = R_REG(&cc->capabilities_ext);
+ sii->pub.cccaps = bcma_read32(cc, CHIPCREGOFFS(capabilities));
/* get pmu rev and caps */
- if (sii->pub.cccaps & CC_CAP_PMU) {
- sii->pub.pmucaps = R_REG(&cc->pmucapabilities);
+ if (ai_get_cccaps(&sii->pub) & CC_CAP_PMU) {
+ sii->pub.pmucaps = bcma_read32(cc,
+ CHIPCREGOFFS(pmucapabilities));
sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK;
}
- /* figure out bus/orignal core idx */
- sii->pub.buscoretype = NODEV_CORE_ID;
- sii->pub.buscorerev = NOREV;
- sii->pub.buscoreidx = BADIDX;
-
- pci = pcie = false;
- pcirev = pcierev = NOREV;
- pciidx = pcieidx = BADIDX;
-
- for (i = 0; i < sii->numcores; i++) {
+ /* figure out buscore */
+ list_for_each_entry(core, &cc->bus->cores, list) {
uint cid, crev;
- ai_setcoreidx(&sii->pub, i);
- cid = ai_coreid(&sii->pub);
- crev = ai_corerev(&sii->pub);
+ cid = core->id.id;
+ crev = core->id.rev;
if (cid == PCI_CORE_ID) {
- pciidx = i;
- pcirev = crev;
- pci = true;
+ pci = core;
} else if (cid == PCIE_CORE_ID) {
- pcieidx = i;
- pcierev = crev;
- pcie = true;
+ pcie = core;
}
-
- /* find the core idx before entering this func. */
- if ((savewin && (savewin == sii->coresba[i])) ||
- (cc == sii->regs[i]))
- *origidx = i;
}
if (pci && pcie) {
if (ai_ispcie(sii))
- pci = false;
+ pci = NULL;
else
- pcie = false;
+ pcie = NULL;
}
if (pci) {
- sii->pub.buscoretype = PCI_CORE_ID;
- sii->pub.buscorerev = pcirev;
- sii->pub.buscoreidx = pciidx;
+ sii->buscore = pci;
} else if (pcie) {
- sii->pub.buscoretype = PCIE_CORE_ID;
- sii->pub.buscorerev = pcierev;
- sii->pub.buscoreidx = pcieidx;
+ sii->buscore = pcie;
}
/* fixup necessary chip/core configurations */
- if (SI_FAST(sii)) {
- if (!sii->pch) {
- sii->pch = pcicore_init(&sii->pub, sii->pbus,
- (__iomem void *)PCIEREGS(sii));
- if (sii->pch == NULL)
- return false;
- }
+ if (!sii->pch) {
+ sii->pch = pcicore_init(&sii->pub, sii->icbus->drv_pci.core);
+ if (sii->pch == NULL)
+ return false;
}
- if (ai_pci_fixcfg(&sii->pub)) {
- /* si_doattach: si_pci_fixcfg failed */
+ if (ai_pci_fixcfg(&sii->pub))
return false;
- }
-
- /* return to the original core */
- ai_setcoreidx(&sii->pub, *origidx);
return true;
}
@@ -1017,39 +551,27 @@ static __used void ai_nvram_process(struct si_info *sii)
uint w = 0;
/* do a pci config read to get subsystem id and subvendor id */
- pci_read_config_dword(sii->pbus, PCI_SUBSYSTEM_VENDOR_ID, &w);
+ pci_read_config_dword(sii->pcibus, PCI_SUBSYSTEM_VENDOR_ID, &w);
sii->pub.boardvendor = w & 0xffff;
sii->pub.boardtype = (w >> 16) & 0xffff;
- sii->pub.boardflags = getintvar(&sii->pub, BRCMS_SROM_BOARDFLAGS);
}
static struct si_info *ai_doattach(struct si_info *sii,
- void __iomem *regs, struct pci_dev *pbus)
+ struct bcma_bus *pbus)
{
struct si_pub *sih = &sii->pub;
u32 w, savewin;
- struct chipcregs __iomem *cc;
+ struct bcma_device *cc;
uint socitype;
- uint origidx;
-
- memset((unsigned char *) sii, 0, sizeof(struct si_info));
savewin = 0;
- sih->buscoreidx = BADIDX;
-
- sii->curmap = regs;
- sii->pbus = pbus;
-
- /* find Chipcommon address */
- pci_read_config_dword(sii->pbus, PCI_BAR0_WIN, &savewin);
- if (!GOODCOREADDR(savewin, SI_ENUM_BASE))
- savewin = SI_ENUM_BASE;
+ sii->icbus = pbus;
+ sii->pcibus = pbus->host_pci;
- pci_write_config_dword(sii->pbus, PCI_BAR0_WIN,
- SI_ENUM_BASE);
- cc = (struct chipcregs __iomem *) regs;
+ /* switch to Chipcommon core */
+ cc = pbus->drv_cc.core;
/* bus/core/clk setup for register access */
if (!ai_buscore_prep(sii))
@@ -1062,94 +584,74 @@ static struct si_info *ai_doattach(struct si_info *sii,
* hosts w/o chipcommon), some way of recognizing them needs to
* be added here.
*/
- w = R_REG(&cc->chipid);
+ w = bcma_read32(cc, CHIPCREGOFFS(chipid));
socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
 /* Might as well fill in chip id rev & pkg */
sih->chip = w & CID_ID_MASK;
sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT;
sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT;
- sih->issim = false;
-
/* scan for cores */
- if (socitype == SOCI_AI) {
- SI_MSG(("Found chip type AI (0x%08x)\n", w));
- /* pass chipc address instead of original core base */
- ai_scan(&sii->pub, cc);
- } else {
- /* Found chip of unknown type */
- return NULL;
- }
- /* no cores found, bail out */
- if (sii->numcores == 0)
+ if (socitype != SOCI_AI)
return NULL;
- /* bus/core/clk setup */
- origidx = SI_CC_IDX;
- if (!ai_buscore_setup(sii, savewin, &origidx))
+ SI_MSG("Found chip type AI (0x%08x)\n", w);
+ if (!ai_buscore_setup(sii, cc))
goto exit;
/* Init nvram from sprom/otp if they exist */
- if (srom_var_init(&sii->pub, cc))
+ if (srom_var_init(&sii->pub))
goto exit;
ai_nvram_process(sii);
/* === NVRAM, clock is ready === */
- cc = (struct chipcregs __iomem *) ai_setcore(sih, CC_CORE_ID, 0);
- W_REG(&cc->gpiopullup, 0);
- W_REG(&cc->gpiopulldown, 0);
- ai_setcoreidx(sih, origidx);
+ bcma_write32(cc, CHIPCREGOFFS(gpiopullup), 0);
+ bcma_write32(cc, CHIPCREGOFFS(gpiopulldown), 0);
/* PMU specific initializations */
- if (sih->cccaps & CC_CAP_PMU) {
- u32 xtalfreq;
+ if (ai_get_cccaps(sih) & CC_CAP_PMU) {
si_pmu_init(sih);
- si_pmu_chip_init(sih);
-
- xtalfreq = si_pmu_measure_alpclk(sih);
- si_pmu_pll_init(sih, xtalfreq);
+ (void)si_pmu_measure_alpclk(sih);
si_pmu_res_init(sih);
- si_pmu_swreg_init(sih);
}
/* setup the GPIO based LED powersave register */
w = getintvar(sih, BRCMS_SROM_LEDDC);
if (w == 0)
w = DEFAULT_GPIOTIMERVAL;
- ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, gpiotimerval),
- ~0, w);
+ ai_cc_reg(sih, offsetof(struct chipcregs, gpiotimerval),
+ ~0, w);
- if (PCIE(sii))
+ if (PCIE(sih))
pcicore_attach(sii->pch, SI_DOATTACH);
- if (sih->chip == BCM43224_CHIP_ID) {
+ if (ai_get_chip_id(sih) == BCM43224_CHIP_ID) {
/*
 * enable 12 mA drive strength for 43224 and
* set chipControl register bit 15
*/
- if (sih->chiprev == 0) {
- SI_MSG(("Applying 43224A0 WARs\n"));
- ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol),
- CCTRL43224_GPIO_TOGGLE,
- CCTRL43224_GPIO_TOGGLE);
+ if (ai_get_chiprev(sih) == 0) {
+ SI_MSG("Applying 43224A0 WARs\n");
+ ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol),
+ CCTRL43224_GPIO_TOGGLE,
+ CCTRL43224_GPIO_TOGGLE);
si_pmu_chipcontrol(sih, 0, CCTRL_43224A0_12MA_LED_DRIVE,
CCTRL_43224A0_12MA_LED_DRIVE);
}
- if (sih->chiprev >= 1) {
- SI_MSG(("Applying 43224B0+ WARs\n"));
+ if (ai_get_chiprev(sih) >= 1) {
+ SI_MSG("Applying 43224B0+ WARs\n");
si_pmu_chipcontrol(sih, 0, CCTRL_43224B0_12MA_LED_DRIVE,
CCTRL_43224B0_12MA_LED_DRIVE);
}
}
- if (sih->chip == BCM4313_CHIP_ID) {
+ if (ai_get_chip_id(sih) == BCM4313_CHIP_ID) {
/*
 * enable 12 mA drive strength for 4313 and
* set chipControl register bit 1
*/
- SI_MSG(("Applying 4313 WARs\n"));
+ SI_MSG("Applying 4313 WARs\n");
si_pmu_chipcontrol(sih, 0, CCTRL_4313_12MA_LED_DRIVE,
CCTRL_4313_12MA_LED_DRIVE);
}
@@ -1165,22 +667,19 @@ static struct si_info *ai_doattach(struct si_info *sii,
}
/*
- * Allocate a si handle.
- * devid - pci device id (used to determine chip#)
- * osh - opaque OS handle
- * regs - virtual address of initial core registers
+ * Allocate a si handle and do the attach.
*/
struct si_pub *
-ai_attach(void __iomem *regs, struct pci_dev *sdh)
+ai_attach(struct bcma_bus *pbus)
{
struct si_info *sii;
/* alloc struct si_info */
- sii = kmalloc(sizeof(struct si_info), GFP_ATOMIC);
+ sii = kzalloc(sizeof(struct si_info), GFP_ATOMIC);
if (sii == NULL)
return NULL;
- if (ai_doattach(sii, regs, sdh) == NULL) {
+ if (ai_doattach(sii, pbus) == NULL) {
kfree(sii);
return NULL;
}
@@ -1209,292 +708,66 @@ void ai_detach(struct si_pub *sih)
kfree(sii);
}
-/* register driver interrupt disabling and restoring callback functions */
-void
-ai_register_intr_callback(struct si_pub *sih, void *intrsoff_fn,
- void *intrsrestore_fn,
- void *intrsenabled_fn, void *intr_arg)
-{
- struct si_info *sii;
-
- sii = (struct si_info *)sih;
- sii->intr_arg = intr_arg;
- sii->intrsoff_fn = (u32 (*)(void *)) intrsoff_fn;
- sii->intrsrestore_fn = (void (*) (void *, u32)) intrsrestore_fn;
- sii->intrsenabled_fn = (bool (*)(void *)) intrsenabled_fn;
- /* save current core id. when this function called, the current core
- * must be the core which provides driver functions(il, et, wl, etc.)
- */
- sii->dev_coreid = sii->coreid[sii->curidx];
-}
-
-void ai_deregister_intr_callback(struct si_pub *sih)
-{
- struct si_info *sii;
-
- sii = (struct si_info *)sih;
- sii->intrsoff_fn = NULL;
-}
-
-uint ai_coreid(struct si_pub *sih)
-{
- struct si_info *sii;
-
- sii = (struct si_info *)sih;
- return sii->coreid[sii->curidx];
-}
-
-uint ai_coreidx(struct si_pub *sih)
-{
- struct si_info *sii;
-
- sii = (struct si_info *)sih;
- return sii->curidx;
-}
-
-bool ai_backplane64(struct si_pub *sih)
-{
- return (sih->cccaps & CC_CAP_BKPLN64) != 0;
-}
-
/* return index of coreid or BADIDX if not found */
-uint ai_findcoreidx(struct si_pub *sih, uint coreid, uint coreunit)
+struct bcma_device *ai_findcore(struct si_pub *sih, u16 coreid, u16 coreunit)
{
+ struct bcma_device *core;
struct si_info *sii;
uint found;
- uint i;
sii = (struct si_info *)sih;
found = 0;
- for (i = 0; i < sii->numcores; i++)
- if (sii->coreid[i] == coreid) {
+ list_for_each_entry(core, &sii->icbus->cores, list)
+ if (core->id.id == coreid) {
if (found == coreunit)
- return i;
+ return core;
found++;
}
- return BADIDX;
-}
-
-/*
- * This function changes logical "focus" to the indicated core;
- * must be called with interrupts off.
- * Moreover, callers should keep interrupts off during switching
- * out of and back to d11 core.
- */
-void __iomem *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit)
-{
- uint idx;
-
- idx = ai_findcoreidx(sih, coreid, coreunit);
- if (idx >= SI_MAXCORES)
- return NULL;
-
- return ai_setcoreidx(sih, idx);
-}
-
-/* Turn off interrupt as required by ai_setcore, before switch core */
-void __iomem *ai_switch_core(struct si_pub *sih, uint coreid, uint *origidx,
- uint *intr_val)
-{
- void __iomem *cc;
- struct si_info *sii;
-
- sii = (struct si_info *)sih;
-
- if (SI_FAST(sii)) {
- /* Overloading the origidx variable to remember the coreid,
- * this works because the core ids cannot be confused with
- * core indices.
- */
- *origidx = coreid;
- if (coreid == CC_CORE_ID)
- return CCREGS_FAST(sii);
- else if (coreid == sih->buscoretype)
- return PCIEREGS(sii);
- }
- INTR_OFF(sii, *intr_val);
- *origidx = sii->curidx;
- cc = ai_setcore(sih, coreid, 0);
- return cc;
-}
-
-/* restore coreidx and restore interrupt */
-void ai_restore_core(struct si_pub *sih, uint coreid, uint intr_val)
-{
- struct si_info *sii;
-
- sii = (struct si_info *)sih;
- if (SI_FAST(sii)
- && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype)))
- return;
-
- ai_setcoreidx(sih, coreid);
- INTR_RESTORE(sii, intr_val);
-}
-
-void ai_write_wrapperreg(struct si_pub *sih, u32 offset, u32 val)
-{
- struct si_info *sii = (struct si_info *)sih;
- u32 *w = (u32 *) sii->curwrap;
- W_REG(w + (offset / 4), val);
- return;
+ return NULL;
}
/*
- * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set
- * operation, switch back to the original core, and return the new value.
- *
- * When using the silicon backplane, no fiddling with interrupts or core
- * switches is needed.
- *
- * Also, when using pci/pcie, we can optimize away the core switching for pci
- * registers and (on newer pci cores) chipcommon registers.
+ * read/modify chipcommon core register.
*/
-uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
- uint val)
+uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val)
{
- uint origidx = 0;
- u32 __iomem *r = NULL;
- uint w;
- uint intr_val = 0;
- bool fast = false;
+ struct bcma_device *cc;
+ u32 w;
struct si_info *sii;
sii = (struct si_info *)sih;
-
- if (coreidx >= SI_MAXCORES)
- return 0;
-
- /*
- * If pci/pcie, we can get at pci/pcie regs
- * and on newer cores to chipc
- */
- if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) {
- /* Chipc registers are mapped at 12KB */
- fast = true;
- r = (u32 __iomem *)((__iomem char *)sii->curmap +
- PCI_16KB0_CCREGS_OFFSET + regoff);
- } else if (sii->pub.buscoreidx == coreidx) {
- /*
- * pci registers are at either in the last 2KB of
- * an 8KB window or, in pcie and pci rev 13 at 8KB
- */
- fast = true;
- if (SI_FAST(sii))
- r = (u32 __iomem *)((__iomem char *)sii->curmap +
- PCI_16KB0_PCIREGS_OFFSET + regoff);
- else
- r = (u32 __iomem *)((__iomem char *)sii->curmap +
- ((regoff >= SBCONFIGOFF) ?
- PCI_BAR0_PCISBR_OFFSET :
- PCI_BAR0_PCIREGS_OFFSET) + regoff);
- }
-
- if (!fast) {
- INTR_OFF(sii, intr_val);
-
- /* save current core index */
- origidx = ai_coreidx(&sii->pub);
-
- /* switch core */
- r = (u32 __iomem *) ((unsigned char __iomem *)
- ai_setcoreidx(&sii->pub, coreidx) + regoff);
- }
+ cc = sii->icbus->drv_cc.core;
/* mask and set */
if (mask || val) {
- w = (R_REG(r) & ~mask) | val;
- W_REG(r, w);
+ bcma_maskset32(cc, regoff, ~mask, val);
}
/* readback */
- w = R_REG(r);
-
- if (!fast) {
- /* restore core index */
- if (origidx != coreidx)
- ai_setcoreidx(&sii->pub, origidx);
-
- INTR_RESTORE(sii, intr_val);
- }
+ w = bcma_read32(cc, regoff);
return w;
}
-void ai_core_disable(struct si_pub *sih, u32 bits)
-{
- struct si_info *sii;
- u32 dummy;
- struct aidmp *ai;
-
- sii = (struct si_info *)sih;
-
- ai = sii->curwrap;
-
- /* if core is already in reset, just return */
- if (R_REG(&ai->resetctrl) & AIRC_RESET)
- return;
-
- W_REG(&ai->ioctrl, bits);
- dummy = R_REG(&ai->ioctrl);
- udelay(10);
-
- W_REG(&ai->resetctrl, AIRC_RESET);
- udelay(1);
-}
-
-/* reset and re-enable a core
- * inputs:
- * bits - core specific bits that are set during and after reset sequence
- * resetbits - core specific bits that are set only during reset sequence
- */
-void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits)
-{
- struct si_info *sii;
- struct aidmp *ai;
- u32 dummy;
-
- sii = (struct si_info *)sih;
- ai = sii->curwrap;
-
- /*
- * Must do the disable sequence first to work
- * for arbitrary current core state.
- */
- ai_core_disable(sih, (bits | resetbits));
-
- /*
- * Now do the initialization sequence.
- */
- W_REG(&ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN));
- dummy = R_REG(&ai->ioctrl);
- W_REG(&ai->resetctrl, 0);
- udelay(1);
-
- W_REG(&ai->ioctrl, (bits | SICF_CLOCK_EN));
- dummy = R_REG(&ai->ioctrl);
- udelay(1);
-}
-
/* return the slow clock source - LPO, XTAL, or PCI */
-static uint ai_slowclk_src(struct si_info *sii)
+static uint ai_slowclk_src(struct si_pub *sih, struct bcma_device *cc)
{
- struct chipcregs __iomem *cc;
+ struct si_info *sii;
u32 val;
- if (sii->pub.ccrev < 6) {
- pci_read_config_dword(sii->pbus, PCI_GPIO_OUT,
+ sii = (struct si_info *)sih;
+ if (ai_get_ccrev(&sii->pub) < 6) {
+ pci_read_config_dword(sii->pcibus, PCI_GPIO_OUT,
&val);
if (val & PCI_CFG_GPIO_SCS)
return SCC_SS_PCI;
return SCC_SS_XTAL;
- } else if (sii->pub.ccrev < 10) {
- cc = (struct chipcregs __iomem *)
- ai_setcoreidx(&sii->pub, sii->curidx);
- return R_REG(&cc->slow_clk_ctl) & SCC_SS_MASK;
+ } else if (ai_get_ccrev(&sii->pub) < 10) {
+ return bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl)) &
+ SCC_SS_MASK;
} else /* Insta-clock */
return SCC_SS_XTAL;
}
@@ -1503,24 +776,24 @@ static uint ai_slowclk_src(struct si_info *sii)
* return the ILP (slowclock) min or max frequency
* precondition: we've established the chip has dynamic clk control
*/
-static uint ai_slowclk_freq(struct si_info *sii, bool max_freq,
- struct chipcregs __iomem *cc)
+static uint ai_slowclk_freq(struct si_pub *sih, bool max_freq,
+ struct bcma_device *cc)
{
u32 slowclk;
uint div;
- slowclk = ai_slowclk_src(sii);
- if (sii->pub.ccrev < 6) {
+ slowclk = ai_slowclk_src(sih, cc);
+ if (ai_get_ccrev(sih) < 6) {
if (slowclk == SCC_SS_PCI)
return max_freq ? (PCIMAXFREQ / 64)
: (PCIMINFREQ / 64);
else
return max_freq ? (XTALMAXFREQ / 32)
: (XTALMINFREQ / 32);
- } else if (sii->pub.ccrev < 10) {
+ } else if (ai_get_ccrev(sih) < 10) {
div = 4 *
- (((R_REG(&cc->slow_clk_ctl) & SCC_CD_MASK) >>
- SCC_CD_SHIFT) + 1);
+ (((bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl)) &
+ SCC_CD_MASK) >> SCC_CD_SHIFT) + 1);
if (slowclk == SCC_SS_LPO)
return max_freq ? LPOMAXFREQ : LPOMINFREQ;
else if (slowclk == SCC_SS_XTAL)
@@ -1531,15 +804,15 @@ static uint ai_slowclk_freq(struct si_info *sii, bool max_freq,
: (PCIMINFREQ / div);
} else {
/* Chipc rev 10 is InstaClock */
- div = R_REG(&cc->system_clk_ctl) >> SYCC_CD_SHIFT;
- div = 4 * (div + 1);
+ div = bcma_read32(cc, CHIPCREGOFFS(system_clk_ctl));
+ div = 4 * ((div >> SYCC_CD_SHIFT) + 1);
return max_freq ? XTALMAXFREQ : (XTALMINFREQ / div);
}
return 0;
}
static void
-ai_clkctl_setdelay(struct si_info *sii, struct chipcregs __iomem *cc)
+ai_clkctl_setdelay(struct si_pub *sih, struct bcma_device *cc)
{
uint slowmaxfreq, pll_delay, slowclk;
uint pll_on_delay, fref_sel_delay;
@@ -1552,55 +825,40 @@ ai_clkctl_setdelay(struct si_info *sii, struct chipcregs __iomem *cc)
* powered down by dynamic clk control logic.
*/
- slowclk = ai_slowclk_src(sii);
+ slowclk = ai_slowclk_src(sih, cc);
if (slowclk != SCC_SS_XTAL)
pll_delay += XTAL_ON_DELAY;
/* Starting with 4318 it is ILP that is used for the delays */
slowmaxfreq =
- ai_slowclk_freq(sii, (sii->pub.ccrev >= 10) ? false : true, cc);
+ ai_slowclk_freq(sih,
+ (ai_get_ccrev(sih) >= 10) ? false : true, cc);
pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000;
fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000;
- W_REG(&cc->pll_on_delay, pll_on_delay);
- W_REG(&cc->fref_sel_delay, fref_sel_delay);
+ bcma_write32(cc, CHIPCREGOFFS(pll_on_delay), pll_on_delay);
+ bcma_write32(cc, CHIPCREGOFFS(fref_sel_delay), fref_sel_delay);
}
/* initialize power control delay registers */
void ai_clkctl_init(struct si_pub *sih)
{
- struct si_info *sii;
- uint origidx = 0;
- struct chipcregs __iomem *cc;
- bool fast;
+ struct bcma_device *cc;
- if (!(sih->cccaps & CC_CAP_PWR_CTL))
+ if (!(ai_get_cccaps(sih) & CC_CAP_PWR_CTL))
return;
- sii = (struct si_info *)sih;
- fast = SI_FAST(sii);
- if (!fast) {
- origidx = sii->curidx;
- cc = (struct chipcregs __iomem *)
- ai_setcore(sih, CC_CORE_ID, 0);
- if (cc == NULL)
- return;
- } else {
- cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
- if (cc == NULL)
- return;
- }
+ cc = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
+ if (cc == NULL)
+ return;
/* set all Instaclk chip ILP to 1 MHz */
- if (sih->ccrev >= 10)
- SET_REG(&cc->system_clk_ctl, SYCC_CD_MASK,
- (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
+ if (ai_get_ccrev(sih) >= 10)
+ bcma_maskset32(cc, CHIPCREGOFFS(system_clk_ctl), SYCC_CD_MASK,
+ (ILP_DIV_1MHZ << SYCC_CD_SHIFT));
- ai_clkctl_setdelay(sii, cc);
-
- if (!fast)
- ai_setcoreidx(sih, origidx);
+ ai_clkctl_setdelay(sih, cc);
}
/*
@@ -1610,47 +868,25 @@ void ai_clkctl_init(struct si_pub *sih)
u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih)
{
struct si_info *sii;
- uint origidx = 0;
- struct chipcregs __iomem *cc;
+ struct bcma_device *cc;
uint slowminfreq;
u16 fpdelay;
- uint intr_val = 0;
- bool fast;
sii = (struct si_info *)sih;
- if (sih->cccaps & CC_CAP_PMU) {
- INTR_OFF(sii, intr_val);
+ if (ai_get_cccaps(sih) & CC_CAP_PMU) {
fpdelay = si_pmu_fast_pwrup_delay(sih);
- INTR_RESTORE(sii, intr_val);
return fpdelay;
}
- if (!(sih->cccaps & CC_CAP_PWR_CTL))
+ if (!(ai_get_cccaps(sih) & CC_CAP_PWR_CTL))
return 0;
- fast = SI_FAST(sii);
fpdelay = 0;
- if (!fast) {
- origidx = sii->curidx;
- INTR_OFF(sii, intr_val);
- cc = (struct chipcregs __iomem *)
- ai_setcore(sih, CC_CORE_ID, 0);
- if (cc == NULL)
- goto done;
- } else {
- cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
- if (cc == NULL)
- goto done;
- }
-
- slowminfreq = ai_slowclk_freq(sii, false, cc);
- fpdelay = (((R_REG(&cc->pll_on_delay) + 2) * 1000000) +
- (slowminfreq - 1)) / slowminfreq;
-
- done:
- if (!fast) {
- ai_setcoreidx(sih, origidx);
- INTR_RESTORE(sii, intr_val);
+ cc = ai_findcore(sih, CC_CORE_ID, 0);
+ if (cc) {
+ slowminfreq = ai_slowclk_freq(sih, false, cc);
+ fpdelay = (((bcma_read32(cc, CHIPCREGOFFS(pll_on_delay)) + 2)
+ * 1000000) + (slowminfreq - 1)) / slowminfreq;
}
return fpdelay;
}
@@ -1664,12 +900,12 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
sii = (struct si_info *)sih;
/* pcie core doesn't have any mapping to control the xtal pu */
- if (PCIE(sii))
+ if (PCIE(sih))
return -1;
- pci_read_config_dword(sii->pbus, PCI_GPIO_IN, &in);
- pci_read_config_dword(sii->pbus, PCI_GPIO_OUT, &out);
- pci_read_config_dword(sii->pbus, PCI_GPIO_OUTEN, &outen);
+ pci_read_config_dword(sii->pcibus, PCI_GPIO_IN, &in);
+ pci_read_config_dword(sii->pcibus, PCI_GPIO_OUT, &out);
+ pci_read_config_dword(sii->pcibus, PCI_GPIO_OUTEN, &outen);
/*
* Avoid glitching the clock if GPRS is already using it.
@@ -1690,9 +926,9 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
out |= PCI_CFG_GPIO_XTAL;
if (what & PLL)
out |= PCI_CFG_GPIO_PLL;
- pci_write_config_dword(sii->pbus,
+ pci_write_config_dword(sii->pcibus,
PCI_GPIO_OUT, out);
- pci_write_config_dword(sii->pbus,
+ pci_write_config_dword(sii->pcibus,
PCI_GPIO_OUTEN, outen);
udelay(XTAL_ON_DELAY);
}
@@ -1700,7 +936,7 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
/* turn pll on */
if (what & PLL) {
out &= ~PCI_CFG_GPIO_PLL;
- pci_write_config_dword(sii->pbus,
+ pci_write_config_dword(sii->pcibus,
PCI_GPIO_OUT, out);
mdelay(2);
}
@@ -1709,9 +945,9 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
out &= ~PCI_CFG_GPIO_XTAL;
if (what & PLL)
out |= PCI_CFG_GPIO_PLL;
- pci_write_config_dword(sii->pbus,
+ pci_write_config_dword(sii->pcibus,
PCI_GPIO_OUT, out);
- pci_write_config_dword(sii->pbus,
+ pci_write_config_dword(sii->pcibus,
PCI_GPIO_OUTEN, outen);
}
@@ -1721,63 +957,52 @@ int ai_clkctl_xtal(struct si_pub *sih, uint what, bool on)
/* clk control mechanism through chipcommon, no policy checking */
static bool _ai_clkctl_cc(struct si_info *sii, uint mode)
{
- uint origidx = 0;
- struct chipcregs __iomem *cc;
+ struct bcma_device *cc;
u32 scc;
- uint intr_val = 0;
- bool fast = SI_FAST(sii);
/* chipcommon cores prior to rev6 don't support dynamic clock control */
- if (sii->pub.ccrev < 6)
+ if (ai_get_ccrev(&sii->pub) < 6)
return false;
- if (!fast) {
- INTR_OFF(sii, intr_val);
- origidx = sii->curidx;
- cc = (struct chipcregs __iomem *)
- ai_setcore(&sii->pub, CC_CORE_ID, 0);
- } else {
- cc = (struct chipcregs __iomem *) CCREGS_FAST(sii);
- if (cc == NULL)
- goto done;
- }
+ cc = ai_findcore(&sii->pub, BCMA_CORE_CHIPCOMMON, 0);
- if (!(sii->pub.cccaps & CC_CAP_PWR_CTL) && (sii->pub.ccrev < 20))
- goto done;
+ if (!(ai_get_cccaps(&sii->pub) & CC_CAP_PWR_CTL) &&
+ (ai_get_ccrev(&sii->pub) < 20))
+ return mode == CLK_FAST;
switch (mode) {
case CLK_FAST: /* FORCEHT, fast (pll) clock */
- if (sii->pub.ccrev < 10) {
+ if (ai_get_ccrev(&sii->pub) < 10) {
/*
* don't forget to force xtal back
* on before we clear SCC_DYN_XTAL..
*/
ai_clkctl_xtal(&sii->pub, XTAL, ON);
- SET_REG(&cc->slow_clk_ctl,
- (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
- } else if (sii->pub.ccrev < 20) {
- OR_REG(&cc->system_clk_ctl, SYCC_HR);
+ bcma_maskset32(cc, CHIPCREGOFFS(slow_clk_ctl),
+ (SCC_XC | SCC_FS | SCC_IP), SCC_IP);
+ } else if (ai_get_ccrev(&sii->pub) < 20) {
+ bcma_set32(cc, CHIPCREGOFFS(system_clk_ctl), SYCC_HR);
} else {
- OR_REG(&cc->clk_ctl_st, CCS_FORCEHT);
+ bcma_set32(cc, CHIPCREGOFFS(clk_ctl_st), CCS_FORCEHT);
}
/* wait for the PLL */
- if (sii->pub.cccaps & CC_CAP_PMU) {
+ if (ai_get_cccaps(&sii->pub) & CC_CAP_PMU) {
u32 htavail = CCS_HTAVAIL;
- SPINWAIT(((R_REG(&cc->clk_ctl_st) & htavail)
- == 0), PMU_MAX_TRANSITION_DLY);
+ SPINWAIT(((bcma_read32(cc, CHIPCREGOFFS(clk_ctl_st)) &
+ htavail) == 0), PMU_MAX_TRANSITION_DLY);
} else {
udelay(PLL_DELAY);
}
break;
case CLK_DYNAMIC: /* enable dynamic clock control */
- if (sii->pub.ccrev < 10) {
- scc = R_REG(&cc->slow_clk_ctl);
+ if (ai_get_ccrev(&sii->pub) < 10) {
+ scc = bcma_read32(cc, CHIPCREGOFFS(slow_clk_ctl));
scc &= ~(SCC_FS | SCC_IP | SCC_XC);
if ((scc & SCC_SS_MASK) != SCC_SS_XTAL)
scc |= SCC_XC;
- W_REG(&cc->slow_clk_ctl, scc);
+ bcma_write32(cc, CHIPCREGOFFS(slow_clk_ctl), scc);
/*
* for dynamic control, we have to
@@ -1785,11 +1010,11 @@ static bool _ai_clkctl_cc(struct si_info *sii, uint mode)
*/
if (scc & SCC_XC)
ai_clkctl_xtal(&sii->pub, XTAL, OFF);
- } else if (sii->pub.ccrev < 20) {
+ } else if (ai_get_ccrev(&sii->pub) < 20) {
/* Instaclock */
- AND_REG(&cc->system_clk_ctl, ~SYCC_HR);
+ bcma_mask32(cc, CHIPCREGOFFS(system_clk_ctl), ~SYCC_HR);
} else {
- AND_REG(&cc->clk_ctl_st, ~CCS_FORCEHT);
+ bcma_mask32(cc, CHIPCREGOFFS(clk_ctl_st), ~CCS_FORCEHT);
}
break;
@@ -1797,11 +1022,6 @@ static bool _ai_clkctl_cc(struct si_info *sii, uint mode)
break;
}
- done:
- if (!fast) {
- ai_setcoreidx(&sii->pub, origidx);
- INTR_RESTORE(sii, intr_val);
- }
return mode == CLK_FAST;
}
@@ -1820,46 +1040,25 @@ bool ai_clkctl_cc(struct si_pub *sih, uint mode)
sii = (struct si_info *)sih;
/* chipcommon cores prior to rev6 don't support dynamic clock control */
- if (sih->ccrev < 6)
+ if (ai_get_ccrev(sih) < 6)
return false;
- if (PCI_FORCEHT(sii))
+ if (PCI_FORCEHT(sih))
return mode == CLK_FAST;
return _ai_clkctl_cc(sii, mode);
}
-/* Build device path */
-int ai_devpath(struct si_pub *sih, char *path, int size)
-{
- int slen;
-
- if (!path || size <= 0)
- return -1;
-
- slen = snprintf(path, (size_t) size, "pci/%u/%u/",
- ((struct si_info *)sih)->pbus->bus->number,
- PCI_SLOT(((struct pci_dev *)
- (((struct si_info *)(sih))->pbus))->devfn));
-
- if (slen < 0 || slen >= size) {
- path[0] = '\0';
- return -1;
- }
-
- return 0;
-}
-
void ai_pci_up(struct si_pub *sih)
{
struct si_info *sii;
sii = (struct si_info *)sih;
- if (PCI_FORCEHT(sii))
+ if (PCI_FORCEHT(sih))
_ai_clkctl_cc(sii, CLK_FAST);
- if (PCIE(sii))
+ if (PCIE(sih))
pcicore_up(sii->pch, SI_PCIUP);
}
@@ -1882,7 +1081,7 @@ void ai_pci_down(struct si_pub *sih)
sii = (struct si_info *)sih;
/* release FORCEHT since chip is going to "down" state */
- if (PCI_FORCEHT(sii))
+ if (PCI_FORCEHT(sih))
_ai_clkctl_cc(sii, CLK_DYNAMIC);
pcicore_down(sii->pch, SI_PCIDOWN);
@@ -1895,42 +1094,23 @@ void ai_pci_down(struct si_pub *sih)
void ai_pci_setup(struct si_pub *sih, uint coremask)
{
struct si_info *sii;
- struct sbpciregs __iomem *regs = NULL;
- u32 siflag = 0, w;
- uint idx = 0;
+ u32 w;
sii = (struct si_info *)sih;
- if (PCI(sii)) {
- /* get current core index */
- idx = sii->curidx;
-
- /* we interrupt on this backplane flag number */
- siflag = ai_flag(sih);
-
- /* switch over to pci core */
- regs = ai_setcoreidx(sih, sii->pub.buscoreidx);
- }
-
/*
* Enable sb->pci interrupts. Assume
* PCI rev 2.3 support was added in pci core rev 6 and things changed..
*/
- if (PCIE(sii) || (PCI(sii) && ((sii->pub.buscorerev) >= 6))) {
+ if (PCIE(sih) || (PCI(sih) && (ai_get_buscorerev(sih) >= 6))) {
/* pci config write to set this core bit in PCIIntMask */
- pci_read_config_dword(sii->pbus, PCI_INT_MASK, &w);
+ pci_read_config_dword(sii->pcibus, PCI_INT_MASK, &w);
w |= (coremask << PCI_SBIM_SHIFT);
- pci_write_config_dword(sii->pbus, PCI_INT_MASK, w);
- } else {
- /* set sbintvec bit for our flag number */
- ai_setint(sih, siflag);
+ pci_write_config_dword(sii->pcibus, PCI_INT_MASK, w);
}
- if (PCI(sii)) {
- pcicore_pci_setup(sii->pch, regs);
-
- /* switch back to previous core */
- ai_setcoreidx(sih, idx);
+ if (PCI(sih)) {
+ pcicore_pci_setup(sii->pch);
}
}
@@ -1940,25 +1120,11 @@ void ai_pci_setup(struct si_pub *sih, uint coremask)
*/
int ai_pci_fixcfg(struct si_pub *sih)
{
- uint origidx;
- void __iomem *regs = NULL;
struct si_info *sii = (struct si_info *)sih;
/* Fixup PI in SROM shadow area to enable the correct PCI core access */
- /* save the current index */
- origidx = ai_coreidx(&sii->pub);
-
/* check 'pi' is correct and fix it if not */
- regs = ai_setcore(&sii->pub, sii->pub.buscoretype, 0);
- if (sii->pub.buscoretype == PCIE_CORE_ID)
- pcicore_fixcfg_pcie(sii->pch,
- (struct sbpcieregs __iomem *)regs);
- else if (sii->pub.buscoretype == PCI_CORE_ID)
- pcicore_fixcfg_pci(sii->pch, (struct sbpciregs __iomem *)regs);
-
- /* restore the original index */
- ai_setcoreidx(&sii->pub, origidx);
-
+ pcicore_fixcfg(sii->pch);
pcicore_hwup(sii->pch);
return 0;
}
@@ -1969,58 +1135,42 @@ u32 ai_gpiocontrol(struct si_pub *sih, u32 mask, u32 val, u8 priority)
uint regoff;
regoff = offsetof(struct chipcregs, gpiocontrol);
- return ai_corereg(sih, SI_CC_IDX, regoff, mask, val);
+ return ai_cc_reg(sih, regoff, mask, val);
}
void ai_chipcontrl_epa4331(struct si_pub *sih, bool on)
{
- struct si_info *sii;
- struct chipcregs __iomem *cc;
- uint origidx;
+ struct bcma_device *cc;
u32 val;
- sii = (struct si_info *)sih;
- origidx = ai_coreidx(sih);
-
- cc = (struct chipcregs __iomem *) ai_setcore(sih, CC_CORE_ID, 0);
-
- val = R_REG(&cc->chipcontrol);
+ cc = ai_findcore(sih, CC_CORE_ID, 0);
if (on) {
- if (sih->chippkg == 9 || sih->chippkg == 0xb)
+ if (ai_get_chippkg(sih) == 9 || ai_get_chippkg(sih) == 0xb)
/* Ext PA Controls for 4331 12x9 Package */
- W_REG(&cc->chipcontrol, val |
- CCTRL4331_EXTPA_EN |
- CCTRL4331_EXTPA_ON_GPIO2_5);
+ bcma_set32(cc, CHIPCREGOFFS(chipcontrol),
+ CCTRL4331_EXTPA_EN |
+ CCTRL4331_EXTPA_ON_GPIO2_5);
else
/* Ext PA Controls for 4331 12x12 Package */
- W_REG(&cc->chipcontrol,
- val | CCTRL4331_EXTPA_EN);
+ bcma_set32(cc, CHIPCREGOFFS(chipcontrol),
+ CCTRL4331_EXTPA_EN);
} else {
val &= ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5);
- W_REG(&cc->chipcontrol, val);
+ bcma_mask32(cc, CHIPCREGOFFS(chipcontrol),
+ ~(CCTRL4331_EXTPA_EN | CCTRL4331_EXTPA_ON_GPIO2_5));
}
-
- ai_setcoreidx(sih, origidx);
}
/* Enable BT-COEX & Ex-PA for 4313 */
void ai_epa_4313war(struct si_pub *sih)
{
- struct si_info *sii;
- struct chipcregs __iomem *cc;
- uint origidx;
+ struct bcma_device *cc;
- sii = (struct si_info *)sih;
- origidx = ai_coreidx(sih);
-
- cc = ai_setcore(sih, CC_CORE_ID, 0);
+ cc = ai_findcore(sih, CC_CORE_ID, 0);
/* EPA Fix */
- W_REG(&cc->gpiocontrol,
- R_REG(&cc->gpiocontrol) | GPIO_CTRL_EPA_EN_MASK);
-
- ai_setcoreidx(sih, origidx);
+ bcma_set32(cc, CHIPCREGOFFS(gpiocontrol), GPIO_CTRL_EPA_EN_MASK);
}
/* check if the device is removed */
@@ -2031,7 +1181,7 @@ bool ai_deviceremoved(struct si_pub *sih)
sii = (struct si_info *)sih;
- pci_read_config_dword(sii->pbus, PCI_VENDOR_ID, &w);
+ pci_read_config_dword(sii->pcibus, PCI_VENDOR_ID, &w);
if ((w & 0xFFFF) != PCI_VENDOR_ID_BROADCOM)
return true;
@@ -2040,26 +1190,23 @@ bool ai_deviceremoved(struct si_pub *sih)
bool ai_is_sprom_available(struct si_pub *sih)
{
- if (sih->ccrev >= 31) {
- struct si_info *sii;
- uint origidx;
- struct chipcregs __iomem *cc;
+ struct si_info *sii = (struct si_info *)sih;
+
+ if (ai_get_ccrev(sih) >= 31) {
+ struct bcma_device *cc;
u32 sromctrl;
- if ((sih->cccaps & CC_CAP_SROM) == 0)
+ if ((ai_get_cccaps(sih) & CC_CAP_SROM) == 0)
return false;
- sii = (struct si_info *)sih;
- origidx = sii->curidx;
- cc = ai_setcoreidx(sih, SI_CC_IDX);
- sromctrl = R_REG(&cc->sromcontrol);
- ai_setcoreidx(sih, origidx);
+ cc = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
+ sromctrl = bcma_read32(cc, CHIPCREGOFFS(sromcontrol));
return sromctrl & SRC_PRESENT;
}
- switch (sih->chip) {
+ switch (ai_get_chip_id(sih)) {
case BCM4313_CHIP_ID:
- return (sih->chipst & CST4313_SPROM_PRESENT) != 0;
+ return (sii->chipst & CST4313_SPROM_PRESENT) != 0;
default:
return true;
}
@@ -2067,9 +1214,11 @@ bool ai_is_sprom_available(struct si_pub *sih)
bool ai_is_otp_disabled(struct si_pub *sih)
{
- switch (sih->chip) {
+ struct si_info *sii = (struct si_info *)sih;
+
+ switch (ai_get_chip_id(sih)) {
case BCM4313_CHIP_ID:
- return (sih->chipst & CST4313_OTP_PRESENT) == 0;
+ return (sii->chipst & CST4313_OTP_PRESENT) == 0;
/* These chips always have their OTP on */
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
@@ -2077,3 +1226,15 @@ bool ai_is_otp_disabled(struct si_pub *sih)
return false;
}
}
+
+uint ai_get_buscoretype(struct si_pub *sih)
+{
+ struct si_info *sii = (struct si_info *)sih;
+ return sii->buscore->id.id;
+}
+
+uint ai_get_buscorerev(struct si_pub *sih)
+{
+ struct si_info *sii = (struct si_info *)sih;
+ return sii->buscore->id.rev;
+}
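
A minimal sketch of the access pattern this file now uses: look the chipcommon core up once with ai_findcore() and go through the bcma accessors, instead of switching the current core index and restoring it afterwards. CHIPCREGOFFS() is assumed to expand to offsetof(struct chipcregs, field), matching its use in the hunks above; cc_srom_present() is only an illustrative name.

/* Illustrative helper: query the SROM-present bit via the bcma handle. */
static bool cc_srom_present(struct si_pub *sih)
{
	struct bcma_device *cc;

	/* no save/restore of a core index is needed any more */
	cc = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
	if (cc == NULL)
		return false;

	return (bcma_read32(cc, CHIPCREGOFFS(sromcontrol)) & SRC_PRESENT) != 0;
}
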
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
index 106a742..f84c6f7 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/aiutils.h
@@ -17,6 +17,8 @@
#ifndef _BRCM_AIUTILS_H_
#define _BRCM_AIUTILS_H_
+#include <linux/bcma/bcma.h>
+
#include "types.h"
/*
@@ -38,88 +40,12 @@
/* PCIE Client Mode sb2pcitranslation2 (2 ZettaBytes), high 32 bits */
#define SI_PCIE_DMA_H32 0x80000000
-/* core codes */
-#define NODEV_CORE_ID 0x700 /* Invalid coreid */
-#define CC_CORE_ID 0x800 /* chipcommon core */
-#define ILINE20_CORE_ID 0x801 /* iline20 core */
-#define SRAM_CORE_ID 0x802 /* sram core */
-#define SDRAM_CORE_ID 0x803 /* sdram core */
-#define PCI_CORE_ID 0x804 /* pci core */
-#define MIPS_CORE_ID 0x805 /* mips core */
-#define ENET_CORE_ID 0x806 /* enet mac core */
-#define CODEC_CORE_ID 0x807 /* v90 codec core */
-#define USB_CORE_ID 0x808 /* usb 1.1 host/device core */
-#define ADSL_CORE_ID 0x809 /* ADSL core */
-#define ILINE100_CORE_ID 0x80a /* iline100 core */
-#define IPSEC_CORE_ID 0x80b /* ipsec core */
-#define UTOPIA_CORE_ID 0x80c /* utopia core */
-#define PCMCIA_CORE_ID 0x80d /* pcmcia core */
-#define SOCRAM_CORE_ID 0x80e /* internal memory core */
-#define MEMC_CORE_ID 0x80f /* memc sdram core */
-#define OFDM_CORE_ID 0x810 /* OFDM phy core */
-#define EXTIF_CORE_ID 0x811 /* external interface core */
-#define D11_CORE_ID 0x812 /* 802.11 MAC core */
-#define APHY_CORE_ID 0x813 /* 802.11a phy core */
-#define BPHY_CORE_ID 0x814 /* 802.11b phy core */
-#define GPHY_CORE_ID 0x815 /* 802.11g phy core */
-#define MIPS33_CORE_ID 0x816 /* mips3302 core */
-#define USB11H_CORE_ID 0x817 /* usb 1.1 host core */
-#define USB11D_CORE_ID 0x818 /* usb 1.1 device core */
-#define USB20H_CORE_ID 0x819 /* usb 2.0 host core */
-#define USB20D_CORE_ID 0x81a /* usb 2.0 device core */
-#define SDIOH_CORE_ID 0x81b /* sdio host core */
-#define ROBO_CORE_ID 0x81c /* roboswitch core */
-#define ATA100_CORE_ID 0x81d /* parallel ATA core */
-#define SATAXOR_CORE_ID 0x81e /* serial ATA & XOR DMA core */
-#define GIGETH_CORE_ID 0x81f /* gigabit ethernet core */
-#define PCIE_CORE_ID 0x820 /* pci express core */
-#define NPHY_CORE_ID 0x821 /* 802.11n 2x2 phy core */
-#define SRAMC_CORE_ID 0x822 /* SRAM controller core */
-#define MINIMAC_CORE_ID 0x823 /* MINI MAC/phy core */
-#define ARM11_CORE_ID 0x824 /* ARM 1176 core */
-#define ARM7S_CORE_ID 0x825 /* ARM7tdmi-s core */
-#define LPPHY_CORE_ID 0x826 /* 802.11a/b/g phy core */
-#define PMU_CORE_ID 0x827 /* PMU core */
-#define SSNPHY_CORE_ID 0x828 /* 802.11n single-stream phy core */
-#define SDIOD_CORE_ID 0x829 /* SDIO device core */
-#define ARMCM3_CORE_ID 0x82a /* ARM Cortex M3 core */
-#define HTPHY_CORE_ID 0x82b /* 802.11n 4x4 phy core */
-#define MIPS74K_CORE_ID 0x82c /* mips 74k core */
-#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */
-#define DMEMC_CORE_ID 0x82e /* DDR1/2 memory controller core */
-#define PCIERC_CORE_ID 0x82f /* PCIE Root Complex core */
-#define OCP_CORE_ID 0x830 /* OCP2OCP bridge core */
-#define SC_CORE_ID 0x831 /* shared common core */
-#define AHB_CORE_ID 0x832 /* OCP2AHB bridge core */
-#define SPIH_CORE_ID 0x833 /* SPI host core */
-#define I2S_CORE_ID 0x834 /* I2S core */
-#define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */
-#define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */
-#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */
-#define DEF_AI_COMP 0xfff /* Default component, in ai chips it
- * maps all unused address ranges
- */
-
/* chipcommon being the first core: */
#define SI_CC_IDX 0
/* SOC Interconnect types (aka chip types) */
#define SOCI_AI 1
-/* Common core control flags */
-#define SICF_BIST_EN 0x8000
-#define SICF_PME_EN 0x4000
-#define SICF_CORE_BITS 0x3ffc
-#define SICF_FGC 0x0002
-#define SICF_CLOCK_EN 0x0001
-
-/* Common core status flags */
-#define SISF_BIST_DONE 0x8000
-#define SISF_BIST_ERROR 0x4000
-#define SISF_GATED_CLK 0x2000
-#define SISF_DMA64 0x1000
-#define SISF_CORE_BITS 0x0fff
-
/* A register that is common to all cores to
* communicate w/PMU regarding clock control.
*/
@@ -220,26 +146,15 @@
* public (read-only) portion of aiutils handle returned by si_attach()
*/
struct si_pub {
- uint buscoretype; /* PCI_CORE_ID, PCIE_CORE_ID, PCMCIA_CORE_ID */
- uint buscorerev; /* buscore rev */
- uint buscoreidx; /* buscore index */
int ccrev; /* chip common core rev */
u32 cccaps; /* chip common capabilities */
- u32 cccaps_ext; /* chip common capabilities extension */
int pmurev; /* pmu core rev */
u32 pmucaps; /* pmu capabilities */
uint boardtype; /* board type */
uint boardvendor; /* board vendor */
- uint boardflags; /* board flags */
- uint boardflags2; /* board flags2 */
uint chip; /* chip number */
uint chiprev; /* chip revision */
uint chippkg; /* chip package option */
- u32 chipst; /* chip status */
- bool issim; /* chip is in simulation or emulation */
- uint socirev; /* SOC interconnect rev */
- bool pci_pr32414;
-
};
struct pci_dev;
@@ -255,38 +170,13 @@ struct gpioh_item {
/* misc si info needed by some of the routines */
struct si_info {
struct si_pub pub; /* back plane public state (must be first) */
- struct pci_dev *pbus; /* handle to pci bus */
- uint dev_coreid; /* the core provides driver functions */
- void *intr_arg; /* interrupt callback function arg */
- u32 (*intrsoff_fn) (void *intr_arg); /* turns chip interrupts off */
- /* restore chip interrupts */
- void (*intrsrestore_fn) (void *intr_arg, u32 arg);
- /* check if interrupts are enabled */
- bool (*intrsenabled_fn) (void *intr_arg);
-
+ struct bcma_bus *icbus; /* handle to soc interconnect bus */
+ struct pci_dev *pcibus; /* handle to pci bus */
struct pcicore_info *pch; /* PCI/E core handle */
-
+ struct bcma_device *buscore;
struct list_head var_list; /* list of srom variables */
- void __iomem *curmap; /* current regs va */
- void __iomem *regs[SI_MAXCORES]; /* other regs va */
-
- uint curidx; /* current core index */
- uint numcores; /* # discovered cores */
- uint coreid[SI_MAXCORES]; /* id of each core */
- u32 coresba[SI_MAXCORES]; /* backplane address of each core */
- void *regs2[SI_MAXCORES]; /* 2nd virtual address per core (usbh20) */
- u32 coresba2[SI_MAXCORES]; /* 2nd phys address per core (usbh20) */
- u32 coresba_size[SI_MAXCORES]; /* backplane address space size */
- u32 coresba2_size[SI_MAXCORES]; /* second address space size */
-
- void *curwrap; /* current wrapper va */
- void *wrappers[SI_MAXCORES]; /* other cores wrapper va */
- u32 wrapba[SI_MAXCORES]; /* address of controlling wrapper */
-
- u32 cia[SI_MAXCORES]; /* erom cia entry for each core */
- u32 cib[SI_MAXCORES]; /* erom cia entry for each core */
- u32 oob_router; /* oob router registers for axi */
+ u32 chipst; /* chip status */
};
/*
@@ -299,52 +189,15 @@ struct si_info {
/* AMBA Interconnect exported externs */
-extern uint ai_flag(struct si_pub *sih);
-extern void ai_setint(struct si_pub *sih, int siflag);
-extern uint ai_coreidx(struct si_pub *sih);
-extern uint ai_corevendor(struct si_pub *sih);
-extern uint ai_corerev(struct si_pub *sih);
-extern bool ai_iscoreup(struct si_pub *sih);
-extern u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val);
-extern void ai_core_cflags_wo(struct si_pub *sih, u32 mask, u32 val);
-extern u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val);
-extern uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
- uint val);
-extern void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits);
-extern void ai_core_disable(struct si_pub *sih, u32 bits);
-extern int ai_numaddrspaces(struct si_pub *sih);
-extern u32 ai_addrspace(struct si_pub *sih, uint asidx);
-extern u32 ai_addrspacesize(struct si_pub *sih, uint asidx);
-extern void ai_write_wrap_reg(struct si_pub *sih, u32 offset, u32 val);
+extern struct bcma_device *ai_findcore(struct si_pub *sih,
+ u16 coreid, u16 coreunit);
+extern u32 ai_core_cflags(struct bcma_device *core, u32 mask, u32 val);
/* === exported functions === */
-extern struct si_pub *ai_attach(void __iomem *regs, struct pci_dev *sdh);
+extern struct si_pub *ai_attach(struct bcma_bus *pbus);
extern void ai_detach(struct si_pub *sih);
-extern uint ai_coreid(struct si_pub *sih);
-extern uint ai_corerev(struct si_pub *sih);
-extern uint ai_corereg(struct si_pub *sih, uint coreidx, uint regoff, uint mask,
- uint val);
-extern void ai_write_wrapperreg(struct si_pub *sih, u32 offset, u32 val);
-extern u32 ai_core_cflags(struct si_pub *sih, u32 mask, u32 val);
-extern u32 ai_core_sflags(struct si_pub *sih, u32 mask, u32 val);
-extern bool ai_iscoreup(struct si_pub *sih);
-extern uint ai_findcoreidx(struct si_pub *sih, uint coreid, uint coreunit);
-extern void __iomem *ai_setcoreidx(struct si_pub *sih, uint coreidx);
-extern void __iomem *ai_setcore(struct si_pub *sih, uint coreid, uint coreunit);
-extern void __iomem *ai_switch_core(struct si_pub *sih, uint coreid,
- uint *origidx, uint *intr_val);
-extern void ai_restore_core(struct si_pub *sih, uint coreid, uint intr_val);
-extern void ai_core_reset(struct si_pub *sih, u32 bits, u32 resetbits);
-extern void ai_core_disable(struct si_pub *sih, u32 bits);
-extern u32 ai_alp_clock(struct si_pub *sih);
-extern u32 ai_ilp_clock(struct si_pub *sih);
+extern uint ai_cc_reg(struct si_pub *sih, uint regoff, u32 mask, u32 val);
extern void ai_pci_setup(struct si_pub *sih, uint coremask);
-extern void ai_setint(struct si_pub *sih, int siflag);
-extern bool ai_backplane64(struct si_pub *sih);
-extern void ai_register_intr_callback(struct si_pub *sih, void *intrsoff_fn,
- void *intrsrestore_fn,
- void *intrsenabled_fn, void *intr_arg);
-extern void ai_deregister_intr_callback(struct si_pub *sih);
extern void ai_clkctl_init(struct si_pub *sih);
extern u16 ai_clkctl_fast_pwrup_delay(struct si_pub *sih);
extern bool ai_clkctl_cc(struct si_pub *sih, uint mode);
@@ -359,13 +212,6 @@ extern bool ai_is_otp_disabled(struct si_pub *sih);
/* SPROM availability */
extern bool ai_is_sprom_available(struct si_pub *sih);
-/*
- * Build device path. Path size must be >= SI_DEVPATH_BUFSZ.
- * The returned path is NULL terminated and has trailing '/'.
- * Return 0 on success, nonzero otherwise.
- */
-extern int ai_devpath(struct si_pub *sih, char *path, int size);
-
extern void ai_pci_sleep(struct si_pub *sih);
extern void ai_pci_down(struct si_pub *sih);
extern void ai_pci_up(struct si_pub *sih);
@@ -375,4 +221,52 @@ extern void ai_chipcontrl_epa4331(struct si_pub *sih, bool on);
/* Enable Ex-PA for 4313 */
extern void ai_epa_4313war(struct si_pub *sih);
+extern uint ai_get_buscoretype(struct si_pub *sih);
+extern uint ai_get_buscorerev(struct si_pub *sih);
+
+static inline int ai_get_ccrev(struct si_pub *sih)
+{
+ return sih->ccrev;
+}
+
+static inline u32 ai_get_cccaps(struct si_pub *sih)
+{
+ return sih->cccaps;
+}
+
+static inline int ai_get_pmurev(struct si_pub *sih)
+{
+ return sih->pmurev;
+}
+
+static inline u32 ai_get_pmucaps(struct si_pub *sih)
+{
+ return sih->pmucaps;
+}
+
+static inline uint ai_get_boardtype(struct si_pub *sih)
+{
+ return sih->boardtype;
+}
+
+static inline uint ai_get_boardvendor(struct si_pub *sih)
+{
+ return sih->boardvendor;
+}
+
+static inline uint ai_get_chip_id(struct si_pub *sih)
+{
+ return sih->chip;
+}
+
+static inline uint ai_get_chiprev(struct si_pub *sih)
+{
+ return sih->chiprev;
+}
+
+static inline uint ai_get_chippkg(struct si_pub *sih)
+{
+ return sih->chippkg;
+}
+
#endif /* _BRCM_AIUTILS_H_ */
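
With the direct si_pub field accesses gone, callers are expected to go through the inline getters above. A short sketch of the intended usage; the revision test below is hypothetical and only shows the style:

static bool is_bcm4313_rev0(struct si_pub *sih)
{
	/* compare against identifiers exposed by the read-only getters */
	return ai_get_chip_id(sih) == BCM4313_CHIP_ID &&
	       ai_get_chiprev(sih) == 0;
}
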
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
index 7f27dbd..30b5887 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
@@ -649,7 +649,7 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
len = roundup(len, 4);
ampdu_len += (len + (ndelim + 1) * AMPDU_DELIMITER_LEN);
- dma_len += (u16) brcmu_pkttotlen(p);
+ dma_len += (u16) p->len;
BCMMSG(wlc->wiphy, "wl%d: ampdu_len %d"
" seg_cnt %d null delim %d\n",
@@ -741,9 +741,7 @@ brcms_c_sendampdu(struct ampdu_info *ampdu, struct brcms_txq_info *qi,
if (p) {
if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
((u8) (p->priority) == tid)) {
-
- plen = brcmu_pkttotlen(p) +
- AMPDU_MAX_MPDU_OVERHEAD;
+ plen = p->len + AMPDU_MAX_MPDU_OVERHEAD;
plen = max(scb_ampdu->min_len, plen);
if ((plen + ampdu_len) > max_ampdu_bytes) {
@@ -1053,17 +1051,13 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
}
/* either retransmit or send bar if ack not recd */
if (!ack_recd) {
- struct ieee80211_tx_rate *txrate =
- tx_info->status.rates;
- if (retry && (txrate[0].count < (int)retry_limit)) {
+ if (retry && (ini->txretry[index] < (int)retry_limit)) {
ini->txretry[index]++;
ini->tx_in_transit--;
/*
			 * Use high precedence for retransmit to
* give some punch
*/
- /* brcms_c_txq_enq(wlc, scb, p,
- * BRCMS_PRIO_TO_PREC(tid)); */
brcms_c_txq_enq(wlc, scb, p,
BRCMS_PRIO_TO_HI_PREC(tid));
} else {
@@ -1076,9 +1070,9 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
IEEE80211_TX_STAT_AMPDU_NO_BACK;
skb_pull(p, D11_PHY_HDR_LEN);
skb_pull(p, D11_TXH_LEN);
- wiphy_err(wiphy, "%s: BA Timeout, seq %d, in_"
- "transit %d\n", "AMPDU status", seq,
- ini->tx_in_transit);
+ BCMMSG(wiphy,
+ "BA Timeout, seq %d, in_transit %d\n",
+ seq, ini->tx_in_transit);
ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
p);
}
@@ -1120,14 +1114,17 @@ brcms_c_ampdu_dotxstatus(struct ampdu_info *ampdu, struct scb *scb,
u8 status_delay = 0;
/* wait till the next 8 bytes of txstatus is available */
- while (((s1 = R_REG(&wlc->regs->frmtxstatus)) & TXS_V) == 0) {
+ s1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus));
+ while ((s1 & TXS_V) == 0) {
udelay(1);
status_delay++;
if (status_delay > 10)
return; /* error condition */
+ s1 = bcma_read32(wlc->hw->d11core,
+ D11REGOFFS(frmtxstatus));
}
- s2 = R_REG(&wlc->regs->frmtxstatus2);
+ s2 = bcma_read32(wlc->hw->d11core, D11REGOFFS(frmtxstatus2));
}
if (scb) {
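
The converted txstatus wait above is a bounded poll on the d11 core: read frmtxstatus through the bcma handle, spin for at most about ten microseconds, and bail out if the valid bit never appears. A condensed sketch of that pattern, assuming the d11core handle on struct brcms_hardware introduced by this series and <linux/delay.h> for udelay():

/* Sketch: poll frmtxstatus until TXS_V is set or the wait budget runs out. */
static bool txstatus_poll_valid(struct brcms_hardware *hw, u32 *s1)
{
	u8 spins = 0;

	*s1 = bcma_read32(hw->d11core, D11REGOFFS(frmtxstatus));
	while ((*s1 & TXS_V) == 0) {
		if (++spins > 10)
			return false;		/* error condition */
		udelay(1);
		*s1 = bcma_read32(hw->d11core, D11REGOFFS(frmtxstatus));
	}
	return true;
}
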
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
index 89ad1b7..55e9f45 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/channel.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/channel.c
@@ -1153,121 +1153,6 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec,
&txpwr);
}
-#ifdef POWER_DBG
-static void wlc_phy_txpower_limits_dump(struct txpwr_limits *txpwr)
-{
- int i;
- char buf[80];
- char fraction[4][4] = { " ", ".25", ".5 ", ".75" };
-
- sprintf(buf, "CCK ");
- for (i = 0; i < BRCMS_NUM_RATES_CCK; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->cck[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->cck[i] % BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "20 MHz OFDM SISO ");
- for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->ofdm[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->ofdm[i] % BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "20 MHz OFDM CDD ");
- for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->ofdm_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->ofdm_cdd[i] % BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "40 MHz OFDM SISO ");
- for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->ofdm_40_siso[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->ofdm_40_siso[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "40 MHz OFDM CDD ");
- for (i = 0; i < BRCMS_NUM_RATES_OFDM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->ofdm_40_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->ofdm_40_cdd[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "20 MHz MCS0-7 SISO ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_20_siso[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_20_siso[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "20 MHz MCS0-7 CDD ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_20_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_20_cdd[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "20 MHz MCS0-7 STBC ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_20_stbc[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_20_stbc[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "20 MHz MCS8-15 SDM ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_20_mimo[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_20_mimo[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "40 MHz MCS0-7 SISO ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_40_siso[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_40_siso[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "40 MHz MCS0-7 CDD ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_40_cdd[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_40_cdd[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "40 MHz MCS0-7 STBC ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_1_STREAM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_40_stbc[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_40_stbc[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- printk(KERN_DEBUG "%s\n", buf);
-
- sprintf(buf, "40 MHz MCS8-15 SDM ");
- for (i = 0; i < BRCMS_NUM_RATES_MCS_2_STREAM; i++)
- sprintf(buf[strlen(buf)], " %2d%s",
- txpwr->mcs_40_mimo[i] / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs_40_mimo[i] %
- BRCMS_TXPWR_DB_FACTOR]);
- }
- printk(KERN_DEBUG "%s\n", buf);
-
- printk(KERN_DEBUG "MCS32 %2d%s\n",
- txpwr->mcs32 / BRCMS_TXPWR_DB_FACTOR,
- fraction[txpwr->mcs32 % BRCMS_TXPWR_DB_FACTOR]);
-}
-#endif /* POWER_DBG */
-
void
brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
struct txpwr_limits *txpwr)
@@ -1478,9 +1363,6 @@ brcms_c_channel_reg_limits(struct brcms_cm_info *wlc_cm, u16 chanspec,
txpwr->mcs_40_stbc[i] = txpwr->mcs_40_cdd[i];
}
-#ifdef POWER_DBG
- wlc_phy_txpower_limits_dump(txpwr);
-#endif
return;
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/d11.h b/drivers/net/wireless/brcm80211/brcmsmac/d11.h
index ed51616..1948cb2 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/d11.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/d11.h
@@ -430,6 +430,9 @@ struct d11regs {
u16 PAD[0x380]; /* 0x800 - 0xEFE */
};
+/* d11 register field offset */
+#define D11REGOFFS(field) offsetof(struct d11regs, field)
+
#define PIHR_BASE 0x0400 /* byte address of packed IHR region */
/* biststatus */
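
D11REGOFFS() only turns a struct d11regs field name into the byte offset expected by the bcma accessors, so register access no longer needs a mapped struct pointer. A hedged sketch, assuming maccontrol is a field of struct d11regs as in the existing layout:

/* Old style:  val = R_REG(&regs->maccontrol);
 * New style:  val = bcma_read32(core, D11REGOFFS(maccontrol));
 */
static u32 d11_set_maccontrol_bits(struct bcma_device *d11core, u32 bits)
{
	/* read-modify-write through the offset-based helpers */
	bcma_set32(d11core, D11REGOFFS(maccontrol), bits);
	return bcma_read32(d11core, D11REGOFFS(maccontrol));
}
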
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
index 6ebec8f..2e90a9a 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.c
@@ -13,8 +13,10 @@
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/slab.h>
-#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/pci.h>
@@ -22,6 +24,14 @@
#include <aiutils.h>
#include "types.h"
#include "dma.h"
+#include "soc.h"
+
+/*
+ * dma register field offset calculation
+ */
+#define DMA64REGOFFS(field) offsetof(struct dma64regs, field)
+#define DMA64TXREGOFFS(di, field) (di->d64txregbase + DMA64REGOFFS(field))
+#define DMA64RXREGOFFS(di, field) (di->d64rxregbase + DMA64REGOFFS(field))
/*
* DMA hardware requires each descriptor ring to be 8kB aligned, and fit within
@@ -168,26 +178,25 @@
/* debug/trace */
#ifdef BCMDBG
-#define DMA_ERROR(args) \
- do { \
- if (!(*di->msg_level & 1)) \
- ; \
- else \
- printk args; \
- } while (0)
-#define DMA_TRACE(args) \
- do { \
- if (!(*di->msg_level & 2)) \
- ; \
- else \
- printk args; \
- } while (0)
+#define DMA_ERROR(fmt, ...) \
+do { \
+ if (*di->msg_level & 1) \
+ pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
+} while (0)
+#define DMA_TRACE(fmt, ...) \
+do { \
+ if (*di->msg_level & 2) \
+ pr_debug("%s: " fmt, __func__, ##__VA_ARGS__); \
+} while (0)
#else
-#define DMA_ERROR(args)
-#define DMA_TRACE(args)
+#define DMA_ERROR(fmt, ...) \
+ no_printk(fmt, ##__VA_ARGS__)
+#define DMA_TRACE(fmt, ...) \
+ no_printk(fmt, ##__VA_ARGS__)
#endif /* BCMDBG */
-#define DMA_NONE(args)
+#define DMA_NONE(fmt, ...) \
+ no_printk(fmt, ##__VA_ARGS__)
#define MAXNAMEL 8 /* 8 char names */
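
The reworked DMA_ERROR/DMA_TRACE macros take printf-style arguments directly, and the disabled build still routes them through no_printk() so the format string and arguments keep being type-checked. The same pattern in isolation, with MYDRV_DBG as a purely illustrative name:

#include <linux/printk.h>

#ifdef MYDRV_DEBUG
#define MYDRV_DBG(fmt, ...) \
	pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)
#else
/* compiles the output away but keeps gcc's format checking */
#define MYDRV_DBG(fmt, ...) \
	no_printk(fmt, ##__VA_ARGS__)
#endif
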
@@ -218,15 +227,16 @@ struct dma_info {
uint *msg_level; /* message level pointer */
char name[MAXNAMEL]; /* callers name for diag msgs */
- struct pci_dev *pbus; /* bus handle */
+ struct bcma_device *core;
+ struct device *dmadev;
bool dma64; /* this dma engine is operating in 64-bit mode */
bool addrext; /* this dma engine supports DmaExtendedAddrChanges */
/* 64-bit dma tx engine registers */
- struct dma64regs __iomem *d64txregs;
+ uint d64txregbase;
/* 64-bit dma rx engine registers */
- struct dma64regs __iomem *d64rxregs;
+ uint d64rxregbase;
/* pointer to dma64 tx descriptor ring */
struct dma64desc *txd64;
/* pointer to dma64 rx descriptor ring */
@@ -361,7 +371,7 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
uint dmactrlflags;
if (di == NULL) {
- DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n"));
+ DMA_ERROR("NULL dma handle\n");
return 0;
}
@@ -373,15 +383,16 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
if (dmactrlflags & DMA_CTRL_PEN) {
u32 control;
- control = R_REG(&di->d64txregs->control);
- W_REG(&di->d64txregs->control,
+ control = bcma_read32(di->core, DMA64TXREGOFFS(di, control));
+ bcma_write32(di->core, DMA64TXREGOFFS(di, control),
control | D64_XC_PD);
- if (R_REG(&di->d64txregs->control) & D64_XC_PD)
+ if (bcma_read32(di->core, DMA64TXREGOFFS(di, control)) &
+ D64_XC_PD)
/* We *can* disable it so it is supported,
* restore control register
*/
- W_REG(&di->d64txregs->control,
- control);
+ bcma_write32(di->core, DMA64TXREGOFFS(di, control),
+ control);
else
/* Not supported, don't allow it to be enabled */
dmactrlflags &= ~DMA_CTRL_PEN;
@@ -392,12 +403,12 @@ static uint _dma_ctrlflags(struct dma_info *di, uint mask, uint flags)
return dmactrlflags;
}
-static bool _dma64_addrext(struct dma64regs __iomem *dma64regs)
+static bool _dma64_addrext(struct dma_info *di, uint ctrl_offset)
{
u32 w;
- OR_REG(&dma64regs->control, D64_XC_AE);
- w = R_REG(&dma64regs->control);
- AND_REG(&dma64regs->control, ~D64_XC_AE);
+ bcma_set32(di->core, ctrl_offset, D64_XC_AE);
+ w = bcma_read32(di->core, ctrl_offset);
+ bcma_mask32(di->core, ctrl_offset, ~D64_XC_AE);
return (w & D64_XC_AE) == D64_XC_AE;
}
@@ -410,15 +421,15 @@ static bool _dma_isaddrext(struct dma_info *di)
/* DMA64 supports full 32- or 64-bit operation. AE is always valid */
/* not all tx or rx channel are available */
- if (di->d64txregs != NULL) {
- if (!_dma64_addrext(di->d64txregs))
- DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have "
- "AE set\n", di->name));
+ if (di->d64txregbase != 0) {
+ if (!_dma64_addrext(di, DMA64TXREGOFFS(di, control)))
+ DMA_ERROR("%s: DMA64 tx doesn't have AE set\n",
+ di->name);
return true;
- } else if (di->d64rxregs != NULL) {
- if (!_dma64_addrext(di->d64rxregs))
- DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have "
- "AE set\n", di->name));
+ } else if (di->d64rxregbase != 0) {
+ if (!_dma64_addrext(di, DMA64RXREGOFFS(di, control)))
+ DMA_ERROR("%s: DMA64 rx doesn't have AE set\n",
+ di->name);
return true;
}
@@ -430,14 +441,14 @@ static bool _dma_descriptor_align(struct dma_info *di)
u32 addrl;
/* Check to see if the descriptors need to be aligned on 4K/8K or not */
- if (di->d64txregs != NULL) {
- W_REG(&di->d64txregs->addrlow, 0xff0);
- addrl = R_REG(&di->d64txregs->addrlow);
+ if (di->d64txregbase != 0) {
+ bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow), 0xff0);
+ addrl = bcma_read32(di->core, DMA64TXREGOFFS(di, addrlow));
if (addrl != 0)
return false;
- } else if (di->d64rxregs != NULL) {
- W_REG(&di->d64rxregs->addrlow, 0xff0);
- addrl = R_REG(&di->d64rxregs->addrlow);
+ } else if (di->d64rxregbase != 0) {
+ bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow), 0xff0);
+ addrl = bcma_read32(di->core, DMA64RXREGOFFS(di, addrlow));
if (addrl != 0)
return false;
}
@@ -448,7 +459,7 @@ static bool _dma_descriptor_align(struct dma_info *di)
* Descriptor table must start at the DMA hardware dictated alignment, so
* allocated memory must be large enough to support this requirement.
*/
-static void *dma_alloc_consistent(struct pci_dev *pdev, uint size,
+static void *dma_alloc_consistent(struct dma_info *di, uint size,
u16 align_bits, uint *alloced,
dma_addr_t *pap)
{
@@ -458,7 +469,7 @@ static void *dma_alloc_consistent(struct pci_dev *pdev, uint size,
size += align;
*alloced = size;
}
- return pci_alloc_consistent(pdev, size, pap);
+ return dma_alloc_coherent(di->dmadev, size, pap, GFP_ATOMIC);
}
static
@@ -484,7 +495,7 @@ static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
u32 desc_strtaddr;
u32 alignbytes = 1 << *alignbits;
- va = dma_alloc_consistent(di->pbus, size, *alignbits, alloced, descpa);
+ va = dma_alloc_consistent(di, size, *alignbits, alloced, descpa);
if (NULL == va)
return NULL;
@@ -493,8 +504,8 @@ static void *dma_ringalloc(struct dma_info *di, u32 boundary, uint size,
if (((desc_strtaddr + size - 1) & boundary) != (desc_strtaddr
& boundary)) {
*alignbits = dma_align_sizetobits(size);
- pci_free_consistent(di->pbus, size, va, *descpa);
- va = dma_alloc_consistent(di->pbus, size, *alignbits,
+ dma_free_coherent(di->dmadev, size, va, *descpa);
+ va = dma_alloc_consistent(di, size, *alignbits,
alloced, descpa);
}
return va;
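
The allocator now goes through the generic DMA API using the device handle taken from the bcma core (di->dmadev is set from core->dma_dev later in this patch) instead of the pci_dev. A minimal sketch of the matching alloc/free pair; the helper names are illustrative only:

static void *desc_ring_alloc(struct dma_info *di, uint size, dma_addr_t *pap)
{
	/* coherent allocation from the core's DMA device, as above */
	return dma_alloc_coherent(di->dmadev, size, pap, GFP_ATOMIC);
}

static void desc_ring_free(struct dma_info *di, uint size, void *va,
			   dma_addr_t pa)
{
	dma_free_coherent(di->dmadev, size, va, pa);
}
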
@@ -519,8 +530,8 @@ static bool dma64_alloc(struct dma_info *di, uint direction)
va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
&alloced, &di->txdpaorig);
if (va == NULL) {
- DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd)"
- " failed\n", di->name));
+ DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
+ di->name);
return false;
}
align = (1 << align_bits);
@@ -533,8 +544,8 @@ static bool dma64_alloc(struct dma_info *di, uint direction)
va = dma_ringalloc(di, D64RINGALIGN, size, &align_bits,
&alloced, &di->rxdpaorig);
if (va == NULL) {
- DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd)"
- " failed\n", di->name));
+ DMA_ERROR("%s: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
+ di->name);
return false;
}
align = (1 << align_bits);
@@ -554,12 +565,13 @@ static bool _dma_alloc(struct dma_info *di, uint direction)
}
struct dma_pub *dma_attach(char *name, struct si_pub *sih,
- void __iomem *dmaregstx, void __iomem *dmaregsrx,
- uint ntxd, uint nrxd,
- uint rxbufsize, int rxextheadroom,
- uint nrxpost, uint rxoffset, uint *msg_level)
+ struct bcma_device *core,
+ uint txregbase, uint rxregbase, uint ntxd, uint nrxd,
+ uint rxbufsize, int rxextheadroom,
+ uint nrxpost, uint rxoffset, uint *msg_level)
{
struct dma_info *di;
+ u8 rev = core->id.rev;
uint size;
/* allocate private info structure */
@@ -570,11 +582,13 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
di->msg_level = msg_level ? msg_level : &dma_msg_level;
- di->dma64 = ((ai_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64);
+ di->dma64 =
+ ((bcma_aread32(core, BCMA_IOST) & SISF_DMA64) == SISF_DMA64);
- /* init dma reg pointer */
- di->d64txregs = (struct dma64regs __iomem *) dmaregstx;
- di->d64rxregs = (struct dma64regs __iomem *) dmaregsrx;
+ /* init dma reg info */
+ di->core = core;
+ di->d64txregbase = txregbase;
+ di->d64rxregbase = rxregbase;
/*
* Default flags (which can be changed by the driver calling
@@ -583,17 +597,17 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
*/
_dma_ctrlflags(di, DMA_CTRL_ROC | DMA_CTRL_PEN, 0);
- DMA_TRACE(("%s: dma_attach: %s flags 0x%x ntxd %d nrxd %d "
- "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
- "dmaregstx %p dmaregsrx %p\n", name, "DMA64",
- di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
- rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx));
+ DMA_TRACE("%s: %s flags 0x%x ntxd %d nrxd %d "
+ "rxbufsize %d rxextheadroom %d nrxpost %d rxoffset %d "
+ "txregbase %u rxregbase %u\n", name, "DMA64",
+ di->dma.dmactrlflags, ntxd, nrxd, rxbufsize,
+ rxextheadroom, nrxpost, rxoffset, txregbase, rxregbase);
/* make a private copy of our callers name */
strncpy(di->name, name, MAXNAMEL);
di->name[MAXNAMEL - 1] = '\0';
- di->pbus = ((struct si_info *)sih)->pbus;
+ di->dmadev = core->dma_dev;
/* save tunables */
di->ntxd = (u16) ntxd;
@@ -625,12 +639,12 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
di->dataoffsetlow = di->ddoffsetlow;
di->dataoffsethigh = di->ddoffsethigh;
/* WAR64450 : DMACtl.Addr ext fields are not supported in SDIOD core. */
- if ((ai_coreid(sih) == SDIOD_CORE_ID)
- && ((ai_corerev(sih) > 0) && (ai_corerev(sih) <= 2)))
- di->addrext = 0;
- else if ((ai_coreid(sih) == I2S_CORE_ID) &&
- ((ai_corerev(sih) == 0) || (ai_corerev(sih) == 1)))
- di->addrext = 0;
+ if ((core->id.id == SDIOD_CORE_ID)
+ && ((rev > 0) && (rev <= 2)))
+ di->addrext = false;
+ else if ((core->id.id == I2S_CORE_ID) &&
+ ((rev == 0) || (rev == 1)))
+ di->addrext = false;
else
di->addrext = _dma_isaddrext(di);
@@ -645,8 +659,8 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
di->dmadesc_align = 4; /* 16 byte alignment */
}
- DMA_NONE(("DMA descriptor align_needed %d, align %d\n",
- di->aligndesc_4k, di->dmadesc_align));
+ DMA_NONE("DMA descriptor align_needed %d, align %d\n",
+ di->aligndesc_4k, di->dmadesc_align);
/* allocate tx packet pointer vector */
if (ntxd) {
@@ -684,21 +698,21 @@ struct dma_pub *dma_attach(char *name, struct si_pub *sih,
if ((di->ddoffsetlow != 0) && !di->addrext) {
if (di->txdpa > SI_PCI_DMA_SZ) {
- DMA_ERROR(("%s: dma_attach: txdpa 0x%x: addrext not "
- "supported\n", di->name, (u32)di->txdpa));
+ DMA_ERROR("%s: txdpa 0x%x: addrext not supported\n",
+ di->name, (u32)di->txdpa);
goto fail;
}
if (di->rxdpa > SI_PCI_DMA_SZ) {
- DMA_ERROR(("%s: dma_attach: rxdpa 0x%x: addrext not "
- "supported\n", di->name, (u32)di->rxdpa));
+ DMA_ERROR("%s: rxdpa 0x%x: addrext not supported\n",
+ di->name, (u32)di->rxdpa);
goto fail;
}
}
- DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x "
- "dataoffsethigh " "0x%x addrext %d\n", di->ddoffsetlow,
- di->ddoffsethigh, di->dataoffsetlow, di->dataoffsethigh,
- di->addrext));
+ DMA_TRACE("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh 0x%x addrext %d\n",
+ di->ddoffsetlow, di->ddoffsethigh,
+ di->dataoffsetlow, di->dataoffsethigh,
+ di->addrext);
return (struct dma_pub *) di;
@@ -744,17 +758,17 @@ void dma_detach(struct dma_pub *pub)
{
struct dma_info *di = (struct dma_info *)pub;
- DMA_TRACE(("%s: dma_detach\n", di->name));
+ DMA_TRACE("%s:\n", di->name);
/* free dma descriptor rings */
if (di->txd64)
- pci_free_consistent(di->pbus, di->txdalloc,
- ((s8 *)di->txd64 - di->txdalign),
- (di->txdpaorig));
+ dma_free_coherent(di->dmadev, di->txdalloc,
+ ((s8 *)di->txd64 - di->txdalign),
+ (di->txdpaorig));
if (di->rxd64)
- pci_free_consistent(di->pbus, di->rxdalloc,
- ((s8 *)di->rxd64 - di->rxdalign),
- (di->rxdpaorig));
+ dma_free_coherent(di->dmadev, di->rxdalloc,
+ ((s8 *)di->rxd64 - di->rxdalign),
+ (di->rxdpaorig));
/* free packet pointer vectors */
kfree(di->txp);
@@ -779,11 +793,15 @@ _dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
if ((di->ddoffsetlow == 0)
|| !(pa & PCI32ADDR_HIGH)) {
if (direction == DMA_TX) {
- W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
- W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
+ bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
+ pa + di->ddoffsetlow);
+ bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
+ di->ddoffsethigh);
} else {
- W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
- W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
+ bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
+ pa + di->ddoffsetlow);
+ bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
+ di->ddoffsethigh);
}
} else {
/* DMA64 32bits address extension */
@@ -794,15 +812,19 @@ _dma_ddtable_init(struct dma_info *di, uint direction, dma_addr_t pa)
pa &= ~PCI32ADDR_HIGH;
if (direction == DMA_TX) {
- W_REG(&di->d64txregs->addrlow, pa + di->ddoffsetlow);
- W_REG(&di->d64txregs->addrhigh, di->ddoffsethigh);
- SET_REG(&di->d64txregs->control,
- D64_XC_AE, (ae << D64_XC_AE_SHIFT));
+ bcma_write32(di->core, DMA64TXREGOFFS(di, addrlow),
+ pa + di->ddoffsetlow);
+ bcma_write32(di->core, DMA64TXREGOFFS(di, addrhigh),
+ di->ddoffsethigh);
+ bcma_maskset32(di->core, DMA64TXREGOFFS(di, control),
+ D64_XC_AE, (ae << D64_XC_AE_SHIFT));
} else {
- W_REG(&di->d64rxregs->addrlow, pa + di->ddoffsetlow);
- W_REG(&di->d64rxregs->addrhigh, di->ddoffsethigh);
- SET_REG(&di->d64rxregs->control,
- D64_RC_AE, (ae << D64_RC_AE_SHIFT));
+ bcma_write32(di->core, DMA64RXREGOFFS(di, addrlow),
+ pa + di->ddoffsetlow);
+ bcma_write32(di->core, DMA64RXREGOFFS(di, addrhigh),
+ di->ddoffsethigh);
+ bcma_maskset32(di->core, DMA64RXREGOFFS(di, control),
+ D64_RC_AE, (ae << D64_RC_AE_SHIFT));
}
}
}
@@ -812,11 +834,11 @@ static void _dma_rxenable(struct dma_info *di)
uint dmactrlflags = di->dma.dmactrlflags;
u32 control;
- DMA_TRACE(("%s: dma_rxenable\n", di->name));
+ DMA_TRACE("%s:\n", di->name);
- control =
- (R_REG(&di->d64rxregs->control) & D64_RC_AE) |
- D64_RC_RE;
+ control = D64_RC_RE | (bcma_read32(di->core,
+ DMA64RXREGOFFS(di, control)) &
+ D64_RC_AE);
if ((dmactrlflags & DMA_CTRL_PEN) == 0)
control |= D64_RC_PD;
@@ -824,7 +846,7 @@ static void _dma_rxenable(struct dma_info *di)
if (dmactrlflags & DMA_CTRL_ROC)
control |= D64_RC_OC;
- W_REG(&di->d64rxregs->control,
+ bcma_write32(di->core, DMA64RXREGOFFS(di, control),
((di->rxoffset << D64_RC_RO_SHIFT) | control));
}
@@ -832,7 +854,7 @@ void dma_rxinit(struct dma_pub *pub)
{
struct dma_info *di = (struct dma_info *)pub;
- DMA_TRACE(("%s: dma_rxinit\n", di->name));
+ DMA_TRACE("%s:\n", di->name);
if (di->nrxd == 0)
return;
@@ -867,7 +889,8 @@ static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
return NULL;
curr =
- B2I(((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) -
+ B2I(((bcma_read32(di->core,
+ DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) -
di->rcvptrbase) & D64_RS0_CD_MASK, struct dma64desc);
/* ignore curr if forceall */
@@ -881,7 +904,7 @@ static struct sk_buff *dma64_getnextrxp(struct dma_info *di, bool forceall)
pa = le32_to_cpu(di->rxd64[i].addrlow) - di->dataoffsetlow;
/* clear this packet from the descriptor ring */
- pci_unmap_single(di->pbus, pa, di->rxbufsize, PCI_DMA_FROMDEVICE);
+ dma_unmap_single(di->dmadev, pa, di->rxbufsize, DMA_FROM_DEVICE);
di->rxd64[i].addrlow = cpu_to_le32(0xdeadbeef);
di->rxd64[i].addrhigh = cpu_to_le32(0xdeadbeef);
@@ -901,7 +924,7 @@ static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall)
/*
* !! rx entry routine
- * returns a pointer to the next frame received, or NULL if there are no more
+ * returns the number of packets in the next frame, or 0 if there are no more
* if DMA_CTRL_RXMULTI is defined, DMA scattering(multiple buffers) is
* supported with pkts chain
* otherwise, it's treated as giant pkt and will be tossed.
@@ -909,74 +932,83 @@ static struct sk_buff *_dma_getnextrxp(struct dma_info *di, bool forceall)
* buffer data. After it reaches the max size of buffer, the data continues
* in next DMA descriptor buffer WITHOUT DMA header
*/
-struct sk_buff *dma_rx(struct dma_pub *pub)
+int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list)
{
struct dma_info *di = (struct dma_info *)pub;
- struct sk_buff *p, *head, *tail;
+ struct sk_buff_head dma_frames;
+ struct sk_buff *p, *next;
uint len;
uint pkt_len;
int resid = 0;
+ int pktcnt = 1;
+ skb_queue_head_init(&dma_frames);
next_frame:
- head = _dma_getnextrxp(di, false);
- if (head == NULL)
- return NULL;
+ p = _dma_getnextrxp(di, false);
+ if (p == NULL)
+ return 0;
- len = le16_to_cpu(*(__le16 *) (head->data));
- DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
- dma_spin_for_len(len, head);
+ len = le16_to_cpu(*(__le16 *) (p->data));
+ DMA_TRACE("%s: dma_rx len %d\n", di->name, len);
+ dma_spin_for_len(len, p);
/* set actual length */
pkt_len = min((di->rxoffset + len), di->rxbufsize);
- __skb_trim(head, pkt_len);
+ __skb_trim(p, pkt_len);
+ skb_queue_tail(&dma_frames, p);
resid = len - (di->rxbufsize - di->rxoffset);
/* check for single or multi-buffer rx */
if (resid > 0) {
- tail = head;
while ((resid > 0) && (p = _dma_getnextrxp(di, false))) {
- tail->next = p;
pkt_len = min_t(uint, resid, di->rxbufsize);
__skb_trim(p, pkt_len);
-
- tail = p;
+ skb_queue_tail(&dma_frames, p);
resid -= di->rxbufsize;
+ pktcnt++;
}
#ifdef BCMDBG
if (resid > 0) {
uint cur;
cur =
- B2I(((R_REG(&di->d64rxregs->status0) &
- D64_RS0_CD_MASK) -
- di->rcvptrbase) & D64_RS0_CD_MASK,
- struct dma64desc);
- DMA_ERROR(("dma_rx, rxin %d rxout %d, hw_curr %d\n",
- di->rxin, di->rxout, cur));
+ B2I(((bcma_read32(di->core,
+ DMA64RXREGOFFS(di, status0)) &
+ D64_RS0_CD_MASK) - di->rcvptrbase) &
+ D64_RS0_CD_MASK, struct dma64desc);
+ DMA_ERROR("rxin %d rxout %d, hw_curr %d\n",
+ di->rxin, di->rxout, cur);
}
#endif /* BCMDBG */
if ((di->dma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) {
- DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n",
- di->name, len));
- brcmu_pkt_buf_free_skb(head);
+ DMA_ERROR("%s: bad frame length (%d)\n",
+ di->name, len);
+ skb_queue_walk_safe(&dma_frames, p, next) {
+ skb_unlink(p, &dma_frames);
+ brcmu_pkt_buf_free_skb(p);
+ }
di->dma.rxgiants++;
+ pktcnt = 1;
goto next_frame;
}
}
- return head;
+ skb_queue_splice_tail(&dma_frames, skb_list);
+ return pktcnt;
}
static bool dma64_rxidle(struct dma_info *di)
{
- DMA_TRACE(("%s: dma_rxidle\n", di->name));
+ DMA_TRACE("%s:\n", di->name);
if (di->nrxd == 0)
return true;
- return ((R_REG(&di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
- (R_REG(&di->d64rxregs->ptr) & D64_RS0_CD_MASK));
+ return ((bcma_read32(di->core,
+ DMA64RXREGOFFS(di, status0)) & D64_RS0_CD_MASK) ==
+ (bcma_read32(di->core, DMA64RXREGOFFS(di, ptr)) &
+ D64_RS0_CD_MASK));
}
/*
@@ -1010,7 +1042,7 @@ bool dma_rxfill(struct dma_pub *pub)
n = di->nrxpost - nrxdactive(di, rxin, rxout);
- DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));
+ DMA_TRACE("%s: post %d\n", di->name, n);
if (di->rxbufsize > BCMEXTRAHDROOM)
extra_offset = di->rxextrahdrroom;
@@ -1023,11 +1055,9 @@ bool dma_rxfill(struct dma_pub *pub)
p = brcmu_pkt_buf_get_skb(di->rxbufsize + extra_offset);
if (p == NULL) {
- DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n",
- di->name));
+ DMA_ERROR("%s: out of rxbufs\n", di->name);
if (i == 0 && dma64_rxidle(di)) {
- DMA_ERROR(("%s: rxfill64: ring is empty !\n",
- di->name));
+ DMA_ERROR("%s: ring is empty !\n", di->name);
ring_empty = true;
}
di->dma.rxnobuf++;
@@ -1042,8 +1072,8 @@ bool dma_rxfill(struct dma_pub *pub)
*/
*(u32 *) (p->data) = 0;
- pa = pci_map_single(di->pbus, p->data,
- di->rxbufsize, PCI_DMA_FROMDEVICE);
+ pa = dma_map_single(di->dmadev, p->data, di->rxbufsize,
+ DMA_FROM_DEVICE);
/* save the free packet pointer */
di->rxp[rxout] = p;
@@ -1061,7 +1091,7 @@ bool dma_rxfill(struct dma_pub *pub)
di->rxout = rxout;
/* update the chip lastdscr pointer */
- W_REG(&di->d64rxregs->ptr,
+ bcma_write32(di->core, DMA64RXREGOFFS(di, ptr),
di->rcvptrbase + I2B(rxout, struct dma64desc));
return ring_empty;
@@ -1072,7 +1102,7 @@ void dma_rxreclaim(struct dma_pub *pub)
struct dma_info *di = (struct dma_info *)pub;
struct sk_buff *p;
- DMA_TRACE(("%s: dma_rxreclaim\n", di->name));
+ DMA_TRACE("%s:\n", di->name);
while ((p = _dma_getnextrxp(di, true)))
brcmu_pkt_buf_free_skb(p);
@@ -1103,7 +1133,7 @@ void dma_txinit(struct dma_pub *pub)
struct dma_info *di = (struct dma_info *)pub;
u32 control = D64_XC_XE;
- DMA_TRACE(("%s: dma_txinit\n", di->name));
+ DMA_TRACE("%s:\n", di->name);
if (di->ntxd == 0)
return;
@@ -1122,7 +1152,7 @@ void dma_txinit(struct dma_pub *pub)
if ((di->dma.dmactrlflags & DMA_CTRL_PEN) == 0)
control |= D64_XC_PD;
- OR_REG(&di->d64txregs->control, control);
+ bcma_set32(di->core, DMA64TXREGOFFS(di, control), control);
/* DMA engine with alignment requirement requires table to be inited
* before enabling the engine
@@ -1135,24 +1165,24 @@ void dma_txsuspend(struct dma_pub *pub)
{
struct dma_info *di = (struct dma_info *)pub;
- DMA_TRACE(("%s: dma_txsuspend\n", di->name));
+ DMA_TRACE("%s:\n", di->name);
if (di->ntxd == 0)
return;
- OR_REG(&di->d64txregs->control, D64_XC_SE);
+ bcma_set32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
}
void dma_txresume(struct dma_pub *pub)
{
struct dma_info *di = (struct dma_info *)pub;
- DMA_TRACE(("%s: dma_txresume\n", di->name));
+ DMA_TRACE("%s:\n", di->name);
if (di->ntxd == 0)
return;
- AND_REG(&di->d64txregs->control, ~D64_XC_SE);
+ bcma_mask32(di->core, DMA64TXREGOFFS(di, control), ~D64_XC_SE);
}
bool dma_txsuspended(struct dma_pub *pub)
@@ -1160,8 +1190,9 @@ bool dma_txsuspended(struct dma_pub *pub)
struct dma_info *di = (struct dma_info *)pub;
return (di->ntxd == 0) ||
- ((R_REG(&di->d64txregs->control) & D64_XC_SE) ==
- D64_XC_SE);
+ ((bcma_read32(di->core,
+ DMA64TXREGOFFS(di, control)) & D64_XC_SE) ==
+ D64_XC_SE);
}
void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
@@ -1169,11 +1200,11 @@ void dma_txreclaim(struct dma_pub *pub, enum txd_range range)
struct dma_info *di = (struct dma_info *)pub;
struct sk_buff *p;
- DMA_TRACE(("%s: dma_txreclaim %s\n", di->name,
- (range == DMA_RANGE_ALL) ? "all" :
- ((range ==
- DMA_RANGE_TRANSMITTED) ? "transmitted" :
- "transferred")));
+ DMA_TRACE("%s: %s\n",
+ di->name,
+ range == DMA_RANGE_ALL ? "all" :
+ range == DMA_RANGE_TRANSMITTED ? "transmitted" :
+ "transferred");
if (di->txin == di->txout)
return;
@@ -1194,16 +1225,17 @@ bool dma_txreset(struct dma_pub *pub)
return true;
/* suspend tx DMA first */
- W_REG(&di->d64txregs->control, D64_XC_SE);
+ bcma_write32(di->core, DMA64TXREGOFFS(di, control), D64_XC_SE);
SPINWAIT(((status =
- (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
- != D64_XS0_XS_DISABLED) && (status != D64_XS0_XS_IDLE)
- && (status != D64_XS0_XS_STOPPED), 10000);
+ (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
+ D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED) &&
+ (status != D64_XS0_XS_IDLE) && (status != D64_XS0_XS_STOPPED),
+ 10000);
- W_REG(&di->d64txregs->control, 0);
+ bcma_write32(di->core, DMA64TXREGOFFS(di, control), 0);
SPINWAIT(((status =
- (R_REG(&di->d64txregs->status0) & D64_XS0_XS_MASK))
- != D64_XS0_XS_DISABLED), 10000);
+ (bcma_read32(di->core, DMA64TXREGOFFS(di, status0)) &
+ D64_XS0_XS_MASK)) != D64_XS0_XS_DISABLED), 10000);
/* wait for the last transaction to complete */
udelay(300);
@@ -1219,10 +1251,10 @@ bool dma_rxreset(struct dma_pub *pub)
if (di->nrxd == 0)
return true;
- W_REG(&di->d64rxregs->control, 0);
+ bcma_write32(di->core, DMA64RXREGOFFS(di, control), 0);
SPINWAIT(((status =
- (R_REG(&di->d64rxregs->status0) & D64_RS0_RS_MASK))
- != D64_RS0_RS_DISABLED), 10000);
+ (bcma_read32(di->core, DMA64RXREGOFFS(di, status0)) &
+ D64_RS0_RS_MASK)) != D64_RS0_RS_DISABLED), 10000);
return status == D64_RS0_RS_DISABLED;
}
@@ -1233,72 +1265,58 @@ bool dma_rxreset(struct dma_pub *pub)
* the error(toss frames) could be fatal and cause many subsequent hard
* to debug problems
*/
-int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
+int dma_txfast(struct dma_pub *pub, struct sk_buff *p, bool commit)
{
struct dma_info *di = (struct dma_info *)pub;
- struct sk_buff *p, *next;
unsigned char *data;
uint len;
u16 txout;
u32 flags = 0;
dma_addr_t pa;
- DMA_TRACE(("%s: dma_txfast\n", di->name));
+ DMA_TRACE("%s:\n", di->name);
txout = di->txout;
/*
- * Walk the chain of packet buffers
- * allocating and initializing transmit descriptor entries.
+ * obtain and initialize transmit descriptor entry.
*/
- for (p = p0; p; p = next) {
- data = p->data;
- len = p->len;
- next = p->next;
+ data = p->data;
+ len = p->len;
- /* return nonzero if out of tx descriptors */
- if (nexttxd(di, txout) == di->txin)
- goto outoftxd;
+ /* no use to transmit a zero length packet */
+ if (len == 0)
+ return 0;
- if (len == 0)
- continue;
+ /* return nonzero if out of tx descriptors */
+ if (nexttxd(di, txout) == di->txin)
+ goto outoftxd;
- /* get physical address of buffer start */
- pa = pci_map_single(di->pbus, data, len, PCI_DMA_TODEVICE);
+ /* get physical address of buffer start */
+ pa = dma_map_single(di->dmadev, data, len, DMA_TO_DEVICE);
- flags = 0;
- if (p == p0)
- flags |= D64_CTRL1_SOF;
-
- /* With a DMA segment list, Descriptor table is filled
- * using the segment list instead of looping over
- * buffers in multi-chain DMA. Therefore, EOF for SGLIST
- * is when end of segment list is reached.
- */
- if (next == NULL)
- flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
- if (txout == (di->ntxd - 1))
- flags |= D64_CTRL1_EOT;
-
- dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
+ /* With a DMA segment list, Descriptor table is filled
+ * using the segment list instead of looping over
+ * buffers in multi-chain DMA. Therefore, EOF for SGLIST
+ * is when end of segment list is reached.
+ */
+ flags = D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF;
+ if (txout == (di->ntxd - 1))
+ flags |= D64_CTRL1_EOT;
- txout = nexttxd(di, txout);
- }
+ dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
- /* if last txd eof not set, fix it */
- if (!(flags & D64_CTRL1_EOF))
- di->txd64[prevtxd(di, txout)].ctrl1 =
- cpu_to_le32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF);
+ txout = nexttxd(di, txout);
/* save the packet */
- di->txp[prevtxd(di, txout)] = p0;
+ di->txp[prevtxd(di, txout)] = p;
/* bump the tx descriptor index */
di->txout = txout;
/* kick the chip */
if (commit)
- W_REG(&di->d64txregs->ptr,
+ bcma_write32(di->core, DMA64TXREGOFFS(di, ptr),
di->xmtptrbase + I2B(txout, struct dma64desc));
/* tx flow control */
@@ -1307,8 +1325,8 @@ int dma_txfast(struct dma_pub *pub, struct sk_buff *p0, bool commit)
return 0;
outoftxd:
- DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name));
- brcmu_pkt_buf_free_skb(p0);
+ DMA_ERROR("%s: out of txds !!!\n", di->name);
+ brcmu_pkt_buf_free_skb(p);
di->dma.txavail = 0;
di->dma.txnobuf++;
return -1;
@@ -1331,11 +1349,11 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
u16 active_desc;
struct sk_buff *txp;
- DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name,
- (range == DMA_RANGE_ALL) ? "all" :
- ((range ==
- DMA_RANGE_TRANSMITTED) ? "transmitted" :
- "transferred")));
+ DMA_TRACE("%s: %s\n",
+ di->name,
+ range == DMA_RANGE_ALL ? "all" :
+ range == DMA_RANGE_TRANSMITTED ? "transmitted" :
+ "transferred");
if (di->ntxd == 0)
return NULL;
@@ -1346,16 +1364,15 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
if (range == DMA_RANGE_ALL)
end = di->txout;
else {
- struct dma64regs __iomem *dregs = di->d64txregs;
-
- end = (u16) (B2I(((R_REG(&dregs->status0) &
- D64_XS0_CD_MASK) -
- di->xmtptrbase) & D64_XS0_CD_MASK,
- struct dma64desc));
+ end = (u16) (B2I(((bcma_read32(di->core,
+ DMA64TXREGOFFS(di, status0)) &
+ D64_XS0_CD_MASK) - di->xmtptrbase) &
+ D64_XS0_CD_MASK, struct dma64desc));
if (range == DMA_RANGE_TRANSFERED) {
active_desc =
- (u16) (R_REG(&dregs->status1) &
+ (u16)(bcma_read32(di->core,
+ DMA64TXREGOFFS(di, status1)) &
D64_XS1_AD_MASK);
active_desc =
(active_desc - di->xmtptrbase) & D64_XS0_CD_MASK;
@@ -1384,7 +1401,7 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
txp = di->txp[i];
di->txp[i] = NULL;
- pci_unmap_single(di->pbus, pa, size, PCI_DMA_TODEVICE);
+ dma_unmap_single(di->dmadev, pa, size, DMA_TO_DEVICE);
}
di->txin = i;
@@ -1395,8 +1412,8 @@ struct sk_buff *dma_getnexttxp(struct dma_pub *pub, enum txd_range range)
return txp;
bogus:
- DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d "
- "force %d\n", start, end, di->txout, forceall));
+ DMA_NONE("bogus curr: start %d end %d txout %d\n",
+ start, end, di->txout);
return NULL;
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/dma.h b/drivers/net/wireless/brcm80211/brcmsmac/dma.h
index ebc5bc5..cc269ee 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/dma.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/dma.h
@@ -18,6 +18,7 @@
#define _BRCM_DMA_H_
#include <linux/delay.h>
+#include <linux/skbuff.h>
#include "types.h" /* forward structure declarations */
/* map/unmap direction */
@@ -74,13 +75,14 @@ struct dma_pub {
};
extern struct dma_pub *dma_attach(char *name, struct si_pub *sih,
- void __iomem *dmaregstx, void __iomem *dmaregsrx,
- uint ntxd, uint nrxd,
- uint rxbufsize, int rxextheadroom,
- uint nrxpost, uint rxoffset, uint *msg_level);
+ struct bcma_device *d11core,
+ uint txregbase, uint rxregbase,
+ uint ntxd, uint nrxd,
+ uint rxbufsize, int rxextheadroom,
+ uint nrxpost, uint rxoffset, uint *msg_level);
void dma_rxinit(struct dma_pub *pub);
-struct sk_buff *dma_rx(struct dma_pub *pub);
+int dma_rx(struct dma_pub *pub, struct sk_buff_head *skb_list);
bool dma_rxfill(struct dma_pub *pub);
bool dma_rxreset(struct dma_pub *pub);
bool dma_txreset(struct dma_pub *pub);
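
With the new prototype, dma_rx() appends all buffers of one received frame to a caller-supplied sk_buff_head and returns the buffer count instead of handing back a chained skb. A sketch of the consuming side; process_frame() stands in for whatever the caller does with each buffer:

static void rx_one_frame(struct dma_pub *dma)
{
	struct sk_buff_head frames;
	struct sk_buff *skb;

	skb_queue_head_init(&frames);

	if (dma_rx(dma, &frames) == 0)
		return;				/* nothing pending */

	/* hand every buffer of the frame up, in order */
	while ((skb = skb_dequeue(&frames)) != NULL)
		process_frame(skb);		/* placeholder consumer */
}
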
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index 0d8a9cd..448ab9c 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -17,11 +17,11 @@
#define __UNDEF_NO_VERSION__
#include <linux/etherdevice.h>
-#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/module.h>
+#include <linux/bcma/bcma.h>
#include <net/mac80211.h>
#include <defs.h>
#include "nicpci.h"
@@ -40,10 +40,10 @@
#define MAC_FILTERS (FIF_PROMISC_IN_BSS | \
FIF_ALLMULTI | \
FIF_FCSFAIL | \
- FIF_PLCPFAIL | \
FIF_CONTROL | \
FIF_OTHER_BSS | \
- FIF_BCN_PRBRESP_PROMISC)
+ FIF_BCN_PRBRESP_PROMISC | \
+ FIF_PSPOLL)
#define CHAN2GHZ(channel, freqency, chflags) { \
.band = IEEE80211_BAND_2GHZ, \
@@ -87,16 +87,14 @@ MODULE_DESCRIPTION("Broadcom 802.11n wireless LAN driver.");
MODULE_SUPPORTED_DEVICE("Broadcom 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");
-/* recognized PCI IDs */
-static DEFINE_PCI_DEVICE_TABLE(brcms_pci_id_table) = {
- { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) }, /* 43225 2G */
- { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) }, /* 43224 DUAL */
- { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, /* 4313 DUAL */
- { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) }, /* 43224 Ven */
- {0}
-};
-MODULE_DEVICE_TABLE(pci, brcms_pci_id_table);
+/* recognized BCMA Core IDs */
+static struct bcma_device_id brcms_coreid_table[] = {
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 23, BCMA_ANY_CLASS),
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 24, BCMA_ANY_CLASS),
+ BCMA_CORETABLE_END
+};
+MODULE_DEVICE_TABLE(bcma, brcms_coreid_table);
#ifdef BCMDBG
static int msglevel = 0xdeadbeef;
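
The core-id table above is matched by the bcma bus driver; registration typically ties it to a struct bcma_driver as sketched below. The probe/remove names follow this patch (brcms_remove() appears further down), but the actual registration hunk is not quoted here, so treat the details as an assumption:

static struct bcma_driver brcms_bcma_driver = {
	.name		= KBUILD_MODNAME,
	.probe		= brcms_bcma_probe,	/* assumed probe entry point */
	.remove		= brcms_remove,
	.id_table	= brcms_coreid_table,
};

static int __init brcms_module_init(void)
{
	return bcma_driver_register(&brcms_bcma_driver);
}

static void __exit brcms_module_exit(void)
{
	bcma_driver_unregister(&brcms_bcma_driver);
}

module_init(brcms_module_init);
module_exit(brcms_module_exit);
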
@@ -216,8 +214,7 @@ static const struct ieee80211_supported_band brcms_band_2GHz_nphy_template = {
.ht_cap = {
/* from include/linux/ieee80211.h */
.cap = IEEE80211_HT_CAP_GRN_FLD |
- IEEE80211_HT_CAP_SGI_20 |
- IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_40MHZ_INTOLERANT,
+ IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40,
.ht_supported = true,
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
.ampdu_density = AMPDU_DEF_MPDU_DENSITY,
@@ -238,8 +235,7 @@ static const struct ieee80211_supported_band brcms_band_5GHz_nphy_template = {
BRCMS_LEGACY_5G_RATE_OFFSET,
.ht_cap = {
.cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 |
- IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_40MHZ_INTOLERANT, /* No 40 mhz yet */
+ IEEE80211_HT_CAP_SGI_40,
.ht_supported = true,
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
.ampdu_density = AMPDU_DEF_MPDU_DENSITY,
@@ -287,6 +283,7 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
{
struct brcms_info *wl = hw->priv;
bool blocked;
+ int err;
ieee80211_wake_queues(hw);
spin_lock_bh(&wl->lock);
@@ -295,57 +292,69 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
if (!blocked)
wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
- return 0;
+ spin_lock_bh(&wl->lock);
+ /* avoid acknowledging frames before a non-monitor device is added */
+ wl->mute_tx = true;
+
+ if (!wl->pub->up)
+ err = brcms_up(wl);
+ else
+ err = -ENODEV;
+ spin_unlock_bh(&wl->lock);
+
+ if (err != 0)
+ wiphy_err(hw->wiphy, "%s: brcms_up() returned %d\n", __func__,
+ err);
+ return err;
}
static void brcms_ops_stop(struct ieee80211_hw *hw)
{
+ struct brcms_info *wl = hw->priv;
+ int status;
+
ieee80211_stop_queues(hw);
+
+ if (wl->wlc == NULL)
+ return;
+
+ spin_lock_bh(&wl->lock);
+ status = brcms_c_chipmatch(wl->wlc->hw->vendorid,
+ wl->wlc->hw->deviceid);
+ spin_unlock_bh(&wl->lock);
+ if (!status) {
+ wiphy_err(wl->wiphy,
+ "wl: brcms_ops_stop: chipmatch failed\n");
+ return;
+ }
+
+ /* put driver in down state */
+ spin_lock_bh(&wl->lock);
+ brcms_down(wl);
+ spin_unlock_bh(&wl->lock);
}
static int
brcms_ops_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
- struct brcms_info *wl;
- int err;
+ struct brcms_info *wl = hw->priv;
/* Just STA for now */
- if (vif->type != NL80211_IFTYPE_AP &&
- vif->type != NL80211_IFTYPE_MESH_POINT &&
- vif->type != NL80211_IFTYPE_STATION &&
- vif->type != NL80211_IFTYPE_WDS &&
- vif->type != NL80211_IFTYPE_ADHOC) {
+ if (vif->type != NL80211_IFTYPE_STATION) {
wiphy_err(hw->wiphy, "%s: Attempt to add type %d, only"
" STA for now\n", __func__, vif->type);
return -EOPNOTSUPP;
}
- wl = hw->priv;
- spin_lock_bh(&wl->lock);
- if (!wl->pub->up)
- err = brcms_up(wl);
- else
- err = -ENODEV;
- spin_unlock_bh(&wl->lock);
-
- if (err != 0)
- wiphy_err(hw->wiphy, "%s: brcms_up() returned %d\n", __func__,
- err);
+ wl->mute_tx = false;
+ brcms_c_mute(wl->wlc, false);
- return err;
+ return 0;
}
static void
brcms_ops_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
- struct brcms_info *wl;
-
- wl = hw->priv;
-
- /* put driver in down state */
- spin_lock_bh(&wl->lock);
- brcms_down(wl);
- spin_unlock_bh(&wl->lock);
}
static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
@@ -362,7 +371,7 @@ static int brcms_ops_config(struct ieee80211_hw *hw, u32 changed)
conf->listen_interval);
}
if (changed & IEEE80211_CONF_CHANGE_MONITOR)
- wiphy_err(wiphy, "%s: change monitor mode: %s (implement)\n",
+ wiphy_dbg(wiphy, "%s: change monitor mode: %s\n",
__func__, conf->flags & IEEE80211_CONF_MONITOR ?
"true" : "false");
if (changed & IEEE80211_CONF_CHANGE_PS)
@@ -539,29 +548,25 @@ brcms_ops_configure_filter(struct ieee80211_hw *hw,
changed_flags &= MAC_FILTERS;
*total_flags &= MAC_FILTERS;
+
if (changed_flags & FIF_PROMISC_IN_BSS)
- wiphy_err(wiphy, "FIF_PROMISC_IN_BSS\n");
+ wiphy_dbg(wiphy, "FIF_PROMISC_IN_BSS\n");
if (changed_flags & FIF_ALLMULTI)
- wiphy_err(wiphy, "FIF_ALLMULTI\n");
+ wiphy_dbg(wiphy, "FIF_ALLMULTI\n");
if (changed_flags & FIF_FCSFAIL)
- wiphy_err(wiphy, "FIF_FCSFAIL\n");
- if (changed_flags & FIF_PLCPFAIL)
- wiphy_err(wiphy, "FIF_PLCPFAIL\n");
+ wiphy_dbg(wiphy, "FIF_FCSFAIL\n");
if (changed_flags & FIF_CONTROL)
- wiphy_err(wiphy, "FIF_CONTROL\n");
+ wiphy_dbg(wiphy, "FIF_CONTROL\n");
if (changed_flags & FIF_OTHER_BSS)
- wiphy_err(wiphy, "FIF_OTHER_BSS\n");
- if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
- spin_lock_bh(&wl->lock);
- if (*total_flags & FIF_BCN_PRBRESP_PROMISC) {
- wl->pub->mac80211_state |= MAC80211_PROMISC_BCNS;
- brcms_c_mac_bcn_promisc_change(wl->wlc, 1);
- } else {
- brcms_c_mac_bcn_promisc_change(wl->wlc, 0);
- wl->pub->mac80211_state &= ~MAC80211_PROMISC_BCNS;
- }
- spin_unlock_bh(&wl->lock);
- }
+ wiphy_dbg(wiphy, "FIF_OTHER_BSS\n");
+ if (changed_flags & FIF_PSPOLL)
+ wiphy_dbg(wiphy, "FIF_PSPOLL\n");
+ if (changed_flags & FIF_BCN_PRBRESP_PROMISC)
+ wiphy_dbg(wiphy, "FIF_BCN_PRBRESP_PROMISC\n");
+
+ spin_lock_bh(&wl->lock);
+ brcms_c_mac_promisc(wl->wlc, *total_flags);
+ spin_unlock_bh(&wl->lock);
return;
}
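The filter hunk above stops toggling beacon promiscuity by hand and instead hands the whole *total_flags word to brcms_c_mac_promisc() under the perimeter lock; the per-flag branches become debug prints. As an aside, a table-driven form of that logging is sketched below in standalone C; the flag values are placeholders, not the real FIF_* constants.

#include <stdio.h>

#define FLAG_ALLMULTI   0x01u
#define FLAG_FCSFAIL    0x02u
#define FLAG_CONTROL    0x04u
#define FLAG_OTHER_BSS  0x08u

static const struct { unsigned int bit; const char *name; } flag_names[] = {
        { FLAG_ALLMULTI,  "FIF_ALLMULTI"  },
        { FLAG_FCSFAIL,   "FIF_FCSFAIL"   },
        { FLAG_CONTROL,   "FIF_CONTROL"   },
        { FLAG_OTHER_BSS, "FIF_OTHER_BSS" },
};

static void log_changed_flags(unsigned int changed)
{
        size_t i;

        for (i = 0; i < sizeof(flag_names) / sizeof(flag_names[0]); i++)
                if (changed & flag_names[i].bit)
                        printf("changed: %s\n", flag_names[i].name);
}

int main(void)
{
        log_changed_flags(FLAG_FCSFAIL | FLAG_OTHER_BSS);
        return 0;
}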
@@ -609,13 +614,6 @@ brcms_ops_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
wl->pub->global_ampdu->scb = scb;
wl->pub->global_ampdu->max_pdu = 16;
- sta->ht_cap.ht_supported = true;
- sta->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
- sta->ht_cap.ampdu_density = AMPDU_DEF_MPDU_DENSITY;
- sta->ht_cap.cap = IEEE80211_HT_CAP_GRN_FLD |
- IEEE80211_HT_CAP_SGI_20 |
- IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_40MHZ_INTOLERANT;
-
/*
* minstrel_ht initiates addBA on our behalf by calling
* ieee80211_start_tx_ba_session()
@@ -724,7 +722,7 @@ static const struct ieee80211_ops brcms_ops = {
};
/*
- * is called in brcms_pci_probe() context, therefore no locking required.
+ * is called in brcms_bcma_probe() context, therefore no locking required.
*/
static int brcms_set_hint(struct brcms_info *wl, char *abbrev)
{
@@ -864,56 +862,26 @@ static void brcms_free(struct brcms_info *wl)
#endif
kfree(t);
}
-
- /*
- * unregister_netdev() calls get_stats() which may read chip
- * registers so we cannot unmap the chip registers until
- * after calling unregister_netdev() .
- */
- if (wl->regsva)
- iounmap(wl->regsva);
-
- wl->regsva = NULL;
}
/*
-* called from both kernel as from this kernel module.
+* called both from the kernel and from this kernel module (error flow on attach)

* precondition: perimeter lock is not acquired.
*/
-static void brcms_remove(struct pci_dev *pdev)
+static void brcms_remove(struct bcma_device *pdev)
{
- struct brcms_info *wl;
- struct ieee80211_hw *hw;
- int status;
-
- hw = pci_get_drvdata(pdev);
- wl = hw->priv;
- if (!wl) {
- pr_err("wl: brcms_remove: pci_get_drvdata failed\n");
- return;
- }
+ struct ieee80211_hw *hw = bcma_get_drvdata(pdev);
+ struct brcms_info *wl = hw->priv;
- spin_lock_bh(&wl->lock);
- status = brcms_c_chipmatch(pdev->vendor, pdev->device);
- spin_unlock_bh(&wl->lock);
- if (!status) {
- wiphy_err(wl->wiphy, "wl: brcms_remove: chipmatch "
- "failed\n");
- return;
- }
if (wl->wlc) {
wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false);
wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
ieee80211_unregister_hw(hw);
- spin_lock_bh(&wl->lock);
- brcms_down(wl);
- spin_unlock_bh(&wl->lock);
}
- pci_disable_device(pdev);
brcms_free(wl);
- pci_set_drvdata(pdev, NULL);
+ bcma_set_drvdata(pdev, NULL);
ieee80211_free_hw(hw);
}
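For reference, the remove path above relies on the usual drvdata/priv layering: the bcma device's drvdata slot holds the ieee80211_hw, and struct brcms_info is carved out of hw->priv. A minimal standalone sketch of that layering, with stand-in types rather than the real mac80211/bcma structures:

#include <stdio.h>
#include <stdlib.h>

struct fake_hw {
        void *drvdata;   /* stands in for the bcma device's drvdata slot */
        char priv[64];   /* stands in for ieee80211_hw::priv */
};

struct fake_info {
        int unit;
};

int main(void)
{
        struct fake_hw *hw = calloc(1, sizeof(*hw));
        struct fake_info *wl;

        if (!hw)
                return 1;

        wl = (struct fake_info *)hw->priv;   /* private state lives in priv[] */
        wl->unit = 0;
        hw->drvdata = hw;                    /* probe: bcma_set_drvdata(pdev, hw) */

        /* remove: bcma_get_drvdata(pdev) gives hw back, hw->priv gives wl */
        wl = (struct fake_info *)((struct fake_hw *)hw->drvdata)->priv;
        printf("unit=%d\n", wl->unit);

        free(hw);
        return 0;
}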
@@ -1021,11 +989,9 @@ static int ieee_hw_init(struct ieee80211_hw *hw)
* it as static.
*
*
- * is called in brcms_pci_probe() context, therefore no locking required.
+ * is called in brcms_bcma_probe() context, therefore no locking required.
*/
-static struct brcms_info *brcms_attach(u16 vendor, u16 device,
- resource_size_t regs,
- struct pci_dev *btparam, uint irq)
+static struct brcms_info *brcms_attach(struct bcma_device *pdev)
{
struct brcms_info *wl = NULL;
int unit, err;
@@ -1039,7 +1005,7 @@ static struct brcms_info *brcms_attach(u16 vendor, u16 device,
return NULL;
/* allocate private info */
- hw = pci_get_drvdata(btparam); /* btparam == pdev */
+ hw = bcma_get_drvdata(pdev);
if (hw != NULL)
wl = hw->priv;
if (WARN_ON(hw == NULL) || WARN_ON(wl == NULL))
@@ -1051,26 +1017,20 @@ static struct brcms_info *brcms_attach(u16 vendor, u16 device,
/* setup the bottom half handler */
tasklet_init(&wl->tasklet, brcms_dpc, (unsigned long) wl);
- wl->regsva = ioremap_nocache(regs, PCI_BAR0_WINSZ);
- if (wl->regsva == NULL) {
- wiphy_err(wl->wiphy, "wl%d: ioremap() failed\n", unit);
- goto fail;
- }
spin_lock_init(&wl->lock);
spin_lock_init(&wl->isr_lock);
/* prepare ucode */
- if (brcms_request_fw(wl, btparam) < 0) {
+ if (brcms_request_fw(wl, pdev->bus->host_pci) < 0) {
wiphy_err(wl->wiphy, "%s: Failed to find firmware usually in "
"%s\n", KBUILD_MODNAME, "/lib/firmware/brcm");
brcms_release_fw(wl);
- brcms_remove(btparam);
+ brcms_remove(pdev);
return NULL;
}
/* common load-time initialization */
- wl->wlc = brcms_c_attach(wl, vendor, device, unit, false,
- wl->regsva, btparam, &err);
+ wl->wlc = brcms_c_attach((void *)wl, pdev, unit, false, &err);
brcms_release_fw(wl);
if (!wl->wlc) {
wiphy_err(wl->wiphy, "%s: attach() failed with code %d\n",
@@ -1081,15 +1041,13 @@ static struct brcms_info *brcms_attach(u16 vendor, u16 device,
wl->pub->ieee_hw = hw;
- /* disable mpc */
- brcms_c_set_radio_mpc(wl->wlc, false);
-
/* register our interrupt handler */
- if (request_irq(irq, brcms_isr, IRQF_SHARED, KBUILD_MODNAME, wl)) {
+ if (request_irq(pdev->bus->host_pci->irq, brcms_isr,
+ IRQF_SHARED, KBUILD_MODNAME, wl)) {
wiphy_err(wl->wiphy, "wl%d: request_irq() failed\n", unit);
goto fail;
}
- wl->irq = irq;
+ wl->irq = pdev->bus->host_pci->irq;
/* register module */
brcms_c_module_register(wl->pub, "linux", wl, NULL);
@@ -1136,37 +1094,18 @@ fail:
*
* Perimeter lock is initialized in the course of this function.
*/
-static int __devinit
-brcms_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int __devinit brcms_bcma_probe(struct bcma_device *pdev)
{
- int rc;
struct brcms_info *wl;
struct ieee80211_hw *hw;
- u32 val;
-
- dev_info(&pdev->dev, "bus %d slot %d func %d irq %d\n",
- pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn), pdev->irq);
- if ((pdev->vendor != PCI_VENDOR_ID_BROADCOM) ||
- ((pdev->device != 0x0576) &&
- ((pdev->device & 0xff00) != 0x4300) &&
- ((pdev->device & 0xff00) != 0x4700) &&
- ((pdev->device < 43000) || (pdev->device > 43999))))
- return -ENODEV;
+ dev_info(&pdev->dev, "mfg %x core %x rev %d class %d irq %d\n",
+ pdev->id.manuf, pdev->id.id, pdev->id.rev, pdev->id.class,
+ pdev->bus->host_pci->irq);
- rc = pci_enable_device(pdev);
- if (rc) {
- pr_err("%s: Cannot enable device %d-%d_%d\n",
- __func__, pdev->bus->number, PCI_SLOT(pdev->devfn),
- PCI_FUNC(pdev->devfn));
+ if ((pdev->id.manuf != BCMA_MANUF_BCM) ||
+ (pdev->id.id != BCMA_CORE_80211))
return -ENODEV;
- }
- pci_set_master(pdev);
-
- pci_read_config_dword(pdev, 0x40, &val);
- if ((val & 0x0000ff00) != 0)
- pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
hw = ieee80211_alloc_hw(sizeof(struct brcms_info), &brcms_ops);
if (!hw) {
@@ -1176,14 +1115,11 @@ brcms_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
SET_IEEE80211_DEV(hw, &pdev->dev);
- pci_set_drvdata(pdev, hw);
+ bcma_set_drvdata(pdev, hw);
memset(hw->priv, 0, sizeof(*wl));
- wl = brcms_attach(pdev->vendor, pdev->device,
- pci_resource_start(pdev, 0), pdev,
- pdev->irq);
-
+ wl = brcms_attach(pdev);
if (!wl) {
pr_err("%s: %s: brcms_attach failed!\n", KBUILD_MODNAME,
__func__);
@@ -1192,16 +1128,16 @@ brcms_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
}
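The new probe above no longer whitelists PCI device IDs; it only checks that the bcma core is a Broadcom 802.11 core. A standalone sketch of that match logic follows; the numeric IDs are placeholders, not the BCMA_* constants.

#include <stdio.h>

#define MANUF_BCM   0x4243u
#define CORE_80211  0x812u

struct core_id {
        unsigned int manuf;
        unsigned int id;
};

static int match_core(const struct core_id *id)
{
        if (id->manuf != MANUF_BCM || id->id != CORE_80211)
                return -1;              /* stands in for -ENODEV */
        return 0;
}

int main(void)
{
        struct core_id wifi  = { MANUF_BCM, CORE_80211 };
        struct core_id other = { MANUF_BCM, 0x800u };

        printf("%d %d\n", match_core(&wifi), match_core(&other));
        return 0;
}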
-static int brcms_suspend(struct pci_dev *pdev, pm_message_t state)
+static int brcms_suspend(struct bcma_device *pdev)
{
struct brcms_info *wl;
struct ieee80211_hw *hw;
- hw = pci_get_drvdata(pdev);
+ hw = bcma_get_drvdata(pdev);
wl = hw->priv;
if (!wl) {
wiphy_err(wl->wiphy,
- "brcms_suspend: pci_get_drvdata failed\n");
+ "brcms_suspend: bcma_get_drvdata failed\n");
return -ENODEV;
}
@@ -1210,60 +1146,28 @@ static int brcms_suspend(struct pci_dev *pdev, pm_message_t state)
wl->pub->hw_up = false;
spin_unlock_bh(&wl->lock);
- pci_save_state(pdev);
- pci_disable_device(pdev);
- return pci_set_power_state(pdev, PCI_D3hot);
+ pr_debug("brcms_suspend ok\n");
+
+ return 0;
}
-static int brcms_resume(struct pci_dev *pdev)
+static int brcms_resume(struct bcma_device *pdev)
{
- struct brcms_info *wl;
- struct ieee80211_hw *hw;
- int err = 0;
- u32 val;
-
- hw = pci_get_drvdata(pdev);
- wl = hw->priv;
- if (!wl) {
- wiphy_err(wl->wiphy,
- "wl: brcms_resume: pci_get_drvdata failed\n");
- return -ENODEV;
- }
-
- err = pci_set_power_state(pdev, PCI_D0);
- if (err)
- return err;
-
- pci_restore_state(pdev);
-
- err = pci_enable_device(pdev);
- if (err)
- return err;
-
- pci_set_master(pdev);
-
- pci_read_config_dword(pdev, 0x40, &val);
- if ((val & 0x0000ff00) != 0)
- pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
-
- /*
- * done. driver will be put in up state
- * in brcms_ops_add_interface() call.
- */
- return err;
+ pr_debug("brcms_resume ok\n");
+ return 0;
}
-static struct pci_driver brcms_pci_driver = {
+static struct bcma_driver brcms_bcma_driver = {
.name = KBUILD_MODNAME,
- .probe = brcms_pci_probe,
+ .probe = brcms_bcma_probe,
.suspend = brcms_suspend,
.resume = brcms_resume,
.remove = __devexit_p(brcms_remove),
- .id_table = brcms_pci_id_table,
+ .id_table = brcms_coreid_table,
};
/**
- * This is the main entry point for the WL driver.
+ * This is the main entry point for the brcmsmac driver.
*
* This function determines if a device pointed to by pdev is a WL device,
* and if so, performs a brcms_attach() on it.
@@ -1278,26 +1182,24 @@ static int __init brcms_module_init(void)
brcm_msg_level = msglevel;
#endif /* BCMDBG */
- error = pci_register_driver(&brcms_pci_driver);
+ error = bcma_driver_register(&brcms_bcma_driver);
+ printk(KERN_ERR "%s: register returned %d\n", __func__, error);
if (!error)
return 0;
-
-
return error;
}
/**
- * This function unloads the WL driver from the system.
+ * This function unloads the brcmsmac driver from the system.
*
- * This function unconditionally unloads the WL driver module from the
+ * This function unconditionally unloads the brcmsmac driver module from the
* system.
*
*/
static void __exit brcms_module_exit(void)
{
- pci_unregister_driver(&brcms_pci_driver);
-
+ bcma_driver_unregister(&brcms_bcma_driver);
}
module_init(brcms_module_init);
@@ -1319,8 +1221,7 @@ void brcms_init(struct brcms_info *wl)
{
BCMMSG(wl->pub->ieee_hw->wiphy, "wl%d\n", wl->pub->unit);
brcms_reset(wl);
-
- brcms_c_init(wl->wlc);
+ brcms_c_init(wl->wlc, wl->mute_tx);
}
/*
@@ -1332,11 +1233,19 @@ uint brcms_reset(struct brcms_info *wl)
brcms_c_reset(wl->wlc);
/* dpc will not be rescheduled */
- wl->resched = 0;
+ wl->resched = false;
return 0;
}
+void brcms_fatal_error(struct brcms_info *wl)
+{
+ wiphy_err(wl->wlc->wiphy, "wl%d: fatal error, reinitializing\n",
+ wl->wlc->pub->unit);
+ brcms_reset(wl);
+ ieee80211_restart_hw(wl->pub->ieee_hw);
+}
+
/*
* These are interrupt on/off entry points. Disable interrupts
* during interrupt state transition.
@@ -1561,11 +1470,10 @@ int brcms_ucode_init_buf(struct brcms_info *wl, void **pbuf, u32 idx)
if (le32_to_cpu(hdr->idx) == idx) {
pdata = wl->fw.fw_bin[i]->data +
le32_to_cpu(hdr->offset);
- *pbuf = kmalloc(len, GFP_ATOMIC);
+ *pbuf = kmemdup(pdata, len, GFP_ATOMIC);
if (*pbuf == NULL)
goto fail;
- memcpy(*pbuf, pdata, len);
return 0;
}
}
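The hunk above folds kmalloc() plus memcpy() into kmemdup(). Its plain-C shape, for readers outside the kernel; memdup() below is illustrative, not a kernel API:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *memdup(const void *src, size_t len)
{
        void *p = malloc(len);          /* allocate... */

        if (p)
                memcpy(p, src, len);    /* ...then copy, in one helper */
        return p;
}

int main(void)
{
        const unsigned char fw[] = { 0x01, 0x02, 0x03, 0x04 };
        unsigned char *copy = memdup(fw, sizeof(fw));

        printf("%s\n", copy ? "copied" : "alloc failed");
        free(copy);
        return 0;
}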
@@ -1578,7 +1486,7 @@ fail:
}
/*
- * Precondition: Since this function is called in brcms_pci_probe() context,
+ * Precondition: Since this function is called in brcms_bcma_probe() context,
* no locking is required.
*/
int brcms_ucode_init_uint(struct brcms_info *wl, size_t *n_bytes, u32 idx)
@@ -1618,7 +1526,7 @@ void brcms_ucode_free_buf(void *p)
/*
* checks validity of all firmware images loaded from user space
*
- * Precondition: Since this function is called in brcms_pci_probe() context,
+ * Precondition: Since this function is called in brcms_bcma_probe() context,
* no locking is required.
*/
int brcms_check_firmwares(struct brcms_info *wl)
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
index 177f0e4..8f60419 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.h
@@ -68,8 +68,6 @@ struct brcms_info {
spinlock_t lock; /* per-device perimeter lock */
spinlock_t isr_lock; /* per-device ISR synchronization lock */
- /* regsva for unmap in brcms_free() */
- void __iomem *regsva; /* opaque chip registers virtual address */
/* timer related fields */
atomic_t callbacks; /* # outstanding callback functions */
@@ -80,6 +78,7 @@ struct brcms_info {
struct brcms_firmware fw;
struct wiphy *wiphy;
struct brcms_ucode ucode;
+ bool mute_tx;
};
/* misc callbacks */
@@ -104,5 +103,6 @@ extern bool brcms_del_timer(struct brcms_timer *timer);
extern void brcms_msleep(struct brcms_info *wl, uint ms);
extern void brcms_dpc(unsigned long data);
extern void brcms_timer(struct brcms_timer *t);
+extern void brcms_fatal_error(struct brcms_info *wl);
#endif /* _BRCM_MAC80211_IF_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 510e9bb..f6affc6 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -30,44 +30,21 @@
#include "mac80211_if.h"
#include "ucode_loader.h"
#include "main.h"
+#include "soc.h"
/*
* Indication for txflowcontrol that all priority bits in
* TXQ_STOP_FOR_PRIOFC_MASK are to be considered.
*/
-#define ALLPRIO -1
-
-/*
- * 32 SSID chars, max of 4 chars for each SSID char "\xFF", plus NULL.
- */
-#define SSID_FMT_BUF_LEN ((4 * IEEE80211_MAX_SSID_LEN) + 1)
+#define ALLPRIO -1
/* watchdog timer, in unit of ms */
-#define TIMER_INTERVAL_WATCHDOG 1000
+#define TIMER_INTERVAL_WATCHDOG 1000
/* radio monitor timer, in unit of ms */
-#define TIMER_INTERVAL_RADIOCHK 800
-
-/* Max MPC timeout, in unit of watchdog */
-#ifndef BRCMS_MPC_MAX_DELAYCNT
-#define BRCMS_MPC_MAX_DELAYCNT 10
-#endif
-
-/* Min MPC timeout, in unit of watchdog */
-#define BRCMS_MPC_MIN_DELAYCNT 1
-#define BRCMS_MPC_THRESHOLD 3 /* MPC count threshold level */
+#define TIMER_INTERVAL_RADIOCHK 800
/* beacon interval, in unit of 1024TU */
-#define BEACON_INTERVAL_DEFAULT 100
-/* DTIM interval, in unit of beacon interval */
-#define DTIM_INTERVAL_DEFAULT 3
-
-/* Scale down delays to accommodate QT slow speed */
-/* beacon interval, in unit of 1024TU */
-#define BEACON_INTERVAL_DEF_QT 20
-/* DTIM interval, in unit of beacon interval */
-#define DTIM_INTERVAL_DEF_QT 1
-
-#define TBTT_ALIGN_LEEWAY_US 100 /* min leeway before first TBTT in us */
+#define BEACON_INTERVAL_DEFAULT 100
/* n-mode support capability */
/* 2x2 includes both 1x1 & 2x2 devices
@@ -78,113 +55,71 @@
#define WL_11N_3x3 3
#define WL_11N_4x4 4
-/* define 11n feature disable flags */
-#define WLFEATURE_DISABLE_11N 0x00000001
-#define WLFEATURE_DISABLE_11N_STBC_TX 0x00000002
-#define WLFEATURE_DISABLE_11N_STBC_RX 0x00000004
-#define WLFEATURE_DISABLE_11N_SGI_TX 0x00000008
-#define WLFEATURE_DISABLE_11N_SGI_RX 0x00000010
-#define WLFEATURE_DISABLE_11N_AMPDU_TX 0x00000020
-#define WLFEATURE_DISABLE_11N_AMPDU_RX 0x00000040
-#define WLFEATURE_DISABLE_11N_GF 0x00000080
-
-#define EDCF_ACI_MASK 0x60
-#define EDCF_ACI_SHIFT 5
-#define EDCF_ECWMIN_MASK 0x0f
-#define EDCF_ECWMAX_SHIFT 4
-#define EDCF_AIFSN_MASK 0x0f
-#define EDCF_AIFSN_MAX 15
-#define EDCF_ECWMAX_MASK 0xf0
-
-#define EDCF_AC_BE_TXOP_STA 0x0000
-#define EDCF_AC_BK_TXOP_STA 0x0000
-#define EDCF_AC_VO_ACI_STA 0x62
-#define EDCF_AC_VO_ECW_STA 0x32
-#define EDCF_AC_VI_ACI_STA 0x42
-#define EDCF_AC_VI_ECW_STA 0x43
-#define EDCF_AC_BK_ECW_STA 0xA4
-#define EDCF_AC_VI_TXOP_STA 0x005e
-#define EDCF_AC_VO_TXOP_STA 0x002f
-#define EDCF_AC_BE_ACI_STA 0x03
-#define EDCF_AC_BE_ECW_STA 0xA4
-#define EDCF_AC_BK_ACI_STA 0x27
-#define EDCF_AC_VO_TXOP_AP 0x002f
-
-#define EDCF_TXOP2USEC(txop) ((txop) << 5)
-#define EDCF_ECW2CW(exp) ((1 << (exp)) - 1)
-
-#define APHY_SYMBOL_TIME 4
-#define APHY_PREAMBLE_TIME 16
-#define APHY_SIGNAL_TIME 4
-#define APHY_SIFS_TIME 16
-#define APHY_SERVICE_NBITS 16
-#define APHY_TAIL_NBITS 6
-#define BPHY_SIFS_TIME 10
-#define BPHY_PLCP_SHORT_TIME 96
-
-#define PREN_PREAMBLE 24
-#define PREN_MM_EXT 12
-#define PREN_PREAMBLE_EXT 4
+#define EDCF_ACI_MASK 0x60
+#define EDCF_ACI_SHIFT 5
+#define EDCF_ECWMIN_MASK 0x0f
+#define EDCF_ECWMAX_SHIFT 4
+#define EDCF_AIFSN_MASK 0x0f
+#define EDCF_AIFSN_MAX 15
+#define EDCF_ECWMAX_MASK 0xf0
+
+#define EDCF_AC_BE_TXOP_STA 0x0000
+#define EDCF_AC_BK_TXOP_STA 0x0000
+#define EDCF_AC_VO_ACI_STA 0x62
+#define EDCF_AC_VO_ECW_STA 0x32
+#define EDCF_AC_VI_ACI_STA 0x42
+#define EDCF_AC_VI_ECW_STA 0x43
+#define EDCF_AC_BK_ECW_STA 0xA4
+#define EDCF_AC_VI_TXOP_STA 0x005e
+#define EDCF_AC_VO_TXOP_STA 0x002f
+#define EDCF_AC_BE_ACI_STA 0x03
+#define EDCF_AC_BE_ECW_STA 0xA4
+#define EDCF_AC_BK_ACI_STA 0x27
+#define EDCF_AC_VO_TXOP_AP 0x002f
+
+#define EDCF_TXOP2USEC(txop) ((txop) << 5)
+#define EDCF_ECW2CW(exp) ((1 << (exp)) - 1)
+
+#define APHY_SYMBOL_TIME 4
+#define APHY_PREAMBLE_TIME 16
+#define APHY_SIGNAL_TIME 4
+#define APHY_SIFS_TIME 16
+#define APHY_SERVICE_NBITS 16
+#define APHY_TAIL_NBITS 6
+#define BPHY_SIFS_TIME 10
+#define BPHY_PLCP_SHORT_TIME 96
+
+#define PREN_PREAMBLE 24
+#define PREN_MM_EXT 12
+#define PREN_PREAMBLE_EXT 4
#define DOT11_MAC_HDR_LEN 24
-#define DOT11_ACK_LEN 10
-#define DOT11_BA_LEN 4
+#define DOT11_ACK_LEN 10
+#define DOT11_BA_LEN 4
#define DOT11_OFDM_SIGNAL_EXTENSION 6
#define DOT11_MIN_FRAG_LEN 256
-#define DOT11_RTS_LEN 16
-#define DOT11_CTS_LEN 10
+#define DOT11_RTS_LEN 16
+#define DOT11_CTS_LEN 10
#define DOT11_BA_BITMAP_LEN 128
#define DOT11_MIN_BEACON_PERIOD 1
#define DOT11_MAX_BEACON_PERIOD 0xFFFF
-#define DOT11_MAXNUMFRAGS 16
+#define DOT11_MAXNUMFRAGS 16
#define DOT11_MAX_FRAG_LEN 2346
-#define BPHY_PLCP_TIME 192
-#define RIFS_11N_TIME 2
-
-#define WME_VER 1
-#define WME_SUBTYPE_PARAM_IE 1
-#define WME_TYPE 2
-#define WME_OUI "\x00\x50\xf2"
+#define BPHY_PLCP_TIME 192
+#define RIFS_11N_TIME 2
-#define AC_BE 0
-#define AC_BK 1
-#define AC_VI 2
-#define AC_VO 3
-
-#define BCN_TMPL_LEN 512 /* length of the BCN template area */
+/* length of the BCN template area */
+#define BCN_TMPL_LEN 512
/* brcms_bss_info flag bit values */
-#define BRCMS_BSS_HT 0x0020 /* BSS is HT (MIMO) capable */
-
-/* Flags used in brcms_c_txq_info.stopped */
-/* per prio flow control bits */
-#define TXQ_STOP_FOR_PRIOFC_MASK 0x000000FF
-/* stop txq enqueue for packet drain */
-#define TXQ_STOP_FOR_PKT_DRAIN 0x00000100
-/* stop txq enqueue for ampdu flow control */
-#define TXQ_STOP_FOR_AMPDU_FLOW_CNTRL 0x00000200
-
-#define BRCMS_HWRXOFF 38 /* chip rx buffer offset */
+#define BRCMS_BSS_HT 0x0020 /* BSS is HT (MIMO) capable */
-/* Find basic rate for a given rate */
-static u8 brcms_basic_rate(struct brcms_c_info *wlc, u32 rspec)
-{
- if (is_mcs_rate(rspec))
- return wlc->band->basic_rate[mcs_table[rspec & RSPEC_RATE_MASK]
- .leg_ofdm];
- return wlc->band->basic_rate[rspec & RSPEC_RATE_MASK];
-}
-
-static u16 frametype(u32 rspec, u8 mimoframe)
-{
- if (is_mcs_rate(rspec))
- return mimoframe;
- return is_cck_rate(rspec) ? FT_CCK : FT_OFDM;
-}
+/* chip rx buffer offset */
+#define BRCMS_HWRXOFF 38
/* rfdisable delay timer 500 ms, runs of ALP clock */
-#define RFDISABLE_DEFAULT 10000000
+#define RFDISABLE_DEFAULT 10000000
#define BRCMS_TEMPSENSE_PERIOD 10 /* 10 second timeout */
@@ -194,87 +129,83 @@ static u16 frametype(u32 rspec, u8 mimoframe)
* These constants are used ONLY by wlc_prio2prec_map. Do not use them
* elsewhere.
*/
-#define _BRCMS_PREC_NONE 0 /* None = - */
-#define _BRCMS_PREC_BK 2 /* BK - Background */
-#define _BRCMS_PREC_BE 4 /* BE - Best-effort */
-#define _BRCMS_PREC_EE 6 /* EE - Excellent-effort */
-#define _BRCMS_PREC_CL 8 /* CL - Controlled Load */
-#define _BRCMS_PREC_VI 10 /* Vi - Video */
-#define _BRCMS_PREC_VO 12 /* Vo - Voice */
-#define _BRCMS_PREC_NC 14 /* NC - Network Control */
-
-/* The BSS is generating beacons in HW */
-#define BRCMS_BSSCFG_HW_BCN 0x20
-
-#define SYNTHPU_DLY_APHY_US 3700 /* a phy synthpu_dly time in us */
-#define SYNTHPU_DLY_BPHY_US 1050 /* b/g phy synthpu_dly time in us */
-#define SYNTHPU_DLY_NPHY_US 2048 /* n phy REV3 synthpu_dly time in us */
-#define SYNTHPU_DLY_LPPHY_US 300 /* lpphy synthpu_dly time in us */
-
-#define SYNTHPU_DLY_PHY_US_QT 100 /* QT synthpu_dly time in us */
-
-#define ANTCNT 10 /* vanilla M_MAX_ANTCNT value */
+#define _BRCMS_PREC_NONE 0 /* None = - */
+#define _BRCMS_PREC_BK 2 /* BK - Background */
+#define _BRCMS_PREC_BE 4 /* BE - Best-effort */
+#define _BRCMS_PREC_EE 6 /* EE - Excellent-effort */
+#define _BRCMS_PREC_CL 8 /* CL - Controlled Load */
+#define _BRCMS_PREC_VI 10 /* Vi - Video */
+#define _BRCMS_PREC_VO 12 /* Vo - Voice */
+#define _BRCMS_PREC_NC 14 /* NC - Network Control */
+
+/* synthpu_dly times in us */
+#define SYNTHPU_DLY_APHY_US 3700
+#define SYNTHPU_DLY_BPHY_US 1050
+#define SYNTHPU_DLY_NPHY_US 2048
+#define SYNTHPU_DLY_LPPHY_US 300
+
+#define ANTCNT 10 /* vanilla M_MAX_ANTCNT val */
/* Per-AC retry limit register definitions; uses defs.h bitfield macros */
-#define EDCF_SHORT_S 0
-#define EDCF_SFB_S 4
-#define EDCF_LONG_S 8
-#define EDCF_LFB_S 12
-#define EDCF_SHORT_M BITFIELD_MASK(4)
-#define EDCF_SFB_M BITFIELD_MASK(4)
-#define EDCF_LONG_M BITFIELD_MASK(4)
-#define EDCF_LFB_M BITFIELD_MASK(4)
+#define EDCF_SHORT_S 0
+#define EDCF_SFB_S 4
+#define EDCF_LONG_S 8
+#define EDCF_LFB_S 12
+#define EDCF_SHORT_M BITFIELD_MASK(4)
+#define EDCF_SFB_M BITFIELD_MASK(4)
+#define EDCF_LONG_M BITFIELD_MASK(4)
+#define EDCF_LFB_M BITFIELD_MASK(4)
-#define RETRY_SHORT_DEF 7 /* Default Short retry Limit */
-#define RETRY_SHORT_MAX 255 /* Maximum Short retry Limit */
-#define RETRY_LONG_DEF 4 /* Default Long retry count */
-#define RETRY_SHORT_FB 3 /* Short count for fallback rate */
-#define RETRY_LONG_FB 2 /* Long count for fallback rate */
+#define RETRY_SHORT_DEF 7 /* Default Short retry Limit */
+#define RETRY_SHORT_MAX 255 /* Maximum Short retry Limit */
+#define RETRY_LONG_DEF 4 /* Default Long retry count */
+#define RETRY_SHORT_FB 3 /* Short count for fb rate */
+#define RETRY_LONG_FB 2 /* Long count for fb rate */
-#define APHY_CWMIN 15
-#define PHY_CWMAX 1023
+#define APHY_CWMIN 15
+#define PHY_CWMAX 1023
-#define EDCF_AIFSN_MIN 1
+#define EDCF_AIFSN_MIN 1
-#define FRAGNUM_MASK 0xF
+#define FRAGNUM_MASK 0xF
-#define APHY_SLOT_TIME 9
-#define BPHY_SLOT_TIME 20
+#define APHY_SLOT_TIME 9
+#define BPHY_SLOT_TIME 20
-#define WL_SPURAVOID_OFF 0
-#define WL_SPURAVOID_ON1 1
-#define WL_SPURAVOID_ON2 2
+#define WL_SPURAVOID_OFF 0
+#define WL_SPURAVOID_ON1 1
+#define WL_SPURAVOID_ON2 2
/* invalid core flags, use the saved coreflags */
-#define BRCMS_USE_COREFLAGS 0xffffffff
+#define BRCMS_USE_COREFLAGS 0xffffffff
/* values for PLCPHdr_override */
-#define BRCMS_PLCP_AUTO -1
-#define BRCMS_PLCP_SHORT 0
-#define BRCMS_PLCP_LONG 1
+#define BRCMS_PLCP_AUTO -1
+#define BRCMS_PLCP_SHORT 0
+#define BRCMS_PLCP_LONG 1
/* values for g_protection_override and n_protection_override */
#define BRCMS_PROTECTION_AUTO -1
#define BRCMS_PROTECTION_OFF 0
#define BRCMS_PROTECTION_ON 1
#define BRCMS_PROTECTION_MMHDR_ONLY 2
-#define BRCMS_PROTECTION_CTS_ONLY 3
+#define BRCMS_PROTECTION_CTS_ONLY 3
/* values for g_protection_control and n_protection_control */
-#define BRCMS_PROTECTION_CTL_OFF 0
+#define BRCMS_PROTECTION_CTL_OFF 0
#define BRCMS_PROTECTION_CTL_LOCAL 1
#define BRCMS_PROTECTION_CTL_OVERLAP 2
/* values for n_protection */
#define BRCMS_N_PROTECTION_OFF 0
#define BRCMS_N_PROTECTION_OPTIONAL 1
-#define BRCMS_N_PROTECTION_20IN40 2
+#define BRCMS_N_PROTECTION_20IN40 2
#define BRCMS_N_PROTECTION_MIXEDMODE 3
/* values for band specific 40MHz capabilities */
-#define BRCMS_N_BW_20ALL 0
-#define BRCMS_N_BW_40ALL 1
-#define BRCMS_N_BW_20IN2G_40IN5G 2
+#define BRCMS_N_BW_20ALL 0
+#define BRCMS_N_BW_40ALL 1
+#define BRCMS_N_BW_20IN2G_40IN5G 2
/* bitflags for SGI support (sgi_rx iovar) */
#define BRCMS_N_SGI_20 0x01
@@ -282,48 +213,42 @@ static u16 frametype(u32 rspec, u8 mimoframe)
/* defines used by the nrate iovar */
/* MCS in use, indicates b0-6 holds an mcs */
-#define NRATE_MCS_INUSE 0x00000080
+#define NRATE_MCS_INUSE 0x00000080
/* rate/mcs value */
-#define NRATE_RATE_MASK 0x0000007f
+#define NRATE_RATE_MASK 0x0000007f
/* stf mode mask: siso, cdd, stbc, sdm */
-#define NRATE_STF_MASK 0x0000ff00
+#define NRATE_STF_MASK 0x0000ff00
/* stf mode shift */
-#define NRATE_STF_SHIFT 8
-/* bit indicates override both rate & mode */
-#define NRATE_OVERRIDE 0x80000000
+#define NRATE_STF_SHIFT 8
/* bit indicates to override mcs only */
-#define NRATE_OVERRIDE_MCS_ONLY 0x40000000
-#define NRATE_SGI_MASK 0x00800000 /* sgi mode */
-#define NRATE_SGI_SHIFT 23 /* sgi mode */
-#define NRATE_LDPC_CODING 0x00400000 /* bit indicates adv coding in use */
-#define NRATE_LDPC_SHIFT 22 /* ldpc shift */
+#define NRATE_OVERRIDE_MCS_ONLY 0x40000000
+#define NRATE_SGI_MASK 0x00800000 /* sgi mode */
+#define NRATE_SGI_SHIFT 23 /* sgi mode */
+#define NRATE_LDPC_CODING 0x00400000 /* adv coding in use */
+#define NRATE_LDPC_SHIFT 22 /* ldpc shift */
-#define NRATE_STF_SISO 0 /* stf mode SISO */
-#define NRATE_STF_CDD 1 /* stf mode CDD */
-#define NRATE_STF_STBC 2 /* stf mode STBC */
-#define NRATE_STF_SDM 3 /* stf mode SDM */
+#define NRATE_STF_SISO 0 /* stf mode SISO */
+#define NRATE_STF_CDD 1 /* stf mode CDD */
+#define NRATE_STF_STBC 2 /* stf mode STBC */
+#define NRATE_STF_SDM 3 /* stf mode SDM */
-#define MAX_DMA_SEGS 4
+#define MAX_DMA_SEGS 4
/* Max # of entries in Tx FIFO based on 4kb page size */
-#define NTXD 256
+#define NTXD 256
/* Max # of entries in Rx FIFO based on 4kb page size */
-#define NRXD 256
+#define NRXD 256
/* try to keep this # rbufs posted to the chip */
-#define NRXBUFPOST 32
+#define NRXBUFPOST 32
/* data msg txq hiwat mark */
-#define BRCMS_DATAHIWAT 50
+#define BRCMS_DATAHIWAT 50
-/* bounded rx loops */
-#define RXBND 8 /* max # frames to process in brcms_c_recv() */
-#define TXSBND 8 /* max # tx status to process in wlc_txstatus() */
-
-/*
- * 32 SSID chars, max of 4 chars for each SSID char "\xFF", plus NULL.
- */
-#define SSID_FMT_BUF_LEN ((4 * IEEE80211_MAX_SSID_LEN) + 1)
+/* max # frames to process in brcms_c_recv() */
+#define RXBND 8
+/* max # tx status to process in wlc_txstatus() */
+#define TXSBND 8
/* brcmu_format_flags() bit description structure */
struct brcms_c_bit_desc {
@@ -375,10 +300,22 @@ uint brcm_msg_level =
#endif /* BCMDBG */
/* TX FIFO number to WME/802.1E Access Category */
-static const u8 wme_fifo2ac[] = { AC_BK, AC_BE, AC_VI, AC_VO, AC_BE, AC_BE };
+static const u8 wme_fifo2ac[] = {
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_BE
+};
-/* WME/802.1E Access Category to TX FIFO number */
-static const u8 wme_ac2fifo[] = { 1, 0, 2, 3 };
+/* ieee80211 Access Category to TX FIFO number */
+static const u8 wme_ac2fifo[] = {
+ TX_AC_VO_FIFO,
+ TX_AC_VI_FIFO,
+ TX_AC_BE_FIFO,
+ TX_AC_BK_FIFO
+};
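The rewritten tables above map TX FIFO numbers to mac80211 access categories and back. A standalone round-trip sketch, using placeholder values rather than the driver's TX_*_FIFO and IEEE80211_AC_* constants:

#include <stdio.h>

enum { AC_VO, AC_VI, AC_BE, AC_BK };    /* placeholder access categories */
enum { FIFO_BK, FIFO_BE, FIFO_VI, FIFO_VO, FIFO_BCMC, FIFO_ATIM };

static const int fifo2ac[] = { AC_BK, AC_BE, AC_VI, AC_VO, AC_BE, AC_BE };
static const int ac2fifo[] = { FIFO_VO, FIFO_VI, FIFO_BE, FIFO_BK };

int main(void)
{
        int fifo;

        for (fifo = FIFO_BK; fifo <= FIFO_VO; fifo++)
                printf("fifo %d -> ac %d -> fifo %d\n",
                       fifo, fifo2ac[fifo], ac2fifo[fifo2ac[fifo]]);
        return 0;
}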
/* 802.1D Priority to precedence queue mapping */
const u8 wlc_prio2prec_map[] = {
@@ -405,13 +342,6 @@ static const u16 xmtfifo_sz[][NFIFO] = {
{9, 58, 22, 14, 14, 5},
};
-static const u8 acbitmap2maxprio[] = {
- PRIO_8021D_BE, PRIO_8021D_BE, PRIO_8021D_BK, PRIO_8021D_BK,
- PRIO_8021D_VI, PRIO_8021D_VI, PRIO_8021D_VI, PRIO_8021D_VI,
- PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO,
- PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO, PRIO_8021D_VO
-};
-
#ifdef BCMDBG
static const char * const fifo_names[] = {
"AC_BK", "AC_BE", "AC_VI", "AC_VO", "BCMC", "ATIM" };
@@ -424,6 +354,22 @@ static const char fifo_names[6][0];
static struct brcms_c_info *wlc_info_dbg = (struct brcms_c_info *) (NULL);
#endif
+/* Find basic rate for a given rate */
+static u8 brcms_basic_rate(struct brcms_c_info *wlc, u32 rspec)
+{
+ if (is_mcs_rate(rspec))
+ return wlc->band->basic_rate[mcs_table[rspec & RSPEC_RATE_MASK]
+ .leg_ofdm];
+ return wlc->band->basic_rate[rspec & RSPEC_RATE_MASK];
+}
+
+static u16 frametype(u32 rspec, u8 mimoframe)
+{
+ if (is_mcs_rate(rspec))
+ return mimoframe;
+ return is_cck_rate(rspec) ? FT_CCK : FT_OFDM;
+}
+
/* currently the best mechanism for determining SIFS is the band in use */
static u16 get_sifs(struct brcms_band *band)
{
@@ -442,10 +388,13 @@ static u16 get_sifs(struct brcms_band *band)
*/
static bool brcms_deviceremoved(struct brcms_c_info *wlc)
{
+ u32 macctrl;
+
if (!wlc->hw->clk)
return ai_deviceremoved(wlc->hw->sih);
- return (R_REG(&wlc->hw->regs->maccontrol) &
- (MCTL_PSM_JMP_0 | MCTL_IHR_EN)) != MCTL_IHR_EN;
+ macctrl = bcma_read32(wlc->hw->d11core,
+ D11REGOFFS(maccontrol));
+ return (macctrl & (MCTL_PSM_JMP_0 | MCTL_IHR_EN)) != MCTL_IHR_EN;
}
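brcms_deviceremoved() above now reads maccontrol through bcma_read32() but keeps the same health test: the core is treated as gone unless MCTL_IHR_EN is set and MCTL_PSM_JMP_0 is clear. The test itself, as a standalone sketch with placeholder bit values:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PSM_JMP_0 0x00000008u   /* placeholder for MCTL_PSM_JMP_0 */
#define IHR_EN    0x00000400u   /* placeholder for MCTL_IHR_EN */

static bool device_removed(uint32_t macctrl)
{
        return (macctrl & (PSM_JMP_0 | IHR_EN)) != IHR_EN;
}

int main(void)
{
        printf("%d %d %d\n",
               device_removed(IHR_EN),              /* 0: healthy */
               device_removed(IHR_EN | PSM_JMP_0),  /* 1: PSM jumped to 0 */
               device_removed(0xffffffffu));        /* 1: bus returned all ones */
        return 0;
}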
/* sum the individual fifo tx pending packet counts */
@@ -470,20 +419,6 @@ static int brcms_chspec_bw(u16 chanspec)
return BRCMS_10_MHZ;
}
-/*
- * return true if Minimum Power Consumption should
- * be entered, false otherwise
- */
-static bool brcms_c_is_non_delay_mpc(struct brcms_c_info *wlc)
-{
- return false;
-}
-
-static bool brcms_c_ismpc(struct brcms_c_info *wlc)
-{
- return (wlc->mpc_delay_off == 0) && (brcms_c_is_non_delay_mpc(wlc));
-}
-
static void brcms_c_bsscfg_mfree(struct brcms_bss_cfg *cfg)
{
if (cfg == NULL)
@@ -650,17 +585,15 @@ brcms_c_attach_malloc(uint unit, uint *err, uint devid)
static void brcms_b_update_slot_timing(struct brcms_hardware *wlc_hw,
bool shortslot)
{
- struct d11regs __iomem *regs;
-
- regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
if (shortslot) {
/* 11g short slot: 11a timing */
- W_REG(&regs->ifs_slot, 0x0207); /* APHY_SLOT_TIME */
+ bcma_write16(core, D11REGOFFS(ifs_slot), 0x0207);
brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, APHY_SLOT_TIME);
} else {
/* 11g long slot: 11b timing */
- W_REG(&regs->ifs_slot, 0x0212); /* BPHY_SLOT_TIME */
+ bcma_write16(core, D11REGOFFS(ifs_slot), 0x0212);
brcms_b_write_shm(wlc_hw, M_DOT11_SLOT, BPHY_SLOT_TIME);
}
}
@@ -669,9 +602,8 @@ static void brcms_b_update_slot_timing(struct brcms_hardware *wlc_hw,
* calculate frame duration of a given rate and length, return
* time in usec unit
*/
-uint
-brcms_c_calc_frame_time(struct brcms_c_info *wlc, u32 ratespec,
- u8 preamble_type, uint mac_len)
+static uint brcms_c_calc_frame_time(struct brcms_c_info *wlc, u32 ratespec,
+ u8 preamble_type, uint mac_len)
{
uint nsyms, dur = 0, Ndps, kNdps;
uint rate = rspec2rate(ratespec);
@@ -741,24 +673,22 @@ brcms_c_calc_frame_time(struct brcms_c_info *wlc, u32 ratespec,
static void brcms_c_write_inits(struct brcms_hardware *wlc_hw,
const struct d11init *inits)
{
+ struct bcma_device *core = wlc_hw->d11core;
int i;
- u8 __iomem *base;
- u8 __iomem *addr;
+ uint offset;
u16 size;
u32 value;
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
- base = (u8 __iomem *)wlc_hw->regs;
-
for (i = 0; inits[i].addr != cpu_to_le16(0xffff); i++) {
size = le16_to_cpu(inits[i].size);
- addr = base + le16_to_cpu(inits[i].addr);
+ offset = le16_to_cpu(inits[i].addr);
value = le32_to_cpu(inits[i].value);
if (size == 2)
- W_REG((u16 __iomem *)addr, value);
+ bcma_write16(core, offset, value);
else if (size == 4)
- W_REG((u32 __iomem *)addr, value);
+ bcma_write32(core, offset, value);
else
break;
}
@@ -808,6 +738,14 @@ static void brcms_c_ucode_bsinit(struct brcms_hardware *wlc_hw)
}
}
+static void brcms_b_core_ioctl(struct brcms_hardware *wlc_hw, u32 m, u32 v)
+{
+ struct bcma_device *core = wlc_hw->d11core;
+ u32 ioctl = bcma_aread32(core, BCMA_IOCTL) & ~m;
+
+ bcma_awrite32(core, BCMA_IOCTL, ioctl | v);
+}
+
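brcms_b_core_ioctl() above is the bcma replacement for ai_core_cflags(): a mask/value read-modify-write on the core's IOCTL agent register. The same pattern, with the register modelled as a plain variable:

#include <stdint.h>
#include <stdio.h>

static uint32_t ioctl_reg = 0x00000013u;   /* pretend current IOCTL value */

static void core_ioctl(uint32_t mask, uint32_t val)
{
        uint32_t v = ioctl_reg & ~mask;    /* clear the bits named by mask */

        ioctl_reg = v | val;               /* then set the requested values */
}

int main(void)
{
        core_ioctl(0x0000000cu, 0x00000004u);   /* clear bits 2-3, set bit 2 */
        printf("ioctl = 0x%08x\n", ioctl_reg);
        return 0;
}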
static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk)
{
BCMMSG(wlc_hw->wlc->wiphy, "wl%d: clk %d\n", wlc_hw->unit, clk);
@@ -816,17 +754,17 @@ static void brcms_b_core_phy_clk(struct brcms_hardware *wlc_hw, bool clk)
if (OFF == clk) { /* clear gmode bit, put phy into reset */
- ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC | SICF_GMODE),
- (SICF_PRST | SICF_FGC));
+ brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC | SICF_GMODE),
+ (SICF_PRST | SICF_FGC));
udelay(1);
- ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC), SICF_PRST);
+ brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC), SICF_PRST);
udelay(1);
} else { /* take phy out of reset */
- ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_FGC), SICF_FGC);
+ brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_FGC), SICF_FGC);
udelay(1);
- ai_core_cflags(wlc_hw->sih, (SICF_FGC), 0);
+ brcms_b_core_ioctl(wlc_hw, SICF_FGC, 0);
udelay(1);
}
@@ -847,9 +785,14 @@ static void brcms_c_setxband(struct brcms_hardware *wlc_hw, uint bandunit)
wlc_hw->wlc->band = wlc_hw->wlc->bandstate[bandunit];
/* set gmode core flag */
- if (wlc_hw->sbclk && !wlc_hw->noreset)
- ai_core_cflags(wlc_hw->sih, SICF_GMODE,
- ((bandunit == 0) ? SICF_GMODE : 0));
+ if (wlc_hw->sbclk && !wlc_hw->noreset) {
+ u32 gmode = 0;
+
+ if (bandunit == 0)
+ gmode = SICF_GMODE;
+
+ brcms_b_core_ioctl(wlc_hw, SICF_GMODE, gmode);
+ }
}
/* switch to new band but leave it inactive */
@@ -857,10 +800,12 @@ static u32 brcms_c_setband_inact(struct brcms_c_info *wlc, uint bandunit)
{
struct brcms_hardware *wlc_hw = wlc->hw;
u32 macintmask;
+ u32 macctrl;
BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
-
- WARN_ON((R_REG(&wlc_hw->regs->maccontrol) & MCTL_EN_MAC) != 0);
+ macctrl = bcma_read32(wlc_hw->d11core,
+ D11REGOFFS(maccontrol));
+ WARN_ON((macctrl & MCTL_EN_MAC) != 0);
/* disable interrupts */
macintmask = brcms_intrsoff(wlc->wl);
@@ -969,7 +914,7 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
lfbl, /* Long Frame Rate Fallback Limit */
fbl;
- if (queue < AC_COUNT) {
+ if (queue < IEEE80211_NUM_ACS) {
sfbl = GFIELD(wlc->wme_retries[wme_fifo2ac[queue]],
EDCF_SFB);
lfbl = GFIELD(wlc->wme_retries[wme_fifo2ac[queue]],
@@ -1018,14 +963,12 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
tx_info->flags |= IEEE80211_TX_STAT_ACK;
}
- totlen = brcmu_pkttotlen(p);
+ totlen = p->len;
free_pdu = true;
brcms_c_txfifo_complete(wlc, queue, 1);
if (lastframe) {
- p->next = NULL;
- p->prev = NULL;
/* remove PLCP & Broadcom tx descriptor header */
skb_pull(p, D11_PHY_HDR_LEN);
skb_pull(p, D11_TXH_LEN);
@@ -1053,7 +996,7 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
{
bool morepending = false;
struct brcms_c_info *wlc = wlc_hw->wlc;
- struct d11regs __iomem *regs;
+ struct bcma_device *core;
struct tx_status txstatus, *txs;
u32 s1, s2;
uint n = 0;
@@ -1066,18 +1009,18 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
txs = &txstatus;
- regs = wlc_hw->regs;
+ core = wlc_hw->d11core;
*fatal = false;
+ s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
while (!(*fatal)
- && (s1 = R_REG(&regs->frmtxstatus)) & TXS_V) {
+ && (s1 & TXS_V)) {
if (s1 == 0xffffffff) {
wiphy_err(wlc->wiphy, "wl%d: %s: dead chip\n",
wlc_hw->unit, __func__);
return morepending;
}
-
- s2 = R_REG(&regs->frmtxstatus2);
+ s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
txs->status = s1 & TXS_STATUS_MASK;
txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT;
@@ -1090,6 +1033,7 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
/* !give others some time to run! */
if (++n >= max_tx_num)
break;
+ s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
}
if (*fatal)
@@ -1134,12 +1078,12 @@ brcms_c_mhfdef(struct brcms_c_info *wlc, u16 *mhfs, u16 mhf2_init)
}
}
-static struct dma64regs __iomem *
-dmareg(struct brcms_hardware *hw, uint direction, uint fifonum)
+static uint
+dmareg(uint direction, uint fifonum)
{
if (direction == DMA_TX)
- return &(hw->regs->fifo64regs[fifonum].dmaxmt);
- return &(hw->regs->fifo64regs[fifonum].dmarcv);
+ return offsetof(struct d11regs, fifo64regs[fifonum].dmaxmt);
+ return offsetof(struct d11regs, fifo64regs[fifonum].dmarcv);
}
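dmareg() above now returns a byte offset computed with offsetof() instead of a mapped register pointer, so the DMA code addresses the core through the bcma window. A standalone sketch of the offset computation, with an illustrative register layout rather than the real struct d11regs:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct fifo_regs {
        uint32_t xmt[8];   /* stands in for the dmaxmt block */
        uint32_t rcv[8];   /* stands in for the dmarcv block */
};

struct fake_d11regs {
        uint32_t pad[64];
        struct fifo_regs fifo[6];
};

static size_t dma_offset(int tx, unsigned int fifonum)
{
        if (tx)
                return offsetof(struct fake_d11regs, fifo[fifonum].xmt);
        return offsetof(struct fake_d11regs, fifo[fifonum].rcv);
}

int main(void)
{
        printf("tx fifo 1 at offset 0x%zx\n", dma_offset(1, 1));
        printf("rx fifo 0 at offset 0x%zx\n", dma_offset(0, 0));
        return 0;
}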
static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
@@ -1165,9 +1109,9 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
* TX: TX_AC_BK_FIFO (TX AC Background data packets)
* RX: RX_FIFO (RX data packets)
*/
- wlc_hw->di[0] = dma_attach(name, wlc_hw->sih,
- (wme ? dmareg(wlc_hw, DMA_TX, 0) :
- NULL), dmareg(wlc_hw, DMA_RX, 0),
+ wlc_hw->di[0] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+ (wme ? dmareg(DMA_TX, 0) : 0),
+ dmareg(DMA_RX, 0),
(wme ? NTXD : 0), NRXD,
RXBUFSZ, -1, NRXBUFPOST,
BRCMS_HWRXOFF, &brcm_msg_level);
@@ -1179,8 +1123,8 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
* (legacy) TX_DATA_FIFO (TX data packets)
* RX: UNUSED
*/
- wlc_hw->di[1] = dma_attach(name, wlc_hw->sih,
- dmareg(wlc_hw, DMA_TX, 1), NULL,
+ wlc_hw->di[1] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+ dmareg(DMA_TX, 1), 0,
NTXD, 0, 0, -1, 0, 0,
&brcm_msg_level);
dma_attach_err |= (NULL == wlc_hw->di[1]);
@@ -1190,8 +1134,8 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
* TX: TX_AC_VI_FIFO (TX AC Video data packets)
* RX: UNUSED
*/
- wlc_hw->di[2] = dma_attach(name, wlc_hw->sih,
- dmareg(wlc_hw, DMA_TX, 2), NULL,
+ wlc_hw->di[2] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+ dmareg(DMA_TX, 2), 0,
NTXD, 0, 0, -1, 0, 0,
&brcm_msg_level);
dma_attach_err |= (NULL == wlc_hw->di[2]);
@@ -1200,9 +1144,9 @@ static bool brcms_b_attach_dmapio(struct brcms_c_info *wlc, uint j, bool wme)
* TX: TX_AC_VO_FIFO (TX AC Voice data packets)
* (legacy) TX_CTL_FIFO (TX control & mgmt packets)
*/
- wlc_hw->di[3] = dma_attach(name, wlc_hw->sih,
- dmareg(wlc_hw, DMA_TX, 3),
- NULL, NTXD, 0, 0, -1,
+ wlc_hw->di[3] = dma_attach(name, wlc_hw->sih, wlc_hw->d11core,
+ dmareg(DMA_TX, 3),
+ 0, NTXD, 0, 0, -1,
0, 0, &brcm_msg_level);
dma_attach_err |= (NULL == wlc_hw->di[3]);
/* Cleaner to leave this as if with AP defined */
@@ -1276,7 +1220,7 @@ static void brcms_b_wait_for_wake(struct brcms_hardware *wlc_hw)
/* control chip clock to save power, enable dynamic clock or force fast clock */
static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode)
{
- if (wlc_hw->sih->cccaps & CC_CAP_PMU) {
+ if (ai_get_cccaps(wlc_hw->sih) & CC_CAP_PMU) {
/* new chips with PMU, CCS_FORCEHT will distribute the HT clock
* on backplane, but mac core will still run on ALP(not HT) when
* it enters powersave mode, which means the FCA bit may not be
@@ -1285,29 +1229,33 @@ static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode)
if (wlc_hw->clk) {
if (mode == CLK_FAST) {
- OR_REG(&wlc_hw->regs->clk_ctl_st,
- CCS_FORCEHT);
+ bcma_set32(wlc_hw->d11core,
+ D11REGOFFS(clk_ctl_st),
+ CCS_FORCEHT);
udelay(64);
- SPINWAIT(((R_REG
- (&wlc_hw->regs->
- clk_ctl_st) & CCS_HTAVAIL) == 0),
- PMU_MAX_TRANSITION_DLY);
- WARN_ON(!(R_REG
- (&wlc_hw->regs->
- clk_ctl_st) & CCS_HTAVAIL));
+ SPINWAIT(
+ ((bcma_read32(wlc_hw->d11core,
+ D11REGOFFS(clk_ctl_st)) &
+ CCS_HTAVAIL) == 0),
+ PMU_MAX_TRANSITION_DLY);
+ WARN_ON(!(bcma_read32(wlc_hw->d11core,
+ D11REGOFFS(clk_ctl_st)) &
+ CCS_HTAVAIL));
} else {
- if ((wlc_hw->sih->pmurev == 0) &&
- (R_REG
- (&wlc_hw->regs->
- clk_ctl_st) & (CCS_FORCEHT | CCS_HTAREQ)))
- SPINWAIT(((R_REG
- (&wlc_hw->regs->
- clk_ctl_st) & CCS_HTAVAIL)
- == 0),
- PMU_MAX_TRANSITION_DLY);
- AND_REG(&wlc_hw->regs->clk_ctl_st,
+ if ((ai_get_pmurev(wlc_hw->sih) == 0) &&
+ (bcma_read32(wlc_hw->d11core,
+ D11REGOFFS(clk_ctl_st)) &
+ (CCS_FORCEHT | CCS_HTAREQ)))
+ SPINWAIT(
+ ((bcma_read32(wlc_hw->d11core,
+ offsetof(struct d11regs,
+ clk_ctl_st)) &
+ CCS_HTAVAIL) == 0),
+ PMU_MAX_TRANSITION_DLY);
+ bcma_mask32(wlc_hw->d11core,
+ D11REGOFFS(clk_ctl_st),
~CCS_FORCEHT);
}
}
@@ -1322,7 +1270,7 @@ static void brcms_b_clkctl_clk(struct brcms_hardware *wlc_hw, uint mode)
/* check fast clock is available (if core is not in reset) */
if (wlc_hw->forcefastclk && wlc_hw->clk)
- WARN_ON(!(ai_core_sflags(wlc_hw->sih, 0, 0) &
+ WARN_ON(!(bcma_aread32(wlc_hw->d11core, BCMA_IOST) &
SISF_FCLKA));
/*
@@ -1439,7 +1387,8 @@ static void brcms_c_mctrl_write(struct brcms_hardware *wlc_hw)
maccontrol |= MCTL_INFRA;
}
- W_REG(&wlc_hw->regs->maccontrol, maccontrol);
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(maccontrol),
+ maccontrol);
}
/* set or clear maccontrol bits */
@@ -1533,7 +1482,7 @@ static void
brcms_b_set_addrmatch(struct brcms_hardware *wlc_hw, int match_reg_offset,
const u8 *addr)
{
- struct d11regs __iomem *regs;
+ struct bcma_device *core = wlc_hw->d11core;
u16 mac_l;
u16 mac_m;
u16 mac_h;
@@ -1541,38 +1490,36 @@ brcms_b_set_addrmatch(struct brcms_hardware *wlc_hw, int match_reg_offset,
BCMMSG(wlc_hw->wlc->wiphy, "wl%d: brcms_b_set_addrmatch\n",
wlc_hw->unit);
- regs = wlc_hw->regs;
mac_l = addr[0] | (addr[1] << 8);
mac_m = addr[2] | (addr[3] << 8);
mac_h = addr[4] | (addr[5] << 8);
/* enter the MAC addr into the RXE match registers */
- W_REG(&regs->rcm_ctl, RCM_INC_DATA | match_reg_offset);
- W_REG(&regs->rcm_mat_data, mac_l);
- W_REG(&regs->rcm_mat_data, mac_m);
- W_REG(&regs->rcm_mat_data, mac_h);
-
+ bcma_write16(core, D11REGOFFS(rcm_ctl),
+ RCM_INC_DATA | match_reg_offset);
+ bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_l);
+ bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_m);
+ bcma_write16(core, D11REGOFFS(rcm_mat_data), mac_h);
}
void
brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset, int len,
void *buf)
{
- struct d11regs __iomem *regs;
+ struct bcma_device *core = wlc_hw->d11core;
u32 word;
__le32 word_le;
__be32 word_be;
bool be_bit;
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
- regs = wlc_hw->regs;
- W_REG(&regs->tplatewrptr, offset);
+ bcma_write32(core, D11REGOFFS(tplatewrptr), offset);
/* if MCTL_BIGEND bit set in mac control register,
* the chip swaps data in fifo, as well as data in
* template ram
*/
- be_bit = (R_REG(&regs->maccontrol) & MCTL_BIGEND) != 0;
+ be_bit = (bcma_read32(core, D11REGOFFS(maccontrol)) & MCTL_BIGEND) != 0;
while (len > 0) {
memcpy(&word, buf, sizeof(u32));
@@ -1585,7 +1532,7 @@ brcms_b_write_template_ram(struct brcms_hardware *wlc_hw, int offset, int len,
word = *(u32 *)&word_le;
}
- W_REG(&regs->tplatewrdata, word);
+ bcma_write32(core, D11REGOFFS(tplatewrdata), word);
buf = (u8 *) buf + sizeof(u32);
len -= sizeof(u32);
@@ -1596,18 +1543,20 @@ static void brcms_b_set_cwmin(struct brcms_hardware *wlc_hw, u16 newmin)
{
wlc_hw->band->CWmin = newmin;
- W_REG(&wlc_hw->regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_CWMIN);
- (void)R_REG(&wlc_hw->regs->objaddr);
- W_REG(&wlc_hw->regs->objdata, newmin);
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
+ OBJADDR_SCR_SEL | S_DOT11_CWMIN);
+ (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), newmin);
}
static void brcms_b_set_cwmax(struct brcms_hardware *wlc_hw, u16 newmax)
{
wlc_hw->band->CWmax = newmax;
- W_REG(&wlc_hw->regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_CWMAX);
- (void)R_REG(&wlc_hw->regs->objaddr);
- W_REG(&wlc_hw->regs->objdata, newmax);
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
+ OBJADDR_SCR_SEL | S_DOT11_CWMAX);
+ (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), newmax);
}
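The CWmin/CWmax writes above use the objaddr/objdata indirect window: select the memory and address, read objaddr back to flush the posted write, then write the data. The same shape is sketched below with the registers and on-chip memory modelled as variables; nothing here is a real driver interface.

#include <stdint.h>
#include <stdio.h>

static uint32_t objaddr, objdata;    /* pretend device registers */
static uint32_t backing_store[64];   /* pretend on-chip scratch memory */

static void indirect_write(uint32_t sel, uint32_t addr, uint32_t val)
{
        objaddr = sel | addr;              /* select memory and address */
        (void)objaddr;                     /* readback: flush the posted write */
        objdata = val;                     /* data lands at the selected slot */
        backing_store[addr] = objdata;     /* model the hardware side effect */
}

int main(void)
{
        indirect_write(0x100u, 3, 0x1fu);  /* e.g. set one scratchpad word */
        printf("slot 3 = 0x%x\n", backing_store[3]);
        return 0;
}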
void brcms_b_bw_set(struct brcms_hardware *wlc_hw, u16 bw)
@@ -1773,17 +1722,17 @@ void brcms_b_core_phypll_reset(struct brcms_hardware *wlc_hw)
{
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
- ai_corereg(wlc_hw->sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_addr), ~0, 0);
+ ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_addr),
+ ~0, 0);
udelay(1);
- ai_corereg(wlc_hw->sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_data), 0x4, 0);
+ ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data),
+ 0x4, 0);
udelay(1);
- ai_corereg(wlc_hw->sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_data), 0x4, 4);
+ ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data),
+ 0x4, 4);
udelay(1);
- ai_corereg(wlc_hw->sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_data), 0x4, 0);
+ ai_cc_reg(wlc_hw->sih, offsetof(struct chipcregs, chipcontrol_data),
+ 0x4, 0);
udelay(1);
}
@@ -1797,18 +1746,18 @@ void brcms_b_phyclk_fgc(struct brcms_hardware *wlc_hw, bool clk)
return;
if (ON == clk)
- ai_core_cflags(wlc_hw->sih, SICF_FGC, SICF_FGC);
+ brcms_b_core_ioctl(wlc_hw, SICF_FGC, SICF_FGC);
else
- ai_core_cflags(wlc_hw->sih, SICF_FGC, 0);
+ brcms_b_core_ioctl(wlc_hw, SICF_FGC, 0);
}
void brcms_b_macphyclk_set(struct brcms_hardware *wlc_hw, bool clk)
{
if (ON == clk)
- ai_core_cflags(wlc_hw->sih, SICF_MPCLKE, SICF_MPCLKE);
+ brcms_b_core_ioctl(wlc_hw, SICF_MPCLKE, SICF_MPCLKE);
else
- ai_core_cflags(wlc_hw->sih, SICF_MPCLKE, 0);
+ brcms_b_core_ioctl(wlc_hw, SICF_MPCLKE, 0);
}
void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
@@ -1828,7 +1777,7 @@ void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
if (BRCMS_ISNPHY(wlc_hw->band) && NREV_GE(wlc_hw->band->phyrev, 3) &&
NREV_LE(wlc_hw->band->phyrev, 4)) {
/* Set the PHY bandwidth */
- ai_core_cflags(wlc_hw->sih, SICF_BWMASK, phy_bw_clkbits);
+ brcms_b_core_ioctl(wlc_hw, SICF_BWMASK, phy_bw_clkbits);
udelay(1);
@@ -1836,13 +1785,13 @@ void brcms_b_phy_reset(struct brcms_hardware *wlc_hw)
brcms_b_core_phypll_reset(wlc_hw);
/* reset the PHY */
- ai_core_cflags(wlc_hw->sih, (SICF_PRST | SICF_PCLKE),
- (SICF_PRST | SICF_PCLKE));
+ brcms_b_core_ioctl(wlc_hw, (SICF_PRST | SICF_PCLKE),
+ (SICF_PRST | SICF_PCLKE));
phy_in_reset = true;
} else {
- ai_core_cflags(wlc_hw->sih,
- (SICF_PRST | SICF_PCLKE | SICF_BWMASK),
- (SICF_PRST | SICF_PCLKE | phy_bw_clkbits));
+ brcms_b_core_ioctl(wlc_hw,
+ (SICF_PRST | SICF_PCLKE | SICF_BWMASK),
+ (SICF_PRST | SICF_PCLKE | phy_bw_clkbits));
}
udelay(2);
@@ -1859,8 +1808,8 @@ static void brcms_b_setband(struct brcms_hardware *wlc_hw, uint bandunit,
u32 macintmask;
/* Enable the d11 core before accessing it */
- if (!ai_iscoreup(wlc_hw->sih)) {
- ai_core_reset(wlc_hw->sih, 0, 0);
+ if (!bcma_core_is_enabled(wlc_hw->d11core)) {
+ bcma_core_enable(wlc_hw->d11core, 0);
brcms_c_mctrl_reset(wlc_hw);
}
@@ -1886,7 +1835,8 @@ static void brcms_b_setband(struct brcms_hardware *wlc_hw, uint bandunit,
brcms_intrsrestore(wlc->wl, macintmask);
/* ucode should still be suspended.. */
- WARN_ON((R_REG(&wlc_hw->regs->maccontrol) & MCTL_EN_MAC) != 0);
+ WARN_ON((bcma_read32(wlc_hw->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC) != 0);
}
static bool brcms_c_isgoodchip(struct brcms_hardware *wlc_hw)
@@ -1914,7 +1864,7 @@ static bool brcms_c_validboardtype(struct brcms_hardware *wlc_hw)
uint b2 = boardrev & 0xf;
/* boards from other vendors are always considered valid */
- if (wlc_hw->sih->boardvendor != PCI_VENDOR_ID_BROADCOM)
+ if (ai_get_boardvendor(wlc_hw->sih) != PCI_VENDOR_ID_BROADCOM)
return true;
/* do some boardrev sanity checks when boardvendor is Broadcom */
@@ -1986,7 +1936,7 @@ static void brcms_b_xtal(struct brcms_hardware *wlc_hw, bool want)
static bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw)
{
bool v, clk, xtal;
- u32 resetbits = 0, flags = 0;
+ u32 flags = 0;
xtal = wlc_hw->sbclk;
if (!xtal)
@@ -2003,22 +1953,22 @@ static bool brcms_b_radio_read_hwdisabled(struct brcms_hardware *wlc_hw)
flags |= SICF_PCLKE;
/*
+ * TODO: test suspend/resume
+ *
* AI chip doesn't restore bar0win2 on
* hibernation/resume, need sw fixup
*/
- if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
- (wlc_hw->sih->chip == BCM43225_CHIP_ID))
- wlc_hw->regs = (struct d11regs __iomem *)
- ai_setcore(wlc_hw->sih, D11_CORE_ID, 0);
- ai_core_reset(wlc_hw->sih, flags, resetbits);
+
+ bcma_core_enable(wlc_hw->d11core, flags);
brcms_c_mctrl_reset(wlc_hw);
}
- v = ((R_REG(&wlc_hw->regs->phydebug) & PDBG_RFD) != 0);
+ v = ((bcma_read32(wlc_hw->d11core,
+ D11REGOFFS(phydebug)) & PDBG_RFD) != 0);
/* put core back into reset */
if (!clk)
- ai_core_disable(wlc_hw->sih, 0);
+ bcma_core_disable(wlc_hw->d11core, 0);
if (!xtal)
brcms_b_xtal(wlc_hw, OFF);
@@ -2042,25 +1992,21 @@ static bool wlc_dma_rxreset(struct brcms_hardware *wlc_hw, uint fifo)
*/
void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
{
- struct d11regs __iomem *regs;
uint i;
bool fastclk;
- u32 resetbits = 0;
if (flags == BRCMS_USE_COREFLAGS)
flags = (wlc_hw->band->pi ? wlc_hw->band->core_flags : 0);
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
- regs = wlc_hw->regs;
-
/* request FAST clock if not on */
fastclk = wlc_hw->forcefastclk;
if (!fastclk)
brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
/* reset the dma engines except first time thru */
- if (ai_iscoreup(wlc_hw->sih)) {
+ if (bcma_core_is_enabled(wlc_hw->d11core)) {
for (i = 0; i < NFIFO; i++)
if ((wlc_hw->di[i]) && (!dma_txreset(wlc_hw->di[i])))
wiphy_err(wlc_hw->wlc->wiphy, "wl%d: %s: "
@@ -2098,14 +2044,14 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
* they may touch chipcommon as well.
*/
wlc_hw->clk = false;
- ai_core_reset(wlc_hw->sih, flags, resetbits);
+ bcma_core_enable(wlc_hw->d11core, flags);
wlc_hw->clk = true;
if (wlc_hw->band && wlc_hw->band->pi)
wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, true);
brcms_c_mctrl_reset(wlc_hw);
- if (wlc_hw->sih->cccaps & CC_CAP_PMU)
+ if (ai_get_cccaps(wlc_hw->sih) & CC_CAP_PMU)
brcms_b_clkctl_clk(wlc_hw, CLK_FAST);
brcms_b_phy_reset(wlc_hw);
@@ -2126,7 +2072,7 @@ void brcms_b_corereset(struct brcms_hardware *wlc_hw, u32 flags)
*/
static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw)
{
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
u16 fifo_nu;
u16 txfifo_startblk = TXFIFO_START_BLK, txfifo_endblk;
u16 txfifo_def, txfifo_def1;
@@ -2147,11 +2093,11 @@ static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw)
txfifo_cmd =
TXFIFOCMD_RESET_MASK | (fifo_nu << TXFIFOCMD_FIFOSEL_SHIFT);
- W_REG(&regs->xmtfifocmd, txfifo_cmd);
- W_REG(&regs->xmtfifodef, txfifo_def);
- W_REG(&regs->xmtfifodef1, txfifo_def1);
+ bcma_write16(core, D11REGOFFS(xmtfifocmd), txfifo_cmd);
+ bcma_write16(core, D11REGOFFS(xmtfifodef), txfifo_def);
+ bcma_write16(core, D11REGOFFS(xmtfifodef1), txfifo_def1);
- W_REG(&regs->xmtfifocmd, txfifo_cmd);
+ bcma_write16(core, D11REGOFFS(xmtfifocmd), txfifo_cmd);
txfifo_startblk += wlc_hw->xmtfifo_sz[fifo_nu];
}
@@ -2186,27 +2132,27 @@ static void brcms_b_corerev_fifofixup(struct brcms_hardware *wlc_hw)
void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode)
{
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
- if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
- (wlc_hw->sih->chip == BCM43225_CHIP_ID)) {
+ if ((ai_get_chip_id(wlc_hw->sih) == BCM43224_CHIP_ID) ||
+ (ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID)) {
if (spurmode == WL_SPURAVOID_ON2) { /* 126Mhz */
- W_REG(&regs->tsf_clk_frac_l, 0x2082);
- W_REG(&regs->tsf_clk_frac_h, 0x8);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x2082);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
} else if (spurmode == WL_SPURAVOID_ON1) { /* 123Mhz */
- W_REG(&regs->tsf_clk_frac_l, 0x5341);
- W_REG(&regs->tsf_clk_frac_h, 0x8);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x5341);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
} else { /* 120Mhz */
- W_REG(&regs->tsf_clk_frac_l, 0x8889);
- W_REG(&regs->tsf_clk_frac_h, 0x8);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x8889);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0x8);
}
} else if (BRCMS_ISLCNPHY(wlc_hw->band)) {
if (spurmode == WL_SPURAVOID_ON1) { /* 82Mhz */
- W_REG(&regs->tsf_clk_frac_l, 0x7CE0);
- W_REG(&regs->tsf_clk_frac_h, 0xC);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0x7CE0);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0xC);
} else { /* 80Mhz */
- W_REG(&regs->tsf_clk_frac_l, 0xCCCD);
- W_REG(&regs->tsf_clk_frac_h, 0xC);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_l), 0xCCCD);
+ bcma_write16(core, D11REGOFFS(tsf_clk_frac_h), 0xC);
}
}
}
@@ -2215,11 +2161,8 @@ void brcms_b_switch_macfreq(struct brcms_hardware *wlc_hw, u8 spurmode)
static void brcms_c_gpio_init(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
- struct d11regs __iomem *regs;
u32 gc, gm;
- regs = wlc_hw->regs;
-
/* use GPIO select 0 to get all gpio signals from the gpio out reg */
brcms_b_mctrl(wlc_hw, MCTL_GPOUT_SEL_MASK, 0);
@@ -2250,10 +2193,10 @@ static void brcms_c_gpio_init(struct brcms_c_info *wlc)
* The board itself is powered by these GPIOs
* (when not sending pattern) so set them high
*/
- OR_REG(&regs->psm_gpio_oe,
- (BOARD_GPIO_12 | BOARD_GPIO_13));
- OR_REG(&regs->psm_gpio_out,
- (BOARD_GPIO_12 | BOARD_GPIO_13));
+ bcma_set16(wlc_hw->d11core, D11REGOFFS(psm_gpio_oe),
+ (BOARD_GPIO_12 | BOARD_GPIO_13));
+ bcma_set16(wlc_hw->d11core, D11REGOFFS(psm_gpio_out),
+ (BOARD_GPIO_12 | BOARD_GPIO_13));
/* Enable antenna diversity, use 2x4 mode */
brcms_b_mhf(wlc_hw, MHF3, MHF3_ANTSEL_EN,
@@ -2280,7 +2223,7 @@ static void brcms_c_gpio_init(struct brcms_c_info *wlc)
static void brcms_ucode_write(struct brcms_hardware *wlc_hw,
const __le32 ucode[], const size_t nbytes)
{
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
uint i;
uint count;
@@ -2288,10 +2231,11 @@ static void brcms_ucode_write(struct brcms_hardware *wlc_hw,
count = (nbytes / sizeof(u32));
- W_REG(&regs->objaddr, (OBJADDR_AUTO_INC | OBJADDR_UCM_SEL));
- (void)R_REG(&regs->objaddr);
+ bcma_write32(core, D11REGOFFS(objaddr),
+ OBJADDR_AUTO_INC | OBJADDR_UCM_SEL);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
for (i = 0; i < count; i++)
- W_REG(&regs->objdata, le32_to_cpu(ucode[i]));
+ bcma_write32(core, D11REGOFFS(objdata), le32_to_cpu(ucode[i]));
}
@@ -2352,19 +2296,12 @@ void brcms_b_antsel_type_set(struct brcms_hardware *wlc_hw, u8 antsel_type)
wlc_phy_antsel_type_set(wlc_hw->band->pi, antsel_type);
}
-static void brcms_c_fatal_error(struct brcms_c_info *wlc)
-{
- wiphy_err(wlc->wiphy, "wl%d: fatal error, reinitializing\n",
- wlc->pub->unit);
- brcms_init(wlc->wl);
-}
-
static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
{
bool fatal = false;
uint unit;
uint intstatus, idx;
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
struct wiphy *wiphy = wlc_hw->wlc->wiphy;
unit = wlc_hw->unit;
@@ -2372,7 +2309,9 @@ static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
for (idx = 0; idx < NFIFO; idx++) {
/* read intstatus register and ignore any non-error bits */
intstatus =
- R_REG(&regs->intctrlregs[idx].intstatus) & I_ERRORS;
+ bcma_read32(core,
+ D11REGOFFS(intctrlregs[idx].intstatus)) &
+ I_ERRORS;
if (!intstatus)
continue;
@@ -2414,11 +2353,12 @@ static void brcms_b_fifoerrors(struct brcms_hardware *wlc_hw)
}
if (fatal) {
- brcms_c_fatal_error(wlc_hw->wlc); /* big hammer */
+ brcms_fatal_error(wlc_hw->wlc->wl); /* big hammer */
break;
} else
- W_REG(&regs->intctrlregs[idx].intstatus,
- intstatus);
+ bcma_write32(core,
+ D11REGOFFS(intctrlregs[idx].intstatus),
+ intstatus);
}
}
@@ -2426,28 +2366,7 @@ void brcms_c_intrson(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
wlc->macintmask = wlc->defmacintmask;
- W_REG(&wlc_hw->regs->macintmask, wlc->macintmask);
-}
-
-/*
- * callback for siutils.c, which has only wlc handler, no wl they both check
- * up, not only because there is no need to off/restore d11 interrupt but also
- * because per-port code may require sync with valid interrupt.
- */
-static u32 brcms_c_wlintrsoff(struct brcms_c_info *wlc)
-{
- if (!wlc->hw->up)
- return 0;
-
- return brcms_intrsoff(wlc->wl);
-}
-
-static void brcms_c_wlintrsrestore(struct brcms_c_info *wlc, u32 macintmask)
-{
- if (!wlc->hw->up)
- return;
-
- brcms_intrsrestore(wlc->wl, macintmask);
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), wlc->macintmask);
}
u32 brcms_c_intrsoff(struct brcms_c_info *wlc)
@@ -2460,8 +2379,8 @@ u32 brcms_c_intrsoff(struct brcms_c_info *wlc)
macintmask = wlc->macintmask; /* isr can still happen */
- W_REG(&wlc_hw->regs->macintmask, 0);
- (void)R_REG(&wlc_hw->regs->macintmask); /* sync readback */
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), 0);
+ (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(macintmask));
udelay(1); /* ensure int line is no longer driven */
wlc->macintmask = 0;
@@ -2476,9 +2395,10 @@ void brcms_c_intrsrestore(struct brcms_c_info *wlc, u32 macintmask)
return;
wlc->macintmask = macintmask;
- W_REG(&wlc_hw->regs->macintmask, wlc->macintmask);
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(macintmask), wlc->macintmask);
}
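The interrupt off/restore pair above keeps the usual sequence: clear the mask register, read it back so the posted write completes, then give the INT line a microsecond to settle before recording the saved mask. A standalone sketch of that sequence, with the register as a variable and the delay reduced to a comment:

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t macintmask_reg = 0xffffffffu;

static uint32_t intrs_off(uint32_t *saved_mask)
{
        uint32_t old = *saved_mask;        /* an ISR may still be in flight */

        macintmask_reg = 0;                /* mask all device interrupts */
        (void)macintmask_reg;              /* readback: sync the write */
        /* the driver does udelay(1) here so the INT line stops being driven */
        *saved_mask = 0;
        return old;
}

int main(void)
{
        uint32_t mask = 0x00000400u;

        printf("old mask 0x%08x\n", intrs_off(&mask));
        return 0;
}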
+/* assumes that the d11 MAC is enabled */
static void brcms_b_tx_fifo_suspend(struct brcms_hardware *wlc_hw,
uint tx_fifo)
{
@@ -2535,11 +2455,12 @@ static void brcms_b_tx_fifo_resume(struct brcms_hardware *wlc_hw,
}
}
-static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool on, u32 flags)
+/* precondition: requires the mac core to be enabled */
+static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool mute_tx)
{
static const u8 null_ether_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
- if (on) {
+ if (mute_tx) {
/* suspend tx fifos */
brcms_b_tx_fifo_suspend(wlc_hw, TX_DATA_FIFO);
brcms_b_tx_fifo_suspend(wlc_hw, TX_CTL_FIFO);
@@ -2561,14 +2482,20 @@ static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool on, u32 flags)
wlc_hw->etheraddr);
}
- wlc_phy_mute_upd(wlc_hw->band->pi, on, flags);
+ wlc_phy_mute_upd(wlc_hw->band->pi, mute_tx, 0);
- if (on)
+ if (mute_tx)
brcms_c_ucode_mute_override_set(wlc_hw);
else
brcms_c_ucode_mute_override_clear(wlc_hw);
}
+void
+brcms_c_mute(struct brcms_c_info *wlc, bool mute_tx)
+{
+ brcms_b_mute(wlc->hw, mute_tx);
+}
+
/*
* Read and clear macintmask and macintstatus and intstatus registers.
* This routine should be called with interrupts off
@@ -2580,11 +2507,11 @@ static void brcms_b_mute(struct brcms_hardware *wlc_hw, bool on, u32 flags)
static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr)
{
struct brcms_hardware *wlc_hw = wlc->hw;
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
u32 macintstatus;
/* macintstatus includes a DMA interrupt summary bit */
- macintstatus = R_REG(&regs->macintstatus);
+ macintstatus = bcma_read32(core, D11REGOFFS(macintstatus));
BCMMSG(wlc->wiphy, "wl%d: macintstatus: 0x%x\n", wlc_hw->unit,
macintstatus);
@@ -2611,12 +2538,12 @@ static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr)
* consequences
*/
/* turn off the interrupts */
- W_REG(&regs->macintmask, 0);
- (void)R_REG(&regs->macintmask); /* sync readback */
+ bcma_write32(core, D11REGOFFS(macintmask), 0);
+ (void)bcma_read32(core, D11REGOFFS(macintmask));
wlc->macintmask = 0;
/* clear device interrupts */
- W_REG(&regs->macintstatus, macintstatus);
+ bcma_write32(core, D11REGOFFS(macintstatus), macintstatus);
/* MI_DMAINT is indication of non-zero intstatus */
if (macintstatus & MI_DMAINT)
@@ -2625,8 +2552,8 @@ static inline u32 wlc_intstatus(struct brcms_c_info *wlc, bool in_isr)
* RX_FIFO. If MI_DMAINT is set, assume it
* is set and clear the interrupt.
*/
- W_REG(&regs->intctrlregs[RX_FIFO].intstatus,
- DEF_RXINTMASK);
+ bcma_write32(core, D11REGOFFS(intctrlregs[RX_FIFO].intstatus),
+ DEF_RXINTMASK);
return macintstatus;
}
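The write-zero-then-readback pair above is the usual posted-write flush: a read from the same core forces the preceding write out to the device, so the interrupt mask really is zero before execution continues. A generic sketch of the idiom, using only the accessors introduced by this patch:

	bcma_write32(core, D11REGOFFS(macintmask), 0);    /* mask all d11 interrupts */
	(void)bcma_read32(core, D11REGOFFS(macintmask));  /* flush the posted write */
	/* only now is it safe to assume the INT line is no longer asserted */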
@@ -2689,7 +2616,7 @@ bool brcms_c_isr(struct brcms_c_info *wlc, bool *wantdpc)
void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
u32 mc, mi;
struct wiphy *wiphy = wlc->wiphy;
@@ -2706,7 +2633,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
/* force the core awake */
brcms_c_ucode_wake_override_set(wlc_hw, BRCMS_WAKE_OVERRIDE_MACSUSPEND);
- mc = R_REG(&regs->maccontrol);
+ mc = bcma_read32(core, D11REGOFFS(maccontrol));
if (mc == 0xffffffff) {
wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
@@ -2718,7 +2645,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
WARN_ON(!(mc & MCTL_PSM_RUN));
WARN_ON(!(mc & MCTL_EN_MAC));
- mi = R_REG(&regs->macintstatus);
+ mi = bcma_read32(core, D11REGOFFS(macintstatus));
if (mi == 0xffffffff) {
wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
__func__);
@@ -2729,21 +2656,21 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, 0);
- SPINWAIT(!(R_REG(&regs->macintstatus) & MI_MACSSPNDD),
+ SPINWAIT(!(bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD),
BRCMS_MAX_MAC_SUSPEND);
- if (!(R_REG(&regs->macintstatus) & MI_MACSSPNDD)) {
+ if (!(bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD)) {
wiphy_err(wiphy, "wl%d: wlc_suspend_mac_and_wait: waited %d uS"
" and MI_MACSSPNDD is still not on.\n",
wlc_hw->unit, BRCMS_MAX_MAC_SUSPEND);
wiphy_err(wiphy, "wl%d: psmdebug 0x%08x, phydebug 0x%08x, "
"psm_brc 0x%04x\n", wlc_hw->unit,
- R_REG(&regs->psmdebug),
- R_REG(&regs->phydebug),
- R_REG(&regs->psm_brc));
+ bcma_read32(core, D11REGOFFS(psmdebug)),
+ bcma_read32(core, D11REGOFFS(phydebug)),
+ bcma_read16(core, D11REGOFFS(psm_brc)));
}
- mc = R_REG(&regs->maccontrol);
+ mc = bcma_read32(core, D11REGOFFS(maccontrol));
if (mc == 0xffffffff) {
wiphy_err(wiphy, "wl%d: %s: dead chip\n", wlc_hw->unit,
__func__);
@@ -2758,7 +2685,7 @@ void brcms_c_suspend_mac_and_wait(struct brcms_c_info *wlc)
void brcms_c_enable_mac(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
u32 mc, mi;
BCMMSG(wlc->wiphy, "wl%d: bandunit %d\n", wlc_hw->unit,
@@ -2771,20 +2698,20 @@ void brcms_c_enable_mac(struct brcms_c_info *wlc)
if (wlc_hw->mac_suspend_depth > 0)
return;
- mc = R_REG(&regs->maccontrol);
+ mc = bcma_read32(core, D11REGOFFS(maccontrol));
WARN_ON(mc & MCTL_PSM_JMP_0);
WARN_ON(mc & MCTL_EN_MAC);
WARN_ON(!(mc & MCTL_PSM_RUN));
brcms_b_mctrl(wlc_hw, MCTL_EN_MAC, MCTL_EN_MAC);
- W_REG(&regs->macintstatus, MI_MACSSPNDD);
+ bcma_write32(core, D11REGOFFS(macintstatus), MI_MACSSPNDD);
- mc = R_REG(&regs->maccontrol);
+ mc = bcma_read32(core, D11REGOFFS(maccontrol));
WARN_ON(mc & MCTL_PSM_JMP_0);
WARN_ON(!(mc & MCTL_EN_MAC));
WARN_ON(!(mc & MCTL_PSM_RUN));
- mi = R_REG(&regs->macintstatus);
+ mi = bcma_read32(core, D11REGOFFS(macintstatus));
WARN_ON(mi & MI_MACSSPNDD);
brcms_c_ucode_wake_override_clear(wlc_hw,
@@ -2801,55 +2728,53 @@ void brcms_b_band_stf_ss_set(struct brcms_hardware *wlc_hw, u8 stf_mode)
static bool brcms_b_validate_chip_access(struct brcms_hardware *wlc_hw)
{
- struct d11regs __iomem *regs;
+ struct bcma_device *core = wlc_hw->d11core;
u32 w, val;
struct wiphy *wiphy = wlc_hw->wlc->wiphy;
BCMMSG(wiphy, "wl%d\n", wlc_hw->unit);
- regs = wlc_hw->regs;
-
/* Validate dchip register access */
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- w = R_REG(&regs->objdata);
+ bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ w = bcma_read32(core, D11REGOFFS(objdata));
/* Can we write and read back a 32bit register? */
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- W_REG(&regs->objdata, (u32) 0xaa5555aa);
+ bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ bcma_write32(core, D11REGOFFS(objdata), (u32) 0xaa5555aa);
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- val = R_REG(&regs->objdata);
+ bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ val = bcma_read32(core, D11REGOFFS(objdata));
if (val != (u32) 0xaa5555aa) {
wiphy_err(wiphy, "wl%d: validate_chip_access: SHM = 0x%x, "
"expected 0xaa5555aa\n", wlc_hw->unit, val);
return false;
}
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- W_REG(&regs->objdata, (u32) 0x55aaaa55);
+ bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ bcma_write32(core, D11REGOFFS(objdata), (u32) 0x55aaaa55);
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- val = R_REG(&regs->objdata);
+ bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ val = bcma_read32(core, D11REGOFFS(objdata));
if (val != (u32) 0x55aaaa55) {
wiphy_err(wiphy, "wl%d: validate_chip_access: SHM = 0x%x, "
"expected 0x55aaaa55\n", wlc_hw->unit, val);
return false;
}
- W_REG(&regs->objaddr, OBJADDR_SHM_SEL | 0);
- (void)R_REG(&regs->objaddr);
- W_REG(&regs->objdata, w);
+ bcma_write32(core, D11REGOFFS(objaddr), OBJADDR_SHM_SEL | 0);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ bcma_write32(core, D11REGOFFS(objdata), w);
/* clear CFPStart */
- W_REG(&regs->tsf_cfpstart, 0);
+ bcma_write32(core, D11REGOFFS(tsf_cfpstart), 0);
- w = R_REG(&regs->maccontrol);
+ w = bcma_read32(core, D11REGOFFS(maccontrol));
if ((w != (MCTL_IHR_EN | MCTL_WAKE)) &&
(w != (MCTL_IHR_EN | MCTL_GMODE | MCTL_WAKE))) {
wiphy_err(wiphy, "wl%d: validate_chip_access: maccontrol = "
@@ -2866,38 +2791,38 @@ static bool brcms_b_validate_chip_access(struct brcms_hardware *wlc_hw)
void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on)
{
- struct d11regs __iomem *regs;
+ struct bcma_device *core = wlc_hw->d11core;
u32 tmp;
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
tmp = 0;
- regs = wlc_hw->regs;
if (on) {
- if ((wlc_hw->sih->chip == BCM4313_CHIP_ID)) {
- OR_REG(&regs->clk_ctl_st,
- (CCS_ERSRC_REQ_HT | CCS_ERSRC_REQ_D11PLL |
- CCS_ERSRC_REQ_PHYPLL));
- SPINWAIT((R_REG(&regs->clk_ctl_st) &
- (CCS_ERSRC_AVAIL_HT)) != (CCS_ERSRC_AVAIL_HT),
+ if ((ai_get_chip_id(wlc_hw->sih) == BCM4313_CHIP_ID)) {
+ bcma_set32(core, D11REGOFFS(clk_ctl_st),
+ CCS_ERSRC_REQ_HT |
+ CCS_ERSRC_REQ_D11PLL |
+ CCS_ERSRC_REQ_PHYPLL);
+ SPINWAIT((bcma_read32(core, D11REGOFFS(clk_ctl_st)) &
+ CCS_ERSRC_AVAIL_HT) != CCS_ERSRC_AVAIL_HT,
PHYPLL_WAIT_US);
- tmp = R_REG(&regs->clk_ctl_st);
- if ((tmp & (CCS_ERSRC_AVAIL_HT)) !=
- (CCS_ERSRC_AVAIL_HT))
+ tmp = bcma_read32(core, D11REGOFFS(clk_ctl_st));
+ if ((tmp & CCS_ERSRC_AVAIL_HT) != CCS_ERSRC_AVAIL_HT)
wiphy_err(wlc_hw->wlc->wiphy, "%s: turn on PHY"
" PLL failed\n", __func__);
} else {
- OR_REG(&regs->clk_ctl_st,
- (CCS_ERSRC_REQ_D11PLL | CCS_ERSRC_REQ_PHYPLL));
- SPINWAIT((R_REG(&regs->clk_ctl_st) &
+ bcma_set32(core, D11REGOFFS(clk_ctl_st),
+ tmp | CCS_ERSRC_REQ_D11PLL |
+ CCS_ERSRC_REQ_PHYPLL);
+ SPINWAIT((bcma_read32(core, D11REGOFFS(clk_ctl_st)) &
(CCS_ERSRC_AVAIL_D11PLL |
CCS_ERSRC_AVAIL_PHYPLL)) !=
(CCS_ERSRC_AVAIL_D11PLL |
CCS_ERSRC_AVAIL_PHYPLL), PHYPLL_WAIT_US);
- tmp = R_REG(&regs->clk_ctl_st);
+ tmp = bcma_read32(core, D11REGOFFS(clk_ctl_st));
if ((tmp &
(CCS_ERSRC_AVAIL_D11PLL | CCS_ERSRC_AVAIL_PHYPLL))
!=
@@ -2911,8 +2836,9 @@ void brcms_b_core_phypll_ctl(struct brcms_hardware *wlc_hw, bool on)
* be requesting it; so we'll deassert the request but
* not wait for status to comply.
*/
- AND_REG(&regs->clk_ctl_st, ~CCS_ERSRC_REQ_PHYPLL);
- tmp = R_REG(&regs->clk_ctl_st);
+ bcma_mask32(core, D11REGOFFS(clk_ctl_st),
+ ~CCS_ERSRC_REQ_PHYPLL);
+ (void)bcma_read32(core, D11REGOFFS(clk_ctl_st));
}
}
@@ -2940,7 +2866,7 @@ static void brcms_c_coredisable(struct brcms_hardware *wlc_hw)
brcms_b_core_phypll_ctl(wlc_hw, false);
wlc_hw->clk = false;
- ai_core_disable(wlc_hw->sih, 0);
+ bcma_core_disable(wlc_hw->d11core, 0);
wlc_phy_hw_clk_state_upd(wlc_hw->band->pi, false);
}
@@ -2964,35 +2890,31 @@ static void brcms_c_flushqueues(struct brcms_c_info *wlc)
static u16
brcms_b_read_objmem(struct brcms_hardware *wlc_hw, uint offset, u32 sel)
{
- struct d11regs __iomem *regs = wlc_hw->regs;
- u16 __iomem *objdata_lo = (u16 __iomem *)&regs->objdata;
- u16 __iomem *objdata_hi = objdata_lo + 1;
- u16 v;
+ struct bcma_device *core = wlc_hw->d11core;
+ u16 objoff = D11REGOFFS(objdata);
- W_REG(&regs->objaddr, sel | (offset >> 2));
- (void)R_REG(&regs->objaddr);
+ bcma_write32(core, D11REGOFFS(objaddr), sel | (offset >> 2));
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
if (offset & 2)
- v = R_REG(objdata_hi);
- else
- v = R_REG(objdata_lo);
+ objoff += 2;
- return v;
+ return bcma_read16(core, objoff);
}
static void
brcms_b_write_objmem(struct brcms_hardware *wlc_hw, uint offset, u16 v,
u32 sel)
{
- struct d11regs __iomem *regs = wlc_hw->regs;
- u16 __iomem *objdata_lo = (u16 __iomem *)&regs->objdata;
- u16 __iomem *objdata_hi = objdata_lo + 1;
+ struct bcma_device *core = wlc_hw->d11core;
+ u16 objoff = D11REGOFFS(objdata);
- W_REG(&regs->objaddr, sel | (offset >> 2));
- (void)R_REG(&regs->objaddr);
+ bcma_write32(core, D11REGOFFS(objaddr), sel | (offset >> 2));
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
if (offset & 2)
- W_REG(objdata_hi, v);
- else
- W_REG(objdata_lo, v);
+ objoff += 2;
+
+ bcma_write16(core, objoff, v);
}
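objaddr/objdata form an indirect window into on-chip object memory: the driver selects a 32-bit word (sel | word index), forces the select with a dummy readback, then accesses the 16-bit half chosen by bit 1 of the byte offset. A hypothetical helper built from the accessors above, assuming OBJADDR_SHM_SEL selects shared memory:

	static u16 example_read_shm_word(struct bcma_device *core, uint byte_offset)
	{
		u16 objoff = D11REGOFFS(objdata);

		bcma_write32(core, D11REGOFFS(objaddr),
			     OBJADDR_SHM_SEL | (byte_offset >> 2));
		(void)bcma_read32(core, D11REGOFFS(objaddr));
		if (byte_offset & 2)	/* upper half of the selected word */
			objoff += 2;

		return bcma_read16(core, objoff);
	}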
/*
@@ -3078,14 +3000,14 @@ static void brcms_b_retrylimit_upd(struct brcms_hardware *wlc_hw,
/* write retry limit to SCR, shouldn't need to suspend */
if (wlc_hw->up) {
- W_REG(&wlc_hw->regs->objaddr,
- OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
- (void)R_REG(&wlc_hw->regs->objaddr);
- W_REG(&wlc_hw->regs->objdata, wlc_hw->SRL);
- W_REG(&wlc_hw->regs->objaddr,
- OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
- (void)R_REG(&wlc_hw->regs->objaddr);
- W_REG(&wlc_hw->regs->objdata, wlc_hw->LRL);
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
+ OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
+ (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), wlc_hw->SRL);
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objaddr),
+ OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
+ (void)bcma_read32(wlc_hw->d11core, D11REGOFFS(objaddr));
+ bcma_write32(wlc_hw->d11core, D11REGOFFS(objdata), wlc_hw->LRL);
}
}
@@ -3132,7 +3054,7 @@ static bool brcms_c_ps_allowed(struct brcms_c_info *wlc)
return false;
/* disallow PS when one of these meets when not scanning */
- if (wlc->monitor)
+ if (wlc->filter_flags & FIF_PROMISC_IN_BSS)
return false;
if (cfg->associated) {
@@ -3267,9 +3189,9 @@ void brcms_c_init_scb(struct scb *scb)
static void brcms_b_coreinit(struct brcms_c_info *wlc)
{
struct brcms_hardware *wlc_hw = wlc->hw;
- struct d11regs __iomem *regs;
+ struct bcma_device *core = wlc_hw->d11core;
u32 sflags;
- uint bcnint_us;
+ u32 bcnint_us;
uint i = 0;
bool fifosz_fixup = false;
int err = 0;
@@ -3277,8 +3199,6 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
struct wiphy *wiphy = wlc->wiphy;
struct brcms_ucode *ucode = &wlc_hw->wlc->wl->ucode;
- regs = wlc_hw->regs;
-
BCMMSG(wlc->wiphy, "wl%d\n", wlc_hw->unit);
/* reset PSM */
@@ -3291,20 +3211,20 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
fifosz_fixup = true;
/* let the PSM run to the suspended state, set mode to BSS STA */
- W_REG(&regs->macintstatus, -1);
+ bcma_write32(core, D11REGOFFS(macintstatus), -1);
brcms_b_mctrl(wlc_hw, ~0,
(MCTL_IHR_EN | MCTL_INFRA | MCTL_PSM_RUN | MCTL_WAKE));
/* wait for ucode to self-suspend after auto-init */
- SPINWAIT(((R_REG(&regs->macintstatus) & MI_MACSSPNDD) == 0),
- 1000 * 1000);
- if ((R_REG(&regs->macintstatus) & MI_MACSSPNDD) == 0)
+ SPINWAIT(((bcma_read32(core, D11REGOFFS(macintstatus)) &
+ MI_MACSSPNDD) == 0), 1000 * 1000);
+ if ((bcma_read32(core, D11REGOFFS(macintstatus)) & MI_MACSSPNDD) == 0)
wiphy_err(wiphy, "wl%d: wlc_coreinit: ucode did not self-"
"suspend!\n", wlc_hw->unit);
brcms_c_gpio_init(wlc);
- sflags = ai_core_sflags(wlc_hw->sih, 0, 0);
+ sflags = bcma_aread32(core, BCMA_IOST);
if (D11REV_IS(wlc_hw->corerev, 23)) {
if (BRCMS_ISNPHY(wlc_hw->band))
@@ -3368,7 +3288,7 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
wlc_hw->xmtfifo_sz[i], i);
/* make sure we can still talk to the mac */
- WARN_ON(R_REG(&regs->maccontrol) == 0xffffffff);
+ WARN_ON(bcma_read32(core, D11REGOFFS(maccontrol)) == 0xffffffff);
/* band-specific inits done by wlc_bsinit() */
@@ -3377,7 +3297,7 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
brcms_b_write_shm(wlc_hw, M_MAX_ANTCNT, ANTCNT);
/* enable one rx interrupt per received frame */
- W_REG(&regs->intrcvlazy[0], (1 << IRL_FC_SHIFT));
+ bcma_write32(core, D11REGOFFS(intrcvlazy[0]), (1 << IRL_FC_SHIFT));
/* set the station mode (BSS STA) */
brcms_b_mctrl(wlc_hw,
@@ -3386,19 +3306,21 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
/* set up Beacon interval */
bcnint_us = 0x8000 << 10;
- W_REG(&regs->tsf_cfprep, (bcnint_us << CFPREP_CBI_SHIFT));
- W_REG(&regs->tsf_cfpstart, bcnint_us);
- W_REG(&regs->macintstatus, MI_GP1);
+ bcma_write32(core, D11REGOFFS(tsf_cfprep),
+ (bcnint_us << CFPREP_CBI_SHIFT));
+ bcma_write32(core, D11REGOFFS(tsf_cfpstart), bcnint_us);
+ bcma_write32(core, D11REGOFFS(macintstatus), MI_GP1);
/* write interrupt mask */
- W_REG(&regs->intctrlregs[RX_FIFO].intmask, DEF_RXINTMASK);
+ bcma_write32(core, D11REGOFFS(intctrlregs[RX_FIFO].intmask),
+ DEF_RXINTMASK);
/* allow the MAC to control the PHY clock (dynamic on/off) */
brcms_b_macphyclk_set(wlc_hw, ON);
/* program dynamic clock control fast powerup delay register */
wlc->fastpwrup_dly = ai_clkctl_fast_pwrup_delay(wlc_hw->sih);
- W_REG(&regs->scc_fastpwrup_dly, wlc->fastpwrup_dly);
+ bcma_write16(core, D11REGOFFS(scc_fastpwrup_dly), wlc->fastpwrup_dly);
/* tell the ucode the corerev */
brcms_b_write_shm(wlc_hw, M_MACHW_VER, (u16) wlc_hw->corerev);
@@ -3411,19 +3333,21 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
machwcap >> 16) & 0xffff));
/* write retry limits to SCR, this done after PSM init */
- W_REG(&regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
- (void)R_REG(&regs->objaddr);
- W_REG(&regs->objdata, wlc_hw->SRL);
- W_REG(&regs->objaddr, OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
- (void)R_REG(&regs->objaddr);
- W_REG(&regs->objdata, wlc_hw->LRL);
+ bcma_write32(core, D11REGOFFS(objaddr),
+ OBJADDR_SCR_SEL | S_DOT11_SRC_LMT);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ bcma_write32(core, D11REGOFFS(objdata), wlc_hw->SRL);
+ bcma_write32(core, D11REGOFFS(objaddr),
+ OBJADDR_SCR_SEL | S_DOT11_LRC_LMT);
+ (void)bcma_read32(core, D11REGOFFS(objaddr));
+ bcma_write32(core, D11REGOFFS(objdata), wlc_hw->LRL);
/* write rate fallback retry limits */
brcms_b_write_shm(wlc_hw, M_SFRMTXCNTFBRTHSD, wlc_hw->SFBL);
brcms_b_write_shm(wlc_hw, M_LFRMTXCNTFBRTHSD, wlc_hw->LFBL);
- AND_REG(&regs->ifs_ctl, 0x0FFF);
- W_REG(&regs->ifs_aifsn, EDCF_AIFSN_MIN);
+ bcma_mask16(core, D11REGOFFS(ifs_ctl), 0x0FFF);
+ bcma_write16(core, D11REGOFFS(ifs_aifsn), EDCF_AIFSN_MIN);
/* init the tx dma engines */
for (i = 0; i < NFIFO; i++) {
@@ -3437,8 +3361,7 @@ static void brcms_b_coreinit(struct brcms_c_info *wlc)
}
void
-static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec,
- bool mute) {
+static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec) {
u32 macintmask;
bool fastclk;
struct brcms_c_info *wlc = wlc_hw->wlc;
@@ -3463,10 +3386,6 @@ static brcms_b_init(struct brcms_hardware *wlc_hw, u16 chanspec,
/* core-specific initialization */
brcms_b_coreinit(wlc);
- /* suspend the tx fifos and mute the phy for preism cac time */
- if (mute)
- brcms_b_mute(wlc_hw, ON, PHY_MUTE_FOR_PREISM);
-
/* band-specific inits */
brcms_b_bsinit(wlc, chanspec);
@@ -3656,42 +3575,32 @@ static void brcms_c_bandinit_ordered(struct brcms_c_info *wlc,
brcms_c_set_phy_chanspec(wlc, chanspec);
}
-static void brcms_c_mac_bcn_promisc(struct brcms_c_info *wlc)
-{
- if (wlc->bcnmisc_monitor)
- brcms_b_mctrl(wlc->hw, MCTL_BCNS_PROMISC, MCTL_BCNS_PROMISC);
- else
- brcms_b_mctrl(wlc->hw, MCTL_BCNS_PROMISC, 0);
-}
-
-void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc, bool promisc)
-{
- wlc->bcnmisc_monitor = promisc;
- brcms_c_mac_bcn_promisc(wlc);
-}
-
-/* set or clear maccontrol bits MCTL_PROMISC and MCTL_KEEPCONTROL */
-static void brcms_c_mac_promisc(struct brcms_c_info *wlc)
+/*
+ * Set or clear filtering-related maccontrol bits based on
+ * the specified filter flags
+ */
+void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags)
{
u32 promisc_bits = 0;
- /*
- * promiscuous mode just sets MCTL_PROMISC
- * Note: APs get all BSS traffic without the need to set
- * the MCTL_PROMISC bit since all BSS data traffic is
- * directed at the AP
- */
- if (wlc->pub->promisc)
+ wlc->filter_flags = filter_flags;
+
+ if (filter_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS))
promisc_bits |= MCTL_PROMISC;
- /* monitor mode needs both MCTL_PROMISC and MCTL_KEEPCONTROL
- * Note: monitor mode also needs MCTL_BCNS_PROMISC, but that is
- * handled in brcms_c_mac_bcn_promisc()
- */
- if (wlc->monitor)
- promisc_bits |= MCTL_PROMISC | MCTL_KEEPCONTROL;
+ if (filter_flags & FIF_BCN_PRBRESP_PROMISC)
+ promisc_bits |= MCTL_BCNS_PROMISC;
- brcms_b_mctrl(wlc->hw, MCTL_PROMISC | MCTL_KEEPCONTROL, promisc_bits);
+ if (filter_flags & FIF_FCSFAIL)
+ promisc_bits |= MCTL_KEEPBADFCS;
+
+ if (filter_flags & (FIF_CONTROL | FIF_PSPOLL))
+ promisc_bits |= MCTL_KEEPCONTROL;
+
+ brcms_b_mctrl(wlc->hw,
+ MCTL_PROMISC | MCTL_BCNS_PROMISC |
+ MCTL_KEEPCONTROL | MCTL_KEEPBADFCS,
+ promisc_bits);
}
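The rewrite drops the driver-private monitor/bcnmisc booleans in favour of the mac80211 FIF_* filter flags, mapping them directly onto maccontrol bits (MCTL_PROMISC, MCTL_BCNS_PROMISC, MCTL_KEEPBADFCS, MCTL_KEEPCONTROL). An illustrative call only — the real mac80211 configure_filter hook lives in the per-port code:

	/* e.g. monitor-style filtering: keep everything the MAC can deliver */
	brcms_c_mac_promisc(wlc, FIF_PROMISC_IN_BSS | FIF_BCN_PRBRESP_PROMISC |
				 FIF_CONTROL | FIF_PSPOLL | FIF_FCSFAIL);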
/*
@@ -3721,10 +3630,6 @@ static void brcms_c_ucode_mac_upd(struct brcms_c_info *wlc)
} else {
/* disable an active IBSS if we are not on the home channel */
}
-
- /* update the various promisc bits */
- brcms_c_mac_bcn_promisc(wlc);
- brcms_c_mac_promisc(wlc);
}
static void brcms_c_write_rate_shm(struct brcms_c_info *wlc, u8 rate,
@@ -3899,7 +3804,7 @@ static void brcms_c_set_ps_ctrl(struct brcms_c_info *wlc)
BCMMSG(wlc->wiphy, "wl%d: hps %d\n", wlc->pub->unit, hps);
- v1 = R_REG(&wlc->regs->maccontrol);
+ v1 = bcma_read32(wlc->hw->d11core, D11REGOFFS(maccontrol));
v2 = MCTL_WAKE;
if (hps)
v2 |= MCTL_HPS;
@@ -3979,7 +3884,7 @@ static void brcms_c_set_home_chanspec(struct brcms_c_info *wlc, u16 chanspec)
void
brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec,
- bool mute, struct txpwr_limits *txpwr)
+ bool mute_tx, struct txpwr_limits *txpwr)
{
uint bandunit;
@@ -4005,7 +3910,7 @@ brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec,
}
}
- wlc_phy_initcal_enable(wlc_hw->band->pi, !mute);
+ wlc_phy_initcal_enable(wlc_hw->band->pi, !mute_tx);
if (!wlc_hw->up) {
if (wlc_hw->clk)
@@ -4017,7 +3922,7 @@ brcms_b_set_chanspec(struct brcms_hardware *wlc_hw, u16 chanspec,
wlc_phy_txpower_limit_set(wlc_hw->band->pi, txpwr, chanspec);
/* Update muting of the channel */
- brcms_b_mute(wlc_hw, mute, 0);
+ brcms_b_mute(wlc_hw, mute_tx);
}
}
@@ -4205,7 +4110,7 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
EDCF_TXOP2USEC(acp_shm.txop);
acp_shm.aifs = (params->aifs & EDCF_AIFSN_MASK);
- if (aci == AC_VI && acp_shm.txop == 0
+ if (aci == IEEE80211_AC_VI && acp_shm.txop == 0
&& acp_shm.aifs < EDCF_AIFSN_MAX)
acp_shm.aifs++;
@@ -4218,7 +4123,8 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
acp_shm.cwmax = params->cw_max;
acp_shm.cwcur = acp_shm.cwmin;
acp_shm.bslots =
- R_REG(&wlc->regs->tsf_random) & acp_shm.cwcur;
+ bcma_read16(wlc->hw->d11core, D11REGOFFS(tsf_random)) &
+ acp_shm.cwcur;
acp_shm.reggap = acp_shm.bslots + acp_shm.aifs;
/* Indicate the new params to the ucode */
acp_shm.status = brcms_b_read_shm(wlc->hw, (M_EDCF_QINFO +
@@ -4242,7 +4148,7 @@ void brcms_c_wme_setparams(struct brcms_c_info *wlc, u16 aci,
}
}
-void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend)
+static void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend)
{
u16 aci;
int i_ac;
@@ -4255,7 +4161,7 @@ void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend)
}; /* ucode needs these parameters during its initialization */
const struct edcf_acparam *edcf_acp = &default_edcf_acparams[0];
- for (i_ac = 0; i_ac < AC_COUNT; i_ac++, edcf_acp++) {
+ for (i_ac = 0; i_ac < IEEE80211_NUM_ACS; i_ac++, edcf_acp++) {
/* find out which ac this set of params applies to */
aci = (edcf_acp->ACI & EDCF_ACI_MASK) >> EDCF_ACI_SHIFT;
@@ -4277,17 +4183,6 @@ void brcms_c_edcf_setparams(struct brcms_c_info *wlc, bool suspend)
}
}
-/* maintain LED behavior in down state */
-static void brcms_c_down_led_upd(struct brcms_c_info *wlc)
-{
- /*
- * maintain LEDs while in down state, turn on sbclk if
- * not available yet. Turn on sbclk if necessary
- */
- brcms_b_pllreq(wlc->hw, true, BRCMS_PLLREQ_FLIP);
- brcms_b_pllreq(wlc->hw, false, BRCMS_PLLREQ_FLIP);
-}
-
static void brcms_c_radio_monitor_start(struct brcms_c_info *wlc)
{
/* Don't start the timer if HWRADIO feature is disabled */
@@ -4299,28 +4194,6 @@ static void brcms_c_radio_monitor_start(struct brcms_c_info *wlc)
brcms_add_timer(wlc->radio_timer, TIMER_INTERVAL_RADIOCHK, true);
}
-static void brcms_c_radio_disable(struct brcms_c_info *wlc)
-{
- if (!wlc->pub->up) {
- brcms_c_down_led_upd(wlc);
- return;
- }
-
- brcms_c_radio_monitor_start(wlc);
- brcms_down(wlc->wl);
-}
-
-static void brcms_c_radio_enable(struct brcms_c_info *wlc)
-{
- if (wlc->pub->up)
- return;
-
- if (brcms_deviceremoved(wlc))
- return;
-
- brcms_up(wlc->wl);
-}
-
static bool brcms_c_radio_monitor_stop(struct brcms_c_info *wlc)
{
if (!wlc->radio_monitor)
@@ -4343,18 +4216,6 @@ static void brcms_c_radio_hwdisable_upd(struct brcms_c_info *wlc)
mboolclr(wlc->pub->radio_disabled, WL_RADIO_HW_DISABLE);
}
-/*
- * centralized radio disable/enable function,
- * invoke radio enable/disable after updating hwradio status
- */
-static void brcms_c_radio_upd(struct brcms_c_info *wlc)
-{
- if (wlc->pub->radio_disabled)
- brcms_c_radio_disable(wlc);
- else
- brcms_c_radio_enable(wlc);
-}
-
/* update hwradio status and return it */
bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc)
{
@@ -4376,12 +4237,7 @@ static void brcms_c_radio_timer(void *arg)
return;
}
- /* cap mpc off count */
- if (wlc->mpc_offcnt < BRCMS_MPC_MAX_DELAYCNT)
- wlc->mpc_offcnt++;
-
brcms_c_radio_hwdisable_upd(wlc);
- brcms_c_radio_upd(wlc);
}
/* common low-level watchdog code */
@@ -4407,60 +4263,6 @@ static void brcms_b_watchdog(void *arg)
wlc_phy_watchdog(wlc_hw->band->pi);
}
-static void brcms_c_radio_mpc_upd(struct brcms_c_info *wlc)
-{
- bool mpc_radio, radio_state;
-
- /*
- * Clear the WL_RADIO_MPC_DISABLE bit when mpc feature is disabled
- * in case the WL_RADIO_MPC_DISABLE bit was set. Stop the radio
- * monitor also when WL_RADIO_MPC_DISABLE is the only reason that
- * the radio is going down.
- */
- if (!wlc->mpc) {
- if (!wlc->pub->radio_disabled)
- return;
- mboolclr(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE);
- brcms_c_radio_upd(wlc);
- if (!wlc->pub->radio_disabled)
- brcms_c_radio_monitor_stop(wlc);
- return;
- }
-
- /*
- * sync ismpc logic with WL_RADIO_MPC_DISABLE bit in
- * wlc->pub->radio_disabled to go ON, always call radio_upd
- * synchronously to go OFF, postpone radio_upd to later when
- * context is safe(e.g. watchdog)
- */
- radio_state =
- (mboolisset(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE) ? OFF :
- ON);
- mpc_radio = (brcms_c_ismpc(wlc) == true) ? OFF : ON;
-
- if (radio_state == ON && mpc_radio == OFF)
- wlc->mpc_delay_off = wlc->mpc_dlycnt;
- else if (radio_state == OFF && mpc_radio == ON) {
- mboolclr(wlc->pub->radio_disabled, WL_RADIO_MPC_DISABLE);
- brcms_c_radio_upd(wlc);
- if (wlc->mpc_offcnt < BRCMS_MPC_THRESHOLD)
- wlc->mpc_dlycnt = BRCMS_MPC_MAX_DELAYCNT;
- else
- wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT;
- }
- /*
- * Below logic is meant to capture the transition from mpc off
- * to mpc on for reasons other than wlc->mpc_delay_off keeping
- * the mpc off. In that case reset wlc->mpc_delay_off to
- * wlc->mpc_dlycnt, so that we restart the countdown of mpc_delay_off
- */
- if ((wlc->prev_non_delay_mpc == false) &&
- (brcms_c_is_non_delay_mpc(wlc) == true) && wlc->mpc_delay_off)
- wlc->mpc_delay_off = wlc->mpc_dlycnt;
-
- wlc->prev_non_delay_mpc = brcms_c_is_non_delay_mpc(wlc);
-}
-
/* common watchdog code */
static void brcms_c_watchdog(void *arg)
{
@@ -4481,21 +4283,7 @@ static void brcms_c_watchdog(void *arg)
/* increment second count */
wlc->pub->now++;
- /* delay radio disable */
- if (wlc->mpc_delay_off) {
- if (--wlc->mpc_delay_off == 0) {
- mboolset(wlc->pub->radio_disabled,
- WL_RADIO_MPC_DISABLE);
- if (wlc->mpc && brcms_c_ismpc(wlc))
- wlc->mpc_offcnt = 0;
- }
- }
-
- /* mpc sync */
- brcms_c_radio_mpc_upd(wlc);
- /* radio sync: sw/hw/mpc --> radio_disable/radio_enable */
brcms_c_radio_hwdisable_upd(wlc);
- brcms_c_radio_upd(wlc);
/* if radio is disable, driver may be down, quit here */
if (wlc->pub->radio_disabled)
return;
@@ -4599,9 +4387,6 @@ static void brcms_c_info_init(struct brcms_c_info *wlc, int unit)
/* WME QoS mode is Auto by default */
wlc->pub->_ampdu = AMPDU_AGG_HOST;
wlc->pub->bcmerror = 0;
-
- /* initialize mpc delay */
- wlc->mpc_delay_off = wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT;
}
static uint brcms_c_attach_module(struct brcms_c_info *wlc)
@@ -4647,21 +4432,21 @@ struct brcms_pub *brcms_c_pub(struct brcms_c_info *wlc)
* initialize software state for each core and band
* put the whole chip in reset(driver down state), no clock
*/
-static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
- uint unit, bool piomode, void __iomem *regsva,
- struct pci_dev *btparam)
+static int brcms_b_attach(struct brcms_c_info *wlc, struct bcma_device *core,
+ uint unit, bool piomode)
{
struct brcms_hardware *wlc_hw;
- struct d11regs __iomem *regs;
char *macaddr = NULL;
uint err = 0;
uint j;
bool wme = false;
struct shared_phy_params sha_params;
struct wiphy *wiphy = wlc->wiphy;
+ struct pci_dev *pcidev = core->bus->host_pci;
- BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit, vendor,
- device);
+ BCMMSG(wlc->wiphy, "wl%d: vendor 0x%x device 0x%x\n", unit,
+ pcidev->vendor,
+ pcidev->device);
wme = true;
@@ -4678,7 +4463,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
* Do the hardware portion of the attach. Also initialize software
* state that depends on the particular hardware we are running.
*/
- wlc_hw->sih = ai_attach(regsva, btparam);
+ wlc_hw->sih = ai_attach(core->bus);
if (wlc_hw->sih == NULL) {
wiphy_err(wiphy, "wl%d: brcms_b_attach: si_attach failed\n",
unit);
@@ -4687,25 +4472,19 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
}
/* verify again the device is supported */
- if (!brcms_c_chipmatch(vendor, device)) {
+ if (!brcms_c_chipmatch(pcidev->vendor, pcidev->device)) {
wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported "
"vendor/device (0x%x/0x%x)\n",
- unit, vendor, device);
+ unit, pcidev->vendor, pcidev->device);
err = 12;
goto fail;
}
- wlc_hw->vendorid = vendor;
- wlc_hw->deviceid = device;
-
- /* set bar0 window to point at D11 core */
- wlc_hw->regs = (struct d11regs __iomem *)
- ai_setcore(wlc_hw->sih, D11_CORE_ID, 0);
- wlc_hw->corerev = ai_corerev(wlc_hw->sih);
-
- regs = wlc_hw->regs;
+ wlc_hw->vendorid = pcidev->vendor;
+ wlc_hw->deviceid = pcidev->device;
- wlc->regs = wlc_hw->regs;
+ wlc_hw->d11core = core;
+ wlc_hw->corerev = core->id.rev;
/* validate chip, chiprev and corerev */
if (!brcms_c_isgoodchip(wlc_hw)) {
@@ -4740,8 +4519,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
wlc_hw->boardrev = (u16) j;
if (!brcms_c_validboardtype(wlc_hw)) {
wiphy_err(wiphy, "wl%d: brcms_b_attach: Unsupported Broadcom "
- "board type (0x%x)" " or revision level (0x%x)\n",
- unit, wlc_hw->sih->boardtype, wlc_hw->boardrev);
+ "board type (0x%x)" " or revision level (0x%x)\n",
+ unit, ai_get_boardtype(wlc_hw->sih),
+ wlc_hw->boardrev);
err = 15;
goto fail;
}
@@ -4762,7 +4542,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
else
wlc_hw->_nbands = 1;
- if ((wlc_hw->sih->chip == BCM43225_CHIP_ID))
+ if ((ai_get_chip_id(wlc_hw->sih) == BCM43225_CHIP_ID))
wlc_hw->_nbands = 1;
/* BMAC_NOTE: remove init of pub values when brcms_c_attach()
@@ -4794,16 +4574,14 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
sha_params.corerev = wlc_hw->corerev;
sha_params.vid = wlc_hw->vendorid;
sha_params.did = wlc_hw->deviceid;
- sha_params.chip = wlc_hw->sih->chip;
- sha_params.chiprev = wlc_hw->sih->chiprev;
- sha_params.chippkg = wlc_hw->sih->chippkg;
+ sha_params.chip = ai_get_chip_id(wlc_hw->sih);
+ sha_params.chiprev = ai_get_chiprev(wlc_hw->sih);
+ sha_params.chippkg = ai_get_chippkg(wlc_hw->sih);
sha_params.sromrev = wlc_hw->sromrev;
- sha_params.boardtype = wlc_hw->sih->boardtype;
+ sha_params.boardtype = ai_get_boardtype(wlc_hw->sih);
sha_params.boardrev = wlc_hw->boardrev;
- sha_params.boardvendor = wlc_hw->sih->boardvendor;
sha_params.boardflags = wlc_hw->boardflags;
sha_params.boardflags2 = wlc_hw->boardflags2;
- sha_params.buscorerev = wlc_hw->sih->buscorerev;
/* alloc and save pointer to shared phy state area */
wlc_hw->phy_sh = wlc_phy_shared_attach(&sha_params);
@@ -4825,9 +4603,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
wlc_hw->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G;
wlc->band->bandunit = j;
wlc->band->bandtype = j ? BRCM_BAND_5G : BRCM_BAND_2G;
- wlc->core->coreidx = ai_coreidx(wlc_hw->sih);
+ wlc->core->coreidx = core->core_index;
- wlc_hw->machwcap = R_REG(&regs->machwcap);
+ wlc_hw->machwcap = bcma_read32(core, D11REGOFFS(machwcap));
wlc_hw->machwcap_backup = wlc_hw->machwcap;
/* init tx fifo size */
@@ -4836,7 +4614,7 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
/* Get a phy for this band */
wlc_hw->band->pi =
- wlc_phy_attach(wlc_hw->phy_sh, regs,
+ wlc_phy_attach(wlc_hw->phy_sh, core,
wlc_hw->band->bandtype,
wlc->wiphy);
if (wlc_hw->band->pi == NULL) {
@@ -4910,10 +4688,6 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
/* Match driver "down" state */
ai_pci_down(wlc_hw->sih);
- /* register sb interrupt callback functions */
- ai_register_intr_callback(wlc_hw->sih, (void *)brcms_c_wlintrsoff,
- (void *)brcms_c_wlintrsrestore, NULL, wlc);
-
/* turn off pll and xtal to match driver "down" state */
brcms_b_xtal(wlc_hw, OFF);
@@ -4944,10 +4718,9 @@ static int brcms_b_attach(struct brcms_c_info *wlc, u16 vendor, u16 device,
goto fail;
}
- BCMMSG(wlc->wiphy,
- "deviceid 0x%x nbands %d board 0x%x macaddr: %s\n",
- wlc_hw->deviceid, wlc_hw->_nbands,
- wlc_hw->sih->boardtype, macaddr);
+ BCMMSG(wlc->wiphy, "deviceid 0x%x nbands %d board 0x%x macaddr: %s\n",
+ wlc_hw->deviceid, wlc_hw->_nbands, ai_get_boardtype(wlc_hw->sih),
+ macaddr);
return err;
@@ -5185,7 +4958,6 @@ static int brcms_b_detach(struct brcms_c_info *wlc)
* and per-port interrupt object may has been freed. this must
* be done before sb core switch
*/
- ai_deregister_intr_callback(wlc_hw->sih);
ai_pci_sleep(wlc_hw->sih);
}
@@ -5259,9 +5031,6 @@ static void brcms_c_ap_upd(struct brcms_c_info *wlc)
{
/* STA-BSS; short capable */
wlc->PLCPHdr_override = BRCMS_PLCP_SHORT;
-
- /* fixup mpc */
- wlc->mpc = true;
}
/* Initialize just the hardware when coming out of POR or S3/S5 system states */
@@ -5283,13 +5052,11 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
ai_pci_fixcfg(wlc_hw->sih);
/*
+ * TODO: test suspend/resume
+ *
* AI chip doesn't restore bar0win2 on
* hibernation/resume, need sw fixup
*/
- if ((wlc_hw->sih->chip == BCM43224_CHIP_ID) ||
- (wlc_hw->sih->chip == BCM43225_CHIP_ID))
- wlc_hw->regs = (struct d11regs __iomem *)
- ai_setcore(wlc_hw->sih, D11_CORE_ID, 0);
/*
* Inform phy that a POR reset has occurred so
@@ -5301,7 +5068,7 @@ static void brcms_b_hw_up(struct brcms_hardware *wlc_hw)
wlc_hw->wlc->pub->hw_up = true;
if ((wlc_hw->boardflags & BFL_FEM)
- && (wlc_hw->sih->chip == BCM4313_CHIP_ID)) {
+ && (ai_get_chip_id(wlc_hw->sih) == BCM4313_CHIP_ID)) {
if (!
(wlc_hw->boardrev >= 0x1250
&& (wlc_hw->boardflags & BFL_FEM_BT)))
@@ -5376,7 +5143,7 @@ static void brcms_c_wme_retries_write(struct brcms_c_info *wlc)
if (!wlc->clk)
return;
- for (ac = 0; ac < AC_COUNT; ac++)
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
brcms_b_write_shm(wlc->hw, M_AC_TXLMT_ADDR(ac),
wlc->wme_retries[ac]);
}
@@ -5396,7 +5163,7 @@ int brcms_c_up(struct brcms_c_info *wlc)
}
if ((wlc->pub->boardflags & BFL_FEM)
- && (wlc->pub->sih->chip == BCM4313_CHIP_ID)) {
+ && (ai_get_chip_id(wlc->hw->sih) == BCM4313_CHIP_ID)) {
if (wlc->pub->boardrev >= 0x1250
&& (wlc->pub->boardflags & BFL_FEM_BT))
brcms_b_mhf(wlc->hw, MHF5, MHF5_4313_GPIOCTRL,
@@ -5533,9 +5300,9 @@ static int brcms_b_down_finish(struct brcms_hardware *wlc_hw)
} else {
/* Reset and disable the core */
- if (ai_iscoreup(wlc_hw->sih)) {
- if (R_REG(&wlc_hw->regs->maccontrol) &
- MCTL_EN_MAC)
+ if (bcma_core_is_enabled(wlc_hw->d11core)) {
+ if (bcma_read32(wlc_hw->d11core,
+ D11REGOFFS(maccontrol)) & MCTL_EN_MAC)
brcms_c_suspend_mac_and_wait(wlc_hw->wlc);
callbacks += brcms_reset(wlc_hw->wlc->wl);
brcms_c_coredisable(wlc_hw);
@@ -5575,7 +5342,6 @@ uint brcms_c_down(struct brcms_c_info *wlc)
if (!wlc->pub->up)
return callbacks;
- /* in between, mpc could try to bring down again.. */
wlc->going_down = true;
callbacks += brcms_b_bmac_down_prep(wlc->hw);
@@ -5852,7 +5618,7 @@ int brcms_c_set_rate_limit(struct brcms_c_info *wlc, u16 srl, u16 lrl)
brcms_b_retrylimit_upd(wlc->hw, wlc->SRL, wlc->LRL);
- for (ac = 0; ac < AC_COUNT; ac++) {
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
wlc->wme_retries[ac] = SFIELD(wlc->wme_retries[ac],
EDCF_SHORT, wlc->SRL);
wlc->wme_retries[ac] = SFIELD(wlc->wme_retries[ac],
@@ -6103,7 +5869,6 @@ void brcms_c_print_txdesc(struct d11txh *txh)
u8 *rtsph = txh->RTSPhyHeader;
struct ieee80211_rts rts = txh->rts_frame;
- char hexbuf[256];
/* add plcp header along with txh descriptor */
printk(KERN_DEBUG "Raw TxDesc + plcp header:\n");
@@ -6124,17 +5889,16 @@ void brcms_c_print_txdesc(struct d11txh *txh)
printk(KERN_DEBUG "XtraFrameTypes: %04x ", xtraft);
printk(KERN_DEBUG "\n");
- brcmu_format_hex(hexbuf, iv, sizeof(txh->IV));
- printk(KERN_DEBUG "SecIV: %s\n", hexbuf);
- brcmu_format_hex(hexbuf, ra, sizeof(txh->TxFrameRA));
- printk(KERN_DEBUG "RA: %s\n", hexbuf);
+ print_hex_dump_bytes("SecIV:", DUMP_PREFIX_OFFSET, iv, sizeof(txh->IV));
+ print_hex_dump_bytes("RA:", DUMP_PREFIX_OFFSET,
+ ra, sizeof(txh->TxFrameRA));
printk(KERN_DEBUG "Fb FES Time: %04x ", tfestfb);
- brcmu_format_hex(hexbuf, rtspfb, sizeof(txh->RTSPLCPFallback));
- printk(KERN_DEBUG "RTS PLCP: %s ", hexbuf);
+ print_hex_dump_bytes("Fb RTS PLCP:", DUMP_PREFIX_OFFSET,
+ rtspfb, sizeof(txh->RTSPLCPFallback));
printk(KERN_DEBUG "RTS DUR: %04x ", rtsdfb);
- brcmu_format_hex(hexbuf, fragpfb, sizeof(txh->FragPLCPFallback));
- printk(KERN_DEBUG "PLCP: %s ", hexbuf);
+ print_hex_dump_bytes("PLCP:", DUMP_PREFIX_OFFSET,
+ fragpfb, sizeof(txh->FragPLCPFallback));
printk(KERN_DEBUG "DUR: %04x", fragdfb);
printk(KERN_DEBUG "\n");
@@ -6149,18 +5913,18 @@ void brcms_c_print_txdesc(struct d11txh *txh)
printk(KERN_DEBUG "MaxAggbyte_fb: %04x\n", mabyte_f);
printk(KERN_DEBUG "MinByte: %04x\n", mmbyte);
- brcmu_format_hex(hexbuf, rtsph, sizeof(txh->RTSPhyHeader));
- printk(KERN_DEBUG "RTS PLCP: %s ", hexbuf);
- brcmu_format_hex(hexbuf, (u8 *) &rts, sizeof(txh->rts_frame));
- printk(KERN_DEBUG "RTS Frame: %s", hexbuf);
+ print_hex_dump_bytes("RTS PLCP:", DUMP_PREFIX_OFFSET,
+ rtsph, sizeof(txh->RTSPhyHeader));
+ print_hex_dump_bytes("RTS Frame:", DUMP_PREFIX_OFFSET,
+ (u8 *)&rts, sizeof(txh->rts_frame));
printk(KERN_DEBUG "\n");
}
#endif /* defined(BCMDBG) */
#if defined(BCMDBG)
-int
+static int
brcms_c_format_flags(const struct brcms_c_bit_desc *bd, u32 flags, char *buf,
- int len)
+ int len)
{
int i;
char *p = buf;
@@ -6916,7 +6680,7 @@ brcms_c_d11hdrs_mac80211(struct brcms_c_info *wlc, struct ieee80211_hw *hw,
qos = ieee80211_is_data_qos(h->frame_control);
/* compute length of frame in bytes for use in PLCP computations */
- len = brcmu_pkttotlen(p);
+ len = p->len;
phylen = len + FCS_LEN;
/* Get tx_info */
@@ -7695,11 +7459,11 @@ static void
brcms_b_read_tsf(struct brcms_hardware *wlc_hw, u32 *tsf_l_ptr,
u32 *tsf_h_ptr)
{
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
/* read the tsf timer low, then high to get an atomic read */
- *tsf_l_ptr = R_REG(&regs->tsf_timerlow);
- *tsf_h_ptr = R_REG(&regs->tsf_timerhigh);
+ *tsf_l_ptr = bcma_read32(core, D11REGOFFS(tsf_timerlow));
+ *tsf_h_ptr = bcma_read32(core, D11REGOFFS(tsf_timerhigh));
}
/*
@@ -8217,13 +7981,21 @@ int brcms_c_get_curband(struct brcms_c_info *wlc)
void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop)
{
+ int timeout = 20;
+
/* flush packet queue when requested */
if (drop)
brcmu_pktq_flush(&wlc->pkt_queue->q, false, NULL, NULL);
/* wait for queue and DMA fifos to run dry */
- while (!pktq_empty(&wlc->pkt_queue->q) || brcms_txpktpendtot(wlc) > 0)
+ while (!pktq_empty(&wlc->pkt_queue->q) || brcms_txpktpendtot(wlc) > 0) {
brcms_msleep(wlc->wl, 1);
+
+ if (--timeout == 0)
+ break;
+ }
+
+ WARN_ON_ONCE(timeout == 0);
}
void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval)
@@ -8253,12 +8025,6 @@ int brcms_c_get_tx_power(struct brcms_c_info *wlc)
return (int)(qdbm / BRCMS_TXPWR_DB_FACTOR);
}
-void brcms_c_set_radio_mpc(struct brcms_c_info *wlc, bool mpc)
-{
- wlc->mpc = mpc;
- brcms_c_radio_mpc_upd(wlc);
-}
-
/* Process received frames */
/*
* Return true if more frames need to be processed. false otherwise.
@@ -8293,14 +8059,8 @@ static void brcms_c_recv(struct brcms_c_info *wlc, struct sk_buff *p)
len = p->len;
if (rxh->RxStatus1 & RXS_FCSERR) {
- if (wlc->pub->mac80211_state & MAC80211_PROMISC_BCNS) {
- wiphy_err(wlc->wiphy, "FCSERR while scanning******* -"
- " tossing\n");
+ if (!(wlc->filter_flags & FIF_FCSFAIL))
goto toss;
- } else {
- wiphy_err(wlc->wiphy, "RCSERR!!!\n");
- goto toss;
- }
}
/* check received pkt has at least frame control field */
@@ -8328,21 +8088,17 @@ static bool
brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound)
{
struct sk_buff *p;
- struct sk_buff *head = NULL;
- struct sk_buff *tail = NULL;
+ struct sk_buff *next = NULL;
+ struct sk_buff_head recv_frames;
+
uint n = 0;
uint bound_limit = bound ? RXBND : -1;
BCMMSG(wlc_hw->wlc->wiphy, "wl%d\n", wlc_hw->unit);
- /* gather received frames */
- while ((p = dma_rx(wlc_hw->di[fifo]))) {
+ skb_queue_head_init(&recv_frames);
- if (!tail)
- head = tail = p;
- else {
- tail->prev = p;
- tail = p;
- }
+ /* gather received frames */
+ while (dma_rx(wlc_hw->di[fifo], &recv_frames)) {
/* !give others some time to run! */
if (++n >= bound_limit)
@@ -8353,12 +8109,11 @@ brcms_b_recv(struct brcms_hardware *wlc_hw, uint fifo, bool bound)
dma_rxfill(wlc_hw->di[fifo]);
/* process each frame */
- while ((p = head) != NULL) {
+ skb_queue_walk_safe(&recv_frames, p, next) {
struct d11rxhdr_le *rxh_le;
struct d11rxhdr *rxh;
- head = head->prev;
- p->prev = NULL;
+ skb_unlink(p, &recv_frames);
rxh_le = (struct d11rxhdr_le *)p->data;
rxh = (struct d11rxhdr *)p->data;
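brcms_b_recv switches from a hand-rolled prev-linked list to the standard sk_buff_head API; dma_rx() is now expected to append each received frame to the queue passed in. The generic consume-and-unlink pattern, as a sketch:

	struct sk_buff_head frames;
	struct sk_buff *skb, *tmp;

	skb_queue_head_init(&frames);
	/* producer side: skb_queue_tail(&frames, skb) for each frame */
	skb_queue_walk_safe(&frames, skb, tmp) {
		skb_unlink(skb, &frames);
		/* process skb */
	}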
@@ -8389,7 +8144,7 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
{
u32 macintstatus;
struct brcms_hardware *wlc_hw = wlc->hw;
- struct d11regs __iomem *regs = wlc_hw->regs;
+ struct bcma_device *core = wlc_hw->d11core;
struct wiphy *wiphy = wlc->wiphy;
if (brcms_deviceremoved(wlc)) {
@@ -8425,7 +8180,7 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
/* ATIM window end */
if (macintstatus & MI_ATIMWINEND) {
BCMMSG(wlc->wiphy, "end of ATIM window\n");
- OR_REG(&regs->maccommand, wlc->qvalid);
+ bcma_set32(core, D11REGOFFS(maccommand), wlc->qvalid);
wlc->qvalid = 0;
}
@@ -8443,18 +8198,17 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
if (macintstatus & MI_GP0) {
wiphy_err(wiphy, "wl%d: PSM microcode watchdog fired at %d "
- "(seconds). Resetting.\n", wlc_hw->unit, wlc_hw->now);
+ "(seconds). Resetting.\n", wlc_hw->unit, wlc_hw->now);
printk_once("%s : PSM Watchdog, chipid 0x%x, chiprev 0x%x\n",
- __func__, wlc_hw->sih->chip,
- wlc_hw->sih->chiprev);
- /* big hammer */
- brcms_init(wlc->wl);
+ __func__, ai_get_chip_id(wlc_hw->sih),
+ ai_get_chiprev(wlc_hw->sih));
+ brcms_fatal_error(wlc_hw->wlc->wl);
}
/* gptimer timeout */
if (macintstatus & MI_TO)
- W_REG(&regs->gptimer, 0);
+ bcma_write32(core, D11REGOFFS(gptimer), 0);
if (macintstatus & MI_RFDISABLE) {
BCMMSG(wlc->wiphy, "wl%d: BMAC Detected a change on the"
@@ -8470,20 +8224,17 @@ bool brcms_c_dpc(struct brcms_c_info *wlc, bool bounded)
return wlc->macintstatus != 0;
fatal:
- brcms_init(wlc->wl);
+ brcms_fatal_error(wlc_hw->wlc->wl);
return wlc->macintstatus != 0;
}
-void brcms_c_init(struct brcms_c_info *wlc)
+void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx)
{
- struct d11regs __iomem *regs;
+ struct bcma_device *core = wlc->hw->d11core;
u16 chanspec;
- bool mute = false;
BCMMSG(wlc->wiphy, "wl%d\n", wlc->pub->unit);
- regs = wlc->regs;
-
/*
* This will happen if a big-hammer was executed. In
* that case, we want to go back to the channel that
@@ -8494,7 +8245,7 @@ void brcms_c_init(struct brcms_c_info *wlc)
else
chanspec = brcms_c_init_chanspec(wlc);
- brcms_b_init(wlc->hw, chanspec, mute);
+ brcms_b_init(wlc->hw, chanspec);
/* update beacon listen interval */
brcms_c_bcn_li_upd(wlc);
@@ -8513,8 +8264,8 @@ void brcms_c_init(struct brcms_c_info *wlc)
* update since init path would reset
* to default value
*/
- W_REG(&regs->tsf_cfprep,
- (bi << CFPREP_CBI_SHIFT));
+ bcma_write32(core, D11REGOFFS(tsf_cfprep),
+ bi << CFPREP_CBI_SHIFT);
/* Update maccontrol PM related bits */
brcms_c_set_ps_ctrl(wlc);
@@ -8544,7 +8295,7 @@ void brcms_c_init(struct brcms_c_info *wlc)
brcms_c_bsinit(wlc);
/* Enable EDCF mode (while the MAC is suspended) */
- OR_REG(&regs->ifs_ctl, IFS_USEEDCF);
+ bcma_set16(core, D11REGOFFS(ifs_ctl), IFS_USEEDCF);
brcms_c_edcf_setparams(wlc, false);
/* Init precedence maps for empty FIFOs */
@@ -8560,14 +8311,15 @@ void brcms_c_init(struct brcms_c_info *wlc)
/* ..now really unleash hell (allow the MAC out of suspend) */
brcms_c_enable_mac(wlc);
+ /* suspend the tx fifos and mute the phy for preism cac time */
+ if (mute_tx)
+ brcms_b_mute(wlc->hw, true);
+
/* clear tx flow control */
brcms_c_txflowcontrol_reset(wlc);
/* enable the RF Disable Delay timer */
- W_REG(&wlc->regs->rfdisabledly, RFDISABLE_DEFAULT);
-
- /* initialize mpc delay */
- wlc->mpc_delay_off = wlc->mpc_dlycnt = BRCMS_MPC_MIN_DELAYCNT;
+ bcma_write32(core, D11REGOFFS(rfdisabledly), RFDISABLE_DEFAULT);
/*
* Initialize WME parameters; if they haven't been set by some other
@@ -8577,7 +8329,7 @@ void brcms_c_init(struct brcms_c_info *wlc)
/* Uninitialized; read from HW */
int ac;
- for (ac = 0; ac < AC_COUNT; ac++)
+ for (ac = 0; ac < IEEE80211_NUM_ACS; ac++)
wlc->wme_retries[ac] =
brcms_b_read_shm(wlc->hw, M_AC_TXLMT_ADDR(ac));
}
@@ -8587,9 +8339,8 @@ void brcms_c_init(struct brcms_c_info *wlc)
* The common driver entry routine. Error codes should be unique
*/
struct brcms_c_info *
-brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
- bool piomode, void __iomem *regsva, struct pci_dev *btparam,
- uint *perr)
+brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
+ bool piomode, uint *perr)
{
struct brcms_c_info *wlc;
uint err = 0;
@@ -8597,7 +8348,7 @@ brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
struct brcms_pub *pub;
/* allocate struct brcms_c_info state and its substructures */
- wlc = (struct brcms_c_info *) brcms_c_attach_malloc(unit, &err, device);
+ wlc = (struct brcms_c_info *) brcms_c_attach_malloc(unit, &err, 0);
if (wlc == NULL)
goto fail;
wlc->wiphy = wl->wiphy;
@@ -8624,8 +8375,7 @@ brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
* low level attach steps(all hw accesses go
* inside, no more in rest of the attach)
*/
- err = brcms_b_attach(wlc, vendor, device, unit, piomode, regsva,
- btparam);
+ err = brcms_b_attach(wlc, core, unit, piomode);
if (err)
goto fail;
@@ -8754,8 +8504,6 @@ brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
brcms_c_ht_update_sgi_rx(wlc, 0);
}
- /* initialize radio_mpc_disable according to wlc->mpc */
- brcms_c_radio_mpc_upd(wlc);
brcms_b_antsel_set(wlc->hw, wlc->asi->antsel_avail);
if (perr)
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.h b/drivers/net/wireless/brcm80211/brcmsmac/main.h
index c0e0fcf..adb136e 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.h
@@ -44,8 +44,6 @@
/* transmit buffer max headroom for protocol headers */
#define TXOFF (D11_TXH_LEN + D11_PHY_HDR_LEN)
-#define AC_COUNT 4
-
/* Macros for doing definition and get/set of bitfields
* Usage example, e.g. a three-bit field (bits 4-6):
* #define <NAME>_M BITFIELD_MASK(3)
@@ -336,7 +334,7 @@ struct brcms_hardware {
u32 machwcap_backup; /* backup of machwcap */
struct si_pub *sih; /* SI handle (cookie for siutils calls) */
- struct d11regs __iomem *regs; /* pointer to device registers */
+ struct bcma_device *d11core; /* pointer to 802.11 core */
struct phy_shim_info *physhim; /* phy shim layer handler */
struct shared_phy *phy_sh; /* pointer to shared phy state */
struct brcms_hw_band *band;/* pointer to active per-band state */
@@ -402,7 +400,6 @@ struct brcms_txq_info {
*
* pub: pointer to driver public state.
* wl: pointer to specific private state.
- * regs: pointer to device registers.
* hw: HW related state.
* clkreq_override: setting for clkreq for PCIE : Auto, 0, 1.
* fastpwrup_dly: time in us needed to bring up d11 fast clock.
@@ -427,11 +424,6 @@ struct brcms_txq_info {
* bandinit_pending: track band init in auto band.
* radio_monitor: radio timer is running.
* going_down: down path intermediate variable.
- * mpc: enable minimum power consumption.
- * mpc_dlycnt: # of watchdog cnt before turn disable radio.
- * mpc_offcnt: # of watchdog cnt that radio is disabled.
- * mpc_delay_off: delay radio disable by # of watchdog cnt.
- * prev_non_delay_mpc: prev state brcms_c_is_non_delay_mpc.
* wdtimer: timer for watchdog routine.
* radio_timer: timer for hw radio button monitor routine.
* monitor: monitor (MPDU sniffing) mode.
@@ -441,7 +433,7 @@ struct brcms_txq_info {
* bcn_li_dtim: beacon listen interval in # dtims.
* WDarmed: watchdog timer is armed.
* WDlast: last time wlc_watchdog() was called.
- * edcf_txop[AC_COUNT]: current txop for each ac.
+ * edcf_txop[IEEE80211_NUM_ACS]: current txop for each ac.
* wme_retries: per-AC retry limits.
* tx_prec_map: Precedence map based on HW FIFO space.
* fifo2prec_map[NFIFO]: pointer to fifo2_prec map based on WME.
@@ -484,7 +476,6 @@ struct brcms_txq_info {
struct brcms_c_info {
struct brcms_pub *pub;
struct brcms_info *wl;
- struct d11regs __iomem *regs;
struct brcms_hardware *hw;
/* clock */
@@ -522,18 +513,11 @@ struct brcms_c_info {
bool radio_monitor;
bool going_down;
- bool mpc;
- u8 mpc_dlycnt;
- u8 mpc_offcnt;
- u8 mpc_delay_off;
- u8 prev_non_delay_mpc;
-
struct brcms_timer *wdtimer;
struct brcms_timer *radio_timer;
/* promiscuous */
- bool monitor;
- bool bcnmisc_monitor;
+ uint filter_flags;
/* driver feature */
bool _rifs;
@@ -546,9 +530,9 @@ struct brcms_c_info {
u32 WDlast;
/* WME */
- u16 edcf_txop[AC_COUNT];
+ u16 edcf_txop[IEEE80211_NUM_ACS];
- u16 wme_retries[AC_COUNT];
+ u16 wme_retries[IEEE80211_NUM_ACS];
u16 tx_prec_map;
u16 fifo2prec_map[NFIFO];
@@ -671,8 +655,7 @@ extern void brcms_c_print_txdesc(struct d11txh *txh);
#endif
extern int brcms_c_set_gmode(struct brcms_c_info *wlc, u8 gmode, bool config);
-extern void brcms_c_mac_bcn_promisc_change(struct brcms_c_info *wlc,
- bool promisc);
+extern void brcms_c_mac_promisc(struct brcms_c_info *wlc, uint filter_flags);
extern void brcms_c_send_q(struct brcms_c_info *wlc);
extern int brcms_c_prep_pdu(struct brcms_c_info *wlc, struct sk_buff *pdu,
uint *fifo);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c
index 0bcb267..7fad6dc 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.c
@@ -139,6 +139,9 @@
#define SRSH_PI_MASK 0xf000 /* bit 15:12 */
#define SRSH_PI_SHIFT 12 /* bit 15:12 */
+#define PCIREGOFFS(field) offsetof(struct sbpciregs, field)
+#define PCIEREGOFFS(field) offsetof(struct sbpcieregs, field)
+
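The same offsetof() trick used for the d11 core is introduced here for the Sonics PCI and PCIe register blocks, so pcicore can use the bcma accessors as well. Illustrative reads only, built from constants already present in this file:

	/* PCIe protocol-layer status via the indirect address/data window */
	u32 plp = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_PLP_STATUSREG);

	/* 16-bit shadow-SPROM word addressed directly by offset */
	u16 aspm = bcma_read16(pi->core, PCIEREGOFFS(sprom[SRSH_ASPM_OFFSET]));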
/* Sonics side: PCI core and host control registers */
struct sbpciregs {
u32 control; /* PCI control */
@@ -205,11 +208,7 @@ struct sbpcieregs {
};
struct pcicore_info {
- union {
- struct sbpcieregs __iomem *pcieregs;
- struct sbpciregs __iomem *pciregs;
- } regs; /* Memory mapped register to the core */
-
+ struct bcma_device *core;
struct si_pub *sih; /* System interconnect handle */
struct pci_dev *dev;
u8 pciecap_lcreg_offset;/* PCIE capability LCreg offset
@@ -224,9 +223,9 @@ struct pcicore_info {
};
#define PCIE_ASPM(sih) \
- (((sih)->buscoretype == PCIE_CORE_ID) && \
- (((sih)->buscorerev >= 3) && \
- ((sih)->buscorerev <= 5)))
+ ((ai_get_buscoretype(sih) == PCIE_CORE_ID) && \
+ ((ai_get_buscorerev(sih) >= 3) && \
+ (ai_get_buscorerev(sih) <= 5)))
/* delay needed between the mdio control/ mdiodata register data access */
@@ -238,8 +237,7 @@ static void pr28829_delay(void)
/* Initialize the PCI core.
* It's caller's responsibility to make sure that this is done only once
*/
-struct pcicore_info *pcicore_init(struct si_pub *sih, struct pci_dev *pdev,
- void __iomem *regs)
+struct pcicore_info *pcicore_init(struct si_pub *sih, struct bcma_device *core)
{
struct pcicore_info *pi;
@@ -249,17 +247,15 @@ struct pcicore_info *pcicore_init(struct si_pub *sih, struct pci_dev *pdev,
return NULL;
pi->sih = sih;
- pi->dev = pdev;
+ pi->dev = core->bus->host_pci;
+ pi->core = core;
- if (sih->buscoretype == PCIE_CORE_ID) {
+ if (core->id.id == PCIE_CORE_ID) {
u8 cap_ptr;
- pi->regs.pcieregs = regs;
cap_ptr = pcicore_find_pci_capability(pi->dev, PCI_CAP_ID_EXP,
NULL, NULL);
pi->pciecap_lcreg_offset = cap_ptr + PCIE_CAP_LINKCTRL_OFFSET;
- } else
- pi->regs.pciregs = regs;
-
+ }
return pi;
}
@@ -334,37 +330,37 @@ end:
/* ***** Register Access API */
static uint
-pcie_readreg(struct sbpcieregs __iomem *pcieregs, uint addrtype, uint offset)
+pcie_readreg(struct bcma_device *core, uint addrtype, uint offset)
{
uint retval = 0xFFFFFFFF;
switch (addrtype) {
case PCIE_CONFIGREGS:
- W_REG(&pcieregs->configaddr, offset);
- (void)R_REG((&pcieregs->configaddr));
- retval = R_REG(&pcieregs->configdata);
+ bcma_write32(core, PCIEREGOFFS(configaddr), offset);
+ (void)bcma_read32(core, PCIEREGOFFS(configaddr));
+ retval = bcma_read32(core, PCIEREGOFFS(configdata));
break;
case PCIE_PCIEREGS:
- W_REG(&pcieregs->pcieindaddr, offset);
- (void)R_REG(&pcieregs->pcieindaddr);
- retval = R_REG(&pcieregs->pcieinddata);
+ bcma_write32(core, PCIEREGOFFS(pcieindaddr), offset);
+ (void)bcma_read32(core, PCIEREGOFFS(pcieindaddr));
+ retval = bcma_read32(core, PCIEREGOFFS(pcieinddata));
break;
}
return retval;
}
-static uint pcie_writereg(struct sbpcieregs __iomem *pcieregs, uint addrtype,
+static uint pcie_writereg(struct bcma_device *core, uint addrtype,
uint offset, uint val)
{
switch (addrtype) {
case PCIE_CONFIGREGS:
- W_REG((&pcieregs->configaddr), offset);
- W_REG((&pcieregs->configdata), val);
+ bcma_write32(core, PCIEREGOFFS(configaddr), offset);
+ bcma_write32(core, PCIEREGOFFS(configdata), val);
break;
case PCIE_PCIEREGS:
- W_REG((&pcieregs->pcieindaddr), offset);
- W_REG((&pcieregs->pcieinddata), val);
+ bcma_write32(core, PCIEREGOFFS(pcieindaddr), offset);
+ bcma_write32(core, PCIEREGOFFS(pcieinddata), val);
break;
default:
break;
@@ -374,7 +370,6 @@ static uint pcie_writereg(struct sbpcieregs __iomem *pcieregs, uint addrtype,
static bool pcie_mdiosetblock(struct pcicore_info *pi, uint blk)
{
- struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
uint mdiodata, i = 0;
uint pcie_serdes_spinwait = 200;
@@ -382,12 +377,13 @@ static bool pcie_mdiosetblock(struct pcicore_info *pi, uint blk)
(MDIODATA_DEV_ADDR << MDIODATA_DEVADDR_SHF) |
(MDIODATA_BLK_ADDR << MDIODATA_REGADDR_SHF) |
(blk << 4));
- W_REG(&pcieregs->mdiodata, mdiodata);
+ bcma_write32(pi->core, PCIEREGOFFS(mdiodata), mdiodata);
pr28829_delay();
/* retry till the transaction is complete */
while (i < pcie_serdes_spinwait) {
- if (R_REG(&pcieregs->mdiocontrol) & MDIOCTL_ACCESS_DONE)
+ if (bcma_read32(pi->core, PCIEREGOFFS(mdiocontrol)) &
+ MDIOCTL_ACCESS_DONE)
break;
udelay(1000);
@@ -404,15 +400,15 @@ static int
pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write,
uint *val)
{
- struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
uint mdiodata;
uint i = 0;
uint pcie_serdes_spinwait = 10;
/* enable mdio access to SERDES */
- W_REG(&pcieregs->mdiocontrol, MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
+ bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol),
+ MDIOCTL_PREAM_EN | MDIOCTL_DIVISOR_VAL);
- if (pi->sih->buscorerev >= 10) {
+ if (ai_get_buscorerev(pi->sih) >= 10) {
/* new serdes is slower in rw,
* using two layers of reg address mapping
*/
@@ -432,20 +428,22 @@ pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write,
mdiodata |= (MDIODATA_START | MDIODATA_WRITE | MDIODATA_TA |
*val);
- W_REG(&pcieregs->mdiodata, mdiodata);
+ bcma_write32(pi->core, PCIEREGOFFS(mdiodata), mdiodata);
pr28829_delay();
/* retry till the transaction is complete */
while (i < pcie_serdes_spinwait) {
- if (R_REG(&pcieregs->mdiocontrol) & MDIOCTL_ACCESS_DONE) {
+ if (bcma_read32(pi->core, PCIEREGOFFS(mdiocontrol)) &
+ MDIOCTL_ACCESS_DONE) {
if (!write) {
pr28829_delay();
- *val = (R_REG(&pcieregs->mdiodata) &
+ *val = (bcma_read32(pi->core,
+ PCIEREGOFFS(mdiodata)) &
MDIODATA_MASK);
}
/* Disable mdio access to SERDES */
- W_REG(&pcieregs->mdiocontrol, 0);
+ bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol), 0);
return 0;
}
udelay(1000);
@@ -453,7 +451,7 @@ pcie_mdioop(struct pcicore_info *pi, uint physmedia, uint regaddr, bool write,
}
/* Timed out. Disable mdio access to SERDES. */
- W_REG(&pcieregs->mdiocontrol, 0);
+ bcma_write32(pi->core, PCIEREGOFFS(mdiocontrol), 0);
return 1;
}
@@ -502,18 +500,18 @@ static void pcie_extendL1timer(struct pcicore_info *pi, bool extend)
{
u32 w;
struct si_pub *sih = pi->sih;
- struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
- if (sih->buscoretype != PCIE_CORE_ID || sih->buscorerev < 7)
+ if (ai_get_buscoretype(sih) != PCIE_CORE_ID ||
+ ai_get_buscorerev(sih) < 7)
return;
- w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
+ w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
if (extend)
w |= PCIE_ASPMTIMER_EXTEND;
else
w &= ~PCIE_ASPMTIMER_EXTEND;
- pcie_writereg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w);
- w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
+ pcie_writereg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG, w);
+ w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_PMTHRESHREG);
}
/* centralized clkreq control policy */
@@ -527,25 +525,27 @@ static void pcie_clkreq_upd(struct pcicore_info *pi, uint state)
pcie_clkreq(pi, 1, 0);
break;
case SI_PCIDOWN:
- if (sih->buscorerev == 6) { /* turn on serdes PLL down */
- ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_addr),
- ~0, 0);
- ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_data),
- ~0x40, 0);
+ /* turn on serdes PLL down */
+ if (ai_get_buscorerev(sih) == 6) {
+ ai_cc_reg(sih,
+ offsetof(struct chipcregs, chipcontrol_addr),
+ ~0, 0);
+ ai_cc_reg(sih,
+ offsetof(struct chipcregs, chipcontrol_data),
+ ~0x40, 0);
} else if (pi->pcie_pr42767) {
pcie_clkreq(pi, 1, 1);
}
break;
case SI_PCIUP:
- if (sih->buscorerev == 6) { /* turn off serdes PLL down */
- ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_addr),
- ~0, 0);
- ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_data),
- ~0x40, 0x40);
+ /* turn off serdes PLL down */
+ if (ai_get_buscorerev(sih) == 6) {
+ ai_cc_reg(sih,
+ offsetof(struct chipcregs, chipcontrol_addr),
+ ~0, 0);
+ ai_cc_reg(sih,
+ offsetof(struct chipcregs, chipcontrol_data),
+ ~0x40, 0x40);
} else if (PCIE_ASPM(sih)) { /* disable clkreq */
pcie_clkreq(pi, 1, 0);
}
@@ -562,7 +562,7 @@ static void pcie_war_polarity(struct pcicore_info *pi)
if (pi->pcie_polarity != 0)
return;
- w = pcie_readreg(pi->regs.pcieregs, PCIE_PCIEREGS, PCIE_PLP_STATUSREG);
+ w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_PLP_STATUSREG);
/* Detect the current polarity at attach and force that polarity and
* disable changing the polarity
@@ -581,18 +581,15 @@ static void pcie_war_polarity(struct pcicore_info *pi)
*/
static void pcie_war_aspm_clkreq(struct pcicore_info *pi)
{
- struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
struct si_pub *sih = pi->sih;
u16 val16;
- u16 __iomem *reg16;
u32 w;
if (!PCIE_ASPM(sih))
return;
/* bypass this on QT or VSIM */
- reg16 = &pcieregs->sprom[SRSH_ASPM_OFFSET];
- val16 = R_REG(reg16);
+ val16 = bcma_read16(pi->core, PCIEREGOFFS(sprom[SRSH_ASPM_OFFSET]));
val16 &= ~SRSH_ASPM_ENB;
if (pi->pcie_war_aspm_ovr == PCIE_ASPM_ENAB)
@@ -602,15 +599,15 @@ static void pcie_war_aspm_clkreq(struct pcicore_info *pi)
else if (pi->pcie_war_aspm_ovr == PCIE_ASPM_L0s_ENAB)
val16 |= SRSH_ASPM_L0s_ENB;
- W_REG(reg16, val16);
+ bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_ASPM_OFFSET]), val16);
pci_read_config_dword(pi->dev, pi->pciecap_lcreg_offset, &w);
w &= ~PCIE_ASPM_ENAB;
w |= pi->pcie_war_aspm_ovr;
pci_write_config_dword(pi->dev, pi->pciecap_lcreg_offset, w);
- reg16 = &pcieregs->sprom[SRSH_CLKREQ_OFFSET_REV5];
- val16 = R_REG(reg16);
+ val16 = bcma_read16(pi->core,
+ PCIEREGOFFS(sprom[SRSH_CLKREQ_OFFSET_REV5]));
if (pi->pcie_war_aspm_ovr != PCIE_ASPM_DISAB) {
val16 |= SRSH_CLKREQ_ENB;
@@ -618,7 +615,8 @@ static void pcie_war_aspm_clkreq(struct pcicore_info *pi)
} else
val16 &= ~SRSH_CLKREQ_ENB;
- W_REG(reg16, val16);
+ bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_CLKREQ_OFFSET_REV5]),
+ val16);
}
/* Apply the polarity determined at the start */
@@ -642,16 +640,15 @@ static void pcie_war_serdes(struct pcicore_info *pi)
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void pcie_misc_config_fixup(struct pcicore_info *pi)
{
- struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
u16 val16;
- u16 __iomem *reg16;
- reg16 = &pcieregs->sprom[SRSH_PCIE_MISC_CONFIG];
- val16 = R_REG(reg16);
+ val16 = bcma_read16(pi->core,
+ PCIEREGOFFS(sprom[SRSH_PCIE_MISC_CONFIG]));
if ((val16 & SRSH_L23READY_EXIT_NOPERST) == 0) {
val16 |= SRSH_L23READY_EXIT_NOPERST;
- W_REG(reg16, val16);
+ bcma_write16(pi->core,
+ PCIEREGOFFS(sprom[SRSH_PCIE_MISC_CONFIG]), val16);
}
}
@@ -659,62 +656,57 @@ static void pcie_misc_config_fixup(struct pcicore_info *pi)
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void pcie_war_noplldown(struct pcicore_info *pi)
{
- struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
- u16 __iomem *reg16;
-
/* turn off serdes PLL down */
- ai_corereg(pi->sih, SI_CC_IDX, offsetof(struct chipcregs, chipcontrol),
- CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN);
+ ai_cc_reg(pi->sih, offsetof(struct chipcregs, chipcontrol),
+ CHIPCTRL_4321_PLL_DOWN, CHIPCTRL_4321_PLL_DOWN);
/* clear srom shadow backdoor */
- reg16 = &pcieregs->sprom[SRSH_BD_OFFSET];
- W_REG(reg16, 0);
+ bcma_write16(pi->core, PCIEREGOFFS(sprom[SRSH_BD_OFFSET]), 0);
}
/* Needs to happen when coming out of 'standby'/'hibernate' */
static void pcie_war_pci_setup(struct pcicore_info *pi)
{
struct si_pub *sih = pi->sih;
- struct sbpcieregs __iomem *pcieregs = pi->regs.pcieregs;
u32 w;
- if (sih->buscorerev == 0 || sih->buscorerev == 1) {
- w = pcie_readreg(pcieregs, PCIE_PCIEREGS,
+ if (ai_get_buscorerev(sih) == 0 || ai_get_buscorerev(sih) == 1) {
+ w = pcie_readreg(pi->core, PCIE_PCIEREGS,
PCIE_TLP_WORKAROUNDSREG);
w |= 0x8;
- pcie_writereg(pcieregs, PCIE_PCIEREGS,
+ pcie_writereg(pi->core, PCIE_PCIEREGS,
PCIE_TLP_WORKAROUNDSREG, w);
}
- if (sih->buscorerev == 1) {
- w = pcie_readreg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG);
+ if (ai_get_buscorerev(sih) == 1) {
+ w = pcie_readreg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_LCREG);
w |= 0x40;
- pcie_writereg(pcieregs, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w);
+ pcie_writereg(pi->core, PCIE_PCIEREGS, PCIE_DLLP_LCREG, w);
}
- if (sih->buscorerev == 0) {
+ if (ai_get_buscorerev(sih) == 0) {
pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_TIMER1, 0x8128);
pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDR, 0x0100);
pcie_mdiowrite(pi, MDIODATA_DEV_RX, SERDES_RX_CDRBW, 0x1466);
} else if (PCIE_ASPM(sih)) {
/* Change the L1 threshold for better performance */
- w = pcie_readreg(pcieregs, PCIE_PCIEREGS,
+ w = pcie_readreg(pi->core, PCIE_PCIEREGS,
PCIE_DLLP_PMTHRESHREG);
w &= ~PCIE_L1THRESHOLDTIME_MASK;
w |= PCIE_L1THRESHOLD_WARVAL << PCIE_L1THRESHOLDTIME_SHIFT;
- pcie_writereg(pcieregs, PCIE_PCIEREGS,
+ pcie_writereg(pi->core, PCIE_PCIEREGS,
PCIE_DLLP_PMTHRESHREG, w);
pcie_war_serdes(pi);
pcie_war_aspm_clkreq(pi);
- } else if (pi->sih->buscorerev == 7)
+ } else if (ai_get_buscorerev(pi->sih) == 7)
pcie_war_noplldown(pi);
/* Note that the fix is actually in the SROM,
* that's why this is open-ended
*/
- if (pi->sih->buscorerev >= 6)
+ if (ai_get_buscorerev(pi->sih) >= 6)
pcie_misc_config_fixup(pi);
}
@@ -745,7 +737,7 @@ void pcicore_attach(struct pcicore_info *pi, int state)
void pcicore_hwup(struct pcicore_info *pi)
{
- if (!pi || pi->sih->buscoretype != PCIE_CORE_ID)
+ if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
return;
pcie_war_pci_setup(pi);
@@ -753,7 +745,7 @@ void pcicore_hwup(struct pcicore_info *pi)
void pcicore_up(struct pcicore_info *pi, int state)
{
- if (!pi || pi->sih->buscoretype != PCIE_CORE_ID)
+ if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
return;
/* Restore L1 timer for better performance */
@@ -781,7 +773,7 @@ void pcicore_sleep(struct pcicore_info *pi)
void pcicore_down(struct pcicore_info *pi, int state)
{
- if (!pi || pi->sih->buscoretype != PCIE_CORE_ID)
+ if (!pi || ai_get_buscoretype(pi->sih) != PCIE_CORE_ID)
return;
pcie_clkreq_upd(pi, state);
@@ -790,46 +782,45 @@ void pcicore_down(struct pcicore_info *pi, int state)
pcie_extendL1timer(pi, false);
}
-/* precondition: current core is sii->buscoretype */
-static void pcicore_fixcfg(struct pcicore_info *pi, u16 __iomem *reg16)
+void pcicore_fixcfg(struct pcicore_info *pi)
{
- struct si_info *sii = (struct si_info *)(pi->sih);
+ struct bcma_device *core = pi->core;
u16 val16;
- uint pciidx;
+ uint regoff;
- pciidx = ai_coreidx(&sii->pub);
- val16 = R_REG(reg16);
- if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) != (u16)pciidx) {
- val16 = (u16)(pciidx << SRSH_PI_SHIFT) |
- (val16 & ~SRSH_PI_MASK);
- W_REG(reg16, val16);
- }
-}
+ switch (pi->core->id.id) {
+ case BCMA_CORE_PCI:
+ regoff = PCIREGOFFS(sprom[SRSH_PI_OFFSET]);
+ break;
-void
-pcicore_fixcfg_pci(struct pcicore_info *pi, struct sbpciregs __iomem *pciregs)
-{
- pcicore_fixcfg(pi, &pciregs->sprom[SRSH_PI_OFFSET]);
-}
+ case BCMA_CORE_PCIE:
+ regoff = PCIEREGOFFS(sprom[SRSH_PI_OFFSET]);
+ break;
-void pcicore_fixcfg_pcie(struct pcicore_info *pi,
- struct sbpcieregs __iomem *pcieregs)
-{
- pcicore_fixcfg(pi, &pcieregs->sprom[SRSH_PI_OFFSET]);
+ default:
+ return;
+ }
+
+ val16 = bcma_read16(pi->core, regoff);
+ if (((val16 & SRSH_PI_MASK) >> SRSH_PI_SHIFT) !=
+ (u16)core->core_index) {
+ val16 = ((u16)core->core_index << SRSH_PI_SHIFT) |
+ (val16 & ~SRSH_PI_MASK);
+ bcma_write16(pi->core, regoff, val16);
+ }
}
/* precondition: current core is pci core */
void
-pcicore_pci_setup(struct pcicore_info *pi, struct sbpciregs __iomem *pciregs)
+pcicore_pci_setup(struct pcicore_info *pi)
{
- u32 w;
-
- OR_REG(&pciregs->sbtopci2, SBTOPCI_PREF | SBTOPCI_BURST);
-
- if (((struct si_info *)(pi->sih))->pub.buscorerev >= 11) {
- OR_REG(&pciregs->sbtopci2, SBTOPCI_RC_READMULTI);
- w = R_REG(&pciregs->clkrun);
- W_REG(&pciregs->clkrun, w | PCI_CLKRUN_DSBL);
- w = R_REG(&pciregs->clkrun);
+ bcma_set32(pi->core, PCIREGOFFS(sbtopci2),
+ SBTOPCI_PREF | SBTOPCI_BURST);
+
+ if (pi->core->id.rev >= 11) {
+ bcma_set32(pi->core, PCIREGOFFS(sbtopci2),
+ SBTOPCI_RC_READMULTI);
+ bcma_set32(pi->core, PCIREGOFFS(clkrun), PCI_CLKRUN_DSBL);
+ (void)bcma_read32(pi->core, PCIREGOFFS(clkrun));
}
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h
index 58aa80d..9fc3ead 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/nicpci.h
@@ -62,8 +62,7 @@ struct sbpciregs;
struct sbpcieregs;
extern struct pcicore_info *pcicore_init(struct si_pub *sih,
- struct pci_dev *pdev,
- void __iomem *regs);
+ struct bcma_device *core);
extern void pcicore_deinit(struct pcicore_info *pch);
extern void pcicore_attach(struct pcicore_info *pch, int state);
extern void pcicore_hwup(struct pcicore_info *pch);
@@ -72,11 +71,7 @@ extern void pcicore_sleep(struct pcicore_info *pch);
extern void pcicore_down(struct pcicore_info *pch, int state);
extern u8 pcicore_find_pci_capability(struct pci_dev *dev, u8 req_cap_id,
unsigned char *buf, u32 *buflen);
-extern void pcicore_fixcfg_pci(struct pcicore_info *pch,
- struct sbpciregs __iomem *pciregs);
-extern void pcicore_fixcfg_pcie(struct pcicore_info *pch,
- struct sbpcieregs __iomem *pciregs);
-extern void pcicore_pci_setup(struct pcicore_info *pch,
- struct sbpciregs __iomem *pciregs);
+extern void pcicore_fixcfg(struct pcicore_info *pch);
+extern void pcicore_pci_setup(struct pcicore_info *pch);
#endif /* _BRCM_NICPCI_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/otp.c b/drivers/net/wireless/brcm80211/brcmsmac/otp.c
index edf5515..f1ca126 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/otp.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/otp.c
@@ -77,7 +77,7 @@ struct otp_fn_s {
};
struct otpinfo {
- uint ccrev; /* chipc revision */
+ struct bcma_device *core; /* chipc core */
const struct otp_fn_s *fn; /* OTP functions */
struct si_pub *sih; /* Saved sb handle */
@@ -133,9 +133,10 @@ struct otpinfo {
#define OTP_SZ_FU_144 (144/8) /* 144 bits */
static u16
-ipxotp_otpr(struct otpinfo *oi, struct chipcregs __iomem *cc, uint wn)
+ipxotp_otpr(struct otpinfo *oi, uint wn)
{
- return R_REG(&cc->sromotp[wn]);
+ return bcma_read16(oi->core,
+ CHIPCREGOFFS(sromotp[wn]));
}
/*
@@ -146,7 +147,7 @@ static int ipxotp_max_rgnsz(struct si_pub *sih, int osizew)
{
int ret = 0;
- switch (sih->chip) {
+ switch (ai_get_chip_id(sih)) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
ret = osizew * 2 - OTP_SZ_FU_72 - OTP_SZ_CHECKSUM;
@@ -161,19 +162,21 @@ static int ipxotp_max_rgnsz(struct si_pub *sih, int osizew)
return ret;
}
-static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
+static void _ipxotp_init(struct otpinfo *oi)
{
uint k;
u32 otpp, st;
+ int ccrev = ai_get_ccrev(oi->sih);
+
/*
* record word offset of General Use Region
* for various chipcommon revs
*/
- if (oi->sih->ccrev == 21 || oi->sih->ccrev == 24
- || oi->sih->ccrev == 27) {
+ if (ccrev == 21 || ccrev == 24
+ || ccrev == 27) {
oi->otpgu_base = REVA4_OTPGU_BASE;
- } else if (oi->sih->ccrev == 36) {
+ } else if (ccrev == 36) {
/*
* OTP size greater than equal to 2KB (128 words),
* otpgu_base is similar to rev23
@@ -182,7 +185,7 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
oi->otpgu_base = REVB8_OTPGU_BASE;
else
oi->otpgu_base = REV36_OTPGU_BASE;
- } else if (oi->sih->ccrev == 23 || oi->sih->ccrev >= 25) {
+ } else if (ccrev == 23 || ccrev >= 25) {
oi->otpgu_base = REVB8_OTPGU_BASE;
}
@@ -190,24 +193,21 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
otpp =
OTPP_START_BUSY | ((OTPPOC_INIT << OTPP_OC_SHIFT) & OTPP_OC_MASK);
- W_REG(&cc->otpprog, otpp);
- for (k = 0;
- ((st = R_REG(&cc->otpprog)) & OTPP_START_BUSY)
- && (k < OTPP_TRIES); k++)
- ;
+ bcma_write32(oi->core, CHIPCREGOFFS(otpprog), otpp);
+ st = bcma_read32(oi->core, CHIPCREGOFFS(otpprog));
+ for (k = 0; (st & OTPP_START_BUSY) && (k < OTPP_TRIES); k++)
+ st = bcma_read32(oi->core, CHIPCREGOFFS(otpprog));
if (k >= OTPP_TRIES)
return;
/* Read OTP lock bits and subregion programmed indication bits */
- oi->status = R_REG(&cc->otpstatus);
+ oi->status = bcma_read32(oi->core, CHIPCREGOFFS(otpstatus));
- if ((oi->sih->chip == BCM43224_CHIP_ID)
- || (oi->sih->chip == BCM43225_CHIP_ID)) {
+ if ((ai_get_chip_id(oi->sih) == BCM43224_CHIP_ID)
+ || (ai_get_chip_id(oi->sih) == BCM43225_CHIP_ID)) {
u32 p_bits;
- p_bits =
- (ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_P_OFF) &
- OTPGU_P_MSK)
- >> OTPGU_P_SHIFT;
+ p_bits = (ipxotp_otpr(oi, oi->otpgu_base + OTPGU_P_OFF) &
+ OTPGU_P_MSK) >> OTPGU_P_SHIFT;
oi->status |= (p_bits << OTPS_GUP_SHIFT);
}
@@ -220,7 +220,7 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
oi->hwlim = oi->wsize;
if (oi->status & OTPS_GUP_HW) {
oi->hwlim =
- ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_HSB_OFF) / 16;
+ ipxotp_otpr(oi, oi->otpgu_base + OTPGU_HSB_OFF) / 16;
oi->swbase = oi->hwlim;
} else
oi->swbase = oi->hwbase;
@@ -230,7 +230,7 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
if (oi->status & OTPS_GUP_SW) {
oi->swlim =
- ipxotp_otpr(oi, cc, oi->otpgu_base + OTPGU_SFB_OFF) / 16;
+ ipxotp_otpr(oi, oi->otpgu_base + OTPGU_SFB_OFF) / 16;
oi->fbase = oi->swlim;
} else
oi->fbase = oi->swbase;
@@ -240,11 +240,8 @@ static void _ipxotp_init(struct otpinfo *oi, struct chipcregs __iomem *cc)
static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi)
{
- uint idx;
- struct chipcregs __iomem *cc;
-
/* Make sure we're running IPX OTP */
- if (!OTPTYPE_IPX(sih->ccrev))
+ if (!OTPTYPE_IPX(ai_get_ccrev(sih)))
return -EBADE;
/* Make sure OTP is not disabled */
@@ -252,7 +249,7 @@ static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi)
return -EBADE;
/* Check for otp size */
- switch ((sih->cccaps & CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT) {
+ switch ((ai_get_cccaps(sih) & CC_CAP_OTPSIZE) >> CC_CAP_OTPSIZE_SHIFT) {
case 0:
/* Nothing there */
return -EBADE;
@@ -282,21 +279,13 @@ static int ipxotp_init(struct si_pub *sih, struct otpinfo *oi)
}
/* Retrieve OTP region info */
- idx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
-
- _ipxotp_init(oi, cc);
-
- ai_setcoreidx(sih, idx);
-
+ _ipxotp_init(oi);
return 0;
}
static int
ipxotp_read_region(struct otpinfo *oi, int region, u16 *data, uint *wlen)
{
- uint idx;
- struct chipcregs __iomem *cc;
uint base, i, sz;
/* Validate region selection */
@@ -365,14 +354,10 @@ ipxotp_read_region(struct otpinfo *oi, int region, u16 *data, uint *wlen)
return -EINVAL;
}
- idx = ai_coreidx(oi->sih);
- cc = ai_setcoreidx(oi->sih, SI_CC_IDX);
-
/* Read the data */
for (i = 0; i < sz; i++)
- data[i] = ipxotp_otpr(oi, cc, base + i);
+ data[i] = ipxotp_otpr(oi, base + i);
- ai_setcoreidx(oi->sih, idx);
*wlen = sz;
return 0;
}
@@ -384,14 +369,13 @@ static const struct otp_fn_s ipxotp_fn = {
static int otp_init(struct si_pub *sih, struct otpinfo *oi)
{
-
int ret;
memset(oi, 0, sizeof(struct otpinfo));
- oi->ccrev = sih->ccrev;
+ oi->core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
- if (OTPTYPE_IPX(oi->ccrev))
+ if (OTPTYPE_IPX(ai_get_ccrev(sih)))
oi->fn = &ipxotp_fn;
if (oi->fn == NULL)
@@ -399,7 +383,7 @@ static int otp_init(struct si_pub *sih, struct otpinfo *oi)
oi->sih = sih;
- ret = (oi->fn->init) (sih, oi);
+ ret = (oi->fn->init)(sih, oi);
return ret;
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
index a314925..264f8c4 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_cmn.c
@@ -109,10 +109,10 @@ static const struct chan_info_basic chan_info_all[] = {
{204, 5020},
{208, 5040},
{212, 5060},
- {216, 50800}
+ {216, 5080}
};
-const u8 ofdm_rate_lookup[] = {
+static const u8 ofdm_rate_lookup[] = {
BRCM_RATE_48M,
BRCM_RATE_24M,
@@ -149,9 +149,8 @@ void wlc_radioreg_enter(struct brcms_phy_pub *pih)
void wlc_radioreg_exit(struct brcms_phy_pub *pih)
{
struct brcms_phy *pi = (struct brcms_phy *) pih;
- u16 dummy;
- dummy = R_REG(&pi->regs->phyversion);
+ (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
pi->phy_wreg = 0;
wlapi_bmac_mctrl(pi->sh->physhim, MCTL_LOCK_RADIO, 0);
}
@@ -186,19 +185,11 @@ u16 read_radio_reg(struct brcms_phy *pi, u16 addr)
if ((D11REV_GE(pi->sh->corerev, 24)) ||
(D11REV_IS(pi->sh->corerev, 22)
&& (pi->pubpi.phy_type != PHY_TYPE_SSN))) {
- W_REG_FLUSH(&pi->regs->radioregaddr, addr);
- data = R_REG(&pi->regs->radioregdata);
+ bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), addr);
+ data = bcma_read16(pi->d11core, D11REGOFFS(radioregdata));
} else {
- W_REG_FLUSH(&pi->regs->phy4waddr, addr);
-
-#ifdef __ARM_ARCH_4T__
- __asm__(" .align 4 ");
- __asm__(" nop ");
- data = R_REG(&pi->regs->phy4wdatalo);
-#else
- data = R_REG(&pi->regs->phy4wdatalo);
-#endif
-
+ bcma_wflush16(pi->d11core, D11REGOFFS(phy4waddr), addr);
+ data = bcma_read16(pi->d11core, D11REGOFFS(phy4wdatalo));
}
pi->phy_wreg = 0;
@@ -211,15 +202,15 @@ void write_radio_reg(struct brcms_phy *pi, u16 addr, u16 val)
(D11REV_IS(pi->sh->corerev, 22)
&& (pi->pubpi.phy_type != PHY_TYPE_SSN))) {
- W_REG_FLUSH(&pi->regs->radioregaddr, addr);
- W_REG(&pi->regs->radioregdata, val);
+ bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), addr);
+ bcma_write16(pi->d11core, D11REGOFFS(radioregdata), val);
} else {
- W_REG_FLUSH(&pi->regs->phy4waddr, addr);
- W_REG(&pi->regs->phy4wdatalo, val);
+ bcma_wflush16(pi->d11core, D11REGOFFS(phy4waddr), addr);
+ bcma_write16(pi->d11core, D11REGOFFS(phy4wdatalo), val);
}
if (++pi->phy_wreg >= pi->phy_wreg_limit) {
- (void)R_REG(&pi->regs->maccontrol);
+ (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
pi->phy_wreg = 0;
}
}
@@ -231,19 +222,20 @@ static u32 read_radio_id(struct brcms_phy *pi)
if (D11REV_GE(pi->sh->corerev, 24)) {
u32 b0, b1, b2;
- W_REG_FLUSH(&pi->regs->radioregaddr, 0);
- b0 = (u32) R_REG(&pi->regs->radioregdata);
- W_REG_FLUSH(&pi->regs->radioregaddr, 1);
- b1 = (u32) R_REG(&pi->regs->radioregdata);
- W_REG_FLUSH(&pi->regs->radioregaddr, 2);
- b2 = (u32) R_REG(&pi->regs->radioregdata);
+ bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), 0);
+ b0 = (u32) bcma_read16(pi->d11core, D11REGOFFS(radioregdata));
+ bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), 1);
+ b1 = (u32) bcma_read16(pi->d11core, D11REGOFFS(radioregdata));
+ bcma_wflush16(pi->d11core, D11REGOFFS(radioregaddr), 2);
+ b2 = (u32) bcma_read16(pi->d11core, D11REGOFFS(radioregdata));
id = ((b0 & 0xf) << 28) | (((b2 << 8) | b1) << 12) | ((b0 >> 4)
& 0xf);
} else {
- W_REG_FLUSH(&pi->regs->phy4waddr, RADIO_IDCODE);
- id = (u32) R_REG(&pi->regs->phy4wdatalo);
- id |= (u32) R_REG(&pi->regs->phy4wdatahi) << 16;
+ bcma_wflush16(pi->d11core, D11REGOFFS(phy4waddr), RADIO_IDCODE);
+ id = (u32) bcma_read16(pi->d11core, D11REGOFFS(phy4wdatalo));
+ id |= (u32) bcma_read16(pi->d11core,
+ D11REGOFFS(phy4wdatahi)) << 16;
}
pi->phy_wreg = 0;
return id;
@@ -283,75 +275,52 @@ void mod_radio_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val)
void write_phy_channel_reg(struct brcms_phy *pi, uint val)
{
- W_REG(&pi->regs->phychannel, val);
+ bcma_write16(pi->d11core, D11REGOFFS(phychannel), val);
}
u16 read_phy_reg(struct brcms_phy *pi, u16 addr)
{
- struct d11regs __iomem *regs;
-
- regs = pi->regs;
-
- W_REG_FLUSH(&regs->phyregaddr, addr);
+ bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
pi->phy_wreg = 0;
- return R_REG(&regs->phyregdata);
+ return bcma_read16(pi->d11core, D11REGOFFS(phyregdata));
}
void write_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
{
- struct d11regs __iomem *regs;
-
- regs = pi->regs;
-
#ifdef CONFIG_BCM47XX
- W_REG_FLUSH(&regs->phyregaddr, addr);
- W_REG(&regs->phyregdata, val);
+ bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
+ bcma_write16(pi->d11core, D11REGOFFS(phyregdata), val);
if (addr == 0x72)
- (void)R_REG(&regs->phyregdata);
+ (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
#else
- W_REG((u32 __iomem *)(&regs->phyregaddr), addr | (val << 16));
+ bcma_write32(pi->d11core, D11REGOFFS(phyregaddr), addr | (val << 16));
if (++pi->phy_wreg >= pi->phy_wreg_limit) {
pi->phy_wreg = 0;
- (void)R_REG(&regs->phyversion);
+ (void)bcma_read16(pi->d11core, D11REGOFFS(phyversion));
}
#endif
}
void and_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
{
- struct d11regs __iomem *regs;
-
- regs = pi->regs;
-
- W_REG_FLUSH(&regs->phyregaddr, addr);
-
- W_REG(&regs->phyregdata, (R_REG(&regs->phyregdata) & val));
+ bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
+ bcma_mask16(pi->d11core, D11REGOFFS(phyregdata), val);
pi->phy_wreg = 0;
}
void or_phy_reg(struct brcms_phy *pi, u16 addr, u16 val)
{
- struct d11regs __iomem *regs;
-
- regs = pi->regs;
-
- W_REG_FLUSH(&regs->phyregaddr, addr);
-
- W_REG(&regs->phyregdata, (R_REG(&regs->phyregdata) | val));
+ bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
+ bcma_set16(pi->d11core, D11REGOFFS(phyregdata), val);
pi->phy_wreg = 0;
}
void mod_phy_reg(struct brcms_phy *pi, u16 addr, u16 mask, u16 val)
{
- struct d11regs __iomem *regs;
-
- regs = pi->regs;
-
- W_REG_FLUSH(&regs->phyregaddr, addr);
-
- W_REG(&regs->phyregdata,
- ((R_REG(&regs->phyregdata) & ~mask) | (val & mask)));
+ val &= mask;
+ bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr), addr);
+ bcma_maskset16(pi->d11core, D11REGOFFS(phyregdata), ~mask, val);
pi->phy_wreg = 0;
}
@@ -412,10 +381,8 @@ struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp)
sh->sromrev = shp->sromrev;
sh->boardtype = shp->boardtype;
sh->boardrev = shp->boardrev;
- sh->boardvendor = shp->boardvendor;
sh->boardflags = shp->boardflags;
sh->boardflags2 = shp->boardflags2;
- sh->buscorerev = shp->buscorerev;
sh->fast_timer = PHY_SW_TIMER_FAST;
sh->slow_timer = PHY_SW_TIMER_SLOW;
@@ -458,7 +425,7 @@ static u32 wlc_phy_get_radio_ver(struct brcms_phy *pi)
}
struct brcms_phy_pub *
-wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
+wlc_phy_attach(struct shared_phy *sh, struct bcma_device *d11core,
int bandtype, struct wiphy *wiphy)
{
struct brcms_phy *pi;
@@ -470,7 +437,7 @@ wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
if (D11REV_IS(sh->corerev, 4))
sflags = SISF_2G_PHY | SISF_5G_PHY;
else
- sflags = ai_core_sflags(sh->sih, 0, 0);
+ sflags = bcma_aread32(d11core, BCMA_IOST);
if (bandtype == BRCM_BAND_5G) {
if ((sflags & (SISF_5G_PHY | SISF_DB_PHY)) == 0)
@@ -488,7 +455,7 @@ wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
if (pi == NULL)
return NULL;
pi->wiphy = wiphy;
- pi->regs = regs;
+ pi->d11core = d11core;
pi->sh = sh;
pi->phy_init_por = true;
pi->phy_wreg_limit = PHY_WREG_LIMIT;
@@ -503,7 +470,7 @@ wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
pi->pubpi.coreflags = SICF_GMODE;
wlapi_bmac_corereset(pi->sh->physhim, pi->pubpi.coreflags);
- phyversion = R_REG(&pi->regs->phyversion);
+ phyversion = bcma_read16(pi->d11core, D11REGOFFS(phyversion));
pi->pubpi.phy_type = PHY_TYPE(phyversion);
pi->pubpi.phy_rev = phyversion & PV_PV_MASK;
@@ -515,8 +482,8 @@ wlc_phy_attach(struct shared_phy *sh, struct d11regs __iomem *regs,
pi->pubpi.phy_corenum = PHY_CORE_NUM_2;
pi->pubpi.ana_rev = (phyversion & PV_AV_MASK) >> PV_AV_SHIFT;
- if (!pi->pubpi.phy_type == PHY_TYPE_N &&
- !pi->pubpi.phy_type == PHY_TYPE_LCN)
+ if (pi->pubpi.phy_type != PHY_TYPE_N &&
+ pi->pubpi.phy_type != PHY_TYPE_LCN)
goto err;
if (bandtype == BRCM_BAND_5G) {
@@ -787,14 +754,14 @@ void wlc_phy_init(struct brcms_phy_pub *pih, u16 chanspec)
pi->radio_chanspec = chanspec;
- mc = R_REG(&pi->regs->maccontrol);
+ mc = bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
if (WARN(mc & MCTL_EN_MAC, "HW error MAC running on init"))
return;
if (!(pi->measure_hold & PHY_HOLD_FOR_SCAN))
pi->measure_hold |= PHY_HOLD_FOR_NOT_ASSOC;
- if (WARN(!(ai_core_sflags(pi->sh->sih, 0, 0) & SISF_FCLKA),
+ if (WARN(!(bcma_aread32(pi->d11core, BCMA_IOST) & SISF_FCLKA),
"HW error SISF_FCLKA\n"))
return;
@@ -833,8 +800,8 @@ void wlc_phy_cal_init(struct brcms_phy_pub *pih)
struct brcms_phy *pi = (struct brcms_phy *) pih;
void (*cal_init)(struct brcms_phy *) = NULL;
- if (WARN((R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC) != 0,
- "HW error: MAC enabled during phy cal\n"))
+ if (WARN((bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC) != 0, "HW error: MAC enabled during phy cal\n"))
return;
if (!pi->initialized) {
@@ -1025,7 +992,7 @@ wlc_phy_init_radio_regs(struct brcms_phy *pi,
void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on)
{
#define DUMMY_PKT_LEN 20
- struct d11regs __iomem *regs = pi->regs;
+ struct bcma_device *core = pi->d11core;
int i, count;
u8 ofdmpkt[DUMMY_PKT_LEN] = {
0xcc, 0x01, 0x02, 0x00, 0x00, 0x00, 0xd4, 0x00, 0x00, 0x00,
@@ -1041,26 +1008,28 @@ void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on)
wlapi_bmac_write_template_ram(pi->sh->physhim, 0, DUMMY_PKT_LEN,
dummypkt);
- W_REG(&regs->xmtsel, 0);
+ bcma_write16(core, D11REGOFFS(xmtsel), 0);
if (D11REV_GE(pi->sh->corerev, 11))
- W_REG(&regs->wepctl, 0x100);
+ bcma_write16(core, D11REGOFFS(wepctl), 0x100);
else
- W_REG(&regs->wepctl, 0);
+ bcma_write16(core, D11REGOFFS(wepctl), 0);
- W_REG(&regs->txe_phyctl, (ofdm ? 1 : 0) | PHY_TXC_ANT_0);
+ bcma_write16(core, D11REGOFFS(txe_phyctl),
+ (ofdm ? 1 : 0) | PHY_TXC_ANT_0);
if (ISNPHY(pi) || ISLCNPHY(pi))
- W_REG(&regs->txe_phyctl1, 0x1A02);
+ bcma_write16(core, D11REGOFFS(txe_phyctl1), 0x1A02);
- W_REG(&regs->txe_wm_0, 0);
- W_REG(&regs->txe_wm_1, 0);
+ bcma_write16(core, D11REGOFFS(txe_wm_0), 0);
+ bcma_write16(core, D11REGOFFS(txe_wm_1), 0);
- W_REG(&regs->xmttplatetxptr, 0);
- W_REG(&regs->xmttxcnt, DUMMY_PKT_LEN);
+ bcma_write16(core, D11REGOFFS(xmttplatetxptr), 0);
+ bcma_write16(core, D11REGOFFS(xmttxcnt), DUMMY_PKT_LEN);
- W_REG(&regs->xmtsel, ((8 << 8) | (1 << 5) | (1 << 2) | 2));
+ bcma_write16(core, D11REGOFFS(xmtsel),
+ ((8 << 8) | (1 << 5) | (1 << 2) | 2));
- W_REG(&regs->txe_ctl, 0);
+ bcma_write16(core, D11REGOFFS(txe_ctl), 0);
if (!pa_on) {
if (ISNPHY(pi))
@@ -1068,27 +1037,28 @@ void wlc_phy_do_dummy_tx(struct brcms_phy *pi, bool ofdm, bool pa_on)
}
if (ISNPHY(pi) || ISLCNPHY(pi))
- W_REG(&regs->txe_aux, 0xD0);
+ bcma_write16(core, D11REGOFFS(txe_aux), 0xD0);
else
- W_REG(&regs->txe_aux, ((1 << 5) | (1 << 4)));
+ bcma_write16(core, D11REGOFFS(txe_aux), ((1 << 5) | (1 << 4)));
- (void)R_REG(&regs->txe_aux);
+ (void)bcma_read16(core, D11REGOFFS(txe_aux));
i = 0;
count = ofdm ? 30 : 250;
while ((i++ < count)
- && (R_REG(&regs->txe_status) & (1 << 7)))
+ && (bcma_read16(core, D11REGOFFS(txe_status)) & (1 << 7)))
udelay(10);
i = 0;
- while ((i++ < 10)
- && ((R_REG(&regs->txe_status) & (1 << 10)) == 0))
+ while ((i++ < 10) &&
+ ((bcma_read16(core, D11REGOFFS(txe_status)) & (1 << 10)) == 0))
udelay(10);
i = 0;
- while ((i++ < 10) && ((R_REG(&regs->ifsstat) & (1 << 8))))
+ while ((i++ < 10) &&
+ ((bcma_read16(core, D11REGOFFS(ifsstat)) & (1 << 8))))
udelay(10);
if (!pa_on) {
@@ -1145,7 +1115,7 @@ static bool wlc_phy_cal_txpower_recalc_sw(struct brcms_phy *pi)
void wlc_phy_switch_radio(struct brcms_phy_pub *pih, bool on)
{
struct brcms_phy *pi = (struct brcms_phy *) pih;
- (void)R_REG(&pi->regs->maccontrol);
+ (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
if (ISNPHY(pi)) {
wlc_phy_switch_radio_nphy(pi, on);
@@ -1385,7 +1355,7 @@ void wlc_phy_txpower_target_set(struct brcms_phy_pub *ppi,
memcpy(&pi->tx_user_target[TXP_FIRST_MCS_40_SDM],
&txpwr->mcs_40_mimo[0], BRCMS_NUM_RATES_MCS_2_STREAM);
- if (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC)
+ if (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) & MCTL_EN_MAC)
mac_enabled = true;
if (mac_enabled)
@@ -1415,7 +1385,8 @@ int wlc_phy_txpower_set(struct brcms_phy_pub *ppi, uint qdbm, bool override)
if (!SCAN_INPROG_PHY(pi)) {
bool suspend;
- suspend = (0 == (R_REG(&pi->regs->maccontrol) &
+ suspend = (0 == (bcma_read32(pi->d11core,
+ D11REGOFFS(maccontrol)) &
MCTL_EN_MAC));
if (!suspend)
@@ -1868,18 +1839,17 @@ void wlc_phy_runbist_config(struct brcms_phy_pub *ppi, bool start_end)
if (NREV_IS(pi->pubpi.phy_rev, 3)
|| NREV_IS(pi->pubpi.phy_rev, 4)) {
- W_REG(&pi->regs->phyregaddr, 0xa0);
- (void)R_REG(&pi->regs->phyregaddr);
- rxc = R_REG(&pi->regs->phyregdata);
- W_REG(&pi->regs->phyregdata,
- (0x1 << 15) | rxc);
+ bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr),
+ 0xa0);
+ bcma_set16(pi->d11core, D11REGOFFS(phyregdata),
+ 0x1 << 15);
}
} else {
if (NREV_IS(pi->pubpi.phy_rev, 3)
|| NREV_IS(pi->pubpi.phy_rev, 4)) {
- W_REG(&pi->regs->phyregaddr, 0xa0);
- (void)R_REG(&pi->regs->phyregaddr);
- W_REG(&pi->regs->phyregdata, rxc);
+ bcma_wflush16(pi->d11core, D11REGOFFS(phyregaddr),
+ 0xa0);
+ bcma_write16(pi->d11core, D11REGOFFS(phyregdata), rxc);
}
wlc_phy_por_inform(ppi);
@@ -1999,7 +1969,9 @@ void wlc_phy_txpower_hw_ctrl_set(struct brcms_phy_pub *ppi, bool hwpwrctrl)
pi->txpwrctrl = hwpwrctrl;
if (ISNPHY(pi)) {
- suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core,
+ D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
@@ -2201,7 +2173,8 @@ void wlc_phy_ant_rxdiv_set(struct brcms_phy_pub *ppi, u8 val)
if (!pi->sh->clk)
return;
- suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
@@ -2419,8 +2392,8 @@ wlc_phy_noise_sample_request(struct brcms_phy_pub *pih, u8 reason, u8 ch)
wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP2, 0);
wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP3, 0);
- OR_REG(&pi->regs->maccommand,
- MCMD_BG_NOISE);
+ bcma_set32(pi->d11core, D11REGOFFS(maccommand),
+ MCMD_BG_NOISE);
} else {
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_deaf_mode(pi, (bool) 0);
@@ -2438,8 +2411,8 @@ wlc_phy_noise_sample_request(struct brcms_phy_pub *pih, u8 reason, u8 ch)
wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP2, 0);
wlapi_bmac_write_shm(pi->sh->physhim, M_PWRIND_MAP3, 0);
- OR_REG(&pi->regs->maccommand,
- MCMD_BG_NOISE);
+ bcma_set32(pi->d11core, D11REGOFFS(maccommand),
+ MCMD_BG_NOISE);
} else {
struct phy_iq_est est[PHY_CORE_MAX];
u32 cmplx_pwr[PHY_CORE_MAX];
@@ -2932,29 +2905,29 @@ void wlc_lcnphy_epa_switch(struct brcms_phy *pi, bool mode)
mod_phy_reg(pi, 0x44c, (0x1 << 2), (1) << 2);
}
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(struct chipcregs, gpiocontrol),
- ~0x0, 0x0);
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(struct chipcregs, gpioout), 0x40,
- 0x40);
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(struct chipcregs, gpioouten), 0x40,
- 0x40);
+ ai_cc_reg(pi->sh->sih,
+ offsetof(struct chipcregs, gpiocontrol),
+ ~0x0, 0x0);
+ ai_cc_reg(pi->sh->sih,
+ offsetof(struct chipcregs, gpioout),
+ 0x40, 0x40);
+ ai_cc_reg(pi->sh->sih,
+ offsetof(struct chipcregs, gpioouten),
+ 0x40, 0x40);
} else {
mod_phy_reg(pi, 0x44c, (0x1 << 2), (0) << 2);
mod_phy_reg(pi, 0x44d, (0x1 << 2), (0) << 2);
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(struct chipcregs, gpioout), 0x40,
- 0x00);
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(struct chipcregs, gpioouten), 0x40,
- 0x0);
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(struct chipcregs, gpiocontrol),
- ~0x0, 0x40);
+ ai_cc_reg(pi->sh->sih,
+ offsetof(struct chipcregs, gpioout),
+ 0x40, 0x00);
+ ai_cc_reg(pi->sh->sih,
+ offsetof(struct chipcregs, gpioouten),
+ 0x40, 0x0);
+ ai_cc_reg(pi->sh->sih,
+ offsetof(struct chipcregs, gpiocontrol),
+ ~0x0, 0x40);
}
}
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
index 96e1516..e34a71e 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_hal.h
@@ -166,7 +166,6 @@ struct shared_phy_params {
struct phy_shim_info *physhim;
uint unit;
uint corerev;
- uint buscorerev;
u16 vid;
u16 did;
uint chip;
@@ -175,7 +174,6 @@ struct shared_phy_params {
uint sromrev;
uint boardtype;
uint boardrev;
- uint boardvendor;
u32 boardflags;
u32 boardflags2;
};
@@ -183,7 +181,7 @@ struct shared_phy_params {
extern struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp);
extern struct brcms_phy_pub *wlc_phy_attach(struct shared_phy *sh,
- struct d11regs __iomem *regs,
+ struct bcma_device *d11core,
int bandtype, struct wiphy *wiphy);
extern void wlc_phy_detach(struct brcms_phy_pub *ppi);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
index bea8524..af00e2c 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_int.h
@@ -503,10 +503,8 @@ struct shared_phy {
uint sromrev;
uint boardtype;
uint boardrev;
- uint boardvendor;
u32 boardflags;
u32 boardflags2;
- uint buscorerev;
uint fast_timer;
uint slow_timer;
uint glacial_timer;
@@ -559,7 +557,7 @@ struct brcms_phy {
} u;
bool user_txpwr_at_rfport;
- struct d11regs __iomem *regs;
+ struct bcma_device *d11core;
struct brcms_phy *next;
struct brcms_phy_pub pubpi;
@@ -774,11 +772,6 @@ struct brcms_phy {
s16 nphy_noise_win[PHY_CORE_MAX][PHY_NOISE_WINDOW_SZ];
u8 nphy_noise_index;
- u8 nphy_txpid2g[PHY_CORE_NUM_2];
- u8 nphy_txpid5g[PHY_CORE_NUM_2];
- u8 nphy_txpid5gl[PHY_CORE_NUM_2];
- u8 nphy_txpid5gh[PHY_CORE_NUM_2];
-
bool nphy_gain_boost;
bool nphy_elna_gain_config;
u16 old_bphy_test;
@@ -1095,7 +1088,7 @@ extern void wlc_phy_table_write_nphy(struct brcms_phy *pi, u32, u32, u32,
#define BRCMS_PHY_WAR_PR51571(pi) \
if (NREV_LT((pi)->pubpi.phy_rev, 3)) \
- (void)R_REG(&(pi)->regs->maccontrol)
+ (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol))
extern void wlc_phy_cal_perical_nphy_run(struct brcms_phy *pi, u8 caltype);
extern void wlc_phy_aci_reset_nphy(struct brcms_phy *pi);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
index a63aa99..ce8562a 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -1603,7 +1603,7 @@ wlc_lcnphy_set_chanspec_tweaks(struct brcms_phy *pi, u16 chanspec)
si_pmu_pllupd(pi->sh->sih);
write_phy_reg(pi, 0x942, 0);
wlc_lcnphy_txrx_spur_avoidance_mode(pi, false);
- pi_lcn->lcnphy_spurmod = 0;
+ pi_lcn->lcnphy_spurmod = false;
mod_phy_reg(pi, 0x424, (0xff << 8), (0x1b) << 8);
write_phy_reg(pi, 0x425, 0x5907);
@@ -1616,7 +1616,7 @@ wlc_lcnphy_set_chanspec_tweaks(struct brcms_phy *pi, u16 chanspec)
write_phy_reg(pi, 0x942, 0);
wlc_lcnphy_txrx_spur_avoidance_mode(pi, true);
- pi_lcn->lcnphy_spurmod = 0;
+ pi_lcn->lcnphy_spurmod = false;
mod_phy_reg(pi, 0x424, (0xff << 8), (0x1f) << 8);
write_phy_reg(pi, 0x425, 0x590a);
@@ -2325,7 +2325,7 @@ static s8 wlc_lcnphy_tempcompensated_txpwrctrl(struct brcms_phy *pi)
{
s8 index, delta_brd, delta_temp, new_index, tempcorrx;
s16 manp, meas_temp, temp_diff;
- bool neg = 0;
+ bool neg = false;
u16 temp;
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
@@ -2348,7 +2348,7 @@ static s8 wlc_lcnphy_tempcompensated_txpwrctrl(struct brcms_phy *pi)
manp = LCNPHY_TEMPSENSE(pi_lcn->lcnphy_rawtempsense);
temp_diff = manp - meas_temp;
if (temp_diff < 0) {
- neg = 1;
+ neg = true;
temp_diff = -temp_diff;
}
@@ -2813,10 +2813,8 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10;
u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4;
idleTssi = read_phy_reg(pi, 0x4ab);
- suspend =
- (0 ==
- (R_REG(&((struct brcms_phy *) pi)->regs->maccontrol) &
- MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
@@ -2890,7 +2888,8 @@ static void wlc_lcnphy_vbat_temp_sense_setup(struct brcms_phy *pi, u8 mode)
for (i = 0; i < 14; i++)
values_to_save[i] = read_phy_reg(pi, tempsense_phy_regs[i]);
- suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
save_txpwrCtrlEn = read_radio_reg(pi, 0x4a4);
@@ -3016,8 +3015,8 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
bool suspend;
struct brcms_phy *pi = (struct brcms_phy *) ppi;
- suspend =
- (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
@@ -3535,15 +3534,17 @@ wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo, u16 thresh,
timer = 0;
old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
- curval1 = R_REG(&pi->regs->psm_corectlsts);
+ curval1 = bcma_read16(pi->d11core, D11REGOFFS(psm_corectlsts));
ptr[130] = 0;
- W_REG(&pi->regs->psm_corectlsts, ((1 << 6) | curval1));
+ bcma_write16(pi->d11core, D11REGOFFS(psm_corectlsts),
+ ((1 << 6) | curval1));
- W_REG(&pi->regs->smpl_clct_strptr, 0x7E00);
- W_REG(&pi->regs->smpl_clct_stpptr, 0x8000);
+ bcma_write16(pi->d11core, D11REGOFFS(smpl_clct_strptr), 0x7E00);
+ bcma_write16(pi->d11core, D11REGOFFS(smpl_clct_stpptr), 0x8000);
udelay(20);
- curval2 = R_REG(&pi->regs->psm_phy_hdr_param);
- W_REG(&pi->regs->psm_phy_hdr_param, curval2 | 0x30);
+ curval2 = bcma_read16(pi->d11core, D11REGOFFS(psm_phy_hdr_param));
+ bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param),
+ curval2 | 0x30);
write_phy_reg(pi, 0x555, 0x0);
write_phy_reg(pi, 0x5a6, 0x5);
@@ -3560,19 +3561,19 @@ wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo, u16 thresh,
sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
write_phy_reg(pi, 0x6da, (u32) (sslpnCalibClkEnCtrl | 0x2008));
- stpptr = R_REG(&pi->regs->smpl_clct_stpptr);
- curptr = R_REG(&pi->regs->smpl_clct_curptr);
+ stpptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_stpptr));
+ curptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_curptr));
do {
udelay(10);
- curptr = R_REG(&pi->regs->smpl_clct_curptr);
+ curptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_curptr));
timer++;
} while ((curptr != stpptr) && (timer < 500));
- W_REG(&pi->regs->psm_phy_hdr_param, 0x2);
+ bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), 0x2);
strptr = 0x7E00;
- W_REG(&pi->regs->tplatewrptr, strptr);
+ bcma_write32(pi->d11core, D11REGOFFS(tplatewrptr), strptr);
while (strptr < 0x8000) {
- val = R_REG(&pi->regs->tplatewrdata);
+ val = bcma_read32(pi->d11core, D11REGOFFS(tplatewrdata));
imag = ((val >> 16) & 0x3ff);
real = ((val) & 0x3ff);
if (imag > 511)
@@ -3597,8 +3598,8 @@ wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo, u16 thresh,
}
write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
- W_REG(&pi->regs->psm_phy_hdr_param, curval2);
- W_REG(&pi->regs->psm_corectlsts, curval1);
+ bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), curval2);
+ bcma_write16(pi->d11core, D11REGOFFS(psm_corectlsts), curval1);
}
static void
@@ -3681,8 +3682,8 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels,
wlc_lcnphy_set_cc(pi, cal_type, phy_c15, phy_c16);
udelay(20);
for (phy_c8 = 0; phy_c7 != 0 && phy_c8 < num_levels; phy_c8++) {
- phy_c23 = 1;
- phy_c22 = 0;
+ phy_c23 = true;
+ phy_c22 = false;
switch (cal_type) {
case 0:
phy_c10 = 511;
@@ -3700,18 +3701,18 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels,
phy_c9 = read_phy_reg(pi, 0x93d);
phy_c9 = 2 * phy_c9;
- phy_c24 = 0;
+ phy_c24 = false;
phy_c5 = 7;
- phy_c25 = 1;
+ phy_c25 = true;
while (1) {
write_radio_reg(pi, RADIO_2064_REG026,
(phy_c5 & 0x7) | ((phy_c5 & 0x7) << 4));
udelay(50);
- phy_c22 = 0;
+ phy_c22 = false;
ptr[130] = 0;
wlc_lcnphy_samp_cap(pi, 1, phy_c9, &ptr[0], 2);
if (ptr[130] == 1)
- phy_c22 = 1;
+ phy_c22 = true;
if (phy_c22)
phy_c5 -= 1;
if ((phy_c22 != phy_c24) && (!phy_c25))
@@ -3721,7 +3722,7 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels,
if (phy_c5 <= 0 || phy_c5 >= 7)
break;
phy_c24 = phy_c22;
- phy_c25 = 0;
+ phy_c25 = false;
}
if (phy_c5 < 0)
@@ -3772,10 +3773,10 @@ wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels,
phy_c13 = phy_c11;
phy_c14 = phy_c12;
}
- phy_c23 = 0;
+ phy_c23 = false;
}
}
- phy_c23 = 1;
+ phy_c23 = true;
phy_c15 = phy_c13;
phy_c16 = phy_c14;
phy_c7 = phy_c7 >> 1;
@@ -3965,12 +3966,12 @@ s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode)
{
u16 tempsenseval1, tempsenseval2;
s16 avg = 0;
- bool suspend = 0;
+ bool suspend = false;
if (mode == 1) {
- suspend =
- (0 ==
- (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core,
+ D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_vbat_temp_sense_setup(pi, TEMPSENSE);
@@ -4007,14 +4008,14 @@ u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode)
{
u16 tempsenseval1, tempsenseval2;
s32 avg = 0;
- bool suspend = 0;
+ bool suspend = false;
u16 SAVE_txpwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
if (mode == 1) {
- suspend =
- (0 ==
- (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core,
+ D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_vbat_temp_sense_setup(pi, TEMPSENSE);
@@ -4075,12 +4076,12 @@ s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode)
{
u16 vbatsenseval;
s32 avg = 0;
- bool suspend = 0;
+ bool suspend = false;
if (mode == 1) {
- suspend =
- (0 ==
- (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core,
+ D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_vbat_temp_sense_setup(pi, VBATSENSE);
@@ -4127,8 +4128,8 @@ static void wlc_lcnphy_glacial_timer_based_cal(struct brcms_phy *pi)
s8 index;
u16 SAVE_pwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
- suspend =
- (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
wlc_lcnphy_deaf_mode(pi, true);
@@ -4166,8 +4167,8 @@ static void wlc_lcnphy_periodic_cal(struct brcms_phy *pi)
pi_lcn->lcnphy_full_cal_channel = CHSPEC_CHANNEL(pi->radio_chanspec);
index = pi_lcn->lcnphy_current_index;
- suspend =
- (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend) {
wlapi_bmac_write_shm(pi->sh->physhim, M_CTS_DURATION, 10000);
wlapi_suspend_mac_and_wait(pi->sh->physhim);
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
index cd19c2f..a16f1ab 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
@@ -29,6 +29,7 @@
#include "phy_radio.h"
#include "phyreg_n.h"
#include "phytbl_n.h"
+#include "soc.h"
#define READ_RADIO_REG2(pi, radio_type, jspace, core, reg_name) \
read_radio_reg(pi, radio_type##_##jspace##_##reg_name | \
@@ -14417,12 +14418,6 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
switch (band_num) {
case 0:
- pi->nphy_txpid2g[PHY_CORE_0] =
- (u8) wlapi_getintvar(shim,
- BRCMS_SROM_TXPID2GA0);
- pi->nphy_txpid2g[PHY_CORE_1] =
- (u8) wlapi_getintvar(shim,
- BRCMS_SROM_TXPID2GA1);
pi->nphy_pwrctrl_info[PHY_CORE_0].max_pwr_2g =
(s8) wlapi_getintvar(shim,
BRCMS_SROM_MAXP2GA0);
@@ -14486,12 +14481,6 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
break;
case 1:
- pi->nphy_txpid5g[PHY_CORE_0] =
- (u8) wlapi_getintvar(shim,
- BRCMS_SROM_TXPID5GA0);
- pi->nphy_txpid5g[PHY_CORE_1] =
- (u8) wlapi_getintvar(shim,
- BRCMS_SROM_TXPID5GA1);
pi->nphy_pwrctrl_info[PHY_CORE_0].max_pwr_5gm =
(s8) wlapi_getintvar(shim, BRCMS_SROM_MAXP5GA0);
pi->nphy_pwrctrl_info[PHY_CORE_1].max_pwr_5gm =
@@ -14551,12 +14540,6 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
break;
case 2:
- pi->nphy_txpid5gl[0] =
- (u8) wlapi_getintvar(shim,
- BRCMS_SROM_TXPID5GLA0);
- pi->nphy_txpid5gl[1] =
- (u8) wlapi_getintvar(shim,
- BRCMS_SROM_TXPID5GLA1);
pi->nphy_pwrctrl_info[0].max_pwr_5gl =
(s8) wlapi_getintvar(shim,
BRCMS_SROM_MAXP5GLA0);
@@ -14615,12 +14598,6 @@ static void wlc_phy_txpwr_srom_read_ppr_nphy(struct brcms_phy *pi)
break;
case 3:
- pi->nphy_txpid5gh[0] =
- (u8) wlapi_getintvar(shim,
- BRCMS_SROM_TXPID5GHA0);
- pi->nphy_txpid5gh[1] =
- (u8) wlapi_getintvar(shim,
- BRCMS_SROM_TXPID5GHA1);
pi->nphy_pwrctrl_info[0].max_pwr_5gh =
(s8) wlapi_getintvar(shim,
BRCMS_SROM_MAXP5GHA0);
@@ -17825,7 +17802,7 @@ static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi)
if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) {
wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK);
- (void)R_REG(&pi->regs->maccontrol);
+ (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
udelay(1);
}
@@ -17976,7 +17953,7 @@ static void wlc_phy_txpwrctrl_pwr_setup_nphy(struct brcms_phy *pi)
if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) {
wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK);
- (void)R_REG(&pi->regs->maccontrol);
+ (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
udelay(1);
}
@@ -19470,8 +19447,6 @@ void wlc_phy_init_nphy(struct brcms_phy *pi)
u8 tx_pwr_ctrl_state;
bool do_nphy_cal = false;
uint core;
- uint origidx, intr_val;
- struct d11regs __iomem *regs;
u32 d11_clk_ctl_st;
bool do_rssi_cal = false;
@@ -19485,25 +19460,21 @@ void wlc_phy_init_nphy(struct brcms_phy *pi)
(pi->sh->chippkg == BCM4718_PKG_ID))) {
if ((pi->sh->boardflags & BFL_EXTLNA) &&
(CHSPEC_IS2G(pi->radio_chanspec)))
- ai_corereg(pi->sh->sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol),
- 0x40, 0x40);
+ ai_cc_reg(pi->sh->sih,
+ offsetof(struct chipcregs, chipcontrol),
+ 0x40, 0x40);
}
if ((pi->nphy_gband_spurwar2_en) && CHSPEC_IS2G(pi->radio_chanspec) &&
CHSPEC_IS40(pi->radio_chanspec)) {
- regs = (struct d11regs __iomem *)
- ai_switch_core(pi->sh->sih,
- D11_CORE_ID, &origidx,
- &intr_val);
- d11_clk_ctl_st = R_REG(&regs->clk_ctl_st);
- AND_REG(&regs->clk_ctl_st,
- ~(CCS_FORCEHT | CCS_HTAREQ));
+ d11_clk_ctl_st = bcma_read32(pi->d11core,
+ D11REGOFFS(clk_ctl_st));
+ bcma_mask32(pi->d11core, D11REGOFFS(clk_ctl_st),
+ ~(CCS_FORCEHT | CCS_HTAREQ));
- W_REG(&regs->clk_ctl_st, d11_clk_ctl_st);
-
- ai_restore_core(pi->sh->sih, origidx, intr_val);
+ bcma_write32(pi->d11core, D11REGOFFS(clk_ctl_st),
+ d11_clk_ctl_st);
}
pi->use_int_tx_iqlo_cal_nphy =
@@ -19908,7 +19879,8 @@ void wlc_phy_rxcore_setstate_nphy(struct brcms_phy_pub *pih, u8 rxcore_bitmask)
if (!pi->sh->clk)
return;
- suspend = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!suspend)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
@@ -21286,28 +21258,28 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
val = read_phy_reg(pi, 0x09) & NPHY_BandControl_currentBand;
if (CHSPEC_IS5G(chanspec) && !val) {
- val = R_REG(&pi->regs->psm_phy_hdr_param);
- W_REG(&pi->regs->psm_phy_hdr_param,
+ val = bcma_read16(pi->d11core, D11REGOFFS(psm_phy_hdr_param));
+ bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param),
(val | MAC_PHY_FORCE_CLK));
or_phy_reg(pi, (NPHY_TO_BPHY_OFF + BPHY_BB_CONFIG),
(BBCFG_RESETCCA | BBCFG_RESETRX));
- W_REG(&pi->regs->psm_phy_hdr_param, val);
+ bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), val);
or_phy_reg(pi, 0x09, NPHY_BandControl_currentBand);
} else if (!CHSPEC_IS5G(chanspec) && val) {
and_phy_reg(pi, 0x09, ~NPHY_BandControl_currentBand);
- val = R_REG(&pi->regs->psm_phy_hdr_param);
- W_REG(&pi->regs->psm_phy_hdr_param,
+ val = bcma_read16(pi->d11core, D11REGOFFS(psm_phy_hdr_param));
+ bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param),
(val | MAC_PHY_FORCE_CLK));
and_phy_reg(pi, (NPHY_TO_BPHY_OFF + BPHY_BB_CONFIG),
(u16) (~(BBCFG_RESETCCA | BBCFG_RESETRX)));
- W_REG(&pi->regs->psm_phy_hdr_param, val);
+ bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), val);
}
write_phy_reg(pi, 0x1ce, ci->PHY_BW1a);
@@ -21365,24 +21337,23 @@ wlc_phy_chanspec_nphy_setup(struct brcms_phy *pi, u16 chanspec,
spuravoid = 1;
wlapi_bmac_core_phypll_ctl(pi->sh->physhim, false);
- si_pmu_spuravoid(pi->sh->sih, spuravoid);
+ si_pmu_spuravoid_pllupdate(pi->sh->sih, spuravoid);
wlapi_bmac_core_phypll_ctl(pi->sh->physhim, true);
if ((pi->sh->chip == BCM43224_CHIP_ID) ||
(pi->sh->chip == BCM43225_CHIP_ID)) {
-
if (spuravoid == 1) {
-
- W_REG(&pi->regs->tsf_clk_frac_l,
- 0x5341);
- W_REG(&pi->regs->tsf_clk_frac_h,
- 0x8);
+ bcma_write16(pi->d11core,
+ D11REGOFFS(tsf_clk_frac_l),
+ 0x5341);
+ bcma_write16(pi->d11core,
+ D11REGOFFS(tsf_clk_frac_h), 0x8);
} else {
-
- W_REG(&pi->regs->tsf_clk_frac_l,
- 0x8889);
- W_REG(&pi->regs->tsf_clk_frac_h,
- 0x8);
+ bcma_write16(pi->d11core,
+ D11REGOFFS(tsf_clk_frac_l),
+ 0x8889);
+ bcma_write16(pi->d11core,
+ D11REGOFFS(tsf_clk_frac_h), 0x8);
}
}
@@ -21522,13 +21493,13 @@ void wlc_phy_antsel_init(struct brcms_phy_pub *ppi, bool lut_init)
ai_gpiocontrol(pi->sh->sih, mask, mask, GPIO_DRV_PRIORITY);
- mc = R_REG(&pi->regs->maccontrol);
+ mc = bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
mc &= ~MCTL_GPOUT_SEL_MASK;
- W_REG(&pi->regs->maccontrol, mc);
+ bcma_write32(pi->d11core, D11REGOFFS(maccontrol), mc);
- OR_REG(&pi->regs->psm_gpio_oe, mask);
+ bcma_set16(pi->d11core, D11REGOFFS(psm_gpio_oe), mask);
- AND_REG(&pi->regs->psm_gpio_out, ~mask);
+ bcma_mask16(pi->d11core, D11REGOFFS(psm_gpio_out), ~mask);
if (lut_init) {
write_phy_reg(pi, 0xf8, 0x02d8);
@@ -21545,9 +21516,8 @@ u16 wlc_phy_classifier_nphy(struct brcms_phy *pi, u16 mask, u16 val)
bool suspended = false;
if (D11REV_IS(pi->sh->corerev, 16)) {
- suspended =
- (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC) ?
- false : true;
+ suspended = (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC) ? false : true;
if (!suspended)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
}
@@ -25406,7 +25376,8 @@ static void wlc_phy_a4(struct brcms_phy *pi, bool full_cal)
if (pi->nphy_papd_skip == 1)
return;
- phy_b3 = (0 == (R_REG(&pi->regs->maccontrol) & MCTL_EN_MAC));
+ phy_b3 = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
+ MCTL_EN_MAC));
if (!phy_b3)
wlapi_suspend_mac_and_wait(pi->sh->physhim);
@@ -27994,20 +27965,11 @@ void wlc_phy_txpwr_fixpower_nphy(struct brcms_phy *pi)
chan_freq_range = wlc_phy_get_chan_freq_range_nphy(pi, 0);
switch (chan_freq_range) {
case WL_CHAN_FREQ_RANGE_2G:
- txpi[0] = pi->nphy_txpid2g[0];
- txpi[1] = pi->nphy_txpid2g[1];
- break;
case WL_CHAN_FREQ_RANGE_5GL:
- txpi[0] = pi->nphy_txpid5gl[0];
- txpi[1] = pi->nphy_txpid5gl[1];
- break;
case WL_CHAN_FREQ_RANGE_5GM:
- txpi[0] = pi->nphy_txpid5g[0];
- txpi[1] = pi->nphy_txpid5g[1];
- break;
case WL_CHAN_FREQ_RANGE_5GH:
- txpi[0] = pi->nphy_txpid5gh[0];
- txpi[1] = pi->nphy_txpid5gh[1];
+ txpi[0] = 0;
+ txpi[1] = 0;
break;
default:
txpi[0] = txpi[1] = 91;
@@ -28389,7 +28351,7 @@ void wlc_phy_txpower_recalc_target_nphy(struct brcms_phy *pi)
if (D11REV_IS(pi->sh->corerev, 11) || D11REV_IS(pi->sh->corerev, 12)) {
wlapi_bmac_mctrl(pi->sh->physhim, MCTL_PHYLOCK, MCTL_PHYLOCK);
- (void)R_REG(&pi->regs->maccontrol);
+ (void)bcma_read32(pi->d11core, D11REGOFFS(maccontrol));
udelay(1);
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.c b/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
index 3b36e3a..4931d29 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.c
@@ -23,6 +23,7 @@
#include "pub.h"
#include "aiutils.h"
#include "pmu.h"
+#include "soc.h"
/*
* external LPO crystal frequency
@@ -114,10 +115,10 @@ static void si_pmu_res_masks(struct si_pub *sih, u32 * pmin, u32 * pmax)
uint rsrcs;
/* # resources */
- rsrcs = (sih->pmucaps & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
+ rsrcs = (ai_get_pmucaps(sih) & PCAP_RC_MASK) >> PCAP_RC_SHIFT;
/* determine min/max rsrc masks */
- switch (sih->chip) {
+ switch (ai_get_chip_id(sih)) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
/* ??? */
@@ -138,75 +139,84 @@ static void si_pmu_res_masks(struct si_pub *sih, u32 * pmin, u32 * pmax)
*pmax = max_mask;
}
-static void
-si_pmu_spuravoid_pllupdate(struct si_pub *sih, struct chipcregs __iomem *cc,
- u8 spuravoid)
+void si_pmu_spuravoid_pllupdate(struct si_pub *sih, u8 spuravoid)
{
u32 tmp = 0;
+ struct bcma_device *core;
- switch (sih->chip) {
+ /* switch to chipc */
+ core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
+
+ switch (ai_get_chip_id(sih)) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
if (spuravoid == 1) {
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
- W_REG(&cc->pllcontrol_data, 0x11500010);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
- W_REG(&cc->pllcontrol_data, 0x000C0C06);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
- W_REG(&cc->pllcontrol_data, 0x0F600a08);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
- W_REG(&cc->pllcontrol_data, 0x00000000);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
- W_REG(&cc->pllcontrol_data, 0x2001E920);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
- W_REG(&cc->pllcontrol_data, 0x88888815);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL0);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x11500010);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL1);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x000C0C06);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL2);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x0F600a08);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL3);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x00000000);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL4);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x2001E920);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL5);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x88888815);
} else {
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
- W_REG(&cc->pllcontrol_data, 0x11100010);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
- W_REG(&cc->pllcontrol_data, 0x000c0c06);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
- W_REG(&cc->pllcontrol_data, 0x03000a08);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
- W_REG(&cc->pllcontrol_data, 0x00000000);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
- W_REG(&cc->pllcontrol_data, 0x200005c0);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
- W_REG(&cc->pllcontrol_data, 0x88888815);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL0);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x11100010);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL1);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x000c0c06);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL2);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x03000a08);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL3);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x00000000);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL4);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x200005c0);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr),
+ PMU1_PLL0_PLLCTL5);
+ bcma_write32(core, CHIPCREGOFFS(pllcontrol_data),
+ 0x88888815);
}
tmp = 1 << 10;
break;
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL0);
- W_REG(&cc->pllcontrol_data, 0x11100008);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL1);
- W_REG(&cc->pllcontrol_data, 0x0c000c06);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL2);
- W_REG(&cc->pllcontrol_data, 0x03000a08);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL3);
- W_REG(&cc->pllcontrol_data, 0x00000000);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL4);
- W_REG(&cc->pllcontrol_data, 0x200005c0);
- W_REG(&cc->pllcontrol_addr, PMU1_PLL0_PLLCTL5);
- W_REG(&cc->pllcontrol_data, 0x88888855);
-
- tmp = 1 << 10;
- break;
-
default:
/* bail out */
return;
}
- tmp |= R_REG(&cc->pmucontrol);
- W_REG(&cc->pmucontrol, tmp);
+ bcma_set32(core, CHIPCREGOFFS(pmucontrol), tmp);
}
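The converted writes above all follow chipcommon's indirect PLL-register convention: the register index goes into pllcontrol_addr, the value into pllcontrol_data. A minimal sketch of a helper capturing that pair (the name pmu_pll_write() is hypothetical, not part of this patch):

static void pmu_pll_write(struct bcma_device *core, u32 pll_reg, u32 value)
{
	/* chipcommon indirection: index register first, then the data */
	bcma_write32(core, CHIPCREGOFFS(pllcontrol_addr), pll_reg);
	bcma_write32(core, CHIPCREGOFFS(pllcontrol_data), value);
}

/* e.g. pmu_pll_write(core, PMU1_PLL0_PLLCTL0, 0x11500010); */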
u16 si_pmu_fast_pwrup_delay(struct si_pub *sih)
{
uint delay = PMU_MAX_TRANSITION_DLY;
- switch (sih->chip) {
+ switch (ai_get_chip_id(sih)) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
case BCM4313_CHIP_ID:
@@ -219,54 +229,35 @@ u16 si_pmu_fast_pwrup_delay(struct si_pub *sih)
return (u16) delay;
}
-void si_pmu_sprom_enable(struct si_pub *sih, bool enable)
-{
- struct chipcregs __iomem *cc;
- uint origidx;
-
- /* Remember original core before switch to chipc */
- origidx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
-
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
-}
-
/* Read/write a chipcontrol reg */
u32 si_pmu_chipcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
{
- ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, chipcontrol_addr),
- ~0, reg);
- return ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, chipcontrol_data), mask,
- val);
+ ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol_addr), ~0, reg);
+ return ai_cc_reg(sih, offsetof(struct chipcregs, chipcontrol_data),
+ mask, val);
}
/* Read/write a regcontrol reg */
u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
{
- ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, regcontrol_addr),
- ~0, reg);
- return ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, regcontrol_data), mask,
- val);
+ ai_cc_reg(sih, offsetof(struct chipcregs, regcontrol_addr), ~0, reg);
+ return ai_cc_reg(sih, offsetof(struct chipcregs, regcontrol_data),
+ mask, val);
}
/* Read/write a pllcontrol reg */
u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val)
{
- ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, pllcontrol_addr),
- ~0, reg);
- return ai_corereg(sih, SI_CC_IDX,
- offsetof(struct chipcregs, pllcontrol_data), mask,
- val);
+ ai_cc_reg(sih, offsetof(struct chipcregs, pllcontrol_addr), ~0, reg);
+ return ai_cc_reg(sih, offsetof(struct chipcregs, pllcontrol_data),
+ mask, val);
}
/* PMU PLL update */
void si_pmu_pllupd(struct si_pub *sih)
{
- ai_corereg(sih, SI_CC_IDX, offsetof(struct chipcregs, pmucontrol),
- PCTL_PLL_PLLCTL_UPD, PCTL_PLL_PLLCTL_UPD);
+ ai_cc_reg(sih, offsetof(struct chipcregs, pmucontrol),
+ PCTL_PLL_PLLCTL_UPD, PCTL_PLL_PLLCTL_UPD);
}
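These accessors share one pattern: program the chipcommon *_addr register with the index through ai_cc_reg(), then read-modify-write the matching *_data register under the given mask. A usage sketch, assuming the usual Broadcom convention that a zero mask performs a plain read (register index 0 is only illustrative):

u32 cur;

/* read chipcontrol register 0: mask 0 modifies nothing, value is returned */
cur = si_pmu_chipcontrol(sih, 0, 0, 0);

/* set bit 1 in regcontrol register 0 while leaving the other bits alone */
si_pmu_regcontrol(sih, 0, 0x2, 0x2);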
/* query alp/xtal clock frequency */
@@ -275,10 +266,10 @@ u32 si_pmu_alp_clock(struct si_pub *sih)
u32 clock = ALP_CLOCK;
/* bail out with default */
- if (!(sih->cccaps & CC_CAP_PMU))
+ if (!(ai_get_cccaps(sih) & CC_CAP_PMU))
return clock;
- switch (sih->chip) {
+ switch (ai_get_chip_id(sih)) {
case BCM43224_CHIP_ID:
case BCM43225_CHIP_ID:
case BCM4313_CHIP_ID:
@@ -292,95 +283,29 @@ u32 si_pmu_alp_clock(struct si_pub *sih)
return clock;
}
-void si_pmu_spuravoid(struct si_pub *sih, u8 spuravoid)
-{
- struct chipcregs __iomem *cc;
- uint origidx, intr_val;
-
- /* Remember original core before switch to chipc */
- cc = (struct chipcregs __iomem *)
- ai_switch_core(sih, CC_CORE_ID, &origidx, &intr_val);
-
- /* update the pll changes */
- si_pmu_spuravoid_pllupdate(sih, cc, spuravoid);
-
- /* Return to original core */
- ai_restore_core(sih, origidx, intr_val);
-}
-
/* initialize PMU */
void si_pmu_init(struct si_pub *sih)
{
- struct chipcregs __iomem *cc;
- uint origidx;
+ struct bcma_device *core;
- /* Remember original core before switch to chipc */
- origidx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
-
- if (sih->pmurev == 1)
- AND_REG(&cc->pmucontrol, ~PCTL_NOILP_ON_WAIT);
- else if (sih->pmurev >= 2)
- OR_REG(&cc->pmucontrol, PCTL_NOILP_ON_WAIT);
+ /* select chipc */
+ core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
-}
-
-/* initialize PMU chip controls and other chip level stuff */
-void si_pmu_chip_init(struct si_pub *sih)
-{
- uint origidx;
-
- /* Gate off SPROM clock and chip select signals */
- si_pmu_sprom_enable(sih, false);
-
- /* Remember original core */
- origidx = ai_coreidx(sih);
-
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
-}
-
-/* initialize PMU switch/regulators */
-void si_pmu_swreg_init(struct si_pub *sih)
-{
-}
-
-/* initialize PLL */
-void si_pmu_pll_init(struct si_pub *sih, uint xtalfreq)
-{
- struct chipcregs __iomem *cc;
- uint origidx;
-
- /* Remember original core before switch to chipc */
- origidx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
-
- switch (sih->chip) {
- case BCM4313_CHIP_ID:
- case BCM43224_CHIP_ID:
- case BCM43225_CHIP_ID:
- /* ??? */
- break;
- default:
- break;
- }
-
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
+ if (ai_get_pmurev(sih) == 1)
+ bcma_mask32(core, CHIPCREGOFFS(pmucontrol),
+ ~PCTL_NOILP_ON_WAIT);
+ else if (ai_get_pmurev(sih) >= 2)
+ bcma_set32(core, CHIPCREGOFFS(pmucontrol), PCTL_NOILP_ON_WAIT);
}
/* initialize PMU resources */
void si_pmu_res_init(struct si_pub *sih)
{
- struct chipcregs __iomem *cc;
- uint origidx;
+ struct bcma_device *core;
u32 min_mask = 0, max_mask = 0;
- /* Remember original core before switch to chipc */
- origidx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
+ /* select chipc */

+ core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
/* Determine min/max rsrc masks */
si_pmu_res_masks(sih, &min_mask, &max_mask);
@@ -390,55 +315,50 @@ void si_pmu_res_init(struct si_pub *sih)
/* Program max resource mask */
if (max_mask)
- W_REG(&cc->max_res_mask, max_mask);
+ bcma_write32(core, CHIPCREGOFFS(max_res_mask), max_mask);
/* Program min resource mask */
if (min_mask)
- W_REG(&cc->min_res_mask, min_mask);
+ bcma_write32(core, CHIPCREGOFFS(min_res_mask), min_mask);
/* Add some delay; allow resources to come up and settle. */
mdelay(2);
-
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
}
u32 si_pmu_measure_alpclk(struct si_pub *sih)
{
- struct chipcregs __iomem *cc;
- uint origidx;
+ struct bcma_device *core;
u32 alp_khz;
- if (sih->pmurev < 10)
+ if (ai_get_pmurev(sih) < 10)
return 0;
/* Remember original core before switch to chipc */
- origidx = ai_coreidx(sih);
- cc = ai_setcoreidx(sih, SI_CC_IDX);
+ core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
- if (R_REG(&cc->pmustatus) & PST_EXTLPOAVAIL) {
+ if (bcma_read32(core, CHIPCREGOFFS(pmustatus)) & PST_EXTLPOAVAIL) {
u32 ilp_ctr, alp_hz;
/*
* Enable the reg to measure the freq,
* in case it was disabled before
*/
- W_REG(&cc->pmu_xtalfreq,
- 1U << PMU_XTALFREQ_REG_MEASURE_SHIFT);
+ bcma_write32(core, CHIPCREGOFFS(pmu_xtalfreq),
+ 1U << PMU_XTALFREQ_REG_MEASURE_SHIFT);
/* Delay for well over 4 ILP clocks */
udelay(1000);
/* Read the latched number of ALP ticks per 4 ILP ticks */
- ilp_ctr =
- R_REG(&cc->pmu_xtalfreq) & PMU_XTALFREQ_REG_ILPCTR_MASK;
+ ilp_ctr = bcma_read32(core, CHIPCREGOFFS(pmu_xtalfreq)) &
+ PMU_XTALFREQ_REG_ILPCTR_MASK;
/*
* Turn off the PMU_XTALFREQ_REG_MEASURE_SHIFT
* bit to save power
*/
- W_REG(&cc->pmu_xtalfreq, 0);
+ bcma_write32(core, CHIPCREGOFFS(pmu_xtalfreq), 0);
/* Calculate ALP frequency */
alp_hz = (ilp_ctr * EXT_ILP_HZ) / 4;
@@ -451,8 +371,5 @@ u32 si_pmu_measure_alpclk(struct si_pub *sih)
} else
alp_khz = 0;
- /* Return to original core */
- ai_setcoreidx(sih, origidx);
-
return alp_khz;
}
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
index 3a08c62..3e39c5e 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pmu.h
@@ -26,13 +26,10 @@ extern u32 si_pmu_chipcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
extern u32 si_pmu_regcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
extern u32 si_pmu_alp_clock(struct si_pub *sih);
extern void si_pmu_pllupd(struct si_pub *sih);
-extern void si_pmu_spuravoid(struct si_pub *sih, u8 spuravoid);
+extern void si_pmu_spuravoid_pllupdate(struct si_pub *sih, u8 spuravoid);
extern u32 si_pmu_pllcontrol(struct si_pub *sih, uint reg, u32 mask, u32 val);
extern void si_pmu_init(struct si_pub *sih);
-extern void si_pmu_chip_init(struct si_pub *sih);
-extern void si_pmu_pll_init(struct si_pub *sih, u32 xtalfreq);
extern void si_pmu_res_init(struct si_pub *sih);
-extern void si_pmu_swreg_init(struct si_pub *sih);
extern u32 si_pmu_measure_alpclk(struct si_pub *sih);
#endif /* _BRCM_PMU_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/pub.h b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
index 37bb2dc..f0038ad 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/pub.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/pub.h
@@ -17,6 +17,7 @@
#ifndef _BRCM_PUB_H_
#define _BRCM_PUB_H_
+#include <linux/bcma/bcma.h>
#include <brcmu_wifi.h>
#include "types.h"
#include "defs.h"
@@ -170,22 +171,6 @@ enum brcms_srom_id {
BRCMS_SROM_TSSIPOS2G,
BRCMS_SROM_TSSIPOS5G,
BRCMS_SROM_TXCHAIN,
- BRCMS_SROM_TXPID2GA0,
- BRCMS_SROM_TXPID2GA1,
- BRCMS_SROM_TXPID2GA2,
- BRCMS_SROM_TXPID2GA3,
- BRCMS_SROM_TXPID5GA0,
- BRCMS_SROM_TXPID5GA1,
- BRCMS_SROM_TXPID5GA2,
- BRCMS_SROM_TXPID5GA3,
- BRCMS_SROM_TXPID5GHA0,
- BRCMS_SROM_TXPID5GHA1,
- BRCMS_SROM_TXPID5GHA2,
- BRCMS_SROM_TXPID5GHA3,
- BRCMS_SROM_TXPID5GLA0,
- BRCMS_SROM_TXPID5GLA1,
- BRCMS_SROM_TXPID5GLA2,
- BRCMS_SROM_TXPID5GLA3,
/*
* per-path identifiers (see srom.c)
*/
@@ -225,10 +210,6 @@ enum brcms_srom_id {
BRCMS_SROM_PA2GW2A1,
BRCMS_SROM_PA2GW2A2,
BRCMS_SROM_PA2GW2A3,
- BRCMS_SROM_PA2GW3A0,
- BRCMS_SROM_PA2GW3A1,
- BRCMS_SROM_PA2GW3A2,
- BRCMS_SROM_PA2GW3A3,
BRCMS_SROM_PA5GHW0A0,
BRCMS_SROM_PA5GHW0A1,
BRCMS_SROM_PA5GHW0A2,
@@ -241,10 +222,6 @@ enum brcms_srom_id {
BRCMS_SROM_PA5GHW2A1,
BRCMS_SROM_PA5GHW2A2,
BRCMS_SROM_PA5GHW2A3,
- BRCMS_SROM_PA5GHW3A0,
- BRCMS_SROM_PA5GHW3A1,
- BRCMS_SROM_PA5GHW3A2,
- BRCMS_SROM_PA5GHW3A3,
BRCMS_SROM_PA5GLW0A0,
BRCMS_SROM_PA5GLW0A1,
BRCMS_SROM_PA5GLW0A2,
@@ -257,10 +234,6 @@ enum brcms_srom_id {
BRCMS_SROM_PA5GLW2A1,
BRCMS_SROM_PA5GLW2A2,
BRCMS_SROM_PA5GLW2A3,
- BRCMS_SROM_PA5GLW3A0,
- BRCMS_SROM_PA5GLW3A1,
- BRCMS_SROM_PA5GLW3A2,
- BRCMS_SROM_PA5GLW3A3,
BRCMS_SROM_PA5GW0A0,
BRCMS_SROM_PA5GW0A1,
BRCMS_SROM_PA5GW0A2,
@@ -273,14 +246,9 @@ enum brcms_srom_id {
BRCMS_SROM_PA5GW2A1,
BRCMS_SROM_PA5GW2A2,
BRCMS_SROM_PA5GW2A3,
- BRCMS_SROM_PA5GW3A0,
- BRCMS_SROM_PA5GW3A1,
- BRCMS_SROM_PA5GW3A2,
- BRCMS_SROM_PA5GW3A3,
};
#define BRCMS_NUMRATES 16 /* max # of rates in a rateset */
-#define D11_PHY_HDR_LEN 6 /* Phy header length - 6 bytes */
/* phy types */
#define PHY_TYPE_A 0 /* Phy type A */
@@ -414,7 +382,6 @@ struct brcms_pub {
uint _nbands; /* # bands supported */
uint now; /* # elapsed seconds */
- bool promisc; /* promiscuous destination address */
bool delayed_down; /* down delayed */
bool associated; /* true:part of [I]BSS, false: not */
/* (union of stas_associated, aps_associated) */
@@ -564,15 +531,14 @@ struct brcms_antselcfg {
/* common functions for every port */
extern struct brcms_c_info *
-brcms_c_attach(struct brcms_info *wl, u16 vendor, u16 device, uint unit,
- bool piomode, void __iomem *regsva, struct pci_dev *btparam,
- uint *perr);
+brcms_c_attach(struct brcms_info *wl, struct bcma_device *core, uint unit,
+ bool piomode, uint *perr);
extern uint brcms_c_detach(struct brcms_c_info *wlc);
extern int brcms_c_up(struct brcms_c_info *wlc);
extern uint brcms_c_down(struct brcms_c_info *wlc);
extern bool brcms_c_chipmatch(u16 vendor, u16 device);
-extern void brcms_c_init(struct brcms_c_info *wlc);
+extern void brcms_c_init(struct brcms_c_info *wlc, bool mute_tx);
extern void brcms_c_reset(struct brcms_c_info *wlc);
extern void brcms_c_intrson(struct brcms_c_info *wlc);
@@ -628,7 +594,7 @@ extern void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc,
u8 interval);
extern int brcms_c_set_tx_power(struct brcms_c_info *wlc, int txpwr);
extern int brcms_c_get_tx_power(struct brcms_c_info *wlc);
-extern void brcms_c_set_radio_mpc(struct brcms_c_info *wlc, bool mpc);
extern bool brcms_c_check_radio_disabled(struct brcms_c_info *wlc);
+extern void brcms_c_mute(struct brcms_c_info *wlc, bool on);
#endif /* _BRCM_PUB_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/rate.h b/drivers/net/wireless/brcm80211/brcmsmac/rate.h
index e7b9dc2..980d578 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/rate.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/rate.h
@@ -19,6 +19,7 @@
#include "types.h"
#include "d11.h"
+#include "phy_hal.h"
extern const u8 rate_info[];
extern const struct brcms_c_rateset cck_ofdm_mimo_rates;
@@ -198,11 +199,9 @@ static inline u8 cck_rspec(u8 cck)
/* Convert encoded rate value in plcp header to numerical rates in 500 KHz
* increments */
-extern const u8 ofdm_rate_lookup[];
-
static inline u8 ofdm_phy2mac_rate(u8 rlpt)
{
- return ofdm_rate_lookup[rlpt & 0x7];
+ return wlc_phy_get_ofdm_rate_lookup()[rlpt & 0x7];
}
static inline u8 cck_phy2mac_rate(u8 signal)
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/srom.c b/drivers/net/wireless/brcm80211/brcmsmac/srom.c
index 99f7910..5637436 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/srom.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/srom.c
@@ -28,6 +28,7 @@
#include "aiutils.h"
#include "otp.h"
#include "srom.h"
+#include "soc.h"
/*
* SROM CRC8 polynomial value:
@@ -62,9 +63,6 @@
#define SROM_MACHI_ET1 42
#define SROM_MACMID_ET1 43
#define SROM_MACLO_ET1 44
-#define SROM3_MACHI 37
-#define SROM3_MACMID 38
-#define SROM3_MACLO 39
#define SROM_BXARSSI2G 40
#define SROM_BXARSSI5G 41
@@ -101,7 +99,6 @@
#define SROM_BFL 57
#define SROM_BFL2 28
-#define SROM3_BFL2 61
#define SROM_AG10 58
@@ -109,99 +106,16 @@
#define SROM_OPO 60
-#define SROM3_LEDDC 62
-
#define SROM_CRCREV 63
-/* SROM Rev 4: Reallocate the software part of the srom to accommodate
- * MIMO features. It assumes up to two PCIE functions and 440 bytes
- * of usable srom i.e. the usable storage in chips with OTP that
- * implements hardware redundancy.
- */
-
#define SROM4_WORDS 220
-#define SROM4_SIGN 32
-#define SROM4_SIGNATURE 0x5372
-
-#define SROM4_BREV 33
-
-#define SROM4_BFL0 34
-#define SROM4_BFL1 35
-#define SROM4_BFL2 36
-#define SROM4_BFL3 37
-#define SROM5_BFL0 37
-#define SROM5_BFL1 38
-#define SROM5_BFL2 39
-#define SROM5_BFL3 40
-
-#define SROM4_MACHI 38
-#define SROM4_MACMID 39
-#define SROM4_MACLO 40
-#define SROM5_MACHI 41
-#define SROM5_MACMID 42
-#define SROM5_MACLO 43
-
-#define SROM4_CCODE 41
-#define SROM4_REGREV 42
-#define SROM5_CCODE 34
-#define SROM5_REGREV 35
-
-#define SROM4_LEDBH10 43
-#define SROM4_LEDBH32 44
-#define SROM5_LEDBH10 59
-#define SROM5_LEDBH32 60
-
-#define SROM4_LEDDC 45
-#define SROM5_LEDDC 45
-
-#define SROM4_AA 46
-
-#define SROM4_AG10 47
-#define SROM4_AG32 48
-
-#define SROM4_TXPID2G 49
-#define SROM4_TXPID5G 51
-#define SROM4_TXPID5GL 53
-#define SROM4_TXPID5GH 55
-
-#define SROM4_TXRXC 61
#define SROM4_TXCHAIN_MASK 0x000f
-#define SROM4_TXCHAIN_SHIFT 0
#define SROM4_RXCHAIN_MASK 0x00f0
-#define SROM4_RXCHAIN_SHIFT 4
#define SROM4_SWITCH_MASK 0xff00
-#define SROM4_SWITCH_SHIFT 8
/* Per-path fields */
#define MAX_PATH_SROM 4
-#define SROM4_PATH0 64
-#define SROM4_PATH1 87
-#define SROM4_PATH2 110
-#define SROM4_PATH3 133
-
-#define SROM4_2G_ITT_MAXP 0
-#define SROM4_2G_PA 1
-#define SROM4_5G_ITT_MAXP 5
-#define SROM4_5GLH_MAXP 6
-#define SROM4_5G_PA 7
-#define SROM4_5GL_PA 11
-#define SROM4_5GH_PA 15
-
-/* All the miriad power offsets */
-#define SROM4_2G_CCKPO 156
-#define SROM4_2G_OFDMPO 157
-#define SROM4_5G_OFDMPO 159
-#define SROM4_5GL_OFDMPO 161
-#define SROM4_5GH_OFDMPO 163
-#define SROM4_2G_MCSPO 165
-#define SROM4_5G_MCSPO 173
-#define SROM4_5GL_MCSPO 181
-#define SROM4_5GH_MCSPO 189
-#define SROM4_CDDPO 197
-#define SROM4_STBCPO 198
-#define SROM4_BW40PO 199
-#define SROM4_BWDUPPO 200
#define SROM4_CRCREV 219
@@ -424,103 +338,32 @@ struct brcms_varbuf {
static const struct brcms_sromvar pci_sromvars[] = {
{BRCMS_SROM_DEVID, 0xffffff00, SRFL_PRHEX | SRFL_NOVAR, PCI_F0DEVID,
0xffff},
- {BRCMS_SROM_BOARDREV, 0x0000000e, SRFL_PRHEX, SROM_AABREV,
- SROM_BR_MASK},
- {BRCMS_SROM_BOARDREV, 0x000000f0, SRFL_PRHEX, SROM4_BREV, 0xffff},
{BRCMS_SROM_BOARDREV, 0xffffff00, SRFL_PRHEX, SROM8_BREV, 0xffff},
- {BRCMS_SROM_BOARDFLAGS, 0x00000002, SRFL_PRHEX, SROM_BFL, 0xffff},
- {BRCMS_SROM_BOARDFLAGS, 0x00000004, SRFL_PRHEX | SRFL_MORE, SROM_BFL,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM_BFL2, 0xffff},
- {BRCMS_SROM_BOARDFLAGS, 0x00000008, SRFL_PRHEX | SRFL_MORE, SROM_BFL,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM3_BFL2, 0xffff},
- {BRCMS_SROM_BOARDFLAGS, 0x00000010, SRFL_PRHEX | SRFL_MORE, SROM4_BFL0,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM4_BFL1, 0xffff},
- {BRCMS_SROM_BOARDFLAGS, 0x000000e0, SRFL_PRHEX | SRFL_MORE, SROM5_BFL0,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM5_BFL1, 0xffff},
{BRCMS_SROM_BOARDFLAGS, 0xffffff00, SRFL_PRHEX | SRFL_MORE, SROM8_BFL0,
0xffff},
{BRCMS_SROM_CONT, 0, 0, SROM8_BFL1, 0xffff},
- {BRCMS_SROM_BOARDFLAGS2, 0x00000010, SRFL_PRHEX | SRFL_MORE, SROM4_BFL2,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM4_BFL3, 0xffff},
- {BRCMS_SROM_BOARDFLAGS2, 0x000000e0, SRFL_PRHEX | SRFL_MORE, SROM5_BFL2,
- 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM5_BFL3, 0xffff},
{BRCMS_SROM_BOARDFLAGS2, 0xffffff00, SRFL_PRHEX | SRFL_MORE, SROM8_BFL2,
0xffff},
{BRCMS_SROM_CONT, 0, 0, SROM8_BFL3, 0xffff},
{BRCMS_SROM_BOARDTYPE, 0xfffffffc, SRFL_PRHEX, SROM_SSID, 0xffff},
- {BRCMS_SROM_BOARDNUM, 0x00000006, 0, SROM_MACLO_IL0, 0xffff},
- {BRCMS_SROM_BOARDNUM, 0x00000008, 0, SROM3_MACLO, 0xffff},
- {BRCMS_SROM_BOARDNUM, 0x00000010, 0, SROM4_MACLO, 0xffff},
- {BRCMS_SROM_BOARDNUM, 0x000000e0, 0, SROM5_MACLO, 0xffff},
{BRCMS_SROM_BOARDNUM, 0xffffff00, 0, SROM8_MACLO, 0xffff},
- {BRCMS_SROM_CC, 0x00000002, 0, SROM_AABREV, SROM_CC_MASK},
- {BRCMS_SROM_REGREV, 0x00000008, 0, SROM_OPO, 0xff00},
- {BRCMS_SROM_REGREV, 0x00000010, 0, SROM4_REGREV, 0x00ff},
- {BRCMS_SROM_REGREV, 0x000000e0, 0, SROM5_REGREV, 0x00ff},
{BRCMS_SROM_REGREV, 0xffffff00, 0, SROM8_REGREV, 0x00ff},
- {BRCMS_SROM_LEDBH0, 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0x00ff},
- {BRCMS_SROM_LEDBH1, 0x0000000e, SRFL_NOFFS, SROM_LEDBH10, 0xff00},
- {BRCMS_SROM_LEDBH2, 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0x00ff},
- {BRCMS_SROM_LEDBH3, 0x0000000e, SRFL_NOFFS, SROM_LEDBH32, 0xff00},
- {BRCMS_SROM_LEDBH0, 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0x00ff},
- {BRCMS_SROM_LEDBH1, 0x00000010, SRFL_NOFFS, SROM4_LEDBH10, 0xff00},
- {BRCMS_SROM_LEDBH2, 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0x00ff},
- {BRCMS_SROM_LEDBH3, 0x00000010, SRFL_NOFFS, SROM4_LEDBH32, 0xff00},
- {BRCMS_SROM_LEDBH0, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0x00ff},
- {BRCMS_SROM_LEDBH1, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH10, 0xff00},
- {BRCMS_SROM_LEDBH2, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0x00ff},
- {BRCMS_SROM_LEDBH3, 0x000000e0, SRFL_NOFFS, SROM5_LEDBH32, 0xff00},
{BRCMS_SROM_LEDBH0, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH10, 0x00ff},
{BRCMS_SROM_LEDBH1, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH10, 0xff00},
{BRCMS_SROM_LEDBH2, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH32, 0x00ff},
{BRCMS_SROM_LEDBH3, 0xffffff00, SRFL_NOFFS, SROM8_LEDBH32, 0xff00},
- {BRCMS_SROM_PA0B0, 0x0000000e, SRFL_PRHEX, SROM_WL0PAB0, 0xffff},
- {BRCMS_SROM_PA0B1, 0x0000000e, SRFL_PRHEX, SROM_WL0PAB1, 0xffff},
- {BRCMS_SROM_PA0B2, 0x0000000e, SRFL_PRHEX, SROM_WL0PAB2, 0xffff},
- {BRCMS_SROM_PA0ITSSIT, 0x0000000e, 0, SROM_ITT, 0x00ff},
- {BRCMS_SROM_PA0MAXPWR, 0x0000000e, 0, SROM_WL10MAXP, 0x00ff},
{BRCMS_SROM_PA0B0, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB0, 0xffff},
{BRCMS_SROM_PA0B1, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB1, 0xffff},
{BRCMS_SROM_PA0B2, 0xffffff00, SRFL_PRHEX, SROM8_W0_PAB2, 0xffff},
{BRCMS_SROM_PA0ITSSIT, 0xffffff00, 0, SROM8_W0_ITTMAXP, 0xff00},
{BRCMS_SROM_PA0MAXPWR, 0xffffff00, 0, SROM8_W0_ITTMAXP, 0x00ff},
- {BRCMS_SROM_OPO, 0x0000000c, 0, SROM_OPO, 0x00ff},
{BRCMS_SROM_OPO, 0xffffff00, 0, SROM8_2G_OFDMPO, 0x00ff},
- {BRCMS_SROM_AA2G, 0x0000000e, 0, SROM_AABREV, SROM_AA0_MASK},
- {BRCMS_SROM_AA2G, 0x000000f0, 0, SROM4_AA, 0x00ff},
{BRCMS_SROM_AA2G, 0xffffff00, 0, SROM8_AA, 0x00ff},
- {BRCMS_SROM_AA5G, 0x0000000e, 0, SROM_AABREV, SROM_AA1_MASK},
- {BRCMS_SROM_AA5G, 0x000000f0, 0, SROM4_AA, 0xff00},
{BRCMS_SROM_AA5G, 0xffffff00, 0, SROM8_AA, 0xff00},
- {BRCMS_SROM_AG0, 0x0000000e, 0, SROM_AG10, 0x00ff},
- {BRCMS_SROM_AG1, 0x0000000e, 0, SROM_AG10, 0xff00},
- {BRCMS_SROM_AG0, 0x000000f0, 0, SROM4_AG10, 0x00ff},
- {BRCMS_SROM_AG1, 0x000000f0, 0, SROM4_AG10, 0xff00},
- {BRCMS_SROM_AG2, 0x000000f0, 0, SROM4_AG32, 0x00ff},
- {BRCMS_SROM_AG3, 0x000000f0, 0, SROM4_AG32, 0xff00},
{BRCMS_SROM_AG0, 0xffffff00, 0, SROM8_AG10, 0x00ff},
{BRCMS_SROM_AG1, 0xffffff00, 0, SROM8_AG10, 0xff00},
{BRCMS_SROM_AG2, 0xffffff00, 0, SROM8_AG32, 0x00ff},
{BRCMS_SROM_AG3, 0xffffff00, 0, SROM8_AG32, 0xff00},
- {BRCMS_SROM_PA1B0, 0x0000000e, SRFL_PRHEX, SROM_WL1PAB0, 0xffff},
- {BRCMS_SROM_PA1B1, 0x0000000e, SRFL_PRHEX, SROM_WL1PAB1, 0xffff},
- {BRCMS_SROM_PA1B2, 0x0000000e, SRFL_PRHEX, SROM_WL1PAB2, 0xffff},
- {BRCMS_SROM_PA1LOB0, 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB0, 0xffff},
- {BRCMS_SROM_PA1LOB1, 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB1, 0xffff},
- {BRCMS_SROM_PA1LOB2, 0x0000000c, SRFL_PRHEX, SROM_WL1LPAB2, 0xffff},
- {BRCMS_SROM_PA1HIB0, 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB0, 0xffff},
- {BRCMS_SROM_PA1HIB1, 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB1, 0xffff},
- {BRCMS_SROM_PA1HIB2, 0x0000000c, SRFL_PRHEX, SROM_WL1HPAB2, 0xffff},
- {BRCMS_SROM_PA1ITSSIT, 0x0000000e, 0, SROM_ITT, 0xff00},
- {BRCMS_SROM_PA1MAXPWR, 0x0000000e, 0, SROM_WL10MAXP, 0xff00},
- {BRCMS_SROM_PA1LOMAXPWR, 0x0000000c, 0, SROM_WL1LHMAXP, 0xff00},
- {BRCMS_SROM_PA1HIMAXPWR, 0x0000000c, 0, SROM_WL1LHMAXP, 0x00ff},
{BRCMS_SROM_PA1B0, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB0, 0xffff},
{BRCMS_SROM_PA1B1, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB1, 0xffff},
{BRCMS_SROM_PA1B2, 0xffffff00, SRFL_PRHEX, SROM8_W1_PAB2, 0xffff},
@@ -534,40 +377,20 @@ static const struct brcms_sromvar pci_sromvars[] = {
{BRCMS_SROM_PA1MAXPWR, 0xffffff00, 0, SROM8_W1_ITTMAXP, 0x00ff},
{BRCMS_SROM_PA1LOMAXPWR, 0xffffff00, 0, SROM8_W1_MAXP_LCHC, 0xff00},
{BRCMS_SROM_PA1HIMAXPWR, 0xffffff00, 0, SROM8_W1_MAXP_LCHC, 0x00ff},
- {BRCMS_SROM_BXA2G, 0x00000008, 0, SROM_BXARSSI2G, 0x1800},
- {BRCMS_SROM_RSSISAV2G, 0x00000008, 0, SROM_BXARSSI2G, 0x0700},
- {BRCMS_SROM_RSSISMC2G, 0x00000008, 0, SROM_BXARSSI2G, 0x00f0},
- {BRCMS_SROM_RSSISMF2G, 0x00000008, 0, SROM_BXARSSI2G, 0x000f},
{BRCMS_SROM_BXA2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x1800},
{BRCMS_SROM_RSSISAV2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x0700},
{BRCMS_SROM_RSSISMC2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x00f0},
{BRCMS_SROM_RSSISMF2G, 0xffffff00, 0, SROM8_BXARSSI2G, 0x000f},
- {BRCMS_SROM_BXA5G, 0x00000008, 0, SROM_BXARSSI5G, 0x1800},
- {BRCMS_SROM_RSSISAV5G, 0x00000008, 0, SROM_BXARSSI5G, 0x0700},
- {BRCMS_SROM_RSSISMC5G, 0x00000008, 0, SROM_BXARSSI5G, 0x00f0},
- {BRCMS_SROM_RSSISMF5G, 0x00000008, 0, SROM_BXARSSI5G, 0x000f},
{BRCMS_SROM_BXA5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x1800},
{BRCMS_SROM_RSSISAV5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x0700},
{BRCMS_SROM_RSSISMC5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x00f0},
{BRCMS_SROM_RSSISMF5G, 0xffffff00, 0, SROM8_BXARSSI5G, 0x000f},
- {BRCMS_SROM_TRI2G, 0x00000008, 0, SROM_TRI52G, 0x00ff},
- {BRCMS_SROM_TRI5G, 0x00000008, 0, SROM_TRI52G, 0xff00},
- {BRCMS_SROM_TRI5GL, 0x00000008, 0, SROM_TRI5GHL, 0x00ff},
- {BRCMS_SROM_TRI5GH, 0x00000008, 0, SROM_TRI5GHL, 0xff00},
{BRCMS_SROM_TRI2G, 0xffffff00, 0, SROM8_TRI52G, 0x00ff},
{BRCMS_SROM_TRI5G, 0xffffff00, 0, SROM8_TRI52G, 0xff00},
{BRCMS_SROM_TRI5GL, 0xffffff00, 0, SROM8_TRI5GHL, 0x00ff},
{BRCMS_SROM_TRI5GH, 0xffffff00, 0, SROM8_TRI5GHL, 0xff00},
- {BRCMS_SROM_RXPO2G, 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0x00ff},
- {BRCMS_SROM_RXPO5G, 0x00000008, SRFL_PRSIGN, SROM_RXPO52G, 0xff00},
{BRCMS_SROM_RXPO2G, 0xffffff00, SRFL_PRSIGN, SROM8_RXPO52G, 0x00ff},
{BRCMS_SROM_RXPO5G, 0xffffff00, SRFL_PRSIGN, SROM8_RXPO52G, 0xff00},
- {BRCMS_SROM_TXCHAIN, 0x000000f0, SRFL_NOFFS, SROM4_TXRXC,
- SROM4_TXCHAIN_MASK},
- {BRCMS_SROM_RXCHAIN, 0x000000f0, SRFL_NOFFS, SROM4_TXRXC,
- SROM4_RXCHAIN_MASK},
- {BRCMS_SROM_ANTSWITCH, 0x000000f0, SRFL_NOFFS, SROM4_TXRXC,
- SROM4_SWITCH_MASK},
{BRCMS_SROM_TXCHAIN, 0xffffff00, SRFL_NOFFS, SROM8_TXRXC,
SROM4_TXCHAIN_MASK},
{BRCMS_SROM_RXCHAIN, 0xffffff00, SRFL_NOFFS, SROM8_TXRXC,
@@ -594,43 +417,11 @@ static const struct brcms_sromvar pci_sromvars[] = {
SROM8_FEM_ANTSWLUT_MASK},
{BRCMS_SROM_TEMPTHRESH, 0xffffff00, 0, SROM8_THERMAL, 0xff00},
{BRCMS_SROM_TEMPOFFSET, 0xffffff00, 0, SROM8_THERMAL, 0x00ff},
- {BRCMS_SROM_TXPID2GA0, 0x000000f0, 0, SROM4_TXPID2G, 0x00ff},
- {BRCMS_SROM_TXPID2GA1, 0x000000f0, 0, SROM4_TXPID2G, 0xff00},
- {BRCMS_SROM_TXPID2GA2, 0x000000f0, 0, SROM4_TXPID2G + 1, 0x00ff},
- {BRCMS_SROM_TXPID2GA3, 0x000000f0, 0, SROM4_TXPID2G + 1, 0xff00},
- {BRCMS_SROM_TXPID5GA0, 0x000000f0, 0, SROM4_TXPID5G, 0x00ff},
- {BRCMS_SROM_TXPID5GA1, 0x000000f0, 0, SROM4_TXPID5G, 0xff00},
- {BRCMS_SROM_TXPID5GA2, 0x000000f0, 0, SROM4_TXPID5G + 1, 0x00ff},
- {BRCMS_SROM_TXPID5GA3, 0x000000f0, 0, SROM4_TXPID5G + 1, 0xff00},
- {BRCMS_SROM_TXPID5GLA0, 0x000000f0, 0, SROM4_TXPID5GL, 0x00ff},
- {BRCMS_SROM_TXPID5GLA1, 0x000000f0, 0, SROM4_TXPID5GL, 0xff00},
- {BRCMS_SROM_TXPID5GLA2, 0x000000f0, 0, SROM4_TXPID5GL + 1, 0x00ff},
- {BRCMS_SROM_TXPID5GLA3, 0x000000f0, 0, SROM4_TXPID5GL + 1, 0xff00},
- {BRCMS_SROM_TXPID5GHA0, 0x000000f0, 0, SROM4_TXPID5GH, 0x00ff},
- {BRCMS_SROM_TXPID5GHA1, 0x000000f0, 0, SROM4_TXPID5GH, 0xff00},
- {BRCMS_SROM_TXPID5GHA2, 0x000000f0, 0, SROM4_TXPID5GH + 1, 0x00ff},
- {BRCMS_SROM_TXPID5GHA3, 0x000000f0, 0, SROM4_TXPID5GH + 1, 0xff00},
-
- {BRCMS_SROM_CCODE, 0x0000000f, SRFL_CCODE, SROM_CCODE, 0xffff},
- {BRCMS_SROM_CCODE, 0x00000010, SRFL_CCODE, SROM4_CCODE, 0xffff},
- {BRCMS_SROM_CCODE, 0x000000e0, SRFL_CCODE, SROM5_CCODE, 0xffff},
+
{BRCMS_SROM_CCODE, 0xffffff00, SRFL_CCODE, SROM8_CCODE, 0xffff},
{BRCMS_SROM_MACADDR, 0xffffff00, SRFL_ETHADDR, SROM8_MACHI, 0xffff},
- {BRCMS_SROM_MACADDR, 0x000000e0, SRFL_ETHADDR, SROM5_MACHI, 0xffff},
- {BRCMS_SROM_MACADDR, 0x00000010, SRFL_ETHADDR, SROM4_MACHI, 0xffff},
- {BRCMS_SROM_MACADDR, 0x00000008, SRFL_ETHADDR, SROM3_MACHI, 0xffff},
- {BRCMS_SROM_IL0MACADDR, 0x00000007, SRFL_ETHADDR, SROM_MACHI_IL0,
- 0xffff},
- {BRCMS_SROM_ET1MACADDR, 0x00000007, SRFL_ETHADDR, SROM_MACHI_ET1,
- 0xffff},
{BRCMS_SROM_LEDDC, 0xffffff00, SRFL_NOFFS | SRFL_LEDDC, SROM8_LEDDC,
0xffff},
- {BRCMS_SROM_LEDDC, 0x000000e0, SRFL_NOFFS | SRFL_LEDDC, SROM5_LEDDC,
- 0xffff},
- {BRCMS_SROM_LEDDC, 0x00000010, SRFL_NOFFS | SRFL_LEDDC, SROM4_LEDDC,
- 0xffff},
- {BRCMS_SROM_LEDDC, 0x00000008, SRFL_NOFFS | SRFL_LEDDC, SROM3_LEDDC,
- 0xffff},
{BRCMS_SROM_RAWTEMPSENSE, 0xffffff00, SRFL_PRHEX, SROM8_MPWR_RAWTS,
0x01ff},
{BRCMS_SROM_MEASPOWER, 0xffffff00, SRFL_PRHEX, SROM8_MPWR_RAWTS,
@@ -650,16 +441,7 @@ static const struct brcms_sromvar pci_sromvars[] = {
{BRCMS_SROM_PHYCAL_TEMPDELTA, 0xffffff00, 0, SROM8_PHYCAL_TEMPDELTA,
0x00ff},
- {BRCMS_SROM_CCK2GPO, 0x000000f0, 0, SROM4_2G_CCKPO, 0xffff},
{BRCMS_SROM_CCK2GPO, 0x00000100, 0, SROM8_2G_CCKPO, 0xffff},
- {BRCMS_SROM_OFDM2GPO, 0x000000f0, SRFL_MORE, SROM4_2G_OFDMPO, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM4_2G_OFDMPO + 1, 0xffff},
- {BRCMS_SROM_OFDM5GPO, 0x000000f0, SRFL_MORE, SROM4_5G_OFDMPO, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM4_5G_OFDMPO + 1, 0xffff},
- {BRCMS_SROM_OFDM5GLPO, 0x000000f0, SRFL_MORE, SROM4_5GL_OFDMPO, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM4_5GL_OFDMPO + 1, 0xffff},
- {BRCMS_SROM_OFDM5GHPO, 0x000000f0, SRFL_MORE, SROM4_5GH_OFDMPO, 0xffff},
- {BRCMS_SROM_CONT, 0, 0, SROM4_5GH_OFDMPO + 1, 0xffff},
{BRCMS_SROM_OFDM2GPO, 0x00000100, SRFL_MORE, SROM8_2G_OFDMPO, 0xffff},
{BRCMS_SROM_CONT, 0, 0, SROM8_2G_OFDMPO + 1, 0xffff},
{BRCMS_SROM_OFDM5GPO, 0x00000100, SRFL_MORE, SROM8_5G_OFDMPO, 0xffff},
@@ -668,38 +450,6 @@ static const struct brcms_sromvar pci_sromvars[] = {
{BRCMS_SROM_CONT, 0, 0, SROM8_5GL_OFDMPO + 1, 0xffff},
{BRCMS_SROM_OFDM5GHPO, 0x00000100, SRFL_MORE, SROM8_5GH_OFDMPO, 0xffff},
{BRCMS_SROM_CONT, 0, 0, SROM8_5GH_OFDMPO + 1, 0xffff},
- {BRCMS_SROM_MCS2GPO0, 0x000000f0, 0, SROM4_2G_MCSPO, 0xffff},
- {BRCMS_SROM_MCS2GPO1, 0x000000f0, 0, SROM4_2G_MCSPO + 1, 0xffff},
- {BRCMS_SROM_MCS2GPO2, 0x000000f0, 0, SROM4_2G_MCSPO + 2, 0xffff},
- {BRCMS_SROM_MCS2GPO3, 0x000000f0, 0, SROM4_2G_MCSPO + 3, 0xffff},
- {BRCMS_SROM_MCS2GPO4, 0x000000f0, 0, SROM4_2G_MCSPO + 4, 0xffff},
- {BRCMS_SROM_MCS2GPO5, 0x000000f0, 0, SROM4_2G_MCSPO + 5, 0xffff},
- {BRCMS_SROM_MCS2GPO6, 0x000000f0, 0, SROM4_2G_MCSPO + 6, 0xffff},
- {BRCMS_SROM_MCS2GPO7, 0x000000f0, 0, SROM4_2G_MCSPO + 7, 0xffff},
- {BRCMS_SROM_MCS5GPO0, 0x000000f0, 0, SROM4_5G_MCSPO, 0xffff},
- {BRCMS_SROM_MCS5GPO1, 0x000000f0, 0, SROM4_5G_MCSPO + 1, 0xffff},
- {BRCMS_SROM_MCS5GPO2, 0x000000f0, 0, SROM4_5G_MCSPO + 2, 0xffff},
- {BRCMS_SROM_MCS5GPO3, 0x000000f0, 0, SROM4_5G_MCSPO + 3, 0xffff},
- {BRCMS_SROM_MCS5GPO4, 0x000000f0, 0, SROM4_5G_MCSPO + 4, 0xffff},
- {BRCMS_SROM_MCS5GPO5, 0x000000f0, 0, SROM4_5G_MCSPO + 5, 0xffff},
- {BRCMS_SROM_MCS5GPO6, 0x000000f0, 0, SROM4_5G_MCSPO + 6, 0xffff},
- {BRCMS_SROM_MCS5GPO7, 0x000000f0, 0, SROM4_5G_MCSPO + 7, 0xffff},
- {BRCMS_SROM_MCS5GLPO0, 0x000000f0, 0, SROM4_5GL_MCSPO, 0xffff},
- {BRCMS_SROM_MCS5GLPO1, 0x000000f0, 0, SROM4_5GL_MCSPO + 1, 0xffff},
- {BRCMS_SROM_MCS5GLPO2, 0x000000f0, 0, SROM4_5GL_MCSPO + 2, 0xffff},
- {BRCMS_SROM_MCS5GLPO3, 0x000000f0, 0, SROM4_5GL_MCSPO + 3, 0xffff},
- {BRCMS_SROM_MCS5GLPO4, 0x000000f0, 0, SROM4_5GL_MCSPO + 4, 0xffff},
- {BRCMS_SROM_MCS5GLPO5, 0x000000f0, 0, SROM4_5GL_MCSPO + 5, 0xffff},
- {BRCMS_SROM_MCS5GLPO6, 0x000000f0, 0, SROM4_5GL_MCSPO + 6, 0xffff},
- {BRCMS_SROM_MCS5GLPO7, 0x000000f0, 0, SROM4_5GL_MCSPO + 7, 0xffff},
- {BRCMS_SROM_MCS5GHPO0, 0x000000f0, 0, SROM4_5GH_MCSPO, 0xffff},
- {BRCMS_SROM_MCS5GHPO1, 0x000000f0, 0, SROM4_5GH_MCSPO + 1, 0xffff},
- {BRCMS_SROM_MCS5GHPO2, 0x000000f0, 0, SROM4_5GH_MCSPO + 2, 0xffff},
- {BRCMS_SROM_MCS5GHPO3, 0x000000f0, 0, SROM4_5GH_MCSPO + 3, 0xffff},
- {BRCMS_SROM_MCS5GHPO4, 0x000000f0, 0, SROM4_5GH_MCSPO + 4, 0xffff},
- {BRCMS_SROM_MCS5GHPO5, 0x000000f0, 0, SROM4_5GH_MCSPO + 5, 0xffff},
- {BRCMS_SROM_MCS5GHPO6, 0x000000f0, 0, SROM4_5GH_MCSPO + 6, 0xffff},
- {BRCMS_SROM_MCS5GHPO7, 0x000000f0, 0, SROM4_5GH_MCSPO + 7, 0xffff},
{BRCMS_SROM_MCS2GPO0, 0x00000100, 0, SROM8_2G_MCSPO, 0xffff},
{BRCMS_SROM_MCS2GPO1, 0x00000100, 0, SROM8_2G_MCSPO + 1, 0xffff},
{BRCMS_SROM_MCS2GPO2, 0x00000100, 0, SROM8_2G_MCSPO + 2, 0xffff},
@@ -732,10 +482,6 @@ static const struct brcms_sromvar pci_sromvars[] = {
{BRCMS_SROM_MCS5GHPO5, 0x00000100, 0, SROM8_5GH_MCSPO + 5, 0xffff},
{BRCMS_SROM_MCS5GHPO6, 0x00000100, 0, SROM8_5GH_MCSPO + 6, 0xffff},
{BRCMS_SROM_MCS5GHPO7, 0x00000100, 0, SROM8_5GH_MCSPO + 7, 0xffff},
- {BRCMS_SROM_CDDPO, 0x000000f0, 0, SROM4_CDDPO, 0xffff},
- {BRCMS_SROM_STBCPO, 0x000000f0, 0, SROM4_STBCPO, 0xffff},
- {BRCMS_SROM_BW40PO, 0x000000f0, 0, SROM4_BW40PO, 0xffff},
- {BRCMS_SROM_BWDUPPO, 0x000000f0, 0, SROM4_BWDUPPO, 0xffff},
{BRCMS_SROM_CDDPO, 0x00000100, 0, SROM8_CDDPO, 0xffff},
{BRCMS_SROM_STBCPO, 0x00000100, 0, SROM8_STBCPO, 0xffff},
{BRCMS_SROM_BW40PO, 0x00000100, 0, SROM8_BW40PO, 0xffff},
@@ -811,34 +557,6 @@ static const struct brcms_sromvar pci_sromvars[] = {
};
static const struct brcms_sromvar perpath_pci_sromvars[] = {
- {BRCMS_SROM_MAXP2GA0, 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0x00ff},
- {BRCMS_SROM_ITT2GA0, 0x000000f0, 0, SROM4_2G_ITT_MAXP, 0xff00},
- {BRCMS_SROM_ITT5GA0, 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0xff00},
- {BRCMS_SROM_PA2GW0A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA, 0xffff},
- {BRCMS_SROM_PA2GW1A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 1, 0xffff},
- {BRCMS_SROM_PA2GW2A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 2, 0xffff},
- {BRCMS_SROM_PA2GW3A0, 0x000000f0, SRFL_PRHEX, SROM4_2G_PA + 3, 0xffff},
- {BRCMS_SROM_MAXP5GA0, 0x000000f0, 0, SROM4_5G_ITT_MAXP, 0x00ff},
- {BRCMS_SROM_MAXP5GHA0, 0x000000f0, 0, SROM4_5GLH_MAXP, 0x00ff},
- {BRCMS_SROM_MAXP5GLA0, 0x000000f0, 0, SROM4_5GLH_MAXP, 0xff00},
- {BRCMS_SROM_PA5GW0A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA, 0xffff},
- {BRCMS_SROM_PA5GW1A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 1, 0xffff},
- {BRCMS_SROM_PA5GW2A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 2, 0xffff},
- {BRCMS_SROM_PA5GW3A0, 0x000000f0, SRFL_PRHEX, SROM4_5G_PA + 3, 0xffff},
- {BRCMS_SROM_PA5GLW0A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA, 0xffff},
- {BRCMS_SROM_PA5GLW1A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 1,
- 0xffff},
- {BRCMS_SROM_PA5GLW2A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 2,
- 0xffff},
- {BRCMS_SROM_PA5GLW3A0, 0x000000f0, SRFL_PRHEX, SROM4_5GL_PA + 3,
- 0xffff},
- {BRCMS_SROM_PA5GHW0A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA, 0xffff},
- {BRCMS_SROM_PA5GHW1A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 1,
- 0xffff},
- {BRCMS_SROM_PA5GHW2A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 2,
- 0xffff},
- {BRCMS_SROM_PA5GHW3A0, 0x000000f0, SRFL_PRHEX, SROM4_5GH_PA + 3,
- 0xffff},
{BRCMS_SROM_MAXP2GA0, 0xffffff00, 0, SROM8_2G_ITT_MAXP, 0x00ff},
{BRCMS_SROM_ITT2GA0, 0xffffff00, 0, SROM8_2G_ITT_MAXP, 0xff00},
{BRCMS_SROM_ITT5GA0, 0xffffff00, 0, SROM8_5G_ITT_MAXP, 0xff00},
@@ -868,24 +586,6 @@ static const struct brcms_sromvar perpath_pci_sromvars[] = {
* shared between devices. */
static u8 brcms_srom_crc8_table[CRC8_TABLE_SIZE];
-static u16 __iomem *
-srom_window_address(struct si_pub *sih, u8 __iomem *curmap)
-{
- if (sih->ccrev < 32)
- return (u16 __iomem *)(curmap + PCI_BAR0_SPROM_OFFSET);
- if (sih->cccaps & CC_CAP_SROM)
- return (u16 __iomem *)
- (curmap + PCI_16KB0_CCREGS_OFFSET + CC_SROM_OTP);
-
- return NULL;
-}
-
-/* Parse SROM and create name=value pairs. 'srom' points to
- * the SROM word array. 'off' specifies the offset of the
- * first word 'srom' points to, which should be either 0 or
- * SROM3_SWRG_OFF (full SROM or software region).
- */
-
static uint mask_shift(u16 mask)
{
uint i;
@@ -906,18 +606,16 @@ static uint mask_width(u16 mask)
return 0;
}
-static inline void ltoh16_buf(u16 *buf, unsigned int size)
+static inline void le16_to_cpu_buf(u16 *buf, uint nwords)
{
- size /= 2;
- while (size--)
- *(buf + size) = le16_to_cpu(*(__le16 *)(buf + size));
+ while (nwords--)
+ *(buf + nwords) = le16_to_cpu(*(__le16 *)(buf + nwords));
}
-static inline void htol16_buf(u16 *buf, unsigned int size)
+static inline void cpu_to_le16_buf(u16 *buf, uint nwords)
{
- size /= 2;
- while (size--)
- *(__le16 *)(buf + size) = cpu_to_le16(*(buf + size));
+ while (nwords--)
+ *(__le16 *)(buf + nwords) = cpu_to_le16(*(buf + nwords));
}
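Note the renamed helpers take a count of 16-bit words rather than a byte size, so the divide-by-two disappears from the helpers themselves. A short sketch of the calling convention (buffer contents made up):

u16 words[2] = { 0x1234, 0xabcd };

cpu_to_le16_buf(words, ARRAY_SIZE(words));  /* 2 words, not 4 bytes */
/* ... hand the little-endian buffer to crc8() or similar ... */
le16_to_cpu_buf(words, ARRAY_SIZE(words));  /* back to CPU byte order */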
/*
@@ -929,11 +627,14 @@ _initvars_srom_pci(u8 sromrev, u16 *srom, struct list_head *var_list)
struct brcms_srom_list_head *entry;
enum brcms_srom_id id;
u16 w;
- u32 val;
+ u32 val = 0;
const struct brcms_sromvar *srv;
uint width;
uint flags;
u32 sr = (1 << sromrev);
+ uint p;
+ uint pb = SROM8_PATH0;
+ const uint psz = SROM8_PATH1 - SROM8_PATH0;
/* first store the srom revision */
entry = kzalloc(sizeof(struct brcms_srom_list_head), GFP_KERNEL);
@@ -1031,90 +732,93 @@ _initvars_srom_pci(u8 sromrev, u16 *srom, struct list_head *var_list)
list_add(&entry->var_list, var_list);
}
- if (sromrev >= 4) {
- /* Do per-path variables */
- uint p, pb, psz;
-
- if (sromrev >= 8) {
- pb = SROM8_PATH0;
- psz = SROM8_PATH1 - SROM8_PATH0;
- } else {
- pb = SROM4_PATH0;
- psz = SROM4_PATH1 - SROM4_PATH0;
- }
-
- for (p = 0; p < MAX_PATH_SROM; p++) {
- for (srv = perpath_pci_sromvars;
- srv->varid != BRCMS_SROM_NULL; srv++) {
- if ((srv->revmask & sr) == 0)
- continue;
+ for (p = 0; p < MAX_PATH_SROM; p++) {
+ for (srv = perpath_pci_sromvars;
+ srv->varid != BRCMS_SROM_NULL; srv++) {
+ if ((srv->revmask & sr) == 0)
+ continue;
- if (srv->flags & SRFL_NOVAR)
- continue;
+ if (srv->flags & SRFL_NOVAR)
+ continue;
- w = srom[pb + srv->off];
- val = (w & srv->mask) >> mask_shift(srv->mask);
- width = mask_width(srv->mask);
+ w = srom[pb + srv->off];
+ val = (w & srv->mask) >> mask_shift(srv->mask);
+ width = mask_width(srv->mask);
- /* Cheating: no per-path var is more than
- * 1 word */
- if ((srv->flags & SRFL_NOFFS)
- && ((int)val == (1 << width) - 1))
- continue;
+ /* Cheating: no per-path var is more than
+ * 1 word */
+ if ((srv->flags & SRFL_NOFFS)
+ && ((int)val == (1 << width) - 1))
+ continue;
- entry =
- kzalloc(sizeof(struct brcms_srom_list_head),
- GFP_KERNEL);
- entry->varid = srv->varid+p;
- entry->var_type = BRCMS_SROM_UNUMBER;
- entry->uval = val;
- list_add(&entry->var_list, var_list);
- }
- pb += psz;
+ entry =
+ kzalloc(sizeof(struct brcms_srom_list_head),
+ GFP_KERNEL);
+ entry->varid = srv->varid+p;
+ entry->var_type = BRCMS_SROM_UNUMBER;
+ entry->uval = val;
+ list_add(&entry->var_list, var_list);
}
+ pb += psz;
}
}
/*
+ * The crc check is done on a little-endian array, so we need
+ * to switch the bytes around before checking the crc (and
+ * then switch them back).
+ */
+static int do_crc_check(u16 *buf, unsigned nwords)
+{
+ u8 crc;
+
+ cpu_to_le16_buf(buf, nwords);
+ crc = crc8(brcms_srom_crc8_table, (void *)buf, nwords << 1, CRC8_INIT_VALUE);
+ le16_to_cpu_buf(buf, nwords);
+
+ return crc == CRC8_GOOD_VALUE(brcms_srom_crc8_table);
+}
+
+/*
* Read in and validate sprom.
* Return 0 on success, nonzero on error.
*/
static int
-sprom_read_pci(struct si_pub *sih, u16 __iomem *sprom, uint wordoff,
- u16 *buf, uint nwords, bool check_crc)
+sprom_read_pci(struct si_pub *sih, u16 *buf, uint nwords, bool check_crc)
{
int err = 0;
uint i;
+ struct bcma_device *core;
+ uint sprom_offset;
+
+ /* determine core to read */
+ if (ai_get_ccrev(sih) < 32) {
+ core = ai_findcore(sih, BCMA_CORE_80211, 0);
+ sprom_offset = PCI_BAR0_SPROM_OFFSET;
+ } else {
+ core = ai_findcore(sih, BCMA_CORE_CHIPCOMMON, 0);
+ sprom_offset = CHIPCREGOFFS(sromotp);
+ }
/* read the sprom */
for (i = 0; i < nwords; i++)
- buf[i] = R_REG(&sprom[wordoff + i]);
+ buf[i] = bcma_read16(core, sprom_offset+i*2);
- if (check_crc) {
-
- if (buf[0] == 0xffff)
- /*
- * The hardware thinks that an srom that starts with
- * 0xffff is blank, regardless of the rest of the
- * content, so declare it bad.
- */
- return -ENODATA;
+ if (buf[0] == 0xffff)
+ /*
+ * The hardware thinks that an srom that starts with
+ * 0xffff is blank, regardless of the rest of the
+ * content, so declare it bad.
+ */
+ return -ENODATA;
- /* fixup the endianness so crc8 will pass */
- htol16_buf(buf, nwords * 2);
- if (crc8(brcms_srom_crc8_table, (u8 *) buf, nwords * 2,
- CRC8_INIT_VALUE) !=
- CRC8_GOOD_VALUE(brcms_srom_crc8_table))
- /* DBG only pci always read srom4 first, then srom8/9 */
- err = -EIO;
+ if (check_crc && !do_crc_check(buf, nwords))
+ err = -EIO;
- /* now correct the endianness of the byte array */
- ltoh16_buf(buf, nwords * 2);
- }
return err;
}
-static int otp_read_pci(struct si_pub *sih, u16 *buf, uint bufsz)
+static int otp_read_pci(struct si_pub *sih, u16 *buf, uint nwords)
{
u8 *otp;
uint sz = OTP_SZ_MAX / 2; /* size in words */
@@ -1126,7 +830,8 @@ static int otp_read_pci(struct si_pub *sih, u16 *buf, uint bufsz)
err = otp_read_region(sih, OTP_HW_RGN, (u16 *) otp, &sz);
- memcpy(buf, otp, bufsz);
+ sz = min_t(uint, sz, nwords);
+ memcpy(buf, otp, sz * 2);
kfree(otp);
@@ -1139,13 +844,13 @@ static int otp_read_pci(struct si_pub *sih, u16 *buf, uint bufsz)
return -ENODATA;
/* fixup the endianness so crc8 will pass */
- htol16_buf(buf, bufsz);
- if (crc8(brcms_srom_crc8_table, (u8 *) buf, SROM4_WORDS * 2,
+ cpu_to_le16_buf(buf, sz);
+ if (crc8(brcms_srom_crc8_table, (u8 *) buf, sz * 2,
CRC8_INIT_VALUE) != CRC8_GOOD_VALUE(brcms_srom_crc8_table))
err = -EIO;
-
- /* now correct the endianness of the byte array */
- ltoh16_buf(buf, bufsz);
+ else
+ /* now correct the endianness of the byte array */
+ le16_to_cpu_buf(buf, sz);
return err;
}
@@ -1154,10 +859,9 @@ static int otp_read_pci(struct si_pub *sih, u16 *buf, uint bufsz)
* Initialize nonvolatile variable table from sprom.
* Return 0 on success, nonzero on error.
*/
-static int initvars_srom_pci(struct si_pub *sih, void __iomem *curmap)
+int srom_var_init(struct si_pub *sih)
{
u16 *srom;
- u16 __iomem *sromwindow;
u8 sromrev = 0;
u32 sr;
int err = 0;
@@ -1169,33 +873,17 @@ static int initvars_srom_pci(struct si_pub *sih, void __iomem *curmap)
if (!srom)
return -ENOMEM;
- sromwindow = srom_window_address(sih, curmap);
-
crc8_populate_lsb(brcms_srom_crc8_table, SROM_CRC8_POLY);
if (ai_is_sprom_available(sih)) {
- err = sprom_read_pci(sih, sromwindow, 0, srom, SROM_WORDS,
- true);
-
- if ((srom[SROM4_SIGN] == SROM4_SIGNATURE) ||
- (((sih->buscoretype == PCIE_CORE_ID)
- && (sih->buscorerev >= 6))
- || ((sih->buscoretype == PCI_CORE_ID)
- && (sih->buscorerev >= 0xe)))) {
- /* sromrev >= 4, read more */
- err = sprom_read_pci(sih, sromwindow, 0, srom,
- SROM4_WORDS, true);
- sromrev = srom[SROM4_CRCREV] & 0xff;
- } else if (err == 0) {
- /* srom is good and is rev < 4 */
+ err = sprom_read_pci(sih, srom, SROM4_WORDS, true);
+
+ if (err == 0)
+ /* srom read and passed crc */
/* top word of sprom contains version and crc8 */
- sromrev = srom[SROM_CRCREV] & 0xff;
- /* bcm4401 sroms misprogrammed */
- if (sromrev == 0x10)
- sromrev = 1;
- }
+ sromrev = srom[SROM4_CRCREV] & 0xff;
} else {
/* Use OTP if SPROM not available */
- err = otp_read_pci(sih, srom, SROM_MAX);
+ err = otp_read_pci(sih, srom, SROM4_WORDS);
if (err == 0)
/* OTP only contain SROM rev8/rev9 for now */
sromrev = srom[SROM4_CRCREV] & 0xff;
@@ -1208,10 +896,9 @@ static int initvars_srom_pci(struct si_pub *sih, void __iomem *curmap)
sr = 1 << sromrev;
/*
- * srom version check: Current valid versions: 1, 2, 3, 4, 5, 8,
- * 9
+ * srom version check: Current valid versions: 8, 9
*/
- if ((sr & 0x33e) == 0) {
+ if ((sr & 0x300) == 0) {
err = -EINVAL;
goto errout;
}
@@ -1238,21 +925,6 @@ void srom_free_vars(struct si_pub *sih)
kfree(entry);
}
}
-/*
- * Initialize local vars from the right source for this platform.
- * Return 0 on success, nonzero on error.
- */
-int srom_var_init(struct si_pub *sih, void __iomem *curmap)
-{
- uint len;
-
- len = 0;
-
- if (curmap != NULL)
- return initvars_srom_pci(sih, curmap);
-
- return -EINVAL;
-}
/*
* Search the name=value vars for a specific one and return its value.
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/srom.h b/drivers/net/wireless/brcm80211/brcmsmac/srom.h
index 708c43f..f2a58f2 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/srom.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/srom.h
@@ -20,15 +20,10 @@
#include "types.h"
/* Prototypes */
-extern int srom_var_init(struct si_pub *sih, void __iomem *curmap);
+extern int srom_var_init(struct si_pub *sih);
extern void srom_free_vars(struct si_pub *sih);
extern int srom_read(struct si_pub *sih, uint bus, void *curmap,
uint byteoff, uint nbytes, u16 *buf, bool check_crc);
-/* parse standard PCMCIA cis, normally used by SB/PCMCIA/SDIO/SPI/OTP
- * and extract from it into name=value pairs
- */
-extern int srom_parsecis(u8 **pcis, uint ciscnt,
- char **vars, uint *count);
#endif /* _BRCM_SROM_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/types.h b/drivers/net/wireless/brcm80211/brcmsmac/types.h
index 27a814b..e11ae83 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/types.h
+++ b/drivers/net/wireless/brcm80211/brcmsmac/types.h
@@ -250,66 +250,18 @@ do { \
wiphy_err(dev, "%s: " fmt, __func__, ##args); \
} while (0)
-/*
- * Register access macros.
- *
- * These macro's take a pointer to the address to read as one of their
- * arguments. The macro itself deduces the size of the IO transaction (u8, u16
- * or u32). Advantage of this approach in combination with using a struct to
- * define the registers in a register block, is that access size and access
- * location are defined in only one spot. This reduces the risk of the
- * programmer trying to use an unsupported transaction size on a register.
- *
- */
-
-#define R_REG(r) \
- ({ \
- __typeof(*(r)) __osl_v; \
- switch (sizeof(*(r))) { \
- case sizeof(u8): \
- __osl_v = readb((u8 __iomem *)(r)); \
- break; \
- case sizeof(u16): \
- __osl_v = readw((u16 __iomem *)(r)); \
- break; \
- case sizeof(u32): \
- __osl_v = readl((u32 __iomem *)(r)); \
- break; \
- } \
- __osl_v; \
- })
-
-#define W_REG(r, v) do { \
- switch (sizeof(*(r))) { \
- case sizeof(u8): \
- writeb((u8)((v) & 0xFF), (u8 __iomem *)(r)); \
- break; \
- case sizeof(u16): \
- writew((u16)((v) & 0xFFFF), (u16 __iomem *)(r)); \
- break; \
- case sizeof(u32): \
- writel((u32)(v), (u32 __iomem *)(r)); \
- break; \
- } \
- } while (0)
-
#ifdef CONFIG_BCM47XX
/*
* bcm4716 (which includes 4717 & 4718), plus 4706 on PCIe can reorder
* transactions. As a fix, a read after write is performed on certain places
* in the code. Older chips and the newer 5357 family don't require this fix.
*/
-#define W_REG_FLUSH(r, v) ({ W_REG((r), (v)); (void)R_REG(r); })
+#define bcma_wflush16(c, o, v) \
+ ({ bcma_write16(c, o, v); (void)bcma_read16(c, o); })
#else
-#define W_REG_FLUSH(r, v) W_REG((r), (v))
+#define bcma_wflush16(c, o, v) bcma_write16(c, o, v)
#endif /* CONFIG_BCM47XX */
-#define AND_REG(r, v) W_REG((r), R_REG(r) & (v))
-#define OR_REG(r, v) W_REG((r), R_REG(r) | (v))
-
-#define SET_REG(r, mask, val) \
- W_REG((r), ((R_REG(r) & ~(mask)) | (val)))
-
/* multi-bool data type: set of bools, mbool is true if any is set */
/* set one bool */
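With the W_REG()-style macros gone, callers use the bcma accessors directly, while W_REG_FLUSH() survives as bcma_wflush16() just above. The pmu.c hunks earlier in this patch show the mapping; roughly:

/* OR_REG(&cc->pmucontrol, PCTL_NOILP_ON_WAIT) becomes: */
bcma_set32(core, CHIPCREGOFFS(pmucontrol), PCTL_NOILP_ON_WAIT);

/* AND_REG(&cc->pmucontrol, ~PCTL_NOILP_ON_WAIT) becomes: */
bcma_mask32(core, CHIPCREGOFFS(pmucontrol), ~PCTL_NOILP_ON_WAIT);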
diff --git a/drivers/net/wireless/brcm80211/brcmutil/utils.c b/drivers/net/wireless/brcm80211/brcmutil/utils.c
index f27c489..b7537f7 100644
--- a/drivers/net/wireless/brcm80211/brcmutil/utils.c
+++ b/drivers/net/wireless/brcm80211/brcmutil/utils.c
@@ -16,6 +16,7 @@
#include <linux/netdevice.h>
#include <linux/module.h>
+
#include <brcmu_utils.h>
MODULE_AUTHOR("Broadcom Corporation");
@@ -40,74 +41,20 @@ EXPORT_SYMBOL(brcmu_pkt_buf_get_skb);
/* Free the driver packet. Free the tag if present */
void brcmu_pkt_buf_free_skb(struct sk_buff *skb)
{
- struct sk_buff *nskb;
- int nest = 0;
-
- /* perversion: we use skb->next to chain multi-skb packets */
- while (skb) {
- nskb = skb->next;
- skb->next = NULL;
-
- if (skb->destructor)
- /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
- * destructor exists
- */
- dev_kfree_skb_any(skb);
- else
- /* can free immediately (even in_irq()) if destructor
- * does not exist
- */
- dev_kfree_skb(skb);
-
- nest++;
- skb = nskb;
- }
+ WARN_ON(skb->next);
+ if (skb->destructor)
+ /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if
+ * destructor exists
+ */
+ dev_kfree_skb_any(skb);
+ else
+ /* can free immediately (even in_irq()) if destructor
+ * does not exist
+ */
+ dev_kfree_skb(skb);
}
EXPORT_SYMBOL(brcmu_pkt_buf_free_skb);
-
-/* copy a buffer into a pkt buffer chain */
-uint brcmu_pktfrombuf(struct sk_buff *p, uint offset, int len,
- unsigned char *buf)
-{
- uint n, ret = 0;
-
- /* skip 'offset' bytes */
- for (; p && offset; p = p->next) {
- if (offset < (uint) (p->len))
- break;
- offset -= p->len;
- }
-
- if (!p)
- return 0;
-
- /* copy the data */
- for (; p && len; p = p->next) {
- n = min((uint) (p->len) - offset, (uint) len);
- memcpy(p->data + offset, buf, n);
- buf += n;
- len -= n;
- ret += n;
- offset = 0;
- }
-
- return ret;
-}
-EXPORT_SYMBOL(brcmu_pktfrombuf);
-
-/* return total length of buffer chain */
-uint brcmu_pkttotlen(struct sk_buff *p)
-{
- uint total;
-
- total = 0;
- for (; p; p = p->next)
- total += p->len;
- return total;
-}
-EXPORT_SYMBOL(brcmu_pkttotlen);
-
/*
* osl multiple-precedence packet queue
* hi_prec is always >= the number of the highest non-empty precedence
@@ -115,21 +62,13 @@ EXPORT_SYMBOL(brcmu_pkttotlen);
struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec,
struct sk_buff *p)
{
- struct pktq_prec *q;
+ struct sk_buff_head *q;
if (pktq_full(pq) || pktq_pfull(pq, prec))
return NULL;
- q = &pq->q[prec];
-
- if (q->head)
- q->tail->prev = p;
- else
- q->head = p;
-
- q->tail = p;
- q->len++;
-
+ q = &pq->q[prec].skblist;
+ skb_queue_tail(q, p);
pq->len++;
if (pq->hi_prec < prec)
@@ -142,20 +81,13 @@ EXPORT_SYMBOL(brcmu_pktq_penq);
struct sk_buff *brcmu_pktq_penq_head(struct pktq *pq, int prec,
struct sk_buff *p)
{
- struct pktq_prec *q;
+ struct sk_buff_head *q;
if (pktq_full(pq) || pktq_pfull(pq, prec))
return NULL;
- q = &pq->q[prec];
-
- if (q->head == NULL)
- q->tail = p;
-
- p->prev = q->head;
- q->head = p;
- q->len++;
-
+ q = &pq->q[prec].skblist;
+ skb_queue_head(q, p);
pq->len++;
if (pq->hi_prec < prec)
@@ -167,53 +99,30 @@ EXPORT_SYMBOL(brcmu_pktq_penq_head);
struct sk_buff *brcmu_pktq_pdeq(struct pktq *pq, int prec)
{
- struct pktq_prec *q;
+ struct sk_buff_head *q;
struct sk_buff *p;
- q = &pq->q[prec];
-
- p = q->head;
+ q = &pq->q[prec].skblist;
+ p = skb_dequeue(q);
if (p == NULL)
return NULL;
- q->head = p->prev;
- if (q->head == NULL)
- q->tail = NULL;
-
- q->len--;
-
pq->len--;
-
- p->prev = NULL;
-
return p;
}
EXPORT_SYMBOL(brcmu_pktq_pdeq);
struct sk_buff *brcmu_pktq_pdeq_tail(struct pktq *pq, int prec)
{
- struct pktq_prec *q;
- struct sk_buff *p, *prev;
-
- q = &pq->q[prec];
+ struct sk_buff_head *q;
+ struct sk_buff *p;
- p = q->head;
+ q = &pq->q[prec].skblist;
+ p = skb_dequeue_tail(q);
if (p == NULL)
return NULL;
- for (prev = NULL; p != q->tail; p = p->prev)
- prev = p;
-
- if (prev)
- prev->prev = NULL;
- else
- q->head = NULL;
-
- q->tail = prev;
- q->len--;
-
pq->len--;
-
return p;
}
EXPORT_SYMBOL(brcmu_pktq_pdeq_tail);
@@ -222,31 +131,17 @@ void
brcmu_pktq_pflush(struct pktq *pq, int prec, bool dir,
bool (*fn)(struct sk_buff *, void *), void *arg)
{
- struct pktq_prec *q;
- struct sk_buff *p, *prev = NULL;
+ struct sk_buff_head *q;
+ struct sk_buff *p, *next;
- q = &pq->q[prec];
- p = q->head;
- while (p) {
+ q = &pq->q[prec].skblist;
+ skb_queue_walk_safe(q, p, next) {
if (fn == NULL || (*fn) (p, arg)) {
- bool head = (p == q->head);
- if (head)
- q->head = p->prev;
- else
- prev->prev = p->prev;
- p->prev = NULL;
+ skb_unlink(p, q);
brcmu_pkt_buf_free_skb(p);
- q->len--;
pq->len--;
- p = (head ? q->head : prev->prev);
- } else {
- prev = p;
- p = p->prev;
}
}
-
- if (q->head == NULL)
- q->tail = NULL;
}
EXPORT_SYMBOL(brcmu_pktq_pflush);
@@ -271,8 +166,10 @@ void brcmu_pktq_init(struct pktq *pq, int num_prec, int max_len)
pq->max = (u16) max_len;
- for (prec = 0; prec < num_prec; prec++)
+ for (prec = 0; prec < num_prec; prec++) {
pq->q[prec].max = pq->max;
+ skb_queue_head_init(&pq->q[prec].skblist);
+ }
}
EXPORT_SYMBOL(brcmu_pktq_init);
@@ -284,13 +181,13 @@ struct sk_buff *brcmu_pktq_peek_tail(struct pktq *pq, int *prec_out)
return NULL;
for (prec = 0; prec < pq->hi_prec; prec++)
- if (pq->q[prec].head)
+ if (!skb_queue_empty(&pq->q[prec].skblist))
break;
if (prec_out)
*prec_out = prec;
- return pq->q[prec].tail;
+ return skb_peek_tail(&pq->q[prec].skblist);
}
EXPORT_SYMBOL(brcmu_pktq_peek_tail);
@@ -303,7 +200,7 @@ int brcmu_pktq_mlen(struct pktq *pq, uint prec_bmp)
for (prec = 0; prec <= pq->hi_prec; prec++)
if (prec_bmp & (1 << prec))
- len += pq->q[prec].len;
+ len += pq->q[prec].skblist.qlen;
return len;
}
@@ -313,39 +210,32 @@ EXPORT_SYMBOL(brcmu_pktq_mlen);
struct sk_buff *brcmu_pktq_mdeq(struct pktq *pq, uint prec_bmp,
int *prec_out)
{
- struct pktq_prec *q;
+ struct sk_buff_head *q;
struct sk_buff *p;
int prec;
if (pq->len == 0)
return NULL;
- while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ while ((prec = pq->hi_prec) > 0 &&
+ skb_queue_empty(&pq->q[prec].skblist))
pq->hi_prec--;
- while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL)
+ while ((prec_bmp & (1 << prec)) == 0 ||
+ skb_queue_empty(&pq->q[prec].skblist))
if (prec-- == 0)
return NULL;
- q = &pq->q[prec];
-
- p = q->head;
+ q = &pq->q[prec].skblist;
+ p = skb_dequeue(q);
if (p == NULL)
return NULL;
- q->head = p->prev;
- if (q->head == NULL)
- q->tail = NULL;
-
- q->len--;
+ pq->len--;
if (prec_out)
*prec_out = prec;
- pq->len--;
-
- p->prev = NULL;
-
return p;
}
EXPORT_SYMBOL(brcmu_pktq_mdeq);
@@ -364,23 +254,3 @@ void brcmu_prpkt(const char *msg, struct sk_buff *p0)
}
EXPORT_SYMBOL(brcmu_prpkt);
#endif /* defined(BCMDBG) */
-
-#if defined(BCMDBG)
-/*
- * print bytes formatted as hex to a string. return the resulting
- * string length
- */
-int brcmu_format_hex(char *str, const void *bytes, int len)
-{
- int i;
- char *p = str;
- const u8 *src = (const u8 *)bytes;
-
- for (i = 0; i < len; i++) {
- p += snprintf(p, 3, "%02X", *src);
- src++;
- }
- return (int)(p - str);
-}
-EXPORT_SYMBOL(brcmu_format_hex);
-#endif /* defined(BCMDBG) */
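The queue rework above replaces the hand-rolled head/tail/len bookkeeping with the kernel's struct sk_buff_head, so insertion, removal and length tracking come from the standard skbuff API. In outline (skb is a placeholder for a packet already in hand):

struct sk_buff_head q;
struct sk_buff *p;

skb_queue_head_init(&q);        /* replaces zeroing head/tail/len        */
skb_queue_tail(&q, skb);        /* replaces manual tail linking + len++  */
p = skb_dequeue(&q);            /* replaces popping head, fixing up tail */
WARN_ON(!skb_queue_empty(&q));  /* replaces the head == NULL test        */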
diff --git a/drivers/net/wireless/brcm80211/include/brcmu_utils.h b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
index 7d0f46e..ad249a0 100644
--- a/drivers/net/wireless/brcm80211/include/brcmu_utils.h
+++ b/drivers/net/wireless/brcm80211/include/brcmu_utils.h
@@ -65,9 +65,7 @@
#define ETHER_ADDR_STR_LEN 18
struct pktq_prec {
- struct sk_buff *head; /* first packet to dequeue */
- struct sk_buff *tail; /* last packet to dequeue */
- u16 len; /* number of queued packets */
+ struct sk_buff_head skblist;
u16 max; /* maximum number of queued packets */
};
@@ -88,32 +86,32 @@ struct pktq {
static inline int pktq_plen(struct pktq *pq, int prec)
{
- return pq->q[prec].len;
+ return pq->q[prec].skblist.qlen;
}
static inline int pktq_pavail(struct pktq *pq, int prec)
{
- return pq->q[prec].max - pq->q[prec].len;
+ return pq->q[prec].max - pq->q[prec].skblist.qlen;
}
static inline bool pktq_pfull(struct pktq *pq, int prec)
{
- return pq->q[prec].len >= pq->q[prec].max;
+ return pq->q[prec].skblist.qlen >= pq->q[prec].max;
}
static inline bool pktq_pempty(struct pktq *pq, int prec)
{
- return pq->q[prec].len == 0;
+ return skb_queue_empty(&pq->q[prec].skblist);
}
static inline struct sk_buff *pktq_ppeek(struct pktq *pq, int prec)
{
- return pq->q[prec].head;
+ return skb_peek(&pq->q[prec].skblist);
}
static inline struct sk_buff *pktq_ppeek_tail(struct pktq *pq, int prec)
{
- return pq->q[prec].tail;
+ return skb_peek_tail(&pq->q[prec].skblist);
}
extern struct sk_buff *brcmu_pktq_penq(struct pktq *pq, int prec,
@@ -172,24 +170,16 @@ extern void brcmu_pktq_flush(struct pktq *pq, bool dir,
bool (*fn)(struct sk_buff *, void *), void *arg);
/* externs */
-/* packet */
-extern uint brcmu_pktfrombuf(struct sk_buff *p,
- uint offset, int len, unsigned char *buf);
-extern uint brcmu_pkttotlen(struct sk_buff *p);
-
/* ip address */
struct ipv4_addr;
+
+/* externs */
+/* format/print */
#ifdef BCMDBG
extern void brcmu_prpkt(const char *msg, struct sk_buff *p0);
#else
#define brcmu_prpkt(a, b)
#endif /* BCMDBG */
-/* externs */
-/* format/print */
-#if defined(BCMDBG)
-extern int brcmu_format_hex(char *str, const void *bytes, int len);
-#endif
-
#endif /* _BRCMU_UTILS_H_ */
diff --git a/drivers/net/wireless/brcm80211/include/chipcommon.h b/drivers/net/wireless/brcm80211/include/chipcommon.h
index fefabc3..f96834a 100644
--- a/drivers/net/wireless/brcm80211/include/chipcommon.h
+++ b/drivers/net/wireless/brcm80211/include/chipcommon.h
@@ -19,6 +19,8 @@
#include "defs.h" /* for PAD macro */
+#define CHIPCREGOFFS(field) offsetof(struct chipcregs, field)
+
struct chipcregs {
u32 chipid; /* 0x0 */
u32 capabilities;
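CHIPCREGOFFS() turns a struct chipcregs field into the byte offset the bcma accessors expect, which is what lets the drivers above drop the __iomem pointer to the register block. For example (cc being the chipcommon bcma_device found via ai_findcore()):

u32 caps = bcma_read32(cc, CHIPCREGOFFS(capabilities));
u32 pst  = bcma_read32(cc, CHIPCREGOFFS(pmustatus));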
diff --git a/drivers/net/wireless/brcm80211/include/defs.h b/drivers/net/wireless/brcm80211/include/defs.h
index 1e5f310..f0d8c04 100644
--- a/drivers/net/wireless/brcm80211/include/defs.h
+++ b/drivers/net/wireless/brcm80211/include/defs.h
@@ -62,7 +62,6 @@
#define WL_RADIO_SW_DISABLE (1<<0)
#define WL_RADIO_HW_DISABLE (1<<1)
-#define WL_RADIO_MPC_DISABLE (1<<2)
/* some countries don't support any channel */
#define WL_RADIO_COUNTRY_DISABLE (1<<3)
diff --git a/drivers/net/wireless/brcm80211/include/soc.h b/drivers/net/wireless/brcm80211/include/soc.h
index 4fcb956..4e9b7e4 100644
--- a/drivers/net/wireless/brcm80211/include/soc.h
+++ b/drivers/net/wireless/brcm80211/include/soc.h
@@ -77,8 +77,9 @@
#define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */
#define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */
#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */
-/* Default component, in ai chips it maps all unused address ranges */
-#define DEF_AI_COMP 0xfff
+#define DEF_AI_COMP 0xfff /* Default component, in ai chips it
+ * maps all unused address ranges
+ */
/* Common core control flags */
#define SICF_BIST_EN 0x8000
@@ -87,4 +88,11 @@
#define SICF_FGC 0x0002
#define SICF_CLOCK_EN 0x0001
+/* Common core status flags */
+#define SISF_BIST_DONE 0x8000
+#define SISF_BIST_ERROR 0x4000
+#define SISF_GATED_CLK 0x2000
+#define SISF_DMA64 0x1000
+#define SISF_CORE_BITS 0x0fff
+
#endif /* _BRCM_SOC_H */
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index 5441ad1..89e9d3a 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -656,6 +656,9 @@ static const struct pcmcia_device_id hostap_cs_ids[] = {
"Addtron", "AWP-100 Wireless PCMCIA", "Version 01.02",
0xe6ec52ce, 0x08649af2, 0x4b74baa0),
PCMCIA_DEVICE_PROD_ID123(
+ "Canon", "Wireless LAN CF Card K30225", "Version 01.00",
+ 0x96ef6fe2, 0x263fcbab, 0xa57adb8c),
+ PCMCIA_DEVICE_PROD_ID123(
"D", "Link DWL-650 11Mbps WLAN Card", "Version 01.02",
0x71b18589, 0xb6f1b0ab, 0x4b74baa0),
PCMCIA_DEVICE_PROD_ID123(
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index 045a936..18054d9 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -3872,8 +3872,8 @@ static void prism2_get_drvinfo(struct net_device *dev,
iface = netdev_priv(dev);
local = iface->local;
- strncpy(info->driver, "hostap", sizeof(info->driver) - 1);
- snprintf(info->fw_version, sizeof(info->fw_version) - 1,
+ strlcpy(info->driver, "hostap", sizeof(info->driver));
+ snprintf(info->fw_version, sizeof(info->fw_version),
"%d.%d.%d", (local->sta_fw_ver >> 16) & 0xff,
(local->sta_fw_ver >> 8) & 0xff,
local->sta_fw_ver & 0xff);
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 127e9c6..a0e5c21 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -5981,8 +5981,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
struct ipw2100_priv *priv = libipw_priv(dev);
char fw_ver[64], ucode_ver[64];
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
ipw2100_get_fwversion(priv, fw_ver, sizeof(fw_ver));
ipw2100_get_ucodeversion(priv, ucode_ver, sizeof(ucode_ver));
@@ -5990,7 +5990,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
snprintf(info->fw_version, sizeof(info->fw_version), "%s:%d:%s",
fw_ver, priv->eeprom_version, ucode_ver);
- strcpy(info->bus_info, pci_name(priv->pci_dev));
+ strlcpy(info->bus_info, pci_name(priv->pci_dev),
+ sizeof(info->bus_info));
}
static u32 ipw2100_ethtool_get_link(struct net_device *dev)
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 99a710d..4fcdac6 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -131,6 +131,14 @@ static struct ieee80211_rate ipw2200_rates[] = {
#define ipw2200_bg_rates (ipw2200_rates + 0)
#define ipw2200_num_bg_rates 12
+/* Ugly macro to convert literal channel numbers into their MHz equivalents.
+ * There are certainly some conditions that will break this (like feeding it '30'),
+ * but they shouldn't arise since nothing talks on channel 30. */
+#define ieee80211chan2mhz(x) \
+ (((x) <= 14) ? \
+ (((x) == 14) ? 2484 : ((x) * 5) + 2407) : \
+ ((x) + 1000) * 5)
+
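Written out as a plain function, the mapping the macro encodes is easier to follow; this is only a sketch and the helper name is illustrative, not part of the driver:

static unsigned int chan2mhz_sketch(unsigned int chan)
{
        if (chan == 14)
                return 2484;                    /* channel 14 is special-cased */
        if (chan < 14)
                return 2407 + chan * 5;         /* 2.4 GHz band: channel 1 -> 2412 */
        return (chan + 1000) * 5;               /* above 14: channel 36 -> 5180 */
}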
#ifdef CONFIG_IPW2200_QOS
static int qos_enable = 0;
static int qos_burst_enable = 0;
@@ -7840,7 +7848,7 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
* more efficiently than we can parse it. ORDER MATTERS HERE */
struct ipw_rt_hdr *ipw_rt;
- short len = le16_to_cpu(pkt->u.frame.length);
+ unsigned short len = le16_to_cpu(pkt->u.frame.length);
/* We received data from the HW, so stop the watchdog */
dev->trans_start = jiffies;
@@ -8015,7 +8023,7 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
s8 noise = (s8) le16_to_cpu(frame->noise);
u8 rate = frame->rate;
- short len = le16_to_cpu(pkt->u.frame.length);
+ unsigned short len = le16_to_cpu(pkt->u.frame.length);
struct sk_buff *skb;
int hdr_only = 0;
u16 filter = priv->prom_priv->filter;
@@ -10540,8 +10548,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
char date[32];
u32 len;
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
len = sizeof(vers);
ipw_get_ordinal(p, IPW_ORD_STAT_FW_VERSION, vers, &len);
@@ -10550,7 +10558,8 @@ static void ipw_ethtool_get_drvinfo(struct net_device *dev,
snprintf(info->fw_version, sizeof(info->fw_version), "%s (%s)",
vers, date);
- strcpy(info->bus_info, pci_name(p->pci_dev));
+ strlcpy(info->bus_info, pci_name(p->pci_dev),
+ sizeof(info->bus_info));
info->eedump_len = IPW_EEPROM_IMAGE_SIZE;
}
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index 70f5586..8874588 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -66,16 +66,8 @@ extern u32 libipw_debug_level;
do { if (libipw_debug_level & (level)) \
printk(KERN_DEBUG "libipw: %c %s " fmt, \
in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
-static inline bool libipw_ratelimit_debug(u32 level)
-{
- return (libipw_debug_level & level) && net_ratelimit();
-}
#else
#define LIBIPW_DEBUG(level, fmt, args...) do {} while (0)
-static inline bool libipw_ratelimit_debug(u32 level)
-{
- return false;
-}
#endif /* CONFIG_LIBIPW_DEBUG */
/*
@@ -813,9 +805,6 @@ struct libipw_device {
/* WEP and other encryption related settings at the device level */
int open_wep; /* Set to 1 to allow unencrypted frames */
- int reset_on_keychange; /* Set to 1 if the HW needs to be reset on
- * WEP key changes */
-
/* If the host performs {en,de}cryption, then set to 1 */
int host_encrypt;
int host_encrypt_msdu;
@@ -868,7 +857,6 @@ struct libipw_device {
struct libipw_security * sec);
netdev_tx_t (*hard_start_xmit) (struct libipw_txb * txb,
struct net_device * dev, int pri);
- int (*reset_port) (struct net_device * dev);
int (*is_queue_full) (struct net_device * dev, int pri);
int (*handle_management) (struct net_device * dev,
diff --git a/drivers/net/wireless/ipw2x00/libipw_wx.c b/drivers/net/wireless/ipw2x00/libipw_wx.c
index 6623e50..1571505 100644
--- a/drivers/net/wireless/ipw2x00/libipw_wx.c
+++ b/drivers/net/wireless/ipw2x00/libipw_wx.c
@@ -474,17 +474,6 @@ int libipw_wx_set_encode(struct libipw_device *ieee,
if (ieee->set_security)
ieee->set_security(dev, &sec);
- /* Do not reset port if card is in Managed mode since resetting will
- * generate new IEEE 802.11 authentication which may end up in looping
- * with IEEE 802.1X. If your hardware requires a reset after WEP
- * configuration (for example... Prism2), implement the reset_port in
- * the callbacks structures used to initialize the 802.11 stack. */
- if (ieee->reset_on_keychange &&
- ieee->iw_mode != IW_MODE_INFRA &&
- ieee->reset_port && ieee->reset_port(dev)) {
- printk(KERN_DEBUG "%s: reset_port failed\n", dev->name);
- return -EINVAL;
- }
return 0;
}
@@ -688,20 +677,6 @@ int libipw_wx_set_encodeext(struct libipw_device *ieee,
if (ieee->set_security)
ieee->set_security(ieee->dev, &sec);
- /*
- * Do not reset port if card is in Managed mode since resetting will
- * generate new IEEE 802.11 authentication which may end up in looping
- * with IEEE 802.1X. If your hardware requires a reset after WEP
- * configuration (for example... Prism2), implement the reset_port in
- * the callbacks structures used to initialize the 802.11 stack.
- */
- if (ieee->reset_on_keychange &&
- ieee->iw_mode != IW_MODE_INFRA &&
- ieee->reset_port && ieee->reset_port(dev)) {
- LIBIPW_DEBUG_WX("%s: reset_port failed\n", dev->name);
- return -EINVAL;
- }
-
return ret;
}
diff --git a/drivers/net/wireless/iwlegacy/3945-debug.c b/drivers/net/wireless/iwlegacy/3945-debug.c
new file mode 100644
index 0000000..5e1a19f
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945-debug.c
@@ -0,0 +1,505 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include "common.h"
+#include "3945.h"
+
+static int
+il3945_stats_flag(struct il_priv *il, char *buf, int bufsz)
+{
+ int p = 0;
+
+ p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
+ le32_to_cpu(il->_3945.stats.flag));
+ if (le32_to_cpu(il->_3945.stats.flag) & UCODE_STATS_CLEAR_MSK)
+ p += scnprintf(buf + p, bufsz - p,
+ "\tStatistics have been cleared\n");
+ p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
+ (le32_to_cpu(il->_3945.stats.flag) &
+ UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" : "5.2 GHz");
+ p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
+ (le32_to_cpu(il->_3945.stats.flag) &
+ UCODE_STATS_NARROW_BAND_MSK) ? "enabled" : "disabled");
+ return p;
+}
+
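The helper above appends into one buffer by tracking an offset: scnprintf() returns the number of characters actually written and never writes past the size it is given, so the offset can be advanced without overflowing. A minimal sketch of the same idiom in kernel context, with illustrative names:

static int sketch_fill_report(char *buf, int bufsz)
{
        int p = 0;

        /* each call appends at buf + p and may use at most bufsz - p bytes */
        p += scnprintf(buf + p, bufsz - p, "line one\n");
        p += scnprintf(buf + p, bufsz - p, "line two: %d\n", 42);

        return p;       /* total characters written, excluding the trailing NUL */
}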
+ssize_t
+il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz =
+ sizeof(struct iwl39_stats_rx_phy) * 40 +
+ sizeof(struct iwl39_stats_rx_non_phy) * 40 + 400;
+ ssize_t ret;
+ struct iwl39_stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+ struct iwl39_stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
+ struct iwl39_stats_rx_non_phy *general, *accum_general;
+ struct iwl39_stats_rx_non_phy *delta_general, *max_general;
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * The statistics displayed here are based on the last stats
+ * notification from the uCode and might not reflect its current
+ * activity.
+ */
+ ofdm = &il->_3945.stats.rx.ofdm;
+ cck = &il->_3945.stats.rx.cck;
+ general = &il->_3945.stats.rx.general;
+ accum_ofdm = &il->_3945.accum_stats.rx.ofdm;
+ accum_cck = &il->_3945.accum_stats.rx.cck;
+ accum_general = &il->_3945.accum_stats.rx.general;
+ delta_ofdm = &il->_3945.delta_stats.rx.ofdm;
+ delta_cck = &il->_3945.delta_stats.rx.cck;
+ delta_general = &il->_3945.delta_stats.rx.general;
+ max_ofdm = &il->_3945.max_delta.rx.ofdm;
+ max_cck = &il->_3945.max_delta.rx.cck;
+ max_general = &il->_3945.max_delta.rx.general;
+
+ pos += il3945_stats_flag(il, buf, bufsz);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_Rx - OFDM:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "ina_cnt:",
+ le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt,
+ delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_cnt:",
+ le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+ delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "plcp_err:",
+ le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+ delta_ofdm->plcp_err, max_ofdm->plcp_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "crc32_err:",
+ le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+ delta_ofdm->crc32_err, max_ofdm->crc32_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "overrun_err:",
+ le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err,
+ delta_ofdm->overrun_err, max_ofdm->overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "early_overrun_err:",
+ le32_to_cpu(ofdm->early_overrun_err),
+ accum_ofdm->early_overrun_err,
+ delta_ofdm->early_overrun_err,
+ max_ofdm->early_overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "crc32_good:",
+ le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good,
+ delta_ofdm->crc32_good, max_ofdm->crc32_good);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
+ le32_to_cpu(ofdm->false_alarm_cnt),
+ accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt,
+ max_ofdm->false_alarm_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_sync_err_cnt:",
+ le32_to_cpu(ofdm->fina_sync_err_cnt),
+ accum_ofdm->fina_sync_err_cnt,
+ delta_ofdm->fina_sync_err_cnt,
+ max_ofdm->fina_sync_err_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sfd_timeout:",
+ le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout,
+ delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_timeout:",
+ le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout,
+ delta_ofdm->fina_timeout, max_ofdm->fina_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "unresponded_rts:",
+ le32_to_cpu(ofdm->unresponded_rts),
+ accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts,
+ max_ofdm->unresponded_rts);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(ofdm->rxe_frame_limit_overrun),
+ accum_ofdm->rxe_frame_limit_overrun,
+ delta_ofdm->rxe_frame_limit_overrun,
+ max_ofdm->rxe_frame_limit_overrun);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:",
+ le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt,
+ delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:",
+ le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt,
+ delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_Rx - CCK:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "ina_cnt:",
+ le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+ delta_cck->ina_cnt, max_cck->ina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_cnt:",
+ le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+ delta_cck->fina_cnt, max_cck->fina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "plcp_err:",
+ le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+ delta_cck->plcp_err, max_cck->plcp_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "crc32_err:",
+ le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+ delta_cck->crc32_err, max_cck->crc32_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "overrun_err:",
+ le32_to_cpu(cck->overrun_err), accum_cck->overrun_err,
+ delta_cck->overrun_err, max_cck->overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "early_overrun_err:",
+ le32_to_cpu(cck->early_overrun_err),
+ accum_cck->early_overrun_err,
+ delta_cck->early_overrun_err, max_cck->early_overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "crc32_good:",
+ le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+ delta_cck->crc32_good, max_cck->crc32_good);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
+ le32_to_cpu(cck->false_alarm_cnt),
+ accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt,
+ max_cck->false_alarm_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_sync_err_cnt:",
+ le32_to_cpu(cck->fina_sync_err_cnt),
+ accum_cck->fina_sync_err_cnt,
+ delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sfd_timeout:",
+ le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout,
+ delta_cck->sfd_timeout, max_cck->sfd_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "fina_timeout:",
+ le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout,
+ delta_cck->fina_timeout, max_cck->fina_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "unresponded_rts:",
+ le32_to_cpu(cck->unresponded_rts),
+ accum_cck->unresponded_rts, delta_cck->unresponded_rts,
+ max_cck->unresponded_rts);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(cck->rxe_frame_limit_overrun),
+ accum_cck->rxe_frame_limit_overrun,
+ delta_cck->rxe_frame_limit_overrun,
+ max_cck->rxe_frame_limit_overrun);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_ack_cnt:",
+ le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt,
+ delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sent_cts_cnt:",
+ le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt,
+ delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_Rx - GENERAL:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "bogus_cts:",
+ le32_to_cpu(general->bogus_cts), accum_general->bogus_cts,
+ delta_general->bogus_cts, max_general->bogus_cts);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "bogus_ack:",
+ le32_to_cpu(general->bogus_ack), accum_general->bogus_ack,
+ delta_general->bogus_ack, max_general->bogus_ack);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "non_bssid_frames:",
+ le32_to_cpu(general->non_bssid_frames),
+ accum_general->non_bssid_frames,
+ delta_general->non_bssid_frames,
+ max_general->non_bssid_frames);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "filtered_frames:",
+ le32_to_cpu(general->filtered_frames),
+ accum_general->filtered_frames,
+ delta_general->filtered_frames,
+ max_general->filtered_frames);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n",
+ "non_channel_beacons:",
+ le32_to_cpu(general->non_channel_beacons),
+ accum_general->non_channel_beacons,
+ delta_general->non_channel_beacons,
+ max_general->non_channel_beacons);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t
+il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = (sizeof(struct iwl39_stats_tx) * 48) + 250;
+ ssize_t ret;
+ struct iwl39_stats_tx *tx, *accum_tx, *delta_tx, *max_tx;
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * The statistics displayed here are based on the last stats
+ * notification from the uCode and might not reflect its current
+ * activity.
+ */
+ tx = &il->_3945.stats.tx;
+ accum_tx = &il->_3945.accum_stats.tx;
+ delta_tx = &il->_3945.delta_stats.tx;
+ max_tx = &il->_3945.max_delta.tx;
+ pos += il3945_stats_flag(il, buf, bufsz);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_Tx:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "preamble:",
+ le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt,
+ delta_tx->preamble_cnt, max_tx->preamble_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "rx_detected_cnt:",
+ le32_to_cpu(tx->rx_detected_cnt),
+ accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt,
+ max_tx->rx_detected_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "bt_prio_defer_cnt:",
+ le32_to_cpu(tx->bt_prio_defer_cnt),
+ accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt,
+ max_tx->bt_prio_defer_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "bt_prio_kill_cnt:",
+ le32_to_cpu(tx->bt_prio_kill_cnt),
+ accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt,
+ max_tx->bt_prio_kill_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "few_bytes_cnt:",
+ le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt,
+ delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "cts_timeout:",
+ le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+ delta_tx->cts_timeout, max_tx->cts_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "ack_timeout:",
+ le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout,
+ delta_tx->ack_timeout, max_tx->ack_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "expected_ack_cnt:",
+ le32_to_cpu(tx->expected_ack_cnt),
+ accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt,
+ max_tx->expected_ack_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "actual_ack_cnt:",
+ le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt,
+ delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t
+il3945_ucode_general_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = sizeof(struct iwl39_stats_general) * 10 + 300;
+ ssize_t ret;
+ struct iwl39_stats_general *general, *accum_general;
+ struct iwl39_stats_general *delta_general, *max_general;
+ struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+ struct iwl39_stats_div *div, *accum_div, *delta_div, *max_div;
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * The statistics displayed here are based on the last stats
+ * notification from the uCode and might not reflect its current
+ * activity.
+ */
+ general = &il->_3945.stats.general;
+ dbg = &il->_3945.stats.general.dbg;
+ div = &il->_3945.stats.general.div;
+ accum_general = &il->_3945.accum_stats.general;
+ delta_general = &il->_3945.delta_stats.general;
+ max_general = &il->_3945.max_delta.general;
+ accum_dbg = &il->_3945.accum_stats.general.dbg;
+ delta_dbg = &il->_3945.delta_stats.general.dbg;
+ max_dbg = &il->_3945.max_delta.general.dbg;
+ accum_div = &il->_3945.accum_stats.general.div;
+ delta_div = &il->_3945.delta_stats.general.div;
+ max_div = &il->_3945.max_delta.general.div;
+ pos += il3945_stats_flag(il, buf, bufsz);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%-32s current"
+ "acumulative delta max\n",
+ "Statistics_General:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "burst_check:",
+ le32_to_cpu(dbg->burst_check), accum_dbg->burst_check,
+ delta_dbg->burst_check, max_dbg->burst_check);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "burst_count:",
+ le32_to_cpu(dbg->burst_count), accum_dbg->burst_count,
+ delta_dbg->burst_count, max_dbg->burst_count);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "sleep_time:",
+ le32_to_cpu(general->sleep_time),
+ accum_general->sleep_time, delta_general->sleep_time,
+ max_general->sleep_time);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "slots_out:",
+ le32_to_cpu(general->slots_out), accum_general->slots_out,
+ delta_general->slots_out, max_general->slots_out);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "slots_idle:",
+ le32_to_cpu(general->slots_idle),
+ accum_general->slots_idle, delta_general->slots_idle,
+ max_general->slots_idle);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
+ le32_to_cpu(general->ttl_timestamp));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "tx_on_a:",
+ le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+ delta_div->tx_on_a, max_div->tx_on_a);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "tx_on_b:",
+ le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+ delta_div->tx_on_b, max_div->tx_on_b);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "exec_time:",
+ le32_to_cpu(div->exec_time), accum_div->exec_time,
+ delta_div->exec_time, max_div->exec_time);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " %-30s %10u %10u %10u %10u\n", "probe_time:",
+ le32_to_cpu(div->probe_time), accum_div->probe_time,
+ delta_div->probe_time, max_div->probe_time);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
new file mode 100644
index 0000000..a7dfba8
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
@@ -0,0 +1,3976 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/ieee80211_radiotap.h>
+#include <net/mac80211.h>
+
+#include <asm/div64.h>
+
+#define DRV_NAME "iwl3945"
+
+#include "commands.h"
+#include "common.h"
+#include "3945.h"
+#include "iwl-spectrum.h"
+
+/*
+ * module name, copyright, version, etc.
+ */
+
+#define DRV_DESCRIPTION \
+"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+#define VD "d"
+#else
+#define VD
+#endif
+
+/*
+ * add "s" to indicate spectrum measurement included.
+ * we add it here to be consistent with previous releases in which
+ * this was configurable.
+ */
+#define DRV_VERSION IWLWIFI_VERSION VD "s"
+#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
+#define DRV_AUTHOR "<ilw@linux.intel.com>"
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+
+ /* module parameters */
+struct il_mod_params il3945_mod_params = {
+ .sw_crypto = 1,
+ .restart_fw = 1,
+ .disable_hw_scan = 1,
+ /* the rest are 0 by default */
+};
+
+/**
+ * il3945_get_antenna_flags - Get antenna flags for RXON command
+ * @il: eeprom and antenna fields are used to determine antenna flags
+ *
+ * il->eeprom39 is used to determine if antenna AUX/MAIN are reversed
+ * il3945_mod_params.antenna specifies the antenna diversity mode:
+ *
+ * IL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
+ * IL_ANTENNA_MAIN - Force MAIN antenna
+ * IL_ANTENNA_AUX - Force AUX antenna
+ */
+__le32
+il3945_get_antenna_flags(const struct il_priv *il)
+{
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+
+ switch (il3945_mod_params.antenna) {
+ case IL_ANTENNA_DIVERSITY:
+ return 0;
+
+ case IL_ANTENNA_MAIN:
+ if (eeprom->antenna_switch_type)
+ return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
+ return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
+
+ case IL_ANTENNA_AUX:
+ if (eeprom->antenna_switch_type)
+ return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
+ return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
+ }
+
+ /* bad antenna selector value */
+ IL_ERR("Bad antenna selector value (0x%x)\n",
+ il3945_mod_params.antenna);
+
+ return 0; /* "diversity" is default if error */
+}
+
+static int
+il3945_set_ccmp_dynamic_key_info(struct il_priv *il,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ unsigned long flags;
+ __le16 key_flags = 0;
+ int ret;
+
+ key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
+ key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+
+ if (sta_id == il->ctx.bcast_sta_id)
+ key_flags |= STA_KEY_MULTICAST_MSK;
+
+ keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ keyconf->hw_key_idx = keyconf->keyidx;
+ key_flags &= ~STA_KEY_FLG_INVALID;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+ il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+ memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
+
+ memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);
+
+ if ((il->stations[sta_id].sta.key.
+ key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
+ il->stations[sta_id].sta.key.key_offset =
+ il_get_free_ucode_key_idx(il);
+ /* else, we are overriding an existing key => no need to allocate room
+ * in uCode. */
+
+ WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ "no space for a new key");
+
+ il->stations[sta_id].sta.key.key_flags = key_flags;
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+ D_INFO("hwcrypto: modify ucode station key info\n");
+
+ ret = il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return ret;
+}
+
+static int
+il3945_set_tkip_dynamic_key_info(struct il_priv *il,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ return -EOPNOTSUPP;
+}
+
+static int
+il3945_set_wep_dynamic_key_info(struct il_priv *il,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ return -EOPNOTSUPP;
+}
+
+static int
+il3945_clear_sta_key_info(struct il_priv *il, u8 sta_id)
+{
+ unsigned long flags;
+ struct il_addsta_cmd sta_cmd;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
+ memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
+ il->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ D_INFO("hwcrypto: clear ucode station key info\n");
+ return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+static int
+il3945_set_dynamic_key(struct il_priv *il, struct ieee80211_key_conf *keyconf,
+ u8 sta_id)
+{
+ int ret = 0;
+
+ keyconf->hw_key_idx = HW_KEY_DYNAMIC;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ ret = il3945_set_ccmp_dynamic_key_info(il, keyconf, sta_id);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ ret = il3945_set_tkip_dynamic_key_info(il, keyconf, sta_id);
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ ret = il3945_set_wep_dynamic_key_info(il, keyconf, sta_id);
+ break;
+ default:
+ IL_ERR("Unknown alg: %s alg=%x\n", __func__, keyconf->cipher);
+ ret = -EINVAL;
+ }
+
+ D_WEP("Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
+ keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);
+
+ return ret;
+}
+
+static int
+il3945_remove_static_key(struct il_priv *il)
+{
+ int ret = -EOPNOTSUPP;
+
+ return ret;
+}
+
+static int
+il3945_set_static_key(struct il_priv *il, struct ieee80211_key_conf *key)
+{
+ if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104)
+ return -EOPNOTSUPP;
+
+ IL_ERR("Static key invalid: cipher %x\n", key->cipher);
+ return -EINVAL;
+}
+
+static void
+il3945_clear_free_frames(struct il_priv *il)
+{
+ struct list_head *element;
+
+ D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
+
+ while (!list_empty(&il->free_frames)) {
+ element = il->free_frames.next;
+ list_del(element);
+ kfree(list_entry(element, struct il3945_frame, list));
+ il->frames_count--;
+ }
+
+ if (il->frames_count) {
+ IL_WARN("%d frames still in use. Did we lose one?\n",
+ il->frames_count);
+ il->frames_count = 0;
+ }
+}
+
+static struct il3945_frame *
+il3945_get_free_frame(struct il_priv *il)
+{
+ struct il3945_frame *frame;
+ struct list_head *element;
+ if (list_empty(&il->free_frames)) {
+ frame = kzalloc(sizeof(*frame), GFP_KERNEL);
+ if (!frame) {
+ IL_ERR("Could not allocate frame!\n");
+ return NULL;
+ }
+
+ il->frames_count++;
+ return frame;
+ }
+
+ element = il->free_frames.next;
+ list_del(element);
+ return list_entry(element, struct il3945_frame, list);
+}
+
+static void
+il3945_free_frame(struct il_priv *il, struct il3945_frame *frame)
+{
+ memset(frame, 0, sizeof(*frame));
+ list_add(&frame->list, &il->free_frames);
+}
+
+unsigned int
+il3945_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
+ int left)
+{
+
+ if (!il_is_associated(il) || !il->beacon_skb)
+ return 0;
+
+ if (il->beacon_skb->len > left)
+ return 0;
+
+ memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
+
+ return il->beacon_skb->len;
+}
+
+static int
+il3945_send_beacon_cmd(struct il_priv *il)
+{
+ struct il3945_frame *frame;
+ unsigned int frame_size;
+ int rc;
+ u8 rate;
+
+ frame = il3945_get_free_frame(il);
+
+ if (!frame) {
+ IL_ERR("Could not obtain free frame buffer for beacon "
+ "command.\n");
+ return -ENOMEM;
+ }
+
+ rate = il_get_lowest_plcp(il, &il->ctx);
+
+ frame_size = il3945_hw_get_beacon_cmd(il, frame, rate);
+
+ rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
+
+ il3945_free_frame(il, frame);
+
+ return rc;
+}
+
+static void
+il3945_unset_hw_params(struct il_priv *il)
+{
+ if (il->_3945.shared_virt)
+ dma_free_coherent(&il->pci_dev->dev,
+ sizeof(struct il3945_shared),
+ il->_3945.shared_virt, il->_3945.shared_phys);
+}
+
+static void
+il3945_build_tx_cmd_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
+ struct il_device_cmd *cmd,
+ struct sk_buff *skb_frag, int sta_id)
+{
+ struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
+ struct il_hw_key *keyinfo = &il->stations[sta_id].keyinfo;
+
+ tx_cmd->sec_ctl = 0;
+
+ switch (keyinfo->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+ memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
+ D_TX("tx_cmd with AES hwcrypto\n");
+ break;
+
+ case WLAN_CIPHER_SUITE_TKIP:
+ break;
+
+ case WLAN_CIPHER_SUITE_WEP104:
+ tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+ /* fall through */
+ case WLAN_CIPHER_SUITE_WEP40:
+ tx_cmd->sec_ctl |=
+ TX_CMD_SEC_WEP | (info->control.hw_key->
+ hw_key_idx & TX_CMD_SEC_MSK) <<
+ TX_CMD_SEC_SHIFT;
+
+ memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
+
+ D_TX("Configuring packet for WEP encryption " "with key %d\n",
+ info->control.hw_key->hw_key_idx);
+ break;
+
+ default:
+ IL_ERR("Unknown encode cipher %x\n", keyinfo->cipher);
+ break;
+ }
+}
+
+/*
+ * handle build C_TX command notification.
+ */
+static void
+il3945_build_tx_cmd_basic(struct il_priv *il, struct il_device_cmd *cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr, u8 std_id)
+{
+ struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
+ __le32 tx_flags = tx_cmd->tx_flags;
+ __le16 fc = hdr->frame_control;
+
+ tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ tx_flags |= TX_CMD_FLG_ACK_MSK;
+ if (ieee80211_is_mgmt(fc))
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ if (ieee80211_is_probe_resp(fc) &&
+ !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
+ tx_flags |= TX_CMD_FLG_TSF_MSK;
+ } else {
+ tx_flags &= (~TX_CMD_FLG_ACK_MSK);
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+ tx_cmd->sta_id = std_id;
+ if (ieee80211_has_morefrags(fc))
+ tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
+
+ if (ieee80211_is_data_qos(fc)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tx_cmd->tid_tspec = qc[0] & 0xf;
+ tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+ } else {
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+ il_tx_cmd_protection(il, info, fc, &tx_flags);
+
+ tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
+ if (ieee80211_is_mgmt(fc)) {
+ if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+ tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
+ else
+ tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
+ } else {
+ tx_cmd->timeout.pm_frame_timeout = 0;
+ }
+
+ tx_cmd->driver_txop = 0;
+ tx_cmd->tx_flags = tx_flags;
+ tx_cmd->next_frame_len = 0;
+}
+
+/*
+ * start C_TX command process
+ */
+static int
+il3945_tx_skb(struct il_priv *il, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct il3945_tx_cmd *tx_cmd;
+ struct il_tx_queue *txq = NULL;
+ struct il_queue *q = NULL;
+ struct il_device_cmd *out_cmd;
+ struct il_cmd_meta *out_meta;
+ dma_addr_t phys_addr;
+ dma_addr_t txcmd_phys;
+ int txq_id = skb_get_queue_mapping(skb);
+ u16 len, idx, hdr_len;
+ u8 id;
+ u8 unicast;
+ u8 sta_id;
+ u8 tid = 0;
+ __le16 fc;
+ u8 wait_write_ptr = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&il->lock, flags);
+ if (il_is_rfkill(il)) {
+ D_DROP("Dropping - RF KILL\n");
+ goto drop_unlock;
+ }
+
+ if ((ieee80211_get_tx_rate(il->hw, info)->hw_value & 0xFF) ==
+ IL_INVALID_RATE) {
+ IL_ERR("ERROR: No TX rate available.\n");
+ goto drop_unlock;
+ }
+
+ unicast = !is_multicast_ether_addr(hdr->addr1);
+ id = 0;
+
+ fc = hdr->frame_control;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (ieee80211_is_auth(fc))
+ D_TX("Sending AUTH frame\n");
+ else if (ieee80211_is_assoc_req(fc))
+ D_TX("Sending ASSOC frame\n");
+ else if (ieee80211_is_reassoc_req(fc))
+ D_TX("Sending REASSOC frame\n");
+#endif
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ hdr_len = ieee80211_hdrlen(fc);
+
+ /* Find idx into station table for destination station */
+ sta_id = il_sta_id_or_broadcast(il, &il->ctx, info->control.sta);
+ if (sta_id == IL_INVALID_STATION) {
+ D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
+ goto drop;
+ }
+
+ D_RATE("station Id %d\n", sta_id);
+
+ if (ieee80211_is_data_qos(fc)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ if (unlikely(tid >= MAX_TID_COUNT))
+ goto drop;
+ }
+
+ /* Descriptor for chosen Tx queue */
+ txq = &il->txq[txq_id];
+ q = &txq->q;
+
+ if ((il_queue_space(q) < q->high_mark))
+ goto drop;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ idx = il_get_cmd_idx(q, q->write_ptr, 0);
+
+ /* Set up driver data for this TFD */
+ memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info));
+ txq->txb[q->write_ptr].skb = skb;
+ txq->txb[q->write_ptr].ctx = &il->ctx;
+
+ /* Init first empty entry in queue's array of Tx/cmd buffers */
+ out_cmd = txq->cmd[idx];
+ out_meta = &txq->meta[idx];
+ tx_cmd = (struct il3945_tx_cmd *)out_cmd->cmd.payload;
+ memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
+ memset(tx_cmd, 0, sizeof(*tx_cmd));
+
+ /*
+ * Set up the Tx-command (not MAC!) header.
+ * Store the chosen Tx queue and TFD idx within the sequence field;
+ * after Tx, uCode's Tx response will return this value so driver can
+ * locate the frame within the tx queue and do post-tx processing.
+ */
+ out_cmd->hdr.cmd = C_TX;
+ out_cmd->hdr.sequence =
+ cpu_to_le16((u16)
+ (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+ if (info->control.hw_key)
+ il3945_build_tx_cmd_hwcrypto(il, info, out_cmd, skb, sta_id);
+
+ /* TODO need this for burst mode later on */
+ il3945_build_tx_cmd_basic(il, out_cmd, info, hdr, sta_id);
+
+ il3945_hw_build_tx_cmd_rate(il, out_cmd, info, hdr, sta_id);
+
+ /* Total # bytes to be transmitted */
+ len = (u16) skb->len;
+ tx_cmd->len = cpu_to_le16(len);
+
+ il_dbg_log_tx_data_frame(il, len, hdr);
+ il_update_stats(il, true, fc, len);
+ tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
+ tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
+
+ if (!ieee80211_has_morefrags(hdr->frame_control)) {
+ txq->need_update = 1;
+ } else {
+ wait_write_ptr = 1;
+ txq->need_update = 0;
+ }
+
+ D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
+ D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+ il_print_hex_dump(il, IL_DL_TX, tx_cmd, sizeof(*tx_cmd));
+ il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr,
+ ieee80211_hdrlen(fc));
+
+ /*
+ * Use the first empty entry in this queue's command buffer array
+ * to contain the Tx command and MAC header concatenated together
+ * (payload data will be in another buffer).
+ * Size of this varies, due to varying MAC header length.
+ * If end is not dword aligned, we'll have 2 extra bytes at the end
+ * of the MAC header (device reads on dword boundaries).
+ * We'll tell device about this padding later.
+ */
+ len =
+ sizeof(struct il3945_tx_cmd) + sizeof(struct il_cmd_header) +
+ hdr_len;
+ len = (len + 3) & ~3;
+
+ /* Physical address of this Tx command's header (not MAC header!),
+ * within command buffer array. */
+ txcmd_phys =
+ pci_map_single(il->pci_dev, &out_cmd->hdr, len, PCI_DMA_TODEVICE);
+ /* we do not map metadata ... so we can safely access the address to
+ * provide to the unmap command */
+ dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ dma_unmap_len_set(out_meta, len, len);
+
+ /* Add buffer containing Tx command and MAC(!) header to TFD's
+ * first entry */
+ il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, txcmd_phys, len, 1,
+ 0);
+
+ /* Set up TFD's 2nd entry to point directly to remainder of skb,
+ * if any (802.11 null frames have no payload). */
+ len = skb->len - hdr_len;
+ if (len) {
+ phys_addr =
+ pci_map_single(il->pci_dev, skb->data + hdr_len, len,
+ PCI_DMA_TODEVICE);
+ il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr,
+ len, 0, U32_PAD(len));
+ }
+
+ /* Tell device the write idx *just past* this latest filled TFD */
+ q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
+ il_txq_update_write_ptr(il, txq);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
+ if (wait_write_ptr) {
+ spin_lock_irqsave(&il->lock, flags);
+ txq->need_update = 1;
+ il_txq_update_write_ptr(il, txq);
+ spin_unlock_irqrestore(&il->lock, flags);
+ }
+
+ il_stop_queue(il, txq);
+ }
+
+ return 0;
+
+drop_unlock:
+ spin_unlock_irqrestore(&il->lock, flags);
+drop:
+ return -1;
+}
+
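The 'len = (len + 3) & ~3' step above rounds the Tx command plus MAC header length up to the next dword (4-byte) boundary, matching the comment that the device reads on dword boundaries. A small illustration of that rounding, under a name of our own:

static unsigned int round_up_to_dword(unsigned int len)
{
        /* add 3, then clear the two low bits: 25 -> 28, 28 -> 28 */
        return (len + 3) & ~3u;
}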
+static int
+il3945_get_measurement(struct il_priv *il,
+ struct ieee80211_measurement_params *params, u8 type)
+{
+ struct il_spectrum_cmd spectrum;
+ struct il_rx_pkt *pkt;
+ struct il_host_cmd cmd = {
+ .id = C_SPECTRUM_MEASUREMENT,
+ .data = (void *)&spectrum,
+ .flags = CMD_WANT_SKB,
+ };
+ u32 add_time = le64_to_cpu(params->start_time);
+ int rc;
+ int spectrum_resp_status;
+ int duration = le16_to_cpu(params->duration);
+ struct il_rxon_context *ctx = &il->ctx;
+
+ if (il_is_associated(il))
+ add_time =
+ il_usecs_to_beacons(il,
+ le64_to_cpu(params->start_time) -
+ il->_3945.last_tsf,
+ le16_to_cpu(ctx->timing.
+ beacon_interval));
+
+ memset(&spectrum, 0, sizeof(spectrum));
+
+ spectrum.channel_count = cpu_to_le16(1);
+ spectrum.flags =
+ RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
+ spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
+ cmd.len = sizeof(spectrum);
+ spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
+
+ if (il_is_associated(il))
+ spectrum.start_time =
+ il_add_beacon_time(il, il->_3945.last_beacon_time, add_time,
+ le16_to_cpu(ctx->timing.
+ beacon_interval));
+ else
+ spectrum.start_time = 0;
+
+ spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
+ spectrum.channels[0].channel = params->channel;
+ spectrum.channels[0].type = type;
+ if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
+ spectrum.flags |=
+ RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
+ RXON_FLG_TGG_PROTECT_MSK;
+
+ rc = il_send_cmd_sync(il, &cmd);
+ if (rc)
+ return rc;
+
+ pkt = (struct il_rx_pkt *)cmd.reply_page;
+ if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+ IL_ERR("Bad return from N_RX_ON_ASSOC command\n");
+ rc = -EIO;
+ }
+
+ spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
+ switch (spectrum_resp_status) {
+ case 0: /* Command will be handled */
+ if (pkt->u.spectrum.id != 0xff) {
+ D_INFO("Replaced existing measurement: %d\n",
+ pkt->u.spectrum.id);
+ il->measurement_status &= ~MEASUREMENT_READY;
+ }
+ il->measurement_status |= MEASUREMENT_ACTIVE;
+ rc = 0;
+ break;
+
+ case 1: /* Command will not be handled */
+ rc = -EAGAIN;
+ break;
+ }
+
+ il_free_pages(il, cmd.reply_page);
+
+ return rc;
+}
+
+static void
+il3945_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_alive_resp *palive;
+ struct delayed_work *pwork;
+
+ palive = &pkt->u.alive_frame;
+
+ D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
+ palive->is_valid, palive->ver_type, palive->ver_subtype);
+
+ if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
+ D_INFO("Initialization Alive received.\n");
+ memcpy(&il->card_alive_init, &pkt->u.alive_frame,
+ sizeof(struct il_alive_resp));
+ pwork = &il->init_alive_start;
+ } else {
+ D_INFO("Runtime Alive received.\n");
+ memcpy(&il->card_alive, &pkt->u.alive_frame,
+ sizeof(struct il_alive_resp));
+ pwork = &il->alive_start;
+ il3945_disable_events(il);
+ }
+
+ /* We delay the ALIVE response by 5ms to
+ * give the HW RF Kill time to activate... */
+ if (palive->is_valid == UCODE_VALID_OK)
+ queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
+ else
+ IL_WARN("uCode did not respond OK.\n");
+}
+
+static void
+il3945_hdl_add_sta(struct il_priv *il, struct il_rx_buf *rxb)
+{
+#ifdef CONFIG_IWLEGACY_DEBUG
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+#endif
+
+ D_RX("Received C_ADD_STA: 0x%02X\n", pkt->u.status);
+}
+
+static void
+il3945_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il3945_beacon_notif *beacon = &(pkt->u.beacon_status);
+#ifdef CONFIG_IWLEGACY_DEBUG
+ u8 rate = beacon->beacon_notify_hdr.rate;
+
+ D_RX("beacon status %x retries %d iss %d " "tsf %d %d rate %d\n",
+ le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
+ beacon->beacon_notify_hdr.failure_frame,
+ le32_to_cpu(beacon->ibss_mgr_status),
+ le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
+#endif
+
+ il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
+
+}
+
+/* Handle notification from uCode that card's power state is changing
+ * due to software, hardware, or critical temperature RFKILL */
+static void
+il3945_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
+ unsigned long status = il->status;
+
+ IL_WARN("Card state received: HW:%s SW:%s\n",
+ (flags & HW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & SW_CARD_DISABLED) ? "Kill" : "On");
+
+ _il_wr(il, CSR_UCODE_DRV_GP1_SET, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ if (flags & HW_CARD_DISABLED)
+ set_bit(S_RF_KILL_HW, &il->status);
+ else
+ clear_bit(S_RF_KILL_HW, &il->status);
+
+ il_scan_cancel(il);
+
+ if ((test_bit(S_RF_KILL_HW, &status) !=
+ test_bit(S_RF_KILL_HW, &il->status)))
+ wiphy_rfkill_set_hw_state(il->hw->wiphy,
+ test_bit(S_RF_KILL_HW, &il->status));
+ else
+ wake_up(&il->wait_command_queue);
+}
+
+/**
+ * il3945_setup_handlers - Initialize Rx handler callbacks
+ *
+ * Setup the RX handlers for each of the reply types sent from the uCode
+ * to the host.
+ *
+ * This function chains into the hardware specific files for them to setup
+ * any hardware specific handlers as well.
+ */
+static void
+il3945_setup_handlers(struct il_priv *il)
+{
+ il->handlers[N_ALIVE] = il3945_hdl_alive;
+ il->handlers[C_ADD_STA] = il3945_hdl_add_sta;
+ il->handlers[N_ERROR] = il_hdl_error;
+ il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
+ il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
+ il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
+ il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
+ il->handlers[N_BEACON] = il3945_hdl_beacon;
+
+ /*
+ * The same handler is used for both the REPLY to a discrete
+ * stats request from the host as well as for the periodic
+ * stats notifications (after received beacons) from the uCode.
+ */
+ il->handlers[C_STATS] = il3945_hdl_c_stats;
+ il->handlers[N_STATS] = il3945_hdl_stats;
+
+ il_setup_rx_scan_handlers(il);
+ il->handlers[N_CARD_STATE] = il3945_hdl_card_state;
+
+ /* Set up hardware specific Rx handlers */
+ il3945_hw_handler_setup(il);
+}
+
+/************************** RX-FUNCTIONS ****************************/
+/*
+ * Rx theory of operation
+ *
+ * The host allocates 32 DMA target addresses and passes the host address
+ * to the firmware at register IL_RFDS_TBL_LOWER + N * RFD_SIZE where N is
+ * 0 to 31
+ *
+ * Rx Queue Indexes
+ * The host/firmware share two idx registers for managing the Rx buffers.
+ *
+ * The READ idx maps to the first position that the firmware may be writing
+ * to -- the driver can read up to (but not including) this position and get
+ * good data.
+ * The READ idx is managed by the firmware once the card is enabled.
+ *
+ * The WRITE idx maps to the last position the driver has read from -- the
+ * position preceding WRITE is the last slot the firmware can place a packet.
+ *
+ * The queue is empty (no good data) if WRITE = READ - 1, and is full if
+ * WRITE = READ.
+ *
+ * During initialization, the host sets up the READ queue position to the first
+ * IDX position, and WRITE to the last (READ - 1 wrapped)
+ *
+ * When the firmware places a packet in a buffer, it will advance the READ idx
+ * and fire the RX interrupt. The driver can then query the READ idx and
+ * process as many packets as possible, moving the WRITE idx forward as it
+ * resets the Rx queue buffers with new memory.
+ *
+ * The management in the driver is as follows:
+ * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
+ * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ * to replenish the iwl->rxq->rx_free.
+ * + In il3945_rx_replenish (scheduled) if 'processed' != 'read' then the
+ * iwl->rxq is replenished and the READ IDX is updated (updating the
+ * 'processed' and 'read' driver idxes as well)
+ * + A received packet is processed and handed to the kernel network stack,
+ * detached from the iwl->rxq. The driver 'processed' idx is updated.
+ * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
+ * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
+ * IDX is not incremented and iwl->status(RX_STALLED) is set. If there
+ * were enough free buffers and RX_STALLED is set it is cleared.
+ *
+ *
+ * Driver sequence:
+ *
+ * il3945_rx_replenish() Replenishes rx_free list from rx_used, and calls
+ * il3945_rx_queue_restock
+ * il3945_rx_queue_restock() Moves available buffers from rx_free into Rx
+ * queue, updates firmware pointers, and updates
+ * the WRITE idx. If insufficient rx_free buffers
+ * are available, schedules il3945_rx_replenish
+ *
+ * -- enable interrupts --
+ * ISR - il3945_rx() Detach il_rx_bufs from pool up to the
+ * READ IDX, detaching the SKB from the pool.
+ * Moves the packet buffer from queue to rx_used.
+ * Calls il3945_rx_queue_restock to refill any empty
+ * slots.
+ * ...
+ *
+ */
+
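A minimal, self-contained sketch of the wrap-around index arithmetic the ring above relies on; the same masking pattern appears in il3945_rx_queue_restock() below. The queue size and helper names here are illustrative and assume a power-of-two ring:

#define SKETCH_RX_QUEUE_SIZE    256     /* must be a power of two */
#define SKETCH_RX_QUEUE_MASK    (SKETCH_RX_QUEUE_SIZE - 1)

/* Advance an index by one slot, wrapping at the end of the ring. */
static unsigned int sketch_ring_inc(unsigned int idx)
{
        return (idx + 1) & SKETCH_RX_QUEUE_MASK;
}

/* How many slots 'tail' trails 'head' by, modulo the ring size. */
static unsigned int sketch_ring_used(unsigned int head, unsigned int tail)
{
        return (head - tail) & SKETCH_RX_QUEUE_MASK;
}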
+/**
+ * il3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32
+il3945_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
+{
+ return cpu_to_le32((u32) dma_addr);
+}
+
+/**
+ * il3945_rx_queue_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' idx forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+static void
+il3945_rx_queue_restock(struct il_priv *il)
+{
+ struct il_rx_queue *rxq = &il->rxq;
+ struct list_head *element;
+ struct il_rx_buf *rxb;
+ unsigned long flags;
+ int write;
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ write = rxq->write & ~0x7;
+ while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
+ /* Get next free Rx buffer, remove from free list */
+ element = rxq->rx_free.next;
+ rxb = list_entry(element, struct il_rx_buf, list);
+ list_del(element);
+
+ /* Point to Rx buffer via next RBD in circular buffer */
+ rxq->bd[rxq->write] =
+ il3945_dma_addr2rbd_ptr(il, rxb->page_dma);
+ rxq->queue[rxq->write] = rxb;
+ rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+ rxq->free_count--;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ /* If the pre-allocated buffer pool is dropping low, schedule to
+ * refill it */
+ if (rxq->free_count <= RX_LOW_WATERMARK)
+ queue_work(il->workqueue, &il->rx_replenish);
+
+ /* If we've added more space for the firmware to place data, tell it.
+ * Increment device's write pointer in multiples of 8. */
+ if (rxq->write_actual != (rxq->write & ~0x7) ||
+ abs(rxq->write - rxq->read) > 7) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ rxq->need_update = 1;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ il_rx_queue_update_write_ptr(il, rxq);
+ }
+}
+
+/**
+ * il3945_rx_replenish - Move all used packets from rx_used to rx_free
+ *
+ * When moving to rx_free, an SKB is allocated for the slot.
+ *
+ * Also restocks the Rx queue via il3945_rx_queue_restock.
+ * This is called as a scheduled work item (except during initialization).
+ */
+static void
+il3945_rx_allocate(struct il_priv *il, gfp_t priority)
+{
+ struct il_rx_queue *rxq = &il->rxq;
+ struct list_head *element;
+ struct il_rx_buf *rxb;
+ struct page *page;
+ unsigned long flags;
+ gfp_t gfp_mask = priority;
+
+ while (1) {
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ if (rxq->free_count > RX_LOW_WATERMARK)
+ gfp_mask |= __GFP_NOWARN;
+
+ if (il->hw_params.rx_page_order > 0)
+ gfp_mask |= __GFP_COMP;
+
+ /* Alloc a new receive buffer */
+ page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
+ if (!page) {
+ if (net_ratelimit())
+ D_INFO("Failed to allocate SKB buffer.\n");
+ if (rxq->free_count <= RX_LOW_WATERMARK &&
+ net_ratelimit())
+ IL_ERR("Failed to allocate SKB buffer with %0x."
+ "Only %u free buffers remaining.\n",
+ priority, rxq->free_count);
+ /* We don't reschedule replenish work here -- we will
+ * call the restock method and if it still needs
+ * more buffers it will schedule replenish */
+ break;
+ }
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ __free_pages(page, il->hw_params.rx_page_order);
+ return;
+ }
+ element = rxq->rx_used.next;
+ rxb = list_entry(element, struct il_rx_buf, list);
+ list_del(element);
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ rxb->page = page;
+ /* Get physical address of RB/SKB */
+ rxb->page_dma =
+ pci_map_page(il->pci_dev, page, 0,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ il->alloc_rxb_page++;
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ }
+}
+
+void
+il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
+{
+ unsigned long flags;
+ int i;
+ spin_lock_irqsave(&rxq->lock, flags);
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+ /* Fill the rx_used queue with _all_ of the Rx buffers */
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+ /* In the reset function, these buffers may have been allocated
+ * to an SKB, so we need to unmap and free potential storage */
+ if (rxq->pool[i].page != NULL) {
+ pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ __il_free_pages(il, rxq->pool[i].page);
+ rxq->pool[i].page = NULL;
+ }
+ list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+ }
+
+ /* Set us so that we have processed and used all buffers, but have
+ * not restocked the Rx queue with fresh buffers */
+ rxq->read = rxq->write = 0;
+ rxq->write_actual = 0;
+ rxq->free_count = 0;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+}
+
+void
+il3945_rx_replenish(void *data)
+{
+ struct il_priv *il = data;
+ unsigned long flags;
+
+ il3945_rx_allocate(il, GFP_KERNEL);
+
+ spin_lock_irqsave(&il->lock, flags);
+ il3945_rx_queue_restock(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+}
+
+static void
+il3945_rx_replenish_now(struct il_priv *il)
+{
+ il3945_rx_allocate(il, GFP_ATOMIC);
+
+ il3945_rx_queue_restock(il);
+}
+
+/* Assumes that the page field of the buffers in 'pool' is kept accurate.
+ * If a page has been detached, the pool entry must have its page set to NULL.
+ * This free routine walks the pool entries and, for each non-NULL page,
+ * unmaps and frees it.
+ */
+static void
+il3945_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
+{
+ int i;
+ for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+ if (rxq->pool[i].page != NULL) {
+ pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ __il_free_pages(il, rxq->pool[i].page);
+ rxq->pool[i].page = NULL;
+ }
+ }
+
+ dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->bd_dma);
+ dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
+ rxq->bd = NULL;
+ rxq->rb_stts = NULL;
+}
+
+/* Convert linear signal-to-noise ratio into dB */
+static u8 ratio2dB[100] = {
+/* 0 1 2 3 4 5 6 7 8 9 */
+ 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
+ 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
+ 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
+ 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
+ 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
+ 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
+ 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
+ 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
+ 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
+ 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
+};
+
+/* Calculates a relative dB value from a ratio of linear
+ * (i.e. not dB) signal levels.
+ * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
+int
+il3945_calc_db_from_ratio(int sig_ratio)
+{
+ /* 1000:1 or higher just report as 60 dB */
+ if (sig_ratio >= 1000)
+ return 60;
+
+ /* 100:1 or higher, divide by 10 and use table,
+ * add 20 dB to make up for divide by 10 */
+ if (sig_ratio >= 100)
+ return 20 + (int)ratio2dB[sig_ratio / 10];
+
+ /* We shouldn't see this */
+ if (sig_ratio < 1)
+ return 0;
+
+ /* Use table for ratios 1:1 - 99:1 */
+ return (int)ratio2dB[sig_ratio];
+}
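
The table above is effectively 20*log10() of a voltage ratio, rounded to whole dB. A small standalone check (plain C, not driver code) comparing a few table entries against the exact formula:

#include <math.h>
#include <stdio.h>

int main(void)
{
	/* a few (ratio, ratio2dB[ratio]) pairs copied from the table above */
	int ratio[] = { 2, 10, 50, 99 };
	int table[] = { 6, 20, 34, 40 };

	for (int i = 0; i < 4; i++)
		printf("ratio %3d: table %2d dB, 20*log10 = %5.2f dB\n",
		       ratio[i], table[i], 20.0 * log10(ratio[i]));
	return 0;	/* build with -lm */
}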
+
+/**
+ * il3945_rx_handle - Main entry function for receiving responses from uCode
+ *
+ * Uses the il->handlers callback function array to invoke
+ * the appropriate handlers, including command responses,
+ * frame-received notifications, and other notifications.
+ */
+static void
+il3945_rx_handle(struct il_priv *il)
+{
+ struct il_rx_buf *rxb;
+ struct il_rx_pkt *pkt;
+ struct il_rx_queue *rxq = &il->rxq;
+ u32 r, i;
+ int reclaim;
+ unsigned long flags;
+ u8 fill_rx = 0;
+ u32 count = 8;
+ int total_empty = 0;
+
+ /* uCode's read idx (stored in shared DRAM) indicates the last Rx
+ * buffer that the driver may process (last buffer filled by ucode). */
+ r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
+ i = rxq->read;
+
+ /* Calculate how many ring entries need to be restocked after handling RX */
+ total_empty = r - rxq->write_actual;
+ if (total_empty < 0)
+ total_empty += RX_QUEUE_SIZE;
+
+ if (total_empty > (RX_QUEUE_SIZE / 2))
+ fill_rx = 1;
+ /* Rx interrupt, but nothing sent from uCode */
+ if (i == r)
+ D_RX("r = %d, i = %d\n", r, i);
+
+ while (i != r) {
+ int len;
+
+ rxb = rxq->queue[i];
+
+ /* If an RXB doesn't have a Rx queue slot associated with it,
+ * then a bug has been introduced in the queue refilling
+ * routines -- catch it here */
+ BUG_ON(rxb == NULL);
+
+ rxq->queue[i] = NULL;
+
+ pci_unmap_page(il->pci_dev, rxb->page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ pkt = rxb_addr(rxb);
+
+ len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
+ len += sizeof(u32); /* account for status word */
+
+ /* Reclaim a command buffer only if this packet is a response
+ * to a (driver-originated) command.
+ * If the packet (e.g. Rx frame) originated from uCode,
+ * there is no command buffer to reclaim.
+ * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
+ * but apparently a few don't get set; catch them here. */
+ reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
+ pkt->hdr.cmd != N_STATS && pkt->hdr.cmd != C_TX;
+
+ /* Based on type of command response or notification,
+ * handle those that need handling via function in
+ * handlers table. See il3945_setup_handlers() */
+ if (il->handlers[pkt->hdr.cmd]) {
+ D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
+ il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+ il->isr_stats.handlers[pkt->hdr.cmd]++;
+ il->handlers[pkt->hdr.cmd] (il, rxb);
+ } else {
+ /* No handling needed */
+ D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
+ i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+ }
+
+ /*
+ * XXX: After here, we should always check rxb->page
+ * against NULL before touching it or its virtual
+ * memory (pkt). Because some handler might have
+ * already taken or freed the pages.
+ */
+
+ if (reclaim) {
+ /* Invoke any callbacks, transfer the buffer to caller,
+ * and fire off the (possibly) blocking il_send_cmd()
+ * as we reclaim the driver command queue */
+ if (rxb->page)
+ il_tx_cmd_complete(il, rxb);
+ else
+ IL_WARN("Claim null rxb?\n");
+ }
+
+ /* Reuse the page if possible. For notification packets and
+ * SKBs that fail to Rx correctly, add them back into the
+ * rx_free list for reuse later. */
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (rxb->page != NULL) {
+ rxb->page_dma =
+ pci_map_page(il->pci_dev, rxb->page, 0,
+ PAGE_SIZE << il->hw_params.
+ rx_page_order, PCI_DMA_FROMDEVICE);
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ } else
+ list_add_tail(&rxb->list, &rxq->rx_used);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ i = (i + 1) & RX_QUEUE_MASK;
+ /* If there are a lot of unused frames,
+ * restock the Rx queue so ucode won't assert. */
+ if (fill_rx) {
+ count++;
+ if (count >= 8) {
+ rxq->read = i;
+ il3945_rx_replenish_now(il);
+ count = 0;
+ }
+ }
+ }
+
+ /* Remember where the driver stopped processing */
+ rxq->read = i;
+ if (fill_rx)
+ il3945_rx_replenish_now(il);
+ else
+ il3945_rx_queue_restock(il);
+}
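
A standalone sketch (not driver code) of the restock accounting above, assuming the usual RX_QUEUE_SIZE of 256: the number of entries to refill wraps modulo the ring size, and the in-loop replenish path only kicks in once more than half the ring is empty.

#include <stdio.h>

#define RX_QUEUE_SIZE 256

int main(void)
{
	int r = 5;		/* last index filled by firmware (has wrapped) */
	int write_actual = 248;	/* last write index handed to the device */

	int total_empty = r - write_actual;
	if (total_empty < 0)
		total_empty += RX_QUEUE_SIZE;

	printf("entries to restock : %d\n", total_empty);
	printf("aggressive refill  : %s\n",
	       total_empty > RX_QUEUE_SIZE / 2 ? "yes" : "no");
	return 0;
}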
+
+/* call this function to flush any scheduled tasklet */
+static inline void
+il3945_synchronize_irq(struct il_priv *il)
+{
+ /* wait to make sure we flush pending tasklet */
+ synchronize_irq(il->pci_dev->irq);
+ tasklet_kill(&il->irq_tasklet);
+}
+
+static const char *
+il3945_desc_lookup(int i)
+{
+ switch (i) {
+ case 1:
+ return "FAIL";
+ case 2:
+ return "BAD_PARAM";
+ case 3:
+ return "BAD_CHECKSUM";
+ case 4:
+ return "NMI_INTERRUPT";
+ case 5:
+ return "SYSASSERT";
+ case 6:
+ return "FATAL_ERROR";
+ }
+
+ return "UNKNOWN";
+}
+
+#define ERROR_START_OFFSET (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE (7 * sizeof(u32))
+
+void
+il3945_dump_nic_error_log(struct il_priv *il)
+{
+ u32 i;
+ u32 desc, time, count, base, data1;
+ u32 blink1, blink2, ilink1, ilink2;
+
+ base = le32_to_cpu(il->card_alive.error_event_table_ptr);
+
+ if (!il3945_hw_valid_rtc_data_addr(base)) {
+ IL_ERR("Not valid error log pointer 0x%08X\n", base);
+ return;
+ }
+
+ count = il_read_targ_mem(il, base);
+
+ if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
+ IL_ERR("Start IWL Error Log Dump:\n");
+ IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
+ }
+
+ IL_ERR("Desc Time asrtPC blink2 "
+ "ilink1 nmiPC Line\n");
+ for (i = ERROR_START_OFFSET;
+ i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
+ i += ERROR_ELEM_SIZE) {
+ desc = il_read_targ_mem(il, base + i);
+ time = il_read_targ_mem(il, base + i + 1 * sizeof(u32));
+ blink1 = il_read_targ_mem(il, base + i + 2 * sizeof(u32));
+ blink2 = il_read_targ_mem(il, base + i + 3 * sizeof(u32));
+ ilink1 = il_read_targ_mem(il, base + i + 4 * sizeof(u32));
+ ilink2 = il_read_targ_mem(il, base + i + 5 * sizeof(u32));
+ data1 = il_read_targ_mem(il, base + i + 6 * sizeof(u32));
+
+ IL_ERR("%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
+ il3945_desc_lookup(desc), desc, time, blink1, blink2,
+ ilink1, ilink2, data1);
+ }
+}
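
The reads above walk a count word followed by ERROR_ELEM_SIZE (7 x u32) records. Pictured as a struct, purely for readability; the field names mirror the local variables used in the dump rather than any firmware-defined names.

#include <stdint.h>

struct il3945_error_entry {
	uint32_t desc;		/* error type, see il3945_desc_lookup() */
	uint32_t time;
	uint32_t blink1;
	uint32_t blink2;
	uint32_t ilink1;
	uint32_t ilink2;
	uint32_t data1;		/* printed as the "Line" column */
};

struct il3945_error_log {
	uint32_t count;				/* number of valid entries */
	struct il3945_error_entry entry[];	/* ERROR_ELEM_SIZE bytes each */
};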
+
+static void
+il3945_irq_tasklet(struct il_priv *il)
+{
+ u32 inta, handled = 0;
+ u32 inta_fh;
+ unsigned long flags;
+#ifdef CONFIG_IWLEGACY_DEBUG
+ u32 inta_mask;
+#endif
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Ack/clear/reset pending uCode interrupts.
+ * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
+ * and will clear only when CSR_FH_INT_STATUS gets cleared. */
+ inta = _il_rd(il, CSR_INT);
+ _il_wr(il, CSR_INT, inta);
+
+ /* Ack/clear/reset pending flow-handler (DMA) interrupts.
+ * Any new interrupts that happen after this, either while we're
+ * in this tasklet, or later, will show up in next ISR/tasklet. */
+ inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+ _il_wr(il, CSR_FH_INT_STATUS, inta_fh);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (il_get_debug_level(il) & IL_DL_ISR) {
+ /* just for debug */
+ inta_mask = _il_rd(il, CSR_INT_MASK);
+ D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
+ inta_mask, inta_fh);
+ }
+#endif
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
+ * atomic, make sure that inta covers all the interrupts that
+ * we've discovered, even if FH interrupt came in just after
+ * reading CSR_INT. */
+ if (inta_fh & CSR39_FH_INT_RX_MASK)
+ inta |= CSR_INT_BIT_FH_RX;
+ if (inta_fh & CSR39_FH_INT_TX_MASK)
+ inta |= CSR_INT_BIT_FH_TX;
+
+ /* Now service all interrupt bits discovered above. */
+ if (inta & CSR_INT_BIT_HW_ERR) {
+ IL_ERR("Hardware error detected. Restarting.\n");
+
+ /* Tell the device to stop sending interrupts */
+ il_disable_interrupts(il);
+
+ il->isr_stats.hw++;
+ il_irq_handle_error(il);
+
+ handled |= CSR_INT_BIT_HW_ERR;
+
+ return;
+ }
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (il_get_debug_level(il) & (IL_DL_ISR)) {
+ /* NIC fires this, but we don't use it, redundant with WAKEUP */
+ if (inta & CSR_INT_BIT_SCD) {
+ D_ISR("Scheduler finished to transmit "
+ "the frame/frames.\n");
+ il->isr_stats.sch++;
+ }
+
+ /* Alive notification via Rx interrupt will do the real work */
+ if (inta & CSR_INT_BIT_ALIVE) {
+ D_ISR("Alive interrupt\n");
+ il->isr_stats.alive++;
+ }
+ }
+#endif
+ /* Safely ignore these bits for debug checks below */
+ inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
+
+ /* Error detected by uCode */
+ if (inta & CSR_INT_BIT_SW_ERR) {
+ IL_ERR("Microcode SW error detected. " "Restarting 0x%X.\n",
+ inta);
+ il->isr_stats.sw++;
+ il_irq_handle_error(il);
+ handled |= CSR_INT_BIT_SW_ERR;
+ }
+
+ /* uCode wakes up after power-down sleep */
+ if (inta & CSR_INT_BIT_WAKEUP) {
+ D_ISR("Wakeup interrupt\n");
+ il_rx_queue_update_write_ptr(il, &il->rxq);
+ il_txq_update_write_ptr(il, &il->txq[0]);
+ il_txq_update_write_ptr(il, &il->txq[1]);
+ il_txq_update_write_ptr(il, &il->txq[2]);
+ il_txq_update_write_ptr(il, &il->txq[3]);
+ il_txq_update_write_ptr(il, &il->txq[4]);
+ il_txq_update_write_ptr(il, &il->txq[5]);
+
+ il->isr_stats.wakeup++;
+ handled |= CSR_INT_BIT_WAKEUP;
+ }
+
+ /* All uCode command responses, including Tx command responses,
+ * Rx "responses" (frame-received notification), and other
+ * notifications from uCode come through here */
+ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+ il3945_rx_handle(il);
+ il->isr_stats.rx++;
+ handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
+ }
+
+ if (inta & CSR_INT_BIT_FH_TX) {
+ D_ISR("Tx interrupt\n");
+ il->isr_stats.tx++;
+
+ _il_wr(il, CSR_FH_INT_STATUS, (1 << 6));
+ il_wr(il, FH39_TCSR_CREDIT(FH39_SRVC_CHNL), 0x0);
+ handled |= CSR_INT_BIT_FH_TX;
+ }
+
+ if (inta & ~handled) {
+ IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
+ il->isr_stats.unhandled++;
+ }
+
+ if (inta & ~il->inta_mask) {
+ IL_WARN("Disabled INTA bits 0x%08x were pending\n",
+ inta & ~il->inta_mask);
+ IL_WARN(" with inta_fh = 0x%08x\n", inta_fh);
+ }
+
+ /* Re-enable all interrupts */
+ /* only Re-enable if disabled by irq */
+ if (test_bit(S_INT_ENABLED, &il->status))
+ il_enable_interrupts(il);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (il_get_debug_level(il) & (IL_DL_ISR)) {
+ inta = _il_rd(il, CSR_INT);
+ inta_mask = _il_rd(il, CSR_INT_MASK);
+ inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+ D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
+ "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
+ }
+#endif
+}
+
+static int
+il3945_get_channels_for_scan(struct il_priv *il, enum ieee80211_band band,
+ u8 is_active, u8 n_probes,
+ struct il3945_scan_channel *scan_ch,
+ struct ieee80211_vif *vif)
+{
+ struct ieee80211_channel *chan;
+ const struct ieee80211_supported_band *sband;
+ const struct il_channel_info *ch_info;
+ u16 passive_dwell = 0;
+ u16 active_dwell = 0;
+ int added, i;
+
+ sband = il_get_hw_mode(il, band);
+ if (!sband)
+ return 0;
+
+ active_dwell = il_get_active_dwell_time(il, band, n_probes);
+ passive_dwell = il_get_passive_dwell_time(il, band, vif);
+
+ if (passive_dwell <= active_dwell)
+ passive_dwell = active_dwell + 1;
+
+ for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
+ chan = il->scan_request->channels[i];
+
+ if (chan->band != band)
+ continue;
+
+ scan_ch->channel = chan->hw_value;
+
+ ch_info = il_get_channel_info(il, band, scan_ch->channel);
+ if (!il_is_channel_valid(ch_info)) {
+ D_SCAN("Channel %d is INVALID for this band.\n",
+ scan_ch->channel);
+ continue;
+ }
+
+ scan_ch->active_dwell = cpu_to_le16(active_dwell);
+ scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+ /* If passive, set up for auto-switch
+ * and use the long active_dwell time.
+ */
+ if (!is_active || il_is_channel_passive(ch_info) ||
+ (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
+ scan_ch->type = 0; /* passive */
+ if (IL_UCODE_API(il->ucode_ver) == 1)
+ scan_ch->active_dwell =
+ cpu_to_le16(passive_dwell - 1);
+ } else {
+ scan_ch->type = 1; /* active */
+ }
+
+ /* Set direct probe bits. These may be used both for active
+ * scan channels (probes get sent right away)
+ * and for passive channels (probes get sent only after
+ * hearing a clear Rx packet). */
+ if (IL_UCODE_API(il->ucode_ver) >= 2) {
+ if (n_probes)
+ scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes);
+ } else {
+ /* uCode v1 does not allow setting direct probe bits on
+ * passive channel. */
+ if ((scan_ch->type & 1) && n_probes)
+ scan_ch->type |= IL39_SCAN_PROBE_MASK(n_probes);
+ }
+
+ /* Set txpower levels to defaults */
+ scan_ch->tpc.dsp_atten = 110;
+ /* scan_pwr_info->tpc.dsp_atten; */
+
+ /*scan_pwr_info->tpc.tx_gain; */
+ if (band == IEEE80211_BAND_5GHZ)
+ scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else {
+ scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
+ /* NOTE: if we were doing 6Mb OFDM for scans we'd use
+ * power level:
+ * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
+ */
+ }
+
+ D_SCAN("Scanning %d [%s %d]\n", scan_ch->channel,
+ (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
+ (scan_ch->type & 1) ? active_dwell : passive_dwell);
+
+ scan_ch++;
+ added++;
+ }
+
+ D_SCAN("total channels to scan %d\n", added);
+ return added;
+}
+
+static void
+il3945_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
+{
+ int i;
+
+ for (i = 0; i < RATE_COUNT_LEGACY; i++) {
+ rates[i].bitrate = il3945_rates[i].ieee * 5;
+ rates[i].hw_value = i; /* Rate scaling will work on idxes */
+ rates[i].hw_value_short = i;
+ rates[i].flags = 0;
+ if (i > IL39_LAST_OFDM_RATE || i < IL_FIRST_OFDM_RATE) {
+ /*
+ * If CCK != 1M then set short preamble rate flag.
+ */
+ rates[i].flags |=
+ (il3945_rates[i].plcp ==
+ 10) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
+ }
+ }
+}
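
The multiply-by-5 above is a unit conversion: the rate table's ieee values are in 500 kb/s steps (as in the 802.11 Supported Rates element), while mac80211's bitrate field is in 100 kb/s steps. A tiny standalone check with the standard CCK/OFDM values, for illustration only:

#include <stdio.h>

int main(void)
{
	/* standard 802.11 rate codes: 1, 2, 5.5, 11 (CCK) and 6, 54 (OFDM) Mb/s */
	int ieee[] = { 2, 4, 11, 22, 12, 108 };

	for (int i = 0; i < 6; i++) {
		int bitrate = ieee[i] * 5;	/* 100 kb/s units, as above */
		printf("ieee %3d -> bitrate %3d (%d.%d Mb/s)\n",
		       ieee[i], bitrate, bitrate / 10, bitrate % 10);
	}
	return 0;
}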
+
+/******************************************************************************
+ *
+ * uCode download functions
+ *
+ ******************************************************************************/
+
+static void
+il3945_dealloc_ucode_pci(struct il_priv *il)
+{
+ il_free_fw_desc(il->pci_dev, &il->ucode_code);
+ il_free_fw_desc(il->pci_dev, &il->ucode_data);
+ il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
+ il_free_fw_desc(il->pci_dev, &il->ucode_init);
+ il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
+ il_free_fw_desc(il->pci_dev, &il->ucode_boot);
+}
+
+/**
+ * il3945_verify_inst_full - verify runtime uCode image in card vs. host,
+ * looking at all data.
+ */
+static int
+il3945_verify_inst_full(struct il_priv *il, __le32 * image, u32 len)
+{
+ u32 val;
+ u32 save_len = len;
+ int rc = 0;
+ u32 errcnt;
+
+ D_INFO("ucode inst image size is %u\n", len);
+
+ il_wr(il, HBUS_TARG_MEM_RADDR, IL39_RTC_INST_LOWER_BOUND);
+
+ errcnt = 0;
+ for (; len > 0; len -= sizeof(u32), image++) {
+ /* read data comes through single port, auto-incr addr */
+ /* NOTE: Use the debugless read so we don't flood kernel log
+ * if IL_DL_IO is set */
+ val = _il_rd(il, HBUS_TARG_MEM_RDAT);
+ if (val != le32_to_cpu(*image)) {
+ IL_ERR("uCode INST section is invalid at "
+ "offset 0x%x, is 0x%x, s/b 0x%x\n",
+ save_len - len, val, le32_to_cpu(*image));
+ rc = -EIO;
+ errcnt++;
+ if (errcnt >= 20)
+ break;
+ }
+ }
+
+ if (!errcnt)
+ D_INFO("ucode image in INSTRUCTION memory is good\n");
+
+ return rc;
+}
+
+/**
+ * il3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
+ * using sample data 100 bytes apart. If these sample points are good,
+ * it's a pretty good bet that everything between them is good, too.
+ */
+static int
+il3945_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len)
+{
+ u32 val;
+ int rc = 0;
+ u32 errcnt = 0;
+ u32 i;
+
+ D_INFO("ucode inst image size is %u\n", len);
+
+ for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) {
+ /* read data comes through single port, auto-incr addr */
+ /* NOTE: Use the debugless read so we don't flood kernel log
+ * if IL_DL_IO is set */
+ il_wr(il, HBUS_TARG_MEM_RADDR, i + IL39_RTC_INST_LOWER_BOUND);
+ val = _il_rd(il, HBUS_TARG_MEM_RDAT);
+ if (val != le32_to_cpu(*image)) {
+#if 0 /* Enable this if you want to see details */
+ IL_ERR("uCode INST section is invalid at "
+ "offset 0x%x, is 0x%x, s/b 0x%x\n", i, val,
+ *image);
+#endif
+ rc = -EIO;
+ errcnt++;
+ if (errcnt >= 3)
+ break;
+ }
+ }
+
+ return rc;
+}
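
A standalone sketch (not driver code) of the same sampling idea: compare one 32-bit word every 100 bytes and give up after a few mismatches. Buffer sizes and the helper name are invented for the example.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int verify_sparse(const uint32_t *dev, const uint32_t *host, size_t len)
{
	int errcnt = 0;

	for (size_t i = 0; i < len; i += 100, host += 100 / sizeof(uint32_t)) {
		if (dev[i / sizeof(uint32_t)] != *host) {
			if (++errcnt >= 3)	/* a few mismatches is enough */
				return -1;
		}
	}
	return errcnt ? -1 : 0;
}

int main(void)
{
	uint32_t a[1024], b[1024];

	memset(a, 0xab, sizeof(a));
	memcpy(b, a, sizeof(b));
	printf("match   : %d\n", verify_sparse(a, b, sizeof(a)));	/* 0 */
	b[50] ^= 1;		/* corrupt the word at byte offset 200 */
	printf("mismatch: %d\n", verify_sparse(a, b, sizeof(a)));	/* -1 */
	return 0;
}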
+
+/**
+ * il3945_verify_ucode - determine which instruction image is in SRAM,
+ * and verify its contents
+ */
+static int
+il3945_verify_ucode(struct il_priv *il)
+{
+ __le32 *image;
+ u32 len;
+ int rc = 0;
+
+ /* Try bootstrap */
+ image = (__le32 *) il->ucode_boot.v_addr;
+ len = il->ucode_boot.len;
+ rc = il3945_verify_inst_sparse(il, image, len);
+ if (rc == 0) {
+ D_INFO("Bootstrap uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ /* Try initialize */
+ image = (__le32 *) il->ucode_init.v_addr;
+ len = il->ucode_init.len;
+ rc = il3945_verify_inst_sparse(il, image, len);
+ if (rc == 0) {
+ D_INFO("Initialize uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ /* Try runtime/protocol */
+ image = (__le32 *) il->ucode_code.v_addr;
+ len = il->ucode_code.len;
+ rc = il3945_verify_inst_sparse(il, image, len);
+ if (rc == 0) {
+ D_INFO("Runtime uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
+
+ /* Since nothing seems to match, show first several data entries in
+ * instruction SRAM, so maybe visual inspection will give a clue.
+ * Selection of bootstrap image (vs. other images) is arbitrary. */
+ image = (__le32 *) il->ucode_boot.v_addr;
+ len = il->ucode_boot.len;
+ rc = il3945_verify_inst_full(il, image, len);
+
+ return rc;
+}
+
+static void
+il3945_nic_start(struct il_priv *il)
+{
+ /* Remove all resets to allow NIC to operate */
+ _il_wr(il, CSR_RESET, 0);
+}
+
+#define IL3945_UCODE_GET(item) \
+static u32 il3945_ucode_get_##item(const struct il_ucode_header *ucode)\
+{ \
+ return le32_to_cpu(ucode->v1.item); \
+}
+
+static u32
+il3945_ucode_get_header_size(u32 api_ver)
+{
+ return 24;
+}
+
+static u8 *
+il3945_ucode_get_data(const struct il_ucode_header *ucode)
+{
+ return (u8 *) ucode->v1.data;
+}
+
+IL3945_UCODE_GET(inst_size);
+IL3945_UCODE_GET(data_size);
+IL3945_UCODE_GET(init_size);
+IL3945_UCODE_GET(init_data_size);
+IL3945_UCODE_GET(boot_size);
+
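For reference, the first invocation above expands (via IL3945_UCODE_GET()) to a trivial accessor of the v1 header field:

static u32 il3945_ucode_get_inst_size(const struct il_ucode_header *ucode)
{
	return le32_to_cpu(ucode->v1.inst_size);
}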
+/**
+ * il3945_read_ucode - Read uCode images from disk file.
+ *
+ * Copy into buffers for card to fetch via bus-mastering
+ */
+static int
+il3945_read_ucode(struct il_priv *il)
+{
+ const struct il_ucode_header *ucode;
+ int ret = -EINVAL, idx;
+ const struct firmware *ucode_raw;
+ /* firmware file name contains uCode/driver compatibility version */
+ const char *name_pre = il->cfg->fw_name_pre;
+ const unsigned int api_max = il->cfg->ucode_api_max;
+ const unsigned int api_min = il->cfg->ucode_api_min;
+ char buf[25];
+ u8 *src;
+ size_t len;
+ u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
+
+ /* Ask kernel firmware_class module to get the boot firmware off disk.
+ * request_firmware() is synchronous, file is in memory on return. */
+ for (idx = api_max; idx >= api_min; idx--) {
+ sprintf(buf, "%s%u%s", name_pre, idx, ".ucode");
+ ret = request_firmware(&ucode_raw, buf, &il->pci_dev->dev);
+ if (ret < 0) {
+ IL_ERR("%s firmware file req failed: %d\n", buf, ret);
+ if (ret == -ENOENT)
+ continue;
+ else
+ goto error;
+ } else {
+ if (idx < api_max)
+ IL_ERR("Loaded firmware %s, "
+ "which is deprecated. "
+ " Please use API v%u instead.\n", buf,
+ api_max);
+ D_INFO("Got firmware '%s' file "
+ "(%zd bytes) from disk\n", buf, ucode_raw->size);
+ break;
+ }
+ }
+
+ if (ret < 0)
+ goto error;
+
+ /* Make sure that we got at least our header! */
+ if (ucode_raw->size < il3945_ucode_get_header_size(1)) {
+ IL_ERR("File size way too small!\n");
+ ret = -EINVAL;
+ goto err_release;
+ }
+
+ /* Data from ucode file: header followed by uCode images */
+ ucode = (struct il_ucode_header *)ucode_raw->data;
+
+ il->ucode_ver = le32_to_cpu(ucode->ver);
+ api_ver = IL_UCODE_API(il->ucode_ver);
+ inst_size = il3945_ucode_get_inst_size(ucode);
+ data_size = il3945_ucode_get_data_size(ucode);
+ init_size = il3945_ucode_get_init_size(ucode);
+ init_data_size = il3945_ucode_get_init_data_size(ucode);
+ boot_size = il3945_ucode_get_boot_size(ucode);
+ src = il3945_ucode_get_data(ucode);
+
+ /* api_ver should match the api version forming part of the
+ * firmware filename ... but we don't check for that and only rely
+ * on the API version read from firmware header from here on forward */
+
+ if (api_ver < api_min || api_ver > api_max) {
+ IL_ERR("Driver unable to support your firmware API. "
+ "Driver supports v%u, firmware is v%u.\n", api_max,
+ api_ver);
+ il->ucode_ver = 0;
+ ret = -EINVAL;
+ goto err_release;
+ }
+ if (api_ver != api_max)
+ IL_ERR("Firmware has old API version. Expected %u, "
+ "got %u. New firmware can be obtained "
+ "from http://www.intellinuxwireless.org.\n", api_max,
+ api_ver);
+
+ IL_INFO("loaded firmware version %u.%u.%u.%u\n",
+ IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
+ IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));
+
+ snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
+ "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
+ IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
+ IL_UCODE_SERIAL(il->ucode_ver));
+
+ D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
+ D_INFO("f/w package hdr runtime inst size = %u\n", inst_size);
+ D_INFO("f/w package hdr runtime data size = %u\n", data_size);
+ D_INFO("f/w package hdr init inst size = %u\n", init_size);
+ D_INFO("f/w package hdr init data size = %u\n", init_data_size);
+ D_INFO("f/w package hdr boot inst size = %u\n", boot_size);
+
+ /* Verify size of file vs. image size info in file's header */
+ if (ucode_raw->size !=
+ il3945_ucode_get_header_size(api_ver) + inst_size + data_size +
+ init_size + init_data_size + boot_size) {
+
+ D_INFO("uCode file size %zd does not match expected size\n",
+ ucode_raw->size);
+ ret = -EINVAL;
+ goto err_release;
+ }
+
+ /* Verify that uCode images will fit in card's SRAM */
+ if (inst_size > IL39_MAX_INST_SIZE) {
+ D_INFO("uCode instr len %d too large to fit in\n", inst_size);
+ ret = -EINVAL;
+ goto err_release;
+ }
+
+ if (data_size > IL39_MAX_DATA_SIZE) {
+ D_INFO("uCode data len %d too large to fit in\n", data_size);
+ ret = -EINVAL;
+ goto err_release;
+ }
+ if (init_size > IL39_MAX_INST_SIZE) {
+ D_INFO("uCode init instr len %d too large to fit in\n",
+ init_size);
+ ret = -EINVAL;
+ goto err_release;
+ }
+ if (init_data_size > IL39_MAX_DATA_SIZE) {
+ D_INFO("uCode init data len %d too large to fit in\n",
+ init_data_size);
+ ret = -EINVAL;
+ goto err_release;
+ }
+ if (boot_size > IL39_MAX_BSM_SIZE) {
+ D_INFO("uCode boot instr len %d too large to fit in\n",
+ boot_size);
+ ret = -EINVAL;
+ goto err_release;
+ }
+
+ /* Allocate ucode buffers for card's bus-master loading ... */
+
+ /* Runtime instructions and 2 copies of data:
+ * 1) unmodified from disk
+ * 2) backup cache for save/restore during power-downs */
+ il->ucode_code.len = inst_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_code);
+
+ il->ucode_data.len = data_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_data);
+
+ il->ucode_data_backup.len = data_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);
+
+ if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
+ !il->ucode_data_backup.v_addr)
+ goto err_pci_alloc;
+
+ /* Initialization instructions and data */
+ if (init_size && init_data_size) {
+ il->ucode_init.len = init_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_init);
+
+ il->ucode_init_data.len = init_data_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);
+
+ if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
+ goto err_pci_alloc;
+ }
+
+ /* Bootstrap (instructions only, no data) */
+ if (boot_size) {
+ il->ucode_boot.len = boot_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);
+
+ if (!il->ucode_boot.v_addr)
+ goto err_pci_alloc;
+ }
+
+ /* Copy images into buffers for card's bus-master reads ... */
+
+ /* Runtime instructions (first block of data in file) */
+ len = inst_size;
+ D_INFO("Copying (but not loading) uCode instr len %zd\n", len);
+ memcpy(il->ucode_code.v_addr, src, len);
+ src += len;
+
+ D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
+ il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);
+
+ /* Runtime data (2nd block)
+ * NOTE: Copy into backup buffer will be done in il3945_up() */
+ len = data_size;
+ D_INFO("Copying (but not loading) uCode data len %zd\n", len);
+ memcpy(il->ucode_data.v_addr, src, len);
+ memcpy(il->ucode_data_backup.v_addr, src, len);
+ src += len;
+
+ /* Initialization instructions (3rd block) */
+ if (init_size) {
+ len = init_size;
+ D_INFO("Copying (but not loading) init instr len %zd\n", len);
+ memcpy(il->ucode_init.v_addr, src, len);
+ src += len;
+ }
+
+ /* Initialization data (4th block) */
+ if (init_data_size) {
+ len = init_data_size;
+ D_INFO("Copying (but not loading) init data len %zd\n", len);
+ memcpy(il->ucode_init_data.v_addr, src, len);
+ src += len;
+ }
+
+ /* Bootstrap instructions (5th block) */
+ len = boot_size;
+ D_INFO("Copying (but not loading) boot instr len %zd\n", len);
+ memcpy(il->ucode_boot.v_addr, src, len);
+
+ /* We have our copies now, allow OS release its copies */
+ release_firmware(ucode_raw);
+ return 0;
+
+err_pci_alloc:
+ IL_ERR("failed to allocate pci memory\n");
+ ret = -ENOMEM;
+ il3945_dealloc_ucode_pci(il);
+
+err_release:
+ release_firmware(ucode_raw);
+
+error:
+ return ret;
+}
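
The size check above assumes the v1 .ucode layout: a 24-byte header followed by the five images back to back. A standalone sketch of the resulting offsets and expected total; the sizes used here are arbitrary examples.

#include <stdio.h>

int main(void)
{
	unsigned int hdr = 24;		/* il3945_ucode_get_header_size(1) */
	unsigned int inst = 0x1000, data = 0x800;	/* example sizes only */
	unsigned int init = 0x400, init_data = 0x200, boot = 0x100;

	printf("runtime inst  @ %u\n", hdr);
	printf("runtime data  @ %u\n", hdr + inst);
	printf("init inst     @ %u\n", hdr + inst + data);
	printf("init data     @ %u\n", hdr + inst + data + init);
	printf("boot inst     @ %u\n", hdr + inst + data + init + init_data);
	printf("expected size = %u\n",
	       hdr + inst + data + init + init_data + boot);
	return 0;
}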
+
+/**
+ * il3945_set_ucode_ptrs - Set uCode address location
+ *
+ * Tell initialization uCode where to find runtime uCode.
+ *
+ * BSM registers initially contain pointers to initialization uCode.
+ * We need to replace them to load runtime uCode inst and data,
+ * and to save runtime data when powering down.
+ */
+static int
+il3945_set_ucode_ptrs(struct il_priv *il)
+{
+ dma_addr_t pinst;
+ dma_addr_t pdata;
+
+ /* bits 31:0 for 3945 */
+ pinst = il->ucode_code.p_addr;
+ pdata = il->ucode_data_backup.p_addr;
+
+ /* Tell bootstrap uCode where to find image to load */
+ il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
+ il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
+ il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len);
+
+ /* Inst byte count must be last to set up, bit 31 signals uCode
+ * that all new ptr/size info is in place */
+ il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG,
+ il->ucode_code.len | BSM_DRAM_INST_LOAD);
+
+ D_INFO("Runtime uCode pointers are set.\n");
+
+ return 0;
+}
+
+/**
+ * il3945_init_alive_start - Called after N_ALIVE notification received
+ *
+ * Called after N_ALIVE notification received from "initialize" uCode.
+ *
+ * Tell "initialize" uCode to go ahead and load the runtime uCode.
+ */
+static void
+il3945_init_alive_start(struct il_priv *il)
+{
+ /* Check alive response for "valid" sign from uCode */
+ if (il->card_alive_init.is_valid != UCODE_VALID_OK) {
+ /* We had an error bringing up the hardware, so take it
+ * all the way back down so we can try again */
+ D_INFO("Initialize Alive failed.\n");
+ goto restart;
+ }
+
+ /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
+ * This is a paranoid check, because we would not have gotten the
+ * "initialize" alive if code weren't properly loaded. */
+ if (il3945_verify_ucode(il)) {
+ /* Runtime instruction load was bad;
+ * take it all the way back down so we can try again */
+ D_INFO("Bad \"initialize\" uCode load.\n");
+ goto restart;
+ }
+
+ /* Send pointers to protocol/runtime uCode image ... init code will
+ * load and launch runtime uCode, which will send us another "Alive"
+ * notification. */
+ D_INFO("Initialization Alive received.\n");
+ if (il3945_set_ucode_ptrs(il)) {
+ /* Runtime instruction load won't happen;
+ * take it all the way back down so we can try again */
+ D_INFO("Couldn't set up uCode pointers.\n");
+ goto restart;
+ }
+ return;
+
+restart:
+ queue_work(il->workqueue, &il->restart);
+}
+
+/**
+ * il3945_alive_start - called after N_ALIVE notification received
+ * from protocol/runtime uCode (initialization uCode's
+ * Alive gets handled by il3945_init_alive_start()).
+ */
+static void
+il3945_alive_start(struct il_priv *il)
+{
+ int thermal_spin = 0;
+ u32 rfkill;
+ struct il_rxon_context *ctx = &il->ctx;
+
+ D_INFO("Runtime Alive received.\n");
+
+ if (il->card_alive.is_valid != UCODE_VALID_OK) {
+ /* We had an error bringing up the hardware, so take it
+ * all the way back down so we can try again */
+ D_INFO("Alive failed.\n");
+ goto restart;
+ }
+
+ /* Initialize uCode has loaded Runtime uCode ... verify inst image.
+ * This is a paranoid check, because we would not have gotten the
+ * "runtime" alive if code weren't properly loaded. */
+ if (il3945_verify_ucode(il)) {
+ /* Runtime instruction load was bad;
+ * take it all the way back down so we can try again */
+ D_INFO("Bad runtime uCode load.\n");
+ goto restart;
+ }
+
+ rfkill = il_rd_prph(il, APMG_RFKILL_REG);
+ D_INFO("RFKILL status: 0x%x\n", rfkill);
+
+ if (rfkill & 0x1) {
+ clear_bit(S_RF_KILL_HW, &il->status);
+ /* if RFKILL is not on, then wait for thermal
+ * sensor in adapter to kick in */
+ while (il3945_hw_get_temperature(il) == 0) {
+ thermal_spin++;
+ udelay(10);
+ }
+
+ if (thermal_spin)
+ D_INFO("Thermal calibration took %dus\n",
+ thermal_spin * 10);
+ } else
+ set_bit(S_RF_KILL_HW, &il->status);
+
+ /* After the ALIVE response, we can send commands to 3945 uCode */
+ set_bit(S_ALIVE, &il->status);
+
+ /* Enable watchdog to monitor the driver tx queues */
+ il_setup_watchdog(il);
+
+ if (il_is_rfkill(il))
+ return;
+
+ ieee80211_wake_queues(il->hw);
+
+ il->active_rate = RATES_MASK_3945;
+
+ il_power_update_mode(il, true);
+
+ if (il_is_associated(il)) {
+ struct il3945_rxon_cmd *active_rxon =
+ (struct il3945_rxon_cmd *)(&ctx->active);
+
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ } else {
+ /* Initialize our rx_config data */
+ il_connection_init_rx_config(il, ctx);
+ }
+
+ /* Configure Bluetooth device coexistence support */
+ il_send_bt_config(il);
+
+ set_bit(S_READY, &il->status);
+
+ /* Configure the adapter for unassociated operation */
+ il3945_commit_rxon(il, ctx);
+
+ il3945_reg_txpower_periodic(il);
+
+ D_INFO("ALIVE processing complete.\n");
+ wake_up(&il->wait_command_queue);
+
+ return;
+
+restart:
+ queue_work(il->workqueue, &il->restart);
+}
+
+static void il3945_cancel_deferred_work(struct il_priv *il);
+
+static void
+__il3945_down(struct il_priv *il)
+{
+ unsigned long flags;
+ int exit_pending;
+
+ D_INFO(DRV_NAME " is going down\n");
+
+ il_scan_cancel_timeout(il, 200);
+
+ exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);
+
+ /* Stop the TX queue watchdog. The S_EXIT_PENDING bit must be set
+ * so the timer cannot be rearmed. */
+ del_timer_sync(&il->watchdog);
+
+ /* Station information will now be cleared in device */
+ il_clear_ucode_stations(il, NULL);
+ il_dealloc_bcast_stations(il);
+ il_clear_driver_stations(il);
+
+ /* Unblock any waiting calls */
+ wake_up_all(&il->wait_command_queue);
+
+ /* Wipe out the EXIT_PENDING status bit if we are not actually
+ * exiting the module */
+ if (!exit_pending)
+ clear_bit(S_EXIT_PENDING, &il->status);
+
+ /* stop and reset the on-board processor */
+ _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+ /* tell the device to stop sending interrupts */
+ spin_lock_irqsave(&il->lock, flags);
+ il_disable_interrupts(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+ il3945_synchronize_irq(il);
+
+ if (il->mac80211_registered)
+ ieee80211_stop_queues(il->hw);
+
+ /* If we have not previously called il3945_init() then
+ * clear all bits but the RF Kill bits and return */
+ if (!il_is_init(il)) {
+ il->status =
+ test_bit(S_RF_KILL_HW,
+ &il->
+ status) << S_RF_KILL_HW |
+ test_bit(S_GEO_CONFIGURED,
+ &il->
+ status) << S_GEO_CONFIGURED |
+ test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+ goto exit;
+ }
+
+ /* ...otherwise clear out all the status bits but the RF Kill
+ * bit and continue taking the NIC down. */
+ il->status &=
+ test_bit(S_RF_KILL_HW,
+ &il->status) << S_RF_KILL_HW | test_bit(S_GEO_CONFIGURED,
+ &il->
+ status) <<
+ S_GEO_CONFIGURED | test_bit(S_FW_ERROR,
+ &il->
+ status) << S_FW_ERROR |
+ test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+
+ il3945_hw_txq_ctx_stop(il);
+ il3945_hw_rxq_stop(il);
+
+ /* Power-down device's busmaster DMA clocks */
+ il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(5);
+
+ /* Stop the device, and put it in low power state */
+ il_apm_stop(il);
+
+exit:
+ memset(&il->card_alive, 0, sizeof(struct il_alive_resp));
+
+ if (il->beacon_skb)
+ dev_kfree_skb(il->beacon_skb);
+ il->beacon_skb = NULL;
+
+ /* clear out any free frames */
+ il3945_clear_free_frames(il);
+}
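
A standalone sketch (not driver code) of the status-preserving idiom used above: each bit of interest is tested and the 0/1 result shifted back into its position, which clears everything else. The bit positions below are made up for the example.

#include <stdio.h>

#define S_RF_KILL_HW	 1	/* illustrative bit positions only */
#define S_GEO_CONFIGURED 2
#define S_EXIT_PENDING	 5

static int test_bit(int nr, unsigned long *addr)
{
	return (*addr >> nr) & 1;
}

int main(void)
{
	unsigned long status = 0xff;	/* everything set */

	status = test_bit(S_RF_KILL_HW, &status) << S_RF_KILL_HW |
		 test_bit(S_GEO_CONFIGURED, &status) << S_GEO_CONFIGURED |
		 test_bit(S_EXIT_PENDING, &status) << S_EXIT_PENDING;

	printf("preserved status = 0x%lx\n", status);	/* 0x26 */
	return 0;
}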
+
+static void
+il3945_down(struct il_priv *il)
+{
+ mutex_lock(&il->mutex);
+ __il3945_down(il);
+ mutex_unlock(&il->mutex);
+
+ il3945_cancel_deferred_work(il);
+}
+
+#define MAX_HW_RESTARTS 5
+
+static int
+il3945_alloc_bcast_station(struct il_priv *il)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+ unsigned long flags;
+ u8 sta_id;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ sta_id = il_prep_station(il, ctx, il_bcast_addr, false, NULL);
+ if (sta_id == IL_INVALID_STATION) {
+ IL_ERR("Unable to prepare broadcast station\n");
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return -EINVAL;
+ }
+
+ il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
+ il->stations[sta_id].used |= IL_STA_BCAST;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return 0;
+}
+
+static int
+__il3945_up(struct il_priv *il)
+{
+ int rc, i;
+
+ rc = il3945_alloc_bcast_station(il);
+ if (rc)
+ return rc;
+
+ if (test_bit(S_EXIT_PENDING, &il->status)) {
+ IL_WARN("Exit pending; will not bring the NIC up\n");
+ return -EIO;
+ }
+
+ if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
+ IL_ERR("ucode not available for device bring up\n");
+ return -EIO;
+ }
+
+ /* If platform's RF_KILL switch is NOT set to KILL */
+ if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+ clear_bit(S_RF_KILL_HW, &il->status);
+ else {
+ set_bit(S_RF_KILL_HW, &il->status);
+ IL_WARN("Radio disabled by HW RF Kill switch\n");
+ return -ENODEV;
+ }
+
+ _il_wr(il, CSR_INT, 0xFFFFFFFF);
+
+ rc = il3945_hw_nic_init(il);
+ if (rc) {
+ IL_ERR("Unable to int nic\n");
+ return rc;
+ }
+
+ /* make sure rfkill handshake bits are cleared */
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ /* clear (again), then enable host interrupts */
+ _il_wr(il, CSR_INT, 0xFFFFFFFF);
+ il_enable_interrupts(il);
+
+ /* really make sure rfkill handshake bits are cleared */
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+
+ /* Copy original ucode data image from disk into backup cache.
+ * This will be used to initialize the on-board processor's
+ * data SRAM for a clean start when the runtime program first loads. */
+ memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
+ il->ucode_data.len);
+
+ /* We return success when we resume from suspend and rf_kill is on. */
+ if (test_bit(S_RF_KILL_HW, &il->status))
+ return 0;
+
+ for (i = 0; i < MAX_HW_RESTARTS; i++) {
+
+ /* load bootstrap state machine,
+ * load bootstrap program into processor's memory,
+ * prepare to load the "initialize" uCode */
+ rc = il->cfg->ops->lib->load_ucode(il);
+
+ if (rc) {
+ IL_ERR("Unable to set up bootstrap uCode: %d\n", rc);
+ continue;
+ }
+
+ /* start card; "initialize" will load runtime ucode */
+ il3945_nic_start(il);
+
+ D_INFO(DRV_NAME " is coming up\n");
+
+ return 0;
+ }
+
+ set_bit(S_EXIT_PENDING, &il->status);
+ __il3945_down(il);
+ clear_bit(S_EXIT_PENDING, &il->status);
+
+ /* Tried to restart and configure the device for as long as our
+ * patience could withstand */
+ IL_ERR("Unable to initialize device after %d attempts.\n", i);
+ return -EIO;
+}
+
+/*****************************************************************************
+ *
+ * Workqueue callbacks
+ *
+ *****************************************************************************/
+
+static void
+il3945_bg_init_alive_start(struct work_struct *data)
+{
+ struct il_priv *il =
+ container_of(data, struct il_priv, init_alive_start.work);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ goto out;
+
+ il3945_init_alive_start(il);
+out:
+ mutex_unlock(&il->mutex);
+}
+
+static void
+il3945_bg_alive_start(struct work_struct *data)
+{
+ struct il_priv *il =
+ container_of(data, struct il_priv, alive_start.work);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status) || il->txq == NULL)
+ goto out;
+
+ il3945_alive_start(il);
+out:
+ mutex_unlock(&il->mutex);
+}
+
+/*
+ * 3945 cannot interrupt driver when hardware rf kill switch toggles;
+ * driver must poll CSR_GP_CNTRL_REG register for change. This register
+ * *is* readable even when device has been SW_RESET into low power mode
+ * (e.g. during RF KILL).
+ */
+static void
+il3945_rfkill_poll(struct work_struct *data)
+{
+ struct il_priv *il =
+ container_of(data, struct il_priv, _3945.rfkill_poll.work);
+ bool old_rfkill = test_bit(S_RF_KILL_HW, &il->status);
+ bool new_rfkill =
+ !(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
+
+ if (new_rfkill != old_rfkill) {
+ if (new_rfkill)
+ set_bit(S_RF_KILL_HW, &il->status);
+ else
+ clear_bit(S_RF_KILL_HW, &il->status);
+
+ wiphy_rfkill_set_hw_state(il->hw->wiphy, new_rfkill);
+
+ D_RF_KILL("RF_KILL bit toggled to %s.\n",
+ new_rfkill ? "disable radio" : "enable radio");
+ }
+
+ /* Keep this running, even if radio now enabled. This will be
+ * cancelled in mac_start() if system decides to start again */
+ queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll,
+ round_jiffies_relative(2 * HZ));
+
+}
+
+int
+il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
+{
+ struct il_host_cmd cmd = {
+ .id = C_SCAN,
+ .len = sizeof(struct il3945_scan_cmd),
+ .flags = CMD_SIZE_HUGE,
+ };
+ struct il3945_scan_cmd *scan;
+ u8 n_probes = 0;
+ enum ieee80211_band band;
+ bool is_active = false;
+ int ret;
+ u16 len;
+
+ lockdep_assert_held(&il->mutex);
+
+ if (!il->scan_cmd) {
+ il->scan_cmd =
+ kmalloc(sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE,
+ GFP_KERNEL);
+ if (!il->scan_cmd) {
+ D_SCAN("Fail to allocate scan memory\n");
+ return -ENOMEM;
+ }
+ }
+ scan = il->scan_cmd;
+ memset(scan, 0, sizeof(struct il3945_scan_cmd) + IL_MAX_SCAN_SIZE);
+
+ scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
+ scan->quiet_time = IL_ACTIVE_QUIET_TIME;
+
+ if (il_is_associated(il)) {
+ u16 interval;
+ u32 extra;
+ u32 suspend_time = 100;
+ u32 scan_suspend_time = 100;
+
+ D_INFO("Scanning while associated...\n");
+
+ interval = vif->bss_conf.beacon_int;
+
+ scan->suspend_time = 0;
+ scan->max_out_time = cpu_to_le32(200 * 1024);
+ if (!interval)
+ interval = suspend_time;
+ /*
+ * suspend time format:
+ * 0-19: beacon interval in usec (time before exec.)
+ * 20-23: 0
+ * 24-31: number of beacons (suspend between channels)
+ */
+
+ extra = (suspend_time / interval) << 24;
+ scan_suspend_time =
+ 0xFF0FFFFF & (extra | ((suspend_time % interval) * 1024));
+
+ scan->suspend_time = cpu_to_le32(scan_suspend_time);
+ D_SCAN("suspend_time 0x%X beacon interval %d\n",
+ scan_suspend_time, interval);
+ }
+
+ if (il->scan_request->n_ssids) {
+ int i, p = 0;
+ D_SCAN("Kicking off active scan\n");
+ for (i = 0; i < il->scan_request->n_ssids; i++) {
+ /* always does wildcard anyway */
+ if (!il->scan_request->ssids[i].ssid_len)
+ continue;
+ scan->direct_scan[p].id = WLAN_EID_SSID;
+ scan->direct_scan[p].len =
+ il->scan_request->ssids[i].ssid_len;
+ memcpy(scan->direct_scan[p].ssid,
+ il->scan_request->ssids[i].ssid,
+ il->scan_request->ssids[i].ssid_len);
+ n_probes++;
+ p++;
+ }
+ is_active = true;
+ } else
+ D_SCAN("Kicking off passive scan.\n");
+
+ /* We don't build a direct scan probe request; the uCode will do
+ * that based on the direct_mask added to each channel entry */
+ scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
+ scan->tx_cmd.sta_id = il->ctx.bcast_sta_id;
+ scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+ /* flags + rate selection */
+
+ switch (il->scan_band) {
+ case IEEE80211_BAND_2GHZ:
+ scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
+ scan->tx_cmd.rate = RATE_1M_PLCP;
+ band = IEEE80211_BAND_2GHZ;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ scan->tx_cmd.rate = RATE_6M_PLCP;
+ band = IEEE80211_BAND_5GHZ;
+ break;
+ default:
+ IL_WARN("Invalid scan band\n");
+ return -EIO;
+ }
+
+ /*
+ * If active scanning is requested but a certain channel is marked
+ * passive, we can do active scanning on it once we detect transmissions.
+ * For passive-only scanning, disable switching to active on any channel.
+ */
+ scan->good_CRC_th =
+ is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER;
+
+ len =
+ il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
+ vif->addr, il->scan_request->ie,
+ il->scan_request->ie_len,
+ IL_MAX_SCAN_SIZE - sizeof(*scan));
+ scan->tx_cmd.len = cpu_to_le16(len);
+
+ /* select Rx antennas */
+ scan->flags |= il3945_get_antenna_flags(il);
+
+ scan->channel_count =
+ il3945_get_channels_for_scan(il, band, is_active, n_probes,
+ (void *)&scan->data[len], vif);
+ if (scan->channel_count == 0) {
+ D_SCAN("channel count %d\n", scan->channel_count);
+ return -EIO;
+ }
+
+ cmd.len +=
+ le16_to_cpu(scan->tx_cmd.len) +
+ scan->channel_count * sizeof(struct il3945_scan_channel);
+ cmd.data = scan;
+ scan->len = cpu_to_le16(cmd.len);
+
+ set_bit(S_SCAN_HW, &il->status);
+ ret = il_send_cmd_sync(il, &cmd);
+ if (ret)
+ clear_bit(S_SCAN_HW, &il->status);
+ return ret;
+}
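
A standalone sketch (not driver code) of the suspend_time packing used in the associated-scan path above: bits 24-31 carry how many whole beacon intervals to skip between channels, and the low 20 bits carry the remaining time converted to microseconds (TU * 1024). The example numbers are arbitrary.

#include <stdio.h>

int main(void)
{
	unsigned int suspend_time = 100;	/* TU */
	unsigned int interval = 40;		/* beacon interval, TU */

	unsigned int extra = (suspend_time / interval) << 24;
	unsigned int packed =
	    0xFF0FFFFF & (extra | ((suspend_time % interval) * 1024));

	printf("suspend_time 0x%08X (beacons=%u, usec=%u)\n",
	       packed, packed >> 24, packed & 0xFFFFF);
	return 0;
}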
+
+void
+il3945_post_scan(struct il_priv *il)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+
+ /*
+ * Since setting the RXON may have been deferred while
+ * performing the scan, fire one off if needed
+ */
+ if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
+ il3945_commit_rxon(il, ctx);
+}
+
+static void
+il3945_bg_restart(struct work_struct *data)
+{
+ struct il_priv *il = container_of(data, struct il_priv, restart);
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
+ mutex_lock(&il->mutex);
+ il->ctx.vif = NULL;
+ il->is_open = 0;
+ mutex_unlock(&il->mutex);
+ il3945_down(il);
+ ieee80211_restart_hw(il->hw);
+ } else {
+ il3945_down(il);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status)) {
+ mutex_unlock(&il->mutex);
+ return;
+ }
+
+ __il3945_up(il);
+ mutex_unlock(&il->mutex);
+ }
+}
+
+static void
+il3945_bg_rx_replenish(struct work_struct *data)
+{
+ struct il_priv *il = container_of(data, struct il_priv, rx_replenish);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ goto out;
+
+ il3945_rx_replenish(il);
+out:
+ mutex_unlock(&il->mutex);
+}
+
+void
+il3945_post_associate(struct il_priv *il)
+{
+ int rc = 0;
+ struct ieee80211_conf *conf = NULL;
+ struct il_rxon_context *ctx = &il->ctx;
+
+ if (!ctx->vif || !il->is_open)
+ return;
+
+ D_ASSOC("Associated as %d to: %pM\n", ctx->vif->bss_conf.aid,
+ ctx->active.bssid_addr);
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ il_scan_cancel_timeout(il, 200);
+
+ conf = &il->hw->conf;
+
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ il3945_commit_rxon(il, ctx);
+
+ rc = il_send_rxon_timing(il, ctx);
+ if (rc)
+ IL_WARN("C_RXON_TIMING failed - " "Attempting to continue.\n");
+
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+
+ ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);
+
+ D_ASSOC("assoc id %d beacon interval %d\n", ctx->vif->bss_conf.aid,
+ ctx->vif->bss_conf.beacon_int);
+
+ if (ctx->vif->bss_conf.use_short_preamble)
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+ if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+ if (ctx->vif->bss_conf.use_short_slot)
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+ }
+
+ il3945_commit_rxon(il, ctx);
+
+ switch (ctx->vif->type) {
+ case NL80211_IFTYPE_STATION:
+ il3945_rate_scale_init(il->hw, IL_AP_ID);
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ il3945_send_beacon_cmd(il);
+ break;
+ default:
+ IL_ERR("%s Should not be called in %d mode\n", __func__,
+ ctx->vif->type);
+ break;
+ }
+}
+
+/*****************************************************************************
+ *
+ * mac80211 entry point functions
+ *
+ *****************************************************************************/
+
+#define UCODE_READY_TIMEOUT (2 * HZ)
+
+static int
+il3945_mac_start(struct ieee80211_hw *hw)
+{
+ struct il_priv *il = hw->priv;
+ int ret;
+
+ D_MAC80211("enter\n");
+
+ /* we should be verifying the device is ready to be opened */
+ mutex_lock(&il->mutex);
+
+ /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
+ * ucode filename and max sizes are card-specific. */
+
+ if (!il->ucode_code.len) {
+ ret = il3945_read_ucode(il);
+ if (ret) {
+ IL_ERR("Could not read microcode: %d\n", ret);
+ mutex_unlock(&il->mutex);
+ goto out_release_irq;
+ }
+ }
+
+ ret = __il3945_up(il);
+
+ mutex_unlock(&il->mutex);
+
+ if (ret)
+ goto out_release_irq;
+
+ D_INFO("Start UP work.\n");
+
+ /* Wait for START_ALIVE from ucode. Otherwise callbacks from
+ * mac80211 will not be run successfully. */
+ ret = wait_event_timeout(il->wait_command_queue,
+ test_bit(S_READY, &il->status),
+ UCODE_READY_TIMEOUT);
+ if (!ret) {
+ if (!test_bit(S_READY, &il->status)) {
+ IL_ERR("Wait for START_ALIVE timeout after %dms.\n",
+ jiffies_to_msecs(UCODE_READY_TIMEOUT));
+ ret = -ETIMEDOUT;
+ goto out_release_irq;
+ }
+ }
+
+ /* ucode is running and will send rfkill notifications,
+ * no need to poll the killswitch state anymore */
+ cancel_delayed_work(&il->_3945.rfkill_poll);
+
+ il->is_open = 1;
+ D_MAC80211("leave\n");
+ return 0;
+
+out_release_irq:
+ il->is_open = 0;
+ D_MAC80211("leave - failed\n");
+ return ret;
+}
+
+static void
+il3945_mac_stop(struct ieee80211_hw *hw)
+{
+ struct il_priv *il = hw->priv;
+
+ D_MAC80211("enter\n");
+
+ if (!il->is_open) {
+ D_MAC80211("leave - skip\n");
+ return;
+ }
+
+ il->is_open = 0;
+
+ il3945_down(il);
+
+ flush_workqueue(il->workqueue);
+
+ /* start polling the killswitch state again */
+ queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll,
+ round_jiffies_relative(2 * HZ));
+
+ D_MAC80211("leave\n");
+}
+
+static void
+il3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct il_priv *il = hw->priv;
+
+ D_MAC80211("enter\n");
+
+ D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
+ ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
+
+ if (il3945_tx_skb(il, skb))
+ dev_kfree_skb_any(skb);
+
+ D_MAC80211("leave\n");
+}
+
+void
+il3945_config_ap(struct il_priv *il)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+ struct ieee80211_vif *vif = ctx->vif;
+ int rc = 0;
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ /* The following should be done only at AP bring up */
+ if (!(il_is_associated(il))) {
+
+ /* RXON - unassoc (to set timing command) */
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ il3945_commit_rxon(il, ctx);
+
+ /* RXON Timing */
+ rc = il_send_rxon_timing(il, ctx);
+ if (rc)
+ IL_WARN("C_RXON_TIMING failed - "
+ "Attempting to continue.\n");
+
+ ctx->staging.assoc_id = 0;
+
+ if (vif->bss_conf.use_short_preamble)
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+ if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+ if (vif->bss_conf.use_short_slot)
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+ }
+ /* restore RXON assoc */
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ il3945_commit_rxon(il, ctx);
+ }
+ il3945_send_beacon_cmd(il);
+}
+
+static int
+il3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct il_priv *il = hw->priv;
+ int ret = 0;
+ u8 sta_id = IL_INVALID_STATION;
+ u8 static_key;
+
+ D_MAC80211("enter\n");
+
+ if (il3945_mod_params.sw_crypto) {
+ D_MAC80211("leave - hwcrypto disabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ /*
+ * To support IBSS RSN, don't program group keys in IBSS, the
+ * hardware will then not attempt to decrypt the frames.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ return -EOPNOTSUPP;
+
+ static_key = !il_is_associated(il);
+
+ if (!static_key) {
+ sta_id = il_sta_id_or_broadcast(il, &il->ctx, sta);
+ if (sta_id == IL_INVALID_STATION)
+ return -EINVAL;
+ }
+
+ mutex_lock(&il->mutex);
+ il_scan_cancel_timeout(il, 100);
+
+ switch (cmd) {
+ case SET_KEY:
+ if (static_key)
+ ret = il3945_set_static_key(il, key);
+ else
+ ret = il3945_set_dynamic_key(il, key, sta_id);
+ D_MAC80211("enable hwcrypto key\n");
+ break;
+ case DISABLE_KEY:
+ if (static_key)
+ ret = il3945_remove_static_key(il);
+ else
+ ret = il3945_clear_sta_key_info(il, sta_id);
+ D_MAC80211("disable hwcrypto key\n");
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&il->mutex);
+ D_MAC80211("leave\n");
+
+ return ret;
+}
+
+static int
+il3945_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct il_priv *il = hw->priv;
+ struct il3945_sta_priv *sta_priv = (void *)sta->drv_priv;
+ int ret;
+ bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+ u8 sta_id;
+
+ D_INFO("received request to add station %pM\n", sta->addr);
+ mutex_lock(&il->mutex);
+ D_INFO("proceeding to add station %pM\n", sta->addr);
+ sta_priv->common.sta_id = IL_INVALID_STATION;
+
+ ret =
+ il_add_station_common(il, &il->ctx, sta->addr, is_ap, sta, &sta_id);
+ if (ret) {
+ IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
+ /* Should we return success if return code is EEXIST ? */
+ mutex_unlock(&il->mutex);
+ return ret;
+ }
+
+ sta_priv->common.sta_id = sta_id;
+
+ /* Initialize rate scaling */
+ D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
+ il3945_rs_rate_init(il, sta, sta_id);
+ mutex_unlock(&il->mutex);
+
+ return 0;
+}
+
+static void
+il3945_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
+{
+ struct il_priv *il = hw->priv;
+ __le32 filter_or = 0, filter_nand = 0;
+ struct il_rxon_context *ctx = &il->ctx;
+
+#define CHK(test, flag) do { \
+ if (*total_flags & (test)) \
+ filter_or |= (flag); \
+ else \
+ filter_nand |= (flag); \
+ } while (0)
+
+ D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
+ *total_flags);
+
+ CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+ CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
+ CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+ mutex_lock(&il->mutex);
+
+ ctx->staging.filter_flags &= ~filter_nand;
+ ctx->staging.filter_flags |= filter_or;
+
+ /*
+ * Not committing the RXON directly: the hardware may be in the middle
+ * of a scan, and even when it is not, committing here has been seen to
+ * break things; the filter-flags change is committed later anyway.
+ */
+
+ mutex_unlock(&il->mutex);
+
+ /*
+ * Receiving all multicast frames is always enabled by the
+ * default flags setup in il_connection_init_rx_config()
+ * since we currently do not support programming multicast
+ * filters into the device.
+ */
+ *total_flags &=
+ FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+ FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
+/*****************************************************************************
+ *
+ * sysfs attributes
+ *
+ *****************************************************************************/
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+
+/*
+ * The following adds a new attribute to the sysfs representation
+ * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
+ * used for controlling the debug level.
+ *
+ * See the level definitions in iwl for details.
+ *
+ * The debug_level managed via sysfs below is a per-device debug level;
+ * when set, it is used instead of the global debug level.
+ */
+static ssize_t
+il3945_show_debug_level(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
+}
+
+static ssize_t
+il3945_store_debug_level(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret)
+ IL_INFO("%s is not in hex or decimal form.\n", buf);
+ else {
+ il->debug_level = val;
+ if (il_alloc_traffic_mem(il))
+ IL_ERR("Not enough memory to generate traffic log\n");
+ }
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il3945_show_debug_level,
+ il3945_store_debug_level);
+
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
+static ssize_t
+il3945_show_temperature(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ return sprintf(buf, "%d\n", il3945_hw_get_temperature(il));
+}
+
+static DEVICE_ATTR(temperature, S_IRUGO, il3945_show_temperature, NULL);
+
+static ssize_t
+il3945_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ return sprintf(buf, "%d\n", il->tx_power_user_lmt);
+}
+
+static ssize_t
+il3945_store_tx_power(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ char *p = (char *)buf;
+ u32 val;
+
+ val = simple_strtoul(p, &p, 10);
+ if (p == buf)
+ IL_INFO("%s is not in decimal form.\n", buf);
+ else
+ il3945_hw_reg_set_txpower(il, val);
+
+ return count;
+}
+
+static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il3945_show_tx_power,
+ il3945_store_tx_power);
+
+static ssize_t
+il3945_show_flags(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ struct il_rxon_context *ctx = &il->ctx;
+
+ return sprintf(buf, "0x%04X\n", ctx->active.flags);
+}
+
+static ssize_t
+il3945_store_flags(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ u32 flags = simple_strtoul(buf, NULL, 0);
+ struct il_rxon_context *ctx = &il->ctx;
+
+ mutex_lock(&il->mutex);
+ if (le32_to_cpu(ctx->staging.flags) != flags) {
+ /* Cancel any currently running scans... */
+ if (il_scan_cancel_timeout(il, 100))
+ IL_WARN("Could not cancel scan.\n");
+ else {
+ D_INFO("Committing rxon.flags = 0x%04X\n", flags);
+ ctx->staging.flags = cpu_to_le32(flags);
+ il3945_commit_rxon(il, ctx);
+ }
+ }
+ mutex_unlock(&il->mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, il3945_show_flags,
+ il3945_store_flags);
+
+static ssize_t
+il3945_show_filter_flags(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ struct il_rxon_context *ctx = &il->ctx;
+
+ return sprintf(buf, "0x%04X\n", le32_to_cpu(ctx->active.filter_flags));
+}
+
+static ssize_t
+il3945_store_filter_flags(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ struct il_rxon_context *ctx = &il->ctx;
+ u32 filter_flags = simple_strtoul(buf, NULL, 0);
+
+ mutex_lock(&il->mutex);
+ if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
+ /* Cancel any currently running scans... */
+ if (il_scan_cancel_timeout(il, 100))
+ IL_WARN("Could not cancel scan.\n");
+ else {
+ D_INFO("Committing rxon.filter_flags = " "0x%04X\n",
+ filter_flags);
+ ctx->staging.filter_flags = cpu_to_le32(filter_flags);
+ il3945_commit_rxon(il, ctx);
+ }
+ }
+ mutex_unlock(&il->mutex);
+
+ return count;
+}
+
+static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, il3945_show_filter_flags,
+ il3945_store_filter_flags);
+
+static ssize_t
+il3945_show_measurement(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ struct il_spectrum_notification measure_report;
+ u32 size = sizeof(measure_report), len = 0, ofs = 0;
+ u8 *data = (u8 *) &measure_report;
+ unsigned long flags;
+
+ spin_lock_irqsave(&il->lock, flags);
+ if (!(il->measurement_status & MEASUREMENT_READY)) {
+ spin_unlock_irqrestore(&il->lock, flags);
+ return 0;
+ }
+ memcpy(&measure_report, &il->measure_report, size);
+ il->measurement_status = 0;
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ while (size && PAGE_SIZE - len) {
+ hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
+ PAGE_SIZE - len, 1);
+ len = strlen(buf);
+ if (PAGE_SIZE - len)
+ buf[len++] = '\n';
+
+ ofs += 16;
+ size -= min(size, 16U);
+ }
+
+ return len;
+}
+
+static ssize_t
+il3945_store_measurement(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ struct il_rxon_context *ctx = &il->ctx;
+ struct ieee80211_measurement_params params = {
+ .channel = le16_to_cpu(ctx->active.channel),
+ .start_time = cpu_to_le64(il->_3945.last_tsf),
+ .duration = cpu_to_le16(1),
+ };
+ u8 type = IL_MEASURE_BASIC;
+ u8 buffer[32];
+ u8 channel;
+
+ if (count) {
+ char *p = buffer;
+ strncpy(buffer, buf, min(sizeof(buffer) - 1, count));
+ buffer[min(sizeof(buffer) - 1, count)] = '\0';
+ channel = simple_strtoul(p, NULL, 0);
+ if (channel)
+ params.channel = channel;
+
+ p = buffer;
+ while (*p && *p != ' ')
+ p++;
+ if (*p)
+ type = simple_strtoul(p + 1, NULL, 0);
+ }
+
+ D_INFO("Invoking measurement of type %d on " "channel %d (for '%s')\n",
+ type, params.channel, buf);
+ il3945_get_measurement(il, &params, type);
+
+ return count;
+}
+
+static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR, il3945_show_measurement,
+ il3945_store_measurement);
+
+static ssize_t
+il3945_store_retry_rate(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+
+ il->retry_rate = simple_strtoul(buf, NULL, 0);
+ if (il->retry_rate <= 0)
+ il->retry_rate = 1;
+
+ return count;
+}
+
+static ssize_t
+il3945_show_retry_rate(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ return sprintf(buf, "%d", il->retry_rate);
+}
+
+static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, il3945_show_retry_rate,
+ il3945_store_retry_rate);
+
+static ssize_t
+il3945_show_channels(struct device *d, struct device_attribute *attr, char *buf)
+{
+ /* none of this really belongs in sysfs anyway */
+ return 0;
+}
+
+static DEVICE_ATTR(channels, S_IRUSR, il3945_show_channels, NULL);
+
+static ssize_t
+il3945_show_antenna(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ return sprintf(buf, "%d\n", il3945_mod_params.antenna);
+}
+
+static ssize_t
+il3945_store_antenna(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il __maybe_unused = dev_get_drvdata(d);
+ int ant;
+
+ if (count == 0)
+ return 0;
+
+ if (sscanf(buf, "%1i", &ant) != 1) {
+ D_INFO("not in hex or decimal form.\n");
+ return count;
+ }
+
+ if (ant >= 0 && ant <= 2) {
+ D_INFO("Setting antenna select to %d.\n", ant);
+ il3945_mod_params.antenna = (enum il3945_antenna)ant;
+ } else
+ D_INFO("Bad antenna select value %d.\n", ant);
+
+ return count;
+}
+
+static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, il3945_show_antenna,
+ il3945_store_antenna);
+
+static ssize_t
+il3945_show_status(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ if (!il_is_alive(il))
+ return -EAGAIN;
+ return sprintf(buf, "0x%08x\n", (int)il->status);
+}
+
+static DEVICE_ATTR(status, S_IRUGO, il3945_show_status, NULL);
+
+static ssize_t
+il3945_dump_error_log(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ char *p = (char *)buf;
+
+ if (p[0] == '1')
+ il3945_dump_nic_error_log(il);
+
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, il3945_dump_error_log);
+
+/*****************************************************************************
+ *
+ * driver setup and tear down
+ *
+ *****************************************************************************/
+
+static void
+il3945_setup_deferred_work(struct il_priv *il)
+{
+ il->workqueue = create_singlethread_workqueue(DRV_NAME);
+
+ init_waitqueue_head(&il->wait_command_queue);
+
+ INIT_WORK(&il->restart, il3945_bg_restart);
+ INIT_WORK(&il->rx_replenish, il3945_bg_rx_replenish);
+ INIT_DELAYED_WORK(&il->init_alive_start, il3945_bg_init_alive_start);
+ INIT_DELAYED_WORK(&il->alive_start, il3945_bg_alive_start);
+ INIT_DELAYED_WORK(&il->_3945.rfkill_poll, il3945_rfkill_poll);
+
+ il_setup_scan_deferred_work(il);
+
+ il3945_hw_setup_deferred_work(il);
+
+ init_timer(&il->watchdog);
+ il->watchdog.data = (unsigned long)il;
+ il->watchdog.function = il_bg_watchdog;
+
+ tasklet_init(&il->irq_tasklet,
+ (void (*)(unsigned long))il3945_irq_tasklet,
+ (unsigned long)il);
+}
+
+static void
+il3945_cancel_deferred_work(struct il_priv *il)
+{
+ il3945_hw_cancel_deferred_work(il);
+
+ cancel_delayed_work_sync(&il->init_alive_start);
+ cancel_delayed_work(&il->alive_start);
+
+ il_cancel_scan_deferred_work(il);
+}
+
+static struct attribute *il3945_sysfs_entries[] = {
+ &dev_attr_antenna.attr,
+ &dev_attr_channels.attr,
+ &dev_attr_dump_errors.attr,
+ &dev_attr_flags.attr,
+ &dev_attr_filter_flags.attr,
+ &dev_attr_measurement.attr,
+ &dev_attr_retry_rate.attr,
+ &dev_attr_status.attr,
+ &dev_attr_temperature.attr,
+ &dev_attr_tx_power.attr,
+#ifdef CONFIG_IWLEGACY_DEBUG
+ &dev_attr_debug_level.attr,
+#endif
+ NULL
+};
+
+static struct attribute_group il3945_attribute_group = {
+ .name = NULL, /* put in device directory */
+ .attrs = il3945_sysfs_entries,
+};
+
+struct ieee80211_ops il3945_hw_ops = {
+ .tx = il3945_mac_tx,
+ .start = il3945_mac_start,
+ .stop = il3945_mac_stop,
+ .add_interface = il_mac_add_interface,
+ .remove_interface = il_mac_remove_interface,
+ .change_interface = il_mac_change_interface,
+ .config = il_mac_config,
+ .configure_filter = il3945_configure_filter,
+ .set_key = il3945_mac_set_key,
+ .conf_tx = il_mac_conf_tx,
+ .reset_tsf = il_mac_reset_tsf,
+ .bss_info_changed = il_mac_bss_info_changed,
+ .hw_scan = il_mac_hw_scan,
+ .sta_add = il3945_mac_sta_add,
+ .sta_remove = il_mac_sta_remove,
+ .tx_last_beacon = il_mac_tx_last_beacon,
+};
+
+static int
+il3945_init_drv(struct il_priv *il)
+{
+ int ret;
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+
+ il->retry_rate = 1;
+ il->beacon_skb = NULL;
+
+ spin_lock_init(&il->sta_lock);
+ spin_lock_init(&il->hcmd_lock);
+
+ INIT_LIST_HEAD(&il->free_frames);
+
+ mutex_init(&il->mutex);
+
+ il->ieee_channels = NULL;
+ il->ieee_rates = NULL;
+ il->band = IEEE80211_BAND_2GHZ;
+
+ il->iw_mode = NL80211_IFTYPE_STATION;
+ il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
+
+ /* initialize force reset */
+ il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;
+
+ if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
+ IL_WARN("Unsupported EEPROM version: 0x%04X\n",
+ eeprom->version);
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = il_init_channel_map(il);
+ if (ret) {
+ IL_ERR("initializing regulatory failed: %d\n", ret);
+ goto err;
+ }
+
+ /* Set up txpower settings in driver for all channels */
+ if (il3945_txpower_set_from_eeprom(il)) {
+ ret = -EIO;
+ goto err_free_channel_map;
+ }
+
+ ret = il_init_geos(il);
+ if (ret) {
+ IL_ERR("initializing geos failed: %d\n", ret);
+ goto err_free_channel_map;
+ }
+ il3945_init_hw_rates(il, il->ieee_rates);
+
+ return 0;
+
+err_free_channel_map:
+ il_free_channel_map(il);
+err:
+ return ret;
+}
+
+#define IL3945_MAX_PROBE_REQUEST 200
+
+static int
+il3945_setup_mac(struct il_priv *il)
+{
+ int ret;
+ struct ieee80211_hw *hw = il->hw;
+
+ hw->rate_control_algorithm = "iwl-3945-rs";
+ hw->sta_data_size = sizeof(struct il3945_sta_priv);
+ hw->vif_data_size = sizeof(struct il_vif_priv);
+
+ /* Tell mac80211 our characteristics */
+ hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_SPECTRUM_MGMT;
+
+ hw->wiphy->interface_modes = il->ctx.interface_modes;
+
+ hw->wiphy->flags |=
+ WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS |
+ WIPHY_FLAG_IBSS_RSN;
+
+ hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
+ /* we create the 802.11 header and a zero-length SSID element */
+ hw->wiphy->max_scan_ie_len = IL3945_MAX_PROBE_REQUEST - 24 - 2;
+
+ /* Default value; 4 EDCA QOS priorities */
+ hw->queues = 4;
+
+ if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
+ il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &il->bands[IEEE80211_BAND_2GHZ];
+
+ if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
+ il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &il->bands[IEEE80211_BAND_5GHZ];
+
+ il_leds_init(il);
+
+ ret = ieee80211_register_hw(il->hw);
+ if (ret) {
+ IL_ERR("Failed to register hw (error %d)\n", ret);
+ return ret;
+ }
+ il->mac80211_registered = 1;
+
+ return 0;
+}
+
+static int
+il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int err = 0;
+ struct il_priv *il;
+ struct ieee80211_hw *hw;
+ struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
+ struct il3945_eeprom *eeprom;
+ unsigned long flags;
+
+ /***********************
+ * 1. Allocating HW data
+ * ********************/
+
+ /* mac80211 allocates memory for this device instance, including
+ * space for this driver's private data structure */
+ hw = il_alloc_all(cfg);
+ if (hw == NULL) {
+ pr_err("Can not allocate network device\n");
+ err = -ENOMEM;
+ goto out;
+ }
+ il = hw->priv;
+ SET_IEEE80211_DEV(hw, &pdev->dev);
+
+ il->cmd_queue = IL39_CMD_QUEUE_NUM;
+
+ il->ctx.ctxid = 0;
+
+ il->ctx.rxon_cmd = C_RXON;
+ il->ctx.rxon_timing_cmd = C_RXON_TIMING;
+ il->ctx.rxon_assoc_cmd = C_RXON_ASSOC;
+ il->ctx.qos_cmd = C_QOS_PARAM;
+ il->ctx.ap_sta_id = IL_AP_ID;
+ il->ctx.wep_key_cmd = C_WEPKEY;
+ il->ctx.interface_modes =
+ BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
+ il->ctx.ibss_devtype = RXON_DEV_TYPE_IBSS;
+ il->ctx.station_devtype = RXON_DEV_TYPE_ESS;
+ il->ctx.unused_devtype = RXON_DEV_TYPE_ESS;
+
+ /*
+ * Disabling hardware scan means that mac80211 will perform scans
+ * "the hard way", rather than using device's scan.
+ */
+ if (il3945_mod_params.disable_hw_scan) {
+ D_INFO("Disabling hw_scan\n");
+ il3945_hw_ops.hw_scan = NULL;
+ }
+
+ D_INFO("*** LOAD DRIVER ***\n");
+ il->cfg = cfg;
+ il->pci_dev = pdev;
+ il->inta_mask = CSR_INI_SET_MASK;
+
+ if (il_alloc_traffic_mem(il))
+ IL_ERR("Not enough memory to generate traffic log\n");
+
+ /***************************
+ * 2. Initializing PCI bus
+ * *************************/
+ pci_disable_link_state(pdev,
+ PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
+ if (pci_enable_device(pdev)) {
+ err = -ENODEV;
+ goto out_ieee80211_free_hw;
+ }
+
+ pci_set_master(pdev);
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ IL_WARN("No suitable DMA available.\n");
+ goto out_pci_disable_device;
+ }
+
+ pci_set_drvdata(pdev, il);
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err)
+ goto out_pci_disable_device;
+
+ /***********************
+ * 3. Read REV Register
+ * ********************/
+ il->hw_base = pci_iomap(pdev, 0, 0);
+ if (!il->hw_base) {
+ err = -ENODEV;
+ goto out_pci_release_regions;
+ }
+
+ D_INFO("pci_resource_len = 0x%08llx\n",
+ (unsigned long long)pci_resource_len(pdev, 0));
+ D_INFO("pci_resource_base = %p\n", il->hw_base);
+
+ /* We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state */
+ pci_write_config_byte(pdev, 0x41, 0x00);
+
+ /* these spin locks will be used in apm_ops.init and EEPROM access
+ * we should init now
+ */
+ spin_lock_init(&il->reg_lock);
+ spin_lock_init(&il->lock);
+
+ /*
+ * stop and reset the on-board processor just in case it is in a
+ * strange state ... like being left stranded by a primary kernel
+ * and this is now the kdump kernel trying to start up
+ */
+ _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+ /***********************
+ * 4. Read EEPROM
+ * ********************/
+
+ /* Read the EEPROM */
+ err = il_eeprom_init(il);
+ if (err) {
+ IL_ERR("Unable to init EEPROM\n");
+ goto out_iounmap;
+ }
+ /* MAC Address location in EEPROM same for 3945/4965 */
+ eeprom = (struct il3945_eeprom *)il->eeprom;
+ D_INFO("MAC address: %pM\n", eeprom->mac_address);
+ SET_IEEE80211_PERM_ADDR(il->hw, eeprom->mac_address);
+
+ /***********************
+ * 5. Setup HW Constants
+ * ********************/
+ /* Device-specific setup */
+ if (il3945_hw_set_hw_params(il)) {
+ IL_ERR("failed to set hw settings\n");
+ goto out_eeprom_free;
+ }
+
+ /***********************
+ * 6. Setup il
+ * ********************/
+
+ err = il3945_init_drv(il);
+ if (err) {
+ IL_ERR("initializing driver failed\n");
+ goto out_unset_hw_params;
+ }
+
+ IL_INFO("Detected Intel Wireless WiFi Link %s\n", il->cfg->name);
+
+ /***********************
+ * 7. Setup Services
+ * ********************/
+
+ spin_lock_irqsave(&il->lock, flags);
+ il_disable_interrupts(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ pci_enable_msi(il->pci_dev);
+
+ err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
+ if (err) {
+ IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
+ goto out_disable_msi;
+ }
+
+ err = sysfs_create_group(&pdev->dev.kobj, &il3945_attribute_group);
+ if (err) {
+ IL_ERR("failed to create sysfs device attributes\n");
+ goto out_release_irq;
+ }
+
+ il_set_rxon_channel(il, &il->bands[IEEE80211_BAND_2GHZ].channels[5],
+ &il->ctx);
+ il3945_setup_deferred_work(il);
+ il3945_setup_handlers(il);
+ il_power_initialize(il);
+
+ /*********************************
+ * 8. Setup and Register mac80211
+ * *******************************/
+
+ il_enable_interrupts(il);
+
+ err = il3945_setup_mac(il);
+ if (err)
+ goto out_remove_sysfs;
+
+ err = il_dbgfs_register(il, DRV_NAME);
+ if (err)
+ IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
+ err);
+
+ /* Start monitoring the killswitch */
+ queue_delayed_work(il->workqueue, &il->_3945.rfkill_poll, 2 * HZ);
+
+ return 0;
+
+out_remove_sysfs:
+ destroy_workqueue(il->workqueue);
+ il->workqueue = NULL;
+ sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);
+out_release_irq:
+ free_irq(il->pci_dev->irq, il);
+out_disable_msi:
+ pci_disable_msi(il->pci_dev);
+ il_free_geos(il);
+ il_free_channel_map(il);
+out_unset_hw_params:
+ il3945_unset_hw_params(il);
+out_eeprom_free:
+ il_eeprom_free(il);
+out_iounmap:
+ pci_iounmap(pdev, il->hw_base);
+out_pci_release_regions:
+ pci_release_regions(pdev);
+out_pci_disable_device:
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+out_ieee80211_free_hw:
+ il_free_traffic_mem(il);
+ ieee80211_free_hw(il->hw);
+out:
+ return err;
+}
+
+static void __devexit
+il3945_pci_remove(struct pci_dev *pdev)
+{
+ struct il_priv *il = pci_get_drvdata(pdev);
+ unsigned long flags;
+
+ if (!il)
+ return;
+
+ D_INFO("*** UNLOAD DRIVER ***\n");
+
+ il_dbgfs_unregister(il);
+
+ set_bit(S_EXIT_PENDING, &il->status);
+
+ il_leds_exit(il);
+
+ if (il->mac80211_registered) {
+ ieee80211_unregister_hw(il->hw);
+ il->mac80211_registered = 0;
+ } else {
+ il3945_down(il);
+ }
+
+ /*
+ * Make sure device is reset to low power before unloading driver.
+ * This may be redundant with il_down(), but there are paths to
+ * run il_down() without calling apm_ops.stop(), and there are
+ * paths to avoid running il_down() at all before leaving driver.
+ * This (inexpensive) call *makes sure* device is reset.
+ */
+ il_apm_stop(il);
+
+ /* make sure we flush any pending irq or
+ * tasklet for the driver
+ */
+ spin_lock_irqsave(&il->lock, flags);
+ il_disable_interrupts(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ il3945_synchronize_irq(il);
+
+ sysfs_remove_group(&pdev->dev.kobj, &il3945_attribute_group);
+
+ cancel_delayed_work_sync(&il->_3945.rfkill_poll);
+
+ il3945_dealloc_ucode_pci(il);
+
+ if (il->rxq.bd)
+ il3945_rx_queue_free(il, &il->rxq);
+ il3945_hw_txq_ctx_free(il);
+
+ il3945_unset_hw_params(il);
+
+ /*netif_stop_queue(dev); */
+ flush_workqueue(il->workqueue);
+
+ /* ieee80211_unregister_hw calls il3945_mac_stop, which flushes
+ * il->workqueue... so we can't take down the workqueue
+ * until now... */
+ destroy_workqueue(il->workqueue);
+ il->workqueue = NULL;
+ il_free_traffic_mem(il);
+
+ free_irq(pdev->irq, il);
+ pci_disable_msi(pdev);
+
+ pci_iounmap(pdev, il->hw_base);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ il_free_channel_map(il);
+ il_free_geos(il);
+ kfree(il->scan_cmd);
+ if (il->beacon_skb)
+ dev_kfree_skb(il->beacon_skb);
+
+ ieee80211_free_hw(il->hw);
+}
+
+/*****************************************************************************
+ *
+ * driver and module entry point
+ *
+ *****************************************************************************/
+
+static struct pci_driver il3945_driver = {
+ .name = DRV_NAME,
+ .id_table = il3945_hw_card_ids,
+ .probe = il3945_pci_probe,
+ .remove = __devexit_p(il3945_pci_remove),
+ .driver.pm = IL_LEGACY_PM_OPS,
+};
+
+static int __init
+il3945_init(void)
+{
+ int ret;
+
+ pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
+ pr_info(DRV_COPYRIGHT "\n");
+
+ ret = il3945_rate_control_register();
+ if (ret) {
+ pr_err("Unable to register rate control algorithm: %d\n", ret);
+ return ret;
+ }
+
+ ret = pci_register_driver(&il3945_driver);
+ if (ret) {
+ pr_err("Unable to initialize PCI module\n");
+ goto error_register;
+ }
+
+ return ret;
+
+error_register:
+ il3945_rate_control_unregister();
+ return ret;
+}
+
+static void __exit
+il3945_exit(void)
+{
+ pci_unregister_driver(&il3945_driver);
+ il3945_rate_control_unregister();
+}
+
+MODULE_FIRMWARE(IL3945_MODULE_FIRMWARE(IL3945_UCODE_API_MAX));
+
+module_param_named(antenna, il3945_mod_params.antenna, int, S_IRUGO);
+MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
+module_param_named(swcrypto, il3945_mod_params.sw_crypto, int, S_IRUGO);
+MODULE_PARM_DESC(swcrypto, "use software crypto (default 1 [software])");
+module_param_named(disable_hw_scan, il3945_mod_params.disable_hw_scan, int,
+ S_IRUGO);
+MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)");
+#ifdef CONFIG_IWLEGACY_DEBUG
+module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "debug output mask");
+#endif
+module_param_named(fw_restart, il3945_mod_params.restart_fw, int, S_IRUGO);
+MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
+
+module_exit(il3945_exit);
+module_init(il3945_init);
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
new file mode 100644
index 0000000..d7a83f2
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -0,0 +1,986 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+
+#include <linux/workqueue.h>
+
+#include "commands.h"
+#include "3945.h"
+
+#define RS_NAME "iwl-3945-rs"
+
+static s32 il3945_expected_tpt_g[RATE_COUNT_3945] = {
+ 7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
+};
+
+static s32 il3945_expected_tpt_g_prot[RATE_COUNT_3945] = {
+ 7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125
+};
+
+static s32 il3945_expected_tpt_a[RATE_COUNT_3945] = {
+ 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186
+};
+
+static s32 il3945_expected_tpt_b[RATE_COUNT_3945] = {
+ 7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+struct il3945_tpt_entry {
+ s8 min_rssi;
+ u8 idx;
+};
+
+static struct il3945_tpt_entry il3945_tpt_table_a[] = {
+ {-60, RATE_54M_IDX},
+ {-64, RATE_48M_IDX},
+ {-72, RATE_36M_IDX},
+ {-80, RATE_24M_IDX},
+ {-84, RATE_18M_IDX},
+ {-85, RATE_12M_IDX},
+ {-87, RATE_9M_IDX},
+ {-89, RATE_6M_IDX}
+};
+
+static struct il3945_tpt_entry il3945_tpt_table_g[] = {
+ {-60, RATE_54M_IDX},
+ {-64, RATE_48M_IDX},
+ {-68, RATE_36M_IDX},
+ {-80, RATE_24M_IDX},
+ {-84, RATE_18M_IDX},
+ {-85, RATE_12M_IDX},
+ {-86, RATE_11M_IDX},
+ {-88, RATE_5M_IDX},
+ {-90, RATE_2M_IDX},
+ {-92, RATE_1M_IDX}
+};
+
+#define RATE_MAX_WINDOW 62
+#define RATE_FLUSH (3*HZ)
+#define RATE_WIN_FLUSH (HZ/2)
+#define IL39_RATE_HIGH_TH 11520
+#define IL_SUCCESS_UP_TH 8960
+#define IL_SUCCESS_DOWN_TH 10880
+#define RATE_MIN_FAILURE_TH 6
+#define RATE_MIN_SUCCESS_TH 8
+#define RATE_DECREASE_TH 1920
+#define RATE_RETRY_TH 15
+
+static u8
+il3945_get_rate_idx_by_rssi(s32 rssi, enum ieee80211_band band)
+{
+ u32 idx = 0;
+ u32 table_size = 0;
+ struct il3945_tpt_entry *tpt_table = NULL;
+
+ if (rssi < IL_MIN_RSSI_VAL || rssi > IL_MAX_RSSI_VAL)
+ rssi = IL_MIN_RSSI_VAL;
+
+ switch (band) {
+ case IEEE80211_BAND_2GHZ:
+ tpt_table = il3945_tpt_table_g;
+ table_size = ARRAY_SIZE(il3945_tpt_table_g);
+ break;
+ case IEEE80211_BAND_5GHZ:
+ tpt_table = il3945_tpt_table_a;
+ table_size = ARRAY_SIZE(il3945_tpt_table_a);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ while (idx < table_size && rssi < tpt_table[idx].min_rssi)
+ idx++;
+
+ idx = min(idx, table_size - 1);
+
+ return tpt_table[idx].idx;
+}
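+
+/*
+ * Worked example of the lookup above: on 2.4 GHz with rssi = -70 dBm the
+ * loop skips the -60, -64 and -68 entries (rssi is below each min_rssi)
+ * and stops at the -80 entry, so the function returns RATE_24M_IDX.
+ */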
+
+static void
+il3945_clear_win(struct il3945_rate_scale_data *win)
+{
+ win->data = 0;
+ win->success_counter = 0;
+ win->success_ratio = -1;
+ win->counter = 0;
+ win->average_tpt = IL_INVALID_VALUE;
+ win->stamp = 0;
+}
+
+/**
+ * il3945_rate_scale_flush_wins - flush out the rate scale wins
+ *
+ * Returns the number of wins that have gathered data but were
+ * not flushed. If there were any that were not flushed, then
+ * reschedule the rate flushing routine.
+ */
+static int
+il3945_rate_scale_flush_wins(struct il3945_rs_sta *rs_sta)
+{
+ int unflushed = 0;
+ int i;
+ unsigned long flags;
+ struct il_priv *il __maybe_unused = rs_sta->il;
+
+ /*
+ * For each rate, if we have collected data on that rate
+ * and it has been more than RATE_WIN_FLUSH
+ * since we flushed, clear out the gathered stats
+ */
+ for (i = 0; i < RATE_COUNT_3945; i++) {
+ if (!rs_sta->win[i].counter)
+ continue;
+
+ spin_lock_irqsave(&rs_sta->lock, flags);
+ if (time_after(jiffies, rs_sta->win[i].stamp + RATE_WIN_FLUSH)) {
+ D_RATE("flushing %d samples of rate " "idx %d\n",
+ rs_sta->win[i].counter, i);
+ il3945_clear_win(&rs_sta->win[i]);
+ } else
+ unflushed++;
+ spin_unlock_irqrestore(&rs_sta->lock, flags);
+ }
+
+ return unflushed;
+}
+
+#define RATE_FLUSH_MAX 5000 /* msec */
+#define RATE_FLUSH_MIN 50 /* msec */
+#define IL_AVERAGE_PACKETS 1500
+
+static void
+il3945_bg_rate_scale_flush(unsigned long data)
+{
+ struct il3945_rs_sta *rs_sta = (void *)data;
+ struct il_priv *il __maybe_unused = rs_sta->il;
+ int unflushed = 0;
+ unsigned long flags;
+ u32 packet_count, duration, pps;
+
+ D_RATE("enter\n");
+
+ unflushed = il3945_rate_scale_flush_wins(rs_sta);
+
+ spin_lock_irqsave(&rs_sta->lock, flags);
+
+ /* Number of packets Tx'd since last time this timer ran */
+ packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1;
+
+ rs_sta->last_tx_packets = rs_sta->tx_packets + 1;
+
+ if (unflushed) {
+ duration =
+ jiffies_to_msecs(jiffies - rs_sta->last_partial_flush);
+
+ D_RATE("Tx'd %d packets in %dms\n", packet_count, duration);
+
+ /* Determine packets per second */
+ if (duration)
+ pps = (packet_count * 1000) / duration;
+ else
+ pps = 0;
+
+ if (pps) {
+ duration = (IL_AVERAGE_PACKETS * 1000) / pps;
+ if (duration < RATE_FLUSH_MIN)
+ duration = RATE_FLUSH_MIN;
+ else if (duration > RATE_FLUSH_MAX)
+ duration = RATE_FLUSH_MAX;
+ } else
+ duration = RATE_FLUSH_MAX;
+
+ rs_sta->flush_time = msecs_to_jiffies(duration);
+
+ D_RATE("new flush period: %d msec ave %d\n", duration,
+ packet_count);
+
+ mod_timer(&rs_sta->rate_scale_flush,
+ jiffies + rs_sta->flush_time);
+
+ rs_sta->last_partial_flush = jiffies;
+ } else {
+ rs_sta->flush_time = RATE_FLUSH;
+ rs_sta->flush_pending = 0;
+ }
+ /* If there weren't any unflushed entries, we don't schedule the timer
+ * to run again */
+
+ rs_sta->last_flush = jiffies;
+
+ spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+ D_RATE("leave\n");
+}
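+
+/*
+ * Worked example of the flush-period arithmetic above: 300 packets seen in
+ * 200 ms gives pps = 300 * 1000 / 200 = 1500, so the new period is
+ * IL_AVERAGE_PACKETS * 1000 / pps = 1500 * 1000 / 1500 = 1000 ms, which is
+ * already inside the [RATE_FLUSH_MIN, RATE_FLUSH_MAX] = [50, 5000] ms clamp
+ * and becomes the next rate_scale_flush timer interval.
+ */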
+
+/**
+ * il3945_collect_tx_data - Update the success/failure sliding win
+ *
+ * We keep a sliding win of the last RATE_MAX_WINDOW (62) packets transmitted
+ * at this rate. win->data contains the bitmask of successful
+ * packets.
+ */
+static void
+il3945_collect_tx_data(struct il3945_rs_sta *rs_sta,
+ struct il3945_rate_scale_data *win, int success,
+ int retries, int idx)
+{
+ unsigned long flags;
+ s32 fail_count;
+ struct il_priv *il __maybe_unused = rs_sta->il;
+
+ if (!retries) {
+ D_RATE("leave: retries == 0 -- should be at least 1\n");
+ return;
+ }
+
+ spin_lock_irqsave(&rs_sta->lock, flags);
+
+ /*
+ * Keep track of only the latest 62 tx frame attempts in this rate's
+ * history win; anything older isn't really relevant any more.
+ * If we have filled up the sliding win, drop the oldest attempt;
+ * if the oldest attempt (highest bit in bitmap) shows "success",
+ * subtract "1" from the success counter (this is the main reason
+ * we keep these bitmaps!).
+ */
+ while (retries > 0) {
+ if (win->counter >= RATE_MAX_WINDOW) {
+
+ /* remove earliest */
+ win->counter = RATE_MAX_WINDOW - 1;
+
+ if (win->data & (1ULL << (RATE_MAX_WINDOW - 1))) {
+ win->data &= ~(1ULL << (RATE_MAX_WINDOW - 1));
+ win->success_counter--;
+ }
+ }
+
+ /* Increment frames-attempted counter */
+ win->counter++;
+
+ /* Shift bitmap by one frame (throw away oldest history),
+ * OR in "1", and increment "success" if this
+ * frame was successful. */
+ win->data <<= 1;
+ if (success > 0) {
+ win->success_counter++;
+ win->data |= 0x1;
+ success--;
+ }
+
+ retries--;
+ }
+
+ /* Calculate current success ratio, avoid divide-by-0! */
+ if (win->counter > 0)
+ win->success_ratio =
+ 128 * (100 * win->success_counter) / win->counter;
+ else
+ win->success_ratio = IL_INVALID_VALUE;
+
+ fail_count = win->counter - win->success_counter;
+
+ /* Calculate average throughput, if we have enough history. */
+ if (fail_count >= RATE_MIN_FAILURE_TH ||
+ win->success_counter >= RATE_MIN_SUCCESS_TH)
+ win->average_tpt =
+ ((win->success_ratio * rs_sta->expected_tpt[idx] +
+ 64) / 128);
+ else
+ win->average_tpt = IL_INVALID_VALUE;
+
+ /* Tag this win as having been updated */
+ win->stamp = jiffies;
+
+ spin_unlock_irqrestore(&rs_sta->lock, flags);
+}
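+
+/*
+ * Worked example of the ratios above: with 48 successes in a full 62-frame
+ * win, success_ratio = 128 * (100 * 48) / 62 = 9909 (about 77%, scaled by
+ * 128).  Assuming expected_tpt[idx] = 202 (the 54M entry of the G table),
+ * average_tpt = (9909 * 202 + 64) / 128 = 15638.  For comparison,
+ * RATE_DECREASE_TH = 1920 corresponds to a 15% success ratio on the same
+ * 128-scaled scale.
+ */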
+
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void
+il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
+{
+ struct ieee80211_hw *hw = il->hw;
+ struct ieee80211_conf *conf = &il->hw->conf;
+ struct il3945_sta_priv *psta;
+ struct il3945_rs_sta *rs_sta;
+ struct ieee80211_supported_band *sband;
+ int i;
+
+ D_INFO("enter\n");
+ if (sta_id == il->ctx.bcast_sta_id)
+ goto out;
+
+ psta = (struct il3945_sta_priv *)sta->drv_priv;
+ rs_sta = &psta->rs_sta;
+ sband = hw->wiphy->bands[conf->channel->band];
+
+ rs_sta->il = il;
+
+ rs_sta->start_rate = RATE_INVALID;
+
+ /* default to just 802.11b */
+ rs_sta->expected_tpt = il3945_expected_tpt_b;
+
+ rs_sta->last_partial_flush = jiffies;
+ rs_sta->last_flush = jiffies;
+ rs_sta->flush_time = RATE_FLUSH;
+ rs_sta->last_tx_packets = 0;
+
+ rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
+ rs_sta->rate_scale_flush.function = il3945_bg_rate_scale_flush;
+
+ for (i = 0; i < RATE_COUNT_3945; i++)
+ il3945_clear_win(&rs_sta->win[i]);
+
+ /* TODO: what is a good starting rate for STA? About middle? Maybe not
+ * the lowest or the highest rate.. Could consider using RSSI from
+ * previous packets? Need to have IEEE 802.1X auth succeed immediately
+ * after assoc.. */
+
+ for (i = sband->n_bitrates - 1; i >= 0; i--) {
+ if (sta->supp_rates[sband->band] & (1 << i)) {
+ rs_sta->last_txrate_idx = i;
+ break;
+ }
+ }
+
+ il->_3945.sta_supp_rates = sta->supp_rates[sband->band];
+ /* For the 5 GHz band, rates start at IL_FIRST_OFDM_RATE */
+ if (sband->band == IEEE80211_BAND_5GHZ) {
+ rs_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
+ il->_3945.sta_supp_rates <<= IL_FIRST_OFDM_RATE;
+ }
+
+out:
+ il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
+
+ D_INFO("leave\n");
+}
+
+static void *
+il3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+{
+ return hw->priv;
+}
+
+/* rate scale requires free function to be implemented */
+static void
+il3945_rs_free(void *il)
+{
+}
+
+static void *
+il3945_rs_alloc_sta(void *il_priv, struct ieee80211_sta *sta, gfp_t gfp)
+{
+ struct il3945_rs_sta *rs_sta;
+ struct il3945_sta_priv *psta = (void *)sta->drv_priv;
+ struct il_priv *il __maybe_unused = il_priv;
+
+ D_RATE("enter\n");
+
+ rs_sta = &psta->rs_sta;
+
+ spin_lock_init(&rs_sta->lock);
+ init_timer(&rs_sta->rate_scale_flush);
+
+ D_RATE("leave\n");
+
+ return rs_sta;
+}
+
+static void
+il3945_rs_free_sta(void *il_priv, struct ieee80211_sta *sta, void *il_sta)
+{
+ struct il3945_rs_sta *rs_sta = il_sta;
+
+ /*
+ * Be careful not to use any members of il3945_rs_sta (like trying
+ * to use il_priv to print out debugging) since it may not be fully
+ * initialized at this point.
+ */
+ del_timer_sync(&rs_sta->rate_scale_flush);
+}
+
+/**
+ * il3945_rs_tx_status - Update rate control values based on Tx results
+ *
+ * NOTE: Uses il_priv->retry_rate for the # of retries attempted by
+ * the hardware for each rate.
+ */
+static void
+il3945_rs_tx_status(void *il_rate, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *il_sta,
+ struct sk_buff *skb)
+{
+ s8 retries = 0, current_count;
+ int scale_rate_idx, first_idx, last_idx;
+ unsigned long flags;
+ struct il_priv *il = (struct il_priv *)il_rate;
+ struct il3945_rs_sta *rs_sta = il_sta;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ D_RATE("enter\n");
+
+ retries = info->status.rates[0].count;
+ /* Sanity Check for retries */
+ if (retries > RATE_RETRY_TH)
+ retries = RATE_RETRY_TH;
+
+ first_idx = sband->bitrates[info->status.rates[0].idx].hw_value;
+ if (first_idx < 0 || first_idx >= RATE_COUNT_3945) {
+ D_RATE("leave: Rate out of bounds: %d\n", first_idx);
+ return;
+ }
+
+ if (!il_sta) {
+ D_RATE("leave: No STA il data to update!\n");
+ return;
+ }
+
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (!rs_sta->il) {
+ D_RATE("leave: STA il data uninitialized!\n");
+ return;
+ }
+
+ rs_sta->tx_packets++;
+
+ scale_rate_idx = first_idx;
+ last_idx = first_idx;
+
+ /*
+ * Update the win for each rate. We determine which rates
+ * were Tx'd based on the total number of retries vs. the number
+ * of retries configured for each rate -- currently the per-device
+ * value 'retry_rate' rather than a rate-specific retry count.
+ *
+ * On exit from this while loop last_idx indicates the rate
+ * at which the frame was finally transmitted (or failed if no
+ * ACK)
+ */
+ while (retries > 1) {
+ if ((retries - 1) < il->retry_rate) {
+ current_count = (retries - 1);
+ last_idx = scale_rate_idx;
+ } else {
+ current_count = il->retry_rate;
+ last_idx = il3945_rs_next_rate(il, scale_rate_idx);
+ }
+
+ /* Update this rate accounting for as many retries
+ * as was used for it (per current_count) */
+ il3945_collect_tx_data(rs_sta, &rs_sta->win[scale_rate_idx], 0,
+ current_count, scale_rate_idx);
+ D_RATE("Update rate %d for %d retries.\n", scale_rate_idx,
+ current_count);
+
+ retries -= current_count;
+
+ scale_rate_idx = last_idx;
+ }
+
+ /* Update the last idx win with success/failure based on ACK */
+ D_RATE("Update rate %d with %s.\n", last_idx,
+ (info->flags & IEEE80211_TX_STAT_ACK) ? "success" : "failure");
+ il3945_collect_tx_data(rs_sta, &rs_sta->win[last_idx],
+ info->flags & IEEE80211_TX_STAT_ACK, 1,
+ last_idx);
+
+ /* We updated the rate scale win -- if its been more than
+ * flush_time since the last run, schedule the flush
+ * again */
+ spin_lock_irqsave(&rs_sta->lock, flags);
+
+ if (!rs_sta->flush_pending &&
+ time_after(jiffies, rs_sta->last_flush + rs_sta->flush_time)) {
+
+ rs_sta->last_partial_flush = jiffies;
+ rs_sta->flush_pending = 1;
+ mod_timer(&rs_sta->rate_scale_flush,
+ jiffies + rs_sta->flush_time);
+ }
+
+ spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+ D_RATE("leave\n");
+}
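+
+/*
+ * Worked example of the retry accounting above, assuming
+ * info->status.rates[0].count = 7 and il->retry_rate = 3: the loop charges
+ * three failed attempts to first_idx, then three to the next lower rate
+ * chosen by il3945_rs_next_rate(), leaving retries = 1; the final attempt
+ * is then credited, together with the ACK result, to the win of the rate
+ * reached after those two steps (last_idx).
+ */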
+
+static u16
+il3945_get_adjacent_rate(struct il3945_rs_sta *rs_sta, u8 idx, u16 rate_mask,
+ enum ieee80211_band band)
+{
+ u8 high = RATE_INVALID;
+ u8 low = RATE_INVALID;
+ struct il_priv *il __maybe_unused = rs_sta->il;
+
+ /* 802.11A walks to the next literal adjacent rate in
+ * the rate table */
+ if (unlikely(band == IEEE80211_BAND_5GHZ)) {
+ int i;
+ u32 mask;
+
+ /* Find the previous rate that is in the rate mask */
+ i = idx - 1;
+ for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
+ if (rate_mask & mask) {
+ low = i;
+ break;
+ }
+ }
+
+ /* Find the next rate that is in the rate mask */
+ i = idx + 1;
+ for (mask = (1 << i); i < RATE_COUNT_3945; i++, mask <<= 1) {
+ if (rate_mask & mask) {
+ high = i;
+ break;
+ }
+ }
+
+ return (high << 8) | low;
+ }
+
+ low = idx;
+ while (low != RATE_INVALID) {
+ if (rs_sta->tgg)
+ low = il3945_rates[low].prev_rs_tgg;
+ else
+ low = il3945_rates[low].prev_rs;
+ if (low == RATE_INVALID)
+ break;
+ if (rate_mask & (1 << low))
+ break;
+ D_RATE("Skipping masked lower rate: %d\n", low);
+ }
+
+ high = idx;
+ while (high != RATE_INVALID) {
+ if (rs_sta->tgg)
+ high = il3945_rates[high].next_rs_tgg;
+ else
+ high = il3945_rates[high].next_rs;
+ if (high == RATE_INVALID)
+ break;
+ if (rate_mask & (1 << high))
+ break;
+ D_RATE("Skipping masked higher rate: %d\n", high);
+ }
+
+ return (high << 8) | low;
+}
+
+/**
+ * il3945_rs_get_rate - find the rate for the requested packet
+ *
+ * Returns the ieee80211_rate structure allocated by the driver.
+ *
+ * The rate control algorithm has no internal mapping between hw_mode's
+ * rate ordering and the rate ordering used by the rate control algorithm.
+ *
+ * The rate control algorithm uses a single table of rates that goes across
+ * the entire A/B/G spectrum vs. being limited to just one particular
+ * hw_mode.
+ *
+ * As such, we can't convert the idx obtained below into the hw_mode's
+ * rate table and must reference the driver allocated rate table
+ *
+ */
+static void
+il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
+ struct ieee80211_tx_rate_control *txrc)
+{
+ struct ieee80211_supported_band *sband = txrc->sband;
+ struct sk_buff *skb = txrc->skb;
+ u8 low = RATE_INVALID;
+ u8 high = RATE_INVALID;
+ u16 high_low;
+ int idx;
+ struct il3945_rs_sta *rs_sta = il_sta;
+ struct il3945_rate_scale_data *win = NULL;
+ int current_tpt = IL_INVALID_VALUE;
+ int low_tpt = IL_INVALID_VALUE;
+ int high_tpt = IL_INVALID_VALUE;
+ u32 fail_count;
+ s8 scale_action = 0;
+ unsigned long flags;
+ u16 rate_mask;
+ s8 max_rate_idx = -1;
+ struct il_priv *il __maybe_unused = (struct il_priv *)il_r;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+
+ D_RATE("enter\n");
+
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (rs_sta && !rs_sta->il) {
+ D_RATE("Rate scaling information not initialized yet.\n");
+ il_sta = NULL;
+ }
+
+ if (rate_control_send_low(sta, il_sta, txrc))
+ return;
+
+ rate_mask = sta->supp_rates[sband->band];
+
+ /* get user max rate if set */
+ max_rate_idx = txrc->max_rate_idx;
+ if (sband->band == IEEE80211_BAND_5GHZ && max_rate_idx != -1)
+ max_rate_idx += IL_FIRST_OFDM_RATE;
+ if (max_rate_idx < 0 || max_rate_idx >= RATE_COUNT)
+ max_rate_idx = -1;
+
+ idx = min(rs_sta->last_txrate_idx & 0xffff, RATE_COUNT_3945 - 1);
+
+ if (sband->band == IEEE80211_BAND_5GHZ)
+ rate_mask = rate_mask << IL_FIRST_OFDM_RATE;
+
+ spin_lock_irqsave(&rs_sta->lock, flags);
+
+ /* for a recent association, choose the best rate based on
+ * the RSSI value
+ */
+ if (rs_sta->start_rate != RATE_INVALID) {
+ if (rs_sta->start_rate < idx &&
+ (rate_mask & (1 << rs_sta->start_rate)))
+ idx = rs_sta->start_rate;
+ rs_sta->start_rate = RATE_INVALID;
+ }
+
+ /* force user max rate if set by user */
+ if (max_rate_idx != -1 && max_rate_idx < idx) {
+ if (rate_mask & (1 << max_rate_idx))
+ idx = max_rate_idx;
+ }
+
+ win = &(rs_sta->win[idx]);
+
+ fail_count = win->counter - win->success_counter;
+
+ if (fail_count < RATE_MIN_FAILURE_TH &&
+ win->success_counter < RATE_MIN_SUCCESS_TH) {
+ spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+ D_RATE("Invalid average_tpt on rate %d: "
+ "counter: %d, success_counter: %d, "
+ "expected_tpt is %sNULL\n", idx, win->counter,
+ win->success_counter,
+ rs_sta->expected_tpt ? "not " : "");
+
+ /* Can't calculate this yet; not enough history */
+ win->average_tpt = IL_INVALID_VALUE;
+ goto out;
+
+ }
+
+ current_tpt = win->average_tpt;
+
+ high_low =
+ il3945_get_adjacent_rate(rs_sta, idx, rate_mask, sband->band);
+ low = high_low & 0xff;
+ high = (high_low >> 8) & 0xff;
+
+ /* If the user set a max rate, don't allow anything higher than that constraint */
+ if (max_rate_idx != -1 && max_rate_idx < high)
+ high = RATE_INVALID;
+
+ /* Collect Measured throughputs of adjacent rates */
+ if (low != RATE_INVALID)
+ low_tpt = rs_sta->win[low].average_tpt;
+
+ if (high != RATE_INVALID)
+ high_tpt = rs_sta->win[high].average_tpt;
+
+ spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+ scale_action = 0;
+
+ /* Low success ratio, need to drop the rate */
+ if (win->success_ratio < RATE_DECREASE_TH || !current_tpt) {
+ D_RATE("decrease rate because of low success_ratio\n");
+ scale_action = -1;
+ /* No throughput measured yet for adjacent rates,
+ * try to increase */
+ } else if (low_tpt == IL_INVALID_VALUE && high_tpt == IL_INVALID_VALUE) {
+
+ if (high != RATE_INVALID &&
+ win->success_ratio >= RATE_INCREASE_TH)
+ scale_action = 1;
+ else if (low != RATE_INVALID)
+ scale_action = 0;
+
+ /* Both adjacent throughputs are measured, but neither one has
+ * better throughput; we're using the best rate, don't change
+ * it! */
+ } else if (low_tpt != IL_INVALID_VALUE && high_tpt != IL_INVALID_VALUE
+ && low_tpt < current_tpt && high_tpt < current_tpt) {
+
+ D_RATE("No action -- low [%d] & high [%d] < "
+ "current_tpt [%d]\n", low_tpt, high_tpt, current_tpt);
+ scale_action = 0;
+
+ /* At least one adjacent rate has a measured throughput */
+ } else {
+ if (high_tpt != IL_INVALID_VALUE) {
+
+ /* Higher rate has better throughput; increase
+ * rate if the success ratio allows it */
+ if (high_tpt > current_tpt &&
+ win->success_ratio >= RATE_INCREASE_TH)
+ scale_action = 1;
+ else {
+ D_RATE("not increasing rate: higher rate not clearly better\n");
+ scale_action = 0;
+ }
+ } else if (low_tpt != IL_INVALID_VALUE) {
+ if (low_tpt > current_tpt) {
+ D_RATE("decrease rate because of low tpt\n");
+ scale_action = -1;
+ } else if (win->success_ratio >= RATE_INCREASE_TH) {
+ /* Lower rate is not better and the success
+ * ratio is good enough: try a higher rate */
+ scale_action = 1;
+ }
+ }
+ }
+
+ /* Sanity check; asked for decrease, but success rate or throughput
+ * has been good at old rate. Don't change it. */
+ if (scale_action == -1 && low != RATE_INVALID &&
+ (win->success_ratio > RATE_HIGH_TH ||
+ current_tpt > 100 * rs_sta->expected_tpt[low]))
+ scale_action = 0;
+
+ switch (scale_action) {
+ case -1:
+ /* Decrease rate */
+ if (low != RATE_INVALID)
+ idx = low;
+ break;
+ case 1:
+ /* Increase rate */
+ if (high != RATE_INVALID)
+ idx = high;
+
+ break;
+ case 0:
+ default:
+ /* No change */
+ break;
+ }
+
+ D_RATE("Selected %d (action %d) - low %d high %d\n", idx, scale_action,
+ low, high);
+
+out:
+
+ if (sband->band == IEEE80211_BAND_5GHZ) {
+ if (WARN_ON_ONCE(idx < IL_FIRST_OFDM_RATE))
+ idx = IL_FIRST_OFDM_RATE;
+ rs_sta->last_txrate_idx = idx;
+ info->control.rates[0].idx = idx - IL_FIRST_OFDM_RATE;
+ } else {
+ rs_sta->last_txrate_idx = idx;
+ info->control.rates[0].idx = rs_sta->last_txrate_idx;
+ }
+
+ D_RATE("leave: %d\n", idx);
+}
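+
+/*
+ * Example of the band adjustment above: the driver-wide table indexes the
+ * CCK rates first, so on 5 GHz a selected idx of 10 (48M) is reported to
+ * mac80211 as 10 - IL_FIRST_OFDM_RATE within the band's OFDM-only bitrate
+ * list, while last_txrate_idx keeps the driver-wide value.
+ */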
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static int
+il3945_open_file_generic(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t
+il3945_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char *buff;
+ int desc = 0;
+ int j;
+ ssize_t ret;
+ struct il3945_rs_sta *lq_sta = file->private_data;
+
+ buff = kmalloc(1024, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ desc +=
+ sprintf(buff + desc,
+ "tx packets=%d last rate idx=%d\n"
+ "rate=0x%X flush time %d\n", lq_sta->tx_packets,
+ lq_sta->last_txrate_idx, lq_sta->start_rate,
+ jiffies_to_msecs(lq_sta->flush_time));
+ for (j = 0; j < RATE_COUNT_3945; j++) {
+ desc +=
+ sprintf(buff + desc, "counter=%d success=%d %%=%d\n",
+ lq_sta->win[j].counter,
+ lq_sta->win[j].success_counter,
+ lq_sta->win[j].success_ratio);
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
+ return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
+ .read = il3945_sta_dbgfs_stats_table_read,
+ .open = il3945_open_file_generic,
+ .llseek = default_llseek,
+};
+
+static void
+il3945_add_debugfs(void *il, void *il_sta, struct dentry *dir)
+{
+ struct il3945_rs_sta *lq_sta = il_sta;
+
+ lq_sta->rs_sta_dbgfs_stats_table_file =
+ debugfs_create_file("rate_stats_table", 0600, dir, lq_sta,
+ &rs_sta_dbgfs_stats_table_ops);
+
+}
+
+static void
+il3945_remove_debugfs(void *il, void *il_sta)
+{
+ struct il3945_rs_sta *lq_sta = il_sta;
+ debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+}
+#endif
+
+/*
+ * Initialization of rate scaling information is done by driver after
+ * the station is added. Since mac80211 calls this function before a
+ * station is added we ignore it.
+ */
+static void
+il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *il_sta)
+{
+}
+
+static struct rate_control_ops rs_ops = {
+ .module = NULL,
+ .name = RS_NAME,
+ .tx_status = il3945_rs_tx_status,
+ .get_rate = il3945_rs_get_rate,
+ .rate_init = il3945_rs_rate_init_stub,
+ .alloc = il3945_rs_alloc,
+ .free = il3945_rs_free,
+ .alloc_sta = il3945_rs_alloc_sta,
+ .free_sta = il3945_rs_free_sta,
+#ifdef CONFIG_MAC80211_DEBUGFS
+ .add_sta_debugfs = il3945_add_debugfs,
+ .remove_sta_debugfs = il3945_remove_debugfs,
+#endif
+
+};
+
+void
+il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
+{
+ struct il_priv *il = hw->priv;
+ s32 rssi = 0;
+ unsigned long flags;
+ struct il3945_rs_sta *rs_sta;
+ struct ieee80211_sta *sta;
+ struct il3945_sta_priv *psta;
+
+ D_RATE("enter\n");
+
+ rcu_read_lock();
+
+ sta =
+ ieee80211_find_sta(il->ctx.vif, il->stations[sta_id].sta.sta.addr);
+ if (!sta) {
+ D_RATE("Unable to find station to initialize rate scaling.\n");
+ rcu_read_unlock();
+ return;
+ }
+
+ psta = (void *)sta->drv_priv;
+ rs_sta = &psta->rs_sta;
+
+ spin_lock_irqsave(&rs_sta->lock, flags);
+
+ rs_sta->tgg = 0;
+ switch (il->band) {
+ case IEEE80211_BAND_2GHZ:
+ /* TODO: this always does G, not a regression */
+ if (il->ctx.active.flags & RXON_FLG_TGG_PROTECT_MSK) {
+ rs_sta->tgg = 1;
+ rs_sta->expected_tpt = il3945_expected_tpt_g_prot;
+ } else
+ rs_sta->expected_tpt = il3945_expected_tpt_g;
+ break;
+ case IEEE80211_BAND_5GHZ:
+ rs_sta->expected_tpt = il3945_expected_tpt_a;
+ break;
+ case IEEE80211_NUM_BANDS:
+ BUG();
+ break;
+ }
+
+ spin_unlock_irqrestore(&rs_sta->lock, flags);
+
+ rssi = il->_3945.last_rx_rssi;
+ if (rssi == 0)
+ rssi = IL_MIN_RSSI_VAL;
+
+ D_RATE("Network RSSI: %d\n", rssi);
+
+ rs_sta->start_rate = il3945_get_rate_idx_by_rssi(rssi, il->band);
+
+ D_RATE("leave: rssi %d assign rate idx: " "%d (plcp 0x%x)\n", rssi,
+ rs_sta->start_rate, il3945_rates[rs_sta->start_rate].plcp);
+ rcu_read_unlock();
+}
+
+int
+il3945_rate_control_register(void)
+{
+ return ieee80211_rate_control_register(&rs_ops);
+}
+
+void
+il3945_rate_control_unregister(void)
+{
+ ieee80211_rate_control_unregister(&rs_ops);
+}
diff --git a/drivers/net/wireless/iwlegacy/3945.c b/drivers/net/wireless/iwlegacy/3945.c
new file mode 100644
index 0000000..c80eb9b
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945.c
@@ -0,0 +1,2744 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+#include <net/mac80211.h>
+
+#include "common.h"
+#include "3945.h"
+
+/* Send led command */
+static int
+il3945_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd)
+{
+ struct il_host_cmd cmd = {
+ .id = C_LEDS,
+ .len = sizeof(struct il_led_cmd),
+ .data = led_cmd,
+ .flags = CMD_ASYNC,
+ .callback = NULL,
+ };
+
+ return il_send_cmd(il, &cmd);
+}
+
+const struct il_led_ops il3945_led_ops = {
+ .cmd = il3945_send_led_cmd,
+};
+
+#define IL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
+ [RATE_##r##M_IDX] = { RATE_##r##M_PLCP, \
+ RATE_##r##M_IEEE, \
+ RATE_##ip##M_IDX, \
+ RATE_##in##M_IDX, \
+ RATE_##rp##M_IDX, \
+ RATE_##rn##M_IDX, \
+ RATE_##pp##M_IDX, \
+ RATE_##np##M_IDX, \
+ RATE_##r##M_IDX_TBL, \
+ RATE_##ip##M_IDX_TBL }
+
+/*
+ * Parameter order:
+ * rate, prev ieee rate, next ieee rate, prev rate-scale rate,
+ * next rate-scale rate, prev tgg rate, next tgg rate
+ *
+ * If there isn't a valid next or previous rate then INV is used which
+ * maps to RATE_INVALID
+ *
+ */
+const struct il3945_rate_info il3945_rates[RATE_COUNT_3945] = {
+ IL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2), /* 1mbps */
+ IL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5), /* 2mbps */
+ IL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11), /* 5.5mbps */
+ IL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18), /* 11mbps */
+ IL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11), /* 6mbps */
+ IL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11), /* 9mbps */
+ IL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18), /* 12mbps */
+ IL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24), /* 18mbps */
+ IL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36), /* 24mbps */
+ IL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48), /* 36mbps */
+ IL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54), /* 48mbps */
+ IL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV), /* 54mbps */
+};
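+
+/*
+ * Example expansion of one entry above, reading the macro positionally
+ * (plcp, ieee, prev/next ieee idx, prev/next rs idx, prev/next rs tgg idx,
+ * then the two table idx values): IL_DECLARE_RATE_INFO(11, 9, 12, 5, 12,
+ * 5, 18) fills [RATE_11M_IDX] with the 11M PLCP and IEEE codes, 9M/12M as
+ * the previous/next ieee rates, 5M/12M as the previous/next rate-scale
+ * rates, 5M/18M as their TGG variants, and RATE_11M_IDX_TBL /
+ * RATE_9M_IDX_TBL.
+ */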
+
+static inline u8
+il3945_get_prev_ieee_rate(u8 rate_idx)
+{
+ u8 rate = il3945_rates[rate_idx].prev_ieee;
+
+ if (rate == RATE_INVALID)
+ rate = rate_idx;
+ return rate;
+}
+
+/* 1 = enable the il3945_disable_events() function */
+#define IL_EVT_DISABLE (0)
+#define IL_EVT_DISABLE_SIZE (1532/32)
+
+/**
+ * il3945_disable_events - Disable selected events in uCode event log
+ *
+ * Disable an event by writing "1"s into "disable"
+ * bitmap in SRAM. Bit position corresponds to Event # (id/type).
+ * Default values of 0 enable uCode events to be logged.
+ * Use only for special debugging.  This function is just a placeholder as-is;
+ * you'll need to provide the special bits! ...
+ * ... and set IL_EVT_DISABLE to 1. */
+void
+il3945_disable_events(struct il_priv *il)
+{
+ int i;
+ u32 base; /* SRAM address of event log header */
+ u32 disable_ptr; /* SRAM address of event-disable bitmap array */
+ u32 array_size; /* # of u32 entries in array */
+ static const u32 evt_disable[IL_EVT_DISABLE_SIZE] = {
+ 0x00000000, /* 31 - 0 Event id numbers */
+ 0x00000000, /* 63 - 32 */
+ 0x00000000, /* 95 - 64 */
+ 0x00000000, /* 127 - 96 */
+ 0x00000000, /* 159 - 128 */
+ 0x00000000, /* 191 - 160 */
+ 0x00000000, /* 223 - 192 */
+ 0x00000000, /* 255 - 224 */
+ 0x00000000, /* 287 - 256 */
+ 0x00000000, /* 319 - 288 */
+ 0x00000000, /* 351 - 320 */
+ 0x00000000, /* 383 - 352 */
+ 0x00000000, /* 415 - 384 */
+ 0x00000000, /* 447 - 416 */
+ 0x00000000, /* 479 - 448 */
+ 0x00000000, /* 511 - 480 */
+ 0x00000000, /* 543 - 512 */
+ 0x00000000, /* 575 - 544 */
+ 0x00000000, /* 607 - 576 */
+ 0x00000000, /* 639 - 608 */
+ 0x00000000, /* 671 - 640 */
+ 0x00000000, /* 703 - 672 */
+ 0x00000000, /* 735 - 704 */
+ 0x00000000, /* 767 - 736 */
+ 0x00000000, /* 799 - 768 */
+ 0x00000000, /* 831 - 800 */
+ 0x00000000, /* 863 - 832 */
+ 0x00000000, /* 895 - 864 */
+ 0x00000000, /* 927 - 896 */
+ 0x00000000, /* 959 - 928 */
+ 0x00000000, /* 991 - 960 */
+ 0x00000000, /* 1023 - 992 */
+ 0x00000000, /* 1055 - 1024 */
+ 0x00000000, /* 1087 - 1056 */
+ 0x00000000, /* 1119 - 1088 */
+ 0x00000000, /* 1151 - 1120 */
+ 0x00000000, /* 1183 - 1152 */
+ 0x00000000, /* 1215 - 1184 */
+ 0x00000000, /* 1247 - 1216 */
+ 0x00000000, /* 1279 - 1248 */
+ 0x00000000, /* 1311 - 1280 */
+ 0x00000000, /* 1343 - 1312 */
+ 0x00000000, /* 1375 - 1344 */
+ 0x00000000, /* 1407 - 1376 */
+ 0x00000000, /* 1439 - 1408 */
+ 0x00000000, /* 1471 - 1440 */
+ 0x00000000, /* 1503 - 1472 */
+ };
+
+ base = le32_to_cpu(il->card_alive.log_event_table_ptr);
+ if (!il3945_hw_valid_rtc_data_addr(base)) {
+ IL_ERR("Invalid event log pointer 0x%08X\n", base);
+ return;
+ }
+
+ disable_ptr = il_read_targ_mem(il, base + (4 * sizeof(u32)));
+ array_size = il_read_targ_mem(il, base + (5 * sizeof(u32)));
+
+ if (IL_EVT_DISABLE && array_size == IL_EVT_DISABLE_SIZE) {
+ D_INFO("Disabling selected uCode log events at 0x%x\n",
+ disable_ptr);
+ for (i = 0; i < IL_EVT_DISABLE_SIZE; i++)
+ il_write_targ_mem(il, disable_ptr + (i * sizeof(u32)),
+ evt_disable[i]);
+
+ } else {
+ D_INFO("Selected uCode log events may be disabled\n");
+ D_INFO(" by writing \"1\"s into disable bitmap\n");
+ D_INFO(" in SRAM at 0x%x, size %d u32s\n", disable_ptr,
+ array_size);
+ }
+
+}
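+
+/*
+ * Worked example of the bitmap layout above: to disable uCode event id 37,
+ * set bit 37 % 32 = 5 in word 37 / 32 = 1 of the array, i.e.
+ * evt_disable[1] = 0x00000020 (the word documented as "63 - 32" above),
+ * and build with IL_EVT_DISABLE set to 1.
+ */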
+
+static int
+il3945_hwrate_to_plcp_idx(u8 plcp)
+{
+ int idx;
+
+ for (idx = 0; idx < RATE_COUNT_3945; idx++)
+ if (il3945_rates[idx].plcp == plcp)
+ return idx;
+ return -1;
+}
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
+
+static const char *
+il3945_get_tx_fail_reason(u32 status)
+{
+ switch (status & TX_STATUS_MSK) {
+ case TX_3945_STATUS_SUCCESS:
+ return "SUCCESS";
+ TX_STATUS_ENTRY(SHORT_LIMIT);
+ TX_STATUS_ENTRY(LONG_LIMIT);
+ TX_STATUS_ENTRY(FIFO_UNDERRUN);
+ TX_STATUS_ENTRY(MGMNT_ABORT);
+ TX_STATUS_ENTRY(NEXT_FRAG);
+ TX_STATUS_ENTRY(LIFE_EXPIRE);
+ TX_STATUS_ENTRY(DEST_PS);
+ TX_STATUS_ENTRY(ABORTED);
+ TX_STATUS_ENTRY(BT_RETRY);
+ TX_STATUS_ENTRY(STA_INVALID);
+ TX_STATUS_ENTRY(FRAG_DROPPED);
+ TX_STATUS_ENTRY(TID_DISABLE);
+ TX_STATUS_ENTRY(FRAME_FLUSHED);
+ TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
+ TX_STATUS_ENTRY(TX_LOCKED);
+ TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
+ }
+
+ return "UNKNOWN";
+}
+#else
+static inline const char *
+il3945_get_tx_fail_reason(u32 status)
+{
+ return "";
+}
+#endif
+
+/*
+ * Get the previous ieee rate from the rate scale table.
+ * For A and B mode we need to override the previous
+ * value in some cases.
+ */
+int
+il3945_rs_next_rate(struct il_priv *il, int rate)
+{
+ int next_rate = il3945_get_prev_ieee_rate(rate);
+
+ switch (il->band) {
+ case IEEE80211_BAND_5GHZ:
+ if (rate == RATE_12M_IDX)
+ next_rate = RATE_9M_IDX;
+ else if (rate == RATE_6M_IDX)
+ next_rate = RATE_6M_IDX;
+ break;
+ case IEEE80211_BAND_2GHZ:
+ if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
+ il_is_associated(il)) {
+ if (rate == RATE_11M_IDX)
+ next_rate = RATE_5M_IDX;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ return next_rate;
+}
+
+/**
+ * il3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
+ *
+ * When FW advances 'R' idx, all entries between old and new 'R' idx
+ * need to be reclaimed. As a result, some free space forms. If there is
+ * enough free space (> low mark), wake the stack that feeds us.
+ */
+static void
+il3945_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
+{
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct il_queue *q = &txq->q;
+ struct il_tx_info *tx_info;
+
+ BUG_ON(txq_id == IL39_CMD_QUEUE_NUM);
+
+ for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+ q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+ tx_info = &txq->txb[txq->q.read_ptr];
+ ieee80211_tx_status_irqsafe(il->hw, tx_info->skb);
+ tx_info->skb = NULL;
+ il->cfg->ops->lib->txq_free_tfd(il, txq);
+ }
+
+ if (il_queue_space(q) > q->low_mark && txq_id >= 0 &&
+ txq_id != IL39_CMD_QUEUE_NUM && il->mac80211_registered)
+ il_wake_queue(il, txq);
+}
+
+/**
+ * il3945_hdl_tx - Handle Tx response
+ */
+static void
+il3945_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int txq_id = SEQ_TO_QUEUE(sequence);
+ int idx = SEQ_TO_IDX(sequence);
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct ieee80211_tx_info *info;
+ struct il3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
+ u32 status = le32_to_cpu(tx_resp->status);
+ int rate_idx;
+ int fail;
+
+ if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
+ IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
+ "is out of range [0-%d] %d %d\n", txq_id, idx,
+ txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
+ return;
+ }
+
+ txq->time_stamp = jiffies;
+ info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
+ ieee80211_tx_info_clear_status(info);
+
+ /* Fill the MRR chain with some info about on-chip retransmissions */
+ rate_idx = il3945_hwrate_to_plcp_idx(tx_resp->rate);
+ if (info->band == IEEE80211_BAND_5GHZ)
+ rate_idx -= IL_FIRST_OFDM_RATE;
+
+ fail = tx_resp->failure_frame;
+
+ info->status.rates[0].idx = rate_idx;
+ info->status.rates[0].count = fail + 1; /* add final attempt */
+
+ /* tx_status->rts_retry_count = tx_resp->failure_rts; */
+ info->flags |=
+ ((status & TX_STATUS_MSK) ==
+ TX_STATUS_SUCCESS) ? IEEE80211_TX_STAT_ACK : 0;
+
+ D_TX("Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n", txq_id,
+ il3945_get_tx_fail_reason(status), status, tx_resp->rate,
+ tx_resp->failure_frame);
+
+ D_TX_REPLY("Tx queue reclaim %d\n", idx);
+ il3945_tx_queue_reclaim(il, txq_id, idx);
+
+ if (status & TX_ABORT_REQUIRED_MSK)
+ IL_ERR("TODO: Implement Tx ABORT REQUIRED!!!\n");
+}
+
+/*****************************************************************************
+ *
+ * Intel PRO/Wireless 3945ABG/BG Network Connection
+ *
+ * RX handler implementations
+ *
+ *****************************************************************************/
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+static void
+il3945_accumulative_stats(struct il_priv *il, __le32 * stats)
+{
+ int i;
+ __le32 *prev_stats;
+ u32 *accum_stats;
+ u32 *delta, *max_delta;
+
+ prev_stats = (__le32 *) &il->_3945.stats;
+ accum_stats = (u32 *) &il->_3945.accum_stats;
+ delta = (u32 *) &il->_3945.delta_stats;
+ max_delta = (u32 *) &il->_3945.max_delta;
+
+ for (i = sizeof(__le32); i < sizeof(struct il3945_notif_stats);
+ i += sizeof(__le32), stats++, prev_stats++,
+ delta++, max_delta++, accum_stats++) {
+ if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+ *delta =
+ (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
+ *accum_stats += *delta;
+ if (*delta > *max_delta)
+ *max_delta = *delta;
+ }
+ }
+
+ /* reset accumulative stats for "no-counter" type stats */
+ il->_3945.accum_stats.general.temperature =
+ il->_3945.stats.general.temperature;
+ il->_3945.accum_stats.general.ttl_timestamp =
+ il->_3945.stats.general.ttl_timestamp;
+}
+#endif
+
+void
+il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+ D_RX("Statistics notification received (%d vs %d).\n",
+ (int)sizeof(struct il3945_notif_stats),
+ le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ il3945_accumulative_stats(il, (__le32 *) &pkt->u.raw);
+#endif
+
+ memcpy(&il->_3945.stats, pkt->u.raw, sizeof(il->_3945.stats));
+}
+
+void
+il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ __le32 *flag = (__le32 *) &pkt->u.raw;
+
+ if (le32_to_cpu(*flag) & UCODE_STATS_CLEAR_MSK) {
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ memset(&il->_3945.accum_stats, 0,
+ sizeof(struct il3945_notif_stats));
+ memset(&il->_3945.delta_stats, 0,
+ sizeof(struct il3945_notif_stats));
+ memset(&il->_3945.max_delta, 0,
+ sizeof(struct il3945_notif_stats));
+#endif
+ D_RX("Statistics have been cleared\n");
+ }
+ il3945_hdl_stats(il, rxb);
+}
+
+/******************************************************************************
+ *
+ * Misc. internal state and helper functions
+ *
+ ******************************************************************************/
+
+/* This is necessary only for a number of stats, see the caller. */
+static int
+il3945_is_network_packet(struct il_priv *il, struct ieee80211_hdr *header)
+{
+ /* Filter incoming packets to determine if they are targeted toward
+ * this network, discarding packets coming from ourselves */
+ switch (il->iw_mode) {
+ case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */
+ /* packets to our IBSS update information */
+ return !compare_ether_addr(header->addr3, il->bssid);
+ case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
+ /* packets to our IBSS update information */
+ return !compare_ether_addr(header->addr2, il->bssid);
+ default:
+ return 1;
+ }
+}
+
+static void
+il3945_pass_packet_to_mac80211(struct il_priv *il, struct il_rx_buf *rxb,
+ struct ieee80211_rx_status *stats)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
+ struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
+ struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
+ u16 len = le16_to_cpu(rx_hdr->len);
+ struct sk_buff *skb;
+ __le16 fc = hdr->frame_control;
+
+ /* Sanity check: drop packets whose claimed length would overflow
+ * the receive buffer */
+ if (unlikely(len + IL39_RX_FRAME_SIZE >
+ PAGE_SIZE << il->hw_params.rx_page_order)) {
+ D_DROP("Corruption detected!\n");
+ return;
+ }
+
+ /* We only process data packets if the interface is open */
+ if (unlikely(!il->is_open)) {
+ D_DROP("Dropping packet while interface is not open.\n");
+ return;
+ }
+
+ skb = dev_alloc_skb(128);
+ if (!skb) {
+ IL_ERR("dev_alloc_skb failed\n");
+ return;
+ }
+
+ if (!il3945_mod_params.sw_crypto)
+ il_set_decrypted_flag(il, (struct ieee80211_hdr *)rxb_addr(rxb),
+ le32_to_cpu(rx_end->status), stats);
+
+ skb_add_rx_frag(skb, 0, rxb->page,
+ (void *)rx_hdr->payload - (void *)pkt, len);
+
+ il_update_stats(il, false, fc, len);
+ memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+ ieee80211_rx(il->hw, skb);
+ il->alloc_rxb_page--;
+ rxb->page = NULL;
+}
+
+#define IL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
+
+static void
+il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct ieee80211_hdr *header;
+ struct ieee80211_rx_status rx_status;
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il3945_rx_frame_stats *rx_stats = IL_RX_STATS(pkt);
+ struct il3945_rx_frame_hdr *rx_hdr = IL_RX_HDR(pkt);
+ struct il3945_rx_frame_end *rx_end = IL_RX_END(pkt);
+ u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
+ u16 rx_stats_noise_diff __maybe_unused =
+ le16_to_cpu(rx_stats->noise_diff);
+ u8 network_packet;
+
+ rx_status.flag = 0;
+ rx_status.mactime = le64_to_cpu(rx_end->timestamp);
+ rx_status.band =
+ (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
+ IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+ rx_status.freq =
+ ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
+ rx_status.band);
+
+ rx_status.rate_idx = il3945_hwrate_to_plcp_idx(rx_hdr->rate);
+ if (rx_status.band == IEEE80211_BAND_5GHZ)
+ rx_status.rate_idx -= IL_FIRST_OFDM_RATE;
+
+ rx_status.antenna =
+ (le16_to_cpu(rx_hdr->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
+ 4;
+
+ /* set the preamble flag if appropriate */
+ if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+ rx_status.flag |= RX_FLAG_SHORTPRE;
+
+ if (unlikely(rx_stats->phy_count > 20)) {
+ D_DROP("dsp size out of range [0,20]: %d\n",
+ rx_stats->phy_count);
+ return;
+ }
+
+ if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR) ||
+ !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+ D_RX("Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
+ return;
+ }
+
+ /* Convert 3945's rssi indicator to dBm */
+ rx_status.signal = rx_stats->rssi - IL39_RSSI_OFFSET;
+
+ D_STATS("Rssi %d sig_avg %d noise_diff %d\n", rx_status.signal,
+ rx_stats_sig_avg, rx_stats_noise_diff);
+
+ header = (struct ieee80211_hdr *)IL_RX_DATA(pkt);
+
+ network_packet = il3945_is_network_packet(il, header);
+
+ D_STATS("[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
+ network_packet ? '*' : ' ', le16_to_cpu(rx_hdr->channel),
+ rx_status.signal, rx_status.signal, rx_status.rate_idx);
+
+ il_dbg_log_rx_data_frame(il, le16_to_cpu(rx_hdr->len), header);
+
+ if (network_packet) {
+ il->_3945.last_beacon_time =
+ le32_to_cpu(rx_end->beacon_timestamp);
+ il->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
+ il->_3945.last_rx_rssi = rx_status.signal;
+ }
+
+ il3945_pass_packet_to_mac80211(il, rxb, &rx_status);
+}
+
+int
+il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+ dma_addr_t addr, u16 len, u8 reset, u8 pad)
+{
+ int count;
+ struct il_queue *q;
+ struct il3945_tfd *tfd, *tfd_tmp;
+
+ q = &txq->q;
+ tfd_tmp = (struct il3945_tfd *)txq->tfds;
+ tfd = &tfd_tmp[q->write_ptr];
+
+ if (reset)
+ memset(tfd, 0, sizeof(*tfd));
+
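+ /* number of buffer chunks already attached to this TFD */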
+ count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
+
+ if (count >= NUM_TFD_CHUNKS || count < 0) {
+ IL_ERR("Error can not send more than %d chunks\n",
+ NUM_TFD_CHUNKS);
+ return -EINVAL;
+ }
+
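+ /* append this buffer as the next chunk of the TFD */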
+ tfd->tbs[count].addr = cpu_to_le32(addr);
+ tfd->tbs[count].len = cpu_to_le32(len);
+
+ count++;
+
+ tfd->control_flags =
+ cpu_to_le32(TFD_CTL_COUNT_SET(count) | TFD_CTL_PAD_SET(pad));
+
+ return 0;
+}
+
+/**
+ * il3945_hw_txq_free_tfd - Free one TFD, those at idx [txq->q.read_ptr]
+ *
+ * Does NOT advance any idxes
+ */
+void
+il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
+{
+ struct il3945_tfd *tfd_tmp = (struct il3945_tfd *)txq->tfds;
+ int idx = txq->q.read_ptr;
+ struct il3945_tfd *tfd = &tfd_tmp[idx];
+ struct pci_dev *dev = il->pci_dev;
+ int i;
+ int counter;
+
+ /* sanity check */
+ counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
+ if (counter > NUM_TFD_CHUNKS) {
+ IL_ERR("Too many chunks: %i\n", counter);
+ /* @todo issue fatal error, this is quite a serious situation */
+ return;
+ }
+
+ /* Unmap tx_cmd */
+ if (counter)
+ pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
+ dma_unmap_len(&txq->meta[idx], len),
+ PCI_DMA_TODEVICE);
+
+ /* unmap chunks if any */
+
+ for (i = 1; i < counter; i++)
+ pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
+ le32_to_cpu(tfd->tbs[i].len),
+ PCI_DMA_TODEVICE);
+
+ /* free SKB */
+ if (txq->txb) {
+ struct sk_buff *skb;
+
+ skb = txq->txb[txq->q.read_ptr].skb;
+
+ /* can be called from irqs-disabled context */
+ if (skb) {
+ dev_kfree_skb_any(skb);
+ txq->txb[txq->q.read_ptr].skb = NULL;
+ }
+ }
+}
+
+/**
+ * il3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD:
+ *
+*/
+void
+il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr, int sta_id)
+{
+ u16 hw_value = ieee80211_get_tx_rate(il->hw, info)->hw_value;
+ u16 rate_idx = min(hw_value & 0xffff, RATE_COUNT_3945 - 1);
+ u16 rate_mask;
+ int rate;
+ const u8 rts_retry_limit = 7;
+ u8 data_retry_limit;
+ __le32 tx_flags;
+ __le16 fc = hdr->frame_control;
+ struct il3945_tx_cmd *tx_cmd = (struct il3945_tx_cmd *)cmd->cmd.payload;
+
+ rate = il3945_rates[rate_idx].plcp;
+ tx_flags = tx_cmd->tx_flags;
+
+ /* We need to figure out how to get the sta->supp_rates while
+ * in this running context */
+ rate_mask = RATES_MASK_3945;
+
+ /* Set retry limit on DATA packets and Probe Responses */
+ if (ieee80211_is_probe_resp(fc))
+ data_retry_limit = 3;
+ else
+ data_retry_limit = IL_DEFAULT_TX_RETRY;
+ tx_cmd->data_retry_limit = data_retry_limit;
+ /* Set retry limit on RTS packets */
+ tx_cmd->rts_retry_limit = min(data_retry_limit, rts_retry_limit);
+
+ tx_cmd->rate = rate;
+ tx_cmd->tx_flags = tx_flags;
+
+ /* OFDM */
+ tx_cmd->supp_rates[0] =
+ ((rate_mask & IL_OFDM_RATES_MASK) >> IL_FIRST_OFDM_RATE) & 0xFF;
+
+ /* CCK */
+ tx_cmd->supp_rates[1] = (rate_mask & 0xF);
+
+ D_RATE("Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
+ "cck/ofdm mask: 0x%x/0x%x\n", sta_id, tx_cmd->rate,
+ le32_to_cpu(tx_cmd->tx_flags), tx_cmd->supp_rates[1],
+ tx_cmd->supp_rates[0]);
+}
+
+static u8
+il3945_sync_sta(struct il_priv *il, int sta_id, u16 tx_rate)
+{
+ unsigned long flags_spin;
+ struct il_station_entry *station;
+
+ if (sta_id == IL_INVALID_STATION)
+ return IL_INVALID_STATION;
+
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ station = &il->stations[sta_id];
+
+ station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
+ station->sta.rate_n_flags = cpu_to_le16(tx_rate);
+ station->sta.mode = STA_CONTROL_MODIFY_MSK;
+ il_send_add_sta(il, &station->sta, CMD_ASYNC);
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+
+ D_RATE("SCALE sync station %d to rate %d\n", sta_id, tx_rate);
+ return sta_id;
+}
+
+static void
+il3945_set_pwr_vmain(struct il_priv *il)
+{
+/*
+ * (for documentation purposes)
+ * to set power to V_AUX, do
+
+ if (pci_pme_capable(il->pci_dev, PCI_D3cold)) {
+ il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+
+ _il_poll_bit(il, CSR_GPIO_IN,
+ CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
+ CSR_GPIO_IN_BIT_AUX_POWER, 5000);
+ }
+ */
+
+ il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+
+ _il_poll_bit(il, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
+ CSR_GPIO_IN_BIT_AUX_POWER, 5000);
+}
+
+static int
+il3945_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
+{
+ il_wr(il, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
+ il_wr(il, FH39_RCSR_RPTR_ADDR(0), rxq->rb_stts_dma);
+ il_wr(il, FH39_RCSR_WPTR(0), 0);
+ il_wr(il, FH39_RCSR_CONFIG(0),
+ FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
+ FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
+ FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
+ FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 |
+ (RX_QUEUE_SIZE_LOG << FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) |
+ FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST |
+ (1 << FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) |
+ FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
+
+ /* fake read to flush all prev I/O */
+ il_rd(il, FH39_RSSR_CTRL);
+
+ return 0;
+}
+
+static int
+il3945_tx_reset(struct il_priv *il)
+{
+
+ /* bypass mode */
+ il_wr_prph(il, ALM_SCD_MODE_REG, 0x2);
+
+ /* RA 0 is active */
+ il_wr_prph(il, ALM_SCD_ARASTAT_REG, 0x01);
+
+ /* all 6 fifo are active */
+ il_wr_prph(il, ALM_SCD_TXFACT_REG, 0x3f);
+
+ il_wr_prph(il, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
+ il_wr_prph(il, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
+ il_wr_prph(il, ALM_SCD_TXF4MF_REG, 0x000004);
+ il_wr_prph(il, ALM_SCD_TXF5MF_REG, 0x000005);
+
+ il_wr(il, FH39_TSSR_CBB_BASE, il->_3945.shared_phys);
+
+ il_wr(il, FH39_TSSR_MSG_CONFIG,
+ FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
+ FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
+ FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
+ FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
+ FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
+ FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
+ FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);
+
+ return 0;
+}
+
+/**
+ * il3945_txq_ctx_reset - Reset TX queue context
+ *
+ * Destroys all DMA structures and initializes them again
+ */
+static int
+il3945_txq_ctx_reset(struct il_priv *il)
+{
+ int rc;
+ int txq_id, slots_num;
+
+ il3945_hw_txq_ctx_free(il);
+
+ /* allocate tx queue structure */
+ rc = il_alloc_txq_mem(il);
+ if (rc)
+ return rc;
+
+ /* Tx CMD queue */
+ rc = il3945_tx_reset(il);
+ if (rc)
+ goto error;
+
+ /* Tx queue(s) */
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+ slots_num = (txq_id == IL39_CMD_QUEUE_NUM) ?
+ TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ rc = il_tx_queue_init(il, &il->txq[txq_id], slots_num, txq_id);
+ if (rc) {
+ IL_ERR("Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+ }
+
+ return rc;
+
+error:
+ il3945_hw_txq_ctx_free(il);
+ return rc;
+}
+
+/*
+ * Start up 3945's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via il_apm_stop())
+ * NOTE: This does not load uCode nor start the embedded processor
+ */
+static int
+il3945_apm_init(struct il_priv *il)
+{
+ int ret = il_apm_init(il);
+
+ /* Clear APMG (NIC's internal power management) interrupts */
+ il_wr_prph(il, APMG_RTC_INT_MSK_REG, 0x0);
+ il_wr_prph(il, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
+
+ /* Reset radio chip */
+ il_set_bits_prph(il, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
+ udelay(5);
+ il_clear_bits_prph(il, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
+
+ return ret;
+}
+
+static void
+il3945_nic_config(struct il_priv *il)
+{
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+ unsigned long flags;
+ u8 rev_id = il->pci_dev->revision;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Determine HW type */
+ D_INFO("HW Revision ID = 0x%X\n", rev_id);
+
+ if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
+ D_INFO("RTP type\n");
+ else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
+ D_INFO("3945 RADIO-MB type\n");
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
+ } else {
+ D_INFO("3945 RADIO-MM type\n");
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
+ }
+
+ if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
+ D_INFO("SKU OP mode is mrc\n");
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
+ } else
+ D_INFO("SKU OP mode is basic\n");
+
+ if ((eeprom->board_revision & 0xF0) == 0xD0) {
+ D_INFO("3945ABG revision is 0x%X\n", eeprom->board_revision);
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
+ } else {
+ D_INFO("3945ABG revision is 0x%X\n", eeprom->board_revision);
+ il_clear_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
+ }
+
+ if (eeprom->almgor_m_version <= 1) {
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
+ D_INFO("Card M type A version is 0x%X\n",
+ eeprom->almgor_m_version);
+ } else {
+ D_INFO("Card M type B version is 0x%X\n",
+ eeprom->almgor_m_version);
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
+ }
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
+ D_RF_KILL("SW RF KILL supported in EEPROM.\n");
+
+ if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
+ D_RF_KILL("HW RF KILL supported in EEPROM.\n");
+}
+
+int
+il3945_hw_nic_init(struct il_priv *il)
+{
+ int rc;
+ unsigned long flags;
+ struct il_rx_queue *rxq = &il->rxq;
+
+ spin_lock_irqsave(&il->lock, flags);
+ il->cfg->ops->lib->apm_ops.init(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ il3945_set_pwr_vmain(il);
+
+ il->cfg->ops->lib->apm_ops.config(il);
+
+ /* Allocate the RX queue, or reset if it is already allocated */
+ if (!rxq->bd) {
+ rc = il_rx_queue_alloc(il);
+ if (rc) {
+ IL_ERR("Unable to initialize Rx queue\n");
+ return -ENOMEM;
+ }
+ } else
+ il3945_rx_queue_reset(il, rxq);
+
+ il3945_rx_replenish(il);
+
+ il3945_rx_init(il, rxq);
+
+ /* Look at using this instead:
+ rxq->need_update = 1;
+ il_rx_queue_update_write_ptr(il, rxq);
+ */
+
+ il_wr(il, FH39_RCSR_WPTR(0), rxq->write & ~7);
+
+ rc = il3945_txq_ctx_reset(il);
+ if (rc)
+ return rc;
+
+ set_bit(S_INIT, &il->status);
+
+ return 0;
+}
+
+/**
+ * il3945_hw_txq_ctx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void
+il3945_hw_txq_ctx_free(struct il_priv *il)
+{
+ int txq_id;
+
+ /* Tx queues */
+ if (il->txq)
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+ if (txq_id == IL39_CMD_QUEUE_NUM)
+ il_cmd_queue_free(il);
+ else
+ il_tx_queue_free(il, txq_id);
+
+ /* free tx queue structure */
+ il_free_txq_mem(il);
+}
+
+void
+il3945_hw_txq_ctx_stop(struct il_priv *il)
+{
+ int txq_id;
+
+ /* stop SCD */
+ il_wr_prph(il, ALM_SCD_MODE_REG, 0);
+ il_wr_prph(il, ALM_SCD_TXFACT_REG, 0);
+
+ /* reset TFD queues */
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+ il_wr(il, FH39_TCSR_CONFIG(txq_id), 0x0);
+ il_poll_bit(il, FH39_TSSR_TX_STATUS,
+ FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
+ 1000);
+ }
+
+ il3945_hw_txq_ctx_free(il);
+}
+
+/**
+ * il3945_hw_reg_adjust_power_by_temp
+ * return idx delta into power gain settings table
+*/
+static int
+il3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
+{
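+ /* 11 gain-table steps (1/2 dB each) per 100 units of temperature
+ * difference; e.g. (assumed numbers) a reading 100 above the old
+ * one yields -11, shifting toward the higher-power entries */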
+ return (new_reading - old_reading) * (-11) / 100;
+}
+
+/**
+ * il3945_hw_reg_temp_out_of_range - Keep temperature in sane range
+ */
+static inline int
+il3945_hw_reg_temp_out_of_range(int temperature)
+{
+ return (temperature < -260 || temperature > 25) ? 1 : 0;
+}
+
+int
+il3945_hw_get_temperature(struct il_priv *il)
+{
+ return _il_rd(il, CSR_UCODE_DRV_GP2);
+}
+
+/**
+ * il3945_hw_reg_txpower_get_temperature
+ * get the current temperature by reading from NIC
+*/
+static int
+il3945_hw_reg_txpower_get_temperature(struct il_priv *il)
+{
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+ int temperature;
+
+ temperature = il3945_hw_get_temperature(il);
+
+ /* driver's okay range is -260 to +25.
+ * human readable okay range is 0 to +285 */
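+ /* the ranges above imply human readable = raw + 260, the offset
+ * that IL_TEMP_CONVERT supplies in the message below */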
+ D_INFO("Temperature: %d\n", temperature + IL_TEMP_CONVERT);
+
+ /* handle insane temp reading */
+ if (il3945_hw_reg_temp_out_of_range(temperature)) {
+ IL_ERR("Error bad temperature value %d\n", temperature);
+
+ /* if really really hot(?),
+ * substitute the 3rd band/group's temp measured at factory */
+ if (il->last_temperature > 100)
+ temperature = eeprom->groups[2].temperature;
+ else /* else use most recent "sane" value from driver */
+ temperature = il->last_temperature;
+ }
+
+ return temperature; /* raw, not "human readable" */
+}
+
+/* Adjust Txpower only if temperature variance is greater than threshold.
+ *
+ * This threshold is lower than older versions' 9 degrees */
+#define IL_TEMPERATURE_LIMIT_TIMER 6
+
+/**
+ * il3945_is_temp_calib_needed - determines if new calibration is needed
+ *
+ * records new temperature in il->temperature.
+ * replaces il->last_temperature *only* if calib needed
+ * (assumes caller will actually do the calibration!). */
+static int
+il3945_is_temp_calib_needed(struct il_priv *il)
+{
+ int temp_diff;
+
+ il->temperature = il3945_hw_reg_txpower_get_temperature(il);
+ temp_diff = il->temperature - il->last_temperature;
+
+ /* get absolute value */
+ if (temp_diff < 0) {
+ D_POWER("Getting cooler, delta %d,\n", temp_diff);
+ temp_diff = -temp_diff;
+ } else if (temp_diff == 0)
+ D_POWER("Same temp,\n");
+ else
+ D_POWER("Getting warmer, delta %d,\n", temp_diff);
+
+ /* if we don't need calibration, *don't* update last_temperature */
+ if (temp_diff < IL_TEMPERATURE_LIMIT_TIMER) {
+ D_POWER("Timed thermal calib not needed\n");
+ return 0;
+ }
+
+ D_POWER("Timed thermal calib needed\n");
+
+ /* assume that caller will actually do calib ...
+ * update the "last temperature" value */
+ il->last_temperature = il->temperature;
+ return 1;
+}
+
+#define IL_MAX_GAIN_ENTRIES 78
+#define IL_CCK_FROM_OFDM_POWER_DIFF -5
+#define IL_CCK_FROM_OFDM_IDX_DIFF (10)
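+/* the two CCK-from-OFDM deltas above are consistent with the 1/2 dB
+ * table step below: 5 dB less power is 10 entries toward lower power */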
+
+/* radio and DSP power table, each step is 1/2 dB.
+ * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. */
+static struct il3945_tx_power power_gain_table[2][IL_MAX_GAIN_ENTRIES] = {
+ {
+ {251, 127}, /* 2.4 GHz, highest power */
+ {251, 127},
+ {251, 127},
+ {251, 127},
+ {251, 125},
+ {251, 110},
+ {251, 105},
+ {251, 98},
+ {187, 125},
+ {187, 115},
+ {187, 108},
+ {187, 99},
+ {243, 119},
+ {243, 111},
+ {243, 105},
+ {243, 97},
+ {243, 92},
+ {211, 106},
+ {211, 100},
+ {179, 120},
+ {179, 113},
+ {179, 107},
+ {147, 125},
+ {147, 119},
+ {147, 112},
+ {147, 106},
+ {147, 101},
+ {147, 97},
+ {147, 91},
+ {115, 107},
+ {235, 121},
+ {235, 115},
+ {235, 109},
+ {203, 127},
+ {203, 121},
+ {203, 115},
+ {203, 108},
+ {203, 102},
+ {203, 96},
+ {203, 92},
+ {171, 110},
+ {171, 104},
+ {171, 98},
+ {139, 116},
+ {227, 125},
+ {227, 119},
+ {227, 113},
+ {227, 107},
+ {227, 101},
+ {227, 96},
+ {195, 113},
+ {195, 106},
+ {195, 102},
+ {195, 95},
+ {163, 113},
+ {163, 106},
+ {163, 102},
+ {163, 95},
+ {131, 113},
+ {131, 106},
+ {131, 102},
+ {131, 95},
+ {99, 113},
+ {99, 106},
+ {99, 102},
+ {99, 95},
+ {67, 113},
+ {67, 106},
+ {67, 102},
+ {67, 95},
+ {35, 113},
+ {35, 106},
+ {35, 102},
+ {35, 95},
+ {3, 113},
+ {3, 106},
+ {3, 102},
+ {3, 95} /* 2.4 GHz, lowest power */
+ },
+ {
+ {251, 127}, /* 5.x GHz, highest power */
+ {251, 120},
+ {251, 114},
+ {219, 119},
+ {219, 101},
+ {187, 113},
+ {187, 102},
+ {155, 114},
+ {155, 103},
+ {123, 117},
+ {123, 107},
+ {123, 99},
+ {123, 92},
+ {91, 108},
+ {59, 125},
+ {59, 118},
+ {59, 109},
+ {59, 102},
+ {59, 96},
+ {59, 90},
+ {27, 104},
+ {27, 98},
+ {27, 92},
+ {115, 118},
+ {115, 111},
+ {115, 104},
+ {83, 126},
+ {83, 121},
+ {83, 113},
+ {83, 105},
+ {83, 99},
+ {51, 118},
+ {51, 111},
+ {51, 104},
+ {51, 98},
+ {19, 116},
+ {19, 109},
+ {19, 102},
+ {19, 98},
+ {19, 93},
+ {171, 113},
+ {171, 107},
+ {171, 99},
+ {139, 120},
+ {139, 113},
+ {139, 107},
+ {139, 99},
+ {107, 120},
+ {107, 113},
+ {107, 107},
+ {107, 99},
+ {75, 120},
+ {75, 113},
+ {75, 107},
+ {75, 99},
+ {43, 120},
+ {43, 113},
+ {43, 107},
+ {43, 99},
+ {11, 120},
+ {11, 113},
+ {11, 107},
+ {11, 99},
+ {131, 107},
+ {131, 99},
+ {99, 120},
+ {99, 113},
+ {99, 107},
+ {99, 99},
+ {67, 120},
+ {67, 113},
+ {67, 107},
+ {67, 99},
+ {35, 120},
+ {35, 113},
+ {35, 107},
+ {35, 99},
+ {3, 120} /* 5.x GHz, lowest power */
+ }
+};
+
+static inline u8
+il3945_hw_reg_fix_power_idx(int idx)
+{
+ if (idx < 0)
+ return 0;
+ if (idx >= IL_MAX_GAIN_ENTRIES)
+ return IL_MAX_GAIN_ENTRIES - 1;
+ return (u8) idx;
+}
+
+/* Kick off thermal recalibration check every 60 seconds */
+#define REG_RECALIB_PERIOD (60)
+
+/**
+ * il3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
+ *
+ * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
+ * or 6 Mbit (OFDM) rates.
+ */
+static void
+il3945_hw_reg_set_scan_power(struct il_priv *il, u32 scan_tbl_idx, s32 rate_idx,
+ const s8 *clip_pwrs,
+ struct il_channel_info *ch_info, int band_idx)
+{
+ struct il3945_scan_power_info *scan_power_info;
+ s8 power;
+ u8 power_idx;
+
+ scan_power_info = &ch_info->scan_pwr_info[scan_tbl_idx];
+
+ /* use this channel group's 6Mbit clipping/saturation pwr,
+ * but cap at regulatory scan power restriction (set during init
+ * based on eeprom channel data) for this channel. */
+ power = min(ch_info->scan_power, clip_pwrs[RATE_6M_IDX_TBL]);
+
+ power = min(power, il->tx_power_user_lmt);
+ scan_power_info->requested_power = power;
+
+ /* find difference between new scan *power* and current "normal"
+ * Tx *power* for 6Mb. Use this difference (x2) to adjust the
+ * current "normal" temperature-compensated Tx power *idx* for
+ * this rate (1Mb or 6Mb) to yield new temp-compensated scan power
+ * *idx*. */
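+ /* e.g. (assumed numbers): a scan power 3 dBm below the 6 Mbit
+ * requested power raises the idx by 3 * 2 = 6 half-dB steps,
+ * toward the lower-power end of the gain table */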
+ power_idx =
+ ch_info->power_info[rate_idx].power_table_idx -
+ (power - ch_info->power_info[RATE_6M_IDX_TBL].requested_power) * 2;
+
+ /* store reference idx that we use when adjusting *all* scan
+ * powers. So we can accommodate user (all channel) or spectrum
+ * management (single channel) power changes "between" temperature
+ * feedback compensation procedures.
+ * don't force fit this reference idx into gain table; it may be a
+ * negative number. This will help avoid errors when we're at
+ * the lower bounds (highest gains, for warmest temperatures)
+ * of the table. */
+
+ /* don't exceed table bounds for "real" setting */
+ power_idx = il3945_hw_reg_fix_power_idx(power_idx);
+
+ scan_power_info->power_table_idx = power_idx;
+ scan_power_info->tpc.tx_gain =
+ power_gain_table[band_idx][power_idx].tx_gain;
+ scan_power_info->tpc.dsp_atten =
+ power_gain_table[band_idx][power_idx].dsp_atten;
+}
+
+/**
+ * il3945_send_tx_power - fill in Tx Power command with gain settings
+ *
+ * Configures power settings for all rates for the current channel,
+ * using values from channel info struct, and send to NIC
+ */
+static int
+il3945_send_tx_power(struct il_priv *il)
+{
+ int rate_idx, i;
+ const struct il_channel_info *ch_info = NULL;
+ struct il3945_txpowertable_cmd txpower = {
+ .channel = il->ctx.active.channel,
+ };
+ u16 chan;
+
+ if (WARN_ONCE(test_bit(S_SCAN_HW, &il->status),
+ "TX Power requested while scanning!\n"))
+ return -EAGAIN;
+
+ chan = le16_to_cpu(il->ctx.active.channel);
+
+ txpower.band = (il->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
+ ch_info = il_get_channel_info(il, il->band, chan);
+ if (!ch_info) {
+ IL_ERR("Failed to get channel info for channel %d [%d]\n", chan,
+ il->band);
+ return -EINVAL;
+ }
+
+ if (!il_is_channel_valid(ch_info)) {
+ D_POWER("Not calling TX_PWR_TBL_CMD on " "non-Tx channel.\n");
+ return 0;
+ }
+
+ /* fill cmd with power settings for all rates for current channel */
+ /* Fill OFDM rate */
+ for (rate_idx = IL_FIRST_OFDM_RATE, i = 0;
+ rate_idx <= IL39_LAST_OFDM_RATE; rate_idx++, i++) {
+
+ txpower.power[i].tpc = ch_info->power_info[i].tpc;
+ txpower.power[i].rate = il3945_rates[rate_idx].plcp;
+
+ D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
+ le16_to_cpu(txpower.channel), txpower.band,
+ txpower.power[i].tpc.tx_gain,
+ txpower.power[i].tpc.dsp_atten, txpower.power[i].rate);
+ }
+ /* Fill CCK rates */
+ for (rate_idx = IL_FIRST_CCK_RATE; rate_idx <= IL_LAST_CCK_RATE;
+ rate_idx++, i++) {
+ txpower.power[i].tpc = ch_info->power_info[i].tpc;
+ txpower.power[i].rate = il3945_rates[rate_idx].plcp;
+
+ D_POWER("ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
+ le16_to_cpu(txpower.channel), txpower.band,
+ txpower.power[i].tpc.tx_gain,
+ txpower.power[i].tpc.dsp_atten, txpower.power[i].rate);
+ }
+
+ return il_send_cmd_pdu(il, C_TX_PWR_TBL,
+ sizeof(struct il3945_txpowertable_cmd),
+ &txpower);
+
+}
+
+/**
+ * il3945_hw_reg_set_new_power - Configures power tables at new levels
+ * @ch_info: Channel to update. Uses power_info.requested_power.
+ *
+ * Replace requested_power and base_power_idx ch_info fields for
+ * one channel.
+ *
+ * Called if user or spectrum management changes power preferences.
+ * Takes into account h/w and modulation limitations (clip power).
+ *
+ * This does *not* send anything to NIC, just sets up ch_info for one channel.
+ *
+ * NOTE: reg_compensate_for_temperature_dif() *must* be run after this to
+ * properly fill out the scan powers, and actual h/w gain settings,
+ * and send changes to NIC
+ */
+static int
+il3945_hw_reg_set_new_power(struct il_priv *il, struct il_channel_info *ch_info)
+{
+ struct il3945_channel_power_info *power_info;
+ int power_changed = 0;
+ int i;
+ const s8 *clip_pwrs;
+ int power;
+
+ /* Get this chnlgrp's rate-to-max/clip-powers table */
+ clip_pwrs = il->_3945.clip_groups[ch_info->group_idx].clip_powers;
+
+ /* Get this channel's rate-to-current-power settings table */
+ power_info = ch_info->power_info;
+
+ /* update OFDM Txpower settings */
+ for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++, ++power_info) {
+ int delta_idx;
+
+ /* limit new power to be no more than h/w capability */
+ power = min(ch_info->curr_txpow, clip_pwrs[i]);
+ if (power == power_info->requested_power)
+ continue;
+
+ /* find difference between old and new requested powers,
+ * update base (non-temp-compensated) power idx */
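+ /* gain table granularity is 1/2 dB, so 1 dB of change
+ * corresponds to 2 idx steps */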
+ delta_idx = (power - power_info->requested_power) * 2;
+ power_info->base_power_idx -= delta_idx;
+
+ /* save new requested power value */
+ power_info->requested_power = power;
+
+ power_changed = 1;
+ }
+
+ /* update CCK Txpower settings, based on OFDM 12M setting ...
+ * ... all CCK power settings for a given channel are the *same*. */
+ if (power_changed) {
+ power =
+ ch_info->power_info[RATE_12M_IDX_TBL].requested_power +
+ IL_CCK_FROM_OFDM_POWER_DIFF;
+
+ /* do all CCK rates' il3945_channel_power_info structures */
+ for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++) {
+ power_info->requested_power = power;
+ power_info->base_power_idx =
+ ch_info->power_info[RATE_12M_IDX_TBL].
+ base_power_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
+ ++power_info;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * il3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
+ *
+ * NOTE: Returned power limit may be less (but not more) than requested,
+ * based strictly on regulatory (eeprom and spectrum mgt) limitations
+ * (no consideration for h/w clipping limitations).
+ */
+static int
+il3945_hw_reg_get_ch_txpower_limit(struct il_channel_info *ch_info)
+{
+ s8 max_power;
+
+#if 0
+ /* if we're using TGd limits, use lower of TGd or EEPROM */
+ if (ch_info->tgd_data.max_power != 0)
+ max_power =
+ min(ch_info->tgd_data.max_power,
+ ch_info->eeprom.max_power_avg);
+
+ /* else just use EEPROM limits */
+ else
+#endif
+ max_power = ch_info->eeprom.max_power_avg;
+
+ return min(max_power, ch_info->max_power_avg);
+}
+
+/**
+ * il3945_hw_reg_comp_txpower_temp - Compensate for temperature
+ *
+ * Compensate txpower settings of *all* channels for temperature.
+ * This only accounts for the difference between current temperature
+ * and the factory calibration temperatures, and bases the new settings
+ * on the channel's base_power_idx.
+ *
+ * If RxOn is "associated", this sends the new Txpower to NIC!
+ */
+static int
+il3945_hw_reg_comp_txpower_temp(struct il_priv *il)
+{
+ struct il_channel_info *ch_info = NULL;
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+ int delta_idx;
+ const s8 *clip_pwrs; /* array of h/w max power levels for each rate */
+ u8 a_band;
+ u8 rate_idx;
+ u8 scan_tbl_idx;
+ u8 i;
+ int ref_temp;
+ int temperature = il->temperature;
+
+ if (il->disable_tx_power_cal || test_bit(S_SCANNING, &il->status)) {
+ /* do not perform tx power calibration */
+ return 0;
+ }
+ /* set up new Tx power info for each and every channel, 2.4 and 5.x */
+ for (i = 0; i < il->channel_count; i++) {
+ ch_info = &il->channel_info[i];
+ a_band = il_is_channel_a_band(ch_info);
+
+ /* Get this chnlgrp's factory calibration temperature */
+ ref_temp = (s16) eeprom->groups[ch_info->group_idx].temperature;
+
+ /* get power idx adjustment based on current and factory
+ * temps */
+ delta_idx =
+ il3945_hw_reg_adjust_power_by_temp(temperature, ref_temp);
+
+ /* set tx power value for all rates, OFDM and CCK */
+ for (rate_idx = 0; rate_idx < RATE_COUNT_3945; rate_idx++) {
+ int power_idx =
+ ch_info->power_info[rate_idx].base_power_idx;
+
+ /* temperature compensate */
+ power_idx += delta_idx;
+
+ /* stay within table range */
+ power_idx = il3945_hw_reg_fix_power_idx(power_idx);
+ ch_info->power_info[rate_idx].power_table_idx =
+ (u8) power_idx;
+ ch_info->power_info[rate_idx].tpc =
+ power_gain_table[a_band][power_idx];
+ }
+
+ /* Get this chnlgrp's rate-to-max/clip-powers table */
+ clip_pwrs =
+ il->_3945.clip_groups[ch_info->group_idx].clip_powers;
+
+ /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
+ for (scan_tbl_idx = 0; scan_tbl_idx < IL_NUM_SCAN_RATES;
+ scan_tbl_idx++) {
+ s32 actual_idx = (scan_tbl_idx == 0) ?
+ RATE_1M_IDX_TBL : RATE_6M_IDX_TBL;
+ il3945_hw_reg_set_scan_power(il, scan_tbl_idx,
+ actual_idx, clip_pwrs,
+ ch_info, a_band);
+ }
+ }
+
+ /* send Txpower command for current channel to ucode */
+ return il->cfg->ops->lib->send_tx_power(il);
+}
+
+int
+il3945_hw_reg_set_txpower(struct il_priv *il, s8 power)
+{
+ struct il_channel_info *ch_info;
+ s8 max_power;
+ u8 a_band;
+ u8 i;
+
+ if (il->tx_power_user_lmt == power) {
+ D_POWER("Requested Tx power same as current " "limit: %ddBm.\n",
+ power);
+ return 0;
+ }
+
+ D_POWER("Setting upper limit clamp to %ddBm.\n", power);
+ il->tx_power_user_lmt = power;
+
+ /* set up new Tx powers for each and every channel, 2.4 and 5.x */
+
+ for (i = 0; i < il->channel_count; i++) {
+ ch_info = &il->channel_info[i];
+ a_band = il_is_channel_a_band(ch_info);
+
+ /* find minimum power of all user and regulatory constraints
+ * (does not consider h/w clipping limitations) */
+ max_power = il3945_hw_reg_get_ch_txpower_limit(ch_info);
+ max_power = min(power, max_power);
+ if (max_power != ch_info->curr_txpow) {
+ ch_info->curr_txpow = max_power;
+
+ /* this considers the h/w clipping limitations */
+ il3945_hw_reg_set_new_power(il, ch_info);
+ }
+ }
+
+ /* update txpower settings for all channels,
+ * send to NIC if associated. */
+ il3945_is_temp_calib_needed(il);
+ il3945_hw_reg_comp_txpower_temp(il);
+
+ return 0;
+}
+
+static int
+il3945_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ int rc = 0;
+ struct il_rx_pkt *pkt;
+ struct il3945_rxon_assoc_cmd rxon_assoc;
+ struct il_host_cmd cmd = {
+ .id = C_RXON_ASSOC,
+ .len = sizeof(rxon_assoc),
+ .flags = CMD_WANT_SKB,
+ .data = &rxon_assoc,
+ };
+ const struct il_rxon_cmd *rxon1 = &ctx->staging;
+ const struct il_rxon_cmd *rxon2 = &ctx->active;
+
+ if (rxon1->flags == rxon2->flags &&
+ rxon1->filter_flags == rxon2->filter_flags &&
+ rxon1->cck_basic_rates == rxon2->cck_basic_rates &&
+ rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) {
+ D_INFO("Using current RXON_ASSOC. Not resending.\n");
+ return 0;
+ }
+
+ rxon_assoc.flags = ctx->staging.flags;
+ rxon_assoc.filter_flags = ctx->staging.filter_flags;
+ rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
+ rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
+ rxon_assoc.reserved = 0;
+
+ rc = il_send_cmd_sync(il, &cmd);
+ if (rc)
+ return rc;
+
+ pkt = (struct il_rx_pkt *)cmd.reply_page;
+ if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+ IL_ERR("Bad return from C_RXON_ASSOC command\n");
+ rc = -EIO;
+ }
+
+ il_free_pages(il, cmd.reply_page);
+
+ return rc;
+}
+
+/**
+ * il3945_commit_rxon - commit staging_rxon to hardware
+ *
+ * The RXON command in staging_rxon is committed to the hardware and
+ * the active_rxon structure is updated with the new data. This
+ * function correctly transitions out of the RXON_ASSOC_MSK state if
+ * a HW tune is required based on the RXON structure changes.
+ */
+int
+il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ /* cast away the const for active_rxon in this function */
+ struct il3945_rxon_cmd *active_rxon = (void *)&ctx->active;
+ struct il3945_rxon_cmd *staging_rxon = (void *)&ctx->staging;
+ int rc = 0;
+ bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return -EINVAL;
+
+ if (!il_is_alive(il))
+ return -1;
+
+ /* always get timestamp with Rx frame */
+ staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;
+
+ /* select antenna */
+ staging_rxon->flags &= ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
+ staging_rxon->flags |= il3945_get_antenna_flags(il);
+
+ rc = il_check_rxon_cmd(il, ctx);
+ if (rc) {
+ IL_ERR("Invalid RXON configuration. Not committing.\n");
+ return -EINVAL;
+ }
+
+ /* If we don't need to send a full RXON, we can use
+ * il3945_rxon_assoc_cmd which is used to reconfigure filter
+ * and other flags for the current radio configuration. */
+ if (!il_full_rxon_required(il, &il->ctx)) {
+ rc = il_send_rxon_assoc(il, &il->ctx);
+ if (rc) {
+ IL_ERR("Error setting RXON_ASSOC "
+ "configuration (%d).\n", rc);
+ return rc;
+ }
+
+ memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
+ /*
+ * We do not commit tx power settings while channel changing,
+ * do it now if tx power changed.
+ */
+ il_set_tx_power(il, il->tx_power_next, false);
+ return 0;
+ }
+
+ /* If we are currently associated and the new config requires
+ * an RXON_ASSOC and the new config wants the associated mask enabled,
+ * we must clear the associated from the active configuration
+ * before we apply the new config */
+ if (il_is_associated(il) && new_assoc) {
+ D_INFO("Toggling associated bit on current RXON\n");
+ active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+
+ /*
+ * reserved4 and 5 could have been filled by the iwlcore code.
+ * Let's clear them before pushing to the 3945.
+ */
+ active_rxon->reserved4 = 0;
+ active_rxon->reserved5 = 0;
+ rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd),
+ &il->ctx.active);
+
+ /* If the mask clearing failed then we set
+ * active_rxon back to what it was previously */
+ if (rc) {
+ active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
+ IL_ERR("Error clearing ASSOC_MSK on current "
+ "configuration (%d).\n", rc);
+ return rc;
+ }
+ il_clear_ucode_stations(il, &il->ctx);
+ il_restore_stations(il, &il->ctx);
+ }
+
+ D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n"
+ "* channel = %d\n" "* bssid = %pM\n", (new_assoc ? "" : "out"),
+ le16_to_cpu(staging_rxon->channel), staging_rxon->bssid_addr);
+
+ /*
+ * reserved4 and 5 could have been filled by the iwlcore code.
+ * Let's clear them before pushing to the 3945.
+ */
+ staging_rxon->reserved4 = 0;
+ staging_rxon->reserved5 = 0;
+
+ il_set_rxon_hwcrypto(il, ctx, !il3945_mod_params.sw_crypto);
+
+ /* Apply the new configuration */
+ rc = il_send_cmd_pdu(il, C_RXON, sizeof(struct il3945_rxon_cmd),
+ staging_rxon);
+ if (rc) {
+ IL_ERR("Error setting new configuration (%d).\n", rc);
+ return rc;
+ }
+
+ memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
+
+ if (!new_assoc) {
+ il_clear_ucode_stations(il, &il->ctx);
+ il_restore_stations(il, &il->ctx);
+ }
+
+ /* If we issue a new RXON command which required a tune then we must
+ * send a new TXPOWER command or we won't be able to Tx any frames */
+ rc = il_set_tx_power(il, il->tx_power_next, true);
+ if (rc) {
+ IL_ERR("Error setting Tx power (%d).\n", rc);
+ return rc;
+ }
+
+ /* Init the hardware's rate fallback order based on the band */
+ rc = il3945_init_hw_rate_table(il);
+ if (rc) {
+ IL_ERR("Error setting HW rate table: %02X\n", rc);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * il3945_reg_txpower_periodic - called when time to check our temperature.
+ *
+ * -- reset periodic timer
+ * -- see if temp has changed enough to warrant re-calibration ... if so:
+ * -- correct coeffs for temp (can reset temp timer)
+ * -- save this temp as "last",
+ * -- send new set of gain settings to NIC
+ * NOTE: This should continue working, even when we're not associated,
+ * so we can keep our internal table of scan powers current. */
+void
+il3945_reg_txpower_periodic(struct il_priv *il)
+{
+ /* This will kick in the "brute force"
+ * il3945_hw_reg_comp_txpower_temp() below */
+ if (!il3945_is_temp_calib_needed(il))
+ goto reschedule;
+
+ /* Set up a new set of temp-adjusted TxPowers, send to NIC.
+ * This is based *only* on current temperature,
+ * ignoring any previous power measurements */
+ il3945_hw_reg_comp_txpower_temp(il);
+
+reschedule:
+ queue_delayed_work(il->workqueue, &il->_3945.thermal_periodic,
+ REG_RECALIB_PERIOD * HZ);
+}
+
+static void
+il3945_bg_reg_txpower_periodic(struct work_struct *work)
+{
+ struct il_priv *il = container_of(work, struct il_priv,
+ _3945.thermal_periodic.work);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status) || il->txq == NULL)
+ goto out;
+
+ il3945_reg_txpower_periodic(il);
+out:
+ mutex_unlock(&il->mutex);
+}
+
+/**
+ * il3945_hw_reg_get_ch_grp_idx - find the channel-group idx (0-4) for channel.
+ *
+ * This function is used when initializing channel-info structs.
+ *
+ * NOTE: These channel groups do *NOT* match the bands above!
+ * These channel groups are based on factory-tested channels;
+ * on A-band, EEPROM's "group frequency" entries represent the top
+ * channel in each group 1-4. All B/G channels are in group 0.
+ */
+static u16
+il3945_hw_reg_get_ch_grp_idx(struct il_priv *il,
+ const struct il_channel_info *ch_info)
+{
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+ struct il3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0];
+ u8 group;
+ u16 group_idx = 0; /* based on factory calib frequencies */
+ u8 grp_channel;
+
+ /* Find the group idx for the channel ... don't use idx 1(?) */
+ if (il_is_channel_a_band(ch_info)) {
+ for (group = 1; group < 5; group++) {
+ grp_channel = ch_grp[group].group_channel;
+ if (ch_info->channel <= grp_channel) {
+ group_idx = group;
+ break;
+ }
+ }
+ /* group 4 has a few channels *above* its factory cal freq */
+ if (group == 5)
+ group_idx = 4;
+ } else
+ group_idx = 0; /* 2.4 GHz, group 0 */
+
+ D_POWER("Chnl %d mapped to grp %d\n", ch_info->channel, group_idx);
+ return group_idx;
+}
+
+/**
+ * il3945_hw_reg_get_matched_power_idx - Interpolate to get nominal idx
+ *
+ * Interpolate to get nominal (i.e. at factory calibration temperature) idx
+ * into radio/DSP gain settings table for requested power.
+ */
+static int
+il3945_hw_reg_get_matched_power_idx(struct il_priv *il, s8 requested_power,
+ s32 setting_idx, s32 *new_idx)
+{
+ const struct il3945_eeprom_txpower_group *chnl_grp = NULL;
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+ s32 idx0, idx1;
+ s32 power = 2 * requested_power;
+ s32 i;
+ const struct il3945_eeprom_txpower_sample *samples;
+ s32 gains0, gains1;
+ s32 res;
+ s32 denominator;
+
+ chnl_grp = &eeprom->groups[setting_idx];
+ samples = chnl_grp->samples;
+ for (i = 0; i < 5; i++) {
+ if (power == samples[i].power) {
+ *new_idx = samples[i].gain_idx;
+ return 0;
+ }
+ }
+
+ if (power > samples[1].power) {
+ idx0 = 0;
+ idx1 = 1;
+ } else if (power > samples[2].power) {
+ idx0 = 1;
+ idx1 = 2;
+ } else if (power > samples[3].power) {
+ idx0 = 2;
+ idx1 = 3;
+ } else {
+ idx0 = 3;
+ idx1 = 4;
+ }
+
+ denominator = (s32) samples[idx1].power - (s32) samples[idx0].power;
+ if (denominator == 0)
+ return -EINVAL;
+ gains0 = (s32) samples[idx0].gain_idx * (1 << 19);
+ gains1 = (s32) samples[idx1].gain_idx * (1 << 19);
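+ /* linear interpolation in 2^19 fixed point; the (1 << 18) term
+ * below rounds to the nearest idx before the final >> 19 shift */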
+ res =
+ gains0 + (gains1 - gains0) * ((s32) power -
+ (s32) samples[idx0].power) /
+ denominator + (1 << 18);
+ *new_idx = res >> 19;
+ return 0;
+}
+
+static void
+il3945_hw_reg_init_channel_groups(struct il_priv *il)
+{
+ u32 i;
+ s32 rate_idx;
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+ const struct il3945_eeprom_txpower_group *group;
+
+ D_POWER("Initializing factory calib info from EEPROM\n");
+
+ for (i = 0; i < IL_NUM_TX_CALIB_GROUPS; i++) {
+ s8 *clip_pwrs; /* table of power levels for each rate */
+ s8 satur_pwr; /* saturation power for each chnl group */
+ group = &eeprom->groups[i];
+
+ /* sanity check on factory saturation power value */
+ if (group->saturation_power < 40) {
+ IL_WARN("Error: saturation power is %d, "
+ "less than minimum expected 40\n",
+ group->saturation_power);
+ return;
+ }
+
+ /*
+ * Derive requested power levels for each rate, based on
+ * hardware capabilities (saturation power for band).
+ * Basic value is 3dB down from saturation, with further
+ * power reductions for highest 3 data rates. These
+ * backoffs provide headroom for high rate modulation
+ * power peaks, without too much distortion (clipping).
+ */
+ /* we'll fill in this array with h/w max power levels */
+ clip_pwrs = (s8 *) il->_3945.clip_groups[i].clip_powers;
+
+ /* divide factory saturation power by 2 to find -3dB level */
+ satur_pwr = (s8) (group->saturation_power >> 1);
+
+ /* fill in channel group's nominal powers for each rate */
+ for (rate_idx = 0; rate_idx < RATE_COUNT_3945;
+ rate_idx++, clip_pwrs++) {
+ switch (rate_idx) {
+ case RATE_36M_IDX_TBL:
+ if (i == 0) /* B/G */
+ *clip_pwrs = satur_pwr;
+ else /* A */
+ *clip_pwrs = satur_pwr - 5;
+ break;
+ case RATE_48M_IDX_TBL:
+ if (i == 0)
+ *clip_pwrs = satur_pwr - 7;
+ else
+ *clip_pwrs = satur_pwr - 10;
+ break;
+ case RATE_54M_IDX_TBL:
+ if (i == 0)
+ *clip_pwrs = satur_pwr - 9;
+ else
+ *clip_pwrs = satur_pwr - 12;
+ break;
+ default:
+ *clip_pwrs = satur_pwr;
+ break;
+ }
+ }
+ }
+}
+
+/**
+ * il3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
+ *
+ * Second pass (during init) to set up il->channel_info
+ *
+ * Set up Tx-power settings in our channel info database for each VALID
+ * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values
+ * and current temperature.
+ *
+ * Since this is based on current temperature (at init time), these values may
+ * not be valid for very long, but it gives us a starting/default point,
+ * and allows us to perform active (i.e. using Tx) scans.
+ *
+ * This does *not* write values to NIC, just sets up our internal table.
+ */
+int
+il3945_txpower_set_from_eeprom(struct il_priv *il)
+{
+ struct il_channel_info *ch_info = NULL;
+ struct il3945_channel_power_info *pwr_info;
+ struct il3945_eeprom *eeprom = (struct il3945_eeprom *)il->eeprom;
+ int delta_idx;
+ u8 rate_idx;
+ u8 scan_tbl_idx;
+ const s8 *clip_pwrs; /* array of power levels for each rate */
+ u8 gain, dsp_atten;
+ s8 power;
+ u8 pwr_idx, base_pwr_idx, a_band;
+ u8 i;
+ int temperature;
+
+ /* save temperature reference,
+ * so we can determine next time to calibrate */
+ temperature = il3945_hw_reg_txpower_get_temperature(il);
+ il->last_temperature = temperature;
+
+ il3945_hw_reg_init_channel_groups(il);
+
+ /* initialize Tx power info for each and every channel, 2.4 and 5.x */
+ for (i = 0, ch_info = il->channel_info; i < il->channel_count;
+ i++, ch_info++) {
+ a_band = il_is_channel_a_band(ch_info);
+ if (!il_is_channel_valid(ch_info))
+ continue;
+
+ /* find this channel's channel group (*not* "band") idx */
+ ch_info->group_idx = il3945_hw_reg_get_ch_grp_idx(il, ch_info);
+
+ /* Get this chnlgrp's rate->max/clip-powers table */
+ clip_pwrs =
+ il->_3945.clip_groups[ch_info->group_idx].clip_powers;
+
+ /* calculate power idx *adjustment* value according to
+ * diff between current temperature and factory temperature */
+ delta_idx =
+ il3945_hw_reg_adjust_power_by_temp(temperature,
+ eeprom->groups[ch_info->group_idx].temperature);
+
+ D_POWER("Delta idx for channel %d: %d [%d]\n", ch_info->channel,
+ delta_idx, temperature + IL_TEMP_CONVERT);
+
+ /* set tx power value for all OFDM rates */
+ for (rate_idx = 0; rate_idx < IL_OFDM_RATES; rate_idx++) {
+ s32 uninitialized_var(power_idx);
+ int rc;
+
+ /* use channel group's clip-power table,
+ * but don't exceed channel's max power */
+ s8 pwr = min(ch_info->max_power_avg,
+ clip_pwrs[rate_idx]);
+
+ pwr_info = &ch_info->power_info[rate_idx];
+
+ /* get base (i.e. at factory-measured temperature)
+ * power table idx for this rate's power */
+ rc = il3945_hw_reg_get_matched_power_idx(il, pwr,
+ ch_info->group_idx,
+ &power_idx);
+ if (rc) {
+ IL_ERR("Invalid power idx\n");
+ return rc;
+ }
+ pwr_info->base_power_idx = (u8) power_idx;
+
+ /* temperature compensate */
+ power_idx += delta_idx;
+
+ /* stay within range of gain table */
+ power_idx = il3945_hw_reg_fix_power_idx(power_idx);
+
+ /* fill 1 OFDM rate's il3945_channel_power_info struct */
+ pwr_info->requested_power = pwr;
+ pwr_info->power_table_idx = (u8) power_idx;
+ pwr_info->tpc.tx_gain =
+ power_gain_table[a_band][power_idx].tx_gain;
+ pwr_info->tpc.dsp_atten =
+ power_gain_table[a_band][power_idx].dsp_atten;
+ }
+
+ /* set tx power for CCK rates, based on OFDM 12 Mbit settings */
+ pwr_info = &ch_info->power_info[RATE_12M_IDX_TBL];
+ power = pwr_info->requested_power + IL_CCK_FROM_OFDM_POWER_DIFF;
+ pwr_idx = pwr_info->power_table_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
+ base_pwr_idx =
+ pwr_info->base_power_idx + IL_CCK_FROM_OFDM_IDX_DIFF;
+
+ /* stay within table range */
+ pwr_idx = il3945_hw_reg_fix_power_idx(pwr_idx);
+ gain = power_gain_table[a_band][pwr_idx].tx_gain;
+ dsp_atten = power_gain_table[a_band][pwr_idx].dsp_atten;
+
+ /* fill each CCK rate's il3945_channel_power_info structure
+ * NOTE: All CCK-rate Txpwrs are the same for a given chnl!
+ * NOTE: CCK rates start at end of OFDM rates! */
+ for (rate_idx = 0; rate_idx < IL_CCK_RATES; rate_idx++) {
+ pwr_info =
+ &ch_info->power_info[rate_idx + IL_OFDM_RATES];
+ pwr_info->requested_power = power;
+ pwr_info->power_table_idx = pwr_idx;
+ pwr_info->base_power_idx = base_pwr_idx;
+ pwr_info->tpc.tx_gain = gain;
+ pwr_info->tpc.dsp_atten = dsp_atten;
+ }
+
+ /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
+ for (scan_tbl_idx = 0; scan_tbl_idx < IL_NUM_SCAN_RATES;
+ scan_tbl_idx++) {
+ s32 actual_idx = (scan_tbl_idx == 0) ?
+ RATE_1M_IDX_TBL : RATE_6M_IDX_TBL;
+ il3945_hw_reg_set_scan_power(il, scan_tbl_idx,
+ actual_idx, clip_pwrs,
+ ch_info, a_band);
+ }
+ }
+
+ return 0;
+}
+
+int
+il3945_hw_rxq_stop(struct il_priv *il)
+{
+ int rc;
+
+ il_wr(il, FH39_RCSR_CONFIG(0), 0);
+ rc = il_poll_bit(il, FH39_RSSR_STATUS,
+ FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+ if (rc < 0)
+ IL_ERR("Can't stop Rx DMA.\n");
+
+ return 0;
+}
+
+int
+il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
+{
+ int txq_id = txq->q.id;
+
+ struct il3945_shared *shared_data = il->_3945.shared_virt;
+
+ shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32) txq->q.dma_addr);
+
+ il_wr(il, FH39_CBCC_CTRL(txq_id), 0);
+ il_wr(il, FH39_CBCC_BASE(txq_id), 0);
+
+ il_wr(il, FH39_TCSR_CONFIG(txq_id),
+ FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
+ FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
+ FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
+ FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
+ FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
+
+ /* fake read to flush all prev. writes */
+ _il_rd(il, FH39_TSSR_CBB_BASE);
+
+ return 0;
+}
+
+/*
+ * HCMD utils
+ */
+static u16
+il3945_get_hcmd_size(u8 cmd_id, u16 len)
+{
+ switch (cmd_id) {
+ case C_RXON:
+ return sizeof(struct il3945_rxon_cmd);
+ case C_POWER_TBL:
+ return sizeof(struct il3945_powertable_cmd);
+ default:
+ return len;
+ }
+}
+
+static u16
+il3945_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 * data)
+{
+ struct il3945_addsta_cmd *addsta = (struct il3945_addsta_cmd *)data;
+ addsta->mode = cmd->mode;
+ memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
+ memcpy(&addsta->key, &cmd->key, sizeof(struct il4965_keyinfo));
+ addsta->station_flags = cmd->station_flags;
+ addsta->station_flags_msk = cmd->station_flags_msk;
+ addsta->tid_disable_tx = cpu_to_le16(0);
+ addsta->rate_n_flags = cmd->rate_n_flags;
+ addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
+ addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
+ addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
+
+ return (u16) sizeof(struct il3945_addsta_cmd);
+}
+
+static int
+il3945_add_bssid_station(struct il_priv *il, const u8 * addr, u8 * sta_id_r)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+ int ret;
+ u8 sta_id;
+ unsigned long flags;
+
+ if (sta_id_r)
+ *sta_id_r = IL_INVALID_STATION;
+
+ ret = il_add_station_common(il, ctx, addr, 0, NULL, &sta_id);
+ if (ret) {
+ IL_ERR("Unable to add station %pM\n", addr);
+ return ret;
+ }
+
+ if (sta_id_r)
+ *sta_id_r = sta_id;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].used |= IL_STA_LOCAL;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return 0;
+}
+
+static int
+il3945_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
+ bool add)
+{
+ struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+ int ret;
+
+ if (add) {
+ ret =
+ il3945_add_bssid_station(il, vif->bss_conf.bssid,
+ &vif_priv->ibss_bssid_sta_id);
+ if (ret)
+ return ret;
+
+ il3945_sync_sta(il, vif_priv->ibss_bssid_sta_id,
+ (il->band == IEEE80211_BAND_5GHZ) ?
+ RATE_6M_PLCP : RATE_1M_PLCP);
+ il3945_rate_scale_init(il->hw, vif_priv->ibss_bssid_sta_id);
+
+ return 0;
+ }
+
+ return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
+ vif->bss_conf.bssid);
+}
+
+/**
+ * il3945_init_hw_rate_table - Initialize the hardware rate fallback table
+ */
+int
+il3945_init_hw_rate_table(struct il_priv *il)
+{
+ int rc, i, idx, prev_idx;
+ struct il3945_rate_scaling_cmd rate_cmd = {
+ .reserved = {0, 0, 0},
+ };
+ struct il3945_rate_scaling_info *table = rate_cmd.table;
+
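+ /* build the default fallback chain: each rate falls back to the
+ * previous (slower) ieee rate; band-specific overrides follow */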
+ for (i = 0; i < ARRAY_SIZE(il3945_rates); i++) {
+ idx = il3945_rates[i].table_rs_idx;
+
+ table[idx].rate_n_flags = cpu_to_le16(il3945_rates[i].plcp);
+ table[idx].try_cnt = il->retry_rate;
+ prev_idx = il3945_get_prev_ieee_rate(i);
+ table[idx].next_rate_idx = il3945_rates[prev_idx].table_rs_idx;
+ }
+
+ switch (il->band) {
+ case IEEE80211_BAND_5GHZ:
+ D_RATE("Select A mode rate scale\n");
+ /* If one of the following CCK rates is used,
+ * have it fall back to the 6M OFDM rate */
+ for (i = RATE_1M_IDX_TBL; i <= RATE_11M_IDX_TBL; i++)
+ table[i].next_rate_idx =
+ il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;
+
+ /* Don't fall back to CCK rates */
+ table[RATE_12M_IDX_TBL].next_rate_idx = RATE_9M_IDX_TBL;
+
+ /* Don't drop out of OFDM rates */
+ table[RATE_6M_IDX_TBL].next_rate_idx =
+ il3945_rates[IL_FIRST_OFDM_RATE].table_rs_idx;
+ break;
+
+ case IEEE80211_BAND_2GHZ:
+ D_RATE("Select B/G mode rate scale\n");
+ /* If an OFDM rate is used, have it fall back to the
+ * 1M CCK rates */
+
+ if (!(il->_3945.sta_supp_rates & IL_OFDM_RATES_MASK) &&
+ il_is_associated(il)) {
+
+ idx = IL_FIRST_CCK_RATE;
+ for (i = RATE_6M_IDX_TBL; i <= RATE_54M_IDX_TBL; i++)
+ table[i].next_rate_idx =
+ il3945_rates[idx].table_rs_idx;
+
+ idx = RATE_11M_IDX_TBL;
+ /* CCK shouldn't fall back to OFDM... */
+ table[idx].next_rate_idx = RATE_5M_IDX_TBL;
+ }
+ break;
+
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ /* Update the rate scaling for control frame Tx */
+ rate_cmd.table_id = 0;
+ rc = il_send_cmd_pdu(il, C_RATE_SCALE, sizeof(rate_cmd), &rate_cmd);
+ if (rc)
+ return rc;
+
+ /* Update the rate scaling for data frame Tx */
+ rate_cmd.table_id = 1;
+ return il_send_cmd_pdu(il, C_RATE_SCALE, sizeof(rate_cmd), &rate_cmd);
+}
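
The table programmed above is a linked list in disguise: each entry's next_rate_idx names the rate to drop to after try_cnt failed attempts, and the band-specific fixups only change where the chains bottom out. The following standalone sketch (plain user-space C, with a made-up four-entry table rather than the driver's RATE_*_IDX_TBL layout) walks such a chain until it reaches the self-referencing floor entry.

#include <stdio.h>

struct fallback_entry {
    const char *name;       /* human-readable rate, demo only */
    int try_cnt;            /* retries at this rate before falling back */
    int next_rate_idx;      /* index of the rate to fall back to */
};

static const struct fallback_entry demo_tbl[] = {
    { "54M", 3, 1 },        /* 54M -> 48M */
    { "48M", 3, 2 },        /* 48M -> 24M */
    { "24M", 3, 3 },        /* 24M ->  6M */
    { "6M",  3, 3 },        /*  6M is the floor: points at itself */
};

int main(void)
{
    int idx = 0;            /* start at the highest rate */
    int hops = 0;

    /* fall back until the chain reaches a self-referencing entry */
    while (demo_tbl[idx].next_rate_idx != idx && hops++ < 8) {
        printf("tx failed %d times at %s, falling back\n",
               demo_tbl[idx].try_cnt, demo_tbl[idx].name);
        idx = demo_tbl[idx].next_rate_idx;
    }
    printf("settled at %s\n", demo_tbl[idx].name);
    return 0;
}
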
+
+/* Called when initializing driver */
+int
+il3945_hw_set_hw_params(struct il_priv *il)
+{
+ memset((void *)&il->hw_params, 0, sizeof(struct il_hw_params));
+
+ il->_3945.shared_virt =
+ dma_alloc_coherent(&il->pci_dev->dev, sizeof(struct il3945_shared),
+ &il->_3945.shared_phys, GFP_KERNEL);
+ if (!il->_3945.shared_virt) {
+ IL_ERR("failed to allocate pci memory\n");
+ return -ENOMEM;
+ }
+
+ /* Assign number of Usable TX queues */
+ il->hw_params.max_txq_num = il->cfg->base_params->num_of_queues;
+
+ il->hw_params.tfd_size = sizeof(struct il3945_tfd);
+ il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_3K);
+ il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
+ il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
+ il->hw_params.max_stations = IL3945_STATION_COUNT;
+ il->ctx.bcast_sta_id = IL3945_BROADCAST_ID;
+
+ il->sta_key_max_num = STA_KEY_MAX_NUM;
+
+ il->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
+ il->hw_params.max_beacon_itrvl = IL39_MAX_UCODE_BEACON_INTERVAL;
+ il->hw_params.beacon_time_tsf_bits = IL3945_EXT_BEACON_TIME_POS;
+
+ return 0;
+}
+
+unsigned int
+il3945_hw_get_beacon_cmd(struct il_priv *il, struct il3945_frame *frame,
+ u8 rate)
+{
+ struct il3945_tx_beacon_cmd *tx_beacon_cmd;
+ unsigned int frame_size;
+
+ tx_beacon_cmd = (struct il3945_tx_beacon_cmd *)&frame->u;
+ memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
+
+ tx_beacon_cmd->tx.sta_id = il->ctx.bcast_sta_id;
+ tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+ frame_size =
+ il3945_fill_beacon_frame(il, tx_beacon_cmd->frame,
+ sizeof(frame->u) - sizeof(*tx_beacon_cmd));
+
+ BUG_ON(frame_size > MAX_MPDU_SIZE);
+ tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
+
+ tx_beacon_cmd->tx.rate = rate;
+ tx_beacon_cmd->tx.tx_flags =
+ (TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK);
+
+ /* supp_rates[0] == OFDM start at IL_FIRST_OFDM_RATE */
+ tx_beacon_cmd->tx.supp_rates[0] =
+ (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
+
+ tx_beacon_cmd->tx.supp_rates[1] = (IL_CCK_BASIC_RATES_MASK & 0xF);
+
+ return sizeof(struct il3945_tx_beacon_cmd) + frame_size;
+}
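
The two supp_rates bytes built above re-base the driver's basic-rates bitmap: bit 0 of supp_rates[0] must mean the first OFDM rate, so the CCK bits are shifted out, while supp_rates[1] keeps only the low CCK nibble. A small user-space sketch of that re-basing follows; the DEMO_* constants are illustrative assumptions, not the driver's real IL_*_BASIC_RATES_MASK or IL_FIRST_OFDM_RATE values.

#include <stdint.h>
#include <stdio.h>

#define DEMO_FIRST_OFDM_RATE    4       /* assume rates 0..3 are the CCK rates */
#define DEMO_CCK_BASIC_MASK     0x0003  /* 1M + 2M basic (bits 0, 1) */
#define DEMO_OFDM_BASIC_MASK    0x0150  /* 6M, 12M, 24M basic (bits 4, 6, 8) */

int main(void)
{
    uint8_t supp_rates[2];

    /* supp_rates[0]: OFDM rates, re-based so bit 0 == first OFDM rate */
    supp_rates[0] = (DEMO_OFDM_BASIC_MASK >> DEMO_FIRST_OFDM_RATE) & 0xFF;
    /* supp_rates[1]: CCK rates, low nibble only */
    supp_rates[1] = DEMO_CCK_BASIC_MASK & 0xF;

    printf("supp_rates[0]=0x%02x supp_rates[1]=0x%02x\n",
           (unsigned)supp_rates[0], (unsigned)supp_rates[1]);
    return 0;
}
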
+
+void
+il3945_hw_handler_setup(struct il_priv *il)
+{
+ il->handlers[C_TX] = il3945_hdl_tx;
+ il->handlers[N_3945_RX] = il3945_hdl_rx;
+}
+
+void
+il3945_hw_setup_deferred_work(struct il_priv *il)
+{
+ INIT_DELAYED_WORK(&il->_3945.thermal_periodic,
+ il3945_bg_reg_txpower_periodic);
+}
+
+void
+il3945_hw_cancel_deferred_work(struct il_priv *il)
+{
+ cancel_delayed_work(&il->_3945.thermal_periodic);
+}
+
+/* check contents of special bootstrap uCode SRAM */
+static int
+il3945_verify_bsm(struct il_priv *il)
+{
+ __le32 *image = il->ucode_boot.v_addr;
+ u32 len = il->ucode_boot.len;
+ u32 reg;
+ u32 val;
+
+ D_INFO("Begin verify bsm\n");
+
+ /* verify BSM SRAM contents */
+ val = il_rd_prph(il, BSM_WR_DWCOUNT_REG);
+ for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len;
+ reg += sizeof(u32), image++) {
+ val = il_rd_prph(il, reg);
+ if (val != le32_to_cpu(*image)) {
+ IL_ERR("BSM uCode verification failed at "
+ "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
+ BSM_SRAM_LOWER_BOUND, reg - BSM_SRAM_LOWER_BOUND,
+ len, val, le32_to_cpu(*image));
+ return -EIO;
+ }
+ }
+
+ D_INFO("BSM bootstrap uCode image OK\n");
+
+ return 0;
+}
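
For reference, the write-then-read-back pattern used by il3945_verify_bsm() reduces to the following standalone sketch, where a plain array stands in for BSM SRAM and the prph register accessors are omitted; it only illustrates the verification loop, not the driver's I/O path.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_SRAM_WORDS 8
static uint32_t demo_sram[DEMO_SRAM_WORDS];     /* stands in for BSM SRAM */

static int write_and_verify(const uint32_t *image, unsigned int words)
{
    unsigned int i;

    memcpy(demo_sram, image, words * sizeof(uint32_t));

    /* read every word back and compare against the source image */
    for (i = 0; i < words; i++) {
        if (demo_sram[i] != image[i]) {
            fprintf(stderr, "mismatch at word %u\n", i);
            return -1;      /* the driver returns -EIO here */
        }
    }
    return 0;
}

int main(void)
{
    const uint32_t image[4] = { 0xdeadbeef, 0x12345678, 0, 0xffffffff };

    printf("verify -> %d\n", write_and_verify(image, 4));
    return 0;
}
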
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+ ******************************************************************************/
+
+/*
+ * Clear the OWNER_MSK, to establish driver (instead of uCode running on
+ * embedded controller) as EEPROM reader; each read is a series of pulses
+ * to/from the EEPROM chip, not a single event, so even reads could conflict
+ * if they weren't arbitrated by some ownership mechanism. Here, the driver
+ * simply claims ownership, which should be safe when this function is called
+ * (i.e. before loading uCode!).
+ */
+static int
+il3945_eeprom_acquire_semaphore(struct il_priv *il)
+{
+ _il_clear_bit(il, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
+ return 0;
+}
+
+static void
+il3945_eeprom_release_semaphore(struct il_priv *il)
+{
+ return;
+}
+
+/**
+ * il3945_load_bsm - Load bootstrap instructions
+ *
+ * BSM operation:
+ *
+ * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
+ * in special SRAM that does not power down during RFKILL. When powering back
+ * up after power-saving sleeps (or during initial uCode load), the BSM loads
+ * the bootstrap program into the on-board processor, and starts it.
+ *
+ * The bootstrap program loads (via DMA) instructions and data for a new
+ * program from host DRAM locations indicated by the host driver in the
+ * BSM_DRAM_* registers. Once the new program is loaded, it starts
+ * automatically.
+ *
+ * When initializing the NIC, the host driver points the BSM to the
+ * "initialize" uCode image. This uCode sets up some internal data, then
+ * notifies host via "initialize alive" that it is complete.
+ *
+ * The host then replaces the BSM_DRAM_* pointer values to point to the
+ * normal runtime uCode instructions and a backup uCode data cache buffer
+ * (filled initially with starting data values for the on-board processor),
+ * then triggers the "initialize" uCode to load and launch the runtime uCode,
+ * which begins normal operation.
+ *
+ * When doing a power-save shutdown, runtime uCode saves data SRAM into
+ * the backup data cache in DRAM before SRAM is powered down.
+ *
+ * When powering back up, the BSM loads the bootstrap program. This reloads
+ * the runtime uCode instructions and the backup data cache into SRAM,
+ * and re-launches the runtime uCode from where it left off.
+ */
+static int
+il3945_load_bsm(struct il_priv *il)
+{
+ __le32 *image = il->ucode_boot.v_addr;
+ u32 len = il->ucode_boot.len;
+ dma_addr_t pinst;
+ dma_addr_t pdata;
+ u32 inst_len;
+ u32 data_len;
+ int rc;
+ int i;
+ u32 done;
+ u32 reg_offset;
+
+ D_INFO("Begin load bsm\n");
+
+ /* make sure bootstrap program is no larger than BSM's SRAM size */
+ if (len > IL39_MAX_BSM_SIZE)
+ return -EINVAL;
+
+ /* Tell bootstrap uCode where to find the "Initialize" uCode
+ * in host DRAM ... host DRAM physical address bits 31:0 for 3945.
+ * NOTE: il3945_initialize_alive_start() will replace these values,
+ * after the "initialize" uCode has run, to point to
+ * runtime/protocol instructions and backup data cache. */
+ pinst = il->ucode_init.p_addr;
+ pdata = il->ucode_init_data.p_addr;
+ inst_len = il->ucode_init.len;
+ data_len = il->ucode_init_data.len;
+
+ il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
+ il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
+ il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
+ il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
+
+ /* Fill BSM memory with bootstrap instructions */
+ for (reg_offset = BSM_SRAM_LOWER_BOUND;
+ reg_offset < BSM_SRAM_LOWER_BOUND + len;
+ reg_offset += sizeof(u32), image++)
+ _il_wr_prph(il, reg_offset, le32_to_cpu(*image));
+
+ rc = il3945_verify_bsm(il);
+ if (rc)
+ return rc;
+
+ /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
+ il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0);
+ il_wr_prph(il, BSM_WR_MEM_DST_REG, IL39_RTC_INST_LOWER_BOUND);
+ il_wr_prph(il, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
+
+ /* Load bootstrap code into instruction SRAM now,
+ * to prepare to load "initialize" uCode */
+ il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
+
+ /* Wait for load of bootstrap uCode to finish */
+ for (i = 0; i < 100; i++) {
+ done = il_rd_prph(il, BSM_WR_CTRL_REG);
+ if (!(done & BSM_WR_CTRL_REG_BIT_START))
+ break;
+ udelay(10);
+ }
+ if (i < 100)
+ D_INFO("BSM write complete, poll %d iterations\n", i);
+ else {
+ IL_ERR("BSM write did not complete!\n");
+ return -EIO;
+ }
+
+ /* Enable future boot loads whenever power management unit triggers it
+ * (e.g. when powering back up after power-save shutdown) */
+ il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
+
+ return 0;
+}
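
A condensed user-space model of the register sequence above is sketched below. reg_write()/reg_read() stand in for il_wr_prph()/il_rd_prph(), the register names are symbolic rather than the real BSM_* addresses, and the fake hardware clears the START bit immediately so the 100-iteration poll succeeds; it illustrates the flow of il3945_load_bsm(), not driver code.

#include <stdint.h>
#include <stdio.h>

enum {
    DRAM_INST_PTR, DRAM_DATA_PTR, DRAM_INST_BYTECOUNT, DRAM_DATA_BYTECOUNT,
    WR_MEM_SRC, WR_MEM_DST, WR_DWCOUNT, WR_CTRL, NUM_DEMO_REGS
};
#define WR_CTRL_START   0x80000000u

static uint32_t fake_regs[NUM_DEMO_REGS];

static void reg_write(int reg, uint32_t val)
{
    /* fake hardware: the BSM copy "completes" instantly, clearing START */
    if (reg == WR_CTRL && (val & WR_CTRL_START))
        val &= ~WR_CTRL_START;
    fake_regs[reg] = val;
}

static uint32_t reg_read(int reg)
{
    return fake_regs[reg];
}

static int demo_load_bsm(uint32_t inst_addr, uint32_t inst_len,
                         uint32_t data_addr, uint32_t data_len)
{
    int i;

    /* 1) tell the BSM where the "initialize" image lives in host DRAM */
    reg_write(DRAM_INST_PTR, inst_addr);
    reg_write(DRAM_DATA_PTR, data_addr);
    reg_write(DRAM_INST_BYTECOUNT, inst_len);
    reg_write(DRAM_DATA_BYTECOUNT, data_len);

    /* 2) bootstrap words would be copied into BSM SRAM at this point */

    /* 3) point the copy engine at instruction SRAM and kick it off */
    reg_write(WR_MEM_SRC, 0);
    reg_write(WR_MEM_DST, 0);           /* RTC instruction SRAM base */
    reg_write(WR_DWCOUNT, inst_len / 4);
    reg_write(WR_CTRL, WR_CTRL_START);

    /* 4) poll until hardware clears the START bit, as the driver does */
    for (i = 0; i < 100; i++)
        if (!(reg_read(WR_CTRL) & WR_CTRL_START))
            return 0;
    return -1;                          /* timed out */
}

int main(void)
{
    printf("demo_load_bsm -> %d\n",
           demo_load_bsm(0x1000, 4096, 0x2000, 4096));
    return 0;
}
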
+
+static struct il_hcmd_ops il3945_hcmd = {
+ .rxon_assoc = il3945_send_rxon_assoc,
+ .commit_rxon = il3945_commit_rxon,
+};
+
+static struct il_lib_ops il3945_lib = {
+ .txq_attach_buf_to_tfd = il3945_hw_txq_attach_buf_to_tfd,
+ .txq_free_tfd = il3945_hw_txq_free_tfd,
+ .txq_init = il3945_hw_tx_queue_init,
+ .load_ucode = il3945_load_bsm,
+ .dump_nic_error_log = il3945_dump_nic_error_log,
+ .apm_ops = {
+ .init = il3945_apm_init,
+ .config = il3945_nic_config,
+ },
+ .eeprom_ops = {
+ .regulatory_bands = {
+ EEPROM_REGULATORY_BAND_1_CHANNELS,
+ EEPROM_REGULATORY_BAND_2_CHANNELS,
+ EEPROM_REGULATORY_BAND_3_CHANNELS,
+ EEPROM_REGULATORY_BAND_4_CHANNELS,
+ EEPROM_REGULATORY_BAND_5_CHANNELS,
+ EEPROM_REGULATORY_BAND_NO_HT40,
+ EEPROM_REGULATORY_BAND_NO_HT40,
+ },
+ .acquire_semaphore = il3945_eeprom_acquire_semaphore,
+ .release_semaphore = il3945_eeprom_release_semaphore,
+ },
+ .send_tx_power = il3945_send_tx_power,
+ .is_valid_rtc_data_addr = il3945_hw_valid_rtc_data_addr,
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ .debugfs_ops = {
+ .rx_stats_read = il3945_ucode_rx_stats_read,
+ .tx_stats_read = il3945_ucode_tx_stats_read,
+ .general_stats_read = il3945_ucode_general_stats_read,
+ },
+#endif
+};
+
+static const struct il_legacy_ops il3945_legacy_ops = {
+ .post_associate = il3945_post_associate,
+ .config_ap = il3945_config_ap,
+ .manage_ibss_station = il3945_manage_ibss_station,
+};
+
+static struct il_hcmd_utils_ops il3945_hcmd_utils = {
+ .get_hcmd_size = il3945_get_hcmd_size,
+ .build_addsta_hcmd = il3945_build_addsta_hcmd,
+ .request_scan = il3945_request_scan,
+ .post_scan = il3945_post_scan,
+};
+
+static const struct il_ops il3945_ops = {
+ .lib = &il3945_lib,
+ .hcmd = &il3945_hcmd,
+ .utils = &il3945_hcmd_utils,
+ .led = &il3945_led_ops,
+ .legacy = &il3945_legacy_ops,
+ .ieee80211_ops = &il3945_hw_ops,
+};
+
+static struct il_base_params il3945_base_params = {
+ .eeprom_size = IL3945_EEPROM_IMG_SIZE,
+ .num_of_queues = IL39_NUM_QUEUES,
+ .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
+ .set_l0s = false,
+ .use_bsm = true,
+ .led_compensation = 64,
+ .wd_timeout = IL_DEF_WD_TIMEOUT,
+};
+
+static struct il_cfg il3945_bg_cfg = {
+ .name = "3945BG",
+ .fw_name_pre = IL3945_FW_PRE,
+ .ucode_api_max = IL3945_UCODE_API_MAX,
+ .ucode_api_min = IL3945_UCODE_API_MIN,
+ .sku = IL_SKU_G,
+ .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
+ .ops = &il3945_ops,
+ .mod_params = &il3945_mod_params,
+ .base_params = &il3945_base_params,
+ .led_mode = IL_LED_BLINK,
+};
+
+static struct il_cfg il3945_abg_cfg = {
+ .name = "3945ABG",
+ .fw_name_pre = IL3945_FW_PRE,
+ .ucode_api_max = IL3945_UCODE_API_MAX,
+ .ucode_api_min = IL3945_UCODE_API_MIN,
+ .sku = IL_SKU_A | IL_SKU_G,
+ .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
+ .ops = &il3945_ops,
+ .mod_params = &il3945_mod_params,
+ .base_params = &il3945_base_params,
+ .led_mode = IL_LED_BLINK,
+};
+
+DEFINE_PCI_DEVICE_TABLE(il3945_hw_card_ids) = {
+ {IL_PCI_DEVICE(0x4222, 0x1005, il3945_bg_cfg)},
+ {IL_PCI_DEVICE(0x4222, 0x1034, il3945_bg_cfg)},
+ {IL_PCI_DEVICE(0x4222, 0x1044, il3945_bg_cfg)},
+ {IL_PCI_DEVICE(0x4227, 0x1014, il3945_bg_cfg)},
+ {IL_PCI_DEVICE(0x4222, PCI_ANY_ID, il3945_abg_cfg)},
+ {IL_PCI_DEVICE(0x4227, PCI_ANY_ID, il3945_abg_cfg)},
+ {0}
+};
+
+MODULE_DEVICE_TABLE(pci, il3945_hw_card_ids);
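
The id table above is matched entry by entry by the PCI core, with PCI_ANY_ID acting as a wildcard and the matched entry selecting the per-device cfg. The standalone sketch below models that lookup; the structures and DEMO_ANY_ID constant are simplifications, not the kernel's pci_device_id handling.

#include <stdint.h>
#include <stdio.h>

#define DEMO_ANY_ID 0xFFFFu

struct demo_cfg { const char *name; };
static const struct demo_cfg bg_cfg = { "3945BG" };
static const struct demo_cfg abg_cfg = { "3945ABG" };

struct demo_pci_id {
    uint16_t device, subdevice;
    const struct demo_cfg *cfg;         /* plays the role of driver_data */
};

static const struct demo_pci_id demo_ids[] = {
    { 0x4222, 0x1005, &bg_cfg },
    { 0x4222, 0x1034, &bg_cfg },
    { 0x4222, DEMO_ANY_ID, &abg_cfg },  /* wildcard catches the rest */
    { 0x4227, DEMO_ANY_ID, &abg_cfg },
    { 0 }
};

static const struct demo_cfg *match(uint16_t dev, uint16_t subdev)
{
    const struct demo_pci_id *id;

    for (id = demo_ids; id->device; id++)
        if (id->device == dev &&
            (id->subdevice == DEMO_ANY_ID || id->subdevice == subdev))
            return id->cfg;
    return NULL;
}

int main(void)
{
    printf("%s\n", match(0x4222, 0x1005)->name);    /* 3945BG  */
    printf("%s\n", match(0x4222, 0x2731)->name);    /* 3945ABG */
    return 0;
}
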
diff --git a/drivers/net/wireless/iwlegacy/3945.h b/drivers/net/wireless/iwlegacy/3945.h
new file mode 100644
index 0000000..9f42f79
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/3945.h
@@ -0,0 +1,607 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __il_3945_h__
+#define __il_3945_h__
+
+#include <linux/pci.h> /* for struct pci_device_id */
+#include <linux/kernel.h>
+#include <net/ieee80211_radiotap.h>
+
+/* Hardware specific file defines the PCI IDs table for that hardware module */
+extern const struct pci_device_id il3945_hw_card_ids[];
+
+#include "common.h"
+
+/* Highest firmware API version supported */
+#define IL3945_UCODE_API_MAX 2
+
+/* Lowest firmware API version supported */
+#define IL3945_UCODE_API_MIN 1
+
+#define IL3945_FW_PRE "iwlwifi-3945-"
+#define _IL3945_MODULE_FIRMWARE(api) IL3945_FW_PRE #api ".ucode"
+#define IL3945_MODULE_FIRMWARE(api) _IL3945_MODULE_FIRMWARE(api)
+
+/* Default noise level to report when noise measurement is not available.
+ * This may be because we're:
+ * 1) Not associated (4965, no beacon stats being sent to driver)
+ * 2) Scanning (noise measurement does not apply to associated channel)
+ * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
+ * Use default noise value of -127 ... this is below the range of measurable
+ * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
+ * Also, -127 works better than 0 when averaging frames with/without
+ * noise info (e.g. averaging might be done in app); measured dBm values are
+ * always negative ... using a negative value as the default keeps all
+ * averages within an s8's (used in some apps) range of negative values. */
+#define IL_NOISE_MEAS_NOT_AVAILABLE (-127)
+
+/* Module parameters accessible from iwl-*.c */
+extern struct il_mod_params il3945_mod_params;
+
+struct il3945_rate_scale_data {
+ u64 data;
+ s32 success_counter;
+ s32 success_ratio;
+ s32 counter;
+ s32 average_tpt;
+ unsigned long stamp;
+};
+
+struct il3945_rs_sta {
+ spinlock_t lock;
+ struct il_priv *il;
+ s32 *expected_tpt;
+ unsigned long last_partial_flush;
+ unsigned long last_flush;
+ u32 flush_time;
+ u32 last_tx_packets;
+ u32 tx_packets;
+ u8 tgg;
+ u8 flush_pending;
+ u8 start_rate;
+ struct timer_list rate_scale_flush;
+ struct il3945_rate_scale_data win[RATE_COUNT_3945];
+#ifdef CONFIG_MAC80211_DEBUGFS
+ struct dentry *rs_sta_dbgfs_stats_table_file;
+#endif
+
+ /* used to be in sta_info */
+ int last_txrate_idx;
+};
+
+/*
+ * The common struct MUST be first because it is shared between
+ * 3945 and 4965!
+ */
+struct il3945_sta_priv {
+ struct il_station_priv_common common;
+ struct il3945_rs_sta rs_sta;
+};
+
+enum il3945_antenna {
+ IL_ANTENNA_DIVERSITY,
+ IL_ANTENNA_MAIN,
+ IL_ANTENNA_AUX
+};
+
+/*
+ * RTS threshold here is total size [2347] minus 4 FCS bytes
+ * Per spec:
+ * a value of 0 means RTS on all data/management packets
+ * a value > max MSDU size means no RTS
+ * else RTS for data/management frames where MPDU is larger
+ * than RTS value.
+ */
+#define DEFAULT_RTS_THRESHOLD 2347U
+#define MIN_RTS_THRESHOLD 0U
+#define MAX_RTS_THRESHOLD 2347U
+#define MAX_MSDU_SIZE 2304U
+#define MAX_MPDU_SIZE 2346U
+#define DEFAULT_BEACON_INTERVAL 100U
+#define DEFAULT_SHORT_RETRY_LIMIT 7U
+#define DEFAULT_LONG_RETRY_LIMIT 4U
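
The rule spelled out in the comment above can be restated as a small helper, sketched here in user-space C with a local copy of the MSDU limit (the function name and constant are for illustration only):

#include <stdbool.h>
#include <stdio.h>

#define DEMO_MAX_MSDU_SIZE  2304U

static bool use_rts(unsigned int mpdu_len, unsigned int rts_threshold)
{
    if (rts_threshold == 0)
        return true;                    /* RTS on everything */
    if (rts_threshold > DEMO_MAX_MSDU_SIZE)
        return false;                   /* RTS disabled */
    return mpdu_len > rts_threshold;    /* RTS for large frames only */
}

int main(void)
{
    printf("%d %d %d\n",
           use_rts(1500, 0),            /* 1: always           */
           use_rts(1500, 2347),         /* 0: above max MSDU   */
           use_rts(1500, 1000));        /* 1: over threshold   */
    return 0;
}

Note that with the default threshold of 2347 (above the 2304-byte MSDU limit), this rule effectively disables RTS, matching the defines above.
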
+
+#define IL_TX_FIFO_AC0 0
+#define IL_TX_FIFO_AC1 1
+#define IL_TX_FIFO_AC2 2
+#define IL_TX_FIFO_AC3 3
+#define IL_TX_FIFO_HCCA_1 5
+#define IL_TX_FIFO_HCCA_2 6
+#define IL_TX_FIFO_NONE 7
+
+#define IEEE80211_DATA_LEN 2304
+#define IEEE80211_4ADDR_LEN 30
+#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
+#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
+
+struct il3945_frame {
+ union {
+ struct ieee80211_hdr frame;
+ struct il3945_tx_beacon_cmd beacon;
+ u8 raw[IEEE80211_FRAME_LEN];
+ u8 cmd[360];
+ } u;
+ struct list_head list;
+};
+
+#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
+#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
+#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
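
A quick standalone check of these macros, assuming the usual mac80211 value IEEE80211_SCTL_SEQ == 0xFFF0 (the 12-bit sequence number sits in bits 4..15 of the sequence-control field, the fragment number in bits 0..3):

#include <stdint.h>
#include <stdio.h>

#define IEEE80211_SCTL_SEQ  0xFFF0      /* assumed mac80211 value */
#define SEQ_TO_SN(seq)      (((seq) & IEEE80211_SCTL_SEQ) >> 4)
#define SN_TO_SEQ(ssn)      (((ssn) << 4) & IEEE80211_SCTL_SEQ)

int main(void)
{
    uint16_t seq_ctrl = 0x0A35;         /* SN 0x0A3, fragment 5 */

    printf("sn=0x%03x back=0x%04x\n",
           (unsigned)SEQ_TO_SN(seq_ctrl),
           (unsigned)SN_TO_SEQ(SEQ_TO_SN(seq_ctrl)));
    return 0;
}
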
+
+#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
+#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
+#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
+
+#define IL_SUPPORTED_RATES_IE_LEN 8
+
+#define SCAN_INTERVAL 100
+
+#define MAX_TID_COUNT 9
+
+#define IL_INVALID_RATE 0xFF
+#define IL_INVALID_VALUE -1
+
+#define STA_PS_STATUS_WAKE 0
+#define STA_PS_STATUS_SLEEP 1
+
+struct il3945_ibss_seq {
+ u8 mac[ETH_ALEN];
+ u16 seq_num;
+ u16 frag_num;
+ unsigned long packet_time;
+ struct list_head list;
+};
+
+#define IL_RX_HDR(x) ((struct il3945_rx_frame_hdr *)(\
+ x->u.rx_frame.stats.payload + \
+ x->u.rx_frame.stats.phy_count))
+#define IL_RX_END(x) ((struct il3945_rx_frame_end *)(\
+ IL_RX_HDR(x)->payload + \
+ le16_to_cpu(IL_RX_HDR(x)->len)))
+#define IL_RX_STATS(x) (&x->u.rx_frame.stats)
+#define IL_RX_DATA(x) (IL_RX_HDR(x)->payload)
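
The IL_RX_*() macros walk a packed receive buffer: phy stats, a phy_count-sized blob of phy data, the frame header, len bytes of MPDU, then the end/status block. The simplified user-space model below mirrors that traversal; the struct shapes are illustrative stand-ins, not the real il3945_rx_frame_* definitions (whose length field is a little-endian u16 read via le16_to_cpu).

#include <stdint.h>
#include <stdio.h>

struct demo_rx_stats {
    uint8_t phy_count;      /* bytes of phy data that follow */
    uint8_t payload[];      /* phy data, then the frame header */
};

struct demo_rx_hdr {
    uint8_t len;            /* real header: little-endian u16 */
    uint8_t payload[];      /* MPDU bytes, then the end/status block */
};

int main(void)
{
    /* phy_count, 2 phy bytes, hdr.len, 4 MPDU bytes, 1 "end" byte */
    uint8_t buf[] = { 2, 0xAA, 0xBB, 4, 'd', 'a', 't', 'a', 0xEE };
    struct demo_rx_stats *stats = (struct demo_rx_stats *)buf;
    struct demo_rx_hdr *hdr =
        (struct demo_rx_hdr *)(stats->payload + stats->phy_count);
    uint8_t *end = hdr->payload + hdr->len;     /* first byte after MPDU */

    printf("phy bytes %u, mpdu len %u, end byte 0x%02X\n",
           (unsigned)stats->phy_count, (unsigned)hdr->len, (unsigned)*end);
    return 0;
}
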
+
+/******************************************************************************
+ *
+ * Functions implemented in iwl3945-base.c which are forward declared here
+ * for use by iwl-*.c
+ *
+ *****************************************************************************/
+extern int il3945_calc_db_from_ratio(int sig_ratio);
+extern void il3945_rx_replenish(void *data);
+extern void il3945_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
+extern unsigned int il3945_fill_beacon_frame(struct il_priv *il,
+ struct ieee80211_hdr *hdr,
+ int left);
+extern int il3945_dump_nic_event_log(struct il_priv *il, bool full_log,
+ char **buf, bool display);
+extern void il3945_dump_nic_error_log(struct il_priv *il);
+
+/******************************************************************************
+ *
+ * Functions implemented in iwl-[34]*.c which are forward declared here
+ * for use by iwl3945-base.c
+ *
+ * NOTE: The implementations of these functions are hardware specific
+ * which is why they are in the hardware specific files (vs. iwl-base.c)
+ *
+ * Naming convention --
+ * il3945_ <-- It's part of iwlwifi (should be changed to il3945_)
+ * il3945_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
+ * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
+ * il3945_bg_ <-- Called from work queue context
+ * il3945_mac_ <-- mac80211 callback
+ *
+ ****************************************************************************/
+extern void il3945_hw_handler_setup(struct il_priv *il);
+extern void il3945_hw_setup_deferred_work(struct il_priv *il);
+extern void il3945_hw_cancel_deferred_work(struct il_priv *il);
+extern int il3945_hw_rxq_stop(struct il_priv *il);
+extern int il3945_hw_set_hw_params(struct il_priv *il);
+extern int il3945_hw_nic_init(struct il_priv *il);
+extern int il3945_hw_nic_stop_master(struct il_priv *il);
+extern void il3945_hw_txq_ctx_free(struct il_priv *il);
+extern void il3945_hw_txq_ctx_stop(struct il_priv *il);
+extern int il3945_hw_nic_reset(struct il_priv *il);
+extern int il3945_hw_txq_attach_buf_to_tfd(struct il_priv *il,
+ struct il_tx_queue *txq,
+ dma_addr_t addr, u16 len, u8 reset,
+ u8 pad);
+extern void il3945_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
+extern int il3945_hw_get_temperature(struct il_priv *il);
+extern int il3945_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
+extern unsigned int il3945_hw_get_beacon_cmd(struct il_priv *il,
+ struct il3945_frame *frame,
+ u8 rate);
+void il3945_hw_build_tx_cmd_rate(struct il_priv *il, struct il_device_cmd *cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr, int sta_id);
+extern int il3945_hw_reg_send_txpower(struct il_priv *il);
+extern int il3945_hw_reg_set_txpower(struct il_priv *il, s8 power);
+extern void il3945_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
+void il3945_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
+extern void il3945_disable_events(struct il_priv *il);
+extern int il4965_get_temperature(const struct il_priv *il);
+extern void il3945_post_associate(struct il_priv *il);
+extern void il3945_config_ap(struct il_priv *il);
+
+extern int il3945_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx);
+
+/**
+ * il3945_hw_find_station - Find station id for a given BSSID
+ * @bssid: MAC address of station ID to find
+ *
+ * NOTE: This should not be hardware specific but the code has
+ * not yet been merged into a single common layer for managing the
+ * station tables.
+ */
+extern u8 il3945_hw_find_station(struct il_priv *il, const u8 * bssid);
+
+extern struct ieee80211_ops il3945_hw_ops;
+
+extern __le32 il3945_get_antenna_flags(const struct il_priv *il);
+extern int il3945_init_hw_rate_table(struct il_priv *il);
+extern void il3945_reg_txpower_periodic(struct il_priv *il);
+extern int il3945_txpower_set_from_eeprom(struct il_priv *il);
+
+extern int il3945_rs_next_rate(struct il_priv *il, int rate);
+
+/* scanning */
+int il3945_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
+void il3945_post_scan(struct il_priv *il);
+
+/* rates */
+extern const struct il3945_rate_info il3945_rates[RATE_COUNT_3945];
+
+/* RSSI to dBm */
+#define IL39_RSSI_OFFSET 95
+
+/*
+ * EEPROM related constants, enums, and structures.
+ */
+#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7)
+
+/*
+ * Mapping of a Tx power level, at factory calibration temperature,
+ * to a radio/DSP gain table idx.
+ * One for each of 5 "sample" power levels in each band.
+ * v_det is measured at the factory, using the 3945's built-in power amplifier
+ * (PA) output voltage detector. This same detector is used during Tx of
+ * long packets in normal operation to provide feedback as to proper output
+ * level.
+ * Data copied from EEPROM.
+ * DO NOT ALTER THIS STRUCTURE!!!
+ */
+struct il3945_eeprom_txpower_sample {
+ u8 gain_idx; /* idx into power (gain) setup table ... */
+ s8 power; /* ... for this pwr level for this chnl group */
+ u16 v_det; /* PA output voltage */
+} __packed;
+
+/*
+ * Mappings of Tx power levels -> nominal radio/DSP gain table idxes.
+ * One for each channel group (a.k.a. "band") (1 for BG, 4 for A).
+ * Tx power setup code interpolates between the 5 "sample" power levels
+ * to determine the nominal setup for a requested power level.
+ * Data copied from EEPROM.
+ * DO NOT ALTER THIS STRUCTURE!!!
+ */
+struct il3945_eeprom_txpower_group {
+ struct il3945_eeprom_txpower_sample samples[5]; /* 5 power levels */
+ s32 a, b, c, d, e; /* coefficients for voltage->power
+ * formula (signed) */
+ s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on
+ * frequency (signed) */
+ s8 saturation_power; /* highest power possible by h/w in this
+ * band */
+ u8 group_channel; /* "representative" channel # in this band */
+ s16 temperature; /* h/w temperature at factory calib this band
+ * (signed) */
+} __packed;
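
Tx power setup interpolates between the five samples above to pick a gain index for a requested power. The sketch below shows plain linear interpolation over such sample points in user-space C; the real 3945 code additionally applies the a..e / Fa..Fe coefficients and temperature correction, so treat this only as the core idea with made-up numbers.

#include <stdio.h>

struct demo_sample {
    int gain_idx;   /* radio/DSP gain table index */
    int power;      /* dBm at factory calibration */
};

/* samples must be sorted by descending power (highest power first) */
static int interp_gain(const struct demo_sample *s, int n, int req_power)
{
    int i;

    if (req_power >= s[0].power)
        return s[0].gain_idx;
    if (req_power <= s[n - 1].power)
        return s[n - 1].gain_idx;

    for (i = 0; i < n - 1; i++) {
        if (req_power <= s[i].power && req_power >= s[i + 1].power) {
            int dp = s[i].power - s[i + 1].power;
            int dg = s[i + 1].gain_idx - s[i].gain_idx;

            /* linear interpolation between the two bracketing samples */
            return s[i].gain_idx + dg * (s[i].power - req_power) / dp;
        }
    }
    return s[n - 1].gain_idx;   /* not reached for sorted input */
}

int main(void)
{
    const struct demo_sample samples[5] = {
        { 10, 15 }, { 20, 12 }, { 30, 9 }, { 40, 6 }, { 50, 3 },
    };

    printf("gain for 10 dBm: %d\n", interp_gain(samples, 5, 10));
    return 0;
}
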
+
+/*
+ * Temperature-based Tx-power compensation data, not band-specific.
+ * These coefficients are used to modify a/b/c/d/e coeffs based on
+ * difference between current temperature and factory calib temperature.
+ * Data copied from EEPROM.
+ */
+struct il3945_eeprom_temperature_corr {
+ u32 Ta;
+ u32 Tb;
+ u32 Tc;
+ u32 Td;
+ u32 Te;
+} __packed;
+
+/*
+ * EEPROM map
+ */
+struct il3945_eeprom {
+ u8 reserved0[16];
+ u16 device_id; /* abs.ofs: 16 */
+ u8 reserved1[2];
+ u16 pmc; /* abs.ofs: 20 */
+ u8 reserved2[20];
+ u8 mac_address[6]; /* abs.ofs: 42 */
+ u8 reserved3[58];
+ u16 board_revision; /* abs.ofs: 106 */
+ u8 reserved4[11];
+ u8 board_pba_number[9]; /* abs.ofs: 119 */
+ u8 reserved5[8];
+ u16 version; /* abs.ofs: 136 */
+ u8 sku_cap; /* abs.ofs: 138 */
+ u8 leds_mode; /* abs.ofs: 139 */
+ u16 oem_mode;
+ u16 wowlan_mode; /* abs.ofs: 142 */
+ u16 leds_time_interval; /* abs.ofs: 144 */
+ u8 leds_off_time; /* abs.ofs: 146 */
+ u8 leds_on_time; /* abs.ofs: 147 */
+ u8 almgor_m_version; /* abs.ofs: 148 */
+ u8 antenna_switch_type; /* abs.ofs: 149 */
+ u8 reserved6[42];
+ u8 sku_id[4]; /* abs.ofs: 192 */
+
+/*
+ * Per-channel regulatory data.
+ *
+ * Each channel that *might* be supported by 3945 has a fixed location
+ * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
+ * txpower (MSB).
+ *
+ * Entries immediately below are for 20 MHz channel width.
+ *
+ * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+ */
+ u16 band_1_count; /* abs.ofs: 196 */
+ struct il_eeprom_channel band_1_channels[14]; /* abs.ofs: 198 */
+
+/*
+ * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
+ * 5.0 GHz channels 7, 8, 11, 12, 16
+ * (4915-5080MHz) (none of these is ever supported)
+ */
+ u16 band_2_count; /* abs.ofs: 226 */
+ struct il_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */
+
+/*
+ * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+ * (5170-5320MHz)
+ */
+ u16 band_3_count; /* abs.ofs: 254 */
+ struct il_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */
+
+/*
+ * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+ * (5500-5700MHz)
+ */
+ u16 band_4_count; /* abs.ofs: 280 */
+ struct il_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */
+
+/*
+ * 5.7 GHz channels 145, 149, 153, 157, 161, 165
+ * (5725-5825MHz)
+ */
+ u16 band_5_count; /* abs.ofs: 304 */
+ struct il_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */
+
+ u8 reserved9[194];
+
+/*
+ * 3945 Txpower calibration data.
+ */
+#define IL_NUM_TX_CALIB_GROUPS 5
+ struct il3945_eeprom_txpower_group groups[IL_NUM_TX_CALIB_GROUPS];
+/* abs.ofs: 512 */
+ struct il3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */
+ u8 reserved16[172]; /* fill out to full 1024 byte block */
+} __packed;
+
+#define IL3945_EEPROM_IMG_SIZE 1024
+
+/* End of EEPROM */
+
+#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */
+#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */
+
+/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
+#define IL39_NUM_QUEUES 5
+#define IL39_CMD_QUEUE_NUM 4
+
+#define IL_DEFAULT_TX_RETRY 15
+
+/*********************************************/
+
+#define RFD_SIZE 4
+#define NUM_TFD_CHUNKS 4
+
+#define TFD_CTL_COUNT_SET(n) (n << 24)
+#define TFD_CTL_COUNT_GET(ctl) ((ctl >> 24) & 7)
+#define TFD_CTL_PAD_SET(n) (n << 28)
+#define TFD_CTL_PAD_GET(ctl) (ctl >> 28)
+
+/* Sizes and addresses for instruction and data memory (SRAM) in
+ * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
+#define IL39_RTC_INST_LOWER_BOUND (0x000000)
+#define IL39_RTC_INST_UPPER_BOUND (0x014000)
+
+#define IL39_RTC_DATA_LOWER_BOUND (0x800000)
+#define IL39_RTC_DATA_UPPER_BOUND (0x808000)
+
+#define IL39_RTC_INST_SIZE (IL39_RTC_INST_UPPER_BOUND - \
+ IL39_RTC_INST_LOWER_BOUND)
+#define IL39_RTC_DATA_SIZE (IL39_RTC_DATA_UPPER_BOUND - \
+ IL39_RTC_DATA_LOWER_BOUND)
+
+#define IL39_MAX_INST_SIZE IL39_RTC_INST_SIZE
+#define IL39_MAX_DATA_SIZE IL39_RTC_DATA_SIZE
+
+/* Size of uCode instruction memory in bootstrap state machine */
+#define IL39_MAX_BSM_SIZE IL39_RTC_INST_SIZE
+
+static inline int
+il3945_hw_valid_rtc_data_addr(u32 addr)
+{
+ return (addr >= IL39_RTC_DATA_LOWER_BOUND &&
+ addr < IL39_RTC_DATA_UPPER_BOUND);
+}
+
+/* Base physical address of il3945_shared is provided to FH39_TSSR_CBB_BASE
+ * and &il3945_shared.rx_read_ptr[0] is provided to FH39_RCSR_RPTR_ADDR(0) */
+struct il3945_shared {
+ __le32 tx_base_ptr[8];
+} __packed;
+
+/************************************/
+/* iwl3945 Flow Handler Definitions */
+/************************************/
+
+/**
+ * This I/O area is directly readable/writable by the driver (e.g. Linux uses writel())
+ * Addresses are offsets from device's PCI hardware base address.
+ */
+#define FH39_MEM_LOWER_BOUND (0x0800)
+#define FH39_MEM_UPPER_BOUND (0x1000)
+
+#define FH39_CBCC_TBL (FH39_MEM_LOWER_BOUND + 0x140)
+#define FH39_TFDB_TBL (FH39_MEM_LOWER_BOUND + 0x180)
+#define FH39_RCSR_TBL (FH39_MEM_LOWER_BOUND + 0x400)
+#define FH39_RSSR_TBL (FH39_MEM_LOWER_BOUND + 0x4c0)
+#define FH39_TCSR_TBL (FH39_MEM_LOWER_BOUND + 0x500)
+#define FH39_TSSR_TBL (FH39_MEM_LOWER_BOUND + 0x680)
+
+/* TFDB (Transmit Frame Buffer Descriptor) */
+#define FH39_TFDB(_ch, buf) (FH39_TFDB_TBL + \
+ ((_ch) * 2 + (buf)) * 0x28)
+#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch) (FH39_TFDB_TBL + 0x50 * (_ch))
+
+/* CBCC channel is [0,2] */
+#define FH39_CBCC(_ch) (FH39_CBCC_TBL + (_ch) * 0x8)
+#define FH39_CBCC_CTRL(_ch) (FH39_CBCC(_ch) + 0x00)
+#define FH39_CBCC_BASE(_ch) (FH39_CBCC(_ch) + 0x04)
+
+/* RCSR channel is [0,2] */
+#define FH39_RCSR(_ch) (FH39_RCSR_TBL + (_ch) * 0x40)
+#define FH39_RCSR_CONFIG(_ch) (FH39_RCSR(_ch) + 0x00)
+#define FH39_RCSR_RBD_BASE(_ch) (FH39_RCSR(_ch) + 0x04)
+#define FH39_RCSR_WPTR(_ch) (FH39_RCSR(_ch) + 0x20)
+#define FH39_RCSR_RPTR_ADDR(_ch) (FH39_RCSR(_ch) + 0x24)
+
+#define FH39_RSCSR_CHNL0_WPTR (FH39_RCSR_WPTR(0))
+
+/* RSSR */
+#define FH39_RSSR_CTRL (FH39_RSSR_TBL + 0x000)
+#define FH39_RSSR_STATUS (FH39_RSSR_TBL + 0x004)
+
+/* TCSR */
+#define FH39_TCSR(_ch) (FH39_TCSR_TBL + (_ch) * 0x20)
+#define FH39_TCSR_CONFIG(_ch) (FH39_TCSR(_ch) + 0x00)
+#define FH39_TCSR_CREDIT(_ch) (FH39_TCSR(_ch) + 0x04)
+#define FH39_TCSR_BUFF_STTS(_ch) (FH39_TCSR(_ch) + 0x08)
+
+/* TSSR */
+#define FH39_TSSR_CBB_BASE (FH39_TSSR_TBL + 0x000)
+#define FH39_TSSR_MSG_CONFIG (FH39_TSSR_TBL + 0x008)
+#define FH39_TSSR_TX_STATUS (FH39_TSSR_TBL + 0x010)
+
+/* DBM */
+
+#define FH39_SRVC_CHNL (6)
+
+#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20)
+#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4)
+
+#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000)
+
+#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
+#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
+
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
+#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
+
+#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000)
+
+#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000)
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100)
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080)
+
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020)
+#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005)
+
+#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) (BIT(_ch) << 24)
+#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch) (BIT(_ch) << 16)
+
+#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \
+ (FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \
+ FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch))
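
A channel counts as idle once both its buffers-empty and no-pending-request bits are set, which is exactly what the mask above combines. A small user-space illustration with a faked status register follows; the shortened macro names are local to the demo.

#include <stdint.h>
#include <stdio.h>

#define BIT(n)                      (1u << (n))
#define TX_STATUS_BUFS_EMPTY(_ch)   (BIT(_ch) << 24)
#define TX_STATUS_NO_PEND_REQ(_ch)  (BIT(_ch) << 16)
#define TX_STATUS_CHNL_IDLE(_ch) \
    (TX_STATUS_BUFS_EMPTY(_ch) | TX_STATUS_NO_PEND_REQ(_ch))

static uint32_t fake_tx_status(void)
{
    /* pretend channels 0 and 1 are idle, channel 2 still busy */
    return TX_STATUS_CHNL_IDLE(0) | TX_STATUS_CHNL_IDLE(1) |
           TX_STATUS_NO_PEND_REQ(2);
}

int main(void)
{
    uint32_t status = fake_tx_status();
    int ch;

    for (ch = 0; ch < 3; ch++)
        printf("channel %d: %s\n", ch,
               (status & TX_STATUS_CHNL_IDLE(ch)) ==
               TX_STATUS_CHNL_IDLE(ch) ? "idle" : "busy");
    return 0;
}
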
+
+#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
+
+struct il3945_tfd_tb {
+ __le32 addr;
+ __le32 len;
+} __packed;
+
+struct il3945_tfd {
+ __le32 control_flags;
+ struct il3945_tfd_tb tbs[4];
+ u8 __pad[28];
+} __packed;
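
Putting the TFD_CTL_*() macros defined earlier together with this descriptor layout: attaching a buffer means filling the next tbs[] slot and bumping the packed count in bits 24..26 of control_flags, with the pad kept in bits 28..31. The following standalone sketch re-declares demo copies of the macros and structs so it compiles on its own; it is a model of the idea, not the driver's il3945_hw_txq_attach_buf_to_tfd().

#include <stdint.h>
#include <stdio.h>

#define TFD_CTL_COUNT_SET(n)    ((n) << 24)
#define TFD_CTL_COUNT_GET(ctl)  (((ctl) >> 24) & 7)
#define TFD_CTL_PAD_SET(n)      ((n) << 28)
#define TFD_CTL_PAD_GET(ctl)    ((ctl) >> 28)

struct demo_tfd_tb { uint32_t addr; uint32_t len; };

struct demo_tfd {
    uint32_t control_flags;
    struct demo_tfd_tb tbs[4];
    uint8_t pad[28];
};

/* append one DMA buffer to the TFD and bump the packed buffer count */
static int demo_attach_buf(struct demo_tfd *tfd, uint32_t addr, uint32_t len)
{
    uint32_t count = TFD_CTL_COUNT_GET(tfd->control_flags);

    if (count >= 4)
        return -1;      /* all chunks already used */

    tfd->tbs[count].addr = addr;
    tfd->tbs[count].len = len;
    tfd->control_flags = TFD_CTL_COUNT_SET(count + 1) |
                         TFD_CTL_PAD_SET(TFD_CTL_PAD_GET(tfd->control_flags));
    return 0;
}

int main(void)
{
    struct demo_tfd tfd = { 0 };

    demo_attach_buf(&tfd, 0x10000000, 64);      /* e.g. the Tx command */
    demo_attach_buf(&tfd, 0x20000000, 1500);    /* e.g. the frame payload */
    printf("buffers attached: %u\n",
           (unsigned)TFD_CTL_COUNT_GET(tfd.control_flags));
    return 0;
}
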
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ssize_t il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t il3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t il3945_ucode_general_stats_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos);
+#endif
+
+#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c b/drivers/net/wireless/iwlegacy/4965-calib.c
index 162d877..d3248e3 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-calib.c
+++ b/drivers/net/wireless/iwlegacy/4965-calib.c
@@ -63,15 +63,14 @@
#include <linux/slab.h>
#include <net/mac80211.h>
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-4965-calib.h"
+#include "common.h"
+#include "4965.h"
/*****************************************************************************
* INIT calibrations framework
*****************************************************************************/
-struct statistics_general_data {
+struct stats_general_data {
u32 beacon_silence_rssi_a;
u32 beacon_silence_rssi_b;
u32 beacon_silence_rssi_c;
@@ -80,14 +79,15 @@ struct statistics_general_data {
u32 beacon_energy_c;
};
-void iwl4965_calib_free_results(struct iwl_priv *priv)
+void
+il4965_calib_free_results(struct il_priv *il)
{
int i;
- for (i = 0; i < IWL_CALIB_MAX; i++) {
- kfree(priv->calib_results[i].buf);
- priv->calib_results[i].buf = NULL;
- priv->calib_results[i].buf_len = 0;
+ for (i = 0; i < IL_CALIB_MAX; i++) {
+ kfree(il->calib_results[i].buf);
+ il->calib_results[i].buf = NULL;
+ il->calib_results[i].buf_len = 0;
}
}
@@ -103,10 +103,9 @@ void iwl4965_calib_free_results(struct iwl_priv *priv)
* enough to receive all of our own network traffic, but not so
* high that our DSP gets too busy trying to lock onto non-network
* activity/noise. */
-static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
- u32 norm_fa,
- u32 rx_enable_time,
- struct statistics_general_data *rx_info)
+static int
+il4965_sens_energy_cck(struct il_priv *il, u32 norm_fa, u32 rx_enable_time,
+ struct stats_general_data *rx_info)
{
u32 max_nrg_cck = 0;
int i = 0;
@@ -129,22 +128,22 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
u32 false_alarms = norm_fa * 200 * 1024;
u32 max_false_alarms = MAX_FA_CCK * rx_enable_time;
u32 min_false_alarms = MIN_FA_CCK * rx_enable_time;
- struct iwl_sensitivity_data *data = NULL;
- const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+ struct il_sensitivity_data *data = NULL;
+ const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
- data = &(priv->sensitivity_data);
+ data = &(il->sensitivity_data);
data->nrg_auto_corr_silence_diff = 0;
/* Find max silence rssi among all 3 receivers.
* This is background noise, which may include transmissions from other
* networks, measured during silence before our network's beacon */
- silence_rssi_a = (u8)((rx_info->beacon_silence_rssi_a &
- ALL_BAND_FILTER) >> 8);
- silence_rssi_b = (u8)((rx_info->beacon_silence_rssi_b &
- ALL_BAND_FILTER) >> 8);
- silence_rssi_c = (u8)((rx_info->beacon_silence_rssi_c &
- ALL_BAND_FILTER) >> 8);
+ silence_rssi_a =
+ (u8) ((rx_info->beacon_silence_rssi_a & ALL_BAND_FILTER) >> 8);
+ silence_rssi_b =
+ (u8) ((rx_info->beacon_silence_rssi_b & ALL_BAND_FILTER) >> 8);
+ silence_rssi_c =
+ (u8) ((rx_info->beacon_silence_rssi_c & ALL_BAND_FILTER) >> 8);
val = max(silence_rssi_b, silence_rssi_c);
max_silence_rssi = max(silence_rssi_a, (u8) val);
@@ -160,9 +159,8 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
val = data->nrg_silence_rssi[i];
silence_ref = max(silence_ref, val);
}
- IWL_DEBUG_CALIB(priv, "silence a %u, b %u, c %u, 20-bcn max %u\n",
- silence_rssi_a, silence_rssi_b, silence_rssi_c,
- silence_ref);
+ D_CALIB("silence a %u, b %u, c %u, 20-bcn max %u\n", silence_rssi_a,
+ silence_rssi_b, silence_rssi_c, silence_ref);
/* Find max rx energy (min value!) among all 3 receivers,
* measured during beacon frame.
@@ -184,9 +182,9 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
max_nrg_cck = (u32) max(max_nrg_cck, (data->nrg_value[i]));
max_nrg_cck += 6;
- IWL_DEBUG_CALIB(priv, "rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
- rx_info->beacon_energy_a, rx_info->beacon_energy_b,
- rx_info->beacon_energy_c, max_nrg_cck - 6);
+ D_CALIB("rx energy a %u, b %u, c %u, 10-bcn max/min %u\n",
+ rx_info->beacon_energy_a, rx_info->beacon_energy_b,
+ rx_info->beacon_energy_c, max_nrg_cck - 6);
/* Count number of consecutive beacons with fewer-than-desired
* false alarms. */
@@ -194,35 +192,34 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
data->num_in_cck_no_fa++;
else
data->num_in_cck_no_fa = 0;
- IWL_DEBUG_CALIB(priv, "consecutive bcns with few false alarms = %u\n",
- data->num_in_cck_no_fa);
+ D_CALIB("consecutive bcns with few false alarms = %u\n",
+ data->num_in_cck_no_fa);
/* If we got too many false alarms this time, reduce sensitivity */
- if ((false_alarms > max_false_alarms) &&
- (data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK)) {
- IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u\n",
- false_alarms, max_false_alarms);
- IWL_DEBUG_CALIB(priv, "... reducing sensitivity\n");
- data->nrg_curr_state = IWL_FA_TOO_MANY;
+ if (false_alarms > max_false_alarms &&
+ data->auto_corr_cck > AUTO_CORR_MAX_TH_CCK) {
+ D_CALIB("norm FA %u > max FA %u\n", false_alarms,
+ max_false_alarms);
+ D_CALIB("... reducing sensitivity\n");
+ data->nrg_curr_state = IL_FA_TOO_MANY;
/* Store for "fewer than desired" on later beacon */
data->nrg_silence_ref = silence_ref;
/* increase energy threshold (reduce nrg value)
* to decrease sensitivity */
data->nrg_th_cck = data->nrg_th_cck - NRG_STEP_CCK;
- /* Else if we got fewer than desired, increase sensitivity */
+ /* Else if we got fewer than desired, increase sensitivity */
} else if (false_alarms < min_false_alarms) {
- data->nrg_curr_state = IWL_FA_TOO_FEW;
+ data->nrg_curr_state = IL_FA_TOO_FEW;
/* Compare silence level with silence level for most recent
* healthy number or too many false alarms */
- data->nrg_auto_corr_silence_diff = (s32)data->nrg_silence_ref -
- (s32)silence_ref;
+ data->nrg_auto_corr_silence_diff =
+ (s32) data->nrg_silence_ref - (s32) silence_ref;
- IWL_DEBUG_CALIB(priv,
- "norm FA %u < min FA %u, silence diff %d\n",
- false_alarms, min_false_alarms,
- data->nrg_auto_corr_silence_diff);
+ D_CALIB("norm FA %u < min FA %u, silence diff %d\n",
+ false_alarms, min_false_alarms,
+ data->nrg_auto_corr_silence_diff);
/* Increase value to increase sensitivity, but only if:
* 1a) previous beacon did *not* have *too many* false alarms
@@ -230,23 +227,22 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
* from a previous beacon with too many, or healthy # FAs
* OR 2) We've seen a lot of beacons (100) with too few
* false alarms */
- if ((data->nrg_prev_state != IWL_FA_TOO_MANY) &&
- ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
- (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
+ if (data->nrg_prev_state != IL_FA_TOO_MANY &&
+ (data->nrg_auto_corr_silence_diff > NRG_DIFF ||
+ data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA)) {
- IWL_DEBUG_CALIB(priv, "... increasing sensitivity\n");
+ D_CALIB("... increasing sensitivity\n");
/* Increase nrg value to increase sensitivity */
val = data->nrg_th_cck + NRG_STEP_CCK;
- data->nrg_th_cck = min((u32)ranges->min_nrg_cck, val);
+ data->nrg_th_cck = min((u32) ranges->min_nrg_cck, val);
} else {
- IWL_DEBUG_CALIB(priv,
- "... but not changing sensitivity\n");
+ D_CALIB("... but not changing sensitivity\n");
}
- /* Else we got a healthy number of false alarms, keep status quo */
+ /* Else we got a healthy number of false alarms, keep status quo */
} else {
- IWL_DEBUG_CALIB(priv, " FA in safe zone\n");
- data->nrg_curr_state = IWL_FA_GOOD_RANGE;
+ D_CALIB(" FA in safe zone\n");
+ data->nrg_curr_state = IL_FA_GOOD_RANGE;
/* Store for use in "fewer than desired" with later beacon */
data->nrg_silence_ref = silence_ref;
@@ -254,8 +250,8 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
/* If previous beacon had too many false alarms,
* give it some extra margin by reducing sensitivity again
* (but don't go below measured energy of desired Rx) */
- if (IWL_FA_TOO_MANY == data->nrg_prev_state) {
- IWL_DEBUG_CALIB(priv, "... increasing margin\n");
+ if (IL_FA_TOO_MANY == data->nrg_prev_state) {
+ D_CALIB("... increasing margin\n");
if (data->nrg_th_cck > (max_nrg_cck + NRG_MARGIN))
data->nrg_th_cck -= NRG_MARGIN;
else
@@ -269,7 +265,7 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
* Lower value is higher energy, so we use max()!
*/
data->nrg_th_cck = max(max_nrg_cck, data->nrg_th_cck);
- IWL_DEBUG_CALIB(priv, "new nrg_th_cck %u\n", data->nrg_th_cck);
+ D_CALIB("new nrg_th_cck %u\n", data->nrg_th_cck);
data->nrg_prev_state = data->nrg_curr_state;
@@ -284,190 +280,187 @@ static int iwl4965_sens_energy_cck(struct iwl_priv *priv,
else {
val = data->auto_corr_cck + AUTO_CORR_STEP_CCK;
data->auto_corr_cck =
- min((u32)ranges->auto_corr_max_cck, val);
+ min((u32) ranges->auto_corr_max_cck, val);
}
val = data->auto_corr_cck_mrc + AUTO_CORR_STEP_CCK;
data->auto_corr_cck_mrc =
- min((u32)ranges->auto_corr_max_cck_mrc, val);
- } else if ((false_alarms < min_false_alarms) &&
- ((data->nrg_auto_corr_silence_diff > NRG_DIFF) ||
- (data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA))) {
+ min((u32) ranges->auto_corr_max_cck_mrc, val);
+ } else if (false_alarms < min_false_alarms &&
+ (data->nrg_auto_corr_silence_diff > NRG_DIFF ||
+ data->num_in_cck_no_fa > MAX_NUMBER_CCK_NO_FA)) {
/* Decrease auto_corr values to increase sensitivity */
val = data->auto_corr_cck - AUTO_CORR_STEP_CCK;
- data->auto_corr_cck =
- max((u32)ranges->auto_corr_min_cck, val);
+ data->auto_corr_cck = max((u32) ranges->auto_corr_min_cck, val);
val = data->auto_corr_cck_mrc - AUTO_CORR_STEP_CCK;
data->auto_corr_cck_mrc =
- max((u32)ranges->auto_corr_min_cck_mrc, val);
+ max((u32) ranges->auto_corr_min_cck_mrc, val);
}
return 0;
}
-
-static int iwl4965_sens_auto_corr_ofdm(struct iwl_priv *priv,
- u32 norm_fa,
- u32 rx_enable_time)
+static int
+il4965_sens_auto_corr_ofdm(struct il_priv *il, u32 norm_fa, u32 rx_enable_time)
{
u32 val;
u32 false_alarms = norm_fa * 200 * 1024;
u32 max_false_alarms = MAX_FA_OFDM * rx_enable_time;
u32 min_false_alarms = MIN_FA_OFDM * rx_enable_time;
- struct iwl_sensitivity_data *data = NULL;
- const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+ struct il_sensitivity_data *data = NULL;
+ const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
- data = &(priv->sensitivity_data);
+ data = &(il->sensitivity_data);
/* If we got too many false alarms this time, reduce sensitivity */
if (false_alarms > max_false_alarms) {
- IWL_DEBUG_CALIB(priv, "norm FA %u > max FA %u)\n",
- false_alarms, max_false_alarms);
+ D_CALIB("norm FA %u > max FA %u)\n", false_alarms,
+ max_false_alarms);
val = data->auto_corr_ofdm + AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm =
- min((u32)ranges->auto_corr_max_ofdm, val);
+ min((u32) ranges->auto_corr_max_ofdm, val);
val = data->auto_corr_ofdm_mrc + AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm_mrc =
- min((u32)ranges->auto_corr_max_ofdm_mrc, val);
+ min((u32) ranges->auto_corr_max_ofdm_mrc, val);
val = data->auto_corr_ofdm_x1 + AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm_x1 =
- min((u32)ranges->auto_corr_max_ofdm_x1, val);
+ min((u32) ranges->auto_corr_max_ofdm_x1, val);
val = data->auto_corr_ofdm_mrc_x1 + AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm_mrc_x1 =
- min((u32)ranges->auto_corr_max_ofdm_mrc_x1, val);
+ min((u32) ranges->auto_corr_max_ofdm_mrc_x1, val);
}
/* Else if we got fewer than desired, increase sensitivity */
else if (false_alarms < min_false_alarms) {
- IWL_DEBUG_CALIB(priv, "norm FA %u < min FA %u\n",
- false_alarms, min_false_alarms);
+ D_CALIB("norm FA %u < min FA %u\n", false_alarms,
+ min_false_alarms);
val = data->auto_corr_ofdm - AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm =
- max((u32)ranges->auto_corr_min_ofdm, val);
+ max((u32) ranges->auto_corr_min_ofdm, val);
val = data->auto_corr_ofdm_mrc - AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm_mrc =
- max((u32)ranges->auto_corr_min_ofdm_mrc, val);
+ max((u32) ranges->auto_corr_min_ofdm_mrc, val);
val = data->auto_corr_ofdm_x1 - AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm_x1 =
- max((u32)ranges->auto_corr_min_ofdm_x1, val);
+ max((u32) ranges->auto_corr_min_ofdm_x1, val);
val = data->auto_corr_ofdm_mrc_x1 - AUTO_CORR_STEP_OFDM;
data->auto_corr_ofdm_mrc_x1 =
- max((u32)ranges->auto_corr_min_ofdm_mrc_x1, val);
+ max((u32) ranges->auto_corr_min_ofdm_mrc_x1, val);
} else {
- IWL_DEBUG_CALIB(priv, "min FA %u < norm FA %u < max FA %u OK\n",
- min_false_alarms, false_alarms, max_false_alarms);
+ D_CALIB("min FA %u < norm FA %u < max FA %u OK\n",
+ min_false_alarms, false_alarms, max_false_alarms);
}
return 0;
}
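
The adjustment pattern used throughout this calibration code is: too many false alarms per unit of receive time pushes the auto-correlation threshold up (less sensitive), too few pulls it down (more sensitive), and the value is always clamped to the hardware's allowed range. A standalone sketch of that rule, with illustrative numbers rather than the driver's real sensitivity ranges:

#include <stdio.h>

struct demo_range { unsigned int min, max, step; };

static unsigned int adjust_auto_corr(unsigned int cur, unsigned int fa,
                                     unsigned int fa_min, unsigned int fa_max,
                                     const struct demo_range *r)
{
    if (fa > fa_max) {                  /* too many false alarms */
        cur += r->step;
        if (cur > r->max)
            cur = r->max;
    } else if (fa < fa_min) {           /* too few false alarms */
        cur = (cur > r->min + r->step) ? cur - r->step : r->min;
    }
    return cur;                         /* in range: leave unchanged */
}

int main(void)
{
    const struct demo_range r = { .min = 80, .max = 120, .step = 20 };
    unsigned int th = 90;

    th = adjust_auto_corr(th, 500, 50, 100, &r);    /* too many -> 110 */
    th = adjust_auto_corr(th, 10, 50, 100, &r);     /* too few  -> 90  */
    printf("threshold = %u\n", th);
    return 0;
}
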
-static void iwl4965_prepare_legacy_sensitivity_tbl(struct iwl_priv *priv,
- struct iwl_sensitivity_data *data,
- __le16 *tbl)
+static void
+il4965_prepare_legacy_sensitivity_tbl(struct il_priv *il,
+ struct il_sensitivity_data *data,
+ __le16 *tbl)
{
- tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX] =
- cpu_to_le16((u16)data->auto_corr_ofdm);
- tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX] =
- cpu_to_le16((u16)data->auto_corr_ofdm_mrc);
- tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX] =
- cpu_to_le16((u16)data->auto_corr_ofdm_x1);
- tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX] =
- cpu_to_le16((u16)data->auto_corr_ofdm_mrc_x1);
-
- tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX] =
- cpu_to_le16((u16)data->auto_corr_cck);
- tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX] =
- cpu_to_le16((u16)data->auto_corr_cck_mrc);
-
- tbl[HD_MIN_ENERGY_CCK_DET_INDEX] =
- cpu_to_le16((u16)data->nrg_th_cck);
- tbl[HD_MIN_ENERGY_OFDM_DET_INDEX] =
- cpu_to_le16((u16)data->nrg_th_ofdm);
-
- tbl[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
- cpu_to_le16(data->barker_corr_th_min);
- tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
- cpu_to_le16(data->barker_corr_th_min_mrc);
- tbl[HD_OFDM_ENERGY_TH_IN_INDEX] =
- cpu_to_le16(data->nrg_th_cca);
-
- IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
- data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
- data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
- data->nrg_th_ofdm);
-
- IWL_DEBUG_CALIB(priv, "cck: ac %u mrc %u thresh %u\n",
- data->auto_corr_cck, data->auto_corr_cck_mrc,
- data->nrg_th_cck);
+ tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX] =
+ cpu_to_le16((u16) data->auto_corr_ofdm);
+ tbl[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] =
+ cpu_to_le16((u16) data->auto_corr_ofdm_mrc);
+ tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX] =
+ cpu_to_le16((u16) data->auto_corr_ofdm_x1);
+ tbl[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] =
+ cpu_to_le16((u16) data->auto_corr_ofdm_mrc_x1);
+
+ tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX] =
+ cpu_to_le16((u16) data->auto_corr_cck);
+ tbl[HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX] =
+ cpu_to_le16((u16) data->auto_corr_cck_mrc);
+
+ tbl[HD_MIN_ENERGY_CCK_DET_IDX] = cpu_to_le16((u16) data->nrg_th_cck);
+ tbl[HD_MIN_ENERGY_OFDM_DET_IDX] = cpu_to_le16((u16) data->nrg_th_ofdm);
+
+ tbl[HD_BARKER_CORR_TH_ADD_MIN_IDX] =
+ cpu_to_le16(data->barker_corr_th_min);
+ tbl[HD_BARKER_CORR_TH_ADD_MIN_MRC_IDX] =
+ cpu_to_le16(data->barker_corr_th_min_mrc);
+ tbl[HD_OFDM_ENERGY_TH_IN_IDX] = cpu_to_le16(data->nrg_th_cca);
+
+ D_CALIB("ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
+ data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
+ data->auto_corr_ofdm_x1, data->auto_corr_ofdm_mrc_x1,
+ data->nrg_th_ofdm);
+
+ D_CALIB("cck: ac %u mrc %u thresh %u\n", data->auto_corr_cck,
+ data->auto_corr_cck_mrc, data->nrg_th_cck);
}
-/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
-static int iwl4965_sensitivity_write(struct iwl_priv *priv)
+/* Prepare a C_SENSITIVITY, send to uCode if values have changed */
+static int
+il4965_sensitivity_write(struct il_priv *il)
{
- struct iwl_sensitivity_cmd cmd;
- struct iwl_sensitivity_data *data = NULL;
- struct iwl_host_cmd cmd_out = {
- .id = SENSITIVITY_CMD,
- .len = sizeof(struct iwl_sensitivity_cmd),
+ struct il_sensitivity_cmd cmd;
+ struct il_sensitivity_data *data = NULL;
+ struct il_host_cmd cmd_out = {
+ .id = C_SENSITIVITY,
+ .len = sizeof(struct il_sensitivity_cmd),
.flags = CMD_ASYNC,
.data = &cmd,
};
- data = &(priv->sensitivity_data);
+ data = &(il->sensitivity_data);
memset(&cmd, 0, sizeof(cmd));
- iwl4965_prepare_legacy_sensitivity_tbl(priv, data, &cmd.table[0]);
+ il4965_prepare_legacy_sensitivity_tbl(il, data, &cmd.table[0]);
/* Update uCode's "work" table, and copy it to DSP */
- cmd.control = SENSITIVITY_CMD_CONTROL_WORK_TABLE;
+ cmd.control = C_SENSITIVITY_CONTROL_WORK_TBL;
/* Don't send command to uCode if nothing has changed */
- if (!memcmp(&cmd.table[0], &(priv->sensitivity_tbl[0]),
- sizeof(u16)*HD_TABLE_SIZE)) {
- IWL_DEBUG_CALIB(priv, "No change in SENSITIVITY_CMD\n");
+ if (!memcmp
+ (&cmd.table[0], &(il->sensitivity_tbl[0]),
+ sizeof(u16) * HD_TBL_SIZE)) {
+ D_CALIB("No change in C_SENSITIVITY\n");
return 0;
}
/* Copy table for comparison next time */
- memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
- sizeof(u16)*HD_TABLE_SIZE);
+ memcpy(&(il->sensitivity_tbl[0]), &(cmd.table[0]),
+ sizeof(u16) * HD_TBL_SIZE);
- return iwl_legacy_send_cmd(priv, &cmd_out);
+ return il_send_cmd(il, &cmd_out);
}
-void iwl4965_init_sensitivity(struct iwl_priv *priv)
+void
+il4965_init_sensitivity(struct il_priv *il)
{
int ret = 0;
int i;
- struct iwl_sensitivity_data *data = NULL;
- const struct iwl_sensitivity_ranges *ranges = priv->hw_params.sens;
+ struct il_sensitivity_data *data = NULL;
+ const struct il_sensitivity_ranges *ranges = il->hw_params.sens;
- if (priv->disable_sens_cal)
+ if (il->disable_sens_cal)
return;
- IWL_DEBUG_CALIB(priv, "Start iwl4965_init_sensitivity\n");
+ D_CALIB("Start il4965_init_sensitivity\n");
/* Clear driver's sensitivity algo data */
- data = &(priv->sensitivity_data);
+ data = &(il->sensitivity_data);
if (ranges == NULL)
return;
- memset(data, 0, sizeof(struct iwl_sensitivity_data));
+ memset(data, 0, sizeof(struct il_sensitivity_data));
data->num_in_cck_no_fa = 0;
- data->nrg_curr_state = IWL_FA_TOO_MANY;
- data->nrg_prev_state = IWL_FA_TOO_MANY;
+ data->nrg_curr_state = IL_FA_TOO_MANY;
+ data->nrg_prev_state = IL_FA_TOO_MANY;
data->nrg_silence_ref = 0;
data->nrg_silence_idx = 0;
data->nrg_energy_idx = 0;
@@ -478,9 +471,9 @@ void iwl4965_init_sensitivity(struct iwl_priv *priv)
for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
data->nrg_silence_rssi[i] = 0;
- data->auto_corr_ofdm = ranges->auto_corr_min_ofdm;
+ data->auto_corr_ofdm = ranges->auto_corr_min_ofdm;
data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
- data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
+ data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
data->auto_corr_cck = AUTO_CORR_CCK_MIN_VAL_DEF;
data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
@@ -495,11 +488,12 @@ void iwl4965_init_sensitivity(struct iwl_priv *priv)
data->last_bad_plcp_cnt_cck = 0;
data->last_fa_cnt_cck = 0;
- ret |= iwl4965_sensitivity_write(priv);
- IWL_DEBUG_CALIB(priv, "<<return 0x%X\n", ret);
+ ret |= il4965_sensitivity_write(il);
+ D_CALIB("<<return 0x%X\n", ret);
}
-void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
+void
+il4965_sensitivity_calibration(struct il_priv *il, void *resp)
{
u32 rx_enable_time;
u32 fa_cck;
@@ -508,31 +502,31 @@ void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
u32 bad_plcp_ofdm;
u32 norm_fa_ofdm;
u32 norm_fa_cck;
- struct iwl_sensitivity_data *data = NULL;
- struct statistics_rx_non_phy *rx_info;
- struct statistics_rx_phy *ofdm, *cck;
+ struct il_sensitivity_data *data = NULL;
+ struct stats_rx_non_phy *rx_info;
+ struct stats_rx_phy *ofdm, *cck;
unsigned long flags;
- struct statistics_general_data statis;
+ struct stats_general_data statis;
- if (priv->disable_sens_cal)
+ if (il->disable_sens_cal)
return;
- data = &(priv->sensitivity_data);
+ data = &(il->sensitivity_data);
- if (!iwl_legacy_is_any_associated(priv)) {
- IWL_DEBUG_CALIB(priv, "<< - not associated\n");
+ if (!il_is_any_associated(il)) {
+ D_CALIB("<< - not associated\n");
return;
}
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&il->lock, flags);
- rx_info = &(((struct iwl_notif_statistics *)resp)->rx.general);
- ofdm = &(((struct iwl_notif_statistics *)resp)->rx.ofdm);
- cck = &(((struct iwl_notif_statistics *)resp)->rx.cck);
+ rx_info = &(((struct il_notif_stats *)resp)->rx.general);
+ ofdm = &(((struct il_notif_stats *)resp)->rx.ofdm);
+ cck = &(((struct il_notif_stats *)resp)->rx.cck);
if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
- IWL_DEBUG_CALIB(priv, "<< invalid data.\n");
- spin_unlock_irqrestore(&priv->lock, flags);
+ D_CALIB("<< invalid data.\n");
+ spin_unlock_irqrestore(&il->lock, flags);
return;
}
@@ -544,30 +538,27 @@ void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
bad_plcp_ofdm = le32_to_cpu(ofdm->plcp_err);
statis.beacon_silence_rssi_a =
- le32_to_cpu(rx_info->beacon_silence_rssi_a);
+ le32_to_cpu(rx_info->beacon_silence_rssi_a);
statis.beacon_silence_rssi_b =
- le32_to_cpu(rx_info->beacon_silence_rssi_b);
+ le32_to_cpu(rx_info->beacon_silence_rssi_b);
statis.beacon_silence_rssi_c =
- le32_to_cpu(rx_info->beacon_silence_rssi_c);
- statis.beacon_energy_a =
- le32_to_cpu(rx_info->beacon_energy_a);
- statis.beacon_energy_b =
- le32_to_cpu(rx_info->beacon_energy_b);
- statis.beacon_energy_c =
- le32_to_cpu(rx_info->beacon_energy_c);
+ le32_to_cpu(rx_info->beacon_silence_rssi_c);
+ statis.beacon_energy_a = le32_to_cpu(rx_info->beacon_energy_a);
+ statis.beacon_energy_b = le32_to_cpu(rx_info->beacon_energy_b);
+ statis.beacon_energy_c = le32_to_cpu(rx_info->beacon_energy_c);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&il->lock, flags);
- IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
+ D_CALIB("rx_enable_time = %u usecs\n", rx_enable_time);
if (!rx_enable_time) {
- IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n");
+ D_CALIB("<< RX Enable Time == 0!\n");
return;
}
- /* These statistics increase monotonically, and do not reset
+ /* These stats increase monotonically, and do not reset
* at each beacon. Calculate difference from last value, or just
- * use the new statistics value if it has reset or wrapped around. */
+ * use the new stats value if it has reset or wrapped around. */
if (data->last_bad_plcp_cnt_cck > bad_plcp_cck)
data->last_bad_plcp_cnt_cck = bad_plcp_cck;
else {
@@ -600,17 +591,17 @@ void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp)
norm_fa_ofdm = fa_ofdm + bad_plcp_ofdm;
norm_fa_cck = fa_cck + bad_plcp_cck;
- IWL_DEBUG_CALIB(priv,
- "cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
- bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
+ D_CALIB("cck: fa %u badp %u ofdm: fa %u badp %u\n", fa_cck,
+ bad_plcp_cck, fa_ofdm, bad_plcp_ofdm);
- iwl4965_sens_auto_corr_ofdm(priv, norm_fa_ofdm, rx_enable_time);
- iwl4965_sens_energy_cck(priv, norm_fa_cck, rx_enable_time, &statis);
+ il4965_sens_auto_corr_ofdm(il, norm_fa_ofdm, rx_enable_time);
+ il4965_sens_energy_cck(il, norm_fa_cck, rx_enable_time, &statis);
- iwl4965_sensitivity_write(priv);
+ il4965_sensitivity_write(il);
}
-static inline u8 iwl4965_find_first_chain(u8 mask)
+static inline u8
+il4965_find_first_chain(u8 mask)
{
if (mask & ANT_A)
return CHAIN_A;
@@ -624,8 +615,8 @@ static inline u8 iwl4965_find_first_chain(u8 mask)
* disconnected.
*/
static void
-iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
- struct iwl_chain_noise_data *data)
+il4965_find_disconn_antenna(struct il_priv *il, u32 * average_sig,
+ struct il_chain_noise_data *data)
{
u32 active_chains = 0;
u32 max_average_sig;
@@ -634,12 +625,15 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
u8 first_chain;
u16 i = 0;
- average_sig[0] = data->chain_signal_a /
- priv->cfg->base_params->chain_noise_num_beacons;
- average_sig[1] = data->chain_signal_b /
- priv->cfg->base_params->chain_noise_num_beacons;
- average_sig[2] = data->chain_signal_c /
- priv->cfg->base_params->chain_noise_num_beacons;
+ average_sig[0] =
+ data->chain_signal_a /
+ il->cfg->base_params->chain_noise_num_beacons;
+ average_sig[1] =
+ data->chain_signal_b /
+ il->cfg->base_params->chain_noise_num_beacons;
+ average_sig[2] =
+ data->chain_signal_c /
+ il->cfg->base_params->chain_noise_num_beacons;
if (average_sig[0] >= average_sig[1]) {
max_average_sig = average_sig[0];
@@ -657,10 +651,10 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
active_chains = (1 << max_average_sig_antenna_i);
}
- IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n",
- average_sig[0], average_sig[1], average_sig[2]);
- IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n",
- max_average_sig, max_average_sig_antenna_i);
+ D_CALIB("average_sig: a %d b %d c %d\n", average_sig[0], average_sig[1],
+ average_sig[2]);
+ D_CALIB("max_average_sig = %d, antenna %d\n", max_average_sig,
+ max_average_sig_antenna_i);
/* Compare signal strengths for all 3 receivers. */
for (i = 0; i < NUM_RX_CHAINS; i++) {
@@ -673,9 +667,9 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
data->disconn_array[i] = 1;
else
active_chains |= (1 << i);
- IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d "
- "disconn_array[i] = %d\n",
- i, rssi_delta, data->disconn_array[i]);
+ D_CALIB("i = %d rssiDelta = %d "
+ "disconn_array[i] = %d\n", i, rssi_delta,
+ data->disconn_array[i]);
}
}
@@ -689,119 +683,110 @@ iwl4965_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
* To be safe, simply mask out any chains that we know
* are not on the device.
*/
- active_chains &= priv->hw_params.valid_rx_ant;
+ active_chains &= il->hw_params.valid_rx_ant;
num_tx_chains = 0;
for (i = 0; i < NUM_RX_CHAINS; i++) {
/* loops on all the bits of
- * priv->hw_setting.valid_tx_ant */
+ * il->hw_setting.valid_tx_ant */
u8 ant_msk = (1 << i);
- if (!(priv->hw_params.valid_tx_ant & ant_msk))
+ if (!(il->hw_params.valid_tx_ant & ant_msk))
continue;
num_tx_chains++;
if (data->disconn_array[i] == 0)
/* there is a Tx antenna connected */
break;
- if (num_tx_chains == priv->hw_params.tx_chains_num &&
+ if (num_tx_chains == il->hw_params.tx_chains_num &&
data->disconn_array[i]) {
/*
* If all chains are disconnected
* connect the first valid tx chain
*/
first_chain =
- iwl4965_find_first_chain(priv->cfg->valid_tx_ant);
+ il4965_find_first_chain(il->cfg->valid_tx_ant);
data->disconn_array[first_chain] = 0;
active_chains |= BIT(first_chain);
- IWL_DEBUG_CALIB(priv,
- "All Tx chains are disconnected W/A - declare %d as connected\n",
- first_chain);
+ D_CALIB("All Tx chains are disconnected"
+ "- declare %d as connected\n", first_chain);
break;
}
}
- if (active_chains != priv->hw_params.valid_rx_ant &&
- active_chains != priv->chain_noise_data.active_chains)
- IWL_DEBUG_CALIB(priv,
- "Detected that not all antennas are connected! "
- "Connected: %#x, valid: %#x.\n",
- active_chains, priv->hw_params.valid_rx_ant);
+ if (active_chains != il->hw_params.valid_rx_ant &&
+ active_chains != il->chain_noise_data.active_chains)
+ D_CALIB("Detected that not all antennas are connected! "
+ "Connected: %#x, valid: %#x.\n", active_chains,
+ il->hw_params.valid_rx_ant);
/* Save for use within RXON, TX, SCAN commands, etc. */
data->active_chains = active_chains;
- IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
- active_chains);
+ D_CALIB("active_chains (bitwise) = 0x%x\n", active_chains);
}
-static void iwl4965_gain_computation(struct iwl_priv *priv,
- u32 *average_noise,
- u16 min_average_noise_antenna_i,
- u32 min_average_noise,
- u8 default_chain)
+static void
+il4965_gain_computation(struct il_priv *il, u32 * average_noise,
+ u16 min_average_noise_antenna_i, u32 min_average_noise,
+ u8 default_chain)
{
int i, ret;
- struct iwl_chain_noise_data *data = &priv->chain_noise_data;
+ struct il_chain_noise_data *data = &il->chain_noise_data;
data->delta_gain_code[min_average_noise_antenna_i] = 0;
for (i = default_chain; i < NUM_RX_CHAINS; i++) {
s32 delta_g = 0;
- if (!(data->disconn_array[i]) &&
- (data->delta_gain_code[i] ==
- CHAIN_NOISE_DELTA_GAIN_INIT_VAL)) {
+ if (!data->disconn_array[i] &&
+ data->delta_gain_code[i] ==
+ CHAIN_NOISE_DELTA_GAIN_INIT_VAL) {
delta_g = average_noise[i] - min_average_noise;
- data->delta_gain_code[i] = (u8)((delta_g * 10) / 15);
+ data->delta_gain_code[i] = (u8) ((delta_g * 10) / 15);
data->delta_gain_code[i] =
- min(data->delta_gain_code[i],
+ min(data->delta_gain_code[i],
(u8) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
data->delta_gain_code[i] =
- (data->delta_gain_code[i] | (1 << 2));
+ (data->delta_gain_code[i] | (1 << 2));
} else {
data->delta_gain_code[i] = 0;
}
}
- IWL_DEBUG_CALIB(priv, "delta_gain_codes: a %d b %d c %d\n",
- data->delta_gain_code[0],
- data->delta_gain_code[1],
- data->delta_gain_code[2]);
+ D_CALIB("delta_gain_codes: a %d b %d c %d\n", data->delta_gain_code[0],
+ data->delta_gain_code[1], data->delta_gain_code[2]);
/* Differential gain gets sent to uCode only once */
if (!data->radio_write) {
- struct iwl_calib_diff_gain_cmd cmd;
+ struct il_calib_diff_gain_cmd cmd;
data->radio_write = 1;
memset(&cmd, 0, sizeof(cmd));
- cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
+ cmd.hdr.op_code = IL_PHY_CALIBRATE_DIFF_GAIN_CMD;
cmd.diff_gain_a = data->delta_gain_code[0];
cmd.diff_gain_b = data->delta_gain_code[1];
cmd.diff_gain_c = data->delta_gain_code[2];
- ret = iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
- sizeof(cmd), &cmd);
+ ret = il_send_cmd_pdu(il, C_PHY_CALIBRATION, sizeof(cmd), &cmd);
if (ret)
- IWL_DEBUG_CALIB(priv, "fail sending cmd "
- "REPLY_PHY_CALIBRATION_CMD\n");
+ D_CALIB("fail sending cmd " "C_PHY_CALIBRATION\n");
/* TODO we might want recalculate
* rx_chain in rxon cmd */
/* Mark so we run this algo only once! */
- data->state = IWL_CHAIN_NOISE_CALIBRATED;
+ data->state = IL_CHAIN_NOISE_CALIBRATED;
}
}
-
-
/*
- * Accumulate 16 beacons of signal and noise statistics for each of
+ * Accumulate 16 beacons of signal and noise stats for each of
* 3 receivers/antennas/rx-chains, then figure out:
* 1) Which antennas are connected.
* 2) Differential rx gain settings to balance the 3 receivers.
*/
-void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
+void
+il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp)
{
- struct iwl_chain_noise_data *data = NULL;
+ struct il_chain_noise_data *data = NULL;
u32 chain_noise_a;
u32 chain_noise_b;
@@ -809,8 +794,8 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
u32 chain_sig_a;
u32 chain_sig_b;
u32 chain_sig_c;
- u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
- u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
+ u32 average_sig[NUM_RX_CHAINS] = { INITIALIZATION_VALUE };
+ u32 average_noise[NUM_RX_CHAINS] = { INITIALIZATION_VALUE };
u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
u16 i = 0;
@@ -819,70 +804,69 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
u8 rxon_band24;
u8 stat_band24;
unsigned long flags;
- struct statistics_rx_non_phy *rx_info;
+ struct stats_rx_non_phy *rx_info;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct il_rxon_context *ctx = &il->ctx;
- if (priv->disable_chain_noise_cal)
+ if (il->disable_chain_noise_cal)
return;
- data = &(priv->chain_noise_data);
+ data = &(il->chain_noise_data);
/*
* Accumulate just the first "chain_noise_num_beacons" after
* the first association, then we're done forever.
*/
- if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
- if (data->state == IWL_CHAIN_NOISE_ALIVE)
- IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n");
+ if (data->state != IL_CHAIN_NOISE_ACCUMULATE) {
+ if (data->state == IL_CHAIN_NOISE_ALIVE)
+ D_CALIB("Wait for noise calib reset\n");
return;
}
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&il->lock, flags);
- rx_info = &(((struct iwl_notif_statistics *)stat_resp)->
- rx.general);
+ rx_info = &(((struct il_notif_stats *)stat_resp)->rx.general);
if (rx_info->interference_data_flag != INTERFERENCE_DATA_AVAILABLE) {
- IWL_DEBUG_CALIB(priv, " << Interference data unavailable\n");
- spin_unlock_irqrestore(&priv->lock, flags);
+ D_CALIB(" << Interference data unavailable\n");
+ spin_unlock_irqrestore(&il->lock, flags);
return;
}
rxon_band24 = !!(ctx->staging.flags & RXON_FLG_BAND_24G_MSK);
rxon_chnum = le16_to_cpu(ctx->staging.channel);
- stat_band24 = !!(((struct iwl_notif_statistics *)
- stat_resp)->flag &
- STATISTICS_REPLY_FLG_BAND_24G_MSK);
- stat_chnum = le32_to_cpu(((struct iwl_notif_statistics *)
- stat_resp)->flag) >> 16;
+ stat_band24 =
+ !!(((struct il_notif_stats *)stat_resp)->
+ flag & STATS_REPLY_FLG_BAND_24G_MSK);
+ stat_chnum =
+ le32_to_cpu(((struct il_notif_stats *)stat_resp)->flag) >> 16;
/* Make sure we accumulate data for just the associated channel
* (even if scanning). */
- if ((rxon_chnum != stat_chnum) || (rxon_band24 != stat_band24)) {
- IWL_DEBUG_CALIB(priv, "Stats not from chan=%d, band24=%d\n",
- rxon_chnum, rxon_band24);
- spin_unlock_irqrestore(&priv->lock, flags);
+ if (rxon_chnum != stat_chnum || rxon_band24 != stat_band24) {
+ D_CALIB("Stats not from chan=%d, band24=%d\n", rxon_chnum,
+ rxon_band24);
+ spin_unlock_irqrestore(&il->lock, flags);
return;
}
/*
- * Accumulate beacon statistics values across
+ * Accumulate beacon stats values across
* "chain_noise_num_beacons"
*/
- chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
- IN_BAND_FILTER;
- chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
- IN_BAND_FILTER;
- chain_noise_c = le32_to_cpu(rx_info->beacon_silence_rssi_c) &
- IN_BAND_FILTER;
+ chain_noise_a =
+ le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
+ chain_noise_b =
+ le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
+ chain_noise_c =
+ le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
chain_sig_a = le32_to_cpu(rx_info->beacon_rssi_a) & IN_BAND_FILTER;
chain_sig_b = le32_to_cpu(rx_info->beacon_rssi_b) & IN_BAND_FILTER;
chain_sig_c = le32_to_cpu(rx_info->beacon_rssi_c) & IN_BAND_FILTER;
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&il->lock, flags);
data->beacon_count++;
@@ -894,34 +878,33 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
data->chain_signal_b = (chain_sig_b + data->chain_signal_b);
data->chain_signal_c = (chain_sig_c + data->chain_signal_c);
- IWL_DEBUG_CALIB(priv, "chan=%d, band24=%d, beacon=%d\n",
- rxon_chnum, rxon_band24, data->beacon_count);
- IWL_DEBUG_CALIB(priv, "chain_sig: a %d b %d c %d\n",
- chain_sig_a, chain_sig_b, chain_sig_c);
- IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n",
- chain_noise_a, chain_noise_b, chain_noise_c);
+ D_CALIB("chan=%d, band24=%d, beacon=%d\n", rxon_chnum, rxon_band24,
+ data->beacon_count);
+ D_CALIB("chain_sig: a %d b %d c %d\n", chain_sig_a, chain_sig_b,
+ chain_sig_c);
+ D_CALIB("chain_noise: a %d b %d c %d\n", chain_noise_a, chain_noise_b,
+ chain_noise_c);
/* If this is the "chain_noise_num_beacons", determine:
* 1) Disconnected antennas (using signal strengths)
* 2) Differential gain (using silence noise) to balance receivers */
- if (data->beacon_count !=
- priv->cfg->base_params->chain_noise_num_beacons)
+ if (data->beacon_count != il->cfg->base_params->chain_noise_num_beacons)
return;
/* Analyze signal for disconnected antenna */
- iwl4965_find_disconn_antenna(priv, average_sig, data);
+ il4965_find_disconn_antenna(il, average_sig, data);
/* Analyze noise for rx balance */
- average_noise[0] = data->chain_noise_a /
- priv->cfg->base_params->chain_noise_num_beacons;
- average_noise[1] = data->chain_noise_b /
- priv->cfg->base_params->chain_noise_num_beacons;
- average_noise[2] = data->chain_noise_c /
- priv->cfg->base_params->chain_noise_num_beacons;
+ average_noise[0] =
+ data->chain_noise_a / il->cfg->base_params->chain_noise_num_beacons;
+ average_noise[1] =
+ data->chain_noise_b / il->cfg->base_params->chain_noise_num_beacons;
+ average_noise[2] =
+ data->chain_noise_c / il->cfg->base_params->chain_noise_num_beacons;
for (i = 0; i < NUM_RX_CHAINS; i++) {
- if (!(data->disconn_array[i]) &&
- (average_noise[i] <= min_average_noise)) {
+ if (!data->disconn_array[i] &&
+ average_noise[i] <= min_average_noise) {
/* This means that chain i is active and has
* lower noise values so far: */
min_average_noise = average_noise[i];
@@ -929,39 +912,37 @@ void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
}
}
- IWL_DEBUG_CALIB(priv, "average_noise: a %d b %d c %d\n",
- average_noise[0], average_noise[1],
- average_noise[2]);
+ D_CALIB("average_noise: a %d b %d c %d\n", average_noise[0],
+ average_noise[1], average_noise[2]);
- IWL_DEBUG_CALIB(priv, "min_average_noise = %d, antenna %d\n",
- min_average_noise, min_average_noise_antenna_i);
+ D_CALIB("min_average_noise = %d, antenna %d\n", min_average_noise,
+ min_average_noise_antenna_i);
- iwl4965_gain_computation(priv, average_noise,
- min_average_noise_antenna_i, min_average_noise,
- iwl4965_find_first_chain(priv->cfg->valid_rx_ant));
+ il4965_gain_computation(il, average_noise, min_average_noise_antenna_i,
+ min_average_noise,
+ il4965_find_first_chain(il->cfg->valid_rx_ant));
/* Some power changes may have been made during the calibration.
* Update and commit the RXON
*/
- if (priv->cfg->ops->lib->update_chain_flags)
- priv->cfg->ops->lib->update_chain_flags(priv);
+ if (il->cfg->ops->lib->update_chain_flags)
+ il->cfg->ops->lib->update_chain_flags(il);
- data->state = IWL_CHAIN_NOISE_DONE;
- iwl_legacy_power_update_mode(priv, false);
+ data->state = IL_CHAIN_NOISE_DONE;
+ il_power_update_mode(il, false);
}
-void iwl4965_reset_run_time_calib(struct iwl_priv *priv)
+void
+il4965_reset_run_time_calib(struct il_priv *il)
{
int i;
- memset(&(priv->sensitivity_data), 0,
- sizeof(struct iwl_sensitivity_data));
- memset(&(priv->chain_noise_data), 0,
- sizeof(struct iwl_chain_noise_data));
+ memset(&(il->sensitivity_data), 0, sizeof(struct il_sensitivity_data));
+ memset(&(il->chain_noise_data), 0, sizeof(struct il_chain_noise_data));
for (i = 0; i < NUM_RX_CHAINS; i++)
- priv->chain_noise_data.delta_gain_code[i] =
- CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
+ il->chain_noise_data.delta_gain_code[i] =
+ CHAIN_NOISE_DELTA_GAIN_INIT_VAL;
- /* Ask for statistics now, the uCode will send notification
+ /* Ask for stats now, the uCode will send notification
* periodically after association */
- iwl_legacy_send_statistics_request(priv, CMD_ASYNC, true);
+ il_send_stats_request(il, CMD_ASYNC, true);
}
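
For reference, the differential-gain encoding used by il4965_gain_computation() in the hunk above can be followed with concrete numbers. The sketch below is standalone and illustrative only, not driver code; MAX_DELTA_GAIN_CODE is a stand-in for CHAIN_NOISE_MAX_DELTA_GAIN_CODE, whose real value lives in the driver headers (15 is assumed here).

#include <stdio.h>

#define MAX_DELTA_GAIN_CODE 15	/* assumed stand-in for CHAIN_NOISE_MAX_DELTA_GAIN_CODE */

static unsigned char delta_gain_code(unsigned int noise, unsigned int min_noise)
{
	int delta_g = (int)(noise - min_noise);	/* excess noise vs. the quietest chain */
	unsigned char code = (unsigned char)((delta_g * 10) / 15);

	if (code > MAX_DELTA_GAIN_CODE)		/* clamp, as the driver does with min() */
		code = MAX_DELTA_GAIN_CODE;
	return code | (1 << 2);			/* the driver also ORs in bit 2 */
}

int main(void)
{
	/* a chain 3 units noisier than the quietest: (3 * 10) / 15 = 2, then | (1 << 2) = 6 */
	printf("code = 0x%x\n", delta_gain_code(103, 100));
	return 0;
}
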
diff --git a/drivers/net/wireless/iwlegacy/4965-debug.c b/drivers/net/wireless/iwlegacy/4965-debug.c
new file mode 100644
index 0000000..98ec39f
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965-debug.c
@@ -0,0 +1,746 @@
+/******************************************************************************
+*
+* GPL LICENSE SUMMARY
+*
+* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of version 2 of the GNU General Public License as
+* published by the Free Software Foundation.
+*
+* This program is distributed in the hope that it will be useful, but
+* WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with this program; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+* USA
+*
+* The full GNU General Public License is included in this distribution
+* in the file called LICENSE.GPL.
+*
+* Contact Information:
+* Intel Linux Wireless <ilw@linux.intel.com>
+* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+*****************************************************************************/
+#include "common.h"
+#include "4965.h"
+
+static const char *fmt_value = " %-30s %10u\n";
+static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
+static const char *fmt_header =
+ "%-32s current cumulative delta max\n";
+
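
The three format strings above drive every row written by this new debugfs file: fmt_value prints a single counter, and fmt_table prints the current/cumulative/delta/max quadruple under the column titles supplied by fmt_header. A minimal standalone sketch of one such row, with invented counter values:

#include <stdio.h>

static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";	/* same layout as above */

int main(void)
{
	/*                 name       current  cumulative     delta        max */
	printf(fmt_table, "plcp_err:", 12u, 340u, 3u, 40u);
	return 0;
}
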
+static int
+il4965_stats_flag(struct il_priv *il, char *buf, int bufsz)
+{
+ int p = 0;
+ u32 flag;
+
+ flag = le32_to_cpu(il->_4965.stats.flag);
+
+ p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
+ if (flag & UCODE_STATS_CLEAR_MSK)
+ p += scnprintf(buf + p, bufsz - p,
+ "\tStatistics have been cleared\n");
+ p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
+ (flag & UCODE_STATS_FREQUENCY_MSK) ? "2.4 GHz" :
+ "5.2 GHz");
+ p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
+ (flag & UCODE_STATS_NARROW_BAND_MSK) ? "enabled" :
+ "disabled");
+
+ return p;
+}
+
+ssize_t
+il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz =
+ sizeof(struct stats_rx_phy) * 40 +
+ sizeof(struct stats_rx_non_phy) * 40 +
+ sizeof(struct stats_rx_ht_phy) * 40 + 400;
+ ssize_t ret;
+ struct stats_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+ struct stats_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
+ struct stats_rx_non_phy *general, *accum_general;
+ struct stats_rx_non_phy *delta_general, *max_general;
+ struct stats_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /*
+ * the statistics information displayed here is based on
+ * the last stats notification from uCode and might not
+ * reflect the current uCode activity
+ */
+ ofdm = &il->_4965.stats.rx.ofdm;
+ cck = &il->_4965.stats.rx.cck;
+ general = &il->_4965.stats.rx.general;
+ ht = &il->_4965.stats.rx.ofdm_ht;
+ accum_ofdm = &il->_4965.accum_stats.rx.ofdm;
+ accum_cck = &il->_4965.accum_stats.rx.cck;
+ accum_general = &il->_4965.accum_stats.rx.general;
+ accum_ht = &il->_4965.accum_stats.rx.ofdm_ht;
+ delta_ofdm = &il->_4965.delta_stats.rx.ofdm;
+ delta_cck = &il->_4965.delta_stats.rx.cck;
+ delta_general = &il->_4965.delta_stats.rx.general;
+ delta_ht = &il->_4965.delta_stats.rx.ofdm_ht;
+ max_ofdm = &il->_4965.max_delta.rx.ofdm;
+ max_cck = &il->_4965.max_delta.rx.cck;
+ max_general = &il->_4965.max_delta.rx.general;
+ max_ht = &il->_4965.max_delta.rx.ofdm_ht;
+
+ pos += il4965_stats_flag(il, buf, bufsz);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - OFDM:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:",
+ le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt,
+ delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:",
+ le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+ delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
+ le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+ delta_ofdm->plcp_err, max_ofdm->plcp_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
+ le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+ delta_ofdm->crc32_err, max_ofdm->crc32_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
+ le32_to_cpu(ofdm->overrun_err), accum_ofdm->overrun_err,
+ delta_ofdm->overrun_err, max_ofdm->overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
+ le32_to_cpu(ofdm->early_overrun_err),
+ accum_ofdm->early_overrun_err,
+ delta_ofdm->early_overrun_err,
+ max_ofdm->early_overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
+ le32_to_cpu(ofdm->crc32_good), accum_ofdm->crc32_good,
+ delta_ofdm->crc32_good, max_ofdm->crc32_good);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:",
+ le32_to_cpu(ofdm->false_alarm_cnt),
+ accum_ofdm->false_alarm_cnt, delta_ofdm->false_alarm_cnt,
+ max_ofdm->false_alarm_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:",
+ le32_to_cpu(ofdm->fina_sync_err_cnt),
+ accum_ofdm->fina_sync_err_cnt,
+ delta_ofdm->fina_sync_err_cnt,
+ max_ofdm->fina_sync_err_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:",
+ le32_to_cpu(ofdm->sfd_timeout), accum_ofdm->sfd_timeout,
+ delta_ofdm->sfd_timeout, max_ofdm->sfd_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:",
+ le32_to_cpu(ofdm->fina_timeout), accum_ofdm->fina_timeout,
+ delta_ofdm->fina_timeout, max_ofdm->fina_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:",
+ le32_to_cpu(ofdm->unresponded_rts),
+ accum_ofdm->unresponded_rts, delta_ofdm->unresponded_rts,
+ max_ofdm->unresponded_rts);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(ofdm->rxe_frame_limit_overrun),
+ accum_ofdm->rxe_frame_limit_overrun,
+ delta_ofdm->rxe_frame_limit_overrun,
+ max_ofdm->rxe_frame_limit_overrun);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:",
+ le32_to_cpu(ofdm->sent_ack_cnt), accum_ofdm->sent_ack_cnt,
+ delta_ofdm->sent_ack_cnt, max_ofdm->sent_ack_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:",
+ le32_to_cpu(ofdm->sent_cts_cnt), accum_ofdm->sent_cts_cnt,
+ delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:",
+ le32_to_cpu(ofdm->sent_ba_rsp_cnt),
+ accum_ofdm->sent_ba_rsp_cnt, delta_ofdm->sent_ba_rsp_cnt,
+ max_ofdm->sent_ba_rsp_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:",
+ le32_to_cpu(ofdm->dsp_self_kill),
+ accum_ofdm->dsp_self_kill, delta_ofdm->dsp_self_kill,
+ max_ofdm->dsp_self_kill);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
+ le32_to_cpu(ofdm->mh_format_err),
+ accum_ofdm->mh_format_err, delta_ofdm->mh_format_err,
+ max_ofdm->mh_format_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "re_acq_main_rssi_sum:",
+ le32_to_cpu(ofdm->re_acq_main_rssi_sum),
+ accum_ofdm->re_acq_main_rssi_sum,
+ delta_ofdm->re_acq_main_rssi_sum,
+ max_ofdm->re_acq_main_rssi_sum);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - CCK:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "ina_cnt:",
+ le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+ delta_cck->ina_cnt, max_cck->ina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_cnt:",
+ le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+ delta_cck->fina_cnt, max_cck->fina_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
+ le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+ delta_cck->plcp_err, max_cck->plcp_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
+ le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+ delta_cck->crc32_err, max_cck->crc32_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
+ le32_to_cpu(cck->overrun_err), accum_cck->overrun_err,
+ delta_cck->overrun_err, max_cck->overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
+ le32_to_cpu(cck->early_overrun_err),
+ accum_cck->early_overrun_err,
+ delta_cck->early_overrun_err, max_cck->early_overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
+ le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+ delta_cck->crc32_good, max_cck->crc32_good);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "false_alarm_cnt:",
+ le32_to_cpu(cck->false_alarm_cnt),
+ accum_cck->false_alarm_cnt, delta_cck->false_alarm_cnt,
+ max_cck->false_alarm_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_sync_err_cnt:",
+ le32_to_cpu(cck->fina_sync_err_cnt),
+ accum_cck->fina_sync_err_cnt,
+ delta_cck->fina_sync_err_cnt, max_cck->fina_sync_err_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sfd_timeout:",
+ le32_to_cpu(cck->sfd_timeout), accum_cck->sfd_timeout,
+ delta_cck->sfd_timeout, max_cck->sfd_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "fina_timeout:",
+ le32_to_cpu(cck->fina_timeout), accum_cck->fina_timeout,
+ delta_cck->fina_timeout, max_cck->fina_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "unresponded_rts:",
+ le32_to_cpu(cck->unresponded_rts),
+ accum_cck->unresponded_rts, delta_cck->unresponded_rts,
+ max_cck->unresponded_rts);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "rxe_frame_lmt_ovrun:",
+ le32_to_cpu(cck->rxe_frame_limit_overrun),
+ accum_cck->rxe_frame_limit_overrun,
+ delta_cck->rxe_frame_limit_overrun,
+ max_cck->rxe_frame_limit_overrun);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ack_cnt:",
+ le32_to_cpu(cck->sent_ack_cnt), accum_cck->sent_ack_cnt,
+ delta_cck->sent_ack_cnt, max_cck->sent_ack_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_cts_cnt:",
+ le32_to_cpu(cck->sent_cts_cnt), accum_cck->sent_cts_cnt,
+ delta_cck->sent_cts_cnt, max_cck->sent_cts_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sent_ba_rsp_cnt:",
+ le32_to_cpu(cck->sent_ba_rsp_cnt),
+ accum_cck->sent_ba_rsp_cnt, delta_cck->sent_ba_rsp_cnt,
+ max_cck->sent_ba_rsp_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_self_kill:",
+ le32_to_cpu(cck->dsp_self_kill), accum_cck->dsp_self_kill,
+ delta_cck->dsp_self_kill, max_cck->dsp_self_kill);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
+ le32_to_cpu(cck->mh_format_err), accum_cck->mh_format_err,
+ delta_cck->mh_format_err, max_cck->mh_format_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "re_acq_main_rssi_sum:",
+ le32_to_cpu(cck->re_acq_main_rssi_sum),
+ accum_cck->re_acq_main_rssi_sum,
+ delta_cck->re_acq_main_rssi_sum,
+ max_cck->re_acq_main_rssi_sum);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - GENERAL:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_cts:",
+ le32_to_cpu(general->bogus_cts), accum_general->bogus_cts,
+ delta_general->bogus_cts, max_general->bogus_cts);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "bogus_ack:",
+ le32_to_cpu(general->bogus_ack), accum_general->bogus_ack,
+ delta_general->bogus_ack, max_general->bogus_ack);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "non_bssid_frames:",
+ le32_to_cpu(general->non_bssid_frames),
+ accum_general->non_bssid_frames,
+ delta_general->non_bssid_frames,
+ max_general->non_bssid_frames);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "filtered_frames:",
+ le32_to_cpu(general->filtered_frames),
+ accum_general->filtered_frames,
+ delta_general->filtered_frames,
+ max_general->filtered_frames);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "non_channel_beacons:",
+ le32_to_cpu(general->non_channel_beacons),
+ accum_general->non_channel_beacons,
+ delta_general->non_channel_beacons,
+ max_general->non_channel_beacons);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_beacons:",
+ le32_to_cpu(general->channel_beacons),
+ accum_general->channel_beacons,
+ delta_general->channel_beacons,
+ max_general->channel_beacons);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "num_missed_bcon:",
+ le32_to_cpu(general->num_missed_bcon),
+ accum_general->num_missed_bcon,
+ delta_general->num_missed_bcon,
+ max_general->num_missed_bcon);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "adc_rx_saturation_time:",
+ le32_to_cpu(general->adc_rx_saturation_time),
+ accum_general->adc_rx_saturation_time,
+ delta_general->adc_rx_saturation_time,
+ max_general->adc_rx_saturation_time);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "ina_detect_search_tm:",
+ le32_to_cpu(general->ina_detection_search_time),
+ accum_general->ina_detection_search_time,
+ delta_general->ina_detection_search_time,
+ max_general->ina_detection_search_time);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "beacon_silence_rssi_a:",
+ le32_to_cpu(general->beacon_silence_rssi_a),
+ accum_general->beacon_silence_rssi_a,
+ delta_general->beacon_silence_rssi_a,
+ max_general->beacon_silence_rssi_a);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "beacon_silence_rssi_b:",
+ le32_to_cpu(general->beacon_silence_rssi_b),
+ accum_general->beacon_silence_rssi_b,
+ delta_general->beacon_silence_rssi_b,
+ max_general->beacon_silence_rssi_b);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "beacon_silence_rssi_c:",
+ le32_to_cpu(general->beacon_silence_rssi_c),
+ accum_general->beacon_silence_rssi_c,
+ delta_general->beacon_silence_rssi_c,
+ max_general->beacon_silence_rssi_c);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "interference_data_flag:",
+ le32_to_cpu(general->interference_data_flag),
+ accum_general->interference_data_flag,
+ delta_general->interference_data_flag,
+ max_general->interference_data_flag);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "channel_load:",
+ le32_to_cpu(general->channel_load),
+ accum_general->channel_load, delta_general->channel_load,
+ max_general->channel_load);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "dsp_false_alarms:",
+ le32_to_cpu(general->dsp_false_alarms),
+ accum_general->dsp_false_alarms,
+ delta_general->dsp_false_alarms,
+ max_general->dsp_false_alarms);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_a:",
+ le32_to_cpu(general->beacon_rssi_a),
+ accum_general->beacon_rssi_a,
+ delta_general->beacon_rssi_a, max_general->beacon_rssi_a);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_b:",
+ le32_to_cpu(general->beacon_rssi_b),
+ accum_general->beacon_rssi_b,
+ delta_general->beacon_rssi_b, max_general->beacon_rssi_b);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_rssi_c:",
+ le32_to_cpu(general->beacon_rssi_c),
+ accum_general->beacon_rssi_c,
+ delta_general->beacon_rssi_c, max_general->beacon_rssi_c);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_a:",
+ le32_to_cpu(general->beacon_energy_a),
+ accum_general->beacon_energy_a,
+ delta_general->beacon_energy_a,
+ max_general->beacon_energy_a);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_b:",
+ le32_to_cpu(general->beacon_energy_b),
+ accum_general->beacon_energy_b,
+ delta_general->beacon_energy_b,
+ max_general->beacon_energy_b);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "beacon_energy_c:",
+ le32_to_cpu(general->beacon_energy_c),
+ accum_general->beacon_energy_c,
+ delta_general->beacon_energy_c,
+ max_general->beacon_energy_c);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_Rx - OFDM_HT:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "plcp_err:",
+ le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
+ delta_ht->plcp_err, max_ht->plcp_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "overrun_err:",
+ le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
+ delta_ht->overrun_err, max_ht->overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "early_overrun_err:",
+ le32_to_cpu(ht->early_overrun_err),
+ accum_ht->early_overrun_err, delta_ht->early_overrun_err,
+ max_ht->early_overrun_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_good:",
+ le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
+ delta_ht->crc32_good, max_ht->crc32_good);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "crc32_err:",
+ le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
+ delta_ht->crc32_err, max_ht->crc32_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "mh_format_err:",
+ le32_to_cpu(ht->mh_format_err), accum_ht->mh_format_err,
+ delta_ht->mh_format_err, max_ht->mh_format_err);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_crc32_good:",
+ le32_to_cpu(ht->agg_crc32_good), accum_ht->agg_crc32_good,
+ delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_mpdu_cnt:",
+ le32_to_cpu(ht->agg_mpdu_cnt), accum_ht->agg_mpdu_cnt,
+ delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg_cnt:",
+ le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
+ delta_ht->agg_cnt, max_ht->agg_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "unsupport_mcs:",
+ le32_to_cpu(ht->unsupport_mcs), accum_ht->unsupport_mcs,
+ delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t
+il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = (sizeof(struct stats_tx) * 48) + 250;
+ ssize_t ret;
+ struct stats_tx *tx, *accum_tx, *delta_tx, *max_tx;
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /* the statistics information displayed here is based on
+ * the last stats notification from uCode and might not
+ * reflect the current uCode activity
+ */
+ tx = &il->_4965.stats.tx;
+ accum_tx = &il->_4965.accum_stats.tx;
+ delta_tx = &il->_4965.delta_stats.tx;
+ max_tx = &il->_4965.max_delta.tx;
+
+ pos += il4965_stats_flag(il, buf, bufsz);
+ pos += scnprintf(buf + pos, bufsz - pos, fmt_header, "Statistics_Tx:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "preamble:",
+ le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt,
+ delta_tx->preamble_cnt, max_tx->preamble_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_detected_cnt:",
+ le32_to_cpu(tx->rx_detected_cnt),
+ accum_tx->rx_detected_cnt, delta_tx->rx_detected_cnt,
+ max_tx->rx_detected_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_defer_cnt:",
+ le32_to_cpu(tx->bt_prio_defer_cnt),
+ accum_tx->bt_prio_defer_cnt, delta_tx->bt_prio_defer_cnt,
+ max_tx->bt_prio_defer_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "bt_prio_kill_cnt:",
+ le32_to_cpu(tx->bt_prio_kill_cnt),
+ accum_tx->bt_prio_kill_cnt, delta_tx->bt_prio_kill_cnt,
+ max_tx->bt_prio_kill_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "few_bytes_cnt:",
+ le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt,
+ delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "cts_timeout:",
+ le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+ delta_tx->cts_timeout, max_tx->cts_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "ack_timeout:",
+ le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout,
+ delta_tx->ack_timeout, max_tx->ack_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "expected_ack_cnt:",
+ le32_to_cpu(tx->expected_ack_cnt),
+ accum_tx->expected_ack_cnt, delta_tx->expected_ack_cnt,
+ max_tx->expected_ack_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "actual_ack_cnt:",
+ le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt,
+ delta_tx->actual_ack_cnt, max_tx->actual_ack_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "dump_msdu_cnt:",
+ le32_to_cpu(tx->dump_msdu_cnt), accum_tx->dump_msdu_cnt,
+ delta_tx->dump_msdu_cnt, max_tx->dump_msdu_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "abort_nxt_frame_mismatch:",
+ le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
+ accum_tx->burst_abort_next_frame_mismatch_cnt,
+ delta_tx->burst_abort_next_frame_mismatch_cnt,
+ max_tx->burst_abort_next_frame_mismatch_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "abort_missing_nxt_frame:",
+ le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
+ accum_tx->burst_abort_missing_next_frame_cnt,
+ delta_tx->burst_abort_missing_next_frame_cnt,
+ max_tx->burst_abort_missing_next_frame_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "cts_timeout_collision:",
+ le32_to_cpu(tx->cts_timeout_collision),
+ accum_tx->cts_timeout_collision,
+ delta_tx->cts_timeout_collision,
+ max_tx->cts_timeout_collision);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "ack_ba_timeout_collision:",
+ le32_to_cpu(tx->ack_or_ba_timeout_collision),
+ accum_tx->ack_or_ba_timeout_collision,
+ delta_tx->ack_or_ba_timeout_collision,
+ max_tx->ack_or_ba_timeout_collision);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg ba_timeout:",
+ le32_to_cpu(tx->agg.ba_timeout), accum_tx->agg.ba_timeout,
+ delta_tx->agg.ba_timeout, max_tx->agg.ba_timeout);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "agg ba_resched_frames:",
+ le32_to_cpu(tx->agg.ba_reschedule_frames),
+ accum_tx->agg.ba_reschedule_frames,
+ delta_tx->agg.ba_reschedule_frames,
+ max_tx->agg.ba_reschedule_frames);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "agg scd_query_agg_frame:",
+ le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
+ accum_tx->agg.scd_query_agg_frame_cnt,
+ delta_tx->agg.scd_query_agg_frame_cnt,
+ max_tx->agg.scd_query_agg_frame_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "agg scd_query_no_agg:",
+ le32_to_cpu(tx->agg.scd_query_no_agg),
+ accum_tx->agg.scd_query_no_agg,
+ delta_tx->agg.scd_query_no_agg,
+ max_tx->agg.scd_query_no_agg);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg scd_query_agg:",
+ le32_to_cpu(tx->agg.scd_query_agg),
+ accum_tx->agg.scd_query_agg, delta_tx->agg.scd_query_agg,
+ max_tx->agg.scd_query_agg);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "agg scd_query_mismatch:",
+ le32_to_cpu(tx->agg.scd_query_mismatch),
+ accum_tx->agg.scd_query_mismatch,
+ delta_tx->agg.scd_query_mismatch,
+ max_tx->agg.scd_query_mismatch);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg frame_not_ready:",
+ le32_to_cpu(tx->agg.frame_not_ready),
+ accum_tx->agg.frame_not_ready,
+ delta_tx->agg.frame_not_ready,
+ max_tx->agg.frame_not_ready);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg underrun:",
+ le32_to_cpu(tx->agg.underrun), accum_tx->agg.underrun,
+ delta_tx->agg.underrun, max_tx->agg.underrun);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg bt_prio_kill:",
+ le32_to_cpu(tx->agg.bt_prio_kill),
+ accum_tx->agg.bt_prio_kill, delta_tx->agg.bt_prio_kill,
+ max_tx->agg.bt_prio_kill);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "agg rx_ba_rsp_cnt:",
+ le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
+ accum_tx->agg.rx_ba_rsp_cnt, delta_tx->agg.rx_ba_rsp_cnt,
+ max_tx->agg.rx_ba_rsp_cnt);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+ssize_t
+il4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char *buf;
+ int bufsz = sizeof(struct stats_general) * 10 + 300;
+ ssize_t ret;
+ struct stats_general_common *general, *accum_general;
+ struct stats_general_common *delta_general, *max_general;
+ struct stats_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+ struct stats_div *div, *accum_div, *delta_div, *max_div;
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ /* the statistics information displayed here is based on
+ * the last stats notification from uCode and might not
+ * reflect the current uCode activity
+ */
+ general = &il->_4965.stats.general.common;
+ dbg = &il->_4965.stats.general.common.dbg;
+ div = &il->_4965.stats.general.common.div;
+ accum_general = &il->_4965.accum_stats.general.common;
+ accum_dbg = &il->_4965.accum_stats.general.common.dbg;
+ accum_div = &il->_4965.accum_stats.general.common.div;
+ delta_general = &il->_4965.delta_stats.general.common;
+ max_general = &il->_4965.max_delta.general.common;
+ delta_dbg = &il->_4965.delta_stats.general.common.dbg;
+ max_dbg = &il->_4965.max_delta.general.common.dbg;
+ delta_div = &il->_4965.delta_stats.general.common.div;
+ max_div = &il->_4965.max_delta.general.common.div;
+
+ pos += il4965_stats_flag(il, buf, bufsz);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_header,
+ "Statistics_General:");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_value, "temperature:",
+ le32_to_cpu(general->temperature));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_value, "ttl_timestamp:",
+ le32_to_cpu(general->ttl_timestamp));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_check:",
+ le32_to_cpu(dbg->burst_check), accum_dbg->burst_check,
+ delta_dbg->burst_check, max_dbg->burst_check);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "burst_count:",
+ le32_to_cpu(dbg->burst_count), accum_dbg->burst_count,
+ delta_dbg->burst_count, max_dbg->burst_count);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table,
+ "wait_for_silence_timeout_count:",
+ le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
+ accum_dbg->wait_for_silence_timeout_cnt,
+ delta_dbg->wait_for_silence_timeout_cnt,
+ max_dbg->wait_for_silence_timeout_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "sleep_time:",
+ le32_to_cpu(general->sleep_time),
+ accum_general->sleep_time, delta_general->sleep_time,
+ max_general->sleep_time);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_out:",
+ le32_to_cpu(general->slots_out), accum_general->slots_out,
+ delta_general->slots_out, max_general->slots_out);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "slots_idle:",
+ le32_to_cpu(general->slots_idle),
+ accum_general->slots_idle, delta_general->slots_idle,
+ max_general->slots_idle);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_a:",
+ le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+ delta_div->tx_on_a, max_div->tx_on_a);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "tx_on_b:",
+ le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+ delta_div->tx_on_b, max_div->tx_on_b);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "exec_time:",
+ le32_to_cpu(div->exec_time), accum_div->exec_time,
+ delta_div->exec_time, max_div->exec_time);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "probe_time:",
+ le32_to_cpu(div->probe_time), accum_div->probe_time,
+ delta_div->probe_time, max_div->probe_time);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "rx_enable_counter:",
+ le32_to_cpu(general->rx_enable_counter),
+ accum_general->rx_enable_counter,
+ delta_general->rx_enable_counter,
+ max_general->rx_enable_counter);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, fmt_table, "num_of_sos_states:",
+ le32_to_cpu(general->num_of_sos_states),
+ accum_general->num_of_sos_states,
+ delta_general->num_of_sos_states,
+ max_general->num_of_sos_states);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
new file mode 100644
index 0000000..1667232
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -0,0 +1,6515 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci-aspm.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/mac80211.h>
+
+#include <asm/div64.h>
+
+#define DRV_NAME "iwl4965"
+
+#include "common.h"
+#include "4965.h"
+
+/******************************************************************************
+ *
+ * module boilerplate
+ *
+ ******************************************************************************/
+
+/*
+ * module name, copyright, version, etc.
+ */
+#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+#define VD "d"
+#else
+#define VD
+#endif
+
+#define DRV_VERSION IWLWIFI_VERSION VD
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_VERSION(DRV_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("iwl4965");
+
+void
+il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status)
+{
+ if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
+ IL_ERR("Tx flush command to flush out all frames\n");
+ if (!test_bit(S_EXIT_PENDING, &il->status))
+ queue_work(il->workqueue, &il->tx_flush);
+ }
+}
+
+/*
+ * EEPROM
+ */
+struct il_mod_params il4965_mod_params = {
+ .amsdu_size_8K = 1,
+ .restart_fw = 1,
+ /* the rest are 0 by default */
+};
+
+void
+il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq)
+{
+ unsigned long flags;
+ int i;
+ spin_lock_irqsave(&rxq->lock, flags);
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+ /* Fill the rx_used queue with _all_ of the Rx buffers */
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
+ /* In the reset function, these buffers may have been allocated
+ * to an SKB, so we need to unmap and free potential storage */
+ if (rxq->pool[i].page != NULL) {
+ pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ __il_free_pages(il, rxq->pool[i].page);
+ rxq->pool[i].page = NULL;
+ }
+ list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+ }
+
+ for (i = 0; i < RX_QUEUE_SIZE; i++)
+ rxq->queue[i] = NULL;
+
+ /* Set us so that we have processed and used all buffers, but have
+ * not restocked the Rx queue with fresh buffers */
+ rxq->read = rxq->write = 0;
+ rxq->write_actual = 0;
+ rxq->free_count = 0;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+}
+
+int
+il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq)
+{
+ u32 rb_size;
+ const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
+ u32 rb_timeout = 0;
+
+ if (il->cfg->mod_params->amsdu_size_8K)
+ rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
+ else
+ rb_size = FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
+
+ /* Stop Rx DMA */
+ il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+
+ /* Reset driver's Rx queue write idx */
+ il_wr(il, FH49_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+
+ /* Tell device where to find RBD circular buffer in DRAM */
+ il_wr(il, FH49_RSCSR_CHNL0_RBDCB_BASE_REG, (u32) (rxq->bd_dma >> 8));
+
+ /* Tell device where in DRAM to update its Rx status */
+ il_wr(il, FH49_RSCSR_CHNL0_STTS_WPTR_REG, rxq->rb_stts_dma >> 4);
+
+ /* Enable Rx DMA
+ * Direct rx interrupts to hosts
+ * Rx buffer size 4 or 8k
+ * RB timeout 0x10
+ * 256 RBDs
+ */
+ il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+ FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+ FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
+ rb_size |
+ (rb_timeout << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
+ (rfdnlog << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+ /* Set interrupt coalescing timer to default (2048 usecs) */
+ il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_TIMEOUT_DEF);
+
+ return 0;
+}
+
+static void
+il4965_set_pwr_vmain(struct il_priv *il)
+{
+/*
+ * (for documentation purposes)
+ * to set power to V_AUX, do:
+
+ if (pci_pme_capable(il->pci_dev, PCI_D3cold))
+ il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+ */
+
+ il_set_bits_mask_prph(il, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
+ ~APMG_PS_CTRL_MSK_PWR_SRC);
+}
+
+int
+il4965_hw_nic_init(struct il_priv *il)
+{
+ unsigned long flags;
+ struct il_rx_queue *rxq = &il->rxq;
+ int ret;
+
+ /* nic_init */
+ spin_lock_irqsave(&il->lock, flags);
+ il->cfg->ops->lib->apm_ops.init(il);
+
+ /* Set interrupt coalescing calibration timer to default (512 usecs) */
+ il_write8(il, CSR_INT_COALESCING, IL_HOST_INT_CALIB_TIMEOUT_DEF);
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ il4965_set_pwr_vmain(il);
+
+ il->cfg->ops->lib->apm_ops.config(il);
+
+ /* Allocate the RX queue, or reset if it is already allocated */
+ if (!rxq->bd) {
+ ret = il_rx_queue_alloc(il);
+ if (ret) {
+ IL_ERR("Unable to initialize Rx queue\n");
+ return -ENOMEM;
+ }
+ } else
+ il4965_rx_queue_reset(il, rxq);
+
+ il4965_rx_replenish(il);
+
+ il4965_rx_init(il, rxq);
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ rxq->need_update = 1;
+ il_rx_queue_update_write_ptr(il, rxq);
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ /* Allocate or reset and init all Tx and Command queues */
+ if (!il->txq) {
+ ret = il4965_txq_ctx_alloc(il);
+ if (ret)
+ return ret;
+ } else
+ il4965_txq_ctx_reset(il);
+
+ set_bit(S_INIT, &il->status);
+
+ return 0;
+}
+
+/**
+ * il4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
+ */
+static inline __le32
+il4965_dma_addr2rbd_ptr(struct il_priv *il, dma_addr_t dma_addr)
+{
+ return cpu_to_le32((u32) (dma_addr >> 8));
+}
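
The >> 8 above is the flip side of the alignment checks applied later in il4965_rx_allocate(): only bits 8 and up of the DMA address fit into the read buffer descriptor, so the page must be 256-byte aligned for the address to survive the round trip. A standalone sketch with an illustrative address:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t dma_addr = 0x12345600ULL;		/* 256-byte aligned, as the later BUG_ON demands */
	uint32_t rbd = (uint32_t)(dma_addr >> 8);	/* value that ends up in the RBD */

	/* shifting back up recovers the address because the low 8 bits were zero */
	printf("rbd=0x%x -> addr=0x%llx\n", rbd, (unsigned long long)rbd << 8);
	return 0;
}
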
+
+/**
+ * il4965_rx_queue_restock - refill RX queue from pre-allocated pool
+ *
+ * If there are slots in the RX queue that need to be restocked,
+ * and we have free pre-allocated buffers, fill the ranks as much
+ * as we can, pulling from rx_free.
+ *
+ * This moves the 'write' idx forward to catch up with 'processed', and
+ * also updates the memory address in the firmware to reference the new
+ * target buffer.
+ */
+void
+il4965_rx_queue_restock(struct il_priv *il)
+{
+ struct il_rx_queue *rxq = &il->rxq;
+ struct list_head *element;
+ struct il_rx_buf *rxb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rxq->lock, flags);
+ while (il_rx_queue_space(rxq) > 0 && rxq->free_count) {
+ /* The overwritten rxb must be a used one */
+ rxb = rxq->queue[rxq->write];
+ BUG_ON(rxb && rxb->page);
+
+ /* Get next free Rx buffer, remove from free list */
+ element = rxq->rx_free.next;
+ rxb = list_entry(element, struct il_rx_buf, list);
+ list_del(element);
+
+ /* Point to Rx buffer via next RBD in circular buffer */
+ rxq->bd[rxq->write] =
+ il4965_dma_addr2rbd_ptr(il, rxb->page_dma);
+ rxq->queue[rxq->write] = rxb;
+ rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
+ rxq->free_count--;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ /* If the pre-allocated buffer pool is dropping low, schedule to
+ * refill it */
+ if (rxq->free_count <= RX_LOW_WATERMARK)
+ queue_work(il->workqueue, &il->rx_replenish);
+
+ /* If we've added more space for the firmware to place data, tell it.
+ * Increment device's write pointer in multiples of 8. */
+ if (rxq->write_actual != (rxq->write & ~0x7)) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ rxq->need_update = 1;
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ il_rx_queue_update_write_ptr(il, rxq);
+ }
+}
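
The closing check in il4965_rx_queue_restock() only bothers the hardware once the write index has crossed another multiple of 8: write_actual remembers the last value handed to the device, and per the comment above the device write pointer advances in steps of 8 RBDs (the rounding itself presumably happens in il_rx_queue_update_write_ptr(), which is not part of this hunk). A small standalone illustration:

#include <stdio.h>

int main(void)
{
	unsigned int write = 13;			/* RBDs the driver has prepared */
	unsigned int device_ptr = write & ~0x7u;	/* what the hardware is told about */

	printf("write=%u -> device write pointer=%u\n", write, device_ptr);
	return 0;
}
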
+
+/**
+ * il4965_rx_replenish - Move all used packets from rx_used to rx_free
+ *
+ * When moving to rx_free an SKB is allocated for the slot.
+ *
+ * Also restock the Rx queue via il_rx_queue_restock.
+ * This is called as a scheduled work item (except for during initialization)
+ */
+static void
+il4965_rx_allocate(struct il_priv *il, gfp_t priority)
+{
+ struct il_rx_queue *rxq = &il->rxq;
+ struct list_head *element;
+ struct il_rx_buf *rxb;
+ struct page *page;
+ unsigned long flags;
+ gfp_t gfp_mask = priority;
+
+ while (1) {
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ if (rxq->free_count > RX_LOW_WATERMARK)
+ gfp_mask |= __GFP_NOWARN;
+
+ if (il->hw_params.rx_page_order > 0)
+ gfp_mask |= __GFP_COMP;
+
+ /* Alloc a new receive buffer */
+ page = alloc_pages(gfp_mask, il->hw_params.rx_page_order);
+ if (!page) {
+ if (net_ratelimit())
+ D_INFO("alloc_pages failed, " "order: %d\n",
+ il->hw_params.rx_page_order);
+
+ if (rxq->free_count <= RX_LOW_WATERMARK &&
+ net_ratelimit())
+ IL_ERR("Failed to alloc_pages with %s. "
+ "Only %u free buffers remaining.\n",
+ priority ==
+ GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
+ rxq->free_count);
+ /* We don't reschedule replenish work here -- we will
+ * call the restock method and if it still needs
+ * more buffers it will schedule replenish */
+ return;
+ }
+
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ if (list_empty(&rxq->rx_used)) {
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ __free_pages(page, il->hw_params.rx_page_order);
+ return;
+ }
+ element = rxq->rx_used.next;
+ rxb = list_entry(element, struct il_rx_buf, list);
+ list_del(element);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ BUG_ON(rxb->page);
+ rxb->page = page;
+ /* Get physical address of the RB */
+ rxb->page_dma =
+ pci_map_page(il->pci_dev, page, 0,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ /* dma address must be no more than 36 bits */
+ BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
+ /* and also 256 byte aligned! */
+ BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
+
+ spin_lock_irqsave(&rxq->lock, flags);
+
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ il->alloc_rxb_page++;
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+ }
+}
+
+void
+il4965_rx_replenish(struct il_priv *il)
+{
+ unsigned long flags;
+
+ il4965_rx_allocate(il, GFP_KERNEL);
+
+ spin_lock_irqsave(&il->lock, flags);
+ il4965_rx_queue_restock(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+}
+
+void
+il4965_rx_replenish_now(struct il_priv *il)
+{
+ il4965_rx_allocate(il, GFP_ATOMIC);
+
+ il4965_rx_queue_restock(il);
+}
+
+/* Assumes that the page field of the buffers in 'pool' is kept accurate.
+ * If a page has been detached, the pool entry needs to have its page set
+ * to NULL. This free routine walks the list of pool entries and, if the
+ * page is non-NULL, unmaps and frees it.
+ */
+void
+il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq)
+{
+ int i;
+ for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
+ if (rxq->pool[i].page != NULL) {
+ pci_unmap_page(il->pci_dev, rxq->pool[i].page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ __il_free_pages(il, rxq->pool[i].page);
+ rxq->pool[i].page = NULL;
+ }
+ }
+
+ dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->bd_dma);
+ dma_free_coherent(&il->pci_dev->dev, sizeof(struct il_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
+ rxq->bd = NULL;
+ rxq->rb_stts = NULL;
+}
+
+int
+il4965_rxq_stop(struct il_priv *il)
+{
+
+ /* stop Rx DMA */
+ il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+ il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG,
+ FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
+
+ return 0;
+}
+
+int
+il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
+{
+ int idx = 0;
+ int band_offset = 0;
+
+ /* HT rate format: mac80211 wants an MCS number, which is just LSB */
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ idx = (rate_n_flags & 0xff);
+ return idx;
+ /* Legacy rate format, search for match in table */
+ } else {
+ if (band == IEEE80211_BAND_5GHZ)
+ band_offset = IL_FIRST_OFDM_RATE;
+ for (idx = band_offset; idx < RATE_COUNT_LEGACY; idx++)
+ if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
+ return idx - band_offset;
+ }
+
+ return -1;
+}
+
+static int
+il4965_calc_rssi(struct il_priv *il, struct il_rx_phy_res *rx_resp)
+{
+ /* data from PHY/DSP regarding signal strength, etc.,
+ * contents are always there, not configurable by host. */
+ struct il4965_rx_non_cfg_phy *ncphy =
+ (struct il4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
+ u32 agc =
+ (le16_to_cpu(ncphy->agc_info) & IL49_AGC_DB_MASK) >>
+ IL49_AGC_DB_POS;
+
+ u32 valid_antennae =
+ (le16_to_cpu(rx_resp->phy_flags) & IL49_RX_PHY_FLAGS_ANTENNAE_MASK)
+ >> IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
+ u8 max_rssi = 0;
+ u32 i;
+
+ /* Find max rssi among 3 possible receivers.
+ * These values are measured by the digital signal processor (DSP).
+ * They should stay fairly constant even as the signal strength varies,
+ * if the radio's automatic gain control (AGC) is working right.
+ * AGC value (see below) will provide the "interesting" info. */
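+ /* Note: rssi_info[] holds one entry per chain at a stride of two,
+ * hence the i << 1 indexing below. */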
+ for (i = 0; i < 3; i++)
+ if (valid_antennae & (1 << i))
+ max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
+
+ D_STATS("Rssi In A %d B %d C %d Max %d AGC dB %d\n",
+ ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
+ max_rssi, agc);
+
+ /* dBm = max_rssi dB - agc dB - constant.
+ * Higher AGC (higher radio gain) means lower signal. */
+ return max_rssi - agc - IL4965_RSSI_OFFSET;
+}
+
+static u32
+il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in)
+{
+ u32 decrypt_out = 0;
+
+ if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
+ RX_RES_STATUS_STATION_FOUND)
+ decrypt_out |=
+ (RX_RES_STATUS_STATION_FOUND |
+ RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
+
+ decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
+
+ /* packet was not encrypted */
+ if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+ RX_RES_STATUS_SEC_TYPE_NONE)
+ return decrypt_out;
+
+ /* packet was encrypted with unknown alg */
+ if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
+ RX_RES_STATUS_SEC_TYPE_ERR)
+ return decrypt_out;
+
+ /* decryption was not done in HW */
+ if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
+ RX_MPDU_RES_STATUS_DEC_DONE_MSK)
+ return decrypt_out;
+
+ switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
+
+ case RX_RES_STATUS_SEC_TYPE_CCMP:
+ /* alg is CCM: check MIC only */
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
+ /* Bad MIC */
+ decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+ else
+ decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+
+ break;
+
+ case RX_RES_STATUS_SEC_TYPE_TKIP:
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
+ /* Bad TTAK */
+ decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
+ break;
+ }
+ /* fall through if TTAK OK */
+ default:
+ if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
+ decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
+ else
+ decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
+ break;
+ }
+
+ D_RX("decrypt_in:0x%x decrypt_out = 0x%x\n", decrypt_in, decrypt_out);
+
+ return decrypt_out;
+}
+
+static void
+il4965_pass_packet_to_mac80211(struct il_priv *il, struct ieee80211_hdr *hdr,
+ u16 len, u32 ampdu_status, struct il_rx_buf *rxb,
+ struct ieee80211_rx_status *stats)
+{
+ struct sk_buff *skb;
+ __le16 fc = hdr->frame_control;
+
+ /* We only process data packets if the interface is open */
+ if (unlikely(!il->is_open)) {
+ D_DROP("Dropping packet while interface is not open.\n");
+ return;
+ }
+
+ /* In case of HW accelerated crypto and bad decryption, drop */
+ if (!il->cfg->mod_params->sw_crypto &&
+ il_set_decrypted_flag(il, hdr, ampdu_status, stats))
+ return;
+
+ skb = dev_alloc_skb(128);
+ if (!skb) {
+ IL_ERR("dev_alloc_skb failed\n");
+ return;
+ }
+
+ skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
+
+ il_update_stats(il, false, fc, len);
+ memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+
+ ieee80211_rx(il->hw, skb);
+ il->alloc_rxb_page--;
+ rxb->page = NULL;
+}
+
+/* Called for N_RX (legacy ABG frames), or
+ * N_RX_MPDU (HT high-throughput N frames). */
+void
+il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct ieee80211_hdr *header;
+ struct ieee80211_rx_status rx_status;
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_rx_phy_res *phy_res;
+ __le32 rx_pkt_status;
+ struct il_rx_mpdu_res_start *amsdu;
+ u32 len;
+ u32 ampdu_status;
+ u32 rate_n_flags;
+
+ /*
+ * N_RX and N_RX_MPDU are handled differently.
+ * N_RX: physical layer info is in this buffer
+ * N_RX_MPDU: physical layer info was sent in separate
+ * command and cached in il->last_phy_res
+ *
+ * Here we set up local variables depending on which command is
+ * received.
+ */
+ if (pkt->hdr.cmd == N_RX) {
+ phy_res = (struct il_rx_phy_res *)pkt->u.raw;
+ header =
+ (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res) +
+ phy_res->cfg_phy_cnt);
+
+ len = le16_to_cpu(phy_res->byte_count);
+ rx_pkt_status =
+ *(__le32 *) (pkt->u.raw + sizeof(*phy_res) +
+ phy_res->cfg_phy_cnt + len);
+ ampdu_status = le32_to_cpu(rx_pkt_status);
+ } else {
+ if (!il->_4965.last_phy_res_valid) {
+ IL_ERR("MPDU frame without cached PHY data\n");
+ return;
+ }
+ phy_res = &il->_4965.last_phy_res;
+ amsdu = (struct il_rx_mpdu_res_start *)pkt->u.raw;
+ header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
+ len = le16_to_cpu(amsdu->byte_count);
+ rx_pkt_status = *(__le32 *) (pkt->u.raw + sizeof(*amsdu) + len);
+ ampdu_status =
+ il4965_translate_rx_status(il, le32_to_cpu(rx_pkt_status));
+ }
+
+ if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
+ D_DROP("dsp size out of range [0,20]: %d/n",
+ phy_res->cfg_phy_cnt);
+ return;
+ }
+
+ if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
+ !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
+ D_RX("Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status));
+ return;
+ }
+
+ /* This will be used in several places later */
+ rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
+
+ /* rx_status carries information about the packet to mac80211 */
+ rx_status.mactime = le64_to_cpu(phy_res->timestamp);
+ rx_status.band =
+ (phy_res->
+ phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ? IEEE80211_BAND_2GHZ :
+ IEEE80211_BAND_5GHZ;
+ rx_status.freq =
+ ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
+ rx_status.band);
+ rx_status.rate_idx =
+ il4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
+ rx_status.flag = 0;
+
+ /* TSF isn't reliable. In order to allow a smooth user experience,
+ * this W/A doesn't propagate it to mac80211 */
+ /*rx_status.flag |= RX_FLAG_MACTIME_MPDU; */
+
+ il->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
+
+ /* Find max signal strength (dBm) among 3 antenna/receiver chains */
+ rx_status.signal = il4965_calc_rssi(il, phy_res);
+
+ il_dbg_log_rx_data_frame(il, len, header);
+ D_STATS("Rssi %d, TSF %llu\n", rx_status.signal,
+ (unsigned long long)rx_status.mactime);
+
+ /*
+ * "antenna number"
+ *
+ * It seems that the antenna field in the phy flags value
+ * is actually a bit field. This is undefined by radiotap;
+ * it wants an actual antenna number, but I always get "7"
+ * for most legacy frames I receive, indicating that the
+ * same frame was received on all three RX chains.
+ *
+ * I think this field should be removed in favor of a
+ * new 802.11n radiotap field "RX chains" that is defined
+ * as a bitmask.
+ */
+ rx_status.antenna =
+ (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK) >>
+ RX_RES_PHY_FLAGS_ANTENNA_POS;
+
+ /* set the preamble flag if appropriate */
+ if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
+ rx_status.flag |= RX_FLAG_SHORTPRE;
+
+ /* Set up the HT phy flags */
+ if (rate_n_flags & RATE_MCS_HT_MSK)
+ rx_status.flag |= RX_FLAG_HT;
+ if (rate_n_flags & RATE_MCS_HT40_MSK)
+ rx_status.flag |= RX_FLAG_40MHZ;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ rx_status.flag |= RX_FLAG_SHORT_GI;
+
+ il4965_pass_packet_to_mac80211(il, header, len, ampdu_status, rxb,
+ &rx_status);
+}
+
+/* Cache phy data (Rx signal strength, etc) for HT frame (N_RX_PHY).
+ * This will be used later in il_hdl_rx() for N_RX_MPDU. */
+void
+il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ il->_4965.last_phy_res_valid = true;
+ memcpy(&il->_4965.last_phy_res, pkt->u.raw,
+ sizeof(struct il_rx_phy_res));
+}
+
+static int
+il4965_get_channels_for_scan(struct il_priv *il, struct ieee80211_vif *vif,
+ enum ieee80211_band band, u8 is_active,
+ u8 n_probes, struct il_scan_channel *scan_ch)
+{
+ struct ieee80211_channel *chan;
+ const struct ieee80211_supported_band *sband;
+ const struct il_channel_info *ch_info;
+ u16 passive_dwell = 0;
+ u16 active_dwell = 0;
+ int added, i;
+ u16 channel;
+
+ sband = il_get_hw_mode(il, band);
+ if (!sband)
+ return 0;
+
+ active_dwell = il_get_active_dwell_time(il, band, n_probes);
+ passive_dwell = il_get_passive_dwell_time(il, band, vif);
+
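+ /* Ensure the passive dwell is strictly longer than the active dwell */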
+ if (passive_dwell <= active_dwell)
+ passive_dwell = active_dwell + 1;
+
+ for (i = 0, added = 0; i < il->scan_request->n_channels; i++) {
+ chan = il->scan_request->channels[i];
+
+ if (chan->band != band)
+ continue;
+
+ channel = chan->hw_value;
+ scan_ch->channel = cpu_to_le16(channel);
+
+ ch_info = il_get_channel_info(il, band, channel);
+ if (!il_is_channel_valid(ch_info)) {
+ D_SCAN("Channel %d is INVALID for this band.\n",
+ channel);
+ continue;
+ }
+
+ if (!is_active || il_is_channel_passive(ch_info) ||
+ (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
+ scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+ else
+ scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
+
+ if (n_probes)
+ scan_ch->type |= IL_SCAN_PROBE_MASK(n_probes);
+
+ scan_ch->active_dwell = cpu_to_le16(active_dwell);
+ scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+
+ /* Set txpower levels to defaults */
+ scan_ch->dsp_atten = 110;
+
+ /* NOTE: if we were doing 6Mb OFDM for scans we'd use
+ * power level:
+ * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
+ */
+ if (band == IEEE80211_BAND_5GHZ)
+ scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else
+ scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+
+ D_SCAN("Scanning ch=%d prob=0x%X [%s %d]\n", channel,
+ le32_to_cpu(scan_ch->type),
+ (scan_ch->
+ type & SCAN_CHANNEL_TYPE_ACTIVE) ? "ACTIVE" : "PASSIVE",
+ (scan_ch->
+ type & SCAN_CHANNEL_TYPE_ACTIVE) ? active_dwell :
+ passive_dwell);
+
+ scan_ch++;
+ added++;
+ }
+
+ D_SCAN("total channels to scan %d\n", added);
+ return added;
+}
+
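+/* Advance *ant round-robin to the next antenna set in the 'valid' bitmask;
+ * if no other valid antenna is found, *ant is left unchanged. */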
+static void
+il4965_toggle_tx_ant(struct il_priv *il, u8 *ant, u8 valid)
+{
+ int i;
+ u8 ind = *ant;
+
+ for (i = 0; i < RATE_ANT_NUM - 1; i++) {
+ ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
+ if (valid & BIT(ind)) {
+ *ant = ind;
+ return;
+ }
+ }
+}
+
+int
+il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif)
+{
+ struct il_host_cmd cmd = {
+ .id = C_SCAN,
+ .len = sizeof(struct il_scan_cmd),
+ .flags = CMD_SIZE_HUGE,
+ };
+ struct il_scan_cmd *scan;
+ struct il_rxon_context *ctx = &il->ctx;
+ u32 rate_flags = 0;
+ u16 cmd_len;
+ u16 rx_chain = 0;
+ enum ieee80211_band band;
+ u8 n_probes = 0;
+ u8 rx_ant = il->hw_params.valid_rx_ant;
+ u8 rate;
+ bool is_active = false;
+ int chan_mod;
+ u8 active_chains;
+ u8 scan_tx_antennas = il->hw_params.valid_tx_ant;
+ int ret;
+
+ lockdep_assert_held(&il->mutex);
+
+ ctx = il_rxon_ctx_from_vif(vif);
+
+ if (!il->scan_cmd) {
+ il->scan_cmd =
+ kmalloc(sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE,
+ GFP_KERNEL);
+ if (!il->scan_cmd) {
+ D_SCAN("fail to allocate memory for scan\n");
+ return -ENOMEM;
+ }
+ }
+ scan = il->scan_cmd;
+ memset(scan, 0, sizeof(struct il_scan_cmd) + IL_MAX_SCAN_SIZE);
+
+ scan->quiet_plcp_th = IL_PLCP_QUIET_THRESH;
+ scan->quiet_time = IL_ACTIVE_QUIET_TIME;
+
+ if (il_is_any_associated(il)) {
+ u16 interval;
+ u32 extra;
+ u32 suspend_time = 100;
+ u32 scan_suspend_time = 100;
+
+ D_INFO("Scanning while associated...\n");
+ interval = vif->bss_conf.beacon_int;
+
+ scan->suspend_time = 0;
+ scan->max_out_time = cpu_to_le32(200 * 1024);
+ if (!interval)
+ interval = suspend_time;
+
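+ /* Pack the suspend time for the firmware: the number of whole
+ * beacon intervals goes in the upper bits (<< 22), the remainder
+ * scaled by 1024 (presumably TU -> usec) in the lower bits. */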
+ extra = (suspend_time / interval) << 22;
+ scan_suspend_time =
+ (extra | ((suspend_time % interval) * 1024));
+ scan->suspend_time = cpu_to_le32(scan_suspend_time);
+ D_SCAN("suspend_time 0x%X beacon interval %d\n",
+ scan_suspend_time, interval);
+ }
+
+ if (il->scan_request->n_ssids) {
+ int i, p = 0;
+ D_SCAN("Kicking off active scan\n");
+ for (i = 0; i < il->scan_request->n_ssids; i++) {
+ /* always does wildcard anyway */
+ if (!il->scan_request->ssids[i].ssid_len)
+ continue;
+ scan->direct_scan[p].id = WLAN_EID_SSID;
+ scan->direct_scan[p].len =
+ il->scan_request->ssids[i].ssid_len;
+ memcpy(scan->direct_scan[p].ssid,
+ il->scan_request->ssids[i].ssid,
+ il->scan_request->ssids[i].ssid_len);
+ n_probes++;
+ p++;
+ }
+ is_active = true;
+ } else
+ D_SCAN("Start passive scan.\n");
+
+ scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
+ scan->tx_cmd.sta_id = ctx->bcast_sta_id;
+ scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+
+ switch (il->scan_band) {
+ case IEEE80211_BAND_2GHZ:
+ scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
+ chan_mod =
+ le32_to_cpu(il->ctx.active.
+ flags & RXON_FLG_CHANNEL_MODE_MSK) >>
+ RXON_FLG_CHANNEL_MODE_POS;
+ if (chan_mod == CHANNEL_MODE_PURE_40) {
+ rate = RATE_6M_PLCP;
+ } else {
+ rate = RATE_1M_PLCP;
+ rate_flags = RATE_MCS_CCK_MSK;
+ }
+ break;
+ case IEEE80211_BAND_5GHZ:
+ rate = RATE_6M_PLCP;
+ break;
+ default:
+ IL_WARN("Invalid scan band\n");
+ return -EIO;
+ }
+
+ /*
+ * If active scanning is requested but a certain channel is
+ * marked passive, we can do active scanning if we detect
+ * transmissions.
+ *
+ * There is an issue with some firmware versions that triggers
+ * a sysassert on a "good CRC threshold" of zero (== disabled),
+ * on a radar channel even though this means that we should NOT
+ * send probes.
+ *
+ * The "good CRC threshold" is the number of frames that we
+ * need to receive during our dwell time on a channel before
+ * sending out probes -- setting this to a huge value will
+ * mean we never reach it, but at the same time work around
+ * the aforementioned issue. Thus use IL_GOOD_CRC_TH_NEVER
+ * here instead of IL_GOOD_CRC_TH_DISABLED.
+ */
+ scan->good_CRC_th =
+ is_active ? IL_GOOD_CRC_TH_DEFAULT : IL_GOOD_CRC_TH_NEVER;
+
+ band = il->scan_band;
+
+ if (il->cfg->scan_rx_antennas[band])
+ rx_ant = il->cfg->scan_rx_antennas[band];
+
+ il4965_toggle_tx_ant(il, &il->scan_tx_ant[band], scan_tx_antennas);
+ rate_flags |= BIT(il->scan_tx_ant[band]) << RATE_MCS_ANT_POS;
+ scan->tx_cmd.rate_n_flags = cpu_to_le32(rate | rate_flags);
+
+ /* In power save mode use one chain, otherwise use all chains */
+ if (test_bit(S_POWER_PMI, &il->status)) {
+ /* rx_ant has been set to all valid chains previously */
+ active_chains =
+ rx_ant & ((u8) (il->chain_noise_data.active_chains));
+ if (!active_chains)
+ active_chains = rx_ant;
+
+ D_SCAN("chain_noise_data.active_chains: %u\n",
+ il->chain_noise_data.active_chains);
+
+ rx_ant = il4965_first_antenna(active_chains);
+ }
+
+ /* MIMO is not used here, but value is required */
+ rx_chain |= il->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
+ rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
+ rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
+ rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
+ scan->rx_chain = cpu_to_le16(rx_chain);
+
+ cmd_len =
+ il_fill_probe_req(il, (struct ieee80211_mgmt *)scan->data,
+ vif->addr, il->scan_request->ie,
+ il->scan_request->ie_len,
+ IL_MAX_SCAN_SIZE - sizeof(*scan));
+ scan->tx_cmd.len = cpu_to_le16(cmd_len);
+
+ scan->filter_flags |=
+ (RXON_FILTER_ACCEPT_GRP_MSK | RXON_FILTER_BCON_AWARE_MSK);
+
+ scan->channel_count =
+ il4965_get_channels_for_scan(il, vif, band, is_active, n_probes,
+ (void *)&scan->data[cmd_len]);
+ if (scan->channel_count == 0) {
+ D_SCAN("channel count %d\n", scan->channel_count);
+ return -EIO;
+ }
+
+ cmd.len +=
+ le16_to_cpu(scan->tx_cmd.len) +
+ scan->channel_count * sizeof(struct il_scan_channel);
+ cmd.data = scan;
+ scan->len = cpu_to_le16(cmd.len);
+
+ set_bit(S_SCAN_HW, &il->status);
+
+ ret = il_send_cmd_sync(il, &cmd);
+ if (ret)
+ clear_bit(S_SCAN_HW, &il->status);
+
+ return ret;
+}
+
+int
+il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
+ bool add)
+{
+ struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+ if (add)
+ return il4965_add_bssid_station(il, vif_priv->ctx,
+ vif->bss_conf.bssid,
+ &vif_priv->ibss_bssid_sta_id);
+ return il_remove_station(il, vif_priv->ibss_bssid_sta_id,
+ vif->bss_conf.bssid);
+}
+
+void
+il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid, int freed)
+{
+ lockdep_assert_held(&il->sta_lock);
+
+ if (il->stations[sta_id].tid[tid].tfds_in_queue >= freed)
+ il->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+ else {
+ D_TX("free more than tfds_in_queue (%u:%d)\n",
+ il->stations[sta_id].tid[tid].tfds_in_queue, freed);
+ il->stations[sta_id].tid[tid].tfds_in_queue = 0;
+ }
+}
+
+#define IL_TX_QUEUE_MSK 0xfffff
+
+static bool
+il4965_is_single_rx_stream(struct il_priv *il)
+{
+ return il->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
+ il->current_ht_config.single_chain_sufficient;
+}
+
+#define IL_NUM_RX_CHAINS_MULTIPLE 3
+#define IL_NUM_RX_CHAINS_SINGLE 2
+#define IL_NUM_IDLE_CHAINS_DUAL 2
+#define IL_NUM_IDLE_CHAINS_SINGLE 1
+
+/*
+ * Determine how many receiver/antenna chains to use.
+ *
+ * More provides better reception via diversity. Fewer saves power
+ * at the expense of throughput, but only when not in powersave to
+ * start with.
+ *
+ * MIMO (dual stream) requires at least 2, but works better with 3.
+ * This does not determine *which* chains to use, just how many.
+ */
+static int
+il4965_get_active_rx_chain_count(struct il_priv *il)
+{
+ /* # of Rx chains to use when expecting MIMO. */
+ if (il4965_is_single_rx_stream(il))
+ return IL_NUM_RX_CHAINS_SINGLE;
+ else
+ return IL_NUM_RX_CHAINS_MULTIPLE;
+}
+
+/*
+ * When we are in power saving mode, unless the device supports spatial
+ * multiplexing power save, use the active count for the rx chain count.
+ */
+static int
+il4965_get_idle_rx_chain_count(struct il_priv *il, int active_cnt)
+{
+ /* # Rx chains when idling, depending on SMPS mode */
+ switch (il->current_ht_config.smps) {
+ case IEEE80211_SMPS_STATIC:
+ case IEEE80211_SMPS_DYNAMIC:
+ return IL_NUM_IDLE_CHAINS_SINGLE;
+ case IEEE80211_SMPS_OFF:
+ return active_cnt;
+ default:
+ WARN(1, "invalid SMPS mode %d", il->current_ht_config.smps);
+ return active_cnt;
+ }
+}
+
+/* up to 4 chains */
+static u8
+il4965_count_chain_bitmap(u32 chain_bitmap)
+{
+ u8 res;
+ res = (chain_bitmap & BIT(0)) >> 0;
+ res += (chain_bitmap & BIT(1)) >> 1;
+ res += (chain_bitmap & BIT(2)) >> 2;
+ res += (chain_bitmap & BIT(3)) >> 3;
+ return res;
+}
+
+/**
+ * il4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
+ *
+ * Selects how many and which Rx receivers/antennas/chains to use.
+ * This should not be used for the scan command ... it puts data in the wrong place.
+ */
+void
+il4965_set_rxon_chain(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ bool is_single = il4965_is_single_rx_stream(il);
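+ /* "CAM" = continuously aware mode, i.e. power management is off */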
+ bool is_cam = !test_bit(S_POWER_PMI, &il->status);
+ u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
+ u32 active_chains;
+ u16 rx_chain;
+
+ /* Tell uCode which antennas are actually connected.
+ * Before first association, we assume all antennas are connected.
+ * Just after first association, il4965_chain_noise_calibration()
+ * checks which antennas actually *are* connected. */
+ if (il->chain_noise_data.active_chains)
+ active_chains = il->chain_noise_data.active_chains;
+ else
+ active_chains = il->hw_params.valid_rx_ant;
+
+ rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
+
+ /* How many receivers should we use? */
+ active_rx_cnt = il4965_get_active_rx_chain_count(il);
+ idle_rx_cnt = il4965_get_idle_rx_chain_count(il, active_rx_cnt);
+
+ /* correct rx chain count according to hw settings
+ * and chain noise calibration
+ */
+ valid_rx_cnt = il4965_count_chain_bitmap(active_chains);
+ if (valid_rx_cnt < active_rx_cnt)
+ active_rx_cnt = valid_rx_cnt;
+
+ if (valid_rx_cnt < idle_rx_cnt)
+ idle_rx_cnt = valid_rx_cnt;
+
+ rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
+ rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
+
+ ctx->staging.rx_chain = cpu_to_le16(rx_chain);
+
+ if (!is_single && active_rx_cnt >= IL_NUM_RX_CHAINS_SINGLE && is_cam)
+ ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
+ else
+ ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
+
+ D_ASSOC("rx_chain=0x%X active=%d idle=%d\n", ctx->staging.rx_chain,
+ active_rx_cnt, idle_rx_cnt);
+
+ WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
+ active_rx_cnt < idle_rx_cnt);
+}
+
+static const char *
+il4965_get_fh_string(int cmd)
+{
+ switch (cmd) {
+ IL_CMD(FH49_RSCSR_CHNL0_STTS_WPTR_REG);
+ IL_CMD(FH49_RSCSR_CHNL0_RBDCB_BASE_REG);
+ IL_CMD(FH49_RSCSR_CHNL0_WPTR);
+ IL_CMD(FH49_MEM_RCSR_CHNL0_CONFIG_REG);
+ IL_CMD(FH49_MEM_RSSR_SHARED_CTRL_REG);
+ IL_CMD(FH49_MEM_RSSR_RX_STATUS_REG);
+ IL_CMD(FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
+ IL_CMD(FH49_TSSR_TX_STATUS_REG);
+ IL_CMD(FH49_TSSR_TX_ERROR_REG);
+ default:
+ return "UNKNOWN";
+ }
+}
+
+int
+il4965_dump_fh(struct il_priv *il, char **buf, bool display)
+{
+ int i;
+#ifdef CONFIG_IWLEGACY_DEBUG
+ int pos = 0;
+ size_t bufsz = 0;
+#endif
+ static const u32 fh_tbl[] = {
+ FH49_RSCSR_CHNL0_STTS_WPTR_REG,
+ FH49_RSCSR_CHNL0_RBDCB_BASE_REG,
+ FH49_RSCSR_CHNL0_WPTR,
+ FH49_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH49_MEM_RSSR_SHARED_CTRL_REG,
+ FH49_MEM_RSSR_RX_STATUS_REG,
+ FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
+ FH49_TSSR_TX_STATUS_REG,
+ FH49_TSSR_TX_ERROR_REG
+ };
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (display) {
+ bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
+ pos +=
+ scnprintf(*buf + pos, bufsz - pos, "FH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ pos +=
+ scnprintf(*buf + pos, bufsz - pos,
+ " %34s: 0X%08x\n",
+ il4965_get_fh_string(fh_tbl[i]),
+ il_rd(il, fh_tbl[i]));
+ }
+ return pos;
+ }
+#endif
+ IL_ERR("FH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ IL_ERR(" %34s: 0X%08x\n", il4965_get_fh_string(fh_tbl[i]),
+ il_rd(il, fh_tbl[i]));
+ }
+ return 0;
+}
+
+void
+il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_missed_beacon_notif *missed_beacon;
+
+ missed_beacon = &pkt->u.missed_beacon;
+ if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
+ il->missed_beacon_threshold) {
+ D_CALIB("missed bcn cnsq %d totl %d rcd %d expctd %d\n",
+ le32_to_cpu(missed_beacon->consecutive_missed_beacons),
+ le32_to_cpu(missed_beacon->total_missed_becons),
+ le32_to_cpu(missed_beacon->num_recvd_beacons),
+ le32_to_cpu(missed_beacon->num_expected_beacons));
+ if (!test_bit(S_SCANNING, &il->status))
+ il4965_init_sensitivity(il);
+ }
+}
+
+/* Calculate noise level, based on measurements during network silence just
+ * before a beacon arrives. This measurement can be done only if we know
+ * exactly when to expect beacons, therefore only when we're associated. */
+static void
+il4965_rx_calc_noise(struct il_priv *il)
+{
+ struct stats_rx_non_phy *rx_info;
+ int num_active_rx = 0;
+ int total_silence = 0;
+ int bcn_silence_a, bcn_silence_b, bcn_silence_c;
+ int last_rx_noise;
+
+ rx_info = &(il->_4965.stats.rx.general);
+ bcn_silence_a =
+ le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
+ bcn_silence_b =
+ le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
+ bcn_silence_c =
+ le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
+
+ if (bcn_silence_a) {
+ total_silence += bcn_silence_a;
+ num_active_rx++;
+ }
+ if (bcn_silence_b) {
+ total_silence += bcn_silence_b;
+ num_active_rx++;
+ }
+ if (bcn_silence_c) {
+ total_silence += bcn_silence_c;
+ num_active_rx++;
+ }
+
+ /* Average among active antennas */
+ if (num_active_rx)
+ last_rx_noise = (total_silence / num_active_rx) - 107;
+ else
+ last_rx_noise = IL_NOISE_MEAS_NOT_AVAILABLE;
+
+ D_CALIB("inband silence a %u, b %u, c %u, dBm %d\n", bcn_silence_a,
+ bcn_silence_b, bcn_silence_c, last_rx_noise);
+}
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+/*
+ * Based on the assumption that all stats counters are DWORDs.
+ * FIXME: This function is for debugging only and does not handle
+ * counter roll-over.
+ */
+static void
+il4965_accumulative_stats(struct il_priv *il, __le32 * stats)
+{
+ int i, size;
+ __le32 *prev_stats;
+ u32 *accum_stats;
+ u32 *delta, *max_delta;
+ struct stats_general_common *general, *accum_general;
+ struct stats_tx *tx, *accum_tx;
+
+ prev_stats = (__le32 *) &il->_4965.stats;
+ accum_stats = (u32 *) &il->_4965.accum_stats;
+ size = sizeof(struct il_notif_stats);
+ general = &il->_4965.stats.general.common;
+ accum_general = &il->_4965.accum_stats.general.common;
+ tx = &il->_4965.stats.tx;
+ accum_tx = &il->_4965.accum_stats.tx;
+ delta = (u32 *) &il->_4965.delta_stats;
+ max_delta = (u32 *) &il->_4965.max_delta;
+
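+ /* Walk every __le32 counter in the stats block (skipping the leading
+ * flag word) and advance the accumulated/delta/max arrays in lockstep. */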
+ for (i = sizeof(__le32); i < size;
+ i +=
+ sizeof(__le32), stats++, prev_stats++, delta++, max_delta++,
+ accum_stats++) {
+ if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+ *delta =
+ (le32_to_cpu(*stats) - le32_to_cpu(*prev_stats));
+ *accum_stats += *delta;
+ if (*delta > *max_delta)
+ *max_delta = *delta;
+ }
+ }
+
+ /* reset accumulative stats for "no-counter" type stats */
+ accum_general->temperature = general->temperature;
+ accum_general->ttl_timestamp = general->ttl_timestamp;
+}
+#endif
+
+#define REG_RECALIB_PERIOD (60)
+
+void
+il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ int change;
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+ D_RX("Statistics notification received (%d vs %d).\n",
+ (int)sizeof(struct il_notif_stats),
+ le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK);
+
+ change =
+ ((il->_4965.stats.general.common.temperature !=
+ pkt->u.stats.general.common.temperature) ||
+ ((il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK) !=
+ (pkt->u.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)));
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ il4965_accumulative_stats(il, (__le32 *) &pkt->u.stats);
+#endif
+
+ /* TODO: reading some of stats is unneeded */
+ memcpy(&il->_4965.stats, &pkt->u.stats, sizeof(il->_4965.stats));
+
+ set_bit(S_STATS, &il->status);
+
+ /* Reschedule the stats timer to occur in
+ * REG_RECALIB_PERIOD seconds to ensure we get a
+ * thermal update even if the uCode doesn't give
+ * us one */
+ mod_timer(&il->stats_periodic,
+ jiffies + msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
+
+ if (unlikely(!test_bit(S_SCANNING, &il->status)) &&
+ (pkt->hdr.cmd == N_STATS)) {
+ il4965_rx_calc_noise(il);
+ queue_work(il->workqueue, &il->run_time_calib_work);
+ }
+ if (il->cfg->ops->lib->temp_ops.temperature && change)
+ il->cfg->ops->lib->temp_ops.temperature(il);
+}
+
+void
+il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+ if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATS_CLEAR_MSK) {
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ memset(&il->_4965.accum_stats, 0,
+ sizeof(struct il_notif_stats));
+ memset(&il->_4965.delta_stats, 0,
+ sizeof(struct il_notif_stats));
+ memset(&il->_4965.max_delta, 0, sizeof(struct il_notif_stats));
+#endif
+ D_RX("Statistics have been cleared\n");
+ }
+ il4965_hdl_stats(il, rxb);
+}
+
+
+/*
+ * mac80211 queues, ACs, hardware queues, FIFOs.
+ *
+ * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
+ *
+ * Mac80211 uses the following numbers, which we get from it
+ * by way of skb_get_queue_mapping(skb):
+ *
+ * VO 0
+ * VI 1
+ * BE 2
+ * BK 3
+ *
+ *
+ * Regular (not A-MPDU) frames are put into hardware queues corresponding
+ * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
+ * own queue per aggregation session (RA/TID combination), such queues are
+ * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
+ * order to map frames to the right queue, we also need an AC->hw queue
+ * mapping. This is implemented here.
+ *
+ * Due to the way hw queues are set up (by the hw specific modules like
+ * 4965.c), the AC->hw queue mapping is the identity
+ * mapping.
+ */
+
+static const u8 tid_to_ac[] = {
+ IEEE80211_AC_BE,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BK,
+ IEEE80211_AC_BE,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VI,
+ IEEE80211_AC_VO,
+ IEEE80211_AC_VO
+};
+
+static inline int
+il4965_get_ac_from_tid(u16 tid)
+{
+ if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+ return tid_to_ac[tid];
+
+ /* no support for TIDs 8-15 yet */
+ return -EINVAL;
+}
+
+static inline int
+il4965_get_fifo_from_tid(struct il_rxon_context *ctx, u16 tid)
+{
+ if (likely(tid < ARRAY_SIZE(tid_to_ac)))
+ return ctx->ac_to_fifo[tid_to_ac[tid]];
+
+ /* no support for TIDs 8-15 yet */
+ return -EINVAL;
+}
+
+/*
+ * Build the basic fields of the C_TX (Tx) host command.
+ */
+static void
+il4965_tx_cmd_build_basic(struct il_priv *il, struct sk_buff *skb,
+ struct il_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info,
+ struct ieee80211_hdr *hdr, u8 std_id)
+{
+ __le16 fc = hdr->frame_control;
+ __le32 tx_flags = tx_cmd->tx_flags;
+
+ tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+ if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
+ tx_flags |= TX_CMD_FLG_ACK_MSK;
+ if (ieee80211_is_mgmt(fc))
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ if (ieee80211_is_probe_resp(fc) &&
+ !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
+ tx_flags |= TX_CMD_FLG_TSF_MSK;
+ } else {
+ tx_flags &= (~TX_CMD_FLG_ACK_MSK);
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+ if (ieee80211_is_back_req(fc))
+ tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
+
+ tx_cmd->sta_id = std_id;
+ if (ieee80211_has_morefrags(fc))
+ tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
+
+ if (ieee80211_is_data_qos(fc)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tx_cmd->tid_tspec = qc[0] & 0xf;
+ tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
+ } else {
+ tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
+ }
+
+ il_tx_cmd_protection(il, info, fc, &tx_flags);
+
+ tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
+ if (ieee80211_is_mgmt(fc)) {
+ if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
+ tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
+ else
+ tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
+ } else {
+ tx_cmd->timeout.pm_frame_timeout = 0;
+ }
+
+ tx_cmd->driver_txop = 0;
+ tx_cmd->tx_flags = tx_flags;
+ tx_cmd->next_frame_len = 0;
+}
+
+static void
+il4965_tx_cmd_build_rate(struct il_priv *il, struct il_tx_cmd *tx_cmd,
+ struct ieee80211_tx_info *info, __le16 fc)
+{
+ const u8 rts_retry_limit = 60;
+ u32 rate_flags;
+ int rate_idx;
+ u8 data_retry_limit;
+ u8 rate_plcp;
+
+ /* Set retry limit on DATA packets and Probe Responses */
+ if (ieee80211_is_probe_resp(fc))
+ data_retry_limit = 3;
+ else
+ data_retry_limit = IL4965_DEFAULT_TX_RETRY;
+ tx_cmd->data_retry_limit = data_retry_limit;
+ /* Set retry limit on RTS packets */
+ tx_cmd->rts_retry_limit = min(data_retry_limit, rts_retry_limit);
+
+ /* DATA packets will use the uCode station table for rate/antenna
+ * selection */
+ if (ieee80211_is_data(fc)) {
+ tx_cmd->initial_rate_idx = 0;
+ tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
+ return;
+ }
+
+ /*
+ * If the current TX rate stored in mac80211 has the MCS bit set, it's
+ * not really a TX rate. Thus, we use the lowest supported rate for
+ * this band. Also use the lowest supported rate if the stored rate
+ * idx is invalid.
+ */
+ rate_idx = info->control.rates[0].idx;
+ if ((info->control.rates[0].flags & IEEE80211_TX_RC_MCS) || rate_idx < 0
+ || rate_idx > RATE_COUNT_LEGACY)
+ rate_idx =
+ rate_lowest_index(&il->bands[info->band],
+ info->control.sta);
+ /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
+ if (info->band == IEEE80211_BAND_5GHZ)
+ rate_idx += IL_FIRST_OFDM_RATE;
+ /* Get PLCP rate for tx_cmd->rate_n_flags */
+ rate_plcp = il_rates[rate_idx].plcp;
+ /* Zero out flags for this packet */
+ rate_flags = 0;
+
+ /* Set CCK flag as needed */
+ if (rate_idx >= IL_FIRST_CCK_RATE && rate_idx <= IL_LAST_CCK_RATE)
+ rate_flags |= RATE_MCS_CCK_MSK;
+
+ /* Set up antennas */
+ il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
+ rate_flags |= BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;
+
+ /* Set the rate in the TX cmd */
+ tx_cmd->rate_n_flags = cpu_to_le32(rate_plcp | rate_flags);
+}
+
+static void
+il4965_tx_cmd_build_hwcrypto(struct il_priv *il, struct ieee80211_tx_info *info,
+ struct il_tx_cmd *tx_cmd, struct sk_buff *skb_frag,
+ int sta_id)
+{
+ struct ieee80211_key_conf *keyconf = info->control.hw_key;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
+ memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
+ D_TX("tx_cmd with AES hwcrypto\n");
+ break;
+
+ case WLAN_CIPHER_SUITE_TKIP:
+ tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
+ ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
+ D_TX("tx_cmd with tkip hwcrypto\n");
+ break;
+
+ case WLAN_CIPHER_SUITE_WEP104:
+ tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
+ /* fall through */
+ case WLAN_CIPHER_SUITE_WEP40:
+ tx_cmd->sec_ctl |=
+ (TX_CMD_SEC_WEP | (keyconf->keyidx & TX_CMD_SEC_MSK) <<
+ TX_CMD_SEC_SHIFT);
+
+ memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
+
+ D_TX("Configuring packet for WEP encryption " "with key %d\n",
+ keyconf->keyidx);
+ break;
+
+ default:
+ IL_ERR("Unknown encode cipher %x\n", keyconf->cipher);
+ break;
+ }
+}
+
+/*
+ * start C_TX command process
+ */
+int
+il4965_tx_skb(struct il_priv *il, struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = info->control.sta;
+ struct il_station_priv *sta_priv = NULL;
+ struct il_tx_queue *txq;
+ struct il_queue *q;
+ struct il_device_cmd *out_cmd;
+ struct il_cmd_meta *out_meta;
+ struct il_tx_cmd *tx_cmd;
+ struct il_rxon_context *ctx = &il->ctx;
+ int txq_id;
+ dma_addr_t phys_addr;
+ dma_addr_t txcmd_phys;
+ dma_addr_t scratch_phys;
+ u16 len, firstlen, secondlen;
+ u16 seq_number = 0;
+ __le16 fc;
+ u8 hdr_len;
+ u8 sta_id;
+ u8 wait_write_ptr = 0;
+ u8 tid = 0;
+ u8 *qc = NULL;
+ unsigned long flags;
+ bool is_agg = false;
+
+ if (info->control.vif)
+ ctx = il_rxon_ctx_from_vif(info->control.vif);
+
+ spin_lock_irqsave(&il->lock, flags);
+ if (il_is_rfkill(il)) {
+ D_DROP("Dropping - RF KILL\n");
+ goto drop_unlock;
+ }
+
+ fc = hdr->frame_control;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (ieee80211_is_auth(fc))
+ D_TX("Sending AUTH frame\n");
+ else if (ieee80211_is_assoc_req(fc))
+ D_TX("Sending ASSOC frame\n");
+ else if (ieee80211_is_reassoc_req(fc))
+ D_TX("Sending REASSOC frame\n");
+#endif
+
+ hdr_len = ieee80211_hdrlen(fc);
+
+ /* For management frames use broadcast id so as not to break aggregation */
+ if (!ieee80211_is_data(fc))
+ sta_id = ctx->bcast_sta_id;
+ else {
+ /* Find idx into station table for destination station */
+ sta_id = il_sta_id_or_broadcast(il, ctx, info->control.sta);
+
+ if (sta_id == IL_INVALID_STATION) {
+ D_DROP("Dropping - INVALID STATION: %pM\n", hdr->addr1);
+ goto drop_unlock;
+ }
+ }
+
+ D_TX("station Id %d\n", sta_id);
+
+ if (sta)
+ sta_priv = (void *)sta->drv_priv;
+
+ if (sta_priv && sta_priv->asleep &&
+ (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
+ /*
+ * This sends an asynchronous command to the device,
+ * but we can rely on it being processed before the
+ * next frame is processed -- and the next frame to
+ * this station is the one that will consume this
+ * counter.
+ * For now set the counter to just 1 since we do not
+ * support uAPSD yet.
+ */
+ il4965_sta_modify_sleep_tx_count(il, sta_id, 1);
+ }
+
+ /*
+ * Send this frame after DTIM -- there's a special queue
+ * reserved for this for contexts that support AP mode.
+ */
+ if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
+ txq_id = ctx->mcast_queue;
+ /*
+ * The microcode will clear the more data
+ * bit in the last frame it transmits.
+ */
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
+ } else
+ txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
+
+ /* irqs already disabled/saved above when locking il->lock */
+ spin_lock(&il->sta_lock);
+
+ if (ieee80211_is_data_qos(fc)) {
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
+ spin_unlock(&il->sta_lock);
+ goto drop_unlock;
+ }
+ seq_number = il->stations[sta_id].tid[tid].seq_number;
+ seq_number &= IEEE80211_SCTL_SEQ;
+ hdr->seq_ctrl =
+ hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(seq_number);
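+ /* Advance the 12-bit sequence number for the next frame; the low
+ * 4 bits of seq_ctrl are the fragment number, hence the 0x10 step. */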
+ seq_number += 0x10;
+ /* aggregation is on for this <sta,tid> */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+ il->stations[sta_id].tid[tid].agg.state == IL_AGG_ON) {
+ txq_id = il->stations[sta_id].tid[tid].agg.txq_id;
+ is_agg = true;
+ }
+ }
+
+ txq = &il->txq[txq_id];
+ q = &txq->q;
+
+ if (unlikely(il_queue_space(q) < q->high_mark)) {
+ spin_unlock(&il->sta_lock);
+ goto drop_unlock;
+ }
+
+ if (ieee80211_is_data_qos(fc)) {
+ il->stations[sta_id].tid[tid].tfds_in_queue++;
+ if (!ieee80211_has_morefrags(fc))
+ il->stations[sta_id].tid[tid].seq_number = seq_number;
+ }
+
+ spin_unlock(&il->sta_lock);
+
+ /* Set up driver data for this TFD */
+ memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct il_tx_info));
+ txq->txb[q->write_ptr].skb = skb;
+ txq->txb[q->write_ptr].ctx = ctx;
+
+ /* Set up first empty entry in queue's array of Tx/cmd buffers */
+ out_cmd = txq->cmd[q->write_ptr];
+ out_meta = &txq->meta[q->write_ptr];
+ tx_cmd = &out_cmd->cmd.tx;
+ memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
+ memset(tx_cmd, 0, sizeof(struct il_tx_cmd));
+
+ /*
+ * Set up the Tx-command (not MAC!) header.
+ * Store the chosen Tx queue and TFD idx within the sequence field;
+ * after Tx, uCode's Tx response will return this value so driver can
+ * locate the frame within the tx queue and do post-tx processing.
+ */
+ out_cmd->hdr.cmd = C_TX;
+ out_cmd->hdr.sequence =
+ cpu_to_le16((u16)
+ (QUEUE_TO_SEQ(txq_id) | IDX_TO_SEQ(q->write_ptr)));
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+ /* Total # bytes to be transmitted */
+ len = (u16) skb->len;
+ tx_cmd->len = cpu_to_le16(len);
+
+ if (info->control.hw_key)
+ il4965_tx_cmd_build_hwcrypto(il, info, tx_cmd, skb, sta_id);
+
+ /* TODO need this for burst mode later on */
+ il4965_tx_cmd_build_basic(il, skb, tx_cmd, info, hdr, sta_id);
+ il_dbg_log_tx_data_frame(il, len, hdr);
+
+ il4965_tx_cmd_build_rate(il, tx_cmd, info, fc);
+
+ il_update_stats(il, true, fc, len);
+ /*
+ * Use the first empty entry in this queue's command buffer array
+ * to contain the Tx command and MAC header concatenated together
+ * (payload data will be in another buffer).
+ * Size of this varies, due to varying MAC header length.
+ * If end is not dword aligned, we'll have 2 extra bytes at the end
+ * of the MAC header (device reads on dword boundaries).
+ * We'll tell device about this padding later.
+ */
+ len = sizeof(struct il_tx_cmd) + sizeof(struct il_cmd_header) + hdr_len;
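+ /* Round up to the next dword (4-byte) boundary */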
+ firstlen = (len + 3) & ~3;
+
+ /* Tell NIC about any 2-byte padding after MAC header */
+ if (firstlen != len)
+ tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
+
+ /* Physical address of this Tx command's header (not MAC header!),
+ * within command buffer array. */
+ txcmd_phys =
+ pci_map_single(il->pci_dev, &out_cmd->hdr, firstlen,
+ PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
+ dma_unmap_len_set(out_meta, len, firstlen);
+ /* Add buffer containing Tx command and MAC(!) header to TFD's
+ * first entry */
+ il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, txcmd_phys, firstlen,
+ 1, 0);
+
+ if (!ieee80211_has_morefrags(hdr->frame_control)) {
+ txq->need_update = 1;
+ } else {
+ wait_write_ptr = 1;
+ txq->need_update = 0;
+ }
+
+ /* Set up TFD's 2nd entry to point directly to remainder of skb,
+ * if any (802.11 null frames have no payload). */
+ secondlen = skb->len - hdr_len;
+ if (secondlen > 0) {
+ phys_addr =
+ pci_map_single(il->pci_dev, skb->data + hdr_len, secondlen,
+ PCI_DMA_TODEVICE);
+ il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr,
+ secondlen, 0, 0);
+ }
+
+ scratch_phys =
+ txcmd_phys + sizeof(struct il_cmd_header) +
+ offsetof(struct il_tx_cmd, scratch);
+
+ /* take back ownership of DMA buffer to enable update */
+ pci_dma_sync_single_for_cpu(il->pci_dev, txcmd_phys, firstlen,
+ PCI_DMA_BIDIRECTIONAL);
+ tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
+ tx_cmd->dram_msb_ptr = il_get_dma_hi_addr(scratch_phys);
+
+ D_TX("sequence nr = 0X%x\n", le16_to_cpu(out_cmd->hdr.sequence));
+ D_TX("tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
+ il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd, sizeof(*tx_cmd));
+ il_print_hex_dump(il, IL_DL_TX, (u8 *) tx_cmd->hdr, hdr_len);
+
+ /* Set up entry for this TFD in Tx byte-count array */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq,
+ le16_to_cpu(tx_cmd->
+ len));
+
+ pci_dma_sync_single_for_device(il->pci_dev, txcmd_phys, firstlen,
+ PCI_DMA_BIDIRECTIONAL);
+
+ /* Tell device the write idx *just past* this latest filled TFD */
+ q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
+ il_txq_update_write_ptr(il, txq);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ /*
+ * At this point the frame is "transmitted" successfully
+ * and we will get a TX status notification eventually.
+ */
+
+ /*
+ * Avoid atomic ops if it isn't an associated client.
+ * Also, if this is a packet for aggregation, don't
+ * increase the counter because the ucode will stop
+ * aggregation queues when their respective station
+ * goes to sleep.
+ */
+ if (sta_priv && sta_priv->client && !is_agg)
+ atomic_inc(&sta_priv->pending_frames);
+
+ if (il_queue_space(q) < q->high_mark && il->mac80211_registered) {
+ if (wait_write_ptr) {
+ spin_lock_irqsave(&il->lock, flags);
+ txq->need_update = 1;
+ il_txq_update_write_ptr(il, txq);
+ spin_unlock_irqrestore(&il->lock, flags);
+ } else {
+ il_stop_queue(il, txq);
+ }
+ }
+
+ return 0;
+
+drop_unlock:
+ spin_unlock_irqrestore(&il->lock, flags);
+ return -1;
+}
+
+static inline int
+il4965_alloc_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr, size_t size)
+{
+ ptr->addr =
+ dma_alloc_coherent(&il->pci_dev->dev, size, &ptr->dma, GFP_KERNEL);
+ if (!ptr->addr)
+ return -ENOMEM;
+ ptr->size = size;
+ return 0;
+}
+
+static inline void
+il4965_free_dma_ptr(struct il_priv *il, struct il_dma_ptr *ptr)
+{
+ if (unlikely(!ptr->addr))
+ return;
+
+ dma_free_coherent(&il->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
+ memset(ptr, 0, sizeof(*ptr));
+}
+
+/**
+ * il4965_hw_txq_ctx_free - Free TXQ Context
+ *
+ * Destroy all TX DMA queues and structures
+ */
+void
+il4965_hw_txq_ctx_free(struct il_priv *il)
+{
+ int txq_id;
+
+ /* Tx queues */
+ if (il->txq) {
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+ if (txq_id == il->cmd_queue)
+ il_cmd_queue_free(il);
+ else
+ il_tx_queue_free(il, txq_id);
+ }
+ il4965_free_dma_ptr(il, &il->kw);
+
+ il4965_free_dma_ptr(il, &il->scd_bc_tbls);
+
+ /* free tx queue structure */
+ il_free_txq_mem(il);
+}
+
+/**
+ * il4965_txq_ctx_alloc - allocate TX queue context
+ * Allocate all Tx DMA structures and initialize them
+ *
+ * @param il
+ * @return error code
+ */
+int
+il4965_txq_ctx_alloc(struct il_priv *il)
+{
+ int ret;
+ int txq_id, slots_num;
+ unsigned long flags;
+
+ /* Free all tx/cmd queues and keep-warm buffer */
+ il4965_hw_txq_ctx_free(il);
+
+ ret =
+ il4965_alloc_dma_ptr(il, &il->scd_bc_tbls,
+ il->hw_params.scd_bc_tbls_size);
+ if (ret) {
+ IL_ERR("Scheduler BC Table allocation failed\n");
+ goto error_bc_tbls;
+ }
+ /* Alloc keep-warm buffer */
+ ret = il4965_alloc_dma_ptr(il, &il->kw, IL_KW_SIZE);
+ if (ret) {
+ IL_ERR("Keep Warm allocation failed\n");
+ goto error_kw;
+ }
+
+ /* allocate tx queue structure */
+ ret = il_alloc_txq_mem(il);
+ if (ret)
+ goto error;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Turn off all Tx DMA fifos */
+ il4965_txq_set_sched(il, 0);
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ /* Alloc and init all Tx queues, including the command queue (#4/#9) */
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+ slots_num =
+ (txq_id ==
+ il->cmd_queue) ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ ret = il_tx_queue_init(il, &il->txq[txq_id], slots_num, txq_id);
+ if (ret) {
+ IL_ERR("Tx %d queue init failed\n", txq_id);
+ goto error;
+ }
+ }
+
+ return ret;
+
+error:
+ il4965_hw_txq_ctx_free(il);
+ il4965_free_dma_ptr(il, &il->kw);
+error_kw:
+ il4965_free_dma_ptr(il, &il->scd_bc_tbls);
+error_bc_tbls:
+ return ret;
+}
+
+void
+il4965_txq_ctx_reset(struct il_priv *il)
+{
+ int txq_id, slots_num;
+ unsigned long flags;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Turn off all Tx DMA fifos */
+ il4965_txq_set_sched(il, 0);
+
+ /* Tell NIC where to find the "keep warm" buffer */
+ il_wr(il, FH49_KW_MEM_ADDR_REG, il->kw.dma >> 4);
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ /* Alloc and init all Tx queues, including the command queue (#4) */
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++) {
+ slots_num =
+ txq_id == il->cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
+ il_tx_queue_reset(il, &il->txq[txq_id], slots_num, txq_id);
+ }
+}
+
+/**
+ * il4965_txq_ctx_stop - Stop all Tx DMA channels
+ */
+void
+il4965_txq_ctx_stop(struct il_priv *il)
+{
+ int ch, txq_id;
+ unsigned long flags;
+
+ /* Turn off all Tx DMA fifos */
+ spin_lock_irqsave(&il->lock, flags);
+
+ il4965_txq_set_sched(il, 0);
+
+ /* Stop each Tx DMA channel, and wait for it to be idle */
+ for (ch = 0; ch < il->hw_params.dma_chnl_num; ch++) {
+ il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
+ if (il_poll_bit
+ (il, FH49_TSSR_TX_STATUS_REG,
+ FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch), 1000))
+ IL_ERR("Failing on timeout while stopping"
+ " DMA channel %d [0x%08x]", ch,
+ il_rd(il, FH49_TSSR_TX_STATUS_REG));
+ }
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ if (!il->txq)
+ return;
+
+ /* Unmap DMA from host system and free skb's */
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+ if (txq_id == il->cmd_queue)
+ il_cmd_queue_unmap(il);
+ else
+ il_tx_queue_unmap(il, txq_id);
+}
+
+/*
+ * Find first available (lowest unused) Tx Queue, mark it "active".
+ * Called only when finding queue for aggregation.
+ * Should never return anything < 7, because they should already
+ * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
+ */
+static int
+il4965_txq_ctx_activate_free(struct il_priv *il)
+{
+ int txq_id;
+
+ for (txq_id = 0; txq_id < il->hw_params.max_txq_num; txq_id++)
+ if (!test_and_set_bit(txq_id, &il->txq_ctx_active_msk))
+ return txq_id;
+ return -1;
+}
+
+/**
+ * il4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
+ */
+static void
+il4965_tx_queue_stop_scheduler(struct il_priv *il, u16 txq_id)
+{
+ /* Simply stop the queue, but don't change any configuration;
+ * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
+ il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
+ (0 << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+ (1 << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
+}
+
+/**
+ * il4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
+ */
+static int
+il4965_tx_queue_set_q2ratid(struct il_priv *il, u16 ra_tid, u16 txq_id)
+{
+ u32 tbl_dw_addr;
+ u32 tbl_dw;
+ u16 scd_q2ratid;
+
+ scd_q2ratid = ra_tid & IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
+
+ tbl_dw_addr =
+ il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
+
+ tbl_dw = il_read_targ_mem(il, tbl_dw_addr);
+
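+ /* Each translation-table dword holds the RA/TID mapping for two
+ * queues: odd queue numbers use the upper 16 bits, even ones the
+ * lower 16 bits. */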
+ if (txq_id & 0x1)
+ tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
+ else
+ tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
+
+ il_write_targ_mem(il, tbl_dw_addr, tbl_dw);
+
+ return 0;
+}
+
+/**
+ * il4965_txq_agg_enable - Set up & enable aggregation for selected queue
+ *
+ * NOTE: txq_id must be greater than IL49_FIRST_AMPDU_QUEUE,
+ * i.e. it must be one of the higher queues used for aggregation
+ */
+static int
+il4965_txq_agg_enable(struct il_priv *il, int txq_id, int tx_fifo, int sta_id,
+ int tid, u16 ssn_idx)
+{
+ unsigned long flags;
+ u16 ra_tid;
+ int ret;
+
+ if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
+ (IL49_FIRST_AMPDU_QUEUE +
+ il->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
+ IL_WARN("queue number out of range: %d, must be %d to %d\n",
+ txq_id, IL49_FIRST_AMPDU_QUEUE,
+ IL49_FIRST_AMPDU_QUEUE +
+ il->cfg->base_params->num_of_ampdu_queues - 1);
+ return -EINVAL;
+ }
+
+ ra_tid = BUILD_RAxTID(sta_id, tid);
+
+ /* Modify device's station table to Tx this TID */
+ ret = il4965_sta_tx_modify_enable_tid(il, sta_id, tid);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Stop this Tx queue before configuring it */
+ il4965_tx_queue_stop_scheduler(il, txq_id);
+
+ /* Map receiver-address / traffic-ID to this queue */
+ il4965_tx_queue_set_q2ratid(il, ra_tid, txq_id);
+
+ /* Set this queue as a chain-building queue */
+ il_set_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
+
+ /* Place first TFD at idx corresponding to start sequence number.
+ * Assumes that ssn_idx is valid (!= 0xFFF) */
+ il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+ il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+ il4965_set_wr_ptrs(il, txq_id, ssn_idx);
+
+ /* Set up Tx win size and frame limit for this queue */
+ il_write_targ_mem(il,
+ il->scd_base_addr +
+ IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
+ (SCD_WIN_SIZE << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS)
+ & IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
+
+ il_write_targ_mem(il,
+ il->scd_base_addr +
+ IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
+ (SCD_FRAME_LIMIT <<
+ IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+ IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
+
+ il_set_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
+
+ /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
+ il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 1);
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ return 0;
+}
+
+int
+il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 * ssn)
+{
+ int sta_id;
+ int tx_fifo;
+ int txq_id;
+ int ret;
+ unsigned long flags;
+ struct il_tid_data *tid_data;
+
+ tx_fifo = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid);
+ if (unlikely(tx_fifo < 0))
+ return tx_fifo;
+
+ D_HT("%s on ra = %pM tid = %d\n", __func__, sta->addr, tid);
+
+ sta_id = il_sta_id(sta);
+ if (sta_id == IL_INVALID_STATION) {
+ IL_ERR("Start AGG on invalid station\n");
+ return -ENXIO;
+ }
+ if (unlikely(tid >= MAX_TID_COUNT))
+ return -EINVAL;
+
+ if (il->stations[sta_id].tid[tid].agg.state != IL_AGG_OFF) {
+ IL_ERR("Start AGG when state is not IL_AGG_OFF !\n");
+ return -ENXIO;
+ }
+
+ txq_id = il4965_txq_ctx_activate_free(il);
+ if (txq_id == -1) {
+ IL_ERR("No free aggregation queue available\n");
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ tid_data = &il->stations[sta_id].tid[tid];
+ *ssn = SEQ_TO_SN(tid_data->seq_number);
+ tid_data->agg.txq_id = txq_id;
+ il_set_swq_id(&il->txq[txq_id], il4965_get_ac_from_tid(tid), txq_id);
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ ret = il4965_txq_agg_enable(il, txq_id, tx_fifo, sta_id, tid, *ssn);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ tid_data = &il->stations[sta_id].tid[tid];
+ if (tid_data->tfds_in_queue == 0) {
+ D_HT("HW queue is empty\n");
+ tid_data->agg.state = IL_AGG_ON;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ } else {
+ D_HT("HW queue is NOT empty: %d packets in HW queue\n",
+ tid_data->tfds_in_queue);
+ tid_data->agg.state = IL_EMPTYING_HW_QUEUE_ADDBA;
+ }
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return ret;
+}
+
+/**
+ * txq_id must be greater than IL49_FIRST_AMPDU_QUEUE
+ * il->lock must be held by the caller
+ */
+static int
+il4965_txq_agg_disable(struct il_priv *il, u16 txq_id, u16 ssn_idx, u8 tx_fifo)
+{
+ if ((IL49_FIRST_AMPDU_QUEUE > txq_id) ||
+ (IL49_FIRST_AMPDU_QUEUE +
+ il->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
+ IL_WARN("queue number out of range: %d, must be %d to %d\n",
+ txq_id, IL49_FIRST_AMPDU_QUEUE,
+ IL49_FIRST_AMPDU_QUEUE +
+ il->cfg->base_params->num_of_ampdu_queues - 1);
+ return -EINVAL;
+ }
+
+ il4965_tx_queue_stop_scheduler(il, txq_id);
+
+ il_clear_bits_prph(il, IL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
+
+ il->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+ il->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+ /* assumes that ssn_idx is valid (!= 0xFFF) */
+ il4965_set_wr_ptrs(il, txq_id, ssn_idx);
+
+ il_clear_bits_prph(il, IL49_SCD_INTERRUPT_MASK, (1 << txq_id));
+ il_txq_ctx_deactivate(il, txq_id);
+ il4965_tx_queue_set_status(il, &il->txq[txq_id], tx_fifo, 0);
+
+ return 0;
+}
+
+int
+il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid)
+{
+ int tx_fifo_id, txq_id, sta_id, ssn;
+ struct il_tid_data *tid_data;
+ int write_ptr, read_ptr;
+ unsigned long flags;
+
+ tx_fifo_id = il4965_get_fifo_from_tid(il_rxon_ctx_from_vif(vif), tid);
+ if (unlikely(tx_fifo_id < 0))
+ return tx_fifo_id;
+
+ sta_id = il_sta_id(sta);
+
+ if (sta_id == IL_INVALID_STATION) {
+ IL_ERR("Invalid station for AGG tid %d\n", tid);
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+
+ tid_data = &il->stations[sta_id].tid[tid];
+ ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
+ txq_id = tid_data->agg.txq_id;
+
+ switch (il->stations[sta_id].tid[tid].agg.state) {
+ case IL_EMPTYING_HW_QUEUE_ADDBA:
+ /*
+ * This can happen if the peer stops aggregation
+ * again before we've had a chance to drain the
+ * queue we selected previously, i.e. before the
+ * session was really started completely.
+ */
+ D_HT("AGG stop before setup done\n");
+ goto turn_off;
+ case IL_AGG_ON:
+ break;
+ default:
+ IL_WARN("Stopping AGG while state not ON or starting\n");
+ }
+
+ write_ptr = il->txq[txq_id].q.write_ptr;
+ read_ptr = il->txq[txq_id].q.read_ptr;
+
+ /* The queue is not empty */
+ if (write_ptr != read_ptr) {
+ D_HT("Stopping a non empty AGG HW QUEUE\n");
+ il->stations[sta_id].tid[tid].agg.state =
+ IL_EMPTYING_HW_QUEUE_DELBA;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return 0;
+ }
+
+ D_HT("HW queue is empty\n");
+turn_off:
+ il->stations[sta_id].tid[tid].agg.state = IL_AGG_OFF;
+
+ /* do not restore/save irqs */
+ spin_unlock(&il->sta_lock);
+ spin_lock(&il->lock);
+
+ /*
+ * The only reason this call can fail is a queue number out of range,
+ * which can happen if the uCode is reloaded and all the station
+ * information is lost. If it is outside the range, there is no need
+ * to deactivate the uCode queue; just return "success" to allow
+ * mac80211 to clean up its own data.
+ */
+ il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo_id);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+
+ return 0;
+}
+
+int
+il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id)
+{
+ struct il_queue *q = &il->txq[txq_id].q;
+ u8 *addr = il->stations[sta_id].sta.sta.addr;
+ struct il_tid_data *tid_data = &il->stations[sta_id].tid[tid];
+ struct il_rxon_context *ctx;
+
+ ctx = &il->ctx;
+
+ lockdep_assert_held(&il->sta_lock);
+
+ switch (il->stations[sta_id].tid[tid].agg.state) {
+ case IL_EMPTYING_HW_QUEUE_DELBA:
+ /* We are reclaiming the last packet of the aggregated HW queue */
+ if (txq_id == tid_data->agg.txq_id &&
+ q->read_ptr == q->write_ptr) {
+ u16 ssn = SEQ_TO_SN(tid_data->seq_number);
+ int tx_fifo = il4965_get_fifo_from_tid(ctx, tid);
+ D_HT("HW queue empty: continue DELBA flow\n");
+ il4965_txq_agg_disable(il, txq_id, ssn, tx_fifo);
+ tid_data->agg.state = IL_AGG_OFF;
+ ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
+ }
+ break;
+ case IL_EMPTYING_HW_QUEUE_ADDBA:
+ /* We are reclaiming the last packet of the queue */
+ if (tid_data->tfds_in_queue == 0) {
+ D_HT("HW queue empty: continue ADDBA flow\n");
+ tid_data->agg.state = IL_AGG_ON;
+ ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static void
+il4965_non_agg_tx_status(struct il_priv *il, struct il_rxon_context *ctx,
+ const u8 *addr1)
+{
+ struct ieee80211_sta *sta;
+ struct il_station_priv *sta_priv;
+
+ rcu_read_lock();
+ sta = ieee80211_find_sta(ctx->vif, addr1);
+ if (sta) {
+ sta_priv = (void *)sta->drv_priv;
+ /* avoid atomic ops if this isn't a client */
+ if (sta_priv->client &&
+ atomic_dec_return(&sta_priv->pending_frames) == 0)
+ ieee80211_sta_block_awake(il->hw, sta, false);
+ }
+ rcu_read_unlock();
+}
+
+static void
+il4965_tx_status(struct il_priv *il, struct il_tx_info *tx_info, bool is_agg)
+{
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+
+ if (!is_agg)
+ il4965_non_agg_tx_status(il, tx_info->ctx, hdr->addr1);
+
+ ieee80211_tx_status_irqsafe(il->hw, tx_info->skb);
+}
+
+int
+il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx)
+{
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct il_queue *q = &txq->q;
+ struct il_tx_info *tx_info;
+ int nfreed = 0;
+ struct ieee80211_hdr *hdr;
+
+ if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
+ IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
+ "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
+ q->write_ptr, q->read_ptr);
+ return 0;
+ }
+
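+ /* Free every entry from read_ptr up to and including idx,
+ * counting reclaimed QoS data frames in nfreed. */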
+ for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+ q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+ tx_info = &txq->txb[txq->q.read_ptr];
+
+ if (WARN_ON_ONCE(tx_info->skb == NULL))
+ continue;
+
+ hdr = (struct ieee80211_hdr *)tx_info->skb->data;
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ nfreed++;
+
+ il4965_tx_status(il, tx_info,
+ txq_id >= IL4965_FIRST_AMPDU_QUEUE);
+ tx_info->skb = NULL;
+
+ il->cfg->ops->lib->txq_free_tfd(il, txq);
+ }
+ return nfreed;
+}
+
+/**
+ * il4965_tx_status_reply_compressed_ba - Update tx status from block-ack
+ *
+ * Go through block-ack's bitmap of ACK'd frames, update driver's record of
+ * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
+ */
+static int
+il4965_tx_status_reply_compressed_ba(struct il_priv *il, struct il_ht_agg *agg,
+ struct il_compressed_ba_resp *ba_resp)
+{
+ int i, sh, ack;
+ u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
+ u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+ int successes = 0;
+ struct ieee80211_tx_info *info;
+ u64 bitmap, sent_bitmap;
+
+ if (unlikely(!agg->wait_for_ba)) {
+ if (unlikely(ba_resp->bitmap))
+ IL_ERR("Received BA when not expected\n");
+ return -EINVAL;
+ }
+
+ /* Mark that the expected block-ack response arrived */
+ agg->wait_for_ba = 0;
+ D_TX_REPLY("BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
+
+ /* Calculate shift to align block-ack bits with our Tx win bits */
+ sh = agg->start_idx - SEQ_TO_IDX(seq_ctl >> 4);
+ if (sh < 0) /* tbw something is wrong with indices */
+ sh += 0x100;
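+ /*
+ * Illustrative example (hypothetical numbers): if our Tx win starts
+ * at idx 10 and the BA reports a start idx of 8, sh = 2 and the BA
+ * bitmap below is shifted right by 2 to line up with agg->bitmap.
+ */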
+
+ if (agg->frame_count > (64 - sh)) {
+ D_TX_REPLY("more frames than bitmap size");
+ return -1;
+ }
+
+ /* don't use 64-bit values for now */
+ bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
+
+ /* check for success or failure according to the
+ * transmitted bitmap and block-ack bitmap */
+ sent_bitmap = bitmap & agg->bitmap;
+
+ /* For each frame attempted in aggregation,
+ * update driver's record of tx frame's status. */
+ i = 0;
+ while (sent_bitmap) {
+ ack = sent_bitmap & 1ULL;
+ successes += ack;
+ D_TX_REPLY("%s ON i=%d idx=%d raw=%d\n", ack ? "ACK" : "NACK",
+ i, (agg->start_idx + i) & 0xff, agg->start_idx + i);
+ sent_bitmap >>= 1;
+ ++i;
+ }
+
+ D_TX_REPLY("Bitmap %llx\n", (unsigned long long)bitmap);
+
+ info = IEEE80211_SKB_CB(il->txq[scd_flow].txb[agg->start_idx].skb);
+ memset(&info->status, 0, sizeof(info->status));
+ info->flags |= IEEE80211_TX_STAT_ACK;
+ info->flags |= IEEE80211_TX_STAT_AMPDU;
+ info->status.ampdu_ack_len = successes;
+ info->status.ampdu_len = agg->frame_count;
+ il4965_hwrate_to_tx_control(il, agg->rate_n_flags, info);
+
+ return 0;
+}
+
+/**
+ * il4965_hwrate_to_tx_control - translate uCode response to mac80211 tx status control values
+ */
+void
+il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
+ struct ieee80211_tx_info *info)
+{
+ struct ieee80211_tx_rate *r = &info->control.rates[0];
+
+ info->antenna_sel_tx =
+ ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
+ if (rate_n_flags & RATE_MCS_HT_MSK)
+ r->flags |= IEEE80211_TX_RC_MCS;
+ if (rate_n_flags & RATE_MCS_GF_MSK)
+ r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
+ if (rate_n_flags & RATE_MCS_HT40_MSK)
+ r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
+ if (rate_n_flags & RATE_MCS_DUP_MSK)
+ r->flags |= IEEE80211_TX_RC_DUP_DATA;
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ r->flags |= IEEE80211_TX_RC_SHORT_GI;
+ r->idx = il4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
+}
+
+/**
+ * il4965_hdl_compressed_ba - Handler for N_COMPRESSED_BA
+ *
+ * Handles block-acknowledge notification from device, which reports success
+ * of frames sent via aggregation.
+ */
+void
+il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
+ struct il_tx_queue *txq = NULL;
+ struct il_ht_agg *agg;
+ int idx;
+ int sta_id;
+ int tid;
+ unsigned long flags;
+
+ /* "flow" corresponds to Tx queue */
+ u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
+
+ /* "ssn" is start of block-ack Tx win, corresponds to idx
+ * (in Tx queue's circular buffer) of first TFD/frame in win */
+ u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
+
+ if (scd_flow >= il->hw_params.max_txq_num) {
+ IL_ERR("BUG_ON scd_flow is bigger than number of queues\n");
+ return;
+ }
+
+ txq = &il->txq[scd_flow];
+ sta_id = ba_resp->sta_id;
+ tid = ba_resp->tid;
+ agg = &il->stations[sta_id].tid[tid].agg;
+ if (unlikely(agg->txq_id != scd_flow)) {
+ /*
+ * FIXME: this is a uCode bug which needs to be addressed;
+ * log the information and return for now.
+ * Since it can happen very often and in order not to fill
+ * the syslog, don't enable this logging by default.
+ */
+ D_TX_REPLY("BA scd_flow %d does not match txq_id %d\n",
+ scd_flow, agg->txq_id);
+ return;
+ }
+
+ /* Find idx just before block-ack win */
+ idx = il_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+
+ D_TX_REPLY("N_COMPRESSED_BA [%d] Received from %pM, " "sta_id = %d\n",
+ agg->wait_for_ba, (u8 *) &ba_resp->sta_addr_lo32,
+ ba_resp->sta_id);
+ D_TX_REPLY("TID = %d, SeqCtl = %d, bitmap = 0x%llx," "scd_flow = "
+ "%d, scd_ssn = %d\n", ba_resp->tid, ba_resp->seq_ctl,
+ (unsigned long long)le64_to_cpu(ba_resp->bitmap),
+ ba_resp->scd_flow, ba_resp->scd_ssn);
+ D_TX_REPLY("DAT start_idx = %d, bitmap = 0x%llx\n", agg->start_idx,
+ (unsigned long long)agg->bitmap);
+
+ /* Update driver's record of ACK vs. not for each frame in win */
+ il4965_tx_status_reply_compressed_ba(il, agg, ba_resp);
+
+ /* Release all TFDs before the SSN, i.e. all TFDs in front of
+ * block-ack win (we assume that they've been successfully
+ * transmitted ... if not, it's too late anyway). */
+ if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
+ /* calculate mac80211 ampdu sw queue to wake */
+ int freed = il4965_tx_queue_reclaim(il, scd_flow, idx);
+ il4965_free_tfds_in_queue(il, sta_id, tid, freed);
+
+ if (il_queue_space(&txq->q) > txq->q.low_mark &&
+ il->mac80211_registered &&
+ agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
+ il_wake_queue(il, txq);
+
+ il4965_txq_check_empty(il, sta_id, tid, scd_flow);
+ }
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+const char *
+il4965_get_tx_fail_reason(u32 status)
+{
+#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
+#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
+
+ switch (status & TX_STATUS_MSK) {
+ case TX_STATUS_SUCCESS:
+ return "SUCCESS";
+ TX_STATUS_POSTPONE(DELAY);
+ TX_STATUS_POSTPONE(FEW_BYTES);
+ TX_STATUS_POSTPONE(QUIET_PERIOD);
+ TX_STATUS_POSTPONE(CALC_TTAK);
+ TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
+ TX_STATUS_FAIL(SHORT_LIMIT);
+ TX_STATUS_FAIL(LONG_LIMIT);
+ TX_STATUS_FAIL(FIFO_UNDERRUN);
+ TX_STATUS_FAIL(DRAIN_FLOW);
+ TX_STATUS_FAIL(RFKILL_FLUSH);
+ TX_STATUS_FAIL(LIFE_EXPIRE);
+ TX_STATUS_FAIL(DEST_PS);
+ TX_STATUS_FAIL(HOST_ABORTED);
+ TX_STATUS_FAIL(BT_RETRY);
+ TX_STATUS_FAIL(STA_INVALID);
+ TX_STATUS_FAIL(FRAG_DROPPED);
+ TX_STATUS_FAIL(TID_DISABLE);
+ TX_STATUS_FAIL(FIFO_FLUSHED);
+ TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
+ TX_STATUS_FAIL(PASSIVE_NO_RX);
+ TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
+ }
+
+ return "UNKNOWN";
+
+#undef TX_STATUS_FAIL
+#undef TX_STATUS_POSTPONE
+}
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
+static struct il_link_quality_cmd *
+il4965_sta_alloc_lq(struct il_priv *il, u8 sta_id)
+{
+ int i, r;
+ struct il_link_quality_cmd *link_cmd;
+ u32 rate_flags = 0;
+ __le32 rate_n_flags;
+
+ link_cmd = kzalloc(sizeof(struct il_link_quality_cmd), GFP_KERNEL);
+ if (!link_cmd) {
+ IL_ERR("Unable to allocate memory for LQ cmd.\n");
+ return NULL;
+ }
+ /* Set up the rate scaling to start at selected rate, fall back
+ * all the way down to 1M in IEEE order, and then spin on 1M */
+ if (il->band == IEEE80211_BAND_5GHZ)
+ r = RATE_6M_IDX;
+ else
+ r = RATE_1M_IDX;
+
+ if (r >= IL_FIRST_CCK_RATE && r <= IL_LAST_CCK_RATE)
+ rate_flags |= RATE_MCS_CCK_MSK;
+
+ rate_flags |=
+ il4965_first_antenna(il->hw_params.
+ valid_tx_ant) << RATE_MCS_ANT_POS;
+ rate_n_flags = cpu_to_le32(il_rates[r].plcp | rate_flags);
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+ link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
+
+ link_cmd->general_params.single_stream_ant_msk =
+ il4965_first_antenna(il->hw_params.valid_tx_ant);
+
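+ /* Dual-stream mask: all valid TX antennas except the first one;
+ * if that leaves nothing, fall back to ANT_AB below. */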
+ link_cmd->general_params.dual_stream_ant_msk =
+ il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params.
+ valid_tx_ant);
+ if (!link_cmd->general_params.dual_stream_ant_msk) {
+ link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
+ } else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
+ link_cmd->general_params.dual_stream_ant_msk =
+ il->hw_params.valid_tx_ant;
+ }
+
+ link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+ link_cmd->agg_params.agg_time_limit =
+ cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+
+ link_cmd->sta_id = sta_id;
+
+ return link_cmd;
+}
+
+/*
+ * il4965_add_bssid_station - Add the special IBSS BSSID station
+ *
+ * Function sleeps.
+ */
+int
+il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx,
+ const u8 *addr, u8 *sta_id_r)
+{
+ int ret;
+ u8 sta_id;
+ struct il_link_quality_cmd *link_cmd;
+ unsigned long flags;
+
+ if (sta_id_r)
+ *sta_id_r = IL_INVALID_STATION;
+
+ ret = il_add_station_common(il, ctx, addr, 0, NULL, &sta_id);
+ if (ret) {
+ IL_ERR("Unable to add station %pM\n", addr);
+ return ret;
+ }
+
+ if (sta_id_r)
+ *sta_id_r = sta_id;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].used |= IL_STA_LOCAL;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ /* Set up default rate scaling table in device's station table */
+ link_cmd = il4965_sta_alloc_lq(il, sta_id);
+ if (!link_cmd) {
+ IL_ERR("Unable to initialize rate scaling for station %pM.\n",
+ addr);
+ return -ENOMEM;
+ }
+
+ ret = il_send_lq_cmd(il, ctx, link_cmd, CMD_SYNC, true);
+ if (ret)
+ IL_ERR("Link quality command failed (%d)\n", ret);
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].lq = link_cmd;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return 0;
+}
+
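+/*
+ * Upload all WEP_KEYS_MAX default (static) WEP keys to the uCode in a
+ * single ctx->wep_key_cmd host command; empty slots are marked with
+ * WEP_INVALID_OFFSET. Nothing is sent when no key is configured,
+ * unless send_if_empty is set.
+ */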
+static int
+il4965_static_wepkey_cmd(struct il_priv *il, struct il_rxon_context *ctx,
+ bool send_if_empty)
+{
+ int i, not_empty = 0;
+ u8 buff[sizeof(struct il_wep_cmd) +
+ sizeof(struct il_wep_key) * WEP_KEYS_MAX];
+ struct il_wep_cmd *wep_cmd = (struct il_wep_cmd *)buff;
+ size_t cmd_size = sizeof(struct il_wep_cmd);
+ struct il_host_cmd cmd = {
+ .id = ctx->wep_key_cmd,
+ .data = wep_cmd,
+ .flags = CMD_SYNC,
+ };
+
+ might_sleep();
+
+ memset(wep_cmd, 0,
+ cmd_size + (sizeof(struct il_wep_key) * WEP_KEYS_MAX));
+
+ for (i = 0; i < WEP_KEYS_MAX; i++) {
+ wep_cmd->key[i].key_idx = i;
+ if (ctx->wep_keys[i].key_size) {
+ wep_cmd->key[i].key_offset = i;
+ not_empty = 1;
+ } else {
+ wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
+ }
+
+ wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
+ memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
+ ctx->wep_keys[i].key_size);
+ }
+
+ wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
+ wep_cmd->num_keys = WEP_KEYS_MAX;
+
+ cmd_size += sizeof(struct il_wep_key) * WEP_KEYS_MAX;
+
+ cmd.len = cmd_size;
+
+ if (not_empty || send_if_empty)
+ return il_send_cmd(il, &cmd);
+ else
+ return 0;
+}
+
+int
+il4965_restore_default_wep_keys(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ lockdep_assert_held(&il->mutex);
+
+ return il4965_static_wepkey_cmd(il, ctx, false);
+}
+
+int
+il4965_remove_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf)
+{
+ int ret;
+
+ lockdep_assert_held(&il->mutex);
+
+ D_WEP("Removing default WEP key: idx=%d\n", keyconf->keyidx);
+
+ memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
+ if (il_is_rfkill(il)) {
+ D_WEP("Not sending C_WEPKEY command due to RFKILL.\n");
+ /* but keys in device are clear anyway so return success */
+ return 0;
+ }
+ ret = il4965_static_wepkey_cmd(il, ctx, 1);
+ D_WEP("Remove default WEP key: idx=%d ret=%d\n", keyconf->keyidx, ret);
+
+ return ret;
+}
+
+int
+il4965_set_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf)
+{
+ int ret;
+
+ lockdep_assert_held(&il->mutex);
+
+ if (keyconf->keylen != WEP_KEY_LEN_128 &&
+ keyconf->keylen != WEP_KEY_LEN_64) {
+ D_WEP("Bad WEP key length %d\n", keyconf->keylen);
+ return -EINVAL;
+ }
+
+ keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
+ keyconf->hw_key_idx = HW_KEY_DEFAULT;
+ il->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
+
+ ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
+ memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
+ keyconf->keylen);
+
+ ret = il4965_static_wepkey_cmd(il, ctx, false);
+ D_WEP("Set default WEP key: len=%d idx=%d ret=%d\n", keyconf->keylen,
+ keyconf->keyidx, ret);
+
+ return ret;
+}
+
+static int
+il4965_set_wep_dynamic_key_info(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ unsigned long flags;
+ __le16 key_flags = 0;
+ struct il_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&il->mutex);
+
+ keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
+
+ key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
+ key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+ key_flags &= ~STA_KEY_FLG_INVALID;
+
+ if (keyconf->keylen == WEP_KEY_LEN_128)
+ key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
+
+ if (sta_id == ctx->bcast_sta_id)
+ key_flags |= STA_KEY_MULTICAST_MSK;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+
+ il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+ il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+ il->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
+
+ memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
+
+ memcpy(&il->stations[sta_id].sta.key.key[3], keyconf->key,
+ keyconf->keylen);
+
+ if ((il->stations[sta_id].sta.key.
+ key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
+ il->stations[sta_id].sta.key.key_offset =
+ il_get_free_ucode_key_idx(il);
+ /* else, we are overriding an existing key => no need to allocate room
+ * in uCode. */
+
+ WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ "no space for a new key");
+
+ il->stations[sta_id].sta.key.key_flags = key_flags;
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+static int
+il4965_set_ccmp_dynamic_key_info(struct il_priv *il,
+ struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ unsigned long flags;
+ __le16 key_flags = 0;
+ struct il_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&il->mutex);
+
+ key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
+ key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+ key_flags &= ~STA_KEY_FLG_INVALID;
+
+ if (sta_id == ctx->bcast_sta_id)
+ key_flags |= STA_KEY_MULTICAST_MSK;
+
+ keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+ il->stations[sta_id].keyinfo.keylen = keyconf->keylen;
+
+ memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, keyconf->keylen);
+
+ memcpy(il->stations[sta_id].sta.key.key, keyconf->key, keyconf->keylen);
+
+ if ((il->stations[sta_id].sta.key.
+ key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
+ il->stations[sta_id].sta.key.key_offset =
+ il_get_free_ucode_key_idx(il);
+ /* else, we are overriding an existing key => no need to allocate room
+ * in uCode. */
+
+ WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ "no space for a new key");
+
+ il->stations[sta_id].sta.key.key_flags = key_flags;
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+static int
+il4965_set_tkip_dynamic_key_info(struct il_priv *il,
+ struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ unsigned long flags;
+ int ret = 0;
+ __le16 key_flags = 0;
+
+ key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
+ key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+ key_flags &= ~STA_KEY_FLG_INVALID;
+
+ if (sta_id == ctx->bcast_sta_id)
+ key_flags |= STA_KEY_MULTICAST_MSK;
+
+ keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+
+ il->stations[sta_id].keyinfo.cipher = keyconf->cipher;
+ il->stations[sta_id].keyinfo.keylen = 16;
+
+ if ((il->stations[sta_id].sta.key.
+ key_flags & STA_KEY_FLG_ENCRYPT_MSK) == STA_KEY_FLG_NO_ENC)
+ il->stations[sta_id].sta.key.key_offset =
+ il_get_free_ucode_key_idx(il);
+ /* else, we are overriding an existing key => no need to allocate room
+ * in uCode. */
+
+ WARN(il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
+ "no space for a new key");
+
+ il->stations[sta_id].sta.key.key_flags = key_flags;
+
+ /* This copy is actually not needed: we get the key with each TX */
+ memcpy(il->stations[sta_id].keyinfo.key, keyconf->key, 16);
+
+ memcpy(il->stations[sta_id].sta.key.key, keyconf->key, 16);
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return ret;
+}
+
+void
+il4965_update_tkip_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
+{
+ u8 sta_id;
+ unsigned long flags;
+ int i;
+
+ if (il_scan_cancel(il)) {
+ /* cancel scan failed, just live w/ bad key and rely
+ briefly on SW decryption */
+ return;
+ }
+
+ sta_id = il_sta_id_or_broadcast(il, ctx, sta);
+ if (sta_id == IL_INVALID_STATION)
+ return;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+
+ il->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
+
+ for (i = 0; i < 5; i++)
+ il->stations[sta_id].sta.key.tkip_rx_ttak[i] =
+ cpu_to_le16(phase1key[i]);
+
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+ il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+}
+
+int
+il4965_remove_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ unsigned long flags;
+ u16 key_flags;
+ u8 keyidx;
+ struct il_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&il->mutex);
+
+ ctx->key_mapping_keys--;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ key_flags = le16_to_cpu(il->stations[sta_id].sta.key.key_flags);
+ keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
+
+ D_WEP("Remove dynamic key: idx=%d sta=%d\n", keyconf->keyidx, sta_id);
+
+ if (keyconf->keyidx != keyidx) {
+ /* We need to remove a key whose idx differs from the one
+ * in the uCode. This means that the key we were asked to remove
+ * has already been replaced by another one with a different idx.
+ * Don't do anything and return ok.
+ */
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return 0;
+ }
+
+ if (il->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
+ IL_WARN("Removing wrong key %d 0x%x\n", keyconf->keyidx,
+ key_flags);
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return 0;
+ }
+
+ if (!test_and_clear_bit
+ (il->stations[sta_id].sta.key.key_offset, &il->ucode_key_table))
+ IL_ERR("idx %d not used in uCode key table.\n",
+ il->stations[sta_id].sta.key.key_offset);
+ memset(&il->stations[sta_id].keyinfo, 0, sizeof(struct il_hw_key));
+ memset(&il->stations[sta_id].sta.key, 0, sizeof(struct il4965_keyinfo));
+ il->stations[sta_id].sta.key.key_flags =
+ STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
+ il->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+
+ if (il_is_rfkill(il)) {
+ D_WEP
+ ("Not sending C_ADD_STA command because RFKILL enabled.\n");
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return 0;
+ }
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+int
+il4965_set_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf, u8 sta_id)
+{
+ int ret;
+
+ lockdep_assert_held(&il->mutex);
+
+ ctx->key_mapping_keys++;
+ keyconf->hw_key_idx = HW_KEY_DYNAMIC;
+
+ switch (keyconf->cipher) {
+ case WLAN_CIPHER_SUITE_CCMP:
+ ret =
+ il4965_set_ccmp_dynamic_key_info(il, ctx, keyconf, sta_id);
+ break;
+ case WLAN_CIPHER_SUITE_TKIP:
+ ret =
+ il4965_set_tkip_dynamic_key_info(il, ctx, keyconf, sta_id);
+ break;
+ case WLAN_CIPHER_SUITE_WEP40:
+ case WLAN_CIPHER_SUITE_WEP104:
+ ret = il4965_set_wep_dynamic_key_info(il, ctx, keyconf, sta_id);
+ break;
+ default:
+ IL_ERR("Unknown alg: %s cipher = %x\n", __func__,
+ keyconf->cipher);
+ ret = -EINVAL;
+ }
+
+ D_WEP("Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
+ keyconf->cipher, keyconf->keylen, keyconf->keyidx, sta_id, ret);
+
+ return ret;
+}
+
+/**
+ * il4965_alloc_bcast_station - add broadcast station into driver's station table.
+ *
+ * This adds the broadcast station into the driver's station table
+ * and marks it driver active, so that it will be restored to the
+ * device at the next best time.
+ */
+int
+il4965_alloc_bcast_station(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ struct il_link_quality_cmd *link_cmd;
+ unsigned long flags;
+ u8 sta_id;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ sta_id = il_prep_station(il, ctx, il_bcast_addr, false, NULL);
+ if (sta_id == IL_INVALID_STATION) {
+ IL_ERR("Unable to prepare broadcast station\n");
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return -EINVAL;
+ }
+
+ il->stations[sta_id].used |= IL_STA_DRIVER_ACTIVE;
+ il->stations[sta_id].used |= IL_STA_BCAST;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ link_cmd = il4965_sta_alloc_lq(il, sta_id);
+ if (!link_cmd) {
+ IL_ERR
+ ("Unable to initialize rate scaling for bcast station.\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].lq = link_cmd;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return 0;
+}
+
+/**
+ * il4965_update_bcast_station - update broadcast station's LQ command
+ *
+ * Only used by iwl4965. Placed here to have all bcast station management
+ * code together.
+ */
+static int
+il4965_update_bcast_station(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ unsigned long flags;
+ struct il_link_quality_cmd *link_cmd;
+ u8 sta_id = ctx->bcast_sta_id;
+
+ link_cmd = il4965_sta_alloc_lq(il, sta_id);
+ if (!link_cmd) {
+ IL_ERR("Unable to initialize rate scaling for bcast sta.\n");
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ if (il->stations[sta_id].lq)
+ kfree(il->stations[sta_id].lq);
+ else
+ D_INFO("Bcast sta rate scaling has not been initialized.\n");
+ il->stations[sta_id].lq = link_cmd;
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return 0;
+}
+
+int
+il4965_update_bcast_stations(struct il_priv *il)
+{
+ return il4965_update_bcast_station(il, &il->ctx);
+}
+
+/**
+ * il4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
+ */
+int
+il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid)
+{
+ unsigned long flags;
+ struct il_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&il->mutex);
+
+ /* Remove "disable" flag, to enable Tx for this TID */
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
+ il->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+int
+il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta, int tid,
+ u16 ssn)
+{
+ unsigned long flags;
+ int sta_id;
+ struct il_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&il->mutex);
+
+ sta_id = il_sta_id(sta);
+ if (sta_id == IL_INVALID_STATION)
+ return -ENXIO;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].sta.station_flags_msk = 0;
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
+ il->stations[sta_id].sta.add_immediate_ba_tid = (u8) tid;
+ il->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
+int
+il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta, int tid)
+{
+ unsigned long flags;
+ int sta_id;
+ struct il_addsta_cmd sta_cmd;
+
+ lockdep_assert_held(&il->mutex);
+
+ sta_id = il_sta_id(sta);
+ if (sta_id == IL_INVALID_STATION) {
+ IL_ERR("Invalid station for AGG tid %d\n", tid);
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].sta.station_flags_msk = 0;
+ il->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
+ il->stations[sta_id].sta.remove_immediate_ba_tid = (u8) tid;
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+}
+
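+/*
+ * Asynchronously tell the uCode how many buffered frames it may send
+ * to a station that is in power-save (sleep_tx_count).
+ */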
+void
+il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ il->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
+ il->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
+ il->stations[sta_id].sta.sta.modify_mask =
+ STA_MODIFY_SLEEP_TX_COUNT_MSK;
+ il->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
+ il->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ il_send_add_sta(il, &il->stations[sta_id].sta, CMD_ASYNC);
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+}
+
+void
+il4965_update_chain_flags(struct il_priv *il)
+{
+ if (il->cfg->ops->hcmd->set_rxon_chain) {
+ il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
+ if (il->ctx.active.rx_chain != il->ctx.staging.rx_chain)
+ il_commit_rxon(il, &il->ctx);
+ }
+}
+
+static void
+il4965_clear_free_frames(struct il_priv *il)
+{
+ struct list_head *element;
+
+ D_INFO("%d frames on pre-allocated heap on clear.\n", il->frames_count);
+
+ while (!list_empty(&il->free_frames)) {
+ element = il->free_frames.next;
+ list_del(element);
+ kfree(list_entry(element, struct il_frame, list));
+ il->frames_count--;
+ }
+
+ if (il->frames_count) {
+ IL_WARN("%d frames still in use. Did we lose one?\n",
+ il->frames_count);
+ il->frames_count = 0;
+ }
+}
+
+static struct il_frame *
+il4965_get_free_frame(struct il_priv *il)
+{
+ struct il_frame *frame;
+ struct list_head *element;
+ if (list_empty(&il->free_frames)) {
+ frame = kzalloc(sizeof(*frame), GFP_KERNEL);
+ if (!frame) {
+ IL_ERR("Could not allocate frame!\n");
+ return NULL;
+ }
+
+ il->frames_count++;
+ return frame;
+ }
+
+ element = il->free_frames.next;
+ list_del(element);
+ return list_entry(element, struct il_frame, list);
+}
+
+static void
+il4965_free_frame(struct il_priv *il, struct il_frame *frame)
+{
+ memset(frame, 0, sizeof(*frame));
+ list_add(&frame->list, &il->free_frames);
+}
+
+static u32
+il4965_fill_beacon_frame(struct il_priv *il, struct ieee80211_hdr *hdr,
+ int left)
+{
+ lockdep_assert_held(&il->mutex);
+
+ if (!il->beacon_skb)
+ return 0;
+
+ if (il->beacon_skb->len > left)
+ return 0;
+
+ memcpy(hdr, il->beacon_skb->data, il->beacon_skb->len);
+
+ return il->beacon_skb->len;
+}
+
+/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
+static void
+il4965_set_beacon_tim(struct il_priv *il,
+ struct il_tx_beacon_cmd *tx_beacon_cmd, u8 * beacon,
+ u32 frame_size)
+{
+ u16 tim_idx;
+ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
+
+ /*
+ * The idx is relative to frame start but we start looking at the
+ * variable-length part of the beacon.
+ */
+ tim_idx = mgmt->u.beacon.variable - beacon;
+
+ /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
+ while ((tim_idx < (frame_size - 2)) &&
+ (beacon[tim_idx] != WLAN_EID_TIM))
+ tim_idx += beacon[tim_idx + 1] + 2;
+
+ /* If TIM field was found, set variables */
+ if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
+ tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
+ tx_beacon_cmd->tim_size = beacon[tim_idx + 1];
+ } else
+ IL_WARN("Unable to find TIM Element in beacon\n");
+}
+
+static unsigned int
+il4965_hw_get_beacon_cmd(struct il_priv *il, struct il_frame *frame)
+{
+ struct il_tx_beacon_cmd *tx_beacon_cmd;
+ u32 frame_size;
+ u32 rate_flags;
+ u32 rate;
+ /*
+ * We have to set up the TX command, the TX Beacon command, and the
+ * beacon contents.
+ */
+
+ lockdep_assert_held(&il->mutex);
+
+ if (!il->beacon_ctx) {
+ IL_ERR("trying to build beacon w/o beacon context!\n");
+ return 0;
+ }
+
+ /* Initialize memory */
+ tx_beacon_cmd = &frame->u.beacon;
+ memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
+
+ /* Set up TX beacon contents */
+ frame_size =
+ il4965_fill_beacon_frame(il, tx_beacon_cmd->frame,
+ sizeof(frame->u) - sizeof(*tx_beacon_cmd));
+ if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
+ return 0;
+ if (!frame_size)
+ return 0;
+
+ /* Set up TX command fields */
+ tx_beacon_cmd->tx.len = cpu_to_le16((u16) frame_size);
+ tx_beacon_cmd->tx.sta_id = il->beacon_ctx->bcast_sta_id;
+ tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
+ tx_beacon_cmd->tx.tx_flags =
+ TX_CMD_FLG_SEQ_CTL_MSK | TX_CMD_FLG_TSF_MSK |
+ TX_CMD_FLG_STA_RATE_MSK;
+
+ /* Set up TX beacon command fields */
+ il4965_set_beacon_tim(il, tx_beacon_cmd, (u8 *) tx_beacon_cmd->frame,
+ frame_size);
+
+ /* Set up packet rate and flags */
+ rate = il_get_lowest_plcp(il, il->beacon_ctx);
+ il4965_toggle_tx_ant(il, &il->mgmt_tx_ant, il->hw_params.valid_tx_ant);
+ rate_flags = BIT(il->mgmt_tx_ant) << RATE_MCS_ANT_POS;
+ if ((rate >= IL_FIRST_CCK_RATE) && (rate <= IL_LAST_CCK_RATE))
+ rate_flags |= RATE_MCS_CCK_MSK;
+ tx_beacon_cmd->tx.rate_n_flags = cpu_to_le32(rate | rate_flags);
+
+ return sizeof(*tx_beacon_cmd) + frame_size;
+}
+
+int
+il4965_send_beacon_cmd(struct il_priv *il)
+{
+ struct il_frame *frame;
+ unsigned int frame_size;
+ int rc;
+
+ frame = il4965_get_free_frame(il);
+ if (!frame) {
+ IL_ERR("Could not obtain free frame buffer for beacon "
+ "command.\n");
+ return -ENOMEM;
+ }
+
+ frame_size = il4965_hw_get_beacon_cmd(il, frame);
+ if (!frame_size) {
+ IL_ERR("Error configuring the beacon command\n");
+ il4965_free_frame(il, frame);
+ return -EINVAL;
+ }
+
+ rc = il_send_cmd_pdu(il, C_TX_BEACON, frame_size, &frame->u.cmd[0]);
+
+ il4965_free_frame(il, frame);
+
+ return rc;
+}
+
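+/*
+ * TFD Tx-buffer (TB) entries pack a 36-bit DMA address and a 12-bit
+ * length: 'lo' holds address bits 0-31, the low 4 bits of 'hi_n_len'
+ * hold address bits 32-35, and its upper 12 bits hold the length.
+ */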
+static inline dma_addr_t
+il4965_tfd_tb_get_addr(struct il_tfd *tfd, u8 idx)
+{
+ struct il_tfd_tb *tb = &tfd->tbs[idx];
+
+ dma_addr_t addr = get_unaligned_le32(&tb->lo);
+ if (sizeof(dma_addr_t) > sizeof(u32))
+ addr |=
+ ((dma_addr_t) (le16_to_cpu(tb->hi_n_len) & 0xF) << 16) <<
+ 16;
+
+ return addr;
+}
+
+static inline u16
+il4965_tfd_tb_get_len(struct il_tfd *tfd, u8 idx)
+{
+ struct il_tfd_tb *tb = &tfd->tbs[idx];
+
+ return le16_to_cpu(tb->hi_n_len) >> 4;
+}
+
+static inline void
+il4965_tfd_set_tb(struct il_tfd *tfd, u8 idx, dma_addr_t addr, u16 len)
+{
+ struct il_tfd_tb *tb = &tfd->tbs[idx];
+ u16 hi_n_len = len << 4;
+
+ put_unaligned_le32(addr, &tb->lo);
+ if (sizeof(dma_addr_t) > sizeof(u32))
+ hi_n_len |= ((addr >> 16) >> 16) & 0xF;
+
+ tb->hi_n_len = cpu_to_le16(hi_n_len);
+
+ tfd->num_tbs = idx + 1;
+}
+
+static inline u8
+il4965_tfd_get_num_tbs(struct il_tfd *tfd)
+{
+ return tfd->num_tbs & 0x1f;
+}
+
+/**
+ * il4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
+ * @il - driver private data
+ * @txq - tx queue
+ *
+ * Does NOT advance any TFD circular buffer read/write indexes
+ * Does NOT free the TFD itself (which is within circular buffer)
+ */
+void
+il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq)
+{
+ struct il_tfd *tfd_tmp = (struct il_tfd *)txq->tfds;
+ struct il_tfd *tfd;
+ struct pci_dev *dev = il->pci_dev;
+ int idx = txq->q.read_ptr;
+ int i;
+ int num_tbs;
+
+ tfd = &tfd_tmp[idx];
+
+ /* Sanity check on number of chunks */
+ num_tbs = il4965_tfd_get_num_tbs(tfd);
+
+ if (num_tbs >= IL_NUM_OF_TBS) {
+ IL_ERR("Too many chunks: %i\n", num_tbs);
+ /* @todo issue fatal error, it is quite a serious situation */
+ return;
+ }
+
+ /* Unmap tx_cmd */
+ if (num_tbs)
+ pci_unmap_single(dev, dma_unmap_addr(&txq->meta[idx], mapping),
+ dma_unmap_len(&txq->meta[idx], len),
+ PCI_DMA_BIDIRECTIONAL);
+
+ /* Unmap chunks, if any. */
+ for (i = 1; i < num_tbs; i++)
+ pci_unmap_single(dev, il4965_tfd_tb_get_addr(tfd, i),
+ il4965_tfd_tb_get_len(tfd, i),
+ PCI_DMA_TODEVICE);
+
+ /* free SKB */
+ if (txq->txb) {
+ struct sk_buff *skb;
+
+ skb = txq->txb[txq->q.read_ptr].skb;
+
+ /* can be called from irqs-disabled context */
+ if (skb) {
+ dev_kfree_skb_any(skb);
+ txq->txb[txq->q.read_ptr].skb = NULL;
+ }
+ }
+}
+
+int
+il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+ dma_addr_t addr, u16 len, u8 reset, u8 pad)
+{
+ struct il_queue *q;
+ struct il_tfd *tfd, *tfd_tmp;
+ u32 num_tbs;
+
+ q = &txq->q;
+ tfd_tmp = (struct il_tfd *)txq->tfds;
+ tfd = &tfd_tmp[q->write_ptr];
+
+ if (reset)
+ memset(tfd, 0, sizeof(*tfd));
+
+ num_tbs = il4965_tfd_get_num_tbs(tfd);
+
+ /* Each TFD can point to a maximum of 20 Tx buffers */
+ if (num_tbs >= IL_NUM_OF_TBS) {
+ IL_ERR("Error can not send more than %d chunks\n",
+ IL_NUM_OF_TBS);
+ return -EINVAL;
+ }
+
+ BUG_ON(addr & ~DMA_BIT_MASK(36));
+ if (unlikely(addr & ~IL_TX_DMA_MASK))
+ IL_ERR("Unaligned address = %llx\n", (unsigned long long)addr);
+
+ il4965_tfd_set_tb(tfd, num_tbs, addr, len);
+
+ return 0;
+}
+
+/*
+ * Tell nic where to find circular buffer of Tx Frame Descriptors for
+ * given Tx queue, and enable the DMA channel used for that queue.
+ *
+ * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
+ * channels supported in hardware.
+ */
+int
+il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq)
+{
+ int txq_id = txq->q.id;
+
+ /* Circular buffer (TFD queue in DRAM) physical base address */
+ il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), txq->q.dma_addr >> 8);
+
+ return 0;
+}
+
+/******************************************************************************
+ *
+ * Generic RX handler implementations
+ *
+ ******************************************************************************/
+static void
+il4965_hdl_alive(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_alive_resp *palive;
+ struct delayed_work *pwork;
+
+ palive = &pkt->u.alive_frame;
+
+ D_INFO("Alive ucode status 0x%08X revision " "0x%01X 0x%01X\n",
+ palive->is_valid, palive->ver_type, palive->ver_subtype);
+
+ if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
+ D_INFO("Initialization Alive received.\n");
+ memcpy(&il->card_alive_init, &pkt->u.alive_frame,
+ sizeof(struct il_init_alive_resp));
+ pwork = &il->init_alive_start;
+ } else {
+ D_INFO("Runtime Alive received.\n");
+ memcpy(&il->card_alive, &pkt->u.alive_frame,
+ sizeof(struct il_alive_resp));
+ pwork = &il->alive_start;
+ }
+
+ /* We delay the ALIVE response by 5ms to
+ * give the HW RF Kill time to activate... */
+ if (palive->is_valid == UCODE_VALID_OK)
+ queue_delayed_work(il->workqueue, pwork, msecs_to_jiffies(5));
+ else
+ IL_WARN("uCode did not respond OK.\n");
+}
+
+/**
+ * il4965_bg_stats_periodic - Timer callback to queue stats
+ *
+ * This callback is provided in order to send a stats request.
+ *
+ * This timer function is continually reset to execute within
+ * REG_RECALIB_PERIOD seconds since the last N_STATS
+ * was received. We need to ensure we receive the stats in order
+ * to update the temperature used for calibrating the TXPOWER.
+ */
+static void
+il4965_bg_stats_periodic(unsigned long data)
+{
+ struct il_priv *il = (struct il_priv *)data;
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ /* don't send host command if rf-kill is on */
+ if (!il_is_ready_rf(il))
+ return;
+
+ il_send_stats_request(il, CMD_ASYNC, false);
+}
+
+static void
+il4965_hdl_beacon(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il4965_beacon_notif *beacon =
+ (struct il4965_beacon_notif *)pkt->u.raw;
+#ifdef CONFIG_IWLEGACY_DEBUG
+ u8 rate = il4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
+
+ D_RX("beacon status %x retries %d iss %d tsf:0x%.8x%.8x rate %d\n",
+ le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
+ beacon->beacon_notify_hdr.failure_frame,
+ le32_to_cpu(beacon->ibss_mgr_status),
+ le32_to_cpu(beacon->high_tsf), le32_to_cpu(beacon->low_tsf), rate);
+#endif
+ il->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
+}
+
+static void
+il4965_perform_ct_kill_task(struct il_priv *il)
+{
+ unsigned long flags;
+
+ D_POWER("Stop all queues\n");
+
+ if (il->mac80211_registered)
+ ieee80211_stop_queues(il->hw);
+
+ _il_wr(il, CSR_UCODE_DRV_GP1_SET,
+ CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
+ _il_rd(il, CSR_UCODE_DRV_GP1);
+
+ spin_lock_irqsave(&il->reg_lock, flags);
+ if (!_il_grab_nic_access(il))
+ _il_release_nic_access(il);
+ spin_unlock_irqrestore(&il->reg_lock, flags);
+}
+
+/* Handle notification from uCode that card's power state is changing
+ * due to software, hardware, or critical temperature RFKILL */
+static void
+il4965_hdl_card_state(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
+ unsigned long status = il->status;
+
+ D_RF_KILL("Card state received: HW:%s SW:%s CT:%s\n",
+ (flags & HW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & SW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & CT_CARD_DISABLED) ? "Reached" : "Not reached");
+
+ if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED | CT_CARD_DISABLED)) {
+
+ _il_wr(il, CSR_UCODE_DRV_GP1_SET,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ il_wr(il, HBUS_TARG_MBX_C, HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
+
+ if (!(flags & RXON_CARD_DISABLED)) {
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+ il_wr(il, HBUS_TARG_MBX_C,
+ HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
+ }
+ }
+
+ if (flags & CT_CARD_DISABLED)
+ il4965_perform_ct_kill_task(il);
+
+ if (flags & HW_CARD_DISABLED)
+ set_bit(S_RF_KILL_HW, &il->status);
+ else
+ clear_bit(S_RF_KILL_HW, &il->status);
+
+ if (!(flags & RXON_CARD_DISABLED))
+ il_scan_cancel(il);
+
+ if ((test_bit(S_RF_KILL_HW, &status) !=
+ test_bit(S_RF_KILL_HW, &il->status)))
+ wiphy_rfkill_set_hw_state(il->hw->wiphy,
+ test_bit(S_RF_KILL_HW, &il->status));
+ else
+ wake_up(&il->wait_command_queue);
+}
+
+/**
+ * il4965_setup_handlers - Initialize Rx handler callbacks
+ *
+ * Setup the RX handlers for each of the reply types sent from the uCode
+ * to the host.
+ *
+ * This function chains into the hardware specific files for them to setup
+ * any hardware specific handlers as well.
+ */
+static void
+il4965_setup_handlers(struct il_priv *il)
+{
+ il->handlers[N_ALIVE] = il4965_hdl_alive;
+ il->handlers[N_ERROR] = il_hdl_error;
+ il->handlers[N_CHANNEL_SWITCH] = il_hdl_csa;
+ il->handlers[N_SPECTRUM_MEASUREMENT] = il_hdl_spectrum_measurement;
+ il->handlers[N_PM_SLEEP] = il_hdl_pm_sleep;
+ il->handlers[N_PM_DEBUG_STATS] = il_hdl_pm_debug_stats;
+ il->handlers[N_BEACON] = il4965_hdl_beacon;
+
+ /*
+ * The same handler is used for both the REPLY to a discrete
+ * stats request from the host as well as for the periodic
+ * stats notifications (after received beacons) from the uCode.
+ */
+ il->handlers[C_STATS] = il4965_hdl_c_stats;
+ il->handlers[N_STATS] = il4965_hdl_stats;
+
+ il_setup_rx_scan_handlers(il);
+
+ /* status change handler */
+ il->handlers[N_CARD_STATE] = il4965_hdl_card_state;
+
+ il->handlers[N_MISSED_BEACONS] = il4965_hdl_missed_beacon;
+ /* Rx handlers */
+ il->handlers[N_RX_PHY] = il4965_hdl_rx_phy;
+ il->handlers[N_RX_MPDU] = il4965_hdl_rx;
+ /* block ack */
+ il->handlers[N_COMPRESSED_BA] = il4965_hdl_compressed_ba;
+ /* Set up hardware specific Rx handlers */
+ il->cfg->ops->lib->handler_setup(il);
+}
+
+/**
+ * il4965_rx_handle - Main entry function for receiving responses from uCode
+ *
+ * Uses the il->handlers callback function array to invoke
+ * the appropriate handlers, including command responses,
+ * frame-received notifications, and other notifications.
+ */
+void
+il4965_rx_handle(struct il_priv *il)
+{
+ struct il_rx_buf *rxb;
+ struct il_rx_pkt *pkt;
+ struct il_rx_queue *rxq = &il->rxq;
+ u32 r, i;
+ int reclaim;
+ unsigned long flags;
+ u8 fill_rx = 0;
+ u32 count = 8;
+ int total_empty;
+
+ /* uCode's read idx (stored in shared DRAM) indicates the last Rx
+ * buffer that the driver may process (last buffer filled by ucode). */
+ r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
+ i = rxq->read;
+
+ /* Rx interrupt, but nothing sent from uCode */
+ if (i == r)
+ D_RX("r = %d, i = %d\n", r, i);
+
+ /* calculate total frames need to be restock after handling RX */
+ total_empty = r - rxq->write_actual;
+ if (total_empty < 0)
+ total_empty += RX_QUEUE_SIZE;
+
+ if (total_empty > (RX_QUEUE_SIZE / 2))
+ fill_rx = 1;
+
+ while (i != r) {
+ int len;
+
+ rxb = rxq->queue[i];
+
+ /* If an RXB doesn't have a Rx queue slot associated with it,
+ * then a bug has been introduced in the queue refilling
+ * routines -- catch it here */
+ BUG_ON(rxb == NULL);
+
+ rxq->queue[i] = NULL;
+
+ pci_unmap_page(il->pci_dev, rxb->page_dma,
+ PAGE_SIZE << il->hw_params.rx_page_order,
+ PCI_DMA_FROMDEVICE);
+ pkt = rxb_addr(rxb);
+
+ len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
+ len += sizeof(u32); /* account for status word */
+
+ /* Reclaim a command buffer only if this packet is a response
+ * to a (driver-originated) command.
+ * If the packet (e.g. Rx frame) originated from uCode,
+ * there is no command buffer to reclaim.
+ * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
+ * but apparently a few don't get set; catch them here. */
+ reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
+ (pkt->hdr.cmd != N_RX_PHY) && (pkt->hdr.cmd != N_RX) &&
+ (pkt->hdr.cmd != N_RX_MPDU) &&
+ (pkt->hdr.cmd != N_COMPRESSED_BA) &&
+ (pkt->hdr.cmd != N_STATS) && (pkt->hdr.cmd != C_TX);
+
+ /* Based on type of command response or notification,
+ * handle those that need handling via function in
+ * handlers table. See il4965_setup_handlers() */
+ if (il->handlers[pkt->hdr.cmd]) {
+ D_RX("r = %d, i = %d, %s, 0x%02x\n", r, i,
+ il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+ il->isr_stats.handlers[pkt->hdr.cmd]++;
+ il->handlers[pkt->hdr.cmd] (il, rxb);
+ } else {
+ /* No handling needed */
+ D_RX("r %d i %d No handler needed for %s, 0x%02x\n", r,
+ i, il_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
+ }
+
+ /*
+ * XXX: After here, we should always check rxb->page
+ * against NULL before touching it or its virtual
+ * memory (pkt). Because some handler might have
+ * already taken or freed the pages.
+ */
+
+ if (reclaim) {
+ /* Invoke any callbacks, transfer the buffer to caller,
+ * and fire off the (possibly) blocking il_send_cmd()
+ * as we reclaim the driver command queue */
+ if (rxb->page)
+ il_tx_cmd_complete(il, rxb);
+ else
+ IL_WARN("Claim null rxb?\n");
+ }
+
+ /* Reuse the page if possible. For notification packets and
+ * SKBs that fail to Rx correctly, add them back into the
+ * rx_free list for reuse later. */
+ spin_lock_irqsave(&rxq->lock, flags);
+ if (rxb->page != NULL) {
+ rxb->page_dma =
+ pci_map_page(il->pci_dev, rxb->page, 0,
+ PAGE_SIZE << il->hw_params.
+ rx_page_order, PCI_DMA_FROMDEVICE);
+ list_add_tail(&rxb->list, &rxq->rx_free);
+ rxq->free_count++;
+ } else
+ list_add_tail(&rxb->list, &rxq->rx_used);
+
+ spin_unlock_irqrestore(&rxq->lock, flags);
+
+ i = (i + 1) & RX_QUEUE_MASK;
+ /* If there are a lot of unused frames,
+ * restock the Rx queue so the uCode won't assert. */
+ if (fill_rx) {
+ count++;
+ if (count >= 8) {
+ rxq->read = i;
+ il4965_rx_replenish_now(il);
+ count = 0;
+ }
+ }
+ }
+
+ /* Backtrack one entry */
+ rxq->read = i;
+ if (fill_rx)
+ il4965_rx_replenish_now(il);
+ else
+ il4965_rx_queue_restock(il);
+}
+
+/* call this function to flush any scheduled tasklet */
+static inline void
+il4965_synchronize_irq(struct il_priv *il)
+{
+ /* wait to make sure we flush pending tasklet */
+ synchronize_irq(il->pci_dev->irq);
+ tasklet_kill(&il->irq_tasklet);
+}
+
+static void
+il4965_irq_tasklet(struct il_priv *il)
+{
+ u32 inta, handled = 0;
+ u32 inta_fh;
+ unsigned long flags;
+ u32 i;
+#ifdef CONFIG_IWLEGACY_DEBUG
+ u32 inta_mask;
+#endif
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Ack/clear/reset pending uCode interrupts.
+ * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
+ * and will clear only when CSR_FH_INT_STATUS gets cleared. */
+ inta = _il_rd(il, CSR_INT);
+ _il_wr(il, CSR_INT, inta);
+
+ /* Ack/clear/reset pending flow-handler (DMA) interrupts.
+ * Any new interrupts that happen after this, either while we're
+ * in this tasklet, or later, will show up in next ISR/tasklet. */
+ inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+ _il_wr(il, CSR_FH_INT_STATUS, inta_fh);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (il_get_debug_level(il) & IL_DL_ISR) {
+ /* just for debug */
+ inta_mask = _il_rd(il, CSR_INT_MASK);
+ D_ISR("inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta,
+ inta_mask, inta_fh);
+ }
+#endif
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
+ * atomic, make sure that inta covers all the interrupts that
+ * we've discovered, even if FH interrupt came in just after
+ * reading CSR_INT. */
+ if (inta_fh & CSR49_FH_INT_RX_MASK)
+ inta |= CSR_INT_BIT_FH_RX;
+ if (inta_fh & CSR49_FH_INT_TX_MASK)
+ inta |= CSR_INT_BIT_FH_TX;
+
+ /* Now service all interrupt bits discovered above. */
+ if (inta & CSR_INT_BIT_HW_ERR) {
+ IL_ERR("Hardware error detected. Restarting.\n");
+
+ /* Tell the device to stop sending interrupts */
+ il_disable_interrupts(il);
+
+ il->isr_stats.hw++;
+ il_irq_handle_error(il);
+
+ handled |= CSR_INT_BIT_HW_ERR;
+
+ return;
+ }
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (il_get_debug_level(il) & (IL_DL_ISR)) {
+ /* NIC fires this, but we don't use it, redundant with WAKEUP */
+ if (inta & CSR_INT_BIT_SCD) {
+ D_ISR("Scheduler finished to transmit "
+ "the frame/frames.\n");
+ il->isr_stats.sch++;
+ }
+
+ /* Alive notification via Rx interrupt will do the real work */
+ if (inta & CSR_INT_BIT_ALIVE) {
+ D_ISR("Alive interrupt\n");
+ il->isr_stats.alive++;
+ }
+ }
+#endif
+ /* Safely ignore these bits for debug checks below */
+ inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
+
+ /* HW RF KILL switch toggled */
+ if (inta & CSR_INT_BIT_RF_KILL) {
+ int hw_rf_kill = 0;
+ if (!
+ (_il_rd(il, CSR_GP_CNTRL) &
+ CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+ hw_rf_kill = 1;
+
+ IL_WARN("RF_KILL bit toggled to %s.\n",
+ hw_rf_kill ? "disable radio" : "enable radio");
+
+ il->isr_stats.rfkill++;
+
+ /* The driver only loads the uCode when setting the interface up,
+ * but it allows loading the uCode even if the radio is killed.
+ * Hence update the killswitch state here; the rfkill handler
+ * will take care of restarting if needed.
+ */
+ if (!test_bit(S_ALIVE, &il->status)) {
+ if (hw_rf_kill)
+ set_bit(S_RF_KILL_HW, &il->status);
+ else
+ clear_bit(S_RF_KILL_HW, &il->status);
+ wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill);
+ }
+
+ handled |= CSR_INT_BIT_RF_KILL;
+ }
+
+ /* Chip got too hot and stopped itself */
+ if (inta & CSR_INT_BIT_CT_KILL) {
+ IL_ERR("Microcode CT kill error detected.\n");
+ il->isr_stats.ctkill++;
+ handled |= CSR_INT_BIT_CT_KILL;
+ }
+
+ /* Error detected by uCode */
+ if (inta & CSR_INT_BIT_SW_ERR) {
+ IL_ERR("Microcode SW error detected. " " Restarting 0x%X.\n",
+ inta);
+ il->isr_stats.sw++;
+ il_irq_handle_error(il);
+ handled |= CSR_INT_BIT_SW_ERR;
+ }
+
+ /*
+ * uCode wakes up after power-down sleep.
+ * Tell device about any new tx or host commands enqueued,
+ * and about any Rx buffers made available while asleep.
+ */
+ if (inta & CSR_INT_BIT_WAKEUP) {
+ D_ISR("Wakeup interrupt\n");
+ il_rx_queue_update_write_ptr(il, &il->rxq);
+ for (i = 0; i < il->hw_params.max_txq_num; i++)
+ il_txq_update_write_ptr(il, &il->txq[i]);
+ il->isr_stats.wakeup++;
+ handled |= CSR_INT_BIT_WAKEUP;
+ }
+
+ /* All uCode command responses, including Tx command responses,
+ * Rx "responses" (frame-received notification), and other
+ * notifications from uCode come through here. */
+ if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
+ il4965_rx_handle(il);
+ il->isr_stats.rx++;
+ handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
+ }
+
+ /* This "Tx" DMA channel is used only for loading uCode */
+ if (inta & CSR_INT_BIT_FH_TX) {
+ D_ISR("uCode load interrupt\n");
+ il->isr_stats.tx++;
+ handled |= CSR_INT_BIT_FH_TX;
+ /* Wake up uCode load routine, now that load is complete */
+ il->ucode_write_complete = 1;
+ wake_up(&il->wait_command_queue);
+ }
+
+ if (inta & ~handled) {
+ IL_ERR("Unhandled INTA bits 0x%08x\n", inta & ~handled);
+ il->isr_stats.unhandled++;
+ }
+
+ if (inta & ~(il->inta_mask)) {
+ IL_WARN("Disabled INTA bits 0x%08x were pending\n",
+ inta & ~il->inta_mask);
+ IL_WARN(" with FH49_INT = 0x%08x\n", inta_fh);
+ }
+
+ /* Re-enable all interrupts */
+ /* only Re-enable if disabled by irq */
+ if (test_bit(S_INT_ENABLED, &il->status))
+ il_enable_interrupts(il);
+ /* Re-enable RF_KILL if it occurred */
+ else if (handled & CSR_INT_BIT_RF_KILL)
+ il_enable_rfkill_int(il);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (il_get_debug_level(il) & (IL_DL_ISR)) {
+ inta = _il_rd(il, CSR_INT);
+ inta_mask = _il_rd(il, CSR_INT_MASK);
+ inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+ D_ISR("End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
+ "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
+ }
+#endif
+}
+
+/*****************************************************************************
+ *
+ * sysfs attributes
+ *
+ *****************************************************************************/
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+
+/*
+ * The following adds a new attribute to the sysfs representation
+ * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
+ * used for controlling the debug level.
+ *
+ * See the level definitions in iwl for details.
+ *
+ * The debug_level managed via this sysfs file is a per-device debug
+ * level that is used instead of the global debug level when it is set.
+ */
+static ssize_t
+il4965_show_debug_level(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ return sprintf(buf, "0x%08X\n", il_get_debug_level(il));
+}
+
+static ssize_t
+il4965_store_debug_level(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret)
+ IL_ERR("%s is not in hex or decimal form.\n", buf);
+ else {
+ il->debug_level = val;
+ if (il_alloc_traffic_mem(il))
+ IL_ERR("Not enough memory to generate traffic log\n");
+ }
+ return strnlen(buf, count);
+}
+
+static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO, il4965_show_debug_level,
+ il4965_store_debug_level);
+
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
+static ssize_t
+il4965_show_temperature(struct device *d, struct device_attribute *attr,
+ char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+
+ if (!il_is_alive(il))
+ return -EAGAIN;
+
+ return sprintf(buf, "%d\n", il->temperature);
+}
+
+static DEVICE_ATTR(temperature, S_IRUGO, il4965_show_temperature, NULL);
+
+static ssize_t
+il4965_show_tx_power(struct device *d, struct device_attribute *attr, char *buf)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+
+ if (!il_is_ready_rf(il))
+ return sprintf(buf, "off\n");
+ else
+ return sprintf(buf, "%d\n", il->tx_power_user_lmt);
+}
+
+static ssize_t
+il4965_store_tx_power(struct device *d, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct il_priv *il = dev_get_drvdata(d);
+ unsigned long val;
+ int ret;
+
+ ret = strict_strtoul(buf, 10, &val);
+ if (ret)
+ IL_INFO("%s is not in decimal form.\n", buf);
+ else {
+ ret = il_set_tx_power(il, val, false);
+ if (ret)
+ IL_ERR("failed setting tx power (0x%d).\n", ret);
+ else
+ ret = count;
+ }
+ return ret;
+}
+
+static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, il4965_show_tx_power,
+ il4965_store_tx_power);
+
+static struct attribute *il_sysfs_entries[] = {
+ &dev_attr_temperature.attr,
+ &dev_attr_tx_power.attr,
+#ifdef CONFIG_IWLEGACY_DEBUG
+ &dev_attr_debug_level.attr,
+#endif
+ NULL
+};
+
+static struct attribute_group il_attribute_group = {
+ .name = NULL, /* put in device directory */
+ .attrs = il_sysfs_entries,
+};
+
+/******************************************************************************
+ *
+ * uCode download functions
+ *
+ ******************************************************************************/
+
+static void
+il4965_dealloc_ucode_pci(struct il_priv *il)
+{
+ il_free_fw_desc(il->pci_dev, &il->ucode_code);
+ il_free_fw_desc(il->pci_dev, &il->ucode_data);
+ il_free_fw_desc(il->pci_dev, &il->ucode_data_backup);
+ il_free_fw_desc(il->pci_dev, &il->ucode_init);
+ il_free_fw_desc(il->pci_dev, &il->ucode_init_data);
+ il_free_fw_desc(il->pci_dev, &il->ucode_boot);
+}
+
+static void
+il4965_nic_start(struct il_priv *il)
+{
+ /* Remove all resets to allow NIC to operate */
+ _il_wr(il, CSR_RESET, 0);
+}
+
+static void il4965_ucode_callback(const struct firmware *ucode_raw,
+ void *context);
+static int il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length);
+
+static int __must_check
+il4965_request_firmware(struct il_priv *il, bool first)
+{
+ const char *name_pre = il->cfg->fw_name_pre;
+ char tag[8];
+
+ if (first) {
+ il->fw_idx = il->cfg->ucode_api_max;
+ sprintf(tag, "%d", il->fw_idx);
+ } else {
+ il->fw_idx--;
+ sprintf(tag, "%d", il->fw_idx);
+ }
+
+ if (il->fw_idx < il->cfg->ucode_api_min) {
+ IL_ERR("no suitable firmware found!\n");
+ return -ENOENT;
+ }
+
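+ /* e.g. "iwlwifi-4965-2.ucode" (example name for illustration only;
+ * the actual prefix comes from il->cfg->fw_name_pre) */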
+ sprintf(il->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
+
+ D_INFO("attempting to load firmware '%s'\n", il->firmware_name);
+
+ return request_firmware_nowait(THIS_MODULE, 1, il->firmware_name,
+ &il->pci_dev->dev, GFP_KERNEL, il,
+ il4965_ucode_callback);
+}
+
+struct il4965_firmware_pieces {
+ const void *inst, *data, *init, *init_data, *boot;
+ size_t inst_size, data_size, init_size, init_data_size, boot_size;
+};
+
+static int
+il4965_load_firmware(struct il_priv *il, const struct firmware *ucode_raw,
+ struct il4965_firmware_pieces *pieces)
+{
+ struct il_ucode_header *ucode = (void *)ucode_raw->data;
+ u32 api_ver, hdr_size;
+ const u8 *src;
+
+ il->ucode_ver = le32_to_cpu(ucode->ver);
+ api_ver = IL_UCODE_API(il->ucode_ver);
+
+ switch (api_ver) {
+ default:
+ case 0:
+ case 1:
+ case 2:
+ hdr_size = 24;
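+ /* legacy (v0-v2) header: ver plus the five size words
+ * below = 6 * sizeof(__le32) = 24 bytes */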
+ if (ucode_raw->size < hdr_size) {
+ IL_ERR("File size too small!\n");
+ return -EINVAL;
+ }
+ pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
+ pieces->data_size = le32_to_cpu(ucode->v1.data_size);
+ pieces->init_size = le32_to_cpu(ucode->v1.init_size);
+ pieces->init_data_size = le32_to_cpu(ucode->v1.init_data_size);
+ pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
+ src = ucode->v1.data;
+ break;
+ }
+
+ /* Verify size of file vs. image size info in file's header */
+ if (ucode_raw->size !=
+ hdr_size + pieces->inst_size + pieces->data_size +
+ pieces->init_size + pieces->init_data_size + pieces->boot_size) {
+
+ IL_ERR("uCode file size %d does not match expected size\n",
+ (int)ucode_raw->size);
+ return -EINVAL;
+ }
+
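+ /* Image layout after the header: inst | data | init | init_data | boot */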
+ pieces->inst = src;
+ src += pieces->inst_size;
+ pieces->data = src;
+ src += pieces->data_size;
+ pieces->init = src;
+ src += pieces->init_size;
+ pieces->init_data = src;
+ src += pieces->init_data_size;
+ pieces->boot = src;
+ src += pieces->boot_size;
+
+ return 0;
+}
+
+/**
+ * il4965_ucode_callback - callback when firmware was loaded
+ *
+ * If loaded successfully, copies the firmware into buffers
+ * for the card to fetch (via DMA).
+ */
+static void
+il4965_ucode_callback(const struct firmware *ucode_raw, void *context)
+{
+ struct il_priv *il = context;
+ struct il_ucode_header *ucode;
+ int err;
+ struct il4965_firmware_pieces pieces;
+ const unsigned int api_max = il->cfg->ucode_api_max;
+ const unsigned int api_min = il->cfg->ucode_api_min;
+ u32 api_ver;
+
+ u32 max_probe_length = 200;
+ u32 standard_phy_calibration_size =
+ IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
+
+ memset(&pieces, 0, sizeof(pieces));
+
+ if (!ucode_raw) {
+ if (il->fw_idx <= il->cfg->ucode_api_max)
+ IL_ERR("request for firmware file '%s' failed.\n",
+ il->firmware_name);
+ goto try_again;
+ }
+
+ D_INFO("Loaded firmware file '%s' (%zd bytes).\n", il->firmware_name,
+ ucode_raw->size);
+
+ /* Make sure that we got at least the API version number */
+ if (ucode_raw->size < 4) {
+ IL_ERR("File size way too small!\n");
+ goto try_again;
+ }
+
+ /* Data from ucode file: header followed by uCode images */
+ ucode = (struct il_ucode_header *)ucode_raw->data;
+
+ err = il4965_load_firmware(il, ucode_raw, &pieces);
+
+ if (err)
+ goto try_again;
+
+ api_ver = IL_UCODE_API(il->ucode_ver);
+
+ /*
+ * api_ver should match the api version forming part of the
+ * firmware filename ... but we don't check for that and only rely
+ * on the API version read from firmware header from here on forward
+ */
+ if (api_ver < api_min || api_ver > api_max) {
+ IL_ERR("Driver unable to support your firmware API. "
+ "Driver supports v%u, firmware is v%u.\n", api_max,
+ api_ver);
+ goto try_again;
+ }
+
+ if (api_ver != api_max)
+ IL_ERR("Firmware has old API version. Expected v%u, "
+ "got v%u. New firmware can be obtained "
+ "from http://www.intellinuxwireless.org.\n", api_max,
+ api_ver);
+
+ IL_INFO("loaded firmware version %u.%u.%u.%u\n",
+ IL_UCODE_MAJOR(il->ucode_ver), IL_UCODE_MINOR(il->ucode_ver),
+ IL_UCODE_API(il->ucode_ver), IL_UCODE_SERIAL(il->ucode_ver));
+
+ snprintf(il->hw->wiphy->fw_version, sizeof(il->hw->wiphy->fw_version),
+ "%u.%u.%u.%u", IL_UCODE_MAJOR(il->ucode_ver),
+ IL_UCODE_MINOR(il->ucode_ver), IL_UCODE_API(il->ucode_ver),
+ IL_UCODE_SERIAL(il->ucode_ver));
+
+ /*
+ * For any of the failures below (before allocating pci memory)
+ * we will try to load a version with a smaller API -- maybe the
+ * user just got a corrupted version of the latest API.
+ */
+
+ D_INFO("f/w package hdr ucode version raw = 0x%x\n", il->ucode_ver);
+ D_INFO("f/w package hdr runtime inst size = %Zd\n", pieces.inst_size);
+ D_INFO("f/w package hdr runtime data size = %Zd\n", pieces.data_size);
+ D_INFO("f/w package hdr init inst size = %Zd\n", pieces.init_size);
+ D_INFO("f/w package hdr init data size = %Zd\n", pieces.init_data_size);
+ D_INFO("f/w package hdr boot inst size = %Zd\n", pieces.boot_size);
+
+ /* Verify that uCode images will fit in card's SRAM */
+ if (pieces.inst_size > il->hw_params.max_inst_size) {
+ IL_ERR("uCode instr len %Zd too large to fit in\n",
+ pieces.inst_size);
+ goto try_again;
+ }
+
+ if (pieces.data_size > il->hw_params.max_data_size) {
+ IL_ERR("uCode data len %Zd too large to fit in\n",
+ pieces.data_size);
+ goto try_again;
+ }
+
+ if (pieces.init_size > il->hw_params.max_inst_size) {
+ IL_ERR("uCode init instr len %Zd too large to fit in\n",
+ pieces.init_size);
+ goto try_again;
+ }
+
+ if (pieces.init_data_size > il->hw_params.max_data_size) {
+ IL_ERR("uCode init data len %Zd too large to fit in\n",
+ pieces.init_data_size);
+ goto try_again;
+ }
+
+ if (pieces.boot_size > il->hw_params.max_bsm_size) {
+ IL_ERR("uCode boot instr len %Zd too large to fit in\n",
+ pieces.boot_size);
+ goto try_again;
+ }
+
+ /* Allocate ucode buffers for card's bus-master loading ... */
+
+ /* Runtime instructions and 2 copies of data:
+ * 1) unmodified from disk
+ * 2) backup cache for save/restore during power-downs */
+ il->ucode_code.len = pieces.inst_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_code);
+
+ il->ucode_data.len = pieces.data_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_data);
+
+ il->ucode_data_backup.len = pieces.data_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_data_backup);
+
+ if (!il->ucode_code.v_addr || !il->ucode_data.v_addr ||
+ !il->ucode_data_backup.v_addr)
+ goto err_pci_alloc;
+
+ /* Initialization instructions and data */
+ if (pieces.init_size && pieces.init_data_size) {
+ il->ucode_init.len = pieces.init_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_init);
+
+ il->ucode_init_data.len = pieces.init_data_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_init_data);
+
+ if (!il->ucode_init.v_addr || !il->ucode_init_data.v_addr)
+ goto err_pci_alloc;
+ }
+
+ /* Bootstrap (instructions only, no data) */
+ if (pieces.boot_size) {
+ il->ucode_boot.len = pieces.boot_size;
+ il_alloc_fw_desc(il->pci_dev, &il->ucode_boot);
+
+ if (!il->ucode_boot.v_addr)
+ goto err_pci_alloc;
+ }
+
+ /* Now that we can no longer fail, copy information */
+
+ il->sta_key_max_num = STA_KEY_MAX_NUM;
+
+ /* Copy images into buffers for card's bus-master reads ... */
+
+ /* Runtime instructions (first block of data in file) */
+ D_INFO("Copying (but not loading) uCode instr len %Zd\n",
+ pieces.inst_size);
+ memcpy(il->ucode_code.v_addr, pieces.inst, pieces.inst_size);
+
+ D_INFO("uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
+ il->ucode_code.v_addr, (u32) il->ucode_code.p_addr);
+
+ /*
+ * Runtime data
+ * NOTE: Copy into backup buffer will be done in il_up()
+ */
+ D_INFO("Copying (but not loading) uCode data len %Zd\n",
+ pieces.data_size);
+ memcpy(il->ucode_data.v_addr, pieces.data, pieces.data_size);
+ memcpy(il->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
+
+ /* Initialization instructions */
+ if (pieces.init_size) {
+ D_INFO("Copying (but not loading) init instr len %Zd\n",
+ pieces.init_size);
+ memcpy(il->ucode_init.v_addr, pieces.init, pieces.init_size);
+ }
+
+ /* Initialization data */
+ if (pieces.init_data_size) {
+ D_INFO("Copying (but not loading) init data len %Zd\n",
+ pieces.init_data_size);
+ memcpy(il->ucode_init_data.v_addr, pieces.init_data,
+ pieces.init_data_size);
+ }
+
+ /* Bootstrap instructions */
+ D_INFO("Copying (but not loading) boot instr len %Zd\n",
+ pieces.boot_size);
+ memcpy(il->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
+
+ /*
+ * figure out the offset of chain noise reset and gain commands
+ * based on the size of the standard phy calibration commands table
+ */
+ il->_4965.phy_calib_chain_noise_reset_cmd =
+ standard_phy_calibration_size;
+ il->_4965.phy_calib_chain_noise_gain_cmd =
+ standard_phy_calibration_size + 1;
+
+ /**************************************************
+ * This is still part of probe() in a sense...
+ *
+ * 9. Setup and register with mac80211 and debugfs
+ **************************************************/
+ err = il4965_mac_setup_register(il, max_probe_length);
+ if (err)
+ goto out_unbind;
+
+ err = il_dbgfs_register(il, DRV_NAME);
+ if (err)
+ IL_ERR("failed to create debugfs files. Ignoring error: %d\n",
+ err);
+
+ err = sysfs_create_group(&il->pci_dev->dev.kobj, &il_attribute_group);
+ if (err) {
+ IL_ERR("failed to create sysfs device attributes\n");
+ goto out_unbind;
+ }
+
+ /* We have our copies now, allow OS release its copies */
+ release_firmware(ucode_raw);
+ complete(&il->_4965.firmware_loading_complete);
+ return;
+
+try_again:
+ /* try next, if any */
+ if (il4965_request_firmware(il, false))
+ goto out_unbind;
+ release_firmware(ucode_raw);
+ return;
+
+err_pci_alloc:
+ IL_ERR("failed to allocate pci memory\n");
+ il4965_dealloc_ucode_pci(il);
+out_unbind:
+ complete(&il->_4965.firmware_loading_complete);
+ device_release_driver(&il->pci_dev->dev);
+ release_firmware(ucode_raw);
+}
+
+static const char *const desc_lookup_text[] = {
+ "OK",
+ "FAIL",
+ "BAD_PARAM",
+ "BAD_CHECKSUM",
+ "NMI_INTERRUPT_WDG",
+ "SYSASSERT",
+ "FATAL_ERROR",
+ "BAD_COMMAND",
+ "HW_ERROR_TUNE_LOCK",
+ "HW_ERROR_TEMPERATURE",
+ "ILLEGAL_CHAN_FREQ",
+ "VCC_NOT_STBL",
+ "FH49_ERROR",
+ "NMI_INTERRUPT_HOST",
+ "NMI_INTERRUPT_ACTION_PT",
+ "NMI_INTERRUPT_UNKNOWN",
+ "UCODE_VERSION_MISMATCH",
+ "HW_ERROR_ABS_LOCK",
+ "HW_ERROR_CAL_LOCK_FAIL",
+ "NMI_INTERRUPT_INST_ACTION_PT",
+ "NMI_INTERRUPT_DATA_ACTION_PT",
+ "NMI_TRM_HW_ER",
+ "NMI_INTERRUPT_TRM",
+ "NMI_INTERRUPT_BREAK_POINT",
+ "DEBUG_0",
+ "DEBUG_1",
+ "DEBUG_2",
+ "DEBUG_3",
+};
+
+static struct {
+ char *name;
+ u8 num;
+} advanced_lookup[] = {
+ {"NMI_INTERRUPT_WDG", 0x34},
+ {"SYSASSERT", 0x35},
+ {"UCODE_VERSION_MISMATCH", 0x37},
+ {"BAD_COMMAND", 0x38},
+ {"NMI_INTERRUPT_DATA_ACTION_PT", 0x3C},
+ {"FATAL_ERROR", 0x3D},
+ {"NMI_TRM_HW_ERR", 0x46},
+ {"NMI_INTERRUPT_TRM", 0x4C},
+ {"NMI_INTERRUPT_BREAK_POINT", 0x54},
+ {"NMI_INTERRUPT_WDG_RXF_FULL", 0x5C},
+ {"NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64},
+ {"NMI_INTERRUPT_HOST", 0x66},
+ {"NMI_INTERRUPT_ACTION_PT", 0x7C},
+ {"NMI_INTERRUPT_UNKNOWN", 0x84},
+ {"NMI_INTERRUPT_INST_ACTION_PT", 0x86},
+ {"ADVANCED_SYSASSERT", 0},
+};
+
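+/* Translate a uCode error code to a printable name: small codes index
+ * directly into desc_lookup_text, otherwise advanced_lookup is searched
+ * and its final "ADVANCED_SYSASSERT" entry serves as the fallback. */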
+static const char *
+il4965_desc_lookup(u32 num)
+{
+ int i;
+ int max = ARRAY_SIZE(desc_lookup_text);
+
+ if (num < max)
+ return desc_lookup_text[num];
+
+ max = ARRAY_SIZE(advanced_lookup) - 1;
+ for (i = 0; i < max; i++) {
+ if (advanced_lookup[i].num == num)
+ break;
+ }
+ return advanced_lookup[i].name;
+}
+
+#define ERROR_START_OFFSET (1 * sizeof(u32))
+#define ERROR_ELEM_SIZE (7 * sizeof(u32))
+
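+/* Error event table layout (32-bit words, relative to 'base'), as read
+ * below: 0 = count, 1 = desc, 2 = pc, 3/4 = blink1/2, 5/6 = ilink1/2,
+ * 7/8 = data1/2, 9 = line, 11 = time, 22 = hcmd */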
+void
+il4965_dump_nic_error_log(struct il_priv *il)
+{
+ u32 data2, line;
+ u32 desc, time, count, base, data1;
+ u32 blink1, blink2, ilink1, ilink2;
+ u32 pc, hcmd;
+
+ if (il->ucode_type == UCODE_INIT)
+ base = le32_to_cpu(il->card_alive_init.error_event_table_ptr);
+ else
+ base = le32_to_cpu(il->card_alive.error_event_table_ptr);
+
+ if (!il->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+ IL_ERR("Not valid error log pointer 0x%08X for %s uCode\n",
+ base, (il->ucode_type == UCODE_INIT) ? "Init" : "RT");
+ return;
+ }
+
+ count = il_read_targ_mem(il, base);
+
+ if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
+ IL_ERR("Start IWL Error Log Dump:\n");
+ IL_ERR("Status: 0x%08lX, count: %d\n", il->status, count);
+ }
+
+ desc = il_read_targ_mem(il, base + 1 * sizeof(u32));
+ il->isr_stats.err_code = desc;
+ pc = il_read_targ_mem(il, base + 2 * sizeof(u32));
+ blink1 = il_read_targ_mem(il, base + 3 * sizeof(u32));
+ blink2 = il_read_targ_mem(il, base + 4 * sizeof(u32));
+ ilink1 = il_read_targ_mem(il, base + 5 * sizeof(u32));
+ ilink2 = il_read_targ_mem(il, base + 6 * sizeof(u32));
+ data1 = il_read_targ_mem(il, base + 7 * sizeof(u32));
+ data2 = il_read_targ_mem(il, base + 8 * sizeof(u32));
+ line = il_read_targ_mem(il, base + 9 * sizeof(u32));
+ time = il_read_targ_mem(il, base + 11 * sizeof(u32));
+ hcmd = il_read_targ_mem(il, base + 22 * sizeof(u32));
+
+ IL_ERR("Desc Time "
+ "data1 data2 line\n");
+ IL_ERR("%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
+ il4965_desc_lookup(desc), desc, time, data1, data2, line);
+ IL_ERR("pc blink1 blink2 ilink1 ilink2 hcmd\n");
+ IL_ERR("0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n", pc, blink1,
+ blink2, ilink1, ilink2, hcmd);
+}
+
+static void
+il4965_rf_kill_ct_config(struct il_priv *il)
+{
+ struct il_ct_kill_config cmd;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&il->lock, flags);
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ cmd.critical_temperature_R =
+ cpu_to_le32(il->hw_params.ct_kill_threshold);
+
+ ret = il_send_cmd_pdu(il, C_CT_KILL_CONFIG, sizeof(cmd), &cmd);
+ if (ret)
+ IL_ERR("C_CT_KILL_CONFIG failed\n");
+ else
+ D_INFO("C_CT_KILL_CONFIG " "succeeded, "
+ "critical temperature is %d\n",
+ il->hw_params.ct_kill_threshold);
+}
+
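+/* Default Tx queue to FIFO mapping: queues 0-3 carry the four EDCA
+ * access categories, queue 4 is the command queue, 5-6 are unused. */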
+static const s8 default_queue_to_tx_fifo[] = {
+ IL_TX_FIFO_VO,
+ IL_TX_FIFO_VI,
+ IL_TX_FIFO_BE,
+ IL_TX_FIFO_BK,
+ IL49_CMD_FIFO_NUM,
+ IL_TX_FIFO_UNUSED,
+ IL_TX_FIFO_UNUSED,
+};
+
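+/* Bitmask with bits lo..hi (inclusive) set; used below to enable
+ * Tx DMA/FIFO channels 0-6. */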
+#define IL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
+
+static int
+il4965_alive_notify(struct il_priv *il)
+{
+ u32 a;
+ unsigned long flags;
+ int i, chan;
+ u32 reg_val;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Clear 4965's internal Tx Scheduler data base */
+ il->scd_base_addr = il_rd_prph(il, IL49_SCD_SRAM_BASE_ADDR);
+ a = il->scd_base_addr + IL49_SCD_CONTEXT_DATA_OFFSET;
+ for (; a < il->scd_base_addr + IL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
+ il_write_targ_mem(il, a, 0);
+ for (; a < il->scd_base_addr + IL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
+ il_write_targ_mem(il, a, 0);
+ for (; a < il->scd_base_addr +
+      IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(il->hw_params.max_txq_num);
+      a += 4)
+ il_write_targ_mem(il, a, 0);
+
+ /* Tell 4965 where to find Tx byte count tables */
+ il_wr_prph(il, IL49_SCD_DRAM_BASE_ADDR, il->scd_bc_tbls.dma >> 10);
+
+ /* Enable DMA channel */
+ for (chan = 0; chan < FH49_TCSR_CHNL_NUM; chan++)
+ il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chan),
+ FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
+
+ /* Update FH chicken bits */
+ reg_val = il_rd(il, FH49_TX_CHICKEN_BITS_REG);
+ il_wr(il, FH49_TX_CHICKEN_BITS_REG,
+ reg_val | FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
+
+ /* Disable chain mode for all queues */
+ il_wr_prph(il, IL49_SCD_QUEUECHAIN_SEL, 0);
+
+ /* Initialize each Tx queue (including the command queue) */
+ for (i = 0; i < il->hw_params.max_txq_num; i++) {
+
+ /* TFD circular buffer read/write idxes */
+ il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(i), 0);
+ il_wr(il, HBUS_TARG_WRPTR, 0 | (i << 8));
+
+ /* Max Tx Window size for Scheduler-ACK mode */
+ il_write_targ_mem(il,
+ il->scd_base_addr +
+ IL49_SCD_CONTEXT_QUEUE_OFFSET(i),
+ (SCD_WIN_SIZE <<
+ IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
+ IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
+
+ /* Frame limit */
+ il_write_targ_mem(il,
+ il->scd_base_addr +
+ IL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
+ sizeof(u32),
+ (SCD_FRAME_LIMIT <<
+ IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+ IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
+
+ }
+ il_wr_prph(il, IL49_SCD_INTERRUPT_MASK,
+ (1 << il->hw_params.max_txq_num) - 1);
+
+ /* Activate all Tx DMA/FIFO channels */
+ il4965_txq_set_sched(il, IL_MASK(0, 6));
+
+ il4965_set_wr_ptrs(il, IL_DEFAULT_CMD_QUEUE_NUM, 0);
+
+ /* make sure all queues are not stopped */
+ memset(&il->queue_stopped[0], 0, sizeof(il->queue_stopped));
+ for (i = 0; i < 4; i++)
+ atomic_set(&il->queue_stop_count[i], 0);
+
+ /* reset to 0 to enable all the queues first */
+ il->txq_ctx_active_msk = 0;
+ /* Map each Tx/cmd queue to its corresponding fifo */
+ BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
+
+ for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
+ int ac = default_queue_to_tx_fifo[i];
+
+ il_txq_ctx_activate(il, i);
+
+ if (ac == IL_TX_FIFO_UNUSED)
+ continue;
+
+ il4965_tx_queue_set_status(il, &il->txq[i], ac, 0);
+ }
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ return 0;
+}
+
+/**
+ * il4965_alive_start - called after N_ALIVE notification received
+ * from protocol/runtime uCode (initialization uCode's
+ * Alive gets handled by il_init_alive_start()).
+ */
+static void
+il4965_alive_start(struct il_priv *il)
+{
+ int ret = 0;
+ struct il_rxon_context *ctx = &il->ctx;
+
+ D_INFO("Runtime Alive received.\n");
+
+ if (il->card_alive.is_valid != UCODE_VALID_OK) {
+ /* We had an error bringing up the hardware, so take it
+ * all the way back down so we can try again */
+ D_INFO("Alive failed.\n");
+ goto restart;
+ }
+
+ /* The "initialize" uCode has loaded the runtime uCode ... verify inst image.
+ * This is a paranoid check, because we would not have gotten the
+ * "runtime" alive if code weren't properly loaded. */
+ if (il4965_verify_ucode(il)) {
+ /* Runtime instruction load was bad;
+ * take it all the way back down so we can try again */
+ D_INFO("Bad runtime uCode load.\n");
+ goto restart;
+ }
+
+ ret = il4965_alive_notify(il);
+ if (ret) {
+ IL_WARN("Could not complete ALIVE transition [ntf]: %d\n", ret);
+ goto restart;
+ }
+
+ /* After the ALIVE response, we can send host commands to the uCode */
+ set_bit(S_ALIVE, &il->status);
+
+ /* Enable watchdog to monitor the driver tx queues */
+ il_setup_watchdog(il);
+
+ if (il_is_rfkill(il))
+ return;
+
+ ieee80211_wake_queues(il->hw);
+
+ il->active_rate = RATES_MASK;
+
+ if (il_is_associated_ctx(ctx)) {
+ struct il_rxon_cmd *active_rxon =
+ (struct il_rxon_cmd *)&ctx->active;
+ /* apply any changes in staging */
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ } else {
+ /* Initialize our rx_config data */
+ il_connection_init_rx_config(il, &il->ctx);
+
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+ }
+
+ /* Configure bluetooth coexistence if enabled */
+ il_send_bt_config(il);
+
+ il4965_reset_run_time_calib(il);
+
+ set_bit(S_READY, &il->status);
+
+ /* Configure the adapter for unassociated operation */
+ il_commit_rxon(il, ctx);
+
+ /* At this point, the NIC is initialized and operational */
+ il4965_rf_kill_ct_config(il);
+
+ D_INFO("ALIVE processing complete.\n");
+ wake_up(&il->wait_command_queue);
+
+ il_power_update_mode(il, true);
+ D_INFO("Updated power mode\n");
+
+ return;
+
+restart:
+ queue_work(il->workqueue, &il->restart);
+}
+
+static void il4965_cancel_deferred_work(struct il_priv *il);
+
+static void
+__il4965_down(struct il_priv *il)
+{
+ unsigned long flags;
+ int exit_pending;
+
+ D_INFO(DRV_NAME " is going down\n");
+
+ il_scan_cancel_timeout(il, 200);
+
+ exit_pending = test_and_set_bit(S_EXIT_PENDING, &il->status);
+
+ /* Stop TX queues watchdog. We need to have S_EXIT_PENDING bit set
+ * to prevent the watchdog timer from being rearmed */
+ del_timer_sync(&il->watchdog);
+
+ il_clear_ucode_stations(il, NULL);
+ il_dealloc_bcast_stations(il);
+ il_clear_driver_stations(il);
+
+ /* Unblock any waiting calls */
+ wake_up_all(&il->wait_command_queue);
+
+ /* Wipe out the EXIT_PENDING status bit if we are not actually
+ * exiting the module */
+ if (!exit_pending)
+ clear_bit(S_EXIT_PENDING, &il->status);
+
+ /* stop and reset the on-board processor */
+ _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+ /* tell the device to stop sending interrupts */
+ spin_lock_irqsave(&il->lock, flags);
+ il_disable_interrupts(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+ il4965_synchronize_irq(il);
+
+ if (il->mac80211_registered)
+ ieee80211_stop_queues(il->hw);
+
+ /* If we have not previously called il_init() then
+ * clear all bits but the RF Kill bit and return */
+ if (!il_is_init(il)) {
+ il->status =
+     test_bit(S_RF_KILL_HW, &il->status) << S_RF_KILL_HW |
+     test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
+     test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+ goto exit;
+ }
+
+ /* ...otherwise clear out all the status bits but the RF Kill
+ * bit and continue taking the NIC down. */
+ il->status &=
+     test_bit(S_RF_KILL_HW, &il->status) << S_RF_KILL_HW |
+     test_bit(S_GEO_CONFIGURED, &il->status) << S_GEO_CONFIGURED |
+     test_bit(S_FW_ERROR, &il->status) << S_FW_ERROR |
+     test_bit(S_EXIT_PENDING, &il->status) << S_EXIT_PENDING;
+
+ il4965_txq_ctx_stop(il);
+ il4965_rxq_stop(il);
+
+ /* Power-down device's busmaster DMA clocks */
+ il_wr_prph(il, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(5);
+
+ /* Make sure (redundant) we've released our request to stay awake */
+ il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ /* Stop the device, and put it in low power state */
+ il_apm_stop(il);
+
+exit:
+ memset(&il->card_alive, 0, sizeof(struct il_alive_resp));
+
+ dev_kfree_skb(il->beacon_skb);
+ il->beacon_skb = NULL;
+
+ /* clear out any free frames */
+ il4965_clear_free_frames(il);
+}
+
+static void
+il4965_down(struct il_priv *il)
+{
+ mutex_lock(&il->mutex);
+ __il4965_down(il);
+ mutex_unlock(&il->mutex);
+
+ il4965_cancel_deferred_work(il);
+}
+
+#define HW_READY_TIMEOUT (50)
+
+static int
+il4965_set_hw_ready(struct il_priv *il)
+{
+ int ret = 0;
+
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
+
+ /* See if we got it */
+ ret =
+ _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_READY, HW_READY_TIMEOUT);
+ if (ret != -ETIMEDOUT)
+ il->hw_ready = true;
+ else
+ il->hw_ready = false;
+
+ D_INFO("hardware %s\n", (il->hw_ready == 1) ? "ready" : "not ready");
+ return ret;
+}
+
+static int
+il4965_prepare_card_hw(struct il_priv *il)
+{
+ int ret = 0;
+
+ D_INFO("il4965_prepare_card_hw enter\n");
+
+ ret = il4965_set_hw_ready(il);
+ if (il->hw_ready)
+ return ret;
+
+ /* If HW is not ready, prepare the conditions to check again */
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG, CSR_HW_IF_CONFIG_REG_PREPARE);
+
+ ret =
+ _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
+ ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
+ CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
+
+ /* HW should be ready by now, check again. */
+ if (ret != -ETIMEDOUT)
+ il4965_set_hw_ready(il);
+
+ return ret;
+}
+
+#define MAX_HW_RESTARTS 5
+
+static int
+__il4965_up(struct il_priv *il)
+{
+ int i;
+ int ret;
+
+ if (test_bit(S_EXIT_PENDING, &il->status)) {
+ IL_WARN("Exit pending; will not bring the NIC up\n");
+ return -EIO;
+ }
+
+ if (!il->ucode_data_backup.v_addr || !il->ucode_data.v_addr) {
+ IL_ERR("ucode not available for device bringup\n");
+ return -EIO;
+ }
+
+ ret = il4965_alloc_bcast_station(il, &il->ctx);
+ if (ret) {
+ il_dealloc_bcast_stations(il);
+ return ret;
+ }
+
+ il4965_prepare_card_hw(il);
+
+ if (!il->hw_ready) {
+ IL_WARN("Exit HW not ready\n");
+ return -EIO;
+ }
+
+ /* If platform's RF_KILL switch is NOT set to KILL */
+ if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+ clear_bit(S_RF_KILL_HW, &il->status);
+ else
+ set_bit(S_RF_KILL_HW, &il->status);
+
+ if (il_is_rfkill(il)) {
+ wiphy_rfkill_set_hw_state(il->hw->wiphy, true);
+
+ il_enable_interrupts(il);
+ IL_WARN("Radio disabled by HW RF Kill switch\n");
+ return 0;
+ }
+
+ _il_wr(il, CSR_INT, 0xFFFFFFFF);
+
+ /* must be initialised before il_hw_nic_init */
+ il->cmd_queue = IL_DEFAULT_CMD_QUEUE_NUM;
+
+ ret = il4965_hw_nic_init(il);
+ if (ret) {
+ IL_ERR("Unable to init nic\n");
+ return ret;
+ }
+
+ /* make sure rfkill handshake bits are cleared */
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ /* clear (again), then enable host interrupts */
+ _il_wr(il, CSR_INT, 0xFFFFFFFF);
+ il_enable_interrupts(il);
+
+ /* really make sure rfkill handshake bits are cleared */
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ _il_wr(il, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+
+ /* Copy original ucode data image from disk into backup cache.
+ * This will be used to initialize the on-board processor's
+ * data SRAM for a clean start when the runtime program first loads. */
+ memcpy(il->ucode_data_backup.v_addr, il->ucode_data.v_addr,
+ il->ucode_data.len);
+
+ for (i = 0; i < MAX_HW_RESTARTS; i++) {
+
+ /* load bootstrap state machine,
+ * load bootstrap program into processor's memory,
+ * prepare to load the "initialize" uCode */
+ ret = il->cfg->ops->lib->load_ucode(il);
+
+ if (ret) {
+ IL_ERR("Unable to set up bootstrap uCode: %d\n", ret);
+ continue;
+ }
+
+ /* start card; "initialize" will load runtime ucode */
+ il4965_nic_start(il);
+
+ D_INFO(DRV_NAME " is coming up\n");
+
+ return 0;
+ }
+
+ set_bit(S_EXIT_PENDING, &il->status);
+ __il4965_down(il);
+ clear_bit(S_EXIT_PENDING, &il->status);
+
+ /* tried to restart and config the device for as long as our
+ * patience could withstand */
+ IL_ERR("Unable to initialize device after %d attempts.\n", i);
+ return -EIO;
+}
+
+/*****************************************************************************
+ *
+ * Workqueue callbacks
+ *
+ *****************************************************************************/
+
+static void
+il4965_bg_init_alive_start(struct work_struct *data)
+{
+ struct il_priv *il =
+ container_of(data, struct il_priv, init_alive_start.work);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ goto out;
+
+ il->cfg->ops->lib->init_alive_start(il);
+out:
+ mutex_unlock(&il->mutex);
+}
+
+static void
+il4965_bg_alive_start(struct work_struct *data)
+{
+ struct il_priv *il =
+ container_of(data, struct il_priv, alive_start.work);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ goto out;
+
+ il4965_alive_start(il);
+out:
+ mutex_unlock(&il->mutex);
+}
+
+static void
+il4965_bg_run_time_calib_work(struct work_struct *work)
+{
+ struct il_priv *il = container_of(work, struct il_priv,
+ run_time_calib_work);
+
+ mutex_lock(&il->mutex);
+
+ if (test_bit(S_EXIT_PENDING, &il->status) ||
+ test_bit(S_SCANNING, &il->status)) {
+ mutex_unlock(&il->mutex);
+ return;
+ }
+
+ if (il->start_calib) {
+ il4965_chain_noise_calibration(il, (void *)&il->_4965.stats);
+ il4965_sensitivity_calibration(il, (void *)&il->_4965.stats);
+ }
+
+ mutex_unlock(&il->mutex);
+}
+
+static void
+il4965_bg_restart(struct work_struct *data)
+{
+ struct il_priv *il = container_of(data, struct il_priv, restart);
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ if (test_and_clear_bit(S_FW_ERROR, &il->status)) {
+ mutex_lock(&il->mutex);
+ il->ctx.vif = NULL;
+ il->is_open = 0;
+
+ __il4965_down(il);
+
+ mutex_unlock(&il->mutex);
+ il4965_cancel_deferred_work(il);
+ ieee80211_restart_hw(il->hw);
+ } else {
+ il4965_down(il);
+
+ mutex_lock(&il->mutex);
+ if (test_bit(S_EXIT_PENDING, &il->status)) {
+ mutex_unlock(&il->mutex);
+ return;
+ }
+
+ __il4965_up(il);
+ mutex_unlock(&il->mutex);
+ }
+}
+
+static void
+il4965_bg_rx_replenish(struct work_struct *data)
+{
+ struct il_priv *il = container_of(data, struct il_priv, rx_replenish);
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ mutex_lock(&il->mutex);
+ il4965_rx_replenish(il);
+ mutex_unlock(&il->mutex);
+}
+
+/*****************************************************************************
+ *
+ * mac80211 entry point functions
+ *
+ *****************************************************************************/
+
+#define UCODE_READY_TIMEOUT (4 * HZ)
+
+/*
+ * Not a mac80211 entry point function, but it fits in with all the
+ * other mac80211 functions grouped here.
+ */
+static int
+il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
+{
+ int ret;
+ struct ieee80211_hw *hw = il->hw;
+
+ hw->rate_control_algorithm = "iwl-4965-rs";
+
+ /* Tell mac80211 our characteristics */
+ hw->flags =
+ IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_NEED_DTIM_PERIOD | IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+
+ if (il->cfg->sku & IL_SKU_N)
+ hw->flags |=
+ IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
+ IEEE80211_HW_SUPPORTS_STATIC_SMPS;
+
+ hw->sta_data_size = sizeof(struct il_station_priv);
+ hw->vif_data_size = sizeof(struct il_vif_priv);
+
+ hw->wiphy->interface_modes |= il->ctx.interface_modes;
+ hw->wiphy->interface_modes |= il->ctx.exclusive_interface_modes;
+
+ hw->wiphy->flags |=
+ WIPHY_FLAG_CUSTOM_REGULATORY | WIPHY_FLAG_DISABLE_BEACON_HINTS;
+
+ /*
+ * For now, disable PS by default because it affects
+ * RX performance significantly.
+ */
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+ /* we create the 802.11 header and a zero-length SSID element */
+ hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;
+
+ /* Default value; 4 EDCA QOS priorities */
+ hw->queues = 4;
+
+ hw->max_listen_interval = IL_CONN_MAX_LISTEN_INTERVAL;
+
+ if (il->bands[IEEE80211_BAND_2GHZ].n_channels)
+ il->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &il->bands[IEEE80211_BAND_2GHZ];
+ if (il->bands[IEEE80211_BAND_5GHZ].n_channels)
+ il->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &il->bands[IEEE80211_BAND_5GHZ];
+
+ il_leds_init(il);
+
+ ret = ieee80211_register_hw(il->hw);
+ if (ret) {
+ IL_ERR("Failed to register hw (error %d)\n", ret);
+ return ret;
+ }
+ il->mac80211_registered = 1;
+
+ return 0;
+}
+
+int
+il4965_mac_start(struct ieee80211_hw *hw)
+{
+ struct il_priv *il = hw->priv;
+ int ret;
+
+ D_MAC80211("enter\n");
+
+ /* we should be verifying the device is ready to be opened */
+ mutex_lock(&il->mutex);
+ ret = __il4965_up(il);
+ mutex_unlock(&il->mutex);
+
+ if (ret)
+ return ret;
+
+ if (il_is_rfkill(il))
+ goto out;
+
+ D_INFO("Start UP work done.\n");
+
+ /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
+ * mac80211 will not be run successfully. */
+ ret = wait_event_timeout(il->wait_command_queue,
+ test_bit(S_READY, &il->status),
+ UCODE_READY_TIMEOUT);
+ if (!ret) {
+ if (!test_bit(S_READY, &il->status)) {
+ IL_ERR("START_ALIVE timeout after %dms.\n",
+ jiffies_to_msecs(UCODE_READY_TIMEOUT));
+ return -ETIMEDOUT;
+ }
+ }
+
+ il4965_led_enable(il);
+
+out:
+ il->is_open = 1;
+ D_MAC80211("leave\n");
+ return 0;
+}
+
+void
+il4965_mac_stop(struct ieee80211_hw *hw)
+{
+ struct il_priv *il = hw->priv;
+
+ D_MAC80211("enter\n");
+
+ if (!il->is_open)
+ return;
+
+ il->is_open = 0;
+
+ il4965_down(il);
+
+ flush_workqueue(il->workqueue);
+
+ /* User space software may expect to get rfkill changes
+ * even if the interface is down */
+ _il_wr(il, CSR_INT, 0xFFFFFFFF);
+ il_enable_rfkill_int(il);
+
+ D_MAC80211("leave\n");
+}
+
+void
+il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct il_priv *il = hw->priv;
+
+ D_MACDUMP("enter\n");
+
+ D_TX("dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
+ ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
+
+ if (il4965_tx_skb(il, skb))
+ dev_kfree_skb_any(skb);
+
+ D_MACDUMP("leave\n");
+}
+
+void
+il4965_mac_update_tkip_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32, u16 * phase1key)
+{
+ struct il_priv *il = hw->priv;
+ struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+ D_MAC80211("enter\n");
+
+ il4965_update_tkip_key(il, vif_priv->ctx, keyconf, sta, iv32,
+ phase1key);
+
+ D_MAC80211("leave\n");
+}
+
+int
+il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct il_priv *il = hw->priv;
+ struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct il_rxon_context *ctx = vif_priv->ctx;
+ int ret;
+ u8 sta_id;
+ bool is_default_wep_key = false;
+
+ D_MAC80211("enter\n");
+
+ if (il->cfg->mod_params->sw_crypto) {
+ D_MAC80211("leave - hwcrypto disabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ sta_id = il_sta_id_or_broadcast(il, vif_priv->ctx, sta);
+ if (sta_id == IL_INVALID_STATION)
+ return -EINVAL;
+
+ mutex_lock(&il->mutex);
+ il_scan_cancel_timeout(il, 100);
+
+ /*
+ * If we are getting WEP group key and we didn't receive any key mapping
+ * so far, we are in legacy wep mode (group key only), otherwise we are
+ * in 1X mode.
+ * In legacy wep mode, we use another host command to the uCode.
+ */
+ if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
+ if (cmd == SET_KEY)
+ is_default_wep_key = !ctx->key_mapping_keys;
+ else
+ is_default_wep_key =
+ (key->hw_key_idx == HW_KEY_DEFAULT);
+ }
+
+ switch (cmd) {
+ case SET_KEY:
+ if (is_default_wep_key)
+ ret =
+ il4965_set_default_wep_key(il, vif_priv->ctx, key);
+ else
+ ret =
+ il4965_set_dynamic_key(il, vif_priv->ctx, key,
+ sta_id);
+
+ D_MAC80211("enable hwcrypto key\n");
+ break;
+ case DISABLE_KEY:
+ if (is_default_wep_key)
+ ret = il4965_remove_default_wep_key(il, ctx, key);
+ else
+ ret = il4965_remove_dynamic_key(il, ctx, key, sta_id);
+
+ D_MAC80211("disable hwcrypto key\n");
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&il->mutex);
+ D_MAC80211("leave\n");
+
+ return ret;
+}
+
+int
+il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 * ssn,
+ u8 buf_size)
+{
+ struct il_priv *il = hw->priv;
+ int ret = -EINVAL;
+
+ D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid);
+
+ if (!(il->cfg->sku & IL_SKU_N))
+ return -EACCES;
+
+ mutex_lock(&il->mutex);
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ D_HT("start Rx\n");
+ ret = il4965_sta_rx_agg_start(il, sta, tid, *ssn);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ D_HT("stop Rx\n");
+ ret = il4965_sta_rx_agg_stop(il, sta, tid);
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ ret = 0;
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ D_HT("start Tx\n");
+ ret = il4965_tx_agg_start(il, vif, sta, tid, ssn);
+ break;
+ case IEEE80211_AMPDU_TX_STOP:
+ D_HT("stop Tx\n");
+ ret = il4965_tx_agg_stop(il, vif, sta, tid);
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ ret = 0;
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ret = 0;
+ break;
+ }
+ mutex_unlock(&il->mutex);
+
+ return ret;
+}
+
+int
+il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct il_priv *il = hw->priv;
+ struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+ bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+ int ret;
+ u8 sta_id;
+
+ D_INFO("received request to add station %pM\n", sta->addr);
+ mutex_lock(&il->mutex);
+ D_INFO("proceeding to add station %pM\n", sta->addr);
+ sta_priv->common.sta_id = IL_INVALID_STATION;
+
+ atomic_set(&sta_priv->pending_frames, 0);
+
+ ret =
+ il_add_station_common(il, vif_priv->ctx, sta->addr, is_ap, sta,
+ &sta_id);
+ if (ret) {
+ IL_ERR("Unable to add station %pM (%d)\n", sta->addr, ret);
+ /* Should we return success if return code is EEXIST ? */
+ mutex_unlock(&il->mutex);
+ return ret;
+ }
+
+ sta_priv->common.sta_id = sta_id;
+
+ /* Initialize rate scaling */
+ D_INFO("Initializing rate scaling for station %pM\n", sta->addr);
+ il4965_rs_rate_init(il, sta, sta_id);
+ mutex_unlock(&il->mutex);
+
+ return 0;
+}
+
+void
+il4965_mac_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_channel_switch *ch_switch)
+{
+ struct il_priv *il = hw->priv;
+ const struct il_channel_info *ch_info;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_channel *channel = ch_switch->channel;
+ struct il_ht_config *ht_conf = &il->current_ht_config;
+
+ struct il_rxon_context *ctx = &il->ctx;
+ u16 ch;
+
+ D_MAC80211("enter\n");
+
+ mutex_lock(&il->mutex);
+
+ if (il_is_rfkill(il))
+ goto out;
+
+ if (test_bit(S_EXIT_PENDING, &il->status) ||
+ test_bit(S_SCANNING, &il->status) ||
+ test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
+ goto out;
+
+ if (!il_is_associated_ctx(ctx))
+ goto out;
+
+ if (!il->cfg->ops->lib->set_channel_switch)
+ goto out;
+
+ ch = channel->hw_value;
+ if (le16_to_cpu(ctx->active.channel) == ch)
+ goto out;
+
+ ch_info = il_get_channel_info(il, channel->band, ch);
+ if (!il_is_channel_valid(ch_info)) {
+ D_MAC80211("invalid channel\n");
+ goto out;
+ }
+
+ spin_lock_irq(&il->lock);
+
+ il->current_ht_config.smps = conf->smps_mode;
+
+ /* Configure HT40 channels */
+ ctx->ht.enabled = conf_is_ht(conf);
+ if (ctx->ht.enabled) {
+ if (conf_is_ht40_minus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ ctx->ht.is_40mhz = true;
+ } else if (conf_is_ht40_plus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ ctx->ht.is_40mhz = true;
+ } else {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ ctx->ht.is_40mhz = false;
+ }
+ } else
+ ctx->ht.is_40mhz = false;
+
+ if ((le16_to_cpu(ctx->staging.channel) != ch))
+ ctx->staging.flags = 0;
+
+ il_set_rxon_channel(il, channel, ctx);
+ il_set_rxon_ht(il, ht_conf);
+ il_set_flags_for_band(il, ctx, channel->band, ctx->vif);
+
+ spin_unlock_irq(&il->lock);
+
+ il_set_rate(il);
+ /*
+ * at this point, staging_rxon has the
+ * configuration for channel switch
+ */
+ set_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
+ il->switch_channel = cpu_to_le16(ch);
+ if (il->cfg->ops->lib->set_channel_switch(il, ch_switch)) {
+ clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status);
+ il->switch_channel = 0;
+ ieee80211_chswitch_done(ctx->vif, false);
+ }
+
+out:
+ mutex_unlock(&il->mutex);
+ D_MAC80211("leave\n");
+}
+
+void
+il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast)
+{
+ struct il_priv *il = hw->priv;
+ __le32 filter_or = 0, filter_nand = 0;
+
+#define CHK(test, flag) do { \
+ if (*total_flags & (test)) \
+ filter_or |= (flag); \
+ else \
+ filter_nand |= (flag); \
+ } while (0)
+
+ D_MAC80211("Enter: changed: 0x%x, total: 0x%x\n", changed_flags,
+ *total_flags);
+
+ CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+ /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
+ CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
+ CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+ mutex_lock(&il->mutex);
+
+ il->ctx.staging.filter_flags &= ~filter_nand;
+ il->ctx.staging.filter_flags |= filter_or;
+
+ /*
+ * Not committing directly because hardware can perform a scan,
+ * but we'll eventually commit the filter flags change anyway.
+ */
+
+ mutex_unlock(&il->mutex);
+
+ /*
+ * Receiving all multicast frames is always enabled by the
+ * default flags setup in il_connection_init_rx_config()
+ * since we currently do not support programming multicast
+ * filters into the device.
+ */
+ *total_flags &=
+ FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+ FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
+/*****************************************************************************
+ *
+ * driver setup and teardown
+ *
+ *****************************************************************************/
+
+static void
+il4965_bg_txpower_work(struct work_struct *work)
+{
+ struct il_priv *il = container_of(work, struct il_priv,
+ txpower_work);
+
+ mutex_lock(&il->mutex);
+
+ /* If a scan happened to start before we got here
+ * then just return; the stats notification will
+ * kick off another scheduled work to compensate for
+ * any temperature delta we missed here. */
+ if (test_bit(S_EXIT_PENDING, &il->status) ||
+ test_bit(S_SCANNING, &il->status))
+ goto out;
+
+ /* Regardless of whether we are associated, we must reconfigure the
+ * TX power since frames can be sent on non-radar channels while
+ * not associated */
+ il->cfg->ops->lib->send_tx_power(il);
+
+ /* Update last_temperature to keep is_calib_needed from running
+ * when it isn't needed... */
+ il->last_temperature = il->temperature;
+out:
+ mutex_unlock(&il->mutex);
+}
+
+static void
+il4965_setup_deferred_work(struct il_priv *il)
+{
+ il->workqueue = create_singlethread_workqueue(DRV_NAME);
+
+ init_waitqueue_head(&il->wait_command_queue);
+
+ INIT_WORK(&il->restart, il4965_bg_restart);
+ INIT_WORK(&il->rx_replenish, il4965_bg_rx_replenish);
+ INIT_WORK(&il->run_time_calib_work, il4965_bg_run_time_calib_work);
+ INIT_DELAYED_WORK(&il->init_alive_start, il4965_bg_init_alive_start);
+ INIT_DELAYED_WORK(&il->alive_start, il4965_bg_alive_start);
+
+ il_setup_scan_deferred_work(il);
+
+ INIT_WORK(&il->txpower_work, il4965_bg_txpower_work);
+
+ init_timer(&il->stats_periodic);
+ il->stats_periodic.data = (unsigned long)il;
+ il->stats_periodic.function = il4965_bg_stats_periodic;
+
+ init_timer(&il->watchdog);
+ il->watchdog.data = (unsigned long)il;
+ il->watchdog.function = il_bg_watchdog;
+
+ tasklet_init(&il->irq_tasklet,
+ (void (*)(unsigned long))il4965_irq_tasklet,
+ (unsigned long)il);
+}
+
+static void
+il4965_cancel_deferred_work(struct il_priv *il)
+{
+ cancel_work_sync(&il->txpower_work);
+ cancel_delayed_work_sync(&il->init_alive_start);
+ cancel_delayed_work(&il->alive_start);
+ cancel_work_sync(&il->run_time_calib_work);
+
+ il_cancel_scan_deferred_work(il);
+
+ del_timer_sync(&il->stats_periodic);
+}
+
+static void
+il4965_init_hw_rates(struct il_priv *il, struct ieee80211_rate *rates)
+{
+ int i;
+
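+ /* il_rates[].ieee is in 500 kb/s units; ieee80211_rate.bitrate
+ * expects 100 kb/s units, hence the multiply by 5. */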
+ for (i = 0; i < RATE_COUNT_LEGACY; i++) {
+ rates[i].bitrate = il_rates[i].ieee * 5;
+ rates[i].hw_value = i; /* Rate scaling will work on idxes */
+ rates[i].hw_value_short = i;
+ rates[i].flags = 0;
+ if ((i >= IL_FIRST_CCK_RATE) && (i <= IL_LAST_CCK_RATE)) {
+ /*
+ * If CCK != 1M then set short preamble rate flag.
+ */
+ rates[i].flags |=
+ (il_rates[i].plcp ==
+ RATE_1M_PLCP) ? 0 : IEEE80211_RATE_SHORT_PREAMBLE;
+ }
+ }
+}
+
+/*
+ * Acquire il->lock before calling this function !
+ */
+void
+il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx)
+{
+ il_wr(il, HBUS_TARG_WRPTR, (idx & 0xff) | (txq_id << 8));
+ il_wr_prph(il, IL49_SCD_QUEUE_RDPTR(txq_id), idx);
+}
+
+void
+il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
+ int tx_fifo_id, int scd_retry)
+{
+ int txq_id = txq->q.id;
+
+ /* Find out whether to activate Tx queue */
+ int active = test_bit(txq_id, &il->txq_ctx_active_msk) ? 1 : 0;
+
+ /* Set up and activate */
+ il_wr_prph(il, IL49_SCD_QUEUE_STATUS_BITS(txq_id),
+ (active << IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+ (tx_fifo_id << IL49_SCD_QUEUE_STTS_REG_POS_TXF) |
+ (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_WSL) |
+ (scd_retry << IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
+ IL49_SCD_QUEUE_STTS_REG_MSK);
+
+ txq->sched_retry = scd_retry;
+
+ D_INFO("%s %s Queue %d on AC %d\n", active ? "Activate" : "Deactivate",
+ scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
+}
+
+static int
+il4965_init_drv(struct il_priv *il)
+{
+ int ret;
+
+ spin_lock_init(&il->sta_lock);
+ spin_lock_init(&il->hcmd_lock);
+
+ INIT_LIST_HEAD(&il->free_frames);
+
+ mutex_init(&il->mutex);
+
+ il->ieee_channels = NULL;
+ il->ieee_rates = NULL;
+ il->band = IEEE80211_BAND_2GHZ;
+
+ il->iw_mode = NL80211_IFTYPE_STATION;
+ il->current_ht_config.smps = IEEE80211_SMPS_STATIC;
+ il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
+
+ /* initialize force reset */
+ il->force_reset.reset_duration = IL_DELAY_NEXT_FORCE_FW_RELOAD;
+
+ /* Choose which receivers/antennas to use */
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
+
+ il_init_scan_params(il);
+
+ ret = il_init_channel_map(il);
+ if (ret) {
+ IL_ERR("initializing regulatory failed: %d\n", ret);
+ goto err;
+ }
+
+ ret = il_init_geos(il);
+ if (ret) {
+ IL_ERR("initializing geos failed: %d\n", ret);
+ goto err_free_channel_map;
+ }
+ il4965_init_hw_rates(il, il->ieee_rates);
+
+ return 0;
+
+err_free_channel_map:
+ il_free_channel_map(il);
+err:
+ return ret;
+}
+
+static void
+il4965_uninit_drv(struct il_priv *il)
+{
+ il4965_calib_free_results(il);
+ il_free_geos(il);
+ il_free_channel_map(il);
+ kfree(il->scan_cmd);
+}
+
+static void
+il4965_hw_detect(struct il_priv *il)
+{
+ il->hw_rev = _il_rd(il, CSR_HW_REV);
+ il->hw_wa_rev = _il_rd(il, CSR_HW_REV_WA_REG);
+ il->rev_id = il->pci_dev->revision;
+ D_INFO("HW Revision ID = 0x%X\n", il->rev_id);
+}
+
+static int
+il4965_set_hw_params(struct il_priv *il)
+{
+ il->hw_params.max_rxq_size = RX_QUEUE_SIZE;
+ il->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
+ if (il->cfg->mod_params->amsdu_size_8K)
+ il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_8K);
+ else
+ il->hw_params.rx_page_order = get_order(IL_RX_BUF_SIZE_4K);
+
+ il->hw_params.max_beacon_itrvl = IL_MAX_UCODE_BEACON_INTERVAL;
+
+ if (il->cfg->mod_params->disable_11n)
+ il->cfg->sku &= ~IL_SKU_N;
+
+ /* Device-specific setup */
+ return il->cfg->ops->lib->set_hw_params(il);
+}
+
+static const u8 il4965_bss_ac_to_fifo[] = {
+ IL_TX_FIFO_VO,
+ IL_TX_FIFO_VI,
+ IL_TX_FIFO_BE,
+ IL_TX_FIFO_BK,
+};
+
+static const u8 il4965_bss_ac_to_queue[] = {
+ 0, 1, 2, 3,
+};
+
+static int
+il4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int err = 0;
+ struct il_priv *il;
+ struct ieee80211_hw *hw;
+ struct il_cfg *cfg = (struct il_cfg *)(ent->driver_data);
+ unsigned long flags;
+ u16 pci_cmd;
+
+ /************************
+ * 1. Allocating HW data
+ ************************/
+
+ hw = il_alloc_all(cfg);
+ if (!hw) {
+ err = -ENOMEM;
+ goto out;
+ }
+ il = hw->priv;
+ /* At this point both hw and il are allocated. */
+
+ il->ctx.ctxid = 0;
+
+ il->ctx.always_active = true;
+ il->ctx.is_active = true;
+ il->ctx.rxon_cmd = C_RXON;
+ il->ctx.rxon_timing_cmd = C_RXON_TIMING;
+ il->ctx.rxon_assoc_cmd = C_RXON_ASSOC;
+ il->ctx.qos_cmd = C_QOS_PARAM;
+ il->ctx.ap_sta_id = IL_AP_ID;
+ il->ctx.wep_key_cmd = C_WEPKEY;
+ il->ctx.ac_to_fifo = il4965_bss_ac_to_fifo;
+ il->ctx.ac_to_queue = il4965_bss_ac_to_queue;
+ il->ctx.exclusive_interface_modes = BIT(NL80211_IFTYPE_ADHOC);
+ il->ctx.interface_modes = BIT(NL80211_IFTYPE_STATION);
+ il->ctx.ap_devtype = RXON_DEV_TYPE_AP;
+ il->ctx.ibss_devtype = RXON_DEV_TYPE_IBSS;
+ il->ctx.station_devtype = RXON_DEV_TYPE_ESS;
+ il->ctx.unused_devtype = RXON_DEV_TYPE_ESS;
+
+ SET_IEEE80211_DEV(hw, &pdev->dev);
+
+ D_INFO("*** LOAD DRIVER ***\n");
+ il->cfg = cfg;
+ il->pci_dev = pdev;
+ il->inta_mask = CSR_INI_SET_MASK;
+
+ if (il_alloc_traffic_mem(il))
+ IL_ERR("Not enough memory to generate traffic log\n");
+
+ /**************************
+ * 2. Initializing PCI bus
+ **************************/
+ pci_disable_link_state(pdev,
+ PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM);
+
+ if (pci_enable_device(pdev)) {
+ err = -ENODEV;
+ goto out_ieee80211_free_hw;
+ }
+
+ pci_set_master(pdev);
+
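+ /* Try 36-bit DMA addressing first, fall back to 32-bit if the
+ * platform cannot provide it. */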
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
+ if (err) {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (!err)
+ err =
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ /* both attempts failed: */
+ if (err) {
+ IL_WARN("No suitable DMA available.\n");
+ goto out_pci_disable_device;
+ }
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err)
+ goto out_pci_disable_device;
+
+ pci_set_drvdata(pdev, il);
+
+ /***********************
+ * 3. Read REV register
+ ***********************/
+ il->hw_base = pci_iomap(pdev, 0, 0);
+ if (!il->hw_base) {
+ err = -ENODEV;
+ goto out_pci_release_regions;
+ }
+
+ D_INFO("pci_resource_len = 0x%08llx\n",
+ (unsigned long long)pci_resource_len(pdev, 0));
+ D_INFO("pci_resource_base = %p\n", il->hw_base);
+
+ /* these spin locks will be used in apm_ops.init and EEPROM access,
+ * so initialize them now
+ */
+ spin_lock_init(&il->reg_lock);
+ spin_lock_init(&il->lock);
+
+ /*
+ * stop and reset the on-board processor just in case it is in a
+ * strange state ... like being left stranded by a primary kernel
+ * and this is now the kdump kernel trying to start up
+ */
+ _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
+ il4965_hw_detect(il);
+ IL_INFO("Detected %s, REV=0x%X\n", il->cfg->name, il->hw_rev);
+
+ /* We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state */
+ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+ il4965_prepare_card_hw(il);
+ if (!il->hw_ready) {
+ IL_WARN("Failed, HW not ready\n");
+ goto out_iounmap;
+ }
+
+ /*****************
+ * 4. Read EEPROM
+ *****************/
+ /* Read the EEPROM */
+ err = il_eeprom_init(il);
+ if (err) {
+ IL_ERR("Unable to init EEPROM\n");
+ goto out_iounmap;
+ }
+ err = il4965_eeprom_check_version(il);
+ if (err)
+ goto out_free_eeprom;
+
+
+ /* extract MAC Address */
+ il4965_eeprom_get_mac(il, il->addresses[0].addr);
+ D_INFO("MAC address: %pM\n", il->addresses[0].addr);
+ il->hw->wiphy->addresses = il->addresses;
+ il->hw->wiphy->n_addresses = 1;
+
+ /************************
+ * 5. Setup HW constants
+ ************************/
+ if (il4965_set_hw_params(il)) {
+ IL_ERR("failed to set hw parameters\n");
+ goto out_free_eeprom;
+ }
+
+ /*******************
+ * 6. Setup il
+ *******************/
+
+ err = il4965_init_drv(il);
+ if (err)
+ goto out_free_eeprom;
+ /* At this point both hw and il are initialized. */
+
+ /********************
+ * 7. Setup services
+ ********************/
+ spin_lock_irqsave(&il->lock, flags);
+ il_disable_interrupts(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ pci_enable_msi(il->pci_dev);
+
+ err = request_irq(il->pci_dev->irq, il_isr, IRQF_SHARED, DRV_NAME, il);
+ if (err) {
+ IL_ERR("Error allocating IRQ %d\n", il->pci_dev->irq);
+ goto out_disable_msi;
+ }
+
+ il4965_setup_deferred_work(il);
+ il4965_setup_handlers(il);
+
+ /*********************************************
+ * 8. Enable interrupts and read RFKILL state
+ *********************************************/
+
+ /* enable rfkill interrupt: hw bug w/a */
+ pci_read_config_word(il->pci_dev, PCI_COMMAND, &pci_cmd);
+ if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
+ pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
+ pci_write_config_word(il->pci_dev, PCI_COMMAND, pci_cmd);
+ }
+
+ il_enable_rfkill_int(il);
+
+ /* If platform's RF_KILL switch is NOT set to KILL */
+ if (_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
+ clear_bit(S_RF_KILL_HW, &il->status);
+ else
+ set_bit(S_RF_KILL_HW, &il->status);
+
+ wiphy_rfkill_set_hw_state(il->hw->wiphy,
+ test_bit(S_RF_KILL_HW, &il->status));
+
+ il_power_initialize(il);
+
+ init_completion(&il->_4965.firmware_loading_complete);
+
+ err = il4965_request_firmware(il, true);
+ if (err)
+ goto out_destroy_workqueue;
+
+ return 0;
+
+out_destroy_workqueue:
+ destroy_workqueue(il->workqueue);
+ il->workqueue = NULL;
+ free_irq(il->pci_dev->irq, il);
+out_disable_msi:
+ pci_disable_msi(il->pci_dev);
+ il4965_uninit_drv(il);
+out_free_eeprom:
+ il_eeprom_free(il);
+out_iounmap:
+ pci_iounmap(pdev, il->hw_base);
+out_pci_release_regions:
+ pci_set_drvdata(pdev, NULL);
+ pci_release_regions(pdev);
+out_pci_disable_device:
+ pci_disable_device(pdev);
+out_ieee80211_free_hw:
+ il_free_traffic_mem(il);
+ ieee80211_free_hw(il->hw);
+out:
+ return err;
+}
+
+static void __devexit
+il4965_pci_remove(struct pci_dev *pdev)
+{
+ struct il_priv *il = pci_get_drvdata(pdev);
+ unsigned long flags;
+
+ if (!il)
+ return;
+
+ wait_for_completion(&il->_4965.firmware_loading_complete);
+
+ D_INFO("*** UNLOAD DRIVER ***\n");
+
+ il_dbgfs_unregister(il);
+ sysfs_remove_group(&pdev->dev.kobj, &il_attribute_group);
+
+ /* The ieee80211_unregister_hw() call will cause il_mac_stop() to
+ * be called and il4965_down() to run; since we are removing the device
+ * we need to set the S_EXIT_PENDING bit first.
+ */
+ set_bit(S_EXIT_PENDING, &il->status);
+
+ il_leds_exit(il);
+
+ if (il->mac80211_registered) {
+ ieee80211_unregister_hw(il->hw);
+ il->mac80211_registered = 0;
+ } else {
+ il4965_down(il);
+ }
+
+ /*
+ * Make sure device is reset to low power before unloading driver.
+ * This may be redundant with il4965_down(), but there are paths to
+ * run il4965_down() without calling apm_ops.stop(), and there are
+ * paths to avoid running il4965_down() at all before leaving driver.
+ * This (inexpensive) call *makes sure* device is reset.
+ */
+ il_apm_stop(il);
+
+ /* make sure we flush any pending irq or
+ * tasklet for the driver
+ */
+ spin_lock_irqsave(&il->lock, flags);
+ il_disable_interrupts(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ il4965_synchronize_irq(il);
+
+ il4965_dealloc_ucode_pci(il);
+
+ if (il->rxq.bd)
+ il4965_rx_queue_free(il, &il->rxq);
+ il4965_hw_txq_ctx_free(il);
+
+ il_eeprom_free(il);
+
+ /*netif_stop_queue(dev); */
+ flush_workqueue(il->workqueue);
+
+ /* ieee80211_unregister_hw calls il_mac_stop, which flushes
+ * il->workqueue... so we can't take down the workqueue
+ * until now... */
+ destroy_workqueue(il->workqueue);
+ il->workqueue = NULL;
+ il_free_traffic_mem(il);
+
+ free_irq(il->pci_dev->irq, il);
+ pci_disable_msi(il->pci_dev);
+ pci_iounmap(pdev, il->hw_base);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ il4965_uninit_drv(il);
+
+ dev_kfree_skb(il->beacon_skb);
+
+ ieee80211_free_hw(il->hw);
+}
+
+/*
+ * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFOs mask;
+ * must be called under il->lock and with MAC access held
+ */
+void
+il4965_txq_set_sched(struct il_priv *il, u32 mask)
+{
+ il_wr_prph(il, IL49_SCD_TXFACT, mask);
+}
+
+/*****************************************************************************
+ *
+ * driver and module entry point
+ *
+ *****************************************************************************/
+
+/* Hardware specific file defines the PCI IDs table for that hardware module */
+static DEFINE_PCI_DEVICE_TABLE(il4965_hw_card_ids) = {
+ {IL_PCI_DEVICE(0x4229, PCI_ANY_ID, il4965_cfg)},
+ {IL_PCI_DEVICE(0x4230, PCI_ANY_ID, il4965_cfg)},
+ {0}
+};
+MODULE_DEVICE_TABLE(pci, il4965_hw_card_ids);
+
+static struct pci_driver il4965_driver = {
+ .name = DRV_NAME,
+ .id_table = il4965_hw_card_ids,
+ .probe = il4965_pci_probe,
+ .remove = __devexit_p(il4965_pci_remove),
+ .driver.pm = IL_LEGACY_PM_OPS,
+};
+
+static int __init
+il4965_init(void)
+{
+
+ int ret;
+ pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
+ pr_info(DRV_COPYRIGHT "\n");
+
+ ret = il4965_rate_control_register();
+ if (ret) {
+ pr_err("Unable to register rate control algorithm: %d\n", ret);
+ return ret;
+ }
+
+ ret = pci_register_driver(&il4965_driver);
+ if (ret) {
+ pr_err("Unable to initialize PCI module\n");
+ goto error_register;
+ }
+
+ return ret;
+
+error_register:
+ il4965_rate_control_unregister();
+ return ret;
+}
+
+static void __exit
+il4965_exit(void)
+{
+ pci_unregister_driver(&il4965_driver);
+ il4965_rate_control_unregister();
+}
+
+module_exit(il4965_exit);
+module_init(il4965_init);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+module_param_named(debug, il_debug_level, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug, "debug output mask");
+#endif
+
+module_param_named(swcrypto, il4965_mod_params.sw_crypto, int, S_IRUGO);
+MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
+module_param_named(queues_num, il4965_mod_params.num_of_queues, int, S_IRUGO);
+MODULE_PARM_DESC(queues_num, "number of hw queues.");
+module_param_named(11n_disable, il4965_mod_params.disable_11n, int, S_IRUGO);
+MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
+module_param_named(amsdu_size_8K, il4965_mod_params.amsdu_size_8K, int,
+ S_IRUGO);
+MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
+module_param_named(fw_restart, il4965_mod_params.restart_fw, int, S_IRUGO);
+MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
new file mode 100644
index 0000000..467d0cb
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -0,0 +1,2860 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <net/mac80211.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+
+#include <linux/workqueue.h>
+
+#include "common.h"
+#include "4965.h"
+
+#define IL4965_RS_NAME "iwl-4965-rs"
+
+#define NUM_TRY_BEFORE_ANT_TOGGLE 1
+#define IL_NUMBER_TRY 1
+#define IL_HT_NUMBER_TRY 3
+
+#define RATE_MAX_WINDOW 62 /* # tx in history win */
+#define RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */
+#define RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
+
+/* max allowed rate miss before sync LQ cmd */
+#define IL_MISSED_RATE_MAX 15
+/* max time to accumulate rate history before flushing */
+#define RATE_SCALE_FLUSH_INTVL (3*HZ)
+
+static u8 rs_ht_to_legacy[] = {
+ RATE_6M_IDX, RATE_6M_IDX,
+ RATE_6M_IDX, RATE_6M_IDX,
+ RATE_6M_IDX,
+ RATE_6M_IDX, RATE_9M_IDX,
+ RATE_12M_IDX, RATE_18M_IDX,
+ RATE_24M_IDX, RATE_36M_IDX,
+ RATE_48M_IDX, RATE_54M_IDX
+};
+
+static const u8 ant_toggle_lookup[] = {
+ /*ANT_NONE -> */ ANT_NONE,
+ /*ANT_A -> */ ANT_B,
+ /*ANT_B -> */ ANT_C,
+ /*ANT_AB -> */ ANT_BC,
+ /*ANT_C -> */ ANT_A,
+ /*ANT_AC -> */ ANT_AB,
+ /*ANT_BC -> */ ANT_AC,
+ /*ANT_ABC -> */ ANT_ABC,
+};
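Read as toggle chains (a sketch derived from the table above, walked by il4965_rs_toggle_antenna() further down): single antennas cycle

	ANT_A -> ANT_B -> ANT_C -> ANT_A

and antenna pairs cycle

	ANT_AB -> ANT_BC -> ANT_AC -> ANT_AB

while ANT_NONE and ANT_ABC map to themselves, i.e. there is nothing to toggle to.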
+
+#define IL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
+ [RATE_##r##M_IDX] = { RATE_##r##M_PLCP, \
+ RATE_SISO_##s##M_PLCP, \
+ RATE_MIMO2_##s##M_PLCP,\
+ RATE_##r##M_IEEE, \
+ RATE_##ip##M_IDX, \
+ RATE_##in##M_IDX, \
+ RATE_##rp##M_IDX, \
+ RATE_##rn##M_IDX, \
+ RATE_##pp##M_IDX, \
+ RATE_##np##M_IDX }
+
+/*
+ * Parameter order:
+ *   rate, ht rate, prev rate, next rate,
+ *   prev rate-scale rate, next rate-scale rate,
+ *   prev toggle rate, next toggle rate
+ *
+ * If there isn't a valid next or previous rate then INV is used, which
+ * maps to RATE_INVALID.
+ */
+const struct il_rate_info il_rates[RATE_COUNT] = {
+ IL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
+ IL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
+ IL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
+ IL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
+ IL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
+ IL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
+ IL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
+ IL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
+ IL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
+ IL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
+ IL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
+ IL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
+ IL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
+};
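To make the macro concrete, one entry expands mechanically as follows (illustration only, derived from IL_DECLARE_RATE_INFO above; no new identifiers):

	/* IL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18) becomes: */
	[RATE_12M_IDX] = { RATE_12M_PLCP, RATE_SISO_12M_PLCP,
			   RATE_MIMO2_12M_PLCP, RATE_12M_IEEE,
			   RATE_11M_IDX, RATE_18M_IDX,	/* prev/next */
			   RATE_11M_IDX, RATE_18M_IDX,	/* prev/next rate-scale */
			   RATE_11M_IDX, RATE_18M_IDX },	/* prev/next toggle */

so each il_rate_info entry carries its own PLCP codes plus the neighbouring table idxs used when stepping rates up or down.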
+
+static int
+il4965_hwrate_to_plcp_idx(u32 rate_n_flags)
+{
+ int idx = 0;
+
+ /* HT rate format */
+ if (rate_n_flags & RATE_MCS_HT_MSK) {
+ idx = (rate_n_flags & 0xff);
+
+ if (idx >= RATE_MIMO2_6M_PLCP)
+ idx = idx - RATE_MIMO2_6M_PLCP;
+
+ idx += IL_FIRST_OFDM_RATE;
+ /* skip 9M, which is not supported in HT */
+ if (idx >= RATE_9M_IDX)
+ idx += 1;
+ if (idx >= IL_FIRST_OFDM_RATE && idx <= IL_LAST_OFDM_RATE)
+ return idx;
+
+ /* legacy rate format, search for match in table */
+ } else {
+ for (idx = 0; idx < ARRAY_SIZE(il_rates); idx++)
+ if (il_rates[idx].plcp == (rate_n_flags & 0xFF))
+ return idx;
+ }
+
+ return -1;
+}
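A worked example of the HT branch (a sketch; it assumes the usual layout where the four CCK rates occupy idx 0-3, so IL_FIRST_OFDM_RATE == RATE_6M_IDX == 4 and RATE_9M_IDX == 5, and that the SISO PLCP values number the eight HT rates contiguously from 0):

	/* SISO 12 Mbps, assuming RATE_SISO_12M_PLCP == 1 */
	idx = 1 + IL_FIRST_OFDM_RATE;	/* 5, i.e. RATE_9M_IDX */
	idx += 1;			/* skip 9M -> 6, i.e. RATE_12M_IDX */

The skip is needed because il_rates[] keeps a 9 Mbps legacy entry between 6 and 12 Mbps while HT has no 9 Mbps rate (note that the 9 Mbps row in il_rates[] above reuses the 6 Mbps HT PLCP).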
+
+static void il4965_rs_rate_scale_perform(struct il_priv *il,
+ struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ struct il_lq_sta *lq_sta);
+static void il4965_rs_fill_link_cmd(struct il_priv *il,
+ struct il_lq_sta *lq_sta, u32 rate_n_flags);
+static void il4965_rs_stay_in_table(struct il_lq_sta *lq_sta,
+ bool force_search);
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static void il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta,
+ u32 *rate_n_flags, int idx);
+#else
+static void
+il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 *rate_n_flags, int idx)
+{
+}
+#endif
+
+/**
+ * The following tables contain the expected throughput metrics for all rates
+ *
+ * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
+ *
+ * where invalid entries are zeros.
+ *
+ * CCK rates are only valid in legacy table and will only be used in G
+ * (2.4 GHz) band.
+ */
+
+static s32 expected_tpt_legacy[RATE_COUNT] = {
+ 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
+};
+
+static s32 expected_tpt_siso20MHz[4][RATE_COUNT] = {
+ {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */
+ {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */
+ {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */
+ {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_siso40MHz[4][RATE_COUNT] = {
+ {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
+ {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
+ {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
+ {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_mimo2_20MHz[4][RATE_COUNT] = {
+ {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
+ {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
+ {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
+ {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI */
+};
+
+static s32 expected_tpt_mimo2_40MHz[4][RATE_COUNT] = {
+ {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
+ {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
+ {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
+ {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
+};
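All four HT tables share the same layout (a note tying them to the code further down): il4965_rs_set_expected_tpt_table() picks the row from the (SGI, aggregation) pair,

	row 0: !is_SGI && !is_agg	/* Norm */
	row 1:  is_SGI && !is_agg	/* SGI */
	row 2: !is_SGI &&  is_agg	/* AGG */
	row 3:  is_SGI &&  is_agg	/* AGG+SGI */

and the column is the rate idx in the same 1/2/5.5/11/6/9/... order as il_rates[], which is why the CCK and 9 Mbps columns stay zero for HT.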
+
+/* mbps, mcs */
+static const struct il_rate_mcs_info il_rate_mcs[RATE_COUNT] = {
+ {"1", "BPSK DSSS"},
+ {"2", "QPSK DSSS"},
+ {"5.5", "BPSK CCK"},
+ {"11", "QPSK CCK"},
+ {"6", "BPSK 1/2"},
+ {"9", "BPSK 1/2"},
+ {"12", "QPSK 1/2"},
+ {"18", "QPSK 3/4"},
+ {"24", "16QAM 1/2"},
+ {"36", "16QAM 3/4"},
+ {"48", "64QAM 2/3"},
+ {"54", "64QAM 3/4"},
+ {"60", "64QAM 5/6"},
+};
+
+#define MCS_IDX_PER_STREAM (8)
+
+static inline u8
+il4965_rs_extract_rate(u32 rate_n_flags)
+{
+ return (u8) (rate_n_flags & 0xFF);
+}
+
+static void
+il4965_rs_rate_scale_clear_win(struct il_rate_scale_data *win)
+{
+ win->data = 0;
+ win->success_counter = 0;
+ win->success_ratio = IL_INVALID_VALUE;
+ win->counter = 0;
+ win->average_tpt = IL_INVALID_VALUE;
+ win->stamp = 0;
+}
+
+static inline u8
+il4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
+{
+ return (ant_type & valid_antenna) == ant_type;
+}
+
+/*
+ * Remove old data from the stats: everything older than
+ * TID_MAX_TIME_DIFF is deleted.
+ */
+static void
+il4965_rs_tl_rm_old_stats(struct il_traffic_load *tl, u32 curr_time)
+{
+ /* The oldest age we want to keep */
+ u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
+
+ while (tl->queue_count && tl->time_stamp < oldest_time) {
+ tl->total -= tl->packet_count[tl->head];
+ tl->packet_count[tl->head] = 0;
+ tl->time_stamp += TID_QUEUE_CELL_SPACING;
+ tl->queue_count--;
+ tl->head++;
+ if (tl->head >= TID_QUEUE_MAX_SIZE)
+ tl->head = 0;
+ }
+}
+
+/*
+ * Increment the traffic load value for this tid, and also remove
+ * any values that have aged past the accounting period.
+ */
+static u8
+il4965_rs_tl_add_packet(struct il_lq_sta *lq_data, struct ieee80211_hdr *hdr)
+{
+ u32 curr_time = jiffies_to_msecs(jiffies);
+ u32 time_diff;
+ s32 idx;
+ struct il_traffic_load *tl = NULL;
+ u8 tid;
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ u8 *qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & 0xf;
+ } else
+ return MAX_TID_COUNT;
+
+ if (unlikely(tid >= TID_MAX_LOAD_COUNT))
+ return MAX_TID_COUNT;
+
+ tl = &lq_data->load[tid];
+
+ curr_time -= curr_time % TID_ROUND_VALUE;
+
+ /* Happens only for the first packet. Initialize the data */
+ if (!(tl->queue_count)) {
+ tl->total = 1;
+ tl->time_stamp = curr_time;
+ tl->queue_count = 1;
+ tl->head = 0;
+ tl->packet_count[0] = 1;
+ return MAX_TID_COUNT;
+ }
+
+ time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+ idx = time_diff / TID_QUEUE_CELL_SPACING;
+
+ /* The history is too long: remove data older than TID_MAX_TIME_DIFF */
+ if (idx >= TID_QUEUE_MAX_SIZE)
+ il4965_rs_tl_rm_old_stats(tl, curr_time);
+
+ idx = (tl->head + idx) % TID_QUEUE_MAX_SIZE;
+ tl->packet_count[idx] = tl->packet_count[idx] + 1;
+ tl->total = tl->total + 1;
+
+ if ((idx + 1) > tl->queue_count)
+ tl->queue_count = idx + 1;
+
+ return tid;
+}
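In other words (a sketch of the bookkeeping above): packet_count[] is a ring of TID_QUEUE_MAX_SIZE buckets, each TID_QUEUE_CELL_SPACING milliseconds wide, with tl->head and tl->time_stamp marking the oldest bucket. A frame arriving time_diff ms after time_stamp is counted in

	bucket = (tl->head + time_diff / TID_QUEUE_CELL_SPACING)
		 % TID_QUEUE_MAX_SIZE;

and il4965_rs_tl_rm_old_stats() is called first whenever the raw offset would run past the end of the ring, flushing buckets older than TID_MAX_TIME_DIFF.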
+
+/* get the traffic load value for tid */
+static u32
+il4965_rs_tl_get_load(struct il_lq_sta *lq_data, u8 tid)
+{
+ u32 curr_time = jiffies_to_msecs(jiffies);
+ u32 time_diff;
+ s32 idx;
+ struct il_traffic_load *tl = NULL;
+
+ if (tid >= TID_MAX_LOAD_COUNT)
+ return 0;
+
+ tl = &(lq_data->load[tid]);
+
+ curr_time -= curr_time % TID_ROUND_VALUE;
+
+ if (!(tl->queue_count))
+ return 0;
+
+ time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
+ idx = time_diff / TID_QUEUE_CELL_SPACING;
+
+ /* The history is too long: remove data older than TID_MAX_TIME_DIFF */
+ if (idx >= TID_QUEUE_MAX_SIZE)
+ il4965_rs_tl_rm_old_stats(tl, curr_time);
+
+ return tl->total;
+}
+
+static int
+il4965_rs_tl_turn_on_agg_for_tid(struct il_priv *il, struct il_lq_sta *lq_data,
+ u8 tid, struct ieee80211_sta *sta)
+{
+ int ret = -EAGAIN;
+ u32 load;
+
+ load = il4965_rs_tl_get_load(lq_data, tid);
+
+ if (load > IL_AGG_LOAD_THRESHOLD) {
+ D_HT("Starting Tx agg: STA: %pM tid: %d\n", sta->addr, tid);
+ ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
+ if (ret == -EAGAIN) {
+ /*
+ * The driver and mac80211 are out of sync; this might be
+ * caused by a firmware reload. Stop the Tx BA session here.
+ */
+ IL_ERR("Fail start Tx agg on tid: %d\n", tid);
+ ieee80211_stop_tx_ba_session(sta, tid);
+ }
+ } else
+ D_HT("Aggregation not enabled for tid %d because load = %u\n",
+ tid, load);
+
+ return ret;
+}
+
+static void
+il4965_rs_tl_turn_on_agg(struct il_priv *il, u8 tid, struct il_lq_sta *lq_data,
+ struct ieee80211_sta *sta)
+{
+ if (tid < TID_MAX_LOAD_COUNT)
+ il4965_rs_tl_turn_on_agg_for_tid(il, lq_data, tid, sta);
+ else
+ IL_ERR("tid exceeds max load count: %d/%d\n", tid,
+ TID_MAX_LOAD_COUNT);
+}
+
+static inline int
+il4965_get_il4965_num_of_ant_from_rate(u32 rate_n_flags)
+{
+ return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
+ !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
+ !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
+}
+
+/*
+ * Get the expected throughput from an il_scale_tbl_info, with a NULL
+ * check on the table's expected_tpt pointer.
+ */
+static s32
+il4965_get_expected_tpt(struct il_scale_tbl_info *tbl, int rs_idx)
+{
+ if (tbl->expected_tpt)
+ return tbl->expected_tpt[rs_idx];
+ return 0;
+}
+
+/**
+ * il4965_rs_collect_tx_data - Update the success/failure sliding win
+ *
+ * We keep a sliding win of the last 62 packets transmitted
+ * at this rate. win->data contains the bitmask of successful
+ * packets.
+ */
+static int
+il4965_rs_collect_tx_data(struct il_scale_tbl_info *tbl, int scale_idx,
+ int attempts, int successes)
+{
+ struct il_rate_scale_data *win = NULL;
+ static const u64 mask = (((u64) 1) << (RATE_MAX_WINDOW - 1));
+ s32 fail_count, tpt;
+
+ if (scale_idx < 0 || scale_idx >= RATE_COUNT)
+ return -EINVAL;
+
+ /* Select win for current tx bit rate */
+ win = &(tbl->win[scale_idx]);
+
+ /* Get expected throughput */
+ tpt = il4965_get_expected_tpt(tbl, scale_idx);
+
+ /*
+ * Keep track of only the latest 62 tx frame attempts in this rate's
+ * history win; anything older isn't really relevant any more.
+ * If we have filled up the sliding win, drop the oldest attempt;
+ * if the oldest attempt (highest bit in bitmap) shows "success",
+ * subtract "1" from the success counter (this is the main reason
+ * we keep these bitmaps!).
+ */
+ while (attempts > 0) {
+ if (win->counter >= RATE_MAX_WINDOW) {
+
+ /* remove earliest */
+ win->counter = RATE_MAX_WINDOW - 1;
+
+ if (win->data & mask) {
+ win->data &= ~mask;
+ win->success_counter--;
+ }
+ }
+
+ /* Increment frames-attempted counter */
+ win->counter++;
+
+ /* Shift bitmap by one frame to throw away oldest history */
+ win->data <<= 1;
+
+ /* Mark the most recent #successes attempts as successful */
+ if (successes > 0) {
+ win->success_counter++;
+ win->data |= 0x1;
+ successes--;
+ }
+
+ attempts--;
+ }
+
+ /* Calculate current success ratio, avoid divide-by-0! */
+ if (win->counter > 0)
+ win->success_ratio =
+ 128 * (100 * win->success_counter) / win->counter;
+ else
+ win->success_ratio = IL_INVALID_VALUE;
+
+ fail_count = win->counter - win->success_counter;
+
+ /* Calculate average throughput, if we have enough history. */
+ if (fail_count >= RATE_MIN_FAILURE_TH ||
+ win->success_counter >= RATE_MIN_SUCCESS_TH)
+ win->average_tpt = (win->success_ratio * tpt + 64) / 128;
+ else
+ win->average_tpt = IL_INVALID_VALUE;
+
+ /* Tag this win as having been updated */
+ win->stamp = jiffies;
+
+ return 0;
+}
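To put numbers on the fixed-point math above (worked example, values picked only for illustration): with a full window of RATE_MAX_WINDOW == 62 attempts, 31 of them successful, and an expected throughput entry of 120,

	success_ratio = 128 * (100 * 31) / 62		/* = 6400, i.e. 50% scaled by 128 */
	average_tpt   = (6400 * 120 + 64) / 128		/* = 6000, i.e. 50% of 120 on a 100x scale */

so average_tpt carries an extra factor of 100 relative to the expected_tpt tables, which is why later comparisons in this file multiply the table value by 100 before comparing against it.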
+
+/*
+ * Fill uCode API rate_n_flags field, based on "search" or "active" table.
+ */
+static u32
+il4965_rate_n_flags_from_tbl(struct il_priv *il, struct il_scale_tbl_info *tbl,
+ int idx, u8 use_green)
+{
+ u32 rate_n_flags = 0;
+
+ if (is_legacy(tbl->lq_type)) {
+ rate_n_flags = il_rates[idx].plcp;
+ if (idx >= IL_FIRST_CCK_RATE && idx <= IL_LAST_CCK_RATE)
+ rate_n_flags |= RATE_MCS_CCK_MSK;
+
+ } else if (is_Ht(tbl->lq_type)) {
+ if (idx > IL_LAST_OFDM_RATE) {
+ IL_ERR("Invalid HT rate idx %d\n", idx);
+ idx = IL_LAST_OFDM_RATE;
+ }
+ rate_n_flags = RATE_MCS_HT_MSK;
+
+ if (is_siso(tbl->lq_type))
+ rate_n_flags |= il_rates[idx].plcp_siso;
+ else
+ rate_n_flags |= il_rates[idx].plcp_mimo2;
+ } else {
+ IL_ERR("Invalid tbl->lq_type %d\n", tbl->lq_type);
+ }
+
+ rate_n_flags |=
+ ((tbl->ant_type << RATE_MCS_ANT_POS) & RATE_MCS_ANT_ABC_MSK);
+
+ if (is_Ht(tbl->lq_type)) {
+ if (tbl->is_ht40) {
+ if (tbl->is_dup)
+ rate_n_flags |= RATE_MCS_DUP_MSK;
+ else
+ rate_n_flags |= RATE_MCS_HT40_MSK;
+ }
+ if (tbl->is_SGI)
+ rate_n_flags |= RATE_MCS_SGI_MSK;
+
+ if (use_green) {
+ rate_n_flags |= RATE_MCS_GF_MSK;
+ if (is_siso(tbl->lq_type) && tbl->is_SGI) {
+ rate_n_flags &= ~RATE_MCS_SGI_MSK;
+ IL_ERR("GF was set with SGI:SISO\n");
+ }
+ }
+ }
+ return rate_n_flags;
+}
+
+/*
+ * Interpret uCode API's rate_n_flags format,
+ * fill "search" or "active" tx mode table.
+ */
+static int
+il4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
+ enum ieee80211_band band,
+ struct il_scale_tbl_info *tbl, int *rate_idx)
+{
+ u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
+ u8 il4965_num_of_ant =
+ il4965_get_il4965_num_of_ant_from_rate(rate_n_flags);
+ u8 mcs;
+
+ memset(tbl, 0, sizeof(struct il_scale_tbl_info));
+ *rate_idx = il4965_hwrate_to_plcp_idx(rate_n_flags);
+
+ if (*rate_idx == RATE_INVALID) {
+ *rate_idx = -1;
+ return -EINVAL;
+ }
+ tbl->is_SGI = 0; /* default legacy setup */
+ tbl->is_ht40 = 0;
+ tbl->is_dup = 0;
+ tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
+ tbl->lq_type = LQ_NONE;
+ tbl->max_search = IL_MAX_SEARCH;
+
+ /* legacy rate format */
+ if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
+ if (il4965_num_of_ant == 1) {
+ if (band == IEEE80211_BAND_5GHZ)
+ tbl->lq_type = LQ_A;
+ else
+ tbl->lq_type = LQ_G;
+ }
+ /* HT rate format */
+ } else {
+ if (rate_n_flags & RATE_MCS_SGI_MSK)
+ tbl->is_SGI = 1;
+
+ if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
+ (rate_n_flags & RATE_MCS_DUP_MSK))
+ tbl->is_ht40 = 1;
+
+ if (rate_n_flags & RATE_MCS_DUP_MSK)
+ tbl->is_dup = 1;
+
+ mcs = il4965_rs_extract_rate(rate_n_flags);
+
+ /* SISO */
+ if (mcs <= RATE_SISO_60M_PLCP) {
+ if (il4965_num_of_ant == 1)
+ tbl->lq_type = LQ_SISO; /*else NONE */
+ /* MIMO2 */
+ } else {
+ if (il4965_num_of_ant == 2)
+ tbl->lq_type = LQ_MIMO2;
+ }
+ }
+ return 0;
+}
+
+/* switch to another antenna/antennas and return 1 */
+/* if no other valid antenna found, return 0 */
+static int
+il4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
+ struct il_scale_tbl_info *tbl)
+{
+ u8 new_ant_type;
+
+ if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
+ return 0;
+
+ if (!il4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
+ return 0;
+
+ new_ant_type = ant_toggle_lookup[tbl->ant_type];
+
+ while (new_ant_type != tbl->ant_type &&
+ !il4965_rs_is_valid_ant(valid_ant, new_ant_type))
+ new_ant_type = ant_toggle_lookup[new_ant_type];
+
+ if (new_ant_type == tbl->ant_type)
+ return 0;
+
+ tbl->ant_type = new_ant_type;
+ *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
+ *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
+ return 1;
+}
+
+/**
+ * Green-field mode is valid if the station supports it and
+ * there are no non-GF stations present in the BSS.
+ */
+static bool
+il4965_rs_use_green(struct ieee80211_sta *sta)
+{
+ struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+ return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
+ !(ctx->ht.non_gf_sta_present);
+}
+
+/**
+ * il4965_rs_get_supported_rates - get the available rates
+ *
+ * For management or broadcast frames, only the basic available
+ * rates are returned.
+ */
+static u16
+il4965_rs_get_supported_rates(struct il_lq_sta *lq_sta,
+ struct ieee80211_hdr *hdr,
+ enum il_table_type rate_type)
+{
+ if (is_legacy(rate_type)) {
+ return lq_sta->active_legacy_rate;
+ } else {
+ if (is_siso(rate_type))
+ return lq_sta->active_siso_rate;
+ else
+ return lq_sta->active_mimo2_rate;
+ }
+}
+
+static u16
+il4965_rs_get_adjacent_rate(struct il_priv *il, u8 idx, u16 rate_mask,
+ int rate_type)
+{
+ u8 high = RATE_INVALID;
+ u8 low = RATE_INVALID;
+
+ /* 802.11a and HT rates walk to the literally adjacent rate in
+ * the rate table */
+ if (is_a_band(rate_type) || !is_legacy(rate_type)) {
+ int i;
+ u32 mask;
+
+ /* Find the previous rate that is in the rate mask */
+ i = idx - 1;
+ for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
+ if (rate_mask & mask) {
+ low = i;
+ break;
+ }
+ }
+
+ /* Find the next rate that is in the rate mask */
+ i = idx + 1;
+ for (mask = (1 << i); i < RATE_COUNT; i++, mask <<= 1) {
+ if (rate_mask & mask) {
+ high = i;
+ break;
+ }
+ }
+
+ return (high << 8) | low;
+ }
+
+ low = idx;
+ while (low != RATE_INVALID) {
+ low = il_rates[low].prev_rs;
+ if (low == RATE_INVALID)
+ break;
+ if (rate_mask & (1 << low))
+ break;
+ D_RATE("Skipping masked lower rate: %d\n", low);
+ }
+
+ high = idx;
+ while (high != RATE_INVALID) {
+ high = il_rates[high].next_rs;
+ if (high == RATE_INVALID)
+ break;
+ if (rate_mask & (1 << high))
+ break;
+ D_RATE("Skipping masked higher rate: %d\n", high);
+ }
+
+ return (high << 8) | low;
+}
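The return value packs both neighbours into a single u16: the higher adjacent rate idx in the upper byte and the lower one in the lower byte, with RATE_INVALID in a byte meaning no valid neighbour in that direction. Callers unpack it as in the rest of this file (the snippet below mirrors those call sites):

	high_low = il4965_rs_get_adjacent_rate(il, idx, rate_mask, tbl->lq_type);
	low  = high_low & 0xff;
	high = (high_low >> 8) & 0xff;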
+
+static u32
+il4965_rs_get_lower_rate(struct il_lq_sta *lq_sta,
+ struct il_scale_tbl_info *tbl, u8 scale_idx,
+ u8 ht_possible)
+{
+ s32 low;
+ u16 rate_mask;
+ u16 high_low;
+ u8 switch_to_legacy = 0;
+ u8 is_green = lq_sta->is_green;
+ struct il_priv *il = lq_sta->drv;
+
+ /* Check if we need to switch from HT to legacy rates.
+ * The assumption is that the mandatory rates (1 Mbps or 6 Mbps)
+ * are always supported (the spec demands it). */
+ if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_idx)) {
+ switch_to_legacy = 1;
+ scale_idx = rs_ht_to_legacy[scale_idx];
+ if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ tbl->lq_type = LQ_A;
+ else
+ tbl->lq_type = LQ_G;
+
+ if (il4965_num_of_ant(tbl->ant_type) > 1)
+ tbl->ant_type =
+ il4965_first_antenna(il->hw_params.valid_tx_ant);
+
+ tbl->is_ht40 = 0;
+ tbl->is_SGI = 0;
+ tbl->max_search = IL_MAX_SEARCH;
+ }
+
+ rate_mask = il4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
+
+ /* Mask with station rate restriction */
+ if (is_legacy(tbl->lq_type)) {
+ /* supp_rates has no CCK bits in A mode */
+ if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ rate_mask =
+ (u16) (rate_mask &
+ (lq_sta->supp_rates << IL_FIRST_OFDM_RATE));
+ else
+ rate_mask = (u16) (rate_mask & lq_sta->supp_rates);
+ }
+
+ /* If we switched from HT to legacy, check current rate */
+ if (switch_to_legacy && (rate_mask & (1 << scale_idx))) {
+ low = scale_idx;
+ goto out;
+ }
+
+ high_low =
+ il4965_rs_get_adjacent_rate(lq_sta->drv, scale_idx, rate_mask,
+ tbl->lq_type);
+ low = high_low & 0xff;
+
+ if (low == RATE_INVALID)
+ low = scale_idx;
+
+out:
+ return il4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
+}
+
+/*
+ * Simple function to compare two rate scale table types
+ */
+static bool
+il4965_table_type_matches(struct il_scale_tbl_info *a,
+ struct il_scale_tbl_info *b)
+{
+ return (a->lq_type == b->lq_type && a->ant_type == b->ant_type &&
+ a->is_SGI == b->is_SGI);
+}
+
+/*
+ * mac80211 sends us Tx status
+ */
+static void
+il4965_rs_tx_status(void *il_r, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *il_sta,
+ struct sk_buff *skb)
+{
+ int legacy_success;
+ int retries;
+ int rs_idx, mac_idx, i;
+ struct il_lq_sta *lq_sta = il_sta;
+ struct il_link_quality_cmd *table;
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct il_priv *il = (struct il_priv *)il_r;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ enum mac80211_rate_control_flags mac_flags;
+ u32 tx_rate;
+ struct il_scale_tbl_info tbl_type;
+ struct il_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
+ struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+ D_RATE("get frame ack response, update rate scale win\n");
+
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (!lq_sta) {
+ D_RATE("Station rate scaling not created yet.\n");
+ return;
+ } else if (!lq_sta->drv) {
+ D_RATE("Rate scaling not initialized yet.\n");
+ return;
+ }
+
+ if (!ieee80211_is_data(hdr->frame_control) ||
+ (info->flags & IEEE80211_TX_CTL_NO_ACK))
+ return;
+
+ /* This packet was aggregated but doesn't carry status info */
+ if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
+ !(info->flags & IEEE80211_TX_STAT_AMPDU))
+ return;
+
+ /*
+ * Ignore this Tx frame response if its initial rate doesn't match
+ * that of the latest Link Quality command. There may be stragglers
+ * from a previous Link Quality command, but we're no longer interested
+ * in those; they're either from the "active" mode while we're trying
+ * to check "search" mode, or a prior "search" mode after we've moved
+ * to a new "search" mode (which might become the new "active" mode).
+ */
+ table = &lq_sta->lq;
+ tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
+ il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type, &rs_idx);
+ if (il->band == IEEE80211_BAND_5GHZ)
+ rs_idx -= IL_FIRST_OFDM_RATE;
+ mac_flags = info->status.rates[0].flags;
+ mac_idx = info->status.rates[0].idx;
+ /* For HT packets, map MCS to PLCP */
+ if (mac_flags & IEEE80211_TX_RC_MCS) {
+ mac_idx &= RATE_MCS_CODE_MSK; /* Remove # of streams */
+ if (mac_idx >= (RATE_9M_IDX - IL_FIRST_OFDM_RATE))
+ mac_idx++;
+ /*
+ * mac80211 HT rate idx is always zero-based; we need to move
+ * HT OFDM rates after the CCK rates in the 2.4 GHz band
+ */
+ if (il->band == IEEE80211_BAND_2GHZ)
+ mac_idx += IL_FIRST_OFDM_RATE;
+ }
+ /* Here we actually compare this rate to the latest LQ command */
+ if (mac_idx < 0 ||
+ tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI) ||
+ tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH) ||
+ tbl_type.is_dup != !!(mac_flags & IEEE80211_TX_RC_DUP_DATA) ||
+ tbl_type.ant_type != info->antenna_sel_tx ||
+ !!(tx_rate & RATE_MCS_HT_MSK) != !!(mac_flags & IEEE80211_TX_RC_MCS)
+ || !!(tx_rate & RATE_MCS_GF_MSK) !=
+ !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD) || rs_idx != mac_idx) {
+ D_RATE("initial rate %d does not match %d (0x%x)\n", mac_idx,
+ rs_idx, tx_rate);
+ /*
+ * Since the rates mismatch, the last LQ command may have failed.
+ * After IL_MISSED_RATE_MAX mismatches, resync the uCode with the
+ * driver.
+ */
+ lq_sta->missed_rate_counter++;
+ if (lq_sta->missed_rate_counter > IL_MISSED_RATE_MAX) {
+ lq_sta->missed_rate_counter = 0;
+ il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
+ }
+ /* Regardless, ignore this status info for outdated rate */
+ return;
+ } else
+ /* Rate did match, so reset the missed_rate_counter */
+ lq_sta->missed_rate_counter = 0;
+
+ /* Figure out if rate scale algorithm is in active or search table */
+ if (il4965_table_type_matches
+ (&tbl_type, &(lq_sta->lq_info[lq_sta->active_tbl]))) {
+ curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+ } else
+ if (il4965_table_type_matches
+ (&tbl_type, &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
+ curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+ other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ } else {
+ D_RATE("Neither active nor search matches tx rate\n");
+ tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ D_RATE("active- lq:%x, ant:%x, SGI:%d\n", tmp_tbl->lq_type,
+ tmp_tbl->ant_type, tmp_tbl->is_SGI);
+ tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
+ D_RATE("search- lq:%x, ant:%x, SGI:%d\n", tmp_tbl->lq_type,
+ tmp_tbl->ant_type, tmp_tbl->is_SGI);
+ D_RATE("actual- lq:%x, ant:%x, SGI:%d\n", tbl_type.lq_type,
+ tbl_type.ant_type, tbl_type.is_SGI);
+ /*
+ * No matching table found; bypass the data collection and
+ * continue rate scaling to find a matching rate table.
+ */
+ il4965_rs_stay_in_table(lq_sta, true);
+ goto done;
+ }
+
+ /*
+ * Updating the frame history depends on whether packets were
+ * aggregated.
+ *
+ * For aggregation, all packets were transmitted at the same rate, the
+ * first idx into rate scale table.
+ */
+ if (info->flags & IEEE80211_TX_STAT_AMPDU) {
+ tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
+ il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band, &tbl_type,
+ &rs_idx);
+ il4965_rs_collect_tx_data(curr_tbl, rs_idx,
+ info->status.ampdu_len,
+ info->status.ampdu_ack_len);
+
+ /* Update success/fail counts if not searching for new mode */
+ if (lq_sta->stay_in_tbl) {
+ lq_sta->total_success += info->status.ampdu_ack_len;
+ lq_sta->total_failed +=
+ (info->status.ampdu_len -
+ info->status.ampdu_ack_len);
+ }
+ } else {
+ /*
+ * For legacy frames, update the frame history for each Tx retry.
+ */
+ retries = info->status.rates[0].count - 1;
+ /* HW doesn't send more than 15 retries */
+ retries = min(retries, 15);
+
+ /* The last transmission may have been successful */
+ legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
+ /* Collect data for each rate used during failed TX attempts */
+ for (i = 0; i <= retries; ++i) {
+ tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
+ il4965_rs_get_tbl_info_from_mcs(tx_rate, il->band,
+ &tbl_type, &rs_idx);
+ /*
+ * Only collect stats if retried rate is in the same RS
+ * table as active/search.
+ */
+ if (il4965_table_type_matches(&tbl_type, curr_tbl))
+ tmp_tbl = curr_tbl;
+ else if (il4965_table_type_matches
+ (&tbl_type, other_tbl))
+ tmp_tbl = other_tbl;
+ else
+ continue;
+ il4965_rs_collect_tx_data(tmp_tbl, rs_idx, 1,
+ i <
+ retries ? 0 : legacy_success);
+ }
+
+ /* Update success/fail counts if not searching for new mode */
+ if (lq_sta->stay_in_tbl) {
+ lq_sta->total_success += legacy_success;
+ lq_sta->total_failed += retries + (1 - legacy_success);
+ }
+ }
+ /* The last TX rate is cached in lq_sta; it's set in if/else above */
+ lq_sta->last_rate_n_flags = tx_rate;
+done:
+ /* See if there's a better rate or modulation mode to try. */
+ if (sta->supp_rates[sband->band])
+ il4965_rs_rate_scale_perform(il, skb, sta, lq_sta);
+}
+
+/*
+ * Begin a period of staying with a selected modulation mode.
+ * Set "stay_in_tbl" flag to prevent any mode switches.
+ * Set frame tx success limits according to legacy vs. high-throughput,
+ * and reset overall (spanning all rates) tx success history stats.
+ * These control how long we stay using same modulation mode before
+ * searching for a new mode.
+ */
+static void
+il4965_rs_set_stay_in_table(struct il_priv *il, u8 is_legacy,
+ struct il_lq_sta *lq_sta)
+{
+ D_RATE("we are staying in the same table\n");
+ lq_sta->stay_in_tbl = 1; /* only place this gets set */
+ if (is_legacy) {
+ lq_sta->table_count_limit = IL_LEGACY_TBL_COUNT;
+ lq_sta->max_failure_limit = IL_LEGACY_FAILURE_LIMIT;
+ lq_sta->max_success_limit = IL_LEGACY_SUCCESS_LIMIT;
+ } else {
+ lq_sta->table_count_limit = IL_NONE_LEGACY_TBL_COUNT;
+ lq_sta->max_failure_limit = IL_NONE_LEGACY_FAILURE_LIMIT;
+ lq_sta->max_success_limit = IL_NONE_LEGACY_SUCCESS_LIMIT;
+ }
+ lq_sta->table_count = 0;
+ lq_sta->total_failed = 0;
+ lq_sta->total_success = 0;
+ lq_sta->flush_timer = jiffies;
+ lq_sta->action_counter = 0;
+}
+
+/*
+ * Find correct throughput table for given mode of modulation
+ */
+static void
+il4965_rs_set_expected_tpt_table(struct il_lq_sta *lq_sta,
+ struct il_scale_tbl_info *tbl)
+{
+ /* Used to choose among HT tables */
+ s32(*ht_tbl_pointer)[RATE_COUNT];
+
+ /* Check for invalid LQ type */
+ if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
+ tbl->expected_tpt = expected_tpt_legacy;
+ return;
+ }
+
+ /* Legacy rates have only one table */
+ if (is_legacy(tbl->lq_type)) {
+ tbl->expected_tpt = expected_tpt_legacy;
+ return;
+ }
+
+ /* Choose among many HT tables depending on number of streams
+ * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
+ * status */
+ if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
+ ht_tbl_pointer = expected_tpt_siso20MHz;
+ else if (is_siso(tbl->lq_type))
+ ht_tbl_pointer = expected_tpt_siso40MHz;
+ else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
+ ht_tbl_pointer = expected_tpt_mimo2_20MHz;
+ else /* if (is_mimo2(tbl->lq_type)) <-- must be true */
+ ht_tbl_pointer = expected_tpt_mimo2_40MHz;
+
+ if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
+ tbl->expected_tpt = ht_tbl_pointer[0];
+ else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
+ tbl->expected_tpt = ht_tbl_pointer[1];
+ else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
+ tbl->expected_tpt = ht_tbl_pointer[2];
+ else /* AGG+SGI */
+ tbl->expected_tpt = ht_tbl_pointer[3];
+}
+
+/*
+ * Find starting rate for new "search" high-throughput mode of modulation.
+ * Goal is to find lowest expected rate (under perfect conditions) that is
+ * above the current measured throughput of "active" mode, to give new mode
+ * a fair chance to prove itself without too many challenges.
+ *
+ * This gets called when transitioning to more aggressive modulation
+ * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
+ * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
+ * to decrease to match "active" throughput. When moving from MIMO to SISO,
+ * bit rate will typically need to increase, but not if performance was bad.
+ */
+static s32
+il4965_rs_get_best_rate(struct il_priv *il, struct il_lq_sta *lq_sta,
+ struct il_scale_tbl_info *tbl, /* "search" */
+ u16 rate_mask, s8 idx)
+{
+ /* "active" values */
+ struct il_scale_tbl_info *active_tbl =
+ &(lq_sta->lq_info[lq_sta->active_tbl]);
+ s32 active_sr = active_tbl->win[idx].success_ratio;
+ s32 active_tpt = active_tbl->expected_tpt[idx];
+
+ /* expected "search" throughput */
+ s32 *tpt_tbl = tbl->expected_tpt;
+
+ s32 new_rate, high, low, start_hi;
+ u16 high_low;
+ s8 rate = idx;
+
+ new_rate = high = low = start_hi = RATE_INVALID;
+
+ for (;;) {
+ high_low =
+ il4965_rs_get_adjacent_rate(il, rate, rate_mask,
+ tbl->lq_type);
+
+ low = high_low & 0xff;
+ high = (high_low >> 8) & 0xff;
+
+ /*
+ * Lower the "search" bit rate, to give new "search" mode
+ * approximately the same throughput as "active" if:
+ *
+ * 1) "Active" mode has been working modestly well (but not
+ * great), and expected "search" throughput (under perfect
+ * conditions) at candidate rate is above the actual
+ * measured "active" throughput (but less than expected
+ * "active" throughput under perfect conditions).
+ * OR
+ * 2) "Active" mode has been working perfectly or very well
+ * and expected "search" throughput (under perfect
+ * conditions) at candidate rate is above expected
+ * "active" throughput (under perfect conditions).
+ */
+ if ((100 * tpt_tbl[rate] > lq_sta->last_tpt &&
+ (active_sr > RATE_DECREASE_TH && active_sr <= RATE_HIGH_TH
+ && tpt_tbl[rate] <= active_tpt)) ||
+ (active_sr >= RATE_SCALE_SWITCH &&
+ tpt_tbl[rate] > active_tpt)) {
+
+ /* (2nd or later pass)
+ * If we've already tried to raise the rate, and are
+ * now trying to lower it, use the higher rate. */
+ if (start_hi != RATE_INVALID) {
+ new_rate = start_hi;
+ break;
+ }
+
+ new_rate = rate;
+
+ /* Loop again with lower rate */
+ if (low != RATE_INVALID)
+ rate = low;
+
+ /* Lower rate not available, use the original */
+ else
+ break;
+
+ /* Else try to raise the "search" rate to match "active" */
+ } else {
+ /* (2nd or later pass)
+ * If we've already tried to lower the rate, and are
+ * now trying to raise it, use the lower rate. */
+ if (new_rate != RATE_INVALID)
+ break;
+
+ /* Loop again with higher rate */
+ else if (high != RATE_INVALID) {
+ start_hi = high;
+ rate = high;
+
+ /* Higher rate not available, use the original */
+ } else {
+ new_rate = rate;
+ break;
+ }
+ }
+ }
+
+ return new_rate;
+}
+
+/*
+ * Set up search table for MIMO2
+ */
+static int
+il4965_rs_switch_to_mimo2(struct il_priv *il, struct il_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta,
+ struct il_scale_tbl_info *tbl, int idx)
+{
+ u16 rate_mask;
+ s32 rate;
+ s8 is_green = lq_sta->is_green;
+ struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+ if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+ return -1;
+
+ if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2) ==
+ WLAN_HT_CAP_SM_PS_STATIC)
+ return -1;
+
+ /* Need both Tx chains/antennas to support MIMO */
+ if (il->hw_params.tx_chains_num < 2)
+ return -1;
+
+ D_RATE("LQ: try to switch to MIMO2\n");
+
+ tbl->lq_type = LQ_MIMO2;
+ tbl->is_dup = lq_sta->is_dup;
+ tbl->action = 0;
+ tbl->max_search = IL_MAX_SEARCH;
+ rate_mask = lq_sta->active_mimo2_rate;
+
+ if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
+ tbl->is_ht40 = 1;
+ else
+ tbl->is_ht40 = 0;
+
+ il4965_rs_set_expected_tpt_table(lq_sta, tbl);
+
+ rate = il4965_rs_get_best_rate(il, lq_sta, tbl, rate_mask, idx);
+
+ D_RATE("LQ: MIMO2 best rate %d mask %X\n", rate, rate_mask);
+ if (rate == RATE_INVALID || !((1 << rate) & rate_mask)) {
+ D_RATE("Can't switch with idx %d rate mask %x\n", rate,
+ rate_mask);
+ return -1;
+ }
+ tbl->current_rate =
+ il4965_rate_n_flags_from_tbl(il, tbl, rate, is_green);
+
+ D_RATE("LQ: Switch to new mcs %X idx is green %X\n", tbl->current_rate,
+ is_green);
+ return 0;
+}
+
+/*
+ * Set up search table for SISO
+ */
+static int
+il4965_rs_switch_to_siso(struct il_priv *il, struct il_lq_sta *lq_sta,
+ struct ieee80211_conf *conf, struct ieee80211_sta *sta,
+ struct il_scale_tbl_info *tbl, int idx)
+{
+ u16 rate_mask;
+ u8 is_green = lq_sta->is_green;
+ s32 rate;
+ struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+ if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
+ return -1;
+
+ D_RATE("LQ: try to switch to SISO\n");
+
+ tbl->is_dup = lq_sta->is_dup;
+ tbl->lq_type = LQ_SISO;
+ tbl->action = 0;
+ tbl->max_search = IL_MAX_SEARCH;
+ rate_mask = lq_sta->active_siso_rate;
+
+ if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
+ tbl->is_ht40 = 1;
+ else
+ tbl->is_ht40 = 0;
+
+ if (is_green)
+ tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield */
+
+ il4965_rs_set_expected_tpt_table(lq_sta, tbl);
+ rate = il4965_rs_get_best_rate(il, lq_sta, tbl, rate_mask, idx);
+
+ D_RATE("LQ: get best rate %d mask %X\n", rate, rate_mask);
+ if (rate == RATE_INVALID || !((1 << rate) & rate_mask)) {
+ D_RATE("can not switch with idx %d rate mask %x\n", rate,
+ rate_mask);
+ return -1;
+ }
+ tbl->current_rate =
+ il4965_rate_n_flags_from_tbl(il, tbl, rate, is_green);
+ D_RATE("LQ: Switch to new mcs %X idx is green %X\n", tbl->current_rate,
+ is_green);
+ return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from legacy
+ */
+static int
+il4965_rs_move_legacy_other(struct il_priv *il, struct il_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta, int idx)
+{
+ struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct il_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct il_rate_scale_data *win = &(tbl->win[idx]);
+ u32 sz =
+ (sizeof(struct il_scale_tbl_info) -
+ (sizeof(struct il_rate_scale_data) * RATE_COUNT));
+ u8 start_action;
+ u8 valid_tx_ant = il->hw_params.valid_tx_ant;
+ u8 tx_chains_num = il->hw_params.tx_chains_num;
+ int ret = 0;
+ u8 update_search_tbl_counter = 0;
+
+ tbl->action = IL_LEGACY_SWITCH_SISO;
+
+ start_action = tbl->action;
+ for (;;) {
+ lq_sta->action_counter++;
+ switch (tbl->action) {
+ case IL_LEGACY_SWITCH_ANTENNA1:
+ case IL_LEGACY_SWITCH_ANTENNA2:
+ D_RATE("LQ: Legacy toggle Antenna\n");
+
+ if ((tbl->action == IL_LEGACY_SWITCH_ANTENNA1 &&
+ tx_chains_num <= 1) ||
+ (tbl->action == IL_LEGACY_SWITCH_ANTENNA2 &&
+ tx_chains_num <= 2))
+ break;
+
+ /* Don't change antenna if success has been great */
+ if (win->success_ratio >= IL_RS_GOOD_RATIO)
+ break;
+
+ /* Set up search table to try other antenna */
+ memcpy(search_tbl, tbl, sz);
+
+ if (il4965_rs_toggle_antenna
+ (valid_tx_ant, &search_tbl->current_rate,
+ search_tbl)) {
+ update_search_tbl_counter = 1;
+ il4965_rs_set_expected_tpt_table(lq_sta,
+ search_tbl);
+ goto out;
+ }
+ break;
+ case IL_LEGACY_SWITCH_SISO:
+ D_RATE("LQ: Legacy switch to SISO\n");
+
+ /* Set up search table to try SISO */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+ ret =
+ il4965_rs_switch_to_siso(il, lq_sta, conf, sta,
+ search_tbl, idx);
+ if (!ret) {
+ lq_sta->action_counter = 0;
+ goto out;
+ }
+
+ break;
+ case IL_LEGACY_SWITCH_MIMO2_AB:
+ case IL_LEGACY_SWITCH_MIMO2_AC:
+ case IL_LEGACY_SWITCH_MIMO2_BC:
+ D_RATE("LQ: Legacy switch to MIMO2\n");
+
+ /* Set up search table to try MIMO */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+
+ if (tbl->action == IL_LEGACY_SWITCH_MIMO2_AB)
+ search_tbl->ant_type = ANT_AB;
+ else if (tbl->action == IL_LEGACY_SWITCH_MIMO2_AC)
+ search_tbl->ant_type = ANT_AC;
+ else
+ search_tbl->ant_type = ANT_BC;
+
+ if (!il4965_rs_is_valid_ant
+ (valid_tx_ant, search_tbl->ant_type))
+ break;
+
+ ret =
+ il4965_rs_switch_to_mimo2(il, lq_sta, conf, sta,
+ search_tbl, idx);
+ if (!ret) {
+ lq_sta->action_counter = 0;
+ goto out;
+ }
+ break;
+ }
+ tbl->action++;
+ if (tbl->action > IL_LEGACY_SWITCH_MIMO2_BC)
+ tbl->action = IL_LEGACY_SWITCH_ANTENNA1;
+
+ if (tbl->action == start_action)
+ break;
+
+ }
+ search_tbl->lq_type = LQ_NONE;
+ return 0;
+
+out:
+ lq_sta->search_better_tbl = 1;
+ tbl->action++;
+ if (tbl->action > IL_LEGACY_SWITCH_MIMO2_BC)
+ tbl->action = IL_LEGACY_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+ return 0;
+
+}
+
+/*
+ * Try to switch to new modulation mode from SISO
+ */
+static int
+il4965_rs_move_siso_to_other(struct il_priv *il, struct il_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta, int idx)
+{
+ u8 is_green = lq_sta->is_green;
+ struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct il_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct il_rate_scale_data *win = &(tbl->win[idx]);
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ u32 sz =
+ (sizeof(struct il_scale_tbl_info) -
+ (sizeof(struct il_rate_scale_data) * RATE_COUNT));
+ u8 start_action;
+ u8 valid_tx_ant = il->hw_params.valid_tx_ant;
+ u8 tx_chains_num = il->hw_params.tx_chains_num;
+ u8 update_search_tbl_counter = 0;
+ int ret;
+
+ start_action = tbl->action;
+
+ for (;;) {
+ lq_sta->action_counter++;
+ switch (tbl->action) {
+ case IL_SISO_SWITCH_ANTENNA1:
+ case IL_SISO_SWITCH_ANTENNA2:
+ D_RATE("LQ: SISO toggle Antenna\n");
+ if ((tbl->action == IL_SISO_SWITCH_ANTENNA1 &&
+ tx_chains_num <= 1) ||
+ (tbl->action == IL_SISO_SWITCH_ANTENNA2 &&
+ tx_chains_num <= 2))
+ break;
+
+ if (win->success_ratio >= IL_RS_GOOD_RATIO)
+ break;
+
+ memcpy(search_tbl, tbl, sz);
+ if (il4965_rs_toggle_antenna
+ (valid_tx_ant, &search_tbl->current_rate,
+ search_tbl)) {
+ update_search_tbl_counter = 1;
+ goto out;
+ }
+ break;
+ case IL_SISO_SWITCH_MIMO2_AB:
+ case IL_SISO_SWITCH_MIMO2_AC:
+ case IL_SISO_SWITCH_MIMO2_BC:
+ D_RATE("LQ: SISO switch to MIMO2\n");
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = 0;
+
+ if (tbl->action == IL_SISO_SWITCH_MIMO2_AB)
+ search_tbl->ant_type = ANT_AB;
+ else if (tbl->action == IL_SISO_SWITCH_MIMO2_AC)
+ search_tbl->ant_type = ANT_AC;
+ else
+ search_tbl->ant_type = ANT_BC;
+
+ if (!il4965_rs_is_valid_ant
+ (valid_tx_ant, search_tbl->ant_type))
+ break;
+
+ ret =
+ il4965_rs_switch_to_mimo2(il, lq_sta, conf, sta,
+ search_tbl, idx);
+ if (!ret)
+ goto out;
+ break;
+ case IL_SISO_SWITCH_GI:
+ if (!tbl->is_ht40 &&
+ !(ht_cap->cap & IEEE80211_HT_CAP_SGI_20))
+ break;
+ if (tbl->is_ht40 &&
+ !(ht_cap->cap & IEEE80211_HT_CAP_SGI_40))
+ break;
+
+ D_RATE("LQ: SISO toggle SGI/NGI\n");
+
+ memcpy(search_tbl, tbl, sz);
+ if (is_green) {
+ if (!tbl->is_SGI)
+ break;
+ else
+ IL_ERR("SGI was set in GF+SISO\n");
+ }
+ search_tbl->is_SGI = !tbl->is_SGI;
+ il4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
+ if (tbl->is_SGI) {
+ s32 tpt = lq_sta->last_tpt / 100;
+ if (tpt >= search_tbl->expected_tpt[idx])
+ break;
+ }
+ search_tbl->current_rate =
+ il4965_rate_n_flags_from_tbl(il, search_tbl, idx,
+ is_green);
+ update_search_tbl_counter = 1;
+ goto out;
+ }
+ tbl->action++;
+ if (tbl->action > IL_SISO_SWITCH_GI)
+ tbl->action = IL_SISO_SWITCH_ANTENNA1;
+
+ if (tbl->action == start_action)
+ break;
+ }
+ search_tbl->lq_type = LQ_NONE;
+ return 0;
+
+out:
+ lq_sta->search_better_tbl = 1;
+ tbl->action++;
+ if (tbl->action > IL_SISO_SWITCH_GI)
+ tbl->action = IL_SISO_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+
+ return 0;
+}
+
+/*
+ * Try to switch to new modulation mode from MIMO2
+ */
+static int
+il4965_rs_move_mimo2_to_other(struct il_priv *il, struct il_lq_sta *lq_sta,
+ struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta, int idx)
+{
+ s8 is_green = lq_sta->is_green;
+ struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ struct il_scale_tbl_info *search_tbl =
+ &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ struct il_rate_scale_data *win = &(tbl->win[idx]);
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ u32 sz =
+ (sizeof(struct il_scale_tbl_info) -
+ (sizeof(struct il_rate_scale_data) * RATE_COUNT));
+ u8 start_action;
+ u8 valid_tx_ant = il->hw_params.valid_tx_ant;
+ u8 tx_chains_num = il->hw_params.tx_chains_num;
+ u8 update_search_tbl_counter = 0;
+ int ret;
+
+ start_action = tbl->action;
+ for (;;) {
+ lq_sta->action_counter++;
+ switch (tbl->action) {
+ case IL_MIMO2_SWITCH_ANTENNA1:
+ case IL_MIMO2_SWITCH_ANTENNA2:
+ D_RATE("LQ: MIMO2 toggle Antennas\n");
+
+ if (tx_chains_num <= 2)
+ break;
+
+ if (win->success_ratio >= IL_RS_GOOD_RATIO)
+ break;
+
+ memcpy(search_tbl, tbl, sz);
+ if (il4965_rs_toggle_antenna
+ (valid_tx_ant, &search_tbl->current_rate,
+ search_tbl)) {
+ update_search_tbl_counter = 1;
+ goto out;
+ }
+ break;
+ case IL_MIMO2_SWITCH_SISO_A:
+ case IL_MIMO2_SWITCH_SISO_B:
+ case IL_MIMO2_SWITCH_SISO_C:
+ D_RATE("LQ: MIMO2 switch to SISO\n");
+
+ /* Set up new search table for SISO */
+ memcpy(search_tbl, tbl, sz);
+
+ if (tbl->action == IL_MIMO2_SWITCH_SISO_A)
+ search_tbl->ant_type = ANT_A;
+ else if (tbl->action == IL_MIMO2_SWITCH_SISO_B)
+ search_tbl->ant_type = ANT_B;
+ else
+ search_tbl->ant_type = ANT_C;
+
+ if (!il4965_rs_is_valid_ant
+ (valid_tx_ant, search_tbl->ant_type))
+ break;
+
+ ret =
+ il4965_rs_switch_to_siso(il, lq_sta, conf, sta,
+ search_tbl, idx);
+ if (!ret)
+ goto out;
+
+ break;
+
+ case IL_MIMO2_SWITCH_GI:
+ if (!tbl->is_ht40 &&
+ !(ht_cap->cap & IEEE80211_HT_CAP_SGI_20))
+ break;
+ if (tbl->is_ht40 &&
+ !(ht_cap->cap & IEEE80211_HT_CAP_SGI_40))
+ break;
+
+ D_RATE("LQ: MIMO2 toggle SGI/NGI\n");
+
+ /* Set up new search table for MIMO2 */
+ memcpy(search_tbl, tbl, sz);
+ search_tbl->is_SGI = !tbl->is_SGI;
+ il4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
+ /*
+ * If active table already uses the fastest possible
+ * modulation (dual stream with short guard interval),
+ * and it's working well, there's no need to look
+ * for a better type of modulation!
+ */
+ if (tbl->is_SGI) {
+ s32 tpt = lq_sta->last_tpt / 100;
+ if (tpt >= search_tbl->expected_tpt[idx])
+ break;
+ }
+ search_tbl->current_rate =
+ il4965_rate_n_flags_from_tbl(il, search_tbl, idx,
+ is_green);
+ update_search_tbl_counter = 1;
+ goto out;
+
+ }
+ tbl->action++;
+ if (tbl->action > IL_MIMO2_SWITCH_GI)
+ tbl->action = IL_MIMO2_SWITCH_ANTENNA1;
+
+ if (tbl->action == start_action)
+ break;
+ }
+ search_tbl->lq_type = LQ_NONE;
+ return 0;
+out:
+ lq_sta->search_better_tbl = 1;
+ tbl->action++;
+ if (tbl->action > IL_MIMO2_SWITCH_GI)
+ tbl->action = IL_MIMO2_SWITCH_ANTENNA1;
+ if (update_search_tbl_counter)
+ search_tbl->action = tbl->action;
+
+ return 0;
+
+}
+
+/*
+ * Check whether we should continue using same modulation mode, or
+ * begin search for a new mode, based on:
+ * 1) # tx successes or failures while using this mode
+ * 2) # times calling this function
+ * 3) elapsed time in this mode (not used, for now)
+ */
+static void
+il4965_rs_stay_in_table(struct il_lq_sta *lq_sta, bool force_search)
+{
+ struct il_scale_tbl_info *tbl;
+ int i;
+ int active_tbl;
+ int flush_interval_passed = 0;
+ struct il_priv *il;
+
+ il = lq_sta->drv;
+ active_tbl = lq_sta->active_tbl;
+
+ tbl = &(lq_sta->lq_info[active_tbl]);
+
+ /* If we've been disallowing search, see if we should now allow it */
+ if (lq_sta->stay_in_tbl) {
+
+ /* Elapsed time using current modulation mode */
+ if (lq_sta->flush_timer)
+ flush_interval_passed =
+ time_after(jiffies,
+ (unsigned long)(lq_sta->flush_timer +
+ RATE_SCALE_FLUSH_INTVL));
+
+ /*
+ * Check if we should allow search for new modulation mode.
+ * If many frames have failed or succeeded, or we've used
+ * this same modulation for a long time, allow search, and
+ * reset history stats that keep track of whether we should
+ * allow a new search. Also (below) reset all bitmaps and
+ * stats in active history.
+ */
+ if (force_search ||
+ lq_sta->total_failed > lq_sta->max_failure_limit ||
+ lq_sta->total_success > lq_sta->max_success_limit ||
+ (!lq_sta->search_better_tbl && lq_sta->flush_timer &&
+ flush_interval_passed)) {
+ D_RATE("LQ: stay is expired %d %d %d\n:",
+ lq_sta->total_failed, lq_sta->total_success,
+ flush_interval_passed);
+
+ /* Allow search for new mode */
+ lq_sta->stay_in_tbl = 0; /* only place reset */
+ lq_sta->total_failed = 0;
+ lq_sta->total_success = 0;
+ lq_sta->flush_timer = 0;
+
+ /*
+ * Else, if we've used this modulation mode for enough repetitions
+ * (regardless of elapsed time or success/failure), reset
+ * history bitmaps and rate-specific stats for all rates in
+ * the active table.
+ */
+ } else {
+ lq_sta->table_count++;
+ if (lq_sta->table_count >= lq_sta->table_count_limit) {
+ lq_sta->table_count = 0;
+
+ D_RATE("LQ: stay in table clear win\n");
+ for (i = 0; i < RATE_COUNT; i++)
+ il4965_rs_rate_scale_clear_win(&tbl->win[i]);
+ }
+ }
+
+ /* If transitioning to allow "search", reset all history
+ * bitmaps and stats in active table (this will become the new
+ * "search" table). */
+ if (!lq_sta->stay_in_tbl) {
+ for (i = 0; i < RATE_COUNT; i++)
+ il4965_rs_rate_scale_clear_win(&(tbl->win[i]));
+ }
+ }
+}
+
+/*
+ * setup rate table in uCode
+ */
+static void
+il4965_rs_update_rate_tbl(struct il_priv *il, struct il_rxon_context *ctx,
+ struct il_lq_sta *lq_sta,
+ struct il_scale_tbl_info *tbl, int idx, u8 is_green)
+{
+ u32 rate;
+
+ /* Update uCode's rate table. */
+ rate = il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green);
+ il4965_rs_fill_link_cmd(il, lq_sta, rate);
+ il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
+}
+
+/*
+ * Do rate scaling and search for new modulation mode.
+ */
+static void
+il4965_rs_rate_scale_perform(struct il_priv *il, struct sk_buff *skb,
+ struct ieee80211_sta *sta,
+ struct il_lq_sta *lq_sta)
+{
+ struct ieee80211_hw *hw = il->hw;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ int low = RATE_INVALID;
+ int high = RATE_INVALID;
+ int idx;
+ int i;
+ struct il_rate_scale_data *win = NULL;
+ int current_tpt = IL_INVALID_VALUE;
+ int low_tpt = IL_INVALID_VALUE;
+ int high_tpt = IL_INVALID_VALUE;
+ u32 fail_count;
+ s8 scale_action = 0;
+ u16 rate_mask;
+ u8 update_lq = 0;
+ struct il_scale_tbl_info *tbl, *tbl1;
+ u16 rate_scale_idx_msk = 0;
+ u8 is_green = 0;
+ u8 active_tbl = 0;
+ u8 done_search = 0;
+ u16 high_low;
+ s32 sr;
+ u8 tid = MAX_TID_COUNT;
+ struct il_tid_data *tid_data;
+ struct il_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+ D_RATE("rate scale calculate new rate for skb\n");
+
+ /* Send management frames and NO_ACK data using lowest rate. */
+ /* TODO: this could probably be improved.. */
+ if (!ieee80211_is_data(hdr->frame_control) ||
+ (info->flags & IEEE80211_TX_CTL_NO_ACK))
+ return;
+
+ lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
+
+ tid = il4965_rs_tl_add_packet(lq_sta, hdr);
+ if (tid != MAX_TID_COUNT && (lq_sta->tx_agg_tid_en & (1 << tid))) {
+ tid_data = &il->stations[lq_sta->lq.sta_id].tid[tid];
+ if (tid_data->agg.state == IL_AGG_OFF)
+ lq_sta->is_agg = 0;
+ else
+ lq_sta->is_agg = 1;
+ } else
+ lq_sta->is_agg = 0;
+
+ /*
+ * Select rate-scale / modulation-mode table to work with in
+ * the rest of this function: "search" if searching for better
+ * modulation mode, or "active" if doing rate scaling within a mode.
+ */
+ if (!lq_sta->search_better_tbl)
+ active_tbl = lq_sta->active_tbl;
+ else
+ active_tbl = 1 - lq_sta->active_tbl;
+
+ tbl = &(lq_sta->lq_info[active_tbl]);
+ if (is_legacy(tbl->lq_type))
+ lq_sta->is_green = 0;
+ else
+ lq_sta->is_green = il4965_rs_use_green(sta);
+ is_green = lq_sta->is_green;
+
+ /* current tx rate */
+ idx = lq_sta->last_txrate_idx;
+
+ D_RATE("Rate scale idx %d for type %d\n", idx, tbl->lq_type);
+
+ /* rates available for this association, and for modulation mode */
+ rate_mask = il4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
+
+ D_RATE("mask 0x%04X\n", rate_mask);
+
+ /* mask with station rate restriction */
+ if (is_legacy(tbl->lq_type)) {
+ if (lq_sta->band == IEEE80211_BAND_5GHZ)
+ /* supp_rates has no CCK bits in A mode */
+ rate_scale_idx_msk =
+ (u16) (rate_mask &
+ (lq_sta->supp_rates << IL_FIRST_OFDM_RATE));
+ else
+ rate_scale_idx_msk =
+ (u16) (rate_mask & lq_sta->supp_rates);
+
+ } else
+ rate_scale_idx_msk = rate_mask;
+
+ if (!rate_scale_idx_msk)
+ rate_scale_idx_msk = rate_mask;
+
+ if (!((1 << idx) & rate_scale_idx_msk)) {
+ IL_ERR("Current Rate is not valid\n");
+ if (lq_sta->search_better_tbl) {
+ /* revert to active table if search table is not valid */
+ tbl->lq_type = LQ_NONE;
+ lq_sta->search_better_tbl = 0;
+ tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ /* get "active" rate info */
+ idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
+ il4965_rs_update_rate_tbl(il, ctx, lq_sta, tbl, idx,
+ is_green);
+ }
+ return;
+ }
+
+ /* Get expected throughput table and history win for current rate */
+ if (!tbl->expected_tpt) {
+ IL_ERR("tbl->expected_tpt is NULL\n");
+ return;
+ }
+
+ /* force user max rate if set by user */
+ if (lq_sta->max_rate_idx != -1 && lq_sta->max_rate_idx < idx) {
+ idx = lq_sta->max_rate_idx;
+ update_lq = 1;
+ win = &(tbl->win[idx]);
+ goto lq_update;
+ }
+
+ win = &(tbl->win[idx]);
+
+ /*
+ * If there is not enough history to calculate actual average
+ * throughput, keep analyzing results of more tx frames, without
+ * changing rate or mode (bypass most of the rest of this function).
+ * Set up new rate table in uCode only if old rate is not supported
+ * in current association (use new rate found above).
+ */
+ fail_count = win->counter - win->success_counter;
+ if (fail_count < RATE_MIN_FAILURE_TH &&
+ win->success_counter < RATE_MIN_SUCCESS_TH) {
+ D_RATE("LQ: still below TH. succ=%d total=%d " "for idx %d\n",
+ win->success_counter, win->counter, idx);
+
+ /* Can't calculate this yet; not enough history */
+ win->average_tpt = IL_INVALID_VALUE;
+
+ /* Should we stay with this modulation mode,
+ * or search for a new one? */
+ il4965_rs_stay_in_table(lq_sta, false);
+
+ goto out;
+ }
+ /* Else we have enough samples; calculate estimate of
+ * actual average throughput */
+ if (win->average_tpt !=
+ ((win->success_ratio * tbl->expected_tpt[idx] + 64) / 128)) {
+ IL_ERR("expected_tpt should have been calculated by now\n");
+ win->average_tpt =
+ ((win->success_ratio * tbl->expected_tpt[idx] + 64) / 128);
+ }
+
+ /* If we are searching for better modulation mode, check success. */
+ if (lq_sta->search_better_tbl) {
+ /* If good success, continue using the "search" mode;
+ * no need to send new link quality command, since we're
+ * continuing to use the setup that we've been trying. */
+ if (win->average_tpt > lq_sta->last_tpt) {
+
+ D_RATE("LQ: SWITCHING TO NEW TBL "
+ "suc=%d cur-tpt=%d old-tpt=%d\n",
+ win->success_ratio, win->average_tpt,
+ lq_sta->last_tpt);
+
+ if (!is_legacy(tbl->lq_type))
+ lq_sta->enable_counter = 1;
+
+ /* Swap tables; "search" becomes "active" */
+ lq_sta->active_tbl = active_tbl;
+ current_tpt = win->average_tpt;
+
+ /* Else poor success; go back to mode in "active" table */
+ } else {
+
+ D_RATE("LQ: GOING BACK TO THE OLD TBL "
+ "suc=%d cur-tpt=%d old-tpt=%d\n",
+ win->success_ratio, win->average_tpt,
+ lq_sta->last_tpt);
+
+ /* Nullify "search" table */
+ tbl->lq_type = LQ_NONE;
+
+ /* Revert to "active" table */
+ active_tbl = lq_sta->active_tbl;
+ tbl = &(lq_sta->lq_info[active_tbl]);
+
+ /* Revert to "active" rate and throughput info */
+ idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
+ current_tpt = lq_sta->last_tpt;
+
+ /* Need to set up a new rate table in uCode */
+ update_lq = 1;
+ }
+
+ /* Either way, we've made a decision; modulation mode
+ * search is done, allow rate adjustment next time. */
+ lq_sta->search_better_tbl = 0;
+ done_search = 1; /* Don't switch modes below! */
+ goto lq_update;
+ }
+
+ /* (Else) not in search of better modulation mode, try for better
+ * starting rate, while staying in this mode. */
+ high_low =
+ il4965_rs_get_adjacent_rate(il, idx, rate_scale_idx_msk,
+ tbl->lq_type);
+ low = high_low & 0xff;
+ high = (high_low >> 8) & 0xff;
+
+ /* If the user set a max rate, don't allow a rate higher than that constraint */
+ if (lq_sta->max_rate_idx != -1 && lq_sta->max_rate_idx < high)
+ high = RATE_INVALID;
+
+ sr = win->success_ratio;
+
+ /* Collect measured throughputs for current and adjacent rates */
+ current_tpt = win->average_tpt;
+ if (low != RATE_INVALID)
+ low_tpt = tbl->win[low].average_tpt;
+ if (high != RATE_INVALID)
+ high_tpt = tbl->win[high].average_tpt;
+
+ scale_action = 0;
+
+ /* Too many failures, decrease rate */
+ if (sr <= RATE_DECREASE_TH || current_tpt == 0) {
+ D_RATE("decrease rate because of low success_ratio\n");
+ scale_action = -1;
+
+ /* No throughput measured yet for adjacent rates; try increase. */
+ } else if (low_tpt == IL_INVALID_VALUE && high_tpt == IL_INVALID_VALUE) {
+
+ if (high != RATE_INVALID && sr >= RATE_INCREASE_TH)
+ scale_action = 1;
+ else if (low != RATE_INVALID)
+ scale_action = 0;
+ }
+
+ /* Both adjacent throughputs are measured, but neither one has better
+ * throughput; we're using the best rate, don't change it! */
+ else if (low_tpt != IL_INVALID_VALUE && high_tpt != IL_INVALID_VALUE &&
+ low_tpt < current_tpt && high_tpt < current_tpt)
+ scale_action = 0;
+
+ /* At least one adjacent rate's throughput is measured,
+ * and may have better performance. */
+ else {
+ /* Higher adjacent rate's throughput is measured */
+ if (high_tpt != IL_INVALID_VALUE) {
+ /* Higher rate has better throughput */
+ if (high_tpt > current_tpt && sr >= RATE_INCREASE_TH)
+ scale_action = 1;
+ else
+ scale_action = 0;
+
+ /* Lower adjacent rate's throughput is measured */
+ } else if (low_tpt != IL_INVALID_VALUE) {
+ /* Lower rate has better throughput */
+ if (low_tpt > current_tpt) {
+ D_RATE("decrease rate because of low tpt\n");
+ scale_action = -1;
+ } else if (sr >= RATE_INCREASE_TH) {
+ scale_action = 1;
+ }
+ }
+ }
+
+ /* Sanity check; asked for decrease, but success rate or throughput
+ * has been good at old rate. Don't change it. */
+ if (scale_action == -1 && low != RATE_INVALID &&
+ (sr > RATE_HIGH_TH || current_tpt > 100 * tbl->expected_tpt[low]))
+ scale_action = 0;
+
+ switch (scale_action) {
+ case -1:
+ /* Decrease starting rate, update uCode's rate table */
+ if (low != RATE_INVALID) {
+ update_lq = 1;
+ idx = low;
+ }
+
+ break;
+ case 1:
+ /* Increase starting rate, update uCode's rate table */
+ if (high != RATE_INVALID) {
+ update_lq = 1;
+ idx = high;
+ }
+
+ break;
+ case 0:
+ /* No change */
+ default:
+ break;
+ }
+
+ D_RATE("choose rate scale idx %d action %d low %d " "high %d type %d\n",
+ idx, scale_action, low, high, tbl->lq_type);
+
+lq_update:
+ /* Replace uCode's rate table for the destination station. */
+ if (update_lq)
+ il4965_rs_update_rate_tbl(il, ctx, lq_sta, tbl, idx,
+ is_green);
+
+ /* Should we stay with this modulation mode,
+ * or search for a new one? */
+ il4965_rs_stay_in_table(lq_sta, false);
+
+ /*
+ * Search for new modulation mode if we're:
+ * 1) Not changing rates right now
+ * 2) Not just finishing up a search
+ * 3) Allowing a new search
+ */
+ if (!update_lq && !done_search && !lq_sta->stay_in_tbl && win->counter) {
+ /* Save current throughput to compare with "search" throughput */
+ lq_sta->last_tpt = current_tpt;
+
+ /* Select a new "search" modulation mode to try.
+ * If one is found, set up the new "search" table. */
+ if (is_legacy(tbl->lq_type))
+ il4965_rs_move_legacy_other(il, lq_sta, conf, sta, idx);
+ else if (is_siso(tbl->lq_type))
+ il4965_rs_move_siso_to_other(il, lq_sta, conf, sta,
+ idx);
+ else /* (is_mimo2(tbl->lq_type)) */
+ il4965_rs_move_mimo2_to_other(il, lq_sta, conf, sta,
+ idx);
+
+ /* If new "search" mode was selected, set up in uCode table */
+ if (lq_sta->search_better_tbl) {
+ /* Access the "search" table, clear its history. */
+ tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
+ for (i = 0; i < RATE_COUNT; i++)
+ il4965_rs_rate_scale_clear_win(&(tbl->win[i]));
+
+ /* Use new "search" start rate */
+ idx = il4965_hwrate_to_plcp_idx(tbl->current_rate);
+
+ D_RATE("Switch current mcs: %X idx: %d\n",
+ tbl->current_rate, idx);
+ il4965_rs_fill_link_cmd(il, lq_sta, tbl->current_rate);
+ il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_ASYNC, false);
+ } else
+ done_search = 1;
+ }
+
+ if (done_search && !lq_sta->stay_in_tbl) {
+ /* If the "active" (non-search) mode was legacy,
+ * and we've tried switching antennas,
+ * but we haven't been able to try HT modes (not available),
+ * stay with best antenna legacy modulation for a while
+ * before next round of mode comparisons. */
+ tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
+ if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
+ lq_sta->action_counter > tbl1->max_search) {
+ D_RATE("LQ: STAY in legacy table\n");
+ il4965_rs_set_stay_in_table(il, 1, lq_sta);
+ }
+
+ /* If we're in an HT mode, and all 3 mode switch actions
+ * have been tried and compared, stay in this best modulation
+ * mode for a while before next round of mode comparisons. */
+ if (lq_sta->enable_counter &&
+ lq_sta->action_counter >= tbl1->max_search) {
+ if (lq_sta->last_tpt > IL_AGG_TPT_THREHOLD &&
+ (lq_sta->tx_agg_tid_en & (1 << tid)) &&
+ tid != MAX_TID_COUNT) {
+ tid_data =
+ &il->stations[lq_sta->lq.sta_id].tid[tid];
+ if (tid_data->agg.state == IL_AGG_OFF) {
+ D_RATE("try to aggregate tid %d\n",
+ tid);
+ il4965_rs_tl_turn_on_agg(il, tid,
+ lq_sta, sta);
+ }
+ }
+ il4965_rs_set_stay_in_table(il, 0, lq_sta);
+ }
+ }
+
+out:
+ tbl->current_rate =
+ il4965_rate_n_flags_from_tbl(il, tbl, idx, is_green);
+ i = idx;
+ lq_sta->last_txrate_idx = i;
+}
+
+/**
+ * il4965_rs_initialize_lq - Initialize a station's hardware rate table
+ *
+ * The uCode's station table contains a table of fallback rates
+ * for automatic fallback during transmission.
+ *
+ * NOTE: This sets up a default set of values. These will be replaced later
+ * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
+ * rc80211_simple.
+ *
+ * NOTE: Run C_ADD_STA command to set up station table entry, before
+ * calling this function (which runs C_TX_LINK_QUALITY_CMD,
+ * which requires station table entry to exist).
+ */
+static void
+il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
+ struct ieee80211_sta *sta, struct il_lq_sta *lq_sta)
+{
+ struct il_scale_tbl_info *tbl;
+ int rate_idx;
+ int i;
+ u32 rate;
+ u8 use_green = il4965_rs_use_green(sta);
+ u8 active_tbl = 0;
+ u8 valid_tx_ant;
+ struct il_station_priv *sta_priv;
+ struct il_rxon_context *ctx;
+
+ if (!sta || !lq_sta)
+ return;
+
+ sta_priv = (void *)sta->drv_priv;
+ ctx = sta_priv->common.ctx;
+
+ i = lq_sta->last_txrate_idx;
+
+ valid_tx_ant = il->hw_params.valid_tx_ant;
+
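+	/* lq_info[] holds the "active" table currently in use and a "search"
+	 * table being evaluated; initialize whichever one applies here. */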
+ if (!lq_sta->search_better_tbl)
+ active_tbl = lq_sta->active_tbl;
+ else
+ active_tbl = 1 - lq_sta->active_tbl;
+
+ tbl = &(lq_sta->lq_info[active_tbl]);
+
+ if (i < 0 || i >= RATE_COUNT)
+ i = 0;
+
+ rate = il_rates[i].plcp;
+ tbl->ant_type = il4965_first_antenna(valid_tx_ant);
+ rate |= tbl->ant_type << RATE_MCS_ANT_POS;
+
+ if (i >= IL_FIRST_CCK_RATE && i <= IL_LAST_CCK_RATE)
+ rate |= RATE_MCS_CCK_MSK;
+
+ il4965_rs_get_tbl_info_from_mcs(rate, il->band, tbl, &rate_idx);
+ if (!il4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
+ il4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);
+
+ rate = il4965_rate_n_flags_from_tbl(il, tbl, rate_idx, use_green);
+ tbl->current_rate = rate;
+ il4965_rs_set_expected_tpt_table(lq_sta, tbl);
+ il4965_rs_fill_link_cmd(NULL, lq_sta, rate);
+ il->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
+ il_send_lq_cmd(il, ctx, &lq_sta->lq, CMD_SYNC, true);
+}
+
+static void
+il4965_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta,
+ struct ieee80211_tx_rate_control *txrc)
+{
+
+ struct sk_buff *skb = txrc->skb;
+ struct ieee80211_supported_band *sband = txrc->sband;
+ struct il_priv *il __maybe_unused = (struct il_priv *)il_r;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct il_lq_sta *lq_sta = il_sta;
+ int rate_idx;
+
+ D_RATE("rate scale calculate new rate for skb\n");
+
+ /* Get max rate if user set max rate */
+ if (lq_sta) {
+ lq_sta->max_rate_idx = txrc->max_rate_idx;
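+		/* mac80211's max_rate_idx is relative to the band's rate table;
+		 * 5 GHz has no CCK rates, so shift it into the driver's global
+		 * rate table, whose OFDM rates start at IL_FIRST_OFDM_RATE. */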
+ if (sband->band == IEEE80211_BAND_5GHZ &&
+ lq_sta->max_rate_idx != -1)
+ lq_sta->max_rate_idx += IL_FIRST_OFDM_RATE;
+ if (lq_sta->max_rate_idx < 0 ||
+ lq_sta->max_rate_idx >= RATE_COUNT)
+ lq_sta->max_rate_idx = -1;
+ }
+
+ /* Treat uninitialized rate scaling data same as non-existing. */
+ if (lq_sta && !lq_sta->drv) {
+ D_RATE("Rate scaling not initialized yet.\n");
+ il_sta = NULL;
+ }
+
+ /* Send management frames and NO_ACK data using lowest rate. */
+ if (rate_control_send_low(sta, il_sta, txrc))
+ return;
+
+ if (!lq_sta)
+ return;
+
+ rate_idx = lq_sta->last_txrate_idx;
+
+ if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
+ rate_idx -= IL_FIRST_OFDM_RATE;
+		/* 6M and 9M share the same MCS idx */
+ rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
+ if (il4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
+ RATE_MIMO2_6M_PLCP)
+ rate_idx = rate_idx + MCS_IDX_PER_STREAM;
+ info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
+ info->control.rates[0].flags |=
+ IEEE80211_TX_RC_SHORT_GI;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
+ info->control.rates[0].flags |=
+ IEEE80211_TX_RC_DUP_DATA;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
+ info->control.rates[0].flags |=
+ IEEE80211_TX_RC_40_MHZ_WIDTH;
+ if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
+ info->control.rates[0].flags |=
+ IEEE80211_TX_RC_GREEN_FIELD;
+ } else {
+ /* Check for invalid rates */
+ if (rate_idx < 0 || rate_idx >= RATE_COUNT_LEGACY ||
+ (sband->band == IEEE80211_BAND_5GHZ &&
+ rate_idx < IL_FIRST_OFDM_RATE))
+ rate_idx = rate_lowest_index(sband, sta);
+ /* On valid 5 GHz rate, adjust idx */
+ else if (sband->band == IEEE80211_BAND_5GHZ)
+ rate_idx -= IL_FIRST_OFDM_RATE;
+ info->control.rates[0].flags = 0;
+ }
+ info->control.rates[0].idx = rate_idx;
+
+}
+
+static void *
+il4965_rs_alloc_sta(void *il_rate, struct ieee80211_sta *sta, gfp_t gfp)
+{
+ struct il_station_priv *sta_priv =
+ (struct il_station_priv *)sta->drv_priv;
+ struct il_priv *il;
+
+ il = (struct il_priv *)il_rate;
+ D_RATE("create station rate scale win\n");
+
+ return &sta_priv->lq_sta;
+}
+
+/*
+ * Called after adding a new station to initialize rate scaling
+ */
+void
+il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta, u8 sta_id)
+{
+ int i, j;
+ struct ieee80211_hw *hw = il->hw;
+ struct ieee80211_conf *conf = &il->hw->conf;
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ struct il_station_priv *sta_priv;
+ struct il_lq_sta *lq_sta;
+ struct ieee80211_supported_band *sband;
+
+ sta_priv = (struct il_station_priv *)sta->drv_priv;
+ lq_sta = &sta_priv->lq_sta;
+ sband = hw->wiphy->bands[conf->channel->band];
+
+ lq_sta->lq.sta_id = sta_id;
+
+ for (j = 0; j < LQ_SIZE; j++)
+ for (i = 0; i < RATE_COUNT; i++)
+ il4965_rs_rate_scale_clear_win(&lq_sta->lq_info[j].
+ win[i]);
+
+ lq_sta->flush_timer = 0;
+ lq_sta->supp_rates = sta->supp_rates[sband->band];
+ for (j = 0; j < LQ_SIZE; j++)
+ for (i = 0; i < RATE_COUNT; i++)
+ il4965_rs_rate_scale_clear_win(&lq_sta->lq_info[j].
+ win[i]);
+
+ D_RATE("LQ:" "*** rate scale station global init for station %d ***\n",
+ sta_id);
+	/* TODO: what is a good starting rate for a STA? Probably mid-range,
+	 * not the lowest or the highest rate. Could consider using RSSI from
+	 * previous packets? IEEE 802.1X auth needs to succeed immediately
+	 * after assoc. */
+
+ lq_sta->is_dup = 0;
+ lq_sta->max_rate_idx = -1;
+ lq_sta->missed_rate_counter = IL_MISSED_RATE_MAX;
+ lq_sta->is_green = il4965_rs_use_green(sta);
+ lq_sta->active_legacy_rate = il->active_rate & ~(0x1000);
+ lq_sta->band = il->band;
+ /*
+ * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
+ * supp_rates[] does not; shift to convert format, force 9 MBits off.
+ */
+ lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
+ lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
+ lq_sta->active_siso_rate &= ~((u16) 0x2);
+ lq_sta->active_siso_rate <<= IL_FIRST_OFDM_RATE;
+
+ /* Same here */
+ lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
+ lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
+ lq_sta->active_mimo2_rate &= ~((u16) 0x2);
+ lq_sta->active_mimo2_rate <<= IL_FIRST_OFDM_RATE;
+
+ /* These values will be overridden later */
+ lq_sta->lq.general_params.single_stream_ant_msk =
+ il4965_first_antenna(il->hw_params.valid_tx_ant);
+ lq_sta->lq.general_params.dual_stream_ant_msk =
+ il->hw_params.valid_tx_ant & ~il4965_first_antenna(il->hw_params.
+ valid_tx_ant);
+ if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
+ lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
+ } else if (il4965_num_of_ant(il->hw_params.valid_tx_ant) == 2) {
+ lq_sta->lq.general_params.dual_stream_ant_msk =
+ il->hw_params.valid_tx_ant;
+ }
+
+	/* By default, allow aggregation for all TIDs */
+ lq_sta->tx_agg_tid_en = IL_AGG_ALL_TID;
+ lq_sta->drv = il;
+
+ /* Set last_txrate_idx to lowest rate */
+ lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
+ if (sband->band == IEEE80211_BAND_5GHZ)
+ lq_sta->last_txrate_idx += IL_FIRST_OFDM_RATE;
+ lq_sta->is_agg = 0;
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+ lq_sta->dbg_fixed_rate = 0;
+#endif
+
+ il4965_rs_initialize_lq(il, conf, sta, lq_sta);
+}
+
+static void
+il4965_rs_fill_link_cmd(struct il_priv *il, struct il_lq_sta *lq_sta,
+ u32 new_rate)
+{
+ struct il_scale_tbl_info tbl_type;
+ int idx = 0;
+ int rate_idx;
+ int repeat_rate = 0;
+ u8 ant_toggle_cnt = 0;
+ u8 use_ht_possible = 1;
+ u8 valid_tx_ant = 0;
+ struct il_link_quality_cmd *lq_cmd = &lq_sta->lq;
+
+ /* Override starting rate (idx 0) if needed for debug purposes */
+ il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);
+
+ /* Interpret new_rate (rate_n_flags) */
+ il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band, &tbl_type,
+ &rate_idx);
+
+ /* How many times should we repeat the initial rate? */
+ if (is_legacy(tbl_type.lq_type)) {
+ ant_toggle_cnt = 1;
+ repeat_rate = IL_NUMBER_TRY;
+ } else {
+ repeat_rate = IL_HT_NUMBER_TRY;
+ }
+
+ lq_cmd->general_params.mimo_delimiter =
+ is_mimo(tbl_type.lq_type) ? 1 : 0;
+
+ /* Fill 1st table entry (idx 0) */
+ lq_cmd->rs_table[idx].rate_n_flags = cpu_to_le32(new_rate);
+
+ if (il4965_num_of_ant(tbl_type.ant_type) == 1) {
+ lq_cmd->general_params.single_stream_ant_msk =
+ tbl_type.ant_type;
+ } else if (il4965_num_of_ant(tbl_type.ant_type) == 2) {
+ lq_cmd->general_params.dual_stream_ant_msk = tbl_type.ant_type;
+ }
+ /* otherwise we don't modify the existing value */
+ idx++;
+ repeat_rate--;
+ if (il)
+ valid_tx_ant = il->hw_params.valid_tx_ant;
+
+ /* Fill rest of rate table */
+ while (idx < LINK_QUAL_MAX_RETRY_NUM) {
+ /* Repeat initial/next rate.
+ * For legacy IL_NUMBER_TRY == 1, this loop will not execute.
+ * For HT IL_HT_NUMBER_TRY == 3, this executes twice. */
+ while (repeat_rate > 0 && idx < LINK_QUAL_MAX_RETRY_NUM) {
+ if (is_legacy(tbl_type.lq_type)) {
+ if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+ ant_toggle_cnt++;
+ else if (il &&
+ il4965_rs_toggle_antenna(valid_tx_ant,
+ &new_rate,
+ &tbl_type))
+ ant_toggle_cnt = 1;
+ }
+
+ /* Override next rate if needed for debug purposes */
+ il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);
+
+ /* Fill next table entry */
+ lq_cmd->rs_table[idx].rate_n_flags =
+ cpu_to_le32(new_rate);
+ repeat_rate--;
+ idx++;
+ }
+
+ il4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
+ &tbl_type, &rate_idx);
+
+ /* Indicate to uCode which entries might be MIMO.
+ * If initial rate was MIMO, this will finally end up
+ * as (IL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
+ if (is_mimo(tbl_type.lq_type))
+ lq_cmd->general_params.mimo_delimiter = idx;
+
+ /* Get next rate */
+ new_rate =
+ il4965_rs_get_lower_rate(lq_sta, &tbl_type, rate_idx,
+ use_ht_possible);
+
+ /* How many times should we repeat the next rate? */
+ if (is_legacy(tbl_type.lq_type)) {
+ if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
+ ant_toggle_cnt++;
+ else if (il &&
+ il4965_rs_toggle_antenna(valid_tx_ant,
+ &new_rate, &tbl_type))
+ ant_toggle_cnt = 1;
+
+ repeat_rate = IL_NUMBER_TRY;
+ } else {
+ repeat_rate = IL_HT_NUMBER_TRY;
+ }
+
+ /* Don't allow HT rates after next pass.
+ * il4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
+ use_ht_possible = 0;
+
+ /* Override next rate if needed for debug purposes */
+ il4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, idx);
+
+ /* Fill next table entry */
+ lq_cmd->rs_table[idx].rate_n_flags = cpu_to_le32(new_rate);
+
+ idx++;
+ repeat_rate--;
+ }
+
+ lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+ lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
+
+ lq_cmd->agg_params.agg_time_limit =
+ cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
+}
+
+static void *
+il4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
+{
+ return hw->priv;
+}
+
+/* The rate-scale framework requires a free function to be implemented */
+static void
+il4965_rs_free(void *il_rate)
+{
+ return;
+}
+
+static void
+il4965_rs_free_sta(void *il_r, struct ieee80211_sta *sta, void *il_sta)
+{
+ struct il_priv *il __maybe_unused = il_r;
+
+ D_RATE("enter\n");
+ D_RATE("leave\n");
+}
+
+#ifdef CONFIG_MAC80211_DEBUGFS
+static int
+il4965_open_file_generic(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static void
+il4965_rs_dbgfs_set_mcs(struct il_lq_sta *lq_sta, u32 * rate_n_flags, int idx)
+{
+ struct il_priv *il;
+ u8 valid_tx_ant;
+ u8 ant_sel_tx;
+
+ il = lq_sta->drv;
+ valid_tx_ant = il->hw_params.valid_tx_ant;
+ if (lq_sta->dbg_fixed_rate) {
+ ant_sel_tx =
+ ((lq_sta->
+ dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK) >>
+ RATE_MCS_ANT_POS);
+ if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
+ *rate_n_flags = lq_sta->dbg_fixed_rate;
+ D_RATE("Fixed rate ON\n");
+ } else {
+ lq_sta->dbg_fixed_rate = 0;
+ IL_ERR
+ ("Invalid antenna selection 0x%X, Valid is 0x%X\n",
+ ant_sel_tx, valid_tx_ant);
+ D_RATE("Fixed rate OFF\n");
+ }
+ } else {
+ D_RATE("Fixed rate OFF\n");
+ }
+}
+
+static ssize_t
+il4965_rs_sta_dbgfs_scale_table_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_lq_sta *lq_sta = file->private_data;
+ struct il_priv *il;
+ char buf[64];
+ size_t buf_size;
+ u32 parsed_rate;
+ struct il_station_priv *sta_priv =
+ container_of(lq_sta, struct il_station_priv, lq_sta);
+ struct il_rxon_context *ctx = sta_priv->common.ctx;
+
+ il = lq_sta->drv;
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ if (sscanf(buf, "%x", &parsed_rate) == 1)
+ lq_sta->dbg_fixed_rate = parsed_rate;
+ else
+ lq_sta->dbg_fixed_rate = 0;
+
+ lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
+ lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
+ lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
+
+ D_RATE("sta_id %d rate 0x%X\n", lq_sta->lq.sta_id,
+ lq_sta->dbg_fixed_rate);
+
+ if (lq_sta->dbg_fixed_rate) {
+ il4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
+ il_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC, false);
+ }
+
+ return count;
+}
+
+static ssize_t
+il4965_rs_sta_dbgfs_scale_table_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char *buff;
+ int desc = 0;
+ int i = 0;
+ int idx = 0;
+ ssize_t ret;
+
+ struct il_lq_sta *lq_sta = file->private_data;
+ struct il_priv *il;
+ struct il_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
+
+ il = lq_sta->drv;
+ buff = kmalloc(1024, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ desc += sprintf(buff + desc, "sta_id %d\n", lq_sta->lq.sta_id);
+ desc +=
+ sprintf(buff + desc, "failed=%d success=%d rate=0%X\n",
+ lq_sta->total_failed, lq_sta->total_success,
+ lq_sta->active_legacy_rate);
+ desc +=
+ sprintf(buff + desc, "fixed rate 0x%X\n", lq_sta->dbg_fixed_rate);
+ desc +=
+ sprintf(buff + desc, "valid_tx_ant %s%s%s\n",
+ (il->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
+ (il->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
+ (il->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
+ desc +=
+ sprintf(buff + desc, "lq type %s\n",
+ (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
+ if (is_Ht(tbl->lq_type)) {
+ desc +=
+ sprintf(buff + desc, " %s",
+ (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
+ desc +=
+ sprintf(buff + desc, " %s",
+ (tbl->is_ht40) ? "40MHz" : "20MHz");
+ desc +=
+ sprintf(buff + desc, " %s %s %s\n",
+ (tbl->is_SGI) ? "SGI" : "",
+ (lq_sta->is_green) ? "GF enabled" : "",
+ (lq_sta->is_agg) ? "AGG on" : "");
+ }
+ desc +=
+ sprintf(buff + desc, "last tx rate=0x%X\n",
+ lq_sta->last_rate_n_flags);
+ desc +=
+ sprintf(buff + desc,
+ "general:" "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
+ lq_sta->lq.general_params.flags,
+ lq_sta->lq.general_params.mimo_delimiter,
+ lq_sta->lq.general_params.single_stream_ant_msk,
+ lq_sta->lq.general_params.dual_stream_ant_msk);
+
+ desc +=
+ sprintf(buff + desc,
+ "agg:"
+ "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
+ le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
+ lq_sta->lq.agg_params.agg_dis_start_th,
+ lq_sta->lq.agg_params.agg_frame_cnt_limit);
+
+ desc +=
+ sprintf(buff + desc,
+ "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
+ lq_sta->lq.general_params.start_rate_idx[0],
+ lq_sta->lq.general_params.start_rate_idx[1],
+ lq_sta->lq.general_params.start_rate_idx[2],
+ lq_sta->lq.general_params.start_rate_idx[3]);
+
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+ idx =
+ il4965_hwrate_to_plcp_idx(le32_to_cpu
+ (lq_sta->lq.rs_table[i].
+ rate_n_flags));
+ if (is_legacy(tbl->lq_type)) {
+ desc +=
+ sprintf(buff + desc, " rate[%d] 0x%X %smbps\n", i,
+ le32_to_cpu(lq_sta->lq.rs_table[i].
+ rate_n_flags),
+ il_rate_mcs[idx].mbps);
+ } else {
+ desc +=
+ sprintf(buff + desc, " rate[%d] 0x%X %smbps (%s)\n",
+ i,
+ le32_to_cpu(lq_sta->lq.rs_table[i].
+ rate_n_flags),
+ il_rate_mcs[idx].mbps,
+ il_rate_mcs[idx].mcs);
+ }
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
+ return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
+ .write = il4965_rs_sta_dbgfs_scale_table_write,
+ .read = il4965_rs_sta_dbgfs_scale_table_read,
+ .open = il4965_open_file_generic,
+ .llseek = default_llseek,
+};
+
+static ssize_t
+il4965_rs_sta_dbgfs_stats_table_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ char *buff;
+ int desc = 0;
+ int i, j;
+ ssize_t ret;
+
+ struct il_lq_sta *lq_sta = file->private_data;
+
+ buff = kmalloc(1024, GFP_KERNEL);
+ if (!buff)
+ return -ENOMEM;
+
+ for (i = 0; i < LQ_SIZE; i++) {
+ desc +=
+ sprintf(buff + desc,
+ "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
+ "rate=0x%X\n", lq_sta->active_tbl == i ? "*" : "x",
+ lq_sta->lq_info[i].lq_type,
+ lq_sta->lq_info[i].is_SGI,
+ lq_sta->lq_info[i].is_ht40,
+ lq_sta->lq_info[i].is_dup, lq_sta->is_green,
+ lq_sta->lq_info[i].current_rate);
+ for (j = 0; j < RATE_COUNT; j++) {
+ desc +=
+ sprintf(buff + desc,
+ "counter=%d success=%d %%=%d\n",
+ lq_sta->lq_info[i].win[j].counter,
+ lq_sta->lq_info[i].win[j].success_counter,
+ lq_sta->lq_info[i].win[j].success_ratio);
+ }
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+ kfree(buff);
+ return ret;
+}
+
+static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
+ .read = il4965_rs_sta_dbgfs_stats_table_read,
+ .open = il4965_open_file_generic,
+ .llseek = default_llseek,
+};
+
+static ssize_t
+il4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ char buff[120];
+ int desc = 0;
+ struct il_lq_sta *lq_sta = file->private_data;
+ struct il_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
+
+ if (is_Ht(tbl->lq_type))
+ desc +=
+ sprintf(buff + desc, "Bit Rate= %d Mb/s\n",
+ tbl->expected_tpt[lq_sta->last_txrate_idx]);
+ else
+ desc +=
+ sprintf(buff + desc, "Bit Rate= %d Mb/s\n",
+ il_rates[lq_sta->last_txrate_idx].ieee >> 1);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buff, desc);
+}
+
+static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
+ .read = il4965_rs_sta_dbgfs_rate_scale_data_read,
+ .open = il4965_open_file_generic,
+ .llseek = default_llseek,
+};
+
+static void
+il4965_rs_add_debugfs(void *il, void *il_sta, struct dentry *dir)
+{
+ struct il_lq_sta *lq_sta = il_sta;
+ lq_sta->rs_sta_dbgfs_scale_table_file =
+ debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
+ lq_sta, &rs_sta_dbgfs_scale_table_ops);
+ lq_sta->rs_sta_dbgfs_stats_table_file =
+ debugfs_create_file("rate_stats_table", S_IRUSR, dir, lq_sta,
+ &rs_sta_dbgfs_stats_table_ops);
+ lq_sta->rs_sta_dbgfs_rate_scale_data_file =
+ debugfs_create_file("rate_scale_data", S_IRUSR, dir, lq_sta,
+ &rs_sta_dbgfs_rate_scale_data_ops);
+ lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
+ debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
+ &lq_sta->tx_agg_tid_en);
+
+}
+
+static void
+il4965_rs_remove_debugfs(void *il, void *il_sta)
+{
+ struct il_lq_sta *lq_sta = il_sta;
+ debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
+ debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
+}
+#endif
+
+/*
+ * Initialization of rate scaling information is done by driver after
+ * the station is added. Since mac80211 calls this function before a
+ * station is added we ignore it.
+ */
+static void
+il4965_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
+ struct ieee80211_sta *sta, void *il_sta)
+{
+}
+
+static struct rate_control_ops rs_4965_ops = {
+ .module = NULL,
+ .name = IL4965_RS_NAME,
+ .tx_status = il4965_rs_tx_status,
+ .get_rate = il4965_rs_get_rate,
+ .rate_init = il4965_rs_rate_init_stub,
+ .alloc = il4965_rs_alloc,
+ .free = il4965_rs_free,
+ .alloc_sta = il4965_rs_alloc_sta,
+ .free_sta = il4965_rs_free_sta,
+#ifdef CONFIG_MAC80211_DEBUGFS
+ .add_sta_debugfs = il4965_rs_add_debugfs,
+ .remove_sta_debugfs = il4965_rs_remove_debugfs,
+#endif
+};
+
+int
+il4965_rate_control_register(void)
+{
+ return ieee80211_rate_control_register(&rs_4965_ops);
+}
+
+void
+il4965_rate_control_unregister(void)
+{
+ ieee80211_rate_control_unregister(&rs_4965_ops);
+}
diff --git a/drivers/net/wireless/iwlegacy/4965.c b/drivers/net/wireless/iwlegacy/4965.c
new file mode 100644
index 0000000..cacbc03
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965.c
@@ -0,0 +1,2402 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <net/mac80211.h>
+#include <linux/etherdevice.h>
+#include <asm/unaligned.h>
+
+#include "common.h"
+#include "4965.h"
+
+/**
+ * il4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
+ * using sample data 100 bytes apart. If these sample points are good,
+ * it's a pretty good bet that everything between them is good, too.
+ */
+static int
+il4965_verify_inst_sparse(struct il_priv *il, __le32 * image, u32 len)
+{
+ u32 val;
+ int ret = 0;
+ u32 errcnt = 0;
+ u32 i;
+
+ D_INFO("ucode inst image size is %u\n", len);
+
+ for (i = 0; i < len; i += 100, image += 100 / sizeof(u32)) {
+ /* read data comes through single port, auto-incr addr */
+ /* NOTE: Use the debugless read so we don't flood kernel log
+ * if IL_DL_IO is set */
+ il_wr(il, HBUS_TARG_MEM_RADDR, i + IL4965_RTC_INST_LOWER_BOUND);
+ val = _il_rd(il, HBUS_TARG_MEM_RDAT);
+ if (val != le32_to_cpu(*image)) {
+ ret = -EIO;
+ errcnt++;
+ if (errcnt >= 3)
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/**
+ * il4965_verify_inst_full - verify runtime uCode image in card vs. host,
+ * looking at all data.
+ */
+static int
+il4965_verify_inst_full(struct il_priv *il, __le32 * image, u32 len)
+{
+ u32 val;
+ u32 save_len = len;
+ int ret = 0;
+ u32 errcnt;
+
+ D_INFO("ucode inst image size is %u\n", len);
+
+ il_wr(il, HBUS_TARG_MEM_RADDR, IL4965_RTC_INST_LOWER_BOUND);
+
+ errcnt = 0;
+ for (; len > 0; len -= sizeof(u32), image++) {
+ /* read data comes through single port, auto-incr addr */
+ /* NOTE: Use the debugless read so we don't flood kernel log
+ * if IL_DL_IO is set */
+ val = _il_rd(il, HBUS_TARG_MEM_RDAT);
+ if (val != le32_to_cpu(*image)) {
+ IL_ERR("uCode INST section is invalid at "
+ "offset 0x%x, is 0x%x, s/b 0x%x\n",
+ save_len - len, val, le32_to_cpu(*image));
+ ret = -EIO;
+ errcnt++;
+ if (errcnt >= 20)
+ break;
+ }
+ }
+
+ if (!errcnt)
+ D_INFO("ucode image in INSTRUCTION memory is good\n");
+
+ return ret;
+}
+
+/**
+ * il4965_verify_ucode - determine which instruction image is in SRAM,
+ * and verify its contents
+ */
+int
+il4965_verify_ucode(struct il_priv *il)
+{
+ __le32 *image;
+ u32 len;
+ int ret;
+
+ /* Try bootstrap */
+ image = (__le32 *) il->ucode_boot.v_addr;
+ len = il->ucode_boot.len;
+ ret = il4965_verify_inst_sparse(il, image, len);
+ if (!ret) {
+ D_INFO("Bootstrap uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ /* Try initialize */
+ image = (__le32 *) il->ucode_init.v_addr;
+ len = il->ucode_init.len;
+ ret = il4965_verify_inst_sparse(il, image, len);
+ if (!ret) {
+ D_INFO("Initialize uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ /* Try runtime/protocol */
+ image = (__le32 *) il->ucode_code.v_addr;
+ len = il->ucode_code.len;
+ ret = il4965_verify_inst_sparse(il, image, len);
+ if (!ret) {
+ D_INFO("Runtime uCode is good in inst SRAM\n");
+ return 0;
+ }
+
+ IL_ERR("NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
+
+ /* Since nothing seems to match, show first several data entries in
+ * instruction SRAM, so maybe visual inspection will give a clue.
+ * Selection of bootstrap image (vs. other images) is arbitrary. */
+ image = (__le32 *) il->ucode_boot.v_addr;
+ len = il->ucode_boot.len;
+ ret = il4965_verify_inst_full(il, image, len);
+
+ return ret;
+}
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+******************************************************************************/
+
+/*
+ * The device's EEPROM semaphore prevents conflicts between driver and uCode
+ * when accessing the EEPROM; each access is a series of pulses to/from the
+ * EEPROM chip, not a single event, so even reads could conflict if they
+ * weren't arbitrated by the semaphore.
+ */
+int
+il4965_eeprom_acquire_semaphore(struct il_priv *il)
+{
+ u16 count;
+ int ret;
+
+ for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
+ /* Request semaphore */
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+
+ /* See if we got it */
+ ret =
+ _il_poll_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
+ EEPROM_SEM_TIMEOUT);
+ if (ret >= 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+void
+il4965_eeprom_release_semaphore(struct il_priv *il)
+{
+ il_clear_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
+
+}
+
+int
+il4965_eeprom_check_version(struct il_priv *il)
+{
+ u16 eeprom_ver;
+ u16 calib_ver;
+
+ eeprom_ver = il_eeprom_query16(il, EEPROM_VERSION);
+ calib_ver = il_eeprom_query16(il, EEPROM_4965_CALIB_VERSION_OFFSET);
+
+ if (eeprom_ver < il->cfg->eeprom_ver ||
+ calib_ver < il->cfg->eeprom_calib_ver)
+ goto err;
+
+ IL_INFO("device EEPROM VER=0x%x, CALIB=0x%x\n", eeprom_ver, calib_ver);
+
+ return 0;
+err:
+ IL_ERR("Unsupported (too old) EEPROM VER=0x%x < 0x%x "
+ "CALIB=0x%x < 0x%x\n", eeprom_ver, il->cfg->eeprom_ver,
+ calib_ver, il->cfg->eeprom_calib_ver);
+ return -EINVAL;
+
+}
+
+void
+il4965_eeprom_get_mac(const struct il_priv *il, u8 * mac)
+{
+ const u8 *addr = il_eeprom_query_addr(il,
+ EEPROM_MAC_ADDRESS);
+ memcpy(mac, addr, ETH_ALEN);
+}
+
+/* Send led command */
+static int
+il4965_send_led_cmd(struct il_priv *il, struct il_led_cmd *led_cmd)
+{
+ struct il_host_cmd cmd = {
+ .id = C_LEDS,
+ .len = sizeof(struct il_led_cmd),
+ .data = led_cmd,
+ .flags = CMD_ASYNC,
+ .callback = NULL,
+ };
+ u32 reg;
+
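+	/* Keep only the BSM control bits in the LED register before issuing
+	 * the LED command; any other bits that are set get cleared here. */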
+ reg = _il_rd(il, CSR_LED_REG);
+ if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
+ _il_wr(il, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
+
+ return il_send_cmd(il, &cmd);
+}
+
+/* Set led register off */
+void
+il4965_led_enable(struct il_priv *il)
+{
+ _il_wr(il, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
+}
+
+const struct il_led_ops il4965_led_ops = {
+ .cmd = il4965_send_led_cmd,
+};
+
+static int il4965_send_tx_power(struct il_priv *il);
+static int il4965_hw_get_temperature(struct il_priv *il);
+
+/* Highest firmware API version supported */
+#define IL4965_UCODE_API_MAX 2
+
+/* Lowest firmware API version supported */
+#define IL4965_UCODE_API_MIN 2
+
+#define IL4965_FW_PRE "iwlwifi-4965-"
+#define _IL4965_MODULE_FIRMWARE(api) IL4965_FW_PRE #api ".ucode"
+#define IL4965_MODULE_FIRMWARE(api) _IL4965_MODULE_FIRMWARE(api)
+
+/* check contents of special bootstrap uCode SRAM */
+static int
+il4965_verify_bsm(struct il_priv *il)
+{
+ __le32 *image = il->ucode_boot.v_addr;
+ u32 len = il->ucode_boot.len;
+ u32 reg;
+ u32 val;
+
+ D_INFO("Begin verify bsm\n");
+
+ /* verify BSM SRAM contents */
+ val = il_rd_prph(il, BSM_WR_DWCOUNT_REG);
+ for (reg = BSM_SRAM_LOWER_BOUND; reg < BSM_SRAM_LOWER_BOUND + len;
+ reg += sizeof(u32), image++) {
+ val = il_rd_prph(il, reg);
+ if (val != le32_to_cpu(*image)) {
+ IL_ERR("BSM uCode verification failed at "
+ "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
+ BSM_SRAM_LOWER_BOUND, reg - BSM_SRAM_LOWER_BOUND,
+ len, val, le32_to_cpu(*image));
+ return -EIO;
+ }
+ }
+
+ D_INFO("BSM bootstrap uCode image OK\n");
+
+ return 0;
+}
+
+/**
+ * il4965_load_bsm - Load bootstrap instructions
+ *
+ * BSM operation:
+ *
+ * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
+ * in special SRAM that does not power down during RFKILL. When powering back
+ * up after power-saving sleeps (or during initial uCode load), the BSM loads
+ * the bootstrap program into the on-board processor, and starts it.
+ *
+ * The bootstrap program loads (via DMA) instructions and data for a new
+ * program from host DRAM locations indicated by the host driver in the
+ * BSM_DRAM_* registers. Once the new program is loaded, it starts
+ * automatically.
+ *
+ * When initializing the NIC, the host driver points the BSM to the
+ * "initialize" uCode image. This uCode sets up some internal data, then
+ * notifies host via "initialize alive" that it is complete.
+ *
+ * The host then replaces the BSM_DRAM_* pointer values to point to the
+ * normal runtime uCode instructions and a backup uCode data cache buffer
+ * (filled initially with starting data values for the on-board processor),
+ * then triggers the "initialize" uCode to load and launch the runtime uCode,
+ * which begins normal operation.
+ *
+ * When doing a power-save shutdown, runtime uCode saves data SRAM into
+ * the backup data cache in DRAM before SRAM is powered down.
+ *
+ * When powering back up, the BSM loads the bootstrap program. This reloads
+ * the runtime uCode instructions and the backup data cache into SRAM,
+ * and re-launches the runtime uCode from where it left off.
+ */
+static int
+il4965_load_bsm(struct il_priv *il)
+{
+ __le32 *image = il->ucode_boot.v_addr;
+ u32 len = il->ucode_boot.len;
+ dma_addr_t pinst;
+ dma_addr_t pdata;
+ u32 inst_len;
+ u32 data_len;
+ int i;
+ u32 done;
+ u32 reg_offset;
+ int ret;
+
+ D_INFO("Begin load bsm\n");
+
+ il->ucode_type = UCODE_RT;
+
+ /* make sure bootstrap program is no larger than BSM's SRAM size */
+ if (len > IL49_MAX_BSM_SIZE)
+ return -EINVAL;
+
+ /* Tell bootstrap uCode where to find the "Initialize" uCode
+ * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
+ * NOTE: il_init_alive_start() will replace these values,
+ * after the "initialize" uCode has run, to point to
+ * runtime/protocol instructions and backup data cache.
+ */
+ pinst = il->ucode_init.p_addr >> 4;
+ pdata = il->ucode_init_data.p_addr >> 4;
+ inst_len = il->ucode_init.len;
+ data_len = il->ucode_init_data.len;
+
+ il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
+ il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
+ il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
+ il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
+
+ /* Fill BSM memory with bootstrap instructions */
+ for (reg_offset = BSM_SRAM_LOWER_BOUND;
+ reg_offset < BSM_SRAM_LOWER_BOUND + len;
+ reg_offset += sizeof(u32), image++)
+ _il_wr_prph(il, reg_offset, le32_to_cpu(*image));
+
+ ret = il4965_verify_bsm(il);
+ if (ret)
+ return ret;
+
+ /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
+ il_wr_prph(il, BSM_WR_MEM_SRC_REG, 0x0);
+ il_wr_prph(il, BSM_WR_MEM_DST_REG, IL49_RTC_INST_LOWER_BOUND);
+ il_wr_prph(il, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
+
+ /* Load bootstrap code into instruction SRAM now,
+ * to prepare to load "initialize" uCode */
+ il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
+
+ /* Wait for load of bootstrap uCode to finish */
+ for (i = 0; i < 100; i++) {
+ done = il_rd_prph(il, BSM_WR_CTRL_REG);
+ if (!(done & BSM_WR_CTRL_REG_BIT_START))
+ break;
+ udelay(10);
+ }
+ if (i < 100)
+ D_INFO("BSM write complete, poll %d iterations\n", i);
+ else {
+ IL_ERR("BSM write did not complete!\n");
+ return -EIO;
+ }
+
+ /* Enable future boot loads whenever power management unit triggers it
+ * (e.g. when powering back up after power-save shutdown) */
+ il_wr_prph(il, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
+
+ return 0;
+}
+
+/**
+ * il4965_set_ucode_ptrs - Set uCode address location
+ *
+ * Tell initialization uCode where to find runtime uCode.
+ *
+ * BSM registers initially contain pointers to initialization uCode.
+ * We need to replace them to load runtime uCode inst and data,
+ * and to save runtime data when powering down.
+ */
+static int
+il4965_set_ucode_ptrs(struct il_priv *il)
+{
+ dma_addr_t pinst;
+ dma_addr_t pdata;
+ int ret = 0;
+
+ /* bits 35:4 for 4965 */
+ pinst = il->ucode_code.p_addr >> 4;
+ pdata = il->ucode_data_backup.p_addr >> 4;
+
+ /* Tell bootstrap uCode where to find image to load */
+ il_wr_prph(il, BSM_DRAM_INST_PTR_REG, pinst);
+ il_wr_prph(il, BSM_DRAM_DATA_PTR_REG, pdata);
+ il_wr_prph(il, BSM_DRAM_DATA_BYTECOUNT_REG, il->ucode_data.len);
+
+ /* Inst byte count must be last to set up, bit 31 signals uCode
+ * that all new ptr/size info is in place */
+ il_wr_prph(il, BSM_DRAM_INST_BYTECOUNT_REG,
+ il->ucode_code.len | BSM_DRAM_INST_LOAD);
+ D_INFO("Runtime uCode pointers are set.\n");
+
+ return ret;
+}
+
+/**
+ * il4965_init_alive_start - Called after N_ALIVE notification received
+ *
+ * Called after N_ALIVE notification received from "initialize" uCode.
+ *
+ * The 4965 "initialize" ALIVE reply contains calibration data for:
+ * Voltage, temperature, and MIMO tx gain correction, now stored in il
+ * (3945 does not contain this data).
+ *
+ * Tell "initialize" uCode to go ahead and load the runtime uCode.
+*/
+static void
+il4965_init_alive_start(struct il_priv *il)
+{
+ /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
+ * This is a paranoid check, because we would not have gotten the
+ * "initialize" alive if code weren't properly loaded. */
+ if (il4965_verify_ucode(il)) {
+ /* Runtime instruction load was bad;
+ * take it all the way back down so we can try again */
+ D_INFO("Bad \"initialize\" uCode load.\n");
+ goto restart;
+ }
+
+ /* Calculate temperature */
+ il->temperature = il4965_hw_get_temperature(il);
+
+ /* Send pointers to protocol/runtime uCode image ... init code will
+ * load and launch runtime uCode, which will send us another "Alive"
+ * notification. */
+ D_INFO("Initialization Alive received.\n");
+ if (il4965_set_ucode_ptrs(il)) {
+ /* Runtime instruction load won't happen;
+ * take it all the way back down so we can try again */
+ D_INFO("Couldn't set up uCode pointers.\n");
+ goto restart;
+ }
+ return;
+
+restart:
+ queue_work(il->workqueue, &il->restart);
+}
+
+static bool
+iw4965_is_ht40_channel(__le32 rxon_flags)
+{
+ int chan_mod =
+ le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK) >>
+ RXON_FLG_CHANNEL_MODE_POS;
+ return (chan_mod == CHANNEL_MODE_PURE_40 ||
+ chan_mod == CHANNEL_MODE_MIXED);
+}
+
+static void
+il4965_nic_config(struct il_priv *il)
+{
+ unsigned long flags;
+ u16 radio_cfg;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ radio_cfg = il_eeprom_query16(il, EEPROM_RADIO_CONFIG);
+
+ /* write radio config values to register */
+ if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
+ EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
+ EEPROM_RF_CFG_DASH_MSK(radio_cfg));
+
+ /* set CSR_HW_CONFIG_REG for uCode use */
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
+ CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
+
+ il->calib_info =
+ (struct il_eeprom_calib_info *)
+ il_eeprom_query_addr(il, EEPROM_4965_CALIB_TXPOWER_OFFSET);
+
+ spin_unlock_irqrestore(&il->lock, flags);
+}
+
+/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
+ * Called after every association, but this runs only once!
+ * ... once chain noise is calibrated the first time, it's good forever. */
+static void
+il4965_chain_noise_reset(struct il_priv *il)
+{
+ struct il_chain_noise_data *data = &(il->chain_noise_data);
+
+ if (data->state == IL_CHAIN_NOISE_ALIVE && il_is_any_associated(il)) {
+ struct il_calib_diff_gain_cmd cmd;
+
+ /* clear data for chain noise calibration algorithm */
+ data->chain_noise_a = 0;
+ data->chain_noise_b = 0;
+ data->chain_noise_c = 0;
+ data->chain_signal_a = 0;
+ data->chain_signal_b = 0;
+ data->chain_signal_c = 0;
+ data->beacon_count = 0;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.hdr.op_code = IL_PHY_CALIBRATE_DIFF_GAIN_CMD;
+ cmd.diff_gain_a = 0;
+ cmd.diff_gain_b = 0;
+ cmd.diff_gain_c = 0;
+ if (il_send_cmd_pdu(il, C_PHY_CALIBRATION, sizeof(cmd), &cmd))
+ IL_ERR("Could not send C_PHY_CALIBRATION\n");
+ data->state = IL_CHAIN_NOISE_ACCUMULATE;
+ D_CALIB("Run chain_noise_calibrate\n");
+ }
+}
+
+static struct il_sensitivity_ranges il4965_sensitivity = {
+ .min_nrg_cck = 97,
+ .max_nrg_cck = 0, /* not used, set to 0 */
+
+ .auto_corr_min_ofdm = 85,
+ .auto_corr_min_ofdm_mrc = 170,
+ .auto_corr_min_ofdm_x1 = 105,
+ .auto_corr_min_ofdm_mrc_x1 = 220,
+
+ .auto_corr_max_ofdm = 120,
+ .auto_corr_max_ofdm_mrc = 210,
+ .auto_corr_max_ofdm_x1 = 140,
+ .auto_corr_max_ofdm_mrc_x1 = 270,
+
+ .auto_corr_min_cck = 125,
+ .auto_corr_max_cck = 200,
+ .auto_corr_min_cck_mrc = 200,
+ .auto_corr_max_cck_mrc = 400,
+
+ .nrg_th_cck = 100,
+ .nrg_th_ofdm = 100,
+
+ .barker_corr_th_min = 190,
+ .barker_corr_th_min_mrc = 390,
+ .nrg_th_cca = 62,
+};
+
+static void
+il4965_set_ct_threshold(struct il_priv *il)
+{
+ /* want Kelvin */
+ il->hw_params.ct_kill_threshold =
+ CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
+}
+
+/**
+ * il4965_hw_set_hw_params
+ *
+ * Called when initializing the driver
+ */
+static int
+il4965_hw_set_hw_params(struct il_priv *il)
+{
+ if (il->cfg->mod_params->num_of_queues >= IL_MIN_NUM_QUEUES &&
+ il->cfg->mod_params->num_of_queues <= IL49_NUM_QUEUES)
+ il->cfg->base_params->num_of_queues =
+ il->cfg->mod_params->num_of_queues;
+
+ il->hw_params.max_txq_num = il->cfg->base_params->num_of_queues;
+ il->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
+ il->hw_params.scd_bc_tbls_size =
+ il->cfg->base_params->num_of_queues *
+ sizeof(struct il4965_scd_bc_tbl);
+ il->hw_params.tfd_size = sizeof(struct il_tfd);
+ il->hw_params.max_stations = IL4965_STATION_COUNT;
+ il->ctx.bcast_sta_id = IL4965_BROADCAST_ID;
+ il->hw_params.max_data_size = IL49_RTC_DATA_SIZE;
+ il->hw_params.max_inst_size = IL49_RTC_INST_SIZE;
+ il->hw_params.max_bsm_size = BSM_SRAM_SIZE;
+ il->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
+
+ il->hw_params.rx_wrt_ptr_reg = FH49_RSCSR_CHNL0_WPTR;
+
+ il->hw_params.tx_chains_num = il4965_num_of_ant(il->cfg->valid_tx_ant);
+ il->hw_params.rx_chains_num = il4965_num_of_ant(il->cfg->valid_rx_ant);
+ il->hw_params.valid_tx_ant = il->cfg->valid_tx_ant;
+ il->hw_params.valid_rx_ant = il->cfg->valid_rx_ant;
+
+ il4965_set_ct_threshold(il);
+
+ il->hw_params.sens = &il4965_sensitivity;
+ il->hw_params.beacon_time_tsf_bits = IL4965_EXT_BEACON_TIME_POS;
+
+ return 0;
+}
+
+static s32
+il4965_math_div_round(s32 num, s32 denom, s32 * res)
+{
+ s32 sign = 1;
+
+ if (num < 0) {
+ sign = -sign;
+ num = -num;
+ }
+ if (denom < 0) {
+ sign = -sign;
+ denom = -denom;
+ }
+ *res = 1;
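+	/* Round |num / denom| to the nearest integer: adding half the
+	 * denominator before dividing implements round-half-up; the saved
+	 * sign is reapplied afterwards. */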
+ *res = ((num * 2 + denom) / (denom * 2)) * sign;
+
+ return 1;
+}
+
+/**
+ * il4965_get_voltage_compensation - Power supply voltage comp for txpower
+ *
+ * Determines power supply voltage compensation for txpower calculations.
+ * Returns number of 1/2-dB steps to subtract from gain table idx,
+ * to compensate for difference between power supply voltage during
+ * factory measurements, vs. current power supply voltage.
+ *
+ * Voltage indication is higher for lower voltage.
+ * Lower voltage requires more gain (lower gain table idx).
+ */
+static s32
+il4965_get_voltage_compensation(s32 eeprom_voltage, s32 current_voltage)
+{
+ s32 comp = 0;
+
+ if (TX_POWER_IL_ILLEGAL_VOLTAGE == eeprom_voltage ||
+ TX_POWER_IL_ILLEGAL_VOLTAGE == current_voltage)
+ return 0;
+
+ il4965_math_div_round(current_voltage - eeprom_voltage,
+ TX_POWER_IL_VOLTAGE_CODES_PER_03V, &comp);
+
+ if (current_voltage > eeprom_voltage)
+ comp *= 2;
+ if ((comp < -2) || (comp > 2))
+ comp = 0;
+
+ return comp;
+}
+
+static s32
+il4965_get_tx_atten_grp(u16 channel)
+{
+ if (channel >= CALIB_IL_TX_ATTEN_GR5_FCH &&
+ channel <= CALIB_IL_TX_ATTEN_GR5_LCH)
+ return CALIB_CH_GROUP_5;
+
+ if (channel >= CALIB_IL_TX_ATTEN_GR1_FCH &&
+ channel <= CALIB_IL_TX_ATTEN_GR1_LCH)
+ return CALIB_CH_GROUP_1;
+
+ if (channel >= CALIB_IL_TX_ATTEN_GR2_FCH &&
+ channel <= CALIB_IL_TX_ATTEN_GR2_LCH)
+ return CALIB_CH_GROUP_2;
+
+ if (channel >= CALIB_IL_TX_ATTEN_GR3_FCH &&
+ channel <= CALIB_IL_TX_ATTEN_GR3_LCH)
+ return CALIB_CH_GROUP_3;
+
+ if (channel >= CALIB_IL_TX_ATTEN_GR4_FCH &&
+ channel <= CALIB_IL_TX_ATTEN_GR4_LCH)
+ return CALIB_CH_GROUP_4;
+
+ return -EINVAL;
+}
+
+static u32
+il4965_get_sub_band(const struct il_priv *il, u32 channel)
+{
+ s32 b = -1;
+
+ for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
+ if (il->calib_info->band_info[b].ch_from == 0)
+ continue;
+
+ if (channel >= il->calib_info->band_info[b].ch_from &&
+ channel <= il->calib_info->band_info[b].ch_to)
+ break;
+ }
+
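+	/* If no calibrated sub-band contains the channel, b ends up equal to
+	 * EEPROM_TX_POWER_BANDS, which the caller treats as "not found". */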
+ return b;
+}
+
+static s32
+il4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
+{
+ s32 val;
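+	/* Linearly interpolate between (x1, y1) and (x2, y2) at x, with
+	 * rounding; if the two sample points coincide, just return y1. */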
+
+ if (x2 == x1)
+ return y1;
+ else {
+ il4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
+ return val + y2;
+ }
+}
+
+/**
+ * il4965_interpolate_chan - Interpolate factory measurements for one channel
+ *
+ * Interpolates factory measurements from the two sample channels within a
+ * sub-band, to apply to channel of interest. Interpolation is proportional to
+ * differences in channel frequencies, which is proportional to differences
+ * in channel number.
+ */
+static int
+il4965_interpolate_chan(struct il_priv *il, u32 channel,
+ struct il_eeprom_calib_ch_info *chan_info)
+{
+ s32 s = -1;
+ u32 c;
+ u32 m;
+ const struct il_eeprom_calib_measure *m1;
+ const struct il_eeprom_calib_measure *m2;
+ struct il_eeprom_calib_measure *omeas;
+ u32 ch_i1;
+ u32 ch_i2;
+
+ s = il4965_get_sub_band(il, channel);
+ if (s >= EEPROM_TX_POWER_BANDS) {
+ IL_ERR("Tx Power can not find channel %d\n", channel);
+ return -1;
+ }
+
+ ch_i1 = il->calib_info->band_info[s].ch1.ch_num;
+ ch_i2 = il->calib_info->band_info[s].ch2.ch_num;
+ chan_info->ch_num = (u8) channel;
+
+ D_TXPOWER("channel %d subband %d factory cal ch %d & %d\n", channel, s,
+ ch_i1, ch_i2);
+
+ for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
+ for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
+ m1 = &(il->calib_info->band_info[s].ch1.
+ measurements[c][m]);
+ m2 = &(il->calib_info->band_info[s].ch2.
+ measurements[c][m]);
+ omeas = &(chan_info->measurements[c][m]);
+
+ omeas->actual_pow =
+ (u8) il4965_interpolate_value(channel, ch_i1,
+ m1->actual_pow, ch_i2,
+ m2->actual_pow);
+ omeas->gain_idx =
+ (u8) il4965_interpolate_value(channel, ch_i1,
+ m1->gain_idx, ch_i2,
+ m2->gain_idx);
+ omeas->temperature =
+ (u8) il4965_interpolate_value(channel, ch_i1,
+ m1->temperature,
+ ch_i2,
+ m2->temperature);
+ omeas->pa_det =
+ (s8) il4965_interpolate_value(channel, ch_i1,
+ m1->pa_det, ch_i2,
+ m2->pa_det);
+
+ D_TXPOWER("chain %d meas %d AP1=%d AP2=%d AP=%d\n", c,
+ m, m1->actual_pow, m2->actual_pow,
+ omeas->actual_pow);
+ D_TXPOWER("chain %d meas %d NI1=%d NI2=%d NI=%d\n", c,
+ m, m1->gain_idx, m2->gain_idx,
+ omeas->gain_idx);
+ D_TXPOWER("chain %d meas %d PA1=%d PA2=%d PA=%d\n", c,
+ m, m1->pa_det, m2->pa_det, omeas->pa_det);
+ D_TXPOWER("chain %d meas %d T1=%d T2=%d T=%d\n", c,
+ m, m1->temperature, m2->temperature,
+ omeas->temperature);
+ }
+ }
+
+ return 0;
+}
+
+/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
+ * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
+static s32 back_off_table[] = {
+ 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
+ 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
+ 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
+ 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
+ 10 /* CCK */
+};
+
+/* Thermal compensation values for txpower for various frequency ranges ...
+ * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
+static struct il4965_txpower_comp_entry {
+ s32 degrees_per_05db_a;
+ s32 degrees_per_05db_a_denom;
+} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
+ {
+ 9, 2}, /* group 0 5.2, ch 34-43 */
+ {
+ 4, 1}, /* group 1 5.2, ch 44-70 */
+ {
+ 4, 1}, /* group 2 5.2, ch 71-124 */
+ {
+ 4, 1}, /* group 3 5.2, ch 125-200 */
+ {
+ 3, 1} /* group 4 2.4, ch all */
+};
+
+static s32
+get_min_power_idx(s32 rate_power_idx, u32 band)
+{
+ if (!band) {
+ if ((rate_power_idx & 7) <= 4)
+ return MIN_TX_GAIN_IDX_52GHZ_EXT;
+ }
+ return MIN_TX_GAIN_IDX;
+}
+
+struct gain_entry {
+ u8 dsp;
+ u8 radio;
+};
+
+static const struct gain_entry gain_table[2][108] = {
+ /* 5.2GHz power gain idx table */
+ {
+ {123, 0x3F}, /* highest txpower */
+ {117, 0x3F},
+ {110, 0x3F},
+ {104, 0x3F},
+ {98, 0x3F},
+ {110, 0x3E},
+ {104, 0x3E},
+ {98, 0x3E},
+ {110, 0x3D},
+ {104, 0x3D},
+ {98, 0x3D},
+ {110, 0x3C},
+ {104, 0x3C},
+ {98, 0x3C},
+ {110, 0x3B},
+ {104, 0x3B},
+ {98, 0x3B},
+ {110, 0x3A},
+ {104, 0x3A},
+ {98, 0x3A},
+ {110, 0x39},
+ {104, 0x39},
+ {98, 0x39},
+ {110, 0x38},
+ {104, 0x38},
+ {98, 0x38},
+ {110, 0x37},
+ {104, 0x37},
+ {98, 0x37},
+ {110, 0x36},
+ {104, 0x36},
+ {98, 0x36},
+ {110, 0x35},
+ {104, 0x35},
+ {98, 0x35},
+ {110, 0x34},
+ {104, 0x34},
+ {98, 0x34},
+ {110, 0x33},
+ {104, 0x33},
+ {98, 0x33},
+ {110, 0x32},
+ {104, 0x32},
+ {98, 0x32},
+ {110, 0x31},
+ {104, 0x31},
+ {98, 0x31},
+ {110, 0x30},
+ {104, 0x30},
+ {98, 0x30},
+ {110, 0x25},
+ {104, 0x25},
+ {98, 0x25},
+ {110, 0x24},
+ {104, 0x24},
+ {98, 0x24},
+ {110, 0x23},
+ {104, 0x23},
+ {98, 0x23},
+ {110, 0x22},
+ {104, 0x18},
+ {98, 0x18},
+ {110, 0x17},
+ {104, 0x17},
+ {98, 0x17},
+ {110, 0x16},
+ {104, 0x16},
+ {98, 0x16},
+ {110, 0x15},
+ {104, 0x15},
+ {98, 0x15},
+ {110, 0x14},
+ {104, 0x14},
+ {98, 0x14},
+ {110, 0x13},
+ {104, 0x13},
+ {98, 0x13},
+ {110, 0x12},
+ {104, 0x08},
+ {98, 0x08},
+ {110, 0x07},
+ {104, 0x07},
+ {98, 0x07},
+ {110, 0x06},
+ {104, 0x06},
+ {98, 0x06},
+ {110, 0x05},
+ {104, 0x05},
+ {98, 0x05},
+ {110, 0x04},
+ {104, 0x04},
+ {98, 0x04},
+ {110, 0x03},
+ {104, 0x03},
+ {98, 0x03},
+ {110, 0x02},
+ {104, 0x02},
+ {98, 0x02},
+ {110, 0x01},
+ {104, 0x01},
+ {98, 0x01},
+ {110, 0x00},
+ {104, 0x00},
+ {98, 0x00},
+ {93, 0x00},
+ {88, 0x00},
+ {83, 0x00},
+ {78, 0x00},
+ },
+ /* 2.4GHz power gain idx table */
+ {
+ {110, 0x3f}, /* highest txpower */
+ {104, 0x3f},
+ {98, 0x3f},
+ {110, 0x3e},
+ {104, 0x3e},
+ {98, 0x3e},
+ {110, 0x3d},
+ {104, 0x3d},
+ {98, 0x3d},
+ {110, 0x3c},
+ {104, 0x3c},
+ {98, 0x3c},
+ {110, 0x3b},
+ {104, 0x3b},
+ {98, 0x3b},
+ {110, 0x3a},
+ {104, 0x3a},
+ {98, 0x3a},
+ {110, 0x39},
+ {104, 0x39},
+ {98, 0x39},
+ {110, 0x38},
+ {104, 0x38},
+ {98, 0x38},
+ {110, 0x37},
+ {104, 0x37},
+ {98, 0x37},
+ {110, 0x36},
+ {104, 0x36},
+ {98, 0x36},
+ {110, 0x35},
+ {104, 0x35},
+ {98, 0x35},
+ {110, 0x34},
+ {104, 0x34},
+ {98, 0x34},
+ {110, 0x33},
+ {104, 0x33},
+ {98, 0x33},
+ {110, 0x32},
+ {104, 0x32},
+ {98, 0x32},
+ {110, 0x31},
+ {104, 0x31},
+ {98, 0x31},
+ {110, 0x30},
+ {104, 0x30},
+ {98, 0x30},
+ {110, 0x6},
+ {104, 0x6},
+ {98, 0x6},
+ {110, 0x5},
+ {104, 0x5},
+ {98, 0x5},
+ {110, 0x4},
+ {104, 0x4},
+ {98, 0x4},
+ {110, 0x3},
+ {104, 0x3},
+ {98, 0x3},
+ {110, 0x2},
+ {104, 0x2},
+ {98, 0x2},
+ {110, 0x1},
+ {104, 0x1},
+ {98, 0x1},
+ {110, 0x0},
+ {104, 0x0},
+ {98, 0x0},
+ {97, 0},
+ {96, 0},
+ {95, 0},
+ {94, 0},
+ {93, 0},
+ {92, 0},
+ {91, 0},
+ {90, 0},
+ {89, 0},
+ {88, 0},
+ {87, 0},
+ {86, 0},
+ {85, 0},
+ {84, 0},
+ {83, 0},
+ {82, 0},
+ {81, 0},
+ {80, 0},
+ {79, 0},
+ {78, 0},
+ {77, 0},
+ {76, 0},
+ {75, 0},
+ {74, 0},
+ {73, 0},
+ {72, 0},
+ {71, 0},
+ {70, 0},
+ {69, 0},
+ {68, 0},
+ {67, 0},
+ {66, 0},
+ {65, 0},
+ {64, 0},
+ {63, 0},
+ {62, 0},
+ {61, 0},
+ {60, 0},
+ {59, 0},
+ }
+};
+
+static int
+il4965_fill_txpower_tbl(struct il_priv *il, u8 band, u16 channel, u8 is_ht40,
+ u8 ctrl_chan_high,
+ struct il4965_tx_power_db *tx_power_tbl)
+{
+ u8 saturation_power;
+ s32 target_power;
+ s32 user_target_power;
+ s32 power_limit;
+ s32 current_temp;
+ s32 reg_limit;
+ s32 current_regulatory;
+ s32 txatten_grp = CALIB_CH_GROUP_MAX;
+ int i;
+ int c;
+ const struct il_channel_info *ch_info = NULL;
+ struct il_eeprom_calib_ch_info ch_eeprom_info;
+ const struct il_eeprom_calib_measure *measurement;
+ s16 voltage;
+ s32 init_voltage;
+ s32 voltage_compensation;
+ s32 degrees_per_05db_num;
+ s32 degrees_per_05db_denom;
+ s32 factory_temp;
+ s32 temperature_comp[2];
+ s32 factory_gain_idx[2];
+ s32 factory_actual_pwr[2];
+ s32 power_idx;
+
+ /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units
+	 * are used for indexing into the txpower table) */
+ user_target_power = 2 * il->tx_power_user_lmt;
+
+ /* Get current (RXON) channel, band, width */
+ D_TXPOWER("chan %d band %d is_ht40 %d\n", channel, band, is_ht40);
+
+ ch_info = il_get_channel_info(il, il->band, channel);
+
+ if (!il_is_channel_valid(ch_info))
+ return -EINVAL;
+
+ /* get txatten group, used to select 1) thermal txpower adjustment
+ * and 2) mimo txpower balance between Tx chains. */
+ txatten_grp = il4965_get_tx_atten_grp(channel);
+ if (txatten_grp < 0) {
+ IL_ERR("Can't find txatten group for channel %d.\n", channel);
+ return txatten_grp;
+ }
+
+ D_TXPOWER("channel %d belongs to txatten group %d\n", channel,
+ txatten_grp);
+
+ if (is_ht40) {
+ if (ctrl_chan_high)
+ channel -= 2;
+ else
+ channel += 2;
+ }
+
+ /* hardware txpower limits ...
+ * saturation (clipping distortion) txpowers are in half-dBm */
+ if (band)
+ saturation_power = il->calib_info->saturation_power24;
+ else
+ saturation_power = il->calib_info->saturation_power52;
+
+ if (saturation_power < IL_TX_POWER_SATURATION_MIN ||
+ saturation_power > IL_TX_POWER_SATURATION_MAX) {
+ if (band)
+ saturation_power = IL_TX_POWER_DEFAULT_SATURATION_24;
+ else
+ saturation_power = IL_TX_POWER_DEFAULT_SATURATION_52;
+ }
+
+ /* regulatory txpower limits ... reg_limit values are in half-dBm,
+ * max_power_avg values are in dBm, convert * 2 */
+ if (is_ht40)
+ reg_limit = ch_info->ht40_max_power_avg * 2;
+ else
+ reg_limit = ch_info->max_power_avg * 2;
+
+ if ((reg_limit < IL_TX_POWER_REGULATORY_MIN) ||
+ (reg_limit > IL_TX_POWER_REGULATORY_MAX)) {
+ if (band)
+ reg_limit = IL_TX_POWER_DEFAULT_REGULATORY_24;
+ else
+ reg_limit = IL_TX_POWER_DEFAULT_REGULATORY_52;
+ }
+
+ /* Interpolate txpower calibration values for this channel,
+ * based on factory calibration tests on spaced channels. */
+ il4965_interpolate_chan(il, channel, &ch_eeprom_info);
+
+ /* calculate tx gain adjustment based on power supply voltage */
+ voltage = le16_to_cpu(il->calib_info->voltage);
+ init_voltage = (s32) le32_to_cpu(il->card_alive_init.voltage);
+ voltage_compensation =
+ il4965_get_voltage_compensation(voltage, init_voltage);
+
+ D_TXPOWER("curr volt %d eeprom volt %d volt comp %d\n", init_voltage,
+ voltage, voltage_compensation);
+
+ /* get current temperature (Celsius) */
+ current_temp = max(il->temperature, IL_TX_POWER_TEMPERATURE_MIN);
+	current_temp = min(current_temp, IL_TX_POWER_TEMPERATURE_MAX);
+ current_temp = KELVIN_TO_CELSIUS(current_temp);
+
+ /* select thermal txpower adjustment params, based on channel group
+ * (same frequency group used for mimo txatten adjustment) */
+ degrees_per_05db_num =
+ tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
+ degrees_per_05db_denom =
+ tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
+
+ /* get per-chain txpower values from factory measurements */
+ for (c = 0; c < 2; c++) {
+ measurement = &ch_eeprom_info.measurements[c][1];
+
+ /* txgain adjustment (in half-dB steps) based on difference
+ * between factory and current temperature */
+ factory_temp = measurement->temperature;
+ il4965_math_div_round((current_temp -
+ factory_temp) * degrees_per_05db_denom,
+ degrees_per_05db_num,
+ &temperature_comp[c]);
+
+ factory_gain_idx[c] = measurement->gain_idx;
+ factory_actual_pwr[c] = measurement->actual_pow;
+
+ D_TXPOWER("chain = %d\n", c);
+ D_TXPOWER("fctry tmp %d, " "curr tmp %d, comp %d steps\n",
+ factory_temp, current_temp, temperature_comp[c]);
+
+ D_TXPOWER("fctry idx %d, fctry pwr %d\n", factory_gain_idx[c],
+ factory_actual_pwr[c]);
+ }
+
+ /* for each of 33 bit-rates (including 1 for CCK) */
+ for (i = 0; i < POWER_TBL_NUM_ENTRIES; i++) {
+ u8 is_mimo_rate;
+ union il4965_tx_power_dual_stream tx_power;
+
+ /* for mimo, reduce each chain's txpower by half
+ * (3dB, 6 steps), so total output power is regulatory
+ * compliant. */
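+		/* Bit 3 of the entry idx selects the MIMO rate groups
+		 * (entries 8-15 and 24-31; see back_off_table above). */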
+ if (i & 0x8) {
+ current_regulatory =
+ reg_limit -
+ IL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
+ is_mimo_rate = 1;
+ } else {
+ current_regulatory = reg_limit;
+ is_mimo_rate = 0;
+ }
+
+ /* find txpower limit, either hardware or regulatory */
+ power_limit = saturation_power - back_off_table[i];
+ if (power_limit > current_regulatory)
+ power_limit = current_regulatory;
+
+ /* reduce user's txpower request if necessary
+ * for this rate on this channel */
+ target_power = user_target_power;
+ if (target_power > power_limit)
+ target_power = power_limit;
+
+ D_TXPOWER("rate %d sat %d reg %d usr %d tgt %d\n", i,
+ saturation_power - back_off_table[i],
+ current_regulatory, user_target_power, target_power);
+
+ /* for each of 2 Tx chains (radio transmitters) */
+ for (c = 0; c < 2; c++) {
+ s32 atten_value;
+
+ if (is_mimo_rate)
+ atten_value =
+ (s32) le32_to_cpu(il->card_alive_init.
+ tx_atten[txatten_grp][c]);
+ else
+ atten_value = 0;
+
+ /* calculate idx; higher idx means lower txpower */
+ power_idx =
+ (u8) (factory_gain_idx[c] -
+ (target_power - factory_actual_pwr[c]) -
+ temperature_comp[c] - voltage_compensation +
+ atten_value);
+
+/* D_TXPOWER("calculated txpower idx %d\n",
+ power_idx); */
+
+ if (power_idx < get_min_power_idx(i, band))
+ power_idx = get_min_power_idx(i, band);
+
+ /* adjust 5 GHz idx to support negative idxes */
+ if (!band)
+ power_idx += 9;
+
+ /* CCK, rate 32, reduce txpower for CCK */
+ if (i == POWER_TBL_CCK_ENTRY)
+ power_idx +=
+ IL_TX_POWER_CCK_COMPENSATION_C_STEP;
+
+ /* stay within the table! */
+ if (power_idx > 107) {
+ IL_WARN("txpower idx %d > 107\n", power_idx);
+ power_idx = 107;
+ }
+ if (power_idx < 0) {
+ IL_WARN("txpower idx %d < 0\n", power_idx);
+ power_idx = 0;
+ }
+
+ /* fill txpower command for this rate/chain */
+ tx_power.s.radio_tx_gain[c] =
+ gain_table[band][power_idx].radio;
+ tx_power.s.dsp_predis_atten[c] =
+ gain_table[band][power_idx].dsp;
+
+ D_TXPOWER("chain %d mimo %d idx %d "
+ "gain 0x%02x dsp %d\n", c, atten_value,
+ power_idx, tx_power.s.radio_tx_gain[c],
+ tx_power.s.dsp_predis_atten[c]);
+ } /* for each chain */
+
+ tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
+
+ } /* for each rate */
+
+ return 0;
+}
+
+/**
+ * il4965_send_tx_power - Configure the TXPOWER level user limit
+ *
+ * Uses the active RXON for channel, band, and characteristics (ht40, high)
+ * The power limit is taken from il->tx_power_user_lmt.
+ */
+static int
+il4965_send_tx_power(struct il_priv *il)
+{
+ struct il4965_txpowertable_cmd cmd = { 0 };
+ int ret;
+ u8 band = 0;
+ bool is_ht40 = false;
+ u8 ctrl_chan_high = 0;
+ struct il_rxon_context *ctx = &il->ctx;
+
+ if (WARN_ONCE
+ (test_bit(S_SCAN_HW, &il->status),
+ "TX Power requested while scanning!\n"))
+ return -EAGAIN;
+
+ band = il->band == IEEE80211_BAND_2GHZ;
+
+ is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);
+
+ if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
+ ctrl_chan_high = 1;
+
+ cmd.band = band;
+ cmd.channel = ctx->active.channel;
+
+ ret =
+ il4965_fill_txpower_tbl(il, band, le16_to_cpu(ctx->active.channel),
+ is_ht40, ctrl_chan_high, &cmd.tx_power);
+ if (ret)
+ goto out;
+
+ ret = il_send_cmd_pdu(il, C_TX_PWR_TBL, sizeof(cmd), &cmd);
+
+out:
+ return ret;
+}
+
+static int
+il4965_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ int ret = 0;
+ struct il4965_rxon_assoc_cmd rxon_assoc;
+ const struct il_rxon_cmd *rxon1 = &ctx->staging;
+ const struct il_rxon_cmd *rxon2 = &ctx->active;
+
+ if (rxon1->flags == rxon2->flags &&
+ rxon1->filter_flags == rxon2->filter_flags &&
+ rxon1->cck_basic_rates == rxon2->cck_basic_rates &&
+ rxon1->ofdm_ht_single_stream_basic_rates ==
+ rxon2->ofdm_ht_single_stream_basic_rates &&
+ rxon1->ofdm_ht_dual_stream_basic_rates ==
+ rxon2->ofdm_ht_dual_stream_basic_rates &&
+ rxon1->rx_chain == rxon2->rx_chain &&
+ rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates) {
+ D_INFO("Using current RXON_ASSOC. Not resending.\n");
+ return 0;
+ }
+
+ rxon_assoc.flags = ctx->staging.flags;
+ rxon_assoc.filter_flags = ctx->staging.filter_flags;
+ rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
+ rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
+ rxon_assoc.reserved = 0;
+ rxon_assoc.ofdm_ht_single_stream_basic_rates =
+ ctx->staging.ofdm_ht_single_stream_basic_rates;
+ rxon_assoc.ofdm_ht_dual_stream_basic_rates =
+ ctx->staging.ofdm_ht_dual_stream_basic_rates;
+ rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
+
+ ret =
+ il_send_cmd_pdu_async(il, C_RXON_ASSOC, sizeof(rxon_assoc),
+ &rxon_assoc, NULL);
+
+ return ret;
+}
+
+static int
+il4965_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ /* cast away the const for active_rxon in this function */
+ struct il_rxon_cmd *active_rxon = (void *)&ctx->active;
+ int ret;
+ bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
+
+ if (!il_is_alive(il))
+ return -EBUSY;
+
+ if (!ctx->is_active)
+ return 0;
+
+ /* always get timestamp with Rx frame */
+ ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
+
+ ret = il_check_rxon_cmd(il, ctx);
+ if (ret) {
+ IL_ERR("Invalid RXON configuration. Not committing.\n");
+ return -EINVAL;
+ }
+
+ /*
+ * receive commit_rxon request
+ * abort any previous channel switch if still in process
+ */
+ if (test_bit(S_CHANNEL_SWITCH_PENDING, &il->status) &&
+ il->switch_channel != ctx->staging.channel) {
+ D_11H("abort channel switch on %d\n",
+ le16_to_cpu(il->switch_channel));
+ il_chswitch_done(il, false);
+ }
+
+ /* If we don't need to send a full RXON, we can use
+ * il_rxon_assoc_cmd which is used to reconfigure filter
+ * and other flags for the current radio configuration. */
+ if (!il_full_rxon_required(il, ctx)) {
+ ret = il_send_rxon_assoc(il, ctx);
+ if (ret) {
+ IL_ERR("Error setting RXON_ASSOC (%d)\n", ret);
+ return ret;
+ }
+
+ memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+ il_print_rx_config_cmd(il, ctx);
+ /*
+ * We do not commit tx power settings while channel changing,
+ * do it now if tx power changed.
+ */
+ il_set_tx_power(il, il->tx_power_next, false);
+ return 0;
+ }
+
+ /* If we are currently associated and the new config also wants the
+ * associated mask enabled, we must clear the associated bit from the
+ * active configuration before we apply the new config */
+ if (il_is_associated_ctx(ctx) && new_assoc) {
+ D_INFO("Toggling associated bit on current RXON\n");
+ active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+
+ ret =
+ il_send_cmd_pdu(il, ctx->rxon_cmd,
+ sizeof(struct il_rxon_cmd), active_rxon);
+
+ /* If the mask clearing failed then we set
+ * active_rxon back to what it was previously */
+ if (ret) {
+ active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
+ IL_ERR("Error clearing ASSOC_MSK (%d)\n", ret);
+ return ret;
+ }
+ il_clear_ucode_stations(il, ctx);
+ il_restore_stations(il, ctx);
+ ret = il4965_restore_default_wep_keys(il, ctx);
+ if (ret) {
+ IL_ERR("Failed to restore WEP keys (%d)\n", ret);
+ return ret;
+ }
+ }
+
+ D_INFO("Sending RXON\n" "* with%s RXON_FILTER_ASSOC_MSK\n"
+ "* channel = %d\n" "* bssid = %pM\n", (new_assoc ? "" : "out"),
+ le16_to_cpu(ctx->staging.channel), ctx->staging.bssid_addr);
+
+ il_set_rxon_hwcrypto(il, ctx, !il->cfg->mod_params->sw_crypto);
+
+ /* Apply the new configuration
+ * RXON unassoc clears the station table in uCode so restoration of
+ * stations is needed after it (the RXON command) completes
+ */
+ if (!new_assoc) {
+ ret =
+ il_send_cmd_pdu(il, ctx->rxon_cmd,
+ sizeof(struct il_rxon_cmd), &ctx->staging);
+ if (ret) {
+ IL_ERR("Error setting new RXON (%d)\n", ret);
+ return ret;
+ }
+ D_INFO("Return from !new_assoc RXON.\n");
+ memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+ il_clear_ucode_stations(il, ctx);
+ il_restore_stations(il, ctx);
+ ret = il4965_restore_default_wep_keys(il, ctx);
+ if (ret) {
+ IL_ERR("Failed to restore WEP keys (%d)\n", ret);
+ return ret;
+ }
+ }
+ if (new_assoc) {
+ il->start_calib = 0;
+ /* Apply the new configuration
+ * RXON assoc doesn't clear the station table in uCode,
+ */
+ ret =
+ il_send_cmd_pdu(il, ctx->rxon_cmd,
+ sizeof(struct il_rxon_cmd), &ctx->staging);
+ if (ret) {
+ IL_ERR("Error setting new RXON (%d)\n", ret);
+ return ret;
+ }
+ memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
+ }
+ il_print_rx_config_cmd(il, ctx);
+
+ il4965_init_sensitivity(il);
+
+ /* If we issue a new RXON command which required a tune then we must
+ * send a new TXPOWER command or we won't be able to Tx any frames */
+ ret = il_set_tx_power(il, il->tx_power_next, true);
+ if (ret) {
+ IL_ERR("Error sending TX power (%d)\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+il4965_hw_channel_switch(struct il_priv *il,
+ struct ieee80211_channel_switch *ch_switch)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+ int rc;
+ u8 band = 0;
+ bool is_ht40 = false;
+ u8 ctrl_chan_high = 0;
+ struct il4965_channel_switch_cmd cmd;
+ const struct il_channel_info *ch_info;
+ u32 switch_time_in_usec, ucode_switch_time;
+ u16 ch;
+ u32 tsf_low;
+ u8 switch_count;
+ u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
+ struct ieee80211_vif *vif = ctx->vif;
+ band = il->band == IEEE80211_BAND_2GHZ;
+
+ is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);
+
+ if (is_ht40 && (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
+ ctrl_chan_high = 1;
+
+ cmd.band = band;
+ cmd.expect_beacon = 0;
+ ch = ch_switch->channel->hw_value;
+ cmd.channel = cpu_to_le16(ch);
+ cmd.rxon_flags = ctx->staging.flags;
+ cmd.rxon_filter_flags = ctx->staging.filter_flags;
+ switch_count = ch_switch->count;
+ tsf_low = ch_switch->timestamp & 0x0ffffffff;
+ /*
+ * calculate the ucode channel switch time
+ * adding TSF as one of the factor for when to switch
+ */
+ if (il->ucode_beacon_time > tsf_low && beacon_interval) {
+ if (switch_count >
+ ((il->ucode_beacon_time - tsf_low) / beacon_interval)) {
+ switch_count -=
+ (il->ucode_beacon_time - tsf_low) / beacon_interval;
+ } else
+ switch_count = 0;
+ }
+ if (switch_count <= 1)
+ cmd.switch_time = cpu_to_le32(il->ucode_beacon_time);
+ else {
+ switch_time_in_usec =
+ vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
+ ucode_switch_time =
+ il_usecs_to_beacons(il, switch_time_in_usec,
+ beacon_interval);
+ cmd.switch_time =
+ il_add_beacon_time(il, il->ucode_beacon_time,
+ ucode_switch_time, beacon_interval);
+ }
+ D_11H("uCode time for the switch is 0x%x\n", cmd.switch_time);
+ ch_info = il_get_channel_info(il, il->band, ch);
+ if (ch_info)
+ cmd.expect_beacon = il_is_channel_radar(ch_info);
+ else {
+ IL_ERR("invalid channel switch from %u to %u\n",
+ ctx->active.channel, ch);
+ return -EFAULT;
+ }
+
+ rc = il4965_fill_txpower_tbl(il, band, ch, is_ht40, ctrl_chan_high,
+ &cmd.tx_power);
+ if (rc) {
+ D_11H("error:%d fill txpower_tbl\n", rc);
+ return rc;
+ }
+
+ return il_send_cmd_pdu(il, C_CHANNEL_SWITCH, sizeof(cmd), &cmd);
+}
+
+/**
+ * il4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
+ */
+static void
+il4965_txq_update_byte_cnt_tbl(struct il_priv *il, struct il_tx_queue *txq,
+ u16 byte_cnt)
+{
+ struct il4965_scd_bc_tbl *scd_bc_tbl = il->scd_bc_tbls.addr;
+ int txq_id = txq->q.id;
+ int write_ptr = txq->q.write_ptr;
+ int len = byte_cnt + IL_TX_CRC_SIZE + IL_TX_DELIMITER_SIZE;
+ __le16 bc_ent;
+
+ WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
+
+ bc_ent = cpu_to_le16(len & 0xFFF);
+ /* Set up byte count within first 256 entries */
+ scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
+
+ /* If within first 64 entries, duplicate at end */
+ if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
+ bc_ent;
+}
+
+/**
+ * il4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
+ * @il: driver private data (the temperature reading comes from uCode stats)
+ *
+ * A return of <0 indicates bogus data in the stats
+ */
+static int
+il4965_hw_get_temperature(struct il_priv *il)
+{
+ s32 temperature;
+ s32 vt;
+ s32 R1, R2, R3;
+ u32 R4;
+
+ if (test_bit(S_TEMPERATURE, &il->status) &&
+ (il->_4965.stats.flag & STATS_REPLY_FLG_HT40_MODE_MSK)) {
+ D_TEMP("Running HT40 temperature calibration\n");
+ R1 = (s32) le32_to_cpu(il->card_alive_init.therm_r1[1]);
+ R2 = (s32) le32_to_cpu(il->card_alive_init.therm_r2[1]);
+ R3 = (s32) le32_to_cpu(il->card_alive_init.therm_r3[1]);
+ R4 = le32_to_cpu(il->card_alive_init.therm_r4[1]);
+ } else {
+ D_TEMP("Running temperature calibration\n");
+ R1 = (s32) le32_to_cpu(il->card_alive_init.therm_r1[0]);
+ R2 = (s32) le32_to_cpu(il->card_alive_init.therm_r2[0]);
+ R3 = (s32) le32_to_cpu(il->card_alive_init.therm_r3[0]);
+ R4 = le32_to_cpu(il->card_alive_init.therm_r4[0]);
+ }
+
+ /*
+ * Temperature is only 23 bits, so sign extend out to 32.
+ *
+ * NOTE If we haven't received a stats notification yet
+ * with an updated temperature, use R4 provided to us in the
+ * "initialize" ALIVE response.
+ */
+ if (!test_bit(S_TEMPERATURE, &il->status))
+ vt = sign_extend32(R4, 23);
+ else
+ vt = sign_extend32(le32_to_cpu
+ (il->_4965.stats.general.common.temperature),
+ 23);
+
+ D_TEMP("Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
+
+ if (R3 == R1) {
+ IL_ERR("Calibration conflict R1 == R3\n");
+ return -1;
+ }
+
+ /* Calculate temperature in degrees Kelvin, adjust by 97%.
+ * Add offset to center the adjustment around 0 degrees Centigrade. */
+ temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
+ temperature /= (R3 - R1);
+ temperature =
+ (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
+
+ D_TEMP("Calibrated temperature: %dK, %dC\n", temperature,
+ KELVIN_TO_CELSIUS(temperature));
+
+ return temperature;
+}
+
+/* Adjust Txpower only if temperature variance is greater than threshold. */
+#define IL_TEMPERATURE_THRESHOLD 3
+
+/**
+ * il4965_is_temp_calib_needed - determines if new calibration is needed
+ *
+ * If the temperature has changed sufficiently, then a recalibration
+ * is needed.
+ *
+ * Assumes the caller will replace il->last_temperature once the
+ * calibration has executed.
+ */
+static int
+il4965_is_temp_calib_needed(struct il_priv *il)
+{
+ int temp_diff;
+
+ if (!test_bit(S_STATS, &il->status)) {
+ D_TEMP("Temperature not updated -- no stats.\n");
+ return 0;
+ }
+
+ temp_diff = il->temperature - il->last_temperature;
+
+ /* get absolute value */
+ if (temp_diff < 0) {
+ D_POWER("Getting cooler, delta %d\n", temp_diff);
+ temp_diff = -temp_diff;
+ } else if (temp_diff == 0)
+ D_POWER("Temperature unchanged\n");
+ else
+ D_POWER("Getting warmer, delta %d\n", temp_diff);
+
+ if (temp_diff < IL_TEMPERATURE_THRESHOLD) {
+ D_POWER(" => thermal txpower calib not needed\n");
+ return 0;
+ }
+
+ D_POWER(" => thermal txpower calib needed\n");
+
+ return 1;
+}
+
+static void
+il4965_temperature_calib(struct il_priv *il)
+{
+ s32 temp;
+
+ temp = il4965_hw_get_temperature(il);
+ if (IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
+ return;
+
+ if (il->temperature != temp) {
+ if (il->temperature)
+ D_TEMP("Temperature changed " "from %dC to %dC\n",
+ KELVIN_TO_CELSIUS(il->temperature),
+ KELVIN_TO_CELSIUS(temp));
+ else
+ D_TEMP("Temperature " "initialized to %dC\n",
+ KELVIN_TO_CELSIUS(temp));
+ }
+
+ il->temperature = temp;
+ set_bit(S_TEMPERATURE, &il->status);
+
+ if (!il->disable_tx_power_cal &&
+ unlikely(!test_bit(S_SCANNING, &il->status)) &&
+ il4965_is_temp_calib_needed(il))
+ queue_work(il->workqueue, &il->txpower_work);
+}
+
+static u16
+il4965_get_hcmd_size(u8 cmd_id, u16 len)
+{
+ switch (cmd_id) {
+ case C_RXON:
+ return (u16) sizeof(struct il4965_rxon_cmd);
+ default:
+ return len;
+ }
+}
+
+static u16
+il4965_build_addsta_hcmd(const struct il_addsta_cmd *cmd, u8 * data)
+{
+ struct il4965_addsta_cmd *addsta = (struct il4965_addsta_cmd *)data;
+ addsta->mode = cmd->mode;
+ memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
+ memcpy(&addsta->key, &cmd->key, sizeof(struct il4965_keyinfo));
+ addsta->station_flags = cmd->station_flags;
+ addsta->station_flags_msk = cmd->station_flags_msk;
+ addsta->tid_disable_tx = cmd->tid_disable_tx;
+ addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
+ addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
+ addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
+ addsta->sleep_tx_count = cmd->sleep_tx_count;
+ addsta->reserved1 = cpu_to_le16(0);
+ addsta->reserved2 = cpu_to_le16(0);
+
+ return (u16) sizeof(struct il4965_addsta_cmd);
+}
+
+static inline u32
+il4965_get_scd_ssn(struct il4965_tx_resp *tx_resp)
+{
+ return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
+}
+
+static inline u32
+il4965_tx_status_to_mac80211(u32 status)
+{
+ status &= TX_STATUS_MSK;
+
+ switch (status) {
+ case TX_STATUS_SUCCESS:
+ case TX_STATUS_DIRECT_DONE:
+ return IEEE80211_TX_STAT_ACK;
+ case TX_STATUS_FAIL_DEST_PS:
+ return IEEE80211_TX_STAT_TX_FILTERED;
+ default:
+ return 0;
+ }
+}
+
+static inline bool
+il4965_is_tx_success(u32 status)
+{
+ status &= TX_STATUS_MSK;
+ return (status == TX_STATUS_SUCCESS || status == TX_STATUS_DIRECT_DONE);
+}
+
+/**
+ * il4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
+ */
+static int
+il4965_tx_status_reply_tx(struct il_priv *il, struct il_ht_agg *agg,
+ struct il4965_tx_resp *tx_resp, int txq_id,
+ u16 start_idx)
+{
+ u16 status;
+ struct agg_tx_status *frame_status = tx_resp->u.agg_status;
+ struct ieee80211_tx_info *info = NULL;
+ struct ieee80211_hdr *hdr = NULL;
+ u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
+ int i, sh, idx;
+ u16 seq;
+ if (agg->wait_for_ba)
+ D_TX_REPLY("got tx response w/o block-ack\n");
+
+ agg->frame_count = tx_resp->frame_count;
+ agg->start_idx = start_idx;
+ agg->rate_n_flags = rate_n_flags;
+ agg->bitmap = 0;
+
+ /* num frames attempted by Tx command */
+ if (agg->frame_count == 1) {
+ /* Only one frame was attempted; no block-ack will arrive */
+ status = le16_to_cpu(frame_status[0].status);
+ idx = start_idx;
+
+ D_TX_REPLY("FrameCnt = %d, StartIdx=%d idx=%d\n",
+ agg->frame_count, agg->start_idx, idx);
+
+ info = IEEE80211_SKB_CB(il->txq[txq_id].txb[idx].skb);
+ info->status.rates[0].count = tx_resp->failure_frame + 1;
+ info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+ info->flags |= il4965_tx_status_to_mac80211(status);
+ il4965_hwrate_to_tx_control(il, rate_n_flags, info);
+
+ D_TX_REPLY("1 Frame 0x%x failure :%d\n", status & 0xff,
+ tx_resp->failure_frame);
+ D_TX_REPLY("Rate Info rate_n_flags=%x\n", rate_n_flags);
+
+ agg->wait_for_ba = 0;
+ } else {
+ /* Two or more frames were attempted; expect block-ack */
+ u64 bitmap = 0;
+ int start = agg->start_idx;
+
+ /* Construct bit-map of pending frames within Tx win */
+ for (i = 0; i < agg->frame_count; i++) {
+ u16 sc;
+ status = le16_to_cpu(frame_status[i].status);
+ seq = le16_to_cpu(frame_status[i].sequence);
+ idx = SEQ_TO_IDX(seq);
+ txq_id = SEQ_TO_QUEUE(seq);
+
+ if (status &
+ (AGG_TX_STATE_FEW_BYTES_MSK |
+ AGG_TX_STATE_ABORT_MSK))
+ continue;
+
+ D_TX_REPLY("FrameCnt = %d, txq_id=%d idx=%d\n",
+ agg->frame_count, txq_id, idx);
+
+ hdr = il_tx_queue_get_hdr(il, txq_id, idx);
+ if (!hdr) {
+ IL_ERR("BUG_ON idx doesn't point to valid skb"
+ " idx=%d, txq_id=%d\n", idx, txq_id);
+ return -1;
+ }
+
+ sc = le16_to_cpu(hdr->seq_ctrl);
+ if (idx != (SEQ_TO_SN(sc) & 0xff)) {
+ IL_ERR("BUG_ON idx doesn't match seq control"
+ " idx=%d, seq_idx=%d, seq=%d\n", idx,
+ SEQ_TO_SN(sc), hdr->seq_ctrl);
+ return -1;
+ }
+
+ D_TX_REPLY("AGG Frame i=%d idx %d seq=%d\n", i, idx,
+ SEQ_TO_SN(sc));
+
+ sh = idx - start;
+ if (sh > 64) {
+ sh = (start - idx) + 0xff;
+ bitmap = bitmap << sh;
+ sh = 0;
+ start = idx;
+ } else if (sh < -64)
+ sh = 0xff - (start - idx);
+ else if (sh < 0) {
+ sh = start - idx;
+ start = idx;
+ bitmap = bitmap << sh;
+ sh = 0;
+ }
+ bitmap |= 1ULL << sh;
+ D_TX_REPLY("start=%d bitmap=0x%llx\n", start,
+ (unsigned long long)bitmap);
+ }
+
+ agg->bitmap = bitmap;
+ agg->start_idx = start;
+ D_TX_REPLY("Frames %d start_idx=%d bitmap=0x%llx\n",
+ agg->frame_count, agg->start_idx,
+ (unsigned long long)agg->bitmap);
+
+ if (bitmap)
+ agg->wait_for_ba = 1;
+ }
+ return 0;
+}
+
+static u8
+il4965_find_station(struct il_priv *il, const u8 * addr)
+{
+ int i;
+ int start = 0;
+ int ret = IL_INVALID_STATION;
+ unsigned long flags;
+
+ if ((il->iw_mode == NL80211_IFTYPE_ADHOC))
+ start = IL_STA_ID;
+
+ if (is_broadcast_ether_addr(addr))
+ return il->ctx.bcast_sta_id;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ for (i = start; i < il->hw_params.max_stations; i++)
+ if (il->stations[i].used &&
+ (!compare_ether_addr(il->stations[i].sta.sta.addr, addr))) {
+ ret = i;
+ goto out;
+ }
+
+ D_ASSOC("can not find STA %pM total %d\n", addr, il->num_stations);
+
+out:
+ /*
+ * It may be possible that more commands interacting with stations
+ * arrive before we have completed processing the addition of the
+ * station
+ */
+ if (ret != IL_INVALID_STATION &&
+ (!(il->stations[ret].used & IL_STA_UCODE_ACTIVE) ||
+ ((il->stations[ret].used & IL_STA_UCODE_ACTIVE) &&
+ (il->stations[ret].used & IL_STA_UCODE_INPROGRESS)))) {
+ IL_ERR("Requested station info for sta %d before ready.\n",
+ ret);
+ ret = IL_INVALID_STATION;
+ }
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return ret;
+}
+
+static int
+il4965_get_ra_sta_id(struct il_priv *il, struct ieee80211_hdr *hdr)
+{
+ if (il->iw_mode == NL80211_IFTYPE_STATION) {
+ return IL_AP_ID;
+ } else {
+ u8 *da = ieee80211_get_DA(hdr);
+ return il4965_find_station(il, da);
+ }
+}
+
+/**
+ * il4965_hdl_tx - Handle standard (non-aggregation) Tx response
+ */
+static void
+il4965_hdl_tx(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int txq_id = SEQ_TO_QUEUE(sequence);
+ int idx = SEQ_TO_IDX(sequence);
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct ieee80211_hdr *hdr;
+ struct ieee80211_tx_info *info;
+ struct il4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
+ u32 status = le32_to_cpu(tx_resp->u.status);
+ int uninitialized_var(tid);
+ int sta_id;
+ int freed;
+ u8 *qc = NULL;
+ unsigned long flags;
+
+ if (idx >= txq->q.n_bd || il_queue_used(&txq->q, idx) == 0) {
+ IL_ERR("Read idx for DMA queue txq_id (%d) idx %d "
+ "is out of range [0-%d] %d %d\n", txq_id, idx,
+ txq->q.n_bd, txq->q.write_ptr, txq->q.read_ptr);
+ return;
+ }
+
+ txq->time_stamp = jiffies;
+ info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
+ memset(&info->status, 0, sizeof(info->status));
+
+ hdr = il_tx_queue_get_hdr(il, txq_id, idx);
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & 0xf;
+ }
+
+ sta_id = il4965_get_ra_sta_id(il, hdr);
+ if (txq->sched_retry && unlikely(sta_id == IL_INVALID_STATION)) {
+ IL_ERR("Station not known\n");
+ return;
+ }
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ if (txq->sched_retry) {
+ const u32 scd_ssn = il4965_get_scd_ssn(tx_resp);
+ struct il_ht_agg *agg = NULL;
+ WARN_ON(!qc);
+
+ agg = &il->stations[sta_id].tid[tid].agg;
+
+ il4965_tx_status_reply_tx(il, agg, tx_resp, txq_id, idx);
+
+ /* check if BAR is needed */
+ if ((tx_resp->frame_count == 1) &&
+ !il4965_is_tx_success(status))
+ info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
+
+ if (txq->q.read_ptr != (scd_ssn & 0xff)) {
+ idx = il_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
+ D_TX_REPLY("Retry scheduler reclaim scd_ssn "
+ "%d idx %d\n", scd_ssn, idx);
+ freed = il4965_tx_queue_reclaim(il, txq_id, idx);
+ if (qc)
+ il4965_free_tfds_in_queue(il, sta_id, tid,
+ freed);
+
+ if (il->mac80211_registered &&
+ il_queue_space(&txq->q) > txq->q.low_mark &&
+ agg->state != IL_EMPTYING_HW_QUEUE_DELBA)
+ il_wake_queue(il, txq);
+ }
+ } else {
+ info->status.rates[0].count = tx_resp->failure_frame + 1;
+ info->flags |= il4965_tx_status_to_mac80211(status);
+ il4965_hwrate_to_tx_control(il,
+ le32_to_cpu(tx_resp->rate_n_flags),
+ info);
+
+ D_TX_REPLY("TXQ %d status %s (0x%08x) "
+ "rate_n_flags 0x%x retries %d\n", txq_id,
+ il4965_get_tx_fail_reason(status), status,
+ le32_to_cpu(tx_resp->rate_n_flags),
+ tx_resp->failure_frame);
+
+ freed = il4965_tx_queue_reclaim(il, txq_id, idx);
+ if (qc && likely(sta_id != IL_INVALID_STATION))
+ il4965_free_tfds_in_queue(il, sta_id, tid, freed);
+ else if (sta_id == IL_INVALID_STATION)
+ D_TX_REPLY("Station not known\n");
+
+ if (il->mac80211_registered &&
+ il_queue_space(&txq->q) > txq->q.low_mark)
+ il_wake_queue(il, txq);
+ }
+ if (qc && likely(sta_id != IL_INVALID_STATION))
+ il4965_txq_check_empty(il, sta_id, tid, txq_id);
+
+ il4965_check_abort_status(il, tx_resp->frame_count, status);
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+
+/* Set up 4965-specific Rx frame reply handlers */
+static void
+il4965_handler_setup(struct il_priv *il)
+{
+ /* Legacy Rx frames */
+ il->handlers[N_RX] = il4965_hdl_rx;
+ /* Tx response */
+ il->handlers[C_TX] = il4965_hdl_tx;
+}
+
+static struct il_hcmd_ops il4965_hcmd = {
+ .rxon_assoc = il4965_send_rxon_assoc,
+ .commit_rxon = il4965_commit_rxon,
+ .set_rxon_chain = il4965_set_rxon_chain,
+};
+
+static void
+il4965_post_scan(struct il_priv *il)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+
+ /*
+ * Since setting the RXON may have been deferred while
+ * performing the scan, fire one off if needed
+ */
+ if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
+ il_commit_rxon(il, ctx);
+}
+
+static void
+il4965_post_associate(struct il_priv *il)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+ struct ieee80211_vif *vif = ctx->vif;
+ struct ieee80211_conf *conf = NULL;
+ int ret = 0;
+
+ if (!vif || !il->is_open)
+ return;
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ il_scan_cancel_timeout(il, 200);
+
+ conf = &il->hw->conf;
+
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ il_commit_rxon(il, ctx);
+
+ ret = il_send_rxon_timing(il, ctx);
+ if (ret)
+ IL_WARN("RXON timing - " "Attempting to continue.\n");
+
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+
+ il_set_rxon_ht(il, &il->current_ht_config);
+
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+
+ ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
+
+ D_ASSOC("assoc id %d beacon interval %d\n", vif->bss_conf.aid,
+ vif->bss_conf.beacon_int);
+
+ if (vif->bss_conf.use_short_preamble)
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+ if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+ if (vif->bss_conf.use_short_slot)
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+ }
+
+ il_commit_rxon(il, ctx);
+
+ D_ASSOC("Associated as %d to: %pM\n", vif->bss_conf.aid,
+ ctx->active.bssid_addr);
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ il4965_send_beacon_cmd(il);
+ break;
+ default:
+ IL_ERR("%s Should not be called in %d mode\n", __func__,
+ vif->type);
+ break;
+ }
+
+ /* the chain noise calibration will enable PM upon completion
+ * If chain noise has already been run, then we need to enable
+ * power management here */
+ if (il->chain_noise_data.state == IL_CHAIN_NOISE_DONE)
+ il_power_update_mode(il, false);
+
+ /* Enable Rx differential gain and sensitivity calibrations */
+ il4965_chain_noise_reset(il);
+ il->start_calib = 1;
+}
+
+static void
+il4965_config_ap(struct il_priv *il)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+ struct ieee80211_vif *vif = ctx->vif;
+ int ret = 0;
+
+ lockdep_assert_held(&il->mutex);
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ /* The following should be done only at AP bring up */
+ if (!il_is_associated_ctx(ctx)) {
+
+ /* RXON - unassoc (to set timing command) */
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ il_commit_rxon(il, ctx);
+
+ /* RXON Timing */
+ ret = il_send_rxon_timing(il, ctx);
+ if (ret)
+ IL_WARN("RXON timing failed - "
+ "Attempting to continue.\n");
+
+ /* AP has all antennas */
+ il->chain_noise_data.active_chains = il->hw_params.valid_rx_ant;
+ il_set_rxon_ht(il, &il->current_ht_config);
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+
+ ctx->staging.assoc_id = 0;
+
+ if (vif->bss_conf.use_short_preamble)
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+
+ if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
+ if (vif->bss_conf.use_short_slot)
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+ }
+ /* need to send beacon cmd before committing assoc RXON! */
+ il4965_send_beacon_cmd(il);
+ /* restore RXON assoc */
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
+ il_commit_rxon(il, ctx);
+ }
+ il4965_send_beacon_cmd(il);
+}
+
+static struct il_hcmd_utils_ops il4965_hcmd_utils = {
+ .get_hcmd_size = il4965_get_hcmd_size,
+ .build_addsta_hcmd = il4965_build_addsta_hcmd,
+ .request_scan = il4965_request_scan,
+ .post_scan = il4965_post_scan,
+};
+
+static struct il_lib_ops il4965_lib = {
+ .set_hw_params = il4965_hw_set_hw_params,
+ .txq_update_byte_cnt_tbl = il4965_txq_update_byte_cnt_tbl,
+ .txq_attach_buf_to_tfd = il4965_hw_txq_attach_buf_to_tfd,
+ .txq_free_tfd = il4965_hw_txq_free_tfd,
+ .txq_init = il4965_hw_tx_queue_init,
+ .handler_setup = il4965_handler_setup,
+ .is_valid_rtc_data_addr = il4965_hw_valid_rtc_data_addr,
+ .init_alive_start = il4965_init_alive_start,
+ .load_ucode = il4965_load_bsm,
+ .dump_nic_error_log = il4965_dump_nic_error_log,
+ .dump_fh = il4965_dump_fh,
+ .set_channel_switch = il4965_hw_channel_switch,
+ .apm_ops = {
+ .init = il_apm_init,
+ .config = il4965_nic_config,
+ },
+ .eeprom_ops = {
+ .regulatory_bands = {
+ EEPROM_REGULATORY_BAND_1_CHANNELS,
+ EEPROM_REGULATORY_BAND_2_CHANNELS,
+ EEPROM_REGULATORY_BAND_3_CHANNELS,
+ EEPROM_REGULATORY_BAND_4_CHANNELS,
+ EEPROM_REGULATORY_BAND_5_CHANNELS,
+ EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
+ EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS},
+ .acquire_semaphore = il4965_eeprom_acquire_semaphore,
+ .release_semaphore = il4965_eeprom_release_semaphore,
+ },
+ .send_tx_power = il4965_send_tx_power,
+ .update_chain_flags = il4965_update_chain_flags,
+ .temp_ops = {
+ .temperature = il4965_temperature_calib,
+ },
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ .debugfs_ops = {
+ .rx_stats_read = il4965_ucode_rx_stats_read,
+ .tx_stats_read = il4965_ucode_tx_stats_read,
+ .general_stats_read = il4965_ucode_general_stats_read,
+ },
+#endif
+};
+
+static const struct il_legacy_ops il4965_legacy_ops = {
+ .post_associate = il4965_post_associate,
+ .config_ap = il4965_config_ap,
+ .manage_ibss_station = il4965_manage_ibss_station,
+ .update_bcast_stations = il4965_update_bcast_stations,
+};
+
+struct ieee80211_ops il4965_hw_ops = {
+ .tx = il4965_mac_tx,
+ .start = il4965_mac_start,
+ .stop = il4965_mac_stop,
+ .add_interface = il_mac_add_interface,
+ .remove_interface = il_mac_remove_interface,
+ .change_interface = il_mac_change_interface,
+ .config = il_mac_config,
+ .configure_filter = il4965_configure_filter,
+ .set_key = il4965_mac_set_key,
+ .update_tkip_key = il4965_mac_update_tkip_key,
+ .conf_tx = il_mac_conf_tx,
+ .reset_tsf = il_mac_reset_tsf,
+ .bss_info_changed = il_mac_bss_info_changed,
+ .ampdu_action = il4965_mac_ampdu_action,
+ .hw_scan = il_mac_hw_scan,
+ .sta_add = il4965_mac_sta_add,
+ .sta_remove = il_mac_sta_remove,
+ .channel_switch = il4965_mac_channel_switch,
+ .tx_last_beacon = il_mac_tx_last_beacon,
+};
+
+static const struct il_ops il4965_ops = {
+ .lib = &il4965_lib,
+ .hcmd = &il4965_hcmd,
+ .utils = &il4965_hcmd_utils,
+ .led = &il4965_led_ops,
+ .legacy = &il4965_legacy_ops,
+ .ieee80211_ops = &il4965_hw_ops,
+};
+
+static struct il_base_params il4965_base_params = {
+ .eeprom_size = IL4965_EEPROM_IMG_SIZE,
+ .num_of_queues = IL49_NUM_QUEUES,
+ .num_of_ampdu_queues = IL49_NUM_AMPDU_QUEUES,
+ .pll_cfg_val = 0,
+ .set_l0s = true,
+ .use_bsm = true,
+ .led_compensation = 61,
+ .chain_noise_num_beacons = IL4965_CAL_NUM_BEACONS,
+ .wd_timeout = IL_DEF_WD_TIMEOUT,
+ .temperature_kelvin = true,
+ .ucode_tracing = true,
+ .sensitivity_calib_by_driver = true,
+ .chain_noise_calib_by_driver = true,
+};
+
+struct il_cfg il4965_cfg = {
+ .name = "Intel(R) Wireless WiFi Link 4965AGN",
+ .fw_name_pre = IL4965_FW_PRE,
+ .ucode_api_max = IL4965_UCODE_API_MAX,
+ .ucode_api_min = IL4965_UCODE_API_MIN,
+ .sku = IL_SKU_A | IL_SKU_G | IL_SKU_N,
+ .valid_tx_ant = ANT_AB,
+ .valid_rx_ant = ANT_ABC,
+ .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
+ .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
+ .ops = &il4965_ops,
+ .mod_params = &il4965_mod_params,
+ .base_params = &il4965_base_params,
+ .led_mode = IL_LED_BLINK,
+ /*
+ * Force use of chains B and C for scan RX on 5 GHz band
+ * because the device has off-channel reception on chain A.
+ */
+ .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
+};
+
+/* Module firmware */
+MODULE_FIRMWARE(IL4965_MODULE_FIRMWARE(IL4965_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlegacy/4965.h b/drivers/net/wireless/iwlegacy/4965.h
new file mode 100644
index 0000000..f280e01
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/4965.h
@@ -0,0 +1,1301 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+
+#ifndef __il_4965_h__
+#define __il_4965_h__
+
+struct il_rx_queue;
+struct il_rx_buf;
+struct il_rx_pkt;
+struct il_tx_queue;
+struct il_rxon_context;
+
+/* configuration for the _4965 devices */
+extern struct il_cfg il4965_cfg;
+
+extern struct il_mod_params il4965_mod_params;
+
+extern struct ieee80211_ops il4965_hw_ops;
+
+/* tx queue */
+void il4965_free_tfds_in_queue(struct il_priv *il, int sta_id, int tid,
+ int freed);
+
+/* RXON */
+void il4965_set_rxon_chain(struct il_priv *il, struct il_rxon_context *ctx);
+
+/* uCode */
+int il4965_verify_ucode(struct il_priv *il);
+
+/* lib */
+void il4965_check_abort_status(struct il_priv *il, u8 frame_count, u32 status);
+
+void il4965_rx_queue_reset(struct il_priv *il, struct il_rx_queue *rxq);
+int il4965_rx_init(struct il_priv *il, struct il_rx_queue *rxq);
+int il4965_hw_nic_init(struct il_priv *il);
+int il4965_dump_fh(struct il_priv *il, char **buf, bool display);
+
+/* rx */
+void il4965_rx_queue_restock(struct il_priv *il);
+void il4965_rx_replenish(struct il_priv *il);
+void il4965_rx_replenish_now(struct il_priv *il);
+void il4965_rx_queue_free(struct il_priv *il, struct il_rx_queue *rxq);
+int il4965_rxq_stop(struct il_priv *il);
+int il4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
+void il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb);
+void il4965_hdl_rx_phy(struct il_priv *il, struct il_rx_buf *rxb);
+void il4965_rx_handle(struct il_priv *il);
+
+/* tx */
+void il4965_hw_txq_free_tfd(struct il_priv *il, struct il_tx_queue *txq);
+int il4965_hw_txq_attach_buf_to_tfd(struct il_priv *il, struct il_tx_queue *txq,
+ dma_addr_t addr, u16 len, u8 reset, u8 pad);
+int il4965_hw_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq);
+void il4965_hwrate_to_tx_control(struct il_priv *il, u32 rate_n_flags,
+ struct ieee80211_tx_info *info);
+int il4965_tx_skb(struct il_priv *il, struct sk_buff *skb);
+int il4965_tx_agg_start(struct il_priv *il, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u16 * ssn);
+int il4965_tx_agg_stop(struct il_priv *il, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid);
+int il4965_txq_check_empty(struct il_priv *il, int sta_id, u8 tid, int txq_id);
+void il4965_hdl_compressed_ba(struct il_priv *il, struct il_rx_buf *rxb);
+int il4965_tx_queue_reclaim(struct il_priv *il, int txq_id, int idx);
+void il4965_hw_txq_ctx_free(struct il_priv *il);
+int il4965_txq_ctx_alloc(struct il_priv *il);
+void il4965_txq_ctx_reset(struct il_priv *il);
+void il4965_txq_ctx_stop(struct il_priv *il);
+void il4965_txq_set_sched(struct il_priv *il, u32 mask);
+
+/*
+ * Acquire il->lock before calling this function !
+ */
+void il4965_set_wr_ptrs(struct il_priv *il, int txq_id, u32 idx);
+/**
+ * il4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
+ * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
+ * @scd_retry: (1) Indicates queue will be used in aggregation mode
+ *
+ * NOTE: Acquire il->lock before calling this function !
+ */
+void il4965_tx_queue_set_status(struct il_priv *il, struct il_tx_queue *txq,
+ int tx_fifo_id, int scd_retry);
+
+/* rx */
+void il4965_hdl_missed_beacon(struct il_priv *il, struct il_rx_buf *rxb);
+bool il4965_good_plcp_health(struct il_priv *il, struct il_rx_pkt *pkt);
+void il4965_hdl_stats(struct il_priv *il, struct il_rx_buf *rxb);
+void il4965_hdl_c_stats(struct il_priv *il, struct il_rx_buf *rxb);
+
+/* scan */
+int il4965_request_scan(struct il_priv *il, struct ieee80211_vif *vif);
+
+/* station mgmt */
+int il4965_manage_ibss_station(struct il_priv *il, struct ieee80211_vif *vif,
+ bool add);
+
+/* hcmd */
+int il4965_send_beacon_cmd(struct il_priv *il);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+const char *il4965_get_tx_fail_reason(u32 status);
+#else
+static inline const char *
+il4965_get_tx_fail_reason(u32 status)
+{
+ return "";
+}
+#endif
+
+/* station management */
+int il4965_alloc_bcast_station(struct il_priv *il, struct il_rxon_context *ctx);
+int il4965_add_bssid_station(struct il_priv *il, struct il_rxon_context *ctx,
+ const u8 *addr, u8 *sta_id_r);
+int il4965_remove_default_wep_key(struct il_priv *il,
+ struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *key);
+int il4965_set_default_wep_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *key);
+int il4965_restore_default_wep_keys(struct il_priv *il,
+ struct il_rxon_context *ctx);
+int il4965_set_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *key, u8 sta_id);
+int il4965_remove_dynamic_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *key, u8 sta_id);
+void il4965_update_tkip_key(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32,
+ u16 *phase1key);
+int il4965_sta_tx_modify_enable_tid(struct il_priv *il, int sta_id, int tid);
+int il4965_sta_rx_agg_start(struct il_priv *il, struct ieee80211_sta *sta,
+ int tid, u16 ssn);
+int il4965_sta_rx_agg_stop(struct il_priv *il, struct ieee80211_sta *sta,
+ int tid);
+void il4965_sta_modify_sleep_tx_count(struct il_priv *il, int sta_id, int cnt);
+int il4965_update_bcast_stations(struct il_priv *il);
+
+/* rate */
+static inline u8
+il4965_hw_get_rate(__le32 rate_n_flags)
+{
+ return le32_to_cpu(rate_n_flags) & 0xFF;
+}
+
+/* eeprom */
+void il4965_eeprom_get_mac(const struct il_priv *il, u8 * mac);
+int il4965_eeprom_acquire_semaphore(struct il_priv *il);
+void il4965_eeprom_release_semaphore(struct il_priv *il);
+int il4965_eeprom_check_version(struct il_priv *il);
+
+/* mac80211 handlers (for 4965) */
+void il4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
+int il4965_mac_start(struct ieee80211_hw *hw);
+void il4965_mac_stop(struct ieee80211_hw *hw);
+void il4965_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags, u64 multicast);
+int il4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif, struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key);
+void il4965_mac_update_tkip_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta, u32 iv32,
+ u16 *phase1key);
+int il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 * ssn,
+ u8 buf_size);
+int il4965_mac_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+void il4965_mac_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_channel_switch *ch_switch);
+
+void il4965_led_enable(struct il_priv *il);
+
+/* EEPROM */
+#define IL4965_EEPROM_IMG_SIZE 1024
+
+/*
+ * uCode queue management definitions ...
+ * The first queue used for block-ack aggregation is #7 (4965 only).
+ * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
+ */
+#define IL49_FIRST_AMPDU_QUEUE 7
+
+/* Sizes and addresses for instruction and data memory (SRAM) in
+ * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
+#define IL49_RTC_INST_LOWER_BOUND (0x000000)
+#define IL49_RTC_INST_UPPER_BOUND (0x018000)
+
+#define IL49_RTC_DATA_LOWER_BOUND (0x800000)
+#define IL49_RTC_DATA_UPPER_BOUND (0x80A000)
+
+#define IL49_RTC_INST_SIZE (IL49_RTC_INST_UPPER_BOUND - \
+ IL49_RTC_INST_LOWER_BOUND)
+#define IL49_RTC_DATA_SIZE (IL49_RTC_DATA_UPPER_BOUND - \
+ IL49_RTC_DATA_LOWER_BOUND)
+
+#define IL49_MAX_INST_SIZE IL49_RTC_INST_SIZE
+#define IL49_MAX_DATA_SIZE IL49_RTC_DATA_SIZE
+
+/* Size of uCode instruction memory in bootstrap state machine */
+#define IL49_MAX_BSM_SIZE BSM_SRAM_SIZE
+
+static inline int
+il4965_hw_valid_rtc_data_addr(u32 addr)
+{
+ return (addr >= IL49_RTC_DATA_LOWER_BOUND &&
+ addr < IL49_RTC_DATA_UPPER_BOUND);
+}
+
+/********************* START TEMPERATURE *************************************/
+
+/**
+ * 4965 temperature calculation.
+ *
+ * The driver must calculate the device temperature before calculating
+ * a txpower setting (amplifier gain is temperature dependent). The
+ * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration
+ * values used for the life of the driver, and one of which (R4) is the
+ * real-time temperature indicator.
+ *
+ * uCode provides all 4 values to the driver via the "initialize alive"
+ * notification (see struct il4965_init_alive_resp). After the runtime uCode
+ * image loads, uCode updates the R4 value via stats notifications
+ * (see N_STATS), which occur after each received beacon
+ * when associated, or can be requested via C_STATS.
+ *
+ * NOTE: uCode provides the R4 value as a 23-bit signed value. Driver
+ * must sign-extend to 32 bits before applying formula below.
+ *
+ * Formula:
+ *
+ * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8
+ *
+ * NOTE: The basic formula is 259 * (R4-R2) / (R3-R1). The 97/100 is
+ * an additional correction, which should be centered around 0 degrees
+ * Celsius (273 degrees Kelvin). The 8 (3 percent of 273) compensates for
+ * centering the 97/100 correction around 0 degrees C (273 K).
+ *
+ * Subtract 273 from the Kelvin value to find degrees Celsius, for
+ * comparing the current temperature with factory-measured temperatures
+ * when calculating txpower settings.
+ */
+#define TEMPERATURE_CALIB_KELVIN_OFFSET 8
+#define TEMPERATURE_CALIB_A_VAL 259
+
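+/*
+ * Illustrative sketch (not part of the driver): the formula above applied
+ * to calibration values that have already been sign-extended to 32 bits.
+ * The helper name il4965_temp_formula_sketch() is hypothetical.
+ */
+static inline s32
+il4965_temp_formula_sketch(s32 R1, s32 R2, s32 R3, s32 R4)
+{
+ s32 temp;
+
+ if (R3 == R1) /* avoid division by zero */
+ return -1;
+
+ temp = TEMPERATURE_CALIB_A_VAL * (R4 - R2);
+ temp /= (R3 - R1);
+
+ /* apply the 97% correction, re-centered around 0 degrees C (273 K) */
+ return (temp * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
+}
+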
+/* Limit range of calculated temperature to be between these Kelvin values */
+#define IL_TX_POWER_TEMPERATURE_MIN (263)
+#define IL_TX_POWER_TEMPERATURE_MAX (410)
+
+#define IL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \
+ ((t) < IL_TX_POWER_TEMPERATURE_MIN || \
+ (t) > IL_TX_POWER_TEMPERATURE_MAX)
+
+/********************* END TEMPERATURE ***************************************/
+
+/********************* START TXPOWER *****************************************/
+
+/**
+ * 4965 txpower calculations rely on information from three sources:
+ *
+ * 1) EEPROM
+ * 2) "initialize" alive notification
+ * 3) stats notifications
+ *
+ * EEPROM data consists of:
+ *
+ * 1) Regulatory information (max txpower and channel usage flags) is provided
+ * separately for each channel that can possibly be supported by 4965.
+ * 40 MHz wide (.11n HT40) channels are listed separately from 20 MHz
+ * (legacy) channels.
+ *
+ * See struct il4965_eeprom_channel for format, and struct il4965_eeprom
+ * for locations in EEPROM.
+ *
+ * 2) Factory txpower calibration information is provided separately for
+ * sub-bands of contiguous channels. 2.4 GHz has just one sub-band,
+ * but 5 GHz has several sub-bands.
+ *
+ * In addition, per-band (2.4 and 5 GHz) saturation txpowers are provided.
+ *
+ * See struct il4965_eeprom_calib_info (and the tree of structures
+ * contained within it) for format, and struct il4965_eeprom for
+ * locations in EEPROM.
+ *
+ * "Initialization alive" notification (see struct il4965_init_alive_resp)
+ * consists of:
+ *
+ * 1) Temperature calculation parameters.
+ *
+ * 2) Power supply voltage measurement.
+ *
+ * 3) Tx gain compensation to balance 2 transmitters for MIMO use.
+ *
+ * Statistics notifications deliver:
+ *
+ * 1) Current values for temperature param R4.
+ */
+
+/**
+ * To calculate a txpower setting for a given desired target txpower, channel,
+ * modulation bit rate, and transmitter chain (4965 has 2 transmitters to
+ * support MIMO and transmit diversity), driver must do the following:
+ *
+ * 1) Compare desired txpower vs. (EEPROM) regulatory limit for this channel.
+ * Do not exceed regulatory limit; reduce target txpower if necessary.
+ *
+ * If setting up txpowers for MIMO rates (rate idxes 8-15, 24-31),
+ * 2 transmitters will be used simultaneously; driver must reduce the
+ * regulatory limit by 3 dB (half-power) for each transmitter, so the
+ * combined total output of the 2 transmitters is within regulatory limits.
+ *
+ *
+ * 2) Compare target txpower vs. (EEPROM) saturation txpower *reduced by
+ * backoff for this bit rate*. Do not exceed (saturation - backoff[rate]);
+ * reduce target txpower if necessary.
+ *
+ * Backoff values below are in 1/2 dB units (equivalent to steps in
+ * txpower gain tables):
+ *
+ * OFDM 6 - 36 MBit: 10 steps (5 dB)
+ * OFDM 48 MBit: 15 steps (7.5 dB)
+ * OFDM 54 MBit: 17 steps (8.5 dB)
+ * OFDM 60 MBit: 20 steps (10 dB)
+ * CCK all rates: 10 steps (5 dB)
+ *
+ * Backoff values apply to saturation txpower on a per-transmitter basis;
+ * when using MIMO (2 transmitters), each transmitter uses the same
+ * saturation level provided in EEPROM, and the same backoff values;
+ * no reduction (such as with regulatory txpower limits) is required.
+ *
+ * Saturation and Backoff values apply equally to 20 MHz (legacy) channel
+ * widths and 40 MHz (.11n HT40) channel widths; there is no separate
+ * factory measurement for ht40 channels.
+ *
+ * The result of this step is the final target txpower. The rest of
+ * the steps figure out the proper settings for the device to achieve
+ * that target txpower.
+ *
+ *
+ * 3) Determine (EEPROM) calibration sub band for the target channel, by
+ * comparing against first and last channels in each sub band
+ * (see struct il4965_eeprom_calib_subband_info).
+ *
+ *
+ * 4) Linearly interpolate (EEPROM) factory calibration measurement sets,
+ * referencing the 2 factory-measured (sample) channels within the sub band.
+ *
+ * Interpolation is based on difference between target channel's frequency
+ * and the sample channels' frequencies. Since channel numbers are based
+ * on frequency (5 MHz between each channel number), this is equivalent
+ * to interpolating based on channel number differences.
+ *
+ * Note that the sample channels may or may not be the channels at the
+ * edges of the sub band. The target channel may be "outside" of the
+ * span of the sampled channels.
+ *
+ * Driver may choose the pair (for 2 Tx chains) of measurements (see
+ * struct il4965_eeprom_calib_ch_info) for which the actual measured
+ * txpower comes closest to the desired txpower. Usually, though,
+ * the middle set of measurements is closest to the regulatory limits,
+ * and is therefore a good choice for all txpower calculations (this
+ * assumes that high accuracy is needed for maximizing legal txpower,
+ * while lower txpower configurations do not need as much accuracy).
+ *
+ * Driver should interpolate both members of the chosen measurement pair,
+ * i.e. for both Tx chains (radio transmitters), unless the driver knows
+ * that only one of the chains will be used (e.g. only one tx antenna
+ * connected, but this should be unusual). The rate scaling algorithm
+ * switches antennas to find best performance, so both Tx chains will
+ * be used (although only one at a time) even for non-MIMO transmissions.
+ *
+ * Driver should interpolate factory values for temperature, gain table
+ * idx, and actual power. The power amplifier detector values are
+ * not used by the driver.
+ *
+ * Sanity check: If the target channel happens to be one of the sample
+ * channels, the results should agree with the sample channel's
+ * measurements!
+ *
+ *
+ * 5) Find difference between desired txpower and (interpolated)
+ * factory-measured txpower. Using (interpolated) factory gain table idx
+ * (shown elsewhere) as a starting point, adjust this idx lower to
+ * increase txpower, or higher to decrease txpower, until the target
+ * txpower is reached. Each step in the gain table is 1/2 dB.
+ *
+ * For example, if factory measured txpower is 16 dBm, and target txpower
+ * is 13 dBm, add 6 steps to the factory gain idx to reduce txpower
+ * by 3 dB.
+ *
+ *
+ * 6) Find difference between current device temperature and (interpolated)
+ * factory-measured temperature for sub-band. Factory values are in
+ * degrees Celsius. To calculate current temperature, see comments for
+ * "4965 temperature calculation".
+ *
+ * If current temperature is higher than factory temperature, driver must
+ * increase gain (lower gain table idx), and vice versa.
+ *
+ * Temperature affects gain differently for different channels:
+ *
+ * 2.4 GHz all channels: 3.5 degrees per half-dB step
+ * 5 GHz channels 34-43: 4.5 degrees per half-dB step
+ * 5 GHz channels >= 44: 4.0 degrees per half-dB step
+ *
+ * NOTE: Temperature can increase rapidly when transmitting, especially
+ * with heavy traffic at high txpowers. Driver should update
+ * temperature calculations often under these conditions to
+ * maintain strong txpower in the face of rising temperature.
+ *
+ *
+ * 7) Find difference between current power supply voltage indicator
+ * (from "initialize alive") and factory-measured power supply voltage
+ * indicator (EEPROM).
+ *
+ * If the current voltage is higher (indicator is lower) than factory
+ * voltage, gain should be reduced (gain table idx increased) by:
+ *
+ * (eeprom - current) / 7
+ *
+ * If the current voltage is lower (indicator is higher) than factory
+ * voltage, gain should be increased (gain table idx decreased) by:
+ *
+ * 2 * (current - eeprom) / 7
+ *
+ * If number of idx steps in either direction turns out to be > 2,
+ * something is wrong ... just use 0.
+ *
+ * NOTE: Voltage compensation is independent of band/channel.
+ *
+ * NOTE: "Initialize" uCode measures current voltage, which is assumed
+ * to be constant after this initial measurement. Voltage
+ * compensation for txpower (number of steps in gain table)
+ * may be calculated once and used until the next uCode bootload.
+ *
+ *
+ * 8) If setting up txpowers for MIMO rates (rate idxes 8-15, 24-31),
+ * adjust txpower for each transmitter chain, so txpower is balanced
+ * between the two chains. There are 5 pairs of tx_atten[group][chain]
+ * values in "initialize alive", one pair for each of 5 channel ranges:
+ *
+ * Group 0: 5 GHz channel 34-43
+ * Group 1: 5 GHz channel 44-70
+ * Group 2: 5 GHz channel 71-124
+ * Group 3: 5 GHz channel 125-200
+ * Group 4: 2.4 GHz all channels
+ *
+ * Add the tx_atten[group][chain] value to the idx for the target chain.
+ * The values are signed, but are in pairs of 0 and a non-negative number,
+ * so as to reduce gain (if necessary) of the "hotter" chain. This
+ * avoids any need to double-check for regulatory compliance after
+ * this step.
+ *
+ *
+ * 9) If setting up for a CCK rate, lower the gain by adding a CCK compensation
+ * value to the idx:
+ *
+ * Hardware rev B: 9 steps (4.5 dB)
+ * Hardware rev C: 5 steps (2.5 dB)
+ *
+ * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
+ * bits [3:2], 1 = B, 2 = C.
+ *
+ * NOTE: This compensation is in addition to any saturation backoff that
+ * might have been applied in an earlier step.
+ *
+ *
+ * 10) Select the gain table, based on band (2.4 vs 5 GHz).
+ *
+ * Limit the adjusted idx to stay within the table!
+ *
+ *
+ * 11) Read gain table entries for DSP and radio gain, place into appropriate
+ * location(s) in command (struct il4965_txpowertable_cmd).
+ */
+
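+/*
+ * Illustrative sketch (not part of the driver): steps 5 - 9 above collapsed
+ * into a single gain table idx calculation. It assumes all power values are
+ * in half-dBm and all compensation terms are in half-dB gain table steps,
+ * using the same sign conventions as the driver; the helper name and its
+ * parameters are hypothetical. The caller must still clamp the result to
+ * the gain table range (step 10).
+ */
+static inline int
+il4965_gain_idx_sketch(int factory_idx, int factory_pwr, int target_pwr,
+ int temp_comp, int volt_comp, int tx_atten, int cck_comp)
+{
+ /* higher idx means lower txpower; see step 5 */
+ return factory_idx - (target_pwr - factory_pwr) -
+ temp_comp - volt_comp + tx_atten + cck_comp;
+}
+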
+/**
+ * When MIMO is used (2 transmitters operating simultaneously), driver should
+ * limit each transmitter to deliver a max of 3 dB below the regulatory limit
+ * for the device. That is, use half power for each transmitter, so total
+ * txpower is within regulatory limits.
+ *
+ * The value "6" represents number of steps in gain table to reduce power 3 dB.
+ * Each step is 1/2 dB.
+ */
+#define IL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6)
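+/*
+ * Worked example (arithmetic only, not driver code): with a regulatory
+ * limit of 34 half-dBm (17 dBm), each chain of a MIMO rate is limited to
+ * 34 - 6 = 28 half-dBm (14 dBm), so the combined output of both chains
+ * stays near 17 dBm.
+ */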
+
+/**
+ * CCK gain compensation.
+ *
+ * When calculating txpowers for CCK, after making sure that the target power
+ * is within regulatory and saturation limits, driver must additionally
+ * back off gain by adding these values to the gain table idx.
+ *
+ * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
+ * bits [3:2], 1 = B, 2 = C.
+ */
+#define IL_TX_POWER_CCK_COMPENSATION_B_STEP (9)
+#define IL_TX_POWER_CCK_COMPENSATION_C_STEP (5)
+
+/*
+ * 4965 power supply voltage compensation for txpower
+ */
+#define TX_POWER_IL_VOLTAGE_CODES_PER_03V (7)
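+
+/*
+ * Illustrative sketch (not part of the driver): the voltage compensation
+ * rule from step 7 of the txpower procedure above, using the same sign
+ * convention as the driver's voltage compensation term (the result is
+ * subtracted from the gain table idx). Plain integer division is used
+ * here for brevity where the driver uses a rounding division helper, and
+ * the driver's additional sanity checks are omitted. The helper name is
+ * hypothetical.
+ */
+static inline s32
+il4965_volt_comp_sketch(s32 eeprom_volt, s32 current_volt)
+{
+ s32 comp = (current_volt - eeprom_volt) /
+ TX_POWER_IL_VOLTAGE_CODES_PER_03V;
+
+ /* indicator higher than factory means supply voltage is lower */
+ if (current_volt > eeprom_volt)
+ comp *= 2;
+
+ /* more than 2 steps in either direction means bad data; use 0 */
+ if (comp < -2 || comp > 2)
+ comp = 0;
+
+ return comp;
+}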
+
+/**
+ * Gain tables.
+ *
+ * The following tables contain pair of values for setting txpower, i.e.
+ * gain settings for the output of the device's digital signal processor (DSP),
+ * and for the analog gain structure of the transmitter.
+ *
+ * Each entry in the gain tables represents a step of 1/2 dB. Note that these
+ * are *relative* steps, not indications of absolute output power. Output
+ * power varies with temperature, voltage, and channel frequency, and also
+ * requires consideration of average power (to satisfy regulatory constraints),
+ * and peak power (to avoid distortion of the output signal).
+ *
+ * Each entry contains two values:
+ * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
+ * linear value that multiplies the output of the digital signal processor,
+ * before being sent to the analog radio.
+ * 2) Radio gain. This sets the analog gain of the radio Tx path.
+ * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
+ *
+ * EEPROM contains factory calibration data for txpower. This maps actual
+ * measured txpower levels to gain settings in the "well known" tables
+ * below ("well-known" means here that both factory calibration *and* the
+ * driver work with the same table).
+ *
+ * There are separate tables for 2.4 GHz and 5 GHz bands. The 5 GHz table
+ * has an extension (into negative idxes), in case the driver needs to
+ * boost power setting for high device temperatures (higher than would be
+ * present during factory calibration). A 5 GHz EEPROM idx of "40"
+ * corresponds to the 49th entry in the table used by the driver.
+ */
+#define MIN_TX_GAIN_IDX (0) /* highest gain, lowest idx, 2.4 */
+#define MIN_TX_GAIN_IDX_52GHZ_EXT (-9) /* highest gain, lowest idx, 5 */
+
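+/*
+ * Illustrative sketch (not part of the driver): position of a 5 GHz factory
+ * (EEPROM) gain idx within the driver's extended 5 GHz table, which starts
+ * 9 entries early to allow negative idxes. The helper name is hypothetical.
+ */
+static inline int
+il4965_5g_table_pos_sketch(int eeprom_idx)
+{
+ /* e.g. an EEPROM idx of 40 lands at table entry 49, as noted above */
+ return eeprom_idx - MIN_TX_GAIN_IDX_52GHZ_EXT;
+}
+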
+/**
+ * 2.4 GHz gain table
+ *
+ * Index Dsp gain Radio gain
+ * 0 110 0x3f (highest gain)
+ * 1 104 0x3f
+ * 2 98 0x3f
+ * 3 110 0x3e
+ * 4 104 0x3e
+ * 5 98 0x3e
+ * 6 110 0x3d
+ * 7 104 0x3d
+ * 8 98 0x3d
+ * 9 110 0x3c
+ * 10 104 0x3c
+ * 11 98 0x3c
+ * 12 110 0x3b
+ * 13 104 0x3b
+ * 14 98 0x3b
+ * 15 110 0x3a
+ * 16 104 0x3a
+ * 17 98 0x3a
+ * 18 110 0x39
+ * 19 104 0x39
+ * 20 98 0x39
+ * 21 110 0x38
+ * 22 104 0x38
+ * 23 98 0x38
+ * 24 110 0x37
+ * 25 104 0x37
+ * 26 98 0x37
+ * 27 110 0x36
+ * 28 104 0x36
+ * 29 98 0x36
+ * 30 110 0x35
+ * 31 104 0x35
+ * 32 98 0x35
+ * 33 110 0x34
+ * 34 104 0x34
+ * 35 98 0x34
+ * 36 110 0x33
+ * 37 104 0x33
+ * 38 98 0x33
+ * 39 110 0x32
+ * 40 104 0x32
+ * 41 98 0x32
+ * 42 110 0x31
+ * 43 104 0x31
+ * 44 98 0x31
+ * 45 110 0x30
+ * 46 104 0x30
+ * 47 98 0x30
+ * 48 110 0x6
+ * 49 104 0x6
+ * 50 98 0x6
+ * 51 110 0x5
+ * 52 104 0x5
+ * 53 98 0x5
+ * 54 110 0x4
+ * 55 104 0x4
+ * 56 98 0x4
+ * 57 110 0x3
+ * 58 104 0x3
+ * 59 98 0x3
+ * 60 110 0x2
+ * 61 104 0x2
+ * 62 98 0x2
+ * 63 110 0x1
+ * 64 104 0x1
+ * 65 98 0x1
+ * 66 110 0x0
+ * 67 104 0x0
+ * 68 98 0x0
+ * 69 97 0
+ * 70 96 0
+ * 71 95 0
+ * 72 94 0
+ * 73 93 0
+ * 74 92 0
+ * 75 91 0
+ * 76 90 0
+ * 77 89 0
+ * 78 88 0
+ * 79 87 0
+ * 80 86 0
+ * 81 85 0
+ * 82 84 0
+ * 83 83 0
+ * 84 82 0
+ * 85 81 0
+ * 86 80 0
+ * 87 79 0
+ * 88 78 0
+ * 89 77 0
+ * 90 76 0
+ * 91 75 0
+ * 92 74 0
+ * 93 73 0
+ * 94 72 0
+ * 95 71 0
+ * 96 70 0
+ * 97 69 0
+ * 98 68 0
+ */
+
+/**
+ * 5 GHz gain table
+ *
+ * Index Dsp gain Radio gain
+ * -9 123 0x3F (highest gain)
+ * -8 117 0x3F
+ * -7 110 0x3F
+ * -6 104 0x3F
+ * -5 98 0x3F
+ * -4 110 0x3E
+ * -3 104 0x3E
+ * -2 98 0x3E
+ * -1 110 0x3D
+ * 0 104 0x3D
+ * 1 98 0x3D
+ * 2 110 0x3C
+ * 3 104 0x3C
+ * 4 98 0x3C
+ * 5 110 0x3B
+ * 6 104 0x3B
+ * 7 98 0x3B
+ * 8 110 0x3A
+ * 9 104 0x3A
+ * 10 98 0x3A
+ * 11 110 0x39
+ * 12 104 0x39
+ * 13 98 0x39
+ * 14 110 0x38
+ * 15 104 0x38
+ * 16 98 0x38
+ * 17 110 0x37
+ * 18 104 0x37
+ * 19 98 0x37
+ * 20 110 0x36
+ * 21 104 0x36
+ * 22 98 0x36
+ * 23 110 0x35
+ * 24 104 0x35
+ * 25 98 0x35
+ * 26 110 0x34
+ * 27 104 0x34
+ * 28 98 0x34
+ * 29 110 0x33
+ * 30 104 0x33
+ * 31 98 0x33
+ * 32 110 0x32
+ * 33 104 0x32
+ * 34 98 0x32
+ * 35 110 0x31
+ * 36 104 0x31
+ * 37 98 0x31
+ * 38 110 0x30
+ * 39 104 0x30
+ * 40 98 0x30
+ * 41 110 0x25
+ * 42 104 0x25
+ * 43 98 0x25
+ * 44 110 0x24
+ * 45 104 0x24
+ * 46 98 0x24
+ * 47 110 0x23
+ * 48 104 0x23
+ * 49 98 0x23
+ * 50 110 0x22
+ * 51 104 0x18
+ * 52 98 0x18
+ * 53 110 0x17
+ * 54 104 0x17
+ * 55 98 0x17
+ * 56 110 0x16
+ * 57 104 0x16
+ * 58 98 0x16
+ * 59 110 0x15
+ * 60 104 0x15
+ * 61 98 0x15
+ * 62 110 0x14
+ * 63 104 0x14
+ * 64 98 0x14
+ * 65 110 0x13
+ * 66 104 0x13
+ * 67 98 0x13
+ * 68 110 0x12
+ * 69 104 0x08
+ * 70 98 0x08
+ * 71 110 0x07
+ * 72 104 0x07
+ * 73 98 0x07
+ * 74 110 0x06
+ * 75 104 0x06
+ * 76 98 0x06
+ * 77 110 0x05
+ * 78 104 0x05
+ * 79 98 0x05
+ * 80 110 0x04
+ * 81 104 0x04
+ * 82 98 0x04
+ * 83 110 0x03
+ * 84 104 0x03
+ * 85 98 0x03
+ * 86 110 0x02
+ * 87 104 0x02
+ * 88 98 0x02
+ * 89 110 0x01
+ * 90 104 0x01
+ * 91 98 0x01
+ * 92 110 0x00
+ * 93 104 0x00
+ * 94 98 0x00
+ * 95 93 0x00
+ * 96 88 0x00
+ * 97 83 0x00
+ * 98 78 0x00
+ */
+
+/**
+ * Sanity checks and default values for EEPROM regulatory levels.
+ * If EEPROM values fall outside MIN/MAX range, use default values.
+ *
+ * Regulatory limits refer to the maximum average txpower allowed by
+ * regulatory agencies in the geographies in which the device is meant
+ * to be operated. These limits are SKU-specific (i.e. geography-specific),
+ * and channel-specific; each channel has an individual regulatory limit
+ * listed in the EEPROM.
+ *
+ * Units are in half-dBm (i.e. "34" means 17 dBm).
+ */
+#define IL_TX_POWER_DEFAULT_REGULATORY_24 (34)
+#define IL_TX_POWER_DEFAULT_REGULATORY_52 (34)
+#define IL_TX_POWER_REGULATORY_MIN (0)
+#define IL_TX_POWER_REGULATORY_MAX (34)
+
+/**
+ * Sanity checks and default values for EEPROM saturation levels.
+ * If EEPROM values fall outside MIN/MAX range, use default values.
+ *
+ * Saturation is the highest level that the output power amplifier can produce
+ * without significant clipping distortion. This is a "peak" power level.
+ * Different types of modulation (i.e. various "rates", and OFDM vs. CCK)
+ * require differing amounts of backoff, relative to their average power output,
+ * in order to avoid clipping distortion.
+ *
+ * Driver must make sure that it violates neither the saturation limit nor
+ * the regulatory limit when calculating Tx power settings for various
+ * rates.
+ *
+ * Units are in half-dBm (i.e. "38" means 19 dBm).
+ */
+#define IL_TX_POWER_DEFAULT_SATURATION_24 (38)
+#define IL_TX_POWER_DEFAULT_SATURATION_52 (38)
+#define IL_TX_POWER_SATURATION_MIN (20)
+#define IL_TX_POWER_SATURATION_MAX (50)
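A minimal sketch of the sanity check described above (illustrative only; the function name is not taken from the driver). The same clamp-to-default pattern applies to the regulatory limits a few lines up.

/* Illustrative: fall back to the default saturation level (half-dBm units)
 * when the EEPROM value is out of range. */
static inline int il4965_sanitize_saturation(int eeprom_sat, bool is_52ghz)
{
	if (eeprom_sat < IL_TX_POWER_SATURATION_MIN ||
	    eeprom_sat > IL_TX_POWER_SATURATION_MAX)
		return is_52ghz ? IL_TX_POWER_DEFAULT_SATURATION_52 :
				  IL_TX_POWER_DEFAULT_SATURATION_24;
	return eeprom_sat;
}
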
+
+/**
+ * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance)
+ * and thermal Txpower calibration.
+ *
+ * When calculating txpower, driver must compensate for current device
+ * temperature; higher temperature requires higher gain. Driver must calculate
+ * current temperature (see "4965 temperature calculation"), then compare vs.
+ * factory calibration temperature in EEPROM; if current temperature is higher
+ * than factory temperature, driver must *increase* gain by proportions shown
+ * in table below. If current temperature is lower than factory, driver must
+ * *decrease* gain.
+ *
+ * Different frequency ranges require different compensation, as shown below.
+ */
+/* Group 0, 5.2 GHz ch 34-43: 4.5 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR1_FCH 34
+#define CALIB_IL_TX_ATTEN_GR1_LCH 43
+
+/* Group 1, 5.3 GHz ch 44-70: 4.0 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR2_FCH 44
+#define CALIB_IL_TX_ATTEN_GR2_LCH 70
+
+/* Group 2, 5.5 GHz ch 71-124: 4.0 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR3_FCH 71
+#define CALIB_IL_TX_ATTEN_GR3_LCH 124
+
+/* Group 3, 5.7 GHz ch 125-200: 4.0 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR4_FCH 125
+#define CALIB_IL_TX_ATTEN_GR4_LCH 200
+
+/* Group 4, 2.4 GHz all channels: 3.5 degrees per 1/2 dB. */
+#define CALIB_IL_TX_ATTEN_GR5_FCH 1
+#define CALIB_IL_TX_ATTEN_GR5_LCH 20
+
+enum {
+ CALIB_CH_GROUP_1 = 0,
+ CALIB_CH_GROUP_2 = 1,
+ CALIB_CH_GROUP_3 = 2,
+ CALIB_CH_GROUP_4 = 3,
+ CALIB_CH_GROUP_5 = 4,
+ CALIB_CH_GROUP_MAX
+};
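For illustration, a channel-to-group lookup along the lines the driver needs when picking the compensation slope (sketch only; the function name and the -1 error return are assumptions, not taken from this patch):

/* Illustrative: map a channel number to its calibration group. */
static int il4965_tx_atten_group_sketch(u16 channel)
{
	if (channel >= CALIB_IL_TX_ATTEN_GR5_FCH &&
	    channel <= CALIB_IL_TX_ATTEN_GR5_LCH)
		return CALIB_CH_GROUP_5;	/* 2.4 GHz */
	if (channel >= CALIB_IL_TX_ATTEN_GR1_FCH &&
	    channel <= CALIB_IL_TX_ATTEN_GR1_LCH)
		return CALIB_CH_GROUP_1;	/* 5.2 GHz */
	if (channel >= CALIB_IL_TX_ATTEN_GR2_FCH &&
	    channel <= CALIB_IL_TX_ATTEN_GR2_LCH)
		return CALIB_CH_GROUP_2;	/* 5.3 GHz */
	if (channel >= CALIB_IL_TX_ATTEN_GR3_FCH &&
	    channel <= CALIB_IL_TX_ATTEN_GR3_LCH)
		return CALIB_CH_GROUP_3;	/* 5.5 GHz */
	if (channel >= CALIB_IL_TX_ATTEN_GR4_FCH &&
	    channel <= CALIB_IL_TX_ATTEN_GR4_LCH)
		return CALIB_CH_GROUP_4;	/* 5.7 GHz */
	return -1;				/* not in any group */
}
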
+
+/********************* END TXPOWER *****************************************/
+
+/**
+ * Tx/Rx Queues
+ *
+ * Most communication between driver and 4965 is via queues of data buffers.
+ * For example, all commands that the driver issues to device's embedded
+ * controller (uCode) are via the command queue (one of the Tx queues). All
+ * uCode command responses/replies/notifications, including Rx frames, are
+ * conveyed from uCode to driver via the Rx queue.
+ *
+ * Most support for these queues, including handshake support, resides in
+ * structures in host DRAM, shared between the driver and the device. When
+ * allocating this memory, the driver must make sure that data written by
+ * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's
+ * cache memory), so DRAM and cache are consistent, and the device can
+ * immediately see changes made by the driver.
+ *
+ * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via
+ * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array
+ * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
+ */
+#define IL49_NUM_FIFOS 7
+#define IL49_CMD_FIFO_NUM 4
+#define IL49_NUM_QUEUES 16
+#define IL49_NUM_AMPDU_QUEUES 8
+
+/**
+ * struct il4965_scd_bc_tbl
+ *
+ * Byte Count table
+ *
+ * Each Tx queue uses a byte-count table containing 320 entries:
+ * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that
+ * duplicate the first 64 entries (to avoid wrap-around within a Tx win;
+ * max Tx win is 64 TFDs).
+ *
+ * When driver sets up a new TFD, it must also enter the total byte count
+ * of the frame to be transmitted into the corresponding entry in the byte
+ * count table for the chosen Tx queue. If the TFD idx is 0-63, the driver
+ * must duplicate the byte count entry in corresponding idx 256-319.
+ *
+ * Padding puts each byte count table on a 1024-byte boundary;
+ * 4965 assumes tables are separated by 1024 bytes.
+ */
+struct il4965_scd_bc_tbl {
+ __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
+ u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
+} __packed;
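A sketch of the duplication rule described above (illustrative only; the helper name is made up and the 256/64 values are written as literals rather than driver constants):

/* Illustrative: record a frame's byte count, mirroring entries 0-63
 * at positions 256-319 so a 64-TFD Tx win never wraps. */
static void il4965_set_bc_entry_sketch(struct il4965_scd_bc_tbl *bc_tbl,
				       int tfd_idx, u16 byte_cnt)
{
	bc_tbl->tfd_offset[tfd_idx] = cpu_to_le16(byte_cnt);
	if (tfd_idx < 64)
		bc_tbl->tfd_offset[256 + tfd_idx] = cpu_to_le16(byte_cnt);
}
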
+
+#define IL4965_RTC_INST_LOWER_BOUND (0x000000)
+
+/* RSSI to dBm */
+#define IL4965_RSSI_OFFSET 44
+
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT 0x041
+
+/* PCI register values */
+#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
+#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
+
+#define IL4965_DEFAULT_TX_RETRY 15
+
+/* EEPROM */
+#define IL4965_FIRST_AMPDU_QUEUE 10
+
+/* Calibration */
+void il4965_chain_noise_calibration(struct il_priv *il, void *stat_resp);
+void il4965_sensitivity_calibration(struct il_priv *il, void *resp);
+void il4965_init_sensitivity(struct il_priv *il);
+void il4965_reset_run_time_calib(struct il_priv *il);
+void il4965_calib_free_results(struct il_priv *il);
+
+/* Debug */
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ssize_t il4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t il4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ssize_t il4965_ucode_general_stats_read(struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos);
+#endif
+
+/****************************/
+/* Flow Handler Definitions */
+/****************************/
+
+/**
+ * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
+ * Addresses are offsets from device's PCI hardware base address.
+ */
+#define FH49_MEM_LOWER_BOUND (0x1000)
+#define FH49_MEM_UPPER_BOUND (0x2000)
+
+/**
+ * Keep-Warm (KW) buffer base address.
+ *
+ * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
+ * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
+ * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host
+ * from going into a power-savings mode that would cause higher DRAM latency,
+ * and possible data over/under-runs, before all Tx/Rx is complete.
+ *
+ * Driver loads FH49_KW_MEM_ADDR_REG with the physical address (bits 35:4)
+ * of the buffer, which must be 4K aligned. Once this is set up, the 4965
+ * automatically invokes keep-warm accesses when normal accesses might not
+ * be sufficient to maintain fast DRAM response.
+ *
+ * Bit fields:
+ * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
+ */
+#define FH49_KW_MEM_ADDR_REG (FH49_MEM_LOWER_BOUND + 0x97C)
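Programming the register might look like the following sketch (il_wr() is assumed here as the driver's direct register-write helper; the function name is illustrative):

/* Illustrative: the register takes physical address bits [35:4] of the
 * 4 KB-aligned keep-warm buffer, hence the shift by 4. */
static void il4965_kw_init_sketch(struct il_priv *il, dma_addr_t kw_dma)
{
	il_wr(il, FH49_KW_MEM_ADDR_REG, (u32)(kw_dma >> 4));
}
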
+
+/**
+ * TFD Circular Buffers Base (CBBC) addresses
+ *
+ * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
+ * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
+ * (see struct il_tfd_frame). These 16 pointer registers are offset by 0x04
+ * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
+ * aligned (address bits 0-7 must be 0).
+ *
+ * Bit fields in each pointer register:
+ * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
+ */
+#define FH49_MEM_CBBC_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x9D0)
+#define FH49_MEM_CBBC_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xA10)
+
+/* Find TFD CB base pointer for given queue (range 0-15). */
+#define FH49_MEM_CBBC_QUEUE(x) (FH49_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
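For example (sketch only; il_wr() and the function name are assumptions), the base pointer for one Tx queue would be programmed with the DMA address shifted down to bits [35:8]:

/* Illustrative: tfd_cb_dma must be 256-byte aligned. */
static void il4965_txq_set_base_sketch(struct il_priv *il, int txq_id,
				       dma_addr_t tfd_cb_dma)
{
	il_wr(il, FH49_MEM_CBBC_QUEUE(txq_id), (u32)(tfd_cb_dma >> 8));
}
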
+
+/**
+ * Rx SRAM Control and Status Registers (RSCSR)
+ *
+ * These registers provide handshake between driver and 4965 for the Rx queue
+ * (this queue handles *all* command responses, notifications, Rx data, etc.
+ * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
+ * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
+ * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
+ * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
+ * mapping between RBDs and RBs.
+ *
+ * Driver must allocate host DRAM memory for the following, and set the
+ * physical address of each into 4965 registers:
+ *
+ * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
+ * entries (although any power of 2, up to 4096, is selectable by driver).
+ * Each entry (1 dword) points to a receive buffer (RB) of consistent size
+ * (typically 4K, although 8K or 16K are also selectable by driver).
+ * Driver sets up RB size and number of RBDs in the CB via Rx config
+ * register FH49_MEM_RCSR_CHNL0_CONFIG_REG.
+ *
+ * Bit fields within one RBD:
+ * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
+ *
+ * Driver sets physical address [35:8] of base of RBD circular buffer
+ * into FH49_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
+ *
+ * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
+ * (RBs) have been filled, via a "write pointer", actually the idx of
+ * the RB's corresponding RBD within the circular buffer. Driver sets
+ * physical address [35:4] into FH49_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
+ *
+ * Bit fields in lower dword of Rx status buffer (upper dword not used
+ * by driver; see struct il4965_shared, val0):
+ * 31-12: Not used by driver
+ * 11- 0: Index of last filled Rx buffer descriptor
+ * (4965 writes, driver reads this value)
+ *
+ * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
+ * enter pointers to these RBs into contiguous RBD circular buffer entries,
+ * and update the 4965's "write" idx register,
+ * FH49_RSCSR_CHNL0_RBDCB_WPTR_REG.
+ *
+ * This "write" idx corresponds to the *next* RBD that the driver will make
+ * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
+ * the circular buffer. This value should initially be 0 (before preparing any
+ * RBs), should be 8 after preparing the first 8 RBs (for example), and must
+ * wrap back to 0 at the end of the circular buffer (but don't wrap before
+ * "read" idx has advanced past 1! See below).
+ * NOTE: 4965 EXPECTS THE WRITE IDX TO BE INCREMENTED IN MULTIPLES OF 8.
+ *
+ * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
+ * buffer), it updates the Rx status buffer in host DRAM, 2) described above,
+ * to tell the driver the idx of the latest filled RBD. The driver must
+ * read this "read" idx from DRAM after receiving an Rx interrupt from 4965.
+ *
+ * The driver must also internally keep track of a third idx, which is the
+ * next RBD to process. When receiving an Rx interrupt, driver should process
+ * all filled but unprocessed RBs up to, but not including, the RB
+ * corresponding to the "read" idx. For example, if "read" idx becomes "1",
+ * driver may process the RB pointed to by RBD 0. Depending on volume of
+ * traffic, there may be many RBs to process.
+ *
+ * If read idx == write idx, 4965 thinks there is no room to put new data.
+ * Due to this, the maximum number of filled RBs is 255, instead of 256. To
+ * be safe, make sure that there is a gap of at least 2 RBDs between "write"
+ * and "read" idxes; that is, make sure that there are no more than 254
+ * buffers waiting to be filled.
+ */
+#define FH49_MEM_RSCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xBC0)
+#define FH49_MEM_RSCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xC00)
+#define FH49_MEM_RSCSR_CHNL0 (FH49_MEM_RSCSR_LOWER_BOUND)
+
+/**
+ * Physical base address of 8-byte Rx Status buffer.
+ * Bit fields:
+ * 31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
+ */
+#define FH49_RSCSR_CHNL0_STTS_WPTR_REG (FH49_MEM_RSCSR_CHNL0)
+
+/**
+ * Physical base address of Rx Buffer Descriptor Circular Buffer.
+ * Bit fields:
+ * 27-0: RBD CB physical base address [35:8], must be 256-byte aligned.
+ */
+#define FH49_RSCSR_CHNL0_RBDCB_BASE_REG (FH49_MEM_RSCSR_CHNL0 + 0x004)
+
+/**
+ * Rx write pointer (idx, really!).
+ * Bit fields:
+ * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
+ * NOTE: For 256-entry circular buffer, use only bits [7:0].
+ */
+#define FH49_RSCSR_CHNL0_RBDCB_WPTR_REG (FH49_MEM_RSCSR_CHNL0 + 0x008)
+#define FH49_RSCSR_CHNL0_WPTR (FH49_RSCSR_CHNL0_RBDCB_WPTR_REG)
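An illustrative update of the write idx, honoring the multiples-of-8 rule noted above (il_wr() is assumed as the driver's register-write helper; the function name is made up):

/* Illustrative: the device expects the Rx write idx in multiples of 8. */
static void il4965_rx_tell_write_idx_sketch(struct il_priv *il, u32 write_idx)
{
	il_wr(il, FH49_RSCSR_CHNL0_RBDCB_WPTR_REG, write_idx & ~7u);
}
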
+
+/**
+ * Rx Config/Status Registers (RCSR)
+ * Rx Config Reg for channel 0 (only channel used)
+ *
+ * Driver must initialize FH49_MEM_RCSR_CHNL0_CONFIG_REG as follows for
+ * normal operation (see bit fields).
+ *
+ * Clearing FH49_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
+ * Driver should poll FH49_MEM_RSSR_RX_STATUS_REG for
+ * FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
+ *
+ * Bit fields:
+ * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
+ * '10' operate normally
+ * 29-24: reserved
+ * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
+ * min "5" for 32 RBDs, max "12" for 4096 RBDs.
+ * 19-18: reserved
+ * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
+ * '10' 12K, '11' 16K.
+ * 15-14: reserved
+ * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
+ * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
+ * typical value 0x10 (about 1/2 msec)
+ * 3- 0: reserved
+ */
+#define FH49_MEM_RCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xC00)
+#define FH49_MEM_RCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xCC0)
+#define FH49_MEM_RCSR_CHNL0 (FH49_MEM_RCSR_LOWER_BOUND)
+
+#define FH49_MEM_RCSR_CHNL0_CONFIG_REG (FH49_MEM_RCSR_CHNL0)
+
+#define FH49_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bit 12 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
+#define FH49_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31 */
+
+#define FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
+#define FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
+#define RX_RB_TIMEOUT (0x10)
+
+#define FH49_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
+#define FH49_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
+#define FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
+
+#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
+#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
+#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
+#define FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
+
+#define FH49_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004)
+#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
+#define FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
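Putting the bit fields together for normal operation might look like the sketch below (illustrative only; il_wr() assumed): DMA enabled, IRQ to host, 4 KB RBs, 256 RBDs (2^8), and the default RB timeout.

/* Illustrative: compose the channel-0 Rx config word per the bit fields
 * documented above. */
static void il4965_rx_config_sketch(struct il_priv *il)
{
	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG,
	      FH49_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	      FH49_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	      FH49_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
	      (8 << FH49_RCSR_RX_CONFIG_RBDCB_SIZE_POS) |
	      (RX_RB_TIMEOUT << FH49_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS));
}
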
+
+/**
+ * Rx Shared Status Registers (RSSR)
+ *
+ * After stopping Rx DMA channel (writing 0 to
+ * FH49_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
+ * FH49_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
+ *
+ * Bit fields:
+ * 24: 1 = Channel 0 is idle
+ *
+ * FH49_MEM_RSSR_SHARED_CTRL_REG and FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
+ * contain default values that should not be altered by the driver.
+ */
+#define FH49_MEM_RSSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xC40)
+#define FH49_MEM_RSSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xD00)
+
+#define FH49_MEM_RSSR_SHARED_CTRL_REG (FH49_MEM_RSSR_LOWER_BOUND)
+#define FH49_MEM_RSSR_RX_STATUS_REG (FH49_MEM_RSSR_LOWER_BOUND + 0x004)
+#define FH49_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
+ (FH49_MEM_RSSR_LOWER_BOUND + 0x008)
+
+#define FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
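A sketch of the stop/poll sequence described above (il_wr() and il_poll_bit() are assumed driver helpers; the timeout value is illustrative):

/* Illustrative: stop Rx DMA and wait for the channel to go idle. */
static void il4965_rx_stop_sketch(struct il_priv *il)
{
	il_wr(il, FH49_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	il_poll_bit(il, FH49_MEM_RSSR_RX_STATUS_REG,
		    FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
		    FH49_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
}
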
+
+#define FH49_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
+
+/* TFDB Area - TFDs buffer table */
+#define FH49_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
+#define FH49_TFDIB_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x900)
+#define FH49_TFDIB_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0x958)
+#define FH49_TFDIB_CTRL0_REG(_chnl) (FH49_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
+#define FH49_TFDIB_CTRL1_REG(_chnl) (FH49_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
+
+/**
+ * Transmit DMA Channel Control/Status Registers (TCSR)
+ *
+ * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
+ * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
+ * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
+ *
+ * To use a Tx DMA channel, driver must initialize its
+ * FH49_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
+ *
+ * FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
+ * FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE
+ *
+ * All other bits should be 0.
+ *
+ * Bit fields:
+ * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
+ * '10' operate normally
+ * 29- 4: Reserved, set to "0"
+ * 3: Enable internal DMA requests (1, normal operation), disable (0)
+ * 2- 0: Reserved, set to "0"
+ */
+#define FH49_TCSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xD00)
+#define FH49_TCSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xE60)
+
+/* Find Control/Status reg for given Tx DMA/FIFO channel */
+#define FH49_TCSR_CHNL_NUM (7)
+#define FH50_TCSR_CHNL_NUM (8)
+
+/* TCSR: tx_config register values */
+#define FH49_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
+ (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl))
+#define FH49_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
+ (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
+ (FH49_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000)
+
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
+#define FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
+
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
+
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
+#define FH49_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
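Following the note above, bringing up one Tx DMA channel could look like this sketch (il_wr() assumed; the function name is illustrative):

/* Illustrative: enable the channel with DMA credits, all other bits zero. */
static void il4965_tx_chnl_enable_sketch(struct il_priv *il, int chnl)
{
	il_wr(il, FH49_TCSR_CHNL_TX_CONFIG_REG(chnl),
	      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
	      FH49_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
}
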
+
+/**
+ * Tx Shared Status Registers (TSSR)
+ *
+ * After stopping Tx DMA channel (writing 0 to
+ * FH49_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
+ * FH49_TSSR_TX_STATUS_REG until selected Tx channel is idle
+ * (channel's buffers empty | no pending requests).
+ *
+ * Bit fields:
+ * 31-24: 1 = Channel buffers empty (channel 7:0)
+ * 23-16: 1 = No pending requests (channel 7:0)
+ */
+#define FH49_TSSR_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0xEA0)
+#define FH49_TSSR_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0xEC0)
+
+#define FH49_TSSR_TX_STATUS_REG (FH49_TSSR_LOWER_BOUND + 0x010)
+
+/**
+ * Bit fields for the TSSR (Tx Shared Status & Control) error status register:
+ * 31: Indicates an address error when accessing internal memory
+ * uCode/driver must write "1" in order to clear this flag
+ * 30: Indicates that Host did not send the expected number of dwords to FH
+ * uCode/driver must write "1" in order to clear this flag
+ * 16-9: Each status bit is for one channel. Indicates that an (Error) ActDMA
+ * command was received from the scheduler while the TRB was already full
+ * with a previous command
+ * uCode/driver must write "1" in order to clear this flag
+ * 7-0: Each status bit indicates a channel's TxCredit error. When an error
+ * bit is set, it indicates that the FH has received a full indication
+ * from the RTC TxFIFO and the current value of the TxCredit counter was
+ * not equal to zero. This means that the credit mechanism was not
+ * synchronized to the TxFIFO status
+ * uCode/driver must write "1" in order to clear this flag
+ */
+#define FH49_TSSR_TX_ERROR_REG (FH49_TSSR_LOWER_BOUND + 0x018)
+
+#define FH49_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
+
+/* Tx service channels */
+#define FH49_SRVC_CHNL (9)
+#define FH49_SRVC_LOWER_BOUND (FH49_MEM_LOWER_BOUND + 0x9C8)
+#define FH49_SRVC_UPPER_BOUND (FH49_MEM_LOWER_BOUND + 0x9D0)
+#define FH49_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
+ (FH49_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
+
+#define FH49_TX_CHICKEN_BITS_REG (FH49_MEM_LOWER_BOUND + 0xE98)
+/* Instruct FH to increment the retry count of a packet when
+ * it is brought from memory to the TX FIFO
+ */
+#define FH49_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
+
+/* Keep Warm Size */
+#define IL_KW_SIZE 0x1000 /* 4k */
+
+#endif /* __il_4965_h__ */
diff --git a/drivers/net/wireless/iwlegacy/Kconfig b/drivers/net/wireless/iwlegacy/Kconfig
index aef65cd..05bd375 100644
--- a/drivers/net/wireless/iwlegacy/Kconfig
+++ b/drivers/net/wireless/iwlegacy/Kconfig
@@ -1,4 +1,4 @@
-config IWLWIFI_LEGACY
+config IWLEGACY
tristate
select FW_LOADER
select NEW_LEDS
@@ -7,13 +7,13 @@ config IWLWIFI_LEGACY
select MAC80211_LEDS
menu "Debugging Options"
- depends on IWLWIFI_LEGACY
+ depends on IWLEGACY
-config IWLWIFI_LEGACY_DEBUG
- bool "Enable full debugging output in 4965 and 3945 drivers"
- depends on IWLWIFI_LEGACY
+config IWLEGACY_DEBUG
+ bool "Enable full debugging output in iwlegacy (iwl 3945/4965) drivers"
+ depends on IWLEGACY
---help---
- This option will enable debug tracing output for the iwlwifilegacy
+ This option will enable debug tracing output for the iwlegacy
drivers.
This will result in the kernel module being ~100k larger. You can
@@ -29,43 +29,26 @@ config IWLWIFI_LEGACY_DEBUG
% echo 0x43fff > /sys/class/net/wlan0/device/debug_level
You can find the list of debug mask values in:
- drivers/net/wireless/iwlwifilegacy/iwl-debug.h
+ drivers/net/wireless/iwlegacy/common.h
If this is your first time using this driver, you should say Y here
as the debug information can assist others in helping you resolve
any problems you may encounter.
-config IWLWIFI_LEGACY_DEBUGFS
- bool "4965 and 3945 debugfs support"
- depends on IWLWIFI_LEGACY && MAC80211_DEBUGFS
+config IWLEGACY_DEBUGFS
+ bool "iwlegacy (iwl 3945/4965) debugfs support"
+ depends on IWLEGACY && MAC80211_DEBUGFS
---help---
- Enable creation of debugfs files for the iwlwifilegacy drivers. This
+ Enable creation of debugfs files for the iwlegacy drivers. This
is a low-impact option that allows getting insight into the
driver's state at runtime.
-config IWLWIFI_LEGACY_DEVICE_TRACING
- bool "iwlwifilegacy legacy device access tracing"
- depends on IWLWIFI_LEGACY
- depends on EVENT_TRACING
- help
- Say Y here to trace all commands, including TX frames and IO
- accesses, sent to the device. If you say yes, iwlwifilegacy will
- register with the ftrace framework for event tracing and dump
- all this information to the ringbuffer, you may need to
- increase the ringbuffer size. See the ftrace documentation
- for more information.
-
- When tracing is not enabled, this option still has some
- (though rather small) overhead.
-
- If unsure, say Y so we can help you better when problems
- occur.
endmenu
config IWL4965
tristate "Intel Wireless WiFi 4965AGN (iwl4965)"
depends on PCI && MAC80211
- select IWLWIFI_LEGACY
+ select IWLEGACY
---help---
This option enables support for
@@ -93,7 +76,7 @@ config IWL4965
config IWL3945
tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
depends on PCI && MAC80211
- select IWLWIFI_LEGACY
+ select IWLEGACY
---help---
Select to build the driver supporting the:
diff --git a/drivers/net/wireless/iwlegacy/Makefile b/drivers/net/wireless/iwlegacy/Makefile
index d56aeb3..c985a01 100644
--- a/drivers/net/wireless/iwlegacy/Makefile
+++ b/drivers/net/wireless/iwlegacy/Makefile
@@ -1,25 +1,17 @@
-obj-$(CONFIG_IWLWIFI_LEGACY) += iwl-legacy.o
-iwl-legacy-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
-iwl-legacy-objs += iwl-rx.o iwl-tx.o iwl-sta.o
-iwl-legacy-objs += iwl-scan.o iwl-led.o
-iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-debugfs.o
-iwl-legacy-$(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) += iwl-devtrace.o
+obj-$(CONFIG_IWLEGACY) += iwlegacy.o
+iwlegacy-objs := common.o
+iwlegacy-$(CONFIG_IWLEGACY_DEBUGFS) += debug.o
-iwl-legacy-objs += $(iwl-legacy-m)
-
-CFLAGS_iwl-devtrace.o := -I$(src)
+iwlegacy-objs += $(iwlegacy-m)
# 4965
obj-$(CONFIG_IWL4965) += iwl4965.o
-iwl4965-objs := iwl-4965.o iwl4965-base.o iwl-4965-rs.o iwl-4965-led.o
-iwl4965-objs += iwl-4965-ucode.o iwl-4965-tx.o
-iwl4965-objs += iwl-4965-lib.o iwl-4965-rx.o iwl-4965-calib.o
-iwl4965-objs += iwl-4965-sta.o iwl-4965-eeprom.o
-iwl4965-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-4965-debugfs.o
+iwl4965-objs := 4965.o 4965-mac.o 4965-rs.o 4965-calib.o
+iwl4965-$(CONFIG_IWLEGACY_DEBUGFS) += 4965-debug.o
# 3945
obj-$(CONFIG_IWL3945) += iwl3945.o
-iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
-iwl3945-$(CONFIG_IWLWIFI_LEGACY_DEBUGFS) += iwl-3945-debugfs.o
+iwl3945-objs := 3945-mac.o 3945.o 3945-rs.o
+iwl3945-$(CONFIG_IWLEGACY_DEBUGFS) += 3945-debug.o
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwlegacy/iwl-commands.h b/drivers/net/wireless/iwlegacy/commands.h
index 8990405..25dd7d2 100644
--- a/drivers/net/wireless/iwlegacy/iwl-commands.h
+++ b/drivers/net/wireless/iwlegacy/commands.h
@@ -60,100 +60,96 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
-/*
- * Please use this file (iwl-commands.h) only for uCode API definitions.
- * Please use iwl-xxxx-hw.h for hardware-related definitions.
- * Please use iwl-dev.h for driver implementation definitions.
- */
-#ifndef __iwl_legacy_commands_h__
-#define __iwl_legacy_commands_h__
+#ifndef __il_commands_h__
+#define __il_commands_h__
-struct iwl_priv;
+#include <linux/ieee80211.h>
-/* uCode version contains 4 values: Major/Minor/API/Serial */
-#define IWL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
-#define IWL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
-#define IWL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
-#define IWL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
+struct il_priv;
+/* uCode version contains 4 values: Major/Minor/API/Serial */
+#define IL_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
+#define IL_UCODE_MINOR(ver) (((ver) & 0x00FF0000) >> 16)
+#define IL_UCODE_API(ver) (((ver) & 0x0000FF00) >> 8)
+#define IL_UCODE_SERIAL(ver) ((ver) & 0x000000FF)
/* Tx rates */
-#define IWL_CCK_RATES 4
-#define IWL_OFDM_RATES 8
-#define IWL_MAX_RATES (IWL_CCK_RATES + IWL_OFDM_RATES)
+#define IL_CCK_RATES 4
+#define IL_OFDM_RATES 8
+#define IL_MAX_RATES (IL_CCK_RATES + IL_OFDM_RATES)
enum {
- REPLY_ALIVE = 0x1,
- REPLY_ERROR = 0x2,
+ N_ALIVE = 0x1,
+ N_ERROR = 0x2,
/* RXON and QOS commands */
- REPLY_RXON = 0x10,
- REPLY_RXON_ASSOC = 0x11,
- REPLY_QOS_PARAM = 0x13,
- REPLY_RXON_TIMING = 0x14,
+ C_RXON = 0x10,
+ C_RXON_ASSOC = 0x11,
+ C_QOS_PARAM = 0x13,
+ C_RXON_TIMING = 0x14,
/* Multi-Station support */
- REPLY_ADD_STA = 0x18,
- REPLY_REMOVE_STA = 0x19,
+ C_ADD_STA = 0x18,
+ C_REM_STA = 0x19,
/* Security */
- REPLY_WEPKEY = 0x20,
+ C_WEPKEY = 0x20,
/* RX, TX, LEDs */
- REPLY_3945_RX = 0x1b, /* 3945 only */
- REPLY_TX = 0x1c,
- REPLY_RATE_SCALE = 0x47, /* 3945 only */
- REPLY_LEDS_CMD = 0x48,
- REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
+ N_3945_RX = 0x1b, /* 3945 only */
+ C_TX = 0x1c,
+ C_RATE_SCALE = 0x47, /* 3945 only */
+ C_LEDS = 0x48,
+ C_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 */
/* 802.11h related */
- REPLY_CHANNEL_SWITCH = 0x72,
- CHANNEL_SWITCH_NOTIFICATION = 0x73,
- REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74,
- SPECTRUM_MEASURE_NOTIFICATION = 0x75,
+ C_CHANNEL_SWITCH = 0x72,
+ N_CHANNEL_SWITCH = 0x73,
+ C_SPECTRUM_MEASUREMENT = 0x74,
+ N_SPECTRUM_MEASUREMENT = 0x75,
/* Power Management */
- POWER_TABLE_CMD = 0x77,
- PM_SLEEP_NOTIFICATION = 0x7A,
- PM_DEBUG_STATISTIC_NOTIFIC = 0x7B,
+ C_POWER_TBL = 0x77,
+ N_PM_SLEEP = 0x7A,
+ N_PM_DEBUG_STATS = 0x7B,
/* Scan commands and notifications */
- REPLY_SCAN_CMD = 0x80,
- REPLY_SCAN_ABORT_CMD = 0x81,
- SCAN_START_NOTIFICATION = 0x82,
- SCAN_RESULTS_NOTIFICATION = 0x83,
- SCAN_COMPLETE_NOTIFICATION = 0x84,
+ C_SCAN = 0x80,
+ C_SCAN_ABORT = 0x81,
+ N_SCAN_START = 0x82,
+ N_SCAN_RESULTS = 0x83,
+ N_SCAN_COMPLETE = 0x84,
/* IBSS/AP commands */
- BEACON_NOTIFICATION = 0x90,
- REPLY_TX_BEACON = 0x91,
+ N_BEACON = 0x90,
+ C_TX_BEACON = 0x91,
/* Miscellaneous commands */
- REPLY_TX_PWR_TABLE_CMD = 0x97,
+ C_TX_PWR_TBL = 0x97,
/* Bluetooth device coexistence config command */
- REPLY_BT_CONFIG = 0x9b,
+ C_BT_CONFIG = 0x9b,
/* Statistics */
- REPLY_STATISTICS_CMD = 0x9c,
- STATISTICS_NOTIFICATION = 0x9d,
+ C_STATS = 0x9c,
+ N_STATS = 0x9d,
/* RF-KILL commands and notifications */
- CARD_STATE_NOTIFICATION = 0xa1,
+ N_CARD_STATE = 0xa1,
/* Missed beacons notification */
- MISSED_BEACONS_NOTIFICATION = 0xa2,
+ N_MISSED_BEACONS = 0xa2,
- REPLY_CT_KILL_CONFIG_CMD = 0xa4,
- SENSITIVITY_CMD = 0xa8,
- REPLY_PHY_CALIBRATION_CMD = 0xb0,
- REPLY_RX_PHY_CMD = 0xc0,
- REPLY_RX_MPDU_CMD = 0xc1,
- REPLY_RX = 0xc3,
- REPLY_COMPRESSED_BA = 0xc5,
+ C_CT_KILL_CONFIG = 0xa4,
+ C_SENSITIVITY = 0xa8,
+ C_PHY_CALIBRATION = 0xb0,
+ N_RX_PHY = 0xc0,
+ N_RX_MPDU = 0xc1,
+ N_RX = 0xc3,
+ N_COMPRESSED_BA = 0xc5,
- REPLY_MAX = 0xff
+ IL_CN_MAX = 0xff
};
/******************************************************************************
@@ -163,25 +159,25 @@ enum {
*
*****************************************************************************/
-/* iwl_cmd_header flags value */
-#define IWL_CMD_FAILED_MSK 0x40
+/* il_cmd_header flags value */
+#define IL_CMD_FAILED_MSK 0x40
#define SEQ_TO_QUEUE(s) (((s) >> 8) & 0x1f)
#define QUEUE_TO_SEQ(q) (((q) & 0x1f) << 8)
-#define SEQ_TO_INDEX(s) ((s) & 0xff)
-#define INDEX_TO_SEQ(i) ((i) & 0xff)
+#define SEQ_TO_IDX(s) ((s) & 0xff)
+#define IDX_TO_SEQ(i) ((i) & 0xff)
#define SEQ_HUGE_FRAME cpu_to_le16(0x4000)
#define SEQ_RX_FRAME cpu_to_le16(0x8000)
/**
- * struct iwl_cmd_header
+ * struct il_cmd_header
*
* This header format appears in the beginning of each command sent from the
* driver, and each response/notification received from uCode.
*/
-struct iwl_cmd_header {
- u8 cmd; /* Command ID: REPLY_RXON, etc. */
- u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
+struct il_cmd_header {
+ u8 cmd; /* Command ID: C_RXON, etc. */
+ u8 flags; /* 0:5 reserved, 6 abort, 7 internal */
/*
* The driver sets up the sequence number to values of its choosing.
* uCode does not use this value, but passes it back to the driver
@@ -192,29 +188,28 @@ struct iwl_cmd_header {
* There is one exception: uCode sets bit 15 when it originates
* the response/notification, i.e. when the response/notification
* is not a direct response to a command sent by the driver. For
- * example, uCode issues REPLY_3945_RX when it sends a received frame
+ * example, uCode issues N_3945_RX when it sends a received frame
* to the driver; it is not a direct response to any driver command.
*
* The Linux driver uses the following format:
*
- * 0:7 tfd index - position within TX queue
- * 8:12 TX queue id
- * 13 reserved
- * 14 huge - driver sets this to indicate command is in the
- * 'huge' storage at the end of the command buffers
- * 15 unsolicited RX or uCode-originated notification
- */
+ * 0:7 tfd idx - position within TX queue
+ * 8:12 TX queue id
+ * 13 reserved
+ * 14 huge - driver sets this to indicate command is in the
+ * 'huge' storage at the end of the command buffers
+ * 15 unsolicited RX or uCode-originated notification
+ */
__le16 sequence;
/* command or response/notification data follows immediately */
u8 data[0];
} __packed;
-
/**
- * struct iwl3945_tx_power
+ * struct il3945_tx_power
*
- * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_SCAN_CMD, REPLY_CHANNEL_SWITCH
+ * Used in C_TX_PWR_TBL, C_SCAN, C_CHANNEL_SWITCH
*
* Each entry contains two values:
* 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
@@ -223,21 +218,21 @@ struct iwl_cmd_header {
* 2) Radio gain. This sets the analog gain of the radio Tx path.
* It is a coarser setting, and behaves in a logarithmic (dB) fashion.
*
- * Driver obtains values from struct iwl3945_tx_power power_gain_table[][].
+ * Driver obtains values from struct il3945_tx_power power_gain_table[][].
*/
-struct iwl3945_tx_power {
+struct il3945_tx_power {
u8 tx_gain; /* gain for analog radio */
u8 dsp_atten; /* gain for DSP */
} __packed;
/**
- * struct iwl3945_power_per_rate
+ * struct il3945_power_per_rate
*
- * Used in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ * Used in C_TX_PWR_TBL, C_CHANNEL_SWITCH
*/
-struct iwl3945_power_per_rate {
+struct il3945_power_per_rate {
u8 rate; /* plcp */
- struct iwl3945_tx_power tpc;
+ struct il3945_tx_power tpc;
u8 reserved;
} __packed;
@@ -245,10 +240,10 @@ struct iwl3945_power_per_rate {
* iwl4965 rate_n_flags bit fields
*
* rate_n_flags format is used in following iwl4965 commands:
- * REPLY_RX (response only)
- * REPLY_RX_MPDU (response only)
- * REPLY_TX (both command and response)
- * REPLY_TX_LINK_QUALITY_CMD
+ * N_RX (response only)
+ * N_RX_MPDU (response only)
+ * C_TX (both command and response)
+ * C_TX_LINK_QUALITY_CMD
*
* High-throughput (HT) rate format for bits 7:0 (bit 8 must be "1"):
* 2-0: 0) 6 Mbps
@@ -326,17 +321,17 @@ struct iwl3945_power_per_rate {
#define RATE_MCS_ANT_ABC_MSK (RATE_MCS_ANT_AB_MSK | RATE_MCS_ANT_C_MSK)
#define RATE_ANT_NUM 3
-#define POWER_TABLE_NUM_ENTRIES 33
-#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32
-#define POWER_TABLE_CCK_ENTRY 32
+#define POWER_TBL_NUM_ENTRIES 33
+#define POWER_TBL_NUM_HT_OFDM_ENTRIES 32
+#define POWER_TBL_CCK_ENTRY 32
-#define IWL_PWR_NUM_HT_OFDM_ENTRIES 24
-#define IWL_PWR_CCK_ENTRIES 2
+#define IL_PWR_NUM_HT_OFDM_ENTRIES 24
+#define IL_PWR_CCK_ENTRIES 2
/**
- * union iwl4965_tx_power_dual_stream
+ * union il4965_tx_power_dual_stream
*
- * Host format used for REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ * Host format used for C_TX_PWR_TBL, C_CHANNEL_SWITCH
* Use __le32 version (struct tx_power_dual_stream) when building command.
*
* Driver provides radio gain and DSP attenuation settings to device in pairs,
@@ -347,9 +342,9 @@ struct iwl3945_power_per_rate {
* For MIMO rates, one value may be different from the other,
* in order to balance the Tx output between the two transmitters.
*
- * See more details in doc for TXPOWER in iwl-4965-hw.h.
+ * See more details in doc for TXPOWER in 4965.h.
*/
-union iwl4965_tx_power_dual_stream {
+union il4965_tx_power_dual_stream {
struct {
u8 radio_tx_gain[2];
u8 dsp_predis_atten[2];
@@ -360,21 +355,21 @@ union iwl4965_tx_power_dual_stream {
/**
* struct tx_power_dual_stream
*
- * Table entries in REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ * Table entries in C_TX_PWR_TBL, C_CHANNEL_SWITCH
*
- * Same format as iwl_tx_power_dual_stream, but __le32
+ * Same format as il_tx_power_dual_stream, but __le32
*/
struct tx_power_dual_stream {
__le32 dw;
} __packed;
/**
- * struct iwl4965_tx_power_db
+ * struct il4965_tx_power_db
*
- * Entire table within REPLY_TX_PWR_TABLE_CMD, REPLY_CHANNEL_SWITCH
+ * Entire table within C_TX_PWR_TBL, C_CHANNEL_SWITCH
*/
-struct iwl4965_tx_power_db {
- struct tx_power_dual_stream power_tbl[POWER_TABLE_NUM_ENTRIES];
+struct il4965_tx_power_db {
+ struct tx_power_dual_stream power_tbl[POWER_TBL_NUM_ENTRIES];
} __packed;
/******************************************************************************
@@ -387,7 +382,7 @@ struct iwl4965_tx_power_db {
#define INITIALIZE_SUBTYPE (9)
/*
- * ("Initialize") REPLY_ALIVE = 0x1 (response only, not a command)
+ * ("Initialize") N_ALIVE = 0x1 (response only, not a command)
*
* uCode issues this "initialize alive" notification once the initialization
* uCode image has completed its work, and is ready to load the runtime image.
@@ -410,7 +405,7 @@ struct iwl4965_tx_power_db {
* 3) Tx gain compensation to balance 4965's 2 Tx chains for MIMO operation,
* for each of 5 frequency ranges.
*/
-struct iwl_init_alive_resp {
+struct il_init_alive_resp {
u8 ucode_minor;
u8 ucode_major;
__le16 reserved1;
@@ -433,9 +428,8 @@ struct iwl_init_alive_resp {
* 2 Tx chains */
} __packed;
-
/**
- * REPLY_ALIVE = 0x1 (response only, not a command)
+ * N_ALIVE = 0x1 (response only, not a command)
*
* uCode issues this "alive" notification once the runtime image is ready
* to receive commands from the driver. This is the *second* "alive"
@@ -454,7 +448,7 @@ struct iwl_init_alive_resp {
* __le32 log_size; log capacity (in number of entries)
* __le32 type; (1) timestamp with each entry, (0) no timestamp
* __le32 wraps; # times uCode has wrapped to top of circular buffer
- * __le32 write_index; next circular buffer entry that uCode would fill
+ * __le32 write_idx; next circular buffer entry that uCode would fill
*
* The header is followed by the circular buffer of log entries. Entries
* with timestamps have the following format:
@@ -511,13 +505,13 @@ struct iwl_init_alive_resp {
* The Linux driver can print both logs to the system log when a uCode error
* occurs.
*/
-struct iwl_alive_resp {
+struct il_alive_resp {
u8 ucode_minor;
u8 ucode_major;
__le16 reserved1;
u8 sw_rev[8];
u8 ver_type;
- u8 ver_subtype; /* not "9" for runtime alive */
+ u8 ver_subtype; /* not "9" for runtime alive */
__le16 reserved2;
__le32 log_event_table_ptr; /* SRAM address for event log */
__le32 error_event_table_ptr; /* SRAM address for error log */
@@ -526,9 +520,9 @@ struct iwl_alive_resp {
} __packed;
/*
- * REPLY_ERROR = 0x2 (response only, not a command)
+ * N_ERROR = 0x2 (response only, not a command)
*/
-struct iwl_error_resp {
+struct il_error_resp {
__le32 error_type;
u8 cmd_id;
u8 reserved1;
@@ -554,7 +548,6 @@ enum {
RXON_DEV_TYPE_SNIFFER = 6,
};
-
#define RXON_RX_CHAIN_DRIVER_FORCE_MSK cpu_to_le16(0x1 << 0)
#define RXON_RX_CHAIN_DRIVER_FORCE_POS (0)
#define RXON_RX_CHAIN_VALID_MSK cpu_to_le16(0x7 << 1)
@@ -593,7 +586,6 @@ enum {
* (according to ON_AIR deassertion) */
#define RXON_FLG_TSF2HOST_MSK cpu_to_le32(1 << 15)
-
/* HT flags */
#define RXON_FLG_CTRL_CHANNEL_LOC_POS (22)
#define RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK cpu_to_le32(0x1 << 22)
@@ -640,7 +632,7 @@ enum {
#define RXON_FILTER_BCON_AWARE_MSK cpu_to_le32(1 << 6)
/**
- * REPLY_RXON = 0x10 (command, has simple generic response)
+ * C_RXON = 0x10 (command, has simple generic response)
*
* RXON tunes the radio tuner to a service channel, and sets up a number
* of parameters that are used primarily for Rx, but also for Tx operations.
@@ -653,11 +645,11 @@ enum {
* channel.
*
* NOTE: All RXONs wipe clean the internal txpower table. Driver must
- * issue a new REPLY_TX_PWR_TABLE_CMD after each REPLY_RXON (0x10),
+ * issue a new C_TX_PWR_TBL after each C_RXON (0x10),
* regardless of whether RXON_FILTER_ASSOC_MSK is set.
*/
-struct iwl3945_rxon_cmd {
+struct il3945_rxon_cmd {
u8 node_addr[6];
__le16 reserved1;
u8 bssid_addr[6];
@@ -676,7 +668,7 @@ struct iwl3945_rxon_cmd {
__le16 reserved5;
} __packed;
-struct iwl4965_rxon_cmd {
+struct il4965_rxon_cmd {
u8 node_addr[6];
__le16 reserved1;
u8 bssid_addr[6];
@@ -699,7 +691,7 @@ struct iwl4965_rxon_cmd {
/* Create a common rxon cmd which will be typecast into the 3945 or 4965
* specific rxon cmd, depending on where it is called from.
*/
-struct iwl_legacy_rxon_cmd {
+struct il_rxon_cmd {
u8 node_addr[6];
__le16 reserved1;
u8 bssid_addr[6];
@@ -721,11 +713,10 @@ struct iwl_legacy_rxon_cmd {
u8 reserved5;
} __packed;
-
/*
- * REPLY_RXON_ASSOC = 0x11 (command, has simple generic response)
+ * C_RXON_ASSOC = 0x11 (command, has simple generic response)
*/
-struct iwl3945_rxon_assoc_cmd {
+struct il3945_rxon_assoc_cmd {
__le32 flags;
__le32 filter_flags;
u8 ofdm_basic_rates;
@@ -733,7 +724,7 @@ struct iwl3945_rxon_assoc_cmd {
__le16 reserved;
} __packed;
-struct iwl4965_rxon_assoc_cmd {
+struct il4965_rxon_assoc_cmd {
__le32 flags;
__le32 filter_flags;
u8 ofdm_basic_rates;
@@ -744,17 +735,17 @@ struct iwl4965_rxon_assoc_cmd {
__le16 reserved;
} __packed;
-#define IWL_CONN_MAX_LISTEN_INTERVAL 10
-#define IWL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */
-#define IWL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */
+#define IL_CONN_MAX_LISTEN_INTERVAL 10
+#define IL_MAX_UCODE_BEACON_INTERVAL 4 /* 4096 */
+#define IL39_MAX_UCODE_BEACON_INTERVAL 1 /* 1024 */
/*
- * REPLY_RXON_TIMING = 0x14 (command, has simple generic response)
+ * C_RXON_TIMING = 0x14 (command, has simple generic response)
*/
-struct iwl_rxon_time_cmd {
+struct il_rxon_time_cmd {
__le64 timestamp;
__le16 beacon_interval;
- __le16 atim_window;
+ __le16 atim_win;
__le32 beacon_init_val;
__le16 listen_interval;
u8 dtim_period;
@@ -762,32 +753,32 @@ struct iwl_rxon_time_cmd {
} __packed;
/*
- * REPLY_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
+ * C_CHANNEL_SWITCH = 0x72 (command, has simple generic response)
*/
-struct iwl3945_channel_switch_cmd {
+struct il3945_channel_switch_cmd {
u8 band;
u8 expect_beacon;
__le16 channel;
__le32 rxon_flags;
__le32 rxon_filter_flags;
__le32 switch_time;
- struct iwl3945_power_per_rate power[IWL_MAX_RATES];
+ struct il3945_power_per_rate power[IL_MAX_RATES];
} __packed;
-struct iwl4965_channel_switch_cmd {
+struct il4965_channel_switch_cmd {
u8 band;
u8 expect_beacon;
__le16 channel;
__le32 rxon_flags;
__le32 rxon_filter_flags;
__le32 switch_time;
- struct iwl4965_tx_power_db tx_power;
+ struct il4965_tx_power_db tx_power;
} __packed;
/*
- * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
+ * N_CHANNEL_SWITCH = 0x73 (notification only, not a command)
*/
-struct iwl_csa_notification {
+struct il_csa_notification {
__le16 band;
__le16 channel;
__le32 status; /* 0 - OK, 1 - fail */
@@ -800,22 +791,22 @@ struct iwl_csa_notification {
*****************************************************************************/
/**
- * struct iwl_ac_qos -- QOS timing params for REPLY_QOS_PARAM
- * One for each of 4 EDCA access categories in struct iwl_qosparam_cmd
+ * struct il_ac_qos -- QOS timing params for C_QOS_PARAM
+ * One for each of 4 EDCA access categories in struct il_qosparam_cmd
*
- * @cw_min: Contention window, start value in numbers of slots.
+ * @cw_min: Contention win, start value in numbers of slots.
* Should be a power-of-2, minus 1. Device's default is 0x0f.
- * @cw_max: Contention window, max value in numbers of slots.
+ * @cw_max: Contention win, max value in numbers of slots.
* Should be a power-of-2, minus 1. Device's default is 0x3f.
* @aifsn: Number of slots in Arbitration Interframe Space (before
* performing random backoff timing prior to Tx). Device default 1.
* @edca_txop: Length of Tx opportunity, in uSecs. Device default is 0.
*
- * Device will automatically increase contention window by (2*CW) + 1 for each
+ * Device will automatically increase contention win by (2*CW) + 1 for each
* transmission retry. Device uses cw_max as a bit mask, ANDed with new CW
* value, to cap the CW value.
*/
-struct iwl_ac_qos {
+struct il_ac_qos {
__le16 cw_min;
__le16 cw_max;
u8 aifsn;
@@ -832,14 +823,14 @@ struct iwl_ac_qos {
#define AC_NUM 4
/*
- * REPLY_QOS_PARAM = 0x13 (command, has simple generic response)
+ * C_QOS_PARAM = 0x13 (command, has simple generic response)
*
* This command sets up timings for each of the 4 prioritized EDCA Tx FIFOs
* 0: Background, 1: Best Effort, 2: Video, 3: Voice.
*/
-struct iwl_qosparam_cmd {
+struct il_qosparam_cmd {
__le32 qos_flags;
- struct iwl_ac_qos ac[AC_NUM];
+ struct il_ac_qos ac[AC_NUM];
} __packed;
/******************************************************************************
@@ -852,15 +843,15 @@ struct iwl_qosparam_cmd {
*/
/* Special, dedicated locations within device's station table */
-#define IWL_AP_ID 0
-#define IWL_STA_ID 2
-#define IWL3945_BROADCAST_ID 24
-#define IWL3945_STATION_COUNT 25
-#define IWL4965_BROADCAST_ID 31
-#define IWL4965_STATION_COUNT 32
+#define IL_AP_ID 0
+#define IL_STA_ID 2
+#define IL3945_BROADCAST_ID 24
+#define IL3945_STATION_COUNT 25
+#define IL4965_BROADCAST_ID 31
+#define IL4965_STATION_COUNT 32
-#define IWL_STATION_COUNT 32 /* MAX(3945,4965)*/
-#define IWL_INVALID_STATION 255
+#define IL_STATION_COUNT 32 /* MAX(3945,4965) */
+#define IL_INVALID_STATION 255
#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
@@ -901,11 +892,11 @@ struct iwl_qosparam_cmd {
#define STA_MODIFY_DELBA_TID_MSK 0x10
#define STA_MODIFY_SLEEP_TX_COUNT_MSK 0x20
-/* Receiver address (actually, Rx station's index into station table),
+/* Receiver address (actually, Rx station's idx into station table),
* combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
-struct iwl4965_keyinfo {
+struct il4965_keyinfo {
__le16 key_flags;
u8 tkip_rx_tsc_byte2; /* TSC[2] for key mix ph1 detection */
u8 reserved1;
@@ -918,12 +909,12 @@ struct iwl4965_keyinfo {
/**
* struct sta_id_modify
* @addr[ETH_ALEN]: station's MAC address
- * @sta_id: index of station in uCode's station table
+ * @sta_id: idx of station in uCode's station table
* @modify_mask: STA_MODIFY_*, 1: modify, 0: don't change
*
- * Driver selects unused table index when adding new station,
- * or the index to a pre-existing station entry when modifying that station.
- * Some indexes have special purposes (IWL_AP_ID, index 0, is for AP).
+ * Driver selects unused table idx when adding new station,
+ * or the idx to a pre-existing station entry when modifying that station.
+ * Some idxes have special purposes (IL_AP_ID, idx 0, is for AP).
*
* modify_mask flags select which parameters to modify vs. leave alone.
*/
@@ -936,15 +927,15 @@ struct sta_id_modify {
} __packed;
/*
- * REPLY_ADD_STA = 0x18 (command)
+ * C_ADD_STA = 0x18 (command)
*
* The device contains an internal table of per-station information,
* with info on security keys, aggregation parameters, and Tx rates for
* initial Tx attempt and any retries (4965 devices uses
- * REPLY_TX_LINK_QUALITY_CMD,
- * 3945 uses REPLY_RATE_SCALE to set up rate tables).
+ * C_TX_LINK_QUALITY_CMD,
+ * 3945 uses C_RATE_SCALE to set up rate tables).
*
- * REPLY_ADD_STA sets up the table entry for one station, either creating
+ * C_ADD_STA sets up the table entry for one station, either creating
* a new entry, or modifying a pre-existing one.
*
* NOTE: RXON command (without "associated" bit set) wipes the station table
@@ -954,20 +945,20 @@ struct sta_id_modify {
* their own txpower/rate setup data).
*
* When getting started on a new channel, driver must set up the
- * IWL_BROADCAST_ID entry (last entry in the table). For a client
+ * IL_BROADCAST_ID entry (last entry in the table). For a client
* station in a BSS, once an AP is selected, driver sets up the AP STA
- * in the IWL_AP_ID entry (1st entry in the table). BROADCAST and AP
+ * in the IL_AP_ID entry (1st entry in the table). BROADCAST and AP
* are all that are needed for a BSS client station. If the device is
* used as AP, or in an IBSS network, driver must set up station table
- * entries for all STAs in network, starting with index IWL_STA_ID.
+ * entries for all STAs in network, starting with idx IL_STA_ID.
*/
-struct iwl3945_addsta_cmd {
+struct il3945_addsta_cmd {
u8 mode; /* 1: modify existing, 0: add new station */
u8 reserved[3];
struct sta_id_modify sta;
- struct iwl4965_keyinfo key;
- __le32 station_flags; /* STA_FLG_* */
+ struct il4965_keyinfo key;
+ __le32 station_flags; /* STA_FLG_* */
__le32 station_flags_msk; /* STA_FLG_* */
/* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
@@ -990,12 +981,12 @@ struct iwl3945_addsta_cmd {
__le16 add_immediate_ba_ssn;
} __packed;
-struct iwl4965_addsta_cmd {
+struct il4965_addsta_cmd {
u8 mode; /* 1: modify existing, 0: add new station */
u8 reserved[3];
struct sta_id_modify sta;
- struct iwl4965_keyinfo key;
- __le32 station_flags; /* STA_FLG_* */
+ struct il4965_keyinfo key;
+ __le32 station_flags; /* STA_FLG_* */
__le32 station_flags_msk; /* STA_FLG_* */
/* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
@@ -1003,7 +994,7 @@ struct iwl4965_addsta_cmd {
* Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
__le16 tid_disable_tx;
- __le16 reserved1;
+ __le16 reserved1;
/* TID for which to add block-ack support.
* Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
@@ -1028,12 +1019,12 @@ struct iwl4965_addsta_cmd {
} __packed;
/* Wrapper struct for 3945 and 4965 addsta_cmd structures */
-struct iwl_legacy_addsta_cmd {
+struct il_addsta_cmd {
u8 mode; /* 1: modify existing, 0: add new station */
u8 reserved[3];
struct sta_id_modify sta;
- struct iwl4965_keyinfo key;
- __le32 station_flags; /* STA_FLG_* */
+ struct il4965_keyinfo key;
+ __le32 station_flags; /* STA_FLG_* */
__le32 station_flags_msk; /* STA_FLG_* */
/* bit field to disable (1) or enable (0) Tx for Traffic ID (TID)
@@ -1041,7 +1032,7 @@ struct iwl_legacy_addsta_cmd {
* Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
__le16 tid_disable_tx;
- __le16 rate_n_flags; /* 3945 only */
+ __le16 rate_n_flags; /* 3945 only */
/* TID for which to add block-ack support.
* Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
@@ -1065,51 +1056,50 @@ struct iwl_legacy_addsta_cmd {
__le16 reserved2;
} __packed;
-
#define ADD_STA_SUCCESS_MSK 0x1
-#define ADD_STA_NO_ROOM_IN_TABLE 0x2
+#define ADD_STA_NO_ROOM_IN_TBL 0x2
#define ADD_STA_NO_BLOCK_ACK_RESOURCE 0x4
#define ADD_STA_MODIFY_NON_EXIST_STA 0x8
/*
- * REPLY_ADD_STA = 0x18 (response)
+ * C_ADD_STA = 0x18 (response)
*/
-struct iwl_add_sta_resp {
- u8 status; /* ADD_STA_* */
+struct il_add_sta_resp {
+ u8 status; /* ADD_STA_* */
} __packed;
#define REM_STA_SUCCESS_MSK 0x1
/*
- * REPLY_REM_STA = 0x19 (response)
+ * C_REM_STA = 0x19 (response)
*/
-struct iwl_rem_sta_resp {
+struct il_rem_sta_resp {
u8 status;
} __packed;
/*
- * REPLY_REM_STA = 0x19 (command)
+ * C_REM_STA = 0x19 (command)
*/
-struct iwl_rem_sta_cmd {
- u8 num_sta; /* number of removed stations */
+struct il_rem_sta_cmd {
+ u8 num_sta; /* number of removed stations */
u8 reserved[3];
- u8 addr[ETH_ALEN]; /* MAC addr of the first station */
+ u8 addr[ETH_ALEN]; /* MAC addr of the first station */
u8 reserved2[2];
} __packed;
-#define IWL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0))
-#define IWL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1))
-#define IWL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2))
-#define IWL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3))
-#define IWL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00)
+#define IL_TX_FIFO_BK_MSK cpu_to_le32(BIT(0))
+#define IL_TX_FIFO_BE_MSK cpu_to_le32(BIT(1))
+#define IL_TX_FIFO_VI_MSK cpu_to_le32(BIT(2))
+#define IL_TX_FIFO_VO_MSK cpu_to_le32(BIT(3))
+#define IL_AGG_TX_QUEUE_MSK cpu_to_le32(0xffc00)
-#define IWL_DROP_SINGLE 0
-#define IWL_DROP_SELECTED 1
-#define IWL_DROP_ALL 2
+#define IL_DROP_SINGLE 0
+#define IL_DROP_SELECTED 1
+#define IL_DROP_ALL 2
/*
* REPLY_WEP_KEY = 0x20
*/
-struct iwl_wep_key {
- u8 key_index;
+struct il_wep_key {
+ u8 key_idx;
u8 key_offset;
u8 reserved1[2];
u8 key_size;
@@ -1117,12 +1107,12 @@ struct iwl_wep_key {
u8 key[16];
} __packed;
-struct iwl_wep_cmd {
+struct il_wep_cmd {
u8 num_keys;
u8 global_key_type;
u8 flags;
u8 reserved;
- struct iwl_wep_key key[0];
+ struct il_wep_key key[0];
} __packed;
#define WEP_KEY_WEP_TYPE 1
@@ -1168,8 +1158,7 @@ struct iwl_wep_cmd {
#define RX_MPDU_RES_STATUS_TTAK_OK (1 << 7)
#define RX_MPDU_RES_STATUS_DEC_DONE_MSK (0x800)
-
-struct iwl3945_rx_frame_stats {
+struct il3945_rx_frame_stats {
u8 phy_count;
u8 id;
u8 rssi;
@@ -1179,7 +1168,7 @@ struct iwl3945_rx_frame_stats {
u8 payload[0];
} __packed;
-struct iwl3945_rx_frame_hdr {
+struct il3945_rx_frame_hdr {
__le16 channel;
__le16 phy_flags;
u8 reserved1;
@@ -1188,73 +1177,71 @@ struct iwl3945_rx_frame_hdr {
u8 payload[0];
} __packed;
-struct iwl3945_rx_frame_end {
+struct il3945_rx_frame_end {
__le32 status;
__le64 timestamp;
__le32 beacon_timestamp;
} __packed;
/*
- * REPLY_3945_RX = 0x1b (response only, not a command)
+ * N_3945_RX = 0x1b (response only, not a command)
*
* NOTE: DO NOT dereference from casts to this structure
* It is provided only for calculating minimum data set size.
* The actual offsets of the hdr and end are dynamic based on
* stats.phy_count
*/
-struct iwl3945_rx_frame {
- struct iwl3945_rx_frame_stats stats;
- struct iwl3945_rx_frame_hdr hdr;
- struct iwl3945_rx_frame_end end;
+struct il3945_rx_frame {
+ struct il3945_rx_frame_stats stats;
+ struct il3945_rx_frame_hdr hdr;
+ struct il3945_rx_frame_end end;
} __packed;
-#define IWL39_RX_FRAME_SIZE (4 + sizeof(struct iwl3945_rx_frame))
+#define IL39_RX_FRAME_SIZE (4 + sizeof(struct il3945_rx_frame))
/* Fixed (non-configurable) rx data from phy */
-#define IWL49_RX_RES_PHY_CNT 14
-#define IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4)
-#define IWL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70)
-#define IWL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */
-#define IWL49_AGC_DB_POS (7)
-struct iwl4965_rx_non_cfg_phy {
+#define IL49_RX_RES_PHY_CNT 14
+#define IL49_RX_PHY_FLAGS_ANTENNAE_OFFSET (4)
+#define IL49_RX_PHY_FLAGS_ANTENNAE_MASK (0x70)
+#define IL49_AGC_DB_MASK (0x3f80) /* MASK(7,13) */
+#define IL49_AGC_DB_POS (7)
+struct il4965_rx_non_cfg_phy {
__le16 ant_selection; /* ant A bit 4, ant B bit 5, ant C bit 6 */
__le16 agc_info; /* agc code 0:6, agc dB 7:13, reserved 14:15 */
u8 rssi_info[6]; /* we use even entries, 0/2/4 for A/B/C rssi */
u8 pad[0];
} __packed;
-
/*
- * REPLY_RX = 0xc3 (response only, not a command)
+ * N_RX = 0xc3 (response only, not a command)
* Used only for legacy (non 11n) frames.
*/
-struct iwl_rx_phy_res {
- u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */
+struct il_rx_phy_res {
+ u8 non_cfg_phy_cnt; /* non configurable DSP phy data byte count */
u8 cfg_phy_cnt; /* configurable DSP phy data byte count */
u8 stat_id; /* configurable DSP phy data set ID */
u8 reserved1;
__le64 timestamp; /* TSF at on air rise */
- __le32 beacon_time_stamp; /* beacon at on-air rise */
+ __le32 beacon_time_stamp; /* beacon at on-air rise */
__le16 phy_flags; /* general phy flags: band, modulation, ... */
__le16 channel; /* channel number */
- u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
+ u8 non_cfg_phy_buf[32]; /* for various implementations of non_cfg_phy */
__le32 rate_n_flags; /* RATE_MCS_* */
__le16 byte_count; /* frame's byte-count */
__le16 frame_time; /* frame's time on the air */
} __packed;
-struct iwl_rx_mpdu_res_start {
+struct il_rx_mpdu_res_start {
__le16 byte_count;
__le16 reserved;
} __packed;
-
/******************************************************************************
* (5)
* Tx Commands & Responses:
*
- * Driver must place each REPLY_TX command into one of the prioritized Tx
+ * Driver must place each C_TX command into one of the prioritized Tx
* queues in host DRAM, shared between driver and device (see comments for
* SCD registers and Tx/Rx Queues). When the device's Tx scheduler and uCode
* are preparing to transmit, the device pulls the Tx command over the PCI
@@ -1264,18 +1251,18 @@ struct iwl_rx_mpdu_res_start {
* uCode handles all timing and protocol related to control frames
* (RTS/CTS/ACK), based on flags in the Tx command. uCode and Tx scheduler
* handle reception of block-acks; uCode updates the host driver via
- * REPLY_COMPRESSED_BA.
+ * N_COMPRESSED_BA.
*
* uCode handles retrying Tx when an ACK is expected but not received.
* This includes trying lower data rates than the one requested in the Tx
- * command, as set up by the REPLY_RATE_SCALE (for 3945) or
- * REPLY_TX_LINK_QUALITY_CMD (4965).
+ * command, as set up by the C_RATE_SCALE (for 3945) or
+ * C_TX_LINK_QUALITY_CMD (4965).
*
- * Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
+ * Driver sets up transmit power for various rates via C_TX_PWR_TBL.
* This command must be executed after every RXON command, before Tx can occur.
*****************************************************************************/
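A minimal sketch of the ordering constraint above (transmit power must be re-sent after every RXON before Tx resumes), assuming the il_send_cmd_pdu() helper from common.c; il3945_fill_tx_power() is a hypothetical filler for the per-rate table:

static int il3945_send_tx_power_after_rxon(struct il_priv *il, u16 channel)
{
	struct il3945_txpowertable_cmd cmd = {
		.band = 1,			/* 1: 2.4 GHz, 0: 5 GHz */
		.channel = cpu_to_le16(channel),
	};

	il3945_fill_tx_power(cmd.power);	/* hypothetical helper */

	/* C_TX_PWR_TBL must follow every C_RXON before Tx can occur */
	return il_send_cmd_pdu(il, C_TX_PWR_TBL, sizeof(cmd), &cmd);
}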
-/* REPLY_TX Tx flags field */
+/* C_TX Tx flags field */
/*
* 1: Use Request-To-Send protocol before this frame.
@@ -1296,8 +1283,8 @@ struct iwl_rx_mpdu_res_start {
#define TX_CMD_FLG_ACK_MSK cpu_to_le32(1 << 3)
/* For 4965 devices:
- * 1: Use rate scale table (see REPLY_TX_LINK_QUALITY_CMD).
- * Tx command's initial_rate_index indicates first rate to try;
+ * 1: Use rate scale table (see C_TX_LINK_QUALITY_CMD).
+ * Tx command's initial_rate_idx indicates first rate to try;
* uCode walks through table for additional Tx attempts.
* 0: Use Tx rate/MCS from Tx command's rate_n_flags field.
* This rate will be used for all Tx attempts; it will not be scaled. */
@@ -1322,7 +1309,7 @@ struct iwl_rx_mpdu_res_start {
/* 1: uCode overrides sequence control field in MAC header.
* 0: Driver provides sequence control field in MAC header.
* Set this for management frames, non-QOS data frames, non-unicast frames,
- * and also in Tx command embedded in REPLY_SCAN_CMD for active scans. */
+ * and also in Tx command embedded in C_SCAN for active scans. */
#define TX_CMD_FLG_SEQ_CTL_MSK cpu_to_le32(1 << 13)
/* 1: This frame is non-last MPDU; more fragments are coming.
@@ -1349,7 +1336,6 @@ struct iwl_rx_mpdu_res_start {
/* HCCA-AP - disable duration overwriting. */
#define TX_CMD_FLG_DUR_MSK cpu_to_le32(1 << 25)
-
/*
* TX command security control
*/
@@ -1369,10 +1355,10 @@ struct iwl_rx_mpdu_res_start {
#define TKIP_ICV_LEN 4
/*
- * REPLY_TX = 0x1c (command)
+ * C_TX = 0x1c (command)
*/
-struct iwl3945_tx_cmd {
+struct il3945_tx_cmd {
/*
* MPDU byte count:
* MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
@@ -1434,9 +1420,9 @@ struct iwl3945_tx_cmd {
} __packed;
/*
- * REPLY_TX = 0x1c (response)
+ * C_TX = 0x1c (response)
*/
-struct iwl3945_tx_resp {
+struct il3945_tx_resp {
u8 failure_rts;
u8 failure_frame;
u8 bt_kill_count;
@@ -1445,19 +1431,18 @@ struct iwl3945_tx_resp {
__le32 status; /* TX status */
} __packed;
-
/*
* 4965 uCode updates these Tx attempt count values in host DRAM.
* Used for managing Tx retries when expecting block-acks.
* Driver should set these fields to 0.
*/
-struct iwl_dram_scratch {
+struct il_dram_scratch {
u8 try_cnt; /* Tx attempts */
u8 bt_kill_cnt; /* Tx attempts blocked by Bluetooth device */
__le16 reserved;
} __packed;
-struct iwl_tx_cmd {
+struct il_tx_cmd {
/*
* MPDU byte count:
* MAC header (24/26/30/32 bytes) + 2 bytes pad if 26/30 header size,
@@ -1481,7 +1466,7 @@ struct iwl_tx_cmd {
/* uCode may modify this field of the Tx command (in host DRAM!).
* Driver must also set dram_lsb_ptr and dram_msb_ptr in this cmd. */
- struct iwl_dram_scratch scratch;
+ struct il_dram_scratch scratch;
/* Rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is cleared. */
__le32 rate_n_flags; /* RATE_MCS_* */
@@ -1493,13 +1478,13 @@ struct iwl_tx_cmd {
u8 sec_ctl; /* TX_CMD_SEC_* */
/*
- * Index into rate table (see REPLY_TX_LINK_QUALITY_CMD) for initial
+ * Index into rate table (see C_TX_LINK_QUALITY_CMD) for initial
* Tx attempt, if TX_CMD_FLG_STA_RATE_MSK is set. Normally "0" for
* data frames, this field may be used to selectively reduce initial
* rate (via non-0 value) for special frames (e.g. management), while
* still supporting rate scaling for all frames.
*/
- u8 initial_rate_index;
+ u8 initial_rate_idx;
u8 reserved;
u8 key[16];
__le16 next_frame_flags;
@@ -1628,12 +1613,12 @@ enum {
};
enum {
- TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */
+ TX_STATUS_MSK = 0x000000ff, /* bits 0:7 */
TX_STATUS_DELAY_MSK = 0x00000040,
TX_STATUS_ABORT_MSK = 0x00000080,
TX_PACKET_MODE_MSK = 0x0000ff00, /* bits 8:15 */
TX_FIFO_NUMBER_MSK = 0x00070000, /* bits 16:18 */
- TX_RESERVED = 0x00780000, /* bits 19:22 */
+ TX_RESERVED = 0x00780000, /* bits 19:22 */
TX_POWER_PA_DETECT_MSK = 0x7f800000, /* bits 23:30 */
TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */
};
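A hedged sketch of unpacking the status word of a 3945 Tx response with the masks above; interpreting the individual status codes needs the enum elided from this hunk:

static void il3945_print_tx_status(struct il3945_tx_resp *resp)
{
	u32 status = le32_to_cpu(resp->status);

	pr_debug("tx status 0x%02x, packet mode 0x%02x, fifo %u\n",
		 status & TX_STATUS_MSK,
		 (status & TX_PACKET_MODE_MSK) >> 8,
		 (status & TX_FIFO_NUMBER_MSK) >> 16);
}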
@@ -1671,7 +1656,7 @@ enum {
#define AGG_TX_STATE_SEQ_NUM_MSK 0xffff0000
/*
- * REPLY_TX = 0x1c (response)
+ * C_TX = 0x1c (response)
*
* This response may be in one of two slightly different formats, indicated
* by the frame_count field:
@@ -1697,7 +1682,7 @@ struct agg_tx_status {
__le16 sequence;
} __packed;
-struct iwl4965_tx_resp {
+struct il4965_tx_resp {
u8 frame_count; /* 1 no aggregation, >1 aggregation */
u8 bt_kill_count; /* # blocked by bluetooth (unused for agg) */
u8 failure_rts; /* # failures due to unsuccessful RTS */
@@ -1730,16 +1715,16 @@ struct iwl4965_tx_resp {
*/
union {
__le32 status;
- struct agg_tx_status agg_status[0]; /* for each agg frame */
+ struct agg_tx_status agg_status[0]; /* for each agg frame */
} u;
} __packed;
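The frame_count field above selects between the two layouts; a brief illustrative sketch of walking the per-frame entries when the response covers an aggregate:

static void il4965_walk_tx_resp(struct il4965_tx_resp *resp)
{
	int i;

	if (resp->frame_count <= 1) {
		/* non-aggregation format: a single status word */
		pr_debug("tx status 0x%08x\n", le32_to_cpu(resp->u.status));
		return;
	}

	/* aggregation format: one agg_tx_status entry per frame */
	for (i = 0; i < resp->frame_count; i++)
		pr_debug("frame %d: status 0x%04x seq 0x%04x\n", i,
			 le16_to_cpu(resp->u.agg_status[i].status),
			 le16_to_cpu(resp->u.agg_status[i].sequence));
}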
/*
- * REPLY_COMPRESSED_BA = 0xc5 (response only, not a command)
+ * N_COMPRESSED_BA = 0xc5 (response only, not a command)
*
* Reports Block-Acknowledge from recipient station
*/
-struct iwl_compressed_ba_resp {
+struct il_compressed_ba_resp {
__le32 sta_addr_lo32;
__le16 sta_addr_hi16;
__le16 reserved;
@@ -1754,30 +1739,29 @@ struct iwl_compressed_ba_resp {
} __packed;
/*
- * REPLY_TX_PWR_TABLE_CMD = 0x97 (command, has simple generic response)
+ * C_TX_PWR_TBL = 0x97 (command, has simple generic response)
*
- * See details under "TXPOWER" in iwl-4965-hw.h.
+ * See details under "TXPOWER" in 4965.h.
*/
-struct iwl3945_txpowertable_cmd {
+struct il3945_txpowertable_cmd {
u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
u8 reserved;
__le16 channel;
- struct iwl3945_power_per_rate power[IWL_MAX_RATES];
+ struct il3945_power_per_rate power[IL_MAX_RATES];
} __packed;
-struct iwl4965_txpowertable_cmd {
+struct il4965_txpowertable_cmd {
u8 band; /* 0: 5 GHz, 1: 2.4 GHz */
u8 reserved;
__le16 channel;
- struct iwl4965_tx_power_db tx_power;
+ struct il4965_tx_power_db tx_power;
} __packed;
-
/**
- * struct iwl3945_rate_scaling_cmd - Rate Scaling Command & Response
+ * struct il3945_rate_scaling_cmd - Rate Scaling Command & Response
*
- * REPLY_RATE_SCALE = 0x47 (command, has simple generic response)
+ * C_RATE_SCALE = 0x47 (command, has simple generic response)
*
* NOTE: The table of rates passed to the uCode via the
* RATE_SCALE command sets up the corresponding order of
@@ -1786,22 +1770,21 @@ struct iwl4965_txpowertable_cmd {
*
* For example, if you set 9MB (PLCP 0x0f) as the first
* rate in the rate table, the bit mask for that rate
- * when passed through ofdm_basic_rates on the REPLY_RXON
+ * when passed through ofdm_basic_rates on the C_RXON
* command would be bit 0 (1 << 0)
*/
-struct iwl3945_rate_scaling_info {
+struct il3945_rate_scaling_info {
__le16 rate_n_flags;
u8 try_cnt;
- u8 next_rate_index;
+ u8 next_rate_idx;
} __packed;
-struct iwl3945_rate_scaling_cmd {
+struct il3945_rate_scaling_cmd {
u8 table_id;
u8 reserved[3];
- struct iwl3945_rate_scaling_info table[IWL_MAX_RATES];
+ struct il3945_rate_scaling_info table[IL_MAX_RATES];
} __packed;
-
/*RS_NEW_API: only TLC_RTS remains and moved to bit 0 */
#define LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK (1 << 0)
@@ -1816,28 +1799,27 @@ struct iwl3945_rate_scaling_cmd {
#define LINK_QUAL_ANT_B_MSK (1 << 1)
#define LINK_QUAL_ANT_MSK (LINK_QUAL_ANT_A_MSK|LINK_QUAL_ANT_B_MSK)
-
/**
- * struct iwl_link_qual_general_params
+ * struct il_link_qual_general_params
*
- * Used in REPLY_TX_LINK_QUALITY_CMD
+ * Used in C_TX_LINK_QUALITY_CMD
*/
-struct iwl_link_qual_general_params {
+struct il_link_qual_general_params {
u8 flags;
- /* No entries at or above this (driver chosen) index contain MIMO */
+ /* No entries at or above this (driver chosen) idx contain MIMO */
u8 mimo_delimiter;
/* Best single antenna to use for single stream (legacy, SISO). */
u8 single_stream_ant_msk; /* LINK_QUAL_ANT_* */
/* Best antennas to use for MIMO (unused for 4965, assumes both). */
- u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */
+ u8 dual_stream_ant_msk; /* LINK_QUAL_ANT_* */
/*
* If driver needs to use different initial rates for different
* EDCA QOS access categories (as implemented by tx fifos 0-3),
- * this table will set that up, by indicating the indexes in the
+ * this table will set that up, by indicating the idxes in the
* rs_table[LINK_QUAL_MAX_RETRY_NUM] rate table at which to start.
* Otherwise, driver should set all entries to 0.
*
@@ -1845,10 +1827,10 @@ struct iwl_link_qual_general_params {
* 0 = Background, 1 = Best Effort (normal), 2 = Video, 3 = Voice
* TX FIFOs above 3 use same value (typically 0) as TX FIFO 3.
*/
- u8 start_rate_index[LINK_QUAL_AC_NUM];
+ u8 start_rate_idx[LINK_QUAL_AC_NUM];
} __packed;
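For illustration only, one arbitrary way to program the per-access-category start idxes described above; lower idxes sit earlier (faster) in the retry table, so Voice starts at the initial rate while Background starts a few steps down:

static void il_example_start_rate_idx(struct il_link_qual_general_params *gp)
{
	/* 0 = Background, 1 = Best Effort, 2 = Video, 3 = Voice */
	gp->start_rate_idx[0] = 4;	/* Background: start a few steps down */
	gp->start_rate_idx[1] = 2;
	gp->start_rate_idx[2] = 1;
	gp->start_rate_idx[3] = 0;	/* Voice: start at the initial rate */
}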
-#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
+#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000) /* 4 milliseconds */
#define LINK_QUAL_AGG_TIME_LIMIT_MAX (8000)
#define LINK_QUAL_AGG_TIME_LIMIT_MIN (100)
@@ -1861,11 +1843,11 @@ struct iwl_link_qual_general_params {
#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
/**
- * struct iwl_link_qual_agg_params
+ * struct il_link_qual_agg_params
*
- * Used in REPLY_TX_LINK_QUALITY_CMD
+ * Used in C_TX_LINK_QUALITY_CMD
*/
-struct iwl_link_qual_agg_params {
+struct il_link_qual_agg_params {
/*
 * Maximum number of uSec in aggregation.
@@ -1892,9 +1874,9 @@ struct iwl_link_qual_agg_params {
} __packed;
/*
- * REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
+ * C_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
*
- * For 4965 devices only; 3945 uses REPLY_RATE_SCALE.
+ * For 4965 devices only; 3945 uses C_RATE_SCALE.
*
* Each station in the 4965 device's internal station table has its own table
* of 16
@@ -1903,13 +1885,13 @@ struct iwl_link_qual_agg_params {
* one station.
*
* NOTE: Station must already be in 4965 device's station table.
- * Use REPLY_ADD_STA.
+ * Use C_ADD_STA.
*
* The rate scaling procedures described below work well. Of course, other
* procedures are possible, and may work better for particular environments.
*
*
- * FILLING THE RATE TABLE
+ * FILLING THE RATE TBL
*
* Given a particular initial rate and mode, as determined by the rate
* scaling algorithm described below, the Linux driver uses the following
@@ -1948,13 +1930,13 @@ struct iwl_link_qual_agg_params {
* speculative mode as the new current active mode.
*
* Each history set contains, separately for each possible rate, data for a
- * sliding window of the 62 most recent tx attempts at that rate. The data
+ * sliding win of the 62 most recent tx attempts at that rate. The data
* includes a shifting bitmap of success(1)/failure(0), and sums of successful
* and attempted frames, from which the driver can additionally calculate a
* success ratio (success / attempted) and number of failures
- * (attempted - success), and control the size of the window (attempted).
+ * (attempted - success), and control the size of the win (attempted).
* The driver uses the bit map to remove successes from the success sum, as
- * the oldest tx attempts fall out of the window.
+ * the oldest tx attempts fall out of the win.
*
* When the 4965 device makes multiple tx attempts for a given frame, each
* attempt might be at a different rate, and have different modulation
@@ -1966,7 +1948,7 @@ struct iwl_link_qual_agg_params {
*
* When using block-ack (aggregation), all frames are transmitted at the same
* rate, since there is no per-attempt acknowledgment from the destination
- * station. The Tx response struct iwl_tx_resp indicates the Tx rate in
+ * station. The Tx response struct il_tx_resp indicates the Tx rate in
* rate_n_flags field. After receiving a block-ack, the driver can update
* history for the entire block all at once.
*
@@ -2016,8 +1998,8 @@ struct iwl_link_qual_agg_params {
* good performance; higher rate is sure to have poorer success.
*
* 6) Re-evaluate the rate after each tx frame. If working with block-
- * acknowledge, history and statistics may be calculated for the entire
- * block (including prior history that fits within the history windows),
+ * acknowledge, history and stats may be calculated for the entire
+ * block (including prior history that fits within the history wins),
* before re-evaluation.
*
* FINDING BEST STARTING MODULATION MODE:
@@ -2079,22 +2061,22 @@ struct iwl_link_qual_agg_params {
* legacy), and then repeat the search process.
*
*/
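A minimal sketch of the 62-attempt success/failure win described above, kept as a single bitmap; the struct and helper are illustrative driver-side state, not part of this command API:

#define RS_WIN_SIZE 62	/* mirrors the 62-attempt win described above */

struct rs_rate_win {	/* illustrative only */
	u64 bitmap;	/* bit i set => attempt i succeeded */
	u32 counter;	/* attempts currently inside the win */
	u32 success;	/* successes currently inside the win */
};

static void rs_win_add_attempt(struct rs_rate_win *w, bool acked)
{
	/* once the win is full, retire the oldest attempt (bit 61) */
	if (w->counter >= RS_WIN_SIZE) {
		if (w->bitmap & BIT_ULL(RS_WIN_SIZE - 1))
			w->success--;
		w->counter--;
	}

	/* shift everything up one slot; the newest attempt lands in bit 0 */
	w->bitmap = (w->bitmap << 1) & (BIT_ULL(RS_WIN_SIZE) - 1);
	if (acked) {
		w->bitmap |= BIT_ULL(0);
		w->success++;
	}
	w->counter++;

	/* success ratio for this rate: w->success * 100 / w->counter */
}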
-struct iwl_link_quality_cmd {
+struct il_link_quality_cmd {
/* Index of destination/recipient station in uCode's station table */
u8 sta_id;
u8 reserved1;
__le16 control; /* not used */
- struct iwl_link_qual_general_params general_params;
- struct iwl_link_qual_agg_params agg_params;
+ struct il_link_qual_general_params general_params;
+ struct il_link_qual_agg_params agg_params;
/*
- * Rate info; when using rate-scaling, Tx command's initial_rate_index
- * specifies 1st Tx rate attempted, via index into this table.
+ * Rate info; when using rate-scaling, Tx command's initial_rate_idx
+ * specifies 1st Tx rate attempted, via idx into this table.
 * 4965 devices work their way through the table when retrying Tx.
*/
struct {
- __le32 rate_n_flags; /* RATE_MCS_*, IWL_RATE_* */
+ __le32 rate_n_flags; /* RATE_MCS_*, RATE_* */
} rs_table[LINK_QUAL_MAX_RETRY_NUM];
__le32 reserved2;
} __packed;
@@ -2117,13 +2099,13 @@ struct iwl_link_quality_cmd {
#define BT_MAX_KILL_DEF (0x5)
/*
- * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
+ * C_BT_CONFIG = 0x9b (command, has simple generic response)
*
* 3945 and 4965 devices support hardware handshake with Bluetooth device on
* same platform. Bluetooth device alerts wireless device when it will Tx;
* wireless device can delay or kill its own Tx to accommodate.
*/
-struct iwl_bt_cmd {
+struct il_bt_cmd {
u8 flags;
u8 lead_time;
u8 max_kill;
@@ -2132,7 +2114,6 @@ struct iwl_bt_cmd {
__le32 kill_cts_mask;
} __packed;
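A hedged example of filling the coexistence command; the flag and lead-time values other than BT_MAX_KILL_DEF are plain illustrative literals, and il_send_cmd_pdu() is assumed from common.c:

static int il_send_bt_config_example(struct il_priv *il)
{
	struct il_bt_cmd bt_cmd = {
		.flags = 0,		/* illustrative: coexistence off */
		.lead_time = 0,
		.max_kill = BT_MAX_KILL_DEF,
		.kill_cts_mask = 0,
	};

	return il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(bt_cmd), &bt_cmd);
}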
-
/******************************************************************************
* (6)
* Spectrum Management (802.11h) Commands, Responses, Notifications:
@@ -2150,18 +2131,18 @@ struct iwl_bt_cmd {
RXON_FILTER_ASSOC_MSK | \
RXON_FILTER_BCON_AWARE_MSK)
-struct iwl_measure_channel {
+struct il_measure_channel {
__le32 duration; /* measurement duration in extended beacon
* format */
u8 channel; /* channel to measure */
- u8 type; /* see enum iwl_measure_type */
+ u8 type; /* see enum il_measure_type */
__le16 reserved;
} __packed;
/*
- * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (command)
+ * C_SPECTRUM_MEASUREMENT = 0x74 (command)
*/
-struct iwl_spectrum_cmd {
+struct il_spectrum_cmd {
__le16 len; /* number of bytes starting from token */
u8 token; /* token id */
u8 id; /* measurement id -- 0 or 1 */
@@ -2174,13 +2155,13 @@ struct iwl_spectrum_cmd {
__le32 filter_flags; /* rxon filter flags */
__le16 channel_count; /* minimum 1, maximum 10 */
__le16 reserved3;
- struct iwl_measure_channel channels[10];
+ struct il_measure_channel channels[10];
} __packed;
/*
- * REPLY_SPECTRUM_MEASUREMENT_CMD = 0x74 (response)
+ * C_SPECTRUM_MEASUREMENT = 0x74 (response)
*/
-struct iwl_spectrum_resp {
+struct il_spectrum_resp {
u8 token;
u8 id; /* id of the prior command replaced, or 0xff */
__le16 status; /* 0 - command will be handled
@@ -2188,57 +2169,57 @@ struct iwl_spectrum_resp {
* measurement) */
} __packed;
-enum iwl_measurement_state {
- IWL_MEASUREMENT_START = 0,
- IWL_MEASUREMENT_STOP = 1,
+enum il_measurement_state {
+ IL_MEASUREMENT_START = 0,
+ IL_MEASUREMENT_STOP = 1,
};
-enum iwl_measurement_status {
- IWL_MEASUREMENT_OK = 0,
- IWL_MEASUREMENT_CONCURRENT = 1,
- IWL_MEASUREMENT_CSA_CONFLICT = 2,
- IWL_MEASUREMENT_TGH_CONFLICT = 3,
+enum il_measurement_status {
+ IL_MEASUREMENT_OK = 0,
+ IL_MEASUREMENT_CONCURRENT = 1,
+ IL_MEASUREMENT_CSA_CONFLICT = 2,
+ IL_MEASUREMENT_TGH_CONFLICT = 3,
/* 4-5 reserved */
- IWL_MEASUREMENT_STOPPED = 6,
- IWL_MEASUREMENT_TIMEOUT = 7,
- IWL_MEASUREMENT_PERIODIC_FAILED = 8,
+ IL_MEASUREMENT_STOPPED = 6,
+ IL_MEASUREMENT_TIMEOUT = 7,
+ IL_MEASUREMENT_PERIODIC_FAILED = 8,
};
#define NUM_ELEMENTS_IN_HISTOGRAM 8
-struct iwl_measurement_histogram {
+struct il_measurement_histogram {
__le32 ofdm[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 0.8usec counts */
__le32 cck[NUM_ELEMENTS_IN_HISTOGRAM]; /* in 1usec counts */
} __packed;
/* clear channel availability counters */
-struct iwl_measurement_cca_counters {
+struct il_measurement_cca_counters {
__le32 ofdm;
__le32 cck;
} __packed;
-enum iwl_measure_type {
- IWL_MEASURE_BASIC = (1 << 0),
- IWL_MEASURE_CHANNEL_LOAD = (1 << 1),
- IWL_MEASURE_HISTOGRAM_RPI = (1 << 2),
- IWL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
- IWL_MEASURE_FRAME = (1 << 4),
+enum il_measure_type {
+ IL_MEASURE_BASIC = (1 << 0),
+ IL_MEASURE_CHANNEL_LOAD = (1 << 1),
+ IL_MEASURE_HISTOGRAM_RPI = (1 << 2),
+ IL_MEASURE_HISTOGRAM_NOISE = (1 << 3),
+ IL_MEASURE_FRAME = (1 << 4),
/* bits 5:6 are reserved */
- IWL_MEASURE_IDLE = (1 << 7),
+ IL_MEASURE_IDLE = (1 << 7),
};
/*
- * SPECTRUM_MEASURE_NOTIFICATION = 0x75 (notification only, not a command)
+ * N_SPECTRUM_MEASUREMENT = 0x75 (notification only, not a command)
*/
-struct iwl_spectrum_notification {
+struct il_spectrum_notification {
u8 id; /* measurement id -- 0 or 1 */
u8 token;
- u8 channel_index; /* index in measurement channel list */
+ u8 channel_idx; /* idx in measurement channel list */
u8 state; /* 0 - start, 1 - stop */
__le32 start_time; /* lower 32-bits of TSF */
u8 band; /* 0 - 5.2GHz, 1 - 2.4GHz */
u8 channel;
- u8 type; /* see enum iwl_measurement_type */
+	u8 type;		/* see enum il_measure_type */
u8 reserved1;
	/* NOTE: cca_ofdm, cca_cck, basic_type, and histogram are only
* valid if applicable for measurement type requested. */
@@ -2248,9 +2229,9 @@ struct iwl_spectrum_notification {
u8 basic_type; /* 0 - bss, 1 - ofdm preamble, 2 -
* unidentified */
u8 reserved2[3];
- struct iwl_measurement_histogram histogram;
+ struct il_measurement_histogram histogram;
__le32 stop_time; /* lower 32-bits of TSF */
- __le32 status; /* see iwl_measurement_status */
+ __le32 status; /* see il_measurement_status */
} __packed;
/******************************************************************************
@@ -2260,10 +2241,10 @@ struct iwl_spectrum_notification {
*****************************************************************************/
/**
- * struct iwl_powertable_cmd - Power Table Command
+ * struct il_powertable_cmd - Power Table Command
* @flags: See below:
*
- * POWER_TABLE_CMD = 0x77 (command, has simple generic response)
+ * C_POWER_TBL = 0x77 (command, has simple generic response)
*
* PM allow:
* bit 0 - '0' Driver not allow power management
@@ -2290,38 +2271,38 @@ struct iwl_spectrum_notification {
* '10' force xtal sleep
* '11' Illegal set
*
- * NOTE: if sleep_interval[SLEEP_INTRVL_TABLE_SIZE-1] > DTIM period then
+ * NOTE: if sleep_interval[SLEEP_INTRVL_TBL_SIZE-1] > DTIM period then
* ucode assume sleep over DTIM is allowed and we don't need to wake up
* for every DTIM.
*/
-#define IWL_POWER_VEC_SIZE 5
+#define IL_POWER_VEC_SIZE 5
-#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
-#define IWL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3))
+#define IL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
+#define IL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3))
-struct iwl3945_powertable_cmd {
+struct il3945_powertable_cmd {
__le16 flags;
u8 reserved[2];
__le32 rx_data_timeout;
__le32 tx_data_timeout;
- __le32 sleep_interval[IWL_POWER_VEC_SIZE];
+ __le32 sleep_interval[IL_POWER_VEC_SIZE];
} __packed;
-struct iwl_powertable_cmd {
+struct il_powertable_cmd {
__le16 flags;
- u8 keep_alive_seconds; /* 3945 reserved */
- u8 debug_flags; /* 3945 reserved */
+ u8 keep_alive_seconds; /* 3945 reserved */
+ u8 debug_flags; /* 3945 reserved */
__le32 rx_data_timeout;
__le32 tx_data_timeout;
- __le32 sleep_interval[IWL_POWER_VEC_SIZE];
+ __le32 sleep_interval[IL_POWER_VEC_SIZE];
__le32 keep_alive_beacons;
} __packed;
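A minimal sketch of filling the power table; the timeout and sleep-interval numbers are arbitrary examples (per the note above, only a last entry larger than the DTIM period lets uCode sleep across DTIMs):

static void il_build_powertable_example(struct il_powertable_cmd *cmd,
					bool pci_pm)
{
	static const u32 interval[IL_POWER_VEC_SIZE] = { 1, 2, 2, 2, 0xFF };
	int i;

	memset(cmd, 0, sizeof(*cmd));
	cmd->flags = IL_POWER_DRIVER_ALLOW_SLEEP_MSK;
	if (pci_pm)
		cmd->flags |= IL_POWER_PCI_PM_MSK;

	cmd->rx_data_timeout = cpu_to_le32(25 * 1024);	/* uSec */
	cmd->tx_data_timeout = cpu_to_le32(25 * 1024);	/* uSec */

	for (i = 0; i < IL_POWER_VEC_SIZE; i++)
		cmd->sleep_interval[i] = cpu_to_le32(interval[i]);
}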
/*
- * PM_SLEEP_NOTIFICATION = 0x7A (notification only, not a command)
+ * N_PM_SLEEP = 0x7A (notification only, not a command)
* all devices identical.
*/
-struct iwl_sleep_notification {
+struct il_sleep_notification {
u8 pm_sleep_mode;
u8 pm_wakeup_src;
__le16 reserved;
@@ -2332,23 +2313,23 @@ struct iwl_sleep_notification {
/* Sleep states. all devices identical. */
enum {
- IWL_PM_NO_SLEEP = 0,
- IWL_PM_SLP_MAC = 1,
- IWL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
- IWL_PM_SLP_FULL_MAC_CARD_STATE = 3,
- IWL_PM_SLP_PHY = 4,
- IWL_PM_SLP_REPENT = 5,
- IWL_PM_WAKEUP_BY_TIMER = 6,
- IWL_PM_WAKEUP_BY_DRIVER = 7,
- IWL_PM_WAKEUP_BY_RFKILL = 8,
+ IL_PM_NO_SLEEP = 0,
+ IL_PM_SLP_MAC = 1,
+ IL_PM_SLP_FULL_MAC_UNASSOCIATE = 2,
+ IL_PM_SLP_FULL_MAC_CARD_STATE = 3,
+ IL_PM_SLP_PHY = 4,
+ IL_PM_SLP_REPENT = 5,
+ IL_PM_WAKEUP_BY_TIMER = 6,
+ IL_PM_WAKEUP_BY_DRIVER = 7,
+ IL_PM_WAKEUP_BY_RFKILL = 8,
/* 3 reserved */
- IWL_PM_NUM_OF_MODES = 12,
+ IL_PM_NUM_OF_MODES = 12,
};
/*
- * CARD_STATE_NOTIFICATION = 0xa1 (notification only, not a command)
+ * N_CARD_STATE = 0xa1 (notification only, not a command)
*/
-struct iwl_card_state_notif {
+struct il_card_state_notif {
__le32 flags;
} __packed;
@@ -2357,11 +2338,11 @@ struct iwl_card_state_notif {
#define CT_CARD_DISABLED 0x04
#define RXON_CARD_DISABLED 0x10
-struct iwl_ct_kill_config {
- __le32 reserved;
- __le32 critical_temperature_M;
- __le32 critical_temperature_R;
-} __packed;
+struct il_ct_kill_config {
+ __le32 reserved;
+ __le32 critical_temperature_M;
+ __le32 critical_temperature_R;
+} __packed;
/******************************************************************************
* (8)
@@ -2373,7 +2354,7 @@ struct iwl_ct_kill_config {
#define SCAN_CHANNEL_TYPE_ACTIVE cpu_to_le32(1)
/**
- * struct iwl_scan_channel - entry in REPLY_SCAN_CMD channel table
+ * struct il_scan_channel - entry in C_SCAN channel table
*
* One for each channel in the scan list.
* Each channel can independently select:
@@ -2383,7 +2364,7 @@ struct iwl_ct_kill_config {
* quiet_plcp_th, good_CRC_th)
*
* To avoid uCode errors, make sure the following are true (see comments
- * under struct iwl_scan_cmd about max_out_time and quiet_time):
+ * under struct il_scan_cmd about max_out_time and quiet_time):
* 1) If using passive_dwell (i.e. passive_dwell != 0):
* active_dwell <= passive_dwell (< max_out_time if max_out_time != 0)
* 2) quiet_time <= active_dwell
@@ -2391,7 +2372,7 @@ struct iwl_ct_kill_config {
* passive_dwell < max_out_time
* active_dwell < max_out_time
*/
-struct iwl3945_scan_channel {
+struct il3945_scan_channel {
/*
* type is defined as:
* 0:0 1 = active, 0 = passive
@@ -2400,16 +2381,16 @@ struct iwl3945_scan_channel {
* 5:7 reserved
*/
u8 type;
- u8 channel; /* band is selected by iwl3945_scan_cmd "flags" field */
- struct iwl3945_tx_power tpc;
+ u8 channel; /* band is selected by il3945_scan_cmd "flags" field */
+ struct il3945_tx_power tpc;
__le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
__le16 passive_dwell; /* in 1024-uSec TU (time units), typ 20-500 */
} __packed;
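A small validation sketch of the dwell-time rules listed above for one 3945 scan channel entry; quiet_time and max_out_time are taken from the enclosing scan command (same time units):

static bool il3945_scan_channel_ok(const struct il3945_scan_channel *ch,
				   u16 quiet_time, u16 max_out_time)
{
	u16 active = le16_to_cpu(ch->active_dwell);
	u16 passive = le16_to_cpu(ch->passive_dwell);

	if (passive && active > passive)
		return false;		/* rule 1: active <= passive */
	if (quiet_time > active)
		return false;		/* rule 2: quiet <= active */
	if (max_out_time && (active >= max_out_time || passive >= max_out_time))
		return false;		/* rule 3: both < max_out_time */
	return true;
}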
/* set number of direct probes u8 type */
-#define IWL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
+#define IL39_SCAN_PROBE_MASK(n) ((BIT(n) | (BIT(n) - BIT(1))))
-struct iwl_scan_channel {
+struct il_scan_channel {
/*
* type is defined as:
* 0:0 1 = active, 0 = passive
@@ -2418,7 +2399,7 @@ struct iwl_scan_channel {
* 21:31 reserved
*/
__le32 type;
- __le16 channel; /* band is selected by iwl_scan_cmd "flags" field */
+ __le16 channel; /* band is selected by il_scan_cmd "flags" field */
u8 tx_gain; /* gain for analog radio */
u8 dsp_atten; /* gain for DSP */
__le16 active_dwell; /* in 1024-uSec TU (time units), typ 5-50 */
@@ -2426,17 +2407,17 @@ struct iwl_scan_channel {
} __packed;
/* set number of direct probes __le32 type */
-#define IWL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
+#define IL_SCAN_PROBE_MASK(n) cpu_to_le32((BIT(n) | (BIT(n) - BIT(1))))
/**
- * struct iwl_ssid_ie - directed scan network information element
+ * struct il_ssid_ie - directed scan network information element
*
- * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in
- * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel;
+ * Up to 20 of these may appear in C_SCAN (Note: Only 4 are in
+ * 3945 SCAN api), selected by "type" bit field in struct il_scan_channel;
* each channel may select different ssids from among the 20 (4) entries.
* SSID IEs get transmitted in reverse order of entry.
*/
-struct iwl_ssid_ie {
+struct il_ssid_ie {
u8 id;
u8 len;
u8 ssid[32];
@@ -2445,14 +2426,14 @@ struct iwl_ssid_ie {
#define PROBE_OPTION_MAX_3945 4
#define PROBE_OPTION_MAX 20
#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
-#define IWL_GOOD_CRC_TH_DISABLED 0
-#define IWL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
-#define IWL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff)
-#define IWL_MAX_SCAN_SIZE 1024
-#define IWL_MAX_CMD_SIZE 4096
+#define IL_GOOD_CRC_TH_DISABLED 0
+#define IL_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
+#define IL_GOOD_CRC_TH_NEVER cpu_to_le16(0xffff)
+#define IL_MAX_SCAN_SIZE 1024
+#define IL_MAX_CMD_SIZE 4096
/*
- * REPLY_SCAN_CMD = 0x80 (command)
+ * C_SCAN = 0x80 (command)
*
* The hardware scan command is very powerful; the driver can set it up to
* maintain (relatively) normal network traffic while doing a scan in the
@@ -2501,10 +2482,10 @@ struct iwl_ssid_ie {
* Driver must use separate scan commands for 2.4 vs. 5 GHz bands.
*
* To avoid uCode errors, see timing restrictions described under
- * struct iwl_scan_channel.
+ * struct il_scan_channel.
*/
-struct iwl3945_scan_cmd {
+struct il3945_scan_cmd {
__le16 len;
u8 reserved0;
u8 channel_count; /* # channels in channel list */
@@ -2525,10 +2506,10 @@ struct iwl3945_scan_cmd {
/* For active scans (set to all-0s for passive scans).
* Does not include payload. Must specify Tx rate; no rate scaling. */
- struct iwl3945_tx_cmd tx_cmd;
+ struct il3945_tx_cmd tx_cmd;
/* For directed active scans (set to all-0s otherwise) */
- struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];
+ struct il_ssid_ie direct_scan[PROBE_OPTION_MAX_3945];
/*
* Probe request frame, followed by channel list.
@@ -2538,17 +2519,17 @@ struct iwl3945_scan_cmd {
* Number of channels in list is specified by channel_count.
* Each channel in list is of type:
*
- * struct iwl3945_scan_channel channels[0];
+ * struct il3945_scan_channel channels[0];
*
* NOTE: Only one band of channels can be scanned per pass. You
* must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
- * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
+ * for one scan to complete (i.e. receive N_SCAN_COMPLETE)
* before requesting another scan.
*/
u8 data[0];
} __packed;
-struct iwl_scan_cmd {
+struct il_scan_cmd {
__le16 len;
u8 reserved0;
u8 channel_count; /* # channels in channel list */
@@ -2569,10 +2550,10 @@ struct iwl_scan_cmd {
/* For active scans (set to all-0s for passive scans).
* Does not include payload. Must specify Tx rate; no rate scaling. */
- struct iwl_tx_cmd tx_cmd;
+ struct il_tx_cmd tx_cmd;
/* For directed active scans (set to all-0s otherwise) */
- struct iwl_ssid_ie direct_scan[PROBE_OPTION_MAX];
+ struct il_ssid_ie direct_scan[PROBE_OPTION_MAX];
/*
* Probe request frame, followed by channel list.
@@ -2582,11 +2563,11 @@ struct iwl_scan_cmd {
* Number of channels in list is specified by channel_count.
* Each channel in list is of type:
*
- * struct iwl_scan_channel channels[0];
+ * struct il_scan_channel channels[0];
*
* NOTE: Only one band of channels can be scanned per pass. You
* must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
- * for one scan to complete (i.e. receive SCAN_COMPLETE_NOTIFICATION)
+ * for one scan to complete (i.e. receive N_SCAN_COMPLETE)
* before requesting another scan.
*/
u8 data[0];
@@ -2598,16 +2579,16 @@ struct iwl_scan_cmd {
#define ABORT_STATUS 0x2
/*
- * REPLY_SCAN_CMD = 0x80 (response)
+ * C_SCAN = 0x80 (response)
*/
-struct iwl_scanreq_notification {
+struct il_scanreq_notification {
__le32 status; /* 1: okay, 2: cannot fulfill request */
} __packed;
/*
- * SCAN_START_NOTIFICATION = 0x82 (notification only, not a command)
+ * N_SCAN_START = 0x82 (notification only, not a command)
*/
-struct iwl_scanstart_notification {
+struct il_scanstart_notification {
__le32 tsf_low;
__le32 tsf_high;
__le32 beacon_timer;
@@ -2620,30 +2601,30 @@ struct iwl_scanstart_notification {
#define SCAN_OWNER_STATUS 0x1
#define MEASURE_OWNER_STATUS 0x2
-#define IWL_PROBE_STATUS_OK 0
-#define IWL_PROBE_STATUS_TX_FAILED BIT(0)
+#define IL_PROBE_STATUS_OK 0
+#define IL_PROBE_STATUS_TX_FAILED BIT(0)
/* error statuses combined with TX_FAILED */
-#define IWL_PROBE_STATUS_FAIL_TTL BIT(1)
-#define IWL_PROBE_STATUS_FAIL_BT BIT(2)
+#define IL_PROBE_STATUS_FAIL_TTL BIT(1)
+#define IL_PROBE_STATUS_FAIL_BT BIT(2)
-#define NUMBER_OF_STATISTICS 1 /* first __le32 is good CRC */
+#define NUMBER_OF_STATS 1 /* first __le32 is good CRC */
/*
- * SCAN_RESULTS_NOTIFICATION = 0x83 (notification only, not a command)
+ * N_SCAN_RESULTS = 0x83 (notification only, not a command)
*/
-struct iwl_scanresults_notification {
+struct il_scanresults_notification {
u8 channel;
u8 band;
u8 probe_status;
- u8 num_probe_not_sent; /* not enough time to send */
+ u8 num_probe_not_sent; /* not enough time to send */
__le32 tsf_low;
__le32 tsf_high;
- __le32 statistics[NUMBER_OF_STATISTICS];
+ __le32 stats[NUMBER_OF_STATS];
} __packed;
/*
- * SCAN_COMPLETE_NOTIFICATION = 0x84 (notification only, not a command)
+ * N_SCAN_COMPLETE = 0x84 (notification only, not a command)
*/
-struct iwl_scancomplete_notification {
+struct il_scancomplete_notification {
u8 scanned_channels;
u8 status;
u8 last_channel;
@@ -2651,50 +2632,49 @@ struct iwl_scancomplete_notification {
__le32 tsf_high;
} __packed;
-
/******************************************************************************
* (9)
* IBSS/AP Commands and Notifications:
*
*****************************************************************************/
-enum iwl_ibss_manager {
- IWL_NOT_IBSS_MANAGER = 0,
- IWL_IBSS_MANAGER = 1,
+enum il_ibss_manager {
+ IL_NOT_IBSS_MANAGER = 0,
+ IL_IBSS_MANAGER = 1,
};
/*
- * BEACON_NOTIFICATION = 0x90 (notification only, not a command)
+ * N_BEACON = 0x90 (notification only, not a command)
*/
-struct iwl3945_beacon_notif {
- struct iwl3945_tx_resp beacon_notify_hdr;
+struct il3945_beacon_notif {
+ struct il3945_tx_resp beacon_notify_hdr;
__le32 low_tsf;
__le32 high_tsf;
__le32 ibss_mgr_status;
} __packed;
-struct iwl4965_beacon_notif {
- struct iwl4965_tx_resp beacon_notify_hdr;
+struct il4965_beacon_notif {
+ struct il4965_tx_resp beacon_notify_hdr;
__le32 low_tsf;
__le32 high_tsf;
__le32 ibss_mgr_status;
} __packed;
/*
- * REPLY_TX_BEACON = 0x91 (command, has simple generic response)
+ * C_TX_BEACON = 0x91 (command, has simple generic response)
*/
-struct iwl3945_tx_beacon_cmd {
- struct iwl3945_tx_cmd tx;
+struct il3945_tx_beacon_cmd {
+ struct il3945_tx_cmd tx;
__le16 tim_idx;
u8 tim_size;
u8 reserved1;
struct ieee80211_hdr frame[0]; /* beacon frame */
} __packed;
-struct iwl_tx_beacon_cmd {
- struct iwl_tx_cmd tx;
+struct il_tx_beacon_cmd {
+ struct il_tx_cmd tx;
__le16 tim_idx;
u8 tim_size;
u8 reserved1;
@@ -2707,7 +2687,7 @@ struct iwl_tx_beacon_cmd {
*
*****************************************************************************/
-#define IWL_TEMP_CONVERT 260
+#define IL_TEMP_CONVERT 260
#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
@@ -2727,9 +2707,9 @@ struct rate_histogram {
} failed;
} __packed;
-/* statistics command response */
+/* stats command response */
-struct iwl39_statistics_rx_phy {
+struct iwl39_stats_rx_phy {
__le32 ina_cnt;
__le32 fina_cnt;
__le32 plcp_err;
@@ -2747,7 +2727,7 @@ struct iwl39_statistics_rx_phy {
__le32 sent_cts_cnt;
} __packed;
-struct iwl39_statistics_rx_non_phy {
+struct iwl39_stats_rx_non_phy {
__le32 bogus_cts; /* CTS received when not expecting CTS */
__le32 bogus_ack; /* ACK received when not expecting ACK */
__le32 non_bssid_frames; /* number of frames with BSSID that
@@ -2758,13 +2738,13 @@ struct iwl39_statistics_rx_non_phy {
* our serving channel */
} __packed;
-struct iwl39_statistics_rx {
- struct iwl39_statistics_rx_phy ofdm;
- struct iwl39_statistics_rx_phy cck;
- struct iwl39_statistics_rx_non_phy general;
+struct iwl39_stats_rx {
+ struct iwl39_stats_rx_phy ofdm;
+ struct iwl39_stats_rx_phy cck;
+ struct iwl39_stats_rx_non_phy general;
} __packed;
-struct iwl39_statistics_tx {
+struct iwl39_stats_tx {
__le32 preamble_cnt;
__le32 rx_detected_cnt;
__le32 bt_prio_defer_cnt;
@@ -2776,31 +2756,31 @@ struct iwl39_statistics_tx {
__le32 actual_ack_cnt;
} __packed;
-struct statistics_dbg {
+struct stats_dbg {
__le32 burst_check;
__le32 burst_count;
__le32 wait_for_silence_timeout_cnt;
__le32 reserved[3];
} __packed;
-struct iwl39_statistics_div {
+struct iwl39_stats_div {
__le32 tx_on_a;
__le32 tx_on_b;
__le32 exec_time;
__le32 probe_time;
} __packed;
-struct iwl39_statistics_general {
+struct iwl39_stats_general {
__le32 temperature;
- struct statistics_dbg dbg;
+ struct stats_dbg dbg;
__le32 sleep_time;
__le32 slots_out;
__le32 slots_idle;
__le32 ttl_timestamp;
- struct iwl39_statistics_div div;
+ struct iwl39_stats_div div;
} __packed;
-struct statistics_rx_phy {
+struct stats_rx_phy {
__le32 ina_cnt;
__le32 fina_cnt;
__le32 plcp_err;
@@ -2823,7 +2803,7 @@ struct statistics_rx_phy {
__le32 reserved3;
} __packed;
-struct statistics_rx_ht_phy {
+struct stats_rx_ht_phy {
__le32 plcp_err;
__le32 overrun_err;
__le32 early_overrun_err;
@@ -2838,7 +2818,7 @@ struct statistics_rx_ht_phy {
#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1)
-struct statistics_rx_non_phy {
+struct stats_rx_non_phy {
__le32 bogus_cts; /* CTS received when not expecting CTS */
__le32 bogus_ack; /* ACK received when not expecting ACK */
__le32 non_bssid_frames; /* number of frames with BSSID that
@@ -2852,15 +2832,15 @@ struct statistics_rx_non_phy {
__le32 num_missed_bcon; /* number of missed beacons */
__le32 adc_rx_saturation_time; /* count in 0.8us units the time the
* ADC was in saturation */
- __le32 ina_detection_search_time;/* total time (in 0.8us) searched
- * for INA */
+ __le32 ina_detection_search_time; /* total time (in 0.8us) searched
+ * for INA */
__le32 beacon_silence_rssi_a; /* RSSI silence after beacon frame */
__le32 beacon_silence_rssi_b; /* RSSI silence after beacon frame */
__le32 beacon_silence_rssi_c; /* RSSI silence after beacon frame */
__le32 interference_data_flag; /* flag for interference data
* availability. 1 when data is
* available. */
- __le32 channel_load; /* counts RX Enable time in uSec */
+ __le32 channel_load; /* counts RX Enable time in uSec */
__le32 dsp_false_alarms; /* DSP false alarm (both OFDM
* and CCK) counter */
__le32 beacon_rssi_a;
@@ -2871,28 +2851,28 @@ struct statistics_rx_non_phy {
__le32 beacon_energy_c;
} __packed;
-struct statistics_rx {
- struct statistics_rx_phy ofdm;
- struct statistics_rx_phy cck;
- struct statistics_rx_non_phy general;
- struct statistics_rx_ht_phy ofdm_ht;
+struct stats_rx {
+ struct stats_rx_phy ofdm;
+ struct stats_rx_phy cck;
+ struct stats_rx_non_phy general;
+ struct stats_rx_ht_phy ofdm_ht;
} __packed;
/**
- * struct statistics_tx_power - current tx power
+ * struct stats_tx_power - current tx power
*
* @ant_a: current tx power on chain a in 1/2 dB step
* @ant_b: current tx power on chain b in 1/2 dB step
* @ant_c: current tx power on chain c in 1/2 dB step
*/
-struct statistics_tx_power {
+struct stats_tx_power {
u8 ant_a;
u8 ant_b;
u8 ant_c;
u8 reserved;
} __packed;
-struct statistics_tx_non_phy_agg {
+struct stats_tx_non_phy_agg {
__le32 ba_timeout;
__le32 ba_reschedule_frames;
__le32 scd_query_agg_frame_cnt;
@@ -2905,7 +2885,7 @@ struct statistics_tx_non_phy_agg {
__le32 rx_ba_rsp_cnt;
} __packed;
-struct statistics_tx {
+struct stats_tx {
__le32 preamble_cnt;
__le32 rx_detected_cnt;
__le32 bt_prio_defer_cnt;
@@ -2920,13 +2900,12 @@ struct statistics_tx {
__le32 burst_abort_missing_next_frame_cnt;
__le32 cts_timeout_collision;
__le32 ack_or_ba_timeout_collision;
- struct statistics_tx_non_phy_agg agg;
+ struct stats_tx_non_phy_agg agg;
__le32 reserved1;
} __packed;
-
-struct statistics_div {
+struct stats_div {
__le32 tx_on_a;
__le32 tx_on_b;
__le32 exec_time;
@@ -2935,14 +2914,14 @@ struct statistics_div {
__le32 reserved2;
} __packed;
-struct statistics_general_common {
- __le32 temperature; /* radio temperature */
- struct statistics_dbg dbg;
+struct stats_general_common {
+ __le32 temperature; /* radio temperature */
+ struct stats_dbg dbg;
__le32 sleep_time;
__le32 slots_out;
__le32 slots_idle;
__le32 ttl_timestamp;
- struct statistics_div div;
+ struct stats_div div;
__le32 rx_enable_counter;
/*
* num_of_sos_states:
@@ -2952,73 +2931,73 @@ struct statistics_general_common {
__le32 num_of_sos_states;
} __packed;
-struct statistics_general {
- struct statistics_general_common common;
+struct stats_general {
+ struct stats_general_common common;
__le32 reserved2;
__le32 reserved3;
} __packed;
-#define UCODE_STATISTICS_CLEAR_MSK (0x1 << 0)
-#define UCODE_STATISTICS_FREQUENCY_MSK (0x1 << 1)
-#define UCODE_STATISTICS_NARROW_BAND_MSK (0x1 << 2)
+#define UCODE_STATS_CLEAR_MSK (0x1 << 0)
+#define UCODE_STATS_FREQUENCY_MSK (0x1 << 1)
+#define UCODE_STATS_NARROW_BAND_MSK (0x1 << 2)
/*
- * REPLY_STATISTICS_CMD = 0x9c,
+ * C_STATS = 0x9c,
* all devices identical.
*
- * This command triggers an immediate response containing uCode statistics.
- * The response is in the same format as STATISTICS_NOTIFICATION 0x9d, below.
+ * This command triggers an immediate response containing uCode stats.
+ * The response is in the same format as N_STATS 0x9d, below.
*
* If the CLEAR_STATS configuration flag is set, uCode will clear its
- * internal copy of the statistics (counters) after issuing the response.
- * This flag does not affect STATISTICS_NOTIFICATIONs after beacons (see below).
+ * internal copy of the stats (counters) after issuing the response.
+ * This flag does not affect N_STATSs after beacons (see below).
*
* If the DISABLE_NOTIF configuration flag is set, uCode will not issue
- * STATISTICS_NOTIFICATIONs after received beacons (see below). This flag
- * does not affect the response to the REPLY_STATISTICS_CMD 0x9c itself.
+ * N_STATSs after received beacons (see below). This flag
+ * does not affect the response to the C_STATS 0x9c itself.
*/
-#define IWL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */
-#define IWL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2)/* see above */
-struct iwl_statistics_cmd {
- __le32 configuration_flags; /* IWL_STATS_CONF_* */
+#define IL_STATS_CONF_CLEAR_STATS cpu_to_le32(0x1) /* see above */
+#define IL_STATS_CONF_DISABLE_NOTIF cpu_to_le32(0x2) /* see above */
+struct il_stats_cmd {
+ __le32 configuration_flags; /* IL_STATS_CONF_* */
} __packed;
/*
- * STATISTICS_NOTIFICATION = 0x9d (notification only, not a command)
+ * N_STATS = 0x9d (notification only, not a command)
*
* By default, uCode issues this notification after receiving a beacon
* while associated. To disable this behavior, set DISABLE_NOTIF flag in the
- * REPLY_STATISTICS_CMD 0x9c, above.
+ * C_STATS 0x9c, above.
*
* Statistics counters continue to increment beacon after beacon, but are
- * cleared when changing channels or when driver issues REPLY_STATISTICS_CMD
+ * cleared when changing channels or when driver issues C_STATS
* 0x9c with CLEAR_STATS bit set (see above).
*
- * uCode also issues this notification during scans. uCode clears statistics
- * appropriately so that each notification contains statistics for only the
+ * uCode also issues this notification during scans. uCode clears stats
+ * appropriately so that each notification contains stats for only the
* one channel that has just been scanned.
*/
-#define STATISTICS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2)
-#define STATISTICS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8)
+#define STATS_REPLY_FLG_BAND_24G_MSK cpu_to_le32(0x2)
+#define STATS_REPLY_FLG_HT40_MODE_MSK cpu_to_le32(0x8)
-struct iwl3945_notif_statistics {
+struct il3945_notif_stats {
__le32 flag;
- struct iwl39_statistics_rx rx;
- struct iwl39_statistics_tx tx;
- struct iwl39_statistics_general general;
+ struct iwl39_stats_rx rx;
+ struct iwl39_stats_tx tx;
+ struct iwl39_stats_general general;
} __packed;
-struct iwl_notif_statistics {
+struct il_notif_stats {
__le32 flag;
- struct statistics_rx rx;
- struct statistics_tx tx;
- struct statistics_general general;
+ struct stats_rx rx;
+ struct stats_tx tx;
+ struct stats_general general;
} __packed;
/*
- * MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
+ * N_MISSED_BEACONS = 0xa2 (notification only, not a command)
*
- * uCode send MISSED_BEACONS_NOTIFICATION to driver when detect beacon missed
+ * uCode sends N_MISSED_BEACONS to the driver when it detects missed beacons,
 * regardless of how many were missed. When the driver receives the
 * notification, it can find inside the command all of the beacon information,
 * which includes the number of total missed beacons, number of consecutive missed
@@ -3035,18 +3014,17 @@ struct iwl_notif_statistics {
*
*/
-#define IWL_MISSED_BEACON_THRESHOLD_MIN (1)
-#define IWL_MISSED_BEACON_THRESHOLD_DEF (5)
-#define IWL_MISSED_BEACON_THRESHOLD_MAX IWL_MISSED_BEACON_THRESHOLD_DEF
+#define IL_MISSED_BEACON_THRESHOLD_MIN (1)
+#define IL_MISSED_BEACON_THRESHOLD_DEF (5)
+#define IL_MISSED_BEACON_THRESHOLD_MAX IL_MISSED_BEACON_THRESHOLD_DEF
-struct iwl_missed_beacon_notif {
+struct il_missed_beacon_notif {
__le32 consecutive_missed_beacons;
__le32 total_missed_becons;
__le32 num_expected_beacons;
__le32 num_recvd_beacons;
} __packed;
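A brief sketch of consuming this notification against the default threshold above; a real driver would kick off sensitivity recalibration rather than just logging:

static void il_handle_missed_beacon_example(struct il_missed_beacon_notif *mb)
{
	u32 consecutive = le32_to_cpu(mb->consecutive_missed_beacons);

	if (consecutive > IL_MISSED_BEACON_THRESHOLD_DEF)
		pr_debug("missed %u consecutive beacons (%u total)\n",
			 consecutive,
			 le32_to_cpu(mb->total_missed_becons));
}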
-
/******************************************************************************
* (11)
* Rx Calibration Commands:
@@ -3062,7 +3040,7 @@ struct iwl_missed_beacon_notif {
*****************************************************************************/
/**
- * SENSITIVITY_CMD = 0xa8 (command, has simple generic response)
+ * C_SENSITIVITY = 0xa8 (command, has simple generic response)
*
* This command sets up the Rx signal detector for a sensitivity level that
* is high enough to lock onto all signals within the associated network,
@@ -3076,12 +3054,12 @@ struct iwl_missed_beacon_notif {
* time listening, not transmitting). Driver must adjust sensitivity so that
* the ratio of actual false alarms to actual Rx time falls within this range.
*
- * While associated, uCode delivers STATISTICS_NOTIFICATIONs after each
+ * While associated, uCode delivers N_STATSs after each
* received beacon. These provide information to the driver to analyze the
- * sensitivity. Don't analyze statistics that come in from scanning, or any
- * other non-associated-network source. Pertinent statistics include:
+ * sensitivity. Don't analyze stats that come in from scanning, or any
+ * other non-associated-network source. Pertinent stats include:
*
- * From "general" statistics (struct statistics_rx_non_phy):
+ * From "general" stats (struct stats_rx_non_phy):
*
* (beacon_energy_[abc] & 0x0FF00) >> 8 (unsigned, higher value is lower level)
* Measure of energy of desired signal. Used for establishing a level
@@ -3094,7 +3072,7 @@ struct iwl_missed_beacon_notif {
* uSecs of actual Rx time during beacon period (varies according to
* how much time was spent transmitting).
*
- * From "cck" and "ofdm" statistics (struct statistics_rx_phy), separately:
+ * From "cck" and "ofdm" stats (struct stats_rx_phy), separately:
*
* false_alarm_cnt
* Signal locks abandoned early (before phy-level header).
@@ -3111,15 +3089,15 @@ struct iwl_missed_beacon_notif {
*
* Total number of false alarms = false_alarms + plcp_errs
*
- * For OFDM, adjust the following table entries in struct iwl_sensitivity_cmd
+ * For OFDM, adjust the following table entries in struct il_sensitivity_cmd
* (notice that the start points for OFDM are at or close to settings for
* maximum sensitivity):
*
* START / MIN / MAX
- * HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX 90 / 85 / 120
- * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX 170 / 170 / 210
- * HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX 105 / 105 / 140
- * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX 220 / 220 / 270
+ * HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX 90 / 85 / 120
+ * HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX 170 / 170 / 210
+ * HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX 105 / 105 / 140
+ * HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX 220 / 220 / 270
*
* If actual rate of OFDM false alarms (+ plcp_errors) is too high
* (greater than 50 for each 204.8 msecs listening), reduce sensitivity
@@ -3152,30 +3130,30 @@ struct iwl_missed_beacon_notif {
* Reset this to 0 at the first beacon period that falls within the
* "good" range (5 to 50 false alarms per 204.8 milliseconds rx).
*
- * Then, adjust the following CCK table entries in struct iwl_sensitivity_cmd
+ * Then, adjust the following CCK table entries in struct il_sensitivity_cmd
* (notice that the start points for CCK are at maximum sensitivity):
*
* START / MIN / MAX
- * HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX 125 / 125 / 200
- * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX 200 / 200 / 400
- * HD_MIN_ENERGY_CCK_DET_INDEX 100 / 0 / 100
+ * HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX 125 / 125 / 200
+ * HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX 200 / 200 / 400
+ * HD_MIN_ENERGY_CCK_DET_IDX 100 / 0 / 100
*
* If actual rate of CCK false alarms (+ plcp_errors) is too high
* (greater than 50 for each 204.8 msecs listening), method for reducing
* sensitivity is:
*
- * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
+ * 1) *Add* 3 to value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX,
* up to max 400.
*
- * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is < 160,
+ * 2) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX is < 160,
* sensitivity has been reduced a significant amount; bring it up to
* a moderate 161. Otherwise, *add* 3, up to max 200.
*
- * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX is > 160,
+ * 3) a) If current value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX is > 160,
* sensitivity has been reduced only a moderate or small amount;
- * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_INDEX,
+ * *subtract* 2 from value in HD_MIN_ENERGY_CCK_DET_IDX,
* down to min 0. Otherwise (if gain has been significantly reduced),
- * don't change the HD_MIN_ENERGY_CCK_DET_INDEX value.
+ * don't change the HD_MIN_ENERGY_CCK_DET_IDX value.
*
* b) Save a snapshot of the "silence reference".
*
@@ -3191,13 +3169,13 @@ struct iwl_missed_beacon_notif {
*
* Method for increasing sensitivity:
*
- * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX,
+ * 1) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX,
* down to min 125.
*
- * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX,
+ * 2) *Subtract* 3 from value in HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX,
* down to min 200.
*
- * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_INDEX, up to max 100.
+ * 3) *Add* 2 to value in HD_MIN_ENERGY_CCK_DET_IDX, up to max 100.
*
* If actual rate of CCK false alarms (+ plcp_errors) is within good range
* (between 5 and 50 for each 204.8 msecs listening):
@@ -3206,57 +3184,56 @@ struct iwl_missed_beacon_notif {
*
* 2) If previous beacon had too many CCK false alarms (+ plcp_errors),
* give some extra margin to energy threshold by *subtracting* 8
- * from value in HD_MIN_ENERGY_CCK_DET_INDEX.
+ * from value in HD_MIN_ENERGY_CCK_DET_IDX.
*
* For all cases (too few, too many, good range), make sure that the CCK
* detection threshold (energy) is below the energy level for robust
* detection over the past 10 beacon periods, the "Max cck energy".
* Lower values mean higher energy; this means making sure that the value
- * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy".
+ * in HD_MIN_ENERGY_CCK_DET_IDX is at or *above* "Max cck energy".
*
*/
/*
- * Table entries in SENSITIVITY_CMD (struct iwl_sensitivity_cmd)
- */
-#define HD_TABLE_SIZE (11) /* number of entries */
-#define HD_MIN_ENERGY_CCK_DET_INDEX (0) /* table indexes */
-#define HD_MIN_ENERGY_OFDM_DET_INDEX (1)
-#define HD_AUTO_CORR32_X1_TH_ADD_MIN_INDEX (2)
-#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_INDEX (3)
-#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_INDEX (4)
-#define HD_AUTO_CORR32_X4_TH_ADD_MIN_INDEX (5)
-#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_INDEX (6)
-#define HD_BARKER_CORR_TH_ADD_MIN_INDEX (7)
-#define HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX (8)
-#define HD_AUTO_CORR40_X4_TH_ADD_MIN_INDEX (9)
-#define HD_OFDM_ENERGY_TH_IN_INDEX (10)
-
-/* Control field in struct iwl_sensitivity_cmd */
-#define SENSITIVITY_CMD_CONTROL_DEFAULT_TABLE cpu_to_le16(0)
-#define SENSITIVITY_CMD_CONTROL_WORK_TABLE cpu_to_le16(1)
+ * Table entries in C_SENSITIVITY (struct il_sensitivity_cmd)
+ */
+#define HD_TBL_SIZE (11) /* number of entries */
+#define HD_MIN_ENERGY_CCK_DET_IDX (0) /* table idxes */
+#define HD_MIN_ENERGY_OFDM_DET_IDX (1)
+#define HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX (2)
+#define HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX (3)
+#define HD_AUTO_CORR40_X4_TH_ADD_MIN_MRC_IDX (4)
+#define HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX (5)
+#define HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX (6)
+#define HD_BARKER_CORR_TH_ADD_MIN_IDX (7)
+#define HD_BARKER_CORR_TH_ADD_MIN_MRC_IDX (8)
+#define HD_AUTO_CORR40_X4_TH_ADD_MIN_IDX (9)
+#define HD_OFDM_ENERGY_TH_IN_IDX (10)
+
+/* Control field in struct il_sensitivity_cmd */
+#define C_SENSITIVITY_CONTROL_DEFAULT_TBL cpu_to_le16(0)
+#define C_SENSITIVITY_CONTROL_WORK_TBL cpu_to_le16(1)
/**
- * struct iwl_sensitivity_cmd
+ * struct il_sensitivity_cmd
* @control: (1) updates working table, (0) updates default table
- * @table: energy threshold values, use HD_* as index into table
+ * @table: energy threshold values, use HD_* as idx into table
*
* Always use "1" in "control" to update uCode's working table and DSP.
*/
-struct iwl_sensitivity_cmd {
- __le16 control; /* always use "1" */
- __le16 table[HD_TABLE_SIZE]; /* use HD_* as index */
+struct il_sensitivity_cmd {
+ __le16 control; /* always use "1" */
+ __le16 table[HD_TBL_SIZE]; /* use HD_* as idx */
} __packed;
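A hedged sketch of pushing a working table with the OFDM START values quoted above; a real driver fills every HD_* entry, and il_send_cmd_pdu() is assumed from common.c:

static int il_send_start_sensitivity(struct il_priv *il)
{
	struct il_sensitivity_cmd cmd = {
		.control = C_SENSITIVITY_CONTROL_WORK_TBL,
	};

	cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_IDX] = cpu_to_le16(90);
	cmd.table[HD_AUTO_CORR32_X1_TH_ADD_MIN_MRC_IDX] = cpu_to_le16(170);
	cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_IDX] = cpu_to_le16(105);
	cmd.table[HD_AUTO_CORR32_X4_TH_ADD_MIN_MRC_IDX] = cpu_to_le16(220);

	return il_send_cmd_pdu(il, C_SENSITIVITY, sizeof(cmd), &cmd);
}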
-
/**
- * REPLY_PHY_CALIBRATION_CMD = 0xb0 (command, has simple generic response)
+ * C_PHY_CALIBRATION = 0xb0 (command, has simple generic response)
*
* This command sets the relative gains of 4965 device's 3 radio receiver chains.
*
* After the first association, driver should accumulate signal and noise
- * statistics from the STATISTICS_NOTIFICATIONs that follow the first 20
- * beacons from the associated network (don't collect statistics that come
+ * stats from the N_STATSs that follow the first 20
+ * beacons from the associated network (don't collect stats that come
* in from scanning, or any other non-network source).
*
* DISCONNECTED ANTENNA:
@@ -3264,7 +3241,7 @@ struct iwl_sensitivity_cmd {
* Driver should determine which antennas are actually connected, by comparing
* average beacon signal levels for the 3 Rx chains. Accumulate (add) the
* following values over 20 beacons, one accumulator for each of the chains
- * a/b/c, from struct statistics_rx_non_phy:
+ * a/b/c, from struct stats_rx_non_phy:
*
* beacon_rssi_[abc] & 0x0FF (unsigned, units in dB)
*
@@ -3283,7 +3260,7 @@ struct iwl_sensitivity_cmd {
* to antennas, see above) for gain, by comparing the average signal levels
* detected during the silence after each beacon (background noise).
* Accumulate (add) the following values over 20 beacons, one accumulator for
- * each of the chains a/b/c, from struct statistics_rx_non_phy:
+ * each of the chains a/b/c, from struct stats_rx_non_phy:
*
* beacon_silence_rssi_[abc] & 0x0FF (unsigned, units in dB)
*
@@ -3294,7 +3271,7 @@ struct iwl_sensitivity_cmd {
* (accum_noise[i] - accum_noise[reference]) / 30
*
* The "30" adjusts the dB in the 20 accumulated samples to units of 1.5 dB.
- * For use in diff_gain_[abc] fields of struct iwl_calibration_cmd, the
+ * For use in diff_gain_[abc] fields of struct il_calibration_cmd, the
* driver should limit the difference results to a range of 0-3 (0-4.5 dB),
* and set bit 2 to indicate "reduce gain". The value for the reference
* (weakest) chain should be "0".
@@ -3306,24 +3283,24 @@ struct iwl_sensitivity_cmd {
/* Phy calibration command for series */
/* The default calibrate table size if not specified by firmware */
-#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
+#define IL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
enum {
- IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
- IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19,
+ IL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
+ IL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE = 19,
};
-#define IWL_MAX_PHY_CALIBRATE_TBL_SIZE (253)
+#define IL_MAX_PHY_CALIBRATE_TBL_SIZE (253)
-struct iwl_calib_hdr {
+struct il_calib_hdr {
u8 op_code;
u8 first_group;
u8 groups_num;
u8 data_valid;
} __packed;
-/* IWL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
-struct iwl_calib_diff_gain_cmd {
- struct iwl_calib_hdr hdr;
+/* IL_PHY_CALIBRATE_DIFF_GAIN_CMD (7) */
+struct il_calib_diff_gain_cmd {
+ struct il_calib_hdr hdr;
s8 diff_gain_a; /* see above */
s8 diff_gain_b;
s8 diff_gain_c;
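A minimal sketch of the diff_gain encoding described above: the 20-beacon noise accumulators are compared against the weakest (reference) chain, scaled to 1.5 dB steps, clamped to 0-3, and flagged with bit 2 to request a gain reduction:

static s8 il4965_calc_diff_gain(u32 accum_noise, u32 accum_noise_ref)
{
	u32 delta;

	if (accum_noise <= accum_noise_ref)
		return 0;			/* reference (weakest) chain */

	delta = (accum_noise - accum_noise_ref) / 30;
	if (delta > 3)
		delta = 3;

	return (s8)(delta | BIT(2));		/* reduce gain on this chain */
}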
@@ -3338,12 +3315,12 @@ struct iwl_calib_diff_gain_cmd {
/*
* LEDs Command & Response
- * REPLY_LEDS_CMD = 0x48 (command, has simple generic response)
+ * C_LEDS = 0x48 (command, has simple generic response)
*
* For each of 3 possible LEDs (Activity/Link/Tech, selected by "id" field),
* this command turns it on or off, or sets up a periodic blinking cycle.
*/
-struct iwl_led_cmd {
+struct il_led_cmd {
__le32 interval; /* "interval" in uSec */
u8 id; /* 1: Activity, 2: Link, 3: Tech */
u8 off; /* # intervals off while blinking;
@@ -3353,14 +3330,15 @@ struct iwl_led_cmd {
u8 reserved;
} __packed;
-
/******************************************************************************
* (13)
* Union of all expected notifications/responses:
*
*****************************************************************************/
-struct iwl_rx_packet {
+#define IL_RX_FRAME_SIZE_MSK 0x00003fff
+
+struct il_rx_pkt {
/*
* The first 4 bytes of the RX frame header contain both the RX frame
* size and some flags.
@@ -3372,27 +3350,27 @@ struct iwl_rx_packet {
* 13-00: RX frame size
*/
__le32 len_n_flags;
- struct iwl_cmd_header hdr;
+ struct il_cmd_header hdr;
union {
- struct iwl3945_rx_frame rx_frame;
- struct iwl3945_tx_resp tx_resp;
- struct iwl3945_beacon_notif beacon_status;
-
- struct iwl_alive_resp alive_frame;
- struct iwl_spectrum_notification spectrum_notif;
- struct iwl_csa_notification csa_notif;
- struct iwl_error_resp err_resp;
- struct iwl_card_state_notif card_state_notif;
- struct iwl_add_sta_resp add_sta;
- struct iwl_rem_sta_resp rem_sta;
- struct iwl_sleep_notification sleep_notif;
- struct iwl_spectrum_resp spectrum;
- struct iwl_notif_statistics stats;
- struct iwl_compressed_ba_resp compressed_ba;
- struct iwl_missed_beacon_notif missed_beacon;
+ struct il3945_rx_frame rx_frame;
+ struct il3945_tx_resp tx_resp;
+ struct il3945_beacon_notif beacon_status;
+
+ struct il_alive_resp alive_frame;
+ struct il_spectrum_notification spectrum_notif;
+ struct il_csa_notification csa_notif;
+ struct il_error_resp err_resp;
+ struct il_card_state_notif card_state_notif;
+ struct il_add_sta_resp add_sta;
+ struct il_rem_sta_resp rem_sta;
+ struct il_sleep_notification sleep_notif;
+ struct il_spectrum_resp spectrum;
+ struct il_notif_stats stats;
+ struct il_compressed_ba_resp compressed_ba;
+ struct il_missed_beacon_notif missed_beacon;
__le32 status;
u8 raw[0];
} u;
} __packed;
-#endif /* __iwl_legacy_commands_h__ */
+#endif /* __il_commands_h__ */
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
new file mode 100644
index 0000000..36454d0
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -0,0 +1,5867 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/lockdep.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/skbuff.h>
+#include <net/mac80211.h>
+
+#include "common.h"
+
+int
+_il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout)
+{
+ const int interval = 10; /* microseconds */
+ int t = 0;
+
+ do {
+ if ((_il_rd(il, addr) & mask) == (bits & mask))
+ return t;
+ udelay(interval);
+ t += interval;
+ } while (t < timeout);
+
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(_il_poll_bit);
+
+void
+il_set_bit(struct il_priv *p, u32 r, u32 m)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&p->reg_lock, reg_flags);
+ _il_set_bit(p, r, m);
+ spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+EXPORT_SYMBOL(il_set_bit);
+
+void
+il_clear_bit(struct il_priv *p, u32 r, u32 m)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&p->reg_lock, reg_flags);
+ _il_clear_bit(p, r, m);
+ spin_unlock_irqrestore(&p->reg_lock, reg_flags);
+}
+EXPORT_SYMBOL(il_clear_bit);
+
+int
+_il_grab_nic_access(struct il_priv *il)
+{
+ int ret;
+ u32 val;
+
+ /* this bit wakes up the NIC */
+ _il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+ /*
+ * These bits say the device is running, and should keep running for
+ * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
+ * but they do not indicate that embedded SRAM is restored yet;
+ * 3945 and 4965 have volatile SRAM, and must save/restore contents
+ * to/from host DRAM when sleeping/waking for power-saving.
+ * Each direction takes approximately 1/4 millisecond; with this
+ * overhead, it's a good idea to grab and hold MAC_ACCESS_REQ if a
+ * series of register accesses is expected (e.g. reading the Event Log),
+ * to keep the device from sleeping.
+ *
+ * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
+ * SRAM is okay/restored. We don't check that here because this call
+ * is just for hardware register access; but GP1 MAC_SLEEP check is a
+ * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
+ *
+ */
+ ret =
+ _il_poll_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+ CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
+ if (ret < 0) {
+ val = _il_rd(il, CSR_GP_CNTRL);
+ IL_ERR("MAC is in deep sleep! CSR_GP_CNTRL = 0x%08X\n", val);
+ _il_wr(il, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(_il_grab_nic_access);
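+
+/*
+ * Illustrative usage sketch for the grab/release pair above: a burst of
+ * register accesses would typically hold MAC_ACCESS_REQ for its whole
+ * duration, mirroring il_wr_prph() below:
+ *
+ *	spin_lock_irqsave(&il->reg_lock, flags);
+ *	if (!_il_grab_nic_access(il)) {
+ *		val = _il_rd_prph(il, reg);
+ *		...
+ *		_il_release_nic_access(il);
+ *	}
+ *	spin_unlock_irqrestore(&il->reg_lock, flags);
+ */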
+
+int
+il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout)
+{
+ const int interval = 10; /* microseconds */
+ int t = 0;
+
+ do {
+ if ((il_rd(il, addr) & mask) == mask)
+ return t;
+ udelay(interval);
+ t += interval;
+ } while (t < timeout);
+
+ return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(il_poll_bit);
+
+u32
+il_rd_prph(struct il_priv *il, u32 reg)
+{
+ unsigned long reg_flags;
+ u32 val;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ _il_grab_nic_access(il);
+ val = _il_rd_prph(il, reg);
+ _il_release_nic_access(il);
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+ return val;
+}
+EXPORT_SYMBOL(il_rd_prph);
+
+void
+il_wr_prph(struct il_priv *il, u32 addr, u32 val)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ if (!_il_grab_nic_access(il)) {
+ _il_wr_prph(il, addr, val);
+ _il_release_nic_access(il);
+ }
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+EXPORT_SYMBOL(il_wr_prph);
+
+u32
+il_read_targ_mem(struct il_priv *il, u32 addr)
+{
+ unsigned long reg_flags;
+ u32 value;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ _il_grab_nic_access(il);
+
+ _il_wr(il, HBUS_TARG_MEM_RADDR, addr);
+ rmb();
+ value = _il_rd(il, HBUS_TARG_MEM_RDAT);
+
+ _il_release_nic_access(il);
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+ return value;
+}
+EXPORT_SYMBOL(il_read_targ_mem);
+
+void
+il_write_targ_mem(struct il_priv *il, u32 addr, u32 val)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ if (!_il_grab_nic_access(il)) {
+ _il_wr(il, HBUS_TARG_MEM_WADDR, addr);
+ wmb();
+ _il_wr(il, HBUS_TARG_MEM_WDAT, val);
+ _il_release_nic_access(il);
+ }
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+EXPORT_SYMBOL(il_write_targ_mem);
+
+const char *
+il_get_cmd_string(u8 cmd)
+{
+ switch (cmd) {
+ IL_CMD(N_ALIVE);
+ IL_CMD(N_ERROR);
+ IL_CMD(C_RXON);
+ IL_CMD(C_RXON_ASSOC);
+ IL_CMD(C_QOS_PARAM);
+ IL_CMD(C_RXON_TIMING);
+ IL_CMD(C_ADD_STA);
+ IL_CMD(C_REM_STA);
+ IL_CMD(C_WEPKEY);
+ IL_CMD(N_3945_RX);
+ IL_CMD(C_TX);
+ IL_CMD(C_RATE_SCALE);
+ IL_CMD(C_LEDS);
+ IL_CMD(C_TX_LINK_QUALITY_CMD);
+ IL_CMD(C_CHANNEL_SWITCH);
+ IL_CMD(N_CHANNEL_SWITCH);
+ IL_CMD(C_SPECTRUM_MEASUREMENT);
+ IL_CMD(N_SPECTRUM_MEASUREMENT);
+ IL_CMD(C_POWER_TBL);
+ IL_CMD(N_PM_SLEEP);
+ IL_CMD(N_PM_DEBUG_STATS);
+ IL_CMD(C_SCAN);
+ IL_CMD(C_SCAN_ABORT);
+ IL_CMD(N_SCAN_START);
+ IL_CMD(N_SCAN_RESULTS);
+ IL_CMD(N_SCAN_COMPLETE);
+ IL_CMD(N_BEACON);
+ IL_CMD(C_TX_BEACON);
+ IL_CMD(C_TX_PWR_TBL);
+ IL_CMD(C_BT_CONFIG);
+ IL_CMD(C_STATS);
+ IL_CMD(N_STATS);
+ IL_CMD(N_CARD_STATE);
+ IL_CMD(N_MISSED_BEACONS);
+ IL_CMD(C_CT_KILL_CONFIG);
+ IL_CMD(C_SENSITIVITY);
+ IL_CMD(C_PHY_CALIBRATION);
+ IL_CMD(N_RX_PHY);
+ IL_CMD(N_RX_MPDU);
+ IL_CMD(N_RX);
+ IL_CMD(N_COMPRESSED_BA);
+ default:
+ return "UNKNOWN";
+
+ }
+}
+EXPORT_SYMBOL(il_get_cmd_string);
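+
+/*
+ * Note: IL_CMD() above is presumed to be a simple stringification helper
+ * declared in common.h, along the lines of
+ *
+ *	#define IL_CMD(x) case x: return #x
+ *
+ * so each IL_CMD(...) entry expands to a switch case that returns the
+ * command name as a string.
+ */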
+
+#define HOST_COMPLETE_TIMEOUT (HZ / 2)
+
+static void
+il_generic_cmd_callback(struct il_priv *il, struct il_device_cmd *cmd,
+ struct il_rx_pkt *pkt)
+{
+ if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+ IL_ERR("Bad return from %s (0x%08X)\n",
+ il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+ return;
+ }
+#ifdef CONFIG_IWLEGACY_DEBUG
+ switch (cmd->hdr.cmd) {
+ case C_TX_LINK_QUALITY_CMD:
+ case C_SENSITIVITY:
+ D_HC_DUMP("back from %s (0x%08X)\n",
+ il_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
+ break;
+ default:
+ D_HC("back from %s (0x%08X)\n", il_get_cmd_string(cmd->hdr.cmd),
+ pkt->hdr.flags);
+ }
+#endif
+}
+
+static int
+il_send_cmd_async(struct il_priv *il, struct il_host_cmd *cmd)
+{
+ int ret;
+
+ BUG_ON(!(cmd->flags & CMD_ASYNC));
+
+ /* An asynchronous command can not expect an SKB to be set. */
+ BUG_ON(cmd->flags & CMD_WANT_SKB);
+
+ /* Assign a generic callback if one is not provided */
+ if (!cmd->callback)
+ cmd->callback = il_generic_cmd_callback;
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return -EBUSY;
+
+ ret = il_enqueue_hcmd(il, cmd);
+ if (ret < 0) {
+ IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
+ il_get_cmd_string(cmd->id), ret);
+ return ret;
+ }
+ return 0;
+}
+
+int
+il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd)
+{
+ int cmd_idx;
+ int ret;
+
+ lockdep_assert_held(&il->mutex);
+
+ BUG_ON(cmd->flags & CMD_ASYNC);
+
+ /* A synchronous command can not have a callback set. */
+ BUG_ON(cmd->callback);
+
+ D_INFO("Attempting to send sync command %s\n",
+ il_get_cmd_string(cmd->id));
+
+ set_bit(S_HCMD_ACTIVE, &il->status);
+ D_INFO("Setting HCMD_ACTIVE for command %s\n",
+ il_get_cmd_string(cmd->id));
+
+ cmd_idx = il_enqueue_hcmd(il, cmd);
+ if (cmd_idx < 0) {
+ ret = cmd_idx;
+ IL_ERR("Error sending %s: enqueue_hcmd failed: %d\n",
+ il_get_cmd_string(cmd->id), ret);
+ goto out;
+ }
+
+ ret = wait_event_timeout(il->wait_command_queue,
+ !test_bit(S_HCMD_ACTIVE, &il->status),
+ HOST_COMPLETE_TIMEOUT);
+ if (!ret) {
+ if (test_bit(S_HCMD_ACTIVE, &il->status)) {
+ IL_ERR("Error sending %s: time out after %dms.\n",
+ il_get_cmd_string(cmd->id),
+ jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
+
+ clear_bit(S_HCMD_ACTIVE, &il->status);
+ D_INFO("Clearing HCMD_ACTIVE for command %s\n",
+ il_get_cmd_string(cmd->id));
+ ret = -ETIMEDOUT;
+ goto cancel;
+ }
+ }
+
+ if (test_bit(S_RF_KILL_HW, &il->status)) {
+ IL_ERR("Command %s aborted: RF KILL Switch\n",
+ il_get_cmd_string(cmd->id));
+ ret = -ECANCELED;
+ goto fail;
+ }
+ if (test_bit(S_FW_ERROR, &il->status)) {
+ IL_ERR("Command %s failed: FW Error\n",
+ il_get_cmd_string(cmd->id));
+ ret = -EIO;
+ goto fail;
+ }
+ if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
+ IL_ERR("Error: Response NULL in '%s'\n",
+ il_get_cmd_string(cmd->id));
+ ret = -EIO;
+ goto cancel;
+ }
+
+ ret = 0;
+ goto out;
+
+cancel:
+ if (cmd->flags & CMD_WANT_SKB) {
+ /*
+ * Cancel the CMD_WANT_SKB flag for the cmd in the
+ * TX cmd queue. Otherwise in case the cmd comes
+ * in later, it will possibly set an invalid
+ * address (cmd->meta.source).
+ */
+ il->txq[il->cmd_queue].meta[cmd_idx].flags &= ~CMD_WANT_SKB;
+ }
+fail:
+ if (cmd->reply_page) {
+ il_free_pages(il, cmd->reply_page);
+ cmd->reply_page = 0;
+ }
+out:
+ return ret;
+}
+EXPORT_SYMBOL(il_send_cmd_sync);
+
+int
+il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd)
+{
+ if (cmd->flags & CMD_ASYNC)
+ return il_send_cmd_async(il, cmd);
+
+ return il_send_cmd_sync(il, cmd);
+}
+EXPORT_SYMBOL(il_send_cmd);
+
+int
+il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len, const void *data)
+{
+ struct il_host_cmd cmd = {
+ .id = id,
+ .len = len,
+ .data = data,
+ };
+
+ return il_send_cmd_sync(il, &cmd);
+}
+EXPORT_SYMBOL(il_send_cmd_pdu);
+
+int
+il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
+ void (*callback) (struct il_priv *il,
+ struct il_device_cmd *cmd,
+ struct il_rx_pkt *pkt))
+{
+ struct il_host_cmd cmd = {
+ .id = id,
+ .len = len,
+ .data = data,
+ };
+
+ cmd.flags |= CMD_ASYNC;
+ cmd.callback = callback;
+
+ return il_send_cmd_async(il, &cmd);
+}
+EXPORT_SYMBOL(il_send_cmd_pdu_async);
+
+/* default: IL_LED_BLINK(0) using blinking idx table */
+static int led_mode;
+module_param(led_mode, int, S_IRUGO);
+MODULE_PARM_DESC(led_mode,
+ "0=system default, " "1=On(RF On)/Off(RF Off), 2=blinking");
+
+/* Throughput OFF time(ms) ON time (ms)
+ * >300 25 25
+ * >200 to 300 40 40
+ * >100 to 200 55 55
+ * >70 to 100 65 65
+ * >50 to 70 75 75
+ * >20 to 50 85 85
+ * >10 to 20 95 95
+ * >5 to 10 110 110
+ * >1 to 5 130 130
+ * >0 to 1 167 167
+ * <=0 SOLID ON
+ */
+static const struct ieee80211_tpt_blink il_blink[] = {
+ {.throughput = 0, .blink_time = 334},
+ {.throughput = 1 * 1024 - 1, .blink_time = 260},
+ {.throughput = 5 * 1024 - 1, .blink_time = 220},
+ {.throughput = 10 * 1024 - 1, .blink_time = 190},
+ {.throughput = 20 * 1024 - 1, .blink_time = 170},
+ {.throughput = 50 * 1024 - 1, .blink_time = 150},
+ {.throughput = 70 * 1024 - 1, .blink_time = 130},
+ {.throughput = 100 * 1024 - 1, .blink_time = 110},
+ {.throughput = 200 * 1024 - 1, .blink_time = 80},
+ {.throughput = 300 * 1024 - 1, .blink_time = 50},
+};
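+
+/*
+ * Each blink_time entry above is the combined on + off period (in ms) from
+ * the table above: e.g. the ">300" row gives 25 + 25 = 50 ms, and the
+ * ">0 to 1" row gives 167 + 167 = 334 ms.
+ */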
+
+/*
+ * Adjust LED blink rate to compensate for the MAC clock frequency deviation
+ * of each HW. LED blink rate analysis showed an average deviation of 0% on
+ * 3945 and 5% on 4965 HW.
+ * The LED on/off time per HW must be compensated for according to the
+ * deviation to achieve the desired LED frequency.
+ * The calculation is: (100-averageDeviation)/100 * blinkTime
+ * For code efficiency the calculation will be:
+ * compensation = (100 - averageDeviation) * 64 / 100
+ * NewBlinkTime = (compensation * BlinkTime) / 64
+ */
+static inline u8
+il_blink_compensation(struct il_priv *il, u8 time, u16 compensation)
+{
+ if (!compensation) {
+ IL_ERR("undefined blink compensation: "
+ "use pre-defined blinking time\n");
+ return time;
+ }
+
+ return (u8) ((time * compensation) >> 6);
+}
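+
+/*
+ * Worked example of the compensation formula above (illustrative numbers):
+ * with the ~5% deviation measured on 4965, compensation = (100 - 5) * 64 /
+ * 100 = 60, so a nominal 110 ms blink period becomes (110 * 60) >> 6 =
+ * 103 ms, i.e. close to the intended 95% (the 6-bit fixed point rounds
+ * slightly low).
+ */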
+
+/* Set led pattern command */
+static int
+il_led_cmd(struct il_priv *il, unsigned long on, unsigned long off)
+{
+ struct il_led_cmd led_cmd = {
+ .id = IL_LED_LINK,
+ .interval = IL_DEF_LED_INTRVL
+ };
+ int ret;
+
+ if (!test_bit(S_READY, &il->status))
+ return -EBUSY;
+
+ if (il->blink_on == on && il->blink_off == off)
+ return 0;
+
+ if (off == 0) {
+ /* led is SOLID_ON */
+ on = IL_LED_SOLID;
+ }
+
+ D_LED("Led blink time compensation=%u\n",
+ il->cfg->base_params->led_compensation);
+ led_cmd.on =
+ il_blink_compensation(il, on,
+ il->cfg->base_params->led_compensation);
+ led_cmd.off =
+ il_blink_compensation(il, off,
+ il->cfg->base_params->led_compensation);
+
+ ret = il->cfg->ops->led->cmd(il, &led_cmd);
+ if (!ret) {
+ il->blink_on = on;
+ il->blink_off = off;
+ }
+ return ret;
+}
+
+static void
+il_led_brightness_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct il_priv *il = container_of(led_cdev, struct il_priv, led);
+ unsigned long on = 0;
+
+ if (brightness > 0)
+ on = IL_LED_SOLID;
+
+ il_led_cmd(il, on, 0);
+}
+
+static int
+il_led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
+ unsigned long *delay_off)
+{
+ struct il_priv *il = container_of(led_cdev, struct il_priv, led);
+
+ return il_led_cmd(il, *delay_on, *delay_off);
+}
+
+void
+il_leds_init(struct il_priv *il)
+{
+ int mode = led_mode;
+ int ret;
+
+ if (mode == IL_LED_DEFAULT)
+ mode = il->cfg->led_mode;
+
+ il->led.name =
+ kasprintf(GFP_KERNEL, "%s-led", wiphy_name(il->hw->wiphy));
+ il->led.brightness_set = il_led_brightness_set;
+ il->led.blink_set = il_led_blink_set;
+ il->led.max_brightness = 1;
+
+ switch (mode) {
+ case IL_LED_DEFAULT:
+ WARN_ON(1);
+ break;
+ case IL_LED_BLINK:
+ il->led.default_trigger =
+ ieee80211_create_tpt_led_trigger(il->hw,
+ IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
+ il_blink,
+ ARRAY_SIZE(il_blink));
+ break;
+ case IL_LED_RF_STATE:
+ il->led.default_trigger = ieee80211_get_radio_led_name(il->hw);
+ break;
+ }
+
+ ret = led_classdev_register(&il->pci_dev->dev, &il->led);
+ if (ret) {
+ kfree(il->led.name);
+ return;
+ }
+
+ il->led_registered = true;
+}
+EXPORT_SYMBOL(il_leds_init);
+
+void
+il_leds_exit(struct il_priv *il)
+{
+ if (!il->led_registered)
+ return;
+
+ led_classdev_unregister(&il->led);
+ kfree(il->led.name);
+}
+EXPORT_SYMBOL(il_leds_exit);
+
+/************************** EEPROM BANDS ****************************
+ *
+ * The il_eeprom_band definitions below provide the mapping from the
+ * EEPROM contents to the specific channel number supported for each
+ * band.
+ *
+ * For example, il_priv->eeprom.band_3_channels[4] from the band_3
+ * definition below maps to physical channel 42 in the 5.2GHz spectrum.
+ * The specific geography and calibration information for that channel
+ * is contained in the eeprom map itself.
+ *
+ * During init, we copy the eeprom information and channel map
+ * information into il->channel_info_24/52 and il->channel_map_24/52
+ *
+ * channel_map_24/52 provides the idx in the channel_info array for a
+ * given channel. We have to have two separate maps as there is channel
+ * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
+ * band_2
+ *
+ * A value of 0xff stored in the channel_map indicates that the channel
+ * is not supported by the hardware at all.
+ *
+ * A value of 0xfe in the channel_map indicates that the channel is not
+ * valid for Tx with the current hardware. This means that
+ * while the system can tune and receive on a given channel, it may not
+ * be able to associate or transmit any frames on that
+ * channel. There is no corresponding channel information for that
+ * entry.
+ *
+ *********************************************************************/
+
+/* 2.4 GHz */
+const u8 il_eeprom_band_1[14] = {
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+};
+
+/* 5.2 GHz bands */
+static const u8 il_eeprom_band_2[] = { /* 4915-5080MHz */
+ 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
+};
+
+static const u8 il_eeprom_band_3[] = { /* 5170-5320MHz */
+ 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+};
+
+static const u8 il_eeprom_band_4[] = { /* 5500-5700MHz */
+ 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+};
+
+static const u8 il_eeprom_band_5[] = { /* 5725-5825MHz */
+ 145, 149, 153, 157, 161, 165
+};
+
+static const u8 il_eeprom_band_6[] = { /* 2.4 ht40 channel */
+ 1, 2, 3, 4, 5, 6, 7
+};
+
+static const u8 il_eeprom_band_7[] = { /* 5.2 ht40 channel */
+ 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
+};
+
+/******************************************************************************
+ *
+ * EEPROM related functions
+ *
+******************************************************************************/
+
+static int
+il_eeprom_verify_signature(struct il_priv *il)
+{
+ u32 gp = _il_rd(il, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
+ int ret = 0;
+
+ D_EEPROM("EEPROM signature=0x%08x\n", gp);
+ switch (gp) {
+ case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
+ case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
+ break;
+ default:
+ IL_ERR("bad EEPROM signature, EEPROM_GP=0x%08x\n", gp);
+ ret = -ENOENT;
+ break;
+ }
+ return ret;
+}
+
+const u8 *
+il_eeprom_query_addr(const struct il_priv *il, size_t offset)
+{
+ BUG_ON(offset >= il->cfg->base_params->eeprom_size);
+ return &il->eeprom[offset];
+}
+EXPORT_SYMBOL(il_eeprom_query_addr);
+
+u16
+il_eeprom_query16(const struct il_priv *il, size_t offset)
+{
+ if (!il->eeprom)
+ return 0;
+ return (u16) il->eeprom[offset] | ((u16) il->eeprom[offset + 1] << 8);
+}
+EXPORT_SYMBOL(il_eeprom_query16);
+
+/**
+ * il_eeprom_init - read EEPROM contents
+ *
+ * Load the EEPROM contents from adapter into il->eeprom
+ *
+ * NOTE: This routine uses the non-debug IO access functions.
+ */
+int
+il_eeprom_init(struct il_priv *il)
+{
+ __le16 *e;
+ u32 gp = _il_rd(il, CSR_EEPROM_GP);
+ int sz;
+ int ret;
+ u16 addr;
+
+ /* allocate eeprom */
+ sz = il->cfg->base_params->eeprom_size;
+ D_EEPROM("NVM size = %d\n", sz);
+ il->eeprom = kzalloc(sz, GFP_KERNEL);
+ if (!il->eeprom) {
+ ret = -ENOMEM;
+ goto alloc_err;
+ }
+ e = (__le16 *) il->eeprom;
+
+ il->cfg->ops->lib->apm_ops.init(il);
+
+ ret = il_eeprom_verify_signature(il);
+ if (ret < 0) {
+ IL_ERR("EEPROM not found, EEPROM_GP=0x%08x\n", gp);
+ ret = -ENOENT;
+ goto err;
+ }
+
+ /* Make sure driver (instead of uCode) is allowed to read EEPROM */
+ ret = il->cfg->ops->lib->eeprom_ops.acquire_semaphore(il);
+ if (ret < 0) {
+ IL_ERR("Failed to acquire EEPROM semaphore.\n");
+ ret = -ENOENT;
+ goto err;
+ }
+
+ /* eeprom is an array of 16bit values */
+ for (addr = 0; addr < sz; addr += sizeof(u16)) {
+ u32 r;
+
+ _il_wr(il, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
+
+ ret =
+ _il_poll_bit(il, CSR_EEPROM_REG,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ CSR_EEPROM_REG_READ_VALID_MSK,
+ IL_EEPROM_ACCESS_TIMEOUT);
+ if (ret < 0) {
+ IL_ERR("Time out reading EEPROM[%d]\n", addr);
+ goto done;
+ }
+ r = _il_rd(il, CSR_EEPROM_REG);
+ e[addr / 2] = cpu_to_le16(r >> 16);
+ }
+
+ D_EEPROM("NVM Type: %s, version: 0x%x\n", "EEPROM",
+ il_eeprom_query16(il, EEPROM_VERSION));
+
+ ret = 0;
+done:
+ il->cfg->ops->lib->eeprom_ops.release_semaphore(il);
+
+err:
+ if (ret)
+ il_eeprom_free(il);
+ /* Reset chip to save power until we load uCode during "up". */
+ il_apm_stop(il);
+alloc_err:
+ return ret;
+}
+EXPORT_SYMBOL(il_eeprom_init);
+
+void
+il_eeprom_free(struct il_priv *il)
+{
+ kfree(il->eeprom);
+ il->eeprom = NULL;
+}
+EXPORT_SYMBOL(il_eeprom_free);
+
+static void
+il_init_band_reference(const struct il_priv *il, int eep_band,
+ int *eeprom_ch_count,
+ const struct il_eeprom_channel **eeprom_ch_info,
+ const u8 **eeprom_ch_idx)
+{
+ u32 offset =
+ il->cfg->ops->lib->eeprom_ops.regulatory_bands[eep_band - 1];
+ switch (eep_band) {
+ case 1: /* 2.4GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_1);
+ *eeprom_ch_info =
+ (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+ offset);
+ *eeprom_ch_idx = il_eeprom_band_1;
+ break;
+ case 2: /* 4.9GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_2);
+ *eeprom_ch_info =
+ (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+ offset);
+ *eeprom_ch_idx = il_eeprom_band_2;
+ break;
+ case 3: /* 5.2GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_3);
+ *eeprom_ch_info =
+ (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+ offset);
+ *eeprom_ch_idx = il_eeprom_band_3;
+ break;
+ case 4: /* 5.5GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_4);
+ *eeprom_ch_info =
+ (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+ offset);
+ *eeprom_ch_idx = il_eeprom_band_4;
+ break;
+ case 5: /* 5.7GHz band */
+ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_5);
+ *eeprom_ch_info =
+ (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+ offset);
+ *eeprom_ch_idx = il_eeprom_band_5;
+ break;
+ case 6: /* 2.4GHz ht40 channels */
+ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_6);
+ *eeprom_ch_info =
+ (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+ offset);
+ *eeprom_ch_idx = il_eeprom_band_6;
+ break;
+ case 7: /* 5 GHz ht40 channels */
+ *eeprom_ch_count = ARRAY_SIZE(il_eeprom_band_7);
+ *eeprom_ch_info =
+ (struct il_eeprom_channel *)il_eeprom_query_addr(il,
+ offset);
+ *eeprom_ch_idx = il_eeprom_band_7;
+ break;
+ default:
+ BUG();
+ }
+}
+
+#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
+ ? # x " " : "")
+/**
+ * il_mod_ht40_chan_info - Copy ht40 channel info into driver's il_channel_info.
+ *
+ * Does not set up a command or touch hardware.
+ */
+static int
+il_mod_ht40_chan_info(struct il_priv *il, enum ieee80211_band band, u16 channel,
+ const struct il_eeprom_channel *eeprom_ch,
+ u8 clear_ht40_extension_channel)
+{
+ struct il_channel_info *ch_info;
+
+ ch_info =
+ (struct il_channel_info *)il_get_channel_info(il, band, channel);
+
+ if (!il_is_channel_valid(ch_info))
+ return -1;
+
+ D_EEPROM("HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
+ " Ad-Hoc %ssupported\n", ch_info->channel,
+ il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
+ CHECK_AND_PRINT(IBSS), CHECK_AND_PRINT(ACTIVE),
+ CHECK_AND_PRINT(RADAR), CHECK_AND_PRINT(WIDE),
+ CHECK_AND_PRINT(DFS), eeprom_ch->flags,
+ eeprom_ch->max_power_avg,
+ ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS) &&
+ !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ? "" : "not ");
+
+ ch_info->ht40_eeprom = *eeprom_ch;
+ ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
+ ch_info->ht40_flags = eeprom_ch->flags;
+ if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
+ ch_info->ht40_extension_channel &=
+ ~clear_ht40_extension_channel;
+
+ return 0;
+}
+
+#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
+ ? # x " " : "")
+
+/**
+ * il_init_channel_map - Set up driver's info for all possible channels
+ */
+int
+il_init_channel_map(struct il_priv *il)
+{
+ int eeprom_ch_count = 0;
+ const u8 *eeprom_ch_idx = NULL;
+ const struct il_eeprom_channel *eeprom_ch_info = NULL;
+ int band, ch;
+ struct il_channel_info *ch_info;
+
+ if (il->channel_count) {
+ D_EEPROM("Channel map already initialized.\n");
+ return 0;
+ }
+
+ D_EEPROM("Initializing regulatory info from EEPROM\n");
+
+ il->channel_count =
+ ARRAY_SIZE(il_eeprom_band_1) + ARRAY_SIZE(il_eeprom_band_2) +
+ ARRAY_SIZE(il_eeprom_band_3) + ARRAY_SIZE(il_eeprom_band_4) +
+ ARRAY_SIZE(il_eeprom_band_5);
+
+ D_EEPROM("Parsing data for %d channels.\n", il->channel_count);
+
+ il->channel_info =
+ kzalloc(sizeof(struct il_channel_info) * il->channel_count,
+ GFP_KERNEL);
+ if (!il->channel_info) {
+ IL_ERR("Could not allocate channel_info\n");
+ il->channel_count = 0;
+ return -ENOMEM;
+ }
+
+ ch_info = il->channel_info;
+
+ /* Loop through the 5 EEPROM bands adding them in order to the
+ * channel map we maintain (which contains more information than
+ * just what is in the EEPROM) */
+ for (band = 1; band <= 5; band++) {
+
+ il_init_band_reference(il, band, &eeprom_ch_count,
+ &eeprom_ch_info, &eeprom_ch_idx);
+
+ /* Loop through each band adding each of the channels */
+ for (ch = 0; ch < eeprom_ch_count; ch++) {
+ ch_info->channel = eeprom_ch_idx[ch];
+ ch_info->band =
+ (band ==
+ 1) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+
+ /* permanently store EEPROM's channel regulatory flags
+ * and max power in channel info database. */
+ ch_info->eeprom = eeprom_ch_info[ch];
+
+ /* Copy the run-time flags so they are there even on
+ * invalid channels */
+ ch_info->flags = eeprom_ch_info[ch].flags;
+ /* First write that ht40 is not enabled, and then enable
+ * one by one */
+ ch_info->ht40_extension_channel =
+ IEEE80211_CHAN_NO_HT40;
+
+ if (!(il_is_channel_valid(ch_info))) {
+ D_EEPROM("Ch. %d Flags %x [%sGHz] - "
+ "No traffic\n", ch_info->channel,
+ ch_info->flags,
+ il_is_channel_a_band(ch_info) ? "5.2" :
+ "2.4");
+ ch_info++;
+ continue;
+ }
+
+ /* Initialize regulatory-based run-time data */
+ ch_info->max_power_avg = ch_info->curr_txpow =
+ eeprom_ch_info[ch].max_power_avg;
+ ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
+ ch_info->min_power = 0;
+
+ D_EEPROM("Ch. %d [%sGHz] " "%s%s%s%s%s%s(0x%02x %ddBm):"
+ " Ad-Hoc %ssupported\n", ch_info->channel,
+ il_is_channel_a_band(ch_info) ? "5.2" : "2.4",
+ CHECK_AND_PRINT_I(VALID),
+ CHECK_AND_PRINT_I(IBSS),
+ CHECK_AND_PRINT_I(ACTIVE),
+ CHECK_AND_PRINT_I(RADAR),
+ CHECK_AND_PRINT_I(WIDE),
+ CHECK_AND_PRINT_I(DFS),
+ eeprom_ch_info[ch].flags,
+ eeprom_ch_info[ch].max_power_avg,
+ ((eeprom_ch_info[ch].
+ flags & EEPROM_CHANNEL_IBSS) &&
+ !(eeprom_ch_info[ch].
+ flags & EEPROM_CHANNEL_RADAR)) ? "" :
+ "not ");
+
+ ch_info++;
+ }
+ }
+
+ /* Check if we do have HT40 channels */
+ if (il->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
+ EEPROM_REGULATORY_BAND_NO_HT40 &&
+ il->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
+ EEPROM_REGULATORY_BAND_NO_HT40)
+ return 0;
+
+ /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
+ for (band = 6; band <= 7; band++) {
+ enum ieee80211_band ieeeband;
+
+ il_init_band_reference(il, band, &eeprom_ch_count,
+ &eeprom_ch_info, &eeprom_ch_idx);
+
+ /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
+ ieeeband =
+ (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
+
+ /* Loop through each band adding each of the channels */
+ for (ch = 0; ch < eeprom_ch_count; ch++) {
+ /* Set up driver's info for lower half */
+ il_mod_ht40_chan_info(il, ieeeband, eeprom_ch_idx[ch],
+ &eeprom_ch_info[ch],
+ IEEE80211_CHAN_NO_HT40PLUS);
+
+ /* Set up driver's info for upper half */
+ il_mod_ht40_chan_info(il, ieeeband,
+ eeprom_ch_idx[ch] + 4,
+ &eeprom_ch_info[ch],
+ IEEE80211_CHAN_NO_HT40MINUS);
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(il_init_channel_map);
+
+/*
+ * il_free_channel_map - undo allocations in il_init_channel_map
+ */
+void
+il_free_channel_map(struct il_priv *il)
+{
+ kfree(il->channel_info);
+ il->channel_count = 0;
+}
+EXPORT_SYMBOL(il_free_channel_map);
+
+/**
+ * il_get_channel_info - Find driver's private channel info
+ *
+ * Based on band and channel number.
+ */
+const struct il_channel_info *
+il_get_channel_info(const struct il_priv *il, enum ieee80211_band band,
+ u16 channel)
+{
+ int i;
+
+ switch (band) {
+ case IEEE80211_BAND_5GHZ:
+ /* 5 GHz channels start after the 14 2.4 GHz entries in channel_info */
+ for (i = 14; i < il->channel_count; i++) {
+ if (il->channel_info[i].channel == channel)
+ return &il->channel_info[i];
+ }
+ break;
+ case IEEE80211_BAND_2GHZ:
+ if (channel >= 1 && channel <= 14)
+ return &il->channel_info[channel - 1];
+ break;
+ default:
+ BUG();
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL(il_get_channel_info);
+
+/*
+ * Setting power level allows the card to go to sleep when not busy.
+ *
+ * We calculate a sleep command based on the required latency, which
+ * we get from mac80211. In order to handle thermal throttling, we can
+ * also use pre-defined power levels.
+ */
+
+/*
+ * This defines the old power levels. They are still used by default
+ * (level 1) and for thermal throttle (levels 3 through 5)
+ */
+
+struct il_power_vec_entry {
+ struct il_powertable_cmd cmd;
+ u8 no_dtim; /* number of skip dtim */
+};
+
+static void
+il_power_sleep_cam_cmd(struct il_priv *il, struct il_powertable_cmd *cmd)
+{
+ memset(cmd, 0, sizeof(*cmd));
+
+ if (il->power_data.pci_pm)
+ cmd->flags |= IL_POWER_PCI_PM_MSK;
+
+ D_POWER("Sleep command for CAM\n");
+}
+
+static int
+il_set_power(struct il_priv *il, struct il_powertable_cmd *cmd)
+{
+ D_POWER("Sending power/sleep command\n");
+ D_POWER("Flags value = 0x%08X\n", cmd->flags);
+ D_POWER("Tx timeout = %u\n", le32_to_cpu(cmd->tx_data_timeout));
+ D_POWER("Rx timeout = %u\n", le32_to_cpu(cmd->rx_data_timeout));
+ D_POWER("Sleep interval vector = { %d , %d , %d , %d , %d }\n",
+ le32_to_cpu(cmd->sleep_interval[0]),
+ le32_to_cpu(cmd->sleep_interval[1]),
+ le32_to_cpu(cmd->sleep_interval[2]),
+ le32_to_cpu(cmd->sleep_interval[3]),
+ le32_to_cpu(cmd->sleep_interval[4]));
+
+ return il_send_cmd_pdu(il, C_POWER_TBL,
+ sizeof(struct il_powertable_cmd), cmd);
+}
+
+int
+il_power_set_mode(struct il_priv *il, struct il_powertable_cmd *cmd, bool force)
+{
+ int ret;
+ bool update_chains;
+
+ lockdep_assert_held(&il->mutex);
+
+ /* Don't update the RX chain when chain noise calibration is running */
+ update_chains = il->chain_noise_data.state == IL_CHAIN_NOISE_DONE ||
+ il->chain_noise_data.state == IL_CHAIN_NOISE_ALIVE;
+
+ if (!memcmp(&il->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
+ return 0;
+
+ if (!il_is_ready_rf(il))
+ return -EIO;
+
+ /* scan complete use sleep_power_next, need to be updated */
+ memcpy(&il->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
+ if (test_bit(S_SCANNING, &il->status) && !force) {
+ D_INFO("Defer power set mode while scanning\n");
+ return 0;
+ }
+
+ if (cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK)
+ set_bit(S_POWER_PMI, &il->status);
+
+ ret = il_set_power(il, cmd);
+ if (!ret) {
+ if (!(cmd->flags & IL_POWER_DRIVER_ALLOW_SLEEP_MSK))
+ clear_bit(S_POWER_PMI, &il->status);
+
+ if (il->cfg->ops->lib->update_chain_flags && update_chains)
+ il->cfg->ops->lib->update_chain_flags(il);
+ else if (il->cfg->ops->lib->update_chain_flags)
+ D_POWER("Cannot update the power, chain noise "
+ "calibration running: %d\n",
+ il->chain_noise_data.state);
+
+ memcpy(&il->power_data.sleep_cmd, cmd, sizeof(*cmd));
+ } else
+ IL_ERR("set power fail, ret = %d", ret);
+
+ return ret;
+}
+
+int
+il_power_update_mode(struct il_priv *il, bool force)
+{
+ struct il_powertable_cmd cmd;
+
+ il_power_sleep_cam_cmd(il, &cmd);
+ return il_power_set_mode(il, &cmd, force);
+}
+EXPORT_SYMBOL(il_power_update_mode);
+
+/* initialize to default */
+void
+il_power_initialize(struct il_priv *il)
+{
+ u16 lctl = il_pcie_link_ctl(il);
+
+ il->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
+
+ il->power_data.debug_sleep_level_override = -1;
+
+ memset(&il->power_data.sleep_cmd, 0, sizeof(il->power_data.sleep_cmd));
+}
+EXPORT_SYMBOL(il_power_initialize);
+
+/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
+ * sending probe req. This should be set long enough to hear probe responses
+ * from more than one AP. */
+#define IL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
+#define IL_ACTIVE_DWELL_TIME_52 (20)
+
+#define IL_ACTIVE_DWELL_FACTOR_24GHZ (3)
+#define IL_ACTIVE_DWELL_FACTOR_52GHZ (2)
+
+/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
+ * Must be set longer than active dwell time.
+ * For the most reliable scan, set > AP beacon interval (typically 100msec). */
+#define IL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
+#define IL_PASSIVE_DWELL_TIME_52 (10)
+#define IL_PASSIVE_DWELL_BASE (100)
+#define IL_CHANNEL_TUNE_TIME 5
+
+static int
+il_send_scan_abort(struct il_priv *il)
+{
+ int ret;
+ struct il_rx_pkt *pkt;
+ struct il_host_cmd cmd = {
+ .id = C_SCAN_ABORT,
+ .flags = CMD_WANT_SKB,
+ };
+
+ /* Exit instantly with an error when the device is not ready
+ * to receive the scan abort command or is not currently
+ * performing a hardware scan */
+ if (!test_bit(S_READY, &il->status) ||
+ !test_bit(S_GEO_CONFIGURED, &il->status) ||
+ !test_bit(S_SCAN_HW, &il->status) ||
+ test_bit(S_FW_ERROR, &il->status) ||
+ test_bit(S_EXIT_PENDING, &il->status))
+ return -EIO;
+
+ ret = il_send_cmd_sync(il, &cmd);
+ if (ret)
+ return ret;
+
+ pkt = (struct il_rx_pkt *)cmd.reply_page;
+ if (pkt->u.status != CAN_ABORT_STATUS) {
+ /* The scan abort will return 1 for success or
+ * 2 for "failure". A failure condition can be
+ * due to simply not being in an active scan, which
+ * can occur if we send the scan abort before the
+ * microcode has notified us that a scan has
+ * completed. */
+ D_SCAN("SCAN_ABORT ret %d.\n", pkt->u.status);
+ ret = -EIO;
+ }
+
+ il_free_pages(il, cmd.reply_page);
+ return ret;
+}
+
+static void
+il_complete_scan(struct il_priv *il, bool aborted)
+{
+ /* check if scan was requested from mac80211 */
+ if (il->scan_request) {
+ D_SCAN("Complete scan in mac80211\n");
+ ieee80211_scan_completed(il->hw, aborted);
+ }
+
+ il->scan_vif = NULL;
+ il->scan_request = NULL;
+}
+
+void
+il_force_scan_end(struct il_priv *il)
+{
+ lockdep_assert_held(&il->mutex);
+
+ if (!test_bit(S_SCANNING, &il->status)) {
+ D_SCAN("Forcing scan end while not scanning\n");
+ return;
+ }
+
+ D_SCAN("Forcing scan end\n");
+ clear_bit(S_SCANNING, &il->status);
+ clear_bit(S_SCAN_HW, &il->status);
+ clear_bit(S_SCAN_ABORTING, &il->status);
+ il_complete_scan(il, true);
+}
+
+static void
+il_do_scan_abort(struct il_priv *il)
+{
+ int ret;
+
+ lockdep_assert_held(&il->mutex);
+
+ if (!test_bit(S_SCANNING, &il->status)) {
+ D_SCAN("Not performing scan to abort\n");
+ return;
+ }
+
+ if (test_and_set_bit(S_SCAN_ABORTING, &il->status)) {
+ D_SCAN("Scan abort in progress\n");
+ return;
+ }
+
+ ret = il_send_scan_abort(il);
+ if (ret) {
+ D_SCAN("Send scan abort failed %d\n", ret);
+ il_force_scan_end(il);
+ } else
+ D_SCAN("Successfully sent scan abort\n");
+}
+
+/**
+ * il_scan_cancel - Cancel any currently executing HW scan
+ */
+int
+il_scan_cancel(struct il_priv *il)
+{
+ D_SCAN("Queuing abort scan\n");
+ queue_work(il->workqueue, &il->abort_scan);
+ return 0;
+}
+EXPORT_SYMBOL(il_scan_cancel);
+
+/**
+ * il_scan_cancel_timeout - Cancel any currently executing HW scan
+ * @ms: amount of time to wait (in milliseconds) for scan to abort
+ *
+ */
+int
+il_scan_cancel_timeout(struct il_priv *il, unsigned long ms)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(ms);
+
+ lockdep_assert_held(&il->mutex);
+
+ D_SCAN("Scan cancel timeout\n");
+
+ il_do_scan_abort(il);
+
+ while (time_before_eq(jiffies, timeout)) {
+ if (!test_bit(S_SCAN_HW, &il->status))
+ break;
+ msleep(20);
+ }
+
+ return test_bit(S_SCAN_HW, &il->status);
+}
+EXPORT_SYMBOL(il_scan_cancel_timeout);
+
+/* Service response to C_SCAN (0x80) */
+static void
+il_hdl_scan(struct il_priv *il, struct il_rx_buf *rxb)
+{
+#ifdef CONFIG_IWLEGACY_DEBUG
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_scanreq_notification *notif =
+ (struct il_scanreq_notification *)pkt->u.raw;
+
+ D_SCAN("Scan request status = 0x%x\n", notif->status);
+#endif
+}
+
+/* Service N_SCAN_START (0x82) */
+static void
+il_hdl_scan_start(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_scanstart_notification *notif =
+ (struct il_scanstart_notification *)pkt->u.raw;
+ il->scan_start_tsf = le32_to_cpu(notif->tsf_low);
+ D_SCAN("Scan start: " "%d [802.11%s] "
+ "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n", notif->channel,
+ notif->band ? "bg" : "a", le32_to_cpu(notif->tsf_high),
+ le32_to_cpu(notif->tsf_low), notif->status, notif->beacon_timer);
+}
+
+/* Service N_SCAN_RESULTS (0x83) */
+static void
+il_hdl_scan_results(struct il_priv *il, struct il_rx_buf *rxb)
+{
+#ifdef CONFIG_IWLEGACY_DEBUG
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_scanresults_notification *notif =
+ (struct il_scanresults_notification *)pkt->u.raw;
+
+ D_SCAN("Scan ch.res: " "%d [802.11%s] " "(TSF: 0x%08X:%08X) - %d "
+ "elapsed=%lu usec\n", notif->channel, notif->band ? "bg" : "a",
+ le32_to_cpu(notif->tsf_high), le32_to_cpu(notif->tsf_low),
+ le32_to_cpu(notif->stats[0]),
+ le32_to_cpu(notif->tsf_low) - il->scan_start_tsf);
+#endif
+}
+
+/* Service N_SCAN_COMPLETE (0x84) */
+static void
+il_hdl_scan_complete(struct il_priv *il, struct il_rx_buf *rxb)
+{
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
+#endif
+
+ D_SCAN("Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
+ scan_notif->scanned_channels, scan_notif->tsf_low,
+ scan_notif->tsf_high, scan_notif->status);
+
+ /* The HW is no longer scanning */
+ clear_bit(S_SCAN_HW, &il->status);
+
+ D_SCAN("Scan on %sGHz took %dms\n",
+ (il->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
+ jiffies_to_msecs(jiffies - il->scan_start));
+
+ queue_work(il->workqueue, &il->scan_completed);
+}
+
+void
+il_setup_rx_scan_handlers(struct il_priv *il)
+{
+ /* scan handlers */
+ il->handlers[C_SCAN] = il_hdl_scan;
+ il->handlers[N_SCAN_START] = il_hdl_scan_start;
+ il->handlers[N_SCAN_RESULTS] = il_hdl_scan_results;
+ il->handlers[N_SCAN_COMPLETE] = il_hdl_scan_complete;
+}
+EXPORT_SYMBOL(il_setup_rx_scan_handlers);
+
+inline u16
+il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
+ u8 n_probes)
+{
+ if (band == IEEE80211_BAND_5GHZ)
+ return IL_ACTIVE_DWELL_TIME_52 +
+ IL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
+ else
+ return IL_ACTIVE_DWELL_TIME_24 +
+ IL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
+}
+EXPORT_SYMBOL(il_get_active_dwell_time);
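+
+/*
+ * Example (illustrative): an active scan on 2.4 GHz with n_probes == 2
+ * dwells for IL_ACTIVE_DWELL_TIME_24 +
+ * IL_ACTIVE_DWELL_FACTOR_24GHZ * (2 + 1) = 30 + 9 = 39 ms on each channel.
+ */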
+
+u16
+il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
+ struct ieee80211_vif *vif)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+ u16 value;
+
+ u16 passive =
+ (band ==
+ IEEE80211_BAND_2GHZ) ? IL_PASSIVE_DWELL_BASE +
+ IL_PASSIVE_DWELL_TIME_24 : IL_PASSIVE_DWELL_BASE +
+ IL_PASSIVE_DWELL_TIME_52;
+
+ if (il_is_any_associated(il)) {
+ /*
+ * If we're associated, we clamp the maximum passive
+ * dwell time to be 98% of the smallest beacon interval
+ * (minus 2 * channel tune time)
+ */
+ value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
+ if (value > IL_PASSIVE_DWELL_BASE || !value)
+ value = IL_PASSIVE_DWELL_BASE;
+ value = (value * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2;
+ passive = min(value, passive);
+ }
+
+ return passive;
+}
+EXPORT_SYMBOL(il_get_passive_dwell_time);
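+
+/*
+ * Example (illustrative): when associated with a beacon interval of 100,
+ * the clamp above gives (100 * 98) / 100 - IL_CHANNEL_TUNE_TIME * 2 =
+ * 98 - 10 = 88, so the default 2.4 GHz passive dwell of 100 + 20 = 120 ms
+ * is reduced to 88 ms.
+ */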
+
+void
+il_init_scan_params(struct il_priv *il)
+{
+ u8 ant_idx = fls(il->hw_params.valid_tx_ant) - 1;
+ if (!il->scan_tx_ant[IEEE80211_BAND_5GHZ])
+ il->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
+ if (!il->scan_tx_ant[IEEE80211_BAND_2GHZ])
+ il->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
+}
+EXPORT_SYMBOL(il_init_scan_params);
+
+static int
+il_scan_initiate(struct il_priv *il, struct ieee80211_vif *vif)
+{
+ int ret;
+
+ lockdep_assert_held(&il->mutex);
+
+ if (WARN_ON(!il->cfg->ops->utils->request_scan))
+ return -EOPNOTSUPP;
+
+ cancel_delayed_work(&il->scan_check);
+
+ if (!il_is_ready_rf(il)) {
+ IL_WARN("Request scan called when driver not ready.\n");
+ return -EIO;
+ }
+
+ if (test_bit(S_SCAN_HW, &il->status)) {
+ D_SCAN("Multiple concurrent scan requests.\n");
+ return -EBUSY;
+ }
+
+ if (test_bit(S_SCAN_ABORTING, &il->status)) {
+ D_SCAN("Scan request while abort pending.\n");
+ return -EBUSY;
+ }
+
+ D_SCAN("Starting scan...\n");
+
+ set_bit(S_SCANNING, &il->status);
+ il->scan_start = jiffies;
+
+ ret = il->cfg->ops->utils->request_scan(il, vif);
+ if (ret) {
+ clear_bit(S_SCANNING, &il->status);
+ return ret;
+ }
+
+ queue_delayed_work(il->workqueue, &il->scan_check,
+ IL_SCAN_CHECK_WATCHDOG);
+
+ return 0;
+}
+
+int
+il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req)
+{
+ struct il_priv *il = hw->priv;
+ int ret;
+
+ D_MAC80211("enter\n");
+
+ if (req->n_channels == 0)
+ return -EINVAL;
+
+ mutex_lock(&il->mutex);
+
+ if (test_bit(S_SCANNING, &il->status)) {
+ D_SCAN("Scan already in progress.\n");
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
+ /* mac80211 will only ask for one band at a time */
+ il->scan_request = req;
+ il->scan_vif = vif;
+ il->scan_band = req->channels[0]->band;
+
+ ret = il_scan_initiate(il, vif);
+
+ D_MAC80211("leave\n");
+
+out_unlock:
+ mutex_unlock(&il->mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(il_mac_hw_scan);
+
+static void
+il_bg_scan_check(struct work_struct *data)
+{
+ struct il_priv *il =
+ container_of(data, struct il_priv, scan_check.work);
+
+ D_SCAN("Scan check work\n");
+
+ /* Since we got here, the firmware did not finish the scan and
+ * is most likely in bad shape, so we don't bother to send an
+ * abort command; just force scan complete to mac80211 */
+ mutex_lock(&il->mutex);
+ il_force_scan_end(il);
+ mutex_unlock(&il->mutex);
+}
+
+/**
+ * il_fill_probe_req - fill in all required fields and IE for probe request
+ */
+
+u16
+il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
+ const u8 *ta, const u8 *ies, int ie_len, int left)
+{
+ int len = 0;
+ u8 *pos = NULL;
+
+ /* Make sure there is enough space for the probe request,
+ * two mandatory IEs and the data */
+ left -= 24;
+ if (left < 0)
+ return 0;
+
+ frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
+ memcpy(frame->da, il_bcast_addr, ETH_ALEN);
+ memcpy(frame->sa, ta, ETH_ALEN);
+ memcpy(frame->bssid, il_bcast_addr, ETH_ALEN);
+ frame->seq_ctrl = 0;
+
+ len += 24;
+
+ /* ...next IE... */
+ pos = &frame->u.probe_req.variable[0];
+
+ /* fill in our indirect SSID IE */
+ left -= 2;
+ if (left < 0)
+ return 0;
+ *pos++ = WLAN_EID_SSID;
+ *pos++ = 0;
+
+ len += 2;
+
+ if (WARN_ON(left < ie_len))
+ return len;
+
+ if (ies && ie_len) {
+ memcpy(pos, ies, ie_len);
+ len += ie_len;
+ }
+
+ return (u16) len;
+}
+EXPORT_SYMBOL(il_fill_probe_req);
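+
+/*
+ * Illustrative call site (buffer names are hypothetical): the return value
+ * is the number of bytes actually written into the frame buffer:
+ *
+ *	len = il_fill_probe_req(il, (struct ieee80211_mgmt *)buf, vif->addr,
+ *				il->scan_request->ie, il->scan_request->ie_len,
+ *				buf_size);
+ */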
+
+static void
+il_bg_abort_scan(struct work_struct *work)
+{
+ struct il_priv *il = container_of(work, struct il_priv, abort_scan);
+
+ D_SCAN("Abort scan work\n");
+
+ /* We keep the scan_check work queued in case the firmware does not
+ * report back the scan completed notification */
+ mutex_lock(&il->mutex);
+ il_scan_cancel_timeout(il, 200);
+ mutex_unlock(&il->mutex);
+}
+
+static void
+il_bg_scan_completed(struct work_struct *work)
+{
+ struct il_priv *il = container_of(work, struct il_priv, scan_completed);
+ bool aborted;
+
+ D_SCAN("Completed scan.\n");
+
+ cancel_delayed_work(&il->scan_check);
+
+ mutex_lock(&il->mutex);
+
+ aborted = test_and_clear_bit(S_SCAN_ABORTING, &il->status);
+ if (aborted)
+ D_SCAN("Aborted scan completed.\n");
+
+ if (!test_and_clear_bit(S_SCANNING, &il->status)) {
+ D_SCAN("Scan already completed.\n");
+ goto out_settings;
+ }
+
+ il_complete_scan(il, aborted);
+
+out_settings:
+ /* Can we still talk to firmware ? */
+ if (!il_is_ready_rf(il))
+ goto out;
+
+ /*
+ * We do not commit power settings while scan is pending,
+ * do it now if the settings changed.
+ */
+ il_power_set_mode(il, &il->power_data.sleep_cmd_next, false);
+ il_set_tx_power(il, il->tx_power_next, false);
+
+ il->cfg->ops->utils->post_scan(il);
+
+out:
+ mutex_unlock(&il->mutex);
+}
+
+void
+il_setup_scan_deferred_work(struct il_priv *il)
+{
+ INIT_WORK(&il->scan_completed, il_bg_scan_completed);
+ INIT_WORK(&il->abort_scan, il_bg_abort_scan);
+ INIT_DELAYED_WORK(&il->scan_check, il_bg_scan_check);
+}
+EXPORT_SYMBOL(il_setup_scan_deferred_work);
+
+void
+il_cancel_scan_deferred_work(struct il_priv *il)
+{
+ cancel_work_sync(&il->abort_scan);
+ cancel_work_sync(&il->scan_completed);
+
+ if (cancel_delayed_work_sync(&il->scan_check)) {
+ mutex_lock(&il->mutex);
+ il_force_scan_end(il);
+ mutex_unlock(&il->mutex);
+ }
+}
+EXPORT_SYMBOL(il_cancel_scan_deferred_work);
+
+/* il->sta_lock must be held */
+static void
+il_sta_ucode_activate(struct il_priv *il, u8 sta_id)
+{
+
+ if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE))
+ IL_ERR("ACTIVATE a non DRIVER active station id %u addr %pM\n",
+ sta_id, il->stations[sta_id].sta.sta.addr);
+
+ if (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) {
+ D_ASSOC("STA id %u addr %pM already present"
+ " in uCode (according to driver)\n", sta_id,
+ il->stations[sta_id].sta.sta.addr);
+ } else {
+ il->stations[sta_id].used |= IL_STA_UCODE_ACTIVE;
+ D_ASSOC("Added STA id %u addr %pM to uCode\n", sta_id,
+ il->stations[sta_id].sta.sta.addr);
+ }
+}
+
+static int
+il_process_add_sta_resp(struct il_priv *il, struct il_addsta_cmd *addsta,
+ struct il_rx_pkt *pkt, bool sync)
+{
+ u8 sta_id = addsta->sta.sta_id;
+ unsigned long flags;
+ int ret = -EIO;
+
+ if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+ IL_ERR("Bad return from C_ADD_STA (0x%08X)\n", pkt->hdr.flags);
+ return ret;
+ }
+
+ D_INFO("Processing response for adding station %u\n", sta_id);
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+
+ switch (pkt->u.add_sta.status) {
+ case ADD_STA_SUCCESS_MSK:
+ D_INFO("C_ADD_STA PASSED\n");
+ il_sta_ucode_activate(il, sta_id);
+ ret = 0;
+ break;
+ case ADD_STA_NO_ROOM_IN_TBL:
+ IL_ERR("Adding station %d failed, no room in table.\n", sta_id);
+ break;
+ case ADD_STA_NO_BLOCK_ACK_RESOURCE:
+ IL_ERR("Adding station %d failed, no block ack resource.\n",
+ sta_id);
+ break;
+ case ADD_STA_MODIFY_NON_EXIST_STA:
+ IL_ERR("Attempting to modify non-existing station %d\n",
+ sta_id);
+ break;
+ default:
+ D_ASSOC("Received C_ADD_STA:(0x%08X)\n", pkt->u.add_sta.status);
+ break;
+ }
+
+ D_INFO("%s station id %u addr %pM\n",
+ il->stations[sta_id].sta.mode ==
+ STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", sta_id,
+ il->stations[sta_id].sta.sta.addr);
+
+ /*
+ * XXX: The MAC address in the command buffer is often changed from
+ * the original sent to the device. That is, the MAC address
+ * written to the command buffer often is not the same MAC address
+ * read from the command buffer when the command returns. This
+ * issue has not yet been resolved, and this debugging output is left
+ * in place to help observe the problem.
+ */
+ D_INFO("%s station according to cmd buffer %pM\n",
+ il->stations[sta_id].sta.mode ==
+ STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", addsta->sta.addr);
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return ret;
+}
+
+static void
+il_add_sta_callback(struct il_priv *il, struct il_device_cmd *cmd,
+ struct il_rx_pkt *pkt)
+{
+ struct il_addsta_cmd *addsta = (struct il_addsta_cmd *)cmd->cmd.payload;
+
+ il_process_add_sta_resp(il, addsta, pkt, false);
+
+}
+
+int
+il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags)
+{
+ struct il_rx_pkt *pkt = NULL;
+ int ret = 0;
+ u8 data[sizeof(*sta)];
+ struct il_host_cmd cmd = {
+ .id = C_ADD_STA,
+ .flags = flags,
+ .data = data,
+ };
+ u8 sta_id __maybe_unused = sta->sta.sta_id;
+
+ D_INFO("Adding sta %u (%pM) %ssynchronously\n", sta_id, sta->sta.addr,
+ flags & CMD_ASYNC ? "a" : "");
+
+ if (flags & CMD_ASYNC)
+ cmd.callback = il_add_sta_callback;
+ else {
+ cmd.flags |= CMD_WANT_SKB;
+ might_sleep();
+ }
+
+ cmd.len = il->cfg->ops->utils->build_addsta_hcmd(sta, data);
+ ret = il_send_cmd(il, &cmd);
+
+ if (ret || (flags & CMD_ASYNC))
+ return ret;
+
+ if (ret == 0) {
+ pkt = (struct il_rx_pkt *)cmd.reply_page;
+ ret = il_process_add_sta_resp(il, sta, pkt, true);
+ }
+ il_free_pages(il, cmd.reply_page);
+
+ return ret;
+}
+EXPORT_SYMBOL(il_send_add_sta);
+
+static void
+il_set_ht_add_station(struct il_priv *il, u8 idx, struct ieee80211_sta *sta,
+ struct il_rxon_context *ctx)
+{
+ struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
+ __le32 sta_flags;
+ u8 mimo_ps_mode;
+
+ if (!sta || !sta_ht_inf->ht_supported)
+ goto done;
+
+ mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
+ D_ASSOC("spatial multiplexing power save mode: %s\n",
+ (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ? "static" :
+ (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ? "dynamic" :
+ "disabled");
+
+ sta_flags = il->stations[idx].sta.station_flags;
+
+ sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
+
+ switch (mimo_ps_mode) {
+ case WLAN_HT_CAP_SM_PS_STATIC:
+ sta_flags |= STA_FLG_MIMO_DIS_MSK;
+ break;
+ case WLAN_HT_CAP_SM_PS_DYNAMIC:
+ sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
+ break;
+ case WLAN_HT_CAP_SM_PS_DISABLED:
+ break;
+ default:
+ IL_WARN("Invalid MIMO PS mode %d\n", mimo_ps_mode);
+ break;
+ }
+
+ sta_flags |=
+ cpu_to_le32((u32) sta_ht_inf->
+ ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
+
+ sta_flags |=
+ cpu_to_le32((u32) sta_ht_inf->
+ ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
+
+ if (il_is_ht40_tx_allowed(il, ctx, &sta->ht_cap))
+ sta_flags |= STA_FLG_HT40_EN_MSK;
+ else
+ sta_flags &= ~STA_FLG_HT40_EN_MSK;
+
+ il->stations[idx].sta.station_flags = sta_flags;
+done:
+ return;
+}
+
+/**
+ * il_prep_station - Prepare station information for addition
+ *
+ * should be called with sta_lock held
+ */
+u8
+il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
+ const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
+{
+ struct il_station_entry *station;
+ int i;
+ u8 sta_id = IL_INVALID_STATION;
+ u16 rate;
+
+ if (is_ap)
+ sta_id = ctx->ap_sta_id;
+ else if (is_broadcast_ether_addr(addr))
+ sta_id = ctx->bcast_sta_id;
+ else
+ for (i = IL_STA_ID; i < il->hw_params.max_stations; i++) {
+ if (!compare_ether_addr
+ (il->stations[i].sta.sta.addr, addr)) {
+ sta_id = i;
+ break;
+ }
+
+ if (!il->stations[i].used &&
+ sta_id == IL_INVALID_STATION)
+ sta_id = i;
+ }
+
+ /*
+ * These two conditions have the same outcome, but keep them
+ * separate
+ */
+ if (unlikely(sta_id == IL_INVALID_STATION))
+ return sta_id;
+
+ /*
+ * uCode is not able to deal with multiple requests to add a
+ * station. Keep track if one is in progress so that we do not send
+ * another.
+ */
+ if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
+ D_INFO("STA %d already in process of being added.\n", sta_id);
+ return sta_id;
+ }
+
+ if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
+ (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE) &&
+ !compare_ether_addr(il->stations[sta_id].sta.sta.addr, addr)) {
+ D_ASSOC("STA %d (%pM) already added, not adding again.\n",
+ sta_id, addr);
+ return sta_id;
+ }
+
+ station = &il->stations[sta_id];
+ station->used = IL_STA_DRIVER_ACTIVE;
+ D_ASSOC("Add STA to driver ID %d: %pM\n", sta_id, addr);
+ il->num_stations++;
+
+ /* Set up the C_ADD_STA command to send to device */
+ memset(&station->sta, 0, sizeof(struct il_addsta_cmd));
+ memcpy(station->sta.sta.addr, addr, ETH_ALEN);
+ station->sta.mode = 0;
+ station->sta.sta.sta_id = sta_id;
+ station->sta.station_flags = ctx->station_flags;
+ station->ctxid = ctx->ctxid;
+
+ if (sta) {
+ struct il_station_priv_common *sta_priv;
+
+ sta_priv = (void *)sta->drv_priv;
+ sta_priv->ctx = ctx;
+ }
+
+ /*
+ * OK to call unconditionally, since local stations (IBSS BSSID
+ * STA and broadcast STA) pass in a NULL sta, and mac80211
+ * doesn't allow HT IBSS.
+ */
+ il_set_ht_add_station(il, sta_id, sta, ctx);
+
+ /* 3945 only */
+ rate = (il->band == IEEE80211_BAND_5GHZ) ? RATE_6M_PLCP : RATE_1M_PLCP;
+ /* Turn on both antennas for the station... */
+ station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
+
+ return sta_id;
+
+}
+EXPORT_SYMBOL_GPL(il_prep_station);
+
+#define STA_WAIT_TIMEOUT (HZ/2)
+
+/**
+ * il_add_station_common -
+ */
+int
+il_add_station_common(struct il_priv *il, struct il_rxon_context *ctx,
+ const u8 *addr, bool is_ap, struct ieee80211_sta *sta,
+ u8 *sta_id_r)
+{
+ unsigned long flags_spin;
+ int ret = 0;
+ u8 sta_id;
+ struct il_addsta_cmd sta_cmd;
+
+ *sta_id_r = 0;
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ sta_id = il_prep_station(il, ctx, addr, is_ap, sta);
+ if (sta_id == IL_INVALID_STATION) {
+ IL_ERR("Unable to prepare station %pM for addition\n", addr);
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ return -EINVAL;
+ }
+
+ /*
+ * uCode is not able to deal with multiple requests to add a
+ * station. Keep track if one is in progress so that we do not send
+ * another.
+ */
+ if (il->stations[sta_id].used & IL_STA_UCODE_INPROGRESS) {
+ D_INFO("STA %d already in process of being added.\n", sta_id);
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ return -EEXIST;
+ }
+
+ if ((il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE) &&
+ (il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
+ D_ASSOC("STA %d (%pM) already added, not adding again.\n",
+ sta_id, addr);
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ return -EEXIST;
+ }
+
+ il->stations[sta_id].used |= IL_STA_UCODE_INPROGRESS;
+ memcpy(&sta_cmd, &il->stations[sta_id].sta,
+ sizeof(struct il_addsta_cmd));
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+
+ /* Add station to device's station table */
+ ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+ if (ret) {
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ IL_ERR("Adding station %pM failed.\n",
+ il->stations[sta_id].sta.sta.addr);
+ il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
+ il->stations[sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ }
+ *sta_id_r = sta_id;
+ return ret;
+}
+EXPORT_SYMBOL(il_add_station_common);
+
+/**
+ * il_sta_ucode_deactivate - deactivate ucode status for a station
+ *
+ * il->sta_lock must be held
+ */
+static void
+il_sta_ucode_deactivate(struct il_priv *il, u8 sta_id)
+{
+ /* uCode must be active and the driver must be inactive */
+ if ((il->stations[sta_id].
+ used & (IL_STA_UCODE_ACTIVE | IL_STA_DRIVER_ACTIVE)) !=
+ IL_STA_UCODE_ACTIVE)
+ IL_ERR("removed non active STA %u\n", sta_id);
+
+ il->stations[sta_id].used &= ~IL_STA_UCODE_ACTIVE;
+
+ memset(&il->stations[sta_id], 0, sizeof(struct il_station_entry));
+ D_ASSOC("Removed STA %u\n", sta_id);
+}
+
+static int
+il_send_remove_station(struct il_priv *il, const u8 * addr, int sta_id,
+ bool temporary)
+{
+ struct il_rx_pkt *pkt;
+ int ret;
+
+ unsigned long flags_spin;
+ struct il_rem_sta_cmd rm_sta_cmd;
+
+ struct il_host_cmd cmd = {
+ .id = C_REM_STA,
+ .len = sizeof(struct il_rem_sta_cmd),
+ .flags = CMD_SYNC,
+ .data = &rm_sta_cmd,
+ };
+
+ memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
+ rm_sta_cmd.num_sta = 1;
+ memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
+
+ cmd.flags |= CMD_WANT_SKB;
+
+ ret = il_send_cmd(il, &cmd);
+
+ if (ret)
+ return ret;
+
+ pkt = (struct il_rx_pkt *)cmd.reply_page;
+ if (pkt->hdr.flags & IL_CMD_FAILED_MSK) {
+ IL_ERR("Bad return from C_REM_STA (0x%08X)\n", pkt->hdr.flags);
+ ret = -EIO;
+ }
+
+ if (!ret) {
+ switch (pkt->u.rem_sta.status) {
+ case REM_STA_SUCCESS_MSK:
+ if (!temporary) {
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ il_sta_ucode_deactivate(il, sta_id);
+ spin_unlock_irqrestore(&il->sta_lock,
+ flags_spin);
+ }
+ D_ASSOC("C_REM_STA PASSED\n");
+ break;
+ default:
+ ret = -EIO;
+ IL_ERR("C_REM_STA failed\n");
+ break;
+ }
+ }
+ il_free_pages(il, cmd.reply_page);
+
+ return ret;
+}
+
+/**
+ * il_remove_station - Remove driver's knowledge of station.
+ */
+int
+il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr)
+{
+ unsigned long flags;
+
+ if (!il_is_ready(il)) {
+ D_INFO("Unable to remove station %pM, device not ready.\n",
+ addr);
+ /*
+ * It is typical for stations to be removed when we are
+ * going down. Return success since device will be down
+ * soon anyway
+ */
+ return 0;
+ }
+
+ D_ASSOC("Removing STA from driver:%d %pM\n", sta_id, addr);
+
+ if (WARN_ON(sta_id == IL_INVALID_STATION))
+ return -EINVAL;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+
+ if (!(il->stations[sta_id].used & IL_STA_DRIVER_ACTIVE)) {
+ D_INFO("Removing %pM but non DRIVER active\n", addr);
+ goto out_err;
+ }
+
+ if (!(il->stations[sta_id].used & IL_STA_UCODE_ACTIVE)) {
+ D_INFO("Removing %pM but non UCODE active\n", addr);
+ goto out_err;
+ }
+
+ if (il->stations[sta_id].used & IL_STA_LOCAL) {
+ kfree(il->stations[sta_id].lq);
+ il->stations[sta_id].lq = NULL;
+ }
+
+ il->stations[sta_id].used &= ~IL_STA_DRIVER_ACTIVE;
+
+ il->num_stations--;
+
+ BUG_ON(il->num_stations < 0);
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+
+ return il_send_remove_station(il, addr, sta_id, false);
+out_err:
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(il_remove_station);
+
+/**
+ * il_clear_ucode_stations - clear ucode station table bits
+ *
+ * This function clears all the bits in the driver indicating
+ * which stations are active in the ucode. Call when something
+ * other than explicit station management would cause this in
+ * the ucode, e.g. unassociated RXON.
+ */
+void
+il_clear_ucode_stations(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ int i;
+ unsigned long flags_spin;
+ bool cleared = false;
+
+ D_INFO("Clearing ucode stations in driver\n");
+
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ for (i = 0; i < il->hw_params.max_stations; i++) {
+ if (ctx && ctx->ctxid != il->stations[i].ctxid)
+ continue;
+
+ if (il->stations[i].used & IL_STA_UCODE_ACTIVE) {
+ D_INFO("Clearing ucode active for station %d\n", i);
+ il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
+ cleared = true;
+ }
+ }
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+
+ if (!cleared)
+ D_INFO("No active stations found to be cleared\n");
+}
+EXPORT_SYMBOL(il_clear_ucode_stations);
+
+/**
+ * il_restore_stations() - Restore driver known stations to device
+ *
+ * All stations considered active by the driver, but not present in the
+ * ucode, are restored.
+ *
+ * This function sleeps.
+ */
+void
+il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ struct il_addsta_cmd sta_cmd;
+ struct il_link_quality_cmd lq;
+ unsigned long flags_spin;
+ int i;
+ bool found = false;
+ int ret;
+ bool send_lq;
+
+ if (!il_is_ready(il)) {
+ D_INFO("Not ready yet, not restoring any stations.\n");
+ return;
+ }
+
+ D_ASSOC("Restoring all known stations ... start.\n");
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ for (i = 0; i < il->hw_params.max_stations; i++) {
+ if (ctx->ctxid != il->stations[i].ctxid)
+ continue;
+ if ((il->stations[i].used & IL_STA_DRIVER_ACTIVE) &&
+ !(il->stations[i].used & IL_STA_UCODE_ACTIVE)) {
+ D_ASSOC("Restoring sta %pM\n",
+ il->stations[i].sta.sta.addr);
+ il->stations[i].sta.mode = 0;
+ il->stations[i].used |= IL_STA_UCODE_INPROGRESS;
+ found = true;
+ }
+ }
+
+ for (i = 0; i < il->hw_params.max_stations; i++) {
+ if ((il->stations[i].used & IL_STA_UCODE_INPROGRESS)) {
+ memcpy(&sta_cmd, &il->stations[i].sta,
+ sizeof(struct il_addsta_cmd));
+ send_lq = false;
+ if (il->stations[i].lq) {
+ memcpy(&lq, il->stations[i].lq,
+ sizeof(struct il_link_quality_cmd));
+ send_lq = true;
+ }
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ ret = il_send_add_sta(il, &sta_cmd, CMD_SYNC);
+ if (ret) {
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ IL_ERR("Adding station %pM failed.\n",
+ il->stations[i].sta.sta.addr);
+ il->stations[i].used &= ~IL_STA_DRIVER_ACTIVE;
+ il->stations[i].used &=
+ ~IL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&il->sta_lock,
+ flags_spin);
+ }
+ /*
+ * Rate scaling has already been initialized, send
+ * current LQ command
+ */
+ if (send_lq)
+ il_send_lq_cmd(il, ctx, &lq, CMD_SYNC, true);
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ il->stations[i].used &= ~IL_STA_UCODE_INPROGRESS;
+ }
+ }
+
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ if (!found)
+ D_INFO("Restoring all known stations"
+ " .... no stations to be restored.\n");
+ else
+ D_INFO("Restoring all known stations" " .... complete.\n");
+}
+EXPORT_SYMBOL(il_restore_stations);
+
+int
+il_get_free_ucode_key_idx(struct il_priv *il)
+{
+ int i;
+
+ for (i = 0; i < il->sta_key_max_num; i++)
+ if (!test_and_set_bit(i, &il->ucode_key_table))
+ return i;
+
+ return WEP_INVALID_OFFSET;
+}
+EXPORT_SYMBOL(il_get_free_ucode_key_idx);
+
+void
+il_dealloc_bcast_stations(struct il_priv *il)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ for (i = 0; i < il->hw_params.max_stations; i++) {
+ if (!(il->stations[i].used & IL_STA_BCAST))
+ continue;
+
+ il->stations[i].used &= ~IL_STA_UCODE_ACTIVE;
+ il->num_stations--;
+ BUG_ON(il->num_stations < 0);
+ kfree(il->stations[i].lq);
+ il->stations[i].lq = NULL;
+ }
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+EXPORT_SYMBOL_GPL(il_dealloc_bcast_stations);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+static void
+il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
+{
+ int i;
+ D_RATE("lq station id 0x%x\n", lq->sta_id);
+ D_RATE("lq ant 0x%X 0x%X\n", lq->general_params.single_stream_ant_msk,
+ lq->general_params.dual_stream_ant_msk);
+
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
+ D_RATE("lq idx %d 0x%X\n", i, lq->rs_table[i].rate_n_flags);
+}
+#else
+static inline void
+il_dump_lq_cmd(struct il_priv *il, struct il_link_quality_cmd *lq)
+{
+}
+#endif
+
+/**
+ * il_is_lq_table_valid() - Test one aspect of LQ cmd for validity
+ *
+ * It sometimes happens that, when an HT rate has been in use and we lose
+ * connectivity with the AP, mac80211 will first tell us that the current
+ * channel is no longer HT before removing the station. In such a scenario
+ * the RXON flags will be updated to indicate we are no longer communicating
+ * HT, but the LQ command may still contain HT rates. Test for this to
+ * prevent the driver from sending an LQ command between the time the RXON
+ * flags are updated and the time the LQ command is updated.
+ */
+static bool
+il_is_lq_table_valid(struct il_priv *il, struct il_rxon_context *ctx,
+ struct il_link_quality_cmd *lq)
+{
+ int i;
+
+ if (ctx->ht.enabled)
+ return true;
+
+ D_INFO("Channel %u is not an HT channel\n", ctx->active.channel);
+ for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
+ if (le32_to_cpu(lq->rs_table[i].rate_n_flags) & RATE_MCS_HT_MSK) {
+ D_INFO("idx %d of LQ expects HT channel\n", i);
+ return false;
+ }
+ }
+ return true;
+}
+
+/**
+ * il_send_lq_cmd() - Send link quality command
+ * @init: This command is sent as part of station initialization right
+ * after station has been added.
+ *
+ * The link quality command is sent as the last step of station creation.
+ * This is the special case in which init is set: once the command
+ * completes, we clear the state indicating that station creation is in
+ * progress.
+ */
+int
+il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
+ struct il_link_quality_cmd *lq, u8 flags, bool init)
+{
+ int ret = 0;
+ unsigned long flags_spin;
+
+ struct il_host_cmd cmd = {
+ .id = C_TX_LINK_QUALITY_CMD,
+ .len = sizeof(struct il_link_quality_cmd),
+ .flags = flags,
+ .data = lq,
+ };
+
+ if (WARN_ON(lq->sta_id == IL_INVALID_STATION))
+ return -EINVAL;
+
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ if (!(il->stations[lq->sta_id].used & IL_STA_DRIVER_ACTIVE)) {
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+
+ il_dump_lq_cmd(il, lq);
+ BUG_ON(init && (cmd.flags & CMD_ASYNC));
+
+ if (il_is_lq_table_valid(il, ctx, lq))
+ ret = il_send_cmd(il, &cmd);
+ else
+ ret = -EINVAL;
+
+ if (cmd.flags & CMD_ASYNC)
+ return ret;
+
+ if (init) {
+ D_INFO("init LQ command complete,"
+ " clearing sta addition status for sta %d\n",
+ lq->sta_id);
+ spin_lock_irqsave(&il->sta_lock, flags_spin);
+ il->stations[lq->sta_id].used &= ~IL_STA_UCODE_INPROGRESS;
+ spin_unlock_irqrestore(&il->sta_lock, flags_spin);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(il_send_lq_cmd);
+
+int
+il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct il_priv *il = hw->priv;
+ struct il_station_priv_common *sta_common = (void *)sta->drv_priv;
+ int ret;
+
+ D_INFO("received request to remove station %pM\n", sta->addr);
+ mutex_lock(&il->mutex);
+ D_INFO("proceeding to remove station %pM\n", sta->addr);
+ ret = il_remove_station(il, sta_common->sta_id, sta->addr);
+ if (ret)
+ IL_ERR("Error removing station %pM\n", sta->addr);
+ mutex_unlock(&il->mutex);
+ return ret;
+}
+EXPORT_SYMBOL(il_mac_sta_remove);
+
+/************************** RX-FUNCTIONS ****************************/
+/*
+ * Rx theory of operation
+ *
+ * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
+ * each of which points to Receive Buffers to be filled by the NIC. These get
+ * used not only for Rx frames, but for any command response or notification
+ * from the NIC. The driver and NIC manage the Rx buffers by means
+ * of idxes into the circular buffer.
+ *
+ * Rx Queue Indexes
+ * The host/firmware share two idx registers for managing the Rx buffers.
+ *
+ * The READ idx maps to the first position that the firmware may be writing
+ * to -- the driver can read up to (but not including) this position and get
+ * good data.
+ * The READ idx is managed by the firmware once the card is enabled.
+ *
+ * The WRITE idx maps to the last position the driver has read from -- the
+ * position preceding WRITE is the last slot the firmware can place a packet.
+ *
+ * The queue is empty (no good data) if WRITE = READ - 1, and is full if
+ * WRITE = READ.
+ *
+ * During initialization, the host sets up the READ queue position to the first
+ * IDX position, and WRITE to the last (READ - 1 wrapped)
+ *
+ * When the firmware places a packet in a buffer, it will advance the READ idx
+ * and fire the RX interrupt. The driver can then query the READ idx and
+ * process as many packets as possible, moving the WRITE idx forward as it
+ * resets the Rx queue buffers with new memory.
+ *
+ * The management in the driver is as follows:
+ * + A list of pre-allocated SKBs is stored in il->rxq->rx_free. When
+ *   il->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
+ *   to replenish il->rxq->rx_free.
+ * + In il_rx_replenish (scheduled) if 'processed' != 'read' then the
+ *   il->rxq is replenished and the READ IDX is updated (updating the
+ *   'processed' and 'read' driver idxes as well)
+ * + A received packet is processed and handed to the kernel network stack,
+ *   detached from the il->rxq. The driver 'processed' idx is updated.
+ * + The Host/Firmware il->rxq is replenished at tasklet time from the rx_free
+ *   list. If there are no allocated buffers in il->rxq->rx_free, the READ
+ *   IDX is not incremented and il->status(RX_STALLED) is set. If there
+ *   were enough free buffers and RX_STALLED is set, it is cleared.
+ *
+ *
+ * Driver sequence:
+ *
+ * il_rx_queue_alloc() Allocates rx_free
+ * il_rx_replenish() Replenishes rx_free list from rx_used, and calls
+ * il_rx_queue_restock
+ * il_rx_queue_restock() Moves available buffers from rx_free into Rx
+ * queue, updates firmware pointers, and updates
+ * the WRITE idx. If insufficient rx_free buffers
+ * are available, schedules il_rx_replenish
+ *
+ * -- enable interrupts --
+ * ISR - il_rx() Detach il_rx_bufs from pool up to the
+ * READ IDX, detaching the SKB from the pool.
+ * Moves the packet buffer from queue to rx_used.
+ * Calls il_rx_queue_restock to refill any empty
+ * slots.
+ * ...
+ *
+ */
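+
+/*
+ * A small worked illustration of the READ/WRITE rules above (illustration
+ * only, not driver code; the numbers are hypothetical):
+ *
+ *	READ = 10, WRITE = 9   =>  WRITE == READ - 1, the queue is empty
+ *	READ = 10, WRITE = 10  =>  WRITE == READ, the queue is full
+ */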
+
+/**
+ * il_rx_queue_space - Return number of free slots available in queue.
+ */
+int
+il_rx_queue_space(const struct il_rx_queue *q)
+{
+ int s = q->read - q->write;
+ if (s <= 0)
+ s += RX_QUEUE_SIZE;
+ /* keep some buffer to not confuse full and empty queue */
+ s -= 2;
+ if (s < 0)
+ s = 0;
+ return s;
+}
+EXPORT_SYMBOL(il_rx_queue_space);
+
+/**
+ * il_rx_queue_update_write_ptr - Update the write pointer for the RX queue
+ */
+void
+il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q)
+{
+ unsigned long flags;
+ u32 rx_wrt_ptr_reg = il->hw_params.rx_wrt_ptr_reg;
+ u32 reg;
+
+ spin_lock_irqsave(&q->lock, flags);
+
+ if (q->need_update == 0)
+ goto exit_unlock;
+
+ /* If power-saving is in use, make sure device is awake */
+ if (test_bit(S_POWER_PMI, &il->status)) {
+ reg = _il_rd(il, CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ D_INFO("Rx queue requesting wakeup," " GP1 = 0x%x\n",
+ reg);
+ il_set_bit(il, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ goto exit_unlock;
+ }
+
+ q->write_actual = (q->write & ~0x7);
+ il_wr(il, rx_wrt_ptr_reg, q->write_actual);
+
+ /* Else device is assumed to be awake */
+ } else {
+ /* Device expects a multiple of 8 */
+ q->write_actual = (q->write & ~0x7);
+ il_wr(il, rx_wrt_ptr_reg, q->write_actual);
+ }
+
+ q->need_update = 0;
+
+exit_unlock:
+ spin_unlock_irqrestore(&q->lock, flags);
+}
+EXPORT_SYMBOL(il_rx_queue_update_write_ptr);
+
+int
+il_rx_queue_alloc(struct il_priv *il)
+{
+ struct il_rx_queue *rxq = &il->rxq;
+ struct device *dev = &il->pci_dev->dev;
+ int i;
+
+ spin_lock_init(&rxq->lock);
+ INIT_LIST_HEAD(&rxq->rx_free);
+ INIT_LIST_HEAD(&rxq->rx_used);
+
+ /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
+ rxq->bd =
+ dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
+ GFP_KERNEL);
+ if (!rxq->bd)
+ goto err_bd;
+
+ rxq->rb_stts =
+ dma_alloc_coherent(dev, sizeof(struct il_rb_status),
+ &rxq->rb_stts_dma, GFP_KERNEL);
+ if (!rxq->rb_stts)
+ goto err_rb;
+
+ /* Fill the rx_used queue with _all_ of the Rx buffers */
+ for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
+ list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
+
+ /* Set us so that we have processed and used all buffers, but have
+ * not restocked the Rx queue with fresh buffers */
+ rxq->read = rxq->write = 0;
+ rxq->write_actual = 0;
+ rxq->free_count = 0;
+ rxq->need_update = 0;
+ return 0;
+
+err_rb:
+ dma_free_coherent(&il->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->bd_dma);
+err_bd:
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(il_rx_queue_alloc);
+
+void
+il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_spectrum_notification *report = &(pkt->u.spectrum_notif);
+
+ if (!report->state) {
+ D_11H("Spectrum Measure Notification: Start\n");
+ return;
+ }
+
+ memcpy(&il->measure_report, report, sizeof(*report));
+ il->measurement_status |= MEASUREMENT_READY;
+}
+EXPORT_SYMBOL(il_hdl_spectrum_measurement);
+
+/*
+ * returns non-zero if packet should be dropped
+ */
+int
+il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
+ u32 decrypt_res, struct ieee80211_rx_status *stats)
+{
+ u16 fc = le16_to_cpu(hdr->frame_control);
+
+ /*
+ * All contexts have the same setting here due to it being
+ * a module parameter, so OK to check any context.
+ */
+ if (il->ctx.active.filter_flags & RXON_FILTER_DIS_DECRYPT_MSK)
+ return 0;
+
+ if (!(fc & IEEE80211_FCTL_PROTECTED))
+ return 0;
+
+ D_RX("decrypt_res:0x%x\n", decrypt_res);
+ switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
+ case RX_RES_STATUS_SEC_TYPE_TKIP:
+ /* The uCode has a bad phase 1 key, so push the packet up;
+ * decryption will be done in SW. */
+ if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+ RX_RES_STATUS_BAD_KEY_TTAK)
+ break;
+
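+ /* fall through */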
+ case RX_RES_STATUS_SEC_TYPE_WEP:
+ if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+ RX_RES_STATUS_BAD_ICV_MIC) {
+ /* bad ICV, the packet is destroyed since the
+ * decryption is done in place; drop it */
+ D_RX("Packet destroyed\n");
+ return -1;
+ }
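+ /* fall through */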
+ case RX_RES_STATUS_SEC_TYPE_CCMP:
+ if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
+ RX_RES_STATUS_DECRYPT_OK) {
+ D_RX("hw decrypt successfully!!!\n");
+ stats->flag |= RX_FLAG_DECRYPTED;
+ }
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(il_set_decrypted_flag);
+
+/**
+ * il_txq_update_write_ptr - Send new write idx to hardware
+ */
+void
+il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq)
+{
+ u32 reg = 0;
+ int txq_id = txq->q.id;
+
+ if (txq->need_update == 0)
+ return;
+
+ /* if we're trying to save power */
+ if (test_bit(S_POWER_PMI, &il->status)) {
+ /* wake up nic if it's powered down ...
+ * uCode will wake up, and interrupt us again, so next
+ * time we'll skip this part. */
+ reg = _il_rd(il, CSR_UCODE_DRV_GP1);
+
+ if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
+ D_INFO("Tx queue %d requesting wakeup," " GP1 = 0x%x\n",
+ txq_id, reg);
+ il_set_bit(il, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ return;
+ }
+
+ il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
+
+ /*
+ * else not in power-save mode,
+ * uCode will never sleep when we're
+ * trying to tx (during RFKILL, we're not trying to tx).
+ */
+ } else
+ _il_wr(il, HBUS_TARG_WRPTR, txq->q.write_ptr | (txq_id << 8));
+ txq->need_update = 0;
+}
+EXPORT_SYMBOL(il_txq_update_write_ptr);
+
+/**
+ * il_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+void
+il_tx_queue_unmap(struct il_priv *il, int txq_id)
+{
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct il_queue *q = &txq->q;
+
+ if (q->n_bd == 0)
+ return;
+
+ while (q->write_ptr != q->read_ptr) {
+ il->cfg->ops->lib->txq_free_tfd(il, txq);
+ q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
+ }
+}
+EXPORT_SYMBOL(il_tx_queue_unmap);
+
+/**
+ * il_tx_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+void
+il_tx_queue_free(struct il_priv *il, int txq_id)
+{
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct device *dev = &il->pci_dev->dev;
+ int i;
+
+ il_tx_queue_unmap(il, txq_id);
+
+ /* De-alloc array of command/tx buffers */
+ for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
+ kfree(txq->cmd[i]);
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->q.n_bd)
+ dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
+ txq->tfds, txq->q.dma_addr);
+
+ /* De-alloc array of per-TFD driver data */
+ kfree(txq->txb);
+ txq->txb = NULL;
+
+ /* deallocate arrays */
+ kfree(txq->cmd);
+ kfree(txq->meta);
+ txq->cmd = NULL;
+ txq->meta = NULL;
+
+ /* 0-fill queue descriptor structure */
+ memset(txq, 0, sizeof(*txq));
+}
+EXPORT_SYMBOL(il_tx_queue_free);
+
+/**
+ * il_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
+ */
+void
+il_cmd_queue_unmap(struct il_priv *il)
+{
+ struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+ struct il_queue *q = &txq->q;
+ int i;
+
+ if (q->n_bd == 0)
+ return;
+
+ while (q->read_ptr != q->write_ptr) {
+ i = il_get_cmd_idx(q, q->read_ptr, 0);
+
+ if (txq->meta[i].flags & CMD_MAPPED) {
+ pci_unmap_single(il->pci_dev,
+ dma_unmap_addr(&txq->meta[i], mapping),
+ dma_unmap_len(&txq->meta[i], len),
+ PCI_DMA_BIDIRECTIONAL);
+ txq->meta[i].flags = 0;
+ }
+
+ q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd);
+ }
+
+ i = q->n_win;
+ if (txq->meta[i].flags & CMD_MAPPED) {
+ pci_unmap_single(il->pci_dev,
+ dma_unmap_addr(&txq->meta[i], mapping),
+ dma_unmap_len(&txq->meta[i], len),
+ PCI_DMA_BIDIRECTIONAL);
+ txq->meta[i].flags = 0;
+ }
+}
+EXPORT_SYMBOL(il_cmd_queue_unmap);
+
+/**
+ * il_cmd_queue_free - Deallocate DMA queue.
+ * @txq: Transmit queue to deallocate.
+ *
+ * Empty queue by removing and destroying all BD's.
+ * Free all buffers.
+ * 0-fill, but do not free "txq" descriptor structure.
+ */
+void
+il_cmd_queue_free(struct il_priv *il)
+{
+ struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+ struct device *dev = &il->pci_dev->dev;
+ int i;
+
+ il_cmd_queue_unmap(il);
+
+ /* De-alloc array of command/tx buffers */
+ for (i = 0; i <= TFD_CMD_SLOTS; i++)
+ kfree(txq->cmd[i]);
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->q.n_bd)
+ dma_free_coherent(dev, il->hw_params.tfd_size * txq->q.n_bd,
+ txq->tfds, txq->q.dma_addr);
+
+ /* deallocate arrays */
+ kfree(txq->cmd);
+ kfree(txq->meta);
+ txq->cmd = NULL;
+ txq->meta = NULL;
+
+ /* 0-fill queue descriptor structure */
+ memset(txq, 0, sizeof(*txq));
+}
+EXPORT_SYMBOL(il_cmd_queue_free);
+
+/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
+ * DMA services
+ *
+ * Theory of operation
+ *
+ * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
+ * of buffer descriptors, each of which points to one or more data buffers for
+ * the device to read from or fill. Driver and device exchange status of each
+ * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
+ * entries in each circular buffer, to protect against confusing empty and full
+ * queue states.
+ *
+ * The device reads or writes the data in the queues via the device's several
+ * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
+ *
+ * For the Tx queue, there are low-mark and high-mark limits. If, after
+ * queuing a packet for Tx, the free space drops below the low mark, the Tx
+ * queue is stopped. When reclaiming packets (on the 'tx done' IRQ), if the
+ * free space rises above the high mark, the Tx queue is resumed.
+ *
+ * See more detailed info in 4965.h.
+ ***************************************************/
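+
+/*
+ * Flow-control sketch implied by the low/high marks described above
+ * (illustration only, not driver code; "free" stands for the value
+ * returned by il_queue_space()):
+ *
+ *	after queueing a frame:    if (free < q->low_mark)  stop the queue
+ *	after reclaiming frames:   if (free > q->high_mark) wake the queue
+ */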
+
+int
+il_queue_space(const struct il_queue *q)
+{
+ int s = q->read_ptr - q->write_ptr;
+
+ if (q->read_ptr > q->write_ptr)
+ s -= q->n_bd;
+
+ if (s <= 0)
+ s += q->n_win;
+ /* keep some reserve to not confuse empty and full situations */
+ s -= 2;
+ if (s < 0)
+ s = 0;
+ return s;
+}
+EXPORT_SYMBOL(il_queue_space);
+
+
+/**
+ * il_queue_init - Initialize queue's high/low-water and read/write idxes
+ */
+static int
+il_queue_init(struct il_priv *il, struct il_queue *q, int count, int slots_num,
+ u32 id)
+{
+ q->n_bd = count;
+ q->n_win = slots_num;
+ q->id = id;
+
+ /* count must be power-of-two size, otherwise il_queue_inc_wrap
+ * and il_queue_dec_wrap are broken. */
+ BUG_ON(!is_power_of_2(count));
+
+ /* slots_num must be power-of-two size, otherwise
+ * il_get_cmd_idx is broken. */
+ BUG_ON(!is_power_of_2(slots_num));
+
+ q->low_mark = q->n_win / 4;
+ if (q->low_mark < 4)
+ q->low_mark = 4;
+
+ q->high_mark = q->n_win / 8;
+ if (q->high_mark < 2)
+ q->high_mark = 2;
+
+ q->write_ptr = q->read_ptr = 0;
+
+ return 0;
+}
+
+/**
+ * il_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
+ */
+static int
+il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
+{
+ struct device *dev = &il->pci_dev->dev;
+ size_t tfd_sz = il->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
+
+ /* Driver private data, only for Tx (not command) queues,
+ * not shared with device. */
+ if (id != il->cmd_queue) {
+ txq->txb = kcalloc(TFD_QUEUE_SIZE_MAX, sizeof(txq->txb[0]),
+ GFP_KERNEL);
+ if (!txq->txb) {
+ IL_ERR("kmalloc for auxiliary BD "
+ "structures failed\n");
+ goto error;
+ }
+ } else {
+ txq->txb = NULL;
+ }
+
+ /* Circular buffer of transmit frame descriptors (TFDs),
+ * shared with device */
+ txq->tfds =
+ dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
+ if (!txq->tfds) {
+ IL_ERR("pci_alloc_consistent(%zd) failed\n", tfd_sz);
+ goto error;
+ }
+ txq->q.id = id;
+
+ return 0;
+
+error:
+ kfree(txq->txb);
+ txq->txb = NULL;
+
+ return -ENOMEM;
+}
+
+/**
+ * il_tx_queue_init - Allocate and initialize one tx/cmd queue
+ */
+int
+il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
+ u32 txq_id)
+{
+ int i, len;
+ int ret;
+ int actual_slots = slots_num;
+
+ /*
+ * Alloc buffer array for commands (Tx or other types of commands).
+ * For the command queue (#4/#9), allocate command space + one big
+ * command for scan, since the scan command is very large; the system will
+ * not have two scans at the same time, so only one is needed.
+ * For normal Tx queues (all other queues), no super-size command
+ * space is needed.
+ */
+ if (txq_id == il->cmd_queue)
+ actual_slots++;
+
+ txq->meta =
+ kzalloc(sizeof(struct il_cmd_meta) * actual_slots, GFP_KERNEL);
+ txq->cmd =
+ kzalloc(sizeof(struct il_device_cmd *) * actual_slots, GFP_KERNEL);
+
+ if (!txq->meta || !txq->cmd)
+ goto out_free_arrays;
+
+ len = sizeof(struct il_device_cmd);
+ for (i = 0; i < actual_slots; i++) {
+ /* only happens for cmd queue */
+ if (i == slots_num)
+ len = IL_MAX_CMD_SIZE;
+
+ txq->cmd[i] = kmalloc(len, GFP_KERNEL);
+ if (!txq->cmd[i])
+ goto err;
+ }
+
+ /* Alloc driver data array and TFD circular buffer */
+ ret = il_tx_queue_alloc(il, txq, txq_id);
+ if (ret)
+ goto err;
+
+ txq->need_update = 0;
+
+ /*
+ * For the default queues 0-3, set up the swq_id
+ * already -- all others need to get one later
+ * (if they need one at all).
+ */
+ if (txq_id < 4)
+ il_set_swq_id(txq, txq_id, txq_id);
+
+ /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
+ * il_queue_inc_wrap and il_queue_dec_wrap are broken. */
+ BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
+
+ /* Initialize queue's high/low-water marks, and head/tail idxes */
+ il_queue_init(il, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+
+ /* Tell device where to find queue */
+ il->cfg->ops->lib->txq_init(il, txq);
+
+ return 0;
+err:
+ for (i = 0; i < actual_slots; i++)
+ kfree(txq->cmd[i]);
+out_free_arrays:
+ kfree(txq->meta);
+ kfree(txq->cmd);
+
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(il_tx_queue_init);
+
+void
+il_tx_queue_reset(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
+ u32 txq_id)
+{
+ int actual_slots = slots_num;
+
+ if (txq_id == il->cmd_queue)
+ actual_slots++;
+
+ memset(txq->meta, 0, sizeof(struct il_cmd_meta) * actual_slots);
+
+ txq->need_update = 0;
+
+ /* Initialize queue's high/low-water marks, and head/tail idxes */
+ il_queue_init(il, &txq->q, TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
+
+ /* Tell device where to find queue */
+ il->cfg->ops->lib->txq_init(il, txq);
+}
+EXPORT_SYMBOL(il_tx_queue_reset);
+
+/*************** HOST COMMAND QUEUE FUNCTIONS *****/
+
+/**
+ * il_enqueue_hcmd - enqueue a uCode command
+ * @il: device private data pointer
+ * @cmd: a pointer to the ucode command structure
+ *
+ * The function returns < 0 values to indicate that the operation
+ * failed. On success, it returns the idx (> 0) of the command in the
+ * command queue.
+ */
+int
+il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd)
+{
+ struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+ struct il_queue *q = &txq->q;
+ struct il_device_cmd *out_cmd;
+ struct il_cmd_meta *out_meta;
+ dma_addr_t phys_addr;
+ unsigned long flags;
+ int len;
+ u32 idx;
+ u16 fix_size;
+
+ cmd->len = il->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
+ fix_size = (u16) (cmd->len + sizeof(out_cmd->hdr));
+
+ /* If any of the command structures end up being larger than
+ * TFD_MAX_PAYLOAD_SIZE and it is sent as a 'small' command, then
+ * we will need to increase the size of the TFD entries.
+ * Also check that the command buffer does not exceed the size
+ * of device_cmd and max_cmd_size. */
+ BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
+ !(cmd->flags & CMD_SIZE_HUGE));
+ BUG_ON(fix_size > IL_MAX_CMD_SIZE);
+
+ if (il_is_rfkill(il) || il_is_ctkill(il)) {
+ IL_WARN("Not sending command - %s KILL\n",
+ il_is_rfkill(il) ? "RF" : "CT");
+ return -EIO;
+ }
+
+ spin_lock_irqsave(&il->hcmd_lock, flags);
+
+ if (il_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
+ spin_unlock_irqrestore(&il->hcmd_lock, flags);
+
+ IL_ERR("Restarting adapter due to command queue full\n");
+ queue_work(il->workqueue, &il->restart);
+ return -ENOSPC;
+ }
+
+ idx = il_get_cmd_idx(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
+ out_cmd = txq->cmd[idx];
+ out_meta = &txq->meta[idx];
+
+ if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
+ spin_unlock_irqrestore(&il->hcmd_lock, flags);
+ return -ENOSPC;
+ }
+
+ memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
+ out_meta->flags = cmd->flags | CMD_MAPPED;
+ if (cmd->flags & CMD_WANT_SKB)
+ out_meta->source = cmd;
+ if (cmd->flags & CMD_ASYNC)
+ out_meta->callback = cmd->callback;
+
+ out_cmd->hdr.cmd = cmd->id;
+ memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
+
+ /* At this point, the out_cmd now has all of the incoming cmd
+ * information */
+
+ out_cmd->hdr.flags = 0;
+ out_cmd->hdr.sequence =
+ cpu_to_le16(QUEUE_TO_SEQ(il->cmd_queue) | IDX_TO_SEQ(q->write_ptr));
+ if (cmd->flags & CMD_SIZE_HUGE)
+ out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
+ len = sizeof(struct il_device_cmd);
+ if (idx == TFD_CMD_SLOTS)
+ len = IL_MAX_CMD_SIZE;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ switch (out_cmd->hdr.cmd) {
+ case C_TX_LINK_QUALITY_CMD:
+ case C_SENSITIVITY:
+ D_HC_DUMP("Sending command %s (#%x), seq: 0x%04X, "
+ "%d bytes at %d[%d]:%d\n",
+ il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
+ le16_to_cpu(out_cmd->hdr.sequence), fix_size,
+ q->write_ptr, idx, il->cmd_queue);
+ break;
+ default:
+ D_HC("Sending command %s (#%x), seq: 0x%04X, "
+ "%d bytes at %d[%d]:%d\n",
+ il_get_cmd_string(out_cmd->hdr.cmd), out_cmd->hdr.cmd,
+ le16_to_cpu(out_cmd->hdr.sequence), fix_size, q->write_ptr,
+ idx, il->cmd_queue);
+ }
+#endif
+ txq->need_update = 1;
+
+ if (il->cfg->ops->lib->txq_update_byte_cnt_tbl)
+ /* Set up entry in queue's byte count circular buffer */
+ il->cfg->ops->lib->txq_update_byte_cnt_tbl(il, txq, 0);
+
+ phys_addr =
+ pci_map_single(il->pci_dev, &out_cmd->hdr, fix_size,
+ PCI_DMA_BIDIRECTIONAL);
+ dma_unmap_addr_set(out_meta, mapping, phys_addr);
+ dma_unmap_len_set(out_meta, len, fix_size);
+
+ il->cfg->ops->lib->txq_attach_buf_to_tfd(il, txq, phys_addr, fix_size,
+ 1, U32_PAD(cmd->len));
+
+ /* Increment and update queue's write idx */
+ q->write_ptr = il_queue_inc_wrap(q->write_ptr, q->n_bd);
+ il_txq_update_write_ptr(il, txq);
+
+ spin_unlock_irqrestore(&il->hcmd_lock, flags);
+ return idx;
+}
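+
+/*
+ * Caller-side sketch of the return convention documented above
+ * (illustration only, not a real caller in this file):
+ *
+ *	cmd_idx = il_enqueue_hcmd(il, cmd);
+ *	if (cmd_idx < 0)
+ *		return cmd_idx;		(e.g. -EIO or -ENOSPC)
+ */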
+
+/**
+ * il_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
+ *
+ * When FW advances 'R' idx, all entries between old and new 'R' idx
+ * need to be reclaimed. As a result, some free space forms. If there is
+ * enough free space (> low mark), wake the stack that feeds us.
+ */
+static void
+il_hcmd_queue_reclaim(struct il_priv *il, int txq_id, int idx, int cmd_idx)
+{
+ struct il_tx_queue *txq = &il->txq[txq_id];
+ struct il_queue *q = &txq->q;
+ int nfreed = 0;
+
+ if (idx >= q->n_bd || il_queue_used(q, idx) == 0) {
+ IL_ERR("Read idx for DMA queue txq id (%d), idx %d, "
+ "is out of range [0-%d] %d %d.\n", txq_id, idx, q->n_bd,
+ q->write_ptr, q->read_ptr);
+ return;
+ }
+
+ for (idx = il_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
+ q->read_ptr = il_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+ if (nfreed++ > 0) {
+ IL_ERR("HCMD skipped: idx (%d) %d %d\n", idx,
+ q->write_ptr, q->read_ptr);
+ queue_work(il->workqueue, &il->restart);
+ }
+
+ }
+}
+
+/**
+ * il_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
+ * @rxb: Rx buffer to reclaim
+ *
+ * If an Rx buffer has an async callback associated with it the callback
+ * will be executed. The attached skb (if present) will only be freed
+ * if the callback returns 1
+ */
+void
+il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ u16 sequence = le16_to_cpu(pkt->hdr.sequence);
+ int txq_id = SEQ_TO_QUEUE(sequence);
+ int idx = SEQ_TO_IDX(sequence);
+ int cmd_idx;
+ bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
+ struct il_device_cmd *cmd;
+ struct il_cmd_meta *meta;
+ struct il_tx_queue *txq = &il->txq[il->cmd_queue];
+ unsigned long flags;
+
+ /* If a Tx command is being handled and it isn't in the actual
+ * command queue, then a command routing bug has been introduced
+ * in the queue management code. */
+ if (WARN
+ (txq_id != il->cmd_queue,
+ "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
+ txq_id, il->cmd_queue, sequence, il->txq[il->cmd_queue].q.read_ptr,
+ il->txq[il->cmd_queue].q.write_ptr)) {
+ il_print_hex_error(il, pkt, 32);
+ return;
+ }
+
+ cmd_idx = il_get_cmd_idx(&txq->q, idx, huge);
+ cmd = txq->cmd[cmd_idx];
+ meta = &txq->meta[cmd_idx];
+
+ txq->time_stamp = jiffies;
+
+ pci_unmap_single(il->pci_dev, dma_unmap_addr(meta, mapping),
+ dma_unmap_len(meta, len), PCI_DMA_BIDIRECTIONAL);
+
+ /* Input error checking is done when commands are added to queue. */
+ if (meta->flags & CMD_WANT_SKB) {
+ meta->source->reply_page = (unsigned long)rxb_addr(rxb);
+ rxb->page = NULL;
+ } else if (meta->callback)
+ meta->callback(il, cmd, pkt);
+
+ spin_lock_irqsave(&il->hcmd_lock, flags);
+
+ il_hcmd_queue_reclaim(il, txq_id, idx, cmd_idx);
+
+ if (!(meta->flags & CMD_ASYNC)) {
+ clear_bit(S_HCMD_ACTIVE, &il->status);
+ D_INFO("Clearing HCMD_ACTIVE for command %s\n",
+ il_get_cmd_string(cmd->hdr.cmd));
+ wake_up(&il->wait_command_queue);
+ }
+
+ /* Mark as unmapped */
+ meta->flags = 0;
+
+ spin_unlock_irqrestore(&il->hcmd_lock, flags);
+}
+EXPORT_SYMBOL(il_tx_cmd_complete);
+
+MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
+MODULE_VERSION(IWLWIFI_VERSION);
+MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
+MODULE_LICENSE("GPL");
+
+/*
+ * If bt_coex_active is set to true, uCode will do kill/defer every time
+ * the priority line is asserted (BT is sending signals on the priority
+ * line in the PCIx).
+ * If bt_coex_active is set to false, uCode will ignore the BT activity
+ * and perform normal operation.
+ *
+ * Users might experience transmit issues on some platforms due to a
+ * WiFi/BT co-existence problem. The possible behaviors are:
+ *   able to scan and find all the available APs
+ *   not able to associate with any AP
+ * On those platforms, WiFi communication can be restored by setting the
+ * "bt_coex_active" module parameter to "false".
+ *
+ * default: bt_coex_active = true (BT_COEX_ENABLE)
+ */
+static bool bt_coex_active = true;
+module_param(bt_coex_active, bool, S_IRUGO);
+MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
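+
+/*
+ * Usage sketch (assuming the module is loaded under the name "iwlegacy"):
+ * the parameter can be given at load time, e.g.
+ *
+ *	modprobe iwlegacy bt_coex_active=0
+ *
+ * or set persistently through a modprobe.d options line.
+ */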
+
+u32 il_debug_level;
+EXPORT_SYMBOL(il_debug_level);
+
+const u8 il_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+EXPORT_SYMBOL(il_bcast_addr);
+
+/* This function both allocates and initializes hw and il. */
+struct ieee80211_hw *
+il_alloc_all(struct il_cfg *cfg)
+{
+ struct il_priv *il;
+ /* mac80211 allocates memory for this device instance, including
+ * space for this driver's private structure */
+ struct ieee80211_hw *hw;
+
+ hw = ieee80211_alloc_hw(sizeof(struct il_priv),
+ cfg->ops->ieee80211_ops);
+ if (hw == NULL) {
+ pr_err("%s: Can not allocate network device\n", cfg->name);
+ goto out;
+ }
+
+ il = hw->priv;
+ il->hw = hw;
+
+out:
+ return hw;
+}
+EXPORT_SYMBOL(il_alloc_all);
+
+#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
+#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
+static void
+il_init_ht_hw_capab(const struct il_priv *il,
+ struct ieee80211_sta_ht_cap *ht_info,
+ enum ieee80211_band band)
+{
+ u16 max_bit_rate = 0;
+ u8 rx_chains_num = il->hw_params.rx_chains_num;
+ u8 tx_chains_num = il->hw_params.tx_chains_num;
+
+ ht_info->cap = 0;
+ memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
+
+ ht_info->ht_supported = true;
+
+ ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
+ max_bit_rate = MAX_BIT_RATE_20_MHZ;
+ if (il->hw_params.ht40_channel & BIT(band)) {
+ ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+ ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
+ ht_info->mcs.rx_mask[4] = 0x01;
+ max_bit_rate = MAX_BIT_RATE_40_MHZ;
+ }
+
+ if (il->cfg->mod_params->amsdu_size_8K)
+ ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+
+ ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
+ ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
+
+ ht_info->mcs.rx_mask[0] = 0xFF;
+ if (rx_chains_num >= 2)
+ ht_info->mcs.rx_mask[1] = 0xFF;
+ if (rx_chains_num >= 3)
+ ht_info->mcs.rx_mask[2] = 0xFF;
+
+ /* Highest supported Rx data rate */
+ max_bit_rate *= rx_chains_num;
+ WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
+ ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
+
+ /* Tx MCS capabilities */
+ ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+ if (tx_chains_num != rx_chains_num) {
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+ ht_info->mcs.tx_params |=
+ ((tx_chains_num -
+ 1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+ }
+}
+
+/**
+ * il_init_geos - Initialize mac80211's geo/channel info based on EEPROM
+ */
+int
+il_init_geos(struct il_priv *il)
+{
+ struct il_channel_info *ch;
+ struct ieee80211_supported_band *sband;
+ struct ieee80211_channel *channels;
+ struct ieee80211_channel *geo_ch;
+ struct ieee80211_rate *rates;
+ int i = 0;
+ s8 max_tx_power = 0;
+
+ if (il->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
+ il->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
+ D_INFO("Geography modes already initialized.\n");
+ set_bit(S_GEO_CONFIGURED, &il->status);
+ return 0;
+ }
+
+ channels =
+ kzalloc(sizeof(struct ieee80211_channel) * il->channel_count,
+ GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ rates =
+ kzalloc((sizeof(struct ieee80211_rate) * RATE_COUNT_LEGACY),
+ GFP_KERNEL);
+ if (!rates) {
+ kfree(channels);
+ return -ENOMEM;
+ }
+
+ /* 5.2GHz channels start after the 2.4GHz channels */
+ sband = &il->bands[IEEE80211_BAND_5GHZ];
+ sband->channels = &channels[ARRAY_SIZE(il_eeprom_band_1)];
+ /* just OFDM */
+ sband->bitrates = &rates[IL_FIRST_OFDM_RATE];
+ sband->n_bitrates = RATE_COUNT_LEGACY - IL_FIRST_OFDM_RATE;
+
+ if (il->cfg->sku & IL_SKU_N)
+ il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_5GHZ);
+
+ sband = &il->bands[IEEE80211_BAND_2GHZ];
+ sband->channels = channels;
+ /* OFDM & CCK */
+ sband->bitrates = rates;
+ sband->n_bitrates = RATE_COUNT_LEGACY;
+
+ if (il->cfg->sku & IL_SKU_N)
+ il_init_ht_hw_capab(il, &sband->ht_cap, IEEE80211_BAND_2GHZ);
+
+ il->ieee_channels = channels;
+ il->ieee_rates = rates;
+
+ for (i = 0; i < il->channel_count; i++) {
+ ch = &il->channel_info[i];
+
+ if (!il_is_channel_valid(ch))
+ continue;
+
+ sband = &il->bands[ch->band];
+
+ geo_ch = &sband->channels[sband->n_channels++];
+
+ geo_ch->center_freq =
+ ieee80211_channel_to_frequency(ch->channel, ch->band);
+ geo_ch->max_power = ch->max_power_avg;
+ geo_ch->max_antenna_gain = 0xff;
+ geo_ch->hw_value = ch->channel;
+
+ if (il_is_channel_valid(ch)) {
+ if (!(ch->flags & EEPROM_CHANNEL_IBSS))
+ geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
+
+ if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
+ geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
+
+ if (ch->flags & EEPROM_CHANNEL_RADAR)
+ geo_ch->flags |= IEEE80211_CHAN_RADAR;
+
+ geo_ch->flags |= ch->ht40_extension_channel;
+
+ if (ch->max_power_avg > max_tx_power)
+ max_tx_power = ch->max_power_avg;
+ } else {
+ geo_ch->flags |= IEEE80211_CHAN_DISABLED;
+ }
+
+ D_INFO("Channel %d Freq=%d[%sGHz] %s flag=0x%X\n", ch->channel,
+ geo_ch->center_freq,
+ il_is_channel_a_band(ch) ? "5.2" : "2.4",
+ geo_ch->
+ flags & IEEE80211_CHAN_DISABLED ? "restricted" : "valid",
+ geo_ch->flags);
+ }
+
+ il->tx_power_device_lmt = max_tx_power;
+ il->tx_power_user_lmt = max_tx_power;
+ il->tx_power_next = max_tx_power;
+
+ if (il->bands[IEEE80211_BAND_5GHZ].n_channels == 0 &&
+ (il->cfg->sku & IL_SKU_A)) {
+ IL_INFO("Incorrectly detected BG card as ABG. "
+ "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
+ il->pci_dev->device, il->pci_dev->subsystem_device);
+ il->cfg->sku &= ~IL_SKU_A;
+ }
+
+ IL_INFO("Tunable channels: %d 802.11bg, %d 802.11a channels\n",
+ il->bands[IEEE80211_BAND_2GHZ].n_channels,
+ il->bands[IEEE80211_BAND_5GHZ].n_channels);
+
+ set_bit(S_GEO_CONFIGURED, &il->status);
+
+ return 0;
+}
+EXPORT_SYMBOL(il_init_geos);
+
+/*
+ * il_free_geos - undo allocations in il_init_geos
+ */
+void
+il_free_geos(struct il_priv *il)
+{
+ kfree(il->ieee_channels);
+ kfree(il->ieee_rates);
+ clear_bit(S_GEO_CONFIGURED, &il->status);
+}
+EXPORT_SYMBOL(il_free_geos);
+
+static bool
+il_is_channel_extension(struct il_priv *il, enum ieee80211_band band,
+ u16 channel, u8 extension_chan_offset)
+{
+ const struct il_channel_info *ch_info;
+
+ ch_info = il_get_channel_info(il, band, channel);
+ if (!il_is_channel_valid(ch_info))
+ return false;
+
+ if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
+ return !(ch_info->
+ ht40_extension_channel & IEEE80211_CHAN_NO_HT40PLUS);
+ else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
+ return !(ch_info->
+ ht40_extension_channel & IEEE80211_CHAN_NO_HT40MINUS);
+
+ return false;
+}
+
+bool
+il_is_ht40_tx_allowed(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_sta_ht_cap *ht_cap)
+{
+ if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
+ return false;
+
+ /*
+ * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 because
+ * the bit will not be set in the pure 40MHz case.
+ */
+ if (ht_cap && !ht_cap->ht_supported)
+ return false;
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ if (il->disable_ht40)
+ return false;
+#endif
+
+ return il_is_channel_extension(il, il->band,
+ le16_to_cpu(ctx->staging.channel),
+ ctx->ht.extension_chan_offset);
+}
+EXPORT_SYMBOL(il_is_ht40_tx_allowed);
+
+static u16
+il_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
+{
+ u16 new_val;
+ u16 beacon_factor;
+
+ /*
+ * If mac80211 hasn't given us a beacon interval, program
+ * the default into the device.
+ */
+ if (!beacon_val)
+ return DEFAULT_BEACON_INTERVAL;
+
+ /*
+ * If the beacon interval we obtained from the peer
+ * is too large, we'll have to wake up more often
+ * (and in IBSS case, we'll beacon too much)
+ *
+ * For example, if max_beacon_val is 4096, and the
+ * requested beacon interval is 7000, we'll have to
+ * use 3500 to be able to wake up on the beacons.
+ *
+ * This could badly influence beacon detection stats.
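+ *
+ * (With these numbers: beacon_factor = (7000 + 4096) / 4096 = 2,
+ *  so the device is programmed with 7000 / 2 = 3500.)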
+ */
+
+ beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
+ new_val = beacon_val / beacon_factor;
+
+ if (!new_val)
+ new_val = max_beacon_val;
+
+ return new_val;
+}
+
+int
+il_send_rxon_timing(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ u64 tsf;
+ s32 interval_tm, rem;
+ struct ieee80211_conf *conf = NULL;
+ u16 beacon_int;
+ struct ieee80211_vif *vif = ctx->vif;
+
+ conf = &il->hw->conf;
+
+ lockdep_assert_held(&il->mutex);
+
+ memset(&ctx->timing, 0, sizeof(struct il_rxon_time_cmd));
+
+ ctx->timing.timestamp = cpu_to_le64(il->timestamp);
+ ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
+
+ beacon_int = vif ? vif->bss_conf.beacon_int : 0;
+
+ /*
+ * TODO: For IBSS we need to get atim_win from mac80211,
+ * for now just always use 0
+ */
+ ctx->timing.atim_win = 0;
+
+ beacon_int =
+ il_adjust_beacon_interval(beacon_int,
+ il->hw_params.max_beacon_itrvl *
+ TIME_UNIT);
+ ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
+
+ tsf = il->timestamp; /* tsf is modified by do_div: copy it */
+ interval_tm = beacon_int * TIME_UNIT;
+ rem = do_div(tsf, interval_tm);
+ ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
+
+ ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ? : 1) : 1;
+
+ D_ASSOC("beacon interval %d beacon timer %d beacon tim %d\n",
+ le16_to_cpu(ctx->timing.beacon_interval),
+ le32_to_cpu(ctx->timing.beacon_init_val),
+ le16_to_cpu(ctx->timing.atim_win));
+
+ return il_send_cmd_pdu(il, ctx->rxon_timing_cmd, sizeof(ctx->timing),
+ &ctx->timing);
+}
+EXPORT_SYMBOL(il_send_rxon_timing);
+
+void
+il_set_rxon_hwcrypto(struct il_priv *il, struct il_rxon_context *ctx,
+ int hw_decrypt)
+{
+ struct il_rxon_cmd *rxon = &ctx->staging;
+
+ if (hw_decrypt)
+ rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
+ else
+ rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
+
+}
+EXPORT_SYMBOL(il_set_rxon_hwcrypto);
+
+/* check that the RXON structure is valid */
+int
+il_check_rxon_cmd(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ struct il_rxon_cmd *rxon = &ctx->staging;
+ bool error = false;
+
+ if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
+ if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
+ IL_WARN("check 2.4G: wrong narrow\n");
+ error = true;
+ }
+ if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
+ IL_WARN("check 2.4G: wrong radar\n");
+ error = true;
+ }
+ } else {
+ if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
+ IL_WARN("check 5.2G: not short slot!\n");
+ error = true;
+ }
+ if (rxon->flags & RXON_FLG_CCK_MSK) {
+ IL_WARN("check 5.2G: CCK!\n");
+ error = true;
+ }
+ }
+ if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
+ IL_WARN("mac/bssid mcast!\n");
+ error = true;
+ }
+
+ /* make sure basic rates 6Mbps and 1Mbps are supported */
+ if ((rxon->ofdm_basic_rates & RATE_6M_MASK) == 0 &&
+ (rxon->cck_basic_rates & RATE_1M_MASK) == 0) {
+ IL_WARN("neither 1 nor 6 are basic\n");
+ error = true;
+ }
+
+ if (le16_to_cpu(rxon->assoc_id) > 2007) {
+ IL_WARN("aid > 2007\n");
+ error = true;
+ }
+
+ if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) ==
+ (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
+ IL_WARN("CCK and short slot\n");
+ error = true;
+ }
+
+ if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) ==
+ (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
+ IL_WARN("CCK and auto detect");
+ error = true;
+ }
+
+ if ((rxon->
+ flags & (RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK)) ==
+ RXON_FLG_TGG_PROTECT_MSK) {
+ IL_WARN("TGg but no auto-detect\n");
+ error = true;
+ }
+
+ if (error) {
+ IL_WARN("Tuning to channel %d\n", le16_to_cpu(rxon->channel));
+ IL_ERR("Invalid RXON\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(il_check_rxon_cmd);
+
+/**
+ * il_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
+ * @il: staging_rxon is compared to active_rxon
+ *
+ * If the RXON structure is changing enough to require a new tune,
+ * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
+ * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
+ */
+int
+il_full_rxon_required(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ const struct il_rxon_cmd *staging = &ctx->staging;
+ const struct il_rxon_cmd *active = &ctx->active;
+
+#define CHK(cond) \
+ if ((cond)) { \
+ D_INFO("need full RXON - " #cond "\n"); \
+ return 1; \
+ }
+
+#define CHK_NEQ(c1, c2) \
+ if ((c1) != (c2)) { \
+ D_INFO("need full RXON - " \
+ #c1 " != " #c2 " - %d != %d\n", \
+ (c1), (c2)); \
+ return 1; \
+ }
+
+ /* These items are only settable from the full RXON command */
+ CHK(!il_is_associated_ctx(ctx));
+ CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
+ CHK(compare_ether_addr(staging->node_addr, active->node_addr));
+ CHK(compare_ether_addr
+ (staging->wlap_bssid_addr, active->wlap_bssid_addr));
+ CHK_NEQ(staging->dev_type, active->dev_type);
+ CHK_NEQ(staging->channel, active->channel);
+ CHK_NEQ(staging->air_propagation, active->air_propagation);
+ CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
+ active->ofdm_ht_single_stream_basic_rates);
+ CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
+ active->ofdm_ht_dual_stream_basic_rates);
+ CHK_NEQ(staging->assoc_id, active->assoc_id);
+
+ /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
+ * be updated with the RXON_ASSOC command -- however only some
+ * flag transitions are allowed using RXON_ASSOC */
+
+ /* Check if we are not switching bands */
+ CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
+ active->flags & RXON_FLG_BAND_24G_MSK);
+
+ /* Check if we are switching association toggle */
+ CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
+ active->filter_flags & RXON_FILTER_ASSOC_MSK);
+
+#undef CHK
+#undef CHK_NEQ
+
+ return 0;
+}
+EXPORT_SYMBOL(il_full_rxon_required);
+
+u8
+il_get_lowest_plcp(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ /*
+ * Assign the lowest rate -- should really get this from
+ * the beacon skb from mac80211.
+ */
+ if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
+ return RATE_1M_PLCP;
+ else
+ return RATE_6M_PLCP;
+}
+EXPORT_SYMBOL(il_get_lowest_plcp);
+
+static void
+_il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf,
+ struct il_rxon_context *ctx)
+{
+ struct il_rxon_cmd *rxon = &ctx->staging;
+
+ if (!ctx->ht.enabled) {
+ rxon->flags &=
+ ~(RXON_FLG_CHANNEL_MODE_MSK |
+ RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | RXON_FLG_HT40_PROT_MSK
+ | RXON_FLG_HT_PROT_MSK);
+ return;
+ }
+
+ rxon->flags |=
+ cpu_to_le32(ctx->ht.protection << RXON_FLG_HT_OPERATING_MODE_POS);
+
+ /* Set up channel bandwidth:
+ * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
+ /* clear the HT channel mode before setting the mode */
+ rxon->flags &=
+ ~(RXON_FLG_CHANNEL_MODE_MSK | RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
+ if (il_is_ht40_tx_allowed(il, ctx, NULL)) {
+ /* pure ht40 */
+ if (ctx->ht.protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
+ rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
+ /* Note: control channel is opposite of extension channel */
+ switch (ctx->ht.extension_chan_offset) {
+ case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ rxon->flags &=
+ ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+ break;
+ }
+ } else {
+ /* Note: control channel is opposite of extension channel */
+ switch (ctx->ht.extension_chan_offset) {
+ case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
+ rxon->flags &=
+ ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
+ rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
+ rxon->flags |= RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
+ rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
+ break;
+ case IEEE80211_HT_PARAM_CHA_SEC_NONE:
+ default:
+ /* channel location only valid if in Mixed mode */
+ IL_ERR("invalid extension channel offset\n");
+ break;
+ }
+ }
+ } else {
+ rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
+ }
+
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+
+ D_ASSOC("rxon flags 0x%X operation mode :0x%X "
+ "extension channel offset 0x%x\n", le32_to_cpu(rxon->flags),
+ ctx->ht.protection, ctx->ht.extension_chan_offset);
+}
+
+void
+il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf)
+{
+ _il_set_rxon_ht(il, ht_conf, &il->ctx);
+}
+EXPORT_SYMBOL(il_set_rxon_ht);
+
+/* Return valid, unused, channel for a passive scan to reset the RF */
+u8
+il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band)
+{
+ const struct il_channel_info *ch_info;
+ int i;
+ u8 channel = 0;
+ u8 min, max;
+
+ if (band == IEEE80211_BAND_5GHZ) {
+ min = 14;
+ max = il->channel_count;
+ } else {
+ min = 0;
+ max = 14;
+ }
+
+ for (i = min; i < max; i++) {
+ channel = il->channel_info[i].channel;
+ if (channel == le16_to_cpu(il->ctx.staging.channel))
+ continue;
+
+ ch_info = il_get_channel_info(il, band, channel);
+ if (il_is_channel_valid(ch_info))
+ break;
+ }
+
+ return channel;
+}
+EXPORT_SYMBOL(il_get_single_channel_number);
+
+/**
+ * il_set_rxon_channel - Set the band and channel values in staging RXON
+ * @ch: requested channel as a pointer to struct ieee80211_channel
+ *
+ * NOTE: Does not commit to the hardware; it sets appropriate bit fields
+ * in the staging RXON flag structure based on the ch->band
+ */
+int
+il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch,
+ struct il_rxon_context *ctx)
+{
+ enum ieee80211_band band = ch->band;
+ u16 channel = ch->hw_value;
+
+ if (le16_to_cpu(ctx->staging.channel) == channel && il->band == band)
+ return 0;
+
+ ctx->staging.channel = cpu_to_le16(channel);
+ if (band == IEEE80211_BAND_5GHZ)
+ ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
+ else
+ ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
+
+ il->band = band;
+
+ D_INFO("Staging channel set to %d [%d]\n", channel, band);
+
+ return 0;
+}
+EXPORT_SYMBOL(il_set_rxon_channel);
+
+void
+il_set_flags_for_band(struct il_priv *il, struct il_rxon_context *ctx,
+ enum ieee80211_band band, struct ieee80211_vif *vif)
+{
+ if (band == IEEE80211_BAND_5GHZ) {
+ ctx->staging.flags &=
+ ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK |
+ RXON_FLG_CCK_MSK);
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ } else {
+ /* Copied from il_post_associate() */
+ if (vif && vif->bss_conf.use_short_slot)
+ ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
+
+ ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
+ ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
+ ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
+ }
+}
+EXPORT_SYMBOL(il_set_flags_for_band);
+
+/*
+ * initialize rxon structure with default values from eeprom
+ */
+void
+il_connection_init_rx_config(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ const struct il_channel_info *ch_info;
+
+ memset(&ctx->staging, 0, sizeof(ctx->staging));
+
+ if (!ctx->vif) {
+ ctx->staging.dev_type = ctx->unused_devtype;
+ } else
+ switch (ctx->vif->type) {
+
+ case NL80211_IFTYPE_STATION:
+ ctx->staging.dev_type = ctx->station_devtype;
+ ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
+ break;
+
+ case NL80211_IFTYPE_ADHOC:
+ ctx->staging.dev_type = ctx->ibss_devtype;
+ ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
+ ctx->staging.filter_flags =
+ RXON_FILTER_BCON_AWARE_MSK |
+ RXON_FILTER_ACCEPT_GRP_MSK;
+ break;
+
+ default:
+ IL_ERR("Unsupported interface type %d\n",
+ ctx->vif->type);
+ break;
+ }
+
+#if 0
+ /* TODO: Figure out when short_preamble would be set and cache from
+ * that */
+ if (!hw_to_local(il->hw)->short_preamble)
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+#endif
+
+ ch_info =
+ il_get_channel_info(il, il->band, le16_to_cpu(ctx->active.channel));
+
+ if (!ch_info)
+ ch_info = &il->channel_info[0];
+
+ ctx->staging.channel = cpu_to_le16(ch_info->channel);
+ il->band = ch_info->band;
+
+ il_set_flags_for_band(il, ctx, il->band, ctx->vif);
+
+ ctx->staging.ofdm_basic_rates =
+ (IL_OFDM_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
+ ctx->staging.cck_basic_rates =
+ (IL_CCK_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
+
+ /* clear both MIX and PURE40 mode flag */
+ ctx->staging.flags &=
+ ~(RXON_FLG_CHANNEL_MODE_MIXED | RXON_FLG_CHANNEL_MODE_PURE_40);
+ if (ctx->vif)
+ memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
+
+ ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
+ ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
+}
+EXPORT_SYMBOL(il_connection_init_rx_config);
+
+void
+il_set_rate(struct il_priv *il)
+{
+ const struct ieee80211_supported_band *hw = NULL;
+ struct ieee80211_rate *rate;
+ int i;
+
+ hw = il_get_hw_mode(il, il->band);
+ if (!hw) {
+ IL_ERR("Failed to set rate: unable to get hw mode\n");
+ return;
+ }
+
+ il->active_rate = 0;
+
+ for (i = 0; i < hw->n_bitrates; i++) {
+ rate = &(hw->bitrates[i]);
+ if (rate->hw_value < RATE_COUNT_LEGACY)
+ il->active_rate |= (1 << rate->hw_value);
+ }
+
+ D_RATE("Set active_rate = %0x\n", il->active_rate);
+
+ il->ctx.staging.cck_basic_rates =
+ (IL_CCK_BASIC_RATES_MASK >> IL_FIRST_CCK_RATE) & 0xF;
+
+ il->ctx.staging.ofdm_basic_rates =
+ (IL_OFDM_BASIC_RATES_MASK >> IL_FIRST_OFDM_RATE) & 0xFF;
+}
+EXPORT_SYMBOL(il_set_rate);
+
+void
+il_chswitch_done(struct il_priv *il, bool is_success)
+{
+ struct il_rxon_context *ctx = &il->ctx;
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ if (test_and_clear_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
+ ieee80211_chswitch_done(ctx->vif, is_success);
+}
+EXPORT_SYMBOL(il_chswitch_done);
+
+void
+il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_csa_notification *csa = &(pkt->u.csa_notif);
+
+ struct il_rxon_context *ctx = &il->ctx;
+ struct il_rxon_cmd *rxon = (void *)&ctx->active;
+
+ if (!test_bit(S_CHANNEL_SWITCH_PENDING, &il->status))
+ return;
+
+ if (!le32_to_cpu(csa->status) && csa->channel == il->switch_channel) {
+ rxon->channel = csa->channel;
+ ctx->staging.channel = csa->channel;
+ D_11H("CSA notif: channel %d\n", le16_to_cpu(csa->channel));
+ il_chswitch_done(il, true);
+ } else {
+ IL_ERR("CSA notif (fail) : channel %d\n",
+ le16_to_cpu(csa->channel));
+ il_chswitch_done(il, false);
+ }
+}
+EXPORT_SYMBOL(il_hdl_csa);
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+void
+il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ struct il_rxon_cmd *rxon = &ctx->staging;
+
+ D_RADIO("RX CONFIG:\n");
+ il_print_hex_dump(il, IL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
+ D_RADIO("u16 channel: 0x%x\n", le16_to_cpu(rxon->channel));
+ D_RADIO("u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
+ D_RADIO("u32 filter_flags: 0x%08x\n", le32_to_cpu(rxon->filter_flags));
+ D_RADIO("u8 dev_type: 0x%x\n", rxon->dev_type);
+ D_RADIO("u8 ofdm_basic_rates: 0x%02x\n", rxon->ofdm_basic_rates);
+ D_RADIO("u8 cck_basic_rates: 0x%02x\n", rxon->cck_basic_rates);
+ D_RADIO("u8[6] node_addr: %pM\n", rxon->node_addr);
+ D_RADIO("u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
+ D_RADIO("u16 assoc_id: 0x%x\n", le16_to_cpu(rxon->assoc_id));
+}
+EXPORT_SYMBOL(il_print_rx_config_cmd);
+#endif
+/**
+ * il_irq_handle_error - called for HW or SW error interrupt from card
+ */
+void
+il_irq_handle_error(struct il_priv *il)
+{
+ /* Set the FW error flag -- cleared on il_down */
+ set_bit(S_FW_ERROR, &il->status);
+
+ /* Cancel currently queued command. */
+ clear_bit(S_HCMD_ACTIVE, &il->status);
+
+ IL_ERR("Loaded firmware version: %s\n", il->hw->wiphy->fw_version);
+
+ il->cfg->ops->lib->dump_nic_error_log(il);
+ if (il->cfg->ops->lib->dump_fh)
+ il->cfg->ops->lib->dump_fh(il, NULL, false);
+#ifdef CONFIG_IWLEGACY_DEBUG
+ if (il_get_debug_level(il) & IL_DL_FW_ERRORS)
+ il_print_rx_config_cmd(il, &il->ctx);
+#endif
+
+ wake_up(&il->wait_command_queue);
+
+ /* Keep the restart process from trying to send host
+ * commands by clearing the READY status bit */
+ clear_bit(S_READY, &il->status);
+
+ if (!test_bit(S_EXIT_PENDING, &il->status)) {
+ IL_DBG(IL_DL_FW_ERRORS,
+ "Restarting adapter due to uCode error.\n");
+
+ if (il->cfg->mod_params->restart_fw)
+ queue_work(il->workqueue, &il->restart);
+ }
+}
+EXPORT_SYMBOL(il_irq_handle_error);
+
+static int
+il_apm_stop_master(struct il_priv *il)
+{
+ int ret = 0;
+
+ /* stop device's busmaster DMA activity */
+ il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
+
+ ret =
+ _il_poll_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
+ CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
+ if (ret)
+ IL_WARN("Master Disable Timed Out, 100 usec\n");
+
+ D_INFO("stop master\n");
+
+ return ret;
+}
+
+void
+il_apm_stop(struct il_priv *il)
+{
+ D_INFO("Stop card, put in low power state\n");
+
+ /* Stop device's DMA activity */
+ il_apm_stop_master(il);
+
+ /* Reset the entire device */
+ il_set_bit(il, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+
+ udelay(10);
+
+ /*
+ * Clear "initialization complete" bit to move adapter from
+ * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
+ */
+ il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+}
+EXPORT_SYMBOL(il_apm_stop);
+
+/*
+ * Start up NIC's basic functionality after it has been reset
+ * (e.g. after platform boot, or shutdown via il_apm_stop())
+ * NOTE: This does not load uCode nor start the embedded processor
+ */
+int
+il_apm_init(struct il_priv *il)
+{
+ int ret = 0;
+ u16 lctl;
+
+ D_INFO("Init card's basic functions\n");
+
+ /*
+ * Use "set_bit" below rather than "write", to preserve any hardware
+ * bits already set by default after reset.
+ */
+
+ /* Disable L0S exit timer (platform NMI Work/Around) */
+ il_set_bit(il, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+
+ /*
+ * Disable L0s without affecting L1;
+ * don't wait for ICH L0s (ICH bug W/A)
+ */
+ il_set_bit(il, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
+
+ /* Set FH wait threshold to maximum (HW error during stress W/A) */
+ il_set_bit(il, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
+
+ /*
+ * Enable HAP INTA (interrupt from management bus) to
+ * wake device's PCI Express link L1a -> L0s
+ * NOTE: This is a no-op for 3945 (the bit does not exist there)
+ */
+ il_set_bit(il, CSR_HW_IF_CONFIG_REG,
+ CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
+
+ /*
+ * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
+ * Check if BIOS (or OS) enabled L1-ASPM on this device.
+ * If so (likely), disable L0S, so device moves directly L0->L1;
+ * costs negligible amount of power savings.
+ * If not (unlikely), enable L0S, so there is at least some
+ * power savings, even without L1.
+ */
+ if (il->cfg->base_params->set_l0s) {
+ lctl = il_pcie_link_ctl(il);
+ if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
+ PCI_CFG_LINK_CTRL_VAL_L1_EN) {
+ /* L1-ASPM enabled; disable(!) L0S */
+ il_set_bit(il, CSR_GIO_REG,
+ CSR_GIO_REG_VAL_L0S_ENABLED);
+ D_POWER("L1 Enabled; Disabling L0S\n");
+ } else {
+ /* L1-ASPM disabled; enable(!) L0S */
+ il_clear_bit(il, CSR_GIO_REG,
+ CSR_GIO_REG_VAL_L0S_ENABLED);
+ D_POWER("L1 Disabled; Enabling L0S\n");
+ }
+ }
+
+ /* Configure analog phase-lock-loop before activating to D0A */
+ if (il->cfg->base_params->pll_cfg_val)
+ il_set_bit(il, CSR_ANA_PLL_CFG,
+ il->cfg->base_params->pll_cfg_val);
+
+ /*
+ * Set "initialization complete" bit to move adapter from
+ * D0U* --> D0A* (powered-up active) state.
+ */
+ il_set_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+
+ /*
+ * Wait for clock stabilization; once stabilized, access to
+ * device-internal resources is supported, e.g. il_wr_prph()
+ * and accesses to uCode SRAM.
+ */
+ ret =
+ _il_poll_bit(il, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
+ if (ret < 0) {
+ D_INFO("Failed to init the card\n");
+ goto out;
+ }
+
+ /*
+ * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
+ * BSM (Bootstrap State Machine) is only in 3945 and 4965.
+ *
+ * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
+ * do not disable clocks. This preserves any hardware bits already
+ * set by default in "CLK_CTRL_REG" after reset.
+ */
+ if (il->cfg->base_params->use_bsm)
+ il_wr_prph(il, APMG_CLK_EN_REG,
+ APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
+ else
+ il_wr_prph(il, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(20);
+
+ /* Disable L1-Active */
+ il_set_bits_prph(il, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL(il_apm_init);
+
+int
+il_set_tx_power(struct il_priv *il, s8 tx_power, bool force)
+{
+ int ret;
+ s8 prev_tx_power;
+ bool defer;
+ struct il_rxon_context *ctx = &il->ctx;
+
+ lockdep_assert_held(&il->mutex);
+
+ if (il->tx_power_user_lmt == tx_power && !force)
+ return 0;
+
+ if (!il->cfg->ops->lib->send_tx_power)
+ return -EOPNOTSUPP;
+
+ /* 0 dBm means 1 milliwatt */
+ if (tx_power < 0) {
+ IL_WARN("Requested user TXPOWER %d below 1 mW.\n", tx_power);
+ return -EINVAL;
+ }
+
+ if (tx_power > il->tx_power_device_lmt) {
+ IL_WARN("Requested user TXPOWER %d above upper limit %d.\n",
+ tx_power, il->tx_power_device_lmt);
+ return -EINVAL;
+ }
+
+ if (!il_is_ready_rf(il))
+ return -EIO;
+
+ /* Scan complete and commit_rxon use the tx_power_next value;
+ * it always needs to be updated with the newest request */
+ il->tx_power_next = tx_power;
+
+ /* do not set tx power when scanning or channel changing */
+ defer = test_bit(S_SCANNING, &il->status) ||
+ memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
+ if (defer && !force) {
+ D_INFO("Deferring tx power set\n");
+ return 0;
+ }
+
+ prev_tx_power = il->tx_power_user_lmt;
+ il->tx_power_user_lmt = tx_power;
+
+ ret = il->cfg->ops->lib->send_tx_power(il);
+
+ /* if fail to set tx_power, restore the orig. tx power */
+ if (ret) {
+ il->tx_power_user_lmt = prev_tx_power;
+ il->tx_power_next = prev_tx_power;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(il_set_tx_power);
+
+void
+il_send_bt_config(struct il_priv *il)
+{
+ struct il_bt_cmd bt_cmd = {
+ .lead_time = BT_LEAD_TIME_DEF,
+ .max_kill = BT_MAX_KILL_DEF,
+ .kill_ack_mask = 0,
+ .kill_cts_mask = 0,
+ };
+
+ if (!bt_coex_active)
+ bt_cmd.flags = BT_COEX_DISABLE;
+ else
+ bt_cmd.flags = BT_COEX_ENABLE;
+
+ D_INFO("BT coex %s\n",
+ (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
+
+ if (il_send_cmd_pdu(il, C_BT_CONFIG, sizeof(struct il_bt_cmd), &bt_cmd))
+ IL_ERR("failed to send BT Coex Config\n");
+}
+EXPORT_SYMBOL(il_send_bt_config);
+
+int
+il_send_stats_request(struct il_priv *il, u8 flags, bool clear)
+{
+ struct il_stats_cmd stats_cmd = {
+ .configuration_flags = clear ? IL_STATS_CONF_CLEAR_STATS : 0,
+ };
+
+ if (flags & CMD_ASYNC)
+ return il_send_cmd_pdu_async(il, C_STATS, sizeof(struct il_stats_cmd),
+ &stats_cmd, NULL);
+ else
+ return il_send_cmd_pdu(il, C_STATS, sizeof(struct il_stats_cmd),
+ &stats_cmd);
+}
+EXPORT_SYMBOL(il_send_stats_request);
+
+void
+il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb)
+{
+#ifdef CONFIG_IWLEGACY_DEBUG
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ struct il_sleep_notification *sleep = &(pkt->u.sleep_notif);
+ D_RX("sleep mode: %d, src: %d\n",
+ sleep->pm_sleep_mode, sleep->pm_wakeup_src);
+#endif
+}
+EXPORT_SYMBOL(il_hdl_pm_sleep);
+
+void
+il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+ u32 len = le32_to_cpu(pkt->len_n_flags) & IL_RX_FRAME_SIZE_MSK;
+ D_RADIO("Dumping %d bytes of unhandled notification for %s:\n", len,
+ il_get_cmd_string(pkt->hdr.cmd));
+ il_print_hex_dump(il, IL_DL_RADIO, pkt->u.raw, len);
+}
+EXPORT_SYMBOL(il_hdl_pm_debug_stats);
+
+void
+il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb)
+{
+ struct il_rx_pkt *pkt = rxb_addr(rxb);
+
+ IL_ERR("Error Reply type 0x%08X cmd %s (0x%02X) "
+ "seq 0x%04X ser 0x%08X\n",
+ le32_to_cpu(pkt->u.err_resp.error_type),
+ il_get_cmd_string(pkt->u.err_resp.cmd_id),
+ pkt->u.err_resp.cmd_id,
+ le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
+ le32_to_cpu(pkt->u.err_resp.error_info));
+}
+EXPORT_SYMBOL(il_hdl_error);
+
+void
+il_clear_isr_stats(struct il_priv *il)
+{
+ memset(&il->isr_stats, 0, sizeof(il->isr_stats));
+}
+
+int
+il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct il_priv *il = hw->priv;
+ unsigned long flags;
+ int q;
+
+ D_MAC80211("enter\n");
+
+ if (!il_is_ready_rf(il)) {
+ D_MAC80211("leave - RF not ready\n");
+ return -EIO;
+ }
+
+ if (queue >= AC_NUM) {
+ D_MAC80211("leave - queue >= AC_NUM %d\n", queue);
+ return 0;
+ }
+
+ q = AC_NUM - 1 - queue;
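+ /* mac80211 hands us queues ordered highest-priority first (0 = VO),
+ * while the uCode AC parameter array appears to be indexed in the
+ * opposite order, hence the index reversal (assumed from this mapping). */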
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ il->ctx.qos_data.def_qos_parm.ac[q].cw_min =
+ cpu_to_le16(params->cw_min);
+ il->ctx.qos_data.def_qos_parm.ac[q].cw_max =
+ cpu_to_le16(params->cw_max);
+ il->ctx.qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
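+ /* mac80211 supplies txop in units of 32 usec; multiplying by 32
+ * converts it to usec for the uCode EDCA TXOP field (assumed unit). */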
+ il->ctx.qos_data.def_qos_parm.ac[q].edca_txop =
+ cpu_to_le16((params->txop * 32));
+
+ il->ctx.qos_data.def_qos_parm.ac[q].reserved1 = 0;
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ D_MAC80211("leave\n");
+ return 0;
+}
+EXPORT_SYMBOL(il_mac_conf_tx);
+
+int
+il_mac_tx_last_beacon(struct ieee80211_hw *hw)
+{
+ struct il_priv *il = hw->priv;
+
+ return il->ibss_manager == IL_IBSS_MANAGER;
+}
+EXPORT_SYMBOL_GPL(il_mac_tx_last_beacon);
+
+static int
+il_set_mode(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ il_connection_init_rx_config(il, ctx);
+
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+
+ return il_commit_rxon(il, ctx);
+}
+
+static int
+il_setup_interface(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ struct ieee80211_vif *vif = ctx->vif;
+ int err;
+
+ lockdep_assert_held(&il->mutex);
+
+ /*
+ * This variable will be correct only when there's just
+ * a single context, but all code using it is for hardware
+ * that supports only one context.
+ */
+ il->iw_mode = vif->type;
+
+ ctx->is_active = true;
+
+ err = il_set_mode(il, ctx);
+ if (err) {
+ if (!ctx->always_active)
+ ctx->is_active = false;
+ return err;
+ }
+
+ return 0;
+}
+
+int
+il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct il_priv *il = hw->priv;
+ struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+ int err;
+ u32 modes;
+
+ D_MAC80211("enter: type %d, addr %pM\n", vif->type, vif->addr);
+
+ mutex_lock(&il->mutex);
+
+ if (!il_is_ready_rf(il)) {
+ IL_WARN("Try to add interface when device not ready\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* check if busy context is exclusive */
+ if (il->ctx.vif &&
+ (il->ctx.exclusive_interface_modes & BIT(il->ctx.vif->type))) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ modes = il->ctx.interface_modes | il->ctx.exclusive_interface_modes;
+ if (!(modes & BIT(vif->type))) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ vif_priv->ctx = &il->ctx;
+ il->ctx.vif = vif;
+
+ err = il_setup_interface(il, &il->ctx);
+ if (err) {
+ il->ctx.vif = NULL;
+ il->iw_mode = NL80211_IFTYPE_STATION;
+ }
+
+out:
+ mutex_unlock(&il->mutex);
+
+ D_MAC80211("leave\n");
+ return err;
+}
+EXPORT_SYMBOL(il_mac_add_interface);
+
+static void
+il_teardown_interface(struct il_priv *il, struct ieee80211_vif *vif,
+ bool mode_change)
+{
+ struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+
+ lockdep_assert_held(&il->mutex);
+
+ if (il->scan_vif == vif) {
+ il_scan_cancel_timeout(il, 200);
+ il_force_scan_end(il);
+ }
+
+ if (!mode_change) {
+ il_set_mode(il, ctx);
+ if (!ctx->always_active)
+ ctx->is_active = false;
+ }
+}
+
+void
+il_mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct il_priv *il = hw->priv;
+ struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+
+ D_MAC80211("enter\n");
+
+ mutex_lock(&il->mutex);
+
+ WARN_ON(ctx->vif != vif);
+ ctx->vif = NULL;
+
+ il_teardown_interface(il, vif, false);
+
+ memset(il->bssid, 0, ETH_ALEN);
+ mutex_unlock(&il->mutex);
+
+ D_MAC80211("leave\n");
+
+}
+EXPORT_SYMBOL(il_mac_remove_interface);
+
+int
+il_alloc_txq_mem(struct il_priv *il)
+{
+ if (!il->txq)
+ il->txq =
+ kzalloc(sizeof(struct il_tx_queue) *
+ il->cfg->base_params->num_of_queues, GFP_KERNEL);
+ if (!il->txq) {
+ IL_ERR("Not enough memory for txq\n");
+ return -ENOMEM;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(il_alloc_txq_mem);
+
+void
+il_txq_mem(struct il_priv *il)
+{
+ kfree(il->txq);
+ il->txq = NULL;
+}
+EXPORT_SYMBOL(il_txq_mem);
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+
+#define IL_TRAFFIC_DUMP_SIZE (IL_TRAFFIC_ENTRY_SIZE * IL_TRAFFIC_ENTRIES)
+
+void
+il_reset_traffic_log(struct il_priv *il)
+{
+ il->tx_traffic_idx = 0;
+ il->rx_traffic_idx = 0;
+ if (il->tx_traffic)
+ memset(il->tx_traffic, 0, IL_TRAFFIC_DUMP_SIZE);
+ if (il->rx_traffic)
+ memset(il->rx_traffic, 0, IL_TRAFFIC_DUMP_SIZE);
+}
+
+int
+il_alloc_traffic_mem(struct il_priv *il)
+{
+ u32 traffic_size = IL_TRAFFIC_DUMP_SIZE;
+
+ if (il_debug_level & IL_DL_TX) {
+ if (!il->tx_traffic) {
+ il->tx_traffic = kzalloc(traffic_size, GFP_KERNEL);
+ if (!il->tx_traffic)
+ return -ENOMEM;
+ }
+ }
+ if (il_debug_level & IL_DL_RX) {
+ if (!il->rx_traffic) {
+ il->rx_traffic = kzalloc(traffic_size, GFP_KERNEL);
+ if (!il->rx_traffic)
+ return -ENOMEM;
+ }
+ }
+ il_reset_traffic_log(il);
+ return 0;
+}
+EXPORT_SYMBOL(il_alloc_traffic_mem);
+
+void
+il_free_traffic_mem(struct il_priv *il)
+{
+ kfree(il->tx_traffic);
+ il->tx_traffic = NULL;
+
+ kfree(il->rx_traffic);
+ il->rx_traffic = NULL;
+}
+EXPORT_SYMBOL(il_free_traffic_mem);
+
+void
+il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
+ struct ieee80211_hdr *header)
+{
+ __le16 fc;
+ u16 len;
+
+ if (likely(!(il_debug_level & IL_DL_TX)))
+ return;
+
+ if (!il->tx_traffic)
+ return;
+
+ fc = header->frame_control;
+ if (ieee80211_is_data(fc)) {
+ len =
+ (length >
+ IL_TRAFFIC_ENTRY_SIZE) ? IL_TRAFFIC_ENTRY_SIZE : length;
+ memcpy((il->tx_traffic +
+ (il->tx_traffic_idx * IL_TRAFFIC_ENTRY_SIZE)), header,
+ len);
+ il->tx_traffic_idx =
+ (il->tx_traffic_idx + 1) % IL_TRAFFIC_ENTRIES;
+ }
+}
+EXPORT_SYMBOL(il_dbg_log_tx_data_frame);
+
+void
+il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
+ struct ieee80211_hdr *header)
+{
+ __le16 fc;
+ u16 len;
+
+ if (likely(!(il_debug_level & IL_DL_RX)))
+ return;
+
+ if (!il->rx_traffic)
+ return;
+
+ fc = header->frame_control;
+ if (ieee80211_is_data(fc)) {
+ len =
+ (length >
+ IL_TRAFFIC_ENTRY_SIZE) ? IL_TRAFFIC_ENTRY_SIZE : length;
+ memcpy((il->rx_traffic +
+ (il->rx_traffic_idx * IL_TRAFFIC_ENTRY_SIZE)), header,
+ len);
+ il->rx_traffic_idx =
+ (il->rx_traffic_idx + 1) % IL_TRAFFIC_ENTRIES;
+ }
+}
+EXPORT_SYMBOL(il_dbg_log_rx_data_frame);
+
+const char *
+il_get_mgmt_string(int cmd)
+{
+ switch (cmd) {
+ IL_CMD(MANAGEMENT_ASSOC_REQ);
+ IL_CMD(MANAGEMENT_ASSOC_RESP);
+ IL_CMD(MANAGEMENT_REASSOC_REQ);
+ IL_CMD(MANAGEMENT_REASSOC_RESP);
+ IL_CMD(MANAGEMENT_PROBE_REQ);
+ IL_CMD(MANAGEMENT_PROBE_RESP);
+ IL_CMD(MANAGEMENT_BEACON);
+ IL_CMD(MANAGEMENT_ATIM);
+ IL_CMD(MANAGEMENT_DISASSOC);
+ IL_CMD(MANAGEMENT_AUTH);
+ IL_CMD(MANAGEMENT_DEAUTH);
+ IL_CMD(MANAGEMENT_ACTION);
+ default:
+ return "UNKNOWN";
+
+ }
+}
+
+const char *
+il_get_ctrl_string(int cmd)
+{
+ switch (cmd) {
+ IL_CMD(CONTROL_BACK_REQ);
+ IL_CMD(CONTROL_BACK);
+ IL_CMD(CONTROL_PSPOLL);
+ IL_CMD(CONTROL_RTS);
+ IL_CMD(CONTROL_CTS);
+ IL_CMD(CONTROL_ACK);
+ IL_CMD(CONTROL_CFEND);
+ IL_CMD(CONTROL_CFENDACK);
+ default:
+ return "UNKNOWN";
+
+ }
+}
+
+void
+il_clear_traffic_stats(struct il_priv *il)
+{
+ memset(&il->tx_stats, 0, sizeof(struct traffic_stats));
+ memset(&il->rx_stats, 0, sizeof(struct traffic_stats));
+}
+
+/*
+ * If CONFIG_IWLEGACY_DEBUGFS is defined, il_update_stats records all
+ * MGMT, CTRL and DATA packets for both the TX and RX paths; use debugfs
+ * to display the tx/rx stats.
+ * If CONFIG_IWLEGACY_DEBUGFS is not defined, no MGMT or CTRL information
+ * is recorded, but DATA packets are still counted because il_led.c needs
+ * them to control LED blinking based on the number of tx and rx data
+ * frames.
+ */
+void
+il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
+{
+ struct traffic_stats *stats;
+
+ if (is_tx)
+ stats = &il->tx_stats;
+ else
+ stats = &il->rx_stats;
+
+ if (ieee80211_is_mgmt(fc)) {
+ switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+ stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
+ stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+ stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
+ stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
+ stats->mgmt[MANAGEMENT_PROBE_REQ]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
+ stats->mgmt[MANAGEMENT_PROBE_RESP]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_BEACON):
+ stats->mgmt[MANAGEMENT_BEACON]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_ATIM):
+ stats->mgmt[MANAGEMENT_ATIM]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
+ stats->mgmt[MANAGEMENT_DISASSOC]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_AUTH):
+ stats->mgmt[MANAGEMENT_AUTH]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+ stats->mgmt[MANAGEMENT_DEAUTH]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_ACTION):
+ stats->mgmt[MANAGEMENT_ACTION]++;
+ break;
+ }
+ } else if (ieee80211_is_ctl(fc)) {
+ switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+ case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
+ stats->ctrl[CONTROL_BACK_REQ]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_BACK):
+ stats->ctrl[CONTROL_BACK]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
+ stats->ctrl[CONTROL_PSPOLL]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_RTS):
+ stats->ctrl[CONTROL_RTS]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_CTS):
+ stats->ctrl[CONTROL_CTS]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_ACK):
+ stats->ctrl[CONTROL_ACK]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_CFEND):
+ stats->ctrl[CONTROL_CFEND]++;
+ break;
+ case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
+ stats->ctrl[CONTROL_CFENDACK]++;
+ break;
+ }
+ } else {
+ /* data */
+ stats->data_cnt++;
+ stats->data_bytes += len;
+ }
+}
+EXPORT_SYMBOL(il_update_stats);
+#endif
+
+int
+il_force_reset(struct il_priv *il, bool external)
+{
+ struct il_force_reset *force_reset;
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return -EINVAL;
+
+ force_reset = &il->force_reset;
+ force_reset->reset_request_count++;
+ if (!external) {
+ if (force_reset->last_force_reset_jiffies &&
+ time_after(force_reset->last_force_reset_jiffies +
+ force_reset->reset_duration, jiffies)) {
+ D_INFO("force reset rejected\n");
+ force_reset->reset_reject_count++;
+ return -EAGAIN;
+ }
+ }
+ force_reset->reset_success_count++;
+ force_reset->last_force_reset_jiffies = jiffies;
+
+ /*
+ * If the request is external (e.g. from debugfs), always perform
+ * it, regardless of the module parameter setting.
+ * If the request is internal (uCode error or driver-detected
+ * failure), the fw_restart module parameter must be checked
+ * before performing the firmware reload.
+ */
+
+ if (!external && !il->cfg->mod_params->restart_fw) {
+ D_INFO("Cancel firmware reload based on "
+ "module parameter setting\n");
+ return 0;
+ }
+
+ IL_ERR("On demand firmware reload\n");
+
+ /* Set the FW error flag -- cleared on il_down */
+ set_bit(S_FW_ERROR, &il->status);
+ wake_up(&il->wait_command_queue);
+ /*
+ * Keep the restart process from trying to send host
+ * commands by clearing the INIT status bit
+ */
+ clear_bit(S_READY, &il->status);
+ queue_work(il->workqueue, &il->restart);
+
+ return 0;
+}
+
+int
+il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum nl80211_iftype newtype, bool newp2p)
+{
+ struct il_priv *il = hw->priv;
+ struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+ u32 modes;
+ int err;
+
+ newtype = ieee80211_iftype_p2p(newtype, newp2p);
+
+ mutex_lock(&il->mutex);
+
+ if (!ctx->vif || !il_is_ready_rf(il)) {
+ /*
+ * Huh? But wait ... this can maybe happen when
+ * we're in the middle of a firmware restart!
+ */
+ err = -EBUSY;
+ goto out;
+ }
+
+ modes = ctx->interface_modes | ctx->exclusive_interface_modes;
+ if (!(modes & BIT(newtype))) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ if ((il->ctx.exclusive_interface_modes & BIT(il->ctx.vif->type)) ||
+ (il->ctx.exclusive_interface_modes & BIT(newtype))) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ /* success */
+ il_teardown_interface(il, vif, true);
+ vif->type = newtype;
+ vif->p2p = newp2p;
+ err = il_setup_interface(il, ctx);
+ WARN_ON(err);
+ /*
+ * We've switched internally, but submitting to the
+ * device may have failed for some reason. Mask this
+ * error, because otherwise mac80211 will not switch
+ * (and set the interface type back) and we'll be
+ * out of sync with it.
+ */
+ err = 0;
+
+out:
+ mutex_unlock(&il->mutex);
+ return err;
+}
+EXPORT_SYMBOL(il_mac_change_interface);
+
+/*
+ * On every watchdog tick we check the (latest) time stamp. If it has not
+ * changed during the timeout period and the queue is not empty, we reset
+ * the firmware.
+ */
+static int
+il_check_stuck_queue(struct il_priv *il, int cnt)
+{
+ struct il_tx_queue *txq = &il->txq[cnt];
+ struct il_queue *q = &txq->q;
+ unsigned long timeout;
+ int ret;
+
+ if (q->read_ptr == q->write_ptr) {
+ txq->time_stamp = jiffies;
+ return 0;
+ }
+
+ timeout =
+ txq->time_stamp +
+ msecs_to_jiffies(il->cfg->base_params->wd_timeout);
+
+ if (time_after(jiffies, timeout)) {
+ IL_ERR("Queue %d stuck for %u ms.\n", q->id,
+ il->cfg->base_params->wd_timeout);
+ ret = il_force_reset(il, false);
+ return (ret == -EAGAIN) ? 0 : 1;
+ }
+
+ return 0;
+}
+
+/*
+ * Making the watchdog tick a quarter of the timeout ensures that we
+ * discover a hung queue between timeout and 1.25*timeout.
+ */
+#define IL_WD_TICK(timeout) ((timeout) / 4)
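+/*
+ * For example, with wd_timeout = 2000 ms the watchdog runs every 500 ms,
+ * so a queue whose time stamp stops advancing is detected no later than
+ * 2000 + 500 = 2500 ms (1.25 * timeout) after it hung.
+ */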
+
+/*
+ * Watchdog timer callback: check each tx queue for a stuck condition and,
+ * if one is hung, reset the firmware. If everything is fine, just rearm
+ * the timer.
+ */
+void
+il_bg_watchdog(unsigned long data)
+{
+ struct il_priv *il = (struct il_priv *)data;
+ int cnt;
+ unsigned long timeout;
+
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ timeout = il->cfg->base_params->wd_timeout;
+ if (timeout == 0)
+ return;
+
+ /* monitor and check for stuck cmd queue */
+ if (il_check_stuck_queue(il, il->cmd_queue))
+ return;
+
+ /* monitor and check for other stuck queues */
+ if (il_is_any_associated(il)) {
+ for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+ /* skip as we already checked the command queue */
+ if (cnt == il->cmd_queue)
+ continue;
+ if (il_check_stuck_queue(il, cnt))
+ return;
+ }
+ }
+
+ mod_timer(&il->watchdog,
+ jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
+}
+EXPORT_SYMBOL(il_bg_watchdog);
+
+void
+il_setup_watchdog(struct il_priv *il)
+{
+ unsigned int timeout = il->cfg->base_params->wd_timeout;
+
+ if (timeout)
+ mod_timer(&il->watchdog,
+ jiffies + msecs_to_jiffies(IL_WD_TICK(timeout)));
+ else
+ del_timer(&il->watchdog);
+}
+EXPORT_SYMBOL(il_setup_watchdog);
+
+/*
+ * extended beacon time format
+ * time in usec will be changed into a 32-bit value in extended:internal format
+ * the extended part is the beacon counts
+ * the internal part is the time in usec within one beacon interval
+ */
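+/*
+ * Illustrative example, assuming beacon_time_tsf_bits = 22 and a 100 TU
+ * beacon interval (TIME_UNIT = 1024, i.e. 102400 usec): 250000 usec maps
+ * to extended = 250000 / 102400 = 2 beacons and internal =
+ * 250000 % 102400 = 45200 usec, packed as (2 << 22) | 45200.
+ */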
+u32
+il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval)
+{
+ u32 quot;
+ u32 rem;
+ u32 interval = beacon_interval * TIME_UNIT;
+
+ if (!interval || !usec)
+ return 0;
+
+ quot =
+ (usec /
+ interval) & (il_beacon_time_mask_high(il,
+ il->hw_params.
+ beacon_time_tsf_bits) >> il->
+ hw_params.beacon_time_tsf_bits);
+ rem =
+ (usec % interval) & il_beacon_time_mask_low(il,
+ il->hw_params.
+ beacon_time_tsf_bits);
+
+ return (quot << il->hw_params.beacon_time_tsf_bits) + rem;
+}
+EXPORT_SYMBOL(il_usecs_to_beacons);
+
+/* base is usually what we get from the uCode with each received frame;
+ * it is the same as the HW timer counter counting down.
+ */
+__le32
+il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
+ u32 beacon_interval)
+{
+ u32 base_low = base & il_beacon_time_mask_low(il,
+ il->hw_params.
+ beacon_time_tsf_bits);
+ u32 addon_low = addon & il_beacon_time_mask_low(il,
+ il->hw_params.
+ beacon_time_tsf_bits);
+ u32 interval = beacon_interval * TIME_UNIT;
+ u32 res = (base & il_beacon_time_mask_high(il,
+ il->hw_params.
+ beacon_time_tsf_bits)) +
+ (addon & il_beacon_time_mask_high(il,
+ il->hw_params.
+ beacon_time_tsf_bits));
+
+ if (base_low > addon_low)
+ res += base_low - addon_low;
+ else if (base_low < addon_low) {
+ res += interval + base_low - addon_low;
+ res += (1 << il->hw_params.beacon_time_tsf_bits);
+ } else
+ res += (1 << il->hw_params.beacon_time_tsf_bits);
+
+ return cpu_to_le32(res);
+}
+EXPORT_SYMBOL(il_add_beacon_time);
+
+#ifdef CONFIG_PM
+
+int
+il_pci_suspend(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct il_priv *il = pci_get_drvdata(pdev);
+
+ /*
+ * This function is called when the system goes into the suspend state.
+ * mac80211 will call il_mac_stop() from its suspend function first, but
+ * since il_mac_stop() has no knowledge of who the caller is, it will not
+ * call apm_ops.stop() to stop the DMA operation.
+ * Call apm_ops.stop here to make sure we stop the DMA.
+ */
+ il_apm_stop(il);
+
+ return 0;
+}
+EXPORT_SYMBOL(il_pci_suspend);
+
+int
+il_pci_resume(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct il_priv *il = pci_get_drvdata(pdev);
+ bool hw_rfkill = false;
+
+ /*
+ * We disable the RETRY_TIMEOUT register (0x41) to keep
+ * PCI Tx retries from interfering with C3 CPU state.
+ */
+ pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
+
+ il_enable_interrupts(il);
+
+ if (!(_il_rd(il, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
+ hw_rfkill = true;
+
+ if (hw_rfkill)
+ set_bit(S_RF_KILL_HW, &il->status);
+ else
+ clear_bit(S_RF_KILL_HW, &il->status);
+
+ wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rfkill);
+
+ return 0;
+}
+EXPORT_SYMBOL(il_pci_resume);
+
+const struct dev_pm_ops il_pm_ops = {
+ .suspend = il_pci_suspend,
+ .resume = il_pci_resume,
+ .freeze = il_pci_suspend,
+ .thaw = il_pci_resume,
+ .poweroff = il_pci_suspend,
+ .restore = il_pci_resume,
+};
+EXPORT_SYMBOL(il_pm_ops);
+
+#endif /* CONFIG_PM */
+
+static void
+il_update_qos(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ if (test_bit(S_EXIT_PENDING, &il->status))
+ return;
+
+ if (!ctx->is_active)
+ return;
+
+ ctx->qos_data.def_qos_parm.qos_flags = 0;
+
+ if (ctx->qos_data.qos_active)
+ ctx->qos_data.def_qos_parm.qos_flags |=
+ QOS_PARAM_FLG_UPDATE_EDCA_MSK;
+
+ if (ctx->ht.enabled)
+ ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
+
+ D_QOS("send QoS cmd with Qos active=%d FLAGS=0x%X\n",
+ ctx->qos_data.qos_active, ctx->qos_data.def_qos_parm.qos_flags);
+
+ il_send_cmd_pdu_async(il, ctx->qos_cmd, sizeof(struct il_qosparam_cmd),
+ &ctx->qos_data.def_qos_parm, NULL);
+}
+
+/**
+ * il_mac_config - mac80211 config callback
+ */
+int
+il_mac_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct il_priv *il = hw->priv;
+ const struct il_channel_info *ch_info;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_channel *channel = conf->channel;
+ struct il_ht_config *ht_conf = &il->current_ht_config;
+ struct il_rxon_context *ctx = &il->ctx;
+ unsigned long flags = 0;
+ int ret = 0;
+ u16 ch;
+ int scan_active = 0;
+ bool ht_changed = false;
+
+ if (WARN_ON(!il->cfg->ops->legacy))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&il->mutex);
+
+ D_MAC80211("enter to channel %d changed 0x%X\n", channel->hw_value,
+ changed);
+
+ if (unlikely(test_bit(S_SCANNING, &il->status))) {
+ scan_active = 1;
+ D_MAC80211("scan active\n");
+ }
+
+ if (changed &
+ (IEEE80211_CONF_CHANGE_SMPS | IEEE80211_CONF_CHANGE_CHANNEL)) {
+ /* mac80211 uses static for non-HT which is what we want */
+ il->current_ht_config.smps = conf->smps_mode;
+
+ /*
+ * Recalculate chain counts.
+ *
+ * If monitor mode is enabled then mac80211 will
+ * set up the SM PS mode to OFF if an HT channel is
+ * configured.
+ */
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, &il->ctx);
+ }
+
+ /* During scanning, mac80211 will delay channel setting until the
+ * scan finishes, with changed = 0
+ */
+ if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
+
+ if (scan_active)
+ goto set_ch_out;
+
+ ch = channel->hw_value;
+ ch_info = il_get_channel_info(il, channel->band, ch);
+ if (!il_is_channel_valid(ch_info)) {
+ D_MAC80211("leave - invalid channel\n");
+ ret = -EINVAL;
+ goto set_ch_out;
+ }
+
+ if (il->iw_mode == NL80211_IFTYPE_ADHOC &&
+ !il_is_channel_ibss(ch_info)) {
+ D_MAC80211("leave - not IBSS channel\n");
+ ret = -EINVAL;
+ goto set_ch_out;
+ }
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Configure HT40 channels */
+ if (ctx->ht.enabled != conf_is_ht(conf)) {
+ ctx->ht.enabled = conf_is_ht(conf);
+ ht_changed = true;
+ }
+ if (ctx->ht.enabled) {
+ if (conf_is_ht40_minus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ ctx->ht.is_40mhz = true;
+ } else if (conf_is_ht40_plus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ ctx->ht.is_40mhz = true;
+ } else {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ ctx->ht.is_40mhz = false;
+ }
+ } else
+ ctx->ht.is_40mhz = false;
+
+ /*
+ * Default to no protection. Protection mode will
+ * later be set from BSS config in il_ht_conf
+ */
+ ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
+
+ /* If we are switching from HT to 2.4 GHz, clear the flags
+ * from any HT-related info, since 2.4 GHz does not
+ * support HT */
+ if ((le16_to_cpu(ctx->staging.channel) != ch))
+ ctx->staging.flags = 0;
+
+ il_set_rxon_channel(il, channel, ctx);
+ il_set_rxon_ht(il, ht_conf);
+
+ il_set_flags_for_band(il, ctx, channel->band, ctx->vif);
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ if (il->cfg->ops->legacy->update_bcast_stations)
+ ret = il->cfg->ops->legacy->update_bcast_stations(il);
+
+set_ch_out:
+ /* The list of supported rates and rate mask can be different
+ * for each band; since the band may have changed, reset
+ * the rate mask to what mac80211 lists */
+ il_set_rate(il);
+ }
+
+ if (changed & (IEEE80211_CONF_CHANGE_PS | IEEE80211_CONF_CHANGE_IDLE)) {
+ ret = il_power_update_mode(il, false);
+ if (ret)
+ D_MAC80211("Error setting sleep level\n");
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
+ D_MAC80211("TX Power old=%d new=%d\n", il->tx_power_user_lmt,
+ conf->power_level);
+
+ il_set_tx_power(il, conf->power_level, false);
+ }
+
+ if (!il_is_ready(il)) {
+ D_MAC80211("leave - not ready\n");
+ goto out;
+ }
+
+ if (scan_active)
+ goto out;
+
+ if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
+ il_commit_rxon(il, ctx);
+ else
+ D_INFO("Not re-sending same RXON configuration.\n");
+ if (ht_changed)
+ il_update_qos(il, ctx);
+
+out:
+ D_MAC80211("leave\n");
+ mutex_unlock(&il->mutex);
+ return ret;
+}
+EXPORT_SYMBOL(il_mac_config);
+
+void
+il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct il_priv *il = hw->priv;
+ unsigned long flags;
+ struct il_rxon_context *ctx = &il->ctx;
+
+ if (WARN_ON(!il->cfg->ops->legacy))
+ return;
+
+ mutex_lock(&il->mutex);
+ D_MAC80211("enter\n");
+
+ spin_lock_irqsave(&il->lock, flags);
+ memset(&il->current_ht_config, 0, sizeof(struct il_ht_config));
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* New association: get rid of the IBSS beacon skb */
+ if (il->beacon_skb)
+ dev_kfree_skb(il->beacon_skb);
+
+ il->beacon_skb = NULL;
+
+ il->timestamp = 0;
+
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ il_scan_cancel_timeout(il, 100);
+ if (!il_is_ready_rf(il)) {
+ D_MAC80211("leave - not ready\n");
+ mutex_unlock(&il->mutex);
+ return;
+ }
+
+ /* We are restarting the association process;
+ * clear the RXON_FILTER_ASSOC_MSK bit
+ */
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ il_commit_rxon(il, ctx);
+
+ il_set_rate(il);
+
+ mutex_unlock(&il->mutex);
+
+ D_MAC80211("leave\n");
+}
+EXPORT_SYMBOL(il_mac_reset_tsf);
+
+static void
+il_ht_conf(struct il_priv *il, struct ieee80211_vif *vif)
+{
+ struct il_ht_config *ht_conf = &il->current_ht_config;
+ struct ieee80211_sta *sta;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
+ struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+
+ D_ASSOC("enter:\n");
+
+ if (!ctx->ht.enabled)
+ return;
+
+ ctx->ht.protection =
+ bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
+ ctx->ht.non_gf_sta_present =
+ !!(bss_conf->
+ ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+
+ ht_conf->single_chain_sufficient = false;
+
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, bss_conf->bssid);
+ if (sta) {
+ struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
+ int maxstreams;
+
+ maxstreams =
+ (ht_cap->mcs.
+ tx_params & IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
+ >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
+ maxstreams += 1;
+
+ if (ht_cap->mcs.rx_mask[1] == 0 &&
+ ht_cap->mcs.rx_mask[2] == 0)
+ ht_conf->single_chain_sufficient = true;
+ if (maxstreams <= 1)
+ ht_conf->single_chain_sufficient = true;
+ } else {
+ /*
+ * If at all, this can only happen through a race
+ * when the AP disconnects us while we're still
+ * setting up the connection, in that case mac80211
+ * will soon tell us about that.
+ */
+ ht_conf->single_chain_sufficient = true;
+ }
+ rcu_read_unlock();
+ break;
+ case NL80211_IFTYPE_ADHOC:
+ ht_conf->single_chain_sufficient = true;
+ break;
+ default:
+ break;
+ }
+
+ D_ASSOC("leave\n");
+}
+
+static inline void
+il_set_no_assoc(struct il_priv *il, struct ieee80211_vif *vif)
+{
+ struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+
+ /*
+ * inform the ucode that there is no longer an
+ * association and that no more packets should be
+ * sent
+ */
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ ctx->staging.assoc_id = 0;
+ il_commit_rxon(il, ctx);
+}
+
+static void
+il_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
+{
+ struct il_priv *il = hw->priv;
+ unsigned long flags;
+ __le64 timestamp;
+ struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
+
+ if (!skb)
+ return;
+
+ D_MAC80211("enter\n");
+
+ lockdep_assert_held(&il->mutex);
+
+ if (!il->beacon_ctx) {
+ IL_ERR("update beacon but no beacon context!\n");
+ dev_kfree_skb(skb);
+ return;
+ }
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ if (il->beacon_skb)
+ dev_kfree_skb(il->beacon_skb);
+
+ il->beacon_skb = skb;
+
+ timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
+ il->timestamp = le64_to_cpu(timestamp);
+
+ D_MAC80211("leave\n");
+ spin_unlock_irqrestore(&il->lock, flags);
+
+ if (!il_is_ready_rf(il)) {
+ D_MAC80211("leave - RF not ready\n");
+ return;
+ }
+
+ il->cfg->ops->legacy->post_associate(il);
+}
+
+void
+il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf, u32 changes)
+{
+ struct il_priv *il = hw->priv;
+ struct il_rxon_context *ctx = il_rxon_ctx_from_vif(vif);
+ int ret;
+
+ if (WARN_ON(!il->cfg->ops->legacy))
+ return;
+
+ D_MAC80211("changes = 0x%X\n", changes);
+
+ mutex_lock(&il->mutex);
+
+ if (!il_is_alive(il)) {
+ mutex_unlock(&il->mutex);
+ return;
+ }
+
+ if (changes & BSS_CHANGED_QOS) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&il->lock, flags);
+ ctx->qos_data.qos_active = bss_conf->qos;
+ il_update_qos(il, ctx);
+ spin_unlock_irqrestore(&il->lock, flags);
+ }
+
+ if (changes & BSS_CHANGED_BEACON_ENABLED) {
+ /*
+ * the add_interface code must make sure we only ever
+ * have a single interface that could be beaconing at
+ * any time.
+ */
+ if (vif->bss_conf.enable_beacon)
+ il->beacon_ctx = ctx;
+ else
+ il->beacon_ctx = NULL;
+ }
+
+ if (changes & BSS_CHANGED_BSSID) {
+ D_MAC80211("BSSID %pM\n", bss_conf->bssid);
+
+ /*
+ * If there is currently a HW scan going on in the
+ * background then we need to cancel it else the RXON
+ * below/in post_associate will fail.
+ */
+ if (il_scan_cancel_timeout(il, 100)) {
+ IL_WARN("Aborted scan still in progress after 100ms\n");
+ D_MAC80211("leaving - scan abort failed.\n");
+ mutex_unlock(&il->mutex);
+ return;
+ }
+
+ /* mac80211 only sets assoc when in STATION mode */
+ if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
+ memcpy(ctx->staging.bssid_addr, bss_conf->bssid,
+ ETH_ALEN);
+
+ /* currently needed in a few places */
+ memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
+ } else {
+ ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
+ }
+
+ }
+
+ /*
+ * This needs to be after setting the BSSID in case
+ * mac80211 decides to do both changes at once because
+ * it will invoke post_associate.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC && (changes & BSS_CHANGED_BEACON))
+ il_beacon_update(hw, vif);
+
+ if (changes & BSS_CHANGED_ERP_PREAMBLE) {
+ D_MAC80211("ERP_PREAMBLE %d\n", bss_conf->use_short_preamble);
+ if (bss_conf->use_short_preamble)
+ ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
+ }
+
+ if (changes & BSS_CHANGED_ERP_CTS_PROT) {
+ D_MAC80211("ERP_CTS %d\n", bss_conf->use_cts_prot);
+ if (bss_conf->use_cts_prot && il->band != IEEE80211_BAND_5GHZ)
+ ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
+ else
+ ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
+ if (bss_conf->use_cts_prot)
+ ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
+ else
+ ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
+ }
+
+ if (changes & BSS_CHANGED_BASIC_RATES) {
+ /* XXX use this information
+ *
+ * To do that, remove code from il_set_rate() and put something
+ * like this here:
+ *
+ if (A-band)
+ ctx->staging.ofdm_basic_rates =
+ bss_conf->basic_rates;
+ else
+ ctx->staging.ofdm_basic_rates =
+ bss_conf->basic_rates >> 4;
+ ctx->staging.cck_basic_rates =
+ bss_conf->basic_rates & 0xF;
+ */
+ }
+
+ if (changes & BSS_CHANGED_HT) {
+ il_ht_conf(il, vif);
+
+ if (il->cfg->ops->hcmd->set_rxon_chain)
+ il->cfg->ops->hcmd->set_rxon_chain(il, ctx);
+ }
+
+ if (changes & BSS_CHANGED_ASSOC) {
+ D_MAC80211("ASSOC %d\n", bss_conf->assoc);
+ if (bss_conf->assoc) {
+ il->timestamp = bss_conf->timestamp;
+
+ if (!il_is_rfkill(il))
+ il->cfg->ops->legacy->post_associate(il);
+ } else
+ il_set_no_assoc(il, vif);
+ }
+
+ if (changes && il_is_associated_ctx(ctx) && bss_conf->aid) {
+ D_MAC80211("Changes (%#x) while associated\n", changes);
+ ret = il_send_rxon_assoc(il, ctx);
+ if (!ret) {
+ /* Sync active_rxon with latest change. */
+ memcpy((void *)&ctx->active, &ctx->staging,
+ sizeof(struct il_rxon_cmd));
+ }
+ }
+
+ if (changes & BSS_CHANGED_BEACON_ENABLED) {
+ if (vif->bss_conf.enable_beacon) {
+ memcpy(ctx->staging.bssid_addr, bss_conf->bssid,
+ ETH_ALEN);
+ memcpy(il->bssid, bss_conf->bssid, ETH_ALEN);
+ il->cfg->ops->legacy->config_ap(il);
+ } else
+ il_set_no_assoc(il, vif);
+ }
+
+ if (changes & BSS_CHANGED_IBSS) {
+ ret =
+ il->cfg->ops->legacy->manage_ibss_station(il, vif,
+ bss_conf->
+ ibss_joined);
+ if (ret)
+ IL_ERR("failed to %s IBSS station %pM\n",
+ bss_conf->ibss_joined ? "add" : "remove",
+ bss_conf->bssid);
+ }
+
+ mutex_unlock(&il->mutex);
+
+ D_MAC80211("leave\n");
+}
+EXPORT_SYMBOL(il_mac_bss_info_changed);
+
+irqreturn_t
+il_isr(int irq, void *data)
+{
+ struct il_priv *il = data;
+ u32 inta, inta_mask;
+ u32 inta_fh;
+ unsigned long flags;
+
+ if (!il)
+ return IRQ_NONE;
+
+ spin_lock_irqsave(&il->lock, flags);
+
+ /* Disable (but don't clear!) interrupts here to avoid
+ * back-to-back ISRs and sporadic interrupts from our NIC.
+ * If we have something to service, the tasklet will re-enable ints.
+ * If we *don't* have something, we'll re-enable before leaving here. */
+ inta_mask = _il_rd(il, CSR_INT_MASK); /* just for debug */
+ _il_wr(il, CSR_INT_MASK, 0x00000000);
+
+ /* Discover which interrupts are active/pending */
+ inta = _il_rd(il, CSR_INT);
+ inta_fh = _il_rd(il, CSR_FH_INT_STATUS);
+
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+ if (!inta && !inta_fh) {
+ D_ISR("Ignore interrupt, inta == 0, inta_fh == 0\n");
+ goto none;
+ }
+
+ if (inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0) {
+ /* Hardware disappeared. It might have already raised
+ * an interrupt */
+ IL_WARN("HARDWARE GONE?? INTA == 0x%08x\n", inta);
+ goto unplugged;
+ }
+
+ D_ISR("ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n", inta, inta_mask,
+ inta_fh);
+
+ inta &= ~CSR_INT_BIT_SCD;
+
+ /* il_irq_tasklet() will service interrupts and re-enable them */
+ if (likely(inta || inta_fh))
+ tasklet_schedule(&il->irq_tasklet);
+
+unplugged:
+ spin_unlock_irqrestore(&il->lock, flags);
+ return IRQ_HANDLED;
+
+none:
+ /* re-enable interrupts here since we don't have anything to service. */
+ /* only Re-enable if disabled by irq */
+ if (test_bit(S_INT_ENABLED, &il->status))
+ il_enable_interrupts(il);
+ spin_unlock_irqrestore(&il->lock, flags);
+ return IRQ_NONE;
+}
+EXPORT_SYMBOL(il_isr);
+
+/*
+ * il_tx_cmd_protection: Set RTS/CTS. Only 3945 and 4965 share this
+ * function.
+ */
+void
+il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
+ __le16 fc, __le32 *tx_flags)
+{
+ if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
+ *tx_flags |= TX_CMD_FLG_RTS_MSK;
+ *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
+ *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+
+ if (!ieee80211_is_mgmt(fc))
+ return;
+
+ switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
+ case cpu_to_le16(IEEE80211_STYPE_AUTH):
+ case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
+ case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
+ case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
+ *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+ *tx_flags |= TX_CMD_FLG_CTS_MSK;
+ break;
+ }
+ } else if (info->control.rates[0].
+ flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
+ *tx_flags |= TX_CMD_FLG_CTS_MSK;
+ *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
+ }
+}
+EXPORT_SYMBOL(il_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlegacy/common.h b/drivers/net/wireless/iwlegacy/common.h
new file mode 100644
index 0000000..abfa388
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/common.h
@@ -0,0 +1,3246 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#ifndef __il_core_h__
+#define __il_core_h__
+
+#include <linux/interrupt.h>
+#include <linux/pci.h> /* for struct pci_device_id */
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/wait.h>
+#include <linux/io.h>
+#include <net/mac80211.h>
+#include <net/ieee80211_radiotap.h>
+
+#include "commands.h"
+#include "csr.h"
+#include "prph.h"
+
+struct il_host_cmd;
+struct il_cmd;
+struct il_tx_queue;
+
+#define IL_ERR(f, a...) dev_err(&il->pci_dev->dev, f, ## a)
+#define IL_WARN(f, a...) dev_warn(&il->pci_dev->dev, f, ## a)
+#define IL_INFO(f, a...) dev_info(&il->pci_dev->dev, f, ## a)
+
+#define RX_QUEUE_SIZE 256
+#define RX_QUEUE_MASK 255
+#define RX_QUEUE_SIZE_LOG 8
+
+/*
+ * RX related structures and functions
+ */
+#define RX_FREE_BUFFERS 64
+#define RX_LOW_WATERMARK 8
+
+#define U32_PAD(n) ((4-(n))&0x3)
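+/* e.g. U32_PAD(5) == 3 and U32_PAD(8) == 0: bytes needed to pad a
+ * payload of n bytes out to the next 32-bit boundary. */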
+
+/* CT-KILL constants */
+#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
+
+/* Default noise level to report when noise measurement is not available.
+ * This may be because we're:
+ * 1) Not associated (4965, no beacon stats being sent to driver)
+ * 2) Scanning (noise measurement does not apply to associated channel)
+ * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
+ * Use default noise value of -127 ... this is below the range of measurable
+ * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
+ * Also, -127 works better than 0 when averaging frames with/without
+ * noise info (e.g. averaging might be done in app); measured dBm values are
+ * always negative ... using a negative value as the default keeps all
+ * averages within an s8's (used in some apps) range of negative values. */
+#define IL_NOISE_MEAS_NOT_AVAILABLE (-127)
+
+/*
+ * RTS threshold here is total size [2347] minus 4 FCS bytes
+ * Per spec:
+ * a value of 0 means RTS on all data/management packets
+ * a value > max MSDU size means no RTS
+ * else RTS for data/management frames where MPDU is larger
+ * than RTS value.
+ */
+#define DEFAULT_RTS_THRESHOLD 2347U
+#define MIN_RTS_THRESHOLD 0U
+#define MAX_RTS_THRESHOLD 2347U
+#define MAX_MSDU_SIZE 2304U
+#define MAX_MPDU_SIZE 2346U
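+/* e.g. an rts_threshold of 500 requests RTS/CTS only for data/management
+ * frames whose MPDU is larger than 500 bytes; the default of 2347 is
+ * above MAX_MSDU_SIZE, so RTS is effectively off by default. */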
+#define DEFAULT_BEACON_INTERVAL 100U
+#define DEFAULT_SHORT_RETRY_LIMIT 7U
+#define DEFAULT_LONG_RETRY_LIMIT 4U
+
+struct il_rx_buf {
+ dma_addr_t page_dma;
+ struct page *page;
+ struct list_head list;
+};
+
+#define rxb_addr(r) page_address(r->page)
+
+/* defined below */
+struct il_device_cmd;
+
+struct il_cmd_meta {
+ /* only for SYNC commands, iff the reply skb is wanted */
+ struct il_host_cmd *source;
+ /*
+ * only for ASYNC commands
+ * (which is somewhat stupid -- look at common.c for instance
+ * which duplicates a bunch of code because the callback isn't
+ * invoked for SYNC commands, if it were and its result passed
+ * through it would be simpler...)
+ */
+ void (*callback) (struct il_priv *il, struct il_device_cmd *cmd,
+ struct il_rx_pkt *pkt);
+
+ /* The CMD_SIZE_HUGE flag bit indicates that the command
+ * structure is stored at the end of the shared queue memory. */
+ u32 flags;
+
+ DEFINE_DMA_UNMAP_ADDR(mapping);
+ DEFINE_DMA_UNMAP_LEN(len);
+};
+
+/*
+ * Generic queue structure
+ *
+ * Contains common data for Rx and Tx queues
+ */
+struct il_queue {
+ int n_bd; /* number of BDs in this queue */
+ int write_ptr; /* 1-st empty entry (idx) host_w */
+ int read_ptr; /* last used entry (idx) host_r */
+ /* use for monitoring and recovering the stuck queue */
+ dma_addr_t dma_addr; /* physical addr for BD's */
+ int n_win; /* safe queue win */
+ u32 id;
+ int low_mark; /* low watermark, resume queue if free
+ * space more than this */
+ int high_mark; /* high watermark, stop queue if free
+ * space less than this */
+};
+
+/* One for each TFD */
+struct il_tx_info {
+ struct sk_buff *skb;
+ struct il_rxon_context *ctx;
+};
+
+/**
+ * struct il_tx_queue - Tx Queue for DMA
+ * @q: generic Rx/Tx queue descriptor
+ * @bd: base of circular buffer of TFDs
+ * @cmd: array of command/TX buffer pointers
+ * @meta: array of meta data for each command/tx buffer
+ * @dma_addr_cmd: physical address of cmd/tx buffer array
+ * @txb: array of per-TFD driver data
+ * @time_stamp: time (in jiffies) of last read_ptr change
+ * @need_update: indicates need to update read/write idx
+ * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
+ *
+ * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
+ * descriptors) and required locking structures.
+ */
+#define TFD_TX_CMD_SLOTS 256
+#define TFD_CMD_SLOTS 32
+
+struct il_tx_queue {
+ struct il_queue q;
+ void *tfds;
+ struct il_device_cmd **cmd;
+ struct il_cmd_meta *meta;
+ struct il_tx_info *txb;
+ unsigned long time_stamp;
+ u8 need_update;
+ u8 sched_retry;
+ u8 active;
+ u8 swq_id;
+};
+
+/*
+ * EEPROM access time values:
+ *
+ * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
+ * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
+ * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
+ * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
+ */
+#define IL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
+
+#define IL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
+#define IL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
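+/*
+ * Illustrative sketch of the read sequence described above, using the
+ * register helpers seen elsewhere in this driver (exact call site and
+ * error handling are assumptions):
+ *
+ *	_il_wr(il, CSR_EEPROM_REG, addr << 1);
+ *	ret = _il_poll_bit(il, CSR_EEPROM_REG,
+ *			   CSR_EEPROM_REG_READ_VALID_MSK,
+ *			   CSR_EEPROM_REG_READ_VALID_MSK,
+ *			   IL_EEPROM_ACCESS_TIMEOUT);
+ *	if (ret >= 0)
+ *		value = (u16)(_il_rd(il, CSR_EEPROM_REG) >> 16);
+ */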
+
+/*
+ * Regulatory channel usage flags in EEPROM struct il4965_eeprom_channel.flags.
+ *
+ * IBSS and/or AP operation is allowed *only* on those channels with
+ * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
+ * RADAR detection is not supported by the 4965 driver, but is a
+ * requirement for establishing a new network for legal operation on channels
+ * requiring RADAR detection or restricting ACTIVE scanning.
+ *
+ * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
+ * It only indicates that 20 MHz channel use is supported; HT40 channel
+ * usage is indicated by a separate set of regulatory flags for each
+ * HT40 channel pair.
+ *
+ * NOTE: Using a channel inappropriately will result in a uCode error!
+ */
+#define IL_NUM_TX_CALIB_GROUPS 5
+enum {
+ EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
+ EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
+ /* Bit 2 Reserved */
+ EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
+ EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
+ EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
+ /* Bit 6 Reserved (was Narrow Channel) */
+ EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
+};
+
+/* SKU Capabilities */
+/* 3945 only */
+#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
+#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
+
+/* *regulatory* channel data format in eeprom, one for each channel.
+ * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
+struct il_eeprom_channel {
+ u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
+ s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
+} __packed;
+
+/* 3945 Specific */
+#define EEPROM_3945_EEPROM_VERSION (0x2f)
+
+/* 4965 has two radio transmitters (and 3 radio receivers) */
+#define EEPROM_TX_POWER_TX_CHAINS (2)
+
+/* 4965 has room for up to 8 sets of txpower calibration data */
+#define EEPROM_TX_POWER_BANDS (8)
+
+/* 4965 factory calibration measures txpower gain settings for
+ * each of 3 target output levels */
+#define EEPROM_TX_POWER_MEASUREMENTS (3)
+
+/* 4965 Specific */
+/* 4965 driver does not work with txpower calibration version < 5 */
+#define EEPROM_4965_TX_POWER_VERSION (5)
+#define EEPROM_4965_EEPROM_VERSION (0x2f)
+#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
+#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
+#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
+#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
+
+/* 2.4 GHz */
+extern const u8 il_eeprom_band_1[14];
+
+/*
+ * factory calibration data for one txpower level, on one channel,
+ * measured on one of the 2 tx chains (radio transmitter and associated
+ * antenna). EEPROM contains:
+ *
+ * 1) Temperature (degrees Celsius) of device when measurement was made.
+ *
+ * 2) Gain table idx used to achieve the target measurement power.
+ * This refers to the "well-known" gain tables (see 4965.h).
+ *
+ * 3) Actual measured output power, in half-dBm ("34" = 17 dBm).
+ *
+ * 4) RF power amplifier detector level measurement (not used).
+ */
+struct il_eeprom_calib_measure {
+ u8 temperature; /* Device temperature (Celsius) */
+ u8 gain_idx; /* Index into gain table */
+ u8 actual_pow; /* Measured RF output power, half-dBm */
+ s8 pa_det; /* Power amp detector level (not used) */
+} __packed;
+
+/*
+ * measurement set for one channel. EEPROM contains:
+ *
+ * 1) Channel number measured
+ *
+ * 2) Measurements for each of 3 power levels for each of 2 radio transmitters
+ * (a.k.a. "tx chains") (6 measurements altogether)
+ */
+struct il_eeprom_calib_ch_info {
+ u8 ch_num;
+ struct il_eeprom_calib_measure
+ measurements[EEPROM_TX_POWER_TX_CHAINS]
+ [EEPROM_TX_POWER_MEASUREMENTS];
+} __packed;
+
+/*
+ * txpower subband info.
+ *
+ * For each frequency subband, EEPROM contains the following:
+ *
+ * 1) First and last channels within range of the subband. "0" values
+ * indicate that this sample set is not being used.
+ *
+ * 2) Sample measurement sets for 2 channels close to the range endpoints.
+ */
+struct il_eeprom_calib_subband_info {
+ u8 ch_from; /* channel number of lowest channel in subband */
+ u8 ch_to; /* channel number of highest channel in subband */
+ struct il_eeprom_calib_ch_info ch1;
+ struct il_eeprom_calib_ch_info ch2;
+} __packed;
+
+/*
+ * txpower calibration info. EEPROM contains:
+ *
+ * 1) Factory-measured saturation power levels (maximum levels at which
+ * tx power amplifier can output a signal without too much distortion).
+ * There is one level for 2.4 GHz band and one for 5 GHz band. These
+ * values apply to all channels within each of the bands.
+ *
+ * 2) Factory-measured power supply voltage level. This is assumed to be
+ * constant (i.e. same value applies to all channels/bands) while the
+ * factory measurements are being made.
+ *
+ * 3) Up to 8 sets of factory-measured txpower calibration values.
+ * These are for different frequency ranges, since txpower gain
+ * characteristics of the analog radio circuitry vary with frequency.
+ *
+ * Not all sets need to be filled with data;
+ * struct il_eeprom_calib_subband_info contains range of channels
+ * (0 if unused) for each set of data.
+ */
+struct il_eeprom_calib_info {
+ u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
+ u8 saturation_power52; /* half-dBm */
+ __le16 voltage; /* signed */
+ struct il_eeprom_calib_subband_info band_info[EEPROM_TX_POWER_BANDS];
+} __packed;
+
+/* General */
+#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
+#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
+#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
+#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
+#define EEPROM_VERSION (2*0x44) /* 2 bytes */
+#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
+#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
+#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
+#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
+#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
+
+/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
+#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
+#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
+#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
+#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
+#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
+#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
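+/* e.g. a radio config word of 0x3321 decodes as type = 1, step = 0,
+ * dash = 2, pnum = 0, tx antenna mask = 0x3, rx antenna mask = 0x3
+ * (illustrative value only). */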
+
+#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
+#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
+
+/*
+ * Per-channel regulatory data.
+ *
+ * Each channel that *might* be supported by iwl has a fixed location
+ * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
+ * txpower (MSB).
+ *
+ * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz)
+ * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
+ *
+ * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+ */
+#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
+#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
+
+/*
+ * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
+ * 5.0 GHz channels 7, 8, 11, 12, 16
+ * (4915-5080MHz) (none of these is ever supported)
+ */
+#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
+
+/*
+ * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
+ * (5170-5320MHz)
+ */
+#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
+
+/*
+ * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
+ * (5500-5700MHz)
+ */
+#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
+
+/*
+ * 5.7 GHz channels 145, 149, 153, 157, 161, 165
+ * (5725-5825MHz)
+ */
+#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
+#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
+
+/*
+ * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
+ *
+ * The channel listed is the center of the lower 20 MHz half of the channel.
+ * The overall center frequency is actually 2 channels (10 MHz) above that,
+ * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
+ * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
+ * and the overall HT40 channel width centers on channel 3.
+ *
+ * NOTE: The RXON command uses 20 MHz channel numbers to specify the
+ * control channel to which to tune. RXON also specifies whether the
+ * control channel is the upper or lower half of a HT40 channel.
+ *
+ * NOTE: 4965 does not support HT40 channels on 2.4 GHz.
+ */
+#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */
+
+/*
+ * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
+ * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
+ */
+#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */
+
+#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
+
+struct il_eeprom_ops {
+ const u32 regulatory_bands[7];
+ int (*acquire_semaphore) (struct il_priv *il);
+ void (*release_semaphore) (struct il_priv *il);
+};
+
+int il_eeprom_init(struct il_priv *il);
+void il_eeprom_free(struct il_priv *il);
+const u8 *il_eeprom_query_addr(const struct il_priv *il, size_t offset);
+u16 il_eeprom_query16(const struct il_priv *il, size_t offset);
+int il_init_channel_map(struct il_priv *il);
+void il_free_channel_map(struct il_priv *il);
+const struct il_channel_info *il_get_channel_info(const struct il_priv *il,
+ enum ieee80211_band band,
+ u16 channel);
+
+#define IL_NUM_SCAN_RATES (2)
+
+struct il4965_channel_tgd_info {
+ u8 type;
+ s8 max_power;
+};
+
+struct il4965_channel_tgh_info {
+ s64 last_radar_time;
+};
+
+#define IL4965_MAX_RATE (33)
+
+struct il3945_clip_group {
+ /* maximum power level to prevent clipping for each rate, derived by
+ * us from this band's saturation power in EEPROM */
+ const s8 clip_powers[IL_MAX_RATES];
+};
+
+/* current Tx power values to use, one for each rate for each channel.
+ * requested power is limited by:
+ * -- regulatory EEPROM limits for this channel
+ * -- hardware capabilities (clip-powers)
+ * -- spectrum management
+ * -- user preference (e.g. iwconfig)
+ * when requested power is set, base power idx must also be set. */
+struct il3945_channel_power_info {
+ struct il3945_tx_power tpc; /* actual radio and DSP gain settings */
+ s8 power_table_idx; /* actual (compensated) idx into gain table */
+ s8 base_power_idx; /* gain idx for power at factory temp. */
+ s8 requested_power; /* power (dBm) requested for this chnl/rate */
+};
+
+/* current scan Tx power values to use, one for each scan rate for each
+ * channel. */
+struct il3945_scan_power_info {
+ struct il3945_tx_power tpc; /* actual radio and DSP gain settings */
+ s8 power_table_idx; /* actual (compensated) idx into gain table */
+ s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */
+};
+
+/*
+ * One for each channel, holds all channel setup data
+ * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
+ * with one another!
+ */
+struct il_channel_info {
+ struct il4965_channel_tgd_info tgd;
+ struct il4965_channel_tgh_info tgh;
+ struct il_eeprom_channel eeprom; /* EEPROM regulatory limit */
+ struct il_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for
+ * HT40 channel */
+
+ u8 channel; /* channel number */
+ u8 flags; /* flags copied from EEPROM */
+ s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
+ s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */
+ s8 min_power; /* always 0 */
+ s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */
+
+ u8 group_idx; /* 0-4, maps channel to group1/2/3/4/5 */
+ u8 band_idx; /* 0-4, maps channel to band1/2/3/4/5 */
+ enum ieee80211_band band;
+
+ /* HT40 channel info */
+ s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
+ u8 ht40_flags; /* flags copied from EEPROM */
+ u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
+
+ /* Radio/DSP gain settings for each "normal" data Tx rate.
+ * These include, in addition to RF and DSP gain, a few fields for
+ * remembering/modifying gain settings (idxes). */
+ struct il3945_channel_power_info power_info[IL4965_MAX_RATE];
+
+ /* Radio/DSP gain settings for each scan rate, for directed scans. */
+ struct il3945_scan_power_info scan_pwr_info[IL_NUM_SCAN_RATES];
+};
+
+#define IL_TX_FIFO_BK 0 /* shared */
+#define IL_TX_FIFO_BE 1
+#define IL_TX_FIFO_VI 2 /* shared */
+#define IL_TX_FIFO_VO 3
+#define IL_TX_FIFO_UNUSED -1
+
+/* Minimum number of queues. MAX_NUM is defined in hw specific files.
+ * Set the minimum to accommodate the 4 standard TX queues, 1 command
+ * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
+#define IL_MIN_NUM_QUEUES 10
+
+#define IL_DEFAULT_CMD_QUEUE_NUM 4
+
+#define IEEE80211_DATA_LEN 2304
+#define IEEE80211_4ADDR_LEN 30
+#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
+#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
+
+struct il_frame {
+ union {
+ struct ieee80211_hdr frame;
+ struct il_tx_beacon_cmd beacon;
+ u8 raw[IEEE80211_FRAME_LEN];
+ u8 cmd[360];
+ } u;
+ struct list_head list;
+};
+
+#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
+#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
+#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
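+
+/*
+ * Illustrative example (not part of the original source): since
+ * IEEE80211_SCTL_SEQ covers bits 4-15 of the sequence-control field,
+ * SEQ_TO_SN(0x1234) yields 0x123 and SN_TO_SEQ(0x123) restores 0x1230;
+ * the fragment number in bits 0-3 is discarded.
+ */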
+
+enum {
+ CMD_SYNC = 0,
+ CMD_SIZE_NORMAL = 0,
+ CMD_NO_SKB = 0,
+ CMD_SIZE_HUGE = (1 << 0),
+ CMD_ASYNC = (1 << 1),
+ CMD_WANT_SKB = (1 << 2),
+ CMD_MAPPED = (1 << 3),
+};
+
+#define DEF_CMD_PAYLOAD_SIZE 320
+
+/**
+ * struct il_device_cmd
+ *
+ * For allocation of the command and tx queues, this establishes the overall
+ * size of the largest command we send to uCode, except for a scan command
+ * (which is relatively huge; space is allocated separately).
+ */
+struct il_device_cmd {
+ struct il_cmd_header hdr; /* uCode API */
+ union {
+ u32 flags;
+ u8 val8;
+ u16 val16;
+ u32 val32;
+ struct il_tx_cmd tx;
+ u8 payload[DEF_CMD_PAYLOAD_SIZE];
+ } __packed cmd;
+} __packed;
+
+#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct il_device_cmd))
+
+struct il_host_cmd {
+ const void *data;
+ unsigned long reply_page;
+ void (*callback) (struct il_priv *il, struct il_device_cmd *cmd,
+ struct il_rx_pkt *pkt);
+ u32 flags;
+ u16 len;
+ u8 id;
+};
+
+#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
+#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
+#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
+
+/**
+ * struct il_rx_queue - Rx queue
+ * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
+ * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
+ * @read: Shared idx to newest available Rx buffer
+ * @write: Shared idx to oldest written Rx packet
+ * @free_count: Number of pre-allocated buffers in rx_free
+ * @rx_free: list of free SKBs for use
+ * @rx_used: List of Rx buffers with no SKB
+ * @need_update: flag to indicate we need to update read/write idx
+ * @rb_stts: driver's pointer to receive buffer status
+ * @rb_stts_dma: bus address of receive buffer status
+ *
+ * NOTE: rx_free and rx_used are used as a FIFO for il_rx_bufs
+ */
+struct il_rx_queue {
+ __le32 *bd;
+ dma_addr_t bd_dma;
+ struct il_rx_buf pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
+ struct il_rx_buf *queue[RX_QUEUE_SIZE];
+ u32 read;
+ u32 write;
+ u32 free_count;
+ u32 write_actual;
+ struct list_head rx_free;
+ struct list_head rx_used;
+ int need_update;
+ struct il_rb_status *rb_stts;
+ dma_addr_t rb_stts_dma;
+ spinlock_t lock;
+};
+
+#define IL_SUPPORTED_RATES_IE_LEN 8
+
+#define MAX_TID_COUNT 9
+
+#define IL_INVALID_RATE 0xFF
+#define IL_INVALID_VALUE -1
+
+/**
+ * struct il_ht_agg -- aggregation status while waiting for block-ack
+ * @txq_id: Tx queue used for Tx attempt
+ * @frame_count: # frames attempted by Tx command
+ * @wait_for_ba: Expect block-ack before next Tx reply
+ * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx win
+ * @bitmap: One bit for each frame pending ACK in the Tx win
+ * @rate_n_flags: Rate at which Tx was attempted
+ *
+ * If C_TX indicates that aggregation was attempted, driver must wait
+ * for block ack (N_COMPRESSED_BA). This struct stores tx reply info
+ * until block ack arrives.
+ */
+struct il_ht_agg {
+ u16 txq_id;
+ u16 frame_count;
+ u16 wait_for_ba;
+ u16 start_idx;
+ u64 bitmap;
+ u32 rate_n_flags;
+#define IL_AGG_OFF 0
+#define IL_AGG_ON 1
+#define IL_EMPTYING_HW_QUEUE_ADDBA 2
+#define IL_EMPTYING_HW_QUEUE_DELBA 3
+ u8 state;
+};
+
+struct il_tid_data {
+ u16 seq_number; /* 4965 only */
+ u16 tfds_in_queue;
+ struct il_ht_agg agg;
+};
+
+struct il_hw_key {
+ u32 cipher;
+ int keylen;
+ u8 keyidx;
+ u8 key[32];
+};
+
+union il_ht_rate_supp {
+ u16 rates;
+ struct {
+ u8 siso_rate;
+ u8 mimo_rate;
+ };
+};
+
+#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0)
+#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1)
+#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2)
+#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3)
+#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K
+#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K
+#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K
+
+/*
+ * Maximal MPDU density for TX aggregation
+ * 4 - 2us density
+ * 5 - 4us density
+ * 6 - 8us density
+ * 7 - 16us density
+ */
+#define CFG_HT_MPDU_DENSITY_2USEC (0x4)
+#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
+#define CFG_HT_MPDU_DENSITY_8USEC (0x6)
+#define CFG_HT_MPDU_DENSITY_16USEC (0x7)
+#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
+#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
+#define CFG_HT_MPDU_DENSITY_MIN (0x1)
+
+struct il_ht_config {
+ bool single_chain_sufficient;
+ enum ieee80211_smps_mode smps; /* current smps mode */
+};
+
+/* QoS structures */
+struct il_qos_info {
+ int qos_active;
+ struct il_qosparam_cmd def_qos_parm;
+};
+
+/*
+ * Structure should be accessed with sta_lock held. When station addition
+ * is in progress (IL_STA_UCODE_INPROGRESS) it is possible to access only
+ * the commands (il_addsta_cmd and il_link_quality_cmd) without
+ * sta_lock held.
+ */
+struct il_station_entry {
+ struct il_addsta_cmd sta;
+ struct il_tid_data tid[MAX_TID_COUNT];
+ u8 used, ctxid;
+ struct il_hw_key keyinfo;
+ struct il_link_quality_cmd *lq;
+};
+
+struct il_station_priv_common {
+ struct il_rxon_context *ctx;
+ u8 sta_id;
+};
+
+/**
+ * struct il_vif_priv - driver's private per-interface information
+ *
+ * When mac80211 allocates a virtual interface, it can allocate
+ * space for us to put data into.
+ */
+struct il_vif_priv {
+ struct il_rxon_context *ctx;
+ u8 ibss_bssid_sta_id;
+};
+
+/* one for each uCode image (inst/data, boot/init/runtime) */
+struct fw_desc {
+ void *v_addr; /* access by driver */
+ dma_addr_t p_addr; /* access by card's busmaster DMA */
+ u32 len; /* bytes */
+};
+
+/* uCode file layout */
+struct il_ucode_header {
+ __le32 ver; /* major/minor/API/serial */
+ struct {
+ __le32 inst_size; /* bytes of runtime code */
+ __le32 data_size; /* bytes of runtime data */
+ __le32 init_size; /* bytes of init code */
+ __le32 init_data_size; /* bytes of init data */
+ __le32 boot_size; /* bytes of bootstrap code */
+ u8 data[0]; /* in same order as sizes */
+ } v1;
+};
+
+struct il4965_ibss_seq {
+ u8 mac[ETH_ALEN];
+ u16 seq_num;
+ u16 frag_num;
+ unsigned long packet_time;
+ struct list_head list;
+};
+
+struct il_sensitivity_ranges {
+ u16 min_nrg_cck;
+ u16 max_nrg_cck;
+
+ u16 nrg_th_cck;
+ u16 nrg_th_ofdm;
+
+ u16 auto_corr_min_ofdm;
+ u16 auto_corr_min_ofdm_mrc;
+ u16 auto_corr_min_ofdm_x1;
+ u16 auto_corr_min_ofdm_mrc_x1;
+
+ u16 auto_corr_max_ofdm;
+ u16 auto_corr_max_ofdm_mrc;
+ u16 auto_corr_max_ofdm_x1;
+ u16 auto_corr_max_ofdm_mrc_x1;
+
+ u16 auto_corr_max_cck;
+ u16 auto_corr_max_cck_mrc;
+ u16 auto_corr_min_cck;
+ u16 auto_corr_min_cck_mrc;
+
+ u16 barker_corr_th_min;
+ u16 barker_corr_th_min_mrc;
+ u16 nrg_th_cca;
+};
+
+#define KELVIN_TO_CELSIUS(x) ((x)-273)
+#define CELSIUS_TO_KELVIN(x) ((x)+273)
+
+/**
+ * struct il_hw_params
+ * @max_txq_num: Max # Tx queues supported
+ * @dma_chnl_num: Number of Tx DMA/FIFO channels
+ * @scd_bc_tbls_size: size of scheduler byte count tables
+ * @tfd_size: TFD size
+ * @tx/rx_chains_num: Number of TX/RX chains
+ * @valid_tx/rx_ant: usable antennas
+ * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
+ * @max_rxq_log: Log-base-2 of max_rxq_size
+ * @rx_page_order: Rx buffer page order
+ * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
+ * @max_stations:
+ * @ht40_channel: bitmap of bands where 40MHz (HT40) width is possible:
+ * BIT(IEEE80211_BAND_2GHZ) and/or BIT(IEEE80211_BAND_5GHZ)
+ * @sw_crypto: 0 for hw, 1 for sw
+ * @max_xxx_size: for ucode uses
+ * @ct_kill_threshold: temperature threshold
+ * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
+ * @sens: pointer to the range of sensitivity values (struct il_sensitivity_ranges)
+ */
+struct il_hw_params {
+ u8 max_txq_num;
+ u8 dma_chnl_num;
+ u16 scd_bc_tbls_size;
+ u32 tfd_size;
+ u8 tx_chains_num;
+ u8 rx_chains_num;
+ u8 valid_tx_ant;
+ u8 valid_rx_ant;
+ u16 max_rxq_size;
+ u16 max_rxq_log;
+ u32 rx_page_order;
+ u32 rx_wrt_ptr_reg;
+ u8 max_stations;
+ u8 ht40_channel;
+ u8 max_beacon_itrvl; /* in 1024 ms */
+ u32 max_inst_size;
+ u32 max_data_size;
+ u32 max_bsm_size;
+ u32 ct_kill_threshold; /* value in hw-dependent units */
+ u16 beacon_time_tsf_bits;
+ const struct il_sensitivity_ranges *sens;
+};
+
+/******************************************************************************
+ *
+ * Functions implemented in core module which are forward declared here
+ * for use by iwl-[4-5].c
+ *
+ * NOTE: The implementation of these functions are not hardware specific
+ * which is why they are in the core module files.
+ *
+ * Naming convention --
+ * il_ <-- Is part of iwlwifi
+ * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
+ * il4965_bg_ <-- Called from work queue context
+ * il4965_mac_ <-- mac80211 callback
+ *
+ ****************************************************************************/
+extern void il4965_update_chain_flags(struct il_priv *il);
+extern const u8 il_bcast_addr[ETH_ALEN];
+extern int il_queue_space(const struct il_queue *q);
+static inline int
+il_queue_used(const struct il_queue *q, int i)
+{
+ return q->write_ptr >= q->read_ptr ?
+        (i >= q->read_ptr && i < q->write_ptr) :
+        !(i < q->read_ptr && i >= q->write_ptr);
+}
+
+static inline u8
+il_get_cmd_idx(struct il_queue *q, u32 idx, int is_huge)
+{
+ /*
+ * Init calibration results and scan commands need a buffer larger
+ * than TFD_MAX_PAYLOAD_SIZE, so they use the big buffer at the end
+ * of the command array.
+ */
+ if (is_huge)
+ return q->n_win; /* must be power of 2 */
+
+ /* Otherwise, use normal size buffers */
+ return idx & (q->n_win - 1);
+}
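+
+/*
+ * Illustrative example (not part of the original source), assuming
+ * q->n_win == 64: a normal command at idx 70 maps to slot 70 & 63 == 6,
+ * while a huge command always maps to the single oversized slot at
+ * q->n_win (64), regardless of idx.
+ */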
+
+struct il_dma_ptr {
+ dma_addr_t dma;
+ void *addr;
+ size_t size;
+};
+
+#define IL_OPERATION_MODE_AUTO 0
+#define IL_OPERATION_MODE_HT_ONLY 1
+#define IL_OPERATION_MODE_MIXED 2
+#define IL_OPERATION_MODE_20MHZ 3
+
+#define IL_TX_CRC_SIZE 4
+#define IL_TX_DELIMITER_SIZE 4
+
+#define TX_POWER_IL_ILLEGAL_VOLTAGE -10000
+
+/* Sensitivity and chain noise calibration */
+#define INITIALIZATION_VALUE 0xFFFF
+#define IL4965_CAL_NUM_BEACONS 20
+#define IL_CAL_NUM_BEACONS 16
+#define MAXIMUM_ALLOWED_PATHLOSS 15
+
+#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
+
+#define MAX_FA_OFDM 50
+#define MIN_FA_OFDM 5
+#define MAX_FA_CCK 50
+#define MIN_FA_CCK 5
+
+#define AUTO_CORR_STEP_OFDM 1
+
+#define AUTO_CORR_STEP_CCK 3
+#define AUTO_CORR_MAX_TH_CCK 160
+
+#define NRG_DIFF 2
+#define NRG_STEP_CCK 2
+#define NRG_MARGIN 8
+#define MAX_NUMBER_CCK_NO_FA 100
+
+#define AUTO_CORR_CCK_MIN_VAL_DEF (125)
+
+#define CHAIN_A 0
+#define CHAIN_B 1
+#define CHAIN_C 2
+#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
+#define ALL_BAND_FILTER 0xFF00
+#define IN_BAND_FILTER 0xFF
+#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF
+
+#define NRG_NUM_PREV_STAT_L 20
+#define NUM_RX_CHAINS 3
+
+enum il4965_false_alarm_state {
+ IL_FA_TOO_MANY = 0,
+ IL_FA_TOO_FEW = 1,
+ IL_FA_GOOD_RANGE = 2,
+};
+
+enum il4965_chain_noise_state {
+ IL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */
+ IL_CHAIN_NOISE_ACCUMULATE,
+ IL_CHAIN_NOISE_CALIBRATED,
+ IL_CHAIN_NOISE_DONE,
+};
+
+enum il4965_calib_enabled_state {
+ IL_CALIB_DISABLED = 0, /* must be 0 */
+ IL_CALIB_ENABLED = 1,
+};
+
+/*
+ * enum il_calib
+ * defines the order in which results of initial calibrations
+ * should be sent to the runtime uCode
+ */
+enum il_calib {
+ IL_CALIB_MAX,
+};
+
+/* Opaque calibration results */
+struct il_calib_result {
+ void *buf;
+ size_t buf_len;
+};
+
+enum ucode_type {
+ UCODE_NONE = 0,
+ UCODE_INIT,
+ UCODE_RT
+};
+
+/* Sensitivity calib data */
+struct il_sensitivity_data {
+ u32 auto_corr_ofdm;
+ u32 auto_corr_ofdm_mrc;
+ u32 auto_corr_ofdm_x1;
+ u32 auto_corr_ofdm_mrc_x1;
+ u32 auto_corr_cck;
+ u32 auto_corr_cck_mrc;
+
+ u32 last_bad_plcp_cnt_ofdm;
+ u32 last_fa_cnt_ofdm;
+ u32 last_bad_plcp_cnt_cck;
+ u32 last_fa_cnt_cck;
+
+ u32 nrg_curr_state;
+ u32 nrg_prev_state;
+ u32 nrg_value[10];
+ u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
+ u32 nrg_silence_ref;
+ u32 nrg_energy_idx;
+ u32 nrg_silence_idx;
+ u32 nrg_th_cck;
+ s32 nrg_auto_corr_silence_diff;
+ u32 num_in_cck_no_fa;
+ u32 nrg_th_ofdm;
+
+ u16 barker_corr_th_min;
+ u16 barker_corr_th_min_mrc;
+ u16 nrg_th_cca;
+};
+
+/* Chain noise (differential Rx gain) calib data */
+struct il_chain_noise_data {
+ u32 active_chains;
+ u32 chain_noise_a;
+ u32 chain_noise_b;
+ u32 chain_noise_c;
+ u32 chain_signal_a;
+ u32 chain_signal_b;
+ u32 chain_signal_c;
+ u16 beacon_count;
+ u8 disconn_array[NUM_RX_CHAINS];
+ u8 delta_gain_code[NUM_RX_CHAINS];
+ u8 radio_write;
+ u8 state;
+};
+
+#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
+#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
+
+#define IL_TRAFFIC_ENTRIES (256)
+#define IL_TRAFFIC_ENTRY_SIZE (64)
+
+enum {
+ MEASUREMENT_READY = (1 << 0),
+ MEASUREMENT_ACTIVE = (1 << 1),
+};
+
+/* interrupt stats */
+struct isr_stats {
+ u32 hw;
+ u32 sw;
+ u32 err_code;
+ u32 sch;
+ u32 alive;
+ u32 rfkill;
+ u32 ctkill;
+ u32 wakeup;
+ u32 rx;
+ u32 handlers[IL_CN_MAX];
+ u32 tx;
+ u32 unhandled;
+};
+
+/* management stats */
+enum il_mgmt_stats {
+ MANAGEMENT_ASSOC_REQ = 0,
+ MANAGEMENT_ASSOC_RESP,
+ MANAGEMENT_REASSOC_REQ,
+ MANAGEMENT_REASSOC_RESP,
+ MANAGEMENT_PROBE_REQ,
+ MANAGEMENT_PROBE_RESP,
+ MANAGEMENT_BEACON,
+ MANAGEMENT_ATIM,
+ MANAGEMENT_DISASSOC,
+ MANAGEMENT_AUTH,
+ MANAGEMENT_DEAUTH,
+ MANAGEMENT_ACTION,
+ MANAGEMENT_MAX,
+};
+/* control stats */
+enum il_ctrl_stats {
+ CONTROL_BACK_REQ = 0,
+ CONTROL_BACK,
+ CONTROL_PSPOLL,
+ CONTROL_RTS,
+ CONTROL_CTS,
+ CONTROL_ACK,
+ CONTROL_CFEND,
+ CONTROL_CFENDACK,
+ CONTROL_MAX,
+};
+
+struct traffic_stats {
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ u32 mgmt[MANAGEMENT_MAX];
+ u32 ctrl[CONTROL_MAX];
+ u32 data_cnt;
+ u64 data_bytes;
+#endif
+};
+
+/*
+ * Host interrupt timeout value,
+ * used when setting the interrupt coalescing timer.
+ * CSR_INT_COALESCING is an 8-bit register in 32-usec units.
+ *
+ * default interrupt coalescing timer is 64 x 32 = 2048 usecs
+ * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
+ */
+#define IL_HOST_INT_TIMEOUT_MAX (0xFF)
+#define IL_HOST_INT_TIMEOUT_DEF (0x40)
+#define IL_HOST_INT_TIMEOUT_MIN (0x0)
+#define IL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
+#define IL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
+#define IL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
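+
+/*
+ * Illustrative example (not part of the original source): writing
+ * IL_HOST_INT_TIMEOUT_DEF (0x40) to the 8-bit CSR_INT_COALESCING
+ * register described above gives 0x40 * 32 usec = 2048 usec between
+ * host interrupts, i.e. the default noted in the comment above.
+ */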
+
+#define IL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
+
+/* TX queue watchdog timeouts in mSecs */
+#define IL_DEF_WD_TIMEOUT (2000)
+#define IL_LONG_WD_TIMEOUT (10000)
+#define IL_MAX_WD_TIMEOUT (120000)
+
+struct il_force_reset {
+ int reset_request_count;
+ int reset_success_count;
+ int reset_reject_count;
+ unsigned long reset_duration;
+ unsigned long last_force_reset_jiffies;
+};
+
+/* extend beacon time format bit shifting */
+/*
+ * for _3945 devices
+ * bits 31:24 - extended
+ * bits 23:0 - interval
+ */
+#define IL3945_EXT_BEACON_TIME_POS 24
+/*
+ * for _4965 devices
+ * bits 31:22 - extended
+ * bits 21:0 - interval
+ */
+#define IL4965_EXT_BEACON_TIME_POS 22
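+
+/*
+ * Illustrative example (not part of the original source): with the
+ * _3945 layout, a 32-bit beacon time of 0x05000400 decodes as
+ * extended = 0x05 (bits 31:24) and interval = 0x400 (bits 23:0);
+ * the _4965 layout moves the split down to bit 22.
+ */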
+
+struct il_rxon_context {
+ struct ieee80211_vif *vif;
+
+ const u8 *ac_to_fifo;
+ const u8 *ac_to_queue;
+ u8 mcast_queue;
+
+ /*
+ * We could use the vif to indicate active, but we
+ * also need the context to stay active while being disabled,
+ * after the vif has already been removed for type setting.
+ */
+ bool always_active, is_active;
+
+ bool ht_need_multiple_chains;
+
+ int ctxid;
+
+ u32 interface_modes, exclusive_interface_modes;
+ u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;
+
+ /*
+ * We declare this const so it can only be
+ * changed via explicit cast within the
+ * routines that actually update the physical
+ * hardware.
+ */
+ const struct il_rxon_cmd active;
+ struct il_rxon_cmd staging;
+
+ struct il_rxon_time_cmd timing;
+
+ struct il_qos_info qos_data;
+
+ u8 bcast_sta_id, ap_sta_id;
+
+ u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
+ u8 qos_cmd;
+ u8 wep_key_cmd;
+
+ struct il_wep_key wep_keys[WEP_KEYS_MAX];
+ u8 key_mapping_keys;
+
+ __le32 station_flags;
+
+ struct {
+ bool non_gf_sta_present;
+ u8 protection;
+ bool enabled, is_40mhz;
+ u8 extension_chan_offset;
+ } ht;
+};
+
+struct il_power_mgr {
+ struct il_powertable_cmd sleep_cmd;
+ struct il_powertable_cmd sleep_cmd_next;
+ int debug_sleep_level_override;
+ bool pci_pm;
+};
+
+struct il_priv {
+
+ /* ieee device used by generic ieee processing code */
+ struct ieee80211_hw *hw;
+ struct ieee80211_channel *ieee_channels;
+ struct ieee80211_rate *ieee_rates;
+ struct il_cfg *cfg;
+
+ /* temporary frame storage list */
+ struct list_head free_frames;
+ int frames_count;
+
+ enum ieee80211_band band;
+ int alloc_rxb_page;
+
+ void (*handlers[IL_CN_MAX]) (struct il_priv *il,
+ struct il_rx_buf *rxb);
+
+ struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
+
+ /* spectrum measurement report caching */
+ struct il_spectrum_notification measure_report;
+ u8 measurement_status;
+
+ /* ucode beacon time */
+ u32 ucode_beacon_time;
+ int missed_beacon_threshold;
+
+ /* track IBSS manager (last beacon) status */
+ u32 ibss_manager;
+
+ /* force reset */
+ struct il_force_reset force_reset;
+
+ /* We allocate an array of il_channel_info for the NIC's valid channels.
+ * Access is via channel # using an indirect idx array */
+ struct il_channel_info *channel_info; /* channel info array */
+ u8 channel_count; /* # of channels */
+
+ /* thermal calibration */
+ s32 temperature; /* degrees Kelvin */
+ s32 last_temperature;
+
+ /* init calibration results */
+ struct il_calib_result calib_results[IL_CALIB_MAX];
+
+ /* Scan related variables */
+ unsigned long scan_start;
+ unsigned long scan_start_tsf;
+ void *scan_cmd;
+ enum ieee80211_band scan_band;
+ struct cfg80211_scan_request *scan_request;
+ struct ieee80211_vif *scan_vif;
+ u8 scan_tx_ant[IEEE80211_NUM_BANDS];
+ u8 mgmt_tx_ant;
+
+ /* spinlock */
+ spinlock_t lock; /* protect general shared data */
+ spinlock_t hcmd_lock; /* protect hcmd */
+ spinlock_t reg_lock; /* protect hw register access */
+ struct mutex mutex;
+
+ /* basic pci-network driver stuff */
+ struct pci_dev *pci_dev;
+
+ /* pci hardware address support */
+ void __iomem *hw_base;
+ u32 hw_rev;
+ u32 hw_wa_rev;
+ u8 rev_id;
+
+ /* command queue number */
+ u8 cmd_queue;
+
+ /* max number of station keys */
+ u8 sta_key_max_num;
+
+ /* EEPROM MAC addresses */
+ struct mac_address addresses[1];
+
+ /* uCode images, save to reload in case of failure */
+ int fw_idx; /* firmware we're trying to load */
+ u32 ucode_ver; /* version of ucode, copy of
+ il_ucode.ver */
+ struct fw_desc ucode_code; /* runtime inst */
+ struct fw_desc ucode_data; /* runtime data original */
+ struct fw_desc ucode_data_backup; /* runtime data save/restore */
+ struct fw_desc ucode_init; /* initialization inst */
+ struct fw_desc ucode_init_data; /* initialization data */
+ struct fw_desc ucode_boot; /* bootstrap inst */
+ enum ucode_type ucode_type;
+ u8 ucode_write_complete; /* the image write is complete */
+ char firmware_name[25];
+
+ struct il_rxon_context ctx;
+
+ __le16 switch_channel;
+
+ /* 1st responses from initialize and runtime uCode images.
+ * _4965's initialize alive response contains some calibration data. */
+ struct il_init_alive_resp card_alive_init;
+ struct il_alive_resp card_alive;
+
+ u16 active_rate;
+
+ u8 start_calib;
+ struct il_sensitivity_data sensitivity_data;
+ struct il_chain_noise_data chain_noise_data;
+ __le16 sensitivity_tbl[HD_TBL_SIZE];
+
+ struct il_ht_config current_ht_config;
+
+ /* Rate scaling data */
+ u8 retry_rate;
+
+ wait_queue_head_t wait_command_queue;
+
+ int activity_timer_active;
+
+ /* Rx and Tx DMA processing queues */
+ struct il_rx_queue rxq;
+ struct il_tx_queue *txq;
+ unsigned long txq_ctx_active_msk;
+ struct il_dma_ptr kw; /* keep warm address */
+ struct il_dma_ptr scd_bc_tbls;
+
+ u32 scd_base_addr; /* scheduler sram base address */
+
+ unsigned long status;
+
+ /* counts mgmt, ctl, and data packets */
+ struct traffic_stats tx_stats;
+ struct traffic_stats rx_stats;
+
+ /* counts interrupts */
+ struct isr_stats isr_stats;
+
+ struct il_power_mgr power_data;
+
+ /* context information */
+ u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
+
+ /* station table variables */
+
+ /* Note: if lock and sta_lock are needed, lock must be acquired first */
+ spinlock_t sta_lock;
+ int num_stations;
+ struct il_station_entry stations[IL_STATION_COUNT];
+ unsigned long ucode_key_table;
+
+ /* queue refcounts */
+#define IL_MAX_HW_QUEUES 32
+ unsigned long queue_stopped[BITS_TO_LONGS(IL_MAX_HW_QUEUES)];
+ /* for each AC */
+ atomic_t queue_stop_count[4];
+
+ /* Indication if ieee80211_ops->open has been called */
+ u8 is_open;
+
+ u8 mac80211_registered;
+
+ /* eeprom -- this is in the card's little endian byte order */
+ u8 *eeprom;
+ struct il_eeprom_calib_info *calib_info;
+
+ enum nl80211_iftype iw_mode;
+
+ /* Last Rx'd beacon timestamp */
+ u64 timestamp;
+
+ union {
+#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
+ struct {
+ void *shared_virt;
+ dma_addr_t shared_phys;
+
+ struct delayed_work thermal_periodic;
+ struct delayed_work rfkill_poll;
+
+ struct il3945_notif_stats stats;
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ struct il3945_notif_stats accum_stats;
+ struct il3945_notif_stats delta_stats;
+ struct il3945_notif_stats max_delta;
+#endif
+
+ u32 sta_supp_rates;
+ int last_rx_rssi; /* From Rx packet stats */
+
+ /* Rx'd packet timing information */
+ u32 last_beacon_time;
+ u64 last_tsf;
+
+ /*
+ * each calibration channel group in the
+ * EEPROM has a derived clip setting for
+ * each rate.
+ */
+ const struct il3945_clip_group clip_groups[5];
+
+ } _3945;
+#endif
+#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
+ struct {
+ struct il_rx_phy_res last_phy_res;
+ bool last_phy_res_valid;
+
+ struct completion firmware_loading_complete;
+
+ /*
+ * Chain noise reset and gain commands are the two extra
+ * calibration commands that follow the standard PHY
+ * calibration commands.
+ */
+ u8 phy_calib_chain_noise_reset_cmd;
+ u8 phy_calib_chain_noise_gain_cmd;
+
+ struct il_notif_stats stats;
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ struct il_notif_stats accum_stats;
+ struct il_notif_stats delta_stats;
+ struct il_notif_stats max_delta;
+#endif
+
+ } _4965;
+#endif
+ };
+
+ struct il_hw_params hw_params;
+
+ u32 inta_mask;
+
+ struct workqueue_struct *workqueue;
+
+ struct work_struct restart;
+ struct work_struct scan_completed;
+ struct work_struct rx_replenish;
+ struct work_struct abort_scan;
+
+ struct il_rxon_context *beacon_ctx;
+ struct sk_buff *beacon_skb;
+
+ struct work_struct tx_flush;
+
+ struct tasklet_struct irq_tasklet;
+
+ struct delayed_work init_alive_start;
+ struct delayed_work alive_start;
+ struct delayed_work scan_check;
+
+ /* TX Power */
+ s8 tx_power_user_lmt;
+ s8 tx_power_device_lmt;
+ s8 tx_power_next;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+ /* debugging info */
+ u32 debug_level; /* per device debugging will override global
+ il_debug_level if set */
+#endif /* CONFIG_IWLEGACY_DEBUG */
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ /* debugfs */
+ u16 tx_traffic_idx;
+ u16 rx_traffic_idx;
+ u8 *tx_traffic;
+ u8 *rx_traffic;
+ struct dentry *debugfs_dir;
+ u32 dbgfs_sram_offset, dbgfs_sram_len;
+ bool disable_ht40;
+#endif /* CONFIG_IWLEGACY_DEBUGFS */
+
+ struct work_struct txpower_work;
+ u32 disable_sens_cal;
+ u32 disable_chain_noise_cal;
+ u32 disable_tx_power_cal;
+ struct work_struct run_time_calib_work;
+ struct timer_list stats_periodic;
+ struct timer_list watchdog;
+ bool hw_ready;
+
+ struct led_classdev led;
+ unsigned long blink_on, blink_off;
+ bool led_registered;
+}; /*il_priv */
+
+static inline void
+il_txq_ctx_activate(struct il_priv *il, int txq_id)
+{
+ set_bit(txq_id, &il->txq_ctx_active_msk);
+}
+
+static inline void
+il_txq_ctx_deactivate(struct il_priv *il, int txq_id)
+{
+ clear_bit(txq_id, &il->txq_ctx_active_msk);
+}
+
+static inline struct ieee80211_hdr *
+il_tx_queue_get_hdr(struct il_priv *il, int txq_id, int idx)
+{
+ if (il->txq[txq_id].txb[idx].skb)
+ return (struct ieee80211_hdr *)il->txq[txq_id].txb[idx].skb->
+ data;
+ return NULL;
+}
+
+static inline struct il_rxon_context *
+il_rxon_ctx_from_vif(struct ieee80211_vif *vif)
+{
+ struct il_vif_priv *vif_priv = (void *)vif->drv_priv;
+
+ return vif_priv->ctx;
+}
+
+#define for_each_context(il, _ctx) \
+ for (_ctx = &il->ctx; _ctx == &il->ctx; _ctx++)
+
+static inline int
+il_is_associated(struct il_priv *il)
+{
+ return (il->ctx.active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
+}
+
+static inline int
+il_is_any_associated(struct il_priv *il)
+{
+ return il_is_associated(il);
+}
+
+static inline int
+il_is_associated_ctx(struct il_rxon_context *ctx)
+{
+ return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
+}
+
+static inline int
+il_is_channel_valid(const struct il_channel_info *ch_info)
+{
+ if (ch_info == NULL)
+ return 0;
+ return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
+}
+
+static inline int
+il_is_channel_radar(const struct il_channel_info *ch_info)
+{
+ return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
+}
+
+static inline u8
+il_is_channel_a_band(const struct il_channel_info *ch_info)
+{
+ return ch_info->band == IEEE80211_BAND_5GHZ;
+}
+
+static inline int
+il_is_channel_passive(const struct il_channel_info *ch)
+{
+ return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
+}
+
+static inline int
+il_is_channel_ibss(const struct il_channel_info *ch)
+{
+ return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0;
+}
+
+static inline void
+__il_free_pages(struct il_priv *il, struct page *page)
+{
+ __free_pages(page, il->hw_params.rx_page_order);
+ il->alloc_rxb_page--;
+}
+
+static inline void
+il_free_pages(struct il_priv *il, unsigned long page)
+{
+ free_pages(page, il->hw_params.rx_page_order);
+ il->alloc_rxb_page--;
+}
+
+#define IWLWIFI_VERSION "in-tree:"
+#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
+#define DRV_AUTHOR "<ilw@linux.intel.com>"
+
+#define IL_PCI_DEVICE(dev, subdev, cfg) \
+ .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
+ .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
+ .driver_data = (kernel_ulong_t)&(cfg)
+
+#define TIME_UNIT 1024
+
+#define IL_SKU_G 0x1
+#define IL_SKU_A 0x2
+#define IL_SKU_N 0x8
+
+#define IL_CMD(x) case x: return #x
+
+/* Size of one Rx buffer in host DRAM */
+#define IL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
+#define IL_RX_BUF_SIZE_4K (4 * 1024)
+#define IL_RX_BUF_SIZE_8K (8 * 1024)
+
+struct il_hcmd_ops {
+ int (*rxon_assoc) (struct il_priv *il, struct il_rxon_context *ctx);
+ int (*commit_rxon) (struct il_priv *il, struct il_rxon_context *ctx);
+ void (*set_rxon_chain) (struct il_priv *il,
+ struct il_rxon_context *ctx);
+};
+
+struct il_hcmd_utils_ops {
+ u16(*get_hcmd_size) (u8 cmd_id, u16 len);
+ u16(*build_addsta_hcmd) (const struct il_addsta_cmd *cmd, u8 *data);
+ int (*request_scan) (struct il_priv *il, struct ieee80211_vif *vif);
+ void (*post_scan) (struct il_priv *il);
+};
+
+struct il_apm_ops {
+ int (*init) (struct il_priv *il);
+ void (*config) (struct il_priv *il);
+};
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+struct il_debugfs_ops {
+ ssize_t(*rx_stats_read) (struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ ssize_t(*tx_stats_read) (struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos);
+ ssize_t(*general_stats_read) (struct file *file,
+ char __user *user_buf, size_t count,
+ loff_t *ppos);
+};
+#endif
+
+struct il_temp_ops {
+ void (*temperature) (struct il_priv *il);
+};
+
+struct il_lib_ops {
+ /* set hw dependent parameters */
+ int (*set_hw_params) (struct il_priv *il);
+ /* Handling TX */
+ void (*txq_update_byte_cnt_tbl) (struct il_priv *il,
+ struct il_tx_queue *txq,
+ u16 byte_cnt);
+ int (*txq_attach_buf_to_tfd) (struct il_priv *il,
+ struct il_tx_queue *txq, dma_addr_t addr,
+ u16 len, u8 reset, u8 pad);
+ void (*txq_free_tfd) (struct il_priv *il, struct il_tx_queue *txq);
+ int (*txq_init) (struct il_priv *il, struct il_tx_queue *txq);
+ /* setup Rx handler */
+ void (*handler_setup) (struct il_priv *il);
+ /* alive notification after init uCode load */
+ void (*init_alive_start) (struct il_priv *il);
+ /* check validity of rtc data address */
+ int (*is_valid_rtc_data_addr) (u32 addr);
+ /* 1st ucode load */
+ int (*load_ucode) (struct il_priv *il);
+
+ void (*dump_nic_error_log) (struct il_priv *il);
+ int (*dump_fh) (struct il_priv *il, char **buf, bool display);
+ int (*set_channel_switch) (struct il_priv *il,
+ struct ieee80211_channel_switch *ch_switch);
+ /* power management */
+ struct il_apm_ops apm_ops;
+
+ /* power */
+ int (*send_tx_power) (struct il_priv *il);
+ void (*update_chain_flags) (struct il_priv *il);
+
+ /* eeprom operations */
+ struct il_eeprom_ops eeprom_ops;
+
+ /* temperature */
+ struct il_temp_ops temp_ops;
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+ struct il_debugfs_ops debugfs_ops;
+#endif
+
+};
+
+struct il_led_ops {
+ int (*cmd) (struct il_priv *il, struct il_led_cmd *led_cmd);
+};
+
+struct il_legacy_ops {
+ void (*post_associate) (struct il_priv *il);
+ void (*config_ap) (struct il_priv *il);
+ /* station management */
+ int (*update_bcast_stations) (struct il_priv *il);
+ int (*manage_ibss_station) (struct il_priv *il,
+ struct ieee80211_vif *vif, bool add);
+};
+
+struct il_ops {
+ const struct il_lib_ops *lib;
+ const struct il_hcmd_ops *hcmd;
+ const struct il_hcmd_utils_ops *utils;
+ const struct il_led_ops *led;
+ const struct il_nic_ops *nic;
+ const struct il_legacy_ops *legacy;
+ const struct ieee80211_ops *ieee80211_ops;
+};
+
+struct il_mod_params {
+ int sw_crypto; /* def: 0 = using hardware encryption */
+ int disable_hw_scan; /* def: 0 = use h/w scan */
+ int num_of_queues; /* def: HW dependent */
+ int disable_11n; /* def: 0 = 11n capabilities enabled */
+ int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
+ int antenna; /* def: 0 = both antennas (use diversity) */
+ int restart_fw; /* def: 1 = restart firmware */
+};
+
+/*
+ * @led_compensation: compensate the LED on/off time per HW according
+ * to the deviation, to achieve the desired LED frequency.
+ * The detailed algorithm is described in common.c
+ * @chain_noise_num_beacons: number of beacons used to compute chain noise
+ * @wd_timeout: TX queues watchdog timeout
+ * @temperature_kelvin: temperature reported by uCode is in kelvin
+ * @ucode_tracing: support ucode continuous tracing
+ * @sensitivity_calib_by_driver: driver has the capability to perform
+ * sensitivity calibration operation
+ * @chain_noise_calib_by_driver: driver has the capability to perform
+ * chain noise calibration operation
+ */
+struct il_base_params {
+ int eeprom_size;
+ int num_of_queues; /* def: HW dependent */
+ int num_of_ampdu_queues; /* def: HW dependent */
+ /* for il_apm_init() */
+ u32 pll_cfg_val;
+ bool set_l0s;
+ bool use_bsm;
+
+ u16 led_compensation;
+ int chain_noise_num_beacons;
+ unsigned int wd_timeout;
+ bool temperature_kelvin;
+ const bool ucode_tracing;
+ const bool sensitivity_calib_by_driver;
+ const bool chain_noise_calib_by_driver;
+};
+
+#define IL_LED_SOLID 11
+#define IL_DEF_LED_INTRVL cpu_to_le32(1000)
+
+#define IL_LED_ACTIVITY (0<<1)
+#define IL_LED_LINK (1<<1)
+
+/*
+ * LED mode
+ * IL_LED_DEFAULT: use device default
+ * IL_LED_RF_STATE: turn LED on/off based on RF state
+ * LED ON = RF ON
+ * LED OFF = RF OFF
+ * IL_LED_BLINK: adjust led blink rate based on blink table
+ */
+enum il_led_mode {
+ IL_LED_DEFAULT,
+ IL_LED_RF_STATE,
+ IL_LED_BLINK,
+};
+
+void il_leds_init(struct il_priv *il);
+void il_leds_exit(struct il_priv *il);
+
+/**
+ * struct il_cfg
+ * @fw_name_pre: Firmware filename prefix. The api version and extension
+ * (.ucode) will be added to filename before loading from disk. The
+ * filename is constructed as fw_name_pre<api>.ucode.
+ * @ucode_api_max: Highest version of uCode API supported by driver.
+ * @ucode_api_min: Lowest version of uCode API supported by driver.
+ * @scan_antennas: available antenna for scan operation
+ * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
+ *
+ * We enable the driver to be backward compatible wrt API version. The
+ * driver specifies which APIs it supports (with @ucode_api_max being the
+ * highest and @ucode_api_min the lowest). Firmware will only be loaded if
+ * it has a supported API version. The firmware's API version will be
+ * stored in @il_priv, enabling the driver to make runtime changes based
+ * on firmware version used.
+ *
+ * For example,
+ * if (IL_UCODE_API(il->ucode_ver) >= 2) {
+ * Driver interacts with Firmware API version >= 2.
+ * } else {
+ * Driver interacts with Firmware API version 1.
+ * }
+ *
+ * The ideal usage of this infrastructure is to treat a new ucode API
+ * release as a new hardware revision. That is, by utilizing
+ * il_hcmd_utils_ops etc. we accommodate different command structures
+ * and flows between hardware versions as well as their API versions.
+ *
+ */
+struct il_cfg {
+ /* params specific to an individual device within a device family */
+ const char *name;
+ const char *fw_name_pre;
+ const unsigned int ucode_api_max;
+ const unsigned int ucode_api_min;
+ u8 valid_tx_ant;
+ u8 valid_rx_ant;
+ unsigned int sku;
+ u16 eeprom_ver;
+ u16 eeprom_calib_ver;
+ const struct il_ops *ops;
+ /* module based parameters which can be set from modprobe cmd */
+ const struct il_mod_params *mod_params;
+ /* params not likely to change within a device family */
+ struct il_base_params *base_params;
+ /* params likely to change within a device family */
+ u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
+ enum il_led_mode led_mode;
+};
+
+/***************************
+ * L i b *
+ ***************************/
+
+struct ieee80211_hw *il_alloc_all(struct il_cfg *cfg);
+int il_mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u16 queue, const struct ieee80211_tx_queue_params *params);
+int il_mac_tx_last_beacon(struct ieee80211_hw *hw);
+
+void il_set_rxon_hwcrypto(struct il_priv *il, struct il_rxon_context *ctx,
+ int hw_decrypt);
+int il_check_rxon_cmd(struct il_priv *il, struct il_rxon_context *ctx);
+int il_full_rxon_required(struct il_priv *il, struct il_rxon_context *ctx);
+int il_set_rxon_channel(struct il_priv *il, struct ieee80211_channel *ch,
+ struct il_rxon_context *ctx);
+void il_set_flags_for_band(struct il_priv *il, struct il_rxon_context *ctx,
+ enum ieee80211_band band, struct ieee80211_vif *vif);
+u8 il_get_single_channel_number(struct il_priv *il, enum ieee80211_band band);
+void il_set_rxon_ht(struct il_priv *il, struct il_ht_config *ht_conf);
+bool il_is_ht40_tx_allowed(struct il_priv *il, struct il_rxon_context *ctx,
+ struct ieee80211_sta_ht_cap *ht_cap);
+void il_connection_init_rx_config(struct il_priv *il,
+ struct il_rxon_context *ctx);
+void il_set_rate(struct il_priv *il);
+int il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr,
+ u32 decrypt_res, struct ieee80211_rx_status *stats);
+void il_irq_handle_error(struct il_priv *il);
+int il_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void il_mac_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif);
+int il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum nl80211_iftype newtype, bool newp2p);
+int il_alloc_txq_mem(struct il_priv *il);
+void il_txq_mem(struct il_priv *il);
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+int il_alloc_traffic_mem(struct il_priv *il);
+void il_free_traffic_mem(struct il_priv *il);
+void il_reset_traffic_log(struct il_priv *il);
+void il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
+ struct ieee80211_hdr *header);
+void il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
+ struct ieee80211_hdr *header);
+const char *il_get_mgmt_string(int cmd);
+const char *il_get_ctrl_string(int cmd);
+void il_clear_traffic_stats(struct il_priv *il);
+void il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len);
+#else
+static inline int
+il_alloc_traffic_mem(struct il_priv *il)
+{
+ return 0;
+}
+
+static inline void
+il_free_traffic_mem(struct il_priv *il)
+{
+}
+
+static inline void
+il_reset_traffic_log(struct il_priv *il)
+{
+}
+
+static inline void
+il_dbg_log_tx_data_frame(struct il_priv *il, u16 length,
+ struct ieee80211_hdr *header)
+{
+}
+
+static inline void
+il_dbg_log_rx_data_frame(struct il_priv *il, u16 length,
+ struct ieee80211_hdr *header)
+{
+}
+
+static inline void
+il_update_stats(struct il_priv *il, bool is_tx, __le16 fc, u16 len)
+{
+}
+#endif
+/*****************************************************
+ * RX handlers.
+ * **************************************************/
+void il_hdl_pm_sleep(struct il_priv *il, struct il_rx_buf *rxb);
+void il_hdl_pm_debug_stats(struct il_priv *il, struct il_rx_buf *rxb);
+void il_hdl_error(struct il_priv *il, struct il_rx_buf *rxb);
+
+/*****************************************************
+* RX
+******************************************************/
+void il_cmd_queue_unmap(struct il_priv *il);
+void il_cmd_queue_free(struct il_priv *il);
+int il_rx_queue_alloc(struct il_priv *il);
+void il_rx_queue_update_write_ptr(struct il_priv *il, struct il_rx_queue *q);
+int il_rx_queue_space(const struct il_rx_queue *q);
+void il_tx_cmd_complete(struct il_priv *il, struct il_rx_buf *rxb);
+/* Handlers */
+void il_hdl_spectrum_measurement(struct il_priv *il, struct il_rx_buf *rxb);
+void il_recover_from_stats(struct il_priv *il, struct il_rx_pkt *pkt);
+void il_chswitch_done(struct il_priv *il, bool is_success);
+void il_hdl_csa(struct il_priv *il, struct il_rx_buf *rxb);
+
+/* TX helpers */
+
+/*****************************************************
+* TX
+******************************************************/
+void il_txq_update_write_ptr(struct il_priv *il, struct il_tx_queue *txq);
+int il_tx_queue_init(struct il_priv *il, struct il_tx_queue *txq, int slots_num,
+ u32 txq_id);
+void il_tx_queue_reset(struct il_priv *il, struct il_tx_queue *txq,
+ int slots_num, u32 txq_id);
+void il_tx_queue_unmap(struct il_priv *il, int txq_id);
+void il_tx_queue_free(struct il_priv *il, int txq_id);
+void il_setup_watchdog(struct il_priv *il);
+/*****************************************************
+ * TX power
+ ****************************************************/
+int il_set_tx_power(struct il_priv *il, s8 tx_power, bool force);
+
+/*******************************************************************************
+ * Rate
+ ******************************************************************************/
+
+u8 il_get_lowest_plcp(struct il_priv *il, struct il_rxon_context *ctx);
+
+/*******************************************************************************
+ * Scanning
+ ******************************************************************************/
+void il_init_scan_params(struct il_priv *il);
+int il_scan_cancel(struct il_priv *il);
+int il_scan_cancel_timeout(struct il_priv *il, unsigned long ms);
+void il_force_scan_end(struct il_priv *il);
+int il_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req);
+void il_internal_short_hw_scan(struct il_priv *il);
+int il_force_reset(struct il_priv *il, bool external);
+u16 il_fill_probe_req(struct il_priv *il, struct ieee80211_mgmt *frame,
+ const u8 *ta, const u8 *ie, int ie_len, int left);
+void il_setup_rx_scan_handlers(struct il_priv *il);
+u16 il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
+ u8 n_probes);
+u16 il_get_passive_dwell_time(struct il_priv *il, enum ieee80211_band band,
+ struct ieee80211_vif *vif);
+void il_setup_scan_deferred_work(struct il_priv *il);
+void il_cancel_scan_deferred_work(struct il_priv *il);
+
+/* For faster active scanning, scan will move to the next channel if fewer than
+ * PLCP_QUIET_THRESH packets are heard on this channel within
+ * ACTIVE_QUIET_TIME after sending probe request. This shortens the dwell
+ * time if it's a quiet channel (nothing responded to our probe, and there's
+ * no other traffic).
+ * Disable "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
+#define IL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
+#define IL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
+
+#define IL_SCAN_CHECK_WATCHDOG (HZ * 7)
+
+/*****************************************************
+ * S e n d i n g H o s t C o m m a n d s *
+ *****************************************************/
+
+const char *il_get_cmd_string(u8 cmd);
+int __must_check il_send_cmd_sync(struct il_priv *il, struct il_host_cmd *cmd);
+int il_send_cmd(struct il_priv *il, struct il_host_cmd *cmd);
+int __must_check il_send_cmd_pdu(struct il_priv *il, u8 id, u16 len,
+ const void *data);
+int il_send_cmd_pdu_async(struct il_priv *il, u8 id, u16 len, const void *data,
+ void (*callback) (struct il_priv *il,
+ struct il_device_cmd *cmd,
+ struct il_rx_pkt *pkt));
+
+int il_enqueue_hcmd(struct il_priv *il, struct il_host_cmd *cmd);
+
+/*****************************************************
+ * PCI *
+ *****************************************************/
+
+static inline u16
+il_pcie_link_ctl(struct il_priv *il)
+{
+ int pos;
+ u16 pci_lnk_ctl;
+ pos = pci_pcie_cap(il->pci_dev);
+ pci_read_config_word(il->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
+ return pci_lnk_ctl;
+}
+
+void il_bg_watchdog(unsigned long data);
+u32 il_usecs_to_beacons(struct il_priv *il, u32 usec, u32 beacon_interval);
+__le32 il_add_beacon_time(struct il_priv *il, u32 base, u32 addon,
+ u32 beacon_interval);
+
+#ifdef CONFIG_PM
+int il_pci_suspend(struct device *device);
+int il_pci_resume(struct device *device);
+extern const struct dev_pm_ops il_pm_ops;
+
+#define IL_LEGACY_PM_OPS (&il_pm_ops)
+
+#else /* !CONFIG_PM */
+
+#define IL_LEGACY_PM_OPS NULL
+
+#endif /* !CONFIG_PM */
+
+/*****************************************************
+* Error Handling Debugging
+******************************************************/
+void il4965_dump_nic_error_log(struct il_priv *il);
+#ifdef CONFIG_IWLEGACY_DEBUG
+void il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx);
+#else
+static inline void
+il_print_rx_config_cmd(struct il_priv *il, struct il_rxon_context *ctx)
+{
+}
+#endif
+
+void il_clear_isr_stats(struct il_priv *il);
+
+/*****************************************************
+* GEOS
+******************************************************/
+int il_init_geos(struct il_priv *il);
+void il_free_geos(struct il_priv *il);
+
+/*************** DRIVER STATUS FUNCTIONS *****/
+
+#define S_HCMD_ACTIVE 0 /* host command in progress */
+/* 1 is unused (used to be S_HCMD_SYNC_ACTIVE) */
+#define S_INT_ENABLED 2
+#define S_RF_KILL_HW 3
+#define S_CT_KILL 4
+#define S_INIT 5
+#define S_ALIVE 6
+#define S_READY 7
+#define S_TEMPERATURE 8
+#define S_GEO_CONFIGURED 9
+#define S_EXIT_PENDING 10
+#define S_STATS 12
+#define S_SCANNING 13
+#define S_SCAN_ABORTING 14
+#define S_SCAN_HW 15
+#define S_POWER_PMI 16
+#define S_FW_ERROR 17
+#define S_CHANNEL_SWITCH_PENDING 18
+
+static inline int
+il_is_ready(struct il_priv *il)
+{
+ /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
+ * set but EXIT_PENDING is not */
+ return test_bit(S_READY, &il->status) &&
+ test_bit(S_GEO_CONFIGURED, &il->status) &&
+ !test_bit(S_EXIT_PENDING, &il->status);
+}
+
+static inline int
+il_is_alive(struct il_priv *il)
+{
+ return test_bit(S_ALIVE, &il->status);
+}
+
+static inline int
+il_is_init(struct il_priv *il)
+{
+ return test_bit(S_INIT, &il->status);
+}
+
+static inline int
+il_is_rfkill_hw(struct il_priv *il)
+{
+ return test_bit(S_RF_KILL_HW, &il->status);
+}
+
+static inline int
+il_is_rfkill(struct il_priv *il)
+{
+ return il_is_rfkill_hw(il);
+}
+
+static inline int
+il_is_ctkill(struct il_priv *il)
+{
+ return test_bit(S_CT_KILL, &il->status);
+}
+
+static inline int
+il_is_ready_rf(struct il_priv *il)
+{
+
+ if (il_is_rfkill(il))
+ return 0;
+
+ return il_is_ready(il);
+}
+
+extern void il_send_bt_config(struct il_priv *il);
+extern int il_send_stats_request(struct il_priv *il, u8 flags, bool clear);
+void il_apm_stop(struct il_priv *il);
+int il_apm_init(struct il_priv *il);
+
+int il_send_rxon_timing(struct il_priv *il, struct il_rxon_context *ctx);
+static inline int
+il_send_rxon_assoc(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ return il->cfg->ops->hcmd->rxon_assoc(il, ctx);
+}
+
+static inline int
+il_commit_rxon(struct il_priv *il, struct il_rxon_context *ctx)
+{
+ return il->cfg->ops->hcmd->commit_rxon(il, ctx);
+}
+
+static inline const struct ieee80211_supported_band *
+il_get_hw_mode(struct il_priv *il, enum ieee80211_band band)
+{
+ return il->hw->wiphy->bands[band];
+}
+
+/* mac80211 handlers */
+int il_mac_config(struct ieee80211_hw *hw, u32 changed);
+void il_mac_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
+void il_mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *bss_conf, u32 changes);
+void il_tx_cmd_protection(struct il_priv *il, struct ieee80211_tx_info *info,
+ __le16 fc, __le32 *tx_flags);
+
+irqreturn_t il_isr(int irq, void *data);
+
+extern void il_set_bit(struct il_priv *p, u32 r, u32 m);
+extern void il_clear_bit(struct il_priv *p, u32 r, u32 m);
+extern int _il_grab_nic_access(struct il_priv *il);
+extern int _il_poll_bit(struct il_priv *il, u32 addr, u32 bits, u32 mask, int timeout);
+extern int il_poll_bit(struct il_priv *il, u32 addr, u32 mask, int timeout);
+extern u32 il_rd_prph(struct il_priv *il, u32 reg);
+extern void il_wr_prph(struct il_priv *il, u32 addr, u32 val);
+extern u32 il_read_targ_mem(struct il_priv *il, u32 addr);
+extern void il_write_targ_mem(struct il_priv *il, u32 addr, u32 val);
+
+static inline void
+_il_write8(struct il_priv *il, u32 ofs, u8 val)
+{
+ iowrite8(val, il->hw_base + ofs);
+}
+#define il_write8(il, ofs, val) _il_write8(il, ofs, val)
+
+static inline void
+_il_wr(struct il_priv *il, u32 ofs, u32 val)
+{
+ iowrite32(val, il->hw_base + ofs);
+}
+
+static inline u32
+_il_rd(struct il_priv *il, u32 ofs)
+{
+ return ioread32(il->hw_base + ofs);
+}
+
+static inline void
+_il_clear_bit(struct il_priv *il, u32 reg, u32 mask)
+{
+ _il_wr(il, reg, _il_rd(il, reg) & ~mask);
+}
+
+static inline void
+_il_set_bit(struct il_priv *il, u32 reg, u32 mask)
+{
+ _il_wr(il, reg, _il_rd(il, reg) | mask);
+}
+
+static inline void
+_il_release_nic_access(struct il_priv *il)
+{
+ _il_clear_bit(il, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+}
+
+static inline u32
+il_rd(struct il_priv *il, u32 reg)
+{
+ u32 value;
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ _il_grab_nic_access(il);
+ value = _il_rd(il, reg);
+ _il_release_nic_access(il);
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+ return value;
+}
+
+static inline void
+il_wr(struct il_priv *il, u32 reg, u32 value)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ if (!_il_grab_nic_access(il)) {
+ _il_wr(il, reg, value);
+ _il_release_nic_access(il);
+ }
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+static inline u32
+_il_rd_prph(struct il_priv *il, u32 reg)
+{
+ _il_wr(il, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
+ rmb();
+ return _il_rd(il, HBUS_TARG_PRPH_RDAT);
+}
+
+static inline void
+_il_wr_prph(struct il_priv *il, u32 addr, u32 val)
+{
+ _il_wr(il, HBUS_TARG_PRPH_WADDR, ((addr & 0x0000FFFF) | (3 << 24)));
+ wmb();
+ _il_wr(il, HBUS_TARG_PRPH_WDAT, val);
+}
+
+static inline void
+il_set_bits_prph(struct il_priv *il, u32 reg, u32 mask)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ _il_grab_nic_access(il);
+ _il_wr_prph(il, reg, (_il_rd_prph(il, reg) | mask));
+ _il_release_nic_access(il);
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+static inline void
+il_set_bits_mask_prph(struct il_priv *il, u32 reg, u32 bits, u32 mask)
+{
+ unsigned long reg_flags;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ _il_grab_nic_access(il);
+ _il_wr_prph(il, reg, ((_il_rd_prph(il, reg) & mask) | bits));
+ _il_release_nic_access(il);
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+static inline void
+il_clear_bits_prph(struct il_priv *il, u32 reg, u32 mask)
+{
+ unsigned long reg_flags;
+ u32 val;
+
+ spin_lock_irqsave(&il->reg_lock, reg_flags);
+ _il_grab_nic_access(il);
+ val = _il_rd_prph(il, reg);
+ _il_wr_prph(il, reg, (val & ~mask));
+ _il_release_nic_access(il);
+ spin_unlock_irqrestore(&il->reg_lock, reg_flags);
+}
+
+#define HW_KEY_DYNAMIC 0
+#define HW_KEY_DEFAULT 1
+
+#define IL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
+#define IL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
+#define IL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
+ being activated */
+#define IL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
+ (this is for the IBSS BSSID stations) */
+#define IL_STA_BCAST BIT(4) /* this station is the special bcast station */
+
+void il_restore_stations(struct il_priv *il, struct il_rxon_context *ctx);
+void il_clear_ucode_stations(struct il_priv *il, struct il_rxon_context *ctx);
+void il_dealloc_bcast_stations(struct il_priv *il);
+int il_get_free_ucode_key_idx(struct il_priv *il);
+int il_send_add_sta(struct il_priv *il, struct il_addsta_cmd *sta, u8 flags);
+int il_add_station_common(struct il_priv *il, struct il_rxon_context *ctx,
+ const u8 *addr, bool is_ap,
+ struct ieee80211_sta *sta, u8 *sta_id_r);
+int il_remove_station(struct il_priv *il, const u8 sta_id, const u8 * addr);
+int il_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta);
+
+u8 il_prep_station(struct il_priv *il, struct il_rxon_context *ctx,
+ const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
+
+int il_send_lq_cmd(struct il_priv *il, struct il_rxon_context *ctx,
+ struct il_link_quality_cmd *lq, u8 flags, bool init);
+
+/**
+ * il_clear_driver_stations - clear knowledge of all stations from driver
+ * @il: iwl il struct
+ *
+ * This is called during il_down() to make sure that, when we get
+ * there from a hardware restart, mac80211 will be able to
+ * reconfigure stations; if we get there via the normal down flow,
+ * the stations will already be cleared.
+ */
+static inline void
+il_clear_driver_stations(struct il_priv *il)
+{
+ unsigned long flags;
+ struct il_rxon_context *ctx = &il->ctx;
+
+ spin_lock_irqsave(&il->sta_lock, flags);
+ memset(il->stations, 0, sizeof(il->stations));
+ il->num_stations = 0;
+
+ il->ucode_key_table = 0;
+
+ /*
+ * Remove all key information that is not stored as part
+ * of station information since mac80211 may not have had
+ * a chance to remove all the keys. When device is
+ * reconfigured by mac80211 after an error all keys will
+ * be reconfigured.
+ */
+ memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
+ ctx->key_mapping_keys = 0;
+
+ spin_unlock_irqrestore(&il->sta_lock, flags);
+}
+
+static inline int
+il_sta_id(struct ieee80211_sta *sta)
+{
+ if (WARN_ON(!sta))
+ return IL_INVALID_STATION;
+
+ return ((struct il_station_priv_common *)sta->drv_priv)->sta_id;
+}
+
+/**
+ * il_sta_id_or_broadcast - return sta_id or broadcast sta
+ * @il: iwl il
+ * @context: the current context
+ * @sta: mac80211 station
+ *
+ * In certain circumstances mac80211 passes a station pointer
+ * that may be %NULL, for example during TX or key setup. In
+ * that case, we need to use the broadcast station, so this
+ * inline wraps that pattern.
+ */
+static inline int
+il_sta_id_or_broadcast(struct il_priv *il, struct il_rxon_context *context,
+ struct ieee80211_sta *sta)
+{
+ int sta_id;
+
+ if (!sta)
+ return context->bcast_sta_id;
+
+ sta_id = il_sta_id(sta);
+
+ /*
+ * mac80211 should not be passing a partially
+ * initialised station!
+ */
+ WARN_ON(sta_id == IL_INVALID_STATION);
+
+ return sta_id;
+}
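+
+/*
+ * Usage sketch (illustrative; "info" is a hypothetical TX-path variable,
+ * not part of this patch):
+ *
+ *	sta_id = il_sta_id_or_broadcast(il, &il->ctx, info->control.sta);
+ *
+ * TX-path callers can pass the (possibly NULL) mac80211 station straight
+ * through and still get a valid station table idx.
+ */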
+
+/**
+ * il_queue_inc_wrap - increment queue idx, wrap back to beginning
+ * @idx -- current idx
+ * @n_bd -- total number of entries in queue (must be power of 2)
+ */
+static inline int
+il_queue_inc_wrap(int idx, int n_bd)
+{
+ return ++idx & (n_bd - 1);
+}
+
+/**
+ * il_queue_dec_wrap - decrement queue idx, wrap back to end
+ * @idx -- current idx
+ * @n_bd -- total number of entries in queue (must be power of 2)
+ */
+static inline int
+il_queue_dec_wrap(int idx, int n_bd)
+{
+ return --idx & (n_bd - 1);
+}
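+
+/*
+ * Minimal usage sketch (illustrative only, not used by the driver): because
+ * n_bd is a power of 2, the "& (n_bd - 1)" wrap needs no compare, e.g.
+ * il_queue_inc_wrap(255, 256) == 0 and il_queue_dec_wrap(0, 256) == 255.
+ */
+static inline int
+il_queue_used_example(int read_ptr, int write_ptr, int n_bd)
+{
+ int idx, used = 0;
+
+ for (idx = read_ptr; idx != write_ptr;
+ idx = il_queue_inc_wrap(idx, n_bd))
+ used++;
+ return used;
+}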
+
+/* TODO: Move fw_desc functions to iwl-pci.ko */
+static inline void
+il_free_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
+{
+ if (desc->v_addr)
+ dma_free_coherent(&pci_dev->dev, desc->len, desc->v_addr,
+ desc->p_addr);
+ desc->v_addr = NULL;
+ desc->len = 0;
+}
+
+static inline int
+il_alloc_fw_desc(struct pci_dev *pci_dev, struct fw_desc *desc)
+{
+ if (!desc->len) {
+ desc->v_addr = NULL;
+ return -EINVAL;
+ }
+
+ desc->v_addr =
+ dma_alloc_coherent(&pci_dev->dev, desc->len, &desc->p_addr,
+ GFP_KERNEL);
+ return (desc->v_addr != NULL) ? 0 : -ENOMEM;
+}
+
+/*
+ * we have 8 bits used like this:
+ *
+ * 7 6 5 4 3 2 1 0
+ * | | | | | | | |
+ * | | | | | | +-+-------- AC queue (0-3)
+ * | | | | | |
+ * | +-+-+-+-+------------ HW queue ID
+ * |
+ * +---------------------- unused
+ */
+static inline void
+il_set_swq_id(struct il_tx_queue *txq, u8 ac, u8 hwq)
+{
+ BUG_ON(ac > 3); /* only have 2 bits */
+ BUG_ON(hwq > 31); /* only use 5 bits */
+
+ txq->swq_id = (hwq << 2) | ac;
+}
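+
+/*
+ * Decoding sketch (illustrative only): recover the two fields packed by
+ * il_set_swq_id() above; the wake/stop helpers below open-code the same
+ * masks.
+ */
+static inline void
+il_get_swq_id_example(u8 swq_id, u8 *ac, u8 *hwq)
+{
+ *ac = swq_id & 3; /* bits 0-1: AC queue */
+ *hwq = (swq_id >> 2) & 0x1f; /* bits 2-6: HW queue ID */
+}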
+
+static inline void
+il_wake_queue(struct il_priv *il, struct il_tx_queue *txq)
+{
+ u8 queue = txq->swq_id;
+ u8 ac = queue & 3;
+ u8 hwq = (queue >> 2) & 0x1f;
+
+ if (test_and_clear_bit(hwq, il->queue_stopped))
+ if (atomic_dec_return(&il->queue_stop_count[ac]) <= 0)
+ ieee80211_wake_queue(il->hw, ac);
+}
+
+static inline void
+il_stop_queue(struct il_priv *il, struct il_tx_queue *txq)
+{
+ u8 queue = txq->swq_id;
+ u8 ac = queue & 3;
+ u8 hwq = (queue >> 2) & 0x1f;
+
+ if (!test_and_set_bit(hwq, il->queue_stopped))
+ if (atomic_inc_return(&il->queue_stop_count[ac]) > 0)
+ ieee80211_stop_queue(il->hw, ac);
+}
+
+#ifdef ieee80211_stop_queue
+#undef ieee80211_stop_queue
+#endif
+
+#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
+
+#ifdef ieee80211_wake_queue
+#undef ieee80211_wake_queue
+#endif
+
+#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
+
+static inline void
+il_disable_interrupts(struct il_priv *il)
+{
+ clear_bit(S_INT_ENABLED, &il->status);
+
+ /* disable interrupts from uCode/NIC to host */
+ _il_wr(il, CSR_INT_MASK, 0x00000000);
+
+ /* acknowledge/clear/reset any interrupts still pending
+ * from uCode or flow handler (Rx/Tx DMA) */
+ _il_wr(il, CSR_INT, 0xffffffff);
+ _il_wr(il, CSR_FH_INT_STATUS, 0xffffffff);
+}
+
+static inline void
+il_enable_rfkill_int(struct il_priv *il)
+{
+ _il_wr(il, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
+}
+
+static inline void
+il_enable_interrupts(struct il_priv *il)
+{
+ set_bit(S_INT_ENABLED, &il->status);
+ _il_wr(il, CSR_INT_MASK, il->inta_mask);
+}
+
+/**
+ * il_beacon_time_mask_low - mask of lower 32 bit of beacon time
+ * @il -- pointer to il_priv data structure
+ * @tsf_bits -- number of bits needed to shift for masking
+ */
+static inline u32
+il_beacon_time_mask_low(struct il_priv *il, u16 tsf_bits)
+{
+ return (1 << tsf_bits) - 1;
+}
+
+/**
+ * il_beacon_time_mask_high - mask of higher 32 bit of beacon time
+ * @il -- pointer to il_priv data structure
+ * @tsf_bits -- number of bits needed to shift for masking
+ */
+static inline u32
+il_beacon_time_mask_high(struct il_priv *il, u16 tsf_bits)
+{
+ return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
+}
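+
+/*
+ * Worked example (illustrative; 22 is just a sample split, not a claim
+ * about any particular hardware): with tsf_bits == 22 the two masks are
+ * complementary halves of the 32-bit beacon time word:
+ *
+ *	il_beacon_time_mask_low(il, 22)  == 0x003fffff
+ *	il_beacon_time_mask_high(il, 22) == 0xffc00000
+ */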
+
+/**
+ * struct il_rb_status - receive buffer status host memory mapped FH registers
+ *
+ * @closed_rb_num [0:11] - Indicates the idx of the RB which was closed
+ * @closed_fr_num [0:11] - Indicates the idx of the RX Frame which was closed
+ * @finished_rb_num [0:11] - Indicates the idx of the current RB
+ * in which the last frame was written to
+ * @finished_fr_num [0:11] - Indicates the idx of the RX Frame
+ * which was transferred
+ */
+struct il_rb_status {
+ __le16 closed_rb_num;
+ __le16 closed_fr_num;
+ __le16 finished_rb_num;
+ __le16 finished_fr_nam;
+ __le32 __unused; /* 3945 only */
+} __packed;
+
+#define TFD_QUEUE_SIZE_MAX (256)
+#define TFD_QUEUE_SIZE_BC_DUP (64)
+#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
+#define IL_TX_DMA_MASK DMA_BIT_MASK(36)
+#define IL_NUM_OF_TBS 20
+
+static inline u8
+il_get_dma_hi_addr(dma_addr_t addr)
+{
+ return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
+}
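+
+/*
+ * Example (illustrative): for a 36-bit bus address 0x912345678,
+ * il_get_dma_hi_addr() returns 0x9 -- the [35:32] bits that do not fit in
+ * the 32-bit "lo" word of a TB entry (see struct il_tfd_tb below). On a
+ * 32-bit dma_addr_t it always returns 0.
+ */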
+
+/**
+ * struct il_tfd_tb transmit buffer descriptor within transmit frame descriptor
+ *
+ * This structure contains dma address and length of transmission address
+ *
+ * @lo: low [31:0] portion of the dma address of the TX buffer; every
+ * even entry is unaligned on a 16-bit boundary
+ * @hi_n_len: 0-3 [35:32] portion of dma
+ * 4-15 length of the tx buffer
+ */
+struct il_tfd_tb {
+ __le32 lo;
+ __le16 hi_n_len;
+} __packed;
+
+/**
+ * struct il_tfd
+ *
+ * Transmit Frame Descriptor (TFD)
+ *
+ * @ __reserved1[3] reserved
+ * @ num_tbs 0-4 number of active tbs
+ * 5 reserved
+ * 6-7 padding (not used)
+ * @ tbs[20] transmit frame buffer descriptors
+ * @ __pad padding
+ *
+ * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
+ * Both driver and device share these circular buffers, each of which must be
+ * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
+ *
+ * Driver must indicate the physical address of the base of each
+ * circular buffer via the FH49_MEM_CBBC_QUEUE registers.
+ *
+ * Each TFD contains pointer/size information for up to 20 data buffers
+ * in host DRAM. These buffers collectively contain the (one) frame described
+ * by the TFD. Each buffer must be a single contiguous block of memory within
+ * itself, but buffers may be scattered in host DRAM. Each buffer has max size
+ * of (4K - 4). The device concatenates all of a TFD's buffers into a single
+ * Tx frame, up to 8 KBytes in size.
+ *
+ * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
+ */
+struct il_tfd {
+ u8 __reserved1[3];
+ u8 num_tbs;
+ struct il_tfd_tb tbs[IL_NUM_OF_TBS];
+ __le32 __pad;
+} __packed;
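+
+/*
+ * Packing sketch (illustrative only, not part of the driver API): a TB
+ * entry keeps the low 32 address bits in @lo and packs addr[35:32] plus a
+ * 12-bit length into @hi_n_len, per the il_tfd_tb layout documented above.
+ * The length must fit in 12 bits (max 4K - 4, see above).
+ */
+static inline void
+il_tfd_tb_pack_example(struct il_tfd_tb *tb, dma_addr_t addr, u16 len)
+{
+ u16 hi_n_len = len << 4;
+
+ hi_n_len |= il_get_dma_hi_addr(addr);
+ tb->lo = cpu_to_le32(lower_32_bits(addr));
+ tb->hi_n_len = cpu_to_le16(hi_n_len);
+}
+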
+/* PCI registers */
+#define PCI_CFG_RETRY_TIMEOUT 0x041
+
+/* PCI register values */
+#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
+#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
+
+struct il_rate_info {
+ u8 plcp; /* uCode API: RATE_6M_PLCP, etc. */
+ u8 plcp_siso; /* uCode API: RATE_SISO_6M_PLCP, etc. */
+ u8 plcp_mimo2; /* uCode API: RATE_MIMO2_6M_PLCP, etc. */
+ u8 ieee; /* MAC header: RATE_6M_IEEE, etc. */
+ u8 prev_ieee; /* previous rate in IEEE speeds */
+ u8 next_ieee; /* next rate in IEEE speeds */
+ u8 prev_rs; /* previous rate used in rs algo */
+ u8 next_rs; /* next rate used in rs algo */
+ u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
+ u8 next_rs_tgg; /* next rate used in TGG rs algo */
+};
+
+struct il3945_rate_info {
+ u8 plcp; /* uCode API: RATE_6M_PLCP, etc. */
+ u8 ieee; /* MAC header: RATE_6M_IEEE, etc. */
+ u8 prev_ieee; /* previous rate in IEEE speeds */
+ u8 next_ieee; /* next rate in IEEE speeds */
+ u8 prev_rs; /* previous rate used in rs algo */
+ u8 next_rs; /* next rate used in rs algo */
+ u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
+ u8 next_rs_tgg; /* next rate used in TGG rs algo */
+ u8 table_rs_idx; /* idx in rate scale table cmd */
+ u8 prev_table_rs; /* prev in rate table cmd */
+};
+
+/*
+ * These serve as idxes into
+ * struct il_rate_info il_rates[RATE_COUNT];
+ */
+enum {
+ RATE_1M_IDX = 0,
+ RATE_2M_IDX,
+ RATE_5M_IDX,
+ RATE_11M_IDX,
+ RATE_6M_IDX,
+ RATE_9M_IDX,
+ RATE_12M_IDX,
+ RATE_18M_IDX,
+ RATE_24M_IDX,
+ RATE_36M_IDX,
+ RATE_48M_IDX,
+ RATE_54M_IDX,
+ RATE_60M_IDX,
+ RATE_COUNT,
+ RATE_COUNT_LEGACY = RATE_COUNT - 1, /* Excluding 60M */
+ RATE_COUNT_3945 = RATE_COUNT - 1,
+ RATE_INVM_IDX = RATE_COUNT,
+ RATE_INVALID = RATE_COUNT,
+};
+
+enum {
+ RATE_6M_IDX_TBL = 0,
+ RATE_9M_IDX_TBL,
+ RATE_12M_IDX_TBL,
+ RATE_18M_IDX_TBL,
+ RATE_24M_IDX_TBL,
+ RATE_36M_IDX_TBL,
+ RATE_48M_IDX_TBL,
+ RATE_54M_IDX_TBL,
+ RATE_1M_IDX_TBL,
+ RATE_2M_IDX_TBL,
+ RATE_5M_IDX_TBL,
+ RATE_11M_IDX_TBL,
+ RATE_INVM_IDX_TBL = RATE_INVM_IDX - 1,
+};
+
+enum {
+ IL_FIRST_OFDM_RATE = RATE_6M_IDX,
+ IL39_LAST_OFDM_RATE = RATE_54M_IDX,
+ IL_LAST_OFDM_RATE = RATE_60M_IDX,
+ IL_FIRST_CCK_RATE = RATE_1M_IDX,
+ IL_LAST_CCK_RATE = RATE_11M_IDX,
+};
+
+/* #define vs. enum to keep from defaulting to 'large integer' */
+#define RATE_6M_MASK (1 << RATE_6M_IDX)
+#define RATE_9M_MASK (1 << RATE_9M_IDX)
+#define RATE_12M_MASK (1 << RATE_12M_IDX)
+#define RATE_18M_MASK (1 << RATE_18M_IDX)
+#define RATE_24M_MASK (1 << RATE_24M_IDX)
+#define RATE_36M_MASK (1 << RATE_36M_IDX)
+#define RATE_48M_MASK (1 << RATE_48M_IDX)
+#define RATE_54M_MASK (1 << RATE_54M_IDX)
+#define RATE_60M_MASK (1 << RATE_60M_IDX)
+#define RATE_1M_MASK (1 << RATE_1M_IDX)
+#define RATE_2M_MASK (1 << RATE_2M_IDX)
+#define RATE_5M_MASK (1 << RATE_5M_IDX)
+#define RATE_11M_MASK (1 << RATE_11M_IDX)
+
+/* uCode API values for legacy bit rates, both OFDM and CCK */
+enum {
+ RATE_6M_PLCP = 13,
+ RATE_9M_PLCP = 15,
+ RATE_12M_PLCP = 5,
+ RATE_18M_PLCP = 7,
+ RATE_24M_PLCP = 9,
+ RATE_36M_PLCP = 11,
+ RATE_48M_PLCP = 1,
+ RATE_54M_PLCP = 3,
+ RATE_60M_PLCP = 3, /*FIXME:RS:should be removed */
+ RATE_1M_PLCP = 10,
+ RATE_2M_PLCP = 20,
+ RATE_5M_PLCP = 55,
+ RATE_11M_PLCP = 110,
+ /*FIXME:RS:add RATE_LEGACY_INVM_PLCP = 0, */
+};
+
+/* uCode API values for OFDM high-throughput (HT) bit rates */
+enum {
+ RATE_SISO_6M_PLCP = 0,
+ RATE_SISO_12M_PLCP = 1,
+ RATE_SISO_18M_PLCP = 2,
+ RATE_SISO_24M_PLCP = 3,
+ RATE_SISO_36M_PLCP = 4,
+ RATE_SISO_48M_PLCP = 5,
+ RATE_SISO_54M_PLCP = 6,
+ RATE_SISO_60M_PLCP = 7,
+ RATE_MIMO2_6M_PLCP = 0x8,
+ RATE_MIMO2_12M_PLCP = 0x9,
+ RATE_MIMO2_18M_PLCP = 0xa,
+ RATE_MIMO2_24M_PLCP = 0xb,
+ RATE_MIMO2_36M_PLCP = 0xc,
+ RATE_MIMO2_48M_PLCP = 0xd,
+ RATE_MIMO2_54M_PLCP = 0xe,
+ RATE_MIMO2_60M_PLCP = 0xf,
+ RATE_SISO_INVM_PLCP,
+ RATE_MIMO2_INVM_PLCP = RATE_SISO_INVM_PLCP,
+};
+
+/* MAC header values for bit rates */
+enum {
+ RATE_6M_IEEE = 12,
+ RATE_9M_IEEE = 18,
+ RATE_12M_IEEE = 24,
+ RATE_18M_IEEE = 36,
+ RATE_24M_IEEE = 48,
+ RATE_36M_IEEE = 72,
+ RATE_48M_IEEE = 96,
+ RATE_54M_IEEE = 108,
+ RATE_60M_IEEE = 120,
+ RATE_1M_IEEE = 2,
+ RATE_2M_IEEE = 4,
+ RATE_5M_IEEE = 11,
+ RATE_11M_IEEE = 22,
+};
+
+#define IL_CCK_BASIC_RATES_MASK \
+ (RATE_1M_MASK | \
+ RATE_2M_MASK)
+
+#define IL_CCK_RATES_MASK \
+ (IL_CCK_BASIC_RATES_MASK | \
+ RATE_5M_MASK | \
+ RATE_11M_MASK)
+
+#define IL_OFDM_BASIC_RATES_MASK \
+ (RATE_6M_MASK | \
+ RATE_12M_MASK | \
+ RATE_24M_MASK)
+
+#define IL_OFDM_RATES_MASK \
+ (IL_OFDM_BASIC_RATES_MASK | \
+ RATE_9M_MASK | \
+ RATE_18M_MASK | \
+ RATE_36M_MASK | \
+ RATE_48M_MASK | \
+ RATE_54M_MASK)
+
+#define IL_BASIC_RATES_MASK \
+ (IL_OFDM_BASIC_RATES_MASK | \
+ IL_CCK_BASIC_RATES_MASK)
+
+#define RATES_MASK ((1 << RATE_COUNT) - 1)
+#define RATES_MASK_3945 ((1 << RATE_COUNT_3945) - 1)
+
+#define IL_INVALID_VALUE -1
+
+#define IL_MIN_RSSI_VAL -100
+#define IL_MAX_RSSI_VAL 0
+
+/* These values specify how many Tx frame attempts before
+ * searching for a new modulation mode */
+#define IL_LEGACY_FAILURE_LIMIT 160
+#define IL_LEGACY_SUCCESS_LIMIT 480
+#define IL_LEGACY_TBL_COUNT 160
+
+#define IL_NONE_LEGACY_FAILURE_LIMIT 400
+#define IL_NONE_LEGACY_SUCCESS_LIMIT 4500
+#define IL_NONE_LEGACY_TBL_COUNT 1500
+
+/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
+#define IL_RS_GOOD_RATIO 12800 /* 100% */
+#define RATE_SCALE_SWITCH 10880 /* 85% */
+#define RATE_HIGH_TH 10880 /* 85% */
+#define RATE_INCREASE_TH 6400 /* 50% */
+#define RATE_DECREASE_TH 1920 /* 15% */
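+
+/*
+ * Worked example (illustrative): with success_ratio scaled so that perfect
+ * is 128 * 100, 109 ACKed frames out of 128 attempted give
+ * (128 * 100 * 109) / 128 = 10900, just above RATE_HIGH_TH /
+ * RATE_SCALE_SWITCH (10880, i.e. 85%).
+ */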
+
+/* possible actions when in legacy mode */
+#define IL_LEGACY_SWITCH_ANTENNA1 0
+#define IL_LEGACY_SWITCH_ANTENNA2 1
+#define IL_LEGACY_SWITCH_SISO 2
+#define IL_LEGACY_SWITCH_MIMO2_AB 3
+#define IL_LEGACY_SWITCH_MIMO2_AC 4
+#define IL_LEGACY_SWITCH_MIMO2_BC 5
+
+/* possible actions when in siso mode */
+#define IL_SISO_SWITCH_ANTENNA1 0
+#define IL_SISO_SWITCH_ANTENNA2 1
+#define IL_SISO_SWITCH_MIMO2_AB 2
+#define IL_SISO_SWITCH_MIMO2_AC 3
+#define IL_SISO_SWITCH_MIMO2_BC 4
+#define IL_SISO_SWITCH_GI 5
+
+/* possible actions when in mimo mode */
+#define IL_MIMO2_SWITCH_ANTENNA1 0
+#define IL_MIMO2_SWITCH_ANTENNA2 1
+#define IL_MIMO2_SWITCH_SISO_A 2
+#define IL_MIMO2_SWITCH_SISO_B 3
+#define IL_MIMO2_SWITCH_SISO_C 4
+#define IL_MIMO2_SWITCH_GI 5
+
+#define IL_MAX_SEARCH IL_MIMO2_SWITCH_GI
+
+#define IL_ACTION_LIMIT 3 /* # possible actions */
+
+#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
+
+/* load per tid defines for A-MPDU activation */
+#define IL_AGG_TPT_THREHOLD 0
+#define IL_AGG_LOAD_THRESHOLD 10
+#define IL_AGG_ALL_TID 0xff
+#define TID_QUEUE_CELL_SPACING 50 /*mS */
+#define TID_QUEUE_MAX_SIZE 20
+#define TID_ROUND_VALUE 5 /* mS */
+#define TID_MAX_LOAD_COUNT 8
+
+#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
+#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
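+
+/*
+ * Example (illustrative, for unsigned timestamps such as jiffies):
+ * TIME_WRAP_AROUND(x, y) yields the elapsed time even across a counter
+ * wrap -- when y has wrapped past x, y > x is false and the result is
+ * (0 - x) + y, i.e. the distance from x to the wrap point plus y.
+ */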
+
+extern const struct il_rate_info il_rates[RATE_COUNT];
+
+enum il_table_type {
+ LQ_NONE,
+ LQ_G, /* legacy types */
+ LQ_A,
+ LQ_SISO, /* high-throughput types */
+ LQ_MIMO2,
+ LQ_MAX,
+};
+
+#define is_legacy(tbl) ((tbl) == LQ_G || (tbl) == LQ_A)
+#define is_siso(tbl) ((tbl) == LQ_SISO)
+#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
+#define is_mimo(tbl) (is_mimo2(tbl))
+#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
+#define is_a_band(tbl) ((tbl) == LQ_A)
+#define is_g_and(tbl) ((tbl) == LQ_G)
+
+#define ANT_NONE 0x0
+#define ANT_A BIT(0)
+#define ANT_B BIT(1)
+#define ANT_AB (ANT_A | ANT_B)
+#define ANT_C BIT(2)
+#define ANT_AC (ANT_A | ANT_C)
+#define ANT_BC (ANT_B | ANT_C)
+#define ANT_ABC (ANT_AB | ANT_C)
+
+#define IL_MAX_MCS_DISPLAY_SIZE 12
+
+struct il_rate_mcs_info {
+ char mbps[IL_MAX_MCS_DISPLAY_SIZE];
+ char mcs[IL_MAX_MCS_DISPLAY_SIZE];
+};
+
+/**
+ * struct il_rate_scale_data -- tx success history for one rate
+ */
+struct il_rate_scale_data {
+ u64 data; /* bitmap of successful frames */
+ s32 success_counter; /* number of frames successful */
+ s32 success_ratio; /* per-cent * 128 */
+ s32 counter; /* number of frames attempted */
+ s32 average_tpt; /* success ratio * expected throughput */
+ unsigned long stamp;
+};
+
+/**
+ * struct il_scale_tbl_info -- tx params and success history for all rates
+ *
+ * There are two of these in struct il_lq_sta,
+ * one for "active", and one for "search".
+ */
+struct il_scale_tbl_info {
+ enum il_table_type lq_type;
+ u8 ant_type;
+ u8 is_SGI; /* 1 = short guard interval */
+ u8 is_ht40; /* 1 = 40 MHz channel width */
+ u8 is_dup; /* 1 = duplicated data streams */
+ u8 action; /* change modulation; IL_[LEGACY/SISO/MIMO]_SWITCH_* */
+ u8 max_search; /* maximum number of tables we can search */
+ s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
+ u32 current_rate; /* rate_n_flags, uCode API format */
+ struct il_rate_scale_data win[RATE_COUNT]; /* rate histories */
+};
+
+struct il_traffic_load {
+ unsigned long time_stamp; /* age of the oldest stats */
+ u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
+ * slice */
+ u32 total; /* total num of packets during the
+ * last TID_MAX_TIME_DIFF */
+ u8 queue_count; /* number of queues that have
+ * been used since the last cleanup */
+ u8 head; /* start of the circular buffer */
+};
+
+/**
+ * struct il_lq_sta -- driver's rate scaling private structure
+ *
+ * Pointer to this gets passed back and forth between driver and mac80211.
+ */
+struct il_lq_sta {
+ u8 active_tbl; /* idx of active table, range 0-1 */
+ u8 enable_counter; /* indicates HT mode */
+ u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
+ u8 search_better_tbl; /* 1: currently trying alternate mode */
+ s32 last_tpt;
+
+ /* The following determine when to search for a new mode */
+ u32 table_count_limit;
+ u32 max_failure_limit; /* # failed frames before new search */
+ u32 max_success_limit; /* # successful frames before new search */
+ u32 table_count;
+ u32 total_failed; /* total failed frames, any/all rates */
+ u32 total_success; /* total successful frames, any/all rates */
+ u64 flush_timer; /* time staying in mode before new search */
+
+ u8 action_counter; /* # mode-switch actions tried */
+ u8 is_green;
+ u8 is_dup;
+ enum ieee80211_band band;
+
+ /* The following are bitmaps of rates; RATE_6M_MASK, etc. */
+ u32 supp_rates;
+ u16 active_legacy_rate;
+ u16 active_siso_rate;
+ u16 active_mimo2_rate;
+ s8 max_rate_idx; /* Max rate set by user */
+ u8 missed_rate_counter;
+
+ struct il_link_quality_cmd lq;
+ struct il_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
+ struct il_traffic_load load[TID_MAX_LOAD_COUNT];
+ u8 tx_agg_tid_en;
+#ifdef CONFIG_MAC80211_DEBUGFS
+ struct dentry *rs_sta_dbgfs_scale_table_file;
+ struct dentry *rs_sta_dbgfs_stats_table_file;
+ struct dentry *rs_sta_dbgfs_rate_scale_data_file;
+ struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
+ u32 dbg_fixed_rate;
+#endif
+ struct il_priv *drv;
+
+ /* used to be in sta_info */
+ int last_txrate_idx;
+ /* last tx rate_n_flags */
+ u32 last_rate_n_flags;
+ /* packets destined for this STA are aggregated */
+ u8 is_agg;
+};
+
+/*
+ * il_station_priv: Driver's private station information
+ *
+ * When mac80211 creates a station it reserves some space (hw->sta_data_size)
+ * in the structure for use by the driver. This structure is placed in that
+ * space.
+ *
+ * The common struct MUST be first because it is shared between
+ * 3945 and 4965!
+ */
+struct il_station_priv {
+ struct il_station_priv_common common;
+ struct il_lq_sta lq_sta;
+ atomic_t pending_frames;
+ bool client;
+ bool asleep;
+};
+
+static inline u8
+il4965_num_of_ant(u8 m)
+{
+ return !!(m & ANT_A) + !!(m & ANT_B) + !!(m & ANT_C);
+}
+
+static inline u8
+il4965_first_antenna(u8 mask)
+{
+ if (mask & ANT_A)
+ return ANT_A;
+ if (mask & ANT_B)
+ return ANT_B;
+ return ANT_C;
+}
+
+/**
+ * il3945_rate_scale_init - Initialize the rate scale table based on assoc info
+ *
+ * The specific throughput table used is based on the type of network
+ * the STA is associated with, including A, B, G, and G w/ TGG protection
+ */
+extern void il3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
+
+/* Initialize station's rate scaling information after adding station */
+extern void il4965_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+ u8 sta_id);
+extern void il3945_rs_rate_init(struct il_priv *il, struct ieee80211_sta *sta,
+ u8 sta_id);
+
+/**
+ * il_rate_control_register - Register the rate control algorithm callbacks
+ *
+ * Since the rate control algorithm is hardware specific, there is no need
+ * or reason to place it as a stand alone module. The driver can call
+ * il_rate_control_register in order to register the rate control callbacks
+ * with the mac80211 subsystem. This should be performed prior to calling
+ * ieee80211_register_hw
+ *
+ */
+extern int il4965_rate_control_register(void);
+extern int il3945_rate_control_register(void);
+
+/**
+ * il_rate_control_unregister - Unregister the rate control callbacks
+ *
+ * This should be called after calling ieee80211_unregister_hw, but before
+ * the driver is unloaded.
+ */
+extern void il4965_rate_control_unregister(void);
+extern void il3945_rate_control_unregister(void);
+
+extern int il_power_update_mode(struct il_priv *il, bool force);
+extern void il_power_initialize(struct il_priv *il);
+
+extern u32 il_debug_level;
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+/*
+ * il_get_debug_level: Return active debug level for device
+ *
+ * Using sysfs it is possible to set a per-device debug level. This debug
+ * level is used if set; otherwise the global debug level, which can be
+ * set via a module parameter, is used.
+ */
+static inline u32
+il_get_debug_level(struct il_priv *il)
+{
+ if (il->debug_level)
+ return il->debug_level;
+ else
+ return il_debug_level;
+}
+#else
+static inline u32
+il_get_debug_level(struct il_priv *il)
+{
+ return il_debug_level;
+}
+#endif
+
+#define il_print_hex_error(il, p, len) \
+do { \
+ print_hex_dump(KERN_ERR, "iwl data: ", \
+ DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
+} while (0)
+
+#ifdef CONFIG_IWLEGACY_DEBUG
+#define IL_DBG(level, fmt, args...) \
+do { \
+ if (il_get_debug_level(il) & level) \
+ dev_printk(KERN_ERR, &il->hw->wiphy->dev, \
+ "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
+ __func__ , ## args); \
+} while (0)
+
+#define il_print_hex_dump(il, level, p, len) \
+do { \
+ if (il_get_debug_level(il) & level) \
+ print_hex_dump(KERN_DEBUG, "iwl data: ", \
+ DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
+} while (0)
+
+#else
+#define IL_DBG(level, fmt, args...)
+static inline void
+il_print_hex_dump(struct il_priv *il, int level, const void *p, u32 len)
+{
+}
+#endif /* CONFIG_IWLEGACY_DEBUG */
+
+#ifdef CONFIG_IWLEGACY_DEBUGFS
+int il_dbgfs_register(struct il_priv *il, const char *name);
+void il_dbgfs_unregister(struct il_priv *il);
+#else
+static inline int
+il_dbgfs_register(struct il_priv *il, const char *name)
+{
+ return 0;
+}
+
+static inline void
+il_dbgfs_unregister(struct il_priv *il)
+{
+}
+#endif /* CONFIG_IWLEGACY_DEBUGFS */
+
+/*
+ * To use the debug system:
+ *
+ * If you are defining a new debug classification, simply add it to the #define
+ * list here in the form of
+ *
+ * #define IL_DL_xxxx VALUE
+ *
+ * where xxxx should be the name of the classification (for example, WEP).
+ *
+ * You then need to either add an IL_xxxx_DEBUG() macro definition for your
+ * classification, or use IL_DBG(IL_DL_xxxx, ...) whenever you want
+ * to send output to that classification.
+ *
+ * The active debug levels can be accessed via files
+ *
+ * /sys/module/iwl4965/parameters/debug
+ * /sys/module/iwl3945/parameters/debug
+ * /sys/class/net/wlan0/device/debug_level
+ *
+ * when CONFIG_IWLEGACY_DEBUG=y.
+ */
+
+/* 0x0000000F - 0x00000001 */
+#define IL_DL_INFO (1 << 0)
+#define IL_DL_MAC80211 (1 << 1)
+#define IL_DL_HCMD (1 << 2)
+#define IL_DL_STATE (1 << 3)
+/* 0x000000F0 - 0x00000010 */
+#define IL_DL_MACDUMP (1 << 4)
+#define IL_DL_HCMD_DUMP (1 << 5)
+#define IL_DL_EEPROM (1 << 6)
+#define IL_DL_RADIO (1 << 7)
+/* 0x00000F00 - 0x00000100 */
+#define IL_DL_POWER (1 << 8)
+#define IL_DL_TEMP (1 << 9)
+#define IL_DL_NOTIF (1 << 10)
+#define IL_DL_SCAN (1 << 11)
+/* 0x0000F000 - 0x00001000 */
+#define IL_DL_ASSOC (1 << 12)
+#define IL_DL_DROP (1 << 13)
+#define IL_DL_TXPOWER (1 << 14)
+#define IL_DL_AP (1 << 15)
+/* 0x000F0000 - 0x00010000 */
+#define IL_DL_FW (1 << 16)
+#define IL_DL_RF_KILL (1 << 17)
+#define IL_DL_FW_ERRORS (1 << 18)
+#define IL_DL_LED (1 << 19)
+/* 0x00F00000 - 0x00100000 */
+#define IL_DL_RATE (1 << 20)
+#define IL_DL_CALIB (1 << 21)
+#define IL_DL_WEP (1 << 22)
+#define IL_DL_TX (1 << 23)
+/* 0x0F000000 - 0x01000000 */
+#define IL_DL_RX (1 << 24)
+#define IL_DL_ISR (1 << 25)
+#define IL_DL_HT (1 << 26)
+/* 0xF0000000 - 0x10000000 */
+#define IL_DL_11H (1 << 28)
+#define IL_DL_STATS (1 << 29)
+#define IL_DL_TX_REPLY (1 << 30)
+#define IL_DL_QOS (1 << 31)
+
+#define D_INFO(f, a...) IL_DBG(IL_DL_INFO, f, ## a)
+#define D_MAC80211(f, a...) IL_DBG(IL_DL_MAC80211, f, ## a)
+#define D_MACDUMP(f, a...) IL_DBG(IL_DL_MACDUMP, f, ## a)
+#define D_TEMP(f, a...) IL_DBG(IL_DL_TEMP, f, ## a)
+#define D_SCAN(f, a...) IL_DBG(IL_DL_SCAN, f, ## a)
+#define D_RX(f, a...) IL_DBG(IL_DL_RX, f, ## a)
+#define D_TX(f, a...) IL_DBG(IL_DL_TX, f, ## a)
+#define D_ISR(f, a...) IL_DBG(IL_DL_ISR, f, ## a)
+#define D_LED(f, a...) IL_DBG(IL_DL_LED, f, ## a)
+#define D_WEP(f, a...) IL_DBG(IL_DL_WEP, f, ## a)
+#define D_HC(f, a...) IL_DBG(IL_DL_HCMD, f, ## a)
+#define D_HC_DUMP(f, a...) IL_DBG(IL_DL_HCMD_DUMP, f, ## a)
+#define D_EEPROM(f, a...) IL_DBG(IL_DL_EEPROM, f, ## a)
+#define D_CALIB(f, a...) IL_DBG(IL_DL_CALIB, f, ## a)
+#define D_FW(f, a...) IL_DBG(IL_DL_FW, f, ## a)
+#define D_RF_KILL(f, a...) IL_DBG(IL_DL_RF_KILL, f, ## a)
+#define D_DROP(f, a...) IL_DBG(IL_DL_DROP, f, ## a)
+#define D_AP(f, a...) IL_DBG(IL_DL_AP, f, ## a)
+#define D_TXPOWER(f, a...) IL_DBG(IL_DL_TXPOWER, f, ## a)
+#define D_RATE(f, a...) IL_DBG(IL_DL_RATE, f, ## a)
+#define D_NOTIF(f, a...) IL_DBG(IL_DL_NOTIF, f, ## a)
+#define D_ASSOC(f, a...) IL_DBG(IL_DL_ASSOC, f, ## a)
+#define D_HT(f, a...) IL_DBG(IL_DL_HT, f, ## a)
+#define D_STATS(f, a...) IL_DBG(IL_DL_STATS, f, ## a)
+#define D_TX_REPLY(f, a...) IL_DBG(IL_DL_TX_REPLY, f, ## a)
+#define D_QOS(f, a...) IL_DBG(IL_DL_QOS, f, ## a)
+#define D_RADIO(f, a...) IL_DBG(IL_DL_RADIO, f, ## a)
+#define D_POWER(f, a...) IL_DBG(IL_DL_POWER, f, ## a)
+#define D_11H(f, a...) IL_DBG(IL_DL_11H, f, ## a)
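+
+/*
+ * Usage sketch (illustrative; the variable is a placeholder): after setting
+ * the IL_DL_SCAN bit in the debug level (via the module parameter or sysfs
+ * files listed above), a call like
+ *
+ *	D_SCAN("scan aborted, %u channels scanned\n", scanned);
+ *
+ * expands to IL_DBG(IL_DL_SCAN, ...) and only prints when that bit is set
+ * in il_get_debug_level().
+ */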
+
+#endif /* __il_core_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-csr.h b/drivers/net/wireless/iwlegacy/csr.h
index 668a961..9138e15 100644
--- a/drivers/net/wireless/iwlegacy/iwl-csr.h
+++ b/drivers/net/wireless/iwlegacy/csr.h
@@ -60,8 +60,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
-#ifndef __iwl_legacy_csr_h__
-#define __iwl_legacy_csr_h__
+#ifndef __il_csr_h__
+#define __il_csr_h__
/*
* CSR (control and status registers)
*
@@ -70,9 +70,9 @@
* low power states due to driver-invoked device resets
* (e.g. CSR_RESET_REG_FLAG_SW_RESET) or uCode-driven power-saving modes.
*
- * Use iwl_write32() and iwl_read32() family to access these registers;
+ * Use _il_wr() and _il_rd() family to access these registers;
* these provide simple PCI bus access, without waking up the MAC.
- * Do not use iwl_legacy_write_direct32() family for these registers;
+ * Do not use il_wr() family for these registers;
* no need to "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ.
* The MAC (uCode processor, etc.) does not need to be powered up for accessing
* the CSR registers.
@@ -82,16 +82,16 @@
*/
#define CSR_BASE (0x000)
-#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */
-#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */
-#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */
-#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */
-#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack*/
-#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */
-#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc*/
+#define CSR_HW_IF_CONFIG_REG (CSR_BASE+0x000) /* hardware interface config */
+#define CSR_INT_COALESCING (CSR_BASE+0x004) /* accum ints, 32-usec units */
+#define CSR_INT (CSR_BASE+0x008) /* host interrupt status/ack */
+#define CSR_INT_MASK (CSR_BASE+0x00c) /* host interrupt enable */
+#define CSR_FH_INT_STATUS (CSR_BASE+0x010) /* busmaster int status/ack */
+#define CSR_GPIO_IN (CSR_BASE+0x018) /* read external chip pins */
+#define CSR_RESET (CSR_BASE+0x020) /* busmaster enable, NMI, etc */
#define CSR_GP_CNTRL (CSR_BASE+0x024)
-/* 2nd byte of CSR_INT_COALESCING, not accessible via iwl_write32()! */
+/* 2nd byte of CSR_INT_COALESCING, not accessible via _il_wr()! */
#define CSR_INT_PERIODIC_REG (CSR_BASE+0x005)
/*
@@ -166,26 +166,26 @@
#define CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A (0x00080000)
#define CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM (0x00200000)
-#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
-#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
-#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
+#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
+#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
+#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
-#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/
-#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/
+#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int */
+#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec */
/* interrupt flags in INTA, set by uCode or hardware (e.g. dma),
* acknowledged (reset) by host writing "1" to flagged bits. */
-#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
-#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */
-#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */
-#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
-#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
-#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
-#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
-#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
-#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */
-#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
-#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
+#define CSR_INT_BIT_FH_RX (1 << 31) /* Rx DMA, cmd responses, FH_INT[17:16] */
+#define CSR_INT_BIT_HW_ERR (1 << 29) /* DMA hardware error FH_INT[31] */
+#define CSR_INT_BIT_RX_PERIODIC (1 << 28) /* Rx periodic */
+#define CSR_INT_BIT_FH_TX (1 << 27) /* Tx DMA FH_INT[1:0] */
+#define CSR_INT_BIT_SCD (1 << 26) /* TXQ pointer advanced */
+#define CSR_INT_BIT_SW_ERR (1 << 25) /* uCode error */
+#define CSR_INT_BIT_RF_KILL (1 << 7) /* HW RFKILL switch GP_CNTRL[27] toggled */
+#define CSR_INT_BIT_CT_KILL (1 << 6) /* Critical temp (chip too hot) rfkill */
+#define CSR_INT_BIT_SW_RX (1 << 3) /* Rx, command responses, 3945 */
+#define CSR_INT_BIT_WAKEUP (1 << 1) /* NIC controller waking up (pwr mgmt) */
+#define CSR_INT_BIT_ALIVE (1 << 0) /* uCode interrupts once it initializes */
#define CSR_INI_SET_MASK (CSR_INT_BIT_FH_RX | \
CSR_INT_BIT_HW_ERR | \
@@ -197,21 +197,20 @@
CSR_INT_BIT_ALIVE)
/* interrupt flags in FH (flow handler) (PCI busmaster DMA) */
-#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */
-#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */
-#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */
-#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */
-#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */
-#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */
-#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */
-#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */
+#define CSR_FH_INT_BIT_ERR (1 << 31) /* Error */
+#define CSR_FH_INT_BIT_HI_PRIOR (1 << 30) /* High priority Rx, bypass coalescing */
+#define CSR39_FH_INT_BIT_RX_CHNL2 (1 << 18) /* Rx channel 2 (3945 only) */
+#define CSR_FH_INT_BIT_RX_CHNL1 (1 << 17) /* Rx channel 1 */
+#define CSR_FH_INT_BIT_RX_CHNL0 (1 << 16) /* Rx channel 0 */
+#define CSR39_FH_INT_BIT_TX_CHNL6 (1 << 6) /* Tx channel 6 (3945 only) */
+#define CSR_FH_INT_BIT_TX_CHNL1 (1 << 1) /* Tx channel 1 */
+#define CSR_FH_INT_BIT_TX_CHNL0 (1 << 0) /* Tx channel 0 */
#define CSR39_FH_INT_RX_MASK (CSR_FH_INT_BIT_HI_PRIOR | \
CSR39_FH_INT_BIT_RX_CHNL2 | \
CSR_FH_INT_BIT_RX_CHNL1 | \
CSR_FH_INT_BIT_RX_CHNL0)
-
#define CSR39_FH_INT_TX_MASK (CSR39_FH_INT_BIT_TX_CHNL6 | \
CSR_FH_INT_BIT_TX_CHNL1 | \
CSR_FH_INT_BIT_TX_CHNL0)
@@ -285,7 +284,6 @@
#define CSR_GP_CNTRL_REG_FLAG_MAC_POWER_SAVE (0x04000000)
#define CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW (0x08000000)
-
/* EEPROM REG */
#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
@@ -293,19 +291,18 @@
#define CSR_EEPROM_REG_MSK_DATA (0xFFFF0000)
/* EEPROM GP */
-#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */
+#define CSR_EEPROM_GP_VALID_MSK (0x00000007) /* signature */
#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002)
#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004)
/* GP REG */
-#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */
+#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */
#define CSR_GP_REG_NO_POWER_SAVE (0x00000000)
#define CSR_GP_REG_MAC_POWER_SAVE (0x01000000)
#define CSR_GP_REG_PHY_POWER_SAVE (0x02000000)
#define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000)
-
/* CSR GIO */
#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
@@ -357,7 +354,7 @@
/* HPET MEM debug */
#define CSR_DBG_HPET_MEM_REG_VAL (0xFFFF0000)
-/* DRAM INT TABLE */
+/* DRAM INT TBL */
#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
@@ -368,13 +365,13 @@
* to indirectly access device's internal memory or registers that
* may be powered-down.
*
- * Use iwl_legacy_write_direct32()/iwl_legacy_read_direct32() family
+ * Use il_wr()/il_rd() family
* for these registers;
* host must "grab nic access" via CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
* to make sure the MAC (uCode processor, etc.) is powered up for accessing
* internal resources.
*
- * Do not use iwl_write32()/iwl_read32() family to access these registers;
+ * Do not use _il_wr()/_il_rd() family to access these registers;
* these provide only simple PCI bus access, without waking up the MAC.
*/
#define HBUS_BASE (0x400)
@@ -411,12 +408,12 @@
#define HBUS_TARG_PRPH_RDAT (HBUS_BASE+0x050)
/*
- * Per-Tx-queue write pointer (index, really!)
- * Indicates index to next TFD that driver will fill (1 past latest filled).
+ * Per-Tx-queue write pointer (idx, really!)
+ * Indicates idx to next TFD that driver will fill (1 past latest filled).
* Bit usage:
- * 0-7: queue write index
+ * 0-7: queue write idx
* 11-8: queue selector
*/
#define HBUS_TARG_WRPTR (HBUS_BASE+0x060)
-#endif /* !__iwl_legacy_csr_h__ */
+#endif /* !__il_csr_h__ */
diff --git a/drivers/net/wireless/iwlegacy/debug.c b/drivers/net/wireless/iwlegacy/debug.c
new file mode 100644
index 0000000..b1b8926
--- /dev/null
+++ b/drivers/net/wireless/iwlegacy/debug.c
@@ -0,0 +1,1411 @@
+/******************************************************************************
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called LICENSE.GPL.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *****************************************************************************/
+#include <linux/ieee80211.h>
+#include <linux/export.h>
+#include <net/mac80211.h>
+
+#include "common.h"
+
+/* create and remove of files */
+#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
+ if (!debugfs_create_file(#name, mode, parent, il, \
+ &il_dbgfs_##name##_ops)) \
+ goto err; \
+} while (0)
+
+#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
+ struct dentry *__tmp; \
+ __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
+ parent, ptr); \
+ if (IS_ERR(__tmp) || !__tmp) \
+ goto err; \
+} while (0)
+
+#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
+ struct dentry *__tmp; \
+ __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \
+ parent, ptr); \
+ if (IS_ERR(__tmp) || !__tmp) \
+ goto err; \
+} while (0)
+
+/* file operation */
+#define DEBUGFS_READ_FUNC(name) \
+static ssize_t il_dbgfs_##name##_read(struct file *file, \
+ char __user *user_buf, \
+ size_t count, loff_t *ppos);
+
+#define DEBUGFS_WRITE_FUNC(name) \
+static ssize_t il_dbgfs_##name##_write(struct file *file, \
+ const char __user *user_buf, \
+ size_t count, loff_t *ppos);
+
+static int
+il_dbgfs_open_file_generic(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+#define DEBUGFS_READ_FILE_OPS(name) \
+ DEBUGFS_READ_FUNC(name); \
+static const struct file_operations il_dbgfs_##name##_ops = { \
+ .read = il_dbgfs_##name##_read, \
+ .open = il_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_WRITE_FILE_OPS(name) \
+ DEBUGFS_WRITE_FUNC(name); \
+static const struct file_operations il_dbgfs_##name##_ops = { \
+ .write = il_dbgfs_##name##_write, \
+ .open = il_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+};
+
+#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
+ DEBUGFS_READ_FUNC(name); \
+ DEBUGFS_WRITE_FUNC(name); \
+static const struct file_operations il_dbgfs_##name##_ops = { \
+ .write = il_dbgfs_##name##_write, \
+ .read = il_dbgfs_##name##_read, \
+ .open = il_dbgfs_open_file_generic, \
+ .llseek = generic_file_llseek, \
+};
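+
+/*
+ * Usage sketch (illustrative; "foo" and "dir" are placeholders):
+ * DEBUGFS_READ_FILE_OPS(foo) declares il_dbgfs_foo_read() and emits a
+ * read-only il_dbgfs_foo_ops structure, and DEBUGFS_ADD_FILE(foo, dir,
+ * S_IRUSR) creates the debugfs entry bound to those ops -- see the
+ * DEBUGFS_*_FILE_OPS() uses later in this file.
+ */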
+
+static ssize_t
+il_dbgfs_tx_stats_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ char *buf;
+ int pos = 0;
+
+ int cnt;
+ ssize_t ret;
+ const size_t bufsz =
+ 100 + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
+ for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
+ il_get_mgmt_string(cnt), il->tx_stats.mgmt[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
+ for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
+ il_get_ctrl_string(cnt), il->tx_stats.ctrl[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
+ il->tx_stats.data_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
+ il->tx_stats.data_bytes);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_clear_traffic_stats_write(struct file *file,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ u32 clear_flag;
+ char buf[8];
+ int buf_size;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%x", &clear_flag) != 1)
+ return -EFAULT;
+ il_clear_traffic_stats(il);
+
+ return count;
+}
+
+static ssize_t
+il_dbgfs_rx_stats_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ char *buf;
+ int pos = 0;
+ int cnt;
+ ssize_t ret;
+ const size_t bufsz =
+ 100 + sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
+ for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
+ il_get_mgmt_string(cnt), il->rx_stats.mgmt[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
+ for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\t%25s\t\t: %u\n",
+ il_get_ctrl_string(cnt), il->rx_stats.ctrl[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
+ il->rx_stats.data_cnt);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
+ il->rx_stats.data_bytes);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+#define BYTE1_MASK 0x000000ff
+#define BYTE2_MASK 0x0000ffff
+#define BYTE3_MASK 0x00ffffff
+static ssize_t
+il_dbgfs_sram_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ u32 val;
+ char *buf;
+ ssize_t ret;
+ int i;
+ int pos = 0;
+ struct il_priv *il = file->private_data;
+ size_t bufsz;
+
+ /* default is to dump the entire data segment */
+ if (!il->dbgfs_sram_offset && !il->dbgfs_sram_len) {
+ il->dbgfs_sram_offset = 0x800000;
+ if (il->ucode_type == UCODE_INIT)
+ il->dbgfs_sram_len = il->ucode_init_data.len;
+ else
+ il->dbgfs_sram_len = il->ucode_data.len;
+ }
+ bufsz = 30 + il->dbgfs_sram_len * sizeof(char) * 10;
+ buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
+ il->dbgfs_sram_len);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
+ il->dbgfs_sram_offset);
+ for (i = il->dbgfs_sram_len; i > 0; i -= 4) {
+ val =
+ il_read_targ_mem(il,
+ il->dbgfs_sram_offset +
+ il->dbgfs_sram_len - i);
+ if (i < 4) {
+ switch (i) {
+ case 1:
+ val &= BYTE1_MASK;
+ break;
+ case 2:
+ val &= BYTE2_MASK;
+ break;
+ case 3:
+ val &= BYTE3_MASK;
+ break;
+ }
+ }
+ if (!(i % 16))
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_sram_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[64];
+ int buf_size;
+ u32 offset, len;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
+ il->dbgfs_sram_offset = offset;
+ il->dbgfs_sram_len = len;
+ } else {
+ il->dbgfs_sram_offset = 0;
+ il->dbgfs_sram_len = 0;
+ }
+
+ return count;
+}
+
+static ssize_t
+il_dbgfs_stations_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ struct il_station_entry *station;
+ int max_sta = il->hw_params.max_stations;
+ char *buf;
+ int i, j, pos = 0;
+ ssize_t ret;
+ /* Add 30 for initial string */
+ const size_t bufsz = 30 + sizeof(char) * 500 * (il->num_stations);
+
+ buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
+ il->num_stations);
+
+ for (i = 0; i < max_sta; i++) {
+ station = &il->stations[i];
+ if (!station->used)
+ continue;
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "station %d - addr: %pM, flags: %#x\n", i,
+ station->sta.sta.addr,
+ station->sta.station_flags_msk);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "TID\tseq_num\ttxq_id\tframes\ttfds\t");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "start_idx\tbitmap\t\t\trate_n_flags\n");
+
+ for (j = 0; j < MAX_TID_COUNT; j++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
+ j, station->tid[j].seq_number,
+ station->tid[j].agg.txq_id,
+ station->tid[j].agg.frame_count,
+ station->tid[j].tfds_in_queue,
+ station->tid[j].agg.start_idx,
+ station->tid[j].agg.bitmap,
+ station->tid[j].agg.rate_n_flags);
+
+ if (station->tid[j].agg.wait_for_ba)
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " - waitforba");
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_nvm_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ ssize_t ret;
+ struct il_priv *il = file->private_data;
+ int pos = 0, ofs = 0, buf_size = 0;
+ const u8 *ptr;
+ char *buf;
+ u16 eeprom_ver;
+ size_t eeprom_len = il->cfg->base_params->eeprom_size;
+ buf_size = 4 * eeprom_len + 256;
+
+ if (eeprom_len % 16) {
+ IL_ERR("NVM size is not multiple of 16.\n");
+ return -ENODATA;
+ }
+
+ ptr = il->eeprom;
+ if (!ptr) {
+ IL_ERR("Invalid EEPROM memory\n");
+ return -ENOMEM;
+ }
+
+ /* 4 characters for byte 0xYY */
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+ eeprom_ver = il_eeprom_query16(il, EEPROM_VERSION);
+ pos +=
+ scnprintf(buf + pos, buf_size - pos, "EEPROM " "version: 0x%x\n",
+ eeprom_ver);
+ for (ofs = 0; ofs < eeprom_len; ofs += 16) {
+ pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
+ hex_dump_to_buffer(ptr + ofs, 16, 16, 2, buf + pos,
+ buf_size - pos, 0);
+ pos += strlen(buf + pos);
+ if (buf_size - pos > 0)
+ buf[pos++] = '\n';
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_channels_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ struct ieee80211_channel *channels = NULL;
+ const struct ieee80211_supported_band *supp_band = NULL;
+ int pos = 0, i, bufsz = PAGE_SIZE;
+ char *buf;
+ ssize_t ret;
+
+ if (!test_bit(S_GEO_CONFIGURED, &il->status))
+ return -EAGAIN;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ supp_band = il_get_hw_mode(il, IEEE80211_BAND_2GHZ);
+ if (supp_band) {
+ channels = supp_band->channels;
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "Displaying %d channels in 2.4GHz band 802.11bg):\n",
+ supp_band->n_channels);
+
+ for (i = 0; i < supp_band->n_channels; i++)
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%d: %ddBm: BSS%s%s, %s.\n",
+ channels[i].hw_value,
+ channels[i].max_power,
+ channels[i].
+ flags & IEEE80211_CHAN_RADAR ?
+ " (IEEE 802.11h required)" : "",
+ ((channels[i].
+ flags & IEEE80211_CHAN_NO_IBSS) ||
+ (channels[i].
+ flags & IEEE80211_CHAN_RADAR)) ? "" :
+ ", IBSS",
+ channels[i].
+ flags & IEEE80211_CHAN_PASSIVE_SCAN ?
+ "passive only" : "active/passive");
+ }
+ supp_band = il_get_hw_mode(il, IEEE80211_BAND_5GHZ);
+ if (supp_band) {
+ channels = supp_band->channels;
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "Displaying %d channels in 5.2GHz band (802.11a)\n",
+ supp_band->n_channels);
+
+ for (i = 0; i < supp_band->n_channels; i++)
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "%d: %ddBm: BSS%s%s, %s.\n",
+ channels[i].hw_value,
+ channels[i].max_power,
+ channels[i].
+ flags & IEEE80211_CHAN_RADAR ?
+ " (IEEE 802.11h required)" : "",
+ ((channels[i].
+ flags & IEEE80211_CHAN_NO_IBSS) ||
+ (channels[i].
+ flags & IEEE80211_CHAN_RADAR)) ? "" :
+ ", IBSS",
+ channels[i].
+ flags & IEEE80211_CHAN_PASSIVE_SCAN ?
+ "passive only" : "active/passive");
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_status_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ char buf[512];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_HCMD_ACTIVE:\t %d\n",
+ test_bit(S_HCMD_ACTIVE, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_INT_ENABLED:\t %d\n",
+ test_bit(S_INT_ENABLED, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_RF_KILL_HW:\t %d\n",
+ test_bit(S_RF_KILL_HW, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_CT_KILL:\t\t %d\n",
+ test_bit(S_CT_KILL, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_INIT:\t\t %d\n",
+ test_bit(S_INIT, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_ALIVE:\t\t %d\n",
+ test_bit(S_ALIVE, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_READY:\t\t %d\n",
+ test_bit(S_READY, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_TEMPERATURE:\t %d\n",
+ test_bit(S_TEMPERATURE, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_GEO_CONFIGURED:\t %d\n",
+ test_bit(S_GEO_CONFIGURED, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_EXIT_PENDING:\t %d\n",
+ test_bit(S_EXIT_PENDING, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_STATS:\t %d\n",
+ test_bit(S_STATS, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_SCANNING:\t %d\n",
+ test_bit(S_SCANNING, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_SCAN_ABORTING:\t %d\n",
+ test_bit(S_SCAN_ABORTING, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_SCAN_HW:\t\t %d\n",
+ test_bit(S_SCAN_HW, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_POWER_PMI:\t %d\n",
+ test_bit(S_POWER_PMI, &il->status));
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "S_FW_ERROR:\t %d\n",
+ test_bit(S_FW_ERROR, &il->status));
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_interrupt_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ int cnt = 0;
+ char *buf;
+ int bufsz = 24 * 64; /* 24 items * 64 char per item */
+ ssize_t ret;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Interrupt Statistics Report:\n");
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
+ il->isr_stats.hw);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
+ il->isr_stats.sw);
+ if (il->isr_stats.sw || il->isr_stats.hw) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "\tLast Restarting Code: 0x%X\n",
+ il->isr_stats.err_code);
+ }
+#ifdef CONFIG_IWLEGACY_DEBUG
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
+ il->isr_stats.sch);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
+ il->isr_stats.alive);
+#endif
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "HW RF KILL switch toggled:\t %u\n",
+ il->isr_stats.rfkill);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
+ il->isr_stats.ctkill);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
+ il->isr_stats.wakeup);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Rx command responses:\t\t %u\n",
+ il->isr_stats.rx);
+ for (cnt = 0; cnt < IL_CN_MAX; cnt++) {
+ if (il->isr_stats.handlers[cnt] > 0)
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "\tRx handler[%36s]:\t\t %u\n",
+ il_get_cmd_string(cnt),
+ il->isr_stats.handlers[cnt]);
+ }
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
+ il->isr_stats.tx);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
+ il->isr_stats.unhandled);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_interrupt_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[8];
+ int buf_size;
+ u32 reset_flag;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%x", &reset_flag) != 1)
+ return -EFAULT;
+ if (reset_flag == 0)
+ il_clear_isr_stats(il);
+
+ return count;
+}
+
+static ssize_t
+il_dbgfs_qos_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ struct il_rxon_context *ctx = &il->ctx;
+ int pos = 0, i;
+ char buf[256];
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n", ctx->ctxid);
+ for (i = 0; i < AC_NUM; i++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "\tcw_min\tcw_max\taifsn\ttxop\n");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "AC[%d]\t%u\t%u\t%u\t%u\n", i,
+ ctx->qos_data.def_qos_parm.ac[i].cw_min,
+ ctx->qos_data.def_qos_parm.ac[i].cw_max,
+ ctx->qos_data.def_qos_parm.ac[i].aifsn,
+ ctx->qos_data.def_qos_parm.ac[i].edca_txop);
+ }
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_disable_ht40_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[8];
+ int buf_size;
+ int ht40;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &ht40) != 1)
+ return -EFAULT;
+ if (!il_is_any_associated(il))
+ il->disable_ht40 = ht40 ? true : false;
+ else {
+ IL_ERR("Sta associated with AP - "
+ "Change to 40MHz channel support is not allowed\n");
+ return -EINVAL;
+ }
+
+ return count;
+}
+
+static ssize_t
+il_dbgfs_disable_ht40_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[100];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "11n 40MHz Mode: %s\n",
+ il->disable_ht40 ? "Disabled" : "Enabled");
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+DEBUGFS_READ_WRITE_FILE_OPS(sram);
+DEBUGFS_READ_FILE_OPS(nvm);
+DEBUGFS_READ_FILE_OPS(stations);
+DEBUGFS_READ_FILE_OPS(channels);
+DEBUGFS_READ_FILE_OPS(status);
+DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
+DEBUGFS_READ_FILE_OPS(qos);
+DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
+
+static ssize_t
+il_dbgfs_traffic_log_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0, ofs = 0;
+ int cnt = 0, entry;
+ struct il_tx_queue *txq;
+ struct il_queue *q;
+ struct il_rx_queue *rxq = &il->rxq;
+ char *buf;
+ int bufsz =
+ ((IL_TRAFFIC_ENTRIES * IL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
+ (il->cfg->base_params->num_of_queues * 32 * 8) + 400;
+ const u8 *ptr;
+ ssize_t ret;
+
+ if (!il->txq) {
+ IL_ERR("txq not ready\n");
+ return -EAGAIN;
+ }
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate buffer\n");
+ return -ENOMEM;
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
+ for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+ txq = &il->txq[cnt];
+ q = &txq->q;
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "q[%d]: read_ptr: %u, write_ptr: %u\n", cnt,
+ q->read_ptr, q->write_ptr);
+ }
+ if (il->tx_traffic && (il_debug_level & IL_DL_TX)) {
+ ptr = il->tx_traffic;
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Tx Traffic idx: %u\n",
+ il->tx_traffic_idx);
+ for (cnt = 0, ofs = 0; cnt < IL_TRAFFIC_ENTRIES; cnt++) {
+ for (entry = 0; entry < IL_TRAFFIC_ENTRY_SIZE / 16;
+ entry++, ofs += 16) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "0x%.4x ",
+ ofs);
+ hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
+ buf + pos, bufsz - pos, 0);
+ pos += strlen(buf + pos);
+ if (bufsz - pos > 0)
+ buf[pos++] = '\n';
+ }
+ }
+ }
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "read: %u, write: %u\n",
+ rxq->read, rxq->write);
+
+ if (il->rx_traffic && (il_debug_level & IL_DL_RX)) {
+ ptr = il->rx_traffic;
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "Rx Traffic idx: %u\n",
+ il->rx_traffic_idx);
+ for (cnt = 0, ofs = 0; cnt < IL_TRAFFIC_ENTRIES; cnt++) {
+ for (entry = 0; entry < IL_TRAFFIC_ENTRY_SIZE / 16;
+ entry++, ofs += 16) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "0x%.4x ",
+ ofs);
+ hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
+ buf + pos, bufsz - pos, 0);
+ pos += strlen(buf + pos);
+ if (bufsz - pos > 0)
+ buf[pos++] = '\n';
+ }
+ }
+ }
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_traffic_log_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[8];
+ int buf_size;
+ int traffic_log;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &traffic_log) != 1)
+ return -EFAULT;
+ if (traffic_log == 0)
+ il_reset_traffic_log(il);
+
+ return count;
+}
+
+static ssize_t
+il_dbgfs_tx_queue_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+
+ struct il_priv *il = file->private_data;
+ struct il_tx_queue *txq;
+ struct il_queue *q;
+ char *buf;
+ int pos = 0;
+ int cnt;
+ int ret;
+ const size_t bufsz =
+ sizeof(char) * 64 * il->cfg->base_params->num_of_queues;
+
+ if (!il->txq) {
+ IL_ERR("txq not ready\n");
+ return -EAGAIN;
+ }
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (cnt = 0; cnt < il->hw_params.max_txq_num; cnt++) {
+ txq = &il->txq[cnt];
+ q = &txq->q;
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "hwq %.2d: read=%u write=%u stop=%d"
+ " swq_id=%#.2x (ac %d/hwq %d)\n", cnt,
+ q->read_ptr, q->write_ptr,
+ !!test_bit(cnt, il->queue_stopped),
+ txq->swq_id, txq->swq_id & 3,
+ (txq->swq_id >> 2) & 0x1f);
+ if (cnt >= 4)
+ continue;
+ /* for the ACs, display the stop count too */
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ " stop-count: %d\n",
+ atomic_read(&il->queue_stop_count[cnt]));
+ }
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
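The swq_id decode above implies a simple packing: the low two bits carry the access category and the next five bits the hardware queue number. A hypothetical helper pair spelling that out (the canonical encoding lives in the tx path, outside this hunk):

/* Hypothetical helpers mirroring the decode in il_dbgfs_tx_queue_read(). */
static inline u8 il_example_build_swq_id(u8 hwq, u8 ac)
{
        return ((hwq & 0x1f) << 2) | (ac & 3);
}

static inline void il_example_split_swq_id(u8 swq_id, u8 *hwq, u8 *ac)
{
        *ac = swq_id & 3;               /* bits 1:0 -> access category */
        *hwq = (swq_id >> 2) & 0x1f;    /* bits 6:2 -> hardware queue  */
}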
+
+static ssize_t
+il_dbgfs_rx_queue_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ struct il_rx_queue *rxq = &il->rxq;
+ char buf[256];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n", rxq->read);
+ pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n", rxq->write);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
+ rxq->free_count);
+ if (rxq->rb_stts) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
+ le16_to_cpu(rxq->rb_stts->
+ closed_rb_num) & 0x0FFF);
+ } else {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "closed_rb_num: Not Allocated\n");
+ }
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_ucode_rx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ return il->cfg->ops->lib->debugfs_ops.rx_stats_read(file, user_buf,
+ count, ppos);
+}
+
+static ssize_t
+il_dbgfs_ucode_tx_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ return il->cfg->ops->lib->debugfs_ops.tx_stats_read(file, user_buf,
+ count, ppos);
+}
+
+static ssize_t
+il_dbgfs_ucode_general_stats_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ return il->cfg->ops->lib->debugfs_ops.general_stats_read(file, user_buf,
+ count, ppos);
+}
+
+static ssize_t
+il_dbgfs_sensitivity_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ int cnt = 0;
+ char *buf;
+ int bufsz = sizeof(struct il_sensitivity_data) * 4 + 100;
+ ssize_t ret;
+ struct il_sensitivity_data *data;
+
+ data = &il->sensitivity_data;
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
+ data->auto_corr_ofdm);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_mrc:\t\t %u\n",
+ data->auto_corr_ofdm_mrc);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
+ data->auto_corr_ofdm_x1);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_mrc_x1:\t\t %u\n",
+ data->auto_corr_ofdm_mrc_x1);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
+ data->auto_corr_cck);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
+ data->auto_corr_cck_mrc);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "last_bad_plcp_cnt_ofdm:\t\t %u\n",
+ data->last_bad_plcp_cnt_ofdm);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
+ data->last_fa_cnt_ofdm);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "last_bad_plcp_cnt_cck:\t\t %u\n",
+ data->last_bad_plcp_cnt_cck);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
+ data->last_fa_cnt_cck);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
+ data->nrg_curr_state);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
+ data->nrg_prev_state);
+ pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
+ for (cnt = 0; cnt < 10; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, " %u",
+ data->nrg_value[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
+ for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, " %u",
+ data->nrg_silence_rssi[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
+ data->nrg_silence_ref);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
+ data->nrg_energy_idx);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
+ data->nrg_silence_idx);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
+ data->nrg_th_cck);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "nrg_auto_corr_silence_diff:\t %u\n",
+ data->nrg_auto_corr_silence_diff);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
+ data->num_in_cck_no_fa);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
+ data->nrg_th_ofdm);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_chain_noise_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ int cnt = 0;
+ char *buf;
+ int bufsz = sizeof(struct il_chain_noise_data) * 4 + 100;
+ ssize_t ret;
+ struct il_chain_noise_data *data;
+
+ data = &il->chain_noise_data;
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf) {
+ IL_ERR("Can not allocate Buffer\n");
+ return -ENOMEM;
+ }
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
+ data->active_chains);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
+ data->chain_noise_a);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
+ data->chain_noise_b);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
+ data->chain_noise_c);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
+ data->chain_signal_a);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
+ data->chain_signal_b);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
+ data->chain_signal_c);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
+ data->beacon_count);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
+ for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, " %u",
+ data->disconn_array[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
+ for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, " %u",
+ data->delta_gain_code[cnt]);
+ }
+ pos += scnprintf(buf + pos, bufsz - pos, "\n");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
+ data->radio_write);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
+ data->state);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t
+il_dbgfs_power_save_status_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[60];
+ int pos = 0;
+ const size_t bufsz = sizeof(buf);
+ u32 pwrsave_status;
+
+ pwrsave_status =
+ _il_rd(il, CSR_GP_CNTRL) & CSR_GP_REG_POWER_SAVE_STATUS_MSK;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "%s\n",
+ (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
+ (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
+ (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
+ "error");
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_clear_ucode_stats_write(struct file *file,
+ const char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[8];
+ int buf_size;
+ int clear;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &clear) != 1)
+ return -EINVAL;
+
+ /* make request to uCode to retrieve stats information */
+ mutex_lock(&il->mutex);
+ il_send_stats_request(il, CMD_SYNC, true);
+ mutex_unlock(&il->mutex);
+
+ return count;
+}
+
+static ssize_t
+il_dbgfs_rxon_flags_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int len = 0;
+ char buf[20];
+
+ len = sprintf(buf, "0x%04X\n", le32_to_cpu(il->ctx.active.flags));
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t
+il_dbgfs_rxon_filter_flags_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int len = 0;
+ char buf[20];
+
+ len =
+ sprintf(buf, "0x%04X\n", le32_to_cpu(il->ctx.active.filter_flags));
+ return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t
+il_dbgfs_fh_reg_read(struct file *file, char __user *user_buf, size_t count,
+ loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char *buf = NULL;
+ int pos = 0;
+ ssize_t ret = -EFAULT;
+
+ if (il->cfg->ops->lib->dump_fh) {
+ ret = pos = il->cfg->ops->lib->dump_fh(il, &buf, true);
+ if (buf) {
+ ret =
+ simple_read_from_buffer(user_buf, count, ppos, buf,
+ pos);
+ kfree(buf);
+ }
+ }
+
+ return ret;
+}
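il_dbgfs_fh_reg_read() relies on a "callee allocates" contract for the per-device dump_fh hook: when the display flag is set, the hook allocates the buffer, fills it, and returns the byte count (or a negative errno), and the caller frees it. A minimal sketch of such a hook under those assumptions; buffer size and contents are invented for illustration:

/* Sketch of the contract assumed above; not the driver's actual dump_fh. */
static int il_example_dump_fh(struct il_priv *il, char **buf, bool display)
{
        int pos = 0;
        const size_t bufsz = 1024;      /* assumed size, illustration only */

        if (!display)
                return 0;

        *buf = kzalloc(bufsz, GFP_KERNEL);
        if (!*buf)
                return -ENOMEM;

        pos += scnprintf(*buf + pos, bufsz - pos,
                         "FH register dump would go here\n");
        return pos;     /* caller feeds this to simple_read_from_buffer() */
}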
+
+static ssize_t
+il_dbgfs_missed_beacon_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char buf[12];
+ const size_t bufsz = sizeof(buf);
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "%d\n",
+ il->missed_beacon_threshold);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_missed_beacon_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[8];
+ int buf_size;
+ int missed;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &missed) != 1)
+ return -EINVAL;
+
+ if (missed < IL_MISSED_BEACON_THRESHOLD_MIN ||
+ missed > IL_MISSED_BEACON_THRESHOLD_MAX)
+ il->missed_beacon_threshold = IL_MISSED_BEACON_THRESHOLD_DEF;
+ else
+ il->missed_beacon_threshold = missed;
+
+ return count;
+}
+
+static ssize_t
+il_dbgfs_force_reset_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ int pos = 0;
+ char buf[300];
+ const size_t bufsz = sizeof(buf);
+ struct il_force_reset *force_reset;
+
+ force_reset = &il->force_reset;
+
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\tnumber of reset requests: %d\n",
+ force_reset->reset_request_count);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "\tnumber of reset requests succeeded: %d\n",
+ force_reset->reset_success_count);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos,
+ "\tnumber of reset requests rejected: %d\n",
+ force_reset->reset_reject_count);
+ pos +=
+ scnprintf(buf + pos, bufsz - pos, "\treset duration: %lu\n",
+ force_reset->reset_duration);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t
+il_dbgfs_force_reset_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ int ret;
+ struct il_priv *il = file->private_data;
+
+ ret = il_force_reset(il, true);
+
+ return ret ? ret : count;
+}
+
+static ssize_t
+il_dbgfs_wd_timeout_write(struct file *file, const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct il_priv *il = file->private_data;
+ char buf[8];
+ int buf_size;
+ int timeout;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &timeout) != 1)
+ return -EINVAL;
+ if (timeout < 0 || timeout > IL_MAX_WD_TIMEOUT)
+ timeout = IL_DEF_WD_TIMEOUT;
+
+ il->cfg->base_params->wd_timeout = timeout;
+ il_setup_watchdog(il);
+ return count;
+}
+
+DEBUGFS_READ_FILE_OPS(rx_stats);
+DEBUGFS_READ_FILE_OPS(tx_stats);
+DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
+DEBUGFS_READ_FILE_OPS(rx_queue);
+DEBUGFS_READ_FILE_OPS(tx_queue);
+DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
+DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
+DEBUGFS_READ_FILE_OPS(ucode_general_stats);
+DEBUGFS_READ_FILE_OPS(sensitivity);
+DEBUGFS_READ_FILE_OPS(chain_noise);
+DEBUGFS_READ_FILE_OPS(power_save_status);
+DEBUGFS_WRITE_FILE_OPS(clear_ucode_stats);
+DEBUGFS_WRITE_FILE_OPS(clear_traffic_stats);
+DEBUGFS_READ_FILE_OPS(fh_reg);
+DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
+DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
+DEBUGFS_READ_FILE_OPS(rxon_flags);
+DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
+DEBUGFS_WRITE_FILE_OPS(wd_timeout);
+
+/*
+ * Create the debugfs files and directories
+ */
+int
+il_dbgfs_register(struct il_priv *il, const char *name)
+{
+ struct dentry *phyd = il->hw->wiphy->debugfsdir;
+ struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
+
+ dir_drv = debugfs_create_dir(name, phyd);
+ if (!dir_drv)
+ return -ENOMEM;
+
+ il->debugfs_dir = dir_drv;
+
+ dir_data = debugfs_create_dir("data", dir_drv);
+ if (!dir_data)
+ goto err;
+ dir_rf = debugfs_create_dir("rf", dir_drv);
+ if (!dir_rf)
+ goto err;
+ dir_debug = debugfs_create_dir("debug", dir_drv);
+ if (!dir_debug)
+ goto err;
+
+ DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(rx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(clear_ucode_stats, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(clear_traffic_stats, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
+
+ if (il->cfg->base_params->sensitivity_calib_by_driver)
+ DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
+ if (il->cfg->base_params->chain_noise_calib_by_driver)
+ DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
+ if (il->cfg->base_params->sensitivity_calib_by_driver)
+ DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
+ &il->disable_sens_cal);
+ if (il->cfg->base_params->chain_noise_calib_by_driver)
+ DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
+ &il->disable_chain_noise_cal);
+ DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf, &il->disable_tx_power_cal);
+ return 0;
+
+err:
+ IL_ERR("Can't create the debugfs directory\n");
+ il_dbgfs_unregister(il);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(il_dbgfs_register);
+
+/*
+ * Remove the debugfs files and directories
+ */
+void
+il_dbgfs_unregister(struct il_priv *il)
+{
+ if (!il->debugfs_dir)
+ return;
+
+ debugfs_remove_recursive(il->debugfs_dir);
+ il->debugfs_dir = NULL;
+}
+EXPORT_SYMBOL(il_dbgfs_unregister);
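The exported pair is meant to be driven from the per-device front ends, which are outside this hunk: register once the wiphy and its debugfs directory exist, unregister on teardown. A hypothetical caller, with DRV_NAME standing in for whatever name the front end actually passes:

/* Hypothetical caller; the real hook points live in the front-end drivers. */
static void il_example_setup_debugfs(struct il_priv *il)
{
        int ret = il_dbgfs_register(il, DRV_NAME);      /* DRV_NAME assumed */

        if (ret)
                IL_ERR("failed to create debugfs files, error %d\n", ret);
}

static void il_example_teardown_debugfs(struct il_priv *il)
{
        il_dbgfs_unregister(il);        /* no-op if registration failed */
}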
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
deleted file mode 100644
index cfabb38..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.c
+++ /dev/null
@@ -1,523 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include "iwl-3945-debugfs.h"
-
-
-static int iwl3945_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
-{
- int p = 0;
-
- p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n",
- le32_to_cpu(priv->_3945.statistics.flag));
- if (le32_to_cpu(priv->_3945.statistics.flag) &
- UCODE_STATISTICS_CLEAR_MSK)
- p += scnprintf(buf + p, bufsz - p,
- "\tStatistics have been cleared\n");
- p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
- (le32_to_cpu(priv->_3945.statistics.flag) &
- UCODE_STATISTICS_FREQUENCY_MSK)
- ? "2.4 GHz" : "5.2 GHz");
- p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
- (le32_to_cpu(priv->_3945.statistics.flag) &
- UCODE_STATISTICS_NARROW_BAND_MSK)
- ? "enabled" : "disabled");
- return p;
-}
-
-ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = sizeof(struct iwl39_statistics_rx_phy) * 40 +
- sizeof(struct iwl39_statistics_rx_non_phy) * 40 + 400;
- ssize_t ret;
- struct iwl39_statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm,
- *max_ofdm;
- struct iwl39_statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
- struct iwl39_statistics_rx_non_phy *general, *accum_general;
- struct iwl39_statistics_rx_non_phy *delta_general, *max_general;
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- /*
- * The statistic information display here is based on
- * the last statistics notification from uCode
- * might not reflect the current uCode activity
- */
- ofdm = &priv->_3945.statistics.rx.ofdm;
- cck = &priv->_3945.statistics.rx.cck;
- general = &priv->_3945.statistics.rx.general;
- accum_ofdm = &priv->_3945.accum_statistics.rx.ofdm;
- accum_cck = &priv->_3945.accum_statistics.rx.cck;
- accum_general = &priv->_3945.accum_statistics.rx.general;
- delta_ofdm = &priv->_3945.delta_statistics.rx.ofdm;
- delta_cck = &priv->_3945.delta_statistics.rx.cck;
- delta_general = &priv->_3945.delta_statistics.rx.general;
- max_ofdm = &priv->_3945.max_delta.rx.ofdm;
- max_cck = &priv->_3945.max_delta.rx.cck;
- max_general = &priv->_3945.max_delta.rx.general;
-
- pos += iwl3945_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
- "acumulative delta max\n",
- "Statistics_Rx - OFDM:");
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
- accum_ofdm->ina_cnt,
- delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "fina_cnt:",
- le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
- delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n", "plcp_err:",
- le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
- delta_ofdm->plcp_err, max_ofdm->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n", "crc32_err:",
- le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
- delta_ofdm->crc32_err, max_ofdm->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n", "overrun_err:",
- le32_to_cpu(ofdm->overrun_err),
- accum_ofdm->overrun_err, delta_ofdm->overrun_err,
- max_ofdm->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "early_overrun_err:",
- le32_to_cpu(ofdm->early_overrun_err),
- accum_ofdm->early_overrun_err,
- delta_ofdm->early_overrun_err,
- max_ofdm->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "crc32_good:", le32_to_cpu(ofdm->crc32_good),
- accum_ofdm->crc32_good, delta_ofdm->crc32_good,
- max_ofdm->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n", "false_alarm_cnt:",
- le32_to_cpu(ofdm->false_alarm_cnt),
- accum_ofdm->false_alarm_cnt,
- delta_ofdm->false_alarm_cnt,
- max_ofdm->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "fina_sync_err_cnt:",
- le32_to_cpu(ofdm->fina_sync_err_cnt),
- accum_ofdm->fina_sync_err_cnt,
- delta_ofdm->fina_sync_err_cnt,
- max_ofdm->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "sfd_timeout:",
- le32_to_cpu(ofdm->sfd_timeout),
- accum_ofdm->sfd_timeout,
- delta_ofdm->sfd_timeout,
- max_ofdm->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "fina_timeout:",
- le32_to_cpu(ofdm->fina_timeout),
- accum_ofdm->fina_timeout,
- delta_ofdm->fina_timeout,
- max_ofdm->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "unresponded_rts:",
- le32_to_cpu(ofdm->unresponded_rts),
- accum_ofdm->unresponded_rts,
- delta_ofdm->unresponded_rts,
- max_ofdm->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "rxe_frame_lmt_ovrun:",
- le32_to_cpu(ofdm->rxe_frame_limit_overrun),
- accum_ofdm->rxe_frame_limit_overrun,
- delta_ofdm->rxe_frame_limit_overrun,
- max_ofdm->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "sent_ack_cnt:",
- le32_to_cpu(ofdm->sent_ack_cnt),
- accum_ofdm->sent_ack_cnt,
- delta_ofdm->sent_ack_cnt,
- max_ofdm->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "sent_cts_cnt:",
- le32_to_cpu(ofdm->sent_cts_cnt),
- accum_ofdm->sent_cts_cnt,
- delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
-
- pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
- "acumulative delta max\n",
- "Statistics_Rx - CCK:");
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "ina_cnt:",
- le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
- delta_cck->ina_cnt, max_cck->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "fina_cnt:",
- le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
- delta_cck->fina_cnt, max_cck->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "plcp_err:",
- le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
- delta_cck->plcp_err, max_cck->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "crc32_err:",
- le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
- delta_cck->crc32_err, max_cck->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "overrun_err:",
- le32_to_cpu(cck->overrun_err),
- accum_cck->overrun_err,
- delta_cck->overrun_err, max_cck->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "early_overrun_err:",
- le32_to_cpu(cck->early_overrun_err),
- accum_cck->early_overrun_err,
- delta_cck->early_overrun_err,
- max_cck->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "crc32_good:",
- le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
- delta_cck->crc32_good,
- max_cck->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "false_alarm_cnt:",
- le32_to_cpu(cck->false_alarm_cnt),
- accum_cck->false_alarm_cnt,
- delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "fina_sync_err_cnt:",
- le32_to_cpu(cck->fina_sync_err_cnt),
- accum_cck->fina_sync_err_cnt,
- delta_cck->fina_sync_err_cnt,
- max_cck->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "sfd_timeout:",
- le32_to_cpu(cck->sfd_timeout),
- accum_cck->sfd_timeout,
- delta_cck->sfd_timeout, max_cck->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "fina_timeout:",
- le32_to_cpu(cck->fina_timeout),
- accum_cck->fina_timeout,
- delta_cck->fina_timeout, max_cck->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "unresponded_rts:",
- le32_to_cpu(cck->unresponded_rts),
- accum_cck->unresponded_rts,
- delta_cck->unresponded_rts,
- max_cck->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "rxe_frame_lmt_ovrun:",
- le32_to_cpu(cck->rxe_frame_limit_overrun),
- accum_cck->rxe_frame_limit_overrun,
- delta_cck->rxe_frame_limit_overrun,
- max_cck->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "sent_ack_cnt:",
- le32_to_cpu(cck->sent_ack_cnt),
- accum_cck->sent_ack_cnt,
- delta_cck->sent_ack_cnt,
- max_cck->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "sent_cts_cnt:",
- le32_to_cpu(cck->sent_cts_cnt),
- accum_cck->sent_cts_cnt,
- delta_cck->sent_cts_cnt,
- max_cck->sent_cts_cnt);
-
- pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
- "acumulative delta max\n",
- "Statistics_Rx - GENERAL:");
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "bogus_cts:",
- le32_to_cpu(general->bogus_cts),
- accum_general->bogus_cts,
- delta_general->bogus_cts, max_general->bogus_cts);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "bogus_ack:",
- le32_to_cpu(general->bogus_ack),
- accum_general->bogus_ack,
- delta_general->bogus_ack, max_general->bogus_ack);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "non_bssid_frames:",
- le32_to_cpu(general->non_bssid_frames),
- accum_general->non_bssid_frames,
- delta_general->non_bssid_frames,
- max_general->non_bssid_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "filtered_frames:",
- le32_to_cpu(general->filtered_frames),
- accum_general->filtered_frames,
- delta_general->filtered_frames,
- max_general->filtered_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "non_channel_beacons:",
- le32_to_cpu(general->non_channel_beacons),
- accum_general->non_channel_beacons,
- delta_general->non_channel_beacons,
- max_general->non_channel_beacons);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = (sizeof(struct iwl39_statistics_tx) * 48) + 250;
- ssize_t ret;
- struct iwl39_statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- /*
- * The statistic information display here is based on
- * the last statistics notification from uCode
- * might not reflect the current uCode activity
- */
- tx = &priv->_3945.statistics.tx;
- accum_tx = &priv->_3945.accum_statistics.tx;
- delta_tx = &priv->_3945.delta_statistics.tx;
- max_tx = &priv->_3945.max_delta.tx;
- pos += iwl3945_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
- "acumulative delta max\n",
- "Statistics_Tx:");
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "preamble:",
- le32_to_cpu(tx->preamble_cnt),
- accum_tx->preamble_cnt,
- delta_tx->preamble_cnt, max_tx->preamble_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "rx_detected_cnt:",
- le32_to_cpu(tx->rx_detected_cnt),
- accum_tx->rx_detected_cnt,
- delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "bt_prio_defer_cnt:",
- le32_to_cpu(tx->bt_prio_defer_cnt),
- accum_tx->bt_prio_defer_cnt,
- delta_tx->bt_prio_defer_cnt,
- max_tx->bt_prio_defer_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "bt_prio_kill_cnt:",
- le32_to_cpu(tx->bt_prio_kill_cnt),
- accum_tx->bt_prio_kill_cnt,
- delta_tx->bt_prio_kill_cnt,
- max_tx->bt_prio_kill_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "few_bytes_cnt:",
- le32_to_cpu(tx->few_bytes_cnt),
- accum_tx->few_bytes_cnt,
- delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "cts_timeout:",
- le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
- delta_tx->cts_timeout, max_tx->cts_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "ack_timeout:",
- le32_to_cpu(tx->ack_timeout),
- accum_tx->ack_timeout,
- delta_tx->ack_timeout, max_tx->ack_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "expected_ack_cnt:",
- le32_to_cpu(tx->expected_ack_cnt),
- accum_tx->expected_ack_cnt,
- delta_tx->expected_ack_cnt,
- max_tx->expected_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "actual_ack_cnt:",
- le32_to_cpu(tx->actual_ack_cnt),
- accum_tx->actual_ack_cnt,
- delta_tx->actual_ack_cnt,
- max_tx->actual_ack_cnt);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-ssize_t iwl3945_ucode_general_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = sizeof(struct iwl39_statistics_general) * 10 + 300;
- ssize_t ret;
- struct iwl39_statistics_general *general, *accum_general;
- struct iwl39_statistics_general *delta_general, *max_general;
- struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
- struct iwl39_statistics_div *div, *accum_div, *delta_div, *max_div;
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- /*
- * The statistic information display here is based on
- * the last statistics notification from uCode
- * might not reflect the current uCode activity
- */
- general = &priv->_3945.statistics.general;
- dbg = &priv->_3945.statistics.general.dbg;
- div = &priv->_3945.statistics.general.div;
- accum_general = &priv->_3945.accum_statistics.general;
- delta_general = &priv->_3945.delta_statistics.general;
- max_general = &priv->_3945.max_delta.general;
- accum_dbg = &priv->_3945.accum_statistics.general.dbg;
- delta_dbg = &priv->_3945.delta_statistics.general.dbg;
- max_dbg = &priv->_3945.max_delta.general.dbg;
- accum_div = &priv->_3945.accum_statistics.general.div;
- delta_div = &priv->_3945.delta_statistics.general.div;
- max_div = &priv->_3945.max_delta.general.div;
- pos += iwl3945_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, "%-32s current"
- "acumulative delta max\n",
- "Statistics_General:");
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "burst_check:",
- le32_to_cpu(dbg->burst_check),
- accum_dbg->burst_check,
- delta_dbg->burst_check, max_dbg->burst_check);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "burst_count:",
- le32_to_cpu(dbg->burst_count),
- accum_dbg->burst_count,
- delta_dbg->burst_count, max_dbg->burst_count);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "sleep_time:",
- le32_to_cpu(general->sleep_time),
- accum_general->sleep_time,
- delta_general->sleep_time, max_general->sleep_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "slots_out:",
- le32_to_cpu(general->slots_out),
- accum_general->slots_out,
- delta_general->slots_out, max_general->slots_out);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "slots_idle:",
- le32_to_cpu(general->slots_idle),
- accum_general->slots_idle,
- delta_general->slots_idle, max_general->slots_idle);
- pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
- le32_to_cpu(general->ttl_timestamp));
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "tx_on_a:",
- le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
- delta_div->tx_on_a, max_div->tx_on_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "tx_on_b:",
- le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
- delta_div->tx_on_b, max_div->tx_on_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "exec_time:",
- le32_to_cpu(div->exec_time), accum_div->exec_time,
- delta_div->exec_time, max_div->exec_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- " %-30s %10u %10u %10u %10u\n",
- "probe_time:",
- le32_to_cpu(div->probe_time), accum_div->probe_time,
- delta_div->probe_time, max_div->probe_time);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
deleted file mode 100644
index 8fef4b3..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-debugfs.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-debug.h"
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-ssize_t iwl3945_ucode_rx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
-ssize_t iwl3945_ucode_tx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
-ssize_t iwl3945_ucode_general_stats_read(struct file *file,
- char __user *user_buf, size_t count,
- loff_t *ppos);
-#else
-static ssize_t iwl3945_ucode_rx_stats_read(struct file *file,
- char __user *user_buf, size_t count,
- loff_t *ppos)
-{
- return 0;
-}
-static ssize_t iwl3945_ucode_tx_stats_read(struct file *file,
- char __user *user_buf, size_t count,
- loff_t *ppos)
-{
- return 0;
-}
-static ssize_t iwl3945_ucode_general_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- return 0;
-}
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-fh.h b/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
deleted file mode 100644
index 836c991..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-fh.h
+++ /dev/null
@@ -1,187 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#ifndef __iwl_3945_fh_h__
-#define __iwl_3945_fh_h__
-
-/************************************/
-/* iwl3945 Flow Handler Definitions */
-/************************************/
-
-/**
- * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
- * Addresses are offsets from device's PCI hardware base address.
- */
-#define FH39_MEM_LOWER_BOUND (0x0800)
-#define FH39_MEM_UPPER_BOUND (0x1000)
-
-#define FH39_CBCC_TABLE (FH39_MEM_LOWER_BOUND + 0x140)
-#define FH39_TFDB_TABLE (FH39_MEM_LOWER_BOUND + 0x180)
-#define FH39_RCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x400)
-#define FH39_RSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x4c0)
-#define FH39_TCSR_TABLE (FH39_MEM_LOWER_BOUND + 0x500)
-#define FH39_TSSR_TABLE (FH39_MEM_LOWER_BOUND + 0x680)
-
-/* TFDB (Transmit Frame Buffer Descriptor) */
-#define FH39_TFDB(_ch, buf) (FH39_TFDB_TABLE + \
- ((_ch) * 2 + (buf)) * 0x28)
-#define FH39_TFDB_CHNL_BUF_CTRL_REG(_ch) (FH39_TFDB_TABLE + 0x50 * (_ch))
-
-/* CBCC channel is [0,2] */
-#define FH39_CBCC(_ch) (FH39_CBCC_TABLE + (_ch) * 0x8)
-#define FH39_CBCC_CTRL(_ch) (FH39_CBCC(_ch) + 0x00)
-#define FH39_CBCC_BASE(_ch) (FH39_CBCC(_ch) + 0x04)
-
-/* RCSR channel is [0,2] */
-#define FH39_RCSR(_ch) (FH39_RCSR_TABLE + (_ch) * 0x40)
-#define FH39_RCSR_CONFIG(_ch) (FH39_RCSR(_ch) + 0x00)
-#define FH39_RCSR_RBD_BASE(_ch) (FH39_RCSR(_ch) + 0x04)
-#define FH39_RCSR_WPTR(_ch) (FH39_RCSR(_ch) + 0x20)
-#define FH39_RCSR_RPTR_ADDR(_ch) (FH39_RCSR(_ch) + 0x24)
-
-#define FH39_RSCSR_CHNL0_WPTR (FH39_RCSR_WPTR(0))
-
-/* RSSR */
-#define FH39_RSSR_CTRL (FH39_RSSR_TABLE + 0x000)
-#define FH39_RSSR_STATUS (FH39_RSSR_TABLE + 0x004)
-
-/* TCSR */
-#define FH39_TCSR(_ch) (FH39_TCSR_TABLE + (_ch) * 0x20)
-#define FH39_TCSR_CONFIG(_ch) (FH39_TCSR(_ch) + 0x00)
-#define FH39_TCSR_CREDIT(_ch) (FH39_TCSR(_ch) + 0x04)
-#define FH39_TCSR_BUFF_STTS(_ch) (FH39_TCSR(_ch) + 0x08)
-
-/* TSSR */
-#define FH39_TSSR_CBB_BASE (FH39_TSSR_TABLE + 0x000)
-#define FH39_TSSR_MSG_CONFIG (FH39_TSSR_TABLE + 0x008)
-#define FH39_TSSR_TX_STATUS (FH39_TSSR_TABLE + 0x010)
-
-
-/* DBM */
-
-#define FH39_SRVC_CHNL (6)
-
-#define FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE (20)
-#define FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH (4)
-
-#define FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN (0x08000000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE (0x80000000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE (0x20000000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 (0x01000000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST (0x00001000)
-
-#define FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH (0x00000000)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
-#define FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRIVER (0x00000001)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE_VAL (0x00000000)
-#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL (0x00000008)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
-
-#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
-#define FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
-
-#define FH39_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00004000)
-
-#define FH39_TCSR_CHNL_TX_BUF_STS_REG_BIT_TFDB_WPTR (0x00000001)
-
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON (0xFF000000)
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON (0x00FF0000)
-
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B (0x00000400)
-
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON (0x00000100)
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON (0x00000080)
-
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH (0x00000020)
-#define FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH (0x00000005)
-
-#define FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) (BIT(_ch) << 24)
-#define FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch) (BIT(_ch) << 16)
-
-#define FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_ch) \
- (FH39_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_ch) | \
- FH39_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_ch))
-
-#define FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
-
-struct iwl3945_tfd_tb {
- __le32 addr;
- __le32 len;
-} __packed;
-
-struct iwl3945_tfd {
- __le32 control_flags;
- struct iwl3945_tfd_tb tbs[4];
- u8 __pad[28];
-} __packed;
-
-
-#endif /* __iwl_3945_fh_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h b/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
deleted file mode 100644
index 5c3a68d..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-hw.h
+++ /dev/null
@@ -1,291 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-3945-hw.h) only for hardware-related definitions.
- * Please use iwl-commands.h for uCode API definitions.
- * Please use iwl-3945.h for driver implementation definitions.
- */
-
-#ifndef __iwl_3945_hw__
-#define __iwl_3945_hw__
-
-#include "iwl-eeprom.h"
-
-/* RSSI to dBm */
-#define IWL39_RSSI_OFFSET 95
-
-/*
- * EEPROM related constants, enums, and structures.
- */
-#define EEPROM_SKU_CAP_OP_MODE_MRC (1 << 7)
-
-/*
- * Mapping of a Tx power level, at factory calibration temperature,
- * to a radio/DSP gain table index.
- * One for each of 5 "sample" power levels in each band.
- * v_det is measured at the factory, using the 3945's built-in power amplifier
- * (PA) output voltage detector. This same detector is used during Tx of
- * long packets in normal operation to provide feedback as to proper output
- * level.
- * Data copied from EEPROM.
- * DO NOT ALTER THIS STRUCTURE!!!
- */
-struct iwl3945_eeprom_txpower_sample {
- u8 gain_index; /* index into power (gain) setup table ... */
- s8 power; /* ... for this pwr level for this chnl group */
- u16 v_det; /* PA output voltage */
-} __packed;
-
-/*
- * Mappings of Tx power levels -> nominal radio/DSP gain table indexes.
- * One for each channel group (a.k.a. "band") (1 for BG, 4 for A).
- * Tx power setup code interpolates between the 5 "sample" power levels
- * to determine the nominal setup for a requested power level.
- * Data copied from EEPROM.
- * DO NOT ALTER THIS STRUCTURE!!!
- */
-struct iwl3945_eeprom_txpower_group {
- struct iwl3945_eeprom_txpower_sample samples[5]; /* 5 power levels */
- s32 a, b, c, d, e; /* coefficients for voltage->power
- * formula (signed) */
- s32 Fa, Fb, Fc, Fd, Fe; /* these modify coeffs based on
- * frequency (signed) */
- s8 saturation_power; /* highest power possible by h/w in this
- * band */
- u8 group_channel; /* "representative" channel # in this band */
- s16 temperature; /* h/w temperature at factory calib this band
- * (signed) */
-} __packed;
-
-/*
- * Temperature-based Tx-power compensation data, not band-specific.
- * These coefficients are use to modify a/b/c/d/e coeffs based on
- * difference between current temperature and factory calib temperature.
- * Data copied from EEPROM.
- */
-struct iwl3945_eeprom_temperature_corr {
- u32 Ta;
- u32 Tb;
- u32 Tc;
- u32 Td;
- u32 Te;
-} __packed;
-
-/*
- * EEPROM map
- */
-struct iwl3945_eeprom {
- u8 reserved0[16];
- u16 device_id; /* abs.ofs: 16 */
- u8 reserved1[2];
- u16 pmc; /* abs.ofs: 20 */
- u8 reserved2[20];
- u8 mac_address[6]; /* abs.ofs: 42 */
- u8 reserved3[58];
- u16 board_revision; /* abs.ofs: 106 */
- u8 reserved4[11];
- u8 board_pba_number[9]; /* abs.ofs: 119 */
- u8 reserved5[8];
- u16 version; /* abs.ofs: 136 */
- u8 sku_cap; /* abs.ofs: 138 */
- u8 leds_mode; /* abs.ofs: 139 */
- u16 oem_mode;
- u16 wowlan_mode; /* abs.ofs: 142 */
- u16 leds_time_interval; /* abs.ofs: 144 */
- u8 leds_off_time; /* abs.ofs: 146 */
- u8 leds_on_time; /* abs.ofs: 147 */
- u8 almgor_m_version; /* abs.ofs: 148 */
- u8 antenna_switch_type; /* abs.ofs: 149 */
- u8 reserved6[42];
- u8 sku_id[4]; /* abs.ofs: 192 */
-
-/*
- * Per-channel regulatory data.
- *
- * Each channel that *might* be supported by 3945 has a fixed location
- * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
- * txpower (MSB).
- *
- * Entries immediately below are for 20 MHz channel width.
- *
- * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
- */
- u16 band_1_count; /* abs.ofs: 196 */
- struct iwl_eeprom_channel band_1_channels[14]; /* abs.ofs: 198 */
-
-/*
- * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
- * 5.0 GHz channels 7, 8, 11, 12, 16
- * (4915-5080MHz) (none of these is ever supported)
- */
- u16 band_2_count; /* abs.ofs: 226 */
- struct iwl_eeprom_channel band_2_channels[13]; /* abs.ofs: 228 */
-
-/*
- * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
- * (5170-5320MHz)
- */
- u16 band_3_count; /* abs.ofs: 254 */
- struct iwl_eeprom_channel band_3_channels[12]; /* abs.ofs: 256 */
-
-/*
- * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
- * (5500-5700MHz)
- */
- u16 band_4_count; /* abs.ofs: 280 */
- struct iwl_eeprom_channel band_4_channels[11]; /* abs.ofs: 282 */
-
-/*
- * 5.7 GHz channels 145, 149, 153, 157, 161, 165
- * (5725-5825MHz)
- */
- u16 band_5_count; /* abs.ofs: 304 */
- struct iwl_eeprom_channel band_5_channels[6]; /* abs.ofs: 306 */
-
- u8 reserved9[194];
-
-/*
- * 3945 Txpower calibration data.
- */
-#define IWL_NUM_TX_CALIB_GROUPS 5
- struct iwl3945_eeprom_txpower_group groups[IWL_NUM_TX_CALIB_GROUPS];
-/* abs.ofs: 512 */
- struct iwl3945_eeprom_temperature_corr corrections; /* abs.ofs: 832 */
- u8 reserved16[172]; /* fill out to full 1024 byte block */
-} __packed;
-
-#define IWL3945_EEPROM_IMG_SIZE 1024
-
-/* End of EEPROM */
-
-#define PCI_CFG_REV_ID_BIT_BASIC_SKU (0x40) /* bit 6 */
-#define PCI_CFG_REV_ID_BIT_RTP (0x80) /* bit 7 */
-
-/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
-#define IWL39_NUM_QUEUES 5
-#define IWL39_CMD_QUEUE_NUM 4
-
-#define IWL_DEFAULT_TX_RETRY 15
-
-/*********************************************/
-
-#define RFD_SIZE 4
-#define NUM_TFD_CHUNKS 4
-
-#define RX_QUEUE_SIZE 256
-#define RX_QUEUE_MASK 255
-#define RX_QUEUE_SIZE_LOG 8
-
-#define U32_PAD(n) ((4-(n))&0x3)
-
-#define TFD_CTL_COUNT_SET(n) (n << 24)
-#define TFD_CTL_COUNT_GET(ctl) ((ctl >> 24) & 7)
-#define TFD_CTL_PAD_SET(n) (n << 28)
-#define TFD_CTL_PAD_GET(ctl) (ctl >> 28)
-
-/* Sizes and addresses for instruction and data memory (SRAM) in
- * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
-#define IWL39_RTC_INST_LOWER_BOUND (0x000000)
-#define IWL39_RTC_INST_UPPER_BOUND (0x014000)
-
-#define IWL39_RTC_DATA_LOWER_BOUND (0x800000)
-#define IWL39_RTC_DATA_UPPER_BOUND (0x808000)
-
-#define IWL39_RTC_INST_SIZE (IWL39_RTC_INST_UPPER_BOUND - \
- IWL39_RTC_INST_LOWER_BOUND)
-#define IWL39_RTC_DATA_SIZE (IWL39_RTC_DATA_UPPER_BOUND - \
- IWL39_RTC_DATA_LOWER_BOUND)
-
-#define IWL39_MAX_INST_SIZE IWL39_RTC_INST_SIZE
-#define IWL39_MAX_DATA_SIZE IWL39_RTC_DATA_SIZE
-
-/* Size of uCode instruction memory in bootstrap state machine */
-#define IWL39_MAX_BSM_SIZE IWL39_RTC_INST_SIZE
-
-static inline int iwl3945_hw_valid_rtc_data_addr(u32 addr)
-{
- return (addr >= IWL39_RTC_DATA_LOWER_BOUND) &&
- (addr < IWL39_RTC_DATA_UPPER_BOUND);
-}
-
-/* Base physical address of iwl3945_shared is provided to FH_TSSR_CBB_BASE
- * and &iwl3945_shared.rx_read_ptr[0] is provided to FH_RCSR_RPTR_ADDR(0) */
-struct iwl3945_shared {
- __le32 tx_base_ptr[8];
-} __packed;
-
-static inline u8 iwl3945_hw_get_rate(__le16 rate_n_flags)
-{
- return le16_to_cpu(rate_n_flags) & 0xFF;
-}
-
-static inline u16 iwl3945_hw_get_rate_n_flags(__le16 rate_n_flags)
-{
- return le16_to_cpu(rate_n_flags);
-}
-
-static inline __le16 iwl3945_hw_set_rate_n_flags(u8 rate, u16 flags)
-{
- return cpu_to_le16((u16)rate|flags);
-}
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.c b/drivers/net/wireless/iwlegacy/iwl-3945-led.c
deleted file mode 100644
index 7a7f0f3..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-led.c
+++ /dev/null
@@ -1,63 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-
-#include "iwl-commands.h"
-#include "iwl-3945.h"
-#include "iwl-core.h"
-#include "iwl-dev.h"
-#include "iwl-3945-led.h"
-
-
-/* Send led command */
-static int iwl3945_send_led_cmd(struct iwl_priv *priv,
- struct iwl_led_cmd *led_cmd)
-{
- struct iwl_host_cmd cmd = {
- .id = REPLY_LEDS_CMD,
- .len = sizeof(struct iwl_led_cmd),
- .data = led_cmd,
- .flags = CMD_ASYNC,
- .callback = NULL,
- };
-
- return iwl_legacy_send_cmd(priv, &cmd);
-}
-
-const struct iwl_led_ops iwl3945_led_ops = {
- .cmd = iwl3945_send_led_cmd,
-};
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-led.h b/drivers/net/wireless/iwlegacy/iwl-3945-led.h
deleted file mode 100644
index 9671627..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-led.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_3945_led_h__
-#define __iwl_3945_led_h__
-
-extern const struct iwl_led_ops iwl3945_led_ops;
-
-#endif /* __iwl_3945_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c b/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
deleted file mode 100644
index 8faeaf2..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945-rs.c
+++ /dev/null
@@ -1,996 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <net/mac80211.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-
-#include <linux/workqueue.h>
-
-#include "iwl-commands.h"
-#include "iwl-3945.h"
-#include "iwl-sta.h"
-
-#define RS_NAME "iwl-3945-rs"
-
-static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT_3945] = {
- 7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
-};
-
-static s32 iwl3945_expected_tpt_g_prot[IWL_RATE_COUNT_3945] = {
- 7, 13, 35, 58, 0, 0, 0, 80, 93, 113, 123, 125
-};
-
-static s32 iwl3945_expected_tpt_a[IWL_RATE_COUNT_3945] = {
- 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186
-};
-
-static s32 iwl3945_expected_tpt_b[IWL_RATE_COUNT_3945] = {
- 7, 13, 35, 58, 0, 0, 0, 0, 0, 0, 0, 0
-};
-
-struct iwl3945_tpt_entry {
- s8 min_rssi;
- u8 index;
-};
-
-static struct iwl3945_tpt_entry iwl3945_tpt_table_a[] = {
- {-60, IWL_RATE_54M_INDEX},
- {-64, IWL_RATE_48M_INDEX},
- {-72, IWL_RATE_36M_INDEX},
- {-80, IWL_RATE_24M_INDEX},
- {-84, IWL_RATE_18M_INDEX},
- {-85, IWL_RATE_12M_INDEX},
- {-87, IWL_RATE_9M_INDEX},
- {-89, IWL_RATE_6M_INDEX}
-};
-
-static struct iwl3945_tpt_entry iwl3945_tpt_table_g[] = {
- {-60, IWL_RATE_54M_INDEX},
- {-64, IWL_RATE_48M_INDEX},
- {-68, IWL_RATE_36M_INDEX},
- {-80, IWL_RATE_24M_INDEX},
- {-84, IWL_RATE_18M_INDEX},
- {-85, IWL_RATE_12M_INDEX},
- {-86, IWL_RATE_11M_INDEX},
- {-88, IWL_RATE_5M_INDEX},
- {-90, IWL_RATE_2M_INDEX},
- {-92, IWL_RATE_1M_INDEX}
-};
-
-#define IWL_RATE_MAX_WINDOW 62
-#define IWL_RATE_FLUSH (3*HZ)
-#define IWL_RATE_WIN_FLUSH (HZ/2)
-#define IWL39_RATE_HIGH_TH 11520
-#define IWL_SUCCESS_UP_TH 8960
-#define IWL_SUCCESS_DOWN_TH 10880
-#define IWL_RATE_MIN_FAILURE_TH 6
-#define IWL_RATE_MIN_SUCCESS_TH 8
-#define IWL_RATE_DECREASE_TH 1920
-#define IWL_RATE_RETRY_TH 15
-
-static u8 iwl3945_get_rate_index_by_rssi(s32 rssi, enum ieee80211_band band)
-{
- u32 index = 0;
- u32 table_size = 0;
- struct iwl3945_tpt_entry *tpt_table = NULL;
-
- if ((rssi < IWL_MIN_RSSI_VAL) || (rssi > IWL_MAX_RSSI_VAL))
- rssi = IWL_MIN_RSSI_VAL;
-
- switch (band) {
- case IEEE80211_BAND_2GHZ:
- tpt_table = iwl3945_tpt_table_g;
- table_size = ARRAY_SIZE(iwl3945_tpt_table_g);
- break;
-
- case IEEE80211_BAND_5GHZ:
- tpt_table = iwl3945_tpt_table_a;
- table_size = ARRAY_SIZE(iwl3945_tpt_table_a);
- break;
-
- default:
- BUG();
- break;
- }
-
- while ((index < table_size) && (rssi < tpt_table[index].min_rssi))
- index++;
-
- index = min(index, (table_size - 1));
-
- return tpt_table[index].index;
-}
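The lookup above walks the per-band threshold table from strongest to weakest signal and clamps to the last (lowest-rate) entry. A minimal standalone sketch of the same walk, using a hypothetical table type and helper name rather than the driver's own:

/* Illustrative sketch of the threshold walk above; the type and helper
 * name are hypothetical. */
struct tpt_entry { signed char min_rssi; unsigned char index; };

static unsigned char rate_index_by_rssi(int rssi,
					const struct tpt_entry *tbl,
					unsigned int n)
{
	unsigned int i = 0;

	/* advance while the signal is weaker than this entry's requirement */
	while (i < n && rssi < tbl[i].min_rssi)
		i++;

	/* clamp to the last entry, mirroring min(index, table_size - 1) */
	if (i >= n)
		i = n - 1;

	return tbl[i].index;
}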
-
-static void iwl3945_clear_window(struct iwl3945_rate_scale_data *window)
-{
- window->data = 0;
- window->success_counter = 0;
- window->success_ratio = -1;
- window->counter = 0;
- window->average_tpt = IWL_INVALID_VALUE;
- window->stamp = 0;
-}
-
-/**
- * iwl3945_rate_scale_flush_windows - flush out the rate scale windows
- *
- * Returns the number of windows that have gathered data but were
- * not flushed. If there were any that were not flushed, then
- * reschedule the rate flushing routine.
- */
-static int iwl3945_rate_scale_flush_windows(struct iwl3945_rs_sta *rs_sta)
-{
- int unflushed = 0;
- int i;
- unsigned long flags;
- struct iwl_priv *priv __maybe_unused = rs_sta->priv;
-
- /*
- * For each rate, if we have collected data on that rate
- * and it has been more than IWL_RATE_WIN_FLUSH
- * since we flushed, clear out the gathered statistics
- */
- for (i = 0; i < IWL_RATE_COUNT_3945; i++) {
- if (!rs_sta->win[i].counter)
- continue;
-
- spin_lock_irqsave(&rs_sta->lock, flags);
- if (time_after(jiffies, rs_sta->win[i].stamp +
- IWL_RATE_WIN_FLUSH)) {
- IWL_DEBUG_RATE(priv, "flushing %d samples of rate "
- "index %d\n",
- rs_sta->win[i].counter, i);
- iwl3945_clear_window(&rs_sta->win[i]);
- } else
- unflushed++;
- spin_unlock_irqrestore(&rs_sta->lock, flags);
- }
-
- return unflushed;
-}
-
-#define IWL_RATE_FLUSH_MAX 5000 /* msec */
-#define IWL_RATE_FLUSH_MIN 50 /* msec */
-#define IWL_AVERAGE_PACKETS 1500
-
-static void iwl3945_bg_rate_scale_flush(unsigned long data)
-{
- struct iwl3945_rs_sta *rs_sta = (void *)data;
- struct iwl_priv *priv __maybe_unused = rs_sta->priv;
- int unflushed = 0;
- unsigned long flags;
- u32 packet_count, duration, pps;
-
- IWL_DEBUG_RATE(priv, "enter\n");
-
- unflushed = iwl3945_rate_scale_flush_windows(rs_sta);
-
- spin_lock_irqsave(&rs_sta->lock, flags);
-
-	/* Number of packets Tx'd since last time this timer ran */
- packet_count = (rs_sta->tx_packets - rs_sta->last_tx_packets) + 1;
-
- rs_sta->last_tx_packets = rs_sta->tx_packets + 1;
-
- if (unflushed) {
- duration =
- jiffies_to_msecs(jiffies - rs_sta->last_partial_flush);
-
- IWL_DEBUG_RATE(priv, "Tx'd %d packets in %dms\n",
- packet_count, duration);
-
- /* Determine packets per second */
- if (duration)
- pps = (packet_count * 1000) / duration;
- else
- pps = 0;
-
- if (pps) {
- duration = (IWL_AVERAGE_PACKETS * 1000) / pps;
- if (duration < IWL_RATE_FLUSH_MIN)
- duration = IWL_RATE_FLUSH_MIN;
- else if (duration > IWL_RATE_FLUSH_MAX)
- duration = IWL_RATE_FLUSH_MAX;
- } else
- duration = IWL_RATE_FLUSH_MAX;
-
- rs_sta->flush_time = msecs_to_jiffies(duration);
-
- IWL_DEBUG_RATE(priv, "new flush period: %d msec ave %d\n",
- duration, packet_count);
-
- mod_timer(&rs_sta->rate_scale_flush, jiffies +
- rs_sta->flush_time);
-
- rs_sta->last_partial_flush = jiffies;
- } else {
- rs_sta->flush_time = IWL_RATE_FLUSH;
- rs_sta->flush_pending = 0;
- }
- /* If there weren't any unflushed entries, we don't schedule the timer
- * to run again */
-
- rs_sta->last_flush = jiffies;
-
- spin_unlock_irqrestore(&rs_sta->lock, flags);
-
- IWL_DEBUG_RATE(priv, "leave\n");
-}
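For reference, the adaptive flush period above can be followed with concrete numbers (chosen purely for illustration, not taken from a trace): if 300 frames were sent over a 200 ms interval, pps = 300 * 1000 / 200 = 1500, so duration = IWL_AVERAGE_PACKETS * 1000 / pps = 1500 * 1000 / 1500 = 1000 ms, which already lies inside the [IWL_RATE_FLUSH_MIN, IWL_RATE_FLUSH_MAX] = [50, 5000] ms clamp and becomes the next timer period.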
-
-/**
- * iwl3945_collect_tx_data - Update the success/failure sliding window
- *
- * We keep a sliding window of the last 62 packets transmitted
- * at this rate. window->data contains the bitmask of successful
- * packets.
- */
-static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta,
- struct iwl3945_rate_scale_data *window,
- int success, int retries, int index)
-{
- unsigned long flags;
- s32 fail_count;
- struct iwl_priv *priv __maybe_unused = rs_sta->priv;
-
- if (!retries) {
- IWL_DEBUG_RATE(priv, "leave: retries == 0 -- should be at least 1\n");
- return;
- }
-
- spin_lock_irqsave(&rs_sta->lock, flags);
-
- /*
- * Keep track of only the latest 62 tx frame attempts in this rate's
- * history window; anything older isn't really relevant any more.
- * If we have filled up the sliding window, drop the oldest attempt;
- * if the oldest attempt (highest bit in bitmap) shows "success",
- * subtract "1" from the success counter (this is the main reason
- * we keep these bitmaps!).
-	 */
- while (retries > 0) {
- if (window->counter >= IWL_RATE_MAX_WINDOW) {
-
- /* remove earliest */
- window->counter = IWL_RATE_MAX_WINDOW - 1;
-
- if (window->data & (1ULL << (IWL_RATE_MAX_WINDOW - 1))) {
- window->data &= ~(1ULL << (IWL_RATE_MAX_WINDOW - 1));
- window->success_counter--;
- }
- }
-
- /* Increment frames-attempted counter */
- window->counter++;
-
- /* Shift bitmap by one frame (throw away oldest history),
- * OR in "1", and increment "success" if this
- * frame was successful. */
- window->data <<= 1;
- if (success > 0) {
- window->success_counter++;
- window->data |= 0x1;
- success--;
- }
-
- retries--;
- }
-
- /* Calculate current success ratio, avoid divide-by-0! */
- if (window->counter > 0)
- window->success_ratio = 128 * (100 * window->success_counter)
- / window->counter;
- else
- window->success_ratio = IWL_INVALID_VALUE;
-
- fail_count = window->counter - window->success_counter;
-
- /* Calculate average throughput, if we have enough history. */
- if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
- (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
- window->average_tpt = ((window->success_ratio *
- rs_sta->expected_tpt[index] + 64) / 128);
- else
- window->average_tpt = IWL_INVALID_VALUE;
-
- /* Tag this window as having been updated */
- window->stamp = jiffies;
-
- spin_unlock_irqrestore(&rs_sta->lock, flags);
-
-}
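The fixed-point scaling above is easiest to read with concrete numbers (illustrative only): 31 successes in a full 62-frame window give success_ratio = 128 * (100 * 31) / 62 = 6400, i.e. 50% scaled by 128; with expected_tpt[index] = 104 that yields average_tpt = (6400 * 104 + 64) / 128 = 5200. The thresholds used elsewhere in this file read the same way: IWL_RATE_DECREASE_TH = 1920 corresponds to 15% and IWL39_RATE_HIGH_TH = 11520 to 90%.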
-
-/*
- * Called after adding a new station to initialize rate scaling
- */
-void iwl3945_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
-{
- struct ieee80211_hw *hw = priv->hw;
- struct ieee80211_conf *conf = &priv->hw->conf;
- struct iwl3945_sta_priv *psta;
- struct iwl3945_rs_sta *rs_sta;
- struct ieee80211_supported_band *sband;
- int i;
-
- IWL_DEBUG_INFO(priv, "enter\n");
- if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
- goto out;
-
- psta = (struct iwl3945_sta_priv *) sta->drv_priv;
- rs_sta = &psta->rs_sta;
- sband = hw->wiphy->bands[conf->channel->band];
-
- rs_sta->priv = priv;
-
- rs_sta->start_rate = IWL_RATE_INVALID;
-
- /* default to just 802.11b */
- rs_sta->expected_tpt = iwl3945_expected_tpt_b;
-
- rs_sta->last_partial_flush = jiffies;
- rs_sta->last_flush = jiffies;
- rs_sta->flush_time = IWL_RATE_FLUSH;
- rs_sta->last_tx_packets = 0;
-
- rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
- rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush;
-
- for (i = 0; i < IWL_RATE_COUNT_3945; i++)
- iwl3945_clear_window(&rs_sta->win[i]);
-
- /* TODO: what is a good starting rate for STA? About middle? Maybe not
- * the lowest or the highest rate.. Could consider using RSSI from
- * previous packets? Need to have IEEE 802.1X auth succeed immediately
- * after assoc.. */
-
- for (i = sband->n_bitrates - 1; i >= 0; i--) {
- if (sta->supp_rates[sband->band] & (1 << i)) {
- rs_sta->last_txrate_idx = i;
- break;
- }
- }
-
- priv->_3945.sta_supp_rates = sta->supp_rates[sband->band];
-	/* For the 5 GHz band it starts at IWL_FIRST_OFDM_RATE */
- if (sband->band == IEEE80211_BAND_5GHZ) {
- rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
- priv->_3945.sta_supp_rates = priv->_3945.sta_supp_rates <<
- IWL_FIRST_OFDM_RATE;
- }
-
-out:
- priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
-
- IWL_DEBUG_INFO(priv, "leave\n");
-}
-
-static void *iwl3945_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
-{
- return hw->priv;
-}
-
-/* rate scaling requires the free function to be implemented */
-static void iwl3945_rs_free(void *priv)
-{
- return;
-}
-
-static void *iwl3945_rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
-{
- struct iwl3945_rs_sta *rs_sta;
- struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
- struct iwl_priv *priv __maybe_unused = iwl_priv;
-
- IWL_DEBUG_RATE(priv, "enter\n");
-
- rs_sta = &psta->rs_sta;
-
- spin_lock_init(&rs_sta->lock);
- init_timer(&rs_sta->rate_scale_flush);
-
- IWL_DEBUG_RATE(priv, "leave\n");
-
- return rs_sta;
-}
-
-static void iwl3945_rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
- void *priv_sta)
-{
- struct iwl3945_rs_sta *rs_sta = priv_sta;
-
- /*
- * Be careful not to use any members of iwl3945_rs_sta (like trying
- * to use iwl_priv to print out debugging) since it may not be fully
- * initialized at this point.
- */
- del_timer_sync(&rs_sta->rate_scale_flush);
-}
-
-
-/**
- * iwl3945_rs_tx_status - Update rate control values based on Tx results
- *
- * NOTE: Uses iwl_priv->retry_rate for the # of retries attempted by
- * the hardware for each rate.
- */
-static void iwl3945_rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta,
- struct sk_buff *skb)
-{
- s8 retries = 0, current_count;
- int scale_rate_index, first_index, last_index;
- unsigned long flags;
- struct iwl_priv *priv = (struct iwl_priv *)priv_rate;
- struct iwl3945_rs_sta *rs_sta = priv_sta;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
- IWL_DEBUG_RATE(priv, "enter\n");
-
- retries = info->status.rates[0].count;
- /* Sanity Check for retries */
- if (retries > IWL_RATE_RETRY_TH)
- retries = IWL_RATE_RETRY_TH;
-
- first_index = sband->bitrates[info->status.rates[0].idx].hw_value;
- if ((first_index < 0) || (first_index >= IWL_RATE_COUNT_3945)) {
- IWL_DEBUG_RATE(priv, "leave: Rate out of bounds: %d\n", first_index);
- return;
- }
-
- if (!priv_sta) {
- IWL_DEBUG_RATE(priv, "leave: No STA priv data to update!\n");
- return;
- }
-
- /* Treat uninitialized rate scaling data same as non-existing. */
- if (!rs_sta->priv) {
- IWL_DEBUG_RATE(priv, "leave: STA priv data uninitialized!\n");
- return;
- }
-
-
- rs_sta->tx_packets++;
-
- scale_rate_index = first_index;
- last_index = first_index;
-
- /*
- * Update the window for each rate. We determine which rates
- * were Tx'd based on the total number of retries vs. the number
- * of retries configured for each rate -- currently set to the
- * priv value 'retry_rate' vs. rate specific
- *
- * On exit from this while loop last_index indicates the rate
- * at which the frame was finally transmitted (or failed if no
- * ACK)
- */
- while (retries > 1) {
- if ((retries - 1) < priv->retry_rate) {
- current_count = (retries - 1);
- last_index = scale_rate_index;
- } else {
- current_count = priv->retry_rate;
- last_index = iwl3945_rs_next_rate(priv,
- scale_rate_index);
- }
-
- /* Update this rate accounting for as many retries
- * as was used for it (per current_count) */
- iwl3945_collect_tx_data(rs_sta,
- &rs_sta->win[scale_rate_index],
- 0, current_count, scale_rate_index);
- IWL_DEBUG_RATE(priv, "Update rate %d for %d retries.\n",
- scale_rate_index, current_count);
-
- retries -= current_count;
-
- scale_rate_index = last_index;
- }
-
-
- /* Update the last index window with success/failure based on ACK */
- IWL_DEBUG_RATE(priv, "Update rate %d with %s.\n",
- last_index,
- (info->flags & IEEE80211_TX_STAT_ACK) ?
- "success" : "failure");
- iwl3945_collect_tx_data(rs_sta,
- &rs_sta->win[last_index],
- info->flags & IEEE80211_TX_STAT_ACK, 1, last_index);
-
-	/* We updated the rate scale window -- if it's been more than
- * flush_time since the last run, schedule the flush
- * again */
- spin_lock_irqsave(&rs_sta->lock, flags);
-
- if (!rs_sta->flush_pending &&
- time_after(jiffies, rs_sta->last_flush +
- rs_sta->flush_time)) {
-
- rs_sta->last_partial_flush = jiffies;
- rs_sta->flush_pending = 1;
- mod_timer(&rs_sta->rate_scale_flush,
- jiffies + rs_sta->flush_time);
- }
-
- spin_unlock_irqrestore(&rs_sta->lock, flags);
-
- IWL_DEBUG_RATE(priv, "leave\n");
-}
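To make the retry bookkeeping above concrete (hypothetical numbers): with info->status.rates[0].count = 5 and priv->retry_rate = 2, the loop first charges 2 failures to the starting rate and steps down one rate via iwl3945_rs_next_rate(), then charges 2 failures to that lower rate and steps down again, leaving retries = 1; the final iwl3945_collect_tx_data() call records the last attempt at that second stepped-down rate as a success or failure depending on IEEE80211_TX_STAT_ACK, so all 5 attempts are accounted for.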
-
-static u16 iwl3945_get_adjacent_rate(struct iwl3945_rs_sta *rs_sta,
- u8 index, u16 rate_mask, enum ieee80211_band band)
-{
- u8 high = IWL_RATE_INVALID;
- u8 low = IWL_RATE_INVALID;
- struct iwl_priv *priv __maybe_unused = rs_sta->priv;
-
- /* 802.11A walks to the next literal adjacent rate in
- * the rate table */
- if (unlikely(band == IEEE80211_BAND_5GHZ)) {
- int i;
- u32 mask;
-
- /* Find the previous rate that is in the rate mask */
- i = index - 1;
- for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
- if (rate_mask & mask) {
- low = i;
- break;
- }
- }
-
- /* Find the next rate that is in the rate mask */
- i = index + 1;
- for (mask = (1 << i); i < IWL_RATE_COUNT_3945;
- i++, mask <<= 1) {
- if (rate_mask & mask) {
- high = i;
- break;
- }
- }
-
- return (high << 8) | low;
- }
-
- low = index;
- while (low != IWL_RATE_INVALID) {
- if (rs_sta->tgg)
- low = iwl3945_rates[low].prev_rs_tgg;
- else
- low = iwl3945_rates[low].prev_rs;
- if (low == IWL_RATE_INVALID)
- break;
- if (rate_mask & (1 << low))
- break;
- IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
- }
-
- high = index;
- while (high != IWL_RATE_INVALID) {
- if (rs_sta->tgg)
- high = iwl3945_rates[high].next_rs_tgg;
- else
- high = iwl3945_rates[high].next_rs;
- if (high == IWL_RATE_INVALID)
- break;
- if (rate_mask & (1 << high))
- break;
- IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
- }
-
- return (high << 8) | low;
-}
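The helper above hands both neighbours back in a single u16. A small sketch of the packing convention its callers rely on, with hypothetical helper names:

/* Illustrative pack/unpack of the (high << 8) | low return value above. */
static unsigned short pack_adjacent_rates(unsigned char high, unsigned char low)
{
	return (unsigned short)((high << 8) | low);
}

static void unpack_adjacent_rates(unsigned short high_low,
				  unsigned char *high, unsigned char *low)
{
	*low = high_low & 0xff;			/* as in: low = high_low & 0xff */
	*high = (high_low >> 8) & 0xff;		/* as in: high = (high_low >> 8) & 0xff */
}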
-
-/**
- * iwl3945_rs_get_rate - find the rate for the requested packet
- *
- * Returns the ieee80211_rate structure allocated by the driver.
- *
- * The rate control algorithm has no internal mapping between hw_mode's
- * rate ordering and the rate ordering used by the rate control algorithm.
- *
- * The rate control algorithm uses a single table of rates that goes across
- * the entire A/B/G spectrum vs. being limited to just one particular
- * hw_mode.
- *
- * As such, we can't convert the index obtained below into the hw_mode's
- * rate table and must reference the driver allocated rate table
- *
- */
-static void iwl3945_rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
- void *priv_sta, struct ieee80211_tx_rate_control *txrc)
-{
- struct ieee80211_supported_band *sband = txrc->sband;
- struct sk_buff *skb = txrc->skb;
- u8 low = IWL_RATE_INVALID;
- u8 high = IWL_RATE_INVALID;
- u16 high_low;
- int index;
- struct iwl3945_rs_sta *rs_sta = priv_sta;
- struct iwl3945_rate_scale_data *window = NULL;
- int current_tpt = IWL_INVALID_VALUE;
- int low_tpt = IWL_INVALID_VALUE;
- int high_tpt = IWL_INVALID_VALUE;
- u32 fail_count;
- s8 scale_action = 0;
- unsigned long flags;
- u16 rate_mask;
- s8 max_rate_idx = -1;
- struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-
- IWL_DEBUG_RATE(priv, "enter\n");
-
- /* Treat uninitialized rate scaling data same as non-existing. */
- if (rs_sta && !rs_sta->priv) {
- IWL_DEBUG_RATE(priv, "Rate scaling information not initialized yet.\n");
- priv_sta = NULL;
- }
-
- if (rate_control_send_low(sta, priv_sta, txrc))
- return;
-
- rate_mask = sta->supp_rates[sband->band];
-
- /* get user max rate if set */
- max_rate_idx = txrc->max_rate_idx;
- if ((sband->band == IEEE80211_BAND_5GHZ) && (max_rate_idx != -1))
- max_rate_idx += IWL_FIRST_OFDM_RATE;
- if ((max_rate_idx < 0) || (max_rate_idx >= IWL_RATE_COUNT))
- max_rate_idx = -1;
-
- index = min(rs_sta->last_txrate_idx & 0xffff, IWL_RATE_COUNT_3945 - 1);
-
- if (sband->band == IEEE80211_BAND_5GHZ)
- rate_mask = rate_mask << IWL_FIRST_OFDM_RATE;
-
- spin_lock_irqsave(&rs_sta->lock, flags);
-
-	/* for a recent association, choose the best rate according
-	 * to the RSSI value
- */
- if (rs_sta->start_rate != IWL_RATE_INVALID) {
- if (rs_sta->start_rate < index &&
- (rate_mask & (1 << rs_sta->start_rate)))
- index = rs_sta->start_rate;
- rs_sta->start_rate = IWL_RATE_INVALID;
- }
-
-	/* force the user-configured max rate if one was set */
- if ((max_rate_idx != -1) && (max_rate_idx < index)) {
- if (rate_mask & (1 << max_rate_idx))
- index = max_rate_idx;
- }
-
- window = &(rs_sta->win[index]);
-
- fail_count = window->counter - window->success_counter;
-
- if (((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
- (window->success_counter < IWL_RATE_MIN_SUCCESS_TH))) {
- spin_unlock_irqrestore(&rs_sta->lock, flags);
-
- IWL_DEBUG_RATE(priv, "Invalid average_tpt on rate %d: "
- "counter: %d, success_counter: %d, "
- "expected_tpt is %sNULL\n",
- index,
- window->counter,
- window->success_counter,
- rs_sta->expected_tpt ? "not " : "");
-
- /* Can't calculate this yet; not enough history */
- window->average_tpt = IWL_INVALID_VALUE;
- goto out;
-
- }
-
- current_tpt = window->average_tpt;
-
- high_low = iwl3945_get_adjacent_rate(rs_sta, index, rate_mask,
- sband->band);
- low = high_low & 0xff;
- high = (high_low >> 8) & 0xff;
-
-	/* If the user set a max rate, don't allow rates higher than that constraint */
- if ((max_rate_idx != -1) && (max_rate_idx < high))
- high = IWL_RATE_INVALID;
-
- /* Collect Measured throughputs of adjacent rates */
- if (low != IWL_RATE_INVALID)
- low_tpt = rs_sta->win[low].average_tpt;
-
- if (high != IWL_RATE_INVALID)
- high_tpt = rs_sta->win[high].average_tpt;
-
- spin_unlock_irqrestore(&rs_sta->lock, flags);
-
- scale_action = 0;
-
-	/* Low success ratio, need to drop the rate */
- if ((window->success_ratio < IWL_RATE_DECREASE_TH) || !current_tpt) {
- IWL_DEBUG_RATE(priv, "decrease rate because of low success_ratio\n");
- scale_action = -1;
- /* No throughput measured yet for adjacent rates,
-	 * try to increase */
- } else if ((low_tpt == IWL_INVALID_VALUE) &&
- (high_tpt == IWL_INVALID_VALUE)) {
-
- if (high != IWL_RATE_INVALID && window->success_ratio >= IWL_RATE_INCREASE_TH)
- scale_action = 1;
- else if (low != IWL_RATE_INVALID)
- scale_action = 0;
-
- /* Both adjacent throughputs are measured, but neither one has
- * better throughput; we're using the best rate, don't change
- * it! */
- } else if ((low_tpt != IWL_INVALID_VALUE) &&
- (high_tpt != IWL_INVALID_VALUE) &&
- (low_tpt < current_tpt) && (high_tpt < current_tpt)) {
-
- IWL_DEBUG_RATE(priv, "No action -- low [%d] & high [%d] < "
- "current_tpt [%d]\n",
- low_tpt, high_tpt, current_tpt);
- scale_action = 0;
-
- /* At least one of the rates has better throughput */
- } else {
- if (high_tpt != IWL_INVALID_VALUE) {
-
- /* High rate has better throughput, Increase
- * rate */
- if (high_tpt > current_tpt &&
- window->success_ratio >= IWL_RATE_INCREASE_TH)
- scale_action = 1;
- else {
- IWL_DEBUG_RATE(priv,
- "decrease rate because of high tpt\n");
- scale_action = 0;
- }
- } else if (low_tpt != IWL_INVALID_VALUE) {
- if (low_tpt > current_tpt) {
- IWL_DEBUG_RATE(priv,
- "decrease rate because of low tpt\n");
- scale_action = -1;
- } else if (window->success_ratio >= IWL_RATE_INCREASE_TH) {
-				/* Lower rate isn't better and the success
-				 * ratio is high enough; increase the rate */
- scale_action = 1;
- }
- }
- }
-
- /* Sanity check; asked for decrease, but success rate or throughput
- * has been good at old rate. Don't change it. */
- if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
- ((window->success_ratio > IWL_RATE_HIGH_TH) ||
- (current_tpt > (100 * rs_sta->expected_tpt[low]))))
- scale_action = 0;
-
- switch (scale_action) {
- case -1:
-
-		/* Decrease rate */
- if (low != IWL_RATE_INVALID)
- index = low;
- break;
-
- case 1:
- /* Increase rate */
- if (high != IWL_RATE_INVALID)
- index = high;
-
- break;
-
- case 0:
- default:
- /* No change */
- break;
- }
-
- IWL_DEBUG_RATE(priv, "Selected %d (action %d) - low %d high %d\n",
- index, scale_action, low, high);
-
- out:
-
- if (sband->band == IEEE80211_BAND_5GHZ) {
- if (WARN_ON_ONCE(index < IWL_FIRST_OFDM_RATE))
- index = IWL_FIRST_OFDM_RATE;
- rs_sta->last_txrate_idx = index;
- info->control.rates[0].idx = index - IWL_FIRST_OFDM_RATE;
- } else {
- rs_sta->last_txrate_idx = index;
- info->control.rates[0].idx = rs_sta->last_txrate_idx;
- }
-
- IWL_DEBUG_RATE(priv, "leave: %d\n", index);
-}
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-static int iwl3945_open_file_generic(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-
-static ssize_t iwl3945_sta_dbgfs_stats_table_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- char *buff;
- int desc = 0;
- int j;
- ssize_t ret;
- struct iwl3945_rs_sta *lq_sta = file->private_data;
-
- buff = kmalloc(1024, GFP_KERNEL);
- if (!buff)
- return -ENOMEM;
-
- desc += sprintf(buff + desc, "tx packets=%d last rate index=%d\n"
- "rate=0x%X flush time %d\n",
- lq_sta->tx_packets,
- lq_sta->last_txrate_idx,
- lq_sta->start_rate, jiffies_to_msecs(lq_sta->flush_time));
- for (j = 0; j < IWL_RATE_COUNT_3945; j++) {
- desc += sprintf(buff+desc,
- "counter=%d success=%d %%=%d\n",
- lq_sta->win[j].counter,
- lq_sta->win[j].success_counter,
- lq_sta->win[j].success_ratio);
- }
- ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
- kfree(buff);
- return ret;
-}
-
-static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
- .read = iwl3945_sta_dbgfs_stats_table_read,
- .open = iwl3945_open_file_generic,
- .llseek = default_llseek,
-};
-
-static void iwl3945_add_debugfs(void *priv, void *priv_sta,
- struct dentry *dir)
-{
- struct iwl3945_rs_sta *lq_sta = priv_sta;
-
- lq_sta->rs_sta_dbgfs_stats_table_file =
- debugfs_create_file("rate_stats_table", 0600, dir,
- lq_sta, &rs_sta_dbgfs_stats_table_ops);
-
-}
-
-static void iwl3945_remove_debugfs(void *priv, void *priv_sta)
-{
- struct iwl3945_rs_sta *lq_sta = priv_sta;
- debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
-}
-#endif
-
-/*
- * Initialization of rate scaling information is done by the driver after
- * the station is added. Since mac80211 calls this function before a
- * station is added, we ignore it.
- */
-static void iwl3945_rs_rate_init_stub(void *priv_r,
- struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta)
-{
-}
-
-static struct rate_control_ops rs_ops = {
- .module = NULL,
- .name = RS_NAME,
- .tx_status = iwl3945_rs_tx_status,
- .get_rate = iwl3945_rs_get_rate,
- .rate_init = iwl3945_rs_rate_init_stub,
- .alloc = iwl3945_rs_alloc,
- .free = iwl3945_rs_free,
- .alloc_sta = iwl3945_rs_alloc_sta,
- .free_sta = iwl3945_rs_free_sta,
-#ifdef CONFIG_MAC80211_DEBUGFS
- .add_sta_debugfs = iwl3945_add_debugfs,
- .remove_sta_debugfs = iwl3945_remove_debugfs,
-#endif
-
-};
-void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
-{
- struct iwl_priv *priv = hw->priv;
- s32 rssi = 0;
- unsigned long flags;
- struct iwl3945_rs_sta *rs_sta;
- struct ieee80211_sta *sta;
- struct iwl3945_sta_priv *psta;
-
- IWL_DEBUG_RATE(priv, "enter\n");
-
- rcu_read_lock();
-
- sta = ieee80211_find_sta(priv->contexts[IWL_RXON_CTX_BSS].vif,
- priv->stations[sta_id].sta.sta.addr);
- if (!sta) {
- IWL_DEBUG_RATE(priv, "Unable to find station to initialize rate scaling.\n");
- rcu_read_unlock();
- return;
- }
-
- psta = (void *) sta->drv_priv;
- rs_sta = &psta->rs_sta;
-
- spin_lock_irqsave(&rs_sta->lock, flags);
-
- rs_sta->tgg = 0;
- switch (priv->band) {
- case IEEE80211_BAND_2GHZ:
- /* TODO: this always does G, not a regression */
- if (priv->contexts[IWL_RXON_CTX_BSS].active.flags &
- RXON_FLG_TGG_PROTECT_MSK) {
- rs_sta->tgg = 1;
- rs_sta->expected_tpt = iwl3945_expected_tpt_g_prot;
- } else
- rs_sta->expected_tpt = iwl3945_expected_tpt_g;
- break;
-
- case IEEE80211_BAND_5GHZ:
- rs_sta->expected_tpt = iwl3945_expected_tpt_a;
- break;
- case IEEE80211_NUM_BANDS:
- BUG();
- break;
- }
-
- spin_unlock_irqrestore(&rs_sta->lock, flags);
-
- rssi = priv->_3945.last_rx_rssi;
- if (rssi == 0)
- rssi = IWL_MIN_RSSI_VAL;
-
- IWL_DEBUG_RATE(priv, "Network RSSI: %d\n", rssi);
-
- rs_sta->start_rate = iwl3945_get_rate_index_by_rssi(rssi, priv->band);
-
- IWL_DEBUG_RATE(priv, "leave: rssi %d assign rate index: "
- "%d (plcp 0x%x)\n", rssi, rs_sta->start_rate,
- iwl3945_rates[rs_sta->start_rate].plcp);
- rcu_read_unlock();
-}
-
-int iwl3945_rate_control_register(void)
-{
- return ieee80211_rate_control_register(&rs_ops);
-}
-
-void iwl3945_rate_control_unregister(void)
-{
- ieee80211_rate_control_unregister(&rs_ops);
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.c b/drivers/net/wireless/iwlegacy/iwl-3945.c
deleted file mode 100644
index f7c0a74..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945.c
+++ /dev/null
@@ -1,2741 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/firmware.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-#include <net/mac80211.h>
-
-#include "iwl-fh.h"
-#include "iwl-3945-fh.h"
-#include "iwl-commands.h"
-#include "iwl-sta.h"
-#include "iwl-3945.h"
-#include "iwl-eeprom.h"
-#include "iwl-core.h"
-#include "iwl-helpers.h"
-#include "iwl-led.h"
-#include "iwl-3945-led.h"
-#include "iwl-3945-debugfs.h"
-
-#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
- [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
- IWL_RATE_##r##M_IEEE, \
- IWL_RATE_##ip##M_INDEX, \
- IWL_RATE_##in##M_INDEX, \
- IWL_RATE_##rp##M_INDEX, \
- IWL_RATE_##rn##M_INDEX, \
- IWL_RATE_##pp##M_INDEX, \
- IWL_RATE_##np##M_INDEX, \
- IWL_RATE_##r##M_INDEX_TABLE, \
- IWL_RATE_##ip##M_INDEX_TABLE }
-
-/*
- * Parameter order:
- * rate, prev ieee rate, next ieee rate, prev rs rate, next rs rate, prev tgg rate, next tgg rate
- *
- * If there isn't a valid next or previous rate then INV is used which
- * maps to IWL_RATE_INVALID
- *
- */
-const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945] = {
- IWL_DECLARE_RATE_INFO(1, INV, 2, INV, 2, INV, 2), /* 1mbps */
- IWL_DECLARE_RATE_INFO(2, 1, 5, 1, 5, 1, 5), /* 2mbps */
- IWL_DECLARE_RATE_INFO(5, 2, 6, 2, 11, 2, 11), /*5.5mbps */
- IWL_DECLARE_RATE_INFO(11, 9, 12, 5, 12, 5, 18), /* 11mbps */
- IWL_DECLARE_RATE_INFO(6, 5, 9, 5, 11, 5, 11), /* 6mbps */
- IWL_DECLARE_RATE_INFO(9, 6, 11, 5, 11, 5, 11), /* 9mbps */
- IWL_DECLARE_RATE_INFO(12, 11, 18, 11, 18, 11, 18), /* 12mbps */
- IWL_DECLARE_RATE_INFO(18, 12, 24, 12, 24, 11, 24), /* 18mbps */
- IWL_DECLARE_RATE_INFO(24, 18, 36, 18, 36, 18, 36), /* 24mbps */
- IWL_DECLARE_RATE_INFO(36, 24, 48, 24, 48, 24, 48), /* 36mbps */
- IWL_DECLARE_RATE_INFO(48, 36, 54, 36, 54, 36, 54), /* 48mbps */
- IWL_DECLARE_RATE_INFO(54, 48, INV, 48, INV, 48, INV),/* 54mbps */
-};
-
-static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index)
-{
- u8 rate = iwl3945_rates[rate_index].prev_ieee;
-
- if (rate == IWL_RATE_INVALID)
- rate = rate_index;
- return rate;
-}
-
-/* 1 = enable the iwl3945_disable_events() function */
-#define IWL_EVT_DISABLE (0)
-#define IWL_EVT_DISABLE_SIZE (1532/32)
-
-/**
- * iwl3945_disable_events - Disable selected events in uCode event log
- *
- * Disable an event by writing "1"s into "disable"
- * bitmap in SRAM. Bit position corresponds to Event # (id/type).
- * Default values of 0 enable uCode events to be logged.
- * Use only for special debugging. This function is just a placeholder as-is,
- * you'll need to provide the special bits! ...
- * ... and set IWL_EVT_DISABLE to 1. */
-void iwl3945_disable_events(struct iwl_priv *priv)
-{
- int i;
- u32 base; /* SRAM address of event log header */
- u32 disable_ptr; /* SRAM address of event-disable bitmap array */
- u32 array_size; /* # of u32 entries in array */
- static const u32 evt_disable[IWL_EVT_DISABLE_SIZE] = {
- 0x00000000, /* 31 - 0 Event id numbers */
- 0x00000000, /* 63 - 32 */
- 0x00000000, /* 95 - 64 */
- 0x00000000, /* 127 - 96 */
- 0x00000000, /* 159 - 128 */
- 0x00000000, /* 191 - 160 */
- 0x00000000, /* 223 - 192 */
- 0x00000000, /* 255 - 224 */
- 0x00000000, /* 287 - 256 */
- 0x00000000, /* 319 - 288 */
- 0x00000000, /* 351 - 320 */
- 0x00000000, /* 383 - 352 */
- 0x00000000, /* 415 - 384 */
- 0x00000000, /* 447 - 416 */
- 0x00000000, /* 479 - 448 */
- 0x00000000, /* 511 - 480 */
- 0x00000000, /* 543 - 512 */
- 0x00000000, /* 575 - 544 */
- 0x00000000, /* 607 - 576 */
- 0x00000000, /* 639 - 608 */
- 0x00000000, /* 671 - 640 */
- 0x00000000, /* 703 - 672 */
- 0x00000000, /* 735 - 704 */
- 0x00000000, /* 767 - 736 */
- 0x00000000, /* 799 - 768 */
- 0x00000000, /* 831 - 800 */
- 0x00000000, /* 863 - 832 */
- 0x00000000, /* 895 - 864 */
- 0x00000000, /* 927 - 896 */
- 0x00000000, /* 959 - 928 */
- 0x00000000, /* 991 - 960 */
- 0x00000000, /* 1023 - 992 */
- 0x00000000, /* 1055 - 1024 */
- 0x00000000, /* 1087 - 1056 */
- 0x00000000, /* 1119 - 1088 */
- 0x00000000, /* 1151 - 1120 */
- 0x00000000, /* 1183 - 1152 */
- 0x00000000, /* 1215 - 1184 */
- 0x00000000, /* 1247 - 1216 */
- 0x00000000, /* 1279 - 1248 */
- 0x00000000, /* 1311 - 1280 */
- 0x00000000, /* 1343 - 1312 */
- 0x00000000, /* 1375 - 1344 */
- 0x00000000, /* 1407 - 1376 */
- 0x00000000, /* 1439 - 1408 */
- 0x00000000, /* 1471 - 1440 */
- 0x00000000, /* 1503 - 1472 */
- };
-
- base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
- if (!iwl3945_hw_valid_rtc_data_addr(base)) {
- IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
- return;
- }
-
- disable_ptr = iwl_legacy_read_targ_mem(priv, base + (4 * sizeof(u32)));
- array_size = iwl_legacy_read_targ_mem(priv, base + (5 * sizeof(u32)));
-
- if (IWL_EVT_DISABLE && (array_size == IWL_EVT_DISABLE_SIZE)) {
- IWL_DEBUG_INFO(priv, "Disabling selected uCode log events at 0x%x\n",
- disable_ptr);
- for (i = 0; i < IWL_EVT_DISABLE_SIZE; i++)
- iwl_legacy_write_targ_mem(priv,
- disable_ptr + (i * sizeof(u32)),
- evt_disable[i]);
-
- } else {
- IWL_DEBUG_INFO(priv, "Selected uCode log events may be disabled\n");
- IWL_DEBUG_INFO(priv, " by writing \"1\"s into disable bitmap\n");
- IWL_DEBUG_INFO(priv, " in SRAM at 0x%x, size %d u32s\n",
- disable_ptr, array_size);
- }
-
-}
-
-static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
-{
- int idx;
-
- for (idx = 0; idx < IWL_RATE_COUNT_3945; idx++)
- if (iwl3945_rates[idx].plcp == plcp)
- return idx;
- return -1;
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
-
-static const char *iwl3945_get_tx_fail_reason(u32 status)
-{
- switch (status & TX_STATUS_MSK) {
- case TX_3945_STATUS_SUCCESS:
- return "SUCCESS";
- TX_STATUS_ENTRY(SHORT_LIMIT);
- TX_STATUS_ENTRY(LONG_LIMIT);
- TX_STATUS_ENTRY(FIFO_UNDERRUN);
- TX_STATUS_ENTRY(MGMNT_ABORT);
- TX_STATUS_ENTRY(NEXT_FRAG);
- TX_STATUS_ENTRY(LIFE_EXPIRE);
- TX_STATUS_ENTRY(DEST_PS);
- TX_STATUS_ENTRY(ABORTED);
- TX_STATUS_ENTRY(BT_RETRY);
- TX_STATUS_ENTRY(STA_INVALID);
- TX_STATUS_ENTRY(FRAG_DROPPED);
- TX_STATUS_ENTRY(TID_DISABLE);
- TX_STATUS_ENTRY(FRAME_FLUSHED);
- TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL);
- TX_STATUS_ENTRY(TX_LOCKED);
- TX_STATUS_ENTRY(NO_BEACON_ON_RADAR);
- }
-
- return "UNKNOWN";
-}
-#else
-static inline const char *iwl3945_get_tx_fail_reason(u32 status)
-{
- return "";
-}
-#endif
-
-/*
- * Get the previous ieee rate from the rate scale table.
- * For A and B modes we need to override the previous
- * value.
- */
-int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
-{
- int next_rate = iwl3945_get_prev_ieee_rate(rate);
-
- switch (priv->band) {
- case IEEE80211_BAND_5GHZ:
- if (rate == IWL_RATE_12M_INDEX)
- next_rate = IWL_RATE_9M_INDEX;
- else if (rate == IWL_RATE_6M_INDEX)
- next_rate = IWL_RATE_6M_INDEX;
- break;
- case IEEE80211_BAND_2GHZ:
- if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
- iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
- if (rate == IWL_RATE_11M_INDEX)
- next_rate = IWL_RATE_5M_INDEX;
- }
- break;
-
- default:
- break;
- }
-
- return next_rate;
-}
-
-
-/**
- * iwl3945_tx_queue_reclaim - Reclaim Tx queue entries already Tx'd
- *
- * When FW advances 'R' index, all entries between old and new 'R' index
- * need to be reclaimed. As a result, some free space forms. If there is
- * enough free space (> low mark), wake the stack that feeds us.
- */
-static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
- int txq_id, int index)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
- struct iwl_tx_info *tx_info;
-
- BUG_ON(txq_id == IWL39_CMD_QUEUE_NUM);
-
- for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
- q->read_ptr != index;
- q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
-
- tx_info = &txq->txb[txq->q.read_ptr];
- ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
- tx_info->skb = NULL;
- priv->cfg->ops->lib->txq_free_tfd(priv, txq);
- }
-
- if (iwl_legacy_queue_space(q) > q->low_mark && (txq_id >= 0) &&
- (txq_id != IWL39_CMD_QUEUE_NUM) &&
- priv->mac80211_registered)
- iwl_legacy_wake_queue(priv, txq);
-}
-
-/**
- * iwl3945_rx_reply_tx - Handle Tx response
- */
-static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u16 sequence = le16_to_cpu(pkt->hdr.sequence);
- int txq_id = SEQ_TO_QUEUE(sequence);
- int index = SEQ_TO_INDEX(sequence);
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct ieee80211_tx_info *info;
- struct iwl3945_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
- u32 status = le32_to_cpu(tx_resp->status);
- int rate_idx;
- int fail;
-
- if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
- IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
- "is out of range [0-%d] %d %d\n", txq_id,
- index, txq->q.n_bd, txq->q.write_ptr,
- txq->q.read_ptr);
- return;
- }
-
- txq->time_stamp = jiffies;
- info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
- ieee80211_tx_info_clear_status(info);
-
- /* Fill the MRR chain with some info about on-chip retransmissions */
- rate_idx = iwl3945_hwrate_to_plcp_idx(tx_resp->rate);
- if (info->band == IEEE80211_BAND_5GHZ)
- rate_idx -= IWL_FIRST_OFDM_RATE;
-
- fail = tx_resp->failure_frame;
-
- info->status.rates[0].idx = rate_idx;
- info->status.rates[0].count = fail + 1; /* add final attempt */
-
- /* tx_status->rts_retry_count = tx_resp->failure_rts; */
- info->flags |= ((status & TX_STATUS_MSK) == TX_STATUS_SUCCESS) ?
- IEEE80211_TX_STAT_ACK : 0;
-
- IWL_DEBUG_TX(priv, "Tx queue %d Status %s (0x%08x) plcp rate %d retries %d\n",
- txq_id, iwl3945_get_tx_fail_reason(status), status,
- tx_resp->rate, tx_resp->failure_frame);
-
- IWL_DEBUG_TX_REPLY(priv, "Tx queue reclaim %d\n", index);
- iwl3945_tx_queue_reclaim(priv, txq_id, index);
-
- if (status & TX_ABORT_REQUIRED_MSK)
- IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
-}
-
-
-
-/*****************************************************************************
- *
- * Intel PRO/Wireless 3945ABG/BG Network Connection
- *
- * RX handler implementations
- *
- *****************************************************************************/
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
- __le32 *stats)
-{
- int i;
- __le32 *prev_stats;
- u32 *accum_stats;
- u32 *delta, *max_delta;
-
- prev_stats = (__le32 *)&priv->_3945.statistics;
- accum_stats = (u32 *)&priv->_3945.accum_statistics;
- delta = (u32 *)&priv->_3945.delta_statistics;
- max_delta = (u32 *)&priv->_3945.max_delta;
-
- for (i = sizeof(__le32); i < sizeof(struct iwl3945_notif_statistics);
- i += sizeof(__le32), stats++, prev_stats++, delta++,
- max_delta++, accum_stats++) {
- if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
- *delta = (le32_to_cpu(*stats) -
- le32_to_cpu(*prev_stats));
- *accum_stats += *delta;
- if (*delta > *max_delta)
- *max_delta = *delta;
- }
- }
-
- /* reset accumulative statistics for "no-counter" type statistics */
- priv->_3945.accum_statistics.general.temperature =
- priv->_3945.statistics.general.temperature;
- priv->_3945.accum_statistics.general.ttl_timestamp =
- priv->_3945.statistics.general.ttl_timestamp;
-}
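The pass above computes, for each 32-bit counter in the statistics notification, the delta since the previous notification, a running accumulation, and the maximum delta seen. A self-contained sketch of that idea over plain host-endian arrays (hypothetical layout; the driver walks the structs with incrementing pointers and le32_to_cpu() conversions):

#include <stdint.h>

/* Illustrative per-word delta/accumulate/max pass; not the driver's exact
 * pointer walk. */
static void accumulate_stats(const uint32_t *cur, const uint32_t *prev,
			     uint32_t *delta, uint32_t *max_delta,
			     uint32_t *accum, unsigned int n_words)
{
	unsigned int i;

	for (i = 1; i < n_words; i++) {		/* skip the leading flags word */
		if (cur[i] > prev[i]) {		/* counters only grow */
			delta[i] = cur[i] - prev[i];
			accum[i] += delta[i];
			if (delta[i] > max_delta[i])
				max_delta[i] = delta[i];
		}
	}
}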
-#endif
-
-void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
- IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
- (int)sizeof(struct iwl3945_notif_statistics),
- le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- iwl3945_accumulative_statistics(priv, (__le32 *)&pkt->u.raw);
-#endif
-
- memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics));
-}
-
-void iwl3945_reply_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- __le32 *flag = (__le32 *)&pkt->u.raw;
-
- if (le32_to_cpu(*flag) & UCODE_STATISTICS_CLEAR_MSK) {
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- memset(&priv->_3945.accum_statistics, 0,
- sizeof(struct iwl3945_notif_statistics));
- memset(&priv->_3945.delta_statistics, 0,
- sizeof(struct iwl3945_notif_statistics));
- memset(&priv->_3945.max_delta, 0,
- sizeof(struct iwl3945_notif_statistics));
-#endif
- IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
- }
- iwl3945_hw_rx_statistics(priv, rxb);
-}
-
-
-/******************************************************************************
- *
- * Misc. internal state and helper functions
- *
- ******************************************************************************/
-
-/* This is necessary only for a number of statistics, see the caller. */
-static int iwl3945_is_network_packet(struct iwl_priv *priv,
- struct ieee80211_hdr *header)
-{
- /* Filter incoming packets to determine if they are targeted toward
- * this network, discarding packets coming from ourselves */
- switch (priv->iw_mode) {
- case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */
- /* packets to our IBSS update information */
- return !compare_ether_addr(header->addr3, priv->bssid);
- case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
-		/* packets from our BSS (the AP we are associated with) update information */
- return !compare_ether_addr(header->addr2, priv->bssid);
- default:
- return 1;
- }
-}
-
-static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb,
- struct ieee80211_rx_status *stats)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
- struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
- struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
- u16 len = le16_to_cpu(rx_hdr->len);
- struct sk_buff *skb;
- __le16 fc = hdr->frame_control;
-
- /* We received data from the HW, so stop the watchdog */
- if (unlikely(len + IWL39_RX_FRAME_SIZE >
- PAGE_SIZE << priv->hw_params.rx_page_order)) {
- IWL_DEBUG_DROP(priv, "Corruption detected!\n");
- return;
- }
-
- /* We only process data packets if the interface is open */
- if (unlikely(!priv->is_open)) {
- IWL_DEBUG_DROP_LIMIT(priv,
- "Dropping packet while interface is not open.\n");
- return;
- }
-
- skb = dev_alloc_skb(128);
- if (!skb) {
- IWL_ERR(priv, "dev_alloc_skb failed\n");
- return;
- }
-
- if (!iwl3945_mod_params.sw_crypto)
- iwl_legacy_set_decrypted_flag(priv,
- (struct ieee80211_hdr *)rxb_addr(rxb),
- le32_to_cpu(rx_end->status), stats);
-
- skb_add_rx_frag(skb, 0, rxb->page,
- (void *)rx_hdr->payload - (void *)pkt, len);
-
- iwl_legacy_update_stats(priv, false, fc, len);
- memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
-
- ieee80211_rx(priv->hw, skb);
- priv->alloc_rxb_page--;
- rxb->page = NULL;
-}
-
-#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
-
-static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct ieee80211_hdr *header;
- struct ieee80211_rx_status rx_status;
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
- struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
- struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
- u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
- u16 rx_stats_noise_diff __maybe_unused = le16_to_cpu(rx_stats->noise_diff);
- u8 network_packet;
-
- rx_status.flag = 0;
- rx_status.mactime = le64_to_cpu(rx_end->timestamp);
- rx_status.band = (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
- rx_status.freq =
- ieee80211_channel_to_frequency(le16_to_cpu(rx_hdr->channel),
- rx_status.band);
-
- rx_status.rate_idx = iwl3945_hwrate_to_plcp_idx(rx_hdr->rate);
- if (rx_status.band == IEEE80211_BAND_5GHZ)
- rx_status.rate_idx -= IWL_FIRST_OFDM_RATE;
-
- rx_status.antenna = (le16_to_cpu(rx_hdr->phy_flags) &
- RX_RES_PHY_FLAGS_ANTENNA_MSK) >> 4;
-
- /* set the preamble flag if appropriate */
- if (rx_hdr->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
- rx_status.flag |= RX_FLAG_SHORTPRE;
-
- if ((unlikely(rx_stats->phy_count > 20))) {
-		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
- rx_stats->phy_count);
- return;
- }
-
- if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR)
- || !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
- IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n", rx_end->status);
- return;
- }
-
-
-
- /* Convert 3945's rssi indicator to dBm */
- rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET;
-
- IWL_DEBUG_STATS(priv, "Rssi %d sig_avg %d noise_diff %d\n",
- rx_status.signal, rx_stats_sig_avg,
- rx_stats_noise_diff);
-
- header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
-
- network_packet = iwl3945_is_network_packet(priv, header);
-
- IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
- network_packet ? '*' : ' ',
- le16_to_cpu(rx_hdr->channel),
- rx_status.signal, rx_status.signal,
- rx_status.rate_idx);
-
- iwl_legacy_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len),
- header);
-
- if (network_packet) {
- priv->_3945.last_beacon_time =
- le32_to_cpu(rx_end->beacon_timestamp);
- priv->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
- priv->_3945.last_rx_rssi = rx_status.signal;
- }
-
- iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
-}
-
-int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- dma_addr_t addr, u16 len, u8 reset, u8 pad)
-{
- int count;
- struct iwl_queue *q;
- struct iwl3945_tfd *tfd, *tfd_tmp;
-
- q = &txq->q;
- tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
- tfd = &tfd_tmp[q->write_ptr];
-
- if (reset)
- memset(tfd, 0, sizeof(*tfd));
-
- count = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
-
- if ((count >= NUM_TFD_CHUNKS) || (count < 0)) {
- IWL_ERR(priv, "Error can not send more than %d chunks\n",
- NUM_TFD_CHUNKS);
- return -EINVAL;
- }
-
- tfd->tbs[count].addr = cpu_to_le32(addr);
- tfd->tbs[count].len = cpu_to_le32(len);
-
- count++;
-
- tfd->control_flags = cpu_to_le32(TFD_CTL_COUNT_SET(count) |
- TFD_CTL_PAD_SET(pad));
-
- return 0;
-}
-
-/**
- * iwl3945_hw_txq_free_tfd - Free one TFD, the one at index [txq->q.read_ptr]
- *
- * Does NOT advance any indexes
- */
-void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
- struct iwl3945_tfd *tfd_tmp = (struct iwl3945_tfd *)txq->tfds;
- int index = txq->q.read_ptr;
- struct iwl3945_tfd *tfd = &tfd_tmp[index];
- struct pci_dev *dev = priv->pci_dev;
- int i;
- int counter;
-
- /* sanity check */
- counter = TFD_CTL_COUNT_GET(le32_to_cpu(tfd->control_flags));
- if (counter > NUM_TFD_CHUNKS) {
- IWL_ERR(priv, "Too many chunks: %i\n", counter);
-		/* @todo issue fatal error, it is quite a serious situation */
- return;
- }
-
- /* Unmap tx_cmd */
- if (counter)
- pci_unmap_single(dev,
- dma_unmap_addr(&txq->meta[index], mapping),
- dma_unmap_len(&txq->meta[index], len),
- PCI_DMA_TODEVICE);
-
- /* unmap chunks if any */
-
- for (i = 1; i < counter; i++)
- pci_unmap_single(dev, le32_to_cpu(tfd->tbs[i].addr),
- le32_to_cpu(tfd->tbs[i].len), PCI_DMA_TODEVICE);
-
- /* free SKB */
- if (txq->txb) {
- struct sk_buff *skb;
-
- skb = txq->txb[txq->q.read_ptr].skb;
-
- /* can be called from irqs-disabled context */
- if (skb) {
- dev_kfree_skb_any(skb);
- txq->txb[txq->q.read_ptr].skb = NULL;
- }
- }
-}
-
-/**
- * iwl3945_hw_build_tx_cmd_rate - Add rate portion to TX_CMD
- *
- */
-void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct ieee80211_tx_info *info,
- struct ieee80211_hdr *hdr,
- int sta_id, int tx_id)
-{
- u16 hw_value = ieee80211_get_tx_rate(priv->hw, info)->hw_value;
- u16 rate_index = min(hw_value & 0xffff, IWL_RATE_COUNT_3945);
- u16 rate_mask;
- int rate;
- u8 rts_retry_limit;
- u8 data_retry_limit;
- __le32 tx_flags;
- __le16 fc = hdr->frame_control;
- struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
-
- rate = iwl3945_rates[rate_index].plcp;
- tx_flags = tx_cmd->tx_flags;
-
- /* We need to figure out how to get the sta->supp_rates while
- * in this running context */
- rate_mask = IWL_RATES_MASK_3945;
-
-	/* Set retry limit on DATA packets and Probe Responses */
- if (ieee80211_is_probe_resp(fc))
- data_retry_limit = 3;
- else
- data_retry_limit = IWL_DEFAULT_TX_RETRY;
- tx_cmd->data_retry_limit = data_retry_limit;
-
- if (tx_id >= IWL39_CMD_QUEUE_NUM)
- rts_retry_limit = 3;
- else
- rts_retry_limit = 7;
-
- if (data_retry_limit < rts_retry_limit)
- rts_retry_limit = data_retry_limit;
- tx_cmd->rts_retry_limit = rts_retry_limit;
-
- tx_cmd->rate = rate;
- tx_cmd->tx_flags = tx_flags;
-
- /* OFDM */
- tx_cmd->supp_rates[0] =
- ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF;
-
- /* CCK */
- tx_cmd->supp_rates[1] = (rate_mask & 0xF);
-
- IWL_DEBUG_RATE(priv, "Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
- "cck/ofdm mask: 0x%x/0x%x\n", sta_id,
- tx_cmd->rate, le32_to_cpu(tx_cmd->tx_flags),
- tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]);
-}
-
-static u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate)
-{
- unsigned long flags_spin;
- struct iwl_station_entry *station;
-
- if (sta_id == IWL_INVALID_STATION)
- return IWL_INVALID_STATION;
-
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- station = &priv->stations[sta_id];
-
- station->sta.sta.modify_mask = STA_MODIFY_TX_RATE_MSK;
- station->sta.rate_n_flags = cpu_to_le16(tx_rate);
- station->sta.mode = STA_CONTROL_MODIFY_MSK;
- iwl_legacy_send_add_sta(priv, &station->sta, CMD_ASYNC);
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-
- IWL_DEBUG_RATE(priv, "SCALE sync station %d to rate %d\n",
- sta_id, tx_rate);
- return sta_id;
-}
-
-static void iwl3945_set_pwr_vmain(struct iwl_priv *priv)
-{
-/*
- * (for documentation purposes)
- * to set power to V_AUX, do
-
- if (pci_pme_capable(priv->pci_dev, PCI_D3cold)) {
- iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
-
- iwl_poll_bit(priv, CSR_GPIO_IN,
- CSR_GPIO_IN_VAL_VAUX_PWR_SRC,
- CSR_GPIO_IN_BIT_AUX_POWER, 5000);
- }
- */
-
- iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
-
- iwl_poll_bit(priv, CSR_GPIO_IN, CSR_GPIO_IN_VAL_VMAIN_PWR_SRC,
- CSR_GPIO_IN_BIT_AUX_POWER, 5000); /* uS */
-}
-
-static int iwl3945_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- iwl_legacy_write_direct32(priv, FH39_RCSR_RBD_BASE(0), rxq->bd_dma);
- iwl_legacy_write_direct32(priv, FH39_RCSR_RPTR_ADDR(0),
- rxq->rb_stts_dma);
- iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), 0);
- iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0),
- FH39_RCSR_RX_CONFIG_REG_VAL_DMA_CHNL_EN_ENABLE |
- FH39_RCSR_RX_CONFIG_REG_VAL_RDRBD_EN_ENABLE |
- FH39_RCSR_RX_CONFIG_REG_BIT_WR_STTS_EN |
- FH39_RCSR_RX_CONFIG_REG_VAL_MAX_FRAG_SIZE_128 |
- (RX_QUEUE_SIZE_LOG << FH39_RCSR_RX_CONFIG_REG_POS_RBDC_SIZE) |
- FH39_RCSR_RX_CONFIG_REG_VAL_IRQ_DEST_INT_HOST |
- (1 << FH39_RCSR_RX_CONFIG_REG_POS_IRQ_RBTH) |
- FH39_RCSR_RX_CONFIG_REG_VAL_MSG_MODE_FH);
-
- /* fake read to flush all prev I/O */
- iwl_legacy_read_direct32(priv, FH39_RSSR_CTRL);
-
- return 0;
-}
-
-static int iwl3945_tx_reset(struct iwl_priv *priv)
-{
-
- /* bypass mode */
- iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0x2);
-
- /* RA 0 is active */
- iwl_legacy_write_prph(priv, ALM_SCD_ARASTAT_REG, 0x01);
-
-	/* all 6 FIFOs are active */
- iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0x3f);
-
- iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_1_REG, 0x010000);
- iwl_legacy_write_prph(priv, ALM_SCD_SBYP_MODE_2_REG, 0x030002);
- iwl_legacy_write_prph(priv, ALM_SCD_TXF4MF_REG, 0x000004);
- iwl_legacy_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);
-
- iwl_legacy_write_direct32(priv, FH39_TSSR_CBB_BASE,
- priv->_3945.shared_phys);
-
- iwl_legacy_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
- FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
- FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_TXPD_ON |
- FH39_TSSR_TX_MSG_CONFIG_REG_VAL_MAX_FRAG_SIZE_128B |
- FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TFD_ON |
- FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RD_CBB_ON |
- FH39_TSSR_TX_MSG_CONFIG_REG_VAL_ORDER_RSP_WAIT_TH |
- FH39_TSSR_TX_MSG_CONFIG_REG_VAL_RSP_WAIT_TH);
-
-
- return 0;
-}
-
-/**
- * iwl3945_txq_ctx_reset - Reset TX queue context
- *
- * Destroys all DMA structures and initializes them again
- */
-static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
-{
- int rc;
- int txq_id, slots_num;
-
- iwl3945_hw_txq_ctx_free(priv);
-
- /* allocate tx queue structure */
- rc = iwl_legacy_alloc_txq_mem(priv);
- if (rc)
- return rc;
-
- /* Tx CMD queue */
- rc = iwl3945_tx_reset(priv);
- if (rc)
- goto error;
-
- /* Tx queue(s) */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- slots_num = (txq_id == IWL39_CMD_QUEUE_NUM) ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- rc = iwl_legacy_tx_queue_init(priv, &priv->txq[txq_id],
- slots_num, txq_id);
- if (rc) {
- IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
- goto error;
- }
- }
-
- return rc;
-
- error:
- iwl3945_hw_txq_ctx_free(priv);
- return rc;
-}
-
-
-/*
- * Start up 3945's basic functionality after it has been reset
- * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
- * NOTE: This does not load uCode nor start the embedded processor
- */
-static int iwl3945_apm_init(struct iwl_priv *priv)
-{
- int ret = iwl_legacy_apm_init(priv);
-
- /* Clear APMG (NIC's internal power management) interrupts */
- iwl_legacy_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
- iwl_legacy_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
-
- /* Reset radio chip */
- iwl_legacy_set_bits_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_RESET_REQ);
- udelay(5);
- iwl_legacy_clear_bits_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_RESET_REQ);
-
- return ret;
-}
-
-static void iwl3945_nic_config(struct iwl_priv *priv)
-{
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- unsigned long flags;
- u8 rev_id = priv->pci_dev->revision;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Determine HW type */
- IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
-
- if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
- IWL_DEBUG_INFO(priv, "RTP type\n");
- else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
- IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n");
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR39_HW_IF_CONFIG_REG_BIT_3945_MB);
- } else {
- IWL_DEBUG_INFO(priv, "3945 RADIO-MM type\n");
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR39_HW_IF_CONFIG_REG_BIT_3945_MM);
- }
-
- if (EEPROM_SKU_CAP_OP_MODE_MRC == eeprom->sku_cap) {
- IWL_DEBUG_INFO(priv, "SKU OP mode is mrc\n");
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR39_HW_IF_CONFIG_REG_BIT_SKU_MRC);
- } else
- IWL_DEBUG_INFO(priv, "SKU OP mode is basic\n");
-
- if ((eeprom->board_revision & 0xF0) == 0xD0) {
- IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
- eeprom->board_revision);
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
- } else {
- IWL_DEBUG_INFO(priv, "3945ABG revision is 0x%X\n",
- eeprom->board_revision);
- iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR39_HW_IF_CONFIG_REG_BIT_BOARD_TYPE);
- }
-
- if (eeprom->almgor_m_version <= 1) {
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_A);
- IWL_DEBUG_INFO(priv, "Card M type A version is 0x%X\n",
- eeprom->almgor_m_version);
- } else {
- IWL_DEBUG_INFO(priv, "Card M type B version is 0x%X\n",
- eeprom->almgor_m_version);
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR39_HW_IF_CONFIG_REG_BITS_SILICON_TYPE_B);
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (eeprom->sku_cap & EEPROM_SKU_CAP_SW_RF_KILL_ENABLE)
- IWL_DEBUG_RF_KILL(priv, "SW RF KILL supported in EEPROM.\n");
-
- if (eeprom->sku_cap & EEPROM_SKU_CAP_HW_RF_KILL_ENABLE)
- IWL_DEBUG_RF_KILL(priv, "HW RF KILL supported in EEPROM.\n");
-}
-
-int iwl3945_hw_nic_init(struct iwl_priv *priv)
-{
- int rc;
- unsigned long flags;
- struct iwl_rx_queue *rxq = &priv->rxq;
-
- spin_lock_irqsave(&priv->lock, flags);
- priv->cfg->ops->lib->apm_ops.init(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwl3945_set_pwr_vmain(priv);
-
- priv->cfg->ops->lib->apm_ops.config(priv);
-
- /* Allocate the RX queue, or reset if it is already allocated */
- if (!rxq->bd) {
- rc = iwl_legacy_rx_queue_alloc(priv);
- if (rc) {
- IWL_ERR(priv, "Unable to initialize Rx queue\n");
- return -ENOMEM;
- }
- } else
- iwl3945_rx_queue_reset(priv, rxq);
-
- iwl3945_rx_replenish(priv);
-
- iwl3945_rx_init(priv, rxq);
-
-
- /* Look at using this instead:
- rxq->need_update = 1;
- iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
- */
-
- iwl_legacy_write_direct32(priv, FH39_RCSR_WPTR(0), rxq->write & ~7);
-
- rc = iwl3945_txq_ctx_reset(priv);
- if (rc)
- return rc;
-
- set_bit(STATUS_INIT, &priv->status);
-
- return 0;
-}
-
-/**
- * iwl3945_hw_txq_ctx_free - Free TXQ Context
- *
- * Destroy all TX DMA queues and structures
- */
-void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
-{
- int txq_id;
-
- /* Tx queues */
- if (priv->txq)
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
- txq_id++)
- if (txq_id == IWL39_CMD_QUEUE_NUM)
- iwl_legacy_cmd_queue_free(priv);
- else
- iwl_legacy_tx_queue_free(priv, txq_id);
-
- /* free tx queue structure */
- iwl_legacy_txq_mem(priv);
-}
-
-void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
-{
- int txq_id;
-
- /* stop SCD */
- iwl_legacy_write_prph(priv, ALM_SCD_MODE_REG, 0);
- iwl_legacy_write_prph(priv, ALM_SCD_TXFACT_REG, 0);
-
- /* reset TFD queues */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id), 0x0);
- iwl_poll_direct_bit(priv, FH39_TSSR_TX_STATUS,
- FH39_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(txq_id),
- 1000);
- }
-
- iwl3945_hw_txq_ctx_free(priv);
-}
-
-/**
- * iwl3945_hw_reg_adjust_power_by_temp
- * return index delta into power gain settings table
-*/
-static int iwl3945_hw_reg_adjust_power_by_temp(int new_reading, int old_reading)
-{
- return (new_reading - old_reading) * (-11) / 100;
-}
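
As a quick check of the arithmetic in iwl3945_hw_reg_adjust_power_by_temp() above: the factor of -11/100 gives roughly one gain-table step per nine units of temperature change, and (per the gain-table comment further below, where index 0 is the highest power) a warmer reading yields a negative delta, so the index later moves toward the higher-power end to compensate. A minimal standalone sketch, for illustration only and not part of the driver:

	#include <stdio.h>

	/* mirrors iwl3945_hw_reg_adjust_power_by_temp() */
	static int adjust_power_by_temp(int new_reading, int old_reading)
	{
		return (new_reading - old_reading) * (-11) / 100;
	}

	int main(void)
	{
		/* 100 units warmer than the factory reference: delta -11 */
		printf("%d\n", adjust_power_by_temp(150, 50));
		/* 100 units cooler: delta +11 (toward lower power) */
		printf("%d\n", adjust_power_by_temp(-50, 50));
		return 0;
	}
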
-
-/**
- * iwl3945_hw_reg_temp_out_of_range - Keep temperature in sane range
- */
-static inline int iwl3945_hw_reg_temp_out_of_range(int temperature)
-{
- return ((temperature < -260) || (temperature > 25)) ? 1 : 0;
-}
-
-int iwl3945_hw_get_temperature(struct iwl_priv *priv)
-{
- return iwl_read32(priv, CSR_UCODE_DRV_GP2);
-}
-
-/**
- * iwl3945_hw_reg_txpower_get_temperature
- * get the current temperature by reading from NIC
-*/
-static int iwl3945_hw_reg_txpower_get_temperature(struct iwl_priv *priv)
-{
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- int temperature;
-
- temperature = iwl3945_hw_get_temperature(priv);
-
- /* driver's okay range is -260 to +25.
- * human readable okay range is 0 to +285 */
- IWL_DEBUG_INFO(priv, "Temperature: %d\n", temperature + IWL_TEMP_CONVERT);
-
- /* handle insane temp reading */
- if (iwl3945_hw_reg_temp_out_of_range(temperature)) {
- IWL_ERR(priv, "Error bad temperature value %d\n", temperature);
-
- /* if the last recorded temperature was really hot,
- * substitute the 3rd band/group's temp measured at factory */
- if (priv->last_temperature > 100)
- temperature = eeprom->groups[2].temperature;
- else /* else use most recent "sane" value from driver */
- temperature = priv->last_temperature;
- }
-
- return temperature; /* raw, not "human readable" */
-}
-
-/* Adjust Txpower only if temperature variance is greater than threshold.
- *
- * This threshold is lower than older versions' 9 degrees */
-#define IWL_TEMPERATURE_LIMIT_TIMER 6
-
-/**
- * iwl3945_is_temp_calib_needed - determines if new calibration is needed
- *
- * records new temperature in tx_mgr->temperature.
- * replaces tx_mgr->last_temperature *only* if calib needed
- * (assumes caller will actually do the calibration!). */
-static int iwl3945_is_temp_calib_needed(struct iwl_priv *priv)
-{
- int temp_diff;
-
- priv->temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
- temp_diff = priv->temperature - priv->last_temperature;
-
- /* get absolute value */
- if (temp_diff < 0) {
- IWL_DEBUG_POWER(priv, "Getting cooler, delta %d,\n", temp_diff);
- temp_diff = -temp_diff;
- } else if (temp_diff == 0)
- IWL_DEBUG_POWER(priv, "Same temp,\n");
- else
- IWL_DEBUG_POWER(priv, "Getting warmer, delta %d,\n", temp_diff);
-
- /* if we don't need calibration, *don't* update last_temperature */
- if (temp_diff < IWL_TEMPERATURE_LIMIT_TIMER) {
- IWL_DEBUG_POWER(priv, "Timed thermal calib not needed\n");
- return 0;
- }
-
- IWL_DEBUG_POWER(priv, "Timed thermal calib needed\n");
-
- /* assume that caller will actually do calib ...
- * update the "last temperature" value */
- priv->last_temperature = priv->temperature;
- return 1;
-}
-
-#define IWL_MAX_GAIN_ENTRIES 78
-#define IWL_CCK_FROM_OFDM_POWER_DIFF -5
-#define IWL_CCK_FROM_OFDM_INDEX_DIFF (10)
-
-/* radio and DSP power table, each step is 1/2 dB.
- * 1st number is for RF analog gain, 2nd number is for DSP pre-DAC gain. */
-static struct iwl3945_tx_power power_gain_table[2][IWL_MAX_GAIN_ENTRIES] = {
- {
- {251, 127}, /* 2.4 GHz, highest power */
- {251, 127},
- {251, 127},
- {251, 127},
- {251, 125},
- {251, 110},
- {251, 105},
- {251, 98},
- {187, 125},
- {187, 115},
- {187, 108},
- {187, 99},
- {243, 119},
- {243, 111},
- {243, 105},
- {243, 97},
- {243, 92},
- {211, 106},
- {211, 100},
- {179, 120},
- {179, 113},
- {179, 107},
- {147, 125},
- {147, 119},
- {147, 112},
- {147, 106},
- {147, 101},
- {147, 97},
- {147, 91},
- {115, 107},
- {235, 121},
- {235, 115},
- {235, 109},
- {203, 127},
- {203, 121},
- {203, 115},
- {203, 108},
- {203, 102},
- {203, 96},
- {203, 92},
- {171, 110},
- {171, 104},
- {171, 98},
- {139, 116},
- {227, 125},
- {227, 119},
- {227, 113},
- {227, 107},
- {227, 101},
- {227, 96},
- {195, 113},
- {195, 106},
- {195, 102},
- {195, 95},
- {163, 113},
- {163, 106},
- {163, 102},
- {163, 95},
- {131, 113},
- {131, 106},
- {131, 102},
- {131, 95},
- {99, 113},
- {99, 106},
- {99, 102},
- {99, 95},
- {67, 113},
- {67, 106},
- {67, 102},
- {67, 95},
- {35, 113},
- {35, 106},
- {35, 102},
- {35, 95},
- {3, 113},
- {3, 106},
- {3, 102},
- {3, 95} }, /* 2.4 GHz, lowest power */
- {
- {251, 127}, /* 5.x GHz, highest power */
- {251, 120},
- {251, 114},
- {219, 119},
- {219, 101},
- {187, 113},
- {187, 102},
- {155, 114},
- {155, 103},
- {123, 117},
- {123, 107},
- {123, 99},
- {123, 92},
- {91, 108},
- {59, 125},
- {59, 118},
- {59, 109},
- {59, 102},
- {59, 96},
- {59, 90},
- {27, 104},
- {27, 98},
- {27, 92},
- {115, 118},
- {115, 111},
- {115, 104},
- {83, 126},
- {83, 121},
- {83, 113},
- {83, 105},
- {83, 99},
- {51, 118},
- {51, 111},
- {51, 104},
- {51, 98},
- {19, 116},
- {19, 109},
- {19, 102},
- {19, 98},
- {19, 93},
- {171, 113},
- {171, 107},
- {171, 99},
- {139, 120},
- {139, 113},
- {139, 107},
- {139, 99},
- {107, 120},
- {107, 113},
- {107, 107},
- {107, 99},
- {75, 120},
- {75, 113},
- {75, 107},
- {75, 99},
- {43, 120},
- {43, 113},
- {43, 107},
- {43, 99},
- {11, 120},
- {11, 113},
- {11, 107},
- {11, 99},
- {131, 107},
- {131, 99},
- {99, 120},
- {99, 113},
- {99, 107},
- {99, 99},
- {67, 120},
- {67, 113},
- {67, 107},
- {67, 99},
- {35, 120},
- {35, 113},
- {35, 107},
- {35, 99},
- {3, 120} } /* 5.x GHz, lowest power */
-};
-
-static inline u8 iwl3945_hw_reg_fix_power_index(int index)
-{
- if (index < 0)
- return 0;
- if (index >= IWL_MAX_GAIN_ENTRIES)
- return IWL_MAX_GAIN_ENTRIES - 1;
- return (u8) index;
-}
-
-/* Kick off thermal recalibration check every 60 seconds */
-#define REG_RECALIB_PERIOD (60)
-
-/**
- * iwl3945_hw_reg_set_scan_power - Set Tx power for scan probe requests
- *
- * Set (in our channel info database) the direct scan Tx power for 1 Mbit (CCK)
- * or 6 Mbit (OFDM) rates.
- */
-static void iwl3945_hw_reg_set_scan_power(struct iwl_priv *priv, u32 scan_tbl_index,
- s32 rate_index, const s8 *clip_pwrs,
- struct iwl_channel_info *ch_info,
- int band_index)
-{
- struct iwl3945_scan_power_info *scan_power_info;
- s8 power;
- u8 power_index;
-
- scan_power_info = &ch_info->scan_pwr_info[scan_tbl_index];
-
- /* use this channel group's 6Mbit clipping/saturation pwr,
- * but cap at regulatory scan power restriction (set during init
- * based on eeprom channel data) for this channel. */
- power = min(ch_info->scan_power, clip_pwrs[IWL_RATE_6M_INDEX_TABLE]);
-
- power = min(power, priv->tx_power_user_lmt);
- scan_power_info->requested_power = power;
-
- /* find difference between new scan *power* and current "normal"
- * Tx *power* for 6Mb. Use this difference (x2) to adjust the
- * current "normal" temperature-compensated Tx power *index* for
- * this rate (1Mb or 6Mb) to yield new temp-compensated scan power
- * *index*. */
- power_index = ch_info->power_info[rate_index].power_table_index
- - (power - ch_info->power_info
- [IWL_RATE_6M_INDEX_TABLE].requested_power) * 2;
-
- /* store reference index that we use when adjusting *all* scan
- * powers. So we can accommodate user (all channel) or spectrum
- * management (single channel) power changes "between" temperature
- * feedback compensation procedures.
- * don't force fit this reference index into gain table; it may be a
- * negative number. This will help avoid errors when we're at
- * the lower bounds (highest gains, for warmest temperatures)
- * of the table. */
-
- /* don't exceed table bounds for "real" setting */
- power_index = iwl3945_hw_reg_fix_power_index(power_index);
-
- scan_power_info->power_table_index = power_index;
- scan_power_info->tpc.tx_gain =
- power_gain_table[band_index][power_index].tx_gain;
- scan_power_info->tpc.dsp_atten =
- power_gain_table[band_index][power_index].dsp_atten;
-}
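
The "x2" conversion in the comment above follows from the gain table, where each index step is 1/2 dB: a power difference in dBm maps to twice as many index steps, and a lower scan power therefore means a larger (lower-power) index. A hypothetical helper, not driver code, showing just that conversion:

	/* e.g. a scan power 3 dBm below the normal 6M power -> index + 6 */
	static int scan_power_index(int normal_6m_index, int scan_power_dbm,
				    int normal_6m_power_dbm)
	{
		return normal_6m_index - (scan_power_dbm - normal_6m_power_dbm) * 2;
	}
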
-
-/**
- * iwl3945_send_tx_power - fill in Tx Power command with gain settings
- *
- * Configures power settings for all rates for the current channel,
- * using values from channel info struct, and send to NIC
- */
-static int iwl3945_send_tx_power(struct iwl_priv *priv)
-{
- int rate_idx, i;
- const struct iwl_channel_info *ch_info = NULL;
- struct iwl3945_txpowertable_cmd txpower = {
- .channel = priv->contexts[IWL_RXON_CTX_BSS].active.channel,
- };
- u16 chan;
-
- if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
- "TX Power requested while scanning!\n"))
- return -EAGAIN;
-
- chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel);
-
- txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
- ch_info = iwl_legacy_get_channel_info(priv, priv->band, chan);
- if (!ch_info) {
- IWL_ERR(priv,
- "Failed to get channel info for channel %d [%d]\n",
- chan, priv->band);
- return -EINVAL;
- }
-
- if (!iwl_legacy_is_channel_valid(ch_info)) {
- IWL_DEBUG_POWER(priv, "Not calling TX_PWR_TABLE_CMD on "
- "non-Tx channel.\n");
- return 0;
- }
-
- /* fill cmd with power settings for all rates for current channel */
- /* Fill OFDM rate */
- for (rate_idx = IWL_FIRST_OFDM_RATE, i = 0;
- rate_idx <= IWL39_LAST_OFDM_RATE; rate_idx++, i++) {
-
- txpower.power[i].tpc = ch_info->power_info[i].tpc;
- txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;
-
- IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
- le16_to_cpu(txpower.channel),
- txpower.band,
- txpower.power[i].tpc.tx_gain,
- txpower.power[i].tpc.dsp_atten,
- txpower.power[i].rate);
- }
- /* Fill CCK rates */
- for (rate_idx = IWL_FIRST_CCK_RATE;
- rate_idx <= IWL_LAST_CCK_RATE; rate_idx++, i++) {
- txpower.power[i].tpc = ch_info->power_info[i].tpc;
- txpower.power[i].rate = iwl3945_rates[rate_idx].plcp;
-
- IWL_DEBUG_POWER(priv, "ch %d:%d rf %d dsp %3d rate code 0x%02x\n",
- le16_to_cpu(txpower.channel),
- txpower.band,
- txpower.power[i].tpc.tx_gain,
- txpower.power[i].tpc.dsp_atten,
- txpower.power[i].rate);
- }
-
- return iwl_legacy_send_cmd_pdu(priv, REPLY_TX_PWR_TABLE_CMD,
- sizeof(struct iwl3945_txpowertable_cmd),
- &txpower);
-
-}
-
-/**
- * iwl3945_hw_reg_set_new_power - Configures power tables at new levels
- * @ch_info: Channel to update. Uses power_info.requested_power.
- *
- * Replace requested_power and base_power_index ch_info fields for
- * one channel.
- *
- * Called if user or spectrum management changes power preferences.
- * Takes into account h/w and modulation limitations (clip power).
- *
- * This does *not* send anything to NIC, just sets up ch_info for one channel.
- *
- * NOTE: reg_compensate_for_temperature_dif() *must* be run after this to
- * properly fill out the scan powers, and actual h/w gain settings,
- * and send changes to NIC
- */
-static int iwl3945_hw_reg_set_new_power(struct iwl_priv *priv,
- struct iwl_channel_info *ch_info)
-{
- struct iwl3945_channel_power_info *power_info;
- int power_changed = 0;
- int i;
- const s8 *clip_pwrs;
- int power;
-
- /* Get this chnlgrp's rate-to-max/clip-powers table */
- clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
-
- /* Get this channel's rate-to-current-power settings table */
- power_info = ch_info->power_info;
-
- /* update OFDM Txpower settings */
- for (i = IWL_RATE_6M_INDEX_TABLE; i <= IWL_RATE_54M_INDEX_TABLE;
- i++, ++power_info) {
- int delta_idx;
-
- /* limit new power to be no more than h/w capability */
- power = min(ch_info->curr_txpow, clip_pwrs[i]);
- if (power == power_info->requested_power)
- continue;
-
- /* find difference between old and new requested powers,
- * update base (non-temp-compensated) power index */
- delta_idx = (power - power_info->requested_power) * 2;
- power_info->base_power_index -= delta_idx;
-
- /* save new requested power value */
- power_info->requested_power = power;
-
- power_changed = 1;
- }
-
- /* update CCK Txpower settings, based on OFDM 12M setting ...
- * ... all CCK power settings for a given channel are the *same*. */
- if (power_changed) {
- power =
- ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
- requested_power + IWL_CCK_FROM_OFDM_POWER_DIFF;
-
- /* do all CCK rates' iwl3945_channel_power_info structures */
- for (i = IWL_RATE_1M_INDEX_TABLE; i <= IWL_RATE_11M_INDEX_TABLE; i++) {
- power_info->requested_power = power;
- power_info->base_power_index =
- ch_info->power_info[IWL_RATE_12M_INDEX_TABLE].
- base_power_index + IWL_CCK_FROM_OFDM_INDEX_DIFF;
- ++power_info;
- }
- }
-
- return 0;
-}
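
For the CCK update above, the two constants defined earlier fit together: IWL_CCK_FROM_OFDM_POWER_DIFF (-5) lowers the requested power by 5 dB relative to the OFDM 12M entry, and IWL_CCK_FROM_OFDM_INDEX_DIFF (10) moves the base index the matching ten half-dB steps toward lower power. A small hypothetical helper (illustration only) restating that derivation:

	struct cck_derived {
		int requested_power;
		int base_power_index;
	};

	static struct cck_derived derive_cck_from_ofdm12(int ofdm12_power,
							 int ofdm12_base_index)
	{
		struct cck_derived d = {
			.requested_power  = ofdm12_power + IWL_CCK_FROM_OFDM_POWER_DIFF,
			.base_power_index = ofdm12_base_index + IWL_CCK_FROM_OFDM_INDEX_DIFF,
		};
		return d;
	}
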
-
-/**
- * iwl3945_hw_reg_get_ch_txpower_limit - returns new power limit for channel
- *
- * NOTE: Returned power limit may be less (but not more) than requested,
- * based strictly on regulatory (eeprom and spectrum mgt) limitations
- * (no consideration for h/w clipping limitations).
- */
-static int iwl3945_hw_reg_get_ch_txpower_limit(struct iwl_channel_info *ch_info)
-{
- s8 max_power;
-
-#if 0
- /* if we're using TGd limits, use lower of TGd or EEPROM */
- if (ch_info->tgd_data.max_power != 0)
- max_power = min(ch_info->tgd_data.max_power,
- ch_info->eeprom.max_power_avg);
-
- /* else just use EEPROM limits */
- else
-#endif
- max_power = ch_info->eeprom.max_power_avg;
-
- return min(max_power, ch_info->max_power_avg);
-}
-
-/**
- * iwl3945_hw_reg_comp_txpower_temp - Compensate for temperature
- *
- * Compensate txpower settings of *all* channels for temperature.
- * This only accounts for the difference between current temperature
- * and the factory calibration temperatures, and bases the new settings
- * on the channel's base_power_index.
- *
- * If RxOn is "associated", this sends the new Txpower to NIC!
- */
-static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
-{
- struct iwl_channel_info *ch_info = NULL;
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- int delta_index;
- const s8 *clip_pwrs; /* array of h/w max power levels for each rate */
- u8 a_band;
- u8 rate_index;
- u8 scan_tbl_index;
- u8 i;
- int ref_temp;
- int temperature = priv->temperature;
-
- if (priv->disable_tx_power_cal ||
- test_bit(STATUS_SCANNING, &priv->status)) {
- /* do not perform tx power calibration */
- return 0;
- }
- /* set up new Tx power info for each and every channel, 2.4 and 5.x */
- for (i = 0; i < priv->channel_count; i++) {
- ch_info = &priv->channel_info[i];
- a_band = iwl_legacy_is_channel_a_band(ch_info);
-
- /* Get this chnlgrp's factory calibration temperature */
- ref_temp = (s16)eeprom->groups[ch_info->group_index].
- temperature;
-
- /* get power index adjustment based on current and factory
- * temps */
- delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
- ref_temp);
-
- /* set tx power value for all rates, OFDM and CCK */
- for (rate_index = 0; rate_index < IWL_RATE_COUNT_3945;
- rate_index++) {
- int power_idx =
- ch_info->power_info[rate_index].base_power_index;
-
- /* temperature compensate */
- power_idx += delta_index;
-
- /* stay within table range */
- power_idx = iwl3945_hw_reg_fix_power_index(power_idx);
- ch_info->power_info[rate_index].
- power_table_index = (u8) power_idx;
- ch_info->power_info[rate_index].tpc =
- power_gain_table[a_band][power_idx];
- }
-
- /* Get this chnlgrp's rate-to-max/clip-powers table */
- clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
-
- /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
- for (scan_tbl_index = 0;
- scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
- s32 actual_index = (scan_tbl_index == 0) ?
- IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
- iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
- actual_index, clip_pwrs,
- ch_info, a_band);
- }
- }
-
- /* send Txpower command for current channel to ucode */
- return priv->cfg->ops->lib->send_tx_power(priv);
-}
-
-int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
-{
- struct iwl_channel_info *ch_info;
- s8 max_power;
- u8 a_band;
- u8 i;
-
- if (priv->tx_power_user_lmt == power) {
- IWL_DEBUG_POWER(priv, "Requested Tx power same as current "
- "limit: %ddBm.\n", power);
- return 0;
- }
-
- IWL_DEBUG_POWER(priv, "Setting upper limit clamp to %ddBm.\n", power);
- priv->tx_power_user_lmt = power;
-
- /* set up new Tx powers for each and every channel, 2.4 and 5.x */
-
- for (i = 0; i < priv->channel_count; i++) {
- ch_info = &priv->channel_info[i];
- a_band = iwl_legacy_is_channel_a_band(ch_info);
-
- /* find minimum power of all user and regulatory constraints
- * (does not consider h/w clipping limitations) */
- max_power = iwl3945_hw_reg_get_ch_txpower_limit(ch_info);
- max_power = min(power, max_power);
- if (max_power != ch_info->curr_txpow) {
- ch_info->curr_txpow = max_power;
-
- /* this considers the h/w clipping limitations */
- iwl3945_hw_reg_set_new_power(priv, ch_info);
- }
- }
-
- /* update txpower settings for all channels,
- * send to NIC if associated. */
- iwl3945_is_temp_calib_needed(priv);
- iwl3945_hw_reg_comp_txpower_temp(priv);
-
- return 0;
-}
-
-static int iwl3945_send_rxon_assoc(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- int rc = 0;
- struct iwl_rx_packet *pkt;
- struct iwl3945_rxon_assoc_cmd rxon_assoc;
- struct iwl_host_cmd cmd = {
- .id = REPLY_RXON_ASSOC,
- .len = sizeof(rxon_assoc),
- .flags = CMD_WANT_SKB,
- .data = &rxon_assoc,
- };
- const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
- const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
-
- if ((rxon1->flags == rxon2->flags) &&
- (rxon1->filter_flags == rxon2->filter_flags) &&
- (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
- (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
- IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
- return 0;
- }
-
- rxon_assoc.flags = ctx->staging.flags;
- rxon_assoc.filter_flags = ctx->staging.filter_flags;
- rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
- rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
- rxon_assoc.reserved = 0;
-
- rc = iwl_legacy_send_cmd_sync(priv, &cmd);
- if (rc)
- return rc;
-
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n");
- rc = -EIO;
- }
-
- iwl_legacy_free_pages(priv, cmd.reply_page);
-
- return rc;
-}
-
-/**
- * iwl3945_commit_rxon - commit staging_rxon to hardware
- *
- * The RXON command in staging_rxon is committed to the hardware and
- * the active_rxon structure is updated with the new data. This
- * function correctly transitions out of the RXON_ASSOC_MSK state if
- * a HW tune is required based on the RXON structure changes.
- */
-int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- /* cast away the const for active_rxon in this function */
- struct iwl3945_rxon_cmd *active_rxon = (void *)&ctx->active;
- struct iwl3945_rxon_cmd *staging_rxon = (void *)&ctx->staging;
- int rc = 0;
- bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return -EINVAL;
-
- if (!iwl_legacy_is_alive(priv))
- return -1;
-
- /* always get timestamp with Rx frame */
- staging_rxon->flags |= RXON_FLG_TSF2HOST_MSK;
-
- /* select antenna */
- staging_rxon->flags &=
- ~(RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_SEL_MSK);
- staging_rxon->flags |= iwl3945_get_antenna_flags(priv);
-
- rc = iwl_legacy_check_rxon_cmd(priv, ctx);
- if (rc) {
- IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
- return -EINVAL;
- }
-
- /* If we don't need to send a full RXON, we can use
- * iwl3945_rxon_assoc_cmd which is used to reconfigure filter
- * and other flags for the current radio configuration. */
- if (!iwl_legacy_full_rxon_required(priv,
- &priv->contexts[IWL_RXON_CTX_BSS])) {
- rc = iwl_legacy_send_rxon_assoc(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
- if (rc) {
- IWL_ERR(priv, "Error setting RXON_ASSOC "
- "configuration (%d).\n", rc);
- return rc;
- }
-
- memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
- /*
- * We do not commit tx power settings while channel changing,
- * do it now if tx power changed.
- */
- iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
- return 0;
- }
-
- /* If we are currently associated and the new config requires
- * an RXON_ASSOC and the new config wants the associated mask enabled,
- * we must clear the association bit from the active configuration
- * before we apply the new config */
- if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) && new_assoc) {
- IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
- active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-
- /*
- * reserved4 and 5 could have been filled by the iwlcore code.
- * Let's clear them before pushing to the 3945.
- */
- active_rxon->reserved4 = 0;
- active_rxon->reserved5 = 0;
- rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
- sizeof(struct iwl3945_rxon_cmd),
- &priv->contexts[IWL_RXON_CTX_BSS].active);
-
- /* If the mask clearing failed then we set
- * active_rxon back to what it was previously */
- if (rc) {
- active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
- IWL_ERR(priv, "Error clearing ASSOC_MSK on current "
- "configuration (%d).\n", rc);
- return rc;
- }
- iwl_legacy_clear_ucode_stations(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
- iwl_legacy_restore_stations(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
- }
-
- IWL_DEBUG_INFO(priv, "Sending RXON\n"
- "* with%s RXON_FILTER_ASSOC_MSK\n"
- "* channel = %d\n"
- "* bssid = %pM\n",
- (new_assoc ? "" : "out"),
- le16_to_cpu(staging_rxon->channel),
- staging_rxon->bssid_addr);
-
- /*
- * reserved4 and 5 could have been filled by the iwlcore code.
- * Let's clear them before pushing to the 3945.
- */
- staging_rxon->reserved4 = 0;
- staging_rxon->reserved5 = 0;
-
- iwl_legacy_set_rxon_hwcrypto(priv, ctx, !iwl3945_mod_params.sw_crypto);
-
- /* Apply the new configuration */
- rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RXON,
- sizeof(struct iwl3945_rxon_cmd),
- staging_rxon);
- if (rc) {
- IWL_ERR(priv, "Error setting new configuration (%d).\n", rc);
- return rc;
- }
-
- memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
-
- if (!new_assoc) {
- iwl_legacy_clear_ucode_stations(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
- iwl_legacy_restore_stations(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
- }
-
- /* If we issue a new RXON command which required a tune then we must
- * send a new TXPOWER command or we won't be able to Tx any frames */
- rc = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
- if (rc) {
- IWL_ERR(priv, "Error setting Tx power (%d).\n", rc);
- return rc;
- }
-
- /* Init the hardware's rate fallback order based on the band */
- rc = iwl3945_init_hw_rate_table(priv);
- if (rc) {
- IWL_ERR(priv, "Error setting HW rate table: %02X\n", rc);
- return -EIO;
- }
-
- return 0;
-}
-
-/**
- * iwl3945_reg_txpower_periodic - called when it is time to check our temperature.
- *
- * -- reset periodic timer
- * -- see if temp has changed enough to warrant re-calibration ... if so:
- * -- correct coeffs for temp (can reset temp timer)
- * -- save this temp as "last",
- * -- send new set of gain settings to NIC
- * NOTE: This should continue working, even when we're not associated,
- * so we can keep our internal table of scan powers current. */
-void iwl3945_reg_txpower_periodic(struct iwl_priv *priv)
-{
- /* This will kick in the "brute force"
- * iwl3945_hw_reg_comp_txpower_temp() below */
- if (!iwl3945_is_temp_calib_needed(priv))
- goto reschedule;
-
- /* Set up a new set of temp-adjusted TxPowers, send to NIC.
- * This is based *only* on current temperature,
- * ignoring any previous power measurements */
- iwl3945_hw_reg_comp_txpower_temp(priv);
-
- reschedule:
- queue_delayed_work(priv->workqueue,
- &priv->_3945.thermal_periodic, REG_RECALIB_PERIOD * HZ);
-}
-
-static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work)
-{
- struct iwl_priv *priv = container_of(work, struct iwl_priv,
- _3945.thermal_periodic.work);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- mutex_lock(&priv->mutex);
- iwl3945_reg_txpower_periodic(priv);
- mutex_unlock(&priv->mutex);
-}
-
-/**
- * iwl3945_hw_reg_get_ch_grp_index - find the channel-group index (0-4)
- * for the channel.
- *
- * This function is used when initializing channel-info structs.
- *
- * NOTE: These channel groups do *NOT* match the bands above!
- * These channel groups are based on factory-tested channels;
- * on A-band, EEPROM's "group frequency" entries represent the top
- * channel in each group 1-4. All B/G channels are in group 0.
- */
-static u16 iwl3945_hw_reg_get_ch_grp_index(struct iwl_priv *priv,
- const struct iwl_channel_info *ch_info)
-{
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- struct iwl3945_eeprom_txpower_group *ch_grp = &eeprom->groups[0];
- u8 group;
- u16 group_index = 0; /* based on factory calib frequencies */
- u8 grp_channel;
-
- /* Find the group index for the channel ... don't use index 1(?) */
- if (iwl_legacy_is_channel_a_band(ch_info)) {
- for (group = 1; group < 5; group++) {
- grp_channel = ch_grp[group].group_channel;
- if (ch_info->channel <= grp_channel) {
- group_index = group;
- break;
- }
- }
- /* group 4 has a few channels *above* its factory cal freq */
- if (group == 5)
- group_index = 4;
- } else
- group_index = 0; /* 2.4 GHz, group 0 */
-
- IWL_DEBUG_POWER(priv, "Chnl %d mapped to grp %d\n", ch_info->channel,
- group_index);
- return group_index;
-}
-
-/**
- * iwl3945_hw_reg_get_matched_power_index - Interpolate to get nominal index
- *
- * Interpolate to get nominal (i.e. at factory calibration temperature) index
- * into radio/DSP gain settings table for requested power.
- */
-static int iwl3945_hw_reg_get_matched_power_index(struct iwl_priv *priv,
- s8 requested_power,
- s32 setting_index, s32 *new_index)
-{
- const struct iwl3945_eeprom_txpower_group *chnl_grp = NULL;
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- s32 index0, index1;
- s32 power = 2 * requested_power;
- s32 i;
- const struct iwl3945_eeprom_txpower_sample *samples;
- s32 gains0, gains1;
- s32 res;
- s32 denominator;
-
- chnl_grp = &eeprom->groups[setting_index];
- samples = chnl_grp->samples;
- for (i = 0; i < 5; i++) {
- if (power == samples[i].power) {
- *new_index = samples[i].gain_index;
- return 0;
- }
- }
-
- if (power > samples[1].power) {
- index0 = 0;
- index1 = 1;
- } else if (power > samples[2].power) {
- index0 = 1;
- index1 = 2;
- } else if (power > samples[3].power) {
- index0 = 2;
- index1 = 3;
- } else {
- index0 = 3;
- index1 = 4;
- }
-
- denominator = (s32) samples[index1].power - (s32) samples[index0].power;
- if (denominator == 0)
- return -EINVAL;
- gains0 = (s32) samples[index0].gain_index * (1 << 19);
- gains1 = (s32) samples[index1].gain_index * (1 << 19);
- res = gains0 + (gains1 - gains0) *
- ((s32) power - (s32) samples[index0].power) / denominator +
- (1 << 18);
- *new_index = res >> 19;
- return 0;
-}
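
The interpolation above works in fixed point: gain indices are scaled by 2^19 so the integer division keeps 19 fractional bits, and the added 1 << 18 rounds to nearest before the final shift back down. A standalone restatement, for illustration only (it widens to 64 bits to avoid overflow, which the original avoids through its small value ranges):

	static int interpolate_gain_index(int gain0, int power0,
					  int gain1, int power1, int power)
	{
		long long g0 = (long long)gain0 << 19;
		long long g1 = (long long)gain1 << 19;
		int denom = power1 - power0;

		if (denom == 0)
			return gain0;
		return (int)((g0 + (g1 - g0) * (power - power0) / denom +
			      (1 << 18)) >> 19);
	}

	/*
	 * Example: samples (power 20 -> gain index 10) and (power 10 -> gain
	 * index 30); a request for power 15 interpolates to gain index 20.
	 */
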
-
-static void iwl3945_hw_reg_init_channel_groups(struct iwl_priv *priv)
-{
- u32 i;
- s32 rate_index;
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- const struct iwl3945_eeprom_txpower_group *group;
-
- IWL_DEBUG_POWER(priv, "Initializing factory calib info from EEPROM\n");
-
- for (i = 0; i < IWL_NUM_TX_CALIB_GROUPS; i++) {
- s8 *clip_pwrs; /* table of power levels for each rate */
- s8 satur_pwr; /* saturation power for each chnl group */
- group = &eeprom->groups[i];
-
- /* sanity check on factory saturation power value */
- if (group->saturation_power < 40) {
- IWL_WARN(priv, "Error: saturation power is %d, "
- "less than minimum expected 40\n",
- group->saturation_power);
- return;
- }
-
- /*
- * Derive requested power levels for each rate, based on
- * hardware capabilities (saturation power for band).
- * Basic value is 3dB down from saturation, with further
- * power reductions for highest 3 data rates. These
- * backoffs provide headroom for high rate modulation
- * power peaks, without too much distortion (clipping).
- */
- /* we'll fill in this array with h/w max power levels */
- clip_pwrs = (s8 *) priv->_3945.clip_groups[i].clip_powers;
-
- /* divide factory saturation power by 2 to find -3dB level */
- satur_pwr = (s8) (group->saturation_power >> 1);
-
- /* fill in channel group's nominal powers for each rate */
- for (rate_index = 0;
- rate_index < IWL_RATE_COUNT_3945; rate_index++, clip_pwrs++) {
- switch (rate_index) {
- case IWL_RATE_36M_INDEX_TABLE:
- if (i == 0) /* B/G */
- *clip_pwrs = satur_pwr;
- else /* A */
- *clip_pwrs = satur_pwr - 5;
- break;
- case IWL_RATE_48M_INDEX_TABLE:
- if (i == 0)
- *clip_pwrs = satur_pwr - 7;
- else
- *clip_pwrs = satur_pwr - 10;
- break;
- case IWL_RATE_54M_INDEX_TABLE:
- if (i == 0)
- *clip_pwrs = satur_pwr - 9;
- else
- *clip_pwrs = satur_pwr - 12;
- break;
- default:
- *clip_pwrs = satur_pwr;
- break;
- }
- }
- }
-}
-
-/**
- * iwl3945_txpower_set_from_eeprom - Set channel power info based on EEPROM
- *
- * Second pass (during init) to set up priv->channel_info
- *
- * Set up Tx-power settings in our channel info database for each VALID
- * (for this geo/SKU) channel, at all Tx data rates, based on eeprom values
- * and current temperature.
- *
- * Since this is based on current temperature (at init time), these values may
- * not be valid for very long, but it gives us a starting/default point,
- * and allows us to perform an active (i.e. using Tx) scan.
- *
- * This does *not* write values to NIC, just sets up our internal table.
- */
-int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
-{
- struct iwl_channel_info *ch_info = NULL;
- struct iwl3945_channel_power_info *pwr_info;
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- int delta_index;
- u8 rate_index;
- u8 scan_tbl_index;
- const s8 *clip_pwrs; /* array of power levels for each rate */
- u8 gain, dsp_atten;
- s8 power;
- u8 pwr_index, base_pwr_index, a_band;
- u8 i;
- int temperature;
-
- /* save temperature reference,
- * so we can determine next time to calibrate */
- temperature = iwl3945_hw_reg_txpower_get_temperature(priv);
- priv->last_temperature = temperature;
-
- iwl3945_hw_reg_init_channel_groups(priv);
-
- /* initialize Tx power info for each and every channel, 2.4 and 5.x */
- for (i = 0, ch_info = priv->channel_info; i < priv->channel_count;
- i++, ch_info++) {
- a_band = iwl_legacy_is_channel_a_band(ch_info);
- if (!iwl_legacy_is_channel_valid(ch_info))
- continue;
-
- /* find this channel's channel group (*not* "band") index */
- ch_info->group_index =
- iwl3945_hw_reg_get_ch_grp_index(priv, ch_info);
-
- /* Get this chnlgrp's rate->max/clip-powers table */
- clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
-
- /* calculate power index *adjustment* value according to
- * diff between current temperature and factory temperature */
- delta_index = iwl3945_hw_reg_adjust_power_by_temp(temperature,
- eeprom->groups[ch_info->group_index].
- temperature);
-
- IWL_DEBUG_POWER(priv, "Delta index for channel %d: %d [%d]\n",
- ch_info->channel, delta_index, temperature +
- IWL_TEMP_CONVERT);
-
- /* set tx power value for all OFDM rates */
- for (rate_index = 0; rate_index < IWL_OFDM_RATES;
- rate_index++) {
- s32 uninitialized_var(power_idx);
- int rc;
-
- /* use channel group's clip-power table,
- * but don't exceed channel's max power */
- s8 pwr = min(ch_info->max_power_avg,
- clip_pwrs[rate_index]);
-
- pwr_info = &ch_info->power_info[rate_index];
-
- /* get base (i.e. at factory-measured temperature)
- * power table index for this rate's power */
- rc = iwl3945_hw_reg_get_matched_power_index(priv, pwr,
- ch_info->group_index,
- &power_idx);
- if (rc) {
- IWL_ERR(priv, "Invalid power index\n");
- return rc;
- }
- pwr_info->base_power_index = (u8) power_idx;
-
- /* temperature compensate */
- power_idx += delta_index;
-
- /* stay within range of gain table */
- power_idx = iwl3945_hw_reg_fix_power_index(power_idx);
-
- /* fill 1 OFDM rate's iwl3945_channel_power_info struct */
- pwr_info->requested_power = pwr;
- pwr_info->power_table_index = (u8) power_idx;
- pwr_info->tpc.tx_gain =
- power_gain_table[a_band][power_idx].tx_gain;
- pwr_info->tpc.dsp_atten =
- power_gain_table[a_band][power_idx].dsp_atten;
- }
-
- /* set tx power for CCK rates, based on OFDM 12 Mbit settings*/
- pwr_info = &ch_info->power_info[IWL_RATE_12M_INDEX_TABLE];
- power = pwr_info->requested_power +
- IWL_CCK_FROM_OFDM_POWER_DIFF;
- pwr_index = pwr_info->power_table_index +
- IWL_CCK_FROM_OFDM_INDEX_DIFF;
- base_pwr_index = pwr_info->base_power_index +
- IWL_CCK_FROM_OFDM_INDEX_DIFF;
-
- /* stay within table range */
- pwr_index = iwl3945_hw_reg_fix_power_index(pwr_index);
- gain = power_gain_table[a_band][pwr_index].tx_gain;
- dsp_atten = power_gain_table[a_band][pwr_index].dsp_atten;
-
- /* fill each CCK rate's iwl3945_channel_power_info structure
- * NOTE: All CCK-rate Txpwrs are the same for a given chnl!
- * NOTE: CCK rates start at end of OFDM rates! */
- for (rate_index = 0;
- rate_index < IWL_CCK_RATES; rate_index++) {
- pwr_info = &ch_info->power_info[rate_index+IWL_OFDM_RATES];
- pwr_info->requested_power = power;
- pwr_info->power_table_index = pwr_index;
- pwr_info->base_power_index = base_pwr_index;
- pwr_info->tpc.tx_gain = gain;
- pwr_info->tpc.dsp_atten = dsp_atten;
- }
-
- /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
- for (scan_tbl_index = 0;
- scan_tbl_index < IWL_NUM_SCAN_RATES; scan_tbl_index++) {
- s32 actual_index = (scan_tbl_index == 0) ?
- IWL_RATE_1M_INDEX_TABLE : IWL_RATE_6M_INDEX_TABLE;
- iwl3945_hw_reg_set_scan_power(priv, scan_tbl_index,
- actual_index, clip_pwrs, ch_info, a_band);
- }
- }
-
- return 0;
-}
-
-int iwl3945_hw_rxq_stop(struct iwl_priv *priv)
-{
- int rc;
-
- iwl_legacy_write_direct32(priv, FH39_RCSR_CONFIG(0), 0);
- rc = iwl_poll_direct_bit(priv, FH39_RSSR_STATUS,
- FH39_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
- if (rc < 0)
- IWL_ERR(priv, "Can't stop Rx DMA.\n");
-
- return 0;
-}
-
-int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
- int txq_id = txq->q.id;
-
- struct iwl3945_shared *shared_data = priv->_3945.shared_virt;
-
- shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);
-
- iwl_legacy_write_direct32(priv, FH39_CBCC_CTRL(txq_id), 0);
- iwl_legacy_write_direct32(priv, FH39_CBCC_BASE(txq_id), 0);
-
- iwl_legacy_write_direct32(priv, FH39_TCSR_CONFIG(txq_id),
- FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT |
- FH39_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF |
- FH39_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD |
- FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL |
- FH39_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE);
-
- /* fake read to flush all prev. writes */
- iwl_read32(priv, FH39_TSSR_CBB_BASE);
-
- return 0;
-}
-
-/*
- * HCMD utils
- */
-static u16 iwl3945_get_hcmd_size(u8 cmd_id, u16 len)
-{
- switch (cmd_id) {
- case REPLY_RXON:
- return sizeof(struct iwl3945_rxon_cmd);
- case POWER_TABLE_CMD:
- return sizeof(struct iwl3945_powertable_cmd);
- default:
- return len;
- }
-}
-
-
-static u16 iwl3945_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
- u8 *data)
-{
- struct iwl3945_addsta_cmd *addsta = (struct iwl3945_addsta_cmd *)data;
- addsta->mode = cmd->mode;
- memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
- memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
- addsta->station_flags = cmd->station_flags;
- addsta->station_flags_msk = cmd->station_flags_msk;
- addsta->tid_disable_tx = cpu_to_le16(0);
- addsta->rate_n_flags = cmd->rate_n_flags;
- addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
- addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
- addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
-
- return (u16)sizeof(struct iwl3945_addsta_cmd);
-}
-
-static int iwl3945_add_bssid_station(struct iwl_priv *priv,
- const u8 *addr, u8 *sta_id_r)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- int ret;
- u8 sta_id;
- unsigned long flags;
-
- if (sta_id_r)
- *sta_id_r = IWL_INVALID_STATION;
-
- ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
- if (ret) {
- IWL_ERR(priv, "Unable to add station %pM\n", addr);
- return ret;
- }
-
- if (sta_id_r)
- *sta_id_r = sta_id;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].used |= IWL_STA_LOCAL;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return 0;
-}
-static int iwl3945_manage_ibss_station(struct iwl_priv *priv,
- struct ieee80211_vif *vif, bool add)
-{
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- int ret;
-
- if (add) {
- ret = iwl3945_add_bssid_station(priv, vif->bss_conf.bssid,
- &vif_priv->ibss_bssid_sta_id);
- if (ret)
- return ret;
-
- iwl3945_sync_sta(priv, vif_priv->ibss_bssid_sta_id,
- (priv->band == IEEE80211_BAND_5GHZ) ?
- IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP);
- iwl3945_rate_scale_init(priv->hw, vif_priv->ibss_bssid_sta_id);
-
- return 0;
- }
-
- return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
- vif->bss_conf.bssid);
-}
-
-/**
- * iwl3945_init_hw_rate_table - Initialize the hardware rate fallback table
- */
-int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
-{
- int rc, i, index, prev_index;
- struct iwl3945_rate_scaling_cmd rate_cmd = {
- .reserved = {0, 0, 0},
- };
- struct iwl3945_rate_scaling_info *table = rate_cmd.table;
-
- for (i = 0; i < ARRAY_SIZE(iwl3945_rates); i++) {
- index = iwl3945_rates[i].table_rs_index;
-
- table[index].rate_n_flags =
- iwl3945_hw_set_rate_n_flags(iwl3945_rates[i].plcp, 0);
- table[index].try_cnt = priv->retry_rate;
- prev_index = iwl3945_get_prev_ieee_rate(i);
- table[index].next_rate_index =
- iwl3945_rates[prev_index].table_rs_index;
- }
-
- switch (priv->band) {
- case IEEE80211_BAND_5GHZ:
- IWL_DEBUG_RATE(priv, "Select A mode rate scale\n");
- /* If one of the following CCK rates is used,
- * have it fall back to the 6M OFDM rate */
- for (i = IWL_RATE_1M_INDEX_TABLE;
- i <= IWL_RATE_11M_INDEX_TABLE; i++)
- table[i].next_rate_index =
- iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
-
- /* Don't fall back to CCK rates */
- table[IWL_RATE_12M_INDEX_TABLE].next_rate_index =
- IWL_RATE_9M_INDEX_TABLE;
-
- /* Don't drop out of OFDM rates */
- table[IWL_RATE_6M_INDEX_TABLE].next_rate_index =
- iwl3945_rates[IWL_FIRST_OFDM_RATE].table_rs_index;
- break;
-
- case IEEE80211_BAND_2GHZ:
- IWL_DEBUG_RATE(priv, "Select B/G mode rate scale\n");
- /* If an OFDM rate is used, have it fall back to the
- * 1M CCK rates */
-
- if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
- iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
-
- index = IWL_FIRST_CCK_RATE;
- for (i = IWL_RATE_6M_INDEX_TABLE;
- i <= IWL_RATE_54M_INDEX_TABLE; i++)
- table[i].next_rate_index =
- iwl3945_rates[index].table_rs_index;
-
- index = IWL_RATE_11M_INDEX_TABLE;
- /* CCK shouldn't fall back to OFDM... */
- table[index].next_rate_index = IWL_RATE_5M_INDEX_TABLE;
- }
- break;
-
- default:
- WARN_ON(1);
- break;
- }
-
- /* Update the rate scaling for control frame Tx */
- rate_cmd.table_id = 0;
- rc = iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
- &rate_cmd);
- if (rc)
- return rc;
-
- /* Update the rate scaling for data frame Tx */
- rate_cmd.table_id = 1;
- return iwl_legacy_send_cmd_pdu(priv, REPLY_RATE_SCALE, sizeof(rate_cmd),
- &rate_cmd);
-}
-
-/* Called when initializing driver */
-int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
-{
- memset((void *)&priv->hw_params, 0,
- sizeof(struct iwl_hw_params));
-
- priv->_3945.shared_virt =
- dma_alloc_coherent(&priv->pci_dev->dev,
- sizeof(struct iwl3945_shared),
- &priv->_3945.shared_phys, GFP_KERNEL);
- if (!priv->_3945.shared_virt) {
- IWL_ERR(priv, "failed to allocate pci memory\n");
- return -ENOMEM;
- }
-
- /* Assign number of Usable TX queues */
- priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
-
- priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd);
- priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_3K);
- priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
- priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
- priv->hw_params.max_stations = IWL3945_STATION_COUNT;
- priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL3945_BROADCAST_ID;
-
- priv->sta_key_max_num = STA_KEY_MAX_NUM;
-
- priv->hw_params.rx_wrt_ptr_reg = FH39_RSCSR_CHNL0_WPTR;
- priv->hw_params.max_beacon_itrvl = IWL39_MAX_UCODE_BEACON_INTERVAL;
- priv->hw_params.beacon_time_tsf_bits = IWL3945_EXT_BEACON_TIME_POS;
-
- return 0;
-}
-
-unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
- struct iwl3945_frame *frame, u8 rate)
-{
- struct iwl3945_tx_beacon_cmd *tx_beacon_cmd;
- unsigned int frame_size;
-
- tx_beacon_cmd = (struct iwl3945_tx_beacon_cmd *)&frame->u;
- memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
-
- tx_beacon_cmd->tx.sta_id =
- priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
- tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-
- frame_size = iwl3945_fill_beacon_frame(priv,
- tx_beacon_cmd->frame,
- sizeof(frame->u) - sizeof(*tx_beacon_cmd));
-
- BUG_ON(frame_size > MAX_MPDU_SIZE);
- tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
-
- tx_beacon_cmd->tx.rate = rate;
- tx_beacon_cmd->tx.tx_flags = (TX_CMD_FLG_SEQ_CTL_MSK |
- TX_CMD_FLG_TSF_MSK);
-
- /* supp_rates[0] == OFDM start at IWL_FIRST_OFDM_RATE*/
- tx_beacon_cmd->tx.supp_rates[0] =
- (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
-
- tx_beacon_cmd->tx.supp_rates[1] =
- (IWL_CCK_BASIC_RATES_MASK & 0xF);
-
- return sizeof(struct iwl3945_tx_beacon_cmd) + frame_size;
-}
-
-void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv)
-{
- priv->rx_handlers[REPLY_TX] = iwl3945_rx_reply_tx;
- priv->rx_handlers[REPLY_3945_RX] = iwl3945_rx_reply_rx;
-}
-
-void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv)
-{
- INIT_DELAYED_WORK(&priv->_3945.thermal_periodic,
- iwl3945_bg_reg_txpower_periodic);
-}
-
-void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv)
-{
- cancel_delayed_work(&priv->_3945.thermal_periodic);
-}
-
-/* check contents of special bootstrap uCode SRAM */
-static int iwl3945_verify_bsm(struct iwl_priv *priv)
- {
- __le32 *image = priv->ucode_boot.v_addr;
- u32 len = priv->ucode_boot.len;
- u32 reg;
- u32 val;
-
- IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
-
- /* verify BSM SRAM contents */
- val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
- for (reg = BSM_SRAM_LOWER_BOUND;
- reg < BSM_SRAM_LOWER_BOUND + len;
- reg += sizeof(u32), image++) {
- val = iwl_legacy_read_prph(priv, reg);
- if (val != le32_to_cpu(*image)) {
- IWL_ERR(priv, "BSM uCode verification failed at "
- "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
- BSM_SRAM_LOWER_BOUND,
- reg - BSM_SRAM_LOWER_BOUND, len,
- val, le32_to_cpu(*image));
- return -EIO;
- }
- }
-
- IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");
-
- return 0;
-}
-
-
-/******************************************************************************
- *
- * EEPROM related functions
- *
- ******************************************************************************/
-
-/*
- * Clear the OWNER_MSK, to establish driver (instead of uCode running on
- * embedded controller) as EEPROM reader; each read is a series of pulses
- * to/from the EEPROM chip, not a single event, so even reads could conflict
- * if they weren't arbitrated by some ownership mechanism. Here, the driver
- * simply claims ownership, which should be safe when this function is called
- * (i.e. before loading uCode!).
- */
-static int iwl3945_eeprom_acquire_semaphore(struct iwl_priv *priv)
-{
- _iwl_legacy_clear_bit(priv, CSR_EEPROM_GP, CSR_EEPROM_GP_IF_OWNER_MSK);
- return 0;
-}
-
-
-static void iwl3945_eeprom_release_semaphore(struct iwl_priv *priv)
-{
- return;
-}
-
- /**
- * iwl3945_load_bsm - Load bootstrap instructions
- *
- * BSM operation:
- *
- * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
- * in special SRAM that does not power down during RFKILL. When powering back
- * up after power-saving sleeps (or during initial uCode load), the BSM loads
- * the bootstrap program into the on-board processor, and starts it.
- *
- * The bootstrap program loads (via DMA) instructions and data for a new
- * program from host DRAM locations indicated by the host driver in the
- * BSM_DRAM_* registers. Once the new program is loaded, it starts
- * automatically.
- *
- * When initializing the NIC, the host driver points the BSM to the
- * "initialize" uCode image. This uCode sets up some internal data, then
- * notifies host via "initialize alive" that it is complete.
- *
- * The host then replaces the BSM_DRAM_* pointer values to point to the
- * normal runtime uCode instructions and a backup uCode data cache buffer
- * (filled initially with starting data values for the on-board processor),
- * then triggers the "initialize" uCode to load and launch the runtime uCode,
- * which begins normal operation.
- *
- * When doing a power-save shutdown, runtime uCode saves data SRAM into
- * the backup data cache in DRAM before SRAM is powered down.
- *
- * When powering back up, the BSM loads the bootstrap program. This reloads
- * the runtime uCode instructions and the backup data cache into SRAM,
- * and re-launches the runtime uCode from where it left off.
- */
-static int iwl3945_load_bsm(struct iwl_priv *priv)
-{
- __le32 *image = priv->ucode_boot.v_addr;
- u32 len = priv->ucode_boot.len;
- dma_addr_t pinst;
- dma_addr_t pdata;
- u32 inst_len;
- u32 data_len;
- int rc;
- int i;
- u32 done;
- u32 reg_offset;
-
- IWL_DEBUG_INFO(priv, "Begin load bsm\n");
-
- /* make sure bootstrap program is no larger than BSM's SRAM size */
- if (len > IWL39_MAX_BSM_SIZE)
- return -EINVAL;
-
- /* Tell bootstrap uCode where to find the "Initialize" uCode
- * in host DRAM ... host DRAM physical address bits 31:0 for 3945.
- * NOTE: iwl3945_initialize_alive_start() will replace these values,
- * after the "initialize" uCode has run, to point to
- * runtime/protocol instructions and backup data cache. */
- pinst = priv->ucode_init.p_addr;
- pdata = priv->ucode_init_data.p_addr;
- inst_len = priv->ucode_init.len;
- data_len = priv->ucode_init_data.len;
-
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
-
- /* Fill BSM memory with bootstrap instructions */
- for (reg_offset = BSM_SRAM_LOWER_BOUND;
- reg_offset < BSM_SRAM_LOWER_BOUND + len;
- reg_offset += sizeof(u32), image++)
- _iwl_legacy_write_prph(priv, reg_offset,
- le32_to_cpu(*image));
-
- rc = iwl3945_verify_bsm(priv);
- if (rc)
- return rc;
-
- /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
- iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
- iwl_legacy_write_prph(priv, BSM_WR_MEM_DST_REG,
- IWL39_RTC_INST_LOWER_BOUND);
- iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
-
- /* Load bootstrap code into instruction SRAM now,
- * to prepare to load "initialize" uCode */
- iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
- BSM_WR_CTRL_REG_BIT_START);
-
- /* Wait for load of bootstrap uCode to finish */
- for (i = 0; i < 100; i++) {
- done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
- if (!(done & BSM_WR_CTRL_REG_BIT_START))
- break;
- udelay(10);
- }
- if (i < 100)
- IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
- else {
- IWL_ERR(priv, "BSM write did not complete!\n");
- return -EIO;
- }
-
- /* Enable future boot loads whenever power management unit triggers it
- * (e.g. when powering back up after power-save shutdown) */
- iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG,
- BSM_WR_CTRL_REG_BIT_START_EN);
-
- return 0;
-}
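
The NOTE in the comment near the top of iwl3945_load_bsm() describes a later step: once the "initialize" uCode reports alive, the host re-points the BSM DRAM registers at the runtime image and its backup data cache, so that BSM-triggered reloads after a power-save shutdown fetch the runtime program. A hypothetical sketch of that hand-off, using only the registers and accessor seen above (the real driver performs it from its alive-start path):

	static void iwl3945_bsm_point_at_image(struct iwl_priv *priv,
					       dma_addr_t pinst, u32 inst_len,
					       dma_addr_t pdata, u32 data_len)
	{
		/* physical addresses and lengths of the runtime image in host DRAM */
		iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
		iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
		iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
		iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
	}
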
-
-static struct iwl_hcmd_ops iwl3945_hcmd = {
- .rxon_assoc = iwl3945_send_rxon_assoc,
- .commit_rxon = iwl3945_commit_rxon,
-};
-
-static struct iwl_lib_ops iwl3945_lib = {
- .txq_attach_buf_to_tfd = iwl3945_hw_txq_attach_buf_to_tfd,
- .txq_free_tfd = iwl3945_hw_txq_free_tfd,
- .txq_init = iwl3945_hw_tx_queue_init,
- .load_ucode = iwl3945_load_bsm,
- .dump_nic_error_log = iwl3945_dump_nic_error_log,
- .apm_ops = {
- .init = iwl3945_apm_init,
- .config = iwl3945_nic_config,
- },
- .eeprom_ops = {
- .regulatory_bands = {
- EEPROM_REGULATORY_BAND_1_CHANNELS,
- EEPROM_REGULATORY_BAND_2_CHANNELS,
- EEPROM_REGULATORY_BAND_3_CHANNELS,
- EEPROM_REGULATORY_BAND_4_CHANNELS,
- EEPROM_REGULATORY_BAND_5_CHANNELS,
- EEPROM_REGULATORY_BAND_NO_HT40,
- EEPROM_REGULATORY_BAND_NO_HT40,
- },
- .acquire_semaphore = iwl3945_eeprom_acquire_semaphore,
- .release_semaphore = iwl3945_eeprom_release_semaphore,
- },
- .send_tx_power = iwl3945_send_tx_power,
- .is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr,
-
- .debugfs_ops = {
- .rx_stats_read = iwl3945_ucode_rx_stats_read,
- .tx_stats_read = iwl3945_ucode_tx_stats_read,
- .general_stats_read = iwl3945_ucode_general_stats_read,
- },
-};
-
-static const struct iwl_legacy_ops iwl3945_legacy_ops = {
- .post_associate = iwl3945_post_associate,
- .config_ap = iwl3945_config_ap,
- .manage_ibss_station = iwl3945_manage_ibss_station,
-};
-
-static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
- .get_hcmd_size = iwl3945_get_hcmd_size,
- .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
- .request_scan = iwl3945_request_scan,
- .post_scan = iwl3945_post_scan,
-};
-
-static const struct iwl_ops iwl3945_ops = {
- .lib = &iwl3945_lib,
- .hcmd = &iwl3945_hcmd,
- .utils = &iwl3945_hcmd_utils,
- .led = &iwl3945_led_ops,
- .legacy = &iwl3945_legacy_ops,
- .ieee80211_ops = &iwl3945_hw_ops,
-};
-
-static struct iwl_base_params iwl3945_base_params = {
- .eeprom_size = IWL3945_EEPROM_IMG_SIZE,
- .num_of_queues = IWL39_NUM_QUEUES,
- .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
- .set_l0s = false,
- .use_bsm = true,
- .led_compensation = 64,
- .wd_timeout = IWL_DEF_WD_TIMEOUT,
-};
-
-static struct iwl_cfg iwl3945_bg_cfg = {
- .name = "3945BG",
- .fw_name_pre = IWL3945_FW_PRE,
- .ucode_api_max = IWL3945_UCODE_API_MAX,
- .ucode_api_min = IWL3945_UCODE_API_MIN,
- .sku = IWL_SKU_G,
- .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
- .ops = &iwl3945_ops,
- .mod_params = &iwl3945_mod_params,
- .base_params = &iwl3945_base_params,
- .led_mode = IWL_LED_BLINK,
-};
-
-static struct iwl_cfg iwl3945_abg_cfg = {
- .name = "3945ABG",
- .fw_name_pre = IWL3945_FW_PRE,
- .ucode_api_max = IWL3945_UCODE_API_MAX,
- .ucode_api_min = IWL3945_UCODE_API_MIN,
- .sku = IWL_SKU_A|IWL_SKU_G,
- .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
- .ops = &iwl3945_ops,
- .mod_params = &iwl3945_mod_params,
- .base_params = &iwl3945_base_params,
- .led_mode = IWL_LED_BLINK,
-};
-
-DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
- {IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)},
- {IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)},
- {IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)},
- {IWL_PCI_DEVICE(0x4227, 0x1014, iwl3945_bg_cfg)},
- {IWL_PCI_DEVICE(0x4222, PCI_ANY_ID, iwl3945_abg_cfg)},
- {IWL_PCI_DEVICE(0x4227, PCI_ANY_ID, iwl3945_abg_cfg)},
- {0}
-};
-
-MODULE_DEVICE_TABLE(pci, iwl3945_hw_card_ids);
diff --git a/drivers/net/wireless/iwlegacy/iwl-3945.h b/drivers/net/wireless/iwlegacy/iwl-3945.h
deleted file mode 100644
index b118b59..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-3945.h
+++ /dev/null
@@ -1,308 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-3945.h) for driver implementation definitions.
- * Please use iwl-3945-commands.h for uCode API definitions.
- * Please use iwl-3945-hw.h for hardware-related definitions.
- */
-
-#ifndef __iwl_3945_h__
-#define __iwl_3945_h__
-
-#include <linux/pci.h> /* for struct pci_device_id */
-#include <linux/kernel.h>
-#include <net/ieee80211_radiotap.h>
-
-/* Hardware specific file defines the PCI IDs table for that hardware module */
-extern const struct pci_device_id iwl3945_hw_card_ids[];
-
-#include "iwl-csr.h"
-#include "iwl-prph.h"
-#include "iwl-fh.h"
-#include "iwl-3945-hw.h"
-#include "iwl-debug.h"
-#include "iwl-power.h"
-#include "iwl-dev.h"
-#include "iwl-led.h"
-
-/* Highest firmware API version supported */
-#define IWL3945_UCODE_API_MAX 2
-
-/* Lowest firmware API version supported */
-#define IWL3945_UCODE_API_MIN 1
-
-#define IWL3945_FW_PRE "iwlwifi-3945-"
-#define _IWL3945_MODULE_FIRMWARE(api) IWL3945_FW_PRE #api ".ucode"
-#define IWL3945_MODULE_FIRMWARE(api) _IWL3945_MODULE_FIRMWARE(api)
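These firmware-name macros are typically expanded once at module scope to advertise the newest supported image; a brief usage sketch (not quoted from this header):

/* Expands to "iwlwifi-3945-2.ucode" when IWL3945_UCODE_API_MAX is 2. */
MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));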
-
-/* Default noise level to report when noise measurement is not available.
- * This may be because we're:
- * 1) Not associated (4965, no beacon statistics being sent to driver)
- * 2) Scanning (noise measurement does not apply to associated channel)
- * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
- * Use default noise value of -127 ... this is below the range of measurable
- * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
- * Also, -127 works better than 0 when averaging frames with/without
- * noise info (e.g. averaging might be done in app); measured dBm values are
- * always negative ... using a negative value as the default keeps all
- * averages within an s8's (used in some apps) range of negative values. */
-#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
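As a hedged sketch of how such a default is typically consumed (the helper below is illustrative only, not part of the driver):

/* Fall back to the "unmeasurable" marker when no noise figure is valid. */
static inline int iwl3945_noise_dbm(int measured_dbm, bool measurement_valid)
{
	return measurement_valid ? measured_dbm : IWL_NOISE_MEAS_NOT_AVAILABLE;
}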
-
-/* Module parameters accessible from iwl-*.c */
-extern struct iwl_mod_params iwl3945_mod_params;
-
-struct iwl3945_rate_scale_data {
- u64 data;
- s32 success_counter;
- s32 success_ratio;
- s32 counter;
- s32 average_tpt;
- unsigned long stamp;
-};
-
-struct iwl3945_rs_sta {
- spinlock_t lock;
- struct iwl_priv *priv;
- s32 *expected_tpt;
- unsigned long last_partial_flush;
- unsigned long last_flush;
- u32 flush_time;
- u32 last_tx_packets;
- u32 tx_packets;
- u8 tgg;
- u8 flush_pending;
- u8 start_rate;
- struct timer_list rate_scale_flush;
- struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945];
-#ifdef CONFIG_MAC80211_DEBUGFS
- struct dentry *rs_sta_dbgfs_stats_table_file;
-#endif
-
- /* used to be in sta_info */
- int last_txrate_idx;
-};
-
-
-/*
- * The common struct MUST be first because it is shared between
- * 3945 and 4965!
- */
-struct iwl3945_sta_priv {
- struct iwl_station_priv_common common;
- struct iwl3945_rs_sta rs_sta;
-};
-
-enum iwl3945_antenna {
- IWL_ANTENNA_DIVERSITY,
- IWL_ANTENNA_MAIN,
- IWL_ANTENNA_AUX
-};
-
-/*
- * RTS threshold here is total size [2347] minus 4 FCS bytes
- * Per spec:
- * a value of 0 means RTS on all data/management packets
- * a value > max MSDU size means no RTS
- * else RTS for data/management frames where MPDU is larger
- * than RTS value.
- */
-#define DEFAULT_RTS_THRESHOLD 2347U
-#define MIN_RTS_THRESHOLD 0U
-#define MAX_RTS_THRESHOLD 2347U
-#define MAX_MSDU_SIZE 2304U
-#define MAX_MPDU_SIZE 2346U
-#define DEFAULT_BEACON_INTERVAL 100U
-#define DEFAULT_SHORT_RETRY_LIMIT 7U
-#define DEFAULT_LONG_RETRY_LIMIT 4U
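The threshold rules above reduce to a small decision helper; this is an illustrative sketch only (the function does not exist in the driver):

/* Decide whether an RTS exchange should precede a frame of the given
 * MPDU length, following the threshold semantics described above. */
static inline bool iwl3945_rts_needed(u16 rts_threshold, u16 mpdu_len)
{
	if (rts_threshold == 0)
		return true;			/* RTS for all data/management frames */
	if (rts_threshold > MAX_MSDU_SIZE)
		return false;			/* threshold above max MSDU: RTS disabled */
	return mpdu_len > rts_threshold;	/* RTS only for larger MPDUs */
}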
-
-#define IWL_TX_FIFO_AC0 0
-#define IWL_TX_FIFO_AC1 1
-#define IWL_TX_FIFO_AC2 2
-#define IWL_TX_FIFO_AC3 3
-#define IWL_TX_FIFO_HCCA_1 5
-#define IWL_TX_FIFO_HCCA_2 6
-#define IWL_TX_FIFO_NONE 7
-
-#define IEEE80211_DATA_LEN 2304
-#define IEEE80211_4ADDR_LEN 30
-#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
-#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
-
-struct iwl3945_frame {
- union {
- struct ieee80211_hdr frame;
- struct iwl3945_tx_beacon_cmd beacon;
- u8 raw[IEEE80211_FRAME_LEN];
- u8 cmd[360];
- } u;
- struct list_head list;
-};
-
-#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
-#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
-#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
-
-#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
-#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
-#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
-
-#define IWL_SUPPORTED_RATES_IE_LEN 8
-
-#define SCAN_INTERVAL 100
-
-#define MAX_TID_COUNT 9
-
-#define IWL_INVALID_RATE 0xFF
-#define IWL_INVALID_VALUE -1
-
-#define STA_PS_STATUS_WAKE 0
-#define STA_PS_STATUS_SLEEP 1
-
-struct iwl3945_ibss_seq {
- u8 mac[ETH_ALEN];
- u16 seq_num;
- u16 frag_num;
- unsigned long packet_time;
- struct list_head list;
-};
-
-#define IWL_RX_HDR(x) ((struct iwl3945_rx_frame_hdr *)(\
- x->u.rx_frame.stats.payload + \
- x->u.rx_frame.stats.phy_count))
-#define IWL_RX_END(x) ((struct iwl3945_rx_frame_end *)(\
- IWL_RX_HDR(x)->payload + \
- le16_to_cpu(IWL_RX_HDR(x)->len)))
-#define IWL_RX_STATS(x) (&x->u.rx_frame.stats)
-#define IWL_RX_DATA(x) (IWL_RX_HDR(x)->payload)
-
-
-/******************************************************************************
- *
- * Functions implemented in iwl3945-base.c which are forward declared here
- * for use by iwl-*.c
- *
- *****************************************************************************/
-extern int iwl3945_calc_db_from_ratio(int sig_ratio);
-extern void iwl3945_rx_replenish(void *data);
-extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr, int left);
-extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
- char **buf, bool display);
-extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
-
-/******************************************************************************
- *
- * Functions implemented in iwl-[34]*.c which are forward declared here
- * for use by iwl3945-base.c
- *
- * NOTE: The implementations of these functions are hardware specific
- * which is why they are in the hardware specific files (vs. iwl-base.c)
- *
- * Naming convention --
- * iwl3945_ <-- It's part of iwlwifi (should be changed to iwl3945_)
- * iwl3945_hw_ <-- Hardware specific (implemented in iwl-XXXX.c by all HW)
- * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
- * iwl3945_bg_ <-- Called from work queue context
- * iwl3945_mac_ <-- mac80211 callback
- *
- ****************************************************************************/
-extern void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv);
-extern void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv);
-extern void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv);
-extern int iwl3945_hw_rxq_stop(struct iwl_priv *priv);
-extern int iwl3945_hw_set_hw_params(struct iwl_priv *priv);
-extern int iwl3945_hw_nic_init(struct iwl_priv *priv);
-extern int iwl3945_hw_nic_stop_master(struct iwl_priv *priv);
-extern void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv);
-extern void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv);
-extern int iwl3945_hw_nic_reset(struct iwl_priv *priv);
-extern int iwl3945_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- dma_addr_t addr, u16 len,
- u8 reset, u8 pad);
-extern void iwl3945_hw_txq_free_tfd(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
-extern int iwl3945_hw_get_temperature(struct iwl_priv *priv);
-extern int iwl3945_hw_tx_queue_init(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
-extern unsigned int iwl3945_hw_get_beacon_cmd(struct iwl_priv *priv,
- struct iwl3945_frame *frame, u8 rate);
-void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct ieee80211_tx_info *info,
- struct ieee80211_hdr *hdr,
- int sta_id, int tx_id);
-extern int iwl3945_hw_reg_send_txpower(struct iwl_priv *priv);
-extern int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power);
-extern void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl3945_reply_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-extern void iwl3945_disable_events(struct iwl_priv *priv);
-extern int iwl4965_get_temperature(const struct iwl_priv *priv);
-extern void iwl3945_post_associate(struct iwl_priv *priv);
-extern void iwl3945_config_ap(struct iwl_priv *priv);
-
-extern int iwl3945_commit_rxon(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-
-/**
- * iwl3945_hw_find_station - Find station id for a given BSSID
- * @bssid: MAC address of the station to find
- *
- * NOTE: This should not be hardware specific but the code has
- * not yet been merged into a single common layer for managing the
- * station tables.
- */
-extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
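A hypothetical call site, assuming the driver's usual IWL_INVALID_STATION sentinel for "not found":

	u8 sta_id = iwl3945_hw_find_station(priv, bssid);
	if (sta_id == IWL_INVALID_STATION)
		return -ENOENT;		/* station not yet in the table */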
-
-extern struct ieee80211_ops iwl3945_hw_ops;
-
-/*
- * Forward declare iwl-3945.c functions for iwl3945-base.c
- */
-extern __le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv);
-extern int iwl3945_init_hw_rate_table(struct iwl_priv *priv);
-extern void iwl3945_reg_txpower_periodic(struct iwl_priv *priv);
-extern int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv);
-
-extern const struct iwl_channel_info *iwl3945_get_channel_info(
- const struct iwl_priv *priv, enum ieee80211_band band, u16 channel);
-
-extern int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate);
-
-/* scanning */
-int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
-void iwl3945_post_scan(struct iwl_priv *priv);
-
-/* rates */
-extern const struct iwl3945_rate_info iwl3945_rates[IWL_RATE_COUNT_3945];
-
-/* Requires full declaration of iwl_priv before including */
-#include "iwl-io.h"
-
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
deleted file mode 100644
index 1c93665..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.c
+++ /dev/null
@@ -1,774 +0,0 @@
-/******************************************************************************
-*
-* GPL LICENSE SUMMARY
-*
-* Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
-*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of version 2 of the GNU General Public License as
-* published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful, but
-* WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-* General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License
-* along with this program; if not, write to the Free Software
-* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
-* USA
-*
-* The full GNU General Public License is included in this distribution
-* in the file called LICENSE.GPL.
-*
-* Contact Information:
-* Intel Linux Wireless <ilw@linux.intel.com>
-* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-*****************************************************************************/
-#include "iwl-4965.h"
-#include "iwl-4965-debugfs.h"
-
-static const char *fmt_value = " %-30s %10u\n";
-static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
-static const char *fmt_header =
- "%-32s current cumulative delta max\n";
-
-static int iwl4965_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
-{
- int p = 0;
- u32 flag;
-
- flag = le32_to_cpu(priv->_4965.statistics.flag);
-
- p += scnprintf(buf + p, bufsz - p, "Statistics Flag(0x%X):\n", flag);
- if (flag & UCODE_STATISTICS_CLEAR_MSK)
- p += scnprintf(buf + p, bufsz - p,
- "\tStatistics have been cleared\n");
- p += scnprintf(buf + p, bufsz - p, "\tOperational Frequency: %s\n",
- (flag & UCODE_STATISTICS_FREQUENCY_MSK)
- ? "2.4 GHz" : "5.2 GHz");
- p += scnprintf(buf + p, bufsz - p, "\tTGj Narrow Band: %s\n",
- (flag & UCODE_STATISTICS_NARROW_BAND_MSK)
- ? "enabled" : "disabled");
-
- return p;
-}
-
-ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = sizeof(struct statistics_rx_phy) * 40 +
- sizeof(struct statistics_rx_non_phy) * 40 +
- sizeof(struct statistics_rx_ht_phy) * 40 + 400;
- ssize_t ret;
- struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
- struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
- struct statistics_rx_non_phy *general, *accum_general;
- struct statistics_rx_non_phy *delta_general, *max_general;
- struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
-	/*
-	 * The statistics displayed here are based on the last statistics
-	 * notification from the uCode and might not reflect the current
-	 * uCode activity.
-	 */
- ofdm = &priv->_4965.statistics.rx.ofdm;
- cck = &priv->_4965.statistics.rx.cck;
- general = &priv->_4965.statistics.rx.general;
- ht = &priv->_4965.statistics.rx.ofdm_ht;
- accum_ofdm = &priv->_4965.accum_statistics.rx.ofdm;
- accum_cck = &priv->_4965.accum_statistics.rx.cck;
- accum_general = &priv->_4965.accum_statistics.rx.general;
- accum_ht = &priv->_4965.accum_statistics.rx.ofdm_ht;
- delta_ofdm = &priv->_4965.delta_statistics.rx.ofdm;
- delta_cck = &priv->_4965.delta_statistics.rx.cck;
- delta_general = &priv->_4965.delta_statistics.rx.general;
- delta_ht = &priv->_4965.delta_statistics.rx.ofdm_ht;
- max_ofdm = &priv->_4965.max_delta.rx.ofdm;
- max_cck = &priv->_4965.max_delta.rx.cck;
- max_general = &priv->_4965.max_delta.rx.general;
- max_ht = &priv->_4965.max_delta.rx.ofdm_ht;
-
- pos += iwl4965_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_header, "Statistics_Rx - OFDM:");
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "ina_cnt:",
- le32_to_cpu(ofdm->ina_cnt),
- accum_ofdm->ina_cnt,
- delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "fina_cnt:",
- le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
- delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "plcp_err:",
- le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
- delta_ofdm->plcp_err, max_ofdm->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "crc32_err:",
- le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
- delta_ofdm->crc32_err, max_ofdm->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "overrun_err:",
- le32_to_cpu(ofdm->overrun_err),
- accum_ofdm->overrun_err, delta_ofdm->overrun_err,
- max_ofdm->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "early_overrun_err:",
- le32_to_cpu(ofdm->early_overrun_err),
- accum_ofdm->early_overrun_err,
- delta_ofdm->early_overrun_err,
- max_ofdm->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "crc32_good:",
- le32_to_cpu(ofdm->crc32_good),
- accum_ofdm->crc32_good, delta_ofdm->crc32_good,
- max_ofdm->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "false_alarm_cnt:",
- le32_to_cpu(ofdm->false_alarm_cnt),
- accum_ofdm->false_alarm_cnt,
- delta_ofdm->false_alarm_cnt,
- max_ofdm->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "fina_sync_err_cnt:",
- le32_to_cpu(ofdm->fina_sync_err_cnt),
- accum_ofdm->fina_sync_err_cnt,
- delta_ofdm->fina_sync_err_cnt,
- max_ofdm->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sfd_timeout:",
- le32_to_cpu(ofdm->sfd_timeout),
- accum_ofdm->sfd_timeout, delta_ofdm->sfd_timeout,
- max_ofdm->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "fina_timeout:",
- le32_to_cpu(ofdm->fina_timeout),
- accum_ofdm->fina_timeout, delta_ofdm->fina_timeout,
- max_ofdm->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "unresponded_rts:",
- le32_to_cpu(ofdm->unresponded_rts),
- accum_ofdm->unresponded_rts,
- delta_ofdm->unresponded_rts,
- max_ofdm->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "rxe_frame_lmt_ovrun:",
- le32_to_cpu(ofdm->rxe_frame_limit_overrun),
- accum_ofdm->rxe_frame_limit_overrun,
- delta_ofdm->rxe_frame_limit_overrun,
- max_ofdm->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sent_ack_cnt:",
- le32_to_cpu(ofdm->sent_ack_cnt),
- accum_ofdm->sent_ack_cnt, delta_ofdm->sent_ack_cnt,
- max_ofdm->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sent_cts_cnt:",
- le32_to_cpu(ofdm->sent_cts_cnt),
- accum_ofdm->sent_cts_cnt, delta_ofdm->sent_cts_cnt,
- max_ofdm->sent_cts_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sent_ba_rsp_cnt:",
- le32_to_cpu(ofdm->sent_ba_rsp_cnt),
- accum_ofdm->sent_ba_rsp_cnt,
- delta_ofdm->sent_ba_rsp_cnt,
- max_ofdm->sent_ba_rsp_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "dsp_self_kill:",
- le32_to_cpu(ofdm->dsp_self_kill),
- accum_ofdm->dsp_self_kill,
- delta_ofdm->dsp_self_kill,
- max_ofdm->dsp_self_kill);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "mh_format_err:",
- le32_to_cpu(ofdm->mh_format_err),
- accum_ofdm->mh_format_err,
- delta_ofdm->mh_format_err,
- max_ofdm->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "re_acq_main_rssi_sum:",
- le32_to_cpu(ofdm->re_acq_main_rssi_sum),
- accum_ofdm->re_acq_main_rssi_sum,
- delta_ofdm->re_acq_main_rssi_sum,
- max_ofdm->re_acq_main_rssi_sum);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_header, "Statistics_Rx - CCK:");
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "ina_cnt:",
- le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
- delta_cck->ina_cnt, max_cck->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "fina_cnt:",
- le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
- delta_cck->fina_cnt, max_cck->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "plcp_err:",
- le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
- delta_cck->plcp_err, max_cck->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "crc32_err:",
- le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
- delta_cck->crc32_err, max_cck->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "overrun_err:",
- le32_to_cpu(cck->overrun_err),
- accum_cck->overrun_err, delta_cck->overrun_err,
- max_cck->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "early_overrun_err:",
- le32_to_cpu(cck->early_overrun_err),
- accum_cck->early_overrun_err,
- delta_cck->early_overrun_err,
- max_cck->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "crc32_good:",
- le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
- delta_cck->crc32_good, max_cck->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "false_alarm_cnt:",
- le32_to_cpu(cck->false_alarm_cnt),
- accum_cck->false_alarm_cnt,
- delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "fina_sync_err_cnt:",
- le32_to_cpu(cck->fina_sync_err_cnt),
- accum_cck->fina_sync_err_cnt,
- delta_cck->fina_sync_err_cnt,
- max_cck->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sfd_timeout:",
- le32_to_cpu(cck->sfd_timeout),
- accum_cck->sfd_timeout, delta_cck->sfd_timeout,
- max_cck->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "fina_timeout:",
- le32_to_cpu(cck->fina_timeout),
- accum_cck->fina_timeout, delta_cck->fina_timeout,
- max_cck->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "unresponded_rts:",
- le32_to_cpu(cck->unresponded_rts),
- accum_cck->unresponded_rts, delta_cck->unresponded_rts,
- max_cck->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "rxe_frame_lmt_ovrun:",
- le32_to_cpu(cck->rxe_frame_limit_overrun),
- accum_cck->rxe_frame_limit_overrun,
- delta_cck->rxe_frame_limit_overrun,
- max_cck->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sent_ack_cnt:",
- le32_to_cpu(cck->sent_ack_cnt),
- accum_cck->sent_ack_cnt, delta_cck->sent_ack_cnt,
- max_cck->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sent_cts_cnt:",
- le32_to_cpu(cck->sent_cts_cnt),
- accum_cck->sent_cts_cnt, delta_cck->sent_cts_cnt,
- max_cck->sent_cts_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sent_ba_rsp_cnt:",
- le32_to_cpu(cck->sent_ba_rsp_cnt),
- accum_cck->sent_ba_rsp_cnt,
- delta_cck->sent_ba_rsp_cnt,
- max_cck->sent_ba_rsp_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "dsp_self_kill:",
- le32_to_cpu(cck->dsp_self_kill),
- accum_cck->dsp_self_kill, delta_cck->dsp_self_kill,
- max_cck->dsp_self_kill);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "mh_format_err:",
- le32_to_cpu(cck->mh_format_err),
- accum_cck->mh_format_err, delta_cck->mh_format_err,
- max_cck->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "re_acq_main_rssi_sum:",
- le32_to_cpu(cck->re_acq_main_rssi_sum),
- accum_cck->re_acq_main_rssi_sum,
- delta_cck->re_acq_main_rssi_sum,
- max_cck->re_acq_main_rssi_sum);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_header, "Statistics_Rx - GENERAL:");
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "bogus_cts:",
- le32_to_cpu(general->bogus_cts),
- accum_general->bogus_cts, delta_general->bogus_cts,
- max_general->bogus_cts);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "bogus_ack:",
- le32_to_cpu(general->bogus_ack),
- accum_general->bogus_ack, delta_general->bogus_ack,
- max_general->bogus_ack);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "non_bssid_frames:",
- le32_to_cpu(general->non_bssid_frames),
- accum_general->non_bssid_frames,
- delta_general->non_bssid_frames,
- max_general->non_bssid_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "filtered_frames:",
- le32_to_cpu(general->filtered_frames),
- accum_general->filtered_frames,
- delta_general->filtered_frames,
- max_general->filtered_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "non_channel_beacons:",
- le32_to_cpu(general->non_channel_beacons),
- accum_general->non_channel_beacons,
- delta_general->non_channel_beacons,
- max_general->non_channel_beacons);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "channel_beacons:",
- le32_to_cpu(general->channel_beacons),
- accum_general->channel_beacons,
- delta_general->channel_beacons,
- max_general->channel_beacons);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "num_missed_bcon:",
- le32_to_cpu(general->num_missed_bcon),
- accum_general->num_missed_bcon,
- delta_general->num_missed_bcon,
- max_general->num_missed_bcon);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "adc_rx_saturation_time:",
- le32_to_cpu(general->adc_rx_saturation_time),
- accum_general->adc_rx_saturation_time,
- delta_general->adc_rx_saturation_time,
- max_general->adc_rx_saturation_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "ina_detect_search_tm:",
- le32_to_cpu(general->ina_detection_search_time),
- accum_general->ina_detection_search_time,
- delta_general->ina_detection_search_time,
- max_general->ina_detection_search_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_silence_rssi_a:",
- le32_to_cpu(general->beacon_silence_rssi_a),
- accum_general->beacon_silence_rssi_a,
- delta_general->beacon_silence_rssi_a,
- max_general->beacon_silence_rssi_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_silence_rssi_b:",
- le32_to_cpu(general->beacon_silence_rssi_b),
- accum_general->beacon_silence_rssi_b,
- delta_general->beacon_silence_rssi_b,
- max_general->beacon_silence_rssi_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_silence_rssi_c:",
- le32_to_cpu(general->beacon_silence_rssi_c),
- accum_general->beacon_silence_rssi_c,
- delta_general->beacon_silence_rssi_c,
- max_general->beacon_silence_rssi_c);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "interference_data_flag:",
- le32_to_cpu(general->interference_data_flag),
- accum_general->interference_data_flag,
- delta_general->interference_data_flag,
- max_general->interference_data_flag);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "channel_load:",
- le32_to_cpu(general->channel_load),
- accum_general->channel_load,
- delta_general->channel_load,
- max_general->channel_load);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "dsp_false_alarms:",
- le32_to_cpu(general->dsp_false_alarms),
- accum_general->dsp_false_alarms,
- delta_general->dsp_false_alarms,
- max_general->dsp_false_alarms);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_rssi_a:",
- le32_to_cpu(general->beacon_rssi_a),
- accum_general->beacon_rssi_a,
- delta_general->beacon_rssi_a,
- max_general->beacon_rssi_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_rssi_b:",
- le32_to_cpu(general->beacon_rssi_b),
- accum_general->beacon_rssi_b,
- delta_general->beacon_rssi_b,
- max_general->beacon_rssi_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_rssi_c:",
- le32_to_cpu(general->beacon_rssi_c),
- accum_general->beacon_rssi_c,
- delta_general->beacon_rssi_c,
- max_general->beacon_rssi_c);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_energy_a:",
- le32_to_cpu(general->beacon_energy_a),
- accum_general->beacon_energy_a,
- delta_general->beacon_energy_a,
- max_general->beacon_energy_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_energy_b:",
- le32_to_cpu(general->beacon_energy_b),
- accum_general->beacon_energy_b,
- delta_general->beacon_energy_b,
- max_general->beacon_energy_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "beacon_energy_c:",
- le32_to_cpu(general->beacon_energy_c),
- accum_general->beacon_energy_c,
- delta_general->beacon_energy_c,
- max_general->beacon_energy_c);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_header, "Statistics_Rx - OFDM_HT:");
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "plcp_err:",
- le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
- delta_ht->plcp_err, max_ht->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "overrun_err:",
- le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
- delta_ht->overrun_err, max_ht->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "early_overrun_err:",
- le32_to_cpu(ht->early_overrun_err),
- accum_ht->early_overrun_err,
- delta_ht->early_overrun_err,
- max_ht->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "crc32_good:",
- le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
- delta_ht->crc32_good, max_ht->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "crc32_err:",
- le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
- delta_ht->crc32_err, max_ht->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "mh_format_err:",
- le32_to_cpu(ht->mh_format_err),
- accum_ht->mh_format_err,
- delta_ht->mh_format_err, max_ht->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg_crc32_good:",
- le32_to_cpu(ht->agg_crc32_good),
- accum_ht->agg_crc32_good,
- delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg_mpdu_cnt:",
- le32_to_cpu(ht->agg_mpdu_cnt),
- accum_ht->agg_mpdu_cnt,
- delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg_cnt:",
- le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
- delta_ht->agg_cnt, max_ht->agg_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "unsupport_mcs:",
- le32_to_cpu(ht->unsupport_mcs),
- accum_ht->unsupport_mcs,
- delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-ssize_t iwl4965_ucode_tx_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
- ssize_t ret;
- struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
-	/* The statistics displayed here are based on the last statistics
-	 * notification from the uCode and might not reflect the current
-	 * uCode activity.
-	 */
- tx = &priv->_4965.statistics.tx;
- accum_tx = &priv->_4965.accum_statistics.tx;
- delta_tx = &priv->_4965.delta_statistics.tx;
- max_tx = &priv->_4965.max_delta.tx;
-
- pos += iwl4965_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_header, "Statistics_Tx:");
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "preamble:",
- le32_to_cpu(tx->preamble_cnt),
- accum_tx->preamble_cnt,
- delta_tx->preamble_cnt, max_tx->preamble_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "rx_detected_cnt:",
- le32_to_cpu(tx->rx_detected_cnt),
- accum_tx->rx_detected_cnt,
- delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "bt_prio_defer_cnt:",
- le32_to_cpu(tx->bt_prio_defer_cnt),
- accum_tx->bt_prio_defer_cnt,
- delta_tx->bt_prio_defer_cnt,
- max_tx->bt_prio_defer_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "bt_prio_kill_cnt:",
- le32_to_cpu(tx->bt_prio_kill_cnt),
- accum_tx->bt_prio_kill_cnt,
- delta_tx->bt_prio_kill_cnt,
- max_tx->bt_prio_kill_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "few_bytes_cnt:",
- le32_to_cpu(tx->few_bytes_cnt),
- accum_tx->few_bytes_cnt,
- delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "cts_timeout:",
- le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
- delta_tx->cts_timeout, max_tx->cts_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "ack_timeout:",
- le32_to_cpu(tx->ack_timeout),
- accum_tx->ack_timeout,
- delta_tx->ack_timeout, max_tx->ack_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "expected_ack_cnt:",
- le32_to_cpu(tx->expected_ack_cnt),
- accum_tx->expected_ack_cnt,
- delta_tx->expected_ack_cnt,
- max_tx->expected_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "actual_ack_cnt:",
- le32_to_cpu(tx->actual_ack_cnt),
- accum_tx->actual_ack_cnt,
- delta_tx->actual_ack_cnt,
- max_tx->actual_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "dump_msdu_cnt:",
- le32_to_cpu(tx->dump_msdu_cnt),
- accum_tx->dump_msdu_cnt,
- delta_tx->dump_msdu_cnt,
- max_tx->dump_msdu_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "abort_nxt_frame_mismatch:",
- le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
- accum_tx->burst_abort_next_frame_mismatch_cnt,
- delta_tx->burst_abort_next_frame_mismatch_cnt,
- max_tx->burst_abort_next_frame_mismatch_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "abort_missing_nxt_frame:",
- le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
- accum_tx->burst_abort_missing_next_frame_cnt,
- delta_tx->burst_abort_missing_next_frame_cnt,
- max_tx->burst_abort_missing_next_frame_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "cts_timeout_collision:",
- le32_to_cpu(tx->cts_timeout_collision),
- accum_tx->cts_timeout_collision,
- delta_tx->cts_timeout_collision,
- max_tx->cts_timeout_collision);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "ack_ba_timeout_collision:",
- le32_to_cpu(tx->ack_or_ba_timeout_collision),
- accum_tx->ack_or_ba_timeout_collision,
- delta_tx->ack_or_ba_timeout_collision,
- max_tx->ack_or_ba_timeout_collision);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg ba_timeout:",
- le32_to_cpu(tx->agg.ba_timeout),
- accum_tx->agg.ba_timeout,
- delta_tx->agg.ba_timeout,
- max_tx->agg.ba_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg ba_resched_frames:",
- le32_to_cpu(tx->agg.ba_reschedule_frames),
- accum_tx->agg.ba_reschedule_frames,
- delta_tx->agg.ba_reschedule_frames,
- max_tx->agg.ba_reschedule_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg scd_query_agg_frame:",
- le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
- accum_tx->agg.scd_query_agg_frame_cnt,
- delta_tx->agg.scd_query_agg_frame_cnt,
- max_tx->agg.scd_query_agg_frame_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg scd_query_no_agg:",
- le32_to_cpu(tx->agg.scd_query_no_agg),
- accum_tx->agg.scd_query_no_agg,
- delta_tx->agg.scd_query_no_agg,
- max_tx->agg.scd_query_no_agg);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg scd_query_agg:",
- le32_to_cpu(tx->agg.scd_query_agg),
- accum_tx->agg.scd_query_agg,
- delta_tx->agg.scd_query_agg,
- max_tx->agg.scd_query_agg);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg scd_query_mismatch:",
- le32_to_cpu(tx->agg.scd_query_mismatch),
- accum_tx->agg.scd_query_mismatch,
- delta_tx->agg.scd_query_mismatch,
- max_tx->agg.scd_query_mismatch);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg frame_not_ready:",
- le32_to_cpu(tx->agg.frame_not_ready),
- accum_tx->agg.frame_not_ready,
- delta_tx->agg.frame_not_ready,
- max_tx->agg.frame_not_ready);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg underrun:",
- le32_to_cpu(tx->agg.underrun),
- accum_tx->agg.underrun,
- delta_tx->agg.underrun, max_tx->agg.underrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg bt_prio_kill:",
- le32_to_cpu(tx->agg.bt_prio_kill),
- accum_tx->agg.bt_prio_kill,
- delta_tx->agg.bt_prio_kill,
- max_tx->agg.bt_prio_kill);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "agg rx_ba_rsp_cnt:",
- le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
- accum_tx->agg.rx_ba_rsp_cnt,
- delta_tx->agg.rx_ba_rsp_cnt,
- max_tx->agg.rx_ba_rsp_cnt);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-ssize_t
-iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char *buf;
- int bufsz = sizeof(struct statistics_general) * 10 + 300;
- ssize_t ret;
- struct statistics_general_common *general, *accum_general;
- struct statistics_general_common *delta_general, *max_general;
- struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
- struct statistics_div *div, *accum_div, *delta_div, *max_div;
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
-	/* The statistics displayed here are based on the last statistics
-	 * notification from the uCode and might not reflect the current
-	 * uCode activity.
-	 */
- general = &priv->_4965.statistics.general.common;
- dbg = &priv->_4965.statistics.general.common.dbg;
- div = &priv->_4965.statistics.general.common.div;
- accum_general = &priv->_4965.accum_statistics.general.common;
- accum_dbg = &priv->_4965.accum_statistics.general.common.dbg;
- accum_div = &priv->_4965.accum_statistics.general.common.div;
- delta_general = &priv->_4965.delta_statistics.general.common;
- max_general = &priv->_4965.max_delta.general.common;
- delta_dbg = &priv->_4965.delta_statistics.general.common.dbg;
- max_dbg = &priv->_4965.max_delta.general.common.dbg;
- delta_div = &priv->_4965.delta_statistics.general.common.div;
- max_div = &priv->_4965.max_delta.general.common.div;
-
- pos += iwl4965_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_header, "Statistics_General:");
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_value, "temperature:",
- le32_to_cpu(general->temperature));
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_value, "ttl_timestamp:",
- le32_to_cpu(general->ttl_timestamp));
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "burst_check:",
- le32_to_cpu(dbg->burst_check),
- accum_dbg->burst_check,
- delta_dbg->burst_check, max_dbg->burst_check);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "burst_count:",
- le32_to_cpu(dbg->burst_count),
- accum_dbg->burst_count,
- delta_dbg->burst_count, max_dbg->burst_count);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "wait_for_silence_timeout_count:",
- le32_to_cpu(dbg->wait_for_silence_timeout_cnt),
- accum_dbg->wait_for_silence_timeout_cnt,
- delta_dbg->wait_for_silence_timeout_cnt,
- max_dbg->wait_for_silence_timeout_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "sleep_time:",
- le32_to_cpu(general->sleep_time),
- accum_general->sleep_time,
- delta_general->sleep_time, max_general->sleep_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "slots_out:",
- le32_to_cpu(general->slots_out),
- accum_general->slots_out,
- delta_general->slots_out, max_general->slots_out);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "slots_idle:",
- le32_to_cpu(general->slots_idle),
- accum_general->slots_idle,
- delta_general->slots_idle, max_general->slots_idle);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "tx_on_a:",
- le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
- delta_div->tx_on_a, max_div->tx_on_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "tx_on_b:",
- le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
- delta_div->tx_on_b, max_div->tx_on_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "exec_time:",
- le32_to_cpu(div->exec_time), accum_div->exec_time,
- delta_div->exec_time, max_div->exec_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "probe_time:",
- le32_to_cpu(div->probe_time), accum_div->probe_time,
- delta_div->probe_time, max_div->probe_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "rx_enable_counter:",
- le32_to_cpu(general->rx_enable_counter),
- accum_general->rx_enable_counter,
- delta_general->rx_enable_counter,
- max_general->rx_enable_counter);
- pos += scnprintf(buf + pos, bufsz - pos,
- fmt_table, "num_of_sos_states:",
- le32_to_cpu(general->num_of_sos_states),
- accum_general->num_of_sos_states,
- delta_general->num_of_sos_states,
- max_general->num_of_sos_states);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h b/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
deleted file mode 100644
index 6c8e353..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-debugfs.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-debug.h"
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-ssize_t iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
-ssize_t iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
-ssize_t iwl4965_ucode_general_stats_read(struct file *file,
- char __user *user_buf, size_t count, loff_t *ppos);
-#else
-static ssize_t
-iwl4965_ucode_rx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- return 0;
-}
-static ssize_t
-iwl4965_ucode_tx_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- return 0;
-}
-static ssize_t
-iwl4965_ucode_general_stats_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- return 0;
-}
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
deleted file mode 100644
index cb9baab..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-eeprom.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-commands.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-debug.h"
-#include "iwl-4965.h"
-#include "iwl-io.h"
-
-/******************************************************************************
- *
- * EEPROM related functions
- *
-******************************************************************************/
-
-/*
- * The device's EEPROM semaphore prevents conflicts between driver and uCode
- * when accessing the EEPROM; each access is a series of pulses to/from the
- * EEPROM chip, not a single event, so even reads could conflict if they
- * weren't arbitrated by the semaphore.
- */
-int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv)
-{
- u16 count;
- int ret;
-
- for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
- /* Request semaphore */
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
-
- /* See if we got it */
- ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
- EEPROM_SEM_TIMEOUT);
- if (ret >= 0) {
- IWL_DEBUG_IO(priv,
- "Acquired semaphore after %d tries.\n",
- count+1);
- return ret;
- }
- }
-
- return ret;
-}
-
-void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv)
-{
- iwl_legacy_clear_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
-
-}
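Taken together, these two helpers imply the following acquire/use/release pattern around a block of EEPROM accesses; a hedged sketch, with error handling abbreviated:

	/* Guard a burst of EEPROM reads with the hardware semaphore. */
	ret = iwl4965_eeprom_acquire_semaphore(priv);
	if (ret < 0) {
		IWL_ERR(priv, "EEPROM semaphore was not acquired\n");
		return ret;
	}
	/* ... EEPROM accesses go here ... */
	iwl4965_eeprom_release_semaphore(priv);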
-
-int iwl4965_eeprom_check_version(struct iwl_priv *priv)
-{
- u16 eeprom_ver;
- u16 calib_ver;
-
- eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
- calib_ver = iwl_legacy_eeprom_query16(priv,
- EEPROM_4965_CALIB_VERSION_OFFSET);
-
- if (eeprom_ver < priv->cfg->eeprom_ver ||
- calib_ver < priv->cfg->eeprom_calib_ver)
- goto err;
-
- IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
- eeprom_ver, calib_ver);
-
- return 0;
-err:
- IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
- "CALIB=0x%x < 0x%x\n",
- eeprom_ver, priv->cfg->eeprom_ver,
- calib_ver, priv->cfg->eeprom_calib_ver);
- return -EINVAL;
-
-}
-
-void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
-{
- const u8 *addr = iwl_legacy_eeprom_query_addr(priv,
- EEPROM_MAC_ADDRESS);
- memcpy(mac, addr, ETH_ALEN);
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h b/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
deleted file mode 100644
index fc6fa28..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-hw.h
+++ /dev/null
@@ -1,811 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-4965-hw.h) only for hardware-related definitions.
- * Use iwl-commands.h for uCode API definitions.
- * Use iwl-dev.h for driver implementation definitions.
- */
-
-#ifndef __iwl_4965_hw_h__
-#define __iwl_4965_hw_h__
-
-#include "iwl-fh.h"
-
-/* EEPROM */
-#define IWL4965_EEPROM_IMG_SIZE 1024
-
-/*
- * uCode queue management definitions ...
- * The first queue used for block-ack aggregation is #7 (4965 only).
- * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
- */
-#define IWL49_FIRST_AMPDU_QUEUE 7
-
-/* Sizes and addresses for instruction and data memory (SRAM) in
- * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
-#define IWL49_RTC_INST_LOWER_BOUND (0x000000)
-#define IWL49_RTC_INST_UPPER_BOUND (0x018000)
-
-#define IWL49_RTC_DATA_LOWER_BOUND (0x800000)
-#define IWL49_RTC_DATA_UPPER_BOUND (0x80A000)
-
-#define IWL49_RTC_INST_SIZE (IWL49_RTC_INST_UPPER_BOUND - \
- IWL49_RTC_INST_LOWER_BOUND)
-#define IWL49_RTC_DATA_SIZE (IWL49_RTC_DATA_UPPER_BOUND - \
- IWL49_RTC_DATA_LOWER_BOUND)
-
-#define IWL49_MAX_INST_SIZE IWL49_RTC_INST_SIZE
-#define IWL49_MAX_DATA_SIZE IWL49_RTC_DATA_SIZE
-
-/* Size of uCode instruction memory in bootstrap state machine */
-#define IWL49_MAX_BSM_SIZE BSM_SRAM_SIZE
-
-static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr)
-{
- return (addr >= IWL49_RTC_DATA_LOWER_BOUND) &&
- (addr < IWL49_RTC_DATA_UPPER_BOUND);
-}
-
-/********************* START TEMPERATURE *************************************/
-
-/**
- * 4965 temperature calculation.
- *
- * The driver must calculate the device temperature before calculating
- * a txpower setting (amplifier gain is temperature dependent). The
- * calculation uses 4 measurements, 3 of which (R1, R2, R3) are calibration
- * values used for the life of the driver, and one of which (R4) is the
- * real-time temperature indicator.
- *
- * uCode provides all 4 values to the driver via the "initialize alive"
- * notification (see struct iwl4965_init_alive_resp). After the runtime uCode
- * image loads, uCode updates the R4 value via statistics notifications
- * (see STATISTICS_NOTIFICATION), which occur after each received beacon
- * when associated, or can be requested via REPLY_STATISTICS_CMD.
- *
- * NOTE: uCode provides the R4 value as a 23-bit signed value. Driver
- * must sign-extend to 32 bits before applying formula below.
- *
- * Formula:
- *
- * degrees Kelvin = ((97 * 259 * (R4 - R2) / (R3 - R1)) / 100) + 8
- *
- * NOTE: The basic formula is 259 * (R4-R2) / (R3-R1). The 97/100 is
- * an additional correction, which should be centered around 0 degrees
- * Celsius (273 degrees Kelvin). The 8 (3 percent of 273) compensates for
- * centering the 97/100 correction around 0 degrees K.
- *
- * Subtract 273 from the Kelvin value to find degrees Celsius, for comparing
- * the current temperature with factory-measured temperatures when calculating
- * txpower settings.
- */
-#define TEMPERATURE_CALIB_KELVIN_OFFSET 8
-#define TEMPERATURE_CALIB_A_VAL 259
-
-/* Limit range of calculated temperature to be between these Kelvin values */
-#define IWL_TX_POWER_TEMPERATURE_MIN (263)
-#define IWL_TX_POWER_TEMPERATURE_MAX (410)
-
-#define IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(t) \
- (((t) < IWL_TX_POWER_TEMPERATURE_MIN) || \
- ((t) > IWL_TX_POWER_TEMPERATURE_MAX))
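The formula above translates to roughly the following integer arithmetic; this is a hedged sketch (the function name, the clamping choice, and the assumption that R4 has already been sign-extended are the editor's, not the driver's):

/* Compute the device temperature in Kelvin from the calibration values
 * R1..R3 and the sign-extended real-time indicator R4, as described above. */
static s32 iwl4965_temperature_kelvin(s32 R1, s32 R2, s32 R3, s32 R4)
{
	s32 temperature;

	if (R3 == R1)			/* guard against bogus calibration data */
		return IWL_TX_POWER_TEMPERATURE_MIN;

	temperature = (97 * TEMPERATURE_CALIB_A_VAL * (R4 - R2)) / (R3 - R1);
	temperature = temperature / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;

	if (IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temperature))
		temperature = IWL_TX_POWER_TEMPERATURE_MIN;	/* clamp into range */

	return temperature;
}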
-
-/********************* END TEMPERATURE ***************************************/
-
-/********************* START TXPOWER *****************************************/
-
-/**
- * 4965 txpower calculations rely on information from three sources:
- *
- * 1) EEPROM
- * 2) "initialize" alive notification
- * 3) statistics notifications
- *
- * EEPROM data consists of:
- *
- * 1) Regulatory information (max txpower and channel usage flags) is provided
- * separately for each channel that can possibly be supported by the 4965.
- * 40 MHz wide (.11n HT40) channels are listed separately from 20 MHz
- * (legacy) channels.
- *
- * See struct iwl4965_eeprom_channel for format, and struct iwl4965_eeprom
- * for locations in EEPROM.
- *
- * 2) Factory txpower calibration information is provided separately for
- * sub-bands of contiguous channels. 2.4 GHz has just one sub-band,
- * but 5 GHz has several sub-bands.
- *
- * In addition, per-band (2.4 and 5 GHz) saturation txpowers are provided.
- *
- * See struct iwl4965_eeprom_calib_info (and the tree of structures
- * contained within it) for format, and struct iwl4965_eeprom for
- * locations in EEPROM.
- *
- * "Initialization alive" notification (see struct iwl4965_init_alive_resp)
- * consists of:
- *
- * 1) Temperature calculation parameters.
- *
- * 2) Power supply voltage measurement.
- *
- * 3) Tx gain compensation to balance 2 transmitters for MIMO use.
- *
- * Statistics notifications deliver:
- *
- * 1) Current values for temperature param R4.
- */
-
-/**
- * To calculate a txpower setting for a given desired target txpower, channel,
- * modulation bit rate, and transmitter chain (4965 has 2 transmitters to
- * support MIMO and transmit diversity), driver must do the following:
- *
- * 1) Compare desired txpower vs. (EEPROM) regulatory limit for this channel.
- * Do not exceed regulatory limit; reduce target txpower if necessary.
- *
- * If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
- * 2 transmitters will be used simultaneously; driver must reduce the
- * regulatory limit by 3 dB (half-power) for each transmitter, so the
- * combined total output of the 2 transmitters is within regulatory limits.
- *
- *
- * 2) Compare target txpower vs. (EEPROM) saturation txpower *reduced by
- * backoff for this bit rate*. Do not exceed (saturation - backoff[rate]);
- * reduce target txpower if necessary.
- *
- * Backoff values below are in 1/2 dB units (equivalent to steps in
- * txpower gain tables):
- *
- * OFDM 6 - 36 MBit: 10 steps (5 dB)
- * OFDM 48 MBit: 15 steps (7.5 dB)
- * OFDM 54 MBit: 17 steps (8.5 dB)
- * OFDM 60 MBit: 20 steps (10 dB)
- * CCK all rates: 10 steps (5 dB)
- *
- * Backoff values apply to saturation txpower on a per-transmitter basis;
- * when using MIMO (2 transmitters), each transmitter uses the same
- * saturation level provided in EEPROM, and the same backoff values;
- * no reduction (such as with regulatory txpower limits) is required.
- *
- * Saturation and Backoff values apply equally to 20 MHz (legacy) channel
- * widths and 40 MHz (.11n HT40) channel widths; there is no separate
- * factory measurement for HT40 channels.
- *
- * The result of this step is the final target txpower. The rest of
- * the steps figure out the proper settings for the device to achieve
- * that target txpower.
- *
- *
- * 3) Determine (EEPROM) calibration sub band for the target channel, by
- * comparing against first and last channels in each sub band
- * (see struct iwl4965_eeprom_calib_subband_info).
- *
- *
- * 4) Linearly interpolate (EEPROM) factory calibration measurement sets,
- * referencing the 2 factory-measured (sample) channels within the sub band.
- *
- * Interpolation is based on difference between target channel's frequency
- * and the sample channels' frequencies. Since channel numbers are based
- * on frequency (5 MHz between each channel number), this is equivalent
- * to interpolating based on channel number differences.
- *
- * Note that the sample channels may or may not be the channels at the
- * edges of the sub band. The target channel may be "outside" of the
- * span of the sampled channels.
- *
- * Driver may choose the pair (for 2 Tx chains) of measurements (see
- * struct iwl4965_eeprom_calib_ch_info) for which the actual measured
- * txpower comes closest to the desired txpower. Usually, though,
- * the middle set of measurements is closest to the regulatory limits,
- * and is therefore a good choice for all txpower calculations (this
- * assumes that high accuracy is needed for maximizing legal txpower,
- * while lower txpower configurations do not need as much accuracy).
- *
- * Driver should interpolate both members of the chosen measurement pair,
- * i.e. for both Tx chains (radio transmitters), unless the driver knows
- * that only one of the chains will be used (e.g. only one tx antenna
- * connected, but this should be unusual). The rate scaling algorithm
- * switches antennas to find best performance, so both Tx chains will
- * be used (although only one at a time) even for non-MIMO transmissions.
- *
- * Driver should interpolate factory values for temperature, gain table
- * index, and actual power. The power amplifier detector values are
- * not used by the driver.
- *
- * Sanity check: If the target channel happens to be one of the sample
- * channels, the results should agree with the sample channel's
- * measurements!
- *
- *
- * 5) Find difference between desired txpower and (interpolated)
- * factory-measured txpower. Using (interpolated) factory gain table index
- * (shown elsewhere) as a starting point, adjust this index lower to
- * increase txpower, or higher to decrease txpower, until the target
- * txpower is reached. Each step in the gain table is 1/2 dB.
- *
- * For example, if factory measured txpower is 16 dBm, and target txpower
- * is 13 dBm, add 6 steps to the factory gain index to reduce txpower
- * by 3 dB.
- *
- *
- * 6) Find difference between current device temperature and (interpolated)
- * factory-measured temperature for sub-band. Factory values are in
- * degrees Celsius. To calculate current temperature, see comments for
- * "4965 temperature calculation".
- *
- * If current temperature is higher than factory temperature, driver must
- * increase gain (lower gain table index), and vice versa.
- *
- * Temperature affects gain differently for different channels:
- *
- * 2.4 GHz all channels: 3.5 degrees per half-dB step
- * 5 GHz channels 34-43: 4.5 degrees per half-dB step
- * 5 GHz channels >= 44: 4.0 degrees per half-dB step
- *
- * NOTE: Temperature can increase rapidly when transmitting, especially
- * with heavy traffic at high txpowers. Driver should update
- * temperature calculations often under these conditions to
- * maintain strong txpower in the face of rising temperature.
- *
- *
- * 7) Find difference between current power supply voltage indicator
- * (from "initialize alive") and factory-measured power supply voltage
- * indicator (EEPROM).
- *
- * If the current voltage is higher (indicator is lower) than factory
- * voltage, gain should be reduced (gain table index increased) by:
- *
- * (eeprom - current) / 7
- *
- * If the current voltage is lower (indicator is higher) than factory
- * voltage, gain should be increased (gain table index decreased) by:
- *
- * 2 * (current - eeprom) / 7
- *
- * If number of index steps in either direction turns out to be > 2,
- * something is wrong ... just use 0.
- *
- * NOTE: Voltage compensation is independent of band/channel.
- *
- * NOTE: "Initialize" uCode measures current voltage, which is assumed
- * to be constant after this initial measurement. Voltage
- * compensation for txpower (number of steps in gain table)
- * may be calculated once and used until the next uCode bootload.
- *
- *
- * 8) If setting up txpowers for MIMO rates (rate indexes 8-15, 24-31),
- * adjust txpower for each transmitter chain, so txpower is balanced
- * between the two chains. There are 5 pairs of tx_atten[group][chain]
- * values in "initialize alive", one pair for each of 5 channel ranges:
- *
- * Group 0: 5 GHz channel 34-43
- * Group 1: 5 GHz channel 44-70
- * Group 2: 5 GHz channel 71-124
- * Group 3: 5 GHz channel 125-200
- * Group 4: 2.4 GHz all channels
- *
- * Add the tx_atten[group][chain] value to the index for the target chain.
- * The values are signed, but are in pairs of 0 and a non-negative number,
- * so as to reduce gain (if necessary) of the "hotter" chain. This
- * avoids any need to double-check for regulatory compliance after
- * this step.
- *
- *
- * 9) If setting up for a CCK rate, lower the gain by adding a CCK compensation
- * value to the index:
- *
- * Hardware rev B: 9 steps (4.5 dB)
- * Hardware rev C: 5 steps (2.5 dB)
- *
- * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
- * bits [3:2], 1 = B, 2 = C.
- *
- * NOTE: This compensation is in addition to any saturation backoff that
- * might have been applied in an earlier step.
- *
- *
- * 10) Select the gain table, based on band (2.4 vs 5 GHz).
- *
- * Limit the adjusted index to stay within the table!
- *
- *
- * 11) Read gain table entries for DSP and radio gain, place into appropriate
- * location(s) in command (struct iwl4965_txpowertable_cmd).
- */
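Editorial aside (not part of the removed header): the arithmetic in steps 4-7 above can be summarized in a short sketch. The function names, the standalone form, and the integer rounding below are assumptions for illustration only; units follow the document (half-dB index steps, half-dBm power values, degrees Celsius, EEPROM voltage indicator codes).

/* Editorial sketch -- not code from the removed header. */

/* Step 4: linear interpolation between the two factory sample channels,
 * keyed by channel number (channel numbers are 5 MHz apart). */
static int interp_by_channel(int ch, int ch1, int v1, int ch2, int v2)
{
	if (ch2 == ch1)
		return v1;
	return v1 + ((ch - ch1) * (v2 - v1)) / (ch2 - ch1);
}

/* Steps 5-7: fold the txpower, temperature and voltage deltas into one
 * signed correction to the interpolated factory gain-table index.
 * Each index step is 1/2 dB; a larger index means lower txpower.
 * Power inputs are in half-dBm, so the subtraction yields steps directly. */
static int gain_index_correction(int factory_half_dbm, int target_half_dbm,
				 int factory_temp, int current_temp,
				 int degrees_per_step,
				 int eeprom_volt, int current_volt)
{
	int steps, volt_steps;

	/* Step 5: each half-dB of power reduction is one index step up. */
	steps = factory_half_dbm - target_half_dbm;

	/* Step 6: hotter than factory -> more gain -> lower index. */
	steps -= (current_temp - factory_temp) / degrees_per_step;

	/* Step 7: voltage compensation, at most 2 steps either way. */
	if (current_volt <= eeprom_volt)	/* actual voltage >= factory */
		volt_steps = (eeprom_volt - current_volt) / 7;
	else					/* actual voltage < factory */
		volt_steps = -((2 * (current_volt - eeprom_volt)) / 7);
	if (volt_steps > 2 || volt_steps < -2)
		volt_steps = 0;

	return steps + volt_steps;
}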
-
-/**
- * When MIMO is used (2 transmitters operating simultaneously), driver should
- * limit each transmitter to deliver a max of 3 dB below the regulatory limit
- * for the device. That is, use half power for each transmitter, so total
- * txpower is within regulatory limits.
- *
- * The value "6" represents number of steps in gain table to reduce power 3 dB.
- * Each step is 1/2 dB.
- */
-#define IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION (6)
-
-/**
- * CCK gain compensation.
- *
- * When calculating txpowers for CCK, after making sure that the target power
- * is within regulatory and saturation limits, driver must additionally
- * back off gain by adding these values to the gain table index.
- *
- * Hardware rev for 4965 can be determined by reading CSR_HW_REV_WA_REG,
- * bits [3:2], 1 = B, 2 = C.
- */
-#define IWL_TX_POWER_CCK_COMPENSATION_B_STEP (9)
-#define IWL_TX_POWER_CCK_COMPENSATION_C_STEP (5)
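A minimal editorial sketch (not from the removed sources) of selecting between these two step counts from the revision bits described above; the helper name and the fallback for unexpected revision values are assumptions.

/* Editorial sketch -- CSR_HW_REV_WA_REG bits [3:2]: 1 = rev B, 2 = rev C. */
static int cck_compensation_steps(unsigned int hw_rev_wa_reg)
{
	unsigned int rev = (hw_rev_wa_reg >> 2) & 0x3;

	if (rev == 1)
		return IWL_TX_POWER_CCK_COMPENSATION_B_STEP;	/* 9 steps, 4.5 dB */
	return IWL_TX_POWER_CCK_COMPENSATION_C_STEP;	/* 5 steps, 2.5 dB (assumed fallback) */
}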
-
-/*
- * 4965 power supply voltage compensation for txpower
- */
-#define TX_POWER_IWL_VOLTAGE_CODES_PER_03V (7)
-
-/**
- * Gain tables.
- *
- * The following tables contain pair of values for setting txpower, i.e.
- * gain settings for the output of the device's digital signal processor (DSP),
- * and for the analog gain structure of the transmitter.
- *
- * Each entry in the gain tables represents a step of 1/2 dB. Note that these
- * are *relative* steps, not indications of absolute output power. Output
- * power varies with temperature, voltage, and channel frequency, and also
- * requires consideration of average power (to satisfy regulatory constraints),
- * and peak power (to avoid distortion of the output signal).
- *
- * Each entry contains two values:
- * 1) DSP gain (or sometimes called DSP attenuation). This is a fine-grained
- * linear value that multiplies the output of the digital signal processor,
- * before being sent to the analog radio.
- * 2) Radio gain. This sets the analog gain of the radio Tx path.
- * It is a coarser setting, and behaves in a logarithmic (dB) fashion.
- *
- * EEPROM contains factory calibration data for txpower. This maps actual
- * measured txpower levels to gain settings in the "well known" tables
- * below ("well-known" means here that both factory calibration *and* the
- * driver work with the same table).
- *
- * There are separate tables for 2.4 GHz and 5 GHz bands. The 5 GHz table
- * has an extension (into negative indexes), in case the driver needs to
- * boost power setting for high device temperatures (higher than would be
- * present during factory calibration). A 5 GHz EEPROM index of "40"
- * corresponds to the 49th entry in the table used by the driver.
- */
-#define MIN_TX_GAIN_INDEX (0) /* highest gain, lowest idx, 2.4 */
-#define MIN_TX_GAIN_INDEX_52GHZ_EXT (-9) /* highest gain, lowest idx, 5 */
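For concreteness, a tiny editorial sketch (assumed name, not driver code) of the index mapping described above: with the extension starting at -9, EEPROM index 40 lands at zero-based table position 49.

/* Editorial sketch -- zero-based table position of a 5 GHz EEPROM gain index,
 * given the negative extension starting at MIN_TX_GAIN_INDEX_52GHZ_EXT. */
static int gain_table_pos_52ghz(int eeprom_index)
{
	return eeprom_index - MIN_TX_GAIN_INDEX_52GHZ_EXT;	/* e.g. 40 - (-9) = 49 */
}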
-
-/**
- * 2.4 GHz gain table
- *
- * Index Dsp gain Radio gain
- * 0 110 0x3f (highest gain)
- * 1 104 0x3f
- * 2 98 0x3f
- * 3 110 0x3e
- * 4 104 0x3e
- * 5 98 0x3e
- * 6 110 0x3d
- * 7 104 0x3d
- * 8 98 0x3d
- * 9 110 0x3c
- * 10 104 0x3c
- * 11 98 0x3c
- * 12 110 0x3b
- * 13 104 0x3b
- * 14 98 0x3b
- * 15 110 0x3a
- * 16 104 0x3a
- * 17 98 0x3a
- * 18 110 0x39
- * 19 104 0x39
- * 20 98 0x39
- * 21 110 0x38
- * 22 104 0x38
- * 23 98 0x38
- * 24 110 0x37
- * 25 104 0x37
- * 26 98 0x37
- * 27 110 0x36
- * 28 104 0x36
- * 29 98 0x36
- * 30 110 0x35
- * 31 104 0x35
- * 32 98 0x35
- * 33 110 0x34
- * 34 104 0x34
- * 35 98 0x34
- * 36 110 0x33
- * 37 104 0x33
- * 38 98 0x33
- * 39 110 0x32
- * 40 104 0x32
- * 41 98 0x32
- * 42 110 0x31
- * 43 104 0x31
- * 44 98 0x31
- * 45 110 0x30
- * 46 104 0x30
- * 47 98 0x30
- * 48 110 0x6
- * 49 104 0x6
- * 50 98 0x6
- * 51 110 0x5
- * 52 104 0x5
- * 53 98 0x5
- * 54 110 0x4
- * 55 104 0x4
- * 56 98 0x4
- * 57 110 0x3
- * 58 104 0x3
- * 59 98 0x3
- * 60 110 0x2
- * 61 104 0x2
- * 62 98 0x2
- * 63 110 0x1
- * 64 104 0x1
- * 65 98 0x1
- * 66 110 0x0
- * 67 104 0x0
- * 68 98 0x0
- * 69 97 0
- * 70 96 0
- * 71 95 0
- * 72 94 0
- * 73 93 0
- * 74 92 0
- * 75 91 0
- * 76 90 0
- * 77 89 0
- * 78 88 0
- * 79 87 0
- * 80 86 0
- * 81 85 0
- * 82 84 0
- * 83 83 0
- * 84 82 0
- * 85 81 0
- * 86 80 0
- * 87 79 0
- * 88 78 0
- * 89 77 0
- * 90 76 0
- * 91 75 0
- * 92 74 0
- * 93 73 0
- * 94 72 0
- * 95 71 0
- * 96 70 0
- * 97 69 0
- * 98 68 0
- */
-
-/**
- * 5 GHz gain table
- *
- * Index Dsp gain Radio gain
- * -9 123 0x3F (highest gain)
- * -8 117 0x3F
- * -7 110 0x3F
- * -6 104 0x3F
- * -5 98 0x3F
- * -4 110 0x3E
- * -3 104 0x3E
- * -2 98 0x3E
- * -1 110 0x3D
- * 0 104 0x3D
- * 1 98 0x3D
- * 2 110 0x3C
- * 3 104 0x3C
- * 4 98 0x3C
- * 5 110 0x3B
- * 6 104 0x3B
- * 7 98 0x3B
- * 8 110 0x3A
- * 9 104 0x3A
- * 10 98 0x3A
- * 11 110 0x39
- * 12 104 0x39
- * 13 98 0x39
- * 14 110 0x38
- * 15 104 0x38
- * 16 98 0x38
- * 17 110 0x37
- * 18 104 0x37
- * 19 98 0x37
- * 20 110 0x36
- * 21 104 0x36
- * 22 98 0x36
- * 23 110 0x35
- * 24 104 0x35
- * 25 98 0x35
- * 26 110 0x34
- * 27 104 0x34
- * 28 98 0x34
- * 29 110 0x33
- * 30 104 0x33
- * 31 98 0x33
- * 32 110 0x32
- * 33 104 0x32
- * 34 98 0x32
- * 35 110 0x31
- * 36 104 0x31
- * 37 98 0x31
- * 38 110 0x30
- * 39 104 0x30
- * 40 98 0x30
- * 41 110 0x25
- * 42 104 0x25
- * 43 98 0x25
- * 44 110 0x24
- * 45 104 0x24
- * 46 98 0x24
- * 47 110 0x23
- * 48 104 0x23
- * 49 98 0x23
- * 50 110 0x22
- * 51 104 0x18
- * 52 98 0x18
- * 53 110 0x17
- * 54 104 0x17
- * 55 98 0x17
- * 56 110 0x16
- * 57 104 0x16
- * 58 98 0x16
- * 59 110 0x15
- * 60 104 0x15
- * 61 98 0x15
- * 62 110 0x14
- * 63 104 0x14
- * 64 98 0x14
- * 65 110 0x13
- * 66 104 0x13
- * 67 98 0x13
- * 68 110 0x12
- * 69 104 0x08
- * 70 98 0x08
- * 71 110 0x07
- * 72 104 0x07
- * 73 98 0x07
- * 74 110 0x06
- * 75 104 0x06
- * 76 98 0x06
- * 77 110 0x05
- * 78 104 0x05
- * 79 98 0x05
- * 80 110 0x04
- * 81 104 0x04
- * 82 98 0x04
- * 83 110 0x03
- * 84 104 0x03
- * 85 98 0x03
- * 86 110 0x02
- * 87 104 0x02
- * 88 98 0x02
- * 89 110 0x01
- * 90 104 0x01
- * 91 98 0x01
- * 92 110 0x00
- * 93 104 0x00
- * 94 98 0x00
- * 95 93 0x00
- * 96 88 0x00
- * 97 83 0x00
- * 98 78 0x00
- */
-
-
-/**
- * Sanity checks and default values for EEPROM regulatory levels.
- * If EEPROM values fall outside MIN/MAX range, use default values.
- *
- * Regulatory limits refer to the maximum average txpower allowed by
- * regulatory agencies in the geographies in which the device is meant
- * to be operated. These limits are SKU-specific (i.e. geography-specific),
- * and channel-specific; each channel has an individual regulatory limit
- * listed in the EEPROM.
- *
- * Units are in half-dBm (i.e. "34" means 17 dBm).
- */
-#define IWL_TX_POWER_DEFAULT_REGULATORY_24 (34)
-#define IWL_TX_POWER_DEFAULT_REGULATORY_52 (34)
-#define IWL_TX_POWER_REGULATORY_MIN (0)
-#define IWL_TX_POWER_REGULATORY_MAX (34)
-
-/**
- * Sanity checks and default values for EEPROM saturation levels.
- * If EEPROM values fall outside MIN/MAX range, use default values.
- *
- * Saturation is the highest level that the output power amplifier can produce
- * without significant clipping distortion. This is a "peak" power level.
- * Different types of modulation (i.e. various "rates", and OFDM vs. CCK)
- * require differing amounts of backoff, relative to their average power output,
- * in order to avoid clipping distortion.
- *
- * Driver must make sure that it is violating neither the saturation limit,
- * nor the regulatory limit, when calculating Tx power settings for various
- * rates.
- *
- * Units are in half-dBm (i.e. "38" means 19 dBm).
- */
-#define IWL_TX_POWER_DEFAULT_SATURATION_24 (38)
-#define IWL_TX_POWER_DEFAULT_SATURATION_52 (38)
-#define IWL_TX_POWER_SATURATION_MIN (20)
-#define IWL_TX_POWER_SATURATION_MAX (50)
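Both sanity checks (regulatory and saturation) follow the same clamp-or-default pattern; the generic helper below is an editorial sketch (name and form are assumptions), shown with the regulatory limits as the example.

/* Editorial sketch -- fall back to the default when an EEPROM value (half-dBm)
 * falls outside the allowed range. */
static int eeprom_value_or_default(int value, int min, int max, int def)
{
	return (value < min || value > max) ? def : value;
}

/* Example use for a 2.4 GHz regulatory limit read from EEPROM:
 *   limit = eeprom_value_or_default(raw_limit,
 *                                   IWL_TX_POWER_REGULATORY_MIN,
 *                                   IWL_TX_POWER_REGULATORY_MAX,
 *                                   IWL_TX_POWER_DEFAULT_REGULATORY_24);
 */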
-
-/**
- * Channel groups used for Tx Attenuation calibration (MIMO tx channel balance)
- * and thermal Txpower calibration.
- *
- * When calculating txpower, driver must compensate for current device
- * temperature; higher temperature requires higher gain. Driver must calculate
- * current temperature (see "4965 temperature calculation"), then compare vs.
- * factory calibration temperature in EEPROM; if current temperature is higher
- * than factory temperature, driver must *increase* gain by proportions shown
- * in table below. If current temperature is lower than factory, driver must
- * *decrease* gain.
- *
- * Different frequency ranges require different compensation, as shown below.
- */
-/* Group 0, 5.2 GHz ch 34-43: 4.5 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR1_FCH 34
-#define CALIB_IWL_TX_ATTEN_GR1_LCH 43
-
-/* Group 1, 5.3 GHz ch 44-70: 4.0 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR2_FCH 44
-#define CALIB_IWL_TX_ATTEN_GR2_LCH 70
-
-/* Group 2, 5.5 GHz ch 71-124: 4.0 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR3_FCH 71
-#define CALIB_IWL_TX_ATTEN_GR3_LCH 124
-
-/* Group 3, 5.7 GHz ch 125-200: 4.0 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR4_FCH 125
-#define CALIB_IWL_TX_ATTEN_GR4_LCH 200
-
-/* Group 4, 2.4 GHz all channels: 3.5 degrees per 1/2 dB. */
-#define CALIB_IWL_TX_ATTEN_GR5_FCH 1
-#define CALIB_IWL_TX_ATTEN_GR5_LCH 20
-
-enum {
- CALIB_CH_GROUP_1 = 0,
- CALIB_CH_GROUP_2 = 1,
- CALIB_CH_GROUP_3 = 2,
- CALIB_CH_GROUP_4 = 3,
- CALIB_CH_GROUP_5 = 4,
- CALIB_CH_GROUP_MAX
-};
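An editorial sketch (assumed helper, not from the removed header) of mapping a channel number to its calibration group using the boundaries above; since the ranges do not overlap, the channel number alone is enough.

/* Editorial sketch -- returns -1 for a channel outside every group. */
static int calib_group_for_channel(int channel)
{
	if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
		return CALIB_CH_GROUP_5;	/* 2.4 GHz, all channels */
	if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
		return CALIB_CH_GROUP_1;
	if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
		return CALIB_CH_GROUP_2;
	if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
		return CALIB_CH_GROUP_3;
	if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
	    channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
		return CALIB_CH_GROUP_4;
	return -1;
}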
-
-/********************* END TXPOWER *****************************************/
-
-
-/**
- * Tx/Rx Queues
- *
- * Most communication between driver and 4965 is via queues of data buffers.
- * For example, all commands that the driver issues to device's embedded
- * controller (uCode) are via the command queue (one of the Tx queues). All
- * uCode command responses/replies/notifications, including Rx frames, are
- * conveyed from uCode to driver via the Rx queue.
- *
- * Most support for these queues, including handshake support, resides in
- * structures in host DRAM, shared between the driver and the device. When
- * allocating this memory, the driver must make sure that data written by
- * the host CPU updates DRAM immediately (and does not get "stuck" in CPU's
- * cache memory), so DRAM and cache are consistent, and the device can
- * immediately see changes made by the driver.
- *
- * 4965 supports up to 16 DRAM-based Tx queues, and services these queues via
- * up to 7 DMA channels (FIFOs). Each Tx queue is supported by a circular array
- * in DRAM containing 256 Transmit Frame Descriptors (TFDs).
- */
-#define IWL49_NUM_FIFOS 7
-#define IWL49_CMD_FIFO_NUM 4
-#define IWL49_NUM_QUEUES 16
-#define IWL49_NUM_AMPDU_QUEUES 8
-
-
-/**
- * struct iwl4965_schedq_bc_tbl
- *
- * Byte Count table
- *
- * Each Tx queue uses a byte-count table containing 320 entries:
- * one 16-bit entry for each of 256 TFDs, plus an additional 64 entries that
- * duplicate the first 64 entries (to avoid wrap-around within a Tx window;
- * max Tx window is 64 TFDs).
- *
- * When driver sets up a new TFD, it must also enter the total byte count
- * of the frame to be transmitted into the corresponding entry in the byte
- * count table for the chosen Tx queue. If the TFD index is 0-63, the driver
- * must duplicate the byte count entry in corresponding index 256-319.
- *
- * padding puts each byte count table on a 1024-byte boundary;
- * 4965 assumes tables are separated by 1024 bytes.
- */
-struct iwl4965_scd_bc_tbl {
- __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
- u8 pad[1024 - (TFD_QUEUE_BC_SIZE) * sizeof(__le16)];
-} __packed;
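The 0-63 duplication rule above is the easy part to miss; the helper below is an editorial sketch (assumed name, not the driver's actual code) of writing one byte-count entry together with its shadow copy.

/* Editorial sketch -- set a TFD's byte count, duplicating entries 0-63 into
 * the shadow region at 256-319 as required by the comment above. */
static void scd_bc_tbl_set_bytecnt(struct iwl4965_scd_bc_tbl *bc_tbl,
				   int tfd_index, u16 byte_count)
{
	__le16 bc = cpu_to_le16(byte_count);

	bc_tbl->tfd_offset[tfd_index] = bc;
	if (tfd_index < 64)
		bc_tbl->tfd_offset[tfd_index + 256] = bc;
}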
-
-
-#define IWL4965_RTC_INST_LOWER_BOUND (0x000000)
-
-/* RSSI to dBm */
-#define IWL4965_RSSI_OFFSET 44
-
-/* PCI registers */
-#define PCI_CFG_RETRY_TIMEOUT 0x041
-
-/* PCI register values */
-#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
-#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
-
-#define IWL4965_DEFAULT_TX_RETRY 15
-
-/* EEPROM */
-#define IWL4965_FIRST_AMPDU_QUEUE 10
-
-
-#endif /* !__iwl_4965_hw_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.c b/drivers/net/wireless/iwlegacy/iwl-4965-led.c
deleted file mode 100644
index 6862fdc..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-led.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-
-#include "iwl-commands.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-4965-led.h"
-
-/* Send led command */
-static int
-iwl4965_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
-{
- struct iwl_host_cmd cmd = {
- .id = REPLY_LEDS_CMD,
- .len = sizeof(struct iwl_led_cmd),
- .data = led_cmd,
- .flags = CMD_ASYNC,
- .callback = NULL,
- };
- u32 reg;
-
- reg = iwl_read32(priv, CSR_LED_REG);
- if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
- iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
-
- return iwl_legacy_send_cmd(priv, &cmd);
-}
-
-/* Set led register off */
-void iwl4965_led_enable(struct iwl_priv *priv)
-{
- iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
-}
-
-const struct iwl_led_ops iwl4965_led_ops = {
- .cmd = iwl4965_send_led_cmd,
-};
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-led.h b/drivers/net/wireless/iwlegacy/iwl-4965-led.h
deleted file mode 100644
index 5ed3615..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-led.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_4965_led_h__
-#define __iwl_4965_led_h__
-
-extern const struct iwl_led_ops iwl4965_led_ops;
-void iwl4965_led_enable(struct iwl_priv *priv);
-
-#endif /* __iwl_4965_led_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c b/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
deleted file mode 100644
index 2be6d9e..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-lib.c
+++ /dev/null
@@ -1,1194 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-#include <linux/etherdevice.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-hw.h"
-#include "iwl-4965.h"
-#include "iwl-sta.h"
-
-void iwl4965_check_abort_status(struct iwl_priv *priv,
- u8 frame_count, u32 status)
-{
- if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
- IWL_ERR(priv, "Tx flush command to flush out all frames\n");
- if (!test_bit(STATUS_EXIT_PENDING, &priv->status))
- queue_work(priv->workqueue, &priv->tx_flush);
- }
-}
-
-/*
- * EEPROM
- */
-struct iwl_mod_params iwl4965_mod_params = {
- .amsdu_size_8K = 1,
- .restart_fw = 1,
- /* the rest are 0 by default */
-};
-
-void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- unsigned long flags;
- int i;
- spin_lock_irqsave(&rxq->lock, flags);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
- /* Fill the rx_used queue with _all_ of the Rx buffers */
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
- /* In the reset function, these buffers may have been allocated
- * to an SKB, so we need to unmap and free potential storage */
- if (rxq->pool[i].page != NULL) {
- pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- __iwl_legacy_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
- }
-
- for (i = 0; i < RX_QUEUE_SIZE; i++)
- rxq->queue[i] = NULL;
-
- /* Set us so that we have processed and used all buffers, but have
- * not restocked the Rx queue with fresh buffers */
- rxq->read = rxq->write = 0;
- rxq->write_actual = 0;
- rxq->free_count = 0;
- spin_unlock_irqrestore(&rxq->lock, flags);
-}
-
-int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- u32 rb_size;
- const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
- u32 rb_timeout = 0;
-
- if (priv->cfg->mod_params->amsdu_size_8K)
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
- else
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
-
- /* Stop Rx DMA */
- iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
-
- /* Reset driver's Rx queue write index */
- iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
-
- /* Tell device where to find RBD circular buffer in DRAM */
- iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- (u32)(rxq->bd_dma >> 8));
-
- /* Tell device where in DRAM to update its Rx status */
- iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
- rxq->rb_stts_dma >> 4);
-
- /* Enable Rx DMA
- * Direct rx interrupts to hosts
- * Rx buffer size 4 or 8k
- * RB timeout 0x10
- * 256 RBDs
- */
- iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
- FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
- FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
- rb_size|
- (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
- (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
-
- /* Set interrupt coalescing timer to default (2048 usecs) */
- iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
-
- return 0;
-}
-
-static void iwl4965_set_pwr_vmain(struct iwl_priv *priv)
-{
-/*
- * (for documentation purposes)
- * to set power to V_AUX, do:
-
- if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
- iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
- */
-
- iwl_legacy_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
-}
-
-int iwl4965_hw_nic_init(struct iwl_priv *priv)
-{
- unsigned long flags;
- struct iwl_rx_queue *rxq = &priv->rxq;
- int ret;
-
- /* nic_init */
- spin_lock_irqsave(&priv->lock, flags);
- priv->cfg->ops->lib->apm_ops.init(priv);
-
- /* Set interrupt coalescing calibration timer to default (512 usecs) */
- iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwl4965_set_pwr_vmain(priv);
-
- priv->cfg->ops->lib->apm_ops.config(priv);
-
- /* Allocate the RX queue, or reset if it is already allocated */
- if (!rxq->bd) {
- ret = iwl_legacy_rx_queue_alloc(priv);
- if (ret) {
- IWL_ERR(priv, "Unable to initialize Rx queue\n");
- return -ENOMEM;
- }
- } else
- iwl4965_rx_queue_reset(priv, rxq);
-
- iwl4965_rx_replenish(priv);
-
- iwl4965_rx_init(priv, rxq);
-
- spin_lock_irqsave(&priv->lock, flags);
-
- rxq->need_update = 1;
- iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Allocate or reset and init all Tx and Command queues */
- if (!priv->txq) {
- ret = iwl4965_txq_ctx_alloc(priv);
- if (ret)
- return ret;
- } else
- iwl4965_txq_ctx_reset(priv);
-
- set_bit(STATUS_INIT, &priv->status);
-
- return 0;
-}
-
-/**
- * iwl4965_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
- */
-static inline __le32 iwl4965_dma_addr2rbd_ptr(struct iwl_priv *priv,
- dma_addr_t dma_addr)
-{
- return cpu_to_le32((u32)(dma_addr >> 8));
-}
-
-/**
- * iwl4965_rx_queue_restock - refill RX queue from pre-allocated pool
- *
- * If there are slots in the RX queue that need to be restocked,
- * and we have free pre-allocated buffers, fill the ranks as much
- * as we can, pulling from rx_free.
- *
- * This moves the 'write' index forward to catch up with 'processed', and
- * also updates the memory address in the firmware to reference the new
- * target buffer.
- */
-void iwl4965_rx_queue_restock(struct iwl_priv *priv)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- unsigned long flags;
-
- spin_lock_irqsave(&rxq->lock, flags);
- while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
- /* The overwritten rxb must be a used one */
- rxb = rxq->queue[rxq->write];
- BUG_ON(rxb && rxb->page);
-
- /* Get next free Rx buffer, remove from free list */
- element = rxq->rx_free.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
-
- /* Point to Rx buffer via next RBD in circular buffer */
- rxq->bd[rxq->write] = iwl4965_dma_addr2rbd_ptr(priv,
- rxb->page_dma);
- rxq->queue[rxq->write] = rxb;
- rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
- rxq->free_count--;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
- /* If the pre-allocated buffer pool is dropping low, schedule to
- * refill it */
- if (rxq->free_count <= RX_LOW_WATERMARK)
- queue_work(priv->workqueue, &priv->rx_replenish);
-
-
- /* If we've added more space for the firmware to place data, tell it.
- * Increment device's write pointer in multiples of 8. */
- if (rxq->write_actual != (rxq->write & ~0x7)) {
- spin_lock_irqsave(&rxq->lock, flags);
- rxq->need_update = 1;
- spin_unlock_irqrestore(&rxq->lock, flags);
- iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
- }
-}
-
-/**
- * iwl4965_rx_replenish - Move all used packets from rx_used to rx_free
- *
- * When moving to rx_free an SKB is allocated for the slot.
- *
- * Also restock the Rx queue via iwl_rx_queue_restock.
- * This is called as a scheduled work item (except for during initialization)
- */
-static void iwl4965_rx_allocate(struct iwl_priv *priv, gfp_t priority)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- struct page *page;
- unsigned long flags;
- gfp_t gfp_mask = priority;
-
- while (1) {
- spin_lock_irqsave(&rxq->lock, flags);
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- return;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- if (rxq->free_count > RX_LOW_WATERMARK)
- gfp_mask |= __GFP_NOWARN;
-
- if (priv->hw_params.rx_page_order > 0)
- gfp_mask |= __GFP_COMP;
-
- /* Alloc a new receive buffer */
- page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
- if (!page) {
- if (net_ratelimit())
- IWL_DEBUG_INFO(priv, "alloc_pages failed, "
- "order: %d\n",
- priv->hw_params.rx_page_order);
-
- if ((rxq->free_count <= RX_LOW_WATERMARK) &&
- net_ratelimit())
- IWL_CRIT(priv,
- "Failed to alloc_pages with %s. "
- "Only %u free buffers remaining.\n",
- priority == GFP_ATOMIC ?
- "GFP_ATOMIC" : "GFP_KERNEL",
- rxq->free_count);
- /* We don't reschedule replenish work here -- we will
- * call the restock method and if it still needs
- * more buffers it will schedule replenish */
- return;
- }
-
- spin_lock_irqsave(&rxq->lock, flags);
-
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- __free_pages(page, priv->hw_params.rx_page_order);
- return;
- }
- element = rxq->rx_used.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
-
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- BUG_ON(rxb->page);
- rxb->page = page;
- /* Get physical address of the RB */
- rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- /* dma address must be no more than 36 bits */
- BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
- /* and also 256 byte aligned! */
- BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
-
- spin_lock_irqsave(&rxq->lock, flags);
-
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
- priv->alloc_rxb_page++;
-
- spin_unlock_irqrestore(&rxq->lock, flags);
- }
-}
-
-void iwl4965_rx_replenish(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- iwl4965_rx_allocate(priv, GFP_KERNEL);
-
- spin_lock_irqsave(&priv->lock, flags);
- iwl4965_rx_queue_restock(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-void iwl4965_rx_replenish_now(struct iwl_priv *priv)
-{
- iwl4965_rx_allocate(priv, GFP_ATOMIC);
-
- iwl4965_rx_queue_restock(priv);
-}
-
-/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
- * If an SKB has been detached, the pool entry must have its SKB set to NULL.
- * This free routine walks the list of pool entries and, if the SKB is
- * non-NULL, unmaps and frees it.
- */
-void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- int i;
- for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
- if (rxq->pool[i].page != NULL) {
- pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- __iwl_legacy_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- }
-
- dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->bd_dma);
- dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
- rxq->bd = NULL;
- rxq->rb_stts = NULL;
-}
-
-int iwl4965_rxq_stop(struct iwl_priv *priv)
-{
-
- /* stop Rx DMA */
- iwl_legacy_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
- iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
- FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
-
- return 0;
-}
-
-int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
-{
- int idx = 0;
- int band_offset = 0;
-
- /* HT rate format: mac80211 wants an MCS number, which is just LSB */
- if (rate_n_flags & RATE_MCS_HT_MSK) {
- idx = (rate_n_flags & 0xff);
- return idx;
- /* Legacy rate format, search for match in table */
- } else {
- if (band == IEEE80211_BAND_5GHZ)
- band_offset = IWL_FIRST_OFDM_RATE;
- for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
- if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
- return idx - band_offset;
- }
-
- return -1;
-}
-
-static int iwl4965_calc_rssi(struct iwl_priv *priv,
- struct iwl_rx_phy_res *rx_resp)
-{
- /* data from PHY/DSP regarding signal strength, etc.,
- * contents are always there, not configurable by host. */
- struct iwl4965_rx_non_cfg_phy *ncphy =
- (struct iwl4965_rx_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
- u32 agc = (le16_to_cpu(ncphy->agc_info) & IWL49_AGC_DB_MASK)
- >> IWL49_AGC_DB_POS;
-
- u32 valid_antennae =
- (le16_to_cpu(rx_resp->phy_flags) & IWL49_RX_PHY_FLAGS_ANTENNAE_MASK)
- >> IWL49_RX_PHY_FLAGS_ANTENNAE_OFFSET;
- u8 max_rssi = 0;
- u32 i;
-
- /* Find max rssi among 3 possible receivers.
- * These values are measured by the digital signal processor (DSP).
- * They should stay fairly constant even as the signal strength varies,
- * if the radio's automatic gain control (AGC) is working right.
- * AGC value (see below) will provide the "interesting" info. */
- for (i = 0; i < 3; i++)
- if (valid_antennae & (1 << i))
- max_rssi = max(ncphy->rssi_info[i << 1], max_rssi);
-
- IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
- ncphy->rssi_info[0], ncphy->rssi_info[2], ncphy->rssi_info[4],
- max_rssi, agc);
-
- /* dBm = max_rssi dB - agc dB - constant.
- * Higher AGC (higher radio gain) means lower signal. */
- return max_rssi - agc - IWL4965_RSSI_OFFSET;
-}
-
-
-static u32 iwl4965_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
-{
- u32 decrypt_out = 0;
-
- if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
- RX_RES_STATUS_STATION_FOUND)
- decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
- RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
-
- decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
-
- /* packet was not encrypted */
- if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
- RX_RES_STATUS_SEC_TYPE_NONE)
- return decrypt_out;
-
- /* packet was encrypted with unknown alg */
- if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
- RX_RES_STATUS_SEC_TYPE_ERR)
- return decrypt_out;
-
- /* decryption was not done in HW */
- if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
- RX_MPDU_RES_STATUS_DEC_DONE_MSK)
- return decrypt_out;
-
- switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
-
- case RX_RES_STATUS_SEC_TYPE_CCMP:
- /* alg is CCM: check MIC only */
- if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
- /* Bad MIC */
- decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
- else
- decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
-
- break;
-
- case RX_RES_STATUS_SEC_TYPE_TKIP:
- if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
- /* Bad TTAK */
- decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
- break;
- }
- /* fall through if TTAK OK */
- default:
- if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
- decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
- else
- decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
- break;
- }
-
- IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
- decrypt_in, decrypt_out);
-
- return decrypt_out;
-}
-
-static void iwl4965_pass_packet_to_mac80211(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- u16 len,
- u32 ampdu_status,
- struct iwl_rx_mem_buffer *rxb,
- struct ieee80211_rx_status *stats)
-{
- struct sk_buff *skb;
- __le16 fc = hdr->frame_control;
-
- /* We only process data packets if the interface is open */
- if (unlikely(!priv->is_open)) {
- IWL_DEBUG_DROP_LIMIT(priv,
- "Dropping packet while interface is not open.\n");
- return;
- }
-
- /* In case of HW accelerated crypto and bad decryption, drop */
- if (!priv->cfg->mod_params->sw_crypto &&
- iwl_legacy_set_decrypted_flag(priv, hdr, ampdu_status, stats))
- return;
-
- skb = dev_alloc_skb(128);
- if (!skb) {
- IWL_ERR(priv, "dev_alloc_skb failed\n");
- return;
- }
-
- skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
-
- iwl_legacy_update_stats(priv, false, fc, len);
- memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
-
- ieee80211_rx(priv->hw, skb);
- priv->alloc_rxb_page--;
- rxb->page = NULL;
-}
-
-/* Called for REPLY_RX (legacy ABG frames), or
- * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
-void iwl4965_rx_reply_rx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct ieee80211_hdr *header;
- struct ieee80211_rx_status rx_status;
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_rx_phy_res *phy_res;
- __le32 rx_pkt_status;
- struct iwl_rx_mpdu_res_start *amsdu;
- u32 len;
- u32 ampdu_status;
- u32 rate_n_flags;
-
- /**
- * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
- * REPLY_RX: physical layer info is in this buffer
- * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
- * command and cached in priv->last_phy_res
- *
- * Here we set up local variables depending on which command is
- * received.
- */
- if (pkt->hdr.cmd == REPLY_RX) {
- phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
- header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
- + phy_res->cfg_phy_cnt);
-
- len = le16_to_cpu(phy_res->byte_count);
- rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
- phy_res->cfg_phy_cnt + len);
- ampdu_status = le32_to_cpu(rx_pkt_status);
- } else {
- if (!priv->_4965.last_phy_res_valid) {
- IWL_ERR(priv, "MPDU frame without cached PHY data\n");
- return;
- }
- phy_res = &priv->_4965.last_phy_res;
- amsdu = (struct iwl_rx_mpdu_res_start *)pkt->u.raw;
- header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
- len = le16_to_cpu(amsdu->byte_count);
- rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
- ampdu_status = iwl4965_translate_rx_status(priv,
- le32_to_cpu(rx_pkt_status));
- }
-
- if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
-		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
- phy_res->cfg_phy_cnt);
- return;
- }
-
- if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
- !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
- IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
- le32_to_cpu(rx_pkt_status));
- return;
- }
-
- /* This will be used in several places later */
- rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
-
- /* rx_status carries information about the packet to mac80211 */
- rx_status.mactime = le64_to_cpu(phy_res->timestamp);
- rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
- IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
- rx_status.freq =
- ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel),
- rx_status.band);
- rx_status.rate_idx =
- iwl4965_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
- rx_status.flag = 0;
-
- /* TSF isn't reliable. In order to allow smooth user experience,
- * this W/A doesn't propagate it to the mac80211 */
- /*rx_status.flag |= RX_FLAG_MACTIME_MPDU;*/
-
- priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
-
- /* Find max signal strength (dBm) among 3 antenna/receiver chains */
- rx_status.signal = iwl4965_calc_rssi(priv, phy_res);
-
- iwl_legacy_dbg_log_rx_data_frame(priv, len, header);
- IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
- rx_status.signal, (unsigned long long)rx_status.mactime);
-
- /*
- * "antenna number"
- *
- * It seems that the antenna field in the phy flags value
- * is actually a bit field. This is undefined by radiotap,
- * it wants an actual antenna number but I always get "7"
- * for most legacy frames I receive indicating that the
- * same frame was received on all three RX chains.
- *
- * I think this field should be removed in favor of a
- * new 802.11n radiotap field "RX chains" that is defined
- * as a bitmask.
- */
- rx_status.antenna =
- (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
- >> RX_RES_PHY_FLAGS_ANTENNA_POS;
-
- /* set the preamble flag if appropriate */
- if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
- rx_status.flag |= RX_FLAG_SHORTPRE;
-
- /* Set up the HT phy flags */
- if (rate_n_flags & RATE_MCS_HT_MSK)
- rx_status.flag |= RX_FLAG_HT;
- if (rate_n_flags & RATE_MCS_HT40_MSK)
- rx_status.flag |= RX_FLAG_40MHZ;
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- rx_status.flag |= RX_FLAG_SHORT_GI;
-
- iwl4965_pass_packet_to_mac80211(priv, header, len, ampdu_status,
- rxb, &rx_status);
-}
-
-/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
- * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
-void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- priv->_4965.last_phy_res_valid = true;
- memcpy(&priv->_4965.last_phy_res, pkt->u.raw,
- sizeof(struct iwl_rx_phy_res));
-}
-
-static int iwl4965_get_channels_for_scan(struct iwl_priv *priv,
- struct ieee80211_vif *vif,
- enum ieee80211_band band,
- u8 is_active, u8 n_probes,
- struct iwl_scan_channel *scan_ch)
-{
- struct ieee80211_channel *chan;
- const struct ieee80211_supported_band *sband;
- const struct iwl_channel_info *ch_info;
- u16 passive_dwell = 0;
- u16 active_dwell = 0;
- int added, i;
- u16 channel;
-
- sband = iwl_get_hw_mode(priv, band);
- if (!sband)
- return 0;
-
- active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
- passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
-
- if (passive_dwell <= active_dwell)
- passive_dwell = active_dwell + 1;
-
- for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
- chan = priv->scan_request->channels[i];
-
- if (chan->band != band)
- continue;
-
- channel = chan->hw_value;
- scan_ch->channel = cpu_to_le16(channel);
-
- ch_info = iwl_legacy_get_channel_info(priv, band, channel);
- if (!iwl_legacy_is_channel_valid(ch_info)) {
- IWL_DEBUG_SCAN(priv,
- "Channel %d is INVALID for this band.\n",
- channel);
- continue;
- }
-
- if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
- (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN))
- scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
- else
- scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
-
- if (n_probes)
- scan_ch->type |= IWL_SCAN_PROBE_MASK(n_probes);
-
- scan_ch->active_dwell = cpu_to_le16(active_dwell);
- scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
-
- /* Set txpower levels to defaults */
- scan_ch->dsp_atten = 110;
-
- /* NOTE: if we were doing 6Mb OFDM for scans we'd use
- * power level:
- * scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
- */
- if (band == IEEE80211_BAND_5GHZ)
- scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
- else
- scan_ch->tx_gain = ((1 << 5) | (5 << 3));
-
- IWL_DEBUG_SCAN(priv, "Scanning ch=%d prob=0x%X [%s %d]\n",
- channel, le32_to_cpu(scan_ch->type),
- (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
- "ACTIVE" : "PASSIVE",
- (scan_ch->type & SCAN_CHANNEL_TYPE_ACTIVE) ?
- active_dwell : passive_dwell);
-
- scan_ch++;
- added++;
- }
-
- IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
- return added;
-}
-
-int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
-{
- struct iwl_host_cmd cmd = {
- .id = REPLY_SCAN_CMD,
- .len = sizeof(struct iwl_scan_cmd),
- .flags = CMD_SIZE_HUGE,
- };
- struct iwl_scan_cmd *scan;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- u32 rate_flags = 0;
- u16 cmd_len;
- u16 rx_chain = 0;
- enum ieee80211_band band;
- u8 n_probes = 0;
- u8 rx_ant = priv->hw_params.valid_rx_ant;
- u8 rate;
- bool is_active = false;
- int chan_mod;
- u8 active_chains;
- u8 scan_tx_antennas = priv->hw_params.valid_tx_ant;
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- if (vif)
- ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
- if (!priv->scan_cmd) {
- priv->scan_cmd = kmalloc(sizeof(struct iwl_scan_cmd) +
- IWL_MAX_SCAN_SIZE, GFP_KERNEL);
- if (!priv->scan_cmd) {
- IWL_DEBUG_SCAN(priv,
- "fail to allocate memory for scan\n");
- return -ENOMEM;
- }
- }
- scan = priv->scan_cmd;
- memset(scan, 0, sizeof(struct iwl_scan_cmd) + IWL_MAX_SCAN_SIZE);
-
- scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
- scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
-
- if (iwl_legacy_is_any_associated(priv)) {
- u16 interval;
- u32 extra;
- u32 suspend_time = 100;
- u32 scan_suspend_time = 100;
-
- IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
- interval = vif->bss_conf.beacon_int;
-
- scan->suspend_time = 0;
- scan->max_out_time = cpu_to_le32(200 * 1024);
- if (!interval)
- interval = suspend_time;
-
- extra = (suspend_time / interval) << 22;
- scan_suspend_time = (extra |
- ((suspend_time % interval) * 1024));
- scan->suspend_time = cpu_to_le32(scan_suspend_time);
- IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
- scan_suspend_time, interval);
- }
-
- if (priv->scan_request->n_ssids) {
- int i, p = 0;
- IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
- for (i = 0; i < priv->scan_request->n_ssids; i++) {
- /* always does wildcard anyway */
- if (!priv->scan_request->ssids[i].ssid_len)
- continue;
- scan->direct_scan[p].id = WLAN_EID_SSID;
- scan->direct_scan[p].len =
- priv->scan_request->ssids[i].ssid_len;
- memcpy(scan->direct_scan[p].ssid,
- priv->scan_request->ssids[i].ssid,
- priv->scan_request->ssids[i].ssid_len);
- n_probes++;
- p++;
- }
- is_active = true;
- } else
- IWL_DEBUG_SCAN(priv, "Start passive scan.\n");
-
- scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
- scan->tx_cmd.sta_id = ctx->bcast_sta_id;
- scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-
- switch (priv->scan_band) {
- case IEEE80211_BAND_2GHZ:
- scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
- chan_mod = le32_to_cpu(
- priv->contexts[IWL_RXON_CTX_BSS].active.flags &
- RXON_FLG_CHANNEL_MODE_MSK)
- >> RXON_FLG_CHANNEL_MODE_POS;
- if (chan_mod == CHANNEL_MODE_PURE_40) {
- rate = IWL_RATE_6M_PLCP;
- } else {
- rate = IWL_RATE_1M_PLCP;
- rate_flags = RATE_MCS_CCK_MSK;
- }
- break;
- case IEEE80211_BAND_5GHZ:
- rate = IWL_RATE_6M_PLCP;
- break;
- default:
- IWL_WARN(priv, "Invalid scan band\n");
- return -EIO;
- }
-
- /*
- * If active scanning is requested but a certain channel is
- * marked passive, we can do active scanning if we detect
- * transmissions.
- *
- * There is an issue with some firmware versions that triggers
- * a sysassert on a "good CRC threshold" of zero (== disabled),
- * on a radar channel even though this means that we should NOT
- * send probes.
- *
- * The "good CRC threshold" is the number of frames that we
- * need to receive during our dwell time on a channel before
- * sending out probes -- setting this to a huge value will
- * mean we never reach it, but at the same time work around
- * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER
- * here instead of IWL_GOOD_CRC_TH_DISABLED.
- */
- scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
- IWL_GOOD_CRC_TH_NEVER;
-
- band = priv->scan_band;
-
- if (priv->cfg->scan_rx_antennas[band])
- rx_ant = priv->cfg->scan_rx_antennas[band];
-
- priv->scan_tx_ant[band] = iwl4965_toggle_tx_ant(priv,
- priv->scan_tx_ant[band],
- scan_tx_antennas);
- rate_flags |= iwl4965_ant_idx_to_flags(priv->scan_tx_ant[band]);
- scan->tx_cmd.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate, rate_flags);
-
- /* In power save mode use one chain, otherwise use all chains */
- if (test_bit(STATUS_POWER_PMI, &priv->status)) {
- /* rx_ant has been set to all valid chains previously */
- active_chains = rx_ant &
- ((u8)(priv->chain_noise_data.active_chains));
- if (!active_chains)
- active_chains = rx_ant;
-
- IWL_DEBUG_SCAN(priv, "chain_noise_data.active_chains: %u\n",
- priv->chain_noise_data.active_chains);
-
- rx_ant = iwl4965_first_antenna(active_chains);
- }
-
- /* MIMO is not used here, but value is required */
- rx_chain |= priv->hw_params.valid_rx_ant << RXON_RX_CHAIN_VALID_POS;
- rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
- rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
- rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
- scan->rx_chain = cpu_to_le16(rx_chain);
-
- cmd_len = iwl_legacy_fill_probe_req(priv,
- (struct ieee80211_mgmt *)scan->data,
- vif->addr,
- priv->scan_request->ie,
- priv->scan_request->ie_len,
- IWL_MAX_SCAN_SIZE - sizeof(*scan));
- scan->tx_cmd.len = cpu_to_le16(cmd_len);
-
- scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
- RXON_FILTER_BCON_AWARE_MSK);
-
- scan->channel_count = iwl4965_get_channels_for_scan(priv, vif, band,
- is_active, n_probes,
- (void *)&scan->data[cmd_len]);
- if (scan->channel_count == 0) {
- IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
- return -EIO;
- }
-
- cmd.len += le16_to_cpu(scan->tx_cmd.len) +
- scan->channel_count * sizeof(struct iwl_scan_channel);
- cmd.data = scan;
- scan->len = cpu_to_le16(cmd.len);
-
- set_bit(STATUS_SCAN_HW, &priv->status);
-
- ret = iwl_legacy_send_cmd_sync(priv, &cmd);
- if (ret)
- clear_bit(STATUS_SCAN_HW, &priv->status);
-
- return ret;
-}
-
-int iwl4965_manage_ibss_station(struct iwl_priv *priv,
- struct ieee80211_vif *vif, bool add)
-{
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-
- if (add)
- return iwl4965_add_bssid_station(priv, vif_priv->ctx,
- vif->bss_conf.bssid,
- &vif_priv->ibss_bssid_sta_id);
- return iwl_legacy_remove_station(priv, vif_priv->ibss_bssid_sta_id,
- vif->bss_conf.bssid);
-}
-
-void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
- int sta_id, int tid, int freed)
-{
- lockdep_assert_held(&priv->sta_lock);
-
- if (priv->stations[sta_id].tid[tid].tfds_in_queue >= freed)
- priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
- else {
- IWL_DEBUG_TX(priv, "free more than tfds_in_queue (%u:%d)\n",
- priv->stations[sta_id].tid[tid].tfds_in_queue,
- freed);
- priv->stations[sta_id].tid[tid].tfds_in_queue = 0;
- }
-}
-
-#define IWL_TX_QUEUE_MSK 0xfffff
-
-static bool iwl4965_is_single_rx_stream(struct iwl_priv *priv)
-{
- return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
- priv->current_ht_config.single_chain_sufficient;
-}
-
-#define IWL_NUM_RX_CHAINS_MULTIPLE 3
-#define IWL_NUM_RX_CHAINS_SINGLE 2
-#define IWL_NUM_IDLE_CHAINS_DUAL 2
-#define IWL_NUM_IDLE_CHAINS_SINGLE 1
-
-/*
- * Determine how many receiver/antenna chains to use.
- *
- * More provides better reception via diversity. Fewer saves power
- * at the expense of throughput, but only when not in powersave to
- * start with.
- *
- * MIMO (dual stream) requires at least 2, but works better with 3.
- * This does not determine *which* chains to use, just how many.
- */
-static int iwl4965_get_active_rx_chain_count(struct iwl_priv *priv)
-{
- /* # of Rx chains to use when expecting MIMO. */
- if (iwl4965_is_single_rx_stream(priv))
- return IWL_NUM_RX_CHAINS_SINGLE;
- else
- return IWL_NUM_RX_CHAINS_MULTIPLE;
-}
-
-/*
- * When we are in power saving mode, unless the device supports spatial
- * multiplexing power save, use the active count for the rx chain count.
- */
-static int
-iwl4965_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
-{
- /* # Rx chains when idling, depending on SMPS mode */
- switch (priv->current_ht_config.smps) {
- case IEEE80211_SMPS_STATIC:
- case IEEE80211_SMPS_DYNAMIC:
- return IWL_NUM_IDLE_CHAINS_SINGLE;
- case IEEE80211_SMPS_OFF:
- return active_cnt;
- default:
- WARN(1, "invalid SMPS mode %d",
- priv->current_ht_config.smps);
- return active_cnt;
- }
-}
-
-/* up to 4 chains */
-static u8 iwl4965_count_chain_bitmap(u32 chain_bitmap)
-{
- u8 res;
- res = (chain_bitmap & BIT(0)) >> 0;
- res += (chain_bitmap & BIT(1)) >> 1;
- res += (chain_bitmap & BIT(2)) >> 2;
- res += (chain_bitmap & BIT(3)) >> 3;
- return res;
-}
-
-/**
- * iwl4965_set_rxon_chain - Set up Rx chain usage in "staging" RXON image
- *
- * Selects how many and which Rx receivers/antennas/chains to use.
- * This should not be used for scan command ... it puts data in wrong place.
- */
-void iwl4965_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- bool is_single = iwl4965_is_single_rx_stream(priv);
- bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
- u8 idle_rx_cnt, active_rx_cnt, valid_rx_cnt;
- u32 active_chains;
- u16 rx_chain;
-
- /* Tell uCode which antennas are actually connected.
- * Before first association, we assume all antennas are connected.
- * Just after first association, iwl4965_chain_noise_calibration()
- * checks which antennas actually *are* connected. */
- if (priv->chain_noise_data.active_chains)
- active_chains = priv->chain_noise_data.active_chains;
- else
- active_chains = priv->hw_params.valid_rx_ant;
-
- rx_chain = active_chains << RXON_RX_CHAIN_VALID_POS;
-
- /* How many receivers should we use? */
- active_rx_cnt = iwl4965_get_active_rx_chain_count(priv);
- idle_rx_cnt = iwl4965_get_idle_rx_chain_count(priv, active_rx_cnt);
-
-
-	/* correct rx chain count according to hw settings
- * and chain noise calibration
- */
- valid_rx_cnt = iwl4965_count_chain_bitmap(active_chains);
- if (valid_rx_cnt < active_rx_cnt)
- active_rx_cnt = valid_rx_cnt;
-
- if (valid_rx_cnt < idle_rx_cnt)
- idle_rx_cnt = valid_rx_cnt;
-
- rx_chain |= active_rx_cnt << RXON_RX_CHAIN_MIMO_CNT_POS;
- rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
-
- ctx->staging.rx_chain = cpu_to_le16(rx_chain);
-
- if (!is_single && (active_rx_cnt >= IWL_NUM_RX_CHAINS_SINGLE) && is_cam)
- ctx->staging.rx_chain |= RXON_RX_CHAIN_MIMO_FORCE_MSK;
- else
- ctx->staging.rx_chain &= ~RXON_RX_CHAIN_MIMO_FORCE_MSK;
-
- IWL_DEBUG_ASSOC(priv, "rx_chain=0x%X active=%d idle=%d\n",
- ctx->staging.rx_chain,
- active_rx_cnt, idle_rx_cnt);
-
- WARN_ON(active_rx_cnt == 0 || idle_rx_cnt == 0 ||
- active_rx_cnt < idle_rx_cnt);
-}
-
-u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
-{
- int i;
- u8 ind = ant;
-
- for (i = 0; i < RATE_ANT_NUM - 1; i++) {
- ind = (ind + 1) < RATE_ANT_NUM ? ind + 1 : 0;
- if (valid & BIT(ind))
- return ind;
- }
- return ant;
-}
-
-static const char *iwl4965_get_fh_string(int cmd)
-{
- switch (cmd) {
- IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
- IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
- IWL_CMD(FH_RSCSR_CHNL0_WPTR);
- IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
- IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
- IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
- IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
- IWL_CMD(FH_TSSR_TX_STATUS_REG);
- IWL_CMD(FH_TSSR_TX_ERROR_REG);
- default:
- return "UNKNOWN";
- }
-}
-
-int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display)
-{
- int i;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- int pos = 0;
- size_t bufsz = 0;
-#endif
- static const u32 fh_tbl[] = {
- FH_RSCSR_CHNL0_STTS_WPTR_REG,
- FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- FH_RSCSR_CHNL0_WPTR,
- FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_MEM_RSSR_SHARED_CTRL_REG,
- FH_MEM_RSSR_RX_STATUS_REG,
- FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
- FH_TSSR_TX_STATUS_REG,
- FH_TSSR_TX_ERROR_REG
- };
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (display) {
- bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
- *buf = kmalloc(bufsz, GFP_KERNEL);
- if (!*buf)
- return -ENOMEM;
- pos += scnprintf(*buf + pos, bufsz - pos,
- "FH register values:\n");
- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
- pos += scnprintf(*buf + pos, bufsz - pos,
- " %34s: 0X%08x\n",
- iwl4965_get_fh_string(fh_tbl[i]),
- iwl_legacy_read_direct32(priv, fh_tbl[i]));
- }
- return pos;
- }
-#endif
- IWL_ERR(priv, "FH register values:\n");
- for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
- IWL_ERR(priv, " %34s: 0X%08x\n",
- iwl4965_get_fh_string(fh_tbl[i]),
- iwl_legacy_read_direct32(priv, fh_tbl[i]));
- }
- return 0;
-}
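
The debugfs path above uses the usual accumulate-with-scnprintf idiom: keep a running offset and always pass the remaining buffer space. A userspace sketch of the same pattern with made-up register names; note that the kernel's scnprintf returns the number of bytes actually written (which is what keeps the in-kernel variant from overrunning), while this sketch uses snprintf and simply sizes the buffer generously:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	static const char *regs[] = { "REG_A", "REG_B", "REG_C" }; /* made up */
	size_t nregs = sizeof(regs) / sizeof(regs[0]);
	size_t bufsz = nregs * 48 + 40;        /* same sizing rule as above */
	char *buf = malloc(bufsz);
	int pos = 0;
	size_t i;

	if (!buf)
		return 1;

	pos += snprintf(buf + pos, bufsz - pos, "FH register values:\n");
	for (i = 0; i < nregs; i++)
		pos += snprintf(buf + pos, bufsz - pos, " %34s: 0X%08x\n",
				regs[i], (unsigned)(0x1000 + i));

	fputs(buf, stdout);
	free(buf);
	return 0;
}
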
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c b/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
deleted file mode 100644
index 57ebe21..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-rs.c
+++ /dev/null
@@ -1,2871 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/skbuff.h>
-#include <linux/slab.h>
-#include <net/mac80211.h>
-
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/delay.h>
-
-#include <linux/workqueue.h>
-
-#include "iwl-dev.h"
-#include "iwl-sta.h"
-#include "iwl-core.h"
-#include "iwl-4965.h"
-
-#define IWL4965_RS_NAME "iwl-4965-rs"
-
-#define NUM_TRY_BEFORE_ANT_TOGGLE 1
-#define IWL_NUMBER_TRY 1
-#define IWL_HT_NUMBER_TRY 3
-
-#define IWL_RATE_MAX_WINDOW 62 /* # tx in history window */
-#define IWL_RATE_MIN_FAILURE_TH 6 /* min failures to calc tpt */
-#define IWL_RATE_MIN_SUCCESS_TH 8 /* min successes to calc tpt */
-
-/* max allowed rate mismatches before resyncing the LQ cmd */
-#define IWL_MISSED_RATE_MAX 15
-/* max time to accumulate history (3 seconds) */
-#define IWL_RATE_SCALE_FLUSH_INTVL (3*HZ)
-
-static u8 rs_ht_to_legacy[] = {
- IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
- IWL_RATE_6M_INDEX, IWL_RATE_6M_INDEX,
- IWL_RATE_6M_INDEX,
- IWL_RATE_6M_INDEX, IWL_RATE_9M_INDEX,
- IWL_RATE_12M_INDEX, IWL_RATE_18M_INDEX,
- IWL_RATE_24M_INDEX, IWL_RATE_36M_INDEX,
- IWL_RATE_48M_INDEX, IWL_RATE_54M_INDEX
-};
-
-static const u8 ant_toggle_lookup[] = {
- /*ANT_NONE -> */ ANT_NONE,
- /*ANT_A -> */ ANT_B,
- /*ANT_B -> */ ANT_C,
- /*ANT_AB -> */ ANT_BC,
- /*ANT_C -> */ ANT_A,
- /*ANT_AC -> */ ANT_AB,
- /*ANT_BC -> */ ANT_AC,
- /*ANT_ABC -> */ ANT_ABC,
-};
-
-#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
- [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
- IWL_RATE_SISO_##s##M_PLCP, \
- IWL_RATE_MIMO2_##s##M_PLCP,\
- IWL_RATE_##r##M_IEEE, \
- IWL_RATE_##ip##M_INDEX, \
- IWL_RATE_##in##M_INDEX, \
- IWL_RATE_##rp##M_INDEX, \
- IWL_RATE_##rn##M_INDEX, \
- IWL_RATE_##pp##M_INDEX, \
- IWL_RATE_##np##M_INDEX }
-
-/*
- * Parameter order:
- * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
- *
- * If there isn't a valid next or previous rate then INV is used which
- * maps to IWL_RATE_INVALID
- *
- */
-const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT] = {
- IWL_DECLARE_RATE_INFO(1, INV, INV, 2, INV, 2, INV, 2), /* 1mbps */
- IWL_DECLARE_RATE_INFO(2, INV, 1, 5, 1, 5, 1, 5), /* 2mbps */
- IWL_DECLARE_RATE_INFO(5, INV, 2, 6, 2, 11, 2, 11), /*5.5mbps */
- IWL_DECLARE_RATE_INFO(11, INV, 9, 12, 9, 12, 5, 18), /* 11mbps */
- IWL_DECLARE_RATE_INFO(6, 6, 5, 9, 5, 11, 5, 11), /* 6mbps */
- IWL_DECLARE_RATE_INFO(9, 6, 6, 11, 6, 11, 5, 11), /* 9mbps */
- IWL_DECLARE_RATE_INFO(12, 12, 11, 18, 11, 18, 11, 18), /* 12mbps */
- IWL_DECLARE_RATE_INFO(18, 18, 12, 24, 12, 24, 11, 24), /* 18mbps */
- IWL_DECLARE_RATE_INFO(24, 24, 18, 36, 18, 36, 18, 36), /* 24mbps */
- IWL_DECLARE_RATE_INFO(36, 36, 24, 48, 24, 48, 24, 48), /* 36mbps */
- IWL_DECLARE_RATE_INFO(48, 48, 36, 54, 36, 54, 36, 54), /* 48mbps */
- IWL_DECLARE_RATE_INFO(54, 54, 48, INV, 48, INV, 48, INV),/* 54mbps */
- IWL_DECLARE_RATE_INFO(60, 60, 48, INV, 48, INV, 48, INV),/* 60mbps */
-};
-
-static int iwl4965_hwrate_to_plcp_idx(u32 rate_n_flags)
-{
- int idx = 0;
-
- /* HT rate format */
- if (rate_n_flags & RATE_MCS_HT_MSK) {
- idx = (rate_n_flags & 0xff);
-
- if (idx >= IWL_RATE_MIMO2_6M_PLCP)
- idx = idx - IWL_RATE_MIMO2_6M_PLCP;
-
- idx += IWL_FIRST_OFDM_RATE;
- /* skip 9M, not supported in HT */
- if (idx >= IWL_RATE_9M_INDEX)
- idx += 1;
- if ((idx >= IWL_FIRST_OFDM_RATE) && (idx <= IWL_LAST_OFDM_RATE))
- return idx;
-
- /* legacy rate format, search for match in table */
- } else {
- for (idx = 0; idx < ARRAY_SIZE(iwlegacy_rates); idx++)
- if (iwlegacy_rates[idx].plcp == (rate_n_flags & 0xFF))
- return idx;
- }
-
- return -1;
-}
-
-static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
- struct sk_buff *skb,
- struct ieee80211_sta *sta,
- struct iwl_lq_sta *lq_sta);
-static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta, u32 rate_n_flags);
-static void iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta,
- bool force_search);
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
- u32 *rate_n_flags, int index);
-#else
-static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
- u32 *rate_n_flags, int index)
-{}
-#endif
-
-/**
- * The following tables contain the expected throughput metrics for all rates
- *
- * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
- *
- * where invalid entries are zeros.
- *
- * CCK rates are only valid in the legacy table and will only be used in
- * the 2.4 GHz (G) band.
- */
-
-static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
- 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
-};
-
-static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */
- {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */
- {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */
- {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
-};
-
-static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
- {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
- {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
- {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
-};
-
-static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
- {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
- {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
- {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/
-};
-
-static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
- {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
- {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
- {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
- {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
-};
-
-/* mbps, mcs */
-static const struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
- { "1", "BPSK DSSS"},
- { "2", "QPSK DSSS"},
- {"5.5", "BPSK CCK"},
- { "11", "QPSK CCK"},
- { "6", "BPSK 1/2"},
- { "9", "BPSK 1/2"},
- { "12", "QPSK 1/2"},
- { "18", "QPSK 3/4"},
- { "24", "16QAM 1/2"},
- { "36", "16QAM 3/4"},
- { "48", "64QAM 2/3"},
- { "54", "64QAM 3/4"},
- { "60", "64QAM 5/6"},
-};
-
-#define MCS_INDEX_PER_STREAM (8)
-
-static inline u8 iwl4965_rs_extract_rate(u32 rate_n_flags)
-{
- return (u8)(rate_n_flags & 0xFF);
-}
-
-static void
-iwl4965_rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
-{
- window->data = 0;
- window->success_counter = 0;
- window->success_ratio = IWL_INVALID_VALUE;
- window->counter = 0;
- window->average_tpt = IWL_INVALID_VALUE;
- window->stamp = 0;
-}
-
-static inline u8 iwl4965_rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
-{
- return (ant_type & valid_antenna) == ant_type;
-}
-
-/*
- * Remove old data from the statistics: all data older than
- * TID_MAX_TIME_DIFF is deleted.
- */
-static void
-iwl4965_rs_tl_rm_old_stats(struct iwl_traffic_load *tl, u32 curr_time)
-{
- /* The oldest age we want to keep */
- u32 oldest_time = curr_time - TID_MAX_TIME_DIFF;
-
- while (tl->queue_count &&
- (tl->time_stamp < oldest_time)) {
- tl->total -= tl->packet_count[tl->head];
- tl->packet_count[tl->head] = 0;
- tl->time_stamp += TID_QUEUE_CELL_SPACING;
- tl->queue_count--;
- tl->head++;
- if (tl->head >= TID_QUEUE_MAX_SIZE)
- tl->head = 0;
- }
-}
-
-/*
- * Increment the traffic load value for this tid and also remove
- * any old values that have aged past the time window.
- */
-static u8 iwl4965_rs_tl_add_packet(struct iwl_lq_sta *lq_data,
- struct ieee80211_hdr *hdr)
-{
- u32 curr_time = jiffies_to_msecs(jiffies);
- u32 time_diff;
- s32 index;
- struct iwl_traffic_load *tl = NULL;
- u8 tid;
-
- if (ieee80211_is_data_qos(hdr->frame_control)) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & 0xf;
- } else
- return MAX_TID_COUNT;
-
- if (unlikely(tid >= TID_MAX_LOAD_COUNT))
- return MAX_TID_COUNT;
-
- tl = &lq_data->load[tid];
-
- curr_time -= curr_time % TID_ROUND_VALUE;
-
- /* Happens only for the first packet. Initialize the data */
- if (!(tl->queue_count)) {
- tl->total = 1;
- tl->time_stamp = curr_time;
- tl->queue_count = 1;
- tl->head = 0;
- tl->packet_count[0] = 1;
- return MAX_TID_COUNT;
- }
-
- time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
- index = time_diff / TID_QUEUE_CELL_SPACING;
-
- /* The history is too long: remove data that is older than */
- /* TID_MAX_TIME_DIFF */
- if (index >= TID_QUEUE_MAX_SIZE)
- iwl4965_rs_tl_rm_old_stats(tl, curr_time);
-
- index = (tl->head + index) % TID_QUEUE_MAX_SIZE;
- tl->packet_count[index] = tl->packet_count[index] + 1;
- tl->total = tl->total + 1;
-
- if ((index + 1) > tl->queue_count)
- tl->queue_count = index + 1;
-
- return tid;
-}
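
The per-TID traffic-load bookkeeping above quantizes time into fixed-width cells, keeps packet counts in a small ring buffer, and drops cells that have aged out of the window. A simplified standalone sketch of that scheme; the cell width, ring size, and struct layout are assumptions for illustration, not the driver's TID_* constants:

#include <stdio.h>
#include <string.h>

#define CELL_MS   500   /* assumed cell width */
#define MAX_CELLS 8     /* assumed ring size */

struct traffic_load {
	unsigned packet_count[MAX_CELLS]; /* packets per time cell */
	unsigned total;                   /* sum over all cells */
	unsigned queue_count;             /* cells currently in use */
	unsigned head;                    /* index of the oldest cell */
	unsigned time_stamp;              /* start time of the oldest cell, ms */
};

static void tl_add_packet(struct traffic_load *tl, unsigned now_ms)
{
	unsigned rel, slot;

	now_ms -= now_ms % CELL_MS;       /* quantize to a cell boundary */

	if (!tl->queue_count) {           /* first packet: initialize */
		memset(tl->packet_count, 0, sizeof(tl->packet_count));
		tl->total = tl->queue_count = tl->packet_count[0] = 1;
		tl->head = 0;
		tl->time_stamp = now_ms;
		return;
	}

	rel = (now_ms - tl->time_stamp) / CELL_MS;
	while (rel >= MAX_CELLS) {        /* drop cells older than the window */
		tl->total -= tl->packet_count[tl->head];
		tl->packet_count[tl->head] = 0;
		tl->head = (tl->head + 1) % MAX_CELLS;
		tl->time_stamp += CELL_MS;
		if (tl->queue_count)
			tl->queue_count--;
		rel--;
	}

	slot = (tl->head + rel) % MAX_CELLS;
	tl->packet_count[slot]++;
	tl->total++;
	if (rel + 1 > tl->queue_count)
		tl->queue_count = rel + 1;
}

int main(void)
{
	struct traffic_load tl = { {0}, 0, 0, 0, 0 };

	tl_add_packet(&tl, 0);
	tl_add_packet(&tl, 1200);   /* lands two cells after the first */
	tl_add_packet(&tl, 9000);   /* far enough to push the old cells out */
	printf("total=%u cells=%u\n", tl.total, tl.queue_count);
	return 0;
}
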
-
-/*
- * Get the traffic load value for tid.
- */
-static u32 iwl4965_rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
-{
- u32 curr_time = jiffies_to_msecs(jiffies);
- u32 time_diff;
- s32 index;
- struct iwl_traffic_load *tl = NULL;
-
- if (tid >= TID_MAX_LOAD_COUNT)
- return 0;
-
- tl = &(lq_data->load[tid]);
-
- curr_time -= curr_time % TID_ROUND_VALUE;
-
- if (!(tl->queue_count))
- return 0;
-
- time_diff = TIME_WRAP_AROUND(tl->time_stamp, curr_time);
- index = time_diff / TID_QUEUE_CELL_SPACING;
-
- /* The history is too long: remove data that is older than */
- /* TID_MAX_TIME_DIFF */
- if (index >= TID_QUEUE_MAX_SIZE)
- iwl4965_rs_tl_rm_old_stats(tl, curr_time);
-
- return tl->total;
-}
-
-static int iwl4965_rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_data, u8 tid,
- struct ieee80211_sta *sta)
-{
- int ret = -EAGAIN;
- u32 load;
-
- load = iwl4965_rs_tl_get_load(lq_data, tid);
-
- if (load > IWL_AGG_LOAD_THRESHOLD) {
- IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
- sta->addr, tid);
- ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
- if (ret == -EAGAIN) {
- /*
- * The driver and mac80211 are out of sync; this might be
- * caused by reloading the firmware. Stop the tx BA session
- * here.
- */
- IWL_ERR(priv, "Fail start Tx agg on tid: %d\n",
- tid);
- ieee80211_stop_tx_ba_session(sta, tid);
- }
- } else {
- IWL_ERR(priv, "Aggregation not enabled for tid %d "
- "because load = %u\n", tid, load);
- }
- return ret;
-}
-
-static void iwl4965_rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
- struct iwl_lq_sta *lq_data,
- struct ieee80211_sta *sta)
-{
- if (tid < TID_MAX_LOAD_COUNT)
- iwl4965_rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
- else
- IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
- tid, TID_MAX_LOAD_COUNT);
-}
-
-static inline int iwl4965_get_iwl4965_num_of_ant_from_rate(u32 rate_n_flags)
-{
- return !!(rate_n_flags & RATE_MCS_ANT_A_MSK) +
- !!(rate_n_flags & RATE_MCS_ANT_B_MSK) +
- !!(rate_n_flags & RATE_MCS_ANT_C_MSK);
-}
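
The helper above is just a population count over the three antenna flag bits: each !! collapses a flag to 0 or 1 and the results are summed. A tiny standalone sketch; the bit positions are assumptions for illustration only:

#include <stdio.h>

#define ANT_A_MSK (1u << 14)    /* bit positions assumed for this sketch */
#define ANT_B_MSK (1u << 15)
#define ANT_C_MSK (1u << 16)

static int num_of_ant(unsigned rate_n_flags)
{
	/* each !! yields 0 or 1, so the sum counts the set antenna bits */
	return !!(rate_n_flags & ANT_A_MSK) +
	       !!(rate_n_flags & ANT_B_MSK) +
	       !!(rate_n_flags & ANT_C_MSK);
}

int main(void)
{
	printf("%d\n", num_of_ant(ANT_A_MSK | ANT_C_MSK));   /* prints 2 */
	return 0;
}
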
-
-/*
- * Static helper to get the expected throughput from an iwl_scale_tbl_info,
- * wrapping a NULL pointer check.
- */
-static s32
-iwl4965_get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
-{
- if (tbl->expected_tpt)
- return tbl->expected_tpt[rs_index];
- return 0;
-}
-
-/**
- * iwl4965_rs_collect_tx_data - Update the success/failure sliding window
- *
- * We keep a sliding window of the last 62 packets transmitted
- * at this rate. window->data contains the bitmask of successful
- * packets.
- */
-static int iwl4965_rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
- int scale_index, int attempts, int successes)
-{
- struct iwl_rate_scale_data *window = NULL;
- static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
- s32 fail_count, tpt;
-
- if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
- return -EINVAL;
-
- /* Select window for current tx bit rate */
- window = &(tbl->win[scale_index]);
-
- /* Get expected throughput */
- tpt = iwl4965_get_expected_tpt(tbl, scale_index);
-
- /*
- * Keep track of only the latest 62 tx frame attempts in this rate's
- * history window; anything older isn't really relevant any more.
- * If we have filled up the sliding window, drop the oldest attempt;
- * if the oldest attempt (highest bit in bitmap) shows "success",
- * subtract "1" from the success counter (this is the main reason
- * we keep these bitmaps!).
- */
- while (attempts > 0) {
- if (window->counter >= IWL_RATE_MAX_WINDOW) {
-
- /* remove earliest */
- window->counter = IWL_RATE_MAX_WINDOW - 1;
-
- if (window->data & mask) {
- window->data &= ~mask;
- window->success_counter--;
- }
- }
-
- /* Increment frames-attempted counter */
- window->counter++;
-
- /* Shift bitmap by one frame to throw away oldest history */
- window->data <<= 1;
-
- /* Mark the most recent #successes attempts as successful */
- if (successes > 0) {
- window->success_counter++;
- window->data |= 0x1;
- successes--;
- }
-
- attempts--;
- }
-
- /* Calculate current success ratio, avoid divide-by-0! */
- if (window->counter > 0)
- window->success_ratio = 128 * (100 * window->success_counter)
- / window->counter;
- else
- window->success_ratio = IWL_INVALID_VALUE;
-
- fail_count = window->counter - window->success_counter;
-
- /* Calculate average throughput, if we have enough history. */
- if ((fail_count >= IWL_RATE_MIN_FAILURE_TH) ||
- (window->success_counter >= IWL_RATE_MIN_SUCCESS_TH))
- window->average_tpt = (window->success_ratio * tpt + 64) / 128;
- else
- window->average_tpt = IWL_INVALID_VALUE;
-
- /* Tag this window as having been updated */
- window->stamp = jiffies;
-
- return 0;
-}
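
The window bookkeeping above can be read as: a 62-bit mask records per-frame success, the success ratio is kept scaled by 128, and the average throughput is the expected throughput weighted by that ratio (with +64 for rounding). A compact standalone sketch with simplified field names:

#include <stdint.h>
#include <stdio.h>

#define WINDOW      62
#define MIN_FAIL    6
#define MIN_SUCCESS 8
#define INVALID     (-1)

struct win {
	uint64_t data;       /* per-frame success bitmask, newest frame in bit 0 */
	int counter;         /* frames currently in the window */
	int success_counter; /* successful frames in the window */
	int success_ratio;   /* 128 * percent, or INVALID */
	int average_tpt;     /* expected tpt scaled by success ratio, or INVALID */
};

static void collect(struct win *w, int expected_tpt, int attempts, int successes)
{
	const uint64_t oldest = 1ULL << (WINDOW - 1);

	while (attempts-- > 0) {
		if (w->counter >= WINDOW) {       /* window full: drop the oldest */
			w->counter = WINDOW - 1;
			if (w->data & oldest) {
				w->data &= ~oldest;
				w->success_counter--;
			}
		}
		w->counter++;
		w->data <<= 1;
		if (successes > 0) {              /* newest frames marked good */
			w->success_counter++;
			w->data |= 1;
			successes--;
		}
	}

	w->success_ratio = w->counter ?
		128 * (100 * w->success_counter) / w->counter : INVALID;

	if (w->counter - w->success_counter >= MIN_FAIL ||
	    w->success_counter >= MIN_SUCCESS)
		w->average_tpt = (w->success_ratio * expected_tpt + 64) / 128;
	else
		w->average_tpt = INVALID;
}

int main(void)
{
	struct win w = { 0, 0, 0, INVALID, INVALID };

	collect(&w, 100 /* expected tpt */, 10 /* attempts */, 8 /* successes */);
	printf("ratio=%d avg_tpt=%d\n", w.success_ratio, w.average_tpt);
	return 0;
}
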
-
-/*
- * Fill uCode API rate_n_flags field, based on "search" or "active" table.
- */
-static u32 iwl4965_rate_n_flags_from_tbl(struct iwl_priv *priv,
- struct iwl_scale_tbl_info *tbl,
- int index, u8 use_green)
-{
- u32 rate_n_flags = 0;
-
- if (is_legacy(tbl->lq_type)) {
- rate_n_flags = iwlegacy_rates[index].plcp;
- if (index >= IWL_FIRST_CCK_RATE && index <= IWL_LAST_CCK_RATE)
- rate_n_flags |= RATE_MCS_CCK_MSK;
-
- } else if (is_Ht(tbl->lq_type)) {
- if (index > IWL_LAST_OFDM_RATE) {
- IWL_ERR(priv, "Invalid HT rate index %d\n", index);
- index = IWL_LAST_OFDM_RATE;
- }
- rate_n_flags = RATE_MCS_HT_MSK;
-
- if (is_siso(tbl->lq_type))
- rate_n_flags |= iwlegacy_rates[index].plcp_siso;
- else
- rate_n_flags |= iwlegacy_rates[index].plcp_mimo2;
- } else {
- IWL_ERR(priv, "Invalid tbl->lq_type %d\n", tbl->lq_type);
- }
-
- rate_n_flags |= ((tbl->ant_type << RATE_MCS_ANT_POS) &
- RATE_MCS_ANT_ABC_MSK);
-
- if (is_Ht(tbl->lq_type)) {
- if (tbl->is_ht40) {
- if (tbl->is_dup)
- rate_n_flags |= RATE_MCS_DUP_MSK;
- else
- rate_n_flags |= RATE_MCS_HT40_MSK;
- }
- if (tbl->is_SGI)
- rate_n_flags |= RATE_MCS_SGI_MSK;
-
- if (use_green) {
- rate_n_flags |= RATE_MCS_GF_MSK;
- if (is_siso(tbl->lq_type) && tbl->is_SGI) {
- rate_n_flags &= ~RATE_MCS_SGI_MSK;
- IWL_ERR(priv, "GF was set with SGI:SISO\n");
- }
- }
- }
- return rate_n_flags;
-}
-
-/*
- * Interpret uCode API's rate_n_flags format,
- * fill "search" or "active" tx mode table.
- */
-static int iwl4965_rs_get_tbl_info_from_mcs(const u32 rate_n_flags,
- enum ieee80211_band band,
- struct iwl_scale_tbl_info *tbl,
- int *rate_idx)
-{
- u32 ant_msk = (rate_n_flags & RATE_MCS_ANT_ABC_MSK);
- u8 iwl4965_num_of_ant = iwl4965_get_iwl4965_num_of_ant_from_rate(rate_n_flags);
- u8 mcs;
-
- memset(tbl, 0, sizeof(struct iwl_scale_tbl_info));
- *rate_idx = iwl4965_hwrate_to_plcp_idx(rate_n_flags);
-
- if (*rate_idx == IWL_RATE_INVALID) {
- *rate_idx = -1;
- return -EINVAL;
- }
- tbl->is_SGI = 0; /* default legacy setup */
- tbl->is_ht40 = 0;
- tbl->is_dup = 0;
- tbl->ant_type = (ant_msk >> RATE_MCS_ANT_POS);
- tbl->lq_type = LQ_NONE;
- tbl->max_search = IWL_MAX_SEARCH;
-
- /* legacy rate format */
- if (!(rate_n_flags & RATE_MCS_HT_MSK)) {
- if (iwl4965_num_of_ant == 1) {
- if (band == IEEE80211_BAND_5GHZ)
- tbl->lq_type = LQ_A;
- else
- tbl->lq_type = LQ_G;
- }
- /* HT rate format */
- } else {
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- tbl->is_SGI = 1;
-
- if ((rate_n_flags & RATE_MCS_HT40_MSK) ||
- (rate_n_flags & RATE_MCS_DUP_MSK))
- tbl->is_ht40 = 1;
-
- if (rate_n_flags & RATE_MCS_DUP_MSK)
- tbl->is_dup = 1;
-
- mcs = iwl4965_rs_extract_rate(rate_n_flags);
-
- /* SISO */
- if (mcs <= IWL_RATE_SISO_60M_PLCP) {
- if (iwl4965_num_of_ant == 1)
- tbl->lq_type = LQ_SISO; /*else NONE*/
- /* MIMO2 */
- } else {
- if (iwl4965_num_of_ant == 2)
- tbl->lq_type = LQ_MIMO2;
- }
- }
- return 0;
-}
-
-/* switch to another antenna/antennas and return 1 */
-/* if no other valid antenna found, return 0 */
-static int iwl4965_rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
- struct iwl_scale_tbl_info *tbl)
-{
- u8 new_ant_type;
-
- if (!tbl->ant_type || tbl->ant_type > ANT_ABC)
- return 0;
-
- if (!iwl4965_rs_is_valid_ant(valid_ant, tbl->ant_type))
- return 0;
-
- new_ant_type = ant_toggle_lookup[tbl->ant_type];
-
- while ((new_ant_type != tbl->ant_type) &&
- !iwl4965_rs_is_valid_ant(valid_ant, new_ant_type))
- new_ant_type = ant_toggle_lookup[new_ant_type];
-
- if (new_ant_type == tbl->ant_type)
- return 0;
-
- tbl->ant_type = new_ant_type;
- *rate_n_flags &= ~RATE_MCS_ANT_ABC_MSK;
- *rate_n_flags |= new_ant_type << RATE_MCS_ANT_POS;
- return 1;
-}
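
The toggle above walks the ant_toggle_lookup cycle until it reaches a combination that is a subset of the valid-antenna mask, or comes back to where it started. A standalone sketch of that walk; the ANT_* bitmask encoding (A=1, B=2, C=4) is assumed here for illustration:

#include <stdio.h>

/* sequential enum gives the assumed bitmask encoding A=1, B=2, C=4 */
enum { ANT_NONE, ANT_A, ANT_B, ANT_AB, ANT_C, ANT_AC, ANT_BC, ANT_ABC };

static const unsigned char toggle[] = {
	[ANT_NONE] = ANT_NONE, [ANT_A]  = ANT_B,  [ANT_B]   = ANT_C,
	[ANT_AB]   = ANT_BC,   [ANT_C]  = ANT_A,  [ANT_AC]  = ANT_AB,
	[ANT_BC]   = ANT_AC,   [ANT_ABC] = ANT_ABC,
};

static unsigned next_valid(unsigned cur, unsigned valid)
{
	unsigned next = toggle[cur];

	/* keep cycling until the candidate is a subset of the valid mask,
	 * or we come back to the current combination */
	while (next != cur && (next & valid) != next)
		next = toggle[next];
	return next;    /* == cur means no other valid combination exists */
}

int main(void)
{
	/* with antennas A and C valid, toggling from A skips B and picks C (4) */
	printf("%u\n", next_valid(ANT_A, ANT_AC));
	return 0;
}
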
-
-/**
- * Green-field mode is valid if the station supports it and
- * there are no non-GF stations present in the BSS.
- */
-static bool iwl4965_rs_use_green(struct ieee80211_sta *sta)
-{
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
- return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
- !(ctx->ht.non_gf_sta_present);
-}
-
-/**
- * iwl4965_rs_get_supported_rates - get the available rates
- *
- * For management or broadcast frames, only the basic
- * available rates are returned.
- *
- */
-static u16 iwl4965_rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
- struct ieee80211_hdr *hdr,
- enum iwl_table_type rate_type)
-{
- if (is_legacy(rate_type)) {
- return lq_sta->active_legacy_rate;
- } else {
- if (is_siso(rate_type))
- return lq_sta->active_siso_rate;
- else
- return lq_sta->active_mimo2_rate;
- }
-}
-
-static u16
-iwl4965_rs_get_adjacent_rate(struct iwl_priv *priv, u8 index, u16 rate_mask,
- int rate_type)
-{
- u8 high = IWL_RATE_INVALID;
- u8 low = IWL_RATE_INVALID;
-
- /* 802.11a or HT walks to the next literal adjacent rate in
- * the rate table */
- if (is_a_band(rate_type) || !is_legacy(rate_type)) {
- int i;
- u32 mask;
-
- /* Find the previous rate that is in the rate mask */
- i = index - 1;
- for (mask = (1 << i); i >= 0; i--, mask >>= 1) {
- if (rate_mask & mask) {
- low = i;
- break;
- }
- }
-
- /* Find the next rate that is in the rate mask */
- i = index + 1;
- for (mask = (1 << i); i < IWL_RATE_COUNT; i++, mask <<= 1) {
- if (rate_mask & mask) {
- high = i;
- break;
- }
- }
-
- return (high << 8) | low;
- }
-
- low = index;
- while (low != IWL_RATE_INVALID) {
- low = iwlegacy_rates[low].prev_rs;
- if (low == IWL_RATE_INVALID)
- break;
- if (rate_mask & (1 << low))
- break;
- IWL_DEBUG_RATE(priv, "Skipping masked lower rate: %d\n", low);
- }
-
- high = index;
- while (high != IWL_RATE_INVALID) {
- high = iwlegacy_rates[high].next_rs;
- if (high == IWL_RATE_INVALID)
- break;
- if (rate_mask & (1 << high))
- break;
- IWL_DEBUG_RATE(priv, "Skipping masked higher rate: %d\n", high);
- }
-
- return (high << 8) | low;
-}
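
For the bitmask-walking case above, the result is packed as (high << 8) | low, with 0xff standing in for an invalid rate. A small standalone sketch of that scan and packing:

#include <stdio.h>

#define INVALID   0xff
#define NUM_RATES 13

static unsigned adjacent(unsigned index, unsigned rate_mask)
{
	unsigned low = INVALID, high = INVALID;
	int i;

	for (i = (int)index - 1; i >= 0; i--)      /* nearest lower rate */
		if (rate_mask & (1u << i)) { low = i; break; }

	for (i = index + 1; i < NUM_RATES; i++)    /* nearest higher rate */
		if (rate_mask & (1u << i)) { high = i; break; }

	return (high << 8) | low;
}

int main(void)
{
	/* 0x10d1 sets bits 0, 4, 6, 7, 12; from index 5 the neighbors are 4 and 6 */
	unsigned hl = adjacent(5, 0x10d1);

	printf("low=%u high=%u\n", hl & 0xff, (hl >> 8) & 0xff);
	return 0;
}
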
-
-static u32 iwl4965_rs_get_lower_rate(struct iwl_lq_sta *lq_sta,
- struct iwl_scale_tbl_info *tbl,
- u8 scale_index, u8 ht_possible)
-{
- s32 low;
- u16 rate_mask;
- u16 high_low;
- u8 switch_to_legacy = 0;
- u8 is_green = lq_sta->is_green;
- struct iwl_priv *priv = lq_sta->drv;
-
- /* Check if we need to switch from HT to legacy rates.
- * The assumption is that mandatory rates (1 Mbps or 6 Mbps)
- * are always supported (required by the spec). */
- if (!is_legacy(tbl->lq_type) && (!ht_possible || !scale_index)) {
- switch_to_legacy = 1;
- scale_index = rs_ht_to_legacy[scale_index];
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
- tbl->lq_type = LQ_A;
- else
- tbl->lq_type = LQ_G;
-
- if (iwl4965_num_of_ant(tbl->ant_type) > 1)
- tbl->ant_type =
- iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
-
- tbl->is_ht40 = 0;
- tbl->is_SGI = 0;
- tbl->max_search = IWL_MAX_SEARCH;
- }
-
- rate_mask = iwl4965_rs_get_supported_rates(lq_sta, NULL, tbl->lq_type);
-
- /* Mask with station rate restriction */
- if (is_legacy(tbl->lq_type)) {
- /* supp_rates has no CCK bits in A mode */
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
- rate_mask = (u16)(rate_mask &
- (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
- else
- rate_mask = (u16)(rate_mask & lq_sta->supp_rates);
- }
-
- /* If we switched from HT to legacy, check current rate */
- if (switch_to_legacy && (rate_mask & (1 << scale_index))) {
- low = scale_index;
- goto out;
- }
-
- high_low = iwl4965_rs_get_adjacent_rate(lq_sta->drv,
- scale_index, rate_mask,
- tbl->lq_type);
- low = high_low & 0xff;
-
- if (low == IWL_RATE_INVALID)
- low = scale_index;
-
-out:
- return iwl4965_rate_n_flags_from_tbl(lq_sta->drv, tbl, low, is_green);
-}
-
-/*
- * Simple function to compare two rate scale table types
- */
-static bool iwl4965_table_type_matches(struct iwl_scale_tbl_info *a,
- struct iwl_scale_tbl_info *b)
-{
- return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
- (a->is_SGI == b->is_SGI);
-}
-
-/*
- * mac80211 sends us Tx status
- */
-static void
-iwl4965_rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta,
- struct sk_buff *skb)
-{
- int legacy_success;
- int retries;
- int rs_index, mac_index, i;
- struct iwl_lq_sta *lq_sta = priv_sta;
- struct iwl_link_quality_cmd *table;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct iwl_priv *priv = (struct iwl_priv *)priv_r;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- enum mac80211_rate_control_flags mac_flags;
- u32 tx_rate;
- struct iwl_scale_tbl_info tbl_type;
- struct iwl_scale_tbl_info *curr_tbl, *other_tbl, *tmp_tbl;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
- IWL_DEBUG_RATE_LIMIT(priv,
- "get frame ack response, update rate scale window\n");
-
- /* Treat uninitialized rate scaling data same as non-existing. */
- if (!lq_sta) {
- IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n");
- return;
- } else if (!lq_sta->drv) {
- IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
- return;
- }
-
- if (!ieee80211_is_data(hdr->frame_control) ||
- info->flags & IEEE80211_TX_CTL_NO_ACK)
- return;
-
- /* This packet was aggregated but doesn't carry status info */
- if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
- !(info->flags & IEEE80211_TX_STAT_AMPDU))
- return;
-
- /*
- * Ignore this Tx frame response if its initial rate doesn't match
- * that of latest Link Quality command. There may be stragglers
- * from a previous Link Quality command, but we're no longer interested
- * in those; they're either from the "active" mode while we're trying
- * to check "search" mode, or a prior "search" mode after we've moved
- * to a new "search" mode (which might become the new "active" mode).
- */
- table = &lq_sta->lq;
- tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
- iwl4965_rs_get_tbl_info_from_mcs(tx_rate,
- priv->band, &tbl_type, &rs_index);
- if (priv->band == IEEE80211_BAND_5GHZ)
- rs_index -= IWL_FIRST_OFDM_RATE;
- mac_flags = info->status.rates[0].flags;
- mac_index = info->status.rates[0].idx;
- /* For HT packets, map MCS to PLCP */
- if (mac_flags & IEEE80211_TX_RC_MCS) {
- mac_index &= RATE_MCS_CODE_MSK; /* Remove # of streams */
- if (mac_index >= (IWL_RATE_9M_INDEX - IWL_FIRST_OFDM_RATE))
- mac_index++;
- /*
- * mac80211 HT index is always zero-indexed; we need to move
- * HT OFDM rates after CCK rates in 2.4 GHz band
- */
- if (priv->band == IEEE80211_BAND_2GHZ)
- mac_index += IWL_FIRST_OFDM_RATE;
- }
- /* Here we actually compare this rate to the latest LQ command */
- if ((mac_index < 0) ||
- (tbl_type.is_SGI !=
- !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
- (tbl_type.is_ht40 !=
- !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
- (tbl_type.is_dup !=
- !!(mac_flags & IEEE80211_TX_RC_DUP_DATA)) ||
- (tbl_type.ant_type != info->antenna_sel_tx) ||
- (!!(tx_rate & RATE_MCS_HT_MSK) !=
- !!(mac_flags & IEEE80211_TX_RC_MCS)) ||
- (!!(tx_rate & RATE_MCS_GF_MSK) !=
- !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
- (rs_index != mac_index)) {
- IWL_DEBUG_RATE(priv,
- "initial rate %d does not match %d (0x%x)\n",
- mac_index, rs_index, tx_rate);
- /*
- * Since the rates mismatch, the last LQ command may have failed.
- * After IWL_MISSED_RATE_MAX mismatches, resync the uCode with
- * ... driver.
- */
- lq_sta->missed_rate_counter++;
- if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
- lq_sta->missed_rate_counter = 0;
- iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq,
- CMD_ASYNC, false);
- }
- /* Regardless, ignore this status info for outdated rate */
- return;
- } else
- /* Rate did match, so reset the missed_rate_counter */
- lq_sta->missed_rate_counter = 0;
-
- /* Figure out if rate scale algorithm is in active or search table */
- if (iwl4965_table_type_matches(&tbl_type,
- &(lq_sta->lq_info[lq_sta->active_tbl]))) {
- curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
- } else if (iwl4965_table_type_matches(&tbl_type,
- &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
- curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
- other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- } else {
- IWL_DEBUG_RATE(priv,
- "Neither active nor search matches tx rate\n");
- tmp_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- IWL_DEBUG_RATE(priv, "active- lq:%x, ant:%x, SGI:%d\n",
- tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
- tmp_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
- IWL_DEBUG_RATE(priv, "search- lq:%x, ant:%x, SGI:%d\n",
- tmp_tbl->lq_type, tmp_tbl->ant_type, tmp_tbl->is_SGI);
- IWL_DEBUG_RATE(priv, "actual- lq:%x, ant:%x, SGI:%d\n",
- tbl_type.lq_type, tbl_type.ant_type, tbl_type.is_SGI);
- /*
- * No matching table found; bypass the data collection
- * and continue to perform rate scaling to find the rate table.
- */
- iwl4965_rs_stay_in_table(lq_sta, true);
- goto done;
- }
-
- /*
- * Updating the frame history depends on whether packets were
- * aggregated.
- *
- * For aggregation, all packets were transmitted at the same rate, the
- * first index into rate scale table.
- */
- if (info->flags & IEEE80211_TX_STAT_AMPDU) {
- tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
- iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
- &rs_index);
- iwl4965_rs_collect_tx_data(curr_tbl, rs_index,
- info->status.ampdu_len,
- info->status.ampdu_ack_len);
-
- /* Update success/fail counts if not searching for new mode */
- if (lq_sta->stay_in_tbl) {
- lq_sta->total_success += info->status.ampdu_ack_len;
- lq_sta->total_failed += (info->status.ampdu_len -
- info->status.ampdu_ack_len);
- }
- } else {
- /*
- * For legacy, update frame history for each Tx retry.
- */
- retries = info->status.rates[0].count - 1;
- /* HW doesn't send more than 15 retries */
- retries = min(retries, 15);
-
- /* The last transmission may have been successful */
- legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
- /* Collect data for each rate used during failed TX attempts */
- for (i = 0; i <= retries; ++i) {
- tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
- iwl4965_rs_get_tbl_info_from_mcs(tx_rate, priv->band,
- &tbl_type, &rs_index);
- /*
- * Only collect stats if retried rate is in the same RS
- * table as active/search.
- */
- if (iwl4965_table_type_matches(&tbl_type, curr_tbl))
- tmp_tbl = curr_tbl;
- else if (iwl4965_table_type_matches(&tbl_type,
- other_tbl))
- tmp_tbl = other_tbl;
- else
- continue;
- iwl4965_rs_collect_tx_data(tmp_tbl, rs_index, 1,
- i < retries ? 0 : legacy_success);
- }
-
- /* Update success/fail counts if not searching for new mode */
- if (lq_sta->stay_in_tbl) {
- lq_sta->total_success += legacy_success;
- lq_sta->total_failed += retries + (1 - legacy_success);
- }
- }
- /* The last TX rate is cached in lq_sta; it's set in if/else above */
- lq_sta->last_rate_n_flags = tx_rate;
-done:
- /* See if there's a better rate or modulation mode to try. */
- if (sta && sta->supp_rates[sband->band])
- iwl4965_rs_rate_scale_perform(priv, skb, sta, lq_sta);
-}
-
-/*
- * Begin a period of staying with a selected modulation mode.
- * Set "stay_in_tbl" flag to prevent any mode switches.
- * Set frame tx success limits according to legacy vs. high-throughput,
- * and reset overall (spanning all rates) tx success history statistics.
- * These control how long we stay using same modulation mode before
- * searching for a new mode.
- */
-static void iwl4965_rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
- struct iwl_lq_sta *lq_sta)
-{
- IWL_DEBUG_RATE(priv, "we are staying in the same table\n");
- lq_sta->stay_in_tbl = 1; /* only place this gets set */
- if (is_legacy) {
- lq_sta->table_count_limit = IWL_LEGACY_TABLE_COUNT;
- lq_sta->max_failure_limit = IWL_LEGACY_FAILURE_LIMIT;
- lq_sta->max_success_limit = IWL_LEGACY_SUCCESS_LIMIT;
- } else {
- lq_sta->table_count_limit = IWL_NONE_LEGACY_TABLE_COUNT;
- lq_sta->max_failure_limit = IWL_NONE_LEGACY_FAILURE_LIMIT;
- lq_sta->max_success_limit = IWL_NONE_LEGACY_SUCCESS_LIMIT;
- }
- lq_sta->table_count = 0;
- lq_sta->total_failed = 0;
- lq_sta->total_success = 0;
- lq_sta->flush_timer = jiffies;
- lq_sta->action_counter = 0;
-}
-
-/*
- * Find correct throughput table for given mode of modulation
- */
-static void iwl4965_rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
- struct iwl_scale_tbl_info *tbl)
-{
- /* Used to choose among HT tables */
- s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
-
- /* Check for invalid LQ type */
- if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
- tbl->expected_tpt = expected_tpt_legacy;
- return;
- }
-
- /* Legacy rates have only one table */
- if (is_legacy(tbl->lq_type)) {
- tbl->expected_tpt = expected_tpt_legacy;
- return;
- }
-
- /* Choose among many HT tables depending on number of streams
- * (SISO/MIMO2), channel width (20/40), SGI, and aggregation
- * status */
- if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
- ht_tbl_pointer = expected_tpt_siso20MHz;
- else if (is_siso(tbl->lq_type))
- ht_tbl_pointer = expected_tpt_siso40MHz;
- else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
- ht_tbl_pointer = expected_tpt_mimo2_20MHz;
- else /* if (is_mimo2(tbl->lq_type)) <-- must be true */
- ht_tbl_pointer = expected_tpt_mimo2_40MHz;
-
- if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
- tbl->expected_tpt = ht_tbl_pointer[0];
- else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
- tbl->expected_tpt = ht_tbl_pointer[1];
- else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
- tbl->expected_tpt = ht_tbl_pointer[2];
- else /* AGG+SGI */
- tbl->expected_tpt = ht_tbl_pointer[3];
-}
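
The four rows of each HT table above correspond to the (aggregation, SGI) combinations, and the if/else chain picks one of them. An equivalent way to express the selection is to index with the two flags, shown here with a few throughput values truncated from the SISO 20 MHz table above:

#include <stdio.h>

/* first three non-zero columns of the SISO 20 MHz table above */
static const int tpt_siso20[4][3] = {
	{ 42,  76, 102 },   /* Norm    */
	{ 46,  82, 110 },   /* SGI     */
	{ 48,  93, 135 },   /* AGG     */
	{ 53, 102, 149 },   /* AGG+SGI */
};

int main(void)
{
	int is_sgi = 1, is_agg = 1;
	/* row index 0..3 maps to Norm, SGI, AGG, AGG+SGI */
	const int *row = tpt_siso20[(is_agg << 1) | is_sgi];

	printf("expected tpt for the lowest HT rate: %d\n", row[0]);
	return 0;
}
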
-
-/*
- * Find starting rate for new "search" high-throughput mode of modulation.
- * Goal is to find lowest expected rate (under perfect conditions) that is
- * above the current measured throughput of "active" mode, to give new mode
- * a fair chance to prove itself without too many challenges.
- *
- * This gets called when transitioning to more aggressive modulation
- * (i.e. legacy to SISO or MIMO, or SISO to MIMO), as well as less aggressive
- * (i.e. MIMO to SISO). When moving to MIMO, bit rate will typically need
- * to decrease to match "active" throughput. When moving from MIMO to SISO,
- * bit rate will typically need to increase, but not if performance was bad.
- */
-static s32 iwl4965_rs_get_best_rate(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct iwl_scale_tbl_info *tbl, /* "search" */
- u16 rate_mask, s8 index)
-{
- /* "active" values */
- struct iwl_scale_tbl_info *active_tbl =
- &(lq_sta->lq_info[lq_sta->active_tbl]);
- s32 active_sr = active_tbl->win[index].success_ratio;
- s32 active_tpt = active_tbl->expected_tpt[index];
-
- /* expected "search" throughput */
- s32 *tpt_tbl = tbl->expected_tpt;
-
- s32 new_rate, high, low, start_hi;
- u16 high_low;
- s8 rate = index;
-
- new_rate = high = low = start_hi = IWL_RATE_INVALID;
-
- for (; ;) {
- high_low = iwl4965_rs_get_adjacent_rate(priv, rate, rate_mask,
- tbl->lq_type);
-
- low = high_low & 0xff;
- high = (high_low >> 8) & 0xff;
-
- /*
- * Lower the "search" bit rate, to give new "search" mode
- * approximately the same throughput as "active" if:
- *
- * 1) "Active" mode has been working modestly well (but not
- * great), and expected "search" throughput (under perfect
- * conditions) at candidate rate is above the actual
- * measured "active" throughput (but less than expected
- * "active" throughput under perfect conditions).
- * OR
- * 2) "Active" mode has been working perfectly or very well
- * and expected "search" throughput (under perfect
- * conditions) at candidate rate is above expected
- * "active" throughput (under perfect conditions).
- */
- if ((((100 * tpt_tbl[rate]) > lq_sta->last_tpt) &&
- ((active_sr > IWL_RATE_DECREASE_TH) &&
- (active_sr <= IWL_RATE_HIGH_TH) &&
- (tpt_tbl[rate] <= active_tpt))) ||
- ((active_sr >= IWL_RATE_SCALE_SWITCH) &&
- (tpt_tbl[rate] > active_tpt))) {
-
- /* (2nd or later pass)
- * If we've already tried to raise the rate, and are
- * now trying to lower it, use the higher rate. */
- if (start_hi != IWL_RATE_INVALID) {
- new_rate = start_hi;
- break;
- }
-
- new_rate = rate;
-
- /* Loop again with lower rate */
- if (low != IWL_RATE_INVALID)
- rate = low;
-
- /* Lower rate not available, use the original */
- else
- break;
-
- /* Else try to raise the "search" rate to match "active" */
- } else {
- /* (2nd or later pass)
- * If we've already tried to lower the rate, and are
- * now trying to raise it, use the lower rate. */
- if (new_rate != IWL_RATE_INVALID)
- break;
-
- /* Loop again with higher rate */
- else if (high != IWL_RATE_INVALID) {
- start_hi = high;
- rate = high;
-
- /* Higher rate not available, use the original */
- } else {
- new_rate = rate;
- break;
- }
- }
- }
-
- return new_rate;
-}
-
-/*
- * Set up search table for MIMO2
- */
-static int iwl4965_rs_switch_to_mimo2(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl, int index)
-{
- u16 rate_mask;
- s32 rate;
- s8 is_green = lq_sta->is_green;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
- if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
- return -1;
-
- if (((sta->ht_cap.cap & IEEE80211_HT_CAP_SM_PS) >> 2)
- == WLAN_HT_CAP_SM_PS_STATIC)
- return -1;
-
- /* Need both Tx chains/antennas to support MIMO */
- if (priv->hw_params.tx_chains_num < 2)
- return -1;
-
- IWL_DEBUG_RATE(priv, "LQ: try to switch to MIMO2\n");
-
- tbl->lq_type = LQ_MIMO2;
- tbl->is_dup = lq_sta->is_dup;
- tbl->action = 0;
- tbl->max_search = IWL_MAX_SEARCH;
- rate_mask = lq_sta->active_mimo2_rate;
-
- if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
- tbl->is_ht40 = 1;
- else
- tbl->is_ht40 = 0;
-
- iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
-
- rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
-
- IWL_DEBUG_RATE(priv, "LQ: MIMO2 best rate %d mask %X\n",
- rate, rate_mask);
- if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
- IWL_DEBUG_RATE(priv,
- "Can't switch with index %d rate mask %x\n",
- rate, rate_mask);
- return -1;
- }
- tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
- tbl, rate, is_green);
-
- IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
- tbl->current_rate, is_green);
- return 0;
-}
-
-/*
- * Set up search table for SISO
- */
-static int iwl4965_rs_switch_to_siso(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta,
- struct iwl_scale_tbl_info *tbl, int index)
-{
- u16 rate_mask;
- u8 is_green = lq_sta->is_green;
- s32 rate;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
- if (!conf_is_ht(conf) || !sta->ht_cap.ht_supported)
- return -1;
-
- IWL_DEBUG_RATE(priv, "LQ: try to switch to SISO\n");
-
- tbl->is_dup = lq_sta->is_dup;
- tbl->lq_type = LQ_SISO;
- tbl->action = 0;
- tbl->max_search = IWL_MAX_SEARCH;
- rate_mask = lq_sta->active_siso_rate;
-
- if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
- tbl->is_ht40 = 1;
- else
- tbl->is_ht40 = 0;
-
- if (is_green)
- tbl->is_SGI = 0; /*11n spec: no SGI in SISO+Greenfield*/
-
- iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
- rate = iwl4965_rs_get_best_rate(priv, lq_sta, tbl, rate_mask, index);
-
- IWL_DEBUG_RATE(priv, "LQ: get best rate %d mask %X\n", rate, rate_mask);
- if ((rate == IWL_RATE_INVALID) || !((1 << rate) & rate_mask)) {
- IWL_DEBUG_RATE(priv,
- "can not switch with index %d rate mask %x\n",
- rate, rate_mask);
- return -1;
- }
- tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv,
- tbl, rate, is_green);
- IWL_DEBUG_RATE(priv, "LQ: Switch to new mcs %X index is green %X\n",
- tbl->current_rate, is_green);
- return 0;
-}
-
-/*
- * Try to switch to new modulation mode from legacy
- */
-static int iwl4965_rs_move_legacy_other(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta,
- int index)
-{
- struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- struct iwl_scale_tbl_info *search_tbl =
- &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- struct iwl_rate_scale_data *window = &(tbl->win[index]);
- u32 sz = (sizeof(struct iwl_scale_tbl_info) -
- (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
- u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
- int ret = 0;
- u8 update_search_tbl_counter = 0;
-
- tbl->action = IWL_LEGACY_SWITCH_SISO;
-
- start_action = tbl->action;
- for (; ;) {
- lq_sta->action_counter++;
- switch (tbl->action) {
- case IWL_LEGACY_SWITCH_ANTENNA1:
- case IWL_LEGACY_SWITCH_ANTENNA2:
- IWL_DEBUG_RATE(priv, "LQ: Legacy toggle Antenna\n");
-
- if ((tbl->action == IWL_LEGACY_SWITCH_ANTENNA1 &&
- tx_chains_num <= 1) ||
- (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2 &&
- tx_chains_num <= 2))
- break;
-
- /* Don't change antenna if success has been great */
- if (window->success_ratio >= IWL_RS_GOOD_RATIO)
- break;
-
- /* Set up search table to try other antenna */
- memcpy(search_tbl, tbl, sz);
-
- if (iwl4965_rs_toggle_antenna(valid_tx_ant,
- &search_tbl->current_rate, search_tbl)) {
- update_search_tbl_counter = 1;
- iwl4965_rs_set_expected_tpt_table(lq_sta,
- search_tbl);
- goto out;
- }
- break;
- case IWL_LEGACY_SWITCH_SISO:
- IWL_DEBUG_RATE(priv, "LQ: Legacy switch to SISO\n");
-
- /* Set up search table to try SISO */
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = 0;
- ret = iwl4965_rs_switch_to_siso(priv, lq_sta, conf, sta,
- search_tbl, index);
- if (!ret) {
- lq_sta->action_counter = 0;
- goto out;
- }
-
- break;
- case IWL_LEGACY_SWITCH_MIMO2_AB:
- case IWL_LEGACY_SWITCH_MIMO2_AC:
- case IWL_LEGACY_SWITCH_MIMO2_BC:
- IWL_DEBUG_RATE(priv, "LQ: Legacy switch to MIMO2\n");
-
- /* Set up search table to try MIMO */
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = 0;
-
- if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AB)
- search_tbl->ant_type = ANT_AB;
- else if (tbl->action == IWL_LEGACY_SWITCH_MIMO2_AC)
- search_tbl->ant_type = ANT_AC;
- else
- search_tbl->ant_type = ANT_BC;
-
- if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
- break;
-
- ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
- conf, sta,
- search_tbl, index);
- if (!ret) {
- lq_sta->action_counter = 0;
- goto out;
- }
- break;
- }
- tbl->action++;
- if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
- tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
-
- if (tbl->action == start_action)
- break;
-
- }
- search_tbl->lq_type = LQ_NONE;
- return 0;
-
-out:
- lq_sta->search_better_tbl = 1;
- tbl->action++;
- if (tbl->action > IWL_LEGACY_SWITCH_MIMO2_BC)
- tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
- if (update_search_tbl_counter)
- search_tbl->action = tbl->action;
- return 0;
-
-}
-
-/*
- * Try to switch to new modulation mode from SISO
- */
-static int iwl4965_rs_move_siso_to_other(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta, int index)
-{
- u8 is_green = lq_sta->is_green;
- struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- struct iwl_scale_tbl_info *search_tbl =
- &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- struct iwl_rate_scale_data *window = &(tbl->win[index]);
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- u32 sz = (sizeof(struct iwl_scale_tbl_info) -
- (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
- u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
- u8 update_search_tbl_counter = 0;
- int ret;
-
- start_action = tbl->action;
-
- for (;;) {
- lq_sta->action_counter++;
- switch (tbl->action) {
- case IWL_SISO_SWITCH_ANTENNA1:
- case IWL_SISO_SWITCH_ANTENNA2:
- IWL_DEBUG_RATE(priv, "LQ: SISO toggle Antenna\n");
- if ((tbl->action == IWL_SISO_SWITCH_ANTENNA1 &&
- tx_chains_num <= 1) ||
- (tbl->action == IWL_SISO_SWITCH_ANTENNA2 &&
- tx_chains_num <= 2))
- break;
-
- if (window->success_ratio >= IWL_RS_GOOD_RATIO)
- break;
-
- memcpy(search_tbl, tbl, sz);
- if (iwl4965_rs_toggle_antenna(valid_tx_ant,
- &search_tbl->current_rate, search_tbl)) {
- update_search_tbl_counter = 1;
- goto out;
- }
- break;
- case IWL_SISO_SWITCH_MIMO2_AB:
- case IWL_SISO_SWITCH_MIMO2_AC:
- case IWL_SISO_SWITCH_MIMO2_BC:
- IWL_DEBUG_RATE(priv, "LQ: SISO switch to MIMO2\n");
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = 0;
-
- if (tbl->action == IWL_SISO_SWITCH_MIMO2_AB)
- search_tbl->ant_type = ANT_AB;
- else if (tbl->action == IWL_SISO_SWITCH_MIMO2_AC)
- search_tbl->ant_type = ANT_AC;
- else
- search_tbl->ant_type = ANT_BC;
-
- if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
- break;
-
- ret = iwl4965_rs_switch_to_mimo2(priv, lq_sta,
- conf, sta,
- search_tbl, index);
- if (!ret)
- goto out;
- break;
- case IWL_SISO_SWITCH_GI:
- if (!tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_20))
- break;
- if (tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_40))
- break;
-
- IWL_DEBUG_RATE(priv, "LQ: SISO toggle SGI/NGI\n");
-
- memcpy(search_tbl, tbl, sz);
- if (is_green) {
- if (!tbl->is_SGI)
- break;
- else
- IWL_ERR(priv,
- "SGI was set in GF+SISO\n");
- }
- search_tbl->is_SGI = !tbl->is_SGI;
- iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
- if (tbl->is_SGI) {
- s32 tpt = lq_sta->last_tpt / 100;
- if (tpt >= search_tbl->expected_tpt[index])
- break;
- }
- search_tbl->current_rate =
- iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
- index, is_green);
- update_search_tbl_counter = 1;
- goto out;
- }
- tbl->action++;
- if (tbl->action > IWL_SISO_SWITCH_GI)
- tbl->action = IWL_SISO_SWITCH_ANTENNA1;
-
- if (tbl->action == start_action)
- break;
- }
- search_tbl->lq_type = LQ_NONE;
- return 0;
-
- out:
- lq_sta->search_better_tbl = 1;
- tbl->action++;
- if (tbl->action > IWL_SISO_SWITCH_GI)
- tbl->action = IWL_SISO_SWITCH_ANTENNA1;
- if (update_search_tbl_counter)
- search_tbl->action = tbl->action;
-
- return 0;
-}
-
-/*
- * Try to switch to new modulation mode from MIMO2
- */
-static int iwl4965_rs_move_mimo2_to_other(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta, int index)
-{
- s8 is_green = lq_sta->is_green;
- struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- struct iwl_scale_tbl_info *search_tbl =
- &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- struct iwl_rate_scale_data *window = &(tbl->win[index]);
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- u32 sz = (sizeof(struct iwl_scale_tbl_info) -
- (sizeof(struct iwl_rate_scale_data) * IWL_RATE_COUNT));
- u8 start_action;
- u8 valid_tx_ant = priv->hw_params.valid_tx_ant;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
- u8 update_search_tbl_counter = 0;
- int ret;
-
- start_action = tbl->action;
- for (;;) {
- lq_sta->action_counter++;
- switch (tbl->action) {
- case IWL_MIMO2_SWITCH_ANTENNA1:
- case IWL_MIMO2_SWITCH_ANTENNA2:
- IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle Antennas\n");
-
- if (tx_chains_num <= 2)
- break;
-
- if (window->success_ratio >= IWL_RS_GOOD_RATIO)
- break;
-
- memcpy(search_tbl, tbl, sz);
- if (iwl4965_rs_toggle_antenna(valid_tx_ant,
- &search_tbl->current_rate, search_tbl)) {
- update_search_tbl_counter = 1;
- goto out;
- }
- break;
- case IWL_MIMO2_SWITCH_SISO_A:
- case IWL_MIMO2_SWITCH_SISO_B:
- case IWL_MIMO2_SWITCH_SISO_C:
- IWL_DEBUG_RATE(priv, "LQ: MIMO2 switch to SISO\n");
-
- /* Set up new search table for SISO */
- memcpy(search_tbl, tbl, sz);
-
- if (tbl->action == IWL_MIMO2_SWITCH_SISO_A)
- search_tbl->ant_type = ANT_A;
- else if (tbl->action == IWL_MIMO2_SWITCH_SISO_B)
- search_tbl->ant_type = ANT_B;
- else
- search_tbl->ant_type = ANT_C;
-
- if (!iwl4965_rs_is_valid_ant(valid_tx_ant,
- search_tbl->ant_type))
- break;
-
- ret = iwl4965_rs_switch_to_siso(priv, lq_sta,
- conf, sta,
- search_tbl, index);
- if (!ret)
- goto out;
-
- break;
-
- case IWL_MIMO2_SWITCH_GI:
- if (!tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_20))
- break;
- if (tbl->is_ht40 && !(ht_cap->cap &
- IEEE80211_HT_CAP_SGI_40))
- break;
-
- IWL_DEBUG_RATE(priv, "LQ: MIMO2 toggle SGI/NGI\n");
-
- /* Set up new search table for MIMO2 */
- memcpy(search_tbl, tbl, sz);
- search_tbl->is_SGI = !tbl->is_SGI;
- iwl4965_rs_set_expected_tpt_table(lq_sta, search_tbl);
- /*
- * If active table already uses the fastest possible
- * modulation (dual stream with short guard interval),
- * and it's working well, there's no need to look
- * for a better type of modulation!
- */
- if (tbl->is_SGI) {
- s32 tpt = lq_sta->last_tpt / 100;
- if (tpt >= search_tbl->expected_tpt[index])
- break;
- }
- search_tbl->current_rate =
- iwl4965_rate_n_flags_from_tbl(priv, search_tbl,
- index, is_green);
- update_search_tbl_counter = 1;
- goto out;
-
- }
- tbl->action++;
- if (tbl->action > IWL_MIMO2_SWITCH_GI)
- tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
-
- if (tbl->action == start_action)
- break;
- }
- search_tbl->lq_type = LQ_NONE;
- return 0;
- out:
- lq_sta->search_better_tbl = 1;
- tbl->action++;
- if (tbl->action > IWL_MIMO2_SWITCH_GI)
- tbl->action = IWL_MIMO2_SWITCH_ANTENNA1;
- if (update_search_tbl_counter)
- search_tbl->action = tbl->action;
-
- return 0;
-
-}
-
-/*
- * Check whether we should continue using same modulation mode, or
- * begin search for a new mode, based on:
- * 1) # tx successes or failures while using this mode
- * 2) # times calling this function
- * 3) elapsed time in this mode (not used, for now)
- */
-static void
-iwl4965_rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
-{
- struct iwl_scale_tbl_info *tbl;
- int i;
- int active_tbl;
- int flush_interval_passed = 0;
- struct iwl_priv *priv;
-
- priv = lq_sta->drv;
- active_tbl = lq_sta->active_tbl;
-
- tbl = &(lq_sta->lq_info[active_tbl]);
-
- /* If we've been disallowing search, see if we should now allow it */
- if (lq_sta->stay_in_tbl) {
-
- /* Elapsed time using current modulation mode */
- if (lq_sta->flush_timer)
- flush_interval_passed =
- time_after(jiffies,
- (unsigned long)(lq_sta->flush_timer +
- IWL_RATE_SCALE_FLUSH_INTVL));
-
- /*
- * Check if we should allow search for new modulation mode.
- * If many frames have failed or succeeded, or we've used
- * this same modulation for a long time, allow search, and
- * reset history stats that keep track of whether we should
- * allow a new search. Also (below) reset all bitmaps and
- * stats in active history.
- */
- if (force_search ||
- (lq_sta->total_failed > lq_sta->max_failure_limit) ||
- (lq_sta->total_success > lq_sta->max_success_limit) ||
- ((!lq_sta->search_better_tbl) && (lq_sta->flush_timer)
- && (flush_interval_passed))) {
- IWL_DEBUG_RATE(priv, "LQ: stay is expired %d %d %d\n:",
- lq_sta->total_failed,
- lq_sta->total_success,
- flush_interval_passed);
-
- /* Allow search for new mode */
- lq_sta->stay_in_tbl = 0; /* only place reset */
- lq_sta->total_failed = 0;
- lq_sta->total_success = 0;
- lq_sta->flush_timer = 0;
-
- /*
- * Else, if we've used this modulation mode for enough repetitions
- * (regardless of elapsed time or success/failure), reset
- * history bitmaps and rate-specific stats for all rates in the
- * active table.
- */
- } else {
- lq_sta->table_count++;
- if (lq_sta->table_count >=
- lq_sta->table_count_limit) {
- lq_sta->table_count = 0;
-
- IWL_DEBUG_RATE(priv,
- "LQ: stay in table clear win\n");
- for (i = 0; i < IWL_RATE_COUNT; i++)
- iwl4965_rs_rate_scale_clear_window(
- &(tbl->win[i]));
- }
- }
-
- /* If transitioning to allow "search", reset all history
- * bitmaps and stats in active table (this will become the new
- * "search" table). */
- if (!lq_sta->stay_in_tbl) {
- for (i = 0; i < IWL_RATE_COUNT; i++)
- iwl4965_rs_rate_scale_clear_window(
- &(tbl->win[i]));
- }
- }
-}
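
The allow-search decision above boils down to a predicate over the accumulated counters and the flush timer. A standalone sketch with simplified names; the running-flush-timer and interval checks are folded into a single flush_expired flag, and the limit values in the example are placeholders, not the driver's constants:

#include <stdbool.h>
#include <stdio.h>

static bool allow_search(bool force, int total_failed, int max_failed,
			 int total_success, int max_success,
			 bool searching, bool flush_expired)
{
	return force ||
	       total_failed > max_failed ||
	       total_success > max_success ||
	       (!searching && flush_expired);
}

int main(void)
{
	/* few failures/successes, but the flush interval has expired */
	printf("%d\n", allow_search(false, 10, 160, 20, 480, false, true));
	return 0;
}
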
-
-/*
- * setup rate table in uCode
- * return rate_n_flags as used in the table
- */
-static u32 iwl4965_rs_update_rate_tbl(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct iwl_lq_sta *lq_sta,
- struct iwl_scale_tbl_info *tbl,
- int index, u8 is_green)
-{
- u32 rate;
-
- /* Update uCode's rate table. */
- rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, index, is_green);
- iwl4965_rs_fill_link_cmd(priv, lq_sta, rate);
- iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_ASYNC, false);
-
- return rate;
-}
-
-/*
- * Do rate scaling and search for new modulation mode.
- */
-static void iwl4965_rs_rate_scale_perform(struct iwl_priv *priv,
- struct sk_buff *skb,
- struct ieee80211_sta *sta,
- struct iwl_lq_sta *lq_sta)
-{
- struct ieee80211_hw *hw = priv->hw;
- struct ieee80211_conf *conf = &hw->conf;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- int low = IWL_RATE_INVALID;
- int high = IWL_RATE_INVALID;
- int index;
- int i;
- struct iwl_rate_scale_data *window = NULL;
- int current_tpt = IWL_INVALID_VALUE;
- int low_tpt = IWL_INVALID_VALUE;
- int high_tpt = IWL_INVALID_VALUE;
- u32 fail_count;
- s8 scale_action = 0;
- u16 rate_mask;
- u8 update_lq = 0;
- struct iwl_scale_tbl_info *tbl, *tbl1;
- u16 rate_scale_index_msk = 0;
- u32 rate;
- u8 is_green = 0;
- u8 active_tbl = 0;
- u8 done_search = 0;
- u16 high_low;
- s32 sr;
- u8 tid = MAX_TID_COUNT;
- struct iwl_tid_data *tid_data;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
- IWL_DEBUG_RATE(priv, "rate scale calculate new rate for skb\n");
-
- /* Send management frames and NO_ACK data using lowest rate. */
- /* TODO: this could probably be improved.. */
- if (!ieee80211_is_data(hdr->frame_control) ||
- info->flags & IEEE80211_TX_CTL_NO_ACK)
- return;
-
- if (!sta || !lq_sta)
- return;
-
- lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
-
- tid = iwl4965_rs_tl_add_packet(lq_sta, hdr);
- if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) {
- tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid];
- if (tid_data->agg.state == IWL_AGG_OFF)
- lq_sta->is_agg = 0;
- else
- lq_sta->is_agg = 1;
- } else
- lq_sta->is_agg = 0;
-
- /*
- * Select rate-scale / modulation-mode table to work with in
- * the rest of this function: "search" if searching for better
- * modulation mode, or "active" if doing rate scaling within a mode.
- */
- if (!lq_sta->search_better_tbl)
- active_tbl = lq_sta->active_tbl;
- else
- active_tbl = 1 - lq_sta->active_tbl;
-
- tbl = &(lq_sta->lq_info[active_tbl]);
- if (is_legacy(tbl->lq_type))
- lq_sta->is_green = 0;
- else
- lq_sta->is_green = iwl4965_rs_use_green(sta);
- is_green = lq_sta->is_green;
-
- /* current tx rate */
- index = lq_sta->last_txrate_idx;
-
- IWL_DEBUG_RATE(priv, "Rate scale index %d for type %d\n", index,
- tbl->lq_type);
-
- /* rates available for this association, and for modulation mode */
- rate_mask = iwl4965_rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
-
- IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);
-
- /* mask with station rate restriction */
- if (is_legacy(tbl->lq_type)) {
- if (lq_sta->band == IEEE80211_BAND_5GHZ)
- /* supp_rates has no CCK bits in A mode */
- rate_scale_index_msk = (u16) (rate_mask &
- (lq_sta->supp_rates << IWL_FIRST_OFDM_RATE));
- else
- rate_scale_index_msk = (u16) (rate_mask &
- lq_sta->supp_rates);
-
- } else
- rate_scale_index_msk = rate_mask;
-
- if (!rate_scale_index_msk)
- rate_scale_index_msk = rate_mask;
-
- if (!((1 << index) & rate_scale_index_msk)) {
- IWL_ERR(priv, "Current Rate is not valid\n");
- if (lq_sta->search_better_tbl) {
- /* revert to active table if search table is not valid*/
- tbl->lq_type = LQ_NONE;
- lq_sta->search_better_tbl = 0;
- tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
- /* get "active" rate info */
- index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
- rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
- tbl, index, is_green);
- }
- return;
- }
-
- /* Get expected throughput table and history window for current rate */
- if (!tbl->expected_tpt) {
- IWL_ERR(priv, "tbl->expected_tpt is NULL\n");
- return;
- }
-
- /* force user max rate if set by user */
- if ((lq_sta->max_rate_idx != -1) &&
- (lq_sta->max_rate_idx < index)) {
- index = lq_sta->max_rate_idx;
- update_lq = 1;
- window = &(tbl->win[index]);
- goto lq_update;
- }
-
- window = &(tbl->win[index]);
-
- /*
- * If there is not enough history to calculate actual average
- * throughput, keep analyzing results of more tx frames, without
- * changing rate or mode (bypass most of the rest of this function).
- * Set up new rate table in uCode only if old rate is not supported
- * in current association (use new rate found above).
- */
- fail_count = window->counter - window->success_counter;
- if ((fail_count < IWL_RATE_MIN_FAILURE_TH) &&
- (window->success_counter < IWL_RATE_MIN_SUCCESS_TH)) {
- IWL_DEBUG_RATE(priv, "LQ: still below TH. succ=%d total=%d "
- "for index %d\n",
- window->success_counter, window->counter, index);
-
- /* Can't calculate this yet; not enough history */
- window->average_tpt = IWL_INVALID_VALUE;
-
- /* Should we stay with this modulation mode,
- * or search for a new one? */
- iwl4965_rs_stay_in_table(lq_sta, false);
-
- goto out;
- }
- /* Else we have enough samples; calculate estimate of
- * actual average throughput */
- if (window->average_tpt != ((window->success_ratio *
- tbl->expected_tpt[index] + 64) / 128)) {
- IWL_ERR(priv,
- "expected_tpt should have been calculated by now\n");
- window->average_tpt = ((window->success_ratio *
- tbl->expected_tpt[index] + 64) / 128);
- }
-
- /* If we are searching for better modulation mode, check success. */
- if (lq_sta->search_better_tbl) {
- /* If good success, continue using the "search" mode;
- * no need to send new link quality command, since we're
- * continuing to use the setup that we've been trying. */
- if (window->average_tpt > lq_sta->last_tpt) {
-
- IWL_DEBUG_RATE(priv, "LQ: SWITCHING TO NEW TABLE "
- "suc=%d cur-tpt=%d old-tpt=%d\n",
- window->success_ratio,
- window->average_tpt,
- lq_sta->last_tpt);
-
- if (!is_legacy(tbl->lq_type))
- lq_sta->enable_counter = 1;
-
- /* Swap tables; "search" becomes "active" */
- lq_sta->active_tbl = active_tbl;
- current_tpt = window->average_tpt;
-
- /* Else poor success; go back to mode in "active" table */
- } else {
-
- IWL_DEBUG_RATE(priv, "LQ: GOING BACK TO THE OLD TABLE "
- "suc=%d cur-tpt=%d old-tpt=%d\n",
- window->success_ratio,
- window->average_tpt,
- lq_sta->last_tpt);
-
- /* Nullify "search" table */
- tbl->lq_type = LQ_NONE;
-
- /* Revert to "active" table */
- active_tbl = lq_sta->active_tbl;
- tbl = &(lq_sta->lq_info[active_tbl]);
-
- /* Revert to "active" rate and throughput info */
- index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
- current_tpt = lq_sta->last_tpt;
-
- /* Need to set up a new rate table in uCode */
- update_lq = 1;
- }
-
- /* Either way, we've made a decision; modulation mode
- * search is done, allow rate adjustment next time. */
- lq_sta->search_better_tbl = 0;
- done_search = 1; /* Don't switch modes below! */
- goto lq_update;
- }
-
- /* (Else) not in search of better modulation mode, try for better
- * starting rate, while staying in this mode. */
- high_low = iwl4965_rs_get_adjacent_rate(priv, index,
- rate_scale_index_msk,
- tbl->lq_type);
- low = high_low & 0xff;
- high = (high_low >> 8) & 0xff;
-
-	/* If the user set a max rate, don't allow a rate higher than that constraint */
- if ((lq_sta->max_rate_idx != -1) &&
- (lq_sta->max_rate_idx < high))
- high = IWL_RATE_INVALID;
-
- sr = window->success_ratio;
-
- /* Collect measured throughputs for current and adjacent rates */
- current_tpt = window->average_tpt;
- if (low != IWL_RATE_INVALID)
- low_tpt = tbl->win[low].average_tpt;
- if (high != IWL_RATE_INVALID)
- high_tpt = tbl->win[high].average_tpt;
-
- scale_action = 0;
-
- /* Too many failures, decrease rate */
- if ((sr <= IWL_RATE_DECREASE_TH) || (current_tpt == 0)) {
- IWL_DEBUG_RATE(priv,
- "decrease rate because of low success_ratio\n");
- scale_action = -1;
-
- /* No throughput measured yet for adjacent rates; try increase. */
- } else if ((low_tpt == IWL_INVALID_VALUE) &&
- (high_tpt == IWL_INVALID_VALUE)) {
-
- if (high != IWL_RATE_INVALID && sr >= IWL_RATE_INCREASE_TH)
- scale_action = 1;
- else if (low != IWL_RATE_INVALID)
- scale_action = 0;
- }
-
- /* Both adjacent throughputs are measured, but neither one has better
- * throughput; we're using the best rate, don't change it! */
- else if ((low_tpt != IWL_INVALID_VALUE) &&
- (high_tpt != IWL_INVALID_VALUE) &&
- (low_tpt < current_tpt) &&
- (high_tpt < current_tpt))
- scale_action = 0;
-
- /* At least one adjacent rate's throughput is measured,
- * and may have better performance. */
- else {
- /* Higher adjacent rate's throughput is measured */
- if (high_tpt != IWL_INVALID_VALUE) {
- /* Higher rate has better throughput */
- if (high_tpt > current_tpt &&
- sr >= IWL_RATE_INCREASE_TH) {
- scale_action = 1;
- } else {
- scale_action = 0;
- }
-
- /* Lower adjacent rate's throughput is measured */
- } else if (low_tpt != IWL_INVALID_VALUE) {
- /* Lower rate has better throughput */
- if (low_tpt > current_tpt) {
- IWL_DEBUG_RATE(priv,
- "decrease rate because of low tpt\n");
- scale_action = -1;
- } else if (sr >= IWL_RATE_INCREASE_TH) {
- scale_action = 1;
- }
- }
- }
-
- /* Sanity check; asked for decrease, but success rate or throughput
- * has been good at old rate. Don't change it. */
- if ((scale_action == -1) && (low != IWL_RATE_INVALID) &&
- ((sr > IWL_RATE_HIGH_TH) ||
- (current_tpt > (100 * tbl->expected_tpt[low]))))
- scale_action = 0;
-
- switch (scale_action) {
- case -1:
- /* Decrease starting rate, update uCode's rate table */
- if (low != IWL_RATE_INVALID) {
- update_lq = 1;
- index = low;
- }
-
- break;
- case 1:
- /* Increase starting rate, update uCode's rate table */
- if (high != IWL_RATE_INVALID) {
- update_lq = 1;
- index = high;
- }
-
- break;
- case 0:
- /* No change */
- default:
- break;
- }
-
- IWL_DEBUG_RATE(priv, "choose rate scale index %d action %d low %d "
- "high %d type %d\n",
- index, scale_action, low, high, tbl->lq_type);
-
-lq_update:
- /* Replace uCode's rate table for the destination station. */
- if (update_lq)
- rate = iwl4965_rs_update_rate_tbl(priv, ctx, lq_sta,
- tbl, index, is_green);
-
- /* Should we stay with this modulation mode,
- * or search for a new one? */
- iwl4965_rs_stay_in_table(lq_sta, false);
-
- /*
- * Search for new modulation mode if we're:
- * 1) Not changing rates right now
- * 2) Not just finishing up a search
- * 3) Allowing a new search
- */
- if (!update_lq && !done_search &&
- !lq_sta->stay_in_tbl && window->counter) {
- /* Save current throughput to compare with "search" throughput*/
- lq_sta->last_tpt = current_tpt;
-
- /* Select a new "search" modulation mode to try.
- * If one is found, set up the new "search" table. */
- if (is_legacy(tbl->lq_type))
- iwl4965_rs_move_legacy_other(priv, lq_sta,
- conf, sta, index);
- else if (is_siso(tbl->lq_type))
- iwl4965_rs_move_siso_to_other(priv, lq_sta,
- conf, sta, index);
- else /* (is_mimo2(tbl->lq_type)) */
- iwl4965_rs_move_mimo2_to_other(priv, lq_sta,
- conf, sta, index);
-
- /* If new "search" mode was selected, set up in uCode table */
- if (lq_sta->search_better_tbl) {
- /* Access the "search" table, clear its history. */
- tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
- for (i = 0; i < IWL_RATE_COUNT; i++)
- iwl4965_rs_rate_scale_clear_window(
- &(tbl->win[i]));
-
- /* Use new "search" start rate */
- index = iwl4965_hwrate_to_plcp_idx(tbl->current_rate);
-
- IWL_DEBUG_RATE(priv,
- "Switch current mcs: %X index: %d\n",
- tbl->current_rate, index);
- iwl4965_rs_fill_link_cmd(priv, lq_sta,
- tbl->current_rate);
- iwl_legacy_send_lq_cmd(priv, ctx,
- &lq_sta->lq, CMD_ASYNC, false);
- } else
- done_search = 1;
- }
-
- if (done_search && !lq_sta->stay_in_tbl) {
- /* If the "active" (non-search) mode was legacy,
- * and we've tried switching antennas,
- * but we haven't been able to try HT modes (not available),
- * stay with best antenna legacy modulation for a while
- * before next round of mode comparisons. */
- tbl1 = &(lq_sta->lq_info[lq_sta->active_tbl]);
- if (is_legacy(tbl1->lq_type) && !conf_is_ht(conf) &&
- lq_sta->action_counter > tbl1->max_search) {
- IWL_DEBUG_RATE(priv, "LQ: STAY in legacy table\n");
- iwl4965_rs_set_stay_in_table(priv, 1, lq_sta);
- }
-
- /* If we're in an HT mode, and all 3 mode switch actions
- * have been tried and compared, stay in this best modulation
- * mode for a while before next round of mode comparisons. */
- if (lq_sta->enable_counter &&
- (lq_sta->action_counter >= tbl1->max_search)) {
- if ((lq_sta->last_tpt > IWL_AGG_TPT_THREHOLD) &&
- (lq_sta->tx_agg_tid_en & (1 << tid)) &&
- (tid != MAX_TID_COUNT)) {
- tid_data =
- &priv->stations[lq_sta->lq.sta_id].tid[tid];
- if (tid_data->agg.state == IWL_AGG_OFF) {
- IWL_DEBUG_RATE(priv,
- "try to aggregate tid %d\n",
- tid);
- iwl4965_rs_tl_turn_on_agg(priv, tid,
- lq_sta, sta);
- }
- }
- iwl4965_rs_set_stay_in_table(priv, 0, lq_sta);
- }
- }
-
-out:
- tbl->current_rate = iwl4965_rate_n_flags_from_tbl(priv, tbl,
- index, is_green);
- i = index;
- lq_sta->last_txrate_idx = i;
-}
-
-/**
- * iwl4965_rs_initialize_lq - Initialize a station's hardware rate table
- *
- * The uCode's station table contains a table of fallback rates
- * for automatic fallback during transmission.
- *
- * NOTE: This sets up a default set of values. These will be replaced later
- * if the driver's iwl-4965-rs rate scaling algorithm is used, instead of
- * rc80211_simple.
- *
- * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
- * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
- * which requires station table entry to exist).
- */
-static void iwl4965_rs_initialize_lq(struct iwl_priv *priv,
- struct ieee80211_conf *conf,
- struct ieee80211_sta *sta,
- struct iwl_lq_sta *lq_sta)
-{
- struct iwl_scale_tbl_info *tbl;
- int rate_idx;
- int i;
- u32 rate;
- u8 use_green = iwl4965_rs_use_green(sta);
- u8 active_tbl = 0;
- u8 valid_tx_ant;
- struct iwl_station_priv *sta_priv;
- struct iwl_rxon_context *ctx;
-
- if (!sta || !lq_sta)
- return;
-
- sta_priv = (void *)sta->drv_priv;
- ctx = sta_priv->common.ctx;
-
- i = lq_sta->last_txrate_idx;
-
- valid_tx_ant = priv->hw_params.valid_tx_ant;
-
- if (!lq_sta->search_better_tbl)
- active_tbl = lq_sta->active_tbl;
- else
- active_tbl = 1 - lq_sta->active_tbl;
-
- tbl = &(lq_sta->lq_info[active_tbl]);
-
- if ((i < 0) || (i >= IWL_RATE_COUNT))
- i = 0;
-
- rate = iwlegacy_rates[i].plcp;
- tbl->ant_type = iwl4965_first_antenna(valid_tx_ant);
- rate |= tbl->ant_type << RATE_MCS_ANT_POS;
-
- if (i >= IWL_FIRST_CCK_RATE && i <= IWL_LAST_CCK_RATE)
- rate |= RATE_MCS_CCK_MSK;
-
- iwl4965_rs_get_tbl_info_from_mcs(rate, priv->band, tbl, &rate_idx);
- if (!iwl4965_rs_is_valid_ant(valid_tx_ant, tbl->ant_type))
- iwl4965_rs_toggle_antenna(valid_tx_ant, &rate, tbl);
-
- rate = iwl4965_rate_n_flags_from_tbl(priv, tbl, rate_idx, use_green);
- tbl->current_rate = rate;
- iwl4965_rs_set_expected_tpt_table(lq_sta, tbl);
- iwl4965_rs_fill_link_cmd(NULL, lq_sta, rate);
- priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
- iwl_legacy_send_lq_cmd(priv, ctx, &lq_sta->lq, CMD_SYNC, true);
-}
-
-static void
-iwl4965_rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
- struct ieee80211_tx_rate_control *txrc)
-{
-
- struct sk_buff *skb = txrc->skb;
- struct ieee80211_supported_band *sband = txrc->sband;
- struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct iwl_lq_sta *lq_sta = priv_sta;
- int rate_idx;
-
- IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n");
-
- /* Get max rate if user set max rate */
- if (lq_sta) {
- lq_sta->max_rate_idx = txrc->max_rate_idx;
- if ((sband->band == IEEE80211_BAND_5GHZ) &&
- (lq_sta->max_rate_idx != -1))
- lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE;
- if ((lq_sta->max_rate_idx < 0) ||
- (lq_sta->max_rate_idx >= IWL_RATE_COUNT))
- lq_sta->max_rate_idx = -1;
- }
-
- /* Treat uninitialized rate scaling data same as non-existing. */
- if (lq_sta && !lq_sta->drv) {
- IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
- priv_sta = NULL;
- }
-
- /* Send management frames and NO_ACK data using lowest rate. */
- if (rate_control_send_low(sta, priv_sta, txrc))
- return;
-
- if (!lq_sta)
- return;
-
- rate_idx = lq_sta->last_txrate_idx;
-
- if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
- rate_idx -= IWL_FIRST_OFDM_RATE;
- /* 6M and 9M shared same MCS index */
- rate_idx = (rate_idx > 0) ? (rate_idx - 1) : 0;
- if (iwl4965_rs_extract_rate(lq_sta->last_rate_n_flags) >=
- IWL_RATE_MIMO2_6M_PLCP)
- rate_idx = rate_idx + MCS_INDEX_PER_STREAM;
- info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
- if (lq_sta->last_rate_n_flags & RATE_MCS_SGI_MSK)
- info->control.rates[0].flags |=
- IEEE80211_TX_RC_SHORT_GI;
- if (lq_sta->last_rate_n_flags & RATE_MCS_DUP_MSK)
- info->control.rates[0].flags |=
- IEEE80211_TX_RC_DUP_DATA;
- if (lq_sta->last_rate_n_flags & RATE_MCS_HT40_MSK)
- info->control.rates[0].flags |=
- IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (lq_sta->last_rate_n_flags & RATE_MCS_GF_MSK)
- info->control.rates[0].flags |=
- IEEE80211_TX_RC_GREEN_FIELD;
- } else {
- /* Check for invalid rates */
- if ((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT_LEGACY) ||
- ((sband->band == IEEE80211_BAND_5GHZ) &&
- (rate_idx < IWL_FIRST_OFDM_RATE)))
- rate_idx = rate_lowest_index(sband, sta);
- /* On valid 5 GHz rate, adjust index */
- else if (sband->band == IEEE80211_BAND_5GHZ)
- rate_idx -= IWL_FIRST_OFDM_RATE;
- info->control.rates[0].flags = 0;
- }
- info->control.rates[0].idx = rate_idx;
-
-}
-
-static void *iwl4965_rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
- gfp_t gfp)
-{
- struct iwl_lq_sta *lq_sta;
- struct iwl_station_priv *sta_priv =
- (struct iwl_station_priv *) sta->drv_priv;
- struct iwl_priv *priv;
-
- priv = (struct iwl_priv *)priv_rate;
- IWL_DEBUG_RATE(priv, "create station rate scale window\n");
-
- lq_sta = &sta_priv->lq_sta;
-
- return lq_sta;
-}
-
-/*
- * Called after adding a new station to initialize rate scaling
- */
-void
-iwl4965_rs_rate_init(struct iwl_priv *priv,
- struct ieee80211_sta *sta,
- u8 sta_id)
-{
- int i, j;
- struct ieee80211_hw *hw = priv->hw;
- struct ieee80211_conf *conf = &priv->hw->conf;
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- struct iwl_station_priv *sta_priv;
- struct iwl_lq_sta *lq_sta;
- struct ieee80211_supported_band *sband;
-
- sta_priv = (struct iwl_station_priv *) sta->drv_priv;
- lq_sta = &sta_priv->lq_sta;
- sband = hw->wiphy->bands[conf->channel->band];
-
-
- lq_sta->lq.sta_id = sta_id;
-
- for (j = 0; j < LQ_SIZE; j++)
- for (i = 0; i < IWL_RATE_COUNT; i++)
- iwl4965_rs_rate_scale_clear_window(
- &lq_sta->lq_info[j].win[i]);
-
- lq_sta->flush_timer = 0;
- lq_sta->supp_rates = sta->supp_rates[sband->band];
- for (j = 0; j < LQ_SIZE; j++)
- for (i = 0; i < IWL_RATE_COUNT; i++)
- iwl4965_rs_rate_scale_clear_window(
- &lq_sta->lq_info[j].win[i]);
-
- IWL_DEBUG_RATE(priv, "LQ:"
- "*** rate scale station global init for station %d ***\n",
- sta_id);
- /* TODO: what is a good starting rate for STA? About middle? Maybe not
- * the lowest or the highest rate.. Could consider using RSSI from
- * previous packets? Need to have IEEE 802.1X auth succeed immediately
- * after assoc.. */
-
- lq_sta->is_dup = 0;
- lq_sta->max_rate_idx = -1;
- lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
- lq_sta->is_green = iwl4965_rs_use_green(sta);
- lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
- lq_sta->band = priv->band;
- /*
- * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
- * supp_rates[] does not; shift to convert format, force 9 MBits off.
- */
- lq_sta->active_siso_rate = ht_cap->mcs.rx_mask[0] << 1;
- lq_sta->active_siso_rate |= ht_cap->mcs.rx_mask[0] & 0x1;
- lq_sta->active_siso_rate &= ~((u16)0x2);
- lq_sta->active_siso_rate <<= IWL_FIRST_OFDM_RATE;
-
- /* Same here */
- lq_sta->active_mimo2_rate = ht_cap->mcs.rx_mask[1] << 1;
- lq_sta->active_mimo2_rate |= ht_cap->mcs.rx_mask[1] & 0x1;
- lq_sta->active_mimo2_rate &= ~((u16)0x2);
- lq_sta->active_mimo2_rate <<= IWL_FIRST_OFDM_RATE;
-
- /* These values will be overridden later */
- lq_sta->lq.general_params.single_stream_ant_msk =
- iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
- lq_sta->lq.general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant &
- ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
- if (!lq_sta->lq.general_params.dual_stream_ant_msk) {
- lq_sta->lq.general_params.dual_stream_ant_msk = ANT_AB;
- } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
- lq_sta->lq.general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant;
- }
-
-	/* by default, allow aggregation for all TIDs */
- lq_sta->tx_agg_tid_en = IWL_AGG_ALL_TID;
- lq_sta->drv = priv;
-
- /* Set last_txrate_idx to lowest rate */
- lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
- if (sband->band == IEEE80211_BAND_5GHZ)
- lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
- lq_sta->is_agg = 0;
-
-#ifdef CONFIG_MAC80211_DEBUGFS
- lq_sta->dbg_fixed_rate = 0;
-#endif
-
- iwl4965_rs_initialize_lq(priv, conf, sta, lq_sta);
-}
-
-static void iwl4965_rs_fill_link_cmd(struct iwl_priv *priv,
- struct iwl_lq_sta *lq_sta, u32 new_rate)
-{
- struct iwl_scale_tbl_info tbl_type;
- int index = 0;
- int rate_idx;
- int repeat_rate = 0;
- u8 ant_toggle_cnt = 0;
- u8 use_ht_possible = 1;
- u8 valid_tx_ant = 0;
- struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
-
- /* Override starting rate (index 0) if needed for debug purposes */
- iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
-
- /* Interpret new_rate (rate_n_flags) */
- iwl4965_rs_get_tbl_info_from_mcs(new_rate, lq_sta->band,
- &tbl_type, &rate_idx);
-
- /* How many times should we repeat the initial rate? */
- if (is_legacy(tbl_type.lq_type)) {
- ant_toggle_cnt = 1;
- repeat_rate = IWL_NUMBER_TRY;
- } else {
- repeat_rate = IWL_HT_NUMBER_TRY;
- }
-
- lq_cmd->general_params.mimo_delimiter =
- is_mimo(tbl_type.lq_type) ? 1 : 0;
-
- /* Fill 1st table entry (index 0) */
- lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
-
- if (iwl4965_num_of_ant(tbl_type.ant_type) == 1) {
- lq_cmd->general_params.single_stream_ant_msk =
- tbl_type.ant_type;
- } else if (iwl4965_num_of_ant(tbl_type.ant_type) == 2) {
- lq_cmd->general_params.dual_stream_ant_msk =
- tbl_type.ant_type;
- } /* otherwise we don't modify the existing value */
-
- index++;
- repeat_rate--;
- if (priv)
- valid_tx_ant = priv->hw_params.valid_tx_ant;
-
- /* Fill rest of rate table */
- while (index < LINK_QUAL_MAX_RETRY_NUM) {
- /* Repeat initial/next rate.
- * For legacy IWL_NUMBER_TRY == 1, this loop will not execute.
- * For HT IWL_HT_NUMBER_TRY == 3, this executes twice. */
- while (repeat_rate > 0 && (index < LINK_QUAL_MAX_RETRY_NUM)) {
- if (is_legacy(tbl_type.lq_type)) {
- if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
- ant_toggle_cnt++;
- else if (priv &&
- iwl4965_rs_toggle_antenna(valid_tx_ant,
- &new_rate, &tbl_type))
- ant_toggle_cnt = 1;
- }
-
- /* Override next rate if needed for debug purposes */
- iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
-
- /* Fill next table entry */
- lq_cmd->rs_table[index].rate_n_flags =
- cpu_to_le32(new_rate);
- repeat_rate--;
- index++;
- }
-
- iwl4965_rs_get_tbl_info_from_mcs(new_rate,
- lq_sta->band, &tbl_type,
- &rate_idx);
-
- /* Indicate to uCode which entries might be MIMO.
- * If initial rate was MIMO, this will finally end up
- * as (IWL_HT_NUMBER_TRY * 2), after 2nd pass, otherwise 0. */
- if (is_mimo(tbl_type.lq_type))
- lq_cmd->general_params.mimo_delimiter = index;
-
- /* Get next rate */
- new_rate = iwl4965_rs_get_lower_rate(lq_sta,
- &tbl_type, rate_idx,
- use_ht_possible);
-
- /* How many times should we repeat the next rate? */
- if (is_legacy(tbl_type.lq_type)) {
- if (ant_toggle_cnt < NUM_TRY_BEFORE_ANT_TOGGLE)
- ant_toggle_cnt++;
- else if (priv &&
- iwl4965_rs_toggle_antenna(valid_tx_ant,
- &new_rate, &tbl_type))
- ant_toggle_cnt = 1;
-
- repeat_rate = IWL_NUMBER_TRY;
- } else {
- repeat_rate = IWL_HT_NUMBER_TRY;
- }
-
- /* Don't allow HT rates after next pass.
- * iwl4965_rs_get_lower_rate() will change type to LQ_A or LQ_G. */
- use_ht_possible = 0;
-
- /* Override next rate if needed for debug purposes */
- iwl4965_rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
-
- /* Fill next table entry */
- lq_cmd->rs_table[index].rate_n_flags = cpu_to_le32(new_rate);
-
- index++;
- repeat_rate--;
- }
-
- lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
- lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
-
- lq_cmd->agg_params.agg_time_limit =
- cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
-}
-
-static void
-*iwl4965_rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
-{
- return hw->priv;
-}
-/* rate scale requires free function to be implemented */
-static void iwl4965_rs_free(void *priv_rate)
-{
- return;
-}
-
-static void iwl4965_rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
- void *priv_sta)
-{
- struct iwl_priv *priv __maybe_unused = priv_r;
-
- IWL_DEBUG_RATE(priv, "enter\n");
- IWL_DEBUG_RATE(priv, "leave\n");
-}
-
-
-#ifdef CONFIG_MAC80211_DEBUGFS
-static int iwl4965_open_file_generic(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-static void iwl4965_rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
- u32 *rate_n_flags, int index)
-{
- struct iwl_priv *priv;
- u8 valid_tx_ant;
- u8 ant_sel_tx;
-
- priv = lq_sta->drv;
- valid_tx_ant = priv->hw_params.valid_tx_ant;
- if (lq_sta->dbg_fixed_rate) {
- ant_sel_tx =
- ((lq_sta->dbg_fixed_rate & RATE_MCS_ANT_ABC_MSK)
- >> RATE_MCS_ANT_POS);
- if ((valid_tx_ant & ant_sel_tx) == ant_sel_tx) {
- *rate_n_flags = lq_sta->dbg_fixed_rate;
- IWL_DEBUG_RATE(priv, "Fixed rate ON\n");
- } else {
- lq_sta->dbg_fixed_rate = 0;
- IWL_ERR(priv,
- "Invalid antenna selection 0x%X, Valid is 0x%X\n",
- ant_sel_tx, valid_tx_ant);
- IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
- }
- } else {
- IWL_DEBUG_RATE(priv, "Fixed rate OFF\n");
- }
-}
-
-static ssize_t iwl4965_rs_sta_dbgfs_scale_table_write(struct file *file,
- const char __user *user_buf, size_t count, loff_t *ppos)
-{
- struct iwl_lq_sta *lq_sta = file->private_data;
- struct iwl_priv *priv;
- char buf[64];
- size_t buf_size;
- u32 parsed_rate;
- struct iwl_station_priv *sta_priv =
- container_of(lq_sta, struct iwl_station_priv, lq_sta);
- struct iwl_rxon_context *ctx = sta_priv->common.ctx;
-
- priv = lq_sta->drv;
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
- if (sscanf(buf, "%x", &parsed_rate) == 1)
- lq_sta->dbg_fixed_rate = parsed_rate;
- else
- lq_sta->dbg_fixed_rate = 0;
-
- lq_sta->active_legacy_rate = 0x0FFF; /* 1 - 54 MBits, includes CCK */
- lq_sta->active_siso_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
- lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
-
- IWL_DEBUG_RATE(priv, "sta_id %d rate 0x%X\n",
- lq_sta->lq.sta_id, lq_sta->dbg_fixed_rate);
-
- if (lq_sta->dbg_fixed_rate) {
- iwl4965_rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
- iwl_legacy_send_lq_cmd(lq_sta->drv, ctx, &lq_sta->lq, CMD_ASYNC,
- false);
- }
-
- return count;
-}
-
-static ssize_t iwl4965_rs_sta_dbgfs_scale_table_read(struct file *file,
- char __user *user_buf, size_t count, loff_t *ppos)
-{
- char *buff;
- int desc = 0;
- int i = 0;
- int index = 0;
- ssize_t ret;
-
- struct iwl_lq_sta *lq_sta = file->private_data;
- struct iwl_priv *priv;
- struct iwl_scale_tbl_info *tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
-
- priv = lq_sta->drv;
- buff = kmalloc(1024, GFP_KERNEL);
- if (!buff)
- return -ENOMEM;
-
- desc += sprintf(buff+desc, "sta_id %d\n", lq_sta->lq.sta_id);
- desc += sprintf(buff+desc, "failed=%d success=%d rate=0%X\n",
- lq_sta->total_failed, lq_sta->total_success,
- lq_sta->active_legacy_rate);
- desc += sprintf(buff+desc, "fixed rate 0x%X\n",
- lq_sta->dbg_fixed_rate);
- desc += sprintf(buff+desc, "valid_tx_ant %s%s%s\n",
- (priv->hw_params.valid_tx_ant & ANT_A) ? "ANT_A," : "",
- (priv->hw_params.valid_tx_ant & ANT_B) ? "ANT_B," : "",
- (priv->hw_params.valid_tx_ant & ANT_C) ? "ANT_C" : "");
- desc += sprintf(buff+desc, "lq type %s\n",
- (is_legacy(tbl->lq_type)) ? "legacy" : "HT");
- if (is_Ht(tbl->lq_type)) {
- desc += sprintf(buff+desc, " %s",
- (is_siso(tbl->lq_type)) ? "SISO" : "MIMO2");
- desc += sprintf(buff+desc, " %s",
- (tbl->is_ht40) ? "40MHz" : "20MHz");
- desc += sprintf(buff+desc, " %s %s %s\n",
- (tbl->is_SGI) ? "SGI" : "",
- (lq_sta->is_green) ? "GF enabled" : "",
- (lq_sta->is_agg) ? "AGG on" : "");
- }
- desc += sprintf(buff+desc, "last tx rate=0x%X\n",
- lq_sta->last_rate_n_flags);
- desc += sprintf(buff+desc, "general:"
- "flags=0x%X mimo-d=%d s-ant0x%x d-ant=0x%x\n",
- lq_sta->lq.general_params.flags,
- lq_sta->lq.general_params.mimo_delimiter,
- lq_sta->lq.general_params.single_stream_ant_msk,
- lq_sta->lq.general_params.dual_stream_ant_msk);
-
- desc += sprintf(buff+desc, "agg:"
- "time_limit=%d dist_start_th=%d frame_cnt_limit=%d\n",
- le16_to_cpu(lq_sta->lq.agg_params.agg_time_limit),
- lq_sta->lq.agg_params.agg_dis_start_th,
- lq_sta->lq.agg_params.agg_frame_cnt_limit);
-
- desc += sprintf(buff+desc,
- "Start idx [0]=0x%x [1]=0x%x [2]=0x%x [3]=0x%x\n",
- lq_sta->lq.general_params.start_rate_index[0],
- lq_sta->lq.general_params.start_rate_index[1],
- lq_sta->lq.general_params.start_rate_index[2],
- lq_sta->lq.general_params.start_rate_index[3]);
-
- for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
- index = iwl4965_hwrate_to_plcp_idx(
- le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags));
- if (is_legacy(tbl->lq_type)) {
- desc += sprintf(buff+desc, " rate[%d] 0x%X %smbps\n",
- i,
- le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
- iwl_rate_mcs[index].mbps);
- } else {
- desc += sprintf(buff+desc,
- " rate[%d] 0x%X %smbps (%s)\n",
- i,
- le32_to_cpu(lq_sta->lq.rs_table[i].rate_n_flags),
- iwl_rate_mcs[index].mbps, iwl_rate_mcs[index].mcs);
- }
- }
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
- kfree(buff);
- return ret;
-}
-
-static const struct file_operations rs_sta_dbgfs_scale_table_ops = {
- .write = iwl4965_rs_sta_dbgfs_scale_table_write,
- .read = iwl4965_rs_sta_dbgfs_scale_table_read,
- .open = iwl4965_open_file_generic,
- .llseek = default_llseek,
-};
-static ssize_t iwl4965_rs_sta_dbgfs_stats_table_read(struct file *file,
- char __user *user_buf, size_t count, loff_t *ppos)
-{
- char *buff;
- int desc = 0;
- int i, j;
- ssize_t ret;
-
- struct iwl_lq_sta *lq_sta = file->private_data;
-
- buff = kmalloc(1024, GFP_KERNEL);
- if (!buff)
- return -ENOMEM;
-
- for (i = 0; i < LQ_SIZE; i++) {
- desc += sprintf(buff+desc,
- "%s type=%d SGI=%d HT40=%d DUP=%d GF=%d\n"
- "rate=0x%X\n",
- lq_sta->active_tbl == i ? "*" : "x",
- lq_sta->lq_info[i].lq_type,
- lq_sta->lq_info[i].is_SGI,
- lq_sta->lq_info[i].is_ht40,
- lq_sta->lq_info[i].is_dup,
- lq_sta->is_green,
- lq_sta->lq_info[i].current_rate);
- for (j = 0; j < IWL_RATE_COUNT; j++) {
- desc += sprintf(buff+desc,
- "counter=%d success=%d %%=%d\n",
- lq_sta->lq_info[i].win[j].counter,
- lq_sta->lq_info[i].win[j].success_counter,
- lq_sta->lq_info[i].win[j].success_ratio);
- }
- }
- ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
- kfree(buff);
- return ret;
-}
-
-static const struct file_operations rs_sta_dbgfs_stats_table_ops = {
- .read = iwl4965_rs_sta_dbgfs_stats_table_read,
- .open = iwl4965_open_file_generic,
- .llseek = default_llseek,
-};
-
-static ssize_t iwl4965_rs_sta_dbgfs_rate_scale_data_read(struct file *file,
- char __user *user_buf, size_t count, loff_t *ppos)
-{
- char buff[120];
- int desc = 0;
- ssize_t ret;
-
- struct iwl_lq_sta *lq_sta = file->private_data;
- struct iwl_priv *priv;
- struct iwl_scale_tbl_info *tbl = &lq_sta->lq_info[lq_sta->active_tbl];
-
- priv = lq_sta->drv;
-
- if (is_Ht(tbl->lq_type))
- desc += sprintf(buff+desc,
- "Bit Rate= %d Mb/s\n",
- tbl->expected_tpt[lq_sta->last_txrate_idx]);
- else
- desc += sprintf(buff+desc,
- "Bit Rate= %d Mb/s\n",
- iwlegacy_rates[lq_sta->last_txrate_idx].ieee >> 1);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
- return ret;
-}
-
-static const struct file_operations rs_sta_dbgfs_rate_scale_data_ops = {
- .read = iwl4965_rs_sta_dbgfs_rate_scale_data_read,
- .open = iwl4965_open_file_generic,
- .llseek = default_llseek,
-};
-
-static void iwl4965_rs_add_debugfs(void *priv, void *priv_sta,
- struct dentry *dir)
-{
- struct iwl_lq_sta *lq_sta = priv_sta;
- lq_sta->rs_sta_dbgfs_scale_table_file =
- debugfs_create_file("rate_scale_table", S_IRUSR | S_IWUSR, dir,
- lq_sta, &rs_sta_dbgfs_scale_table_ops);
- lq_sta->rs_sta_dbgfs_stats_table_file =
- debugfs_create_file("rate_stats_table", S_IRUSR, dir,
- lq_sta, &rs_sta_dbgfs_stats_table_ops);
- lq_sta->rs_sta_dbgfs_rate_scale_data_file =
- debugfs_create_file("rate_scale_data", S_IRUSR, dir,
- lq_sta, &rs_sta_dbgfs_rate_scale_data_ops);
- lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file =
- debugfs_create_u8("tx_agg_tid_enable", S_IRUSR | S_IWUSR, dir,
- &lq_sta->tx_agg_tid_en);
-
-}
-
-static void iwl4965_rs_remove_debugfs(void *priv, void *priv_sta)
-{
- struct iwl_lq_sta *lq_sta = priv_sta;
- debugfs_remove(lq_sta->rs_sta_dbgfs_scale_table_file);
- debugfs_remove(lq_sta->rs_sta_dbgfs_stats_table_file);
- debugfs_remove(lq_sta->rs_sta_dbgfs_rate_scale_data_file);
- debugfs_remove(lq_sta->rs_sta_dbgfs_tx_agg_tid_en_file);
-}
-#endif
-
-/*
- * Initialization of rate scaling information is done by driver after
- * the station is added. Since mac80211 calls this function before a
- * station is added we ignore it.
- */
-static void
-iwl4965_rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta)
-{
-}
-static struct rate_control_ops rs_4965_ops = {
- .module = NULL,
- .name = IWL4965_RS_NAME,
- .tx_status = iwl4965_rs_tx_status,
- .get_rate = iwl4965_rs_get_rate,
- .rate_init = iwl4965_rs_rate_init_stub,
- .alloc = iwl4965_rs_alloc,
- .free = iwl4965_rs_free,
- .alloc_sta = iwl4965_rs_alloc_sta,
- .free_sta = iwl4965_rs_free_sta,
-#ifdef CONFIG_MAC80211_DEBUGFS
- .add_sta_debugfs = iwl4965_rs_add_debugfs,
- .remove_sta_debugfs = iwl4965_rs_remove_debugfs,
-#endif
-};
-
-int iwl4965_rate_control_register(void)
-{
- return ieee80211_rate_control_register(&rs_4965_ops);
-}
-
-void iwl4965_rate_control_unregister(void)
-{
- ieee80211_rate_control_unregister(&rs_4965_ops);
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-rx.c b/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
deleted file mode 100644
index 2b144bb..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-rx.c
+++ /dev/null
@@ -1,215 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-4965-calib.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-hw.h"
-#include "iwl-4965.h"
-
-void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_missed_beacon_notif *missed_beacon;
-
- missed_beacon = &pkt->u.missed_beacon;
- if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
- priv->missed_beacon_threshold) {
- IWL_DEBUG_CALIB(priv,
- "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
- le32_to_cpu(missed_beacon->consecutive_missed_beacons),
- le32_to_cpu(missed_beacon->total_missed_becons),
- le32_to_cpu(missed_beacon->num_recvd_beacons),
- le32_to_cpu(missed_beacon->num_expected_beacons));
- if (!test_bit(STATUS_SCANNING, &priv->status))
- iwl4965_init_sensitivity(priv);
- }
-}
-
-/* Calculate noise level, based on measurements during network silence just
- * before the arriving beacon. This measurement can be done only if we know
- * exactly when to expect beacons, therefore only when we're associated. */
-static void iwl4965_rx_calc_noise(struct iwl_priv *priv)
-{
- struct statistics_rx_non_phy *rx_info;
- int num_active_rx = 0;
- int total_silence = 0;
- int bcn_silence_a, bcn_silence_b, bcn_silence_c;
- int last_rx_noise;
-
- rx_info = &(priv->_4965.statistics.rx.general);
- bcn_silence_a =
- le32_to_cpu(rx_info->beacon_silence_rssi_a) & IN_BAND_FILTER;
- bcn_silence_b =
- le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
- bcn_silence_c =
- le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
-
- if (bcn_silence_a) {
- total_silence += bcn_silence_a;
- num_active_rx++;
- }
- if (bcn_silence_b) {
- total_silence += bcn_silence_b;
- num_active_rx++;
- }
- if (bcn_silence_c) {
- total_silence += bcn_silence_c;
- num_active_rx++;
- }
-
- /* Average among active antennas */
- if (num_active_rx)
- last_rx_noise = (total_silence / num_active_rx) - 107;
- else
- last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
-
- IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
- bcn_silence_a, bcn_silence_b, bcn_silence_c,
- last_rx_noise);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-/*
- * Based on the assumption that all statistics counters are DWORDs.
- * FIXME: this function is for debugging only and does not handle
- * counter roll-over.
- */
-static void iwl4965_accumulative_statistics(struct iwl_priv *priv,
- __le32 *stats)
-{
- int i, size;
- __le32 *prev_stats;
- u32 *accum_stats;
- u32 *delta, *max_delta;
- struct statistics_general_common *general, *accum_general;
- struct statistics_tx *tx, *accum_tx;
-
- prev_stats = (__le32 *)&priv->_4965.statistics;
- accum_stats = (u32 *)&priv->_4965.accum_statistics;
- size = sizeof(struct iwl_notif_statistics);
- general = &priv->_4965.statistics.general.common;
- accum_general = &priv->_4965.accum_statistics.general.common;
- tx = &priv->_4965.statistics.tx;
- accum_tx = &priv->_4965.accum_statistics.tx;
- delta = (u32 *)&priv->_4965.delta_statistics;
- max_delta = (u32 *)&priv->_4965.max_delta;
-
- for (i = sizeof(__le32); i < size;
- i += sizeof(__le32), stats++, prev_stats++, delta++,
- max_delta++, accum_stats++) {
- if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
- *delta = (le32_to_cpu(*stats) -
- le32_to_cpu(*prev_stats));
- *accum_stats += *delta;
- if (*delta > *max_delta)
- *max_delta = *delta;
- }
- }
-
- /* reset accumulative statistics for "no-counter" type statistics */
- accum_general->temperature = general->temperature;
- accum_general->ttl_timestamp = general->ttl_timestamp;
-}
-#endif
-
-#define REG_RECALIB_PERIOD (60)
-
-void iwl4965_rx_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- int change;
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
- IWL_DEBUG_RX(priv,
- "Statistics notification received (%d vs %d).\n",
- (int)sizeof(struct iwl_notif_statistics),
- le32_to_cpu(pkt->len_n_flags) &
- FH_RSCSR_FRAME_SIZE_MSK);
-
- change = ((priv->_4965.statistics.general.common.temperature !=
- pkt->u.stats.general.common.temperature) ||
- ((priv->_4965.statistics.flag &
- STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
- (pkt->u.stats.flag &
- STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- iwl4965_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
-#endif
-
- /* TODO: reading some of statistics is unneeded */
- memcpy(&priv->_4965.statistics, &pkt->u.stats,
- sizeof(priv->_4965.statistics));
-
- set_bit(STATUS_STATISTICS, &priv->status);
-
- /* Reschedule the statistics timer to occur in
- * REG_RECALIB_PERIOD seconds to ensure we get a
- * thermal update even if the uCode doesn't give
- * us one */
- mod_timer(&priv->statistics_periodic, jiffies +
- msecs_to_jiffies(REG_RECALIB_PERIOD * 1000));
-
- if (unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
- (pkt->hdr.cmd == STATISTICS_NOTIFICATION)) {
- iwl4965_rx_calc_noise(priv);
- queue_work(priv->workqueue, &priv->run_time_calib_work);
- }
- if (priv->cfg->ops->lib->temp_ops.temperature && change)
- priv->cfg->ops->lib->temp_ops.temperature(priv);
-}
-
-void iwl4965_reply_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
- if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- memset(&priv->_4965.accum_statistics, 0,
- sizeof(struct iwl_notif_statistics));
- memset(&priv->_4965.delta_statistics, 0,
- sizeof(struct iwl_notif_statistics));
- memset(&priv->_4965.max_delta, 0,
- sizeof(struct iwl_notif_statistics));
-#endif
- IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
- }
- iwl4965_rx_statistics(priv, rxb);
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c b/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
deleted file mode 100644
index a262c23..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-sta.c
+++ /dev/null
@@ -1,721 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <net/mac80211.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-4965.h"
-
-static struct iwl_link_quality_cmd *
-iwl4965_sta_alloc_lq(struct iwl_priv *priv, u8 sta_id)
-{
- int i, r;
- struct iwl_link_quality_cmd *link_cmd;
- u32 rate_flags = 0;
- __le32 rate_n_flags;
-
- link_cmd = kzalloc(sizeof(struct iwl_link_quality_cmd), GFP_KERNEL);
- if (!link_cmd) {
- IWL_ERR(priv, "Unable to allocate memory for LQ cmd.\n");
- return NULL;
- }
- /* Set up the rate scaling to start at selected rate, fall back
- * all the way down to 1M in IEEE order, and then spin on 1M */
- if (priv->band == IEEE80211_BAND_5GHZ)
- r = IWL_RATE_6M_INDEX;
- else
- r = IWL_RATE_1M_INDEX;
-
- if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
- rate_flags |= RATE_MCS_CCK_MSK;
-
- rate_flags |= iwl4965_first_antenna(priv->hw_params.valid_tx_ant) <<
- RATE_MCS_ANT_POS;
- rate_n_flags = iwl4965_hw_set_rate_n_flags(iwlegacy_rates[r].plcp,
- rate_flags);
- for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
- link_cmd->rs_table[i].rate_n_flags = rate_n_flags;
-
- link_cmd->general_params.single_stream_ant_msk =
- iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
-
- link_cmd->general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant &
- ~iwl4965_first_antenna(priv->hw_params.valid_tx_ant);
- if (!link_cmd->general_params.dual_stream_ant_msk) {
- link_cmd->general_params.dual_stream_ant_msk = ANT_AB;
- } else if (iwl4965_num_of_ant(priv->hw_params.valid_tx_ant) == 2) {
- link_cmd->general_params.dual_stream_ant_msk =
- priv->hw_params.valid_tx_ant;
- }
-
- link_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
- link_cmd->agg_params.agg_time_limit =
- cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
-
- link_cmd->sta_id = sta_id;
-
- return link_cmd;
-}
-
-/*
- * iwl4965_add_bssid_station - Add the special IBSS BSSID station
- *
- * Function sleeps.
- */
-int
-iwl4965_add_bssid_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- const u8 *addr, u8 *sta_id_r)
-{
- int ret;
- u8 sta_id;
- struct iwl_link_quality_cmd *link_cmd;
- unsigned long flags;
-
- if (sta_id_r)
- *sta_id_r = IWL_INVALID_STATION;
-
- ret = iwl_legacy_add_station_common(priv, ctx, addr, 0, NULL, &sta_id);
- if (ret) {
- IWL_ERR(priv, "Unable to add station %pM\n", addr);
- return ret;
- }
-
- if (sta_id_r)
- *sta_id_r = sta_id;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].used |= IWL_STA_LOCAL;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- /* Set up default rate scaling table in device's station table */
- link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
- if (!link_cmd) {
- IWL_ERR(priv,
- "Unable to initialize rate scaling for station %pM.\n",
- addr);
- return -ENOMEM;
- }
-
- ret = iwl_legacy_send_lq_cmd(priv, ctx, link_cmd, CMD_SYNC, true);
- if (ret)
- IWL_ERR(priv, "Link quality command failed (%d)\n", ret);
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].lq = link_cmd;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return 0;
-}
-
-static int iwl4965_static_wepkey_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- bool send_if_empty)
-{
- int i, not_empty = 0;
- u8 buff[sizeof(struct iwl_wep_cmd) +
- sizeof(struct iwl_wep_key) * WEP_KEYS_MAX];
- struct iwl_wep_cmd *wep_cmd = (struct iwl_wep_cmd *)buff;
- size_t cmd_size = sizeof(struct iwl_wep_cmd);
- struct iwl_host_cmd cmd = {
- .id = ctx->wep_key_cmd,
- .data = wep_cmd,
- .flags = CMD_SYNC,
- };
-
- might_sleep();
-
- memset(wep_cmd, 0, cmd_size +
- (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));
-
- for (i = 0; i < WEP_KEYS_MAX ; i++) {
- wep_cmd->key[i].key_index = i;
- if (ctx->wep_keys[i].key_size) {
- wep_cmd->key[i].key_offset = i;
- not_empty = 1;
- } else {
- wep_cmd->key[i].key_offset = WEP_INVALID_OFFSET;
- }
-
- wep_cmd->key[i].key_size = ctx->wep_keys[i].key_size;
- memcpy(&wep_cmd->key[i].key[3], ctx->wep_keys[i].key,
- ctx->wep_keys[i].key_size);
- }
-
- wep_cmd->global_key_type = WEP_KEY_WEP_TYPE;
- wep_cmd->num_keys = WEP_KEYS_MAX;
-
- cmd_size += sizeof(struct iwl_wep_key) * WEP_KEYS_MAX;
-
- cmd.len = cmd_size;
-
- if (not_empty || send_if_empty)
- return iwl_legacy_send_cmd(priv, &cmd);
- else
- return 0;
-}
-
-int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- lockdep_assert_held(&priv->mutex);
-
- return iwl4965_static_wepkey_cmd(priv, ctx, false);
-}
-
-int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf)
-{
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
- keyconf->keyidx);
-
- memset(&ctx->wep_keys[keyconf->keyidx], 0, sizeof(ctx->wep_keys[0]));
- if (iwl_legacy_is_rfkill(priv)) {
- IWL_DEBUG_WEP(priv,
- "Not sending REPLY_WEPKEY command due to RFKILL.\n");
- /* but keys in device are clear anyway so return success */
- return 0;
- }
- ret = iwl4965_static_wepkey_cmd(priv, ctx, 1);
- IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
- keyconf->keyidx, ret);
-
- return ret;
-}
-
-int iwl4965_set_default_wep_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf)
-{
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- if (keyconf->keylen != WEP_KEY_LEN_128 &&
- keyconf->keylen != WEP_KEY_LEN_64) {
- IWL_DEBUG_WEP(priv, "Bad WEP key length %d\n", keyconf->keylen);
- return -EINVAL;
- }
-
- keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
- keyconf->hw_key_idx = HW_KEY_DEFAULT;
- priv->stations[ctx->ap_sta_id].keyinfo.cipher = keyconf->cipher;
-
- ctx->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
- memcpy(&ctx->wep_keys[keyconf->keyidx].key, &keyconf->key,
- keyconf->keylen);
-
- ret = iwl4965_static_wepkey_cmd(priv, ctx, false);
- IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
- keyconf->keylen, keyconf->keyidx, ret);
-
- return ret;
-}
-
-static int iwl4965_set_wep_dynamic_key_info(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- unsigned long flags;
- __le16 key_flags = 0;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- lockdep_assert_held(&priv->mutex);
-
- keyconf->flags &= ~IEEE80211_KEY_FLAG_GENERATE_IV;
-
- key_flags |= (STA_KEY_FLG_WEP | STA_KEY_FLG_MAP_KEY_MSK);
- key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
- key_flags &= ~STA_KEY_FLG_INVALID;
-
- if (keyconf->keylen == WEP_KEY_LEN_128)
- key_flags |= STA_KEY_FLG_KEY_SIZE_MSK;
-
- if (sta_id == ctx->bcast_sta_id)
- key_flags |= STA_KEY_MULTICAST_MSK;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
- priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
- priv->stations[sta_id].keyinfo.keyidx = keyconf->keyidx;
-
- memcpy(priv->stations[sta_id].keyinfo.key,
- keyconf->key, keyconf->keylen);
-
- memcpy(&priv->stations[sta_id].sta.key.key[3],
- keyconf->key, keyconf->keylen);
-
- if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
- == STA_KEY_FLG_NO_ENC)
- priv->stations[sta_id].sta.key.key_offset =
- iwl_legacy_get_free_ucode_key_index(priv);
-	/* else, we are overriding an existing key => no need to allocate room
-	 * in uCode. */
-
- WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
- "no space for a new key");
-
- priv->stations[sta_id].sta.key.key_flags = key_flags;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
- memcpy(&sta_cmd, &priv->stations[sta_id].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-static int iwl4965_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- unsigned long flags;
- __le16 key_flags = 0;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- lockdep_assert_held(&priv->mutex);
-
- key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
- key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
- key_flags &= ~STA_KEY_FLG_INVALID;
-
- if (sta_id == ctx->bcast_sta_id)
- key_flags |= STA_KEY_MULTICAST_MSK;
-
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
- priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
-
- memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
- keyconf->keylen);
-
- memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
- keyconf->keylen);
-
- if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
- == STA_KEY_FLG_NO_ENC)
- priv->stations[sta_id].sta.key.key_offset =
- iwl_legacy_get_free_ucode_key_index(priv);
-	/* else, we are overriding an existing key => no need to allocate room
-	 * in uCode. */
-
- WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
- "no space for a new key");
-
- priv->stations[sta_id].sta.key.key_flags = key_flags;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
- memcpy(&sta_cmd, &priv->stations[sta_id].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-static int iwl4965_set_tkip_dynamic_key_info(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- unsigned long flags;
- int ret = 0;
- __le16 key_flags = 0;
-
- key_flags |= (STA_KEY_FLG_TKIP | STA_KEY_FLG_MAP_KEY_MSK);
- key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
- key_flags &= ~STA_KEY_FLG_INVALID;
-
- if (sta_id == ctx->bcast_sta_id)
- key_flags |= STA_KEY_MULTICAST_MSK;
-
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
- priv->stations[sta_id].keyinfo.keylen = 16;
-
- if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
- == STA_KEY_FLG_NO_ENC)
- priv->stations[sta_id].sta.key.key_offset =
- iwl_legacy_get_free_ucode_key_index(priv);
-	/* else, we are overriding an existing key => no need to allocate room
-	 * in uCode. */
-
- WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
- "no space for a new key");
-
- priv->stations[sta_id].sta.key.key_flags = key_flags;
-
-
-	/* This copy is actually not needed: we get the key with each TX */
- memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key, 16);
-
- memcpy(priv->stations[sta_id].sta.key.key, keyconf->key, 16);
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return ret;
-}
-
-void iwl4965_update_tkip_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf,
- struct ieee80211_sta *sta, u32 iv32, u16 *phase1key)
-{
- u8 sta_id;
- unsigned long flags;
- int i;
-
- if (iwl_legacy_scan_cancel(priv)) {
- /* cancel scan failed, just live w/ bad key and rely
- briefly on SW decryption */
- return;
- }
-
- sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, sta);
- if (sta_id == IWL_INVALID_STATION)
- return;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- priv->stations[sta_id].sta.key.tkip_rx_tsc_byte2 = (u8) iv32;
-
- for (i = 0; i < 5; i++)
- priv->stations[sta_id].sta.key.tkip_rx_ttak[i] =
- cpu_to_le16(phase1key[i]);
-
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
- iwl_legacy_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-}
-
-int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- unsigned long flags;
- u16 key_flags;
- u8 keyidx;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- lockdep_assert_held(&priv->mutex);
-
- ctx->key_mapping_keys--;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- key_flags = le16_to_cpu(priv->stations[sta_id].sta.key.key_flags);
- keyidx = (key_flags >> STA_KEY_FLG_KEYID_POS) & 0x3;
-
- IWL_DEBUG_WEP(priv, "Remove dynamic key: idx=%d sta=%d\n",
- keyconf->keyidx, sta_id);
-
- if (keyconf->keyidx != keyidx) {
-		/* We need to remove a key whose index differs from the one
-		 * in the uCode. This means the key we need to remove has
-		 * already been replaced by another one with a different index.
-		 * Don't do anything and return ok.
- */
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return 0;
- }
-
- if (priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET) {
- IWL_WARN(priv, "Removing wrong key %d 0x%x\n",
- keyconf->keyidx, key_flags);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return 0;
- }
-
- if (!test_and_clear_bit(priv->stations[sta_id].sta.key.key_offset,
- &priv->ucode_key_table))
- IWL_ERR(priv, "index %d not used in uCode key table.\n",
- priv->stations[sta_id].sta.key.key_offset);
- memset(&priv->stations[sta_id].keyinfo, 0,
- sizeof(struct iwl_hw_key));
- memset(&priv->stations[sta_id].sta.key, 0,
- sizeof(struct iwl4965_keyinfo));
- priv->stations[sta_id].sta.key.key_flags =
- STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
- priv->stations[sta_id].sta.key.key_offset = WEP_INVALID_OFFSET;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
- if (iwl_legacy_is_rfkill(priv)) {
- IWL_DEBUG_WEP(priv,
- "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return 0;
- }
- memcpy(&sta_cmd, &priv->stations[sta_id].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-int iwl4965_set_dynamic_key(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf, u8 sta_id)
-{
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- ctx->key_mapping_keys++;
- keyconf->hw_key_idx = HW_KEY_DYNAMIC;
-
- switch (keyconf->cipher) {
- case WLAN_CIPHER_SUITE_CCMP:
- ret = iwl4965_set_ccmp_dynamic_key_info(priv, ctx,
- keyconf, sta_id);
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- ret = iwl4965_set_tkip_dynamic_key_info(priv, ctx,
- keyconf, sta_id);
- break;
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- ret = iwl4965_set_wep_dynamic_key_info(priv, ctx,
- keyconf, sta_id);
- break;
- default:
- IWL_ERR(priv,
- "Unknown alg: %s cipher = %x\n", __func__,
- keyconf->cipher);
- ret = -EINVAL;
- }
-
- IWL_DEBUG_WEP(priv,
- "Set dynamic key: cipher=%x len=%d idx=%d sta=%d ret=%d\n",
- keyconf->cipher, keyconf->keylen, keyconf->keyidx,
- sta_id, ret);
-
- return ret;
-}
-
-/**
- * iwl4965_alloc_bcast_station - add broadcast station into driver's station table.
- *
- * This adds the broadcast station into the driver's station table
- * and marks it driver active, so that it will be restored to the
- * device at the next best time.
- */
-int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- struct iwl_link_quality_cmd *link_cmd;
- unsigned long flags;
- u8 sta_id;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- sta_id = iwl_legacy_prep_station(priv, ctx, iwlegacy_bcast_addr,
- false, NULL);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Unable to prepare broadcast station\n");
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return -EINVAL;
- }
-
- priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
- priv->stations[sta_id].used |= IWL_STA_BCAST;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
- if (!link_cmd) {
- IWL_ERR(priv,
- "Unable to initialize rate scaling for bcast station.\n");
- return -ENOMEM;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].lq = link_cmd;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return 0;
-}
-
-/**
- * iwl4965_update_bcast_station - update broadcast station's LQ command
- *
- * Only used by iwl4965. Placed here to have all bcast station management
- * code together.
- */
-static int iwl4965_update_bcast_station(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- unsigned long flags;
- struct iwl_link_quality_cmd *link_cmd;
- u8 sta_id = ctx->bcast_sta_id;
-
- link_cmd = iwl4965_sta_alloc_lq(priv, sta_id);
- if (!link_cmd) {
- IWL_ERR(priv,
- "Unable to initialize rate scaling for bcast station.\n");
- return -ENOMEM;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- if (priv->stations[sta_id].lq)
- kfree(priv->stations[sta_id].lq);
- else
- IWL_DEBUG_INFO(priv,
- "Bcast station rate scaling has not been initialized yet.\n");
- priv->stations[sta_id].lq = link_cmd;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return 0;
-}
-
-int iwl4965_update_bcast_stations(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx;
- int ret = 0;
-
- for_each_context(priv, ctx) {
- ret = iwl4965_update_bcast_station(priv, ctx);
- if (ret)
- break;
- }
-
- return ret;
-}
-
-/**
- * iwl4965_sta_tx_modify_enable_tid - Enable Tx for this TID in station table
- */
-int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid)
-{
- unsigned long flags;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- lockdep_assert_held(&priv->mutex);
-
- /* Remove "disable" flag, to enable Tx for this TID */
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_TID_DISABLE_TX;
- priv->stations[sta_id].sta.tid_disable_tx &= cpu_to_le16(~(1 << tid));
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- memcpy(&sta_cmd, &priv->stations[sta_id].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
- int tid, u16 ssn)
-{
- unsigned long flags;
- int sta_id;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- lockdep_assert_held(&priv->mutex);
-
- sta_id = iwl_legacy_sta_id(sta);
- if (sta_id == IWL_INVALID_STATION)
- return -ENXIO;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].sta.station_flags_msk = 0;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_ADDBA_TID_MSK;
- priv->stations[sta_id].sta.add_immediate_ba_tid = (u8)tid;
- priv->stations[sta_id].sta.add_immediate_ba_ssn = cpu_to_le16(ssn);
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- memcpy(&sta_cmd, &priv->stations[sta_id].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
- int tid)
-{
- unsigned long flags;
- int sta_id;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- lockdep_assert_held(&priv->mutex);
-
- sta_id = iwl_legacy_sta_id(sta);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
- return -ENXIO;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].sta.station_flags_msk = 0;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_DELBA_TID_MSK;
- priv->stations[sta_id].sta.remove_immediate_ba_tid = (u8)tid;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- memcpy(&sta_cmd, &priv->stations[sta_id].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-void
-iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].sta.station_flags |= STA_FLG_PWR_SAVE_MSK;
- priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
- priv->stations[sta_id].sta.sta.modify_mask =
- STA_MODIFY_SLEEP_TX_COUNT_MSK;
- priv->stations[sta_id].sta.sleep_tx_count = cpu_to_le16(cnt);
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- iwl_legacy_send_add_sta(priv,
- &priv->stations[sta_id].sta, CMD_ASYNC);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c b/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
deleted file mode 100644
index 7f12e36..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-tx.c
+++ /dev/null
@@ -1,1378 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-hw.h"
-#include "iwl-4965.h"
-
-/*
- * mac80211 queues, ACs, hardware queues, FIFOs.
- *
- * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
- *
- * Mac80211 uses the following numbers, which we get from it
- * by way of skb_get_queue_mapping(skb):
- *
- * VO 0
- * VI 1
- * BE 2
- * BK 3
- *
- *
- * Regular (not A-MPDU) frames are put into hardware queues corresponding
- * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
- * own queue per aggregation session (RA/TID combination), such queues are
- * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
- * order to map frames to the right queue, we also need an AC->hw queue
- * mapping. This is implemented here.
- *
- * Due to the way hw queues are set up (by the hw specific modules like
- * iwl-4965.c), the AC->hw queue mapping is the identity
- * mapping.
- */
-
-static const u8 tid_to_ac[] = {
- IEEE80211_AC_BE,
- IEEE80211_AC_BK,
- IEEE80211_AC_BK,
- IEEE80211_AC_BE,
- IEEE80211_AC_VI,
- IEEE80211_AC_VI,
- IEEE80211_AC_VO,
- IEEE80211_AC_VO
-};
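A minimal sketch of the queue mapping described in the comment above, using the ctx->ac_to_queue table and skb_get_queue_mapping() exactly as the transmit path below does; the helper name is hypothetical and not part of the original file:

/* Sketch: the regular, non-aggregated path maps a frame straight from
 * its mac80211 queue (i.e. its AC) to the hardware queue, since the
 * AC->hw queue mapping is the identity mapping described above. */
static inline int example_skb_to_hw_queue(struct iwl_rxon_context *ctx,
					  struct sk_buff *skb)
{
	return ctx->ac_to_queue[skb_get_queue_mapping(skb)];
}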
-
-static inline int iwl4965_get_ac_from_tid(u16 tid)
-{
- if (likely(tid < ARRAY_SIZE(tid_to_ac)))
- return tid_to_ac[tid];
-
- /* no support for TIDs 8-15 yet */
- return -EINVAL;
-}
-
-static inline int
-iwl4965_get_fifo_from_tid(struct iwl_rxon_context *ctx, u16 tid)
-{
- if (likely(tid < ARRAY_SIZE(tid_to_ac)))
- return ctx->ac_to_fifo[tid_to_ac[tid]];
-
- /* no support for TIDs 8-15 yet */
- return -EINVAL;
-}
-
-/*
- * handle build REPLY_TX command notification.
- */
-static void iwl4965_tx_cmd_build_basic(struct iwl_priv *priv,
- struct sk_buff *skb,
- struct iwl_tx_cmd *tx_cmd,
- struct ieee80211_tx_info *info,
- struct ieee80211_hdr *hdr,
- u8 std_id)
-{
- __le16 fc = hdr->frame_control;
- __le32 tx_flags = tx_cmd->tx_flags;
-
- tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
- tx_flags |= TX_CMD_FLG_ACK_MSK;
- if (ieee80211_is_mgmt(fc))
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- if (ieee80211_is_probe_resp(fc) &&
- !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
- tx_flags |= TX_CMD_FLG_TSF_MSK;
- } else {
- tx_flags &= (~TX_CMD_FLG_ACK_MSK);
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- }
-
- if (ieee80211_is_back_req(fc))
- tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
-
- tx_cmd->sta_id = std_id;
- if (ieee80211_has_morefrags(fc))
- tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
-
- if (ieee80211_is_data_qos(fc)) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- tx_cmd->tid_tspec = qc[0] & 0xf;
- tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
- } else {
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- }
-
- iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
-
- tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
- if (ieee80211_is_mgmt(fc)) {
- if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
- tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
- else
- tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
- } else {
- tx_cmd->timeout.pm_frame_timeout = 0;
- }
-
- tx_cmd->driver_txop = 0;
- tx_cmd->tx_flags = tx_flags;
- tx_cmd->next_frame_len = 0;
-}
-
-#define RTS_DFAULT_RETRY_LIMIT 60
-
-static void iwl4965_tx_cmd_build_rate(struct iwl_priv *priv,
- struct iwl_tx_cmd *tx_cmd,
- struct ieee80211_tx_info *info,
- __le16 fc)
-{
- u32 rate_flags;
- int rate_idx;
- u8 rts_retry_limit;
- u8 data_retry_limit;
- u8 rate_plcp;
-
- /* Set retry limit on DATA packets and Probe Responses */
- if (ieee80211_is_probe_resp(fc))
- data_retry_limit = 3;
- else
- data_retry_limit = IWL4965_DEFAULT_TX_RETRY;
- tx_cmd->data_retry_limit = data_retry_limit;
-
- /* Set retry limit on RTS packets */
- rts_retry_limit = RTS_DFAULT_RETRY_LIMIT;
- if (data_retry_limit < rts_retry_limit)
- rts_retry_limit = data_retry_limit;
- tx_cmd->rts_retry_limit = rts_retry_limit;
-
- /* DATA packets will use the uCode station table for rate/antenna
- * selection */
- if (ieee80211_is_data(fc)) {
- tx_cmd->initial_rate_index = 0;
- tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
- return;
- }
-
- /*
- * If the current TX rate stored in mac80211 has the MCS bit set, it's
- * not really a TX rate. Thus, we use the lowest supported rate for
- * this band. Also use the lowest supported rate if the stored rate
- * index is invalid.
- */
- rate_idx = info->control.rates[0].idx;
- if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
- (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
- rate_idx = rate_lowest_index(&priv->bands[info->band],
- info->control.sta);
- /* For 5 GHz band, remap mac80211 rate indices into driver indices */
- if (info->band == IEEE80211_BAND_5GHZ)
- rate_idx += IWL_FIRST_OFDM_RATE;
- /* Get PLCP rate for tx_cmd->rate_n_flags */
- rate_plcp = iwlegacy_rates[rate_idx].plcp;
- /* Zero out flags for this packet */
- rate_flags = 0;
-
- /* Set CCK flag as needed */
- if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
- rate_flags |= RATE_MCS_CCK_MSK;
-
- /* Set up antennas */
- priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
- priv->hw_params.valid_tx_ant);
-
- rate_flags |= iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
-
- /* Set the rate in the TX cmd */
- tx_cmd->rate_n_flags = iwl4965_hw_set_rate_n_flags(rate_plcp, rate_flags);
-}
-
-static void iwl4965_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- struct iwl_tx_cmd *tx_cmd,
- struct sk_buff *skb_frag,
- int sta_id)
-{
- struct ieee80211_key_conf *keyconf = info->control.hw_key;
-
- switch (keyconf->cipher) {
- case WLAN_CIPHER_SUITE_CCMP:
- tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
- memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
- IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
- break;
-
- case WLAN_CIPHER_SUITE_TKIP:
- tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
- ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
- IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
- break;
-
- case WLAN_CIPHER_SUITE_WEP104:
- tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
- /* fall through */
- case WLAN_CIPHER_SUITE_WEP40:
- tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
- (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
-
- memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
-
- IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
- "with key %d\n", keyconf->keyidx);
- break;
-
- default:
- IWL_ERR(priv, "Unknown encode cipher %x\n", keyconf->cipher);
- break;
- }
-}
-
-/*
- * start REPLY_TX command process
- */
-int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct ieee80211_sta *sta = info->control.sta;
- struct iwl_station_priv *sta_priv = NULL;
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- struct iwl_device_cmd *out_cmd;
- struct iwl_cmd_meta *out_meta;
- struct iwl_tx_cmd *tx_cmd;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- int txq_id;
- dma_addr_t phys_addr;
- dma_addr_t txcmd_phys;
- dma_addr_t scratch_phys;
- u16 len, firstlen, secondlen;
- u16 seq_number = 0;
- __le16 fc;
- u8 hdr_len;
- u8 sta_id;
- u8 wait_write_ptr = 0;
- u8 tid = 0;
- u8 *qc = NULL;
- unsigned long flags;
- bool is_agg = false;
-
- if (info->control.vif)
- ctx = iwl_legacy_rxon_ctx_from_vif(info->control.vif);
-
- spin_lock_irqsave(&priv->lock, flags);
- if (iwl_legacy_is_rfkill(priv)) {
- IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
- goto drop_unlock;
- }
-
- fc = hdr->frame_control;
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (ieee80211_is_auth(fc))
- IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
- else if (ieee80211_is_assoc_req(fc))
- IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
- else if (ieee80211_is_reassoc_req(fc))
- IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
-#endif
-
- hdr_len = ieee80211_hdrlen(fc);
-
- /* For management frames use the broadcast id so as not to break aggregation */
- if (!ieee80211_is_data(fc))
- sta_id = ctx->bcast_sta_id;
- else {
- /* Find index into station table for destination station */
- sta_id = iwl_legacy_sta_id_or_broadcast(priv, ctx, info->control.sta);
-
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
- hdr->addr1);
- goto drop_unlock;
- }
- }
-
- IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
-
- if (sta)
- sta_priv = (void *)sta->drv_priv;
-
- if (sta_priv && sta_priv->asleep &&
- (info->flags & IEEE80211_TX_CTL_POLL_RESPONSE)) {
- /*
- * This sends an asynchronous command to the device,
- * but we can rely on it being processed before the
- * next frame is processed -- and the next frame to
- * this station is the one that will consume this
- * counter.
- * For now set the counter to just 1 since we do not
- * support uAPSD yet.
- */
- iwl4965_sta_modify_sleep_tx_count(priv, sta_id, 1);
- }
-
- /*
- * Send this frame after DTIM -- there's a special queue
- * reserved for this for contexts that support AP mode.
- */
- if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
- txq_id = ctx->mcast_queue;
- /*
- * The microcode will clear the more data
- * bit in the last frame it transmits.
- */
- hdr->frame_control |=
- cpu_to_le16(IEEE80211_FCTL_MOREDATA);
- } else
- txq_id = ctx->ac_to_queue[skb_get_queue_mapping(skb)];
-
- /* irqs already disabled/saved above when locking priv->lock */
- spin_lock(&priv->sta_lock);
-
- if (ieee80211_is_data_qos(fc)) {
- qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
- if (WARN_ON_ONCE(tid >= MAX_TID_COUNT)) {
- spin_unlock(&priv->sta_lock);
- goto drop_unlock;
- }
- seq_number = priv->stations[sta_id].tid[tid].seq_number;
- seq_number &= IEEE80211_SCTL_SEQ;
- hdr->seq_ctrl = hdr->seq_ctrl &
- cpu_to_le16(IEEE80211_SCTL_FRAG);
- hdr->seq_ctrl |= cpu_to_le16(seq_number);
- seq_number += 0x10;
- /* aggregation is on for this <sta,tid> */
- if (info->flags & IEEE80211_TX_CTL_AMPDU &&
- priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
- txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
- is_agg = true;
- }
- }
-
- txq = &priv->txq[txq_id];
- q = &txq->q;
-
- if (unlikely(iwl_legacy_queue_space(q) < q->high_mark)) {
- spin_unlock(&priv->sta_lock);
- goto drop_unlock;
- }
-
- if (ieee80211_is_data_qos(fc)) {
- priv->stations[sta_id].tid[tid].tfds_in_queue++;
- if (!ieee80211_has_morefrags(fc))
- priv->stations[sta_id].tid[tid].seq_number = seq_number;
- }
-
- spin_unlock(&priv->sta_lock);
-
- /* Set up driver data for this TFD */
- memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
- txq->txb[q->write_ptr].skb = skb;
- txq->txb[q->write_ptr].ctx = ctx;
-
- /* Set up first empty entry in queue's array of Tx/cmd buffers */
- out_cmd = txq->cmd[q->write_ptr];
- out_meta = &txq->meta[q->write_ptr];
- tx_cmd = &out_cmd->cmd.tx;
- memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
- memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
-
- /*
- * Set up the Tx-command (not MAC!) header.
- * Store the chosen Tx queue and TFD index within the sequence field;
- * after Tx, uCode's Tx response will return this value so driver can
- * locate the frame within the tx queue and do post-tx processing.
- */
- out_cmd->hdr.cmd = REPLY_TX;
- out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(q->write_ptr)));
-
- /* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, hdr_len);
-
-
- /* Total # bytes to be transmitted */
- len = (u16)skb->len;
- tx_cmd->len = cpu_to_le16(len);
-
- if (info->control.hw_key)
- iwl4965_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
-
- /* TODO need this for burst mode later on */
- iwl4965_tx_cmd_build_basic(priv, skb, tx_cmd, info, hdr, sta_id);
- iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
-
- iwl4965_tx_cmd_build_rate(priv, tx_cmd, info, fc);
-
- iwl_legacy_update_stats(priv, true, fc, len);
- /*
- * Use the first empty entry in this queue's command buffer array
- * to contain the Tx command and MAC header concatenated together
- * (payload data will be in another buffer).
- * Size of this varies, due to varying MAC header length.
- * If end is not dword aligned, we'll have 2 extra bytes at the end
- * of the MAC header (device reads on dword boundaries).
- * We'll tell device about this padding later.
- */
- len = sizeof(struct iwl_tx_cmd) +
- sizeof(struct iwl_cmd_header) + hdr_len;
- firstlen = (len + 3) & ~3;
-
- /* Tell NIC about any 2-byte padding after MAC header */
- if (firstlen != len)
- tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
-
- /* Physical address of this Tx command's header (not MAC header!),
- * within command buffer array. */
- txcmd_phys = pci_map_single(priv->pci_dev,
- &out_cmd->hdr, firstlen,
- PCI_DMA_BIDIRECTIONAL);
- dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
- dma_unmap_len_set(out_meta, len, firstlen);
- /* Add buffer containing Tx command and MAC(!) header to TFD's
- * first entry */
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- txcmd_phys, firstlen, 1, 0);
-
- if (!ieee80211_has_morefrags(hdr->frame_control)) {
- txq->need_update = 1;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
-
- /* Set up TFD's 2nd entry to point directly to remainder of skb,
- * if any (802.11 null frames have no payload). */
- secondlen = skb->len - hdr_len;
- if (secondlen > 0) {
- phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
- secondlen, PCI_DMA_TODEVICE);
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- phys_addr, secondlen,
- 0, 0);
- }
-
- scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
- offsetof(struct iwl_tx_cmd, scratch);
-
- /* take back ownership of DMA buffer to enable update */
- pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
- firstlen, PCI_DMA_BIDIRECTIONAL);
- tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
- tx_cmd->dram_msb_ptr = iwl_legacy_get_dma_hi_addr(scratch_phys);
-
- IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
- le16_to_cpu(out_cmd->hdr.sequence));
- IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
- iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
- iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
-
- /* Set up entry for this TFD in Tx byte-count array */
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
- priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
- le16_to_cpu(tx_cmd->len));
-
- pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
- firstlen, PCI_DMA_BIDIRECTIONAL);
-
- trace_iwlwifi_legacy_dev_tx(priv,
- &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
- sizeof(struct iwl_tfd),
- &out_cmd->hdr, firstlen,
- skb->data + hdr_len, secondlen);
-
- /* Tell device the write index *just past* this latest filled TFD */
- q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_legacy_txq_update_write_ptr(priv, txq);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /*
- * At this point the frame is "transmitted" successfully
- * and we will get a TX status notification eventually.
- */
-
- /*
- * Avoid atomic ops if it isn't an associated client.
- * Also, if this is a packet for aggregation, don't
- * increase the counter because the ucode will stop
- * aggregation queues when their respective station
- * goes to sleep.
- */
- if (sta_priv && sta_priv->client && !is_agg)
- atomic_inc(&sta_priv->pending_frames);
-
- if ((iwl_legacy_queue_space(q) < q->high_mark) &&
- priv->mac80211_registered) {
- if (wait_write_ptr) {
- spin_lock_irqsave(&priv->lock, flags);
- txq->need_update = 1;
- iwl_legacy_txq_update_write_ptr(priv, txq);
- spin_unlock_irqrestore(&priv->lock, flags);
- } else {
- iwl_legacy_stop_queue(priv, txq);
- }
- }
-
- return 0;
-
-drop_unlock:
- spin_unlock_irqrestore(&priv->lock, flags);
- return -1;
-}
-
-static inline int iwl4965_alloc_dma_ptr(struct iwl_priv *priv,
- struct iwl_dma_ptr *ptr, size_t size)
-{
- ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
- GFP_KERNEL);
- if (!ptr->addr)
- return -ENOMEM;
- ptr->size = size;
- return 0;
-}
-
-static inline void iwl4965_free_dma_ptr(struct iwl_priv *priv,
- struct iwl_dma_ptr *ptr)
-{
- if (unlikely(!ptr->addr))
- return;
-
- dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
- memset(ptr, 0, sizeof(*ptr));
-}
-
-/**
- * iwl4965_hw_txq_ctx_free - Free TXQ Context
- *
- * Destroy all TX DMA queues and structures
- */
-void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv)
-{
- int txq_id;
-
- /* Tx queues */
- if (priv->txq) {
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- if (txq_id == priv->cmd_queue)
- iwl_legacy_cmd_queue_free(priv);
- else
- iwl_legacy_tx_queue_free(priv, txq_id);
- }
- iwl4965_free_dma_ptr(priv, &priv->kw);
-
- iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
-
- /* free tx queue structure */
- iwl_legacy_txq_mem(priv);
-}
-
-/**
- * iwl4965_txq_ctx_alloc - allocate TX queue context
- * @priv: driver private data
- *
- * Allocate all Tx DMA structures and initialize them.
- *
- * Return: 0 on success, error code otherwise
- */
-int iwl4965_txq_ctx_alloc(struct iwl_priv *priv)
-{
- int ret;
- int txq_id, slots_num;
- unsigned long flags;
-
- /* Free all tx/cmd queues and keep-warm buffer */
- iwl4965_hw_txq_ctx_free(priv);
-
- ret = iwl4965_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
- priv->hw_params.scd_bc_tbls_size);
- if (ret) {
- IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
- goto error_bc_tbls;
- }
- /* Alloc keep-warm buffer */
- ret = iwl4965_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
- if (ret) {
- IWL_ERR(priv, "Keep Warm allocation failed\n");
- goto error_kw;
- }
-
- /* allocate tx queue structure */
- ret = iwl_legacy_alloc_txq_mem(priv);
- if (ret)
- goto error;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Turn off all Tx DMA fifos */
- iwl4965_txq_set_sched(priv, 0);
-
- /* Tell NIC where to find the "keep warm" buffer */
- iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Alloc and init all Tx queues, including the command queue (#4/#9) */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- slots_num = (txq_id == priv->cmd_queue) ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- ret = iwl_legacy_tx_queue_init(priv,
- &priv->txq[txq_id], slots_num,
- txq_id);
- if (ret) {
- IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
- goto error;
- }
- }
-
- return ret;
-
- error:
- iwl4965_hw_txq_ctx_free(priv);
- iwl4965_free_dma_ptr(priv, &priv->kw);
- error_kw:
- iwl4965_free_dma_ptr(priv, &priv->scd_bc_tbls);
- error_bc_tbls:
- return ret;
-}
-
-void iwl4965_txq_ctx_reset(struct iwl_priv *priv)
-{
- int txq_id, slots_num;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Turn off all Tx DMA fifos */
- iwl4965_txq_set_sched(priv, 0);
-
- /* Tell NIC where to find the "keep warm" buffer */
- iwl_legacy_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Alloc and init all Tx queues, including the command queue (#4) */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
- slots_num = txq_id == priv->cmd_queue ?
- TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
- iwl_legacy_tx_queue_reset(priv, &priv->txq[txq_id],
- slots_num, txq_id);
- }
-}
-
-/**
- * iwl4965_txq_ctx_stop - Stop all Tx DMA channels
- */
-void iwl4965_txq_ctx_stop(struct iwl_priv *priv)
-{
- int ch, txq_id;
- unsigned long flags;
-
- /* Turn off all Tx DMA fifos */
- spin_lock_irqsave(&priv->lock, flags);
-
- iwl4965_txq_set_sched(priv, 0);
-
- /* Stop each Tx DMA channel, and wait for it to be idle */
- for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
- iwl_legacy_write_direct32(priv,
- FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
- if (iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
- FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
- 1000))
- IWL_ERR(priv, "Failing on timeout while stopping"
- " DMA channel %d [0x%08x]", ch,
- iwl_legacy_read_direct32(priv,
- FH_TSSR_TX_STATUS_REG));
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (!priv->txq)
- return;
-
- /* Unmap DMA from host system and free skb's */
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- if (txq_id == priv->cmd_queue)
- iwl_legacy_cmd_queue_unmap(priv);
- else
- iwl_legacy_tx_queue_unmap(priv, txq_id);
-}
-
-/*
- * Find first available (lowest unused) Tx Queue, mark it "active".
- * Called only when finding queue for aggregation.
- * Should never return anything < 7, because queues 0-6 should already
- * be in use as EDCA ACs (0-3), Command (4), and reserved (5, 6).
- */
-static int iwl4965_txq_ctx_activate_free(struct iwl_priv *priv)
-{
- int txq_id;
-
- for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
- if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
- return txq_id;
- return -1;
-}
-
-/**
- * iwl4965_tx_queue_stop_scheduler - Stop queue, but keep configuration
- */
-static void iwl4965_tx_queue_stop_scheduler(struct iwl_priv *priv,
- u16 txq_id)
-{
- /* Simply stop the queue, but don't change any configuration;
- * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
- iwl_legacy_write_prph(priv,
- IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
- (0 << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
- (1 << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
-}
-
-/**
- * iwl4965_tx_queue_set_q2ratid - Map unique receiver/tid combination to a queue
- */
-static int iwl4965_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
- u16 txq_id)
-{
- u32 tbl_dw_addr;
- u32 tbl_dw;
- u16 scd_q2ratid;
-
- scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
-
- tbl_dw_addr = priv->scd_base_addr +
- IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
-
- tbl_dw = iwl_legacy_read_targ_mem(priv, tbl_dw_addr);
-
- if (txq_id & 0x1)
- tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
- else
- tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
-
- iwl_legacy_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
-
- return 0;
-}
-
-/**
- * iwl4965_txq_agg_enable - Set up & enable aggregation for selected queue
- *
- * NOTE: txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE,
- * i.e. it must be one of the higher queues used for aggregation
- */
-static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
- int tx_fifo, int sta_id, int tid, u16 ssn_idx)
-{
- unsigned long flags;
- u16 ra_tid;
- int ret;
-
- if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
- (IWL49_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
- IWL_WARN(priv,
- "queue number out of range: %d, must be %d to %d\n",
- txq_id, IWL49_FIRST_AMPDU_QUEUE,
- IWL49_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues - 1);
- return -EINVAL;
- }
-
- ra_tid = BUILD_RAxTID(sta_id, tid);
-
- /* Modify device's station table to Tx this TID */
- ret = iwl4965_sta_tx_modify_enable_tid(priv, sta_id, tid);
- if (ret)
- return ret;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Stop this Tx queue before configuring it */
- iwl4965_tx_queue_stop_scheduler(priv, txq_id);
-
- /* Map receiver-address / traffic-ID to this queue */
- iwl4965_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
-
- /* Set this queue as a chain-building queue */
- iwl_legacy_set_bits_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
-
- /* Place first TFD at index corresponding to start sequence number.
- * Assumes that ssn_idx is valid (!= 0xFFF) */
- priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
- priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
- iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
-
- /* Set up Tx window size and frame limit for this queue */
- iwl_legacy_write_targ_mem(priv,
- priv->scd_base_addr + IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id),
- (SCD_WIN_SIZE << IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
- IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
-
- iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
- IWL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
- (SCD_FRAME_LIMIT << IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS)
- & IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
-
- iwl_legacy_set_bits_prph(priv, IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
-
- /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
- iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-}
-
-
-int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn)
-{
- int sta_id;
- int tx_fifo;
- int txq_id;
- int ret;
- unsigned long flags;
- struct iwl_tid_data *tid_data;
-
- tx_fifo = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
- if (unlikely(tx_fifo < 0))
- return tx_fifo;
-
- IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
- __func__, sta->addr, tid);
-
- sta_id = iwl_legacy_sta_id(sta);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Start AGG on invalid station\n");
- return -ENXIO;
- }
- if (unlikely(tid >= MAX_TID_COUNT))
- return -EINVAL;
-
- if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
- IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
- return -ENXIO;
- }
-
- txq_id = iwl4965_txq_ctx_activate_free(priv);
- if (txq_id == -1) {
- IWL_ERR(priv, "No free aggregation queue available\n");
- return -ENXIO;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- tid_data = &priv->stations[sta_id].tid[tid];
- *ssn = SEQ_TO_SN(tid_data->seq_number);
- tid_data->agg.txq_id = txq_id;
- iwl_legacy_set_swq_id(&priv->txq[txq_id],
- iwl4965_get_ac_from_tid(tid), txq_id);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- ret = iwl4965_txq_agg_enable(priv, txq_id, tx_fifo,
- sta_id, tid, *ssn);
- if (ret)
- return ret;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- tid_data = &priv->stations[sta_id].tid[tid];
- if (tid_data->tfds_in_queue == 0) {
- IWL_DEBUG_HT(priv, "HW queue is empty\n");
- tid_data->agg.state = IWL_AGG_ON;
- ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
- } else {
- IWL_DEBUG_HT(priv,
- "HW queue is NOT empty: %d packets in HW queue\n",
- tid_data->tfds_in_queue);
- tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return ret;
-}
-
-/**
- * iwl4965_txq_agg_disable - deactivate an aggregation Tx queue
- *
- * txq_id must be greater than IWL49_FIRST_AMPDU_QUEUE.
- * priv->lock must be held by the caller.
- */
-static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
- u16 ssn_idx, u8 tx_fifo)
-{
- if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
- (IWL49_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
- IWL_WARN(priv,
- "queue number out of range: %d, must be %d to %d\n",
- txq_id, IWL49_FIRST_AMPDU_QUEUE,
- IWL49_FIRST_AMPDU_QUEUE +
- priv->cfg->base_params->num_of_ampdu_queues - 1);
- return -EINVAL;
- }
-
- iwl4965_tx_queue_stop_scheduler(priv, txq_id);
-
- iwl_legacy_clear_bits_prph(priv,
- IWL49_SCD_QUEUECHAIN_SEL, (1 << txq_id));
-
- priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
- priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
- /* supposes that ssn_idx is valid (!= 0xFFF) */
- iwl4965_set_wr_ptrs(priv, txq_id, ssn_idx);
-
- iwl_legacy_clear_bits_prph(priv,
- IWL49_SCD_INTERRUPT_MASK, (1 << txq_id));
- iwl_txq_ctx_deactivate(priv, txq_id);
- iwl4965_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
-
- return 0;
-}
-
-int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid)
-{
- int tx_fifo_id, txq_id, sta_id, ssn;
- struct iwl_tid_data *tid_data;
- int write_ptr, read_ptr;
- unsigned long flags;
-
- tx_fifo_id = iwl4965_get_fifo_from_tid(iwl_legacy_rxon_ctx_from_vif(vif), tid);
- if (unlikely(tx_fifo_id < 0))
- return tx_fifo_id;
-
- sta_id = iwl_legacy_sta_id(sta);
-
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
- return -ENXIO;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- tid_data = &priv->stations[sta_id].tid[tid];
- ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
- txq_id = tid_data->agg.txq_id;
-
- switch (priv->stations[sta_id].tid[tid].agg.state) {
- case IWL_EMPTYING_HW_QUEUE_ADDBA:
- /*
- * This can happen if the peer stops aggregation
- * again before we've had a chance to drain the
- * queue we selected previously, i.e. before the
- * session was really started completely.
- */
- IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
- goto turn_off;
- case IWL_AGG_ON:
- break;
- default:
- IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
- }
-
- write_ptr = priv->txq[txq_id].q.write_ptr;
- read_ptr = priv->txq[txq_id].q.read_ptr;
-
- /* The queue is not empty */
- if (write_ptr != read_ptr) {
- IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
- priv->stations[sta_id].tid[tid].agg.state =
- IWL_EMPTYING_HW_QUEUE_DELBA;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return 0;
- }
-
- IWL_DEBUG_HT(priv, "HW queue is empty\n");
- turn_off:
- priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
-
- /* do not restore/save irqs */
- spin_unlock(&priv->sta_lock);
- spin_lock(&priv->lock);
-
- /*
- * The only reason this call can fail is a queue number out of range,
- * which can happen if the uCode is reloaded and all the station
- * information is lost. If it is outside the range, there is no need
- * to deactivate the uCode queue; just return "success" to allow
- * mac80211 to clean up its own data.
- */
- iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo_id);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
-
- return 0;
-}
-
-int iwl4965_txq_check_empty(struct iwl_priv *priv,
- int sta_id, u8 tid, int txq_id)
-{
- struct iwl_queue *q = &priv->txq[txq_id].q;
- u8 *addr = priv->stations[sta_id].sta.sta.addr;
- struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
- struct iwl_rxon_context *ctx;
-
- ctx = &priv->contexts[priv->stations[sta_id].ctxid];
-
- lockdep_assert_held(&priv->sta_lock);
-
- switch (priv->stations[sta_id].tid[tid].agg.state) {
- case IWL_EMPTYING_HW_QUEUE_DELBA:
- /* We are reclaiming the last packet of the
- * aggregated HW queue */
- if ((txq_id == tid_data->agg.txq_id) &&
- (q->read_ptr == q->write_ptr)) {
- u16 ssn = SEQ_TO_SN(tid_data->seq_number);
- int tx_fifo = iwl4965_get_fifo_from_tid(ctx, tid);
- IWL_DEBUG_HT(priv,
- "HW queue empty: continue DELBA flow\n");
- iwl4965_txq_agg_disable(priv, txq_id, ssn, tx_fifo);
- tid_data->agg.state = IWL_AGG_OFF;
- ieee80211_stop_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
- }
- break;
- case IWL_EMPTYING_HW_QUEUE_ADDBA:
- /* We are reclaiming the last packet of the queue */
- if (tid_data->tfds_in_queue == 0) {
- IWL_DEBUG_HT(priv,
- "HW queue empty: continue ADDBA flow\n");
- tid_data->agg.state = IWL_AGG_ON;
- ieee80211_start_tx_ba_cb_irqsafe(ctx->vif, addr, tid);
- }
- break;
- }
-
- return 0;
-}
-
-static void iwl4965_non_agg_tx_status(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- const u8 *addr1)
-{
- struct ieee80211_sta *sta;
- struct iwl_station_priv *sta_priv;
-
- rcu_read_lock();
- sta = ieee80211_find_sta(ctx->vif, addr1);
- if (sta) {
- sta_priv = (void *)sta->drv_priv;
- /* avoid atomic ops if this isn't a client */
- if (sta_priv->client &&
- atomic_dec_return(&sta_priv->pending_frames) == 0)
- ieee80211_sta_block_awake(priv->hw, sta, false);
- }
- rcu_read_unlock();
-}
-
-static void
-iwl4965_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
- bool is_agg)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
-
- if (!is_agg)
- iwl4965_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);
-
- ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
-}
-
-int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
- struct iwl_tx_info *tx_info;
- int nfreed = 0;
- struct ieee80211_hdr *hdr;
-
- if ((index >= q->n_bd) || (iwl_legacy_queue_used(q, index) == 0)) {
- IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
- "is out of range [0-%d] %d %d.\n", txq_id,
- index, q->n_bd, q->write_ptr, q->read_ptr);
- return 0;
- }
-
- for (index = iwl_legacy_queue_inc_wrap(index, q->n_bd);
- q->read_ptr != index;
- q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
-
- tx_info = &txq->txb[txq->q.read_ptr];
-
- if (WARN_ON_ONCE(tx_info->skb == NULL))
- continue;
-
- hdr = (struct ieee80211_hdr *)tx_info->skb->data;
- if (ieee80211_is_data_qos(hdr->frame_control))
- nfreed++;
-
- iwl4965_tx_status(priv, tx_info,
- txq_id >= IWL4965_FIRST_AMPDU_QUEUE);
- tx_info->skb = NULL;
-
- priv->cfg->ops->lib->txq_free_tfd(priv, txq);
- }
- return nfreed;
-}
-
-/**
- * iwl4965_tx_status_reply_compressed_ba - Update tx status from block-ack
- *
- * Go through block-ack's bitmap of ACK'd frames, update driver's record of
- * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
- */
-static int iwl4965_tx_status_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_ht_agg *agg,
- struct iwl_compressed_ba_resp *ba_resp)
-
-{
- int i, sh, ack;
- u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
- u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
- int successes = 0;
- struct ieee80211_tx_info *info;
- u64 bitmap, sent_bitmap;
-
- if (unlikely(!agg->wait_for_ba)) {
- if (unlikely(ba_resp->bitmap))
- IWL_ERR(priv, "Received BA when not expected\n");
- return -EINVAL;
- }
-
- /* Mark that the expected block-ack response arrived */
- agg->wait_for_ba = 0;
- IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx,
- seq_ctl);
-
- /* Calculate shift to align block-ack bits with our Tx window bits */
- sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
- if (sh < 0) /* something is wrong with the indices */
- sh += 0x100;
-
- if (agg->frame_count > (64 - sh)) {
- IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
- return -1;
- }
-
- /* don't use 64-bit values for now */
- bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
-
- /* check for success or failure according to the
- * transmitted bitmap and block-ack bitmap */
- sent_bitmap = bitmap & agg->bitmap;
-
- /* For each frame attempted in aggregation,
- * update driver's record of tx frame's status. */
- i = 0;
- while (sent_bitmap) {
- ack = sent_bitmap & 1ULL;
- successes += ack;
- IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
- ack ? "ACK" : "NACK", i,
- (agg->start_idx + i) & 0xff,
- agg->start_idx + i);
- sent_bitmap >>= 1;
- ++i;
- }
-
- IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n",
- (unsigned long long)bitmap);
-
- info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
- memset(&info->status, 0, sizeof(info->status));
- info->flags |= IEEE80211_TX_STAT_ACK;
- info->flags |= IEEE80211_TX_STAT_AMPDU;
- info->status.ampdu_ack_len = successes;
- info->status.ampdu_len = agg->frame_count;
- iwl4965_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
-
- return 0;
-}
-
-/**
- * iwl4965_hwrate_to_tx_control - translate ucode response to mac80211 tx status control values
- */
-void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
- struct ieee80211_tx_info *info)
-{
- struct ieee80211_tx_rate *r = &info->control.rates[0];
-
- info->antenna_sel_tx =
- ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
- if (rate_n_flags & RATE_MCS_HT_MSK)
- r->flags |= IEEE80211_TX_RC_MCS;
- if (rate_n_flags & RATE_MCS_GF_MSK)
- r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
- if (rate_n_flags & RATE_MCS_HT40_MSK)
- r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (rate_n_flags & RATE_MCS_DUP_MSK)
- r->flags |= IEEE80211_TX_RC_DUP_DATA;
- if (rate_n_flags & RATE_MCS_SGI_MSK)
- r->flags |= IEEE80211_TX_RC_SHORT_GI;
- r->idx = iwl4965_hwrate_to_mac80211_idx(rate_n_flags, info->band);
-}
-
-/**
- * iwl4965_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
- *
- * Handles block-acknowledge notification from device, which reports success
- * of frames sent via aggregation.
- */
-void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
- struct iwl_tx_queue *txq = NULL;
- struct iwl_ht_agg *agg;
- int index;
- int sta_id;
- int tid;
- unsigned long flags;
-
- /* "flow" corresponds to Tx queue */
- u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
-
- /* "ssn" is start of block-ack Tx window, corresponds to index
- * (in Tx queue's circular buffer) of first TFD/frame in window */
- u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
-
- if (scd_flow >= priv->hw_params.max_txq_num) {
- IWL_ERR(priv,
- "BUG_ON scd_flow is bigger than number of queues\n");
- return;
- }
-
- txq = &priv->txq[scd_flow];
- sta_id = ba_resp->sta_id;
- tid = ba_resp->tid;
- agg = &priv->stations[sta_id].tid[tid].agg;
- if (unlikely(agg->txq_id != scd_flow)) {
- /*
- * FIXME: this is a uCode bug which needs to be addressed;
- * log the information and return for now.
- * Since it can happen very often, and in order not to fill
- * the syslog, don't enable the logging by default.
- */
- IWL_DEBUG_TX_REPLY(priv,
- "BA scd_flow %d does not match txq_id %d\n",
- scd_flow, agg->txq_id);
- return;
- }
-
- /* Find index just before block-ack window */
- index = iwl_legacy_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
- "sta_id = %d\n",
- agg->wait_for_ba,
- (u8 *) &ba_resp->sta_addr_lo32,
- ba_resp->sta_id);
- IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx,"
- "scd_flow = "
- "%d, scd_ssn = %d\n",
- ba_resp->tid,
- le16_to_cpu(ba_resp->seq_ctl),
- (unsigned long long)le64_to_cpu(ba_resp->bitmap),
- ba_resp->scd_flow,
- ba_resp->scd_ssn);
- IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
- agg->start_idx,
- (unsigned long long)agg->bitmap);
-
- /* Update driver's record of ACK vs. not for each frame in window */
- iwl4965_tx_status_reply_compressed_ba(priv, agg, ba_resp);
-
- /* Release all TFDs before the SSN, i.e. all TFDs in front of
- * block-ack window (we assume that they've been successfully
- * transmitted ... if not, it's too late anyway). */
- if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
- /* calculate mac80211 ampdu sw queue to wake */
- int freed = iwl4965_tx_queue_reclaim(priv, scd_flow, index);
- iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
-
- if ((iwl_legacy_queue_space(&txq->q) > txq->q.low_mark) &&
- priv->mac80211_registered &&
- (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
- iwl_legacy_wake_queue(priv, txq);
-
- iwl4965_txq_check_empty(priv, sta_id, tid, scd_flow);
- }
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-const char *iwl4965_get_tx_fail_reason(u32 status)
-{
-#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
-#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
-
- switch (status & TX_STATUS_MSK) {
- case TX_STATUS_SUCCESS:
- return "SUCCESS";
- TX_STATUS_POSTPONE(DELAY);
- TX_STATUS_POSTPONE(FEW_BYTES);
- TX_STATUS_POSTPONE(QUIET_PERIOD);
- TX_STATUS_POSTPONE(CALC_TTAK);
- TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
- TX_STATUS_FAIL(SHORT_LIMIT);
- TX_STATUS_FAIL(LONG_LIMIT);
- TX_STATUS_FAIL(FIFO_UNDERRUN);
- TX_STATUS_FAIL(DRAIN_FLOW);
- TX_STATUS_FAIL(RFKILL_FLUSH);
- TX_STATUS_FAIL(LIFE_EXPIRE);
- TX_STATUS_FAIL(DEST_PS);
- TX_STATUS_FAIL(HOST_ABORTED);
- TX_STATUS_FAIL(BT_RETRY);
- TX_STATUS_FAIL(STA_INVALID);
- TX_STATUS_FAIL(FRAG_DROPPED);
- TX_STATUS_FAIL(TID_DISABLE);
- TX_STATUS_FAIL(FIFO_FLUSHED);
- TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
- TX_STATUS_FAIL(PASSIVE_NO_RX);
- TX_STATUS_FAIL(NO_BEACON_ON_RADAR);
- }
-
- return "UNKNOWN";
-
-#undef TX_STATUS_FAIL
-#undef TX_STATUS_POSTPONE
-}
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c b/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
deleted file mode 100644
index 001d148..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965-ucode.c
+++ /dev/null
@@ -1,166 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-hw.h"
-#include "iwl-4965.h"
-#include "iwl-4965-calib.h"
-
-#define IWL_AC_UNSET -1
-
-/**
- * iwl4965_verify_inst_sparse - verify runtime uCode image in card vs. host,
- * using sample data 100 bytes apart. If these sample points are good,
- * it's a pretty good bet that everything between them is good, too.
- */
-static int
-iwl4965_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
-{
- u32 val;
- int ret = 0;
- u32 errcnt = 0;
- u32 i;
-
- IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
- for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
- /* read data comes through single port, auto-incr addr */
- /* NOTE: Use the debugless read so we don't flood kernel log
- * if IWL_DL_IO is set */
- iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
- i + IWL4965_RTC_INST_LOWER_BOUND);
- val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- if (val != le32_to_cpu(*image)) {
- ret = -EIO;
- errcnt++;
- if (errcnt >= 3)
- break;
- }
- }
-
- return ret;
-}
-
-/**
- * iwl4965_verify_inst_full - verify runtime uCode image in card vs. host,
- * looking at all data.
- */
-static int iwl4965_verify_inst_full(struct iwl_priv *priv, __le32 *image,
- u32 len)
-{
- u32 val;
- u32 save_len = len;
- int ret = 0;
- u32 errcnt;
-
- IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
- iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
- IWL4965_RTC_INST_LOWER_BOUND);
-
- errcnt = 0;
- for (; len > 0; len -= sizeof(u32), image++) {
- /* read data comes through single port, auto-incr addr */
- /* NOTE: Use the debugless read so we don't flood kernel log
- * if IWL_DL_IO is set */
- val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- if (val != le32_to_cpu(*image)) {
- IWL_ERR(priv, "uCode INST section is invalid at "
- "offset 0x%x, is 0x%x, s/b 0x%x\n",
- save_len - len, val, le32_to_cpu(*image));
- ret = -EIO;
- errcnt++;
- if (errcnt >= 20)
- break;
- }
- }
-
- if (!errcnt)
- IWL_DEBUG_INFO(priv,
- "ucode image in INSTRUCTION memory is good\n");
-
- return ret;
-}
-
-/**
- * iwl4965_verify_ucode - determine which instruction image is in SRAM,
- * and verify its contents
- */
-int iwl4965_verify_ucode(struct iwl_priv *priv)
-{
- __le32 *image;
- u32 len;
- int ret;
-
- /* Try bootstrap */
- image = (__le32 *)priv->ucode_boot.v_addr;
- len = priv->ucode_boot.len;
- ret = iwl4965_verify_inst_sparse(priv, image, len);
- if (!ret) {
- IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
- return 0;
- }
-
- /* Try initialize */
- image = (__le32 *)priv->ucode_init.v_addr;
- len = priv->ucode_init.len;
- ret = iwl4965_verify_inst_sparse(priv, image, len);
- if (!ret) {
- IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
- return 0;
- }
-
- /* Try runtime/protocol */
- image = (__le32 *)priv->ucode_code.v_addr;
- len = priv->ucode_code.len;
- ret = iwl4965_verify_inst_sparse(priv, image, len);
- if (!ret) {
- IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
- return 0;
- }
-
- IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
-
- /* Since nothing seems to match, show first several data entries in
- * instruction SRAM, so maybe visual inspection will give a clue.
- * Selection of bootstrap image (vs. other images) is arbitrary. */
- image = (__le32 *)priv->ucode_boot.v_addr;
- len = priv->ucode_boot.len;
- ret = iwl4965_verify_inst_full(priv, image, len);
-
- return ret;
-}
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
deleted file mode 100644
index 86f4fce..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965.c
+++ /dev/null
@@ -1,2183 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-4965-calib.h"
-#include "iwl-sta.h"
-#include "iwl-4965-led.h"
-#include "iwl-4965.h"
-#include "iwl-4965-debugfs.h"
-
-static int iwl4965_send_tx_power(struct iwl_priv *priv);
-static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
-
-/* Highest firmware API version supported */
-#define IWL4965_UCODE_API_MAX 2
-
-/* Lowest firmware API version supported */
-#define IWL4965_UCODE_API_MIN 2
-
-#define IWL4965_FW_PRE "iwlwifi-4965-"
-#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode"
-#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api)
-
-/* check contents of special bootstrap uCode SRAM */
-static int iwl4965_verify_bsm(struct iwl_priv *priv)
-{
- __le32 *image = priv->ucode_boot.v_addr;
- u32 len = priv->ucode_boot.len;
- u32 reg;
- u32 val;
-
- IWL_DEBUG_INFO(priv, "Begin verify bsm\n");
-
- /* verify BSM SRAM contents */
- val = iwl_legacy_read_prph(priv, BSM_WR_DWCOUNT_REG);
- for (reg = BSM_SRAM_LOWER_BOUND;
- reg < BSM_SRAM_LOWER_BOUND + len;
- reg += sizeof(u32), image++) {
- val = iwl_legacy_read_prph(priv, reg);
- if (val != le32_to_cpu(*image)) {
- IWL_ERR(priv, "BSM uCode verification failed at "
- "addr 0x%08X+%u (of %u), is 0x%x, s/b 0x%x\n",
- BSM_SRAM_LOWER_BOUND,
- reg - BSM_SRAM_LOWER_BOUND, len,
- val, le32_to_cpu(*image));
- return -EIO;
- }
- }
-
- IWL_DEBUG_INFO(priv, "BSM bootstrap uCode image OK\n");
-
- return 0;
-}
-
-/**
- * iwl4965_load_bsm - Load bootstrap instructions
- *
- * BSM operation:
- *
- * The Bootstrap State Machine (BSM) stores a short bootstrap uCode program
- * in special SRAM that does not power down during RFKILL. When powering back
- * up after power-saving sleeps (or during initial uCode load), the BSM loads
- * the bootstrap program into the on-board processor, and starts it.
- *
- * The bootstrap program loads (via DMA) instructions and data for a new
- * program from host DRAM locations indicated by the host driver in the
- * BSM_DRAM_* registers. Once the new program is loaded, it starts
- * automatically.
- *
- * When initializing the NIC, the host driver points the BSM to the
- * "initialize" uCode image. This uCode sets up some internal data, then
- * notifies host via "initialize alive" that it is complete.
- *
- * The host then replaces the BSM_DRAM_* pointer values to point to the
- * normal runtime uCode instructions and a backup uCode data cache buffer
- * (filled initially with starting data values for the on-board processor),
- * then triggers the "initialize" uCode to load and launch the runtime uCode,
- * which begins normal operation.
- *
- * When doing a power-save shutdown, runtime uCode saves data SRAM into
- * the backup data cache in DRAM before SRAM is powered down.
- *
- * When powering back up, the BSM loads the bootstrap program. This reloads
- * the runtime uCode instructions and the backup data cache into SRAM,
- * and re-launches the runtime uCode from where it left off.
- */
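A condensed sketch of the BSM_DRAM_* pointer hand-off described above, assuming the register names and the iwl_legacy_write_prph() helper used by iwl4965_load_bsm() and iwl4965_set_ucode_ptrs() below; the function name is hypothetical and not part of the original file:

/* Sketch: point the BSM at a uCode image; physical addresses are
 * passed as bits 35:4, hence the >> 4, as in the functions below. */
static void example_point_bsm_at_image(struct iwl_priv *priv,
					dma_addr_t pinst, dma_addr_t pdata,
					u32 inst_len, u32 data_len)
{
	iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst >> 4);
	iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata >> 4);
	iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
	/* The instruction byte count is written last; when switching to the
	 * runtime image, bit 31 (BSM_DRAM_INST_LOAD) tells the uCode that
	 * the new pointers are in place. */
	iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
}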
-static int iwl4965_load_bsm(struct iwl_priv *priv)
-{
- __le32 *image = priv->ucode_boot.v_addr;
- u32 len = priv->ucode_boot.len;
- dma_addr_t pinst;
- dma_addr_t pdata;
- u32 inst_len;
- u32 data_len;
- int i;
- u32 done;
- u32 reg_offset;
- int ret;
-
- IWL_DEBUG_INFO(priv, "Begin load bsm\n");
-
- priv->ucode_type = UCODE_RT;
-
- /* make sure bootstrap program is no larger than BSM's SRAM size */
- if (len > IWL49_MAX_BSM_SIZE)
- return -EINVAL;
-
- /* Tell bootstrap uCode where to find the "Initialize" uCode
- * in host DRAM ... host DRAM physical address bits 35:4 for 4965.
- * NOTE: iwl4965_init_alive_start() will replace these values,
- * after the "initialize" uCode has run, to point to
- * runtime/protocol instructions and backup data cache.
- */
- pinst = priv->ucode_init.p_addr >> 4;
- pdata = priv->ucode_init_data.p_addr >> 4;
- inst_len = priv->ucode_init.len;
- data_len = priv->ucode_init_data.len;
-
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG, inst_len);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG, data_len);
-
- /* Fill BSM memory with bootstrap instructions */
- for (reg_offset = BSM_SRAM_LOWER_BOUND;
- reg_offset < BSM_SRAM_LOWER_BOUND + len;
- reg_offset += sizeof(u32), image++)
- _iwl_legacy_write_prph(priv, reg_offset, le32_to_cpu(*image));
-
- ret = iwl4965_verify_bsm(priv);
- if (ret)
- return ret;
-
- /* Tell BSM to copy from BSM SRAM into instruction SRAM, when asked */
- iwl_legacy_write_prph(priv, BSM_WR_MEM_SRC_REG, 0x0);
- iwl_legacy_write_prph(priv,
- BSM_WR_MEM_DST_REG, IWL49_RTC_INST_LOWER_BOUND);
- iwl_legacy_write_prph(priv, BSM_WR_DWCOUNT_REG, len / sizeof(u32));
-
- /* Load bootstrap code into instruction SRAM now,
- * to prepare to load "initialize" uCode */
- iwl_legacy_write_prph(priv, BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
-
- /* Wait for load of bootstrap uCode to finish */
- for (i = 0; i < 100; i++) {
- done = iwl_legacy_read_prph(priv, BSM_WR_CTRL_REG);
- if (!(done & BSM_WR_CTRL_REG_BIT_START))
- break;
- udelay(10);
- }
- if (i < 100)
- IWL_DEBUG_INFO(priv, "BSM write complete, poll %d iterations\n", i);
- else {
- IWL_ERR(priv, "BSM write did not complete!\n");
- return -EIO;
- }
-
- /* Enable future boot loads whenever power management unit triggers it
- * (e.g. when powering back up after power-save shutdown) */
- iwl_legacy_write_prph(priv,
- BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START_EN);
-
-
- return 0;
-}
-
-/**
- * iwl4965_set_ucode_ptrs - Set uCode address location
- *
- * Tell initialization uCode where to find runtime uCode.
- *
- * BSM registers initially contain pointers to initialization uCode.
- * We need to replace them to load runtime uCode inst and data,
- * and to save runtime data when powering down.
- */
-static int iwl4965_set_ucode_ptrs(struct iwl_priv *priv)
-{
- dma_addr_t pinst;
- dma_addr_t pdata;
- int ret = 0;
-
- /* bits 35:4 for 4965 */
- pinst = priv->ucode_code.p_addr >> 4;
- pdata = priv->ucode_data_backup.p_addr >> 4;
-
- /* Tell bootstrap uCode where to find image to load */
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
- priv->ucode_data.len);
-
- /* Inst byte count must be last to set up, bit 31 signals uCode
- * that all new ptr/size info is in place */
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
- priv->ucode_code.len | BSM_DRAM_INST_LOAD);
- IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
-
- return ret;
-}
-
-/**
- * iwl4965_init_alive_start - handle the "initialize" uCode ALIVE notification
- *
- * Called after REPLY_ALIVE notification received from "initialize" uCode.
- *
- * The 4965 "initialize" ALIVE reply contains calibration data for:
- * Voltage, temperature, and MIMO tx gain correction, now stored in priv
- * (3945 does not contain this data).
- *
- * Tell "initialize" uCode to go ahead and load the runtime uCode.
- */
-static void iwl4965_init_alive_start(struct iwl_priv *priv)
-{
- /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
- * This is a paranoid check, because we would not have gotten the
- * "initialize" alive if code weren't properly loaded. */
- if (iwl4965_verify_ucode(priv)) {
- /* Runtime instruction load was bad;
- * take it all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
- goto restart;
- }
-
- /* Calculate temperature */
- priv->temperature = iwl4965_hw_get_temperature(priv);
-
- /* Send pointers to protocol/runtime uCode image ... init code will
- * load and launch runtime uCode, which will send us another "Alive"
- * notification. */
- IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
- if (iwl4965_set_ucode_ptrs(priv)) {
- /* Runtime instruction load won't happen;
- * take it all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
- goto restart;
- }
- return;
-
-restart:
- queue_work(priv->workqueue, &priv->restart);
-}
-
-static bool iw4965_is_ht40_channel(__le32 rxon_flags)
-{
- int chan_mod = le32_to_cpu(rxon_flags & RXON_FLG_CHANNEL_MODE_MSK)
- >> RXON_FLG_CHANNEL_MODE_POS;
- return ((chan_mod == CHANNEL_MODE_PURE_40) ||
- (chan_mod == CHANNEL_MODE_MIXED));
-}
-
-static void iwl4965_nic_config(struct iwl_priv *priv)
-{
- unsigned long flags;
- u16 radio_cfg;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- radio_cfg = iwl_legacy_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
-
- /* write radio config values to register */
- if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) == EEPROM_4965_RF_CFG_TYPE_MAX)
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
- EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
- EEPROM_RF_CFG_DASH_MSK(radio_cfg));
-
- /* set CSR_HW_CONFIG_REG for uCode use */
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
- CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
-
- priv->calib_info = (struct iwl_eeprom_calib_info *)
- iwl_legacy_eeprom_query_addr(priv,
- EEPROM_4965_CALIB_TXPOWER_OFFSET);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
- * Called after every association, but this runs only once!
- * ... once chain noise is calibrated the first time, it's good forever. */
-static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
-{
- struct iwl_chain_noise_data *data = &(priv->chain_noise_data);
-
- if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
- iwl_legacy_is_any_associated(priv)) {
- struct iwl_calib_diff_gain_cmd cmd;
-
- /* clear data for chain noise calibration algorithm */
- data->chain_noise_a = 0;
- data->chain_noise_b = 0;
- data->chain_noise_c = 0;
- data->chain_signal_a = 0;
- data->chain_signal_b = 0;
- data->chain_signal_c = 0;
- data->beacon_count = 0;
-
- memset(&cmd, 0, sizeof(cmd));
- cmd.hdr.op_code = IWL_PHY_CALIBRATE_DIFF_GAIN_CMD;
- cmd.diff_gain_a = 0;
- cmd.diff_gain_b = 0;
- cmd.diff_gain_c = 0;
- if (iwl_legacy_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
- sizeof(cmd), &cmd))
- IWL_ERR(priv,
- "Could not send REPLY_PHY_CALIBRATION_CMD\n");
- data->state = IWL_CHAIN_NOISE_ACCUMULATE;
- IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
- }
-}
-
-static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
- .min_nrg_cck = 97,
- .max_nrg_cck = 0, /* not used, set to 0 */
-
- .auto_corr_min_ofdm = 85,
- .auto_corr_min_ofdm_mrc = 170,
- .auto_corr_min_ofdm_x1 = 105,
- .auto_corr_min_ofdm_mrc_x1 = 220,
-
- .auto_corr_max_ofdm = 120,
- .auto_corr_max_ofdm_mrc = 210,
- .auto_corr_max_ofdm_x1 = 140,
- .auto_corr_max_ofdm_mrc_x1 = 270,
-
- .auto_corr_min_cck = 125,
- .auto_corr_max_cck = 200,
- .auto_corr_min_cck_mrc = 200,
- .auto_corr_max_cck_mrc = 400,
-
- .nrg_th_cck = 100,
- .nrg_th_ofdm = 100,
-
- .barker_corr_th_min = 190,
- .barker_corr_th_min_mrc = 390,
- .nrg_th_cca = 62,
-};
-
-static void iwl4965_set_ct_threshold(struct iwl_priv *priv)
-{
- /* want Kelvin */
- priv->hw_params.ct_kill_threshold =
- CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY);
-}
-
-/**
- * iwl4965_hw_set_hw_params
- *
- * Called when initializing driver
- */
-static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
-{
- if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
- priv->cfg->mod_params->num_of_queues <= IWL49_NUM_QUEUES)
- priv->cfg->base_params->num_of_queues =
- priv->cfg->mod_params->num_of_queues;
-
- priv->hw_params.max_txq_num = priv->cfg->base_params->num_of_queues;
- priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
- priv->hw_params.scd_bc_tbls_size =
- priv->cfg->base_params->num_of_queues *
- sizeof(struct iwl4965_scd_bc_tbl);
- priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
- priv->hw_params.max_stations = IWL4965_STATION_COUNT;
- priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWL4965_BROADCAST_ID;
- priv->hw_params.max_data_size = IWL49_RTC_DATA_SIZE;
- priv->hw_params.max_inst_size = IWL49_RTC_INST_SIZE;
- priv->hw_params.max_bsm_size = BSM_SRAM_SIZE;
- priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_5GHZ);
-
- priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
-
- priv->hw_params.tx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_tx_ant);
- priv->hw_params.rx_chains_num = iwl4965_num_of_ant(priv->cfg->valid_rx_ant);
- priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
- priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
-
- iwl4965_set_ct_threshold(priv);
-
- priv->hw_params.sens = &iwl4965_sensitivity;
- priv->hw_params.beacon_time_tsf_bits = IWL4965_EXT_BEACON_TIME_POS;
-
- return 0;
-}
-
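-/* Signed integer division rounded to the nearest integer: with the signs
- * stripped, (num * 2 + denom) / (denom * 2) rounds num/denom half-up, and the
- * sign is restored at the end.  For example, num = 7, denom = 3 gives
- * (14 + 3) / 6 = 2, while num = 8, denom = 3 gives (16 + 3) / 6 = 3. */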
-static s32 iwl4965_math_div_round(s32 num, s32 denom, s32 *res)
-{
- s32 sign = 1;
-
- if (num < 0) {
- sign = -sign;
- num = -num;
- }
- if (denom < 0) {
- sign = -sign;
- denom = -denom;
- }
- *res = 1;
- *res = ((num * 2 + denom) / (denom * 2)) * sign;
-
- return 1;
-}
-
-/**
- * iwl4965_get_voltage_compensation - Power supply voltage comp for txpower
- *
- * Determines power supply voltage compensation for txpower calculations.
- * Returns number of 1/2-dB steps to subtract from gain table index,
- * to compensate for difference between power supply voltage during
- * factory measurements, vs. current power supply voltage.
- *
- * Voltage indication is higher for lower voltage.
- * Lower voltage requires more gain (lower gain table index).
- */
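-/* Example: if current_voltage exceeds eeprom_voltage by exactly two
- * TX_POWER_IWL_VOLTAGE_CODES_PER_03V units, the rounded quotient is 2, it is
- * doubled to 4 because current > eeprom, and it is then dropped back to 0
- * for falling outside the allowed [-2, 2] range. */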
-static s32 iwl4965_get_voltage_compensation(s32 eeprom_voltage,
- s32 current_voltage)
-{
- s32 comp = 0;
-
- if ((TX_POWER_IWL_ILLEGAL_VOLTAGE == eeprom_voltage) ||
- (TX_POWER_IWL_ILLEGAL_VOLTAGE == current_voltage))
- return 0;
-
- iwl4965_math_div_round(current_voltage - eeprom_voltage,
- TX_POWER_IWL_VOLTAGE_CODES_PER_03V, &comp);
-
- if (current_voltage > eeprom_voltage)
- comp *= 2;
- if ((comp < -2) || (comp > 2))
- comp = 0;
-
- return comp;
-}
-
-static s32 iwl4965_get_tx_atten_grp(u16 channel)
-{
- if (channel >= CALIB_IWL_TX_ATTEN_GR5_FCH &&
- channel <= CALIB_IWL_TX_ATTEN_GR5_LCH)
- return CALIB_CH_GROUP_5;
-
- if (channel >= CALIB_IWL_TX_ATTEN_GR1_FCH &&
- channel <= CALIB_IWL_TX_ATTEN_GR1_LCH)
- return CALIB_CH_GROUP_1;
-
- if (channel >= CALIB_IWL_TX_ATTEN_GR2_FCH &&
- channel <= CALIB_IWL_TX_ATTEN_GR2_LCH)
- return CALIB_CH_GROUP_2;
-
- if (channel >= CALIB_IWL_TX_ATTEN_GR3_FCH &&
- channel <= CALIB_IWL_TX_ATTEN_GR3_LCH)
- return CALIB_CH_GROUP_3;
-
- if (channel >= CALIB_IWL_TX_ATTEN_GR4_FCH &&
- channel <= CALIB_IWL_TX_ATTEN_GR4_LCH)
- return CALIB_CH_GROUP_4;
-
- return -EINVAL;
-}
-
-static u32 iwl4965_get_sub_band(const struct iwl_priv *priv, u32 channel)
-{
- s32 b = -1;
-
- for (b = 0; b < EEPROM_TX_POWER_BANDS; b++) {
- if (priv->calib_info->band_info[b].ch_from == 0)
- continue;
-
- if ((channel >= priv->calib_info->band_info[b].ch_from)
- && (channel <= priv->calib_info->band_info[b].ch_to))
- break;
- }
-
- return b;
-}
-
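-/* Linear interpolation between (x1, y1) and (x2, y2), evaluated at x and
- * rounded to the nearest integer.  For example, x = 40 with
- * (x1, y1) = (36, 10) and (x2, y2) = (44, 18) yields
- * round((44 - 40) * (10 - 18) / (44 - 36)) + 18 = -4 + 18 = 14. */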
-static s32 iwl4965_interpolate_value(s32 x, s32 x1, s32 y1, s32 x2, s32 y2)
-{
- s32 val;
-
- if (x2 == x1)
- return y1;
- else {
- iwl4965_math_div_round((x2 - x) * (y1 - y2), (x2 - x1), &val);
- return val + y2;
- }
-}
-
-/**
- * iwl4965_interpolate_chan - Interpolate factory measurements for one channel
- *
- * Interpolates factory measurements from the two sample channels within a
- * sub-band, to apply to channel of interest. Interpolation is proportional to
- * differences in channel frequencies, which is proportional to differences
- * in channel number.
- */
-static int iwl4965_interpolate_chan(struct iwl_priv *priv, u32 channel,
- struct iwl_eeprom_calib_ch_info *chan_info)
-{
- s32 s = -1;
- u32 c;
- u32 m;
- const struct iwl_eeprom_calib_measure *m1;
- const struct iwl_eeprom_calib_measure *m2;
- struct iwl_eeprom_calib_measure *omeas;
- u32 ch_i1;
- u32 ch_i2;
-
- s = iwl4965_get_sub_band(priv, channel);
- if (s >= EEPROM_TX_POWER_BANDS) {
- IWL_ERR(priv, "Tx Power can not find channel %d\n", channel);
- return -1;
- }
-
- ch_i1 = priv->calib_info->band_info[s].ch1.ch_num;
- ch_i2 = priv->calib_info->band_info[s].ch2.ch_num;
- chan_info->ch_num = (u8) channel;
-
- IWL_DEBUG_TXPOWER(priv, "channel %d subband %d factory cal ch %d & %d\n",
- channel, s, ch_i1, ch_i2);
-
- for (c = 0; c < EEPROM_TX_POWER_TX_CHAINS; c++) {
- for (m = 0; m < EEPROM_TX_POWER_MEASUREMENTS; m++) {
- m1 = &(priv->calib_info->band_info[s].ch1.
- measurements[c][m]);
- m2 = &(priv->calib_info->band_info[s].ch2.
- measurements[c][m]);
- omeas = &(chan_info->measurements[c][m]);
-
- omeas->actual_pow =
- (u8) iwl4965_interpolate_value(channel, ch_i1,
- m1->actual_pow,
- ch_i2,
- m2->actual_pow);
- omeas->gain_idx =
- (u8) iwl4965_interpolate_value(channel, ch_i1,
- m1->gain_idx, ch_i2,
- m2->gain_idx);
- omeas->temperature =
- (u8) iwl4965_interpolate_value(channel, ch_i1,
- m1->temperature,
- ch_i2,
- m2->temperature);
- omeas->pa_det =
- (s8) iwl4965_interpolate_value(channel, ch_i1,
- m1->pa_det, ch_i2,
- m2->pa_det);
-
- IWL_DEBUG_TXPOWER(priv,
- "chain %d meas %d AP1=%d AP2=%d AP=%d\n", c, m,
- m1->actual_pow, m2->actual_pow, omeas->actual_pow);
- IWL_DEBUG_TXPOWER(priv,
- "chain %d meas %d NI1=%d NI2=%d NI=%d\n", c, m,
- m1->gain_idx, m2->gain_idx, omeas->gain_idx);
- IWL_DEBUG_TXPOWER(priv,
- "chain %d meas %d PA1=%d PA2=%d PA=%d\n", c, m,
- m1->pa_det, m2->pa_det, omeas->pa_det);
- IWL_DEBUG_TXPOWER(priv,
- "chain %d meas %d T1=%d T2=%d T=%d\n", c, m,
- m1->temperature, m2->temperature,
- omeas->temperature);
- }
- }
-
- return 0;
-}
-
-/* bit-rate-dependent table to prevent Tx distortion, in half-dB units,
- * for OFDM 6, 12, 18, 24, 36, 48, 54, 60 MBit, and CCK all rates. */
-static s32 back_off_table[] = {
- 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 20 MHz */
- 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 20 MHz */
- 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM SISO 40 MHz */
- 10, 10, 10, 10, 10, 15, 17, 20, /* OFDM MIMO 40 MHz */
- 10 /* CCK */
-};
-
-/* Thermal compensation values for txpower for various frequency ranges ...
- * ratios from 3:1 to 4.5:1 of degrees (Celsius) per half-dB gain adjust */
-static struct iwl4965_txpower_comp_entry {
- s32 degrees_per_05db_a;
- s32 degrees_per_05db_a_denom;
-} tx_power_cmp_tble[CALIB_CH_GROUP_MAX] = {
- {9, 2}, /* group 0 5.2, ch 34-43 */
- {4, 1}, /* group 1 5.2, ch 44-70 */
- {4, 1}, /* group 2 5.2, ch 71-124 */
- {4, 1}, /* group 3 5.2, ch 125-200 */
- {3, 1} /* group 4 2.4, ch all */
-};
-
-static s32 get_min_power_index(s32 rate_power_index, u32 band)
-{
- if (!band) {
- if ((rate_power_index & 7) <= 4)
- return MIN_TX_GAIN_INDEX_52GHZ_EXT;
- }
- return MIN_TX_GAIN_INDEX;
-}
-
-struct gain_entry {
- u8 dsp;
- u8 radio;
-};
-
-static const struct gain_entry gain_table[2][108] = {
- /* 5.2GHz power gain index table */
- {
- {123, 0x3F}, /* highest txpower */
- {117, 0x3F},
- {110, 0x3F},
- {104, 0x3F},
- {98, 0x3F},
- {110, 0x3E},
- {104, 0x3E},
- {98, 0x3E},
- {110, 0x3D},
- {104, 0x3D},
- {98, 0x3D},
- {110, 0x3C},
- {104, 0x3C},
- {98, 0x3C},
- {110, 0x3B},
- {104, 0x3B},
- {98, 0x3B},
- {110, 0x3A},
- {104, 0x3A},
- {98, 0x3A},
- {110, 0x39},
- {104, 0x39},
- {98, 0x39},
- {110, 0x38},
- {104, 0x38},
- {98, 0x38},
- {110, 0x37},
- {104, 0x37},
- {98, 0x37},
- {110, 0x36},
- {104, 0x36},
- {98, 0x36},
- {110, 0x35},
- {104, 0x35},
- {98, 0x35},
- {110, 0x34},
- {104, 0x34},
- {98, 0x34},
- {110, 0x33},
- {104, 0x33},
- {98, 0x33},
- {110, 0x32},
- {104, 0x32},
- {98, 0x32},
- {110, 0x31},
- {104, 0x31},
- {98, 0x31},
- {110, 0x30},
- {104, 0x30},
- {98, 0x30},
- {110, 0x25},
- {104, 0x25},
- {98, 0x25},
- {110, 0x24},
- {104, 0x24},
- {98, 0x24},
- {110, 0x23},
- {104, 0x23},
- {98, 0x23},
- {110, 0x22},
- {104, 0x18},
- {98, 0x18},
- {110, 0x17},
- {104, 0x17},
- {98, 0x17},
- {110, 0x16},
- {104, 0x16},
- {98, 0x16},
- {110, 0x15},
- {104, 0x15},
- {98, 0x15},
- {110, 0x14},
- {104, 0x14},
- {98, 0x14},
- {110, 0x13},
- {104, 0x13},
- {98, 0x13},
- {110, 0x12},
- {104, 0x08},
- {98, 0x08},
- {110, 0x07},
- {104, 0x07},
- {98, 0x07},
- {110, 0x06},
- {104, 0x06},
- {98, 0x06},
- {110, 0x05},
- {104, 0x05},
- {98, 0x05},
- {110, 0x04},
- {104, 0x04},
- {98, 0x04},
- {110, 0x03},
- {104, 0x03},
- {98, 0x03},
- {110, 0x02},
- {104, 0x02},
- {98, 0x02},
- {110, 0x01},
- {104, 0x01},
- {98, 0x01},
- {110, 0x00},
- {104, 0x00},
- {98, 0x00},
- {93, 0x00},
- {88, 0x00},
- {83, 0x00},
- {78, 0x00},
- },
- /* 2.4GHz power gain index table */
- {
- {110, 0x3f}, /* highest txpower */
- {104, 0x3f},
- {98, 0x3f},
- {110, 0x3e},
- {104, 0x3e},
- {98, 0x3e},
- {110, 0x3d},
- {104, 0x3d},
- {98, 0x3d},
- {110, 0x3c},
- {104, 0x3c},
- {98, 0x3c},
- {110, 0x3b},
- {104, 0x3b},
- {98, 0x3b},
- {110, 0x3a},
- {104, 0x3a},
- {98, 0x3a},
- {110, 0x39},
- {104, 0x39},
- {98, 0x39},
- {110, 0x38},
- {104, 0x38},
- {98, 0x38},
- {110, 0x37},
- {104, 0x37},
- {98, 0x37},
- {110, 0x36},
- {104, 0x36},
- {98, 0x36},
- {110, 0x35},
- {104, 0x35},
- {98, 0x35},
- {110, 0x34},
- {104, 0x34},
- {98, 0x34},
- {110, 0x33},
- {104, 0x33},
- {98, 0x33},
- {110, 0x32},
- {104, 0x32},
- {98, 0x32},
- {110, 0x31},
- {104, 0x31},
- {98, 0x31},
- {110, 0x30},
- {104, 0x30},
- {98, 0x30},
- {110, 0x6},
- {104, 0x6},
- {98, 0x6},
- {110, 0x5},
- {104, 0x5},
- {98, 0x5},
- {110, 0x4},
- {104, 0x4},
- {98, 0x4},
- {110, 0x3},
- {104, 0x3},
- {98, 0x3},
- {110, 0x2},
- {104, 0x2},
- {98, 0x2},
- {110, 0x1},
- {104, 0x1},
- {98, 0x1},
- {110, 0x0},
- {104, 0x0},
- {98, 0x0},
- {97, 0},
- {96, 0},
- {95, 0},
- {94, 0},
- {93, 0},
- {92, 0},
- {91, 0},
- {90, 0},
- {89, 0},
- {88, 0},
- {87, 0},
- {86, 0},
- {85, 0},
- {84, 0},
- {83, 0},
- {82, 0},
- {81, 0},
- {80, 0},
- {79, 0},
- {78, 0},
- {77, 0},
- {76, 0},
- {75, 0},
- {74, 0},
- {73, 0},
- {72, 0},
- {71, 0},
- {70, 0},
- {69, 0},
- {68, 0},
- {67, 0},
- {66, 0},
- {65, 0},
- {64, 0},
- {63, 0},
- {62, 0},
- {61, 0},
- {60, 0},
- {59, 0},
- }
-};
-
-static int iwl4965_fill_txpower_tbl(struct iwl_priv *priv, u8 band, u16 channel,
- u8 is_ht40, u8 ctrl_chan_high,
- struct iwl4965_tx_power_db *tx_power_tbl)
-{
- u8 saturation_power;
- s32 target_power;
- s32 user_target_power;
- s32 power_limit;
- s32 current_temp;
- s32 reg_limit;
- s32 current_regulatory;
- s32 txatten_grp = CALIB_CH_GROUP_MAX;
- int i;
- int c;
- const struct iwl_channel_info *ch_info = NULL;
- struct iwl_eeprom_calib_ch_info ch_eeprom_info;
- const struct iwl_eeprom_calib_measure *measurement;
- s16 voltage;
- s32 init_voltage;
- s32 voltage_compensation;
- s32 degrees_per_05db_num;
- s32 degrees_per_05db_denom;
- s32 factory_temp;
- s32 temperature_comp[2];
- s32 factory_gain_index[2];
- s32 factory_actual_pwr[2];
- s32 power_index;
-
- /* tx_power_user_lmt is in dBm, convert to half-dBm (half-dB units
- * are used for indexing into txpower table) */
- user_target_power = 2 * priv->tx_power_user_lmt;
-
- /* Get current (RXON) channel, band, width */
- IWL_DEBUG_TXPOWER(priv, "chan %d band %d is_ht40 %d\n", channel, band,
- is_ht40);
-
- ch_info = iwl_legacy_get_channel_info(priv, priv->band, channel);
-
- if (!iwl_legacy_is_channel_valid(ch_info))
- return -EINVAL;
-
- /* get txatten group, used to select 1) thermal txpower adjustment
- * and 2) mimo txpower balance between Tx chains. */
- txatten_grp = iwl4965_get_tx_atten_grp(channel);
- if (txatten_grp < 0) {
- IWL_ERR(priv, "Can't find txatten group for channel %d.\n",
- channel);
- return txatten_grp;
- }
-
- IWL_DEBUG_TXPOWER(priv, "channel %d belongs to txatten group %d\n",
- channel, txatten_grp);
-
- if (is_ht40) {
- if (ctrl_chan_high)
- channel -= 2;
- else
- channel += 2;
- }
-
- /* hardware txpower limits ...
- * saturation (clipping distortion) txpowers are in half-dBm */
- if (band)
- saturation_power = priv->calib_info->saturation_power24;
- else
- saturation_power = priv->calib_info->saturation_power52;
-
- if (saturation_power < IWL_TX_POWER_SATURATION_MIN ||
- saturation_power > IWL_TX_POWER_SATURATION_MAX) {
- if (band)
- saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_24;
- else
- saturation_power = IWL_TX_POWER_DEFAULT_SATURATION_52;
- }
-
- /* regulatory txpower limits ... reg_limit values are in half-dBm,
- * max_power_avg values are in dBm, convert * 2 */
- if (is_ht40)
- reg_limit = ch_info->ht40_max_power_avg * 2;
- else
- reg_limit = ch_info->max_power_avg * 2;
-
- if ((reg_limit < IWL_TX_POWER_REGULATORY_MIN) ||
- (reg_limit > IWL_TX_POWER_REGULATORY_MAX)) {
- if (band)
- reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_24;
- else
- reg_limit = IWL_TX_POWER_DEFAULT_REGULATORY_52;
- }
-
- /* Interpolate txpower calibration values for this channel,
- * based on factory calibration tests on spaced channels. */
- iwl4965_interpolate_chan(priv, channel, &ch_eeprom_info);
-
- /* calculate tx gain adjustment based on power supply voltage */
- voltage = le16_to_cpu(priv->calib_info->voltage);
- init_voltage = (s32)le32_to_cpu(priv->card_alive_init.voltage);
- voltage_compensation =
- iwl4965_get_voltage_compensation(voltage, init_voltage);
-
- IWL_DEBUG_TXPOWER(priv, "curr volt %d eeprom volt %d volt comp %d\n",
- init_voltage,
- voltage, voltage_compensation);
-
- /* get current temperature (Celsius) */
- current_temp = max(priv->temperature, IWL_TX_POWER_TEMPERATURE_MIN);
-	current_temp = min(current_temp, IWL_TX_POWER_TEMPERATURE_MAX);
- current_temp = KELVIN_TO_CELSIUS(current_temp);
-
- /* select thermal txpower adjustment params, based on channel group
- * (same frequency group used for mimo txatten adjustment) */
- degrees_per_05db_num =
- tx_power_cmp_tble[txatten_grp].degrees_per_05db_a;
- degrees_per_05db_denom =
- tx_power_cmp_tble[txatten_grp].degrees_per_05db_a_denom;
-
- /* get per-chain txpower values from factory measurements */
- for (c = 0; c < 2; c++) {
- measurement = &ch_eeprom_info.measurements[c][1];
-
- /* txgain adjustment (in half-dB steps) based on difference
- * between factory and current temperature */
- factory_temp = measurement->temperature;
- iwl4965_math_div_round((current_temp - factory_temp) *
- degrees_per_05db_denom,
- degrees_per_05db_num,
- &temperature_comp[c]);
-
- factory_gain_index[c] = measurement->gain_idx;
- factory_actual_pwr[c] = measurement->actual_pow;
-
- IWL_DEBUG_TXPOWER(priv, "chain = %d\n", c);
- IWL_DEBUG_TXPOWER(priv, "fctry tmp %d, "
- "curr tmp %d, comp %d steps\n",
- factory_temp, current_temp,
- temperature_comp[c]);
-
- IWL_DEBUG_TXPOWER(priv, "fctry idx %d, fctry pwr %d\n",
- factory_gain_index[c],
- factory_actual_pwr[c]);
- }
-
- /* for each of 33 bit-rates (including 1 for CCK) */
- for (i = 0; i < POWER_TABLE_NUM_ENTRIES; i++) {
- u8 is_mimo_rate;
- union iwl4965_tx_power_dual_stream tx_power;
-
- /* for mimo, reduce each chain's txpower by half
- * (3dB, 6 steps), so total output power is regulatory
- * compliant. */
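-		/* Per back_off_table above, entries 0-7 are OFDM SISO 20 MHz,
-		 * 8-15 MIMO 20 MHz, 16-23 SISO 40 MHz, 24-31 MIMO 40 MHz and
-		 * entry 32 is CCK, so bit 3 of the index flags the MIMO rates. */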
- if (i & 0x8) {
- current_regulatory = reg_limit -
- IWL_TX_POWER_MIMO_REGULATORY_COMPENSATION;
- is_mimo_rate = 1;
- } else {
- current_regulatory = reg_limit;
- is_mimo_rate = 0;
- }
-
- /* find txpower limit, either hardware or regulatory */
- power_limit = saturation_power - back_off_table[i];
- if (power_limit > current_regulatory)
- power_limit = current_regulatory;
-
- /* reduce user's txpower request if necessary
- * for this rate on this channel */
- target_power = user_target_power;
- if (target_power > power_limit)
- target_power = power_limit;
-
- IWL_DEBUG_TXPOWER(priv, "rate %d sat %d reg %d usr %d tgt %d\n",
- i, saturation_power - back_off_table[i],
- current_regulatory, user_target_power,
- target_power);
-
- /* for each of 2 Tx chains (radio transmitters) */
- for (c = 0; c < 2; c++) {
- s32 atten_value;
-
- if (is_mimo_rate)
- atten_value =
- (s32)le32_to_cpu(priv->card_alive_init.
- tx_atten[txatten_grp][c]);
- else
- atten_value = 0;
-
- /* calculate index; higher index means lower txpower */
- power_index = (u8) (factory_gain_index[c] -
- (target_power -
- factory_actual_pwr[c]) -
- temperature_comp[c] -
- voltage_compensation +
- atten_value);
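-			/* i.e. power_index = factory_gain_index
-			 *	- (target_power - factory_actual_pwr)
-			 *	- temperature_comp - voltage_compensation
-			 *	+ atten_value, each term in half-dB steps */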
-
-/* IWL_DEBUG_TXPOWER(priv, "calculated txpower index %d\n",
- power_index); */
-
- if (power_index < get_min_power_index(i, band))
- power_index = get_min_power_index(i, band);
-
- /* adjust 5 GHz index to support negative indexes */
- if (!band)
- power_index += 9;
-
- /* CCK, rate 32, reduce txpower for CCK */
- if (i == POWER_TABLE_CCK_ENTRY)
- power_index +=
- IWL_TX_POWER_CCK_COMPENSATION_C_STEP;
-
- /* stay within the table! */
- if (power_index > 107) {
- IWL_WARN(priv, "txpower index %d > 107\n",
- power_index);
- power_index = 107;
- }
- if (power_index < 0) {
- IWL_WARN(priv, "txpower index %d < 0\n",
- power_index);
- power_index = 0;
- }
-
- /* fill txpower command for this rate/chain */
- tx_power.s.radio_tx_gain[c] =
- gain_table[band][power_index].radio;
- tx_power.s.dsp_predis_atten[c] =
- gain_table[band][power_index].dsp;
-
- IWL_DEBUG_TXPOWER(priv, "chain %d mimo %d index %d "
- "gain 0x%02x dsp %d\n",
- c, atten_value, power_index,
- tx_power.s.radio_tx_gain[c],
- tx_power.s.dsp_predis_atten[c]);
- } /* for each chain */
-
- tx_power_tbl->power_tbl[i].dw = cpu_to_le32(tx_power.dw);
-
- } /* for each rate */
-
- return 0;
-}
-
-/**
- * iwl4965_send_tx_power - Configure the TXPOWER level user limit
- *
- * Uses the active RXON for channel, band, and characteristics (ht40, high)
- * The power limit is taken from priv->tx_power_user_lmt.
- */
-static int iwl4965_send_tx_power(struct iwl_priv *priv)
-{
- struct iwl4965_txpowertable_cmd cmd = { 0 };
- int ret;
- u8 band = 0;
- bool is_ht40 = false;
- u8 ctrl_chan_high = 0;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
- "TX Power requested while scanning!\n"))
- return -EAGAIN;
-
- band = priv->band == IEEE80211_BAND_2GHZ;
-
- is_ht40 = iw4965_is_ht40_channel(ctx->active.flags);
-
- if (is_ht40 && (ctx->active.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
- ctrl_chan_high = 1;
-
- cmd.band = band;
- cmd.channel = ctx->active.channel;
-
- ret = iwl4965_fill_txpower_tbl(priv, band,
- le16_to_cpu(ctx->active.channel),
- is_ht40, ctrl_chan_high, &cmd.tx_power);
- if (ret)
- goto out;
-
- ret = iwl_legacy_send_cmd_pdu(priv,
- REPLY_TX_PWR_TABLE_CMD, sizeof(cmd), &cmd);
-
-out:
- return ret;
-}
-
-static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- int ret = 0;
- struct iwl4965_rxon_assoc_cmd rxon_assoc;
- const struct iwl_legacy_rxon_cmd *rxon1 = &ctx->staging;
- const struct iwl_legacy_rxon_cmd *rxon2 = &ctx->active;
-
- if ((rxon1->flags == rxon2->flags) &&
- (rxon1->filter_flags == rxon2->filter_flags) &&
- (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
- (rxon1->ofdm_ht_single_stream_basic_rates ==
- rxon2->ofdm_ht_single_stream_basic_rates) &&
- (rxon1->ofdm_ht_dual_stream_basic_rates ==
- rxon2->ofdm_ht_dual_stream_basic_rates) &&
- (rxon1->rx_chain == rxon2->rx_chain) &&
- (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
- IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
- return 0;
- }
-
- rxon_assoc.flags = ctx->staging.flags;
- rxon_assoc.filter_flags = ctx->staging.filter_flags;
- rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates;
- rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates;
- rxon_assoc.reserved = 0;
- rxon_assoc.ofdm_ht_single_stream_basic_rates =
- ctx->staging.ofdm_ht_single_stream_basic_rates;
- rxon_assoc.ofdm_ht_dual_stream_basic_rates =
- ctx->staging.ofdm_ht_dual_stream_basic_rates;
- rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain;
-
- ret = iwl_legacy_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
- sizeof(rxon_assoc), &rxon_assoc, NULL);
-
- return ret;
-}
-
-static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- /* cast away the const for active_rxon in this function */
- struct iwl_legacy_rxon_cmd *active_rxon = (void *)&ctx->active;
- int ret;
- bool new_assoc =
- !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
-
- if (!iwl_legacy_is_alive(priv))
- return -EBUSY;
-
- if (!ctx->is_active)
- return 0;
-
- /* always get timestamp with Rx frame */
- ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
-
- ret = iwl_legacy_check_rxon_cmd(priv, ctx);
- if (ret) {
- IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
- return -EINVAL;
- }
-
- /*
- * receive commit_rxon request
-	 * abort any previous channel switch if one is still in progress
- */
- if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
- (priv->switch_channel != ctx->staging.channel)) {
- IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
- le16_to_cpu(priv->switch_channel));
- iwl_legacy_chswitch_done(priv, false);
- }
-
- /* If we don't need to send a full RXON, we can use
- * iwl_rxon_assoc_cmd which is used to reconfigure filter
- * and other flags for the current radio configuration. */
- if (!iwl_legacy_full_rxon_required(priv, ctx)) {
- ret = iwl_legacy_send_rxon_assoc(priv, ctx);
- if (ret) {
- IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
- return ret;
- }
-
- memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
- iwl_legacy_print_rx_config_cmd(priv, ctx);
- /*
- * We do not commit tx power settings while channel changing,
- * do it now if tx power changed.
- */
- iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
- return 0;
- }
-
-	/* If we are currently associated and the new config requires
-	 * an RXON_ASSOC and wants the associated mask enabled, we must
-	 * clear the associated bit from the active configuration
-	 * before we apply the new config */
- if (iwl_legacy_is_associated_ctx(ctx) && new_assoc) {
- IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
- active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
-
- ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
- sizeof(struct iwl_legacy_rxon_cmd),
- active_rxon);
-
- /* If the mask clearing failed then we set
- * active_rxon back to what it was previously */
- if (ret) {
- active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
- IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
- return ret;
- }
- iwl_legacy_clear_ucode_stations(priv, ctx);
- iwl_legacy_restore_stations(priv, ctx);
- ret = iwl4965_restore_default_wep_keys(priv, ctx);
- if (ret) {
- IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
- return ret;
- }
- }
-
- IWL_DEBUG_INFO(priv, "Sending RXON\n"
- "* with%s RXON_FILTER_ASSOC_MSK\n"
- "* channel = %d\n"
- "* bssid = %pM\n",
- (new_assoc ? "" : "out"),
- le16_to_cpu(ctx->staging.channel),
- ctx->staging.bssid_addr);
-
- iwl_legacy_set_rxon_hwcrypto(priv, ctx,
- !priv->cfg->mod_params->sw_crypto);
-
- /* Apply the new configuration
- * RXON unassoc clears the station table in uCode so restoration of
- * stations is needed after it (the RXON command) completes
- */
- if (!new_assoc) {
- ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
- sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
- if (ret) {
- IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
- return ret;
- }
- IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
- memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
- iwl_legacy_clear_ucode_stations(priv, ctx);
- iwl_legacy_restore_stations(priv, ctx);
- ret = iwl4965_restore_default_wep_keys(priv, ctx);
- if (ret) {
- IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
- return ret;
- }
- }
- if (new_assoc) {
- priv->start_calib = 0;
- /* Apply the new configuration
-		 * RXON assoc doesn't clear the station table in uCode.
- */
- ret = iwl_legacy_send_cmd_pdu(priv, ctx->rxon_cmd,
- sizeof(struct iwl_legacy_rxon_cmd), &ctx->staging);
- if (ret) {
- IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
- return ret;
- }
- memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
- }
- iwl_legacy_print_rx_config_cmd(priv, ctx);
-
- iwl4965_init_sensitivity(priv);
-
-	/* If we issue a new RXON command which requires a tune, then we must
- * send a new TXPOWER command or we won't be able to Tx any frames */
- ret = iwl_legacy_set_tx_power(priv, priv->tx_power_next, true);
- if (ret) {
- IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
- struct ieee80211_channel_switch *ch_switch)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- int rc;
- u8 band = 0;
- bool is_ht40 = false;
- u8 ctrl_chan_high = 0;
- struct iwl4965_channel_switch_cmd cmd;
- const struct iwl_channel_info *ch_info;
- u32 switch_time_in_usec, ucode_switch_time;
- u16 ch;
- u32 tsf_low;
- u8 switch_count;
- u16 beacon_interval = le16_to_cpu(ctx->timing.beacon_interval);
- struct ieee80211_vif *vif = ctx->vif;
- band = priv->band == IEEE80211_BAND_2GHZ;
-
- is_ht40 = iw4965_is_ht40_channel(ctx->staging.flags);
-
- if (is_ht40 &&
- (ctx->staging.flags & RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK))
- ctrl_chan_high = 1;
-
- cmd.band = band;
- cmd.expect_beacon = 0;
- ch = ch_switch->channel->hw_value;
- cmd.channel = cpu_to_le16(ch);
- cmd.rxon_flags = ctx->staging.flags;
- cmd.rxon_filter_flags = ctx->staging.filter_flags;
- switch_count = ch_switch->count;
- tsf_low = ch_switch->timestamp & 0x0ffffffff;
- /*
- * calculate the ucode channel switch time
- * adding TSF as one of the factor for when to switch
- */
- if ((priv->ucode_beacon_time > tsf_low) && beacon_interval) {
- if (switch_count > ((priv->ucode_beacon_time - tsf_low) /
- beacon_interval)) {
- switch_count -= (priv->ucode_beacon_time -
- tsf_low) / beacon_interval;
- } else
- switch_count = 0;
- }
- if (switch_count <= 1)
- cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
- else {
- switch_time_in_usec =
- vif->bss_conf.beacon_int * switch_count * TIME_UNIT;
- ucode_switch_time = iwl_legacy_usecs_to_beacons(priv,
- switch_time_in_usec,
- beacon_interval);
- cmd.switch_time = iwl_legacy_add_beacon_time(priv,
- priv->ucode_beacon_time,
- ucode_switch_time,
- beacon_interval);
- }
- IWL_DEBUG_11H(priv, "uCode time for the switch is 0x%x\n",
- cmd.switch_time);
- ch_info = iwl_legacy_get_channel_info(priv, priv->band, ch);
- if (ch_info)
- cmd.expect_beacon = iwl_legacy_is_channel_radar(ch_info);
- else {
- IWL_ERR(priv, "invalid channel switch from %u to %u\n",
- ctx->active.channel, ch);
- return -EFAULT;
- }
-
- rc = iwl4965_fill_txpower_tbl(priv, band, ch, is_ht40,
- ctrl_chan_high, &cmd.tx_power);
- if (rc) {
- IWL_DEBUG_11H(priv, "error:%d fill txpower_tbl\n", rc);
- return rc;
- }
-
- return iwl_legacy_send_cmd_pdu(priv,
- REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
-}
-
-/**
- * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
- */
-static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- u16 byte_cnt)
-{
- struct iwl4965_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
- int txq_id = txq->q.id;
- int write_ptr = txq->q.write_ptr;
- int len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
- __le16 bc_ent;
-
- WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
-
- bc_ent = cpu_to_le16(len & 0xFFF);
- /* Set up byte count within first 256 entries */
- scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
-
- /* If within first 64 entries, duplicate at end */
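-	/* (the scheduler reads a 64-entry window that may wrap past the end of
-	 * the 256-entry ring, so the first TFD_QUEUE_SIZE_BC_DUP entries are
-	 * mirrored after entry TFD_QUEUE_SIZE_MAX to keep that window
-	 * contiguous) */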
- if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
- scd_bc_tbl[txq_id].
- tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
-}
-
-/**
- * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
- * @priv: driver private data; the temperature reading comes from the uCode
- *	statistics
- *
- * A return of <0 indicates bogus data in the statistics
- */
-static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
-{
- s32 temperature;
- s32 vt;
- s32 R1, R2, R3;
- u32 R4;
-
- if (test_bit(STATUS_TEMPERATURE, &priv->status) &&
- (priv->_4965.statistics.flag &
- STATISTICS_REPLY_FLG_HT40_MODE_MSK)) {
- IWL_DEBUG_TEMP(priv, "Running HT40 temperature calibration\n");
- R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[1]);
- R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[1]);
- R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[1]);
- R4 = le32_to_cpu(priv->card_alive_init.therm_r4[1]);
- } else {
- IWL_DEBUG_TEMP(priv, "Running temperature calibration\n");
- R1 = (s32)le32_to_cpu(priv->card_alive_init.therm_r1[0]);
- R2 = (s32)le32_to_cpu(priv->card_alive_init.therm_r2[0]);
- R3 = (s32)le32_to_cpu(priv->card_alive_init.therm_r3[0]);
- R4 = le32_to_cpu(priv->card_alive_init.therm_r4[0]);
- }
-
- /*
- * Temperature is only 23 bits, so sign extend out to 32.
- *
- * NOTE If we haven't received a statistics notification yet
- * with an updated temperature, use R4 provided to us in the
- * "initialize" ALIVE response.
- */
- if (!test_bit(STATUS_TEMPERATURE, &priv->status))
- vt = sign_extend32(R4, 23);
- else
- vt = sign_extend32(le32_to_cpu(priv->_4965.statistics.
- general.common.temperature), 23);
-
- IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
-
- if (R3 == R1) {
- IWL_ERR(priv, "Calibration conflict R1 == R3\n");
- return -1;
- }
-
- /* Calculate temperature in degrees Kelvin, adjust by 97%.
- * Add offset to center the adjustment around 0 degrees Centigrade. */
- temperature = TEMPERATURE_CALIB_A_VAL * (vt - R2);
- temperature /= (R3 - R1);
- temperature = (temperature * 97) / 100 + TEMPERATURE_CALIB_KELVIN_OFFSET;
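-	/* i.e. temperature = 0.97 * TEMPERATURE_CALIB_A_VAL * (vt - R2)
-	 *		      / (R3 - R1) + TEMPERATURE_CALIB_KELVIN_OFFSET */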
-
- IWL_DEBUG_TEMP(priv, "Calibrated temperature: %dK, %dC\n",
- temperature, KELVIN_TO_CELSIUS(temperature));
-
- return temperature;
-}
-
-/* Adjust Txpower only if temperature variance is greater than threshold. */
-#define IWL_TEMPERATURE_THRESHOLD 3
-
-/**
- * iwl4965_is_temp_calib_needed - determines if new calibration is needed
- *
- * If the temperature has changed sufficiently, then a recalibration
- * is needed.
- *
- * Assumes caller will replace priv->last_temperature once calibration
- * executed.
- */
-static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
-{
- int temp_diff;
-
- if (!test_bit(STATUS_STATISTICS, &priv->status)) {
- IWL_DEBUG_TEMP(priv, "Temperature not updated -- no statistics.\n");
- return 0;
- }
-
- temp_diff = priv->temperature - priv->last_temperature;
-
- /* get absolute value */
- if (temp_diff < 0) {
- IWL_DEBUG_POWER(priv, "Getting cooler, delta %d\n", temp_diff);
- temp_diff = -temp_diff;
- } else if (temp_diff == 0)
- IWL_DEBUG_POWER(priv, "Temperature unchanged\n");
- else
- IWL_DEBUG_POWER(priv, "Getting warmer, delta %d\n", temp_diff);
-
- if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
- IWL_DEBUG_POWER(priv, " => thermal txpower calib not needed\n");
- return 0;
- }
-
- IWL_DEBUG_POWER(priv, " => thermal txpower calib needed\n");
-
- return 1;
-}
-
-static void iwl4965_temperature_calib(struct iwl_priv *priv)
-{
- s32 temp;
-
- temp = iwl4965_hw_get_temperature(priv);
- if (IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
- return;
-
- if (priv->temperature != temp) {
- if (priv->temperature)
- IWL_DEBUG_TEMP(priv, "Temperature changed "
- "from %dC to %dC\n",
- KELVIN_TO_CELSIUS(priv->temperature),
- KELVIN_TO_CELSIUS(temp));
- else
- IWL_DEBUG_TEMP(priv, "Temperature "
- "initialized to %dC\n",
- KELVIN_TO_CELSIUS(temp));
- }
-
- priv->temperature = temp;
- set_bit(STATUS_TEMPERATURE, &priv->status);
-
- if (!priv->disable_tx_power_cal &&
- unlikely(!test_bit(STATUS_SCANNING, &priv->status)) &&
- iwl4965_is_temp_calib_needed(priv))
- queue_work(priv->workqueue, &priv->txpower_work);
-}
-
-static u16 iwl4965_get_hcmd_size(u8 cmd_id, u16 len)
-{
- switch (cmd_id) {
- case REPLY_RXON:
- return (u16) sizeof(struct iwl4965_rxon_cmd);
- default:
- return len;
- }
-}
-
-static u16 iwl4965_build_addsta_hcmd(const struct iwl_legacy_addsta_cmd *cmd,
- u8 *data)
-{
- struct iwl4965_addsta_cmd *addsta = (struct iwl4965_addsta_cmd *)data;
- addsta->mode = cmd->mode;
- memcpy(&addsta->sta, &cmd->sta, sizeof(struct sta_id_modify));
- memcpy(&addsta->key, &cmd->key, sizeof(struct iwl4965_keyinfo));
- addsta->station_flags = cmd->station_flags;
- addsta->station_flags_msk = cmd->station_flags_msk;
- addsta->tid_disable_tx = cmd->tid_disable_tx;
- addsta->add_immediate_ba_tid = cmd->add_immediate_ba_tid;
- addsta->remove_immediate_ba_tid = cmd->remove_immediate_ba_tid;
- addsta->add_immediate_ba_ssn = cmd->add_immediate_ba_ssn;
- addsta->sleep_tx_count = cmd->sleep_tx_count;
- addsta->reserved1 = cpu_to_le16(0);
- addsta->reserved2 = cpu_to_le16(0);
-
- return (u16)sizeof(struct iwl4965_addsta_cmd);
-}
-
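-/* The 32-bit word that follows the frame_count per-frame status entries in
- * the Tx response carries the scheduler's starting sequence number (SSN) in
- * its low bits; the reclaim code below frees queue entries up to that SSN. */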
-static inline u32 iwl4965_get_scd_ssn(struct iwl4965_tx_resp *tx_resp)
-{
- return le32_to_cpup(&tx_resp->u.status + tx_resp->frame_count) & MAX_SN;
-}
-
-/**
- * iwl4965_tx_status_reply_tx - Handle Tx response for frames in aggregation queue
- */
-static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
- struct iwl_ht_agg *agg,
- struct iwl4965_tx_resp *tx_resp,
- int txq_id, u16 start_idx)
-{
- u16 status;
- struct agg_tx_status *frame_status = tx_resp->u.agg_status;
- struct ieee80211_tx_info *info = NULL;
- struct ieee80211_hdr *hdr = NULL;
- u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
- int i, sh, idx;
- u16 seq;
- if (agg->wait_for_ba)
- IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
-
- agg->frame_count = tx_resp->frame_count;
- agg->start_idx = start_idx;
- agg->rate_n_flags = rate_n_flags;
- agg->bitmap = 0;
-
- /* num frames attempted by Tx command */
- if (agg->frame_count == 1) {
- /* Only one frame was attempted; no block-ack will arrive */
- status = le16_to_cpu(frame_status[0].status);
- idx = start_idx;
-
- IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
- agg->frame_count, agg->start_idx, idx);
-
- info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb);
- info->status.rates[0].count = tx_resp->failure_frame + 1;
- info->flags &= ~IEEE80211_TX_CTL_AMPDU;
- info->flags |= iwl4965_tx_status_to_mac80211(status);
- iwl4965_hwrate_to_tx_control(priv, rate_n_flags, info);
-
- IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
- status & 0xff, tx_resp->failure_frame);
- IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
-
- agg->wait_for_ba = 0;
- } else {
- /* Two or more frames were attempted; expect block-ack */
- u64 bitmap = 0;
- int start = agg->start_idx;
-
- /* Construct bit-map of pending frames within Tx window */
- for (i = 0; i < agg->frame_count; i++) {
- u16 sc;
- status = le16_to_cpu(frame_status[i].status);
- seq = le16_to_cpu(frame_status[i].sequence);
- idx = SEQ_TO_INDEX(seq);
- txq_id = SEQ_TO_QUEUE(seq);
-
- if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
- AGG_TX_STATE_ABORT_MSK))
- continue;
-
- IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
- agg->frame_count, txq_id, idx);
-
- hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, idx);
- if (!hdr) {
- IWL_ERR(priv,
- "BUG_ON idx doesn't point to valid skb"
- " idx=%d, txq_id=%d\n", idx, txq_id);
- return -1;
- }
-
- sc = le16_to_cpu(hdr->seq_ctrl);
- if (idx != (SEQ_TO_SN(sc) & 0xff)) {
- IWL_ERR(priv,
- "BUG_ON idx doesn't match seq control"
- " idx=%d, seq_idx=%d, seq=%d\n",
- idx, SEQ_TO_SN(sc), hdr->seq_ctrl);
- return -1;
- }
-
- IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
- i, idx, SEQ_TO_SN(sc));
-
- sh = idx - start;
- if (sh > 64) {
- sh = (start - idx) + 0xff;
- bitmap = bitmap << sh;
- sh = 0;
- start = idx;
- } else if (sh < -64)
- sh = 0xff - (start - idx);
- else if (sh < 0) {
- sh = start - idx;
- start = idx;
- bitmap = bitmap << sh;
- sh = 0;
- }
- bitmap |= 1ULL << sh;
- IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
- start, (unsigned long long)bitmap);
- }
-
- agg->bitmap = bitmap;
- agg->start_idx = start;
- IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
- agg->frame_count, agg->start_idx,
- (unsigned long long)agg->bitmap);
-
- if (bitmap)
- agg->wait_for_ba = 1;
- }
- return 0;
-}
-
-static u8 iwl4965_find_station(struct iwl_priv *priv, const u8 *addr)
-{
- int i;
- int start = 0;
- int ret = IWL_INVALID_STATION;
- unsigned long flags;
-
- if ((priv->iw_mode == NL80211_IFTYPE_ADHOC))
- start = IWL_STA_ID;
-
- if (is_broadcast_ether_addr(addr))
- return priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- for (i = start; i < priv->hw_params.max_stations; i++)
- if (priv->stations[i].used &&
- (!compare_ether_addr(priv->stations[i].sta.sta.addr,
- addr))) {
- ret = i;
- goto out;
- }
-
- IWL_DEBUG_ASSOC_LIMIT(priv, "can not find STA %pM total %d\n",
- addr, priv->num_stations);
-
- out:
- /*
- * It may be possible that more commands interacting with stations
-	 * arrive before we have completed processing the addition of the
-	 * station
- */
- if (ret != IWL_INVALID_STATION &&
- (!(priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) ||
- ((priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) &&
- (priv->stations[ret].used & IWL_STA_UCODE_INPROGRESS)))) {
- IWL_ERR(priv, "Requested station info for sta %d before ready.\n",
- ret);
- ret = IWL_INVALID_STATION;
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return ret;
-}
-
-static int iwl4965_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
-{
- if (priv->iw_mode == NL80211_IFTYPE_STATION) {
- return IWL_AP_ID;
- } else {
- u8 *da = ieee80211_get_DA(hdr);
- return iwl4965_find_station(priv, da);
- }
-}
-
-/**
- * iwl4965_rx_reply_tx - Handle standard (non-aggregation) Tx response
- */
-static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u16 sequence = le16_to_cpu(pkt->hdr.sequence);
- int txq_id = SEQ_TO_QUEUE(sequence);
- int index = SEQ_TO_INDEX(sequence);
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct ieee80211_hdr *hdr;
- struct ieee80211_tx_info *info;
- struct iwl4965_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
- u32 status = le32_to_cpu(tx_resp->u.status);
- int uninitialized_var(tid);
- int sta_id;
- int freed;
- u8 *qc = NULL;
- unsigned long flags;
-
- if ((index >= txq->q.n_bd) || (iwl_legacy_queue_used(&txq->q, index) == 0)) {
- IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
- "is out of range [0-%d] %d %d\n", txq_id,
- index, txq->q.n_bd, txq->q.write_ptr,
- txq->q.read_ptr);
- return;
- }
-
- txq->time_stamp = jiffies;
- info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
- memset(&info->status, 0, sizeof(info->status));
-
- hdr = iwl_legacy_tx_queue_get_hdr(priv, txq_id, index);
- if (ieee80211_is_data_qos(hdr->frame_control)) {
- qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & 0xf;
- }
-
- sta_id = iwl4965_get_ra_sta_id(priv, hdr);
- if (txq->sched_retry && unlikely(sta_id == IWL_INVALID_STATION)) {
- IWL_ERR(priv, "Station not known\n");
- return;
- }
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- if (txq->sched_retry) {
- const u32 scd_ssn = iwl4965_get_scd_ssn(tx_resp);
- struct iwl_ht_agg *agg = NULL;
- WARN_ON(!qc);
-
- agg = &priv->stations[sta_id].tid[tid].agg;
-
- iwl4965_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
-
- /* check if BAR is needed */
- if ((tx_resp->frame_count == 1) && !iwl4965_is_tx_success(status))
- info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
-
- if (txq->q.read_ptr != (scd_ssn & 0xff)) {
- index = iwl_legacy_queue_dec_wrap(scd_ssn & 0xff,
- txq->q.n_bd);
- IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
-					   "%d index %d\n", scd_ssn, index);
- freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
- if (qc)
- iwl4965_free_tfds_in_queue(priv, sta_id,
- tid, freed);
-
- if (priv->mac80211_registered &&
- (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark)
- && (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
- iwl_legacy_wake_queue(priv, txq);
- }
- } else {
- info->status.rates[0].count = tx_resp->failure_frame + 1;
- info->flags |= iwl4965_tx_status_to_mac80211(status);
- iwl4965_hwrate_to_tx_control(priv,
- le32_to_cpu(tx_resp->rate_n_flags),
- info);
-
- IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) "
- "rate_n_flags 0x%x retries %d\n",
- txq_id,
- iwl4965_get_tx_fail_reason(status), status,
- le32_to_cpu(tx_resp->rate_n_flags),
- tx_resp->failure_frame);
-
- freed = iwl4965_tx_queue_reclaim(priv, txq_id, index);
- if (qc && likely(sta_id != IWL_INVALID_STATION))
- iwl4965_free_tfds_in_queue(priv, sta_id, tid, freed);
- else if (sta_id == IWL_INVALID_STATION)
- IWL_DEBUG_TX_REPLY(priv, "Station not known\n");
-
- if (priv->mac80211_registered &&
- (iwl_legacy_queue_space(&txq->q) > txq->q.low_mark))
- iwl_legacy_wake_queue(priv, txq);
- }
- if (qc && likely(sta_id != IWL_INVALID_STATION))
- iwl4965_txq_check_empty(priv, sta_id, tid, txq_id);
-
- iwl4965_check_abort_status(priv, tx_resp->frame_count, status);
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
-
-static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl4965_beacon_notif *beacon = (void *)pkt->u.raw;
- u8 rate __maybe_unused =
- iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
-
- IWL_DEBUG_RX(priv, "beacon status %#x, retries:%d ibssmgr:%d "
- "tsf:0x%.8x%.8x rate:%d\n",
- le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
- beacon->beacon_notify_hdr.failure_frame,
- le32_to_cpu(beacon->ibss_mgr_status),
- le32_to_cpu(beacon->high_tsf),
- le32_to_cpu(beacon->low_tsf), rate);
-
- priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-}
-
-/* Set up 4965-specific Rx frame reply handlers */
-static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
-{
- /* Legacy Rx frames */
- priv->rx_handlers[REPLY_RX] = iwl4965_rx_reply_rx;
- /* Tx response */
- priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
- priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
-}
-
-static struct iwl_hcmd_ops iwl4965_hcmd = {
- .rxon_assoc = iwl4965_send_rxon_assoc,
- .commit_rxon = iwl4965_commit_rxon,
- .set_rxon_chain = iwl4965_set_rxon_chain,
-};
-
-static void iwl4965_post_scan(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- /*
- * Since setting the RXON may have been deferred while
- * performing the scan, fire one off if needed
- */
- if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
- iwl_legacy_commit_rxon(priv, ctx);
-}
-
-static void iwl4965_post_associate(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct ieee80211_vif *vif = ctx->vif;
- struct ieee80211_conf *conf = NULL;
- int ret = 0;
-
- if (!vif || !priv->is_open)
- return;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- iwl_legacy_scan_cancel_timeout(priv, 200);
-
- conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
-
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl_legacy_commit_rxon(priv, ctx);
-
- ret = iwl_legacy_send_rxon_timing(priv, ctx);
- if (ret)
- IWL_WARN(priv, "RXON timing - "
- "Attempting to continue.\n");
-
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
-
- iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
-
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
- ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
-
- IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
- vif->bss_conf.aid, vif->bss_conf.beacon_int);
-
- if (vif->bss_conf.use_short_preamble)
- ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
- if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
- if (vif->bss_conf.use_short_slot)
- ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
- }
-
- iwl_legacy_commit_rxon(priv, ctx);
-
- IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
- vif->bss_conf.aid, ctx->active.bssid_addr);
-
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- break;
- case NL80211_IFTYPE_ADHOC:
- iwl4965_send_beacon_cmd(priv);
- break;
- default:
- IWL_ERR(priv, "%s Should not be called in %d mode\n",
- __func__, vif->type);
- break;
- }
-
-	/* the chain noise calibration will enable PM upon completion.
- * If chain noise has already been run, then we need to enable
- * power management here */
- if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
- iwl_legacy_power_update_mode(priv, false);
-
- /* Enable Rx differential gain and sensitivity calibrations */
- iwl4965_chain_noise_reset(priv);
- priv->start_calib = 1;
-}
-
-static void iwl4965_config_ap(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct ieee80211_vif *vif = ctx->vif;
- int ret = 0;
-
- lockdep_assert_held(&priv->mutex);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- /* The following should be done only at AP bring up */
- if (!iwl_legacy_is_associated_ctx(ctx)) {
-
- /* RXON - unassoc (to set timing command) */
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl_legacy_commit_rxon(priv, ctx);
-
- /* RXON Timing */
- ret = iwl_legacy_send_rxon_timing(priv, ctx);
- if (ret)
- IWL_WARN(priv, "RXON timing failed - "
- "Attempting to continue.\n");
-
- /* AP has all antennas */
- priv->chain_noise_data.active_chains =
- priv->hw_params.valid_rx_ant;
- iwl_legacy_set_rxon_ht(priv, &priv->current_ht_config);
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
- ctx->staging.assoc_id = 0;
-
- if (vif->bss_conf.use_short_preamble)
- ctx->staging.flags |=
- RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- ctx->staging.flags &=
- ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
- if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
- if (vif->bss_conf.use_short_slot)
- ctx->staging.flags |=
- RXON_FLG_SHORT_SLOT_MSK;
- else
- ctx->staging.flags &=
- ~RXON_FLG_SHORT_SLOT_MSK;
- }
- /* need to send beacon cmd before committing assoc RXON! */
- iwl4965_send_beacon_cmd(priv);
- /* restore RXON assoc */
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
- iwl_legacy_commit_rxon(priv, ctx);
- }
- iwl4965_send_beacon_cmd(priv);
-}
-
-static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
- .get_hcmd_size = iwl4965_get_hcmd_size,
- .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
- .request_scan = iwl4965_request_scan,
- .post_scan = iwl4965_post_scan,
-};
-
-static struct iwl_lib_ops iwl4965_lib = {
- .set_hw_params = iwl4965_hw_set_hw_params,
- .txq_update_byte_cnt_tbl = iwl4965_txq_update_byte_cnt_tbl,
- .txq_attach_buf_to_tfd = iwl4965_hw_txq_attach_buf_to_tfd,
- .txq_free_tfd = iwl4965_hw_txq_free_tfd,
- .txq_init = iwl4965_hw_tx_queue_init,
- .rx_handler_setup = iwl4965_rx_handler_setup,
- .is_valid_rtc_data_addr = iwl4965_hw_valid_rtc_data_addr,
- .init_alive_start = iwl4965_init_alive_start,
- .load_ucode = iwl4965_load_bsm,
- .dump_nic_error_log = iwl4965_dump_nic_error_log,
- .dump_fh = iwl4965_dump_fh,
- .set_channel_switch = iwl4965_hw_channel_switch,
- .apm_ops = {
- .init = iwl_legacy_apm_init,
- .config = iwl4965_nic_config,
- },
- .eeprom_ops = {
- .regulatory_bands = {
- EEPROM_REGULATORY_BAND_1_CHANNELS,
- EEPROM_REGULATORY_BAND_2_CHANNELS,
- EEPROM_REGULATORY_BAND_3_CHANNELS,
- EEPROM_REGULATORY_BAND_4_CHANNELS,
- EEPROM_REGULATORY_BAND_5_CHANNELS,
- EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS,
- EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS
- },
- .acquire_semaphore = iwl4965_eeprom_acquire_semaphore,
- .release_semaphore = iwl4965_eeprom_release_semaphore,
- },
- .send_tx_power = iwl4965_send_tx_power,
- .update_chain_flags = iwl4965_update_chain_flags,
- .temp_ops = {
- .temperature = iwl4965_temperature_calib,
- },
- .debugfs_ops = {
- .rx_stats_read = iwl4965_ucode_rx_stats_read,
- .tx_stats_read = iwl4965_ucode_tx_stats_read,
- .general_stats_read = iwl4965_ucode_general_stats_read,
- },
-};
-
-static const struct iwl_legacy_ops iwl4965_legacy_ops = {
- .post_associate = iwl4965_post_associate,
- .config_ap = iwl4965_config_ap,
- .manage_ibss_station = iwl4965_manage_ibss_station,
- .update_bcast_stations = iwl4965_update_bcast_stations,
-};
-
-struct ieee80211_ops iwl4965_hw_ops = {
- .tx = iwl4965_mac_tx,
- .start = iwl4965_mac_start,
- .stop = iwl4965_mac_stop,
- .add_interface = iwl_legacy_mac_add_interface,
- .remove_interface = iwl_legacy_mac_remove_interface,
- .change_interface = iwl_legacy_mac_change_interface,
- .config = iwl_legacy_mac_config,
- .configure_filter = iwl4965_configure_filter,
- .set_key = iwl4965_mac_set_key,
- .update_tkip_key = iwl4965_mac_update_tkip_key,
- .conf_tx = iwl_legacy_mac_conf_tx,
- .reset_tsf = iwl_legacy_mac_reset_tsf,
- .bss_info_changed = iwl_legacy_mac_bss_info_changed,
- .ampdu_action = iwl4965_mac_ampdu_action,
- .hw_scan = iwl_legacy_mac_hw_scan,
- .sta_add = iwl4965_mac_sta_add,
- .sta_remove = iwl_legacy_mac_sta_remove,
- .channel_switch = iwl4965_mac_channel_switch,
- .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
-};
-
-static const struct iwl_ops iwl4965_ops = {
- .lib = &iwl4965_lib,
- .hcmd = &iwl4965_hcmd,
- .utils = &iwl4965_hcmd_utils,
- .led = &iwl4965_led_ops,
- .legacy = &iwl4965_legacy_ops,
- .ieee80211_ops = &iwl4965_hw_ops,
-};
-
-static struct iwl_base_params iwl4965_base_params = {
- .eeprom_size = IWL4965_EEPROM_IMG_SIZE,
- .num_of_queues = IWL49_NUM_QUEUES,
- .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
- .pll_cfg_val = 0,
- .set_l0s = true,
- .use_bsm = true,
- .led_compensation = 61,
- .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
- .wd_timeout = IWL_DEF_WD_TIMEOUT,
- .temperature_kelvin = true,
- .ucode_tracing = true,
- .sensitivity_calib_by_driver = true,
- .chain_noise_calib_by_driver = true,
-};
-
-struct iwl_cfg iwl4965_cfg = {
- .name = "Intel(R) Wireless WiFi Link 4965AGN",
- .fw_name_pre = IWL4965_FW_PRE,
- .ucode_api_max = IWL4965_UCODE_API_MAX,
- .ucode_api_min = IWL4965_UCODE_API_MIN,
- .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
- .valid_tx_ant = ANT_AB,
- .valid_rx_ant = ANT_ABC,
- .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
- .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
- .ops = &iwl4965_ops,
- .mod_params = &iwl4965_mod_params,
- .base_params = &iwl4965_base_params,
- .led_mode = IWL_LED_BLINK,
- /*
- * Force use of chains B and C for scan RX on 5 GHz band
- * because the device has off-channel reception on chain A.
- */
- .scan_rx_antennas[IEEE80211_BAND_5GHZ] = ANT_BC,
-};
-
-/* Module firmware */
-MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.h b/drivers/net/wireless/iwlegacy/iwl-4965.h
deleted file mode 100644
index 01f8163..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-4965.h
+++ /dev/null
@@ -1,282 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-#ifndef __iwl_4965_h__
-#define __iwl_4965_h__
-
-#include "iwl-dev.h"
-
-/* configuration for the _4965 devices */
-extern struct iwl_cfg iwl4965_cfg;
-
-extern struct iwl_mod_params iwl4965_mod_params;
-
-extern struct ieee80211_ops iwl4965_hw_ops;
-
-/* tx queue */
-void iwl4965_free_tfds_in_queue(struct iwl_priv *priv,
- int sta_id, int tid, int freed);
-
-/* RXON */
-void iwl4965_set_rxon_chain(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-
-/* uCode */
-int iwl4965_verify_ucode(struct iwl_priv *priv);
-
-/* lib */
-void iwl4965_check_abort_status(struct iwl_priv *priv,
- u8 frame_count, u32 status);
-
-void iwl4965_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-int iwl4965_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-int iwl4965_hw_nic_init(struct iwl_priv *priv);
-int iwl4965_dump_fh(struct iwl_priv *priv, char **buf, bool display);
-
-/* rx */
-void iwl4965_rx_queue_restock(struct iwl_priv *priv);
-void iwl4965_rx_replenish(struct iwl_priv *priv);
-void iwl4965_rx_replenish_now(struct iwl_priv *priv);
-void iwl4965_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-int iwl4965_rxq_stop(struct iwl_priv *priv);
-int iwl4965_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
-void iwl4965_rx_reply_rx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl4965_rx_reply_rx_phy(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl4965_rx_handle(struct iwl_priv *priv);
-
-/* tx */
-void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
-int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- dma_addr_t addr, u16 len, u8 reset, u8 pad);
-int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
-void iwl4965_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
- struct ieee80211_tx_info *info);
-int iwl4965_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
-int iwl4965_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn);
-int iwl4965_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid);
-int iwl4965_txq_check_empty(struct iwl_priv *priv,
- int sta_id, u8 tid, int txq_id);
-void iwl4965_rx_reply_compressed_ba(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-int iwl4965_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
-void iwl4965_hw_txq_ctx_free(struct iwl_priv *priv);
-int iwl4965_txq_ctx_alloc(struct iwl_priv *priv);
-void iwl4965_txq_ctx_reset(struct iwl_priv *priv);
-void iwl4965_txq_ctx_stop(struct iwl_priv *priv);
-void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask);
-
-/*
- * Acquire priv->lock before calling this function !
- */
-void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index);
-/**
- * iwl4965_tx_queue_set_status - (optionally) start Tx/Cmd queue
- * @tx_fifo_id: Tx DMA/FIFO channel (range 0-7) that the queue will feed
- * @scd_retry: (1) Indicates queue will be used in aggregation mode
- *
- * NOTE: Acquire priv->lock before calling this function !
- */
-void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- int tx_fifo_id, int scd_retry);
-
-static inline u32 iwl4965_tx_status_to_mac80211(u32 status)
-{
- status &= TX_STATUS_MSK;
-
- switch (status) {
- case TX_STATUS_SUCCESS:
- case TX_STATUS_DIRECT_DONE:
- return IEEE80211_TX_STAT_ACK;
- case TX_STATUS_FAIL_DEST_PS:
- return IEEE80211_TX_STAT_TX_FILTERED;
- default:
- return 0;
- }
-}
-
-static inline bool iwl4965_is_tx_success(u32 status)
-{
- status &= TX_STATUS_MSK;
- return (status == TX_STATUS_SUCCESS) ||
- (status == TX_STATUS_DIRECT_DONE);
-}
-
-u8 iwl4965_toggle_tx_ant(struct iwl_priv *priv, u8 ant_idx, u8 valid);
-
-/* rx */
-void iwl4965_rx_missed_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-bool iwl4965_good_plcp_health(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt);
-void iwl4965_rx_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl4965_reply_statistics(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-
-/* scan */
-int iwl4965_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif);
-
-/* station mgmt */
-int iwl4965_manage_ibss_station(struct iwl_priv *priv,
- struct ieee80211_vif *vif, bool add);
-
-/* hcmd */
-int iwl4965_send_beacon_cmd(struct iwl_priv *priv);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-const char *iwl4965_get_tx_fail_reason(u32 status);
-#else
-static inline const char *
-iwl4965_get_tx_fail_reason(u32 status) { return ""; }
-#endif
-
-/* station management */
-int iwl4965_alloc_bcast_station(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-int iwl4965_add_bssid_station(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- const u8 *addr, u8 *sta_id_r);
-int iwl4965_remove_default_wep_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *key);
-int iwl4965_set_default_wep_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *key);
-int iwl4965_restore_default_wep_keys(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-int iwl4965_set_dynamic_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *key, u8 sta_id);
-int iwl4965_remove_dynamic_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *key, u8 sta_id);
-void iwl4965_update_tkip_key(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_key_conf *keyconf,
- struct ieee80211_sta *sta, u32 iv32, u16 *phase1key);
-int iwl4965_sta_tx_modify_enable_tid(struct iwl_priv *priv,
- int sta_id, int tid);
-int iwl4965_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
- int tid, u16 ssn);
-int iwl4965_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
- int tid);
-void iwl4965_sta_modify_sleep_tx_count(struct iwl_priv *priv,
- int sta_id, int cnt);
-int iwl4965_update_bcast_stations(struct iwl_priv *priv);
-
-/* rate */
-static inline u32 iwl4965_ant_idx_to_flags(u8 ant_idx)
-{
- return BIT(ant_idx) << RATE_MCS_ANT_POS;
-}
-
-static inline u8 iwl4965_hw_get_rate(__le32 rate_n_flags)
-{
- return le32_to_cpu(rate_n_flags) & 0xFF;
-}
-
-static inline __le32 iwl4965_hw_set_rate_n_flags(u8 rate, u32 flags)
-{
- return cpu_to_le32(flags|(u32)rate);
-}
-
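The three rate helpers above show how rate_n_flags is packed: the legacy PLCP rate occupies the low byte and the antenna selection is OR'd in as a bit shifted up by RATE_MCS_ANT_POS. A minimal host-side sketch of the same packing, using plain uint32_t instead of __le32 and treating the shift position and the PLCP value as illustrative assumptions only:

#include <stdint.h>
#include <stdio.h>

/* assumed bit position for the antenna flags; illustration only */
#define RATE_MCS_ANT_POS 14

static uint32_t ant_idx_to_flags(uint8_t ant_idx)
{
	/* equivalent to BIT(ant_idx) << RATE_MCS_ANT_POS */
	return (uint32_t)1 << (ant_idx + RATE_MCS_ANT_POS);
}

static uint32_t set_rate_n_flags(uint8_t rate, uint32_t flags)
{
	return flags | (uint32_t)rate;	/* rate occupies the low byte */
}

int main(void)
{
	/* 0x0d is a hypothetical PLCP value used only for the demo */
	uint32_t rnf = set_rate_n_flags(0x0d, ant_idx_to_flags(1));

	printf("rate=0x%02x flags=0x%08x\n",
	       (unsigned)(rnf & 0xff), (unsigned)(rnf & ~0xffu));
	return 0;
}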
-/* eeprom */
-void iwl4965_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
-int iwl4965_eeprom_acquire_semaphore(struct iwl_priv *priv);
-void iwl4965_eeprom_release_semaphore(struct iwl_priv *priv);
-int iwl4965_eeprom_check_version(struct iwl_priv *priv);
-
-/* mac80211 handlers (for 4965) */
-void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
-int iwl4965_mac_start(struct ieee80211_hw *hw);
-void iwl4965_mac_stop(struct ieee80211_hw *hw);
-void iwl4965_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 multicast);
-int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key);
-void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_key_conf *keyconf,
- struct ieee80211_sta *sta,
- u32 iv32, u16 *phase1key);
-int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size);
-int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_channel_switch *ch_switch);
-
-#endif /* __iwl_4965_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
deleted file mode 100644
index 2bd5659..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-core.c
+++ /dev/null
@@ -1,2661 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/etherdevice.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <net/mac80211.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-debug.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-power.h"
-#include "iwl-sta.h"
-#include "iwl-helpers.h"
-
-
-MODULE_DESCRIPTION("iwl-legacy: common functions for 3945 and 4965");
-MODULE_VERSION(IWLWIFI_VERSION);
-MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
-MODULE_LICENSE("GPL");
-
-/*
- * If bt_coex_active is set to true, the uCode will kill/defer every time
- * the priority line is asserted (BT is sending signals on the priority
- * line in the PCIx).
- * If bt_coex_active is set to false, the uCode will ignore BT activity
- * and perform normal operation.
- *
- * Users might experience transmit issues on some platforms due to a
- * WiFi/BT coexistence problem. The possible behavior is being able to
- * scan and find all available APs but not being able to associate with
- * any AP. On those platforms, WiFi communication can be restored by
- * setting the "bt_coex_active" module parameter to "false".
- *
- * default: bt_coex_active = true (BT_COEX_ENABLE)
- */
-static bool bt_coex_active = true;
-module_param(bt_coex_active, bool, S_IRUGO);
-MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
-
-u32 iwlegacy_debug_level;
-EXPORT_SYMBOL(iwlegacy_debug_level);
-
-const u8 iwlegacy_bcast_addr[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
-EXPORT_SYMBOL(iwlegacy_bcast_addr);
-
-
-/* This function both allocates and initializes hw and priv. */
-struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg)
-{
- struct iwl_priv *priv;
- /* mac80211 allocates memory for this device instance, including
- * space for this driver's private structure */
- struct ieee80211_hw *hw;
-
- hw = ieee80211_alloc_hw(sizeof(struct iwl_priv),
- cfg->ops->ieee80211_ops);
- if (hw == NULL) {
- pr_err("%s: Can not allocate network device\n",
- cfg->name);
- goto out;
- }
-
- priv = hw->priv;
- priv->hw = hw;
-
-out:
- return hw;
-}
-EXPORT_SYMBOL(iwl_legacy_alloc_all);
-
-#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
-#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
-static void iwl_legacy_init_ht_hw_capab(const struct iwl_priv *priv,
- struct ieee80211_sta_ht_cap *ht_info,
- enum ieee80211_band band)
-{
- u16 max_bit_rate = 0;
- u8 rx_chains_num = priv->hw_params.rx_chains_num;
- u8 tx_chains_num = priv->hw_params.tx_chains_num;
-
- ht_info->cap = 0;
- memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
-
- ht_info->ht_supported = true;
-
- ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
- max_bit_rate = MAX_BIT_RATE_20_MHZ;
- if (priv->hw_params.ht40_channel & BIT(band)) {
- ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- ht_info->cap |= IEEE80211_HT_CAP_SGI_40;
- ht_info->mcs.rx_mask[4] = 0x01;
- max_bit_rate = MAX_BIT_RATE_40_MHZ;
- }
-
- if (priv->cfg->mod_params->amsdu_size_8K)
- ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
-
- ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
- ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
-
- ht_info->mcs.rx_mask[0] = 0xFF;
- if (rx_chains_num >= 2)
- ht_info->mcs.rx_mask[1] = 0xFF;
- if (rx_chains_num >= 3)
- ht_info->mcs.rx_mask[2] = 0xFF;
-
- /* Highest supported Rx data rate */
- max_bit_rate *= rx_chains_num;
- WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK);
- ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate);
-
- /* Tx MCS capabilities */
- ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
- if (tx_chains_num != rx_chains_num) {
- ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
- ht_info->mcs.tx_params |= ((tx_chains_num - 1) <<
- IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
- }
-}
-
-/**
- * iwl_legacy_init_geos - Initialize mac80211's geo/channel info based on EEPROM
- */
-int iwl_legacy_init_geos(struct iwl_priv *priv)
-{
- struct iwl_channel_info *ch;
- struct ieee80211_supported_band *sband;
- struct ieee80211_channel *channels;
- struct ieee80211_channel *geo_ch;
- struct ieee80211_rate *rates;
- int i = 0;
- s8 max_tx_power = 0;
-
- if (priv->bands[IEEE80211_BAND_2GHZ].n_bitrates ||
- priv->bands[IEEE80211_BAND_5GHZ].n_bitrates) {
- IWL_DEBUG_INFO(priv, "Geography modes already initialized.\n");
- set_bit(STATUS_GEO_CONFIGURED, &priv->status);
- return 0;
- }
-
- channels = kzalloc(sizeof(struct ieee80211_channel) *
- priv->channel_count, GFP_KERNEL);
- if (!channels)
- return -ENOMEM;
-
- rates = kzalloc((sizeof(struct ieee80211_rate) * IWL_RATE_COUNT_LEGACY),
- GFP_KERNEL);
- if (!rates) {
- kfree(channels);
- return -ENOMEM;
- }
-
- /* 5.2GHz channels start after the 2.4GHz channels */
- sband = &priv->bands[IEEE80211_BAND_5GHZ];
- sband->channels = &channels[ARRAY_SIZE(iwlegacy_eeprom_band_1)];
- /* just OFDM */
- sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
- sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
-
- if (priv->cfg->sku & IWL_SKU_N)
- iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
- IEEE80211_BAND_5GHZ);
-
- sband = &priv->bands[IEEE80211_BAND_2GHZ];
- sband->channels = channels;
- /* OFDM & CCK */
- sband->bitrates = rates;
- sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
-
- if (priv->cfg->sku & IWL_SKU_N)
- iwl_legacy_init_ht_hw_capab(priv, &sband->ht_cap,
- IEEE80211_BAND_2GHZ);
-
- priv->ieee_channels = channels;
- priv->ieee_rates = rates;
-
- for (i = 0; i < priv->channel_count; i++) {
- ch = &priv->channel_info[i];
-
- if (!iwl_legacy_is_channel_valid(ch))
- continue;
-
- sband = &priv->bands[ch->band];
-
- geo_ch = &sband->channels[sband->n_channels++];
-
- geo_ch->center_freq =
- ieee80211_channel_to_frequency(ch->channel, ch->band);
- geo_ch->max_power = ch->max_power_avg;
- geo_ch->max_antenna_gain = 0xff;
- geo_ch->hw_value = ch->channel;
-
- if (iwl_legacy_is_channel_valid(ch)) {
- if (!(ch->flags & EEPROM_CHANNEL_IBSS))
- geo_ch->flags |= IEEE80211_CHAN_NO_IBSS;
-
- if (!(ch->flags & EEPROM_CHANNEL_ACTIVE))
- geo_ch->flags |= IEEE80211_CHAN_PASSIVE_SCAN;
-
- if (ch->flags & EEPROM_CHANNEL_RADAR)
- geo_ch->flags |= IEEE80211_CHAN_RADAR;
-
- geo_ch->flags |= ch->ht40_extension_channel;
-
- if (ch->max_power_avg > max_tx_power)
- max_tx_power = ch->max_power_avg;
- } else {
- geo_ch->flags |= IEEE80211_CHAN_DISABLED;
- }
-
- IWL_DEBUG_INFO(priv, "Channel %d Freq=%d[%sGHz] %s flag=0x%X\n",
- ch->channel, geo_ch->center_freq,
- iwl_legacy_is_channel_a_band(ch) ? "5.2" : "2.4",
- geo_ch->flags & IEEE80211_CHAN_DISABLED ?
- "restricted" : "valid",
- geo_ch->flags);
- }
-
- priv->tx_power_device_lmt = max_tx_power;
- priv->tx_power_user_lmt = max_tx_power;
- priv->tx_power_next = max_tx_power;
-
- if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
- priv->cfg->sku & IWL_SKU_A) {
- IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
- "Please send your PCI ID 0x%04X:0x%04X to maintainer.\n",
- priv->pci_dev->device,
- priv->pci_dev->subsystem_device);
- priv->cfg->sku &= ~IWL_SKU_A;
- }
-
- IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
- priv->bands[IEEE80211_BAND_2GHZ].n_channels,
- priv->bands[IEEE80211_BAND_5GHZ].n_channels);
-
- set_bit(STATUS_GEO_CONFIGURED, &priv->status);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_init_geos);
-
-/*
- * iwl_legacy_free_geos - undo allocations in iwl_legacy_init_geos
- */
-void iwl_legacy_free_geos(struct iwl_priv *priv)
-{
- kfree(priv->ieee_channels);
- kfree(priv->ieee_rates);
- clear_bit(STATUS_GEO_CONFIGURED, &priv->status);
-}
-EXPORT_SYMBOL(iwl_legacy_free_geos);
-
-static bool iwl_legacy_is_channel_extension(struct iwl_priv *priv,
- enum ieee80211_band band,
- u16 channel, u8 extension_chan_offset)
-{
- const struct iwl_channel_info *ch_info;
-
- ch_info = iwl_legacy_get_channel_info(priv, band, channel);
- if (!iwl_legacy_is_channel_valid(ch_info))
- return false;
-
- if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
- return !(ch_info->ht40_extension_channel &
- IEEE80211_CHAN_NO_HT40PLUS);
- else if (extension_chan_offset == IEEE80211_HT_PARAM_CHA_SEC_BELOW)
- return !(ch_info->ht40_extension_channel &
- IEEE80211_CHAN_NO_HT40MINUS);
-
- return false;
-}
-
-bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_sta_ht_cap *ht_cap)
-{
- if (!ctx->ht.enabled || !ctx->ht.is_40mhz)
- return false;
-
-	/*
-	 * We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 because
-	 * the bit will not be set in the pure 40 MHz case.
-	 */
- if (ht_cap && !ht_cap->ht_supported)
- return false;
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- if (priv->disable_ht40)
- return false;
-#endif
-
- return iwl_legacy_is_channel_extension(priv, priv->band,
- le16_to_cpu(ctx->staging.channel),
- ctx->ht.extension_chan_offset);
-}
-EXPORT_SYMBOL(iwl_legacy_is_ht40_tx_allowed);
-
-static u16 iwl_legacy_adjust_beacon_interval(u16 beacon_val, u16 max_beacon_val)
-{
- u16 new_val;
- u16 beacon_factor;
-
- /*
- * If mac80211 hasn't given us a beacon interval, program
- * the default into the device.
- */
- if (!beacon_val)
- return DEFAULT_BEACON_INTERVAL;
-
- /*
- * If the beacon interval we obtained from the peer
- * is too large, we'll have to wake up more often
- * (and in IBSS case, we'll beacon too much)
- *
- * For example, if max_beacon_val is 4096, and the
- * requested beacon interval is 7000, we'll have to
- * use 3500 to be able to wake up on the beacons.
- *
- * This could badly influence beacon detection stats.
- */
-
- beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
- new_val = beacon_val / beacon_factor;
-
- if (!new_val)
- new_val = max_beacon_val;
-
- return new_val;
-}
-
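The comment above already works one case (a 7000 TU request against a 4096 TU device limit is programmed as 3500 TU); a self-contained copy of the same arithmetic reproduces it, with DEFAULT_BEACON_INTERVAL stubbed to a hypothetical value for illustration:

#include <stdint.h>
#include <stdio.h>

#define DEFAULT_BEACON_INTERVAL 200	/* hypothetical stand-in value */

static uint16_t adjust_beacon_interval(uint16_t beacon_val, uint16_t max_beacon_val)
{
	uint16_t beacon_factor, new_val;

	if (!beacon_val)
		return DEFAULT_BEACON_INTERVAL;

	/* divide the requested interval down until it fits the device limit */
	beacon_factor = (beacon_val + max_beacon_val) / max_beacon_val;
	new_val = beacon_val / beacon_factor;

	if (!new_val)
		new_val = max_beacon_val;

	return new_val;
}

int main(void)
{
	/* example from the comment: 7000 TU against a 4096 TU limit -> 3500 */
	printf("%u\n", (unsigned)adjust_beacon_interval(7000, 4096));
	return 0;
}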
-int
-iwl_legacy_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- u64 tsf;
- s32 interval_tm, rem;
- struct ieee80211_conf *conf = NULL;
- u16 beacon_int;
- struct ieee80211_vif *vif = ctx->vif;
-
- conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
-
- lockdep_assert_held(&priv->mutex);
-
- memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd));
-
- ctx->timing.timestamp = cpu_to_le64(priv->timestamp);
- ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval);
-
- beacon_int = vif ? vif->bss_conf.beacon_int : 0;
-
- /*
- * TODO: For IBSS we need to get atim_window from mac80211,
- * for now just always use 0
- */
- ctx->timing.atim_window = 0;
-
- beacon_int = iwl_legacy_adjust_beacon_interval(beacon_int,
- priv->hw_params.max_beacon_itrvl * TIME_UNIT);
- ctx->timing.beacon_interval = cpu_to_le16(beacon_int);
-
-	tsf = priv->timestamp; /* tsf is modified by do_div: copy it */
- interval_tm = beacon_int * TIME_UNIT;
- rem = do_div(tsf, interval_tm);
- ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem);
-
- ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1;
-
- IWL_DEBUG_ASSOC(priv,
- "beacon interval %d beacon timer %d beacon tim %d\n",
- le16_to_cpu(ctx->timing.beacon_interval),
- le32_to_cpu(ctx->timing.beacon_init_val),
- le16_to_cpu(ctx->timing.atim_window));
-
- return iwl_legacy_send_cmd_pdu(priv, ctx->rxon_timing_cmd,
- sizeof(ctx->timing), &ctx->timing);
-}
-EXPORT_SYMBOL(iwl_legacy_send_rxon_timing);
-
-void
-iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- int hw_decrypt)
-{
- struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
-
- if (hw_decrypt)
- rxon->filter_flags &= ~RXON_FILTER_DIS_DECRYPT_MSK;
- else
- rxon->filter_flags |= RXON_FILTER_DIS_DECRYPT_MSK;
-
-}
-EXPORT_SYMBOL(iwl_legacy_set_rxon_hwcrypto);
-
-/* check that the RXON structure is valid */
-int
-iwl_legacy_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
- bool error = false;
-
- if (rxon->flags & RXON_FLG_BAND_24G_MSK) {
- if (rxon->flags & RXON_FLG_TGJ_NARROW_BAND_MSK) {
- IWL_WARN(priv, "check 2.4G: wrong narrow\n");
- error = true;
- }
- if (rxon->flags & RXON_FLG_RADAR_DETECT_MSK) {
- IWL_WARN(priv, "check 2.4G: wrong radar\n");
- error = true;
- }
- } else {
- if (!(rxon->flags & RXON_FLG_SHORT_SLOT_MSK)) {
- IWL_WARN(priv, "check 5.2G: not short slot!\n");
- error = true;
- }
- if (rxon->flags & RXON_FLG_CCK_MSK) {
- IWL_WARN(priv, "check 5.2G: CCK!\n");
- error = true;
- }
- }
- if ((rxon->node_addr[0] | rxon->bssid_addr[0]) & 0x1) {
- IWL_WARN(priv, "mac/bssid mcast!\n");
- error = true;
- }
-
- /* make sure basic rates 6Mbps and 1Mbps are supported */
- if ((rxon->ofdm_basic_rates & IWL_RATE_6M_MASK) == 0 &&
- (rxon->cck_basic_rates & IWL_RATE_1M_MASK) == 0) {
- IWL_WARN(priv, "neither 1 nor 6 are basic\n");
- error = true;
- }
-
- if (le16_to_cpu(rxon->assoc_id) > 2007) {
- IWL_WARN(priv, "aid > 2007\n");
- error = true;
- }
-
- if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK))
- == (RXON_FLG_CCK_MSK | RXON_FLG_SHORT_SLOT_MSK)) {
- IWL_WARN(priv, "CCK and short slot\n");
- error = true;
- }
-
- if ((rxon->flags & (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK))
- == (RXON_FLG_CCK_MSK | RXON_FLG_AUTO_DETECT_MSK)) {
- IWL_WARN(priv, "CCK and auto detect");
- error = true;
- }
-
- if ((rxon->flags & (RXON_FLG_AUTO_DETECT_MSK |
- RXON_FLG_TGG_PROTECT_MSK)) ==
- RXON_FLG_TGG_PROTECT_MSK) {
- IWL_WARN(priv, "TGg but no auto-detect\n");
- error = true;
- }
-
- if (error)
- IWL_WARN(priv, "Tuning to channel %d\n",
- le16_to_cpu(rxon->channel));
-
- if (error) {
- IWL_ERR(priv, "Invalid RXON\n");
- return -EINVAL;
- }
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_check_rxon_cmd);
-
-/**
- * iwl_legacy_full_rxon_required - check if full RXON (vs RXON_ASSOC) cmd is needed
- * @priv: staging_rxon is compared to active_rxon
- *
- * If the RXON structure is changing enough to require a new tune,
- * or is clearing the RXON_FILTER_ASSOC_MSK, then return 1 to indicate that
- * a new tune (full RXON command, rather than RXON_ASSOC cmd) is required.
- */
-int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- const struct iwl_legacy_rxon_cmd *staging = &ctx->staging;
- const struct iwl_legacy_rxon_cmd *active = &ctx->active;
-
-#define CHK(cond) \
- if ((cond)) { \
- IWL_DEBUG_INFO(priv, "need full RXON - " #cond "\n"); \
- return 1; \
- }
-
-#define CHK_NEQ(c1, c2) \
- if ((c1) != (c2)) { \
- IWL_DEBUG_INFO(priv, "need full RXON - " \
- #c1 " != " #c2 " - %d != %d\n", \
- (c1), (c2)); \
- return 1; \
- }
-
- /* These items are only settable from the full RXON command */
- CHK(!iwl_legacy_is_associated_ctx(ctx));
- CHK(compare_ether_addr(staging->bssid_addr, active->bssid_addr));
- CHK(compare_ether_addr(staging->node_addr, active->node_addr));
- CHK(compare_ether_addr(staging->wlap_bssid_addr,
- active->wlap_bssid_addr));
- CHK_NEQ(staging->dev_type, active->dev_type);
- CHK_NEQ(staging->channel, active->channel);
- CHK_NEQ(staging->air_propagation, active->air_propagation);
- CHK_NEQ(staging->ofdm_ht_single_stream_basic_rates,
- active->ofdm_ht_single_stream_basic_rates);
- CHK_NEQ(staging->ofdm_ht_dual_stream_basic_rates,
- active->ofdm_ht_dual_stream_basic_rates);
- CHK_NEQ(staging->assoc_id, active->assoc_id);
-
- /* flags, filter_flags, ofdm_basic_rates, and cck_basic_rates can
- * be updated with the RXON_ASSOC command -- however only some
- * flag transitions are allowed using RXON_ASSOC */
-
- /* Check if we are not switching bands */
- CHK_NEQ(staging->flags & RXON_FLG_BAND_24G_MSK,
- active->flags & RXON_FLG_BAND_24G_MSK);
-
- /* Check if we are switching association toggle */
- CHK_NEQ(staging->filter_flags & RXON_FILTER_ASSOC_MSK,
- active->filter_flags & RXON_FILTER_ASSOC_MSK);
-
-#undef CHK
-#undef CHK_NEQ
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_full_rxon_required);
-
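The CHK()/CHK_NEQ() macros above use preprocessor stringification (#cond) so the debug output names the exact condition or field pair that forces a full RXON. A self-contained sketch of the same pattern, with printf standing in for IWL_DEBUG_INFO and hypothetical field names:

#include <stdio.h>

struct rxon { int channel; int dev_type; };

#define CHK_NEQ(c1, c2)							\
	do {								\
		if ((c1) != (c2)) {					\
			printf("need full RXON - " #c1 " != " #c2	\
			       " - %d != %d\n", (c1), (c2));		\
			return 1;					\
		}							\
	} while (0)

static int full_rxon_required(const struct rxon *staging,
			      const struct rxon *active)
{
	CHK_NEQ(staging->channel, active->channel);
	CHK_NEQ(staging->dev_type, active->dev_type);
	return 0;	/* RXON_ASSOC would be enough */
}

int main(void)
{
	struct rxon active = { .channel = 1, .dev_type = 3 };
	struct rxon staging = { .channel = 6, .dev_type = 3 };

	printf("full RXON required: %d\n",
	       full_rxon_required(&staging, &active));
	return 0;
}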
-u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- /*
- * Assign the lowest rate -- should really get this from
- * the beacon skb from mac80211.
- */
- if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK)
- return IWL_RATE_1M_PLCP;
- else
- return IWL_RATE_6M_PLCP;
-}
-EXPORT_SYMBOL(iwl_legacy_get_lowest_plcp);
-
-static void _iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
- struct iwl_ht_config *ht_conf,
- struct iwl_rxon_context *ctx)
-{
- struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
-
- if (!ctx->ht.enabled) {
- rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
- RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
- RXON_FLG_HT40_PROT_MSK |
- RXON_FLG_HT_PROT_MSK);
- return;
- }
-
- rxon->flags |= cpu_to_le32(ctx->ht.protection <<
- RXON_FLG_HT_OPERATING_MODE_POS);
-
- /* Set up channel bandwidth:
- * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
-	/* clear the HT channel mode before setting the mode */
- rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
- RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
- if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, NULL)) {
- /* pure ht40 */
- if (ctx->ht.protection ==
- IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
- rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
- /* Note: control channel is opposite of extension channel */
- switch (ctx->ht.extension_chan_offset) {
- case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
- rxon->flags &=
- ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
- break;
- case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
- rxon->flags |=
- RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
- break;
- }
- } else {
- /* Note: control channel is opposite of extension channel */
- switch (ctx->ht.extension_chan_offset) {
- case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
- rxon->flags &=
- ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
- rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
- break;
- case IEEE80211_HT_PARAM_CHA_SEC_BELOW:
- rxon->flags |=
- RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
- rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
- break;
- case IEEE80211_HT_PARAM_CHA_SEC_NONE:
- default:
- /* channel location only valid if in Mixed mode */
- IWL_ERR(priv,
- "invalid extension channel offset\n");
- break;
- }
- }
- } else {
- rxon->flags |= RXON_FLG_CHANNEL_MODE_LEGACY;
- }
-
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
- IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
- "extension channel offset 0x%x\n",
- le32_to_cpu(rxon->flags), ctx->ht.protection,
- ctx->ht.extension_chan_offset);
-}
-
-void iwl_legacy_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
-{
- struct iwl_rxon_context *ctx;
-
- for_each_context(priv, ctx)
- _iwl_legacy_set_rxon_ht(priv, ht_conf, ctx);
-}
-EXPORT_SYMBOL(iwl_legacy_set_rxon_ht);
-
-/* Return a valid, unused channel for a passive scan to reset the RF */
-u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
- enum ieee80211_band band)
-{
- const struct iwl_channel_info *ch_info;
- int i;
- u8 channel = 0;
- u8 min, max;
- struct iwl_rxon_context *ctx;
-
- if (band == IEEE80211_BAND_5GHZ) {
- min = 14;
- max = priv->channel_count;
- } else {
- min = 0;
- max = 14;
- }
-
- for (i = min; i < max; i++) {
- bool busy = false;
-
- for_each_context(priv, ctx) {
- busy = priv->channel_info[i].channel ==
- le16_to_cpu(ctx->staging.channel);
- if (busy)
- break;
- }
-
- if (busy)
- continue;
-
- channel = priv->channel_info[i].channel;
- ch_info = iwl_legacy_get_channel_info(priv, band, channel);
- if (iwl_legacy_is_channel_valid(ch_info))
- break;
- }
-
- return channel;
-}
-EXPORT_SYMBOL(iwl_legacy_get_single_channel_number);
-
-/**
- * iwl_legacy_set_rxon_channel - Set the band and channel values in staging RXON
- * @ch: requested channel as a pointer to struct ieee80211_channel
- *
- * NOTE: Does not commit to the hardware; it sets appropriate bit fields
- * in the staging RXON flag structure based on the ch->band
- */
-int
-iwl_legacy_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch,
- struct iwl_rxon_context *ctx)
-{
- enum ieee80211_band band = ch->band;
- u16 channel = ch->hw_value;
-
- if ((le16_to_cpu(ctx->staging.channel) == channel) &&
- (priv->band == band))
- return 0;
-
- ctx->staging.channel = cpu_to_le16(channel);
- if (band == IEEE80211_BAND_5GHZ)
- ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK;
- else
- ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
-
- priv->band = band;
-
- IWL_DEBUG_INFO(priv, "Staging channel set to %d [%d]\n", channel, band);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_set_rxon_channel);
-
-void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- enum ieee80211_band band,
- struct ieee80211_vif *vif)
-{
- if (band == IEEE80211_BAND_5GHZ) {
- ctx->staging.flags &=
- ~(RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK
- | RXON_FLG_CCK_MSK);
- ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
- } else {
- /* Copied from iwl_post_associate() */
- if (vif && vif->bss_conf.use_short_slot)
- ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
-
- ctx->staging.flags |= RXON_FLG_BAND_24G_MSK;
- ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK;
- ctx->staging.flags &= ~RXON_FLG_CCK_MSK;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_set_flags_for_band);
-
-/*
- * initialize rxon structure with default values from eeprom
- */
-void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- const struct iwl_channel_info *ch_info;
-
- memset(&ctx->staging, 0, sizeof(ctx->staging));
-
- if (!ctx->vif) {
- ctx->staging.dev_type = ctx->unused_devtype;
- } else
- switch (ctx->vif->type) {
-
- case NL80211_IFTYPE_STATION:
- ctx->staging.dev_type = ctx->station_devtype;
- ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK;
- break;
-
- case NL80211_IFTYPE_ADHOC:
- ctx->staging.dev_type = ctx->ibss_devtype;
- ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK;
- ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK |
- RXON_FILTER_ACCEPT_GRP_MSK;
- break;
-
- default:
- IWL_ERR(priv, "Unsupported interface type %d\n",
- ctx->vif->type);
- break;
- }
-
-#if 0
- /* TODO: Figure out when short_preamble would be set and cache from
- * that */
- if (!hw_to_local(priv->hw)->short_preamble)
- ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
-#endif
-
- ch_info = iwl_legacy_get_channel_info(priv, priv->band,
- le16_to_cpu(ctx->active.channel));
-
- if (!ch_info)
- ch_info = &priv->channel_info[0];
-
- ctx->staging.channel = cpu_to_le16(ch_info->channel);
- priv->band = ch_info->band;
-
- iwl_legacy_set_flags_for_band(priv, ctx, priv->band, ctx->vif);
-
- ctx->staging.ofdm_basic_rates =
- (IWL_OFDM_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
- ctx->staging.cck_basic_rates =
- (IWL_CCK_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
-
- /* clear both MIX and PURE40 mode flag */
- ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED |
- RXON_FLG_CHANNEL_MODE_PURE_40);
- if (ctx->vif)
- memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN);
-
- ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff;
- ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff;
-}
-EXPORT_SYMBOL(iwl_legacy_connection_init_rx_config);
-
-void iwl_legacy_set_rate(struct iwl_priv *priv)
-{
- const struct ieee80211_supported_band *hw = NULL;
- struct ieee80211_rate *rate;
- struct iwl_rxon_context *ctx;
- int i;
-
- hw = iwl_get_hw_mode(priv, priv->band);
- if (!hw) {
- IWL_ERR(priv, "Failed to set rate: unable to get hw mode\n");
- return;
- }
-
- priv->active_rate = 0;
-
- for (i = 0; i < hw->n_bitrates; i++) {
- rate = &(hw->bitrates[i]);
- if (rate->hw_value < IWL_RATE_COUNT_LEGACY)
- priv->active_rate |= (1 << rate->hw_value);
- }
-
- IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
-
- for_each_context(priv, ctx) {
- ctx->staging.cck_basic_rates =
- (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
-
- ctx->staging.ofdm_basic_rates =
- (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_set_rate);
-
-void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
- ieee80211_chswitch_done(ctx->vif, is_success);
-}
-EXPORT_SYMBOL(iwl_legacy_chswitch_done);
-
-void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
-
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active;
-
- if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
- return;
-
- if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
- rxon->channel = csa->channel;
- ctx->staging.channel = csa->channel;
- IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
- le16_to_cpu(csa->channel));
- iwl_legacy_chswitch_done(priv, true);
- } else {
- IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
- le16_to_cpu(csa->channel));
- iwl_legacy_chswitch_done(priv, false);
- }
-}
-EXPORT_SYMBOL(iwl_legacy_rx_csa);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- struct iwl_legacy_rxon_cmd *rxon = &ctx->staging;
-
- IWL_DEBUG_RADIO(priv, "RX CONFIG:\n");
- iwl_print_hex_dump(priv, IWL_DL_RADIO, (u8 *) rxon, sizeof(*rxon));
- IWL_DEBUG_RADIO(priv, "u16 channel: 0x%x\n",
- le16_to_cpu(rxon->channel));
- IWL_DEBUG_RADIO(priv, "u32 flags: 0x%08X\n", le32_to_cpu(rxon->flags));
- IWL_DEBUG_RADIO(priv, "u32 filter_flags: 0x%08x\n",
- le32_to_cpu(rxon->filter_flags));
- IWL_DEBUG_RADIO(priv, "u8 dev_type: 0x%x\n", rxon->dev_type);
- IWL_DEBUG_RADIO(priv, "u8 ofdm_basic_rates: 0x%02x\n",
- rxon->ofdm_basic_rates);
- IWL_DEBUG_RADIO(priv, "u8 cck_basic_rates: 0x%02x\n",
- rxon->cck_basic_rates);
- IWL_DEBUG_RADIO(priv, "u8[6] node_addr: %pM\n", rxon->node_addr);
- IWL_DEBUG_RADIO(priv, "u8[6] bssid_addr: %pM\n", rxon->bssid_addr);
- IWL_DEBUG_RADIO(priv, "u16 assoc_id: 0x%x\n",
- le16_to_cpu(rxon->assoc_id));
-}
-EXPORT_SYMBOL(iwl_legacy_print_rx_config_cmd);
-#endif
-/**
- * iwl_legacy_irq_handle_error - called for HW or SW error interrupt from card
- */
-void iwl_legacy_irq_handle_error(struct iwl_priv *priv)
-{
- /* Set the FW error flag -- cleared on iwl_down */
- set_bit(STATUS_FW_ERROR, &priv->status);
-
- /* Cancel currently queued command. */
- clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
-
- IWL_ERR(priv, "Loaded firmware version: %s\n",
- priv->hw->wiphy->fw_version);
-
- priv->cfg->ops->lib->dump_nic_error_log(priv);
- if (priv->cfg->ops->lib->dump_fh)
- priv->cfg->ops->lib->dump_fh(priv, NULL, false);
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (iwl_legacy_get_debug_level(priv) & IWL_DL_FW_ERRORS)
- iwl_legacy_print_rx_config_cmd(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
-#endif
-
- wake_up(&priv->wait_command_queue);
-
- /* Keep the restart process from trying to send host
- * commands by clearing the INIT status bit */
- clear_bit(STATUS_READY, &priv->status);
-
- if (!test_bit(STATUS_EXIT_PENDING, &priv->status)) {
- IWL_DEBUG(priv, IWL_DL_FW_ERRORS,
- "Restarting adapter due to uCode error.\n");
-
- if (priv->cfg->mod_params->restart_fw)
- queue_work(priv->workqueue, &priv->restart);
- }
-}
-EXPORT_SYMBOL(iwl_legacy_irq_handle_error);
-
-static int iwl_legacy_apm_stop_master(struct iwl_priv *priv)
-{
- int ret = 0;
-
- /* stop device's busmaster DMA activity */
- iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
-
- ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
- CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
- if (ret)
- IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
-
- IWL_DEBUG_INFO(priv, "stop master\n");
-
- return ret;
-}
-
-void iwl_legacy_apm_stop(struct iwl_priv *priv)
-{
- IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
-
- /* Stop device's DMA activity */
- iwl_legacy_apm_stop_master(priv);
-
- /* Reset the entire device */
- iwl_legacy_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
-
- udelay(10);
-
- /*
- * Clear "initialization complete" bit to move adapter from
- * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
- */
- iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-}
-EXPORT_SYMBOL(iwl_legacy_apm_stop);
-
-
-/*
- * Start up NIC's basic functionality after it has been reset
- * (e.g. after platform boot, or shutdown via iwl_legacy_apm_stop())
- * NOTE: This does not load uCode nor start the embedded processor
- */
-int iwl_legacy_apm_init(struct iwl_priv *priv)
-{
- int ret = 0;
- u16 lctl;
-
- IWL_DEBUG_INFO(priv, "Init card's basic functions\n");
-
- /*
- * Use "set_bit" below rather than "write", to preserve any hardware
- * bits already set by default after reset.
- */
-
- /* Disable L0S exit timer (platform NMI Work/Around) */
- iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
- CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
-
- /*
- * Disable L0s without affecting L1;
- * don't wait for ICH L0s (ICH bug W/A)
- */
- iwl_legacy_set_bit(priv, CSR_GIO_CHICKEN_BITS,
- CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
-
- /* Set FH wait threshold to maximum (HW error during stress W/A) */
- iwl_legacy_set_bit(priv, CSR_DBG_HPET_MEM_REG,
- CSR_DBG_HPET_MEM_REG_VAL);
-
- /*
- * Enable HAP INTA (interrupt from management bus) to
- * wake device's PCI Express link L1a -> L0s
- * NOTE: This is no-op for 3945 (non-existent bit)
- */
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
-
- /*
- * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
- * Check if BIOS (or OS) enabled L1-ASPM on this device.
- * If so (likely), disable L0S, so device moves directly L0->L1;
- * costs negligible amount of power savings.
- * If not (unlikely), enable L0S, so there is at least some
- * power savings, even without L1.
- */
- if (priv->cfg->base_params->set_l0s) {
- lctl = iwl_legacy_pcie_link_ctl(priv);
- if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
- PCI_CFG_LINK_CTRL_VAL_L1_EN) {
- /* L1-ASPM enabled; disable(!) L0S */
- iwl_legacy_set_bit(priv, CSR_GIO_REG,
- CSR_GIO_REG_VAL_L0S_ENABLED);
- IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
- } else {
- /* L1-ASPM disabled; enable(!) L0S */
- iwl_legacy_clear_bit(priv, CSR_GIO_REG,
- CSR_GIO_REG_VAL_L0S_ENABLED);
- IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
- }
- }
-
- /* Configure analog phase-lock-loop before activating to D0A */
- if (priv->cfg->base_params->pll_cfg_val)
- iwl_legacy_set_bit(priv, CSR_ANA_PLL_CFG,
- priv->cfg->base_params->pll_cfg_val);
-
- /*
- * Set "initialization complete" bit to move adapter from
- * D0U* --> D0A* (powered-up active) state.
- */
- iwl_legacy_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
-
- /*
- * Wait for clock stabilization; once stabilized, access to
- * device-internal resources is supported, e.g. iwl_legacy_write_prph()
- * and accesses to uCode SRAM.
- */
- ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
- CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
- if (ret < 0) {
- IWL_DEBUG_INFO(priv, "Failed to init the card\n");
- goto out;
- }
-
- /*
- * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
-	 * BSM (Bootstrap State Machine) is only in 3945 and 4965.
- *
- * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
- * do not disable clocks. This preserves any hardware bits already
- * set by default in "CLK_CTRL_REG" after reset.
- */
- if (priv->cfg->base_params->use_bsm)
- iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
- APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
- else
- iwl_legacy_write_prph(priv, APMG_CLK_EN_REG,
- APMG_CLK_VAL_DMA_CLK_RQT);
- udelay(20);
-
- /* Disable L1-Active */
- iwl_legacy_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
- APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
-
-out:
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_apm_init);
-
-
-int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
-{
- int ret;
- s8 prev_tx_power;
- bool defer;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- lockdep_assert_held(&priv->mutex);
-
- if (priv->tx_power_user_lmt == tx_power && !force)
- return 0;
-
- if (!priv->cfg->ops->lib->send_tx_power)
- return -EOPNOTSUPP;
-
-	/* 0 dBm means 1 milliwatt */
- if (tx_power < 0) {
- IWL_WARN(priv,
- "Requested user TXPOWER %d below 1 mW.\n",
- tx_power);
- return -EINVAL;
- }
-
- if (tx_power > priv->tx_power_device_lmt) {
- IWL_WARN(priv,
- "Requested user TXPOWER %d above upper limit %d.\n",
- tx_power, priv->tx_power_device_lmt);
- return -EINVAL;
- }
-
- if (!iwl_legacy_is_ready_rf(priv))
- return -EIO;
-
-	/* scan complete and commit_rxon use the tx_power_next value,
-	 * so it always needs to be updated with the newest request */
- priv->tx_power_next = tx_power;
-
- /* do not set tx power when scanning or channel changing */
- defer = test_bit(STATUS_SCANNING, &priv->status) ||
- memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging));
- if (defer && !force) {
- IWL_DEBUG_INFO(priv, "Deferring tx power set\n");
- return 0;
- }
-
- prev_tx_power = priv->tx_power_user_lmt;
- priv->tx_power_user_lmt = tx_power;
-
- ret = priv->cfg->ops->lib->send_tx_power(priv);
-
-	/* if setting tx_power fails, restore the original tx power */
- if (ret) {
- priv->tx_power_user_lmt = prev_tx_power;
- priv->tx_power_next = prev_tx_power;
- }
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_set_tx_power);
-
-void iwl_legacy_send_bt_config(struct iwl_priv *priv)
-{
- struct iwl_bt_cmd bt_cmd = {
- .lead_time = BT_LEAD_TIME_DEF,
- .max_kill = BT_MAX_KILL_DEF,
- .kill_ack_mask = 0,
- .kill_cts_mask = 0,
- };
-
- if (!bt_coex_active)
- bt_cmd.flags = BT_COEX_DISABLE;
- else
- bt_cmd.flags = BT_COEX_ENABLE;
-
- IWL_DEBUG_INFO(priv, "BT coex %s\n",
- (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
-
- if (iwl_legacy_send_cmd_pdu(priv, REPLY_BT_CONFIG,
- sizeof(struct iwl_bt_cmd), &bt_cmd))
- IWL_ERR(priv, "failed to send BT Coex Config\n");
-}
-EXPORT_SYMBOL(iwl_legacy_send_bt_config);
-
-int iwl_legacy_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
-{
- struct iwl_statistics_cmd statistics_cmd = {
- .configuration_flags =
- clear ? IWL_STATS_CONF_CLEAR_STATS : 0,
- };
-
- if (flags & CMD_ASYNC)
- return iwl_legacy_send_cmd_pdu_async(priv, REPLY_STATISTICS_CMD,
- sizeof(struct iwl_statistics_cmd),
- &statistics_cmd, NULL);
- else
- return iwl_legacy_send_cmd_pdu(priv, REPLY_STATISTICS_CMD,
- sizeof(struct iwl_statistics_cmd),
- &statistics_cmd);
-}
-EXPORT_SYMBOL(iwl_legacy_send_statistics_request);
-
-void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
- IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
- sleep->pm_sleep_mode, sleep->pm_wakeup_src);
-#endif
-}
-EXPORT_SYMBOL(iwl_legacy_rx_pm_sleep_notif);
-
-void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
- "notification for %s:\n", len,
- iwl_legacy_get_cmd_string(pkt->hdr.cmd));
- iwl_print_hex_dump(priv, IWL_DL_RADIO, pkt->u.raw, len);
-}
-EXPORT_SYMBOL(iwl_legacy_rx_pm_debug_statistics_notif);
-
-void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
-
- IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
- "seq 0x%04X ser 0x%08X\n",
- le32_to_cpu(pkt->u.err_resp.error_type),
- iwl_legacy_get_cmd_string(pkt->u.err_resp.cmd_id),
- pkt->u.err_resp.cmd_id,
- le16_to_cpu(pkt->u.err_resp.bad_cmd_seq_num),
- le32_to_cpu(pkt->u.err_resp.error_info));
-}
-EXPORT_SYMBOL(iwl_legacy_rx_reply_error);
-
-void iwl_legacy_clear_isr_stats(struct iwl_priv *priv)
-{
- memset(&priv->isr_stats, 0, sizeof(priv->isr_stats));
-}
-
-int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
- const struct ieee80211_tx_queue_params *params)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx;
- unsigned long flags;
- int q;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (!iwl_legacy_is_ready_rf(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
- return -EIO;
- }
-
- if (queue >= AC_NUM) {
- IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
- return 0;
- }
-
- q = AC_NUM - 1 - queue;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- for_each_context(priv, ctx) {
- ctx->qos_data.def_qos_parm.ac[q].cw_min =
- cpu_to_le16(params->cw_min);
- ctx->qos_data.def_qos_parm.ac[q].cw_max =
- cpu_to_le16(params->cw_max);
- ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
- ctx->qos_data.def_qos_parm.ac[q].edca_txop =
- cpu_to_le16((params->txop * 32));
-
- ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_conf_tx);
-
-int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
-
- return priv->ibss_manager == IWL_IBSS_MANAGER;
-}
-EXPORT_SYMBOL_GPL(iwl_legacy_mac_tx_last_beacon);
-
-static int
-iwl_legacy_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- iwl_legacy_connection_init_rx_config(priv, ctx);
-
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
-
- return iwl_legacy_commit_rxon(priv, ctx);
-}
-
-static int iwl_legacy_setup_interface(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- struct ieee80211_vif *vif = ctx->vif;
- int err;
-
- lockdep_assert_held(&priv->mutex);
-
- /*
- * This variable will be correct only when there's just
- * a single context, but all code using it is for hardware
- * that supports only one context.
- */
- priv->iw_mode = vif->type;
-
- ctx->is_active = true;
-
- err = iwl_legacy_set_mode(priv, ctx);
- if (err) {
- if (!ctx->always_active)
- ctx->is_active = false;
- return err;
- }
-
- return 0;
-}
-
-int
-iwl_legacy_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- struct iwl_rxon_context *tmp, *ctx = NULL;
- int err;
-
- IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
- vif->type, vif->addr);
-
- mutex_lock(&priv->mutex);
-
- if (!iwl_legacy_is_ready_rf(priv)) {
- IWL_WARN(priv, "Try to add interface when device not ready\n");
- err = -EINVAL;
- goto out;
- }
-
- for_each_context(priv, tmp) {
- u32 possible_modes =
- tmp->interface_modes | tmp->exclusive_interface_modes;
-
- if (tmp->vif) {
- /* check if this busy context is exclusive */
- if (tmp->exclusive_interface_modes &
- BIT(tmp->vif->type)) {
- err = -EINVAL;
- goto out;
- }
- continue;
- }
-
- if (!(possible_modes & BIT(vif->type)))
- continue;
-
-		/* found a possibly usable context without an interface */
- ctx = tmp;
- break;
- }
-
- if (!ctx) {
- err = -EOPNOTSUPP;
- goto out;
- }
-
- vif_priv->ctx = ctx;
- ctx->vif = vif;
-
- err = iwl_legacy_setup_interface(priv, ctx);
- if (!err)
- goto out;
-
- ctx->vif = NULL;
- priv->iw_mode = NL80211_IFTYPE_STATION;
- out:
- mutex_unlock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return err;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_add_interface);
-
-static void iwl_legacy_teardown_interface(struct iwl_priv *priv,
- struct ieee80211_vif *vif,
- bool mode_change)
-{
- struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
- lockdep_assert_held(&priv->mutex);
-
- if (priv->scan_vif == vif) {
- iwl_legacy_scan_cancel_timeout(priv, 200);
- iwl_legacy_force_scan_end(priv);
- }
-
- if (!mode_change) {
- iwl_legacy_set_mode(priv, ctx);
- if (!ctx->always_active)
- ctx->is_active = false;
- }
-}
-
-void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- mutex_lock(&priv->mutex);
-
- WARN_ON(ctx->vif != vif);
- ctx->vif = NULL;
-
- iwl_legacy_teardown_interface(priv, vif, false);
-
- memset(priv->bssid, 0, ETH_ALEN);
- mutex_unlock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
-}
-EXPORT_SYMBOL(iwl_legacy_mac_remove_interface);
-
-int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv)
-{
- if (!priv->txq)
- priv->txq = kzalloc(
- sizeof(struct iwl_tx_queue) *
- priv->cfg->base_params->num_of_queues,
- GFP_KERNEL);
- if (!priv->txq) {
- IWL_ERR(priv, "Not enough memory for txq\n");
- return -ENOMEM;
- }
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_alloc_txq_mem);
-
-void iwl_legacy_txq_mem(struct iwl_priv *priv)
-{
- kfree(priv->txq);
- priv->txq = NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_txq_mem);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-
-#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
-
-void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
-{
- priv->tx_traffic_idx = 0;
- priv->rx_traffic_idx = 0;
- if (priv->tx_traffic)
- memset(priv->tx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
- if (priv->rx_traffic)
- memset(priv->rx_traffic, 0, IWL_TRAFFIC_DUMP_SIZE);
-}
-
-int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
-{
- u32 traffic_size = IWL_TRAFFIC_DUMP_SIZE;
-
- if (iwlegacy_debug_level & IWL_DL_TX) {
- if (!priv->tx_traffic) {
- priv->tx_traffic =
- kzalloc(traffic_size, GFP_KERNEL);
- if (!priv->tx_traffic)
- return -ENOMEM;
- }
- }
- if (iwlegacy_debug_level & IWL_DL_RX) {
- if (!priv->rx_traffic) {
- priv->rx_traffic =
- kzalloc(traffic_size, GFP_KERNEL);
- if (!priv->rx_traffic)
- return -ENOMEM;
- }
- }
- iwl_legacy_reset_traffic_log(priv);
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_alloc_traffic_mem);
-
-void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
-{
- kfree(priv->tx_traffic);
- priv->tx_traffic = NULL;
-
- kfree(priv->rx_traffic);
- priv->rx_traffic = NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_free_traffic_mem);
-
-void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
- u16 length, struct ieee80211_hdr *header)
-{
- __le16 fc;
- u16 len;
-
- if (likely(!(iwlegacy_debug_level & IWL_DL_TX)))
- return;
-
- if (!priv->tx_traffic)
- return;
-
- fc = header->frame_control;
- if (ieee80211_is_data(fc)) {
- len = (length > IWL_TRAFFIC_ENTRY_SIZE)
- ? IWL_TRAFFIC_ENTRY_SIZE : length;
- memcpy((priv->tx_traffic +
- (priv->tx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
- header, len);
- priv->tx_traffic_idx =
- (priv->tx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_dbg_log_tx_data_frame);
-
-void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
- u16 length, struct ieee80211_hdr *header)
-{
- __le16 fc;
- u16 len;
-
- if (likely(!(iwlegacy_debug_level & IWL_DL_RX)))
- return;
-
- if (!priv->rx_traffic)
- return;
-
- fc = header->frame_control;
- if (ieee80211_is_data(fc)) {
- len = (length > IWL_TRAFFIC_ENTRY_SIZE)
- ? IWL_TRAFFIC_ENTRY_SIZE : length;
- memcpy((priv->rx_traffic +
- (priv->rx_traffic_idx * IWL_TRAFFIC_ENTRY_SIZE)),
- header, len);
- priv->rx_traffic_idx =
- (priv->rx_traffic_idx + 1) % IWL_TRAFFIC_ENTRIES;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_dbg_log_rx_data_frame);
-
-const char *iwl_legacy_get_mgmt_string(int cmd)
-{
- switch (cmd) {
- IWL_CMD(MANAGEMENT_ASSOC_REQ);
- IWL_CMD(MANAGEMENT_ASSOC_RESP);
- IWL_CMD(MANAGEMENT_REASSOC_REQ);
- IWL_CMD(MANAGEMENT_REASSOC_RESP);
- IWL_CMD(MANAGEMENT_PROBE_REQ);
- IWL_CMD(MANAGEMENT_PROBE_RESP);
- IWL_CMD(MANAGEMENT_BEACON);
- IWL_CMD(MANAGEMENT_ATIM);
- IWL_CMD(MANAGEMENT_DISASSOC);
- IWL_CMD(MANAGEMENT_AUTH);
- IWL_CMD(MANAGEMENT_DEAUTH);
- IWL_CMD(MANAGEMENT_ACTION);
- default:
- return "UNKNOWN";
-
- }
-}
-
-const char *iwl_legacy_get_ctrl_string(int cmd)
-{
- switch (cmd) {
- IWL_CMD(CONTROL_BACK_REQ);
- IWL_CMD(CONTROL_BACK);
- IWL_CMD(CONTROL_PSPOLL);
- IWL_CMD(CONTROL_RTS);
- IWL_CMD(CONTROL_CTS);
- IWL_CMD(CONTROL_ACK);
- IWL_CMD(CONTROL_CFEND);
- IWL_CMD(CONTROL_CFENDACK);
- default:
- return "UNKNOWN";
-
- }
-}
-
-void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv)
-{
- memset(&priv->tx_stats, 0, sizeof(struct traffic_stats));
- memset(&priv->rx_stats, 0, sizeof(struct traffic_stats));
-}
-
-/*
- * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is defined, iwl_legacy_update_stats()
- * records all MGMT, CTRL and DATA packets for both the TX and RX paths;
- * use debugfs to display the tx/rx statistics.
- * If CONFIG_IWLWIFI_LEGACY_DEBUGFS is not defined, no MGMT and CTRL
- * information is recorded, but DATA packets are still counted, since
- * iwl_led.c needs to control the LED blinking based on the number of
- * TX and RX data frames.
- */
-void
-iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
-{
- struct traffic_stats *stats;
-
- if (is_tx)
- stats = &priv->tx_stats;
- else
- stats = &priv->rx_stats;
-
- if (ieee80211_is_mgmt(fc)) {
- switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
- case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
- stats->mgmt[MANAGEMENT_ASSOC_REQ]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
- stats->mgmt[MANAGEMENT_ASSOC_RESP]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
- stats->mgmt[MANAGEMENT_REASSOC_REQ]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
- stats->mgmt[MANAGEMENT_REASSOC_RESP]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
- stats->mgmt[MANAGEMENT_PROBE_REQ]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
- stats->mgmt[MANAGEMENT_PROBE_RESP]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_BEACON):
- stats->mgmt[MANAGEMENT_BEACON]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_ATIM):
- stats->mgmt[MANAGEMENT_ATIM]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
- stats->mgmt[MANAGEMENT_DISASSOC]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_AUTH):
- stats->mgmt[MANAGEMENT_AUTH]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
- stats->mgmt[MANAGEMENT_DEAUTH]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_ACTION):
- stats->mgmt[MANAGEMENT_ACTION]++;
- break;
- }
- } else if (ieee80211_is_ctl(fc)) {
- switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
- case cpu_to_le16(IEEE80211_STYPE_BACK_REQ):
- stats->ctrl[CONTROL_BACK_REQ]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_BACK):
- stats->ctrl[CONTROL_BACK]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_PSPOLL):
- stats->ctrl[CONTROL_PSPOLL]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_RTS):
- stats->ctrl[CONTROL_RTS]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_CTS):
- stats->ctrl[CONTROL_CTS]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_ACK):
- stats->ctrl[CONTROL_ACK]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_CFEND):
- stats->ctrl[CONTROL_CFEND]++;
- break;
- case cpu_to_le16(IEEE80211_STYPE_CFENDACK):
- stats->ctrl[CONTROL_CFENDACK]++;
- break;
- }
- } else {
- /* data */
- stats->data_cnt++;
- stats->data_bytes += len;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_update_stats);
-#endif
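The counters filled in above are only consumed through debugfs. As a hedged illustration (not code from this patch), the sketch below shows how a read handler might format the per-subtype management counters, pairing each one with its name from iwl_legacy_get_mgmt_string(); the helper name, the MANAGEMENT_MAX bound and the mgmt[] array of struct traffic_stats are assumptions about the surrounding headers.

/* Hypothetical helper -- not part of the original driver. */
static ssize_t iwl_legacy_format_mgmt_stats(struct iwl_priv *priv,
					    char *buf, size_t bufsz)
{
	struct traffic_stats *tx = &priv->tx_stats;
	struct traffic_stats *rx = &priv->rx_stats;
	ssize_t pos = 0;
	int i;

	/* One line per management subtype: "NAME: tx_count / rx_count" */
	for (i = 0; i < MANAGEMENT_MAX; i++)
		pos += scnprintf(buf + pos, bufsz - pos, "%-25s: %u / %u\n",
				 iwl_legacy_get_mgmt_string(i),
				 tx->mgmt[i], rx->mgmt[i]);

	return pos;
}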
-
-int iwl_legacy_force_reset(struct iwl_priv *priv, bool external)
-{
- struct iwl_force_reset *force_reset;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return -EINVAL;
-
- force_reset = &priv->force_reset;
- force_reset->reset_request_count++;
- if (!external) {
- if (force_reset->last_force_reset_jiffies &&
- time_after(force_reset->last_force_reset_jiffies +
- force_reset->reset_duration, jiffies)) {
- IWL_DEBUG_INFO(priv, "force reset rejected\n");
- force_reset->reset_reject_count++;
- return -EAGAIN;
- }
- }
- force_reset->reset_success_count++;
- force_reset->last_force_reset_jiffies = jiffies;
-
-	/*
-	 * If the request comes from an external source (e.g. debugfs),
-	 * always perform the reset regardless of the module parameter
-	 * setting.
-	 * If the request is internal (uCode error or driver-detected
-	 * failure), the fw_restart module parameter needs to be checked
-	 * before reloading the firmware.
-	 */
-
- if (!external && !priv->cfg->mod_params->restart_fw) {
- IWL_DEBUG_INFO(priv, "Cancel firmware reload based on "
- "module parameter setting\n");
- return 0;
- }
-
- IWL_ERR(priv, "On demand firmware reload\n");
-
- /* Set the FW error flag -- cleared on iwl_down */
- set_bit(STATUS_FW_ERROR, &priv->status);
- wake_up(&priv->wait_command_queue);
- /*
- * Keep the restart process from trying to send host
- * commands by clearing the INIT status bit
- */
- clear_bit(STATUS_READY, &priv->status);
- queue_work(priv->workqueue, &priv->restart);
-
- return 0;
-}
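For context on the external path, here is a rough sketch of how a debugfs write hook could request an on-demand reload through iwl_legacy_force_reset() with external set, so the fw_restart module parameter is not consulted. The handler name and the use of file->private_data are assumptions; the real debugfs plumbing lives in iwl-debugfs.c, which this patch also removes.

/* Hypothetical debugfs write handler -- names are placeholders. */
static ssize_t iwl_example_force_reset_write(struct file *file,
					     const char __user *user_buf,
					     size_t count, loff_t *ppos)
{
	struct iwl_priv *priv = file->private_data;
	int ret;

	/* external == true: skip the fw_restart module parameter check */
	ret = iwl_legacy_force_reset(priv, true);

	return ret ? ret : count;
}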
-
-int
-iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum nl80211_iftype newtype, bool newp2p)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
- struct iwl_rxon_context *tmp;
- u32 interface_modes;
- int err;
-
- newtype = ieee80211_iftype_p2p(newtype, newp2p);
-
- mutex_lock(&priv->mutex);
-
- if (!ctx->vif || !iwl_legacy_is_ready_rf(priv)) {
- /*
- * Huh? But wait ... this can maybe happen when
- * we're in the middle of a firmware restart!
- */
- err = -EBUSY;
- goto out;
- }
-
- interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
-
- if (!(interface_modes & BIT(newtype))) {
- err = -EBUSY;
- goto out;
- }
-
- if (ctx->exclusive_interface_modes & BIT(newtype)) {
- for_each_context(priv, tmp) {
- if (ctx == tmp)
- continue;
-
- if (!tmp->vif)
- continue;
-
- /*
- * The current mode switch would be exclusive, but
- * another context is active ... refuse the switch.
- */
- err = -EBUSY;
- goto out;
- }
- }
-
- /* success */
- iwl_legacy_teardown_interface(priv, vif, true);
- vif->type = newtype;
- vif->p2p = newp2p;
- err = iwl_legacy_setup_interface(priv, ctx);
- WARN_ON(err);
- /*
- * We've switched internally, but submitting to the
- * device may have failed for some reason. Mask this
- * error, because otherwise mac80211 will not switch
- * (and set the interface type back) and we'll be
- * out of sync with it.
- */
- err = 0;
-
- out:
- mutex_unlock(&priv->mutex);
- return err;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_change_interface);
-
-/*
- * On every watchdog tick we check the (latest) time stamp. If it does not
- * change during the timeout period and the queue is not empty, we reset
- * the firmware.
- */
-static int iwl_legacy_check_stuck_queue(struct iwl_priv *priv, int cnt)
-{
- struct iwl_tx_queue *txq = &priv->txq[cnt];
- struct iwl_queue *q = &txq->q;
- unsigned long timeout;
- int ret;
-
- if (q->read_ptr == q->write_ptr) {
- txq->time_stamp = jiffies;
- return 0;
- }
-
- timeout = txq->time_stamp +
- msecs_to_jiffies(priv->cfg->base_params->wd_timeout);
-
- if (time_after(jiffies, timeout)) {
- IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
- q->id, priv->cfg->base_params->wd_timeout);
- ret = iwl_legacy_force_reset(priv, false);
- return (ret == -EAGAIN) ? 0 : 1;
- }
-
- return 0;
-}
-
-/*
- * Making the watchdog tick a quarter of the timeout ensures we
- * discover a hung queue between timeout and 1.25 * timeout.
- */
-#define IWL_WD_TICK(timeout) ((timeout) / 4)
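To make the bound concrete, assume a wd_timeout of 2000 ms (an illustrative value, not taken from this patch): IWL_WD_TICK(2000) is 500 ms, so the watchdog runs every 500 ms. A queue whose time stamp stops advancing passes its 2000 ms deadline between two ticks and is caught at the next one, i.e. no later than 2000 + 500 = 2500 ms after its last activity, which is the 1.25 * timeout bound mentioned above.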
-
-/*
- * Watchdog timer callback: check each TX queue for a stuck condition;
- * if one is hung, we reset the firmware. If everything is fine, just
- * rearm the timer.
- */
-void iwl_legacy_bg_watchdog(unsigned long data)
-{
- struct iwl_priv *priv = (struct iwl_priv *)data;
- int cnt;
- unsigned long timeout;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- timeout = priv->cfg->base_params->wd_timeout;
- if (timeout == 0)
- return;
-
- /* monitor and check for stuck cmd queue */
- if (iwl_legacy_check_stuck_queue(priv, priv->cmd_queue))
- return;
-
- /* monitor and check for other stuck queues */
- if (iwl_legacy_is_any_associated(priv)) {
- for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
- /* skip as we already checked the command queue */
- if (cnt == priv->cmd_queue)
- continue;
- if (iwl_legacy_check_stuck_queue(priv, cnt))
- return;
- }
- }
-
- mod_timer(&priv->watchdog, jiffies +
- msecs_to_jiffies(IWL_WD_TICK(timeout)));
-}
-EXPORT_SYMBOL(iwl_legacy_bg_watchdog);
-
-void iwl_legacy_setup_watchdog(struct iwl_priv *priv)
-{
- unsigned int timeout = priv->cfg->base_params->wd_timeout;
-
- if (timeout)
- mod_timer(&priv->watchdog,
- jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
- else
- del_timer(&priv->watchdog);
-}
-EXPORT_SYMBOL(iwl_legacy_setup_watchdog);
-
-/*
- * Extended beacon time format:
- * the time in usec is converted into a 32-bit value in extended:internal
- * format, where the extended part is the beacon count and the internal
- * part is the time in usec within one beacon interval.
- */
-u32
-iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
- u32 usec, u32 beacon_interval)
-{
- u32 quot;
- u32 rem;
- u32 interval = beacon_interval * TIME_UNIT;
-
- if (!interval || !usec)
- return 0;
-
- quot = (usec / interval) &
- (iwl_legacy_beacon_time_mask_high(priv,
- priv->hw_params.beacon_time_tsf_bits) >>
- priv->hw_params.beacon_time_tsf_bits);
- rem = (usec % interval) & iwl_legacy_beacon_time_mask_low(priv,
- priv->hw_params.beacon_time_tsf_bits);
-
- return (quot << priv->hw_params.beacon_time_tsf_bits) + rem;
-}
-EXPORT_SYMBOL(iwl_legacy_usecs_to_beacons);
-
-/* base is usually what we get from the uCode with each received frame;
- * it is the same as the HW timer counter counting down
- */
-__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
- u32 addon, u32 beacon_interval)
-{
- u32 base_low = base & iwl_legacy_beacon_time_mask_low(priv,
- priv->hw_params.beacon_time_tsf_bits);
- u32 addon_low = addon & iwl_legacy_beacon_time_mask_low(priv,
- priv->hw_params.beacon_time_tsf_bits);
- u32 interval = beacon_interval * TIME_UNIT;
- u32 res = (base & iwl_legacy_beacon_time_mask_high(priv,
- priv->hw_params.beacon_time_tsf_bits)) +
- (addon & iwl_legacy_beacon_time_mask_high(priv,
- priv->hw_params.beacon_time_tsf_bits));
-
- if (base_low > addon_low)
- res += base_low - addon_low;
- else if (base_low < addon_low) {
- res += interval + base_low - addon_low;
- res += (1 << priv->hw_params.beacon_time_tsf_bits);
- } else
- res += (1 << priv->hw_params.beacon_time_tsf_bits);
-
- return cpu_to_le32(res);
-}
-EXPORT_SYMBOL(iwl_legacy_add_beacon_time);
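A short, hedged sketch of reading the packed value back out, mirroring the mask-and-shift used in the two helpers above. The helper is illustrative only; the simplified masks assume beacon_time_tsf_bits is smaller than 32 (it is typically around 22 on this hardware, but that figure is an assumption here).

/* Illustrative unpacking of an extended:internal beacon time value. */
static void iwl_example_print_beacon_time(struct iwl_priv *priv, u32 packed)
{
	u32 tsf_bits = priv->hw_params.beacon_time_tsf_bits;
	u32 usec_in_interval = packed & ((1U << tsf_bits) - 1); /* low bits */
	u32 beacon_count = packed >> tsf_bits;                  /* high bits */

	IWL_DEBUG_INFO(priv, "beacon %u + %u usec\n",
		       beacon_count, usec_in_interval);
}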
-
-#ifdef CONFIG_PM
-
-int iwl_legacy_pci_suspend(struct device *device)
-{
- struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_priv *priv = pci_get_drvdata(pdev);
-
-	/*
-	 * This function is called when the system goes into suspend state.
-	 * mac80211 will call iwl_mac_stop() from its suspend function first,
-	 * but since iwl_mac_stop() has no knowledge of who the caller is,
-	 * it will not call apm_ops.stop() to stop the DMA operation.
-	 * Call apm_ops.stop here to make sure the DMA is stopped.
-	 */
- iwl_legacy_apm_stop(priv);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_pci_suspend);
-
-int iwl_legacy_pci_resume(struct device *device)
-{
- struct pci_dev *pdev = to_pci_dev(device);
- struct iwl_priv *priv = pci_get_drvdata(pdev);
- bool hw_rfkill = false;
-
- /*
- * We disable the RETRY_TIMEOUT register (0x41) to keep
- * PCI Tx retries from interfering with C3 CPU state.
- */
- pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
-
- iwl_legacy_enable_interrupts(priv);
-
- if (!(iwl_read32(priv, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
- hw_rfkill = true;
-
- if (hw_rfkill)
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
- wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rfkill);
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_pci_resume);
-
-const struct dev_pm_ops iwl_legacy_pm_ops = {
- .suspend = iwl_legacy_pci_suspend,
- .resume = iwl_legacy_pci_resume,
- .freeze = iwl_legacy_pci_suspend,
- .thaw = iwl_legacy_pci_resume,
- .poweroff = iwl_legacy_pci_suspend,
- .restore = iwl_legacy_pci_resume,
-};
-EXPORT_SYMBOL(iwl_legacy_pm_ops);
-
-#endif /* CONFIG_PM */
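A minimal sketch of how a chipset driver wires these shared PM callbacks into its pci_driver through the IWL_LEGACY_PM_OPS macro declared in iwl-core.h; every identifier other than IWL_LEGACY_PM_OPS is a placeholder, not a name from this patch.

/* Illustrative only -- the id table, probe and remove names are placeholders. */
static struct pci_driver iwl_example_driver = {
	.name		= "iwl-example",
	.id_table	= iwl_example_pci_ids,
	.probe		= iwl_example_pci_probe,
	.remove		= iwl_example_pci_remove,
	.driver.pm	= IWL_LEGACY_PM_OPS,	/* NULL when CONFIG_PM is not set */
};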
-
-static void
-iwl_legacy_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- if (!ctx->is_active)
- return;
-
- ctx->qos_data.def_qos_parm.qos_flags = 0;
-
- if (ctx->qos_data.qos_active)
- ctx->qos_data.def_qos_parm.qos_flags |=
- QOS_PARAM_FLG_UPDATE_EDCA_MSK;
-
- if (ctx->ht.enabled)
- ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
-
- IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
- ctx->qos_data.qos_active,
- ctx->qos_data.def_qos_parm.qos_flags);
-
- iwl_legacy_send_cmd_pdu_async(priv, ctx->qos_cmd,
- sizeof(struct iwl_qosparam_cmd),
- &ctx->qos_data.def_qos_parm, NULL);
-}
-
-/**
- * iwl_legacy_mac_config - mac80211 config callback
- */
-int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
-{
- struct iwl_priv *priv = hw->priv;
- const struct iwl_channel_info *ch_info;
- struct ieee80211_conf *conf = &hw->conf;
- struct ieee80211_channel *channel = conf->channel;
- struct iwl_ht_config *ht_conf = &priv->current_ht_config;
- struct iwl_rxon_context *ctx;
- unsigned long flags = 0;
- int ret = 0;
- u16 ch;
- int scan_active = 0;
- bool ht_changed[NUM_IWL_RXON_CTX] = {};
-
- if (WARN_ON(!priv->cfg->ops->legacy))
- return -EOPNOTSUPP;
-
- mutex_lock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
- channel->hw_value, changed);
-
- if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
- scan_active = 1;
- IWL_DEBUG_MAC80211(priv, "scan active\n");
- }
-
- if (changed & (IEEE80211_CONF_CHANGE_SMPS |
- IEEE80211_CONF_CHANGE_CHANNEL)) {
-		/* mac80211 uses static SMPS for non-HT, which is what we want */
- priv->current_ht_config.smps = conf->smps_mode;
-
- /*
- * Recalculate chain counts.
- *
- * If monitor mode is enabled then mac80211 will
- * set up the SM PS mode to OFF if an HT channel is
- * configured.
- */
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- for_each_context(priv, ctx)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
- }
-
-	/* During scanning, mac80211 will delay channel setting until the
-	 * scan finishes, with changed = 0.
-	 */
- if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
- if (scan_active)
- goto set_ch_out;
-
- ch = channel->hw_value;
- ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
- if (!iwl_legacy_is_channel_valid(ch_info)) {
- IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
- ret = -EINVAL;
- goto set_ch_out;
- }
-
- if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
- !iwl_legacy_is_channel_ibss(ch_info)) {
- IWL_DEBUG_MAC80211(priv, "leave - not IBSS channel\n");
- ret = -EINVAL;
- goto set_ch_out;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
-
- for_each_context(priv, ctx) {
- /* Configure HT40 channels */
- if (ctx->ht.enabled != conf_is_ht(conf)) {
- ctx->ht.enabled = conf_is_ht(conf);
- ht_changed[ctx->ctxid] = true;
- }
- if (ctx->ht.enabled) {
- if (conf_is_ht40_minus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_BELOW;
- ctx->ht.is_40mhz = true;
- } else if (conf_is_ht40_plus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
- ctx->ht.is_40mhz = true;
- } else {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_NONE;
- ctx->ht.is_40mhz = false;
- }
- } else
- ctx->ht.is_40mhz = false;
-
- /*
- * Default to no protection. Protection mode will
- * later be set from BSS config in iwl_ht_conf
- */
- ctx->ht.protection =
- IEEE80211_HT_OP_MODE_PROTECTION_NONE;
-
-			/* If we are switching from HT to 2.4 GHz, clear the
-			 * flags from any HT-related info since 2.4 GHz does
-			 * not support HT. */
- if ((le16_to_cpu(ctx->staging.channel) != ch))
- ctx->staging.flags = 0;
-
- iwl_legacy_set_rxon_channel(priv, channel, ctx);
- iwl_legacy_set_rxon_ht(priv, ht_conf);
-
- iwl_legacy_set_flags_for_band(priv, ctx, channel->band,
- ctx->vif);
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (priv->cfg->ops->legacy->update_bcast_stations)
- ret =
- priv->cfg->ops->legacy->update_bcast_stations(priv);
-
- set_ch_out:
- /* The list of supported rates and rate mask can be different
- * for each band; since the band may have changed, reset
- * the rate mask to what mac80211 lists */
- iwl_legacy_set_rate(priv);
- }
-
- if (changed & (IEEE80211_CONF_CHANGE_PS |
- IEEE80211_CONF_CHANGE_IDLE)) {
- ret = iwl_legacy_power_update_mode(priv, false);
- if (ret)
- IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
- }
-
- if (changed & IEEE80211_CONF_CHANGE_POWER) {
- IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
- priv->tx_power_user_lmt, conf->power_level);
-
- iwl_legacy_set_tx_power(priv, conf->power_level, false);
- }
-
- if (!iwl_legacy_is_ready(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
- goto out;
- }
-
- if (scan_active)
- goto out;
-
- for_each_context(priv, ctx) {
- if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
- iwl_legacy_commit_rxon(priv, ctx);
- else
- IWL_DEBUG_INFO(priv,
- "Not re-sending same RXON configuration.\n");
- if (ht_changed[ctx->ctxid])
- iwl_legacy_update_qos(priv, ctx);
- }
-
-out:
- IWL_DEBUG_MAC80211(priv, "leave\n");
- mutex_unlock(&priv->mutex);
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_config);
-
-void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct iwl_priv *priv = hw->priv;
- unsigned long flags;
- /* IBSS can only be the IWL_RXON_CTX_BSS context */
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- if (WARN_ON(!priv->cfg->ops->legacy))
- return;
-
- mutex_lock(&priv->mutex);
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- spin_lock_irqsave(&priv->lock, flags);
- memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
- spin_unlock_irqrestore(&priv->lock, flags);
-
- spin_lock_irqsave(&priv->lock, flags);
-
-	/* new association, get rid of the IBSS beacon skb */
- if (priv->beacon_skb)
- dev_kfree_skb(priv->beacon_skb);
-
- priv->beacon_skb = NULL;
-
- priv->timestamp = 0;
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwl_legacy_scan_cancel_timeout(priv, 100);
- if (!iwl_legacy_is_ready_rf(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
- mutex_unlock(&priv->mutex);
- return;
- }
-
-	/* We are restarting the association process;
-	 * clear the RXON_FILTER_ASSOC_MSK bit.
-	 */
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl_legacy_commit_rxon(priv, ctx);
-
- iwl_legacy_set_rate(priv);
-
- mutex_unlock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
-
-static void iwl_legacy_ht_conf(struct iwl_priv *priv,
- struct ieee80211_vif *vif)
-{
- struct iwl_ht_config *ht_conf = &priv->current_ht_config;
- struct ieee80211_sta *sta;
- struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
- struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
- IWL_DEBUG_ASSOC(priv, "enter:\n");
-
- if (!ctx->ht.enabled)
- return;
-
- ctx->ht.protection =
- bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
- ctx->ht.non_gf_sta_present =
- !!(bss_conf->ht_operation_mode &
- IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
-
- ht_conf->single_chain_sufficient = false;
-
- switch (vif->type) {
- case NL80211_IFTYPE_STATION:
- rcu_read_lock();
- sta = ieee80211_find_sta(vif, bss_conf->bssid);
- if (sta) {
- struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
- int maxstreams;
-
- maxstreams = (ht_cap->mcs.tx_params &
- IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
- >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
- maxstreams += 1;
-
- if ((ht_cap->mcs.rx_mask[1] == 0) &&
- (ht_cap->mcs.rx_mask[2] == 0))
- ht_conf->single_chain_sufficient = true;
- if (maxstreams <= 1)
- ht_conf->single_chain_sufficient = true;
- } else {
-			/*
-			 * If this happens at all, it can only be through a
-			 * race when the AP disconnects us while we're still
-			 * setting up the connection; in that case mac80211
-			 * will soon tell us about it.
-			 */
- ht_conf->single_chain_sufficient = true;
- }
- rcu_read_unlock();
- break;
- case NL80211_IFTYPE_ADHOC:
- ht_conf->single_chain_sufficient = true;
- break;
- default:
- break;
- }
-
- IWL_DEBUG_ASSOC(priv, "leave\n");
-}
-
-static inline void iwl_legacy_set_no_assoc(struct iwl_priv *priv,
- struct ieee80211_vif *vif)
-{
- struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
-
- /*
- * inform the ucode that there is no longer an
- * association and that no more packets should be
- * sent
- */
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- ctx->staging.assoc_id = 0;
- iwl_legacy_commit_rxon(priv, ctx);
-}
-
-static void iwl_legacy_beacon_update(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct iwl_priv *priv = hw->priv;
- unsigned long flags;
- __le64 timestamp;
- struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
-
- if (!skb)
- return;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- lockdep_assert_held(&priv->mutex);
-
- if (!priv->beacon_ctx) {
- IWL_ERR(priv, "update beacon but no beacon context!\n");
- dev_kfree_skb(skb);
- return;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
-
- if (priv->beacon_skb)
- dev_kfree_skb(priv->beacon_skb);
-
- priv->beacon_skb = skb;
-
- timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
- priv->timestamp = le64_to_cpu(timestamp);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if (!iwl_legacy_is_ready_rf(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
- return;
- }
-
- priv->cfg->ops->legacy->post_associate(priv);
-}
-
-void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf,
- u32 changes)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx = iwl_legacy_rxon_ctx_from_vif(vif);
- int ret;
-
- if (WARN_ON(!priv->cfg->ops->legacy))
- return;
-
- IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
-
- mutex_lock(&priv->mutex);
-
- if (!iwl_legacy_is_alive(priv)) {
- mutex_unlock(&priv->mutex);
- return;
- }
-
- if (changes & BSS_CHANGED_QOS) {
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- ctx->qos_data.qos_active = bss_conf->qos;
- iwl_legacy_update_qos(priv, ctx);
- spin_unlock_irqrestore(&priv->lock, flags);
- }
-
- if (changes & BSS_CHANGED_BEACON_ENABLED) {
- /*
- * the add_interface code must make sure we only ever
- * have a single interface that could be beaconing at
- * any time.
- */
- if (vif->bss_conf.enable_beacon)
- priv->beacon_ctx = ctx;
- else
- priv->beacon_ctx = NULL;
- }
-
- if (changes & BSS_CHANGED_BSSID) {
- IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
-
- /*
- * If there is currently a HW scan going on in the
- * background then we need to cancel it else the RXON
- * below/in post_associate will fail.
- */
- if (iwl_legacy_scan_cancel_timeout(priv, 100)) {
- IWL_WARN(priv,
- "Aborted scan still in progress after 100ms\n");
- IWL_DEBUG_MAC80211(priv,
- "leaving - scan abort failed.\n");
- mutex_unlock(&priv->mutex);
- return;
- }
-
- /* mac80211 only sets assoc when in STATION mode */
- if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
- memcpy(ctx->staging.bssid_addr,
- bss_conf->bssid, ETH_ALEN);
-
- /* currently needed in a few places */
- memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
- } else {
- ctx->staging.filter_flags &=
- ~RXON_FILTER_ASSOC_MSK;
- }
-
- }
-
- /*
- * This needs to be after setting the BSSID in case
- * mac80211 decides to do both changes at once because
- * it will invoke post_associate.
- */
- if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
- iwl_legacy_beacon_update(hw, vif);
-
- if (changes & BSS_CHANGED_ERP_PREAMBLE) {
- IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
- bss_conf->use_short_preamble);
- if (bss_conf->use_short_preamble)
- ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
- }
-
- if (changes & BSS_CHANGED_ERP_CTS_PROT) {
- IWL_DEBUG_MAC80211(priv,
- "ERP_CTS %d\n", bss_conf->use_cts_prot);
- if (bss_conf->use_cts_prot &&
- (priv->band != IEEE80211_BAND_5GHZ))
- ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
- if (bss_conf->use_cts_prot)
- ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
- else
- ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
- }
-
- if (changes & BSS_CHANGED_BASIC_RATES) {
- /* XXX use this information
- *
- * To do that, remove code from iwl_legacy_set_rate() and put something
- * like this here:
- *
- if (A-band)
- ctx->staging.ofdm_basic_rates =
- bss_conf->basic_rates;
- else
- ctx->staging.ofdm_basic_rates =
- bss_conf->basic_rates >> 4;
- ctx->staging.cck_basic_rates =
- bss_conf->basic_rates & 0xF;
- */
- }
-
- if (changes & BSS_CHANGED_HT) {
- iwl_legacy_ht_conf(priv, vif);
-
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
- }
-
- if (changes & BSS_CHANGED_ASSOC) {
- IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
- if (bss_conf->assoc) {
- priv->timestamp = bss_conf->timestamp;
-
- if (!iwl_legacy_is_rfkill(priv))
- priv->cfg->ops->legacy->post_associate(priv);
- } else
- iwl_legacy_set_no_assoc(priv, vif);
- }
-
- if (changes && iwl_legacy_is_associated_ctx(ctx) && bss_conf->aid) {
- IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
- changes);
- ret = iwl_legacy_send_rxon_assoc(priv, ctx);
- if (!ret) {
- /* Sync active_rxon with latest change. */
- memcpy((void *)&ctx->active,
- &ctx->staging,
- sizeof(struct iwl_legacy_rxon_cmd));
- }
- }
-
- if (changes & BSS_CHANGED_BEACON_ENABLED) {
- if (vif->bss_conf.enable_beacon) {
- memcpy(ctx->staging.bssid_addr,
- bss_conf->bssid, ETH_ALEN);
- memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
- priv->cfg->ops->legacy->config_ap(priv);
- } else
- iwl_legacy_set_no_assoc(priv, vif);
- }
-
- if (changes & BSS_CHANGED_IBSS) {
- ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
- bss_conf->ibss_joined);
- if (ret)
- IWL_ERR(priv, "failed to %s IBSS station %pM\n",
- bss_conf->ibss_joined ? "add" : "remove",
- bss_conf->bssid);
- }
-
- mutex_unlock(&priv->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
-
-irqreturn_t iwl_legacy_isr(int irq, void *data)
-{
- struct iwl_priv *priv = data;
- u32 inta, inta_mask;
- u32 inta_fh;
- unsigned long flags;
- if (!priv)
- return IRQ_NONE;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Disable (but don't clear!) interrupts here to avoid
- * back-to-back ISRs and sporadic interrupts from our NIC.
- * If we have something to service, the tasklet will re-enable ints.
- * If we *don't* have something, we'll re-enable before leaving here. */
- inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
- iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
- /* Discover which interrupts are active/pending */
- inta = iwl_read32(priv, CSR_INT);
- inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
-
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- if (!inta && !inta_fh) {
- IWL_DEBUG_ISR(priv,
- "Ignore interrupt, inta == 0, inta_fh == 0\n");
- goto none;
- }
-
- if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
- /* Hardware disappeared. It might have already raised
- * an interrupt */
- IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
- goto unplugged;
- }
-
- IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
- inta, inta_mask, inta_fh);
-
- inta &= ~CSR_INT_BIT_SCD;
-
- /* iwl_irq_tasklet() will service interrupts and re-enable them */
- if (likely(inta || inta_fh))
- tasklet_schedule(&priv->irq_tasklet);
-
-unplugged:
- spin_unlock_irqrestore(&priv->lock, flags);
- return IRQ_HANDLED;
-
-none:
- /* re-enable interrupts here since we don't have anything to service. */
- /* only Re-enable if disabled by irq */
- if (test_bit(STATUS_INT_ENABLED, &priv->status))
- iwl_legacy_enable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
- return IRQ_NONE;
-}
-EXPORT_SYMBOL(iwl_legacy_isr);
-
-/*
- * iwl_legacy_tx_cmd_protection: Set RTS/CTS. Only the 3945 and 4965
- * share this function.
- */
-void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- __le16 fc, __le32 *tx_flags)
-{
- if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
- *tx_flags |= TX_CMD_FLG_RTS_MSK;
- *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
- *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
-
- if (!ieee80211_is_mgmt(fc))
- return;
-
- switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
- case cpu_to_le16(IEEE80211_STYPE_AUTH):
- case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
- case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
- case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
- *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
- *tx_flags |= TX_CMD_FLG_CTS_MSK;
- break;
- }
- } else if (info->control.rates[0].flags &
- IEEE80211_TX_RC_USE_CTS_PROTECT) {
- *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
- *tx_flags |= TX_CMD_FLG_CTS_MSK;
- *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.h b/drivers/net/wireless/iwlegacy/iwl-core.h
deleted file mode 100644
index d1271fe..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-core.h
+++ /dev/null
@@ -1,636 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-#ifndef __iwl_legacy_core_h__
-#define __iwl_legacy_core_h__
-
-/************************
- * forward declarations *
- ************************/
-struct iwl_host_cmd;
-struct iwl_cmd;
-
-
-#define IWLWIFI_VERSION "in-tree:"
-#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
-#define DRV_AUTHOR "<ilw@linux.intel.com>"
-
-#define IWL_PCI_DEVICE(dev, subdev, cfg) \
- .vendor = PCI_VENDOR_ID_INTEL, .device = (dev), \
- .subvendor = PCI_ANY_ID, .subdevice = (subdev), \
- .driver_data = (kernel_ulong_t)&(cfg)
-
-#define TIME_UNIT 1024
-
-#define IWL_SKU_G 0x1
-#define IWL_SKU_A 0x2
-#define IWL_SKU_N 0x8
-
-#define IWL_CMD(x) case x: return #x
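The stringification here is what drives the switch statements in iwl-core.c; for example, the line IWL_CMD(MANAGEMENT_AUTH); in iwl_legacy_get_mgmt_string() expands to:

case MANAGEMENT_AUTH: return "MANAGEMENT_AUTH";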
-
-struct iwl_hcmd_ops {
- int (*rxon_assoc)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
- int (*commit_rxon)(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
- void (*set_rxon_chain)(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-};
-
-struct iwl_hcmd_utils_ops {
- u16 (*get_hcmd_size)(u8 cmd_id, u16 len);
- u16 (*build_addsta_hcmd)(const struct iwl_legacy_addsta_cmd *cmd,
- u8 *data);
- int (*request_scan)(struct iwl_priv *priv, struct ieee80211_vif *vif);
- void (*post_scan)(struct iwl_priv *priv);
-};
-
-struct iwl_apm_ops {
- int (*init)(struct iwl_priv *priv);
- void (*config)(struct iwl_priv *priv);
-};
-
-struct iwl_debugfs_ops {
- ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
- ssize_t (*tx_stats_read)(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
- ssize_t (*general_stats_read)(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos);
-};
-
-struct iwl_temp_ops {
- void (*temperature)(struct iwl_priv *priv);
-};
-
-struct iwl_lib_ops {
- /* set hw dependent parameters */
- int (*set_hw_params)(struct iwl_priv *priv);
- /* Handling TX */
- void (*txq_update_byte_cnt_tbl)(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- u16 byte_cnt);
- int (*txq_attach_buf_to_tfd)(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- dma_addr_t addr,
- u16 len, u8 reset, u8 pad);
- void (*txq_free_tfd)(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
- int (*txq_init)(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
- /* setup Rx handler */
- void (*rx_handler_setup)(struct iwl_priv *priv);
- /* alive notification after init uCode load */
- void (*init_alive_start)(struct iwl_priv *priv);
- /* check validity of rtc data address */
- int (*is_valid_rtc_data_addr)(u32 addr);
- /* 1st ucode load */
- int (*load_ucode)(struct iwl_priv *priv);
-
- void (*dump_nic_error_log)(struct iwl_priv *priv);
- int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
- int (*set_channel_switch)(struct iwl_priv *priv,
- struct ieee80211_channel_switch *ch_switch);
- /* power management */
- struct iwl_apm_ops apm_ops;
-
- /* power */
- int (*send_tx_power) (struct iwl_priv *priv);
- void (*update_chain_flags)(struct iwl_priv *priv);
-
- /* eeprom operations (as defined in iwl-eeprom.h) */
- struct iwl_eeprom_ops eeprom_ops;
-
- /* temperature */
- struct iwl_temp_ops temp_ops;
-
- struct iwl_debugfs_ops debugfs_ops;
-
-};
-
-struct iwl_led_ops {
- int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
-};
-
-struct iwl_legacy_ops {
- void (*post_associate)(struct iwl_priv *priv);
- void (*config_ap)(struct iwl_priv *priv);
- /* station management */
- int (*update_bcast_stations)(struct iwl_priv *priv);
- int (*manage_ibss_station)(struct iwl_priv *priv,
- struct ieee80211_vif *vif, bool add);
-};
-
-struct iwl_ops {
- const struct iwl_lib_ops *lib;
- const struct iwl_hcmd_ops *hcmd;
- const struct iwl_hcmd_utils_ops *utils;
- const struct iwl_led_ops *led;
- const struct iwl_nic_ops *nic;
- const struct iwl_legacy_ops *legacy;
- const struct ieee80211_ops *ieee80211_ops;
-};
-
-struct iwl_mod_params {
- int sw_crypto; /* def: 0 = using hardware encryption */
- int disable_hw_scan; /* def: 0 = use h/w scan */
- int num_of_queues; /* def: HW dependent */
- int disable_11n; /* def: 0 = 11n capabilities enabled */
- int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
- int antenna; /* def: 0 = both antennas (use diversity) */
- int restart_fw; /* def: 1 = restart firmware */
-};
-
-/*
- * @led_compensation: compensate the LED on/off time per HW according
- *	to the deviation, to achieve the desired LED frequency.
- *	The detailed algorithm is described in iwl-led.c
- * @chain_noise_num_beacons: number of beacons used to compute chain noise
- * @wd_timeout: TX queues watchdog timeout
- * @temperature_kelvin: temperature reported by uCode, in kelvin
- * @ucode_tracing: support uCode continuous tracing
- * @sensitivity_calib_by_driver: driver has the capability to perform
- *	sensitivity calibration
- * @chain_noise_calib_by_driver: driver has the capability to perform
- *	chain noise calibration
- */
-struct iwl_base_params {
- int eeprom_size;
- int num_of_queues; /* def: HW dependent */
- int num_of_ampdu_queues;/* def: HW dependent */
- /* for iwl_legacy_apm_init() */
- u32 pll_cfg_val;
- bool set_l0s;
- bool use_bsm;
-
- u16 led_compensation;
- int chain_noise_num_beacons;
- unsigned int wd_timeout;
- bool temperature_kelvin;
- const bool ucode_tracing;
- const bool sensitivity_calib_by_driver;
- const bool chain_noise_calib_by_driver;
-};
-
-/**
- * struct iwl_cfg
- * @fw_name_pre: Firmware filename prefix. The api version and extension
- * (.ucode) will be added to filename before loading from disk. The
- * filename is constructed as fw_name_pre<api>.ucode.
- * @ucode_api_max: Highest version of uCode API supported by driver.
- * @ucode_api_min: Lowest version of uCode API supported by driver.
- * @scan_antennas: available antenna for scan operation
- * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
- *
- * We enable the driver to be backward compatible wrt API version. The
- * driver specifies which APIs it supports (with @ucode_api_max being the
- * highest and @ucode_api_min the lowest). Firmware will only be loaded if
- * it has a supported API version. The firmware's API version will be
- * stored in @iwl_priv, enabling the driver to make runtime changes based
- * on firmware version used.
- *
- * For example,
- * if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
- * Driver interacts with Firmware API version >= 2.
- * } else {
- * Driver interacts with Firmware API version 1.
- * }
- *
- * The ideal usage of this infrastructure is to treat a new ucode API
- * release as a new hardware revision. That is, through utilizing the
- * iwl_hcmd_utils_ops etc. we accommodate different command structures
- * and flows between hardware versions as well as their API
- * versions.
- *
- */
-struct iwl_cfg {
- /* params specific to an individual device within a device family */
- const char *name;
- const char *fw_name_pre;
- const unsigned int ucode_api_max;
- const unsigned int ucode_api_min;
- u8 valid_tx_ant;
- u8 valid_rx_ant;
- unsigned int sku;
- u16 eeprom_ver;
- u16 eeprom_calib_ver;
- const struct iwl_ops *ops;
- /* module based parameters which can be set from modprobe cmd */
- const struct iwl_mod_params *mod_params;
- /* params not likely to change within a device family */
- struct iwl_base_params *base_params;
- /* params likely to change within a device family */
- u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
- enum iwl_led_mode led_mode;
-};
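As a hedged illustration of how a device entry fills this structure in one of the chipset files, the sketch below uses placeholder names and values throughout; none of them are taken from an actual iwlegacy configuration.

/* Illustrative only -- all identifiers and numbers are placeholders. */
static struct iwl_cfg iwl_example_cfg = {
	.name		= "Example Wireless N",
	.fw_name_pre	= "iwlwifi-example-",
	.ucode_api_max	= 2,
	.ucode_api_min	= 1,
	.sku		= IWL_SKU_G | IWL_SKU_N,
	.valid_tx_ant	= 0x1,
	.valid_rx_ant	= 0x3,
	.ops		= &iwl_example_ops,		/* placeholder ops table */
	.mod_params	= &iwl_example_mod_params,	/* placeholder module params */
	.base_params	= &iwl_example_base_params,	/* placeholder base params */
};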
-
-/***************************
- * L i b *
- ***************************/
-
-struct ieee80211_hw *iwl_legacy_alloc_all(struct iwl_cfg *cfg);
-int iwl_legacy_mac_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
- const struct ieee80211_tx_queue_params *params);
-int iwl_legacy_mac_tx_last_beacon(struct ieee80211_hw *hw);
-void iwl_legacy_set_rxon_hwcrypto(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- int hw_decrypt);
-int iwl_legacy_check_rxon_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-int iwl_legacy_full_rxon_required(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-int iwl_legacy_set_rxon_channel(struct iwl_priv *priv,
- struct ieee80211_channel *ch,
- struct iwl_rxon_context *ctx);
-void iwl_legacy_set_flags_for_band(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- enum ieee80211_band band,
- struct ieee80211_vif *vif);
-u8 iwl_legacy_get_single_channel_number(struct iwl_priv *priv,
- enum ieee80211_band band);
-void iwl_legacy_set_rxon_ht(struct iwl_priv *priv,
- struct iwl_ht_config *ht_conf);
-bool iwl_legacy_is_ht40_tx_allowed(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct ieee80211_sta_ht_cap *ht_cap);
-void iwl_legacy_connection_init_rx_config(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-void iwl_legacy_set_rate(struct iwl_priv *priv);
-int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- u32 decrypt_res,
- struct ieee80211_rx_status *stats);
-void iwl_legacy_irq_handle_error(struct iwl_priv *priv);
-int iwl_legacy_mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-void iwl_legacy_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-int iwl_legacy_mac_change_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum nl80211_iftype newtype, bool newp2p);
-int iwl_legacy_alloc_txq_mem(struct iwl_priv *priv);
-void iwl_legacy_txq_mem(struct iwl_priv *priv);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv);
-void iwl_legacy_free_traffic_mem(struct iwl_priv *priv);
-void iwl_legacy_reset_traffic_log(struct iwl_priv *priv);
-void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
- u16 length, struct ieee80211_hdr *header);
-void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
- u16 length, struct ieee80211_hdr *header);
-const char *iwl_legacy_get_mgmt_string(int cmd);
-const char *iwl_legacy_get_ctrl_string(int cmd);
-void iwl_legacy_clear_traffic_stats(struct iwl_priv *priv);
-void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc,
- u16 len);
-#else
-static inline int iwl_legacy_alloc_traffic_mem(struct iwl_priv *priv)
-{
- return 0;
-}
-static inline void iwl_legacy_free_traffic_mem(struct iwl_priv *priv)
-{
-}
-static inline void iwl_legacy_reset_traffic_log(struct iwl_priv *priv)
-{
-}
-static inline void iwl_legacy_dbg_log_tx_data_frame(struct iwl_priv *priv,
- u16 length, struct ieee80211_hdr *header)
-{
-}
-static inline void iwl_legacy_dbg_log_rx_data_frame(struct iwl_priv *priv,
- u16 length, struct ieee80211_hdr *header)
-{
-}
-static inline void iwl_legacy_update_stats(struct iwl_priv *priv, bool is_tx,
- __le16 fc, u16 len)
-{
-}
-#endif
-/*****************************************************
- * RX handlers.
- * **************************************************/
-void iwl_legacy_rx_pm_sleep_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl_legacy_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl_legacy_rx_reply_error(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-
-/*****************************************************
-* RX
-******************************************************/
-void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv);
-void iwl_legacy_cmd_queue_free(struct iwl_priv *priv);
-int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv);
-void iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
- struct iwl_rx_queue *q);
-int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q);
-void iwl_legacy_tx_cmd_complete(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-/* Handlers */
-void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-void iwl_legacy_recover_from_statistics(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt);
-void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success);
-void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
-
-/* TX helpers */
-
-/*****************************************************
-* TX
-******************************************************/
-void iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv,
- struct iwl_tx_queue *txq);
-int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
- int slots_num, u32 txq_id);
-void iwl_legacy_tx_queue_reset(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- int slots_num, u32 txq_id);
-void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id);
-void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id);
-void iwl_legacy_setup_watchdog(struct iwl_priv *priv);
-/*****************************************************
- * TX power
- ****************************************************/
-int iwl_legacy_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
-
-/*******************************************************************************
- * Rate
- ******************************************************************************/
-
-u8 iwl_legacy_get_lowest_plcp(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-
-/*******************************************************************************
- * Scanning
- ******************************************************************************/
-void iwl_legacy_init_scan_params(struct iwl_priv *priv);
-int iwl_legacy_scan_cancel(struct iwl_priv *priv);
-int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
-void iwl_legacy_force_scan_end(struct iwl_priv *priv);
-int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_scan_request *req);
-void iwl_legacy_internal_short_hw_scan(struct iwl_priv *priv);
-int iwl_legacy_force_reset(struct iwl_priv *priv, bool external);
-u16 iwl_legacy_fill_probe_req(struct iwl_priv *priv,
- struct ieee80211_mgmt *frame,
- const u8 *ta, const u8 *ie, int ie_len, int left);
-void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv);
-u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band,
- u8 n_probes);
-u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band,
- struct ieee80211_vif *vif);
-void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv);
-void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv);
-
-/* For faster active scanning, the scan will move to the next channel if fewer
- * than PLCP_QUIET_THRESH packets are heard on this channel within
- * ACTIVE_QUIET_TIME after sending a probe request. This shortens the dwell
- * time if it's a quiet channel (nothing responded to our probe, and there's
- * no other traffic).
- * Disable the "quiet" feature by setting PLCP_QUIET_THRESH to 0. */
-#define IWL_ACTIVE_QUIET_TIME cpu_to_le16(10) /* msec */
-#define IWL_PLCP_QUIET_THRESH cpu_to_le16(1) /* packets */
-
-#define IWL_SCAN_CHECK_WATCHDOG (HZ * 7)
-
-/*****************************************************
- * S e n d i n g H o s t C o m m a n d s *
- *****************************************************/
-
-const char *iwl_legacy_get_cmd_string(u8 cmd);
-int __must_check iwl_legacy_send_cmd_sync(struct iwl_priv *priv,
- struct iwl_host_cmd *cmd);
-int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
-int __must_check iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id,
- u16 len, const void *data);
-int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
- const void *data,
- void (*callback)(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt));
-
-int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
-
-
-/*****************************************************
- * PCI *
- *****************************************************/
-
-static inline u16 iwl_legacy_pcie_link_ctl(struct iwl_priv *priv)
-{
- int pos;
- u16 pci_lnk_ctl;
- pos = pci_pcie_cap(priv->pci_dev);
- pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
- return pci_lnk_ctl;
-}
-
-void iwl_legacy_bg_watchdog(unsigned long data);
-u32 iwl_legacy_usecs_to_beacons(struct iwl_priv *priv,
- u32 usec, u32 beacon_interval);
-__le32 iwl_legacy_add_beacon_time(struct iwl_priv *priv, u32 base,
- u32 addon, u32 beacon_interval);
-
-#ifdef CONFIG_PM
-int iwl_legacy_pci_suspend(struct device *device);
-int iwl_legacy_pci_resume(struct device *device);
-extern const struct dev_pm_ops iwl_legacy_pm_ops;
-
-#define IWL_LEGACY_PM_OPS (&iwl_legacy_pm_ops)
-
-#else /* !CONFIG_PM */
-
-#define IWL_LEGACY_PM_OPS NULL
-
-#endif /* !CONFIG_PM */
-
-/*****************************************************
-* Error Handling Debugging
-******************************************************/
-void iwl4965_dump_nic_error_log(struct iwl_priv *priv);
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-#else
-static inline void iwl_legacy_print_rx_config_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
-}
-#endif
-
-void iwl_legacy_clear_isr_stats(struct iwl_priv *priv);
-
-/*****************************************************
-* GEOS
-******************************************************/
-int iwl_legacy_init_geos(struct iwl_priv *priv);
-void iwl_legacy_free_geos(struct iwl_priv *priv);
-
-/*************** DRIVER STATUS FUNCTIONS *****/
-
-#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
-/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
-#define STATUS_INT_ENABLED 2
-#define STATUS_RF_KILL_HW 3
-#define STATUS_CT_KILL 4
-#define STATUS_INIT 5
-#define STATUS_ALIVE 6
-#define STATUS_READY 7
-#define STATUS_TEMPERATURE 8
-#define STATUS_GEO_CONFIGURED 9
-#define STATUS_EXIT_PENDING 10
-#define STATUS_STATISTICS 12
-#define STATUS_SCANNING 13
-#define STATUS_SCAN_ABORTING 14
-#define STATUS_SCAN_HW 15
-#define STATUS_POWER_PMI 16
-#define STATUS_FW_ERROR 17
-#define STATUS_CHANNEL_SWITCH_PENDING 18
-
-static inline int iwl_legacy_is_ready(struct iwl_priv *priv)
-{
- /* The adapter is 'ready' if READY and GEO_CONFIGURED bits are
- * set but EXIT_PENDING is not */
- return test_bit(STATUS_READY, &priv->status) &&
- test_bit(STATUS_GEO_CONFIGURED, &priv->status) &&
- !test_bit(STATUS_EXIT_PENDING, &priv->status);
-}
-
-static inline int iwl_legacy_is_alive(struct iwl_priv *priv)
-{
- return test_bit(STATUS_ALIVE, &priv->status);
-}
-
-static inline int iwl_legacy_is_init(struct iwl_priv *priv)
-{
- return test_bit(STATUS_INIT, &priv->status);
-}
-
-static inline int iwl_legacy_is_rfkill_hw(struct iwl_priv *priv)
-{
- return test_bit(STATUS_RF_KILL_HW, &priv->status);
-}
-
-static inline int iwl_legacy_is_rfkill(struct iwl_priv *priv)
-{
- return iwl_legacy_is_rfkill_hw(priv);
-}
-
-static inline int iwl_legacy_is_ctkill(struct iwl_priv *priv)
-{
- return test_bit(STATUS_CT_KILL, &priv->status);
-}
-
-static inline int iwl_legacy_is_ready_rf(struct iwl_priv *priv)
-{
-
- if (iwl_legacy_is_rfkill(priv))
- return 0;
-
- return iwl_legacy_is_ready(priv);
-}
-
-extern void iwl_legacy_send_bt_config(struct iwl_priv *priv);
-extern int iwl_legacy_send_statistics_request(struct iwl_priv *priv,
- u8 flags, bool clear);
-void iwl_legacy_apm_stop(struct iwl_priv *priv);
-int iwl_legacy_apm_init(struct iwl_priv *priv);
-
-int iwl_legacy_send_rxon_timing(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-static inline int iwl_legacy_send_rxon_assoc(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- return priv->cfg->ops->hcmd->rxon_assoc(priv, ctx);
-}
-static inline int iwl_legacy_commit_rxon(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- return priv->cfg->ops->hcmd->commit_rxon(priv, ctx);
-}
-static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
- struct iwl_priv *priv, enum ieee80211_band band)
-{
- return priv->hw->wiphy->bands[band];
-}
-
-/* mac80211 handlers */
-int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed);
-void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *bss_conf,
- u32 changes);
-void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- __le16 fc, __le32 *tx_flags);
-
-irqreturn_t iwl_legacy_isr(int irq, void *data);
-
-#endif /* __iwl_legacy_core_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-debug.h b/drivers/net/wireless/iwlegacy/iwl-debug.h
deleted file mode 100644
index ae13112..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-debug.h
+++ /dev/null
@@ -1,198 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_debug_h__
-#define __iwl_legacy_debug_h__
-
-struct iwl_priv;
-extern u32 iwlegacy_debug_level;
-
-#define IWL_ERR(p, f, a...) dev_err(&((p)->pci_dev->dev), f, ## a)
-#define IWL_WARN(p, f, a...) dev_warn(&((p)->pci_dev->dev), f, ## a)
-#define IWL_INFO(p, f, a...) dev_info(&((p)->pci_dev->dev), f, ## a)
-#define IWL_CRIT(p, f, a...) dev_crit(&((p)->pci_dev->dev), f, ## a)
-
-#define iwl_print_hex_error(priv, p, len) \
-do { \
- print_hex_dump(KERN_ERR, "iwl data: ", \
- DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
-} while (0)
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-#define IWL_DEBUG(__priv, level, fmt, args...) \
-do { \
- if (iwl_legacy_get_debug_level(__priv) & (level)) \
- dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
- "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
- __func__ , ## args); \
-} while (0)
-
-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) \
-do { \
- if ((iwl_legacy_get_debug_level(__priv) & (level)) && net_ratelimit()) \
- dev_printk(KERN_ERR, &(__priv->hw->wiphy->dev), \
- "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
- __func__ , ## args); \
-} while (0)
-
-#define iwl_print_hex_dump(priv, level, p, len) \
-do { \
- if (iwl_legacy_get_debug_level(priv) & level) \
- print_hex_dump(KERN_DEBUG, "iwl data: ", \
- DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
-} while (0)
-
-#else
-#define IWL_DEBUG(__priv, level, fmt, args...)
-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
-static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
- const void *p, u32 len)
-{}
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
-int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name);
-void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv);
-#else
-static inline int
-iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
-{
- return 0;
-}
-static inline void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
-{
-}
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
-
-/*
- * To use the debug system:
- *
- * If you are defining a new debug classification, simply add it to the #define
- * list here in the form of
- *
- * #define IWL_DL_xxxx VALUE
- *
- * where xxxx should be the name of the classification (for example, WEP).
- *
- * You then need to either add an IWL_xxxx_DEBUG() macro definition for your
- * classification, or use IWL_DEBUG(IWL_DL_xxxx, ...) whenever you want
- * to send output to that classification.
- *
- * The active debug levels can be accessed via files
- *
- * /sys/module/iwl4965/parameters/debug{50}
- * /sys/module/iwl3945/parameters/debug
- * /sys/class/net/wlan0/device/debug_level
- *
- * when CONFIG_IWLWIFI_LEGACY_DEBUG=y.
- */
-
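Following the recipe above, a new classification would look like the two lines below; the name and the bit position are placeholders (all 32 bits are already assigned in the list that follows), so this is purely illustrative.

/* Hypothetical example -- IWL_DL_EXAMPLE and its bit are placeholders. */
#define IWL_DL_EXAMPLE		(1 << 0)	/* would need a genuinely free bit */
#define IWL_DEBUG_EXAMPLE(p, f, a...)	IWL_DEBUG(p, IWL_DL_EXAMPLE, f, ## a)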
-/* 0x0000000F - 0x00000001 */
-#define IWL_DL_INFO (1 << 0)
-#define IWL_DL_MAC80211 (1 << 1)
-#define IWL_DL_HCMD (1 << 2)
-#define IWL_DL_STATE (1 << 3)
-/* 0x000000F0 - 0x00000010 */
-#define IWL_DL_MACDUMP (1 << 4)
-#define IWL_DL_HCMD_DUMP (1 << 5)
-#define IWL_DL_EEPROM (1 << 6)
-#define IWL_DL_RADIO (1 << 7)
-/* 0x00000F00 - 0x00000100 */
-#define IWL_DL_POWER (1 << 8)
-#define IWL_DL_TEMP (1 << 9)
-#define IWL_DL_NOTIF (1 << 10)
-#define IWL_DL_SCAN (1 << 11)
-/* 0x0000F000 - 0x00001000 */
-#define IWL_DL_ASSOC (1 << 12)
-#define IWL_DL_DROP (1 << 13)
-#define IWL_DL_TXPOWER (1 << 14)
-#define IWL_DL_AP (1 << 15)
-/* 0x000F0000 - 0x00010000 */
-#define IWL_DL_FW (1 << 16)
-#define IWL_DL_RF_KILL (1 << 17)
-#define IWL_DL_FW_ERRORS (1 << 18)
-#define IWL_DL_LED (1 << 19)
-/* 0x00F00000 - 0x00100000 */
-#define IWL_DL_RATE (1 << 20)
-#define IWL_DL_CALIB (1 << 21)
-#define IWL_DL_WEP (1 << 22)
-#define IWL_DL_TX (1 << 23)
-/* 0x0F000000 - 0x01000000 */
-#define IWL_DL_RX (1 << 24)
-#define IWL_DL_ISR (1 << 25)
-#define IWL_DL_HT (1 << 26)
-#define IWL_DL_IO (1 << 27)
-/* 0xF0000000 - 0x10000000 */
-#define IWL_DL_11H (1 << 28)
-#define IWL_DL_STATS (1 << 29)
-#define IWL_DL_TX_REPLY (1 << 30)
-#define IWL_DL_QOS (1 << 31)
-
-#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
-#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
-#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
-#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
-#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
-#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
-#define IWL_DEBUG_TX(p, f, a...) IWL_DEBUG(p, IWL_DL_TX, f, ## a)
-#define IWL_DEBUG_ISR(p, f, a...) IWL_DEBUG(p, IWL_DL_ISR, f, ## a)
-#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
-#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
-#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
-#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
-#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
-#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
-#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
-#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
-#define IWL_DEBUG_DROP(p, f, a...) IWL_DEBUG(p, IWL_DL_DROP, f, ## a)
-#define IWL_DEBUG_DROP_LIMIT(p, f, a...) \
- IWL_DEBUG_LIMIT(p, IWL_DL_DROP, f, ## a)
-#define IWL_DEBUG_AP(p, f, a...) IWL_DEBUG(p, IWL_DL_AP, f, ## a)
-#define IWL_DEBUG_TXPOWER(p, f, a...) IWL_DEBUG(p, IWL_DL_TXPOWER, f, ## a)
-#define IWL_DEBUG_IO(p, f, a...) IWL_DEBUG(p, IWL_DL_IO, f, ## a)
-#define IWL_DEBUG_RATE(p, f, a...) IWL_DEBUG(p, IWL_DL_RATE, f, ## a)
-#define IWL_DEBUG_RATE_LIMIT(p, f, a...) \
- IWL_DEBUG_LIMIT(p, IWL_DL_RATE, f, ## a)
-#define IWL_DEBUG_NOTIF(p, f, a...) IWL_DEBUG(p, IWL_DL_NOTIF, f, ## a)
-#define IWL_DEBUG_ASSOC(p, f, a...) \
- IWL_DEBUG(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
-#define IWL_DEBUG_ASSOC_LIMIT(p, f, a...) \
- IWL_DEBUG_LIMIT(p, IWL_DL_ASSOC | IWL_DL_INFO, f, ## a)
-#define IWL_DEBUG_HT(p, f, a...) IWL_DEBUG(p, IWL_DL_HT, f, ## a)
-#define IWL_DEBUG_STATS(p, f, a...) IWL_DEBUG(p, IWL_DL_STATS, f, ## a)
-#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \
- IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
-#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
-#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
- IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
-#define IWL_DEBUG_QOS(p, f, a...) IWL_DEBUG(p, IWL_DL_QOS, f, ## a)
-#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
-#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
-#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
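With CONFIG_IWLWIFI_LEGACY_DEBUG enabled and the matching IWL_DL_* bit set in the
debug level, the per-classification macros above are used like printk-style calls.
A minimal sketch of a hypothetical call site (priv and scanned are assumed names,
not taken from this file):

	IWL_DEBUG_SCAN(priv, "scan finished, %d channels scanned\n", scanned);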
-
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-debugfs.c b/drivers/net/wireless/iwlegacy/iwl-debugfs.c
deleted file mode 100644
index 1407dca..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-debugfs.c
+++ /dev/null
@@ -1,1314 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-#include <linux/ieee80211.h>
-#include <linux/export.h>
-#include <net/mac80211.h>
-
-
-#include "iwl-dev.h"
-#include "iwl-debug.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-
-/* create and remove of files */
-#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
- if (!debugfs_create_file(#name, mode, parent, priv, \
- &iwl_legacy_dbgfs_##name##_ops)) \
- goto err; \
-} while (0)
-
-#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
- struct dentry *__tmp; \
- __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
- parent, ptr); \
- if (IS_ERR(__tmp) || !__tmp) \
- goto err; \
-} while (0)
-
-#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
- struct dentry *__tmp; \
- __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \
- parent, ptr); \
- if (IS_ERR(__tmp) || !__tmp) \
- goto err; \
-} while (0)
-
-/* file operation */
-#define DEBUGFS_READ_FUNC(name) \
-static ssize_t iwl_legacy_dbgfs_##name##_read(struct file *file, \
- char __user *user_buf, \
- size_t count, loff_t *ppos);
-
-#define DEBUGFS_WRITE_FUNC(name) \
-static ssize_t iwl_legacy_dbgfs_##name##_write(struct file *file, \
- const char __user *user_buf, \
- size_t count, loff_t *ppos);
-
-
-static int
-iwl_legacy_dbgfs_open_file_generic(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-
-#define DEBUGFS_READ_FILE_OPS(name) \
- DEBUGFS_READ_FUNC(name); \
-static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
- .read = iwl_legacy_dbgfs_##name##_read, \
- .open = iwl_legacy_dbgfs_open_file_generic, \
- .llseek = generic_file_llseek, \
-};
-
-#define DEBUGFS_WRITE_FILE_OPS(name) \
- DEBUGFS_WRITE_FUNC(name); \
-static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
- .write = iwl_legacy_dbgfs_##name##_write, \
- .open = iwl_legacy_dbgfs_open_file_generic, \
- .llseek = generic_file_llseek, \
-};
-
-#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
- DEBUGFS_READ_FUNC(name); \
- DEBUGFS_WRITE_FUNC(name); \
-static const struct file_operations iwl_legacy_dbgfs_##name##_ops = { \
- .write = iwl_legacy_dbgfs_##name##_write, \
- .read = iwl_legacy_dbgfs_##name##_read, \
- .open = iwl_legacy_dbgfs_open_file_generic, \
- .llseek = generic_file_llseek, \
-};
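For reference, this is roughly what DEBUGFS_READ_WRITE_FILE_OPS(foo) expands to for
a hypothetical file named foo; it is the mechanical expansion of the three macros
above, shown only as an illustration:

static ssize_t iwl_legacy_dbgfs_foo_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos);
static ssize_t iwl_legacy_dbgfs_foo_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos);
static const struct file_operations iwl_legacy_dbgfs_foo_ops = {
	.write = iwl_legacy_dbgfs_foo_write,
	.read = iwl_legacy_dbgfs_foo_read,
	.open = iwl_legacy_dbgfs_open_file_generic,
	.llseek = generic_file_llseek,
};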
-
-static ssize_t iwl_legacy_dbgfs_tx_statistics_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- char *buf;
- int pos = 0;
-
- int cnt;
- ssize_t ret;
- const size_t bufsz = 100 +
- sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
- for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t%25s\t\t: %u\n",
- iwl_legacy_get_mgmt_string(cnt),
- priv->tx_stats.mgmt[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "Control\n");
- for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t%25s\t\t: %u\n",
- iwl_legacy_get_ctrl_string(cnt),
- priv->tx_stats.ctrl[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
- pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
- priv->tx_stats.data_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
- priv->tx_stats.data_bytes);
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t
-iwl_legacy_dbgfs_clear_traffic_statistics_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- u32 clear_flag;
- char buf[8];
- int buf_size;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%x", &clear_flag) != 1)
- return -EFAULT;
- iwl_legacy_clear_traffic_stats(priv);
-
- return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_rx_statistics_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- char *buf;
- int pos = 0;
- int cnt;
- ssize_t ret;
- const size_t bufsz = 100 +
- sizeof(char) * 50 * (MANAGEMENT_MAX + CONTROL_MAX);
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- pos += scnprintf(buf + pos, bufsz - pos, "Management:\n");
- for (cnt = 0; cnt < MANAGEMENT_MAX; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t%25s\t\t: %u\n",
- iwl_legacy_get_mgmt_string(cnt),
- priv->rx_stats.mgmt[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "Control:\n");
- for (cnt = 0; cnt < CONTROL_MAX; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t%25s\t\t: %u\n",
- iwl_legacy_get_ctrl_string(cnt),
- priv->rx_stats.ctrl[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "Data:\n");
- pos += scnprintf(buf + pos, bufsz - pos, "\tcnt: %u\n",
- priv->rx_stats.data_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, "\tbytes: %llu\n",
- priv->rx_stats.data_bytes);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-#define BYTE1_MASK 0x000000ff
-#define BYTE2_MASK 0x0000ffff
-#define BYTE3_MASK 0x00ffffff
-static ssize_t iwl_legacy_dbgfs_sram_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- u32 val;
- char *buf;
- ssize_t ret;
- int i;
- int pos = 0;
- struct iwl_priv *priv = file->private_data;
- size_t bufsz;
-
- /* default is to dump the entire data segment */
- if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
- priv->dbgfs_sram_offset = 0x800000;
- if (priv->ucode_type == UCODE_INIT)
- priv->dbgfs_sram_len = priv->ucode_init_data.len;
- else
- priv->dbgfs_sram_len = priv->ucode_data.len;
- }
- bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10;
- buf = kmalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
- priv->dbgfs_sram_len);
- pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
- priv->dbgfs_sram_offset);
- for (i = priv->dbgfs_sram_len; i > 0; i -= 4) {
- val = iwl_legacy_read_targ_mem(priv, priv->dbgfs_sram_offset + \
- priv->dbgfs_sram_len - i);
- if (i < 4) {
- switch (i) {
- case 1:
- val &= BYTE1_MASK;
- break;
- case 2:
- val &= BYTE2_MASK;
- break;
- case 3:
- val &= BYTE3_MASK;
- break;
- }
- }
- if (!(i % 16))
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- pos += scnprintf(buf + pos, bufsz - pos, "0x%08x ", val);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_sram_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[64];
- int buf_size;
- u32 offset, len;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
-
- if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
- priv->dbgfs_sram_offset = offset;
- priv->dbgfs_sram_len = len;
- } else {
- priv->dbgfs_sram_offset = 0;
- priv->dbgfs_sram_len = 0;
- }
-
- return count;
-}
-
-static ssize_t
-iwl_legacy_dbgfs_stations_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- struct iwl_station_entry *station;
- int max_sta = priv->hw_params.max_stations;
- char *buf;
- int i, j, pos = 0;
- ssize_t ret;
- /* Add 30 for initial string */
- const size_t bufsz = 30 + sizeof(char) * 500 * (priv->num_stations);
-
- buf = kmalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- pos += scnprintf(buf + pos, bufsz - pos, "num of stations: %d\n\n",
- priv->num_stations);
-
- for (i = 0; i < max_sta; i++) {
- station = &priv->stations[i];
- if (!station->used)
- continue;
- pos += scnprintf(buf + pos, bufsz - pos,
- "station %d - addr: %pM, flags: %#x\n",
- i, station->sta.sta.addr,
- station->sta.station_flags_msk);
- pos += scnprintf(buf + pos, bufsz - pos,
- "TID\tseq_num\ttxq_id\tframes\ttfds\t");
- pos += scnprintf(buf + pos, bufsz - pos,
- "start_idx\tbitmap\t\t\trate_n_flags\n");
-
- for (j = 0; j < MAX_TID_COUNT; j++) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "%d:\t%#x\t%#x\t%u\t%u\t%u\t\t%#.16llx\t%#x",
- j, station->tid[j].seq_number,
- station->tid[j].agg.txq_id,
- station->tid[j].agg.frame_count,
- station->tid[j].tfds_in_queue,
- station->tid[j].agg.start_idx,
- station->tid[j].agg.bitmap,
- station->tid[j].agg.rate_n_flags);
-
- if (station->tid[j].agg.wait_for_ba)
- pos += scnprintf(buf + pos, bufsz - pos,
- " - waitforba");
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- }
-
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- }
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_nvm_read(struct file *file,
- char __user *user_buf,
- size_t count,
- loff_t *ppos)
-{
- ssize_t ret;
- struct iwl_priv *priv = file->private_data;
- int pos = 0, ofs = 0, buf_size = 0;
- const u8 *ptr;
- char *buf;
- u16 eeprom_ver;
- size_t eeprom_len = priv->cfg->base_params->eeprom_size;
- buf_size = 4 * eeprom_len + 256;
-
- if (eeprom_len % 16) {
- IWL_ERR(priv, "NVM size is not multiple of 16.\n");
- return -ENODATA;
- }
-
- ptr = priv->eeprom;
- if (!ptr) {
- IWL_ERR(priv, "Invalid EEPROM memory\n");
- return -ENOMEM;
- }
-
- /* 4 characters for byte 0xYY */
- buf = kzalloc(buf_size, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
- eeprom_ver = iwl_legacy_eeprom_query16(priv, EEPROM_VERSION);
- pos += scnprintf(buf + pos, buf_size - pos, "EEPROM "
- "version: 0x%x\n", eeprom_ver);
- for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
- pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
- hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
- buf_size - pos, 0);
- pos += strlen(buf + pos);
- if (buf_size - pos > 0)
- buf[pos++] = '\n';
- }
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t
-iwl_legacy_dbgfs_channels_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- struct ieee80211_channel *channels = NULL;
- const struct ieee80211_supported_band *supp_band = NULL;
- int pos = 0, i, bufsz = PAGE_SIZE;
- char *buf;
- ssize_t ret;
-
- if (!test_bit(STATUS_GEO_CONFIGURED, &priv->status))
- return -EAGAIN;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_2GHZ);
- if (supp_band) {
- channels = supp_band->channels;
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "Displaying %d channels in 2.4GHz band 802.11bg):\n",
- supp_band->n_channels);
-
- for (i = 0; i < supp_band->n_channels; i++)
- pos += scnprintf(buf + pos, bufsz - pos,
- "%d: %ddBm: BSS%s%s, %s.\n",
- channels[i].hw_value,
- channels[i].max_power,
- channels[i].flags & IEEE80211_CHAN_RADAR ?
- " (IEEE 802.11h required)" : "",
- ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
- || (channels[i].flags &
- IEEE80211_CHAN_RADAR)) ? "" :
- ", IBSS",
- channels[i].flags &
- IEEE80211_CHAN_PASSIVE_SCAN ?
- "passive only" : "active/passive");
- }
- supp_band = iwl_get_hw_mode(priv, IEEE80211_BAND_5GHZ);
- if (supp_band) {
- channels = supp_band->channels;
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "Displaying %d channels in 5.2GHz band (802.11a)\n",
- supp_band->n_channels);
-
- for (i = 0; i < supp_band->n_channels; i++)
- pos += scnprintf(buf + pos, bufsz - pos,
- "%d: %ddBm: BSS%s%s, %s.\n",
- channels[i].hw_value,
- channels[i].max_power,
- channels[i].flags & IEEE80211_CHAN_RADAR ?
- " (IEEE 802.11h required)" : "",
- ((channels[i].flags & IEEE80211_CHAN_NO_IBSS)
- || (channels[i].flags &
- IEEE80211_CHAN_RADAR)) ? "" :
- ", IBSS",
- channels[i].flags &
- IEEE80211_CHAN_PASSIVE_SCAN ?
- "passive only" : "active/passive");
- }
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_status_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- char buf[512];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
-
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
- test_bit(STATUS_HCMD_ACTIVE, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
- test_bit(STATUS_INT_ENABLED, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
- test_bit(STATUS_RF_KILL_HW, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
- test_bit(STATUS_CT_KILL, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
- test_bit(STATUS_INIT, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
- test_bit(STATUS_ALIVE, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_READY:\t\t %d\n",
- test_bit(STATUS_READY, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_TEMPERATURE:\t %d\n",
- test_bit(STATUS_TEMPERATURE, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_GEO_CONFIGURED:\t %d\n",
- test_bit(STATUS_GEO_CONFIGURED, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_EXIT_PENDING:\t %d\n",
- test_bit(STATUS_EXIT_PENDING, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_STATISTICS:\t %d\n",
- test_bit(STATUS_STATISTICS, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCANNING:\t %d\n",
- test_bit(STATUS_SCANNING, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_ABORTING:\t %d\n",
- test_bit(STATUS_SCAN_ABORTING, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_SCAN_HW:\t\t %d\n",
- test_bit(STATUS_SCAN_HW, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_POWER_PMI:\t %d\n",
- test_bit(STATUS_POWER_PMI, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
- test_bit(STATUS_FW_ERROR, &priv->status));
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_interrupt_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- int cnt = 0;
- char *buf;
- int bufsz = 24 * 64; /* 24 items * 64 char per item */
- ssize_t ret;
-
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "Interrupt Statistics Report:\n");
-
- pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
- priv->isr_stats.hw);
- pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
- priv->isr_stats.sw);
- if (priv->isr_stats.sw || priv->isr_stats.hw) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tLast Restarting Code: 0x%X\n",
- priv->isr_stats.err_code);
- }
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
- priv->isr_stats.sch);
- pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
- priv->isr_stats.alive);
-#endif
- pos += scnprintf(buf + pos, bufsz - pos,
- "HW RF KILL switch toggled:\t %u\n",
- priv->isr_stats.rfkill);
-
- pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
- priv->isr_stats.ctkill);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
- priv->isr_stats.wakeup);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "Rx command responses:\t\t %u\n",
- priv->isr_stats.rx);
- for (cnt = 0; cnt < REPLY_MAX; cnt++) {
- if (priv->isr_stats.rx_handlers[cnt] > 0)
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tRx handler[%36s]:\t\t %u\n",
- iwl_legacy_get_cmd_string(cnt),
- priv->isr_stats.rx_handlers[cnt]);
- }
-
- pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
- priv->isr_stats.tx);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
- priv->isr_stats.unhandled);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_interrupt_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- u32 reset_flag;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%x", &reset_flag) != 1)
- return -EFAULT;
- if (reset_flag == 0)
- iwl_legacy_clear_isr_stats(priv);
-
- return count;
-}
-
-static ssize_t
-iwl_legacy_dbgfs_qos_read(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- struct iwl_rxon_context *ctx;
- int pos = 0, i;
- char buf[256 * NUM_IWL_RXON_CTX];
- const size_t bufsz = sizeof(buf);
-
- for_each_context(priv, ctx) {
- pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
- ctx->ctxid);
- for (i = 0; i < AC_NUM; i++) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tcw_min\tcw_max\taifsn\ttxop\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "AC[%d]\t%u\t%u\t%u\t%u\n", i,
- ctx->qos_data.def_qos_parm.ac[i].cw_min,
- ctx->qos_data.def_qos_parm.ac[i].cw_max,
- ctx->qos_data.def_qos_parm.ac[i].aifsn,
- ctx->qos_data.def_qos_parm.ac[i].edca_txop);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- }
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_disable_ht40_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- int ht40;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &ht40) != 1)
- return -EFAULT;
- if (!iwl_legacy_is_any_associated(priv))
- priv->disable_ht40 = ht40 ? true : false;
- else {
- IWL_ERR(priv, "Sta associated with AP - "
- "Change to 40MHz channel support is not allowed\n");
- return -EINVAL;
- }
-
- return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_disable_ht40_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[100];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "11n 40MHz Mode: %s\n",
- priv->disable_ht40 ? "Disabled" : "Enabled");
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-DEBUGFS_READ_WRITE_FILE_OPS(sram);
-DEBUGFS_READ_FILE_OPS(nvm);
-DEBUGFS_READ_FILE_OPS(stations);
-DEBUGFS_READ_FILE_OPS(channels);
-DEBUGFS_READ_FILE_OPS(status);
-DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
-DEBUGFS_READ_FILE_OPS(qos);
-DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
-
-static ssize_t iwl_legacy_dbgfs_traffic_log_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- int pos = 0, ofs = 0;
- int cnt = 0, entry;
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- struct iwl_rx_queue *rxq = &priv->rxq;
- char *buf;
- int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
- (priv->cfg->base_params->num_of_queues * 32 * 8) + 400;
- const u8 *ptr;
- ssize_t ret;
-
- if (!priv->txq) {
- IWL_ERR(priv, "txq not ready\n");
- return -EAGAIN;
- }
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate buffer\n");
- return -ENOMEM;
- }
- pos += scnprintf(buf + pos, bufsz - pos, "Tx Queue\n");
- for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
- txq = &priv->txq[cnt];
- q = &txq->q;
- pos += scnprintf(buf + pos, bufsz - pos,
- "q[%d]: read_ptr: %u, write_ptr: %u\n",
- cnt, q->read_ptr, q->write_ptr);
- }
- if (priv->tx_traffic && (iwlegacy_debug_level & IWL_DL_TX)) {
- ptr = priv->tx_traffic;
- pos += scnprintf(buf + pos, bufsz - pos,
- "Tx Traffic idx: %u\n", priv->tx_traffic_idx);
- for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
- for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
- entry++, ofs += 16) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "0x%.4x ", ofs);
- hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
- buf + pos, bufsz - pos, 0);
- pos += strlen(buf + pos);
- if (bufsz - pos > 0)
- buf[pos++] = '\n';
- }
- }
- }
-
- pos += scnprintf(buf + pos, bufsz - pos, "Rx Queue\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "read: %u, write: %u\n",
- rxq->read, rxq->write);
-
- if (priv->rx_traffic && (iwlegacy_debug_level & IWL_DL_RX)) {
- ptr = priv->rx_traffic;
- pos += scnprintf(buf + pos, bufsz - pos,
- "Rx Traffic idx: %u\n", priv->rx_traffic_idx);
- for (cnt = 0, ofs = 0; cnt < IWL_TRAFFIC_ENTRIES; cnt++) {
- for (entry = 0; entry < IWL_TRAFFIC_ENTRY_SIZE / 16;
- entry++, ofs += 16) {
- pos += scnprintf(buf + pos, bufsz - pos,
- "0x%.4x ", ofs);
- hex_dump_to_buffer(ptr + ofs, 16, 16, 2,
- buf + pos, bufsz - pos, 0);
- pos += strlen(buf + pos);
- if (bufsz - pos > 0)
- buf[pos++] = '\n';
- }
- }
- }
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_traffic_log_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- int traffic_log;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &traffic_log) != 1)
- return -EFAULT;
- if (traffic_log == 0)
- iwl_legacy_reset_traffic_log(priv);
-
- return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_tx_queue_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- char *buf;
- int pos = 0;
- int cnt;
- int ret;
- const size_t bufsz = sizeof(char) * 64 *
- priv->cfg->base_params->num_of_queues;
-
- if (!priv->txq) {
- IWL_ERR(priv, "txq not ready\n");
- return -EAGAIN;
- }
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
- txq = &priv->txq[cnt];
- q = &txq->q;
- pos += scnprintf(buf + pos, bufsz - pos,
- "hwq %.2d: read=%u write=%u stop=%d"
- " swq_id=%#.2x (ac %d/hwq %d)\n",
- cnt, q->read_ptr, q->write_ptr,
- !!test_bit(cnt, priv->queue_stopped),
- txq->swq_id, txq->swq_id & 3,
- (txq->swq_id >> 2) & 0x1f);
- if (cnt >= 4)
- continue;
- /* for the ACs, display the stop count too */
- pos += scnprintf(buf + pos, bufsz - pos,
- " stop-count: %d\n",
- atomic_read(&priv->queue_stop_count[cnt]));
- }
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_rx_queue_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- struct iwl_rx_queue *rxq = &priv->rxq;
- char buf[256];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
-
- pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
- rxq->read);
- pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
- rxq->write);
- pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
- rxq->free_count);
- if (rxq->rb_stts) {
- pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
- le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
- } else {
- pos += scnprintf(buf + pos, bufsz - pos,
- "closed_rb_num: Not Allocated\n");
- }
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_ucode_rx_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- return priv->cfg->ops->lib->debugfs_ops.rx_stats_read(file,
- user_buf, count, ppos);
-}
-
-static ssize_t iwl_legacy_dbgfs_ucode_tx_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- return priv->cfg->ops->lib->debugfs_ops.tx_stats_read(file,
- user_buf, count, ppos);
-}
-
-static ssize_t iwl_legacy_dbgfs_ucode_general_stats_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- return priv->cfg->ops->lib->debugfs_ops.general_stats_read(file,
- user_buf, count, ppos);
-}
-
-static ssize_t iwl_legacy_dbgfs_sensitivity_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- int cnt = 0;
- char *buf;
- int bufsz = sizeof(struct iwl_sensitivity_data) * 4 + 100;
- ssize_t ret;
- struct iwl_sensitivity_data *data;
-
- data = &priv->sensitivity_data;
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm:\t\t\t %u\n",
- data->auto_corr_ofdm);
- pos += scnprintf(buf + pos, bufsz - pos,
- "auto_corr_ofdm_mrc:\t\t %u\n",
- data->auto_corr_ofdm_mrc);
- pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_ofdm_x1:\t\t %u\n",
- data->auto_corr_ofdm_x1);
- pos += scnprintf(buf + pos, bufsz - pos,
- "auto_corr_ofdm_mrc_x1:\t\t %u\n",
- data->auto_corr_ofdm_mrc_x1);
- pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck:\t\t\t %u\n",
- data->auto_corr_cck);
- pos += scnprintf(buf + pos, bufsz - pos, "auto_corr_cck_mrc:\t\t %u\n",
- data->auto_corr_cck_mrc);
- pos += scnprintf(buf + pos, bufsz - pos,
- "last_bad_plcp_cnt_ofdm:\t\t %u\n",
- data->last_bad_plcp_cnt_ofdm);
- pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_ofdm:\t\t %u\n",
- data->last_fa_cnt_ofdm);
- pos += scnprintf(buf + pos, bufsz - pos,
- "last_bad_plcp_cnt_cck:\t\t %u\n",
- data->last_bad_plcp_cnt_cck);
- pos += scnprintf(buf + pos, bufsz - pos, "last_fa_cnt_cck:\t\t %u\n",
- data->last_fa_cnt_cck);
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_curr_state:\t\t\t %u\n",
- data->nrg_curr_state);
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_prev_state:\t\t\t %u\n",
- data->nrg_prev_state);
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_value:\t\t\t");
- for (cnt = 0; cnt < 10; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos, " %u",
- data->nrg_value[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_rssi:\t\t");
- for (cnt = 0; cnt < NRG_NUM_PREV_STAT_L; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos, " %u",
- data->nrg_silence_rssi[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_ref:\t\t %u\n",
- data->nrg_silence_ref);
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_energy_idx:\t\t\t %u\n",
- data->nrg_energy_idx);
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_silence_idx:\t\t %u\n",
- data->nrg_silence_idx);
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_cck:\t\t\t %u\n",
- data->nrg_th_cck);
- pos += scnprintf(buf + pos, bufsz - pos,
- "nrg_auto_corr_silence_diff:\t %u\n",
- data->nrg_auto_corr_silence_diff);
- pos += scnprintf(buf + pos, bufsz - pos, "num_in_cck_no_fa:\t\t %u\n",
- data->num_in_cck_no_fa);
- pos += scnprintf(buf + pos, bufsz - pos, "nrg_th_ofdm:\t\t\t %u\n",
- data->nrg_th_ofdm);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-
-static ssize_t iwl_legacy_dbgfs_chain_noise_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- int cnt = 0;
- char *buf;
- int bufsz = sizeof(struct iwl_chain_noise_data) * 4 + 100;
- ssize_t ret;
- struct iwl_chain_noise_data *data;
-
- data = &priv->chain_noise_data;
- buf = kzalloc(bufsz, GFP_KERNEL);
- if (!buf) {
- IWL_ERR(priv, "Can not allocate Buffer\n");
- return -ENOMEM;
- }
-
- pos += scnprintf(buf + pos, bufsz - pos, "active_chains:\t\t\t %u\n",
- data->active_chains);
- pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_a:\t\t\t %u\n",
- data->chain_noise_a);
- pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_b:\t\t\t %u\n",
- data->chain_noise_b);
- pos += scnprintf(buf + pos, bufsz - pos, "chain_noise_c:\t\t\t %u\n",
- data->chain_noise_c);
- pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_a:\t\t\t %u\n",
- data->chain_signal_a);
- pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_b:\t\t\t %u\n",
- data->chain_signal_b);
- pos += scnprintf(buf + pos, bufsz - pos, "chain_signal_c:\t\t\t %u\n",
- data->chain_signal_c);
- pos += scnprintf(buf + pos, bufsz - pos, "beacon_count:\t\t\t %u\n",
- data->beacon_count);
-
- pos += scnprintf(buf + pos, bufsz - pos, "disconn_array:\t\t\t");
- for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos, " %u",
- data->disconn_array[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- pos += scnprintf(buf + pos, bufsz - pos, "delta_gain_code:\t\t");
- for (cnt = 0; cnt < NUM_RX_CHAINS; cnt++) {
- pos += scnprintf(buf + pos, bufsz - pos, " %u",
- data->delta_gain_code[cnt]);
- }
- pos += scnprintf(buf + pos, bufsz - pos, "\n");
- pos += scnprintf(buf + pos, bufsz - pos, "radio_write:\t\t\t %u\n",
- data->radio_write);
- pos += scnprintf(buf + pos, bufsz - pos, "state:\t\t\t\t %u\n",
- data->state);
-
- ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
- kfree(buf);
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_power_save_status_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[60];
- int pos = 0;
- const size_t bufsz = sizeof(buf);
- u32 pwrsave_status;
-
- pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) &
- CSR_GP_REG_POWER_SAVE_STATUS_MSK;
-
- pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
- pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
- (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
- (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
- (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
- "error");
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_clear_ucode_statistics_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- int clear;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &clear) != 1)
- return -EFAULT;
-
- /* make request to uCode to retrieve statistics information */
- mutex_lock(&priv->mutex);
- iwl_legacy_send_statistics_request(priv, CMD_SYNC, true);
- mutex_unlock(&priv->mutex);
-
- return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_rxon_flags_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int len = 0;
- char buf[20];
-
- len = sprintf(buf, "0x%04X\n",
- le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.flags));
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t iwl_legacy_dbgfs_rxon_filter_flags_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int len = 0;
- char buf[20];
-
- len = sprintf(buf, "0x%04X\n",
- le32_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags));
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
-}
-
-static ssize_t iwl_legacy_dbgfs_fh_reg_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char *buf = NULL;
- int pos = 0;
- ssize_t ret = -EFAULT;
-
- if (priv->cfg->ops->lib->dump_fh) {
- ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true);
- if (buf) {
- ret = simple_read_from_buffer(user_buf,
- count, ppos, buf, pos);
- kfree(buf);
- }
- }
-
- return ret;
-}
-
-static ssize_t iwl_legacy_dbgfs_missed_beacon_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char buf[12];
- const size_t bufsz = sizeof(buf);
-
- pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
- priv->missed_beacon_threshold);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_missed_beacon_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- int missed;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &missed) != 1)
- return -EINVAL;
-
- if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN ||
- missed > IWL_MISSED_BEACON_THRESHOLD_MAX)
- priv->missed_beacon_threshold =
- IWL_MISSED_BEACON_THRESHOLD_DEF;
- else
- priv->missed_beacon_threshold = missed;
-
- return count;
-}
-
-static ssize_t iwl_legacy_dbgfs_force_reset_read(struct file *file,
- char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- int pos = 0;
- char buf[300];
- const size_t bufsz = sizeof(buf);
- struct iwl_force_reset *force_reset;
-
- force_reset = &priv->force_reset;
-
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tnumber of reset request: %d\n",
- force_reset->reset_request_count);
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tnumber of reset request success: %d\n",
- force_reset->reset_success_count);
- pos += scnprintf(buf + pos, bufsz - pos,
- "\tnumber of reset request reject: %d\n",
- force_reset->reset_reject_count);
- pos += scnprintf(buf + pos, bufsz - pos,
- "\treset duration: %lu\n",
- force_reset->reset_duration);
-
- return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
-}
-
-static ssize_t iwl_legacy_dbgfs_force_reset_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- int ret;
- struct iwl_priv *priv = file->private_data;
-
- ret = iwl_legacy_force_reset(priv, true);
-
- return ret ? ret : count;
-}
-
-static ssize_t iwl_legacy_dbgfs_wd_timeout_write(struct file *file,
- const char __user *user_buf,
- size_t count, loff_t *ppos) {
-
- struct iwl_priv *priv = file->private_data;
- char buf[8];
- int buf_size;
- int timeout;
-
- memset(buf, 0, sizeof(buf));
- buf_size = min(count, sizeof(buf) - 1);
- if (copy_from_user(buf, user_buf, buf_size))
- return -EFAULT;
- if (sscanf(buf, "%d", &timeout) != 1)
- return -EINVAL;
- if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
- timeout = IWL_DEF_WD_TIMEOUT;
-
- priv->cfg->base_params->wd_timeout = timeout;
- iwl_legacy_setup_watchdog(priv);
- return count;
-}
-
-DEBUGFS_READ_FILE_OPS(rx_statistics);
-DEBUGFS_READ_FILE_OPS(tx_statistics);
-DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
-DEBUGFS_READ_FILE_OPS(rx_queue);
-DEBUGFS_READ_FILE_OPS(tx_queue);
-DEBUGFS_READ_FILE_OPS(ucode_rx_stats);
-DEBUGFS_READ_FILE_OPS(ucode_tx_stats);
-DEBUGFS_READ_FILE_OPS(ucode_general_stats);
-DEBUGFS_READ_FILE_OPS(sensitivity);
-DEBUGFS_READ_FILE_OPS(chain_noise);
-DEBUGFS_READ_FILE_OPS(power_save_status);
-DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
-DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
-DEBUGFS_READ_FILE_OPS(fh_reg);
-DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
-DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
-DEBUGFS_READ_FILE_OPS(rxon_flags);
-DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
-DEBUGFS_WRITE_FILE_OPS(wd_timeout);
-
-/*
- * Create the debugfs files and directories
- */
-int iwl_legacy_dbgfs_register(struct iwl_priv *priv, const char *name)
-{
- struct dentry *phyd = priv->hw->wiphy->debugfsdir;
- struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
-
- dir_drv = debugfs_create_dir(name, phyd);
- if (!dir_drv)
- return -ENOMEM;
-
- priv->debugfs_dir = dir_drv;
-
- dir_data = debugfs_create_dir("data", dir_drv);
- if (!dir_data)
- goto err;
- dir_rf = debugfs_create_dir("rf", dir_drv);
- if (!dir_rf)
- goto err;
- dir_debug = debugfs_create_dir("debug", dir_drv);
- if (!dir_debug)
- goto err;
-
- DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
- DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
-
- if (priv->cfg->base_params->sensitivity_calib_by_driver)
- DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
- if (priv->cfg->base_params->chain_noise_calib_by_driver)
- DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
- DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
- DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
- if (priv->cfg->base_params->sensitivity_calib_by_driver)
- DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf,
- &priv->disable_sens_cal);
- if (priv->cfg->base_params->chain_noise_calib_by_driver)
- DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
- &priv->disable_chain_noise_cal);
- DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
- &priv->disable_tx_power_cal);
- return 0;
-
-err:
- IWL_ERR(priv, "Can't create the debugfs directory\n");
- iwl_legacy_dbgfs_unregister(priv);
- return -ENOMEM;
-}
-EXPORT_SYMBOL(iwl_legacy_dbgfs_register);
-
-/*
- * Remove the debugfs files and directories
- */
-void iwl_legacy_dbgfs_unregister(struct iwl_priv *priv)
-{
- if (!priv->debugfs_dir)
- return;
-
- debugfs_remove_recursive(priv->debugfs_dir);
- priv->debugfs_dir = NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_dbgfs_unregister);
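A hedged sketch of how a caller would typically hook these two entry points up at
device setup and teardown; DRV_NAME stands in for whatever name the calling driver
actually passes, and the call sites are illustrative, not copied from the driver:

	/* at setup, after mac80211 registration (DRV_NAME is a placeholder) */
	if (iwl_legacy_dbgfs_register(priv, DRV_NAME))
		IWL_ERR(priv, "failed to create debugfs files\n");

	/* at teardown */
	iwl_legacy_dbgfs_unregister(priv);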
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h
deleted file mode 100644
index 9c786ed..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-dev.h
+++ /dev/null
@@ -1,1364 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-/*
- * Please use this file (iwl-dev.h) for driver implementation definitions.
- * Please use iwl-commands.h for uCode API definitions.
- * Please use iwl-4965-hw.h for hardware-related definitions.
- */
-
-#ifndef __iwl_legacy_dev_h__
-#define __iwl_legacy_dev_h__
-
-#include <linux/interrupt.h>
-#include <linux/pci.h> /* for struct pci_device_id */
-#include <linux/kernel.h>
-#include <linux/leds.h>
-#include <linux/wait.h>
-#include <net/ieee80211_radiotap.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-csr.h"
-#include "iwl-prph.h"
-#include "iwl-fh.h"
-#include "iwl-debug.h"
-#include "iwl-4965-hw.h"
-#include "iwl-3945-hw.h"
-#include "iwl-led.h"
-#include "iwl-power.h"
-#include "iwl-legacy-rs.h"
-
-struct iwl_tx_queue;
-
-/* CT-KILL constants */
-#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
-
-/* Default noise level to report when noise measurement is not available.
- * This may be because we're:
- * 1) Not associated (4965, no beacon statistics being sent to driver)
- * 2) Scanning (noise measurement does not apply to associated channel)
- * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
- * Use default noise value of -127 ... this is below the range of measurable
- * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
- * Also, -127 works better than 0 when averaging frames with/without
- * noise info (e.g. averaging might be done in app); measured dBm values are
- * always negative ... using a negative value as the default keeps all
- * averages within an s8's (used in some apps) range of negative values. */
-#define IWL_NOISE_MEAS_NOT_AVAILABLE (-127)
-
-/*
- * RTS threshold here is total size [2347] minus 4 FCS bytes
- * Per spec:
- * a value of 0 means RTS on all data/management packets
- * a value > max MSDU size means no RTS
- * else RTS for data/management frames where MPDU is larger
- * than RTS value.
- */
-#define DEFAULT_RTS_THRESHOLD 2347U
-#define MIN_RTS_THRESHOLD 0U
-#define MAX_RTS_THRESHOLD 2347U
-#define MAX_MSDU_SIZE 2304U
-#define MAX_MPDU_SIZE 2346U
-#define DEFAULT_BEACON_INTERVAL 100U
-#define DEFAULT_SHORT_RETRY_LIMIT 7U
-#define DEFAULT_LONG_RETRY_LIMIT 4U
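A minimal sketch of the RTS rule spelled out in the comment above;
rts_required_sketch() is a hypothetical helper written only to illustrate the three
cases, not a function in this driver:

static inline bool rts_required_sketch(u32 mpdu_len, u32 rts_threshold)
{
	if (rts_threshold == 0)
		return true;			/* RTS on all data/management frames */
	if (rts_threshold > MAX_MSDU_SIZE)
		return false;			/* RTS effectively disabled */
	return mpdu_len > rts_threshold;	/* RTS only when the MPDU is larger */
}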
-
-struct iwl_rx_mem_buffer {
- dma_addr_t page_dma;
- struct page *page;
- struct list_head list;
-};
-
-#define rxb_addr(r) page_address(r->page)
-
-/* defined below */
-struct iwl_device_cmd;
-
-struct iwl_cmd_meta {
- /* only for SYNC commands, iff the reply skb is wanted */
- struct iwl_host_cmd *source;
- /*
- * only for ASYNC commands
- * (which is somewhat stupid -- look at iwl-sta.c for instance
- * which duplicates a bunch of code because the callback isn't
- * invoked for SYNC commands, if it were and its result passed
- * through it would be simpler...)
- */
- void (*callback)(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt);
-
- /* The CMD_SIZE_HUGE flag bit indicates that the command
- * structure is stored at the end of the shared queue memory. */
- u32 flags;
-
- DEFINE_DMA_UNMAP_ADDR(mapping);
- DEFINE_DMA_UNMAP_LEN(len);
-};
-
-/*
- * Generic queue structure
- *
- * Contains common data for Rx and Tx queues
- */
-struct iwl_queue {
- int n_bd; /* number of BDs in this queue */
- int write_ptr; /* 1-st empty entry (index) host_w*/
- int read_ptr; /* last used entry (index) host_r*/
- /* use for monitoring and recovering the stuck queue */
- dma_addr_t dma_addr; /* physical addr for BD's */
- int n_window; /* safe queue window */
- u32 id;
- int low_mark; /* low watermark, resume queue if free
- * space more than this */
- int high_mark; /* high watermark, stop queue if free
- * space less than this */
-};
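A hedged sketch of how these ring indices and watermarks are typically used: free
space is derived from read_ptr/write_ptr modulo n_bd (clamped to the n_window
window), and the queue is stopped or resumed when that figure crosses
high_mark/low_mark. This mirrors the driver's queue-space helper in spirit but is
written here only as an illustration:

static inline int queue_space_sketch(const struct iwl_queue *q)
{
	int s = q->read_ptr - q->write_ptr;

	if (q->read_ptr > q->write_ptr)
		s -= q->n_bd;
	if (s <= 0)
		s += q->n_window;
	s -= 2;			/* keep headroom so full and empty stay distinct */
	return s < 0 ? 0 : s;
}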
-
-/* One for each TFD */
-struct iwl_tx_info {
- struct sk_buff *skb;
- struct iwl_rxon_context *ctx;
-};
-
-/**
- * struct iwl_tx_queue - Tx Queue for DMA
- * @q: generic Rx/Tx queue descriptor
- * @bd: base of circular buffer of TFDs
- * @cmd: array of command/TX buffer pointers
- * @meta: array of meta data for each command/tx buffer
- * @dma_addr_cmd: physical address of cmd/tx buffer array
- * @txb: array of per-TFD driver data
- * @time_stamp: time (in jiffies) of last read_ptr change
- * @need_update: indicates need to update read/write index
- * @sched_retry: indicates that HT aggregation (HT AGG) is enabled for this queue
- *
- * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
- * descriptors) and required locking structures.
- */
-#define TFD_TX_CMD_SLOTS 256
-#define TFD_CMD_SLOTS 32
-
-struct iwl_tx_queue {
- struct iwl_queue q;
- void *tfds;
- struct iwl_device_cmd **cmd;
- struct iwl_cmd_meta *meta;
- struct iwl_tx_info *txb;
- unsigned long time_stamp;
- u8 need_update;
- u8 sched_retry;
- u8 active;
- u8 swq_id;
-};
-
-#define IWL_NUM_SCAN_RATES (2)
-
-struct iwl4965_channel_tgd_info {
- u8 type;
- s8 max_power;
-};
-
-struct iwl4965_channel_tgh_info {
- s64 last_radar_time;
-};
-
-#define IWL4965_MAX_RATE (33)
-
-struct iwl3945_clip_group {
- /* maximum power level to prevent clipping for each rate, derived by
- * us from this band's saturation power in EEPROM */
- const s8 clip_powers[IWL_MAX_RATES];
-};
-
-/* current Tx power values to use, one for each rate for each channel.
- * requested power is limited by:
- * -- regulatory EEPROM limits for this channel
- * -- hardware capabilities (clip-powers)
- * -- spectrum management
- * -- user preference (e.g. iwconfig)
- * when requested power is set, base power index must also be set. */
-struct iwl3945_channel_power_info {
- struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
- s8 power_table_index; /* actual (compenst'd) index into gain table */
- s8 base_power_index; /* gain index for power at factory temp. */
- s8 requested_power; /* power (dBm) requested for this chnl/rate */
-};
-
-/* current scan Tx power values to use, one for each scan rate for each
- * channel. */
-struct iwl3945_scan_power_info {
- struct iwl3945_tx_power tpc; /* actual radio and DSP gain settings */
- s8 power_table_index; /* actual (compenst'd) index into gain table */
- s8 requested_power; /* scan pwr (dBm) requested for chnl/rate */
-};
-
-/*
- * One for each channel, holds all channel setup data
- * Some of the fields (e.g. eeprom and flags/max_power_avg) are redundant
- * with one another!
- */
-struct iwl_channel_info {
- struct iwl4965_channel_tgd_info tgd;
- struct iwl4965_channel_tgh_info tgh;
- struct iwl_eeprom_channel eeprom; /* EEPROM regulatory limit */
- struct iwl_eeprom_channel ht40_eeprom; /* EEPROM regulatory limit for
- * HT40 channel */
-
- u8 channel; /* channel number */
- u8 flags; /* flags copied from EEPROM */
- s8 max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
- s8 curr_txpow; /* (dBm) regulatory/spectrum/user (not h/w) limit */
- s8 min_power; /* always 0 */
- s8 scan_power; /* (dBm) regul. eeprom, direct scans, any rate */
-
- u8 group_index; /* 0-4, maps channel to group1/2/3/4/5 */
- u8 band_index; /* 0-4, maps channel to band1/2/3/4/5 */
- enum ieee80211_band band;
-
- /* HT40 channel info */
- s8 ht40_max_power_avg; /* (dBm) regul. eeprom, normal Tx, any rate */
- u8 ht40_flags; /* flags copied from EEPROM */
- u8 ht40_extension_channel; /* HT_IE_EXT_CHANNEL_* */
-
- /* Radio/DSP gain settings for each "normal" data Tx rate.
- * These include, in addition to RF and DSP gain, a few fields for
- * remembering/modifying gain settings (indexes). */
- struct iwl3945_channel_power_info power_info[IWL4965_MAX_RATE];
-
- /* Radio/DSP gain settings for each scan rate, for directed scans. */
- struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
-};
-
-#define IWL_TX_FIFO_BK 0 /* shared */
-#define IWL_TX_FIFO_BE 1
-#define IWL_TX_FIFO_VI 2 /* shared */
-#define IWL_TX_FIFO_VO 3
-#define IWL_TX_FIFO_UNUSED -1
-
-/* Minimum number of queues. MAX_NUM is defined in hw specific files.
- * Set the minimum to accommodate the 4 standard TX queues, 1 command
- * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
-#define IWL_MIN_NUM_QUEUES 10
-
-#define IWL_DEFAULT_CMD_QUEUE_NUM 4
-
-#define IEEE80211_DATA_LEN 2304
-#define IEEE80211_4ADDR_LEN 30
-#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
-#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
-
-struct iwl_frame {
- union {
- struct ieee80211_hdr frame;
- struct iwl_tx_beacon_cmd beacon;
- u8 raw[IEEE80211_FRAME_LEN];
- u8 cmd[360];
- } u;
- struct list_head list;
-};
-
-#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
-#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
-#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
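A small worked example of the two conversions above, assuming the usual
IEEE80211_SCTL_SEQ mask of 0xFFF0 (the values themselves are hypothetical):

static inline void seq_macro_example(void)
{
	u16 seq_ctrl = 0x1234;		/* sequence number 0x123, fragment 4 */
	u16 sn = SEQ_TO_SN(seq_ctrl);	/* == 0x123 */
	u16 seq = SN_TO_SEQ(sn);	/* == 0x1230: fragment bits cleared */

	(void)seq;
}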
-
-enum {
- CMD_SYNC = 0,
- CMD_SIZE_NORMAL = 0,
- CMD_NO_SKB = 0,
- CMD_SIZE_HUGE = (1 << 0),
- CMD_ASYNC = (1 << 1),
- CMD_WANT_SKB = (1 << 2),
- CMD_MAPPED = (1 << 3),
-};
-
-#define DEF_CMD_PAYLOAD_SIZE 320
-
-/**
- * struct iwl_device_cmd
- *
- * For allocation of the command and tx queues, this establishes the overall
- * size of the largest command we send to uCode, except for a scan command
- * (which is relatively huge; space is allocated separately).
- */
-struct iwl_device_cmd {
- struct iwl_cmd_header hdr; /* uCode API */
- union {
- u32 flags;
- u8 val8;
- u16 val16;
- u32 val32;
- struct iwl_tx_cmd tx;
- u8 payload[DEF_CMD_PAYLOAD_SIZE];
- } __packed cmd;
-} __packed;
-
-#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))
-
-
-struct iwl_host_cmd {
- const void *data;
- unsigned long reply_page;
- void (*callback)(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt);
- u32 flags;
- u16 len;
- u8 id;
-};
-
-#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
-#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
-#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
-
-/**
- * struct iwl_rx_queue - Rx queue
- * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
- * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
- * @read: Shared index to newest available Rx buffer
- * @write: Shared index to oldest written Rx packet
- * @free_count: Number of pre-allocated buffers in rx_free
- * @rx_free: list of free SKBs for use
- * @rx_used: List of Rx buffers with no SKB
- * @need_update: flag to indicate we need to update read/write index
- * @rb_stts: driver's pointer to receive buffer status
- * @rb_stts_dma: bus address of receive buffer status
- *
- * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
- */
-struct iwl_rx_queue {
- __le32 *bd;
- dma_addr_t bd_dma;
- struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
- struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
- u32 read;
- u32 write;
- u32 free_count;
- u32 write_actual;
- struct list_head rx_free;
- struct list_head rx_used;
- int need_update;
- struct iwl_rb_status *rb_stts;
- dma_addr_t rb_stts_dma;
- spinlock_t lock;
-};
-
-#define IWL_SUPPORTED_RATES_IE_LEN 8
-
-#define MAX_TID_COUNT 9
-
-#define IWL_INVALID_RATE 0xFF
-#define IWL_INVALID_VALUE -1
-
-/**
- * struct iwl_ht_agg -- aggregation status while waiting for block-ack
- * @txq_id: Tx queue used for Tx attempt
- * @frame_count: # frames attempted by Tx command
- * @wait_for_ba: Expect block-ack before next Tx reply
- * @start_idx: Index of 1st Transmit Frame Descriptor (TFD) in Tx window
- * @bitmap: Bitmap with one bit for each frame pending ACK in the Tx window
- * @rate_n_flags: Rate at which Tx was attempted
- *
- * If REPLY_TX indicates that aggregation was attempted, driver must wait
- * for block ack (REPLY_COMPRESSED_BA). This struct stores tx reply info
- * until block ack arrives.
- */
-struct iwl_ht_agg {
- u16 txq_id;
- u16 frame_count;
- u16 wait_for_ba;
- u16 start_idx;
- u64 bitmap;
- u32 rate_n_flags;
-#define IWL_AGG_OFF 0
-#define IWL_AGG_ON 1
-#define IWL_EMPTYING_HW_QUEUE_ADDBA 2
-#define IWL_EMPTYING_HW_QUEUE_DELBA 3
- u8 state;
-};
-
-
-struct iwl_tid_data {
- u16 seq_number; /* 4965 only */
- u16 tfds_in_queue;
- struct iwl_ht_agg agg;
-};
-
-struct iwl_hw_key {
- u32 cipher;
- int keylen;
- u8 keyidx;
- u8 key[32];
-};
-
-union iwl_ht_rate_supp {
- u16 rates;
- struct {
- u8 siso_rate;
- u8 mimo_rate;
- };
-};
-
-#define CFG_HT_RX_AMPDU_FACTOR_8K (0x0)
-#define CFG_HT_RX_AMPDU_FACTOR_16K (0x1)
-#define CFG_HT_RX_AMPDU_FACTOR_32K (0x2)
-#define CFG_HT_RX_AMPDU_FACTOR_64K (0x3)
-#define CFG_HT_RX_AMPDU_FACTOR_DEF CFG_HT_RX_AMPDU_FACTOR_64K
-#define CFG_HT_RX_AMPDU_FACTOR_MAX CFG_HT_RX_AMPDU_FACTOR_64K
-#define CFG_HT_RX_AMPDU_FACTOR_MIN CFG_HT_RX_AMPDU_FACTOR_8K
-
-/*
- * Maximal MPDU density for TX aggregation
- * 4 - 2us density
- * 5 - 4us density
- * 6 - 8us density
- * 7 - 16us density
- */
-#define CFG_HT_MPDU_DENSITY_2USEC (0x4)
-#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
-#define CFG_HT_MPDU_DENSITY_8USEC (0x6)
-#define CFG_HT_MPDU_DENSITY_16USEC (0x7)
-#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
-#define CFG_HT_MPDU_DENSITY_MAX CFG_HT_MPDU_DENSITY_16USEC
-#define CFG_HT_MPDU_DENSITY_MIN (0x1)
-
-struct iwl_ht_config {
- bool single_chain_sufficient;
- enum ieee80211_smps_mode smps; /* current smps mode */
-};
-
-/* QoS structures */
-struct iwl_qos_info {
- int qos_active;
- struct iwl_qosparam_cmd def_qos_parm;
-};
-
-/*
- * Structure should be accessed with sta_lock held. When station addition
- * is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only
- * the commands (iwl_legacy_addsta_cmd and iwl_link_quality_cmd) without
- * sta_lock held.
- */
-struct iwl_station_entry {
- struct iwl_legacy_addsta_cmd sta;
- struct iwl_tid_data tid[MAX_TID_COUNT];
- u8 used, ctxid;
- struct iwl_hw_key keyinfo;
- struct iwl_link_quality_cmd *lq;
-};
-
-struct iwl_station_priv_common {
- struct iwl_rxon_context *ctx;
- u8 sta_id;
-};
-
-/*
- * iwl_station_priv: Driver's private station information
- *
- * When mac80211 creates a station, it reserves some space (hw->sta_data_size)
- * in the structure for use by the driver. This structure is placed in that
- * space.
- *
- * The common struct MUST be first because it is shared between
- * 3945 and 4965!
- */
-struct iwl_station_priv {
- struct iwl_station_priv_common common;
- struct iwl_lq_sta lq_sta;
- atomic_t pending_frames;
- bool client;
- bool asleep;
-};
-
-/**
- * struct iwl_vif_priv - driver's private per-interface information
- *
- * When mac80211 allocates a virtual interface, it can allocate
- * space for us to put data into.
- */
-struct iwl_vif_priv {
- struct iwl_rxon_context *ctx;
- u8 ibss_bssid_sta_id;
-};
-
-/* one for each uCode image (inst/data, boot/init/runtime) */
-struct fw_desc {
- void *v_addr; /* access by driver */
- dma_addr_t p_addr; /* access by card's busmaster DMA */
- u32 len; /* bytes */
-};
-
-/* uCode file layout */
-struct iwl_ucode_header {
- __le32 ver; /* major/minor/API/serial */
- struct {
- __le32 inst_size; /* bytes of runtime code */
- __le32 data_size; /* bytes of runtime data */
- __le32 init_size; /* bytes of init code */
- __le32 init_data_size; /* bytes of init data */
- __le32 boot_size; /* bytes of bootstrap code */
- u8 data[0]; /* in same order as sizes */
- } v1;
-};
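Because the image blobs in the v1 layout are stored back to back in the same order as the size fields, the start of each image can be found by accumulating the preceding sizes. A host-endian sketch of that arithmetic (the driver's fields are little-endian __le32 values and would need le32_to_cpu() conversions; the struct below is a simplified stand-in):

#include <stdio.h>
#include <stdint.h>

/* Simplified, host-endian stand-in for the v1 header above. */
struct ucode_header_v1 {
        uint32_t ver;
        uint32_t inst_size;
        uint32_t data_size;
        uint32_t init_size;
        uint32_t init_data_size;
        uint32_t boot_size;
        /* image payload follows, in the same order as the sizes */
};

static void print_image_offsets(const struct ucode_header_v1 *h)
{
        size_t off = sizeof(*h);   /* payload starts right after the header */

        printf("inst      at %zu, %u bytes\n", off, (unsigned)h->inst_size);
        off += h->inst_size;
        printf("data      at %zu, %u bytes\n", off, (unsigned)h->data_size);
        off += h->data_size;
        printf("init      at %zu, %u bytes\n", off, (unsigned)h->init_size);
        off += h->init_size;
        printf("init_data at %zu, %u bytes\n", off, (unsigned)h->init_data_size);
        off += h->init_data_size;
        printf("boot      at %zu, %u bytes\n", off, (unsigned)h->boot_size);
}

int main(void)
{
        struct ucode_header_v1 h = { 1, 1000, 200, 300, 40, 50 };

        print_image_offsets(&h);
        return 0;
}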
-
-struct iwl4965_ibss_seq {
- u8 mac[ETH_ALEN];
- u16 seq_num;
- u16 frag_num;
- unsigned long packet_time;
- struct list_head list;
-};
-
-struct iwl_sensitivity_ranges {
- u16 min_nrg_cck;
- u16 max_nrg_cck;
-
- u16 nrg_th_cck;
- u16 nrg_th_ofdm;
-
- u16 auto_corr_min_ofdm;
- u16 auto_corr_min_ofdm_mrc;
- u16 auto_corr_min_ofdm_x1;
- u16 auto_corr_min_ofdm_mrc_x1;
-
- u16 auto_corr_max_ofdm;
- u16 auto_corr_max_ofdm_mrc;
- u16 auto_corr_max_ofdm_x1;
- u16 auto_corr_max_ofdm_mrc_x1;
-
- u16 auto_corr_max_cck;
- u16 auto_corr_max_cck_mrc;
- u16 auto_corr_min_cck;
- u16 auto_corr_min_cck_mrc;
-
- u16 barker_corr_th_min;
- u16 barker_corr_th_min_mrc;
- u16 nrg_th_cca;
-};
-
-
-#define KELVIN_TO_CELSIUS(x) ((x)-273)
-#define CELSIUS_TO_KELVIN(x) ((x)+273)
-
-
-/**
- * struct iwl_hw_params
- * @max_txq_num: Max # Tx queues supported
- * @dma_chnl_num: Number of Tx DMA/FIFO channels
- * @scd_bc_tbls_size: size of scheduler byte count tables
- * @tfd_size: TFD size
- * @tx/rx_chains_num: Number of TX/RX chains
- * @valid_tx/rx_ant: usable antennas
- * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
- * @max_rxq_log: Log-base-2 of max_rxq_size
- * @rx_page_order: Rx buffer page order
- * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
- * @max_stations: Max number of stations supported
- * @ht40_channel: bitmap of bands where 40 MHz channels are possible:
- * BIT(IEEE80211_BAND_2GHZ) and/or BIT(IEEE80211_BAND_5GHZ)
- * @sw_crypto: 0 for hw, 1 for sw
- * @max_inst_size/@max_data_size/@max_bsm_size: size limits for uCode use
- * @ct_kill_threshold: temperature threshold
- * @beacon_time_tsf_bits: number of valid tsf bits for beacon time
- * @sens: range of sensitivity values (struct iwl_sensitivity_ranges)
- */
-struct iwl_hw_params {
- u8 max_txq_num;
- u8 dma_chnl_num;
- u16 scd_bc_tbls_size;
- u32 tfd_size;
- u8 tx_chains_num;
- u8 rx_chains_num;
- u8 valid_tx_ant;
- u8 valid_rx_ant;
- u16 max_rxq_size;
- u16 max_rxq_log;
- u32 rx_page_order;
- u32 rx_wrt_ptr_reg;
- u8 max_stations;
- u8 ht40_channel;
- u8 max_beacon_itrvl; /* in 1024 ms */
- u32 max_inst_size;
- u32 max_data_size;
- u32 max_bsm_size;
- u32 ct_kill_threshold; /* value in hw-dependent units */
- u16 beacon_time_tsf_bits;
- const struct iwl_sensitivity_ranges *sens;
-};
-
-
-/******************************************************************************
- *
- * Functions implemented in core module which are forward declared here
- * for use by iwl-3945.c and iwl-4965.c
- *
- * NOTE: The implementations of these functions are not hardware specific,
- * which is why they are in the core module files.
- *
- * Naming convention --
- * iwl_ <-- Is part of iwlwifi
- * iwlXXXX_ <-- Hardware specific (implemented in iwl-XXXX.c for XXXX)
- * iwl4965_bg_ <-- Called from work queue context
- * iwl4965_mac_ <-- mac80211 callback
- *
- ****************************************************************************/
-extern void iwl4965_update_chain_flags(struct iwl_priv *priv);
-extern const u8 iwlegacy_bcast_addr[ETH_ALEN];
-extern int iwl_legacy_queue_space(const struct iwl_queue *q);
-static inline int iwl_legacy_queue_used(const struct iwl_queue *q, int i)
-{
- return q->write_ptr >= q->read_ptr ?
- (i >= q->read_ptr && i < q->write_ptr) :
- !(i < q->read_ptr && i >= q->write_ptr);
-}
-
-
-static inline u8 iwl_legacy_get_cmd_index(struct iwl_queue *q, u32 index,
- int is_huge)
-{
- /*
- * This is for the init calibration result and scan commands, which
- * require a buffer larger than TFD_MAX_PAYLOAD_SIZE and therefore
- * use the big buffer at the end of the command array.
- */
- if (is_huge)
- return q->n_window; /* must be power of 2 */
-
- /* Otherwise, use normal size buffers */
- return index & (q->n_window - 1);
-}
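Both helpers above depend on the queue window (n_window) being a power of two: index & (n_window - 1) wraps a running index into the window, and the slot just past the window is reserved for the oversized ("huge") command buffer. A standalone sketch of that indexing, with a hypothetical fixed window size:

#include <stdio.h>

#define N_WINDOW 32   /* must be a power of two, as in the driver */

/* Regular commands wrap inside the window; the "huge" command always
 * lands in the extra slot at index N_WINDOW. */
static unsigned int cmd_index(unsigned int index, int is_huge)
{
        if (is_huge)
                return N_WINDOW;
        return index & (N_WINDOW - 1);
}

int main(void)
{
        printf("index 5        -> slot %u\n", cmd_index(5, 0));
        printf("index 37       -> slot %u (wrapped)\n", cmd_index(37, 0));
        printf("huge (any idx) -> slot %u\n", cmd_index(37, 1));
        return 0;
}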
-
-
-struct iwl_dma_ptr {
- dma_addr_t dma;
- void *addr;
- size_t size;
-};
-
-#define IWL_OPERATION_MODE_AUTO 0
-#define IWL_OPERATION_MODE_HT_ONLY 1
-#define IWL_OPERATION_MODE_MIXED 2
-#define IWL_OPERATION_MODE_20MHZ 3
-
-#define IWL_TX_CRC_SIZE 4
-#define IWL_TX_DELIMITER_SIZE 4
-
-#define TX_POWER_IWL_ILLEGAL_VOLTAGE -10000
-
-/* Sensitivity and chain noise calibration */
-#define INITIALIZATION_VALUE 0xFFFF
-#define IWL4965_CAL_NUM_BEACONS 20
-#define IWL_CAL_NUM_BEACONS 16
-#define MAXIMUM_ALLOWED_PATHLOSS 15
-
-#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
-
-#define MAX_FA_OFDM 50
-#define MIN_FA_OFDM 5
-#define MAX_FA_CCK 50
-#define MIN_FA_CCK 5
-
-#define AUTO_CORR_STEP_OFDM 1
-
-#define AUTO_CORR_STEP_CCK 3
-#define AUTO_CORR_MAX_TH_CCK 160
-
-#define NRG_DIFF 2
-#define NRG_STEP_CCK 2
-#define NRG_MARGIN 8
-#define MAX_NUMBER_CCK_NO_FA 100
-
-#define AUTO_CORR_CCK_MIN_VAL_DEF (125)
-
-#define CHAIN_A 0
-#define CHAIN_B 1
-#define CHAIN_C 2
-#define CHAIN_NOISE_DELTA_GAIN_INIT_VAL 4
-#define ALL_BAND_FILTER 0xFF00
-#define IN_BAND_FILTER 0xFF
-#define MIN_AVERAGE_NOISE_MAX_VALUE 0xFFFFFFFF
-
-#define NRG_NUM_PREV_STAT_L 20
-#define NUM_RX_CHAINS 3
-
-enum iwl4965_false_alarm_state {
- IWL_FA_TOO_MANY = 0,
- IWL_FA_TOO_FEW = 1,
- IWL_FA_GOOD_RANGE = 2,
-};
-
-enum iwl4965_chain_noise_state {
- IWL_CHAIN_NOISE_ALIVE = 0, /* must be 0 */
- IWL_CHAIN_NOISE_ACCUMULATE,
- IWL_CHAIN_NOISE_CALIBRATED,
- IWL_CHAIN_NOISE_DONE,
-};
-
-enum iwl4965_calib_enabled_state {
- IWL_CALIB_DISABLED = 0, /* must be 0 */
- IWL_CALIB_ENABLED = 1,
-};
-
-/*
- * enum iwl_calib
- * defines the order in which results of initial calibrations
- * should be sent to the runtime uCode
- */
-enum iwl_calib {
- IWL_CALIB_MAX,
-};
-
-/* Opaque calibration results */
-struct iwl_calib_result {
- void *buf;
- size_t buf_len;
-};
-
-enum ucode_type {
- UCODE_NONE = 0,
- UCODE_INIT,
- UCODE_RT
-};
-
-/* Sensitivity calib data */
-struct iwl_sensitivity_data {
- u32 auto_corr_ofdm;
- u32 auto_corr_ofdm_mrc;
- u32 auto_corr_ofdm_x1;
- u32 auto_corr_ofdm_mrc_x1;
- u32 auto_corr_cck;
- u32 auto_corr_cck_mrc;
-
- u32 last_bad_plcp_cnt_ofdm;
- u32 last_fa_cnt_ofdm;
- u32 last_bad_plcp_cnt_cck;
- u32 last_fa_cnt_cck;
-
- u32 nrg_curr_state;
- u32 nrg_prev_state;
- u32 nrg_value[10];
- u8 nrg_silence_rssi[NRG_NUM_PREV_STAT_L];
- u32 nrg_silence_ref;
- u32 nrg_energy_idx;
- u32 nrg_silence_idx;
- u32 nrg_th_cck;
- s32 nrg_auto_corr_silence_diff;
- u32 num_in_cck_no_fa;
- u32 nrg_th_ofdm;
-
- u16 barker_corr_th_min;
- u16 barker_corr_th_min_mrc;
- u16 nrg_th_cca;
-};
-
-/* Chain noise (differential Rx gain) calib data */
-struct iwl_chain_noise_data {
- u32 active_chains;
- u32 chain_noise_a;
- u32 chain_noise_b;
- u32 chain_noise_c;
- u32 chain_signal_a;
- u32 chain_signal_b;
- u32 chain_signal_c;
- u16 beacon_count;
- u8 disconn_array[NUM_RX_CHAINS];
- u8 delta_gain_code[NUM_RX_CHAINS];
- u8 radio_write;
- u8 state;
-};
-
-#define EEPROM_SEM_TIMEOUT 10 /* milliseconds */
-#define EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
-
-#define IWL_TRAFFIC_ENTRIES (256)
-#define IWL_TRAFFIC_ENTRY_SIZE (64)
-
-enum {
- MEASUREMENT_READY = (1 << 0),
- MEASUREMENT_ACTIVE = (1 << 1),
-};
-
-/* interrupt statistics */
-struct isr_statistics {
- u32 hw;
- u32 sw;
- u32 err_code;
- u32 sch;
- u32 alive;
- u32 rfkill;
- u32 ctkill;
- u32 wakeup;
- u32 rx;
- u32 rx_handlers[REPLY_MAX];
- u32 tx;
- u32 unhandled;
-};
-
-/* management statistics */
-enum iwl_mgmt_stats {
- MANAGEMENT_ASSOC_REQ = 0,
- MANAGEMENT_ASSOC_RESP,
- MANAGEMENT_REASSOC_REQ,
- MANAGEMENT_REASSOC_RESP,
- MANAGEMENT_PROBE_REQ,
- MANAGEMENT_PROBE_RESP,
- MANAGEMENT_BEACON,
- MANAGEMENT_ATIM,
- MANAGEMENT_DISASSOC,
- MANAGEMENT_AUTH,
- MANAGEMENT_DEAUTH,
- MANAGEMENT_ACTION,
- MANAGEMENT_MAX,
-};
-/* control statistics */
-enum iwl_ctrl_stats {
- CONTROL_BACK_REQ = 0,
- CONTROL_BACK,
- CONTROL_PSPOLL,
- CONTROL_RTS,
- CONTROL_CTS,
- CONTROL_ACK,
- CONTROL_CFEND,
- CONTROL_CFENDACK,
- CONTROL_MAX,
-};
-
-struct traffic_stats {
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- u32 mgmt[MANAGEMENT_MAX];
- u32 ctrl[CONTROL_MAX];
- u32 data_cnt;
- u64 data_bytes;
-#endif
-};
-
-/*
- * Host interrupt timeout values, used when setting the interrupt
- * coalescing timer. CSR_INT_COALESCING is an 8-bit register counting
- * in units of 32 usec.
- *
- * Default interrupt coalescing timer: 64 x 32 = 2048 usecs.
- * Default interrupt coalescing calibration timer: 16 x 32 = 512 usecs.
- */
-#define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
-#define IWL_HOST_INT_TIMEOUT_DEF (0x40)
-#define IWL_HOST_INT_TIMEOUT_MIN (0x0)
-#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
-#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
-#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
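Since the coalescing register counts in 32-usec ticks, the defaults above work out to 0x40 * 32 = 2048 usec and 0x10 * 32 = 512 usec. A trivial sketch of that conversion:

#include <stdio.h>

#define INT_COALESCING_TICK_USEC 32   /* one register count = 32 usec */

static unsigned int coalescing_usec(unsigned int reg_val)
{
        return reg_val * INT_COALESCING_TICK_USEC;
}

int main(void)
{
        printf("default timeout       0x40 -> %u usec\n", coalescing_usec(0x40));
        printf("default calib timeout 0x10 -> %u usec\n", coalescing_usec(0x10));
        return 0;
}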
-
-#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
-
-/* TX queue watchdog timeouts in mSecs */
-#define IWL_DEF_WD_TIMEOUT (2000)
-#define IWL_LONG_WD_TIMEOUT (10000)
-#define IWL_MAX_WD_TIMEOUT (120000)
-
-struct iwl_force_reset {
- int reset_request_count;
- int reset_success_count;
- int reset_reject_count;
- unsigned long reset_duration;
- unsigned long last_force_reset_jiffies;
-};
-
-/* extend beacon time format bit shifting */
-/*
- * for _3945 devices
- * bits 31:24 - extended
- * bits 23:0 - interval
- */
-#define IWL3945_EXT_BEACON_TIME_POS 24
-/*
- * for _4965 devices
- * bits 31:22 - extended
- * bits 21:0 - interval
- */
-#define IWL4965_EXT_BEACON_TIME_POS 22
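With the bit positions above, an extended beacon time word is simply the "extended" count shifted into the high bits with the interval in the low bits; 3945 splits the 32-bit value at bit 24 and 4965 at bit 22. A hedged sketch of packing and unpacking such a value (helper names are illustrative, not the driver's):

#include <stdio.h>
#include <stdint.h>

/* `pos` is 24 for the 3945-style layout and 22 for the 4965-style layout. */
static uint32_t pack_beacon_time(uint32_t extended, uint32_t interval,
                                 unsigned int pos)
{
        uint32_t low_mask = (1u << pos) - 1;

        return (extended << pos) | (interval & low_mask);
}

static void unpack_beacon_time(uint32_t val, unsigned int pos,
                               uint32_t *extended, uint32_t *interval)
{
        uint32_t low_mask = (1u << pos) - 1;

        *extended = val >> pos;
        *interval = val & low_mask;
}

int main(void)
{
        uint32_t v = pack_beacon_time(3, 1024, 22);   /* 4965-style layout */
        uint32_t ext, ivl;

        unpack_beacon_time(v, 22, &ext, &ivl);
        printf("packed 0x%08x -> extended %u, interval %u\n",
               (unsigned)v, (unsigned)ext, (unsigned)ivl);
        return 0;
}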
-
-enum iwl_rxon_context_id {
- IWL_RXON_CTX_BSS,
-
- NUM_IWL_RXON_CTX
-};
-
-struct iwl_rxon_context {
- struct ieee80211_vif *vif;
-
- const u8 *ac_to_fifo;
- const u8 *ac_to_queue;
- u8 mcast_queue;
-
- /*
- * We could use the vif to indicate active, but we
- * also need it to be active during disabling when
- * we already removed the vif for type setting.
- */
- bool always_active, is_active;
-
- bool ht_need_multiple_chains;
-
- enum iwl_rxon_context_id ctxid;
-
- u32 interface_modes, exclusive_interface_modes;
- u8 unused_devtype, ap_devtype, ibss_devtype, station_devtype;
-
- /*
- * We declare this const so it can only be
- * changed via explicit cast within the
- * routines that actually update the physical
- * hardware.
- */
- const struct iwl_legacy_rxon_cmd active;
- struct iwl_legacy_rxon_cmd staging;
-
- struct iwl_rxon_time_cmd timing;
-
- struct iwl_qos_info qos_data;
-
- u8 bcast_sta_id, ap_sta_id;
-
- u8 rxon_cmd, rxon_assoc_cmd, rxon_timing_cmd;
- u8 qos_cmd;
- u8 wep_key_cmd;
-
- struct iwl_wep_key wep_keys[WEP_KEYS_MAX];
- u8 key_mapping_keys;
-
- __le32 station_flags;
-
- struct {
- bool non_gf_sta_present;
- u8 protection;
- bool enabled, is_40mhz;
- u8 extension_chan_offset;
- } ht;
-};
-
-struct iwl_priv {
-
- /* ieee device used by generic ieee processing code */
- struct ieee80211_hw *hw;
- struct ieee80211_channel *ieee_channels;
- struct ieee80211_rate *ieee_rates;
- struct iwl_cfg *cfg;
-
- /* temporary frame storage list */
- struct list_head free_frames;
- int frames_count;
-
- enum ieee80211_band band;
- int alloc_rxb_page;
-
- void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb);
-
- struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
-
- /* spectrum measurement report caching */
- struct iwl_spectrum_notification measure_report;
- u8 measurement_status;
-
- /* ucode beacon time */
- u32 ucode_beacon_time;
- int missed_beacon_threshold;
-
- /* track IBSS manager (last beacon) status */
- u32 ibss_manager;
-
- /* force reset */
- struct iwl_force_reset force_reset;
-
- /* we allocate array of iwl_channel_info for NIC's valid channels.
- * Access via channel # using indirect index array */
- struct iwl_channel_info *channel_info; /* channel info array */
- u8 channel_count; /* # of channels */
-
- /* thermal calibration */
- s32 temperature; /* degrees Kelvin */
- s32 last_temperature;
-
- /* init calibration results */
- struct iwl_calib_result calib_results[IWL_CALIB_MAX];
-
- /* Scan related variables */
- unsigned long scan_start;
- unsigned long scan_start_tsf;
- void *scan_cmd;
- enum ieee80211_band scan_band;
- struct cfg80211_scan_request *scan_request;
- struct ieee80211_vif *scan_vif;
- u8 scan_tx_ant[IEEE80211_NUM_BANDS];
- u8 mgmt_tx_ant;
-
- /* spinlock */
- spinlock_t lock; /* protect general shared data */
- spinlock_t hcmd_lock; /* protect hcmd */
- spinlock_t reg_lock; /* protect hw register access */
- struct mutex mutex;
-
- /* basic pci-network driver stuff */
- struct pci_dev *pci_dev;
-
- /* pci hardware address support */
- void __iomem *hw_base;
- u32 hw_rev;
- u32 hw_wa_rev;
- u8 rev_id;
-
- /* microcode/device supports multiple contexts */
- u8 valid_contexts;
-
- /* command queue number */
- u8 cmd_queue;
-
- /* max number of station keys */
- u8 sta_key_max_num;
-
- /* EEPROM MAC addresses */
- struct mac_address addresses[1];
-
- /* uCode images, save to reload in case of failure */
- int fw_index; /* firmware we're trying to load */
- u32 ucode_ver; /* version of ucode, copy of
- iwl_ucode.ver */
- struct fw_desc ucode_code; /* runtime inst */
- struct fw_desc ucode_data; /* runtime data original */
- struct fw_desc ucode_data_backup; /* runtime data save/restore */
- struct fw_desc ucode_init; /* initialization inst */
- struct fw_desc ucode_init_data; /* initialization data */
- struct fw_desc ucode_boot; /* bootstrap inst */
- enum ucode_type ucode_type;
- u8 ucode_write_complete; /* the image write is complete */
- char firmware_name[25];
-
- struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
-
- __le16 switch_channel;
-
- /* 1st responses from initialize and runtime uCode images.
- * _4965's initialize alive response contains some calibration data. */
- struct iwl_init_alive_resp card_alive_init;
- struct iwl_alive_resp card_alive;
-
- u16 active_rate;
-
- u8 start_calib;
- struct iwl_sensitivity_data sensitivity_data;
- struct iwl_chain_noise_data chain_noise_data;
- __le16 sensitivity_tbl[HD_TABLE_SIZE];
-
- struct iwl_ht_config current_ht_config;
-
- /* Rate scaling data */
- u8 retry_rate;
-
- wait_queue_head_t wait_command_queue;
-
- int activity_timer_active;
-
- /* Rx and Tx DMA processing queues */
- struct iwl_rx_queue rxq;
- struct iwl_tx_queue *txq;
- unsigned long txq_ctx_active_msk;
- struct iwl_dma_ptr kw; /* keep warm address */
- struct iwl_dma_ptr scd_bc_tbls;
-
- u32 scd_base_addr; /* scheduler sram base address */
-
- unsigned long status;
-
- /* counts mgmt, ctl, and data packets */
- struct traffic_stats tx_stats;
- struct traffic_stats rx_stats;
-
- /* counts interrupts */
- struct isr_statistics isr_stats;
-
- struct iwl_power_mgr power_data;
-
- /* context information */
- u8 bssid[ETH_ALEN]; /* used only on 3945 but filled by core */
-
- /* station table variables */
-
- /* Note: if lock and sta_lock are needed, lock must be acquired first */
- spinlock_t sta_lock;
- int num_stations;
- struct iwl_station_entry stations[IWL_STATION_COUNT];
- unsigned long ucode_key_table;
-
- /* queue refcounts */
-#define IWL_MAX_HW_QUEUES 32
- unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
- /* for each AC */
- atomic_t queue_stop_count[4];
-
- /* Indication if ieee80211_ops->open has been called */
- u8 is_open;
-
- u8 mac80211_registered;
-
- /* eeprom -- this is in the card's little endian byte order */
- u8 *eeprom;
- struct iwl_eeprom_calib_info *calib_info;
-
- enum nl80211_iftype iw_mode;
-
- /* Last Rx'd beacon timestamp */
- u64 timestamp;
-
- union {
-#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
- struct {
- void *shared_virt;
- dma_addr_t shared_phys;
-
- struct delayed_work thermal_periodic;
- struct delayed_work rfkill_poll;
-
- struct iwl3945_notif_statistics statistics;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- struct iwl3945_notif_statistics accum_statistics;
- struct iwl3945_notif_statistics delta_statistics;
- struct iwl3945_notif_statistics max_delta;
-#endif
-
- u32 sta_supp_rates;
- int last_rx_rssi; /* From Rx packet statistics */
-
- /* Rx'd packet timing information */
- u32 last_beacon_time;
- u64 last_tsf;
-
- /*
- * each calibration channel group in the
- * EEPROM has a derived clip setting for
- * each rate.
- */
- const struct iwl3945_clip_group clip_groups[5];
-
- } _3945;
-#endif
-#if defined(CONFIG_IWL4965) || defined(CONFIG_IWL4965_MODULE)
- struct {
- struct iwl_rx_phy_res last_phy_res;
- bool last_phy_res_valid;
-
- struct completion firmware_loading_complete;
-
- /*
- * The chain noise reset and gain commands are the two extra
- * calibration commands that follow the standard PHY
- * calibration commands.
- */
- u8 phy_calib_chain_noise_reset_cmd;
- u8 phy_calib_chain_noise_gain_cmd;
-
- struct iwl_notif_statistics statistics;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- struct iwl_notif_statistics accum_statistics;
- struct iwl_notif_statistics delta_statistics;
- struct iwl_notif_statistics max_delta;
-#endif
-
- } _4965;
-#endif
- };
-
- struct iwl_hw_params hw_params;
-
- u32 inta_mask;
-
- struct workqueue_struct *workqueue;
-
- struct work_struct restart;
- struct work_struct scan_completed;
- struct work_struct rx_replenish;
- struct work_struct abort_scan;
-
- struct iwl_rxon_context *beacon_ctx;
- struct sk_buff *beacon_skb;
-
- struct work_struct tx_flush;
-
- struct tasklet_struct irq_tasklet;
-
- struct delayed_work init_alive_start;
- struct delayed_work alive_start;
- struct delayed_work scan_check;
-
- /* TX Power */
- s8 tx_power_user_lmt;
- s8 tx_power_device_lmt;
- s8 tx_power_next;
-
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- /* debugging info */
- u32 debug_level; /* per device debugging will override global
- iwlegacy_debug_level if set */
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUGFS
- /* debugfs */
- u16 tx_traffic_idx;
- u16 rx_traffic_idx;
- u8 *tx_traffic;
- u8 *rx_traffic;
- struct dentry *debugfs_dir;
- u32 dbgfs_sram_offset, dbgfs_sram_len;
- bool disable_ht40;
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUGFS */
-
- struct work_struct txpower_work;
- u32 disable_sens_cal;
- u32 disable_chain_noise_cal;
- u32 disable_tx_power_cal;
- struct work_struct run_time_calib_work;
- struct timer_list statistics_periodic;
- struct timer_list watchdog;
- bool hw_ready;
-
- struct led_classdev led;
- unsigned long blink_on, blink_off;
- bool led_registered;
-}; /*iwl_priv */
-
-static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
-{
- set_bit(txq_id, &priv->txq_ctx_active_msk);
-}
-
-static inline void iwl_txq_ctx_deactivate(struct iwl_priv *priv, int txq_id)
-{
- clear_bit(txq_id, &priv->txq_ctx_active_msk);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-/*
- * iwl_legacy_get_debug_level: Return active debug level for device
- *
- * Using sysfs it is possible to set a per-device debug level. This debug
- * level is used if set; otherwise the global debug level, which can be
- * set via a module parameter, is used.
- */
-static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
-{
- if (priv->debug_level)
- return priv->debug_level;
- else
- return iwlegacy_debug_level;
-}
-#else
-static inline u32 iwl_legacy_get_debug_level(struct iwl_priv *priv)
-{
- return iwlegacy_debug_level;
-}
-#endif
-
-
-static inline struct ieee80211_hdr *
-iwl_legacy_tx_queue_get_hdr(struct iwl_priv *priv,
- int txq_id, int idx)
-{
- if (priv->txq[txq_id].txb[idx].skb)
- return (struct ieee80211_hdr *)priv->txq[txq_id].
- txb[idx].skb->data;
- return NULL;
-}
-
-static inline struct iwl_rxon_context *
-iwl_legacy_rxon_ctx_from_vif(struct ieee80211_vif *vif)
-{
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-
- return vif_priv->ctx;
-}
-
-#define for_each_context(priv, ctx) \
- for (ctx = &priv->contexts[IWL_RXON_CTX_BSS]; \
- ctx < &priv->contexts[NUM_IWL_RXON_CTX]; ctx++) \
- if (priv->valid_contexts & BIT(ctx->ctxid))
-
-static inline int iwl_legacy_is_associated(struct iwl_priv *priv,
- enum iwl_rxon_context_id ctxid)
-{
- return (priv->contexts[ctxid].active.filter_flags &
- RXON_FILTER_ASSOC_MSK) ? 1 : 0;
-}
-
-static inline int iwl_legacy_is_any_associated(struct iwl_priv *priv)
-{
- return iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
-}
-
-static inline int iwl_legacy_is_associated_ctx(struct iwl_rxon_context *ctx)
-{
- return (ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK) ? 1 : 0;
-}
-
-static inline int iwl_legacy_is_channel_valid(const struct iwl_channel_info *ch_info)
-{
- if (ch_info == NULL)
- return 0;
- return (ch_info->flags & EEPROM_CHANNEL_VALID) ? 1 : 0;
-}
-
-static inline int iwl_legacy_is_channel_radar(const struct iwl_channel_info *ch_info)
-{
- return (ch_info->flags & EEPROM_CHANNEL_RADAR) ? 1 : 0;
-}
-
-static inline u8 iwl_legacy_is_channel_a_band(const struct iwl_channel_info *ch_info)
-{
- return ch_info->band == IEEE80211_BAND_5GHZ;
-}
-
-static inline int
-iwl_legacy_is_channel_passive(const struct iwl_channel_info *ch)
-{
- return (!(ch->flags & EEPROM_CHANNEL_ACTIVE)) ? 1 : 0;
-}
-
-static inline int
-iwl_legacy_is_channel_ibss(const struct iwl_channel_info *ch)
-{
- return (ch->flags & EEPROM_CHANNEL_IBSS) ? 1 : 0;
-}
-
-static inline void
-__iwl_legacy_free_pages(struct iwl_priv *priv, struct page *page)
-{
- __free_pages(page, priv->hw_params.rx_page_order);
- priv->alloc_rxb_page--;
-}
-
-static inline void iwl_legacy_free_pages(struct iwl_priv *priv, unsigned long page)
-{
- free_pages(page, priv->hw_params.rx_page_order);
- priv->alloc_rxb_page--;
-}
-#endif /* __iwl_legacy_dev_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.c b/drivers/net/wireless/iwlegacy/iwl-devtrace.c
deleted file mode 100644
index acec991..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-devtrace.c
+++ /dev/null
@@ -1,42 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/module.h>
-
-/* sparse doesn't like tracepoint macros */
-#ifndef __CHECKER__
-#include "iwl-dev.h"
-
-#define CREATE_TRACE_POINTS
-#include "iwl-devtrace.h"
-
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite8);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ioread32);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_iowrite32);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_rx);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_tx);
-EXPORT_TRACEPOINT_SYMBOL(iwlwifi_legacy_dev_ucode_error);
-#endif
diff --git a/drivers/net/wireless/iwlegacy/iwl-devtrace.h b/drivers/net/wireless/iwlegacy/iwl-devtrace.h
deleted file mode 100644
index a443725..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-devtrace.h
+++ /dev/null
@@ -1,210 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2009 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#if !defined(__IWLWIFI_LEGACY_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
-#define __IWLWIFI_LEGACY_DEVICE_TRACE
-
-#include <linux/tracepoint.h>
-
-#if !defined(CONFIG_IWLWIFI_LEGACY_DEVICE_TRACING) || defined(__CHECKER__)
-#undef TRACE_EVENT
-#define TRACE_EVENT(name, proto, ...) \
-static inline void trace_ ## name(proto) {}
-#endif
-
-
-#define PRIV_ENTRY __field(struct iwl_priv *, priv)
-#define PRIV_ASSIGN (__entry->priv = priv)
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM iwlwifi_legacy_io
-
-TRACE_EVENT(iwlwifi_legacy_dev_ioread32,
- TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
- TP_ARGS(priv, offs, val),
- TP_STRUCT__entry(
- PRIV_ENTRY
- __field(u32, offs)
- __field(u32, val)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- __entry->offs = offs;
- __entry->val = val;
- ),
- TP_printk("[%p] read io[%#x] = %#x", __entry->priv,
- __entry->offs, __entry->val)
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_iowrite8,
- TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val),
- TP_ARGS(priv, offs, val),
- TP_STRUCT__entry(
- PRIV_ENTRY
- __field(u32, offs)
- __field(u8, val)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- __entry->offs = offs;
- __entry->val = val;
- ),
- TP_printk("[%p] write io[%#x] = %#x)", __entry->priv,
- __entry->offs, __entry->val)
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_iowrite32,
- TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
- TP_ARGS(priv, offs, val),
- TP_STRUCT__entry(
- PRIV_ENTRY
- __field(u32, offs)
- __field(u32, val)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- __entry->offs = offs;
- __entry->val = val;
- ),
- TP_printk("[%p] write io[%#x] = %#x)", __entry->priv,
- __entry->offs, __entry->val)
-);
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM iwlwifi_legacy_ucode
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM iwlwifi
-
-TRACE_EVENT(iwlwifi_legacy_dev_hcmd,
- TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags),
- TP_ARGS(priv, hcmd, len, flags),
- TP_STRUCT__entry(
- PRIV_ENTRY
- __dynamic_array(u8, hcmd, len)
- __field(u32, flags)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- memcpy(__get_dynamic_array(hcmd), hcmd, len);
- __entry->flags = flags;
- ),
- TP_printk("[%p] hcmd %#.2x (%ssync)",
- __entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0],
- __entry->flags & CMD_ASYNC ? "a" : "")
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_rx,
- TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len),
- TP_ARGS(priv, rxbuf, len),
- TP_STRUCT__entry(
- PRIV_ENTRY
- __dynamic_array(u8, rxbuf, len)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
- ),
- TP_printk("[%p] RX cmd %#.2x",
- __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4])
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_tx,
- TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen,
- void *buf0, size_t buf0_len,
- void *buf1, size_t buf1_len),
- TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
- TP_STRUCT__entry(
- PRIV_ENTRY
-
- __field(size_t, framelen)
- __dynamic_array(u8, tfd, tfdlen)
-
- /*
- * Do not insert between or below these items,
- * we want to keep the frame together (except
- * for the possible padding).
- */
- __dynamic_array(u8, buf0, buf0_len)
- __dynamic_array(u8, buf1, buf1_len)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- __entry->framelen = buf0_len + buf1_len;
- memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
- memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
- memcpy(__get_dynamic_array(buf1), buf1, buf1_len);
- ),
- TP_printk("[%p] TX %.2x (%zu bytes)",
- __entry->priv,
- ((u8 *)__get_dynamic_array(buf0))[0],
- __entry->framelen)
-);
-
-TRACE_EVENT(iwlwifi_legacy_dev_ucode_error,
- TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time,
- u32 data1, u32 data2, u32 line, u32 blink1,
- u32 blink2, u32 ilink1, u32 ilink2),
- TP_ARGS(priv, desc, time, data1, data2, line,
- blink1, blink2, ilink1, ilink2),
- TP_STRUCT__entry(
- PRIV_ENTRY
- __field(u32, desc)
- __field(u32, time)
- __field(u32, data1)
- __field(u32, data2)
- __field(u32, line)
- __field(u32, blink1)
- __field(u32, blink2)
- __field(u32, ilink1)
- __field(u32, ilink2)
- ),
- TP_fast_assign(
- PRIV_ASSIGN;
- __entry->desc = desc;
- __entry->time = time;
- __entry->data1 = data1;
- __entry->data2 = data2;
- __entry->line = line;
- __entry->blink1 = blink1;
- __entry->blink2 = blink2;
- __entry->ilink1 = ilink1;
- __entry->ilink2 = ilink2;
- ),
- TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, "
- "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X",
- __entry->priv, __entry->desc, __entry->time, __entry->data1,
- __entry->data2, __entry->line, __entry->blink1,
- __entry->blink2, __entry->ilink1, __entry->ilink2)
-);
-
-#endif /* __IWLWIFI_LEGACY_DEVICE_TRACE */
-
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH .
-#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE iwl-devtrace
-#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.c b/drivers/net/wireless/iwlegacy/iwl-eeprom.c
deleted file mode 100644
index 5bf3f49..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-eeprom.c
+++ /dev/null
@@ -1,553 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-commands.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-debug.h"
-#include "iwl-eeprom.h"
-#include "iwl-io.h"
-
-/************************** EEPROM BANDS ****************************
- *
- * The iwlegacy_eeprom_band definitions below provide the mapping from the
- * EEPROM contents to the specific channel number supported for each
- * band.
- *
- * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
- * definition below maps to physical channel 42 in the 5.2GHz spectrum.
- * The specific geography and calibration information for that channel
- * is contained in the eeprom map itself.
- *
- * During init, we copy the eeprom information and channel map
- * information into priv->channel_info_24/52 and priv->channel_map_24/52
- *
- * channel_map_24/52 provides the index in the channel_info array for a
- * given channel. We have to have two separate maps as there is channel
- * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
- * band_2
- *
- * A value of 0xff stored in the channel_map indicates that the channel
- * is not supported by the hardware at all.
- *
- * A value of 0xfe in the channel_map indicates that the channel is not
- * valid for Tx with the current hardware. This means that
- * while the system can tune and receive on a given channel, it may not
- * be able to associate or transmit any frames on that
- * channel. There is no corresponding channel information for that
- * entry.
- *
- *********************************************************************/
-
-/* 2.4 GHz */
-const u8 iwlegacy_eeprom_band_1[14] = {
- 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
-};
-
-/* 5.2 GHz bands */
-static const u8 iwlegacy_eeprom_band_2[] = { /* 4915-5080MHz */
- 183, 184, 185, 187, 188, 189, 192, 196, 7, 8, 11, 12, 16
-};
-
-static const u8 iwlegacy_eeprom_band_3[] = { /* 5170-5320MHz */
- 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
-};
-
-static const u8 iwlegacy_eeprom_band_4[] = { /* 5500-5700MHz */
- 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
-};
-
-static const u8 iwlegacy_eeprom_band_5[] = { /* 5725-5825MHz */
- 145, 149, 153, 157, 161, 165
-};
-
-static const u8 iwlegacy_eeprom_band_6[] = { /* 2.4 ht40 channel */
- 1, 2, 3, 4, 5, 6, 7
-};
-
-static const u8 iwlegacy_eeprom_band_7[] = { /* 5.2 ht40 channel */
- 36, 44, 52, 60, 100, 108, 116, 124, 132, 149, 157
-};
-
-/******************************************************************************
- *
- * EEPROM related functions
- *
-******************************************************************************/
-
-static int iwl_legacy_eeprom_verify_signature(struct iwl_priv *priv)
-{
- u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
- int ret = 0;
-
- IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
- switch (gp) {
- case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
- case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
- break;
- default:
- IWL_ERR(priv, "bad EEPROM signature,"
- "EEPROM_GP=0x%08x\n", gp);
- ret = -ENOENT;
- break;
- }
- return ret;
-}
-
-const u8
-*iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
-{
- BUG_ON(offset >= priv->cfg->base_params->eeprom_size);
- return &priv->eeprom[offset];
-}
-EXPORT_SYMBOL(iwl_legacy_eeprom_query_addr);
-
-u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset)
-{
- if (!priv->eeprom)
- return 0;
- return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
-}
-EXPORT_SYMBOL(iwl_legacy_eeprom_query16);
-
-/**
- * iwl_legacy_eeprom_init - read EEPROM contents
- *
- * Load the EEPROM contents from adapter into priv->eeprom
- *
- * NOTE: This routine uses the non-debug IO access functions.
- */
-int iwl_legacy_eeprom_init(struct iwl_priv *priv)
-{
- __le16 *e;
- u32 gp = iwl_read32(priv, CSR_EEPROM_GP);
- int sz;
- int ret;
- u16 addr;
-
- /* allocate eeprom */
- sz = priv->cfg->base_params->eeprom_size;
- IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
- priv->eeprom = kzalloc(sz, GFP_KERNEL);
- if (!priv->eeprom) {
- ret = -ENOMEM;
- goto alloc_err;
- }
- e = (__le16 *)priv->eeprom;
-
- priv->cfg->ops->lib->apm_ops.init(priv);
-
- ret = iwl_legacy_eeprom_verify_signature(priv);
- if (ret < 0) {
- IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
- ret = -ENOENT;
- goto err;
- }
-
- /* Make sure driver (instead of uCode) is allowed to read EEPROM */
- ret = priv->cfg->ops->lib->eeprom_ops.acquire_semaphore(priv);
- if (ret < 0) {
- IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
- ret = -ENOENT;
- goto err;
- }
-
- /* eeprom is an array of 16bit values */
- for (addr = 0; addr < sz; addr += sizeof(u16)) {
- u32 r;
-
- _iwl_legacy_write32(priv, CSR_EEPROM_REG,
- CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
-
- ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
- CSR_EEPROM_REG_READ_VALID_MSK,
- CSR_EEPROM_REG_READ_VALID_MSK,
- IWL_EEPROM_ACCESS_TIMEOUT);
- if (ret < 0) {
- IWL_ERR(priv, "Time out reading EEPROM[%d]\n",
- addr);
- goto done;
- }
- r = _iwl_legacy_read_direct32(priv, CSR_EEPROM_REG);
- e[addr / 2] = cpu_to_le16(r >> 16);
- }
-
- IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
- "EEPROM",
- iwl_legacy_eeprom_query16(priv, EEPROM_VERSION));
-
- ret = 0;
-done:
- priv->cfg->ops->lib->eeprom_ops.release_semaphore(priv);
-
-err:
- if (ret)
- iwl_legacy_eeprom_free(priv);
- /* Reset chip to save power until we load uCode during "up". */
- iwl_legacy_apm_stop(priv);
-alloc_err:
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_eeprom_init);
-
-void iwl_legacy_eeprom_free(struct iwl_priv *priv)
-{
- kfree(priv->eeprom);
- priv->eeprom = NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_eeprom_free);
-
-static void iwl_legacy_init_band_reference(const struct iwl_priv *priv,
- int eep_band, int *eeprom_ch_count,
- const struct iwl_eeprom_channel **eeprom_ch_info,
- const u8 **eeprom_ch_index)
-{
- u32 offset = priv->cfg->ops->lib->
- eeprom_ops.regulatory_bands[eep_band - 1];
- switch (eep_band) {
- case 1: /* 2.4GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_1);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_legacy_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwlegacy_eeprom_band_1;
- break;
- case 2: /* 4.9GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_2);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_legacy_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwlegacy_eeprom_band_2;
- break;
- case 3: /* 5.2GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_3);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_legacy_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwlegacy_eeprom_band_3;
- break;
- case 4: /* 5.5GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_4);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_legacy_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwlegacy_eeprom_band_4;
- break;
- case 5: /* 5.7GHz band */
- *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_5);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_legacy_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwlegacy_eeprom_band_5;
- break;
- case 6: /* 2.4GHz ht40 channels */
- *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_6);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_legacy_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwlegacy_eeprom_band_6;
- break;
- case 7: /* 5 GHz ht40 channels */
- *eeprom_ch_count = ARRAY_SIZE(iwlegacy_eeprom_band_7);
- *eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_legacy_eeprom_query_addr(priv, offset);
- *eeprom_ch_index = iwlegacy_eeprom_band_7;
- break;
- default:
- BUG();
- }
-}
-
-#define CHECK_AND_PRINT(x) ((eeprom_ch->flags & EEPROM_CHANNEL_##x) \
- ? # x " " : "")
-/**
- * iwl_legacy_mod_ht40_chan_info - Copy ht40 channel info into driver's priv.
- *
- * Does not set up a command, or touch hardware.
- */
-static int iwl_legacy_mod_ht40_chan_info(struct iwl_priv *priv,
- enum ieee80211_band band, u16 channel,
- const struct iwl_eeprom_channel *eeprom_ch,
- u8 clear_ht40_extension_channel)
-{
- struct iwl_channel_info *ch_info;
-
- ch_info = (struct iwl_channel_info *)
- iwl_legacy_get_channel_info(priv, band, channel);
-
- if (!iwl_legacy_is_channel_valid(ch_info))
- return -1;
-
- IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
- " Ad-Hoc %ssupported\n",
- ch_info->channel,
- iwl_legacy_is_channel_a_band(ch_info) ?
- "5.2" : "2.4",
- CHECK_AND_PRINT(IBSS),
- CHECK_AND_PRINT(ACTIVE),
- CHECK_AND_PRINT(RADAR),
- CHECK_AND_PRINT(WIDE),
- CHECK_AND_PRINT(DFS),
- eeprom_ch->flags,
- eeprom_ch->max_power_avg,
- ((eeprom_ch->flags & EEPROM_CHANNEL_IBSS)
- && !(eeprom_ch->flags & EEPROM_CHANNEL_RADAR)) ?
- "" : "not ");
-
- ch_info->ht40_eeprom = *eeprom_ch;
- ch_info->ht40_max_power_avg = eeprom_ch->max_power_avg;
- ch_info->ht40_flags = eeprom_ch->flags;
- if (eeprom_ch->flags & EEPROM_CHANNEL_VALID)
- ch_info->ht40_extension_channel &=
- ~clear_ht40_extension_channel;
-
- return 0;
-}
-
-#define CHECK_AND_PRINT_I(x) ((eeprom_ch_info[ch].flags & EEPROM_CHANNEL_##x) \
- ? # x " " : "")
-
-/**
- * iwl_legacy_init_channel_map - Set up driver's info for all possible channels
- */
-int iwl_legacy_init_channel_map(struct iwl_priv *priv)
-{
- int eeprom_ch_count = 0;
- const u8 *eeprom_ch_index = NULL;
- const struct iwl_eeprom_channel *eeprom_ch_info = NULL;
- int band, ch;
- struct iwl_channel_info *ch_info;
-
- if (priv->channel_count) {
- IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n");
- return 0;
- }
-
- IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n");
-
- priv->channel_count =
- ARRAY_SIZE(iwlegacy_eeprom_band_1) +
- ARRAY_SIZE(iwlegacy_eeprom_band_2) +
- ARRAY_SIZE(iwlegacy_eeprom_band_3) +
- ARRAY_SIZE(iwlegacy_eeprom_band_4) +
- ARRAY_SIZE(iwlegacy_eeprom_band_5);
-
- IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
- priv->channel_count);
-
- priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
- priv->channel_count, GFP_KERNEL);
- if (!priv->channel_info) {
- IWL_ERR(priv, "Could not allocate channel_info\n");
- priv->channel_count = 0;
- return -ENOMEM;
- }
-
- ch_info = priv->channel_info;
-
- /* Loop through the 5 EEPROM bands, adding them in order to the
- * channel map we maintain (which contains additional information
- * beyond what is in the EEPROM). */
- for (band = 1; band <= 5; band++) {
-
- iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
- &eeprom_ch_info, &eeprom_ch_index);
-
- /* Loop through each band adding each of the channels */
- for (ch = 0; ch < eeprom_ch_count; ch++) {
- ch_info->channel = eeprom_ch_index[ch];
- ch_info->band = (band == 1) ? IEEE80211_BAND_2GHZ :
- IEEE80211_BAND_5GHZ;
-
- /* permanently store EEPROM's channel regulatory flags
- * and max power in channel info database. */
- ch_info->eeprom = eeprom_ch_info[ch];
-
- /* Copy the run-time flags so they are there even on
- * invalid channels */
- ch_info->flags = eeprom_ch_info[ch].flags;
- /* First mark ht40 as not enabled; it is enabled
- * channel by channel further below */
- ch_info->ht40_extension_channel =
- IEEE80211_CHAN_NO_HT40;
-
- if (!(iwl_legacy_is_channel_valid(ch_info))) {
- IWL_DEBUG_EEPROM(priv,
- "Ch. %d Flags %x [%sGHz] - "
- "No traffic\n",
- ch_info->channel,
- ch_info->flags,
- iwl_legacy_is_channel_a_band(ch_info) ?
- "5.2" : "2.4");
- ch_info++;
- continue;
- }
-
- /* Initialize regulatory-based run-time data */
- ch_info->max_power_avg = ch_info->curr_txpow =
- eeprom_ch_info[ch].max_power_avg;
- ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
- ch_info->min_power = 0;
-
- IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] "
- "%s%s%s%s%s%s(0x%02x %ddBm):"
- " Ad-Hoc %ssupported\n",
- ch_info->channel,
- iwl_legacy_is_channel_a_band(ch_info) ?
- "5.2" : "2.4",
- CHECK_AND_PRINT_I(VALID),
- CHECK_AND_PRINT_I(IBSS),
- CHECK_AND_PRINT_I(ACTIVE),
- CHECK_AND_PRINT_I(RADAR),
- CHECK_AND_PRINT_I(WIDE),
- CHECK_AND_PRINT_I(DFS),
- eeprom_ch_info[ch].flags,
- eeprom_ch_info[ch].max_power_avg,
- ((eeprom_ch_info[ch].
- flags & EEPROM_CHANNEL_IBSS)
- && !(eeprom_ch_info[ch].
- flags & EEPROM_CHANNEL_RADAR))
- ? "" : "not ");
-
- ch_info++;
- }
- }
-
- /* Check if we do have HT40 channels */
- if (priv->cfg->ops->lib->eeprom_ops.regulatory_bands[5] ==
- EEPROM_REGULATORY_BAND_NO_HT40 &&
- priv->cfg->ops->lib->eeprom_ops.regulatory_bands[6] ==
- EEPROM_REGULATORY_BAND_NO_HT40)
- return 0;
-
- /* Two additional EEPROM bands for 2.4 and 5 GHz HT40 channels */
- for (band = 6; band <= 7; band++) {
- enum ieee80211_band ieeeband;
-
- iwl_legacy_init_band_reference(priv, band, &eeprom_ch_count,
- &eeprom_ch_info, &eeprom_ch_index);
-
- /* EEPROM band 6 is 2.4, band 7 is 5 GHz */
- ieeeband =
- (band == 6) ? IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
-
- /* Loop through each band adding each of the channels */
- for (ch = 0; ch < eeprom_ch_count; ch++) {
- /* Set up driver's info for lower half */
- iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
- eeprom_ch_index[ch],
- &eeprom_ch_info[ch],
- IEEE80211_CHAN_NO_HT40PLUS);
-
- /* Set up driver's info for upper half */
- iwl_legacy_mod_ht40_chan_info(priv, ieeeband,
- eeprom_ch_index[ch] + 4,
- &eeprom_ch_info[ch],
- IEEE80211_CHAN_NO_HT40MINUS);
- }
- }
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_init_channel_map);
-
-/*
- * iwl_legacy_free_channel_map - undo allocations in iwl_legacy_init_channel_map
- */
-void iwl_legacy_free_channel_map(struct iwl_priv *priv)
-{
- kfree(priv->channel_info);
- priv->channel_count = 0;
-}
-EXPORT_SYMBOL(iwl_legacy_free_channel_map);
-
-/**
- * iwl_legacy_get_channel_info - Find driver's private channel info
- *
- * Based on band and channel number.
- */
-const struct
-iwl_channel_info *iwl_legacy_get_channel_info(const struct iwl_priv *priv,
- enum ieee80211_band band, u16 channel)
-{
- int i;
-
- switch (band) {
- case IEEE80211_BAND_5GHZ:
- for (i = 14; i < priv->channel_count; i++) {
- if (priv->channel_info[i].channel == channel)
- return &priv->channel_info[i];
- }
- break;
- case IEEE80211_BAND_2GHZ:
- if (channel >= 1 && channel <= 14)
- return &priv->channel_info[channel - 1];
- break;
- default:
- BUG();
- }
-
- return NULL;
-}
-EXPORT_SYMBOL(iwl_legacy_get_channel_info);
diff --git a/drivers/net/wireless/iwlegacy/iwl-eeprom.h b/drivers/net/wireless/iwlegacy/iwl-eeprom.h
deleted file mode 100644
index c59c810..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-eeprom.h
+++ /dev/null
@@ -1,344 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *****************************************************************************/
-
-#ifndef __iwl_legacy_eeprom_h__
-#define __iwl_legacy_eeprom_h__
-
-#include <net/mac80211.h>
-
-struct iwl_priv;
-
-/*
- * EEPROM access time values:
- *
- * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG.
- * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1).
- * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec.
- * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG.
- */
-#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */
-
-#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */
-#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */
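The access sequence documented above (and implemented in the read loop in iwl-eeprom.c earlier in this patch) boils down to two pieces of bit arithmetic: the byte address is written to the register shifted left by one, and the returned 16-bit word sits in bits 31-16 of the register value. A minimal host-side sketch of just that arithmetic, with a hypothetical address mask:

#include <stdio.h>
#include <stdint.h>

#define EEPROM_ADDR_MASK 0x0000FFFC   /* hypothetical CSR address mask */

/* Value written to the EEPROM register to start reading `byte_addr`. */
static uint32_t eeprom_read_cmd(uint16_t byte_addr)
{
        return EEPROM_ADDR_MASK & ((uint32_t)byte_addr << 1);
}

/* Once the valid bit is set, the 16-bit word is in bits 31..16. */
static uint16_t eeprom_read_data(uint32_t reg_val)
{
        return (uint16_t)(reg_val >> 16);
}

int main(void)
{
        printf("cmd for byte addr 0x44: 0x%08x\n",
               (unsigned)eeprom_read_cmd(0x44));
        printf("data from 0xBEEF0001:   0x%04x\n",
               (unsigned)eeprom_read_data(0xBEEF0001));
        return 0;
}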
-
-
-/*
- * Regulatory channel usage flags in EEPROM struct iwl4965_eeprom_channel.flags.
- *
- * IBSS and/or AP operation is allowed *only* on those channels with
- * (VALID && IBSS && ACTIVE && !RADAR). This restriction is in place because
- * RADAR detection is not supported by the 4965 driver, but is a
- * requirement for establishing a new network for legal operation on channels
- * requiring RADAR detection or restricting ACTIVE scanning.
- *
- * NOTE: "WIDE" flag does not indicate anything about "HT40" 40 MHz channels.
- * It only indicates that 20 MHz channel use is supported; HT40 channel
- * usage is indicated by a separate set of regulatory flags for each
- * HT40 channel pair.
- *
- * NOTE: Using a channel inappropriately will result in a uCode error!
- */
-#define IWL_NUM_TX_CALIB_GROUPS 5
-enum {
- EEPROM_CHANNEL_VALID = (1 << 0), /* usable for this SKU/geo */
- EEPROM_CHANNEL_IBSS = (1 << 1), /* usable as an IBSS channel */
- /* Bit 2 Reserved */
- EEPROM_CHANNEL_ACTIVE = (1 << 3), /* active scanning allowed */
- EEPROM_CHANNEL_RADAR = (1 << 4), /* radar detection required */
- EEPROM_CHANNEL_WIDE = (1 << 5), /* 20 MHz channel okay */
- /* Bit 6 Reserved (was Narrow Channel) */
- EEPROM_CHANNEL_DFS = (1 << 7), /* dynamic freq selection candidate */
-};
-
-/* SKU Capabilities */
-/* 3945 only */
-#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
-#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
-
-/* *regulatory* channel data format in eeprom, one for each channel.
- * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
-struct iwl_eeprom_channel {
- u8 flags; /* EEPROM_CHANNEL_* flags copied from EEPROM */
- s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
-} __packed;
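Each per-channel entry is just a flags byte plus a signed max-power byte, so decoding one is a matter of testing the flag bits and reading the power as a signed dBm value; the comment block above also notes that Ad-Hoc use requires VALID, IBSS and ACTIVE without RADAR. A standalone sketch using stand-in flag names rather than the EEPROM_CHANNEL_* values defined above:

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for the EEPROM channel flag bits. */
#define CH_VALID  (1 << 0)
#define CH_IBSS   (1 << 1)
#define CH_ACTIVE (1 << 3)
#define CH_RADAR  (1 << 4)

struct eeprom_channel {
        uint8_t flags;          /* regulatory usage flags */
        int8_t  max_power_avg;  /* max power in dBm */
};

static int adhoc_allowed(const struct eeprom_channel *ch)
{
        return (ch->flags & CH_VALID) && (ch->flags & CH_IBSS) &&
               (ch->flags & CH_ACTIVE) && !(ch->flags & CH_RADAR);
}

int main(void)
{
        struct eeprom_channel ch = { CH_VALID | CH_IBSS | CH_ACTIVE, 16 };

        printf("max power %d dBm, Ad-Hoc %sallowed\n",
               ch.max_power_avg, adhoc_allowed(&ch) ? "" : "not ");
        return 0;
}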
-
-/* 3945 Specific */
-#define EEPROM_3945_EEPROM_VERSION (0x2f)
-
-/* 4965 has two radio transmitters (and 3 radio receivers) */
-#define EEPROM_TX_POWER_TX_CHAINS (2)
-
-/* 4965 has room for up to 8 sets of txpower calibration data */
-#define EEPROM_TX_POWER_BANDS (8)
-
-/* 4965 factory calibration measures txpower gain settings for
- * each of 3 target output levels */
-#define EEPROM_TX_POWER_MEASUREMENTS (3)
-
-/* 4965 Specific */
-/* 4965 driver does not work with txpower calibration version < 5 */
-#define EEPROM_4965_TX_POWER_VERSION (5)
-#define EEPROM_4965_EEPROM_VERSION (0x2f)
-#define EEPROM_4965_CALIB_VERSION_OFFSET (2*0xB6) /* 2 bytes */
-#define EEPROM_4965_CALIB_TXPOWER_OFFSET (2*0xE8) /* 48 bytes */
-#define EEPROM_4965_BOARD_REVISION (2*0x4F) /* 2 bytes */
-#define EEPROM_4965_BOARD_PBA (2*0x56+1) /* 9 bytes */
-
-/* 2.4 GHz */
-extern const u8 iwlegacy_eeprom_band_1[14];
-
-/*
- * factory calibration data for one txpower level, on one channel,
- * measured on one of the 2 tx chains (radio transmitter and associated
- * antenna). EEPROM contains:
- *
- * 1) Temperature (degrees Celsius) of device when measurement was made.
- *
- * 2) Gain table index used to achieve the target measurement power.
- * This refers to the "well-known" gain tables (see iwl-4965-hw.h).
- *
- * 3) Actual measured output power, in half-dBm ("34" = 17 dBm).
- *
- * 4) RF power amplifier detector level measurement (not used).
- */
-struct iwl_eeprom_calib_measure {
- u8 temperature; /* Device temperature (Celsius) */
- u8 gain_idx; /* Index into gain table */
- u8 actual_pow; /* Measured RF output power, half-dBm */
- s8 pa_det; /* Power amp detector level (not used) */
-} __packed;
-
-
-/*
- * measurement set for one channel. EEPROM contains:
- *
- * 1) Channel number measured
- *
- * 2) Measurements for each of 3 power levels for each of 2 radio transmitters
- * (a.k.a. "tx chains") (6 measurements altogether)
- */
-struct iwl_eeprom_calib_ch_info {
- u8 ch_num;
- struct iwl_eeprom_calib_measure
- measurements[EEPROM_TX_POWER_TX_CHAINS]
- [EEPROM_TX_POWER_MEASUREMENTS];
-} __packed;
-
-/*
- * txpower subband info.
- *
- * For each frequency subband, EEPROM contains the following:
- *
- * 1) First and last channels within range of the subband. "0" values
- * indicate that this sample set is not being used.
- *
- * 2) Sample measurement sets for 2 channels close to the range endpoints.
- */
-struct iwl_eeprom_calib_subband_info {
- u8 ch_from; /* channel number of lowest channel in subband */
- u8 ch_to; /* channel number of highest channel in subband */
- struct iwl_eeprom_calib_ch_info ch1;
- struct iwl_eeprom_calib_ch_info ch2;
-} __packed;
-
-
-/*
- * txpower calibration info. EEPROM contains:
- *
- * 1) Factory-measured saturation power levels (maximum levels at which
- * tx power amplifier can output a signal without too much distortion).
- * There is one level for 2.4 GHz band and one for 5 GHz band. These
- * values apply to all channels within each of the bands.
- *
- * 2) Factory-measured power supply voltage level. This is assumed to be
- * constant (i.e. same value applies to all channels/bands) while the
- * factory measurements are being made.
- *
- * 3) Up to 8 sets of factory-measured txpower calibration values.
- * These are for different frequency ranges, since txpower gain
- * characteristics of the analog radio circuitry vary with frequency.
- *
- * Not all sets need to be filled with data;
- * struct iwl_eeprom_calib_subband_info contains range of channels
- * (0 if unused) for each set of data.
- */
-struct iwl_eeprom_calib_info {
- u8 saturation_power24; /* half-dBm (e.g. "34" = 17 dBm) */
- u8 saturation_power52; /* half-dBm */
- __le16 voltage; /* signed */
- struct iwl_eeprom_calib_subband_info
- band_info[EEPROM_TX_POWER_BANDS];
-} __packed;
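To make the nesting of these calibration structures concrete, the sketch below walks every subband's sample sets using only the constants and types defined above; the function name and the placeholder body are illustrative, not part of the driver.

/* Illustrative only: walk the factory calibration sets described above.
 * 'calib' would point at a struct iwl_eeprom_calib_info read from EEPROM. */
static void example_dump_calib(const struct iwl_eeprom_calib_info *calib)
{
	int band, chain, level;

	for (band = 0; band < EEPROM_TX_POWER_BANDS; band++) {
		const struct iwl_eeprom_calib_subband_info *sb =
						&calib->band_info[band];

		if (!sb->ch_from && !sb->ch_to)
			continue;	/* unused sample set */

		for (chain = 0; chain < EEPROM_TX_POWER_TX_CHAINS; chain++)
			for (level = 0; level < EEPROM_TX_POWER_MEASUREMENTS; level++) {
				const struct iwl_eeprom_calib_measure *m =
					&sb->ch1.measurements[chain][level];
				/* m->actual_pow is in half-dBm, e.g. 34 = 17 dBm;
				 * sb->ch2 is handled the same way */
				(void)m;
			}
	}
}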
-
-
-/* General */
-#define EEPROM_DEVICE_ID (2*0x08) /* 2 bytes */
-#define EEPROM_MAC_ADDRESS (2*0x15) /* 6 bytes */
-#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
-#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
-#define EEPROM_VERSION (2*0x44) /* 2 bytes */
-#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
-#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
-#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
-#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
-#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
-
-/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
-#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
-#define EEPROM_RF_CFG_STEP_MSK(x) ((x >> 2) & 0x3) /* bits 2-3 */
-#define EEPROM_RF_CFG_DASH_MSK(x) ((x >> 4) & 0x3) /* bits 4-5 */
-#define EEPROM_RF_CFG_PNUM_MSK(x) ((x >> 6) & 0x3) /* bits 6-7 */
-#define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
-#define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
-
-#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
-#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
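A hedged example of how the EEPROM_RF_CFG_* masks above might be applied to the 16-bit word read from EEPROM_RADIO_CONFIG (e.g. via iwl_legacy_eeprom_query16(), declared later in this header); the helper is hypothetical and only names the decoded fields.

/* Hypothetical helper: decode a radio configuration word using the
 * EEPROM_RF_CFG_* masks defined above. */
static void example_decode_radio_cfg(u16 radio_cfg)
{
	u8 rf_type   = EEPROM_RF_CFG_TYPE_MSK(radio_cfg);	/* bits 0-1 */
	u8 rf_step   = EEPROM_RF_CFG_STEP_MSK(radio_cfg);	/* bits 2-3 */
	u8 tx_chains = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);	/* bits 8-11 */
	u8 rx_chains = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);	/* bits 12-15 */

	(void)rf_type; (void)rf_step; (void)tx_chains; (void)rx_chains;
}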
-
-/*
- * Per-channel regulatory data.
- *
- * Each channel that *might* be supported by iwl has a fixed location
- * in EEPROM containing EEPROM_CHANNEL_* usage flags (LSB) and max regulatory
- * txpower (MSB).
- *
- * Entries immediately below are for 20 MHz channel width. HT40 (40 MHz)
- * channels (only for 4965, not supported by 3945) appear later in the EEPROM.
- *
- * 2.4 GHz channels 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
- */
-#define EEPROM_REGULATORY_SKU_ID (2*0x60) /* 4 bytes */
-#define EEPROM_REGULATORY_BAND_1 (2*0x62) /* 2 bytes */
-#define EEPROM_REGULATORY_BAND_1_CHANNELS (2*0x63) /* 28 bytes */
-
-/*
- * 4.9 GHz channels 183, 184, 185, 187, 188, 189, 192, 196,
- * 5.0 GHz channels 7, 8, 11, 12, 16
- * (4915-5080MHz) (none of these is ever supported)
- */
-#define EEPROM_REGULATORY_BAND_2 (2*0x71) /* 2 bytes */
-#define EEPROM_REGULATORY_BAND_2_CHANNELS (2*0x72) /* 26 bytes */
-
-/*
- * 5.2 GHz channels 34, 36, 38, 40, 42, 44, 46, 48, 52, 56, 60, 64
- * (5170-5320MHz)
- */
-#define EEPROM_REGULATORY_BAND_3 (2*0x7F) /* 2 bytes */
-#define EEPROM_REGULATORY_BAND_3_CHANNELS (2*0x80) /* 24 bytes */
-
-/*
- * 5.5 GHz channels 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140
- * (5500-5700MHz)
- */
-#define EEPROM_REGULATORY_BAND_4 (2*0x8C) /* 2 bytes */
-#define EEPROM_REGULATORY_BAND_4_CHANNELS (2*0x8D) /* 22 bytes */
-
-/*
- * 5.7 GHz channels 145, 149, 153, 157, 161, 165
- * (5725-5825MHz)
- */
-#define EEPROM_REGULATORY_BAND_5 (2*0x98) /* 2 bytes */
-#define EEPROM_REGULATORY_BAND_5_CHANNELS (2*0x99) /* 12 bytes */
-
-/*
- * 2.4 GHz HT40 channels 1 (5), 2 (6), 3 (7), 4 (8), 5 (9), 6 (10), 7 (11)
- *
- * The channel listed is the center of the lower 20 MHz half of the channel.
- * The overall center frequency is actually 2 channels (10 MHz) above that,
- * and the upper half of each HT40 channel is centered 4 channels (20 MHz) away
- * from the lower half; e.g. the upper half of HT40 channel 1 is channel 5,
- * and the overall HT40 channel width centers on channel 3.
- *
- * NOTE: The RXON command uses 20 MHz channel numbers to specify the
- * control channel to which to tune. RXON also specifies whether the
- * control channel is the upper or lower half of a HT40 channel.
- *
- * NOTE: 4965 does not support HT40 channels on 2.4 GHz.
- */
-#define EEPROM_4965_REGULATORY_BAND_24_HT40_CHANNELS (2*0xA0) /* 14 bytes */
-
-/*
- * 5.2 GHz HT40 channels 36 (40), 44 (48), 52 (56), 60 (64),
- * 100 (104), 108 (112), 116 (120), 124 (128), 132 (136), 149 (153), 157 (161)
- */
-#define EEPROM_4965_REGULATORY_BAND_52_HT40_CHANNELS (2*0xA8) /* 22 bytes */
-
-#define EEPROM_REGULATORY_BAND_NO_HT40 (0)
-
-struct iwl_eeprom_ops {
- const u32 regulatory_bands[7];
- int (*acquire_semaphore) (struct iwl_priv *priv);
- void (*release_semaphore) (struct iwl_priv *priv);
-};
-
-
-int iwl_legacy_eeprom_init(struct iwl_priv *priv);
-void iwl_legacy_eeprom_free(struct iwl_priv *priv);
-const u8 *iwl_legacy_eeprom_query_addr(const struct iwl_priv *priv,
- size_t offset);
-u16 iwl_legacy_eeprom_query16(const struct iwl_priv *priv, size_t offset);
-int iwl_legacy_init_channel_map(struct iwl_priv *priv);
-void iwl_legacy_free_channel_map(struct iwl_priv *priv);
-const struct iwl_channel_info *iwl_legacy_get_channel_info(
- const struct iwl_priv *priv,
- enum ieee80211_band band, u16 channel);
-
-#endif /* __iwl_legacy_eeprom_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-fh.h b/drivers/net/wireless/iwlegacy/iwl-fh.h
deleted file mode 100644
index 6e60918..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-fh.h
+++ /dev/null
@@ -1,513 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2005 - 2011 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-#ifndef __iwl_legacy_fh_h__
-#define __iwl_legacy_fh_h__
-
-/****************************/
-/* Flow Handler Definitions */
-/****************************/
-
-/**
- * This I/O area is directly read/writable by driver (e.g. Linux uses writel())
- * Addresses are offsets from device's PCI hardware base address.
- */
-#define FH_MEM_LOWER_BOUND (0x1000)
-#define FH_MEM_UPPER_BOUND (0x2000)
-
-/**
- * Keep-Warm (KW) buffer base address.
- *
- * Driver must allocate a 4KByte buffer that is used by 4965 for keeping the
- * host DRAM powered on (via dummy accesses to DRAM) to maintain low-latency
- * DRAM access when 4965 is Txing or Rxing. The dummy accesses prevent host
- * from going into a power-savings mode that would cause higher DRAM latency,
- * and possible data over/under-runs, before all Tx/Rx is complete.
- *
- * Driver loads FH_KW_MEM_ADDR_REG with the physical address (bits 35:4)
- * of the buffer, which must be 4K aligned. Once this is set up, the 4965
- * automatically invokes keep-warm accesses when normal accesses might not
- * be sufficient to maintain fast DRAM response.
- *
- * Bit fields:
- * 31-0: Keep-warm buffer physical base address [35:4], must be 4K aligned
- */
-#define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C)
-
-
-/**
- * TFD Circular Buffers Base (CBBC) addresses
- *
- * 4965 has 16 base pointer registers, one for each of 16 host-DRAM-resident
- * circular buffers (CBs/queues) containing Transmit Frame Descriptors (TFDs)
- * (see struct iwl_tfd_frame). These 16 pointer registers are offset by 0x04
- * bytes from one another. Each TFD circular buffer in DRAM must be 256-byte
- * aligned (address bits 0-7 must be 0).
- *
- * Bit fields in each pointer register:
- * 27-0: TFD CB physical base address [35:8], must be 256-byte aligned
- */
-#define FH_MEM_CBBC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
-#define FH_MEM_CBBC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xA10)
-
-/* Find TFD CB base pointer for given queue (range 0-15). */
-#define FH_MEM_CBBC_QUEUE(x) (FH_MEM_CBBC_LOWER_BOUND + (x) * 0x4)
-
-
-/**
- * Rx SRAM Control and Status Registers (RSCSR)
- *
- * These registers provide handshake between driver and 4965 for the Rx queue
- * (this queue handles *all* command responses, notifications, Rx data, etc.
- * sent from 4965 uCode to host driver). Unlike Tx, there is only one Rx
- * queue, and only one Rx DMA/FIFO channel. Also unlike Tx, which can
- * concatenate up to 20 DRAM buffers to form a Tx frame, each Receive Buffer
- * Descriptor (RBD) points to only one Rx Buffer (RB); there is a 1:1
- * mapping between RBDs and RBs.
- *
- * Driver must allocate host DRAM memory for the following, and set the
- * physical address of each into 4965 registers:
- *
- * 1) Receive Buffer Descriptor (RBD) circular buffer (CB), typically with 256
- * entries (although any power of 2, up to 4096, is selectable by driver).
- * Each entry (1 dword) points to a receive buffer (RB) of consistent size
- * (typically 4K, although 8K or 16K are also selectable by driver).
- * Driver sets up RB size and number of RBDs in the CB via Rx config
- * register FH_MEM_RCSR_CHNL0_CONFIG_REG.
- *
- * Bit fields within one RBD:
- * 27-0: Receive Buffer physical address bits [35:8], 256-byte aligned
- *
- * Driver sets physical address [35:8] of base of RBD circular buffer
- * into FH_RSCSR_CHNL0_RBDCB_BASE_REG [27:0].
- *
- * 2) Rx status buffer, 8 bytes, in which 4965 indicates which Rx Buffers
- * (RBs) have been filled, via a "write pointer", actually the index of
- * the RB's corresponding RBD within the circular buffer. Driver sets
- * physical address [35:4] into FH_RSCSR_CHNL0_STTS_WPTR_REG [31:0].
- *
- * Bit fields in lower dword of Rx status buffer (upper dword not used
- * by driver; see struct iwl4965_shared, val0):
- * 31-12: Not used by driver
- * 11- 0: Index of last filled Rx buffer descriptor
- * (4965 writes, driver reads this value)
- *
- * As the driver prepares Receive Buffers (RBs) for 4965 to fill, driver must
- * enter pointers to these RBs into contiguous RBD circular buffer entries,
- * and update the 4965's "write" index register,
- * FH_RSCSR_CHNL0_RBDCB_WPTR_REG.
- *
- * This "write" index corresponds to the *next* RBD that the driver will make
- * available, i.e. one RBD past the tail of the ready-to-fill RBDs within
- * the circular buffer. This value should initially be 0 (before preparing any
- * RBs), should be 8 after preparing the first 8 RBs (for example), and must
- * wrap back to 0 at the end of the circular buffer (but don't wrap before
- * "read" index has advanced past 1! See below).
- * NOTE: 4965 EXPECTS THE WRITE INDEX TO BE INCREMENTED IN MULTIPLES OF 8.
- *
- * As the 4965 fills RBs (referenced from contiguous RBDs within the circular
- * buffer), it updates the Rx status buffer in host DRAM (item 2 above)
- * to tell the driver the index of the latest filled RBD. The driver must
- * read this "read" index from DRAM after receiving an Rx interrupt from 4965.
- *
- * The driver must also internally keep track of a third index, which is the
- * next RBD to process. When receiving an Rx interrupt, driver should process
- * all filled but unprocessed RBs up to, but not including, the RB
- * corresponding to the "read" index. For example, if "read" index becomes "1",
- * driver may process the RB pointed to by RBD 0. Depending on volume of
- * traffic, there may be many RBs to process.
- *
- * If read index == write index, 4965 thinks there is no room to put new data.
- * Due to this, the maximum number of filled RBs is 255, instead of 256. To
- * be safe, make sure that there is a gap of at least 2 RBDs between "write"
- * and "read" indexes; that is, make sure that there are no more than 254
- * buffers waiting to be filled.
- */
-#define FH_MEM_RSCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xBC0)
-#define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
-#define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND)
-
-/**
- * Physical base address of 8-byte Rx Status buffer.
- * Bit fields:
- * 31-0: Rx status buffer physical base address [35:4], must be 16-byte aligned.
- */
-#define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0)
-
-/**
- * Physical base address of Rx Buffer Descriptor Circular Buffer.
- * Bit fields:
- * 27-0: RBD CB physical base address [35:8], must be 256-byte aligned.
- */
-#define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004)
-
-/**
- * Rx write pointer (index, really!).
- * Bit fields:
- * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1.
- * NOTE: For 256-entry circular buffer, use only bits [7:0].
- */
-#define FH_RSCSR_CHNL0_RBDCB_WPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x008)
-#define FH_RSCSR_CHNL0_WPTR (FH_RSCSR_CHNL0_RBDCB_WPTR_REG)
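A minimal sketch of the write-index rule spelled out above (the device expects increments in multiples of 8); iwl_legacy_write_direct32() comes from iwl-io.h, which is removed later in this same patch, and the wrapper itself is illustrative.

/* Sketch: tell the device how many RBDs have been prepared, rounded
 * down to a multiple of 8 as required by the comment above. */
static void example_update_rx_wptr(struct iwl_priv *priv, u32 write)
{
	u32 write_actual = write & ~0x7;	/* multiples of 8 only */

	iwl_legacy_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG,
				  write_actual);
}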
-
-
-/**
- * Rx Config/Status Registers (RCSR)
- * Rx Config Reg for channel 0 (only channel used)
- *
- * Driver must initialize FH_MEM_RCSR_CHNL0_CONFIG_REG as follows for
- * normal operation (see bit fields).
- *
- * Clearing FH_MEM_RCSR_CHNL0_CONFIG_REG to 0 turns off Rx DMA.
- * Driver should poll FH_MEM_RSSR_RX_STATUS_REG for
- * FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (bit 24) before continuing.
- *
- * Bit fields:
- * 31-30: Rx DMA channel enable: '00' off/pause, '01' pause at end of frame,
- * '10' operate normally
- * 29-24: reserved
- * 23-20: # RBDs in circular buffer = 2^value; use "8" for 256 RBDs (normal),
- * min "5" for 32 RBDs, max "12" for 4096 RBDs.
- * 19-18: reserved
- * 17-16: size of each receive buffer; '00' 4K (normal), '01' 8K,
- * '10' 12K, '11' 16K.
- * 15-14: reserved
- * 13-12: IRQ destination; '00' none, '01' host driver (normal operation)
- * 11- 4: timeout for closing Rx buffer and interrupting host (units 32 usec)
- * typical value 0x10 (about 1/2 msec)
- * 3- 0: reserved
- */
-#define FH_MEM_RCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC00)
-#define FH_MEM_RCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xCC0)
-#define FH_MEM_RCSR_CHNL0 (FH_MEM_RCSR_LOWER_BOUND)
-
-#define FH_MEM_RCSR_CHNL0_CONFIG_REG (FH_MEM_RCSR_CHNL0)
-
-#define FH_RCSR_CHNL0_RX_CONFIG_RB_TIMEOUT_MSK (0x00000FF0) /* bits 4-11 */
-#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_MSK (0x00001000) /* bit 12 */
-#define FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK (0x00008000) /* bit 15 */
-#define FH_RCSR_CHNL0_RX_CONFIG_RB_SIZE_MSK (0x00030000) /* bits 16-17 */
-#define FH_RCSR_CHNL0_RX_CONFIG_RBDBC_SIZE_MSK (0x00F00000) /* bits 20-23 */
-#define FH_RCSR_CHNL0_RX_CONFIG_DMA_CHNL_EN_MSK (0xC0000000) /* bits 30-31 */
-
-#define FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS (20)
-#define FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS (4)
-#define RX_RB_TIMEOUT (0x10)
-
-#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_VAL (0x00000000)
-#define FH_RCSR_RX_CONFIG_CHNL_EN_PAUSE_EOF_VAL (0x40000000)
-#define FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL (0x80000000)
-
-#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K (0x00000000)
-#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K (0x00010000)
-#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K (0x00020000)
-#define FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_16K (0x00030000)
-
-#define FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY (0x00000004)
-#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000)
-#define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000)
-
-#define FH_RSCSR_FRAME_SIZE_MSK (0x00003FFF) /* bits 0-13 */
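For illustration, the value the channel-0 Rx config comment above calls "normal operation" could be composed from these macros roughly as follows; RX_QUEUE_SIZE_LOG is defined further down in this header and the helper is hypothetical.

/* Sketch: compose FH_MEM_RCSR_CHNL0_CONFIG_REG for normal operation
 * (enable DMA, 256 RBDs, 4K RBs, interrupt host, ~0.5 ms RB timeout). */
static u32 example_rx_config_value(void)
{
	return FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
	       FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
	       FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
	       (RX_QUEUE_SIZE_LOG << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS) |
	       (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS);
}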
-
-/**
- * Rx Shared Status Registers (RSSR)
- *
- * After stopping Rx DMA channel (writing 0 to
- * FH_MEM_RCSR_CHNL0_CONFIG_REG), driver must poll
- * FH_MEM_RSSR_RX_STATUS_REG until Rx channel is idle.
- *
- * Bit fields:
- * 24: 1 = Channel 0 is idle
- *
- * FH_MEM_RSSR_SHARED_CTRL_REG and FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV
- * contain default values that should not be altered by the driver.
- */
-#define FH_MEM_RSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xC40)
-#define FH_MEM_RSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
-
-#define FH_MEM_RSSR_SHARED_CTRL_REG (FH_MEM_RSSR_LOWER_BOUND)
-#define FH_MEM_RSSR_RX_STATUS_REG (FH_MEM_RSSR_LOWER_BOUND + 0x004)
-#define FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV\
- (FH_MEM_RSSR_LOWER_BOUND + 0x008)
-
-#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
-
-#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
-
-/* TFDB Area - TFDs buffer table */
-#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
-#define FH_TFDIB_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x900)
-#define FH_TFDIB_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x958)
-#define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl))
-#define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4)
-
-/**
- * Transmit DMA Channel Control/Status Registers (TCSR)
- *
- * 4965 has one configuration register for each of 8 Tx DMA/FIFO channels
- * supported in hardware (don't confuse these with the 16 Tx queues in DRAM,
- * which feed the DMA/FIFO channels); config regs are separated by 0x20 bytes.
- *
- * To use a Tx DMA channel, driver must initialize its
- * FH_TCSR_CHNL_TX_CONFIG_REG(chnl) with:
- *
- * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- * FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE_VAL
- *
- * All other bits should be 0.
- *
- * Bit fields:
- * 31-30: Tx DMA channel enable: '00' off/pause, '01' pause at end of frame,
- * '10' operate normally
- * 29- 4: Reserved, set to "0"
- * 3: Enable internal DMA requests (1, normal operation), disable (0)
- * 2- 0: Reserved, set to "0"
- */
-#define FH_TCSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xD00)
-#define FH_TCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xE60)
-
-/* Find Control/Status reg for given Tx DMA/FIFO channel */
-#define FH49_TCSR_CHNL_NUM (7)
-#define FH50_TCSR_CHNL_NUM (8)
-
-/* TCSR: tx_config register values */
-#define FH_TCSR_CHNL_TX_CONFIG_REG(_chnl) \
- (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl))
-#define FH_TCSR_CHNL_TX_CREDIT_REG(_chnl) \
- (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x4)
-#define FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl) \
- (FH_TCSR_LOWER_BOUND + 0x20 * (_chnl) + 0x8)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_TXF (0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_MSG_MODE_DRV (0x00000001)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE (0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE (0x00000008)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_NOINT (0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD (0x00100000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_IFTFD (0x00200000)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_NOINT (0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_ENDTFD (0x00400000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_RTC_IFTFD (0x00800000)
-
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE (0x00000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE_EOF (0x40000000)
-#define FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE (0x80000000)
-
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_EMPTY (0x00000000)
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_WAIT (0x00002000)
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID (0x00000003)
-
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20)
-#define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12)
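A sketch of the initialization the TCSR comment above describes: enable the DMA channel and its credit mechanism for one Tx channel. iwl_legacy_write_direct32() is from iwl-io.h (also removed in this patch); the wrapper is illustrative.

/* Sketch: bring up one Tx DMA/FIFO channel per the TCSR comment above. */
static void example_tx_chnl_enable(struct iwl_priv *priv, int chnl)
{
	iwl_legacy_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chnl),
			FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
			FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
}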
-
-/**
- * Tx Shared Status Registers (TSSR)
- *
- * After stopping Tx DMA channel (writing 0 to
- * FH_TCSR_CHNL_TX_CONFIG_REG(chnl)), driver must poll
- * FH_TSSR_TX_STATUS_REG until selected Tx channel is idle
- * (channel's buffers empty | no pending requests).
- *
- * Bit fields:
- * 31-24: 1 = Channel buffers empty (channel 7:0)
- * 23-16: 1 = No pending requests (channel 7:0)
- */
-#define FH_TSSR_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0xEA0)
-#define FH_TSSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xEC0)
-
-#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
-
-/**
- * Bit fields for TSSR(Tx Shared Status & Control) error status register:
- * 31: Indicates an address error when accessing internal memory
- * uCode/driver must write "1" in order to clear this flag
- * 30: Indicates that Host did not send the expected number of dwords to FH
- * uCode/driver must write "1" in order to clear this flag
- * 16-9: Each status bit is for one channel. Indicates that an (Error) ActDMA
- * command was received from the scheduler while the TRB was already full
- * with a previous command
- * uCode/driver must write "1" in order to clear this flag
- * 7-0: Each status bit indicates a channel's TxCredit error. When an error
- * bit is set, it indicates that the FH has received a full indication
- * from the RTC TxFIFO and the current value of the TxCredit counter was
- * not equal to zero. This means that the credit mechanism was not
- * synchronized to the TxFIFO status
- * uCode/driver must write "1" in order to clear this flag
- */
-#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018)
-
-#define FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl) ((1 << (_chnl)) << 16)
-
-/* Tx service channels */
-#define FH_SRVC_CHNL (9)
-#define FH_SRVC_LOWER_BOUND (FH_MEM_LOWER_BOUND + 0x9C8)
-#define FH_SRVC_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0x9D0)
-#define FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl) \
- (FH_SRVC_LOWER_BOUND + ((_chnl) - 9) * 0x4)
-
-#define FH_TX_CHICKEN_BITS_REG (FH_MEM_LOWER_BOUND + 0xE98)
-/* Instruct FH to increment the retry count of a packet when
- * it is brought from the memory to TX-FIFO
- */
-#define FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN (0x00000002)
-
-#define RX_QUEUE_SIZE 256
-#define RX_QUEUE_MASK 255
-#define RX_QUEUE_SIZE_LOG 8
-
-/*
- * RX related structures and functions
- */
-#define RX_FREE_BUFFERS 64
-#define RX_LOW_WATERMARK 8
-
-/* Size of one Rx buffer in host DRAM */
-#define IWL_RX_BUF_SIZE_3K (3 * 1000) /* 3945 only */
-#define IWL_RX_BUF_SIZE_4K (4 * 1024)
-#define IWL_RX_BUF_SIZE_8K (8 * 1024)
-
-/**
- * struct iwl_rb_status - receive buffer status
- * host memory mapped FH registers
- * @closed_rb_num [0:11] - Indicates the index of the RB which was closed
- * @closed_fr_num [0:11] - Indicates the index of the RX Frame which was closed
- * @finished_rb_num [0:11] - Indicates the index of the current RB
- * to which the last frame was written
- * @finished_fr_num [0:11] - Indicates the index of the RX Frame
- * which was transferred
- */
-struct iwl_rb_status {
- __le16 closed_rb_num;
- __le16 closed_fr_num;
- __le16 finished_rb_num;
- __le16 finished_fr_nam;
- __le32 __unused; /* 3945 only */
-} __packed;
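As a usage sketch, the "read" index that the Rx protocol comment earlier refers to is the low 12 bits of closed_rb_num; 'rb_stts' stands for the driver's DMA-coherent mapping of this status buffer, and the helper name is illustrative.

/* Sketch: extract the 12-bit index of the last RB the device filled. */
static u16 example_rx_read_index(const struct iwl_rb_status *rb_stts)
{
	return le16_to_cpu(rb_stts->closed_rb_num) & 0x0FFF;
}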
-
-
-#define TFD_QUEUE_SIZE_MAX (256)
-#define TFD_QUEUE_SIZE_BC_DUP (64)
-#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
-#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
-#define IWL_NUM_OF_TBS 20
-
-static inline u8 iwl_legacy_get_dma_hi_addr(dma_addr_t addr)
-{
- return (sizeof(addr) > sizeof(u32) ? (addr >> 16) >> 16 : 0) & 0xF;
-}
-/**
- * struct iwl_tfd_tb - transmit buffer descriptor within transmit frame descriptor
- *
- * This structure holds the DMA address and length of one Tx buffer
- *
- * @lo: low [31:0] portion of the dma address of TX buffer
- * every even TB is unaligned on a 16-bit boundary
- * @hi_n_len: 0-3 [35:32] portion of the dma address
- * 4-15 length of the tx buffer
- */
-struct iwl_tfd_tb {
- __le32 lo;
- __le16 hi_n_len;
-} __packed;
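A hedged sketch of how one TB might be filled according to the bit layout above, reusing iwl_legacy_get_dma_hi_addr() from a few lines up; the helper name is illustrative.

/* Sketch: pack a buffer's DMA address and length into one TB, following
 * the documented layout ([35:32] in bits 0-3, length in bits 4-15). */
static void example_tfd_tb_set(struct iwl_tfd_tb *tb, dma_addr_t addr, u16 len)
{
	u16 hi_n_len = iwl_legacy_get_dma_hi_addr(addr);

	hi_n_len |= len << 4;			/* length in bits 4-15 */
	tb->lo = cpu_to_le32(addr & 0xffffffff);
	tb->hi_n_len = cpu_to_le16(hi_n_len);
}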
-
-/**
- * struct iwl_tfd
- *
- * Transmit Frame Descriptor (TFD)
- *
- * @ __reserved1[3] reserved
- * @ num_tbs 0-4 number of active tbs
- * 5 reserved
- * 6-7 padding (not used)
- * @ tbs[20] transmit frame buffer descriptors
- * @ __pad padding
- *
- * Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
- * Both driver and device share these circular buffers, each of which must be
- * contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
- *
- * Driver must indicate the physical address of the base of each
- * circular buffer via the FH_MEM_CBBC_QUEUE registers.
- *
- * Each TFD contains pointer/size information for up to 20 data buffers
- * in host DRAM. These buffers collectively contain the (one) frame described
- * by the TFD. Each buffer must be a single contiguous block of memory within
- * itself, but buffers may be scattered in host DRAM. Each buffer has max size
- * of (4K - 4). The 4965 concatenates all of a TFD's buffers into a single
- * Tx frame, up to 8 KBytes in size.
- *
- * A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
- */
-struct iwl_tfd {
- u8 __reserved1[3];
- u8 num_tbs;
- struct iwl_tfd_tb tbs[IWL_NUM_OF_TBS];
- __le32 __pad;
-} __packed;
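The comment above sizes each Tx circular buffer as 256 TFDs x 128 bytes; a compile-time sanity check of that arithmetic, purely illustrative, could look like this.

/* Illustrative check that struct iwl_tfd really is 128 bytes, so one
 * full queue of 256 TFDs occupies 32 KBytes as stated above. */
static inline void example_check_tfd_layout(void)
{
	BUILD_BUG_ON(sizeof(struct iwl_tfd) != 128);
	BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX * sizeof(struct iwl_tfd) != 32 * 1024);
}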
-
-/* Keep Warm Size */
-#define IWL_KW_SIZE 0x1000 /* 4k */
-
-#endif /* !__iwl_legacy_fh_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c
deleted file mode 100644
index ce1fc9f..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-hcmd.c
+++ /dev/null
@@ -1,271 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/sched.h>
-#include <net/mac80211.h>
-
-#include "iwl-dev.h"
-#include "iwl-debug.h"
-#include "iwl-eeprom.h"
-#include "iwl-core.h"
-
-
-const char *iwl_legacy_get_cmd_string(u8 cmd)
-{
- switch (cmd) {
- IWL_CMD(REPLY_ALIVE);
- IWL_CMD(REPLY_ERROR);
- IWL_CMD(REPLY_RXON);
- IWL_CMD(REPLY_RXON_ASSOC);
- IWL_CMD(REPLY_QOS_PARAM);
- IWL_CMD(REPLY_RXON_TIMING);
- IWL_CMD(REPLY_ADD_STA);
- IWL_CMD(REPLY_REMOVE_STA);
- IWL_CMD(REPLY_WEPKEY);
- IWL_CMD(REPLY_3945_RX);
- IWL_CMD(REPLY_TX);
- IWL_CMD(REPLY_RATE_SCALE);
- IWL_CMD(REPLY_LEDS_CMD);
- IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
- IWL_CMD(REPLY_CHANNEL_SWITCH);
- IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
- IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
- IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
- IWL_CMD(POWER_TABLE_CMD);
- IWL_CMD(PM_SLEEP_NOTIFICATION);
- IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
- IWL_CMD(REPLY_SCAN_CMD);
- IWL_CMD(REPLY_SCAN_ABORT_CMD);
- IWL_CMD(SCAN_START_NOTIFICATION);
- IWL_CMD(SCAN_RESULTS_NOTIFICATION);
- IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
- IWL_CMD(BEACON_NOTIFICATION);
- IWL_CMD(REPLY_TX_BEACON);
- IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
- IWL_CMD(REPLY_BT_CONFIG);
- IWL_CMD(REPLY_STATISTICS_CMD);
- IWL_CMD(STATISTICS_NOTIFICATION);
- IWL_CMD(CARD_STATE_NOTIFICATION);
- IWL_CMD(MISSED_BEACONS_NOTIFICATION);
- IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
- IWL_CMD(SENSITIVITY_CMD);
- IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
- IWL_CMD(REPLY_RX_PHY_CMD);
- IWL_CMD(REPLY_RX_MPDU_CMD);
- IWL_CMD(REPLY_RX);
- IWL_CMD(REPLY_COMPRESSED_BA);
- default:
- return "UNKNOWN";
-
- }
-}
-EXPORT_SYMBOL(iwl_legacy_get_cmd_string);
-
-#define HOST_COMPLETE_TIMEOUT (HZ / 2)
-
-static void iwl_legacy_generic_cmd_callback(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt)
-{
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
- iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
- return;
- }
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- switch (cmd->hdr.cmd) {
- case REPLY_TX_LINK_QUALITY_CMD:
- case SENSITIVITY_CMD:
- IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
- iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
- break;
- default:
- IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
- iwl_legacy_get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
- }
-#endif
-}
-
-static int
-iwl_legacy_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
- int ret;
-
- BUG_ON(!(cmd->flags & CMD_ASYNC));
-
- /* An asynchronous command can not expect an SKB to be set. */
- BUG_ON(cmd->flags & CMD_WANT_SKB);
-
- /* Assign a generic callback if one is not provided */
- if (!cmd->callback)
- cmd->callback = iwl_legacy_generic_cmd_callback;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return -EBUSY;
-
- ret = iwl_legacy_enqueue_hcmd(priv, cmd);
- if (ret < 0) {
- IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
- iwl_legacy_get_cmd_string(cmd->id), ret);
- return ret;
- }
- return 0;
-}
-
-int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
- int cmd_idx;
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- BUG_ON(cmd->flags & CMD_ASYNC);
-
- /* A synchronous command can not have a callback set. */
- BUG_ON(cmd->callback);
-
- IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
- iwl_legacy_get_cmd_string(cmd->id));
-
- set_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
- iwl_legacy_get_cmd_string(cmd->id));
-
- cmd_idx = iwl_legacy_enqueue_hcmd(priv, cmd);
- if (cmd_idx < 0) {
- ret = cmd_idx;
- IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
- iwl_legacy_get_cmd_string(cmd->id), ret);
- goto out;
- }
-
- ret = wait_event_timeout(priv->wait_command_queue,
- !test_bit(STATUS_HCMD_ACTIVE, &priv->status),
- HOST_COMPLETE_TIMEOUT);
- if (!ret) {
- if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
- IWL_ERR(priv,
- "Error sending %s: time out after %dms.\n",
- iwl_legacy_get_cmd_string(cmd->id),
- jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
-
- clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv,
- "Clearing HCMD_ACTIVE for command %s\n",
- iwl_legacy_get_cmd_string(cmd->id));
- ret = -ETIMEDOUT;
- goto cancel;
- }
- }
-
- if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
- IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
- iwl_legacy_get_cmd_string(cmd->id));
- ret = -ECANCELED;
- goto fail;
- }
- if (test_bit(STATUS_FW_ERROR, &priv->status)) {
- IWL_ERR(priv, "Command %s failed: FW Error\n",
- iwl_legacy_get_cmd_string(cmd->id));
- ret = -EIO;
- goto fail;
- }
- if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
- IWL_ERR(priv, "Error: Response NULL in '%s'\n",
- iwl_legacy_get_cmd_string(cmd->id));
- ret = -EIO;
- goto cancel;
- }
-
- ret = 0;
- goto out;
-
-cancel:
- if (cmd->flags & CMD_WANT_SKB) {
- /*
- * Cancel the CMD_WANT_SKB flag for the cmd in the
- * TX cmd queue. Otherwise in case the cmd comes
- * in later, it will possibly set an invalid
- * address (cmd->meta.source).
- */
- priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
- ~CMD_WANT_SKB;
- }
-fail:
- if (cmd->reply_page) {
- iwl_legacy_free_pages(priv, cmd->reply_page);
- cmd->reply_page = 0;
- }
-out:
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_send_cmd_sync);
-
-int iwl_legacy_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
- if (cmd->flags & CMD_ASYNC)
- return iwl_legacy_send_cmd_async(priv, cmd);
-
- return iwl_legacy_send_cmd_sync(priv, cmd);
-}
-EXPORT_SYMBOL(iwl_legacy_send_cmd);
-
-int
-iwl_legacy_send_cmd_pdu(struct iwl_priv *priv, u8 id, u16 len, const void *data)
-{
- struct iwl_host_cmd cmd = {
- .id = id,
- .len = len,
- .data = data,
- };
-
- return iwl_legacy_send_cmd_sync(priv, &cmd);
-}
-EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu);
-
-int iwl_legacy_send_cmd_pdu_async(struct iwl_priv *priv,
- u8 id, u16 len, const void *data,
- void (*callback)(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt))
-{
- struct iwl_host_cmd cmd = {
- .id = id,
- .len = len,
- .data = data,
- };
-
- cmd.flags |= CMD_ASYNC;
- cmd.callback = callback;
-
- return iwl_legacy_send_cmd_async(priv, &cmd);
-}
-EXPORT_SYMBOL(iwl_legacy_send_cmd_pdu_async);
diff --git a/drivers/net/wireless/iwlegacy/iwl-helpers.h b/drivers/net/wireless/iwlegacy/iwl-helpers.h
deleted file mode 100644
index 5cf23ea..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-helpers.h
+++ /dev/null
@@ -1,196 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_helpers_h__
-#define __iwl_legacy_helpers_h__
-
-#include <linux/ctype.h>
-#include <net/mac80211.h>
-
-#include "iwl-io.h"
-
-#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
-
-
-static inline struct ieee80211_conf *iwl_legacy_ieee80211_get_hw_conf(
- struct ieee80211_hw *hw)
-{
- return &hw->conf;
-}
-
-/**
- * iwl_legacy_queue_inc_wrap - increment queue index, wrap back to beginning
- * @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
- */
-static inline int iwl_legacy_queue_inc_wrap(int index, int n_bd)
-{
- return ++index & (n_bd - 1);
-}
-
-/**
- * iwl_legacy_queue_dec_wrap - decrement queue index, wrap back to end
- * @index -- current index
- * @n_bd -- total number of entries in queue (must be power of 2)
- */
-static inline int iwl_legacy_queue_dec_wrap(int index, int n_bd)
-{
- return --index & (n_bd - 1);
-}
-
-/* TODO: Move fw_desc functions to iwl-pci.ko */
-static inline void iwl_legacy_free_fw_desc(struct pci_dev *pci_dev,
- struct fw_desc *desc)
-{
- if (desc->v_addr)
- dma_free_coherent(&pci_dev->dev, desc->len,
- desc->v_addr, desc->p_addr);
- desc->v_addr = NULL;
- desc->len = 0;
-}
-
-static inline int iwl_legacy_alloc_fw_desc(struct pci_dev *pci_dev,
- struct fw_desc *desc)
-{
- if (!desc->len) {
- desc->v_addr = NULL;
- return -EINVAL;
- }
-
- desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
- &desc->p_addr, GFP_KERNEL);
- return (desc->v_addr != NULL) ? 0 : -ENOMEM;
-}
-
-/*
- * we have 8 bits used like this:
- *
- * 7 6 5 4 3 2 1 0
- * | | | | | | | |
- * | | | | | | +-+-------- AC queue (0-3)
- * | | | | | |
- * | +-+-+-+-+------------ HW queue ID
- * |
- * +---------------------- unused
- */
-static inline void
-iwl_legacy_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
-{
- BUG_ON(ac > 3); /* only have 2 bits */
- BUG_ON(hwq > 31); /* only use 5 bits */
-
- txq->swq_id = (hwq << 2) | ac;
-}
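For clarity, the inverse of the packing above (the same decode that iwl_legacy_wake_queue() below does inline) can be written as a small illustrative helper:

/* Sketch: recover the AC and HW queue ID from the packed swq_id byte. */
static inline void example_get_swq_id(u8 swq_id, u8 *ac, u8 *hwq)
{
	*ac = swq_id & 3;		/* bits 0-1 */
	*hwq = (swq_id >> 2) & 0x1f;	/* bits 2-6 */
}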
-
-static inline void iwl_legacy_wake_queue(struct iwl_priv *priv,
- struct iwl_tx_queue *txq)
-{
- u8 queue = txq->swq_id;
- u8 ac = queue & 3;
- u8 hwq = (queue >> 2) & 0x1f;
-
- if (test_and_clear_bit(hwq, priv->queue_stopped))
- if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
- ieee80211_wake_queue(priv->hw, ac);
-}
-
-static inline void iwl_legacy_stop_queue(struct iwl_priv *priv,
- struct iwl_tx_queue *txq)
-{
- u8 queue = txq->swq_id;
- u8 ac = queue & 3;
- u8 hwq = (queue >> 2) & 0x1f;
-
- if (!test_and_set_bit(hwq, priv->queue_stopped))
- if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
- ieee80211_stop_queue(priv->hw, ac);
-}
-
-#ifdef ieee80211_stop_queue
-#undef ieee80211_stop_queue
-#endif
-
-#define ieee80211_stop_queue DO_NOT_USE_ieee80211_stop_queue
-
-#ifdef ieee80211_wake_queue
-#undef ieee80211_wake_queue
-#endif
-
-#define ieee80211_wake_queue DO_NOT_USE_ieee80211_wake_queue
-
-static inline void iwl_legacy_disable_interrupts(struct iwl_priv *priv)
-{
- clear_bit(STATUS_INT_ENABLED, &priv->status);
-
- /* disable interrupts from uCode/NIC to host */
- iwl_write32(priv, CSR_INT_MASK, 0x00000000);
-
- /* acknowledge/clear/reset any interrupts still pending
- * from uCode or flow handler (Rx/Tx DMA) */
- iwl_write32(priv, CSR_INT, 0xffffffff);
- iwl_write32(priv, CSR_FH_INT_STATUS, 0xffffffff);
- IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
-}
-
-static inline void iwl_legacy_enable_rfkill_int(struct iwl_priv *priv)
-{
- IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n");
- iwl_write32(priv, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
-}
-
-static inline void iwl_legacy_enable_interrupts(struct iwl_priv *priv)
-{
- IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
- set_bit(STATUS_INT_ENABLED, &priv->status);
- iwl_write32(priv, CSR_INT_MASK, priv->inta_mask);
-}
-
-/**
- * iwl_legacy_beacon_time_mask_low - mask of lower 32 bit of beacon time
- * @priv -- pointer to iwl_priv data structure
- * @tsf_bits -- number of bits needed to shift for masking
- */
-static inline u32 iwl_legacy_beacon_time_mask_low(struct iwl_priv *priv,
- u16 tsf_bits)
-{
- return (1 << tsf_bits) - 1;
-}
-
-/**
- * iwl_legacy_beacon_time_mask_high - mask of higher 32 bit of beacon time
- * @priv -- pointer to iwl_priv data structure
- * @tsf_bits -- number of bits needed to shift for masking
- */
-static inline u32 iwl_legacy_beacon_time_mask_high(struct iwl_priv *priv,
- u16 tsf_bits)
-{
- return ((1 << (32 - tsf_bits)) - 1) << tsf_bits;
-}
-
-#endif /* __iwl_legacy_helpers_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-io.h b/drivers/net/wireless/iwlegacy/iwl-io.h
deleted file mode 100644
index 5cc5d34..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-io.h
+++ /dev/null
@@ -1,545 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_io_h__
-#define __iwl_legacy_io_h__
-
-#include <linux/io.h>
-
-#include "iwl-dev.h"
-#include "iwl-debug.h"
-#include "iwl-devtrace.h"
-
-/*
- * IO, register, and NIC memory access functions
- *
- * NOTE on naming convention and macro usage for these
- *
- * A single _ prefix before an access function means that no state
- * check or debug information is printed when that function is called.
- *
- * A double __ prefix before an access function means that state is checked
- * and the current line number and caller function name are printed in addition
- * to any other debug output.
- *
- * The non-prefixed name is the #define that maps the caller into a
- * #define that provides the caller's name and __LINE__ to the double
- * prefix version.
- *
- * If you wish to call the function without any debug or state checking,
- * you should use the single _ prefix version (as is used by dependent IO
- * routines, for example _iwl_legacy_read_direct32 calls the non-check version of
- * _iwl_legacy_read32.)
- *
- * These declarations are *extremely* useful in quickly isolating code deltas
- * which result in misconfiguration of the hardware I/O. In combination with
- * git-bisect and the IO debug level you can quickly determine the specific
- * commit which breaks the IO sequence to the hardware.
- *
- */
-
-static inline void _iwl_legacy_write8(struct iwl_priv *priv, u32 ofs, u8 val)
-{
- trace_iwlwifi_legacy_dev_iowrite8(priv, ofs, val);
- iowrite8(val, priv->hw_base + ofs);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void
-__iwl_legacy_write8(const char *f, u32 l, struct iwl_priv *priv,
- u32 ofs, u8 val)
-{
- IWL_DEBUG_IO(priv, "write8(0x%08X, 0x%02X) - %s %d\n", ofs, val, f, l);
- _iwl_legacy_write8(priv, ofs, val);
-}
-#define iwl_write8(priv, ofs, val) \
- __iwl_legacy_write8(__FILE__, __LINE__, priv, ofs, val)
-#else
-#define iwl_write8(priv, ofs, val) _iwl_legacy_write8(priv, ofs, val)
-#endif
-
-
-static inline void _iwl_legacy_write32(struct iwl_priv *priv, u32 ofs, u32 val)
-{
- trace_iwlwifi_legacy_dev_iowrite32(priv, ofs, val);
- iowrite32(val, priv->hw_base + ofs);
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void
-__iwl_legacy_write32(const char *f, u32 l, struct iwl_priv *priv,
- u32 ofs, u32 val)
-{
- IWL_DEBUG_IO(priv, "write32(0x%08X, 0x%08X) - %s %d\n", ofs, val, f, l);
- _iwl_legacy_write32(priv, ofs, val);
-}
-#define iwl_write32(priv, ofs, val) \
- __iwl_legacy_write32(__FILE__, __LINE__, priv, ofs, val)
-#else
-#define iwl_write32(priv, ofs, val) _iwl_legacy_write32(priv, ofs, val)
-#endif
-
-static inline u32 _iwl_legacy_read32(struct iwl_priv *priv, u32 ofs)
-{
- u32 val = ioread32(priv->hw_base + ofs);
- trace_iwlwifi_legacy_dev_ioread32(priv, ofs, val);
- return val;
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline u32
-__iwl_legacy_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs)
-{
- IWL_DEBUG_IO(priv, "read_direct32(0x%08X) - %s %d\n", ofs, f, l);
- return _iwl_legacy_read32(priv, ofs);
-}
-#define iwl_read32(priv, ofs) __iwl_legacy_read32(__FILE__, __LINE__, priv, ofs)
-#else
-#define iwl_read32(p, o) _iwl_legacy_read32(p, o)
-#endif
-
-#define IWL_POLL_INTERVAL 10 /* microseconds */
-static inline int
-_iwl_legacy_poll_bit(struct iwl_priv *priv, u32 addr,
- u32 bits, u32 mask, int timeout)
-{
- int t = 0;
-
- do {
- if ((_iwl_legacy_read32(priv, addr) & mask) == (bits & mask))
- return t;
- udelay(IWL_POLL_INTERVAL);
- t += IWL_POLL_INTERVAL;
- } while (t < timeout);
-
- return -ETIMEDOUT;
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline int __iwl_legacy_poll_bit(const char *f, u32 l,
- struct iwl_priv *priv, u32 addr,
- u32 bits, u32 mask, int timeout)
-{
- int ret = _iwl_legacy_poll_bit(priv, addr, bits, mask, timeout);
- IWL_DEBUG_IO(priv, "poll_bit(0x%08X, 0x%08X, 0x%08X) - %s- %s %d\n",
- addr, bits, mask,
- unlikely(ret == -ETIMEDOUT) ? "timeout" : "", f, l);
- return ret;
-}
-#define iwl_poll_bit(priv, addr, bits, mask, timeout) \
- __iwl_legacy_poll_bit(__FILE__, __LINE__, priv, addr, \
- bits, mask, timeout)
-#else
-#define iwl_poll_bit(p, a, b, m, t) _iwl_legacy_poll_bit(p, a, b, m, t)
-#endif
-
-static inline void _iwl_legacy_set_bit(struct iwl_priv *priv, u32 reg, u32 mask)
-{
- _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) | mask);
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void __iwl_legacy_set_bit(const char *f, u32 l,
- struct iwl_priv *priv, u32 reg, u32 mask)
-{
- u32 val = _iwl_legacy_read32(priv, reg) | mask;
- IWL_DEBUG_IO(priv, "set_bit(0x%08X, 0x%08X) = 0x%08X\n", reg,
- mask, val);
- _iwl_legacy_write32(priv, reg, val);
-}
-static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&p->reg_lock, reg_flags);
- __iwl_legacy_set_bit(__FILE__, __LINE__, p, r, m);
- spin_unlock_irqrestore(&p->reg_lock, reg_flags);
-}
-#else
-static inline void iwl_legacy_set_bit(struct iwl_priv *p, u32 r, u32 m)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&p->reg_lock, reg_flags);
- _iwl_legacy_set_bit(p, r, m);
- spin_unlock_irqrestore(&p->reg_lock, reg_flags);
-}
-#endif
-
-static inline void
-_iwl_legacy_clear_bit(struct iwl_priv *priv, u32 reg, u32 mask)
-{
- _iwl_legacy_write32(priv, reg, _iwl_legacy_read32(priv, reg) & ~mask);
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void
-__iwl_legacy_clear_bit(const char *f, u32 l,
- struct iwl_priv *priv, u32 reg, u32 mask)
-{
- u32 val = _iwl_legacy_read32(priv, reg) & ~mask;
- IWL_DEBUG_IO(priv, "clear_bit(0x%08X, 0x%08X) = 0x%08X\n", reg, mask, val);
- _iwl_legacy_write32(priv, reg, val);
-}
-static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&p->reg_lock, reg_flags);
- __iwl_legacy_clear_bit(__FILE__, __LINE__, p, r, m);
- spin_unlock_irqrestore(&p->reg_lock, reg_flags);
-}
-#else
-static inline void iwl_legacy_clear_bit(struct iwl_priv *p, u32 r, u32 m)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&p->reg_lock, reg_flags);
- _iwl_legacy_clear_bit(p, r, m);
- spin_unlock_irqrestore(&p->reg_lock, reg_flags);
-}
-#endif
-
-static inline int _iwl_legacy_grab_nic_access(struct iwl_priv *priv)
-{
- int ret;
- u32 val;
-
- /* this bit wakes up the NIC */
- _iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
- /*
- * These bits say the device is running, and should keep running for
- * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
- * but they do not indicate that embedded SRAM is restored yet;
- * 3945 and 4965 have volatile SRAM, and must save/restore contents
- * to/from host DRAM when sleeping/waking for power-saving.
- * Each direction takes approximately 1/4 millisecond; with this
- * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
- * series of register accesses are expected (e.g. reading Event Log),
- * to keep device from sleeping.
- *
- * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
- * SRAM is okay/restored. We don't check that here because this call
- * is just for hardware register access; but GP1 MAC_SLEEP check is a
- * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
- *
- */
- ret = _iwl_legacy_poll_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
- (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
- CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
- if (ret < 0) {
- val = _iwl_legacy_read32(priv, CSR_GP_CNTRL);
- IWL_ERR(priv,
- "MAC is in deep sleep!. CSR_GP_CNTRL = 0x%08X\n", val);
- _iwl_legacy_write32(priv, CSR_RESET,
- CSR_RESET_REG_FLAG_FORCE_NMI);
- return -EIO;
- }
-
- return 0;
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline int __iwl_legacy_grab_nic_access(const char *f, u32 l,
- struct iwl_priv *priv)
-{
- IWL_DEBUG_IO(priv, "grabbing nic access - %s %d\n", f, l);
- return _iwl_legacy_grab_nic_access(priv);
-}
-#define iwl_grab_nic_access(priv) \
- __iwl_legacy_grab_nic_access(__FILE__, __LINE__, priv)
-#else
-#define iwl_grab_nic_access(priv) \
- _iwl_legacy_grab_nic_access(priv)
-#endif
-
-static inline void _iwl_legacy_release_nic_access(struct iwl_priv *priv)
-{
- _iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline void __iwl_legacy_release_nic_access(const char *f, u32 l,
- struct iwl_priv *priv)
-{
-
- IWL_DEBUG_IO(priv, "releasing nic access - %s %d\n", f, l);
- _iwl_legacy_release_nic_access(priv);
-}
-#define iwl_release_nic_access(priv) \
- __iwl_legacy_release_nic_access(__FILE__, __LINE__, priv)
-#else
-#define iwl_release_nic_access(priv) \
- _iwl_legacy_release_nic_access(priv)
-#endif
-
-static inline u32 _iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
-{
- return _iwl_legacy_read32(priv, reg);
-}
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline u32 __iwl_legacy_read_direct32(const char *f, u32 l,
- struct iwl_priv *priv, u32 reg)
-{
- u32 value = _iwl_legacy_read_direct32(priv, reg);
- IWL_DEBUG_IO(priv,
- "read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value,
- f, l);
- return value;
-}
-static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
-{
- u32 value;
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
- value = __iwl_legacy_read_direct32(__FILE__, __LINE__, priv, reg);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
- return value;
-}
-
-#else
-static inline u32 iwl_legacy_read_direct32(struct iwl_priv *priv, u32 reg)
-{
- u32 value;
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
- value = _iwl_legacy_read_direct32(priv, reg);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
- return value;
-
-}
-#endif
-
-static inline void _iwl_legacy_write_direct32(struct iwl_priv *priv,
- u32 reg, u32 value)
-{
- _iwl_legacy_write32(priv, reg, value);
-}
-static inline void
-iwl_legacy_write_direct32(struct iwl_priv *priv, u32 reg, u32 value)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- if (!iwl_grab_nic_access(priv)) {
- _iwl_legacy_write_direct32(priv, reg, value);
- iwl_release_nic_access(priv);
- }
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-static inline void iwl_legacy_write_reg_buf(struct iwl_priv *priv,
- u32 reg, u32 len, u32 *values)
-{
- u32 count = sizeof(u32);
-
- if ((priv != NULL) && (values != NULL)) {
- for (; 0 < len; len -= count, reg += count, values++)
- iwl_legacy_write_direct32(priv, reg, *values);
- }
-}
-
-static inline int _iwl_legacy_poll_direct_bit(struct iwl_priv *priv, u32 addr,
- u32 mask, int timeout)
-{
- int t = 0;
-
- do {
- if ((iwl_legacy_read_direct32(priv, addr) & mask) == mask)
- return t;
- udelay(IWL_POLL_INTERVAL);
- t += IWL_POLL_INTERVAL;
- } while (t < timeout);
-
- return -ETIMEDOUT;
-}
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static inline int __iwl_legacy_poll_direct_bit(const char *f, u32 l,
- struct iwl_priv *priv,
- u32 addr, u32 mask, int timeout)
-{
- int ret = _iwl_legacy_poll_direct_bit(priv, addr, mask, timeout);
-
- if (unlikely(ret == -ETIMEDOUT))
- IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) - "
- "timedout - %s %d\n", addr, mask, f, l);
- else
- IWL_DEBUG_IO(priv, "poll_direct_bit(0x%08X, 0x%08X) = 0x%08X "
- "- %s %d\n", addr, mask, ret, f, l);
- return ret;
-}
-#define iwl_poll_direct_bit(priv, addr, mask, timeout) \
-__iwl_legacy_poll_direct_bit(__FILE__, __LINE__, priv, addr, mask, timeout)
-#else
-#define iwl_poll_direct_bit _iwl_legacy_poll_direct_bit
-#endif
-
-static inline u32 _iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
-{
- _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_RADDR, reg | (3 << 24));
- rmb();
- return _iwl_legacy_read_direct32(priv, HBUS_TARG_PRPH_RDAT);
-}
-static inline u32 iwl_legacy_read_prph(struct iwl_priv *priv, u32 reg)
-{
- unsigned long reg_flags;
- u32 val;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
- val = _iwl_legacy_read_prph(priv, reg);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
- return val;
-}
-
-static inline void _iwl_legacy_write_prph(struct iwl_priv *priv,
- u32 addr, u32 val)
-{
- _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WADDR,
- ((addr & 0x0000FFFF) | (3 << 24)));
- wmb();
- _iwl_legacy_write_direct32(priv, HBUS_TARG_PRPH_WDAT, val);
-}
-
-static inline void
-iwl_legacy_write_prph(struct iwl_priv *priv, u32 addr, u32 val)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- if (!iwl_grab_nic_access(priv)) {
- _iwl_legacy_write_prph(priv, addr, val);
- iwl_release_nic_access(priv);
- }
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-#define _iwl_legacy_set_bits_prph(priv, reg, mask) \
-_iwl_legacy_write_prph(priv, reg, (_iwl_legacy_read_prph(priv, reg) | mask))
-
-static inline void
-iwl_legacy_set_bits_prph(struct iwl_priv *priv, u32 reg, u32 mask)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
- _iwl_legacy_set_bits_prph(priv, reg, mask);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-#define _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask) \
-_iwl_legacy_write_prph(priv, reg, \
- ((_iwl_legacy_read_prph(priv, reg) & mask) | bits))
-
-static inline void iwl_legacy_set_bits_mask_prph(struct iwl_priv *priv, u32 reg,
- u32 bits, u32 mask)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
- _iwl_legacy_set_bits_mask_prph(priv, reg, bits, mask);
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-static inline void iwl_legacy_clear_bits_prph(struct iwl_priv
- *priv, u32 reg, u32 mask)
-{
- unsigned long reg_flags;
- u32 val;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
- val = _iwl_legacy_read_prph(priv, reg);
- _iwl_legacy_write_prph(priv, reg, (val & ~mask));
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-static inline u32 iwl_legacy_read_targ_mem(struct iwl_priv *priv, u32 addr)
-{
- unsigned long reg_flags;
- u32 value;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- iwl_grab_nic_access(priv);
-
- _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR, addr);
- rmb();
- value = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
-
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
- return value;
-}
-
-static inline void
-iwl_legacy_write_targ_mem(struct iwl_priv *priv, u32 addr, u32 val)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- if (!iwl_grab_nic_access(priv)) {
- _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
- wmb();
- _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WDAT, val);
- iwl_release_nic_access(priv);
- }
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-
-static inline void
-iwl_legacy_write_targ_mem_buf(struct iwl_priv *priv, u32 addr,
- u32 len, u32 *values)
-{
- unsigned long reg_flags;
-
- spin_lock_irqsave(&priv->reg_lock, reg_flags);
- if (!iwl_grab_nic_access(priv)) {
- _iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_WADDR, addr);
- wmb();
- for (; 0 < len; len -= sizeof(u32), values++)
- _iwl_legacy_write_direct32(priv,
- HBUS_TARG_MEM_WDAT, *values);
-
- iwl_release_nic_access(priv);
- }
- spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
-}
-#endif
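The helpers deleted above all follow the same periphery (PRPH) access discipline: take reg_lock, grab NIC access, write an indirect address word to HBUS_TARG_PRPH_WADDR, issue a write barrier, then touch the data register. A minimal userspace sketch of just the address-word composition used by the write path (the 0x0000FFFF mask and the 3 << 24 tag come from _iwl_legacy_write_prph() above; everything else here is illustrative, not driver code):

#include <stdio.h>
#include <stdint.h>

/* Compose the word written to HBUS_TARG_PRPH_WADDR before a periphery
 * write: the low 16 bits carry the PRPH address, bits 24-25 carry the
 * access tag (3) used by the deleted _iwl_legacy_write_prph(). */
static uint32_t prph_waddr_word(uint32_t addr)
{
	return (addr & 0x0000FFFFu) | (3u << 24);
}

int main(void)
{
	uint32_t addr = 0x3000;	/* hypothetical PRPH register offset */

	printf("HBUS_TARG_PRPH_WADDR <- 0x%08x\n", prph_waddr_word(addr));
	return 0;
}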
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.c b/drivers/net/wireless/iwlegacy/iwl-led.c
deleted file mode 100644
index dc568a4..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-led.c
+++ /dev/null
@@ -1,205 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <asm/unaligned.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-
-/* default: IWL_LED_BLINK(0) using blinking index table */
-static int led_mode;
-module_param(led_mode, int, S_IRUGO);
-MODULE_PARM_DESC(led_mode, "0=system default, "
- "1=On(RF On)/Off(RF Off), 2=blinking");
-
-/* Throughput OFF time(ms) ON time (ms)
- * >300 25 25
- * >200 to 300 40 40
- * >100 to 200 55 55
- * >70 to 100 65 65
- * >50 to 70 75 75
- * >20 to 50 85 85
- * >10 to 20 95 95
- * >5 to 10 110 110
- * >1 to 5 130 130
- * >0 to 1 167 167
- * <=0 SOLID ON
- */
-static const struct ieee80211_tpt_blink iwl_blink[] = {
- { .throughput = 0, .blink_time = 334 },
- { .throughput = 1 * 1024 - 1, .blink_time = 260 },
- { .throughput = 5 * 1024 - 1, .blink_time = 220 },
- { .throughput = 10 * 1024 - 1, .blink_time = 190 },
- { .throughput = 20 * 1024 - 1, .blink_time = 170 },
- { .throughput = 50 * 1024 - 1, .blink_time = 150 },
- { .throughput = 70 * 1024 - 1, .blink_time = 130 },
- { .throughput = 100 * 1024 - 1, .blink_time = 110 },
- { .throughput = 200 * 1024 - 1, .blink_time = 80 },
- { .throughput = 300 * 1024 - 1, .blink_time = 50 },
-};
-
-/*
- * Adjust LED blink rate to compensate for the MAC clock deviation of each HW.
- * LED blink rate analysis showed an average deviation of 0% on 3945 and
- * 5% on 4965 HW.
- * The LED on/off time needs to be compensated per HW according to that
- * deviation to achieve the desired LED frequency.
- * The calculation is: (100-averageDeviation)/100 * blinkTime
- * For code efficiency the calculation will be:
- * compensation = (100 - averageDeviation) * 64 / 100
- * NewBlinkTime = (compensation * BlinkTime) / 64
- */
-static inline u8 iwl_legacy_blink_compensation(struct iwl_priv *priv,
- u8 time, u16 compensation)
-{
- if (!compensation) {
- IWL_ERR(priv, "undefined blink compensation: "
- "use pre-defined blinking time\n");
- return time;
- }
-
- return (u8)((time * compensation) >> 6);
-}
-
-/* Set led pattern command */
-static int iwl_legacy_led_cmd(struct iwl_priv *priv,
- unsigned long on,
- unsigned long off)
-{
- struct iwl_led_cmd led_cmd = {
- .id = IWL_LED_LINK,
- .interval = IWL_DEF_LED_INTRVL
- };
- int ret;
-
- if (!test_bit(STATUS_READY, &priv->status))
- return -EBUSY;
-
- if (priv->blink_on == on && priv->blink_off == off)
- return 0;
-
- if (off == 0) {
- /* led is SOLID_ON */
- on = IWL_LED_SOLID;
- }
-
- IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
- priv->cfg->base_params->led_compensation);
- led_cmd.on = iwl_legacy_blink_compensation(priv, on,
- priv->cfg->base_params->led_compensation);
- led_cmd.off = iwl_legacy_blink_compensation(priv, off,
- priv->cfg->base_params->led_compensation);
-
- ret = priv->cfg->ops->led->cmd(priv, &led_cmd);
- if (!ret) {
- priv->blink_on = on;
- priv->blink_off = off;
- }
- return ret;
-}
-
-static void iwl_legacy_led_brightness_set(struct led_classdev *led_cdev,
- enum led_brightness brightness)
-{
- struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
- unsigned long on = 0;
-
- if (brightness > 0)
- on = IWL_LED_SOLID;
-
- iwl_legacy_led_cmd(priv, on, 0);
-}
-
-static int iwl_legacy_led_blink_set(struct led_classdev *led_cdev,
- unsigned long *delay_on,
- unsigned long *delay_off)
-{
- struct iwl_priv *priv = container_of(led_cdev, struct iwl_priv, led);
-
- return iwl_legacy_led_cmd(priv, *delay_on, *delay_off);
-}
-
-void iwl_legacy_leds_init(struct iwl_priv *priv)
-{
- int mode = led_mode;
- int ret;
-
- if (mode == IWL_LED_DEFAULT)
- mode = priv->cfg->led_mode;
-
- priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
- wiphy_name(priv->hw->wiphy));
- priv->led.brightness_set = iwl_legacy_led_brightness_set;
- priv->led.blink_set = iwl_legacy_led_blink_set;
- priv->led.max_brightness = 1;
-
- switch (mode) {
- case IWL_LED_DEFAULT:
- WARN_ON(1);
- break;
- case IWL_LED_BLINK:
- priv->led.default_trigger =
- ieee80211_create_tpt_led_trigger(priv->hw,
- IEEE80211_TPT_LEDTRIG_FL_CONNECTED,
- iwl_blink, ARRAY_SIZE(iwl_blink));
- break;
- case IWL_LED_RF_STATE:
- priv->led.default_trigger =
- ieee80211_get_radio_led_name(priv->hw);
- break;
- }
-
- ret = led_classdev_register(&priv->pci_dev->dev, &priv->led);
- if (ret) {
- kfree(priv->led.name);
- return;
- }
-
- priv->led_registered = true;
-}
-EXPORT_SYMBOL(iwl_legacy_leds_init);
-
-void iwl_legacy_leds_exit(struct iwl_priv *priv)
-{
- if (!priv->led_registered)
- return;
-
- led_classdev_unregister(&priv->led);
- kfree(priv->led.name);
-}
-EXPORT_SYMBOL(iwl_legacy_leds_exit);
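The compensation arithmetic described in the comment above iwl_legacy_blink_compensation() reduces the (100 - deviation)/100 scaling to an integer multiply and a shift. A standalone sketch of that arithmetic, assuming the 5% deviation quoted for 4965 hardware and an arbitrary sample on-time:

#include <stdio.h>
#include <stdint.h>

/* compensation = (100 - averageDeviation) * 64 / 100; a deviation of 0
 * yields exactly 64, i.e. no change after the shift below. */
static uint16_t blink_compensation(unsigned int deviation_pct)
{
	return (uint16_t)((100 - deviation_pct) * 64 / 100);
}

/* NewBlinkTime = (compensation * BlinkTime) / 64, i.e. the >> 6 performed
 * by the deleted iwl_legacy_blink_compensation(). */
static uint8_t compensate_blink(uint8_t time_ms, uint16_t compensation)
{
	return (uint8_t)(((unsigned int)time_ms * compensation) >> 6);
}

int main(void)
{
	uint16_t comp = blink_compensation(5);	/* ~5% deviation (4965 HW) */

	printf("compensation=%u, 25 ms on-time -> %u ms\n",
	       comp, compensate_blink(25, comp));
	return 0;
}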
diff --git a/drivers/net/wireless/iwlegacy/iwl-led.h b/drivers/net/wireless/iwlegacy/iwl-led.h
deleted file mode 100644
index f0791f7..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-led.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_leds_h__
-#define __iwl_legacy_leds_h__
-
-
-struct iwl_priv;
-
-#define IWL_LED_SOLID 11
-#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
-
-#define IWL_LED_ACTIVITY (0<<1)
-#define IWL_LED_LINK (1<<1)
-
-/*
- * LED mode
- * IWL_LED_DEFAULT: use device default
- * IWL_LED_RF_STATE: turn LED on/off based on RF state
- * LED ON = RF ON
- * LED OFF = RF OFF
- * IWL_LED_BLINK: adjust led blink rate based on blink table
- */
-enum iwl_led_mode {
- IWL_LED_DEFAULT,
- IWL_LED_RF_STATE,
- IWL_LED_BLINK,
-};
-
-void iwl_legacy_leds_init(struct iwl_priv *priv);
-void iwl_legacy_leds_exit(struct iwl_priv *priv);
-
-#endif /* __iwl_legacy_leds_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h b/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
deleted file mode 100644
index 38647e4..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-legacy-rs.h
+++ /dev/null
@@ -1,456 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#ifndef __iwl_legacy_rs_h__
-#define __iwl_legacy_rs_h__
-
-struct iwl_rate_info {
- u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
- u8 plcp_siso; /* uCode API: IWL_RATE_SISO_6M_PLCP, etc. */
- u8 plcp_mimo2; /* uCode API: IWL_RATE_MIMO2_6M_PLCP, etc. */
- u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
- u8 prev_ieee; /* previous rate in IEEE speeds */
- u8 next_ieee; /* next rate in IEEE speeds */
- u8 prev_rs; /* previous rate used in rs algo */
- u8 next_rs; /* next rate used in rs algo */
- u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
- u8 next_rs_tgg; /* next rate used in TGG rs algo */
-};
-
-struct iwl3945_rate_info {
- u8 plcp; /* uCode API: IWL_RATE_6M_PLCP, etc. */
- u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */
- u8 prev_ieee; /* previous rate in IEEE speeds */
- u8 next_ieee; /* next rate in IEEE speeds */
- u8 prev_rs; /* previous rate used in rs algo */
- u8 next_rs; /* next rate used in rs algo */
- u8 prev_rs_tgg; /* previous rate used in TGG rs algo */
- u8 next_rs_tgg; /* next rate used in TGG rs algo */
- u8 table_rs_index; /* index in rate scale table cmd */
- u8 prev_table_rs; /* prev in rate table cmd */
-};
-
-
-/*
- * These serve as indexes into
- * struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
- */
-enum {
- IWL_RATE_1M_INDEX = 0,
- IWL_RATE_2M_INDEX,
- IWL_RATE_5M_INDEX,
- IWL_RATE_11M_INDEX,
- IWL_RATE_6M_INDEX,
- IWL_RATE_9M_INDEX,
- IWL_RATE_12M_INDEX,
- IWL_RATE_18M_INDEX,
- IWL_RATE_24M_INDEX,
- IWL_RATE_36M_INDEX,
- IWL_RATE_48M_INDEX,
- IWL_RATE_54M_INDEX,
- IWL_RATE_60M_INDEX,
- IWL_RATE_COUNT,
- IWL_RATE_COUNT_LEGACY = IWL_RATE_COUNT - 1, /* Excluding 60M */
- IWL_RATE_COUNT_3945 = IWL_RATE_COUNT - 1,
- IWL_RATE_INVM_INDEX = IWL_RATE_COUNT,
- IWL_RATE_INVALID = IWL_RATE_COUNT,
-};
-
-enum {
- IWL_RATE_6M_INDEX_TABLE = 0,
- IWL_RATE_9M_INDEX_TABLE,
- IWL_RATE_12M_INDEX_TABLE,
- IWL_RATE_18M_INDEX_TABLE,
- IWL_RATE_24M_INDEX_TABLE,
- IWL_RATE_36M_INDEX_TABLE,
- IWL_RATE_48M_INDEX_TABLE,
- IWL_RATE_54M_INDEX_TABLE,
- IWL_RATE_1M_INDEX_TABLE,
- IWL_RATE_2M_INDEX_TABLE,
- IWL_RATE_5M_INDEX_TABLE,
- IWL_RATE_11M_INDEX_TABLE,
- IWL_RATE_INVM_INDEX_TABLE = IWL_RATE_INVM_INDEX - 1,
-};
-
-enum {
- IWL_FIRST_OFDM_RATE = IWL_RATE_6M_INDEX,
- IWL39_LAST_OFDM_RATE = IWL_RATE_54M_INDEX,
- IWL_LAST_OFDM_RATE = IWL_RATE_60M_INDEX,
- IWL_FIRST_CCK_RATE = IWL_RATE_1M_INDEX,
- IWL_LAST_CCK_RATE = IWL_RATE_11M_INDEX,
-};
-
-/* #define vs. enum to keep from defaulting to 'large integer' */
-#define IWL_RATE_6M_MASK (1 << IWL_RATE_6M_INDEX)
-#define IWL_RATE_9M_MASK (1 << IWL_RATE_9M_INDEX)
-#define IWL_RATE_12M_MASK (1 << IWL_RATE_12M_INDEX)
-#define IWL_RATE_18M_MASK (1 << IWL_RATE_18M_INDEX)
-#define IWL_RATE_24M_MASK (1 << IWL_RATE_24M_INDEX)
-#define IWL_RATE_36M_MASK (1 << IWL_RATE_36M_INDEX)
-#define IWL_RATE_48M_MASK (1 << IWL_RATE_48M_INDEX)
-#define IWL_RATE_54M_MASK (1 << IWL_RATE_54M_INDEX)
-#define IWL_RATE_60M_MASK (1 << IWL_RATE_60M_INDEX)
-#define IWL_RATE_1M_MASK (1 << IWL_RATE_1M_INDEX)
-#define IWL_RATE_2M_MASK (1 << IWL_RATE_2M_INDEX)
-#define IWL_RATE_5M_MASK (1 << IWL_RATE_5M_INDEX)
-#define IWL_RATE_11M_MASK (1 << IWL_RATE_11M_INDEX)
-
-/* uCode API values for legacy bit rates, both OFDM and CCK */
-enum {
- IWL_RATE_6M_PLCP = 13,
- IWL_RATE_9M_PLCP = 15,
- IWL_RATE_12M_PLCP = 5,
- IWL_RATE_18M_PLCP = 7,
- IWL_RATE_24M_PLCP = 9,
- IWL_RATE_36M_PLCP = 11,
- IWL_RATE_48M_PLCP = 1,
- IWL_RATE_54M_PLCP = 3,
- IWL_RATE_60M_PLCP = 3,/*FIXME:RS:should be removed*/
- IWL_RATE_1M_PLCP = 10,
- IWL_RATE_2M_PLCP = 20,
- IWL_RATE_5M_PLCP = 55,
- IWL_RATE_11M_PLCP = 110,
- /*FIXME:RS:add IWL_RATE_LEGACY_INVM_PLCP = 0,*/
-};
-
-/* uCode API values for OFDM high-throughput (HT) bit rates */
-enum {
- IWL_RATE_SISO_6M_PLCP = 0,
- IWL_RATE_SISO_12M_PLCP = 1,
- IWL_RATE_SISO_18M_PLCP = 2,
- IWL_RATE_SISO_24M_PLCP = 3,
- IWL_RATE_SISO_36M_PLCP = 4,
- IWL_RATE_SISO_48M_PLCP = 5,
- IWL_RATE_SISO_54M_PLCP = 6,
- IWL_RATE_SISO_60M_PLCP = 7,
- IWL_RATE_MIMO2_6M_PLCP = 0x8,
- IWL_RATE_MIMO2_12M_PLCP = 0x9,
- IWL_RATE_MIMO2_18M_PLCP = 0xa,
- IWL_RATE_MIMO2_24M_PLCP = 0xb,
- IWL_RATE_MIMO2_36M_PLCP = 0xc,
- IWL_RATE_MIMO2_48M_PLCP = 0xd,
- IWL_RATE_MIMO2_54M_PLCP = 0xe,
- IWL_RATE_MIMO2_60M_PLCP = 0xf,
- IWL_RATE_SISO_INVM_PLCP,
- IWL_RATE_MIMO2_INVM_PLCP = IWL_RATE_SISO_INVM_PLCP,
-};
-
-/* MAC header values for bit rates */
-enum {
- IWL_RATE_6M_IEEE = 12,
- IWL_RATE_9M_IEEE = 18,
- IWL_RATE_12M_IEEE = 24,
- IWL_RATE_18M_IEEE = 36,
- IWL_RATE_24M_IEEE = 48,
- IWL_RATE_36M_IEEE = 72,
- IWL_RATE_48M_IEEE = 96,
- IWL_RATE_54M_IEEE = 108,
- IWL_RATE_60M_IEEE = 120,
- IWL_RATE_1M_IEEE = 2,
- IWL_RATE_2M_IEEE = 4,
- IWL_RATE_5M_IEEE = 11,
- IWL_RATE_11M_IEEE = 22,
-};
-
-#define IWL_CCK_BASIC_RATES_MASK \
- (IWL_RATE_1M_MASK | \
- IWL_RATE_2M_MASK)
-
-#define IWL_CCK_RATES_MASK \
- (IWL_CCK_BASIC_RATES_MASK | \
- IWL_RATE_5M_MASK | \
- IWL_RATE_11M_MASK)
-
-#define IWL_OFDM_BASIC_RATES_MASK \
- (IWL_RATE_6M_MASK | \
- IWL_RATE_12M_MASK | \
- IWL_RATE_24M_MASK)
-
-#define IWL_OFDM_RATES_MASK \
- (IWL_OFDM_BASIC_RATES_MASK | \
- IWL_RATE_9M_MASK | \
- IWL_RATE_18M_MASK | \
- IWL_RATE_36M_MASK | \
- IWL_RATE_48M_MASK | \
- IWL_RATE_54M_MASK)
-
-#define IWL_BASIC_RATES_MASK \
- (IWL_OFDM_BASIC_RATES_MASK | \
- IWL_CCK_BASIC_RATES_MASK)
-
-#define IWL_RATES_MASK ((1 << IWL_RATE_COUNT) - 1)
-#define IWL_RATES_MASK_3945 ((1 << IWL_RATE_COUNT_3945) - 1)
-
-#define IWL_INVALID_VALUE -1
-
-#define IWL_MIN_RSSI_VAL -100
-#define IWL_MAX_RSSI_VAL 0
-
-/* These values specify how many Tx frame attempts are made before
- * searching for a new modulation mode */
-#define IWL_LEGACY_FAILURE_LIMIT 160
-#define IWL_LEGACY_SUCCESS_LIMIT 480
-#define IWL_LEGACY_TABLE_COUNT 160
-
-#define IWL_NONE_LEGACY_FAILURE_LIMIT 400
-#define IWL_NONE_LEGACY_SUCCESS_LIMIT 4500
-#define IWL_NONE_LEGACY_TABLE_COUNT 1500
-
-/* Success ratio (ACKed / attempted tx frames) values (perfect is 128 * 100) */
-#define IWL_RS_GOOD_RATIO 12800 /* 100% */
-#define IWL_RATE_SCALE_SWITCH 10880 /* 85% */
-#define IWL_RATE_HIGH_TH 10880 /* 85% */
-#define IWL_RATE_INCREASE_TH 6400 /* 50% */
-#define IWL_RATE_DECREASE_TH 1920 /* 15% */
-
-/* possible actions when in legacy mode */
-#define IWL_LEGACY_SWITCH_ANTENNA1 0
-#define IWL_LEGACY_SWITCH_ANTENNA2 1
-#define IWL_LEGACY_SWITCH_SISO 2
-#define IWL_LEGACY_SWITCH_MIMO2_AB 3
-#define IWL_LEGACY_SWITCH_MIMO2_AC 4
-#define IWL_LEGACY_SWITCH_MIMO2_BC 5
-
-/* possible actions when in siso mode */
-#define IWL_SISO_SWITCH_ANTENNA1 0
-#define IWL_SISO_SWITCH_ANTENNA2 1
-#define IWL_SISO_SWITCH_MIMO2_AB 2
-#define IWL_SISO_SWITCH_MIMO2_AC 3
-#define IWL_SISO_SWITCH_MIMO2_BC 4
-#define IWL_SISO_SWITCH_GI 5
-
-/* possible actions when in mimo mode */
-#define IWL_MIMO2_SWITCH_ANTENNA1 0
-#define IWL_MIMO2_SWITCH_ANTENNA2 1
-#define IWL_MIMO2_SWITCH_SISO_A 2
-#define IWL_MIMO2_SWITCH_SISO_B 3
-#define IWL_MIMO2_SWITCH_SISO_C 4
-#define IWL_MIMO2_SWITCH_GI 5
-
-#define IWL_MAX_SEARCH IWL_MIMO2_SWITCH_GI
-
-#define IWL_ACTION_LIMIT 3 /* # possible actions */
-
-#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
-
-/* load per tid defines for A-MPDU activation */
-#define IWL_AGG_TPT_THREHOLD 0
-#define IWL_AGG_LOAD_THRESHOLD 10
-#define IWL_AGG_ALL_TID 0xff
-#define TID_QUEUE_CELL_SPACING 50 /*mS */
-#define TID_QUEUE_MAX_SIZE 20
-#define TID_ROUND_VALUE 5 /* mS */
-#define TID_MAX_LOAD_COUNT 8
-
-#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
-#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
-
-extern const struct iwl_rate_info iwlegacy_rates[IWL_RATE_COUNT];
-
-enum iwl_table_type {
- LQ_NONE,
- LQ_G, /* legacy types */
- LQ_A,
- LQ_SISO, /* high-throughput types */
- LQ_MIMO2,
- LQ_MAX,
-};
-
-#define is_legacy(tbl) (((tbl) == LQ_G) || ((tbl) == LQ_A))
-#define is_siso(tbl) ((tbl) == LQ_SISO)
-#define is_mimo2(tbl) ((tbl) == LQ_MIMO2)
-#define is_mimo(tbl) (is_mimo2(tbl))
-#define is_Ht(tbl) (is_siso(tbl) || is_mimo(tbl))
-#define is_a_band(tbl) ((tbl) == LQ_A)
-#define is_g_and(tbl) ((tbl) == LQ_G)
-
-#define ANT_NONE 0x0
-#define ANT_A BIT(0)
-#define ANT_B BIT(1)
-#define ANT_AB (ANT_A | ANT_B)
-#define ANT_C BIT(2)
-#define ANT_AC (ANT_A | ANT_C)
-#define ANT_BC (ANT_B | ANT_C)
-#define ANT_ABC (ANT_AB | ANT_C)
-
-#define IWL_MAX_MCS_DISPLAY_SIZE 12
-
-struct iwl_rate_mcs_info {
- char mbps[IWL_MAX_MCS_DISPLAY_SIZE];
- char mcs[IWL_MAX_MCS_DISPLAY_SIZE];
-};
-
-/**
- * struct iwl_rate_scale_data -- tx success history for one rate
- */
-struct iwl_rate_scale_data {
- u64 data; /* bitmap of successful frames */
- s32 success_counter; /* number of frames successful */
- s32 success_ratio; /* per-cent * 128 */
- s32 counter; /* number of frames attempted */
- s32 average_tpt; /* success ratio * expected throughput */
- unsigned long stamp;
-};
-
-/**
- * struct iwl_scale_tbl_info -- tx params and success history for all rates
- *
- * There are two of these in struct iwl_lq_sta,
- * one for "active", and one for "search".
- */
-struct iwl_scale_tbl_info {
- enum iwl_table_type lq_type;
- u8 ant_type;
- u8 is_SGI; /* 1 = short guard interval */
- u8 is_ht40; /* 1 = 40 MHz channel width */
- u8 is_dup; /* 1 = duplicated data streams */
- u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
- u8 max_search; /* maximum number of tables we can search */
- s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
- u32 current_rate; /* rate_n_flags, uCode API format */
- struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
-};
-
-struct iwl_traffic_load {
- unsigned long time_stamp; /* age of the oldest statistics */
- u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
- * slice */
- u32 total; /* total num of packets during the
- * last TID_MAX_TIME_DIFF */
- u8 queue_count; /* number of queues that have
- * been used since the last cleanup */
- u8 head; /* start of the circular buffer */
-};
-
-/**
- * struct iwl_lq_sta -- driver's rate scaling private structure
- *
- * Pointer to this gets passed back and forth between driver and mac80211.
- */
-struct iwl_lq_sta {
- u8 active_tbl; /* index of active table, range 0-1 */
- u8 enable_counter; /* indicates HT mode */
- u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
- u8 search_better_tbl; /* 1: currently trying alternate mode */
- s32 last_tpt;
-
- /* The following determine when to search for a new mode */
- u32 table_count_limit;
- u32 max_failure_limit; /* # failed frames before new search */
- u32 max_success_limit; /* # successful frames before new search */
- u32 table_count;
- u32 total_failed; /* total failed frames, any/all rates */
- u32 total_success; /* total successful frames, any/all rates */
- u64 flush_timer; /* time staying in mode before new search */
-
- u8 action_counter; /* # mode-switch actions tried */
- u8 is_green;
- u8 is_dup;
- enum ieee80211_band band;
-
- /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
- u32 supp_rates;
- u16 active_legacy_rate;
- u16 active_siso_rate;
- u16 active_mimo2_rate;
- s8 max_rate_idx; /* Max rate set by user */
- u8 missed_rate_counter;
-
- struct iwl_link_quality_cmd lq;
- struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
- struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
- u8 tx_agg_tid_en;
-#ifdef CONFIG_MAC80211_DEBUGFS
- struct dentry *rs_sta_dbgfs_scale_table_file;
- struct dentry *rs_sta_dbgfs_stats_table_file;
- struct dentry *rs_sta_dbgfs_rate_scale_data_file;
- struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
- u32 dbg_fixed_rate;
-#endif
- struct iwl_priv *drv;
-
- /* used to be in sta_info */
- int last_txrate_idx;
- /* last tx rate_n_flags */
- u32 last_rate_n_flags;
- /* packets destined for this STA are aggregated */
- u8 is_agg;
-};
-
-static inline u8 iwl4965_num_of_ant(u8 mask)
-{
- return !!((mask) & ANT_A) +
- !!((mask) & ANT_B) +
- !!((mask) & ANT_C);
-}
-
-static inline u8 iwl4965_first_antenna(u8 mask)
-{
- if (mask & ANT_A)
- return ANT_A;
- if (mask & ANT_B)
- return ANT_B;
- return ANT_C;
-}
-
-
-/**
- * iwl3945_rate_scale_init - Initialize the rate scale table based on assoc info
- *
- * The specific throughput table used is based on the type of network
- * being associated with, including A, B, G, and G w/ TGG protection.
- */
-extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
-
-/* Initialize station's rate scaling information after adding station */
-extern void iwl4965_rs_rate_init(struct iwl_priv *priv,
- struct ieee80211_sta *sta, u8 sta_id);
-extern void iwl3945_rs_rate_init(struct iwl_priv *priv,
- struct ieee80211_sta *sta, u8 sta_id);
-
-/**
- * iwl_rate_control_register - Register the rate control algorithm callbacks
- *
- * Since the rate control algorithm is hardware specific, there is no need
- * or reason to place it as a standalone module. The driver can call
- * iwl_rate_control_register in order to register the rate control callbacks
- * with the mac80211 subsystem. This should be performed prior to calling
- * ieee80211_register_hw
- *
- */
-extern int iwl4965_rate_control_register(void);
-extern int iwl3945_rate_control_register(void);
-
-/**
- * iwl_rate_control_unregister - Unregister the rate control callbacks
- *
- * This should be called after calling ieee80211_unregister_hw, but before
- * the driver is unloaded.
- */
-extern void iwl4965_rate_control_unregister(void);
-extern void iwl3945_rate_control_unregister(void);
-
-#endif /* __iwl_legacy_rs__ */
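The deleted rate-scaling header represents the antenna configuration as a bitmask (ANT_A, ANT_B, ANT_C) and derives both the antenna count and the first usable antenna from it. The two inline helpers near the end of the header translate directly into this small userspace sketch (same bit assignments; the values in main() are only for illustration):

#include <stdio.h>
#include <stdint.h>

#define ANT_A	(1u << 0)
#define ANT_B	(1u << 1)
#define ANT_C	(1u << 2)
#define ANT_AB	(ANT_A | ANT_B)

/* Count the antennas present in the mask, one bit per antenna, as
 * iwl4965_num_of_ant() does. */
static uint8_t num_of_ant(uint8_t mask)
{
	return (uint8_t)(!!(mask & ANT_A) + !!(mask & ANT_B) + !!(mask & ANT_C));
}

/* Pick the lowest-numbered antenna in the mask, falling back to C, as
 * iwl4965_first_antenna() does. */
static uint8_t first_antenna(uint8_t mask)
{
	if (mask & ANT_A)
		return ANT_A;
	if (mask & ANT_B)
		return ANT_B;
	return ANT_C;
}

int main(void)
{
	printf("ANT_AB: count=%u, first=0x%x\n",
	       num_of_ant(ANT_AB), first_antenna(ANT_AB));
	return 0;
}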
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.c b/drivers/net/wireless/iwlegacy/iwl-power.c
deleted file mode 100644
index 903ef0d..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-power.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-commands.h"
-#include "iwl-debug.h"
-#include "iwl-power.h"
-
-/*
- * Setting power level allows the card to go to sleep when not busy.
- *
- * We calculate a sleep command based on the required latency, which
- * we get from mac80211. In order to handle thermal throttling, we can
- * also use pre-defined power levels.
- */
-
-/*
- * This defines the old power levels. They are still used by default
- * (level 1) and for thermal throttle (levels 3 through 5)
- */
-
-struct iwl_power_vec_entry {
- struct iwl_powertable_cmd cmd;
- u8 no_dtim; /* number of skip dtim */
-};
-
-static void iwl_legacy_power_sleep_cam_cmd(struct iwl_priv *priv,
- struct iwl_powertable_cmd *cmd)
-{
- memset(cmd, 0, sizeof(*cmd));
-
- if (priv->power_data.pci_pm)
- cmd->flags |= IWL_POWER_PCI_PM_MSK;
-
- IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
-}
-
-static int
-iwl_legacy_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
-{
- IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
- IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags);
- IWL_DEBUG_POWER(priv, "Tx timeout = %u\n",
- le32_to_cpu(cmd->tx_data_timeout));
- IWL_DEBUG_POWER(priv, "Rx timeout = %u\n",
- le32_to_cpu(cmd->rx_data_timeout));
- IWL_DEBUG_POWER(priv,
- "Sleep interval vector = { %d , %d , %d , %d , %d }\n",
- le32_to_cpu(cmd->sleep_interval[0]),
- le32_to_cpu(cmd->sleep_interval[1]),
- le32_to_cpu(cmd->sleep_interval[2]),
- le32_to_cpu(cmd->sleep_interval[3]),
- le32_to_cpu(cmd->sleep_interval[4]));
-
- return iwl_legacy_send_cmd_pdu(priv, POWER_TABLE_CMD,
- sizeof(struct iwl_powertable_cmd), cmd);
-}
-
-int
-iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
- bool force)
-{
- int ret;
- bool update_chains;
-
- lockdep_assert_held(&priv->mutex);
-
- /* Don't update the RX chain when chain noise calibration is running */
- update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
- priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
-
- if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
- return 0;
-
- if (!iwl_legacy_is_ready_rf(priv))
- return -EIO;
-
- /* on scan complete, sleep_cmd_next is used; it needs to be kept updated */
- memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
- if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
- IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
- return 0;
- }
-
- if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
- set_bit(STATUS_POWER_PMI, &priv->status);
-
- ret = iwl_legacy_set_power(priv, cmd);
- if (!ret) {
- if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
- clear_bit(STATUS_POWER_PMI, &priv->status);
-
- if (priv->cfg->ops->lib->update_chain_flags && update_chains)
- priv->cfg->ops->lib->update_chain_flags(priv);
- else if (priv->cfg->ops->lib->update_chain_flags)
- IWL_DEBUG_POWER(priv,
- "Cannot update the power, chain noise "
- "calibration running: %d\n",
- priv->chain_noise_data.state);
-
- memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
- } else
- IWL_ERR(priv, "set power fail, ret = %d", ret);
-
- return ret;
-}
-
-int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force)
-{
- struct iwl_powertable_cmd cmd;
-
- iwl_legacy_power_sleep_cam_cmd(priv, &cmd);
- return iwl_legacy_power_set_mode(priv, &cmd, force);
-}
-EXPORT_SYMBOL(iwl_legacy_power_update_mode);
-
-/* initialize to default */
-void iwl_legacy_power_initialize(struct iwl_priv *priv)
-{
- u16 lctl = iwl_legacy_pcie_link_ctl(priv);
-
- priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);
-
- priv->power_data.debug_sleep_level_override = -1;
-
- memset(&priv->power_data.sleep_cmd, 0,
- sizeof(priv->power_data.sleep_cmd));
-}
-EXPORT_SYMBOL(iwl_legacy_power_initialize);
diff --git a/drivers/net/wireless/iwlegacy/iwl-power.h b/drivers/net/wireless/iwlegacy/iwl-power.h
deleted file mode 100644
index d30b36a..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-power.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-#ifndef __iwl_legacy_power_setting_h__
-#define __iwl_legacy_power_setting_h__
-
-#include "iwl-commands.h"
-
-enum iwl_power_level {
- IWL_POWER_INDEX_1,
- IWL_POWER_INDEX_2,
- IWL_POWER_INDEX_3,
- IWL_POWER_INDEX_4,
- IWL_POWER_INDEX_5,
- IWL_POWER_NUM
-};
-
-struct iwl_power_mgr {
- struct iwl_powertable_cmd sleep_cmd;
- struct iwl_powertable_cmd sleep_cmd_next;
- int debug_sleep_level_override;
- bool pci_pm;
-};
-
-int
-iwl_legacy_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
- bool force);
-int iwl_legacy_power_update_mode(struct iwl_priv *priv, bool force);
-void iwl_legacy_power_initialize(struct iwl_priv *priv);
-
-#endif /* __iwl_legacy_power_setting_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-rx.c b/drivers/net/wireless/iwlegacy/iwl-rx.c
deleted file mode 100644
index f4d21ec..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-rx.c
+++ /dev/null
@@ -1,282 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/etherdevice.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <net/mac80211.h>
-#include <asm/unaligned.h>
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-/************************** RX-FUNCTIONS ****************************/
-/*
- * Rx theory of operation
- *
- * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
- * each of which point to Receive Buffers to be filled by the NIC. These get
- * used not only for Rx frames, but for any command response or notification
- * from the NIC. The driver and NIC manage the Rx buffers by means
- * of indexes into the circular buffer.
- *
- * Rx Queue Indexes
- * The host/firmware share two index registers for managing the Rx buffers.
- *
- * The READ index maps to the first position that the firmware may be writing
- * to -- the driver can read up to (but not including) this position and get
- * good data.
- * The READ index is managed by the firmware once the card is enabled.
- *
- * The WRITE index maps to the last position the driver has read from -- the
- * position preceding WRITE is the last slot the firmware can place a packet.
- *
- * The queue is empty (no good data) if WRITE = READ - 1, and is full if
- * WRITE = READ.
- *
- * During initialization, the host sets up the READ queue position to the first
- * INDEX position, and WRITE to the last (READ - 1 wrapped)
- *
- * When the firmware places a packet in a buffer, it will advance the READ index
- * and fire the RX interrupt. The driver can then query the READ index and
- * process as many packets as possible, moving the WRITE index forward as it
- * resets the Rx queue buffers with new memory.
- *
- * The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
- * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- * to replenish the iwl->rxq->rx_free.
- * + In iwl_rx_replenish (scheduled) if 'processed' != 'read' then the
- * iwl->rxq is replenished and the READ INDEX is updated (updating the
- * 'processed' and 'read' driver indexes as well)
- * + A received packet is processed and handed to the kernel network stack,
- * detached from the iwl->rxq. The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
- * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
- * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
- * were enough free buffers and RX_STALLED is set it is cleared.
- *
- *
- * Driver sequence:
- *
- * iwl_legacy_rx_queue_alloc() Allocates rx_free
- * iwl_rx_replenish() Replenishes rx_free list from rx_used, and calls
- * iwl_rx_queue_restock
- * iwl_rx_queue_restock() Moves available buffers from rx_free into Rx
- * queue, updates firmware pointers, and updates
- * the WRITE index. If insufficient rx_free buffers
- * are available, schedules iwl_rx_replenish
- *
- * -- enable interrupts --
- * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
- * READ INDEX, detaching the SKB from the pool.
- * Moves the packet buffer from queue to rx_used.
- * Calls iwl_rx_queue_restock to refill any empty
- * slots.
- * ...
- *
- */
-
-/**
- * iwl_legacy_rx_queue_space - Return number of free slots available in queue.
- */
-int iwl_legacy_rx_queue_space(const struct iwl_rx_queue *q)
-{
- int s = q->read - q->write;
- if (s <= 0)
- s += RX_QUEUE_SIZE;
- /* keep some buffer to not confuse full and empty queue */
- s -= 2;
- if (s < 0)
- s = 0;
- return s;
-}
-EXPORT_SYMBOL(iwl_legacy_rx_queue_space);
-
-/**
- * iwl_legacy_rx_queue_update_write_ptr - Update the write pointer for the RX queue
- */
-void
-iwl_legacy_rx_queue_update_write_ptr(struct iwl_priv *priv,
- struct iwl_rx_queue *q)
-{
- unsigned long flags;
- u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
- u32 reg;
-
- spin_lock_irqsave(&q->lock, flags);
-
- if (q->need_update == 0)
- goto exit_unlock;
-
- /* If power-saving is in use, make sure device is awake */
- if (test_bit(STATUS_POWER_PMI, &priv->status)) {
- reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
-
- if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
- IWL_DEBUG_INFO(priv,
- "Rx queue requesting wakeup,"
- " GP1 = 0x%x\n", reg);
- iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- goto exit_unlock;
- }
-
- q->write_actual = (q->write & ~0x7);
- iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
- q->write_actual);
-
- /* Else device is assumed to be awake */
- } else {
- /* Device expects a multiple of 8 */
- q->write_actual = (q->write & ~0x7);
- iwl_legacy_write_direct32(priv, rx_wrt_ptr_reg,
- q->write_actual);
- }
-
- q->need_update = 0;
-
- exit_unlock:
- spin_unlock_irqrestore(&q->lock, flags);
-}
-EXPORT_SYMBOL(iwl_legacy_rx_queue_update_write_ptr);
-
-int iwl_legacy_rx_queue_alloc(struct iwl_priv *priv)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct device *dev = &priv->pci_dev->dev;
- int i;
-
- spin_lock_init(&rxq->lock);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
-
- /* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
- rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->bd_dma,
- GFP_KERNEL);
- if (!rxq->bd)
- goto err_bd;
-
- rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
- &rxq->rb_stts_dma, GFP_KERNEL);
- if (!rxq->rb_stts)
- goto err_rb;
-
- /* Fill the rx_used queue with _all_ of the Rx buffers */
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
- list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
-
- /* Set us so that we have processed and used all buffers, but have
- * not restocked the Rx queue with fresh buffers */
- rxq->read = rxq->write = 0;
- rxq->write_actual = 0;
- rxq->free_count = 0;
- rxq->need_update = 0;
- return 0;
-
-err_rb:
- dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->bd_dma);
-err_bd:
- return -ENOMEM;
-}
-EXPORT_SYMBOL(iwl_legacy_rx_queue_alloc);
-
-
-void iwl_legacy_rx_spectrum_measure_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
-
- if (!report->state) {
- IWL_DEBUG_11H(priv,
- "Spectrum Measure Notification: Start\n");
- return;
- }
-
- memcpy(&priv->measure_report, report, sizeof(*report));
- priv->measurement_status |= MEASUREMENT_READY;
-}
-EXPORT_SYMBOL(iwl_legacy_rx_spectrum_measure_notif);
-
-/*
- * returns non-zero if packet should be dropped
- */
-int iwl_legacy_set_decrypted_flag(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- u32 decrypt_res,
- struct ieee80211_rx_status *stats)
-{
- u16 fc = le16_to_cpu(hdr->frame_control);
-
- /*
- * All contexts have the same setting here due to it being
- * a module parameter, so OK to check any context.
- */
- if (priv->contexts[IWL_RXON_CTX_BSS].active.filter_flags &
- RXON_FILTER_DIS_DECRYPT_MSK)
- return 0;
-
- if (!(fc & IEEE80211_FCTL_PROTECTED))
- return 0;
-
- IWL_DEBUG_RX(priv, "decrypt_res:0x%x\n", decrypt_res);
- switch (decrypt_res & RX_RES_STATUS_SEC_TYPE_MSK) {
- case RX_RES_STATUS_SEC_TYPE_TKIP:
- /* The uCode got a bad phase 1 key and pushes the packet up;
- * decryption will be done in SW. */
- if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
- RX_RES_STATUS_BAD_KEY_TTAK)
- break;
-
- case RX_RES_STATUS_SEC_TYPE_WEP:
- if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
- RX_RES_STATUS_BAD_ICV_MIC) {
- /* bad ICV, the packet is destroyed since the
- * decryption is inplace, drop it */
- IWL_DEBUG_RX(priv, "Packet destroyed\n");
- return -1;
- }
- case RX_RES_STATUS_SEC_TYPE_CCMP:
- if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) ==
- RX_RES_STATUS_DECRYPT_OK) {
- IWL_DEBUG_RX(priv, "hw decrypt successfully!!!\n");
- stats->flag |= RX_FLAG_DECRYPTED;
- }
- break;
-
- default:
- break;
- }
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_set_decrypted_flag);
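The Rx theory-of-operation comment above describes the RBD ring in terms of a firmware-owned READ index and a driver-owned WRITE index; iwl_legacy_rx_queue_space() turns those into the number of slots the driver may still restock, holding two slots back so a completely full ring can be told apart from an empty one. The same calculation as a self-contained sketch (RX_QUEUE_SIZE is defined elsewhere in the driver; 256 is only an assumed value here):

#include <stdio.h>

#define RX_QUEUE_SIZE 256	/* assumed; defined elsewhere in the driver */

/* Free slots between the driver write index and the firmware read index,
 * minus the two slots reserved to distinguish "full" from "empty",
 * mirroring iwl_legacy_rx_queue_space(). */
static int rx_queue_space(int read, int write)
{
	int s = read - write;

	if (s <= 0)
		s += RX_QUEUE_SIZE;
	s -= 2;
	if (s < 0)
		s = 0;
	return s;
}

int main(void)
{
	printf("read=10, write=250 -> %d free slots\n", rx_queue_space(10, 250));
	return 0;
}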
diff --git a/drivers/net/wireless/iwlegacy/iwl-scan.c b/drivers/net/wireless/iwlegacy/iwl-scan.c
deleted file mode 100644
index 521b73b..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-scan.c
+++ /dev/null
@@ -1,550 +0,0 @@
-/******************************************************************************
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called LICENSE.GPL.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *****************************************************************************/
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/etherdevice.h>
-#include <linux/export.h>
-#include <net/mac80211.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-
-/* For active scan, listen ACTIVE_DWELL_TIME (msec) on each channel after
- * sending probe req. This should be set long enough to hear probe responses
- * from more than one AP. */
-#define IWL_ACTIVE_DWELL_TIME_24 (30) /* all times in msec */
-#define IWL_ACTIVE_DWELL_TIME_52 (20)
-
-#define IWL_ACTIVE_DWELL_FACTOR_24GHZ (3)
-#define IWL_ACTIVE_DWELL_FACTOR_52GHZ (2)
-
-/* For passive scan, listen PASSIVE_DWELL_TIME (msec) on each channel.
- * Must be set longer than active dwell time.
- * For the most reliable scan, set > AP beacon interval (typically 100msec). */
-#define IWL_PASSIVE_DWELL_TIME_24 (20) /* all times in msec */
-#define IWL_PASSIVE_DWELL_TIME_52 (10)
-#define IWL_PASSIVE_DWELL_BASE (100)
-#define IWL_CHANNEL_TUNE_TIME 5
-
-static int iwl_legacy_send_scan_abort(struct iwl_priv *priv)
-{
- int ret;
- struct iwl_rx_packet *pkt;
- struct iwl_host_cmd cmd = {
- .id = REPLY_SCAN_ABORT_CMD,
- .flags = CMD_WANT_SKB,
- };
-
- /* Exit instantly with error when device is not ready
- * to receive scan abort command or it does not perform
- * hardware scan currently */
- if (!test_bit(STATUS_READY, &priv->status) ||
- !test_bit(STATUS_GEO_CONFIGURED, &priv->status) ||
- !test_bit(STATUS_SCAN_HW, &priv->status) ||
- test_bit(STATUS_FW_ERROR, &priv->status) ||
- test_bit(STATUS_EXIT_PENDING, &priv->status))
- return -EIO;
-
- ret = iwl_legacy_send_cmd_sync(priv, &cmd);
- if (ret)
- return ret;
-
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- if (pkt->u.status != CAN_ABORT_STATUS) {
- /* The scan abort will return 1 for success or
- * 2 for "failure". A failure condition can be
- * due to simply not being in an active scan which
- * can occur if we send the scan abort before the
- * microcode has notified us that a scan is
- * completed. */
- IWL_DEBUG_SCAN(priv, "SCAN_ABORT ret %d.\n", pkt->u.status);
- ret = -EIO;
- }
-
- iwl_legacy_free_pages(priv, cmd.reply_page);
- return ret;
-}
-
-static void iwl_legacy_complete_scan(struct iwl_priv *priv, bool aborted)
-{
- /* check if scan was requested from mac80211 */
- if (priv->scan_request) {
- IWL_DEBUG_SCAN(priv, "Complete scan in mac80211\n");
- ieee80211_scan_completed(priv->hw, aborted);
- }
-
- priv->scan_vif = NULL;
- priv->scan_request = NULL;
-}
-
-void iwl_legacy_force_scan_end(struct iwl_priv *priv)
-{
- lockdep_assert_held(&priv->mutex);
-
- if (!test_bit(STATUS_SCANNING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Forcing scan end while not scanning\n");
- return;
- }
-
- IWL_DEBUG_SCAN(priv, "Forcing scan end\n");
- clear_bit(STATUS_SCANNING, &priv->status);
- clear_bit(STATUS_SCAN_HW, &priv->status);
- clear_bit(STATUS_SCAN_ABORTING, &priv->status);
- iwl_legacy_complete_scan(priv, true);
-}
-
-static void iwl_legacy_do_scan_abort(struct iwl_priv *priv)
-{
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- if (!test_bit(STATUS_SCANNING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Not performing scan to abort\n");
- return;
- }
-
- if (test_and_set_bit(STATUS_SCAN_ABORTING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Scan abort in progress\n");
- return;
- }
-
- ret = iwl_legacy_send_scan_abort(priv);
- if (ret) {
- IWL_DEBUG_SCAN(priv, "Send scan abort failed %d\n", ret);
- iwl_legacy_force_scan_end(priv);
- } else
- IWL_DEBUG_SCAN(priv, "Successfully send scan abort\n");
-}
-
-/**
- * iwl_scan_cancel - Cancel any currently executing HW scan
- */
-int iwl_legacy_scan_cancel(struct iwl_priv *priv)
-{
- IWL_DEBUG_SCAN(priv, "Queuing abort scan\n");
- queue_work(priv->workqueue, &priv->abort_scan);
- return 0;
-}
-EXPORT_SYMBOL(iwl_legacy_scan_cancel);
-
-/**
- * iwl_legacy_scan_cancel_timeout - Cancel any currently executing HW scan
- * @ms: amount of time to wait (in milliseconds) for scan to abort
- *
- */
-int iwl_legacy_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms)
-{
- unsigned long timeout = jiffies + msecs_to_jiffies(ms);
-
- lockdep_assert_held(&priv->mutex);
-
- IWL_DEBUG_SCAN(priv, "Scan cancel timeout\n");
-
- iwl_legacy_do_scan_abort(priv);
-
- while (time_before_eq(jiffies, timeout)) {
- if (!test_bit(STATUS_SCAN_HW, &priv->status))
- break;
- msleep(20);
- }
-
- return test_bit(STATUS_SCAN_HW, &priv->status);
-}
-EXPORT_SYMBOL(iwl_legacy_scan_cancel_timeout);
-
-/* Service response to REPLY_SCAN_CMD (0x80) */
-static void iwl_legacy_rx_reply_scan(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_scanreq_notification *notif =
- (struct iwl_scanreq_notification *)pkt->u.raw;
-
- IWL_DEBUG_SCAN(priv, "Scan request status = 0x%x\n", notif->status);
-#endif
-}
-
-/* Service SCAN_START_NOTIFICATION (0x82) */
-static void iwl_legacy_rx_scan_start_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_scanstart_notification *notif =
- (struct iwl_scanstart_notification *)pkt->u.raw;
- priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
- IWL_DEBUG_SCAN(priv, "Scan start: "
- "%d [802.11%s] "
- "(TSF: 0x%08X:%08X) - %d (beacon timer %u)\n",
- notif->channel,
- notif->band ? "bg" : "a",
- le32_to_cpu(notif->tsf_high),
- le32_to_cpu(notif->tsf_low),
- notif->status, notif->beacon_timer);
-}
-
-/* Service SCAN_RESULTS_NOTIFICATION (0x83) */
-static void iwl_legacy_rx_scan_results_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_scanresults_notification *notif =
- (struct iwl_scanresults_notification *)pkt->u.raw;
-
- IWL_DEBUG_SCAN(priv, "Scan ch.res: "
- "%d [802.11%s] "
- "(TSF: 0x%08X:%08X) - %d "
- "elapsed=%lu usec\n",
- notif->channel,
- notif->band ? "bg" : "a",
- le32_to_cpu(notif->tsf_high),
- le32_to_cpu(notif->tsf_low),
- le32_to_cpu(notif->statistics[0]),
- le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
-#endif
-}
-
-/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
-static void iwl_legacy_rx_scan_complete_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
-#endif
-
- IWL_DEBUG_SCAN(priv,
- "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
- scan_notif->scanned_channels,
- scan_notif->tsf_low,
- scan_notif->tsf_high, scan_notif->status);
-
- /* The HW is no longer scanning */
- clear_bit(STATUS_SCAN_HW, &priv->status);
-
- IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
- (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
- jiffies_to_msecs(jiffies - priv->scan_start));
-
- queue_work(priv->workqueue, &priv->scan_completed);
-}
-
-void iwl_legacy_setup_rx_scan_handlers(struct iwl_priv *priv)
-{
- /* scan handlers */
- priv->rx_handlers[REPLY_SCAN_CMD] = iwl_legacy_rx_reply_scan;
- priv->rx_handlers[SCAN_START_NOTIFICATION] =
- iwl_legacy_rx_scan_start_notif;
- priv->rx_handlers[SCAN_RESULTS_NOTIFICATION] =
- iwl_legacy_rx_scan_results_notif;
- priv->rx_handlers[SCAN_COMPLETE_NOTIFICATION] =
- iwl_legacy_rx_scan_complete_notif;
-}
-EXPORT_SYMBOL(iwl_legacy_setup_rx_scan_handlers);
-
-inline u16 iwl_legacy_get_active_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band,
- u8 n_probes)
-{
- if (band == IEEE80211_BAND_5GHZ)
- return IWL_ACTIVE_DWELL_TIME_52 +
- IWL_ACTIVE_DWELL_FACTOR_52GHZ * (n_probes + 1);
- else
- return IWL_ACTIVE_DWELL_TIME_24 +
- IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
-}
-EXPORT_SYMBOL(iwl_legacy_get_active_dwell_time);
-
-u16 iwl_legacy_get_passive_dwell_time(struct iwl_priv *priv,
- enum ieee80211_band band,
- struct ieee80211_vif *vif)
-{
- struct iwl_rxon_context *ctx;
- u16 passive = (band == IEEE80211_BAND_2GHZ) ?
- IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24 :
- IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_52;
-
- if (iwl_legacy_is_any_associated(priv)) {
- /*
- * If we're associated, we clamp the maximum passive
- * dwell time to be 98% of the smallest beacon interval
- * (minus 2 * channel tune time)
- */
- for_each_context(priv, ctx) {
- u16 value;
-
- if (!iwl_legacy_is_associated_ctx(ctx))
- continue;
- value = ctx->vif ? ctx->vif->bss_conf.beacon_int : 0;
- if ((value > IWL_PASSIVE_DWELL_BASE) || !value)
- value = IWL_PASSIVE_DWELL_BASE;
- value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
- passive = min(value, passive);
- }
- }
-
- return passive;
-}
-EXPORT_SYMBOL(iwl_legacy_get_passive_dwell_time);
-
-void iwl_legacy_init_scan_params(struct iwl_priv *priv)
-{
- u8 ant_idx = fls(priv->hw_params.valid_tx_ant) - 1;
- if (!priv->scan_tx_ant[IEEE80211_BAND_5GHZ])
- priv->scan_tx_ant[IEEE80211_BAND_5GHZ] = ant_idx;
- if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
- priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
-}
-EXPORT_SYMBOL(iwl_legacy_init_scan_params);
-
-static int iwl_legacy_scan_initiate(struct iwl_priv *priv,
- struct ieee80211_vif *vif)
-{
- int ret;
-
- lockdep_assert_held(&priv->mutex);
-
- if (WARN_ON(!priv->cfg->ops->utils->request_scan))
- return -EOPNOTSUPP;
-
- cancel_delayed_work(&priv->scan_check);
-
- if (!iwl_legacy_is_ready_rf(priv)) {
- IWL_WARN(priv, "Request scan called when driver not ready.\n");
- return -EIO;
- }
-
- if (test_bit(STATUS_SCAN_HW, &priv->status)) {
- IWL_DEBUG_SCAN(priv,
- "Multiple concurrent scan requests in parallel.\n");
- return -EBUSY;
- }
-
- if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Scan request while abort pending.\n");
- return -EBUSY;
- }
-
- IWL_DEBUG_SCAN(priv, "Starting scan...\n");
-
- set_bit(STATUS_SCANNING, &priv->status);
- priv->scan_start = jiffies;
-
- ret = priv->cfg->ops->utils->request_scan(priv, vif);
- if (ret) {
- clear_bit(STATUS_SCANNING, &priv->status);
- return ret;
- }
-
- queue_delayed_work(priv->workqueue, &priv->scan_check,
- IWL_SCAN_CHECK_WATCHDOG);
-
- return 0;
-}
-
-int iwl_legacy_mac_hw_scan(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_scan_request *req)
-{
- struct iwl_priv *priv = hw->priv;
- int ret;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (req->n_channels == 0)
- return -EINVAL;
-
- mutex_lock(&priv->mutex);
-
- if (test_bit(STATUS_SCANNING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
- ret = -EAGAIN;
- goto out_unlock;
- }
-
- /* mac80211 will only ask for one band at a time */
- priv->scan_request = req;
- priv->scan_vif = vif;
- priv->scan_band = req->channels[0]->band;
-
- ret = iwl_legacy_scan_initiate(priv, vif);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
-out_unlock:
- mutex_unlock(&priv->mutex);
-
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_hw_scan);
-
-static void iwl_legacy_bg_scan_check(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, scan_check.work);
-
- IWL_DEBUG_SCAN(priv, "Scan check work\n");
-
- /* Since we got here, the firmware did not finish the scan and is
- * most likely in bad shape, so don't bother sending an abort
- * command; just force scan complete to mac80211 */
- mutex_lock(&priv->mutex);
- iwl_legacy_force_scan_end(priv);
- mutex_unlock(&priv->mutex);
-}
-
-/**
- * iwl_legacy_fill_probe_req - fill in all required fields and IE for probe request
- */
-
-u16
-iwl_legacy_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
- const u8 *ta, const u8 *ies, int ie_len, int left)
-{
- int len = 0;
- u8 *pos = NULL;
-
- /* Make sure there is enough space for the probe request,
- * two mandatory IEs and the data */
- left -= 24;
- if (left < 0)
- return 0;
-
- frame->frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
- memcpy(frame->da, iwlegacy_bcast_addr, ETH_ALEN);
- memcpy(frame->sa, ta, ETH_ALEN);
- memcpy(frame->bssid, iwlegacy_bcast_addr, ETH_ALEN);
- frame->seq_ctrl = 0;
-
- len += 24;
-
- /* ...next IE... */
- pos = &frame->u.probe_req.variable[0];
-
- /* fill in our indirect SSID IE */
- left -= 2;
- if (left < 0)
- return 0;
- *pos++ = WLAN_EID_SSID;
- *pos++ = 0;
-
- len += 2;
-
- if (WARN_ON(left < ie_len))
- return len;
-
- if (ies && ie_len) {
- memcpy(pos, ies, ie_len);
- len += ie_len;
- }
-
- return (u16)len;
-}
-EXPORT_SYMBOL(iwl_legacy_fill_probe_req);
-
-static void iwl_legacy_bg_abort_scan(struct work_struct *work)
-{
- struct iwl_priv *priv = container_of(work, struct iwl_priv, abort_scan);
-
- IWL_DEBUG_SCAN(priv, "Abort scan work\n");
-
- /* Keep the scan_check work queued in case the firmware does not
- * report back the scan completed notification */
- mutex_lock(&priv->mutex);
- iwl_legacy_scan_cancel_timeout(priv, 200);
- mutex_unlock(&priv->mutex);
-}
-
-static void iwl_legacy_bg_scan_completed(struct work_struct *work)
-{
- struct iwl_priv *priv =
- container_of(work, struct iwl_priv, scan_completed);
- bool aborted;
-
- IWL_DEBUG_SCAN(priv, "Completed scan.\n");
-
- cancel_delayed_work(&priv->scan_check);
-
- mutex_lock(&priv->mutex);
-
- aborted = test_and_clear_bit(STATUS_SCAN_ABORTING, &priv->status);
- if (aborted)
- IWL_DEBUG_SCAN(priv, "Aborted scan completed.\n");
-
- if (!test_and_clear_bit(STATUS_SCANNING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Scan already completed.\n");
- goto out_settings;
- }
-
- iwl_legacy_complete_scan(priv, aborted);
-
-out_settings:
- /* Can we still talk to firmware ? */
- if (!iwl_legacy_is_ready_rf(priv))
- goto out;
-
- /*
- * We do not commit power settings while scan is pending,
- * do it now if the settings changed.
- */
- iwl_legacy_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
- iwl_legacy_set_tx_power(priv, priv->tx_power_next, false);
-
- priv->cfg->ops->utils->post_scan(priv);
-
-out:
- mutex_unlock(&priv->mutex);
-}
-
-void iwl_legacy_setup_scan_deferred_work(struct iwl_priv *priv)
-{
- INIT_WORK(&priv->scan_completed, iwl_legacy_bg_scan_completed);
- INIT_WORK(&priv->abort_scan, iwl_legacy_bg_abort_scan);
- INIT_DELAYED_WORK(&priv->scan_check, iwl_legacy_bg_scan_check);
-}
-EXPORT_SYMBOL(iwl_legacy_setup_scan_deferred_work);
-
-void iwl_legacy_cancel_scan_deferred_work(struct iwl_priv *priv)
-{
- cancel_work_sync(&priv->abort_scan);
- cancel_work_sync(&priv->scan_completed);
-
- if (cancel_delayed_work_sync(&priv->scan_check)) {
- mutex_lock(&priv->mutex);
- iwl_legacy_force_scan_end(priv);
- mutex_unlock(&priv->mutex);
- }
-}
-EXPORT_SYMBOL(iwl_legacy_cancel_scan_deferred_work);
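The scan code deleted above derives per-channel dwell times from the constants at the top of the file: active dwell grows with the number of probe requests sent, while passive dwell is clamped, when associated, to 98% of the beacon interval minus twice the channel tune time. A userspace sketch of both calculations for the 2.4 GHz band (constants copied from the deleted file; the sample inputs in main() are arbitrary):

#include <stdio.h>

/* Dwell-time constants from the deleted iwl-scan.c (all in msec). */
#define IWL_ACTIVE_DWELL_TIME_24	30
#define IWL_ACTIVE_DWELL_FACTOR_24GHZ	 3
#define IWL_PASSIVE_DWELL_TIME_24	20
#define IWL_PASSIVE_DWELL_BASE		100
#define IWL_CHANNEL_TUNE_TIME		 5

/* Active dwell grows with the number of probe requests per channel,
 * mirroring iwl_legacy_get_active_dwell_time() for 2.4 GHz. */
static unsigned int active_dwell_24(unsigned int n_probes)
{
	return IWL_ACTIVE_DWELL_TIME_24 +
	       IWL_ACTIVE_DWELL_FACTOR_24GHZ * (n_probes + 1);
}

/* When associated, passive dwell is clamped to 98% of the beacon
 * interval minus 2 * channel tune time, as in
 * iwl_legacy_get_passive_dwell_time(). */
static unsigned int passive_dwell_24(unsigned int beacon_int)
{
	unsigned int passive = IWL_PASSIVE_DWELL_BASE + IWL_PASSIVE_DWELL_TIME_24;
	unsigned int value = beacon_int;

	if (value > IWL_PASSIVE_DWELL_BASE || !value)
		value = IWL_PASSIVE_DWELL_BASE;
	value = (value * 98) / 100 - IWL_CHANNEL_TUNE_TIME * 2;
	return value < passive ? value : passive;
}

int main(void)
{
	printf("active dwell, 2 probes: %u ms\n", active_dwell_24(2));
	printf("passive dwell, beacon interval 100: %u ms\n",
	       passive_dwell_24(100));
	return 0;
}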
diff --git a/drivers/net/wireless/iwlegacy/iwl-spectrum.h b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
index 9f70a47..85fe48e 100644
--- a/drivers/net/wireless/iwlegacy/iwl-spectrum.h
+++ b/drivers/net/wireless/iwlegacy/iwl-spectrum.h
@@ -26,8 +26,8 @@
*
*****************************************************************************/
-#ifndef __iwl_legacy_spectrum_h__
-#define __iwl_legacy_spectrum_h__
+#ifndef __il_spectrum_h__
+#define __il_spectrum_h__
enum { /* ieee80211_basic_report.map */
IEEE80211_BASIC_MAP_BSS = (1 << 0),
IEEE80211_BASIC_MAP_OFDM = (1 << 1),
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.c b/drivers/net/wireless/iwlegacy/iwl-sta.c
deleted file mode 100644
index f10df3e..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-sta.c
+++ /dev/null
@@ -1,817 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <net/mac80211.h>
-#include <linux/etherdevice.h>
-#include <linux/sched.h>
-#include <linux/lockdep.h>
-#include <linux/export.h>
-
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-
-/* priv->sta_lock must be held */
-static void iwl_legacy_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
-{
-
- if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
- IWL_ERR(priv,
- "ACTIVATE a non DRIVER active station id %u addr %pM\n",
- sta_id, priv->stations[sta_id].sta.sta.addr);
-
- if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
- IWL_DEBUG_ASSOC(priv,
- "STA id %u addr %pM already present"
- " in uCode (according to driver)\n",
- sta_id, priv->stations[sta_id].sta.sta.addr);
- } else {
- priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
- IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
- sta_id, priv->stations[sta_id].sta.sta.addr);
- }
-}
-
-static int iwl_legacy_process_add_sta_resp(struct iwl_priv *priv,
- struct iwl_legacy_addsta_cmd *addsta,
- struct iwl_rx_packet *pkt,
- bool sync)
-{
- u8 sta_id = addsta->sta.sta_id;
- unsigned long flags;
- int ret = -EIO;
-
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
- pkt->hdr.flags);
- return ret;
- }
-
- IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
- sta_id);
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- switch (pkt->u.add_sta.status) {
- case ADD_STA_SUCCESS_MSK:
- IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
- iwl_legacy_sta_ucode_activate(priv, sta_id);
- ret = 0;
- break;
- case ADD_STA_NO_ROOM_IN_TABLE:
- IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
- sta_id);
- break;
- case ADD_STA_NO_BLOCK_ACK_RESOURCE:
- IWL_ERR(priv,
- "Adding station %d failed, no block ack resource.\n",
- sta_id);
- break;
- case ADD_STA_MODIFY_NON_EXIST_STA:
- IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
- sta_id);
- break;
- default:
- IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
- pkt->u.add_sta.status);
- break;
- }
-
- IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
- priv->stations[sta_id].sta.mode ==
- STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
- sta_id, priv->stations[sta_id].sta.sta.addr);
-
- /*
- * XXX: The MAC address in the command buffer is often changed from
- * the original sent to the device. That is, the MAC address
- * written to the command buffer often is not the same MAC address
- * read from the command buffer when the command returns. This
- * issue has not yet been resolved and this debugging is left to
- * observe the problem.
- */
- IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
- priv->stations[sta_id].sta.mode ==
- STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
- addsta->sta.addr);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return ret;
-}
-
-static void iwl_legacy_add_sta_callback(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt)
-{
- struct iwl_legacy_addsta_cmd *addsta =
- (struct iwl_legacy_addsta_cmd *)cmd->cmd.payload;
-
- iwl_legacy_process_add_sta_resp(priv, addsta, pkt, false);
-
-}
-
-int iwl_legacy_send_add_sta(struct iwl_priv *priv,
- struct iwl_legacy_addsta_cmd *sta, u8 flags)
-{
- struct iwl_rx_packet *pkt = NULL;
- int ret = 0;
- u8 data[sizeof(*sta)];
- struct iwl_host_cmd cmd = {
- .id = REPLY_ADD_STA,
- .flags = flags,
- .data = data,
- };
- u8 sta_id __maybe_unused = sta->sta.sta_id;
-
- IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
- sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
-
- if (flags & CMD_ASYNC)
- cmd.callback = iwl_legacy_add_sta_callback;
- else {
- cmd.flags |= CMD_WANT_SKB;
- might_sleep();
- }
-
- cmd.len = priv->cfg->ops->utils->build_addsta_hcmd(sta, data);
- ret = iwl_legacy_send_cmd(priv, &cmd);
-
- if (ret || (flags & CMD_ASYNC))
- return ret;
-
- if (ret == 0) {
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- ret = iwl_legacy_process_add_sta_resp(priv, sta, pkt, true);
- }
- iwl_legacy_free_pages(priv, cmd.reply_page);
-
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_send_add_sta);
-
-static void iwl_legacy_set_ht_add_station(struct iwl_priv *priv, u8 index,
- struct ieee80211_sta *sta,
- struct iwl_rxon_context *ctx)
-{
- struct ieee80211_sta_ht_cap *sta_ht_inf = &sta->ht_cap;
- __le32 sta_flags;
- u8 mimo_ps_mode;
-
- if (!sta || !sta_ht_inf->ht_supported)
- goto done;
-
- mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
- IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n",
- (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
- "static" :
- (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
- "dynamic" : "disabled");
-
- sta_flags = priv->stations[index].sta.station_flags;
-
- sta_flags &= ~(STA_FLG_RTS_MIMO_PROT_MSK | STA_FLG_MIMO_DIS_MSK);
-
- switch (mimo_ps_mode) {
- case WLAN_HT_CAP_SM_PS_STATIC:
- sta_flags |= STA_FLG_MIMO_DIS_MSK;
- break;
- case WLAN_HT_CAP_SM_PS_DYNAMIC:
- sta_flags |= STA_FLG_RTS_MIMO_PROT_MSK;
- break;
- case WLAN_HT_CAP_SM_PS_DISABLED:
- break;
- default:
- IWL_WARN(priv, "Invalid MIMO PS mode %d\n", mimo_ps_mode);
- break;
- }
-
- sta_flags |= cpu_to_le32(
- (u32)sta_ht_inf->ampdu_factor << STA_FLG_MAX_AGG_SIZE_POS);
-
- sta_flags |= cpu_to_le32(
- (u32)sta_ht_inf->ampdu_density << STA_FLG_AGG_MPDU_DENSITY_POS);
-
- if (iwl_legacy_is_ht40_tx_allowed(priv, ctx, &sta->ht_cap))
- sta_flags |= STA_FLG_HT40_EN_MSK;
- else
- sta_flags &= ~STA_FLG_HT40_EN_MSK;
-
- priv->stations[index].sta.station_flags = sta_flags;
- done:
- return;
-}
-
-/**
- * iwl_legacy_prep_station - Prepare station information for addition
- *
- * should be called with sta_lock held
- */
-u8 iwl_legacy_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- const u8 *addr, bool is_ap, struct ieee80211_sta *sta)
-{
- struct iwl_station_entry *station;
- int i;
- u8 sta_id = IWL_INVALID_STATION;
- u16 rate;
-
- if (is_ap)
- sta_id = ctx->ap_sta_id;
- else if (is_broadcast_ether_addr(addr))
- sta_id = ctx->bcast_sta_id;
- else
- for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) {
- if (!compare_ether_addr(priv->stations[i].sta.sta.addr,
- addr)) {
- sta_id = i;
- break;
- }
-
- if (!priv->stations[i].used &&
- sta_id == IWL_INVALID_STATION)
- sta_id = i;
- }
-
- /*
- * These two conditions have the same outcome, but keep them
- * separate
- */
- if (unlikely(sta_id == IWL_INVALID_STATION))
- return sta_id;
-
- /*
- * uCode is not able to deal with multiple requests to add a
- * station. Keep track if one is in progress so that we do not send
- * another.
- */
- if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
- IWL_DEBUG_INFO(priv,
- "STA %d already in process of being added.\n",
- sta_id);
- return sta_id;
- }
-
- if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
- (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
- !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) {
- IWL_DEBUG_ASSOC(priv,
- "STA %d (%pM) already added, not adding again.\n",
- sta_id, addr);
- return sta_id;
- }
-
- station = &priv->stations[sta_id];
- station->used = IWL_STA_DRIVER_ACTIVE;
- IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
- sta_id, addr);
- priv->num_stations++;
-
- /* Set up the REPLY_ADD_STA command to send to device */
- memset(&station->sta, 0, sizeof(struct iwl_legacy_addsta_cmd));
- memcpy(station->sta.sta.addr, addr, ETH_ALEN);
- station->sta.mode = 0;
- station->sta.sta.sta_id = sta_id;
- station->sta.station_flags = ctx->station_flags;
- station->ctxid = ctx->ctxid;
-
- if (sta) {
- struct iwl_station_priv_common *sta_priv;
-
- sta_priv = (void *)sta->drv_priv;
- sta_priv->ctx = ctx;
- }
-
- /*
- * OK to call unconditionally, since local stations (IBSS BSSID
- * STA and broadcast STA) pass in a NULL sta, and mac80211
- * doesn't allow HT IBSS.
- */
- iwl_legacy_set_ht_add_station(priv, sta_id, sta, ctx);
-
- /* 3945 only */
- rate = (priv->band == IEEE80211_BAND_5GHZ) ?
- IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP;
- /* Turn on both antennas for the station... */
- station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
-
- return sta_id;
-
-}
-EXPORT_SYMBOL_GPL(iwl_legacy_prep_station);
-
-#define STA_WAIT_TIMEOUT (HZ/2)
-
-/**
- * iwl_legacy_add_station_common - add a station to the driver and device tables
- */
-int
-iwl_legacy_add_station_common(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- const u8 *addr, bool is_ap,
- struct ieee80211_sta *sta, u8 *sta_id_r)
-{
- unsigned long flags_spin;
- int ret = 0;
- u8 sta_id;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- *sta_id_r = 0;
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- sta_id = iwl_legacy_prep_station(priv, ctx, addr, is_ap, sta);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
- addr);
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- return -EINVAL;
- }
-
- /*
- * uCode is not able to deal with multiple requests to add a
- * station. Keep track if one is in progress so that we do not send
- * another.
- */
- if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
- IWL_DEBUG_INFO(priv,
- "STA %d already in process of being added.\n",
- sta_id);
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- return -EEXIST;
- }
-
- if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
- (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
- IWL_DEBUG_ASSOC(priv,
- "STA %d (%pM) already added, not adding again.\n",
- sta_id, addr);
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- return -EEXIST;
- }
-
- priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
- memcpy(&sta_cmd, &priv->stations[sta_id].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-
- /* Add station to device's station table */
- ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
- if (ret) {
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- IWL_ERR(priv, "Adding station %pM failed.\n",
- priv->stations[sta_id].sta.sta.addr);
- priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
- priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- }
- *sta_id_r = sta_id;
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_add_station_common);
-
-/**
- * iwl_legacy_sta_ucode_deactivate - deactivate ucode status for a station
- *
- * priv->sta_lock must be held
- */
-static void iwl_legacy_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
-{
-	/* uCode entry must be active and driver entry must be inactive */
- if ((priv->stations[sta_id].used &
- (IWL_STA_UCODE_ACTIVE | IWL_STA_DRIVER_ACTIVE)) !=
- IWL_STA_UCODE_ACTIVE)
- IWL_ERR(priv, "removed non active STA %u\n", sta_id);
-
- priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
-
- memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
- IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
-}
-
-static int iwl_legacy_send_remove_station(struct iwl_priv *priv,
- const u8 *addr, int sta_id,
- bool temporary)
-{
- struct iwl_rx_packet *pkt;
- int ret;
-
- unsigned long flags_spin;
- struct iwl_rem_sta_cmd rm_sta_cmd;
-
- struct iwl_host_cmd cmd = {
- .id = REPLY_REMOVE_STA,
- .len = sizeof(struct iwl_rem_sta_cmd),
- .flags = CMD_SYNC,
- .data = &rm_sta_cmd,
- };
-
- memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
- rm_sta_cmd.num_sta = 1;
- memcpy(&rm_sta_cmd.addr, addr, ETH_ALEN);
-
- cmd.flags |= CMD_WANT_SKB;
-
- ret = iwl_legacy_send_cmd(priv, &cmd);
-
- if (ret)
- return ret;
-
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
- pkt->hdr.flags);
- ret = -EIO;
- }
-
- if (!ret) {
- switch (pkt->u.rem_sta.status) {
- case REM_STA_SUCCESS_MSK:
- if (!temporary) {
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- iwl_legacy_sta_ucode_deactivate(priv, sta_id);
- spin_unlock_irqrestore(&priv->sta_lock,
- flags_spin);
- }
- IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
- break;
- default:
- ret = -EIO;
- IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
- break;
- }
- }
- iwl_legacy_free_pages(priv, cmd.reply_page);
-
- return ret;
-}
-
-/**
- * iwl_legacy_remove_station - Remove driver's knowledge of station.
- */
-int iwl_legacy_remove_station(struct iwl_priv *priv, const u8 sta_id,
- const u8 *addr)
-{
- unsigned long flags;
-
- if (!iwl_legacy_is_ready(priv)) {
- IWL_DEBUG_INFO(priv,
- "Unable to remove station %pM, device not ready.\n",
- addr);
- /*
- * It is typical for stations to be removed when we are
- * going down. Return success since device will be down
- * soon anyway
- */
- return 0;
- }
-
- IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n",
- sta_id, addr);
-
- if (WARN_ON(sta_id == IWL_INVALID_STATION))
- return -EINVAL;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
-
- if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
- IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
- addr);
- goto out_err;
- }
-
- if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
- IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
- addr);
- goto out_err;
- }
-
- if (priv->stations[sta_id].used & IWL_STA_LOCAL) {
- kfree(priv->stations[sta_id].lq);
- priv->stations[sta_id].lq = NULL;
- }
-
- priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
-
- priv->num_stations--;
-
- BUG_ON(priv->num_stations < 0);
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return iwl_legacy_send_remove_station(priv, addr, sta_id, false);
-out_err:
- spin_unlock_irqrestore(&priv->sta_lock, flags);
- return -EINVAL;
-}
-EXPORT_SYMBOL_GPL(iwl_legacy_remove_station);
-
-/**
- * iwl_legacy_clear_ucode_stations - clear ucode station table bits
- *
- * This function clears all the bits in the driver indicating
- * which stations are active in the ucode. Call when something
- * other than explicit station management would cause this in
- * the ucode, e.g. unassociated RXON.
- */
-void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- int i;
- unsigned long flags_spin;
- bool cleared = false;
-
- IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver\n");
-
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- for (i = 0; i < priv->hw_params.max_stations; i++) {
- if (ctx && ctx->ctxid != priv->stations[i].ctxid)
- continue;
-
- if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
- IWL_DEBUG_INFO(priv,
- "Clearing ucode active for station %d\n", i);
- priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
- cleared = true;
- }
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-
- if (!cleared)
- IWL_DEBUG_INFO(priv,
- "No active stations found to be cleared\n");
-}
-EXPORT_SYMBOL(iwl_legacy_clear_ucode_stations);
-
-/**
- * iwl_legacy_restore_stations() - Restore driver known stations to device
- *
- * All stations considered active by the driver, but not present in uCode,
- * are restored.
- *
- * Function sleeps.
- */
-void
-iwl_legacy_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- struct iwl_legacy_addsta_cmd sta_cmd;
- struct iwl_link_quality_cmd lq;
- unsigned long flags_spin;
- int i;
- bool found = false;
- int ret;
- bool send_lq;
-
- if (!iwl_legacy_is_ready(priv)) {
- IWL_DEBUG_INFO(priv,
- "Not ready yet, not restoring any stations.\n");
- return;
- }
-
- IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- for (i = 0; i < priv->hw_params.max_stations; i++) {
- if (ctx->ctxid != priv->stations[i].ctxid)
- continue;
- if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
- !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
- IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
- priv->stations[i].sta.sta.addr);
- priv->stations[i].sta.mode = 0;
- priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
- found = true;
- }
- }
-
- for (i = 0; i < priv->hw_params.max_stations; i++) {
- if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
- memcpy(&sta_cmd, &priv->stations[i].sta,
- sizeof(struct iwl_legacy_addsta_cmd));
- send_lq = false;
- if (priv->stations[i].lq) {
- memcpy(&lq, priv->stations[i].lq,
- sizeof(struct iwl_link_quality_cmd));
- send_lq = true;
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- ret = iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
- if (ret) {
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- IWL_ERR(priv, "Adding station %pM failed.\n",
- priv->stations[i].sta.sta.addr);
- priv->stations[i].used &=
- ~IWL_STA_DRIVER_ACTIVE;
- priv->stations[i].used &=
- ~IWL_STA_UCODE_INPROGRESS;
- spin_unlock_irqrestore(&priv->sta_lock,
- flags_spin);
- }
- /*
- * Rate scaling has already been initialized, send
- * current LQ command
- */
- if (send_lq)
- iwl_legacy_send_lq_cmd(priv, ctx, &lq,
- CMD_SYNC, true);
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
- }
- }
-
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- if (!found)
- IWL_DEBUG_INFO(priv, "Restoring all known stations"
- " .... no stations to be restored.\n");
- else
- IWL_DEBUG_INFO(priv, "Restoring all known stations"
- " .... complete.\n");
-}
-EXPORT_SYMBOL(iwl_legacy_restore_stations);
-
-int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv)
-{
- int i;
-
- for (i = 0; i < priv->sta_key_max_num; i++)
- if (!test_and_set_bit(i, &priv->ucode_key_table))
- return i;
-
- return WEP_INVALID_OFFSET;
-}
-EXPORT_SYMBOL(iwl_legacy_get_free_ucode_key_index);
-
-void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv)
-{
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- for (i = 0; i < priv->hw_params.max_stations; i++) {
- if (!(priv->stations[i].used & IWL_STA_BCAST))
- continue;
-
- priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
- priv->num_stations--;
- BUG_ON(priv->num_stations < 0);
- kfree(priv->stations[i].lq);
- priv->stations[i].lq = NULL;
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
-EXPORT_SYMBOL_GPL(iwl_legacy_dealloc_bcast_stations);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-static void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
- struct iwl_link_quality_cmd *lq)
-{
- int i;
- IWL_DEBUG_RATE(priv, "lq station id 0x%x\n", lq->sta_id);
- IWL_DEBUG_RATE(priv, "lq ant 0x%X 0x%X\n",
- lq->general_params.single_stream_ant_msk,
- lq->general_params.dual_stream_ant_msk);
-
- for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++)
- IWL_DEBUG_RATE(priv, "lq index %d 0x%X\n",
- i, lq->rs_table[i].rate_n_flags);
-}
-#else
-static inline void iwl_legacy_dump_lq_cmd(struct iwl_priv *priv,
- struct iwl_link_quality_cmd *lq)
-{
-}
-#endif
-
-/**
- * iwl_legacy_is_lq_table_valid() - Test one aspect of LQ cmd for validity
- *
- * It sometimes happens that an HT rate has been in use and we
- * lose connectivity with the AP; mac80211 will then first tell us that the
- * current channel is no longer HT before removing the station. In such a
- * scenario the RXON flags will be updated to indicate we are not
- * communicating HT anymore, but the LQ command may still contain HT rates.
- * Test for this to prevent driver from sending LQ command between the time
- * RXON flags are updated and when LQ command is updated.
- */
-static bool iwl_legacy_is_lq_table_valid(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct iwl_link_quality_cmd *lq)
-{
- int i;
-
- if (ctx->ht.enabled)
- return true;
-
- IWL_DEBUG_INFO(priv, "Channel %u is not an HT channel\n",
- ctx->active.channel);
- for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
- if (le32_to_cpu(lq->rs_table[i].rate_n_flags) &
- RATE_MCS_HT_MSK) {
- IWL_DEBUG_INFO(priv,
- "index %d of LQ expects HT channel\n",
- i);
- return false;
- }
- }
- return true;
-}
-
-/**
- * iwl_legacy_send_lq_cmd() - Send link quality command
- * @init: This command is sent as part of station initialization right
- * after station has been added.
- *
- * The link quality command is sent as the last step of station creation.
- * When init is set, this is the special case in which we call a callback
- * to clear the state indicating that station creation is in progress.
- */
-int iwl_legacy_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
- struct iwl_link_quality_cmd *lq, u8 flags, bool init)
-{
- int ret = 0;
- unsigned long flags_spin;
-
- struct iwl_host_cmd cmd = {
- .id = REPLY_TX_LINK_QUALITY_CMD,
- .len = sizeof(struct iwl_link_quality_cmd),
- .flags = flags,
- .data = lq,
- };
-
- if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
- return -EINVAL;
-
-
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- return -EINVAL;
- }
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
-
- iwl_legacy_dump_lq_cmd(priv, lq);
- BUG_ON(init && (cmd.flags & CMD_ASYNC));
-
- if (iwl_legacy_is_lq_table_valid(priv, ctx, lq))
- ret = iwl_legacy_send_cmd(priv, &cmd);
- else
- ret = -EINVAL;
-
- if (cmd.flags & CMD_ASYNC)
- return ret;
-
- if (init) {
- IWL_DEBUG_INFO(priv, "init LQ command complete,"
- " clearing sta addition status for sta %d\n",
- lq->sta_id);
- spin_lock_irqsave(&priv->sta_lock, flags_spin);
- priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
- spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
- }
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_send_lq_cmd);
-
-int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_station_priv_common *sta_common = (void *)sta->drv_priv;
- int ret;
-
- IWL_DEBUG_INFO(priv, "received request to remove station %pM\n",
- sta->addr);
- mutex_lock(&priv->mutex);
- IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
- sta->addr);
- ret = iwl_legacy_remove_station(priv, sta_common->sta_id, sta->addr);
- if (ret)
- IWL_ERR(priv, "Error removing station %pM\n",
- sta->addr);
- mutex_unlock(&priv->mutex);
- return ret;
-}
-EXPORT_SYMBOL(iwl_legacy_mac_sta_remove);
diff --git a/drivers/net/wireless/iwlegacy/iwl-sta.h b/drivers/net/wireless/iwlegacy/iwl-sta.h
deleted file mode 100644
index 67bd75f..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-sta.h
+++ /dev/null
@@ -1,148 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-#ifndef __iwl_legacy_sta_h__
-#define __iwl_legacy_sta_h__
-
-#include "iwl-dev.h"
-
-#define HW_KEY_DYNAMIC 0
-#define HW_KEY_DEFAULT 1
-
-#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
-#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
-#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
- being activated */
-#define IWL_STA_LOCAL BIT(3) /* station state not directed by mac80211;
- (this is for the IBSS BSSID stations) */
-#define IWL_STA_BCAST BIT(4) /* this station is the special bcast station */
-
-
-void iwl_legacy_restore_stations(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-void iwl_legacy_clear_ucode_stations(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx);
-void iwl_legacy_dealloc_bcast_stations(struct iwl_priv *priv);
-int iwl_legacy_get_free_ucode_key_index(struct iwl_priv *priv);
-int iwl_legacy_send_add_sta(struct iwl_priv *priv,
- struct iwl_legacy_addsta_cmd *sta, u8 flags);
-int iwl_legacy_add_station_common(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- const u8 *addr, bool is_ap,
- struct ieee80211_sta *sta, u8 *sta_id_r);
-int iwl_legacy_remove_station(struct iwl_priv *priv,
- const u8 sta_id,
- const u8 *addr);
-int iwl_legacy_mac_sta_remove(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-
-u8 iwl_legacy_prep_station(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- const u8 *addr, bool is_ap,
- struct ieee80211_sta *sta);
-
-int iwl_legacy_send_lq_cmd(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx,
- struct iwl_link_quality_cmd *lq,
- u8 flags, bool init);
-
-/**
- * iwl_legacy_clear_driver_stations - clear knowledge of all stations from driver
- * @priv: iwl priv struct
- *
- * This is called during iwl_down() to make sure that in the case
- * we're coming there from a hardware restart mac80211 will be
- * able to reconfigure stations -- if we're getting there in the
- * normal down flow then the stations will already be cleared.
- */
-static inline void iwl_legacy_clear_driver_stations(struct iwl_priv *priv)
-{
- unsigned long flags;
- struct iwl_rxon_context *ctx;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- memset(priv->stations, 0, sizeof(priv->stations));
- priv->num_stations = 0;
-
- priv->ucode_key_table = 0;
-
- for_each_context(priv, ctx) {
- /*
- * Remove all key information that is not stored as part
- * of station information since mac80211 may not have had
- * a chance to remove all the keys. When device is
- * reconfigured by mac80211 after an error all keys will
- * be reconfigured.
- */
- memset(ctx->wep_keys, 0, sizeof(ctx->wep_keys));
- ctx->key_mapping_keys = 0;
- }
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-}
-
-static inline int iwl_legacy_sta_id(struct ieee80211_sta *sta)
-{
- if (WARN_ON(!sta))
- return IWL_INVALID_STATION;
-
- return ((struct iwl_station_priv_common *)sta->drv_priv)->sta_id;
-}
-
-/**
- * iwl_legacy_sta_id_or_broadcast - return sta_id or broadcast sta
- * @priv: iwl priv
- * @context: the current context
- * @sta: mac80211 station
- *
- * In certain circumstances mac80211 passes a station pointer
- * that may be %NULL, for example during TX or key setup. In
- * that case, we need to use the broadcast station, so this
- * inline wraps that pattern.
- */
-static inline int iwl_legacy_sta_id_or_broadcast(struct iwl_priv *priv,
- struct iwl_rxon_context *context,
- struct ieee80211_sta *sta)
-{
- int sta_id;
-
- if (!sta)
- return context->bcast_sta_id;
-
- sta_id = iwl_legacy_sta_id(sta);
-
- /*
- * mac80211 should not be passing a partially
- * initialised station!
- */
- WARN_ON(sta_id == IWL_INVALID_STATION);
-
- return sta_id;
-}
-#endif /* __iwl_legacy_sta_h__ */
diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c
deleted file mode 100644
index c0dfb1a..0000000
--- a/drivers/net/wireless/iwlegacy/iwl-tx.c
+++ /dev/null
@@ -1,659 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/etherdevice.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/export.h>
-#include <net/mac80211.h>
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-sta.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-
-/**
- * iwl_legacy_txq_update_write_ptr - Send new write index to hardware
- */
-void
-iwl_legacy_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
- u32 reg = 0;
- int txq_id = txq->q.id;
-
- if (txq->need_update == 0)
- return;
-
- /* if we're trying to save power */
- if (test_bit(STATUS_POWER_PMI, &priv->status)) {
- /* wake up nic if it's powered down ...
- * uCode will wake up, and interrupt us again, so next
- * time we'll skip this part. */
- reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
-
- if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
- IWL_DEBUG_INFO(priv,
- "Tx queue %d requesting wakeup,"
- " GP1 = 0x%x\n", txq_id, reg);
- iwl_legacy_set_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- return;
- }
-
- iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
-
- /*
- * else not in power-save mode,
- * uCode will never sleep when we're
- * trying to tx (during RFKILL, we're not trying to tx).
- */
- } else
- iwl_write32(priv, HBUS_TARG_WRPTR,
- txq->q.write_ptr | (txq_id << 8));
- txq->need_update = 0;
-}
-EXPORT_SYMBOL(iwl_legacy_txq_update_write_ptr);
-
-/**
- * iwl_legacy_tx_queue_unmap - Unmap any remaining DMA mappings and free skb's
- */
-void iwl_legacy_tx_queue_unmap(struct iwl_priv *priv, int txq_id)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
-
- if (q->n_bd == 0)
- return;
-
- while (q->write_ptr != q->read_ptr) {
- priv->cfg->ops->lib->txq_free_tfd(priv, txq);
- q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
- }
-}
-EXPORT_SYMBOL(iwl_legacy_tx_queue_unmap);
-
-/**
- * iwl_legacy_tx_queue_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
- */
-void iwl_legacy_tx_queue_free(struct iwl_priv *priv, int txq_id)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct device *dev = &priv->pci_dev->dev;
- int i;
-
- iwl_legacy_tx_queue_unmap(priv, txq_id);
-
- /* De-alloc array of command/tx buffers */
- for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
- kfree(txq->cmd[i]);
-
- /* De-alloc circular buffer of TFDs */
- if (txq->q.n_bd)
- dma_free_coherent(dev, priv->hw_params.tfd_size *
- txq->q.n_bd, txq->tfds, txq->q.dma_addr);
-
- /* De-alloc array of per-TFD driver data */
- kfree(txq->txb);
- txq->txb = NULL;
-
- /* deallocate arrays */
- kfree(txq->cmd);
- kfree(txq->meta);
- txq->cmd = NULL;
- txq->meta = NULL;
-
- /* 0-fill queue descriptor structure */
- memset(txq, 0, sizeof(*txq));
-}
-EXPORT_SYMBOL(iwl_legacy_tx_queue_free);
-
-/**
- * iwl_legacy_cmd_queue_unmap - Unmap any remaining DMA mappings from command queue
- */
-void iwl_legacy_cmd_queue_unmap(struct iwl_priv *priv)
-{
- struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
- struct iwl_queue *q = &txq->q;
- int i;
-
- if (q->n_bd == 0)
- return;
-
- while (q->read_ptr != q->write_ptr) {
- i = iwl_legacy_get_cmd_index(q, q->read_ptr, 0);
-
- if (txq->meta[i].flags & CMD_MAPPED) {
- pci_unmap_single(priv->pci_dev,
- dma_unmap_addr(&txq->meta[i], mapping),
- dma_unmap_len(&txq->meta[i], len),
- PCI_DMA_BIDIRECTIONAL);
- txq->meta[i].flags = 0;
- }
-
- q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd);
- }
-
- i = q->n_window;
- if (txq->meta[i].flags & CMD_MAPPED) {
- pci_unmap_single(priv->pci_dev,
- dma_unmap_addr(&txq->meta[i], mapping),
- dma_unmap_len(&txq->meta[i], len),
- PCI_DMA_BIDIRECTIONAL);
- txq->meta[i].flags = 0;
- }
-}
-EXPORT_SYMBOL(iwl_legacy_cmd_queue_unmap);
-
-/**
- * iwl_legacy_cmd_queue_free - Deallocate DMA queue.
- * @txq: Transmit queue to deallocate.
- *
- * Empty queue by removing and destroying all BD's.
- * Free all buffers.
- * 0-fill, but do not free "txq" descriptor structure.
- */
-void iwl_legacy_cmd_queue_free(struct iwl_priv *priv)
-{
- struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
- struct device *dev = &priv->pci_dev->dev;
- int i;
-
- iwl_legacy_cmd_queue_unmap(priv);
-
- /* De-alloc array of command/tx buffers */
- for (i = 0; i <= TFD_CMD_SLOTS; i++)
- kfree(txq->cmd[i]);
-
- /* De-alloc circular buffer of TFDs */
- if (txq->q.n_bd)
- dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
- txq->tfds, txq->q.dma_addr);
-
- /* deallocate arrays */
- kfree(txq->cmd);
- kfree(txq->meta);
- txq->cmd = NULL;
- txq->meta = NULL;
-
- /* 0-fill queue descriptor structure */
- memset(txq, 0, sizeof(*txq));
-}
-EXPORT_SYMBOL(iwl_legacy_cmd_queue_free);
-
-/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
- * DMA services
- *
- * Theory of operation
- *
- * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
- * of buffer descriptors, each of which points to one or more data buffers for
- * the device to read from or fill. Driver and device exchange status of each
- * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
- * entries in each circular buffer, to protect against confusing empty and full
- * queue states.
- *
- * The device reads or writes the data in the queues via the device's several
- * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
- *
- * For a Tx queue, there are low-mark and high-mark limits. If, after queuing
- * a packet for Tx, the free space drops below the low mark, the Tx queue is
- * stopped. When reclaiming packets (on the 'tx done' IRQ), if the free space
- * rises above the high mark, the Tx queue is resumed.
- *
- * See more detailed info in iwl-4965-hw.h.
- ***************************************************/
-
-int iwl_legacy_queue_space(const struct iwl_queue *q)
-{
- int s = q->read_ptr - q->write_ptr;
-
- if (q->read_ptr > q->write_ptr)
- s -= q->n_bd;
-
- if (s <= 0)
- s += q->n_window;
- /* keep some reserve to not confuse empty and full situations */
- s -= 2;
- if (s < 0)
- s = 0;
- return s;
-}
-EXPORT_SYMBOL(iwl_legacy_queue_space);
-
-
-/**
- * iwl_legacy_queue_init - Initialize queue's high/low-water and read/write indexes
- */
-static int iwl_legacy_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
- int count, int slots_num, u32 id)
-{
- q->n_bd = count;
- q->n_window = slots_num;
- q->id = id;
-
- /* count must be power-of-two size, otherwise iwl_legacy_queue_inc_wrap
- * and iwl_legacy_queue_dec_wrap are broken. */
- BUG_ON(!is_power_of_2(count));
-
- /* slots_num must be power-of-two size, otherwise
- * iwl_legacy_get_cmd_index is broken. */
- BUG_ON(!is_power_of_2(slots_num));
-
- q->low_mark = q->n_window / 4;
- if (q->low_mark < 4)
- q->low_mark = 4;
-
- q->high_mark = q->n_window / 8;
- if (q->high_mark < 2)
- q->high_mark = 2;
-
- q->write_ptr = q->read_ptr = 0;
-
- return 0;
-}
-
-/**
- * iwl_legacy_tx_queue_alloc - Alloc driver data and TFD CB for one Tx/cmd queue
- */
-static int iwl_legacy_tx_queue_alloc(struct iwl_priv *priv,
- struct iwl_tx_queue *txq, u32 id)
-{
- struct device *dev = &priv->pci_dev->dev;
- size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
-
- /* Driver private data, only for Tx (not command) queues,
- * not shared with device. */
- if (id != priv->cmd_queue) {
- txq->txb = kzalloc(sizeof(txq->txb[0]) *
- TFD_QUEUE_SIZE_MAX, GFP_KERNEL);
- if (!txq->txb) {
- IWL_ERR(priv, "kmalloc for auxiliary BD "
- "structures failed\n");
- goto error;
- }
- } else {
- txq->txb = NULL;
- }
-
- /* Circular buffer of transmit frame descriptors (TFDs),
- * shared with device */
- txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
- GFP_KERNEL);
- if (!txq->tfds) {
- IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
- goto error;
- }
- txq->q.id = id;
-
- return 0;
-
- error:
- kfree(txq->txb);
- txq->txb = NULL;
-
- return -ENOMEM;
-}
-
-/**
- * iwl_legacy_tx_queue_init - Allocate and initialize one tx/cmd queue
- */
-int iwl_legacy_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
- int slots_num, u32 txq_id)
-{
- int i, len;
- int ret;
- int actual_slots = slots_num;
-
- /*
- * Alloc buffer array for commands (Tx or other types of commands).
- * For the command queue (#4/#9), allocate command space + one big
-	 * command for scan, since the scan command is very large; the system will
- * not have two scans at the same time, so only one is needed.
- * For normal Tx queues (all other queues), no super-size command
- * space is needed.
- */
- if (txq_id == priv->cmd_queue)
- actual_slots++;
-
- txq->meta = kzalloc(sizeof(struct iwl_cmd_meta) * actual_slots,
- GFP_KERNEL);
- txq->cmd = kzalloc(sizeof(struct iwl_device_cmd *) * actual_slots,
- GFP_KERNEL);
-
- if (!txq->meta || !txq->cmd)
- goto out_free_arrays;
-
- len = sizeof(struct iwl_device_cmd);
- for (i = 0; i < actual_slots; i++) {
- /* only happens for cmd queue */
- if (i == slots_num)
- len = IWL_MAX_CMD_SIZE;
-
- txq->cmd[i] = kmalloc(len, GFP_KERNEL);
- if (!txq->cmd[i])
- goto err;
- }
-
- /* Alloc driver data array and TFD circular buffer */
- ret = iwl_legacy_tx_queue_alloc(priv, txq, txq_id);
- if (ret)
- goto err;
-
- txq->need_update = 0;
-
- /*
- * For the default queues 0-3, set up the swq_id
- * already -- all others need to get one later
- * (if they need one at all).
- */
- if (txq_id < 4)
- iwl_legacy_set_swq_id(txq, txq_id, txq_id);
-
- /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
- * iwl_legacy_queue_inc_wrap and iwl_legacy_queue_dec_wrap are broken. */
- BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
-
- /* Initialize queue's high/low-water marks, and head/tail indexes */
- iwl_legacy_queue_init(priv, &txq->q,
- TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
-
- /* Tell device where to find queue */
- priv->cfg->ops->lib->txq_init(priv, txq);
-
- return 0;
-err:
- for (i = 0; i < actual_slots; i++)
- kfree(txq->cmd[i]);
-out_free_arrays:
- kfree(txq->meta);
- kfree(txq->cmd);
-
- return -ENOMEM;
-}
-EXPORT_SYMBOL(iwl_legacy_tx_queue_init);
-
-void iwl_legacy_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
- int slots_num, u32 txq_id)
-{
- int actual_slots = slots_num;
-
- if (txq_id == priv->cmd_queue)
- actual_slots++;
-
- memset(txq->meta, 0, sizeof(struct iwl_cmd_meta) * actual_slots);
-
- txq->need_update = 0;
-
- /* Initialize queue's high/low-water marks, and head/tail indexes */
- iwl_legacy_queue_init(priv, &txq->q,
- TFD_QUEUE_SIZE_MAX, slots_num, txq_id);
-
- /* Tell device where to find queue */
- priv->cfg->ops->lib->txq_init(priv, txq);
-}
-EXPORT_SYMBOL(iwl_legacy_tx_queue_reset);
-
-/*************** HOST COMMAND QUEUE FUNCTIONS *****/
-
-/**
- * iwl_legacy_enqueue_hcmd - enqueue a uCode command
- * @priv: device private data pointer
- * @cmd: a pointer to the uCode command structure
- *
- * The function returns a value < 0 to indicate that the operation
- * failed. On success, it returns the index (> 0) of the command in the
- * command queue.
- */
-int iwl_legacy_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
-{
- struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
- struct iwl_queue *q = &txq->q;
- struct iwl_device_cmd *out_cmd;
- struct iwl_cmd_meta *out_meta;
- dma_addr_t phys_addr;
- unsigned long flags;
- int len;
- u32 idx;
- u16 fix_size;
-
- cmd->len = priv->cfg->ops->utils->get_hcmd_size(cmd->id, cmd->len);
- fix_size = (u16)(cmd->len + sizeof(out_cmd->hdr));
-
-	/* If any of the command structures ends up being larger than
-	 * TFD_MAX_PAYLOAD_SIZE and is sent as a 'small' command, we will
-	 * need to increase the size of the TFD entries.
-	 * Also, check that the command buffer does not exceed the size
-	 * of device_cmd and max_cmd_size. */
- BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
- !(cmd->flags & CMD_SIZE_HUGE));
- BUG_ON(fix_size > IWL_MAX_CMD_SIZE);
-
- if (iwl_legacy_is_rfkill(priv) || iwl_legacy_is_ctkill(priv)) {
- IWL_WARN(priv, "Not sending command - %s KILL\n",
- iwl_legacy_is_rfkill(priv) ? "RF" : "CT");
- return -EIO;
- }
-
- spin_lock_irqsave(&priv->hcmd_lock, flags);
-
- if (iwl_legacy_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
- spin_unlock_irqrestore(&priv->hcmd_lock, flags);
-
- IWL_ERR(priv, "Restarting adapter due to command queue full\n");
- queue_work(priv->workqueue, &priv->restart);
- return -ENOSPC;
- }
-
- idx = iwl_legacy_get_cmd_index(q, q->write_ptr, cmd->flags & CMD_SIZE_HUGE);
- out_cmd = txq->cmd[idx];
- out_meta = &txq->meta[idx];
-
- if (WARN_ON(out_meta->flags & CMD_MAPPED)) {
- spin_unlock_irqrestore(&priv->hcmd_lock, flags);
- return -ENOSPC;
- }
-
- memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */
- out_meta->flags = cmd->flags | CMD_MAPPED;
- if (cmd->flags & CMD_WANT_SKB)
- out_meta->source = cmd;
- if (cmd->flags & CMD_ASYNC)
- out_meta->callback = cmd->callback;
-
- out_cmd->hdr.cmd = cmd->id;
- memcpy(&out_cmd->cmd.payload, cmd->data, cmd->len);
-
- /* At this point, the out_cmd now has all of the incoming cmd
- * information */
-
- out_cmd->hdr.flags = 0;
- out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
- INDEX_TO_SEQ(q->write_ptr));
- if (cmd->flags & CMD_SIZE_HUGE)
- out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
- len = sizeof(struct iwl_device_cmd);
- if (idx == TFD_CMD_SLOTS)
- len = IWL_MAX_CMD_SIZE;
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- switch (out_cmd->hdr.cmd) {
- case REPLY_TX_LINK_QUALITY_CMD:
- case SENSITIVITY_CMD:
- IWL_DEBUG_HC_DUMP(priv,
- "Sending command %s (#%x), seq: 0x%04X, "
- "%d bytes at %d[%d]:%d\n",
- iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
- out_cmd->hdr.cmd,
- le16_to_cpu(out_cmd->hdr.sequence), fix_size,
- q->write_ptr, idx, priv->cmd_queue);
- break;
- default:
- IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
- "%d bytes at %d[%d]:%d\n",
- iwl_legacy_get_cmd_string(out_cmd->hdr.cmd),
- out_cmd->hdr.cmd,
- le16_to_cpu(out_cmd->hdr.sequence), fix_size,
- q->write_ptr, idx, priv->cmd_queue);
- }
-#endif
- txq->need_update = 1;
-
- if (priv->cfg->ops->lib->txq_update_byte_cnt_tbl)
- /* Set up entry in queue's byte count circular buffer */
- priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq, 0);
-
- phys_addr = pci_map_single(priv->pci_dev, &out_cmd->hdr,
- fix_size, PCI_DMA_BIDIRECTIONAL);
- dma_unmap_addr_set(out_meta, mapping, phys_addr);
- dma_unmap_len_set(out_meta, len, fix_size);
-
- trace_iwlwifi_legacy_dev_hcmd(priv, &out_cmd->hdr,
- fix_size, cmd->flags);
-
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- phys_addr, fix_size, 1,
- U32_PAD(cmd->len));
-
- /* Increment and update queue's write index */
- q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_legacy_txq_update_write_ptr(priv, txq);
-
- spin_unlock_irqrestore(&priv->hcmd_lock, flags);
- return idx;
-}
-
-/**
- * iwl_legacy_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
- *
- * When FW advances 'R' index, all entries between old and new 'R' index
- * need to be reclaimed. As a result, some free space forms. If there is
- * enough free space (> low mark), wake the stack that feeds us.
- */
-static void iwl_legacy_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
- int idx, int cmd_idx)
-{
- struct iwl_tx_queue *txq = &priv->txq[txq_id];
- struct iwl_queue *q = &txq->q;
- int nfreed = 0;
-
- if ((idx >= q->n_bd) || (iwl_legacy_queue_used(q, idx) == 0)) {
- IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
- "is out of range [0-%d] %d %d.\n", txq_id,
- idx, q->n_bd, q->write_ptr, q->read_ptr);
- return;
- }
-
- for (idx = iwl_legacy_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
- q->read_ptr = iwl_legacy_queue_inc_wrap(q->read_ptr, q->n_bd)) {
-
- if (nfreed++ > 0) {
- IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
- q->write_ptr, q->read_ptr);
- queue_work(priv->workqueue, &priv->restart);
- }
-
- }
-}
-
-/**
- * iwl_legacy_tx_cmd_complete - Pull unused buffers off the queue and reclaim them
- * @rxb: Rx buffer to reclaim
- *
- * If an Rx buffer has an async callback associated with it the callback
- * will be executed. The attached skb (if present) will only be freed
- * if the callback returns 1
- */
-void
-iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u16 sequence = le16_to_cpu(pkt->hdr.sequence);
- int txq_id = SEQ_TO_QUEUE(sequence);
- int index = SEQ_TO_INDEX(sequence);
- int cmd_index;
- bool huge = !!(pkt->hdr.sequence & SEQ_HUGE_FRAME);
- struct iwl_device_cmd *cmd;
- struct iwl_cmd_meta *meta;
- struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
- unsigned long flags;
-
- /* If a Tx command is being handled and it isn't in the actual
-	 * command queue, then a command routing bug has been introduced
- * in the queue management code. */
- if (WARN(txq_id != priv->cmd_queue,
- "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
- txq_id, priv->cmd_queue, sequence,
- priv->txq[priv->cmd_queue].q.read_ptr,
- priv->txq[priv->cmd_queue].q.write_ptr)) {
- iwl_print_hex_error(priv, pkt, 32);
- return;
- }
-
- cmd_index = iwl_legacy_get_cmd_index(&txq->q, index, huge);
- cmd = txq->cmd[cmd_index];
- meta = &txq->meta[cmd_index];
-
- txq->time_stamp = jiffies;
-
- pci_unmap_single(priv->pci_dev,
- dma_unmap_addr(meta, mapping),
- dma_unmap_len(meta, len),
- PCI_DMA_BIDIRECTIONAL);
-
- /* Input error checking is done when commands are added to queue. */
- if (meta->flags & CMD_WANT_SKB) {
- meta->source->reply_page = (unsigned long)rxb_addr(rxb);
- rxb->page = NULL;
- } else if (meta->callback)
- meta->callback(priv, cmd, pkt);
-
- spin_lock_irqsave(&priv->hcmd_lock, flags);
-
- iwl_legacy_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
-
- if (!(meta->flags & CMD_ASYNC)) {
- clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
- IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
- iwl_legacy_get_cmd_string(cmd->hdr.cmd));
- wake_up(&priv->wait_command_queue);
- }
-
- /* Mark as unmapped */
- meta->flags = 0;
-
- spin_unlock_irqrestore(&priv->hcmd_lock, flags);
-}
-EXPORT_SYMBOL(iwl_legacy_tx_cmd_complete);
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c
deleted file mode 100644
index b282d86..0000000
--- a/drivers/net/wireless/iwlegacy/iwl3945-base.c
+++ /dev/null
@@ -1,4016 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci-aspm.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/firmware.h>
-#include <linux/etherdevice.h>
-#include <linux/if_arp.h>
-
-#include <net/ieee80211_radiotap.h>
-#include <net/mac80211.h>
-
-#include <asm/div64.h>
-
-#define DRV_NAME "iwl3945"
-
-#include "iwl-fh.h"
-#include "iwl-3945-fh.h"
-#include "iwl-commands.h"
-#include "iwl-sta.h"
-#include "iwl-3945.h"
-#include "iwl-core.h"
-#include "iwl-helpers.h"
-#include "iwl-dev.h"
-#include "iwl-spectrum.h"
-
-/*
- * module name, copyright, version, etc.
- */
-
-#define DRV_DESCRIPTION \
-"Intel(R) PRO/Wireless 3945ABG/BG Network Connection driver for Linux"
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-#define VD "d"
-#else
-#define VD
-#endif
-
-/*
- * add "s" to indicate spectrum measurement included.
- * we add it here to be consistent with previous releases in which
- * this was configurable.
- */
-#define DRV_VERSION IWLWIFI_VERSION VD "s"
-#define DRV_COPYRIGHT "Copyright(c) 2003-2011 Intel Corporation"
-#define DRV_AUTHOR "<ilw@linux.intel.com>"
-
-MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_VERSION(DRV_VERSION);
-MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
-MODULE_LICENSE("GPL");
-
- /* module parameters */
-struct iwl_mod_params iwl3945_mod_params = {
- .sw_crypto = 1,
- .restart_fw = 1,
- .disable_hw_scan = 1,
- /* the rest are 0 by default */
-};
-
-/**
- * iwl3945_get_antenna_flags - Get antenna flags for RXON command
- * @priv: eeprom and antenna fields are used to determine antenna flags
- *
- * priv->eeprom39 is used to determine if antenna AUX/MAIN are reversed
- * iwl3945_mod_params.antenna specifies the antenna diversity mode:
- *
- * IWL_ANTENNA_DIVERSITY - NIC selects best antenna by itself
- * IWL_ANTENNA_MAIN - Force MAIN antenna
- * IWL_ANTENNA_AUX - Force AUX antenna
- */
-__le32 iwl3945_get_antenna_flags(const struct iwl_priv *priv)
-{
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-
- switch (iwl3945_mod_params.antenna) {
- case IWL_ANTENNA_DIVERSITY:
- return 0;
-
- case IWL_ANTENNA_MAIN:
- if (eeprom->antenna_switch_type)
- return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
- return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
-
- case IWL_ANTENNA_AUX:
- if (eeprom->antenna_switch_type)
- return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_A_MSK;
- return RXON_FLG_DIS_DIV_MSK | RXON_FLG_ANT_B_MSK;
- }
-
- /* bad antenna selector value */
- IWL_ERR(priv, "Bad antenna selector value (0x%x)\n",
- iwl3945_mod_params.antenna);
-
- return 0; /* "diversity" is default if error */
-}
-
-static int iwl3945_set_ccmp_dynamic_key_info(struct iwl_priv *priv,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- unsigned long flags;
- __le16 key_flags = 0;
- int ret;
-
- key_flags |= (STA_KEY_FLG_CCMP | STA_KEY_FLG_MAP_KEY_MSK);
- key_flags |= cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
-
- if (sta_id == priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id)
- key_flags |= STA_KEY_MULTICAST_MSK;
-
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- keyconf->hw_key_idx = keyconf->keyidx;
- key_flags &= ~STA_KEY_FLG_INVALID;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- priv->stations[sta_id].keyinfo.cipher = keyconf->cipher;
- priv->stations[sta_id].keyinfo.keylen = keyconf->keylen;
- memcpy(priv->stations[sta_id].keyinfo.key, keyconf->key,
- keyconf->keylen);
-
- memcpy(priv->stations[sta_id].sta.key.key, keyconf->key,
- keyconf->keylen);
-
- if ((priv->stations[sta_id].sta.key.key_flags & STA_KEY_FLG_ENCRYPT_MSK)
- == STA_KEY_FLG_NO_ENC)
- priv->stations[sta_id].sta.key.key_offset =
- iwl_legacy_get_free_ucode_key_index(priv);
-	/* else, we are overriding an existing key => no need to allocate room
- * in uCode. */
-
- WARN(priv->stations[sta_id].sta.key.key_offset == WEP_INVALID_OFFSET,
- "no space for a new key");
-
- priv->stations[sta_id].sta.key.key_flags = key_flags;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
-
- IWL_DEBUG_INFO(priv, "hwcrypto: modify ucode station key info\n");
-
- ret = iwl_legacy_send_add_sta(priv,
- &priv->stations[sta_id].sta, CMD_ASYNC);
-
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return ret;
-}
-
-static int iwl3945_set_tkip_dynamic_key_info(struct iwl_priv *priv,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- return -EOPNOTSUPP;
-}
-
-static int iwl3945_set_wep_dynamic_key_info(struct iwl_priv *priv,
- struct ieee80211_key_conf *keyconf,
- u8 sta_id)
-{
- return -EOPNOTSUPP;
-}
-
-static int iwl3945_clear_sta_key_info(struct iwl_priv *priv, u8 sta_id)
-{
- unsigned long flags;
- struct iwl_legacy_addsta_cmd sta_cmd;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- memset(&priv->stations[sta_id].keyinfo, 0, sizeof(struct iwl_hw_key));
- memset(&priv->stations[sta_id].sta.key, 0,
- sizeof(struct iwl4965_keyinfo));
- priv->stations[sta_id].sta.key.key_flags = STA_KEY_FLG_NO_ENC;
- priv->stations[sta_id].sta.sta.modify_mask = STA_MODIFY_KEY_MASK;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(struct iwl_legacy_addsta_cmd));
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- IWL_DEBUG_INFO(priv, "hwcrypto: clear ucode station key info\n");
- return iwl_legacy_send_add_sta(priv, &sta_cmd, CMD_SYNC);
-}
-
-static int iwl3945_set_dynamic_key(struct iwl_priv *priv,
- struct ieee80211_key_conf *keyconf, u8 sta_id)
-{
- int ret = 0;
-
- keyconf->hw_key_idx = HW_KEY_DYNAMIC;
-
- switch (keyconf->cipher) {
- case WLAN_CIPHER_SUITE_CCMP:
- ret = iwl3945_set_ccmp_dynamic_key_info(priv, keyconf, sta_id);
- break;
- case WLAN_CIPHER_SUITE_TKIP:
- ret = iwl3945_set_tkip_dynamic_key_info(priv, keyconf, sta_id);
- break;
- case WLAN_CIPHER_SUITE_WEP40:
- case WLAN_CIPHER_SUITE_WEP104:
- ret = iwl3945_set_wep_dynamic_key_info(priv, keyconf, sta_id);
- break;
- default:
- IWL_ERR(priv, "Unknown alg: %s alg=%x\n", __func__,
- keyconf->cipher);
- ret = -EINVAL;
- }
-
- IWL_DEBUG_WEP(priv, "Set dynamic key: alg=%x len=%d idx=%d sta=%d ret=%d\n",
- keyconf->cipher, keyconf->keylen, keyconf->keyidx,
- sta_id, ret);
-
- return ret;
-}
-
-static int iwl3945_remove_static_key(struct iwl_priv *priv)
-{
- int ret = -EOPNOTSUPP;
-
- return ret;
-}
-
-static int iwl3945_set_static_key(struct iwl_priv *priv,
- struct ieee80211_key_conf *key)
-{
- if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- key->cipher == WLAN_CIPHER_SUITE_WEP104)
- return -EOPNOTSUPP;
-
- IWL_ERR(priv, "Static key invalid: cipher %x\n", key->cipher);
- return -EINVAL;
-}
-
-static void iwl3945_clear_free_frames(struct iwl_priv *priv)
-{
- struct list_head *element;
-
- IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
- priv->frames_count);
-
- while (!list_empty(&priv->free_frames)) {
- element = priv->free_frames.next;
- list_del(element);
- kfree(list_entry(element, struct iwl3945_frame, list));
- priv->frames_count--;
- }
-
- if (priv->frames_count) {
- IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
- priv->frames_count);
- priv->frames_count = 0;
- }
-}
-
-static struct iwl3945_frame *iwl3945_get_free_frame(struct iwl_priv *priv)
-{
- struct iwl3945_frame *frame;
- struct list_head *element;
- if (list_empty(&priv->free_frames)) {
- frame = kzalloc(sizeof(*frame), GFP_KERNEL);
- if (!frame) {
- IWL_ERR(priv, "Could not allocate frame!\n");
- return NULL;
- }
-
- priv->frames_count++;
- return frame;
- }
-
- element = priv->free_frames.next;
- list_del(element);
- return list_entry(element, struct iwl3945_frame, list);
-}
-
-static void iwl3945_free_frame(struct iwl_priv *priv, struct iwl3945_frame *frame)
-{
- memset(frame, 0, sizeof(*frame));
- list_add(&frame->list, &priv->free_frames);
-}
-
-unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- int left)
-{
-
- if (!iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS) || !priv->beacon_skb)
- return 0;
-
- if (priv->beacon_skb->len > left)
- return 0;
-
- memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
-
- return priv->beacon_skb->len;
-}
-
-static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
-{
- struct iwl3945_frame *frame;
- unsigned int frame_size;
- int rc;
- u8 rate;
-
- frame = iwl3945_get_free_frame(priv);
-
- if (!frame) {
- IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
- "command.\n");
- return -ENOMEM;
- }
-
- rate = iwl_legacy_get_lowest_plcp(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
-
- frame_size = iwl3945_hw_get_beacon_cmd(priv, frame, rate);
-
- rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
- &frame->u.cmd[0]);
-
- iwl3945_free_frame(priv, frame);
-
- return rc;
-}
-
-static void iwl3945_unset_hw_params(struct iwl_priv *priv)
-{
- if (priv->_3945.shared_virt)
- dma_free_coherent(&priv->pci_dev->dev,
- sizeof(struct iwl3945_shared),
- priv->_3945.shared_virt,
- priv->_3945.shared_phys);
-}
-
-static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
- struct ieee80211_tx_info *info,
- struct iwl_device_cmd *cmd,
- struct sk_buff *skb_frag,
- int sta_id)
-{
- struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
- struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
-
- tx_cmd->sec_ctl = 0;
-
- switch (keyinfo->cipher) {
- case WLAN_CIPHER_SUITE_CCMP:
- tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
- memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
- IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
- break;
-
- case WLAN_CIPHER_SUITE_TKIP:
- break;
-
- case WLAN_CIPHER_SUITE_WEP104:
- tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
- /* fall through */
- case WLAN_CIPHER_SUITE_WEP40:
- tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
- (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
-
- memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
-
- IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
- "with key %d\n", info->control.hw_key->hw_key_idx);
- break;
-
- default:
- IWL_ERR(priv, "Unknown encode cipher %x\n", keyinfo->cipher);
- break;
- }
-}
-
-/*
- * Build the basic part of the REPLY_TX command.
- */
-static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct ieee80211_tx_info *info,
- struct ieee80211_hdr *hdr, u8 std_id)
-{
- struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
- __le32 tx_flags = tx_cmd->tx_flags;
- __le16 fc = hdr->frame_control;
-
- tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
- if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
- tx_flags |= TX_CMD_FLG_ACK_MSK;
- if (ieee80211_is_mgmt(fc))
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- if (ieee80211_is_probe_resp(fc) &&
- !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
- tx_flags |= TX_CMD_FLG_TSF_MSK;
- } else {
- tx_flags &= (~TX_CMD_FLG_ACK_MSK);
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- }
-
- tx_cmd->sta_id = std_id;
- if (ieee80211_has_morefrags(fc))
- tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
-
- if (ieee80211_is_data_qos(fc)) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- tx_cmd->tid_tspec = qc[0] & 0xf;
- tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
- } else {
- tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
- }
-
- iwl_legacy_tx_cmd_protection(priv, info, fc, &tx_flags);
-
- tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
- if (ieee80211_is_mgmt(fc)) {
- if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
- tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
- else
- tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
- } else {
- tx_cmd->timeout.pm_frame_timeout = 0;
- }
-
- tx_cmd->driver_txop = 0;
- tx_cmd->tx_flags = tx_flags;
- tx_cmd->next_frame_len = 0;
-}
-
-/*
- * start REPLY_TX command process
- */
-static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
-{
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
- struct iwl3945_tx_cmd *tx_cmd;
- struct iwl_tx_queue *txq = NULL;
- struct iwl_queue *q = NULL;
- struct iwl_device_cmd *out_cmd;
- struct iwl_cmd_meta *out_meta;
- dma_addr_t phys_addr;
- dma_addr_t txcmd_phys;
- int txq_id = skb_get_queue_mapping(skb);
- u16 len, idx, hdr_len;
- u8 id;
- u8 unicast;
- u8 sta_id;
- u8 tid = 0;
- __le16 fc;
- u8 wait_write_ptr = 0;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- if (iwl_legacy_is_rfkill(priv)) {
- IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
- goto drop_unlock;
- }
-
- if ((ieee80211_get_tx_rate(priv->hw, info)->hw_value & 0xFF) == IWL_INVALID_RATE) {
- IWL_ERR(priv, "ERROR: No TX rate available.\n");
- goto drop_unlock;
- }
-
- unicast = !is_multicast_ether_addr(hdr->addr1);
- id = 0;
-
- fc = hdr->frame_control;
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (ieee80211_is_auth(fc))
- IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
- else if (ieee80211_is_assoc_req(fc))
- IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
- else if (ieee80211_is_reassoc_req(fc))
- IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
-#endif
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- hdr_len = ieee80211_hdrlen(fc);
-
- /* Find index into station table for destination station */
- sta_id = iwl_legacy_sta_id_or_broadcast(
- priv, &priv->contexts[IWL_RXON_CTX_BSS],
- info->control.sta);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
- hdr->addr1);
- goto drop;
- }
-
- IWL_DEBUG_RATE(priv, "station Id %d\n", sta_id);
-
- if (ieee80211_is_data_qos(fc)) {
- u8 *qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
- if (unlikely(tid >= MAX_TID_COUNT))
- goto drop;
- }
-
- /* Descriptor for chosen Tx queue */
- txq = &priv->txq[txq_id];
- q = &txq->q;
-
- if ((iwl_legacy_queue_space(q) < q->high_mark))
- goto drop;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- idx = iwl_legacy_get_cmd_index(q, q->write_ptr, 0);
-
- /* Set up driver data for this TFD */
- memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
- txq->txb[q->write_ptr].skb = skb;
- txq->txb[q->write_ptr].ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- /* Init first empty entry in queue's array of Tx/cmd buffers */
- out_cmd = txq->cmd[idx];
- out_meta = &txq->meta[idx];
- tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
- memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
- memset(tx_cmd, 0, sizeof(*tx_cmd));
-
- /*
- * Set up the Tx-command (not MAC!) header.
- * Store the chosen Tx queue and TFD index within the sequence field;
- * after Tx, uCode's Tx response will return this value so driver can
- * locate the frame within the tx queue and do post-tx processing.
- */
- out_cmd->hdr.cmd = REPLY_TX;
- out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
- INDEX_TO_SEQ(q->write_ptr)));
-
- /* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, hdr_len);
-
-
- if (info->control.hw_key)
- iwl3945_build_tx_cmd_hwcrypto(priv, info, out_cmd, skb, sta_id);
-
- /* TODO need this for burst mode later on */
- iwl3945_build_tx_cmd_basic(priv, out_cmd, info, hdr, sta_id);
-
- /* set is_hcca to 0; it probably will never be implemented */
- iwl3945_hw_build_tx_cmd_rate(priv, out_cmd, info, hdr, sta_id, 0);
-
- /* Total # bytes to be transmitted */
- len = (u16)skb->len;
- tx_cmd->len = cpu_to_le16(len);
-
- iwl_legacy_dbg_log_tx_data_frame(priv, len, hdr);
- iwl_legacy_update_stats(priv, true, fc, len);
- tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
- tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
-
- if (!ieee80211_has_morefrags(hdr->frame_control)) {
- txq->need_update = 1;
- } else {
- wait_write_ptr = 1;
- txq->need_update = 0;
- }
-
- IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
- le16_to_cpu(out_cmd->hdr.sequence));
- IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
- iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
- iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
- ieee80211_hdrlen(fc));
-
- /*
- * Use the first empty entry in this queue's command buffer array
- * to contain the Tx command and MAC header concatenated together
- * (payload data will be in another buffer).
- * Size of this varies, due to varying MAC header length.
- * If end is not dword aligned, we'll have 2 extra bytes at the end
- * of the MAC header (device reads on dword boundaries).
- * We'll tell device about this padding later.
- */
- len = sizeof(struct iwl3945_tx_cmd) +
- sizeof(struct iwl_cmd_header) + hdr_len;
- len = (len + 3) & ~3;
-
- /* Physical address of this Tx command's header (not MAC header!),
- * within command buffer array. */
- txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr,
- len, PCI_DMA_TODEVICE);
-	/* We do not map the meta data ... so we can safely use this address
-	 * later to unmap the command. */
- dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
- dma_unmap_len_set(out_meta, len, len);
-
- /* Add buffer containing Tx command and MAC(!) header to TFD's
- * first entry */
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- txcmd_phys, len, 1, 0);
-
-
- /* Set up TFD's 2nd entry to point directly to remainder of skb,
- * if any (802.11 null frames have no payload). */
- len = skb->len - hdr_len;
- if (len) {
- phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
- len, PCI_DMA_TODEVICE);
- priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
- phys_addr, len,
- 0, U32_PAD(len));
- }
-
-
- /* Tell device the write index *just past* this latest filled TFD */
- q->write_ptr = iwl_legacy_queue_inc_wrap(q->write_ptr, q->n_bd);
- iwl_legacy_txq_update_write_ptr(priv, txq);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- if ((iwl_legacy_queue_space(q) < q->high_mark)
- && priv->mac80211_registered) {
- if (wait_write_ptr) {
- spin_lock_irqsave(&priv->lock, flags);
- txq->need_update = 1;
- iwl_legacy_txq_update_write_ptr(priv, txq);
- spin_unlock_irqrestore(&priv->lock, flags);
- }
-
- iwl_legacy_stop_queue(priv, txq);
- }
-
- return 0;
-
-drop_unlock:
- spin_unlock_irqrestore(&priv->lock, flags);
-drop:
- return -1;
-}
-
-static int iwl3945_get_measurement(struct iwl_priv *priv,
- struct ieee80211_measurement_params *params,
- u8 type)
-{
- struct iwl_spectrum_cmd spectrum;
- struct iwl_rx_packet *pkt;
- struct iwl_host_cmd cmd = {
- .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
- .data = (void *)&spectrum,
- .flags = CMD_WANT_SKB,
- };
- u32 add_time = le64_to_cpu(params->start_time);
- int rc;
- int spectrum_resp_status;
- int duration = le16_to_cpu(params->duration);
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
- add_time = iwl_legacy_usecs_to_beacons(priv,
- le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
- le16_to_cpu(ctx->timing.beacon_interval));
-
- memset(&spectrum, 0, sizeof(spectrum));
-
- spectrum.channel_count = cpu_to_le16(1);
- spectrum.flags =
- RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
- spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
- cmd.len = sizeof(spectrum);
- spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
-
- if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))
- spectrum.start_time =
- iwl_legacy_add_beacon_time(priv,
- priv->_3945.last_beacon_time, add_time,
- le16_to_cpu(ctx->timing.beacon_interval));
- else
- spectrum.start_time = 0;
-
- spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
- spectrum.channels[0].channel = params->channel;
- spectrum.channels[0].type = type;
- if (ctx->active.flags & RXON_FLG_BAND_24G_MSK)
- spectrum.flags |= RXON_FLG_BAND_24G_MSK |
- RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
-
- rc = iwl_legacy_send_cmd_sync(priv, &cmd);
- if (rc)
- return rc;
-
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
- rc = -EIO;
- }
-
- spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
- switch (spectrum_resp_status) {
- case 0: /* Command will be handled */
- if (pkt->u.spectrum.id != 0xff) {
- IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n",
- pkt->u.spectrum.id);
- priv->measurement_status &= ~MEASUREMENT_READY;
- }
- priv->measurement_status |= MEASUREMENT_ACTIVE;
- rc = 0;
- break;
-
- case 1: /* Command will not be handled */
- rc = -EAGAIN;
- break;
- }
-
- iwl_legacy_free_pages(priv, cmd.reply_page);
-
- return rc;
-}
-
-static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_alive_resp *palive;
- struct delayed_work *pwork;
-
- palive = &pkt->u.alive_frame;
-
- IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
- "0x%01X 0x%01X\n",
- palive->is_valid, palive->ver_type,
- palive->ver_subtype);
-
- if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
- IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
- memcpy(&priv->card_alive_init, &pkt->u.alive_frame,
- sizeof(struct iwl_alive_resp));
- pwork = &priv->init_alive_start;
- } else {
- IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
- memcpy(&priv->card_alive, &pkt->u.alive_frame,
- sizeof(struct iwl_alive_resp));
- pwork = &priv->alive_start;
- iwl3945_disable_events(priv);
- }
-
- /* We delay the ALIVE response by 5ms to
- * give the HW RF Kill time to activate... */
- if (palive->is_valid == UCODE_VALID_OK)
- queue_delayed_work(priv->workqueue, pwork,
- msecs_to_jiffies(5));
- else
- IWL_WARN(priv, "uCode did not respond OK.\n");
-}
-
-static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
-#endif
-
- IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
-}
-
-static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- u8 rate = beacon->beacon_notify_hdr.rate;
-
- IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
- "tsf %d %d rate %d\n",
- le32_to_cpu(beacon->beacon_notify_hdr.status) & TX_STATUS_MSK,
- beacon->beacon_notify_hdr.failure_frame,
- le32_to_cpu(beacon->ibss_mgr_status),
- le32_to_cpu(beacon->high_tsf),
- le32_to_cpu(beacon->low_tsf), rate);
-#endif
-
- priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-
-}
-
-/* Handle notification from uCode that card's power state is changing
- * due to software, hardware, or critical temperature RFKILL */
-static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
- unsigned long status = priv->status;
-
- IWL_WARN(priv, "Card state received: HW:%s SW:%s\n",
- (flags & HW_CARD_DISABLED) ? "Kill" : "On",
- (flags & SW_CARD_DISABLED) ? "Kill" : "On");
-
- iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
- if (flags & HW_CARD_DISABLED)
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
-
- iwl_legacy_scan_cancel(priv);
-
- if ((test_bit(STATUS_RF_KILL_HW, &status) !=
- test_bit(STATUS_RF_KILL_HW, &priv->status)))
- wiphy_rfkill_set_hw_state(priv->hw->wiphy,
- test_bit(STATUS_RF_KILL_HW, &priv->status));
- else
- wake_up(&priv->wait_command_queue);
-}
-
-/**
- * iwl3945_setup_rx_handlers - Initialize Rx handler callbacks
- *
- * Set up the Rx handlers for each of the reply types sent from the uCode
- * to the host.
- *
- * This function chains into the hardware-specific files so they can set up
- * any hardware-specific handlers as well.
- */
-static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
-{
- priv->rx_handlers[REPLY_ALIVE] = iwl3945_rx_reply_alive;
- priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
- priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
- priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
- priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
- iwl_legacy_rx_spectrum_measure_notif;
- priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
- priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
- iwl_legacy_rx_pm_debug_statistics_notif;
- priv->rx_handlers[BEACON_NOTIFICATION] = iwl3945_rx_beacon_notif;
-
- /*
- * The same handler is used for both the REPLY to a discrete
- * statistics request from the host as well as for the periodic
- * statistics notifications (after received beacons) from the uCode.
- */
- priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_reply_statistics;
- priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
-
- iwl_legacy_setup_rx_scan_handlers(priv);
- priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
-
- /* Set up hardware specific Rx handlers */
- iwl3945_hw_rx_handler_setup(priv);
-}
-
-/************************** RX-FUNCTIONS ****************************/
-/*
- * Rx theory of operation
- *
- * The host allocates 32 DMA target addresses and passes the host address
- * to the firmware at register IWL_RFDS_TABLE_LOWER + N * RFD_SIZE where N is
- * 0 to 31
- *
- * Rx Queue Indexes
- * The host/firmware share two index registers for managing the Rx buffers.
- *
- * The READ index maps to the first position that the firmware may be writing
- * to -- the driver can read up to (but not including) this position and get
- * good data.
- * The READ index is managed by the firmware once the card is enabled.
- *
- * The WRITE index maps to the last position the driver has read from -- the
- * position preceding WRITE is the last slot the firmware can place a packet.
- *
- * The queue is empty (no good data) if WRITE = READ - 1, and is full if
- * WRITE = READ.
- *
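- * A worked example of this convention (index values are illustrative and
- * wrap modulo the queue size):
- *   READ = 10, WRITE =  9  -->  empty, the driver has consumed everything
- *   READ = 10, WRITE = 10  -->  full, the firmware has no free slot left
- *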
- * During initialization, the host sets up the READ queue position to the first
- * INDEX position, and WRITE to the last (READ - 1 wrapped)
- *
- * When the firmware places a packet in a buffer, it will advance the READ index
- * and fire the RX interrupt. The driver can then query the READ index and
- * process as many packets as possible, moving the WRITE index forward as it
- * resets the Rx queue buffers with new memory.
- *
- * The management in the driver is as follows:
- * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
- * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
- * to replenish the iwl->rxq->rx_free.
- * + In iwl3945_rx_replenish (scheduled) if 'processed' != 'read' then the
- * iwl->rxq is replenished and the READ INDEX is updated (updating the
- * 'processed' and 'read' driver indexes as well)
- * + A received packet is processed and handed to the kernel network stack,
- * detached from the iwl->rxq. The driver 'processed' index is updated.
- * + The Host/Firmware iwl->rxq is replenished at tasklet time from the rx_free
- * list. If there are no allocated buffers in iwl->rxq->rx_free, the READ
- * INDEX is not incremented and iwl->status(RX_STALLED) is set. If there
- * were enough free buffers and RX_STALLED is set it is cleared.
- *
- *
- * Driver sequence:
- *
- * iwl3945_rx_replenish() Replenishes rx_free list from rx_used, and calls
- * iwl3945_rx_queue_restock
- * iwl3945_rx_queue_restock() Moves available buffers from rx_free into Rx
- * queue, updates firmware pointers, and updates
- * the WRITE index. If insufficient rx_free buffers
- * are available, schedules iwl3945_rx_replenish
- *
- * -- enable interrupts --
- * ISR - iwl3945_rx() Detach iwl_rx_mem_buffers from pool up to the
- * READ INDEX, detaching the SKB from the pool.
- * Moves the packet buffer from queue to rx_used.
- * Calls iwl3945_rx_queue_restock to refill any empty
- * slots.
- * ...
- *
- */
-
-/**
- * iwl3945_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
- */
-static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv,
- dma_addr_t dma_addr)
-{
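-	/* 3945 receive buffer descriptors hold the plain 32-bit DMA address */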
- return cpu_to_le32((u32)dma_addr);
-}
-
-/**
- * iwl3945_rx_queue_restock - refill RX queue from pre-allocated pool
- *
- * If there are slots in the RX queue that need to be restocked,
- * and we have free pre-allocated buffers, fill the ranks as much
- * as we can, pulling from rx_free.
- *
- * This moves the 'write' index forward to catch up with 'processed', and
- * also updates the memory address in the firmware to reference the new
- * target buffer.
- */
-static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- unsigned long flags;
- int write;
-
- spin_lock_irqsave(&rxq->lock, flags);
- write = rxq->write & ~0x7;
- while ((iwl_legacy_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
- /* Get next free Rx buffer, remove from free list */
- element = rxq->rx_free.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
-
- /* Point to Rx buffer via next RBD in circular buffer */
- rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma);
- rxq->queue[rxq->write] = rxb;
- rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
- rxq->free_count--;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
- /* If the pre-allocated buffer pool is dropping low, schedule to
- * refill it */
- if (rxq->free_count <= RX_LOW_WATERMARK)
- queue_work(priv->workqueue, &priv->rx_replenish);
-
-
- /* If we've added more space for the firmware to place data, tell it.
- * Increment device's write pointer in multiples of 8. */
- if ((rxq->write_actual != (rxq->write & ~0x7))
- || (abs(rxq->write - rxq->read) > 7)) {
- spin_lock_irqsave(&rxq->lock, flags);
- rxq->need_update = 1;
- spin_unlock_irqrestore(&rxq->lock, flags);
- iwl_legacy_rx_queue_update_write_ptr(priv, rxq);
- }
-}
-
-/**
- * iwl3945_rx_allocate - Move all used buffers from rx_used to rx_free
- *
- * A receive page is allocated and DMA-mapped for each slot as it is
- * moved to rx_free.
- *
- * The callers (iwl3945_rx_replenish and iwl3945_rx_replenish_now) then
- * restock the Rx queue via iwl3945_rx_queue_restock; the GFP_KERNEL path
- * is run as a scheduled work item (except for during initialization).
- */
-static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- struct page *page;
- unsigned long flags;
- gfp_t gfp_mask = priority;
-
- while (1) {
- spin_lock_irqsave(&rxq->lock, flags);
-
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- return;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- if (rxq->free_count > RX_LOW_WATERMARK)
- gfp_mask |= __GFP_NOWARN;
-
- if (priv->hw_params.rx_page_order > 0)
- gfp_mask |= __GFP_COMP;
-
- /* Alloc a new receive buffer */
- page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
- if (!page) {
- if (net_ratelimit())
- IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
- if ((rxq->free_count <= RX_LOW_WATERMARK) &&
- net_ratelimit())
- IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
- priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
- rxq->free_count);
- /* We don't reschedule replenish work here -- we will
- * call the restock method and if it still needs
- * more buffers it will schedule replenish */
- break;
- }
-
- spin_lock_irqsave(&rxq->lock, flags);
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- __free_pages(page, priv->hw_params.rx_page_order);
- return;
- }
- element = rxq->rx_used.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- rxb->page = page;
- /* Get physical address of RB/SKB */
- rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
-
- spin_lock_irqsave(&rxq->lock, flags);
-
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
- priv->alloc_rxb_page++;
-
- spin_unlock_irqrestore(&rxq->lock, flags);
- }
-}
-
-void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- unsigned long flags;
- int i;
- spin_lock_irqsave(&rxq->lock, flags);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
- /* Fill the rx_used queue with _all_ of the Rx buffers */
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
- /* In the reset function, these buffers may have been allocated
- * to an SKB, so we need to unmap and free potential storage */
- if (rxq->pool[i].page != NULL) {
- pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- __iwl_legacy_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
- }
-
- /* Set us so that we have processed and used all buffers, but have
- * not restocked the Rx queue with fresh buffers */
- rxq->read = rxq->write = 0;
- rxq->write_actual = 0;
- rxq->free_count = 0;
- spin_unlock_irqrestore(&rxq->lock, flags);
-}
-
-void iwl3945_rx_replenish(void *data)
-{
- struct iwl_priv *priv = data;
- unsigned long flags;
-
- iwl3945_rx_allocate(priv, GFP_KERNEL);
-
- spin_lock_irqsave(&priv->lock, flags);
- iwl3945_rx_queue_restock(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-static void iwl3945_rx_replenish_now(struct iwl_priv *priv)
-{
- iwl3945_rx_allocate(priv, GFP_ATOMIC);
-
- iwl3945_rx_queue_restock(priv);
-}
-
-
-/* Assumes that the page field of the buffers in 'pool' is kept accurate.
- * If a page has been detached, the pool entry must have its page set to NULL.
- * This free routine walks the pool and, for every entry whose page is
- * non-NULL, unmaps and frees it.
- */
-static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- int i;
- for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
- if (rxq->pool[i].page != NULL) {
- pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- __iwl_legacy_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- }
-
- dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->bd_dma);
- dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
- rxq->bd = NULL;
- rxq->rb_stts = NULL;
-}
-
-
-/* Convert linear signal-to-noise ratio into dB */
-static u8 ratio2dB[100] = {
-/* 0 1 2 3 4 5 6 7 8 9 */
- 0, 0, 6, 10, 12, 14, 16, 17, 18, 19, /* 00 - 09 */
- 20, 21, 22, 22, 23, 23, 24, 25, 26, 26, /* 10 - 19 */
- 26, 26, 26, 27, 27, 28, 28, 28, 29, 29, /* 20 - 29 */
- 29, 30, 30, 30, 31, 31, 31, 31, 32, 32, /* 30 - 39 */
- 32, 32, 32, 33, 33, 33, 33, 33, 34, 34, /* 40 - 49 */
- 34, 34, 34, 34, 35, 35, 35, 35, 35, 35, /* 50 - 59 */
- 36, 36, 36, 36, 36, 36, 36, 37, 37, 37, /* 60 - 69 */
- 37, 37, 37, 37, 37, 38, 38, 38, 38, 38, /* 70 - 79 */
- 38, 38, 38, 38, 38, 39, 39, 39, 39, 39, /* 80 - 89 */
- 39, 39, 39, 39, 39, 40, 40, 40, 40, 40 /* 90 - 99 */
-};
-
-/* Calculates a relative dB value from a ratio of linear
- * (i.e. not dB) signal levels.
- * Conversion assumes that levels are voltages (20*log), not powers (10*log). */
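-/* For example, a 10:1 voltage ratio is 20*log10(10) = 20 dB, matching
- * ratio2dB[10] above; a 100:1 ratio is handled as 20 dB + ratio2dB[10] = 40 dB. */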
-int iwl3945_calc_db_from_ratio(int sig_ratio)
-{
- /* 1000:1 or higher just report as 60 dB */
- if (sig_ratio >= 1000)
- return 60;
-
- /* 100:1 or higher, divide by 10 and use table,
- * add 20 dB to make up for divide by 10 */
- if (sig_ratio >= 100)
- return 20 + (int)ratio2dB[sig_ratio/10];
-
- /* We shouldn't see this */
- if (sig_ratio < 1)
- return 0;
-
- /* Use table for ratios 1:1 - 99:1 */
- return (int)ratio2dB[sig_ratio];
-}
-
-/**
- * iwl3945_rx_handle - Main entry function for receiving responses from uCode
- *
- * Uses the priv->rx_handlers callback function array to invoke
- * the appropriate handlers, including command responses,
- * frame-received notifications, and other notifications.
- */
-static void iwl3945_rx_handle(struct iwl_priv *priv)
-{
- struct iwl_rx_mem_buffer *rxb;
- struct iwl_rx_packet *pkt;
- struct iwl_rx_queue *rxq = &priv->rxq;
- u32 r, i;
- int reclaim;
- unsigned long flags;
- u8 fill_rx = 0;
- u32 count = 8;
- int total_empty = 0;
-
- /* uCode's read index (stored in shared DRAM) indicates the last Rx
- * buffer that the driver may process (last buffer filled by ucode). */
- r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
- i = rxq->read;
-
-	/* calculate how many slots need to be restocked after handling RX */
- total_empty = r - rxq->write_actual;
- if (total_empty < 0)
- total_empty += RX_QUEUE_SIZE;
-
- if (total_empty > (RX_QUEUE_SIZE / 2))
- fill_rx = 1;
- /* Rx interrupt, but nothing sent from uCode */
- if (i == r)
- IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
-
- while (i != r) {
- int len;
-
- rxb = rxq->queue[i];
-
- /* If an RXB doesn't have a Rx queue slot associated with it,
- * then a bug has been introduced in the queue refilling
- * routines -- catch it here */
- BUG_ON(rxb == NULL);
-
- rxq->queue[i] = NULL;
-
- pci_unmap_page(priv->pci_dev, rxb->page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- pkt = rxb_addr(rxb);
-
- len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- len += sizeof(u32); /* account for status word */
- trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
-
- /* Reclaim a command buffer only if this packet is a response
- * to a (driver-originated) command.
- * If the packet (e.g. Rx frame) originated from uCode,
- * there is no command buffer to reclaim.
- * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
- * but apparently a few don't get set; catch them here. */
- reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
- (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
- (pkt->hdr.cmd != REPLY_TX);
-
- /* Based on type of command response or notification,
- * handle those that need handling via function in
- * rx_handlers table. See iwl3945_setup_rx_handlers() */
- if (priv->rx_handlers[pkt->hdr.cmd]) {
- IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
- iwl_legacy_get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
- priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
- priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
- } else {
- /* No handling needed */
- IWL_DEBUG_RX(priv,
- "r %d i %d No handler needed for %s, 0x%02x\n",
- r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
- pkt->hdr.cmd);
- }
-
- /*
-		 * XXX: After this point, always check rxb->page against
-		 * NULL before touching it or its virtual memory (pkt),
-		 * because some rx_handler might have already taken or
-		 * freed the page.
- */
-
- if (reclaim) {
- /* Invoke any callbacks, transfer the buffer to caller,
- * and fire off the (possibly) blocking iwl_legacy_send_cmd()
- * as we reclaim the driver command queue */
- if (rxb->page)
- iwl_legacy_tx_cmd_complete(priv, rxb);
- else
- IWL_WARN(priv, "Claim null rxb?\n");
- }
-
- /* Reuse the page if possible. For notification packets and
- * SKBs that fail to Rx correctly, add them back into the
- * rx_free list for reuse later. */
- spin_lock_irqsave(&rxq->lock, flags);
- if (rxb->page != NULL) {
- rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
- 0, PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
- } else
- list_add_tail(&rxb->list, &rxq->rx_used);
-
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- i = (i + 1) & RX_QUEUE_MASK;
- /* If there are a lot of unused frames,
- * restock the Rx queue so ucode won't assert. */
- if (fill_rx) {
- count++;
- if (count >= 8) {
- rxq->read = i;
- iwl3945_rx_replenish_now(priv);
- count = 0;
- }
- }
- }
-
- /* Backtrack one entry */
- rxq->read = i;
- if (fill_rx)
- iwl3945_rx_replenish_now(priv);
- else
- iwl3945_rx_queue_restock(priv);
-}
-
-/* call this function to flush any scheduled tasklet */
-static inline void iwl3945_synchronize_irq(struct iwl_priv *priv)
-{
- /* wait to make sure we flush pending tasklet*/
- synchronize_irq(priv->pci_dev->irq);
- tasklet_kill(&priv->irq_tasklet);
-}
-
-static const char *iwl3945_desc_lookup(int i)
-{
- switch (i) {
- case 1:
- return "FAIL";
- case 2:
- return "BAD_PARAM";
- case 3:
- return "BAD_CHECKSUM";
- case 4:
- return "NMI_INTERRUPT";
- case 5:
- return "SYSASSERT";
- case 6:
- return "FATAL_ERROR";
- }
-
- return "UNKNOWN";
-}
-
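-/* Layout of the 3945 error log in SRAM, as read below: word 0 holds the
- * entry count, followed by 7-word entries
- * (desc, time, blink1, blink2, ilink1, ilink2, data1). */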
-#define ERROR_START_OFFSET (1 * sizeof(u32))
-#define ERROR_ELEM_SIZE (7 * sizeof(u32))
-
-void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
-{
- u32 i;
- u32 desc, time, count, base, data1;
- u32 blink1, blink2, ilink1, ilink2;
-
- base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
-
- if (!iwl3945_hw_valid_rtc_data_addr(base)) {
- IWL_ERR(priv, "Not valid error log pointer 0x%08X\n", base);
- return;
- }
-
-
- count = iwl_legacy_read_targ_mem(priv, base);
-
- if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
- IWL_ERR(priv, "Start IWL Error Log Dump:\n");
- IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
- priv->status, count);
- }
-
- IWL_ERR(priv, "Desc Time asrtPC blink2 "
- "ilink1 nmiPC Line\n");
- for (i = ERROR_START_OFFSET;
- i < (count * ERROR_ELEM_SIZE) + ERROR_START_OFFSET;
- i += ERROR_ELEM_SIZE) {
- desc = iwl_legacy_read_targ_mem(priv, base + i);
- time =
- iwl_legacy_read_targ_mem(priv, base + i + 1 * sizeof(u32));
- blink1 =
- iwl_legacy_read_targ_mem(priv, base + i + 2 * sizeof(u32));
- blink2 =
- iwl_legacy_read_targ_mem(priv, base + i + 3 * sizeof(u32));
- ilink1 =
- iwl_legacy_read_targ_mem(priv, base + i + 4 * sizeof(u32));
- ilink2 =
- iwl_legacy_read_targ_mem(priv, base + i + 5 * sizeof(u32));
- data1 =
- iwl_legacy_read_targ_mem(priv, base + i + 6 * sizeof(u32));
-
- IWL_ERR(priv,
- "%-13s (0x%X) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
- iwl3945_desc_lookup(desc), desc, time, blink1, blink2,
- ilink1, ilink2, data1);
- trace_iwlwifi_legacy_dev_ucode_error(priv, desc, time, data1, 0,
- 0, blink1, blink2, ilink1, ilink2);
- }
-}
-
-static void iwl3945_irq_tasklet(struct iwl_priv *priv)
-{
- u32 inta, handled = 0;
- u32 inta_fh;
- unsigned long flags;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- u32 inta_mask;
-#endif
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Ack/clear/reset pending uCode interrupts.
- * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
- * and will clear only when CSR_FH_INT_STATUS gets cleared. */
- inta = iwl_read32(priv, CSR_INT);
- iwl_write32(priv, CSR_INT, inta);
-
- /* Ack/clear/reset pending flow-handler (DMA) interrupts.
- * Any new interrupts that happen after this, either while we're
- * in this tasklet, or later, will show up in next ISR/tasklet. */
- inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
- iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
- /* just for debug */
- inta_mask = iwl_read32(priv, CSR_INT_MASK);
- IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
- inta, inta_mask, inta_fh);
- }
-#endif
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
- * atomic, make sure that inta covers all the interrupts that
- * we've discovered, even if FH interrupt came in just after
- * reading CSR_INT. */
- if (inta_fh & CSR39_FH_INT_RX_MASK)
- inta |= CSR_INT_BIT_FH_RX;
- if (inta_fh & CSR39_FH_INT_TX_MASK)
- inta |= CSR_INT_BIT_FH_TX;
-
- /* Now service all interrupt bits discovered above. */
- if (inta & CSR_INT_BIT_HW_ERR) {
- IWL_ERR(priv, "Hardware error detected. Restarting.\n");
-
- /* Tell the device to stop sending interrupts */
- iwl_legacy_disable_interrupts(priv);
-
- priv->isr_stats.hw++;
- iwl_legacy_irq_handle_error(priv);
-
- handled |= CSR_INT_BIT_HW_ERR;
-
- return;
- }
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
- /* NIC fires this, but we don't use it, redundant with WAKEUP */
- if (inta & CSR_INT_BIT_SCD) {
- IWL_DEBUG_ISR(priv, "Scheduler finished to transmit "
- "the frame/frames.\n");
- priv->isr_stats.sch++;
- }
-
- /* Alive notification via Rx interrupt will do the real work */
- if (inta & CSR_INT_BIT_ALIVE) {
- IWL_DEBUG_ISR(priv, "Alive interrupt\n");
- priv->isr_stats.alive++;
- }
- }
-#endif
- /* Safely ignore these bits for debug checks below */
- inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
-
- /* Error detected by uCode */
- if (inta & CSR_INT_BIT_SW_ERR) {
- IWL_ERR(priv, "Microcode SW error detected. "
- "Restarting 0x%X.\n", inta);
- priv->isr_stats.sw++;
- iwl_legacy_irq_handle_error(priv);
- handled |= CSR_INT_BIT_SW_ERR;
- }
-
- /* uCode wakes up after power-down sleep */
- if (inta & CSR_INT_BIT_WAKEUP) {
- IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
- iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
- iwl_legacy_txq_update_write_ptr(priv, &priv->txq[0]);
- iwl_legacy_txq_update_write_ptr(priv, &priv->txq[1]);
- iwl_legacy_txq_update_write_ptr(priv, &priv->txq[2]);
- iwl_legacy_txq_update_write_ptr(priv, &priv->txq[3]);
- iwl_legacy_txq_update_write_ptr(priv, &priv->txq[4]);
- iwl_legacy_txq_update_write_ptr(priv, &priv->txq[5]);
-
- priv->isr_stats.wakeup++;
- handled |= CSR_INT_BIT_WAKEUP;
- }
-
- /* All uCode command responses, including Tx command responses,
- * Rx "responses" (frame-received notification), and other
- * notifications from uCode come through here*/
- if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
- iwl3945_rx_handle(priv);
- priv->isr_stats.rx++;
- handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
- }
-
- if (inta & CSR_INT_BIT_FH_TX) {
- IWL_DEBUG_ISR(priv, "Tx interrupt\n");
- priv->isr_stats.tx++;
-
- iwl_write32(priv, CSR_FH_INT_STATUS, (1 << 6));
- iwl_legacy_write_direct32(priv, FH39_TCSR_CREDIT
- (FH39_SRVC_CHNL), 0x0);
- handled |= CSR_INT_BIT_FH_TX;
- }
-
- if (inta & ~handled) {
- IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
- priv->isr_stats.unhandled++;
- }
-
- if (inta & ~priv->inta_mask) {
- IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
- inta & ~priv->inta_mask);
- IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
- }
-
- /* Re-enable all interrupts */
- /* only Re-enable if disabled by irq */
- if (test_bit(STATUS_INT_ENABLED, &priv->status))
- iwl_legacy_enable_interrupts(priv);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
- inta = iwl_read32(priv, CSR_INT);
- inta_mask = iwl_read32(priv, CSR_INT_MASK);
- inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
- IWL_DEBUG_ISR(priv, "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
- "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
- }
-#endif
-}
-
-static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
- enum ieee80211_band band,
- u8 is_active, u8 n_probes,
- struct iwl3945_scan_channel *scan_ch,
- struct ieee80211_vif *vif)
-{
- struct ieee80211_channel *chan;
- const struct ieee80211_supported_band *sband;
- const struct iwl_channel_info *ch_info;
- u16 passive_dwell = 0;
- u16 active_dwell = 0;
- int added, i;
-
- sband = iwl_get_hw_mode(priv, band);
- if (!sband)
- return 0;
-
- active_dwell = iwl_legacy_get_active_dwell_time(priv, band, n_probes);
- passive_dwell = iwl_legacy_get_passive_dwell_time(priv, band, vif);
-
- if (passive_dwell <= active_dwell)
- passive_dwell = active_dwell + 1;
-
- for (i = 0, added = 0; i < priv->scan_request->n_channels; i++) {
- chan = priv->scan_request->channels[i];
-
- if (chan->band != band)
- continue;
-
- scan_ch->channel = chan->hw_value;
-
- ch_info = iwl_legacy_get_channel_info(priv, band,
- scan_ch->channel);
- if (!iwl_legacy_is_channel_valid(ch_info)) {
- IWL_DEBUG_SCAN(priv,
- "Channel %d is INVALID for this band.\n",
- scan_ch->channel);
- continue;
- }
-
- scan_ch->active_dwell = cpu_to_le16(active_dwell);
- scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
-		/* If passive, set up for auto-switch
- * and use long active_dwell time.
- */
- if (!is_active || iwl_legacy_is_channel_passive(ch_info) ||
- (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)) {
- scan_ch->type = 0; /* passive */
- if (IWL_UCODE_API(priv->ucode_ver) == 1)
- scan_ch->active_dwell = cpu_to_le16(passive_dwell - 1);
- } else {
- scan_ch->type = 1; /* active */
- }
-
-		/* Set direct probe bits. These may be used both for active
-		 * scan channels (probes get sent right away)
-		 * and for passive channels (probes get sent only after
-		 * hearing a clear Rx packet). */
- if (IWL_UCODE_API(priv->ucode_ver) >= 2) {
- if (n_probes)
- scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
- } else {
- /* uCode v1 does not allow setting direct probe bits on
- * passive channel. */
- if ((scan_ch->type & 1) && n_probes)
- scan_ch->type |= IWL39_SCAN_PROBE_MASK(n_probes);
- }
-
- /* Set txpower levels to defaults */
- scan_ch->tpc.dsp_atten = 110;
- /* scan_pwr_info->tpc.dsp_atten; */
-
- /*scan_pwr_info->tpc.tx_gain; */
- if (band == IEEE80211_BAND_5GHZ)
- scan_ch->tpc.tx_gain = ((1 << 5) | (3 << 3)) | 3;
- else {
- scan_ch->tpc.tx_gain = ((1 << 5) | (5 << 3));
- /* NOTE: if we were doing 6Mb OFDM for scans we'd use
- * power level:
- * scan_ch->tpc.tx_gain = ((1 << 5) | (2 << 3)) | 3;
- */
- }
-
- IWL_DEBUG_SCAN(priv, "Scanning %d [%s %d]\n",
- scan_ch->channel,
- (scan_ch->type & 1) ? "ACTIVE" : "PASSIVE",
- (scan_ch->type & 1) ?
- active_dwell : passive_dwell);
-
- scan_ch++;
- added++;
- }
-
- IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
- return added;
-}
-
-static void iwl3945_init_hw_rates(struct iwl_priv *priv,
- struct ieee80211_rate *rates)
-{
- int i;
-
- for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
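-		/* The .ieee rate value is in units of 500 kbps; mac80211
-		 * bitrates are in units of 100 kbps, hence the multiply by 5. */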
- rates[i].bitrate = iwl3945_rates[i].ieee * 5;
- rates[i].hw_value = i; /* Rate scaling will work on indexes */
- rates[i].hw_value_short = i;
- rates[i].flags = 0;
- if ((i > IWL39_LAST_OFDM_RATE) || (i < IWL_FIRST_OFDM_RATE)) {
- /*
- * If CCK != 1M then set short preamble rate flag.
- */
- rates[i].flags |= (iwl3945_rates[i].plcp == 10) ?
- 0 : IEEE80211_RATE_SHORT_PREAMBLE;
- }
- }
-}
-
-/******************************************************************************
- *
- * uCode download functions
- *
- ******************************************************************************/
-
-static void iwl3945_dealloc_ucode_pci(struct iwl_priv *priv)
-{
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
-}
-
-/**
- * iwl3945_verify_inst_full - verify runtime uCode image in card vs. host,
- * looking at all data.
- */
-static int iwl3945_verify_inst_full(struct iwl_priv *priv, __le32 *image, u32 len)
-{
- u32 val;
- u32 save_len = len;
- int rc = 0;
- u32 errcnt;
-
- IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
- iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
- IWL39_RTC_INST_LOWER_BOUND);
-
- errcnt = 0;
- for (; len > 0; len -= sizeof(u32), image++) {
- /* read data comes through single port, auto-incr addr */
- /* NOTE: Use the debugless read so we don't flood kernel log
- * if IWL_DL_IO is set */
- val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- if (val != le32_to_cpu(*image)) {
- IWL_ERR(priv, "uCode INST section is invalid at "
- "offset 0x%x, is 0x%x, s/b 0x%x\n",
- save_len - len, val, le32_to_cpu(*image));
- rc = -EIO;
- errcnt++;
- if (errcnt >= 20)
- break;
- }
- }
-
-
- if (!errcnt)
- IWL_DEBUG_INFO(priv,
- "ucode image in INSTRUCTION memory is good\n");
-
- return rc;
-}
-
-
-/**
- * iwl3945_verify_inst_sparse - verify runtime uCode image in card vs. host,
- * using sample data 100 bytes apart. If these sample points are good,
- * it's a pretty good bet that everything between them is good, too.
- */
-static int iwl3945_verify_inst_sparse(struct iwl_priv *priv, __le32 *image, u32 len)
-{
- u32 val;
- int rc = 0;
- u32 errcnt = 0;
- u32 i;
-
- IWL_DEBUG_INFO(priv, "ucode inst image size is %u\n", len);
-
- for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
- /* read data comes through single port, auto-incr addr */
- /* NOTE: Use the debugless read so we don't flood kernel log
- * if IWL_DL_IO is set */
- iwl_legacy_write_direct32(priv, HBUS_TARG_MEM_RADDR,
- i + IWL39_RTC_INST_LOWER_BOUND);
- val = _iwl_legacy_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- if (val != le32_to_cpu(*image)) {
-#if 0 /* Enable this if you want to see details */
- IWL_ERR(priv, "uCode INST section is invalid at "
- "offset 0x%x, is 0x%x, s/b 0x%x\n",
- i, val, *image);
-#endif
- rc = -EIO;
- errcnt++;
- if (errcnt >= 3)
- break;
- }
- }
-
- return rc;
-}
-
-
-/**
- * iwl3945_verify_ucode - determine which instruction image is in SRAM,
- * and verify its contents
- */
-static int iwl3945_verify_ucode(struct iwl_priv *priv)
-{
- __le32 *image;
- u32 len;
- int rc = 0;
-
- /* Try bootstrap */
- image = (__le32 *)priv->ucode_boot.v_addr;
- len = priv->ucode_boot.len;
- rc = iwl3945_verify_inst_sparse(priv, image, len);
- if (rc == 0) {
- IWL_DEBUG_INFO(priv, "Bootstrap uCode is good in inst SRAM\n");
- return 0;
- }
-
- /* Try initialize */
- image = (__le32 *)priv->ucode_init.v_addr;
- len = priv->ucode_init.len;
- rc = iwl3945_verify_inst_sparse(priv, image, len);
- if (rc == 0) {
- IWL_DEBUG_INFO(priv, "Initialize uCode is good in inst SRAM\n");
- return 0;
- }
-
- /* Try runtime/protocol */
- image = (__le32 *)priv->ucode_code.v_addr;
- len = priv->ucode_code.len;
- rc = iwl3945_verify_inst_sparse(priv, image, len);
- if (rc == 0) {
- IWL_DEBUG_INFO(priv, "Runtime uCode is good in inst SRAM\n");
- return 0;
- }
-
- IWL_ERR(priv, "NO VALID UCODE IMAGE IN INSTRUCTION SRAM!!\n");
-
- /* Since nothing seems to match, show first several data entries in
- * instruction SRAM, so maybe visual inspection will give a clue.
- * Selection of bootstrap image (vs. other images) is arbitrary. */
- image = (__le32 *)priv->ucode_boot.v_addr;
- len = priv->ucode_boot.len;
- rc = iwl3945_verify_inst_full(priv, image, len);
-
- return rc;
-}
-
-static void iwl3945_nic_start(struct iwl_priv *priv)
-{
- /* Remove all resets to allow NIC to operate */
- iwl_write32(priv, CSR_RESET, 0);
-}
-
-#define IWL3945_UCODE_GET(item) \
-static u32 iwl3945_ucode_get_##item(const struct iwl_ucode_header *ucode)\
-{ \
- return le32_to_cpu(ucode->v1.item); \
-}
-
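-/* The v1 ucode header is six __le32 words: ver plus the five size fields
- * returned by the iwl3945_ucode_get_*() accessors, i.e. 24 bytes. */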
-static u32 iwl3945_ucode_get_header_size(u32 api_ver)
-{
- return 24;
-}
-
-static u8 *iwl3945_ucode_get_data(const struct iwl_ucode_header *ucode)
-{
- return (u8 *) ucode->v1.data;
-}
-
-IWL3945_UCODE_GET(inst_size);
-IWL3945_UCODE_GET(data_size);
-IWL3945_UCODE_GET(init_size);
-IWL3945_UCODE_GET(init_data_size);
-IWL3945_UCODE_GET(boot_size);
-
-/**
- * iwl3945_read_ucode - Read uCode images from disk file.
- *
- * Copy into buffers for card to fetch via bus-mastering
- */
-static int iwl3945_read_ucode(struct iwl_priv *priv)
-{
- const struct iwl_ucode_header *ucode;
- int ret = -EINVAL, index;
- const struct firmware *ucode_raw;
- /* firmware file name contains uCode/driver compatibility version */
- const char *name_pre = priv->cfg->fw_name_pre;
- const unsigned int api_max = priv->cfg->ucode_api_max;
- const unsigned int api_min = priv->cfg->ucode_api_min;
- char buf[25];
- u8 *src;
- size_t len;
- u32 api_ver, inst_size, data_size, init_size, init_data_size, boot_size;
-
- /* Ask kernel firmware_class module to get the boot firmware off disk.
- * request_firmware() is synchronous, file is in memory on return. */
- for (index = api_max; index >= api_min; index--) {
- sprintf(buf, "%s%u%s", name_pre, index, ".ucode");
- ret = request_firmware(&ucode_raw, buf, &priv->pci_dev->dev);
- if (ret < 0) {
- IWL_ERR(priv, "%s firmware file req failed: %d\n",
- buf, ret);
- if (ret == -ENOENT)
- continue;
- else
- goto error;
- } else {
- if (index < api_max)
- IWL_ERR(priv, "Loaded firmware %s, "
- "which is deprecated. "
- " Please use API v%u instead.\n",
- buf, api_max);
- IWL_DEBUG_INFO(priv, "Got firmware '%s' file "
- "(%zd bytes) from disk\n",
- buf, ucode_raw->size);
- break;
- }
- }
-
- if (ret < 0)
- goto error;
-
- /* Make sure that we got at least our header! */
- if (ucode_raw->size < iwl3945_ucode_get_header_size(1)) {
- IWL_ERR(priv, "File size way too small!\n");
- ret = -EINVAL;
- goto err_release;
- }
-
- /* Data from ucode file: header followed by uCode images */
- ucode = (struct iwl_ucode_header *)ucode_raw->data;
-
- priv->ucode_ver = le32_to_cpu(ucode->ver);
- api_ver = IWL_UCODE_API(priv->ucode_ver);
- inst_size = iwl3945_ucode_get_inst_size(ucode);
- data_size = iwl3945_ucode_get_data_size(ucode);
- init_size = iwl3945_ucode_get_init_size(ucode);
- init_data_size = iwl3945_ucode_get_init_data_size(ucode);
- boot_size = iwl3945_ucode_get_boot_size(ucode);
- src = iwl3945_ucode_get_data(ucode);
-
- /* api_ver should match the api version forming part of the
- * firmware filename ... but we don't check for that and only rely
- * on the API version read from firmware header from here on forward */
-
- if (api_ver < api_min || api_ver > api_max) {
- IWL_ERR(priv, "Driver unable to support your firmware API. "
- "Driver supports v%u, firmware is v%u.\n",
- api_max, api_ver);
- priv->ucode_ver = 0;
- ret = -EINVAL;
- goto err_release;
- }
- if (api_ver != api_max)
- IWL_ERR(priv, "Firmware has old API version. Expected %u, "
- "got %u. New firmware can be obtained "
- "from http://www.intellinuxwireless.org.\n",
- api_max, api_ver);
-
- IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
- IWL_UCODE_MAJOR(priv->ucode_ver),
- IWL_UCODE_MINOR(priv->ucode_ver),
- IWL_UCODE_API(priv->ucode_ver),
- IWL_UCODE_SERIAL(priv->ucode_ver));
-
- snprintf(priv->hw->wiphy->fw_version,
- sizeof(priv->hw->wiphy->fw_version),
- "%u.%u.%u.%u",
- IWL_UCODE_MAJOR(priv->ucode_ver),
- IWL_UCODE_MINOR(priv->ucode_ver),
- IWL_UCODE_API(priv->ucode_ver),
- IWL_UCODE_SERIAL(priv->ucode_ver));
-
- IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
- priv->ucode_ver);
- IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n",
- inst_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %u\n",
- data_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %u\n",
- init_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %u\n",
- init_data_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %u\n",
- boot_size);
-
-
- /* Verify size of file vs. image size info in file's header */
- if (ucode_raw->size != iwl3945_ucode_get_header_size(api_ver) +
- inst_size + data_size + init_size +
- init_data_size + boot_size) {
-
- IWL_DEBUG_INFO(priv,
- "uCode file size %zd does not match expected size\n",
- ucode_raw->size);
- ret = -EINVAL;
- goto err_release;
- }
-
- /* Verify that uCode images will fit in card's SRAM */
- if (inst_size > IWL39_MAX_INST_SIZE) {
- IWL_DEBUG_INFO(priv, "uCode instr len %d too large to fit in\n",
- inst_size);
- ret = -EINVAL;
- goto err_release;
- }
-
- if (data_size > IWL39_MAX_DATA_SIZE) {
- IWL_DEBUG_INFO(priv, "uCode data len %d too large to fit in\n",
- data_size);
- ret = -EINVAL;
- goto err_release;
- }
- if (init_size > IWL39_MAX_INST_SIZE) {
- IWL_DEBUG_INFO(priv,
- "uCode init instr len %d too large to fit in\n",
- init_size);
- ret = -EINVAL;
- goto err_release;
- }
- if (init_data_size > IWL39_MAX_DATA_SIZE) {
- IWL_DEBUG_INFO(priv,
- "uCode init data len %d too large to fit in\n",
- init_data_size);
- ret = -EINVAL;
- goto err_release;
- }
- if (boot_size > IWL39_MAX_BSM_SIZE) {
- IWL_DEBUG_INFO(priv,
- "uCode boot instr len %d too large to fit in\n",
- boot_size);
- ret = -EINVAL;
- goto err_release;
- }
-
- /* Allocate ucode buffers for card's bus-master loading ... */
-
- /* Runtime instructions and 2 copies of data:
- * 1) unmodified from disk
- * 2) backup cache for save/restore during power-downs */
- priv->ucode_code.len = inst_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
-
- priv->ucode_data.len = data_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
-
- priv->ucode_data_backup.len = data_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
-
- if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
- !priv->ucode_data_backup.v_addr)
- goto err_pci_alloc;
-
- /* Initialization instructions and data */
- if (init_size && init_data_size) {
- priv->ucode_init.len = init_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
-
- priv->ucode_init_data.len = init_data_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
-
- if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
- goto err_pci_alloc;
- }
-
- /* Bootstrap (instructions only, no data) */
- if (boot_size) {
- priv->ucode_boot.len = boot_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
-
- if (!priv->ucode_boot.v_addr)
- goto err_pci_alloc;
- }
-
- /* Copy images into buffers for card's bus-master reads ... */
-
- /* Runtime instructions (first block of data in file) */
- len = inst_size;
- IWL_DEBUG_INFO(priv,
- "Copying (but not loading) uCode instr len %zd\n", len);
- memcpy(priv->ucode_code.v_addr, src, len);
- src += len;
-
- IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
- priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
-
-	/* Runtime data (2nd block)
-	 * NOTE: also copied into the backup buffer used for save/restore
-	 * during power-downs (see iwl3945_up()) */
- len = data_size;
- IWL_DEBUG_INFO(priv,
- "Copying (but not loading) uCode data len %zd\n", len);
- memcpy(priv->ucode_data.v_addr, src, len);
- memcpy(priv->ucode_data_backup.v_addr, src, len);
- src += len;
-
- /* Initialization instructions (3rd block) */
- if (init_size) {
- len = init_size;
- IWL_DEBUG_INFO(priv,
- "Copying (but not loading) init instr len %zd\n", len);
- memcpy(priv->ucode_init.v_addr, src, len);
- src += len;
- }
-
- /* Initialization data (4th block) */
- if (init_data_size) {
- len = init_data_size;
- IWL_DEBUG_INFO(priv,
- "Copying (but not loading) init data len %zd\n", len);
- memcpy(priv->ucode_init_data.v_addr, src, len);
- src += len;
- }
-
- /* Bootstrap instructions (5th block) */
- len = boot_size;
- IWL_DEBUG_INFO(priv,
- "Copying (but not loading) boot instr len %zd\n", len);
- memcpy(priv->ucode_boot.v_addr, src, len);
-
-	/* We have our copies now, allow the OS to release its copy */
- release_firmware(ucode_raw);
- return 0;
-
- err_pci_alloc:
- IWL_ERR(priv, "failed to allocate pci memory\n");
- ret = -ENOMEM;
- iwl3945_dealloc_ucode_pci(priv);
-
- err_release:
- release_firmware(ucode_raw);
-
- error:
- return ret;
-}
-
-
-/**
- * iwl3945_set_ucode_ptrs - Set uCode address location
- *
- * Tell initialization uCode where to find runtime uCode.
- *
- * BSM registers initially contain pointers to initialization uCode.
- * We need to replace them to load runtime uCode inst and data,
- * and to save runtime data when powering down.
- */
-static int iwl3945_set_ucode_ptrs(struct iwl_priv *priv)
-{
- dma_addr_t pinst;
- dma_addr_t pdata;
-
- /* bits 31:0 for 3945 */
- pinst = priv->ucode_code.p_addr;
- pdata = priv->ucode_data_backup.p_addr;
-
- /* Tell bootstrap uCode where to find image to load */
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_PTR_REG, pinst);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_PTR_REG, pdata);
- iwl_legacy_write_prph(priv, BSM_DRAM_DATA_BYTECOUNT_REG,
- priv->ucode_data.len);
-
- /* Inst byte count must be last to set up, bit 31 signals uCode
- * that all new ptr/size info is in place */
- iwl_legacy_write_prph(priv, BSM_DRAM_INST_BYTECOUNT_REG,
- priv->ucode_code.len | BSM_DRAM_INST_LOAD);
-
- IWL_DEBUG_INFO(priv, "Runtime uCode pointers are set.\n");
-
- return 0;
-}
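/*
 * A note on the ordering above: the DRAM data pointer, data byte count and
 * instruction pointer are programmed first, and the instruction byte count
 * is written last with BSM_DRAM_INST_LOAD (bit 31) set, so the bootstrap
 * uCode only acts on the new pointers once the whole set is in place.  The
 * data pointer deliberately refers to ucode_data_backup (same size as
 * ucode_data), which lets the runtime image save and restore its data SRAM
 * across power-downs, as described in the buffer allocation comments
 * earlier in this file.
 */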
-
-/**
- * iwl3945_init_alive_start - Called after REPLY_ALIVE notification received
- *
- * Called after REPLY_ALIVE notification received from "initialize" uCode.
- *
- * Tell "initialize" uCode to go ahead and load the runtime uCode.
- */
-static void iwl3945_init_alive_start(struct iwl_priv *priv)
-{
- /* Check alive response for "valid" sign from uCode */
- if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
- /* We had an error bringing up the hardware, so take it
- * all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
- goto restart;
- }
-
- /* Bootstrap uCode has loaded initialize uCode ... verify inst image.
- * This is a paranoid check, because we would not have gotten the
- * "initialize" alive if code weren't properly loaded. */
- if (iwl3945_verify_ucode(priv)) {
- /* Runtime instruction load was bad;
- * take it all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
- goto restart;
- }
-
- /* Send pointers to protocol/runtime uCode image ... init code will
- * load and launch runtime uCode, which will send us another "Alive"
- * notification. */
- IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
- if (iwl3945_set_ucode_ptrs(priv)) {
- /* Runtime instruction load won't happen;
- * take it all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Couldn't set up uCode pointers.\n");
- goto restart;
- }
- return;
-
- restart:
- queue_work(priv->workqueue, &priv->restart);
-}
-
-/**
- * iwl3945_alive_start - called after REPLY_ALIVE notification received
- * from protocol/runtime uCode (initialization uCode's
- * Alive gets handled by iwl3945_init_alive_start()).
- */
-static void iwl3945_alive_start(struct iwl_priv *priv)
-{
- int thermal_spin = 0;
- u32 rfkill;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
-
- if (priv->card_alive.is_valid != UCODE_VALID_OK) {
- /* We had an error bringing up the hardware, so take it
- * all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Alive failed.\n");
- goto restart;
- }
-
- /* Initialize uCode has loaded Runtime uCode ... verify inst image.
- * This is a paranoid check, because we would not have gotten the
- * "runtime" alive if code weren't properly loaded. */
- if (iwl3945_verify_ucode(priv)) {
- /* Runtime instruction load was bad;
- * take it all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
- goto restart;
- }
-
- rfkill = iwl_legacy_read_prph(priv, APMG_RFKILL_REG);
- IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
-
- if (rfkill & 0x1) {
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
- /* if RFKILL is not on, then wait for thermal
- * sensor in adapter to kick in */
- while (iwl3945_hw_get_temperature(priv) == 0) {
- thermal_spin++;
- udelay(10);
- }
-
- if (thermal_spin)
- IWL_DEBUG_INFO(priv, "Thermal calibration took %dus\n",
- thermal_spin * 10);
- } else
- set_bit(STATUS_RF_KILL_HW, &priv->status);
-
- /* After the ALIVE response, we can send commands to 3945 uCode */
- set_bit(STATUS_ALIVE, &priv->status);
-
- /* Enable watchdog to monitor the driver tx queues */
- iwl_legacy_setup_watchdog(priv);
-
- if (iwl_legacy_is_rfkill(priv))
- return;
-
- ieee80211_wake_queues(priv->hw);
-
- priv->active_rate = IWL_RATES_MASK_3945;
-
- iwl_legacy_power_update_mode(priv, true);
-
- if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
- struct iwl3945_rxon_cmd *active_rxon =
- (struct iwl3945_rxon_cmd *)(&ctx->active);
-
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
- active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- } else {
- /* Initialize our rx_config data */
- iwl_legacy_connection_init_rx_config(priv, ctx);
- }
-
- /* Configure Bluetooth device coexistence support */
- iwl_legacy_send_bt_config(priv);
-
- set_bit(STATUS_READY, &priv->status);
-
- /* Configure the adapter for unassociated operation */
- iwl3945_commit_rxon(priv, ctx);
-
- iwl3945_reg_txpower_periodic(priv);
-
- IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
- wake_up(&priv->wait_command_queue);
-
- return;
-
- restart:
- queue_work(priv->workqueue, &priv->restart);
-}
-
-static void iwl3945_cancel_deferred_work(struct iwl_priv *priv);
-
-static void __iwl3945_down(struct iwl_priv *priv)
-{
- unsigned long flags;
- int exit_pending;
-
- IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
-
- iwl_legacy_scan_cancel_timeout(priv, 200);
-
- exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
-
-	/* Stop the TX queue watchdog. STATUS_EXIT_PENDING must already be set
-	 * to prevent the timer from being rearmed */
- del_timer_sync(&priv->watchdog);
-
- /* Station information will now be cleared in device */
- iwl_legacy_clear_ucode_stations(priv, NULL);
- iwl_legacy_dealloc_bcast_stations(priv);
- iwl_legacy_clear_driver_stations(priv);
-
- /* Unblock any waiting calls */
- wake_up_all(&priv->wait_command_queue);
-
- /* Wipe out the EXIT_PENDING status bit if we are not actually
- * exiting the module */
- if (!exit_pending)
- clear_bit(STATUS_EXIT_PENDING, &priv->status);
-
- /* stop and reset the on-board processor */
- iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
- /* tell the device to stop sending interrupts */
- spin_lock_irqsave(&priv->lock, flags);
- iwl_legacy_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
- iwl3945_synchronize_irq(priv);
-
- if (priv->mac80211_registered)
- ieee80211_stop_queues(priv->hw);
-
-	/* If we have not previously called iwl3945_init() then clear all
-	 * status bits except RF KILL, GEO_CONFIGURED and EXIT_PENDING,
-	 * and return */
- if (!iwl_legacy_is_init(priv)) {
- priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
- STATUS_RF_KILL_HW |
- test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
- STATUS_GEO_CONFIGURED |
- test_bit(STATUS_EXIT_PENDING, &priv->status) <<
- STATUS_EXIT_PENDING;
- goto exit;
- }
-
-	/* ...otherwise clear out all status bits except RF KILL,
-	 * GEO_CONFIGURED, FW_ERROR and EXIT_PENDING, and continue
-	 * taking the NIC down. */
- priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
- STATUS_RF_KILL_HW |
- test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
- STATUS_GEO_CONFIGURED |
- test_bit(STATUS_FW_ERROR, &priv->status) <<
- STATUS_FW_ERROR |
- test_bit(STATUS_EXIT_PENDING, &priv->status) <<
- STATUS_EXIT_PENDING;
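/*
 * Each test_bit(FLAG, &priv->status) << FLAG term above evaluates to either
 * 0 or BIT(FLAG), so OR-ing the terms together yields a word in which only
 * the listed flags can be set; assigning it (in the not-yet-initialized case
 * above) or AND-ing it into priv->status (here) clears every other status
 * bit.
 */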
-
- iwl3945_hw_txq_ctx_stop(priv);
- iwl3945_hw_rxq_stop(priv);
-
- /* Power-down device's busmaster DMA clocks */
- iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
- udelay(5);
-
- /* Stop the device, and put it in low power state */
- iwl_legacy_apm_stop(priv);
-
- exit:
- memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
-
- if (priv->beacon_skb)
- dev_kfree_skb(priv->beacon_skb);
- priv->beacon_skb = NULL;
-
- /* clear out any free frames */
- iwl3945_clear_free_frames(priv);
-}
-
-static void iwl3945_down(struct iwl_priv *priv)
-{
- mutex_lock(&priv->mutex);
- __iwl3945_down(priv);
- mutex_unlock(&priv->mutex);
-
- iwl3945_cancel_deferred_work(priv);
-}
-
-#define MAX_HW_RESTARTS 5
-
-static int iwl3945_alloc_bcast_station(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- unsigned long flags;
- u8 sta_id;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
- sta_id = iwl_legacy_prep_station(priv, ctx,
- iwlegacy_bcast_addr, false, NULL);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Unable to prepare broadcast station\n");
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return -EINVAL;
- }
-
- priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
- priv->stations[sta_id].used |= IWL_STA_BCAST;
- spin_unlock_irqrestore(&priv->sta_lock, flags);
-
- return 0;
-}
-
-static int __iwl3945_up(struct iwl_priv *priv)
-{
- int rc, i;
-
- rc = iwl3945_alloc_bcast_station(priv);
- if (rc)
- return rc;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
- IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
- return -EIO;
- }
-
- if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
- IWL_ERR(priv, "ucode not available for device bring up\n");
- return -EIO;
- }
-
- /* If platform's RF_KILL switch is NOT set to KILL */
- if (iwl_read32(priv, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
- else {
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
- return -ENODEV;
- }
-
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-
- rc = iwl3945_hw_nic_init(priv);
- if (rc) {
-		IWL_ERR(priv, "Unable to init nic\n");
- return rc;
- }
-
- /* make sure rfkill handshake bits are cleared */
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
- /* clear (again), then enable host interrupts */
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
- iwl_legacy_enable_interrupts(priv);
-
- /* really make sure rfkill handshake bits are cleared */
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-
- /* Copy original ucode data image from disk into backup cache.
- * This will be used to initialize the on-board processor's
- * data SRAM for a clean start when the runtime program first loads. */
- memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
- priv->ucode_data.len);
-
- /* We return success when we resume from suspend and rf_kill is on. */
- if (test_bit(STATUS_RF_KILL_HW, &priv->status))
- return 0;
-
- for (i = 0; i < MAX_HW_RESTARTS; i++) {
-
- /* load bootstrap state machine,
- * load bootstrap program into processor's memory,
- * prepare to load the "initialize" uCode */
- rc = priv->cfg->ops->lib->load_ucode(priv);
-
- if (rc) {
- IWL_ERR(priv,
- "Unable to set up bootstrap uCode: %d\n", rc);
- continue;
- }
-
- /* start card; "initialize" will load runtime ucode */
- iwl3945_nic_start(priv);
-
- IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
-
- return 0;
- }
-
- set_bit(STATUS_EXIT_PENDING, &priv->status);
- __iwl3945_down(priv);
- clear_bit(STATUS_EXIT_PENDING, &priv->status);
-
- /* tried to restart and config the device for as long as our
- * patience could withstand */
- IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
- return -EIO;
-}
-
-
-/*****************************************************************************
- *
- * Workqueue callbacks
- *
- *****************************************************************************/
-
-static void iwl3945_bg_init_alive_start(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, init_alive_start.work);
-
- mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- goto out;
-
- iwl3945_init_alive_start(priv);
-out:
- mutex_unlock(&priv->mutex);
-}
-
-static void iwl3945_bg_alive_start(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, alive_start.work);
-
- mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- goto out;
-
- iwl3945_alive_start(priv);
-out:
- mutex_unlock(&priv->mutex);
-}
-
-/*
- * 3945 cannot interrupt driver when hardware rf kill switch toggles;
- * driver must poll CSR_GP_CNTRL_REG register for change. This register
- * *is* readable even when device has been SW_RESET into low power mode
- * (e.g. during RF KILL).
- */
-static void iwl3945_rfkill_poll(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, _3945.rfkill_poll.work);
- bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status);
- bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL)
- & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
-
- if (new_rfkill != old_rfkill) {
- if (new_rfkill)
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
- wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill);
-
- IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n",
- new_rfkill ? "disable radio" : "enable radio");
- }
-
- /* Keep this running, even if radio now enabled. This will be
- * cancelled in mac_start() if system decides to start again */
- queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
- round_jiffies_relative(2 * HZ));
-
-}
-
-int iwl3945_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
-{
- struct iwl_host_cmd cmd = {
- .id = REPLY_SCAN_CMD,
- .len = sizeof(struct iwl3945_scan_cmd),
- .flags = CMD_SIZE_HUGE,
- };
- struct iwl3945_scan_cmd *scan;
- u8 n_probes = 0;
- enum ieee80211_band band;
- bool is_active = false;
- int ret;
- u16 len;
-
- lockdep_assert_held(&priv->mutex);
-
- if (!priv->scan_cmd) {
- priv->scan_cmd = kmalloc(sizeof(struct iwl3945_scan_cmd) +
- IWL_MAX_SCAN_SIZE, GFP_KERNEL);
- if (!priv->scan_cmd) {
-			IWL_DEBUG_SCAN(priv, "Failed to allocate scan memory\n");
- return -ENOMEM;
- }
- }
- scan = priv->scan_cmd;
- memset(scan, 0, sizeof(struct iwl3945_scan_cmd) + IWL_MAX_SCAN_SIZE);
-
- scan->quiet_plcp_th = IWL_PLCP_QUIET_THRESH;
- scan->quiet_time = IWL_ACTIVE_QUIET_TIME;
-
- if (iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS)) {
- u16 interval;
- u32 extra;
- u32 suspend_time = 100;
- u32 scan_suspend_time = 100;
-
- IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
-
- interval = vif->bss_conf.beacon_int;
-
- scan->suspend_time = 0;
- scan->max_out_time = cpu_to_le32(200 * 1024);
- if (!interval)
- interval = suspend_time;
- /*
- * suspend time format:
- * 0-19: beacon interval in usec (time before exec.)
- * 20-23: 0
- * 24-31: number of beacons (suspend between channels)
- */
-
- extra = (suspend_time / interval) << 24;
- scan_suspend_time = 0xFF0FFFFF &
- (extra | ((suspend_time % interval) * 1024));
-
- scan->suspend_time = cpu_to_le32(scan_suspend_time);
- IWL_DEBUG_SCAN(priv, "suspend_time 0x%X beacon interval %d\n",
- scan_suspend_time, interval);
- }
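/*
 * Worked example with assumed numbers: for a beacon interval of 100 TU and
 * suspend_time of 100, extra = (100 / 100) << 24 = 0x01000000 and the low
 * field is (100 % 100) * 1024 = 0, giving scan_suspend_time = 0x01000000:
 * skip one beacon between channels with nothing in the 0-19 bit field (the
 * remainder is converted to usec there, 1 TU being 1024 usec).
 */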
-
- if (priv->scan_request->n_ssids) {
- int i, p = 0;
- IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
- for (i = 0; i < priv->scan_request->n_ssids; i++) {
- /* always does wildcard anyway */
- if (!priv->scan_request->ssids[i].ssid_len)
- continue;
- scan->direct_scan[p].id = WLAN_EID_SSID;
- scan->direct_scan[p].len =
- priv->scan_request->ssids[i].ssid_len;
- memcpy(scan->direct_scan[p].ssid,
- priv->scan_request->ssids[i].ssid,
- priv->scan_request->ssids[i].ssid_len);
- n_probes++;
- p++;
- }
- is_active = true;
- } else
- IWL_DEBUG_SCAN(priv, "Kicking off passive scan.\n");
-
- /* We don't build a direct scan probe request; the uCode will do
- * that based on the direct_mask added to each channel entry */
- scan->tx_cmd.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK;
- scan->tx_cmd.sta_id = priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id;
- scan->tx_cmd.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
-
- /* flags + rate selection */
-
- switch (priv->scan_band) {
- case IEEE80211_BAND_2GHZ:
- scan->flags = RXON_FLG_BAND_24G_MSK | RXON_FLG_AUTO_DETECT_MSK;
- scan->tx_cmd.rate = IWL_RATE_1M_PLCP;
- band = IEEE80211_BAND_2GHZ;
- break;
- case IEEE80211_BAND_5GHZ:
- scan->tx_cmd.rate = IWL_RATE_6M_PLCP;
- band = IEEE80211_BAND_5GHZ;
- break;
- default:
- IWL_WARN(priv, "Invalid scan band\n");
- return -EIO;
- }
-
- /*
-	 * If active scanning is requested but a certain channel
- * is marked passive, we can do active scanning if we
- * detect transmissions.
- */
- scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH_DEFAULT :
- IWL_GOOD_CRC_TH_DISABLED;
-
- len = iwl_legacy_fill_probe_req(priv, (struct ieee80211_mgmt *)scan->data,
- vif->addr, priv->scan_request->ie,
- priv->scan_request->ie_len,
- IWL_MAX_SCAN_SIZE - sizeof(*scan));
- scan->tx_cmd.len = cpu_to_le16(len);
-
- /* select Rx antennas */
- scan->flags |= iwl3945_get_antenna_flags(priv);
-
- scan->channel_count = iwl3945_get_channels_for_scan(priv, band, is_active, n_probes,
- (void *)&scan->data[len], vif);
- if (scan->channel_count == 0) {
- IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
- return -EIO;
- }
-
- cmd.len += le16_to_cpu(scan->tx_cmd.len) +
- scan->channel_count * sizeof(struct iwl3945_scan_channel);
- cmd.data = scan;
- scan->len = cpu_to_le16(cmd.len);
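/*
 * The command assembled above is laid out as: the fixed
 * struct iwl3945_scan_cmd header, then the probe request template of
 * tx_cmd.len bytes in scan->data, then channel_count
 * struct iwl3945_scan_channel entries appended directly after it;
 * cmd.len covers the whole variable-length buffer.
 */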
-
- set_bit(STATUS_SCAN_HW, &priv->status);
- ret = iwl_legacy_send_cmd_sync(priv, &cmd);
- if (ret)
- clear_bit(STATUS_SCAN_HW, &priv->status);
- return ret;
-}
-
-void iwl3945_post_scan(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- /*
- * Since setting the RXON may have been deferred while
- * performing the scan, fire one off if needed
- */
- if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
- iwl3945_commit_rxon(priv, ctx);
-}
-
-static void iwl3945_bg_restart(struct work_struct *data)
-{
- struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
- struct iwl_rxon_context *ctx;
- mutex_lock(&priv->mutex);
- for_each_context(priv, ctx)
- ctx->vif = NULL;
- priv->is_open = 0;
- mutex_unlock(&priv->mutex);
- iwl3945_down(priv);
- ieee80211_restart_hw(priv->hw);
- } else {
- iwl3945_down(priv);
-
- mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
- mutex_unlock(&priv->mutex);
- return;
- }
-
- __iwl3945_up(priv);
- mutex_unlock(&priv->mutex);
- }
-}
-
-static void iwl3945_bg_rx_replenish(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, rx_replenish);
-
- mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- goto out;
-
- iwl3945_rx_replenish(priv);
-out:
- mutex_unlock(&priv->mutex);
-}
-
-void iwl3945_post_associate(struct iwl_priv *priv)
-{
- int rc = 0;
- struct ieee80211_conf *conf = NULL;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- if (!ctx->vif || !priv->is_open)
- return;
-
- IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
- ctx->vif->bss_conf.aid, ctx->active.bssid_addr);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- iwl_legacy_scan_cancel_timeout(priv, 200);
-
- conf = iwl_legacy_ieee80211_get_hw_conf(priv->hw);
-
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl3945_commit_rxon(priv, ctx);
-
- rc = iwl_legacy_send_rxon_timing(priv, ctx);
- if (rc)
- IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
- "Attempting to continue.\n");
-
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
-
- ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);
-
- IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
- ctx->vif->bss_conf.aid, ctx->vif->bss_conf.beacon_int);
-
- if (ctx->vif->bss_conf.use_short_preamble)
- ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
- if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
- if (ctx->vif->bss_conf.use_short_slot)
- ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
- else
- ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
- }
-
- iwl3945_commit_rxon(priv, ctx);
-
- switch (ctx->vif->type) {
- case NL80211_IFTYPE_STATION:
- iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
- break;
- case NL80211_IFTYPE_ADHOC:
- iwl3945_send_beacon_cmd(priv);
- break;
- default:
- IWL_ERR(priv, "%s Should not be called in %d mode\n",
- __func__, ctx->vif->type);
- break;
- }
-}
-
-/*****************************************************************************
- *
- * mac80211 entry point functions
- *
- *****************************************************************************/
-
-#define UCODE_READY_TIMEOUT (2 * HZ)
-
-static int iwl3945_mac_start(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
- int ret;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- /* we should be verifying the device is ready to be opened */
- mutex_lock(&priv->mutex);
-
- /* fetch ucode file from disk, alloc and copy to bus-master buffers ...
- * ucode filename and max sizes are card-specific. */
-
- if (!priv->ucode_code.len) {
- ret = iwl3945_read_ucode(priv);
- if (ret) {
- IWL_ERR(priv, "Could not read microcode: %d\n", ret);
- mutex_unlock(&priv->mutex);
- goto out_release_irq;
- }
- }
-
- ret = __iwl3945_up(priv);
-
- mutex_unlock(&priv->mutex);
-
- if (ret)
- goto out_release_irq;
-
- IWL_DEBUG_INFO(priv, "Start UP work.\n");
-
- /* Wait for START_ALIVE from ucode. Otherwise callbacks from
- * mac80211 will not be run successfully. */
- ret = wait_event_timeout(priv->wait_command_queue,
- test_bit(STATUS_READY, &priv->status),
- UCODE_READY_TIMEOUT);
- if (!ret) {
- if (!test_bit(STATUS_READY, &priv->status)) {
- IWL_ERR(priv,
- "Wait for START_ALIVE timeout after %dms.\n",
- jiffies_to_msecs(UCODE_READY_TIMEOUT));
- ret = -ETIMEDOUT;
- goto out_release_irq;
- }
- }
-
- /* ucode is running and will send rfkill notifications,
- * no need to poll the killswitch state anymore */
- cancel_delayed_work(&priv->_3945.rfkill_poll);
-
- priv->is_open = 1;
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
-
-out_release_irq:
- priv->is_open = 0;
- IWL_DEBUG_MAC80211(priv, "leave - failed\n");
- return ret;
-}
-
-static void iwl3945_mac_stop(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (!priv->is_open) {
- IWL_DEBUG_MAC80211(priv, "leave - skip\n");
- return;
- }
-
- priv->is_open = 0;
-
- iwl3945_down(priv);
-
- flush_workqueue(priv->workqueue);
-
- /* start polling the killswitch state again */
- queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
- round_jiffies_relative(2 * HZ));
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-static void iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
- struct iwl_priv *priv = hw->priv;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
- ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
-
- if (iwl3945_tx_skb(priv, skb))
- dev_kfree_skb_any(skb);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-void iwl3945_config_ap(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct ieee80211_vif *vif = ctx->vif;
- int rc = 0;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- /* The following should be done only at AP bring up */
- if (!(iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS))) {
-
- /* RXON - unassoc (to set timing command) */
- ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- iwl3945_commit_rxon(priv, ctx);
-
- /* RXON Timing */
- rc = iwl_legacy_send_rxon_timing(priv, ctx);
- if (rc)
- IWL_WARN(priv, "REPLY_RXON_TIMING failed - "
- "Attempting to continue.\n");
-
- ctx->staging.assoc_id = 0;
-
- if (vif->bss_conf.use_short_preamble)
- ctx->staging.flags |=
- RXON_FLG_SHORT_PREAMBLE_MSK;
- else
- ctx->staging.flags &=
- ~RXON_FLG_SHORT_PREAMBLE_MSK;
-
- if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
- if (vif->bss_conf.use_short_slot)
- ctx->staging.flags |=
- RXON_FLG_SHORT_SLOT_MSK;
- else
- ctx->staging.flags &=
- ~RXON_FLG_SHORT_SLOT_MSK;
- }
- /* restore RXON assoc */
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
- iwl3945_commit_rxon(priv, ctx);
- }
- iwl3945_send_beacon_cmd(priv);
-}
-
-static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
-{
- struct iwl_priv *priv = hw->priv;
- int ret = 0;
- u8 sta_id = IWL_INVALID_STATION;
- u8 static_key;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (iwl3945_mod_params.sw_crypto) {
- IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
- return -EOPNOTSUPP;
- }
-
- /*
- * To support IBSS RSN, don't program group keys in IBSS, the
- * hardware will then not attempt to decrypt the frames.
- */
- if (vif->type == NL80211_IFTYPE_ADHOC &&
- !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
- return -EOPNOTSUPP;
-
- static_key = !iwl_legacy_is_associated(priv, IWL_RXON_CTX_BSS);
-
- if (!static_key) {
- sta_id = iwl_legacy_sta_id_or_broadcast(
- priv, &priv->contexts[IWL_RXON_CTX_BSS], sta);
- if (sta_id == IWL_INVALID_STATION)
- return -EINVAL;
- }
-
- mutex_lock(&priv->mutex);
- iwl_legacy_scan_cancel_timeout(priv, 100);
-
- switch (cmd) {
- case SET_KEY:
- if (static_key)
- ret = iwl3945_set_static_key(priv, key);
- else
- ret = iwl3945_set_dynamic_key(priv, key, sta_id);
- IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
- break;
- case DISABLE_KEY:
- if (static_key)
- ret = iwl3945_remove_static_key(priv);
- else
- ret = iwl3945_clear_sta_key_info(priv, sta_id);
- IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
- break;
- default:
- ret = -EINVAL;
- }
-
- mutex_unlock(&priv->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return ret;
-}
-
-static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl3945_sta_priv *sta_priv = (void *)sta->drv_priv;
- int ret;
- bool is_ap = vif->type == NL80211_IFTYPE_STATION;
- u8 sta_id;
-
- IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
- sta->addr);
- mutex_lock(&priv->mutex);
- IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
- sta->addr);
- sta_priv->common.sta_id = IWL_INVALID_STATION;
-
-
- ret = iwl_legacy_add_station_common(priv,
- &priv->contexts[IWL_RXON_CTX_BSS],
- sta->addr, is_ap, sta, &sta_id);
- if (ret) {
- IWL_ERR(priv, "Unable to add station %pM (%d)\n",
- sta->addr, ret);
- /* Should we return success if return code is EEXIST ? */
- mutex_unlock(&priv->mutex);
- return ret;
- }
-
- sta_priv->common.sta_id = sta_id;
-
- /* Initialize rate scaling */
- IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
- sta->addr);
- iwl3945_rs_rate_init(priv, sta, sta_id);
- mutex_unlock(&priv->mutex);
-
- return 0;
-}
-
-static void iwl3945_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 multicast)
-{
- struct iwl_priv *priv = hw->priv;
- __le32 filter_or = 0, filter_nand = 0;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-#define CHK(test, flag) do { \
- if (*total_flags & (test)) \
- filter_or |= (flag); \
- else \
- filter_nand |= (flag); \
- } while (0)
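/*
 * CHK() gathers, for every mac80211 FIF_* flag present in *total_flags, the
 * matching RXON filter bit into filter_or, and into filter_nand otherwise,
 * so the staging filter_flags can be updated below with a single
 * clear-then-set pair.
 */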
-
- IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
- changed_flags, *total_flags);
-
- CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
- CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
- CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
-
-#undef CHK
-
- mutex_lock(&priv->mutex);
-
- ctx->staging.filter_flags &= ~filter_nand;
- ctx->staging.filter_flags |= filter_or;
-
- /*
-	 * We do not commit the RXON here because the hardware may be
-	 * scanning; even when it is idle, committing at this point breaks
-	 * for unknown reasons. The filter flags change will be committed
-	 * eventually anyway.
- */
-
- mutex_unlock(&priv->mutex);
-
- /*
- * Receiving all multicast frames is always enabled by the
- * default flags setup in iwl_legacy_connection_init_rx_config()
- * since we currently do not support programming multicast
- * filters into the device.
- */
- *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
- FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
-}
-
-
-/*****************************************************************************
- *
- * sysfs attributes
- *
- *****************************************************************************/
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-
-/*
- * The following adds a new attribute to the sysfs representation
- * of this device driver (i.e. a new file in /sys/bus/pci/drivers/iwl/)
- * used for controlling the debug level.
- *
- * See the level definitions in iwl for details.
- *
- * The debug_level being managed using sysfs below is a per device debug
- * level that is used instead of the global debug level if it (the per
- * device debug level) is set.
- */
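/*
 * For illustration only (the exact sysfs path depends on how the device is
 * enumerated): a per-device level could be set from userspace with
 * something like
 *     echo 0x43fff > /sys/bus/pci/drivers/iwl3945/0000:03:00.0/debug_level
 * The store handler below parses the value with base 0, so both hex and
 * decimal input are accepted.
 */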
-static ssize_t iwl3945_show_debug_level(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
-}
-static ssize_t iwl3945_store_debug_level(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- unsigned long val;
- int ret;
-
- ret = strict_strtoul(buf, 0, &val);
- if (ret)
- IWL_INFO(priv, "%s is not in hex or decimal form.\n", buf);
- else {
- priv->debug_level = val;
- if (iwl_legacy_alloc_traffic_mem(priv))
- IWL_ERR(priv,
- "Not enough memory to generate traffic log\n");
- }
- return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
- iwl3945_show_debug_level, iwl3945_store_debug_level);
-
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
-
-static ssize_t iwl3945_show_temperature(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- return sprintf(buf, "%d\n", iwl3945_hw_get_temperature(priv));
-}
-
-static DEVICE_ATTR(temperature, S_IRUGO, iwl3945_show_temperature, NULL);
-
-static ssize_t iwl3945_show_tx_power(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
-}
-
-static ssize_t iwl3945_store_tx_power(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- char *p = (char *)buf;
- u32 val;
-
- val = simple_strtoul(p, &p, 10);
- if (p == buf)
-		IWL_INFO(priv, "%s is not in decimal form.\n", buf);
- else
- iwl3945_hw_reg_set_txpower(priv, val);
-
- return count;
-}
-
-static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, iwl3945_show_tx_power, iwl3945_store_tx_power);
-
-static ssize_t iwl3945_show_flags(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
-	return sprintf(buf, "0x%04X\n", le32_to_cpu(ctx->active.flags));
-}
-
-static ssize_t iwl3945_store_flags(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- u32 flags = simple_strtoul(buf, NULL, 0);
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- mutex_lock(&priv->mutex);
- if (le32_to_cpu(ctx->staging.flags) != flags) {
- /* Cancel any currently running scans... */
- if (iwl_legacy_scan_cancel_timeout(priv, 100))
- IWL_WARN(priv, "Could not cancel scan.\n");
- else {
- IWL_DEBUG_INFO(priv, "Committing rxon.flags = 0x%04X\n",
- flags);
- ctx->staging.flags = cpu_to_le32(flags);
- iwl3945_commit_rxon(priv, ctx);
- }
- }
- mutex_unlock(&priv->mutex);
-
- return count;
-}
-
-static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, iwl3945_show_flags, iwl3945_store_flags);
-
-static ssize_t iwl3945_show_filter_flags(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- return sprintf(buf, "0x%04X\n",
- le32_to_cpu(ctx->active.filter_flags));
-}
-
-static ssize_t iwl3945_store_filter_flags(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- u32 filter_flags = simple_strtoul(buf, NULL, 0);
-
- mutex_lock(&priv->mutex);
- if (le32_to_cpu(ctx->staging.filter_flags) != filter_flags) {
- /* Cancel any currently running scans... */
- if (iwl_legacy_scan_cancel_timeout(priv, 100))
- IWL_WARN(priv, "Could not cancel scan.\n");
- else {
- IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
- "0x%04X\n", filter_flags);
- ctx->staging.filter_flags =
- cpu_to_le32(filter_flags);
- iwl3945_commit_rxon(priv, ctx);
- }
- }
- mutex_unlock(&priv->mutex);
-
- return count;
-}
-
-static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, iwl3945_show_filter_flags,
- iwl3945_store_filter_flags);
-
-static ssize_t iwl3945_show_measurement(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- struct iwl_spectrum_notification measure_report;
- u32 size = sizeof(measure_report), len = 0, ofs = 0;
- u8 *data = (u8 *)&measure_report;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->lock, flags);
- if (!(priv->measurement_status & MEASUREMENT_READY)) {
- spin_unlock_irqrestore(&priv->lock, flags);
- return 0;
- }
- memcpy(&measure_report, &priv->measure_report, size);
- priv->measurement_status = 0;
- spin_unlock_irqrestore(&priv->lock, flags);
-
- while (size && (PAGE_SIZE - len)) {
- hex_dump_to_buffer(data + ofs, size, 16, 1, buf + len,
- PAGE_SIZE - len, 1);
- len = strlen(buf);
- if (PAGE_SIZE - len)
- buf[len++] = '\n';
-
- ofs += 16;
- size -= min(size, 16U);
- }
-
- return len;
-}
-
-static ssize_t iwl3945_store_measurement(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct ieee80211_measurement_params params = {
- .channel = le16_to_cpu(ctx->active.channel),
- .start_time = cpu_to_le64(priv->_3945.last_tsf),
- .duration = cpu_to_le16(1),
- };
- u8 type = IWL_MEASURE_BASIC;
- u8 buffer[32];
- u8 channel;
-
- if (count) {
- char *p = buffer;
- strncpy(buffer, buf, min(sizeof(buffer), count));
- channel = simple_strtoul(p, NULL, 0);
- if (channel)
- params.channel = channel;
-
- p = buffer;
- while (*p && *p != ' ')
- p++;
- if (*p)
- type = simple_strtoul(p + 1, NULL, 0);
- }
-
- IWL_DEBUG_INFO(priv, "Invoking measurement of type %d on "
- "channel %d (for '%s')\n", type, params.channel, buf);
- iwl3945_get_measurement(priv, &params, type);
-
- return count;
-}
-
-static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
- iwl3945_show_measurement, iwl3945_store_measurement);
-
-static ssize_t iwl3945_store_retry_rate(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- priv->retry_rate = simple_strtoul(buf, NULL, 0);
- if (priv->retry_rate <= 0)
- priv->retry_rate = 1;
-
- return count;
-}
-
-static ssize_t iwl3945_show_retry_rate(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- return sprintf(buf, "%d", priv->retry_rate);
-}
-
-static DEVICE_ATTR(retry_rate, S_IWUSR | S_IRUSR, iwl3945_show_retry_rate,
- iwl3945_store_retry_rate);
-
-
-static ssize_t iwl3945_show_channels(struct device *d,
- struct device_attribute *attr, char *buf)
-{
-	/* channel information does not really belong in sysfs anyway */
- return 0;
-}
-
-static DEVICE_ATTR(channels, S_IRUSR, iwl3945_show_channels, NULL);
-
-static ssize_t iwl3945_show_antenna(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- return sprintf(buf, "%d\n", iwl3945_mod_params.antenna);
-}
-
-static ssize_t iwl3945_store_antenna(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv __maybe_unused = dev_get_drvdata(d);
- int ant;
-
- if (count == 0)
- return 0;
-
- if (sscanf(buf, "%1i", &ant) != 1) {
- IWL_DEBUG_INFO(priv, "not in hex or decimal form.\n");
- return count;
- }
-
- if ((ant >= 0) && (ant <= 2)) {
- IWL_DEBUG_INFO(priv, "Setting antenna select to %d.\n", ant);
- iwl3945_mod_params.antenna = (enum iwl3945_antenna)ant;
- } else
- IWL_DEBUG_INFO(priv, "Bad antenna select value %d.\n", ant);
-
-
- return count;
-}
-
-static DEVICE_ATTR(antenna, S_IWUSR | S_IRUGO, iwl3945_show_antenna, iwl3945_store_antenna);
-
-static ssize_t iwl3945_show_status(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
- return sprintf(buf, "0x%08x\n", (int)priv->status);
-}
-
-static DEVICE_ATTR(status, S_IRUGO, iwl3945_show_status, NULL);
-
-static ssize_t iwl3945_dump_error_log(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- char *p = (char *)buf;
-
- if (p[0] == '1')
- iwl3945_dump_nic_error_log(priv);
-
- return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(dump_errors, S_IWUSR, NULL, iwl3945_dump_error_log);
-
-/*****************************************************************************
- *
- * driver setup and tear down
- *
- *****************************************************************************/
-
-static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
-{
- priv->workqueue = create_singlethread_workqueue(DRV_NAME);
-
- init_waitqueue_head(&priv->wait_command_queue);
-
- INIT_WORK(&priv->restart, iwl3945_bg_restart);
- INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
- INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
- INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
- INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);
-
- iwl_legacy_setup_scan_deferred_work(priv);
-
- iwl3945_hw_setup_deferred_work(priv);
-
- init_timer(&priv->watchdog);
- priv->watchdog.data = (unsigned long)priv;
- priv->watchdog.function = iwl_legacy_bg_watchdog;
-
- tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
- iwl3945_irq_tasklet, (unsigned long)priv);
-}
-
-static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
-{
- iwl3945_hw_cancel_deferred_work(priv);
-
- cancel_delayed_work_sync(&priv->init_alive_start);
- cancel_delayed_work(&priv->alive_start);
-
- iwl_legacy_cancel_scan_deferred_work(priv);
-}
-
-static struct attribute *iwl3945_sysfs_entries[] = {
- &dev_attr_antenna.attr,
- &dev_attr_channels.attr,
- &dev_attr_dump_errors.attr,
- &dev_attr_flags.attr,
- &dev_attr_filter_flags.attr,
- &dev_attr_measurement.attr,
- &dev_attr_retry_rate.attr,
- &dev_attr_status.attr,
- &dev_attr_temperature.attr,
- &dev_attr_tx_power.attr,
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- &dev_attr_debug_level.attr,
-#endif
- NULL
-};
-
-static struct attribute_group iwl3945_attribute_group = {
- .name = NULL, /* put in device directory */
- .attrs = iwl3945_sysfs_entries,
-};
-
-struct ieee80211_ops iwl3945_hw_ops = {
- .tx = iwl3945_mac_tx,
- .start = iwl3945_mac_start,
- .stop = iwl3945_mac_stop,
- .add_interface = iwl_legacy_mac_add_interface,
- .remove_interface = iwl_legacy_mac_remove_interface,
- .change_interface = iwl_legacy_mac_change_interface,
- .config = iwl_legacy_mac_config,
- .configure_filter = iwl3945_configure_filter,
- .set_key = iwl3945_mac_set_key,
- .conf_tx = iwl_legacy_mac_conf_tx,
- .reset_tsf = iwl_legacy_mac_reset_tsf,
- .bss_info_changed = iwl_legacy_mac_bss_info_changed,
- .hw_scan = iwl_legacy_mac_hw_scan,
- .sta_add = iwl3945_mac_sta_add,
- .sta_remove = iwl_legacy_mac_sta_remove,
- .tx_last_beacon = iwl_legacy_mac_tx_last_beacon,
-};
-
-static int iwl3945_init_drv(struct iwl_priv *priv)
-{
- int ret;
- struct iwl3945_eeprom *eeprom = (struct iwl3945_eeprom *)priv->eeprom;
-
- priv->retry_rate = 1;
- priv->beacon_skb = NULL;
-
- spin_lock_init(&priv->sta_lock);
- spin_lock_init(&priv->hcmd_lock);
-
- INIT_LIST_HEAD(&priv->free_frames);
-
- mutex_init(&priv->mutex);
-
- priv->ieee_channels = NULL;
- priv->ieee_rates = NULL;
- priv->band = IEEE80211_BAND_2GHZ;
-
- priv->iw_mode = NL80211_IFTYPE_STATION;
- priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
-
- /* initialize force reset */
- priv->force_reset.reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD;
-
- if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
- IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
- eeprom->version);
- ret = -EINVAL;
- goto err;
- }
- ret = iwl_legacy_init_channel_map(priv);
- if (ret) {
- IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
- goto err;
- }
-
- /* Set up txpower settings in driver for all channels */
- if (iwl3945_txpower_set_from_eeprom(priv)) {
- ret = -EIO;
- goto err_free_channel_map;
- }
-
- ret = iwl_legacy_init_geos(priv);
- if (ret) {
- IWL_ERR(priv, "initializing geos failed: %d\n", ret);
- goto err_free_channel_map;
- }
- iwl3945_init_hw_rates(priv, priv->ieee_rates);
-
- return 0;
-
-err_free_channel_map:
- iwl_legacy_free_channel_map(priv);
-err:
- return ret;
-}
-
-#define IWL3945_MAX_PROBE_REQUEST 200
-
-static int iwl3945_setup_mac(struct iwl_priv *priv)
-{
- int ret;
- struct ieee80211_hw *hw = priv->hw;
-
- hw->rate_control_algorithm = "iwl-3945-rs";
- hw->sta_data_size = sizeof(struct iwl3945_sta_priv);
- hw->vif_data_size = sizeof(struct iwl_vif_priv);
-
- /* Tell mac80211 our characteristics */
- hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SPECTRUM_MGMT;
-
- hw->wiphy->interface_modes =
- priv->contexts[IWL_RXON_CTX_BSS].interface_modes;
-
- hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
- WIPHY_FLAG_DISABLE_BEACON_HINTS |
- WIPHY_FLAG_IBSS_RSN;
-
- hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
- /* we create the 802.11 header and a zero-length SSID element */
- hw->wiphy->max_scan_ie_len = IWL3945_MAX_PROBE_REQUEST - 24 - 2;
-
- /* Default value; 4 EDCA QOS priorities */
- hw->queues = 4;
-
- if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
- priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &priv->bands[IEEE80211_BAND_2GHZ];
-
- if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
- priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &priv->bands[IEEE80211_BAND_5GHZ];
-
- iwl_legacy_leds_init(priv);
-
- ret = ieee80211_register_hw(priv->hw);
- if (ret) {
- IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
- return ret;
- }
- priv->mac80211_registered = 1;
-
- return 0;
-}
-
-static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int err = 0, i;
- struct iwl_priv *priv;
- struct ieee80211_hw *hw;
- struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
- struct iwl3945_eeprom *eeprom;
- unsigned long flags;
-
- /***********************
- * 1. Allocating HW data
- * ********************/
-
- /* mac80211 allocates memory for this device instance, including
- * space for this driver's private structure */
- hw = iwl_legacy_alloc_all(cfg);
- if (hw == NULL) {
- pr_err("Can not allocate network device\n");
- err = -ENOMEM;
- goto out;
- }
- priv = hw->priv;
- SET_IEEE80211_DEV(hw, &pdev->dev);
-
- priv->cmd_queue = IWL39_CMD_QUEUE_NUM;
-
- /* 3945 has only one valid context */
- priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
-
- for (i = 0; i < NUM_IWL_RXON_CTX; i++)
- priv->contexts[i].ctxid = i;
-
- priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
- priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
- priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
- priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
- priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
- priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
- priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC);
- priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
- priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
- priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
-
- /*
- * Disabling hardware scan means that mac80211 will perform scans
- * "the hard way", rather than using device's scan.
- */
- if (iwl3945_mod_params.disable_hw_scan) {
- IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
- iwl3945_hw_ops.hw_scan = NULL;
- }
-
- IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
- priv->cfg = cfg;
- priv->pci_dev = pdev;
- priv->inta_mask = CSR_INI_SET_MASK;
-
- if (iwl_legacy_alloc_traffic_mem(priv))
- IWL_ERR(priv, "Not enough memory to generate traffic log\n");
-
- /***************************
- * 2. Initializing PCI bus
- * *************************/
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
- PCIE_LINK_STATE_CLKPM);
-
- if (pci_enable_device(pdev)) {
- err = -ENODEV;
- goto out_ieee80211_free_hw;
- }
-
- pci_set_master(pdev);
-
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
- if (err) {
- IWL_WARN(priv, "No suitable DMA available.\n");
- goto out_pci_disable_device;
- }
-
- pci_set_drvdata(pdev, priv);
- err = pci_request_regions(pdev, DRV_NAME);
- if (err)
- goto out_pci_disable_device;
-
- /***********************
- * 3. Read REV Register
- * ********************/
- priv->hw_base = pci_iomap(pdev, 0, 0);
- if (!priv->hw_base) {
- err = -ENODEV;
- goto out_pci_release_regions;
- }
-
- IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
- (unsigned long long) pci_resource_len(pdev, 0));
- IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
-
- /* We disable the RETRY_TIMEOUT register (0x41) to keep
- * PCI Tx retries from interfering with C3 CPU state */
- pci_write_config_byte(pdev, 0x41, 0x00);
-
- /* these spin locks will be used in apm_ops.init and EEPROM access
- * we should init now
- */
- spin_lock_init(&priv->reg_lock);
- spin_lock_init(&priv->lock);
-
- /*
- * stop and reset the on-board processor just in case it is in a
- * strange state ... like being left stranded by a primary kernel
- * and this is now the kdump kernel trying to start up
- */
- iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
- /***********************
- * 4. Read EEPROM
- * ********************/
-
- /* Read the EEPROM */
- err = iwl_legacy_eeprom_init(priv);
- if (err) {
- IWL_ERR(priv, "Unable to init EEPROM\n");
- goto out_iounmap;
- }
- /* MAC Address location in EEPROM same for 3945/4965 */
- eeprom = (struct iwl3945_eeprom *)priv->eeprom;
- IWL_DEBUG_INFO(priv, "MAC address: %pM\n", eeprom->mac_address);
- SET_IEEE80211_PERM_ADDR(priv->hw, eeprom->mac_address);
-
- /***********************
- * 5. Setup HW Constants
- * ********************/
- /* Device-specific setup */
- if (iwl3945_hw_set_hw_params(priv)) {
- IWL_ERR(priv, "failed to set hw settings\n");
- goto out_eeprom_free;
- }
-
- /***********************
- * 6. Setup priv
- * ********************/
-
- err = iwl3945_init_drv(priv);
- if (err) {
- IWL_ERR(priv, "initializing driver failed\n");
- goto out_unset_hw_params;
- }
-
- IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s\n",
- priv->cfg->name);
-
- /***********************
- * 7. Setup Services
- * ********************/
-
- spin_lock_irqsave(&priv->lock, flags);
- iwl_legacy_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- pci_enable_msi(priv->pci_dev);
-
- err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
- IRQF_SHARED, DRV_NAME, priv);
- if (err) {
- IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
- goto out_disable_msi;
- }
-
- err = sysfs_create_group(&pdev->dev.kobj, &iwl3945_attribute_group);
- if (err) {
- IWL_ERR(priv, "failed to create sysfs device attributes\n");
- goto out_release_irq;
- }
-
- iwl_legacy_set_rxon_channel(priv,
- &priv->bands[IEEE80211_BAND_2GHZ].channels[5],
- &priv->contexts[IWL_RXON_CTX_BSS]);
- iwl3945_setup_deferred_work(priv);
- iwl3945_setup_rx_handlers(priv);
- iwl_legacy_power_initialize(priv);
-
- /*********************************
- * 8. Setup and Register mac80211
- * *******************************/
-
- iwl_legacy_enable_interrupts(priv);
-
- err = iwl3945_setup_mac(priv);
- if (err)
- goto out_remove_sysfs;
-
- err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
- if (err)
- IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
-
- /* Start monitoring the killswitch */
- queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
- 2 * HZ);
-
- return 0;
-
- out_remove_sysfs:
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
- sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
- out_release_irq:
- free_irq(priv->pci_dev->irq, priv);
- out_disable_msi:
- pci_disable_msi(priv->pci_dev);
- iwl_legacy_free_geos(priv);
- iwl_legacy_free_channel_map(priv);
- out_unset_hw_params:
- iwl3945_unset_hw_params(priv);
- out_eeprom_free:
- iwl_legacy_eeprom_free(priv);
- out_iounmap:
- pci_iounmap(pdev, priv->hw_base);
- out_pci_release_regions:
- pci_release_regions(pdev);
- out_pci_disable_device:
- pci_set_drvdata(pdev, NULL);
- pci_disable_device(pdev);
- out_ieee80211_free_hw:
- iwl_legacy_free_traffic_mem(priv);
- ieee80211_free_hw(priv->hw);
- out:
- return err;
-}
-
-static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
-{
- struct iwl_priv *priv = pci_get_drvdata(pdev);
- unsigned long flags;
-
- if (!priv)
- return;
-
- IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
-
- iwl_legacy_dbgfs_unregister(priv);
-
- set_bit(STATUS_EXIT_PENDING, &priv->status);
-
- iwl_legacy_leds_exit(priv);
-
- if (priv->mac80211_registered) {
- ieee80211_unregister_hw(priv->hw);
- priv->mac80211_registered = 0;
- } else {
- iwl3945_down(priv);
- }
-
- /*
- * Make sure device is reset to low power before unloading driver.
- * This may be redundant with iwl_down(), but there are paths to
- * run iwl_down() without calling apm_ops.stop(), and there are
- * paths to avoid running iwl_down() at all before leaving driver.
- * This (inexpensive) call *makes sure* device is reset.
- */
- iwl_legacy_apm_stop(priv);
-
- /* make sure we flush any pending irq or
- * tasklet for the driver
- */
- spin_lock_irqsave(&priv->lock, flags);
- iwl_legacy_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwl3945_synchronize_irq(priv);
-
- sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
-
- cancel_delayed_work_sync(&priv->_3945.rfkill_poll);
-
- iwl3945_dealloc_ucode_pci(priv);
-
- if (priv->rxq.bd)
- iwl3945_rx_queue_free(priv, &priv->rxq);
- iwl3945_hw_txq_ctx_free(priv);
-
- iwl3945_unset_hw_params(priv);
-
- /*netif_stop_queue(dev); */
- flush_workqueue(priv->workqueue);
-
- /* ieee80211_unregister_hw calls iwl3945_mac_stop, which flushes
- * priv->workqueue... so we can't take down the workqueue
- * until now... */
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
- iwl_legacy_free_traffic_mem(priv);
-
- free_irq(pdev->irq, priv);
- pci_disable_msi(pdev);
-
- pci_iounmap(pdev, priv->hw_base);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
-
- iwl_legacy_free_channel_map(priv);
- iwl_legacy_free_geos(priv);
- kfree(priv->scan_cmd);
- if (priv->beacon_skb)
- dev_kfree_skb(priv->beacon_skb);
-
- ieee80211_free_hw(priv->hw);
-}
-
-
-/*****************************************************************************
- *
- * driver and module entry point
- *
- *****************************************************************************/
-
-static struct pci_driver iwl3945_driver = {
- .name = DRV_NAME,
- .id_table = iwl3945_hw_card_ids,
- .probe = iwl3945_pci_probe,
- .remove = __devexit_p(iwl3945_pci_remove),
- .driver.pm = IWL_LEGACY_PM_OPS,
-};
-
-static int __init iwl3945_init(void)
-{
-
- int ret;
- pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
- pr_info(DRV_COPYRIGHT "\n");
-
- ret = iwl3945_rate_control_register();
- if (ret) {
- pr_err("Unable to register rate control algorithm: %d\n", ret);
- return ret;
- }
-
- ret = pci_register_driver(&iwl3945_driver);
- if (ret) {
- pr_err("Unable to initialize PCI module\n");
- goto error_register;
- }
-
- return ret;
-
-error_register:
- iwl3945_rate_control_unregister();
- return ret;
-}
-
-static void __exit iwl3945_exit(void)
-{
- pci_unregister_driver(&iwl3945_driver);
- iwl3945_rate_control_unregister();
-}
-
-MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));
-
-module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO);
-MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
-module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO);
-MODULE_PARM_DESC(swcrypto,
- "using software crypto (default 1 [software])");
-module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
- int, S_IRUGO);
-MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 1)");
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "debug output mask");
-#endif
-module_param_named(fw_restart, iwl3945_mod_params.restart_fw, int, S_IRUGO);
-MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
-
-module_exit(iwl3945_exit);
-module_init(iwl3945_init);
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
deleted file mode 100644
index d2fba9e..0000000
--- a/drivers/net/wireless/iwlegacy/iwl4965-base.c
+++ /dev/null
@@ -1,3281 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/pci-aspm.h>
-#include <linux/slab.h>
-#include <linux/dma-mapping.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/firmware.h>
-#include <linux/etherdevice.h>
-#include <linux/if_arp.h>
-
-#include <net/mac80211.h>
-
-#include <asm/div64.h>
-
-#define DRV_NAME "iwl4965"
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-helpers.h"
-#include "iwl-sta.h"
-#include "iwl-4965-calib.h"
-#include "iwl-4965.h"
-#include "iwl-4965-led.h"
-
-
-/******************************************************************************
- *
- * module boilerplate
- *
- ******************************************************************************/
-
-/*
- * module name, copyright, version, etc.
- */
-#define DRV_DESCRIPTION "Intel(R) Wireless WiFi 4965 driver for Linux"
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-#define VD "d"
-#else
-#define VD
-#endif
-
-#define DRV_VERSION IWLWIFI_VERSION VD
-
-
-MODULE_DESCRIPTION(DRV_DESCRIPTION);
-MODULE_VERSION(DRV_VERSION);
-MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("iwl4965");
-
-void iwl4965_update_chain_flags(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx;
-
- if (priv->cfg->ops->hcmd->set_rxon_chain) {
- for_each_context(priv, ctx) {
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
- if (ctx->active.rx_chain != ctx->staging.rx_chain)
- iwl_legacy_commit_rxon(priv, ctx);
- }
- }
-}
-
-static void iwl4965_clear_free_frames(struct iwl_priv *priv)
-{
- struct list_head *element;
-
- IWL_DEBUG_INFO(priv, "%d frames on pre-allocated heap on clear.\n",
- priv->frames_count);
-
- while (!list_empty(&priv->free_frames)) {
- element = priv->free_frames.next;
- list_del(element);
- kfree(list_entry(element, struct iwl_frame, list));
- priv->frames_count--;
- }
-
- if (priv->frames_count) {
- IWL_WARN(priv, "%d frames still in use. Did we lose one?\n",
- priv->frames_count);
- priv->frames_count = 0;
- }
-}
-
-static struct iwl_frame *iwl4965_get_free_frame(struct iwl_priv *priv)
-{
- struct iwl_frame *frame;
- struct list_head *element;
- if (list_empty(&priv->free_frames)) {
- frame = kzalloc(sizeof(*frame), GFP_KERNEL);
- if (!frame) {
- IWL_ERR(priv, "Could not allocate frame!\n");
- return NULL;
- }
-
- priv->frames_count++;
- return frame;
- }
-
- element = priv->free_frames.next;
- list_del(element);
- return list_entry(element, struct iwl_frame, list);
-}
-
-static void iwl4965_free_frame(struct iwl_priv *priv, struct iwl_frame *frame)
-{
- memset(frame, 0, sizeof(*frame));
- list_add(&frame->list, &priv->free_frames);
-}
-
-static u32 iwl4965_fill_beacon_frame(struct iwl_priv *priv,
- struct ieee80211_hdr *hdr,
- int left)
-{
- lockdep_assert_held(&priv->mutex);
-
- if (!priv->beacon_skb)
- return 0;
-
- if (priv->beacon_skb->len > left)
- return 0;
-
- memcpy(hdr, priv->beacon_skb->data, priv->beacon_skb->len);
-
- return priv->beacon_skb->len;
-}
-
-/* Parse the beacon frame to find the TIM element and set tim_idx & tim_size */
-static void iwl4965_set_beacon_tim(struct iwl_priv *priv,
- struct iwl_tx_beacon_cmd *tx_beacon_cmd,
- u8 *beacon, u32 frame_size)
-{
- u16 tim_idx;
- struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)beacon;
-
- /*
- * The index is relative to frame start but we start looking at the
- * variable-length part of the beacon.
- */
- tim_idx = mgmt->u.beacon.variable - beacon;
-
- /* Parse variable-length elements of beacon to find WLAN_EID_TIM */
- while ((tim_idx < (frame_size - 2)) &&
- (beacon[tim_idx] != WLAN_EID_TIM))
- tim_idx += beacon[tim_idx+1] + 2;
-
- /* If TIM field was found, set variables */
- if ((tim_idx < (frame_size - 1)) && (beacon[tim_idx] == WLAN_EID_TIM)) {
- tx_beacon_cmd->tim_idx = cpu_to_le16(tim_idx);
- tx_beacon_cmd->tim_size = beacon[tim_idx+1];
- } else
- IWL_WARN(priv, "Unable to find TIM Element in beacon\n");
-}
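
The loop above walks the beacon's tagged information elements, where each element is an element-id byte, a length byte, and that many payload bytes. As a self-contained illustration of that id/length walk, outside the driver (find_tim() and the sample element buffer are made up for the example, and EID_TIM stands in for WLAN_EID_TIM):

#include <stdint.h>
#include <stdio.h>

#define EID_TIM 5	/* stand-in for WLAN_EID_TIM */

/*
 * Walk id/len-tagged information elements and report the offset and
 * length of the TIM element, the same loop shape as above.
 * Returns 0 on success, -1 if no TIM element is present.
 */
static int find_tim(const uint8_t *ies, uint32_t ies_len,
		    uint16_t *tim_off, uint8_t *tim_len)
{
	uint32_t i = 0;

	while (i + 2 <= ies_len && ies[i] != EID_TIM)
		i += ies[i + 1] + 2;

	if (i + 2 <= ies_len && ies[i] == EID_TIM) {
		*tim_off = (uint16_t)i;
		*tim_len = ies[i + 1];
		return 0;
	}
	return -1;
}

int main(void)
{
	/* SSID "ab", one supported rate, then a TIM element (id 5, len 4). */
	const uint8_t ies[] = { 0, 2, 'a', 'b', 1, 1, 0x82, 5, 4, 0, 1, 0, 0 };
	uint16_t off;
	uint8_t len;

	if (!find_tim(ies, sizeof(ies), &off, &len))
		printf("TIM at offset %u, length %u\n",
		       (unsigned)off, (unsigned)len);
	return 0;
}
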
-
-static unsigned int iwl4965_hw_get_beacon_cmd(struct iwl_priv *priv,
- struct iwl_frame *frame)
-{
- struct iwl_tx_beacon_cmd *tx_beacon_cmd;
- u32 frame_size;
- u32 rate_flags;
- u32 rate;
- /*
- * We have to set up the TX command, the TX Beacon command, and the
- * beacon contents.
- */
-
- lockdep_assert_held(&priv->mutex);
-
- if (!priv->beacon_ctx) {
- IWL_ERR(priv, "trying to build beacon w/o beacon context!\n");
- return 0;
- }
-
- /* Initialize memory */
- tx_beacon_cmd = &frame->u.beacon;
- memset(tx_beacon_cmd, 0, sizeof(*tx_beacon_cmd));
-
- /* Set up TX beacon contents */
- frame_size = iwl4965_fill_beacon_frame(priv, tx_beacon_cmd->frame,
- sizeof(frame->u) - sizeof(*tx_beacon_cmd));
- if (WARN_ON_ONCE(frame_size > MAX_MPDU_SIZE))
- return 0;
- if (!frame_size)
- return 0;
-
- /* Set up TX command fields */
- tx_beacon_cmd->tx.len = cpu_to_le16((u16)frame_size);
- tx_beacon_cmd->tx.sta_id = priv->beacon_ctx->bcast_sta_id;
- tx_beacon_cmd->tx.stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
- tx_beacon_cmd->tx.tx_flags = TX_CMD_FLG_SEQ_CTL_MSK |
- TX_CMD_FLG_TSF_MSK | TX_CMD_FLG_STA_RATE_MSK;
-
- /* Set up TX beacon command fields */
- iwl4965_set_beacon_tim(priv, tx_beacon_cmd, (u8 *)tx_beacon_cmd->frame,
- frame_size);
-
- /* Set up packet rate and flags */
- rate = iwl_legacy_get_lowest_plcp(priv, priv->beacon_ctx);
- priv->mgmt_tx_ant = iwl4965_toggle_tx_ant(priv, priv->mgmt_tx_ant,
- priv->hw_params.valid_tx_ant);
- rate_flags = iwl4965_ant_idx_to_flags(priv->mgmt_tx_ant);
- if ((rate >= IWL_FIRST_CCK_RATE) && (rate <= IWL_LAST_CCK_RATE))
- rate_flags |= RATE_MCS_CCK_MSK;
- tx_beacon_cmd->tx.rate_n_flags = iwl4965_hw_set_rate_n_flags(rate,
- rate_flags);
-
- return sizeof(*tx_beacon_cmd) + frame_size;
-}
-
-int iwl4965_send_beacon_cmd(struct iwl_priv *priv)
-{
- struct iwl_frame *frame;
- unsigned int frame_size;
- int rc;
-
- frame = iwl4965_get_free_frame(priv);
- if (!frame) {
- IWL_ERR(priv, "Could not obtain free frame buffer for beacon "
- "command.\n");
- return -ENOMEM;
- }
-
- frame_size = iwl4965_hw_get_beacon_cmd(priv, frame);
- if (!frame_size) {
- IWL_ERR(priv, "Error configuring the beacon command\n");
- iwl4965_free_frame(priv, frame);
- return -EINVAL;
- }
-
- rc = iwl_legacy_send_cmd_pdu(priv, REPLY_TX_BEACON, frame_size,
- &frame->u.cmd[0]);
-
- iwl4965_free_frame(priv, frame);
-
- return rc;
-}
-
-static inline dma_addr_t iwl4965_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
- dma_addr_t addr = get_unaligned_le32(&tb->lo);
- if (sizeof(dma_addr_t) > sizeof(u32))
- addr |=
- ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;
-
- return addr;
-}
-
-static inline u16 iwl4965_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
-
- return le16_to_cpu(tb->hi_n_len) >> 4;
-}
-
-static inline void iwl4965_tfd_set_tb(struct iwl_tfd *tfd, u8 idx,
- dma_addr_t addr, u16 len)
-{
- struct iwl_tfd_tb *tb = &tfd->tbs[idx];
- u16 hi_n_len = len << 4;
-
- put_unaligned_le32(addr, &tb->lo);
- if (sizeof(dma_addr_t) > sizeof(u32))
- hi_n_len |= ((addr >> 16) >> 16) & 0xF;
-
- tb->hi_n_len = cpu_to_le16(hi_n_len);
-
- tfd->num_tbs = idx + 1;
-}
-
-static inline u8 iwl4965_tfd_get_num_tbs(struct iwl_tfd *tfd)
-{
- return tfd->num_tbs & 0x1f;
-}
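
The three accessors above split a DMA address of up to 36 bits across the two fields of a transfer-buffer descriptor: lo carries the low 32 address bits, while hi_n_len carries the top 4 address bits in its low nibble and the 12-bit length in its upper bits. A minimal host-endian sketch of that packing, outside the driver (struct tb_sketch and the helper names are illustrative; the real code additionally uses put_unaligned_le32()/cpu_to_le16() for byte order):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the lo / hi_n_len pair in struct iwl_tfd_tb. */
struct tb_sketch {
	uint32_t lo;		/* low 32 bits of the DMA address */
	uint16_t hi_n_len;	/* bits 0-3: address bits 32-35, bits 4-15: length */
};

static void tb_pack(struct tb_sketch *tb, uint64_t addr, uint16_t len)
{
	tb->lo = (uint32_t)addr;
	tb->hi_n_len = (uint16_t)(((addr >> 32) & 0xF) | ((uint32_t)len << 4));
}

static uint64_t tb_addr(const struct tb_sketch *tb)
{
	return (uint64_t)tb->lo | ((uint64_t)(tb->hi_n_len & 0xF) << 32);
}

static uint16_t tb_len(const struct tb_sketch *tb)
{
	return tb->hi_n_len >> 4;
}

int main(void)
{
	struct tb_sketch tb;

	tb_pack(&tb, 0xABCDE1234ULL, 1500);	/* 36-bit address, 1500-byte buffer */
	printf("addr=0x%llx len=%u\n",
	       (unsigned long long)tb_addr(&tb), (unsigned)tb_len(&tb));
	return 0;
}
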
-
-/**
- * iwl4965_hw_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
- * @priv - driver private data
- * @txq - tx queue
- *
- * Does NOT advance any TFD circular buffer read/write indexes
- * Does NOT free the TFD itself (which is within circular buffer)
- */
-void iwl4965_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq)
-{
- struct iwl_tfd *tfd_tmp = (struct iwl_tfd *)txq->tfds;
- struct iwl_tfd *tfd;
- struct pci_dev *dev = priv->pci_dev;
- int index = txq->q.read_ptr;
- int i;
- int num_tbs;
-
- tfd = &tfd_tmp[index];
-
- /* Sanity check on number of chunks */
- num_tbs = iwl4965_tfd_get_num_tbs(tfd);
-
- if (num_tbs >= IWL_NUM_OF_TBS) {
- IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
-		/* @todo issue fatal error, it is quite a serious situation */
- return;
- }
-
- /* Unmap tx_cmd */
- if (num_tbs)
- pci_unmap_single(dev,
- dma_unmap_addr(&txq->meta[index], mapping),
- dma_unmap_len(&txq->meta[index], len),
- PCI_DMA_BIDIRECTIONAL);
-
- /* Unmap chunks, if any. */
- for (i = 1; i < num_tbs; i++)
- pci_unmap_single(dev, iwl4965_tfd_tb_get_addr(tfd, i),
- iwl4965_tfd_tb_get_len(tfd, i),
- PCI_DMA_TODEVICE);
-
- /* free SKB */
- if (txq->txb) {
- struct sk_buff *skb;
-
- skb = txq->txb[txq->q.read_ptr].skb;
-
- /* can be called from irqs-disabled context */
- if (skb) {
- dev_kfree_skb_any(skb);
- txq->txb[txq->q.read_ptr].skb = NULL;
- }
- }
-}
-
-int iwl4965_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- dma_addr_t addr, u16 len,
- u8 reset, u8 pad)
-{
- struct iwl_queue *q;
- struct iwl_tfd *tfd, *tfd_tmp;
- u32 num_tbs;
-
- q = &txq->q;
- tfd_tmp = (struct iwl_tfd *)txq->tfds;
- tfd = &tfd_tmp[q->write_ptr];
-
- if (reset)
- memset(tfd, 0, sizeof(*tfd));
-
- num_tbs = iwl4965_tfd_get_num_tbs(tfd);
-
-	/* Each TFD can point to a maximum of 20 Tx buffers */
- if (num_tbs >= IWL_NUM_OF_TBS) {
- IWL_ERR(priv, "Error can not send more than %d chunks\n",
- IWL_NUM_OF_TBS);
- return -EINVAL;
- }
-
- BUG_ON(addr & ~DMA_BIT_MASK(36));
- if (unlikely(addr & ~IWL_TX_DMA_MASK))
- IWL_ERR(priv, "Unaligned address = %llx\n",
- (unsigned long long)addr);
-
- iwl4965_tfd_set_tb(tfd, num_tbs, addr, len);
-
- return 0;
-}
-
-/*
- * Tell nic where to find circular buffer of Tx Frame Descriptors for
- * given Tx queue, and enable the DMA channel used for that queue.
- *
- * 4965 supports up to 16 Tx queues in DRAM, mapped to up to 8 Tx DMA
- * channels supported in hardware.
- */
-int iwl4965_hw_tx_queue_init(struct iwl_priv *priv,
- struct iwl_tx_queue *txq)
-{
- int txq_id = txq->q.id;
-
- /* Circular buffer (TFD queue in DRAM) physical base address */
- iwl_legacy_write_direct32(priv, FH_MEM_CBBC_QUEUE(txq_id),
- txq->q.dma_addr >> 8);
-
- return 0;
-}
-
-/******************************************************************************
- *
- * Generic RX handler implementations
- *
- ******************************************************************************/
-static void iwl4965_rx_reply_alive(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_alive_resp *palive;
- struct delayed_work *pwork;
-
- palive = &pkt->u.alive_frame;
-
- IWL_DEBUG_INFO(priv, "Alive ucode status 0x%08X revision "
- "0x%01X 0x%01X\n",
- palive->is_valid, palive->ver_type,
- palive->ver_subtype);
-
- if (palive->ver_subtype == INITIALIZE_SUBTYPE) {
- IWL_DEBUG_INFO(priv, "Initialization Alive received.\n");
- memcpy(&priv->card_alive_init,
- &pkt->u.alive_frame,
- sizeof(struct iwl_init_alive_resp));
- pwork = &priv->init_alive_start;
- } else {
- IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
- memcpy(&priv->card_alive, &pkt->u.alive_frame,
- sizeof(struct iwl_alive_resp));
- pwork = &priv->alive_start;
- }
-
- /* We delay the ALIVE response by 5ms to
- * give the HW RF Kill time to activate... */
- if (palive->is_valid == UCODE_VALID_OK)
- queue_delayed_work(priv->workqueue, pwork,
- msecs_to_jiffies(5));
- else
- IWL_WARN(priv, "uCode did not respond OK.\n");
-}
-
-/**
- * iwl4965_bg_statistics_periodic - Timer callback to queue statistics
- *
- * This callback is provided in order to send a statistics request.
- *
- * This timer function is continually reset to execute within
- * REG_RECALIB_PERIOD seconds since the last STATISTICS_NOTIFICATION
- * was received. We need to ensure we receive the statistics in order
- * to update the temperature used for calibrating the TXPOWER.
- */
-static void iwl4965_bg_statistics_periodic(unsigned long data)
-{
- struct iwl_priv *priv = (struct iwl_priv *)data;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
-	/* don't send host command if rf-kill is on */
- if (!iwl_legacy_is_ready_rf(priv))
- return;
-
- iwl_legacy_send_statistics_request(priv, CMD_ASYNC, false);
-}
-
-static void iwl4965_rx_beacon_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl4965_beacon_notif *beacon =
- (struct iwl4965_beacon_notif *)pkt->u.raw;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- u8 rate = iwl4965_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
-
- IWL_DEBUG_RX(priv, "beacon status %x retries %d iss %d "
- "tsf %d %d rate %d\n",
- le32_to_cpu(beacon->beacon_notify_hdr.u.status) & TX_STATUS_MSK,
- beacon->beacon_notify_hdr.failure_frame,
- le32_to_cpu(beacon->ibss_mgr_status),
- le32_to_cpu(beacon->high_tsf),
- le32_to_cpu(beacon->low_tsf), rate);
-#endif
-
- priv->ibss_manager = le32_to_cpu(beacon->ibss_mgr_status);
-}
-
-static void iwl4965_perform_ct_kill_task(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- IWL_DEBUG_POWER(priv, "Stop all queues\n");
-
- if (priv->mac80211_registered)
- ieee80211_stop_queues(priv->hw);
-
- iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
- CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
- iwl_read32(priv, CSR_UCODE_DRV_GP1);
-
- spin_lock_irqsave(&priv->reg_lock, flags);
- if (!iwl_grab_nic_access(priv))
- iwl_release_nic_access(priv);
- spin_unlock_irqrestore(&priv->reg_lock, flags);
-}
-
-/* Handle notification from uCode that card's power state is changing
- * due to software, hardware, or critical temperature RFKILL */
-static void iwl4965_rx_card_state_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
- unsigned long status = priv->status;
-
- IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
- (flags & HW_CARD_DISABLED) ? "Kill" : "On",
- (flags & SW_CARD_DISABLED) ? "Kill" : "On",
- (flags & CT_CARD_DISABLED) ?
- "Reached" : "Not reached");
-
- if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
- CT_CARD_DISABLED)) {
-
- iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
- iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
- HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
-
- if (!(flags & RXON_CARD_DISABLED)) {
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
- iwl_legacy_write_direct32(priv, HBUS_TARG_MBX_C,
- HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
- }
- }
-
- if (flags & CT_CARD_DISABLED)
- iwl4965_perform_ct_kill_task(priv);
-
- if (flags & HW_CARD_DISABLED)
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
-
- if (!(flags & RXON_CARD_DISABLED))
- iwl_legacy_scan_cancel(priv);
-
- if ((test_bit(STATUS_RF_KILL_HW, &status) !=
- test_bit(STATUS_RF_KILL_HW, &priv->status)))
- wiphy_rfkill_set_hw_state(priv->hw->wiphy,
- test_bit(STATUS_RF_KILL_HW, &priv->status));
- else
- wake_up(&priv->wait_command_queue);
-}
-
-/**
- * iwl4965_setup_rx_handlers - Initialize Rx handler callbacks
- *
- * Setup the RX handlers for each of the reply types sent from the uCode
- * to the host.
- *
- * This function chains into the hardware specific files for them to setup
- * any hardware specific handlers as well.
- */
-static void iwl4965_setup_rx_handlers(struct iwl_priv *priv)
-{
- priv->rx_handlers[REPLY_ALIVE] = iwl4965_rx_reply_alive;
- priv->rx_handlers[REPLY_ERROR] = iwl_legacy_rx_reply_error;
- priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_legacy_rx_csa;
- priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
- iwl_legacy_rx_spectrum_measure_notif;
- priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_legacy_rx_pm_sleep_notif;
- priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
- iwl_legacy_rx_pm_debug_statistics_notif;
- priv->rx_handlers[BEACON_NOTIFICATION] = iwl4965_rx_beacon_notif;
-
- /*
-	 * The same handler is used for both the REPLY to a discrete
-	 * statistics request from the host and the periodic statistics
-	 * notifications (after received beacons) from the uCode.
- */
- priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl4965_reply_statistics;
- priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl4965_rx_statistics;
-
- iwl_legacy_setup_rx_scan_handlers(priv);
-
- /* status change handler */
- priv->rx_handlers[CARD_STATE_NOTIFICATION] =
- iwl4965_rx_card_state_notif;
-
- priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
- iwl4965_rx_missed_beacon_notif;
- /* Rx handlers */
- priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl4965_rx_reply_rx_phy;
- priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl4965_rx_reply_rx;
- /* block ack */
- priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl4965_rx_reply_compressed_ba;
- /* Set up hardware specific Rx handlers */
- priv->cfg->ops->lib->rx_handler_setup(priv);
-}
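
Behind this setup is a plain dispatch table: a function-pointer array indexed by the 8-bit uCode command id, which iwl4965_rx_handle() consults and in which unset entries are simply skipped. A minimal standalone sketch of the same pattern (struct ctx, handle_alive() and the command value are invented for the example):

#include <stdio.h>

#define N_CMDS 256

struct ctx { int alive_seen; };

typedef void (*cmd_handler_t)(struct ctx *ctx);

static void handle_alive(struct ctx *ctx)
{
	ctx->alive_seen = 1;
}

int main(void)
{
	/* Dispatch table indexed by the command id, as in priv->rx_handlers[]. */
	cmd_handler_t handlers[N_CMDS] = { 0 };
	struct ctx ctx = { 0 };
	unsigned char cmd = 0x01;	/* arbitrary stand-in for a reply id */

	handlers[cmd] = handle_alive;

	if (handlers[cmd])
		handlers[cmd](&ctx);		/* handled */
	else
		printf("no handler for 0x%02x\n", cmd);

	printf("alive_seen=%d\n", ctx.alive_seen);
	return 0;
}
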
-
-/**
- * iwl4965_rx_handle - Main entry function for receiving responses from uCode
- *
- * Uses the priv->rx_handlers callback function array to invoke
- * the appropriate handlers, including command responses,
- * frame-received notifications, and other notifications.
- */
-void iwl4965_rx_handle(struct iwl_priv *priv)
-{
- struct iwl_rx_mem_buffer *rxb;
- struct iwl_rx_packet *pkt;
- struct iwl_rx_queue *rxq = &priv->rxq;
- u32 r, i;
- int reclaim;
- unsigned long flags;
- u8 fill_rx = 0;
- u32 count = 8;
- int total_empty;
-
- /* uCode's read index (stored in shared DRAM) indicates the last Rx
- * buffer that the driver may process (last buffer filled by ucode). */
- r = le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF;
- i = rxq->read;
-
- /* Rx interrupt, but nothing sent from uCode */
- if (i == r)
- IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
-
- /* calculate total frames need to be restock after handling RX */
- total_empty = r - rxq->write_actual;
- if (total_empty < 0)
- total_empty += RX_QUEUE_SIZE;
-
- if (total_empty > (RX_QUEUE_SIZE / 2))
- fill_rx = 1;
-
- while (i != r) {
- int len;
-
- rxb = rxq->queue[i];
-
- /* If an RXB doesn't have a Rx queue slot associated with it,
- * then a bug has been introduced in the queue refilling
- * routines -- catch it here */
- BUG_ON(rxb == NULL);
-
- rxq->queue[i] = NULL;
-
- pci_unmap_page(priv->pci_dev, rxb->page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- pkt = rxb_addr(rxb);
-
- len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- len += sizeof(u32); /* account for status word */
- trace_iwlwifi_legacy_dev_rx(priv, pkt, len);
-
- /* Reclaim a command buffer only if this packet is a response
- * to a (driver-originated) command.
- * If the packet (e.g. Rx frame) originated from uCode,
- * there is no command buffer to reclaim.
- * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
- * but apparently a few don't get set; catch them here. */
- reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME) &&
- (pkt->hdr.cmd != REPLY_RX_PHY_CMD) &&
- (pkt->hdr.cmd != REPLY_RX) &&
- (pkt->hdr.cmd != REPLY_RX_MPDU_CMD) &&
- (pkt->hdr.cmd != REPLY_COMPRESSED_BA) &&
- (pkt->hdr.cmd != STATISTICS_NOTIFICATION) &&
- (pkt->hdr.cmd != REPLY_TX);
-
- /* Based on type of command response or notification,
- * handle those that need handling via function in
- * rx_handlers table. See iwl4965_setup_rx_handlers() */
- if (priv->rx_handlers[pkt->hdr.cmd]) {
- IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
- i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
- pkt->hdr.cmd);
- priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
- priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
- } else {
- /* No handling needed */
- IWL_DEBUG_RX(priv,
- "r %d i %d No handler needed for %s, 0x%02x\n",
- r, i, iwl_legacy_get_cmd_string(pkt->hdr.cmd),
- pkt->hdr.cmd);
- }
-
- /*
- * XXX: After here, we should always check rxb->page
- * against NULL before touching it or its virtual
- * memory (pkt). Because some rx_handler might have
- * already taken or freed the pages.
- */
-
- if (reclaim) {
- /* Invoke any callbacks, transfer the buffer to caller,
- * and fire off the (possibly) blocking iwl_legacy_send_cmd()
- * as we reclaim the driver command queue */
- if (rxb->page)
- iwl_legacy_tx_cmd_complete(priv, rxb);
- else
- IWL_WARN(priv, "Claim null rxb?\n");
- }
-
- /* Reuse the page if possible. For notification packets and
- * SKBs that fail to Rx correctly, add them back into the
- * rx_free list for reuse later. */
- spin_lock_irqsave(&rxq->lock, flags);
- if (rxb->page != NULL) {
- rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
- 0, PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
- } else
- list_add_tail(&rxb->list, &rxq->rx_used);
-
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- i = (i + 1) & RX_QUEUE_MASK;
- /* If there are a lot of unused frames,
-		 * restock the Rx queue so ucode won't assert. */
- if (fill_rx) {
- count++;
- if (count >= 8) {
- rxq->read = i;
- iwl4965_rx_replenish_now(priv);
- count = 0;
- }
- }
- }
-
- /* Backtrack one entry */
- rxq->read = i;
- if (fill_rx)
- iwl4965_rx_replenish_now(priv);
- else
- iwl4965_rx_queue_restock(priv);
-}
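
Two details of the ring bookkeeping above are worth spelling out: the restock count is the wrapped distance between the uCode's closed_rb_num and the driver's write_actual, and the read index advances with a power-of-two mask rather than a modulo. A small standalone sketch (the queue size and function name below are illustrative):

#include <stdint.h>
#include <stdio.h>

#define QUEUE_SIZE 256			/* must be a power of two */
#define QUEUE_MASK (QUEUE_SIZE - 1)

/* Wrapped distance from write_actual up to closed_rb, i.e. how many
 * slots would need restocking; this mirrors the total_empty math above. */
static int slots_to_restock(uint32_t closed_rb, uint32_t write_actual)
{
	int empty = (int)closed_rb - (int)write_actual;

	if (empty < 0)
		empty += QUEUE_SIZE;
	return empty;
}

int main(void)
{
	uint32_t i = 250;

	printf("restock %d slots\n", slots_to_restock(10, 200));	/* prints 66 */

	/* Index advance wraps with the power-of-two mask, like RX_QUEUE_MASK. */
	for (int n = 0; n < 8; n++, i = (i + 1) & QUEUE_MASK)
		printf("%u ", (unsigned)i);
	printf("\n");
	return 0;
}
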
-
-/* call this function to flush any scheduled tasklet */
-static inline void iwl4965_synchronize_irq(struct iwl_priv *priv)
-{
-	/* wait to make sure we flush the pending tasklet */
- synchronize_irq(priv->pci_dev->irq);
- tasklet_kill(&priv->irq_tasklet);
-}
-
-static void iwl4965_irq_tasklet(struct iwl_priv *priv)
-{
- u32 inta, handled = 0;
- u32 inta_fh;
- unsigned long flags;
- u32 i;
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- u32 inta_mask;
-#endif
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Ack/clear/reset pending uCode interrupts.
- * Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
- * and will clear only when CSR_FH_INT_STATUS gets cleared. */
- inta = iwl_read32(priv, CSR_INT);
- iwl_write32(priv, CSR_INT, inta);
-
- /* Ack/clear/reset pending flow-handler (DMA) interrupts.
- * Any new interrupts that happen after this, either while we're
- * in this tasklet, or later, will show up in next ISR/tasklet. */
- inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
- iwl_write32(priv, CSR_FH_INT_STATUS, inta_fh);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (iwl_legacy_get_debug_level(priv) & IWL_DL_ISR) {
- /* just for debug */
- inta_mask = iwl_read32(priv, CSR_INT_MASK);
- IWL_DEBUG_ISR(priv, "inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
- inta, inta_mask, inta_fh);
- }
-#endif
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
- * atomic, make sure that inta covers all the interrupts that
- * we've discovered, even if FH interrupt came in just after
- * reading CSR_INT. */
- if (inta_fh & CSR49_FH_INT_RX_MASK)
- inta |= CSR_INT_BIT_FH_RX;
- if (inta_fh & CSR49_FH_INT_TX_MASK)
- inta |= CSR_INT_BIT_FH_TX;
-
- /* Now service all interrupt bits discovered above. */
- if (inta & CSR_INT_BIT_HW_ERR) {
- IWL_ERR(priv, "Hardware error detected. Restarting.\n");
-
- /* Tell the device to stop sending interrupts */
- iwl_legacy_disable_interrupts(priv);
-
- priv->isr_stats.hw++;
- iwl_legacy_irq_handle_error(priv);
-
- handled |= CSR_INT_BIT_HW_ERR;
-
- return;
- }
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
- /* NIC fires this, but we don't use it, redundant with WAKEUP */
- if (inta & CSR_INT_BIT_SCD) {
-			IWL_DEBUG_ISR(priv, "Scheduler finished transmitting "
-					"the frame(s).\n");
- priv->isr_stats.sch++;
- }
-
- /* Alive notification via Rx interrupt will do the real work */
- if (inta & CSR_INT_BIT_ALIVE) {
- IWL_DEBUG_ISR(priv, "Alive interrupt\n");
- priv->isr_stats.alive++;
- }
- }
-#endif
- /* Safely ignore these bits for debug checks below */
- inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
-
- /* HW RF KILL switch toggled */
- if (inta & CSR_INT_BIT_RF_KILL) {
- int hw_rf_kill = 0;
- if (!(iwl_read32(priv, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW))
- hw_rf_kill = 1;
-
- IWL_WARN(priv, "RF_KILL bit toggled to %s.\n",
- hw_rf_kill ? "disable radio" : "enable radio");
-
- priv->isr_stats.rfkill++;
-
-		/* The driver only loads the ucode once, when setting the
-		 * interface up, and allows loading it even if the radio is
-		 * killed. Hence update the killswitch state here; the
-		 * rfkill handler will take care of restarting if needed.
-		 */
- if (!test_bit(STATUS_ALIVE, &priv->status)) {
- if (hw_rf_kill)
- set_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
- wiphy_rfkill_set_hw_state(priv->hw->wiphy, hw_rf_kill);
- }
-
- handled |= CSR_INT_BIT_RF_KILL;
- }
-
- /* Chip got too hot and stopped itself */
- if (inta & CSR_INT_BIT_CT_KILL) {
- IWL_ERR(priv, "Microcode CT kill error detected.\n");
- priv->isr_stats.ctkill++;
- handled |= CSR_INT_BIT_CT_KILL;
- }
-
- /* Error detected by uCode */
- if (inta & CSR_INT_BIT_SW_ERR) {
- IWL_ERR(priv, "Microcode SW error detected. "
- " Restarting 0x%X.\n", inta);
- priv->isr_stats.sw++;
- iwl_legacy_irq_handle_error(priv);
- handled |= CSR_INT_BIT_SW_ERR;
- }
-
- /*
- * uCode wakes up after power-down sleep.
- * Tell device about any new tx or host commands enqueued,
- * and about any Rx buffers made available while asleep.
- */
- if (inta & CSR_INT_BIT_WAKEUP) {
- IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
- iwl_legacy_rx_queue_update_write_ptr(priv, &priv->rxq);
- for (i = 0; i < priv->hw_params.max_txq_num; i++)
- iwl_legacy_txq_update_write_ptr(priv, &priv->txq[i]);
- priv->isr_stats.wakeup++;
- handled |= CSR_INT_BIT_WAKEUP;
- }
-
- /* All uCode command responses, including Tx command responses,
- * Rx "responses" (frame-received notification), and other
-	 * notifications from uCode come through here */
- if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
- iwl4965_rx_handle(priv);
- priv->isr_stats.rx++;
- handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
- }
-
- /* This "Tx" DMA channel is used only for loading uCode */
- if (inta & CSR_INT_BIT_FH_TX) {
- IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
- priv->isr_stats.tx++;
- handled |= CSR_INT_BIT_FH_TX;
- /* Wake up uCode load routine, now that load is complete */
- priv->ucode_write_complete = 1;
- wake_up(&priv->wait_command_queue);
- }
-
- if (inta & ~handled) {
- IWL_ERR(priv, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
- priv->isr_stats.unhandled++;
- }
-
- if (inta & ~(priv->inta_mask)) {
- IWL_WARN(priv, "Disabled INTA bits 0x%08x were pending\n",
- inta & ~priv->inta_mask);
- IWL_WARN(priv, " with FH_INT = 0x%08x\n", inta_fh);
- }
-
- /* Re-enable all interrupts */
- /* only Re-enable if disabled by irq */
- if (test_bit(STATUS_INT_ENABLED, &priv->status))
- iwl_legacy_enable_interrupts(priv);
- /* Re-enable RF_KILL if it occurred */
- else if (handled & CSR_INT_BIT_RF_KILL)
- iwl_legacy_enable_rfkill_int(priv);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- if (iwl_legacy_get_debug_level(priv) & (IWL_DL_ISR)) {
- inta = iwl_read32(priv, CSR_INT);
- inta_mask = iwl_read32(priv, CSR_INT_MASK);
- inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
- IWL_DEBUG_ISR(priv,
- "End inta 0x%08x, enabled 0x%08x, fh 0x%08x, "
- "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
- }
-#endif
-}
-
-/*****************************************************************************
- *
- * sysfs attributes
- *
- *****************************************************************************/
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-
-/*
- * The following adds a new attribute to the sysfs representation
- * of this device driver (i.e. a new file in /sys/class/net/wlan0/device/)
- * used for controlling the debug level.
- *
- * See the level definitions in iwl for details.
- *
- * The debug_level managed via sysfs below is a per-device debug level
- * that is used instead of the global debug level whenever it is set.
- */
-static ssize_t iwl4965_show_debug_level(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- return sprintf(buf, "0x%08X\n", iwl_legacy_get_debug_level(priv));
-}
-static ssize_t iwl4965_store_debug_level(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- unsigned long val;
- int ret;
-
- ret = strict_strtoul(buf, 0, &val);
- if (ret)
- IWL_ERR(priv, "%s is not in hex or decimal form.\n", buf);
- else {
- priv->debug_level = val;
- if (iwl_legacy_alloc_traffic_mem(priv))
- IWL_ERR(priv,
- "Not enough memory to generate traffic log\n");
- }
- return strnlen(buf, count);
-}
-
-static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
- iwl4965_show_debug_level, iwl4965_store_debug_level);
-
-
-#endif /* CONFIG_IWLWIFI_LEGACY_DEBUG */
-
-
-static ssize_t iwl4965_show_temperature(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- if (!iwl_legacy_is_alive(priv))
- return -EAGAIN;
-
- return sprintf(buf, "%d\n", priv->temperature);
-}
-
-static DEVICE_ATTR(temperature, S_IRUGO, iwl4965_show_temperature, NULL);
-
-static ssize_t iwl4965_show_tx_power(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
-
- if (!iwl_legacy_is_ready_rf(priv))
- return sprintf(buf, "off\n");
- else
- return sprintf(buf, "%d\n", priv->tx_power_user_lmt);
-}
-
-static ssize_t iwl4965_store_tx_power(struct device *d,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct iwl_priv *priv = dev_get_drvdata(d);
- unsigned long val;
- int ret;
-
- ret = strict_strtoul(buf, 10, &val);
- if (ret)
- IWL_INFO(priv, "%s is not in decimal form.\n", buf);
- else {
- ret = iwl_legacy_set_tx_power(priv, val, false);
- if (ret)
- IWL_ERR(priv, "failed setting tx power (0x%d).\n",
- ret);
- else
- ret = count;
- }
- return ret;
-}
-
-static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO,
- iwl4965_show_tx_power, iwl4965_store_tx_power);
-
-static struct attribute *iwl_sysfs_entries[] = {
- &dev_attr_temperature.attr,
- &dev_attr_tx_power.attr,
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
- &dev_attr_debug_level.attr,
-#endif
- NULL
-};
-
-static struct attribute_group iwl_attribute_group = {
- .name = NULL, /* put in device directory */
- .attrs = iwl_sysfs_entries,
-};
-
-/******************************************************************************
- *
- * uCode download functions
- *
- ******************************************************************************/
-
-static void iwl4965_dealloc_ucode_pci(struct iwl_priv *priv)
-{
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_code);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_init_data);
- iwl_legacy_free_fw_desc(priv->pci_dev, &priv->ucode_boot);
-}
-
-static void iwl4965_nic_start(struct iwl_priv *priv)
-{
- /* Remove all resets to allow NIC to operate */
- iwl_write32(priv, CSR_RESET, 0);
-}
-
-static void iwl4965_ucode_callback(const struct firmware *ucode_raw,
- void *context);
-static int iwl4965_mac_setup_register(struct iwl_priv *priv,
- u32 max_probe_length);
-
-static int __must_check iwl4965_request_firmware(struct iwl_priv *priv, bool first)
-{
- const char *name_pre = priv->cfg->fw_name_pre;
- char tag[8];
-
- if (first) {
- priv->fw_index = priv->cfg->ucode_api_max;
- sprintf(tag, "%d", priv->fw_index);
- } else {
- priv->fw_index--;
- sprintf(tag, "%d", priv->fw_index);
- }
-
- if (priv->fw_index < priv->cfg->ucode_api_min) {
- IWL_ERR(priv, "no suitable firmware found!\n");
- return -ENOENT;
- }
-
- sprintf(priv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
-
- IWL_DEBUG_INFO(priv, "attempting to load firmware '%s'\n",
- priv->firmware_name);
-
- return request_firmware_nowait(THIS_MODULE, 1, priv->firmware_name,
- &priv->pci_dev->dev, GFP_KERNEL, priv,
- iwl4965_ucode_callback);
-}
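
The request path above asks for the highest API version the driver supports first and, each time a request fails, falls back one version at a time until ucode_api_min is passed, building a "<prefix><api>.ucode" name on every attempt. A tiny standalone sketch of that naming and fallback order (the prefix string and the API bounds below are only examples):

#include <stdio.h>

#define API_MAX 2
#define API_MIN 1

int main(void)
{
	char name[64];

	/* Highest supported API first, then fall back one step at a time. */
	for (int api = API_MAX; api >= API_MIN; api--) {
		snprintf(name, sizeof(name), "%s%d%s",
			 "iwlwifi-4965-", api, ".ucode");
		printf("would request: %s\n", name);
	}
	return 0;
}
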
-
-struct iwl4965_firmware_pieces {
- const void *inst, *data, *init, *init_data, *boot;
- size_t inst_size, data_size, init_size, init_data_size, boot_size;
-};
-
-static int iwl4965_load_firmware(struct iwl_priv *priv,
- const struct firmware *ucode_raw,
- struct iwl4965_firmware_pieces *pieces)
-{
- struct iwl_ucode_header *ucode = (void *)ucode_raw->data;
- u32 api_ver, hdr_size;
- const u8 *src;
-
- priv->ucode_ver = le32_to_cpu(ucode->ver);
- api_ver = IWL_UCODE_API(priv->ucode_ver);
-
- switch (api_ver) {
- default:
- case 0:
- case 1:
- case 2:
- hdr_size = 24;
- if (ucode_raw->size < hdr_size) {
- IWL_ERR(priv, "File size too small!\n");
- return -EINVAL;
- }
- pieces->inst_size = le32_to_cpu(ucode->v1.inst_size);
- pieces->data_size = le32_to_cpu(ucode->v1.data_size);
- pieces->init_size = le32_to_cpu(ucode->v1.init_size);
- pieces->init_data_size =
- le32_to_cpu(ucode->v1.init_data_size);
- pieces->boot_size = le32_to_cpu(ucode->v1.boot_size);
- src = ucode->v1.data;
- break;
- }
-
- /* Verify size of file vs. image size info in file's header */
- if (ucode_raw->size != hdr_size + pieces->inst_size +
- pieces->data_size + pieces->init_size +
- pieces->init_data_size + pieces->boot_size) {
-
- IWL_ERR(priv,
- "uCode file size %d does not match expected size\n",
- (int)ucode_raw->size);
- return -EINVAL;
- }
-
- pieces->inst = src;
- src += pieces->inst_size;
- pieces->data = src;
- src += pieces->data_size;
- pieces->init = src;
- src += pieces->init_size;
- pieces->init_data = src;
- src += pieces->init_data_size;
- pieces->boot = src;
- src += pieces->boot_size;
-
- return 0;
-}
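
The parse step above reads the per-section sizes from the header, checks that they sum to the file length, and then slices the raw buffer with a moving source pointer instead of copying. A standalone sketch of the same check-then-slice approach (struct fw_hdr and split_fw() are invented for the example; the real header fields are little-endian and go through le32_to_cpu()):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical two-section header: sizes followed by the payload. */
struct fw_hdr {
	uint32_t inst_size;
	uint32_t data_size;
};

/* Verify that the file length matches header plus declared sections,
 * then hand out pointers into the buffer without copying. */
static int split_fw(const uint8_t *buf, size_t len,
		    const uint8_t **inst, const uint8_t **data)
{
	struct fw_hdr hdr;

	if (len < sizeof(hdr))
		return -1;
	memcpy(&hdr, buf, sizeof(hdr));

	if (len != sizeof(hdr) + hdr.inst_size + hdr.data_size)
		return -1;	/* truncated or padded image */

	*inst = buf + sizeof(hdr);
	*data = *inst + hdr.inst_size;
	return 0;
}

int main(void)
{
	uint8_t image[sizeof(struct fw_hdr) + 4 + 2] = { 0 };
	struct fw_hdr hdr = { .inst_size = 4, .data_size = 2 };
	const uint8_t *inst, *data;

	memcpy(image, &hdr, sizeof(hdr));
	if (!split_fw(image, sizeof(image), &inst, &data))
		printf("inst at +%zu, data at +%zu\n",
		       (size_t)(inst - image), (size_t)(data - image));
	return 0;
}
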
-
-/**
- * iwl4965_ucode_callback - callback when firmware was loaded
- *
- * If loaded successfully, copies the firmware into buffers
- * for the card to fetch (via DMA).
- */
-static void
-iwl4965_ucode_callback(const struct firmware *ucode_raw, void *context)
-{
- struct iwl_priv *priv = context;
- struct iwl_ucode_header *ucode;
- int err;
- struct iwl4965_firmware_pieces pieces;
- const unsigned int api_max = priv->cfg->ucode_api_max;
- const unsigned int api_min = priv->cfg->ucode_api_min;
- u32 api_ver;
-
- u32 max_probe_length = 200;
- u32 standard_phy_calibration_size =
- IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
-
- memset(&pieces, 0, sizeof(pieces));
-
- if (!ucode_raw) {
- if (priv->fw_index <= priv->cfg->ucode_api_max)
- IWL_ERR(priv,
- "request for firmware file '%s' failed.\n",
- priv->firmware_name);
- goto try_again;
- }
-
- IWL_DEBUG_INFO(priv, "Loaded firmware file '%s' (%zd bytes).\n",
- priv->firmware_name, ucode_raw->size);
-
- /* Make sure that we got at least the API version number */
- if (ucode_raw->size < 4) {
- IWL_ERR(priv, "File size way too small!\n");
- goto try_again;
- }
-
- /* Data from ucode file: header followed by uCode images */
- ucode = (struct iwl_ucode_header *)ucode_raw->data;
-
- err = iwl4965_load_firmware(priv, ucode_raw, &pieces);
-
- if (err)
- goto try_again;
-
- api_ver = IWL_UCODE_API(priv->ucode_ver);
-
- /*
- * api_ver should match the api version forming part of the
- * firmware filename ... but we don't check for that and only rely
- * on the API version read from firmware header from here on forward
- */
- if (api_ver < api_min || api_ver > api_max) {
- IWL_ERR(priv,
- "Driver unable to support your firmware API. "
- "Driver supports v%u, firmware is v%u.\n",
- api_max, api_ver);
- goto try_again;
- }
-
- if (api_ver != api_max)
- IWL_ERR(priv,
- "Firmware has old API version. Expected v%u, "
- "got v%u. New firmware can be obtained "
- "from http://www.intellinuxwireless.org.\n",
- api_max, api_ver);
-
- IWL_INFO(priv, "loaded firmware version %u.%u.%u.%u\n",
- IWL_UCODE_MAJOR(priv->ucode_ver),
- IWL_UCODE_MINOR(priv->ucode_ver),
- IWL_UCODE_API(priv->ucode_ver),
- IWL_UCODE_SERIAL(priv->ucode_ver));
-
- snprintf(priv->hw->wiphy->fw_version,
- sizeof(priv->hw->wiphy->fw_version),
- "%u.%u.%u.%u",
- IWL_UCODE_MAJOR(priv->ucode_ver),
- IWL_UCODE_MINOR(priv->ucode_ver),
- IWL_UCODE_API(priv->ucode_ver),
- IWL_UCODE_SERIAL(priv->ucode_ver));
-
- /*
- * For any of the failures below (before allocating pci memory)
- * we will try to load a version with a smaller API -- maybe the
- * user just got a corrupted version of the latest API.
- */
-
- IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
- priv->ucode_ver);
- IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %Zd\n",
- pieces.inst_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr runtime data size = %Zd\n",
- pieces.data_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr init inst size = %Zd\n",
- pieces.init_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr init data size = %Zd\n",
- pieces.init_data_size);
- IWL_DEBUG_INFO(priv, "f/w package hdr boot inst size = %Zd\n",
- pieces.boot_size);
-
- /* Verify that uCode images will fit in card's SRAM */
- if (pieces.inst_size > priv->hw_params.max_inst_size) {
- IWL_ERR(priv, "uCode instr len %Zd too large to fit in\n",
- pieces.inst_size);
- goto try_again;
- }
-
- if (pieces.data_size > priv->hw_params.max_data_size) {
- IWL_ERR(priv, "uCode data len %Zd too large to fit in\n",
- pieces.data_size);
- goto try_again;
- }
-
- if (pieces.init_size > priv->hw_params.max_inst_size) {
- IWL_ERR(priv, "uCode init instr len %Zd too large to fit in\n",
- pieces.init_size);
- goto try_again;
- }
-
- if (pieces.init_data_size > priv->hw_params.max_data_size) {
- IWL_ERR(priv, "uCode init data len %Zd too large to fit in\n",
- pieces.init_data_size);
- goto try_again;
- }
-
- if (pieces.boot_size > priv->hw_params.max_bsm_size) {
- IWL_ERR(priv, "uCode boot instr len %Zd too large to fit in\n",
- pieces.boot_size);
- goto try_again;
- }
-
- /* Allocate ucode buffers for card's bus-master loading ... */
-
- /* Runtime instructions and 2 copies of data:
- * 1) unmodified from disk
- * 2) backup cache for save/restore during power-downs */
- priv->ucode_code.len = pieces.inst_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_code);
-
- priv->ucode_data.len = pieces.data_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data);
-
- priv->ucode_data_backup.len = pieces.data_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_data_backup);
-
- if (!priv->ucode_code.v_addr || !priv->ucode_data.v_addr ||
- !priv->ucode_data_backup.v_addr)
- goto err_pci_alloc;
-
- /* Initialization instructions and data */
- if (pieces.init_size && pieces.init_data_size) {
- priv->ucode_init.len = pieces.init_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init);
-
- priv->ucode_init_data.len = pieces.init_data_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_init_data);
-
- if (!priv->ucode_init.v_addr || !priv->ucode_init_data.v_addr)
- goto err_pci_alloc;
- }
-
- /* Bootstrap (instructions only, no data) */
- if (pieces.boot_size) {
- priv->ucode_boot.len = pieces.boot_size;
- iwl_legacy_alloc_fw_desc(priv->pci_dev, &priv->ucode_boot);
-
- if (!priv->ucode_boot.v_addr)
- goto err_pci_alloc;
- }
-
- /* Now that we can no longer fail, copy information */
-
- priv->sta_key_max_num = STA_KEY_MAX_NUM;
-
- /* Copy images into buffers for card's bus-master reads ... */
-
- /* Runtime instructions (first block of data in file) */
- IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode instr len %Zd\n",
- pieces.inst_size);
- memcpy(priv->ucode_code.v_addr, pieces.inst, pieces.inst_size);
-
- IWL_DEBUG_INFO(priv, "uCode instr buf vaddr = 0x%p, paddr = 0x%08x\n",
- priv->ucode_code.v_addr, (u32)priv->ucode_code.p_addr);
-
- /*
- * Runtime data
- * NOTE: Copy into backup buffer will be done in iwl_up()
- */
- IWL_DEBUG_INFO(priv, "Copying (but not loading) uCode data len %Zd\n",
- pieces.data_size);
- memcpy(priv->ucode_data.v_addr, pieces.data, pieces.data_size);
- memcpy(priv->ucode_data_backup.v_addr, pieces.data, pieces.data_size);
-
- /* Initialization instructions */
- if (pieces.init_size) {
- IWL_DEBUG_INFO(priv,
- "Copying (but not loading) init instr len %Zd\n",
- pieces.init_size);
- memcpy(priv->ucode_init.v_addr, pieces.init, pieces.init_size);
- }
-
- /* Initialization data */
- if (pieces.init_data_size) {
- IWL_DEBUG_INFO(priv,
- "Copying (but not loading) init data len %Zd\n",
- pieces.init_data_size);
- memcpy(priv->ucode_init_data.v_addr, pieces.init_data,
- pieces.init_data_size);
- }
-
- /* Bootstrap instructions */
- IWL_DEBUG_INFO(priv, "Copying (but not loading) boot instr len %Zd\n",
- pieces.boot_size);
- memcpy(priv->ucode_boot.v_addr, pieces.boot, pieces.boot_size);
-
- /*
- * figure out the offset of chain noise reset and gain commands
- * base on the size of standard phy calibration commands table size
- */
- priv->_4965.phy_calib_chain_noise_reset_cmd =
- standard_phy_calibration_size;
- priv->_4965.phy_calib_chain_noise_gain_cmd =
- standard_phy_calibration_size + 1;
-
- /**************************************************
- * This is still part of probe() in a sense...
- *
- * 9. Setup and register with mac80211 and debugfs
- **************************************************/
- err = iwl4965_mac_setup_register(priv, max_probe_length);
- if (err)
- goto out_unbind;
-
- err = iwl_legacy_dbgfs_register(priv, DRV_NAME);
- if (err)
- IWL_ERR(priv,
- "failed to create debugfs files. Ignoring error: %d\n", err);
-
- err = sysfs_create_group(&priv->pci_dev->dev.kobj,
- &iwl_attribute_group);
- if (err) {
- IWL_ERR(priv, "failed to create sysfs device attributes\n");
- goto out_unbind;
- }
-
-	/* We have our copies now, allow the OS to release its copy */
- release_firmware(ucode_raw);
- complete(&priv->_4965.firmware_loading_complete);
- return;
-
- try_again:
- /* try next, if any */
- if (iwl4965_request_firmware(priv, false))
- goto out_unbind;
- release_firmware(ucode_raw);
- return;
-
- err_pci_alloc:
- IWL_ERR(priv, "failed to allocate pci memory\n");
- iwl4965_dealloc_ucode_pci(priv);
- out_unbind:
- complete(&priv->_4965.firmware_loading_complete);
- device_release_driver(&priv->pci_dev->dev);
- release_firmware(ucode_raw);
-}
-
-static const char * const desc_lookup_text[] = {
- "OK",
- "FAIL",
- "BAD_PARAM",
- "BAD_CHECKSUM",
- "NMI_INTERRUPT_WDG",
- "SYSASSERT",
- "FATAL_ERROR",
- "BAD_COMMAND",
- "HW_ERROR_TUNE_LOCK",
- "HW_ERROR_TEMPERATURE",
- "ILLEGAL_CHAN_FREQ",
- "VCC_NOT_STABLE",
- "FH_ERROR",
- "NMI_INTERRUPT_HOST",
- "NMI_INTERRUPT_ACTION_PT",
- "NMI_INTERRUPT_UNKNOWN",
- "UCODE_VERSION_MISMATCH",
- "HW_ERROR_ABS_LOCK",
- "HW_ERROR_CAL_LOCK_FAIL",
- "NMI_INTERRUPT_INST_ACTION_PT",
- "NMI_INTERRUPT_DATA_ACTION_PT",
- "NMI_TRM_HW_ER",
- "NMI_INTERRUPT_TRM",
- "NMI_INTERRUPT_BREAK_POINT",
- "DEBUG_0",
- "DEBUG_1",
- "DEBUG_2",
- "DEBUG_3",
-};
-
-static struct { char *name; u8 num; } advanced_lookup[] = {
- { "NMI_INTERRUPT_WDG", 0x34 },
- { "SYSASSERT", 0x35 },
- { "UCODE_VERSION_MISMATCH", 0x37 },
- { "BAD_COMMAND", 0x38 },
- { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
- { "FATAL_ERROR", 0x3D },
- { "NMI_TRM_HW_ERR", 0x46 },
- { "NMI_INTERRUPT_TRM", 0x4C },
- { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
- { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
- { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
- { "NMI_INTERRUPT_HOST", 0x66 },
- { "NMI_INTERRUPT_ACTION_PT", 0x7C },
- { "NMI_INTERRUPT_UNKNOWN", 0x84 },
- { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
- { "ADVANCED_SYSASSERT", 0 },
-};
-
-static const char *iwl4965_desc_lookup(u32 num)
-{
- int i;
- int max = ARRAY_SIZE(desc_lookup_text);
-
- if (num < max)
- return desc_lookup_text[num];
-
- max = ARRAY_SIZE(advanced_lookup) - 1;
- for (i = 0; i < max; i++) {
- if (advanced_lookup[i].num == num)
- break;
- }
- return advanced_lookup[i].name;
-}
-
-#define ERROR_START_OFFSET (1 * sizeof(u32))
-#define ERROR_ELEM_SIZE (7 * sizeof(u32))
-
-void iwl4965_dump_nic_error_log(struct iwl_priv *priv)
-{
- u32 data2, line;
- u32 desc, time, count, base, data1;
- u32 blink1, blink2, ilink1, ilink2;
- u32 pc, hcmd;
-
- if (priv->ucode_type == UCODE_INIT) {
- base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
- } else {
- base = le32_to_cpu(priv->card_alive.error_event_table_ptr);
- }
-
- if (!priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
- IWL_ERR(priv,
- "Not valid error log pointer 0x%08X for %s uCode\n",
- base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
- return;
- }
-
- count = iwl_legacy_read_targ_mem(priv, base);
-
- if (ERROR_START_OFFSET <= count * ERROR_ELEM_SIZE) {
- IWL_ERR(priv, "Start IWL Error Log Dump:\n");
- IWL_ERR(priv, "Status: 0x%08lX, count: %d\n",
- priv->status, count);
- }
-
- desc = iwl_legacy_read_targ_mem(priv, base + 1 * sizeof(u32));
- priv->isr_stats.err_code = desc;
- pc = iwl_legacy_read_targ_mem(priv, base + 2 * sizeof(u32));
- blink1 = iwl_legacy_read_targ_mem(priv, base + 3 * sizeof(u32));
- blink2 = iwl_legacy_read_targ_mem(priv, base + 4 * sizeof(u32));
- ilink1 = iwl_legacy_read_targ_mem(priv, base + 5 * sizeof(u32));
- ilink2 = iwl_legacy_read_targ_mem(priv, base + 6 * sizeof(u32));
- data1 = iwl_legacy_read_targ_mem(priv, base + 7 * sizeof(u32));
- data2 = iwl_legacy_read_targ_mem(priv, base + 8 * sizeof(u32));
- line = iwl_legacy_read_targ_mem(priv, base + 9 * sizeof(u32));
- time = iwl_legacy_read_targ_mem(priv, base + 11 * sizeof(u32));
- hcmd = iwl_legacy_read_targ_mem(priv, base + 22 * sizeof(u32));
-
- trace_iwlwifi_legacy_dev_ucode_error(priv, desc,
- time, data1, data2, line,
- blink1, blink2, ilink1, ilink2);
-
- IWL_ERR(priv, "Desc Time "
- "data1 data2 line\n");
- IWL_ERR(priv, "%-28s (0x%04X) %010u 0x%08X 0x%08X %u\n",
- iwl4965_desc_lookup(desc), desc, time, data1, data2, line);
- IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n");
- IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
- pc, blink1, blink2, ilink1, ilink2, hcmd);
-}
-
-static void iwl4965_rf_kill_ct_config(struct iwl_priv *priv)
-{
- struct iwl_ct_kill_config cmd;
- unsigned long flags;
- int ret = 0;
-
- spin_lock_irqsave(&priv->lock, flags);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_REG_BIT_CT_KILL_EXIT);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- cmd.critical_temperature_R =
- cpu_to_le32(priv->hw_params.ct_kill_threshold);
-
- ret = iwl_legacy_send_cmd_pdu(priv, REPLY_CT_KILL_CONFIG_CMD,
- sizeof(cmd), &cmd);
- if (ret)
- IWL_ERR(priv, "REPLY_CT_KILL_CONFIG_CMD failed\n");
- else
- IWL_DEBUG_INFO(priv, "REPLY_CT_KILL_CONFIG_CMD "
- "succeeded, "
- "critical temperature is %d\n",
- priv->hw_params.ct_kill_threshold);
-}
-
-static const s8 default_queue_to_tx_fifo[] = {
- IWL_TX_FIFO_VO,
- IWL_TX_FIFO_VI,
- IWL_TX_FIFO_BE,
- IWL_TX_FIFO_BK,
- IWL49_CMD_FIFO_NUM,
- IWL_TX_FIFO_UNUSED,
- IWL_TX_FIFO_UNUSED,
-};
-
-static int iwl4965_alive_notify(struct iwl_priv *priv)
-{
- u32 a;
- unsigned long flags;
- int i, chan;
- u32 reg_val;
-
- spin_lock_irqsave(&priv->lock, flags);
-
-	/* Clear 4965's internal Tx Scheduler database */
- priv->scd_base_addr = iwl_legacy_read_prph(priv,
- IWL49_SCD_SRAM_BASE_ADDR);
- a = priv->scd_base_addr + IWL49_SCD_CONTEXT_DATA_OFFSET;
- for (; a < priv->scd_base_addr + IWL49_SCD_TX_STTS_BITMAP_OFFSET; a += 4)
- iwl_legacy_write_targ_mem(priv, a, 0);
- for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
- iwl_legacy_write_targ_mem(priv, a, 0);
- for (; a < priv->scd_base_addr +
- IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
- iwl_legacy_write_targ_mem(priv, a, 0);
-
-	/* Tell 4965 where to find Tx byte count tables */
- iwl_legacy_write_prph(priv, IWL49_SCD_DRAM_BASE_ADDR,
- priv->scd_bc_tbls.dma >> 10);
-
- /* Enable DMA channel */
- for (chan = 0; chan < FH49_TCSR_CHNL_NUM ; chan++)
- iwl_legacy_write_direct32(priv,
- FH_TCSR_CHNL_TX_CONFIG_REG(chan),
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
- FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
-
- /* Update FH chicken bits */
- reg_val = iwl_legacy_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
- iwl_legacy_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
- reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
-
- /* Disable chain mode for all queues */
- iwl_legacy_write_prph(priv, IWL49_SCD_QUEUECHAIN_SEL, 0);
-
- /* Initialize each Tx queue (including the command queue) */
- for (i = 0; i < priv->hw_params.max_txq_num; i++) {
-
- /* TFD circular buffer read/write indexes */
- iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(i), 0);
- iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
-
- /* Max Tx Window size for Scheduler-ACK mode */
- iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
- IWL49_SCD_CONTEXT_QUEUE_OFFSET(i),
- (SCD_WIN_SIZE <<
- IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
- IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK);
-
- /* Frame limit */
- iwl_legacy_write_targ_mem(priv, priv->scd_base_addr +
- IWL49_SCD_CONTEXT_QUEUE_OFFSET(i) +
- sizeof(u32),
- (SCD_FRAME_LIMIT <<
- IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
- IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK);
-
- }
- iwl_legacy_write_prph(priv, IWL49_SCD_INTERRUPT_MASK,
- (1 << priv->hw_params.max_txq_num) - 1);
-
- /* Activate all Tx DMA/FIFO channels */
- iwl4965_txq_set_sched(priv, IWL_MASK(0, 6));
-
- iwl4965_set_wr_ptrs(priv, IWL_DEFAULT_CMD_QUEUE_NUM, 0);
-
-	/* make sure all queues are not stopped */
- memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
- for (i = 0; i < 4; i++)
- atomic_set(&priv->queue_stop_count[i], 0);
-
-	/* reset to 0 to enable all the queues first */
- priv->txq_ctx_active_msk = 0;
- /* Map each Tx/cmd queue to its corresponding fifo */
- BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
-
- for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
- int ac = default_queue_to_tx_fifo[i];
-
- iwl_txq_ctx_activate(priv, i);
-
- if (ac == IWL_TX_FIFO_UNUSED)
- continue;
-
- iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
- }
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-}
-
-/**
- * iwl4965_alive_start - called after REPLY_ALIVE notification received
- * from protocol/runtime uCode (initialization uCode's
- * Alive gets handled by iwl_init_alive_start()).
- */
-static void iwl4965_alive_start(struct iwl_priv *priv)
-{
- int ret = 0;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
-
- IWL_DEBUG_INFO(priv, "Runtime Alive received.\n");
-
- if (priv->card_alive.is_valid != UCODE_VALID_OK) {
- /* We had an error bringing up the hardware, so take it
- * all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Alive failed.\n");
- goto restart;
- }
-
-	/* The initialization uCode has loaded the runtime uCode ... verify inst image.
- * This is a paranoid check, because we would not have gotten the
- * "runtime" alive if code weren't properly loaded. */
- if (iwl4965_verify_ucode(priv)) {
- /* Runtime instruction load was bad;
- * take it all the way back down so we can try again */
- IWL_DEBUG_INFO(priv, "Bad runtime uCode load.\n");
- goto restart;
- }
-
- ret = iwl4965_alive_notify(priv);
- if (ret) {
- IWL_WARN(priv,
- "Could not complete ALIVE transition [ntf]: %d\n", ret);
- goto restart;
- }
-
-
- /* After the ALIVE response, we can send host commands to the uCode */
- set_bit(STATUS_ALIVE, &priv->status);
-
- /* Enable watchdog to monitor the driver tx queues */
- iwl_legacy_setup_watchdog(priv);
-
- if (iwl_legacy_is_rfkill(priv))
- return;
-
- ieee80211_wake_queues(priv->hw);
-
- priv->active_rate = IWL_RATES_MASK;
-
- if (iwl_legacy_is_associated_ctx(ctx)) {
- struct iwl_legacy_rxon_cmd *active_rxon =
- (struct iwl_legacy_rxon_cmd *)&ctx->active;
- /* apply any changes in staging */
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
- active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
- } else {
- struct iwl_rxon_context *tmp;
- /* Initialize our rx_config data */
- for_each_context(priv, tmp)
- iwl_legacy_connection_init_rx_config(priv, tmp);
-
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
- }
-
- /* Configure bluetooth coexistence if enabled */
- iwl_legacy_send_bt_config(priv);
-
- iwl4965_reset_run_time_calib(priv);
-
- set_bit(STATUS_READY, &priv->status);
-
- /* Configure the adapter for unassociated operation */
- iwl_legacy_commit_rxon(priv, ctx);
-
- /* At this point, the NIC is initialized and operational */
- iwl4965_rf_kill_ct_config(priv);
-
- IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
- wake_up(&priv->wait_command_queue);
-
- iwl_legacy_power_update_mode(priv, true);
- IWL_DEBUG_INFO(priv, "Updated power mode\n");
-
- return;
-
- restart:
- queue_work(priv->workqueue, &priv->restart);
-}
-
-static void iwl4965_cancel_deferred_work(struct iwl_priv *priv);
-
-static void __iwl4965_down(struct iwl_priv *priv)
-{
- unsigned long flags;
- int exit_pending;
-
- IWL_DEBUG_INFO(priv, DRV_NAME " is going down\n");
-
- iwl_legacy_scan_cancel_timeout(priv, 200);
-
- exit_pending = test_and_set_bit(STATUS_EXIT_PENDING, &priv->status);
-
-	/* Stop the TX queues watchdog. We need to have the STATUS_EXIT_PENDING
-	 * bit set to prevent the timer from being rearmed. */
- del_timer_sync(&priv->watchdog);
-
- iwl_legacy_clear_ucode_stations(priv, NULL);
- iwl_legacy_dealloc_bcast_stations(priv);
- iwl_legacy_clear_driver_stations(priv);
-
- /* Unblock any waiting calls */
- wake_up_all(&priv->wait_command_queue);
-
- /* Wipe out the EXIT_PENDING status bit if we are not actually
- * exiting the module */
- if (!exit_pending)
- clear_bit(STATUS_EXIT_PENDING, &priv->status);
-
- /* stop and reset the on-board processor */
- iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
- /* tell the device to stop sending interrupts */
- spin_lock_irqsave(&priv->lock, flags);
- iwl_legacy_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
- iwl4965_synchronize_irq(priv);
-
- if (priv->mac80211_registered)
- ieee80211_stop_queues(priv->hw);
-
- /* If we have not previously called iwl_init() then
- * clear all bits but the RF Kill bit and return */
- if (!iwl_legacy_is_init(priv)) {
- priv->status = test_bit(STATUS_RF_KILL_HW, &priv->status) <<
- STATUS_RF_KILL_HW |
- test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
- STATUS_GEO_CONFIGURED |
- test_bit(STATUS_EXIT_PENDING, &priv->status) <<
- STATUS_EXIT_PENDING;
- goto exit;
- }
-
- /* ...otherwise clear out all the status bits but the RF Kill
- * bit and continue taking the NIC down. */
- priv->status &= test_bit(STATUS_RF_KILL_HW, &priv->status) <<
- STATUS_RF_KILL_HW |
- test_bit(STATUS_GEO_CONFIGURED, &priv->status) <<
- STATUS_GEO_CONFIGURED |
- test_bit(STATUS_FW_ERROR, &priv->status) <<
- STATUS_FW_ERROR |
- test_bit(STATUS_EXIT_PENDING, &priv->status) <<
- STATUS_EXIT_PENDING;
-
- iwl4965_txq_ctx_stop(priv);
- iwl4965_rxq_stop(priv);
-
- /* Power-down device's busmaster DMA clocks */
- iwl_legacy_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
- udelay(5);
-
- /* Make sure (redundant) we've released our request to stay awake */
- iwl_legacy_clear_bit(priv, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
- /* Stop the device, and put it in low power state */
- iwl_legacy_apm_stop(priv);
-
- exit:
- memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
-
- dev_kfree_skb(priv->beacon_skb);
- priv->beacon_skb = NULL;
-
- /* clear out any free frames */
- iwl4965_clear_free_frames(priv);
-}
-
-static void iwl4965_down(struct iwl_priv *priv)
-{
- mutex_lock(&priv->mutex);
- __iwl4965_down(priv);
- mutex_unlock(&priv->mutex);
-
- iwl4965_cancel_deferred_work(priv);
-}
-
-#define HW_READY_TIMEOUT (50)
-
-static int iwl4965_set_hw_ready(struct iwl_priv *priv)
-{
- int ret = 0;
-
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
-
- /* See if we got it */
- ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
- HW_READY_TIMEOUT);
- if (ret != -ETIMEDOUT)
- priv->hw_ready = true;
- else
- priv->hw_ready = false;
-
- IWL_DEBUG_INFO(priv, "hardware %s\n",
- (priv->hw_ready == 1) ? "ready" : "not ready");
- return ret;
-}
-
-static int iwl4965_prepare_card_hw(struct iwl_priv *priv)
-{
- int ret = 0;
-
- IWL_DEBUG_INFO(priv, "iwl4965_prepare_card_hw enter\n");
-
- ret = iwl4965_set_hw_ready(priv);
- if (priv->hw_ready)
- return ret;
-
- /* If HW is not ready, prepare the conditions to check again */
- iwl_legacy_set_bit(priv, CSR_HW_IF_CONFIG_REG,
- CSR_HW_IF_CONFIG_REG_PREPARE);
-
- ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
- ~CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE,
- CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE, 150000);
-
- /* HW should be ready by now, check again. */
- if (ret != -ETIMEDOUT)
- iwl4965_set_hw_ready(priv);
-
- return ret;
-}
-
-#define MAX_HW_RESTARTS 5
-
-static int __iwl4965_up(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx;
- int i;
- int ret;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
- IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
- return -EIO;
- }
-
- if (!priv->ucode_data_backup.v_addr || !priv->ucode_data.v_addr) {
- IWL_ERR(priv, "ucode not available for device bringup\n");
- return -EIO;
- }
-
- for_each_context(priv, ctx) {
- ret = iwl4965_alloc_bcast_station(priv, ctx);
- if (ret) {
- iwl_legacy_dealloc_bcast_stations(priv);
- return ret;
- }
- }
-
- iwl4965_prepare_card_hw(priv);
-
- if (!priv->hw_ready) {
- IWL_WARN(priv, "Exit HW not ready\n");
- return -EIO;
- }
-
- /* If platform's RF_KILL switch is NOT set to KILL */
- if (iwl_read32(priv,
- CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- set_bit(STATUS_RF_KILL_HW, &priv->status);
-
- if (iwl_legacy_is_rfkill(priv)) {
- wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
-
- iwl_legacy_enable_interrupts(priv);
- IWL_WARN(priv, "Radio disabled by HW RF Kill switch\n");
- return 0;
- }
-
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-
- /* must be initialised before iwl_hw_nic_init */
- priv->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
-
- ret = iwl4965_hw_nic_init(priv);
- if (ret) {
- IWL_ERR(priv, "Unable to init nic\n");
- return ret;
- }
-
- /* make sure rfkill handshake bits are cleared */
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
- /* clear (again), then enable host interrupts */
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
- iwl_legacy_enable_interrupts(priv);
-
- /* really make sure rfkill handshake bits are cleared */
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-
- /* Copy original ucode data image from disk into backup cache.
- * This will be used to initialize the on-board processor's
- * data SRAM for a clean start when the runtime program first loads. */
- memcpy(priv->ucode_data_backup.v_addr, priv->ucode_data.v_addr,
- priv->ucode_data.len);
-
- for (i = 0; i < MAX_HW_RESTARTS; i++) {
-
- /* load bootstrap state machine,
- * load bootstrap program into processor's memory,
- * prepare to load the "initialize" uCode */
- ret = priv->cfg->ops->lib->load_ucode(priv);
-
- if (ret) {
- IWL_ERR(priv, "Unable to set up bootstrap uCode: %d\n",
- ret);
- continue;
- }
-
- /* start card; "initialize" will load runtime ucode */
- iwl4965_nic_start(priv);
-
- IWL_DEBUG_INFO(priv, DRV_NAME " is coming up\n");
-
- return 0;
- }
-
- set_bit(STATUS_EXIT_PENDING, &priv->status);
- __iwl4965_down(priv);
- clear_bit(STATUS_EXIT_PENDING, &priv->status);
-
- /* tried to restart and configure the device for as long as our
- * patience could withstand */
- IWL_ERR(priv, "Unable to initialize device after %d attempts.\n", i);
- return -EIO;
-}
-
-
-/*****************************************************************************
- *
- * Workqueue callbacks
- *
- *****************************************************************************/
-
-static void iwl4965_bg_init_alive_start(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, init_alive_start.work);
-
- mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- goto out;
-
- priv->cfg->ops->lib->init_alive_start(priv);
-out:
- mutex_unlock(&priv->mutex);
-}
-
-static void iwl4965_bg_alive_start(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, alive_start.work);
-
- mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- goto out;
-
- iwl4965_alive_start(priv);
-out:
- mutex_unlock(&priv->mutex);
-}
-
-static void iwl4965_bg_run_time_calib_work(struct work_struct *work)
-{
- struct iwl_priv *priv = container_of(work, struct iwl_priv,
- run_time_calib_work);
-
- mutex_lock(&priv->mutex);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
- test_bit(STATUS_SCANNING, &priv->status)) {
- mutex_unlock(&priv->mutex);
- return;
- }
-
- if (priv->start_calib) {
- iwl4965_chain_noise_calibration(priv,
- (void *)&priv->_4965.statistics);
- iwl4965_sensitivity_calibration(priv,
- (void *)&priv->_4965.statistics);
- }
-
- mutex_unlock(&priv->mutex);
-}
-
-static void iwl4965_bg_restart(struct work_struct *data)
-{
- struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- if (test_and_clear_bit(STATUS_FW_ERROR, &priv->status)) {
- struct iwl_rxon_context *ctx;
-
- mutex_lock(&priv->mutex);
- for_each_context(priv, ctx)
- ctx->vif = NULL;
- priv->is_open = 0;
-
- __iwl4965_down(priv);
-
- mutex_unlock(&priv->mutex);
- iwl4965_cancel_deferred_work(priv);
- ieee80211_restart_hw(priv->hw);
- } else {
- iwl4965_down(priv);
-
- mutex_lock(&priv->mutex);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
- mutex_unlock(&priv->mutex);
- return;
- }
-
- __iwl4965_up(priv);
- mutex_unlock(&priv->mutex);
- }
-}
-
-static void iwl4965_bg_rx_replenish(struct work_struct *data)
-{
- struct iwl_priv *priv =
- container_of(data, struct iwl_priv, rx_replenish);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- mutex_lock(&priv->mutex);
- iwl4965_rx_replenish(priv);
- mutex_unlock(&priv->mutex);
-}
-
-/*****************************************************************************
- *
- * mac80211 entry point functions
- *
- *****************************************************************************/
-
-#define UCODE_READY_TIMEOUT (4 * HZ)
-
-/*
- * Not a mac80211 entry point function, but it fits in with all the
- * other mac80211 functions grouped here.
- */
-static int iwl4965_mac_setup_register(struct iwl_priv *priv,
- u32 max_probe_length)
-{
- int ret;
- struct ieee80211_hw *hw = priv->hw;
- struct iwl_rxon_context *ctx;
-
- hw->rate_control_algorithm = "iwl-4965-rs";
-
- /* Tell mac80211 our characteristics */
- hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_NEED_DTIM_PERIOD |
- IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS;
-
- if (priv->cfg->sku & IWL_SKU_N)
- hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
- IEEE80211_HW_SUPPORTS_STATIC_SMPS;
-
- hw->sta_data_size = sizeof(struct iwl_station_priv);
- hw->vif_data_size = sizeof(struct iwl_vif_priv);
-
- for_each_context(priv, ctx) {
- hw->wiphy->interface_modes |= ctx->interface_modes;
- hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
- }
-
- hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
- WIPHY_FLAG_DISABLE_BEACON_HINTS;
-
- /*
- * For now, disable PS by default because it affects
- * RX performance significantly.
- */
- hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
-
- hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
- /* we create the 802.11 header and a zero-length SSID element */
- hw->wiphy->max_scan_ie_len = max_probe_length - 24 - 2;
-
- /* Default value; 4 EDCA QOS priorities */
- hw->queues = 4;
-
- hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
-
- if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
- priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &priv->bands[IEEE80211_BAND_2GHZ];
- if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
- priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &priv->bands[IEEE80211_BAND_5GHZ];
-
- iwl_legacy_leds_init(priv);
-
- ret = ieee80211_register_hw(priv->hw);
- if (ret) {
- IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
- return ret;
- }
- priv->mac80211_registered = 1;
-
- return 0;
-}
-
-
-int iwl4965_mac_start(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
- int ret;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- /* we should be verifying the device is ready to be opened */
- mutex_lock(&priv->mutex);
- ret = __iwl4965_up(priv);
- mutex_unlock(&priv->mutex);
-
- if (ret)
- return ret;
-
- if (iwl_legacy_is_rfkill(priv))
- goto out;
-
- IWL_DEBUG_INFO(priv, "Start UP work done.\n");
-
- /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from
- * mac80211 will not be run successfully. */
- ret = wait_event_timeout(priv->wait_command_queue,
- test_bit(STATUS_READY, &priv->status),
- UCODE_READY_TIMEOUT);
- if (!ret) {
- if (!test_bit(STATUS_READY, &priv->status)) {
- IWL_ERR(priv, "START_ALIVE timeout after %dms.\n",
- jiffies_to_msecs(UCODE_READY_TIMEOUT));
- return -ETIMEDOUT;
- }
- }
-
- iwl4965_led_enable(priv);
-
-out:
- priv->is_open = 1;
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
-}
-
-void iwl4965_mac_stop(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (!priv->is_open)
- return;
-
- priv->is_open = 0;
-
- iwl4965_down(priv);
-
- flush_workqueue(priv->workqueue);
-
- /* User space software may expect to get rfkill change notifications
- * even if the interface is down */
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
- iwl_legacy_enable_rfkill_int(priv);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-void iwl4965_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
- struct iwl_priv *priv = hw->priv;
-
- IWL_DEBUG_MACDUMP(priv, "enter\n");
-
- IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
- ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
-
- if (iwl4965_tx_skb(priv, skb))
- dev_kfree_skb_any(skb);
-
- IWL_DEBUG_MACDUMP(priv, "leave\n");
-}
-
-void iwl4965_mac_update_tkip_key(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_key_conf *keyconf,
- struct ieee80211_sta *sta,
- u32 iv32, u16 *phase1key)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- iwl4965_update_tkip_key(priv, vif_priv->ctx, keyconf, sta,
- iv32, phase1key);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-int iwl4965_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif, struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- struct iwl_rxon_context *ctx = vif_priv->ctx;
- int ret;
- u8 sta_id;
- bool is_default_wep_key = false;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (priv->cfg->mod_params->sw_crypto) {
- IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
- return -EOPNOTSUPP;
- }
-
- sta_id = iwl_legacy_sta_id_or_broadcast(priv, vif_priv->ctx, sta);
- if (sta_id == IWL_INVALID_STATION)
- return -EINVAL;
-
- mutex_lock(&priv->mutex);
- iwl_legacy_scan_cancel_timeout(priv, 100);
-
- /*
- * If we are getting a WEP group key and we did not receive any key mapping
- * so far, we are in legacy WEP mode (group key only); otherwise we are
- * in 1X mode.
- * In legacy WEP mode, we use a different host command to the uCode.
- */
- if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
- !sta) {
- if (cmd == SET_KEY)
- is_default_wep_key = !ctx->key_mapping_keys;
- else
- is_default_wep_key =
- (key->hw_key_idx == HW_KEY_DEFAULT);
- }
-
- switch (cmd) {
- case SET_KEY:
- if (is_default_wep_key)
- ret = iwl4965_set_default_wep_key(priv,
- vif_priv->ctx, key);
- else
- ret = iwl4965_set_dynamic_key(priv, vif_priv->ctx,
- key, sta_id);
-
- IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
- break;
- case DISABLE_KEY:
- if (is_default_wep_key)
- ret = iwl4965_remove_default_wep_key(priv, ctx, key);
- else
- ret = iwl4965_remove_dynamic_key(priv, ctx,
- key, sta_id);
-
- IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
- break;
- default:
- ret = -EINVAL;
- }
-
- mutex_unlock(&priv->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return ret;
-}
-
-int iwl4965_mac_ampdu_action(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size)
-{
- struct iwl_priv *priv = hw->priv;
- int ret = -EINVAL;
-
- IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
- sta->addr, tid);
-
- if (!(priv->cfg->sku & IWL_SKU_N))
- return -EACCES;
-
- mutex_lock(&priv->mutex);
-
- switch (action) {
- case IEEE80211_AMPDU_RX_START:
- IWL_DEBUG_HT(priv, "start Rx\n");
- ret = iwl4965_sta_rx_agg_start(priv, sta, tid, *ssn);
- break;
- case IEEE80211_AMPDU_RX_STOP:
- IWL_DEBUG_HT(priv, "stop Rx\n");
- ret = iwl4965_sta_rx_agg_stop(priv, sta, tid);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- ret = 0;
- break;
- case IEEE80211_AMPDU_TX_START:
- IWL_DEBUG_HT(priv, "start Tx\n");
- ret = iwl4965_tx_agg_start(priv, vif, sta, tid, ssn);
- break;
- case IEEE80211_AMPDU_TX_STOP:
- IWL_DEBUG_HT(priv, "stop Tx\n");
- ret = iwl4965_tx_agg_stop(priv, vif, sta, tid);
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- ret = 0;
- break;
- case IEEE80211_AMPDU_TX_OPERATIONAL:
- ret = 0;
- break;
- }
- mutex_unlock(&priv->mutex);
-
- return ret;
-}
-
-int iwl4965_mac_sta_add(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- bool is_ap = vif->type == NL80211_IFTYPE_STATION;
- int ret;
- u8 sta_id;
-
- IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
- sta->addr);
- mutex_lock(&priv->mutex);
- IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
- sta->addr);
- sta_priv->common.sta_id = IWL_INVALID_STATION;
-
- atomic_set(&sta_priv->pending_frames, 0);
-
- ret = iwl_legacy_add_station_common(priv, vif_priv->ctx, sta->addr,
- is_ap, sta, &sta_id);
- if (ret) {
- IWL_ERR(priv, "Unable to add station %pM (%d)\n",
- sta->addr, ret);
- /* Should we return success if return code is EEXIST ? */
- mutex_unlock(&priv->mutex);
- return ret;
- }
-
- sta_priv->common.sta_id = sta_id;
-
- /* Initialize rate scaling */
- IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
- sta->addr);
- iwl4965_rs_rate_init(priv, sta, sta_id);
- mutex_unlock(&priv->mutex);
-
- return 0;
-}
-
-void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_channel_switch *ch_switch)
-{
- struct iwl_priv *priv = hw->priv;
- const struct iwl_channel_info *ch_info;
- struct ieee80211_conf *conf = &hw->conf;
- struct ieee80211_channel *channel = ch_switch->channel;
- struct iwl_ht_config *ht_conf = &priv->current_ht_config;
-
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- u16 ch;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- mutex_lock(&priv->mutex);
-
- if (iwl_legacy_is_rfkill(priv))
- goto out;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
- test_bit(STATUS_SCANNING, &priv->status) ||
- test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
- goto out;
-
- if (!iwl_legacy_is_associated_ctx(ctx))
- goto out;
-
- if (!priv->cfg->ops->lib->set_channel_switch)
- goto out;
-
- ch = channel->hw_value;
- if (le16_to_cpu(ctx->active.channel) == ch)
- goto out;
-
- ch_info = iwl_legacy_get_channel_info(priv, channel->band, ch);
- if (!iwl_legacy_is_channel_valid(ch_info)) {
- IWL_DEBUG_MAC80211(priv, "invalid channel\n");
- goto out;
- }
-
- spin_lock_irq(&priv->lock);
-
- priv->current_ht_config.smps = conf->smps_mode;
-
- /* Configure HT40 channels */
- ctx->ht.enabled = conf_is_ht(conf);
- if (ctx->ht.enabled) {
- if (conf_is_ht40_minus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_BELOW;
- ctx->ht.is_40mhz = true;
- } else if (conf_is_ht40_plus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
- ctx->ht.is_40mhz = true;
- } else {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_NONE;
- ctx->ht.is_40mhz = false;
- }
- } else
- ctx->ht.is_40mhz = false;
-
- if ((le16_to_cpu(ctx->staging.channel) != ch))
- ctx->staging.flags = 0;
-
- iwl_legacy_set_rxon_channel(priv, channel, ctx);
- iwl_legacy_set_rxon_ht(priv, ht_conf);
- iwl_legacy_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
-
- spin_unlock_irq(&priv->lock);
-
- iwl_legacy_set_rate(priv);
- /*
- * at this point, staging_rxon has the
- * configuration for channel switch
- */
- set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
- priv->switch_channel = cpu_to_le16(ch);
- if (priv->cfg->ops->lib->set_channel_switch(priv, ch_switch)) {
- clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
- priv->switch_channel = 0;
- ieee80211_chswitch_done(ctx->vif, false);
- }
-
-out:
- mutex_unlock(&priv->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-void iwl4965_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 multicast)
-{
- struct iwl_priv *priv = hw->priv;
- __le32 filter_or = 0, filter_nand = 0;
- struct iwl_rxon_context *ctx;
-
-#define CHK(test, flag) do { \
- if (*total_flags & (test)) \
- filter_or |= (flag); \
- else \
- filter_nand |= (flag); \
- } while (0)
-
- IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
- changed_flags, *total_flags);
-
- CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
- /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
- CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
- CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
-
-#undef CHK
-
- mutex_lock(&priv->mutex);
-
- for_each_context(priv, ctx) {
- ctx->staging.filter_flags &= ~filter_nand;
- ctx->staging.filter_flags |= filter_or;
-
- /*
- * Not committing directly because hardware can perform a scan,
- * but we'll eventually commit the filter flags change anyway.
- */
- }
-
- mutex_unlock(&priv->mutex);
-
- /*
- * Receiving all multicast frames is always enabled by the
- * default flags setup in iwl_legacy_connection_init_rx_config()
- * since we currently do not support programming multicast
- * filters into the device.
- */
- *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
- FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
-}
-
-/*****************************************************************************
- *
- * driver setup and teardown
- *
- *****************************************************************************/
-
-static void iwl4965_bg_txpower_work(struct work_struct *work)
-{
- struct iwl_priv *priv = container_of(work, struct iwl_priv,
- txpower_work);
-
- mutex_lock(&priv->mutex);
-
- /* If a scan happened to start before we got here
- * then just return; the statistics notification will
- * kick off another scheduled work to compensate for
- * any temperature delta we missed here. */
- if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
- test_bit(STATUS_SCANNING, &priv->status))
- goto out;
-
- /* Regardless of whether we are associated, we must reconfigure the
- * TX power since frames can be sent on non-radar channels while
- * not associated */
- priv->cfg->ops->lib->send_tx_power(priv);
-
- /* Update last_temperature to keep is_calib_needed from running
- * when it isn't needed... */
- priv->last_temperature = priv->temperature;
-out:
- mutex_unlock(&priv->mutex);
-}
-
-static void iwl4965_setup_deferred_work(struct iwl_priv *priv)
-{
- priv->workqueue = create_singlethread_workqueue(DRV_NAME);
-
- init_waitqueue_head(&priv->wait_command_queue);
-
- INIT_WORK(&priv->restart, iwl4965_bg_restart);
- INIT_WORK(&priv->rx_replenish, iwl4965_bg_rx_replenish);
- INIT_WORK(&priv->run_time_calib_work, iwl4965_bg_run_time_calib_work);
- INIT_DELAYED_WORK(&priv->init_alive_start, iwl4965_bg_init_alive_start);
- INIT_DELAYED_WORK(&priv->alive_start, iwl4965_bg_alive_start);
-
- iwl_legacy_setup_scan_deferred_work(priv);
-
- INIT_WORK(&priv->txpower_work, iwl4965_bg_txpower_work);
-
- init_timer(&priv->statistics_periodic);
- priv->statistics_periodic.data = (unsigned long)priv;
- priv->statistics_periodic.function = iwl4965_bg_statistics_periodic;
-
- init_timer(&priv->watchdog);
- priv->watchdog.data = (unsigned long)priv;
- priv->watchdog.function = iwl_legacy_bg_watchdog;
-
- tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
- iwl4965_irq_tasklet, (unsigned long)priv);
-}
-
-static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
-{
- cancel_work_sync(&priv->txpower_work);
- cancel_delayed_work_sync(&priv->init_alive_start);
- cancel_delayed_work(&priv->alive_start);
- cancel_work_sync(&priv->run_time_calib_work);
-
- iwl_legacy_cancel_scan_deferred_work(priv);
-
- del_timer_sync(&priv->statistics_periodic);
-}
-
-static void iwl4965_init_hw_rates(struct iwl_priv *priv,
- struct ieee80211_rate *rates)
-{
- int i;
-
- for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
- rates[i].bitrate = iwlegacy_rates[i].ieee * 5;
- rates[i].hw_value = i; /* Rate scaling will work on indexes */
- rates[i].hw_value_short = i;
- rates[i].flags = 0;
- if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
- /*
- * If CCK != 1M then set short preamble rate flag.
- */
- rates[i].flags |=
- (iwlegacy_rates[i].plcp == IWL_RATE_1M_PLCP) ?
- 0 : IEEE80211_RATE_SHORT_PREAMBLE;
- }
- }
-}
-/*
- * Acquire priv->lock before calling this function !
- */
-void iwl4965_set_wr_ptrs(struct iwl_priv *priv, int txq_id, u32 index)
-{
- iwl_legacy_write_direct32(priv, HBUS_TARG_WRPTR,
- (index & 0xff) | (txq_id << 8));
- iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_RDPTR(txq_id), index);
-}
-
-void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
- struct iwl_tx_queue *txq,
- int tx_fifo_id, int scd_retry)
-{
- int txq_id = txq->q.id;
-
- /* Find out whether to activate Tx queue */
- int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
-
- /* Set up and activate */
- iwl_legacy_write_prph(priv, IWL49_SCD_QUEUE_STATUS_BITS(txq_id),
- (active << IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
- (tx_fifo_id << IWL49_SCD_QUEUE_STTS_REG_POS_TXF) |
- (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_WSL) |
- (scd_retry << IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK) |
- IWL49_SCD_QUEUE_STTS_REG_MSK);
-
- txq->sched_retry = scd_retry;
-
- IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
- active ? "Activate" : "Deactivate",
- scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
-}
-
-
-static int iwl4965_init_drv(struct iwl_priv *priv)
-{
- int ret;
-
- spin_lock_init(&priv->sta_lock);
- spin_lock_init(&priv->hcmd_lock);
-
- INIT_LIST_HEAD(&priv->free_frames);
-
- mutex_init(&priv->mutex);
-
- priv->ieee_channels = NULL;
- priv->ieee_rates = NULL;
- priv->band = IEEE80211_BAND_2GHZ;
-
- priv->iw_mode = NL80211_IFTYPE_STATION;
- priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
- priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
-
- /* initialize force reset */
- priv->force_reset.reset_duration = IWL_DELAY_NEXT_FORCE_FW_RELOAD;
-
- /* Choose which receivers/antennas to use */
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv,
- &priv->contexts[IWL_RXON_CTX_BSS]);
-
- iwl_legacy_init_scan_params(priv);
-
- ret = iwl_legacy_init_channel_map(priv);
- if (ret) {
- IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
- goto err;
- }
-
- ret = iwl_legacy_init_geos(priv);
- if (ret) {
- IWL_ERR(priv, "initializing geos failed: %d\n", ret);
- goto err_free_channel_map;
- }
- iwl4965_init_hw_rates(priv, priv->ieee_rates);
-
- return 0;
-
-err_free_channel_map:
- iwl_legacy_free_channel_map(priv);
-err:
- return ret;
-}
-
-static void iwl4965_uninit_drv(struct iwl_priv *priv)
-{
- iwl4965_calib_free_results(priv);
- iwl_legacy_free_geos(priv);
- iwl_legacy_free_channel_map(priv);
- kfree(priv->scan_cmd);
-}
-
-static void iwl4965_hw_detect(struct iwl_priv *priv)
-{
- priv->hw_rev = _iwl_legacy_read32(priv, CSR_HW_REV);
- priv->hw_wa_rev = _iwl_legacy_read32(priv, CSR_HW_REV_WA_REG);
- priv->rev_id = priv->pci_dev->revision;
- IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", priv->rev_id);
-}
-
-static int iwl4965_set_hw_params(struct iwl_priv *priv)
-{
- priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
- priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
- if (priv->cfg->mod_params->amsdu_size_8K)
- priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
- else
- priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
-
- priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
-
- if (priv->cfg->mod_params->disable_11n)
- priv->cfg->sku &= ~IWL_SKU_N;
-
- /* Device-specific setup */
- return priv->cfg->ops->lib->set_hw_params(priv);
-}
-
-static const u8 iwl4965_bss_ac_to_fifo[] = {
- IWL_TX_FIFO_VO,
- IWL_TX_FIFO_VI,
- IWL_TX_FIFO_BE,
- IWL_TX_FIFO_BK,
-};
-
-static const u8 iwl4965_bss_ac_to_queue[] = {
- 0, 1, 2, 3,
-};
-
-static int
-iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- int err = 0, i;
- struct iwl_priv *priv;
- struct ieee80211_hw *hw;
- struct iwl_cfg *cfg = (struct iwl_cfg *)(ent->driver_data);
- unsigned long flags;
- u16 pci_cmd;
-
- /************************
- * 1. Allocating HW data
- ************************/
-
- hw = iwl_legacy_alloc_all(cfg);
- if (!hw) {
- err = -ENOMEM;
- goto out;
- }
- priv = hw->priv;
- /* At this point both hw and priv are allocated. */
-
- /*
- * The default context is always valid,
- * more may be discovered when firmware
- * is loaded.
- */
- priv->valid_contexts = BIT(IWL_RXON_CTX_BSS);
-
- for (i = 0; i < NUM_IWL_RXON_CTX; i++)
- priv->contexts[i].ctxid = i;
-
- priv->contexts[IWL_RXON_CTX_BSS].always_active = true;
- priv->contexts[IWL_RXON_CTX_BSS].is_active = true;
- priv->contexts[IWL_RXON_CTX_BSS].rxon_cmd = REPLY_RXON;
- priv->contexts[IWL_RXON_CTX_BSS].rxon_timing_cmd = REPLY_RXON_TIMING;
- priv->contexts[IWL_RXON_CTX_BSS].rxon_assoc_cmd = REPLY_RXON_ASSOC;
- priv->contexts[IWL_RXON_CTX_BSS].qos_cmd = REPLY_QOS_PARAM;
- priv->contexts[IWL_RXON_CTX_BSS].ap_sta_id = IWL_AP_ID;
- priv->contexts[IWL_RXON_CTX_BSS].wep_key_cmd = REPLY_WEPKEY;
- priv->contexts[IWL_RXON_CTX_BSS].ac_to_fifo = iwl4965_bss_ac_to_fifo;
- priv->contexts[IWL_RXON_CTX_BSS].ac_to_queue = iwl4965_bss_ac_to_queue;
- priv->contexts[IWL_RXON_CTX_BSS].exclusive_interface_modes =
- BIT(NL80211_IFTYPE_ADHOC);
- priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
- BIT(NL80211_IFTYPE_STATION);
- priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
- priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
- priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
- priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
-
- BUILD_BUG_ON(NUM_IWL_RXON_CTX != 1);
-
- SET_IEEE80211_DEV(hw, &pdev->dev);
-
- IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
- priv->cfg = cfg;
- priv->pci_dev = pdev;
- priv->inta_mask = CSR_INI_SET_MASK;
-
- if (iwl_legacy_alloc_traffic_mem(priv))
- IWL_ERR(priv, "Not enough memory to generate traffic log\n");
-
- /**************************
- * 2. Initializing PCI bus
- **************************/
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
- PCIE_LINK_STATE_CLKPM);
-
- if (pci_enable_device(pdev)) {
- err = -ENODEV;
- goto out_ieee80211_free_hw;
- }
-
- pci_set_master(pdev);
-
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
- if (err) {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (!err)
- err = pci_set_consistent_dma_mask(pdev,
- DMA_BIT_MASK(32));
- /* both attempts failed: */
- if (err) {
- IWL_WARN(priv, "No suitable DMA available.\n");
- goto out_pci_disable_device;
- }
- }
-
- err = pci_request_regions(pdev, DRV_NAME);
- if (err)
- goto out_pci_disable_device;
-
- pci_set_drvdata(pdev, priv);
-
-
- /***********************
- * 3. Read REV register
- ***********************/
- priv->hw_base = pci_iomap(pdev, 0, 0);
- if (!priv->hw_base) {
- err = -ENODEV;
- goto out_pci_release_regions;
- }
-
- IWL_DEBUG_INFO(priv, "pci_resource_len = 0x%08llx\n",
- (unsigned long long) pci_resource_len(pdev, 0));
- IWL_DEBUG_INFO(priv, "pci_resource_base = %p\n", priv->hw_base);
-
- /* these spin locks will be used in apm_ops.init and EEPROM access,
- * so we should init them now
- */
- spin_lock_init(&priv->reg_lock);
- spin_lock_init(&priv->lock);
-
- /*
- * stop and reset the on-board processor just in case it is in a
- * strange state ... like being left stranded by a primary kernel
- * and this is now the kdump kernel trying to start up
- */
- iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
- iwl4965_hw_detect(priv);
- IWL_INFO(priv, "Detected %s, REV=0x%X\n",
- priv->cfg->name, priv->hw_rev);
-
- /* We disable the RETRY_TIMEOUT register (0x41) to keep
- * PCI Tx retries from interfering with C3 CPU state */
- pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
-
- iwl4965_prepare_card_hw(priv);
- if (!priv->hw_ready) {
- IWL_WARN(priv, "Failed, HW not ready\n");
- goto out_iounmap;
- }
-
- /*****************
- * 4. Read EEPROM
- *****************/
- /* Read the EEPROM */
- err = iwl_legacy_eeprom_init(priv);
- if (err) {
- IWL_ERR(priv, "Unable to init EEPROM\n");
- goto out_iounmap;
- }
- err = iwl4965_eeprom_check_version(priv);
- if (err)
- goto out_free_eeprom;
-
- /* extract MAC Address */
- iwl4965_eeprom_get_mac(priv, priv->addresses[0].addr);
- IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
- priv->hw->wiphy->addresses = priv->addresses;
- priv->hw->wiphy->n_addresses = 1;
-
- /************************
- * 5. Setup HW constants
- ************************/
- if (iwl4965_set_hw_params(priv)) {
- IWL_ERR(priv, "failed to set hw parameters\n");
- goto out_free_eeprom;
- }
-
- /*******************
- * 6. Setup priv
- *******************/
-
- err = iwl4965_init_drv(priv);
- if (err)
- goto out_free_eeprom;
- /* At this point both hw and priv are initialized. */
-
- /********************
- * 7. Setup services
- ********************/
- spin_lock_irqsave(&priv->lock, flags);
- iwl_legacy_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- pci_enable_msi(priv->pci_dev);
-
- err = request_irq(priv->pci_dev->irq, iwl_legacy_isr,
- IRQF_SHARED, DRV_NAME, priv);
- if (err) {
- IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
- goto out_disable_msi;
- }
-
- iwl4965_setup_deferred_work(priv);
- iwl4965_setup_rx_handlers(priv);
-
- /*********************************************
- * 8. Enable interrupts and read RFKILL state
- *********************************************/
-
- /* enable rfkill interrupt: hw bug w/a */
- pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
- if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
- pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
- pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
- }
-
- iwl_legacy_enable_rfkill_int(priv);
-
- /* If platform's RF_KILL switch is NOT set to KILL */
- if (iwl_read32(priv, CSR_GP_CNTRL) &
- CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- set_bit(STATUS_RF_KILL_HW, &priv->status);
-
- wiphy_rfkill_set_hw_state(priv->hw->wiphy,
- test_bit(STATUS_RF_KILL_HW, &priv->status));
-
- iwl_legacy_power_initialize(priv);
-
- init_completion(&priv->_4965.firmware_loading_complete);
-
- err = iwl4965_request_firmware(priv, true);
- if (err)
- goto out_destroy_workqueue;
-
- return 0;
-
- out_destroy_workqueue:
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
- free_irq(priv->pci_dev->irq, priv);
- out_disable_msi:
- pci_disable_msi(priv->pci_dev);
- iwl4965_uninit_drv(priv);
- out_free_eeprom:
- iwl_legacy_eeprom_free(priv);
- out_iounmap:
- pci_iounmap(pdev, priv->hw_base);
- out_pci_release_regions:
- pci_set_drvdata(pdev, NULL);
- pci_release_regions(pdev);
- out_pci_disable_device:
- pci_disable_device(pdev);
- out_ieee80211_free_hw:
- iwl_legacy_free_traffic_mem(priv);
- ieee80211_free_hw(priv->hw);
- out:
- return err;
-}
-
-static void __devexit iwl4965_pci_remove(struct pci_dev *pdev)
-{
- struct iwl_priv *priv = pci_get_drvdata(pdev);
- unsigned long flags;
-
- if (!priv)
- return;
-
- wait_for_completion(&priv->_4965.firmware_loading_complete);
-
- IWL_DEBUG_INFO(priv, "*** UNLOAD DRIVER ***\n");
-
- iwl_legacy_dbgfs_unregister(priv);
- sysfs_remove_group(&pdev->dev.kobj, &iwl_attribute_group);
-
- /* The ieee80211_unregister_hw call will cause iwl_mac_stop and
- * iwl4965_down to be called; since we are removing the device
- * we need to set the STATUS_EXIT_PENDING bit.
- */
- set_bit(STATUS_EXIT_PENDING, &priv->status);
-
- iwl_legacy_leds_exit(priv);
-
- if (priv->mac80211_registered) {
- ieee80211_unregister_hw(priv->hw);
- priv->mac80211_registered = 0;
- } else {
- iwl4965_down(priv);
- }
-
- /*
- * Make sure device is reset to low power before unloading driver.
- * This may be redundant with iwl4965_down(), but there are paths to
- * run iwl4965_down() without calling apm_ops.stop(), and there are
- * paths to avoid running iwl4965_down() at all before leaving driver.
- * This (inexpensive) call *makes sure* device is reset.
- */
- iwl_legacy_apm_stop(priv);
-
- /* make sure we flush any pending irq or
- * tasklet for the driver
- */
- spin_lock_irqsave(&priv->lock, flags);
- iwl_legacy_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwl4965_synchronize_irq(priv);
-
- iwl4965_dealloc_ucode_pci(priv);
-
- if (priv->rxq.bd)
- iwl4965_rx_queue_free(priv, &priv->rxq);
- iwl4965_hw_txq_ctx_free(priv);
-
- iwl_legacy_eeprom_free(priv);
-
-
- /*netif_stop_queue(dev); */
- flush_workqueue(priv->workqueue);
-
- /* ieee80211_unregister_hw calls iwl_mac_stop, which flushes
- * priv->workqueue... so we can't take down the workqueue
- * until now... */
- destroy_workqueue(priv->workqueue);
- priv->workqueue = NULL;
- iwl_legacy_free_traffic_mem(priv);
-
- free_irq(priv->pci_dev->irq, priv);
- pci_disable_msi(priv->pci_dev);
- pci_iounmap(pdev, priv->hw_base);
- pci_release_regions(pdev);
- pci_disable_device(pdev);
- pci_set_drvdata(pdev, NULL);
-
- iwl4965_uninit_drv(priv);
-
- dev_kfree_skb(priv->beacon_skb);
-
- ieee80211_free_hw(priv->hw);
-}
-
-/*
- * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFO mask;
- * must be called under priv->lock and with mac access
- */
-void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
-{
- iwl_legacy_write_prph(priv, IWL49_SCD_TXFACT, mask);
-}
-
-/*****************************************************************************
- *
- * driver and module entry point
- *
- *****************************************************************************/
-
-/* Hardware specific file defines the PCI IDs table for that hardware module */
-static DEFINE_PCI_DEVICE_TABLE(iwl4965_hw_card_ids) = {
-#if defined(CONFIG_IWL4965_MODULE) || defined(CONFIG_IWL4965)
- {IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_cfg)},
- {IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_cfg)},
-#endif /* CONFIG_IWL4965 */
-
- {0}
-};
-MODULE_DEVICE_TABLE(pci, iwl4965_hw_card_ids);
-
-static struct pci_driver iwl4965_driver = {
- .name = DRV_NAME,
- .id_table = iwl4965_hw_card_ids,
- .probe = iwl4965_pci_probe,
- .remove = __devexit_p(iwl4965_pci_remove),
- .driver.pm = IWL_LEGACY_PM_OPS,
-};
-
-static int __init iwl4965_init(void)
-{
- int ret;
- pr_info(DRV_DESCRIPTION ", " DRV_VERSION "\n");
- pr_info(DRV_COPYRIGHT "\n");
-
- ret = iwl4965_rate_control_register();
- if (ret) {
- pr_err("Unable to register rate control algorithm: %d\n", ret);
- return ret;
- }
-
- ret = pci_register_driver(&iwl4965_driver);
- if (ret) {
- pr_err("Unable to initialize PCI module\n");
- goto error_register;
- }
-
- return ret;
-
-error_register:
- iwl4965_rate_control_unregister();
- return ret;
-}
-
-static void __exit iwl4965_exit(void)
-{
- pci_unregister_driver(&iwl4965_driver);
- iwl4965_rate_control_unregister();
-}
-
-module_exit(iwl4965_exit);
-module_init(iwl4965_init);
-
-#ifdef CONFIG_IWLWIFI_LEGACY_DEBUG
-module_param_named(debug, iwlegacy_debug_level, uint, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "debug output mask");
-#endif
-
-module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
-MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
-module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
-MODULE_PARM_DESC(queues_num, "number of hw queues.");
-module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
-MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
-module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
- int, S_IRUGO);
-MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
-module_param_named(fw_restart, iwl4965_mod_params.restart_fw, int, S_IRUGO);
-MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlegacy/iwl-prph.h b/drivers/net/wireless/iwlegacy/prph.h
index 30a4930..ffec4b4 100644
--- a/drivers/net/wireless/iwlegacy/iwl-prph.h
+++ b/drivers/net/wireless/iwlegacy/prph.h
@@ -60,8 +60,8 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
-#ifndef __iwl_legacy_prph_h__
-#define __iwl_legacy_prph_h__
+#ifndef __il_prph_h__
+#define __il_prph_h__
/*
* Registers in this file are internal, not PCI bus memory mapped.
@@ -91,9 +91,9 @@
#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000)
#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000)
#define APMG_PS_CTRL_VAL_PWR_SRC_VMAIN (0x00000000)
-#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */
+#define APMG_PS_CTRL_VAL_PWR_SRC_MAX (0x01000000) /* 3945 only */
#define APMG_PS_CTRL_VAL_PWR_SRC_VAUX (0x02000000)
-#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */
+#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */
#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060)
#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
@@ -120,13 +120,13 @@
*
* 1) Initialization -- performs hardware calibration and sets up some
* internal data, then notifies host via "initialize alive" notification
- * (struct iwl_init_alive_resp) that it has completed all of its work.
+ * (struct il_init_alive_resp) that it has completed all of its work.
* After signal from host, it then loads and starts the runtime program.
* The initialization program must be used when initially setting up the
* NIC after loading the driver.
*
* 2) Runtime/Protocol -- performs all normal runtime operations. This
- * notifies host via "alive" notification (struct iwl_alive_resp) that it
+ * notifies host via "alive" notification (struct il_alive_resp) that it
* is ready to be used.
*
* When initializing the NIC, the host driver does the following procedure:
@@ -189,7 +189,7 @@
* procedure.
*
* This save/restore method is mostly for autonomous power management during
- * normal operation (result of POWER_TABLE_CMD). Platform suspend/resume and
+ * normal operation (result of C_POWER_TBL). Platform suspend/resume and
* RFKILL should use complete restarts (with total re-initialization) of uCode,
* allowing total shutdown (including BSM memory).
*
@@ -202,19 +202,19 @@
*/
/* BSM bit fields */
-#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */
-#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup*/
-#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */
+#define BSM_WR_CTRL_REG_BIT_START (0x80000000) /* start boot load now */
+#define BSM_WR_CTRL_REG_BIT_START_EN (0x40000000) /* enable boot after pwrup */
+#define BSM_DRAM_INST_LOAD (0x80000000) /* start program load now */
/* BSM addresses */
#define BSM_BASE (PRPH_BASE + 0x3400)
#define BSM_END (PRPH_BASE + 0x3800)
-#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */
-#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */
-#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */
-#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */
-#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */
+#define BSM_WR_CTRL_REG (BSM_BASE + 0x000) /* ctl and status */
+#define BSM_WR_MEM_SRC_REG (BSM_BASE + 0x004) /* source in BSM mem */
+#define BSM_WR_MEM_DST_REG (BSM_BASE + 0x008) /* dest in SRAM mem */
+#define BSM_WR_DWCOUNT_REG (BSM_BASE + 0x00C) /* bytes */
+#define BSM_WR_STATUS_REG (BSM_BASE + 0x010) /* bit 0: 1 == done */
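The bootstrap-load handshake implied by these registers -- point BSM_WR_MEM_SRC_REG at an image already copied into BSM SRAM, BSM_WR_MEM_DST_REG at the destination in device SRAM, program BSM_WR_DWCOUNT_REG, then set the START bit in BSM_WR_CTRL_REG and poll for it to clear -- might be sketched as follows. This is only an illustrative sketch against the register names above; the prph_write()/prph_read() helpers, the retry count, and the completion condition are assumptions, not the driver's actual API.

#include <stdint.h>

/* Hypothetical peripheral-register accessors (stand-ins for the driver's
 * own prph read/write helpers). */
extern void prph_write(uint32_t reg, uint32_t val);
extern uint32_t prph_read(uint32_t reg);

/* Kick the BSM to copy a bootstrap image (already placed in BSM SRAM)
 * into the on-board processor's instruction SRAM. */
static int bsm_load_bootstrap(uint32_t src_in_bsm, uint32_t dst_in_sram,
                              uint32_t dword_count)
{
        int retries;

        prph_write(BSM_WR_MEM_SRC_REG, src_in_bsm);   /* source in BSM mem */
        prph_write(BSM_WR_MEM_DST_REG, dst_in_sram);  /* dest in device SRAM */
        prph_write(BSM_WR_DWCOUNT_REG, dword_count);  /* transfer size */

        /* Start the transfer; assume completion is signalled by the
         * START bit clearing again. */
        prph_write(BSM_WR_CTRL_REG, BSM_WR_CTRL_REG_BIT_START);
        for (retries = 0; retries < 1000; retries++)
                if (!(prph_read(BSM_WR_CTRL_REG) & BSM_WR_CTRL_REG_BIT_START))
                        return 0;       /* done */

        return -1;                      /* timed out */
}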
/*
* Pointers and size regs for bootstrap load and data SRAM save/restore.
@@ -231,8 +231,7 @@
* Read/write, address range from LOWER_BOUND to (LOWER_BOUND + SIZE -1)
*/
#define BSM_SRAM_LOWER_BOUND (PRPH_BASE + 0x3800)
-#define BSM_SRAM_SIZE (1024) /* bytes */
-
+#define BSM_SRAM_SIZE (1024) /* bytes */
/* 3945 Tx scheduler registers */
#define ALM_SCD_BASE (PRPH_BASE + 0x2E00)
@@ -255,7 +254,7 @@
* but one DMA channel may take input from several queues.
*
* Tx DMA FIFOs have dedicated purposes. For 4965, they are used as follows
- * (cf. default_queue_to_tx_fifo in iwl-4965.c):
+ * (cf. default_queue_to_tx_fifo in 4965.c):
*
* 0 -- EDCA BK (background) frames, lowest priority
* 1 -- EDCA BE (best effort) frames, normal priority
@@ -274,20 +273,20 @@
* The driver sets up each queue to work in one of two modes:
*
* 1) Scheduler-Ack, in which the scheduler automatically supports a
- * block-ack (BA) window of up to 64 TFDs. In this mode, each queue
+ * block-ack (BA) win of up to 64 TFDs. In this mode, each queue
* contains TFDs for a unique combination of Recipient Address (RA)
* and Traffic Identifier (TID), that is, traffic of a given
* Quality-Of-Service (QOS) priority, destined for a single station.
*
* In scheduler-ack mode, the scheduler keeps track of the Tx status of
- * each frame within the BA window, including whether it's been transmitted,
+ * each frame within the BA win, including whether it's been transmitted,
* and whether it's been acknowledged by the receiving station. The device
* automatically processes block-acks received from the receiving STA,
* and reschedules un-acked frames to be retransmitted (successful
* Tx completion may end up being out-of-order).
*
* The driver must maintain the queue's Byte Count table in host DRAM
- * (struct iwl4965_sched_queue_byte_cnt_tbl) for this mode.
+ * (struct il4965_sched_queue_byte_cnt_tbl) for this mode.
* This mode does not support fragmentation.
*
* 2) FIFO (a.k.a. non-Scheduler-ACK), in which each TFD is processed in order.
@@ -316,34 +315,34 @@
*/
/**
- * Max Tx window size is the max number of contiguous TFDs that the scheduler
+ * Max Tx win size is the max number of contiguous TFDs that the scheduler
* can keep track of at one time when creating block-ack chains of frames.
* Note that "64" matches the number of ack bits in a block-ack packet.
* Driver should use SCD_WIN_SIZE and SCD_FRAME_LIMIT values to initialize
- * IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
+ * IL49_SCD_CONTEXT_QUEUE_OFFSET(x) values.
*/
#define SCD_WIN_SIZE 64
#define SCD_FRAME_LIMIT 64
/* SCD registers are internal, must be accessed via HBUS_TARG_PRPH regs */
-#define IWL49_SCD_START_OFFSET 0xa02c00
+#define IL49_SCD_START_OFFSET 0xa02c00
/*
* 4965 tells driver SRAM address for internal scheduler structs via this reg.
* Value is valid only after "Alive" response from uCode.
*/
-#define IWL49_SCD_SRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x0)
+#define IL49_SCD_SRAM_BASE_ADDR (IL49_SCD_START_OFFSET + 0x0)
/*
* Driver may need to update queue-empty bits after changing queue's
- * write and read pointers (indexes) during (re-)initialization (i.e. when
+ * write and read pointers (idxes) during (re-)initialization (i.e. when
* scheduler is not tracking what's happening).
* Bit fields:
* 31-16: Write mask -- 1: update empty bit, 0: don't change empty bit
* 15-00: Empty state, one for each queue -- 1: empty, 0: non-empty
* NOTE: This register is not used by Linux driver.
*/
-#define IWL49_SCD_EMPTY_BITS (IWL49_SCD_START_OFFSET + 0x4)
+#define IL49_SCD_EMPTY_BITS (IL49_SCD_START_OFFSET + 0x4)
/*
* Physical base address of array of byte count (BC) circular buffers (CBs).
@@ -351,11 +350,11 @@
* This register points to BC CB for queue 0, must be on 1024-byte boundary.
* Others are spaced by 1024 bytes.
* Each BC CB is 2 bytes * (256 + 64) = 640 bytes, followed by 384 bytes pad.
- * (Index into a queue's BC CB) = (index into queue's TFD CB) = (SSN & 0xff).
+ * (Index into a queue's BC CB) = (idx into queue's TFD CB) = (SSN & 0xff).
* Bit fields:
* 25-00: Byte Count CB physical address [35:10], must be 1024-byte aligned.
*/
-#define IWL49_SCD_DRAM_BASE_ADDR (IWL49_SCD_START_OFFSET + 0x10)
+#define IL49_SCD_DRAM_BASE_ADDR (IL49_SCD_START_OFFSET + 0x10)
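As a concrete illustration of the indexing rule above (BC CB index == TFD CB index == SSN & 0xff), a driver-side update of one byte-count entry might look roughly like the sketch below. The struct layout, the uint16_t entry type, and the mirroring of the first 64 entries past the 256-entry wrap point are assumptions derived from the sizes quoted in the comment, not the driver's exact definitions.

#include <stdint.h>

#define BC_TBL_ENTRIES          256     /* one per TFD index */
#define BC_TBL_DUP_ENTRIES      64      /* first 64 mirrored at the tail */

/* One byte-count circular buffer: 2 bytes * (256 + 64) = 640 bytes. */
struct scd_bc_tbl {
        uint16_t tfd_offset[BC_TBL_ENTRIES + BC_TBL_DUP_ENTRIES];
};

/* Record the byte count for the TFD at idx, where idx == (SSN & 0xff). */
static void bc_tbl_update(struct scd_bc_tbl *bc, uint16_t idx, uint16_t len)
{
        bc->tfd_offset[idx] = len;

        /* Mirror the first 64 entries past the wrap point so the
         * scheduler can keep reading across the 256-entry boundary. */
        if (idx < BC_TBL_DUP_ENTRIES)
                bc->tfd_offset[BC_TBL_ENTRIES + idx] = len;
}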
/*
* Enables any/all Tx DMA/FIFO channels.
@@ -364,23 +363,23 @@
* Bit fields:
* 7- 0: Enable (1), disable (0), one bit for each channel 0-7
*/
-#define IWL49_SCD_TXFACT (IWL49_SCD_START_OFFSET + 0x1c)
+#define IL49_SCD_TXFACT (IL49_SCD_START_OFFSET + 0x1c)
/*
- * Queue (x) Write Pointers (indexes, really!), one for each Tx queue.
+ * Queue (x) Write Pointers (idxes, really!), one for each Tx queue.
* Initialized and updated by driver as new TFDs are added to queue.
- * NOTE: If using Block Ack, index must correspond to frame's
- * Start Sequence Number; index = (SSN & 0xff)
+ * NOTE: If using Block Ack, idx must correspond to frame's
+ * Start Sequence Number; idx = (SSN & 0xff)
* NOTE: Alternative to HBUS_TARG_WRPTR, which is what Linux driver uses?
*/
-#define IWL49_SCD_QUEUE_WRPTR(x) (IWL49_SCD_START_OFFSET + 0x24 + (x) * 4)
+#define IL49_SCD_QUEUE_WRPTR(x) (IL49_SCD_START_OFFSET + 0x24 + (x) * 4)
/*
- * Queue (x) Read Pointers (indexes, really!), one for each Tx queue.
- * For FIFO mode, index indicates next frame to transmit.
- * For Scheduler-ACK mode, index indicates first frame in Tx window.
+ * Queue (x) Read Pointers (idxes, really!), one for each Tx queue.
+ * For FIFO mode, idx indicates next frame to transmit.
+ * For Scheduler-ACK mode, idx indicates first frame in Tx win.
* Initialized by driver, updated by scheduler.
*/
-#define IWL49_SCD_QUEUE_RDPTR(x) (IWL49_SCD_START_OFFSET + 0x64 + (x) * 4)
+#define IL49_SCD_QUEUE_RDPTR(x) (IL49_SCD_START_OFFSET + 0x64 + (x) * 4)
/*
* Select which queues work in chain mode (1) vs. not (0).
@@ -391,18 +390,18 @@
* NOTE: If driver sets up a queue for chain mode, it should also be set up
* in Scheduler-ACK mode, via SCD_QUEUE_STATUS_BITS(x).
*/
-#define IWL49_SCD_QUEUECHAIN_SEL (IWL49_SCD_START_OFFSET + 0xd0)
+#define IL49_SCD_QUEUECHAIN_SEL (IL49_SCD_START_OFFSET + 0xd0)
/*
* Select which queues interrupt driver when scheduler increments
- * a queue's read pointer (index).
+ * a queue's read pointer (idx).
* Bit fields:
* 31-16: Reserved
* 15-00: Interrupt enable, one bit for each queue -- 1: enabled, 0: disabled
* NOTE: This functionality is apparently a no-op; driver relies on interrupts
* from Rx queue to read Tx command responses and update Tx queues.
*/
-#define IWL49_SCD_INTERRUPT_MASK (IWL49_SCD_START_OFFSET + 0xe4)
+#define IL49_SCD_INTERRUPT_MASK (IL49_SCD_START_OFFSET + 0xe4)
/*
* Queue search status registers. One for each queue.
@@ -414,7 +413,7 @@
* Driver should init to "1" for aggregation mode, or "0" otherwise.
* 7-6: Driver should init to "0"
* 5: Window Size Left; indicates whether scheduler can request
- * another TFD, based on window size, etc. Driver should init
+ * another TFD, based on win size, etc. Driver should init
* this bit to "1" for aggregation mode, or "0" for non-agg.
* 4-1: Tx FIFO to use (range 0-7).
* 0: Queue is active (1), not active (0).
@@ -423,18 +422,18 @@
* NOTE: If enabling Scheduler-ACK mode, chain mode should also be enabled
* via SCD_QUEUECHAIN_SEL.
*/
-#define IWL49_SCD_QUEUE_STATUS_BITS(x)\
- (IWL49_SCD_START_OFFSET + 0x104 + (x) * 4)
+#define IL49_SCD_QUEUE_STATUS_BITS(x)\
+ (IL49_SCD_START_OFFSET + 0x104 + (x) * 4)
/* Bit field positions */
-#define IWL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0)
-#define IWL49_SCD_QUEUE_STTS_REG_POS_TXF (1)
-#define IWL49_SCD_QUEUE_STTS_REG_POS_WSL (5)
-#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8)
+#define IL49_SCD_QUEUE_STTS_REG_POS_ACTIVE (0)
+#define IL49_SCD_QUEUE_STTS_REG_POS_TXF (1)
+#define IL49_SCD_QUEUE_STTS_REG_POS_WSL (5)
+#define IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACK (8)
/* Write masks */
-#define IWL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10)
-#define IWL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00)
+#define IL49_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN (10)
+#define IL49_SCD_QUEUE_STTS_REG_MSK (0x0007FC00)
/**
* 4965 internal SRAM structures for scheduler, shared with driver ...
@@ -460,7 +459,7 @@
* each queue's entry as follows:
*
* LS Dword bit fields:
- * 0-06: Max Tx window size for Scheduler-ACK. Driver should init to 64.
+ * 0-06: Max Tx win size for Scheduler-ACK. Driver should init to 64.
*
* MS Dword bit fields:
* 16-22: Frame limit. Driver should init to 10 (0xa).
@@ -470,14 +469,14 @@
* Init must be done after driver receives "Alive" response from 4965 uCode,
* and when setting up queue for aggregation.
*/
-#define IWL49_SCD_CONTEXT_DATA_OFFSET 0x380
-#define IWL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
- (IWL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
+#define IL49_SCD_CONTEXT_DATA_OFFSET 0x380
+#define IL49_SCD_CONTEXT_QUEUE_OFFSET(x) \
+ (IL49_SCD_CONTEXT_DATA_OFFSET + ((x) * 8))
-#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0)
-#define IWL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F)
-#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
-#define IWL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
+#define IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS (0)
+#define IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK (0x0000007F)
+#define IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
+#define IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
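Putting the two context dwords together -- window size in the LS dword, frame limit in the MS dword, written into device SRAM at IL49_SCD_CONTEXT_QUEUE_OFFSET(x) relative to the scheduler base reported through IL49_SCD_SRAM_BASE_ADDR -- might look like the sketch below. The sram_write() helper and the placement of the second dword directly after the first are assumptions for illustration.

#include <stdint.h>

/* Hypothetical helper: write one dword into device SRAM at addr
 * (stand-in for the driver's target-memory write routine). */
extern void sram_write(uint32_t addr, uint32_t val);

/* Program a queue's scheduler context: Tx window size (e.g. SCD_WIN_SIZE)
 * in the LS dword, frame limit (e.g. 10) in the MS dword. */
static void scd_set_queue_context(uint32_t scd_base, int txq_id,
                                  uint32_t win_size, uint32_t frame_limit)
{
        uint32_t ls, ms;

        ls = (win_size << IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_POS) &
             IL49_SCD_QUEUE_CTX_REG1_WIN_SIZE_MSK;
        ms = (frame_limit << IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
             IL49_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK;

        sram_write(scd_base + IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id), ls);
        sram_write(scd_base + IL49_SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
                   sizeof(uint32_t), ms);
}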
/*
* Tx Status Bitmap
@@ -486,7 +485,7 @@
* "Alive" notification from uCode. Area is used only by device itself;
* no other support (besides clearing) is required from driver.
*/
-#define IWL49_SCD_TX_STTS_BITMAP_OFFSET 0x400
+#define IL49_SCD_TX_STTS_BITMAP_OFFSET 0x400
/*
* RAxTID to queue translation mapping.
@@ -494,7 +493,7 @@
* When queue is in Scheduler-ACK mode, frames placed in that queue must be
* for only one combination of receiver address (RA) and traffic ID (TID), i.e.
* one QOS priority level destined for one station (for this wireless link,
- * not final destination). The SCD_TRANSLATE_TABLE area provides 16 16-bit
+ * not final destination). The SCD_TRANSLATE_TBL area provides 16 16-bit
* mappings, one for each of the 16 queues. If queue is not in Scheduler-ACK
* mode, the device ignores the mapping value.
*
@@ -508,16 +507,16 @@
* must read a dword-aligned value from device SRAM, replace the 16-bit map
* value of interest, and write the dword value back into device SRAM.
*/
-#define IWL49_SCD_TRANSLATE_TBL_OFFSET 0x500
+#define IL49_SCD_TRANSLATE_TBL_OFFSET 0x500
/* Find translation table dword to read/write for given queue */
-#define IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
- ((IWL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
+#define IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(x) \
+ ((IL49_SCD_TRANSLATE_TBL_OFFSET + ((x) * 2)) & 0xfffffffc)
-#define IWL_SCD_TXFIFO_POS_TID (0)
-#define IWL_SCD_TXFIFO_POS_RA (4)
-#define IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
+#define IL_SCD_TXFIFO_POS_TID (0)
+#define IL_SCD_TXFIFO_POS_RA (4)
+#define IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK (0x01FF)
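The read-modify-write rule described above (16-bit mapping entries that must be accessed as dwords) might be implemented roughly as follows. The sram_read()/sram_write() helpers are stand-ins for the driver's target-memory I/O, and which half-word an even- or odd-numbered queue occupies within the dword is an assumption for illustration.

#include <stdint.h>

/* Hypothetical SRAM accessors (stand-ins for the driver's target-memory I/O). */
extern uint32_t sram_read(uint32_t addr);
extern void sram_write(uint32_t addr, uint32_t val);

/* Bind a queue to one RAxTID value: read the dword holding this queue's
 * 16-bit entry, replace that half-word, and write the dword back. */
static void scd_set_ra_tid_map(uint32_t scd_base, int txq_id, uint16_t ra_tid)
{
        uint32_t addr = scd_base + IL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
        uint32_t val = sram_read(addr);

        ra_tid &= IL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;

        /* Assumption: odd-numbered queues use the high half-word,
         * even-numbered queues the low half-word. */
        if (txq_id & 0x1)
                val = (val & 0x0000FFFF) | ((uint32_t)ra_tid << 16);
        else
                val = (val & 0xFFFF0000) | ra_tid;

        sram_write(addr, val);
}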
/*********************** END TX SCHEDULER *************************************/
-#endif /* __iwl_legacy_prph_h__ */
+#endif /* __il_prph_h__ */
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 57703d5..ae08498 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -102,12 +102,28 @@ config IWLWIFI_DEVICE_TRACING
occur.
endmenu
-config IWLWIFI_DEVICE_SVTOOL
- bool "iwlwifi device svtool support"
+config IWLWIFI_DEVICE_TESTMODE
+ def_bool y
depends on IWLWIFI
- select NL80211_TESTMODE
+ depends on NL80211_TESTMODE
help
- This option enables the svtool support for iwlwifi device through
- NL80211_TESTMODE. svtool is a software validation tool that runs in
- the user space and interacts with the device in the kernel space
- through the generic netlink message via NL80211_TESTMODE channel.
+ This option enables testmode support for iwlwifi devices through
+ NL80211_TESTMODE. It allows user space validation applications to
+ interact with the device through generic netlink messages via the
+ NL80211_TESTMODE channel.
+
+config IWLWIFI_P2P
+ bool "iwlwifi experimental P2P support"
+ depends on IWLWIFI
+ help
+ This option enables experimental P2P support for some devices,
+ depending on microcode support. Since P2P support is still under
+ development, this option may enable it now for some devices that
+ later turn out not to support it due to microcode restrictions.
+
+ To determine if your microcode supports the experimental P2P
+ offered by this option, check if the driver advertises AP
+ support when it is loaded.
+
+ Say Y only if you want to experiment with P2P.
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index c73e5ed..9dc84a7 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,7 +1,7 @@
# WIFI
obj-$(CONFIG_IWLWIFI) += iwlwifi.o
-iwlwifi-objs := iwl-agn.o iwl-agn-rs.o
-iwlwifi-objs += iwl-agn-ucode.o iwl-agn-tx.o
+iwlwifi-objs := iwl-agn.o iwl-agn-rs.o iwl-mac80211.o
+iwlwifi-objs += iwl-ucode.o iwl-agn-tx.o
iwlwifi-objs += iwl-agn-lib.o iwl-agn-calib.o iwl-io.o
iwlwifi-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-rx.o
@@ -18,7 +18,7 @@ iwlwifi-objs += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o
iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
-iwlwifi-$(CONFIG_IWLWIFI_DEVICE_SVTOOL) += iwl-sv-open.o
+iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o
CFLAGS_iwl-devtrace.o := -I$(src)
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index dd008b0..1ef7bfc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -124,10 +124,10 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
{
if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
- priv->cfg->base_params->num_of_queues =
+ cfg(priv)->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
+ hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
@@ -135,28 +135,19 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ);
- hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
- if (priv->cfg->rx_with_siso_diversity)
+ hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
+ if (cfg(priv)->rx_with_siso_diversity)
hw_params(priv).rx_chains_num = 1;
else
hw_params(priv).rx_chains_num =
- num_of_ant(priv->cfg->valid_rx_ant);
- hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
- hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
+ num_of_ant(cfg(priv)->valid_rx_ant);
+ hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
iwl1000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
- /* Set initial calibration set */
hw_params(priv).sens = &iwl1000_sensitivity;
- hw_params(priv).calib_init_cfg =
- BIT(IWL_CALIB_XTAL) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_TX_IQ_PERD) |
- BIT(IWL_CALIB_BASE_BAND);
- if (priv->cfg->need_dc_calib)
- hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC);
return 0;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 7943197..0946933 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -86,7 +86,7 @@ static void iwl2000_nic_config(struct iwl_priv *priv)
{
iwl_rf_config(priv);
- if (priv->cfg->iq_invert)
+ if (cfg(priv)->iq_invert)
iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_RADIO_IQ_INVER);
}
@@ -120,10 +120,10 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
{
if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
- priv->cfg->base_params->num_of_queues =
+ cfg(priv)->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
+ hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE;
@@ -131,29 +131,19 @@ static int iwl2000_hw_set_hw_params(struct iwl_priv *priv)
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ);
- hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
- if (priv->cfg->rx_with_siso_diversity)
+ hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
+ if (cfg(priv)->rx_with_siso_diversity)
hw_params(priv).rx_chains_num = 1;
else
hw_params(priv).rx_chains_num =
- num_of_ant(priv->cfg->valid_rx_ant);
- hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
- hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
+ num_of_ant(cfg(priv)->valid_rx_ant);
+ hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
iwl2000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
- /* Set initial calibration set */
hw_params(priv).sens = &iwl2000_sensitivity;
- hw_params(priv).calib_init_cfg =
- BIT(IWL_CALIB_XTAL) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_BASE_BAND);
- if (priv->cfg->need_dc_calib)
- hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
- if (priv->cfg->need_temp_offset_calib)
- hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
return 0;
}
@@ -258,25 +248,19 @@ static struct iwl_bt_params iwl2030_bt_params = {
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.lib = &iwl2000_lib, \
.base_params = &iwl2000_base_params, \
- .need_dc_calib = true, \
.need_temp_offset_calib = true, \
.temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
.iq_invert = true \
struct iwl_cfg iwl2000_2bgn_cfg = {
- .name = "2000 Series 2x2 BGN",
+ .name = "Intel(R) Centrino(R) Wireless-N 2200 BGN",
IWL_DEVICE_2000,
.ht_params = &iwl2000_ht_params,
};
-struct iwl_cfg iwl2000_2bg_cfg = {
- .name = "2000 Series 2x2 BG",
- IWL_DEVICE_2000,
-};
-
struct iwl_cfg iwl2000_2bgn_d_cfg = {
- .name = "2000D Series 2x2 BGN",
+ .name = "Intel(R) Centrino(R) Wireless-N 2200D BGN",
IWL_DEVICE_2000,
.ht_params = &iwl2000_ht_params,
};
@@ -291,7 +275,6 @@ struct iwl_cfg iwl2000_2bgn_d_cfg = {
.lib = &iwl2030_lib, \
.base_params = &iwl2030_base_params, \
.bt_params = &iwl2030_bt_params, \
- .need_dc_calib = true, \
.need_temp_offset_calib = true, \
.temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
@@ -299,16 +282,11 @@ struct iwl_cfg iwl2000_2bgn_d_cfg = {
.iq_invert = true \
struct iwl_cfg iwl2030_2bgn_cfg = {
- .name = "2000 Series 2x2 BGN/BT",
+ .name = "Intel(R) Centrino(R) Wireless-N 2230 BGN",
IWL_DEVICE_2030,
.ht_params = &iwl2000_ht_params,
};
-struct iwl_cfg iwl2030_2bg_cfg = {
- .name = "2000 Series 2x2 BG/BT",
- IWL_DEVICE_2030,
-};
-
#define IWL_DEVICE_105 \
.fw_name_pre = IWL105_FW_PRE, \
.ucode_api_max = IWL105_UCODE_API_MAX, \
@@ -318,7 +296,6 @@ struct iwl_cfg iwl2030_2bg_cfg = {
.eeprom_calib_ver = EEPROM_2000_TX_POWER_VERSION, \
.lib = &iwl2000_lib, \
.base_params = &iwl2000_base_params, \
- .need_dc_calib = true, \
.need_temp_offset_calib = true, \
.temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
@@ -326,19 +303,14 @@ struct iwl_cfg iwl2030_2bg_cfg = {
.rx_with_siso_diversity = true, \
.iq_invert = true \
-struct iwl_cfg iwl105_bg_cfg = {
- .name = "105 Series 1x1 BG",
- IWL_DEVICE_105,
-};
-
struct iwl_cfg iwl105_bgn_cfg = {
- .name = "105 Series 1x1 BGN",
+ .name = "Intel(R) Centrino(R) Wireless-N 105 BGN",
IWL_DEVICE_105,
.ht_params = &iwl2000_ht_params,
};
struct iwl_cfg iwl105_bgn_d_cfg = {
- .name = "105D Series 1x1 BGN",
+ .name = "Intel(R) Centrino(R) Wireless-N 105D BGN",
IWL_DEVICE_105,
.ht_params = &iwl2000_ht_params,
};
@@ -353,7 +325,6 @@ struct iwl_cfg iwl105_bgn_d_cfg = {
.lib = &iwl2030_lib, \
.base_params = &iwl2030_base_params, \
.bt_params = &iwl2030_bt_params, \
- .need_dc_calib = true, \
.need_temp_offset_calib = true, \
.temp_offset_v2 = true, \
.led_mode = IWL_LED_RF_STATE, \
@@ -361,13 +332,8 @@ struct iwl_cfg iwl105_bgn_d_cfg = {
.rx_with_siso_diversity = true, \
.iq_invert = true \
-struct iwl_cfg iwl135_bg_cfg = {
- .name = "135 Series 1x1 BG/BT",
- IWL_DEVICE_135,
-};
-
struct iwl_cfg iwl135_bgn_cfg = {
- .name = "135 Series 1x1 BGN/BT",
+ .name = "Intel(R) Centrino(R) Wireless-N 135 BGN",
IWL_DEVICE_135,
.ht_params = &iwl2000_ht_params,
};
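
Most of the mechanical churn in these files is the switch from priv->cfg dereferences to a cfg(priv) accessor so the configuration can be reached through shared state. A minimal sketch of that accessor pattern; the struct and field names below are simplified stand-ins, not the driver's real types.

/* Sketch: one helper knows where the cfg pointer lives, call sites
 * just use cfg(priv).  Names are invented for the example. */
#include <stdio.h>

struct demo_cfg { int num_of_queues; };
struct demo_shared { const struct demo_cfg *cfg; };
struct demo_priv { struct demo_shared *shrd; };

static inline const struct demo_cfg *cfg(const struct demo_priv *priv)
{
	return priv->shrd->cfg;	/* single place that knows the layout */
}

int main(void)
{
	static const struct demo_cfg base = { .num_of_queues = 20 };
	struct demo_shared shrd = { .cfg = &base };
	struct demo_priv priv = { .shrd = &shrd };

	printf("queues: %d\n", cfg(&priv)->num_of_queues);
	return 0;
}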
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index f55fb2d..b3a365f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -134,10 +134,10 @@ static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5)
-static s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
+static s32 iwl_temp_calib_to_offset(struct iwl_shared *shrd)
{
u16 temperature, voltage;
- __le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(priv,
+ __le16 *temp_calib = (__le16 *)iwl_eeprom_query_addr(shrd,
EEPROM_KELVIN_TEMPERATURE);
temperature = le16_to_cpu(temp_calib[0]);
@@ -151,7 +151,7 @@ static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
{
const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
s32 threshold = (s32)CELSIUS_TO_KELVIN(CT_KILL_THRESHOLD_LEGACY) -
- iwl_temp_calib_to_offset(priv);
+ iwl_temp_calib_to_offset(priv->shrd);
hw_params(priv).ct_kill_threshold = threshold * volt2temp_coef;
}
@@ -166,10 +166,10 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
{
if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
- priv->cfg->base_params->num_of_queues =
+ cfg(priv)->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
+ hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
@@ -178,22 +178,15 @@ static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
- hw_params(priv).rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
- hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
- hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
+ hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
+ hw_params(priv).rx_chains_num = num_of_ant(cfg(priv)->valid_rx_ant);
+ hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
iwl5000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
- /* Set initial calibration set */
hw_params(priv).sens = &iwl5000_sensitivity;
- hw_params(priv).calib_init_cfg =
- BIT(IWL_CALIB_XTAL) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_TX_IQ_PERD) |
- BIT(IWL_CALIB_BASE_BAND);
return 0;
}
@@ -202,10 +195,10 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
{
if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
- priv->cfg->base_params->num_of_queues =
+ cfg(priv)->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
+ hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
hw_params(priv).max_data_size = IWLAGN_RTC_DATA_SIZE;
@@ -214,22 +207,15 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
- hw_params(priv).rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
- hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
- hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
+ hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
+ hw_params(priv).rx_chains_num = num_of_ant(cfg(priv)->valid_rx_ant);
+ hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
iwl5150_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
- /* Set initial calibration set */
hw_params(priv).sens = &iwl5150_sensitivity;
- hw_params(priv).calib_init_cfg =
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_BASE_BAND);
- if (priv->cfg->need_dc_calib)
- hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_DC);
return 0;
}
@@ -237,7 +223,7 @@ static int iwl5150_hw_set_hw_params(struct iwl_priv *priv)
static void iwl5150_temperature(struct iwl_priv *priv)
{
u32 vt = 0;
- s32 offset = iwl_temp_calib_to_offset(priv);
+ s32 offset = iwl_temp_calib_to_offset(priv->shrd);
vt = le32_to_cpu(priv->statistics.common.temperature);
vt = vt / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF + offset;
@@ -434,7 +420,7 @@ struct iwl_cfg iwl5350_agn_cfg = {
.eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
.lib = &iwl5150_lib, \
.base_params = &iwl5000_base_params, \
- .need_dc_calib = true, \
+ .no_xtal_calib = true, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
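
For the 5150 path above, the CT-kill threshold is a legacy Celsius limit converted to Kelvin, minus an EEPROM-derived offset, scaled by the voltage-to-temperature coefficient. A worked-arithmetic sketch with placeholder constants; the real CT_KILL_THRESHOLD_LEGACY and offset values are not shown in this diff.

/* Arithmetic sketch only -- constants are placeholders, the shape of
 * the computation mirrors iwl5150_set_ct_threshold() above. */
#include <stdio.h>

#define CELSIUS_TO_KELVIN(c)	((c) + 273)
#define VOLT2TEMP_COEFF		(-5)	/* IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF */

int main(void)
{
	int ct_kill_celsius = 110;	/* placeholder for CT_KILL_THRESHOLD_LEGACY */
	int eeprom_offset = 20;		/* placeholder for iwl_temp_calib_to_offset() */

	int threshold = CELSIUS_TO_KELVIN(ct_kill_celsius) - eeprom_offset;
	int ct_kill_threshold = threshold * VOLT2TEMP_COEFF;

	/* the value stored in hw_params(priv).ct_kill_threshold */
	printf("ct_kill_threshold = %d\n", ct_kill_threshold);
	return 0;
}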
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index c840c78..54b7533 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -46,11 +46,12 @@
#include "iwl-cfg.h"
/* Highest firmware API version supported */
-#define IWL6000_UCODE_API_MAX 4
+#define IWL6000_UCODE_API_MAX 6
#define IWL6050_UCODE_API_MAX 5
#define IWL6000G2_UCODE_API_MAX 6
/* Oldest version we won't warn about */
+#define IWL6000_UCODE_API_OK 4
#define IWL6000G2_UCODE_API_OK 5
/* Lowest firmware API version supported */
@@ -80,7 +81,7 @@ static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
static void iwl6050_additional_nic_config(struct iwl_priv *priv)
{
/* Indicate calibration version to uCode. */
- if (iwlagn_eeprom_calib_version(priv) >= 6)
+ if (iwl_eeprom_calib_version(priv->shrd) >= 6)
iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
}
@@ -88,7 +89,7 @@ static void iwl6050_additional_nic_config(struct iwl_priv *priv)
static void iwl6150_additional_nic_config(struct iwl_priv *priv)
{
/* Indicate calibration version to uCode. */
- if (iwlagn_eeprom_calib_version(priv) >= 6)
+ if (iwl_eeprom_calib_version(priv->shrd) >= 6)
iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
iwl_set_bit(bus(priv), CSR_GP_DRIVER_REG,
@@ -101,14 +102,14 @@ static void iwl6000_nic_config(struct iwl_priv *priv)
iwl_rf_config(priv);
/* no locking required for register write */
- if (priv->cfg->pa_type == IWL_PA_INTERNAL) {
+ if (cfg(priv)->pa_type == IWL_PA_INTERNAL) {
/* 2x2 IPA phy type */
iwl_write32(bus(priv), CSR_GP_DRIVER_REG,
CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
}
/* do additional nic configuration if needed */
- if (priv->cfg->additional_nic_config)
- priv->cfg->additional_nic_config(priv);
+ if (cfg(priv)->additional_nic_config)
+ cfg(priv)->additional_nic_config(priv);
}
static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
@@ -140,10 +141,10 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
{
if (iwlagn_mod_params.num_of_queues >= IWL_MIN_NUM_QUEUES &&
iwlagn_mod_params.num_of_queues <= IWLAGN_NUM_QUEUES)
- priv->cfg->base_params->num_of_queues =
+ cfg(priv)->base_params->num_of_queues =
iwlagn_mod_params.num_of_queues;
- hw_params(priv).max_txq_num = priv->cfg->base_params->num_of_queues;
+ hw_params(priv).max_txq_num = cfg(priv)->base_params->num_of_queues;
priv->contexts[IWL_RXON_CTX_BSS].bcast_sta_id = IWLAGN_BROADCAST_ID;
hw_params(priv).max_data_size = IWL60_RTC_DATA_SIZE;
@@ -152,29 +153,19 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
hw_params(priv).ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
BIT(IEEE80211_BAND_5GHZ);
- hw_params(priv).tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
- if (priv->cfg->rx_with_siso_diversity)
+ hw_params(priv).tx_chains_num = num_of_ant(cfg(priv)->valid_tx_ant);
+ if (cfg(priv)->rx_with_siso_diversity)
hw_params(priv).rx_chains_num = 1;
else
hw_params(priv).rx_chains_num =
- num_of_ant(priv->cfg->valid_rx_ant);
- hw_params(priv).valid_tx_ant = priv->cfg->valid_tx_ant;
- hw_params(priv).valid_rx_ant = priv->cfg->valid_rx_ant;
+ num_of_ant(cfg(priv)->valid_rx_ant);
+ hw_params(priv).valid_tx_ant = cfg(priv)->valid_tx_ant;
+ hw_params(priv).valid_rx_ant = cfg(priv)->valid_rx_ant;
iwl6000_set_ct_threshold(priv);
/* Set initial sensitivity parameters */
- /* Set initial calibration set */
hw_params(priv).sens = &iwl6000_sensitivity;
- hw_params(priv).calib_init_cfg =
- BIT(IWL_CALIB_XTAL) |
- BIT(IWL_CALIB_LO) |
- BIT(IWL_CALIB_TX_IQ) |
- BIT(IWL_CALIB_BASE_BAND);
- if (priv->cfg->need_dc_calib)
- hw_params(priv).calib_rt_cfg |= IWL_CALIB_CFG_DC_IDX;
- if (priv->cfg->need_temp_offset_calib)
- hw_params(priv).calib_init_cfg |= BIT(IWL_CALIB_TEMP_OFFSET);
return 0;
}
@@ -364,7 +355,6 @@ static struct iwl_bt_params iwl6000_bt_params = {
.eeprom_calib_ver = EEPROM_6005_TX_POWER_VERSION, \
.lib = &iwl6000_lib, \
.base_params = &iwl6000_g2_base_params, \
- .need_dc_calib = true, \
.need_temp_offset_calib = true, \
.led_mode = IWL_LED_RF_STATE
@@ -406,7 +396,6 @@ struct iwl_cfg iwl6005_2agn_d_cfg = {
.lib = &iwl6030_lib, \
.base_params = &iwl6000_g2_base_params, \
.bt_params = &iwl6000_bt_params, \
- .need_dc_calib = true, \
.need_temp_offset_calib = true, \
.led_mode = IWL_LED_RF_STATE, \
.adv_pm = true \
@@ -434,21 +423,11 @@ struct iwl_cfg iwl6030_2bg_cfg = {
};
struct iwl_cfg iwl6035_2agn_cfg = {
- .name = "6035 Series 2x2 AGN/BT",
+ .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
IWL_DEVICE_6030,
.ht_params = &iwl6000_ht_params,
};
-struct iwl_cfg iwl6035_2abg_cfg = {
- .name = "6035 Series 2x2 ABG/BT",
- IWL_DEVICE_6030,
-};
-
-struct iwl_cfg iwl6035_2bg_cfg = {
- .name = "6035 Series 2x2 BG/BT",
- IWL_DEVICE_6030,
-};
-
struct iwl_cfg iwl1030_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
IWL_DEVICE_6030,
@@ -479,6 +458,7 @@ struct iwl_cfg iwl130_bg_cfg = {
#define IWL_DEVICE_6000i \
.fw_name_pre = IWL6000_FW_PRE, \
.ucode_api_max = IWL6000_UCODE_API_MAX, \
+ .ucode_api_ok = IWL6000_UCODE_API_OK, \
.ucode_api_min = IWL6000_UCODE_API_MIN, \
.valid_tx_ant = ANT_BC, /* .cfg overwrite */ \
.valid_rx_ant = ANT_BC, /* .cfg overwrite */ \
@@ -516,7 +496,6 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
.eeprom_ver = EEPROM_6050_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
.base_params = &iwl6050_base_params, \
- .need_dc_calib = true, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
@@ -540,7 +519,6 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.eeprom_ver = EEPROM_6150_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
.base_params = &iwl6050_base_params, \
- .need_dc_calib = true, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
@@ -559,17 +537,17 @@ struct iwl_cfg iwl6000_3agn_cfg = {
.name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
.fw_name_pre = IWL6000_FW_PRE,
.ucode_api_max = IWL6000_UCODE_API_MAX,
+ .ucode_api_ok = IWL6000_UCODE_API_OK,
.ucode_api_min = IWL6000_UCODE_API_MIN,
.eeprom_ver = EEPROM_6000_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
.lib = &iwl6000_lib,
.base_params = &iwl6000_base_params,
.ht_params = &iwl6000_ht_params,
- .need_dc_calib = true,
.led_mode = IWL_LED_BLINK,
};
-MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_OK));
MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
MODULE_FIRMWARE(IWL6005_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
MODULE_FIRMWARE(IWL6030_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
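
The 6000-series hunks add a ucode_api_ok level between max and min, and MODULE_FIRMWARE now advertises the _OK image rather than _MAX. A sketch of how such a max/ok/min window is typically consumed; the file-name pattern and fallback loop are assumptions for illustration, not the driver's actual loader.

/* Sketch: try the newest firmware API first, fall back toward the
 * minimum, warn only when running below the "ok" level. */
#include <stdio.h>

int main(void)
{
	int api_max = 6, api_ok = 4, api_min = 4;

	for (int api = api_max; api >= api_min; api--) {
		char fw_name[64];
		snprintf(fw_name, sizeof(fw_name), "iwlwifi-6000-%d.ucode", api);

		int found = (api == 4);	/* pretend only API 4 is installed */
		if (!found)
			continue;

		if (api < api_ok)
			printf("warning: %s is older than recommended\n", fw_name);
		printf("loading %s\n", fw_name);
		return 0;
	}
	printf("no usable firmware found\n");
	return 1;
}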
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index 03bac48..50ff849 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -82,56 +82,64 @@ struct statistics_general_data {
u32 beacon_energy_c;
};
-int iwl_send_calib_results(struct iwl_priv *priv)
+int iwl_send_calib_results(struct iwl_trans *trans)
{
- int ret = 0;
- int i = 0;
-
struct iwl_host_cmd hcmd = {
.id = REPLY_PHY_CALIBRATION_CMD,
.flags = CMD_SYNC,
};
-
- for (i = 0; i < IWL_CALIB_MAX; i++) {
- if ((BIT(i) & hw_params(priv).calib_init_cfg) &&
- priv->calib_results[i].buf) {
- hcmd.len[0] = priv->calib_results[i].buf_len;
- hcmd.data[0] = priv->calib_results[i].buf;
- hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
- ret = iwl_trans_send_cmd(trans(priv), &hcmd);
- if (ret) {
- IWL_ERR(priv, "Error %d iteration %d\n",
- ret, i);
- break;
- }
+ struct iwl_calib_result *res;
+
+ list_for_each_entry(res, &trans->calib_results, list) {
+ int ret;
+
+ hcmd.len[0] = res->cmd_len;
+ hcmd.data[0] = &res->hdr;
+ hcmd.dataflags[0] = IWL_HCMD_DFL_NOCOPY;
+ ret = iwl_trans_send_cmd(trans, &hcmd);
+ if (ret) {
+ IWL_ERR(trans, "Error %d on calib cmd %d\n",
+ ret, res->hdr.op_code);
+ return ret;
}
}
- return ret;
+ return 0;
}
-int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len)
+int iwl_calib_set(struct iwl_trans *trans,
+ const struct iwl_calib_hdr *cmd, int len)
{
- if (res->buf_len != len) {
- kfree(res->buf);
- res->buf = kzalloc(len, GFP_ATOMIC);
- }
- if (unlikely(res->buf == NULL))
+ struct iwl_calib_result *res, *tmp;
+
+ res = kmalloc(sizeof(*res) + len - sizeof(struct iwl_calib_hdr),
+ GFP_ATOMIC);
+ if (!res)
return -ENOMEM;
+ memcpy(&res->hdr, cmd, len);
+ res->cmd_len = len;
+
+ list_for_each_entry(tmp, &trans->calib_results, list) {
+ if (tmp->hdr.op_code == res->hdr.op_code) {
+ list_replace(&tmp->list, &res->list);
+ kfree(tmp);
+ return 0;
+ }
+ }
+
+ /* wasn't in list already */
+ list_add_tail(&res->list, &trans->calib_results);
- res->buf_len = len;
- memcpy(res->buf, buf, len);
return 0;
}
-void iwl_calib_free_results(struct iwl_priv *priv)
+void iwl_calib_free_results(struct iwl_trans *trans)
{
- int i;
+ struct iwl_calib_result *res, *tmp;
- for (i = 0; i < IWL_CALIB_MAX; i++) {
- kfree(priv->calib_results[i].buf);
- priv->calib_results[i].buf = NULL;
- priv->calib_results[i].buf_len = 0;
+ list_for_each_entry_safe(res, tmp, &trans->calib_results, list) {
+ list_del(&res->list);
+ kfree(res);
}
}
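
With the rewrite above, calibration results are no longer a fixed array indexed by a bitmask; they live in a list keyed by the calibration header's op_code, and a newer result replaces an older one with the same op_code. A userspace sketch of that storage scheme, using a hand-rolled singly linked list instead of the kernel's struct list_head.

/* Sketch of the replace-or-append behaviour of iwl_calib_set(). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct calib_result {
	struct calib_result *next;
	int op_code;
	size_t len;
	unsigned char data[];
};

static struct calib_result *results;

static int calib_set(int op_code, const void *buf, size_t len)
{
	struct calib_result *res = malloc(sizeof(*res) + len);
	if (!res)
		return -1;
	res->op_code = op_code;
	res->len = len;
	memcpy(res->data, buf, len);

	/* replace an existing entry with the same op_code, if any */
	for (struct calib_result **p = &results; *p; p = &(*p)->next) {
		if ((*p)->op_code == op_code) {
			res->next = (*p)->next;
			free(*p);
			*p = res;
			return 0;
		}
	}
	res->next = results;	/* wasn't in the list already */
	results = res;
	return 0;
}

int main(void)
{
	calib_set(8, "old", 3);
	calib_set(8, "new", 3);	/* replaces the first entry */
	calib_set(9, "dc", 2);

	for (struct calib_result *r = results; r; r = r->next)
		printf("op_code %d, %zu bytes\n", r->op_code, r->len);
	return 0;
}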
@@ -505,7 +513,7 @@ static int iwl_enhance_sensitivity_write(struct iwl_priv *priv)
iwl_prepare_legacy_sensitivity_tbl(priv, data, &cmd.enhance_table[0]);
- if (priv->cfg->base_params->hd_v2) {
+ if (cfg(priv)->base_params->hd_v2) {
cmd.enhance_table[HD_INA_NON_SQUARE_DET_OFDM_INDEX] =
HD_INA_NON_SQUARE_DET_OFDM_DATA_V2;
cmd.enhance_table[HD_INA_NON_SQUARE_DET_CCK_INDEX] =
@@ -839,7 +847,7 @@ static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
* connect the first valid tx chain
*/
first_chain =
- find_first_chain(priv->cfg->valid_tx_ant);
+ find_first_chain(cfg(priv)->valid_tx_ant);
data->disconn_array[first_chain] = 0;
active_chains |= BIT(first_chain);
IWL_DEBUG_CALIB(priv,
@@ -882,7 +890,7 @@ static void iwlagn_gain_computation(struct iwl_priv *priv,
continue;
}
- delta_g = (priv->cfg->base_params->chain_noise_scale *
+ delta_g = (cfg(priv)->base_params->chain_noise_scale *
((s32)average_noise[default_chain] -
(s32)average_noise[i])) / 1500;
@@ -1039,8 +1047,8 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
return;
/* Analyze signal for disconnected antenna */
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist) {
/* Disable disconnected antenna algorithm for advanced
bt coex, assuming valid antennas are connected */
data->active_chains = hw_params(priv).valid_rx_ant;
@@ -1074,7 +1082,7 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv)
iwlagn_gain_computation(priv, average_noise,
min_average_noise_antenna_i, min_average_noise,
- find_first_chain(priv->cfg->valid_rx_ant));
+ find_first_chain(cfg(priv)->valid_rx_ant));
/* Some power changes may have been made during the calibration.
* Update and commit the RXON
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
index a869fc9..10275ce 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.h
@@ -72,8 +72,4 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv);
void iwl_init_sensitivity(struct iwl_priv *priv);
void iwl_reset_run_time_calib(struct iwl_priv *priv);
-int iwl_send_calib_results(struct iwl_priv *priv);
-int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len);
-void iwl_calib_free_results(struct iwl_priv *priv);
-
#endif /* __iwl_calib_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index 1a52ed2..ca78e91 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -32,6 +32,7 @@
#include <linux/init.h>
#include <linux/sched.h>
+#include "iwl-wifi.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
@@ -92,11 +93,11 @@ void iwlagn_temperature(struct iwl_priv *priv)
iwl_tt_handler(priv);
}
-u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
+u16 iwl_eeprom_calib_version(struct iwl_shared *shrd)
{
struct iwl_eeprom_calib_hdr *hdr;
- hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
+ hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(shrd,
EEPROM_CALIB_ALL);
return hdr->version;
@@ -105,7 +106,7 @@ u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
/*
* EEPROM
*/
-static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
+static u32 eeprom_indirect_address(const struct iwl_shared *shrd, u32 address)
{
u16 offset = 0;
@@ -114,31 +115,31 @@ static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
switch (address & INDIRECT_TYPE_MSK) {
case INDIRECT_HOST:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_HOST);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_HOST);
break;
case INDIRECT_GENERAL:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_GENERAL);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_GENERAL);
break;
case INDIRECT_REGULATORY:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_REGULATORY);
break;
case INDIRECT_TXP_LIMIT:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_TXP_LIMIT);
break;
case INDIRECT_TXP_LIMIT_SIZE:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_TXP_LIMIT_SIZE);
break;
case INDIRECT_CALIBRATION:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_CALIBRATION);
break;
case INDIRECT_PROCESS_ADJST:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_PROCESS_ADJST);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_PROCESS_ADJST);
break;
case INDIRECT_OTHERS:
- offset = iwl_eeprom_query16(priv, EEPROM_LINK_OTHERS);
+ offset = iwl_eeprom_query16(shrd, EEPROM_LINK_OTHERS);
break;
default:
- IWL_ERR(priv, "illegal indirect type: 0x%X\n",
+ IWL_ERR(shrd->trans, "illegal indirect type: 0x%X\n",
address & INDIRECT_TYPE_MSK);
break;
}
@@ -147,11 +148,11 @@ static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
return (address & ADDRESS_MSK) + (offset << 1);
}
-const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
+const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset)
{
- u32 address = eeprom_indirect_address(priv, offset);
- BUG_ON(address >= priv->cfg->base_params->eeprom_size);
- return &priv->eeprom[address];
+ u32 address = eeprom_indirect_address(shrd, offset);
+ BUG_ON(address >= shrd->cfg->base_params->eeprom_size);
+ return &shrd->eeprom[address];
}
struct iwl_mod_params iwlagn_mod_params = {
@@ -232,7 +233,7 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK |
IWL_PAN_SCD_MULTICAST_MSK;
- if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
+ if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE)
flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;
IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
@@ -374,15 +375,15 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
BUILD_BUG_ON(sizeof(iwlagn_def_3w_lookup) !=
sizeof(basic.bt3_lookup_table));
- if (priv->cfg->bt_params) {
- if (priv->cfg->bt_params->bt_session_2) {
+ if (cfg(priv)->bt_params) {
+ if (cfg(priv)->bt_params->bt_session_2) {
bt_cmd_2000.prio_boost = cpu_to_le32(
- priv->cfg->bt_params->bt_prio_boost);
+ cfg(priv)->bt_params->bt_prio_boost);
bt_cmd_2000.tx_prio_boost = 0;
bt_cmd_2000.rx_prio_boost = 0;
} else {
bt_cmd_6000.prio_boost =
- priv->cfg->bt_params->bt_prio_boost;
+ cfg(priv)->bt_params->bt_prio_boost;
bt_cmd_6000.tx_prio_boost = 0;
bt_cmd_6000.rx_prio_boost = 0;
}
@@ -430,7 +431,7 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
priv->bt_full_concurrent ?
"full concurrency" : "3-wire");
- if (priv->cfg->bt_params->bt_session_2) {
+ if (cfg(priv)->bt_params->bt_session_2) {
memcpy(&bt_cmd_2000.basic, &basic,
sizeof(basic));
ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_BT_CONFIG,
@@ -799,8 +800,8 @@ static bool is_single_rx_stream(struct iwl_priv *priv)
*/
static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
{
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist &&
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist &&
(priv->bt_full_concurrent ||
priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
/*
@@ -827,6 +828,7 @@ static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
case IEEE80211_SMPS_STATIC:
case IEEE80211_SMPS_DYNAMIC:
return IWL_NUM_IDLE_CHAINS_SINGLE;
+ case IEEE80211_SMPS_AUTOMATIC:
case IEEE80211_SMPS_OFF:
return active_cnt;
default:
@@ -870,8 +872,8 @@ void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
else
active_chains = hw_params(priv).valid_rx_ant;
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist &&
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist &&
(priv->bt_full_concurrent ||
priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)) {
/*
@@ -933,53 +935,359 @@ u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant, u8 valid)
return ant;
}
-/* notification wait support */
-void iwlagn_init_notification_wait(struct iwl_priv *priv,
- struct iwl_notification_wait *wait_entry,
- u8 cmd,
- void (*fn)(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt,
- void *data),
- void *fn_data)
+#ifdef CONFIG_PM_SLEEP
+static void iwlagn_convert_p1k(u16 *p1k, __le16 *out)
{
- wait_entry->fn = fn;
- wait_entry->fn_data = fn_data;
- wait_entry->cmd = cmd;
- wait_entry->triggered = false;
- wait_entry->aborted = false;
-
- spin_lock_bh(&priv->notif_wait_lock);
- list_add(&wait_entry->list, &priv->notif_waits);
- spin_unlock_bh(&priv->notif_wait_lock);
+ int i;
+
+ for (i = 0; i < IWLAGN_P1K_SIZE; i++)
+ out[i] = cpu_to_le16(p1k[i]);
}
-int iwlagn_wait_notification(struct iwl_priv *priv,
- struct iwl_notification_wait *wait_entry,
- unsigned long timeout)
+struct wowlan_key_data {
+ struct iwl_rxon_context *ctx;
+ struct iwlagn_wowlan_rsc_tsc_params_cmd *rsc_tsc;
+ struct iwlagn_wowlan_tkip_params_cmd *tkip;
+ const u8 *bssid;
+ bool error, use_rsc_tsc, use_tkip;
+};
+
+
+static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key,
+ void *_data)
{
- int ret;
+ struct iwl_priv *priv = hw->priv;
+ struct wowlan_key_data *data = _data;
+ struct iwl_rxon_context *ctx = data->ctx;
+ struct aes_sc *aes_sc, *aes_tx_sc = NULL;
+ struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
+ struct iwlagn_p1k_cache *rx_p1ks;
+ u8 *rx_mic_key;
+ struct ieee80211_key_seq seq;
+ u32 cur_rx_iv32 = 0;
+ u16 p1k[IWLAGN_P1K_SIZE];
+ int ret, i;
+
+ mutex_lock(&priv->shrd->mutex);
- ret = wait_event_timeout(priv->notif_waitq,
- wait_entry->triggered || wait_entry->aborted,
- timeout);
+ if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
+ !sta && !ctx->key_mapping_keys)
+ ret = iwl_set_default_wep_key(priv, ctx, key);
+ else
+ ret = iwl_set_dynamic_key(priv, ctx, key, sta);
- spin_lock_bh(&priv->notif_wait_lock);
- list_del(&wait_entry->list);
- spin_unlock_bh(&priv->notif_wait_lock);
+ if (ret) {
+ IWL_ERR(priv, "Error setting key during suspend!\n");
+ data->error = true;
+ }
- if (wait_entry->aborted)
- return -EIO;
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ if (sta) {
+ tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
+ tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
- /* return value is always >= 0 */
- if (ret <= 0)
- return -ETIMEDOUT;
- return 0;
+ rx_p1ks = data->tkip->rx_uni;
+
+ ieee80211_get_key_tx_seq(key, &seq);
+ tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16);
+ tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32);
+
+ ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
+ iwlagn_convert_p1k(p1k, data->tkip->tx.p1k);
+
+ memcpy(data->tkip->mic_keys.tx,
+ &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
+ IWLAGN_MIC_KEY_SIZE);
+
+ rx_mic_key = data->tkip->mic_keys.rx_unicast;
+ } else {
+ tkip_sc =
+ data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
+ rx_p1ks = data->tkip->rx_multi;
+ rx_mic_key = data->tkip->mic_keys.rx_mcast;
+ }
+
+ /*
+ * For non-QoS this relies on the fact that both the uCode and
+ * mac80211 use TID 0 (as they need to to avoid replay attacks)
+ * for checking the IV in the frames.
+ */
+ for (i = 0; i < IWLAGN_NUM_RSC; i++) {
+ ieee80211_get_key_rx_seq(key, i, &seq);
+ tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
+ tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
+ /* wrapping isn't allowed, AP must rekey */
+ if (seq.tkip.iv32 > cur_rx_iv32)
+ cur_rx_iv32 = seq.tkip.iv32;
+ }
+
+ ieee80211_get_tkip_rx_p1k(key, data->bssid, cur_rx_iv32, p1k);
+ iwlagn_convert_p1k(p1k, rx_p1ks[0].p1k);
+ ieee80211_get_tkip_rx_p1k(key, data->bssid,
+ cur_rx_iv32 + 1, p1k);
+ iwlagn_convert_p1k(p1k, rx_p1ks[1].p1k);
+
+ memcpy(rx_mic_key,
+ &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
+ IWLAGN_MIC_KEY_SIZE);
+
+ data->use_tkip = true;
+ data->use_rsc_tsc = true;
+ break;
+ case WLAN_CIPHER_SUITE_CCMP:
+ if (sta) {
+ u8 *pn = seq.ccmp.pn;
+
+ aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
+ aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
+
+ ieee80211_get_key_tx_seq(key, &seq);
+ aes_tx_sc->pn = cpu_to_le64(
+ (u64)pn[5] |
+ ((u64)pn[4] << 8) |
+ ((u64)pn[3] << 16) |
+ ((u64)pn[2] << 24) |
+ ((u64)pn[1] << 32) |
+ ((u64)pn[0] << 40));
+ } else
+ aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
+
+ /*
+ * For non-QoS this relies on the fact that both the uCode and
+ * mac80211 use TID 0 for checking the IV in the frames.
+ */
+ for (i = 0; i < IWLAGN_NUM_RSC; i++) {
+ u8 *pn = seq.ccmp.pn;
+
+ ieee80211_get_key_rx_seq(key, i, &seq);
+ aes_sc->pn = cpu_to_le64(
+ (u64)pn[5] |
+ ((u64)pn[4] << 8) |
+ ((u64)pn[3] << 16) |
+ ((u64)pn[2] << 24) |
+ ((u64)pn[1] << 32) |
+ ((u64)pn[0] << 40));
+ }
+ data->use_rsc_tsc = true;
+ break;
+ }
+
+ mutex_unlock(&priv->shrd->mutex);
+}
+
+int iwlagn_send_patterns(struct iwl_priv *priv,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct iwlagn_wowlan_patterns_cmd *pattern_cmd;
+ struct iwl_host_cmd cmd = {
+ .id = REPLY_WOWLAN_PATTERNS,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ .flags = CMD_SYNC,
+ };
+ int i, err;
+
+ if (!wowlan->n_patterns)
+ return 0;
+
+ cmd.len[0] = sizeof(*pattern_cmd) +
+ wowlan->n_patterns * sizeof(struct iwlagn_wowlan_pattern);
+
+ pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
+ if (!pattern_cmd)
+ return -ENOMEM;
+
+ pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
+
+ for (i = 0; i < wowlan->n_patterns; i++) {
+ int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
+
+ memcpy(&pattern_cmd->patterns[i].mask,
+ wowlan->patterns[i].mask, mask_len);
+ memcpy(&pattern_cmd->patterns[i].pattern,
+ wowlan->patterns[i].pattern,
+ wowlan->patterns[i].pattern_len);
+ pattern_cmd->patterns[i].mask_size = mask_len;
+ pattern_cmd->patterns[i].pattern_size =
+ wowlan->patterns[i].pattern_len;
+ }
+
+ cmd.data[0] = pattern_cmd;
+ err = iwl_trans_send_cmd(trans(priv), &cmd);
+ kfree(pattern_cmd);
+ return err;
}
-void iwlagn_remove_notification(struct iwl_priv *priv,
- struct iwl_notification_wait *wait_entry)
+int iwlagn_suspend(struct iwl_priv *priv,
+ struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
{
- spin_lock_bh(&priv->notif_wait_lock);
- list_del(&wait_entry->list);
- spin_unlock_bh(&priv->notif_wait_lock);
+ struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd;
+ struct iwl_rxon_cmd rxon;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd;
+ struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {};
+ struct iwlagn_d3_config_cmd d3_cfg_cmd = {};
+ struct wowlan_key_data key_data = {
+ .ctx = ctx,
+ .bssid = ctx->active.bssid_addr,
+ .use_rsc_tsc = false,
+ .tkip = &tkip_cmd,
+ .use_tkip = false,
+ };
+ int ret, i;
+ u16 seq;
+
+ key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
+ if (!key_data.rsc_tsc)
+ return -ENOMEM;
+
+ memset(&wakeup_filter_cmd, 0, sizeof(wakeup_filter_cmd));
+
+ /*
+ * We know the last used seqno, and the uCode expects to know that
+ * one, it will increment before TX.
+ */
+ seq = le16_to_cpu(priv->last_seq_ctl) & IEEE80211_SCTL_SEQ;
+ wakeup_filter_cmd.non_qos_seq = cpu_to_le16(seq);
+
+ /*
+ * For QoS counters, we store the one to use next, so subtract 0x10
+ * since the uCode will add 0x10 before using the value.
+ */
+ for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
+ seq = priv->tid_data[IWL_AP_ID][i].seq_number;
+ seq -= 0x10;
+ wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq);
+ }
+
+ if (wowlan->disconnect)
+ wakeup_filter_cmd.enabled |=
+ cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS |
+ IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE);
+ if (wowlan->magic_pkt)
+ wakeup_filter_cmd.enabled |=
+ cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET);
+ if (wowlan->gtk_rekey_failure)
+ wakeup_filter_cmd.enabled |=
+ cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
+ if (wowlan->eap_identity_req)
+ wakeup_filter_cmd.enabled |=
+ cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ);
+ if (wowlan->four_way_handshake)
+ wakeup_filter_cmd.enabled |=
+ cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
+ if (wowlan->n_patterns)
+ wakeup_filter_cmd.enabled |=
+ cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH);
+
+ if (wowlan->rfkill_release)
+ d3_cfg_cmd.wakeup_flags |=
+ cpu_to_le32(IWLAGN_D3_WAKEUP_RFKILL);
+
+ iwl_scan_cancel_timeout(priv, 200);
+
+ memcpy(&rxon, &ctx->active, sizeof(rxon));
+
+ iwl_trans_stop_device(trans(priv));
+
+ priv->shrd->wowlan = true;
+
+ ret = iwl_load_ucode_wait_alive(trans(priv), IWL_UCODE_WOWLAN);
+ if (ret)
+ goto out;
+
+ /* now configure WoWLAN ucode */
+ ret = iwl_alive_start(priv);
+ if (ret)
+ goto out;
+
+ memcpy(&ctx->staging, &rxon, sizeof(rxon));
+ ret = iwlagn_commit_rxon(priv, ctx);
+ if (ret)
+ goto out;
+
+ ret = iwl_power_update_mode(priv, true);
+ if (ret)
+ goto out;
+
+ if (!iwlagn_mod_params.sw_crypto) {
+ /* mark all keys clear */
+ priv->ucode_key_table = 0;
+ ctx->key_mapping_keys = 0;
+
+ /*
+ * This needs to be unlocked due to lock ordering
+ * constraints. Since we're in the suspend path
+ * that isn't really a problem though.
+ */
+ mutex_unlock(&priv->shrd->mutex);
+ ieee80211_iter_keys(priv->hw, ctx->vif,
+ iwlagn_wowlan_program_keys,
+ &key_data);
+ mutex_lock(&priv->shrd->mutex);
+ if (key_data.error) {
+ ret = -EIO;
+ goto out;
+ }
+
+ if (key_data.use_rsc_tsc) {
+ struct iwl_host_cmd rsc_tsc_cmd = {
+ .id = REPLY_WOWLAN_TSC_RSC_PARAMS,
+ .flags = CMD_SYNC,
+ .data[0] = key_data.rsc_tsc,
+ .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
+ .len[0] = sizeof(*key_data.rsc_tsc),
+ };
+
+ ret = iwl_trans_send_cmd(trans(priv), &rsc_tsc_cmd);
+ if (ret)
+ goto out;
+ }
+
+ if (key_data.use_tkip) {
+ ret = iwl_trans_send_cmd_pdu(trans(priv),
+ REPLY_WOWLAN_TKIP_PARAMS,
+ CMD_SYNC, sizeof(tkip_cmd),
+ &tkip_cmd);
+ if (ret)
+ goto out;
+ }
+
+ if (priv->have_rekey_data) {
+ memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
+ memcpy(kek_kck_cmd.kck, priv->kck, NL80211_KCK_LEN);
+ kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
+ memcpy(kek_kck_cmd.kek, priv->kek, NL80211_KEK_LEN);
+ kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
+ kek_kck_cmd.replay_ctr = priv->replay_ctr;
+
+ ret = iwl_trans_send_cmd_pdu(trans(priv),
+ REPLY_WOWLAN_KEK_KCK_MATERIAL,
+ CMD_SYNC, sizeof(kek_kck_cmd),
+ &kek_kck_cmd);
+ if (ret)
+ goto out;
+ }
+ }
+
+ ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_D3_CONFIG, CMD_SYNC,
+ sizeof(d3_cfg_cmd), &d3_cfg_cmd);
+ if (ret)
+ goto out;
+
+ ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WOWLAN_WAKEUP_FILTER,
+ CMD_SYNC, sizeof(wakeup_filter_cmd),
+ &wakeup_filter_cmd);
+ if (ret)
+ goto out;
+
+ ret = iwlagn_send_patterns(priv, wowlan);
+ out:
+ kfree(key_data.rsc_tsc);
+ return ret;
}
+#endif
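
The WoWLAN key-programming code above folds the six CCMP PN octets into one 64-bit counter before handing it to the firmware. The packing, extracted into a standalone helper; shown host-endian here, while the driver additionally applies cpu_to_le64.

/* Sketch of the CCMP PN packing used in iwlagn_wowlan_program_keys(). */
#include <stdio.h>
#include <stdint.h>

static uint64_t pack_pn(const uint8_t pn[6])
{
	return (uint64_t)pn[5] |
	       ((uint64_t)pn[4] << 8)  |
	       ((uint64_t)pn[3] << 16) |
	       ((uint64_t)pn[2] << 24) |
	       ((uint64_t)pn[1] << 32) |
	       ((uint64_t)pn[0] << 40);
}

int main(void)
{
	const uint8_t pn[6] = { 0x00, 0x00, 0x00, 0x00, 0x01, 0x02 };

	/* pn[0] is the most significant octet, matching the shifts above */
	printf("packed PN = 0x%012llx\n", (unsigned long long)pack_pn(pn));
	return 0;
}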
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 66118ce..334b5ae 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -298,7 +298,7 @@ static u8 rs_tl_add_packet(struct iwl_lq_sta *lq_data,
} else
return IWL_MAX_TID_COUNT;
- if (unlikely(tid >= TID_MAX_LOAD_COUNT))
+ if (unlikely(tid >= IWL_MAX_TID_COUNT))
return IWL_MAX_TID_COUNT;
tl = &lq_data->load[tid];
@@ -352,7 +352,7 @@ static void rs_program_fix_rate(struct iwl_priv *priv,
lq_sta->active_mimo2_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
lq_sta->active_mimo3_rate = 0x1FD0; /* 6 - 60 MBits, no 9, no CCK */
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
/* testmode has higher priority to overwrite the fixed rate */
if (priv->tm_fixed_rate)
lq_sta->dbg_fixed_rate = priv->tm_fixed_rate;
@@ -379,7 +379,7 @@ static u32 rs_tl_get_load(struct iwl_lq_sta *lq_data, u8 tid)
s32 index;
struct iwl_traffic_load *tl = NULL;
- if (tid >= TID_MAX_LOAD_COUNT)
+ if (tid >= IWL_MAX_TID_COUNT)
return 0;
tl = &(lq_data->load[tid]);
@@ -444,11 +444,11 @@ static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
struct iwl_lq_sta *lq_data,
struct ieee80211_sta *sta)
{
- if (tid < TID_MAX_LOAD_COUNT)
+ if (tid < IWL_MAX_TID_COUNT)
rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
else
- IWL_ERR(priv, "tid exceeds max load count: %d/%d\n",
- tid, TID_MAX_LOAD_COUNT);
+ IWL_ERR(priv, "tid exceeds max TID count: %d/%d\n",
+ tid, IWL_MAX_TID_COUNT);
}
static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
@@ -1081,12 +1081,12 @@ done:
if (sta && sta->supp_rates[sband->band])
rs_rate_scale_perform(priv, skb, sta, lq_sta);
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_SVTOOL)
+#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_IWLWIFI_DEVICE_TESTMODE)
if ((priv->tm_fixed_rate) &&
(priv->tm_fixed_rate != lq_sta->dbg_fixed_rate))
rs_program_fix_rate(priv, lq_sta);
#endif
- if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist)
+ if (cfg(priv)->bt_params && cfg(priv)->bt_params->advanced_bt_coexist)
rs_bt_update_lq(priv, ctx, lq_sta);
}
@@ -1458,10 +1458,8 @@ static int rs_move_legacy_other(struct iwl_priv *priv,
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
- valid_tx_ant =
- first_antenna(hw_params(priv).valid_tx_ant);
if (tbl->action == IWL_LEGACY_SWITCH_ANTENNA2)
- tbl->action = IWL_LEGACY_SWITCH_ANTENNA1;
+ tbl->action = IWL_LEGACY_SWITCH_SISO;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
@@ -1636,10 +1634,8 @@ static int rs_move_siso_to_other(struct iwl_priv *priv,
break;
case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
/* avoid antenna B unless MIMO */
- valid_tx_ant =
- first_antenna(hw_params(priv).valid_tx_ant);
if (tbl->action == IWL_SISO_SWITCH_ANTENNA2)
- tbl->action = IWL_SISO_SWITCH_ANTENNA1;
+ tbl->action = IWL_SISO_SWITCH_MIMO2_AB;
break;
case IWL_BT_COEX_TRAFFIC_LOAD_HIGH:
case IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS:
@@ -2277,7 +2273,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
tid = rs_tl_add_packet(lq_sta, hdr);
if ((tid != IWL_MAX_TID_COUNT) &&
(lq_sta->tx_agg_tid_en & (1 << tid))) {
- tid_data = &priv->shrd->tid_data[lq_sta->lq.sta_id][tid];
+ tid_data = &priv->tid_data[lq_sta->lq.sta_id][tid];
if (tid_data->agg.state == IWL_AGG_OFF)
lq_sta->is_agg = 0;
else
@@ -2649,8 +2645,7 @@ lq_update:
(lq_sta->tx_agg_tid_en & (1 << tid)) &&
(tid != IWL_MAX_TID_COUNT)) {
u8 sta_id = lq_sta->lq.sta_id;
- tid_data =
- &priv->shrd->tid_data[sta_id][tid];
+ tid_data = &priv->tid_data[sta_id][tid];
if (tid_data->agg.state == IWL_AGG_OFF) {
IWL_DEBUG_RATE(priv,
"try to aggregate tid %d\n",
@@ -2908,7 +2903,7 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
if (sband->band == IEEE80211_BAND_5GHZ)
lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
lq_sta->is_agg = 0;
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
priv->tm_fixed_rate = 0;
#endif
#ifdef CONFIG_MAC80211_DEBUGFS
@@ -3059,11 +3054,11 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
* overwrite if needed, pass aggregation time limit
* to uCode in uSec
*/
- if (priv && priv->cfg->bt_params &&
- priv->cfg->bt_params->agg_time_limit &&
+ if (priv && cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->agg_time_limit &&
priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
lq_cmd->agg_params.agg_time_limit =
- cpu_to_le16(priv->cfg->bt_params->agg_time_limit);
+ cpu_to_le16(cfg(priv)->bt_params->agg_time_limit);
}
static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index f4f6deb..6675b3c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -281,7 +281,6 @@ enum {
#define TID_QUEUE_CELL_SPACING 50 /*mS */
#define TID_QUEUE_MAX_SIZE 20
#define TID_ROUND_VALUE 5 /* mS */
-#define TID_MAX_LOAD_COUNT 8
#define TID_MAX_TIME_DIFF ((TID_QUEUE_MAX_SIZE - 1) * TID_QUEUE_CELL_SPACING)
#define TIME_WRAP_AROUND(x, y) (((y) > (x)) ? (y) - (x) : (0-(x)) + (y))
@@ -402,7 +401,7 @@ struct iwl_lq_sta {
struct iwl_link_quality_cmd lq;
struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
- struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
+ struct iwl_traffic_load load[IWL_MAX_TID_COUNT];
u8 tx_agg_tid_en;
#ifdef CONFIG_MAC80211_DEBUGFS
struct dentry *rs_sta_dbgfs_scale_table_file;
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
index 5af9e62..b22b297 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rx.c
@@ -117,6 +117,7 @@ const char *get_cmd_string(u8 cmd)
IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS);
IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL);
IWL_CMD(REPLY_WOWLAN_GET_STATUS);
+ IWL_CMD(REPLY_D3_CONFIG);
default:
return "UNKNOWN";
@@ -317,7 +318,7 @@ static bool iwlagn_good_plcp_health(struct iwl_priv *priv,
unsigned int msecs)
{
int delta;
- int threshold = priv->cfg->base_params->plcp_delta_threshold;
+ int threshold = cfg(priv)->base_params->plcp_delta_threshold;
if (threshold == IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
@@ -582,8 +583,8 @@ static int iwlagn_rx_statistics(struct iwl_priv *priv,
iwlagn_rx_calc_noise(priv);
queue_work(priv->shrd->workqueue, &priv->run_time_calib_work);
}
- if (priv->cfg->lib->temperature && change)
- priv->cfg->lib->temperature(priv);
+ if (cfg(priv)->lib->temperature && change)
+ cfg(priv)->lib->temperature(priv);
return 0;
}
@@ -800,7 +801,8 @@ static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
ctx->active.bssid_addr))
continue;
ctx->last_tx_rejected = false;
- iwl_trans_wake_any_queue(trans(priv), ctx->ctxid);
+ iwl_trans_wake_any_queue(trans(priv), ctx->ctxid,
+ "channel got active");
}
}
@@ -1032,6 +1034,50 @@ static int iwlagn_rx_reply_rx(struct iwl_priv *priv,
return 0;
}
+static int iwlagn_rx_noa_notification(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_wipan_noa_data *new_data, *old_data;
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_wipan_noa_notification *noa_notif = (void *)pkt->u.raw;
+
+ /* no condition -- we're in softirq */
+ old_data = rcu_dereference_protected(priv->noa_data, true);
+
+ if (noa_notif->noa_active) {
+ u32 len = le16_to_cpu(noa_notif->noa_attribute.length);
+ u32 copylen = len;
+
+ /* EID, len, OUI, subtype */
+ len += 1 + 1 + 3 + 1;
+ /* P2P id, P2P length */
+ len += 1 + 2;
+ copylen += 1 + 2;
+
+ new_data = kmalloc(sizeof(*new_data) + len, GFP_ATOMIC);
+ if (new_data) {
+ new_data->length = len;
+ new_data->data[0] = WLAN_EID_VENDOR_SPECIFIC;
+ new_data->data[1] = len - 2; /* not counting EID, len */
+ new_data->data[2] = (WLAN_OUI_WFA >> 16) & 0xff;
+ new_data->data[3] = (WLAN_OUI_WFA >> 8) & 0xff;
+ new_data->data[4] = (WLAN_OUI_WFA >> 0) & 0xff;
+ new_data->data[5] = WLAN_OUI_TYPE_WFA_P2P;
+ memcpy(&new_data->data[6], &noa_notif->noa_attribute,
+ copylen);
+ }
+ } else
+ new_data = NULL;
+
+ rcu_assign_pointer(priv->noa_data, new_data);
+
+ if (old_data)
+ kfree_rcu(old_data, rcu_head);
+
+ return 0;
+}
+
/**
* iwl_setup_rx_handlers - Initialize Rx handler callbacks
*
@@ -1055,6 +1101,8 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
handlers[BEACON_NOTIFICATION] = iwlagn_rx_beacon_notif;
handlers[REPLY_ADD_STA] = iwl_add_sta_callback;
+ handlers[REPLY_WIPAN_NOA_NOTIFICATION] = iwlagn_rx_noa_notification;
+
/*
* The same handler is used for both the REPLY to a discrete
* statistics request from the host as well as for the periodic
@@ -1083,13 +1131,13 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
/* set up notification wait support */
- spin_lock_init(&priv->notif_wait_lock);
- INIT_LIST_HEAD(&priv->notif_waits);
- init_waitqueue_head(&priv->notif_waitq);
+ spin_lock_init(&priv->shrd->notif_wait_lock);
+ INIT_LIST_HEAD(&priv->shrd->notif_waits);
+ init_waitqueue_head(&priv->shrd->notif_waitq);
/* Set up BT Rx handlers */
- if (priv->cfg->lib->bt_rx_handler_setup)
- priv->cfg->lib->bt_rx_handler_setup(priv);
+ if (cfg(priv)->lib->bt_rx_handler_setup)
+ cfg(priv)->lib->bt_rx_handler_setup(priv);
}
@@ -1104,11 +1152,11 @@ int iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
* even if the RX handler consumes the RXB we have
* access to it in the notification wait entry.
*/
- if (!list_empty(&priv->notif_waits)) {
+ if (!list_empty(&priv->shrd->notif_waits)) {
struct iwl_notification_wait *w;
- spin_lock(&priv->notif_wait_lock);
- list_for_each_entry(w, &priv->notif_waits, list) {
+ spin_lock(&priv->shrd->notif_wait_lock);
+ list_for_each_entry(w, &priv->shrd->notif_waits, list) {
if (w->cmd != pkt->hdr.cmd)
continue;
IWL_DEBUG_RX(priv,
@@ -1117,11 +1165,11 @@ int iwl_rx_dispatch(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
pkt->hdr.cmd);
w->triggered = true;
if (w->fn)
- w->fn(priv, pkt, w->fn_data);
+ w->fn(trans(priv), pkt, w->fn_data);
}
- spin_unlock(&priv->notif_wait_lock);
+ spin_unlock(&priv->shrd->notif_wait_lock);
- wake_up_all(&priv->notif_waitq);
+ wake_up_all(&priv->shrd->notif_waitq);
}
if (priv->pre_rx_handler)
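
The new NoA handler in this file wraps the firmware-supplied NoA attribute in a vendor-specific IE: element ID, length, the WFA OUI, the P2P subtype, then the attribute bytes. A simplified standalone sketch of that framing; the payload below is a placeholder, and the extra P2P attribute-header bookkeeping done by the driver is omitted.

/* Sketch of the vendor-specific IE layout built in
 * iwlagn_rx_noa_notification(). */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	const uint8_t noa_attr[] = { 0x0c, 0x02, 0x00, 0x11 };	/* placeholder payload */
	uint8_t ie[2 + 4 + sizeof(noa_attr)];
	size_t len = sizeof(ie);

	ie[0] = 0xdd;			/* WLAN_EID_VENDOR_SPECIFIC */
	ie[1] = (uint8_t)(len - 2);	/* IE length excludes EID and length octets */
	ie[2] = 0x50;			/* WFA OUI 50:6f:9a */
	ie[3] = 0x6f;
	ie[4] = 0x9a;
	ie[5] = 0x09;			/* WLAN_OUI_TYPE_WFA_P2P */
	memcpy(&ie[6], noa_attr, sizeof(noa_attr));

	for (size_t i = 0; i < len; i++)
		printf("%02x ", ie[i]);
	printf("\n");
	return 0;
}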
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
index 5c7c17c..1c66594 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -45,7 +45,8 @@ static int iwlagn_disable_bss(struct iwl_priv *priv,
send->filter_flags = old_filter;
if (ret)
- IWL_ERR(priv, "Error clearing ASSOC_MSK on BSS (%d)\n", ret);
+ IWL_DEBUG_QUIET_RFKILL(priv,
+ "Error clearing ASSOC_MSK on BSS (%d)\n", ret);
return ret;
}
@@ -59,7 +60,7 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
u8 old_dev_type = send->dev_type;
int ret;
- iwlagn_init_notification_wait(priv, &disable_wait,
+ iwl_init_notification_wait(priv->shrd, &disable_wait,
REPLY_WIPAN_DEACTIVATION_COMPLETE,
NULL, NULL);
@@ -73,9 +74,9 @@ static int iwlagn_disable_pan(struct iwl_priv *priv,
if (ret) {
IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
- iwlagn_remove_notification(priv, &disable_wait);
+ iwl_remove_notification(priv->shrd, &disable_wait);
} else {
- ret = iwlagn_wait_notification(priv, &disable_wait, HZ);
+ ret = iwl_wait_notification(priv->shrd, &disable_wait, HZ);
if (ret)
IWL_ERR(priv, "Timed out waiting for PAN disable\n");
}
@@ -116,7 +117,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
if (ctx->ht.enabled)
ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
- IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
+ IWL_DEBUG_INFO(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
ctx->qos_data.qos_active,
ctx->qos_data.def_qos_parm.qos_flags);
@@ -124,7 +125,7 @@ static void iwlagn_update_qos(struct iwl_priv *priv,
sizeof(struct iwl_qosparam_cmd),
&ctx->qos_data.def_qos_parm);
if (ret)
- IWL_ERR(priv, "Failed to update QoS\n");
+ IWL_DEBUG_QUIET_RFKILL(priv, "Failed to update QoS\n");
}
static int iwlagn_update_beacon(struct iwl_priv *priv,
@@ -295,9 +296,9 @@ static int iwlagn_rxon_connect(struct iwl_priv *priv,
}
if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION &&
- priv->cfg->ht_params && priv->cfg->ht_params->smps_mode)
+ cfg(priv)->ht_params && cfg(priv)->ht_params->smps_mode)
ieee80211_request_smps(ctx->vif,
- priv->cfg->ht_params->smps_mode);
+ cfg(priv)->ht_params->smps_mode);
return 0;
}
@@ -444,8 +445,8 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
* force CTS-to-self frames protection if RTS-CTS is not preferred
* one aggregation protection method
*/
- if (!(priv->cfg->ht_params &&
- priv->cfg->ht_params->use_rts_for_aggregation))
+ if (!(cfg(priv)->ht_params &&
+ cfg(priv)->ht_params->use_rts_for_aggregation))
ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
@@ -559,6 +560,9 @@ int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
mutex_lock(&priv->shrd->mutex);
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ goto out;
+
if (unlikely(test_bit(STATUS_SCANNING, &priv->shrd->status))) {
IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
goto out;
@@ -850,7 +854,8 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
if (ctx->last_tx_rejected) {
ctx->last_tx_rejected = false;
iwl_trans_wake_any_queue(trans(priv),
- ctx->ctxid);
+ ctx->ctxid,
+ "Disassoc: flush queue");
}
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
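
The RXON changes above move the notification-wait helpers onto the shared data and rename them (iwl_init_notification_wait / iwl_wait_notification / iwl_remove_notification). A minimal pthread sketch of that idiom: arm a waiter, fire the request, block until the matching notification arrives (timeout handling omitted). All names here are invented for the example.

/* Userspace sketch of the notification-wait pattern. */
#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool triggered;

static void *firmware_thread(void *arg)
{
	(void)arg;
	sleep(1);			/* pretend the device is working */
	pthread_mutex_lock(&lock);
	triggered = true;		/* "deactivation complete" notification */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	/* init_notification_wait: arm the waiter before sending the command */
	triggered = false;
	pthread_create(&tid, NULL, firmware_thread, NULL);

	/* wait_notification: block until the notification fires */
	pthread_mutex_lock(&lock);
	while (!triggered)
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	printf("PAN deactivation complete\n");
	pthread_join(tid, NULL);
	return 0;
}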
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index 4b2aa1d..e483cfa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -130,25 +130,15 @@ int iwl_add_sta_callback(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
return iwl_process_add_sta_resp(priv, addsta, pkt);
}
-static u16 iwlagn_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
-{
- u16 size = (u16)sizeof(struct iwl_addsta_cmd);
- struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data;
- memcpy(addsta, cmd, size);
- /* resrved in 5000 */
- addsta->rate_n_flags = cpu_to_le16(0);
- return size;
-}
-
int iwl_send_add_sta(struct iwl_priv *priv,
struct iwl_addsta_cmd *sta, u8 flags)
{
int ret = 0;
- u8 data[sizeof(*sta)];
struct iwl_host_cmd cmd = {
.id = REPLY_ADD_STA,
.flags = flags,
- .data = { data, },
+ .data = { sta, },
+ .len = { sizeof(*sta), },
};
u8 sta_id __maybe_unused = sta->sta.sta_id;
@@ -160,7 +150,6 @@ int iwl_send_add_sta(struct iwl_priv *priv,
might_sleep();
}
- cmd.len[0] = iwlagn_build_addsta_hcmd(sta, data);
ret = iwl_trans_send_cmd(trans(priv), &cmd);
if (ret || (flags & CMD_ASYNC))
@@ -463,6 +452,7 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
const u8 *addr)
{
unsigned long flags;
+ u8 tid;
if (!iwl_is_ready(priv->shrd)) {
IWL_DEBUG_INFO(priv,
@@ -501,6 +491,10 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
priv->stations[sta_id].lq = NULL;
}
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++)
+ memset(&priv->tid_data[sta_id][tid], 0,
+ sizeof(priv->tid_data[sta_id][tid]));
+
priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
priv->num_stations--;
@@ -647,7 +641,7 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
int ret;
struct iwl_addsta_cmd sta_cmd;
struct iwl_link_quality_cmd lq;
- bool active;
+ bool active, have_lq = false;
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
@@ -657,7 +651,10 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
sta_cmd.mode = 0;
- memcpy(&lq, priv->stations[sta_id].lq, sizeof(lq));
+ if (priv->stations[sta_id].lq) {
+ memcpy(&lq, priv->stations[sta_id].lq, sizeof(lq));
+ have_lq = true;
+ }
active = priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE;
priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
@@ -679,7 +676,8 @@ void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
if (ret)
IWL_ERR(priv, "failed to re-add STA %pM (%d)\n",
priv->stations[sta_id].sta.sta.addr, ret);
- iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
+ if (have_lq)
+ iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
}
int iwl_get_free_ucode_key_offset(struct iwl_priv *priv)
@@ -825,28 +823,6 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
return ret;
}
-int iwlagn_mac_sta_remove(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- int ret;
-
- IWL_DEBUG_MAC80211(priv, "enter: received request to remove "
- "station %pM\n", sta->addr);
- mutex_lock(&priv->shrd->mutex);
- IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
- sta->addr);
- ret = iwl_remove_station(priv, sta_priv->sta_id, sta->addr);
- if (ret)
- IWL_ERR(priv, "Error removing station %pM\n",
- sta->addr);
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return ret;
-}
void iwl_sta_fill_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
u8 sta_id, struct iwl_link_quality_cmd *link_cmd)
@@ -1211,6 +1187,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
unsigned long flags;
struct iwl_addsta_cmd sta_cmd;
u8 sta_id = iwlagn_key_sta_id(priv, ctx->vif, sta);
+ __le16 key_flags;
/* if station isn't there, neither is the key */
if (sta_id == IWL_INVALID_STATION)
@@ -1236,7 +1213,14 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
IWL_ERR(priv, "offset %d not used in uCode key table.\n",
keyconf->hw_key_idx);
- sta_cmd.key.key_flags = STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
+ key_flags = cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+ key_flags |= STA_KEY_FLG_MAP_KEY_MSK | STA_KEY_FLG_NO_ENC |
+ STA_KEY_FLG_INVALID;
+
+ if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+ key_flags |= STA_KEY_MULTICAST_MSK;
+
+ sta_cmd.key.key_flags = key_flags;
sta_cmd.key.key_offset = WEP_INVALID_OFFSET;
sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
@@ -1459,20 +1443,7 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
}
-static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->shrd->sta_lock, flags);
- priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
- priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
- priv->stations[sta_id].sta.sta.modify_mask = 0;
- priv->stations[sta_id].sta.sleep_tx_count = 0;
- priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
- iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
- spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
-}
void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
{
@@ -1489,36 +1460,3 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
}
-
-void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum sta_notify_cmd cmd,
- struct ieee80211_sta *sta)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- int sta_id;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- switch (cmd) {
- case STA_NOTIFY_SLEEP:
- WARN_ON(!sta_priv->client);
- sta_priv->asleep = true;
- if (atomic_read(&sta_priv->pending_frames) > 0)
- ieee80211_sta_block_awake(hw, sta, true);
- break;
- case STA_NOTIFY_AWAKE:
- WARN_ON(!sta_priv->client);
- if (!sta_priv->asleep)
- break;
- sta_priv->asleep = false;
- sta_id = iwl_sta_id(sta);
- if (sta_id != IWL_INVALID_STATION)
- iwl_sta_modify_ps_wake(priv, sta_id);
- break;
- default:
- break;
- }
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
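
iwl_remove_dynamic_key() above now builds an explicit key_flags word (key index, map-key, no-encryption, invalid, plus the multicast bit for group keys) instead of the bare NO_ENC|INVALID pair. A sketch of that flag composition with placeholder bit values, not the real STA_KEY_* definitions.

/* Sketch of the key_flags composition; values are placeholders. */
#include <stdio.h>
#include <stdint.h>

#define KEY_FLG_KEYID_POS	8
#define KEY_FLG_MAP_KEY		0x0008
#define KEY_FLG_NO_ENC		0x0000	/* placeholder */
#define KEY_FLG_INVALID		0x0800	/* placeholder */
#define KEY_MULTICAST		0x4000	/* placeholder */

int main(void)
{
	int keyidx = 2;
	int pairwise = 0;

	uint16_t key_flags = (uint16_t)(keyidx << KEY_FLG_KEYID_POS);
	key_flags |= KEY_FLG_MAP_KEY | KEY_FLG_NO_ENC | KEY_FLG_INVALID;
	if (!pairwise)
		key_flags |= KEY_MULTICAST;	/* group key */

	printf("key_flags = 0x%04x\n", key_flags);
	return 0;
}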
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
index c27180a..b0dff7a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tt.c
@@ -633,7 +633,7 @@ void iwl_tt_initialize(struct iwl_priv *priv)
INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);
- if (priv->cfg->base_params->adv_thermal_throttle) {
+ if (cfg(priv)->base_params->adv_thermal_throttle) {
IWL_DEBUG_TEMP(priv, "Advanced Thermal Throttling\n");
tt->restriction = kcalloc(IWL_TI_STATE_MAX,
sizeof(struct iwl_tt_restriction),
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index df1540c..63bbc60 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -74,8 +74,8 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
else if (ieee80211_is_back_req(fc))
tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
else if (info->band == IEEE80211_BAND_2GHZ &&
- priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist &&
+ cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist &&
(ieee80211_is_auth(fc) || ieee80211_is_assoc_req(fc) ||
ieee80211_is_reassoc_req(fc) ||
skb->protocol == cpu_to_be16(ETH_P_PAE)))
@@ -91,6 +91,7 @@ static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
tx_cmd->tid_tspec = qc[0] & 0xf;
tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
} else {
+ tx_cmd->tid_tspec = IWL_TID_NON_QOS;
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
else
@@ -151,7 +152,7 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
if (ieee80211_is_data(fc)) {
tx_cmd->initial_rate_index = 0;
tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
if (priv->tm_fixed_rate) {
/*
* rate overwrite by testmode
@@ -164,7 +165,8 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
}
#endif
return;
- }
+ } else if (ieee80211_is_back_req(fc))
+ tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
/**
* If the current TX rate stored in mac80211 has the MCS bit set, it's
@@ -190,8 +192,8 @@ static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
rate_flags |= RATE_MCS_CCK_MSK;
/* Set up antennas */
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist &&
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist &&
priv->bt_full_concurrent) {
/* operated as 1x1 in full concurrency mode */
priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant,
@@ -261,8 +263,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
__le16 fc;
u8 hdr_len;
- u16 len;
- u8 sta_id;
+ u16 len, seq_number = 0;
+ u8 sta_id, tid = IWL_MAX_TID_COUNT;
unsigned long flags;
bool is_agg = false;
@@ -286,6 +288,19 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
#endif
+ if (unlikely(ieee80211_is_probe_resp(fc))) {
+ struct iwl_wipan_noa_data *noa_data =
+ rcu_dereference(priv->noa_data);
+
+ if (noa_data &&
+ pskb_expand_head(skb, 0, noa_data->length,
+ GFP_ATOMIC) == 0) {
+ memcpy(skb_put(skb, noa_data->length),
+ noa_data->data, noa_data->length);
+ hdr = (struct ieee80211_hdr *)skb->data;
+ }
+ }
+
hdr_len = ieee80211_hdrlen(fc);
	/* For management frames use broadcast id so as not to break aggregation */
@@ -354,9 +369,51 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
info->driver_data[0] = ctx;
info->driver_data[1] = dev_cmd;
- if (iwl_trans_tx(trans(priv), skb, dev_cmd, ctx->ctxid, sta_id))
+ if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
+ u8 *qc = NULL;
+ struct iwl_tid_data *tid_data;
+ qc = ieee80211_get_qos_ctl(hdr);
+ tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
+ if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
+ goto drop_unlock_sta;
+ tid_data = &priv->tid_data[sta_id][tid];
+
+ /* aggregation is on for this <sta,tid> */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+ tid_data->agg.state != IWL_AGG_ON) {
+ IWL_ERR(priv, "TX_CTL_AMPDU while not in AGG:"
+ " Tx flags = 0x%08x, agg.state = %d",
+ info->flags, tid_data->agg.state);
+ IWL_ERR(priv, "sta_id = %d, tid = %d seq_num = %d",
+ sta_id, tid, SEQ_TO_SN(tid_data->seq_number));
+ goto drop_unlock_sta;
+ }
+
+ /* We can receive packets from the stack in IWL_AGG_{ON,OFF}
+ * only. Check this here.
+ */
+ if (WARN_ONCE(tid_data->agg.state != IWL_AGG_ON &&
+ tid_data->agg.state != IWL_AGG_OFF,
+ "Tx while agg.state = %d", tid_data->agg.state))
+ goto drop_unlock_sta;
+
+ seq_number = tid_data->seq_number;
+ seq_number &= IEEE80211_SCTL_SEQ;
+ hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
+ hdr->seq_ctrl |= cpu_to_le16(seq_number);
+ seq_number += 0x10;
+ }
+
+ /* Copy MAC header from skb into command buffer */
+ memcpy(tx_cmd->hdr, hdr, hdr_len);
+
+ if (iwl_trans_tx(trans(priv), skb, dev_cmd, ctx->ctxid, sta_id, tid))
goto drop_unlock_sta;
+ if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc) &&
+ !ieee80211_has_morefrags(fc))
+ priv->tid_data[sta_id][tid].seq_number = seq_number;
+
spin_unlock(&priv->shrd->sta_lock);
spin_unlock_irqrestore(&priv->shrd->lock, flags);
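
The QoS branch added above splices the driver's per-TID sequence counter into the frame header and only commits the incremented value once the transmit succeeds for an unfragmented frame. A self-contained sketch of the field layout the masks rely on (helper name is illustrative, constants come from mac80211):

/* Illustrative helper (not from the driver): install a driver-kept
 * sequence number into an 802.11 header and return the next value.
 * Bits 0-3 of seq_ctrl hold the fragment number, bits 4-15 the
 * sequence number, hence the increment in steps of 0x10.
 */
static u16 example_install_seq(struct ieee80211_hdr *hdr, u16 seq_number)
{
	u16 seq = seq_number & IEEE80211_SCTL_SEQ;	/* 0xFFF0 */

	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);	/* keep fragment bits */
	hdr->seq_ctrl |= cpu_to_le16(seq);			/* install driver SN */
	return seq + 0x10;					/* advance by one SN */
}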
@@ -381,10 +438,81 @@ drop_unlock_priv:
return -1;
}
+int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid)
+{
+ struct iwl_tid_data *tid_data;
+ unsigned long flags;
+ int sta_id;
+
+ sta_id = iwl_sta_id(sta);
+
+ if (sta_id == IWL_INVALID_STATION) {
+ IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
+ return -ENXIO;
+ }
+
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+
+ tid_data = &priv->tid_data[sta_id][tid];
+
+ switch (priv->tid_data[sta_id][tid].agg.state) {
+ case IWL_EMPTYING_HW_QUEUE_ADDBA:
+ /*
+ * This can happen if the peer stops aggregation
+ * again before we've had a chance to drain the
+ * queue we selected previously, i.e. before the
+ * session was really started completely.
+ */
+ IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
+ goto turn_off;
+ case IWL_AGG_ON:
+ break;
+ default:
+ IWL_WARN(priv, "Stopping AGG while state not ON "
+ "or starting for %d on %d (%d)\n", sta_id, tid,
+ priv->tid_data[sta_id][tid].agg.state);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ return 0;
+ }
+
+ tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
+
+ /* There are still packets for this RA / TID in the HW */
+ if (tid_data->agg.ssn != tid_data->next_reclaimed) {
+ IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
+ "next_recl = %d",
+ tid_data->agg.ssn,
+ tid_data->next_reclaimed);
+ priv->tid_data[sta_id][tid].agg.state =
+ IWL_EMPTYING_HW_QUEUE_DELBA;
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ return 0;
+ }
+
+ IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d",
+ tid_data->agg.ssn);
+turn_off:
+ priv->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
+
+ /* do not restore/save irqs */
+ spin_unlock(&priv->shrd->sta_lock);
+ spin_lock(&priv->shrd->lock);
+
+ iwl_trans_tx_agg_disable(trans(priv), sta_id, tid);
+
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
+
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+
+ return 0;
+}
+
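
iwlagn_tx_agg_stop() above and iwlagn_tx_agg_start()/iwlagn_check_ratid_empty() below drive a small per-<station, TID> state machine. A summary of the states as they are used in this patch (the enum itself is defined elsewhere in the driver):

/*
 *   IWL_AGG_OFF                  no BA session active
 *   IWL_EMPTYING_HW_QUEUE_ADDBA  ADDBA issued, waiting for the frames
 *                                already queued to drain before the
 *                                session is reported as started
 *   IWL_AGG_ON                   BA session running
 *   IWL_EMPTYING_HW_QUEUE_DELBA  DELBA issued, waiting for in-flight
 *                                frames to drain before tearing down
 */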
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_tid_data *tid_data;
+ unsigned long flags;
int sta_id;
int ret;
@@ -399,7 +527,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
if (unlikely(tid >= IWL_MAX_TID_COUNT))
return -EINVAL;
- if (priv->shrd->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) {
+ if (priv->tid_data[sta_id][tid].agg.state != IWL_AGG_OFF) {
IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
return -ENXIO;
}
@@ -408,27 +536,136 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
if (ret)
return ret;
- ret = iwl_trans_tx_agg_alloc(trans(priv), vif_priv->ctx->ctxid, sta_id,
- tid, ssn);
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+
+ tid_data = &priv->tid_data[sta_id][tid];
+ tid_data->agg.ssn = SEQ_TO_SN(tid_data->seq_number);
+
+ *ssn = tid_data->agg.ssn;
+
+ ret = iwl_trans_tx_agg_alloc(trans(priv), sta_id, tid);
+ if (ret) {
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+ return ret;
+ }
+
+ if (*ssn == tid_data->next_reclaimed) {
+ IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d",
+ tid_data->agg.ssn);
+ tid_data->agg.state = IWL_AGG_ON;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ } else {
+ IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
+ "next_reclaimed = %d",
+ tid_data->agg.ssn,
+ tid_data->next_reclaimed);
+ tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+ }
+
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return ret;
}
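
Both aggregation paths above derive the starting SSN from the driver's sequence counter via SEQ_TO_SN(). Hypothetical equivalents of that conversion, assuming the usual 12-bit sequence-number layout (the driver's own macro is authoritative):

/* illustrative only, not the driver's definitions */
#define EXAMPLE_SEQ_TO_SN(seq)	(((seq) & IEEE80211_SCTL_SEQ) >> 4)
#define EXAMPLE_SN_TO_SEQ(ssn)	(((ssn) << 4) & IEEE80211_SCTL_SEQ)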
-int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta, u16 tid)
+int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u8 buf_size)
{
- int sta_id;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
+ struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+ unsigned long flags;
+ u16 ssn;
- sta_id = iwl_sta_id(sta);
+ buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
- if (sta_id == IWL_INVALID_STATION) {
- IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
- return -ENXIO;
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ ssn = priv->tid_data[sta_priv->sta_id][tid].agg.ssn;
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+
+ iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, sta_priv->sta_id, tid,
+ buf_size, ssn);
+
+ /*
+ * If the limit is 0, then it wasn't initialised yet,
+ * use the default. We can do that since we take the
+ * minimum below, and we don't want to go above our
+ * default due to hardware restrictions.
+ */
+ if (sta_priv->max_agg_bufsize == 0)
+ sta_priv->max_agg_bufsize =
+ LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+
+ /*
+ * Even though in theory the peer could have different
+ * aggregation reorder buffer sizes for different sessions,
+ * our ucode doesn't allow for that and has a global limit
+ * for each station. Therefore, use the minimum of all the
+ * aggregation sessions and our default value.
+ */
+ sta_priv->max_agg_bufsize =
+ min(sta_priv->max_agg_bufsize, buf_size);
+
+ if (cfg(priv)->ht_params &&
+ cfg(priv)->ht_params->use_rts_for_aggregation) {
+ /*
+		 * switch to RTS/CTS if it is the preferred protection
+		 * method for HT traffic
+ */
+
+ sta_priv->lq_sta.lq.general_params.flags |=
+ LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
}
+ priv->agg_tids_count++;
+ IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
+ priv->agg_tids_count);
- return iwl_trans_tx_agg_disable(trans(priv), vif_priv->ctx->ctxid,
- sta_id, tid);
+ sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
+ sta_priv->max_agg_bufsize;
+
+ IWL_DEBUG_HT(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
+ sta->addr, tid);
+
+ return iwl_send_lq_cmd(priv, ctx,
+ &sta_priv->lq_sta.lq, CMD_ASYNC, false);
+}
+
+static void iwlagn_check_ratid_empty(struct iwl_priv *priv, int sta_id, u8 tid)
+{
+ struct iwl_tid_data *tid_data = &priv->tid_data[sta_id][tid];
+ enum iwl_rxon_context_id ctx;
+ struct ieee80211_vif *vif;
+ u8 *addr;
+
+ lockdep_assert_held(&priv->shrd->sta_lock);
+
+ addr = priv->stations[sta_id].sta.sta.addr;
+ ctx = priv->stations[sta_id].ctxid;
+ vif = priv->contexts[ctx].vif;
+
+ switch (priv->tid_data[sta_id][tid].agg.state) {
+ case IWL_EMPTYING_HW_QUEUE_DELBA:
+ /* There are no packets for this RA / TID in the HW any more */
+ if (tid_data->agg.ssn == tid_data->next_reclaimed) {
+ IWL_DEBUG_TX_QUEUES(priv,
+ "Can continue DELBA flow ssn = next_recl ="
+ " %d", tid_data->next_reclaimed);
+ iwl_trans_tx_agg_disable(trans(priv), sta_id, tid);
+ tid_data->agg.state = IWL_AGG_OFF;
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
+ }
+ break;
+ case IWL_EMPTYING_HW_QUEUE_ADDBA:
+ /* There are no packets for this RA / TID in the HW any more */
+ if (tid_data->agg.ssn == tid_data->next_reclaimed) {
+ IWL_DEBUG_TX_QUEUES(priv,
+ "Can continue ADDBA flow ssn = next_recl ="
+ " %d", tid_data->next_reclaimed);
+ tid_data->agg.state = IWL_AGG_ON;
+ ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
+ }
+ break;
+ default:
+ break;
+ }
}
static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
@@ -568,10 +805,12 @@ static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
IWLAGN_TX_RES_TID_POS;
int sta_id = (tx_resp->ra_tid & IWLAGN_TX_RES_RA_MSK) >>
IWLAGN_TX_RES_RA_POS;
- struct iwl_ht_agg *agg = &priv->shrd->tid_data[sta_id][tid].agg;
+ struct iwl_ht_agg *agg = &priv->tid_data[sta_id][tid].agg;
u32 status = le16_to_cpu(tx_resp->status.status);
int i;
+ WARN_ON(tid == IWL_TID_NON_QOS);
+
if (agg->wait_for_ba)
IWL_DEBUG_TX_REPLY(priv,
"got tx response w/o block-ack\n");
@@ -584,8 +823,8 @@ static void iwl_rx_reply_tx_agg(struct iwl_priv *priv,
* notification again.
*/
if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
- priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist) {
IWL_DEBUG_COEX(priv, "receive reply tx w/ bt_kill\n");
}
@@ -758,7 +997,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
struct ieee80211_hdr *hdr;
u32 status = le16_to_cpu(tx_resp->status.status);
- u32 ssn = iwlagn_get_scd_ssn(tx_resp);
+ u16 ssn = iwlagn_get_scd_ssn(tx_resp);
int tid;
int sta_id;
int freed;
@@ -780,10 +1019,37 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
iwl_rx_reply_tx_agg(priv, tx_resp);
if (tx_resp->frame_count == 1) {
+ u16 next_reclaimed = le16_to_cpu(tx_resp->seq_ctl);
+ next_reclaimed = SEQ_TO_SN(next_reclaimed + 0x10);
+
+ if (is_agg) {
+ /* If this is an aggregation queue, we can rely on the
+ * ssn since the wifi sequence number corresponds to
+ * the index in the TFD ring (%256).
+ * The seq_ctl is the sequence control of the packet
+ * to which this Tx response relates. But if there is a
+ * hole in the bitmap of the BA we received, this Tx
+ * response may allow to reclaim the hole and all the
+ * subsequent packets that were already acked.
+ * In that case, seq_ctl != ssn, and the next packet
+ * to be reclaimed will be ssn and not seq_ctl.
+ */
+ next_reclaimed = ssn;
+ }
+
__skb_queue_head_init(&skbs);
+
+ if (tid != IWL_TID_NON_QOS) {
+ priv->tid_data[sta_id][tid].next_reclaimed =
+ next_reclaimed;
+ IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d",
+ next_reclaimed);
+ }
+
/*we can free until ssn % q.n_bd not inclusive */
- iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id,
- ssn, status, &skbs);
+ WARN_ON(iwl_trans_reclaim(trans(priv), sta_id, tid, txq_id,
+ ssn, status, &skbs));
+ iwlagn_check_ratid_empty(priv, sta_id, tid);
freed = 0;
while (!skb_queue_empty(&skbs)) {
skb = __skb_dequeue(&skbs);
@@ -803,7 +1069,8 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb,
iwl_is_associated_ctx(ctx) && ctx->vif &&
ctx->vif->type == NL80211_IFTYPE_STATION) {
ctx->last_tx_rejected = true;
- iwl_trans_stop_queue(trans(priv), txq_id);
+ iwl_trans_stop_queue(trans(priv), txq_id,
+ "Tx on passive channel");
IWL_DEBUG_TX_REPLY(priv,
"TXQ %d status %s (0x%08x) "
@@ -877,27 +1144,24 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
sta_id = ba_resp->sta_id;
tid = ba_resp->tid;
- agg = &priv->shrd->tid_data[sta_id][tid].agg;
+ agg = &priv->tid_data[sta_id][tid].agg;
spin_lock_irqsave(&priv->shrd->sta_lock, flags);
- if (unlikely(agg->txq_id != scd_flow)) {
- /*
- * FIXME: this is a uCode bug which need to be addressed,
- * log the information and return for now!
- * since it is possible happen very often and in order
- * not to fill the syslog, don't enable the logging by default
- */
- IWL_DEBUG_TX_REPLY(priv,
- "BA scd_flow %d does not match txq_id %d\n",
- scd_flow, agg->txq_id);
+ if (unlikely(!agg->wait_for_ba)) {
+ if (unlikely(ba_resp->bitmap))
+ IWL_ERR(priv, "Received BA when not expected\n");
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return 0;
}
- if (unlikely(!agg->wait_for_ba)) {
- if (unlikely(ba_resp->bitmap))
- IWL_ERR(priv, "Received BA when not expected\n");
+ __skb_queue_head_init(&reclaimed_skbs);
+
+ /* Release all TFDs before the SSN, i.e. all TFDs in front of
+ * block-ack window (we assume that they've been successfully
+ * transmitted ... if not, it's too late anyway). */
+ if (iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow,
+ ba_resp_scd_ssn, 0, &reclaimed_skbs)) {
spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
return 0;
}
@@ -909,11 +1173,9 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
ba_resp->sta_id);
IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, "
"scd_flow = %d, scd_ssn = %d\n",
- ba_resp->tid,
- ba_resp->seq_ctl,
+ ba_resp->tid, le16_to_cpu(ba_resp->seq_ctl),
(unsigned long long)le64_to_cpu(ba_resp->bitmap),
- ba_resp->scd_flow,
- ba_resp->scd_ssn);
+ scd_flow, ba_resp_scd_ssn);
/* Mark that the expected block-ack response arrived */
agg->wait_for_ba = false;
@@ -932,13 +1194,9 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
ba_resp->txed, ba_resp->txed_2_done);
- __skb_queue_head_init(&reclaimed_skbs);
+ priv->tid_data[sta_id][tid].next_reclaimed = ba_resp_scd_ssn;
- /* Release all TFDs before the SSN, i.e. all TFDs in front of
- * block-ack window (we assume that they've been successfully
- * transmitted ... if not, it's too late anyway). */
- iwl_trans_reclaim(trans(priv), sta_id, tid, scd_flow, ba_resp_scd_ssn,
- 0, &reclaimed_skbs);
+ iwlagn_check_ratid_empty(priv, sta_id, tid);
freed = 0;
while (!skb_queue_empty(&reclaimed_skbs)) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index e0e9a3d..b5c7c5f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -30,7 +30,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
@@ -44,6 +43,7 @@
#include <asm/div64.h>
#include "iwl-eeprom.h"
+#include "iwl-wifi.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
@@ -367,7 +367,7 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
u32 num_wraps; /* # times uCode wrapped to top of log */
u32 next_entry; /* index of next entry to be written by uCode */
- base = priv->device_pointers.error_event_table;
+ base = priv->shrd->device_pointers.error_event_table;
if (iwlagn_hw_valid_rtc_data_addr(base)) {
capacity = iwl_read_targ_mem(bus(priv), base);
num_wraps = iwl_read_targ_mem(bus(priv),
@@ -452,52 +452,6 @@ static void iwl_bg_tx_flush(struct work_struct *work)
iwlagn_dev_txfifo_flush(priv, IWL_DROP_ALL);
}
-/******************************************************************************
- *
- * uCode download functions
- *
- ******************************************************************************/
-
-static void iwl_free_fw_desc(struct iwl_priv *priv, struct fw_desc *desc)
-{
- if (desc->v_addr)
- dma_free_coherent(bus(priv)->dev, desc->len,
- desc->v_addr, desc->p_addr);
- desc->v_addr = NULL;
- desc->len = 0;
-}
-
-static void iwl_free_fw_img(struct iwl_priv *priv, struct fw_img *img)
-{
- iwl_free_fw_desc(priv, &img->code);
- iwl_free_fw_desc(priv, &img->data);
-}
-
-static void iwl_dealloc_ucode(struct iwl_priv *priv)
-{
- iwl_free_fw_img(priv, &priv->ucode_rt);
- iwl_free_fw_img(priv, &priv->ucode_init);
- iwl_free_fw_img(priv, &priv->ucode_wowlan);
-}
-
-static int iwl_alloc_fw_desc(struct iwl_priv *priv, struct fw_desc *desc,
- const void *data, size_t len)
-{
- if (!len) {
- desc->v_addr = NULL;
- return -EINVAL;
- }
-
- desc->v_addr = dma_alloc_coherent(bus(priv)->dev, len,
- &desc->p_addr, GFP_KERNEL);
- if (!desc->v_addr)
- return -ENOMEM;
-
- desc->len = len;
- memcpy(desc->v_addr, data, len);
- return 0;
-}
-
static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
{
int i;
@@ -555,23 +509,14 @@ static void iwl_init_context(struct iwl_priv *priv, u32 ucode_flags)
BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
}
-
-struct iwlagn_ucode_capabilities {
- u32 max_probe_length;
- u32 standard_phy_calibration_size;
- u32 flags;
-};
-
static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context);
-static int iwlagn_mac_setup_register(struct iwl_priv *priv,
- struct iwlagn_ucode_capabilities *capa);
#define UCODE_EXPERIMENTAL_INDEX 100
#define UCODE_EXPERIMENTAL_TAG "exp"
static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
{
- const char *name_pre = priv->cfg->fw_name_pre;
+ const char *name_pre = cfg(priv)->fw_name_pre;
char tag[8];
if (first) {
@@ -580,14 +525,14 @@ static int __must_check iwl_request_firmware(struct iwl_priv *priv, bool first)
strcpy(tag, UCODE_EXPERIMENTAL_TAG);
} else if (priv->fw_index == UCODE_EXPERIMENTAL_INDEX) {
#endif
- priv->fw_index = priv->cfg->ucode_api_max;
+ priv->fw_index = cfg(priv)->ucode_api_max;
sprintf(tag, "%d", priv->fw_index);
} else {
priv->fw_index--;
sprintf(tag, "%d", priv->fw_index);
}
- if (priv->fw_index < priv->cfg->ucode_api_min) {
+ if (priv->fw_index < cfg(priv)->ucode_api_min) {
IWL_ERR(priv, "no suitable firmware found!\n");
return -ENOENT;
}
@@ -892,9 +837,9 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
struct iwl_ucode_header *ucode;
int err;
struct iwlagn_firmware_pieces pieces;
- const unsigned int api_max = priv->cfg->ucode_api_max;
- unsigned int api_ok = priv->cfg->ucode_api_ok;
- const unsigned int api_min = priv->cfg->ucode_api_min;
+ const unsigned int api_max = cfg(priv)->ucode_api_max;
+ unsigned int api_ok = cfg(priv)->ucode_api_ok;
+ const unsigned int api_min = cfg(priv)->ucode_api_min;
u32 api_ver;
char buildstr[25];
u32 build;
@@ -1040,30 +985,32 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
/* Runtime instructions and 2 copies of data:
* 1) unmodified from disk
* 2) backup cache for save/restore during power-downs */
- if (iwl_alloc_fw_desc(priv, &priv->ucode_rt.code,
+ if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_rt.code,
pieces.inst, pieces.inst_size))
goto err_pci_alloc;
- if (iwl_alloc_fw_desc(priv, &priv->ucode_rt.data,
+ if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_rt.data,
pieces.data, pieces.data_size))
goto err_pci_alloc;
/* Initialization instructions and data */
if (pieces.init_size && pieces.init_data_size) {
- if (iwl_alloc_fw_desc(priv, &priv->ucode_init.code,
+ if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_init.code,
pieces.init, pieces.init_size))
goto err_pci_alloc;
- if (iwl_alloc_fw_desc(priv, &priv->ucode_init.data,
+ if (iwl_alloc_fw_desc(bus(priv), &trans(priv)->ucode_init.data,
pieces.init_data, pieces.init_data_size))
goto err_pci_alloc;
}
/* WoWLAN instructions and data */
if (pieces.wowlan_inst_size && pieces.wowlan_data_size) {
- if (iwl_alloc_fw_desc(priv, &priv->ucode_wowlan.code,
+ if (iwl_alloc_fw_desc(bus(priv),
+ &trans(priv)->ucode_wowlan.code,
pieces.wowlan_inst,
pieces.wowlan_inst_size))
goto err_pci_alloc;
- if (iwl_alloc_fw_desc(priv, &priv->ucode_wowlan.data,
+ if (iwl_alloc_fw_desc(bus(priv),
+ &trans(priv)->ucode_wowlan.data,
pieces.wowlan_data,
pieces.wowlan_data_size))
goto err_pci_alloc;
@@ -1081,20 +1028,23 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
priv->init_evtlog_size = (pieces.init_evtlog_size - 16)/12;
else
priv->init_evtlog_size =
- priv->cfg->base_params->max_event_log_size;
+ cfg(priv)->base_params->max_event_log_size;
priv->init_errlog_ptr = pieces.init_errlog_ptr;
priv->inst_evtlog_ptr = pieces.inst_evtlog_ptr;
if (pieces.inst_evtlog_size)
priv->inst_evtlog_size = (pieces.inst_evtlog_size - 16)/12;
else
priv->inst_evtlog_size =
- priv->cfg->base_params->max_event_log_size;
+ cfg(priv)->base_params->max_event_log_size;
priv->inst_errlog_ptr = pieces.inst_errlog_ptr;
+#ifndef CONFIG_IWLWIFI_P2P
+ ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
+#endif
priv->new_scan_threshold_behaviour =
!!(ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWSCAN);
- if (!(priv->cfg->sku & EEPROM_SKU_CAP_IPAN_ENABLE))
+ if (!(cfg(priv)->sku & EEPROM_SKU_CAP_IPAN_ENABLE))
ucode_capa.flags &= ~IWL_UCODE_TLV_FLAGS_PAN;
/*
@@ -1111,7 +1061,6 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
priv->sta_key_max_num = STA_KEY_MAX_NUM;
priv->shrd->cmd_queue = IWL_DEFAULT_CMD_QUEUE_NUM;
}
-
/*
* figure out the offset of chain noise reset and gain commands
* base on the size of standard phy calibration commands table size
@@ -1156,7 +1105,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
err_pci_alloc:
IWL_ERR(priv, "failed to allocate pci memory\n");
- iwl_dealloc_ucode(priv);
+ iwl_dealloc_ucode(trans(priv));
out_unbind:
complete(&priv->firmware_loading_complete);
device_release_driver(bus(priv)->dev);
@@ -1176,7 +1125,7 @@ static void iwl_rf_kill_ct_config(struct iwl_priv *priv)
spin_unlock_irqrestore(&priv->shrd->lock, flags);
priv->thermal_throttle.ct_kill_toggle = false;
- if (priv->cfg->base_params->support_ct_kill_exit) {
+ if (cfg(priv)->base_params->support_ct_kill_exit) {
adv_cmd.critical_temperature_enter =
cpu_to_le32(hw_params(priv).ct_kill_threshold);
adv_cmd.critical_temperature_exit =
@@ -1271,10 +1220,10 @@ int iwl_alive_start(struct iwl_priv *priv)
return -ERFKILL;
/* download priority table before any calibration request */
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist) {
/* Configure Bluetooth device coexistence support */
- if (priv->cfg->bt_params->bt_sco_disable)
+ if (cfg(priv)->bt_params->bt_sco_disable)
priv->bt_enable_pspoll = false;
else
priv->bt_enable_pspoll = true;
@@ -1286,14 +1235,14 @@ int iwl_alive_start(struct iwl_priv *priv)
priv->bt_valid = IWLAGN_BT_VALID_ENABLE_FLAGS;
priv->cur_rssi_ctx = NULL;
- iwlagn_send_prio_tbl(priv);
+ iwl_send_prio_tbl(trans(priv));
/* FIXME: w/a to force change uCode BT state machine */
- ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
+ ret = iwl_send_bt_env(trans(priv), IWL_BT_COEX_ENV_OPEN,
BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
if (ret)
return ret;
- ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_CLOSE,
+ ret = iwl_send_bt_env(trans(priv), IWL_BT_COEX_ENV_CLOSE,
BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
if (ret)
return ret;
@@ -1304,16 +1253,17 @@ int iwl_alive_start(struct iwl_priv *priv)
iwl_send_bt_config(priv);
}
- if (hw_params(priv).calib_rt_cfg)
- iwlagn_send_calib_cfg_rt(priv,
- hw_params(priv).calib_rt_cfg);
+ /*
+ * Perform runtime calibrations, including DC calibration.
+ */
+ iwlagn_send_calib_cfg_rt(priv, IWL_CALIB_CFG_DC_IDX);
ieee80211_wake_queues(priv->hw);
priv->active_rate = IWL_RATES_MASK;
/* Configure Tx antenna selection based on H/W config */
- iwlagn_send_tx_ant_config(priv, priv->cfg->valid_tx_ant);
+ iwlagn_send_tx_ant_config(priv, cfg(priv)->valid_tx_ant);
if (iwl_is_associated_ctx(ctx) && !priv->shrd->wowlan) {
struct iwl_rxon_cmd *active_rxon =
@@ -1352,7 +1302,7 @@ int iwl_alive_start(struct iwl_priv *priv)
static void iwl_cancel_deferred_work(struct iwl_priv *priv);
-static void __iwl_down(struct iwl_priv *priv)
+void __iwl_down(struct iwl_priv *priv)
{
int exit_pending;
@@ -1382,9 +1332,9 @@ static void __iwl_down(struct iwl_priv *priv)
priv->bt_status = 0;
priv->cur_rssi_ctx = NULL;
priv->bt_is_sco = 0;
- if (priv->cfg->bt_params)
+ if (cfg(priv)->bt_params)
priv->bt_traffic_load =
- priv->cfg->bt_params->bt_init_traffic_load;
+ cfg(priv)->bt_params->bt_init_traffic_load;
else
priv->bt_traffic_load = 0;
priv->bt_full_concurrent = false;
@@ -1415,7 +1365,7 @@ static void __iwl_down(struct iwl_priv *priv)
priv->beacon_skb = NULL;
}
-static void iwl_down(struct iwl_priv *priv)
+void iwl_down(struct iwl_priv *priv)
{
mutex_lock(&priv->shrd->mutex);
__iwl_down(priv);
@@ -1424,57 +1374,6 @@ static void iwl_down(struct iwl_priv *priv)
iwl_cancel_deferred_work(priv);
}
-#define MAX_HW_RESTARTS 5
-
-static int __iwl_up(struct iwl_priv *priv)
-{
- struct iwl_rxon_context *ctx;
- int ret;
-
- lockdep_assert_held(&priv->shrd->mutex);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
- IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
- return -EIO;
- }
-
- for_each_context(priv, ctx) {
- ret = iwlagn_alloc_bcast_station(priv, ctx);
- if (ret) {
- iwl_dealloc_bcast_stations(priv);
- return ret;
- }
- }
-
- ret = iwlagn_run_init_ucode(priv);
- if (ret) {
- IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret);
- goto error;
- }
-
- ret = iwlagn_load_ucode_wait_alive(priv,
- &priv->ucode_rt,
- IWL_UCODE_REGULAR);
- if (ret) {
- IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
- goto error;
- }
-
- ret = iwl_alive_start(priv);
- if (ret)
- goto error;
- return 0;
-
- error:
- set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
- __iwl_down(priv);
- clear_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
-
- IWL_ERR(priv, "Unable to initialize device.\n");
- return ret;
-}
-
-
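
__iwl_up(), removed above (presumably relocated elsewhere in this series), captures the device bring-up order that the rest of the file assumes. A condensed view of that sequence:

/*
 *   1. allocate a broadcast station for every RXON context
 *   2. run the INIT uCode (iwlagn_run_init_ucode)
 *   3. load the runtime uCode and wait for ALIVE
 *      (iwlagn_load_ucode_wait_alive)
 *   4. iwl_alive_start() to finish configuration
 *   on any failure: set STATUS_EXIT_PENDING, __iwl_down(), clear it again
 */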
/*****************************************************************************
*
* Workqueue callbacks
@@ -1502,7 +1401,7 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
mutex_unlock(&priv->shrd->mutex);
}
-static void iwlagn_prepare_restart(struct iwl_priv *priv)
+void iwlagn_prepare_restart(struct iwl_priv *priv)
{
struct iwl_rxon_context *ctx;
bool bt_full_concurrent;
@@ -1559,1172 +1458,8 @@ static void iwl_bg_restart(struct work_struct *data)
}
}
-/*****************************************************************************
- *
- * mac80211 entry point functions
- *
- *****************************************************************************/
-
-static const struct ieee80211_iface_limit iwlagn_sta_ap_limits[] = {
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_AP),
- },
-};
-
-static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = {
- {
- .max = 2,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
-};
-
-static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = {
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_AP),
- },
-};
-
-static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = {
- {
- .max = 2,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
- },
-};
-
-static const struct ieee80211_iface_combination
-iwlagn_iface_combinations_dualmode[] = {
- { .num_different_channels = 1,
- .max_interfaces = 2,
- .beacon_int_infra_match = true,
- .limits = iwlagn_sta_ap_limits,
- .n_limits = ARRAY_SIZE(iwlagn_sta_ap_limits),
- },
- { .num_different_channels = 1,
- .max_interfaces = 2,
- .limits = iwlagn_2sta_limits,
- .n_limits = ARRAY_SIZE(iwlagn_2sta_limits),
- },
-};
-
-static const struct ieee80211_iface_combination
-iwlagn_iface_combinations_p2p[] = {
- { .num_different_channels = 1,
- .max_interfaces = 2,
- .beacon_int_infra_match = true,
- .limits = iwlagn_p2p_sta_go_limits,
- .n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits),
- },
- { .num_different_channels = 1,
- .max_interfaces = 2,
- .limits = iwlagn_p2p_2sta_limits,
- .n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits),
- },
-};
-
-/*
- * Not a mac80211 entry point function, but it fits in with all the
- * other mac80211 functions grouped here.
- */
-static int iwlagn_mac_setup_register(struct iwl_priv *priv,
- struct iwlagn_ucode_capabilities *capa)
-{
- int ret;
- struct ieee80211_hw *hw = priv->hw;
- struct iwl_rxon_context *ctx;
-
- hw->rate_control_algorithm = "iwl-agn-rs";
-
- /* Tell mac80211 our characteristics */
- hw->flags = IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_NEED_DTIM_PERIOD |
- IEEE80211_HW_SPECTRUM_MGMT |
- IEEE80211_HW_REPORTS_TX_ACK_STATUS;
-
- /*
- * Including the following line will crash some AP's. This
- * workaround removes the stimulus which causes the crash until
- * the AP software can be fixed.
- hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
- */
-
- hw->flags |= IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
-
- if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
- hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
- IEEE80211_HW_SUPPORTS_STATIC_SMPS;
-
- if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP)
- hw->flags |= IEEE80211_HW_MFP_CAPABLE;
-
- hw->sta_data_size = sizeof(struct iwl_station_priv);
- hw->vif_data_size = sizeof(struct iwl_vif_priv);
-
- for_each_context(priv, ctx) {
- hw->wiphy->interface_modes |= ctx->interface_modes;
- hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
- }
-
- BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
-
- if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) {
- hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p;
- hw->wiphy->n_iface_combinations =
- ARRAY_SIZE(iwlagn_iface_combinations_p2p);
- } else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
- hw->wiphy->iface_combinations = iwlagn_iface_combinations_dualmode;
- hw->wiphy->n_iface_combinations =
- ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
- }
-
- hw->wiphy->max_remain_on_channel_duration = 1000;
-
- hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
- WIPHY_FLAG_DISABLE_BEACON_HINTS |
- WIPHY_FLAG_IBSS_RSN;
-
- if (priv->ucode_wowlan.code.len && device_can_wakeup(bus(priv)->dev)) {
- hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
- WIPHY_WOWLAN_DISCONNECT |
- WIPHY_WOWLAN_EAP_IDENTITY_REQ |
- WIPHY_WOWLAN_RFKILL_RELEASE;
- if (!iwlagn_mod_params.sw_crypto)
- hw->wiphy->wowlan.flags |=
- WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
- WIPHY_WOWLAN_GTK_REKEY_FAILURE;
-
- hw->wiphy->wowlan.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS;
- hw->wiphy->wowlan.pattern_min_len =
- IWLAGN_WOWLAN_MIN_PATTERN_LEN;
- hw->wiphy->wowlan.pattern_max_len =
- IWLAGN_WOWLAN_MAX_PATTERN_LEN;
- }
-
- if (iwlagn_mod_params.power_save)
- hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
- else
- hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
-
- hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
- /* we create the 802.11 header and a zero-length SSID element */
- hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2;
-
- /* Default value; 4 EDCA QOS priorities */
- hw->queues = 4;
-
- hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
-
- if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
- priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &priv->bands[IEEE80211_BAND_2GHZ];
- if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
- priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &priv->bands[IEEE80211_BAND_5GHZ];
-
- iwl_leds_init(priv);
-
- ret = ieee80211_register_hw(priv->hw);
- if (ret) {
- IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
- return ret;
- }
- priv->mac80211_registered = 1;
-
- return 0;
-}
-
-
-static int iwlagn_mac_start(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
- int ret;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- /* we should be verifying the device is ready to be opened */
- mutex_lock(&priv->shrd->mutex);
- ret = __iwl_up(priv);
- mutex_unlock(&priv->shrd->mutex);
- if (ret)
- return ret;
-
- IWL_DEBUG_INFO(priv, "Start UP work done.\n");
-
- /* Now we should be done, and the READY bit should be set. */
- if (WARN_ON(!test_bit(STATUS_READY, &priv->shrd->status)))
- ret = -EIO;
-
- iwlagn_led_enable(priv);
-
- priv->is_open = 1;
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
-}
-
-static void iwlagn_mac_stop(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (!priv->is_open)
- return;
-
- priv->is_open = 0;
-
- iwl_down(priv);
-
- flush_workqueue(priv->shrd->workqueue);
-
- /* User space software may expect getting rfkill changes
- * even if interface is down */
- iwl_write32(bus(priv), CSR_INT, 0xFFFFFFFF);
- iwl_enable_rfkill_int(priv);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int iwlagn_send_patterns(struct iwl_priv *priv,
- struct cfg80211_wowlan *wowlan)
-{
- struct iwlagn_wowlan_patterns_cmd *pattern_cmd;
- struct iwl_host_cmd cmd = {
- .id = REPLY_WOWLAN_PATTERNS,
- .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
- .flags = CMD_SYNC,
- };
- int i, err;
-
- if (!wowlan->n_patterns)
- return 0;
-
- cmd.len[0] = sizeof(*pattern_cmd) +
- wowlan->n_patterns * sizeof(struct iwlagn_wowlan_pattern);
-
- pattern_cmd = kmalloc(cmd.len[0], GFP_KERNEL);
- if (!pattern_cmd)
- return -ENOMEM;
- pattern_cmd->n_patterns = cpu_to_le32(wowlan->n_patterns);
- for (i = 0; i < wowlan->n_patterns; i++) {
- int mask_len = DIV_ROUND_UP(wowlan->patterns[i].pattern_len, 8);
-
- memcpy(&pattern_cmd->patterns[i].mask,
- wowlan->patterns[i].mask, mask_len);
- memcpy(&pattern_cmd->patterns[i].pattern,
- wowlan->patterns[i].pattern,
- wowlan->patterns[i].pattern_len);
- pattern_cmd->patterns[i].mask_size = mask_len;
- pattern_cmd->patterns[i].pattern_size =
- wowlan->patterns[i].pattern_len;
- }
-
- cmd.data[0] = pattern_cmd;
- err = iwl_trans_send_cmd(trans(priv), &cmd);
- kfree(pattern_cmd);
- return err;
-}
-#endif
-
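
iwlagn_send_patterns() above builds a variable-length host command. The layout implied by the code, with one flexible array of patterns and a bitmask carrying one bit per pattern byte, can be sketched as:

/*
 * REPLY_WOWLAN_PATTERNS payload, as inferred from the code above
 * (field order and exact types are assumptions):
 *
 *   __le32 n_patterns;
 *   per pattern: mask[]     - DIV_ROUND_UP(pattern_len, 8) bytes,
 *                             one bit per byte of the pattern
 *                pattern[]  - the literal bytes to match
 *                mask_size, pattern_size
 */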
-static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_gtk_rekey_data *data)
-{
- struct iwl_priv *priv = hw->priv;
-
- if (iwlagn_mod_params.sw_crypto)
- return;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
-
- if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif)
- goto out;
-
- memcpy(priv->kek, data->kek, NL80211_KEK_LEN);
- memcpy(priv->kck, data->kck, NL80211_KCK_LEN);
- priv->replay_ctr = cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr));
- priv->have_rekey_data = true;
-
- out:
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-struct wowlan_key_data {
- struct iwl_rxon_context *ctx;
- struct iwlagn_wowlan_rsc_tsc_params_cmd *rsc_tsc;
- struct iwlagn_wowlan_tkip_params_cmd *tkip;
- const u8 *bssid;
- bool error, use_rsc_tsc, use_tkip;
-};
-
-#ifdef CONFIG_PM_SLEEP
-static void iwlagn_convert_p1k(u16 *p1k, __le16 *out)
-{
- int i;
-
- for (i = 0; i < IWLAGN_P1K_SIZE; i++)
- out[i] = cpu_to_le16(p1k[i]);
-}
-
-static void iwlagn_wowlan_program_keys(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key,
- void *_data)
-{
- struct iwl_priv *priv = hw->priv;
- struct wowlan_key_data *data = _data;
- struct iwl_rxon_context *ctx = data->ctx;
- struct aes_sc *aes_sc, *aes_tx_sc = NULL;
- struct tkip_sc *tkip_sc, *tkip_tx_sc = NULL;
- struct iwlagn_p1k_cache *rx_p1ks;
- u8 *rx_mic_key;
- struct ieee80211_key_seq seq;
- u32 cur_rx_iv32 = 0;
- u16 p1k[IWLAGN_P1K_SIZE];
- int ret, i;
-
- mutex_lock(&priv->shrd->mutex);
-
- if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- key->cipher == WLAN_CIPHER_SUITE_WEP104) &&
- !sta && !ctx->key_mapping_keys)
- ret = iwl_set_default_wep_key(priv, ctx, key);
- else
- ret = iwl_set_dynamic_key(priv, ctx, key, sta);
-
- if (ret) {
- IWL_ERR(priv, "Error setting key during suspend!\n");
- data->error = true;
- }
-
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_TKIP:
- if (sta) {
- tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.unicast_rsc;
- tkip_tx_sc = &data->rsc_tsc->all_tsc_rsc.tkip.tsc;
-
- rx_p1ks = data->tkip->rx_uni;
-
- ieee80211_get_key_tx_seq(key, &seq);
- tkip_tx_sc->iv16 = cpu_to_le16(seq.tkip.iv16);
- tkip_tx_sc->iv32 = cpu_to_le32(seq.tkip.iv32);
-
- ieee80211_get_tkip_p1k_iv(key, seq.tkip.iv32, p1k);
- iwlagn_convert_p1k(p1k, data->tkip->tx.p1k);
-
- memcpy(data->tkip->mic_keys.tx,
- &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
- IWLAGN_MIC_KEY_SIZE);
-
- rx_mic_key = data->tkip->mic_keys.rx_unicast;
- } else {
- tkip_sc = data->rsc_tsc->all_tsc_rsc.tkip.multicast_rsc;
- rx_p1ks = data->tkip->rx_multi;
- rx_mic_key = data->tkip->mic_keys.rx_mcast;
- }
-
- /*
- * For non-QoS this relies on the fact that both the uCode and
- * mac80211 use TID 0 (as they need to to avoid replay attacks)
- * for checking the IV in the frames.
- */
- for (i = 0; i < IWLAGN_NUM_RSC; i++) {
- ieee80211_get_key_rx_seq(key, i, &seq);
- tkip_sc[i].iv16 = cpu_to_le16(seq.tkip.iv16);
- tkip_sc[i].iv32 = cpu_to_le32(seq.tkip.iv32);
- /* wrapping isn't allowed, AP must rekey */
- if (seq.tkip.iv32 > cur_rx_iv32)
- cur_rx_iv32 = seq.tkip.iv32;
- }
-
- ieee80211_get_tkip_rx_p1k(key, data->bssid, cur_rx_iv32, p1k);
- iwlagn_convert_p1k(p1k, rx_p1ks[0].p1k);
- ieee80211_get_tkip_rx_p1k(key, data->bssid,
- cur_rx_iv32 + 1, p1k);
- iwlagn_convert_p1k(p1k, rx_p1ks[1].p1k);
-
- memcpy(rx_mic_key,
- &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
- IWLAGN_MIC_KEY_SIZE);
-
- data->use_tkip = true;
- data->use_rsc_tsc = true;
- break;
- case WLAN_CIPHER_SUITE_CCMP:
- if (sta) {
- u8 *pn = seq.ccmp.pn;
-
- aes_sc = data->rsc_tsc->all_tsc_rsc.aes.unicast_rsc;
- aes_tx_sc = &data->rsc_tsc->all_tsc_rsc.aes.tsc;
-
- ieee80211_get_key_tx_seq(key, &seq);
- aes_tx_sc->pn = cpu_to_le64(
- (u64)pn[5] |
- ((u64)pn[4] << 8) |
- ((u64)pn[3] << 16) |
- ((u64)pn[2] << 24) |
- ((u64)pn[1] << 32) |
- ((u64)pn[0] << 40));
- } else
- aes_sc = data->rsc_tsc->all_tsc_rsc.aes.multicast_rsc;
-
- /*
- * For non-QoS this relies on the fact that both the uCode and
- * mac80211 use TID 0 for checking the IV in the frames.
- */
- for (i = 0; i < IWLAGN_NUM_RSC; i++) {
- u8 *pn = seq.ccmp.pn;
-
- ieee80211_get_key_rx_seq(key, i, &seq);
- aes_sc->pn = cpu_to_le64(
- (u64)pn[5] |
- ((u64)pn[4] << 8) |
- ((u64)pn[3] << 16) |
- ((u64)pn[2] << 24) |
- ((u64)pn[1] << 32) |
- ((u64)pn[0] << 40));
- }
- data->use_rsc_tsc = true;
- break;
- }
-
- mutex_unlock(&priv->shrd->mutex);
-}
-
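
The CCMP branch above folds the six packet-number bytes into a single little-endian 64-bit counter for the uCode. A self-contained sketch of that packing (helper name is illustrative, not from the driver):

static __le64 example_pack_ccmp_pn(const u8 pn[6])
{
	/* pn[0] is the most significant byte of the 48-bit PN */
	return cpu_to_le64((u64)pn[5] |
			   ((u64)pn[4] << 8) |
			   ((u64)pn[3] << 16) |
			   ((u64)pn[2] << 24) |
			   ((u64)pn[1] << 32) |
			   ((u64)pn[0] << 40));
}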
-static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
- struct cfg80211_wowlan *wowlan)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwlagn_wowlan_wakeup_filter_cmd wakeup_filter_cmd;
- struct iwl_rxon_cmd rxon;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct iwlagn_wowlan_kek_kck_material_cmd kek_kck_cmd;
- struct iwlagn_wowlan_tkip_params_cmd tkip_cmd = {};
- struct wowlan_key_data key_data = {
- .ctx = ctx,
- .bssid = ctx->active.bssid_addr,
- .use_rsc_tsc = false,
- .tkip = &tkip_cmd,
- .use_tkip = false,
- };
- int ret, i;
- u16 seq;
-
- if (WARN_ON(!wowlan))
- return -EINVAL;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
-
- /* Don't attempt WoWLAN when not associated, tear down instead. */
- if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION ||
- !iwl_is_associated_ctx(ctx)) {
- ret = 1;
- goto out;
- }
-
- key_data.rsc_tsc = kzalloc(sizeof(*key_data.rsc_tsc), GFP_KERNEL);
- if (!key_data.rsc_tsc) {
- ret = -ENOMEM;
- goto out;
- }
-
- memset(&wakeup_filter_cmd, 0, sizeof(wakeup_filter_cmd));
-
- /*
- * We know the last used seqno, and the uCode expects to know that
- * one, it will increment before TX.
- */
- seq = le16_to_cpu(priv->last_seq_ctl) & IEEE80211_SCTL_SEQ;
- wakeup_filter_cmd.non_qos_seq = cpu_to_le16(seq);
-
- /*
- * For QoS counters, we store the one to use next, so subtract 0x10
- * since the uCode will add 0x10 before using the value.
- */
- for (i = 0; i < 8; i++) {
- seq = priv->shrd->tid_data[IWL_AP_ID][i].seq_number;
- seq -= 0x10;
- wakeup_filter_cmd.qos_seq[i] = cpu_to_le16(seq);
- }
-
- if (wowlan->disconnect)
- wakeup_filter_cmd.enabled |=
- cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_BEACON_MISS |
- IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE);
- if (wowlan->magic_pkt)
- wakeup_filter_cmd.enabled |=
- cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_MAGIC_PACKET);
- if (wowlan->gtk_rekey_failure)
- wakeup_filter_cmd.enabled |=
- cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
- if (wowlan->eap_identity_req)
- wakeup_filter_cmd.enabled |=
- cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ);
- if (wowlan->four_way_handshake)
- wakeup_filter_cmd.enabled |=
- cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
- if (wowlan->rfkill_release)
- wakeup_filter_cmd.enabled |=
- cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_RFKILL);
- if (wowlan->n_patterns)
- wakeup_filter_cmd.enabled |=
- cpu_to_le32(IWLAGN_WOWLAN_WAKEUP_PATTERN_MATCH);
-
- iwl_scan_cancel_timeout(priv, 200);
-
- memcpy(&rxon, &ctx->active, sizeof(rxon));
-
- iwl_trans_stop_device(trans(priv));
-
- priv->shrd->wowlan = true;
-
- ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_wowlan,
- IWL_UCODE_WOWLAN);
- if (ret)
- goto error;
-
- /* now configure WoWLAN ucode */
- ret = iwl_alive_start(priv);
- if (ret)
- goto error;
-
- memcpy(&ctx->staging, &rxon, sizeof(rxon));
- ret = iwlagn_commit_rxon(priv, ctx);
- if (ret)
- goto error;
-
- ret = iwl_power_update_mode(priv, true);
- if (ret)
- goto error;
-
- if (!iwlagn_mod_params.sw_crypto) {
- /* mark all keys clear */
- priv->ucode_key_table = 0;
- ctx->key_mapping_keys = 0;
-
- /*
- * This needs to be unlocked due to lock ordering
- * constraints. Since we're in the suspend path
- * that isn't really a problem though.
- */
- mutex_unlock(&priv->shrd->mutex);
- ieee80211_iter_keys(priv->hw, ctx->vif,
- iwlagn_wowlan_program_keys,
- &key_data);
- mutex_lock(&priv->shrd->mutex);
- if (key_data.error) {
- ret = -EIO;
- goto error;
- }
-
- if (key_data.use_rsc_tsc) {
- struct iwl_host_cmd rsc_tsc_cmd = {
- .id = REPLY_WOWLAN_TSC_RSC_PARAMS,
- .flags = CMD_SYNC,
- .data[0] = key_data.rsc_tsc,
- .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
- .len[0] = sizeof(*key_data.rsc_tsc),
- };
-
- ret = iwl_trans_send_cmd(trans(priv), &rsc_tsc_cmd);
- if (ret)
- goto error;
- }
-
- if (key_data.use_tkip) {
- ret = iwl_trans_send_cmd_pdu(trans(priv),
- REPLY_WOWLAN_TKIP_PARAMS,
- CMD_SYNC, sizeof(tkip_cmd),
- &tkip_cmd);
- if (ret)
- goto error;
- }
-
- if (priv->have_rekey_data) {
- memset(&kek_kck_cmd, 0, sizeof(kek_kck_cmd));
- memcpy(kek_kck_cmd.kck, priv->kck, NL80211_KCK_LEN);
- kek_kck_cmd.kck_len = cpu_to_le16(NL80211_KCK_LEN);
- memcpy(kek_kck_cmd.kek, priv->kek, NL80211_KEK_LEN);
- kek_kck_cmd.kek_len = cpu_to_le16(NL80211_KEK_LEN);
- kek_kck_cmd.replay_ctr = priv->replay_ctr;
-
- ret = iwl_trans_send_cmd_pdu(trans(priv),
- REPLY_WOWLAN_KEK_KCK_MATERIAL,
- CMD_SYNC, sizeof(kek_kck_cmd),
- &kek_kck_cmd);
- if (ret)
- goto error;
- }
- }
-
- ret = iwl_trans_send_cmd_pdu(trans(priv), REPLY_WOWLAN_WAKEUP_FILTER,
- CMD_SYNC, sizeof(wakeup_filter_cmd),
- &wakeup_filter_cmd);
- if (ret)
- goto error;
-
- ret = iwlagn_send_patterns(priv, wowlan);
- if (ret)
- goto error;
-
- device_set_wakeup_enable(bus(priv)->dev, true);
-
- /* Now let the ucode operate on its own */
- iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
- CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
-
- goto out;
-
- error:
- priv->shrd->wowlan = false;
- iwlagn_prepare_restart(priv);
- ieee80211_restart_hw(priv->hw);
- out:
- mutex_unlock(&priv->shrd->mutex);
- kfree(key_data.rsc_tsc);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return ret;
-}
-
-static int iwlagn_mac_resume(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct ieee80211_vif *vif;
- unsigned long flags;
- u32 base, status = 0xffffffff;
- int ret = -EIO;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
-
- iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
-
- base = priv->device_pointers.error_event_table;
- if (iwlagn_hw_valid_rtc_data_addr(base)) {
- spin_lock_irqsave(&bus(priv)->reg_lock, flags);
- ret = iwl_grab_nic_access_silent(bus(priv));
- if (ret == 0) {
- iwl_write32(bus(priv), HBUS_TARG_MEM_RADDR, base);
- status = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
- iwl_release_nic_access(bus(priv));
- }
- spin_unlock_irqrestore(&bus(priv)->reg_lock, flags);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (ret == 0) {
- if (!priv->wowlan_sram)
- priv->wowlan_sram =
- kzalloc(priv->ucode_wowlan.data.len,
- GFP_KERNEL);
-
- if (priv->wowlan_sram)
- _iwl_read_targ_mem_words(
- bus(priv), 0x800000, priv->wowlan_sram,
- priv->ucode_wowlan.data.len / 4);
- }
-#endif
- }
-
- /* we'll clear ctx->vif during iwlagn_prepare_restart() */
- vif = ctx->vif;
-
- priv->shrd->wowlan = false;
-
- device_set_wakeup_enable(bus(priv)->dev, false);
-
- iwlagn_prepare_restart(priv);
-
- memset((void *)&ctx->active, 0, sizeof(ctx->active));
- iwl_connection_init_rx_config(priv, ctx);
- iwlagn_set_rxon_chain(priv, ctx);
-
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- ieee80211_resume_disconnect(vif);
-
- return 1;
-}
-#endif
-
-static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
-{
- struct iwl_priv *priv = hw->priv;
-
- IWL_DEBUG_MACDUMP(priv, "enter\n");
-
- IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
- ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
-
- if (iwlagn_tx_skb(priv, skb))
- dev_kfree_skb_any(skb);
-
- IWL_DEBUG_MACDUMP(priv, "leave\n");
-}
-
-static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_key_conf *keyconf,
- struct ieee80211_sta *sta,
- u32 iv32, u16 *phase1key)
-{
- struct iwl_priv *priv = hw->priv;
-
- iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key);
-}
-
-static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta,
- struct ieee80211_key_conf *key)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- struct iwl_rxon_context *ctx = vif_priv->ctx;
- int ret;
- bool is_default_wep_key = false;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (iwlagn_mod_params.sw_crypto) {
- IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
- return -EOPNOTSUPP;
- }
-
- switch (key->cipher) {
- case WLAN_CIPHER_SUITE_TKIP:
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
- /* fall through */
- case WLAN_CIPHER_SUITE_CCMP:
- key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- break;
- default:
- break;
- }
-
- /*
- * We could program these keys into the hardware as well, but we
- * don't expect much multicast traffic in IBSS and having keys
- * for more stations is probably more useful.
- *
- * Mark key TX-only and return 0.
- */
- if (vif->type == NL80211_IFTYPE_ADHOC &&
- !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
- key->hw_key_idx = WEP_INVALID_OFFSET;
- return 0;
- }
-
- /* If they key was TX-only, accept deletion */
- if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET)
- return 0;
-
- mutex_lock(&priv->shrd->mutex);
- iwl_scan_cancel_timeout(priv, 100);
-
- BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT);
-
- /*
- * If we are getting WEP group key and we didn't receive any key mapping
- * so far, we are in legacy wep mode (group key only), otherwise we are
- * in 1X mode.
- * In legacy wep mode, we use another host command to the uCode.
- */
- if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
- if (cmd == SET_KEY)
- is_default_wep_key = !ctx->key_mapping_keys;
- else
- is_default_wep_key =
- key->hw_key_idx == IWLAGN_HW_KEY_DEFAULT;
- }
-
-
- switch (cmd) {
- case SET_KEY:
- if (is_default_wep_key) {
- ret = iwl_set_default_wep_key(priv, vif_priv->ctx, key);
- break;
- }
- ret = iwl_set_dynamic_key(priv, vif_priv->ctx, key, sta);
- if (ret) {
- /*
- * can't add key for RX, but we don't need it
- * in the device for TX so still return 0
- */
- ret = 0;
- key->hw_key_idx = WEP_INVALID_OFFSET;
- }
-
- IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
- break;
- case DISABLE_KEY:
- if (is_default_wep_key)
- ret = iwl_remove_default_wep_key(priv, ctx, key);
- else
- ret = iwl_remove_dynamic_key(priv, ctx, key, sta);
-
- IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
- break;
- default:
- ret = -EINVAL;
- }
-
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return ret;
-}
-
-static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size)
-{
- struct iwl_priv *priv = hw->priv;
- int ret = -EINVAL;
- struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
- struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-
- IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
- sta->addr, tid);
-
- if (!(priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE))
- return -EACCES;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
-
- switch (action) {
- case IEEE80211_AMPDU_RX_START:
- IWL_DEBUG_HT(priv, "start Rx\n");
- ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
- break;
- case IEEE80211_AMPDU_RX_STOP:
- IWL_DEBUG_HT(priv, "stop Rx\n");
- ret = iwl_sta_rx_agg_stop(priv, sta, tid);
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
- ret = 0;
- break;
- case IEEE80211_AMPDU_TX_START:
- IWL_DEBUG_HT(priv, "start Tx\n");
- ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
- break;
- case IEEE80211_AMPDU_TX_STOP:
- IWL_DEBUG_HT(priv, "stop Tx\n");
- ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
- if ((ret == 0) && (priv->agg_tids_count > 0)) {
- priv->agg_tids_count--;
- IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
- priv->agg_tids_count);
- }
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
- ret = 0;
- if (!priv->agg_tids_count && priv->cfg->ht_params &&
- priv->cfg->ht_params->use_rts_for_aggregation) {
- /*
- * switch off RTS/CTS if it was previously enabled
- */
- sta_priv->lq_sta.lq.general_params.flags &=
- ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
- iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
- &sta_priv->lq_sta.lq, CMD_ASYNC, false);
- }
- break;
- case IEEE80211_AMPDU_TX_OPERATIONAL:
- buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);
-
- iwl_trans_tx_agg_setup(trans(priv), ctx->ctxid, iwl_sta_id(sta),
- tid, buf_size);
-
- /*
- * If the limit is 0, then it wasn't initialised yet,
- * use the default. We can do that since we take the
- * minimum below, and we don't want to go above our
- * default due to hardware restrictions.
- */
- if (sta_priv->max_agg_bufsize == 0)
- sta_priv->max_agg_bufsize =
- LINK_QUAL_AGG_FRAME_LIMIT_DEF;
-
- /*
- * Even though in theory the peer could have different
- * aggregation reorder buffer sizes for different sessions,
- * our ucode doesn't allow for that and has a global limit
- * for each station. Therefore, use the minimum of all the
- * aggregation sessions and our default value.
- */
- sta_priv->max_agg_bufsize =
- min(sta_priv->max_agg_bufsize, buf_size);
-
- if (priv->cfg->ht_params &&
- priv->cfg->ht_params->use_rts_for_aggregation) {
- /*
- * switch to RTS/CTS if it is the prefer protection
- * method for HT traffic
- */
-
- sta_priv->lq_sta.lq.general_params.flags |=
- LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
- }
- priv->agg_tids_count++;
- IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
- priv->agg_tids_count);
-
- sta_priv->lq_sta.lq.agg_params.agg_frame_cnt_limit =
- sta_priv->max_agg_bufsize;
-
- iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
- &sta_priv->lq_sta.lq, CMD_ASYNC, false);
-
- IWL_INFO(priv, "Tx aggregation enabled on ra = %pM tid = %d\n",
- sta->addr, tid);
- ret = 0;
- break;
- }
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return ret;
-}
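
The removed iwlagn_mac_ampdu_action() above handled the TX_OPERATIONAL case inline; that body matches iwlagn_tx_agg_oper() added earlier in this diff. A hedged sketch of how a mac80211 ampdu_action callback can delegate to the three Tx helpers (the driver's actual callback, its locking and the RX cases are not shown):

static int example_ampdu_action(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				enum ieee80211_ampdu_mlme_action action,
				struct ieee80211_sta *sta, u16 tid,
				u16 *ssn, u8 buf_size)
{
	struct iwl_priv *priv = hw->priv;

	switch (action) {
	case IEEE80211_AMPDU_TX_START:
		return iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
	case IEEE80211_AMPDU_TX_STOP:
		return iwlagn_tx_agg_stop(priv, vif, sta, tid);
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		return iwlagn_tx_agg_oper(priv, vif, sta, tid, buf_size);
	default:
		return -EOPNOTSUPP;
	}
}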
-
-static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_sta *sta)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- bool is_ap = vif->type == NL80211_IFTYPE_STATION;
- int ret = 0;
- u8 sta_id;
-
- IWL_DEBUG_MAC80211(priv, "received request to add station %pM\n",
- sta->addr);
- mutex_lock(&priv->shrd->mutex);
- IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
- sta->addr);
- sta_priv->sta_id = IWL_INVALID_STATION;
-
- atomic_set(&sta_priv->pending_frames, 0);
- if (vif->type == NL80211_IFTYPE_AP)
- sta_priv->client = true;
-
- ret = iwl_add_station_common(priv, vif_priv->ctx, sta->addr,
- is_ap, sta, &sta_id);
- if (ret) {
- IWL_ERR(priv, "Unable to add station %pM (%d)\n",
- sta->addr, ret);
- /* Should we return success if return code is EEXIST ? */
- goto out;
- }
-
- sta_priv->sta_id = sta_id;
-
- /* Initialize rate scaling */
- IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
- sta->addr);
- iwl_rs_rate_init(priv, sta, sta_id);
- out:
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return ret;
-}
-
-static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
- struct ieee80211_channel_switch *ch_switch)
-{
- struct iwl_priv *priv = hw->priv;
- const struct iwl_channel_info *ch_info;
- struct ieee80211_conf *conf = &hw->conf;
- struct ieee80211_channel *channel = ch_switch->channel;
- struct iwl_ht_config *ht_conf = &priv->current_ht_config;
- /*
- * MULTI-FIXME
- * When we add support for multiple interfaces, we need to
- * revisit this. The channel switch command in the device
- * only affects the BSS context, but what does that really
- * mean? And what if we get a CSA on the second interface?
- * This needs a lot of work.
- */
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- u16 ch;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- mutex_lock(&priv->shrd->mutex);
-
- if (iwl_is_rfkill(priv->shrd))
- goto out;
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) ||
- test_bit(STATUS_SCANNING, &priv->shrd->status) ||
- test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status))
- goto out;
-
- if (!iwl_is_associated_ctx(ctx))
- goto out;
-
- if (!priv->cfg->lib->set_channel_switch)
- goto out;
-
- ch = channel->hw_value;
- if (le16_to_cpu(ctx->active.channel) == ch)
- goto out;
-
- ch_info = iwl_get_channel_info(priv, channel->band, ch);
- if (!is_channel_valid(ch_info)) {
- IWL_DEBUG_MAC80211(priv, "invalid channel\n");
- goto out;
- }
-
- spin_lock_irq(&priv->shrd->lock);
-
- priv->current_ht_config.smps = conf->smps_mode;
-
- /* Configure HT40 channels */
- ctx->ht.enabled = conf_is_ht(conf);
- if (ctx->ht.enabled)
- iwlagn_config_ht40(conf, ctx);
- else
- ctx->ht.is_40mhz = false;
-
- if ((le16_to_cpu(ctx->staging.channel) != ch))
- ctx->staging.flags = 0;
-
- iwl_set_rxon_channel(priv, channel, ctx);
- iwl_set_rxon_ht(priv, ht_conf);
- iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
-
- spin_unlock_irq(&priv->shrd->lock);
-
- iwl_set_rate(priv);
- /*
- * at this point, staging_rxon has the
- * configuration for channel switch
- */
- set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
- priv->switch_channel = cpu_to_le16(ch);
- if (priv->cfg->lib->set_channel_switch(priv, ch_switch)) {
- clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
- priv->switch_channel = 0;
- ieee80211_chswitch_done(ctx->vif, false);
- }
-
-out:
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-static void iwlagn_configure_filter(struct ieee80211_hw *hw,
- unsigned int changed_flags,
- unsigned int *total_flags,
- u64 multicast)
-{
- struct iwl_priv *priv = hw->priv;
- __le32 filter_or = 0, filter_nand = 0;
- struct iwl_rxon_context *ctx;
-
-#define CHK(test, flag) do { \
- if (*total_flags & (test)) \
- filter_or |= (flag); \
- else \
- filter_nand |= (flag); \
- } while (0)
-
- IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
- changed_flags, *total_flags);
-
- CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
- /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
- CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
- CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
-
-#undef CHK
-
- mutex_lock(&priv->shrd->mutex);
-
- for_each_context(priv, ctx) {
- ctx->staging.filter_flags &= ~filter_nand;
- ctx->staging.filter_flags |= filter_or;
-
- /*
- * Not committing directly because hardware can perform a scan,
- * but we'll eventually commit the filter flags change anyway.
- */
- }
-
- mutex_unlock(&priv->shrd->mutex);
-
- /*
- * Receiving all multicast frames is always enabled by the
- * default flags setup in iwl_connection_init_rx_config()
- * since we currently do not support programming multicast
- * filters into the device.
- */
- *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
- FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
-}
-
-static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
-{
- struct iwl_priv *priv = hw->priv;
-
- mutex_lock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
- IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n");
- goto done;
- }
- if (iwl_is_rfkill(priv->shrd)) {
- IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n");
- goto done;
- }
-
- /*
- * mac80211 will not push any more frames for transmit
- * until the flush is completed
- */
- if (drop) {
- IWL_DEBUG_MAC80211(priv, "send flush command\n");
- if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
- IWL_ERR(priv, "flush request fail\n");
- goto done;
- }
- }
- IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
- iwl_trans_wait_tx_queue_empty(trans(priv));
-done:
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
void iwlagn_disable_roc(struct iwl_priv *priv)
{
@@ -2758,166 +1493,6 @@ static void iwlagn_disable_roc_work(struct work_struct *work)
mutex_unlock(&priv->shrd->mutex);
}
-static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
- struct ieee80211_channel *channel,
- enum nl80211_channel_type channel_type,
- int duration)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
- int err = 0;
-
- if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
- return -EOPNOTSUPP;
-
- if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)))
- return -EOPNOTSUPP;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
-
- if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
- err = -EBUSY;
- goto out;
- }
-
- priv->hw_roc_channel = channel;
- priv->hw_roc_chantype = channel_type;
- priv->hw_roc_duration = duration;
- priv->hw_roc_start_notified = false;
- cancel_delayed_work(&priv->hw_roc_disable_work);
-
- if (!ctx->is_active) {
- ctx->is_active = true;
- ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
- memcpy(ctx->staging.node_addr,
- priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
- ETH_ALEN);
- memcpy(ctx->staging.bssid_addr,
- priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
- ETH_ALEN);
- err = iwlagn_commit_rxon(priv, ctx);
- if (err)
- goto out;
- ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK |
- RXON_FILTER_PROMISC_MSK |
- RXON_FILTER_CTL2HOST_MSK;
-
- err = iwlagn_commit_rxon(priv, ctx);
- if (err) {
- iwlagn_disable_roc(priv);
- goto out;
- }
- priv->hw_roc_setup = true;
- }
-
- err = iwl_scan_initiate(priv, ctx->vif, IWL_SCAN_ROC, channel->band);
- if (err)
- iwlagn_disable_roc(priv);
-
- out:
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return err;
-}
-
-static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
-
- if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
- return -EOPNOTSUPP;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
- iwl_scan_cancel_timeout(priv, priv->hw_roc_duration);
- iwlagn_disable_roc(priv);
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return 0;
-}
-
-static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- const u8 *bssid,
- enum ieee80211_tx_sync_type type)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- struct iwl_rxon_context *ctx = vif_priv->ctx;
- int ret;
- u8 sta_id;
-
- if (ctx->ctxid != IWL_RXON_CTX_PAN)
- return 0;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
-
- if (iwl_is_associated_ctx(ctx)) {
- ret = 0;
- goto out;
- }
-
- if (ctx->preauth_bssid || test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
- ret = -EBUSY;
- goto out;
- }
-
- ret = iwl_add_station_common(priv, ctx, bssid, true, NULL, &sta_id);
- if (ret)
- goto out;
-
- if (WARN_ON(sta_id != ctx->ap_sta_id)) {
- ret = -EIO;
- goto out_remove_sta;
- }
-
- memcpy(ctx->bssid, bssid, ETH_ALEN);
- ctx->preauth_bssid = true;
-
- ret = iwlagn_commit_rxon(priv, ctx);
-
- if (ret == 0)
- goto out;
-
- out_remove_sta:
- iwl_remove_station(priv, sta_id, bssid);
- out:
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return ret;
-}
-
-static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- const u8 *bssid,
- enum ieee80211_tx_sync_type type)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- struct iwl_rxon_context *ctx = vif_priv->ctx;
-
- if (ctx->ctxid != IWL_RXON_CTX_PAN)
- return;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
-
- if (iwl_is_associated_ctx(ctx))
- goto out;
-
- iwl_remove_station(priv, ctx->ap_sta_id, bssid);
- ctx->preauth_bssid = false;
- /* no need to commit */
- out:
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
/*****************************************************************************
*
* driver setup and teardown
@@ -2941,8 +1516,8 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
iwl_setup_scan_deferred_work(priv);
- if (priv->cfg->lib->bt_setup_deferred_work)
- priv->cfg->lib->bt_setup_deferred_work(priv);
+ if (cfg(priv)->lib->bt_setup_deferred_work)
+ cfg(priv)->lib->bt_setup_deferred_work(priv);
init_timer(&priv->statistics_periodic);
priv->statistics_periodic.data = (unsigned long)priv;
@@ -2959,8 +1534,8 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
static void iwl_cancel_deferred_work(struct iwl_priv *priv)
{
- if (priv->cfg->lib->cancel_deferred_work)
- priv->cfg->lib->cancel_deferred_work(priv);
+ if (cfg(priv)->lib->cancel_deferred_work)
+ cfg(priv)->lib->cancel_deferred_work(priv);
cancel_work_sync(&priv->run_time_calib_work);
cancel_work_sync(&priv->beacon_update);
@@ -3004,6 +1579,8 @@ static int iwl_init_drv(struct iwl_priv *priv)
mutex_init(&priv->shrd->mutex);
+ INIT_LIST_HEAD(&trans(priv)->calib_results);
+
priv->ieee_channels = NULL;
priv->ieee_rates = NULL;
priv->band = IEEE80211_BAND_2GHZ;
@@ -3027,8 +1604,8 @@ static int iwl_init_drv(struct iwl_priv *priv)
iwl_init_scan_params(priv);
/* init bt coex */
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist) {
priv->kill_ack_mask = IWLAGN_BT_KILL_ACK_MASK_DEFAULT;
priv->kill_cts_mask = IWLAGN_BT_KILL_CTS_MASK_DEFAULT;
priv->bt_valid = IWLAGN_BT_ALL_VALID_MSK;
@@ -3060,88 +1637,19 @@ err:
static void iwl_uninit_drv(struct iwl_priv *priv)
{
- iwl_calib_free_results(priv);
iwl_free_geos(priv);
iwl_free_channel_map(priv);
if (priv->tx_cmd_pool)
kmem_cache_destroy(priv->tx_cmd_pool);
kfree(priv->scan_cmd);
kfree(priv->beacon_cmd);
+ kfree(rcu_dereference_raw(priv->noa_data));
#ifdef CONFIG_IWLWIFI_DEBUGFS
kfree(priv->wowlan_sram);
#endif
}
-static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
- enum ieee80211_rssi_event rssi_event)
-{
- struct iwl_priv *priv = hw->priv;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- mutex_lock(&priv->shrd->mutex);
-
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
- if (rssi_event == RSSI_EVENT_LOW)
- priv->bt_enable_pspoll = true;
- else if (rssi_event == RSSI_EVENT_HIGH)
- priv->bt_enable_pspoll = false;
-
- iwlagn_send_advance_bt_config(priv);
- } else {
- IWL_DEBUG_MAC80211(priv, "Advanced BT coex disabled,"
- "ignoring RSSI callback\n");
- }
-
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-}
-
-static int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, bool set)
-{
- struct iwl_priv *priv = hw->priv;
- queue_work(priv->shrd->workqueue, &priv->beacon_update);
-
- return 0;
-}
-
-struct ieee80211_ops iwlagn_hw_ops = {
- .tx = iwlagn_mac_tx,
- .start = iwlagn_mac_start,
- .stop = iwlagn_mac_stop,
-#ifdef CONFIG_PM_SLEEP
- .suspend = iwlagn_mac_suspend,
- .resume = iwlagn_mac_resume,
-#endif
- .add_interface = iwlagn_mac_add_interface,
- .remove_interface = iwlagn_mac_remove_interface,
- .change_interface = iwlagn_mac_change_interface,
- .config = iwlagn_mac_config,
- .configure_filter = iwlagn_configure_filter,
- .set_key = iwlagn_mac_set_key,
- .update_tkip_key = iwlagn_mac_update_tkip_key,
- .set_rekey_data = iwlagn_mac_set_rekey_data,
- .conf_tx = iwlagn_mac_conf_tx,
- .bss_info_changed = iwlagn_bss_info_changed,
- .ampdu_action = iwlagn_mac_ampdu_action,
- .hw_scan = iwlagn_mac_hw_scan,
- .sta_notify = iwlagn_mac_sta_notify,
- .sta_add = iwlagn_mac_sta_add,
- .sta_remove = iwlagn_mac_sta_remove,
- .channel_switch = iwlagn_mac_channel_switch,
- .flush = iwlagn_mac_flush,
- .tx_last_beacon = iwlagn_mac_tx_last_beacon,
- .remain_on_channel = iwlagn_mac_remain_on_channel,
- .cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel,
- .rssi_callback = iwlagn_mac_rssi_callback,
- CFG80211_TESTMODE_CMD(iwlagn_mac_testmode_cmd)
- CFG80211_TESTMODE_DUMP(iwlagn_mac_testmode_dump)
- .tx_sync = iwlagn_mac_tx_sync,
- .finish_tx_sync = iwlagn_mac_finish_tx_sync,
- .set_tim = iwlagn_mac_set_tim,
-};
static u32 iwl_hw_detect(struct iwl_priv *priv)
{
@@ -3161,40 +1669,55 @@ static int iwl_set_hw_params(struct iwl_priv *priv)
hw_params(priv).rx_page_order =
get_order(IWL_RX_BUF_SIZE_4K);
- if (iwlagn_mod_params.disable_11n)
- priv->cfg->sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
+ if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_ALL)
+ cfg(priv)->sku &= ~EEPROM_SKU_CAP_11N_ENABLE;
hw_params(priv).num_ampdu_queues =
- priv->cfg->base_params->num_of_ampdu_queues;
+ cfg(priv)->base_params->num_of_ampdu_queues;
hw_params(priv).shadow_reg_enable =
- priv->cfg->base_params->shadow_reg_enable;
- hw_params(priv).sku = priv->cfg->sku;
- hw_params(priv).wd_timeout = priv->cfg->base_params->wd_timeout;
+ cfg(priv)->base_params->shadow_reg_enable;
+ hw_params(priv).sku = cfg(priv)->sku;
+ hw_params(priv).wd_timeout = cfg(priv)->base_params->wd_timeout;
/* Device-specific setup */
- return priv->cfg->lib->set_hw_params(priv);
+ return cfg(priv)->lib->set_hw_params(priv);
}
-/* This function both allocates and initializes hw and priv. */
-static struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg)
-{
- struct iwl_priv *priv;
- /* mac80211 allocates memory for this device instance, including
- * space for this driver's private structure */
- struct ieee80211_hw *hw;
- hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwlagn_hw_ops);
- if (hw == NULL) {
- pr_err("%s: Can not allocate network device\n",
- cfg->name);
- goto out;
- }
- priv = hw->priv;
- priv->hw = hw;
+static void iwl_debug_config(struct iwl_priv *priv)
+{
+ dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEBUG "
+#ifdef CONFIG_IWLWIFI_DEBUG
+ "enabled\n");
+#else
+ "disabled\n");
+#endif
+ dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEBUGFS "
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ "enabled\n");
+#else
+ "disabled\n");
+#endif
+ dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TRACING "
+#ifdef CONFIG_IWLWIFI_DEVICE_TRACING
+ "enabled\n");
+#else
+ "disabled\n");
+#endif
-out:
- return hw;
+ dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_DEVICE_TESTMODE "
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
+ "enabled\n");
+#else
+ "disabled\n");
+#endif
+ dev_printk(KERN_INFO, bus(priv)->dev, "CONFIG_IWLWIFI_P2P "
+#ifdef CONFIG_IWLWIFI_P2P
+ "enabled\n");
+#else
+ "disabled\n");
+#endif
}
int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
@@ -3209,8 +1732,9 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
/************************
* 1. Allocating HW data
************************/
- hw = iwl_alloc_all(cfg);
+ hw = iwl_alloc_all();
if (!hw) {
+ pr_err("%s: Cannot allocate network device\n", cfg->name);
err = -ENOMEM;
goto out;
}
@@ -3231,8 +1755,11 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
SET_IEEE80211_DEV(hw, bus(priv)->dev);
+ /* what debugging capabilities we have */
+ iwl_debug_config(priv);
+
IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
- priv->cfg = cfg;
+ cfg(priv) = cfg;
/* is antenna coupling more than 35dB ? */
priv->bt_ant_couple_ok =
@@ -3266,7 +1793,7 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
***********************/
hw_rev = iwl_hw_detect(priv);
IWL_INFO(priv, "Detected %s, REV=0x%X\n",
- priv->cfg->name, hw_rev);
+ cfg(priv)->name, hw_rev);
err = iwl_trans_request_irq(trans(priv));
if (err)
@@ -3296,11 +1823,11 @@ int iwl_probe(struct iwl_bus *bus, const struct iwl_trans_ops *trans_ops,
goto out_free_eeprom;
/* extract MAC Address */
- iwl_eeprom_get_mac(priv, priv->addresses[0].addr);
+ iwl_eeprom_get_mac(priv->shrd, priv->addresses[0].addr);
IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
priv->hw->wiphy->addresses = priv->addresses;
priv->hw->wiphy->n_addresses = 1;
- num_mac = iwl_eeprom_query16(priv, EEPROM_NUM_MAC_ADDRESS);
+ num_mac = iwl_eeprom_query16(priv->shrd, EEPROM_NUM_MAC_ADDRESS);
if (num_mac > 1) {
memcpy(priv->addresses[1].addr, priv->addresses[0].addr,
ETH_ALEN);
@@ -3365,7 +1892,7 @@ out_destroy_workqueue:
priv->shrd->workqueue = NULL;
iwl_uninit_drv(priv);
out_free_eeprom:
- iwl_eeprom_free(priv);
+ iwl_eeprom_free(priv->shrd);
out_free_trans:
iwl_trans_free(trans(priv));
out_free_traffic_mem:
@@ -3390,21 +1917,16 @@ void __devexit iwl_remove(struct iwl_priv * priv)
set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
iwl_testmode_cleanup(priv);
- iwl_leds_exit(priv);
-
- if (priv->mac80211_registered) {
- ieee80211_unregister_hw(priv->hw);
- priv->mac80211_registered = 0;
- }
+ iwlagn_mac_unregister(priv);
iwl_tt_exit(priv);
/*This will stop the queues, move the device to low power state */
iwl_trans_stop_device(trans(priv));
- iwl_dealloc_ucode(priv);
+ iwl_dealloc_ucode(trans(priv));
- iwl_eeprom_free(priv);
+ iwl_eeprom_free(priv->shrd);
/*netif_stop_queue(dev); */
flush_workqueue(priv->shrd->workqueue);
@@ -3474,8 +1996,9 @@ module_param_named(swcrypto, iwlagn_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(queues_num, iwlagn_mod_params.num_of_queues, int, S_IRUGO);
MODULE_PARM_DESC(queues_num, "number of hw queues.");
-module_param_named(11n_disable, iwlagn_mod_params.disable_11n, int, S_IRUGO);
-MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
+module_param_named(11n_disable, iwlagn_mod_params.disable_11n, uint, S_IRUGO);
+MODULE_PARM_DESC(11n_disable,
+ "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
module_param_named(amsdu_size_8K, iwlagn_mod_params.amsdu_size_8K,
int, S_IRUGO);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index 3856aba..f84fb3c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -65,6 +65,12 @@
#include "iwl-dev.h"
+struct iwlagn_ucode_capabilities {
+ u32 max_probe_length;
+ u32 standard_phy_calibration_size;
+ u32 flags;
+};
+
extern struct ieee80211_ops iwlagn_hw_ops;
int iwl_reset_ict(struct iwl_trans *trans);
@@ -77,6 +83,16 @@ static inline void iwl_set_calib_hdr(struct iwl_calib_hdr *hdr, u8 cmd)
hdr->data_valid = 1;
}
+void __iwl_down(struct iwl_priv *priv);
+void iwl_down(struct iwl_priv *priv);
+void iwlagn_prepare_restart(struct iwl_priv *priv);
+
+/* MAC80211 */
+struct ieee80211_hw *iwl_alloc_all(void);
+int iwlagn_mac_setup_register(struct iwl_priv *priv,
+ struct iwlagn_ucode_capabilities *capa);
+void iwlagn_mac_unregister(struct iwl_priv *priv);
+
/* RXON */
int iwlagn_set_pan_params(struct iwl_priv *priv);
int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
@@ -93,20 +109,20 @@ void iwlagn_config_ht40(struct ieee80211_conf *conf,
int iwlagn_rx_calib_result(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb,
struct iwl_device_cmd *cmd);
-int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type);
-void iwlagn_send_prio_tbl(struct iwl_priv *priv);
-int iwlagn_run_init_ucode(struct iwl_priv *priv);
-int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
- struct fw_img *image,
- enum iwlagn_ucode_type ucode_type);
/* lib */
int iwlagn_send_tx_power(struct iwl_priv *priv);
void iwlagn_temperature(struct iwl_priv *priv);
-u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv);
+u16 iwl_eeprom_calib_version(struct iwl_shared *shrd);
int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control);
int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
+#ifdef CONFIG_PM_SLEEP
+int iwlagn_send_patterns(struct iwl_priv *priv,
+ struct cfg80211_wowlan *wowlan);
+int iwlagn_suspend(struct iwl_priv *priv,
+ struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
+#endif
/* rx */
int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
@@ -117,6 +133,8 @@ void iwl_setup_rx_handlers(struct iwl_priv *priv);
int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn);
+int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta, u16 tid, u8 buf_size);
int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid);
int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
@@ -198,9 +216,6 @@ int iwl_add_station_common(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
struct ieee80211_sta *sta, u8 *sta_id_r);
int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
const u8 *addr);
-int iwlagn_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- struct ieee80211_sta *sta);
-
u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
const u8 *addr, bool is_ap, struct ieee80211_sta *sta);
@@ -318,10 +333,6 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt);
int iwl_update_bcast_station(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
int iwl_update_bcast_stations(struct iwl_priv *priv);
-void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum sta_notify_cmd cmd,
- struct ieee80211_sta *sta);
/* rate */
static inline u32 iwl_ant_idx_to_flags(u8 ant_idx)
@@ -341,28 +352,11 @@ static inline __le32 iwl_hw_set_rate_n_flags(u8 rate, u32 flags)
/* eeprom */
void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv);
-void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
-
-/* notification wait support */
-void __acquires(wait_entry)
-iwlagn_init_notification_wait(struct iwl_priv *priv,
- struct iwl_notification_wait *wait_entry,
- u8 cmd,
- void (*fn)(struct iwl_priv *priv,
- struct iwl_rx_packet *pkt,
- void *data),
- void *fn_data);
-int __must_check __releases(wait_entry)
-iwlagn_wait_notification(struct iwl_priv *priv,
- struct iwl_notification_wait *wait_entry,
- unsigned long timeout);
-void __releases(wait_entry)
-iwlagn_remove_notification(struct iwl_priv *priv,
- struct iwl_notification_wait *wait_entry);
-extern int iwlagn_init_alive_start(struct iwl_priv *priv);
+void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac);
+
extern int iwl_alive_start(struct iwl_priv *priv);
/* svtool */
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
extern int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data,
int len);
extern int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/iwlwifi/iwl-bus.h b/drivers/net/wireless/iwlwifi/iwl-bus.h
index 08b9759..940d503 100644
--- a/drivers/net/wireless/iwlwifi/iwl-bus.h
+++ b/drivers/net/wireless/iwlwifi/iwl-bus.h
@@ -122,7 +122,8 @@ struct iwl_bus;
* struct iwl_bus_ops - bus specific operations
* @get_pm_support: must return true if the bus can go to sleep
* @apm_config: will be called during the config of the APM
- * @get_hw_id: prints the hw_id in the provided buffer
+ * @get_hw_id_string: prints the hw_id in the provided buffer
+ * @get_hw_id: get hw_id in u32
* @write8: write a byte to register at offset ofs
* @write32: write a dword to register at offset ofs
* @read32: read a dword at register at offset ofs
@@ -130,7 +131,8 @@ struct iwl_bus;
struct iwl_bus_ops {
bool (*get_pm_support)(struct iwl_bus *bus);
void (*apm_config)(struct iwl_bus *bus);
- void (*get_hw_id)(struct iwl_bus *bus, char buf[], int buf_len);
+ void (*get_hw_id_string)(struct iwl_bus *bus, char buf[], int buf_len);
+ u32 (*get_hw_id)(struct iwl_bus *bus);
void (*write8)(struct iwl_bus *bus, u32 ofs, u8 val);
void (*write32)(struct iwl_bus *bus, u32 ofs, u32 val);
u32 (*read32)(struct iwl_bus *bus, u32 ofs);
@@ -172,9 +174,15 @@ static inline void bus_apm_config(struct iwl_bus *bus)
bus->ops->apm_config(bus);
}
-static inline void bus_get_hw_id(struct iwl_bus *bus, char buf[], int buf_len)
+static inline void bus_get_hw_id_string(struct iwl_bus *bus, char buf[],
+ int buf_len)
{
- bus->ops->get_hw_id(bus, buf, buf_len);
+ bus->ops->get_hw_id_string(bus, buf, buf_len);
+}
+
+static inline u32 bus_get_hw_id(struct iwl_bus *bus)
+{
+ return bus->ops->get_hw_id(bus);
}
static inline void bus_write8(struct iwl_bus *bus, u32 ofs, u8 val)
diff --git a/drivers/net/wireless/iwlwifi/iwl-cfg.h b/drivers/net/wireless/iwlwifi/iwl-cfg.h
index 2a2dc45..e1d7825 100644
--- a/drivers/net/wireless/iwlwifi/iwl-cfg.h
+++ b/drivers/net/wireless/iwlwifi/iwl-cfg.h
@@ -101,17 +101,11 @@ extern struct iwl_cfg iwl100_bg_cfg;
extern struct iwl_cfg iwl130_bgn_cfg;
extern struct iwl_cfg iwl130_bg_cfg;
extern struct iwl_cfg iwl2000_2bgn_cfg;
-extern struct iwl_cfg iwl2000_2bg_cfg;
extern struct iwl_cfg iwl2000_2bgn_d_cfg;
extern struct iwl_cfg iwl2030_2bgn_cfg;
-extern struct iwl_cfg iwl2030_2bg_cfg;
extern struct iwl_cfg iwl6035_2agn_cfg;
-extern struct iwl_cfg iwl6035_2abg_cfg;
-extern struct iwl_cfg iwl6035_2bg_cfg;
-extern struct iwl_cfg iwl105_bg_cfg;
extern struct iwl_cfg iwl105_bgn_cfg;
extern struct iwl_cfg iwl105_bgn_d_cfg;
-extern struct iwl_cfg iwl135_bg_cfg;
extern struct iwl_cfg iwl135_bgn_cfg;
#endif /* __iwl_pci_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 69d5f85..f822ac4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -109,10 +109,10 @@ enum {
/* RX, TX, LEDs */
REPLY_TX = 0x1c,
REPLY_LEDS_CMD = 0x48,
- REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
+ REPLY_TX_LINK_QUALITY_CMD = 0x4e,
/* WiMAX coexistence */
- COEX_PRIORITY_TABLE_CMD = 0x5a, /* for 5000 series and up */
+ COEX_PRIORITY_TABLE_CMD = 0x5a,
COEX_MEDIUM_NOTIFICATION = 0x5b,
COEX_EVENT_CMD = 0x5c,
@@ -198,6 +198,7 @@ enum {
REPLY_WOWLAN_TKIP_PARAMS = 0xe3,
REPLY_WOWLAN_KEK_KCK_MATERIAL = 0xe4,
REPLY_WOWLAN_GET_STATUS = 0xe5,
+ REPLY_D3_CONFIG = 0xd3,
REPLY_MAX = 0xff
};
@@ -465,23 +466,27 @@ struct iwl_error_event_table {
u32 frame_ptr; /* frame pointer */
u32 stack_ptr; /* stack pointer */
u32 hcmd; /* last host command header */
-#if 0
- /* no need to read the remainder, we don't use the values */
- u32 isr0; /* isr status register LMPM_NIC_ISR0: rxtx_flag */
- u32 isr1; /* isr status register LMPM_NIC_ISR1: host_flag */
- u32 isr2; /* isr status register LMPM_NIC_ISR2: enc_flag */
- u32 isr3; /* isr status register LMPM_NIC_ISR3: time_flag */
- u32 isr4; /* isr status register LMPM_NIC_ISR4: wico interrupt */
+ u32 isr0; /* isr status register LMPM_NIC_ISR0:
+ * rxtx_flag */
+ u32 isr1; /* isr status register LMPM_NIC_ISR1:
+ * host_flag */
+ u32 isr2; /* isr status register LMPM_NIC_ISR2:
+ * enc_flag */
+ u32 isr3; /* isr status register LMPM_NIC_ISR3:
+ * time_flag */
+ u32 isr4; /* isr status register LMPM_NIC_ISR4:
+ * wico interrupt */
u32 isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
u32 wait_event; /* wait event() caller address */
u32 l2p_control; /* L2pControlField */
u32 l2p_duration; /* L2pDurationField */
u32 l2p_mhvalid; /* L2pMhValidBits */
u32 l2p_addr_match; /* L2pAddrMatchStat */
- u32 lmpm_pmg_sel; /* indicate which clocks are turned on (LMPM_PMG_SEL) */
- u32 u_timestamp; /* indicate when the date and time of the compilation */
+ u32 lmpm_pmg_sel; /* indicate which clocks are turned on
+ * (LMPM_PMG_SEL) */
+ u32 u_timestamp; /* indicate when the date and time of the
+ * compilation */
u32 flow_handler; /* FH read/write pointers, RX credit */
-#endif
} __packed;
struct iwl_alive_resp {
@@ -809,7 +814,8 @@ struct iwl_qosparam_cmd {
#define IWLAGN_STATION_COUNT 16
#define IWL_INVALID_STATION 255
-#define IWL_MAX_TID_COUNT 9
+#define IWL_MAX_TID_COUNT 8
+#define IWL_TID_NON_QOS IWL_MAX_TID_COUNT
#define STA_FLG_TX_RATE_MSK cpu_to_le32(1 << 2)
#define STA_FLG_PWR_SAVE_MSK cpu_to_le32(1 << 8)
@@ -930,8 +936,7 @@ struct iwl_addsta_cmd {
* corresponding to bit (e.g. bit 5 controls TID 5).
* Set modify_mask bit STA_MODIFY_TID_DISABLE_TX to use this field. */
__le16 tid_disable_tx;
-
- __le16 rate_n_flags; /* 3945 only */
+ __le16 legacy_reserved;
/* TID for which to add block-ack support.
* Set modify_mask bit STA_MODIFY_ADDBA_TID_MSK to use this field. */
@@ -1161,8 +1166,7 @@ struct iwl_rx_mpdu_res_start {
*
* uCode handles retrying Tx when an ACK is expected but not received.
* This includes trying lower data rates than the one requested in the Tx
- * command, as set up by the REPLY_RATE_SCALE (for 3945) or
- * REPLY_TX_LINK_QUALITY_CMD (agn).
+ * command, as set up by the REPLY_TX_LINK_QUALITY_CMD (agn).
*
* Driver sets up transmit power for various rates via REPLY_TX_PWR_TABLE_CMD.
* This command must be executed after every RXON command, before Tx can occur.
@@ -1174,25 +1178,9 @@ struct iwl_rx_mpdu_res_start {
* 1: Use RTS/CTS protocol or CTS-to-self if spec allows it
* before this frame. if CTS-to-self required check
* RXON_FLG_SELF_CTS_EN status.
- * unused in 3945/4965, used in 5000 series and after
*/
#define TX_CMD_FLG_PROT_REQUIRE_MSK cpu_to_le32(1 << 0)
-/*
- * 1: Use Request-To-Send protocol before this frame.
- * Mutually exclusive vs. TX_CMD_FLG_CTS_MSK.
- * used in 3945/4965, unused in 5000 series and after
- */
-#define TX_CMD_FLG_RTS_MSK cpu_to_le32(1 << 1)
-
-/*
- * 1: Transmit Clear-To-Send to self before this frame.
- * Driver should set this for AUTH/DEAUTH/ASSOC-REQ/REASSOC mgmnt frames.
- * Mutually exclusive vs. TX_CMD_FLG_RTS_MSK.
- * used in 3945/4965, unused in 5000 series and after
- */
-#define TX_CMD_FLG_CTS_MSK cpu_to_le32(1 << 2)
-
/* 1: Expect ACK from receiving station
* 0: Don't expect ACK (MAC header's duration field s/b 0)
* Set this for unicast frames, but not broadcast/multicast. */
@@ -1210,18 +1198,8 @@ struct iwl_rx_mpdu_res_start {
* Set when Txing a block-ack request frame. Also set TX_CMD_FLG_ACK_MSK. */
#define TX_CMD_FLG_IMM_BA_RSP_MASK cpu_to_le32(1 << 6)
-/*
- * 1: Frame requires full Tx-Op protection.
- * Set this if either RTS or CTS Tx Flag gets set.
- * used in 3945/4965, unused in 5000 series and after
- */
-#define TX_CMD_FLG_FULL_TXOP_PROT_MSK cpu_to_le32(1 << 7)
-
-/* Tx antenna selection field; used only for 3945, reserved (0) for agn devices.
- * Set field to "0" to allow 3945 uCode to select antenna (normal usage). */
+/* Tx antenna selection field; reserved (0) for agn devices. */
#define TX_CMD_FLG_ANT_SEL_MSK cpu_to_le32(0xf00)
-#define TX_CMD_FLG_ANT_A_MSK cpu_to_le32(1 << 8)
-#define TX_CMD_FLG_ANT_B_MSK cpu_to_le32(1 << 9)
/* 1: Ignore Bluetooth priority for this frame.
* 0: Delay Tx until Bluetooth device is done (normal usage). */
@@ -1567,7 +1545,6 @@ struct iwl_compressed_ba_resp {
__le64 bitmap;
__le16 scd_flow;
__le16 scd_ssn;
- /* following only for 5000 series and up */
u8 txed; /* number of frames sent */
u8 txed_2_done; /* number of frames acked */
} __packed;
@@ -1669,7 +1646,7 @@ struct iwl_link_qual_agg_params {
/*
* REPLY_TX_LINK_QUALITY_CMD = 0x4e (command, has simple generic response)
*
- * For agn devices only; 3945 uses REPLY_RATE_SCALE.
+ * For agn devices
*
* Each station in the agn device's internal station table has its own table
* of 16
@@ -1918,7 +1895,7 @@ struct iwl_link_quality_cmd {
/*
* REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
*
- * 3945 and agn devices support hardware handshake with Bluetooth device on
+ * agn devices support hardware handshake with Bluetooth device on
* same platform. Bluetooth device alerts wireless device when it will Tx;
* wireless device can delay or kill its own Tx to accommodate.
*/
@@ -2202,8 +2179,8 @@ struct iwl_spectrum_notification {
struct iwl_powertable_cmd {
__le16 flags;
- u8 keep_alive_seconds; /* 3945 reserved */
- u8 debug_flags; /* 3945 reserved */
+ u8 keep_alive_seconds;
+ u8 debug_flags;
__le32 rx_data_timeout;
__le32 tx_data_timeout;
__le32 sleep_interval[IWL_POWER_VEC_SIZE];
@@ -2324,9 +2301,9 @@ struct iwl_scan_channel {
/**
* struct iwl_ssid_ie - directed scan network information element
*
- * Up to 20 of these may appear in REPLY_SCAN_CMD (Note: Only 4 are in
- * 3945 SCAN api), selected by "type" bit field in struct iwl_scan_channel;
- * each channel may select different ssids from among the 20 (4) entries.
+ * Up to 20 of these may appear in REPLY_SCAN_CMD,
+ * selected by "type" bit field in struct iwl_scan_channel;
+ * each channel may select different ssids from among the 20 entries.
* SSID IEs get transmitted in reverse order of entry.
*/
struct iwl_ssid_ie {
@@ -2335,7 +2312,6 @@ struct iwl_ssid_ie {
u8 ssid[32];
} __packed;
-#define PROBE_OPTION_MAX_3945 4
#define PROBE_OPTION_MAX 20
#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
#define IWL_GOOD_CRC_TH_DISABLED 0
@@ -2416,8 +2392,6 @@ struct iwl_scan_cmd {
* channel */
__le32 suspend_time; /* pause scan this long (in "extended beacon
* format") when returning to service chnl:
- * 3945; 31:24 # beacons, 19:0 additional usec,
- * 4965; 31:22 # beacons, 21:0 additional usec.
*/
__le32 flags; /* RXON_FLG_* */
__le32 filter_flags; /* RXON_FILTER_* */
@@ -2733,7 +2707,7 @@ struct statistics_div {
struct statistics_general_common {
__le32 temperature; /* radio temperature */
- __le32 temperature_m; /* for 5000 and up, this is radio voltage */
+ __le32 temperature_m; /* radio voltage */
struct statistics_dbg dbg;
__le32 sleep_time;
__le32 slots_out;
@@ -3801,6 +3775,19 @@ struct iwl_bt_coex_prot_env_cmd {
} __attribute__((packed));
/*
+ * REPLY_D3_CONFIG
+ */
+enum iwlagn_d3_wakeup_filters {
+ IWLAGN_D3_WAKEUP_RFKILL = BIT(0),
+ IWLAGN_D3_WAKEUP_SYSASSERT = BIT(1),
+};
+
+struct iwlagn_d3_config_cmd {
+ __le32 min_sleep_time;
+ __le32 wakeup_flags;
+} __packed;
+
+/*
* REPLY_WOWLAN_PATTERNS
*/
#define IWLAGN_WOWLAN_MIN_PATTERN_LEN 16
@@ -3830,19 +3817,16 @@ enum iwlagn_wowlan_wakeup_filters {
IWLAGN_WOWLAN_WAKEUP_BEACON_MISS = BIT(2),
IWLAGN_WOWLAN_WAKEUP_LINK_CHANGE = BIT(3),
IWLAGN_WOWLAN_WAKEUP_GTK_REKEY_FAIL = BIT(4),
- IWLAGN_WOWLAN_WAKEUP_RFKILL = BIT(5),
- IWLAGN_WOWLAN_WAKEUP_UCODE_ERROR = BIT(6),
- IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(7),
- IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(8),
- IWLAGN_WOWLAN_WAKEUP_ALWAYS = BIT(9),
- IWLAGN_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(10),
+ IWLAGN_WOWLAN_WAKEUP_EAP_IDENT_REQ = BIT(5),
+ IWLAGN_WOWLAN_WAKEUP_4WAY_HANDSHAKE = BIT(6),
+ IWLAGN_WOWLAN_WAKEUP_ALWAYS = BIT(7),
+ IWLAGN_WOWLAN_WAKEUP_ENABLE_NET_DETECT = BIT(8),
};
struct iwlagn_wowlan_wakeup_filter_cmd {
__le32 enabled;
__le16 non_qos_seq;
- u8 min_sleep_seconds;
- u8 reserved;
+ __le16 reserved;
__le16 qos_seq[8];
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index fcf5416..7bcfa78 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -60,8 +60,8 @@ static void iwl_init_ht_hw_capab(const struct iwl_priv *priv,
ht_info->ht_supported = true;
- if (priv->cfg->ht_params &&
- priv->cfg->ht_params->ht_greenfield_support)
+ if (cfg(priv)->ht_params &&
+ cfg(priv)->ht_params->ht_greenfield_support)
ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
max_bit_rate = MAX_BIT_RATE_20_MHZ;
@@ -76,11 +76,7 @@ static void iwl_init_ht_hw_capab(const struct iwl_priv *priv,
ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
ht_info->ampdu_factor = CFG_HT_RX_AMPDU_FACTOR_DEF;
- if (priv->cfg->bt_params && priv->cfg->bt_params->ampdu_factor)
- ht_info->ampdu_factor = priv->cfg->bt_params->ampdu_factor;
ht_info->ampdu_density = CFG_HT_MPDU_DENSITY_DEF;
- if (priv->cfg->bt_params && priv->cfg->bt_params->ampdu_density)
- ht_info->ampdu_density = priv->cfg->bt_params->ampdu_density;
ht_info->mcs.rx_mask[0] = 0xFF;
if (rx_chains_num >= 2)
@@ -141,7 +137,7 @@ int iwl_init_geos(struct iwl_priv *priv)
sband->bitrates = &rates[IWL_FIRST_OFDM_RATE];
sband->n_bitrates = IWL_RATE_COUNT_LEGACY - IWL_FIRST_OFDM_RATE;
- if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
+ if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE)
iwl_init_ht_hw_capab(priv, &sband->ht_cap,
IEEE80211_BAND_5GHZ);
@@ -151,7 +147,7 @@ int iwl_init_geos(struct iwl_priv *priv)
sband->bitrates = rates;
sband->n_bitrates = IWL_RATE_COUNT_LEGACY;
- if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
+ if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE)
iwl_init_ht_hw_capab(priv, &sband->ht_cap,
IEEE80211_BAND_2GHZ);
@@ -206,12 +202,12 @@ int iwl_init_geos(struct iwl_priv *priv)
priv->tx_power_next = max_tx_power;
if ((priv->bands[IEEE80211_BAND_5GHZ].n_channels == 0) &&
- priv->cfg->sku & EEPROM_SKU_CAP_BAND_52GHZ) {
+ cfg(priv)->sku & EEPROM_SKU_CAP_BAND_52GHZ) {
char buf[32];
- bus_get_hw_id(bus(priv), buf, sizeof(buf));
+ bus_get_hw_id_string(bus(priv), buf, sizeof(buf));
IWL_INFO(priv, "Incorrectly detected BG card as ABG. "
"Please send your %s to maintainer.\n", buf);
- priv->cfg->sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
+ cfg(priv)->sku &= ~EEPROM_SKU_CAP_BAND_52GHZ;
}
IWL_INFO(priv, "Tunable channels: %d 802.11bg, %d 802.11a channels\n",
@@ -836,19 +832,6 @@ void iwl_print_rx_config_cmd(struct iwl_priv *priv,
}
#endif
-static void iwlagn_abort_notification_waits(struct iwl_priv *priv)
-{
- unsigned long flags;
- struct iwl_notification_wait *wait_entry;
-
- spin_lock_irqsave(&priv->notif_wait_lock, flags);
- list_for_each_entry(wait_entry, &priv->notif_waits, list)
- wait_entry->aborted = true;
- spin_unlock_irqrestore(&priv->notif_wait_lock, flags);
-
- wake_up_all(&priv->notif_waitq);
-}
-
void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
{
unsigned int reload_msec;
@@ -860,7 +843,7 @@ void iwlagn_fw_error(struct iwl_priv *priv, bool ondemand)
/* Cancel currently queued command. */
clear_bit(STATUS_HCMD_ACTIVE, &priv->shrd->status);
- iwlagn_abort_notification_waits(priv);
+ iwl_abort_notification_waits(priv->shrd);
/* Keep the restart process from trying to send host
* commands by clearing the ready bit */
@@ -979,9 +962,9 @@ int iwl_apm_init(struct iwl_priv *priv)
bus_apm_config(bus(priv));
/* Configure analog phase-lock-loop before activating to D0A */
- if (priv->cfg->base_params->pll_cfg_val)
+ if (cfg(priv)->base_params->pll_cfg_val)
iwl_set_bit(bus(priv), CSR_ANA_PLL_CFG,
- priv->cfg->base_params->pll_cfg_val);
+ cfg(priv)->base_params->pll_cfg_val);
/*
* Set "initialization complete" bit to move adapter from
@@ -1120,229 +1103,8 @@ int iwl_send_statistics_request(struct iwl_priv *priv, u8 flags, bool clear)
&statistics_cmd);
}
-int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
- const struct ieee80211_tx_queue_params *params)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx;
- unsigned long flags;
- int q;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
- if (!iwl_is_ready_rf(priv->shrd)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
- return -EIO;
- }
- if (queue >= AC_NUM) {
- IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
- return 0;
- }
-
- q = AC_NUM - 1 - queue;
-
- spin_lock_irqsave(&priv->shrd->lock, flags);
-
- /*
- * MULTI-FIXME
- * This may need to be done per interface in nl80211/cfg80211/mac80211.
- */
- for_each_context(priv, ctx) {
- ctx->qos_data.def_qos_parm.ac[q].cw_min =
- cpu_to_le16(params->cw_min);
- ctx->qos_data.def_qos_parm.ac[q].cw_max =
- cpu_to_le16(params->cw_max);
- ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
- ctx->qos_data.def_qos_parm.ac[q].edca_txop =
- cpu_to_le16((params->txop * 32));
-
- ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
- }
-
- spin_unlock_irqrestore(&priv->shrd->lock, flags);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
-}
-
-int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw)
-{
- struct iwl_priv *priv = hw->priv;
-
- return priv->ibss_manager == IWL_IBSS_MANAGER;
-}
-
-static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
-{
- iwl_connection_init_rx_config(priv, ctx);
-
- iwlagn_set_rxon_chain(priv, ctx);
-
- return iwlagn_commit_rxon(priv, ctx);
-}
-
-static int iwl_setup_interface(struct iwl_priv *priv,
- struct iwl_rxon_context *ctx)
-{
- struct ieee80211_vif *vif = ctx->vif;
- int err;
-
- lockdep_assert_held(&priv->shrd->mutex);
-
- /*
- * This variable will be correct only when there's just
- * a single context, but all code using it is for hardware
- * that supports only one context.
- */
- priv->iw_mode = vif->type;
-
- ctx->is_active = true;
-
- err = iwl_set_mode(priv, ctx);
- if (err) {
- if (!ctx->always_active)
- ctx->is_active = false;
- return err;
- }
-
- if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist &&
- vif->type == NL80211_IFTYPE_ADHOC) {
- /*
- * pretend to have high BT traffic as long as we
- * are operating in IBSS mode, as this will cause
- * the rate scaling etc. to behave as intended.
- */
- priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
- }
-
- return 0;
-}
-
-int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
- struct iwl_rxon_context *tmp, *ctx = NULL;
- int err;
- enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
-
- IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
- viftype, vif->addr);
-
- cancel_delayed_work_sync(&priv->hw_roc_disable_work);
-
- mutex_lock(&priv->shrd->mutex);
-
- iwlagn_disable_roc(priv);
-
- if (!iwl_is_ready_rf(priv->shrd)) {
- IWL_WARN(priv, "Try to add interface when device not ready\n");
- err = -EINVAL;
- goto out;
- }
-
- for_each_context(priv, tmp) {
- u32 possible_modes =
- tmp->interface_modes | tmp->exclusive_interface_modes;
-
- if (tmp->vif) {
- /* check if this busy context is exclusive */
- if (tmp->exclusive_interface_modes &
- BIT(tmp->vif->type)) {
- err = -EINVAL;
- goto out;
- }
- continue;
- }
-
- if (!(possible_modes & BIT(viftype)))
- continue;
-
- /* have maybe usable context w/o interface */
- ctx = tmp;
- break;
- }
-
- if (!ctx) {
- err = -EOPNOTSUPP;
- goto out;
- }
-
- vif_priv->ctx = ctx;
- ctx->vif = vif;
-
- err = iwl_setup_interface(priv, ctx);
- if (!err)
- goto out;
-
- ctx->vif = NULL;
- priv->iw_mode = NL80211_IFTYPE_STATION;
- out:
- mutex_unlock(&priv->shrd->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
- return err;
-}
-
-static void iwl_teardown_interface(struct iwl_priv *priv,
- struct ieee80211_vif *vif,
- bool mode_change)
-{
- struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-
- lockdep_assert_held(&priv->shrd->mutex);
-
- if (priv->scan_vif == vif) {
- iwl_scan_cancel_timeout(priv, 200);
- iwl_force_scan_end(priv);
- }
-
- if (!mode_change) {
- iwl_set_mode(priv, ctx);
- if (!ctx->always_active)
- ctx->is_active = false;
- }
-
- /*
- * When removing the IBSS interface, overwrite the
- * BT traffic load with the stored one from the last
- * notification, if any. If this is a device that
- * doesn't implement this, this has no effect since
- * both values are the same and zero.
- */
- if (vif->type == NL80211_IFTYPE_ADHOC)
- priv->bt_traffic_load = priv->last_bt_traffic_load;
-}
-
-void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- mutex_lock(&priv->shrd->mutex);
-
- if (WARN_ON(ctx->vif != vif)) {
- struct iwl_rxon_context *tmp;
- IWL_ERR(priv, "ctx->vif = %p, vif = %p\n", ctx->vif, vif);
- for_each_context(priv, tmp)
- IWL_ERR(priv, "\tID = %d:\tctx = %p\tctx->vif = %p\n",
- tmp->ctxid, tmp, tmp->vif);
- }
- ctx->vif = NULL;
-
- iwl_teardown_interface(priv, vif, false);
-
- mutex_unlock(&priv->shrd->mutex);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
-}
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -1649,97 +1411,13 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
return 0;
}
-int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum nl80211_iftype newtype, bool newp2p)
-{
- struct iwl_priv *priv = hw->priv;
- struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
- struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS];
- struct iwl_rxon_context *tmp;
- enum nl80211_iftype newviftype = newtype;
- u32 interface_modes;
- int err;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- newtype = ieee80211_iftype_p2p(newtype, newp2p);
-
- mutex_lock(&priv->shrd->mutex);
-
- if (!ctx->vif || !iwl_is_ready_rf(priv->shrd)) {
- /*
- * Huh? But wait ... this can maybe happen when
- * we're in the middle of a firmware restart!
- */
- err = -EBUSY;
- goto out;
- }
-
- interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
-
- if (!(interface_modes & BIT(newtype))) {
- err = -EBUSY;
- goto out;
- }
-
- /*
- * Refuse a change that should be done by moving from the PAN
- * context to the BSS context instead, if the BSS context is
- * available and can support the new interface type.
- */
- if (ctx->ctxid == IWL_RXON_CTX_PAN && !bss_ctx->vif &&
- (bss_ctx->interface_modes & BIT(newtype) ||
- bss_ctx->exclusive_interface_modes & BIT(newtype))) {
- BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
- err = -EBUSY;
- goto out;
- }
-
- if (ctx->exclusive_interface_modes & BIT(newtype)) {
- for_each_context(priv, tmp) {
- if (ctx == tmp)
- continue;
-
- if (!tmp->vif)
- continue;
-
- /*
- * The current mode switch would be exclusive, but
- * another context is active ... refuse the switch.
- */
- err = -EBUSY;
- goto out;
- }
- }
-
- /* success */
- iwl_teardown_interface(priv, vif, true);
- vif->type = newviftype;
- vif->p2p = newp2p;
- err = iwl_setup_interface(priv, ctx);
- WARN_ON(err);
- /*
- * We've switched internally, but submitting to the
- * device may have failed for some reason. Mask this
- * error, because otherwise mac80211 will not switch
- * (and set the interface type back) and we'll be
- * out of sync with it.
- */
- err = 0;
-
- out:
- mutex_unlock(&priv->shrd->mutex);
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return err;
-}
int iwl_cmd_echo_test(struct iwl_priv *priv)
{
int ret;
struct iwl_host_cmd cmd = {
.id = REPLY_ECHO,
+ .len = { 0 },
.flags = CMD_SYNC,
};
@@ -1783,7 +1461,7 @@ void iwl_bg_watchdog(unsigned long data)
if (iwl_is_rfkill(priv->shrd))
return;
- timeout = priv->cfg->base_params->wd_timeout;
+ timeout = cfg(priv)->base_params->wd_timeout;
if (timeout == 0)
return;
@@ -1808,11 +1486,11 @@ void iwl_bg_watchdog(unsigned long data)
void iwl_setup_watchdog(struct iwl_priv *priv)
{
- unsigned int timeout = priv->cfg->base_params->wd_timeout;
+ unsigned int timeout = cfg(priv)->base_params->wd_timeout;
if (!iwlagn_mod_params.wd_disable) {
/* use system default */
- if (timeout && !priv->cfg->base_params->wd_disable)
+ if (timeout && !cfg(priv)->base_params->wd_disable)
mod_timer(&priv->watchdog,
jiffies +
msecs_to_jiffies(IWL_WD_TICK(timeout)));
@@ -1902,34 +1580,6 @@ __le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
return cpu_to_le32(res);
}
-void iwl_start_tx_ba_trans_ready(struct iwl_priv *priv,
- enum iwl_rxon_context_id ctx,
- u8 sta_id, u8 tid)
-{
- struct ieee80211_vif *vif;
- u8 *addr = priv->stations[sta_id].sta.sta.addr;
-
- if (ctx == NUM_IWL_RXON_CTX)
- ctx = priv->stations[sta_id].ctxid;
- vif = priv->contexts[ctx].vif;
-
- ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
-}
-
-void iwl_stop_tx_ba_trans_ready(struct iwl_priv *priv,
- enum iwl_rxon_context_id ctx,
- u8 sta_id, u8 tid)
-{
- struct ieee80211_vif *vif;
- u8 *addr = priv->stations[sta_id].sta.sta.addr;
-
- if (ctx == NUM_IWL_RXON_CTX)
- ctx = priv->stations[sta_id].ctxid;
- vif = priv->contexts[ctx].vif;
-
- ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
-}
-
void iwl_set_hw_rfkill_state(struct iwl_priv *priv, bool state)
{
wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
@@ -1937,8 +1587,7 @@ void iwl_set_hw_rfkill_state(struct iwl_priv *priv, bool state)
void iwl_nic_config(struct iwl_priv *priv)
{
- priv->cfg->lib->nic_config(priv);
-
+ cfg(priv)->lib->nic_config(priv);
}
void iwl_free_skb(struct iwl_priv *priv, struct sk_buff *skb)
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index f2fc288..7bf76ab 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -142,8 +142,6 @@ struct iwl_base_params {
* @bt_init_traffic_load: specify initial bt traffic load
* @bt_prio_boost: default bt priority boost value
* @agg_time_limit: maximum number of uSec in aggregation
- * @ampdu_factor: Maximum A-MPDU length factor
- * @ampdu_density: Minimum A-MPDU spacing
* @bt_sco_disable: uCode should not response to BT in SCO/ESCO mode
*/
struct iwl_bt_params {
@@ -151,8 +149,6 @@ struct iwl_bt_params {
u8 bt_init_traffic_load;
u8 bt_prio_boost;
u16 agg_time_limit;
- u8 ampdu_factor;
- u8 ampdu_density;
bool bt_sco_disable;
bool bt_session_2;
};
@@ -165,84 +161,10 @@ struct iwl_ht_params {
enum ieee80211_smps_mode smps_mode;
};
-/**
- * struct iwl_cfg
- * @name: Offical name of the device
- * @fw_name_pre: Firmware filename prefix. The api version and extension
- * (.ucode) will be added to filename before loading from disk. The
- * filename is constructed as fw_name_pre<api>.ucode.
- * @ucode_api_max: Highest version of uCode API supported by driver.
- * @ucode_api_ok: oldest version of the uCode API that is OK to load
- * without a warning, for use in transitions
- * @ucode_api_min: Lowest version of uCode API supported by driver.
- * @valid_tx_ant: valid transmit antenna
- * @valid_rx_ant: valid receive antenna
- * @sku: sku information from EEPROM
- * @eeprom_ver: EEPROM version
- * @eeprom_calib_ver: EEPROM calibration version
- * @lib: pointer to the lib ops
- * @additional_nic_config: additional nic configuration
- * @base_params: pointer to basic parameters
- * @ht_params: point to ht patameters
- * @bt_params: pointer to bt parameters
- * @pa_type: used by 6000 series only to identify the type of Power Amplifier
- * @need_dc_calib: need to perform init dc calibration
- * @need_temp_offset_calib: need to perform temperature offset calibration
- * @scan_antennas: available antenna for scan operation
- * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
- * @adv_pm: advance power management
- * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
- * @internal_wimax_coex: internal wifi/wimax combo device
- * @iq_invert: I/Q inversion
- * @temp_offset_v2: support v2 of temperature offset calibration
- *
- * We enable the driver to be backward compatible wrt API version. The
- * driver specifies which APIs it supports (with @ucode_api_max being the
- * highest and @ucode_api_min the lowest). Firmware will only be loaded if
- * it has a supported API version.
- *
- * The ideal usage of this infrastructure is to treat a new ucode API
- * release as a new hardware revision.
- */
-struct iwl_cfg {
- /* params specific to an individual device within a device family */
- const char *name;
- const char *fw_name_pre;
- const unsigned int ucode_api_max;
- const unsigned int ucode_api_ok;
- const unsigned int ucode_api_min;
- u8 valid_tx_ant;
- u8 valid_rx_ant;
- u16 sku;
- u16 eeprom_ver;
- u16 eeprom_calib_ver;
- const struct iwl_lib_ops *lib;
- void (*additional_nic_config)(struct iwl_priv *priv);
- /* params not likely to change within a device family */
- struct iwl_base_params *base_params;
- /* params likely to change within a device family */
- struct iwl_ht_params *ht_params;
- struct iwl_bt_params *bt_params;
- enum iwl_pa_type pa_type; /* if used set to IWL_PA_SYSTEM */
- const bool need_dc_calib; /* if used set to true */
- const bool need_temp_offset_calib; /* if used set to true */
- u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
- enum iwl_led_mode led_mode;
- const bool adv_pm;
- const bool rx_with_siso_diversity;
- const bool internal_wimax_coex;
- const bool iq_invert;
- const bool temp_offset_v2;
-};
-
/***************************
* L i b *
***************************/
-int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, u16 queue,
- const struct ieee80211_tx_queue_params *params);
-int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw);
void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
int hw_decrypt);
int iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
@@ -262,13 +184,6 @@ bool iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
void iwl_connection_init_rx_config(struct iwl_priv *priv,
struct iwl_rxon_context *ctx);
void iwl_set_rate(struct iwl_priv *priv);
-int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
-int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum nl80211_iftype newtype, bool newp2p);
int iwl_cmd_echo_test(struct iwl_priv *priv);
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_alloc_traffic_mem(struct iwl_priv *priv);
@@ -325,9 +240,6 @@ void iwl_init_scan_params(struct iwl_priv *priv);
int iwl_scan_cancel(struct iwl_priv *priv);
void iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
void iwl_force_scan_end(struct iwl_priv *priv);
-int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_scan_request *req);
void iwl_internal_short_hw_scan(struct iwl_priv *priv);
int iwl_force_reset(struct iwl_priv *priv, int mode, bool external);
u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
@@ -381,8 +293,8 @@ static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
static inline bool iwl_advanced_bt_coexist(struct iwl_priv *priv)
{
- return priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist;
+ return cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist;
}
static inline void iwl_enable_rfkill_int(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index b9f3267..fbc3095 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -284,8 +284,8 @@
#define CSR_HW_REV_TYPE_6x35 CSR_HW_REV_TYPE_6x05
#define CSR_HW_REV_TYPE_2x30 (0x00000C0)
#define CSR_HW_REV_TYPE_2x00 (0x0000100)
-#define CSR_HW_REV_TYPE_200 (0x0000110)
-#define CSR_HW_REV_TYPE_230 (0x0000120)
+#define CSR_HW_REV_TYPE_105 (0x0000110)
+#define CSR_HW_REV_TYPE_135 (0x0000120)
#define CSR_HW_REV_TYPE_NONE (0x00001F0)
/* EEPROM REG */
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 69a77e2..f8fc239 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -47,20 +47,21 @@ do { \
} while (0)
#ifdef CONFIG_IWLWIFI_DEBUG
-#define IWL_DEBUG(m, level, fmt, args...) \
+#define IWL_DEBUG(m, level, fmt, ...) \
do { \
if (iwl_get_debug_level((m)->shrd) & (level)) \
- dev_printk(KERN_ERR, bus(m)->dev, \
- "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
- __func__ , ## args); \
+ dev_err(bus(m)->dev, "%c %s " fmt, \
+ in_interrupt() ? 'I' : 'U', __func__, \
+ ##__VA_ARGS__); \
} while (0)
-#define IWL_DEBUG_LIMIT(m, level, fmt, args...) \
+#define IWL_DEBUG_LIMIT(m, level, fmt, ...) \
do { \
- if (iwl_get_debug_level((m)->shrd) & (level) && net_ratelimit())\
- dev_printk(KERN_ERR, bus(m)->dev, \
- "%c %s " fmt, in_interrupt() ? 'I' : 'U', \
- __func__ , ## args); \
+ if (iwl_get_debug_level((m)->shrd) & (level) && \
+ net_ratelimit()) \
+ dev_err(bus(m)->dev, "%c %s " fmt, \
+ in_interrupt() ? 'I' : 'U', __func__, \
+ ##__VA_ARGS__); \
} while (0)
#define iwl_print_hex_dump(m, level, p, len) \
@@ -70,10 +71,29 @@ do { \
DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
} while (0)
+#define IWL_DEBUG_QUIET_RFKILL(p, fmt, ...) \
+do { \
+ if (!iwl_is_rfkill(p->shrd)) \
+ dev_err(bus(p)->dev, "%s%c %s " fmt, \
+ "", \
+ in_interrupt() ? 'I' : 'U', __func__, \
+ ##__VA_ARGS__); \
+ else if (iwl_get_debug_level(p->shrd) & IWL_DL_RADIO) \
+ dev_err(bus(p)->dev, "%s%c %s " fmt, \
+ "(RFKILL) ", \
+ in_interrupt() ? 'I' : 'U', __func__, \
+ ##__VA_ARGS__); \
+} while (0)
+
#else
#define IWL_DEBUG(m, level, fmt, args...)
#define IWL_DEBUG_LIMIT(m, level, fmt, args...)
#define iwl_print_hex_dump(m, level, p, len)
+#define IWL_DEBUG_QUIET_RFKILL(p, fmt, args...) \
+do { \
+ if (!iwl_is_rfkill(p->shrd)) \
+ IWL_ERR(p, fmt, ##args); \
+} while (0)
#endif /* CONFIG_IWLWIFI_DEBUG */
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -114,48 +134,43 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
*/
/* 0x0000000F - 0x00000001 */
-#define IWL_DL_INFO (1 << 0)
-#define IWL_DL_MAC80211 (1 << 1)
-#define IWL_DL_HCMD (1 << 2)
-#define IWL_DL_STATE (1 << 3)
+#define IWL_DL_INFO 0x00000001
+#define IWL_DL_MAC80211 0x00000002
+#define IWL_DL_HCMD 0x00000004
+#define IWL_DL_STATE 0x00000008
/* 0x000000F0 - 0x00000010 */
-#define IWL_DL_MACDUMP (1 << 4)
-#define IWL_DL_HCMD_DUMP (1 << 5)
-#define IWL_DL_EEPROM (1 << 6)
-#define IWL_DL_RADIO (1 << 7)
+#define IWL_DL_EEPROM 0x00000040
+#define IWL_DL_RADIO 0x00000080
/* 0x00000F00 - 0x00000100 */
-#define IWL_DL_POWER (1 << 8)
-#define IWL_DL_TEMP (1 << 9)
-/* reserved (1 << 10) */
-#define IWL_DL_SCAN (1 << 11)
+#define IWL_DL_POWER 0x00000100
+#define IWL_DL_TEMP 0x00000200
+#define IWL_DL_SCAN 0x00000800
/* 0x0000F000 - 0x00001000 */
-#define IWL_DL_ASSOC (1 << 12)
-#define IWL_DL_DROP (1 << 13)
-/* reserved (1 << 14) */
-#define IWL_DL_COEX (1 << 15)
+#define IWL_DL_ASSOC 0x00001000
+#define IWL_DL_DROP 0x00002000
+#define IWL_DL_COEX 0x00008000
/* 0x000F0000 - 0x00010000 */
-#define IWL_DL_FW (1 << 16)
-#define IWL_DL_RF_KILL (1 << 17)
-#define IWL_DL_FW_ERRORS (1 << 18)
-#define IWL_DL_LED (1 << 19)
+#define IWL_DL_FW 0x00010000
+#define IWL_DL_RF_KILL 0x00020000
+#define IWL_DL_FW_ERRORS 0x00040000
+#define IWL_DL_LED 0x00080000
/* 0x00F00000 - 0x00100000 */
-#define IWL_DL_RATE (1 << 20)
-#define IWL_DL_CALIB (1 << 21)
-#define IWL_DL_WEP (1 << 22)
-#define IWL_DL_TX (1 << 23)
+#define IWL_DL_RATE 0x00100000
+#define IWL_DL_CALIB 0x00200000
+#define IWL_DL_WEP 0x00400000
+#define IWL_DL_TX 0x00800000
/* 0x0F000000 - 0x01000000 */
-#define IWL_DL_RX (1 << 24)
-#define IWL_DL_ISR (1 << 25)
-#define IWL_DL_HT (1 << 26)
+#define IWL_DL_RX 0x01000000
+#define IWL_DL_ISR 0x02000000
+#define IWL_DL_HT 0x04000000
/* 0xF0000000 - 0x10000000 */
-#define IWL_DL_11H (1 << 28)
-#define IWL_DL_STATS (1 << 29)
-#define IWL_DL_TX_REPLY (1 << 30)
-#define IWL_DL_QOS (1 << 31)
+#define IWL_DL_11H 0x10000000
+#define IWL_DL_STATS 0x20000000
+#define IWL_DL_TX_REPLY 0x40000000
+#define IWL_DL_TX_QUEUES 0x80000000
#define IWL_DEBUG_INFO(p, f, a...) IWL_DEBUG(p, IWL_DL_INFO, f, ## a)
#define IWL_DEBUG_MAC80211(p, f, a...) IWL_DEBUG(p, IWL_DL_MAC80211, f, ## a)
-#define IWL_DEBUG_MACDUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_MACDUMP, f, ## a)
#define IWL_DEBUG_TEMP(p, f, a...) IWL_DEBUG(p, IWL_DL_TEMP, f, ## a)
#define IWL_DEBUG_SCAN(p, f, a...) IWL_DEBUG(p, IWL_DL_SCAN, f, ## a)
#define IWL_DEBUG_RX(p, f, a...) IWL_DEBUG(p, IWL_DL_RX, f, ## a)
@@ -164,7 +179,6 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
#define IWL_DEBUG_LED(p, f, a...) IWL_DEBUG(p, IWL_DL_LED, f, ## a)
#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
-#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
@@ -186,9 +200,7 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
#define IWL_DEBUG_STATS_LIMIT(p, f, a...) \
IWL_DEBUG_LIMIT(p, IWL_DL_STATS, f, ## a)
#define IWL_DEBUG_TX_REPLY(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_REPLY, f, ## a)
-#define IWL_DEBUG_TX_REPLY_LIMIT(p, f, a...) \
- IWL_DEBUG_LIMIT(p, IWL_DL_TX_REPLY, f, ## a)
-#define IWL_DEBUG_QOS(p, f, a...) IWL_DEBUG(p, IWL_DL_QOS, f, ## a)
+#define IWL_DEBUG_TX_QUEUES(p, f, a...) IWL_DEBUG(p, IWL_DL_TX_QUEUES, f, ## a)
#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
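The IWL_DL_* constants above encode the same single-bit flags as the old (1 << n) forms, only written out in hex, and iwl_get_debug_level() returns a mask that each message's level is ANDed against. A standalone sketch of that style of check, using a hypothetical debug_level mask:

    #include <stdio.h>

    #define DL_INFO  0x00000001     /* same bit as (1 << 0) */
    #define DL_RADIO 0x00000080     /* same bit as (1 << 7) */
    #define DL_SCAN  0x00000800     /* same bit as (1 << 11) */

    int main(void)
    {
            unsigned int debug_level = DL_INFO | DL_RADIO;  /* hypothetical mask */

            if (debug_level & DL_RADIO)
                    printf("radio messages enabled\n");
            if (!(debug_level & DL_SCAN))
                    printf("scan messages disabled\n");
            return 0;
    }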
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index a1670e3..04a3343 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -234,11 +234,12 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
/* default is to dump the entire data segment */
if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
+ struct iwl_trans *trans = trans(priv);
priv->dbgfs_sram_offset = 0x800000;
- if (priv->ucode_type == IWL_UCODE_INIT)
- priv->dbgfs_sram_len = priv->ucode_init.data.len;
+ if (trans->shrd->ucode_type == IWL_UCODE_INIT)
+ priv->dbgfs_sram_len = trans->ucode_init.data.len;
else
- priv->dbgfs_sram_len = priv->ucode_rt.data.len;
+ priv->dbgfs_sram_len = trans->ucode_rt.data.len;
}
len = priv->dbgfs_sram_len;
@@ -341,7 +342,7 @@ static ssize_t iwl_dbgfs_wowlan_sram_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos,
priv->wowlan_sram,
- priv->ucode_wowlan.data.len);
+ trans(priv)->ucode_wowlan.data.len);
}
static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
@@ -371,15 +372,13 @@ static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
i, station->sta.sta.addr,
station->sta.station_flags_msk);
pos += scnprintf(buf + pos, bufsz - pos,
- "TID\tseq_num\ttxq_id\ttfds\trate_n_flags\n");
+ "TID\tseq_num\trate_n_flags\n");
for (j = 0; j < IWL_MAX_TID_COUNT; j++) {
- tid_data = &priv->shrd->tid_data[i][j];
+ tid_data = &priv->tid_data[i][j];
pos += scnprintf(buf + pos, bufsz - pos,
- "%d:\t%#x\t%#x\t%u\t%#x",
+ "%d:\t%#x\t%#x",
j, tid_data->seq_number,
- tid_data->agg.txq_id,
- tid_data->tfds_in_queue,
tid_data->agg.rate_n_flags);
if (tid_data->agg.wait_for_ba)
@@ -407,7 +406,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
const u8 *ptr;
char *buf;
u16 eeprom_ver;
- size_t eeprom_len = priv->cfg->base_params->eeprom_size;
+ size_t eeprom_len = cfg(priv)->base_params->eeprom_size;
buf_size = 4 * eeprom_len + 256;
if (eeprom_len % 16) {
@@ -415,7 +414,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
return -ENODATA;
}
- ptr = priv->eeprom;
+ ptr = priv->shrd->eeprom;
if (!ptr) {
IWL_ERR(priv, "Invalid EEPROM/OTP memory\n");
return -ENOMEM;
@@ -427,10 +426,10 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
IWL_ERR(priv, "Can not allocate Buffer\n");
return -ENOMEM;
}
- eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
+ eeprom_ver = iwl_eeprom_query16(priv->shrd, EEPROM_VERSION);
pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s, "
"version: 0x%x\n",
- (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
+ (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP)
? "OTP" : "EEPROM", eeprom_ver);
for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
@@ -1541,15 +1540,15 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
if (tx->tx_power.ant_a || tx->tx_power.ant_b || tx->tx_power.ant_c) {
pos += scnprintf(buf + pos, bufsz - pos,
"tx power: (1/2 dB step)\n");
- if ((priv->cfg->valid_tx_ant & ANT_A) && tx->tx_power.ant_a)
+ if ((cfg(priv)->valid_tx_ant & ANT_A) && tx->tx_power.ant_a)
pos += scnprintf(buf + pos, bufsz - pos,
fmt_hex, "antenna A:",
tx->tx_power.ant_a);
- if ((priv->cfg->valid_tx_ant & ANT_B) && tx->tx_power.ant_b)
+ if ((cfg(priv)->valid_tx_ant & ANT_B) && tx->tx_power.ant_b)
pos += scnprintf(buf + pos, bufsz - pos,
fmt_hex, "antenna B:",
tx->tx_power.ant_b);
- if ((priv->cfg->valid_tx_ant & ANT_C) && tx->tx_power.ant_c)
+ if ((cfg(priv)->valid_tx_ant & ANT_C) && tx->tx_power.ant_c)
pos += scnprintf(buf + pos, bufsz - pos,
fmt_hex, "antenna C:",
tx->tx_power.ant_c);
@@ -2220,7 +2219,7 @@ static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file,
const size_t bufsz = sizeof(buf);
pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
- priv->cfg->base_params->plcp_delta_threshold);
+ cfg(priv)->base_params->plcp_delta_threshold);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@@ -2242,10 +2241,10 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
return -EINVAL;
if ((plcp < IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
(plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX))
- priv->cfg->base_params->plcp_delta_threshold =
+ cfg(priv)->base_params->plcp_delta_threshold =
IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE;
else
- priv->cfg->base_params->plcp_delta_threshold = plcp;
+ cfg(priv)->base_params->plcp_delta_threshold = plcp;
return count;
}
@@ -2347,7 +2346,7 @@ static ssize_t iwl_dbgfs_wd_timeout_write(struct file *file,
if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
timeout = IWL_DEF_WD_TIMEOUT;
- priv->cfg->base_params->wd_timeout = timeout;
+ cfg(priv)->base_params->wd_timeout = timeout;
iwl_setup_watchdog(priv);
return count;
}
@@ -2407,10 +2406,10 @@ static ssize_t iwl_dbgfs_protection_mode_read(struct file *file,
char buf[40];
const size_t bufsz = sizeof(buf);
- if (priv->cfg->ht_params)
+ if (cfg(priv)->ht_params)
pos += scnprintf(buf + pos, bufsz - pos,
"use %s for aggregation\n",
- (priv->cfg->ht_params->use_rts_for_aggregation) ?
+ (cfg(priv)->ht_params->use_rts_for_aggregation) ?
"rts/cts" : "cts-to-self");
else
pos += scnprintf(buf + pos, bufsz - pos, "N/A");
@@ -2427,7 +2426,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
int buf_size;
int rts;
- if (!priv->cfg->ht_params)
+ if (!cfg(priv)->ht_params)
return -EINVAL;
memset(buf, 0, sizeof(buf));
@@ -2437,9 +2436,9 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
if (sscanf(buf, "%d", &rts) != 1)
return -EINVAL;
if (rts)
- priv->cfg->ht_params->use_rts_for_aggregation = true;
+ cfg(priv)->ht_params->use_rts_for_aggregation = true;
else
- priv->cfg->ht_params->use_rts_for_aggregation = false;
+ cfg(priv)->ht_params->use_rts_for_aggregation = false;
return count;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 6c00a44..e54a4d1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -60,11 +60,10 @@ struct iwl_tx_queue;
/* Default noise level to report when noise measurement is not available.
* This may be because we're:
- * 1) Not associated (4965, no beacon statistics being sent to driver)
+ * 1) Not associated (no beacon statistics being sent to driver)
* 2) Scanning (noise measurement does not apply to associated channel)
- * 3) Receiving CCK (3945 delivers noise info only for OFDM frames)
* Use default noise value of -127 ... this is below the range of measurable
- * Rx dBm for either 3945 or 4965, so it can indicate "unmeasurable" to user.
+ * Rx dBm for all agn devices, so it can indicate "unmeasurable" to user.
* Also, -127 works better than 0 when averaging frames with/without
* noise info (e.g. averaging might be done in app); measured dBm values are
* always negative ... using a negative value as the default keeps all
@@ -190,6 +189,69 @@ struct iwl_qos_info {
struct iwl_qosparam_cmd def_qos_parm;
};
+/**
+ * enum iwl_agg_state
+ *
+ * The state machine of the BA agreement establishment / tear down.
+ * These states relate to a specific RA / TID.
+ *
+ * @IWL_AGG_OFF: aggregation is not used
+ * @IWL_AGG_ON: aggregation session is up
+ * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
+ * HW queue to be empty of packets for this RA / TID.
+ * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the
+ * HW queue to be empty of packets for this RA / TID.
+ */
+enum iwl_agg_state {
+ IWL_AGG_OFF = 0,
+ IWL_AGG_ON,
+ IWL_EMPTYING_HW_QUEUE_ADDBA,
+ IWL_EMPTYING_HW_QUEUE_DELBA,
+};
+
+/**
+ * struct iwl_ht_agg - aggregation state machine
+ *
+ * This struct holds the states for the BA agreement establishment and tear
+ * down. It also holds the state during the BA session itself. This struct is
+ * duplicated for each RA / TID.
+ *
+ * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
+ * Tx response (REPLY_TX), and the block ack notification
+ * (REPLY_COMPRESSED_BA).
+ * @state: state of the BA agreement establishment / tear down.
+ * @txq_id: Tx queue used by the BA session - used by the transport layer.
+ * Needed by the upper layer for debugfs only.
+ * @ssn: the first packet to be sent in AGG HW queue in Tx AGG start flow, or
+ * the first packet to be sent in legacy HW queue in Tx AGG stop flow.
+ * Basically when next_reclaimed reaches ssn, we can tell mac80211 that
+ * we are ready to finish the Tx AGG stop / start flow.
+ * @wait_for_ba: Expect block-ack before next Tx reply
+ */
+struct iwl_ht_agg {
+ u32 rate_n_flags;
+ enum iwl_agg_state state;
+ u16 txq_id;
+ u16 ssn;
+ bool wait_for_ba;
+};
+
+/**
+ * struct iwl_tid_data - one for each RA / TID
+ *
+ * This struct holds the state for each RA / TID.
+ *
+ * @seq_number: the next WiFi sequence number to use
+ * @next_reclaimed: the WiFi sequence number of the next packet to be acked.
+ * This is basically (last acked packet++).
+ * @agg: aggregation state machine
+ */
+struct iwl_tid_data {
+ u16 seq_number;
+ u16 next_reclaimed;
+ struct iwl_ht_agg agg;
+};
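The kernel-doc above says the Tx aggregation start or stop flow can finish once next_reclaimed catches up with the stored ssn while the state machine is in one of the EMPTYING states. A self-contained sketch of that condition, using trimmed copies of the types for illustration only, not the driver's actual logic:

    /* Trimmed copies of the enum/struct above, only so the sketch compiles alone. */
    enum agg_state { AGG_OFF, AGG_ON, AGG_EMPTYING_ADDBA, AGG_EMPTYING_DELBA };

    struct ht_agg {
            enum agg_state state;
            unsigned short ssn;
    };

    /* The AGG start/stop flow may finish once the queue drained down to ssn. */
    static int agg_flow_can_complete(const struct ht_agg *agg,
                                     unsigned short next_reclaimed)
    {
            if (agg->state != AGG_EMPTYING_ADDBA && agg->state != AGG_EMPTYING_DELBA)
                    return 0;
            return next_reclaimed == agg->ssn;
    }

    int main(void)
    {
            struct ht_agg agg = { AGG_EMPTYING_DELBA, 120 };

            return agg_flow_can_complete(&agg, 120) ? 0 : 1;
    }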
+
/*
* Structure should be accessed with sta_lock held. When station addition
* is in progress (IWL_STA_UCODE_INPROGRESS) it is possible to access only
@@ -230,17 +292,6 @@ struct iwl_vif_priv {
u8 ibss_bssid_sta_id;
};
-/* one for each uCode image (inst/data, boot/init/runtime) */
-struct fw_desc {
- void *v_addr; /* access by driver */
- dma_addr_t p_addr; /* access by card's busmaster DMA */
- u32 len; /* bytes */
-};
-
-struct fw_img {
- struct fw_desc code, data;
-};
-
/* v1/v2 uCode file layout */
struct iwl_ucode_header {
__le32 ver; /* major/minor/API/serial */
@@ -452,29 +503,6 @@ enum iwlagn_chain_noise_state {
IWL_CHAIN_NOISE_DONE,
};
-
-/*
- * enum iwl_calib
- * defines the order in which results of initial calibrations
- * should be sent to the runtime uCode
- */
-enum iwl_calib {
- IWL_CALIB_XTAL,
- IWL_CALIB_DC,
- IWL_CALIB_LO,
- IWL_CALIB_TX_IQ,
- IWL_CALIB_TX_IQ_PERD,
- IWL_CALIB_BASE_BAND,
- IWL_CALIB_TEMP_OFFSET,
- IWL_CALIB_MAX
-};
-
-/* Opaque calibration results */
-struct iwl_calib_result {
- void *buf;
- size_t buf_len;
-};
-
/* Sensitivity calib data */
struct iwl_sensitivity_data {
u32 auto_corr_ofdm;
@@ -547,16 +575,6 @@ enum iwl_access_mode {
IWL_OTP_ACCESS_RELATIVE,
};
-/**
- * enum iwl_pa_type - Power Amplifier type
- * @IWL_PA_SYSTEM: based on uCode configuration
- * @IWL_PA_INTERNAL: use Internal only
- */
-enum iwl_pa_type {
- IWL_PA_SYSTEM = 0,
- IWL_PA_INTERNAL = 1,
-};
-
/* reply_tx_statistics (for _agn devices) */
struct reply_tx_error_statistics {
u32 pp_delay;
@@ -714,35 +732,6 @@ struct iwl_force_reset {
*/
#define IWLAGN_EXT_BEACON_TIME_POS 22
-/**
- * struct iwl_notification_wait - notification wait entry
- * @list: list head for global list
- * @fn: function called with the notification
- * @cmd: command ID
- *
- * This structure is not used directly, to wait for a
- * notification declare it on the stack, and call
- * iwlagn_init_notification_wait() with appropriate
- * parameters. Then do whatever will cause the ucode
- * to notify the driver, and to wait for that then
- * call iwlagn_wait_notification().
- *
- * Each notification is one-shot. If at some point we
- * need to support multi-shot notifications (which
- * can't be allocated on the stack) we need to modify
- * the code for them.
- */
-struct iwl_notification_wait {
- struct list_head list;
-
- void (*fn)(struct iwl_priv *priv, struct iwl_rx_packet *pkt,
- void *data);
- void *fn_data;
-
- u8 cmd;
- bool triggered, aborted;
-};
-
struct iwl_rxon_context {
struct ieee80211_vif *vif;
@@ -805,14 +794,7 @@ enum iwl_scan_type {
IWL_SCAN_ROC,
};
-enum iwlagn_ucode_type {
- IWL_UCODE_NONE,
- IWL_UCODE_REGULAR,
- IWL_UCODE_INIT,
- IWL_UCODE_WOWLAN,
-};
-
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
struct iwl_testmode_trace {
u32 buff_size;
u32 total_size;
@@ -822,8 +804,20 @@ struct iwl_testmode_trace {
dma_addr_t dma_addr;
bool trace_enabled;
};
+struct iwl_testmode_sram {
+ u32 buff_size;
+ u32 num_chunks;
+ u8 *buff_addr;
+ bool sram_readed;
+};
#endif
+struct iwl_wipan_noa_data {
+ struct rcu_head rcu_head;
+ u32 length;
+ u8 data[];
+};
+
struct iwl_priv {
/*data shared among all the driver's layers */
@@ -835,7 +829,6 @@ struct iwl_priv {
struct ieee80211_channel *ieee_channels;
struct ieee80211_rate *ieee_rates;
struct kmem_cache *tx_cmd_pool;
- struct iwl_cfg *cfg;
enum ieee80211_band band;
@@ -880,8 +873,7 @@ struct iwl_priv {
s32 temperature; /* Celsius */
s32 last_temperature;
- /* init calibration results */
- struct iwl_calib_result calib_results[IWL_CALIB_MAX];
+ struct iwl_wipan_noa_data __rcu *noa_data;
/* Scan related variables */
unsigned long scan_start;
@@ -907,23 +899,12 @@ struct iwl_priv {
u32 ucode_ver; /* version of ucode, copy of
iwl_ucode.ver */
- struct fw_img ucode_rt;
- struct fw_img ucode_init;
- struct fw_img ucode_wowlan;
-
- enum iwlagn_ucode_type ucode_type;
- u8 ucode_write_complete; /* the image write is complete */
char firmware_name[25];
struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
__le16 switch_channel;
- struct {
- u32 error_event_table;
- u32 log_event_table;
- } device_pointers;
-
u16 active_rate;
u8 start_calib;
@@ -951,17 +932,13 @@ struct iwl_priv {
int num_stations;
struct iwl_station_entry stations[IWLAGN_STATION_COUNT];
unsigned long ucode_key_table;
+ struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
u8 mac80211_registered;
/* Indication if ieee80211_ops->open has been called */
u8 is_open;
- /* eeprom -- this is in the card's little endian byte order */
- u8 *eeprom;
- int nvm_device_type;
- struct iwl_eeprom_calib_info *calib_info;
-
enum nl80211_iftype iw_mode;
/* Last Rx'd beacon timestamp */
@@ -1017,10 +994,6 @@ struct iwl_priv {
/* counts reply_tx error */
struct reply_tx_error_statistics reply_tx_stats;
struct reply_agg_tx_error_statistics reply_agg_tx_stats;
- /* notification wait support */
- struct list_head notif_waits;
- spinlock_t notif_wait_lock;
- wait_queue_head_t notif_waitq;
/* remain-on-channel offload support */
struct ieee80211_channel *hw_roc_channel;
@@ -1098,8 +1071,9 @@ struct iwl_priv {
struct led_classdev led;
unsigned long blink_on, blink_off;
bool led_registered;
-#ifdef CONFIG_IWLWIFI_DEVICE_SVTOOL
+#ifdef CONFIG_IWLWIFI_DEVICE_TESTMODE
struct iwl_testmode_trace testmode_trace;
+ struct iwl_testmode_sram testmode_sram;
u32 tm_fixed_rate;
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index a635a7e..2a2c8de 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -28,7 +28,7 @@
/* sparse doesn't like tracepoint macros */
#ifndef __CHECKER__
-#include "iwl-dev.h"
+#include "iwl-trans.h"
#define CREATE_TRACE_POINTS
#include "iwl-devtrace.h"
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index 8a51c5c..9b212a8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -29,7 +29,6 @@
#include <linux/tracepoint.h>
-struct iwl_priv;
#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__)
#undef TRACE_EVENT
@@ -37,14 +36,14 @@ struct iwl_priv;
static inline void trace_ ## name(proto) {}
#endif
-#define PRIV_ENTRY __field(struct iwl_priv *, priv)
+#define PRIV_ENTRY __field(void *, priv)
#define PRIV_ASSIGN __entry->priv = priv
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi_io
TRACE_EVENT(iwlwifi_dev_ioread32,
- TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
+ TP_PROTO(void *priv, u32 offs, u32 val),
TP_ARGS(priv, offs, val),
TP_STRUCT__entry(
PRIV_ENTRY
@@ -60,7 +59,7 @@ TRACE_EVENT(iwlwifi_dev_ioread32,
);
TRACE_EVENT(iwlwifi_dev_iowrite8,
- TP_PROTO(struct iwl_priv *priv, u32 offs, u8 val),
+ TP_PROTO(void *priv, u32 offs, u8 val),
TP_ARGS(priv, offs, val),
TP_STRUCT__entry(
PRIV_ENTRY
@@ -76,7 +75,7 @@ TRACE_EVENT(iwlwifi_dev_iowrite8,
);
TRACE_EVENT(iwlwifi_dev_iowrite32,
- TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
+ TP_PROTO(void *priv, u32 offs, u32 val),
TP_ARGS(priv, offs, val),
TP_STRUCT__entry(
PRIV_ENTRY
@@ -91,11 +90,40 @@ TRACE_EVENT(iwlwifi_dev_iowrite32,
TP_printk("[%p] write io[%#x] = %#x)", __entry->priv, __entry->offs, __entry->val)
);
+TRACE_EVENT(iwlwifi_dev_irq,
+ TP_PROTO(void *priv),
+ TP_ARGS(priv),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ ),
+ /* TP_printk("") doesn't compile */
+ TP_printk("%d", 0)
+);
+
+TRACE_EVENT(iwlwifi_dev_ict_read,
+ TP_PROTO(void *priv, u32 index, u32 value),
+ TP_ARGS(priv, index, value),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+ __field(u32, index)
+ __field(u32, value)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->index = index;
+ __entry->value = value;
+ ),
+ TP_printk("read ict[%d] = %#.8x", __entry->index, __entry->value)
+);
+
#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi_ucode
TRACE_EVENT(iwlwifi_dev_ucode_cont_event,
- TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
+ TP_PROTO(void *priv, u32 time, u32 data, u32 ev),
TP_ARGS(priv, time, data, ev),
TP_STRUCT__entry(
PRIV_ENTRY
@@ -115,7 +143,7 @@ TRACE_EVENT(iwlwifi_dev_ucode_cont_event,
);
TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
- TP_PROTO(struct iwl_priv *priv, u32 wraps, u32 n_entry, u32 p_entry),
+ TP_PROTO(void *priv, u32 wraps, u32 n_entry, u32 p_entry),
TP_ARGS(priv, wraps, n_entry, p_entry),
TP_STRUCT__entry(
PRIV_ENTRY
@@ -139,7 +167,7 @@ TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
#define TRACE_SYSTEM iwlwifi
TRACE_EVENT(iwlwifi_dev_hcmd,
- TP_PROTO(struct iwl_priv *priv, u32 flags,
+ TP_PROTO(void *priv, u32 flags,
const void *hcmd0, size_t len0,
const void *hcmd1, size_t len1,
const void *hcmd2, size_t len2),
@@ -164,7 +192,7 @@ TRACE_EVENT(iwlwifi_dev_hcmd,
);
TRACE_EVENT(iwlwifi_dev_rx,
- TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len),
+ TP_PROTO(void *priv, void *rxbuf, size_t len),
TP_ARGS(priv, rxbuf, len),
TP_STRUCT__entry(
PRIV_ENTRY
@@ -179,7 +207,7 @@ TRACE_EVENT(iwlwifi_dev_rx,
);
TRACE_EVENT(iwlwifi_dev_tx,
- TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen,
+ TP_PROTO(void *priv, void *tfd, size_t tfdlen,
void *buf0, size_t buf0_len,
void *buf1, size_t buf1_len),
TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
@@ -211,7 +239,7 @@ TRACE_EVENT(iwlwifi_dev_tx,
);
TRACE_EVENT(iwlwifi_dev_ucode_error,
- TP_PROTO(struct iwl_priv *priv, u32 desc, u32 tsf_low,
+ TP_PROTO(void *priv, u32 desc, u32 tsf_low,
u32 data1, u32 data2, u32 line, u32 blink1,
u32 blink2, u32 ilink1, u32 ilink2, u32 bcon_time,
u32 gp1, u32 gp2, u32 gp3, u32 ucode_ver, u32 hw_ver,
@@ -271,7 +299,7 @@ TRACE_EVENT(iwlwifi_dev_ucode_error,
);
TRACE_EVENT(iwlwifi_dev_ucode_event,
- TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
+ TP_PROTO(void *priv, u32 time, u32 data, u32 ev),
TP_ARGS(priv, time, data, ev),
TP_STRUCT__entry(
PRIV_ENTRY
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index a4e43bd..c1eda97 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -149,23 +149,23 @@ static const u8 iwl_eeprom_band_7[] = { /* 5.2 ht40 channel */
* EEPROM chip, not a single event, so even reads could conflict if they
* weren't arbitrated by the semaphore.
*/
-static int iwl_eeprom_acquire_semaphore(struct iwl_priv *priv)
+static int iwl_eeprom_acquire_semaphore(struct iwl_bus *bus)
{
u16 count;
int ret;
for (count = 0; count < EEPROM_SEM_RETRY_LIMIT; count++) {
/* Request semaphore */
- iwl_set_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
+ iwl_set_bit(bus, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
/* See if we got it */
- ret = iwl_poll_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
+ ret = iwl_poll_bit(bus, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
EEPROM_SEM_TIMEOUT);
if (ret >= 0) {
- IWL_DEBUG_EEPROM(priv,
+ IWL_DEBUG_EEPROM(bus,
"Acquired semaphore after %d tries.\n",
count+1);
return ret;
@@ -175,39 +175,39 @@ static int iwl_eeprom_acquire_semaphore(struct iwl_priv *priv)
return ret;
}
-static void iwl_eeprom_release_semaphore(struct iwl_priv *priv)
+static void iwl_eeprom_release_semaphore(struct iwl_bus *bus)
{
- iwl_clear_bit(bus(priv), CSR_HW_IF_CONFIG_REG,
+ iwl_clear_bit(bus, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
}
-static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
+static int iwl_eeprom_verify_signature(struct iwl_trans *trans)
{
- u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
+ u32 gp = iwl_read32(bus(trans), CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
int ret = 0;
- IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
+ IWL_DEBUG_EEPROM(trans, "EEPROM signature=0x%08x\n", gp);
switch (gp) {
case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
- if (priv->nvm_device_type != NVM_DEVICE_TYPE_OTP) {
- IWL_ERR(priv, "EEPROM with bad signature: 0x%08x\n",
+ if (trans->nvm_device_type != NVM_DEVICE_TYPE_OTP) {
+ IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n",
gp);
ret = -ENOENT;
}
break;
case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
- if (priv->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) {
- IWL_ERR(priv, "OTP with bad signature: 0x%08x\n", gp);
+ if (trans->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) {
+ IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp);
ret = -ENOENT;
}
break;
case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
default:
- IWL_ERR(priv, "bad EEPROM/OTP signature, type=%s, "
+ IWL_ERR(trans, "bad EEPROM/OTP signature, type=%s, "
"EEPROM_GP=0x%08x\n",
- (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
+ (trans->nvm_device_type == NVM_DEVICE_TYPE_OTP)
? "OTP" : "EEPROM", gp);
ret = -ENOENT;
break;
@@ -215,11 +215,11 @@ static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
return ret;
}
-u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset)
+u16 iwl_eeprom_query16(const struct iwl_shared *shrd, size_t offset)
{
- if (!priv->eeprom)
+ if (!shrd->eeprom)
return 0;
- return (u16)priv->eeprom[offset] | ((u16)priv->eeprom[offset + 1] << 8);
+ return (u16)shrd->eeprom[offset] | ((u16)shrd->eeprom[offset + 1] << 8);
}
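iwl_eeprom_query16() composes a 16-bit value from two consecutive bytes of the cached image, which is stored in the card's little-endian byte order. The same composition in a standalone form, with a made-up helper name:

    #include <stdio.h>
    #include <stdint.h>

    /* Read a little-endian u16 at byte offset 'offset' of a raw NVM image. */
    static uint16_t query16(const uint8_t *image, unsigned int offset)
    {
            return (uint16_t)image[offset] | ((uint16_t)image[offset + 1] << 8);
    }

    int main(void)
    {
            const uint8_t image[] = { 0x34, 0x12, 0xcd, 0xab };

            printf("0x%04x 0x%04x\n", query16(image, 0), query16(image, 2));
            return 0;       /* prints 0x1234 0xabcd */
    }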
int iwl_eeprom_check_version(struct iwl_priv *priv)
@@ -227,11 +227,11 @@ int iwl_eeprom_check_version(struct iwl_priv *priv)
u16 eeprom_ver;
u16 calib_ver;
- eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
- calib_ver = iwlagn_eeprom_calib_version(priv);
+ eeprom_ver = iwl_eeprom_query16(priv->shrd, EEPROM_VERSION);
+ calib_ver = iwl_eeprom_calib_version(priv->shrd);
- if (eeprom_ver < priv->cfg->eeprom_ver ||
- calib_ver < priv->cfg->eeprom_calib_ver)
+ if (eeprom_ver < cfg(priv)->eeprom_ver ||
+ calib_ver < cfg(priv)->eeprom_calib_ver)
goto err;
IWL_INFO(priv, "device EEPROM VER=0x%x, CALIB=0x%x\n",
@@ -241,45 +241,46 @@ int iwl_eeprom_check_version(struct iwl_priv *priv)
err:
IWL_ERR(priv, "Unsupported (too old) EEPROM VER=0x%x < 0x%x "
"CALIB=0x%x < 0x%x\n",
- eeprom_ver, priv->cfg->eeprom_ver,
- calib_ver, priv->cfg->eeprom_calib_ver);
+ eeprom_ver, cfg(priv)->eeprom_ver,
+ calib_ver, cfg(priv)->eeprom_calib_ver);
return -EINVAL;
}
int iwl_eeprom_check_sku(struct iwl_priv *priv)
{
+ struct iwl_shared *shrd = priv->shrd;
u16 radio_cfg;
- if (!priv->cfg->sku) {
+ if (!cfg(priv)->sku) {
/* not using sku overwrite */
- priv->cfg->sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP);
- if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE &&
- !priv->cfg->ht_params) {
+ cfg(priv)->sku = iwl_eeprom_query16(shrd, EEPROM_SKU_CAP);
+ if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE &&
+ !cfg(priv)->ht_params) {
IWL_ERR(priv, "Invalid 11n configuration\n");
return -EINVAL;
}
}
- if (!priv->cfg->sku) {
+ if (!cfg(priv)->sku) {
IWL_ERR(priv, "Invalid device sku\n");
return -EINVAL;
}
- IWL_INFO(priv, "Device SKU: 0X%x\n", priv->cfg->sku);
+ IWL_INFO(priv, "Device SKU: 0x%X\n", cfg(priv)->sku);
- if (!priv->cfg->valid_tx_ant && !priv->cfg->valid_rx_ant) {
+ if (!cfg(priv)->valid_tx_ant && !cfg(priv)->valid_rx_ant) {
/* not using .cfg overwrite */
- radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
- priv->cfg->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
- priv->cfg->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
- if (!priv->cfg->valid_tx_ant || !priv->cfg->valid_rx_ant) {
- IWL_ERR(priv, "Invalid chain (0X%x, 0X%x)\n",
- priv->cfg->valid_tx_ant,
- priv->cfg->valid_rx_ant);
+ radio_cfg = iwl_eeprom_query16(shrd, EEPROM_RADIO_CONFIG);
+ cfg(priv)->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
+ cfg(priv)->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
+ if (!cfg(priv)->valid_tx_ant || !cfg(priv)->valid_rx_ant) {
+ IWL_ERR(priv, "Invalid chain (0x%X, 0x%X)\n",
+ cfg(priv)->valid_tx_ant,
+ cfg(priv)->valid_rx_ant);
return -EINVAL;
}
- IWL_INFO(priv, "Valid Tx ant: 0X%x, Valid Rx ant: 0X%x\n",
- priv->cfg->valid_tx_ant, priv->cfg->valid_rx_ant);
+ IWL_INFO(priv, "Valid Tx ant: 0x%X, Valid Rx ant: 0x%X\n",
+ cfg(priv)->valid_tx_ant, cfg(priv)->valid_rx_ant);
}
/*
* for some special cases,
@@ -289,9 +290,9 @@ int iwl_eeprom_check_sku(struct iwl_priv *priv)
return 0;
}
-void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
+void iwl_eeprom_get_mac(const struct iwl_shared *shrd, u8 *mac)
{
- const u8 *addr = iwl_eeprom_query_addr(priv,
+ const u8 *addr = iwl_eeprom_query_addr(shrd,
EEPROM_MAC_ADDRESS);
memcpy(mac, addr, ETH_ALEN);
}
@@ -302,19 +303,19 @@ void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
*
******************************************************************************/
-static void iwl_set_otp_access(struct iwl_priv *priv, enum iwl_access_mode mode)
+static void iwl_set_otp_access(struct iwl_bus *bus, enum iwl_access_mode mode)
{
- iwl_read32(bus(priv), CSR_OTP_GP_REG);
+ iwl_read32(bus, CSR_OTP_GP_REG);
if (mode == IWL_OTP_ACCESS_ABSOLUTE)
- iwl_clear_bit(bus(priv), CSR_OTP_GP_REG,
+ iwl_clear_bit(bus, CSR_OTP_GP_REG,
CSR_OTP_GP_REG_OTP_ACCESS_MODE);
else
- iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
+ iwl_set_bit(bus, CSR_OTP_GP_REG,
CSR_OTP_GP_REG_OTP_ACCESS_MODE);
}
-static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
+static int iwl_get_nvm_type(struct iwl_bus *bus, u32 hw_rev)
{
u32 otpgp;
int nvm_type;
@@ -322,7 +323,7 @@ static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
/* OTP only valid for CP/PP and after */
switch (hw_rev & CSR_HW_REV_TYPE_MSK) {
case CSR_HW_REV_TYPE_NONE:
- IWL_ERR(priv, "Unknown hardware type\n");
+ IWL_ERR(bus, "Unknown hardware type\n");
return -ENOENT;
case CSR_HW_REV_TYPE_5300:
case CSR_HW_REV_TYPE_5350:
@@ -331,7 +332,7 @@ static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
nvm_type = NVM_DEVICE_TYPE_EEPROM;
break;
default:
- otpgp = iwl_read32(bus(priv), CSR_OTP_GP_REG);
+ otpgp = iwl_read32(bus, CSR_OTP_GP_REG);
if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT)
nvm_type = NVM_DEVICE_TYPE_OTP;
else
@@ -341,73 +342,73 @@ static int iwl_get_nvm_type(struct iwl_priv *priv, u32 hw_rev)
return nvm_type;
}
-static int iwl_init_otp_access(struct iwl_priv *priv)
+static int iwl_init_otp_access(struct iwl_bus *bus)
{
int ret;
/* Enable 40MHz radio clock */
- iwl_write32(bus(priv), CSR_GP_CNTRL,
- iwl_read32(bus(priv), CSR_GP_CNTRL) |
+ iwl_write32(bus, CSR_GP_CNTRL,
+ iwl_read32(bus, CSR_GP_CNTRL) |
CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
/* wait for clock to be ready */
- ret = iwl_poll_bit(bus(priv), CSR_GP_CNTRL,
+ ret = iwl_poll_bit(bus, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
25000);
if (ret < 0)
- IWL_ERR(priv, "Time out access OTP\n");
+ IWL_ERR(bus, "Time out access OTP\n");
else {
- iwl_set_bits_prph(bus(priv), APMG_PS_CTRL_REG,
+ iwl_set_bits_prph(bus, APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_RESET_REQ);
udelay(5);
- iwl_clear_bits_prph(bus(priv), APMG_PS_CTRL_REG,
+ iwl_clear_bits_prph(bus, APMG_PS_CTRL_REG,
APMG_PS_CTRL_VAL_RESET_REQ);
/*
* CSR auto clock gate disable bit -
* this is only applicable for HW with OTP shadow RAM
*/
- if (priv->cfg->base_params->shadow_ram_support)
- iwl_set_bit(bus(priv), CSR_DBG_LINK_PWR_MGMT_REG,
+ if (cfg(bus)->base_params->shadow_ram_support)
+ iwl_set_bit(bus, CSR_DBG_LINK_PWR_MGMT_REG,
CSR_RESET_LINK_PWR_MGMT_DISABLED);
}
return ret;
}
-static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_data)
+static int iwl_read_otp_word(struct iwl_bus *bus, u16 addr, __le16 *eeprom_data)
{
int ret = 0;
u32 r;
u32 otpgp;
- iwl_write32(bus(priv), CSR_EEPROM_REG,
+ iwl_write32(bus, CSR_EEPROM_REG,
CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
- ret = iwl_poll_bit(bus(priv), CSR_EEPROM_REG,
+ ret = iwl_poll_bit(bus, CSR_EEPROM_REG,
CSR_EEPROM_REG_READ_VALID_MSK,
CSR_EEPROM_REG_READ_VALID_MSK,
IWL_EEPROM_ACCESS_TIMEOUT);
if (ret < 0) {
- IWL_ERR(priv, "Time out reading OTP[%d]\n", addr);
+ IWL_ERR(bus, "Time out reading OTP[%d]\n", addr);
return ret;
}
- r = iwl_read32(bus(priv), CSR_EEPROM_REG);
+ r = iwl_read32(bus, CSR_EEPROM_REG);
/* check for ECC errors: */
- otpgp = iwl_read32(bus(priv), CSR_OTP_GP_REG);
+ otpgp = iwl_read32(bus, CSR_OTP_GP_REG);
if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) {
/* stop in this case */
/* set the uncorrectable OTP ECC bit for acknowledgement */
- iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
+ iwl_set_bit(bus, CSR_OTP_GP_REG,
CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
- IWL_ERR(priv, "Uncorrectable OTP ECC error, abort OTP read\n");
+ IWL_ERR(bus, "Uncorrectable OTP ECC error, abort OTP read\n");
return -EINVAL;
}
if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) {
/* continue in this case */
/* set the correctable OTP ECC bit for acknowledgement */
- iwl_set_bit(bus(priv), CSR_OTP_GP_REG,
+ iwl_set_bit(bus, CSR_OTP_GP_REG,
CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK);
- IWL_ERR(priv, "Correctable OTP ECC error, continue read\n");
+ IWL_ERR(bus, "Correctable OTP ECC error, continue read\n");
}
*eeprom_data = cpu_to_le16(r >> 16);
return 0;
@@ -416,20 +417,20 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, __le16 *eeprom_dat
/*
* iwl_is_otp_empty: check for empty OTP
*/
-static bool iwl_is_otp_empty(struct iwl_priv *priv)
+static bool iwl_is_otp_empty(struct iwl_bus *bus)
{
u16 next_link_addr = 0;
__le16 link_value;
bool is_empty = false;
/* locate the beginning of OTP link list */
- if (!iwl_read_otp_word(priv, next_link_addr, &link_value)) {
+ if (!iwl_read_otp_word(bus, next_link_addr, &link_value)) {
if (!link_value) {
- IWL_ERR(priv, "OTP is empty\n");
+ IWL_ERR(bus, "OTP is empty\n");
is_empty = true;
}
} else {
- IWL_ERR(priv, "Unable to read first block of OTP list.\n");
+ IWL_ERR(bus, "Unable to read first block of OTP list.\n");
is_empty = true;
}
@@ -446,7 +447,7 @@ static bool iwl_is_otp_empty(struct iwl_priv *priv)
* we should read and used to configure the device.
* only perform this operation if shadow RAM is disabled
*/
-static int iwl_find_otp_image(struct iwl_priv *priv,
+static int iwl_find_otp_image(struct iwl_bus *bus,
u16 *validblockaddr)
{
u16 next_link_addr = 0, valid_addr;
@@ -454,10 +455,10 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
int usedblocks = 0;
/* set addressing mode to absolute to traverse the link list */
- iwl_set_otp_access(priv, IWL_OTP_ACCESS_ABSOLUTE);
+ iwl_set_otp_access(bus, IWL_OTP_ACCESS_ABSOLUTE);
/* checking for empty OTP or error */
- if (iwl_is_otp_empty(priv))
+ if (iwl_is_otp_empty(bus))
return -EINVAL;
/*
@@ -471,9 +472,9 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
*/
valid_addr = next_link_addr;
next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
- IWL_DEBUG_EEPROM(priv, "OTP blocks %d addr 0x%x\n",
+ IWL_DEBUG_EEPROM(bus, "OTP blocks %d addr 0x%x\n",
usedblocks, next_link_addr);
- if (iwl_read_otp_word(priv, next_link_addr, &link_value))
+ if (iwl_read_otp_word(bus, next_link_addr, &link_value))
return -EINVAL;
if (!link_value) {
/*
@@ -488,10 +489,10 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
}
/* more in the link list, continue */
usedblocks++;
- } while (usedblocks <= priv->cfg->base_params->max_ll_items);
+ } while (usedblocks <= cfg(bus)->base_params->max_ll_items);
/* OTP has no valid blocks */
- IWL_DEBUG_EEPROM(priv, "OTP has no valid blocks\n");
+ IWL_DEBUG_EEPROM(bus, "OTP has no valid blocks\n");
return -EINVAL;
}
@@ -504,28 +505,28 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
* iwl_get_max_txpower_avg - get the highest tx power from all chains.
* find the highest tx power from all chains for the channel
*/
-static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
+static s8 iwl_get_max_txpower_avg(struct iwl_cfg *cfg,
struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
int element, s8 *max_txpower_in_half_dbm)
{
s8 max_txpower_avg = 0; /* (dBm) */
/* Take the highest tx power from any valid chains */
- if ((priv->cfg->valid_tx_ant & ANT_A) &&
+ if ((cfg->valid_tx_ant & ANT_A) &&
(enhanced_txpower[element].chain_a_max > max_txpower_avg))
max_txpower_avg = enhanced_txpower[element].chain_a_max;
- if ((priv->cfg->valid_tx_ant & ANT_B) &&
+ if ((cfg->valid_tx_ant & ANT_B) &&
(enhanced_txpower[element].chain_b_max > max_txpower_avg))
max_txpower_avg = enhanced_txpower[element].chain_b_max;
- if ((priv->cfg->valid_tx_ant & ANT_C) &&
+ if ((cfg->valid_tx_ant & ANT_C) &&
(enhanced_txpower[element].chain_c_max > max_txpower_avg))
max_txpower_avg = enhanced_txpower[element].chain_c_max;
- if (((priv->cfg->valid_tx_ant == ANT_AB) |
- (priv->cfg->valid_tx_ant == ANT_BC) |
- (priv->cfg->valid_tx_ant == ANT_AC)) &&
+ if (((cfg->valid_tx_ant == ANT_AB) |
+ (cfg->valid_tx_ant == ANT_BC) |
+ (cfg->valid_tx_ant == ANT_AC)) &&
(enhanced_txpower[element].mimo2_max > max_txpower_avg))
max_txpower_avg = enhanced_txpower[element].mimo2_max;
- if ((priv->cfg->valid_tx_ant == ANT_ABC) &&
+ if ((cfg->valid_tx_ant == ANT_ABC) &&
(enhanced_txpower[element].mimo3_max > max_txpower_avg))
max_txpower_avg = enhanced_txpower[element].mimo3_max;
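iwl_get_max_txpower_avg() now takes the cfg pointer directly and simply keeps the largest per-chain limit among the antennas marked valid in cfg->valid_tx_ant. The selection logic, reduced to a standalone sketch with invented values:

    #include <stdio.h>

    #define ANT_A 0x1
    #define ANT_B 0x2
    #define ANT_C 0x4

    /* Keep the highest per-chain limit among the antennas flagged as valid. */
    static signed char max_chain_power(unsigned int valid_ant,
                                       signed char a, signed char b, signed char c)
    {
            signed char max = 0;

            if ((valid_ant & ANT_A) && a > max)
                    max = a;
            if ((valid_ant & ANT_B) && b > max)
                    max = b;
            if ((valid_ant & ANT_C) && c > max)
                    max = c;
            return max;
    }

    int main(void)
    {
            /* Chain C is not valid here, so its higher limit is ignored. */
            printf("%d\n", max_chain_power(ANT_A | ANT_B, 30, 34, 40));     /* 34 */
            return 0;
    }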
@@ -582,6 +583,7 @@ iwl_eeprom_enh_txp_read_element(struct iwl_priv *priv,
void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
{
+ struct iwl_shared *shrd = priv->shrd;
struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
int idx, entries;
__le16 *txp_len;
@@ -590,10 +592,10 @@ void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
/* the length is in 16-bit words, but we want entries */
- txp_len = (__le16 *) iwl_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS);
+ txp_len = (__le16 *) iwl_eeprom_query_addr(shrd, EEPROM_TXP_SZ_OFFS);
entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
- txp_array = (void *) iwl_eeprom_query_addr(priv, EEPROM_TXP_OFFS);
+ txp_array = (void *) iwl_eeprom_query_addr(shrd, EEPROM_TXP_OFFS);
for (idx = 0; idx < entries; idx++) {
txp = &txp_array[idx];
@@ -627,7 +629,7 @@ void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
((txp->delta_20_in_40 & 0xf0) >> 4),
(txp->delta_20_in_40 & 0x0f));
- max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx,
+ max_txp_avg = iwl_get_max_txpower_avg(cfg(priv), txp_array, idx,
&max_txp_avg_halfdbm);
/*
@@ -646,12 +648,13 @@ void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
/**
* iwl_eeprom_init - read EEPROM contents
*
- * Load the EEPROM contents from adapter into priv->eeprom
+ * Load the EEPROM contents from adapter into shrd->eeprom
*
* NOTE: This routine uses the non-debug IO access functions.
*/
int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
{
+ struct iwl_shared *shrd = priv->shrd;
__le16 *e;
u32 gp = iwl_read32(bus(priv), CSR_EEPROM_GP);
int sz;
@@ -660,22 +663,22 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
u16 validblockaddr = 0;
u16 cache_addr = 0;
- priv->nvm_device_type = iwl_get_nvm_type(priv, hw_rev);
- if (priv->nvm_device_type == -ENOENT)
+ trans(priv)->nvm_device_type = iwl_get_nvm_type(bus(priv), hw_rev);
+ if (trans(priv)->nvm_device_type == -ENOENT)
return -ENOENT;
/* allocate eeprom */
- sz = priv->cfg->base_params->eeprom_size;
+ sz = cfg(priv)->base_params->eeprom_size;
IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
- priv->eeprom = kzalloc(sz, GFP_KERNEL);
- if (!priv->eeprom) {
+ shrd->eeprom = kzalloc(sz, GFP_KERNEL);
+ if (!shrd->eeprom) {
ret = -ENOMEM;
goto alloc_err;
}
- e = (__le16 *)priv->eeprom;
+ e = (__le16 *)shrd->eeprom;
iwl_apm_init(priv);
- ret = iwl_eeprom_verify_signature(priv);
+ ret = iwl_eeprom_verify_signature(trans(priv));
if (ret < 0) {
IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp);
ret = -ENOENT;
@@ -683,16 +686,16 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
}
/* Make sure driver (instead of uCode) is allowed to read EEPROM */
- ret = iwl_eeprom_acquire_semaphore(priv);
+ ret = iwl_eeprom_acquire_semaphore(bus(priv));
if (ret < 0) {
IWL_ERR(priv, "Failed to acquire EEPROM semaphore.\n");
ret = -ENOENT;
goto err;
}
- if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
+ if (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
- ret = iwl_init_otp_access(priv);
+ ret = iwl_init_otp_access(bus(priv));
if (ret) {
IWL_ERR(priv, "Failed to initialize OTP access.\n");
ret = -ENOENT;
@@ -706,8 +709,8 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK |
CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK);
/* traversing the linked list if no shadow ram supported */
- if (!priv->cfg->base_params->shadow_ram_support) {
- if (iwl_find_otp_image(priv, &validblockaddr)) {
+ if (!cfg(priv)->base_params->shadow_ram_support) {
+ if (iwl_find_otp_image(bus(priv), &validblockaddr)) {
ret = -ENOENT;
goto done;
}
@@ -716,7 +719,7 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
addr += sizeof(u16)) {
__le16 eeprom_data;
- ret = iwl_read_otp_word(priv, addr, &eeprom_data);
+ ret = iwl_read_otp_word(bus(priv), addr, &eeprom_data);
if (ret)
goto done;
e[cache_addr / 2] = eeprom_data;
@@ -744,27 +747,27 @@ int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev)
}
IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
- (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
+ (trans(priv)->nvm_device_type == NVM_DEVICE_TYPE_OTP)
? "OTP" : "EEPROM",
- iwl_eeprom_query16(priv, EEPROM_VERSION));
+ iwl_eeprom_query16(shrd, EEPROM_VERSION));
ret = 0;
done:
- iwl_eeprom_release_semaphore(priv);
+ iwl_eeprom_release_semaphore(bus(priv));
err:
if (ret)
- iwl_eeprom_free(priv);
+ iwl_eeprom_free(priv->shrd);
/* Reset chip to save power until we load uCode during "up". */
iwl_apm_stop(priv);
alloc_err:
return ret;
}
-void iwl_eeprom_free(struct iwl_priv *priv)
+void iwl_eeprom_free(struct iwl_shared *shrd)
{
- kfree(priv->eeprom);
- priv->eeprom = NULL;
+ kfree(shrd->eeprom);
+ shrd->eeprom = NULL;
}
static void iwl_init_band_reference(const struct iwl_priv *priv,
@@ -772,49 +775,50 @@ static void iwl_init_band_reference(const struct iwl_priv *priv,
const struct iwl_eeprom_channel **eeprom_ch_info,
const u8 **eeprom_ch_index)
{
- u32 offset = priv->cfg->lib->
+ struct iwl_shared *shrd = priv->shrd;
+ u32 offset = cfg(priv)->lib->
eeprom_ops.regulatory_bands[eep_band - 1];
switch (eep_band) {
case 1: /* 2.4GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_1);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
+ iwl_eeprom_query_addr(shrd, offset);
*eeprom_ch_index = iwl_eeprom_band_1;
break;
case 2: /* 4.9GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_2);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
+ iwl_eeprom_query_addr(shrd, offset);
*eeprom_ch_index = iwl_eeprom_band_2;
break;
case 3: /* 5.2GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_3);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
+ iwl_eeprom_query_addr(shrd, offset);
*eeprom_ch_index = iwl_eeprom_band_3;
break;
case 4: /* 5.5GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_4);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
+ iwl_eeprom_query_addr(shrd, offset);
*eeprom_ch_index = iwl_eeprom_band_4;
break;
case 5: /* 5.7GHz band */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_5);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
+ iwl_eeprom_query_addr(shrd, offset);
*eeprom_ch_index = iwl_eeprom_band_5;
break;
case 6: /* 2.4GHz ht40 channels */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_6);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
+ iwl_eeprom_query_addr(shrd, offset);
*eeprom_ch_index = iwl_eeprom_band_6;
break;
case 7: /* 5 GHz ht40 channels */
*eeprom_ch_count = ARRAY_SIZE(iwl_eeprom_band_7);
*eeprom_ch_info = (struct iwl_eeprom_channel *)
- iwl_eeprom_query_addr(priv, offset);
+ iwl_eeprom_query_addr(shrd, offset);
*eeprom_ch_index = iwl_eeprom_band_7;
break;
default:
@@ -979,9 +983,9 @@ int iwl_init_channel_map(struct iwl_priv *priv)
}
/* Check if we do have HT40 channels */
- if (priv->cfg->lib->eeprom_ops.regulatory_bands[5] ==
+ if (cfg(priv)->lib->eeprom_ops.regulatory_bands[5] ==
EEPROM_REGULATORY_BAND_NO_HT40 &&
- priv->cfg->lib->eeprom_ops.regulatory_bands[6] ==
+ cfg(priv)->lib->eeprom_ops.regulatory_bands[6] ==
EEPROM_REGULATORY_BAND_NO_HT40)
return 0;
@@ -1017,8 +1021,8 @@ int iwl_init_channel_map(struct iwl_priv *priv)
* driver needs to process additional information
* to determine the max channel tx power limits
*/
- if (priv->cfg->lib->eeprom_ops.update_enhanced_txpower)
- priv->cfg->lib->eeprom_ops.update_enhanced_txpower(priv);
+ if (cfg(priv)->lib->eeprom_ops.update_enhanced_txpower)
+ cfg(priv)->lib->eeprom_ops.update_enhanced_txpower(priv);
return 0;
}
@@ -1064,7 +1068,7 @@ void iwl_rf_config(struct iwl_priv *priv)
{
u16 radio_cfg;
- radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
+ radio_cfg = iwl_eeprom_query16(priv->shrd, EEPROM_RADIO_CONFIG);
/* write radio config values to register */
if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index c94747e..9fa937e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -66,6 +66,7 @@
#include <net/mac80211.h>
struct iwl_priv;
+struct iwl_shared;
/*
* EEPROM access time values:
@@ -305,11 +306,11 @@ struct iwl_eeprom_ops {
int iwl_eeprom_init(struct iwl_priv *priv, u32 hw_rev);
-void iwl_eeprom_free(struct iwl_priv *priv);
+void iwl_eeprom_free(struct iwl_shared *shrd);
int iwl_eeprom_check_version(struct iwl_priv *priv);
int iwl_eeprom_check_sku(struct iwl_priv *priv);
-const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset);
-u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset);
+const u8 *iwl_eeprom_query_addr(const struct iwl_shared *shrd, size_t offset);
+u16 iwl_eeprom_query16(const struct iwl_shared *shrd, size_t offset);
int iwl_init_channel_map(struct iwl_priv *priv);
void iwl_free_channel_map(struct iwl_priv *priv);
const struct iwl_channel_info *iwl_get_channel_info(
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index 3ffa8e6..d57ea64 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -143,7 +143,7 @@ u32 iwl_read_direct32(struct iwl_bus *bus, u32 reg)
spin_lock_irqsave(&bus->reg_lock, flags);
iwl_grab_nic_access(bus);
- value = iwl_read32(bus(bus), reg);
+ value = iwl_read32(bus, reg);
iwl_release_nic_access(bus);
spin_unlock_irqrestore(&bus->reg_lock, flags);
@@ -283,16 +283,29 @@ u32 iwl_read_targ_mem(struct iwl_bus *bus, u32 addr)
return value;
}
-void iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val)
+int _iwl_write_targ_mem_words(struct iwl_bus *bus, u32 addr,
+ void *buf, int words)
{
unsigned long flags;
+ int offs, result = 0;
+ u32 *vals = buf;
spin_lock_irqsave(&bus->reg_lock, flags);
if (!iwl_grab_nic_access(bus)) {
iwl_write32(bus, HBUS_TARG_MEM_WADDR, addr);
wmb();
- iwl_write32(bus, HBUS_TARG_MEM_WDAT, val);
+
+ for (offs = 0; offs < words; offs++)
+ iwl_write32(bus, HBUS_TARG_MEM_WDAT, vals[offs]);
iwl_release_nic_access(bus);
- }
+ } else
+ result = -EBUSY;
spin_unlock_irqrestore(&bus->reg_lock, flags);
+
+ return result;
+}
+
+int iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val)
+{
+ return _iwl_write_targ_mem_words(bus, addr, &val, 1);
}
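The new _iwl_write_targ_mem_words() grabs NIC access once, programs the target address, then streams every word through the auto-incrementing data register, and iwl_write_targ_mem() becomes a one-word wrapper that now reports -EBUSY. A hardware-free sketch of the same shape; the helpers below are stand-ins, not the driver's API:

    #include <stdio.h>

    /* Toy device model: an address register plus a data port that auto-increments
     * a cursor into a small backing store, standing in for HBUS_TARG_MEM_*. */
    static unsigned int mem[16];
    static unsigned int waddr;

    static void write32_addr(unsigned int addr) { waddr = addr; }
    static void write32_data(unsigned int val)  { mem[waddr++ % 16] = val; }

    static int write_targ_mem_words(unsigned int addr,
                                    const unsigned int *vals, int words)
    {
            int i;

            /* The real helper takes the register lock and grabs NIC access first,
             * returning -EBUSY when the grant fails; this sketch assumes success. */
            write32_addr(addr);
            for (i = 0; i < words; i++)
                    write32_data(vals[i]);
            return 0;
    }

    int main(void)
    {
            unsigned int vals[] = { 1, 2, 3 };

            write_targ_mem_words(4, vals, 3);
            printf("%u %u %u\n", mem[4], mem[5], mem[6]);   /* 1 2 3 */
            return 0;
    }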
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index ced2cbe..aae2eeb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -85,6 +85,9 @@ void _iwl_read_targ_mem_words(struct iwl_bus *bus, u32 addr,
(bufsize) / sizeof(u32));\
} while (0)
+int _iwl_write_targ_mem_words(struct iwl_bus *bus, u32 addr,
+ void *buf, int words);
+
u32 iwl_read_targ_mem(struct iwl_bus *bus, u32 addr);
-void iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val);
+int iwl_write_targ_mem(struct iwl_bus *bus, u32 addr, u32 val);
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index eb54173..14dcbfc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -137,11 +137,11 @@ static int iwl_led_cmd(struct iwl_priv *priv,
}
IWL_DEBUG_LED(priv, "Led blink time compensation=%u\n",
- priv->cfg->base_params->led_compensation);
+ cfg(priv)->base_params->led_compensation);
led_cmd.on = iwl_blink_compensation(priv, on,
- priv->cfg->base_params->led_compensation);
+ cfg(priv)->base_params->led_compensation);
led_cmd.off = iwl_blink_compensation(priv, off,
- priv->cfg->base_params->led_compensation);
+ cfg(priv)->base_params->led_compensation);
ret = iwl_send_led_cmd(priv, &led_cmd);
if (!ret) {
@@ -178,7 +178,7 @@ void iwl_leds_init(struct iwl_priv *priv)
int ret;
if (mode == IWL_LED_DEFAULT)
- mode = priv->cfg->led_mode;
+ mode = cfg(priv)->led_mode;
priv->led.name = kasprintf(GFP_KERNEL, "%s-led",
wiphy_name(priv->hw->wiphy));
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
index 1c93dfe..2550b3c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -36,20 +36,6 @@ struct iwl_priv;
#define IWL_LED_ACTIVITY (0<<1)
#define IWL_LED_LINK (1<<1)
-/*
- * LED mode
- * IWL_LED_DEFAULT: use device default
- * IWL_LED_RF_STATE: turn LED on/off based on RF state
- * LED ON = RF ON
- * LED OFF = RF OFF
- * IWL_LED_BLINK: adjust led blink rate based on blink table
- */
-enum iwl_led_mode {
- IWL_LED_DEFAULT,
- IWL_LED_RF_STATE,
- IWL_LED_BLINK,
-};
-
void iwlagn_led_enable(struct iwl_priv *priv);
void iwl_leds_init(struct iwl_priv *priv);
void iwl_leds_exit(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
new file mode 100644
index 0000000..f980e57
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
@@ -0,0 +1,1601 @@
+/******************************************************************************
+ *
+ * Copyright(c) 2003 - 2011 Intel Corporation. All rights reserved.
+ *
+ * Portions of this file are derived from the ipw3945 project, as well
+ * as portions of the ieee80211 subsystem header files.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ *****************************************************************************/
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/firmware.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+
+#include <net/mac80211.h>
+
+#include <asm/div64.h>
+
+#include "iwl-eeprom.h"
+#include "iwl-wifi.h"
+#include "iwl-dev.h"
+#include "iwl-core.h"
+#include "iwl-io.h"
+#include "iwl-agn-calib.h"
+#include "iwl-agn.h"
+#include "iwl-shared.h"
+#include "iwl-bus.h"
+#include "iwl-trans.h"
+
+/*****************************************************************************
+ *
+ * mac80211 entry point functions
+ *
+ *****************************************************************************/
+
+static const struct ieee80211_iface_limit iwlagn_sta_ap_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP),
+ },
+};
+
+static const struct ieee80211_iface_limit iwlagn_2sta_limits[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+};
+
+static const struct ieee80211_iface_limit iwlagn_p2p_sta_go_limits[] = {
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_AP),
+ },
+};
+
+static const struct ieee80211_iface_limit iwlagn_p2p_2sta_limits[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
+ },
+};
+
+static const struct ieee80211_iface_combination
+iwlagn_iface_combinations_dualmode[] = {
+ { .num_different_channels = 1,
+ .max_interfaces = 2,
+ .beacon_int_infra_match = true,
+ .limits = iwlagn_sta_ap_limits,
+ .n_limits = ARRAY_SIZE(iwlagn_sta_ap_limits),
+ },
+ { .num_different_channels = 1,
+ .max_interfaces = 2,
+ .limits = iwlagn_2sta_limits,
+ .n_limits = ARRAY_SIZE(iwlagn_2sta_limits),
+ },
+};
+
+static const struct ieee80211_iface_combination
+iwlagn_iface_combinations_p2p[] = {
+ { .num_different_channels = 1,
+ .max_interfaces = 2,
+ .beacon_int_infra_match = true,
+ .limits = iwlagn_p2p_sta_go_limits,
+ .n_limits = ARRAY_SIZE(iwlagn_p2p_sta_go_limits),
+ },
+ { .num_different_channels = 1,
+ .max_interfaces = 2,
+ .limits = iwlagn_p2p_2sta_limits,
+ .n_limits = ARRAY_SIZE(iwlagn_p2p_2sta_limits),
+ },
+};
+
+/*
+ * Not a mac80211 entry point function, but it fits in with all the
+ * other mac80211 functions grouped here.
+ */
+int iwlagn_mac_setup_register(struct iwl_priv *priv,
+ struct iwlagn_ucode_capabilities *capa)
+{
+ int ret;
+ struct ieee80211_hw *hw = priv->hw;
+ struct iwl_rxon_context *ctx;
+
+ hw->rate_control_algorithm = "iwl-agn-rs";
+
+ /* Tell mac80211 our characteristics */
+ hw->flags = IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_AMPDU_AGGREGATION |
+ IEEE80211_HW_NEED_DTIM_PERIOD |
+ IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+
+ /*
+ * Including the following line will crash some APs. This
+ * workaround removes the stimulus which causes the crash until
+ * the AP software can be fixed.
+ hw->max_tx_aggregation_subframes = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
+ */
+
+ hw->flags |= IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+
+ if (cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE)
+ hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
+ IEEE80211_HW_SUPPORTS_STATIC_SMPS;
+
+ if (capa->flags & IWL_UCODE_TLV_FLAGS_MFP)
+ hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+
+ hw->sta_data_size = sizeof(struct iwl_station_priv);
+ hw->vif_data_size = sizeof(struct iwl_vif_priv);
+
+ for_each_context(priv, ctx) {
+ hw->wiphy->interface_modes |= ctx->interface_modes;
+ hw->wiphy->interface_modes |= ctx->exclusive_interface_modes;
+ }
+
+ BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
+
+ if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)) {
+ hw->wiphy->iface_combinations = iwlagn_iface_combinations_p2p;
+ hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(iwlagn_iface_combinations_p2p);
+ } else if (hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
+ hw->wiphy->iface_combinations =
+ iwlagn_iface_combinations_dualmode;
+ hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(iwlagn_iface_combinations_dualmode);
+ }
+
+ hw->wiphy->max_remain_on_channel_duration = 1000;
+
+ hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
+ WIPHY_FLAG_DISABLE_BEACON_HINTS |
+ WIPHY_FLAG_IBSS_RSN;
+
+ if (trans(priv)->ucode_wowlan.code.len &&
+ device_can_wakeup(bus(priv)->dev)) {
+ hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
+ WIPHY_WOWLAN_DISCONNECT |
+ WIPHY_WOWLAN_EAP_IDENTITY_REQ |
+ WIPHY_WOWLAN_RFKILL_RELEASE;
+ if (!iwlagn_mod_params.sw_crypto)
+ hw->wiphy->wowlan.flags |=
+ WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
+ WIPHY_WOWLAN_GTK_REKEY_FAILURE;
+
+ hw->wiphy->wowlan.n_patterns = IWLAGN_WOWLAN_MAX_PATTERNS;
+ hw->wiphy->wowlan.pattern_min_len =
+ IWLAGN_WOWLAN_MIN_PATTERN_LEN;
+ hw->wiphy->wowlan.pattern_max_len =
+ IWLAGN_WOWLAN_MAX_PATTERN_LEN;
+ }
+
+ if (iwlagn_mod_params.power_save)
+ hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ else
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+ /* we create the 802.11 header and a zero-length SSID element */
+ hw->wiphy->max_scan_ie_len = capa->max_probe_length - 24 - 2;
+
+ /* Default value; 4 EDCA QOS priorities */
+ hw->queues = 4;
+
+ hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
+
+ if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
+ priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &priv->bands[IEEE80211_BAND_2GHZ];
+ if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
+ priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &priv->bands[IEEE80211_BAND_5GHZ];
+
+ hw->wiphy->hw_version = bus_get_hw_id(bus(priv));
+
+ iwl_leds_init(priv);
+
+ ret = ieee80211_register_hw(priv->hw);
+ if (ret) {
+ IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
+ return ret;
+ }
+ priv->mac80211_registered = 1;
+
+ return 0;
+}
+
+void iwlagn_mac_unregister(struct iwl_priv *priv)
+{
+ if (!priv->mac80211_registered)
+ return;
+ iwl_leds_exit(priv);
+ ieee80211_unregister_hw(priv->hw);
+ priv->mac80211_registered = 0;
+}
+
+static int __iwl_up(struct iwl_priv *priv)
+{
+ struct iwl_rxon_context *ctx;
+ int ret;
+
+ lockdep_assert_held(&priv->shrd->mutex);
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
+ IWL_WARN(priv, "Exit pending; will not bring the NIC up\n");
+ return -EIO;
+ }
+
+ for_each_context(priv, ctx) {
+ ret = iwlagn_alloc_bcast_station(priv, ctx);
+ if (ret) {
+ iwl_dealloc_bcast_stations(priv);
+ return ret;
+ }
+ }
+
+ ret = iwl_run_init_ucode(trans(priv));
+ if (ret) {
+ IWL_ERR(priv, "Failed to run INIT ucode: %d\n", ret);
+ goto error;
+ }
+
+ ret = iwl_load_ucode_wait_alive(trans(priv), IWL_UCODE_REGULAR);
+ if (ret) {
+ IWL_ERR(priv, "Failed to start RT ucode: %d\n", ret);
+ goto error;
+ }
+
+ ret = iwl_alive_start(priv);
+ if (ret)
+ goto error;
+ return 0;
+
+ error:
+ set_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
+ __iwl_down(priv);
+ clear_bit(STATUS_EXIT_PENDING, &priv->shrd->status);
+
+ IWL_ERR(priv, "Unable to initialize device.\n");
+ return ret;
+}
+
+static int iwlagn_mac_start(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+ int ret;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ /* we should be verifying the device is ready to be opened */
+ mutex_lock(&priv->shrd->mutex);
+ ret = __iwl_up(priv);
+ mutex_unlock(&priv->shrd->mutex);
+ if (ret)
+ return ret;
+
+ IWL_DEBUG_INFO(priv, "Start UP work done.\n");
+
+ /* Now we should be done, and the READY bit should be set. */
+ if (WARN_ON(!test_bit(STATUS_READY, &priv->shrd->status)))
+ ret = -EIO;
+
+ iwlagn_led_enable(priv);
+
+ priv->is_open = 1;
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+ return 0;
+}
+
+static void iwlagn_mac_stop(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (!priv->is_open)
+ return;
+
+ priv->is_open = 0;
+
+ iwl_down(priv);
+
+ flush_workqueue(priv->shrd->workqueue);
+
+ /* User space software may expect to get rfkill changes
+ * even if the interface is down */
+ iwl_write32(bus(priv), CSR_INT, 0xFFFFFFFF);
+ iwl_enable_rfkill_int(priv);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static void iwlagn_mac_set_rekey_data(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_gtk_rekey_data *data)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ if (iwlagn_mod_params.sw_crypto)
+ return;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+
+ if (priv->contexts[IWL_RXON_CTX_BSS].vif != vif)
+ goto out;
+
+ memcpy(priv->kek, data->kek, NL80211_KEK_LEN);
+ memcpy(priv->kck, data->kck, NL80211_KCK_LEN);
+ priv->replay_ctr =
+ cpu_to_le64(be64_to_cpup((__be64 *)&data->replay_ctr));
+ priv->have_rekey_data = true;
+
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int iwlagn_mac_suspend(struct ieee80211_hw *hw,
+ struct cfg80211_wowlan *wowlan)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ int ret;
+
+ if (WARN_ON(!wowlan))
+ return -EINVAL;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+
+ /* Don't attempt WoWLAN when not associated, tear down instead. */
+ if (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION ||
+ !iwl_is_associated_ctx(ctx)) {
+ ret = 1;
+ goto out;
+ }
+
+ ret = iwlagn_suspend(priv, hw, wowlan);
+ if (ret)
+ goto error;
+
+ device_set_wakeup_enable(bus(priv)->dev, true);
+
+ /* Now let the ucode operate on its own */
+ iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_SET,
+ CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+
+ goto out;
+
+ error:
+ priv->shrd->wowlan = false;
+ iwlagn_prepare_restart(priv);
+ ieee80211_restart_hw(priv->hw);
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return ret;
+}
+
+static int iwlagn_mac_resume(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct ieee80211_vif *vif;
+ unsigned long flags;
+ u32 base, status = 0xffffffff;
+ int ret = -EIO;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+
+ iwl_write32(bus(priv), CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);
+
+ base = priv->shrd->device_pointers.error_event_table;
+ if (iwlagn_hw_valid_rtc_data_addr(base)) {
+ spin_lock_irqsave(&bus(priv)->reg_lock, flags);
+ ret = iwl_grab_nic_access_silent(bus(priv));
+ if (ret == 0) {
+ iwl_write32(bus(priv), HBUS_TARG_MEM_RADDR, base);
+ status = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
+ iwl_release_nic_access(bus(priv));
+ }
+ spin_unlock_irqrestore(&bus(priv)->reg_lock, flags);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ if (ret == 0) {
+ struct iwl_trans *trans = trans(priv);
+ if (!priv->wowlan_sram)
+ priv->wowlan_sram =
+ kzalloc(trans->ucode_wowlan.data.len,
+ GFP_KERNEL);
+
+ if (priv->wowlan_sram)
+ _iwl_read_targ_mem_words(
+ bus(priv), 0x800000, priv->wowlan_sram,
+ trans->ucode_wowlan.data.len / 4);
+ }
+#endif
+ }
+
+ /* we'll clear ctx->vif during iwlagn_prepare_restart() */
+ vif = ctx->vif;
+
+ priv->shrd->wowlan = false;
+
+ device_set_wakeup_enable(bus(priv)->dev, false);
+
+ iwlagn_prepare_restart(priv);
+
+ memset((void *)&ctx->active, 0, sizeof(ctx->active));
+ iwl_connection_init_rx_config(priv, ctx);
+ iwlagn_set_rxon_chain(priv, ctx);
+
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ ieee80211_resume_disconnect(vif);
+
+ return 1;
+}
+
+#endif
+
+static void iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
+ ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
+
+ if (iwlagn_tx_skb(priv, skb))
+ dev_kfree_skb_any(skb);
+}
+
+static void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta,
+ u32 iv32, u16 *phase1key)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ iwl_update_tkip_key(priv, vif, keyconf, sta, iv32, phase1key);
+}
+
+static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta,
+ struct ieee80211_key_conf *key)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_rxon_context *ctx = vif_priv->ctx;
+ int ret;
+ bool is_default_wep_key = false;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (iwlagn_mod_params.sw_crypto) {
+ IWL_DEBUG_MAC80211(priv, "leave - hwcrypto disabled\n");
+ return -EOPNOTSUPP;
+ }
+
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ /* fall through */
+ case WLAN_CIPHER_SUITE_CCMP:
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * We could program these keys into the hardware as well, but we
+ * don't expect much multicast traffic in IBSS and having keys
+ * for more stations is probably more useful.
+ *
+ * Mark key TX-only and return 0.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC &&
+ !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+ key->hw_key_idx = WEP_INVALID_OFFSET;
+ return 0;
+ }
+
+ /* If the key was TX-only, accept deletion */
+ if (cmd == DISABLE_KEY && key->hw_key_idx == WEP_INVALID_OFFSET)
+ return 0;
+
+ mutex_lock(&priv->shrd->mutex);
+ iwl_scan_cancel_timeout(priv, 100);
+
+ BUILD_BUG_ON(WEP_INVALID_OFFSET == IWLAGN_HW_KEY_DEFAULT);
+
+ /*
+ * If we are getting a WEP group key and we didn't receive any key mapping
+ * so far, we are in legacy WEP mode (group key only); otherwise we are
+ * in 802.1X mode.
+ * In legacy WEP mode, we use a different host command to the uCode.
+ */
+ if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
+ key->cipher == WLAN_CIPHER_SUITE_WEP104) && !sta) {
+ if (cmd == SET_KEY)
+ is_default_wep_key = !ctx->key_mapping_keys;
+ else
+ is_default_wep_key =
+ key->hw_key_idx == IWLAGN_HW_KEY_DEFAULT;
+ }
+
+
+ switch (cmd) {
+ case SET_KEY:
+ if (is_default_wep_key) {
+ ret = iwl_set_default_wep_key(priv, vif_priv->ctx, key);
+ break;
+ }
+ ret = iwl_set_dynamic_key(priv, vif_priv->ctx, key, sta);
+ if (ret) {
+ /*
+ * can't add key for RX, but we don't need it
+ * in the device for TX so still return 0
+ */
+ ret = 0;
+ key->hw_key_idx = WEP_INVALID_OFFSET;
+ }
+
+ IWL_DEBUG_MAC80211(priv, "enable hwcrypto key\n");
+ break;
+ case DISABLE_KEY:
+ if (is_default_wep_key)
+ ret = iwl_remove_default_wep_key(priv, ctx, key);
+ else
+ ret = iwl_remove_dynamic_key(priv, ctx, key, sta);
+
+ IWL_DEBUG_MAC80211(priv, "disable hwcrypto key\n");
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return ret;
+}
+
+static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn,
+ u8 buf_size)
+{
+ struct iwl_priv *priv = hw->priv;
+ int ret = -EINVAL;
+ struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
+
+ IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
+ sta->addr, tid);
+
+ if (!(cfg(priv)->sku & EEPROM_SKU_CAP_11N_ENABLE))
+ return -EACCES;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
+ break;
+ IWL_DEBUG_HT(priv, "start Rx\n");
+ ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
+ break;
+ case IEEE80211_AMPDU_RX_STOP:
+ IWL_DEBUG_HT(priv, "stop Rx\n");
+ ret = iwl_sta_rx_agg_stop(priv, sta, tid);
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ ret = 0;
+ break;
+ case IEEE80211_AMPDU_TX_START:
+ if (iwlagn_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
+ break;
+ IWL_DEBUG_HT(priv, "start Tx\n");
+ ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
+ break;
+ case IEEE80211_AMPDU_TX_STOP:
+ IWL_DEBUG_HT(priv, "stop Tx\n");
+ ret = iwlagn_tx_agg_stop(priv, vif, sta, tid);
+ if ((ret == 0) && (priv->agg_tids_count > 0)) {
+ priv->agg_tids_count--;
+ IWL_DEBUG_HT(priv, "priv->agg_tids_count = %u\n",
+ priv->agg_tids_count);
+ }
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status))
+ ret = 0;
+ if (!priv->agg_tids_count && cfg(priv)->ht_params &&
+ cfg(priv)->ht_params->use_rts_for_aggregation) {
+ /*
+ * switch off RTS/CTS if it was previously enabled
+ */
+ sta_priv->lq_sta.lq.general_params.flags &=
+ ~LINK_QUAL_FLAGS_SET_STA_TLC_RTS_MSK;
+ iwl_send_lq_cmd(priv, iwl_rxon_ctx_from_vif(vif),
+ &sta_priv->lq_sta.lq, CMD_ASYNC, false);
+ }
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ ret = iwlagn_tx_agg_oper(priv, vif, sta, tid, buf_size);
+ break;
+ }
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+ return ret;
+}
+
+static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ bool is_ap = vif->type == NL80211_IFTYPE_STATION;
+ int ret = 0;
+ u8 sta_id;
+
+ IWL_DEBUG_MAC80211(priv, "received request to add station %pM\n",
+ sta->addr);
+ mutex_lock(&priv->shrd->mutex);
+ IWL_DEBUG_INFO(priv, "proceeding to add station %pM\n",
+ sta->addr);
+ sta_priv->sta_id = IWL_INVALID_STATION;
+
+ atomic_set(&sta_priv->pending_frames, 0);
+ if (vif->type == NL80211_IFTYPE_AP)
+ sta_priv->client = true;
+
+ ret = iwl_add_station_common(priv, vif_priv->ctx, sta->addr,
+ is_ap, sta, &sta_id);
+ if (ret) {
+ IWL_ERR(priv, "Unable to add station %pM (%d)\n",
+ sta->addr, ret);
+ /* Should we return success if return code is EEXIST ? */
+ goto out;
+ }
+
+ sta_priv->sta_id = sta_id;
+
+ /* Initialize rate scaling */
+ IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
+ sta->addr);
+ iwl_rs_rate_init(priv, sta, sta_id);
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return ret;
+}
+
+static void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
+ struct ieee80211_channel_switch *ch_switch)
+{
+ struct iwl_priv *priv = hw->priv;
+ const struct iwl_channel_info *ch_info;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_channel *channel = ch_switch->channel;
+ struct iwl_ht_config *ht_conf = &priv->current_ht_config;
+ /*
+ * MULTI-FIXME
+ * When we add support for multiple interfaces, we need to
+ * revisit this. The channel switch command in the device
+ * only affects the BSS context, but what does that really
+ * mean? And what if we get a CSA on the second interface?
+ * This needs a lot of work.
+ */
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ u16 ch;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ mutex_lock(&priv->shrd->mutex);
+
+ if (iwl_is_rfkill(priv->shrd))
+ goto out;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status) ||
+ test_bit(STATUS_SCANNING, &priv->shrd->status) ||
+ test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status))
+ goto out;
+
+ if (!iwl_is_associated_ctx(ctx))
+ goto out;
+
+ if (!cfg(priv)->lib->set_channel_switch)
+ goto out;
+
+ ch = channel->hw_value;
+ if (le16_to_cpu(ctx->active.channel) == ch)
+ goto out;
+
+ ch_info = iwl_get_channel_info(priv, channel->band, ch);
+ if (!is_channel_valid(ch_info)) {
+ IWL_DEBUG_MAC80211(priv, "invalid channel\n");
+ goto out;
+ }
+
+ spin_lock_irq(&priv->shrd->lock);
+
+ priv->current_ht_config.smps = conf->smps_mode;
+
+ /* Configure HT40 channels */
+ ctx->ht.enabled = conf_is_ht(conf);
+ if (ctx->ht.enabled)
+ iwlagn_config_ht40(conf, ctx);
+ else
+ ctx->ht.is_40mhz = false;
+
+ if ((le16_to_cpu(ctx->staging.channel) != ch))
+ ctx->staging.flags = 0;
+
+ iwl_set_rxon_channel(priv, channel, ctx);
+ iwl_set_rxon_ht(priv, ht_conf);
+ iwl_set_flags_for_band(priv, ctx, channel->band, ctx->vif);
+
+ spin_unlock_irq(&priv->shrd->lock);
+
+ iwl_set_rate(priv);
+ /*
+ * at this point, staging_rxon has the
+ * configuration for channel switch
+ */
+ set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
+ priv->switch_channel = cpu_to_le16(ch);
+ if (cfg(priv)->lib->set_channel_switch(priv, ch_switch)) {
+ clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->shrd->status);
+ priv->switch_channel = 0;
+ ieee80211_chswitch_done(ctx->vif, false);
+ }
+
+out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static void iwlagn_configure_filter(struct ieee80211_hw *hw,
+ unsigned int changed_flags,
+ unsigned int *total_flags,
+ u64 multicast)
+{
+ struct iwl_priv *priv = hw->priv;
+ __le32 filter_or = 0, filter_nand = 0;
+ struct iwl_rxon_context *ctx;
+
+#define CHK(test, flag) do { \
+ if (*total_flags & (test)) \
+ filter_or |= (flag); \
+ else \
+ filter_nand |= (flag); \
+ } while (0)
+
+ IWL_DEBUG_MAC80211(priv, "Enter: changed: 0x%x, total: 0x%x\n",
+ changed_flags, *total_flags);
+
+ CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
+ /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
+ CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
+ CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
+
+#undef CHK
+
+ mutex_lock(&priv->shrd->mutex);
+
+ for_each_context(priv, ctx) {
+ ctx->staging.filter_flags &= ~filter_nand;
+ ctx->staging.filter_flags |= filter_or;
+
+ /*
+ * Not committing directly because hardware can perform a scan,
+ * but we'll eventually commit the filter flags change anyway.
+ */
+ }
+
+ mutex_unlock(&priv->shrd->mutex);
+
+ /*
+ * Receiving all multicast frames is always enabled by the
+ * default flags setup in iwl_connection_init_rx_config()
+ * since we currently do not support programming multicast
+ * filters into the device.
+ */
+ *total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
+ FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
+}
+
+static void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ mutex_lock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->shrd->status)) {
+ IWL_DEBUG_TX(priv, "Aborting flush due to device shutdown\n");
+ goto done;
+ }
+ if (iwl_is_rfkill(priv->shrd)) {
+ IWL_DEBUG_TX(priv, "Aborting flush due to RF Kill\n");
+ goto done;
+ }
+
+ /*
+ * mac80211 will not push any more frames for transmit
+ * until the flush is completed
+ */
+ if (drop) {
+ IWL_DEBUG_MAC80211(priv, "send flush command\n");
+ if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
+ IWL_ERR(priv, "flush request failed\n");
+ goto done;
+ }
+ }
+ IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n");
+ iwl_trans_wait_tx_queue_empty(trans(priv));
+done:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static int iwlagn_mac_remain_on_channel(struct ieee80211_hw *hw,
+ struct ieee80211_channel *channel,
+ enum nl80211_channel_type channel_type,
+ int duration)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_PAN];
+ int err = 0;
+
+ if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
+ return -EOPNOTSUPP;
+
+ if (!(ctx->interface_modes & BIT(NL80211_IFTYPE_P2P_CLIENT)))
+ return -EOPNOTSUPP;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+
+ if (test_bit(STATUS_SCAN_HW, &priv->shrd->status)) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ priv->hw_roc_channel = channel;
+ priv->hw_roc_chantype = channel_type;
+ /* convert from ms to TU (1 TU = 1024 usec) */
+ priv->hw_roc_duration = DIV_ROUND_UP(1000 * duration, 1024);
+ priv->hw_roc_start_notified = false;
+ cancel_delayed_work(&priv->hw_roc_disable_work);
+
+ if (!ctx->is_active) {
+ static const struct iwl_qos_info default_qos_data = {
+ .def_qos_parm = {
+ .ac[0] = {
+ .cw_min = cpu_to_le16(3),
+ .cw_max = cpu_to_le16(7),
+ .aifsn = 2,
+ .edca_txop = cpu_to_le16(1504),
+ },
+ .ac[1] = {
+ .cw_min = cpu_to_le16(7),
+ .cw_max = cpu_to_le16(15),
+ .aifsn = 2,
+ .edca_txop = cpu_to_le16(3008),
+ },
+ .ac[2] = {
+ .cw_min = cpu_to_le16(15),
+ .cw_max = cpu_to_le16(1023),
+ .aifsn = 3,
+ },
+ .ac[3] = {
+ .cw_min = cpu_to_le16(15),
+ .cw_max = cpu_to_le16(1023),
+ .aifsn = 7,
+ },
+ },
+ };
+
+ ctx->is_active = true;
+ ctx->qos_data = default_qos_data;
+ ctx->staging.dev_type = RXON_DEV_TYPE_P2P;
+ memcpy(ctx->staging.node_addr,
+ priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
+ ETH_ALEN);
+ memcpy(ctx->staging.bssid_addr,
+ priv->contexts[IWL_RXON_CTX_BSS].staging.node_addr,
+ ETH_ALEN);
+ err = iwlagn_commit_rxon(priv, ctx);
+ if (err)
+ goto out;
+ ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK |
+ RXON_FILTER_PROMISC_MSK |
+ RXON_FILTER_CTL2HOST_MSK;
+
+ err = iwlagn_commit_rxon(priv, ctx);
+ if (err) {
+ iwlagn_disable_roc(priv);
+ goto out;
+ }
+ priv->hw_roc_setup = true;
+ }
+
+ err = iwl_scan_initiate(priv, ctx->vif, IWL_SCAN_ROC, channel->band);
+ if (err)
+ iwlagn_disable_roc(priv);
+
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return err;
+}
+
+static int iwlagn_mac_cancel_remain_on_channel(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ if (!(priv->shrd->valid_contexts & BIT(IWL_RXON_CTX_PAN)))
+ return -EOPNOTSUPP;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+ iwl_scan_cancel_timeout(priv, priv->hw_roc_duration);
+ iwlagn_disable_roc(priv);
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return 0;
+}
+
+static int iwlagn_mac_tx_sync(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *bssid,
+ enum ieee80211_tx_sync_type type)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_rxon_context *ctx = vif_priv->ctx;
+ int ret;
+ u8 sta_id;
+
+ if (ctx->ctxid != IWL_RXON_CTX_PAN)
+ return 0;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+
+ if (iwl_is_associated_ctx(ctx)) {
+ ret = 0;
+ goto out;
+ }
+
+ if (ctx->preauth_bssid || test_bit(STATUS_SCAN_HW,
+ &priv->shrd->status)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = iwl_add_station_common(priv, ctx, bssid, true, NULL, &sta_id);
+ if (ret)
+ goto out;
+
+ if (WARN_ON(sta_id != ctx->ap_sta_id)) {
+ ret = -EIO;
+ goto out_remove_sta;
+ }
+
+ memcpy(ctx->bssid, bssid, ETH_ALEN);
+ ctx->preauth_bssid = true;
+
+ ret = iwlagn_commit_rxon(priv, ctx);
+
+ if (ret == 0)
+ goto out;
+
+ out_remove_sta:
+ iwl_remove_station(priv, sta_id, bssid);
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return ret;
+}
+
+static void iwlagn_mac_finish_tx_sync(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ const u8 *bssid,
+ enum ieee80211_tx_sync_type type)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_rxon_context *ctx = vif_priv->ctx;
+
+ if (ctx->ctxid != IWL_RXON_CTX_PAN)
+ return;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+
+ if (iwl_is_associated_ctx(ctx))
+ goto out;
+
+ iwl_remove_station(priv, ctx->ap_sta_id, bssid);
+ ctx->preauth_bssid = false;
+ /* no need to commit */
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static void iwlagn_mac_rssi_callback(struct ieee80211_hw *hw,
+ enum ieee80211_rssi_event rssi_event)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+ mutex_lock(&priv->shrd->mutex);
+
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist) {
+ if (rssi_event == RSSI_EVENT_LOW)
+ priv->bt_enable_pspoll = true;
+ else if (rssi_event == RSSI_EVENT_HIGH)
+ priv->bt_enable_pspoll = false;
+
+ iwlagn_send_advance_bt_config(priv);
+ } else {
+ IWL_DEBUG_MAC80211(priv, "Advanced BT coex disabled,"
+ "ignoring RSSI callback\n");
+ }
+
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+static int iwlagn_mac_set_tim(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, bool set)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ queue_work(priv->shrd->workqueue, &priv->beacon_update);
+
+ return 0;
+}
+
+static int iwlagn_mac_conf_tx(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_rxon_context *ctx = vif_priv->ctx;
+ unsigned long flags;
+ int q;
+
+ if (WARN_ON(!ctx))
+ return -EINVAL;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (!iwl_is_ready_rf(priv->shrd)) {
+ IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
+ return -EIO;
+ }
+
+ if (queue >= AC_NUM) {
+ IWL_DEBUG_MAC80211(priv, "leave - queue >= AC_NUM %d\n", queue);
+ return 0;
+ }
+
+ q = AC_NUM - 1 - queue;
+
+ spin_lock_irqsave(&priv->shrd->lock, flags);
+
+ ctx->qos_data.def_qos_parm.ac[q].cw_min =
+ cpu_to_le16(params->cw_min);
+ ctx->qos_data.def_qos_parm.ac[q].cw_max =
+ cpu_to_le16(params->cw_max);
+ ctx->qos_data.def_qos_parm.ac[q].aifsn = params->aifs;
+ ctx->qos_data.def_qos_parm.ac[q].edca_txop =
+ cpu_to_le16((params->txop * 32));
+
+ ctx->qos_data.def_qos_parm.ac[q].reserved1 = 0;
+
+ spin_unlock_irqrestore(&priv->shrd->lock, flags);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+ return 0;
+}
+
+static int iwlagn_mac_tx_last_beacon(struct ieee80211_hw *hw)
+{
+ struct iwl_priv *priv = hw->priv;
+
+ return priv->ibss_manager == IWL_IBSS_MANAGER;
+}
+
+static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
+{
+ iwl_connection_init_rx_config(priv, ctx);
+
+ iwlagn_set_rxon_chain(priv, ctx);
+
+ return iwlagn_commit_rxon(priv, ctx);
+}
+
+static int iwl_setup_interface(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx)
+{
+ struct ieee80211_vif *vif = ctx->vif;
+ int err;
+
+ lockdep_assert_held(&priv->shrd->mutex);
+
+ /*
+ * This variable will be correct only when there's just
+ * a single context, but all code using it is for hardware
+ * that supports only one context.
+ */
+ priv->iw_mode = vif->type;
+
+ ctx->is_active = true;
+
+ err = iwl_set_mode(priv, ctx);
+ if (err) {
+ if (!ctx->always_active)
+ ctx->is_active = false;
+ return err;
+ }
+
+ if (cfg(priv)->bt_params && cfg(priv)->bt_params->advanced_bt_coexist &&
+ vif->type == NL80211_IFTYPE_ADHOC) {
+ /*
+ * pretend to have high BT traffic as long as we
+ * are operating in IBSS mode, as this will cause
+ * the rate scaling etc. to behave as intended.
+ */
+ priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
+ }
+
+ return 0;
+}
+
+static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
+ struct iwl_rxon_context *tmp, *ctx = NULL;
+ int err;
+ enum nl80211_iftype viftype = ieee80211_vif_type_p2p(vif);
+
+ IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
+ viftype, vif->addr);
+
+ cancel_delayed_work_sync(&priv->hw_roc_disable_work);
+
+ mutex_lock(&priv->shrd->mutex);
+
+ iwlagn_disable_roc(priv);
+
+ if (!iwl_is_ready_rf(priv->shrd)) {
+ IWL_WARN(priv, "Tried to add an interface while the device is not ready\n");
+ err = -EINVAL;
+ goto out;
+ }
+
+ for_each_context(priv, tmp) {
+ u32 possible_modes =
+ tmp->interface_modes | tmp->exclusive_interface_modes;
+
+ if (tmp->vif) {
+ /* check if this busy context is exclusive */
+ if (tmp->exclusive_interface_modes &
+ BIT(tmp->vif->type)) {
+ err = -EINVAL;
+ goto out;
+ }
+ continue;
+ }
+
+ if (!(possible_modes & BIT(viftype)))
+ continue;
+
+ /* found a possibly usable context without an interface */
+ ctx = tmp;
+ break;
+ }
+
+ if (!ctx) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ vif_priv->ctx = ctx;
+ ctx->vif = vif;
+
+ err = iwl_setup_interface(priv, ctx);
+ if (!err)
+ goto out;
+
+ ctx->vif = NULL;
+ priv->iw_mode = NL80211_IFTYPE_STATION;
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+ return err;
+}
+
+static void iwl_teardown_interface(struct iwl_priv *priv,
+ struct ieee80211_vif *vif,
+ bool mode_change)
+{
+ struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+
+ lockdep_assert_held(&priv->shrd->mutex);
+
+ if (priv->scan_vif == vif) {
+ iwl_scan_cancel_timeout(priv, 200);
+ iwl_force_scan_end(priv);
+ }
+
+ if (!mode_change) {
+ iwl_set_mode(priv, ctx);
+ if (!ctx->always_active)
+ ctx->is_active = false;
+ }
+
+ /*
+ * When removing the IBSS interface, overwrite the
+ * BT traffic load with the stored one from the last
+ * notification, if any. On devices that don't
+ * implement this, it has no effect since both
+ * values are the same and zero.
+ */
+ if (vif->type == NL80211_IFTYPE_ADHOC)
+ priv->bt_traffic_load = priv->last_bt_traffic_load;
+}
+
+static void iwlagn_mac_remove_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ mutex_lock(&priv->shrd->mutex);
+
+ if (WARN_ON(ctx->vif != vif)) {
+ struct iwl_rxon_context *tmp;
+ IWL_ERR(priv, "ctx->vif = %p, vif = %p\n", ctx->vif, vif);
+ for_each_context(priv, tmp)
+ IWL_ERR(priv, "\tID = %d:\tctx = %p\tctx->vif = %p\n",
+ tmp->ctxid, tmp, tmp->vif);
+ }
+ ctx->vif = NULL;
+
+ iwl_teardown_interface(priv, vif, false);
+
+ mutex_unlock(&priv->shrd->mutex);
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+}
+
+static int iwlagn_mac_change_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum nl80211_iftype newtype, bool newp2p)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
+ struct iwl_rxon_context *bss_ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ struct iwl_rxon_context *tmp;
+ enum nl80211_iftype newviftype = newtype;
+ u32 interface_modes;
+ int err;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ newtype = ieee80211_iftype_p2p(newtype, newp2p);
+
+ mutex_lock(&priv->shrd->mutex);
+
+ if (!ctx->vif || !iwl_is_ready_rf(priv->shrd)) {
+ /*
+ * Huh? But wait ... this can maybe happen when
+ * we're in the middle of a firmware restart!
+ */
+ err = -EBUSY;
+ goto out;
+ }
+
+ interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
+
+ if (!(interface_modes & BIT(newtype))) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * Refuse a change that should be done by moving from the PAN
+ * context to the BSS context instead, if the BSS context is
+ * available and can support the new interface type.
+ */
+ if (ctx->ctxid == IWL_RXON_CTX_PAN && !bss_ctx->vif &&
+ (bss_ctx->interface_modes & BIT(newtype) ||
+ bss_ctx->exclusive_interface_modes & BIT(newtype))) {
+ BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (ctx->exclusive_interface_modes & BIT(newtype)) {
+ for_each_context(priv, tmp) {
+ if (ctx == tmp)
+ continue;
+
+ if (!tmp->vif)
+ continue;
+
+ /*
+ * The current mode switch would be exclusive, but
+ * another context is active ... refuse the switch.
+ */
+ err = -EBUSY;
+ goto out;
+ }
+ }
+
+ /* success */
+ iwl_teardown_interface(priv, vif, true);
+ vif->type = newviftype;
+ vif->p2p = newp2p;
+ err = iwl_setup_interface(priv, ctx);
+ WARN_ON(err);
+ /*
+ * We've switched internally, but submitting to the
+ * device may have failed for some reason. Mask this
+ * error, because otherwise mac80211 will not switch
+ * (and set the interface type back) and we'll be
+ * out of sync with it.
+ */
+ err = 0;
+
+ out:
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return err;
+}
+
+static int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct cfg80211_scan_request *req)
+{
+ struct iwl_priv *priv = hw->priv;
+ int ret;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ if (req->n_channels == 0)
+ return -EINVAL;
+
+ mutex_lock(&priv->shrd->mutex);
+
+ /*
+ * If an internal scan is in progress, just defer the
+ * request by saving the scan_request and vif.
+ */
+ if (priv->scan_type != IWL_SCAN_NORMAL) {
+ IWL_DEBUG_SCAN(priv,
+ "SCAN request during internal scan - defer\n");
+ priv->scan_request = req;
+ priv->scan_vif = vif;
+ ret = 0;
+ } else {
+ priv->scan_request = req;
+ priv->scan_vif = vif;
+ /*
+ * mac80211 will only ask for one band at a time
+ * so using channels[0] here is ok
+ */
+ ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL,
+ req->channels[0]->band);
+ if (ret) {
+ priv->scan_request = NULL;
+ priv->scan_vif = NULL;
+ }
+ }
+
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ mutex_unlock(&priv->shrd->mutex);
+
+ return ret;
+}
+
+static int iwlagn_mac_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ int ret;
+
+ IWL_DEBUG_MAC80211(priv, "enter: received request to remove "
+ "station %pM\n", sta->addr);
+ mutex_lock(&priv->shrd->mutex);
+ IWL_DEBUG_INFO(priv, "proceeding to remove station %pM\n",
+ sta->addr);
+ ret = iwl_remove_station(priv, sta_priv->sta_id, sta->addr);
+ if (ret)
+ IWL_DEBUG_QUIET_RFKILL(priv, "Error removing station %pM\n",
+ sta->addr);
+ mutex_unlock(&priv->shrd->mutex);
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+
+ return ret;
+}
+
+static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->shrd->sta_lock, flags);
+ priv->stations[sta_id].sta.station_flags &= ~STA_FLG_PWR_SAVE_MSK;
+ priv->stations[sta_id].sta.station_flags_msk = STA_FLG_PWR_SAVE_MSK;
+ priv->stations[sta_id].sta.sta.modify_mask = 0;
+ priv->stations[sta_id].sta.sleep_tx_count = 0;
+ priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
+ iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
+ spin_unlock_irqrestore(&priv->shrd->sta_lock, flags);
+
+}
+
+static void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum sta_notify_cmd cmd,
+ struct ieee80211_sta *sta)
+{
+ struct iwl_priv *priv = hw->priv;
+ struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
+ int sta_id;
+
+ IWL_DEBUG_MAC80211(priv, "enter\n");
+
+ switch (cmd) {
+ case STA_NOTIFY_SLEEP:
+ WARN_ON(!sta_priv->client);
+ sta_priv->asleep = true;
+ if (atomic_read(&sta_priv->pending_frames) > 0)
+ ieee80211_sta_block_awake(hw, sta, true);
+ break;
+ case STA_NOTIFY_AWAKE:
+ WARN_ON(!sta_priv->client);
+ if (!sta_priv->asleep)
+ break;
+ sta_priv->asleep = false;
+ sta_id = iwl_sta_id(sta);
+ if (sta_id != IWL_INVALID_STATION)
+ iwl_sta_modify_ps_wake(priv, sta_id);
+ break;
+ default:
+ break;
+ }
+ IWL_DEBUG_MAC80211(priv, "leave\n");
+}
+
+struct ieee80211_ops iwlagn_hw_ops = {
+ .tx = iwlagn_mac_tx,
+ .start = iwlagn_mac_start,
+ .stop = iwlagn_mac_stop,
+#ifdef CONFIG_PM_SLEEP
+ .suspend = iwlagn_mac_suspend,
+ .resume = iwlagn_mac_resume,
+#endif
+ .add_interface = iwlagn_mac_add_interface,
+ .remove_interface = iwlagn_mac_remove_interface,
+ .change_interface = iwlagn_mac_change_interface,
+ .config = iwlagn_mac_config,
+ .configure_filter = iwlagn_configure_filter,
+ .set_key = iwlagn_mac_set_key,
+ .update_tkip_key = iwlagn_mac_update_tkip_key,
+ .set_rekey_data = iwlagn_mac_set_rekey_data,
+ .conf_tx = iwlagn_mac_conf_tx,
+ .bss_info_changed = iwlagn_bss_info_changed,
+ .ampdu_action = iwlagn_mac_ampdu_action,
+ .hw_scan = iwlagn_mac_hw_scan,
+ .sta_notify = iwlagn_mac_sta_notify,
+ .sta_add = iwlagn_mac_sta_add,
+ .sta_remove = iwlagn_mac_sta_remove,
+ .channel_switch = iwlagn_mac_channel_switch,
+ .flush = iwlagn_mac_flush,
+ .tx_last_beacon = iwlagn_mac_tx_last_beacon,
+ .remain_on_channel = iwlagn_mac_remain_on_channel,
+ .cancel_remain_on_channel = iwlagn_mac_cancel_remain_on_channel,
+ .rssi_callback = iwlagn_mac_rssi_callback,
+ CFG80211_TESTMODE_CMD(iwlagn_mac_testmode_cmd)
+ CFG80211_TESTMODE_DUMP(iwlagn_mac_testmode_dump)
+ .tx_sync = iwlagn_mac_tx_sync,
+ .finish_tx_sync = iwlagn_mac_finish_tx_sync,
+ .set_tim = iwlagn_mac_set_tim,
+};
+
+/* This function both allocates and initializes hw and priv. */
+struct ieee80211_hw *iwl_alloc_all(void)
+{
+ struct iwl_priv *priv;
+ /* mac80211 allocates memory for this device instance, including
+ * space for this driver's private structure */
+ struct ieee80211_hw *hw;
+
+ hw = ieee80211_alloc_hw(sizeof(struct iwl_priv), &iwlagn_hw_ops);
+ if (!hw)
+ goto out;
+
+ priv = hw->priv;
+ priv->hw = hw;
+
+out:
+ return hw;
+}
diff --git a/drivers/net/wireless/iwlwifi/iwl-pci.c b/drivers/net/wireless/iwlwifi/iwl-pci.c
index 1800029..fb30ea7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-pci.c
+++ b/drivers/net/wireless/iwlwifi/iwl-pci.c
@@ -135,7 +135,7 @@ static void iwl_pci_apm_config(struct iwl_bus *bus)
}
}
-static void iwl_pci_get_hw_id(struct iwl_bus *bus, char buf[],
+static void iwl_pci_get_hw_id_string(struct iwl_bus *bus, char buf[],
int buf_len)
{
struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
@@ -144,6 +144,13 @@ static void iwl_pci_get_hw_id(struct iwl_bus *bus, char buf[],
pci_dev->subsystem_device);
}
+static u32 iwl_pci_get_hw_id(struct iwl_bus *bus)
+{
+ struct pci_dev *pci_dev = IWL_BUS_GET_PCI_DEV(bus);
+
+ return (pci_dev->device << 16) + pci_dev->subsystem_device;
+}
+
static void iwl_pci_write8(struct iwl_bus *bus, u32 ofs, u8 val)
{
iowrite8(val, IWL_BUS_GET_PCI_BUS(bus)->hw_base + ofs);
@@ -163,6 +170,7 @@ static u32 iwl_pci_read32(struct iwl_bus *bus, u32 ofs)
static const struct iwl_bus_ops bus_ops_pci = {
.get_pm_support = iwl_pci_is_pm_supported,
.apm_config = iwl_pci_apm_config,
+ .get_hw_id_string = iwl_pci_get_hw_id_string,
.get_hw_id = iwl_pci_get_hw_id,
.write8 = iwl_pci_write8,
.write32 = iwl_pci_write32,
@@ -256,6 +264,8 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x0082, 0xC020, iwl6005_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x0085, 0xC220, iwl6005_2agn_sff_cfg)},
{IWL_PCI_DEVICE(0x0082, 0x1341, iwl6005_2agn_d_cfg)},
+ {IWL_PCI_DEVICE(0x0082, 0x1304, iwl6005_2agn_cfg)},/* low 5GHz active */
+ {IWL_PCI_DEVICE(0x0082, 0x1305, iwl6005_2agn_cfg)},/* high 5GHz active */
/* 6x30 Series */
{IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)},
@@ -325,46 +335,28 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x0890, 0x4022, iwl2000_2bgn_cfg)},
{IWL_PCI_DEVICE(0x0891, 0x4222, iwl2000_2bgn_cfg)},
{IWL_PCI_DEVICE(0x0890, 0x4422, iwl2000_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0890, 0x4026, iwl2000_2bg_cfg)},
- {IWL_PCI_DEVICE(0x0891, 0x4226, iwl2000_2bg_cfg)},
- {IWL_PCI_DEVICE(0x0890, 0x4426, iwl2000_2bg_cfg)},
{IWL_PCI_DEVICE(0x0890, 0x4822, iwl2000_2bgn_d_cfg)},
/* 2x30 Series */
{IWL_PCI_DEVICE(0x0887, 0x4062, iwl2030_2bgn_cfg)},
{IWL_PCI_DEVICE(0x0888, 0x4262, iwl2030_2bgn_cfg)},
{IWL_PCI_DEVICE(0x0887, 0x4462, iwl2030_2bgn_cfg)},
- {IWL_PCI_DEVICE(0x0887, 0x4066, iwl2030_2bg_cfg)},
- {IWL_PCI_DEVICE(0x0888, 0x4266, iwl2030_2bg_cfg)},
- {IWL_PCI_DEVICE(0x0887, 0x4466, iwl2030_2bg_cfg)},
/* 6x35 Series */
{IWL_PCI_DEVICE(0x088E, 0x4060, iwl6035_2agn_cfg)},
{IWL_PCI_DEVICE(0x088F, 0x4260, iwl6035_2agn_cfg)},
{IWL_PCI_DEVICE(0x088E, 0x4460, iwl6035_2agn_cfg)},
- {IWL_PCI_DEVICE(0x088E, 0x4064, iwl6035_2abg_cfg)},
- {IWL_PCI_DEVICE(0x088F, 0x4264, iwl6035_2abg_cfg)},
- {IWL_PCI_DEVICE(0x088E, 0x4464, iwl6035_2abg_cfg)},
- {IWL_PCI_DEVICE(0x088E, 0x4066, iwl6035_2bg_cfg)},
- {IWL_PCI_DEVICE(0x088F, 0x4266, iwl6035_2bg_cfg)},
- {IWL_PCI_DEVICE(0x088E, 0x4466, iwl6035_2bg_cfg)},
/* 105 Series */
{IWL_PCI_DEVICE(0x0894, 0x0022, iwl105_bgn_cfg)},
{IWL_PCI_DEVICE(0x0895, 0x0222, iwl105_bgn_cfg)},
{IWL_PCI_DEVICE(0x0894, 0x0422, iwl105_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0894, 0x0026, iwl105_bg_cfg)},
- {IWL_PCI_DEVICE(0x0895, 0x0226, iwl105_bg_cfg)},
- {IWL_PCI_DEVICE(0x0894, 0x0426, iwl105_bg_cfg)},
{IWL_PCI_DEVICE(0x0894, 0x0822, iwl105_bgn_d_cfg)},
/* 135 Series */
{IWL_PCI_DEVICE(0x0892, 0x0062, iwl135_bgn_cfg)},
{IWL_PCI_DEVICE(0x0893, 0x0262, iwl135_bgn_cfg)},
{IWL_PCI_DEVICE(0x0892, 0x0462, iwl135_bgn_cfg)},
- {IWL_PCI_DEVICE(0x0892, 0x0066, iwl135_bg_cfg)},
- {IWL_PCI_DEVICE(0x0893, 0x0266, iwl135_bg_cfg)},
- {IWL_PCI_DEVICE(0x0892, 0x0466, iwl135_bg_cfg)},
{0}
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 4eaab20..2b188a6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -167,7 +167,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
u8 skip;
u32 slp_itrvl;
- if (priv->cfg->adv_pm) {
+ if (cfg(priv)->adv_pm) {
table = apm_range_2;
if (period <= IWL_DTIM_RANGE_1_MAX)
table = apm_range_1;
@@ -221,7 +221,7 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
if (iwl_advanced_bt_coexist(priv)) {
- if (!priv->cfg->bt_params->bt_sco_disable)
+ if (!cfg(priv)->bt_params->bt_sco_disable)
cmd->flags |= IWL_POWER_BT_SCO_ENA;
else
cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
@@ -307,7 +307,7 @@ static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv,
cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
if (iwl_advanced_bt_coexist(priv)) {
- if (!priv->cfg->bt_params->bt_sco_disable)
+ if (!cfg(priv)->bt_params->bt_sco_disable)
cmd->flags |= IWL_POWER_BT_SCO_ENA;
else
cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
@@ -350,7 +350,7 @@ static void iwl_power_build_cmd(struct iwl_priv *priv,
if (priv->shrd->wowlan)
iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, dtimper);
- else if (!priv->cfg->base_params->no_idle_support &&
+ else if (!cfg(priv)->base_params->no_idle_support &&
priv->hw->conf.flags & IEEE80211_CONF_IDLE)
iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
else if (iwl_tt_is_low_power_state(priv)) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index e5d727f..a645472 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -416,6 +416,8 @@ static u16 iwl_limit_dwell(struct iwl_priv *priv, u16 dwell_time)
if (!iwl_is_associated_ctx(ctx))
continue;
+ if (ctx->staging.dev_type == RXON_DEV_TYPE_P2P)
+ continue;
value = ctx->beacon_int;
if (!value)
value = IWL_PASSIVE_DWELL_BASE;
@@ -567,7 +569,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
struct iwl_scan_cmd *scan;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
u32 rate_flags = 0;
- u16 cmd_len;
+ u16 cmd_len = 0;
u16 rx_chain = 0;
enum ieee80211_band band;
u8 n_probes = 0;
@@ -678,7 +680,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
priv->contexts[IWL_RXON_CTX_BSS].active.flags &
RXON_FLG_CHANNEL_MODE_MSK)
>> RXON_FLG_CHANNEL_MODE_POS;
- if (chan_mod == CHANNEL_MODE_PURE_40) {
+ if ((priv->scan_request && priv->scan_request->no_cck) ||
+ chan_mod == CHANNEL_MODE_PURE_40) {
rate = IWL_RATE_6M_PLCP;
} else {
rate = IWL_RATE_1M_PLCP;
@@ -688,8 +691,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
* Internal scans are passive, so we can indiscriminately set
* the BT ignore flag on 2.4 GHz since it applies to TX only.
*/
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist)
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist)
scan->tx_cmd.tx_flags |= TX_CMD_FLG_IGNORE_BT;
break;
case IEEE80211_BAND_5GHZ:
@@ -730,12 +733,12 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
band = priv->scan_band;
- if (priv->cfg->scan_rx_antennas[band])
- rx_ant = priv->cfg->scan_rx_antennas[band];
+ if (cfg(priv)->scan_rx_antennas[band])
+ rx_ant = cfg(priv)->scan_rx_antennas[band];
if (band == IEEE80211_BAND_2GHZ &&
- priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist) {
/* transmit 2.4 GHz probes only on first antenna */
scan_tx_antennas = first_antenna(scan_tx_antennas);
}
@@ -759,8 +762,8 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
rx_ant = first_antenna(active_chains);
}
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist &&
+ if (cfg(priv)->bt_params &&
+ cfg(priv)->bt_params->advanced_bt_coexist &&
priv->bt_full_concurrent) {
/* operated as 1x1 in full concurrency mode */
rx_ant = first_antenna(rx_ant);
@@ -938,51 +941,6 @@ int __must_check iwl_scan_initiate(struct iwl_priv *priv,
return 0;
}
-int iwlagn_mac_hw_scan(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct cfg80211_scan_request *req)
-{
- struct iwl_priv *priv = hw->priv;
- int ret;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (req->n_channels == 0)
- return -EINVAL;
-
- mutex_lock(&priv->shrd->mutex);
-
- /*
- * If an internal scan is in progress, just set
- * up the scan_request as per above.
- */
- if (priv->scan_type != IWL_SCAN_NORMAL) {
- IWL_DEBUG_SCAN(priv,
- "SCAN request during internal scan - defer\n");
- priv->scan_request = req;
- priv->scan_vif = vif;
- ret = 0;
- } else {
- priv->scan_request = req;
- priv->scan_vif = vif;
- /*
- * mac80211 will only ask for one band at a time
- * so using channels[0] here is ok
- */
- ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL,
- req->channels[0]->band);
- if (ret) {
- priv->scan_request = NULL;
- priv->scan_vif = NULL;
- }
- }
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- mutex_unlock(&priv->shrd->mutex);
-
- return ret;
-}
/*
 * internal short scan, this function should only be called while associated.
diff --git a/drivers/net/wireless/iwlwifi/iwl-shared.h b/drivers/net/wireless/iwlwifi/iwl-shared.h
index 14eaf37..dc55cc4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-shared.h
+++ b/drivers/net/wireless/iwlwifi/iwl-shared.h
@@ -94,9 +94,9 @@
* This implementation is iwl-pci.c
*/
-struct iwl_cfg;
struct iwl_bus;
struct iwl_priv;
+struct iwl_trans;
struct iwl_sensitivity_ranges;
struct iwl_trans_ops;
@@ -107,6 +107,10 @@ struct iwl_trans_ops;
extern struct iwl_mod_params iwlagn_mod_params;
+#define IWL_DISABLE_HT_ALL BIT(0)
+#define IWL_DISABLE_HT_TXAGG BIT(1)
+#define IWL_DISABLE_HT_RXAGG BIT(2)
+
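As a quick illustration of how these bits are meant to be combined (a hypothetical sketch, not part of this patch; the bits shown are the ones tested in iwlagn_mac_ampdu_action() above):

	/* Keep HT enabled but disable A-MPDU aggregation in both
	 * directions; illustrative assignment only. */
	iwlagn_mod_params.disable_11n = IWL_DISABLE_HT_TXAGG |
					IWL_DISABLE_HT_RXAGG;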
/**
* struct iwl_mod_params
*
@@ -114,7 +118,8 @@ extern struct iwl_mod_params iwlagn_mod_params;
*
* @sw_crypto: using hardware encryption, default = 0
* @num_of_queues: number of tx queue, HW dependent
- * @disable_11n: 11n capabilities enabled, default = 0
+ * @disable_11n: disable 11n capabilities, default = 0,
+ * use IWL_DISABLE_HT_* constants
* @amsdu_size_8K: enable 8K amsdu size, default = 1
* @antenna: both antennas (use diversity), default = 0
* @restart_fw: restart firmware, default = 1
@@ -135,7 +140,7 @@ extern struct iwl_mod_params iwlagn_mod_params;
struct iwl_mod_params {
int sw_crypto;
int num_of_queues;
- int disable_11n;
+ unsigned int disable_11n;
int amsdu_size_8K;
int antenna;
int restart_fw;
@@ -174,8 +179,6 @@ struct iwl_mod_params {
 * @ct_kill_exit_threshold: when to re-enable the device - in hw dependent unit
* relevant for 1000, 6000 and up
* @wd_timeout: TX queues watchdog timeout
- * @calib_init_cfg: setup initial calibrations for the hw
- * @calib_rt_cfg: setup runtime calibrations for the hw
* @struct iwl_sensitivity_ranges: range of sensitivity values
*/
struct iwl_hw_params {
@@ -195,67 +198,148 @@ struct iwl_hw_params {
u32 ct_kill_exit_threshold;
unsigned int wd_timeout;
- u32 calib_init_cfg;
- u32 calib_rt_cfg;
const struct iwl_sensitivity_ranges *sens;
};
/**
- * enum iwl_agg_state
+ * enum iwl_ucode_type
*
- * The state machine of the BA agreement establishment / tear down.
- * These states relate to a specific RA / TID.
+ * The type of ucode currently loaded on the hardware.
*
- * @IWL_AGG_OFF: aggregation is not used
- * @IWL_AGG_ON: aggregation session is up
- * @IWL_EMPTYING_HW_QUEUE_ADDBA: establishing a BA session - waiting for the
- * HW queue to be empty from packets for this RA /TID.
- * @IWL_EMPTYING_HW_QUEUE_DELBA: tearing down a BA session - waiting for the
- * HW queue to be empty from packets for this RA /TID.
+ * @IWL_UCODE_NONE: No ucode loaded
+ * @IWL_UCODE_REGULAR: Normal runtime ucode
+ * @IWL_UCODE_INIT: Initial ucode
+ * @IWL_UCODE_WOWLAN: Wake on Wireless enabled ucode
*/
-enum iwl_agg_state {
- IWL_AGG_OFF = 0,
- IWL_AGG_ON,
- IWL_EMPTYING_HW_QUEUE_ADDBA,
- IWL_EMPTYING_HW_QUEUE_DELBA,
+enum iwl_ucode_type {
+ IWL_UCODE_NONE,
+ IWL_UCODE_REGULAR,
+ IWL_UCODE_INIT,
+ IWL_UCODE_WOWLAN,
};
/**
- * struct iwl_ht_agg - aggregation state machine
-
- * This structs holds the states for the BA agreement establishment and tear
- * down. It also holds the state during the BA session itself. This struct is
- * duplicated for each RA / TID.
-
- * @rate_n_flags: Rate at which Tx was attempted. Holds the data between the
- * Tx response (REPLY_TX), and the block ack notification
- * (REPLY_COMPRESSED_BA).
- * @state: state of the BA agreement establishment / tear down.
- * @txq_id: Tx queue used by the BA session - used by the transport layer.
- * Needed by the upper layer for debugfs only.
- * @wait_for_ba: Expect block-ack before next Tx reply
+ * struct iwl_notification_wait - notification wait entry
+ * @list: list head for global list
+ * @fn: function called with the notification
+ * @cmd: command ID
+ *
+ * This structure is not used directly; to wait for a
+ * notification, declare it on the stack and call
+ * iwl_init_notification_wait() with the appropriate
+ * parameters. Then do whatever will cause the uCode
+ * to notify the driver, and wait for that notification
+ * by calling iwl_wait_notification().
+ *
+ * Each notification is one-shot. If at some point we
+ * need to support multi-shot notifications (which
+ * can't be allocated on the stack) we need to modify
+ * the code for them.
*/
-struct iwl_ht_agg {
- u32 rate_n_flags;
- enum iwl_agg_state state;
- u16 txq_id;
- bool wait_for_ba;
+struct iwl_notification_wait {
+ struct list_head list;
+
+ void (*fn)(struct iwl_trans *trans, struct iwl_rx_packet *pkt,
+ void *data);
+ void *fn_data;
+
+ u8 cmd;
+ bool triggered, aborted;
};
/**
- * struct iwl_tid_data - one for each RA / TID
+ * enum iwl_pa_type - Power Amplifier type
+ * @IWL_PA_SYSTEM: based on uCode configuration
+ * @IWL_PA_INTERNAL: use Internal only
+ */
+enum iwl_pa_type {
+ IWL_PA_SYSTEM = 0,
+ IWL_PA_INTERNAL = 1,
+};
- * This structs holds the states for each RA / TID.
+/*
+ * LED mode
+ * IWL_LED_DEFAULT: use device default
+ * IWL_LED_RF_STATE: turn LED on/off based on RF state
+ * LED ON = RF ON
+ * LED OFF = RF OFF
+ * IWL_LED_BLINK: adjust led blink rate based on blink table
+ */
+enum iwl_led_mode {
+ IWL_LED_DEFAULT,
+ IWL_LED_RF_STATE,
+ IWL_LED_BLINK,
+};
- * @seq_number: the next WiFi sequence number to use
- * @tfds_in_queue: number of packets sent to the HW queues.
- * Exported for debugfs only
- * @agg: aggregation state machine
+/**
+ * struct iwl_cfg
+ * @name: Official name of the device
+ * @fw_name_pre: Firmware filename prefix. The api version and extension
+ * (.ucode) will be added to the filename before loading from disk. The
+ * filename is constructed as fw_name_pre<api>.ucode.
+ * @ucode_api_max: Highest version of uCode API supported by driver.
+ * @ucode_api_ok: oldest version of the uCode API that is OK to load
+ * without a warning, for use in transitions
+ * @ucode_api_min: Lowest version of uCode API supported by driver.
+ * @valid_tx_ant: valid transmit antenna
+ * @valid_rx_ant: valid receive antenna
+ * @sku: sku information from EEPROM
+ * @eeprom_ver: EEPROM version
+ * @eeprom_calib_ver: EEPROM calibration version
+ * @lib: pointer to the lib ops
+ * @additional_nic_config: additional nic configuration
+ * @base_params: pointer to basic parameters
+ * @ht_params: pointer to HT parameters
+ * @bt_params: pointer to bt parameters
+ * @pa_type: used by 6000 series only to identify the type of Power Amplifier
+ * @need_temp_offset_calib: need to perform temperature offset calibration
+ * @no_xtal_calib: some devices do not need crystal calibration data,
+ * don't send it to those
+ * @scan_rx_antennas: available antenna for scan operation
+ * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
+ * @adv_pm: advanced power management
+ * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
+ * @internal_wimax_coex: internal wifi/wimax combo device
+ * @iq_invert: I/Q inversion
+ * @temp_offset_v2: support v2 of temperature offset calibration
+ *
+ * We enable the driver to be backward compatible wrt API version. The
+ * driver specifies which APIs it supports (with @ucode_api_max being the
+ * highest and @ucode_api_min the lowest). Firmware will only be loaded if
+ * it has a supported API version.
+ *
+ * The ideal usage of this infrastructure is to treat a new ucode API
+ * release as a new hardware revision.
*/
-struct iwl_tid_data {
- u16 seq_number;
- u16 tfds_in_queue;
- struct iwl_ht_agg agg;
+struct iwl_cfg {
+ /* params specific to an individual device within a device family */
+ const char *name;
+ const char *fw_name_pre;
+ const unsigned int ucode_api_max;
+ const unsigned int ucode_api_ok;
+ const unsigned int ucode_api_min;
+ u8 valid_tx_ant;
+ u8 valid_rx_ant;
+ u16 sku;
+ u16 eeprom_ver;
+ u16 eeprom_calib_ver;
+ const struct iwl_lib_ops *lib;
+ void (*additional_nic_config)(struct iwl_priv *priv);
+ /* params not likely to change within a device family */
+ struct iwl_base_params *base_params;
+ /* params likely to change within a device family */
+ struct iwl_ht_params *ht_params;
+ struct iwl_bt_params *bt_params;
+ enum iwl_pa_type pa_type; /* if used set to IWL_PA_SYSTEM */
+ const bool need_temp_offset_calib; /* if used set to true */
+ const bool no_xtal_calib;
+ u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
+ enum iwl_led_mode led_mode;
+ const bool adv_pm;
+ const bool rx_with_siso_diversity;
+ const bool internal_wimax_coex;
+ const bool iq_invert;
+ const bool temp_offset_v2;
};
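For orientation, a minimal hypothetical entry for this structure (names and numbers are illustrative only; real entries live in the per-family configuration files). With these bounds the driver accepts uCode API versions 4 through 6 and constructs the firmware filename as fw_name_pre<api>.ucode:

static struct iwl_cfg iwl_example_cfg = {
	.name = "Example Wireless-N 1234",
	.fw_name_pre = "iwlwifi-1234-",
	.ucode_api_max = 6,
	.ucode_api_ok = 5,
	.ucode_api_min = 4,
	.led_mode = IWL_LED_RF_STATE,
};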
/**
@@ -266,15 +350,25 @@ struct iwl_tid_data {
* @ucode_owner: IWL_OWNERSHIP_*
* @cmd_queue: command queue number
* @status: STATUS_*
+ * @wowlan: are we running wowlan uCode
* @valid_contexts: microcode/device supports multiple contexts
* @bus: pointer to the bus layer data
+ * @cfg: see struct iwl_cfg
* @priv: pointer to the upper layer data
+ * @trans: pointer to the transport layer data
* @hw_params: see struct iwl_hw_params
* @workqueue: the workqueue used by all the layers of the driver
* @lock: protect general shared data
* @sta_lock: protects the station table.
* If lock and sta_lock are needed, lock must be acquired first.
* @mutex:
+ * @wait_command_queue: the wait_queue for SYNC host commands and uCode load
+ * @eeprom: pointer to the eeprom/OTP image
+ * @ucode_type: indicator of loaded ucode image
+ * @notif_waits: things waiting for notification
+ * @notif_wait_lock: lock protecting notification
+ * @notif_waitq: head of notification wait queue
+ * @device_pointers: pointers to ucode event tables
*/
struct iwl_shared {
#ifdef CONFIG_IWLWIFI_DEBUG
@@ -290,6 +384,7 @@ struct iwl_shared {
u8 valid_contexts;
struct iwl_bus *bus;
+ struct iwl_cfg *cfg;
struct iwl_priv *priv;
struct iwl_trans *trans;
struct iwl_hw_params hw_params;
@@ -299,13 +394,29 @@ struct iwl_shared {
spinlock_t sta_lock;
struct mutex mutex;
- struct iwl_tid_data tid_data[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
-
wait_queue_head_t wait_command_queue;
+
+ /* eeprom -- this is in the card's little endian byte order */
+ u8 *eeprom;
+
+ /* ucode related variables */
+ enum iwl_ucode_type ucode_type;
+
+ /* notification wait support */
+ struct list_head notif_waits;
+ spinlock_t notif_wait_lock;
+ wait_queue_head_t notif_waitq;
+
+ struct {
+ u32 error_event_table;
+ u32 log_event_table;
+ } device_pointers;
+
};
/* Whatever _m is (iwl_trans, iwl_priv, iwl_bus), these macros will work */
#define priv(_m) ((_m)->shrd->priv)
+#define cfg(_m) ((_m)->shrd->cfg)
#define bus(_m) ((_m)->shrd->bus)
#define trans(_m) ((_m)->shrd->trans)
#define hw_params(_m) ((_m)->shrd->hw_params)
@@ -427,12 +538,6 @@ int __must_check iwl_rx_dispatch(struct iwl_priv *priv,
struct iwl_device_cmd *cmd);
int iwlagn_hw_valid_rtc_data_addr(u32 addr);
-void iwl_start_tx_ba_trans_ready(struct iwl_priv *priv,
- enum iwl_rxon_context_id ctx,
- u8 sta_id, u8 tid);
-void iwl_stop_tx_ba_trans_ready(struct iwl_priv *priv,
- enum iwl_rxon_context_id ctx,
- u8 sta_id, u8 tid);
void iwl_set_hw_rfkill_state(struct iwl_priv *priv, bool state);
void iwl_nic_config(struct iwl_priv *priv);
void iwl_free_skb(struct iwl_priv *priv, struct sk_buff *skb);
@@ -445,6 +550,24 @@ bool iwl_check_for_ct_kill(struct iwl_priv *priv);
void iwl_stop_sw_queue(struct iwl_priv *priv, u8 ac);
void iwl_wake_sw_queue(struct iwl_priv *priv, u8 ac);
+/* notification wait support */
+void iwl_abort_notification_waits(struct iwl_shared *shrd);
+void __acquires(wait_entry)
+iwl_init_notification_wait(struct iwl_shared *shrd,
+ struct iwl_notification_wait *wait_entry,
+ u8 cmd,
+ void (*fn)(struct iwl_trans *trans,
+ struct iwl_rx_packet *pkt,
+ void *data),
+ void *fn_data);
+int __must_check __releases(wait_entry)
+iwl_wait_notification(struct iwl_shared *shrd,
+ struct iwl_notification_wait *wait_entry,
+ unsigned long timeout);
+void __releases(wait_entry)
+iwl_remove_notification(struct iwl_shared *shrd,
+ struct iwl_notification_wait *wait_entry);
+
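To make the intended calling sequence concrete, here is a minimal usage sketch of the notification-wait API declared above, modeled on the calibration wait in iwl-testmode.c later in this patch (the command ID, NULL handler and 2 * HZ timeout are simply the values that caller uses):

static int iwl_example_wait_calib(struct iwl_priv *priv)
{
	struct iwl_notification_wait calib_wait;
	int ret;

	/* Notifications are one-shot: register interest first ... */
	iwl_init_notification_wait(priv->shrd, &calib_wait,
				   CALIBRATION_COMPLETE_NOTIFICATION,
				   NULL, NULL);

	/* ... then trigger whatever makes the uCode send it ... */
	ret = iwl_init_alive_start(trans(priv));
	if (ret) {
		iwl_remove_notification(priv->shrd, &calib_wait);
		return ret;
	}

	/* ... and block until it arrives or the timeout expires. */
	return iwl_wait_notification(priv->shrd, &calib_wait, 2 * HZ);
}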
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_reset_traffic_log(struct iwl_priv *priv);
#endif /* CONFIG_IWLWIFI_DEBUGFS */
diff --git a/drivers/net/wireless/iwlwifi/iwl-sv-open.c b/drivers/net/wireless/iwlwifi/iwl-testmode.c
index 5e50d88..4a5cddd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sv-open.c
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.c
@@ -70,6 +70,7 @@
#include <net/mac80211.h>
#include <net/netlink.h>
+#include "iwl-wifi.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-debug.h"
@@ -77,6 +78,7 @@
#include "iwl-agn.h"
#include "iwl-testmode.h"
#include "iwl-trans.h"
+#include "iwl-bus.h"
/* The TLVs used in the gnl message policy between the kernel module and
* user space application. iwl_testmode_gnl_msg_policy is to be carried
@@ -106,6 +108,13 @@ struct nla_policy iwl_testmode_gnl_msg_policy[IWL_TM_ATTR_MAX] = {
[IWL_TM_ATTR_FIXRATE] = { .type = NLA_U32, },
[IWL_TM_ATTR_UCODE_OWNER] = { .type = NLA_U8, },
+
+ [IWL_TM_ATTR_SRAM_ADDR] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_SRAM_SIZE] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_SRAM_DUMP] = { .type = NLA_UNSPEC, },
+
+ [IWL_TM_ATTR_FW_VERSION] = { .type = NLA_U32, },
+ [IWL_TM_ATTR_DEVICE_ID] = { .type = NLA_U32, },
};
/*
@@ -177,6 +186,18 @@ void iwl_testmode_init(struct iwl_priv *priv)
{
priv->pre_rx_handler = iwl_testmode_ucode_rx_pkt;
priv->testmode_trace.trace_enabled = false;
+ priv->testmode_sram.sram_readed = false;
+}
+
+static void iwl_sram_cleanup(struct iwl_priv *priv)
+{
+ if (priv->testmode_sram.sram_readed) {
+ kfree(priv->testmode_sram.buff_addr);
+ priv->testmode_sram.buff_addr = NULL;
+ priv->testmode_sram.buff_size = 0;
+ priv->testmode_sram.num_chunks = 0;
+ priv->testmode_sram.sram_readed = false;
+ }
}
static void iwl_trace_cleanup(struct iwl_priv *priv)
@@ -201,6 +222,7 @@ static void iwl_trace_cleanup(struct iwl_priv *priv)
void iwl_testmode_cleanup(struct iwl_priv *priv)
{
iwl_trace_cleanup(priv);
+ iwl_sram_cleanup(priv);
}
/*
@@ -276,7 +298,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
IWL_INFO(priv, "testmode register access command offset 0x%x\n", ofs);
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
- case IWL_TM_CMD_APP2DEV_REG_READ32:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
val32 = iwl_read32(bus(priv), ofs);
IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
@@ -291,7 +313,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
IWL_DEBUG_INFO(priv,
"Error sending msg : %d\n", status);
break;
- case IWL_TM_CMD_APP2DEV_REG_WRITE32:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
IWL_DEBUG_INFO(priv,
"Error finding value to write\n");
@@ -302,7 +324,7 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
iwl_write32(bus(priv), ofs, val32);
}
break;
- case IWL_TM_CMD_APP2DEV_REG_WRITE8:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
if (!tb[IWL_TM_ATTR_REG_VALUE8]) {
IWL_DEBUG_INFO(priv, "Error finding value to write\n");
return -ENOMSG;
@@ -312,6 +334,32 @@ static int iwl_testmode_reg(struct ieee80211_hw *hw, struct nlattr **tb)
iwl_write8(bus(priv), ofs, val8);
}
break;
+ case IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
+ val32 = iwl_read_prph(bus(priv), ofs);
+ IWL_INFO(priv, "32bit value to read 0x%x\n", val32);
+
+ skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
+ if (!skb) {
+ IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+ return -ENOMEM;
+ }
+ NLA_PUT_U32(skb, IWL_TM_ATTR_REG_VALUE32, val32);
+ status = cfg80211_testmode_reply(skb);
+ if (status < 0)
+ IWL_DEBUG_INFO(priv,
+ "Error sending msg : %d\n", status);
+ break;
+ case IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
+ if (!tb[IWL_TM_ATTR_REG_VALUE32]) {
+ IWL_DEBUG_INFO(priv,
+ "Error finding value to write\n");
+ return -ENOMSG;
+ } else {
+ val32 = nla_get_u32(tb[IWL_TM_ATTR_REG_VALUE32]);
+ IWL_INFO(priv, "32bit value to write 0x%x\n", val32);
+ iwl_write_prph(bus(priv), ofs, val32);
+ }
+ break;
default:
IWL_DEBUG_INFO(priv, "Unknown testmode register command ID\n");
return -ENOSYS;
@@ -330,24 +378,24 @@ static int iwl_testmode_cfg_init_calib(struct iwl_priv *priv)
struct iwl_notification_wait calib_wait;
int ret;
- iwlagn_init_notification_wait(priv, &calib_wait,
+ iwl_init_notification_wait(priv->shrd, &calib_wait,
CALIBRATION_COMPLETE_NOTIFICATION,
NULL, NULL);
- ret = iwlagn_init_alive_start(priv);
+ ret = iwl_init_alive_start(trans(priv));
if (ret) {
IWL_DEBUG_INFO(priv,
"Error configuring init calibration: %d\n", ret);
goto cfg_init_calib_error;
}
- ret = iwlagn_wait_notification(priv, &calib_wait, 2 * HZ);
+ ret = iwl_wait_notification(priv->shrd, &calib_wait, 2 * HZ);
if (ret)
IWL_DEBUG_INFO(priv, "Error detecting"
" CALIBRATION_COMPLETE_NOTIFICATION: %d\n", ret);
return ret;
cfg_init_calib_error:
- iwlagn_remove_notification(priv, &calib_wait);
+ iwl_remove_notification(priv->shrd, &calib_wait);
return ret;
}
@@ -370,14 +418,16 @@ cfg_init_calib_error:
static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
{
struct iwl_priv *priv = hw->priv;
+ struct iwl_trans *trans = trans(priv);
struct sk_buff *skb;
unsigned char *rsp_data_ptr = NULL;
int status = 0, rsp_data_len = 0;
+ u32 devid;
switch (nla_get_u32(tb[IWL_TM_ATTR_COMMAND])) {
case IWL_TM_CMD_APP2DEV_GET_DEVICENAME:
- rsp_data_ptr = (unsigned char *)priv->cfg->name;
- rsp_data_len = strlen(priv->cfg->name);
+ rsp_data_ptr = (unsigned char *)cfg(priv)->name;
+ rsp_data_len = strlen(cfg(priv)->name);
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
rsp_data_len + 20);
if (!skb) {
@@ -396,8 +446,7 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
break;
case IWL_TM_CMD_APP2DEV_LOAD_INIT_FW:
- status = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_init,
- IWL_UCODE_INIT);
+ status = iwl_load_ucode_wait_alive(trans, IWL_UCODE_INIT);
if (status)
IWL_DEBUG_INFO(priv,
"Error loading init ucode: %d\n", status);
@@ -405,13 +454,11 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
case IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB:
iwl_testmode_cfg_init_calib(priv);
- iwl_trans_stop_device(trans(priv));
+ iwl_trans_stop_device(trans);
break;
case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
- status = iwlagn_load_ucode_wait_alive(priv,
- &priv->ucode_rt,
- IWL_UCODE_REGULAR);
+ status = iwl_load_ucode_wait_alive(trans, IWL_UCODE_REGULAR);
if (status) {
IWL_DEBUG_INFO(priv,
"Error loading runtime ucode: %d\n", status);
@@ -423,10 +470,25 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
"Error starting the device: %d\n", status);
break;
+ case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
+ iwl_scan_cancel_timeout(priv, 200);
+ iwl_trans_stop_device(trans);
+ status = iwl_load_ucode_wait_alive(trans, IWL_UCODE_WOWLAN);
+ if (status) {
+ IWL_DEBUG_INFO(priv,
+ "Error loading WOWLAN ucode: %d\n", status);
+ break;
+ }
+ status = iwl_alive_start(priv);
+ if (status)
+ IWL_DEBUG_INFO(priv,
+ "Error starting the device: %d\n", status);
+ break;
+
case IWL_TM_CMD_APP2DEV_GET_EEPROM:
- if (priv->eeprom) {
+ if (priv->shrd->eeprom) {
skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy,
- priv->cfg->base_params->eeprom_size + 20);
+ cfg(priv)->base_params->eeprom_size + 20);
if (!skb) {
IWL_DEBUG_INFO(priv,
"Error allocating memory\n");
@@ -435,8 +497,8 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
NLA_PUT_U32(skb, IWL_TM_ATTR_COMMAND,
IWL_TM_CMD_DEV2APP_EEPROM_RSP);
NLA_PUT(skb, IWL_TM_ATTR_EEPROM,
- priv->cfg->base_params->eeprom_size,
- priv->eeprom);
+ cfg(priv)->base_params->eeprom_size,
+ priv->shrd->eeprom);
status = cfg80211_testmode_reply(skb);
if (status < 0)
IWL_DEBUG_INFO(priv,
@@ -455,6 +517,37 @@ static int iwl_testmode_driver(struct ieee80211_hw *hw, struct nlattr **tb)
priv->tm_fixed_rate = nla_get_u32(tb[IWL_TM_ATTR_FIXRATE]);
break;
+ case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
+ IWL_INFO(priv, "uCode version raw: 0x%x\n", priv->ucode_ver);
+
+ skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
+ if (!skb) {
+ IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+ return -ENOMEM;
+ }
+ NLA_PUT_U32(skb, IWL_TM_ATTR_FW_VERSION, priv->ucode_ver);
+ status = cfg80211_testmode_reply(skb);
+ if (status < 0)
+ IWL_DEBUG_INFO(priv,
+ "Error sending msg : %d\n", status);
+ break;
+
+ case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
+ devid = bus_get_hw_id(bus(priv));
+ IWL_INFO(priv, "hw version: 0x%x\n", devid);
+
+ skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, 20);
+ if (!skb) {
+ IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+ return -ENOMEM;
+ }
+ NLA_PUT_U32(skb, IWL_TM_ATTR_DEVICE_ID, devid);
+ status = cfg80211_testmode_reply(skb);
+ if (status < 0)
+ IWL_DEBUG_INFO(priv,
+ "Error sending msg : %d\n", status);
+ break;
+
default:
IWL_DEBUG_INFO(priv, "Unknown testmode driver command ID\n");
return -ENOSYS;
@@ -535,7 +628,7 @@ static int iwl_testmode_trace(struct ieee80211_hw *hw, struct nlattr **tb)
}
priv->testmode_trace.num_chunks =
DIV_ROUND_UP(priv->testmode_trace.buff_size,
- TRACE_CHUNK_SIZE);
+ DUMP_CHUNK_SIZE);
break;
case IWL_TM_CMD_APP2DEV_END_TRACE:
@@ -567,15 +660,15 @@ static int iwl_testmode_trace_dump(struct ieee80211_hw *hw, struct nlattr **tb,
idx = cb->args[4];
if (idx >= priv->testmode_trace.num_chunks)
return -ENOENT;
- length = TRACE_CHUNK_SIZE;
+ length = DUMP_CHUNK_SIZE;
if (((idx + 1) == priv->testmode_trace.num_chunks) &&
- (priv->testmode_trace.buff_size % TRACE_CHUNK_SIZE))
+ (priv->testmode_trace.buff_size % DUMP_CHUNK_SIZE))
length = priv->testmode_trace.buff_size %
- TRACE_CHUNK_SIZE;
+ DUMP_CHUNK_SIZE;
NLA_PUT(skb, IWL_TM_ATTR_TRACE_DUMP, length,
priv->testmode_trace.trace_addr +
- (TRACE_CHUNK_SIZE * idx));
+ (DUMP_CHUNK_SIZE * idx));
idx++;
cb->args[4] = idx;
return 0;
@@ -621,6 +714,110 @@ static int iwl_testmode_ownership(struct ieee80211_hw *hw, struct nlattr **tb)
return 0;
}
+/*
+ * This function handles the user application commands for SRAM data dump
+ *
+ * It retrieves the mandatory fields IWL_TM_ATTR_SRAM_ADDR and
+ * IWL_TM_ATTR_SRAM_SIZE to determine the SRAM area to read.
+ *
+ * Several errors can be returned: -EBUSY if the SRAM data retrieved by a
+ * previous command has not yet been delivered to userspace, -ENOMSG if
+ * either mandatory field (IWL_TM_ATTR_SRAM_ADDR, IWL_TM_ATTR_SRAM_SIZE)
+ * is missing, or -ENOMEM if the buffer allocation fails.
+ *
+ * Otherwise 0 is returned, indicating that the SRAM read succeeded.
+ *
+ * @hw: ieee80211_hw object that represents the device
+ * @tb: gnl message fields from the user space
+ */
+static int iwl_testmode_sram(struct ieee80211_hw *hw, struct nlattr **tb)
+{
+ struct iwl_priv *priv = hw->priv;
+ u32 base, ofs, size, maxsize;
+
+ if (priv->testmode_sram.sram_readed)
+ return -EBUSY;
+
+ if (!tb[IWL_TM_ATTR_SRAM_ADDR]) {
+ IWL_DEBUG_INFO(priv, "Error finding SRAM offset address\n");
+ return -ENOMSG;
+ }
+ ofs = nla_get_u32(tb[IWL_TM_ATTR_SRAM_ADDR]);
+ if (!tb[IWL_TM_ATTR_SRAM_SIZE]) {
+ IWL_DEBUG_INFO(priv, "Error finding size for SRAM reading\n");
+ return -ENOMSG;
+ }
+ size = nla_get_u32(tb[IWL_TM_ATTR_SRAM_SIZE]);
+ switch (priv->shrd->ucode_type) {
+ case IWL_UCODE_REGULAR:
+ maxsize = trans(priv)->ucode_rt.data.len;
+ break;
+ case IWL_UCODE_INIT:
+ maxsize = trans(priv)->ucode_init.data.len;
+ break;
+ case IWL_UCODE_WOWLAN:
+ maxsize = trans(priv)->ucode_wowlan.data.len;
+ break;
+ case IWL_UCODE_NONE:
+ IWL_DEBUG_INFO(priv, "Error, uCode does not been loaded\n");
+ return -ENOSYS;
+ default:
+ IWL_DEBUG_INFO(priv, "Error, unsupported uCode type\n");
+ return -ENOSYS;
+ }
+ if ((ofs + size) > maxsize) {
+ IWL_DEBUG_INFO(priv, "Invalid offset/size: out of range\n");
+ return -EINVAL;
+ }
+ priv->testmode_sram.buff_size = (size / 4) * 4;
+ priv->testmode_sram.buff_addr =
+ kmalloc(priv->testmode_sram.buff_size, GFP_KERNEL);
+ if (priv->testmode_sram.buff_addr == NULL) {
+ IWL_DEBUG_INFO(priv, "Error allocating memory\n");
+ return -ENOMEM;
+ }
+ base = 0x800000;
+ _iwl_read_targ_mem_words(bus(priv), base + ofs,
+ priv->testmode_sram.buff_addr,
+ priv->testmode_sram.buff_size / 4);
+ priv->testmode_sram.num_chunks =
+ DIV_ROUND_UP(priv->testmode_sram.buff_size, DUMP_CHUNK_SIZE);
+ priv->testmode_sram.sram_readed = true;
+ return 0;
+}
+
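/*
 * Illustrative sketch (not part of the patch): how the SRAM buffer sized
 * above is split into chunks by the dump callback that follows.  With
 * 4 KiB pages, DUMP_CHUNK_SIZE is PAGE_SIZE - 1024 = 3072 bytes, so a
 * 10000-byte request (already a multiple of 4) needs
 * DIV_ROUND_UP(10000, 3072) = 4 chunks, and the last chunk carries the
 * remaining 10000 % 3072 = 784 bytes.
 */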
+static int iwl_testmode_sram_dump(struct ieee80211_hw *hw, struct nlattr **tb,
+ struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct iwl_priv *priv = hw->priv;
+ int idx, length;
+
+ if (priv->testmode_sram.sram_readed) {
+ idx = cb->args[4];
+ if (idx >= priv->testmode_sram.num_chunks) {
+ iwl_sram_cleanup(priv);
+ return -ENOENT;
+ }
+ length = DUMP_CHUNK_SIZE;
+ if (((idx + 1) == priv->testmode_sram.num_chunks) &&
+ (priv->testmode_sram.buff_size % DUMP_CHUNK_SIZE))
+ length = priv->testmode_sram.buff_size %
+ DUMP_CHUNK_SIZE;
+
+ NLA_PUT(skb, IWL_TM_ATTR_SRAM_DUMP, length,
+ priv->testmode_sram.buff_addr +
+ (DUMP_CHUNK_SIZE * idx));
+ idx++;
+ cb->args[4] = idx;
+ return 0;
+ } else
+ return -EFAULT;
+
+ nla_put_failure:
+ return -ENOBUFS;
+}
+
/* The testmode gnl message handler that takes the gnl message from the
* user space and parses it per the policy iwl_testmode_gnl_msg_policy, then
@@ -668,9 +865,11 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
IWL_DEBUG_INFO(priv, "testmode cmd to uCode\n");
result = iwl_testmode_ucode(hw, tb);
break;
- case IWL_TM_CMD_APP2DEV_REG_READ32:
- case IWL_TM_CMD_APP2DEV_REG_WRITE32:
- case IWL_TM_CMD_APP2DEV_REG_WRITE8:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
+ case IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
+ case IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
+ case IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
IWL_DEBUG_INFO(priv, "testmode cmd to register\n");
result = iwl_testmode_reg(hw, tb);
break;
@@ -680,6 +879,9 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
case IWL_TM_CMD_APP2DEV_LOAD_RUNTIME_FW:
case IWL_TM_CMD_APP2DEV_GET_EEPROM:
case IWL_TM_CMD_APP2DEV_FIXRATE_REQ:
+ case IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW:
+ case IWL_TM_CMD_APP2DEV_GET_FW_VERSION:
+ case IWL_TM_CMD_APP2DEV_GET_DEVICE_ID:
IWL_DEBUG_INFO(priv, "testmode cmd to driver\n");
result = iwl_testmode_driver(hw, tb);
break;
@@ -696,6 +898,11 @@ int iwlagn_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len)
result = iwl_testmode_ownership(hw, tb);
break;
+ case IWL_TM_CMD_APP2DEV_READ_SRAM:
+ IWL_DEBUG_INFO(priv, "testmode sram read cmd to driver\n");
+ result = iwl_testmode_sram(hw, tb);
+ break;
+
default:
IWL_DEBUG_INFO(priv, "Unknown testmode command\n");
result = -ENOSYS;
@@ -744,6 +951,10 @@ int iwlagn_mac_testmode_dump(struct ieee80211_hw *hw, struct sk_buff *skb,
IWL_DEBUG_INFO(priv, "uCode trace cmd to driver\n");
result = iwl_testmode_trace_dump(hw, tb, skb, cb);
break;
+ case IWL_TM_CMD_APP2DEV_DUMP_SRAM:
+ IWL_DEBUG_INFO(priv, "testmode sram dump cmd to driver\n");
+ result = iwl_testmode_sram_dump(hw, tb, skb, cb);
+ break;
default:
result = -EINVAL;
break;
diff --git a/drivers/net/wireless/iwlwifi/iwl-testmode.h b/drivers/net/wireless/iwlwifi/iwl-testmode.h
index b980bda..26138f1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-testmode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-testmode.h
@@ -76,9 +76,9 @@
* the actual uCode host command ID is carried with
* IWL_TM_ATTR_UCODE_CMD_ID
*
- * @IWL_TM_CMD_APP2DEV_REG_READ32:
- * @IWL_TM_CMD_APP2DEV_REG_WRITE32:
- * @IWL_TM_CMD_APP2DEV_REG_WRITE8:
+ * @IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32:
+ * @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32:
+ * @IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8:
* commands from user applicaiton to access register
*
* @IWL_TM_CMD_APP2DEV_GET_DEVICENAME: retrieve device name
@@ -103,16 +103,30 @@
* @IWL_TM_CMD_DEV2APP_EEPROM_RSP:
* commands from kernel space to carry the eeprom response
* to user application
+ *
* @IWL_TM_CMD_APP2DEV_OWNERSHIP:
* commands from user application to own change the ownership of the uCode
* if application has the ownership, the only host command from
* testmode will deliver to uCode. Default owner is driver
+ *
+ * @IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32:
+ * @IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32:
+ * commands from user application to indirectly access peripheral registers
+ *
+ * @IWL_TM_CMD_APP2DEV_READ_SRAM:
+ * @IWL_TM_CMD_APP2DEV_DUMP_SRAM:
+ * commands from user application to read data from SRAM
+ *
+ * @IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW: load Wake on Wireless LAN (WoWLAN) uCode image
+ * @IWL_TM_CMD_APP2DEV_GET_FW_VERSION: retrieve the uCode version
+ * @IWL_TM_CMD_APP2DEV_GET_DEVICE_ID: retrieve the device ID information
+ *
*/
enum iwl_tm_cmd_t {
IWL_TM_CMD_APP2DEV_UCODE = 1,
- IWL_TM_CMD_APP2DEV_REG_READ32 = 2,
- IWL_TM_CMD_APP2DEV_REG_WRITE32 = 3,
- IWL_TM_CMD_APP2DEV_REG_WRITE8 = 4,
+ IWL_TM_CMD_APP2DEV_DIRECT_REG_READ32 = 2,
+ IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE32 = 3,
+ IWL_TM_CMD_APP2DEV_DIRECT_REG_WRITE8 = 4,
IWL_TM_CMD_APP2DEV_GET_DEVICENAME = 5,
IWL_TM_CMD_APP2DEV_LOAD_INIT_FW = 6,
IWL_TM_CMD_APP2DEV_CFG_INIT_CALIB = 7,
@@ -126,7 +140,14 @@ enum iwl_tm_cmd_t {
IWL_TM_CMD_DEV2APP_UCODE_RX_PKT = 15,
IWL_TM_CMD_DEV2APP_EEPROM_RSP = 16,
IWL_TM_CMD_APP2DEV_OWNERSHIP = 17,
- IWL_TM_CMD_MAX = 18,
+ IWL_TM_CMD_APP2DEV_INDIRECT_REG_READ32 = 18,
+ IWL_TM_CMD_APP2DEV_INDIRECT_REG_WRITE32 = 19,
+ IWL_TM_CMD_APP2DEV_READ_SRAM = 20,
+ IWL_TM_CMD_APP2DEV_DUMP_SRAM = 21,
+ IWL_TM_CMD_APP2DEV_LOAD_WOWLAN_FW = 22,
+ IWL_TM_CMD_APP2DEV_GET_FW_VERSION = 23,
+ IWL_TM_CMD_APP2DEV_GET_DEVICE_ID = 24,
+ IWL_TM_CMD_MAX = 25,
};
/*
@@ -196,6 +217,26 @@ enum iwl_tm_cmd_t {
* When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_OWNERSHIP,
* The mandatory fields are:
* IWL_TM_ATTR_UCODE_OWNER for the new owner
+ *
+ * @IWL_TM_ATTR_SRAM_ADDR:
+ * @IWL_TM_ATTR_SRAM_SIZE:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_READ_SRAM,
+ * The mandatory fields are:
+ * IWL_TM_ATTR_SRAM_ADDR for the address in sram
+ * IWL_TM_ATTR_SRAM_SIZE for the size of the data to read
+ *
+ * @IWL_TM_ATTR_SRAM_DUMP:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_DUMP_SRAM,
+ * IWL_TM_ATTR_SRAM_DUMP for the data in sram
+ *
+ * @IWL_TM_ATTR_FW_VERSION:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_FW_VERSION,
+ * IWL_TM_ATTR_FW_VERSION for the uCode version
+ *
+ * @IWL_TM_ATTR_DEVICE_ID:
+ * When IWL_TM_ATTR_COMMAND is IWL_TM_CMD_APP2DEV_GET_DEVICE_ID,
+ * IWL_TM_ATTR_DEVICE_ID for the device ID information
+ *
*/
enum iwl_tm_attr_t {
IWL_TM_ATTR_NOT_APPLICABLE = 0,
@@ -213,7 +254,12 @@ enum iwl_tm_attr_t {
IWL_TM_ATTR_TRACE_DUMP = 12,
IWL_TM_ATTR_FIXRATE = 13,
IWL_TM_ATTR_UCODE_OWNER = 14,
- IWL_TM_ATTR_MAX = 15,
+ IWL_TM_ATTR_SRAM_ADDR = 15,
+ IWL_TM_ATTR_SRAM_SIZE = 16,
+ IWL_TM_ATTR_SRAM_DUMP = 17,
+ IWL_TM_ATTR_FW_VERSION = 18,
+ IWL_TM_ATTR_DEVICE_ID = 19,
+ IWL_TM_ATTR_MAX = 20,
};
/* uCode trace buffer */
@@ -221,6 +267,8 @@ enum iwl_tm_attr_t {
#define TRACE_BUFF_SIZE_MIN 0x20000
#define TRACE_BUFF_SIZE_DEF TRACE_BUFF_SIZE_MIN
#define TRACE_BUFF_PADD 0x2000
-#define TRACE_CHUNK_SIZE (PAGE_SIZE - 1024)
+
+/* Maximum data size of each chunk in a dump packet */
+#define DUMP_CHUNK_SIZE (PAGE_SIZE - 1024)
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
index 2b6756e..f6debf9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
@@ -219,9 +219,7 @@ struct iwl_trans_pcie {
/* INT ICT Table */
__le32 *ict_tbl;
- void *ict_tbl_vir;
dma_addr_t ict_tbl_dma;
- dma_addr_t aligned_ict_tbl_dma;
int ict_index;
u32 inta;
bool use_ict;
@@ -236,6 +234,7 @@ struct iwl_trans_pcie {
const u8 *ac_to_fifo[NUM_IWL_RXON_CTX];
const u8 *ac_to_queue[NUM_IWL_RXON_CTX];
u8 mcast_queue[NUM_IWL_RXON_CTX];
+ u8 agg_txq[IWLAGN_STATION_COUNT][IWL_MAX_TID_COUNT];
struct iwl_tx_queue *txq;
unsigned long txq_ctx_active_msk;
@@ -280,20 +279,16 @@ void iwl_tx_cmd_complete(struct iwl_trans *trans,
void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
struct iwl_tx_queue *txq,
u16 byte_cnt);
-void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id);
int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx, int sta_id,
- int tid);
+ int sta_id, int tid);
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
struct iwl_tx_queue *txq,
int tx_fifo_id, int scd_retry);
-int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx, int sta_id,
- int tid, u16 *ssn);
+int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans, int sta_id, int tid);
void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
enum iwl_rxon_context_id ctx,
- int sta_id, int tid, int frame_limit);
+ int sta_id, int tid, int frame_limit, u16 ssn);
void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
int index, enum dma_data_direction dma_dir);
int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
@@ -354,8 +349,13 @@ static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
txq->swq_id = (hwq << 2) | ac;
}
+static inline u8 iwl_get_queue_ac(struct iwl_tx_queue *txq)
+{
+ return txq->swq_id & 0x3;
+}
+
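/*
 * Illustrative sketch (not part of the patch): swq_id packs the HW queue
 * number and the AC into one byte, mirroring iwl_set_swq_id() above:
 *
 *	swq_id = (hwq << 2) | ac;	// e.g. hwq 10, ac 2 -> 0x2a
 *	ac  = swq_id & 0x3;		// iwl_get_queue_ac() -> 2
 *	hwq = swq_id >> 2;		// -> 10
 */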
static inline void iwl_wake_queue(struct iwl_trans *trans,
- struct iwl_tx_queue *txq)
+ struct iwl_tx_queue *txq, const char *msg)
{
u8 queue = txq->swq_id;
u8 ac = queue & 3;
@@ -363,13 +363,22 @@ static inline void iwl_wake_queue(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
- if (test_and_clear_bit(hwq, trans_pcie->queue_stopped))
- if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0)
+ if (test_and_clear_bit(hwq, trans_pcie->queue_stopped)) {
+ if (atomic_dec_return(&trans_pcie->queue_stop_count[ac]) <= 0) {
iwl_wake_sw_queue(priv(trans), ac);
+ IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d ac %d. %s",
+ hwq, ac, msg);
+ } else {
+ IWL_DEBUG_TX_QUEUES(trans, "Don't wake hwq %d ac %d"
+ " stop count %d. %s",
+ hwq, ac, atomic_read(&trans_pcie->
+ queue_stop_count[ac]), msg);
+ }
+ }
}
static inline void iwl_stop_queue(struct iwl_trans *trans,
- struct iwl_tx_queue *txq)
+ struct iwl_tx_queue *txq, const char *msg)
{
u8 queue = txq->swq_id;
u8 ac = queue & 3;
@@ -377,9 +386,23 @@ static inline void iwl_stop_queue(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
- if (!test_and_set_bit(hwq, trans_pcie->queue_stopped))
- if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0)
+ if (!test_and_set_bit(hwq, trans_pcie->queue_stopped)) {
+ if (atomic_inc_return(&trans_pcie->queue_stop_count[ac]) > 0) {
iwl_stop_sw_queue(priv(trans), ac);
+ IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d ac %d"
+ " stop count %d. %s",
+ hwq, ac, atomic_read(&trans_pcie->
+ queue_stop_count[ac]), msg);
+ } else {
+ IWL_DEBUG_TX_QUEUES(trans, "Don't stop hwq %d ac %d"
+ " stop count %d. %s",
+ hwq, ac, atomic_read(&trans_pcie->
+ queue_stop_count[ac]), msg);
+ }
+ } else {
+ IWL_DEBUG_TX_QUEUES(trans, "stop hwq %d, but it is stopped/ %s",
+ hwq, msg);
+ }
}
#ifdef ieee80211_stop_queue
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
index 374c68c..65d1f05 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
@@ -594,8 +594,8 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
- base = priv->device_pointers.error_event_table;
- if (priv->ucode_type == IWL_UCODE_INIT) {
+ base = trans->shrd->device_pointers.error_event_table;
+ if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
if (!base)
base = priv->init_errlog_ptr;
} else {
@@ -607,7 +607,7 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans)
IWL_ERR(trans,
"Not valid error log pointer 0x%08X for %s uCode\n",
base,
- (priv->ucode_type == IWL_UCODE_INIT)
+ (trans->shrd->ucode_type == IWL_UCODE_INIT)
? "Init" : "RT");
return;
}
@@ -648,6 +648,21 @@ static void iwl_dump_nic_error_log(struct iwl_trans *trans)
IWL_ERR(trans, "0x%08X | hw version\n", table.hw_ver);
IWL_ERR(trans, "0x%08X | board version\n", table.brd_ver);
IWL_ERR(trans, "0x%08X | hcmd\n", table.hcmd);
+
+ IWL_ERR(trans, "0x%08X | isr0\n", table.isr0);
+ IWL_ERR(trans, "0x%08X | isr1\n", table.isr1);
+ IWL_ERR(trans, "0x%08X | isr2\n", table.isr2);
+ IWL_ERR(trans, "0x%08X | isr3\n", table.isr3);
+ IWL_ERR(trans, "0x%08X | isr4\n", table.isr4);
+ IWL_ERR(trans, "0x%08X | isr_pref\n", table.isr_pref);
+ IWL_ERR(trans, "0x%08X | wait_event\n", table.wait_event);
+ IWL_ERR(trans, "0x%08X | l2p_control\n", table.l2p_control);
+ IWL_ERR(trans, "0x%08X | l2p_duration\n", table.l2p_duration);
+ IWL_ERR(trans, "0x%08X | l2p_mhvalid\n", table.l2p_mhvalid);
+ IWL_ERR(trans, "0x%08X | l2p_addr_match\n", table.l2p_addr_match);
+ IWL_ERR(trans, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
+ IWL_ERR(trans, "0x%08X | timestamp\n", table.u_timestamp);
+ IWL_ERR(trans, "0x%08X | flow_handler\n", table.flow_handler);
}
/**
@@ -657,7 +672,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans)
{
struct iwl_priv *priv = priv(trans);
/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
- if (priv->cfg->internal_wimax_coex &&
+ if (cfg(priv)->internal_wimax_coex &&
(!(iwl_read_prph(bus(trans), APMG_CLK_CTRL_REG) &
APMS_CLK_VAL_MRB_FUNC_MODE) ||
(iwl_read_prph(bus(trans), APMG_PS_CTRL_REG) &
@@ -709,8 +724,8 @@ static int iwl_print_event_log(struct iwl_trans *trans, u32 start_idx,
if (num_events == 0)
return pos;
- base = priv->device_pointers.log_event_table;
- if (priv->ucode_type == IWL_UCODE_INIT) {
+ base = trans->shrd->device_pointers.log_event_table;
+ if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
if (!base)
base = priv->init_evtlog_ptr;
} else {
@@ -823,8 +838,8 @@ int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
size_t bufsz = 0;
struct iwl_priv *priv = priv(trans);
- base = priv->device_pointers.log_event_table;
- if (priv->ucode_type == IWL_UCODE_INIT) {
+ base = trans->shrd->device_pointers.log_event_table;
+ if (trans->shrd->ucode_type == IWL_UCODE_INIT) {
logsize = priv->init_evtlog_size;
if (!base)
base = priv->init_evtlog_ptr;
@@ -838,7 +853,7 @@ int iwl_dump_nic_event_log(struct iwl_trans *trans, bool full_log,
IWL_ERR(trans,
"Invalid event log pointer 0x%08X for %s uCode\n",
base,
- (priv->ucode_type == IWL_UCODE_INIT)
+ (trans->shrd->ucode_type == IWL_UCODE_INIT)
? "Init" : "RT");
return -EINVAL;
}
@@ -957,11 +972,11 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
}
#endif
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
-
/* saved interrupt in inta variable now we can reset trans_pcie->inta */
trans_pcie->inta = 0;
+ spin_unlock_irqrestore(&trans->shrd->lock, flags);
+
/* Now service all interrupt bits discovered above. */
if (inta & CSR_INT_BIT_HW_ERR) {
IWL_ERR(trans, "Hardware error detected. Restarting.\n");
@@ -1108,7 +1123,7 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
isr_stats->tx++;
handled |= CSR_INT_BIT_FH_TX;
/* Wake up uCode load routine, now that load is complete */
- priv(trans)->ucode_write_complete = 1;
+ trans->ucode_write_complete = 1;
wake_up(&trans->shrd->wait_command_queue);
}
@@ -1136,7 +1151,11 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
* ICT functions
*
******************************************************************************/
-#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
+
+/* a device (PCI-E) page is 4096 bytes long */
+#define ICT_SHIFT 12
+#define ICT_SIZE (1 << ICT_SHIFT)
+#define ICT_COUNT (ICT_SIZE / sizeof(u32))
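/*
 * Illustrative sketch (not part of the patch): with ICT_SHIFT = 12 the
 * table is ICT_SIZE = 1 << 12 = 4096 bytes, i.e. ICT_COUNT =
 * 4096 / sizeof(u32) = 1024 entries, and ict_tbl_dma >> ICT_SHIFT is the
 * 4 KiB-aligned block number programmed into CSR_DRAM_INT_TBL_REG in
 * iwl_reset_ict() below.
 */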
/* Free dram table */
void iwl_free_isr_ict(struct iwl_trans *trans)
@@ -1144,21 +1163,19 @@ void iwl_free_isr_ict(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
- if (trans_pcie->ict_tbl_vir) {
- dma_free_coherent(bus(trans)->dev,
- (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
- trans_pcie->ict_tbl_vir,
+ if (trans_pcie->ict_tbl) {
+ dma_free_coherent(bus(trans)->dev, ICT_SIZE,
+ trans_pcie->ict_tbl,
trans_pcie->ict_tbl_dma);
- trans_pcie->ict_tbl_vir = NULL;
- memset(&trans_pcie->ict_tbl_dma, 0,
- sizeof(trans_pcie->ict_tbl_dma));
- memset(&trans_pcie->aligned_ict_tbl_dma, 0,
- sizeof(trans_pcie->aligned_ict_tbl_dma));
+ trans_pcie->ict_tbl = NULL;
+ trans_pcie->ict_tbl_dma = 0;
}
}
-/* allocate dram shared table it is a PAGE_SIZE aligned
+/*
+ * allocate the DRAM-shared table; it is an aligned memory
+ * block of ICT_SIZE bytes.
* also reset all data related to ICT table interrupt.
*/
int iwl_alloc_isr_ict(struct iwl_trans *trans)
@@ -1166,36 +1183,26 @@ int iwl_alloc_isr_ict(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
- /* allocate shrared data table */
- trans_pcie->ict_tbl_vir =
- dma_alloc_coherent(bus(trans)->dev,
- (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
- &trans_pcie->ict_tbl_dma, GFP_KERNEL);
- if (!trans_pcie->ict_tbl_vir)
+ trans_pcie->ict_tbl =
+ dma_alloc_coherent(bus(trans)->dev, ICT_SIZE,
+ &trans_pcie->ict_tbl_dma,
+ GFP_KERNEL);
+ if (!trans_pcie->ict_tbl)
return -ENOMEM;
- /* align table to PAGE_SIZE boundary */
- trans_pcie->aligned_ict_tbl_dma =
- ALIGN(trans_pcie->ict_tbl_dma, PAGE_SIZE);
-
- IWL_DEBUG_ISR(trans, "ict dma addr %Lx dma aligned %Lx diff %d\n",
- (unsigned long long)trans_pcie->ict_tbl_dma,
- (unsigned long long)trans_pcie->aligned_ict_tbl_dma,
- (int)(trans_pcie->aligned_ict_tbl_dma -
- trans_pcie->ict_tbl_dma));
+ /* just an API sanity check ... it is guaranteed to be aligned */
+ if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
+ iwl_free_isr_ict(trans);
+ return -EINVAL;
+ }
- trans_pcie->ict_tbl = trans_pcie->ict_tbl_vir +
- (trans_pcie->aligned_ict_tbl_dma -
- trans_pcie->ict_tbl_dma);
+ IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
+ (unsigned long long)trans_pcie->ict_tbl_dma);
- IWL_DEBUG_ISR(trans, "ict vir addr %p vir aligned %p diff %d\n",
- trans_pcie->ict_tbl, trans_pcie->ict_tbl_vir,
- (int)(trans_pcie->aligned_ict_tbl_dma -
- trans_pcie->ict_tbl_dma));
+ IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);
/* reset table and index to all 0 */
- memset(trans_pcie->ict_tbl_vir, 0,
- (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
+ memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
trans_pcie->ict_index = 0;
/* add periodic RX interrupt */
@@ -1213,23 +1220,20 @@ int iwl_reset_ict(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
- if (!trans_pcie->ict_tbl_vir)
+ if (!trans_pcie->ict_tbl)
return 0;
spin_lock_irqsave(&trans->shrd->lock, flags);
iwl_disable_interrupts(trans);
- memset(&trans_pcie->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
+ memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
- val = trans_pcie->aligned_ict_tbl_dma >> PAGE_SHIFT;
+ val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
val |= CSR_DRAM_INT_TBL_ENABLE;
val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
- IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%X "
- "aligned dma address %Lx\n",
- val,
- (unsigned long long)trans_pcie->aligned_ict_tbl_dma);
+ IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
iwl_write32(bus(trans), CSR_DRAM_INT_TBL_REG, val);
trans_pcie->use_ict = true;
@@ -1266,6 +1270,8 @@ static irqreturn_t iwl_isr(int irq, void *data)
if (!trans)
return IRQ_NONE;
+ trace_iwlwifi_dev_irq(priv(trans));
+
trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
spin_lock_irqsave(&trans->shrd->lock, flags);
@@ -1340,6 +1346,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
struct iwl_trans_pcie *trans_pcie;
u32 inta, inta_mask;
u32 val = 0;
+ u32 read;
unsigned long flags;
if (!trans)
@@ -1353,6 +1360,8 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
if (!trans_pcie->use_ict)
return iwl_isr(irq, data);
+ trace_iwlwifi_dev_irq(priv(trans));
+
spin_lock_irqsave(&trans->shrd->lock, flags);
/* Disable (but don't clear!) interrupts here to avoid
@@ -1367,24 +1376,29 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
/* Ignore interrupt if there's nothing in NIC to service.
* This may be due to IRQ shared with another device,
* or due to sporadic interrupts thrown from our NIC. */
- if (!trans_pcie->ict_tbl[trans_pcie->ict_index]) {
+ read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+ trace_iwlwifi_dev_ict_read(priv(trans), trans_pcie->ict_index, read);
+ if (!read) {
IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
goto none;
}
- /* read all entries that not 0 start with ict_index */
- while (trans_pcie->ict_tbl[trans_pcie->ict_index]) {
-
- val |= le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+ /*
+ * Collect all entries up to the first 0, starting from ict_index;
+ * note we already read at ict_index.
+ */
+ do {
+ val |= read;
IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
- trans_pcie->ict_index,
- le32_to_cpu(
- trans_pcie->ict_tbl[trans_pcie->ict_index]));
+ trans_pcie->ict_index, read);
trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
trans_pcie->ict_index =
iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);
- }
+ read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+ trace_iwlwifi_dev_ict_read(priv(trans), trans_pcie->ict_index,
+ read);
+ } while (read);
/* We should not get this value, just ignore it. */
if (val == 0xffffffff)
@@ -1411,7 +1425,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
if (likely(inta))
tasklet_schedule(&trans_pcie->irq_tasklet);
else if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
- !trans_pcie->inta) {
+ !trans_pcie->inta) {
/* Allow interrupt if was disabled by this handler and
* no tasklet was schedules, We should not enable interrupt,
* tasklet will enable it.
@@ -1427,7 +1441,7 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
* only Re-enable if disabled by irq.
*/
if (test_bit(STATUS_INT_ENABLED, &trans->shrd->status) &&
- !trans_pcie->inta)
+ !trans_pcie->inta)
iwl_enable_interrupts(trans);
spin_unlock_irqrestore(&trans->shrd->lock, flags);
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
index 4a0c953..bd29568 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
@@ -408,6 +408,7 @@ static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
int txq_id, u32 index)
{
+ IWL_DEBUG_TX_QUEUES(trans, "Q %d WrPtr: %d", txq_id, index & 0xff);
iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
(index & 0xff) | (txq_id << 8));
iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(txq_id), index);
@@ -430,7 +431,7 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
txq->sched_retry = scd_retry;
- IWL_DEBUG_INFO(trans, "%s %s Queue %d on FIFO %d\n",
+ IWL_DEBUG_TX_QUEUES(trans, "%s %s Queue %d on FIFO %d\n",
active ? "Activate" : "Deactivate",
scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
}
@@ -446,14 +447,21 @@ static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie,
return -EINVAL;
}
+static inline bool is_agg_txqid_valid(struct iwl_trans *trans, int txq_id)
+{
+ if (txq_id < IWLAGN_FIRST_AMPDU_QUEUE)
+ return false;
+ return txq_id < (IWLAGN_FIRST_AMPDU_QUEUE +
+ hw_params(trans).num_ampdu_queues);
+}
+
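/*
 * Illustrative sketch (not part of the patch): the helper above accepts a
 * half-open range of aggregation queues.  If, say, IWLAGN_FIRST_AMPDU_QUEUE
 * were 10 and hw_params(trans).num_ampdu_queues were 4 (values assumed here
 * purely for illustration), txq_id 10..13 would be valid and anything else
 * rejected.
 */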
void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
enum iwl_rxon_context_id ctx, int sta_id,
- int tid, int frame_limit)
+ int tid, int frame_limit, u16 ssn)
{
- int tx_fifo, txq_id, ssn_idx;
+ int tx_fifo, txq_id;
u16 ra_tid;
unsigned long flags;
- struct iwl_tid_data *tid_data;
struct iwl_trans_pcie *trans_pcie =
IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -469,11 +477,15 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
return;
}
- spin_lock_irqsave(&trans->shrd->sta_lock, flags);
- tid_data = &trans->shrd->tid_data[sta_id][tid];
- ssn_idx = SEQ_TO_SN(tid_data->seq_number);
- txq_id = tid_data->agg.txq_id;
- spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
+ txq_id = trans_pcie->agg_txq[sta_id][tid];
+ if (WARN_ON_ONCE(is_agg_txqid_valid(trans, txq_id) == false)) {
+ IWL_ERR(trans,
+ "queue number out of range: %d, must be %d to %d\n",
+ txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
+ IWLAGN_FIRST_AMPDU_QUEUE +
+ hw_params(trans).num_ampdu_queues - 1);
+ return;
+ }
ra_tid = BUILD_RAxTID(sta_id, tid);
@@ -493,9 +505,9 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
/* Place first TFD at index corresponding to start sequence number.
* Assumes that ssn_idx is valid (!= 0xFFF) */
- trans_pcie->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
- trans_pcie->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
- iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx);
+ trans_pcie->txq[txq_id].q.read_ptr = (ssn & 0xff);
+ trans_pcie->txq[txq_id].q.write_ptr = (ssn & 0xff);
+ iwl_trans_set_wr_ptrs(trans, txq_id, ssn);
/* Set up Tx window size and frame limit for this queue */
iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
@@ -539,12 +551,9 @@ static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
}
int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx, int sta_id,
- int tid, u16 *ssn)
+ int sta_id, int tid)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_tid_data *tid_data;
- unsigned long flags;
int txq_id;
txq_id = iwlagn_txq_ctx_activate_free(trans);
@@ -553,117 +562,39 @@ int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
return -ENXIO;
}
- spin_lock_irqsave(&trans->shrd->sta_lock, flags);
- tid_data = &trans->shrd->tid_data[sta_id][tid];
- *ssn = SEQ_TO_SN(tid_data->seq_number);
- tid_data->agg.txq_id = txq_id;
+ trans_pcie->agg_txq[sta_id][tid] = txq_id;
iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);
- tid_data = &trans->shrd->tid_data[sta_id][tid];
- if (tid_data->tfds_in_queue == 0) {
- IWL_DEBUG_HT(trans, "HW queue is empty\n");
- tid_data->agg.state = IWL_AGG_ON;
- iwl_start_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
- } else {
- IWL_DEBUG_HT(trans, "HW queue is NOT empty: %d packets in HW"
- "queue\n", tid_data->tfds_in_queue);
- tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
- }
- spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
-
return 0;
}
-void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- iwlagn_tx_queue_stop_scheduler(trans, txq_id);
-
- iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));
-
- trans_pcie->txq[txq_id].q.read_ptr = 0;
- trans_pcie->txq[txq_id].q.write_ptr = 0;
- /* supposes that ssn_idx is valid (!= 0xFFF) */
- iwl_trans_set_wr_ptrs(trans, txq_id, 0);
-
- iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
- iwl_txq_ctx_deactivate(trans_pcie, txq_id);
- iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
-}
-
-int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx, int sta_id,
- int tid)
+int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans, int sta_id, int tid)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
- int read_ptr, write_ptr;
- struct iwl_tid_data *tid_data;
- int txq_id;
-
- spin_lock_irqsave(&trans->shrd->sta_lock, flags);
+ u8 txq_id = trans_pcie->agg_txq[sta_id][tid];
- tid_data = &trans->shrd->tid_data[sta_id][tid];
- txq_id = tid_data->agg.txq_id;
-
- if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
- (IWLAGN_FIRST_AMPDU_QUEUE +
- hw_params(trans).num_ampdu_queues <= txq_id)) {
+ if (WARN_ON_ONCE(is_agg_txqid_valid(trans, txq_id) == false)) {
IWL_ERR(trans,
"queue number out of range: %d, must be %d to %d\n",
txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
IWLAGN_FIRST_AMPDU_QUEUE +
hw_params(trans).num_ampdu_queues - 1);
- spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
return -EINVAL;
}
- switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
- case IWL_EMPTYING_HW_QUEUE_ADDBA:
- /*
- * This can happen if the peer stops aggregation
- * again before we've had a chance to drain the
- * queue we selected previously, i.e. before the
- * session was really started completely.
- */
- IWL_DEBUG_HT(trans, "AGG stop before setup done\n");
- goto turn_off;
- case IWL_AGG_ON:
- break;
- default:
- IWL_WARN(trans, "Stopping AGG while state not ON "
- "or starting for %d on %d (%d)\n", sta_id, tid,
- trans->shrd->tid_data[sta_id][tid].agg.state);
- spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
- return 0;
- }
-
- write_ptr = trans_pcie->txq[txq_id].q.write_ptr;
- read_ptr = trans_pcie->txq[txq_id].q.read_ptr;
-
- /* The queue is not empty */
- if (write_ptr != read_ptr) {
- IWL_DEBUG_HT(trans, "Stopping a non empty AGG HW QUEUE\n");
- trans->shrd->tid_data[sta_id][tid].agg.state =
- IWL_EMPTYING_HW_QUEUE_DELBA;
- spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
- return 0;
- }
-
- IWL_DEBUG_HT(trans, "HW queue is empty\n");
-turn_off:
- trans->shrd->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
-
- /* do not restore/save irqs */
- spin_unlock(&trans->shrd->sta_lock);
- spin_lock(&trans->shrd->lock);
-
- iwl_trans_pcie_txq_agg_disable(trans, txq_id);
+ iwlagn_tx_queue_stop_scheduler(trans, txq_id);
- spin_unlock_irqrestore(&trans->shrd->lock, flags);
+ iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));
- iwl_stop_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
+ trans_pcie->agg_txq[sta_id][tid] = 0;
+ trans_pcie->txq[txq_id].q.read_ptr = 0;
+ trans_pcie->txq[txq_id].q.write_ptr = 0;
+ /* supposes that ssn_idx is valid (!= 0xFFF) */
+ iwl_trans_set_wr_ptrs(trans, txq_id, 0);
+ iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
+ iwl_txq_ctx_deactivate(trans_pcie, txq_id);
+ iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
return 0;
}
@@ -982,7 +913,8 @@ static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
ret = iwl_enqueue_hcmd(trans, cmd);
if (ret < 0) {
- IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
+ IWL_DEBUG_QUIET_RFKILL(trans,
+ "Error sending %s: enqueue_hcmd failed: %d\n",
get_cmd_string(cmd->id), ret);
return ret;
}
@@ -1000,6 +932,20 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
get_cmd_string(cmd->id));
+ if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
+ return -EBUSY;
+
+
+ if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
+ IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
+ get_cmd_string(cmd->id));
+ return -ECANCELED;
+ }
+ if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
+ IWL_ERR(trans, "Command %s failed: FW Error\n",
+ get_cmd_string(cmd->id));
+ return -EIO;
+ }
set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
get_cmd_string(cmd->id));
@@ -1008,7 +954,8 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
if (cmd_idx < 0) {
ret = cmd_idx;
clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
- IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
+ IWL_DEBUG_QUIET_RFKILL(trans,
+ "Error sending %s: enqueue_hcmd failed: %d\n",
get_cmd_string(cmd->id), ret);
return ret;
}
@@ -1022,12 +969,12 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
&trans_pcie->txq[trans->shrd->cmd_queue];
struct iwl_queue *q = &txq->q;
- IWL_ERR(trans,
+ IWL_DEBUG_QUIET_RFKILL(trans,
"Error sending %s: time out after %dms.\n",
get_cmd_string(cmd->id),
jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
- IWL_ERR(trans,
+ IWL_DEBUG_QUIET_RFKILL(trans,
"Current CMD queue read_ptr %d write_ptr %d\n",
q->read_ptr, q->write_ptr);
@@ -1039,18 +986,6 @@ static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
}
}
- if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
- IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
- get_cmd_string(cmd->id));
- ret = -ECANCELED;
- goto fail;
- }
- if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
- IWL_ERR(trans, "Command %s failed: FW Error\n",
- get_cmd_string(cmd->id));
- ret = -EIO;
- goto fail;
- }
if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
IWL_ERR(trans, "Error: Response NULL in '%s'\n",
get_cmd_string(cmd->id));
@@ -1071,7 +1006,7 @@ cancel:
trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
~CMD_WANT_SKB;
}
-fail:
+
if (cmd->reply_page) {
iwl_free_pages(trans->shrd, cmd->reply_page);
cmd->reply_page = 0;
@@ -1115,9 +1050,6 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
return 0;
}
- IWL_DEBUG_TX_REPLY(trans, "reclaim: [%d, %d, %d]\n", txq_id,
- q->read_ptr, index);
-
if (WARN_ON(!skb_queue_empty(skbs)))
return 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index 5f17ab8..324d06d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -88,18 +88,16 @@ static int iwl_trans_rx_alloc(struct iwl_trans *trans)
return -EINVAL;
/* Allocate the circular buffer of Read Buffer Descriptors (RBDs) */
- rxq->bd = dma_alloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
- &rxq->bd_dma, GFP_KERNEL);
+ rxq->bd = dma_zalloc_coherent(dev, sizeof(__le32) * RX_QUEUE_SIZE,
+ &rxq->bd_dma, GFP_KERNEL);
if (!rxq->bd)
goto err_bd;
- memset(rxq->bd, 0, sizeof(__le32) * RX_QUEUE_SIZE);
/*Allocate the driver's pointer to receive buffer status */
- rxq->rb_stts = dma_alloc_coherent(dev, sizeof(*rxq->rb_stts),
- &rxq->rb_stts_dma, GFP_KERNEL);
+ rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
+ &rxq->rb_stts_dma, GFP_KERNEL);
if (!rxq->rb_stts)
goto err_rb_stts;
- memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
return 0;
@@ -1044,7 +1042,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
- u8 sta_id)
+ u8 sta_id, u8 tid)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
@@ -1058,13 +1056,12 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
dma_addr_t txcmd_phys;
dma_addr_t scratch_phys;
u16 len, firstlen, secondlen;
- u16 seq_number = 0;
u8 wait_write_ptr = 0;
u8 txq_id;
- u8 tid = 0;
bool is_agg = false;
__le16 fc = hdr->frame_control;
u8 hdr_len = ieee80211_hdrlen(fc);
+ u16 __maybe_unused wifi_seq;
/*
* Send this frame after DTIM -- there's a special queue
@@ -1085,36 +1082,28 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
txq_id =
trans_pcie->ac_to_queue[ctx][skb_get_queue_mapping(skb)];
- if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
- u8 *qc = NULL;
- struct iwl_tid_data *tid_data;
- qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
- tid_data = &trans->shrd->tid_data[sta_id][tid];
-
- if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
- return -1;
-
- seq_number = tid_data->seq_number;
- seq_number &= IEEE80211_SCTL_SEQ;
- hdr->seq_ctrl = hdr->seq_ctrl &
- cpu_to_le16(IEEE80211_SCTL_FRAG);
- hdr->seq_ctrl |= cpu_to_le16(seq_number);
- seq_number += 0x10;
- /* aggregation is on for this <sta,tid> */
- if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- WARN_ON_ONCE(tid_data->agg.state != IWL_AGG_ON);
- txq_id = tid_data->agg.txq_id;
- is_agg = true;
- }
+ /* aggregation is on for this <sta,tid> */
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ WARN_ON(tid >= IWL_MAX_TID_COUNT);
+ txq_id = trans_pcie->agg_txq[sta_id][tid];
+ is_agg = true;
}
- /* Copy MAC header from skb into command buffer */
- memcpy(tx_cmd->hdr, hdr, hdr_len);
-
txq = &trans_pcie->txq[txq_id];
q = &txq->q;
+ /* In AGG mode, the index in the ring must correspond to the WiFi
+ * sequence number. This is a HW requirement to help the SCD to parse
+ * the BA.
+ * Check here that the packets are in the right place on the ring.
+ */
+#ifdef CONFIG_IWLWIFI_DEBUG
+ wifi_seq = SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
+ WARN_ONCE(is_agg && ((wifi_seq & 0xff) != q->write_ptr),
+ "Q: %d WiFi Seq %d tfdNum %d",
+ txq_id, wifi_seq, q->write_ptr);
+#endif
+
/* Set up driver data for this TFD */
txq->skbs[q->write_ptr] = skb;
txq->cmd[q->write_ptr] = dev_cmd;
@@ -1212,13 +1201,6 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
iwl_txq_update_write_ptr(trans, txq);
- if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
- trans->shrd->tid_data[sta_id][tid].tfds_in_queue++;
- if (!ieee80211_has_morefrags(fc))
- trans->shrd->tid_data[sta_id][tid].seq_number =
- seq_number;
- }
-
/*
* At this point the frame is "transmitted" successfully
* and we will get a TX status notification eventually,
@@ -1230,7 +1212,7 @@ static int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
txq->need_update = 1;
iwl_txq_update_write_ptr(trans, txq);
} else {
- iwl_stop_queue(trans, txq);
+ iwl_stop_queue(trans, txq, "Queue is full");
}
}
return 0;
@@ -1267,101 +1249,49 @@ static int iwl_trans_pcie_request_irq(struct iwl_trans *trans)
return 0;
}
-static int iwlagn_txq_check_empty(struct iwl_trans *trans,
- int sta_id, u8 tid, int txq_id)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- struct iwl_queue *q = &trans_pcie->txq[txq_id].q;
- struct iwl_tid_data *tid_data = &trans->shrd->tid_data[sta_id][tid];
-
- lockdep_assert_held(&trans->shrd->sta_lock);
-
- switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
- case IWL_EMPTYING_HW_QUEUE_DELBA:
- /* We are reclaiming the last packet of the */
- /* aggregated HW queue */
- if ((txq_id == tid_data->agg.txq_id) &&
- (q->read_ptr == q->write_ptr)) {
- IWL_DEBUG_HT(trans,
- "HW queue empty: continue DELBA flow\n");
- iwl_trans_pcie_txq_agg_disable(trans, txq_id);
- tid_data->agg.state = IWL_AGG_OFF;
- iwl_stop_tx_ba_trans_ready(priv(trans),
- NUM_IWL_RXON_CTX,
- sta_id, tid);
- iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
- }
- break;
- case IWL_EMPTYING_HW_QUEUE_ADDBA:
- /* We are reclaiming the last packet of the queue */
- if (tid_data->tfds_in_queue == 0) {
- IWL_DEBUG_HT(trans,
- "HW queue empty: continue ADDBA flow\n");
- tid_data->agg.state = IWL_AGG_ON;
- iwl_start_tx_ba_trans_ready(priv(trans),
- NUM_IWL_RXON_CTX,
- sta_id, tid);
- }
- break;
- default:
- break;
- }
-
- return 0;
-}
-
-static void iwl_free_tfds_in_queue(struct iwl_trans *trans,
- int sta_id, int tid, int freed)
-{
- lockdep_assert_held(&trans->shrd->sta_lock);
-
- if (trans->shrd->tid_data[sta_id][tid].tfds_in_queue >= freed)
- trans->shrd->tid_data[sta_id][tid].tfds_in_queue -= freed;
- else {
- IWL_DEBUG_TX(trans, "free more than tfds_in_queue (%u:%d)\n",
- trans->shrd->tid_data[sta_id][tid].tfds_in_queue,
- freed);
- trans->shrd->tid_data[sta_id][tid].tfds_in_queue = 0;
- }
-}
-
-static void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
+static int iwl_trans_pcie_reclaim(struct iwl_trans *trans, int sta_id, int tid,
int txq_id, int ssn, u32 status,
struct sk_buff_head *skbs)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
- enum iwl_agg_state agg_state;
/* n_bd is usually 256 => n_bd - 1 = 0xff */
int tfd_num = ssn & (txq->q.n_bd - 1);
int freed = 0;
- bool cond;
txq->time_stamp = jiffies;
- if (txq->sched_retry) {
- agg_state =
- trans->shrd->tid_data[txq->sta_id][txq->tid].agg.state;
- cond = (agg_state != IWL_EMPTYING_HW_QUEUE_DELBA);
- } else {
- cond = (status != TX_STATUS_FAIL_PASSIVE_NO_RX);
+ if (unlikely(txq_id >= IWLAGN_FIRST_AMPDU_QUEUE &&
+ tid != IWL_TID_NON_QOS &&
+ txq_id != trans_pcie->agg_txq[sta_id][tid])) {
+ /*
+ * FIXME: this is a uCode bug which needs to be addressed;
+ * log the information and return for now.
+ * Since this can happen very often and in order not to fill
+ * the syslog, don't use IWL_ERR or IWL_WARN here.
+ */
+ IWL_DEBUG_TX_QUEUES(trans, "Bad queue mapping txq_id %d, "
+ "agg_txq[sta_id[tid] %d", txq_id,
+ trans_pcie->agg_txq[sta_id][tid]);
+ return 1;
}
if (txq->q.read_ptr != tfd_num) {
- IWL_DEBUG_TX_REPLY(trans, "Retry scheduler reclaim "
- "scd_ssn=%d idx=%d txq=%d swq=%d\n",
- ssn , tfd_num, txq_id, txq->swq_id);
+ IWL_DEBUG_TX_REPLY(trans, "[Q %d | AC %d] %d -> %d (%d)\n",
+ txq_id, iwl_get_queue_ac(txq), txq->q.read_ptr,
+ tfd_num, ssn);
freed = iwl_tx_queue_reclaim(trans, txq_id, tfd_num, skbs);
- if (iwl_queue_space(&txq->q) > txq->q.low_mark && cond)
- iwl_wake_queue(trans, txq);
+ if (iwl_queue_space(&txq->q) > txq->q.low_mark &&
+ (!txq->sched_retry ||
+ status != TX_STATUS_FAIL_PASSIVE_NO_RX))
+ iwl_wake_queue(trans, txq, "Packets reclaimed");
}
-
- iwl_free_tfds_in_queue(trans, sta_id, tid, freed);
- iwlagn_txq_check_empty(trans, sta_id, tid, txq_id);
+ return 0;
}
static void iwl_trans_pcie_free(struct iwl_trans *trans)
{
+ iwl_calib_free_results(trans);
iwl_trans_pcie_tx_free(trans);
iwl_trans_pcie_rx_free(trans);
free_irq(bus(trans)->irq, trans);
@@ -1417,7 +1347,8 @@ static int iwl_trans_pcie_resume(struct iwl_trans *trans)
#endif /* CONFIG_PM_SLEEP */
static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx)
+ enum iwl_rxon_context_id ctx,
+ const char *msg)
{
u8 ac, txq_id;
struct iwl_trans_pcie *trans_pcie =
@@ -1425,11 +1356,11 @@ static void iwl_trans_pcie_wake_any_queue(struct iwl_trans *trans,
for (ac = 0; ac < AC_NUM; ac++) {
txq_id = trans_pcie->ac_to_queue[ctx][ac];
- IWL_DEBUG_INFO(trans, "Queue Status: Q[%d] %s\n",
+ IWL_DEBUG_TX_QUEUES(trans, "Queue Status: Q[%d] %s\n",
ac,
(atomic_read(&trans_pcie->queue_stop_count[ac]) > 0)
? "stopped" : "awake");
- iwl_wake_queue(trans, &trans_pcie->txq[txq_id]);
+ iwl_wake_queue(trans, &trans_pcie->txq[txq_id], msg);
}
}
@@ -1452,11 +1383,12 @@ static struct iwl_trans *iwl_trans_pcie_alloc(struct iwl_shared *shrd)
return iwl_trans;
}
-static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id)
+static void iwl_trans_pcie_stop_queue(struct iwl_trans *trans, int txq_id,
+ const char *msg)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- iwl_stop_queue(trans, &trans_pcie->txq[txq_id]);
+ iwl_stop_queue(trans, &trans_pcie->txq[txq_id], msg);
}
#define IWL_FLUSH_WAIT_MS 2000
@@ -1511,8 +1443,12 @@ static int iwl_trans_pcie_check_stuck_queue(struct iwl_trans *trans, int cnt)
if (time_after(jiffies, timeout)) {
IWL_ERR(trans, "Queue %d stuck for %u ms.\n", q->id,
hw_params(trans).wd_timeout);
- IWL_ERR(trans, "Current read_ptr %d write_ptr %d\n",
+ IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
q->read_ptr, q->write_ptr);
+ IWL_ERR(trans, "Current HW read_ptr %d write_ptr %d\n",
+ iwl_read_prph(bus(trans), SCD_QUEUE_RDPTR(cnt))
+ & (TFD_QUEUE_SIZE_MAX - 1),
+ iwl_read_prph(bus(trans), SCD_QUEUE_WRPTR(cnt)));
return 1;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index c592312..e6bf3f5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -171,32 +171,31 @@ struct iwl_trans_ops {
void (*tx_start)(struct iwl_trans *trans);
void (*wake_any_queue)(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx);
+ enum iwl_rxon_context_id ctx,
+ const char *msg);
int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
- u8 sta_id);
- void (*reclaim)(struct iwl_trans *trans, int sta_id, int tid,
+ u8 sta_id, u8 tid);
+ int (*reclaim)(struct iwl_trans *trans, int sta_id, int tid,
int txq_id, int ssn, u32 status,
struct sk_buff_head *skbs);
int (*tx_agg_disable)(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx, int sta_id,
- int tid);
+ int sta_id, int tid);
int (*tx_agg_alloc)(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx, int sta_id, int tid,
- u16 *ssn);
+ int sta_id, int tid);
void (*tx_agg_setup)(struct iwl_trans *trans,
enum iwl_rxon_context_id ctx, int sta_id, int tid,
- int frame_limit);
+ int frame_limit, u16 ssn);
void (*kick_nic)(struct iwl_trans *trans);
void (*free)(struct iwl_trans *trans);
- void (*stop_queue)(struct iwl_trans *trans, int q);
+ void (*stop_queue)(struct iwl_trans *trans, int q, const char *msg);
int (*dbgfs_register)(struct iwl_trans *trans, struct dentry* dir);
int (*check_stuck_queue)(struct iwl_trans *trans, int q);
@@ -207,17 +206,54 @@ struct iwl_trans_ops {
#endif
};
+/* one for each uCode image (inst/data, boot/init/runtime) */
+struct fw_desc {
+ dma_addr_t p_addr; /* hardware address */
+ void *v_addr; /* software address */
+ u32 len; /* size in bytes */
+};
+
+struct fw_img {
+ struct fw_desc code; /* firmware code image */
+ struct fw_desc data; /* firmware data image */
+};
+
+/* Opaque calibration results */
+struct iwl_calib_result {
+ struct list_head list;
+ size_t cmd_len;
+ struct iwl_calib_hdr hdr;
+ /* data follows */
+};
+
/**
* struct iwl_trans - transport common data
* @ops - pointer to iwl_trans_ops
* @shrd - pointer to iwl_shared which holds shared data from the upper layer
* @hcmd_lock: protects HCMD
+ * @ucode_write_complete: indicates that the ucode has been copied.
+ * @ucode_rt: run time ucode image
+ * @ucode_init: init ucode image
+ * @ucode_wowlan: wake on wireless ucode image (optional)
+ * @nvm_device_type: indicates OTP or eeprom
+ * @calib_results: list head for init calibration results
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
struct iwl_shared *shrd;
spinlock_t hcmd_lock;
+ u8 ucode_write_complete; /* the image write is complete */
+ struct fw_img ucode_rt;
+ struct fw_img ucode_init;
+ struct fw_img ucode_wowlan;
+
+ /* eeprom related variables */
+ int nvm_device_type;
+
+ /* init calibration results */
+ struct list_head calib_results;
+
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __attribute__((__aligned__(sizeof(void *))));
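/*
 * Illustrative sketch (not part of the patch): trans_specific[] is a
 * zero-length, pointer-aligned array, so a transport embeds its private
 * state right after struct iwl_trans and recovers it with a cast along
 * these lines (the macro name below is only an assumption made for
 * illustration; the real accessor lives in the PCIe transport code):
 *
 *	#define EXAMPLE_GET_PCIE(_trans) \
 *		((struct iwl_trans_pcie *)((_trans)->trans_specific))
 */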
@@ -249,9 +285,10 @@ static inline void iwl_trans_tx_start(struct iwl_trans *trans)
}
static inline void iwl_trans_wake_any_queue(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx)
+ enum iwl_rxon_context_id ctx,
+ const char *msg)
{
- trans->ops->wake_any_queue(trans, ctx);
+ trans->ops->wake_any_queue(trans, ctx, msg);
}
@@ -266,39 +303,38 @@ int iwl_trans_send_cmd_pdu(struct iwl_trans *trans, u8 id,
static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, enum iwl_rxon_context_id ctx,
- u8 sta_id)
+ u8 sta_id, u8 tid)
{
- return trans->ops->tx(trans, skb, dev_cmd, ctx, sta_id);
+ return trans->ops->tx(trans, skb, dev_cmd, ctx, sta_id, tid);
}
-static inline void iwl_trans_reclaim(struct iwl_trans *trans, int sta_id,
+static inline int iwl_trans_reclaim(struct iwl_trans *trans, int sta_id,
int tid, int txq_id, int ssn, u32 status,
struct sk_buff_head *skbs)
{
- trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn, status, skbs);
+ return trans->ops->reclaim(trans, sta_id, tid, txq_id, ssn,
+ status, skbs);
}
static inline int iwl_trans_tx_agg_disable(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx,
int sta_id, int tid)
{
- return trans->ops->tx_agg_disable(trans, ctx, sta_id, tid);
+ return trans->ops->tx_agg_disable(trans, sta_id, tid);
}
static inline int iwl_trans_tx_agg_alloc(struct iwl_trans *trans,
- enum iwl_rxon_context_id ctx,
- int sta_id, int tid, u16 *ssn)
+ int sta_id, int tid)
{
- return trans->ops->tx_agg_alloc(trans, ctx, sta_id, tid, ssn);
+ return trans->ops->tx_agg_alloc(trans, sta_id, tid);
}
static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans,
enum iwl_rxon_context_id ctx,
int sta_id, int tid,
- int frame_limit)
+ int frame_limit, u16 ssn)
{
- trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit);
+ trans->ops->tx_agg_setup(trans, ctx, sta_id, tid, frame_limit, ssn);
}
static inline void iwl_trans_kick_nic(struct iwl_trans *trans)
@@ -311,9 +347,10 @@ static inline void iwl_trans_free(struct iwl_trans *trans)
trans->ops->free(trans);
}
-static inline void iwl_trans_stop_queue(struct iwl_trans *trans, int q)
+static inline void iwl_trans_stop_queue(struct iwl_trans *trans, int q,
+ const char *msg)
{
- trans->ops->stop_queue(trans, q);
+ trans->ops->stop_queue(trans, q, msg);
}
static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
@@ -348,4 +385,13 @@ static inline int iwl_trans_resume(struct iwl_trans *trans)
******************************************************/
extern const struct iwl_trans_ops trans_ops_pcie;
+int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc,
+ const void *data, size_t len);
+void iwl_dealloc_ucode(struct iwl_trans *trans);
+
+int iwl_send_calib_results(struct iwl_trans *trans);
+int iwl_calib_set(struct iwl_trans *trans,
+ const struct iwl_calib_hdr *cmd, int len);
+void iwl_calib_free_results(struct iwl_trans *trans);
+
#endif /* __iwl_trans_h__ */
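The firmware descriptor helpers declared just above (iwl_alloc_fw_desc(), iwl_dealloc_ucode()) pair a DMA-coherent buffer with its bus address and length. Below is a minimal sketch of how a caller could fill one of the new fw_img members from two firmware sections, assuming only the declarations in this header; the helper name and the section pointers are illustrative and not part of the patch:

	/* hypothetical helper, shown for illustration only */
	static int example_alloc_init_img(struct iwl_trans *trans,
					  const void *inst, size_t inst_len,
					  const void *data, size_t data_len)
	{
		int ret;

		/* copy the instruction section into DMA-coherent memory */
		ret = iwl_alloc_fw_desc(bus(trans), &trans->ucode_init.code,
					inst, inst_len);
		if (ret)
			return ret;

		/* copy the data section; on failure release every image held
		 * by the transport so nothing is leaked */
		ret = iwl_alloc_fw_desc(bus(trans), &trans->ucode_init.data,
					data, data_len);
		if (ret)
			iwl_dealloc_ucode(trans);

		return ret;
	}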
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-ucode.c
index 8ba0dd5..36a1b5b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-ucode.c
@@ -31,7 +31,9 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
+#include <linux/dma-mapping.h>
+#include "iwl-wifi.h"
#include "iwl-dev.h"
#include "iwl-core.h"
#include "iwl-io.h"
@@ -72,51 +74,98 @@ static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
{COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
};
+/******************************************************************************
+ *
+ * uCode download functions
+ *
+ ******************************************************************************/
+
+static void iwl_free_fw_desc(struct iwl_bus *bus, struct fw_desc *desc)
+{
+ if (desc->v_addr)
+ dma_free_coherent(bus->dev, desc->len,
+ desc->v_addr, desc->p_addr);
+ desc->v_addr = NULL;
+ desc->len = 0;
+}
+
+static void iwl_free_fw_img(struct iwl_bus *bus, struct fw_img *img)
+{
+ iwl_free_fw_desc(bus, &img->code);
+ iwl_free_fw_desc(bus, &img->data);
+}
+
+void iwl_dealloc_ucode(struct iwl_trans *trans)
+{
+ iwl_free_fw_img(bus(trans), &trans->ucode_rt);
+ iwl_free_fw_img(bus(trans), &trans->ucode_init);
+ iwl_free_fw_img(bus(trans), &trans->ucode_wowlan);
+}
+
+int iwl_alloc_fw_desc(struct iwl_bus *bus, struct fw_desc *desc,
+ const void *data, size_t len)
+{
+ if (!len) {
+ desc->v_addr = NULL;
+ return -EINVAL;
+ }
+
+ desc->v_addr = dma_alloc_coherent(bus->dev, len,
+ &desc->p_addr, GFP_KERNEL);
+ if (!desc->v_addr)
+ return -ENOMEM;
+
+ desc->len = len;
+ memcpy(desc->v_addr, data, len);
+ return 0;
+}
+
/*
* ucode
*/
-static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
+static int iwl_load_section(struct iwl_trans *trans, const char *name,
struct fw_desc *image, u32 dst_addr)
{
+ struct iwl_bus *bus = bus(trans);
dma_addr_t phy_addr = image->p_addr;
u32 byte_cnt = image->len;
int ret;
- priv->ucode_write_complete = 0;
+ trans->ucode_write_complete = 0;
- iwl_write_direct32(bus(priv),
+ iwl_write_direct32(bus,
FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
- iwl_write_direct32(bus(priv),
+ iwl_write_direct32(bus,
FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
- iwl_write_direct32(bus(priv),
+ iwl_write_direct32(bus,
FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
- iwl_write_direct32(bus(priv),
+ iwl_write_direct32(bus,
FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
(iwl_get_dma_hi_addr(phy_addr)
<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
- iwl_write_direct32(bus(priv),
+ iwl_write_direct32(bus,
FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
- iwl_write_direct32(bus(priv),
+ iwl_write_direct32(bus,
FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
- IWL_DEBUG_FW(priv, "%s uCode section being loaded...\n", name);
- ret = wait_event_timeout(priv->shrd->wait_command_queue,
- priv->ucode_write_complete, 5 * HZ);
+ IWL_DEBUG_FW(bus, "%s uCode section being loaded...\n", name);
+ ret = wait_event_timeout(trans->shrd->wait_command_queue,
+ trans->ucode_write_complete, 5 * HZ);
if (!ret) {
- IWL_ERR(priv, "Could not load the %s uCode section\n",
+ IWL_ERR(trans, "Could not load the %s uCode section\n",
name);
return -ETIMEDOUT;
}
@@ -124,41 +173,65 @@ static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
return 0;
}
-static int iwlagn_load_given_ucode(struct iwl_priv *priv,
- struct fw_img *image)
+static inline struct fw_img *iwl_get_ucode_image(struct iwl_trans *trans,
+ enum iwl_ucode_type ucode_type)
+{
+ switch (ucode_type) {
+ case IWL_UCODE_INIT:
+ return &trans->ucode_init;
+ case IWL_UCODE_WOWLAN:
+ return &trans->ucode_wowlan;
+ case IWL_UCODE_REGULAR:
+ return &trans->ucode_rt;
+ case IWL_UCODE_NONE:
+ break;
+ }
+ return NULL;
+}
+
+static int iwl_load_given_ucode(struct iwl_trans *trans,
+ enum iwl_ucode_type ucode_type)
{
int ret = 0;
+ struct fw_img *image = iwl_get_ucode_image(trans, ucode_type);
- ret = iwlagn_load_section(priv, "INST", &image->code,
+
+ if (!image) {
+ IWL_ERR(trans, "Invalid ucode requested (%d)\n",
+ ucode_type);
+ return -EINVAL;
+ }
+
+ ret = iwl_load_section(trans, "INST", &image->code,
IWLAGN_RTC_INST_LOWER_BOUND);
if (ret)
return ret;
- return iwlagn_load_section(priv, "DATA", &image->data,
+ return iwl_load_section(trans, "DATA", &image->data,
IWLAGN_RTC_DATA_LOWER_BOUND);
}
/*
* Calibration
*/
-static int iwlagn_set_Xtal_calib(struct iwl_priv *priv)
+static int iwl_set_Xtal_calib(struct iwl_trans *trans)
{
struct iwl_calib_xtal_freq_cmd cmd;
__le16 *xtal_calib =
- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_XTAL);
+ (__le16 *)iwl_eeprom_query_addr(trans->shrd, EEPROM_XTAL);
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD);
cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
- return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
- (u8 *)&cmd, sizeof(cmd));
+ return iwl_calib_set(trans, (void *)&cmd, sizeof(cmd));
}
-static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
+static int iwl_set_temperature_offset_calib(struct iwl_trans *trans)
{
struct iwl_calib_temperature_offset_cmd cmd;
__le16 *offset_calib =
- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
+ (__le16 *)iwl_eeprom_query_addr(trans->shrd,
+ EEPROM_RAW_TEMPERATURE);
memset(&cmd, 0, sizeof(cmd));
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
@@ -166,49 +239,48 @@ static int iwlagn_set_temperature_offset_calib(struct iwl_priv *priv)
if (!(cmd.radio_sensor_offset))
cmd.radio_sensor_offset = DEFAULT_RADIO_SENSOR_OFFSET;
- IWL_DEBUG_CALIB(priv, "Radio sensor offset: %d\n",
+ IWL_DEBUG_CALIB(trans, "Radio sensor offset: %d\n",
le16_to_cpu(cmd.radio_sensor_offset));
- return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET],
- (u8 *)&cmd, sizeof(cmd));
+ return iwl_calib_set(trans, (void *)&cmd, sizeof(cmd));
}
-static int iwlagn_set_temperature_offset_calib_v2(struct iwl_priv *priv)
+static int iwl_set_temperature_offset_calib_v2(struct iwl_trans *trans)
{
struct iwl_calib_temperature_offset_v2_cmd cmd;
- __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(priv,
+ __le16 *offset_calib_high = (__le16 *)iwl_eeprom_query_addr(trans->shrd,
EEPROM_KELVIN_TEMPERATURE);
__le16 *offset_calib_low =
- (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_RAW_TEMPERATURE);
+ (__le16 *)iwl_eeprom_query_addr(trans->shrd,
+ EEPROM_RAW_TEMPERATURE);
struct iwl_eeprom_calib_hdr *hdr;
memset(&cmd, 0, sizeof(cmd));
iwl_set_calib_hdr(&cmd.hdr, IWL_PHY_CALIBRATE_TEMP_OFFSET_CMD);
- hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
+ hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(trans->shrd,
EEPROM_CALIB_ALL);
memcpy(&cmd.radio_sensor_offset_high, offset_calib_high,
sizeof(*offset_calib_high));
memcpy(&cmd.radio_sensor_offset_low, offset_calib_low,
sizeof(*offset_calib_low));
if (!(cmd.radio_sensor_offset_low)) {
- IWL_DEBUG_CALIB(priv, "no info in EEPROM, use default\n");
+ IWL_DEBUG_CALIB(trans, "no info in EEPROM, use default\n");
cmd.radio_sensor_offset_low = DEFAULT_RADIO_SENSOR_OFFSET;
cmd.radio_sensor_offset_high = DEFAULT_RADIO_SENSOR_OFFSET;
}
memcpy(&cmd.burntVoltageRef, &hdr->voltage,
sizeof(hdr->voltage));
- IWL_DEBUG_CALIB(priv, "Radio sensor offset high: %d\n",
+ IWL_DEBUG_CALIB(trans, "Radio sensor offset high: %d\n",
le16_to_cpu(cmd.radio_sensor_offset_high));
- IWL_DEBUG_CALIB(priv, "Radio sensor offset low: %d\n",
+ IWL_DEBUG_CALIB(trans, "Radio sensor offset low: %d\n",
le16_to_cpu(cmd.radio_sensor_offset_low));
- IWL_DEBUG_CALIB(priv, "Voltage Ref: %d\n",
+ IWL_DEBUG_CALIB(trans, "Voltage Ref: %d\n",
le16_to_cpu(cmd.burntVoltageRef));
- return iwl_calib_set(&priv->calib_results[IWL_CALIB_TEMP_OFFSET],
- (u8 *)&cmd, sizeof(cmd));
+ return iwl_calib_set(trans, (void *)&cmd, sizeof(cmd));
}
-static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
+static int iwl_send_calib_cfg(struct iwl_trans *trans)
{
struct iwl_calib_cfg_cmd calib_cfg_cmd;
struct iwl_host_cmd cmd = {
@@ -224,7 +296,7 @@ static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
calib_cfg_cmd.ucd_calib_cfg.flags =
IWL_CALIB_CFG_FLAG_SEND_COMPLETE_NTFY_MSK;
- return iwl_trans_send_cmd(trans(priv), &cmd);
+ return iwl_trans_send_cmd(trans, &cmd);
}
int iwlagn_rx_calib_result(struct iwl_priv *priv,
@@ -234,60 +306,37 @@ int iwlagn_rx_calib_result(struct iwl_priv *priv,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
- int index;
/* reduce the size of the length field itself */
len -= 4;
- /* Define the order in which the results will be sent to the runtime
- * uCode. iwl_send_calib_results sends them in a row according to
- * their index. We sort them here
- */
- switch (hdr->op_code) {
- case IWL_PHY_CALIBRATE_DC_CMD:
- index = IWL_CALIB_DC;
- break;
- case IWL_PHY_CALIBRATE_LO_CMD:
- index = IWL_CALIB_LO;
- break;
- case IWL_PHY_CALIBRATE_TX_IQ_CMD:
- index = IWL_CALIB_TX_IQ;
- break;
- case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
- index = IWL_CALIB_TX_IQ_PERD;
- break;
- case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
- index = IWL_CALIB_BASE_BAND;
- break;
- default:
- IWL_ERR(priv, "Unknown calibration notification %d\n",
- hdr->op_code);
- return -1;
- }
- iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
+ if (iwl_calib_set(trans(priv), hdr, len))
+ IWL_ERR(priv, "Failed to record calibration data %d\n",
+ hdr->op_code);
+
return 0;
}
-int iwlagn_init_alive_start(struct iwl_priv *priv)
+int iwl_init_alive_start(struct iwl_trans *trans)
{
int ret;
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->advanced_bt_coexist) {
+ if (cfg(trans)->bt_params &&
+ cfg(trans)->bt_params->advanced_bt_coexist) {
/*
* Tell uCode we are ready to perform calibration
* need to perform this before any calibration
 * no need to close the envelope since we are going
* to load the runtime uCode later.
*/
- ret = iwlagn_send_bt_env(priv, IWL_BT_COEX_ENV_OPEN,
+ ret = iwl_send_bt_env(trans, IWL_BT_COEX_ENV_OPEN,
BT_COEX_PRIO_TBL_EVT_INIT_CALIB2);
if (ret)
return ret;
}
- ret = iwlagn_send_calib_cfg(priv);
+ ret = iwl_send_calib_cfg(trans);
if (ret)
return ret;
@@ -295,21 +344,21 @@ int iwlagn_init_alive_start(struct iwl_priv *priv)
* temperature offset calibration is only needed for runtime ucode,
* so prepare the value now.
*/
- if (priv->cfg->need_temp_offset_calib) {
- if (priv->cfg->temp_offset_v2)
- return iwlagn_set_temperature_offset_calib_v2(priv);
+ if (cfg(trans)->need_temp_offset_calib) {
+ if (cfg(trans)->temp_offset_v2)
+ return iwl_set_temperature_offset_calib_v2(trans);
else
- return iwlagn_set_temperature_offset_calib(priv);
+ return iwl_set_temperature_offset_calib(trans);
}
return 0;
}
-static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
+static int iwl_send_wimax_coex(struct iwl_trans *trans)
{
struct iwl_wimax_coex_cmd coex_cmd;
- if (priv->cfg->base_params->support_wimax_coexist) {
+ if (cfg(trans)->base_params->support_wimax_coexist) {
/* UnMask wake up src at associated sleep */
coex_cmd.flags = COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
@@ -328,12 +377,12 @@ static int iwlagn_send_wimax_coex(struct iwl_priv *priv)
/* coexistence is disabled */
memset(&coex_cmd, 0, sizeof(coex_cmd));
}
- return iwl_trans_send_cmd_pdu(trans(priv),
+ return iwl_trans_send_cmd_pdu(trans,
COEX_PRIORITY_TABLE_CMD, CMD_SYNC,
sizeof(coex_cmd), &coex_cmd);
}
-static const u8 iwlagn_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
+static const u8 iwl_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
(0 << IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS)),
((BT_COEX_PRIO_TBL_PRIO_BYPASS << IWL_BT_COEX_PRIO_TBL_PRIO_POS) |
@@ -355,61 +404,64 @@ static const u8 iwlagn_bt_prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX] = {
0, 0, 0, 0, 0, 0, 0
};
-void iwlagn_send_prio_tbl(struct iwl_priv *priv)
+void iwl_send_prio_tbl(struct iwl_trans *trans)
{
struct iwl_bt_coex_prio_table_cmd prio_tbl_cmd;
- memcpy(prio_tbl_cmd.prio_tbl, iwlagn_bt_prio_tbl,
- sizeof(iwlagn_bt_prio_tbl));
- if (iwl_trans_send_cmd_pdu(trans(priv),
+ memcpy(prio_tbl_cmd.prio_tbl, iwl_bt_prio_tbl,
+ sizeof(iwl_bt_prio_tbl));
+ if (iwl_trans_send_cmd_pdu(trans,
REPLY_BT_COEX_PRIO_TABLE, CMD_SYNC,
sizeof(prio_tbl_cmd), &prio_tbl_cmd))
- IWL_ERR(priv, "failed to send BT prio tbl command\n");
+ IWL_ERR(trans, "failed to send BT prio tbl command\n");
}
-int iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
+int iwl_send_bt_env(struct iwl_trans *trans, u8 action, u8 type)
{
struct iwl_bt_coex_prot_env_cmd env_cmd;
int ret;
env_cmd.action = action;
env_cmd.type = type;
- ret = iwl_trans_send_cmd_pdu(trans(priv),
+ ret = iwl_trans_send_cmd_pdu(trans,
REPLY_BT_COEX_PROT_ENV, CMD_SYNC,
sizeof(env_cmd), &env_cmd);
if (ret)
- IWL_ERR(priv, "failed to send BT env command\n");
+ IWL_ERR(trans, "failed to send BT env command\n");
return ret;
}
-static int iwlagn_alive_notify(struct iwl_priv *priv)
+static int iwl_alive_notify(struct iwl_trans *trans)
{
+ struct iwl_priv *priv = priv(trans);
struct iwl_rxon_context *ctx;
int ret;
if (!priv->tx_cmd_pool)
priv->tx_cmd_pool =
- kmem_cache_create("iwlagn_dev_cmd",
+ kmem_cache_create("iwl_dev_cmd",
sizeof(struct iwl_device_cmd),
sizeof(void *), 0, NULL);
if (!priv->tx_cmd_pool)
return -ENOMEM;
- iwl_trans_tx_start(trans(priv));
+ iwl_trans_tx_start(trans);
for_each_context(priv, ctx)
ctx->last_tx_rejected = false;
- ret = iwlagn_send_wimax_coex(priv);
+ ret = iwl_send_wimax_coex(trans);
if (ret)
return ret;
- ret = iwlagn_set_Xtal_calib(priv);
- if (ret)
- return ret;
+ if (!cfg(priv)->no_xtal_calib) {
+ ret = iwl_set_Xtal_calib(trans);
+ if (ret)
+ return ret;
+ }
- return iwl_send_calib_results(priv);
+ return iwl_send_calib_results(trans);
}
@@ -418,7 +470,7 @@ static int iwlagn_alive_notify(struct iwl_priv *priv)
* using sample data 100 bytes apart. If these sample points are good,
* it's a pretty good bet that everything between them is good, too.
*/
-static int iwl_verify_inst_sparse(struct iwl_priv *priv,
+static int iwl_verify_inst_sparse(struct iwl_bus *bus,
struct fw_desc *fw_desc)
{
__le32 *image = (__le32 *)fw_desc->v_addr;
@@ -426,15 +478,15 @@ static int iwl_verify_inst_sparse(struct iwl_priv *priv,
u32 val;
u32 i;
- IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
+ IWL_DEBUG_FW(bus, "ucode inst image size is %u\n", len);
for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
/* read data comes through single port, auto-incr addr */
/* NOTE: Use the debugless read so we don't flood kernel log
* if IWL_DL_IO is set */
- iwl_write_direct32(bus(priv), HBUS_TARG_MEM_RADDR,
+ iwl_write_direct32(bus, HBUS_TARG_MEM_RADDR,
i + IWLAGN_RTC_INST_LOWER_BOUND);
- val = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
+ val = iwl_read32(bus, HBUS_TARG_MEM_RDAT);
if (val != le32_to_cpu(*image))
return -EIO;
}
@@ -442,7 +494,7 @@ static int iwl_verify_inst_sparse(struct iwl_priv *priv,
return 0;
}
-static void iwl_print_mismatch_inst(struct iwl_priv *priv,
+static void iwl_print_mismatch_inst(struct iwl_bus *bus,
struct fw_desc *fw_desc)
{
__le32 *image = (__le32 *)fw_desc->v_addr;
@@ -451,18 +503,18 @@ static void iwl_print_mismatch_inst(struct iwl_priv *priv,
u32 offs;
int errors = 0;
- IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
+ IWL_DEBUG_FW(bus, "ucode inst image size is %u\n", len);
- iwl_write_direct32(bus(priv), HBUS_TARG_MEM_RADDR,
+ iwl_write_direct32(bus, HBUS_TARG_MEM_RADDR,
IWLAGN_RTC_INST_LOWER_BOUND);
for (offs = 0;
offs < len && errors < 20;
offs += sizeof(u32), image++) {
/* read data comes through single port, auto-incr addr */
- val = iwl_read32(bus(priv), HBUS_TARG_MEM_RDAT);
+ val = iwl_read32(bus, HBUS_TARG_MEM_RDAT);
if (val != le32_to_cpu(*image)) {
- IWL_ERR(priv, "uCode INST section at "
+ IWL_ERR(bus, "uCode INST section at "
"offset 0x%x, is 0x%x, s/b 0x%x\n",
offs, val, le32_to_cpu(*image));
errors++;
@@ -474,91 +526,163 @@ static void iwl_print_mismatch_inst(struct iwl_priv *priv,
* iwl_verify_ucode - determine which instruction image is in SRAM,
* and verify its contents
*/
-static int iwl_verify_ucode(struct iwl_priv *priv, struct fw_img *img)
+static int iwl_verify_ucode(struct iwl_trans *trans,
+ enum iwl_ucode_type ucode_type)
{
- if (!iwl_verify_inst_sparse(priv, &img->code)) {
- IWL_DEBUG_FW(priv, "uCode is good in inst SRAM\n");
+ struct fw_img *img = iwl_get_ucode_image(trans, ucode_type);
+
+ if (!img) {
+ IWL_ERR(trans, "Invalid ucode requested (%d)\n", ucode_type);
+ return -EINVAL;
+ }
+
+ if (!iwl_verify_inst_sparse(bus(trans), &img->code)) {
+ IWL_DEBUG_FW(trans, "uCode is good in inst SRAM\n");
return 0;
}
- IWL_ERR(priv, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n");
+ IWL_ERR(trans, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n");
- iwl_print_mismatch_inst(priv, &img->code);
+ iwl_print_mismatch_inst(bus(trans), &img->code);
return -EIO;
}
-struct iwlagn_alive_data {
+struct iwl_alive_data {
bool valid;
u8 subtype;
};
-static void iwlagn_alive_fn(struct iwl_priv *priv,
+static void iwl_alive_fn(struct iwl_trans *trans,
struct iwl_rx_packet *pkt,
void *data)
{
- struct iwlagn_alive_data *alive_data = data;
+ struct iwl_alive_data *alive_data = data;
struct iwl_alive_resp *palive;
palive = &pkt->u.alive_frame;
- IWL_DEBUG_FW(priv, "Alive ucode status 0x%08X revision "
+ IWL_DEBUG_FW(trans, "Alive ucode status 0x%08X revision "
"0x%01X 0x%01X\n",
palive->is_valid, palive->ver_type,
palive->ver_subtype);
- priv->device_pointers.error_event_table =
+ trans->shrd->device_pointers.error_event_table =
le32_to_cpu(palive->error_event_table_ptr);
- priv->device_pointers.log_event_table =
+ trans->shrd->device_pointers.log_event_table =
le32_to_cpu(palive->log_event_table_ptr);
alive_data->subtype = palive->ver_subtype;
alive_data->valid = palive->is_valid == UCODE_VALID_OK;
}
+/* notification wait support */
+void iwl_init_notification_wait(struct iwl_shared *shrd,
+ struct iwl_notification_wait *wait_entry,
+ u8 cmd,
+ void (*fn)(struct iwl_trans *trans,
+ struct iwl_rx_packet *pkt,
+ void *data),
+ void *fn_data)
+{
+ wait_entry->fn = fn;
+ wait_entry->fn_data = fn_data;
+ wait_entry->cmd = cmd;
+ wait_entry->triggered = false;
+ wait_entry->aborted = false;
+
+ spin_lock_bh(&shrd->notif_wait_lock);
+ list_add(&wait_entry->list, &shrd->notif_waits);
+ spin_unlock_bh(&shrd->notif_wait_lock);
+}
+
+int iwl_wait_notification(struct iwl_shared *shrd,
+ struct iwl_notification_wait *wait_entry,
+ unsigned long timeout)
+{
+ int ret;
+
+ ret = wait_event_timeout(shrd->notif_waitq,
+ wait_entry->triggered || wait_entry->aborted,
+ timeout);
+
+ spin_lock_bh(&shrd->notif_wait_lock);
+ list_del(&wait_entry->list);
+ spin_unlock_bh(&shrd->notif_wait_lock);
+
+ if (wait_entry->aborted)
+ return -EIO;
+
+ /* return value is always >= 0 */
+ if (ret <= 0)
+ return -ETIMEDOUT;
+ return 0;
+}
+
+void iwl_remove_notification(struct iwl_shared *shrd,
+ struct iwl_notification_wait *wait_entry)
+{
+ spin_lock_bh(&shrd->notif_wait_lock);
+ list_del(&wait_entry->list);
+ spin_unlock_bh(&shrd->notif_wait_lock);
+}
+
+void iwl_abort_notification_waits(struct iwl_shared *shrd)
+{
+ unsigned long flags;
+ struct iwl_notification_wait *wait_entry;
+
+ spin_lock_irqsave(&shrd->notif_wait_lock, flags);
+ list_for_each_entry(wait_entry, &shrd->notif_waits, list)
+ wait_entry->aborted = true;
+ spin_unlock_irqrestore(&shrd->notif_wait_lock, flags);
+
+ wake_up_all(&shrd->notif_waitq);
+}
+
#define UCODE_ALIVE_TIMEOUT HZ
#define UCODE_CALIB_TIMEOUT (2*HZ)
-int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
- struct fw_img *image,
- enum iwlagn_ucode_type ucode_type)
+int iwl_load_ucode_wait_alive(struct iwl_trans *trans,
+ enum iwl_ucode_type ucode_type)
{
struct iwl_notification_wait alive_wait;
- struct iwlagn_alive_data alive_data;
+ struct iwl_alive_data alive_data;
int ret;
- enum iwlagn_ucode_type old_type;
+ enum iwl_ucode_type old_type;
- ret = iwl_trans_start_device(trans(priv));
+ ret = iwl_trans_start_device(trans);
if (ret)
return ret;
- iwlagn_init_notification_wait(priv, &alive_wait, REPLY_ALIVE,
- iwlagn_alive_fn, &alive_data);
+ iwl_init_notification_wait(trans->shrd, &alive_wait, REPLY_ALIVE,
+ iwl_alive_fn, &alive_data);
- old_type = priv->ucode_type;
- priv->ucode_type = ucode_type;
+ old_type = trans->shrd->ucode_type;
+ trans->shrd->ucode_type = ucode_type;
- ret = iwlagn_load_given_ucode(priv, image);
+ ret = iwl_load_given_ucode(trans, ucode_type);
if (ret) {
- priv->ucode_type = old_type;
- iwlagn_remove_notification(priv, &alive_wait);
+ trans->shrd->ucode_type = old_type;
+ iwl_remove_notification(trans->shrd, &alive_wait);
return ret;
}
- iwl_trans_kick_nic(trans(priv));
+ iwl_trans_kick_nic(trans);
/*
* Some things may run in the background now, but we
* just wait for the ALIVE notification here.
*/
- ret = iwlagn_wait_notification(priv, &alive_wait, UCODE_ALIVE_TIMEOUT);
+ ret = iwl_wait_notification(trans->shrd, &alive_wait,
+ UCODE_ALIVE_TIMEOUT);
if (ret) {
- priv->ucode_type = old_type;
+ trans->shrd->ucode_type = old_type;
return ret;
}
if (!alive_data.valid) {
- IWL_ERR(priv, "Loaded ucode is not valid!\n");
- priv->ucode_type = old_type;
+ IWL_ERR(trans, "Loaded ucode is not valid!\n");
+ trans->shrd->ucode_type = old_type;
return -EIO;
}
@@ -568,9 +692,9 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
* skip it for WoWLAN.
*/
if (ucode_type != IWL_UCODE_WOWLAN) {
- ret = iwl_verify_ucode(priv, image);
+ ret = iwl_verify_ucode(trans, ucode_type);
if (ret) {
- priv->ucode_type = old_type;
+ trans->shrd->ucode_type = old_type;
return ret;
}
@@ -578,42 +702,41 @@ int iwlagn_load_ucode_wait_alive(struct iwl_priv *priv,
msleep(5);
}
- ret = iwlagn_alive_notify(priv);
+ ret = iwl_alive_notify(trans);
if (ret) {
- IWL_WARN(priv,
+ IWL_WARN(trans,
"Could not complete ALIVE transition: %d\n", ret);
- priv->ucode_type = old_type;
+ trans->shrd->ucode_type = old_type;
return ret;
}
return 0;
}
-int iwlagn_run_init_ucode(struct iwl_priv *priv)
+int iwl_run_init_ucode(struct iwl_trans *trans)
{
struct iwl_notification_wait calib_wait;
int ret;
- lockdep_assert_held(&priv->shrd->mutex);
+ lockdep_assert_held(&trans->shrd->mutex);
/* No init ucode required? Curious, but maybe ok */
- if (!priv->ucode_init.code.len)
+ if (!trans->ucode_init.code.len)
return 0;
- if (priv->ucode_type != IWL_UCODE_NONE)
+ if (trans->shrd->ucode_type != IWL_UCODE_NONE)
return 0;
- iwlagn_init_notification_wait(priv, &calib_wait,
+ iwl_init_notification_wait(trans->shrd, &calib_wait,
CALIBRATION_COMPLETE_NOTIFICATION,
NULL, NULL);
/* Will also start the device */
- ret = iwlagn_load_ucode_wait_alive(priv, &priv->ucode_init,
- IWL_UCODE_INIT);
+ ret = iwl_load_ucode_wait_alive(trans, IWL_UCODE_INIT);
if (ret)
goto error;
- ret = iwlagn_init_alive_start(priv);
+ ret = iwl_init_alive_start(trans);
if (ret)
goto error;
@@ -621,14 +744,15 @@ int iwlagn_run_init_ucode(struct iwl_priv *priv)
* Some things may run in the background now, but we
* just wait for the calibration complete notification.
*/
- ret = iwlagn_wait_notification(priv, &calib_wait, UCODE_CALIB_TIMEOUT);
+ ret = iwl_wait_notification(trans->shrd, &calib_wait,
+ UCODE_CALIB_TIMEOUT);
goto out;
error:
- iwlagn_remove_notification(priv, &calib_wait);
+ iwl_remove_notification(trans->shrd, &calib_wait);
out:
/* Whatever happened, stop the device */
- iwl_trans_stop_device(trans(priv));
+ iwl_trans_stop_device(trans);
return ret;
}
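The notification-wait helpers added in iwl-ucode.c (iwl_init_notification_wait(), iwl_wait_notification(), iwl_remove_notification()) follow a register-first pattern: the waiter goes onto shrd->notif_waits before the firmware can possibly answer, and it must be removed by hand if the trigger can no longer arrive. A condensed sketch of that flow, mirroring iwl_load_ucode_wait_alive() above; it omits the kick_nic step and error details for brevity:

	/* illustrative only: condensed from iwl_load_ucode_wait_alive() */
	static int example_wait_for_alive(struct iwl_trans *trans)
	{
		struct iwl_notification_wait alive_wait;
		int ret;

		/* register the waiter before anything can trigger it */
		iwl_init_notification_wait(trans->shrd, &alive_wait,
					   REPLY_ALIVE, NULL, NULL);

		ret = iwl_load_given_ucode(trans, IWL_UCODE_REGULAR);
		if (ret) {
			/* nothing will fire the wait now: drop it by hand */
			iwl_remove_notification(trans->shrd, &alive_wait);
			return ret;
		}

		/* sleeps until the REPLY_ALIVE handler runs or the timeout
		 * expires; the wait entry is unlinked internally */
		return iwl_wait_notification(trans->shrd, &alive_wait,
					     UCODE_ALIVE_TIMEOUT);
	}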
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965-calib.h b/drivers/net/wireless/iwlwifi/iwl-wifi.h
index f46c80e..1850110 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965-calib.h
+++ b/drivers/net/wireless/iwlwifi/iwl-wifi.h
@@ -59,17 +59,16 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
-#ifndef __iwl_4965_calib_h__
-#define __iwl_4965_calib_h__
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-commands.h"
+#ifndef __iwl_wifi_h__
+#define __iwl_wifi_h__
-void iwl4965_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp);
-void iwl4965_sensitivity_calibration(struct iwl_priv *priv, void *resp);
-void iwl4965_init_sensitivity(struct iwl_priv *priv);
-void iwl4965_reset_run_time_calib(struct iwl_priv *priv);
-void iwl4965_calib_free_results(struct iwl_priv *priv);
+#include "iwl-shared.h"
-#endif /* __iwl_4965_calib_h__ */
+int iwl_send_bt_env(struct iwl_trans *trans, u8 action, u8 type);
+void iwl_send_prio_tbl(struct iwl_trans *trans);
+int iwl_init_alive_start(struct iwl_trans *trans);
+int iwl_run_init_ucode(struct iwl_trans *trans);
+int iwl_load_ucode_wait_alive(struct iwl_trans *trans,
+ enum iwl_ucode_type ucode_type);
+#endif /* __iwl_wifi_h__ */
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index c42be81..48e8218 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -165,11 +165,15 @@ static int iwm_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
struct key_params *params)
{
struct iwm_priv *iwm = ndev_to_iwm(ndev);
- struct iwm_key *key = &iwm->keys[key_index];
+ struct iwm_key *key;
int ret;
IWM_DBG_WEXT(iwm, DBG, "Adding key for %pM\n", mac_addr);
+ if (key_index >= IWM_NUM_KEYS)
+ return -ENOENT;
+
+ key = &iwm->keys[key_index];
memset(key, 0, sizeof(struct iwm_key));
ret = iwm_key_init(key, key_index, mac_addr, params);
if (ret < 0) {
@@ -214,8 +218,12 @@ static int iwm_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
u8 key_index, bool pairwise, const u8 *mac_addr)
{
struct iwm_priv *iwm = ndev_to_iwm(ndev);
- struct iwm_key *key = &iwm->keys[key_index];
+ struct iwm_key *key;
+ if (key_index >= IWM_NUM_KEYS)
+ return -ENOENT;
+
+ key = &iwm->keys[key_index];
if (!iwm->keys[key_index].key_len) {
IWM_DBG_WEXT(iwm, DBG, "Key %d not used\n", key_index);
return 0;
@@ -236,6 +244,9 @@ static int iwm_cfg80211_set_default_key(struct wiphy *wiphy,
IWM_DBG_WEXT(iwm, DBG, "Default key index is: %d\n", key_index);
+ if (key_index >= IWM_NUM_KEYS)
+ return -ENOENT;
+
if (!iwm->keys[key_index].key_len) {
IWM_ERR(iwm, "Key %d not used\n", key_index);
return -EINVAL;
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
index 98a179f..1f868b1 100644
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ b/drivers/net/wireless/iwmc3200wifi/main.c
@@ -91,11 +91,11 @@ static struct iwm_conf def_iwm_conf = {
.mac_addr = {0x00, 0x02, 0xb3, 0x01, 0x02, 0x03},
};
-static int modparam_reset;
+static bool modparam_reset;
module_param_named(reset, modparam_reset, bool, 0644);
MODULE_PARM_DESC(reset, "reset on firmware errors (default 0 [not reset])");
-static int modparam_wimax_enable = 1;
+static bool modparam_wimax_enable = true;
module_param_named(wimax_enable, modparam_wimax_enable, bool, 0644);
MODULE_PARM_DESC(wimax_enable, "Enable wimax core (default 1 [wimax enabled])");
@@ -130,7 +130,7 @@ static void iwm_disconnect_work(struct work_struct *work)
iwm_invalidate_mlme_profile(iwm);
clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
- iwm->umac_profile_active = 0;
+ iwm->umac_profile_active = false;
memset(iwm->bssid, 0, ETH_ALEN);
iwm->channel = 0;
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index a414768..7d708f4 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -660,7 +660,7 @@ static int iwm_mlme_profile_invalidate(struct iwm_priv *iwm, u8 *buf,
clear_bit(IWM_STATUS_SME_CONNECTING, &iwm->status);
clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
- iwm->umac_profile_active = 0;
+ iwm->umac_profile_active = false;
memset(iwm->bssid, 0, ETH_ALEN);
iwm->channel = 0;
@@ -735,7 +735,7 @@ static int iwm_mlme_update_sta_table(struct iwm_priv *iwm, u8 *buf,
umac_sta->mac_addr,
umac_sta->flags & UMAC_STA_FLAG_QOS);
- sta->valid = 1;
+ sta->valid = true;
sta->qos = umac_sta->flags & UMAC_STA_FLAG_QOS;
sta->color = GET_VAL8(umac_sta->sta_id, LMAC_STA_COLOR);
memcpy(sta->addr, umac_sta->mac_addr, ETH_ALEN);
@@ -750,12 +750,12 @@ static int iwm_mlme_update_sta_table(struct iwm_priv *iwm, u8 *buf,
sta = &iwm->sta_table[GET_VAL8(umac_sta->sta_id, LMAC_STA_ID)];
if (!memcmp(sta->addr, umac_sta->mac_addr, ETH_ALEN))
- sta->valid = 0;
+ sta->valid = false;
break;
case UMAC_OPCODE_CLEAR_ALL:
for (i = 0; i < IWM_STA_TABLE_NUM; i++)
- iwm->sta_table[i].valid = 0;
+ iwm->sta_table[i].valid = false;
break;
default:
@@ -1203,7 +1203,7 @@ static int iwm_ntf_wifi_if_wrapper(struct iwm_priv *iwm, u8 *buf,
switch (hdr->oid) {
case UMAC_WIFI_IF_CMD_SET_PROFILE:
- iwm->umac_profile_active = 1;
+ iwm->umac_profile_active = true;
break;
default:
break;
@@ -1363,7 +1363,7 @@ static int iwm_rx_handle_nonwifi(struct iwm_priv *iwm, u8 *buf,
*/
list_for_each_entry(cmd, &iwm->nonwifi_pending_cmd, pending)
if (cmd->seq_num == seq_num) {
- cmd->resp_received = 1;
+ cmd->resp_received = true;
cmd->buf.len = buf_size;
memcpy(cmd->buf.hdr, buf, buf_size);
wake_up_interruptible(&iwm->nonwifi_queue);
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index a7f1ab2..a7cd311 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -485,6 +485,7 @@ static int lbs_cfg_set_channel(struct wiphy *wiphy,
static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
struct cmd_header *resp)
{
+ struct cfg80211_bss *bss;
struct cmd_ds_802_11_scan_rsp *scanresp = (void *)resp;
int bsssize;
const u8 *pos;
@@ -632,12 +633,14 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
LBS_SCAN_RSSI_TO_MBM(rssi)/100);
if (channel &&
- !(channel->flags & IEEE80211_CHAN_DISABLED))
- cfg80211_inform_bss(wiphy, channel,
+ !(channel->flags & IEEE80211_CHAN_DISABLED)) {
+ bss = cfg80211_inform_bss(wiphy, channel,
bssid, get_unaligned_le64(tsfdesc),
capa, intvl, ie, ielen,
LBS_SCAN_RSSI_TO_MBM(rssi),
GFP_KERNEL);
+ cfg80211_put_bss(bss);
+ }
} else
lbs_deb_scan("scan response: missing BSS channel IE\n");
@@ -728,9 +731,11 @@ static void lbs_scan_worker(struct work_struct *work)
le16_to_cpu(scan_cmd->hdr.size),
lbs_ret_scan, 0);
- if (priv->scan_channel >= priv->scan_req->n_channels)
+ if (priv->scan_channel >= priv->scan_req->n_channels) {
/* Mark scan done */
+ cancel_delayed_work(&priv->scan_work);
lbs_scan_done(priv);
+ }
/* Restart network */
if (carrier)
@@ -759,12 +764,12 @@ static void _internal_start_scan(struct lbs_private *priv, bool internal,
request->n_ssids, request->n_channels, request->ie_len);
priv->scan_channel = 0;
- queue_delayed_work(priv->work_thread, &priv->scan_work,
- msecs_to_jiffies(50));
-
priv->scan_req = request;
priv->internal_scan = internal;
+ queue_delayed_work(priv->work_thread, &priv->scan_work,
+ msecs_to_jiffies(50));
+
lbs_deb_leave(LBS_DEB_CFG80211);
}
@@ -1720,6 +1725,7 @@ static void lbs_join_post(struct lbs_private *priv,
2 + 2 + /* atim */
2 + 8]; /* extended rates */
u8 *fake = fake_ie;
+ struct cfg80211_bss *bss;
lbs_deb_enter(LBS_DEB_CFG80211);
@@ -1763,14 +1769,15 @@ static void lbs_join_post(struct lbs_private *priv,
*fake++ = 0x6c;
lbs_deb_hex(LBS_DEB_CFG80211, "IE", fake_ie, fake - fake_ie);
- cfg80211_inform_bss(priv->wdev->wiphy,
- params->channel,
- bssid,
- 0,
- capability,
- params->beacon_interval,
- fake_ie, fake - fake_ie,
- 0, GFP_KERNEL);
+ bss = cfg80211_inform_bss(priv->wdev->wiphy,
+ params->channel,
+ bssid,
+ 0,
+ capability,
+ params->beacon_interval,
+ fake_ie, fake - fake_ie,
+ 0, GFP_KERNEL);
+ cfg80211_put_bss(bss);
memcpy(priv->wdev->ssid, params->ssid, params->ssid_len);
priv->wdev->ssid_len = params->ssid_len;
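Both libertas hunks above apply the same fix: cfg80211_inform_bss() hands back a referenced struct cfg80211_bss, and a caller that does not keep the pointer must drop that reference with cfg80211_put_bss(), otherwise the BSS entry can never be freed. A minimal sketch of the pattern with placeholder arguments (the wrapper function itself is illustrative, not part of the patch):

	/* illustrative wrapper, not part of the patch */
	static void example_report_bss(struct wiphy *wiphy,
				       struct ieee80211_channel *channel,
				       const u8 *bssid, u16 capability,
				       u16 beacon_interval,
				       const u8 *ie, size_t ie_len,
				       s32 signal_mbm)
	{
		struct cfg80211_bss *bss;

		bss = cfg80211_inform_bss(wiphy, channel, bssid, 0 /* tsf */,
					  capability, beacon_interval,
					  ie, ie_len, signal_mbm, GFP_KERNEL);

		/* cfg80211 keeps its own reference in the BSS table; release
		 * the one returned to us so the entry can expire normally */
		cfg80211_put_bss(bss);
	}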
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index d8d8f0d..c192671 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -704,7 +704,7 @@ out_unlock:
struct lbs_debugfs_files {
const char *name;
- int perm;
+ umode_t perm;
struct file_operations fops;
};
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c
index 885ddc1..f955b2d 100644
--- a/drivers/net/wireless/libertas/ethtool.c
+++ b/drivers/net/wireless/libertas/ethtool.c
@@ -13,13 +13,14 @@ static void lbs_ethtool_get_drvinfo(struct net_device *dev,
{
struct lbs_private *priv = dev->ml_priv;
- snprintf(info->fw_version, 32, "%u.%u.%u.p%u",
+ snprintf(info->fw_version, sizeof(info->fw_version),
+ "%u.%u.%u.p%u",
priv->fwrelease >> 24 & 0xff,
priv->fwrelease >> 16 & 0xff,
priv->fwrelease >> 8 & 0xff,
priv->fwrelease & 0xff);
- strcpy(info->driver, "libertas");
- strcpy(info->version, lbs_driver_version);
+ strlcpy(info->driver, "libertas", sizeof(info->driver));
+ strlcpy(info->version, lbs_driver_version, sizeof(info->version));
}
/*
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index e269351..3f7bf4d 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -859,7 +859,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
* Most of the libertas cards can do unaligned register access, but some
* weird ones cannot. That's especially true for the CF8305 card.
*/
- card->align_regs = 0;
+ card->align_regs = false;
card->model = get_model(p_dev->manf_id, p_dev->card_id);
if (card->model == MODEL_UNKNOWN) {
@@ -871,7 +871,7 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
/* Check if we have a current silicon */
prod_id = if_cs_read8(card, IF_CS_PRODUCT_ID);
if (card->model == MODEL_8305) {
- card->align_regs = 1;
+ card->align_regs = true;
if (prod_id < IF_CS_CF8305_B1_REV) {
pr_err("8305 rev B0 and older are not supported\n");
ret = -ENODEV;
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 728baa4..50b1ee7 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -1291,7 +1291,6 @@ static struct spi_driver libertas_spi_driver = {
.remove = __devexit_p(libertas_spi_remove),
.driver = {
.name = "libertas_spi",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
.pm = &if_spi_pm_ops,
},
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index db879c3..b5fbbc7 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -1184,29 +1184,7 @@ static struct usb_driver if_usb_driver = {
.reset_resume = if_usb_resume,
};
-static int __init if_usb_init_module(void)
-{
- int ret = 0;
-
- lbs_deb_enter(LBS_DEB_MAIN);
-
- ret = usb_register(&if_usb_driver);
-
- lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret);
- return ret;
-}
-
-static void __exit if_usb_exit_module(void)
-{
- lbs_deb_enter(LBS_DEB_MAIN);
-
- usb_deregister(&if_usb_driver);
-
- lbs_deb_leave(LBS_DEB_MAIN);
-}
-
-module_init(if_usb_init_module);
-module_exit(if_usb_exit_module);
+module_usb_driver(if_usb_driver);
MODULE_DESCRIPTION("8388 USB WLAN Driver");
MODULE_AUTHOR("Marvell International Ltd. and Red Hat, Inc.");
diff --git a/drivers/net/wireless/libertas_tf/if_usb.c b/drivers/net/wireless/libertas_tf/if_usb.c
index 68202e6..aff8b57 100644
--- a/drivers/net/wireless/libertas_tf/if_usb.c
+++ b/drivers/net/wireless/libertas_tf/if_usb.c
@@ -924,27 +924,7 @@ static struct usb_driver if_usb_driver = {
.resume = if_usb_resume,
};
-static int __init if_usb_init_module(void)
-{
- int ret = 0;
-
- lbtf_deb_enter(LBTF_DEB_MAIN);
-
- ret = usb_register(&if_usb_driver);
-
- lbtf_deb_leave_args(LBTF_DEB_MAIN, "ret %d", ret);
- return ret;
-}
-
-static void __exit if_usb_exit_module(void)
-{
- lbtf_deb_enter(LBTF_DEB_MAIN);
- usb_deregister(&if_usb_driver);
- lbtf_deb_leave(LBTF_DEB_MAIN);
-}
-
-module_init(if_usb_init_module);
-module_exit(if_usb_exit_module);
+module_usb_driver(if_usb_driver);
MODULE_DESCRIPTION("8388 USB WLAN Thinfirm Driver");
MODULE_AUTHOR("Cozybit Inc.");
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index ceb51b6..a034572 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -719,11 +719,11 @@ void lbtf_bcn_sent(struct lbtf_private *priv)
return;
if (skb_queue_empty(&priv->bc_ps_buf)) {
- bool tx_buff_bc = 0;
+ bool tx_buff_bc = false;
while ((skb = ieee80211_get_buffered_bc(priv->hw, priv->vif))) {
skb_queue_tail(&priv->bc_ps_buf, skb);
- tx_buff_bc = 1;
+ tx_buff_bc = true;
}
if (tx_buff_bc) {
ieee80211_stop_queues(priv->hw);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 523ad55..4b9e730 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -37,7 +37,8 @@ MODULE_AUTHOR("Jouni Malinen");
MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211");
MODULE_LICENSE("GPL");
-int wmediumd_pid;
+static u32 wmediumd_pid;
+
static int radios = 2;
module_param(radios, int, 0444);
MODULE_PARM_DESC(radios, "Number of simulated radios");
@@ -665,7 +666,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
bool ack;
struct ieee80211_tx_info *txi;
- int _pid;
+ u32 _pid;
mac80211_hwsim_monitor_rx(hw, skb);
@@ -676,7 +677,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
}
/* wmediumd mode check */
- _pid = wmediumd_pid;
+ _pid = ACCESS_ONCE(wmediumd_pid);
if (_pid)
return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
@@ -707,7 +708,7 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw)
{
struct mac80211_hwsim_data *data = hw->priv;
wiphy_debug(hw->wiphy, "%s\n", __func__);
- data->started = 1;
+ data->started = true;
return 0;
}
@@ -715,7 +716,7 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw)
static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
{
struct mac80211_hwsim_data *data = hw->priv;
- data->started = 0;
+ data->started = false;
del_timer(&data->beacon_timer);
wiphy_debug(hw->wiphy, "%s\n", __func__);
}
@@ -764,7 +765,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
struct ieee80211_hw *hw = arg;
struct sk_buff *skb;
struct ieee80211_tx_info *info;
- int _pid;
+ u32 _pid;
hwsim_check_magic(vif);
@@ -781,7 +782,7 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
mac80211_hwsim_monitor_rx(hw, skb);
/* wmediumd mode check */
- _pid = wmediumd_pid;
+ _pid = ACCESS_ONCE(wmediumd_pid);
if (_pid)
return mac80211_hwsim_tx_frame_nl(hw, skb, _pid);
@@ -1254,7 +1255,7 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
struct sk_buff *skb;
struct ieee80211_pspoll *pspoll;
- int _pid;
+ u32 _pid;
if (!vp->assoc)
return;
@@ -1275,7 +1276,7 @@ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif)
memcpy(pspoll->ta, mac, ETH_ALEN);
/* wmediumd mode check */
- _pid = wmediumd_pid;
+ _pid = ACCESS_ONCE(wmediumd_pid);
if (_pid)
return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid);
@@ -1292,7 +1293,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
struct hwsim_vif_priv *vp = (void *)vif->drv_priv;
struct sk_buff *skb;
struct ieee80211_hdr *hdr;
- int _pid;
+ u32 _pid;
if (!vp->assoc)
return;
@@ -1314,7 +1315,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac,
memcpy(hdr->addr3, vp->bssid, ETH_ALEN);
/* wmediumd mode check */
- _pid = wmediumd_pid;
+ _pid = ACCESS_ONCE(wmediumd_pid);
if (_pid)
return mac80211_hwsim_tx_frame_nl(data->hw, skb, _pid);
@@ -1634,8 +1635,6 @@ static int hwsim_init_netlink(void)
int rc;
printk(KERN_INFO "mac80211_hwsim: initializing netlink\n");
- wmediumd_pid = 0;
-
rc = genl_register_family_with_ops(&hwsim_genl_family,
hwsim_ops, ARRAY_SIZE(hwsim_ops));
if (rc)
@@ -1748,6 +1747,8 @@ static int __init init_mac80211_hwsim(void)
IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
IEEE80211_HW_AMPDU_AGGREGATION;
+ hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
+
/* ask mac80211 to reserve space for magic */
hw->vif_data_size = sizeof(struct hwsim_vif_priv);
hw->sta_data_size = sizeof(struct hwsim_sta_priv);
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index 7aa9aa0..681d3f2 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -33,7 +33,7 @@
* Since the buffer is linear, the function uses rotation to simulate
* circular buffer.
*/
-static int
+static void
mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
struct mwifiex_rx_reorder_tbl
*rx_reor_tbl_ptr, int start_win)
@@ -71,8 +71,6 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
rx_reor_tbl_ptr->start_win = start_win;
spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
-
- return 0;
}
/*
@@ -83,7 +81,7 @@ mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
* Since the buffer is linear, the function uses rotation to simulate
* circular buffer.
*/
-static int
+static void
mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr)
{
@@ -119,7 +117,6 @@ mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
rx_reor_tbl_ptr->start_win = (rx_reor_tbl_ptr->start_win + i)
&(MAX_TID_VALUE - 1);
spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
- return 0;
}
/*
@@ -405,7 +402,7 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
u8 *ta, u8 pkt_type, void *payload)
{
struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
- int start_win, end_win, win_size, ret;
+ int start_win, end_win, win_size;
u16 pkt_index;
rx_reor_tbl_ptr =
@@ -452,11 +449,8 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
start_win = (end_win - win_size) + 1;
else
start_win = (MAX_TID_VALUE - (win_size - seq_num)) + 1;
- ret = mwifiex_11n_dispatch_pkt_until_start_win(priv,
+ mwifiex_11n_dispatch_pkt_until_start_win(priv,
rx_reor_tbl_ptr, start_win);
-
- if (ret)
- return ret;
}
if (pkt_type != PKT_TYPE_BAR) {
@@ -475,9 +469,9 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
* Dispatch all packets sequentially from start_win until a
* hole is found and adjust the start_win appropriately
*/
- ret = mwifiex_11n_scan_and_dispatch(priv, rx_reor_tbl_ptr);
+ mwifiex_11n_scan_and_dispatch(priv, rx_reor_tbl_ptr);
- return ret;
+ return 0;
}
/*
diff --git a/drivers/net/wireless/mwifiex/Kconfig b/drivers/net/wireless/mwifiex/Kconfig
index 8f2797a..2a078ce 100644
--- a/drivers/net/wireless/mwifiex/Kconfig
+++ b/drivers/net/wireless/mwifiex/Kconfig
@@ -10,12 +10,12 @@ config MWIFIEX
mwifiex.
config MWIFIEX_SDIO
- tristate "Marvell WiFi-Ex Driver for SD8787"
+ tristate "Marvell WiFi-Ex Driver for SD8787/SD8797"
depends on MWIFIEX && MMC
select FW_LOADER
---help---
This adds support for wireless adapters based on Marvell
- 8787 chipset with SDIO interface.
+ 8787/8797 chipsets with SDIO interface.
If you choose to build it as a module, it will be called
mwifiex_sdio.
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 462c710..2210a0f 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -24,50 +24,26 @@
* This function maps the nl802.11 channel type into driver channel type.
*
* The mapping is as follows -
- * NL80211_CHAN_NO_HT -> NO_SEC_CHANNEL
- * NL80211_CHAN_HT20 -> NO_SEC_CHANNEL
- * NL80211_CHAN_HT40PLUS -> SEC_CHANNEL_ABOVE
- * NL80211_CHAN_HT40MINUS -> SEC_CHANNEL_BELOW
- * Others -> NO_SEC_CHANNEL
+ * NL80211_CHAN_NO_HT -> IEEE80211_HT_PARAM_CHA_SEC_NONE
+ * NL80211_CHAN_HT20 -> IEEE80211_HT_PARAM_CHA_SEC_NONE
+ * NL80211_CHAN_HT40PLUS -> IEEE80211_HT_PARAM_CHA_SEC_ABOVE
+ * NL80211_CHAN_HT40MINUS -> IEEE80211_HT_PARAM_CHA_SEC_BELOW
+ * Others -> IEEE80211_HT_PARAM_CHA_SEC_NONE
*/
-static int
-mwifiex_cfg80211_channel_type_to_mwifiex_channels(enum nl80211_channel_type
- channel_type)
+static u8
+mwifiex_cfg80211_channel_type_to_sec_chan_offset(enum nl80211_channel_type
+ channel_type)
{
switch (channel_type) {
case NL80211_CHAN_NO_HT:
case NL80211_CHAN_HT20:
- return NO_SEC_CHANNEL;
+ return IEEE80211_HT_PARAM_CHA_SEC_NONE;
case NL80211_CHAN_HT40PLUS:
- return SEC_CHANNEL_ABOVE;
+ return IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
case NL80211_CHAN_HT40MINUS:
- return SEC_CHANNEL_BELOW;
- default:
- return NO_SEC_CHANNEL;
- }
-}
-
-/*
- * This function maps the driver channel type into nl802.11 channel type.
- *
- * The mapping is as follows -
- * NO_SEC_CHANNEL -> NL80211_CHAN_HT20
- * SEC_CHANNEL_ABOVE -> NL80211_CHAN_HT40PLUS
- * SEC_CHANNEL_BELOW -> NL80211_CHAN_HT40MINUS
- * Others -> NL80211_CHAN_HT20
- */
-static enum nl80211_channel_type
-mwifiex_channels_to_cfg80211_channel_type(int channel_type)
-{
- switch (channel_type) {
- case NO_SEC_CHANNEL:
- return NL80211_CHAN_HT20;
- case SEC_CHANNEL_ABOVE:
- return NL80211_CHAN_HT40PLUS;
- case SEC_CHANNEL_BELOW:
- return NL80211_CHAN_HT40MINUS;
+ return IEEE80211_HT_PARAM_CHA_SEC_BELOW;
default:
- return NL80211_CHAN_HT20;
+ return IEEE80211_HT_PARAM_CHA_SEC_NONE;
}
}
@@ -120,10 +96,11 @@ mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev,
static int
mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy,
enum nl80211_tx_power_setting type,
- int dbm)
+ int mbm)
{
struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
struct mwifiex_power_cfg power_cfg;
+ int dbm = MBM_TO_DBM(mbm);
if (type == NL80211_TX_POWER_FIXED) {
power_cfg.is_power_auto = 0;
@@ -330,37 +307,51 @@ mwifiex_set_rf_channel(struct mwifiex_private *priv,
enum nl80211_channel_type channel_type)
{
struct mwifiex_chan_freq_power cfp;
- struct mwifiex_ds_band_cfg band_cfg;
u32 config_bands = 0;
struct wiphy *wiphy = priv->wdev->wiphy;
+ struct mwifiex_adapter *adapter = priv->adapter;
if (chan) {
- memset(&band_cfg, 0, sizeof(band_cfg));
/* Set appropriate bands */
- if (chan->band == IEEE80211_BAND_2GHZ)
- config_bands = BAND_B | BAND_G | BAND_GN;
- else
- config_bands = BAND_AN | BAND_A;
- if (priv->bss_mode == NL80211_IFTYPE_STATION
- || priv->bss_mode == NL80211_IFTYPE_UNSPECIFIED) {
- band_cfg.config_bands = config_bands;
- } else if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
- band_cfg.config_bands = config_bands;
- band_cfg.adhoc_start_band = config_bands;
+ if (chan->band == IEEE80211_BAND_2GHZ) {
+ if (channel_type == NL80211_CHAN_NO_HT)
+ if (priv->adapter->config_bands == BAND_B ||
+ priv->adapter->config_bands == BAND_G)
+ config_bands =
+ priv->adapter->config_bands;
+ else
+ config_bands = BAND_B | BAND_G;
+ else
+ config_bands = BAND_B | BAND_G | BAND_GN;
+ } else {
+ if (channel_type == NL80211_CHAN_NO_HT)
+ config_bands = BAND_A;
+ else
+ config_bands = BAND_AN | BAND_A;
}
- band_cfg.sec_chan_offset =
- mwifiex_cfg80211_channel_type_to_mwifiex_channels
+ if (!((config_bands | adapter->fw_bands) &
+ ~adapter->fw_bands)) {
+ adapter->config_bands = config_bands;
+ if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
+ adapter->adhoc_start_band = config_bands;
+ if ((config_bands & BAND_GN) ||
+ (config_bands & BAND_AN))
+ adapter->adhoc_11n_enabled = true;
+ else
+ adapter->adhoc_11n_enabled = false;
+ }
+ }
+ adapter->sec_chan_offset =
+ mwifiex_cfg80211_channel_type_to_sec_chan_offset
(channel_type);
-
- if (mwifiex_set_radio_band_cfg(priv, &band_cfg))
- return -EFAULT;
+ adapter->channel_type = channel_type;
mwifiex_send_domain_info_cmd_fw(wiphy);
}
wiphy_dbg(wiphy, "info: setting band %d, channel offset %d and "
- "mode %d\n", config_bands, band_cfg.sec_chan_offset,
+ "mode %d\n", config_bands, adapter->sec_chan_offset,
priv->bss_mode);
if (!chan)
return 0;
@@ -696,9 +687,9 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
const u8 *peer,
const struct cfg80211_bitrate_mask *mask)
{
- struct mwifiex_ds_band_cfg band_cfg;
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
int index = 0, mode = 0, i;
+ struct mwifiex_adapter *adapter = priv->adapter;
/* Currently only 2.4GHz is supported */
for (i = 0; i < mwifiex_band_2ghz.n_bitrates; i++) {
@@ -720,16 +711,15 @@ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy,
mode |= BAND_B;
}
- memset(&band_cfg, 0, sizeof(band_cfg));
- band_cfg.config_bands = mode;
-
- if (priv->bss_mode == NL80211_IFTYPE_ADHOC)
- band_cfg.adhoc_start_band = mode;
-
- band_cfg.sec_chan_offset = NO_SEC_CHANNEL;
-
- if (mwifiex_set_radio_band_cfg(priv, &band_cfg))
- return -EFAULT;
+ if (!((mode | adapter->fw_bands) & ~adapter->fw_bands)) {
+ adapter->config_bands = mode;
+ if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
+ adapter->adhoc_start_band = mode;
+ adapter->adhoc_11n_enabled = false;
+ }
+ }
+ adapter->sec_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ adapter->channel_type = NL80211_CHAN_NO_HT;
wiphy_debug(wiphy, "info: device configured in 802.11%s%s mode\n",
(mode & BAND_B) ? "b" : "",
@@ -750,17 +740,13 @@ mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
- if (priv->disconnect)
- return -EBUSY;
-
- priv->disconnect = 1;
if (mwifiex_deauthenticate(priv, NULL))
return -EFAULT;
wiphy_dbg(wiphy, "info: successfully disconnected from %pM:"
" reason code %d\n", priv->cfg_bssid, reason_code);
- queue_work(priv->workqueue, &priv->cfg_workqueue);
+ memset(priv->cfg_bssid, 0, ETH_ALEN);
return 0;
}
@@ -780,6 +766,7 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
{
struct ieee80211_channel *chan;
struct mwifiex_bss_info bss_info;
+ struct cfg80211_bss *bss;
int ie_len;
u8 ie_buf[IEEE80211_MAX_SSID_LEN + sizeof(struct ieee_types_header)];
enum ieee80211_band band;
@@ -800,9 +787,10 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
ieee80211_channel_to_frequency(bss_info.bss_chan,
band));
- cfg80211_inform_bss(priv->wdev->wiphy, chan,
+ bss = cfg80211_inform_bss(priv->wdev->wiphy, chan,
bss_info.bssid, 0, WLAN_CAPABILITY_IBSS,
0, ie_buf, ie_len, 0, GFP_KERNEL);
+ cfg80211_put_bss(bss);
memcpy(priv->cfg_bssid, bss_info.bssid, ETH_ALEN);
return 0;
@@ -851,10 +839,15 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
if (channel)
ret = mwifiex_set_rf_channel(priv, channel,
- mwifiex_channels_to_cfg80211_channel_type
- (priv->adapter->chan_offset));
+ priv->adapter->channel_type);
- ret = mwifiex_set_encode(priv, NULL, 0, 0, 1); /* Disable keys */
+ /* As this is new association, clear locally stored
+ * keys and security related flags */
+ priv->sec_info.wpa_enabled = false;
+ priv->sec_info.wpa2_enabled = false;
+ priv->wep_key_curr_index = 0;
+ priv->sec_info.encryption_mode = 0;
+ ret = mwifiex_set_encode(priv, NULL, 0, 0, 1);
if (mode == NL80211_IFTYPE_ADHOC) {
/* "privacy" is set only for ad-hoc mode */
@@ -899,6 +892,7 @@ mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
dev_dbg(priv->adapter->dev,
"info: setting wep encryption"
" with key len %d\n", sme->key_len);
+ priv->wep_key_curr_index = sme->key_idx;
ret = mwifiex_set_encode(priv, sme->key, sme->key_len,
sme->key_idx, 0);
}
@@ -978,27 +972,32 @@ mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
int ret = 0;
- if (priv->assoc_request)
- return -EBUSY;
-
if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
wiphy_err(wiphy, "received infra assoc request "
"when station is in ibss mode\n");
goto done;
}
- priv->assoc_request = -EINPROGRESS;
-
wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n",
(char *) sme->ssid, sme->bssid);
ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid,
priv->bss_mode, sme->channel, sme, 0);
-
- priv->assoc_request = 1;
done:
- priv->assoc_result = ret;
- queue_work(priv->workqueue, &priv->cfg_workqueue);
+ if (!ret) {
+ cfg80211_connect_result(priv->netdev, priv->cfg_bssid, NULL, 0,
+ NULL, 0, WLAN_STATUS_SUCCESS,
+ GFP_KERNEL);
+ dev_dbg(priv->adapter->dev,
+ "info: associated to bssid %pM successfully\n",
+ priv->cfg_bssid);
+ } else {
+ dev_dbg(priv->adapter->dev,
+ "info: association to bssid %pM failed\n",
+ priv->cfg_bssid);
+ memset(priv->cfg_bssid, 0, ETH_ALEN);
+ }
+
return ret;
}
@@ -1015,28 +1014,29 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
int ret = 0;
- if (priv->ibss_join_request)
- return -EBUSY;
-
if (priv->bss_mode != NL80211_IFTYPE_ADHOC) {
wiphy_err(wiphy, "request to join ibss received "
"when station is not in ibss mode\n");
goto done;
}
- priv->ibss_join_request = -EINPROGRESS;
-
wiphy_dbg(wiphy, "info: trying to join to %s and bssid %pM\n",
(char *) params->ssid, params->bssid);
ret = mwifiex_cfg80211_assoc(priv, params->ssid_len, params->ssid,
params->bssid, priv->bss_mode,
params->channel, NULL, params->privacy);
-
- priv->ibss_join_request = 1;
done:
- priv->ibss_join_result = ret;
- queue_work(priv->workqueue, &priv->cfg_workqueue);
+ if (!ret) {
+ cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid, GFP_KERNEL);
+ dev_dbg(priv->adapter->dev,
+ "info: joined/created adhoc network with bssid"
+ " %pM successfully\n", priv->cfg_bssid);
+ } else {
+ dev_dbg(priv->adapter->dev,
+ "info: failed creating/joining adhoc network\n");
+ }
+
return ret;
}
@@ -1051,17 +1051,12 @@ mwifiex_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
{
struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy);
- if (priv->disconnect)
- return -EBUSY;
-
- priv->disconnect = 1;
-
wiphy_dbg(wiphy, "info: disconnecting from essid %pM\n",
priv->cfg_bssid);
if (mwifiex_deauthenticate(priv, NULL))
return -EFAULT;
- queue_work(priv->workqueue, &priv->cfg_workqueue);
+ memset(priv->cfg_bssid, 0, ETH_ALEN);
return 0;
}
@@ -1078,15 +1073,42 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_scan_request *request)
{
struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ int i;
+ struct ieee80211_channel *chan;
wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name);
- if (priv->scan_request && priv->scan_request != request)
- return -EBUSY;
-
priv->scan_request = request;
- queue_work(priv->workqueue, &priv->cfg_workqueue);
+ priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
+ GFP_KERNEL);
+ if (!priv->user_scan_cfg) {
+ dev_err(priv->adapter->dev, "failed to alloc scan_req\n");
+ return -ENOMEM;
+ }
+ for (i = 0; i < request->n_ssids; i++) {
+ memcpy(priv->user_scan_cfg->ssid_list[i].ssid,
+ request->ssids[i].ssid, request->ssids[i].ssid_len);
+ priv->user_scan_cfg->ssid_list[i].max_len =
+ request->ssids[i].ssid_len;
+ }
+ for (i = 0; i < request->n_channels; i++) {
+ chan = request->channels[i];
+ priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value;
+ priv->user_scan_cfg->chan_list[i].radio_type = chan->band;
+
+ if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+ priv->user_scan_cfg->chan_list[i].scan_type =
+ MWIFIEX_SCAN_TYPE_PASSIVE;
+ else
+ priv->user_scan_cfg->chan_list[i].scan_type =
+ MWIFIEX_SCAN_TYPE_ACTIVE;
+
+ priv->user_scan_cfg->chan_list[i].scan_time = 0;
+ }
+ if (mwifiex_set_user_scan_ioctl(priv, priv->user_scan_cfg))
+ return -EFAULT;
+
return 0;
}
@@ -1292,10 +1314,6 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev)
priv->media_connected = false;
- cancel_work_sync(&priv->cfg_workqueue);
- flush_workqueue(priv->workqueue);
- destroy_workqueue(priv->workqueue);
-
priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
return 0;
@@ -1373,9 +1391,6 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv)
memcpy(wdev->wiphy->perm_addr, priv->curr_addr, ETH_ALEN);
wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
- /* We are using custom domains */
- wdev->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
-
/* Reserve space for bss band information */
wdev->wiphy->bss_priv_size = sizeof(u8);
@@ -1404,100 +1419,3 @@ int mwifiex_register_cfg80211(struct mwifiex_private *priv)
return ret;
}
-
-/*
- * This function handles the result of different pending network operations.
- *
- * The following operations are handled and CFG802.11 subsystem is
- * notified accordingly -
- * - Scan request completion
- * - Association request completion
- * - IBSS join request completion
- * - Disconnect request completion
- */
-void
-mwifiex_cfg80211_results(struct work_struct *work)
-{
- struct mwifiex_private *priv =
- container_of(work, struct mwifiex_private, cfg_workqueue);
- struct mwifiex_user_scan_cfg *scan_req;
- int ret = 0, i;
- struct ieee80211_channel *chan;
-
- if (priv->scan_request) {
- scan_req = kzalloc(sizeof(struct mwifiex_user_scan_cfg),
- GFP_KERNEL);
- if (!scan_req) {
- dev_err(priv->adapter->dev, "failed to alloc "
- "scan_req\n");
- return;
- }
- for (i = 0; i < priv->scan_request->n_ssids; i++) {
- memcpy(scan_req->ssid_list[i].ssid,
- priv->scan_request->ssids[i].ssid,
- priv->scan_request->ssids[i].ssid_len);
- scan_req->ssid_list[i].max_len =
- priv->scan_request->ssids[i].ssid_len;
- }
- for (i = 0; i < priv->scan_request->n_channels; i++) {
- chan = priv->scan_request->channels[i];
- scan_req->chan_list[i].chan_number = chan->hw_value;
- scan_req->chan_list[i].radio_type = chan->band;
- if (chan->flags & IEEE80211_CHAN_DISABLED)
- scan_req->chan_list[i].scan_type =
- MWIFIEX_SCAN_TYPE_PASSIVE;
- else
- scan_req->chan_list[i].scan_type =
- MWIFIEX_SCAN_TYPE_ACTIVE;
- scan_req->chan_list[i].scan_time = 0;
- }
- if (mwifiex_set_user_scan_ioctl(priv, scan_req))
- ret = -EFAULT;
- priv->scan_result_status = ret;
- dev_dbg(priv->adapter->dev, "info: %s: sending scan results\n",
- __func__);
- cfg80211_scan_done(priv->scan_request,
- (priv->scan_result_status < 0));
- priv->scan_request = NULL;
- kfree(scan_req);
- }
-
- if (priv->assoc_request == 1) {
- if (!priv->assoc_result) {
- cfg80211_connect_result(priv->netdev, priv->cfg_bssid,
- NULL, 0, NULL, 0,
- WLAN_STATUS_SUCCESS,
- GFP_KERNEL);
- dev_dbg(priv->adapter->dev,
- "info: associated to bssid %pM successfully\n",
- priv->cfg_bssid);
- } else {
- dev_dbg(priv->adapter->dev,
- "info: association to bssid %pM failed\n",
- priv->cfg_bssid);
- memset(priv->cfg_bssid, 0, ETH_ALEN);
- }
- priv->assoc_request = 0;
- priv->assoc_result = 0;
- }
-
- if (priv->ibss_join_request == 1) {
- if (!priv->ibss_join_result) {
- cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid,
- GFP_KERNEL);
- dev_dbg(priv->adapter->dev,
- "info: joined/created adhoc network with bssid"
- " %pM successfully\n", priv->cfg_bssid);
- } else {
- dev_dbg(priv->adapter->dev,
- "info: failed creating/joining adhoc network\n");
- }
- priv->ibss_join_request = 0;
- priv->ibss_join_result = 0;
- }
-
- if (priv->disconnect) {
- memset(priv->cfg_bssid, 0, ETH_ALEN);
- priv->disconnect = 0;
- }
-}
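
The reworked handlers above notify cfg80211 synchronously, and the new .scan() translates each requested channel into the driver's user-scan format inline instead of deferring both jobs to the removed cfg_workqueue worker. Below is a minimal sketch of that per-channel rule; struct scan_chan_sketch is a hypothetical mirror of the chan_list element whose fields (chan_number, radio_type, scan_type, scan_time) appear in the hunk above, the MWIFIEX_SCAN_TYPE_* constants are the driver's own, and the usual kernel headers (net/cfg80211.h, linux/types.h) are assumed.

/* Hypothetical mirror of the chan_list entry filled by the new .scan() path. */
struct scan_chan_sketch {
        u8 chan_number;
        u8 radio_type;
        u8 scan_type;           /* MWIFIEX_SCAN_TYPE_ACTIVE or _PASSIVE */
        u16 scan_time;
};

static void sketch_fill_chan(struct scan_chan_sketch *dst,
                             const struct ieee80211_channel *chan)
{
        dst->chan_number = chan->hw_value;
        dst->radio_type = chan->band;
        /* Channels flagged passive-scan-only must not be actively probed. */
        dst->scan_type = (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) ?
                         MWIFIEX_SCAN_TYPE_PASSIVE : MWIFIEX_SCAN_TYPE_ACTIVE;
        dst->scan_time = 0;     /* 0 = use the adapter's default dwell time */
}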
diff --git a/drivers/net/wireless/mwifiex/cfg80211.h b/drivers/net/wireless/mwifiex/cfg80211.h
index 8d010f2..76c76c6 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.h
+++ b/drivers/net/wireless/mwifiex/cfg80211.h
@@ -26,5 +26,4 @@
int mwifiex_register_cfg80211(struct mwifiex_private *);
-void mwifiex_cfg80211_results(struct work_struct *work);
#endif
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index f2e6de0..1782a77 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -75,18 +75,32 @@ static u8 supported_rates_n[N_SUPPORTED_RATES] = { 0x02, 0x04, 0 };
* This function maps an index in supported rates table into
* the corresponding data rate.
*/
-u32 mwifiex_index_to_data_rate(u8 index, u8 ht_info)
+u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index,
+ u8 ht_info)
{
- u16 mcs_rate[4][8] = {
- {0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e}
- , /* LG 40M */
- {0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c}
- , /* SG 40M */
- {0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82}
- , /* LG 20M */
- {0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90}
- }; /* SG 20M */
-
+ /*
+ * For every mcs_rate row, the first 8 entries cover the 1x1 stream
+ * configuration, and all 16 entries cover 2x2 streams.
+ */
+ u16 mcs_rate[4][16] = {
+ /* LGI 40M */
+ { 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e,
+ 0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c },
+
+ /* SGI 40M */
+ { 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c,
+ 0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 },
+
+ /* LGI 20M */
+ { 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82,
+ 0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 },
+
+ /* SGI 20M */
+ { 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90,
+ 0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 }
+ };
+ u32 mcs_num_supp =
+ (priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2) ? 16 : 8;
u32 rate;
if (ht_info & BIT(0)) {
@@ -95,7 +109,7 @@ u32 mwifiex_index_to_data_rate(u8 index, u8 ht_info)
rate = 0x0D; /* MCS 32 SGI rate */
else
rate = 0x0C; /* MCS 32 LGI rate */
- } else if (index < 8) {
+ } else if (index < mcs_num_supp) {
if (ht_info & BIT(1)) {
if (ht_info & BIT(2))
/* SGI, 40M */
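
The table extension above doubles each row to 16 MCS entries so 2x2-stream chips (hw_dev_mcs_support == HT_STREAM_2X2) can be mapped; the values appear to be in 500 kbps units (e.g. 0x82 = 130 -> 65 Mbps for MCS7, 20 MHz, long GI). The following is a hedged editor's sketch of the row/column selection for the common case, omitting the MCS 32 special case handled above; the ht_info bit meanings follow the surrounding code, and BIT() is the kernel bitops helper.

/* Editor's sketch, not part of the patch: map (index, ht_info) to a rate. */
static u32 mcs_to_rate_sketch(const u16 mcs_rate[4][16], u8 index,
                              u8 ht_info, u32 mcs_num_supp)
{
        int row;

        if (!(ht_info & BIT(0)) || index >= mcs_num_supp)
                return 0;                               /* not a mappable HT rate */

        if (ht_info & BIT(1))                           /* 40 MHz */
                row = (ht_info & BIT(2)) ? 1 : 0;       /* SGI : LGI */
        else                                            /* 20 MHz */
                row = (ht_info & BIT(2)) ? 3 : 2;       /* SGI : LGI */

        return mcs_rate[row][index];
}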
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 0cc5d73..51c5417 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -165,6 +165,7 @@ enum MWIFIEX_802_11_WEP_STATUS {
#define GET_RXMCSSUPP(DevMCSSupported) (DevMCSSupported & 0x0f)
#define SETHT_MCS32(x) (x[4] |= 1)
+#define HT_STREAM_2X2 0x22
#define SET_SECONDARYCHAN(RadioType, SECCHAN) (RadioType |= (SECCHAN << 4))
@@ -375,8 +376,6 @@ enum mwifiex_chan_scan_mode_bitmasks {
MWIFIEX_DISABLE_CHAN_FILT = BIT(1),
};
-#define SECOND_CHANNEL_BELOW 0x30
-#define SECOND_CHANNEL_ABOVE 0x10
struct mwifiex_chan_scan_param_set {
u8 radio_type;
u8 chan_number;
@@ -673,7 +672,7 @@ struct host_cmd_ds_802_11_ad_hoc_start {
union ieee_types_phy_param_set phy_param_set;
u16 reserved1;
__le16 cap_info_bitmap;
- u8 DataRate[HOSTCMD_SUPPORTED_RATES];
+ u8 data_rate[HOSTCMD_SUPPORTED_RATES];
} __packed;
struct host_cmd_ds_802_11_ad_hoc_result {
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index d792b3f..1d0ec57 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -187,8 +187,6 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
struct mwifiex_opt_sleep_confirm *sleep_cfm_buf = NULL;
skb_put(adapter->sleep_cfm, sizeof(struct mwifiex_opt_sleep_confirm));
- sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *)
- (adapter->sleep_cfm->data);
adapter->cmd_sent = false;
@@ -248,12 +246,14 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
memset(adapter->event_body, 0, sizeof(adapter->event_body));
adapter->hw_dot_11n_dev_cap = 0;
adapter->hw_dev_mcs_support = 0;
- adapter->chan_offset = 0;
+ adapter->sec_chan_offset = 0;
adapter->adhoc_11n_enabled = false;
mwifiex_wmm_init(adapter);
if (adapter->sleep_cfm) {
+ sleep_cfm_buf = (struct mwifiex_opt_sleep_confirm *)
+ adapter->sleep_cfm->data;
memset(sleep_cfm_buf, 0, adapter->sleep_cfm->len);
sleep_cfm_buf->command =
cpu_to_le16(HostCmd_CMD_802_11_PS_MODE_ENH);
@@ -283,6 +283,45 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
}
/*
+ * This function sets trans_start per tx_queue
+ */
+void mwifiex_set_trans_start(struct net_device *dev)
+{
+ int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++)
+ netdev_get_tx_queue(dev, i)->trans_start = jiffies;
+
+ dev->trans_start = jiffies;
+}
+
+/*
+ * This function wakes up all queues in net_device
+ */
+void mwifiex_wake_up_net_dev_queue(struct net_device *netdev,
+ struct mwifiex_adapter *adapter)
+{
+ unsigned long dev_queue_flags;
+
+ spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
+ netif_tx_wake_all_queues(netdev);
+ spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
+}
+
+/*
+ * This function stops all queues in net_device
+ */
+void mwifiex_stop_net_dev_queue(struct net_device *netdev,
+ struct mwifiex_adapter *adapter)
+{
+ unsigned long dev_queue_flags;
+
+ spin_lock_irqsave(&adapter->queue_lock, dev_queue_flags);
+ netif_tx_stop_all_queues(netdev);
+ spin_unlock_irqrestore(&adapter->queue_lock, dev_queue_flags);
+}
+
+/*
* This function releases the lock variables and frees the locks and
* associated locks.
*/
@@ -343,7 +382,8 @@ mwifiex_free_adapter(struct mwifiex_adapter *adapter)
adapter->if_ops.cleanup_if(adapter);
- dev_kfree_skb_any(adapter->sleep_cfm);
+ if (adapter->sleep_cfm)
+ dev_kfree_skb_any(adapter->sleep_cfm);
}
/*
@@ -359,6 +399,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
spin_lock_init(&adapter->int_lock);
spin_lock_init(&adapter->main_proc_lock);
spin_lock_init(&adapter->mwifiex_cmd_lock);
+ spin_lock_init(&adapter->queue_lock);
for (i = 0; i < adapter->priv_num; i++) {
if (adapter->priv[i]) {
priv = adapter->priv[i];
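
The three helpers added above exist because the mwifiex net devices are multi-queue: flow control and trans_start updates have to touch every TX queue, serialized by the new adapter->queue_lock. A small sketch of the call-site pattern the rest of this patch switches to (main.c, sta_event.c, sta_ioctl.c, txrx.c); sketch_tx_flow_control itself is illustrative only.

static void sketch_tx_flow_control(struct mwifiex_private *priv, bool throttle)
{
        if (throttle) {
                /* record the stall time, then stop every TX queue */
                mwifiex_set_trans_start(priv->netdev);
                mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
        } else {
                /* wake every TX queue once the pending count drops */
                mwifiex_wake_up_net_dev_queue(priv->netdev, priv->adapter);
        }
}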
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index e0b68e7..d5d81f1 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -62,17 +62,6 @@ enum {
BAND_AN = 16,
};
-#define NO_SEC_CHANNEL 0
-#define SEC_CHANNEL_ABOVE 1
-#define SEC_CHANNEL_BELOW 3
-
-struct mwifiex_ds_band_cfg {
- u32 config_bands;
- u32 adhoc_start_band;
- u32 adhoc_channel;
- u32 sec_chan_offset;
-};
-
enum {
ADHOC_IDLE,
ADHOC_STARTED,
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 62b4c29..0b0eb5e 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -724,8 +724,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
u32 cmd_append_size = 0;
u32 i;
u16 tmp_cap;
- uint16_t ht_cap_info;
struct mwifiex_ie_types_chan_list_param_set *chan_tlv;
+ u8 radio_type;
struct mwifiex_ie_types_htcap *ht_cap;
struct mwifiex_ie_types_htinfo *ht_info;
@@ -837,8 +837,8 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL;
}
- memset(adhoc_start->DataRate, 0, sizeof(adhoc_start->DataRate));
- mwifiex_get_active_data_rates(priv, adhoc_start->DataRate);
+ memset(adhoc_start->data_rate, 0, sizeof(adhoc_start->data_rate));
+ mwifiex_get_active_data_rates(priv, adhoc_start->data_rate);
if ((adapter->adhoc_start_band & BAND_G) &&
(priv->curr_pkt_filter & HostCmd_ACT_MAC_ADHOC_G_PROTECTION_ON)) {
if (mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL,
@@ -850,20 +850,19 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
}
}
/* Find the last non zero */
- for (i = 0; i < sizeof(adhoc_start->DataRate) &&
- adhoc_start->DataRate[i];
- i++)
- ;
+ for (i = 0; i < sizeof(adhoc_start->data_rate); i++)
+ if (!adhoc_start->data_rate[i])
+ break;
priv->curr_bss_params.num_of_rates = i;
/* Copy the ad-hoc creating rates into Current BSS rate structure */
memcpy(&priv->curr_bss_params.data_rates,
- &adhoc_start->DataRate, priv->curr_bss_params.num_of_rates);
+ &adhoc_start->data_rate, priv->curr_bss_params.num_of_rates);
dev_dbg(adapter->dev, "info: ADHOC_S_CMD: rates=%02x %02x %02x %02x\n",
- adhoc_start->DataRate[0], adhoc_start->DataRate[1],
- adhoc_start->DataRate[2], adhoc_start->DataRate[3]);
+ adhoc_start->data_rate[0], adhoc_start->data_rate[1],
+ adhoc_start->data_rate[2], adhoc_start->data_rate[3]);
dev_dbg(adapter->dev, "info: ADHOC_S_CMD: AD-HOC Start command is ready\n");
@@ -886,12 +885,14 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
= mwifiex_band_to_radio_type(priv->curr_bss_params.band);
if (adapter->adhoc_start_band & BAND_GN
|| adapter->adhoc_start_band & BAND_AN) {
- if (adapter->chan_offset == SEC_CHANNEL_ABOVE)
+ if (adapter->sec_chan_offset ==
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE)
chan_tlv->chan_scan_param[0].radio_type |=
- SECOND_CHANNEL_ABOVE;
- else if (adapter->chan_offset == SEC_CHANNEL_BELOW)
+ (IEEE80211_HT_PARAM_CHA_SEC_ABOVE << 4);
+ else if (adapter->sec_chan_offset ==
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW)
chan_tlv->chan_scan_param[0].radio_type |=
- SECOND_CHANNEL_BELOW;
+ (IEEE80211_HT_PARAM_CHA_SEC_BELOW << 4);
}
dev_dbg(adapter->dev, "info: ADHOC_S_CMD: TLV Band = %d\n",
chan_tlv->chan_scan_param[0].radio_type);
@@ -914,55 +915,40 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
}
if (adapter->adhoc_11n_enabled) {
- {
- ht_cap = (struct mwifiex_ie_types_htcap *) pos;
- memset(ht_cap, 0,
- sizeof(struct mwifiex_ie_types_htcap));
- ht_cap->header.type =
- cpu_to_le16(WLAN_EID_HT_CAPABILITY);
- ht_cap->header.len =
- cpu_to_le16(sizeof(struct ieee80211_ht_cap));
- ht_cap_info = le16_to_cpu(ht_cap->ht_cap.cap_info);
-
- ht_cap_info |= IEEE80211_HT_CAP_SGI_20;
- if (adapter->chan_offset) {
- ht_cap_info |= IEEE80211_HT_CAP_SGI_40;
- ht_cap_info |= IEEE80211_HT_CAP_DSSSCCK40;
- ht_cap_info |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
- SETHT_MCS32(ht_cap->ht_cap.mcs.rx_mask);
- }
+ /* Fill HT CAPABILITY */
+ ht_cap = (struct mwifiex_ie_types_htcap *) pos;
+ memset(ht_cap, 0, sizeof(struct mwifiex_ie_types_htcap));
+ ht_cap->header.type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
+ ht_cap->header.len =
+ cpu_to_le16(sizeof(struct ieee80211_ht_cap));
+ radio_type = mwifiex_band_to_radio_type(
+ priv->adapter->config_bands);
+ mwifiex_fill_cap_info(priv, radio_type, ht_cap);
+
+ pos += sizeof(struct mwifiex_ie_types_htcap);
+ cmd_append_size +=
+ sizeof(struct mwifiex_ie_types_htcap);
- ht_cap->ht_cap.ampdu_params_info
- = IEEE80211_HT_MAX_AMPDU_64K;
- ht_cap->ht_cap.mcs.rx_mask[0] = 0xff;
- pos += sizeof(struct mwifiex_ie_types_htcap);
- cmd_append_size +=
- sizeof(struct mwifiex_ie_types_htcap);
- }
- {
- ht_info = (struct mwifiex_ie_types_htinfo *) pos;
- memset(ht_info, 0,
- sizeof(struct mwifiex_ie_types_htinfo));
- ht_info->header.type =
- cpu_to_le16(WLAN_EID_HT_INFORMATION);
- ht_info->header.len =
- cpu_to_le16(sizeof(struct ieee80211_ht_info));
- ht_info->ht_info.control_chan =
- (u8) priv->curr_bss_params.bss_descriptor.
- channel;
- if (adapter->chan_offset) {
- ht_info->ht_info.ht_param =
- adapter->chan_offset;
- ht_info->ht_info.ht_param |=
+ /* Fill HT INFORMATION */
+ ht_info = (struct mwifiex_ie_types_htinfo *) pos;
+ memset(ht_info, 0, sizeof(struct mwifiex_ie_types_htinfo));
+ ht_info->header.type = cpu_to_le16(WLAN_EID_HT_INFORMATION);
+ ht_info->header.len =
+ cpu_to_le16(sizeof(struct ieee80211_ht_info));
+
+ ht_info->ht_info.control_chan =
+ (u8) priv->curr_bss_params.bss_descriptor.channel;
+ if (adapter->sec_chan_offset) {
+ ht_info->ht_info.ht_param = adapter->sec_chan_offset;
+ ht_info->ht_info.ht_param |=
IEEE80211_HT_PARAM_CHAN_WIDTH_ANY;
- }
- ht_info->ht_info.operation_mode =
- cpu_to_le16(IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
- ht_info->ht_info.basic_set[0] = 0xff;
- pos += sizeof(struct mwifiex_ie_types_htinfo);
- cmd_append_size +=
- sizeof(struct mwifiex_ie_types_htinfo);
}
+ ht_info->ht_info.operation_mode =
+ cpu_to_le16(IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
+ ht_info->ht_info.basic_set[0] = 0xff;
+ pos += sizeof(struct mwifiex_ie_types_htinfo);
+ cmd_append_size +=
+ sizeof(struct mwifiex_ie_types_htinfo);
}
cmd->size = cpu_to_le16((u16)
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 67e6db7..b728f54 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -401,7 +401,7 @@ mwifiex_fill_buffer(struct sk_buff *skb)
static int
mwifiex_open(struct net_device *dev)
{
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
return 0;
}
@@ -465,8 +465,8 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
atomic_inc(&priv->adapter->tx_pending);
if (atomic_read(&priv->adapter->tx_pending) >= MAX_TX_PENDING) {
- netif_stop_queue(priv->netdev);
- dev->trans_start = jiffies;
+ mwifiex_set_trans_start(dev);
+ mwifiex_stop_net_dev_queue(priv->netdev, priv->adapter);
}
queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
@@ -533,7 +533,7 @@ mwifiex_tx_timeout(struct net_device *dev)
dev_err(priv->adapter->dev, "%lu : Tx timeout, bss_index=%d\n",
jiffies, priv->bss_index);
- dev->trans_start = jiffies;
+ mwifiex_set_trans_start(dev);
priv->num_tx_timeout++;
}
@@ -586,8 +586,6 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv,
priv->media_connected = false;
memset(&priv->nick_name, 0, sizeof(priv->nick_name));
priv->num_tx_timeout = 0;
- priv->workqueue = create_singlethread_workqueue("cfg80211_wq");
- INIT_WORK(&priv->cfg_workqueue, mwifiex_cfg80211_results);
memcpy(dev->dev_addr, priv->curr_addr, ETH_ALEN);
}
@@ -793,7 +791,8 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
priv = adapter->priv[i];
if (priv && priv->netdev) {
if (!netif_queue_stopped(priv->netdev))
- netif_stop_queue(priv->netdev);
+ mwifiex_stop_net_dev_queue(priv->netdev,
+ adapter);
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
}
@@ -823,7 +822,9 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
continue;
rtnl_lock();
- mwifiex_del_virtual_intf(priv->wdev->wiphy, priv->netdev);
+ if (priv->wdev && priv->netdev)
+ mwifiex_del_virtual_intf(priv->wdev->wiphy,
+ priv->netdev);
rtnl_unlock();
}
@@ -831,9 +832,11 @@ int mwifiex_remove_card(struct mwifiex_adapter *adapter, struct semaphore *sem)
if (!priv)
goto exit_remove;
- wiphy_unregister(priv->wdev->wiphy);
- wiphy_free(priv->wdev->wiphy);
- kfree(priv->wdev);
+ if (priv->wdev) {
+ wiphy_unregister(priv->wdev->wiphy);
+ wiphy_free(priv->wdev->wiphy);
+ kfree(priv->wdev);
+ }
mwifiex_terminate_workqueue(adapter);
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index 30f138b..3186aa4 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -453,15 +453,8 @@ struct mwifiex_private {
u8 scan_pending_on_block;
u8 report_scan_result;
struct cfg80211_scan_request *scan_request;
- int scan_result_status;
- int assoc_request;
- u16 assoc_result;
- int ibss_join_request;
- u16 ibss_join_result;
- bool disconnect;
+ struct mwifiex_user_scan_cfg *user_scan_cfg;
u8 cfg_bssid[6];
- struct workqueue_struct *workqueue;
- struct work_struct cfg_workqueue;
u8 country_code[IEEE80211_COUNTRY_STRING_LEN];
struct wps wps;
u8 scan_block;
@@ -647,7 +640,8 @@ struct mwifiex_adapter {
u32 hw_dot_11n_dev_cap;
u8 hw_dev_mcs_support;
u8 adhoc_11n_enabled;
- u8 chan_offset;
+ u8 sec_chan_offset;
+ enum nl80211_channel_type channel_type;
struct mwifiex_dbg dbg;
u8 arp_filter[ARP_FILTER_MAX_BUF_SIZE];
u32 arp_filter_size;
@@ -655,10 +649,19 @@ struct mwifiex_adapter {
struct mwifiex_wait_queue cmd_wait_q;
u8 scan_wait_q_woken;
struct cmd_ctrl_node *cmd_queued;
+ spinlock_t queue_lock; /* lock for tx queues */
};
int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
+void mwifiex_set_trans_start(struct net_device *dev);
+
+void mwifiex_stop_net_dev_queue(struct net_device *netdev,
+ struct mwifiex_adapter *adapter);
+
+void mwifiex_wake_up_net_dev_queue(struct net_device *netdev,
+ struct mwifiex_adapter *adapter);
+
int mwifiex_init_fw(struct mwifiex_adapter *adapter);
int mwifiex_init_fw_complete(struct mwifiex_adapter *adapter);
@@ -775,7 +778,8 @@ struct mwifiex_chan_freq_power *
struct mwifiex_chan_freq_power *mwifiex_get_cfp_by_band_and_freq_from_cfg80211(
struct mwifiex_private *priv,
u8 band, u32 freq);
-u32 mwifiex_index_to_data_rate(u8 index, u8 ht_info);
+u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv, u8 index,
+ u8 ht_info);
u32 mwifiex_find_freq_from_band_chan(u8, u8);
int mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv, u16 vsie_mask,
u8 **buffer);
@@ -951,8 +955,6 @@ int mwifiex_main_process(struct mwifiex_adapter *);
int mwifiex_bss_set_channel(struct mwifiex_private *,
struct mwifiex_chan_freq_power *cfp);
-int mwifiex_set_radio_band_cfg(struct mwifiex_private *,
- struct mwifiex_ds_band_cfg *);
int mwifiex_get_bss_info(struct mwifiex_private *,
struct mwifiex_bss_info *);
int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv,
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index d34acf0..4053509 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -386,8 +386,7 @@ static int mwifiex_pcie_create_txbd_ring(struct mwifiex_adapter *adapter)
card->txbd_ring_vbase = kzalloc(card->txbd_ring_size, GFP_KERNEL);
if (!card->txbd_ring_vbase) {
dev_err(adapter->dev, "Unable to allocate buffer for txbd ring.\n");
- kfree(card->txbd_ring_vbase);
- return -1;
+ return -ENOMEM;
}
card->txbd_ring_pbase = virt_to_phys(card->txbd_ring_vbase);
@@ -477,7 +476,7 @@ static int mwifiex_pcie_create_rxbd_ring(struct mwifiex_adapter *adapter)
if (!card->rxbd_ring_vbase) {
dev_err(adapter->dev, "Unable to allocate buffer for "
"rxbd_ring.\n");
- return -1;
+ return -ENOMEM;
}
card->rxbd_ring_pbase = virt_to_phys(card->rxbd_ring_vbase);
@@ -570,7 +569,7 @@ static int mwifiex_pcie_create_evtbd_ring(struct mwifiex_adapter *adapter)
if (!card->evtbd_ring_vbase) {
dev_err(adapter->dev, "Unable to allocate buffer. "
"Terminating download\n");
- return -1;
+ return -ENOMEM;
}
card->evtbd_ring_pbase = virt_to_phys(card->evtbd_ring_vbase);
@@ -1229,15 +1228,16 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
if (!skb)
return 0;
- if (rdptr >= MWIFIEX_MAX_EVT_BD)
+ if (rdptr >= MWIFIEX_MAX_EVT_BD) {
dev_err(adapter->dev, "event_complete: Invalid rdptr 0x%x\n",
rdptr);
+ return -EINVAL;
+ }
/* Read the event ring write pointer set by firmware */
if (mwifiex_read_reg(adapter, REG_EVTBD_WRPTR, &wrptr)) {
dev_err(adapter->dev, "event_complete: failed to read REG_EVTBD_WRPTR\n");
- ret = -1;
- goto done;
+ return -1;
}
if (!card->evt_buf_list[rdptr]) {
@@ -1266,15 +1266,9 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
/* Write the event ring read pointer in to REG_EVTBD_RDPTR */
if (mwifiex_write_reg(adapter, REG_EVTBD_RDPTR, card->evtbd_rdptr)) {
dev_err(adapter->dev, "event_complete: failed to read REG_EVTBD_RDPTR\n");
- ret = -1;
- goto done;
+ return -1;
}
-done:
- /* Free the buffer for failure case */
- if (ret && skb)
- dev_kfree_skb_any(skb);
-
dev_dbg(adapter->dev, "info: Check Events Again\n");
ret = mwifiex_pcie_process_event_ready(adapter);
@@ -1672,9 +1666,8 @@ static int mwifiex_pcie_host_to_card(struct mwifiex_adapter *adapter, u8 type,
struct sk_buff *skb,
struct mwifiex_tx_param *tx_param)
{
- if (!adapter || !skb) {
- dev_err(adapter->dev, "Invalid parameter in %s <%p, %p>\n",
- __func__, adapter, skb);
+ if (!skb) {
+ dev_err(adapter->dev, "Passed NULL skb to %s\n", __func__);
return -1;
}
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 8d3ab37..6396d33 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -500,7 +500,6 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
struct ieee80211_channel *ch;
struct mwifiex_adapter *adapter = priv->adapter;
int chan_idx = 0, i;
- u8 scan_type;
for (band = 0; (band < IEEE80211_NUM_BANDS) ; band++) {
@@ -514,19 +513,20 @@ mwifiex_scan_create_channel_list(struct mwifiex_private *priv,
if (ch->flags & IEEE80211_CHAN_DISABLED)
continue;
scan_chan_list[chan_idx].radio_type = band;
- scan_type = ch->flags & IEEE80211_CHAN_PASSIVE_SCAN;
+
if (user_scan_in &&
user_scan_in->chan_list[0].scan_time)
scan_chan_list[chan_idx].max_scan_time =
cpu_to_le16((u16) user_scan_in->
chan_list[0].scan_time);
- else if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
+ else if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
scan_chan_list[chan_idx].max_scan_time =
cpu_to_le16(adapter->passive_scan_time);
else
scan_chan_list[chan_idx].max_scan_time =
cpu_to_le16(adapter->active_scan_time);
- if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
+
+ if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
scan_chan_list[chan_idx].chan_scan_mode_bitmap
|= MWIFIEX_PASSIVE_SCAN;
else
@@ -1391,11 +1391,8 @@ int mwifiex_set_user_scan_ioctl(struct mwifiex_private *priv,
{
int status;
- priv->adapter->scan_wait_q_woken = false;
-
status = mwifiex_scan_networks(priv, scan_req);
- if (!status)
- status = mwifiex_wait_queue_complete(priv->adapter);
+ queue_work(priv->adapter->workqueue, &priv->adapter->main_work);
return status;
}
@@ -1537,11 +1534,6 @@ done:
return 0;
}
-static void mwifiex_free_bss_priv(struct cfg80211_bss *bss)
-{
- kfree(bss->priv);
-}
-
/*
* This function handles the command response of scan.
*
@@ -1767,7 +1759,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
cap_info_bitmap, beacon_period,
ie_buf, ie_len, rssi, GFP_KERNEL);
*(u8 *)bss->priv = band;
- bss->free_priv = mwifiex_free_bss_priv;
+ cfg80211_put_bss(bss);
if (priv->media_connected && !memcmp(bssid,
priv->curr_bss_params.bss_descriptor
@@ -1801,6 +1793,14 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
up(&priv->async_sem);
}
+ if (priv->user_scan_cfg) {
+ dev_dbg(priv->adapter->dev, "info: %s: sending scan "
+ "results\n", __func__);
+ cfg80211_scan_done(priv->scan_request, 0);
+ priv->scan_request = NULL;
+ kfree(priv->user_scan_cfg);
+ priv->user_scan_cfg = NULL;
+ }
} else {
/* Get scan command from scan_pending_q and put to
cmd_pending_q */
diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c
index 283171b..d39d845 100644
--- a/drivers/net/wireless/mwifiex/sdio.c
+++ b/drivers/net/wireless/mwifiex/sdio.c
@@ -256,10 +256,13 @@ static int mwifiex_sdio_resume(struct device *dev)
/* Device ID for SD8787 */
#define SDIO_DEVICE_ID_MARVELL_8787 (0x9119)
+/* Device ID for SD8797 */
+#define SDIO_DEVICE_ID_MARVELL_8797 (0x9129)
/* WLAN IDs */
static const struct sdio_device_id mwifiex_ids[] = {
{SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787)},
+ {SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797)},
{},
};
@@ -1084,7 +1087,7 @@ static int mwifiex_sdio_card_to_host_mp_aggr(struct mwifiex_adapter *adapter,
(adapter->ioport | 0x1000 |
(card->mpa_rx.ports << 4)) +
card->mpa_rx.start_port, 1))
- return -1;
+ goto error;
curr_ptr = card->mpa_rx.buf;
@@ -1127,12 +1130,29 @@ rx_curr_single:
if (mwifiex_sdio_card_to_host(adapter, &pkt_type,
skb->data, skb->len,
adapter->ioport + port))
- return -1;
+ goto error;
mwifiex_decode_rx_packet(adapter, skb, pkt_type);
}
return 0;
+
+error:
+ if (MP_RX_AGGR_IN_PROGRESS(card)) {
+ /* Multiport-aggregation transfer failed - cleanup */
+ for (pind = 0; pind < card->mpa_rx.pkt_cnt; pind++) {
+ /* free each skb queued for this aggregation */
+ skb_deaggr = card->mpa_rx.skb_arr[pind];
+ dev_kfree_skb_any(skb_deaggr);
+ }
+ MP_RX_AGGR_BUF_RESET(card);
+ }
+
+ if (f_do_rx_cur)
+ /* Single transfer pending. Free curr buff also */
+ dev_kfree_skb_any(skb);
+
+ return -1;
}
/*
@@ -1268,7 +1288,6 @@ static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
dev_dbg(adapter->dev,
"info: CFG reg val =%x\n", cr);
- dev_kfree_skb_any(skb);
return -1;
}
}
@@ -1573,7 +1592,16 @@ static int mwifiex_register_dev(struct mwifiex_adapter *adapter)
sdio_set_drvdata(func, card);
adapter->dev = &func->dev;
- strcpy(adapter->fw_name, SD8787_DEFAULT_FW_NAME);
+
+ switch (func->device) {
+ case SDIO_DEVICE_ID_MARVELL_8797:
+ strcpy(adapter->fw_name, SD8797_DEFAULT_FW_NAME);
+ break;
+ case SDIO_DEVICE_ID_MARVELL_8787:
+ default:
+ strcpy(adapter->fw_name, SD8787_DEFAULT_FW_NAME);
+ break;
+ }
return 0;
@@ -1630,14 +1658,14 @@ static int mwifiex_init_sdio(struct mwifiex_adapter *adapter)
card->mpa_tx.pkt_cnt = 0;
card->mpa_tx.start_port = 0;
- card->mpa_tx.enabled = 0;
+ card->mpa_tx.enabled = 1;
card->mpa_tx.pkt_aggr_limit = SDIO_MP_AGGR_DEF_PKT_LIMIT;
card->mpa_rx.buf_len = 0;
card->mpa_rx.pkt_cnt = 0;
card->mpa_rx.start_port = 0;
- card->mpa_rx.enabled = 0;
+ card->mpa_rx.enabled = 1;
card->mpa_rx.pkt_aggr_limit = SDIO_MP_AGGR_DEF_PKT_LIMIT;
/* Allocate buffers for SDIO MP-A */
@@ -1774,4 +1802,5 @@ MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION("Marvell WiFi-Ex SDIO Driver version " SDIO_VERSION);
MODULE_VERSION(SDIO_VERSION);
MODULE_LICENSE("GPL v2");
-MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
+MODULE_FIRMWARE(SD8787_DEFAULT_FW_NAME);
+MODULE_FIRMWARE(SD8797_DEFAULT_FW_NAME);
diff --git a/drivers/net/wireless/mwifiex/sdio.h b/drivers/net/wireless/mwifiex/sdio.h
index 3f71180..a3fb322 100644
--- a/drivers/net/wireless/mwifiex/sdio.h
+++ b/drivers/net/wireless/mwifiex/sdio.h
@@ -29,6 +29,7 @@
#include "main.h"
#define SD8787_DEFAULT_FW_NAME "mrvl/sd8787_uapsta.bin"
+#define SD8797_DEFAULT_FW_NAME "mrvl/sd8797_uapsta.bin"
#define BLOCK_MODE 1
#define BYTE_MODE 0
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index ea6518d..6e443ff 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -748,7 +748,7 @@ static int mwifiex_cmd_802_11_rf_channel(struct mwifiex_private *priv,
cpu_to_le16(HostCmd_SCAN_RADIO_TYPE_A);
rf_type = le16_to_cpu(rf_chan->rf_type);
- SET_SECONDARYCHAN(rf_type, priv->adapter->chan_offset);
+ SET_SECONDARYCHAN(rf_type, priv->adapter->sec_chan_offset);
rf_chan->current_channel = cpu_to_le16(*channel);
}
rf_chan->action = cpu_to_le16(cmd_action);
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 7a16b0c..e812db8 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -508,7 +508,7 @@ static int mwifiex_ret_802_11_tx_rate_query(struct mwifiex_private *priv,
priv->tx_htinfo = resp->params.tx_rate.ht_info;
if (!priv->is_data_rate_auto)
priv->data_rate =
- mwifiex_index_to_data_rate(priv->tx_rate,
+ mwifiex_index_to_data_rate(priv, priv->tx_rate,
priv->tx_htinfo);
return 0;
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index f204810..d7aa21d 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -115,18 +115,17 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv)
if (adapter->num_cmd_timeout && adapter->curr_cmd)
return;
priv->media_connected = false;
- if (!priv->disconnect) {
- priv->disconnect = 1;
- dev_dbg(adapter->dev, "info: successfully disconnected from"
- " %pM: reason code %d\n", priv->cfg_bssid,
- WLAN_REASON_DEAUTH_LEAVING);
- cfg80211_disconnected(priv->netdev,
- WLAN_REASON_DEAUTH_LEAVING, NULL, 0,
- GFP_KERNEL);
- queue_work(priv->workqueue, &priv->cfg_workqueue);
+ dev_dbg(adapter->dev, "info: successfully disconnected from"
+ " %pM: reason code %d\n", priv->cfg_bssid,
+ WLAN_REASON_DEAUTH_LEAVING);
+ if (priv->bss_mode == NL80211_IFTYPE_STATION) {
+ cfg80211_disconnected(priv->netdev, WLAN_REASON_DEAUTH_LEAVING,
+ NULL, 0, GFP_KERNEL);
}
+ memset(priv->cfg_bssid, 0, ETH_ALEN);
+
if (!netif_queue_stopped(priv->netdev))
- netif_stop_queue(priv->netdev);
+ mwifiex_stop_net_dev_queue(priv->netdev, adapter);
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
/* Reset wireless stats signal info */
@@ -201,7 +200,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
if (!netif_carrier_ok(priv->netdev))
netif_carrier_on(priv->netdev);
if (netif_queue_stopped(priv->netdev))
- netif_wake_queue(priv->netdev);
+ mwifiex_wake_up_net_dev_queue(priv->netdev, adapter);
break;
case EVENT_DEAUTHENTICATED:
@@ -292,7 +291,7 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
priv->adhoc_is_link_sensed = false;
mwifiex_clean_txrx(priv);
if (!netif_queue_stopped(priv->netdev))
- netif_stop_queue(priv->netdev);
+ mwifiex_stop_net_dev_queue(priv->netdev, adapter);
if (netif_carrier_ok(priv->netdev))
netif_carrier_off(priv->netdev);
break;
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 1679c25..b0fbf5d 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -54,7 +54,7 @@ int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist,
int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
{
bool cancel_flag = false;
- int status = adapter->cmd_wait_q.status;
+ int status;
struct cmd_ctrl_node *cmd_queued;
if (!adapter->cmd_queued)
@@ -79,6 +79,8 @@ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter)
mwifiex_cancel_pending_ioctl(adapter);
dev_dbg(adapter->dev, "cmd cancel\n");
}
+
+ status = adapter->cmd_wait_q.status;
adapter->cmd_wait_q.status = 0;
return status;
@@ -239,7 +241,9 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
"associating...\n");
if (!netif_queue_stopped(priv->netdev))
- netif_stop_queue(priv->netdev);
+ mwifiex_stop_net_dev_queue(priv->netdev, adapter);
+ if (netif_carrier_ok(priv->netdev))
+ netif_carrier_off(priv->netdev);
/* Clear any past association response stored for
* application retrieval */
@@ -270,7 +274,9 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
ret = mwifiex_check_network_compatibility(priv, bss_desc);
if (!netif_queue_stopped(priv->netdev))
- netif_stop_queue(priv->netdev);
+ mwifiex_stop_net_dev_queue(priv->netdev, adapter);
+ if (netif_carrier_ok(priv->netdev))
+ netif_carrier_off(priv->netdev);
if (!ret) {
dev_dbg(adapter->dev, "info: network found in scan"
@@ -477,67 +483,6 @@ int mwifiex_get_bss_info(struct mwifiex_private *priv,
}
/*
- * The function sets band configurations.
- *
- * it performs extra checks to make sure the Ad-Hoc
- * band and channel are compatible. Otherwise it returns an error.
- *
- */
-int mwifiex_set_radio_band_cfg(struct mwifiex_private *priv,
- struct mwifiex_ds_band_cfg *radio_cfg)
-{
- struct mwifiex_adapter *adapter = priv->adapter;
- u8 infra_band, adhoc_band;
- u32 adhoc_channel;
-
- infra_band = (u8) radio_cfg->config_bands;
- adhoc_band = (u8) radio_cfg->adhoc_start_band;
- adhoc_channel = radio_cfg->adhoc_channel;
-
- /* SET Infra band */
- if ((infra_band | adapter->fw_bands) & ~adapter->fw_bands)
- return -1;
-
- adapter->config_bands = infra_band;
-
- /* SET Ad-hoc Band */
- if ((adhoc_band | adapter->fw_bands) & ~adapter->fw_bands)
- return -1;
-
- if (adhoc_band)
- adapter->adhoc_start_band = adhoc_band;
- adapter->chan_offset = (u8) radio_cfg->sec_chan_offset;
- /*
- * If no adhoc_channel is supplied verify if the existing adhoc
- * channel compiles with new adhoc_band
- */
- if (!adhoc_channel) {
- if (!mwifiex_get_cfp_by_band_and_channel_from_cfg80211
- (priv, adapter->adhoc_start_band,
- priv->adhoc_channel)) {
- /* Pass back the default channel */
- radio_cfg->adhoc_channel = DEFAULT_AD_HOC_CHANNEL;
- if ((adapter->adhoc_start_band & BAND_A)
- || (adapter->adhoc_start_band & BAND_AN))
- radio_cfg->adhoc_channel =
- DEFAULT_AD_HOC_CHANNEL_A;
- }
- } else { /* Retrurn error if adhoc_band and
- adhoc_channel combination is invalid */
- if (!mwifiex_get_cfp_by_band_and_channel_from_cfg80211
- (priv, adapter->adhoc_start_band, (u16) adhoc_channel))
- return -1;
- priv->adhoc_channel = (u8) adhoc_channel;
- }
- if ((adhoc_band & BAND_GN) || (adhoc_band & BAND_AN))
- adapter->adhoc_11n_enabled = true;
- else
- adapter->adhoc_11n_enabled = false;
-
- return 0;
-}
-
-/*
* The function disables auto deep sleep mode.
*/
int mwifiex_disable_auto_ds(struct mwifiex_private *priv)
@@ -837,8 +782,8 @@ int mwifiex_drv_get_data_rate(struct mwifiex_private *priv,
if (!ret) {
if (rate->is_rate_auto)
- rate->rate = mwifiex_index_to_data_rate(priv->tx_rate,
- priv->tx_htinfo);
+ rate->rate = mwifiex_index_to_data_rate(priv,
+ priv->tx_rate, priv->tx_htinfo);
else
rate->rate = priv->data_rate;
} else {
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index 2743051..5e1ef7e 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -126,6 +126,9 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
u16 rx_pkt_type;
struct mwifiex_private *priv = adapter->priv[rx_info->bss_index];
+ if (!priv)
+ return -1;
+
local_rx_pd = (struct rxpd *) (skb->data);
rx_pkt_type = local_rx_pd->rx_pkt_type;
@@ -189,12 +192,11 @@ int mwifiex_process_sta_rx_packet(struct mwifiex_adapter *adapter,
(u8) local_rx_pd->rx_pkt_type,
skb);
- if (ret || (rx_pkt_type == PKT_TYPE_BAR)) {
- if (priv && (ret == -1))
- priv->stats.rx_dropped++;
-
+ if (ret || (rx_pkt_type == PKT_TYPE_BAR))
dev_kfree_skb_any(skb);
- }
+
+ if (ret)
+ priv->stats.rx_dropped++;
return ret;
}
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index a206f41..d9274a1 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -134,7 +134,7 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
if (!priv)
goto done;
- priv->netdev->trans_start = jiffies;
+ mwifiex_set_trans_start(priv->netdev);
if (!status) {
priv->stats.tx_packets++;
priv->stats.tx_bytes += skb->len;
@@ -152,7 +152,8 @@ int mwifiex_write_data_complete(struct mwifiex_adapter *adapter,
if ((GET_BSS_ROLE(tpriv) == MWIFIEX_BSS_ROLE_STA)
&& (tpriv->media_connected)) {
if (netif_queue_stopped(tpriv->netdev))
- netif_wake_queue(tpriv->netdev);
+ mwifiex_wake_up_net_dev_queue(tpriv->netdev,
+ adapter);
}
}
done:
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 995695c..dd5aeaf 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -28,10 +28,10 @@
#define MWL8K_DESC "Marvell TOPDOG(R) 802.11 Wireless Network Driver"
#define MWL8K_NAME KBUILD_MODNAME
-#define MWL8K_VERSION "0.12"
+#define MWL8K_VERSION "0.13"
/* Module parameters */
-static unsigned ap_mode_default;
+static bool ap_mode_default;
module_param(ap_mode_default, bool, 0);
MODULE_PARM_DESC(ap_mode_default,
"Set to 1 to make ap mode the default instead of sta mode");
@@ -198,6 +198,7 @@ struct mwl8k_priv {
/* firmware access */
struct mutex fw_mutex;
struct task_struct *fw_mutex_owner;
+ struct task_struct *hw_restart_owner;
int fw_mutex_depth;
struct completion *hostcmd_wait;
@@ -262,6 +263,10 @@ struct mwl8k_priv {
*/
struct ieee80211_tx_queue_params wmm_params[MWL8K_TX_WMM_QUEUES];
+ /* To perform the task of reloading the firmware */
+ struct work_struct fw_reload;
+ bool hw_restart_in_progress;
+
/* async firmware loading state */
unsigned fw_state;
char *fw_pref;
@@ -738,10 +743,10 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
ready_code = ioread32(priv->regs + MWL8K_HIU_INT_CODE);
if (ready_code == MWL8K_FWAP_READY) {
- priv->ap_fw = 1;
+ priv->ap_fw = true;
break;
} else if (ready_code == MWL8K_FWSTA_READY) {
- priv->ap_fw = 0;
+ priv->ap_fw = false;
break;
}
@@ -1498,6 +1503,18 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
might_sleep();
+ /* Since a firmware restart is in progress, allow only the
+ * commands issued by the restart code and block all other
+ * commands; they would fail anyway because the firmware
+ * has crashed.
+ */
+ if (priv->hw_restart_in_progress) {
+ if (priv->hw_restart_owner == current)
+ return 0;
+ else
+ return -EBUSY;
+ }
+
/*
* The TX queues are stopped at this point, so this test
* doesn't need to take ->tx_lock.
@@ -1541,6 +1558,8 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
wiphy_err(hw->wiphy, "tx rings stuck for %d ms\n",
MWL8K_TX_WAIT_TIMEOUT_MS);
mwl8k_dump_tx_rings(hw);
+ priv->hw_restart_in_progress = true;
+ ieee80211_queue_work(hw, &priv->fw_reload);
rc = -ETIMEDOUT;
}
@@ -2058,7 +2077,9 @@ static int mwl8k_fw_lock(struct ieee80211_hw *hw)
rc = mwl8k_tx_wait_empty(hw);
if (rc) {
- ieee80211_wake_queues(hw);
+ if (!priv->hw_restart_in_progress)
+ ieee80211_wake_queues(hw);
+
mutex_unlock(&priv->fw_mutex);
return rc;
@@ -2077,7 +2098,9 @@ static void mwl8k_fw_unlock(struct ieee80211_hw *hw)
struct mwl8k_priv *priv = hw->priv;
if (!--priv->fw_mutex_depth) {
- ieee80211_wake_queues(hw);
+ if (!priv->hw_restart_in_progress)
+ ieee80211_wake_queues(hw);
+
priv->fw_mutex_owner = NULL;
mutex_unlock(&priv->fw_mutex);
}
@@ -2754,7 +2777,7 @@ static int mwl8k_cmd_tx_power(struct ieee80211_hw *hw,
else if (channel->band == IEEE80211_BAND_5GHZ)
cmd->band = cpu_to_le16(0x4);
- cmd->channel = channel->hw_value;
+ cmd->channel = cpu_to_le16(channel->hw_value);
if (conf->channel_type == NL80211_CHAN_NO_HT ||
conf->channel_type == NL80211_CHAN_HT20) {
@@ -4043,7 +4066,7 @@ static int mwl8k_cmd_encryption_remove_key(struct ieee80211_hw *hw,
goto done;
if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
- WLAN_CIPHER_SUITE_WEP104)
+ key->cipher == WLAN_CIPHER_SUITE_WEP104)
mwl8k_vif->wep_key_conf[key->keyidx].enabled = 0;
cmd->action = cpu_to_le32(MWL8K_ENCR_REMOVE_KEY);
@@ -4398,7 +4421,8 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
struct mwl8k_priv *priv = hw->priv;
int i;
- mwl8k_cmd_radio_disable(hw);
+ if (!priv->hw_restart_in_progress)
+ mwl8k_cmd_radio_disable(hw);
ieee80211_stop_queues(hw);
@@ -4499,6 +4523,16 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
return 0;
}
+static void mwl8k_remove_vif(struct mwl8k_priv *priv, struct mwl8k_vif *vif)
+{
+ /* Has ieee80211_restart_hw re-added the removed interfaces? */
+ if (!priv->macids_used)
+ return;
+
+ priv->macids_used &= ~(1 << vif->macid);
+ list_del(&vif->list);
+}
+
static void mwl8k_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
@@ -4510,8 +4544,54 @@ static void mwl8k_remove_interface(struct ieee80211_hw *hw,
mwl8k_cmd_set_mac_addr(hw, vif, "\x00\x00\x00\x00\x00\x00");
- priv->macids_used &= ~(1 << mwl8k_vif->macid);
- list_del(&mwl8k_vif->list);
+ mwl8k_remove_vif(priv, mwl8k_vif);
+}
+
+static void mwl8k_hw_restart_work(struct work_struct *work)
+{
+ struct mwl8k_priv *priv =
+ container_of(work, struct mwl8k_priv, fw_reload);
+ struct ieee80211_hw *hw = priv->hw;
+ struct mwl8k_device_info *di;
+ int rc;
+
+ /* If some command is waiting for a response, clear it */
+ if (priv->hostcmd_wait != NULL) {
+ complete(priv->hostcmd_wait);
+ priv->hostcmd_wait = NULL;
+ }
+
+ priv->hw_restart_owner = current;
+ di = priv->device_info;
+ mwl8k_fw_lock(hw);
+
+ if (priv->ap_fw)
+ rc = mwl8k_reload_firmware(hw, di->fw_image_ap);
+ else
+ rc = mwl8k_reload_firmware(hw, di->fw_image_sta);
+
+ if (rc)
+ goto fail;
+
+ priv->hw_restart_owner = NULL;
+ priv->hw_restart_in_progress = false;
+
+ /*
+ * This unlock wakes up the queues and also
+ * reopens the command path for other
+ * commands.
+ */
+ mwl8k_fw_unlock(hw);
+
+ ieee80211_restart_hw(hw);
+
+ wiphy_err(hw->wiphy, "Firmware restarted successfully\n");
+
+ return;
+fail:
+ mwl8k_fw_unlock(hw);
+
+ wiphy_err(hw->wiphy, "Firmware restart failed\n");
}
static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
@@ -5024,7 +5104,11 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
for (i = 0; i < MAX_AMPDU_ATTEMPTS; i++) {
rc = mwl8k_check_ba(hw, stream);
- if (!rc)
+ /* If a HW restart is in progress, mwl8k_post_cmd will
+ * return -EBUSY; avoid retrying mwl8k_check_ba in
+ * that case.
+ */
+ if (!rc || rc == -EBUSY)
break;
/*
* HW queues take time to be flushed, give them
@@ -5044,14 +5128,14 @@ mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
ieee80211_start_tx_ba_cb_irqsafe(vif, addr, tid);
break;
case IEEE80211_AMPDU_TX_STOP:
- if (stream == NULL)
- break;
- if (stream->state == AMPDU_STREAM_ACTIVE) {
- spin_unlock(&priv->stream_lock);
- mwl8k_destroy_ba(hw, stream);
- spin_lock(&priv->stream_lock);
+ if (stream) {
+ if (stream->state == AMPDU_STREAM_ACTIVE) {
+ spin_unlock(&priv->stream_lock);
+ mwl8k_destroy_ba(hw, stream);
+ spin_lock(&priv->stream_lock);
+ }
+ mwl8k_remove_stream(hw, stream);
}
- mwl8k_remove_stream(hw, stream);
ieee80211_stop_tx_ba_cb_irqsafe(vif, addr, tid);
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
@@ -5263,12 +5347,15 @@ fail:
mwl8k_release_firmware(priv);
}
+#define MAX_RESTART_ATTEMPTS 1
static int mwl8k_init_firmware(struct ieee80211_hw *hw, char *fw_image,
bool nowait)
{
struct mwl8k_priv *priv = hw->priv;
int rc;
+ int count = MAX_RESTART_ATTEMPTS;
+retry:
/* Reset firmware and hardware */
mwl8k_hw_reset(priv);
@@ -5290,6 +5377,16 @@ static int mwl8k_init_firmware(struct ieee80211_hw *hw, char *fw_image,
/* Reclaim memory once firmware is successfully loaded */
mwl8k_release_firmware(priv);
+ if (rc && count) {
+ /* FW did not start successfully;
+ * let's try one more time
+ */
+ count--;
+ wiphy_err(hw->wiphy, "Trying to reload the firmware again\n");
+ msleep(20);
+ goto retry;
+ }
+
return rc;
}
@@ -5365,7 +5462,14 @@ static int mwl8k_probe_hw(struct ieee80211_hw *hw)
goto err_free_queues;
}
- memset(priv->ampdu, 0, sizeof(priv->ampdu));
+ /*
+ * When hw restart is requested,
+ * mac80211 will take care of clearing
+ * the ampdu streams, so do not clear
+ * the ampdu state here
+ */
+ if (!priv->hw_restart_in_progress)
+ memset(priv->ampdu, 0, sizeof(priv->ampdu));
/*
* Temporarily enable interrupts. Initial firmware host
@@ -5439,10 +5543,20 @@ static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image)
{
int i, rc = 0;
struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_vif *vif, *tmp_vif;
mwl8k_stop(hw);
mwl8k_rxq_deinit(hw, 0);
+ /*
+ * All existing interfaces are re-added by ieee80211_reconfig, which
+ * means the driver must remove its existing interface state before
+ * calling ieee80211_restart_hw.
+ */
+ if (priv->hw_restart_in_progress)
+ list_for_each_entry_safe(vif, tmp_vif, &priv->vif_list, list)
+ mwl8k_remove_vif(priv, vif);
+
for (i = 0; i < mwl8k_tx_queues(priv); i++)
mwl8k_txq_deinit(hw, i);
@@ -5454,6 +5568,9 @@ static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image)
if (rc)
goto fail;
+ if (priv->hw_restart_in_progress)
+ return rc;
+
rc = mwl8k_start(hw);
if (rc)
goto fail;
@@ -5517,13 +5634,15 @@ static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
INIT_LIST_HEAD(&priv->vif_list);
/* Set default radio state and preamble */
- priv->radio_on = 0;
- priv->radio_short_preamble = 0;
+ priv->radio_on = false;
+ priv->radio_short_preamble = false;
/* Finalize join worker */
INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker);
/* Handle watchdog ba events */
INIT_WORK(&priv->watchdog_ba_handle, mwl8k_watchdog_ba_events);
+ /* To reload the firmware if it crashes */
+ INIT_WORK(&priv->fw_reload, mwl8k_hw_restart_work);
/* TX reclaim and RX tasklets. */
tasklet_init(&priv->poll_tx_task, mwl8k_tx_poll, (unsigned long)hw);
@@ -5667,6 +5786,9 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
rc = mwl8k_init_firmware(hw, priv->fw_pref, true);
if (rc)
goto err_stop_firmware;
+
+ priv->hw_restart_in_progress = false;
+
return rc;
err_stop_firmware:
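
The mwl8k restart machinery above hinges on one gating rule: once hw_restart_in_progress is set, only the worker that owns the restart (hw_restart_owner) may talk to the firmware, and every other caller fails fast with -EBUSY instead of timing out against a crashed firmware. A condensed sketch of that rule as applied in mwl8k_tx_wait_empty; sketch_cmd_allowed is illustrative, and current is the usual kernel task pointer.

static int sketch_cmd_allowed(struct mwl8k_priv *priv)
{
        if (!priv->hw_restart_in_progress)
                return 0;                       /* normal operation */

        /* only the restart worker may issue firmware commands */
        return priv->hw_restart_owner == current ? 0 : -EBUSY;
}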
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index b52acc4..9fb77d0 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -121,7 +121,7 @@ module_param(orinoco_debug, int, 0644);
MODULE_PARM_DESC(orinoco_debug, "Debug level");
#endif
-static int suppress_linkstatus; /* = 0 */
+static bool suppress_linkstatus; /* = 0 */
module_param(suppress_linkstatus, bool, 0644);
MODULE_PARM_DESC(suppress_linkstatus, "Don't log link status changes");
diff --git a/drivers/net/wireless/orinoco/orinoco_usb.c b/drivers/net/wireless/orinoco/orinoco_usb.c
index 0793e42..ae8ce56 100644
--- a/drivers/net/wireless/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/orinoco/orinoco_usb.c
@@ -1759,32 +1759,7 @@ static struct usb_driver orinoco_driver = {
static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
" (Manuel Estrada Sainz)";
-static int __init ezusb_module_init(void)
-{
- int err;
-
- printk(KERN_DEBUG "%s\n", version);
-
- /* register this driver with the USB subsystem */
- err = usb_register(&orinoco_driver);
- if (err < 0) {
- printk(KERN_ERR PFX "usb_register failed, error %d\n",
- err);
- return err;
- }
-
- return 0;
-}
-
-static void __exit ezusb_module_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&orinoco_driver);
-}
-
-
-module_init(ezusb_module_init);
-module_exit(ezusb_module_exit);
+module_usb_driver(orinoco_driver);
MODULE_AUTHOR("Manuel Estrada Sainz");
MODULE_DESCRIPTION("Driver for Orinoco wireless LAN cards using EZUSB bridge");
diff --git a/drivers/net/wireless/orinoco/scan.c b/drivers/net/wireless/orinoco/scan.c
index e99ca1c..96e39ed 100644
--- a/drivers/net/wireless/orinoco/scan.c
+++ b/drivers/net/wireless/orinoco/scan.c
@@ -76,6 +76,7 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv,
{
struct wiphy *wiphy = priv_to_wiphy(priv);
struct ieee80211_channel *channel;
+ struct cfg80211_bss *cbss;
u8 *ie;
u8 ie_buf[46];
u64 timestamp;
@@ -121,9 +122,10 @@ static void orinoco_add_hostscan_result(struct orinoco_private *priv,
beacon_interval = le16_to_cpu(bss->a.beacon_interv);
signal = SIGNAL_TO_MBM(le16_to_cpu(bss->a.level));
- cfg80211_inform_bss(wiphy, channel, bss->a.bssid, timestamp,
- capability, beacon_interval, ie_buf, ie_len,
- signal, GFP_KERNEL);
+ cbss = cfg80211_inform_bss(wiphy, channel, bss->a.bssid, timestamp,
+ capability, beacon_interval, ie_buf, ie_len,
+ signal, GFP_KERNEL);
+ cfg80211_put_bss(cbss);
}
void orinoco_add_extscan_result(struct orinoco_private *priv,
@@ -132,6 +134,7 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
{
struct wiphy *wiphy = priv_to_wiphy(priv);
struct ieee80211_channel *channel;
+ struct cfg80211_bss *cbss;
const u8 *ie;
u64 timestamp;
s32 signal;
@@ -152,9 +155,10 @@ void orinoco_add_extscan_result(struct orinoco_private *priv,
ie = bss->data;
signal = SIGNAL_TO_MBM(bss->level);
- cfg80211_inform_bss(wiphy, channel, bss->bssid, timestamp,
- capability, beacon_interval, ie, ie_len,
- signal, GFP_KERNEL);
+ cbss = cfg80211_inform_bss(wiphy, channel, bss->bssid, timestamp,
+ capability, beacon_interval, ie, ie_len,
+ signal, GFP_KERNEL);
+ cfg80211_put_bss(cbss);
}
void orinoco_add_hostscan_results(struct orinoco_private *priv,
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index db4d9a0..af2ca1a 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -27,7 +27,7 @@
#include "p54.h"
#include "lmac.h"
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
diff --git a/drivers/net/wireless/p54/p54spi.c b/drivers/net/wireless/p54/p54spi.c
index 78d0d69..7faed62 100644
--- a/drivers/net/wireless/p54/p54spi.c
+++ b/drivers/net/wireless/p54/p54spi.c
@@ -581,11 +581,7 @@ static void p54spi_op_stop(struct ieee80211_hw *dev)
struct p54s_priv *priv = dev->priv;
unsigned long flags;
- if (mutex_lock_interruptible(&priv->mutex)) {
- /* FIXME: how to handle this error? */
- return;
- }
-
+ mutex_lock(&priv->mutex);
WARN_ON(priv->fw_state != FW_STATE_READY);
p54spi_power_off(priv);
@@ -704,7 +700,6 @@ static int __devexit p54spi_remove(struct spi_device *spi)
static struct spi_driver p54spi_driver = {
.driver = {
.name = "p54spi",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 9b60968..f4d28c3 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -1083,15 +1083,4 @@ static struct usb_driver p54u_driver = {
.soft_unbind = 1,
};
-static int __init p54u_init(void)
-{
- return usb_register(&p54u_driver);
-}
-
-static void __exit p54u_exit(void)
-{
- usb_deregister(&p54u_driver);
-}
-
-module_init(p54u_init);
-module_exit(p54u_exit);
+module_usb_driver(p54u_driver);
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 6ed9c32..42b97bc 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -242,7 +242,7 @@ void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb)
skb_unlink(skb, &priv->tx_queue);
p54_tx_qos_accounting_free(priv, skb);
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(dev, skb);
}
EXPORT_SYMBOL_GPL(p54_free_skb);
@@ -788,7 +788,7 @@ void p54_tx_80211(struct ieee80211_hw *dev, struct sk_buff *skb)
&hdr_flags, &aid, &burst_allowed);
if (p54_tx_qos_accounting_alloc(priv, skb, queue)) {
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(dev, skb);
return;
}
diff --git a/drivers/net/wireless/prism54/isl_ioctl.c b/drivers/net/wireless/prism54/isl_ioctl.c
index bc2ba80..4e44b1a 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.c
+++ b/drivers/net/wireless/prism54/isl_ioctl.c
@@ -2493,323 +2493,7 @@ prism54_set_mac_address(struct net_device *ndev, void *addr)
return ret;
}
-/* Note: currently, use hostapd ioctl from the Host AP driver for WPA
- * support. This is to be replaced with Linux wireless extensions once they
- * get WPA support. */
-
-/* Note II: please leave all this together as it will be easier to remove later,
- * once wireless extensions add WPA support -mcgrof */
-
-/* PRISM54_HOSTAPD ioctl() cmd: */
-enum {
- PRISM2_SET_ENCRYPTION = 6,
- PRISM2_HOSTAPD_SET_GENERIC_ELEMENT = 12,
- PRISM2_HOSTAPD_MLME = 13,
- PRISM2_HOSTAPD_SCAN_REQ = 14,
-};
-
#define PRISM54_SET_WPA SIOCIWFIRSTPRIV+12
-#define PRISM54_HOSTAPD SIOCIWFIRSTPRIV+25
-#define PRISM54_DROP_UNENCRYPTED SIOCIWFIRSTPRIV+26
-
-#define PRISM2_HOSTAPD_MAX_BUF_SIZE 1024
-#define PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN \
- offsetof(struct prism2_hostapd_param, u.generic_elem.data)
-
-/* Maximum length for algorithm names (-1 for nul termination)
- * used in ioctl() */
-#define HOSTAP_CRYPT_ALG_NAME_LEN 16
-
-struct prism2_hostapd_param {
- u32 cmd;
- u8 sta_addr[ETH_ALEN];
- union {
- struct {
- u8 alg[HOSTAP_CRYPT_ALG_NAME_LEN];
- u32 flags;
- u32 err;
- u8 idx;
- u8 seq[8]; /* sequence counter (set: RX, get: TX) */
- u16 key_len;
- u8 key[0];
- } crypt;
- struct {
- u8 len;
- u8 data[0];
- } generic_elem;
- struct {
-#define MLME_STA_DEAUTH 0
-#define MLME_STA_DISASSOC 1
- u16 cmd;
- u16 reason_code;
- } mlme;
- struct {
- u8 ssid_len;
- u8 ssid[32];
- } scan_req;
- } u;
-};
-
-
-static int
-prism2_ioctl_set_encryption(struct net_device *dev,
- struct prism2_hostapd_param *param,
- int param_len)
-{
- islpci_private *priv = netdev_priv(dev);
- int rvalue = 0, force = 0;
- int authen = DOT11_AUTH_OS, invoke = 0, exunencrypt = 0;
- union oid_res_t r;
-
- /* with the new API, it's impossible to get a NULL pointer.
- * New version of iwconfig set the IW_ENCODE_NOKEY flag
- * when no key is given, but older versions don't. */
-
- if (param->u.crypt.key_len > 0) {
- /* we have a key to set */
- int index = param->u.crypt.idx;
- int current_index;
- struct obj_key key = { DOT11_PRIV_TKIP, 0, "" };
-
- /* get the current key index */
- rvalue = mgt_get_request(priv, DOT11_OID_DEFKEYID, 0, NULL, &r);
- current_index = r.u;
- /* Verify that the key is not marked as invalid */
- if (!(param->u.crypt.flags & IW_ENCODE_NOKEY)) {
- key.length = param->u.crypt.key_len > sizeof (param->u.crypt.key) ?
- sizeof (param->u.crypt.key) : param->u.crypt.key_len;
- memcpy(key.key, param->u.crypt.key, key.length);
- if (key.length == 32)
- /* we want WPA-PSK */
- key.type = DOT11_PRIV_TKIP;
- if ((index < 0) || (index > 3))
- /* no index provided use the current one */
- index = current_index;
-
- /* now send the key to the card */
- rvalue |=
- mgt_set_request(priv, DOT11_OID_DEFKEYX, index,
- &key);
- }
- /*
- * If a valid key is set, encryption should be enabled
- * (user may turn it off later).
- * This is also how "iwconfig ethX key on" works
- */
- if ((index == current_index) && (key.length > 0))
- force = 1;
- } else {
- int index = (param->u.crypt.flags & IW_ENCODE_INDEX) - 1;
- if ((index >= 0) && (index <= 3)) {
- /* we want to set the key index */
- rvalue |=
- mgt_set_request(priv, DOT11_OID_DEFKEYID, 0,
- &index);
- } else {
- if (!(param->u.crypt.flags & IW_ENCODE_MODE)) {
- /* we cannot do anything. Complain. */
- return -EINVAL;
- }
- }
- }
- /* now read the flags */
- if (param->u.crypt.flags & IW_ENCODE_DISABLED) {
- /* Encoding disabled,
- * authen = DOT11_AUTH_OS;
- * invoke = 0;
- * exunencrypt = 0; */
- }
- if (param->u.crypt.flags & IW_ENCODE_OPEN)
- /* Encode but accept non-encoded packets. No auth */
- invoke = 1;
- if ((param->u.crypt.flags & IW_ENCODE_RESTRICTED) || force) {
- /* Refuse non-encoded packets. Auth */
- authen = DOT11_AUTH_BOTH;
- invoke = 1;
- exunencrypt = 1;
- }
- /* do the change if requested */
- if ((param->u.crypt.flags & IW_ENCODE_MODE) || force) {
- rvalue |=
- mgt_set_request(priv, DOT11_OID_AUTHENABLE, 0, &authen);
- rvalue |=
- mgt_set_request(priv, DOT11_OID_PRIVACYINVOKED, 0, &invoke);
- rvalue |=
- mgt_set_request(priv, DOT11_OID_EXUNENCRYPTED, 0,
- &exunencrypt);
- }
- return rvalue;
-}
-
-static int
-prism2_ioctl_set_generic_element(struct net_device *ndev,
- struct prism2_hostapd_param *param,
- int param_len)
-{
- islpci_private *priv = netdev_priv(ndev);
- int max_len, len, alen, ret=0;
- struct obj_attachment *attach;
-
- len = param->u.generic_elem.len;
- max_len = param_len - PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN;
- if (max_len < 0 || max_len < len)
- return -EINVAL;
-
- alen = sizeof(*attach) + len;
- attach = kzalloc(alen, GFP_KERNEL);
- if (attach == NULL)
- return -ENOMEM;
-
-#define WLAN_FC_TYPE_MGMT 0
-#define WLAN_FC_STYPE_ASSOC_REQ 0
-#define WLAN_FC_STYPE_REASSOC_REQ 2
-
- /* Note: endianness is covered by mgt_set_varlen */
-
- attach->type = (WLAN_FC_TYPE_MGMT << 2) |
- (WLAN_FC_STYPE_ASSOC_REQ << 4);
- attach->id = -1;
- attach->size = len;
- memcpy(attach->data, param->u.generic_elem.data, len);
-
- ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len);
-
- if (ret == 0) {
- attach->type = (WLAN_FC_TYPE_MGMT << 2) |
- (WLAN_FC_STYPE_REASSOC_REQ << 4);
-
- ret = mgt_set_varlen(priv, DOT11_OID_ATTACHMENT, attach, len);
-
- if (ret == 0)
- printk(KERN_DEBUG "%s: WPA IE Attachment was set\n",
- ndev->name);
- }
-
- kfree(attach);
- return ret;
-
-}
-
-static int
-prism2_ioctl_mlme(struct net_device *dev, struct prism2_hostapd_param *param)
-{
- return -EOPNOTSUPP;
-}
-
-static int
-prism2_ioctl_scan_req(struct net_device *ndev,
- struct prism2_hostapd_param *param)
-{
- islpci_private *priv = netdev_priv(ndev);
- struct iw_request_info info;
- int i, rvalue;
- struct obj_bsslist *bsslist;
- u32 noise = 0;
- char *extra = "";
- char *current_ev = "foo";
- union oid_res_t r;
-
- if (islpci_get_state(priv) < PRV_STATE_INIT) {
- /* device is not ready, fail gently */
- return 0;
- }
-
- /* first get the noise value. We will use it to report the link quality */
- rvalue = mgt_get_request(priv, DOT11_OID_NOISEFLOOR, 0, NULL, &r);
- noise = r.u;
-
- /* Ask the device for a list of known bss. We can report at most
- * IW_MAX_AP=64 to the range struct. But the device won't repport anything
- * if you change the value of IWMAX_BSS=24.
- */
- rvalue |= mgt_get_request(priv, DOT11_OID_BSSLIST, 0, NULL, &r);
- bsslist = r.ptr;
-
- info.cmd = PRISM54_HOSTAPD;
- info.flags = 0;
-
- /* ok now, scan the list and translate its info */
- for (i = 0; i < min(IW_MAX_AP, (int) bsslist->nr); i++)
- current_ev = prism54_translate_bss(ndev, &info, current_ev,
- extra + IW_SCAN_MAX_DATA,
- &(bsslist->bsslist[i]),
- noise);
- kfree(bsslist);
-
- return rvalue;
-}
-
-static int
-prism54_hostapd(struct net_device *ndev, struct iw_point *p)
-{
- struct prism2_hostapd_param *param;
- int ret = 0;
- u32 uwrq;
-
- printk(KERN_DEBUG "prism54_hostapd - len=%d\n", p->length);
- if (p->length < sizeof(struct prism2_hostapd_param) ||
- p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer)
- return -EINVAL;
-
- param = memdup_user(p->pointer, p->length);
- if (IS_ERR(param))
- return PTR_ERR(param);
-
- switch (param->cmd) {
- case PRISM2_SET_ENCRYPTION:
- printk(KERN_DEBUG "%s: Caught WPA supplicant set encryption request\n",
- ndev->name);
- ret = prism2_ioctl_set_encryption(ndev, param, p->length);
- break;
- case PRISM2_HOSTAPD_SET_GENERIC_ELEMENT:
- printk(KERN_DEBUG "%s: Caught WPA supplicant set WPA IE request\n",
- ndev->name);
- ret = prism2_ioctl_set_generic_element(ndev, param,
- p->length);
- break;
- case PRISM2_HOSTAPD_MLME:
- printk(KERN_DEBUG "%s: Caught WPA supplicant MLME request\n",
- ndev->name);
- ret = prism2_ioctl_mlme(ndev, param);
- break;
- case PRISM2_HOSTAPD_SCAN_REQ:
- printk(KERN_DEBUG "%s: Caught WPA supplicant scan request\n",
- ndev->name);
- ret = prism2_ioctl_scan_req(ndev, param);
- break;
- case PRISM54_SET_WPA:
- printk(KERN_DEBUG "%s: Caught WPA supplicant wpa init request\n",
- ndev->name);
- uwrq = 1;
- ret = prism54_set_wpa(ndev, NULL, &uwrq, NULL);
- break;
- case PRISM54_DROP_UNENCRYPTED:
- printk(KERN_DEBUG "%s: Caught WPA drop unencrypted request\n",
- ndev->name);
-#if 0
- uwrq = 0x01;
- mgt_set(priv, DOT11_OID_EXUNENCRYPTED, &uwrq);
- down_write(&priv->mib_sem);
- mgt_commit(priv);
- up_write(&priv->mib_sem);
-#endif
- /* Not necessary, as set_wpa does it, should we just do it here though? */
- ret = 0;
- break;
- default:
- printk(KERN_DEBUG "%s: Caught a WPA supplicant request that is not supported\n",
- ndev->name);
- ret = -EOPNOTSUPP;
- break;
- }
-
- if (ret == 0 && copy_to_user(p->pointer, param, p->length))
- ret = -EFAULT;
-
- kfree(param);
-
- return ret;
-}
static int
prism54_set_wpa(struct net_device *ndev, struct iw_request_info *info,
@@ -3223,20 +2907,3 @@ const struct iw_handler_def prism54_handler_def = {
.private_args = (struct iw_priv_args *) prism54_private_args,
.get_wireless_stats = prism54_get_wireless_stats,
};
-
-/* For wpa_supplicant */
-
-int
-prism54_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
-{
- struct iwreq *wrq = (struct iwreq *) rq;
- int ret = -1;
- switch (cmd) {
- case PRISM54_HOSTAPD:
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
- ret = prism54_hostapd(ndev, &wrq->u.data);
- return ret;
- }
- return -EOPNOTSUPP;
-}
diff --git a/drivers/net/wireless/prism54/isl_ioctl.h b/drivers/net/wireless/prism54/isl_ioctl.h
index bcfbfb9..a34bceb 100644
--- a/drivers/net/wireless/prism54/isl_ioctl.h
+++ b/drivers/net/wireless/prism54/isl_ioctl.h
@@ -43,8 +43,6 @@ void prism54_wpa_bss_ie_clean(islpci_private *priv);
int prism54_set_mac_address(struct net_device *, void *);
-int prism54_ioctl(struct net_device *, struct ifreq *, int);
-
extern const struct iw_handler_def prism54_handler_def;
#endif /* _ISL_IOCTL_H */
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 5d0f615..5970ff6 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -793,8 +793,8 @@ islpci_set_multicast_list(struct net_device *dev)
static void islpci_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
- strcpy(info->driver, DRV_NAME);
- strcpy(info->version, DRV_VERSION);
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}
static const struct ethtool_ops islpci_ethtool_ops = {
@@ -804,7 +804,6 @@ static const struct ethtool_ops islpci_ethtool_ops = {
static const struct net_device_ops islpci_netdev_ops = {
.ndo_open = islpci_open,
.ndo_stop = islpci_close,
- .ndo_do_ioctl = prism54_ioctl,
.ndo_start_xmit = islpci_eth_transmit,
.ndo_tx_timeout = islpci_eth_tx_timeout,
.ndo_set_mac_address = prism54_set_mac_address,
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 0021e49..04fec1f 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -2426,7 +2426,7 @@ static void rx_authenticate(ray_dev_t *local, struct rcs __iomem *prcs,
unsigned int pkt_addr, int rx_len)
{
UCHAR buff[256];
- struct rx_msg *msg = (struct rx_msg *)buff;
+ struct ray_rx_msg *msg = (struct ray_rx_msg *) buff;
del_timer(&local->timer);
@@ -2513,7 +2513,7 @@ static void rx_deauthenticate(ray_dev_t *local, struct rcs __iomem *prcs,
unsigned int pkt_addr, int rx_len)
{
/* UCHAR buff[256];
- struct rx_msg *msg = (struct rx_msg *)buff;
+ struct ray_rx_msg *msg = (struct ray_rx_msg *) buff;
*/
pr_debug("Deauthentication frame received\n");
local->authentication_state = UNAUTHENTICATED;
diff --git a/drivers/net/wireless/rayctl.h b/drivers/net/wireless/rayctl.h
index d7646f2..3c3b98b 100644
--- a/drivers/net/wireless/rayctl.h
+++ b/drivers/net/wireless/rayctl.h
@@ -566,9 +566,9 @@ struct phy_header {
UCHAR hdr_3;
UCHAR hdr_4;
};
-struct rx_msg {
+struct ray_rx_msg {
struct mac_header mac;
- UCHAR var[1];
+ UCHAR var[0];
};
struct tx_msg {
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 0c13840..a330c69 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -244,6 +244,10 @@ enum ndis_80211_power_mode {
NDIS_80211_POWER_MODE_FAST_PSP,
};
+enum ndis_80211_pmkid_cand_list_flag_bits {
+ NDIS_80211_PMKID_CAND_PREAUTH = cpu_to_le32(1 << 0)
+};
+
struct ndis_80211_auth_request {
__le32 length;
u8 bssid[6];
@@ -387,19 +391,17 @@ struct ndis_80211_capability {
struct ndis_80211_bssid_info {
u8 bssid[6];
u8 pmkid[16];
-};
+} __packed;
struct ndis_80211_pmkid {
__le32 length;
__le32 bssid_info_count;
struct ndis_80211_bssid_info bssid_info[0];
-};
+} __packed;
/*
* private data
*/
-#define NET_TYPE_11FB 0
-
#define CAP_MODE_80211A 1
#define CAP_MODE_80211B 2
#define CAP_MODE_80211G 4
@@ -414,6 +416,7 @@ struct ndis_80211_pmkid {
#define RNDIS_WLAN_ALG_TKIP (1<<1)
#define RNDIS_WLAN_ALG_CCMP (1<<2)
+#define RNDIS_WLAN_NUM_KEYS 4
#define RNDIS_WLAN_KEY_MGMT_NONE 0
#define RNDIS_WLAN_KEY_MGMT_802_1X (1<<0)
#define RNDIS_WLAN_KEY_MGMT_PSK (1<<1)
@@ -516,7 +519,7 @@ struct rndis_wlan_private {
/* encryption stuff */
int encr_tx_key_index;
- struct rndis_wlan_encr_key encr_keys[4];
+ struct rndis_wlan_encr_key encr_keys[RNDIS_WLAN_NUM_KEYS];
int wpa_version;
u8 command_buffer[COMMAND_BUFFER_SIZE];
@@ -1346,6 +1349,32 @@ static int set_channel(struct usbnet *usbdev, int channel)
return ret;
}
+static struct ieee80211_channel *get_current_channel(struct usbnet *usbdev,
+ u16 *beacon_interval)
+{
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
+ struct ieee80211_channel *channel;
+ struct ndis_80211_conf config;
+ int len, ret;
+
+ /* Get channel and beacon interval */
+ len = sizeof(config);
+ ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len);
+ netdev_dbg(usbdev->net, "%s(): OID_802_11_CONFIGURATION -> %d\n",
+ __func__, ret);
+ if (ret < 0)
+ return NULL;
+
+ channel = ieee80211_get_channel(priv->wdev.wiphy,
+ KHZ_TO_MHZ(le32_to_cpu(config.ds_config)));
+ if (!channel)
+ return NULL;
+
+ if (beacon_interval)
+ *beacon_interval = le16_to_cpu(config.beacon_period);
+ return channel;
+}
+
/* index must be 0 - N, as per NDIS */
static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len,
int index)
@@ -1535,6 +1564,9 @@ static int remove_key(struct usbnet *usbdev, int index, const u8 *bssid)
bool is_wpa;
int ret;
+ if (index >= RNDIS_WLAN_NUM_KEYS)
+ return -ENOENT;
+
if (priv->encr_keys[index].len == 0)
return 0;
@@ -1972,11 +2004,12 @@ static int rndis_scan(struct wiphy *wiphy, struct net_device *dev,
return ret;
}
-static struct cfg80211_bss *rndis_bss_info_update(struct usbnet *usbdev,
- struct ndis_80211_bssid_ex *bssid)
+static bool rndis_bss_info_update(struct usbnet *usbdev,
+ struct ndis_80211_bssid_ex *bssid)
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
struct ieee80211_channel *channel;
+ struct cfg80211_bss *bss;
s32 signal;
u64 timestamp;
u16 capability;
@@ -2015,9 +2048,12 @@ static struct cfg80211_bss *rndis_bss_info_update(struct usbnet *usbdev,
capability = le16_to_cpu(fixed->capabilities);
beacon_interval = le16_to_cpu(fixed->beacon_interval);
- return cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid->mac,
+ bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid->mac,
timestamp, capability, beacon_interval, ie, ie_len, signal,
GFP_KERNEL);
+ cfg80211_put_bss(bss);
+
+ return (bss != NULL);
}
static struct ndis_80211_bssid_ex *next_bssid_list_item(
@@ -2451,6 +2487,9 @@ static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
netdev_dbg(usbdev->net, "%s(%i)\n", __func__, key_index);
+ if (key_index >= RNDIS_WLAN_NUM_KEYS)
+ return -ENOENT;
+
priv->encr_tx_key_index = key_index;
if (is_wpa_key(priv, key_index))
@@ -2639,12 +2678,12 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
struct ieee80211_channel *channel;
- struct ndis_80211_conf config;
struct ndis_80211_ssid ssid;
+ struct cfg80211_bss *bss;
s32 signal;
u64 timestamp;
u16 capability;
- u16 beacon_interval;
+ u16 beacon_interval = 0;
__le32 rssi;
u8 ie_buf[34];
int len, ret, ie_len;
@@ -2669,22 +2708,10 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
}
/* Get channel and beacon interval */
- len = sizeof(config);
- ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len);
- netdev_dbg(usbdev->net, "%s(): OID_802_11_CONFIGURATION -> %d\n",
- __func__, ret);
- if (ret >= 0) {
- beacon_interval = le16_to_cpu(config.beacon_period);
- channel = ieee80211_get_channel(priv->wdev.wiphy,
- KHZ_TO_MHZ(le32_to_cpu(config.ds_config)));
- if (!channel) {
- netdev_warn(usbdev->net, "%s(): could not get channel."
- "\n", __func__);
- return;
- }
- } else {
- netdev_warn(usbdev->net, "%s(): could not get configuration.\n",
- __func__);
+ channel = get_current_channel(usbdev, &beacon_interval);
+ if (!channel) {
+ netdev_warn(usbdev->net, "%s(): could not get channel.\n",
+ __func__);
return;
}
@@ -2714,9 +2741,10 @@ static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
bssid, (u32)timestamp, capability, beacon_interval, ie_len,
ssid.essid, signal);
- cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid,
+ bss = cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid,
timestamp, capability, beacon_interval, ie_buf, ie_len,
signal, GFP_KERNEL);
+ cfg80211_put_bss(bss);
}
/*
@@ -2828,8 +2856,9 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
req_ie_len, resp_ie,
resp_ie_len, 0, GFP_KERNEL);
else
- cfg80211_roamed(usbdev->net, NULL, bssid,
- req_ie, req_ie_len,
+ cfg80211_roamed(usbdev->net,
+ get_current_channel(usbdev, NULL),
+ bssid, req_ie, req_ie_len,
resp_ie, resp_ie_len, GFP_KERNEL);
} else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL);
@@ -2995,25 +3024,13 @@ static void rndis_wlan_pmkid_cand_list_indication(struct usbnet *usbdev,
for (i = 0; i < le32_to_cpu(cand_list->num_candidates); i++) {
struct ndis_80211_pmkid_candidate *cand =
&cand_list->candidate_list[i];
+ bool preauth = !!(cand->flags & NDIS_80211_PMKID_CAND_PREAUTH);
- netdev_dbg(usbdev->net, "cand[%i]: flags: 0x%08x, bssid: %pM\n",
- i, le32_to_cpu(cand->flags), cand->bssid);
-
-#if 0
- struct iw_pmkid_cand pcand;
- union iwreq_data wrqu;
-
- memset(&pcand, 0, sizeof(pcand));
- if (le32_to_cpu(cand->flags) & 0x01)
- pcand.flags |= IW_PMKID_CAND_PREAUTH;
- pcand.index = i;
- memcpy(pcand.bssid.sa_data, cand->bssid, ETH_ALEN);
+ netdev_dbg(usbdev->net, "cand[%i]: flags: 0x%08x, preauth: %d, bssid: %pM\n",
+ i, le32_to_cpu(cand->flags), preauth, cand->bssid);
- memset(&wrqu, 0, sizeof(wrqu));
- wrqu.data.length = sizeof(pcand);
- wireless_send_event(usbdev->net, IWEVPMKIDCAND, &wrqu,
- (u8 *)&pcand);
-#endif
+ cfg80211_pmksa_candidate_notify(usbdev->net, i, cand->bssid,
+ preauth, GFP_ATOMIC);
}
}
@@ -3754,17 +3771,7 @@ static struct usb_driver rndis_wlan_driver = {
.resume = usbnet_resume,
};
-static int __init rndis_wlan_init(void)
-{
- return usb_register(&rndis_wlan_driver);
-}
-module_init(rndis_wlan_init);
-
-static void __exit rndis_wlan_exit(void)
-{
- usb_deregister(&rndis_wlan_driver);
-}
-module_exit(rndis_wlan_exit);
+module_usb_driver(rndis_wlan_driver);
MODULE_AUTHOR("Bjorge Dijkstra");
MODULE_AUTHOR("Jussi Kivilinna");
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 53c5f87..1de9c75 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -39,7 +39,7 @@
/*
* Allow hardware encryption to be disabled.
*/
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
@@ -1982,15 +1982,4 @@ static struct usb_driver rt2500usb_driver = {
.resume = rt2x00usb_resume,
};
-static int __init rt2500usb_init(void)
-{
- return usb_register(&rt2500usb_driver);
-}
-
-static void __exit rt2500usb_exit(void)
-{
- usb_deregister(&rt2500usb_driver);
-}
-
-module_init(rt2500usb_init);
-module_exit(rt2500usb_exit);
+module_usb_driver(rt2500usb_driver);
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 4778620..2571a2f 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -50,7 +50,7 @@
* RF2853 2.4G/5G 3T3R
* RF3320 2.4G 1T1R(RT3350/RT3370/RT3390)
* RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
- * RF3853 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
+ * RF3053 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
* RF5370 2.4G 1T1R
* RF5390 2.4G 1T1R
*/
@@ -66,7 +66,7 @@
#define RF2853 0x000a
#define RF3320 0x000b
#define RF3322 0x000c
-#define RF3853 0x000d
+#define RF3053 0x000d
#define RF5370 0x5370
#define RF5390 0x5390
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 1ba079d..7bef66d 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -514,9 +514,9 @@ EXPORT_SYMBOL_GPL(rt2800_write_tx_data);
static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, u32 rxwi_w2)
{
- int rssi0 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI0);
- int rssi1 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI1);
- int rssi2 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI2);
+ s8 rssi0 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI0);
+ s8 rssi1 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI1);
+ s8 rssi2 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI2);
u16 eeprom;
u8 offset0;
u8 offset1;
@@ -552,7 +552,7 @@ static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, u32 rxwi_w2)
* which gives less energy...
*/
rssi0 = max(rssi0, rssi1);
- return max(rssi0, rssi2);
+ return (int)max(rssi0, rssi2);
}
void rt2800_process_rxwi(struct queue_entry *entry,
@@ -1203,8 +1203,10 @@ void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
!(filter_flags & FIF_CONTROL));
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PSPOLL,
!(filter_flags & FIF_PSPOLL));
- rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA, 1);
- rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR, 0);
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA,
+ !(filter_flags & FIF_CONTROL));
+ rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR,
+ !(filter_flags & FIF_CONTROL));
rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CNTL,
!(filter_flags & FIF_CONTROL));
rt2800_register_write(rt2x00dev, RX_FILTER_CFG, reg);
@@ -1942,19 +1944,24 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
info->default_power2 = TXPOWER_A_TO_DEV(info->default_power2);
}
- if (rt2x00_rf(rt2x00dev, RF2020) ||
- rt2x00_rf(rt2x00dev, RF3020) ||
- rt2x00_rf(rt2x00dev, RF3021) ||
- rt2x00_rf(rt2x00dev, RF3022) ||
- rt2x00_rf(rt2x00dev, RF3320))
+ switch (rt2x00dev->chip.rf) {
+ case RF2020:
+ case RF3020:
+ case RF3021:
+ case RF3022:
+ case RF3320:
rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info);
- else if (rt2x00_rf(rt2x00dev, RF3052))
+ break;
+ case RF3052:
rt2800_config_channel_rf3052(rt2x00dev, conf, rf, info);
- else if (rt2x00_rf(rt2x00dev, RF5370) ||
- rt2x00_rf(rt2x00dev, RF5390))
+ break;
+ case RF5370:
+ case RF5390:
rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info);
- else
+ break;
+ default:
rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
+ }
/*
* Change BBP settings
@@ -3930,15 +3937,18 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
- if (!rt2x00_rt(rt2x00dev, RT2860) &&
- !rt2x00_rt(rt2x00dev, RT2872) &&
- !rt2x00_rt(rt2x00dev, RT2883) &&
- !rt2x00_rt(rt2x00dev, RT3070) &&
- !rt2x00_rt(rt2x00dev, RT3071) &&
- !rt2x00_rt(rt2x00dev, RT3090) &&
- !rt2x00_rt(rt2x00dev, RT3390) &&
- !rt2x00_rt(rt2x00dev, RT3572) &&
- !rt2x00_rt(rt2x00dev, RT5390)) {
+ switch (rt2x00dev->chip.rt) {
+ case RT2860:
+ case RT2872:
+ case RT2883:
+ case RT3070:
+ case RT3071:
+ case RT3090:
+ case RT3390:
+ case RT3572:
+ case RT5390:
+ break;
+ default:
ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
return -ENODEV;
}
@@ -4552,6 +4562,9 @@ int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
survey->channel_time_ext_busy = busy_ext / 1000;
}
+ if (!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
+ survey->filled |= SURVEY_INFO_IN_USE;
+
return 0;
}
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index da48c8a..dc88bae 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -50,7 +50,7 @@
/*
* Allow hardware encryption to be disabled.
*/
-static int modparam_nohwcrypt = 0;
+static bool modparam_nohwcrypt = false;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
@@ -422,7 +422,6 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
enum dev_state state)
{
- int mask = (state == STATE_RADIO_IRQ_ON);
u32 reg;
unsigned long flags;
@@ -436,25 +435,14 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
}
spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
- rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
- rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, mask);
- rt2x00_set_field32(&reg, INT_MASK_CSR_AC0_DMA_DONE, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_AC1_DMA_DONE, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_AC2_DMA_DONE, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_AC3_DMA_DONE, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_HCCA_DMA_DONE, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_MGMT_DMA_DONE, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_MCU_COMMAND, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_RXTX_COHERENT, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, mask);
- rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, mask);
- rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, mask);
- rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, mask);
- rt2x00_set_field32(&reg, INT_MASK_CSR_GPTIMER, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, 0);
- rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, 0);
+ reg = 0;
+ if (state == STATE_RADIO_IRQ_ON) {
+ rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
+ rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
+ rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
+ rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
+ rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
+ }
rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 3778763..262ee9e 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -45,7 +45,7 @@
/*
* Allow hardware encryption to be disabled.
*/
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
@@ -400,10 +400,10 @@ static void rt2800usb_write_tx_desc(struct queue_entry *entry,
/*
* The size of TXINFO_W0_USB_DMA_TX_PKT_LEN is
* TXWI + 802.11 header + L2 pad + payload + pad,
- * so need to decrease size of TXINFO and USB end pad.
+ * so the size of TXINFO needs to be subtracted.
*/
rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN,
- entry->skb->len - TXINFO_DESC_SIZE - 4);
+ roundup(entry->skb->len, 4) - TXINFO_DESC_SIZE);
rt2x00_set_field32(&word, TXINFO_W0_WIV,
!test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2);
@@ -421,37 +421,20 @@ static void rt2800usb_write_tx_desc(struct queue_entry *entry,
skbdesc->desc_len = TXINFO_DESC_SIZE + TXWI_DESC_SIZE;
}
-static void rt2800usb_write_tx_data(struct queue_entry *entry,
- struct txentry_desc *txdesc)
+/*
+ * TX data initialization
+ */
+static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
{
- unsigned int len;
- int err;
-
- rt2800_write_tx_data(entry, txdesc);
-
/*
- * pad(1~3 bytes) is added after each 802.11 payload.
- * USB end pad(4 bytes) is added at each USB bulk out packet end.
+ * pad(1~3 bytes) is needed after each 802.11 payload.
+ * USB end pad(4 bytes) is needed at each USB bulk out packet end.
* TX frame format is :
* | TXINFO | TXWI | 802.11 header | L2 pad | payload | pad | USB end pad |
* |<------------- tx_pkt_len ------------->|
*/
- len = roundup(entry->skb->len, 4) + 4;
- err = skb_padto(entry->skb, len);
- if (unlikely(err)) {
- WARNING(entry->queue->rt2x00dev, "TX SKB padding error, out of memory\n");
- return;
- }
-
- entry->skb->len = len;
-}
-/*
- * TX data initialization
- */
-static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
-{
- return entry->skb->len;
+ return roundup(entry->skb->len, 4) + 4;
}
/*
@@ -807,7 +790,7 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
.flush_queue = rt2x00usb_flush_queue,
.tx_dma_done = rt2800usb_tx_dma_done,
.write_tx_desc = rt2800usb_write_tx_desc,
- .write_tx_data = rt2800usb_write_tx_data,
+ .write_tx_data = rt2800_write_tx_data,
.write_beacon = rt2800_write_beacon,
.clear_beacon = rt2800_clear_beacon,
.get_tx_data_len = rt2800usb_get_tx_data_len,
@@ -914,12 +897,14 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x050d, 0x8053) },
{ USB_DEVICE(0x050d, 0x805c) },
{ USB_DEVICE(0x050d, 0x815c) },
+ { USB_DEVICE(0x050d, 0x825a) },
{ USB_DEVICE(0x050d, 0x825b) },
{ USB_DEVICE(0x050d, 0x935a) },
{ USB_DEVICE(0x050d, 0x935b) },
/* Buffalo */
{ USB_DEVICE(0x0411, 0x00e8) },
{ USB_DEVICE(0x0411, 0x0158) },
+ { USB_DEVICE(0x0411, 0x015d) },
{ USB_DEVICE(0x0411, 0x016f) },
{ USB_DEVICE(0x0411, 0x01a2) },
/* Corega */
@@ -934,6 +919,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x07d1, 0x3c0e) },
{ USB_DEVICE(0x07d1, 0x3c0f) },
{ USB_DEVICE(0x07d1, 0x3c11) },
+ { USB_DEVICE(0x07d1, 0x3c13) },
+ { USB_DEVICE(0x07d1, 0x3c15) },
{ USB_DEVICE(0x07d1, 0x3c16) },
/* Draytek */
{ USB_DEVICE(0x07fa, 0x7712) },
@@ -943,6 +930,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x7392, 0x7711) },
{ USB_DEVICE(0x7392, 0x7717) },
{ USB_DEVICE(0x7392, 0x7718) },
+ { USB_DEVICE(0x7392, 0x7722) },
/* Encore */
{ USB_DEVICE(0x203d, 0x1480) },
{ USB_DEVICE(0x203d, 0x14a9) },
@@ -976,6 +964,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x13b1, 0x0031) },
{ USB_DEVICE(0x1737, 0x0070) },
{ USB_DEVICE(0x1737, 0x0071) },
+ { USB_DEVICE(0x1737, 0x0077) },
+ { USB_DEVICE(0x1737, 0x0078) },
/* Logitec */
{ USB_DEVICE(0x0789, 0x0162) },
{ USB_DEVICE(0x0789, 0x0163) },
@@ -999,9 +989,13 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0db0, 0x871b) },
{ USB_DEVICE(0x0db0, 0x871c) },
{ USB_DEVICE(0x0db0, 0x899a) },
+ /* Ovislink */
+ { USB_DEVICE(0x1b75, 0x3071) },
+ { USB_DEVICE(0x1b75, 0x3072) },
/* Para */
{ USB_DEVICE(0x20b8, 0x8888) },
/* Pegatron */
+ { USB_DEVICE(0x1d4d, 0x0002) },
{ USB_DEVICE(0x1d4d, 0x000c) },
{ USB_DEVICE(0x1d4d, 0x000e) },
{ USB_DEVICE(0x1d4d, 0x0011) },
@@ -1054,7 +1048,9 @@ static struct usb_device_id rt2800usb_device_table[] = {
/* Sparklan */
{ USB_DEVICE(0x15a9, 0x0006) },
/* Sweex */
+ { USB_DEVICE(0x177f, 0x0153) },
{ USB_DEVICE(0x177f, 0x0302) },
+ { USB_DEVICE(0x177f, 0x0313) },
/* U-Media */
{ USB_DEVICE(0x157e, 0x300e) },
{ USB_DEVICE(0x157e, 0x3013) },
@@ -1138,27 +1134,24 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x13d3, 0x3322) },
/* Belkin */
{ USB_DEVICE(0x050d, 0x1003) },
- { USB_DEVICE(0x050d, 0x825a) },
/* Buffalo */
{ USB_DEVICE(0x0411, 0x012e) },
{ USB_DEVICE(0x0411, 0x0148) },
{ USB_DEVICE(0x0411, 0x0150) },
- { USB_DEVICE(0x0411, 0x015d) },
/* Corega */
{ USB_DEVICE(0x07aa, 0x0041) },
{ USB_DEVICE(0x07aa, 0x0042) },
{ USB_DEVICE(0x18c5, 0x0008) },
/* D-Link */
{ USB_DEVICE(0x07d1, 0x3c0b) },
- { USB_DEVICE(0x07d1, 0x3c13) },
- { USB_DEVICE(0x07d1, 0x3c15) },
{ USB_DEVICE(0x07d1, 0x3c17) },
{ USB_DEVICE(0x2001, 0x3c17) },
/* Edimax */
{ USB_DEVICE(0x7392, 0x4085) },
- { USB_DEVICE(0x7392, 0x7722) },
/* Encore */
{ USB_DEVICE(0x203d, 0x14a1) },
+ /* Fujitsu Stylistic 550 */
+ { USB_DEVICE(0x1690, 0x0761) },
/* Gemtek */
{ USB_DEVICE(0x15a9, 0x0010) },
/* Gigabyte */
@@ -1170,20 +1163,13 @@ static struct usb_device_id rt2800usb_device_table[] = {
/* LevelOne */
{ USB_DEVICE(0x1740, 0x0605) },
{ USB_DEVICE(0x1740, 0x0615) },
- /* Linksys */
- { USB_DEVICE(0x1737, 0x0077) },
- { USB_DEVICE(0x1737, 0x0078) },
/* Logitec */
{ USB_DEVICE(0x0789, 0x0168) },
{ USB_DEVICE(0x0789, 0x0169) },
/* Motorola */
{ USB_DEVICE(0x100d, 0x9032) },
- /* Ovislink */
- { USB_DEVICE(0x1b75, 0x3071) },
- { USB_DEVICE(0x1b75, 0x3072) },
/* Pegatron */
{ USB_DEVICE(0x05a6, 0x0101) },
- { USB_DEVICE(0x1d4d, 0x0002) },
{ USB_DEVICE(0x1d4d, 0x0010) },
/* Planex */
{ USB_DEVICE(0x2019, 0x5201) },
@@ -1202,9 +1188,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x083a, 0xc522) },
{ USB_DEVICE(0x083a, 0xd522) },
{ USB_DEVICE(0x083a, 0xf511) },
- /* Sweex */
- { USB_DEVICE(0x177f, 0x0153) },
- { USB_DEVICE(0x177f, 0x0313) },
/* Zyxel */
{ USB_DEVICE(0x0586, 0x341a) },
#endif
@@ -1234,15 +1217,4 @@ static struct usb_driver rt2800usb_driver = {
.resume = rt2x00usb_resume,
};
-static int __init rt2800usb_init(void)
-{
- return usb_register(&rt2800usb_driver);
-}
-
-static void __exit rt2800usb_exit(void)
-{
- usb_deregister(&rt2800usb_driver);
-}
-
-module_init(rt2800usb_init);
-module_exit(rt2800usb_exit);
+module_usb_driver(rt2800usb_driver);
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 99ff12d..b03b22c 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -189,9 +189,9 @@ struct rt2x00_chip {
#define RT3090 0x3090 /* 2.4GHz PCIe */
#define RT3390 0x3390
#define RT3572 0x3572
-#define RT3593 0x3593 /* PCIe */
+#define RT3593 0x3593
#define RT3883 0x3883 /* WSOC */
-#define RT5390 0x5390 /* 2.4GHz */
+#define RT5390 0x5390 /* 2.4GHz */
u16 rf;
u16 rev;
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index edd317f..fd356b7 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -426,10 +426,14 @@ void rt2x00lib_txdone(struct queue_entry *entry,
/*
* If the data queue was below the threshold before the txdone
* handler we must make sure the packet queue in the mac80211 stack
- * is reenabled when the txdone handler has finished.
+ * is reenabled when the txdone handler has finished. This has to be
+ * serialized with rt2x00mac_tx(), otherwise we could wake up the queue
+ * before it has been stopped.
*/
+ spin_lock_bh(&entry->queue->tx_lock);
if (!rt2x00queue_threshold(entry->queue))
rt2x00queue_unpause_queue(entry->queue);
+ spin_unlock_bh(&entry->queue->tx_lock);
}
EXPORT_SYMBOL_GPL(rt2x00lib_txdone);
@@ -831,11 +835,11 @@ static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev,
if (spec->supported_rates & SUPPORT_RATE_OFDM)
num_rates += 8;
- channels = kzalloc(sizeof(*channels) * spec->num_channels, GFP_KERNEL);
+ channels = kcalloc(spec->num_channels, sizeof(*channels), GFP_KERNEL);
if (!channels)
return -ENOMEM;
- rates = kzalloc(sizeof(*rates) * num_rates, GFP_KERNEL);
+ rates = kcalloc(num_rates, sizeof(*rates), GFP_KERNEL);
if (!rates)
goto exit_free_channels;
@@ -1220,7 +1224,8 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
cancel_work_sync(&rt2x00dev->rxdone_work);
cancel_work_sync(&rt2x00dev->txdone_work);
}
- destroy_workqueue(rt2x00dev->workqueue);
+ if (rt2x00dev->workqueue)
+ destroy_workqueue(rt2x00dev->workqueue);
/*
* Free the tx status fifo.
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index bf0acff..2df2eb6 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -152,15 +152,24 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
if (unlikely(rt2x00queue_write_tx_frame(queue, skb, false)))
goto exit_fail;
+ /*
+ * Pausing the queue has to be serialized with rt2x00lib_txdone(). Note
+ * that we should not use the spin_lock_bh variant, as bottom halves were
+ * already disabled before the ieee80211_xmit() call.
+ */
+ spin_lock(&queue->tx_lock);
if (rt2x00queue_threshold(queue))
rt2x00queue_pause_queue(queue);
+ spin_unlock(&queue->tx_lock);
return;
exit_fail:
+ spin_lock(&queue->tx_lock);
rt2x00queue_pause_queue(queue);
+ spin_unlock(&queue->tx_lock);
exit_free_skb:
- dev_kfree_skb_any(skb);
+ ieee80211_free_txskb(hw, skb);
}
EXPORT_SYMBOL_GPL(rt2x00mac_tx);
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 5adfb3e..9b1b2b7 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -619,6 +619,9 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
else if (test_bit(REQUIRE_DMA, &queue->rt2x00dev->cap_flags))
rt2x00queue_align_frame(skb);
+ /*
+ * This function must be called with bottom halves disabled.
+ */
spin_lock(&queue->tx_lock);
if (unlikely(rt2x00queue_full(queue))) {
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index 1e31050..2eea386 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -298,12 +298,22 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void* data)
return false;
/*
- * USB devices cannot blindly pass the skb->len as the
- * length of the data to usb_fill_bulk_urb. Pass the skb
- * to the driver to determine what the length should be.
+ * USB devices require certain padding at the end of each frame
+ * and URB. This padding is not included in the skb. Pass the entry
+ * to the driver to determine what the overall length should be.
*/
length = rt2x00dev->ops->lib->get_tx_data_len(entry);
+ status = skb_padto(entry->skb, length);
+ if (unlikely(status)) {
+ /* TODO: report something more appropriate than IO_FAILED. */
+ WARNING(rt2x00dev, "TX SKB padding error, out of memory\n");
+ set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
+ rt2x00lib_dmadone(entry);
+
+ return false;
+ }
+
usb_fill_bulk_urb(entry_priv->urb, usb_dev,
usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
entry->skb->data, length,
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index bf55b4a..e0c6d11 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -41,7 +41,7 @@
/*
* Allow hardware encryption to be disabled.
*/
-static int modparam_nohwcrypt = 0;
+static bool modparam_nohwcrypt = false;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index cfb19db..e477a96 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -40,7 +40,7 @@
/*
* Allow hardware encryption to be disabled.
*/
-static int modparam_nohwcrypt;
+static bool modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
@@ -2528,15 +2528,4 @@ static struct usb_driver rt73usb_driver = {
.resume = rt2x00usb_resume,
};
-static int __init rt73usb_init(void)
-{
- return usb_register(&rt73usb_driver);
-}
-
-static void __exit rt73usb_exit(void)
-{
- usb_deregister(&rt73usb_driver);
-}
-
-module_init(rt73usb_init);
-module_exit(rt73usb_exit);
+module_usb_driver(rt73usb_driver);
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 4a78f9e..638fbef 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -1592,15 +1592,4 @@ static struct usb_driver rtl8187_driver = {
.disconnect = __devexit_p(rtl8187_disconnect),
};
-static int __init rtl8187_init(void)
-{
- return usb_register(&rtl8187_driver);
-}
-
-static void __exit rtl8187_exit(void)
-{
- usb_deregister(&rtl8187_driver);
-}
-
-module_init(rtl8187_init);
-module_exit(rtl8187_exit);
+module_usb_driver(rtl8187_driver);
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index b4ce934..8d6eb0f 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -345,9 +345,9 @@ static void _rtl_init_mac80211(struct ieee80211_hw *hw)
if (is_valid_ether_addr(rtlefuse->dev_addr)) {
SET_IEEE80211_PERM_ADDR(hw, rtlefuse->dev_addr);
} else {
- u8 rtlmac[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 };
- get_random_bytes((rtlmac + (ETH_ALEN - 1)), 1);
- SET_IEEE80211_PERM_ADDR(hw, rtlmac);
+ u8 rtlmac1[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 };
+ get_random_bytes((rtlmac1 + (ETH_ALEN - 1)), 1);
+ SET_IEEE80211_PERM_ADDR(hw, rtlmac1);
}
}
@@ -396,7 +396,7 @@ void rtl_init_rfkill(struct ieee80211_hw *hw)
u8 valid = 0;
/*set init state to on */
- rtlpriv->rfkill.rfkill_state = 1;
+ rtlpriv->rfkill.rfkill_state = true;
wiphy_rfkill_set_hw_state(hw->wiphy, 0);
radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid);
@@ -448,12 +448,12 @@ int rtl_init_core(struct ieee80211_hw *hw)
/* <4> locks */
mutex_init(&rtlpriv->locks.conf_mutex);
+ mutex_init(&rtlpriv->locks.ps_mutex);
spin_lock_init(&rtlpriv->locks.ips_lock);
spin_lock_init(&rtlpriv->locks.irq_th_lock);
spin_lock_init(&rtlpriv->locks.h2c_lock);
spin_lock_init(&rtlpriv->locks.rf_ps_lock);
spin_lock_init(&rtlpriv->locks.rf_lock);
- spin_lock_init(&rtlpriv->locks.lps_lock);
spin_lock_init(&rtlpriv->locks.waitq_lock);
spin_lock_init(&rtlpriv->locks.cck_and_rw_pagea_lock);
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
index 4ae9059..f66b575 100644
--- a/drivers/net/wireless/rtlwifi/base.h
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -76,7 +76,7 @@ enum ap_peer {
SET_BITS_TO_LE_2BYTE(_hdr, 8, 1, _val)
#define SET_80211_PS_POLL_AID(_hdr, _val) \
- (*(u16 *)((u8 *)(_hdr) + 2) = le16_to_cpu(_val))
+ (*(u16 *)((u8 *)(_hdr) + 2) = _val)
#define SET_80211_PS_POLL_BSSID(_hdr, _val) \
memcpy(((u8 *)(_hdr)) + 4, (u8 *)(_val), ETH_ALEN)
#define SET_80211_PS_POLL_TA(_hdr, _val) \
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index eb61061..9245d88 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -78,7 +78,7 @@ static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
u8 init_aspm;
ppsc->reg_rfps_level = 0;
- ppsc->support_aspm = 0;
+ ppsc->support_aspm = false;
/*Update PCI ASPM setting */
ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
@@ -570,9 +570,9 @@ static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
if (ieee80211_is_nullfunc(fc)) {
if (ieee80211_has_pm(fc)) {
rtlpriv->mac80211.offchan_delay = true;
- rtlpriv->psc.state_inap = 1;
+ rtlpriv->psc.state_inap = true;
} else {
- rtlpriv->psc.state_inap = 0;
+ rtlpriv->psc.state_inap = false;
}
}
@@ -610,7 +610,7 @@ tx_status_ok:
if (((rtlpriv->link_info.num_rx_inperiod +
rtlpriv->link_info.num_tx_inperiod) > 8) ||
(rtlpriv->link_info.num_rx_inperiod > 2)) {
- tasklet_schedule(&rtlpriv->works.ips_leave_tasklet);
+ schedule_work(&rtlpriv->works.lps_leave_work);
}
}
@@ -736,7 +736,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
if (((rtlpriv->link_info.num_rx_inperiod +
rtlpriv->link_info.num_tx_inperiod) > 8) ||
(rtlpriv->link_info.num_rx_inperiod > 2)) {
- tasklet_schedule(&rtlpriv->works.ips_leave_tasklet);
+ schedule_work(&rtlpriv->works.lps_leave_work);
}
dev_kfree_skb_any(skb);
@@ -780,6 +780,7 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
unsigned long flags;
u32 inta = 0;
u32 intb = 0;
+ irqreturn_t ret = IRQ_HANDLED;
spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
@@ -787,8 +788,10 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb);
/*Shared IRQ or HW disappared */
- if (!inta || inta == 0xffff)
+ if (!inta || inta == 0xffff) {
+ ret = IRQ_NONE;
goto done;
+ }
/*<1> beacon related */
if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) {
@@ -890,12 +893,9 @@ static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
if (rtlpriv->rtlhal.earlymode_enable)
tasklet_schedule(&rtlpriv->works.irq_tasklet);
- spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
- return IRQ_HANDLED;
-
done:
spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
- return IRQ_HANDLED;
+ return ret;
}
static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
@@ -903,11 +903,6 @@ static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
_rtl_pci_tx_chk_waitq(hw);
}
-static void _rtl_pci_ips_leave_tasklet(struct ieee80211_hw *hw)
-{
- rtl_lps_leave(hw);
-}
-
static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -945,6 +940,15 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
return;
}
+static void rtl_lps_leave_work_callback(struct work_struct *work)
+{
+ struct rtl_works *rtlworks =
+ container_of(work, struct rtl_works, lps_leave_work);
+ struct ieee80211_hw *hw = rtlworks->hw;
+
+ rtl_lps_leave(hw);
+}
+
static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
{
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -1006,9 +1010,7 @@ static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
(void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
(unsigned long)hw);
- tasklet_init(&rtlpriv->works.ips_leave_tasklet,
- (void (*)(unsigned long))_rtl_pci_ips_leave_tasklet,
- (unsigned long)hw);
+ INIT_WORK(&rtlpriv->works.lps_leave_work, rtl_lps_leave_work_callback);
}
static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
@@ -1478,7 +1480,7 @@ static void rtl_pci_deinit(struct ieee80211_hw *hw)
synchronize_irq(rtlpci->pdev->irq);
tasklet_kill(&rtlpriv->works.irq_tasklet);
- tasklet_kill(&rtlpriv->works.ips_leave_tasklet);
+ cancel_work_sync(&rtlpriv->works.lps_leave_work);
flush_workqueue(rtlpriv->works.rtl_wq);
destroy_workqueue(rtlpriv->works.rtl_wq);
@@ -1499,7 +1501,7 @@ static int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
return err;
}
- return 1;
+ return 0;
}
static int rtl_pci_start(struct ieee80211_hw *hw)
@@ -1553,7 +1555,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
set_hal_stop(rtlhal);
rtlpriv->cfg->ops->disable_interrupt(hw);
- tasklet_kill(&rtlpriv->works.ips_leave_tasklet);
+ cancel_work_sync(&rtlpriv->works.lps_leave_work);
spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
while (ppsc->rfchange_inprogress) {
@@ -1868,7 +1870,7 @@ int __devinit rtl_pci_probe(struct pci_dev *pdev,
}
/* Init PCI sw */
- err = !rtl_pci_init(hw, pdev);
+ err = rtl_pci_init(hw, pdev);
if (err) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
("Failed to init PCI.\n"));
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index 55c8e50..130fdd9 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -237,11 +237,12 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
enum rf_pwrstate rtstate;
+ unsigned long flags;
if (mac->opmode != NL80211_IFTYPE_STATION)
return;
- spin_lock(&rtlpriv->locks.ips_lock);
+ spin_lock_irqsave(&rtlpriv->locks.ips_lock, flags);
if (ppsc->inactiveps) {
rtstate = ppsc->rfpwr_state;
@@ -257,7 +258,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
}
}
- spin_unlock(&rtlpriv->locks.ips_lock);
+ spin_unlock_irqrestore(&rtlpriv->locks.ips_lock, flags);
}
/*for FW LPS*/
@@ -395,7 +396,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
if (mac->link_state != MAC80211_LINKED)
return;
- spin_lock_irq(&rtlpriv->locks.lps_lock);
+ mutex_lock(&rtlpriv->locks.ps_mutex);
/* Idle for a while if we connect to AP a while ago. */
if (mac->cnt_after_linked >= 2) {
@@ -407,7 +408,7 @@ void rtl_lps_enter(struct ieee80211_hw *hw)
}
}
- spin_unlock_irq(&rtlpriv->locks.lps_lock);
+ mutex_unlock(&rtlpriv->locks.ps_mutex);
}
/*Leave the leisure power save mode.*/
@@ -416,9 +417,8 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
- unsigned long flags;
- spin_lock_irqsave(&rtlpriv->locks.lps_lock, flags);
+ mutex_lock(&rtlpriv->locks.ps_mutex);
if (ppsc->fwctrl_lps) {
if (ppsc->dot11_psmode != EACTIVE) {
@@ -439,7 +439,7 @@ void rtl_lps_leave(struct ieee80211_hw *hw)
rtl_lps_set_psmode(hw, EACTIVE);
}
}
- spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flags);
+ mutex_unlock(&rtlpriv->locks.ps_mutex);
}
/* For sw LPS*/
@@ -540,9 +540,9 @@ void rtl_swlps_rf_awake(struct ieee80211_hw *hw)
RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
}
- spin_lock_irq(&rtlpriv->locks.lps_lock);
+ mutex_lock(&rtlpriv->locks.ps_mutex);
rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS);
- spin_unlock_irq(&rtlpriv->locks.lps_lock);
+ mutex_unlock(&rtlpriv->locks.ps_mutex);
}
void rtl_swlps_rfon_wq_callback(void *data)
@@ -575,9 +575,9 @@ void rtl_swlps_rf_sleep(struct ieee80211_hw *hw)
if (rtlpriv->link_info.busytraffic)
return;
- spin_lock_irq(&rtlpriv->locks.lps_lock);
+ mutex_lock(&rtlpriv->locks.ps_mutex);
rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS);
- spin_unlock_irq(&rtlpriv->locks.lps_lock);
+ mutex_unlock(&rtlpriv->locks.ps_mutex);
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
!RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index 950c65a..931d979 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -73,6 +73,34 @@ static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable)
}
}
+static void rtl_block_fw_writeN(struct ieee80211_hw *hw, const u8 *buffer,
+ u32 size)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 blockSize = REALTEK_USB_VENQT_MAX_BUF_SIZE - 20;
+ u8 *bufferPtr = (u8 *) buffer;
+ u32 i, offset, blockCount, remainSize;
+
+ blockCount = size / blockSize;
+ remainSize = size % blockSize;
+
+ for (i = 0; i < blockCount; i++) {
+ offset = i * blockSize;
+ rtlpriv->io.writeN_sync(rtlpriv,
+ (FW_8192C_START_ADDRESS + offset),
+ (void *)(bufferPtr + offset),
+ blockSize);
+ }
+
+ if (remainSize) {
+ offset = blockCount * blockSize;
+ rtlpriv->io.writeN_sync(rtlpriv,
+ (FW_8192C_START_ADDRESS + offset),
+ (void *)(bufferPtr + offset),
+ remainSize);
+ }
+}
+
static void _rtl92c_fw_block_write(struct ieee80211_hw *hw,
const u8 *buffer, u32 size)
{
@@ -81,23 +109,30 @@ static void _rtl92c_fw_block_write(struct ieee80211_hw *hw,
u8 *bufferPtr = (u8 *) buffer;
u32 *pu4BytePtr = (u32 *) buffer;
u32 i, offset, blockCount, remainSize;
+ u32 data;
+ if (rtlpriv->io.writeN_sync) {
+ rtl_block_fw_writeN(hw, buffer, size);
+ return;
+ }
blockCount = size / blockSize;
remainSize = size % blockSize;
+ if (remainSize) {
+ /* the last word is < 4 bytes - pad it with zeros */
+ for (i = 0; i < 4 - remainSize; i++)
+ *(bufferPtr + size + i) = 0;
+ blockCount++;
+ }
for (i = 0; i < blockCount; i++) {
offset = i * blockSize;
+ /* For big-endian platforms the firmware data needs to be
+ * byte-swapped, as it was read as a byte string but will be
+ * written as 32-bit dwords.
+ */
+ data = le32_to_cpu(*(__le32 *)(pu4BytePtr + i));
rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
- *(pu4BytePtr + i));
- }
-
- if (remainSize) {
- offset = blockCount * blockSize;
- bufferPtr += offset;
- for (i = 0; i < remainSize; i++) {
- rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS +
- offset + i), *(bufferPtr + i));
- }
+ data);
}
}
@@ -227,10 +262,10 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
u32 fwsize;
enum version_8192c version = rtlhal->version;
- pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
if (!rtlhal->pfirmware)
return 1;
+ pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
pfwdata = (u8 *) rtlhal->pfirmware;
fwsize = rtlhal->fwsize;
@@ -238,8 +273,9 @@ int rtl92c_download_fw(struct ieee80211_hw *hw)
if (IS_FW_HEADER_EXIST(pfwheader)) {
RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
("Firmware Version(%d), Signature(%#x),Size(%d)\n",
- pfwheader->version, pfwheader->signature,
- (uint)sizeof(struct rtl92c_firmware_header)));
+ le16_to_cpu(pfwheader->version),
+ le16_to_cpu(pfwheader->signature),
+ (uint)sizeof(struct rtl92c_firmware_header)));
pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
index 3d5823c..cec5a3a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
@@ -32,32 +32,32 @@
#define FW_8192C_SIZE 0x3000
#define FW_8192C_START_ADDRESS 0x1000
-#define FW_8192C_END_ADDRESS 0x3FFF
+#define FW_8192C_END_ADDRESS 0x1FFF
#define FW_8192C_PAGE_SIZE 4096
#define FW_8192C_POLLING_DELAY 5
#define FW_8192C_POLLING_TIMEOUT_COUNT 100
#define IS_FW_HEADER_EXIST(_pfwhdr) \
- ((_pfwhdr->signature&0xFFF0) == 0x92C0 ||\
- (_pfwhdr->signature&0xFFF0) == 0x88C0)
+ ((le16_to_cpu(_pfwhdr->signature)&0xFFF0) == 0x92C0 ||\
+ (le16_to_cpu(_pfwhdr->signature)&0xFFF0) == 0x88C0)
struct rtl92c_firmware_header {
- u16 signature;
+ __le16 signature;
u8 category;
u8 function;
- u16 version;
+ __le16 version;
u8 subversion;
u8 rsvd1;
u8 month;
u8 date;
u8 hour;
u8 minute;
- u16 ramcodeSize;
- u16 rsvd2;
- u32 svnindex;
- u32 rsvd3;
- u32 rsvd4;
- u32 rsvd5;
+ __le16 ramcodeSize;
+ __le16 rsvd2;
+ __le32 svnindex;
+ __le32 rsvd3;
+ __le32 rsvd4;
+ __le32 rsvd5;
};
enum rtl8192c_h2c_cmd {
@@ -94,5 +94,6 @@ void rtl92c_firmware_selfreset(struct ieee80211_hw *hw);
void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
+void usb_writeN_async(struct rtl_priv *rtlpriv, u32 addr, void *data, u16 len);
#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index f2aa33d..89ef698 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -98,9 +98,9 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
rtl8192ce_bt_reg_init(hw);
- rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
- rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 814c05d..124cf63 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -498,7 +498,7 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
}
RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, ("MAP\n"),
hwinfo, HWSET_MAX_SIZE);
- eeprom_id = *((u16 *)&hwinfo[0]);
+ eeprom_id = le16_to_cpu(*((__le16 *)&hwinfo[0]));
if (eeprom_id != RTL8190_EEPROM_ID) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
("EEPROM ID(%#x) is invalid!!\n", eeprom_id));
@@ -516,13 +516,14 @@ static void _rtl92cu_read_adapter_info(struct ieee80211_hw *hw)
pr_info("MAC address: %pM\n", rtlefuse->dev_addr);
_rtl92cu_read_txpower_info_from_hwpg(hw,
rtlefuse->autoload_failflag, hwinfo);
- rtlefuse->eeprom_vid = *(u16 *)&hwinfo[EEPROM_VID];
- rtlefuse->eeprom_did = *(u16 *)&hwinfo[EEPROM_DID];
+ rtlefuse->eeprom_vid = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VID]);
+ rtlefuse->eeprom_did = le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_DID]);
RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
(" VID = 0x%02x PID = 0x%02x\n",
rtlefuse->eeprom_vid, rtlefuse->eeprom_did));
rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
- rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
+ rtlefuse->eeprom_version =
+ le16_to_cpu(*(__le16 *)&hwinfo[EEPROM_VERSION]);
rtlefuse->txpwr_fromeprom = true;
rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
@@ -2435,7 +2436,7 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid)
"%x\n", ppsc->hwradiooff, e_rfpowerstate_toset));
}
if (actuallyset) {
- ppsc->hwradiooff = 1;
+ ppsc->hwradiooff = true;
if (e_rfpowerstate_toset == ERFON) {
if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM))
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
index 060a06f..9e0c8fc 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/mac.c
@@ -84,6 +84,7 @@ void rtl92c_read_chip_version(struct ieee80211_hw *hw)
}
}
rtlhal->version = (enum version_8192c)chip_version;
+ pr_info("rtl8192cu: Chip version 0x%x\n", chip_version);
switch (rtlhal->version) {
case VERSION_NORMAL_TSMC_CHIP_92C_1T2R:
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index c244f2f..6d2ca77 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -57,9 +57,9 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw)
const struct firmware *firmware;
int err;
- rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
- rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
rtlpriv->dbg.global_debuglevel = rtlpriv->cfg->mod_params->debug;
rtlpriv->rtlhal.pfirmware = vmalloc(0x4000);
@@ -275,6 +275,8 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8191, rtl92cu_hal_cfg)},
/****** 8188CU ********/
+ /* RTL8188CTV */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x018a, rtl92cu_hal_cfg)},
/* 8188CE-VAU USB minCard */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8170, rtl92cu_hal_cfg)},
/* 8188cu 1*1 dongle */
@@ -291,14 +293,14 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817e, rtl92cu_hal_cfg)},
/* 8188RU in Alfa AWUS036NHR */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817f, rtl92cu_hal_cfg)},
+ /* RTL8188CUS-VL */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x818a, rtl92cu_hal_cfg)},
/* 8188 Combo for BC4 */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8754, rtl92cu_hal_cfg)},
/****** 8192CU ********/
- /* 8191cu 1*2 */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8177, rtl92cu_hal_cfg)},
/* 8192cu 2*2 */
- {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817b, rtl92cu_hal_cfg)},
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x8178, rtl92cu_hal_cfg)},
/* 8192CE-VAU USB minCard */
{RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x817c, rtl92cu_hal_cfg)},
@@ -309,13 +311,17 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
{RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
{RTL_USB_DEVICE(0x0846, 0x9041, rtl92cu_hal_cfg)}, /*NetGear WNA1000M*/
- {RTL_USB_DEVICE(0x0Df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+ {RTL_USB_DEVICE(0x0df6, 0x0052, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
+ {RTL_USB_DEVICE(0x0df6, 0x005c, rtl92cu_hal_cfg)}, /*Sitecom - Edimax*/
{RTL_USB_DEVICE(0x0eb0, 0x9071, rtl92cu_hal_cfg)}, /*NO Brand - Etop*/
/* HP - Lite-On ,8188CUS Slim Combo */
{RTL_USB_DEVICE(0x103c, 0x1629, rtl92cu_hal_cfg)},
{RTL_USB_DEVICE(0x13d3, 0x3357, rtl92cu_hal_cfg)}, /* AzureWave */
{RTL_USB_DEVICE(0x2001, 0x3308, rtl92cu_hal_cfg)}, /*D-Link - Alpha*/
+ {RTL_USB_DEVICE(0x2019, 0x4902, rtl92cu_hal_cfg)}, /*Planex - Etop*/
{RTL_USB_DEVICE(0x2019, 0xab2a, rtl92cu_hal_cfg)}, /*Planex - Abocom*/
+ /*SW-WF02-AD15 -Abocom*/
+ {RTL_USB_DEVICE(0x2019, 0xab2e, rtl92cu_hal_cfg)},
{RTL_USB_DEVICE(0x2019, 0xed17, rtl92cu_hal_cfg)}, /*PCI - Edimax*/
{RTL_USB_DEVICE(0x20f4, 0x648b, rtl92cu_hal_cfg)}, /*TRENDnet - Cameo*/
{RTL_USB_DEVICE(0x7392, 0x7811, rtl92cu_hal_cfg)}, /*Edimax - Edimax*/
@@ -326,14 +332,36 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
{RTL_USB_DEVICE(0x4855, 0x0091, rtl92cu_hal_cfg)}, /* NetweeN-Feixun */
{RTL_USB_DEVICE(0x9846, 0x9041, rtl92cu_hal_cfg)}, /* Netgear Cameo */
+ /****** 8188 RU ********/
+ /* Netcore */
+ {RTL_USB_DEVICE(USB_VENDER_ID_REALTEK, 0x317f, rtl92cu_hal_cfg)},
+
+ /****** 8188CUS Slim Solo********/
+ {RTL_USB_DEVICE(0x04f2, 0xaff7, rtl92cu_hal_cfg)}, /*Xavi*/
+ {RTL_USB_DEVICE(0x04f2, 0xaff9, rtl92cu_hal_cfg)}, /*Xavi*/
+ {RTL_USB_DEVICE(0x04f2, 0xaffa, rtl92cu_hal_cfg)}, /*Xavi*/
+
+ /****** 8188CUS Slim Combo ********/
+ {RTL_USB_DEVICE(0x04f2, 0xaff8, rtl92cu_hal_cfg)}, /*Xavi*/
+ {RTL_USB_DEVICE(0x04f2, 0xaffb, rtl92cu_hal_cfg)}, /*Xavi*/
+ {RTL_USB_DEVICE(0x04f2, 0xaffc, rtl92cu_hal_cfg)}, /*Xavi*/
+ {RTL_USB_DEVICE(0x2019, 0x1201, rtl92cu_hal_cfg)}, /*Planex-Vencer*/
+
/****** 8192CU ********/
+ {RTL_USB_DEVICE(0x050d, 0x2102, rtl92cu_hal_cfg)}, /*Belkin-Sercomm*/
+ {RTL_USB_DEVICE(0x050d, 0x2103, rtl92cu_hal_cfg)}, /*Belkin-Edimax*/
{RTL_USB_DEVICE(0x0586, 0x341f, rtl92cu_hal_cfg)}, /*Zyxel -Abocom*/
{RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/
{RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
+ {RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/
+ {RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
+ {RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/
+ {RTL_USB_DEVICE(0x0e66, 0x0019, rtl92cu_hal_cfg)}, /*Hawking-Edimax*/
{RTL_USB_DEVICE(0x2001, 0x3307, rtl92cu_hal_cfg)}, /*D-Link-Cameo*/
{RTL_USB_DEVICE(0x2001, 0x3309, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
{RTL_USB_DEVICE(0x2001, 0x330a, rtl92cu_hal_cfg)}, /*D-Link-Alpha*/
{RTL_USB_DEVICE(0x2019, 0xab2b, rtl92cu_hal_cfg)}, /*Planex -Abocom*/
+ {RTL_USB_DEVICE(0x20f4, 0x624d, rtl92cu_hal_cfg)}, /*TRENDNet*/
{RTL_USB_DEVICE(0x7392, 0x7822, rtl92cu_hal_cfg)}, /*Edimax -Edimax*/
{}
};
@@ -356,15 +384,4 @@ static struct usb_driver rtl8192cu_driver = {
#endif
};
-static int __init rtl8192cu_init(void)
-{
- return usb_register(&rtl8192cu_driver);
-}
-
-static void __exit rtl8192cu_exit(void)
-{
- usb_deregister(&rtl8192cu_driver);
-}
-
-module_init(rtl8192cu_init);
-module_exit(rtl8192cu_exit);
+module_usb_driver(rtl8192cu_driver);
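
The sw.c hunk above drops the hand-written init/exit pair in favour of the module_usb_driver() helper. The macro generates the same registration boilerplate the driver used to spell out; roughly (a simplified sketch of the expansion, not the exact kernel macro text):

	/* What module_usb_driver(rtl8192cu_driver) provides, approximately: */
	static int __init rtl8192cu_driver_init(void)
	{
		return usb_register(&rtl8192cu_driver);
	}
	module_init(rtl8192cu_driver_init);

	static void __exit rtl8192cu_driver_exit(void)
	{
		usb_deregister(&rtl8192cu_driver);
	}
	module_exit(rtl8192cu_driver_exit);

The net effect is identical registration behaviour with less per-driver boilerplate.
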
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
index bc33b14..b3cc7b9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/trx.c
@@ -491,7 +491,7 @@ static void _rtl_tx_desc_checksum(u8 *txdesc)
SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, 0);
for (index = 0; index < 16; index++)
checksum = checksum ^ (*(ptr + index));
- SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, checksum);
+ SET_TX_DESC_TX_DESC_CHECKSUM(txdesc, cpu_to_le16(checksum));
}
void rtl92cu_tx_fill_desc(struct ieee80211_hw *hw,
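
The trx.c change above runs the computed checksum through cpu_to_le16() before writing it back into the descriptor, because the hardware reads that field as little-endian; on a big-endian host, storing the raw host-order u16 would swap the two bytes. A standalone illustration of the same conversion (plain C, illustrative only, not driver code):

	#include <stdint.h>

	/* Store a host-order 16-bit value in little-endian byte order,
	 * the layout a little-endian device expects. */
	static void store_le16(uint8_t *dst, uint16_t host_val)
	{
		dst[0] = (uint8_t)(host_val & 0xff);	/* low byte first */
		dst[1] = (uint8_t)(host_val >> 8);	/* then high byte */
	}
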
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
index f5bd3a3..9d89d7c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/hw.c
@@ -466,8 +466,8 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
bool int_migration = *(bool *) (val);
if (int_migration) {
- /* Set interrrupt migration timer and
- * corresponging Tx/Rx counter.
+ /* Set interrupt migration timer and
+ * corresponding Tx/Rx counter.
* timer 25ns*0xfa0=100us for 0xf packets.
* 0x306:Rx, 0x307:Tx */
rtl_write_dword(rtlpriv, REG_INT_MIG, 0xfe000fa0);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
index 149493f..7911c9c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
@@ -99,9 +99,9 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
- rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
- rtlpriv->dm.useramask = 1;
+ rtlpriv->dm.useramask = true;
/* dual mac */
if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
index 6f91a14..3fda6b1 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/fw.c
@@ -196,6 +196,8 @@ static bool _rtl92s_firmware_downloadcode(struct ieee80211_hw *hw,
/* Allocate skb buffer to contain firmware */
/* info and tx descriptor info. */
skb = dev_alloc_skb(frag_length);
+ if (!skb)
+ return false;
skb_reserve(skb, extra_descoffset);
seg_ptr = (u8 *)skb_put(skb, (u32)(frag_length -
extra_descoffset));
@@ -573,6 +575,8 @@ static bool _rtl92s_firmware_set_h2c_cmd(struct ieee80211_hw *hw, u8 h2c_cmd,
len = _rtl92s_get_h2c_cmdlen(MAX_TRANSMIT_BUFFER_SIZE, 1, &cmd_len);
skb = dev_alloc_skb(len);
+ if (!skb)
+ return false;
cb_desc = (struct rtl_tcb_desc *)(skb->cb);
cb_desc->queue_index = TXCMD_QUEUE;
cb_desc->cmd_or_init = DESC_PACKET_TYPE_NORMAL;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index 92f49d5..78723cf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -98,9 +98,9 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
int err = 0;
u16 earlyrxthreshold = 7;
- rtlpriv->dm.dm_initialgain_enable = 1;
+ rtlpriv->dm.dm_initialgain_enable = true;
rtlpriv->dm.dm_flag = 0;
- rtlpriv->dm.disable_framebursting = 0;
+ rtlpriv->dm.disable_framebursting = false;
rtlpriv->dm.thermalvalue = 0;
rtlpriv->dm.useramask = true;
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 54cb8a6..e956fa7 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -34,13 +34,14 @@
#include "usb.h"
#include "base.h"
#include "ps.h"
+#include "rtl8192c/fw_common.h"
#define REALTEK_USB_VENQT_READ 0xC0
#define REALTEK_USB_VENQT_WRITE 0x40
#define REALTEK_USB_VENQT_CMD_REQ 0x05
#define REALTEK_USB_VENQT_CMD_IDX 0x00
-#define REALTEK_USB_VENQT_MAX_BUF_SIZE 254
+#define MAX_USBCTRL_VENDORREQ_TIMES 10
static void usbctrl_async_callback(struct urb *urb)
{
@@ -82,6 +83,7 @@ static int _usbctrl_vendorreq_async_write(struct usb_device *udev, u8 request,
dr->wValue = cpu_to_le16(value);
dr->wIndex = cpu_to_le16(index);
dr->wLength = cpu_to_le16(len);
+ /* data are already in little-endian order */
memcpy(buf, pdata, len);
usb_fill_control_urb(urb, udev, pipe,
(unsigned char *)dr, buf, len,
@@ -100,16 +102,28 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
unsigned int pipe;
int status;
u8 reqtype;
+ int vendorreq_times = 0;
+ static int count;
pipe = usb_rcvctrlpipe(udev, 0); /* read_in */
reqtype = REALTEK_USB_VENQT_READ;
- status = usb_control_msg(udev, pipe, request, reqtype, value, index,
- pdata, len, 0); /* max. timeout */
+ do {
+ status = usb_control_msg(udev, pipe, request, reqtype, value,
+ index, pdata, len, 0); /*max. timeout*/
+ if (status < 0) {
+ /* firmware download is checksummed, don't retry */
+ if ((value >= FW_8192C_START_ADDRESS &&
+ value <= FW_8192C_END_ADDRESS))
+ break;
+ } else {
+ break;
+ }
+ } while (++vendorreq_times < MAX_USBCTRL_VENDORREQ_TIMES);
- if (status < 0)
+ if (status < 0 && count++ < 4)
pr_err("reg 0x%x, usbctrl_vendorreq TimeOut! status:0x%x value=0x%x\n",
- value, status, *(u32 *)pdata);
+ value, status, le32_to_cpu(*(u32 *)pdata));
return status;
}
@@ -129,7 +143,7 @@ static u32 _usb_read_sync(struct usb_device *udev, u32 addr, u16 len)
wvalue = (u16)addr;
_usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
- ret = *data;
+ ret = le32_to_cpu(*data);
kfree(data);
return ret;
}
@@ -161,12 +175,12 @@ static void _usb_write_async(struct usb_device *udev, u32 addr, u32 val,
u8 request;
u16 wvalue;
u16 index;
- u32 data;
+ __le32 data;
request = REALTEK_USB_VENQT_CMD_REQ;
index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
wvalue = (u16)(addr&0x0000ffff);
- data = val;
+ data = cpu_to_le32(val);
_usbctrl_vendorreq_async_write(udev, request, wvalue, index, &data,
len);
}
@@ -192,6 +206,30 @@ static void _usb_write32_async(struct rtl_priv *rtlpriv, u32 addr, u32 val)
_usb_write_async(to_usb_device(dev), addr, val, 4);
}
+static void _usb_writeN_sync(struct rtl_priv *rtlpriv, u32 addr, void *data,
+ u16 len)
+{
+ struct device *dev = rtlpriv->io.dev;
+ struct usb_device *udev = to_usb_device(dev);
+ u8 request = REALTEK_USB_VENQT_CMD_REQ;
+ u8 reqtype = REALTEK_USB_VENQT_WRITE;
+ u16 wvalue;
+ u16 index = REALTEK_USB_VENQT_CMD_IDX;
+ int pipe = usb_sndctrlpipe(udev, 0); /* write_out */
+ u8 *buffer;
+ dma_addr_t dma_addr;
+
+ wvalue = (u16)(addr&0x0000ffff);
+ buffer = usb_alloc_coherent(udev, (size_t)len, GFP_ATOMIC, &dma_addr);
+ if (!buffer)
+ return;
+ memcpy(buffer, data, len);
+ usb_control_msg(udev, pipe, request, reqtype, wvalue,
+ index, buffer, len, 50);
+
+ usb_free_coherent(udev, (size_t)len, buffer, dma_addr);
+}
+
static void _rtl_usb_io_handler_init(struct device *dev,
struct ieee80211_hw *hw)
{
@@ -205,6 +243,7 @@ static void _rtl_usb_io_handler_init(struct device *dev,
rtlpriv->io.read8_sync = _usb_read8_sync;
rtlpriv->io.read16_sync = _usb_read16_sync;
rtlpriv->io.read32_sync = _usb_read32_sync;
+ rtlpriv->io.writeN_sync = _usb_writeN_sync;
}
static void _rtl_usb_io_handler_release(struct ieee80211_hw *hw)
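
Two things change in usb.c above: synchronous vendor reads are retried, and a block-write helper is added. The read path now loops on usb_control_msg() up to MAX_USBCTRL_VENDORREQ_TIMES, except when the register address falls inside the firmware download window (FW_8192C_START_ADDRESS .. FW_8192C_END_ADDRESS), where the transfer is checksummed and a blind retry would not help; the timeout message is also throttled to the first few occurrences through the static count. The new _usb_writeN_sync() copies the caller's buffer into a coherent USB buffer, issues one synchronous vendor write with a 50 ms timeout, frees the buffer, and is exposed through rtlpriv->io.writeN_sync. A caller would use the hook roughly like this (the function name and register address below are illustrative, not from the source):

	/* Illustrative caller of the new writeN_sync hook. */
	static void example_burst_write(struct rtl_priv *rtlpriv)
	{
		u8 block[8] = { 0 };	/* payload for consecutive registers */

		if (rtlpriv->io.writeN_sync)
			rtlpriv->io.writeN_sync(rtlpriv, 0x0000 /* example addr */,
						block, sizeof(block));
	}
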
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index 713c7dd..cdaf142 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -63,6 +63,7 @@
#define AC_MAX 4
#define QOS_QUEUE_NUM 4
#define RTL_MAC80211_NUM_QUEUE 5
+#define REALTEK_USB_VENQT_MAX_BUF_SIZE 254
#define QBSS_LOAD_SIZE 5
#define MAX_WMMELE_LENGTH 64
@@ -943,8 +944,10 @@ struct rtl_io {
unsigned long pci_base_addr; /*device I/O address */
void (*write8_async) (struct rtl_priv *rtlpriv, u32 addr, u8 val);
- void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, __le16 val);
- void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, __le32 val);
+ void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, u16 val);
+ void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, u32 val);
+ void (*writeN_sync) (struct rtl_priv *rtlpriv, u32 addr, void *buf,
+ u16 len);
u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr);
u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr);
@@ -1485,7 +1488,7 @@ struct rtl_intf_ops {
struct rtl_mod_params {
/* default: 0 = using hardware encryption */
- int sw_crypto;
+ bool sw_crypto;
/* default: 0 = DBG_EMERG (0)*/
int debug;
@@ -1541,6 +1544,7 @@ struct rtl_hal_cfg {
struct rtl_locks {
/* mutex */
struct mutex conf_mutex;
+ struct mutex ps_mutex;
/*spin lock */
spinlock_t ips_lock;
@@ -1548,7 +1552,6 @@ struct rtl_locks {
spinlock_t h2c_lock;
spinlock_t rf_ps_lock;
spinlock_t rf_lock;
- spinlock_t lps_lock;
spinlock_t waitq_lock;
/*Dual mac*/
@@ -1573,7 +1576,8 @@ struct rtl_works {
/* For SW LPS */
struct delayed_work ps_work;
struct delayed_work ps_rfon_wq;
- struct tasklet_struct ips_leave_tasklet;
+
+ struct work_struct lps_leave_work;
};
struct rtl_debug {
diff --git a/drivers/net/wireless/wl1251/spi.c b/drivers/net/wireless/wl1251/spi.c
index eaa5f95..6248c35 100644
--- a/drivers/net/wireless/wl1251/spi.c
+++ b/drivers/net/wireless/wl1251/spi.c
@@ -319,7 +319,6 @@ static int __devexit wl1251_spi_remove(struct spi_device *spi)
static struct spi_driver wl1251_spi_driver = {
.driver = {
.name = DRIVER_NAME,
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 3fe388b..af08c86 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -42,16 +42,6 @@ config WL12XX_SDIO
If you choose to build a module, it'll be called wl12xx_sdio.
Say N if unsure.
-config WL12XX_SDIO_TEST
- tristate "TI wl12xx SDIO testing support"
- depends on WL12XX && MMC && WL12XX_SDIO
- default n
- ---help---
- This module adds support for the SDIO bus testing with the
- TI wl12xx chipsets. You probably don't want this unless you are
- testing a new hardware platform. Select this if you want to test the
- SDIO bus which is connected to the wl12xx chip.
-
config WL12XX_PLATFORM_DATA
bool
depends on WL12XX_SDIO != n || WL1251_SDIO != n
diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile
index 621b348..fe67262 100644
--- a/drivers/net/wireless/wl12xx/Makefile
+++ b/drivers/net/wireless/wl12xx/Makefile
@@ -3,14 +3,11 @@ wl12xx-objs = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \
wl12xx_spi-objs = spi.o
wl12xx_sdio-objs = sdio.o
-wl12xx_sdio_test-objs = sdio_test.o
wl12xx-$(CONFIG_NL80211_TESTMODE) += testmode.o
obj-$(CONFIG_WL12XX) += wl12xx.o
obj-$(CONFIG_WL12XX_SPI) += wl12xx_spi.o
obj-$(CONFIG_WL12XX_SDIO) += wl12xx_sdio.o
-obj-$(CONFIG_WL12XX_SDIO_TEST) += wl12xx_sdio_test.o
-
# small builtin driver bit
obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx_platform_data.o
diff --git a/drivers/net/wireless/wl12xx/acx.c b/drivers/net/wireless/wl12xx/acx.c
index ca044a7..7537c40 100644
--- a/drivers/net/wireless/wl12xx/acx.c
+++ b/drivers/net/wireless/wl12xx/acx.c
@@ -29,11 +29,12 @@
#include <linux/slab.h>
#include "wl12xx.h"
+#include "debug.h"
#include "wl12xx_80211.h"
#include "reg.h"
#include "ps.h"
-int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
+int wl1271_acx_wake_up_conditions(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct acx_wake_up_condition *wake_up;
int ret;
@@ -46,7 +47,7 @@ int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
goto out;
}
- wake_up->role_id = wl->role_id;
+ wake_up->role_id = wlvif->role_id;
wake_up->wake_up_event = wl->conf.conn.wake_up_event;
wake_up->listen_interval = wl->conf.conn.listen_interval;
@@ -84,7 +85,8 @@ out:
return ret;
}
-int wl1271_acx_tx_power(struct wl1271 *wl, int power)
+int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ int power)
{
struct acx_current_tx_power *acx;
int ret;
@@ -100,7 +102,7 @@ int wl1271_acx_tx_power(struct wl1271 *wl, int power)
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->current_tx_power = power * 10;
ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx));
@@ -114,7 +116,7 @@ out:
return ret;
}
-int wl1271_acx_feature_cfg(struct wl1271 *wl)
+int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct acx_feature_config *feature;
int ret;
@@ -128,7 +130,7 @@ int wl1271_acx_feature_cfg(struct wl1271 *wl)
}
/* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE are disabled */
- feature->role_id = wl->role_id;
+ feature->role_id = wlvif->role_id;
feature->data_flow_options = 0;
feature->options = 0;
@@ -184,33 +186,8 @@ out:
return ret;
}
-int wl1271_acx_pd_threshold(struct wl1271 *wl)
-{
- struct acx_packet_detection *pd;
- int ret;
-
- wl1271_debug(DEBUG_ACX, "acx data pd threshold");
-
- pd = kzalloc(sizeof(*pd), GFP_KERNEL);
- if (!pd) {
- ret = -ENOMEM;
- goto out;
- }
-
- pd->threshold = cpu_to_le32(wl->conf.rx.packet_detection_threshold);
-
- ret = wl1271_cmd_configure(wl, ACX_PD_THRESHOLD, pd, sizeof(*pd));
- if (ret < 0) {
- wl1271_warning("failed to set pd threshold: %d", ret);
- goto out;
- }
-
-out:
- kfree(pd);
- return ret;
-}
-
-int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time)
+int wl1271_acx_slot(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum acx_slot_type slot_time)
{
struct acx_slot *slot;
int ret;
@@ -223,7 +200,7 @@ int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time)
goto out;
}
- slot->role_id = wl->role_id;
+ slot->role_id = wlvif->role_id;
slot->wone_index = STATION_WONE_INDEX;
slot->slot_time = slot_time;
@@ -238,8 +215,8 @@ out:
return ret;
}
-int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
- void *mc_list, u32 mc_list_len)
+int wl1271_acx_group_address_tbl(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable, void *mc_list, u32 mc_list_len)
{
struct acx_dot11_grp_addr_tbl *acx;
int ret;
@@ -253,7 +230,7 @@ int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
}
/* MAC filtering */
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->enabled = enable;
acx->num_groups = mc_list_len;
memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN);
@@ -270,7 +247,8 @@ out:
return ret;
}
-int wl1271_acx_service_period_timeout(struct wl1271 *wl)
+int wl1271_acx_service_period_timeout(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct acx_rx_timeout *rx_timeout;
int ret;
@@ -283,7 +261,7 @@ int wl1271_acx_service_period_timeout(struct wl1271 *wl)
wl1271_debug(DEBUG_ACX, "acx service period timeout");
- rx_timeout->role_id = wl->role_id;
+ rx_timeout->role_id = wlvif->role_id;
rx_timeout->ps_poll_timeout = cpu_to_le16(wl->conf.rx.ps_poll_timeout);
rx_timeout->upsd_timeout = cpu_to_le16(wl->conf.rx.upsd_timeout);
@@ -300,7 +278,8 @@ out:
return ret;
}
-int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold)
+int wl1271_acx_rts_threshold(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u32 rts_threshold)
{
struct acx_rts_threshold *rts;
int ret;
@@ -320,7 +299,7 @@ int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold)
goto out;
}
- rts->role_id = wl->role_id;
+ rts->role_id = wlvif->role_id;
rts->threshold = cpu_to_le16((u16)rts_threshold);
ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts));
@@ -363,7 +342,8 @@ out:
return ret;
}
-int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
+int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable_filter)
{
struct acx_beacon_filter_option *beacon_filter = NULL;
int ret = 0;
@@ -380,7 +360,7 @@ int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
goto out;
}
- beacon_filter->role_id = wl->role_id;
+ beacon_filter->role_id = wlvif->role_id;
beacon_filter->enable = enable_filter;
/*
@@ -401,7 +381,8 @@ out:
return ret;
}
-int wl1271_acx_beacon_filter_table(struct wl1271 *wl)
+int wl1271_acx_beacon_filter_table(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct acx_beacon_filter_ie_table *ie_table;
int i, idx = 0;
@@ -417,7 +398,7 @@ int wl1271_acx_beacon_filter_table(struct wl1271 *wl)
}
/* configure default beacon pass-through rules */
- ie_table->role_id = wl->role_id;
+ ie_table->role_id = wlvif->role_id;
ie_table->num_ie = 0;
for (i = 0; i < wl->conf.conn.bcn_filt_ie_count; i++) {
struct conf_bcn_filt_rule *r = &(wl->conf.conn.bcn_filt_ie[i]);
@@ -458,7 +439,8 @@ out:
#define ACX_CONN_MONIT_DISABLE_VALUE 0xffffffff
-int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable)
+int wl1271_acx_conn_monit_params(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable)
{
struct acx_conn_monit_params *acx;
u32 threshold = ACX_CONN_MONIT_DISABLE_VALUE;
@@ -479,7 +461,7 @@ int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable)
timeout = wl->conf.conn.bss_lose_timeout;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->synch_fail_thold = cpu_to_le32(threshold);
acx->bss_lose_timeout = cpu_to_le32(timeout);
@@ -582,7 +564,7 @@ out:
return ret;
}
-int wl1271_acx_bcn_dtim_options(struct wl1271 *wl)
+int wl1271_acx_bcn_dtim_options(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct acx_beacon_broadcast *bb;
int ret;
@@ -595,7 +577,7 @@ int wl1271_acx_bcn_dtim_options(struct wl1271 *wl)
goto out;
}
- bb->role_id = wl->role_id;
+ bb->role_id = wlvif->role_id;
bb->beacon_rx_timeout = cpu_to_le16(wl->conf.conn.beacon_rx_timeout);
bb->broadcast_timeout = cpu_to_le16(wl->conf.conn.broadcast_timeout);
bb->rx_broadcast_in_ps = wl->conf.conn.rx_broadcast_in_ps;
@@ -612,7 +594,7 @@ out:
return ret;
}
-int wl1271_acx_aid(struct wl1271 *wl, u16 aid)
+int wl1271_acx_aid(struct wl1271 *wl, struct wl12xx_vif *wlvif, u16 aid)
{
struct acx_aid *acx_aid;
int ret;
@@ -625,7 +607,7 @@ int wl1271_acx_aid(struct wl1271 *wl, u16 aid)
goto out;
}
- acx_aid->role_id = wl->role_id;
+ acx_aid->role_id = wlvif->role_id;
acx_aid->aid = cpu_to_le16(aid);
ret = wl1271_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid));
@@ -668,7 +650,8 @@ out:
return ret;
}
-int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble)
+int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum acx_preamble_type preamble)
{
struct acx_preamble *acx;
int ret;
@@ -681,7 +664,7 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble)
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->preamble = preamble;
ret = wl1271_cmd_configure(wl, ACX_PREAMBLE_TYPE, acx, sizeof(*acx));
@@ -695,7 +678,7 @@ out:
return ret;
}
-int wl1271_acx_cts_protect(struct wl1271 *wl,
+int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif,
enum acx_ctsprotect_type ctsprotect)
{
struct acx_ctsprotect *acx;
@@ -709,7 +692,7 @@ int wl1271_acx_cts_protect(struct wl1271 *wl,
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->ctsprotect = ctsprotect;
ret = wl1271_cmd_configure(wl, ACX_CTS_PROTECTION, acx, sizeof(*acx));
@@ -739,7 +722,7 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
return 0;
}
-int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
+int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct acx_rate_policy *acx;
struct conf_tx_rate_class *c = &wl->conf.tx.sta_rc_conf;
@@ -755,11 +738,11 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
}
wl1271_debug(DEBUG_ACX, "basic_rate: 0x%x, full_rate: 0x%x",
- wl->basic_rate, wl->rate_set);
+ wlvif->basic_rate, wlvif->rate_set);
/* configure one basic rate class */
- acx->rate_policy_idx = cpu_to_le32(ACX_TX_BASIC_RATE);
- acx->rate_policy.enabled_rates = cpu_to_le32(wl->basic_rate);
+ acx->rate_policy_idx = cpu_to_le32(wlvif->sta.basic_rate_idx);
+ acx->rate_policy.enabled_rates = cpu_to_le32(wlvif->basic_rate);
acx->rate_policy.short_retry_limit = c->short_retry_limit;
acx->rate_policy.long_retry_limit = c->long_retry_limit;
acx->rate_policy.aflags = c->aflags;
@@ -771,8 +754,8 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
}
/* configure one AP supported rate class */
- acx->rate_policy_idx = cpu_to_le32(ACX_TX_AP_FULL_RATE);
- acx->rate_policy.enabled_rates = cpu_to_le32(wl->rate_set);
+ acx->rate_policy_idx = cpu_to_le32(wlvif->sta.ap_rate_idx);
+ acx->rate_policy.enabled_rates = cpu_to_le32(wlvif->rate_set);
acx->rate_policy.short_retry_limit = c->short_retry_limit;
acx->rate_policy.long_retry_limit = c->long_retry_limit;
acx->rate_policy.aflags = c->aflags;
@@ -788,7 +771,7 @@ int wl1271_acx_sta_rate_policies(struct wl1271 *wl)
* (p2p packets should always go out with OFDM rates, even
* if we are currently connected to 11b AP)
*/
- acx->rate_policy_idx = cpu_to_le32(ACX_TX_BASIC_RATE_P2P);
+ acx->rate_policy_idx = cpu_to_le32(wlvif->sta.p2p_rate_idx);
acx->rate_policy.enabled_rates =
cpu_to_le32(CONF_TX_RATE_MASK_BASIC_P2P);
acx->rate_policy.short_retry_limit = c->short_retry_limit;
@@ -839,8 +822,8 @@ out:
return ret;
}
-int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
- u8 aifsn, u16 txop)
+int wl1271_acx_ac_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 ac, u8 cw_min, u16 cw_max, u8 aifsn, u16 txop)
{
struct acx_ac_cfg *acx;
int ret = 0;
@@ -855,7 +838,7 @@ int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->ac = ac;
acx->cw_min = cw_min;
acx->cw_max = cpu_to_le16(cw_max);
@@ -873,7 +856,8 @@ out:
return ret;
}
-int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
+int wl1271_acx_tid_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 queue_id, u8 channel_type,
u8 tsid, u8 ps_scheme, u8 ack_policy,
u32 apsd_conf0, u32 apsd_conf1)
{
@@ -889,7 +873,7 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->queue_id = queue_id;
acx->channel_type = channel_type;
acx->tsid = tsid;
@@ -1098,7 +1082,8 @@ out:
return ret;
}
-int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
+int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable)
{
struct wl1271_acx_bet_enable *acx = NULL;
int ret = 0;
@@ -1114,7 +1099,7 @@ int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->enable = enable ? CONF_BET_MODE_ENABLE : CONF_BET_MODE_DISABLE;
acx->max_consecutive = wl->conf.conn.bet_max_consecutive;
@@ -1129,7 +1114,8 @@ out:
return ret;
}
-int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address)
+int wl1271_acx_arp_ip_filter(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 enable, __be32 address)
{
struct wl1271_acx_arp_filter *acx;
int ret;
@@ -1142,7 +1128,7 @@ int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address)
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->version = ACX_IPV4_VERSION;
acx->enable = enable;
@@ -1189,7 +1175,8 @@ out:
return ret;
}
-int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable)
+int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable)
{
struct wl1271_acx_keep_alive_mode *acx = NULL;
int ret = 0;
@@ -1202,7 +1189,7 @@ int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable)
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->enabled = enable;
ret = wl1271_cmd_configure(wl, ACX_KEEP_ALIVE_MODE, acx, sizeof(*acx));
@@ -1216,7 +1203,8 @@ out:
return ret;
}
-int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid)
+int wl1271_acx_keep_alive_config(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 index, u8 tpl_valid)
{
struct wl1271_acx_keep_alive_config *acx = NULL;
int ret = 0;
@@ -1229,7 +1217,7 @@ int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid)
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->period = cpu_to_le32(wl->conf.conn.keep_alive_interval);
acx->index = index;
acx->tpl_validation = tpl_valid;
@@ -1247,8 +1235,8 @@ out:
return ret;
}
-int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
- s16 thold, u8 hyst)
+int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable, s16 thold, u8 hyst)
{
struct wl1271_acx_rssi_snr_trigger *acx = NULL;
int ret = 0;
@@ -1261,9 +1249,9 @@ int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
goto out;
}
- wl->last_rssi_event = -1;
+ wlvif->last_rssi_event = -1;
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->pacing = cpu_to_le16(wl->conf.roam_trigger.trigger_pacing);
acx->metric = WL1271_ACX_TRIG_METRIC_RSSI_BEACON;
acx->type = WL1271_ACX_TRIG_TYPE_EDGE;
@@ -1288,7 +1276,8 @@ out:
return ret;
}
-int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl)
+int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct wl1271_acx_rssi_snr_avg_weights *acx = NULL;
struct conf_roam_trigger_settings *c = &wl->conf.roam_trigger;
@@ -1302,7 +1291,7 @@ int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl)
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->rssi_beacon = c->avg_weight_rssi_beacon;
acx->rssi_data = c->avg_weight_rssi_data;
acx->snr_beacon = c->avg_weight_snr_beacon;
@@ -1367,6 +1356,7 @@ out:
}
int wl1271_acx_set_ht_information(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
u16 ht_operation_mode)
{
struct wl1271_acx_ht_information *acx;
@@ -1380,7 +1370,7 @@ int wl1271_acx_set_ht_information(struct wl1271 *wl,
goto out;
}
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->ht_protection =
(u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION);
acx->rifs_mode = 0;
@@ -1402,7 +1392,8 @@ out:
}
/* Configure BA session initiator/receiver parameters setting in the FW. */
-int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl)
+int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct wl1271_acx_ba_initiator_policy *acx;
int ret;
@@ -1416,7 +1407,7 @@ int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl)
}
/* set for the current role */
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->tid_bitmap = wl->conf.ht.tx_ba_tid_bitmap;
acx->win_size = wl->conf.ht.tx_ba_win_size;
acx->inactivity_timeout = wl->conf.ht.inactivity_timeout;
@@ -1494,7 +1485,8 @@ out:
return ret;
}
-int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable)
+int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable)
{
struct wl1271_acx_ps_rx_streaming *rx_streaming;
u32 conf_queues, enable_queues;
@@ -1523,7 +1515,7 @@ int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable)
if (!(conf_queues & BIT(i)))
continue;
- rx_streaming->role_id = wl->role_id;
+ rx_streaming->role_id = wlvif->role_id;
rx_streaming->tid = i;
rx_streaming->enable = enable_queues & BIT(i);
rx_streaming->period = wl->conf.rx_streaming.interval;
@@ -1542,7 +1534,7 @@ out:
return ret;
}
-int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl)
+int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl1271_acx_ap_max_tx_retry *acx = NULL;
int ret;
@@ -1553,7 +1545,7 @@ int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl)
if (!acx)
return -ENOMEM;
- acx->role_id = wl->role_id;
+ acx->role_id = wlvif->role_id;
acx->max_tx_retry = cpu_to_le16(wl->conf.tx.max_tx_retries);
ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx));
@@ -1567,7 +1559,7 @@ out:
return ret;
}
-int wl1271_acx_config_ps(struct wl1271 *wl)
+int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl1271_acx_config_ps *config_ps;
int ret;
@@ -1582,7 +1574,7 @@ int wl1271_acx_config_ps(struct wl1271 *wl)
config_ps->exit_retries = wl->conf.conn.psm_exit_retries;
config_ps->enter_retries = wl->conf.conn.psm_entry_retries;
- config_ps->null_data_rate = cpu_to_le32(wl->basic_rate);
+ config_ps->null_data_rate = cpu_to_le32(wlvif->basic_rate);
ret = wl1271_cmd_configure(wl, ACX_CONFIG_PS, config_ps,
sizeof(*config_ps));
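
Every ACX helper in acx.c above now takes the per-interface struct wl12xx_vif and tags its command with wlvif->role_id (and per-vif rate and bitmap state) instead of the former global wl->role_id, which is the groundwork for running several interfaces on one wl1271 core. The repeated shape of the conversion, as a sketch (example_acx_cmd, struct example_acx and EXAMPLE_ACX_ID are placeholders, not symbols from the source):

	int example_acx_cmd(struct wl1271 *wl, struct wl12xx_vif *wlvif)
	{
		struct example_acx *acx;
		int ret;

		acx = kzalloc(sizeof(*acx), GFP_KERNEL);
		if (!acx)
			return -ENOMEM;

		acx->role_id = wlvif->role_id;	/* was: wl->role_id */
		ret = wl1271_cmd_configure(wl, EXAMPLE_ACX_ID, acx, sizeof(*acx));

		kfree(acx);
		return ret;
	}
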
diff --git a/drivers/net/wireless/wl12xx/acx.h b/drivers/net/wireless/wl12xx/acx.h
index e3f93b4..69892b4 100644
--- a/drivers/net/wireless/wl12xx/acx.h
+++ b/drivers/net/wireless/wl12xx/acx.h
@@ -171,13 +171,6 @@ struct acx_rx_msdu_lifetime {
__le32 lifetime;
} __packed;
-struct acx_packet_detection {
- struct acx_header header;
-
- __le32 threshold;
-} __packed;
-
-
enum acx_slot_type {
SLOT_TIME_LONG = 0,
SLOT_TIME_SHORT = 1,
@@ -654,11 +647,6 @@ struct acx_rate_class {
u8 reserved;
};
-#define ACX_TX_BASIC_RATE 0
-#define ACX_TX_AP_FULL_RATE 1
-#define ACX_TX_BASIC_RATE_P2P 2
-#define ACX_TX_AP_MODE_MGMT_RATE 4
-#define ACX_TX_AP_MODE_BCST_RATE 5
struct acx_rate_policy {
struct acx_header header;
@@ -1234,39 +1222,48 @@ enum {
};
-int wl1271_acx_wake_up_conditions(struct wl1271 *wl);
+int wl1271_acx_wake_up_conditions(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif);
int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth);
-int wl1271_acx_tx_power(struct wl1271 *wl, int power);
-int wl1271_acx_feature_cfg(struct wl1271 *wl);
+int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ int power);
+int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl1271_acx_mem_map(struct wl1271 *wl,
struct acx_header *mem_map, size_t len);
int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl);
-int wl1271_acx_pd_threshold(struct wl1271 *wl);
-int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time);
-int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
- void *mc_list, u32 mc_list_len);
-int wl1271_acx_service_period_timeout(struct wl1271 *wl);
-int wl1271_acx_rts_threshold(struct wl1271 *wl, u32 rts_threshold);
+int wl1271_acx_slot(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum acx_slot_type slot_time);
+int wl1271_acx_group_address_tbl(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable, void *mc_list, u32 mc_list_len);
+int wl1271_acx_service_period_timeout(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif);
+int wl1271_acx_rts_threshold(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u32 rts_threshold);
int wl1271_acx_dco_itrim_params(struct wl1271 *wl);
-int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
-int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
-int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable);
+int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable_filter);
+int wl1271_acx_beacon_filter_table(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif);
+int wl1271_acx_conn_monit_params(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable);
int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable);
int wl12xx_acx_sg_cfg(struct wl1271 *wl);
int wl1271_acx_cca_threshold(struct wl1271 *wl);
-int wl1271_acx_bcn_dtim_options(struct wl1271 *wl);
-int wl1271_acx_aid(struct wl1271 *wl, u16 aid);
+int wl1271_acx_bcn_dtim_options(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl1271_acx_aid(struct wl1271 *wl, struct wl12xx_vif *wlvif, u16 aid);
int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask);
-int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble);
-int wl1271_acx_cts_protect(struct wl1271 *wl,
+int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum acx_preamble_type preamble);
+int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif,
enum acx_ctsprotect_type ctsprotect);
int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
-int wl1271_acx_sta_rate_policies(struct wl1271 *wl);
+int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
u8 idx);
-int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
- u8 aifsn, u16 txop);
-int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
+int wl1271_acx_ac_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 ac, u8 cw_min, u16 cw_max, u8 aifsn, u16 txop);
+int wl1271_acx_tid_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 queue_id, u8 channel_type,
u8 tsid, u8 ps_scheme, u8 ack_policy,
u32 apsd_conf0, u32 apsd_conf1);
int wl1271_acx_frag_threshold(struct wl1271 *wl, u32 frag_threshold);
@@ -1276,26 +1273,34 @@ int wl1271_acx_init_mem_config(struct wl1271 *wl);
int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap);
int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
int wl1271_acx_smart_reflex(struct wl1271 *wl);
-int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
-int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address);
+int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable);
+int wl1271_acx_arp_ip_filter(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 enable, __be32 address);
int wl1271_acx_pm_config(struct wl1271 *wl);
-int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable);
-int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid);
-int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
- s16 thold, u8 hyst);
-int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl);
+int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *vif,
+ bool enable);
+int wl1271_acx_keep_alive_config(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 index, u8 tpl_valid);
+int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable, s16 thold, u8 hyst);
+int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif);
int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
struct ieee80211_sta_ht_cap *ht_cap,
bool allow_ht_operation, u8 hlid);
int wl1271_acx_set_ht_information(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
u16 ht_operation_mode);
-int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl);
+int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif);
int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
u16 ssn, bool enable, u8 peer_hlid);
int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
-int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, bool enable);
-int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl);
-int wl1271_acx_config_ps(struct wl1271 *wl);
+int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable);
+int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
int wl1271_acx_fm_coex(struct wl1271 *wl);
int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/boot.c b/drivers/net/wireless/wl12xx/boot.c
index 6813379..8f9cf5a 100644
--- a/drivers/net/wireless/wl12xx/boot.c
+++ b/drivers/net/wireless/wl12xx/boot.c
@@ -25,6 +25,7 @@
#include <linux/wl12xx.h>
#include <linux/export.h>
+#include "debug.h"
#include "acx.h"
#include "reg.h"
#include "boot.h"
@@ -347,6 +348,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
nvs_ptr += 3;
for (i = 0; i < burst_len; i++) {
+ if (nvs_ptr + 3 >= (u8 *) wl->nvs + nvs_len)
+ goto out_badnvs;
+
val = (nvs_ptr[0] | (nvs_ptr[1] << 8)
| (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24));
@@ -358,6 +362,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
nvs_ptr += 4;
dest_addr += 4;
}
+
+ if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
+ goto out_badnvs;
}
/*
@@ -369,6 +376,10 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
*/
nvs_ptr = (u8 *)wl->nvs +
ALIGN(nvs_ptr - (u8 *)wl->nvs + 7, 4);
+
+ if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
+ goto out_badnvs;
+
nvs_len -= nvs_ptr - (u8 *)wl->nvs;
/* Now we must set the partition correctly */
@@ -384,6 +395,10 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
kfree(nvs_aligned);
return 0;
+
+out_badnvs:
+ wl1271_error("nvs data is malformed");
+ return -EILSEQ;
}
static void wl1271_boot_enable_interrupts(struct wl1271 *wl)
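
The boot.c hunks above add range checks to the NVS burst-write parser: before each 4-byte read, nvs_ptr is compared against the end of the NVS blob (wl->nvs + nvs_len), and a truncated or corrupt image now fails cleanly with -EILSEQ through the out_badnvs label instead of reading past the buffer. The guard boils down to this shape (a generic sketch using kernel-style types, not the driver's exact code):

	/* Read one little-endian 32-bit word at p, refusing to run past end. */
	static int read_word_checked(const u8 *p, const u8 *end, u32 *out)
	{
		if (p + 3 >= end)
			return -EILSEQ;	/* would read beyond the blob */
		*out = p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
		return 0;
	}
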
diff --git a/drivers/net/wireless/wl12xx/cmd.c b/drivers/net/wireless/wl12xx/cmd.c
index a52299e..25990bd 100644
--- a/drivers/net/wireless/wl12xx/cmd.c
+++ b/drivers/net/wireless/wl12xx/cmd.c
@@ -29,6 +29,7 @@
#include <linux/slab.h>
#include "wl12xx.h"
+#include "debug.h"
#include "reg.h"
#include "io.h"
#include "acx.h"
@@ -120,6 +121,11 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
if (!wl->nvs)
return -ENODEV;
+ if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+ wl1271_warning("FEM index from INI out of bounds");
+ return -EINVAL;
+ }
+
gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
if (!gen_parms)
return -ENOMEM;
@@ -143,6 +149,12 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
gp->tx_bip_fem_manufacturer =
gen_parms->general_params.tx_bip_fem_manufacturer;
+ if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+ wl1271_warning("FEM index from FW out of bounds");
+ ret = -EINVAL;
+ goto out;
+ }
+
wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
@@ -162,6 +174,11 @@ int wl128x_cmd_general_parms(struct wl1271 *wl)
if (!wl->nvs)
return -ENODEV;
+ if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+ wl1271_warning("FEM index from ini out of bounds");
+ return -EINVAL;
+ }
+
gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
if (!gen_parms)
return -ENOMEM;
@@ -186,6 +203,12 @@ int wl128x_cmd_general_parms(struct wl1271 *wl)
gp->tx_bip_fem_manufacturer =
gen_parms->general_params.tx_bip_fem_manufacturer;
+ if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
+ wl1271_warning("FEM index from FW out of bounds");
+ ret = -EINVAL;
+ goto out;
+ }
+
wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
answer ? "auto" : "manual", gp->tx_bip_fem_manufacturer);
@@ -358,7 +381,8 @@ static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
return 0;
}
-int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id)
+int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type,
+ u8 *role_id)
{
struct wl12xx_cmd_role_enable *cmd;
int ret;
@@ -381,7 +405,7 @@ int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id)
goto out_free;
}
- memcpy(cmd->mac_address, wl->mac_addr, ETH_ALEN);
+ memcpy(cmd->mac_address, addr, ETH_ALEN);
cmd->role_type = role_type;
ret = wl1271_cmd_send(wl, CMD_ROLE_ENABLE, cmd, sizeof(*cmd), 0);
@@ -433,37 +457,41 @@ out:
return ret;
}
-static int wl12xx_allocate_link(struct wl1271 *wl, u8 *hlid)
+int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
{
u8 link = find_first_zero_bit(wl->links_map, WL12XX_MAX_LINKS);
if (link >= WL12XX_MAX_LINKS)
return -EBUSY;
__set_bit(link, wl->links_map);
+ __set_bit(link, wlvif->links_map);
*hlid = link;
return 0;
}
-static void wl12xx_free_link(struct wl1271 *wl, u8 *hlid)
+void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
{
if (*hlid == WL12XX_INVALID_LINK_ID)
return;
__clear_bit(*hlid, wl->links_map);
+ __clear_bit(*hlid, wlvif->links_map);
*hlid = WL12XX_INVALID_LINK_ID;
}
-static int wl12xx_get_new_session_id(struct wl1271 *wl)
+static int wl12xx_get_new_session_id(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
- if (wl->session_counter >= SESSION_COUNTER_MAX)
- wl->session_counter = 0;
+ if (wlvif->session_counter >= SESSION_COUNTER_MAX)
+ wlvif->session_counter = 0;
- wl->session_counter++;
+ wlvif->session_counter++;
- return wl->session_counter;
+ return wlvif->session_counter;
}
-int wl12xx_cmd_role_start_dev(struct wl1271 *wl)
+static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_role_start *cmd;
int ret;
@@ -474,20 +502,20 @@ int wl12xx_cmd_role_start_dev(struct wl1271 *wl)
goto out;
}
- wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wl->dev_role_id);
+ wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wlvif->dev_role_id);
- cmd->role_id = wl->dev_role_id;
- if (wl->band == IEEE80211_BAND_5GHZ)
+ cmd->role_id = wlvif->dev_role_id;
+ if (wlvif->band == IEEE80211_BAND_5GHZ)
cmd->band = WL12XX_BAND_5GHZ;
- cmd->channel = wl->channel;
+ cmd->channel = wlvif->channel;
- if (wl->dev_hlid == WL12XX_INVALID_LINK_ID) {
- ret = wl12xx_allocate_link(wl, &wl->dev_hlid);
+ if (wlvif->dev_hlid == WL12XX_INVALID_LINK_ID) {
+ ret = wl12xx_allocate_link(wl, wlvif, &wlvif->dev_hlid);
if (ret)
goto out_free;
}
- cmd->device.hlid = wl->dev_hlid;
- cmd->device.session = wl->session_counter;
+ cmd->device.hlid = wlvif->dev_hlid;
+ cmd->device.session = wlvif->session_counter;
wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d",
cmd->role_id, cmd->device.hlid, cmd->device.session);
@@ -502,9 +530,7 @@ int wl12xx_cmd_role_start_dev(struct wl1271 *wl)
err_hlid:
/* clear links on error */
- __clear_bit(wl->dev_hlid, wl->links_map);
- wl->dev_hlid = WL12XX_INVALID_LINK_ID;
-
+ wl12xx_free_link(wl, wlvif, &wlvif->dev_hlid);
out_free:
kfree(cmd);
@@ -513,12 +539,13 @@ out:
return ret;
}
-int wl12xx_cmd_role_stop_dev(struct wl1271 *wl)
+static int wl12xx_cmd_role_stop_dev(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_role_stop *cmd;
int ret;
- if (WARN_ON(wl->dev_hlid == WL12XX_INVALID_LINK_ID))
+ if (WARN_ON(wlvif->dev_hlid == WL12XX_INVALID_LINK_ID))
return -EINVAL;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -529,7 +556,7 @@ int wl12xx_cmd_role_stop_dev(struct wl1271 *wl)
wl1271_debug(DEBUG_CMD, "cmd role stop dev");
- cmd->role_id = wl->dev_role_id;
+ cmd->role_id = wlvif->dev_role_id;
cmd->disc_type = DISCONNECT_IMMEDIATE;
cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED);
@@ -545,7 +572,7 @@ int wl12xx_cmd_role_stop_dev(struct wl1271 *wl)
goto out_free;
}
- wl12xx_free_link(wl, &wl->dev_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->dev_hlid);
out_free:
kfree(cmd);
@@ -554,8 +581,9 @@ out:
return ret;
}
-int wl12xx_cmd_role_start_sta(struct wl1271 *wl)
+int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct wl12xx_cmd_role_start *cmd;
int ret;
@@ -565,33 +593,33 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl)
goto out;
}
- wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wl->role_id);
+ wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wlvif->role_id);
- cmd->role_id = wl->role_id;
- if (wl->band == IEEE80211_BAND_5GHZ)
+ cmd->role_id = wlvif->role_id;
+ if (wlvif->band == IEEE80211_BAND_5GHZ)
cmd->band = WL12XX_BAND_5GHZ;
- cmd->channel = wl->channel;
- cmd->sta.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
- cmd->sta.beacon_interval = cpu_to_le16(wl->beacon_int);
+ cmd->channel = wlvif->channel;
+ cmd->sta.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
+ cmd->sta.beacon_interval = cpu_to_le16(wlvif->beacon_int);
cmd->sta.ssid_type = WL12XX_SSID_TYPE_ANY;
- cmd->sta.ssid_len = wl->ssid_len;
- memcpy(cmd->sta.ssid, wl->ssid, wl->ssid_len);
- memcpy(cmd->sta.bssid, wl->bssid, ETH_ALEN);
- cmd->sta.local_rates = cpu_to_le32(wl->rate_set);
+ cmd->sta.ssid_len = wlvif->ssid_len;
+ memcpy(cmd->sta.ssid, wlvif->ssid, wlvif->ssid_len);
+ memcpy(cmd->sta.bssid, vif->bss_conf.bssid, ETH_ALEN);
+ cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set);
- if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) {
- ret = wl12xx_allocate_link(wl, &wl->sta_hlid);
+ if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) {
+ ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid);
if (ret)
goto out_free;
}
- cmd->sta.hlid = wl->sta_hlid;
- cmd->sta.session = wl12xx_get_new_session_id(wl);
- cmd->sta.remote_rates = cpu_to_le32(wl->rate_set);
+ cmd->sta.hlid = wlvif->sta.hlid;
+ cmd->sta.session = wl12xx_get_new_session_id(wl, wlvif);
+ cmd->sta.remote_rates = cpu_to_le32(wlvif->rate_set);
wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d "
"basic_rate_set: 0x%x, remote_rates: 0x%x",
- wl->role_id, cmd->sta.hlid, cmd->sta.session,
- wl->basic_rate_set, wl->rate_set);
+ wlvif->role_id, cmd->sta.hlid, cmd->sta.session,
+ wlvif->basic_rate_set, wlvif->rate_set);
ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
if (ret < 0) {
@@ -603,7 +631,7 @@ int wl12xx_cmd_role_start_sta(struct wl1271 *wl)
err_hlid:
/* clear links on error. */
- wl12xx_free_link(wl, &wl->sta_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
out_free:
kfree(cmd);
@@ -613,12 +641,12 @@ out:
}
/* use this function to stop ibss as well */
-int wl12xx_cmd_role_stop_sta(struct wl1271 *wl)
+int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_role_stop *cmd;
int ret;
- if (WARN_ON(wl->sta_hlid == WL12XX_INVALID_LINK_ID))
+ if (WARN_ON(wlvif->sta.hlid == WL12XX_INVALID_LINK_ID))
return -EINVAL;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -627,9 +655,9 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl)
goto out;
}
- wl1271_debug(DEBUG_CMD, "cmd role stop sta %d", wl->role_id);
+ wl1271_debug(DEBUG_CMD, "cmd role stop sta %d", wlvif->role_id);
- cmd->role_id = wl->role_id;
+ cmd->role_id = wlvif->role_id;
cmd->disc_type = DISCONNECT_IMMEDIATE;
cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED);
@@ -639,7 +667,7 @@ int wl12xx_cmd_role_stop_sta(struct wl1271 *wl)
goto out_free;
}
- wl12xx_free_link(wl, &wl->sta_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
out_free:
kfree(cmd);
@@ -648,16 +676,17 @@ out:
return ret;
}
-int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
+int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_role_start *cmd;
- struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
int ret;
- wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wl->role_id);
+ wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wlvif->role_id);
/* trying to use hidden SSID with an old hostapd version */
- if (wl->ssid_len == 0 && !bss_conf->hidden_ssid) {
+ if (wlvif->ssid_len == 0 && !bss_conf->hidden_ssid) {
wl1271_error("got a null SSID from beacon/bss");
ret = -EINVAL;
goto out;
@@ -669,30 +698,30 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
goto out;
}
- ret = wl12xx_allocate_link(wl, &wl->ap_global_hlid);
+ ret = wl12xx_allocate_link(wl, wlvif, &wlvif->ap.global_hlid);
if (ret < 0)
goto out_free;
- ret = wl12xx_allocate_link(wl, &wl->ap_bcast_hlid);
+ ret = wl12xx_allocate_link(wl, wlvif, &wlvif->ap.bcast_hlid);
if (ret < 0)
goto out_free_global;
- cmd->role_id = wl->role_id;
+ cmd->role_id = wlvif->role_id;
cmd->ap.aging_period = cpu_to_le16(wl->conf.tx.ap_aging_period);
cmd->ap.bss_index = WL1271_AP_BSS_INDEX;
- cmd->ap.global_hlid = wl->ap_global_hlid;
- cmd->ap.broadcast_hlid = wl->ap_bcast_hlid;
- cmd->ap.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
- cmd->ap.beacon_interval = cpu_to_le16(wl->beacon_int);
+ cmd->ap.global_hlid = wlvif->ap.global_hlid;
+ cmd->ap.broadcast_hlid = wlvif->ap.bcast_hlid;
+ cmd->ap.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
+ cmd->ap.beacon_interval = cpu_to_le16(wlvif->beacon_int);
cmd->ap.dtim_interval = bss_conf->dtim_period;
cmd->ap.beacon_expiry = WL1271_AP_DEF_BEACON_EXP;
- cmd->channel = wl->channel;
+ cmd->channel = wlvif->channel;
if (!bss_conf->hidden_ssid) {
/* take the SSID from the beacon for backward compatibility */
cmd->ap.ssid_type = WL12XX_SSID_TYPE_PUBLIC;
- cmd->ap.ssid_len = wl->ssid_len;
- memcpy(cmd->ap.ssid, wl->ssid, wl->ssid_len);
+ cmd->ap.ssid_len = wlvif->ssid_len;
+ memcpy(cmd->ap.ssid, wlvif->ssid, wlvif->ssid_len);
} else {
cmd->ap.ssid_type = WL12XX_SSID_TYPE_HIDDEN;
cmd->ap.ssid_len = bss_conf->ssid_len;
@@ -701,7 +730,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
cmd->ap.local_rates = cpu_to_le32(0xffffffff);
- switch (wl->band) {
+ switch (wlvif->band) {
case IEEE80211_BAND_2GHZ:
cmd->band = RADIO_BAND_2_4GHZ;
break;
@@ -709,7 +738,7 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
cmd->band = RADIO_BAND_5GHZ;
break;
default:
- wl1271_warning("ap start - unknown band: %d", (int)wl->band);
+ wl1271_warning("ap start - unknown band: %d", (int)wlvif->band);
cmd->band = RADIO_BAND_2_4GHZ;
break;
}
@@ -723,10 +752,10 @@ int wl12xx_cmd_role_start_ap(struct wl1271 *wl)
goto out_free;
out_free_bcast:
- wl12xx_free_link(wl, &wl->ap_bcast_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->ap.bcast_hlid);
out_free_global:
- wl12xx_free_link(wl, &wl->ap_global_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->ap.global_hlid);
out_free:
kfree(cmd);
@@ -735,7 +764,7 @@ out:
return ret;
}
-int wl12xx_cmd_role_stop_ap(struct wl1271 *wl)
+int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_role_stop *cmd;
int ret;
@@ -746,9 +775,9 @@ int wl12xx_cmd_role_stop_ap(struct wl1271 *wl)
goto out;
}
- wl1271_debug(DEBUG_CMD, "cmd role stop ap %d", wl->role_id);
+ wl1271_debug(DEBUG_CMD, "cmd role stop ap %d", wlvif->role_id);
- cmd->role_id = wl->role_id;
+ cmd->role_id = wlvif->role_id;
ret = wl1271_cmd_send(wl, CMD_ROLE_STOP, cmd, sizeof(*cmd), 0);
if (ret < 0) {
@@ -756,8 +785,8 @@ int wl12xx_cmd_role_stop_ap(struct wl1271 *wl)
goto out_free;
}
- wl12xx_free_link(wl, &wl->ap_bcast_hlid);
- wl12xx_free_link(wl, &wl->ap_global_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->ap.bcast_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->ap.global_hlid);
out_free:
kfree(cmd);
@@ -766,10 +795,11 @@ out:
return ret;
}
-int wl12xx_cmd_role_start_ibss(struct wl1271 *wl)
+int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct wl12xx_cmd_role_start *cmd;
- struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
int ret;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -778,35 +808,36 @@ int wl12xx_cmd_role_start_ibss(struct wl1271 *wl)
goto out;
}
- wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wl->role_id);
+ wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wlvif->role_id);
- cmd->role_id = wl->role_id;
- if (wl->band == IEEE80211_BAND_5GHZ)
+ cmd->role_id = wlvif->role_id;
+ if (wlvif->band == IEEE80211_BAND_5GHZ)
cmd->band = WL12XX_BAND_5GHZ;
- cmd->channel = wl->channel;
- cmd->ibss.basic_rate_set = cpu_to_le32(wl->basic_rate_set);
- cmd->ibss.beacon_interval = cpu_to_le16(wl->beacon_int);
+ cmd->channel = wlvif->channel;
+ cmd->ibss.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
+ cmd->ibss.beacon_interval = cpu_to_le16(wlvif->beacon_int);
cmd->ibss.dtim_interval = bss_conf->dtim_period;
cmd->ibss.ssid_type = WL12XX_SSID_TYPE_ANY;
- cmd->ibss.ssid_len = wl->ssid_len;
- memcpy(cmd->ibss.ssid, wl->ssid, wl->ssid_len);
- memcpy(cmd->ibss.bssid, wl->bssid, ETH_ALEN);
- cmd->sta.local_rates = cpu_to_le32(wl->rate_set);
+ cmd->ibss.ssid_len = wlvif->ssid_len;
+ memcpy(cmd->ibss.ssid, wlvif->ssid, wlvif->ssid_len);
+ memcpy(cmd->ibss.bssid, vif->bss_conf.bssid, ETH_ALEN);
+ cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set);
- if (wl->sta_hlid == WL12XX_INVALID_LINK_ID) {
- ret = wl12xx_allocate_link(wl, &wl->sta_hlid);
+ if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) {
+ ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid);
if (ret)
goto out_free;
}
- cmd->ibss.hlid = wl->sta_hlid;
- cmd->ibss.remote_rates = cpu_to_le32(wl->rate_set);
+ cmd->ibss.hlid = wlvif->sta.hlid;
+ cmd->ibss.remote_rates = cpu_to_le32(wlvif->rate_set);
wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d "
"basic_rate_set: 0x%x, remote_rates: 0x%x",
- wl->role_id, cmd->sta.hlid, cmd->sta.session,
- wl->basic_rate_set, wl->rate_set);
+ wlvif->role_id, cmd->sta.hlid, cmd->sta.session,
+ wlvif->basic_rate_set, wlvif->rate_set);
- wl1271_debug(DEBUG_CMD, "wl->bssid = %pM", wl->bssid);
+ wl1271_debug(DEBUG_CMD, "vif->bss_conf.bssid = %pM",
+ vif->bss_conf.bssid);
ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
if (ret < 0) {
@@ -818,7 +849,7 @@ int wl12xx_cmd_role_start_ibss(struct wl1271 *wl)
err_hlid:
/* clear links on error. */
- wl12xx_free_link(wl, &wl->sta_hlid);
+ wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
out_free:
kfree(cmd);
@@ -962,7 +993,8 @@ out:
return ret;
}
-int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
+int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 ps_mode)
{
struct wl1271_cmd_ps_params *ps_params = NULL;
int ret = 0;
@@ -975,7 +1007,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
goto out;
}
- ps_params->role_id = wl->role_id;
+ ps_params->role_id = wlvif->role_id;
ps_params->ps_mode = ps_mode;
ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
@@ -1030,7 +1062,7 @@ out:
return ret;
}
-int wl1271_cmd_build_null_data(struct wl1271 *wl)
+int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct sk_buff *skb = NULL;
int size;
@@ -1038,11 +1070,12 @@ int wl1271_cmd_build_null_data(struct wl1271 *wl)
int ret = -ENOMEM;
- if (wl->bss_type == BSS_TYPE_IBSS) {
+ if (wlvif->bss_type == BSS_TYPE_IBSS) {
size = sizeof(struct wl12xx_null_data_template);
ptr = NULL;
} else {
- skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+ skb = ieee80211_nullfunc_get(wl->hw,
+ wl12xx_wlvif_to_vif(wlvif));
if (!skb)
goto out;
size = skb->len;
@@ -1050,7 +1083,7 @@ int wl1271_cmd_build_null_data(struct wl1271 *wl)
}
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, ptr, size, 0,
- wl->basic_rate);
+ wlvif->basic_rate);
out:
dev_kfree_skb(skb);
@@ -1061,19 +1094,21 @@ out:
}
-int wl1271_cmd_build_klv_null_data(struct wl1271 *wl)
+int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct sk_buff *skb = NULL;
int ret = -ENOMEM;
- skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+ skb = ieee80211_nullfunc_get(wl->hw, vif);
if (!skb)
goto out;
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV,
skb->data, skb->len,
CMD_TEMPL_KLV_IDX_NULL_DATA,
- wl->basic_rate);
+ wlvif->basic_rate);
out:
dev_kfree_skb(skb);
@@ -1084,32 +1119,35 @@ out:
}
-int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid)
+int wl1271_cmd_build_ps_poll(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u16 aid)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct sk_buff *skb;
int ret = 0;
- skb = ieee80211_pspoll_get(wl->hw, wl->vif);
+ skb = ieee80211_pspoll_get(wl->hw, vif);
if (!skb)
goto out;
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, skb->data,
- skb->len, 0, wl->basic_rate_set);
+ skb->len, 0, wlvif->basic_rate_set);
out:
dev_kfree_skb(skb);
return ret;
}
-int wl1271_cmd_build_probe_req(struct wl1271 *wl,
+int wl1271_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len, u8 band)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct sk_buff *skb;
int ret;
u32 rate;
- skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len,
+ skb = ieee80211_probereq_get(wl->hw, vif, ssid, ssid_len,
ie, ie_len);
if (!skb) {
ret = -ENOMEM;
@@ -1118,7 +1156,7 @@ int wl1271_cmd_build_probe_req(struct wl1271 *wl,
wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
- rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
+ rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
if (band == IEEE80211_BAND_2GHZ)
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
skb->data, skb->len, 0, rate);
@@ -1132,20 +1170,22 @@ out:
}
struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct sk_buff *skb)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
int ret;
u32 rate;
if (!skb)
- skb = ieee80211_ap_probereq_get(wl->hw, wl->vif);
+ skb = ieee80211_ap_probereq_get(wl->hw, vif);
if (!skb)
goto out;
wl1271_dump(DEBUG_SCAN, "AP PROBE REQ: ", skb->data, skb->len);
- rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[wl->band]);
- if (wl->band == IEEE80211_BAND_2GHZ)
+ rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[wlvif->band]);
+ if (wlvif->band == IEEE80211_BAND_2GHZ)
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
skb->data, skb->len, 0, rate);
else
@@ -1159,9 +1199,11 @@ out:
return skb;
}
-int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
+int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ __be32 ip_addr)
{
int ret;
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct wl12xx_arp_rsp_template tmpl;
struct ieee80211_hdr_3addr *hdr;
struct arphdr *arp_hdr;
@@ -1173,8 +1215,8 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_DATA |
IEEE80211_FCTL_TODS);
- memcpy(hdr->addr1, wl->vif->bss_conf.bssid, ETH_ALEN);
- memcpy(hdr->addr2, wl->vif->addr, ETH_ALEN);
+ memcpy(hdr->addr1, vif->bss_conf.bssid, ETH_ALEN);
+ memcpy(hdr->addr2, vif->addr, ETH_ALEN);
memset(hdr->addr3, 0xff, ETH_ALEN);
/* llc layer */
@@ -1190,25 +1232,26 @@ int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
arp_hdr->ar_op = cpu_to_be16(ARPOP_REPLY);
/* arp payload */
- memcpy(tmpl.sender_hw, wl->vif->addr, ETH_ALEN);
+ memcpy(tmpl.sender_hw, vif->addr, ETH_ALEN);
tmpl.sender_ip = ip_addr;
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_ARP_RSP,
&tmpl, sizeof(tmpl), 0,
- wl->basic_rate);
+ wlvif->basic_rate);
return ret;
}
-int wl1271_build_qos_null_data(struct wl1271 *wl)
+int wl1271_build_qos_null_data(struct wl1271 *wl, struct ieee80211_vif *vif)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct ieee80211_qos_hdr template;
memset(&template, 0, sizeof(template));
- memcpy(template.addr1, wl->bssid, ETH_ALEN);
- memcpy(template.addr2, wl->mac_addr, ETH_ALEN);
- memcpy(template.addr3, wl->bssid, ETH_ALEN);
+ memcpy(template.addr1, vif->bss_conf.bssid, ETH_ALEN);
+ memcpy(template.addr2, vif->addr, ETH_ALEN);
+ memcpy(template.addr3, vif->bss_conf.bssid, ETH_ALEN);
template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_QOS_NULLFUNC |
@@ -1219,7 +1262,7 @@ int wl1271_build_qos_null_data(struct wl1271 *wl)
return wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, &template,
sizeof(template), 0,
- wl->basic_rate);
+ wlvif->basic_rate);
}
int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid)
@@ -1253,7 +1296,8 @@ out:
return ret;
}
-int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, const u8 *addr,
u32 tx_seq_32, u16 tx_seq_16)
{
@@ -1261,7 +1305,7 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
int ret = 0;
/* hlid might have already been deleted */
- if (wl->sta_hlid == WL12XX_INVALID_LINK_ID)
+ if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
return 0;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -1270,7 +1314,7 @@ int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
goto out;
}
- cmd->hlid = wl->sta_hlid;
+ cmd->hlid = wlvif->sta.hlid;
if (key_type == KEY_WEP)
cmd->lid_key_type = WEP_DEFAULT_LID_TYPE;
@@ -1321,9 +1365,10 @@ out:
* TODO: merge with sta/ibss into 1 set_key function.
* note there are slight diffs
*/
-int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
- u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
- u16 tx_seq_16)
+int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u16 action, u8 id, u8 key_type,
+ u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
+ u16 tx_seq_16)
{
struct wl1271_cmd_set_keys *cmd;
int ret = 0;
@@ -1333,7 +1378,7 @@ int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
if (!cmd)
return -ENOMEM;
- if (hlid == wl->ap_bcast_hlid) {
+ if (hlid == wlvif->ap.bcast_hlid) {
if (key_type == KEY_WEP)
lid_type = WEP_DEFAULT_LID_TYPE;
else
@@ -1411,7 +1456,8 @@ out:
return ret;
}
-int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
+int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct ieee80211_sta *sta, u8 hlid)
{
struct wl12xx_cmd_add_peer *cmd;
int i, ret;
@@ -1438,13 +1484,13 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid)
else
cmd->psd_type[i] = WL1271_PSD_LEGACY;
- sta_rates = sta->supp_rates[wl->band];
+ sta_rates = sta->supp_rates[wlvif->band];
if (sta->ht_cap.ht_supported)
sta_rates |= sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET;
cmd->supported_rates =
cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates,
- wl->band));
+ wlvif->band));
wl1271_debug(DEBUG_CMD, "new peer rates=0x%x queues=0x%x",
cmd->supported_rates, sta->uapsd_queues);
@@ -1584,12 +1630,13 @@ out:
return ret;
}
-static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id)
+static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 role_id)
{
struct wl12xx_cmd_roc *cmd;
int ret = 0;
- wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", wl->channel, role_id);
+ wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", wlvif->channel, role_id);
if (WARN_ON(role_id == WL12XX_INVALID_ROLE_ID))
return -EINVAL;
@@ -1601,8 +1648,8 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id)
}
cmd->role_id = role_id;
- cmd->channel = wl->channel;
- switch (wl->band) {
+ cmd->channel = wlvif->channel;
+ switch (wlvif->band) {
case IEEE80211_BAND_2GHZ:
cmd->band = RADIO_BAND_2_4GHZ;
break;
@@ -1610,7 +1657,7 @@ static int wl12xx_cmd_roc(struct wl1271 *wl, u8 role_id)
cmd->band = RADIO_BAND_5GHZ;
break;
default:
- wl1271_error("roc - unknown band: %d", (int)wl->band);
+ wl1271_error("roc - unknown band: %d", (int)wlvif->band);
ret = -EINVAL;
goto out_free;
}
@@ -1657,14 +1704,14 @@ out:
return ret;
}
-int wl12xx_roc(struct wl1271 *wl, u8 role_id)
+int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id)
{
int ret = 0;
if (WARN_ON(test_bit(role_id, wl->roc_map)))
return 0;
- ret = wl12xx_cmd_roc(wl, role_id);
+ ret = wl12xx_cmd_roc(wl, wlvif, role_id);
if (ret < 0)
goto out;
@@ -1753,3 +1800,53 @@ out_free:
out:
return ret;
}
+
+/* start dev role and roc on its channel */
+int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ int ret;
+
+ if (WARN_ON(!(wlvif->bss_type == BSS_TYPE_STA_BSS ||
+ wlvif->bss_type == BSS_TYPE_IBSS)))
+ return -EINVAL;
+
+ ret = wl12xx_cmd_role_start_dev(wl, wlvif);
+ if (ret < 0)
+ goto out;
+
+ ret = wl12xx_roc(wl, wlvif, wlvif->dev_role_id);
+ if (ret < 0)
+ goto out_stop;
+
+ return 0;
+
+out_stop:
+ wl12xx_cmd_role_stop_dev(wl, wlvif);
+out:
+ return ret;
+}
+
+/* croc dev hlid, and stop the role */
+int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ int ret;
+
+ if (WARN_ON(!(wlvif->bss_type == BSS_TYPE_STA_BSS ||
+ wlvif->bss_type == BSS_TYPE_IBSS)))
+ return -EINVAL;
+
+ /* flush all pending packets */
+ wl1271_tx_work_locked(wl);
+
+ if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
+ ret = wl12xx_croc(wl, wlvif->dev_role_id);
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = wl12xx_cmd_role_stop_dev(wl, wlvif);
+ if (ret < 0)
+ goto out;
+out:
+ return ret;
+}
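For context, a minimal caller sketch (not part of this patch) showing how the two helpers added above are meant to pair up; the function name and the locking assumption are hypothetical, based on the surrounding cmd.c code.

/*
 * Hypothetical caller: bring the device role up before an off-channel
 * exchange and tear it down afterwards. Assumes wl->mutex is already
 * held, as in the rest of cmd.c.
 */
static int wl12xx_dev_roc_example(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret;

	/* starts the dev role and ROCs on wlvif->channel */
	ret = wl12xx_start_dev(wl, wlvif);
	if (ret < 0)
		return ret;

	/* ... exchange frames on the device role here ... */

	/* CROC and stop the dev role again */
	return wl12xx_stop_dev(wl, wlvif);
}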
diff --git a/drivers/net/wireless/wl12xx/cmd.h b/drivers/net/wireless/wl12xx/cmd.h
index b7bd427..3f7d0b9 100644
--- a/drivers/net/wireless/wl12xx/cmd.h
+++ b/drivers/net/wireless/wl12xx/cmd.h
@@ -36,45 +36,54 @@ int wl128x_cmd_general_parms(struct wl1271 *wl);
int wl1271_cmd_radio_parms(struct wl1271 *wl);
int wl128x_cmd_radio_parms(struct wl1271 *wl);
int wl1271_cmd_ext_radio_parms(struct wl1271 *wl);
-int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 role_type, u8 *role_id);
+int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type,
+ u8 *role_id);
int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id);
-int wl12xx_cmd_role_start_dev(struct wl1271 *wl);
-int wl12xx_cmd_role_stop_dev(struct wl1271 *wl);
-int wl12xx_cmd_role_start_sta(struct wl1271 *wl);
-int wl12xx_cmd_role_stop_sta(struct wl1271 *wl);
-int wl12xx_cmd_role_start_ap(struct wl1271 *wl);
-int wl12xx_cmd_role_stop_ap(struct wl1271 *wl);
-int wl12xx_cmd_role_start_ibss(struct wl1271 *wl);
+int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
-int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode);
+int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 ps_mode);
int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
size_t len);
int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
void *buf, size_t buf_len, int index, u32 rates);
-int wl1271_cmd_build_null_data(struct wl1271 *wl);
-int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid);
-int wl1271_cmd_build_probe_req(struct wl1271 *wl,
+int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl1271_cmd_build_ps_poll(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u16 aid);
+int wl1271_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len, u8 band);
struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct sk_buff *skb);
-int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr);
-int wl1271_build_qos_null_data(struct wl1271 *wl);
-int wl1271_cmd_build_klv_null_data(struct wl1271 *wl);
+int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ __be32 ip_addr);
+int wl1271_build_qos_null_data(struct wl1271 *wl, struct ieee80211_vif *vif);
+int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif);
int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid);
-int wl1271_cmd_set_sta_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, const u8 *addr,
u32 tx_seq_32, u16 tx_seq_16);
-int wl1271_cmd_set_ap_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
u16 tx_seq_16);
int wl12xx_cmd_set_peer_state(struct wl1271 *wl, u8 hlid);
-int wl12xx_roc(struct wl1271 *wl, u8 role_id);
+int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id);
int wl12xx_croc(struct wl1271 *wl, u8 role_id);
-int wl12xx_cmd_add_peer(struct wl1271 *wl, struct ieee80211_sta *sta, u8 hlid);
+int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct ieee80211_sta *sta, u8 hlid);
int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid);
int wl12xx_cmd_config_fwlog(struct wl1271 *wl);
int wl12xx_cmd_start_fwlog(struct wl1271 *wl);
@@ -82,6 +91,9 @@ int wl12xx_cmd_stop_fwlog(struct wl1271 *wl);
int wl12xx_cmd_channel_switch(struct wl1271 *wl,
struct ieee80211_channel_switch *ch_switch);
int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl);
+int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 *hlid);
+void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid);
enum wl1271_commands {
CMD_INTERROGATE = 1, /*use this to read information elements*/
diff --git a/drivers/net/wireless/wl12xx/conf.h b/drivers/net/wireless/wl12xx/conf.h
index 04bb8fb..1bcfb01 100644
--- a/drivers/net/wireless/wl12xx/conf.h
+++ b/drivers/net/wireless/wl12xx/conf.h
@@ -440,6 +440,10 @@ struct conf_rx_settings {
CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \
CONF_HW_BIT_RATE_54MBPS)
+#define CONF_TX_CCK_RATES (CONF_HW_BIT_RATE_1MBPS | \
+ CONF_HW_BIT_RATE_2MBPS | CONF_HW_BIT_RATE_5_5MBPS | \
+ CONF_HW_BIT_RATE_11MBPS)
+
#define CONF_TX_OFDM_RATES (CONF_HW_BIT_RATE_6MBPS | \
CONF_HW_BIT_RATE_12MBPS | CONF_HW_BIT_RATE_24MBPS | \
CONF_HW_BIT_RATE_36MBPS | CONF_HW_BIT_RATE_48MBPS | \
diff --git a/drivers/net/wireless/wl12xx/debug.h b/drivers/net/wireless/wl12xx/debug.h
new file mode 100644
index 0000000..b85fd8c4
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/debug.h
@@ -0,0 +1,101 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2011 Texas Instruments. All rights reserved.
+ * Copyright (C) 2008-2009 Nokia Corporation
+ *
+ * Contact: Luciano Coelho <coelho@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __DEBUG_H__
+#define __DEBUG_H__
+
+#include <linux/bitops.h>
+#include <linux/printk.h>
+
+#define DRIVER_NAME "wl12xx"
+#define DRIVER_PREFIX DRIVER_NAME ": "
+
+enum {
+ DEBUG_NONE = 0,
+ DEBUG_IRQ = BIT(0),
+ DEBUG_SPI = BIT(1),
+ DEBUG_BOOT = BIT(2),
+ DEBUG_MAILBOX = BIT(3),
+ DEBUG_TESTMODE = BIT(4),
+ DEBUG_EVENT = BIT(5),
+ DEBUG_TX = BIT(6),
+ DEBUG_RX = BIT(7),
+ DEBUG_SCAN = BIT(8),
+ DEBUG_CRYPT = BIT(9),
+ DEBUG_PSM = BIT(10),
+ DEBUG_MAC80211 = BIT(11),
+ DEBUG_CMD = BIT(12),
+ DEBUG_ACX = BIT(13),
+ DEBUG_SDIO = BIT(14),
+ DEBUG_FILTERS = BIT(15),
+ DEBUG_ADHOC = BIT(16),
+ DEBUG_AP = BIT(17),
+ DEBUG_MASTER = (DEBUG_ADHOC | DEBUG_AP),
+ DEBUG_ALL = ~0,
+};
+
+extern u32 wl12xx_debug_level;
+
+#define DEBUG_DUMP_LIMIT 1024
+
+#define wl1271_error(fmt, arg...) \
+ pr_err(DRIVER_PREFIX "ERROR " fmt "\n", ##arg)
+
+#define wl1271_warning(fmt, arg...) \
+ pr_warning(DRIVER_PREFIX "WARNING " fmt "\n", ##arg)
+
+#define wl1271_notice(fmt, arg...) \
+ pr_info(DRIVER_PREFIX fmt "\n", ##arg)
+
+#define wl1271_info(fmt, arg...) \
+ pr_info(DRIVER_PREFIX fmt "\n", ##arg)
+
+#define wl1271_debug(level, fmt, arg...) \
+ do { \
+ if (level & wl12xx_debug_level) \
+ pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \
+ } while (0)
+
+/* TODO: use pr_debug_hex_dump when it becomes available */
+#define wl1271_dump(level, prefix, buf, len) \
+ do { \
+ if (level & wl12xx_debug_level) \
+ print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
+ DUMP_PREFIX_OFFSET, 16, 1, \
+ buf, \
+ min_t(size_t, len, DEBUG_DUMP_LIMIT), \
+ 0); \
+ } while (0)
+
+#define wl1271_dump_ascii(level, prefix, buf, len) \
+ do { \
+ if (level & wl12xx_debug_level) \
+ print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
+ DUMP_PREFIX_OFFSET, 16, 1, \
+ buf, \
+ min_t(size_t, len, DEBUG_DUMP_LIMIT), \
+ true); \
+ } while (0)
+
+#endif /* __DEBUG_H__ */
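As a usage illustration (not taken from the patch), the level-gated macros in the new debug.h are invoked like this; the function name, arguments and the DEBUG_CMD level chosen here are placeholders.

#include "debug.h"

static void debug_usage_example(const void *cmd_buf, size_t cmd_len, int role_id)
{
	/* emitted only when DEBUG_CMD is set in wl12xx_debug_level */
	wl1271_debug(DEBUG_CMD, "starting role %d", role_id);

	/* the macro caps the dump at DEBUG_DUMP_LIMIT bytes */
	wl1271_dump(DEBUG_CMD, "CMD: ", cmd_buf, cmd_len);

	if (role_id < 0)
		wl1271_error("invalid role id %d", role_id);
}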
diff --git a/drivers/net/wireless/wl12xx/debugfs.c b/drivers/net/wireless/wl12xx/debugfs.c
index 3999fd5..15eb3a9 100644
--- a/drivers/net/wireless/wl12xx/debugfs.c
+++ b/drivers/net/wireless/wl12xx/debugfs.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include "wl12xx.h"
+#include "debug.h"
#include "acx.h"
#include "ps.h"
#include "io.h"
@@ -316,12 +317,19 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
{
struct wl1271 *wl = file->private_data;
int res = 0;
- char buf[1024];
+ ssize_t ret;
+ char *buf;
+
+#define DRIVER_STATE_BUF_LEN 1024
+
+ buf = kmalloc(DRIVER_STATE_BUF_LEN, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
mutex_lock(&wl->mutex);
#define DRIVER_STATE_PRINT(x, fmt) \
- (res += scnprintf(buf + res, sizeof(buf) - res,\
+ (res += scnprintf(buf + res, DRIVER_STATE_BUF_LEN - res,\
#x " = " fmt "\n", wl->x))
#define DRIVER_STATE_PRINT_LONG(x) DRIVER_STATE_PRINT(x, "%ld")
@@ -346,29 +354,14 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
DRIVER_STATE_PRINT_INT(tx_results_count);
DRIVER_STATE_PRINT_LHEX(flags);
DRIVER_STATE_PRINT_INT(tx_blocks_freed);
- DRIVER_STATE_PRINT_INT(tx_security_last_seq_lsb);
DRIVER_STATE_PRINT_INT(rx_counter);
- DRIVER_STATE_PRINT_INT(session_counter);
DRIVER_STATE_PRINT_INT(state);
- DRIVER_STATE_PRINT_INT(bss_type);
DRIVER_STATE_PRINT_INT(channel);
- DRIVER_STATE_PRINT_HEX(rate_set);
- DRIVER_STATE_PRINT_HEX(basic_rate_set);
- DRIVER_STATE_PRINT_HEX(basic_rate);
DRIVER_STATE_PRINT_INT(band);
- DRIVER_STATE_PRINT_INT(beacon_int);
- DRIVER_STATE_PRINT_INT(psm_entry_retry);
- DRIVER_STATE_PRINT_INT(ps_poll_failures);
DRIVER_STATE_PRINT_INT(power_level);
- DRIVER_STATE_PRINT_INT(rssi_thold);
- DRIVER_STATE_PRINT_INT(last_rssi_event);
DRIVER_STATE_PRINT_INT(sg_enabled);
DRIVER_STATE_PRINT_INT(enable_11a);
DRIVER_STATE_PRINT_INT(noise);
- DRIVER_STATE_PRINT_LHEX(ap_hlid_map[0]);
- DRIVER_STATE_PRINT_INT(last_tx_hlid);
- DRIVER_STATE_PRINT_INT(ba_support);
- DRIVER_STATE_PRINT_HEX(ba_rx_bitmap);
DRIVER_STATE_PRINT_HEX(ap_fw_ps_map);
DRIVER_STATE_PRINT_LHEX(ap_ps_map);
DRIVER_STATE_PRINT_HEX(quirks);
@@ -387,10 +380,13 @@ static ssize_t driver_state_read(struct file *file, char __user *user_buf,
#undef DRIVER_STATE_PRINT_LHEX
#undef DRIVER_STATE_PRINT_STR
#undef DRIVER_STATE_PRINT
+#undef DRIVER_STATE_BUF_LEN
mutex_unlock(&wl->mutex);
- return simple_read_from_buffer(user_buf, count, ppos, buf, res);
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, res);
+ kfree(buf);
+ return ret;
}
static const struct file_operations driver_state_ops = {
@@ -399,6 +395,115 @@ static const struct file_operations driver_state_ops = {
.llseek = default_llseek,
};
+static ssize_t vifs_state_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ struct wl12xx_vif *wlvif;
+ int ret, res = 0;
+ const int buf_size = 4096;
+ char *buf;
+ char tmp_buf[64];
+
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&wl->mutex);
+
+#define VIF_STATE_PRINT(x, fmt) \
+ (res += scnprintf(buf + res, buf_size - res, \
+ #x " = " fmt "\n", wlvif->x))
+
+#define VIF_STATE_PRINT_LONG(x) VIF_STATE_PRINT(x, "%ld")
+#define VIF_STATE_PRINT_INT(x) VIF_STATE_PRINT(x, "%d")
+#define VIF_STATE_PRINT_STR(x) VIF_STATE_PRINT(x, "%s")
+#define VIF_STATE_PRINT_LHEX(x) VIF_STATE_PRINT(x, "0x%lx")
+#define VIF_STATE_PRINT_LLHEX(x) VIF_STATE_PRINT(x, "0x%llx")
+#define VIF_STATE_PRINT_HEX(x) VIF_STATE_PRINT(x, "0x%x")
+
+#define VIF_STATE_PRINT_NSTR(x, len) \
+ do { \
+ memset(tmp_buf, 0, sizeof(tmp_buf)); \
+ memcpy(tmp_buf, wlvif->x, \
+ min_t(u8, len, sizeof(tmp_buf) - 1)); \
+ res += scnprintf(buf + res, buf_size - res, \
+ #x " = %s\n", tmp_buf); \
+ } while (0)
+
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ VIF_STATE_PRINT_INT(role_id);
+ VIF_STATE_PRINT_INT(bss_type);
+ VIF_STATE_PRINT_LHEX(flags);
+ VIF_STATE_PRINT_INT(p2p);
+ VIF_STATE_PRINT_INT(dev_role_id);
+ VIF_STATE_PRINT_INT(dev_hlid);
+
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+ wlvif->bss_type == BSS_TYPE_IBSS) {
+ VIF_STATE_PRINT_INT(sta.hlid);
+ VIF_STATE_PRINT_INT(sta.ba_rx_bitmap);
+ VIF_STATE_PRINT_INT(sta.basic_rate_idx);
+ VIF_STATE_PRINT_INT(sta.ap_rate_idx);
+ VIF_STATE_PRINT_INT(sta.p2p_rate_idx);
+ } else {
+ VIF_STATE_PRINT_INT(ap.global_hlid);
+ VIF_STATE_PRINT_INT(ap.bcast_hlid);
+ VIF_STATE_PRINT_LHEX(ap.sta_hlid_map[0]);
+ VIF_STATE_PRINT_INT(ap.mgmt_rate_idx);
+ VIF_STATE_PRINT_INT(ap.bcast_rate_idx);
+ VIF_STATE_PRINT_INT(ap.ucast_rate_idx[0]);
+ VIF_STATE_PRINT_INT(ap.ucast_rate_idx[1]);
+ VIF_STATE_PRINT_INT(ap.ucast_rate_idx[2]);
+ VIF_STATE_PRINT_INT(ap.ucast_rate_idx[3]);
+ }
+ VIF_STATE_PRINT_INT(last_tx_hlid);
+ VIF_STATE_PRINT_LHEX(links_map[0]);
+ VIF_STATE_PRINT_NSTR(ssid, wlvif->ssid_len);
+ VIF_STATE_PRINT_INT(band);
+ VIF_STATE_PRINT_INT(channel);
+ VIF_STATE_PRINT_HEX(bitrate_masks[0]);
+ VIF_STATE_PRINT_HEX(bitrate_masks[1]);
+ VIF_STATE_PRINT_HEX(basic_rate_set);
+ VIF_STATE_PRINT_HEX(basic_rate);
+ VIF_STATE_PRINT_HEX(rate_set);
+ VIF_STATE_PRINT_INT(beacon_int);
+ VIF_STATE_PRINT_INT(default_key);
+ VIF_STATE_PRINT_INT(aid);
+ VIF_STATE_PRINT_INT(session_counter);
+ VIF_STATE_PRINT_INT(ps_poll_failures);
+ VIF_STATE_PRINT_INT(psm_entry_retry);
+ VIF_STATE_PRINT_INT(power_level);
+ VIF_STATE_PRINT_INT(rssi_thold);
+ VIF_STATE_PRINT_INT(last_rssi_event);
+ VIF_STATE_PRINT_INT(ba_support);
+ VIF_STATE_PRINT_INT(ba_allowed);
+ VIF_STATE_PRINT_LLHEX(tx_security_seq);
+ VIF_STATE_PRINT_INT(tx_security_last_seq_lsb);
+ }
+
+#undef VIF_STATE_PRINT_INT
+#undef VIF_STATE_PRINT_LONG
+#undef VIF_STATE_PRINT_HEX
+#undef VIF_STATE_PRINT_LHEX
+#undef VIF_STATE_PRINT_LLHEX
+#undef VIF_STATE_PRINT_STR
+#undef VIF_STATE_PRINT_NSTR
+#undef VIF_STATE_PRINT
+
+ mutex_unlock(&wl->mutex);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, res);
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations vifs_state_ops = {
+ .read = vifs_state_read,
+ .open = wl1271_open_file_generic,
+ .llseek = default_llseek,
+};
+
static ssize_t dtim_interval_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -520,6 +625,7 @@ static ssize_t rx_streaming_interval_write(struct file *file,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
+ struct wl12xx_vif *wlvif;
unsigned long value;
int ret;
@@ -543,7 +649,9 @@ static ssize_t rx_streaming_interval_write(struct file *file,
if (ret < 0)
goto out;
- wl1271_recalc_rx_streaming(wl);
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ wl1271_recalc_rx_streaming(wl, wlvif);
+ }
wl1271_ps_elp_sleep(wl);
out:
@@ -572,6 +680,7 @@ static ssize_t rx_streaming_always_write(struct file *file,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
+ struct wl12xx_vif *wlvif;
unsigned long value;
int ret;
@@ -595,7 +704,9 @@ static ssize_t rx_streaming_always_write(struct file *file,
if (ret < 0)
goto out;
- wl1271_recalc_rx_streaming(wl);
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ wl1271_recalc_rx_streaming(wl, wlvif);
+ }
wl1271_ps_elp_sleep(wl);
out:
@@ -624,6 +735,7 @@ static ssize_t beacon_filtering_write(struct file *file,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
+ struct wl12xx_vif *wlvif;
char buf[10];
size_t len;
unsigned long value;
@@ -646,7 +758,9 @@ static ssize_t beacon_filtering_write(struct file *file,
if (ret < 0)
goto out;
- ret = wl1271_acx_beacon_filter_opt(wl, !!value);
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, !!value);
+ }
wl1271_ps_elp_sleep(wl);
out:
@@ -770,6 +884,7 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl,
DEBUGFS_ADD(gpio_power, rootdir);
DEBUGFS_ADD(start_recovery, rootdir);
DEBUGFS_ADD(driver_state, rootdir);
+ DEBUGFS_ADD(vifs_state, rootdir);
DEBUGFS_ADD(dtim_interval, rootdir);
DEBUGFS_ADD(beacon_interval, rootdir);
DEBUGFS_ADD(beacon_filtering, rootdir);
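The driver_state_read() change above swaps a 1 KB on-stack buffer for a kmalloc'ed one. A minimal, standalone sketch of that heap-buffer debugfs read pattern is shown below; "example_read" and the single field printed are illustrative only and assume the usual debugfs.c includes.

static ssize_t example_read(struct file *file, char __user *user_buf,
			    size_t count, loff_t *ppos)
{
	struct wl1271 *wl = file->private_data;
	const int buf_len = 1024;
	char *buf;
	int res;
	ssize_t ret;

	/* keep large scratch buffers off the kernel stack */
	buf = kmalloc(buf_len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	mutex_lock(&wl->mutex);
	res = scnprintf(buf, buf_len, "state = %d\n", wl->state);
	mutex_unlock(&wl->mutex);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, res);
	kfree(buf);
	return ret;
}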
diff --git a/drivers/net/wireless/wl12xx/event.c b/drivers/net/wireless/wl12xx/event.c
index 674ad2a..d3280df68 100644
--- a/drivers/net/wireless/wl12xx/event.c
+++ b/drivers/net/wireless/wl12xx/event.c
@@ -22,6 +22,7 @@
*/
#include "wl12xx.h"
+#include "debug.h"
#include "reg.h"
#include "io.h"
#include "event.h"
@@ -31,12 +32,16 @@
void wl1271_pspoll_work(struct work_struct *work)
{
+ struct ieee80211_vif *vif;
+ struct wl12xx_vif *wlvif;
struct delayed_work *dwork;
struct wl1271 *wl;
int ret;
dwork = container_of(work, struct delayed_work, work);
- wl = container_of(dwork, struct wl1271, pspoll_work);
+ wlvif = container_of(dwork, struct wl12xx_vif, pspoll_work);
+ vif = container_of((void *)wlvif, struct ieee80211_vif, drv_priv);
+ wl = wlvif->wl;
wl1271_debug(DEBUG_EVENT, "pspoll work");
@@ -45,10 +50,10 @@ void wl1271_pspoll_work(struct work_struct *work)
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
- if (!test_and_clear_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags))
+ if (!test_and_clear_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags))
goto out;
- if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
goto out;
/*
@@ -60,31 +65,33 @@ void wl1271_pspoll_work(struct work_struct *work)
if (ret < 0)
goto out;
- wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, wl->basic_rate, true);
+ wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE,
+ wlvif->basic_rate, true);
wl1271_ps_elp_sleep(wl);
out:
mutex_unlock(&wl->mutex);
};
-static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl)
+static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int delay = wl->conf.conn.ps_poll_recovery_period;
int ret;
- wl->ps_poll_failures++;
- if (wl->ps_poll_failures == 1)
+ wlvif->ps_poll_failures++;
+ if (wlvif->ps_poll_failures == 1)
wl1271_info("AP with dysfunctional ps-poll, "
"trying to work around it.");
/* force active mode receive data from the AP */
- if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
- ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
- wl->basic_rate, true);
+ if (test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
+ ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE,
+ wlvif->basic_rate, true);
if (ret < 0)
return;
- set_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags);
- ieee80211_queue_delayed_work(wl->hw, &wl->pspoll_work,
+ set_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags);
+ ieee80211_queue_delayed_work(wl->hw, &wlvif->pspoll_work,
msecs_to_jiffies(delay));
}
@@ -97,6 +104,7 @@ static void wl1271_event_pspoll_delivery_fail(struct wl1271 *wl)
}
static int wl1271_event_ps_report(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct event_mailbox *mbox,
bool *beacon_loss)
{
@@ -109,41 +117,37 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
case EVENT_ENTER_POWER_SAVE_FAIL:
wl1271_debug(DEBUG_PSM, "PSM entry failed");
- if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+ if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
/* remain in active mode */
- wl->psm_entry_retry = 0;
+ wlvif->psm_entry_retry = 0;
break;
}
- if (wl->psm_entry_retry < total_retries) {
- wl->psm_entry_retry++;
- ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
- wl->basic_rate, true);
+ if (wlvif->psm_entry_retry < total_retries) {
+ wlvif->psm_entry_retry++;
+ ret = wl1271_ps_set_mode(wl, wlvif,
+ STATION_POWER_SAVE_MODE,
+ wlvif->basic_rate, true);
} else {
wl1271_info("No ack to nullfunc from AP.");
- wl->psm_entry_retry = 0;
+ wlvif->psm_entry_retry = 0;
*beacon_loss = true;
}
break;
case EVENT_ENTER_POWER_SAVE_SUCCESS:
- wl->psm_entry_retry = 0;
-
- /* enable beacon filtering */
- ret = wl1271_acx_beacon_filter_opt(wl, true);
- if (ret < 0)
- break;
+ wlvif->psm_entry_retry = 0;
/*
* BET has only a minor effect in 5GHz and masks
* channel switch IEs, so we only enable BET on 2.4GHz
*/
- if (wl->band == IEEE80211_BAND_2GHZ)
+ if (wlvif->band == IEEE80211_BAND_2GHZ)
/* enable beacon early termination */
- ret = wl1271_acx_bet_enable(wl, true);
+ ret = wl1271_acx_bet_enable(wl, wlvif, true);
- if (wl->ps_compl) {
- complete(wl->ps_compl);
- wl->ps_compl = NULL;
+ if (wlvif->ps_compl) {
+ complete(wlvif->ps_compl);
+ wlvif->ps_compl = NULL;
}
break;
default:
@@ -154,39 +158,44 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
}
static void wl1271_event_rssi_trigger(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct event_mailbox *mbox)
{
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
enum nl80211_cqm_rssi_threshold_event event;
s8 metric = mbox->rssi_snr_trigger_metric[0];
wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric);
- if (metric <= wl->rssi_thold)
+ if (metric <= wlvif->rssi_thold)
event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
else
event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
- if (event != wl->last_rssi_event)
- ieee80211_cqm_rssi_notify(wl->vif, event, GFP_KERNEL);
- wl->last_rssi_event = event;
+ if (event != wlvif->last_rssi_event)
+ ieee80211_cqm_rssi_notify(vif, event, GFP_KERNEL);
+ wlvif->last_rssi_event = event;
}
-static void wl1271_stop_ba_event(struct wl1271 *wl)
+static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
- if (wl->bss_type != BSS_TYPE_AP_BSS) {
- if (!wl->ba_rx_bitmap)
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+
+ if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
+ if (!wlvif->sta.ba_rx_bitmap)
return;
- ieee80211_stop_rx_ba_session(wl->vif, wl->ba_rx_bitmap,
- wl->bssid);
+ ieee80211_stop_rx_ba_session(vif, wlvif->sta.ba_rx_bitmap,
+ vif->bss_conf.bssid);
} else {
- int i;
+ u8 hlid;
struct wl1271_link *lnk;
- for (i = WL1271_AP_STA_HLID_START; i < AP_MAX_LINKS; i++) {
- lnk = &wl->links[i];
- if (!wl1271_is_active_sta(wl, i) || !lnk->ba_bitmap)
+ for_each_set_bit(hlid, wlvif->ap.sta_hlid_map,
+ WL12XX_MAX_LINKS) {
+ lnk = &wl->links[hlid];
+ if (!lnk->ba_bitmap)
continue;
- ieee80211_stop_rx_ba_session(wl->vif,
+ ieee80211_stop_rx_ba_session(vif,
lnk->ba_bitmap,
lnk->addr);
}
@@ -196,14 +205,23 @@ static void wl1271_stop_ba_event(struct wl1271 *wl)
static void wl12xx_event_soft_gemini_sense(struct wl1271 *wl,
u8 enable)
{
+ struct ieee80211_vif *vif;
+ struct wl12xx_vif *wlvif;
+
if (enable) {
/* disable dynamic PS when requested by the firmware */
- ieee80211_disable_dyn_ps(wl->vif);
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ vif = wl12xx_wlvif_to_vif(wlvif);
+ ieee80211_disable_dyn_ps(vif);
+ }
set_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
} else {
- ieee80211_enable_dyn_ps(wl->vif);
clear_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
- wl1271_recalc_rx_streaming(wl);
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ vif = wl12xx_wlvif_to_vif(wlvif);
+ ieee80211_enable_dyn_ps(vif);
+ wl1271_recalc_rx_streaming(wl, wlvif);
+ }
}
}
@@ -217,10 +235,11 @@ static void wl1271_event_mbox_dump(struct event_mailbox *mbox)
static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
{
+ struct ieee80211_vif *vif;
+ struct wl12xx_vif *wlvif;
int ret;
u32 vector;
bool beacon_loss = false;
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
bool disconnect_sta = false;
unsigned long sta_bitmap = 0;
@@ -234,7 +253,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
wl1271_debug(DEBUG_EVENT, "status: 0x%x",
mbox->scheduled_scan_status);
- wl1271_scan_stm(wl);
+ wl1271_scan_stm(wl, wl->scan_vif);
}
if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) {
@@ -248,13 +267,12 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_COMPLETE_EVENT "
"(status 0x%0x)", mbox->scheduled_scan_status);
if (wl->sched_scanning) {
- wl1271_scan_sched_scan_stop(wl);
ieee80211_sched_scan_stopped(wl->hw);
+ wl->sched_scanning = false;
}
}
- if (vector & SOFT_GEMINI_SENSE_EVENT_ID &&
- wl->bss_type == BSS_TYPE_STA_BSS)
+ if (vector & SOFT_GEMINI_SENSE_EVENT_ID)
wl12xx_event_soft_gemini_sense(wl,
mbox->soft_gemini_sense_info);
@@ -267,40 +285,54 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
* BSS_LOSE_EVENT, beacon loss has to be reported to the stack.
*
*/
- if ((vector & BSS_LOSE_EVENT_ID) && !is_ap) {
+ if (vector & BSS_LOSE_EVENT_ID) {
+ /* TODO: check for multi-role */
wl1271_info("Beacon loss detected.");
/* indicate to the stack, that beacons have been lost */
beacon_loss = true;
}
- if ((vector & PS_REPORT_EVENT_ID) && !is_ap) {
+ if (vector & PS_REPORT_EVENT_ID) {
wl1271_debug(DEBUG_EVENT, "PS_REPORT_EVENT");
- ret = wl1271_event_ps_report(wl, mbox, &beacon_loss);
- if (ret < 0)
- return ret;
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ ret = wl1271_event_ps_report(wl, wlvif,
+ mbox, &beacon_loss);
+ if (ret < 0)
+ return ret;
+ }
}
- if ((vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID) && !is_ap)
- wl1271_event_pspoll_delivery_fail(wl);
+ if (vector & PSPOLL_DELIVERY_FAILURE_EVENT_ID)
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ wl1271_event_pspoll_delivery_fail(wl, wlvif);
+ }
if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
+ /* TODO: check actual multi-role support */
wl1271_debug(DEBUG_EVENT, "RSSI_SNR_TRIGGER_0_EVENT");
- if (wl->vif)
- wl1271_event_rssi_trigger(wl, mbox);
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ wl1271_event_rssi_trigger(wl, wlvif, mbox);
+ }
}
- if ((vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID)) {
+ if (vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID) {
+ u8 role_id = mbox->role_id;
wl1271_debug(DEBUG_EVENT, "BA_SESSION_RX_CONSTRAINT_EVENT_ID. "
- "ba_allowed = 0x%x", mbox->rx_ba_allowed);
+ "ba_allowed = 0x%x, role_id=%d",
+ mbox->rx_ba_allowed, role_id);
- wl->ba_allowed = !!mbox->rx_ba_allowed;
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ if (role_id != 0xff && role_id != wlvif->role_id)
+ continue;
- if (wl->vif && !wl->ba_allowed)
- wl1271_stop_ba_event(wl);
+ wlvif->ba_allowed = !!mbox->rx_ba_allowed;
+ if (!wlvif->ba_allowed)
+ wl1271_stop_ba_event(wl, wlvif);
+ }
}
- if ((vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID) && !is_ap) {
+ if (vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID) {
wl1271_debug(DEBUG_EVENT, "CHANNEL_SWITCH_COMPLETE_EVENT_ID. "
"status = 0x%x",
mbox->channel_switch_status);
@@ -309,50 +341,65 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
* 1) channel switch complete with status=0
* 2) channel switch failed status=1
*/
- if (test_and_clear_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags) &&
- (wl->vif))
- ieee80211_chswitch_done(wl->vif,
- mbox->channel_switch_status ? false : true);
+
+ /* TODO: configure only the relevant vif */
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+ bool success;
+
+ if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS,
+						&wlvif->flags))

+ continue;
+
+ success = mbox->channel_switch_status ? false : true;
+ ieee80211_chswitch_done(vif, success);
+ }
}
if ((vector & DUMMY_PACKET_EVENT_ID)) {
wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
- if (wl->vif)
- wl1271_tx_dummy_packet(wl);
+ wl1271_tx_dummy_packet(wl);
}
/*
* "TX retries exceeded" has a different meaning according to mode.
* In AP mode the offending station is disconnected.
*/
- if ((vector & MAX_TX_RETRY_EVENT_ID) && is_ap) {
+ if (vector & MAX_TX_RETRY_EVENT_ID) {
wl1271_debug(DEBUG_EVENT, "MAX_TX_RETRY_EVENT_ID");
sta_bitmap |= le16_to_cpu(mbox->sta_tx_retry_exceeded);
disconnect_sta = true;
}
- if ((vector & INACTIVE_STA_EVENT_ID) && is_ap) {
+ if (vector & INACTIVE_STA_EVENT_ID) {
wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID");
sta_bitmap |= le16_to_cpu(mbox->sta_aging_status);
disconnect_sta = true;
}
- if (is_ap && disconnect_sta) {
+ if (disconnect_sta) {
u32 num_packets = wl->conf.tx.max_tx_retries;
struct ieee80211_sta *sta;
const u8 *addr;
int h;
- for (h = find_first_bit(&sta_bitmap, AP_MAX_LINKS);
- h < AP_MAX_LINKS;
- h = find_next_bit(&sta_bitmap, AP_MAX_LINKS, h+1)) {
- if (!wl1271_is_active_sta(wl, h))
+ for_each_set_bit(h, &sta_bitmap, WL12XX_MAX_LINKS) {
+ bool found = false;
+ /* find the ap vif connected to this sta */
+ wl12xx_for_each_wlvif_ap(wl, wlvif) {
+ if (!test_bit(h, wlvif->ap.sta_hlid_map))
+ continue;
+ found = true;
+ break;
+ }
+ if (!found)
continue;
+ vif = wl12xx_wlvif_to_vif(wlvif);
addr = wl->links[h].addr;
rcu_read_lock();
- sta = ieee80211_find_sta(wl->vif, addr);
+ sta = ieee80211_find_sta(vif, addr);
if (sta) {
wl1271_debug(DEBUG_EVENT, "remove sta %d", h);
ieee80211_report_low_ack(sta, num_packets);
@@ -361,8 +408,11 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
}
}
- if (wl->vif && beacon_loss)
- ieee80211_connection_loss(wl->vif);
+ if (beacon_loss)
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ vif = wl12xx_wlvif_to_vif(wlvif);
+ ieee80211_connection_loss(vif);
+ }
return 0;
}
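The handler above repeatedly applies the same per-vif dispatch pattern: a firmware event tagged with a role_id is matched against each vif, with 0xff treated as "all roles". A sketch of that pattern as a helper is shown below; the helper itself is hypothetical, and wl12xx_for_each_wlvif() is assumed to come from wl12xx.h (it is not defined in this patch).

static void wl12xx_event_for_role(struct wl1271 *wl, u8 role_id,
				  void (*handler)(struct wl1271 *wl,
						  struct wl12xx_vif *wlvif))
{
	struct wl12xx_vif *wlvif;

	wl12xx_for_each_wlvif(wl, wlvif) {
		/* 0xff is a broadcast role id from the firmware */
		if (role_id != 0xff && role_id != wlvif->role_id)
			continue;
		handler(wl, wlvif);
	}
}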
diff --git a/drivers/net/wireless/wl12xx/event.h b/drivers/net/wireless/wl12xx/event.h
index 49c1a0e..1d878ba 100644
--- a/drivers/net/wireless/wl12xx/event.h
+++ b/drivers/net/wireless/wl12xx/event.h
@@ -132,7 +132,4 @@ void wl1271_event_mbox_config(struct wl1271 *wl);
int wl1271_event_handle(struct wl1271 *wl, u8 mbox);
void wl1271_pspoll_work(struct work_struct *work);
-/* Functions from main.c */
-bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid);
-
#endif
diff --git a/drivers/net/wireless/wl12xx/init.c b/drivers/net/wireless/wl12xx/init.c
index 04db64c..ca7ee59 100644
--- a/drivers/net/wireless/wl12xx/init.c
+++ b/drivers/net/wireless/wl12xx/init.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/slab.h>
+#include "debug.h"
#include "init.h"
#include "wl12xx_80211.h"
#include "acx.h"
@@ -33,7 +34,7 @@
#include "tx.h"
#include "io.h"
-int wl1271_sta_init_templates_config(struct wl1271 *wl)
+int wl1271_init_templates_config(struct wl1271 *wl)
{
int ret, i;
@@ -64,7 +65,7 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
sizeof
- (struct wl12xx_qos_null_data_template),
+ (struct ieee80211_qos_hdr),
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
@@ -88,10 +89,33 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
if (ret < 0)
return ret;
+ /*
+ * Put very large empty placeholders for all templates. These
+ * reserve memory for later.
+ */
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL,
+ WL1271_CMD_TEMPL_MAX_SIZE,
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL,
+ WL1271_CMD_TEMPL_MAX_SIZE,
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL,
+ sizeof
+ (struct wl12xx_disconn_template),
+ 0, WL1271_RATE_AUTOMATIC);
+ if (ret < 0)
+ return ret;
+
for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, NULL,
- WL1271_CMD_TEMPL_DFLT_SIZE, i,
- WL1271_RATE_AUTOMATIC);
+ sizeof(struct ieee80211_qos_hdr),
+ i, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
}
@@ -99,7 +123,8 @@ int wl1271_sta_init_templates_config(struct wl1271 *wl)
return 0;
}
-static int wl1271_ap_init_deauth_template(struct wl1271 *wl)
+static int wl1271_ap_init_deauth_template(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct wl12xx_disconn_template *tmpl;
int ret;
@@ -114,7 +139,7 @@ static int wl1271_ap_init_deauth_template(struct wl1271 *wl)
tmpl->header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_DEAUTH);
- rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+ rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP,
tmpl, sizeof(*tmpl), 0, rate);
@@ -123,8 +148,10 @@ out:
return ret;
}
-static int wl1271_ap_init_null_template(struct wl1271 *wl)
+static int wl1271_ap_init_null_template(struct wl1271 *wl,
+ struct ieee80211_vif *vif)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct ieee80211_hdr_3addr *nullfunc;
int ret;
u32 rate;
@@ -141,10 +168,10 @@ static int wl1271_ap_init_null_template(struct wl1271 *wl)
/* nullfunc->addr1 is filled by FW */
- memcpy(nullfunc->addr2, wl->mac_addr, ETH_ALEN);
- memcpy(nullfunc->addr3, wl->mac_addr, ETH_ALEN);
+ memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
+ memcpy(nullfunc->addr3, vif->addr, ETH_ALEN);
- rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+ rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, nullfunc,
sizeof(*nullfunc), 0, rate);
@@ -153,8 +180,10 @@ out:
return ret;
}
-static int wl1271_ap_init_qos_null_template(struct wl1271 *wl)
+static int wl1271_ap_init_qos_null_template(struct wl1271 *wl,
+ struct ieee80211_vif *vif)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct ieee80211_qos_hdr *qosnull;
int ret;
u32 rate;
@@ -171,10 +200,10 @@ static int wl1271_ap_init_qos_null_template(struct wl1271 *wl)
/* qosnull->addr1 is filled by FW */
- memcpy(qosnull->addr2, wl->mac_addr, ETH_ALEN);
- memcpy(qosnull->addr3, wl->mac_addr, ETH_ALEN);
+ memcpy(qosnull->addr2, vif->addr, ETH_ALEN);
+ memcpy(qosnull->addr3, vif->addr, ETH_ALEN);
- rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+ rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, qosnull,
sizeof(*qosnull), 0, rate);
@@ -183,49 +212,6 @@ out:
return ret;
}
-static int wl1271_ap_init_templates_config(struct wl1271 *wl)
-{
- int ret;
-
- /*
- * Put very large empty placeholders for all templates. These
- * reserve memory for later.
- */
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_PROBE_RESPONSE, NULL,
- WL1271_CMD_TEMPL_MAX_SIZE,
- 0, WL1271_RATE_AUTOMATIC);
- if (ret < 0)
- return ret;
-
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_AP_BEACON, NULL,
- WL1271_CMD_TEMPL_MAX_SIZE,
- 0, WL1271_RATE_AUTOMATIC);
- if (ret < 0)
- return ret;
-
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_DEAUTH_AP, NULL,
- sizeof
- (struct wl12xx_disconn_template),
- 0, WL1271_RATE_AUTOMATIC);
- if (ret < 0)
- return ret;
-
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL,
- sizeof(struct wl12xx_null_data_template),
- 0, WL1271_RATE_AUTOMATIC);
- if (ret < 0)
- return ret;
-
- ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
- sizeof
- (struct wl12xx_qos_null_data_template),
- 0, WL1271_RATE_AUTOMATIC);
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
static int wl12xx_init_rx_config(struct wl1271 *wl)
{
int ret;
@@ -237,39 +223,37 @@ static int wl12xx_init_rx_config(struct wl1271 *wl)
return 0;
}
-int wl1271_init_phy_config(struct wl1271 *wl)
+static int wl12xx_init_phy_vif_config(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int ret;
- ret = wl1271_acx_pd_threshold(wl);
- if (ret < 0)
- return ret;
-
- ret = wl1271_acx_slot(wl, DEFAULT_SLOT_TIME);
+ ret = wl1271_acx_slot(wl, wlvif, DEFAULT_SLOT_TIME);
if (ret < 0)
return ret;
- ret = wl1271_acx_service_period_timeout(wl);
+ ret = wl1271_acx_service_period_timeout(wl, wlvif);
if (ret < 0)
return ret;
- ret = wl1271_acx_rts_threshold(wl, wl->hw->wiphy->rts_threshold);
+ ret = wl1271_acx_rts_threshold(wl, wlvif, wl->hw->wiphy->rts_threshold);
if (ret < 0)
return ret;
return 0;
}
-static int wl1271_init_beacon_filter(struct wl1271 *wl)
+static int wl1271_init_sta_beacon_filter(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int ret;
- /* disable beacon filtering at this stage */
- ret = wl1271_acx_beacon_filter_opt(wl, false);
+ ret = wl1271_acx_beacon_filter_table(wl, wlvif);
if (ret < 0)
return ret;
- ret = wl1271_acx_beacon_filter_table(wl);
+ /* enable beacon filtering */
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
if (ret < 0)
return ret;
@@ -302,11 +286,12 @@ int wl1271_init_energy_detection(struct wl1271 *wl)
return 0;
}
-static int wl1271_init_beacon_broadcast(struct wl1271 *wl)
+static int wl1271_init_beacon_broadcast(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int ret;
- ret = wl1271_acx_bcn_dtim_options(wl);
+ ret = wl1271_acx_bcn_dtim_options(wl, wlvif);
if (ret < 0)
return ret;
@@ -327,36 +312,13 @@ static int wl12xx_init_fwlog(struct wl1271 *wl)
return 0;
}
-static int wl1271_sta_hw_init(struct wl1271 *wl)
+/* generic sta initialization (non vif-specific) */
+static int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret;
- if (wl->chip.id != CHIP_ID_1283_PG20) {
- ret = wl1271_cmd_ext_radio_parms(wl);
- if (ret < 0)
- return ret;
- }
-
/* PS config */
- ret = wl1271_acx_config_ps(wl);
- if (ret < 0)
- return ret;
-
- ret = wl1271_sta_init_templates_config(wl);
- if (ret < 0)
- return ret;
-
- ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
- if (ret < 0)
- return ret;
-
- /* Initialize connection monitoring thresholds */
- ret = wl1271_acx_conn_monit_params(wl, false);
- if (ret < 0)
- return ret;
-
- /* Beacon filtering */
- ret = wl1271_init_beacon_filter(wl);
+ ret = wl12xx_acx_config_ps(wl, wlvif);
if (ret < 0)
return ret;
@@ -365,103 +327,61 @@ static int wl1271_sta_hw_init(struct wl1271 *wl)
if (ret < 0)
return ret;
- /* Beacons and broadcast settings */
- ret = wl1271_init_beacon_broadcast(wl);
- if (ret < 0)
- return ret;
-
- /* Configure for ELP power saving */
- ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
- if (ret < 0)
- return ret;
-
- /* Configure rssi/snr averaging weights */
- ret = wl1271_acx_rssi_snr_avg_weights(wl);
- if (ret < 0)
- return ret;
-
- ret = wl1271_acx_sta_rate_policies(wl);
- if (ret < 0)
- return ret;
-
- ret = wl12xx_acx_mem_cfg(wl);
- if (ret < 0)
- return ret;
-
- /* Configure the FW logger */
- ret = wl12xx_init_fwlog(wl);
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
return ret;
return 0;
}
-static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl)
+static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl,
+ struct ieee80211_vif *vif)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret, i;
/* disable all keep-alive templates */
for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
- ret = wl1271_acx_keep_alive_config(wl, i,
+ ret = wl1271_acx_keep_alive_config(wl, wlvif, i,
ACX_KEEP_ALIVE_TPL_INVALID);
if (ret < 0)
return ret;
}
/* disable the keep-alive feature */
- ret = wl1271_acx_keep_alive_mode(wl, false);
+ ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
if (ret < 0)
return ret;
return 0;
}
-static int wl1271_ap_hw_init(struct wl1271 *wl)
+/* generic ap initialization (non vif-specific) */
+static int wl1271_ap_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret;
- ret = wl1271_ap_init_templates_config(wl);
- if (ret < 0)
- return ret;
-
- /* Configure for power always on */
- ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
- if (ret < 0)
- return ret;
-
- ret = wl1271_init_ap_rates(wl);
- if (ret < 0)
- return ret;
-
- ret = wl1271_acx_ap_max_tx_retry(wl);
- if (ret < 0)
- return ret;
-
- ret = wl12xx_acx_mem_cfg(wl);
- if (ret < 0)
- return ret;
-
- /* initialize Tx power */
- ret = wl1271_acx_tx_power(wl, wl->power_level);
+ ret = wl1271_init_ap_rates(wl, wlvif);
if (ret < 0)
return ret;
return 0;
}
-int wl1271_ap_init_templates(struct wl1271 *wl)
+int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
- ret = wl1271_ap_init_deauth_template(wl);
+ ret = wl1271_ap_init_deauth_template(wl, wlvif);
if (ret < 0)
return ret;
- ret = wl1271_ap_init_null_template(wl);
+ ret = wl1271_ap_init_null_template(wl, vif);
if (ret < 0)
return ret;
- ret = wl1271_ap_init_qos_null_template(wl);
+ ret = wl1271_ap_init_qos_null_template(wl, vif);
if (ret < 0)
return ret;
@@ -469,43 +389,45 @@ int wl1271_ap_init_templates(struct wl1271 *wl)
* when operating as AP we want to receive external beacons for
* configuring ERP protection.
*/
- ret = wl1271_acx_beacon_filter_opt(wl, false);
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
if (ret < 0)
return ret;
return 0;
}
-static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl)
+static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl,
+ struct ieee80211_vif *vif)
{
- return wl1271_ap_init_templates(wl);
+ return wl1271_ap_init_templates(wl, vif);
}
-int wl1271_init_ap_rates(struct wl1271 *wl)
+int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int i, ret;
struct conf_tx_rate_class rc;
u32 supported_rates;
- wl1271_debug(DEBUG_AP, "AP basic rate set: 0x%x", wl->basic_rate_set);
+ wl1271_debug(DEBUG_AP, "AP basic rate set: 0x%x",
+ wlvif->basic_rate_set);
- if (wl->basic_rate_set == 0)
+ if (wlvif->basic_rate_set == 0)
return -EINVAL;
- rc.enabled_rates = wl->basic_rate_set;
+ rc.enabled_rates = wlvif->basic_rate_set;
rc.long_retry_limit = 10;
rc.short_retry_limit = 10;
rc.aflags = 0;
- ret = wl1271_acx_ap_rate_policy(wl, &rc, ACX_TX_AP_MODE_MGMT_RATE);
+ ret = wl1271_acx_ap_rate_policy(wl, &rc, wlvif->ap.mgmt_rate_idx);
if (ret < 0)
return ret;
/* use the min basic rate for AP broadcast/multicast */
- rc.enabled_rates = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+ rc.enabled_rates = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
rc.short_retry_limit = 10;
rc.long_retry_limit = 10;
rc.aflags = 0;
- ret = wl1271_acx_ap_rate_policy(wl, &rc, ACX_TX_AP_MODE_BCST_RATE);
+ ret = wl1271_acx_ap_rate_policy(wl, &rc, wlvif->ap.bcast_rate_idx);
if (ret < 0)
return ret;
@@ -513,7 +435,7 @@ int wl1271_init_ap_rates(struct wl1271 *wl)
* If the basic rates contain OFDM rates, use OFDM only
* rates for unicast TX as well. Else use all supported rates.
*/
- if ((wl->basic_rate_set & CONF_TX_OFDM_RATES))
+ if ((wlvif->basic_rate_set & CONF_TX_OFDM_RATES))
supported_rates = CONF_TX_OFDM_RATES;
else
supported_rates = CONF_TX_AP_ENABLED_RATES;
@@ -527,7 +449,8 @@ int wl1271_init_ap_rates(struct wl1271 *wl)
rc.short_retry_limit = 10;
rc.long_retry_limit = 10;
rc.aflags = 0;
- ret = wl1271_acx_ap_rate_policy(wl, &rc, i);
+ ret = wl1271_acx_ap_rate_policy(wl, &rc,
+ wlvif->ap.ucast_rate_idx[i]);
if (ret < 0)
return ret;
}
@@ -535,24 +458,23 @@ int wl1271_init_ap_rates(struct wl1271 *wl)
return 0;
}
-static int wl1271_set_ba_policies(struct wl1271 *wl)
+static int wl1271_set_ba_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
/* Reset the BA RX indicators */
- wl->ba_rx_bitmap = 0;
- wl->ba_allowed = true;
+ wlvif->ba_allowed = true;
wl->ba_rx_session_count = 0;
/* BA is supported in STA/AP modes */
- if (wl->bss_type != BSS_TYPE_AP_BSS &&
- wl->bss_type != BSS_TYPE_STA_BSS) {
- wl->ba_support = false;
+ if (wlvif->bss_type != BSS_TYPE_AP_BSS &&
+ wlvif->bss_type != BSS_TYPE_STA_BSS) {
+ wlvif->ba_support = false;
return 0;
}
- wl->ba_support = true;
+ wlvif->ba_support = true;
/* 802.11n initiator BA session setting */
- return wl12xx_acx_set_ba_initiator_policy(wl);
+ return wl12xx_acx_set_ba_initiator_policy(wl, wlvif);
}
int wl1271_chip_specific_init(struct wl1271 *wl)
@@ -562,7 +484,7 @@ int wl1271_chip_specific_init(struct wl1271 *wl)
if (wl->chip.id == CHIP_ID_1283_PG20) {
u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE;
- if (wl->quirks & WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT)
+ if (!(wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT))
/* Enable SDIO padding */
host_cfg_bitmap |= HOST_IF_CFG_TX_PAD_TO_SDIO_BLK;
@@ -575,39 +497,186 @@ out:
return ret;
}
+/* vif-specific initialization */
+static int wl12xx_init_sta_role(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ int ret;
-int wl1271_hw_init(struct wl1271 *wl)
+ ret = wl1271_acx_group_address_tbl(wl, wlvif, true, NULL, 0);
+ if (ret < 0)
+ return ret;
+
+ /* Initialize connection monitoring thresholds */
+ ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
+ if (ret < 0)
+ return ret;
+
+ /* Beacon filtering */
+ ret = wl1271_init_sta_beacon_filter(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ /* Beacons and broadcast settings */
+ ret = wl1271_init_beacon_broadcast(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ /* Configure rssi/snr averaging weights */
+ ret = wl1271_acx_rssi_snr_avg_weights(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/* vif-specific initialization */
+static int wl12xx_init_ap_role(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
+ int ret;
+
+ ret = wl1271_acx_ap_max_tx_retry(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ /* initialize Tx power */
+ ret = wl1271_acx_tx_power(wl, wlvif, wlvif->power_level);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif)
+{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct conf_tx_ac_category *conf_ac;
struct conf_tx_tid *conf_tid;
+ bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
int ret, i;
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
- if (wl->chip.id == CHIP_ID_1283_PG20)
- ret = wl128x_cmd_general_parms(wl);
- else
- ret = wl1271_cmd_general_parms(wl);
+ /*
+ * consider all existing roles before configuring psm.
+ * TODO: reconfigure on interface removal.
+ */
+ if (!wl->ap_count) {
+ if (is_ap) {
+ /* Configure for power always on */
+ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
+ if (ret < 0)
+ return ret;
+ } else if (!wl->sta_count) {
+ /* Configure for ELP power saving */
+ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ /* Mode specific init */
+ if (is_ap) {
+ ret = wl1271_ap_hw_init(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_init_ap_role(wl, wlvif);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = wl1271_sta_hw_init(wl, wlvif);
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_init_sta_role(wl, wlvif);
+ if (ret < 0)
+ return ret;
+ }
+
+ wl12xx_init_phy_vif_config(wl, wlvif);
+
+ /* Default TID/AC configuration */
+ BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count);
+ for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
+ conf_ac = &wl->conf.tx.ac_conf[i];
+ ret = wl1271_acx_ac_cfg(wl, wlvif, conf_ac->ac,
+ conf_ac->cw_min, conf_ac->cw_max,
+ conf_ac->aifsn, conf_ac->tx_op_limit);
+ if (ret < 0)
+ return ret;
+
+ conf_tid = &wl->conf.tx.tid_conf[i];
+ ret = wl1271_acx_tid_cfg(wl, wlvif,
+ conf_tid->queue_id,
+ conf_tid->channel_type,
+ conf_tid->tsid,
+ conf_tid->ps_scheme,
+ conf_tid->ack_policy,
+ conf_tid->apsd_conf[0],
+ conf_tid->apsd_conf[1]);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Configure HW encryption */
+ ret = wl1271_acx_feature_cfg(wl, wlvif);
if (ret < 0)
return ret;
- if (wl->chip.id == CHIP_ID_1283_PG20)
- ret = wl128x_cmd_radio_parms(wl);
+ /* Mode specific init - post mem init */
+ if (is_ap)
+ ret = wl1271_ap_hw_init_post_mem(wl, vif);
else
- ret = wl1271_cmd_radio_parms(wl);
+ ret = wl1271_sta_hw_init_post_mem(wl, vif);
+
+ if (ret < 0)
+ return ret;
+
+ /* Configure initiator BA sessions policies */
+ ret = wl1271_set_ba_policies(wl, wlvif);
if (ret < 0)
return ret;
+ return 0;
+}
+
+int wl1271_hw_init(struct wl1271 *wl)
+{
+ int ret;
+
+ if (wl->chip.id == CHIP_ID_1283_PG20) {
+ ret = wl128x_cmd_general_parms(wl);
+ if (ret < 0)
+ return ret;
+ ret = wl128x_cmd_radio_parms(wl);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = wl1271_cmd_general_parms(wl);
+ if (ret < 0)
+ return ret;
+ ret = wl1271_cmd_radio_parms(wl);
+ if (ret < 0)
+ return ret;
+ ret = wl1271_cmd_ext_radio_parms(wl);
+ if (ret < 0)
+ return ret;
+ }
+
/* Chip-specific init */
ret = wl1271_chip_specific_init(wl);
if (ret < 0)
return ret;
- /* Mode specific init */
- if (is_ap)
- ret = wl1271_ap_hw_init(wl);
- else
- ret = wl1271_sta_hw_init(wl);
+ /* Init templates */
+ ret = wl1271_init_templates_config(wl);
+ if (ret < 0)
+ return ret;
+
+ ret = wl12xx_acx_mem_cfg(wl);
+ if (ret < 0)
+ return ret;
+ /* Configure the FW logger */
+ ret = wl12xx_init_fwlog(wl);
if (ret < 0)
return ret;
@@ -626,11 +695,6 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
- /* PHY layer config */
- ret = wl1271_init_phy_config(wl);
- if (ret < 0)
- goto out_free_memmap;
-
ret = wl1271_acx_dco_itrim_params(wl);
if (ret < 0)
goto out_free_memmap;
@@ -655,61 +719,20 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
- /* Default TID/AC configuration */
- BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count);
- for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
- conf_ac = &wl->conf.tx.ac_conf[i];
- ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min,
- conf_ac->cw_max, conf_ac->aifsn,
- conf_ac->tx_op_limit);
- if (ret < 0)
- goto out_free_memmap;
-
- conf_tid = &wl->conf.tx.tid_conf[i];
- ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id,
- conf_tid->channel_type,
- conf_tid->tsid,
- conf_tid->ps_scheme,
- conf_tid->ack_policy,
- conf_tid->apsd_conf[0],
- conf_tid->apsd_conf[1]);
- if (ret < 0)
- goto out_free_memmap;
- }
-
/* Enable data path */
ret = wl1271_cmd_data_path(wl, 1);
if (ret < 0)
goto out_free_memmap;
- /* Configure HW encryption */
- ret = wl1271_acx_feature_cfg(wl);
- if (ret < 0)
- goto out_free_memmap;
-
/* configure PM */
ret = wl1271_acx_pm_config(wl);
if (ret < 0)
goto out_free_memmap;
- /* Mode specific init - post mem init */
- if (is_ap)
- ret = wl1271_ap_hw_init_post_mem(wl);
- else
- ret = wl1271_sta_hw_init_post_mem(wl);
-
- if (ret < 0)
- goto out_free_memmap;
-
ret = wl12xx_acx_set_rate_mgmt_params(wl);
if (ret < 0)
goto out_free_memmap;
- /* Configure initiator BA sessions policies */
- ret = wl1271_set_ba_policies(wl);
- if (ret < 0)
- goto out_free_memmap;
-
/* configure hangover */
ret = wl12xx_acx_config_hangover(wl);
if (ret < 0)
diff --git a/drivers/net/wireless/wl12xx/init.h b/drivers/net/wireless/wl12xx/init.h
index 3a3c230..2da0f40 100644
--- a/drivers/net/wireless/wl12xx/init.h
+++ b/drivers/net/wireless/wl12xx/init.h
@@ -27,13 +27,13 @@
#include "wl12xx.h"
int wl1271_hw_init_power_auth(struct wl1271 *wl);
-int wl1271_sta_init_templates_config(struct wl1271 *wl);
-int wl1271_init_phy_config(struct wl1271 *wl);
+int wl1271_init_templates_config(struct wl1271 *wl);
int wl1271_init_pta(struct wl1271 *wl);
int wl1271_init_energy_detection(struct wl1271 *wl);
int wl1271_chip_specific_init(struct wl1271 *wl);
int wl1271_hw_init(struct wl1271 *wl);
-int wl1271_init_ap_rates(struct wl1271 *wl);
-int wl1271_ap_init_templates(struct wl1271 *wl);
+int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif);
+int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif);
#endif
diff --git a/drivers/net/wireless/wl12xx/io.c b/drivers/net/wireless/wl12xx/io.c
index c2da66f..079ad38 100644
--- a/drivers/net/wireless/wl12xx/io.c
+++ b/drivers/net/wireless/wl12xx/io.c
@@ -24,8 +24,10 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
#include "wl12xx.h"
+#include "debug.h"
#include "wl12xx_80211.h"
#include "io.h"
#include "tx.h"
@@ -46,7 +48,7 @@
bool wl1271_set_block_size(struct wl1271 *wl)
{
if (wl->if_ops->set_block_size) {
- wl->if_ops->set_block_size(wl, WL12XX_BUS_BLOCK_SIZE);
+ wl->if_ops->set_block_size(wl->dev, WL12XX_BUS_BLOCK_SIZE);
return true;
}
@@ -55,12 +57,12 @@ bool wl1271_set_block_size(struct wl1271 *wl)
void wl1271_disable_interrupts(struct wl1271 *wl)
{
- wl->if_ops->disable_irq(wl);
+ disable_irq(wl->irq);
}
void wl1271_enable_interrupts(struct wl1271 *wl)
{
- wl->if_ops->enable_irq(wl);
+ enable_irq(wl->irq);
}
/* Set the SPI partitions to access the chip addresses
@@ -128,13 +130,13 @@ EXPORT_SYMBOL_GPL(wl1271_set_partition);
void wl1271_io_reset(struct wl1271 *wl)
{
if (wl->if_ops->reset)
- wl->if_ops->reset(wl);
+ wl->if_ops->reset(wl->dev);
}
void wl1271_io_init(struct wl1271 *wl)
{
if (wl->if_ops->init)
- wl->if_ops->init(wl);
+ wl->if_ops->init(wl->dev);
}
void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
diff --git a/drivers/net/wireless/wl12xx/io.h b/drivers/net/wireless/wl12xx/io.h
index e839341..d398cbc 100644
--- a/drivers/net/wireless/wl12xx/io.h
+++ b/drivers/net/wireless/wl12xx/io.h
@@ -51,23 +51,17 @@ void wl1271_enable_interrupts(struct wl1271 *wl);
void wl1271_io_reset(struct wl1271 *wl);
void wl1271_io_init(struct wl1271 *wl);
-static inline struct device *wl1271_wl_to_dev(struct wl1271 *wl)
-{
- return wl->if_ops->dev(wl);
-}
-
-
/* Raw target IO, address is not translated */
static inline void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
size_t len, bool fixed)
{
- wl->if_ops->write(wl, addr, buf, len, fixed);
+ wl->if_ops->write(wl->dev, addr, buf, len, fixed);
}
static inline void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
size_t len, bool fixed)
{
- wl->if_ops->read(wl, addr, buf, len, fixed);
+ wl->if_ops->read(wl->dev, addr, buf, len, fixed);
}
static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
@@ -155,13 +149,13 @@ static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val)
static inline void wl1271_power_off(struct wl1271 *wl)
{
- wl->if_ops->power(wl, false);
+ wl->if_ops->power(wl->dev, false);
clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
}
static inline int wl1271_power_on(struct wl1271 *wl)
{
- int ret = wl->if_ops->power(wl, true);
+ int ret = wl->if_ops->power(wl->dev, true);
if (ret == 0)
set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
@@ -176,15 +170,10 @@ u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
int wl1271_set_partition(struct wl1271 *wl,
struct wl1271_partition_set *p);
+bool wl1271_set_block_size(struct wl1271 *wl);
+
/* Functions from wl1271_main.c */
-int wl1271_register_hw(struct wl1271 *wl);
-void wl1271_unregister_hw(struct wl1271 *wl);
-int wl1271_init_ieee80211(struct wl1271 *wl);
-struct ieee80211_hw *wl1271_alloc_hw(void);
-int wl1271_free_hw(struct wl1271 *wl);
-irqreturn_t wl1271_irq(int irq, void *data);
-bool wl1271_set_block_size(struct wl1271 *wl);
int wl1271_tx_dummy_packet(struct wl1271 *wl);
#endif
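The io.c/io.h hunks above switch the if_ops callbacks from taking struct wl1271 to taking the underlying struct device, and IRQ masking now goes through plain enable_irq()/disable_irq() on wl->irq. Below is a hypothetical glue-side callback under that assumption; struct example_glue, example_set_block_size() and the drvdata lookup are invented for illustration and are not taken from the real sdio.c/spi.c code:

struct example_glue {
	struct device *dev;	/* the underlying SDIO/SPI device */
};

static void example_set_block_size(struct device *child, unsigned int blksz)
{
	struct example_glue *glue = dev_get_drvdata(child->parent);

	/* program blksz on glue->dev via the bus-specific API */
	dev_dbg(glue->dev, "setting block size to %u\n", blksz);
}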
diff --git a/drivers/net/wireless/wl12xx/main.c b/drivers/net/wireless/wl12xx/main.c
index 884f82b..d5f55a1 100644
--- a/drivers/net/wireless/wl12xx/main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -32,8 +32,10 @@
#include <linux/slab.h>
#include <linux/wl12xx.h>
#include <linux/sched.h>
+#include <linux/interrupt.h>
#include "wl12xx.h"
+#include "debug.h"
#include "wl12xx_80211.h"
#include "reg.h"
#include "io.h"
@@ -377,42 +379,30 @@ static char *fwlog_param;
static bool bug_on_recovery;
static void __wl1271_op_remove_interface(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
bool reset_tx_queues);
-static void wl1271_free_ap_keys(struct wl1271 *wl);
-
-
-static void wl1271_device_release(struct device *dev)
-{
-
-}
-
-static struct platform_device wl1271_device = {
- .name = "wl1271",
- .id = -1,
-
- /* device model insists to have a release function */
- .dev = {
- .release = wl1271_device_release,
- },
-};
+static void wl1271_op_stop(struct ieee80211_hw *hw);
+static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
static DEFINE_MUTEX(wl_list_mutex);
static LIST_HEAD(wl_list);
-static int wl1271_check_operstate(struct wl1271 *wl, unsigned char operstate)
+static int wl1271_check_operstate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ unsigned char operstate)
{
int ret;
+
if (operstate != IF_OPER_UP)
return 0;
- if (test_and_set_bit(WL1271_FLAG_STA_STATE_SENT, &wl->flags))
+ if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
return 0;
- ret = wl12xx_cmd_set_peer_state(wl, wl->sta_hlid);
+ ret = wl12xx_cmd_set_peer_state(wl, wlvif->sta.hlid);
if (ret < 0)
return ret;
- wl12xx_croc(wl, wl->role_id);
+ wl12xx_croc(wl, wlvif->role_id);
wl1271_info("Association completed.");
return 0;
@@ -426,6 +416,7 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
struct ieee80211_hw *hw;
struct wl1271 *wl;
struct wl1271 *wl_temp;
+ struct wl12xx_vif *wlvif;
int ret = 0;
/* Check that this notification is for us. */
@@ -459,17 +450,28 @@ static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
if (wl->state == WL1271_STATE_OFF)
goto out;
- if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ if (dev->operstate != IF_OPER_UP)
goto out;
+ /*
+ * The correct behavior should be just getting the appropriate wlvif
+ * from the given dev, but currently we don't have a mac80211
+ * interface for it.
+ */
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out;
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+ continue;
- wl1271_check_operstate(wl, dev->operstate);
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
- wl1271_ps_elp_sleep(wl);
+ wl1271_check_operstate(wl, wlvif,
+ ieee80211_get_operstate(vif));
+ wl1271_ps_elp_sleep(wl);
+ }
out:
mutex_unlock(&wl->mutex);
@@ -498,19 +500,20 @@ static int wl1271_reg_notify(struct wiphy *wiphy,
return 0;
}
-static int wl1271_set_rx_streaming(struct wl1271 *wl, bool enable)
+static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool enable)
{
int ret = 0;
/* we should hold wl->mutex */
- ret = wl1271_acx_ps_rx_streaming(wl, enable);
+ ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
if (ret < 0)
goto out;
if (enable)
- set_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags);
+ set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
else
- clear_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags);
+ clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
out:
return ret;
}
@@ -519,25 +522,25 @@ out:
* this function is called when the rx_streaming interval
* has been changed or rx_streaming should be disabled
*/
-int wl1271_recalc_rx_streaming(struct wl1271 *wl)
+int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret = 0;
int period = wl->conf.rx_streaming.interval;
/* don't reconfigure if rx_streaming is disabled */
- if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
+ if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
goto out;
/* reconfigure/disable according to new streaming_period */
if (period &&
- test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) &&
+ test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
(wl->conf.rx_streaming.always ||
test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
- ret = wl1271_set_rx_streaming(wl, true);
+ ret = wl1271_set_rx_streaming(wl, wlvif, true);
else {
- ret = wl1271_set_rx_streaming(wl, false);
+ ret = wl1271_set_rx_streaming(wl, wlvif, false);
/* don't cancel_work_sync since we might deadlock */
- del_timer_sync(&wl->rx_streaming_timer);
+ del_timer_sync(&wlvif->rx_streaming_timer);
}
out:
return ret;
@@ -546,13 +549,14 @@ out:
static void wl1271_rx_streaming_enable_work(struct work_struct *work)
{
int ret;
- struct wl1271 *wl =
- container_of(work, struct wl1271, rx_streaming_enable_work);
+ struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
+ rx_streaming_enable_work);
+ struct wl1271 *wl = wlvif->wl;
mutex_lock(&wl->mutex);
- if (test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags) ||
- !test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
+ if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
+ !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
(!wl->conf.rx_streaming.always &&
!test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
goto out;
@@ -564,12 +568,12 @@ static void wl1271_rx_streaming_enable_work(struct work_struct *work)
if (ret < 0)
goto out;
- ret = wl1271_set_rx_streaming(wl, true);
+ ret = wl1271_set_rx_streaming(wl, wlvif, true);
if (ret < 0)
goto out_sleep;
/* stop it after some time of inactivity */
- mod_timer(&wl->rx_streaming_timer,
+ mod_timer(&wlvif->rx_streaming_timer,
jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
out_sleep:
@@ -581,19 +585,20 @@ out:
static void wl1271_rx_streaming_disable_work(struct work_struct *work)
{
int ret;
- struct wl1271 *wl =
- container_of(work, struct wl1271, rx_streaming_disable_work);
+ struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
+ rx_streaming_disable_work);
+ struct wl1271 *wl = wlvif->wl;
mutex_lock(&wl->mutex);
- if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
+ if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
goto out;
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
- ret = wl1271_set_rx_streaming(wl, false);
+ ret = wl1271_set_rx_streaming(wl, wlvif, false);
if (ret)
goto out_sleep;
@@ -605,8 +610,9 @@ out:
static void wl1271_rx_streaming_timer(unsigned long data)
{
- struct wl1271 *wl = (struct wl1271 *)data;
- ieee80211_queue_work(wl->hw, &wl->rx_streaming_disable_work);
+ struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
+ struct wl1271 *wl = wlvif->wl;
+ ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
}
static void wl1271_conf_init(struct wl1271 *wl)
@@ -645,9 +651,7 @@ static void wl1271_conf_init(struct wl1271 *wl)
static int wl1271_plt_init(struct wl1271 *wl)
{
- struct conf_tx_ac_category *conf_ac;
- struct conf_tx_tid *conf_tid;
- int ret, i;
+ int ret;
if (wl->chip.id == CHIP_ID_1283_PG20)
ret = wl128x_cmd_general_parms(wl);
@@ -676,74 +680,14 @@ static int wl1271_plt_init(struct wl1271 *wl)
if (ret < 0)
return ret;
- ret = wl1271_sta_init_templates_config(wl);
- if (ret < 0)
- return ret;
-
ret = wl1271_acx_init_mem_config(wl);
if (ret < 0)
return ret;
- /* PHY layer config */
- ret = wl1271_init_phy_config(wl);
- if (ret < 0)
- goto out_free_memmap;
-
- ret = wl1271_acx_dco_itrim_params(wl);
- if (ret < 0)
- goto out_free_memmap;
-
- /* Initialize connection monitoring thresholds */
- ret = wl1271_acx_conn_monit_params(wl, false);
- if (ret < 0)
- goto out_free_memmap;
-
- /* Bluetooth WLAN coexistence */
- ret = wl1271_init_pta(wl);
- if (ret < 0)
- goto out_free_memmap;
-
- /* FM WLAN coexistence */
- ret = wl1271_acx_fm_coex(wl);
- if (ret < 0)
- goto out_free_memmap;
-
- /* Energy detection */
- ret = wl1271_init_energy_detection(wl);
- if (ret < 0)
- goto out_free_memmap;
-
ret = wl12xx_acx_mem_cfg(wl);
if (ret < 0)
goto out_free_memmap;
- /* Default fragmentation threshold */
- ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold);
- if (ret < 0)
- goto out_free_memmap;
-
- /* Default TID/AC configuration */
- BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count);
- for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
- conf_ac = &wl->conf.tx.ac_conf[i];
- ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min,
- conf_ac->cw_max, conf_ac->aifsn,
- conf_ac->tx_op_limit);
- if (ret < 0)
- goto out_free_memmap;
-
- conf_tid = &wl->conf.tx.tid_conf[i];
- ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id,
- conf_tid->channel_type,
- conf_tid->tsid,
- conf_tid->ps_scheme,
- conf_tid->ack_policy,
- conf_tid->apsd_conf[0],
- conf_tid->apsd_conf[1]);
- if (ret < 0)
- goto out_free_memmap;
- }
-
/* Enable data path */
ret = wl1271_cmd_data_path(wl, 1);
if (ret < 0)
@@ -768,14 +712,12 @@ static int wl1271_plt_init(struct wl1271 *wl)
return ret;
}
-static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts)
+static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ u8 hlid, u8 tx_pkts)
{
bool fw_ps, single_sta;
- /* only regulate station links */
- if (hlid < WL1271_AP_STA_HLID_START)
- return;
-
fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
single_sta = (wl->active_sta_count == 1);
@@ -784,7 +726,7 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts)
* packets in FW or if the STA is awake.
*/
if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
- wl1271_ps_link_end(wl, hlid);
+ wl12xx_ps_link_end(wl, wlvif, hlid);
/*
* Start high-level PS if the STA is asleep with enough blocks in FW.
@@ -792,24 +734,14 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts)
* case FW-memory congestion is not a problem.
*/
else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
- wl1271_ps_link_start(wl, hlid, true);
-}
-
-bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid)
-{
- int id;
-
- /* global/broadcast "stations" are always active */
- if (hlid < WL1271_AP_STA_HLID_START)
- return true;
-
- id = hlid - WL1271_AP_STA_HLID_START;
- return test_bit(id, wl->ap_hlid_map);
+ wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
static void wl12xx_irq_update_links_status(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct wl12xx_fw_status *status)
{
+ struct wl1271_link *lnk;
u32 cur_fw_ps_map;
u8 hlid, cnt;
@@ -825,25 +757,22 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
wl->ap_fw_ps_map = cur_fw_ps_map;
}
- for (hlid = WL1271_AP_STA_HLID_START; hlid < AP_MAX_LINKS; hlid++) {
- if (!wl1271_is_active_sta(wl, hlid))
- continue;
-
- cnt = status->tx_lnk_free_pkts[hlid] -
- wl->links[hlid].prev_freed_pkts;
+ for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS) {
+ lnk = &wl->links[hlid];
+ cnt = status->tx_lnk_free_pkts[hlid] - lnk->prev_freed_pkts;
- wl->links[hlid].prev_freed_pkts =
- status->tx_lnk_free_pkts[hlid];
- wl->links[hlid].allocated_pkts -= cnt;
+ lnk->prev_freed_pkts = status->tx_lnk_free_pkts[hlid];
+ lnk->allocated_pkts -= cnt;
- wl12xx_irq_ps_regulate_link(wl, hlid,
- wl->links[hlid].allocated_pkts);
+ wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
+ lnk->allocated_pkts);
}
}
static void wl12xx_fw_status(struct wl1271 *wl,
struct wl12xx_fw_status *status)
{
+ struct wl12xx_vif *wlvif;
struct timespec ts;
u32 old_tx_blk_count = wl->tx_blocks_available;
int avail, freed_blocks;
@@ -898,8 +827,9 @@ static void wl12xx_fw_status(struct wl1271 *wl,
clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
/* for AP update num of allocated TX blocks per link and ps status */
- if (wl->bss_type == BSS_TYPE_AP_BSS)
- wl12xx_irq_update_links_status(wl, status);
+ wl12xx_for_each_wlvif_ap(wl, wlvif) {
+ wl12xx_irq_update_links_status(wl, wlvif, status);
+ }
/* update the host-chipset time offset */
getnstimeofday(&ts);
@@ -932,7 +862,7 @@ static void wl1271_netstack_work(struct work_struct *work)
#define WL1271_IRQ_MAX_LOOPS 256
-irqreturn_t wl1271_irq(int irq, void *cookie)
+static irqreturn_t wl1271_irq(int irq, void *cookie)
{
int ret;
u32 intr;
@@ -1054,7 +984,6 @@ out:
return IRQ_HANDLED;
}
-EXPORT_SYMBOL_GPL(wl1271_irq);
static int wl1271_fetch_firmware(struct wl1271 *wl)
{
@@ -1069,10 +998,10 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
- ret = request_firmware(&fw, fw_name, wl1271_wl_to_dev(wl));
+ ret = request_firmware(&fw, fw_name, wl->dev);
if (ret < 0) {
- wl1271_error("could not get firmware: %d", ret);
+ wl1271_error("could not get firmware %s: %d", fw_name, ret);
return ret;
}
@@ -1107,10 +1036,11 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
const struct firmware *fw;
int ret;
- ret = request_firmware(&fw, WL12XX_NVS_NAME, wl1271_wl_to_dev(wl));
+ ret = request_firmware(&fw, WL12XX_NVS_NAME, wl->dev);
if (ret < 0) {
- wl1271_error("could not get nvs file: %d", ret);
+ wl1271_error("could not get nvs file %s: %d", WL12XX_NVS_NAME,
+ ret);
return ret;
}
@@ -1217,11 +1147,13 @@ static void wl1271_recovery_work(struct work_struct *work)
{
struct wl1271 *wl =
container_of(work, struct wl1271, recovery_work);
+ struct wl12xx_vif *wlvif;
+ struct ieee80211_vif *vif;
mutex_lock(&wl->mutex);
if (wl->state != WL1271_STATE_ON)
- goto out;
+ goto out_unlock;
/* Avoid a recursive recovery */
set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
@@ -1238,9 +1170,12 @@ static void wl1271_recovery_work(struct work_struct *work)
* in the firmware during recovery. This doesn't hurt if the network is
* not encrypted.
*/
- if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
- test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
- wl->tx_security_seq += WL1271_TX_SQN_POST_RECOVERY_PADDING;
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
+ test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
+ wlvif->tx_security_seq +=
+ WL1271_TX_SQN_POST_RECOVERY_PADDING;
+ }
/* Prevent spurious TX during FW restart */
ieee80211_stop_queues(wl->hw);
@@ -1251,7 +1186,14 @@ static void wl1271_recovery_work(struct work_struct *work)
}
/* reboot the chipset */
- __wl1271_op_remove_interface(wl, false);
+ while (!list_empty(&wl->wlvif_list)) {
+ wlvif = list_first_entry(&wl->wlvif_list,
+ struct wl12xx_vif, list);
+ vif = wl12xx_wlvif_to_vif(wlvif);
+ __wl1271_op_remove_interface(wl, vif, false);
+ }
+ mutex_unlock(&wl->mutex);
+ wl1271_op_stop(wl->hw);
clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
@@ -1262,8 +1204,8 @@ static void wl1271_recovery_work(struct work_struct *work)
* to restart the HW.
*/
ieee80211_wake_queues(wl->hw);
-
-out:
+ return;
+out_unlock:
mutex_unlock(&wl->mutex);
}
@@ -1318,7 +1260,16 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
/* 0. read chip id from CHIP_ID */
wl->chip.id = wl1271_read32(wl, CHIP_ID_B);
- /* 1. check if chip id is valid */
+ /*
+ * For wl127x based devices we could use the default block
+ * size (512 bytes), but due to a bug in the sdio driver, we
+ * need to set it explicitly after the chip is powered on. To
+ * simplify the code and since the performance impact is
+ * negligible, we use the same block size for all different
+ * chip types.
+ */
+ if (!wl1271_set_block_size(wl))
+ wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
switch (wl->chip.id) {
case CHIP_ID_1271_PG10:
@@ -1328,7 +1279,9 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
ret = wl1271_setup(wl);
if (ret < 0)
goto out;
+ wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
break;
+
case CHIP_ID_1271_PG20:
wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
wl->chip.id);
@@ -1336,7 +1289,9 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
ret = wl1271_setup(wl);
if (ret < 0)
goto out;
+ wl->quirks |= WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT;
break;
+
case CHIP_ID_1283_PG20:
wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)",
wl->chip.id);
@@ -1344,9 +1299,6 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
ret = wl1271_setup(wl);
if (ret < 0)
goto out;
-
- if (wl1271_set_block_size(wl))
- wl->quirks |= WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT;
break;
case CHIP_ID_1283_PG10:
default:
@@ -1389,8 +1341,6 @@ int wl1271_plt_start(struct wl1271 *wl)
goto out;
}
- wl->bss_type = BSS_TYPE_STA_BSS;
-
while (retries) {
retries--;
ret = wl1271_chip_wakeup(wl);
@@ -1482,33 +1432,34 @@ int wl1271_plt_stop(struct wl1271 *wl)
static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct wl1271 *wl = hw->priv;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_vif *vif = info->control.vif;
+ struct wl12xx_vif *wlvif = NULL;
unsigned long flags;
int q, mapping;
- u8 hlid = 0;
+ u8 hlid;
+
+ if (vif)
+ wlvif = wl12xx_vif_to_data(vif);
mapping = skb_get_queue_mapping(skb);
q = wl1271_tx_get_queue(mapping);
- if (wl->bss_type == BSS_TYPE_AP_BSS)
- hlid = wl12xx_tx_get_hlid_ap(wl, skb);
+ hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
spin_lock_irqsave(&wl->wl_lock, flags);
/* queue the packet */
- if (wl->bss_type == BSS_TYPE_AP_BSS) {
- if (!wl1271_is_active_sta(wl, hlid)) {
- wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d",
- hlid, q);
- dev_kfree_skb(skb);
- goto out;
- }
-
- wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
- skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
- } else {
- skb_queue_tail(&wl->tx_queue[q], skb);
+ if (hlid == WL12XX_INVALID_LINK_ID ||
+ (wlvif && !test_bit(hlid, wlvif->links_map))) {
+ wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
+ ieee80211_free_txskb(hw, skb);
+ goto out;
}
+ wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
+ skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
+
wl->tx_queue_count[q]++;
/*
@@ -1609,13 +1560,14 @@ static struct notifier_block wl1271_dev_notifier = {
};
#ifdef CONFIG_PM
-static int wl1271_configure_suspend_sta(struct wl1271 *wl)
+static int wl1271_configure_suspend_sta(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int ret = 0;
mutex_lock(&wl->mutex);
- if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
goto out_unlock;
ret = wl1271_ps_elp_wakeup(wl);
@@ -1623,12 +1575,12 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl)
goto out_unlock;
/* enter psm if needed */
- if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+ if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
DECLARE_COMPLETION_ONSTACK(compl);
- wl->ps_compl = &compl;
- ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
- wl->basic_rate, true);
+ wlvif->ps_compl = &compl;
+ ret = wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE,
+ wlvif->basic_rate, true);
if (ret < 0)
goto out_sleep;
@@ -1638,42 +1590,43 @@ static int wl1271_configure_suspend_sta(struct wl1271 *wl)
ret = wait_for_completion_timeout(
&compl, msecs_to_jiffies(WL1271_PS_COMPLETE_TIMEOUT));
+
+ mutex_lock(&wl->mutex);
if (ret <= 0) {
wl1271_warning("couldn't enter ps mode!");
ret = -EBUSY;
- goto out;
+ goto out_cleanup;
}
- /* take mutex again, and wakeup */
- mutex_lock(&wl->mutex);
-
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
- goto out_unlock;
+ goto out_cleanup;
}
out_sleep:
wl1271_ps_elp_sleep(wl);
+out_cleanup:
+ wlvif->ps_compl = NULL;
out_unlock:
mutex_unlock(&wl->mutex);
-out:
return ret;
}
-static int wl1271_configure_suspend_ap(struct wl1271 *wl)
+static int wl1271_configure_suspend_ap(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int ret = 0;
mutex_lock(&wl->mutex);
- if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
+ if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
goto out_unlock;
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out_unlock;
- ret = wl1271_acx_beacon_filter_opt(wl, true);
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
wl1271_ps_elp_sleep(wl);
out_unlock:
@@ -1682,20 +1635,22 @@ out_unlock:
}
-static int wl1271_configure_suspend(struct wl1271 *wl)
+static int wl1271_configure_suspend(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
- if (wl->bss_type == BSS_TYPE_STA_BSS)
- return wl1271_configure_suspend_sta(wl);
- if (wl->bss_type == BSS_TYPE_AP_BSS)
- return wl1271_configure_suspend_ap(wl);
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS)
+ return wl1271_configure_suspend_sta(wl, wlvif);
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ return wl1271_configure_suspend_ap(wl, wlvif);
return 0;
}
-static void wl1271_configure_resume(struct wl1271 *wl)
+static void wl1271_configure_resume(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
int ret;
- bool is_sta = wl->bss_type == BSS_TYPE_STA_BSS;
- bool is_ap = wl->bss_type == BSS_TYPE_AP_BSS;
+ bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
+ bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
if (!is_sta && !is_ap)
return;
@@ -1707,11 +1662,11 @@ static void wl1271_configure_resume(struct wl1271 *wl)
if (is_sta) {
/* exit psm if it wasn't configured */
- if (!test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags))
- wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
- wl->basic_rate, true);
+ if (!test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags))
+ wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE,
+ wlvif->basic_rate, true);
} else if (is_ap) {
- wl1271_acx_beacon_filter_opt(wl, false);
+ wl1271_acx_beacon_filter_opt(wl, wlvif, false);
}
wl1271_ps_elp_sleep(wl);
@@ -1723,16 +1678,19 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wow)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif;
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
WARN_ON(!wow || !wow->any);
wl->wow_enabled = true;
- ret = wl1271_configure_suspend(wl);
- if (ret < 0) {
- wl1271_warning("couldn't prepare device to suspend");
- return ret;
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ ret = wl1271_configure_suspend(wl, wlvif);
+ if (ret < 0) {
+ wl1271_warning("couldn't prepare device to suspend");
+ return ret;
+ }
}
/* flush any remaining work */
wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
@@ -1751,7 +1709,9 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
wl1271_enable_interrupts(wl);
flush_work(&wl->tx_work);
- flush_delayed_work(&wl->pspoll_work);
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ flush_delayed_work(&wlvif->pspoll_work);
+ }
flush_delayed_work(&wl->elp_work);
return 0;
@@ -1760,6 +1720,7 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
static int wl1271_op_resume(struct ieee80211_hw *hw)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif;
unsigned long flags;
bool run_irq_work = false;
@@ -1783,7 +1744,9 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
wl1271_irq(0, wl);
wl1271_enable_interrupts(wl);
}
- wl1271_configure_resume(wl);
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ wl1271_configure_resume(wl, wlvif);
+ }
wl->wow_enabled = false;
return 0;
@@ -1810,20 +1773,119 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
static void wl1271_op_stop(struct ieee80211_hw *hw)
{
+ struct wl1271 *wl = hw->priv;
+ int i;
+
wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
+
+ mutex_lock(&wl->mutex);
+ if (wl->state == WL1271_STATE_OFF) {
+ mutex_unlock(&wl->mutex);
+ return;
+ }
+ /*
+ * this must be before the cancel_work calls below, so that the work
+ * functions don't perform further work.
+ */
+ wl->state = WL1271_STATE_OFF;
+ mutex_unlock(&wl->mutex);
+
+ mutex_lock(&wl_list_mutex);
+ list_del(&wl->list);
+ mutex_unlock(&wl_list_mutex);
+
+ wl1271_disable_interrupts(wl);
+ wl1271_flush_deferred_work(wl);
+ cancel_delayed_work_sync(&wl->scan_complete_work);
+ cancel_work_sync(&wl->netstack_work);
+ cancel_work_sync(&wl->tx_work);
+ cancel_delayed_work_sync(&wl->elp_work);
+
+ /* let's notify MAC80211 about the remaining pending TX frames */
+ wl12xx_tx_reset(wl, true);
+ mutex_lock(&wl->mutex);
+
+ wl1271_power_off(wl);
+
+ wl->band = IEEE80211_BAND_2GHZ;
+
+ wl->rx_counter = 0;
+ wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
+ wl->tx_blocks_available = 0;
+ wl->tx_allocated_blocks = 0;
+ wl->tx_results_count = 0;
+ wl->tx_packets_count = 0;
+ wl->time_offset = 0;
+ wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
+ wl->ap_fw_ps_map = 0;
+ wl->ap_ps_map = 0;
+ wl->sched_scanning = false;
+ memset(wl->roles_map, 0, sizeof(wl->roles_map));
+ memset(wl->links_map, 0, sizeof(wl->links_map));
+ memset(wl->roc_map, 0, sizeof(wl->roc_map));
+ wl->active_sta_count = 0;
+
+ /* The system link is always allocated */
+ __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
+
+ /*
+ * this is performed after the cancel_work calls and the associated
+ * mutex_lock, so that wl1271_op_add_interface does not accidentally
+ * get executed before all these vars have been reset.
+ */
+ wl->flags = 0;
+
+ wl->tx_blocks_freed = 0;
+
+ for (i = 0; i < NUM_TX_QUEUES; i++) {
+ wl->tx_pkts_freed[i] = 0;
+ wl->tx_allocated_pkts[i] = 0;
+ }
+
+ wl1271_debugfs_reset(wl);
+
+ kfree(wl->fw_status);
+ wl->fw_status = NULL;
+ kfree(wl->tx_res_if);
+ wl->tx_res_if = NULL;
+ kfree(wl->target_mem_map);
+ wl->target_mem_map = NULL;
+
+ mutex_unlock(&wl->mutex);
}
-static u8 wl12xx_get_role_type(struct wl1271 *wl)
+static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
{
- switch (wl->bss_type) {
+ u8 policy = find_first_zero_bit(wl->rate_policies_map,
+ WL12XX_MAX_RATE_POLICIES);
+ if (policy >= WL12XX_MAX_RATE_POLICIES)
+ return -EBUSY;
+
+ __set_bit(policy, wl->rate_policies_map);
+ *idx = policy;
+ return 0;
+}
+
+static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
+{
+ if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
+ return;
+
+ __clear_bit(*idx, wl->rate_policies_map);
+ *idx = WL12XX_MAX_RATE_POLICIES;
+}
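The two helpers above manage firmware rate-policy slots through the wl->rate_policies_map bitmap: allocation takes the first clear bit and fails with -EBUSY once WL12XX_MAX_RATE_POLICIES slots are in use, while free clears the bit and parks the index at the invalid value. wl12xx_init_vif_data() below calls the allocator without checking the result; a hedged sketch of a caller that does propagate the error (example_alloc_sta_policies() is hypothetical, for illustration only):

static int example_alloc_sta_policies(struct wl1271 *wl,
				      struct wl12xx_vif *wlvif)
{
	int ret;

	ret = wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
	if (ret < 0)
		return ret;

	ret = wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
	if (ret < 0) {
		/* roll back the slot we already took */
		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
		return ret;
	}

	return 0;
}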
+
+static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
+{
+ switch (wlvif->bss_type) {
case BSS_TYPE_AP_BSS:
- if (wl->p2p)
+ if (wlvif->p2p)
return WL1271_ROLE_P2P_GO;
else
return WL1271_ROLE_AP;
case BSS_TYPE_STA_BSS:
- if (wl->p2p)
+ if (wlvif->p2p)
return WL1271_ROLE_P2P_CL;
else
return WL1271_ROLE_STA;
@@ -1832,78 +1894,95 @@ static u8 wl12xx_get_role_type(struct wl1271 *wl)
return WL1271_ROLE_IBSS;
default:
- wl1271_error("invalid bss_type: %d", wl->bss_type);
+ wl1271_error("invalid bss_type: %d", wlvif->bss_type);
}
return WL12XX_INVALID_ROLE_TYPE;
}
-static int wl1271_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
{
- struct wl1271 *wl = hw->priv;
- struct wiphy *wiphy = hw->wiphy;
- int retries = WL1271_BOOT_RETRIES;
- int ret = 0;
- u8 role_type;
- bool booted = false;
-
- wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
- ieee80211_vif_type_p2p(vif), vif->addr);
-
- mutex_lock(&wl->mutex);
- if (wl->vif) {
- wl1271_debug(DEBUG_MAC80211,
- "multiple vifs are not supported yet");
- ret = -EBUSY;
- goto out;
- }
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ int i;
- /*
- * in some very corner case HW recovery scenarios its possible to
- * get here before __wl1271_op_remove_interface is complete, so
- * opt out if that is the case.
- */
- if (test_bit(WL1271_FLAG_IF_INITIALIZED, &wl->flags)) {
- ret = -EBUSY;
- goto out;
- }
+ /* clear everything but the persistent data */
+ memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
switch (ieee80211_vif_type_p2p(vif)) {
case NL80211_IFTYPE_P2P_CLIENT:
- wl->p2p = 1;
+ wlvif->p2p = 1;
/* fall-through */
case NL80211_IFTYPE_STATION:
- wl->bss_type = BSS_TYPE_STA_BSS;
- wl->set_bss_type = BSS_TYPE_STA_BSS;
+ wlvif->bss_type = BSS_TYPE_STA_BSS;
break;
case NL80211_IFTYPE_ADHOC:
- wl->bss_type = BSS_TYPE_IBSS;
- wl->set_bss_type = BSS_TYPE_STA_BSS;
+ wlvif->bss_type = BSS_TYPE_IBSS;
break;
case NL80211_IFTYPE_P2P_GO:
- wl->p2p = 1;
+ wlvif->p2p = 1;
/* fall-through */
case NL80211_IFTYPE_AP:
- wl->bss_type = BSS_TYPE_AP_BSS;
+ wlvif->bss_type = BSS_TYPE_AP_BSS;
break;
default:
- ret = -EOPNOTSUPP;
- goto out;
+ wlvif->bss_type = MAX_BSS_TYPE;
+ return -EOPNOTSUPP;
}
- role_type = wl12xx_get_role_type(wl);
- if (role_type == WL12XX_INVALID_ROLE_TYPE) {
- ret = -EINVAL;
- goto out;
+ wlvif->role_id = WL12XX_INVALID_ROLE_ID;
+ wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
+ wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
+
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+ wlvif->bss_type == BSS_TYPE_IBSS) {
+ /* init sta/ibss data */
+ wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
+ wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
+ wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
+ wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
+ } else {
+ /* init ap data */
+ wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
+ wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
+ wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
+ wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
+ for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
+ wl12xx_allocate_rate_policy(wl,
+ &wlvif->ap.ucast_rate_idx[i]);
}
- memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
- if (wl->state != WL1271_STATE_OFF) {
- wl1271_error("cannot start because not in off state: %d",
- wl->state);
- ret = -EBUSY;
- goto out;
- }
+ wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
+ wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
+ wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
+ wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
+ wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
+ wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
+
+ /*
+ * mac80211 configures some values globally, while we treat them
+ * per-interface. thus, on init, we have to copy them from wl
+ */
+ wlvif->band = wl->band;
+ wlvif->channel = wl->channel;
+ wlvif->power_level = wl->power_level;
+
+ INIT_WORK(&wlvif->rx_streaming_enable_work,
+ wl1271_rx_streaming_enable_work);
+ INIT_WORK(&wlvif->rx_streaming_disable_work,
+ wl1271_rx_streaming_disable_work);
+ INIT_DELAYED_WORK(&wlvif->pspoll_work, wl1271_pspoll_work);
+ INIT_LIST_HEAD(&wlvif->list);
+
+ setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
+ (unsigned long) wlvif);
+ return 0;
+}
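The memset at the top of wl12xx_init_vif_data() clears the per-vif state only up to offsetof(struct wl12xx_vif, persistent), so everything from the persistent member onward survives an interface remove/add cycle. A self-contained sketch of that pattern with a hypothetical struct example_vif (illustrative only; the real wl12xx_vif layout is defined in wl12xx.h):

#include <stddef.h>
#include <string.h>

struct example_vif {
	int role_id;		/* cleared on every add_interface */
	unsigned long flags;	/* cleared on every add_interface */
	struct {
		int counter;	/* kept across interface re-adds */
	} persistent;		/* kept last so the offsetof() trick works */
};

static void example_reset(struct example_vif *v)
{
	/* zero everything declared before 'persistent', keep the rest */
	memset(v, 0, offsetof(struct example_vif, persistent));
}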
+
+static bool wl12xx_init_fw(struct wl1271 *wl)
+{
+ int retries = WL1271_BOOT_RETRIES;
+ bool booted = false;
+ struct wiphy *wiphy = wl->hw->wiphy;
+ int ret;
while (retries) {
retries--;
@@ -1915,25 +1994,6 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
if (ret < 0)
goto power_off;
- if (wl->bss_type == BSS_TYPE_STA_BSS ||
- wl->bss_type == BSS_TYPE_IBSS) {
- /*
- * The device role is a special role used for
- * rx and tx frames prior to association (as
- * the STA role can get packets only from
- * its associated bssid)
- */
- ret = wl12xx_cmd_role_enable(wl,
- WL1271_ROLE_DEVICE,
- &wl->dev_role_id);
- if (ret < 0)
- goto irq_disable;
- }
-
- ret = wl12xx_cmd_role_enable(wl, role_type, &wl->role_id);
- if (ret < 0)
- goto irq_disable;
-
ret = wl1271_hw_init(wl);
if (ret < 0)
goto irq_disable;
@@ -1964,9 +2024,6 @@ power_off:
goto out;
}
- wl->vif = vif;
- wl->state = WL1271_STATE_ON;
- set_bit(WL1271_FLAG_IF_INITIALIZED, &wl->flags);
wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
/* update hw/fw version info in wiphy struct */
@@ -1984,7 +2041,115 @@ power_off:
wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
wl->enable_11a ? "" : "not ");
+ wl->state = WL1271_STATE_ON;
+out:
+ return booted;
+}
+
+static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
+{
+ return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
+}
+
+static int wl1271_op_add_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ int ret = 0;
+ u8 role_type;
+ bool booted = false;
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
+ ieee80211_vif_type_p2p(vif), vif->addr);
+
+ mutex_lock(&wl->mutex);
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out_unlock;
+
+ if (wl->vif) {
+ wl1271_debug(DEBUG_MAC80211,
+ "multiple vifs are not supported yet");
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /*
+ * in some very corner case HW recovery scenarios it's possible to
+ * get here before __wl1271_op_remove_interface is complete, so
+ * opt out if that is the case.
+ */
+ if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
+ test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ ret = wl12xx_init_vif_data(wl, vif);
+ if (ret < 0)
+ goto out;
+
+ wlvif->wl = wl;
+ role_type = wl12xx_get_role_type(wl, wlvif);
+ if (role_type == WL12XX_INVALID_ROLE_TYPE) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /*
+ * TODO: once the nvs issue is solved, move this block
+ * to start(), and make sure the driver is ON here.
+ */
+ if (wl->state == WL1271_STATE_OFF) {
+ /*
+ * we still need this in order to configure the fw
+ * while uploading the nvs
+ */
+ memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
+
+ booted = wl12xx_init_fw(wl);
+ if (!booted) {
+ ret = -EINVAL;
+ goto out;
+ }
+ }
+
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+ wlvif->bss_type == BSS_TYPE_IBSS) {
+ /*
+ * The device role is a special role used for
+ * rx and tx frames prior to association (as
+ * the STA role can get packets only from
+ * its associated bssid)
+ */
+ ret = wl12xx_cmd_role_enable(wl, vif->addr,
+ WL1271_ROLE_DEVICE,
+ &wlvif->dev_role_id);
+ if (ret < 0)
+ goto out;
+ }
+
+ ret = wl12xx_cmd_role_enable(wl, vif->addr,
+ role_type, &wlvif->role_id);
+ if (ret < 0)
+ goto out;
+
+ ret = wl1271_init_vif_specific(wl, vif);
+ if (ret < 0)
+ goto out;
+
+ wl->vif = vif;
+ list_add(&wlvif->list, &wl->wlvif_list);
+ set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
+
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ wl->ap_count++;
+ else
+ wl->sta_count++;
out:
+ wl1271_ps_elp_sleep(wl);
+out_unlock:
mutex_unlock(&wl->mutex);
mutex_lock(&wl_list_mutex);
@@ -1996,29 +2161,34 @@ out:
}
static void __wl1271_op_remove_interface(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
bool reset_tx_queues)
{
- int ret, i;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ int i, ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
+ if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
+ return;
+
+ wl->vif = NULL;
+
/* because of hardware recovery, we may get here twice */
if (wl->state != WL1271_STATE_ON)
return;
wl1271_info("down");
- mutex_lock(&wl_list_mutex);
- list_del(&wl->list);
- mutex_unlock(&wl_list_mutex);
-
/* enable dyn ps just in case (if left on due to fw crash etc) */
- if (wl->bss_type == BSS_TYPE_STA_BSS)
- ieee80211_enable_dyn_ps(wl->vif);
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS)
+ ieee80211_enable_dyn_ps(vif);
- if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
+ if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
+ wl->scan_vif == vif) {
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
+ wl->scan_vif = NULL;
wl->scan.req = NULL;
ieee80211_scan_completed(wl->hw, true);
}
@@ -2029,13 +2199,17 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
if (ret < 0)
goto deinit;
- if (wl->bss_type == BSS_TYPE_STA_BSS) {
- ret = wl12xx_cmd_role_disable(wl, &wl->dev_role_id);
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+ wlvif->bss_type == BSS_TYPE_IBSS) {
+ if (wl12xx_dev_role_started(wlvif))
+ wl12xx_stop_dev(wl, wlvif);
+
+ ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
if (ret < 0)
goto deinit;
}
- ret = wl12xx_cmd_role_disable(wl, &wl->role_id);
+ ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
if (ret < 0)
goto deinit;
@@ -2043,120 +2217,93 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
}
deinit:
/* clear all hlids (except system_hlid) */
- wl->sta_hlid = WL12XX_INVALID_LINK_ID;
- wl->dev_hlid = WL12XX_INVALID_LINK_ID;
- wl->ap_bcast_hlid = WL12XX_INVALID_LINK_ID;
- wl->ap_global_hlid = WL12XX_INVALID_LINK_ID;
+ wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
+
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
+ wlvif->bss_type == BSS_TYPE_IBSS) {
+ wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
+ wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
+ wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
+ wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
+ } else {
+ wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
+ wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
+ wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
+ wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
+ for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
+ wl12xx_free_rate_policy(wl,
+ &wlvif->ap.ucast_rate_idx[i]);
+ }
- /*
- * this must be before the cancel_work calls below, so that the work
- * functions don't perform further work.
- */
- wl->state = WL1271_STATE_OFF;
+ wl12xx_tx_reset_wlvif(wl, wlvif);
+ wl1271_free_ap_keys(wl, wlvif);
+ if (wl->last_wlvif == wlvif)
+ wl->last_wlvif = NULL;
+ list_del(&wlvif->list);
+ memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
+ wlvif->role_id = WL12XX_INVALID_ROLE_ID;
+ wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
+
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ wl->ap_count--;
+ else
+ wl->sta_count--;
mutex_unlock(&wl->mutex);
-
- wl1271_disable_interrupts(wl);
- wl1271_flush_deferred_work(wl);
- cancel_delayed_work_sync(&wl->scan_complete_work);
- cancel_work_sync(&wl->netstack_work);
- cancel_work_sync(&wl->tx_work);
- del_timer_sync(&wl->rx_streaming_timer);
- cancel_work_sync(&wl->rx_streaming_enable_work);
- cancel_work_sync(&wl->rx_streaming_disable_work);
- cancel_delayed_work_sync(&wl->pspoll_work);
- cancel_delayed_work_sync(&wl->elp_work);
+ del_timer_sync(&wlvif->rx_streaming_timer);
+ cancel_work_sync(&wlvif->rx_streaming_enable_work);
+ cancel_work_sync(&wlvif->rx_streaming_disable_work);
+ cancel_delayed_work_sync(&wlvif->pspoll_work);
mutex_lock(&wl->mutex);
-
- /* let's notify MAC80211 about the remaining pending TX frames */
- wl1271_tx_reset(wl, reset_tx_queues);
- wl1271_power_off(wl);
-
- memset(wl->bssid, 0, ETH_ALEN);
- memset(wl->ssid, 0, IEEE80211_MAX_SSID_LEN + 1);
- wl->ssid_len = 0;
- wl->bss_type = MAX_BSS_TYPE;
- wl->set_bss_type = MAX_BSS_TYPE;
- wl->p2p = 0;
- wl->band = IEEE80211_BAND_2GHZ;
-
- wl->rx_counter = 0;
- wl->psm_entry_retry = 0;
- wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
- wl->tx_blocks_available = 0;
- wl->tx_allocated_blocks = 0;
- wl->tx_results_count = 0;
- wl->tx_packets_count = 0;
- wl->time_offset = 0;
- wl->session_counter = 0;
- wl->rate_set = CONF_TX_RATE_MASK_BASIC;
- wl->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
- wl->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
- wl->vif = NULL;
- wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
- wl1271_free_ap_keys(wl);
- memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map));
- wl->ap_fw_ps_map = 0;
- wl->ap_ps_map = 0;
- wl->sched_scanning = false;
- wl->role_id = WL12XX_INVALID_ROLE_ID;
- wl->dev_role_id = WL12XX_INVALID_ROLE_ID;
- memset(wl->roles_map, 0, sizeof(wl->roles_map));
- memset(wl->links_map, 0, sizeof(wl->links_map));
- memset(wl->roc_map, 0, sizeof(wl->roc_map));
- wl->active_sta_count = 0;
-
- /* The system link is always allocated */
- __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
-
- /*
- * this is performed after the cancel_work calls and the associated
- * mutex_lock, so that wl1271_op_add_interface does not accidentally
- * get executed before all these vars have been reset.
- */
- wl->flags = 0;
-
- wl->tx_blocks_freed = 0;
-
- for (i = 0; i < NUM_TX_QUEUES; i++) {
- wl->tx_pkts_freed[i] = 0;
- wl->tx_allocated_pkts[i] = 0;
- }
-
- wl1271_debugfs_reset(wl);
-
- kfree(wl->fw_status);
- wl->fw_status = NULL;
- kfree(wl->tx_res_if);
- wl->tx_res_if = NULL;
- kfree(wl->target_mem_map);
- wl->target_mem_map = NULL;
}
static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ struct wl12xx_vif *iter;
mutex_lock(&wl->mutex);
+
+ if (wl->state == WL1271_STATE_OFF ||
+ !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
+ goto out;
+
/*
* wl->vif can be null here if someone shuts down the interface
* just when hardware recovery has been started.
*/
- if (wl->vif) {
- WARN_ON(wl->vif != vif);
- __wl1271_op_remove_interface(wl, true);
- }
+ wl12xx_for_each_wlvif(wl, iter) {
+ if (iter != wlvif)
+ continue;
+ __wl1271_op_remove_interface(wl, vif, true);
+ break;
+ }
+ WARN_ON(iter != wlvif);
+out:
mutex_unlock(&wl->mutex);
cancel_work_sync(&wl->recovery_work);
}
-static int wl1271_join(struct wl1271 *wl, bool set_assoc)
+static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum nl80211_iftype new_type, bool p2p)
+{
+ wl1271_op_remove_interface(hw, vif);
+
+ vif->type = ieee80211_iftype_p2p(new_type, p2p);
+ vif->p2p = p2p;
+ return wl1271_op_add_interface(hw, vif);
+}
+
+static int wl1271_join(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool set_assoc)
{
int ret;
- bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
+ bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
/*
* One of the side effects of the JOIN command is that it clears
@@ -2167,20 +2314,20 @@ static int wl1271_join(struct wl1271 *wl, bool set_assoc)
* Keep the below message for now, unless it starts bothering
* users who really like to roam a lot :)
*/
- if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
wl1271_info("JOIN while associated.");
if (set_assoc)
- set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
+ set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
if (is_ibss)
- ret = wl12xx_cmd_role_start_ibss(wl);
+ ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
else
- ret = wl12xx_cmd_role_start_sta(wl);
+ ret = wl12xx_cmd_role_start_sta(wl, wlvif);
if (ret < 0)
goto out;
- if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
goto out;
/*
@@ -2189,19 +2336,20 @@ static int wl1271_join(struct wl1271 *wl, bool set_assoc)
* the join. The acx_aid starts the keep-alive process, and the order
* of the commands below is relevant.
*/
- ret = wl1271_acx_keep_alive_mode(wl, true);
+ ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
if (ret < 0)
goto out;
- ret = wl1271_acx_aid(wl, wl->aid);
+ ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
if (ret < 0)
goto out;
- ret = wl1271_cmd_build_klv_null_data(wl);
+ ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
if (ret < 0)
goto out;
- ret = wl1271_acx_keep_alive_config(wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
+ ret = wl1271_acx_keep_alive_config(wl, wlvif,
+ CMD_TEMPL_KLV_IDX_NULL_DATA,
ACX_KEEP_ALIVE_TPL_VALID);
if (ret < 0)
goto out;
@@ -2210,72 +2358,63 @@ out:
return ret;
}
-static int wl1271_unjoin(struct wl1271 *wl)
+static int wl1271_unjoin(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret;
- if (test_and_clear_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags)) {
+ if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+
wl12xx_cmd_stop_channel_switch(wl);
- ieee80211_chswitch_done(wl->vif, false);
+ ieee80211_chswitch_done(vif, false);
}
/* to stop listening to a channel, we disconnect */
- ret = wl12xx_cmd_role_stop_sta(wl);
+ ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
if (ret < 0)
goto out;
- memset(wl->bssid, 0, ETH_ALEN);
-
/* reset TX security counters on a clean disconnect */
- wl->tx_security_last_seq_lsb = 0;
- wl->tx_security_seq = 0;
+ wlvif->tx_security_last_seq_lsb = 0;
+ wlvif->tx_security_seq = 0;
out:
return ret;
}
-static void wl1271_set_band_rate(struct wl1271 *wl)
+static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
- wl->basic_rate_set = wl->bitrate_masks[wl->band];
- wl->rate_set = wl->basic_rate_set;
+ wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
+ wlvif->rate_set = wlvif->basic_rate_set;
}
-static bool wl12xx_is_roc(struct wl1271 *wl)
-{
- u8 role_id;
-
- role_id = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
- if (role_id >= WL12XX_MAX_ROLES)
- return false;
-
- return true;
-}
-
-static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
+static int wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ bool idle)
{
int ret;
+ bool cur_idle = !test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
+
+ if (idle == cur_idle)
+ return 0;
if (idle) {
/* no need to croc if we weren't busy (e.g. during boot) */
- if (wl12xx_is_roc(wl)) {
- ret = wl12xx_croc(wl, wl->dev_role_id);
- if (ret < 0)
- goto out;
-
- ret = wl12xx_cmd_role_stop_dev(wl);
+ if (wl12xx_dev_role_started(wlvif)) {
+ ret = wl12xx_stop_dev(wl, wlvif);
if (ret < 0)
goto out;
}
- wl->rate_set = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
- ret = wl1271_acx_sta_rate_policies(wl);
+ wlvif->rate_set =
+ wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
goto out;
ret = wl1271_acx_keep_alive_config(
- wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
+ wl, wlvif, CMD_TEMPL_KLV_IDX_NULL_DATA,
ACX_KEEP_ALIVE_TPL_INVALID);
if (ret < 0)
goto out;
- set_bit(WL1271_FLAG_IDLE, &wl->flags);
+ clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
} else {
/* The current firmware only supports sched_scan in idle */
if (wl->sched_scanning) {
@@ -2283,75 +2422,32 @@ static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
ieee80211_sched_scan_stopped(wl->hw);
}
- ret = wl12xx_cmd_role_start_dev(wl);
- if (ret < 0)
- goto out;
-
- ret = wl12xx_roc(wl, wl->dev_role_id);
+ ret = wl12xx_start_dev(wl, wlvif);
if (ret < 0)
goto out;
- clear_bit(WL1271_FLAG_IDLE, &wl->flags);
+ set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
}
out:
return ret;
}
-static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
+static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct ieee80211_conf *conf, u32 changed)
{
- struct wl1271 *wl = hw->priv;
- struct ieee80211_conf *conf = &hw->conf;
- int channel, ret = 0;
- bool is_ap;
+ bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
+ int channel, ret;
channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
- wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
- " changed 0x%x",
- channel,
- conf->flags & IEEE80211_CONF_PS ? "on" : "off",
- conf->power_level,
- conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
- changed);
-
- /*
- * mac80211 will go to idle nearly immediately after transmitting some
- * frames, such as the deauth. To make sure those frames reach the air,
- * wait here until the TX queue is fully flushed.
- */
- if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
- (conf->flags & IEEE80211_CONF_IDLE))
- wl1271_tx_flush(wl);
-
- mutex_lock(&wl->mutex);
-
- if (unlikely(wl->state == WL1271_STATE_OFF)) {
- /* we support configuring the channel and band while off */
- if ((changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
- wl->band = conf->channel->band;
- wl->channel = channel;
- }
-
- if ((changed & IEEE80211_CONF_CHANGE_POWER))
- wl->power_level = conf->power_level;
-
- goto out;
- }
-
- is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
-
- ret = wl1271_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out;
-
/* if the channel changes while joined, join again */
if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
- ((wl->band != conf->channel->band) ||
- (wl->channel != channel))) {
+ ((wlvif->band != conf->channel->band) ||
+ (wlvif->channel != channel))) {
/* send all pending packets */
wl1271_tx_work_locked(wl);
- wl->band = conf->channel->band;
- wl->channel = channel;
+ wlvif->band = conf->channel->band;
+ wlvif->channel = channel;
if (!is_ap) {
/*
@@ -2360,24 +2456,27 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
* possible rate for the band as a fixed rate for
* association frames and other control messages.
*/
- if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
- wl1271_set_band_rate(wl);
+ if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
+ wl1271_set_band_rate(wl, wlvif);
- wl->basic_rate =
- wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
- ret = wl1271_acx_sta_rate_policies(wl);
+ wlvif->basic_rate =
+ wl1271_tx_min_rate_get(wl,
+ wlvif->basic_rate_set);
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
wl1271_warning("rate policy for channel "
"failed %d", ret);
- if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
- if (wl12xx_is_roc(wl)) {
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED,
+ &wlvif->flags)) {
+ if (wl12xx_dev_role_started(wlvif)) {
/* roaming */
- ret = wl12xx_croc(wl, wl->dev_role_id);
+ ret = wl12xx_croc(wl,
+ wlvif->dev_role_id);
if (ret < 0)
- goto out_sleep;
+ return ret;
}
- ret = wl1271_join(wl, false);
+ ret = wl1271_join(wl, wlvif, false);
if (ret < 0)
wl1271_warning("cmd join on channel "
"failed %d", ret);
@@ -2387,66 +2486,114 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
* not idle. otherwise, CROC will be called
* anyway.
*/
- if (wl12xx_is_roc(wl) &&
+ if (wl12xx_dev_role_started(wlvif) &&
!(conf->flags & IEEE80211_CONF_IDLE)) {
- ret = wl12xx_croc(wl, wl->dev_role_id);
+ ret = wl12xx_stop_dev(wl, wlvif);
if (ret < 0)
- goto out_sleep;
+ return ret;
- ret = wl12xx_roc(wl, wl->dev_role_id);
+ ret = wl12xx_start_dev(wl, wlvif);
if (ret < 0)
- wl1271_warning("roc failed %d",
- ret);
+ return ret;
}
}
}
}
- if (changed & IEEE80211_CONF_CHANGE_IDLE && !is_ap) {
- ret = wl1271_sta_handle_idle(wl,
- conf->flags & IEEE80211_CONF_IDLE);
- if (ret < 0)
- wl1271_warning("idle mode change failed %d", ret);
- }
-
/*
* if mac80211 changes the PSM mode, make sure the mode is not
* incorrectly changed after the pspoll failure active window.
*/
if (changed & IEEE80211_CONF_CHANGE_PS)
- clear_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags);
+ clear_bit(WLVIF_FLAG_PSPOLL_FAILURE, &wlvif->flags);
if (conf->flags & IEEE80211_CONF_PS &&
- !test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
- set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
+ !test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags)) {
+ set_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags);
/*
* We enter PSM only if we're already associated.
* If we're not, we'll enter it when joining an SSID,
* through the bss_info_changed() hook.
*/
- if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
wl1271_debug(DEBUG_PSM, "psm enabled");
- ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
- wl->basic_rate, true);
+ ret = wl1271_ps_set_mode(wl, wlvif,
+ STATION_POWER_SAVE_MODE,
+ wlvif->basic_rate, true);
}
} else if (!(conf->flags & IEEE80211_CONF_PS) &&
- test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
+ test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags)) {
wl1271_debug(DEBUG_PSM, "psm disabled");
- clear_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
+ clear_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags);
- if (test_bit(WL1271_FLAG_PSM, &wl->flags))
- ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
- wl->basic_rate, true);
+ if (test_bit(WLVIF_FLAG_PSM, &wlvif->flags))
+ ret = wl1271_ps_set_mode(wl, wlvif,
+ STATION_ACTIVE_MODE,
+ wlvif->basic_rate, true);
}
- if (conf->power_level != wl->power_level) {
- ret = wl1271_acx_tx_power(wl, conf->power_level);
+ if (conf->power_level != wlvif->power_level) {
+ ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
if (ret < 0)
- goto out_sleep;
+ return ret;
+
+ wlvif->power_level = conf->power_level;
+ }
+ return 0;
+}
+
+static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
+{
+ struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif;
+ struct ieee80211_conf *conf = &hw->conf;
+ int channel, ret = 0;
+
+ channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
+ " changed 0x%x",
+ channel,
+ conf->flags & IEEE80211_CONF_PS ? "on" : "off",
+ conf->power_level,
+ conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
+ changed);
+
+ /*
+ * mac80211 will go to idle nearly immediately after transmitting some
+ * frames, such as the deauth. To make sure those frames reach the air,
+ * wait here until the TX queue is fully flushed.
+ */
+ if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
+ (conf->flags & IEEE80211_CONF_IDLE))
+ wl1271_tx_flush(wl);
+
+ mutex_lock(&wl->mutex);
+
+ /* we support configuring the channel and band even while off */
+ if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
+ wl->band = conf->channel->band;
+ wl->channel = channel;
+ }
+
+ if (changed & IEEE80211_CONF_CHANGE_POWER)
wl->power_level = conf->power_level;
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ /* configure each interface */
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ ret = wl12xx_config_vif(wl, wlvif, conf, changed);
+ if (ret < 0)
+ goto out_sleep;
}
out_sleep:
@@ -2509,6 +2656,8 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
{
struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif;
+
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
@@ -2526,15 +2675,20 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- if (wl->bss_type != BSS_TYPE_AP_BSS) {
- if (*total & FIF_ALLMULTI)
- ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
- else if (fp)
- ret = wl1271_acx_group_address_tbl(wl, fp->enabled,
- fp->mc_list,
- fp->mc_list_length);
- if (ret < 0)
- goto out_sleep;
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
+ if (*total & FIF_ALLMULTI)
+ ret = wl1271_acx_group_address_tbl(wl, wlvif,
+ false,
+ NULL, 0);
+ else if (fp)
+ ret = wl1271_acx_group_address_tbl(wl, wlvif,
+ fp->enabled,
+ fp->mc_list,
+ fp->mc_list_length);
+ if (ret < 0)
+ goto out_sleep;
+ }
}
/*
@@ -2551,9 +2705,10 @@ out:
kfree(fp);
}
-static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
- u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
- u16 tx_seq_16)
+static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 id, u8 key_type, u8 key_size,
+ const u8 *key, u8 hlid, u32 tx_seq_32,
+ u16 tx_seq_16)
{
struct wl1271_ap_key *ap_key;
int i;
@@ -2568,10 +2723,10 @@ static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
* an existing key.
*/
for (i = 0; i < MAX_NUM_KEYS; i++) {
- if (wl->recorded_ap_keys[i] == NULL)
+ if (wlvif->ap.recorded_keys[i] == NULL)
break;
- if (wl->recorded_ap_keys[i]->id == id) {
+ if (wlvif->ap.recorded_keys[i]->id == id) {
wl1271_warning("trying to record key replacement");
return -EINVAL;
}
@@ -2592,21 +2747,21 @@ static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type,
ap_key->tx_seq_32 = tx_seq_32;
ap_key->tx_seq_16 = tx_seq_16;
- wl->recorded_ap_keys[i] = ap_key;
+ wlvif->ap.recorded_keys[i] = ap_key;
return 0;
}
-static void wl1271_free_ap_keys(struct wl1271 *wl)
+static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int i;
for (i = 0; i < MAX_NUM_KEYS; i++) {
- kfree(wl->recorded_ap_keys[i]);
- wl->recorded_ap_keys[i] = NULL;
+ kfree(wlvif->ap.recorded_keys[i]);
+ wlvif->ap.recorded_keys[i] = NULL;
}
}
-static int wl1271_ap_init_hwenc(struct wl1271 *wl)
+static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int i, ret = 0;
struct wl1271_ap_key *key;
@@ -2614,15 +2769,15 @@ static int wl1271_ap_init_hwenc(struct wl1271 *wl)
for (i = 0; i < MAX_NUM_KEYS; i++) {
u8 hlid;
- if (wl->recorded_ap_keys[i] == NULL)
+ if (wlvif->ap.recorded_keys[i] == NULL)
break;
- key = wl->recorded_ap_keys[i];
+ key = wlvif->ap.recorded_keys[i];
hlid = key->hlid;
if (hlid == WL12XX_INVALID_LINK_ID)
- hlid = wl->ap_bcast_hlid;
+ hlid = wlvif->ap.bcast_hlid;
- ret = wl1271_cmd_set_ap_key(wl, KEY_ADD_OR_REPLACE,
+ ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
key->id, key->key_type,
key->key_size, key->key,
hlid, key->tx_seq_32,
@@ -2635,23 +2790,24 @@ static int wl1271_ap_init_hwenc(struct wl1271 *wl)
}
if (wep_key_added) {
- ret = wl12xx_cmd_set_default_wep_key(wl, wl->default_key,
- wl->ap_bcast_hlid);
+ ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
+ wlvif->ap.bcast_hlid);
if (ret < 0)
goto out;
}
out:
- wl1271_free_ap_keys(wl);
+ wl1271_free_ap_keys(wl, wlvif);
return ret;
}
-static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
+static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, u32 tx_seq_32,
u16 tx_seq_16, struct ieee80211_sta *sta)
{
int ret;
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
if (is_ap) {
struct wl1271_station *wl_sta;
@@ -2661,10 +2817,10 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
wl_sta = (struct wl1271_station *)sta->drv_priv;
hlid = wl_sta->hlid;
} else {
- hlid = wl->ap_bcast_hlid;
+ hlid = wlvif->ap.bcast_hlid;
}
- if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
+ if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
/*
* We do not support removing keys after AP shutdown.
* Pretend we do to make mac80211 happy.
@@ -2672,12 +2828,12 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
if (action != KEY_ADD_OR_REPLACE)
return 0;
- ret = wl1271_record_ap_key(wl, id,
+ ret = wl1271_record_ap_key(wl, wlvif, id,
key_type, key_size,
key, hlid, tx_seq_32,
tx_seq_16);
} else {
- ret = wl1271_cmd_set_ap_key(wl, action,
+ ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
id, key_type, key_size,
key, hlid, tx_seq_32,
tx_seq_16);
@@ -2718,10 +2874,10 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
/* don't remove key if hlid was already deleted */
if (action == KEY_REMOVE &&
- wl->sta_hlid == WL12XX_INVALID_LINK_ID)
+ wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
return 0;
- ret = wl1271_cmd_set_sta_key(wl, action,
+ ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
id, key_type, key_size,
key, addr, tx_seq_32,
tx_seq_16);
@@ -2731,8 +2887,8 @@ static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
/* the default WEP key needs to be configured at least once */
if (key_type == KEY_WEP) {
ret = wl12xx_cmd_set_default_wep_key(wl,
- wl->default_key,
- wl->sta_hlid);
+ wlvif->default_key,
+ wlvif->sta.hlid);
if (ret < 0)
return ret;
}
@@ -2747,6 +2903,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_key_conf *key_conf)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
u32 tx_seq_32 = 0;
u16 tx_seq_16 = 0;
@@ -2782,20 +2939,20 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
key_type = KEY_TKIP;
key_conf->hw_key_idx = key_conf->keyidx;
- tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
- tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
+ tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
+ tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
break;
case WLAN_CIPHER_SUITE_CCMP:
key_type = KEY_AES;
- key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
- tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
+ key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
+ tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
+ tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
break;
case WL1271_CIPHER_SUITE_GEM:
key_type = KEY_GEM;
- tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
- tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
+ tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
+ tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
break;
default:
wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
@@ -2806,7 +2963,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
switch (cmd) {
case SET_KEY:
- ret = wl1271_set_key(wl, KEY_ADD_OR_REPLACE,
+ ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
key_conf->keyidx, key_type,
key_conf->keylen, key_conf->key,
tx_seq_32, tx_seq_16, sta);
@@ -2817,7 +2974,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
break;
case DISABLE_KEY:
- ret = wl1271_set_key(wl, KEY_REMOVE,
+ ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
key_conf->keyidx, key_type,
key_conf->keylen, key_conf->key,
0, 0, sta);
@@ -2847,6 +3004,8 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
struct cfg80211_scan_request *req)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+
int ret;
u8 *ssid = NULL;
size_t len = 0;
@@ -2874,18 +3033,18 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- /* cancel ROC before scanning */
- if (wl12xx_is_roc(wl)) {
- if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
- /* don't allow scanning right now */
- ret = -EBUSY;
- goto out_sleep;
- }
- wl12xx_croc(wl, wl->dev_role_id);
- wl12xx_cmd_role_stop_dev(wl);
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
+ test_bit(wlvif->role_id, wl->roc_map)) {
+ /* don't allow scanning right now */
+ ret = -EBUSY;
+ goto out_sleep;
}
- ret = wl1271_scan(hw->priv, ssid, len, req);
+ /* cancel ROC before scanning */
+ if (wl12xx_dev_role_started(wlvif))
+ wl12xx_stop_dev(wl, wlvif);
+
+ ret = wl1271_scan(hw->priv, vif, ssid, len, req);
out_sleep:
wl1271_ps_elp_sleep(wl);
out:
@@ -2921,6 +3080,7 @@ static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
}
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
+ wl->scan_vif = NULL;
wl->scan.req = NULL;
ieee80211_scan_completed(wl->hw, true);
@@ -2938,6 +3098,7 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
struct ieee80211_sched_scan_ies *ies)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
@@ -2948,11 +3109,11 @@ static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
- ret = wl1271_scan_sched_scan_config(wl, req, ies);
+ ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies);
if (ret < 0)
goto out_sleep;
- ret = wl1271_scan_sched_scan_start(wl);
+ ret = wl1271_scan_sched_scan_start(wl, wlvif);
if (ret < 0)
goto out_sleep;
@@ -3017,6 +3178,7 @@ out:
static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif;
int ret = 0;
mutex_lock(&wl->mutex);
@@ -3030,10 +3192,11 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
if (ret < 0)
goto out;
- ret = wl1271_acx_rts_threshold(wl, value);
- if (ret < 0)
- wl1271_warning("wl1271_op_set_rts_threshold failed: %d", ret);
-
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ ret = wl1271_acx_rts_threshold(wl, wlvif, value);
+ if (ret < 0)
+ wl1271_warning("set rts threshold failed: %d", ret);
+ }
wl1271_ps_elp_sleep(wl);
out:
@@ -3042,9 +3205,10 @@ out:
return ret;
}
-static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
+static int wl1271_ssid_set(struct ieee80211_vif *vif, struct sk_buff *skb,
int offset)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
u8 ssid_len;
const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
skb->len - offset);
@@ -3060,8 +3224,8 @@ static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
return -EINVAL;
}
- wl->ssid_len = ssid_len;
- memcpy(wl->ssid, ptr+2, ssid_len);
+ wlvif->ssid_len = ssid_len;
+ memcpy(wlvif->ssid, ptr+2, ssid_len);
return 0;
}
@@ -3096,18 +3260,40 @@ static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
skb_trim(skb, skb->len - len);
}
-static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl,
- u8 *probe_rsp_data,
- size_t probe_rsp_len,
- u32 rates)
+static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
+ struct ieee80211_vif *vif)
{
- struct ieee80211_bss_conf *bss_conf = &wl->vif->bss_conf;
+ struct sk_buff *skb;
+ int ret;
+
+ skb = ieee80211_proberesp_get(wl->hw, vif);
+ if (!skb)
+ return -EOPNOTSUPP;
+
+ ret = wl1271_cmd_template_set(wl,
+ CMD_TEMPL_AP_PROBE_RESPONSE,
+ skb->data,
+ skb->len, 0,
+ rates);
+
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
+ u8 *probe_rsp_data,
+ size_t probe_rsp_len,
+ u32 rates)
+{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
int ssid_ie_offset, ie_offset, templ_len;
const u8 *ptr;
/* no need to change probe response if the SSID is set correctly */
- if (wl->ssid_len > 0)
+ if (wlvif->ssid_len > 0)
return wl1271_cmd_template_set(wl,
CMD_TEMPL_AP_PROBE_RESPONSE,
probe_rsp_data,
@@ -3153,16 +3339,18 @@ static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl,
}
static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
+ struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret = 0;
if (changed & BSS_CHANGED_ERP_SLOT) {
if (bss_conf->use_short_slot)
- ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT);
+ ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
else
- ret = wl1271_acx_slot(wl, SLOT_TIME_LONG);
+ ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
if (ret < 0) {
wl1271_warning("Set slot time failed %d", ret);
goto out;
@@ -3171,16 +3359,18 @@ static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
if (bss_conf->use_short_preamble)
- wl1271_acx_set_preamble(wl, ACX_PREAMBLE_SHORT);
+ wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
else
- wl1271_acx_set_preamble(wl, ACX_PREAMBLE_LONG);
+ wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
}
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
if (bss_conf->use_cts_prot)
- ret = wl1271_acx_cts_protect(wl, CTSPROTECT_ENABLE);
+ ret = wl1271_acx_cts_protect(wl, wlvif,
+ CTSPROTECT_ENABLE);
else
- ret = wl1271_acx_cts_protect(wl, CTSPROTECT_DISABLE);
+ ret = wl1271_acx_cts_protect(wl, wlvif,
+ CTSPROTECT_DISABLE);
if (ret < 0) {
wl1271_warning("Set ctsprotect failed %d", ret);
goto out;
@@ -3196,14 +3386,23 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
int ret = 0;
if ((changed & BSS_CHANGED_BEACON_INT)) {
wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
bss_conf->beacon_int);
- wl->beacon_int = bss_conf->beacon_int;
+ wlvif->beacon_int = bss_conf->beacon_int;
+ }
+
+ if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
+ u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+ if (!wl1271_ap_set_probe_resp_tmpl(wl, rate, vif)) {
+ wl1271_debug(DEBUG_AP, "probe response updated");
+ set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
+ }
}
if ((changed & BSS_CHANGED_BEACON)) {
@@ -3214,17 +3413,19 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
u16 tmpl_id;
- if (!beacon)
+ if (!beacon) {
+ ret = -EINVAL;
goto out;
+ }
wl1271_debug(DEBUG_MASTER, "beacon updated");
- ret = wl1271_ssid_set(wl, beacon, ieoffset);
+ ret = wl1271_ssid_set(vif, beacon, ieoffset);
if (ret < 0) {
dev_kfree_skb(beacon);
goto out;
}
- min_rate = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+ min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
CMD_TEMPL_BEACON;
ret = wl1271_cmd_template_set(wl, tmpl_id,
@@ -3236,6 +3437,13 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
goto out;
}
+ /*
+ * In case we already have a probe-resp beacon set explicitly
+ * by usermode, don't use the beacon data.
+ */
+ if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
+ goto end_bcn;
+
/* remove TIM ie from probe response */
wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
@@ -3254,7 +3462,7 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_PROBE_RESP);
if (is_ap)
- ret = wl1271_ap_set_probe_resp_tmpl(wl,
+ ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
beacon->data,
beacon->len,
min_rate);
@@ -3264,12 +3472,15 @@ static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
beacon->data,
beacon->len, 0,
min_rate);
+end_bcn:
dev_kfree_skb(beacon);
if (ret < 0)
goto out;
}
out:
+ if (ret != 0)
+ wl1271_error("beacon info change failed: %d", ret);
return ret;
}
@@ -3279,23 +3490,24 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret = 0;
if ((changed & BSS_CHANGED_BASIC_RATES)) {
u32 rates = bss_conf->basic_rates;
- wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
- wl->band);
- wl->basic_rate = wl1271_tx_min_rate_get(wl,
- wl->basic_rate_set);
+ wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
+ wlvif->band);
+ wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
+ wlvif->basic_rate_set);
- ret = wl1271_init_ap_rates(wl);
+ ret = wl1271_init_ap_rates(wl, wlvif);
if (ret < 0) {
wl1271_error("AP rate policy change failed %d", ret);
goto out;
}
- ret = wl1271_ap_init_templates(wl);
+ ret = wl1271_ap_init_templates(wl, vif);
if (ret < 0)
goto out;
}
@@ -3306,38 +3518,40 @@ static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
if ((changed & BSS_CHANGED_BEACON_ENABLED)) {
if (bss_conf->enable_beacon) {
- if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
- ret = wl12xx_cmd_role_start_ap(wl);
+ if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
+ ret = wl12xx_cmd_role_start_ap(wl, wlvif);
if (ret < 0)
goto out;
- ret = wl1271_ap_init_hwenc(wl);
+ ret = wl1271_ap_init_hwenc(wl, wlvif);
if (ret < 0)
goto out;
- set_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
+ set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
wl1271_debug(DEBUG_AP, "started AP");
}
} else {
- if (test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
- ret = wl12xx_cmd_role_stop_ap(wl);
+ if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
+ ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
if (ret < 0)
goto out;
- clear_bit(WL1271_FLAG_AP_STARTED, &wl->flags);
+ clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
+ clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
+ &wlvif->flags);
wl1271_debug(DEBUG_AP, "stopped AP");
}
}
}
- ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
+ ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
if (ret < 0)
goto out;
/* Handle HT information change */
if ((changed & BSS_CHANGED_HT) &&
(bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
- ret = wl1271_acx_set_ht_information(wl,
+ ret = wl1271_acx_set_ht_information(wl, wlvif,
bss_conf->ht_operation_mode);
if (ret < 0) {
wl1271_warning("Set ht information failed %d", ret);
@@ -3355,8 +3569,9 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
bool do_join = false, set_assoc = false;
- bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
+ bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
bool ibss_joined = false;
u32 sta_rate_set = 0;
int ret;
@@ -3373,14 +3588,13 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
if (changed & BSS_CHANGED_IBSS) {
if (bss_conf->ibss_joined) {
- set_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags);
+ set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
ibss_joined = true;
} else {
- if (test_and_clear_bit(WL1271_FLAG_IBSS_JOINED,
- &wl->flags)) {
- wl1271_unjoin(wl);
- wl12xx_cmd_role_start_dev(wl);
- wl12xx_roc(wl, wl->dev_role_id);
+ if (test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED,
+ &wlvif->flags)) {
+ wl1271_unjoin(wl, wlvif);
+ wl12xx_start_dev(wl, wlvif);
}
}
}
@@ -3396,46 +3610,40 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
bss_conf->enable_beacon ? "enabled" : "disabled");
- if (bss_conf->enable_beacon)
- wl->set_bss_type = BSS_TYPE_IBSS;
- else
- wl->set_bss_type = BSS_TYPE_STA_BSS;
do_join = true;
}
+ if (changed & BSS_CHANGED_IDLE) {
+ ret = wl1271_sta_handle_idle(wl, wlvif, bss_conf->idle);
+ if (ret < 0)
+ wl1271_warning("idle mode change failed %d", ret);
+ }
+
if ((changed & BSS_CHANGED_CQM)) {
bool enable = false;
if (bss_conf->cqm_rssi_thold)
enable = true;
- ret = wl1271_acx_rssi_snr_trigger(wl, enable,
+ ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
bss_conf->cqm_rssi_thold,
bss_conf->cqm_rssi_hyst);
if (ret < 0)
goto out;
- wl->rssi_thold = bss_conf->cqm_rssi_thold;
+ wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
}
- if ((changed & BSS_CHANGED_BSSID) &&
- /*
- * Now we know the correct bssid, so we send a new join command
- * and enable the BSSID filter
- */
- memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
- memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
-
- if (!is_zero_ether_addr(wl->bssid)) {
- ret = wl1271_cmd_build_null_data(wl);
+ if (changed & BSS_CHANGED_BSSID)
+ if (!is_zero_ether_addr(bss_conf->bssid)) {
+ ret = wl12xx_cmd_build_null_data(wl, wlvif);
if (ret < 0)
goto out;
- ret = wl1271_build_qos_null_data(wl);
+ ret = wl1271_build_qos_null_data(wl, vif);
if (ret < 0)
goto out;
/* Need to update the BSSID (for filtering etc) */
do_join = true;
}
- }
if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_HT)) {
rcu_read_lock();
@@ -3459,26 +3667,28 @@ sta_not_found:
if (bss_conf->assoc) {
u32 rates;
int ieoffset;
- wl->aid = bss_conf->aid;
+ wlvif->aid = bss_conf->aid;
set_assoc = true;
- wl->ps_poll_failures = 0;
+ wlvif->ps_poll_failures = 0;
/*
* use basic rates from AP, and determine lowest rate
* to use with control frames.
*/
rates = bss_conf->basic_rates;
- wl->basic_rate_set =
+ wlvif->basic_rate_set =
wl1271_tx_enabled_rates_get(wl, rates,
- wl->band);
- wl->basic_rate =
- wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+ wlvif->band);
+ wlvif->basic_rate =
+ wl1271_tx_min_rate_get(wl,
+ wlvif->basic_rate_set);
if (sta_rate_set)
- wl->rate_set = wl1271_tx_enabled_rates_get(wl,
+ wlvif->rate_set =
+ wl1271_tx_enabled_rates_get(wl,
sta_rate_set,
- wl->band);
- ret = wl1271_acx_sta_rate_policies(wl);
+ wlvif->band);
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
goto out;
@@ -3488,53 +3698,56 @@ sta_not_found:
* updates it by itself when the first beacon is
* received after a join.
*/
- ret = wl1271_cmd_build_ps_poll(wl, wl->aid);
+ ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
if (ret < 0)
goto out;
/*
* Get a template for hardware connection maintenance
*/
- dev_kfree_skb(wl->probereq);
- wl->probereq = wl1271_cmd_build_ap_probe_req(wl, NULL);
+ dev_kfree_skb(wlvif->probereq);
+ wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
+ wlvif,
+ NULL);
ieoffset = offsetof(struct ieee80211_mgmt,
u.probe_req.variable);
- wl1271_ssid_set(wl, wl->probereq, ieoffset);
+ wl1271_ssid_set(vif, wlvif->probereq, ieoffset);
/* enable the connection monitoring feature */
- ret = wl1271_acx_conn_monit_params(wl, true);
+ ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
if (ret < 0)
goto out;
} else {
/* use defaults when not associated */
bool was_assoc =
- !!test_and_clear_bit(WL1271_FLAG_STA_ASSOCIATED,
- &wl->flags);
+ !!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED,
+ &wlvif->flags);
bool was_ifup =
- !!test_and_clear_bit(WL1271_FLAG_STA_STATE_SENT,
- &wl->flags);
- wl->aid = 0;
+ !!test_and_clear_bit(WLVIF_FLAG_STA_STATE_SENT,
+ &wlvif->flags);
+ wlvif->aid = 0;
/* free probe-request template */
- dev_kfree_skb(wl->probereq);
- wl->probereq = NULL;
+ dev_kfree_skb(wlvif->probereq);
+ wlvif->probereq = NULL;
/* re-enable dynamic ps - just in case */
- ieee80211_enable_dyn_ps(wl->vif);
+ ieee80211_enable_dyn_ps(vif);
/* revert back to minimum rates for the current band */
- wl1271_set_band_rate(wl);
- wl->basic_rate =
- wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
- ret = wl1271_acx_sta_rate_policies(wl);
+ wl1271_set_band_rate(wl, wlvif);
+ wlvif->basic_rate =
+ wl1271_tx_min_rate_get(wl,
+ wlvif->basic_rate_set);
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
goto out;
/* disable connection monitor features */
- ret = wl1271_acx_conn_monit_params(wl, false);
+ ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
/* Disable the keep-alive feature */
- ret = wl1271_acx_keep_alive_mode(wl, false);
+ ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
if (ret < 0)
goto out;
@@ -3546,7 +3759,7 @@ sta_not_found:
* no IF_OPER_UP notification.
*/
if (!was_ifup) {
- ret = wl12xx_croc(wl, wl->role_id);
+ ret = wl12xx_croc(wl, wlvif->role_id);
if (ret < 0)
goto out;
}
@@ -3555,17 +3768,16 @@ sta_not_found:
* roaming on the same channel. until we will
* have a better flow...)
*/
- if (test_bit(wl->dev_role_id, wl->roc_map)) {
- ret = wl12xx_croc(wl, wl->dev_role_id);
+ if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
+ ret = wl12xx_croc(wl,
+ wlvif->dev_role_id);
if (ret < 0)
goto out;
}
- wl1271_unjoin(wl);
- if (!(conf_flags & IEEE80211_CONF_IDLE)) {
- wl12xx_cmd_role_start_dev(wl);
- wl12xx_roc(wl, wl->dev_role_id);
- }
+ wl1271_unjoin(wl, wlvif);
+ if (!(conf_flags & IEEE80211_CONF_IDLE))
+ wl12xx_start_dev(wl, wlvif);
}
}
}
@@ -3576,27 +3788,28 @@ sta_not_found:
if (bss_conf->ibss_joined) {
u32 rates = bss_conf->basic_rates;
- wl->basic_rate_set =
+ wlvif->basic_rate_set =
wl1271_tx_enabled_rates_get(wl, rates,
- wl->band);
- wl->basic_rate =
- wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
+ wlvif->band);
+ wlvif->basic_rate =
+ wl1271_tx_min_rate_get(wl,
+ wlvif->basic_rate_set);
/* by default, use 11b + OFDM rates */
- wl->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
- ret = wl1271_acx_sta_rate_policies(wl);
+ wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
goto out;
}
}
- ret = wl1271_bss_erp_info_changed(wl, bss_conf, changed);
+ ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
if (ret < 0)
goto out;
if (changed & BSS_CHANGED_ARP_FILTER) {
__be32 addr = bss_conf->arp_addr_list[0];
- WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
+ WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
if (bss_conf->arp_addr_cnt == 1 &&
bss_conf->arp_filter_enabled) {
@@ -3606,24 +3819,24 @@ sta_not_found:
* isn't being set (when sending), so we have to
* reconfigure the template upon every ip change.
*/
- ret = wl1271_cmd_build_arp_rsp(wl, addr);
+ ret = wl1271_cmd_build_arp_rsp(wl, wlvif, addr);
if (ret < 0) {
wl1271_warning("build arp rsp failed: %d", ret);
goto out;
}
- ret = wl1271_acx_arp_ip_filter(wl,
+ ret = wl1271_acx_arp_ip_filter(wl, wlvif,
ACX_ARP_FILTER_ARP_FILTERING,
addr);
} else
- ret = wl1271_acx_arp_ip_filter(wl, 0, addr);
+ ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
if (ret < 0)
goto out;
}
if (do_join) {
- ret = wl1271_join(wl, set_assoc);
+ ret = wl1271_join(wl, wlvif, set_assoc);
if (ret < 0) {
wl1271_warning("cmd join failed %d", ret);
goto out;
@@ -3631,35 +3844,31 @@ sta_not_found:
/* ROC until connected (after EAPOL exchange) */
if (!is_ibss) {
- ret = wl12xx_roc(wl, wl->role_id);
+ ret = wl12xx_roc(wl, wlvif, wlvif->role_id);
if (ret < 0)
goto out;
- wl1271_check_operstate(wl,
+ wl1271_check_operstate(wl, wlvif,
ieee80211_get_operstate(vif));
}
/*
* stop device role if started (we might already be in
- * STA role). TODO: make it better.
+ * STA/IBSS role).
*/
- if (wl->dev_role_id != WL12XX_INVALID_ROLE_ID) {
- ret = wl12xx_croc(wl, wl->dev_role_id);
- if (ret < 0)
- goto out;
-
- ret = wl12xx_cmd_role_stop_dev(wl);
+ if (wl12xx_dev_role_started(wlvif)) {
+ ret = wl12xx_stop_dev(wl, wlvif);
if (ret < 0)
goto out;
}
/* If we want to go in PSM but we're not there yet */
- if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) &&
- !test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+ if (test_bit(WLVIF_FLAG_PSM_REQUESTED, &wlvif->flags) &&
+ !test_bit(WLVIF_FLAG_PSM, &wlvif->flags)) {
enum wl1271_cmd_ps_mode mode;
mode = STATION_POWER_SAVE_MODE;
- ret = wl1271_ps_set_mode(wl, mode,
- wl->basic_rate,
+ ret = wl1271_ps_set_mode(wl, wlvif, mode,
+ wlvif->basic_rate,
true);
if (ret < 0)
goto out;
@@ -3673,7 +3882,7 @@ sta_not_found:
ret = wl1271_acx_set_ht_capabilities(wl,
&sta_ht_cap,
true,
- wl->sta_hlid);
+ wlvif->sta.hlid);
if (ret < 0) {
wl1271_warning("Set ht cap true failed %d",
ret);
@@ -3685,7 +3894,7 @@ sta_not_found:
ret = wl1271_acx_set_ht_capabilities(wl,
&sta_ht_cap,
false,
- wl->sta_hlid);
+ wlvif->sta.hlid);
if (ret < 0) {
wl1271_warning("Set ht cap false failed %d",
ret);
@@ -3697,7 +3906,7 @@ sta_not_found:
/* Handle HT information change. Done after join. */
if ((changed & BSS_CHANGED_HT) &&
(bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
- ret = wl1271_acx_set_ht_information(wl,
+ ret = wl1271_acx_set_ht_information(wl, wlvif,
bss_conf->ht_operation_mode);
if (ret < 0) {
wl1271_warning("Set ht information failed %d", ret);
@@ -3715,7 +3924,8 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
u32 changed)
{
struct wl1271 *wl = hw->priv;
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed 0x%x",
@@ -3726,6 +3936,9 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
+ if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
+ goto out;
+
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
@@ -3746,6 +3959,7 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
const struct ieee80211_tx_queue_params *params)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
u8 ps_scheme;
int ret = 0;
@@ -3758,31 +3972,8 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
else
ps_scheme = CONF_PS_SCHEME_LEGACY;
- if (wl->state == WL1271_STATE_OFF) {
- /*
- * If the state is off, the parameters will be recorded and
- * configured on init. This happens in AP-mode.
- */
- struct conf_tx_ac_category *conf_ac =
- &wl->conf.tx.ac_conf[wl1271_tx_get_queue(queue)];
- struct conf_tx_tid *conf_tid =
- &wl->conf.tx.tid_conf[wl1271_tx_get_queue(queue)];
-
- conf_ac->ac = wl1271_tx_get_queue(queue);
- conf_ac->cw_min = (u8)params->cw_min;
- conf_ac->cw_max = params->cw_max;
- conf_ac->aifsn = params->aifs;
- conf_ac->tx_op_limit = params->txop << 5;
-
- conf_tid->queue_id = wl1271_tx_get_queue(queue);
- conf_tid->channel_type = CONF_CHANNEL_TYPE_EDCF;
- conf_tid->tsid = wl1271_tx_get_queue(queue);
- conf_tid->ps_scheme = ps_scheme;
- conf_tid->ack_policy = CONF_ACK_POLICY_LEGACY;
- conf_tid->apsd_conf[0] = 0;
- conf_tid->apsd_conf[1] = 0;
+ if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
goto out;
- }
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
@@ -3792,13 +3983,13 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
 * the txop is configured in units of 32us by mac80211,
 * we need it in microseconds
*/
- ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
+ ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
params->cw_min, params->cw_max,
params->aifs, params->txop << 5);
if (ret < 0)
goto out_sleep;
- ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
+ ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
CONF_CHANNEL_TYPE_EDCF,
wl1271_tx_get_queue(queue),
ps_scheme, CONF_ACK_POLICY_LEGACY,
@@ -3861,43 +4052,43 @@ static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
}
static int wl1271_allocate_sta(struct wl1271 *wl,
- struct ieee80211_sta *sta,
- u8 *hlid)
+ struct wl12xx_vif *wlvif,
+ struct ieee80211_sta *sta)
{
struct wl1271_station *wl_sta;
- int id;
+ int ret;
- id = find_first_zero_bit(wl->ap_hlid_map, AP_MAX_STATIONS);
- if (id >= AP_MAX_STATIONS) {
+
+ if (wl->active_sta_count >= AP_MAX_STATIONS) {
wl1271_warning("could not allocate HLID - too much stations");
return -EBUSY;
}
wl_sta = (struct wl1271_station *)sta->drv_priv;
- set_bit(id, wl->ap_hlid_map);
- wl_sta->hlid = WL1271_AP_STA_HLID_START + id;
- *hlid = wl_sta->hlid;
+ ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
+ if (ret < 0) {
+ wl1271_warning("could not allocate HLID - too many links");
+ return -EBUSY;
+ }
+
+ set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
wl->active_sta_count++;
return 0;
}
-void wl1271_free_sta(struct wl1271 *wl, u8 hlid)
+void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
{
- int id = hlid - WL1271_AP_STA_HLID_START;
-
- if (hlid < WL1271_AP_STA_HLID_START)
+ if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
return;
- if (!test_bit(id, wl->ap_hlid_map))
- return;
-
- clear_bit(id, wl->ap_hlid_map);
+ clear_bit(hlid, wlvif->ap.sta_hlid_map);
memset(wl->links[hlid].addr, 0, ETH_ALEN);
wl->links[hlid].ba_bitmap = 0;
wl1271_tx_reset_link_queues(wl, hlid);
__clear_bit(hlid, &wl->ap_ps_map);
__clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
+ wl12xx_free_link(wl, wlvif, &hlid);
wl->active_sta_count--;
}
@@ -3906,6 +4097,8 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
+ struct wl1271_station *wl_sta;
int ret = 0;
u8 hlid;
@@ -3914,20 +4107,23 @@ static int wl1271_op_sta_add(struct ieee80211_hw *hw,
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
- if (wl->bss_type != BSS_TYPE_AP_BSS)
+ if (wlvif->bss_type != BSS_TYPE_AP_BSS)
goto out;
wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
- ret = wl1271_allocate_sta(wl, sta, &hlid);
+ ret = wl1271_allocate_sta(wl, wlvif, sta);
if (ret < 0)
goto out;
+ wl_sta = (struct wl1271_station *)sta->drv_priv;
+ hlid = wl_sta->hlid;
+
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out_free_sta;
- ret = wl12xx_cmd_add_peer(wl, sta, hlid);
+ ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
if (ret < 0)
goto out_sleep;
@@ -3944,7 +4140,7 @@ out_sleep:
out_free_sta:
if (ret < 0)
- wl1271_free_sta(wl, hlid);
+ wl1271_free_sta(wl, wlvif, hlid);
out:
mutex_unlock(&wl->mutex);
@@ -3956,6 +4152,7 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct wl1271_station *wl_sta;
int ret = 0, id;
@@ -3964,14 +4161,14 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
- if (wl->bss_type != BSS_TYPE_AP_BSS)
+ if (wlvif->bss_type != BSS_TYPE_AP_BSS)
goto out;
wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
wl_sta = (struct wl1271_station *)sta->drv_priv;
- id = wl_sta->hlid - WL1271_AP_STA_HLID_START;
- if (WARN_ON(!test_bit(id, wl->ap_hlid_map)))
+ id = wl_sta->hlid;
+ if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
goto out;
ret = wl1271_ps_elp_wakeup(wl);
@@ -3982,7 +4179,7 @@ static int wl1271_op_sta_remove(struct ieee80211_hw *hw,
if (ret < 0)
goto out_sleep;
- wl1271_free_sta(wl, wl_sta->hlid);
+ wl1271_free_sta(wl, wlvif, wl_sta->hlid);
out_sleep:
wl1271_ps_elp_sleep(wl);
@@ -3999,6 +4196,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
u8 buf_size)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
u8 hlid, *ba_bitmap;
@@ -4016,10 +4214,10 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
goto out;
}
- if (wl->bss_type == BSS_TYPE_STA_BSS) {
- hlid = wl->sta_hlid;
- ba_bitmap = &wl->ba_rx_bitmap;
- } else if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
+ hlid = wlvif->sta.hlid;
+ ba_bitmap = &wlvif->sta.ba_rx_bitmap;
+ } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
struct wl1271_station *wl_sta;
wl_sta = (struct wl1271_station *)sta->drv_priv;
@@ -4039,7 +4237,7 @@ static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_RX_START:
- if (!wl->ba_support || !wl->ba_allowed) {
+ if (!wlvif->ba_support || !wlvif->ba_allowed) {
ret = -ENOTSUPP;
break;
}
@@ -4108,8 +4306,9 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const struct cfg80211_bitrate_mask *mask)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct wl1271 *wl = hw->priv;
- int i;
+ int i, ret = 0;
wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
mask->control[NL80211_BAND_2GHZ].legacy,
@@ -4118,19 +4317,39 @@ static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
for (i = 0; i < IEEE80211_NUM_BANDS; i++)
- wl->bitrate_masks[i] =
+ wlvif->bitrate_masks[i] =
wl1271_tx_enabled_rates_get(wl,
mask->control[i].legacy,
i);
+
+ if (unlikely(wl->state == WL1271_STATE_OFF))
+ goto out;
+
+ if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
+ !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ wl1271_set_band_rate(wl, wlvif);
+ wlvif->basic_rate =
+ wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
+ ret = wl1271_acx_sta_rate_policies(wl, wlvif);
+
+ wl1271_ps_elp_sleep(wl);
+ }
+out:
mutex_unlock(&wl->mutex);
- return 0;
+ return ret;
}
static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_channel_switch *ch_switch)
{
struct wl1271 *wl = hw->priv;
+ struct wl12xx_vif *wlvif;
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
@@ -4138,19 +4357,24 @@ static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
mutex_lock(&wl->mutex);
if (unlikely(wl->state == WL1271_STATE_OFF)) {
- mutex_unlock(&wl->mutex);
- ieee80211_chswitch_done(wl->vif, false);
- return;
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
+ ieee80211_chswitch_done(vif, false);
+ }
+ goto out;
}
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
- ret = wl12xx_cmd_channel_switch(wl, ch_switch);
+ /* TODO: change mac80211 to pass vif as param */
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ ret = wl12xx_cmd_channel_switch(wl, ch_switch);
- if (!ret)
- set_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags);
+ if (!ret)
+ set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
+ }
wl1271_ps_elp_sleep(wl);
@@ -4170,10 +4394,6 @@ static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
/* packets are considered pending if in the TX queue or the FW */
ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
-
- /* the above is appropriate for STA mode for PS purposes */
- WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
-
out:
mutex_unlock(&wl->mutex);
@@ -4410,6 +4630,7 @@ static const struct ieee80211_ops wl1271_ops = {
.stop = wl1271_op_stop,
.add_interface = wl1271_op_add_interface,
.remove_interface = wl1271_op_remove_interface,
+ .change_interface = wl12xx_op_change_interface,
#ifdef CONFIG_PM
.suspend = wl1271_op_suspend,
.resume = wl1271_op_resume,
@@ -4604,7 +4825,7 @@ static struct bin_attribute fwlog_attr = {
.read = wl1271_sysfs_read_fwlog,
};
-int wl1271_register_hw(struct wl1271 *wl)
+static int wl1271_register_hw(struct wl1271 *wl)
{
int ret;
@@ -4645,9 +4866,8 @@ int wl1271_register_hw(struct wl1271 *wl)
return 0;
}
-EXPORT_SYMBOL_GPL(wl1271_register_hw);
-void wl1271_unregister_hw(struct wl1271 *wl)
+static void wl1271_unregister_hw(struct wl1271 *wl)
{
if (wl->state == WL1271_STATE_PLT)
__wl1271_plt_stop(wl);
@@ -4657,9 +4877,8 @@ void wl1271_unregister_hw(struct wl1271 *wl)
wl->mac80211_registered = false;
}
-EXPORT_SYMBOL_GPL(wl1271_unregister_hw);
-int wl1271_init_ieee80211(struct wl1271 *wl)
+static int wl1271_init_ieee80211(struct wl1271 *wl)
{
static const u32 cipher_suites[] = {
WLAN_CIPHER_SUITE_WEP40,
@@ -4736,27 +4955,33 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
- SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl));
+ /* the FW answers probe-requests in AP-mode */
+ wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
+ wl->hw->wiphy->probe_resp_offload =
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
+ NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
+
+ SET_IEEE80211_DEV(wl->hw, wl->dev);
wl->hw->sta_data_size = sizeof(struct wl1271_station);
+ wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
wl->hw->max_rx_aggregation_subframes = 8;
return 0;
}
-EXPORT_SYMBOL_GPL(wl1271_init_ieee80211);
#define WL1271_DEFAULT_CHANNEL 0
-struct ieee80211_hw *wl1271_alloc_hw(void)
+static struct ieee80211_hw *wl1271_alloc_hw(void)
{
struct ieee80211_hw *hw;
- struct platform_device *plat_dev = NULL;
struct wl1271 *wl;
int i, j, ret;
unsigned int order;
- BUILD_BUG_ON(AP_MAX_LINKS > WL12XX_MAX_LINKS);
+ BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
if (!hw) {
@@ -4765,41 +4990,26 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
goto err_hw_alloc;
}
- plat_dev = kmemdup(&wl1271_device, sizeof(wl1271_device), GFP_KERNEL);
- if (!plat_dev) {
- wl1271_error("could not allocate platform_device");
- ret = -ENOMEM;
- goto err_plat_alloc;
- }
-
wl = hw->priv;
memset(wl, 0, sizeof(*wl));
INIT_LIST_HEAD(&wl->list);
+ INIT_LIST_HEAD(&wl->wlvif_list);
wl->hw = hw;
- wl->plat_dev = plat_dev;
-
- for (i = 0; i < NUM_TX_QUEUES; i++)
- skb_queue_head_init(&wl->tx_queue[i]);
for (i = 0; i < NUM_TX_QUEUES; i++)
- for (j = 0; j < AP_MAX_LINKS; j++)
+ for (j = 0; j < WL12XX_MAX_LINKS; j++)
skb_queue_head_init(&wl->links[j].tx_queue[i]);
skb_queue_head_init(&wl->deferred_rx_queue);
skb_queue_head_init(&wl->deferred_tx_queue);
INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
- INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work);
INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
INIT_WORK(&wl->tx_work, wl1271_tx_work);
INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
- INIT_WORK(&wl->rx_streaming_enable_work,
- wl1271_rx_streaming_enable_work);
- INIT_WORK(&wl->rx_streaming_disable_work,
- wl1271_rx_streaming_disable_work);
wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
if (!wl->freezable_wq) {
@@ -4808,41 +5018,21 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
}
wl->channel = WL1271_DEFAULT_CHANNEL;
- wl->beacon_int = WL1271_DEFAULT_BEACON_INT;
- wl->default_key = 0;
wl->rx_counter = 0;
- wl->psm_entry_retry = 0;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
- wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
- wl->basic_rate = CONF_TX_RATE_MASK_BASIC;
- wl->rate_set = CONF_TX_RATE_MASK_BASIC;
wl->band = IEEE80211_BAND_2GHZ;
wl->vif = NULL;
wl->flags = 0;
wl->sg_enabled = true;
wl->hw_pg_ver = -1;
- wl->bss_type = MAX_BSS_TYPE;
- wl->set_bss_type = MAX_BSS_TYPE;
- wl->last_tx_hlid = 0;
wl->ap_ps_map = 0;
wl->ap_fw_ps_map = 0;
wl->quirks = 0;
wl->platform_quirks = 0;
wl->sched_scanning = false;
- wl->tx_security_seq = 0;
- wl->tx_security_last_seq_lsb = 0;
wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
- wl->role_id = WL12XX_INVALID_ROLE_ID;
wl->system_hlid = WL12XX_SYSTEM_HLID;
- wl->sta_hlid = WL12XX_INVALID_LINK_ID;
- wl->dev_role_id = WL12XX_INVALID_ROLE_ID;
- wl->dev_hlid = WL12XX_INVALID_LINK_ID;
- wl->session_counter = 0;
- wl->ap_bcast_hlid = WL12XX_INVALID_LINK_ID;
- wl->ap_global_hlid = WL12XX_INVALID_LINK_ID;
wl->active_sta_count = 0;
- setup_timer(&wl->rx_streaming_timer, wl1271_rx_streaming_timer,
- (unsigned long) wl);
wl->fwlog_size = 0;
init_waitqueue_head(&wl->fwlog_waitq);
@@ -4860,8 +5050,6 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
/* Apply default driver configuration. */
wl1271_conf_init(wl);
- wl->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
- wl->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
order = get_order(WL1271_AGGR_BUFFER_SIZE);
wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
@@ -4883,49 +5071,8 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
goto err_dummy_packet;
}
- /* Register platform device */
- ret = platform_device_register(wl->plat_dev);
- if (ret) {
- wl1271_error("couldn't register platform device");
- goto err_fwlog;
- }
- dev_set_drvdata(&wl->plat_dev->dev, wl);
-
- /* Create sysfs file to control bt coex state */
- ret = device_create_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
- if (ret < 0) {
- wl1271_error("failed to create sysfs file bt_coex_state");
- goto err_platform;
- }
-
- /* Create sysfs file to get HW PG version */
- ret = device_create_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
- if (ret < 0) {
- wl1271_error("failed to create sysfs file hw_pg_ver");
- goto err_bt_coex_state;
- }
-
- /* Create sysfs file for the FW log */
- ret = device_create_bin_file(&wl->plat_dev->dev, &fwlog_attr);
- if (ret < 0) {
- wl1271_error("failed to create sysfs file fwlog");
- goto err_hw_pg_ver;
- }
-
return hw;
-err_hw_pg_ver:
- device_remove_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
-
-err_bt_coex_state:
- device_remove_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
-
-err_platform:
- platform_device_unregister(wl->plat_dev);
-
-err_fwlog:
- free_page((unsigned long)wl->fwlog);
-
err_dummy_packet:
dev_kfree_skb(wl->dummy_packet);
@@ -4937,18 +5084,14 @@ err_wq:
err_hw:
wl1271_debugfs_exit(wl);
- kfree(plat_dev);
-
-err_plat_alloc:
ieee80211_free_hw(hw);
err_hw_alloc:
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_GPL(wl1271_alloc_hw);
-int wl1271_free_hw(struct wl1271 *wl)
+static int wl1271_free_hw(struct wl1271 *wl)
{
/* Unblock any fwlog readers */
mutex_lock(&wl->mutex);
@@ -4956,17 +5099,15 @@ int wl1271_free_hw(struct wl1271 *wl)
wake_up_interruptible_all(&wl->fwlog_waitq);
mutex_unlock(&wl->mutex);
- device_remove_bin_file(&wl->plat_dev->dev, &fwlog_attr);
+ device_remove_bin_file(wl->dev, &fwlog_attr);
- device_remove_file(&wl->plat_dev->dev, &dev_attr_hw_pg_ver);
+ device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
- device_remove_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
- platform_device_unregister(wl->plat_dev);
+ device_remove_file(wl->dev, &dev_attr_bt_coex_state);
free_page((unsigned long)wl->fwlog);
dev_kfree_skb(wl->dummy_packet);
free_pages((unsigned long)wl->aggr_buf,
get_order(WL1271_AGGR_BUFFER_SIZE));
- kfree(wl->plat_dev);
wl1271_debugfs_exit(wl);
@@ -4983,7 +5124,174 @@ int wl1271_free_hw(struct wl1271 *wl)
return 0;
}
-EXPORT_SYMBOL_GPL(wl1271_free_hw);
+
+static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
+{
+ struct wl1271 *wl = cookie;
+ unsigned long flags;
+
+ wl1271_debug(DEBUG_IRQ, "IRQ");
+
+ /* complete the ELP completion */
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
+ if (wl->elp_compl) {
+ complete(wl->elp_compl);
+ wl->elp_compl = NULL;
+ }
+
+ if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
+ /* don't enqueue a work right now. mark it as pending */
+ set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
+ wl1271_debug(DEBUG_IRQ, "should not enqueue work");
+ disable_irq_nosync(wl->irq);
+ pm_wakeup_event(wl->dev, 0);
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ return IRQ_HANDLED;
+ }
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static int __devinit wl12xx_probe(struct platform_device *pdev)
+{
+ struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
+ struct ieee80211_hw *hw;
+ struct wl1271 *wl;
+ unsigned long irqflags;
+ int ret = -ENODEV;
+
+ hw = wl1271_alloc_hw();
+ if (IS_ERR(hw)) {
+ wl1271_error("can't allocate hw");
+ ret = PTR_ERR(hw);
+ goto out;
+ }
+
+ wl = hw->priv;
+ wl->irq = platform_get_irq(pdev, 0);
+ wl->ref_clock = pdata->board_ref_clock;
+ wl->tcxo_clock = pdata->board_tcxo_clock;
+ wl->platform_quirks = pdata->platform_quirks;
+ wl->set_power = pdata->set_power;
+ wl->dev = &pdev->dev;
+ wl->if_ops = pdata->ops;
+
+ platform_set_drvdata(pdev, wl);
+
+ if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
+ irqflags = IRQF_TRIGGER_RISING;
+ else
+ irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
+
+ ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wl1271_irq,
+ irqflags,
+ pdev->name, wl);
+ if (ret < 0) {
+ wl1271_error("request_irq() failed: %d", ret);
+ goto out_free_hw;
+ }
+
+ ret = enable_irq_wake(wl->irq);
+ if (!ret) {
+ wl->irq_wake_enabled = true;
+ device_init_wakeup(wl->dev, 1);
+ if (pdata->pwr_in_suspend)
+ hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
+
+ }
+ disable_irq(wl->irq);
+
+ ret = wl1271_init_ieee80211(wl);
+ if (ret)
+ goto out_irq;
+
+ ret = wl1271_register_hw(wl);
+ if (ret)
+ goto out_irq;
+
+ /* Create sysfs file to control bt coex state */
+ ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
+ if (ret < 0) {
+ wl1271_error("failed to create sysfs file bt_coex_state");
+ goto out_irq;
+ }
+
+ /* Create sysfs file to get HW PG version */
+ ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
+ if (ret < 0) {
+ wl1271_error("failed to create sysfs file hw_pg_ver");
+ goto out_bt_coex_state;
+ }
+
+ /* Create sysfs file for the FW log */
+ ret = device_create_bin_file(wl->dev, &fwlog_attr);
+ if (ret < 0) {
+ wl1271_error("failed to create sysfs file fwlog");
+ goto out_hw_pg_ver;
+ }
+
+ return 0;
+
+out_hw_pg_ver:
+ device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
+
+out_bt_coex_state:
+ device_remove_file(wl->dev, &dev_attr_bt_coex_state);
+
+out_irq:
+ free_irq(wl->irq, wl);
+
+out_free_hw:
+ wl1271_free_hw(wl);
+
+out:
+ return ret;
+}
+
+static int __devexit wl12xx_remove(struct platform_device *pdev)
+{
+ struct wl1271 *wl = platform_get_drvdata(pdev);
+
+ if (wl->irq_wake_enabled) {
+ device_init_wakeup(wl->dev, 0);
+ disable_irq_wake(wl->irq);
+ }
+ wl1271_unregister_hw(wl);
+ free_irq(wl->irq, wl);
+ wl1271_free_hw(wl);
+
+ return 0;
+}
+
+static const struct platform_device_id wl12xx_id_table[] __devinitconst = {
+ { "wl12xx", 0 },
+ { } /* Terminating Entry */
+};
+MODULE_DEVICE_TABLE(platform, wl12xx_id_table);
+
+static struct platform_driver wl12xx_driver = {
+ .probe = wl12xx_probe,
+ .remove = __devexit_p(wl12xx_remove),
+ .id_table = wl12xx_id_table,
+ .driver = {
+ .name = "wl12xx_driver",
+ .owner = THIS_MODULE,
+ }
+};
+
+static int __init wl12xx_init(void)
+{
+ return platform_driver_register(&wl12xx_driver);
+}
+module_init(wl12xx_init);
+
+static void __exit wl12xx_exit(void)
+{
+ platform_driver_unregister(&wl12xx_driver);
+}
+module_exit(wl12xx_exit);
u32 wl12xx_debug_level = DEBUG_NONE;
EXPORT_SYMBOL_GPL(wl12xx_debug_level);
diff --git a/drivers/net/wireless/wl12xx/ps.c b/drivers/net/wireless/wl12xx/ps.c
index c15ebf2..a2bdacd 100644
--- a/drivers/net/wireless/wl12xx/ps.c
+++ b/drivers/net/wireless/wl12xx/ps.c
@@ -25,6 +25,7 @@
#include "ps.h"
#include "io.h"
#include "tx.h"
+#include "debug.h"
#define WL1271_WAKEUP_TIMEOUT 500
@@ -32,6 +33,7 @@ void wl1271_elp_work(struct work_struct *work)
{
struct delayed_work *dwork;
struct wl1271 *wl;
+ struct wl12xx_vif *wlvif;
dwork = container_of(work, struct delayed_work, work);
wl = container_of(dwork, struct wl1271, elp_work);
@@ -47,11 +49,18 @@ void wl1271_elp_work(struct work_struct *work)
if (unlikely(!test_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
goto out;
- if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) ||
- (!test_bit(WL1271_FLAG_PSM, &wl->flags) &&
- !test_bit(WL1271_FLAG_IDLE, &wl->flags)))
+ if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
goto out;
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ goto out;
+
+ if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags) &&
+ test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
+ goto out;
+ }
+
wl1271_debug(DEBUG_PSM, "chip to elp");
wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
@@ -65,13 +74,20 @@ out:
/* Routines to toggle sleep mode while in ELP */
void wl1271_ps_elp_sleep(struct wl1271 *wl)
{
+ struct wl12xx_vif *wlvif;
+
/* we shouldn't get consecutive sleep requests */
if (WARN_ON(test_and_set_bit(WL1271_FLAG_ELP_REQUESTED, &wl->flags)))
return;
- if (!test_bit(WL1271_FLAG_PSM, &wl->flags) &&
- !test_bit(WL1271_FLAG_IDLE, &wl->flags))
- return;
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ return;
+
+ if (!test_bit(WLVIF_FLAG_PSM, &wlvif->flags) &&
+ test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
+ return;
+ }
ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
msecs_to_jiffies(ELP_ENTRY_DELAY));
@@ -143,8 +159,8 @@ out:
return 0;
}
-int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
- u32 rates, bool send)
+int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum wl1271_cmd_ps_mode mode, u32 rates, bool send)
{
int ret;
@@ -152,39 +168,34 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
case STATION_POWER_SAVE_MODE:
wl1271_debug(DEBUG_PSM, "entering psm");
- ret = wl1271_acx_wake_up_conditions(wl);
+ ret = wl1271_acx_wake_up_conditions(wl, wlvif);
if (ret < 0) {
wl1271_error("couldn't set wake up conditions");
return ret;
}
- ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
+ ret = wl1271_cmd_ps_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
if (ret < 0)
return ret;
- set_bit(WL1271_FLAG_PSM, &wl->flags);
+ set_bit(WLVIF_FLAG_PSM, &wlvif->flags);
break;
case STATION_ACTIVE_MODE:
default:
wl1271_debug(DEBUG_PSM, "leaving psm");
/* disable beacon early termination */
- if (wl->band == IEEE80211_BAND_2GHZ) {
- ret = wl1271_acx_bet_enable(wl, false);
+ if (wlvif->band == IEEE80211_BAND_2GHZ) {
+ ret = wl1271_acx_bet_enable(wl, wlvif, false);
if (ret < 0)
return ret;
}
- /* disable beacon filtering */
- ret = wl1271_acx_beacon_filter_opt(wl, false);
- if (ret < 0)
- return ret;
-
- ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE);
+ ret = wl1271_cmd_ps_mode(wl, wlvif, STATION_ACTIVE_MODE);
if (ret < 0)
return ret;
- clear_bit(WL1271_FLAG_PSM, &wl->flags);
+ clear_bit(WLVIF_FLAG_PSM, &wlvif->flags);
break;
}
@@ -223,9 +234,11 @@ static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid)
wl1271_handle_tx_low_watermark(wl);
}
-void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
+void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 hlid, bool clean_queues)
{
struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
if (test_bit(hlid, &wl->ap_ps_map))
return;
@@ -235,7 +248,7 @@ void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
clean_queues);
rcu_read_lock();
- sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr);
+ sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
if (!sta) {
wl1271_error("could not find sta %pM for starting ps",
wl->links[hlid].addr);
@@ -253,9 +266,10 @@ void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues)
__set_bit(hlid, &wl->ap_ps_map);
}
-void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid)
+void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
{
struct ieee80211_sta *sta;
+ struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
if (!test_bit(hlid, &wl->ap_ps_map))
return;
@@ -265,7 +279,7 @@ void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid)
__clear_bit(hlid, &wl->ap_ps_map);
rcu_read_lock();
- sta = ieee80211_find_sta(wl->vif, wl->links[hlid].addr);
+ sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
if (!sta) {
wl1271_error("could not find sta %pM for ending ps",
wl->links[hlid].addr);
diff --git a/drivers/net/wireless/wl12xx/ps.h b/drivers/net/wireless/wl12xx/ps.h
index 25eb9bc..a12052f0 100644
--- a/drivers/net/wireless/wl12xx/ps.h
+++ b/drivers/net/wireless/wl12xx/ps.h
@@ -27,13 +27,14 @@
#include "wl12xx.h"
#include "acx.h"
-int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
- u32 rates, bool send);
+int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ enum wl1271_cmd_ps_mode mode, u32 rates, bool send);
void wl1271_ps_elp_sleep(struct wl1271 *wl);
int wl1271_ps_elp_wakeup(struct wl1271 *wl);
void wl1271_elp_work(struct work_struct *work);
-void wl1271_ps_link_start(struct wl1271 *wl, u8 hlid, bool clean_queues);
-void wl1271_ps_link_end(struct wl1271 *wl, u8 hlid);
+void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 hlid, bool clean_queues);
+void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
#define WL1271_PS_COMPLETE_TIMEOUT 500
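
The ps.c hunks above move power-save state from the global wl->flags into per-vif flag words and only queue the ELP work when every vif permits it (no AP role, and any in-use vif already in PSM). A minimal sketch of that gate, using illustrative example_* names rather than the driver's wl12xx_for_each_wlvif machinery:

#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/types.h>

struct example_vif {
	struct list_head list;
	unsigned long flags;	/* per-vif flags, like wlvif->flags */
	bool is_ap;		/* stands in for bss_type == BSS_TYPE_AP_BSS */
};

#define EXAMPLE_FLAG_PSM	0	/* like WLVIF_FLAG_PSM */
#define EXAMPLE_FLAG_IN_USE	1	/* like WLVIF_FLAG_IN_USE */

/* ELP may only be requested when no vif vetoes it: any AP vif, or any
 * in-use vif that is not in power-save, keeps the chip awake. */
static bool example_elp_allowed(struct list_head *vifs)
{
	struct example_vif *vif;

	list_for_each_entry(vif, vifs, list) {
		if (vif->is_ap)
			return false;
		if (!test_bit(EXAMPLE_FLAG_PSM, &vif->flags) &&
		    test_bit(EXAMPLE_FLAG_IN_USE, &vif->flags))
			return false;
	}

	return true;
}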
diff --git a/drivers/net/wireless/wl12xx/reg.h b/drivers/net/wireless/wl12xx/reg.h
index 3f570f3..df34d59 100644
--- a/drivers/net/wireless/wl12xx/reg.h
+++ b/drivers/net/wireless/wl12xx/reg.h
@@ -408,7 +408,7 @@
/* Firmware image load chunk size */
-#define CHUNK_SIZE 512
+#define CHUNK_SIZE 16384
/* Firmware image header size */
#define FW_HDR_SIZE 8
diff --git a/drivers/net/wireless/wl12xx/rx.c b/drivers/net/wireless/wl12xx/rx.c
index dee4cfe..4fbd2a7 100644
--- a/drivers/net/wireless/wl12xx/rx.c
+++ b/drivers/net/wireless/wl12xx/rx.c
@@ -25,9 +25,11 @@
#include <linux/sched.h>
#include "wl12xx.h"
+#include "debug.h"
#include "acx.h"
#include "reg.h"
#include "rx.h"
+#include "tx.h"
#include "io.h"
static u8 wl12xx_rx_get_mem_block(struct wl12xx_fw_status *status,
@@ -96,7 +98,7 @@ static void wl1271_rx_status(struct wl1271 *wl,
}
static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
- bool unaligned)
+ bool unaligned, u8 *hlid)
{
struct wl1271_rx_descriptor *desc;
struct sk_buff *skb;
@@ -159,6 +161,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
* payload aligned to 4 bytes.
*/
memcpy(buf, data + sizeof(*desc), length - sizeof(*desc));
+ *hlid = desc->hlid;
hdr = (struct ieee80211_hdr *)skb->data;
if (ieee80211_is_beacon(hdr->frame_control))
@@ -169,10 +172,10 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon);
seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
- wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d", skb,
+ wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d hlid %d", skb,
skb->len - desc->pad_len,
beacon ? "beacon" : "",
- seq_num);
+ seq_num, *hlid);
skb_trim(skb, skb->len - desc->pad_len);
@@ -185,6 +188,7 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
{
struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
+ unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
u32 buf_size;
u32 fw_rx_counter = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
@@ -192,8 +196,7 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
u32 mem_block;
u32 pkt_length;
u32 pkt_offset;
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
- bool had_data = false;
+ u8 hlid;
bool unaligned = false;
while (drv_rx_counter != fw_rx_counter) {
@@ -253,8 +256,15 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
*/
if (wl1271_rx_handle_data(wl,
wl->aggr_buf + pkt_offset,
- pkt_length, unaligned) == 1)
- had_data = true;
+ pkt_length, unaligned,
+ &hlid) == 1) {
+ if (hlid < WL12XX_MAX_LINKS)
+ __set_bit(hlid, active_hlids);
+ else
+ WARN(1,
+ "hlid exceeded WL12XX_MAX_LINKS "
+ "(%d)\n", hlid);
+ }
wl->rx_counter++;
drv_rx_counter++;
@@ -270,17 +280,5 @@ void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
- if (!is_ap && wl->conf.rx_streaming.interval && had_data &&
- (wl->conf.rx_streaming.always ||
- test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) {
- u32 timeout = wl->conf.rx_streaming.duration;
-
- /* restart rx streaming */
- if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
- ieee80211_queue_work(wl->hw,
- &wl->rx_streaming_enable_work);
-
- mod_timer(&wl->rx_streaming_timer,
- jiffies + msecs_to_jiffies(timeout));
- }
+ wl12xx_rearm_rx_streaming(wl, active_hlids);
}
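
The rx.c change above replaces the single had_data flag with a bitmap of links that actually carried data, which wl12xx_rearm_rx_streaming() then consumes. A minimal sketch of that bitmap pattern, with illustrative names (EXAMPLE_MAX_LINKS stands in for WL12XX_MAX_LINKS):

#include <linux/bitops.h>
#include <linux/printk.h>
#include <linux/types.h>

#define EXAMPLE_MAX_LINKS	12	/* stands in for WL12XX_MAX_LINKS */

static void example_note_active_links(const u8 *hlids, int count)
{
	unsigned long active[BITS_TO_LONGS(EXAMPLE_MAX_LINKS)] = { 0 };
	int i, hlid;

	/* collect the link id of every successfully handled frame */
	for (i = 0; i < count; i++)
		if (hlids[i] < EXAMPLE_MAX_LINKS)
			__set_bit(hlids[i], active);

	/* consumers can then walk only the links that saw traffic */
	for_each_set_bit(hlid, active, EXAMPLE_MAX_LINKS)
		pr_debug("link %d had rx data\n", hlid);
}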
diff --git a/drivers/net/wireless/wl12xx/scan.c b/drivers/net/wireless/wl12xx/scan.c
index fc29c67..e24111e 100644
--- a/drivers/net/wireless/wl12xx/scan.c
+++ b/drivers/net/wireless/wl12xx/scan.c
@@ -24,6 +24,7 @@
#include <linux/ieee80211.h>
#include "wl12xx.h"
+#include "debug.h"
#include "cmd.h"
#include "scan.h"
#include "acx.h"
@@ -34,6 +35,8 @@ void wl1271_scan_complete_work(struct work_struct *work)
{
struct delayed_work *dwork;
struct wl1271 *wl;
+ struct ieee80211_vif *vif;
+ struct wl12xx_vif *wlvif;
int ret;
bool is_sta, is_ibss;
@@ -50,28 +53,31 @@ void wl1271_scan_complete_work(struct work_struct *work)
if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
goto out;
+ vif = wl->scan_vif;
+ wlvif = wl12xx_vif_to_data(vif);
+
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
wl->scan.req = NULL;
+ wl->scan_vif = NULL;
ret = wl1271_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
- if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+ if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
/* restore hardware connection monitoring template */
- wl1271_cmd_build_ap_probe_req(wl, wl->probereq);
+ wl1271_cmd_build_ap_probe_req(wl, wlvif, wlvif->probereq);
}
/* return to ROC if needed */
- is_sta = (wl->bss_type == BSS_TYPE_STA_BSS);
- is_ibss = (wl->bss_type == BSS_TYPE_IBSS);
- if (((is_sta && !test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) ||
- (is_ibss && !test_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags))) &&
- !test_bit(wl->dev_role_id, wl->roc_map)) {
+ is_sta = (wlvif->bss_type == BSS_TYPE_STA_BSS);
+ is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
+ if (((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
+ (is_ibss && !test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))) &&
+ !test_bit(wlvif->dev_role_id, wl->roc_map)) {
/* restore remain on channel */
- wl12xx_cmd_role_start_dev(wl);
- wl12xx_roc(wl, wl->dev_role_id);
+ wl12xx_start_dev(wl, wlvif);
}
wl1271_ps_elp_sleep(wl);
@@ -155,9 +161,11 @@ static int wl1271_get_scan_channels(struct wl1271 *wl,
#define WL1271_NOTHING_TO_SCAN 1
-static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
- bool passive, u32 basic_rate)
+static int wl1271_scan_send(struct wl1271 *wl, struct ieee80211_vif *vif,
+ enum ieee80211_band band,
+ bool passive, u32 basic_rate)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct wl1271_cmd_scan *cmd;
struct wl1271_cmd_trigger_scan_to *trigger;
int ret;
@@ -177,11 +185,11 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
if (passive)
scan_options |= WL1271_SCAN_OPT_PASSIVE;
- if (WARN_ON(wl->role_id == WL12XX_INVALID_ROLE_ID)) {
+ if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID)) {
ret = -EINVAL;
goto out;
}
- cmd->params.role_id = wl->role_id;
+ cmd->params.role_id = wlvif->role_id;
cmd->params.scan_options = cpu_to_le16(scan_options);
cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req,
@@ -194,7 +202,6 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
cmd->params.tx_rate = cpu_to_le32(basic_rate);
cmd->params.n_probe_reqs = wl->conf.scan.num_probe_reqs;
- cmd->params.tx_rate = cpu_to_le32(basic_rate);
cmd->params.tid_trigger = 0;
cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
@@ -208,11 +215,11 @@ static int wl1271_scan_send(struct wl1271 *wl, enum ieee80211_band band,
memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len);
}
- memcpy(cmd->addr, wl->mac_addr, ETH_ALEN);
+ memcpy(cmd->addr, vif->addr, ETH_ALEN);
- ret = wl1271_cmd_build_probe_req(wl, wl->scan.ssid, wl->scan.ssid_len,
- wl->scan.req->ie, wl->scan.req->ie_len,
- band);
+ ret = wl1271_cmd_build_probe_req(wl, wlvif, wl->scan.ssid,
+ wl->scan.ssid_len, wl->scan.req->ie,
+ wl->scan.req->ie_len, band);
if (ret < 0) {
wl1271_error("PROBE request template failed");
goto out;
@@ -241,11 +248,12 @@ out:
return ret;
}
-void wl1271_scan_stm(struct wl1271 *wl)
+void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif)
{
+ struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret = 0;
enum ieee80211_band band;
- u32 rate;
+ u32 rate, mask;
switch (wl->scan.state) {
case WL1271_SCAN_STATE_IDLE:
@@ -253,47 +261,59 @@ void wl1271_scan_stm(struct wl1271 *wl)
case WL1271_SCAN_STATE_2GHZ_ACTIVE:
band = IEEE80211_BAND_2GHZ;
- rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
- ret = wl1271_scan_send(wl, band, false, rate);
+ mask = wlvif->bitrate_masks[band];
+ if (wl->scan.req->no_cck) {
+ mask &= ~CONF_TX_CCK_RATES;
+ if (!mask)
+ mask = CONF_TX_RATE_MASK_BASIC_P2P;
+ }
+ rate = wl1271_tx_min_rate_get(wl, mask);
+ ret = wl1271_scan_send(wl, vif, band, false, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
wl->scan.state = WL1271_SCAN_STATE_2GHZ_PASSIVE;
- wl1271_scan_stm(wl);
+ wl1271_scan_stm(wl, vif);
}
break;
case WL1271_SCAN_STATE_2GHZ_PASSIVE:
band = IEEE80211_BAND_2GHZ;
- rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
- ret = wl1271_scan_send(wl, band, true, rate);
+ mask = wlvif->bitrate_masks[band];
+ if (wl->scan.req->no_cck) {
+ mask &= ~CONF_TX_CCK_RATES;
+ if (!mask)
+ mask = CONF_TX_RATE_MASK_BASIC_P2P;
+ }
+ rate = wl1271_tx_min_rate_get(wl, mask);
+ ret = wl1271_scan_send(wl, vif, band, true, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
if (wl->enable_11a)
wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE;
else
wl->scan.state = WL1271_SCAN_STATE_DONE;
- wl1271_scan_stm(wl);
+ wl1271_scan_stm(wl, vif);
}
break;
case WL1271_SCAN_STATE_5GHZ_ACTIVE:
band = IEEE80211_BAND_5GHZ;
- rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
- ret = wl1271_scan_send(wl, band, false, rate);
+ rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
+ ret = wl1271_scan_send(wl, vif, band, false, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
wl->scan.state = WL1271_SCAN_STATE_5GHZ_PASSIVE;
- wl1271_scan_stm(wl);
+ wl1271_scan_stm(wl, vif);
}
break;
case WL1271_SCAN_STATE_5GHZ_PASSIVE:
band = IEEE80211_BAND_5GHZ;
- rate = wl1271_tx_min_rate_get(wl, wl->bitrate_masks[band]);
- ret = wl1271_scan_send(wl, band, true, rate);
+ rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
+ ret = wl1271_scan_send(wl, vif, band, true, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
wl->scan.state = WL1271_SCAN_STATE_DONE;
- wl1271_scan_stm(wl);
+ wl1271_scan_stm(wl, vif);
}
break;
@@ -317,7 +337,8 @@ void wl1271_scan_stm(struct wl1271 *wl)
}
}
-int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
+int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
+ const u8 *ssid, size_t ssid_len,
struct cfg80211_scan_request *req)
{
/*
@@ -338,6 +359,7 @@ int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
wl->scan.ssid_len = 0;
}
+ wl->scan_vif = vif;
wl->scan.req = req;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
@@ -346,7 +368,7 @@ int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
msecs_to_jiffies(WL1271_SCAN_TIMEOUT));
- wl1271_scan_stm(wl);
+ wl1271_scan_stm(wl, vif);
return 0;
}
@@ -415,18 +437,19 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
if (flags & IEEE80211_CHAN_RADAR) {
channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS;
+
channels[j].passive_duration =
cpu_to_le16(c->dwell_time_dfs);
- }
- else if (flags & IEEE80211_CHAN_PASSIVE_SCAN) {
+ } else {
channels[j].passive_duration =
cpu_to_le16(c->dwell_time_passive);
- } else {
- channels[j].min_duration =
- cpu_to_le16(c->min_dwell_time_active);
- channels[j].max_duration =
- cpu_to_le16(c->max_dwell_time_active);
}
+
+ channels[j].min_duration =
+ cpu_to_le16(c->min_dwell_time_active);
+ channels[j].max_duration =
+ cpu_to_le16(c->max_dwell_time_active);
+
channels[j].tx_power_att = req->channels[i]->max_power;
channels[j].channel = req->channels[i]->hw_value;
@@ -550,6 +573,9 @@ wl12xx_scan_sched_scan_ssid_list(struct wl1271 *wl,
* so they're used in probe requests.
*/
for (i = 0; i < req->n_ssids; i++) {
+ if (!req->ssids[i].ssid_len)
+ continue;
+
for (j = 0; j < cmd->n_ssids; j++)
if (!memcmp(req->ssids[i].ssid,
cmd->ssids[j].ssid,
@@ -585,6 +611,7 @@ out:
}
int wl1271_scan_sched_scan_config(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct cfg80211_sched_scan_request *req,
struct ieee80211_sched_scan_ies *ies)
{
@@ -631,7 +658,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
}
if (!force_passive && cfg->active[0]) {
- ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid,
+ ret = wl1271_cmd_build_probe_req(wl, wlvif, req->ssids[0].ssid,
req->ssids[0].ssid_len,
ies->ie[IEEE80211_BAND_2GHZ],
ies->len[IEEE80211_BAND_2GHZ],
@@ -643,7 +670,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
}
if (!force_passive && cfg->active[1]) {
- ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid,
+ ret = wl1271_cmd_build_probe_req(wl, wlvif, req->ssids[0].ssid,
req->ssids[0].ssid_len,
ies->ie[IEEE80211_BAND_5GHZ],
ies->len[IEEE80211_BAND_5GHZ],
@@ -667,17 +694,17 @@ out:
return ret;
}
-int wl1271_scan_sched_scan_start(struct wl1271 *wl)
+int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl1271_cmd_sched_scan_start *start;
int ret = 0;
wl1271_debug(DEBUG_CMD, "cmd periodic scan start");
- if (wl->bss_type != BSS_TYPE_STA_BSS)
+ if (wlvif->bss_type != BSS_TYPE_STA_BSS)
return -EOPNOTSUPP;
- if (!test_bit(WL1271_FLAG_IDLE, &wl->flags))
+ if (test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
return -EBUSY;
start = kzalloc(sizeof(*start), GFP_KERNEL);
@@ -727,7 +754,6 @@ void wl1271_scan_sched_scan_stop(struct wl1271 *wl)
wl1271_error("failed to send sched scan stop command");
goto out_free;
}
- wl->sched_scanning = false;
out_free:
kfree(stop);
diff --git a/drivers/net/wireless/wl12xx/scan.h b/drivers/net/wireless/wl12xx/scan.h
index 9211515..a7ed43d 100644
--- a/drivers/net/wireless/wl12xx/scan.h
+++ b/drivers/net/wireless/wl12xx/scan.h
@@ -26,18 +26,20 @@
#include "wl12xx.h"
-int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
+int wl1271_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
+ const u8 *ssid, size_t ssid_len,
struct cfg80211_scan_request *req);
int wl1271_scan_stop(struct wl1271 *wl);
int wl1271_scan_build_probe_req(struct wl1271 *wl,
const u8 *ssid, size_t ssid_len,
const u8 *ie, size_t ie_len, u8 band);
-void wl1271_scan_stm(struct wl1271 *wl);
+void wl1271_scan_stm(struct wl1271 *wl, struct ieee80211_vif *vif);
void wl1271_scan_complete_work(struct work_struct *work);
int wl1271_scan_sched_scan_config(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
struct cfg80211_sched_scan_request *req,
struct ieee80211_sched_scan_ies *ies);
-int wl1271_scan_sched_scan_start(struct wl1271 *wl);
+int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif);
void wl1271_scan_sched_scan_stop(struct wl1271 *wl);
void wl1271_scan_sched_scan_results(struct wl1271 *wl);
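
In the 2.4 GHz scan states above, no_cck (P2P) requests now strip the CCK rates from the per-vif bitrate mask and fall back to the P2P basic-rate mask when nothing remains. A minimal sketch of that adjustment; the CONF_TX_* constants come from the driver's conf.h and the helper name is illustrative:

/* CONF_TX_CCK_RATES and CONF_TX_RATE_MASK_BASIC_P2P come from the
 * driver's conf.h; u32 and bool from <linux/types.h>. */
static u32 example_scan_rate_mask(u32 band_mask, bool no_cck)
{
	u32 mask = band_mask;

	/* P2P scans must not use CCK; if stripping CCK empties the mask,
	 * fall back to the P2P basic rates. */
	if (no_cck) {
		mask &= ~CONF_TX_CCK_RATES;
		if (!mask)
			mask = CONF_TX_RATE_MASK_BASIC_P2P;
	}

	return mask;
}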
diff --git a/drivers/net/wireless/wl12xx/sdio.c b/drivers/net/wireless/wl12xx/sdio.c
index 516a898..468a505 100644
--- a/drivers/net/wireless/wl12xx/sdio.c
+++ b/drivers/net/wireless/wl12xx/sdio.c
@@ -24,6 +24,7 @@
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
+#include <linux/platform_device.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/card.h>
@@ -44,107 +45,67 @@
#define SDIO_DEVICE_ID_TI_WL1271 0x4076
#endif
+struct wl12xx_sdio_glue {
+ struct device *dev;
+ struct platform_device *core;
+};
+
static const struct sdio_device_id wl1271_devices[] __devinitconst = {
{ SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
{}
};
MODULE_DEVICE_TABLE(sdio, wl1271_devices);
-static void wl1271_sdio_set_block_size(struct wl1271 *wl, unsigned int blksz)
-{
- sdio_claim_host(wl->if_priv);
- sdio_set_block_size(wl->if_priv, blksz);
- sdio_release_host(wl->if_priv);
-}
-
-static inline struct sdio_func *wl_to_func(struct wl1271 *wl)
-{
- return wl->if_priv;
-}
-
-static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
-{
- return &(wl_to_func(wl)->dev);
-}
-
-static irqreturn_t wl1271_hardirq(int irq, void *cookie)
+static void wl1271_sdio_set_block_size(struct device *child,
+ unsigned int blksz)
{
- struct wl1271 *wl = cookie;
- unsigned long flags;
+ struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
+ struct sdio_func *func = dev_to_sdio_func(glue->dev);
- wl1271_debug(DEBUG_IRQ, "IRQ");
-
- /* complete the ELP completion */
- spin_lock_irqsave(&wl->wl_lock, flags);
- set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
- if (wl->elp_compl) {
- complete(wl->elp_compl);
- wl->elp_compl = NULL;
- }
-
- if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
- /* don't enqueue a work right now. mark it as pending */
- set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
- wl1271_debug(DEBUG_IRQ, "should not enqueue work");
- disable_irq_nosync(wl->irq);
- pm_wakeup_event(wl1271_sdio_wl_to_dev(wl), 0);
- spin_unlock_irqrestore(&wl->wl_lock, flags);
- return IRQ_HANDLED;
- }
- spin_unlock_irqrestore(&wl->wl_lock, flags);
-
- return IRQ_WAKE_THREAD;
-}
-
-static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
-{
- disable_irq(wl->irq);
-}
-
-static void wl1271_sdio_enable_interrupts(struct wl1271 *wl)
-{
- enable_irq(wl->irq);
+ sdio_claim_host(func);
+ sdio_set_block_size(func, blksz);
+ sdio_release_host(func);
}
-static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
+static void wl12xx_sdio_raw_read(struct device *child, int addr, void *buf,
size_t len, bool fixed)
{
int ret;
- struct sdio_func *func = wl_to_func(wl);
+ struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
+ struct sdio_func *func = dev_to_sdio_func(glue->dev);
if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
- wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
- addr, ((u8 *)buf)[0]);
+ dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n",
+ addr, ((u8 *)buf)[0]);
} else {
if (fixed)
ret = sdio_readsb(func, buf, addr, len);
else
ret = sdio_memcpy_fromio(func, buf, addr, len);
- wl1271_debug(DEBUG_SDIO, "sdio read 53 addr 0x%x, %zu bytes",
- addr, len);
- wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
+ dev_dbg(child->parent, "sdio read 53 addr 0x%x, %zu bytes\n",
+ addr, len);
}
if (ret)
- wl1271_error("sdio read failed (%d)", ret);
+ dev_err(child->parent, "sdio read failed (%d)\n", ret);
}
-static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
+static void wl12xx_sdio_raw_write(struct device *child, int addr, void *buf,
size_t len, bool fixed)
{
int ret;
- struct sdio_func *func = wl_to_func(wl);
+ struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
+ struct sdio_func *func = dev_to_sdio_func(glue->dev);
if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
- wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
- addr, ((u8 *)buf)[0]);
+ dev_dbg(child->parent, "sdio write 52 addr 0x%x, byte 0x%02x\n",
+ addr, ((u8 *)buf)[0]);
} else {
- wl1271_debug(DEBUG_SDIO, "sdio write 53 addr 0x%x, %zu bytes",
- addr, len);
- wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
+ dev_dbg(child->parent, "sdio write 53 addr 0x%x, %zu bytes\n",
+ addr, len);
if (fixed)
ret = sdio_writesb(func, addr, buf, len);
@@ -153,13 +114,13 @@ static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
}
if (ret)
- wl1271_error("sdio write failed (%d)", ret);
+ dev_err(child->parent, "sdio write failed (%d)\n", ret);
}
-static int wl1271_sdio_power_on(struct wl1271 *wl)
+static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
{
- struct sdio_func *func = wl_to_func(wl);
int ret;
+ struct sdio_func *func = dev_to_sdio_func(glue->dev);
/* If enabled, tell runtime PM not to power off the card */
if (pm_runtime_enabled(&func->dev)) {
@@ -180,10 +141,10 @@ out:
return ret;
}
-static int wl1271_sdio_power_off(struct wl1271 *wl)
+static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
{
- struct sdio_func *func = wl_to_func(wl);
int ret;
+ struct sdio_func *func = dev_to_sdio_func(glue->dev);
sdio_disable_func(func);
sdio_release_host(func);
@@ -200,46 +161,43 @@ static int wl1271_sdio_power_off(struct wl1271 *wl)
return ret;
}
-static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable)
+static int wl12xx_sdio_set_power(struct device *child, bool enable)
{
+ struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
+
if (enable)
- return wl1271_sdio_power_on(wl);
+ return wl12xx_sdio_power_on(glue);
else
- return wl1271_sdio_power_off(wl);
+ return wl12xx_sdio_power_off(glue);
}
static struct wl1271_if_operations sdio_ops = {
- .read = wl1271_sdio_raw_read,
- .write = wl1271_sdio_raw_write,
- .power = wl1271_sdio_set_power,
- .dev = wl1271_sdio_wl_to_dev,
- .enable_irq = wl1271_sdio_enable_interrupts,
- .disable_irq = wl1271_sdio_disable_interrupts,
+ .read = wl12xx_sdio_raw_read,
+ .write = wl12xx_sdio_raw_write,
+ .power = wl12xx_sdio_set_power,
.set_block_size = wl1271_sdio_set_block_size,
};
static int __devinit wl1271_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
- struct ieee80211_hw *hw;
- const struct wl12xx_platform_data *wlan_data;
- struct wl1271 *wl;
- unsigned long irqflags;
+ struct wl12xx_platform_data *wlan_data;
+ struct wl12xx_sdio_glue *glue;
+ struct resource res[1];
mmc_pm_flag_t mmcflags;
- int ret;
+ int ret = -ENOMEM;
/* We are only able to handle the wlan function */
if (func->num != 0x02)
return -ENODEV;
- hw = wl1271_alloc_hw();
- if (IS_ERR(hw))
- return PTR_ERR(hw);
-
- wl = hw->priv;
+ glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+ if (!glue) {
+ dev_err(&func->dev, "can't allocate glue\n");
+ goto out;
+ }
- wl->if_priv = func;
- wl->if_ops = &sdio_ops;
+ glue->dev = &func->dev;
/* Grab access to FN0 for ELP reg. */
func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
@@ -250,80 +208,79 @@ static int __devinit wl1271_probe(struct sdio_func *func,
wlan_data = wl12xx_get_platform_data();
if (IS_ERR(wlan_data)) {
ret = PTR_ERR(wlan_data);
- wl1271_error("missing wlan platform data: %d", ret);
- goto out_free;
+ dev_err(glue->dev, "missing wlan platform data: %d\n", ret);
+ goto out_free_glue;
}
- wl->irq = wlan_data->irq;
- wl->ref_clock = wlan_data->board_ref_clock;
- wl->tcxo_clock = wlan_data->board_tcxo_clock;
- wl->platform_quirks = wlan_data->platform_quirks;
+ /* if sdio can keep power while host is suspended, enable wow */
+ mmcflags = sdio_get_host_pm_caps(func);
+ dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags);
- if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
- irqflags = IRQF_TRIGGER_RISING;
- else
- irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
-
- ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
- irqflags,
- DRIVER_NAME, wl);
- if (ret < 0) {
- wl1271_error("request_irq() failed: %d", ret);
- goto out_free;
- }
+ if (mmcflags & MMC_PM_KEEP_POWER)
+ wlan_data->pwr_in_suspend = true;
+
+ wlan_data->ops = &sdio_ops;
- ret = enable_irq_wake(wl->irq);
- if (!ret) {
- wl->irq_wake_enabled = true;
- device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 1);
+ sdio_set_drvdata(func, glue);
- /* if sdio can keep power while host is suspended, enable wow */
- mmcflags = sdio_get_host_pm_caps(func);
- wl1271_debug(DEBUG_SDIO, "sdio PM caps = 0x%x", mmcflags);
+ /* Tell PM core that we don't need the card to be powered now */
+ pm_runtime_put_noidle(&func->dev);
- if (mmcflags & MMC_PM_KEEP_POWER)
- hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
+ glue->core = platform_device_alloc("wl12xx", -1);
+ if (!glue->core) {
+ dev_err(glue->dev, "can't allocate platform_device");
+ ret = -ENOMEM;
+ goto out_free_glue;
}
- disable_irq(wl->irq);
- ret = wl1271_init_ieee80211(wl);
- if (ret)
- goto out_irq;
+ glue->core->dev.parent = &func->dev;
- ret = wl1271_register_hw(wl);
- if (ret)
- goto out_irq;
+ memset(res, 0x00, sizeof(res));
- sdio_set_drvdata(func, wl);
+ res[0].start = wlan_data->irq;
+ res[0].flags = IORESOURCE_IRQ;
+ res[0].name = "irq";
- /* Tell PM core that we don't need the card to be powered now */
- pm_runtime_put_noidle(&func->dev);
+ ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
+ if (ret) {
+ dev_err(glue->dev, "can't add resources\n");
+ goto out_dev_put;
+ }
+ ret = platform_device_add_data(glue->core, wlan_data,
+ sizeof(*wlan_data));
+ if (ret) {
+ dev_err(glue->dev, "can't add platform data\n");
+ goto out_dev_put;
+ }
+
+ ret = platform_device_add(glue->core);
+ if (ret) {
+ dev_err(glue->dev, "can't add platform device\n");
+ goto out_dev_put;
+ }
return 0;
- out_irq:
- free_irq(wl->irq, wl);
+out_dev_put:
+ platform_device_put(glue->core);
- out_free:
- wl1271_free_hw(wl);
+out_free_glue:
+ kfree(glue);
+out:
return ret;
}
static void __devexit wl1271_remove(struct sdio_func *func)
{
- struct wl1271 *wl = sdio_get_drvdata(func);
+ struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
/* Undo decrement done above in wl1271_probe */
pm_runtime_get_noresume(&func->dev);
- wl1271_unregister_hw(wl);
- if (wl->irq_wake_enabled) {
- device_init_wakeup(wl1271_sdio_wl_to_dev(wl), 0);
- disable_irq_wake(wl->irq);
- }
- free_irq(wl->irq, wl);
- wl1271_free_hw(wl);
+ platform_device_del(glue->core);
+ platform_device_put(glue->core);
+ kfree(glue);
}
#ifdef CONFIG_PM
@@ -332,20 +289,21 @@ static int wl1271_suspend(struct device *dev)
/* Tell MMC/SDIO core it's OK to power down the card
* (if it isn't already), but not to remove it completely */
struct sdio_func *func = dev_to_sdio_func(dev);
- struct wl1271 *wl = sdio_get_drvdata(func);
+ struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
+ struct wl1271 *wl = platform_get_drvdata(glue->core);
mmc_pm_flag_t sdio_flags;
int ret = 0;
- wl1271_debug(DEBUG_MAC80211, "wl1271 suspend. wow_enabled: %d",
- wl->wow_enabled);
+ dev_dbg(dev, "wl1271 suspend. wow_enabled: %d\n",
+ wl->wow_enabled);
/* check whether sdio should keep power */
if (wl->wow_enabled) {
sdio_flags = sdio_get_host_pm_caps(func);
if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
- wl1271_error("can't keep power while host "
- "is suspended");
+ dev_err(dev, "can't keep power while host "
+ "is suspended\n");
ret = -EINVAL;
goto out;
}
@@ -353,7 +311,7 @@ static int wl1271_suspend(struct device *dev)
/* keep power while host suspended */
ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
if (ret) {
- wl1271_error("error while trying to keep power");
+ dev_err(dev, "error while trying to keep power\n");
goto out;
}
@@ -367,9 +325,10 @@ out:
static int wl1271_resume(struct device *dev)
{
struct sdio_func *func = dev_to_sdio_func(dev);
- struct wl1271 *wl = sdio_get_drvdata(func);
+ struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
+ struct wl1271 *wl = platform_get_drvdata(glue->core);
- wl1271_debug(DEBUG_MAC80211, "wl1271 resume");
+ dev_dbg(dev, "wl1271 resume\n");
if (wl->wow_enabled) {
/* claim back host */
sdio_claim_host(func);
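
The sdio.c rewrite above (mirrored later in spi.c) turns the bus driver into thin glue that registers a child "wl12xx" platform device carrying the IRQ resource and the platform data, instead of allocating the mac80211 hw itself. A minimal sketch of that probe-side pattern, with error handling condensed:

#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct platform_device *example_register_core(struct device *parent,
						      int irq, void *pdata,
						      size_t pdata_len)
{
	struct platform_device *core;
	struct resource res = {
		.start = irq,
		.flags = IORESOURCE_IRQ,
		.name  = "irq",
	};

	core = platform_device_alloc("wl12xx", -1);
	if (!core)
		return NULL;

	core->dev.parent = parent;

	/* hand the IRQ and the platform data down to the core driver */
	if (platform_device_add_resources(core, &res, 1) ||
	    platform_device_add_data(core, pdata, pdata_len) ||
	    platform_device_add(core)) {
		platform_device_put(core);
		return NULL;
	}

	return core;
}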
diff --git a/drivers/net/wireless/wl12xx/sdio_test.c b/drivers/net/wireless/wl12xx/sdio_test.c
deleted file mode 100644
index f25d5d9..0000000
--- a/drivers/net/wireless/wl12xx/sdio_test.c
+++ /dev/null
@@ -1,543 +0,0 @@
-/*
- * SDIO testing driver for wl12xx
- *
- * Copyright (C) 2010 Nokia Corporation
- *
- * Contact: Roger Quadros <roger.quadros@nokia.com>
- *
- * wl12xx read/write routines taken from the main module
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-#include <linux/irq.h>
-#include <linux/module.h>
-#include <linux/crc7.h>
-#include <linux/vmalloc.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/host.h>
-#include <linux/gpio.h>
-#include <linux/wl12xx.h>
-#include <linux/kthread.h>
-#include <linux/firmware.h>
-#include <linux/pm_runtime.h>
-
-#include "wl12xx.h"
-#include "io.h"
-#include "boot.h"
-
-#ifndef SDIO_VENDOR_ID_TI
-#define SDIO_VENDOR_ID_TI 0x0097
-#endif
-
-#ifndef SDIO_DEVICE_ID_TI_WL1271
-#define SDIO_DEVICE_ID_TI_WL1271 0x4076
-#endif
-
-static bool rx, tx;
-
-module_param(rx, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(rx, "Perform rx test. Default (0). "
- "This test continuously reads data from the SDIO device.\n");
-
-module_param(tx, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(tx, "Perform tx test. Default (0). "
- "This test continuously writes data to the SDIO device.\n");
-
-struct wl1271_test {
- struct wl1271 wl;
- struct task_struct *test_task;
-};
-
-static const struct sdio_device_id wl1271_devices[] = {
- { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
- {}
-};
-
-static inline struct sdio_func *wl_to_func(struct wl1271 *wl)
-{
- return wl->if_priv;
-}
-
-static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
-{
- return &(wl_to_func(wl)->dev);
-}
-
-static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
-{
- int ret = 0;
- struct sdio_func *func = wl_to_func(wl);
-
- if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
- ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
- wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
- addr, ((u8 *)buf)[0]);
- } else {
- if (fixed)
- ret = sdio_readsb(func, buf, addr, len);
- else
- ret = sdio_memcpy_fromio(func, buf, addr, len);
-
- wl1271_debug(DEBUG_SDIO, "sdio read 53 addr 0x%x, %zu bytes",
- addr, len);
- wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
- }
-
- if (ret)
- wl1271_error("sdio read failed (%d)", ret);
-}
-
-static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
-{
- int ret = 0;
- struct sdio_func *func = wl_to_func(wl);
-
- if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
- sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
- wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
- addr, ((u8 *)buf)[0]);
- } else {
- wl1271_debug(DEBUG_SDIO, "sdio write 53 addr 0x%x, %zu bytes",
- addr, len);
- wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
-
- if (fixed)
- ret = sdio_writesb(func, addr, buf, len);
- else
- ret = sdio_memcpy_toio(func, addr, buf, len);
- }
- if (ret)
- wl1271_error("sdio write failed (%d)", ret);
-
-}
-
-static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable)
-{
- struct sdio_func *func = wl_to_func(wl);
- int ret;
-
- /* Let the SDIO stack handle wlan_enable control, so we
- * keep host claimed while wlan is in use to keep wl1271
- * alive.
- */
- if (enable) {
- /* Power up the card */
- ret = pm_runtime_get_sync(&func->dev);
- if (ret < 0)
- goto out;
-
- /* Runtime PM might be disabled, power up the card manually */
- ret = mmc_power_restore_host(func->card->host);
- if (ret < 0)
- goto out;
-
- sdio_claim_host(func);
- sdio_enable_func(func);
- } else {
- sdio_disable_func(func);
- sdio_release_host(func);
-
- /* Runtime PM might be disabled, power off the card manually */
- ret = mmc_power_save_host(func->card->host);
- if (ret < 0)
- goto out;
-
- /* Power down the card */
- ret = pm_runtime_put_sync(&func->dev);
- }
-
-out:
- return ret;
-}
-
-static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
-{
-}
-
-static void wl1271_sdio_enable_interrupts(struct wl1271 *wl)
-{
-}
-
-
-static struct wl1271_if_operations sdio_ops = {
- .read = wl1271_sdio_raw_read,
- .write = wl1271_sdio_raw_write,
- .power = wl1271_sdio_set_power,
- .dev = wl1271_sdio_wl_to_dev,
- .enable_irq = wl1271_sdio_enable_interrupts,
- .disable_irq = wl1271_sdio_disable_interrupts,
-};
-
-static void wl1271_fw_wakeup(struct wl1271 *wl)
-{
- u32 elp_reg;
-
- elp_reg = ELPCTRL_WAKE_UP;
- wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
-}
-
-static int wl1271_fetch_firmware(struct wl1271 *wl)
-{
- const struct firmware *fw;
- int ret;
-
- if (wl->chip.id == CHIP_ID_1283_PG20)
- ret = request_firmware(&fw, WL128X_FW_NAME,
- wl1271_wl_to_dev(wl));
- else
- ret = request_firmware(&fw, WL127X_FW_NAME,
- wl1271_wl_to_dev(wl));
-
- if (ret < 0) {
- wl1271_error("could not get firmware: %d", ret);
- return ret;
- }
-
- if (fw->size % 4) {
- wl1271_error("firmware size is not multiple of 32 bits: %zu",
- fw->size);
- ret = -EILSEQ;
- goto out;
- }
-
- wl->fw_len = fw->size;
- wl->fw = vmalloc(wl->fw_len);
-
- if (!wl->fw) {
- wl1271_error("could not allocate memory for the firmware");
- ret = -ENOMEM;
- goto out;
- }
-
- memcpy(wl->fw, fw->data, wl->fw_len);
-
- ret = 0;
-
-out:
- release_firmware(fw);
-
- return ret;
-}
-
-static int wl1271_fetch_nvs(struct wl1271 *wl)
-{
- const struct firmware *fw;
- int ret;
-
- ret = request_firmware(&fw, WL12XX_NVS_NAME, wl1271_wl_to_dev(wl));
-
- if (ret < 0) {
- wl1271_error("could not get nvs file: %d", ret);
- return ret;
- }
-
- wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
-
- if (!wl->nvs) {
- wl1271_error("could not allocate memory for the nvs file");
- ret = -ENOMEM;
- goto out;
- }
-
- wl->nvs_len = fw->size;
-
-out:
- release_firmware(fw);
-
- return ret;
-}
-
-static int wl1271_chip_wakeup(struct wl1271 *wl)
-{
- struct wl1271_partition_set partition;
- int ret;
-
- msleep(WL1271_PRE_POWER_ON_SLEEP);
- ret = wl1271_power_on(wl);
- if (ret)
- return ret;
-
- msleep(WL1271_POWER_ON_SLEEP);
-
- /* We don't need a real memory partition here, because we only want
- * to use the registers at this point. */
- memset(&partition, 0, sizeof(partition));
- partition.reg.start = REGISTERS_BASE;
- partition.reg.size = REGISTERS_DOWN_SIZE;
- wl1271_set_partition(wl, &partition);
-
- /* ELP module wake up */
- wl1271_fw_wakeup(wl);
-
- /* whal_FwCtrl_BootSm() */
-
- /* 0. read chip id from CHIP_ID */
- wl->chip.id = wl1271_read32(wl, CHIP_ID_B);
-
- /* 1. check if chip id is valid */
-
- switch (wl->chip.id) {
- case CHIP_ID_1271_PG10:
- wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete",
- wl->chip.id);
- break;
- case CHIP_ID_1271_PG20:
- wl1271_notice("chip id 0x%x (1271 PG20)",
- wl->chip.id);
- break;
- case CHIP_ID_1283_PG20:
- wl1271_notice("chip id 0x%x (1283 PG20)",
- wl->chip.id);
- break;
- case CHIP_ID_1283_PG10:
- default:
- wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
- return -ENODEV;
- }
-
- return ret;
-}
-
-static struct wl1271_partition_set part_down = {
- .mem = {
- .start = 0x00000000,
- .size = 0x000177c0
- },
- .reg = {
- .start = REGISTERS_BASE,
- .size = 0x00008800
- },
- .mem2 = {
- .start = 0x00000000,
- .size = 0x00000000
- },
- .mem3 = {
- .start = 0x00000000,
- .size = 0x00000000
- },
-};
-
-static int tester(void *data)
-{
- struct wl1271 *wl = data;
- struct sdio_func *func = wl_to_func(wl);
- struct device *pdev = &func->dev;
- int ret = 0;
- bool rx_started = 0;
- bool tx_started = 0;
- uint8_t *tx_buf, *rx_buf;
- int test_size = PAGE_SIZE;
- u32 addr = 0;
- struct wl1271_partition_set partition;
-
- /* We assume chip is powered up and firmware fetched */
-
- memcpy(&partition, &part_down, sizeof(partition));
- partition.mem.start = addr;
- wl1271_set_partition(wl, &partition);
-
- tx_buf = kmalloc(test_size, GFP_KERNEL);
- rx_buf = kmalloc(test_size, GFP_KERNEL);
- if (!tx_buf || !rx_buf) {
- dev_err(pdev,
- "Could not allocate memory. Test will not run.\n");
- ret = -ENOMEM;
- goto free;
- }
-
- memset(tx_buf, 0x5a, test_size);
-
- /* write something in data area so we can read it back */
- wl1271_write(wl, addr, tx_buf, test_size, false);
-
- while (!kthread_should_stop()) {
- if (rx && !rx_started) {
- dev_info(pdev, "starting rx test\n");
- rx_started = 1;
- } else if (!rx && rx_started) {
- dev_info(pdev, "stopping rx test\n");
- rx_started = 0;
- }
-
- if (tx && !tx_started) {
- dev_info(pdev, "starting tx test\n");
- tx_started = 1;
- } else if (!tx && tx_started) {
- dev_info(pdev, "stopping tx test\n");
- tx_started = 0;
- }
-
- if (rx_started)
- wl1271_read(wl, addr, rx_buf, test_size, false);
-
- if (tx_started)
- wl1271_write(wl, addr, tx_buf, test_size, false);
-
- if (!rx_started && !tx_started)
- msleep(100);
- }
-
-free:
- kfree(tx_buf);
- kfree(rx_buf);
- return ret;
-}
-
-static int __devinit wl1271_probe(struct sdio_func *func,
- const struct sdio_device_id *id)
-{
- const struct wl12xx_platform_data *wlan_data;
- struct wl1271 *wl;
- struct wl1271_test *wl_test;
- int ret = 0;
-
- /* wl1271 has 2 sdio functions we handle just the wlan part */
- if (func->num != 0x02)
- return -ENODEV;
-
- wl_test = kzalloc(sizeof(struct wl1271_test), GFP_KERNEL);
- if (!wl_test) {
- dev_err(&func->dev, "Could not allocate memory\n");
- return -ENOMEM;
- }
-
- wl = &wl_test->wl;
-
- wl->if_priv = func;
- wl->if_ops = &sdio_ops;
-
- /* Grab access to FN0 for ELP reg. */
- func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
-
- /* Use block mode for transferring over one block size of data */
- func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
-
- wlan_data = wl12xx_get_platform_data();
- if (IS_ERR(wlan_data)) {
- ret = PTR_ERR(wlan_data);
- dev_err(&func->dev, "missing wlan platform data: %d\n", ret);
- goto out_free;
- }
-
- wl->irq = wlan_data->irq;
- wl->ref_clock = wlan_data->board_ref_clock;
- wl->tcxo_clock = wlan_data->board_tcxo_clock;
-
- sdio_set_drvdata(func, wl_test);
-
- /* power up the device */
- ret = wl1271_chip_wakeup(wl);
- if (ret) {
- dev_err(&func->dev, "could not wake up chip\n");
- goto out_free;
- }
-
- if (wl->fw == NULL) {
- ret = wl1271_fetch_firmware(wl);
- if (ret < 0) {
- dev_err(&func->dev, "firmware fetch error\n");
- goto out_off;
- }
- }
-
- /* fetch NVS */
- if (wl->nvs == NULL) {
- ret = wl1271_fetch_nvs(wl);
- if (ret < 0) {
- dev_err(&func->dev, "NVS fetch error\n");
- goto out_off;
- }
- }
-
- ret = wl1271_load_firmware(wl);
- if (ret < 0) {
- dev_err(&func->dev, "firmware load error: %d\n", ret);
- goto out_free;
- }
-
- dev_info(&func->dev, "initialized\n");
-
- /* I/O testing will be done in the tester thread */
-
- wl_test->test_task = kthread_run(tester, wl, "sdio_tester");
- if (IS_ERR(wl_test->test_task)) {
- dev_err(&func->dev, "unable to create kernel thread\n");
- ret = PTR_ERR(wl_test->test_task);
- goto out_free;
- }
-
- return 0;
-
-out_off:
- /* power off the chip */
- wl1271_power_off(wl);
-
-out_free:
- kfree(wl_test);
- return ret;
-}
-
-static void __devexit wl1271_remove(struct sdio_func *func)
-{
- struct wl1271_test *wl_test = sdio_get_drvdata(func);
-
- /* stop the I/O test thread */
- kthread_stop(wl_test->test_task);
-
- /* power off the chip */
- wl1271_power_off(&wl_test->wl);
-
- vfree(wl_test->wl.fw);
- wl_test->wl.fw = NULL;
- kfree(wl_test->wl.nvs);
- wl_test->wl.nvs = NULL;
-
- kfree(wl_test);
-}
-
-static struct sdio_driver wl1271_sdio_driver = {
- .name = "wl12xx_sdio_test",
- .id_table = wl1271_devices,
- .probe = wl1271_probe,
- .remove = __devexit_p(wl1271_remove),
-};
-
-static int __init wl1271_init(void)
-{
- int ret;
-
- ret = sdio_register_driver(&wl1271_sdio_driver);
- if (ret < 0)
- pr_err("failed to register sdio driver: %d\n", ret);
-
- return ret;
-}
-module_init(wl1271_init);
-
-static void __exit wl1271_exit(void)
-{
- sdio_unregister_driver(&wl1271_sdio_driver);
-}
-module_exit(wl1271_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Roger Quadros <roger.quadros@nokia.com>");
-
diff --git a/drivers/net/wireless/wl12xx/spi.c b/drivers/net/wireless/wl12xx/spi.c
index 0f97186..92caa7c 100644
--- a/drivers/net/wireless/wl12xx/spi.c
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -27,6 +27,7 @@
#include <linux/crc7.h>
#include <linux/spi/spi.h>
#include <linux/wl12xx.h>
+#include <linux/platform_device.h>
#include <linux/slab.h>
#include "wl12xx.h"
@@ -69,35 +70,22 @@
#define WSPI_MAX_NUM_OF_CHUNKS (WL1271_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE)
-static inline struct spi_device *wl_to_spi(struct wl1271 *wl)
-{
- return wl->if_priv;
-}
-
-static struct device *wl1271_spi_wl_to_dev(struct wl1271 *wl)
-{
- return &(wl_to_spi(wl)->dev);
-}
-
-static void wl1271_spi_disable_interrupts(struct wl1271 *wl)
-{
- disable_irq(wl->irq);
-}
-
-static void wl1271_spi_enable_interrupts(struct wl1271 *wl)
-{
- enable_irq(wl->irq);
-}
+struct wl12xx_spi_glue {
+ struct device *dev;
+ struct platform_device *core;
+};
-static void wl1271_spi_reset(struct wl1271 *wl)
+static void wl12xx_spi_reset(struct device *child)
{
+ struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
u8 *cmd;
struct spi_transfer t;
struct spi_message m;
cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
if (!cmd) {
- wl1271_error("could not allocate cmd for spi reset");
+ dev_err(child->parent,
+ "could not allocate cmd for spi reset\n");
return;
}
@@ -110,21 +98,22 @@ static void wl1271_spi_reset(struct wl1271 *wl)
t.len = WSPI_INIT_CMD_LEN;
spi_message_add_tail(&t, &m);
- spi_sync(wl_to_spi(wl), &m);
+ spi_sync(to_spi_device(glue->dev), &m);
- wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
kfree(cmd);
}
-static void wl1271_spi_init(struct wl1271 *wl)
+static void wl12xx_spi_init(struct device *child)
{
+ struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd;
struct spi_transfer t;
struct spi_message m;
cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
if (!cmd) {
- wl1271_error("could not allocate cmd for spi init");
+ dev_err(child->parent,
+ "could not allocate cmd for spi init\n");
return;
}
@@ -165,15 +154,16 @@ static void wl1271_spi_init(struct wl1271 *wl)
t.len = WSPI_INIT_CMD_LEN;
spi_message_add_tail(&t, &m);
- spi_sync(wl_to_spi(wl), &m);
- wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
+ spi_sync(to_spi_device(glue->dev), &m);
kfree(cmd);
}
#define WL1271_BUSY_WORD_TIMEOUT 1000
-static int wl1271_spi_read_busy(struct wl1271 *wl)
+static int wl12xx_spi_read_busy(struct device *child)
{
+ struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
+ struct wl1271 *wl = dev_get_drvdata(child);
struct spi_transfer t[1];
struct spi_message m;
u32 *busy_buf;
@@ -194,20 +184,22 @@ static int wl1271_spi_read_busy(struct wl1271 *wl)
t[0].len = sizeof(u32);
t[0].cs_change = true;
spi_message_add_tail(&t[0], &m);
- spi_sync(wl_to_spi(wl), &m);
+ spi_sync(to_spi_device(glue->dev), &m);
if (*busy_buf & 0x1)
return 0;
}
/* The SPI bus is unresponsive, the read failed. */
- wl1271_error("SPI read busy-word timeout!\n");
+ dev_err(child->parent, "SPI read busy-word timeout!\n");
return -ETIMEDOUT;
}
-static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
+static void wl12xx_spi_raw_read(struct device *child, int addr, void *buf,
size_t len, bool fixed)
{
+ struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
+ struct wl1271 *wl = dev_get_drvdata(child);
struct spi_transfer t[2];
struct spi_message m;
u32 *busy_buf;
@@ -243,10 +235,10 @@ static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
t[1].cs_change = true;
spi_message_add_tail(&t[1], &m);
- spi_sync(wl_to_spi(wl), &m);
+ spi_sync(to_spi_device(glue->dev), &m);
if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) &&
- wl1271_spi_read_busy(wl)) {
+ wl12xx_spi_read_busy(child)) {
memset(buf, 0, chunk_len);
return;
}
@@ -259,10 +251,7 @@ static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
t[0].cs_change = true;
spi_message_add_tail(&t[0], &m);
- spi_sync(wl_to_spi(wl), &m);
-
- wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd));
- wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, chunk_len);
+ spi_sync(to_spi_device(glue->dev), &m);
if (!fixed)
addr += chunk_len;
@@ -271,9 +260,10 @@ static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
}
}
-static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
- size_t len, bool fixed)
+static void wl12xx_spi_raw_write(struct device *child, int addr, void *buf,
+ size_t len, bool fixed)
{
+ struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
struct spi_transfer t[2 * WSPI_MAX_NUM_OF_CHUNKS];
struct spi_message m;
u32 commands[WSPI_MAX_NUM_OF_CHUNKS];
@@ -308,9 +298,6 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
t[i].len = chunk_len;
spi_message_add_tail(&t[i++], &m);
- wl1271_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd));
- wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, chunk_len);
-
if (!fixed)
addr += chunk_len;
buf += chunk_len;
@@ -318,72 +305,41 @@ static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
cmd++;
}
- spi_sync(wl_to_spi(wl), &m);
-}
-
-static irqreturn_t wl1271_hardirq(int irq, void *cookie)
-{
- struct wl1271 *wl = cookie;
- unsigned long flags;
-
- wl1271_debug(DEBUG_IRQ, "IRQ");
-
- /* complete the ELP completion */
- spin_lock_irqsave(&wl->wl_lock, flags);
- set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
- if (wl->elp_compl) {
- complete(wl->elp_compl);
- wl->elp_compl = NULL;
- }
- spin_unlock_irqrestore(&wl->wl_lock, flags);
-
- return IRQ_WAKE_THREAD;
-}
-
-static int wl1271_spi_set_power(struct wl1271 *wl, bool enable)
-{
- if (wl->set_power)
- wl->set_power(enable);
-
- return 0;
+ spi_sync(to_spi_device(glue->dev), &m);
}
static struct wl1271_if_operations spi_ops = {
- .read = wl1271_spi_raw_read,
- .write = wl1271_spi_raw_write,
- .reset = wl1271_spi_reset,
- .init = wl1271_spi_init,
- .power = wl1271_spi_set_power,
- .dev = wl1271_spi_wl_to_dev,
- .enable_irq = wl1271_spi_enable_interrupts,
- .disable_irq = wl1271_spi_disable_interrupts,
+ .read = wl12xx_spi_raw_read,
+ .write = wl12xx_spi_raw_write,
+ .reset = wl12xx_spi_reset,
+ .init = wl12xx_spi_init,
.set_block_size = NULL,
};
static int __devinit wl1271_probe(struct spi_device *spi)
{
+ struct wl12xx_spi_glue *glue;
struct wl12xx_platform_data *pdata;
- struct ieee80211_hw *hw;
- struct wl1271 *wl;
- unsigned long irqflags;
- int ret;
+ struct resource res[1];
+ int ret = -ENOMEM;
pdata = spi->dev.platform_data;
if (!pdata) {
- wl1271_error("no platform data");
+ dev_err(&spi->dev, "no platform data\n");
return -ENODEV;
}
- hw = wl1271_alloc_hw();
- if (IS_ERR(hw))
- return PTR_ERR(hw);
+ pdata->ops = &spi_ops;
- wl = hw->priv;
+ glue = kzalloc(sizeof(*glue), GFP_KERNEL);
+ if (!glue) {
+ dev_err(&spi->dev, "can't allocate glue\n");
+ goto out;
+ }
- dev_set_drvdata(&spi->dev, wl);
- wl->if_priv = spi;
+ glue->dev = &spi->dev;
- wl->if_ops = &spi_ops;
+ spi_set_drvdata(spi, glue);
/* This is the only SPI value that we need to set here, the rest
* comes from the board-peripherals file */
@@ -391,69 +347,61 @@ static int __devinit wl1271_probe(struct spi_device *spi)
ret = spi_setup(spi);
if (ret < 0) {
- wl1271_error("spi_setup failed");
- goto out_free;
+ dev_err(glue->dev, "spi_setup failed\n");
+ goto out_free_glue;
}
- wl->set_power = pdata->set_power;
- if (!wl->set_power) {
- wl1271_error("set power function missing in platform data");
- ret = -ENODEV;
- goto out_free;
+ glue->core = platform_device_alloc("wl12xx", -1);
+ if (!glue->core) {
+ dev_err(glue->dev, "can't allocate platform_device\n");
+ ret = -ENOMEM;
+ goto out_free_glue;
}
- wl->ref_clock = pdata->board_ref_clock;
- wl->tcxo_clock = pdata->board_tcxo_clock;
- wl->platform_quirks = pdata->platform_quirks;
+ glue->core->dev.parent = &spi->dev;
- if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
- irqflags = IRQF_TRIGGER_RISING;
- else
- irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
+ memset(res, 0x00, sizeof(res));
- wl->irq = spi->irq;
- if (wl->irq < 0) {
- wl1271_error("irq missing in platform data");
- ret = -ENODEV;
- goto out_free;
- }
+ res[0].start = spi->irq;
+ res[0].flags = IORESOURCE_IRQ;
+ res[0].name = "irq";
- ret = request_threaded_irq(wl->irq, wl1271_hardirq, wl1271_irq,
- irqflags,
- DRIVER_NAME, wl);
- if (ret < 0) {
- wl1271_error("request_irq() failed: %d", ret);
- goto out_free;
+ ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
+ if (ret) {
+ dev_err(glue->dev, "can't add resources\n");
+ goto out_dev_put;
}
- disable_irq(wl->irq);
-
- ret = wl1271_init_ieee80211(wl);
- if (ret)
- goto out_irq;
+ ret = platform_device_add_data(glue->core, pdata, sizeof(*pdata));
+ if (ret) {
+ dev_err(glue->dev, "can't add platform data\n");
+ goto out_dev_put;
+ }
- ret = wl1271_register_hw(wl);
- if (ret)
- goto out_irq;
+ ret = platform_device_add(glue->core);
+ if (ret) {
+ dev_err(glue->dev, "can't register platform device\n");
+ goto out_dev_put;
+ }
return 0;
- out_irq:
- free_irq(wl->irq, wl);
-
- out_free:
- wl1271_free_hw(wl);
+out_dev_put:
+ platform_device_put(glue->core);
+out_free_glue:
+ kfree(glue);
+out:
return ret;
}
static int __devexit wl1271_remove(struct spi_device *spi)
{
- struct wl1271 *wl = dev_get_drvdata(&spi->dev);
+ struct wl12xx_spi_glue *glue = spi_get_drvdata(spi);
- wl1271_unregister_hw(wl);
- free_irq(wl->irq, wl);
- wl1271_free_hw(wl);
+ platform_device_del(glue->core);
+ platform_device_put(glue->core);
+ kfree(glue);
return 0;
}
@@ -462,7 +410,6 @@ static int __devexit wl1271_remove(struct spi_device *spi)
static struct spi_driver wl1271_spi_driver = {
.driver = {
.name = "wl1271_spi",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
diff --git a/drivers/net/wireless/wl12xx/testmode.c b/drivers/net/wireless/wl12xx/testmode.c
index 4ae8eff..25093c0 100644
--- a/drivers/net/wireless/wl12xx/testmode.c
+++ b/drivers/net/wireless/wl12xx/testmode.c
@@ -26,8 +26,10 @@
#include <net/genetlink.h>
#include "wl12xx.h"
+#include "debug.h"
#include "acx.h"
#include "reg.h"
+#include "ps.h"
#define WL1271_TM_MAX_DATA_LENGTH 1024
@@ -36,6 +38,7 @@ enum wl1271_tm_commands {
WL1271_TM_CMD_TEST,
WL1271_TM_CMD_INTERROGATE,
WL1271_TM_CMD_CONFIGURE,
+ WL1271_TM_CMD_NVS_PUSH, /* Not in use. Keep to not break ABI */
WL1271_TM_CMD_SET_PLT_MODE,
WL1271_TM_CMD_RECOVER,
@@ -87,31 +90,47 @@ static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
return -EMSGSIZE;
mutex_lock(&wl->mutex);
- ret = wl1271_cmd_test(wl, buf, buf_len, answer);
- mutex_unlock(&wl->mutex);
+ if (wl->state == WL1271_STATE_OFF) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ ret = wl1271_cmd_test(wl, buf, buf_len, answer);
if (ret < 0) {
wl1271_warning("testmode cmd test failed: %d", ret);
- return ret;
+ goto out_sleep;
}
if (answer) {
len = nla_total_size(buf_len);
skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len);
- if (!skb)
- return -ENOMEM;
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out_sleep;
+ }
NLA_PUT(skb, WL1271_TM_ATTR_DATA, buf_len, buf);
ret = cfg80211_testmode_reply(skb);
if (ret < 0)
- return ret;
+ goto out_sleep;
}
- return 0;
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
+
+ return ret;
nla_put_failure:
kfree_skb(skb);
- return -EMSGSIZE;
+ ret = -EMSGSIZE;
+ goto out_sleep;
}
static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
@@ -128,33 +147,53 @@ static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]);
+ mutex_lock(&wl->mutex);
+
+ if (wl->state == WL1271_STATE_OFF) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (!cmd)
- return -ENOMEM;
+ if (!cmd) {
+ ret = -ENOMEM;
+ goto out_sleep;
+ }
- mutex_lock(&wl->mutex);
ret = wl1271_cmd_interrogate(wl, ie_id, cmd, sizeof(*cmd));
- mutex_unlock(&wl->mutex);
-
if (ret < 0) {
wl1271_warning("testmode cmd interrogate failed: %d", ret);
- kfree(cmd);
- return ret;
+ goto out_free;
}
skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd));
if (!skb) {
- kfree(cmd);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_free;
}
NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd);
+ ret = cfg80211_testmode_reply(skb);
+ if (ret < 0)
+ goto out_free;
+
+out_free:
+ kfree(cmd);
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+out:
+ mutex_unlock(&wl->mutex);
- return 0;
+ return ret;
nla_put_failure:
kfree_skb(skb);
- return -EMSGSIZE;
+ ret = -EMSGSIZE;
+ goto out_free;
}
static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[])
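
The testmode hunks above wrap each firmware command in the usual bracket: take wl->mutex, bail out if the chip is off, wake it from ELP, run the command, then sleep and unlock on every exit path. A minimal self-contained sketch of that goto ladder, using hypothetical example_* helpers in place of the wl1271_* calls:

#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/types.h>

/* Hypothetical device context and helpers, standing in for struct wl1271,
 * wl1271_ps_elp_wakeup(), wl1271_ps_elp_sleep() and the firmware command. */
struct example_dev {
	struct mutex lock;
	bool powered;
};

static int example_wakeup(struct example_dev *d) { return 0; }
static void example_sleep(struct example_dev *d) { }
static int example_fw_cmd(struct example_dev *d) { return 0; }

static int example_run_cmd(struct example_dev *d)
{
	int ret;

	mutex_lock(&d->lock);

	if (!d->powered) {
		ret = -EINVAL;
		goto out;
	}

	ret = example_wakeup(d);
	if (ret < 0)
		goto out;

	ret = example_fw_cmd(d);

	example_sleep(d);
out:
	mutex_unlock(&d->lock);
	return ret;
}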
diff --git a/drivers/net/wireless/wl12xx/tx.c b/drivers/net/wireless/wl12xx/tx.c
index bad9e29..4508ccd 100644
--- a/drivers/net/wireless/wl12xx/tx.c
+++ b/drivers/net/wireless/wl12xx/tx.c
@@ -26,22 +26,24 @@
#include <linux/etherdevice.h>
#include "wl12xx.h"
+#include "debug.h"
#include "io.h"
#include "reg.h"
#include "ps.h"
#include "tx.h"
#include "event.h"
-static int wl1271_set_default_wep_key(struct wl1271 *wl, u8 id)
+static int wl1271_set_default_wep_key(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif, u8 id)
{
int ret;
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
if (is_ap)
ret = wl12xx_cmd_set_default_wep_key(wl, id,
- wl->ap_bcast_hlid);
+ wlvif->ap.bcast_hlid);
else
- ret = wl12xx_cmd_set_default_wep_key(wl, id, wl->sta_hlid);
+ ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid);
if (ret < 0)
return ret;
@@ -76,7 +78,8 @@ static void wl1271_free_tx_id(struct wl1271 *wl, int id)
}
static int wl1271_tx_update_filters(struct wl1271 *wl,
- struct sk_buff *skb)
+ struct wl12xx_vif *wlvif,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
int ret;
@@ -92,15 +95,11 @@ static int wl1271_tx_update_filters(struct wl1271 *wl,
if (!ieee80211_is_auth(hdr->frame_control))
return 0;
- if (wl->dev_hlid != WL12XX_INVALID_LINK_ID)
+ if (wlvif->dev_hlid != WL12XX_INVALID_LINK_ID)
goto out;
wl1271_debug(DEBUG_CMD, "starting device role for roaming");
- ret = wl12xx_cmd_role_start_dev(wl);
- if (ret < 0)
- goto out;
-
- ret = wl12xx_roc(wl, wl->dev_role_id);
+ ret = wl12xx_start_dev(wl, wlvif);
if (ret < 0)
goto out;
out:
@@ -123,18 +122,16 @@ static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
}
-static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
+static void wl1271_tx_regulate_link(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif,
+ u8 hlid)
{
bool fw_ps, single_sta;
u8 tx_pkts;
- /* only regulate station links */
- if (hlid < WL1271_AP_STA_HLID_START)
+ if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
return;
- if (WARN_ON(!wl1271_is_active_sta(wl, hlid)))
- return;
-
fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
tx_pkts = wl->links[hlid].allocated_pkts;
single_sta = (wl->active_sta_count == 1);
@@ -146,7 +143,7 @@ static void wl1271_tx_regulate_link(struct wl1271 *wl, u8 hlid)
* case FW-memory congestion is not a problem.
*/
if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
- wl1271_ps_link_start(wl, hlid, true);
+ wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
@@ -154,7 +151,8 @@ bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
return wl->dummy_packet == skb;
}
-u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb)
+u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb)
{
struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);
@@ -167,49 +165,51 @@ u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb)
} else {
struct ieee80211_hdr *hdr;
- if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
+ if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
return wl->system_hlid;
hdr = (struct ieee80211_hdr *)skb->data;
if (ieee80211_is_mgmt(hdr->frame_control))
- return wl->ap_global_hlid;
+ return wlvif->ap.global_hlid;
else
- return wl->ap_bcast_hlid;
+ return wlvif->ap.bcast_hlid;
}
}
-static u8 wl1271_tx_get_hlid(struct wl1271 *wl, struct sk_buff *skb)
+u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- if (wl12xx_is_dummy_packet(wl, skb))
+ if (!wlvif || wl12xx_is_dummy_packet(wl, skb))
return wl->system_hlid;
- if (wl->bss_type == BSS_TYPE_AP_BSS)
- return wl12xx_tx_get_hlid_ap(wl, skb);
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ return wl12xx_tx_get_hlid_ap(wl, wlvif, skb);
- wl1271_tx_update_filters(wl, skb);
+ wl1271_tx_update_filters(wl, wlvif, skb);
- if ((test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
- test_bit(WL1271_FLAG_IBSS_JOINED, &wl->flags)) &&
+ if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
+ test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) &&
!ieee80211_is_auth(hdr->frame_control) &&
!ieee80211_is_assoc_req(hdr->frame_control))
- return wl->sta_hlid;
+ return wlvif->sta.hlid;
else
- return wl->dev_hlid;
+ return wlvif->dev_hlid;
}
static unsigned int wl12xx_calc_packet_alignment(struct wl1271 *wl,
unsigned int packet_length)
{
- if (wl->quirks & WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT)
- return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
- else
+ if (wl->quirks & WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT)
return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
+ else
+ return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
}
-static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
- u32 buf_offset, u8 hlid)
+static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb, u32 extra, u32 buf_offset,
+ u8 hlid)
{
struct wl1271_tx_hw_descr *desc;
u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
@@ -217,6 +217,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
u32 total_blocks;
int id, ret = -EBUSY, ac;
u32 spare_blocks = wl->tx_spare_blocks;
+ bool is_dummy = false;
if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
return -EAGAIN;
@@ -231,8 +232,10 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
len = wl12xx_calc_packet_alignment(wl, total_len);
/* in case of a dummy packet, use default amount of spare mem blocks */
- if (unlikely(wl12xx_is_dummy_packet(wl, skb)))
+ if (unlikely(wl12xx_is_dummy_packet(wl, skb))) {
+ is_dummy = true;
spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
+ }
total_blocks = (len + TX_HW_BLOCK_SIZE - 1) / TX_HW_BLOCK_SIZE +
spare_blocks;
@@ -257,8 +260,9 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
wl->tx_allocated_pkts[ac]++;
- if (wl->bss_type == BSS_TYPE_AP_BSS &&
- hlid >= WL1271_AP_STA_HLID_START)
+ if (!is_dummy && wlvif &&
+ wlvif->bss_type == BSS_TYPE_AP_BSS &&
+ test_bit(hlid, wlvif->ap.sta_hlid_map))
wl->links[hlid].allocated_pkts++;
ret = 0;
@@ -273,15 +277,16 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
return ret;
}
-static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
- u32 extra, struct ieee80211_tx_info *control,
- u8 hlid)
+static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb, u32 extra,
+ struct ieee80211_tx_info *control, u8 hlid)
{
struct timespec ts;
struct wl1271_tx_hw_descr *desc;
int aligned_len, ac, rate_idx;
s64 hosttime;
- u16 tx_attr;
+ u16 tx_attr = 0;
+ bool is_dummy;
desc = (struct wl1271_tx_hw_descr *) skb->data;
@@ -298,7 +303,8 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
hosttime = (timespec_to_ns(&ts) >> 10);
desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
- if (wl->bss_type != BSS_TYPE_AP_BSS)
+ is_dummy = wl12xx_is_dummy_packet(wl, skb);
+ if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS)
desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
else
desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);
@@ -307,39 +313,42 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
desc->tid = skb->priority;
- if (wl12xx_is_dummy_packet(wl, skb)) {
+ if (is_dummy) {
/*
* FW expects the dummy packet to have an invalid session id -
* any session id that is different than the one set in the join
*/
- tx_attr = ((~wl->session_counter) <<
+ tx_attr = (SESSION_COUNTER_INVALID <<
TX_HW_ATTR_OFST_SESSION_COUNTER) &
TX_HW_ATTR_SESSION_COUNTER;
tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
- } else {
+ } else if (wlvif) {
/* configure the tx attributes */
- tx_attr =
- wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
+ tx_attr = wlvif->session_counter <<
+ TX_HW_ATTR_OFST_SESSION_COUNTER;
}
desc->hlid = hlid;
-
- if (wl->bss_type != BSS_TYPE_AP_BSS) {
+ if (is_dummy || !wlvif)
+ rate_idx = 0;
+ else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
/* if the packets are destined for AP (have a STA entry)
send them with AP rate policies, otherwise use default
basic rates */
- if (control->control.sta)
- rate_idx = ACX_TX_AP_FULL_RATE;
+ if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
+ rate_idx = wlvif->sta.p2p_rate_idx;
+ else if (control->control.sta)
+ rate_idx = wlvif->sta.ap_rate_idx;
else
- rate_idx = ACX_TX_BASIC_RATE;
+ rate_idx = wlvif->sta.basic_rate_idx;
} else {
- if (hlid == wl->ap_global_hlid)
- rate_idx = ACX_TX_AP_MODE_MGMT_RATE;
- else if (hlid == wl->ap_bcast_hlid)
- rate_idx = ACX_TX_AP_MODE_BCST_RATE;
+ if (hlid == wlvif->ap.global_hlid)
+ rate_idx = wlvif->ap.mgmt_rate_idx;
+ else if (hlid == wlvif->ap.bcast_hlid)
+ rate_idx = wlvif->ap.bcast_rate_idx;
else
- rate_idx = ac;
+ rate_idx = wlvif->ap.ucast_rate_idx[ac];
}
tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
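
The rate-policy index chosen above is OR'd into tx_attr next to the session counter and, for dummy packets, the TX_HW_ATTR_TX_DUMMY_REQ flag. A minimal user-space sketch of that style of descriptor bit packing; the field offsets and widths below are illustrative assumptions, not the real TX_HW_ATTR_* values from tx.h:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical field layout -- the offsets/widths are assumptions,
     * not the driver's real TX_HW_ATTR_* definitions. */
    #define ATTR_OFST_SESSION  0
    #define ATTR_MASK_SESSION  (0x7u << ATTR_OFST_SESSION) /* 3 bits: 0..6 valid, 7 invalid */
    #define ATTR_OFST_RATE     3
    #define ATTR_MASK_RATE     (0x1fu << ATTR_OFST_RATE)   /* rate-policy index */
    #define ATTR_DUMMY_REQ     (1u << 8)
    #define SESSION_INVALID    7u

    static unsigned pack_tx_attr(unsigned session, unsigned rate_idx, int is_dummy)
    {
        unsigned attr = 0;

        if (is_dummy) {
            /* dummy packets carry an invalid session id plus the dummy flag */
            attr |= (SESSION_INVALID << ATTR_OFST_SESSION) & ATTR_MASK_SESSION;
            attr |= ATTR_DUMMY_REQ;
        } else {
            attr |= (session << ATTR_OFST_SESSION) & ATTR_MASK_SESSION;
        }
        attr |= (rate_idx << ATTR_OFST_RATE) & ATTR_MASK_RATE;
        return attr;
    }

    int main(void)
    {
        printf("sta frame:   0x%04x\n", pack_tx_attr(3, 5, 0));
        printf("dummy frame: 0x%04x\n", pack_tx_attr(0, 0, 1));
        return 0;
    }
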
@@ -379,20 +388,24 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
}
/* caller must hold wl->mutex */
-static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
- u32 buf_offset)
+static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb, u32 buf_offset)
{
struct ieee80211_tx_info *info;
u32 extra = 0;
int ret = 0;
u32 total_len;
u8 hlid;
+ bool is_dummy;
if (!skb)
return -EINVAL;
info = IEEE80211_SKB_CB(skb);
+ /* TODO: handle dummy packets on multi-vifs */
+ is_dummy = wl12xx_is_dummy_packet(wl, skb);
+
if (info->control.hw_key &&
info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
extra = WL1271_TKIP_IV_SPACE;
@@ -405,29 +418,28 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
(cipher == WLAN_CIPHER_SUITE_WEP104);
- if (unlikely(is_wep && wl->default_key != idx)) {
- ret = wl1271_set_default_wep_key(wl, idx);
+ if (unlikely(is_wep && wlvif->default_key != idx)) {
+ ret = wl1271_set_default_wep_key(wl, wlvif, idx);
if (ret < 0)
return ret;
- wl->default_key = idx;
+ wlvif->default_key = idx;
}
}
-
- hlid = wl1271_tx_get_hlid(wl, skb);
+ hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
if (hlid == WL12XX_INVALID_LINK_ID) {
wl1271_error("invalid hlid. dropping skb 0x%p", skb);
return -EINVAL;
}
- ret = wl1271_tx_allocate(wl, skb, extra, buf_offset, hlid);
+ ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid);
if (ret < 0)
return ret;
- wl1271_tx_fill_hdr(wl, skb, extra, info, hlid);
+ wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);
- if (wl->bss_type == BSS_TYPE_AP_BSS) {
+ if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
wl1271_tx_ap_update_inconnection_sta(wl, skb);
- wl1271_tx_regulate_link(wl, hlid);
+ wl1271_tx_regulate_link(wl, wlvif, hlid);
}
/*
@@ -444,7 +456,7 @@ static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct sk_buff *skb,
memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
/* Revert side effects in the dummy packet skb, so it can be reused */
- if (wl12xx_is_dummy_packet(wl, skb))
+ if (is_dummy)
skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
return total_len;
@@ -522,19 +534,18 @@ static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
return &queues[q];
}
-static struct sk_buff *wl1271_sta_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
+ struct wl1271_link *lnk)
{
- struct sk_buff *skb = NULL;
+ struct sk_buff *skb;
unsigned long flags;
struct sk_buff_head *queue;
- queue = wl1271_select_queue(wl, wl->tx_queue);
+ queue = wl1271_select_queue(wl, lnk->tx_queue);
if (!queue)
- goto out;
+ return NULL;
skb = skb_dequeue(queue);
-
-out:
if (skb) {
int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
spin_lock_irqsave(&wl->wl_lock, flags);
@@ -545,43 +556,33 @@ out:
return skb;
}
-static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
+static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif)
{
struct sk_buff *skb = NULL;
- unsigned long flags;
int i, h, start_hlid;
- struct sk_buff_head *queue;
/* start from the link after the last one */
- start_hlid = (wl->last_tx_hlid + 1) % AP_MAX_LINKS;
+ start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS;
/* dequeue according to AC, round robin on each link */
- for (i = 0; i < AP_MAX_LINKS; i++) {
- h = (start_hlid + i) % AP_MAX_LINKS;
+ for (i = 0; i < WL12XX_MAX_LINKS; i++) {
+ h = (start_hlid + i) % WL12XX_MAX_LINKS;
/* only consider connected stations */
- if (h >= WL1271_AP_STA_HLID_START &&
- !test_bit(h - WL1271_AP_STA_HLID_START, wl->ap_hlid_map))
+ if (!test_bit(h, wlvif->links_map))
continue;
- queue = wl1271_select_queue(wl, wl->links[h].tx_queue);
- if (!queue)
+ skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[h]);
+ if (!skb)
continue;
- skb = skb_dequeue(queue);
- if (skb)
- break;
+ wlvif->last_tx_hlid = h;
+ break;
}
- if (skb) {
- int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
- wl->last_tx_hlid = h;
- spin_lock_irqsave(&wl->wl_lock, flags);
- wl->tx_queue_count[q]--;
- spin_unlock_irqrestore(&wl->wl_lock, flags);
- } else {
- wl->last_tx_hlid = 0;
- }
+ if (!skb)
+ wlvif->last_tx_hlid = 0;
return skb;
}
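
wl12xx_vif_skb_dequeue() above scans the vif's links_map round-robin, starting one past last_tx_hlid, so a single busy link cannot starve the others. A stand-alone sketch of the same scan over a plain bitmask (MAX_LINKS and the pending-frame check are stand-ins for WL12XX_MAX_LINKS and the per-link queues):

    #include <stdio.h>

    #define MAX_LINKS 12  /* stand-in for WL12XX_MAX_LINKS */

    /* Return the next link that is both active and has pending frames,
     * scanning round-robin from *last + 1; -1 if nothing is queued. */
    static int pick_link(unsigned active_map, const int pending[], int *last)
    {
        for (int i = 0; i < MAX_LINKS; i++) {
            int h = (*last + 1 + i) % MAX_LINKS;

            if (!(active_map & (1u << h)))
                continue;           /* link not set in links_map */
            if (!pending[h])
                continue;           /* nothing queued on this link */
            *last = h;              /* remember it, like last_tx_hlid */
            return h;
        }
        *last = 0;
        return -1;
    }

    int main(void)
    {
        int pending[MAX_LINKS] = { [2] = 3, [5] = 1 };
        int last = 5;
        unsigned active = (1u << 2) | (1u << 5);

        printf("first pick: %d\n", pick_link(active, pending, &last));  /* 2 */
        printf("next pick:  %d\n", pick_link(active, pending, &last));  /* 5 */
        return 0;
    }
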
@@ -589,12 +590,32 @@ static struct sk_buff *wl1271_ap_skb_dequeue(struct wl1271 *wl)
static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
{
unsigned long flags;
+ struct wl12xx_vif *wlvif = wl->last_wlvif;
struct sk_buff *skb = NULL;
- if (wl->bss_type == BSS_TYPE_AP_BSS)
- skb = wl1271_ap_skb_dequeue(wl);
- else
- skb = wl1271_sta_skb_dequeue(wl);
+ if (wlvif) {
+ wl12xx_for_each_wlvif_continue(wl, wlvif) {
+ skb = wl12xx_vif_skb_dequeue(wl, wlvif);
+ if (skb) {
+ wl->last_wlvif = wlvif;
+ break;
+ }
+ }
+ }
+
+ /* do another pass */
+ if (!skb) {
+ wl12xx_for_each_wlvif(wl, wlvif) {
+ skb = wl12xx_vif_skb_dequeue(wl, wlvif);
+ if (skb) {
+ wl->last_wlvif = wlvif;
+ break;
+ }
+ }
+ }
+
+ if (!skb)
+ skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
if (!skb &&
test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
@@ -610,21 +631,21 @@ static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
return skb;
}
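
wl1271_skb_dequeue() now arbitrates between interfaces in two passes: continue after last_wlvif, and only if that yields nothing restart from the head of wlvif_list before finally falling back to the system link. A small model of the two-pass scan, using an array in place of the kernel list:

    #include <stdio.h>

    #define NVIF 3

    /* pending[i] models "vif i has something to transmit" */
    static int dequeue(const int pending[], int *last_vif)
    {
        /* first pass: continue after the vif we used last time */
        for (int i = *last_vif + 1; i < NVIF; i++)
            if (pending[i]) { *last_vif = i; return i; }

        /* second pass: start over from the beginning of the list */
        for (int i = 0; i < NVIF; i++)
            if (pending[i]) { *last_vif = i; return i; }

        return -1; /* nothing queued; caller falls back to the system link */
    }

    int main(void)
    {
        int pending[NVIF] = { 1, 0, 1 };
        int last = 0;

        printf("%d\n", dequeue(pending, &last)); /* 2: continue after vif 0 */
        printf("%d\n", dequeue(pending, &last)); /* 0: wraps on the second pass */
        return 0;
    }
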
-static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
+static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb)
{
unsigned long flags;
int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
if (wl12xx_is_dummy_packet(wl, skb)) {
set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
- } else if (wl->bss_type == BSS_TYPE_AP_BSS) {
- u8 hlid = wl1271_tx_get_hlid(wl, skb);
+ } else {
+ u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
/* make sure we dequeue the same packet next time */
- wl->last_tx_hlid = (hlid + AP_MAX_LINKS - 1) % AP_MAX_LINKS;
- } else {
- skb_queue_head(&wl->tx_queue[q], skb);
+ wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) %
+ WL12XX_MAX_LINKS;
}
spin_lock_irqsave(&wl->wl_lock, flags);
@@ -639,29 +660,71 @@ static bool wl1271_tx_is_data_present(struct sk_buff *skb)
return ieee80211_is_data_present(hdr->frame_control);
}
+void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
+{
+ struct wl12xx_vif *wlvif;
+ u32 timeout;
+ u8 hlid;
+
+ if (!wl->conf.rx_streaming.interval)
+ return;
+
+ if (!wl->conf.rx_streaming.always &&
+ !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))
+ return;
+
+ timeout = wl->conf.rx_streaming.duration;
+ wl12xx_for_each_wlvif_sta(wl, wlvif) {
+ bool found = false;
+ for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) {
+ if (test_bit(hlid, wlvif->links_map)) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ continue;
+
+ /* enable rx streaming */
+ if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
+ ieee80211_queue_work(wl->hw,
+ &wlvif->rx_streaming_enable_work);
+
+ mod_timer(&wlvif->rx_streaming_timer,
+ jiffies + msecs_to_jiffies(timeout));
+ }
+}
+
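
wl12xx_rearm_rx_streaming() collects the HLIDs that carried data during this TX run and re-arms the RX-streaming timer only for station vifs whose links_map intersects that set. The core test is a bitmap intersection; a single-word user-space model (the real code walks unsigned long arrays with for_each_set_bit):

    #include <stdbool.h>
    #include <stdio.h>

    struct vif_model {
        const char   *name;
        bool          is_sta;
        unsigned long links_map;   /* one bit per HLID, single-word model */
    };

    /* Re-arm streaming for every STA vif owning at least one active HLID. */
    static void rearm(const struct vif_model *vifs, int n, unsigned long active_hlids)
    {
        for (int i = 0; i < n; i++) {
            if (!vifs[i].is_sta)
                continue;
            if (!(vifs[i].links_map & active_hlids))
                continue;                  /* none of its links saw data */
            printf("re-arm rx streaming timer on %s\n", vifs[i].name);
        }
    }

    int main(void)
    {
        struct vif_model vifs[] = {
            { "wlan0", true,  0x06 },   /* HLIDs 1,2 */
            { "ap0",   false, 0x18 },   /* AP vif, skipped */
            { "p2p0",  true,  0x20 },   /* HLID 5 */
        };

        rearm(vifs, 3, 1ul << 2);       /* only HLID 2 carried data */
        return 0;
    }
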
void wl1271_tx_work_locked(struct wl1271 *wl)
{
+ struct wl12xx_vif *wlvif;
struct sk_buff *skb;
+ struct wl1271_tx_hw_descr *desc;
u32 buf_offset = 0;
bool sent_packets = false;
- bool had_data = false;
- bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
+ unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
int ret;
if (unlikely(wl->state == WL1271_STATE_OFF))
return;
while ((skb = wl1271_skb_dequeue(wl))) {
- if (wl1271_tx_is_data_present(skb))
- had_data = true;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ bool has_data = false;
- ret = wl1271_prepare_tx_frame(wl, skb, buf_offset);
+ wlvif = NULL;
+ if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
+ wlvif = wl12xx_vif_to_data(info->control.vif);
+
+ has_data = wlvif && wl1271_tx_is_data_present(skb);
+ ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset);
if (ret == -EAGAIN) {
/*
* Aggregation buffer is full.
* Flush buffer and try again.
*/
- wl1271_skb_queue_head(wl, skb);
+ wl1271_skb_queue_head(wl, wlvif, skb);
wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
buf_offset, true);
sent_packets = true;
@@ -672,16 +735,27 @@ void wl1271_tx_work_locked(struct wl1271 *wl)
* Firmware buffer is full.
* Queue back last skb, and stop aggregating.
*/
- wl1271_skb_queue_head(wl, skb);
+ wl1271_skb_queue_head(wl, wlvif, skb);
/* No work left, avoid scheduling redundant tx work */
set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
goto out_ack;
} else if (ret < 0) {
- dev_kfree_skb(skb);
+ if (wl12xx_is_dummy_packet(wl, skb))
+ /*
+ * fw still expects dummy packet,
+ * so re-enqueue it
+ */
+ wl1271_skb_queue_head(wl, wlvif, skb);
+ else
+ ieee80211_free_txskb(wl->hw, skb);
goto out_ack;
}
buf_offset += ret;
wl->tx_packets_count++;
+ if (has_data) {
+ desc = (struct wl1271_tx_hw_descr *) skb->data;
+ __set_bit(desc->hlid, active_hlids);
+ }
}
out_ack:
@@ -701,19 +775,7 @@ out_ack:
wl1271_handle_tx_low_watermark(wl);
}
- if (!is_ap && wl->conf.rx_streaming.interval && had_data &&
- (wl->conf.rx_streaming.always ||
- test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) {
- u32 timeout = wl->conf.rx_streaming.duration;
-
- /* enable rx streaming */
- if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
- ieee80211_queue_work(wl->hw,
- &wl->rx_streaming_enable_work);
-
- mod_timer(&wl->rx_streaming_timer,
- jiffies + msecs_to_jiffies(timeout));
- }
+ wl12xx_rearm_rx_streaming(wl, active_hlids);
}
void wl1271_tx_work(struct work_struct *work)
@@ -737,6 +799,8 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
struct wl1271_tx_hw_res_descr *result)
{
struct ieee80211_tx_info *info;
+ struct ieee80211_vif *vif;
+ struct wl12xx_vif *wlvif;
struct sk_buff *skb;
int id = result->id;
int rate = -1;
@@ -756,11 +820,16 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
return;
}
+ /* info->control is valid as long as we don't update info->status */
+ vif = info->control.vif;
+ wlvif = wl12xx_vif_to_data(vif);
+
/* update the TX status info */
if (result->status == TX_SUCCESS) {
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_ACK;
- rate = wl1271_rate_to_idx(result->rate_class_index, wl->band);
+ rate = wl1271_rate_to_idx(result->rate_class_index,
+ wlvif->band);
retries = result->ack_failures;
} else if (result->status == TX_RETRY_EXCEEDED) {
wl->stats.excessive_retries++;
@@ -783,14 +852,14 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP ||
info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) {
u8 fw_lsb = result->tx_security_sequence_number_lsb;
- u8 cur_lsb = wl->tx_security_last_seq_lsb;
+ u8 cur_lsb = wlvif->tx_security_last_seq_lsb;
/*
* update security sequence number, taking care of potential
* wrap-around
*/
- wl->tx_security_seq += (fw_lsb - cur_lsb + 256) % 256;
- wl->tx_security_last_seq_lsb = fw_lsb;
+ wlvif->tx_security_seq += (fw_lsb - cur_lsb) & 0xff;
+ wlvif->tx_security_last_seq_lsb = fw_lsb;
}
/* remove private header from packet */
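
The hunk above replaces (fw_lsb - cur_lsb + 256) % 256 with (fw_lsb - cur_lsb) & 0xff; both give the forward distance modulo 256, so the 64-bit tx_security_seq keeps growing across the 8-bit wrap reported by the firmware. A quick stand-alone check of the equivalence:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t seq = 0;
        uint8_t  last = 0;
        /* LSB values as the firmware might report them, including a wrap */
        uint8_t  reports[] = { 10, 200, 250, 4, 30 };

        for (unsigned i = 0; i < sizeof(reports); i++) {
            uint8_t fw = reports[i];
            unsigned old_style = (fw - last + 256) % 256;
            unsigned new_style = (uint8_t)(fw - last);  /* same as (fw - last) & 0xff */

            seq += new_style;
            printf("fw=%3u delta(old)=%3u delta(new)=%3u seq=%llu\n",
                   fw, old_style, new_style, (unsigned long long)seq);
            last = fw;
        }
        return 0;
    }
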
@@ -886,39 +955,30 @@ void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
}
/* caller must hold wl->mutex and TX must be stopped */
-void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
+void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int i;
- struct sk_buff *skb;
- struct ieee80211_tx_info *info;
/* TX failure */
- if (wl->bss_type == BSS_TYPE_AP_BSS) {
- for (i = 0; i < AP_MAX_LINKS; i++) {
- wl1271_free_sta(wl, i);
- wl1271_tx_reset_link_queues(wl, i);
- wl->links[i].allocated_pkts = 0;
- wl->links[i].prev_freed_pkts = 0;
- }
-
- wl->last_tx_hlid = 0;
- } else {
- for (i = 0; i < NUM_TX_QUEUES; i++) {
- while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
- wl1271_debug(DEBUG_TX, "freeing skb 0x%p",
- skb);
-
- if (!wl12xx_is_dummy_packet(wl, skb)) {
- info = IEEE80211_SKB_CB(skb);
- info->status.rates[0].idx = -1;
- info->status.rates[0].count = 0;
- ieee80211_tx_status_ni(wl->hw, skb);
- }
- }
- }
+ for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
+ if (wlvif->bss_type == BSS_TYPE_AP_BSS)
+ wl1271_free_sta(wl, wlvif, i);
+ else
+ wlvif->sta.ba_rx_bitmap = 0;
- wl->ba_rx_bitmap = 0;
+ wl1271_tx_reset_link_queues(wl, i);
+ wl->links[i].allocated_pkts = 0;
+ wl->links[i].prev_freed_pkts = 0;
}
+ wlvif->last_tx_hlid = 0;
+
+}
+/* caller must hold wl->mutex and TX must be stopped */
+void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
+{
+ int i;
+ struct sk_buff *skb;
+ struct ieee80211_tx_info *info;
for (i = 0; i < NUM_TX_QUEUES; i++)
wl->tx_queue_count[i] = 0;
diff --git a/drivers/net/wireless/wl12xx/tx.h b/drivers/net/wireless/wl12xx/tx.h
index dc4f09a..2dbb24e 100644
--- a/drivers/net/wireless/wl12xx/tx.h
+++ b/drivers/net/wireless/wl12xx/tx.h
@@ -206,18 +206,23 @@ static inline int wl1271_tx_total_queue_count(struct wl1271 *wl)
void wl1271_tx_work(struct work_struct *work);
void wl1271_tx_work_locked(struct wl1271 *wl);
void wl1271_tx_complete(struct wl1271 *wl);
-void wl1271_tx_reset(struct wl1271 *wl, bool reset_tx_queues);
+void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues);
void wl1271_tx_flush(struct wl1271 *wl);
u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
enum ieee80211_band rate_band);
u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set);
-u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct sk_buff *skb);
+u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb);
+u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ struct sk_buff *skb);
void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid);
void wl1271_handle_tx_low_watermark(struct wl1271 *wl);
bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb);
+void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids);
/* from main.c */
-void wl1271_free_sta(struct wl1271 *wl, u8 hlid);
+void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid);
#endif
diff --git a/drivers/net/wireless/wl12xx/wl12xx.h b/drivers/net/wireless/wl12xx/wl12xx.h
index 1ec90fc..b2b09cd 100644
--- a/drivers/net/wireless/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/wl12xx/wl12xx.h
@@ -35,83 +35,6 @@
#include "conf.h"
#include "ini.h"
-#define DRIVER_NAME "wl1271"
-#define DRIVER_PREFIX DRIVER_NAME ": "
-
-/*
- * FW versions support BA 11n
- * versions marks x.x.x.50-60.x
- */
-#define WL12XX_BA_SUPPORT_FW_COST_VER2_START 50
-#define WL12XX_BA_SUPPORT_FW_COST_VER2_END 60
-
-enum {
- DEBUG_NONE = 0,
- DEBUG_IRQ = BIT(0),
- DEBUG_SPI = BIT(1),
- DEBUG_BOOT = BIT(2),
- DEBUG_MAILBOX = BIT(3),
- DEBUG_TESTMODE = BIT(4),
- DEBUG_EVENT = BIT(5),
- DEBUG_TX = BIT(6),
- DEBUG_RX = BIT(7),
- DEBUG_SCAN = BIT(8),
- DEBUG_CRYPT = BIT(9),
- DEBUG_PSM = BIT(10),
- DEBUG_MAC80211 = BIT(11),
- DEBUG_CMD = BIT(12),
- DEBUG_ACX = BIT(13),
- DEBUG_SDIO = BIT(14),
- DEBUG_FILTERS = BIT(15),
- DEBUG_ADHOC = BIT(16),
- DEBUG_AP = BIT(17),
- DEBUG_MASTER = (DEBUG_ADHOC | DEBUG_AP),
- DEBUG_ALL = ~0,
-};
-
-extern u32 wl12xx_debug_level;
-
-#define DEBUG_DUMP_LIMIT 1024
-
-#define wl1271_error(fmt, arg...) \
- pr_err(DRIVER_PREFIX "ERROR " fmt "\n", ##arg)
-
-#define wl1271_warning(fmt, arg...) \
- pr_warning(DRIVER_PREFIX "WARNING " fmt "\n", ##arg)
-
-#define wl1271_notice(fmt, arg...) \
- pr_info(DRIVER_PREFIX fmt "\n", ##arg)
-
-#define wl1271_info(fmt, arg...) \
- pr_info(DRIVER_PREFIX fmt "\n", ##arg)
-
-#define wl1271_debug(level, fmt, arg...) \
- do { \
- if (level & wl12xx_debug_level) \
- pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \
- } while (0)
-
-/* TODO: use pr_debug_hex_dump when it will be available */
-#define wl1271_dump(level, prefix, buf, len) \
- do { \
- if (level & wl12xx_debug_level) \
- print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
- DUMP_PREFIX_OFFSET, 16, 1, \
- buf, \
- min_t(size_t, len, DEBUG_DUMP_LIMIT), \
- 0); \
- } while (0)
-
-#define wl1271_dump_ascii(level, prefix, buf, len) \
- do { \
- if (level & wl12xx_debug_level) \
- print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
- DUMP_PREFIX_OFFSET, 16, 1, \
- buf, \
- min_t(size_t, len, DEBUG_DUMP_LIMIT), \
- true); \
- } while (0)
-
#define WL127X_FW_NAME "ti-connectivity/wl127x-fw-3.bin"
#define WL128X_FW_NAME "ti-connectivity/wl128x-fw-3.bin"
@@ -142,16 +65,12 @@ extern u32 wl12xx_debug_level;
#define WL12XX_INVALID_ROLE_ID 0xff
#define WL12XX_INVALID_LINK_ID 0xff
+#define WL12XX_MAX_RATE_POLICIES 16
+
/* Defined by FW as 0. Will not be freed or allocated. */
#define WL12XX_SYSTEM_HLID 0
/*
- * TODO: we currently don't support multirole. remove
- * this constant from the code when we do.
- */
-#define WL1271_AP_STA_HLID_START 3
-
-/*
* When in AP-mode, we allow (at least) this number of packets
* to be transmitted to FW for a STA in PS-mode. Only when packets are
* present in the FW buffers it will wake the sleeping STA. We want to put
@@ -236,13 +155,6 @@ struct wl1271_stats {
#define AP_MAX_STATIONS 8
-/* Broadcast and Global links + system link + links to stations */
-/*
- * TODO: when WL1271_AP_STA_HLID_START is no longer constant, change all
- * the places that use this.
- */
-#define AP_MAX_LINKS (AP_MAX_STATIONS + WL1271_AP_STA_HLID_START)
-
/* FW status registers */
struct wl12xx_fw_status {
__le32 intr;
@@ -299,17 +211,14 @@ struct wl1271_scan {
};
struct wl1271_if_operations {
- void (*read)(struct wl1271 *wl, int addr, void *buf, size_t len,
+ void (*read)(struct device *child, int addr, void *buf, size_t len,
bool fixed);
- void (*write)(struct wl1271 *wl, int addr, void *buf, size_t len,
+ void (*write)(struct device *child, int addr, void *buf, size_t len,
bool fixed);
- void (*reset)(struct wl1271 *wl);
- void (*init)(struct wl1271 *wl);
- int (*power)(struct wl1271 *wl, bool enable);
- struct device* (*dev)(struct wl1271 *wl);
- void (*enable_irq)(struct wl1271 *wl);
- void (*disable_irq)(struct wl1271 *wl);
- void (*set_block_size) (struct wl1271 *wl, unsigned int blksz);
+ void (*reset)(struct device *child);
+ void (*init)(struct device *child);
+ int (*power)(struct device *child, bool enable);
+ void (*set_block_size) (struct device *child, unsigned int blksz);
};
#define MAX_NUM_KEYS 14
@@ -326,29 +235,33 @@ struct wl1271_ap_key {
};
enum wl12xx_flags {
- WL1271_FLAG_STA_ASSOCIATED,
- WL1271_FLAG_IBSS_JOINED,
WL1271_FLAG_GPIO_POWER,
WL1271_FLAG_TX_QUEUE_STOPPED,
WL1271_FLAG_TX_PENDING,
WL1271_FLAG_IN_ELP,
WL1271_FLAG_ELP_REQUESTED,
- WL1271_FLAG_PSM,
- WL1271_FLAG_PSM_REQUESTED,
WL1271_FLAG_IRQ_RUNNING,
- WL1271_FLAG_IDLE,
- WL1271_FLAG_PSPOLL_FAILURE,
- WL1271_FLAG_STA_STATE_SENT,
WL1271_FLAG_FW_TX_BUSY,
- WL1271_FLAG_AP_STARTED,
- WL1271_FLAG_IF_INITIALIZED,
WL1271_FLAG_DUMMY_PACKET_PENDING,
WL1271_FLAG_SUSPENDED,
WL1271_FLAG_PENDING_WORK,
WL1271_FLAG_SOFT_GEMINI,
- WL1271_FLAG_RX_STREAMING_STARTED,
WL1271_FLAG_RECOVERY_IN_PROGRESS,
- WL1271_FLAG_CS_PROGRESS,
+};
+
+enum wl12xx_vif_flags {
+ WLVIF_FLAG_INITIALIZED,
+ WLVIF_FLAG_STA_ASSOCIATED,
+ WLVIF_FLAG_IBSS_JOINED,
+ WLVIF_FLAG_AP_STARTED,
+ WLVIF_FLAG_PSM,
+ WLVIF_FLAG_PSM_REQUESTED,
+ WLVIF_FLAG_STA_STATE_SENT,
+ WLVIF_FLAG_RX_STREAMING_STARTED,
+ WLVIF_FLAG_PSPOLL_FAILURE,
+ WLVIF_FLAG_CS_PROGRESS,
+ WLVIF_FLAG_AP_PROBE_RESP_SET,
+ WLVIF_FLAG_IN_USE,
};
struct wl1271_link {
@@ -366,10 +279,11 @@ struct wl1271_link {
};
struct wl1271 {
- struct platform_device *plat_dev;
struct ieee80211_hw *hw;
bool mac80211_registered;
+ struct device *dev;
+
void *if_priv;
struct wl1271_if_operations *if_ops;
@@ -399,25 +313,20 @@ struct wl1271 {
s8 hw_pg_ver;
- u8 bssid[ETH_ALEN];
u8 mac_addr[ETH_ALEN];
- u8 bss_type;
- u8 set_bss_type;
- u8 p2p; /* we are using p2p role */
- u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
- u8 ssid_len;
int channel;
- u8 role_id;
- u8 dev_role_id;
u8 system_hlid;
- u8 sta_hlid;
- u8 dev_hlid;
- u8 ap_global_hlid;
- u8 ap_bcast_hlid;
unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
+ unsigned long rate_policies_map[
+ BITS_TO_LONGS(WL12XX_MAX_RATE_POLICIES)];
+
+ struct list_head wlvif_list;
+
+ u8 sta_count;
+ u8 ap_count;
struct wl1271_acx_mem_map *target_mem_map;
@@ -440,11 +349,7 @@ struct wl1271 {
/* Time-offset between host and chipset clocks */
s64 time_offset;
- /* Session counter for the chipset */
- int session_counter;
-
/* Frames scheduled for transmission, not handled yet */
- struct sk_buff_head tx_queue[NUM_TX_QUEUES];
int tx_queue_count[NUM_TX_QUEUES];
long stopped_queues_map;
@@ -462,17 +367,6 @@ struct wl1271 {
struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS];
int tx_frames_cnt;
- /*
- * Security sequence number
- * bits 0-15: lower 16 bits part of sequence number
- * bits 16-47: higher 32 bits part of sequence number
- * bits 48-63: not in use
- */
- u64 tx_security_seq;
-
- /* 8 bits of the last sequence number in use */
- u8 tx_security_last_seq_lsb;
-
/* FW Rx counter */
u32 rx_counter;
@@ -507,59 +401,21 @@ struct wl1271 {
u32 mbox_ptr[2];
/* Are we currently scanning */
+ struct ieee80211_vif *scan_vif;
struct wl1271_scan scan;
struct delayed_work scan_complete_work;
bool sched_scanning;
- /* probe-req template for the current AP */
- struct sk_buff *probereq;
-
- /* Our association ID */
- u16 aid;
-
- /*
- * currently configured rate set:
- * bits 0-15 - 802.11abg rates
- * bits 16-23 - 802.11n MCS index mask
- * support only 1 stream, thus only 8 bits for the MCS rates (0-7).
- */
- u32 basic_rate_set;
- u32 basic_rate;
- u32 rate_set;
- u32 bitrate_masks[IEEE80211_NUM_BANDS];
-
/* The current band */
enum ieee80211_band band;
- /* Beaconing interval (needed for ad-hoc) */
- u32 beacon_int;
-
- /* Default key (for WEP) */
- u32 default_key;
-
- /* Rx Streaming */
- struct work_struct rx_streaming_enable_work;
- struct work_struct rx_streaming_disable_work;
- struct timer_list rx_streaming_timer;
-
struct completion *elp_compl;
- struct completion *ps_compl;
struct delayed_work elp_work;
- struct delayed_work pspoll_work;
-
- /* counter for ps-poll delivery failures */
- int ps_poll_failures;
-
- /* retry counter for PSM entries */
- u8 psm_entry_retry;
/* in dBm */
int power_level;
- int rssi_thold;
- int last_rssi_event;
-
struct wl1271_stats stats;
__le32 buffer_32;
@@ -583,20 +439,9 @@ struct wl1271 {
/* Most recently reported noise in dBm */
s8 noise;
- /* map for HLIDs of associated stations - when operating in AP mode */
- unsigned long ap_hlid_map[BITS_TO_LONGS(AP_MAX_STATIONS)];
-
- /* recoreded keys for AP-mode - set here before AP startup */
- struct wl1271_ap_key *recorded_ap_keys[MAX_NUM_KEYS];
-
/* bands supported by this instance of wl12xx */
struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
- /* RX BA constraint value */
- bool ba_support;
- u8 ba_rx_bitmap;
- bool ba_allowed;
-
int tcxo_clock;
/*
@@ -610,10 +455,7 @@ struct wl1271 {
* AP-mode - links indexed by HLID. The global and broadcast links
* are always active.
*/
- struct wl1271_link links[AP_MAX_LINKS];
-
- /* the hlid of the link where the last transmitted skb came from */
- int last_tx_hlid;
+ struct wl1271_link links[WL12XX_MAX_LINKS];
/* AP-mode - a bitmap of links currently in PS mode according to FW */
u32 ap_fw_ps_map;
@@ -632,21 +474,173 @@ struct wl1271 {
/* AP-mode - number of currently connected stations */
int active_sta_count;
+
+ /* last wlvif we transmitted from */
+ struct wl12xx_vif *last_wlvif;
};
struct wl1271_station {
u8 hlid;
};
+struct wl12xx_vif {
+ struct wl1271 *wl;
+ struct list_head list;
+ unsigned long flags;
+ u8 bss_type;
+ u8 p2p; /* we are using p2p role */
+ u8 role_id;
+
+ /* sta/ibss specific */
+ u8 dev_role_id;
+ u8 dev_hlid;
+
+ union {
+ struct {
+ u8 hlid;
+ u8 ba_rx_bitmap;
+
+ u8 basic_rate_idx;
+ u8 ap_rate_idx;
+ u8 p2p_rate_idx;
+ } sta;
+ struct {
+ u8 global_hlid;
+ u8 bcast_hlid;
+
+ /* HLIDs bitmap of associated stations */
+ unsigned long sta_hlid_map[BITS_TO_LONGS(
+ WL12XX_MAX_LINKS)];
+
+ /* recorded keys - set here before AP startup */
+ struct wl1271_ap_key *recorded_keys[MAX_NUM_KEYS];
+
+ u8 mgmt_rate_idx;
+ u8 bcast_rate_idx;
+ u8 ucast_rate_idx[CONF_TX_MAX_AC_COUNT];
+ } ap;
+ };
+
+ /* the hlid of the last transmitted skb */
+ int last_tx_hlid;
+
+ unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
+
+ u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
+ u8 ssid_len;
+
+ /* The current band */
+ enum ieee80211_band band;
+ int channel;
+
+ u32 bitrate_masks[IEEE80211_NUM_BANDS];
+ u32 basic_rate_set;
+
+ /*
+ * currently configured rate set:
+ * bits 0-15 - 802.11abg rates
+ * bits 16-23 - 802.11n MCS index mask
+ * support only 1 stream, thus only 8 bits for the MCS rates (0-7).
+ */
+ u32 basic_rate;
+ u32 rate_set;
+
+ /* probe-req template for the current AP */
+ struct sk_buff *probereq;
+
+ /* Beaconing interval (needed for ad-hoc) */
+ u32 beacon_int;
+
+ /* Default key (for WEP) */
+ u32 default_key;
+
+ /* Our association ID */
+ u16 aid;
+
+ /* Session counter for the chipset */
+ int session_counter;
+
+ struct completion *ps_compl;
+ struct delayed_work pspoll_work;
+
+ /* counter for ps-poll delivery failures */
+ int ps_poll_failures;
+
+ /* retry counter for PSM entries */
+ u8 psm_entry_retry;
+
+ /* in dBm */
+ int power_level;
+
+ int rssi_thold;
+ int last_rssi_event;
+
+ /* RX BA constraint value */
+ bool ba_support;
+ bool ba_allowed;
+
+ /* Rx Streaming */
+ struct work_struct rx_streaming_enable_work;
+ struct work_struct rx_streaming_disable_work;
+ struct timer_list rx_streaming_timer;
+
+ /*
+ * This struct must be last!
+ * data that has to be saved across reconfigs (e.g. recovery)
+ * should be declared in this struct.
+ */
+ struct {
+ u8 persistent[0];
+ /*
+ * Security sequence number
+ * bits 0-15: lower 16 bits part of sequence number
+ * bits 16-47: higher 32 bits part of sequence number
+ * bits 48-63: not in use
+ */
+ u64 tx_security_seq;
+
+ /* 8 bits of the last sequence number in use */
+ u8 tx_security_last_seq_lsb;
+ };
+};
+
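
The zero-length persistent[0] member marks where the reconfig-safe tail of struct wl12xx_vif starts: the comment says data that must survive a recovery goes after it, which suggests the reset path wipes only the bytes before that offset. A user-space sketch of the idiom under that assumption (zero-length arrays are a GNU C extension the kernel relies on):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct vif_state {
        int      role_id;
        int      channel;
        /* Everything below this marker must survive a reconfig/recovery. */
        uint8_t  persistent[0];
        uint64_t tx_security_seq;
        uint8_t  tx_security_last_seq_lsb;
    };

    static void reset_for_recovery(struct vif_state *v)
    {
        /* wipe only the non-persistent head of the structure */
        memset(v, 0, offsetof(struct vif_state, persistent));
    }

    int main(void)
    {
        struct vif_state v = { .role_id = 3, .channel = 6,
                               .tx_security_seq = 12345,
                               .tx_security_last_seq_lsb = 57 };

        reset_for_recovery(&v);
        printf("role=%d channel=%d seq=%llu lsb=%u\n",
               v.role_id, v.channel,
               (unsigned long long)v.tx_security_seq,
               v.tx_security_last_seq_lsb);
        return 0;
    }
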
+static inline struct wl12xx_vif *wl12xx_vif_to_data(struct ieee80211_vif *vif)
+{
+ return (struct wl12xx_vif *)vif->drv_priv;
+}
+
+static inline
+struct ieee80211_vif *wl12xx_wlvif_to_vif(struct wl12xx_vif *wlvif)
+{
+ return container_of((void *)wlvif, struct ieee80211_vif, drv_priv);
+}
+
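
wl12xx_vif_to_data() and wl12xx_wlvif_to_vif() convert between mac80211's ieee80211_vif and the driver state embedded in its drv_priv area, using container_of for the reverse direction. A generic, stand-alone version of the pattern; the outer/priv structures and the simplified container_of below are illustrative, not mac80211's real layout:

    #include <stddef.h>
    #include <stdio.h>

    /* outer object with a trailing private area, like ieee80211_vif.drv_priv */
    struct outer {
        int  type;
        char drv_priv[32];
    };

    struct priv {
        int role_id;
    };

    /* simplified user-space container_of */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct priv *outer_to_priv(struct outer *o)
    {
        return (struct priv *)o->drv_priv;      /* forward: vif -> wlvif */
    }

    static struct outer *priv_to_outer(struct priv *p)
    {
        return container_of(p, struct outer, drv_priv);  /* reverse: wlvif -> vif */
    }

    int main(void)
    {
        struct outer o = { .type = 2 };
        struct priv *p = outer_to_priv(&o);

        p->role_id = 7;
        printf("round trip ok: %d\n", priv_to_outer(p) == &o);
        printf("role_id via outer: %d\n", ((struct priv *)o.drv_priv)->role_id);
        return 0;
    }
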
+#define wl12xx_for_each_wlvif(wl, wlvif) \
+ list_for_each_entry(wlvif, &wl->wlvif_list, list)
+
+#define wl12xx_for_each_wlvif_continue(wl, wlvif) \
+ list_for_each_entry_continue(wlvif, &wl->wlvif_list, list)
+
+#define wl12xx_for_each_wlvif_bss_type(wl, wlvif, _bss_type) \
+ wl12xx_for_each_wlvif(wl, wlvif) \
+ if (wlvif->bss_type == _bss_type)
+
+#define wl12xx_for_each_wlvif_sta(wl, wlvif) \
+ wl12xx_for_each_wlvif_bss_type(wl, wlvif, BSS_TYPE_STA_BSS)
+
+#define wl12xx_for_each_wlvif_ap(wl, wlvif) \
+ wl12xx_for_each_wlvif_bss_type(wl, wlvif, BSS_TYPE_AP_BSS)
+
int wl1271_plt_start(struct wl1271 *wl);
int wl1271_plt_stop(struct wl1271 *wl);
-int wl1271_recalc_rx_streaming(struct wl1271 *wl);
+int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif);
void wl12xx_queue_recovery_work(struct wl1271 *wl);
size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
#define JOIN_TIMEOUT 5000 /* 5000 milliseconds to join */
-#define SESSION_COUNTER_MAX 7 /* maximum value for the session counter */
+#define SESSION_COUNTER_MAX 6 /* maximum value for the session counter */
+#define SESSION_COUNTER_INVALID 7 /* used with dummy_packet */
#define WL1271_DEFAULT_POWER_LEVEL 0
@@ -669,8 +663,8 @@ size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen);
/* Each RX/TX transaction requires an end-of-transaction transfer */
#define WL12XX_QUIRK_END_OF_TRANSACTION BIT(0)
-/* WL128X requires aggregated packets to be aligned to the SDIO block size */
-#define WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT BIT(2)
+/* wl127x and SPI don't support SDIO block size alignment */
+#define WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT BIT(2)
/* Older firmwares did not implement the FW logger over bus feature */
#define WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED BIT(4)
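
With the quirk inverted, padding to the SDIO block size becomes the default and only hardware flagged WL12XX_QUIRK_NO_BLOCKSIZE_ALIGNMENT falls back to 4-byte alignment (see wl12xx_calc_packet_alignment() in tx.c earlier in this patch). A quick illustration of the two roundings; the 4 and 512 are assumed stand-ins for WL1271_TX_ALIGN_TO and the bus block size:

    #include <stdio.h>

    /* same rounding the kernel's ALIGN() does for power-of-two alignments */
    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned len = 1337;              /* frame + descriptor + extra */
        unsigned no_blocksize_quirk = 0;  /* 0: block-aligned SDIO, 1: wl127x/SPI */

        unsigned padded = no_blocksize_quirk ? ALIGN_UP(len, 4)
                                             : ALIGN_UP(len, 512);

        printf("len=%u padded=%u\n", len, padded);   /* 1337 -> 1536 */
        return 0;
    }
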
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h
index f7971d3..8f0ffaf 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_80211.h
+++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h
@@ -116,11 +116,6 @@ struct wl12xx_ps_poll_template {
u8 ta[ETH_ALEN];
} __packed;
-struct wl12xx_qos_null_data_template {
- struct ieee80211_header header;
- __le16 qos_ctl;
-} __packed;
-
struct wl12xx_arp_rsp_template {
struct ieee80211_hdr_3addr hdr;
diff --git a/drivers/net/wireless/wl12xx/wl12xx_platform_data.c b/drivers/net/wireless/wl12xx/wl12xx_platform_data.c
index 973b110..998e958 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_platform_data.c
+++ b/drivers/net/wireless/wl12xx/wl12xx_platform_data.c
@@ -1,8 +1,29 @@
+/*
+ * This file is part of wl12xx
+ *
+ * Copyright (C) 2010-2011 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
#include <linux/module.h>
#include <linux/err.h>
#include <linux/wl12xx.h>
-static const struct wl12xx_platform_data *platform_data;
+static struct wl12xx_platform_data *platform_data;
int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
{
@@ -18,7 +39,7 @@ int __init wl12xx_set_platform_data(const struct wl12xx_platform_data *data)
return 0;
}
-const struct wl12xx_platform_data *wl12xx_get_platform_data(void)
+struct wl12xx_platform_data *wl12xx_get_platform_data(void)
{
if (!platform_data)
return ERR_PTR(-ENODEV);
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 8efa2f2..a66b93b 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -1907,15 +1907,4 @@ static struct usb_driver zd1201_usb = {
.resume = zd1201_resume,
};
-static int __init zd1201_init(void)
-{
- return usb_register(&zd1201_usb);
-}
-
-static void __exit zd1201_cleanup(void)
-{
- usb_deregister(&zd1201_usb);
-}
-
-module_init(zd1201_init);
-module_exit(zd1201_cleanup);
+module_usb_driver(zd1201_usb);
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 0a70149..98a574a 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -866,6 +866,14 @@ static int fill_ctrlset(struct zd_mac *mac,
ZD_ASSERT(frag_len <= 0xffff);
+ /*
+ * Firmware computes the duration itself (for all frames except PSPoll)
+ * and needs the field set to 0 at input, otherwise firmware messes up
+ * duration_id and sets bits 14 and 15 on.
+ */
+ if (!ieee80211_is_pspoll(hdr->frame_control))
+ hdr->duration_id = 0;
+
txrate = ieee80211_get_tx_rate(mac->hw, info);
cs->modulation = txrate->hw_value;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 1825629..b7d41f8 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -165,7 +165,8 @@ static int xenvif_change_mtu(struct net_device *dev, int mtu)
return 0;
}
-static u32 xenvif_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t xenvif_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct xenvif *vif = netdev_priv(dev);
@@ -222,7 +223,7 @@ static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
}
}
-static struct ethtool_ops xenvif_ethtool_ops = {
+static const struct ethtool_ops xenvif_ethtool_ops = {
.get_link = ethtool_op_get_link,
.get_sset_count = xenvif_get_sset_count,
@@ -230,7 +231,7 @@ static struct ethtool_ops xenvif_ethtool_ops = {
.get_strings = xenvif_get_strings,
};
-static struct net_device_ops xenvif_netdev_ops = {
+static const struct net_device_ops xenvif_netdev_ops = {
.ndo_start_xmit = xenvif_start_xmit,
.ndo_get_stats = xenvif_get_stats,
.ndo_open = xenvif_open,
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 15e332d..59effac 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -395,7 +395,7 @@ static void netbk_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
struct gnttab_copy *copy_gop;
struct netbk_rx_meta *meta;
/*
- * These variables a used iff get_page_ext returns true,
+ * These variables are used iff get_page_ext returns true,
* in which case they are guaranteed to be initialized.
*/
unsigned int uninitialized_var(group), uninitialized_var(idx);
@@ -940,8 +940,6 @@ static struct gnttab_copy *xen_netbk_get_requests(struct xen_netbk *netbk,
if (!page)
return NULL;
- netbk->mmap_pages[pending_idx] = page;
-
gop->source.u.ref = txp->gref;
gop->source.domid = vif->domid;
gop->source.offset = txp->offset;
@@ -1336,8 +1334,6 @@ static unsigned xen_netbk_tx_build_gops(struct xen_netbk *netbk)
continue;
}
- netbk->mmap_pages[pending_idx] = page;
-
gop->source.u.ref = txreq.gref;
gop->source.domid = vif->domid;
gop->source.offset = txreq.offset;
@@ -1638,7 +1634,7 @@ static int __init netback_init(void)
int rc = 0;
int group;
- if (!xen_pv_domain())
+ if (!xen_domain())
return -ENODEV;
xen_netbk_group_nr = num_online_cpus();
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 1ce729d..410018c 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -474,17 +474,14 @@ static const struct xenbus_device_id netback_ids[] = {
};
-static struct xenbus_driver netback = {
- .name = "vif",
- .owner = THIS_MODULE,
- .ids = netback_ids,
+static DEFINE_XENBUS_DRIVER(netback, ,
.probe = netback_probe,
.remove = netback_remove,
.uevent = netback_uevent,
.otherend_changed = frontend_changed,
-};
+);
int xenvif_xenbus_init(void)
{
- return xenbus_register_backend(&netback);
+ return xenbus_register_backend(&netback_driver);
}
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 226faab..698b905 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -68,7 +68,7 @@ struct netfront_cb {
#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
-#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
+#define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)
struct netfront_stats {
u64 rx_packets;
@@ -201,7 +201,7 @@ static void xennet_sysfs_delif(struct net_device *netdev);
#define xennet_sysfs_delif(dev) do { } while (0)
#endif
-static int xennet_can_sg(struct net_device *dev)
+static bool xennet_can_sg(struct net_device *dev)
{
return dev->features & NETIF_F_SG;
}
@@ -1190,7 +1190,8 @@ static void xennet_uninit(struct net_device *dev)
gnttab_free_grant_references(np->gref_rx_head);
}
-static u32 xennet_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t xennet_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct netfront_info *np = netdev_priv(dev);
int val;
@@ -1216,7 +1217,8 @@ static u32 xennet_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int xennet_set_features(struct net_device *dev, u32 features)
+static int xennet_set_features(struct net_device *dev,
+ netdev_features_t features)
{
if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
netdev_info(dev, "Reducing MTU because no SG offload");
@@ -1707,7 +1709,6 @@ static void netback_changed(struct xenbus_device *dev,
case XenbusStateInitialised:
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
- case XenbusStateConnected:
case XenbusStateUnknown:
case XenbusStateClosed:
break;
@@ -1718,6 +1719,9 @@ static void netback_changed(struct xenbus_device *dev,
if (xennet_connect(netdev) != 0)
break;
xenbus_switch_state(dev, XenbusStateConnected);
+ break;
+
+ case XenbusStateConnected:
netif_notify_peers(netdev);
break;
@@ -1910,7 +1914,7 @@ static void xennet_sysfs_delif(struct net_device *netdev)
#endif /* CONFIG_SYSFS */
-static struct xenbus_device_id netfront_ids[] = {
+static const struct xenbus_device_id netfront_ids[] = {
{ "vif" },
{ "" }
};
@@ -1937,15 +1941,12 @@ static int __devexit xennet_remove(struct xenbus_device *dev)
return 0;
}
-static struct xenbus_driver netfront_driver = {
- .name = "vif",
- .owner = THIS_MODULE,
- .ids = netfront_ids,
+static DEFINE_XENBUS_DRIVER(netfront, ,
.probe = netfront_probe,
.remove = __devexit_p(xennet_remove),
.resume = netfront_resume,
.otherend_changed = netback_changed,
-};
+);
static int __init netif_init(void)
{
diff --git a/drivers/nfc/pn533.c b/drivers/nfc/pn533.c
index 7bcb1fe..1a1500b 100644
--- a/drivers/nfc/pn533.c
+++ b/drivers/nfc/pn533.c
@@ -72,6 +72,7 @@ MODULE_DEVICE_TABLE(usb, pn533_table);
#define PN533_CMD_IN_LIST_PASSIVE_TARGET 0x4A
#define PN533_CMD_IN_ATR 0x50
#define PN533_CMD_IN_RELEASE 0x52
+#define PN533_CMD_IN_JUMP_FOR_DEP 0x56
#define PN533_CMD_RESPONSE(cmd) (cmd + 1)
@@ -231,6 +232,26 @@ struct pn533_cmd_activate_response {
u8 gt[];
} __packed;
+/* PN533_CMD_IN_JUMP_FOR_DEP */
+struct pn533_cmd_jump_dep {
+ u8 active;
+ u8 baud;
+ u8 next;
+ u8 gt[];
+} __packed;
+
+struct pn533_cmd_jump_dep_response {
+ u8 status;
+ u8 tg;
+ u8 nfcid3t[10];
+ u8 didt;
+ u8 bst;
+ u8 brt;
+ u8 to;
+ u8 ppt;
+ /* optional */
+ u8 gt[];
+} __packed;
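
The two __packed structures above mirror the PN533 InJumpForDEP command and response byte layouts so the driver can overlay them directly on the frame parameter area. A host-side check that the fixed part of the response is 17 bytes, matching the "ATR_RES general bytes are located at offset 17" comment later in this patch:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct jump_dep_response {
        uint8_t status;
        uint8_t tg;
        uint8_t nfcid3t[10];
        uint8_t didt;
        uint8_t bst;
        uint8_t brt;
        uint8_t to;
        uint8_t ppt;
        uint8_t gt[];       /* optional general bytes follow the fixed part */
    } __attribute__((packed));

    int main(void)
    {
        printf("fixed header: %zu bytes\n", sizeof(struct jump_dep_response));
        printf("gt[] starts at offset %zu\n",
               offsetof(struct jump_dep_response, gt));
        return 0;
    }
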
struct pn533 {
struct usb_device *udev;
@@ -1121,6 +1142,7 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev)
{
struct pn533_cmd_activate_param param;
struct pn533_cmd_activate_response *resp;
+ u16 gt_len;
int rc;
nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
@@ -1146,7 +1168,11 @@ static int pn533_activate_target_nfcdep(struct pn533 *dev)
if (rc != PN533_CMD_RET_SUCCESS)
return -EIO;
- return 0;
+ /* ATR_RES general bytes are located at offset 16 */
+ gt_len = PN533_FRAME_CMD_PARAMS_LEN(dev->in_frame) - 16;
+ rc = nfc_set_remote_general_bytes(dev->nfc_dev, resp->gt, gt_len);
+
+ return rc;
}
static int pn533_activate_target(struct nfc_dev *nfc_dev, u32 target_idx,
@@ -1239,6 +1265,142 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev, u32 target_idx)
return;
}
+
+static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
+ u8 *params, int params_len)
+{
+ struct pn533_cmd_jump_dep *cmd;
+ struct pn533_cmd_jump_dep_response *resp;
+ struct nfc_target nfc_target;
+ u8 target_gt_len;
+ int rc;
+
+ if (params_len == -ENOENT) {
+ nfc_dev_dbg(&dev->interface->dev, "");
+ return 0;
+ }
+
+ if (params_len < 0) {
+ nfc_dev_err(&dev->interface->dev,
+ "Error %d when bringing DEP link up",
+ params_len);
+ return 0;
+ }
+
+ if (dev->tgt_available_prots &&
+ !(dev->tgt_available_prots & (1 << NFC_PROTO_NFC_DEP))) {
+ nfc_dev_err(&dev->interface->dev,
+ "The target does not support DEP");
+ return -EINVAL;
+ }
+
+ resp = (struct pn533_cmd_jump_dep_response *) params;
+ cmd = (struct pn533_cmd_jump_dep *) arg;
+ rc = resp->status & PN533_CMD_RET_MASK;
+ if (rc != PN533_CMD_RET_SUCCESS) {
+ nfc_dev_err(&dev->interface->dev,
+ "Bringing DEP link up failed %d", rc);
+ return 0;
+ }
+
+ if (!dev->tgt_available_prots) {
+ nfc_dev_dbg(&dev->interface->dev, "Creating new target");
+
+ nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK;
+ rc = nfc_targets_found(dev->nfc_dev, &nfc_target, 1);
+ if (rc)
+ return 0;
+
+ dev->tgt_available_prots = 0;
+ }
+
+ dev->tgt_active_prot = NFC_PROTO_NFC_DEP;
+
+ /* ATR_RES general bytes are located at offset 17 */
+ target_gt_len = PN533_FRAME_CMD_PARAMS_LEN(dev->in_frame) - 17;
+ rc = nfc_set_remote_general_bytes(dev->nfc_dev,
+ resp->gt, target_gt_len);
+ if (rc == 0)
+ rc = nfc_dep_link_is_up(dev->nfc_dev,
+ dev->nfc_dev->targets[0].idx,
+ !cmd->active, NFC_RF_INITIATOR);
+
+ return 0;
+}
+
+static int pn533_dep_link_up(struct nfc_dev *nfc_dev, int target_idx,
+ u8 comm_mode, u8 rf_mode)
+{
+ struct pn533 *dev = nfc_get_drvdata(nfc_dev);
+ struct pn533_cmd_jump_dep *cmd;
+ u8 cmd_len, local_gt_len, *local_gt;
+ int rc;
+
+ nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
+
+ if (rf_mode == NFC_RF_TARGET) {
+ nfc_dev_err(&dev->interface->dev, "Target mode not supported");
+ return -EOPNOTSUPP;
+ }
+
+
+ if (dev->poll_mod_count) {
+ nfc_dev_err(&dev->interface->dev,
+ "Cannot bring the DEP link up while polling");
+ return -EBUSY;
+ }
+
+ if (dev->tgt_active_prot) {
+ nfc_dev_err(&dev->interface->dev,
+ "There is already an active target");
+ return -EBUSY;
+ }
+
+ local_gt = nfc_get_local_general_bytes(dev->nfc_dev, &local_gt_len);
+ if (local_gt_len > NFC_MAX_GT_LEN)
+ return -EINVAL;
+
+ cmd_len = sizeof(struct pn533_cmd_jump_dep) + local_gt_len;
+ cmd = kzalloc(cmd_len, GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ pn533_tx_frame_init(dev->out_frame, PN533_CMD_IN_JUMP_FOR_DEP);
+
+ cmd->active = !comm_mode;
+ cmd->baud = 0;
+ if (local_gt != NULL) {
+ cmd->next = 4; /* We have some Gi */
+ memcpy(cmd->gt, local_gt, local_gt_len);
+ } else {
+ cmd->next = 0;
+ }
+
+ memcpy(PN533_FRAME_CMD_PARAMS_PTR(dev->out_frame), cmd, cmd_len);
+ dev->out_frame->datalen += cmd_len;
+
+ pn533_tx_frame_finish(dev->out_frame);
+
+ rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame,
+ dev->in_maxlen, pn533_in_dep_link_up_complete,
+ cmd, GFP_KERNEL);
+ if (rc)
+ goto out;
+
+
+out:
+ kfree(cmd);
+
+ return rc;
+}
+
+static int pn533_dep_link_down(struct nfc_dev *nfc_dev)
+{
+ pn533_deactivate_target(nfc_dev, 0);
+
+ return 0;
+}
+
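
pn533_dep_link_up() builds its command as a fixed header plus a variable number of local general bytes: allocate sizeof(header) + gt_len in one chunk, fill the fields, and copy the bytes into the trailing gt[] array. The same flexible-array pattern in stand-alone form (calloc stands in for kzalloc/GFP_KERNEL; the gt bytes here are made up):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct jump_dep_cmd {
        uint8_t active;
        uint8_t baud;
        uint8_t next;
        uint8_t gt[];               /* variable-length general bytes */
    } __attribute__((packed));

    int main(void)
    {
        const uint8_t local_gt[] = { 0x46, 0x66, 0x6d, 0x01, 0x01, 0x10 };
        size_t gt_len = sizeof(local_gt);

        struct jump_dep_cmd *cmd = calloc(1, sizeof(*cmd) + gt_len);
        if (!cmd)
            return 1;

        cmd->active = 1;
        cmd->baud = 0;
        cmd->next = 4;              /* "we have some Gi", as in the driver */
        memcpy(cmd->gt, local_gt, gt_len);

        printf("command length: %zu bytes\n", sizeof(*cmd) + gt_len);
        free(cmd);
        return 0;
    }
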
#define PN533_CMD_DATAEXCH_HEAD_LEN (sizeof(struct pn533_frame) + 3)
#define PN533_CMD_DATAEXCH_DATA_MAXLEN 262
@@ -1339,7 +1501,7 @@ error:
return 0;
}
-int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
+static int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
struct sk_buff *skb,
data_exchange_cb_t cb,
void *cb_context)
@@ -1368,7 +1530,7 @@ int pn533_data_exchange(struct nfc_dev *nfc_dev, u32 target_idx,
PN533_CMD_DATAEXCH_DATA_MAXLEN +
PN533_FRAME_TAIL_SIZE;
- skb_resp = nfc_alloc_skb(skb_resp_len, GFP_KERNEL);
+ skb_resp = nfc_alloc_recv_skb(skb_resp_len, GFP_KERNEL);
if (!skb_resp) {
rc = -ENOMEM;
goto error;
@@ -1434,6 +1596,8 @@ static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata,
struct nfc_ops pn533_nfc_ops = {
.dev_up = NULL,
.dev_down = NULL,
+ .dep_link_up = pn533_dep_link_up,
+ .dep_link_down = pn533_dep_link_down,
.start_poll = pn533_start_poll,
.stop_poll = pn533_stop_poll,
.activate_target = pn533_activate_target,
@@ -1597,24 +1761,7 @@ static struct usb_driver pn533_driver = {
.id_table = pn533_table,
};
-static int __init pn533_init(void)
-{
- int rc;
-
- rc = usb_register(&pn533_driver);
- if (rc)
- err("usb_register failed. Error number %d", rc);
-
- return rc;
-}
-
-static void __exit pn533_exit(void)
-{
- usb_deregister(&pn533_driver);
-}
-
-module_init(pn533_init);
-module_exit(pn533_exit);
+module_usb_driver(pn533_driver);
MODULE_AUTHOR("Lauro Ramos Venancio <lauro.venancio@openbossa.org>,"
" Aloisio Almeida Jr <aloisio.almeida@openbossa.org>");
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index cac63c9..268163d 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -15,6 +15,15 @@ config PROC_DEVICETREE
an image of the device tree that the kernel copies from Open
Firmware or other boot firmware. If unsure, say Y here.
+config OF_SELFTEST
+ bool "Device Tree Runtime self tests"
+ help
+ This option builds in test cases for the device tree infrastructure
+ that are executed one at boot time, and the results dumped to the
+ console.
+
+ If unsure, say N here, but this option is safe to enable.
+
config OF_FLATTREE
bool
select DTC
diff --git a/drivers/of/Makefile b/drivers/of/Makefile
index dccb117..a73f5a5 100644
--- a/drivers/of/Makefile
+++ b/drivers/of/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_OF_GPIO) += gpio.o
obj-$(CONFIG_OF_I2C) += of_i2c.o
obj-$(CONFIG_OF_NET) += of_net.o
obj-$(CONFIG_OF_SPI) += of_spi.o
+obj-$(CONFIG_OF_SELFTEST) += selftest.o
obj-$(CONFIG_OF_MDIO) += of_mdio.o
obj-$(CONFIG_OF_PCI) += of_pci.o
obj-$(CONFIG_OF_PCI_IRQ) += of_pci_irq.o
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 72c33fb..66d96f1 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -14,7 +14,7 @@
static struct of_bus *of_match_bus(struct device_node *np);
static int __of_address_to_resource(struct device_node *dev,
const __be32 *addrp, u64 size, unsigned int flags,
- struct resource *r);
+ const char *name, struct resource *r);
/* Debug utility */
#ifdef DEBUG
@@ -215,7 +215,7 @@ int of_pci_address_to_resource(struct device_node *dev, int bar,
addrp = of_get_pci_address(dev, bar, &size, &flags);
if (addrp == NULL)
return -EINVAL;
- return __of_address_to_resource(dev, addrp, size, flags, r);
+ return __of_address_to_resource(dev, addrp, size, flags, NULL, r);
}
EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
#endif /* CONFIG_PCI */
@@ -529,7 +529,7 @@ EXPORT_SYMBOL(of_get_address);
static int __of_address_to_resource(struct device_node *dev,
const __be32 *addrp, u64 size, unsigned int flags,
- struct resource *r)
+ const char *name, struct resource *r)
{
u64 taddr;
@@ -551,7 +551,8 @@ static int __of_address_to_resource(struct device_node *dev,
r->end = taddr + size - 1;
}
r->flags = flags;
- r->name = dev->full_name;
+ r->name = name ? name : dev->full_name;
+
return 0;
}
@@ -569,11 +570,16 @@ int of_address_to_resource(struct device_node *dev, int index,
const __be32 *addrp;
u64 size;
unsigned int flags;
+ const char *name = NULL;
addrp = of_get_address(dev, index, &size, &flags);
if (addrp == NULL)
return -EINVAL;
- return __of_address_to_resource(dev, addrp, size, flags, r);
+
+ /* Get optional "reg-names" property to add a name to a resource */
+ of_property_read_string_index(dev, "reg-names", index, &name);
+
+ return __of_address_to_resource(dev, addrp, size, flags, name, r);
}
EXPORT_SYMBOL_GPL(of_address_to_resource);
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 9b6588e..133908a 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -752,7 +752,7 @@ int of_property_read_string_index(struct device_node *np, const char *propname,
for (i = 0; total < prop->length; total += l, p += l) {
l = strlen(p) + 1;
- if ((*p != 0) && (i++ == index)) {
+ if (i++ == index) {
*output = p;
return 0;
}
@@ -790,11 +790,9 @@ int of_property_count_strings(struct device_node *np, const char *propname)
p = prop->value;
- for (i = 0; total < prop->length; total += l, p += l) {
+ for (i = 0; total < prop->length; total += l, p += l, i++)
l = strlen(p) + 1;
- if (*p != 0)
- i++;
- }
+
return i;
}
EXPORT_SYMBOL_GPL(of_property_count_strings);
@@ -824,17 +822,19 @@ of_parse_phandle(struct device_node *np, const char *phandle_name, int index)
EXPORT_SYMBOL(of_parse_phandle);
/**
- * of_parse_phandles_with_args - Find a node pointed by phandle in a list
+ * of_parse_phandle_with_args() - Find a node pointed by phandle in a list
* @np: pointer to a device tree node containing a list
* @list_name: property name that contains a list
* @cells_name: property name that specifies phandles' arguments count
* @index: index of a phandle to parse out
- * @out_node: optional pointer to device_node struct pointer (will be filled)
- * @out_args: optional pointer to arguments pointer (will be filled)
+ * @out_args: optional pointer to output arguments structure (will be filled)
*
* This function is useful to parse lists of phandles and their arguments.
- * Returns 0 on success and fills out_node and out_args, on error returns
- * appropriate errno value.
+ * Returns 0 on success and fills out_args, on error returns appropriate
+ * errno value.
+ *
+ * Caller is responsible to call of_node_put() on the returned out_args->node
+ * pointer.
*
* Example:
*
@@ -851,94 +851,96 @@ EXPORT_SYMBOL(of_parse_phandle);
* }
*
* To get a device_node of the `node2' node you may call this:
- * of_parse_phandles_with_args(node3, "list", "#list-cells", 2, &node2, &args);
+ * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
*/
-int of_parse_phandles_with_args(struct device_node *np, const char *list_name,
+int of_parse_phandle_with_args(struct device_node *np, const char *list_name,
const char *cells_name, int index,
- struct device_node **out_node,
- const void **out_args)
+ struct of_phandle_args *out_args)
{
- int ret = -EINVAL;
- const __be32 *list;
- const __be32 *list_end;
- int size;
- int cur_index = 0;
+ const __be32 *list, *list_end;
+ int size, cur_index = 0;
+ uint32_t count = 0;
struct device_node *node = NULL;
- const void *args = NULL;
+ phandle phandle;
+ /* Retrieve the phandle list property */
list = of_get_property(np, list_name, &size);
- if (!list) {
- ret = -ENOENT;
- goto err0;
- }
+ if (!list)
+ return -EINVAL;
list_end = list + size / sizeof(*list);
+ /* Loop over the phandles until the requested entry is found */
while (list < list_end) {
- const __be32 *cells;
- phandle phandle;
+ count = 0;
+ /*
+ * If phandle is 0, then it is an empty entry with no
+ * arguments. Skip forward to the next entry.
+ */
phandle = be32_to_cpup(list++);
- args = list;
-
- /* one cell hole in the list = <>; */
- if (!phandle)
- goto next;
-
- node = of_find_node_by_phandle(phandle);
- if (!node) {
- pr_debug("%s: could not find phandle\n",
- np->full_name);
- goto err0;
- }
+ if (phandle) {
+ /*
+ * Find the provider node and parse the #*-cells
+ * property to determine the argument length
+ */
+ node = of_find_node_by_phandle(phandle);
+ if (!node) {
+ pr_err("%s: could not find phandle\n",
+ np->full_name);
+ break;
+ }
+ if (of_property_read_u32(node, cells_name, &count)) {
+ pr_err("%s: could not get %s for %s\n",
+ np->full_name, cells_name,
+ node->full_name);
+ break;
+ }
- cells = of_get_property(node, cells_name, &size);
- if (!cells || size != sizeof(*cells)) {
- pr_debug("%s: could not get %s for %s\n",
- np->full_name, cells_name, node->full_name);
- goto err1;
+ /*
+ * Make sure that the arguments actually fit in the
+ * remaining property data length
+ */
+ if (list + count > list_end) {
+ pr_err("%s: arguments longer than property\n",
+ np->full_name);
+ break;
+ }
}
- list += be32_to_cpup(cells);
- if (list > list_end) {
- pr_debug("%s: insufficient arguments length\n",
- np->full_name);
- goto err1;
+ /*
+ * All of the error cases above bail out of the loop, so at
+ * this point, the parsing is successful. If the requested
+ * index matches, then fill the out_args structure and return,
+ * or return -ENOENT for an empty entry.
+ */
+ if (cur_index == index) {
+ if (!phandle)
+ return -ENOENT;
+
+ if (out_args) {
+ int i;
+ if (WARN_ON(count > MAX_PHANDLE_ARGS))
+ count = MAX_PHANDLE_ARGS;
+ out_args->np = node;
+ out_args->args_count = count;
+ for (i = 0; i < count; i++)
+ out_args->args[i] = be32_to_cpup(list++);
+ }
+ return 0;
}
-next:
- if (cur_index == index)
- break;
of_node_put(node);
node = NULL;
- args = NULL;
+ list += count;
cur_index++;
}
- if (!node) {
- /*
- * args w/o node indicates that the loop above has stopped at
- * the 'hole' cell. Report this differently.
- */
- if (args)
- ret = -EEXIST;
- else
- ret = -ENOENT;
- goto err0;
- }
-
- if (out_node)
- *out_node = node;
- if (out_args)
- *out_args = args;
-
- return 0;
-err1:
- of_node_put(node);
-err0:
- pr_debug("%s failed with status %d\n", __func__, ret);
- return ret;
+ /* Loop exited without finding a valid entry; return an error */
+ if (node)
+ of_node_put(node);
+ return -EINVAL;
}
-EXPORT_SYMBOL(of_parse_phandles_with_args);
+EXPORT_SYMBOL(of_parse_phandle_with_args);
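[Editor's note: a minimal consumer-side sketch of the reworked call, for context. The "clocks"/"#clock-cells" names, the two-cell specifier and the np variable (the consumer's device_node) are illustrative assumptions, not part of this patch.]

	struct of_phandle_args args;
	int ret;

	/* e.g. a consumer node carrying: clocks = <&provider 3 0>; */
	ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0, &args);
	if (!ret) {
		/* args.np is the provider node, args.args[] holds its cells */
		pr_info("provider %s: %d cells, first = %u\n",
			args.np->full_name, args.args_count, args.args[0]);
		of_node_put(args.np);	/* the parser takes a reference */
	}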
/**
* prom_add_property - Add a property to a node
@@ -1159,7 +1161,7 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
if (!of_aliases)
return;
- for_each_property(pp, of_aliases->properties) {
+ for_each_property_of_node(of_aliases, pp) {
const char *start = pp->name;
const char *end = start + strlen(start);
struct device_node *np;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index fd85fa2..91a375f 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -18,6 +18,7 @@
#include <linux/errno.h>
#include <linux/slab.h>
+#include <asm/setup.h> /* for COMMAND_LINE_SIZE */
#ifdef CONFIG_PPC
#include <asm/machdep.h>
#endif /* CONFIG_PPC */
@@ -107,7 +108,7 @@ int of_fdt_is_compatible(struct boot_param_header *blob,
* of_fdt_match - Return true if node matches a list of compatible values
*/
int of_fdt_match(struct boot_param_header *blob, unsigned long node,
- const char **compat)
+ const char *const *compat)
{
unsigned int tmp, score = 0;
@@ -541,7 +542,7 @@ int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
/**
* of_flat_dt_match - Return true if node matches a list of compatible values
*/
-int __init of_flat_dt_match(unsigned long node, const char **compat)
+int __init of_flat_dt_match(unsigned long node, const char *const *compat)
{
return of_fdt_match(initial_boot_params, node, compat);
}
diff --git a/drivers/of/gpio.c b/drivers/of/gpio.c
index ef0105f..7e62d15 100644
--- a/drivers/of/gpio.c
+++ b/drivers/of/gpio.c
@@ -35,32 +35,27 @@ int of_get_named_gpio_flags(struct device_node *np, const char *propname,
int index, enum of_gpio_flags *flags)
{
int ret;
- struct device_node *gpio_np;
struct gpio_chip *gc;
- int size;
- const void *gpio_spec;
- const __be32 *gpio_cells;
+ struct of_phandle_args gpiospec;
- ret = of_parse_phandles_with_args(np, propname, "#gpio-cells", index,
- &gpio_np, &gpio_spec);
+ ret = of_parse_phandle_with_args(np, propname, "#gpio-cells", index,
+ &gpiospec);
if (ret) {
pr_debug("%s: can't parse gpios property\n", __func__);
goto err0;
}
- gc = of_node_to_gpiochip(gpio_np);
+ gc = of_node_to_gpiochip(gpiospec.np);
if (!gc) {
pr_debug("%s: gpio controller %s isn't registered\n",
- np->full_name, gpio_np->full_name);
+ np->full_name, gpiospec.np->full_name);
ret = -ENODEV;
goto err1;
}
- gpio_cells = of_get_property(gpio_np, "#gpio-cells", &size);
- if (!gpio_cells || size != sizeof(*gpio_cells) ||
- be32_to_cpup(gpio_cells) != gc->of_gpio_n_cells) {
+ if (gpiospec.args_count != gc->of_gpio_n_cells) {
pr_debug("%s: wrong #gpio-cells for %s\n",
- np->full_name, gpio_np->full_name);
+ np->full_name, gpiospec.np->full_name);
ret = -EINVAL;
goto err1;
}
@@ -69,13 +64,13 @@ int of_get_named_gpio_flags(struct device_node *np, const char *propname,
if (flags)
*flags = 0;
- ret = gc->of_xlate(gc, np, gpio_spec, flags);
+ ret = gc->of_xlate(gc, &gpiospec, flags);
if (ret < 0)
goto err1;
ret += gc->base;
err1:
- of_node_put(gpio_np);
+ of_node_put(gpiospec.np);
err0:
pr_debug("%s exited with status %d\n", __func__, ret);
return ret;
@@ -105,8 +100,8 @@ unsigned int of_gpio_count(struct device_node *np)
do {
int ret;
- ret = of_parse_phandles_with_args(np, "gpios", "#gpio-cells",
- cnt, NULL, NULL);
+ ret = of_parse_phandle_with_args(np, "gpios", "#gpio-cells",
+ cnt, NULL);
/* A hole in the gpios = <> counts anyway. */
if (ret < 0 && ret != -EEXIST)
break;
@@ -127,12 +122,9 @@ EXPORT_SYMBOL(of_gpio_count);
* gpio chips. This function performs only one sanity check: whether gpio
* is less than ngpios (that is specified in the gpio_chip).
*/
-int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np,
- const void *gpio_spec, u32 *flags)
+int of_gpio_simple_xlate(struct gpio_chip *gc,
+ const struct of_phandle_args *gpiospec, u32 *flags)
{
- const __be32 *gpio = gpio_spec;
- const u32 n = be32_to_cpup(gpio);
-
/*
* We're discouraging gpio_cells < 2, since that way you'll have to
 * write your own xlate function (that will have to retrieve the GPIO
@@ -144,13 +136,16 @@ int of_gpio_simple_xlate(struct gpio_chip *gc, struct device_node *np,
return -EINVAL;
}
- if (n > gc->ngpio)
+ if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells))
+ return -EINVAL;
+
+ if (gpiospec->args[0] > gc->ngpio)
return -EINVAL;
if (flags)
- *flags = be32_to_cpu(gpio[1]);
+ *flags = gpiospec->args[1];
- return n;
+ return gpiospec->args[0];
}
EXPORT_SYMBOL(of_gpio_simple_xlate);
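[Editor's note: a hedged sketch of a controller-specific translator using the new callback signature shared by of_gpio_simple_xlate() and gc->of_xlate(). The assumed three-cell binding <&ctrl bank pin flags> and the bank width of 32 are illustrative, not part of this patch.]

	static int my_gpio_xlate(struct gpio_chip *gc,
				 const struct of_phandle_args *gpiospec,
				 u32 *flags)
	{
		u32 pin;

		if (WARN_ON(gpiospec->args_count < 3))
			return -EINVAL;

		pin = gpiospec->args[0] * 32 + gpiospec->args[1];
		if (pin > gc->ngpio)
			return -EINVAL;

		if (flags)
			*flags = gpiospec->args[2];

		return pin;	/* chip-relative GPIO number */
	}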
@@ -198,8 +193,6 @@ int of_mm_gpiochip_add(struct device_node *np,
if (ret)
goto err2;
- pr_debug("%s: registered as generic GPIO chip, base is %d\n",
- np->full_name, gc->base);
return 0;
err2:
iounmap(mm_gc->regs);
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 0f0cfa3..9cf0060 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -341,9 +341,18 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
/* Only dereference the resource if both the
* resource and the irq are valid. */
if (r && irq) {
+ const char *name = NULL;
+
+ /*
+ * Get optional "interrupt-names" property to add a name
+ * to the resource.
+ */
+ of_property_read_string_index(dev, "interrupt-names", index,
+ &name);
+
r->start = r->end = irq;
r->flags = IORESOURCE_IRQ;
- r->name = dev->full_name;
+ r->name = name ? name : dev->full_name;
}
return irq;
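[Editor's note: because the resource now carries the optional name, a driver bound to such a node can look its IRQ resource up by name. A brief sketch; the DT fragment, the "tx" name and the pdev variable are assumptions for illustration.]

	/* Hypothetical DT fragment:
	 *	interrupts = <0 52 4>, <0 53 4>;
	 *	interrupt-names = "rx", "tx";
	 */
	struct resource *res;

	res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "tx");
	if (res)
		dev_info(&pdev->dev, "tx irq resource %pR\n", res);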
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 980c079..483c0ad 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -182,7 +182,7 @@ struct phy_device *of_phy_connect_fixed_link(struct net_device *dev,
if (!phy_id || sz < sizeof(*phy_id))
return NULL;
- sprintf(bus_id, PHY_ID_FMT, "0", be32_to_cpu(phy_id[0]));
+ sprintf(bus_id, PHY_ID_FMT, "fixed-0", be32_to_cpu(phy_id[0]));
phy = phy_connect(dev, bus_id, hndlr, 0, iface);
return IS_ERR(phy) ? NULL : phy;
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c
index bc5b399..07cc1d6 100644
--- a/drivers/of/pdt.c
+++ b/drivers/of/pdt.c
@@ -229,7 +229,7 @@ static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
return ret;
}
-static void *kernel_tree_alloc(u64 size, u64 align)
+static void * __init kernel_tree_alloc(u64 size, u64 align)
{
return prom_early_alloc(size);
}
diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c
new file mode 100644
index 0000000..9d2b480
--- /dev/null
+++ b/drivers/of/selftest.c
@@ -0,0 +1,139 @@
+/*
+ * Self tests for device tree subsystem
+ */
+
+#define pr_fmt(fmt) "### %s(): " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+
+static bool selftest_passed = true;
+#define selftest(result, fmt, ...) { \
+ selftest_passed &= (result); \
+ if (!(result)) \
+ pr_err("FAIL %s:%i " fmt, __FILE__, __LINE__, ##__VA_ARGS__); \
+}
+
+static void __init of_selftest_parse_phandle_with_args(void)
+{
+ struct device_node *np;
+ struct of_phandle_args args;
+ int rc, i;
+ bool passed_all = true;
+
+ pr_info("start\n");
+ np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a");
+ if (!np) {
+ pr_err("missing testcase data\n");
+ return;
+ }
+
+ for (i = 0; i < 7; i++) {
+ bool passed = true;
+ rc = of_parse_phandle_with_args(np, "phandle-list",
+ "#phandle-cells", i, &args);
+
+ /* Test the values from tests-phandle.dtsi */
+ switch (i) {
+ case 0:
+ passed &= !rc;
+ passed &= (args.args_count == 1);
+ passed &= (args.args[0] == (i + 1));
+ break;
+ case 1:
+ passed &= !rc;
+ passed &= (args.args_count == 2);
+ passed &= (args.args[0] == (i + 1));
+ passed &= (args.args[1] == 0);
+ break;
+ case 2:
+ passed &= (rc == -ENOENT);
+ break;
+ case 3:
+ passed &= !rc;
+ passed &= (args.args_count == 3);
+ passed &= (args.args[0] == (i + 1));
+ passed &= (args.args[1] == 4);
+ passed &= (args.args[2] == 3);
+ break;
+ case 4:
+ passed &= !rc;
+ passed &= (args.args_count == 2);
+ passed &= (args.args[0] == (i + 1));
+ passed &= (args.args[1] == 100);
+ break;
+ case 5:
+ passed &= !rc;
+ passed &= (args.args_count == 0);
+ break;
+ case 6:
+ passed &= !rc;
+ passed &= (args.args_count == 1);
+ passed &= (args.args[0] == (i + 1));
+ break;
+ case 7:
+ passed &= (rc == -EINVAL);
+ break;
+ default:
+ passed = false;
+ }
+
+ if (!passed) {
+ int j;
+ pr_err("index %i - data error on node %s rc=%i regs=[",
+ i, args.np->full_name, rc);
+ for (j = 0; j < args.args_count; j++)
+ printk(" %i", args.args[j]);
+ printk(" ]\n");
+
+ passed_all = false;
+ }
+ }
+
+ /* Check for missing list property */
+ rc = of_parse_phandle_with_args(np, "phandle-list-missing",
+ "#phandle-cells", 0, &args);
+ passed_all &= (rc == -EINVAL);
+
+ /* Check for missing cells property */
+ rc = of_parse_phandle_with_args(np, "phandle-list",
+ "#phandle-cells-missing", 0, &args);
+ passed_all &= (rc == -EINVAL);
+
+ /* Check for bad phandle in list */
+ rc = of_parse_phandle_with_args(np, "phandle-list-bad-phandle",
+ "#phandle-cells", 0, &args);
+ passed_all &= (rc == -EINVAL);
+
+ /* Check for incorrectly formed argument list */
+ rc = of_parse_phandle_with_args(np, "phandle-list-bad-args",
+ "#phandle-cells", 1, &args);
+ passed_all &= (rc == -EINVAL);
+
+ pr_info("end - %s\n", passed_all ? "PASS" : "FAIL");
+}
+
+static int __init of_selftest(void)
+{
+ struct device_node *np;
+
+ np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a");
+ if (!np) {
+ pr_info("No testcase data in device tree; not running tests\n");
+ return 0;
+ }
+ of_node_put(np);
+
+ pr_info("start of selftest - you will see error messages\n");
+ of_selftest_parse_phandle_with_args();
+ pr_info("end of selftest - %s\n", selftest_passed ? "PASS" : "FAIL");
+ return 0;
+}
+late_initcall(of_selftest);
diff --git a/drivers/oprofile/nmi_timer_int.c b/drivers/oprofile/nmi_timer_int.c
new file mode 100644
index 0000000..76f1c93
--- /dev/null
+++ b/drivers/oprofile/nmi_timer_int.c
@@ -0,0 +1,173 @@
+/**
+ * @file nmi_timer_int.c
+ *
+ * @remark Copyright 2011 Advanced Micro Devices, Inc.
+ *
+ * @author Robert Richter <robert.richter@amd.com>
+ */
+
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/errno.h>
+#include <linux/oprofile.h>
+#include <linux/perf_event.h>
+
+#ifdef CONFIG_OPROFILE_NMI_TIMER
+
+static DEFINE_PER_CPU(struct perf_event *, nmi_timer_events);
+static int ctr_running;
+
+static struct perf_event_attr nmi_timer_attr = {
+ .type = PERF_TYPE_HARDWARE,
+ .config = PERF_COUNT_HW_CPU_CYCLES,
+ .size = sizeof(struct perf_event_attr),
+ .pinned = 1,
+ .disabled = 1,
+};
+
+static void nmi_timer_callback(struct perf_event *event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ event->hw.interrupts = 0; /* don't throttle interrupts */
+ oprofile_add_sample(regs, 0);
+}
+
+static int nmi_timer_start_cpu(int cpu)
+{
+ struct perf_event *event = per_cpu(nmi_timer_events, cpu);
+
+ if (!event) {
+ event = perf_event_create_kernel_counter(&nmi_timer_attr, cpu, NULL,
+ nmi_timer_callback, NULL);
+ if (IS_ERR(event))
+ return PTR_ERR(event);
+ per_cpu(nmi_timer_events, cpu) = event;
+ }
+
+ if (event && ctr_running)
+ perf_event_enable(event);
+
+ return 0;
+}
+
+static void nmi_timer_stop_cpu(int cpu)
+{
+ struct perf_event *event = per_cpu(nmi_timer_events, cpu);
+
+ if (event && ctr_running)
+ perf_event_disable(event);
+}
+
+static int nmi_timer_cpu_notifier(struct notifier_block *b, unsigned long action,
+ void *data)
+{
+ int cpu = (unsigned long)data;
+ switch (action) {
+ case CPU_DOWN_FAILED:
+ case CPU_ONLINE:
+ nmi_timer_start_cpu(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ nmi_timer_stop_cpu(cpu);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block nmi_timer_cpu_nb = {
+ .notifier_call = nmi_timer_cpu_notifier
+};
+
+static int nmi_timer_start(void)
+{
+ int cpu;
+
+ get_online_cpus();
+ ctr_running = 1;
+ for_each_online_cpu(cpu)
+ nmi_timer_start_cpu(cpu);
+ put_online_cpus();
+
+ return 0;
+}
+
+static void nmi_timer_stop(void)
+{
+ int cpu;
+
+ get_online_cpus();
+ for_each_online_cpu(cpu)
+ nmi_timer_stop_cpu(cpu);
+ ctr_running = 0;
+ put_online_cpus();
+}
+
+static void nmi_timer_shutdown(void)
+{
+ struct perf_event *event;
+ int cpu;
+
+ get_online_cpus();
+ unregister_cpu_notifier(&nmi_timer_cpu_nb);
+ for_each_possible_cpu(cpu) {
+ event = per_cpu(nmi_timer_events, cpu);
+ if (!event)
+ continue;
+ perf_event_disable(event);
+ per_cpu(nmi_timer_events, cpu) = NULL;
+ perf_event_release_kernel(event);
+ }
+
+ put_online_cpus();
+}
+
+static int nmi_timer_setup(void)
+{
+ int cpu, err;
+ u64 period;
+
+ /* clock cycles per tick: */
+ period = (u64)cpu_khz * 1000;
+ do_div(period, HZ);
+ nmi_timer_attr.sample_period = period;
+
+ get_online_cpus();
+ err = register_cpu_notifier(&nmi_timer_cpu_nb);
+ if (err)
+ goto out;
+ /* can't attach events to offline cpus: */
+ for_each_online_cpu(cpu) {
+ err = nmi_timer_start_cpu(cpu);
+ if (err)
+ break;
+ }
+ if (err)
+ nmi_timer_shutdown();
+out:
+ put_online_cpus();
+ return err;
+}
+
+int __init op_nmi_timer_init(struct oprofile_operations *ops)
+{
+ int err = 0;
+
+ err = nmi_timer_setup();
+ if (err)
+ return err;
+ nmi_timer_shutdown(); /* only check, don't alloc */
+
+ ops->create_files = NULL;
+ ops->setup = nmi_timer_setup;
+ ops->shutdown = nmi_timer_shutdown;
+ ops->start = nmi_timer_start;
+ ops->stop = nmi_timer_stop;
+ ops->cpu_type = "timer";
+
+ printk(KERN_INFO "oprofile: using NMI timer interrupt.\n");
+
+ return 0;
+}
+
+#endif
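[Editor's note: a quick sanity check of the sample period computed in nmi_timer_setup() above; the numbers are purely illustrative.]

	/* With cpu_khz = 2000000 (a 2 GHz CPU) and HZ = 250: */
	u64 period = (u64)2000000 * 1000;	/* 2,000,000,000 cycles per second */
	do_div(period, 250);			/* period == 8,000,000 cycles per tick */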
diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
index f8c752e..ed2c3ec 100644
--- a/drivers/oprofile/oprof.c
+++ b/drivers/oprofile/oprof.c
@@ -246,37 +246,31 @@ static int __init oprofile_init(void)
int err;
/* always init architecture to setup backtrace support */
+ timer_mode = 0;
err = oprofile_arch_init(&oprofile_ops);
+ if (!err) {
+ if (!timer && !oprofilefs_register())
+ return 0;
+ oprofile_arch_exit();
+ }
- timer_mode = err || timer; /* fall back to timer mode on errors */
- if (timer_mode) {
- if (!err)
- oprofile_arch_exit();
+ /* setup timer mode: */
+ timer_mode = 1;
+ /* no nmi timer mode if oprofile.timer is set */
+ if (timer || op_nmi_timer_init(&oprofile_ops)) {
err = oprofile_timer_init(&oprofile_ops);
if (err)
return err;
}
- err = oprofilefs_register();
- if (!err)
- return 0;
-
- /* failed */
- if (timer_mode)
- oprofile_timer_exit();
- else
- oprofile_arch_exit();
-
- return err;
+ return oprofilefs_register();
}
static void __exit oprofile_exit(void)
{
oprofilefs_unregister();
- if (timer_mode)
- oprofile_timer_exit();
- else
+ if (!timer_mode)
oprofile_arch_exit();
}
diff --git a/drivers/oprofile/oprof.h b/drivers/oprofile/oprof.h
index 177b73d..d32ef81 100644
--- a/drivers/oprofile/oprof.h
+++ b/drivers/oprofile/oprof.h
@@ -35,7 +35,15 @@ struct dentry;
void oprofile_create_files(struct super_block *sb, struct dentry *root);
int oprofile_timer_init(struct oprofile_operations *ops);
-void oprofile_timer_exit(void);
+#ifdef CONFIG_OPROFILE_NMI_TIMER
+int op_nmi_timer_init(struct oprofile_operations *ops);
+#else
+static inline int op_nmi_timer_init(struct oprofile_operations *ops)
+{
+ return -ENODEV;
+}
+#endif
+
int oprofile_set_ulong(unsigned long *addr, unsigned long val);
int oprofile_set_timeout(unsigned long time);
diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
index 878fba1..93404f7 100644
--- a/drivers/oprofile/timer_int.c
+++ b/drivers/oprofile/timer_int.c
@@ -97,24 +97,24 @@ static struct notifier_block __refdata oprofile_cpu_notifier = {
.notifier_call = oprofile_cpu_notify,
};
-int oprofile_timer_init(struct oprofile_operations *ops)
+static int oprofile_hrtimer_setup(void)
{
- int rc;
-
- rc = register_hotcpu_notifier(&oprofile_cpu_notifier);
- if (rc)
- return rc;
- ops->create_files = NULL;
- ops->setup = NULL;
- ops->shutdown = NULL;
- ops->start = oprofile_hrtimer_start;
- ops->stop = oprofile_hrtimer_stop;
- ops->cpu_type = "timer";
- printk(KERN_INFO "oprofile: using timer interrupt.\n");
- return 0;
+ return register_hotcpu_notifier(&oprofile_cpu_notifier);
}
-void oprofile_timer_exit(void)
+static void oprofile_hrtimer_shutdown(void)
{
unregister_hotcpu_notifier(&oprofile_cpu_notifier);
}
+
+int oprofile_timer_init(struct oprofile_operations *ops)
+{
+ ops->create_files = NULL;
+ ops->setup = oprofile_hrtimer_setup;
+ ops->shutdown = oprofile_hrtimer_shutdown;
+ ops->start = oprofile_hrtimer_start;
+ ops->stop = oprofile_hrtimer_stop;
+ ops->cpu_type = "timer";
+ printk(KERN_INFO "oprofile: using timer interrupt.\n");
+ return 0;
+}
diff --git a/drivers/parisc/Kconfig b/drivers/parisc/Kconfig
index 553a990..6202649 100644
--- a/drivers/parisc/Kconfig
+++ b/drivers/parisc/Kconfig
@@ -108,13 +108,6 @@ config IOMMU_HELPER
depends on IOMMU_SBA || IOMMU_CCIO
default y
-#config PCI_EPIC
-# bool "EPIC/SAGA PCI support"
-# depends on PCI
-# default y
-# help
-# Say Y here for V-class PCI, DMA/IOMMU, IRQ subsystem support.
-
source "drivers/pcmcia/Kconfig"
source "drivers/pci/hotplug/Kconfig"
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index bcd5d54..7ff10c1 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -562,19 +562,6 @@ dino_fixup_bus(struct pci_bus *bus)
/* Firmware doesn't set up card-mode dino, so we have to */
if (is_card_dino(&dino_dev->hba.dev->id)) {
dino_card_setup(bus, dino_dev->hba.base_addr);
- } else if(bus->parent == NULL) {
- /* must have a dino above it, reparent the resources
- * into the dino window */
- int i;
- struct resource *res = &dino_dev->hba.lmmio_space;
-
- bus->resource[0] = &(dino_dev->hba.io_space);
- for(i = 0; i < DINO_MAX_LMMIO_RESOURCES; i++) {
- if(res[i].flags == 0)
- break;
- bus->resource[i+1] = &res[i];
- }
-
} else if (bus->parent) {
int i;
@@ -927,6 +914,7 @@ static int __init dino_probe(struct parisc_device *dev)
const char *version = "unknown";
char *name;
int is_cujo = 0;
+ LIST_HEAD(resources);
struct pci_bus *bus;
unsigned long hpa = dev->hpa.start;
@@ -1003,26 +991,37 @@ static int __init dino_probe(struct parisc_device *dev)
dev->dev.platform_data = dino_dev;
+ pci_add_resource(&resources, &dino_dev->hba.io_space);
+ if (dino_dev->hba.lmmio_space.flags)
+ pci_add_resource(&resources, &dino_dev->hba.lmmio_space);
+ if (dino_dev->hba.elmmio_space.flags)
+ pci_add_resource(&resources, &dino_dev->hba.elmmio_space);
+ if (dino_dev->hba.gmmio_space.flags)
+ pci_add_resource(&resources, &dino_dev->hba.gmmio_space);
+
/*
** It's not used to avoid chicken/egg problems
** with configuration accessor functions.
*/
- dino_dev->hba.hba_bus = bus = pci_scan_bus_parented(&dev->dev,
- dino_current_bus, &dino_cfg_ops, NULL);
-
- if(bus) {
- /* This code *depends* on scanning being single threaded
- * if it isn't, this global bus number count will fail
- */
- dino_current_bus = bus->subordinate + 1;
- pci_bus_assign_resources(bus);
- pci_bus_add_devices(bus);
- } else {
+ dino_dev->hba.hba_bus = bus = pci_create_root_bus(&dev->dev,
+ dino_current_bus, &dino_cfg_ops, NULL, &resources);
+ if (!bus) {
printk(KERN_ERR "ERROR: failed to scan PCI bus on %s (duplicate bus number %d?)\n",
dev_name(&dev->dev), dino_current_bus);
+ pci_free_resource_list(&resources);
/* increment the bus number in case of duplicates */
dino_current_bus++;
+ return 0;
}
+
+ bus->subordinate = pci_scan_child_bus(bus);
+
+ /* This code *depends* on scanning being single threaded
+ * if it isn't, this global bus number count will fail
+ */
+ dino_current_bus = bus->subordinate + 1;
+ pci_bus_assign_resources(bus);
+ pci_bus_add_devices(bus);
return 0;
}
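[Editor's note: both host-bridge drivers in this patch move from pci_scan_bus_parented() to the same bring-up sequence. Stripped of driver specifics, the pattern is roughly the sketch below; parent_dev, busnr, my_cfg_ops, my_io_res, my_mem_res and sysdata are placeholders.]

	LIST_HEAD(resources);
	struct pci_bus *bus;

	pci_add_resource(&resources, &my_io_res);
	pci_add_resource(&resources, &my_mem_res);

	bus = pci_create_root_bus(parent_dev, busnr, &my_cfg_ops, sysdata,
				  &resources);
	if (!bus) {
		pci_free_resource_list(&resources);	/* nothing was consumed */
		return 0;
	}

	bus->subordinate = pci_scan_child_bus(bus);
	pci_bus_assign_resources(bus);
	pci_bus_add_devices(bus);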
diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h
index a9c46cc..8c33491 100644
--- a/drivers/parisc/iommu-helpers.h
+++ b/drivers/parisc/iommu-helpers.h
@@ -1,3 +1,5 @@
+#include <linux/prefetch.h>
+
/**
* iommu_fill_pdir - Insert coalesced scatter/gather chunks into the I/O Pdir.
* @ioc: The I/O Controller.
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 3aeb327..d5f3d75 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -653,7 +653,7 @@ lba_fixup_bus(struct pci_bus *bus)
}
} else {
/* Host-PCI Bridge */
- int err, i;
+ int err;
DBG("lba_fixup_bus() %s [%lx/%lx]/%lx\n",
ldev->hba.io_space.name,
@@ -669,9 +669,6 @@ lba_fixup_bus(struct pci_bus *bus)
lba_dump_res(&ioport_resource, 2);
BUG();
}
- /* advertize Host bridge resources to PCI bus */
- bus->resource[0] = &(ldev->hba.io_space);
- i = 1;
if (ldev->hba.elmmio_space.start) {
err = request_resource(&iomem_resource,
@@ -685,35 +682,17 @@ lba_fixup_bus(struct pci_bus *bus)
/* lba_dump_res(&iomem_resource, 2); */
/* BUG(); */
- } else
- bus->resource[i++] = &(ldev->hba.elmmio_space);
+ }
}
-
- /* Overlaps with elmmio can (and should) fail here.
- * We will prune (or ignore) the distributed range.
- *
- * FIXME: SBA code should register all elmmio ranges first.
- * that would take care of elmmio ranges routed
- * to a different rope (already discovered) from
- * getting registered *after* LBA code has already
- * registered it's distributed lmmio range.
- */
- if (truncate_pat_collision(&iomem_resource,
- &(ldev->hba.lmmio_space))) {
-
- printk(KERN_WARNING "LBA: lmmio_space [%lx/%lx] duplicate!\n",
- (long)ldev->hba.lmmio_space.start,
- (long)ldev->hba.lmmio_space.end);
- } else {
+ if (ldev->hba.lmmio_space.flags) {
err = request_resource(&iomem_resource, &(ldev->hba.lmmio_space));
if (err < 0) {
printk(KERN_ERR "FAILED: lba_fixup_bus() request for "
"lmmio_space [%lx/%lx]\n",
(long)ldev->hba.lmmio_space.start,
(long)ldev->hba.lmmio_space.end);
- } else
- bus->resource[i++] = &(ldev->hba.lmmio_space);
+ }
}
#ifdef CONFIG_64BIT
@@ -728,7 +707,6 @@ lba_fixup_bus(struct pci_bus *bus)
lba_dump_res(&iomem_resource, 2);
BUG();
}
- bus->resource[i++] = &(ldev->hba.gmmio_space);
}
#endif
@@ -1404,6 +1382,7 @@ static int __init
lba_driver_probe(struct parisc_device *dev)
{
struct lba_device *lba_dev;
+ LIST_HEAD(resources);
struct pci_bus *lba_bus;
struct pci_ops *cfg_ops;
u32 func_class;
@@ -1518,10 +1497,41 @@ lba_driver_probe(struct parisc_device *dev)
if (lba_dev->hba.bus_num.start < lba_next_bus)
lba_dev->hba.bus_num.start = lba_next_bus;
+ /* Overlaps with elmmio can (and should) fail here.
+ * We will prune (or ignore) the distributed range.
+ *
+ * FIXME: SBA code should register all elmmio ranges first.
+ * that would take care of elmmio ranges routed
+ * to a different rope (already discovered) from
+ * getting registered *after* LBA code has already
+ * registered it's distributed lmmio range.
+ */
+ if (truncate_pat_collision(&iomem_resource,
+ &(lba_dev->hba.lmmio_space))) {
+ printk(KERN_WARNING "LBA: lmmio_space [%lx/%lx] duplicate!\n",
+ (long)lba_dev->hba.lmmio_space.start,
+ (long)lba_dev->hba.lmmio_space.end);
+ lba_dev->hba.lmmio_space.flags = 0;
+ }
+
+ pci_add_resource(&resources, &lba_dev->hba.io_space);
+ if (lba_dev->hba.elmmio_space.start)
+ pci_add_resource(&resources, &lba_dev->hba.elmmio_space);
+ if (lba_dev->hba.lmmio_space.flags)
+ pci_add_resource(&resources, &lba_dev->hba.lmmio_space);
+ if (lba_dev->hba.gmmio_space.flags)
+ pci_add_resource(&resources, &lba_dev->hba.gmmio_space);
+
dev->dev.platform_data = lba_dev;
lba_bus = lba_dev->hba.hba_bus =
- pci_scan_bus_parented(&dev->dev, lba_dev->hba.bus_num.start,
- cfg_ops, NULL);
+ pci_create_root_bus(&dev->dev, lba_dev->hba.bus_num.start,
+ cfg_ops, NULL, &resources);
+ if (!lba_bus) {
+ pci_free_resource_list(&resources);
+ return 0;
+ }
+
+ lba_bus->subordinate = pci_scan_child_bus(lba_bus);
/* This is in lieu of calling pci_assign_unassigned_resources() */
if (is_pdc_pat()) {
@@ -1551,10 +1561,8 @@ lba_driver_probe(struct parisc_device *dev)
lba_dev->flags |= LBA_FLAG_SKIP_PROBE;
}
- if (lba_bus) {
- lba_next_bus = lba_bus->subordinate + 1;
- pci_bus_add_devices(lba_bus);
- }
+ lba_next_bus = lba_bus->subordinate + 1;
+ pci_bus_add_devices(lba_bus);
/* Whew! Finally done! Tell services we got this one covered. */
return 0;
diff --git a/drivers/parport/parport_ax88796.c b/drivers/parport/parport_ax88796.c
index 844f613..7c5d866 100644
--- a/drivers/parport/parport_ax88796.c
+++ b/drivers/parport/parport_ax88796.c
@@ -420,18 +420,7 @@ static struct platform_driver axdrv = {
.resume = parport_ax88796_resume,
};
-static int __init parport_ax88796_init(void)
-{
- return platform_driver_register(&axdrv);
-}
-
-static void __exit parport_ax88796_exit(void)
-{
- platform_driver_unregister(&axdrv);
-}
-
-module_init(parport_ax88796_init)
-module_exit(parport_ax88796_exit)
+module_platform_driver(axdrv);
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("AX88796 Parport parallel port driver");
diff --git a/drivers/parport/parport_ip32.c b/drivers/parport/parport_ip32.c
index 0dc34f1..d471627 100644
--- a/drivers/parport/parport_ip32.c
+++ b/drivers/parport/parport_ip32.c
@@ -135,7 +135,7 @@
#define PARPORT_IP32_ENABLE_EPP (1U << 3)
#define PARPORT_IP32_ENABLE_ECP (1U << 4)
static unsigned int features = ~0U;
-static int verbose_probing = DEFAULT_VERBOSE_PROBING;
+static bool verbose_probing = DEFAULT_VERBOSE_PROBING;
/* We do not support more than one port. */
static struct parport *this_port = NULL;
diff --git a/drivers/parport/parport_mfc3.c b/drivers/parport/parport_mfc3.c
index 362db31..1c0c642 100644
--- a/drivers/parport/parport_mfc3.c
+++ b/drivers/parport/parport_mfc3.c
@@ -397,7 +397,7 @@ static void __exit parport_mfc3_exit(void)
MODULE_AUTHOR("Joerg Dorchain <joerg@dorchain.net>");
-MODULE_DESCRIPTION("Parport Driver for Multiface 3 expansion cards Paralllel Port");
+MODULE_DESCRIPTION("Parport Driver for Multiface 3 expansion cards Parallel Port");
MODULE_SUPPORTED_DEVICE("Multiface 3 Parallel Port");
MODULE_LICENSE("GPL");
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index d0b597b..0cb64f5 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -3404,8 +3404,8 @@ static int __init parport_init_mode_setup(char *str)
#endif
#ifdef MODULE
-static const char *irq[PARPORT_PC_MAX_PORTS];
-static const char *dma[PARPORT_PC_MAX_PORTS];
+static char *irq[PARPORT_PC_MAX_PORTS];
+static char *dma[PARPORT_PC_MAX_PORTS];
MODULE_PARM_DESC(io, "Base I/O address (SPP regs)");
module_param_array(io, int, NULL, 0);
diff --git a/drivers/parport/parport_sunbpp.c b/drivers/parport/parport_sunbpp.c
index 910c5a2..9390a53 100644
--- a/drivers/parport/parport_sunbpp.c
+++ b/drivers/parport/parport_sunbpp.c
@@ -391,21 +391,10 @@ static struct platform_driver bpp_sbus_driver = {
.remove = __devexit_p(bpp_remove),
};
-static int __init parport_sunbpp_init(void)
-{
- return platform_driver_register(&bpp_sbus_driver);
-}
-
-static void __exit parport_sunbpp_exit(void)
-{
- platform_driver_unregister(&bpp_sbus_driver);
-}
+module_platform_driver(bpp_sbus_driver);
MODULE_AUTHOR("Derrick J Brashear");
MODULE_DESCRIPTION("Parport Driver for Sparc bidirectional Port");
MODULE_SUPPORTED_DEVICE("Sparc Bidirectional Parallel Port");
MODULE_VERSION("2.0");
MODULE_LICENSE("GPL");
-
-module_init(parport_sunbpp_init)
-module_exit(parport_sunbpp_exit)
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index f02b523..37856f7 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -98,11 +98,11 @@ config PCI_PASID
If unsure, say N.
config PCI_IOAPIC
- bool
+ tristate "PCI IO-APIC hotplug support" if X86
depends on PCI
depends on ACPI
depends on HOTPLUG
- default y
+ default !X86
config PCI_LABEL
def_bool y if (DMI || ACPI)
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index fdaa42a..2a58164 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -13,7 +13,7 @@
* configuration space.
*/
-static DEFINE_RAW_SPINLOCK(pci_lock);
+DEFINE_RAW_SPINLOCK(pci_lock);
/*
* Wrappers for all PCI configuration access functions. They just check
@@ -127,20 +127,20 @@ EXPORT_SYMBOL(pci_write_vpd);
* We have a bit per device to indicate it's blocked and a global wait queue
* for callers to sleep on until devices are unblocked.
*/
-static DECLARE_WAIT_QUEUE_HEAD(pci_ucfg_wait);
+static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);
-static noinline void pci_wait_ucfg(struct pci_dev *dev)
+static noinline void pci_wait_cfg(struct pci_dev *dev)
{
DECLARE_WAITQUEUE(wait, current);
- __add_wait_queue(&pci_ucfg_wait, &wait);
+ __add_wait_queue(&pci_cfg_wait, &wait);
do {
set_current_state(TASK_UNINTERRUPTIBLE);
raw_spin_unlock_irq(&pci_lock);
schedule();
raw_spin_lock_irq(&pci_lock);
- } while (dev->block_ucfg_access);
- __remove_wait_queue(&pci_ucfg_wait, &wait);
+ } while (dev->block_cfg_access);
+ __remove_wait_queue(&pci_cfg_wait, &wait);
}
/* Returns 0 on success, negative values indicate error. */
@@ -153,7 +153,8 @@ int pci_user_read_config_##size \
if (PCI_##size##_BAD) \
return -EINVAL; \
raw_spin_lock_irq(&pci_lock); \
- if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
+ if (unlikely(dev->block_cfg_access)) \
+ pci_wait_cfg(dev); \
ret = dev->bus->ops->read(dev->bus, dev->devfn, \
pos, sizeof(type), &data); \
raw_spin_unlock_irq(&pci_lock); \
@@ -172,7 +173,8 @@ int pci_user_write_config_##size \
if (PCI_##size##_BAD) \
return -EINVAL; \
raw_spin_lock_irq(&pci_lock); \
- if (unlikely(dev->block_ucfg_access)) pci_wait_ucfg(dev); \
+ if (unlikely(dev->block_cfg_access)) \
+ pci_wait_cfg(dev); \
ret = dev->bus->ops->write(dev->bus, dev->devfn, \
pos, sizeof(type), val); \
raw_spin_unlock_irq(&pci_lock); \
@@ -401,36 +403,56 @@ int pci_vpd_truncate(struct pci_dev *dev, size_t size)
EXPORT_SYMBOL(pci_vpd_truncate);
/**
- * pci_block_user_cfg_access - Block userspace PCI config reads/writes
+ * pci_cfg_access_lock - Lock PCI config reads/writes
* @dev: pci device struct
*
- * When user access is blocked, any reads or writes to config space will
- * sleep until access is unblocked again. We don't allow nesting of
- * block/unblock calls.
+ * When access is locked, any userspace reads or writes to config
+ * space and concurrent lock requests will sleep until access is
+ * allowed via pci_cfg_access_unlock() again.
*/
-void pci_block_user_cfg_access(struct pci_dev *dev)
+void pci_cfg_access_lock(struct pci_dev *dev)
+{
+ might_sleep();
+
+ raw_spin_lock_irq(&pci_lock);
+ if (dev->block_cfg_access)
+ pci_wait_cfg(dev);
+ dev->block_cfg_access = 1;
+ raw_spin_unlock_irq(&pci_lock);
+}
+EXPORT_SYMBOL_GPL(pci_cfg_access_lock);
+
+/**
+ * pci_cfg_access_trylock - try to lock PCI config reads/writes
+ * @dev: pci device struct
+ *
+ * Same as pci_cfg_access_lock(), but returns false if access is
+ * already locked and true otherwise. This function can be used from
+ * atomic contexts.
+ */
+bool pci_cfg_access_trylock(struct pci_dev *dev)
{
unsigned long flags;
- int was_blocked;
+ bool locked = true;
raw_spin_lock_irqsave(&pci_lock, flags);
- was_blocked = dev->block_ucfg_access;
- dev->block_ucfg_access = 1;
+ if (dev->block_cfg_access)
+ locked = false;
+ else
+ dev->block_cfg_access = 1;
raw_spin_unlock_irqrestore(&pci_lock, flags);
- /* If we BUG() inside the pci_lock, we're guaranteed to hose
- * the machine */
- BUG_ON(was_blocked);
+ return locked;
}
-EXPORT_SYMBOL_GPL(pci_block_user_cfg_access);
+EXPORT_SYMBOL_GPL(pci_cfg_access_trylock);
/**
- * pci_unblock_user_cfg_access - Unblock userspace PCI config reads/writes
+ * pci_cfg_access_unlock - Unlock PCI config reads/writes
* @dev: pci device struct
*
- * This function allows userspace PCI config accesses to resume.
+ * This function allows PCI config accesses to resume.
*/
-void pci_unblock_user_cfg_access(struct pci_dev *dev)
+void pci_cfg_access_unlock(struct pci_dev *dev)
{
unsigned long flags;
@@ -438,10 +460,10 @@ void pci_unblock_user_cfg_access(struct pci_dev *dev)
/* This indicates a problem in the caller, but we don't need
* to kill them, unlike a double-block above. */
- WARN_ON(!dev->block_ucfg_access);
+ WARN_ON(!dev->block_cfg_access);
- dev->block_ucfg_access = 0;
- wake_up_all(&pci_ucfg_wait);
+ dev->block_cfg_access = 0;
+ wake_up_all(&pci_cfg_wait);
raw_spin_unlock_irqrestore(&pci_lock, flags);
}
-EXPORT_SYMBOL_GPL(pci_unblock_user_cfg_access);
+EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
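[Editor's note: a short usage sketch of the renamed interface; pdev and my_device_quiesced_op() are placeholders. The SR-IOV changes further down in this patch follow the same pattern.]

	/* Sleeping context: serialize a config-space sensitive operation */
	pci_cfg_access_lock(pdev);
	my_device_quiesced_op(pdev);	/* user config accesses now sleep */
	pci_cfg_access_unlock(pdev);

	/* Atomic context: only the trylock variant may be used */
	if (pci_cfg_access_trylock(pdev)) {
		/* ... short, non-sleeping work ... */
		pci_cfg_access_unlock(pdev);
	}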
diff --git a/drivers/pci/ats.c b/drivers/pci/ats.c
index b0dd08e..95655d7 100644
--- a/drivers/pci/ats.c
+++ b/drivers/pci/ats.c
@@ -128,6 +128,23 @@ void pci_disable_ats(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_disable_ats);
+void pci_restore_ats_state(struct pci_dev *dev)
+{
+ u16 ctrl;
+
+ if (!pci_ats_enabled(dev))
+ return;
+ if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ATS))
+ BUG();
+
+ ctrl = PCI_ATS_CTRL_ENABLE;
+ if (!dev->is_virtfn)
+ ctrl |= PCI_ATS_CTRL_STU(dev->ats->stu - PCI_ATS_MIN_STU);
+
+ pci_write_config_word(dev, dev->ats->pos + PCI_ATS_CTRL, ctrl);
+}
+EXPORT_SYMBOL_GPL(pci_restore_ats_state);
+
/**
* pci_ats_queue_depth - query the ATS Invalidate Queue Depth
* @dev: the PCI device
@@ -175,21 +192,22 @@ int pci_enable_pri(struct pci_dev *pdev, u32 reqs)
u32 max_requests;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (!pos)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
- pci_read_config_word(pdev, pos + PCI_PRI_STATUS_OFF, &status);
- if ((control & PCI_PRI_ENABLE) || !(status & PCI_PRI_STATUS_STOPPED))
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
+ if ((control & PCI_PRI_CTRL_ENABLE) ||
+ !(status & PCI_PRI_STATUS_STOPPED))
return -EBUSY;
- pci_read_config_dword(pdev, pos + PCI_PRI_MAX_REQ_OFF, &max_requests);
+ pci_read_config_dword(pdev, pos + PCI_PRI_MAX_REQ, &max_requests);
reqs = min(max_requests, reqs);
- pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ_OFF, reqs);
+ pci_write_config_dword(pdev, pos + PCI_PRI_ALLOC_REQ, reqs);
- control |= PCI_PRI_ENABLE;
- pci_write_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, control);
+ control |= PCI_PRI_CTRL_ENABLE;
+ pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
return 0;
}
@@ -206,13 +224,13 @@ void pci_disable_pri(struct pci_dev *pdev)
u16 control;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (!pos)
return;
- pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
- control &= ~PCI_PRI_ENABLE;
- pci_write_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, control);
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ control &= ~PCI_PRI_CTRL_ENABLE;
+ pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
}
EXPORT_SYMBOL_GPL(pci_disable_pri);
@@ -227,13 +245,13 @@ bool pci_pri_enabled(struct pci_dev *pdev)
u16 control;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (!pos)
return false;
- pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
- return (control & PCI_PRI_ENABLE) ? true : false;
+ return (control & PCI_PRI_CTRL_ENABLE) ? true : false;
}
EXPORT_SYMBOL_GPL(pci_pri_enabled);
@@ -249,17 +267,17 @@ int pci_reset_pri(struct pci_dev *pdev)
u16 control;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (!pos)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
- if (control & PCI_PRI_ENABLE)
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ if (control & PCI_PRI_CTRL_ENABLE)
return -EBUSY;
- control |= PCI_PRI_RESET;
+ control |= PCI_PRI_CTRL_RESET;
- pci_write_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, control);
+ pci_write_config_word(pdev, pos + PCI_PRI_CTRL, control);
return 0;
}
@@ -282,14 +300,14 @@ bool pci_pri_stopped(struct pci_dev *pdev)
u16 control, status;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (!pos)
return true;
- pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
- pci_read_config_word(pdev, pos + PCI_PRI_STATUS_OFF, &status);
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
- if (control & PCI_PRI_ENABLE)
+ if (control & PCI_PRI_CTRL_ENABLE)
return false;
return (status & PCI_PRI_STATUS_STOPPED) ? true : false;
@@ -311,15 +329,15 @@ int pci_pri_status(struct pci_dev *pdev)
u16 status, control;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PRI_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (!pos)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PRI_CONTROL_OFF, &control);
- pci_read_config_word(pdev, pos + PCI_PRI_STATUS_OFF, &status);
+ pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
/* Stopped bit is undefined when enable == 1, so clear it */
- if (control & PCI_PRI_ENABLE)
+ if (control & PCI_PRI_CTRL_ENABLE)
status &= ~PCI_PRI_STATUS_STOPPED;
return status;
@@ -342,25 +360,25 @@ int pci_enable_pasid(struct pci_dev *pdev, int features)
u16 control, supported;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PASID_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
if (!pos)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PASID_CONTROL_OFF, &control);
- pci_read_config_word(pdev, pos + PCI_PASID_CAP_OFF, &supported);
+ pci_read_config_word(pdev, pos + PCI_PASID_CTRL, &control);
+ pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
- if (!(supported & PCI_PASID_ENABLE))
+ if (control & PCI_PASID_CTRL_ENABLE)
return -EINVAL;
- supported &= PCI_PASID_EXEC | PCI_PASID_PRIV;
+ supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV;
/* User wants to enable anything unsupported? */
if ((supported & features) != features)
return -EINVAL;
- control = PCI_PASID_ENABLE | features;
+ control = PCI_PASID_CTRL_ENABLE | features;
- pci_write_config_word(pdev, pos + PCI_PASID_CONTROL_OFF, control);
+ pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
return 0;
}
@@ -376,11 +394,11 @@ void pci_disable_pasid(struct pci_dev *pdev)
u16 control = 0;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PASID_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
if (!pos)
return;
- pci_write_config_word(pdev, pos + PCI_PASID_CONTROL_OFF, control);
+ pci_write_config_word(pdev, pos + PCI_PASID_CTRL, control);
}
EXPORT_SYMBOL_GPL(pci_disable_pasid);
@@ -391,22 +409,21 @@ EXPORT_SYMBOL_GPL(pci_disable_pasid);
 * Returns a negative value when no PASID capability is present.
 * Otherwise it returns a bitmask with supported features. Current
* features reported are:
- * PCI_PASID_ENABLE - PASID capability can be enabled
- * PCI_PASID_EXEC - Execute permission supported
- * PCI_PASID_PRIV - Priviledged mode supported
+ * PCI_PASID_CAP_EXEC - Execute permission supported
+ * PCI_PASID_CAP_PRIV - Privileged mode supported
*/
int pci_pasid_features(struct pci_dev *pdev)
{
u16 supported;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PASID_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
if (!pos)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PASID_CAP_OFF, &supported);
+ pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
- supported &= PCI_PASID_ENABLE | PCI_PASID_EXEC | PCI_PASID_PRIV;
+ supported &= PCI_PASID_CAP_EXEC | PCI_PASID_CAP_PRIV;
return supported;
}
@@ -426,11 +443,11 @@ int pci_max_pasids(struct pci_dev *pdev)
u16 supported;
int pos;
- pos = pci_find_ext_capability(pdev, PCI_PASID_CAP);
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PASID);
if (!pos)
return -EINVAL;
- pci_read_config_word(pdev, pos + PCI_PASID_CAP_OFF, &supported);
+ pci_read_config_word(pdev, pos + PCI_PASID_CAP, &supported);
supported = (supported & PASID_NUMBER_MASK) >> PASID_NUMBER_SHIFT;
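[Editor's note: with the renamed constants, a caller probing and enabling PASID might look like the sketch below; pdev is a placeholder and requesting execute permission when advertised is an assumption about the caller's needs, not something this patch mandates.]

	int feat, err;

	feat = pci_pasid_features(pdev);
	if (feat < 0)
		return feat;			/* no PASID capability */

	/* request execute permission only if the device advertises it */
	err = pci_enable_pasid(pdev, feat & PCI_PASID_CAP_EXEC);
	if (err)
		return err;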
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 1e2ad92..398f5d8 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -18,6 +18,32 @@
#include "pci.h"
+void pci_add_resource(struct list_head *resources, struct resource *res)
+{
+ struct pci_bus_resource *bus_res;
+
+ bus_res = kzalloc(sizeof(struct pci_bus_resource), GFP_KERNEL);
+ if (!bus_res) {
+ printk(KERN_ERR "PCI: can't add bus resource %pR\n", res);
+ return;
+ }
+
+ bus_res->res = res;
+ list_add_tail(&bus_res->list, resources);
+}
+EXPORT_SYMBOL(pci_add_resource);
+
+void pci_free_resource_list(struct list_head *resources)
+{
+ struct pci_bus_resource *bus_res, *tmp;
+
+ list_for_each_entry_safe(bus_res, tmp, resources, list) {
+ list_del(&bus_res->list);
+ kfree(bus_res);
+ }
+}
+EXPORT_SYMBOL(pci_free_resource_list);
+
void pci_bus_add_resource(struct pci_bus *bus, struct resource *res,
unsigned int flags)
{
@@ -52,16 +78,12 @@ EXPORT_SYMBOL_GPL(pci_bus_resource_n);
void pci_bus_remove_resources(struct pci_bus *bus)
{
- struct pci_bus_resource *bus_res, *tmp;
int i;
for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
bus->resource[i] = NULL;
- list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) {
- list_del(&bus_res->list);
- kfree(bus_res);
- }
+ pci_free_resource_list(&bus->resources);
}
/**
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 095f29e..2a47e82 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -44,7 +44,7 @@
#define METHOD_NAME__SUN "_SUN"
#define METHOD_NAME_OSHP "OSHP"
-static int debug_acpi;
+static bool debug_acpi;
static acpi_status
decode_type0_hpx_record(union acpi_object *record, struct hotplug_params *hpx)
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index efa9f2d..aa41631 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -47,7 +47,7 @@
/* name size which is used for entries in pcihpfs */
#define SLOT_NAME_SIZE 21 /* {_SUN} */
-static int debug;
+static bool debug;
int acpiphp_debug;
/* local variables */
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index e525263..c35e8ad 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -43,7 +43,7 @@
#define DRIVER_AUTHOR "Irene Zubarev <zubarev@us.ibm.com>, Vernon Mauery <vernux@us.ibm.com>"
#define DRIVER_DESC "ACPI Hot Plug PCI Controller Driver IBM extension"
-static int debug;
+static bool debug;
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
index 41f6a8d..6bf8d2a 100644
--- a/drivers/pci/hotplug/cpcihp_zt5550.c
+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
@@ -57,8 +57,8 @@
#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg)
/* local variables */
-static int debug;
-static int poll;
+static bool debug;
+static bool poll;
static struct cpci_hp_controller_ops zt5550_hpc_ops;
static struct cpci_hp_controller zt5550_hpc;
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index f1ce99c..187a199 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -57,8 +57,8 @@ struct irq_routing_table *cpqhp_routing_table;
static void __iomem *smbios_table;
static void __iomem *smbios_start;
static void __iomem *cpqhp_rom_start;
-static int power_mode;
-static int debug;
+static bool power_mode;
+static bool debug;
static int initialized;
#define DRIVER_VERSION "0.9.8"
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index d934dd4..5506e0e 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -49,7 +49,7 @@
int ibmphp_debug;
-static int debug;
+static bool debug;
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC (debug, "Debugging mode enabled or not");
MODULE_LICENSE ("GPL");
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 6d2eea9..202f4a9 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -51,7 +51,7 @@
/* local variables */
-static int debug;
+static bool debug;
#define DRIVER_VERSION "0.5"
#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Scott Murray <scottm@somanetworks.com>"
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 838f571..4b7cce1 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -40,12 +40,11 @@
#define MY_NAME "pciehp"
-extern int pciehp_poll_mode;
+extern bool pciehp_poll_mode;
extern int pciehp_poll_time;
-extern int pciehp_debug;
-extern int pciehp_force;
+extern bool pciehp_debug;
+extern bool pciehp_force;
extern struct workqueue_struct *pciehp_wq;
-extern struct workqueue_struct *pciehp_ordered_wq;
#define dbg(format, arg...) \
do { \
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 7ac8358..365c6b9 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -38,12 +38,11 @@
#include <linux/time.h>
/* Global variables */
-int pciehp_debug;
-int pciehp_poll_mode;
+bool pciehp_debug;
+bool pciehp_poll_mode;
int pciehp_poll_time;
-int pciehp_force;
+bool pciehp_force;
struct workqueue_struct *pciehp_wq;
-struct workqueue_struct *pciehp_ordered_wq;
#define DRIVER_VERSION "0.4"
#define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
@@ -345,18 +344,11 @@ static int __init pcied_init(void)
if (!pciehp_wq)
return -ENOMEM;
- pciehp_ordered_wq = alloc_ordered_workqueue("pciehp_ordered", 0);
- if (!pciehp_ordered_wq) {
- destroy_workqueue(pciehp_wq);
- return -ENOMEM;
- }
-
pciehp_firmware_init();
retval = pcie_port_service_register(&hpdriver_portdrv);
dbg("pcie_port_service_register = %d\n", retval);
info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
if (retval) {
- destroy_workqueue(pciehp_ordered_wq);
destroy_workqueue(pciehp_wq);
dbg("Failure to register service\n");
}
@@ -366,9 +358,8 @@ static int __init pcied_init(void)
static void __exit pcied_cleanup(void)
{
dbg("unload_pciehpd()\n");
- destroy_workqueue(pciehp_ordered_wq);
- destroy_workqueue(pciehp_wq);
pcie_port_service_unregister(&hpdriver_portdrv);
+ destroy_workqueue(pciehp_wq);
info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
}
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 085dbb5..27f4429 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -344,7 +344,7 @@ void pciehp_queue_pushbutton_work(struct work_struct *work)
kfree(info);
goto out;
}
- queue_work(pciehp_ordered_wq, &info->work);
+ queue_work(pciehp_wq, &info->work);
out:
mutex_unlock(&p_slot->lock);
}
@@ -439,7 +439,7 @@ static void handle_surprise_event(struct slot *p_slot)
else
p_slot->state = POWERON_STATE;
- queue_work(pciehp_ordered_wq, &info->work);
+ queue_work(pciehp_wq, &info->work);
}
static void interrupt_event_handler(struct work_struct *work)
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 7b14148..bcdbb16 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -806,7 +806,6 @@ static void pcie_cleanup_slot(struct controller *ctrl)
struct slot *slot = ctrl->slot;
cancel_delayed_work(&slot->work);
flush_workqueue(pciehp_wq);
- flush_workqueue(pciehp_ordered_wq);
kfree(slot);
}
diff --git a/drivers/pci/hotplug/pcihp_skeleton.c b/drivers/pci/hotplug/pcihp_skeleton.c
index 5175d9b..b20ceaa 100644
--- a/drivers/pci/hotplug/pcihp_skeleton.c
+++ b/drivers/pci/hotplug/pcihp_skeleton.c
@@ -59,7 +59,7 @@ static LIST_HEAD(slot_list);
#define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg)
/* local variables */
-static int debug;
+static bool debug;
static int num_slots;
#define DRIVER_VERSION "0.3"
diff --git a/drivers/pci/hotplug/rpaphp.h b/drivers/pci/hotplug/rpaphp.h
index 419919a..df56774 100644
--- a/drivers/pci/hotplug/rpaphp.h
+++ b/drivers/pci/hotplug/rpaphp.h
@@ -46,7 +46,7 @@
#define PRESENT 1 /* Card in slot */
#define MY_NAME "rpaphp"
-extern int rpaphp_debug;
+extern bool rpaphp_debug;
#define dbg(format, arg...) \
do { \
if (rpaphp_debug) \
diff --git a/drivers/pci/hotplug/rpaphp_core.c b/drivers/pci/hotplug/rpaphp_core.c
index 758adb5..127d6e6 100644
--- a/drivers/pci/hotplug/rpaphp_core.c
+++ b/drivers/pci/hotplug/rpaphp_core.c
@@ -37,7 +37,7 @@
/* and pci_do_scan_bus */
#include "rpaphp.h"
-int rpaphp_debug;
+bool rpaphp_debug;
LIST_HEAD(rpaphp_slot_head);
#define DRIVER_VERSION "0.1"
diff --git a/drivers/pci/hotplug/shpchp.h b/drivers/pci/hotplug/shpchp.h
index e0c90e6..ca64932 100644
--- a/drivers/pci/hotplug/shpchp.h
+++ b/drivers/pci/hotplug/shpchp.h
@@ -43,9 +43,9 @@
#define MY_NAME THIS_MODULE->name
#endif
-extern int shpchp_poll_mode;
+extern bool shpchp_poll_mode;
extern int shpchp_poll_time;
-extern int shpchp_debug;
+extern bool shpchp_debug;
extern struct workqueue_struct *shpchp_wq;
extern struct workqueue_struct *shpchp_ordered_wq;
diff --git a/drivers/pci/hotplug/shpchp_core.c b/drivers/pci/hotplug/shpchp_core.c
index dd7e0c5..7414fd9 100644
--- a/drivers/pci/hotplug/shpchp_core.c
+++ b/drivers/pci/hotplug/shpchp_core.c
@@ -36,8 +36,8 @@
#include "shpchp.h"
/* Global variables */
-int shpchp_debug;
-int shpchp_poll_mode;
+bool shpchp_debug;
+bool shpchp_poll_mode;
int shpchp_poll_time;
struct workqueue_struct *shpchp_wq;
struct workqueue_struct *shpchp_ordered_wq;
diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c
index 5775638..205af8d 100644
--- a/drivers/pci/ioapic.c
+++ b/drivers/pci/ioapic.c
@@ -17,7 +17,7 @@
*/
#include <linux/pci.h>
-#include <linux/export.h>
+#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/slab.h>
#include <acpi/acpi_bus.h>
@@ -27,7 +27,7 @@ struct ioapic {
u32 gsi_base;
};
-static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent)
+static int __devinit ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
acpi_handle handle;
acpi_status status;
@@ -88,7 +88,7 @@ exit_free:
return -ENODEV;
}
-static void ioapic_remove(struct pci_dev *dev)
+static void __devexit ioapic_remove(struct pci_dev *dev)
{
struct ioapic *ioapic = pci_get_drvdata(dev);
@@ -99,13 +99,12 @@ static void ioapic_remove(struct pci_dev *dev)
}
-static struct pci_device_id ioapic_devices[] = {
- { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_SYSTEM_PIC_IOAPIC << 8, 0xffff00, },
- { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_SYSTEM_PIC_IOXAPIC << 8, 0xffff00, },
+static DEFINE_PCI_DEVICE_TABLE(ioapic_devices) = {
+ { PCI_DEVICE_CLASS(PCI_CLASS_SYSTEM_PIC_IOAPIC, ~0) },
+ { PCI_DEVICE_CLASS(PCI_CLASS_SYSTEM_PIC_IOXAPIC, ~0) },
{ }
};
+MODULE_DEVICE_TABLE(pci, ioapic_devices);
static struct pci_driver ioapic_driver = {
.name = "ioapic",
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 1969a3e..0dab5ec 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -348,10 +348,10 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
}
iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
- pci_block_user_cfg_access(dev);
+ pci_cfg_access_lock(dev);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
msleep(100);
- pci_unblock_user_cfg_access(dev);
+ pci_cfg_access_unlock(dev);
iov->initial = initial;
if (nr_virtfn < initial)
@@ -379,10 +379,10 @@ failed:
virtfn_remove(dev, j, 0);
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
- pci_block_user_cfg_access(dev);
+ pci_cfg_access_lock(dev);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
ssleep(1);
- pci_unblock_user_cfg_access(dev);
+ pci_cfg_access_unlock(dev);
if (iov->link != dev->devfn)
sysfs_remove_link(&dev->dev.kobj, "dep_link");
@@ -405,10 +405,10 @@ static void sriov_disable(struct pci_dev *dev)
virtfn_remove(dev, i, 0);
iov->ctrl &= ~(PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE);
- pci_block_user_cfg_access(dev);
+ pci_cfg_access_lock(dev);
pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
ssleep(1);
- pci_unblock_user_cfg_access(dev);
+ pci_cfg_access_unlock(dev);
if (iov->link != dev->devfn)
sysfs_remove_link(&dev->dev.kobj, "dep_link");
@@ -452,7 +452,6 @@ static int sriov_init(struct pci_dev *dev, int pos)
found:
pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl);
- pci_write_config_word(dev, pos + PCI_SRIOV_NUM_VF, total);
pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
if (!offset || (total > 1 && !stride))
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 0e6d04d..a825d78 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -86,6 +86,31 @@ void default_teardown_msi_irqs(struct pci_dev *dev)
}
#endif
+#ifndef arch_restore_msi_irqs
+# define arch_restore_msi_irqs default_restore_msi_irqs
+# define HAVE_DEFAULT_MSI_RESTORE_IRQS
+#endif
+
+#ifdef HAVE_DEFAULT_MSI_RESTORE_IRQS
+void default_restore_msi_irqs(struct pci_dev *dev, int irq)
+{
+ struct msi_desc *entry;
+
+ entry = NULL;
+ if (dev->msix_enabled) {
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ if (irq == entry->irq)
+ break;
+ }
+ } else if (dev->msi_enabled) {
+ entry = irq_get_msi_desc(irq);
+ }
+
+ if (entry)
+ write_msi_msg(irq, &entry->msg);
+}
+#endif
+
static void msi_set_enable(struct pci_dev *dev, int pos, int enable)
{
u16 control;
@@ -323,6 +348,18 @@ static void free_msi_irqs(struct pci_dev *dev)
if (list_is_last(&entry->list, &dev->msi_list))
iounmap(entry->mask_base);
}
+
+ /*
+ * It's possible that we get into this path when
+ * populate_msi_sysfs() fails, which means the entries
+ * were not registered with sysfs. In that case don't
+ * unregister them.
+ */
+ if (entry->kobj.parent) {
+ kobject_del(&entry->kobj);
+ kobject_put(&entry->kobj);
+ }
+
list_del(&entry->list);
kfree(entry);
}
@@ -360,7 +397,7 @@ static void __pci_restore_msi_state(struct pci_dev *dev)
pci_intx_for_msi(dev, 0);
msi_set_enable(dev, pos, 0);
- write_msi_msg(dev->irq, &entry->msg);
+ arch_restore_msi_irqs(dev, dev->irq);
pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
msi_mask_irq(entry, msi_capable_mask(control), entry->masked);
@@ -388,7 +425,7 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
list_for_each_entry(entry, &dev->msi_list, list) {
- write_msi_msg(entry->irq, &entry->msg);
+ arch_restore_msi_irqs(dev, entry->irq);
msix_mask_irq(entry, entry->masked);
}
@@ -403,6 +440,98 @@ void pci_restore_msi_state(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_restore_msi_state);
+
+#define to_msi_attr(obj) container_of(obj, struct msi_attribute, attr)
+#define to_msi_desc(obj) container_of(obj, struct msi_desc, kobj)
+
+struct msi_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct msi_desc *entry, struct msi_attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct msi_desc *entry, struct msi_attribute *attr,
+ const char *buf, size_t count);
+};
+
+static ssize_t show_msi_mode(struct msi_desc *entry, struct msi_attribute *atr,
+ char *buf)
+{
+ return sprintf(buf, "%s\n", entry->msi_attrib.is_msix ? "msix" : "msi");
+}
+
+static ssize_t msi_irq_attr_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct msi_attribute *attribute = to_msi_attr(attr);
+ struct msi_desc *entry = to_msi_desc(kobj);
+
+ if (!attribute->show)
+ return -EIO;
+
+ return attribute->show(entry, attribute, buf);
+}
+
+static const struct sysfs_ops msi_irq_sysfs_ops = {
+ .show = msi_irq_attr_show,
+};
+
+static struct msi_attribute mode_attribute =
+ __ATTR(mode, S_IRUGO, show_msi_mode, NULL);
+
+
+struct attribute *msi_irq_default_attrs[] = {
+ &mode_attribute.attr,
+ NULL
+};
+
+void msi_kobj_release(struct kobject *kobj)
+{
+ struct msi_desc *entry = to_msi_desc(kobj);
+
+ pci_dev_put(entry->dev);
+}
+
+static struct kobj_type msi_irq_ktype = {
+ .release = msi_kobj_release,
+ .sysfs_ops = &msi_irq_sysfs_ops,
+ .default_attrs = msi_irq_default_attrs,
+};
+
+static int populate_msi_sysfs(struct pci_dev *pdev)
+{
+ struct msi_desc *entry;
+ struct kobject *kobj;
+ int ret;
+ int count = 0;
+
+ pdev->msi_kset = kset_create_and_add("msi_irqs", NULL, &pdev->dev.kobj);
+ if (!pdev->msi_kset)
+ return -ENOMEM;
+
+ list_for_each_entry(entry, &pdev->msi_list, list) {
+ kobj = &entry->kobj;
+ kobj->kset = pdev->msi_kset;
+ pci_dev_get(pdev);
+ ret = kobject_init_and_add(kobj, &msi_irq_ktype, NULL,
+ "%u", entry->irq);
+ if (ret)
+ goto out_unroll;
+
+ count++;
+ }
+
+ return 0;
+
+out_unroll:
+ list_for_each_entry(entry, &pdev->msi_list, list) {
+ if (!count)
+ break;
+ kobject_del(&entry->kobj);
+ kobject_put(&entry->kobj);
+ count--;
+ }
+ return ret;
+}
+
/**
* msi_capability_init - configure device's MSI capability structure
* @dev: pointer to the pci_dev data structure of MSI device function
@@ -454,6 +583,13 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
return ret;
}
+ ret = populate_msi_sysfs(dev);
+ if (ret) {
+ msi_mask_irq(entry, mask, ~mask);
+ free_msi_irqs(dev);
+ return ret;
+ }
+
/* Set MSI enabled bits */
pci_intx_for_msi(dev, 0);
msi_set_enable(dev, pos, 1);
@@ -574,6 +710,12 @@ static int msix_capability_init(struct pci_dev *dev,
msix_program_entries(dev, entries);
+ ret = populate_msi_sysfs(dev);
+ if (ret) {
+ ret = 0;
+ goto error;
+ }
+
/* Set MSI-X enabled bits and unmask the function */
pci_intx_for_msi(dev, 0);
dev->msix_enabled = 1;
@@ -732,6 +874,8 @@ void pci_disable_msi(struct pci_dev *dev)
pci_msi_shutdown(dev);
free_msi_irqs(dev);
+ kset_unregister(dev->msi_kset);
+ dev->msi_kset = NULL;
}
EXPORT_SYMBOL(pci_disable_msi);
@@ -830,6 +974,8 @@ void pci_disable_msix(struct pci_dev *dev)
pci_msix_shutdown(dev);
free_msi_irqs(dev);
+ kset_unregister(dev->msi_kset);
+ dev->msi_kset = NULL;
}
EXPORT_SYMBOL(pci_disable_msix);
@@ -870,5 +1016,15 @@ EXPORT_SYMBOL(pci_msi_enabled);
void pci_msi_init_pci_dev(struct pci_dev *dev)
{
+ int pos;
INIT_LIST_HEAD(&dev->msi_list);
+
+	/* Disable the MSI hardware to avoid screaming interrupts
+	 * during boot.  This is the power-on reset default, so
+	 * usually this should be a no-op.
+ */
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ if (pos)
+ msi_set_enable(dev, pos, 0);
+ msix_set_enable(dev, 0);
}
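
The msi.c hunks above add a per-device "msi_irqs" kset whose children are named after each vector's Linux IRQ number and expose a read-only "mode" attribute reporting "msi" or "msix". A minimal userspace sketch (not part of the patch; the device address 0000:00:19.0 is only an example) that walks the resulting layout:

#include <dirent.h>
#include <stdio.h>

int main(void)
{
	const char *base = "/sys/bus/pci/devices/0000:00:19.0/msi_irqs";
	char path[256], mode[16];
	struct dirent *d;
	DIR *dir;
	FILE *f;

	dir = opendir(base);
	if (!dir) {
		perror(base);	/* no such device, or MSI/MSI-X not enabled */
		return 1;
	}
	while ((d = readdir(dir)) != NULL) {
		if (d->d_name[0] == '.')
			continue;
		/* each directory is named after the Linux IRQ number */
		snprintf(path, sizeof(path), "%s/%s/mode", base, d->d_name);
		f = fopen(path, "r");
		if (f && fgets(mode, sizeof(mode), f))
			printf("IRQ %s: %s", d->d_name, mode);	/* "msi" or "msix" */
		if (f)
			fclose(f);
	}
	closedir(dir);
	return 0;
}
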
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 4ecb640..060fd22 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -45,16 +45,20 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
{
struct pci_dev *pci_dev = context;
- if (event == ACPI_NOTIFY_DEVICE_WAKE && pci_dev) {
+ if (event != ACPI_NOTIFY_DEVICE_WAKE || !pci_dev)
+ return;
+
+ if (!pci_dev->pm_cap || !pci_dev->pme_support
+ || pci_check_pme_status(pci_dev)) {
if (pci_dev->pme_poll)
pci_dev->pme_poll = false;
pci_wakeup_event(pci_dev);
- pci_check_pme_status(pci_dev);
pm_runtime_resume(&pci_dev->dev);
- if (pci_dev->subordinate)
- pci_pme_wakeup_bus(pci_dev->subordinate);
}
+
+ if (pci_dev->subordinate)
+ pci_pme_wakeup_bus(pci_dev->subordinate);
}
/**
@@ -395,7 +399,6 @@ static int __init acpi_pci_init(void)
if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
printk(KERN_INFO"ACPI FADT declares the system doesn't support PCIe ASPM, so disable it\n");
- pcie_clear_aspm();
pcie_no_aspm();
}
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 12d1e81..3623d65 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -604,7 +604,8 @@ static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
* supported as well. Drivers are supposed to support either the
* former, or the latter, but not both at the same time.
*/
- WARN_ON(ret && drv->driver.pm);
+ WARN(ret && drv->driver.pm, "driver %s device %04x:%04x\n",
+ drv->name, pci_dev->vendor, pci_dev->device);
return ret;
}
diff --git a/drivers/pci/pci-label.c b/drivers/pci/pci-label.c
index 81525ae..edaed6f 100644
--- a/drivers/pci/pci-label.c
+++ b/drivers/pci/pci-label.c
@@ -89,7 +89,7 @@ find_smbios_instance_string(struct pci_dev *pdev, char *buf,
return 0;
}
-static mode_t
+static umode_t
smbios_instance_string_exist(struct kobject *kobj, struct attribute *attr,
int n)
{
@@ -275,7 +275,7 @@ device_has_dsm(struct device *dev)
return FALSE;
}
-static mode_t
+static umode_t
acpi_index_string_exist(struct kobject *kobj, struct attribute *attr, int n)
{
struct device *dev;
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 106be0d..a3cd8ca 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -432,7 +432,7 @@ pci_read_config(struct file *filp, struct kobject *kobj,
u8 *data = (u8*) buf;
/* Several chips lock up trying to read undefined config space */
- if (security_capable(&init_user_ns, filp->f_cred, CAP_SYS_ADMIN) == 0) {
+ if (security_capable(filp->f_cred, &init_user_ns, CAP_SYS_ADMIN) == 0) {
size = dev->cfg_size;
} else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
size = 128;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6d4a531..af295bb 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -88,6 +88,12 @@ enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;
+/*
+ * If we set up a device for bus mastering, we need to check the latency
+ * timer as certain BIOSes forget to set it properly.
+ */
+unsigned int pcibios_max_latency = 255;
+
/**
* pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
* @bus: pointer to PCI bus structure to search
@@ -959,6 +965,7 @@ void pci_restore_state(struct pci_dev *dev)
/* PCI Express register must be restored first */
pci_restore_pcie_state(dev);
+ pci_restore_ats_state(dev);
/*
* The Base Address register should be programmed before the command
@@ -967,7 +974,7 @@ void pci_restore_state(struct pci_dev *dev)
for (i = 15; i >= 0; i--) {
pci_read_config_dword(dev, i * 4, &val);
if (val != dev->saved_config_space[i]) {
- dev_printk(KERN_DEBUG, &dev->dev, "restoring config "
+ dev_dbg(&dev->dev, "restoring config "
"space at offset %#x (was %#x, writing %#x)\n",
i, val, (int)dev->saved_config_space[i]);
pci_write_config_dword(dev,i * 4,
@@ -1536,8 +1543,7 @@ void pci_pme_active(struct pci_dev *dev, bool enable)
}
out:
- dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
- enable ? "enabled" : "disabled");
+ dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
/**
@@ -2596,6 +2602,33 @@ static void __pci_set_master(struct pci_dev *dev, bool enable)
}
/**
+ * pcibios_set_master - enable PCI bus-mastering for device dev
+ * @dev: the PCI device to enable
+ *
+ * Enables PCI bus-mastering for the device. This is the default
+ * implementation. Architecture specific implementations can override
+ * this if necessary.
+ */
+void __weak pcibios_set_master(struct pci_dev *dev)
+{
+ u8 lat;
+
+ /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
+ if (pci_is_pcie(dev))
+ return;
+
+ pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
+ if (lat < 16)
+ lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
+ else if (lat > pcibios_max_latency)
+ lat = pcibios_max_latency;
+ else
+ return;
+ dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
+}
+
+/**
* pci_set_master - enables bus-mastering for device dev
* @dev: the PCI device to enable
*
@@ -2768,6 +2801,116 @@ pci_intx(struct pci_dev *pdev, int enable)
}
/**
+ * pci_intx_mask_supported - probe for INTx masking support
+ * @dev: the PCI device to operate on
+ *
+ * Check if the device dev supports INTx masking via the config space
+ * command word.
+ */
+bool pci_intx_mask_supported(struct pci_dev *dev)
+{
+ bool mask_supported = false;
+ u16 orig, new;
+
+ pci_cfg_access_lock(dev);
+
+ pci_read_config_word(dev, PCI_COMMAND, &orig);
+ pci_write_config_word(dev, PCI_COMMAND,
+ orig ^ PCI_COMMAND_INTX_DISABLE);
+ pci_read_config_word(dev, PCI_COMMAND, &new);
+
+ /*
+ * There's no way to protect against hardware bugs or detect them
+ * reliably, but as long as we know what the value should be, let's
+ * go ahead and check it.
+ */
+ if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
+ dev_err(&dev->dev, "Command register changed from "
+ "0x%x to 0x%x: driver or hardware bug?\n", orig, new);
+ } else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
+ mask_supported = true;
+ pci_write_config_word(dev, PCI_COMMAND, orig);
+ }
+
+ pci_cfg_access_unlock(dev);
+ return mask_supported;
+}
+EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
+
+static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
+{
+ struct pci_bus *bus = dev->bus;
+ bool mask_updated = true;
+ u32 cmd_status_dword;
+ u16 origcmd, newcmd;
+ unsigned long flags;
+ bool irq_pending;
+
+ /*
+ * We do a single dword read to retrieve both command and status.
+ * Document assumptions that make this possible.
+ */
+ BUILD_BUG_ON(PCI_COMMAND % 4);
+ BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
+
+ raw_spin_lock_irqsave(&pci_lock, flags);
+
+ bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
+
+ irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
+
+ /*
+ * Check interrupt status register to see whether our device
+ * triggered the interrupt (when masking) or the next IRQ is
+ * already pending (when unmasking).
+ */
+ if (mask != irq_pending) {
+ mask_updated = false;
+ goto done;
+ }
+
+ origcmd = cmd_status_dword;
+ newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
+ if (mask)
+ newcmd |= PCI_COMMAND_INTX_DISABLE;
+ if (newcmd != origcmd)
+ bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
+
+done:
+ raw_spin_unlock_irqrestore(&pci_lock, flags);
+
+ return mask_updated;
+}
+
+/**
+ * pci_check_and_mask_intx - mask INTx on pending interrupt
+ * @dev: the PCI device to operate on
+ *
+ * Check if the device dev has its INTx line asserted, mask it and
+ * return true in that case. False is returned if no interrupt was
+ * pending.
+ */
+bool pci_check_and_mask_intx(struct pci_dev *dev)
+{
+ return pci_check_and_set_intx_mask(dev, true);
+}
+EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
+
+/**
+ * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
+ * @dev: the PCI device to operate on
+ *
+ * Check if the device dev has its INTx line asserted, unmask it if not
+ * and return true. False is returned and the mask remains active if
+ * there was still an interrupt pending.
+ */
+bool pci_check_and_unmask_intx(struct pci_dev *dev)
+{
+ return pci_check_and_set_intx_mask(dev, false);
+}
+EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
+
+/**
* pci_msi_off - disables any msi or msix capabilities
* @dev: the PCI device to operate on
*
@@ -2965,7 +3108,7 @@ static int pci_dev_reset(struct pci_dev *dev, int probe)
might_sleep();
if (!probe) {
- pci_block_user_cfg_access(dev);
+ pci_cfg_access_lock(dev);
/* block PM suspend, driver probe, etc. */
device_lock(&dev->dev);
}
@@ -2990,7 +3133,7 @@ static int pci_dev_reset(struct pci_dev *dev, int probe)
done:
if (!probe) {
device_unlock(&dev->dev);
- pci_unblock_user_cfg_access(dev);
+ pci_cfg_access_unlock(dev);
}
return rc;
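
For the INTx helpers exported above, a hedged sketch of the intended driver-side usage on a legacy, possibly shared interrupt line; struct my_dev, my_intx_handler() and my_setup_intx() are invented names, not kernel APIs:

#include <linux/interrupt.h>
#include <linux/pci.h>

struct my_dev {
	struct pci_dev *pdev;
};

static irqreturn_t my_intx_handler(int irq, void *data)
{
	struct my_dev *mydev = data;

	/*
	 * pci_check_and_mask_intx() masks INTx and returns true only when
	 * this device really has an interrupt pending, which keeps a
	 * shared line usable.
	 */
	if (!pci_check_and_mask_intx(mydev->pdev))
		return IRQ_NONE;

	/*
	 * Handle the event (or hand it to a thread/userspace); re-enable
	 * the line later with pci_check_and_unmask_intx(mydev->pdev).
	 */
	return IRQ_HANDLED;
}

static int my_setup_intx(struct my_dev *mydev)
{
	/* only usable if the device implements the INTx disable bit */
	if (!pci_intx_mask_supported(mydev->pdev))
		return -ENODEV;

	return request_irq(mydev->pdev->irq, my_intx_handler, IRQF_SHARED,
			   "my_dev", mydev);
}
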
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index b74084e..1009a5e 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -136,6 +136,8 @@ static inline void pci_remove_legacy_files(struct pci_bus *bus) { return; }
/* Lock for read/write access to pci device and bus lists */
extern struct rw_semaphore pci_bus_sem;
+extern raw_spinlock_t pci_lock;
+
extern unsigned int pci_pm_d3_delay;
#ifdef CONFIG_PCI_MSI
@@ -249,6 +251,14 @@ struct pci_sriov {
u8 __iomem *mstate; /* VF Migration State Array */
};
+#ifdef CONFIG_PCI_ATS
+extern void pci_restore_ats_state(struct pci_dev *dev);
+#else
+static inline void pci_restore_ats_state(struct pci_dev *dev)
+{
+}
+#endif /* CONFIG_PCI_ATS */
+
#ifdef CONFIG_PCI_IOV
extern int pci_iov_init(struct pci_dev *dev);
extern void pci_iov_release(struct pci_dev *dev);
diff --git a/drivers/pci/pcie/Kconfig b/drivers/pci/pcie/Kconfig
index dc29348..72962cc 100644
--- a/drivers/pci/pcie/Kconfig
+++ b/drivers/pci/pcie/Kconfig
@@ -39,7 +39,7 @@ config PCIEASPM
Power Management) and Clock Power Management. ASPM supports
state L0/L0s/L1.
- ASPM is initially set up the the firmware. With this option enabled,
+ ASPM is initially set up by the firmware. With this option enabled,
Linux can modify this state in order to disable ASPM on known-bad
hardware or configurations and enable it when known-safe.
diff --git a/drivers/pci/pcie/aer/aer_inject.c b/drivers/pci/pcie/aer/aer_inject.c
index 95489cd..5222986 100644
--- a/drivers/pci/pcie/aer/aer_inject.c
+++ b/drivers/pci/pcie/aer/aer_inject.c
@@ -28,7 +28,7 @@
#include "aerdrv.h"
/* Override the existing corrected and uncorrected error masks */
-static int aer_mask_override;
+static bool aer_mask_override;
module_param(aer_mask_override, bool, 0);
struct aer_error_inj {
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 9674e9f..0ca0535 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -27,8 +27,8 @@
#include <linux/kfifo.h>
#include "aerdrv.h"
-static int forceload;
-static int nosourceid;
+static bool forceload;
+static bool nosourceid;
module_param(forceload, bool, 0);
module_param(nosourceid, bool, 0);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index cbfbab1..24f049e 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -68,7 +68,7 @@ struct pcie_link_state {
struct aspm_latency acceptable[8];
};
-static int aspm_disabled, aspm_force, aspm_clear_state;
+static int aspm_disabled, aspm_force;
static bool aspm_support_enabled = true;
static DEFINE_MUTEX(aspm_lock);
static LIST_HEAD(link_list);
@@ -500,8 +500,8 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
int pos;
u32 reg32;
- if (aspm_clear_state)
- return -EINVAL;
+ if (aspm_disabled)
+ return 0;
/*
* Some functions in a slot might not all be PCIe functions,
@@ -574,9 +574,6 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
return;
- if (aspm_disabled && !aspm_clear_state)
- return;
-
/* VIA has a strange chipset, root port is under a bridge */
if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT &&
pdev->bus->self)
@@ -608,7 +605,7 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
* the BIOS's expectation, we'll do so once pci_enable_device() is
* called.
*/
- if (aspm_policy != POLICY_POWERSAVE || aspm_clear_state) {
+ if (aspm_policy != POLICY_POWERSAVE) {
pcie_config_aspm_path(link);
pcie_set_clkpm(link, policy_to_clkpm_state(link));
}
@@ -649,8 +646,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
struct pci_dev *parent = pdev->bus->self;
struct pcie_link_state *link, *root, *parent_link;
- if ((aspm_disabled && !aspm_clear_state) || !pci_is_pcie(pdev) ||
- !parent || !parent->link_state)
+ if (!pci_is_pcie(pdev) || !parent || !parent->link_state)
return;
if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
(parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))
@@ -734,13 +730,18 @@ void pcie_aspm_powersave_config_link(struct pci_dev *pdev)
* pci_disable_link_state - disable pci device's link state, so the link will
* never enter specific states
*/
-static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
+static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem,
+ bool force)
{
struct pci_dev *parent = pdev->bus->self;
struct pcie_link_state *link;
- if (aspm_disabled || !pci_is_pcie(pdev))
+ if (aspm_disabled && !force)
return;
+
+ if (!pci_is_pcie(pdev))
+ return;
+
if (pdev->pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
pdev->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)
parent = pdev;
@@ -768,16 +769,31 @@ static void __pci_disable_link_state(struct pci_dev *pdev, int state, bool sem)
void pci_disable_link_state_locked(struct pci_dev *pdev, int state)
{
- __pci_disable_link_state(pdev, state, false);
+ __pci_disable_link_state(pdev, state, false, false);
}
EXPORT_SYMBOL(pci_disable_link_state_locked);
void pci_disable_link_state(struct pci_dev *pdev, int state)
{
- __pci_disable_link_state(pdev, state, true);
+ __pci_disable_link_state(pdev, state, true, false);
}
EXPORT_SYMBOL(pci_disable_link_state);
+void pcie_clear_aspm(struct pci_bus *bus)
+{
+ struct pci_dev *child;
+
+ /*
+ * Clear any ASPM setup that the firmware has carried out on this bus
+ */
+ list_for_each_entry(child, &bus->devices, bus_list) {
+ __pci_disable_link_state(child, PCIE_LINK_STATE_L0S |
+ PCIE_LINK_STATE_L1 |
+ PCIE_LINK_STATE_CLKPM,
+ false, true);
+ }
+}
+
static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
{
int i;
@@ -935,6 +951,7 @@ void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev)
static int __init pcie_aspm_disable(char *str)
{
if (!strcmp(str, "off")) {
+ aspm_policy = POLICY_DEFAULT;
aspm_disabled = 1;
aspm_support_enabled = false;
printk(KERN_INFO "PCIe ASPM is disabled\n");
@@ -947,16 +964,18 @@ static int __init pcie_aspm_disable(char *str)
__setup("pcie_aspm=", pcie_aspm_disable);
-void pcie_clear_aspm(void)
-{
- if (!aspm_force)
- aspm_clear_state = 1;
-}
-
void pcie_no_aspm(void)
{
- if (!aspm_force)
+ /*
+ * Disabling ASPM is intended to prevent the kernel from modifying
+ * existing hardware state, not to clear existing state. To that end:
+ * (a) set policy to POLICY_DEFAULT in order to avoid changing state
+ * (b) prevent userspace from changing policy
+ */
+ if (!aspm_force) {
+ aspm_policy = POLICY_DEFAULT;
aspm_disabled = 1;
+ }
}
/**
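
With pcie_clear_aspm() now operating on a specific bus instead of setting global state, the expected caller is a host-bridge driver that owns ASPM control but must not leave firmware ASPM enabled. A rough sketch under that assumption; my_host_bridge_finish() is an invented name, and the declaration is assumed to live in <linux/pci-aspm.h>:

#include <linux/pci.h>
#include <linux/pci-aspm.h>

/*
 * Hypothetical host-bridge setup path: after the root bus has been
 * scanned, strip any ASPM/Clock PM configuration the firmware left on
 * the devices directly on that bus.
 */
static void my_host_bridge_finish(struct pci_bus *root_bus, bool aspm_unsupported)
{
	if (aspm_unsupported)
		pcie_clear_aspm(root_bus);
}
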
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 04e74f4..71eac9c 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -651,6 +651,11 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
secondary, subordinate, pass);
+ if (!primary && (primary != bus->number) && secondary && subordinate) {
+ dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
+ primary = bus->number;
+ }
+
/* Check if setup is sensible at all */
if (!pass &&
(primary != bus->number || secondary <= bus->number)) {
@@ -1522,19 +1527,21 @@ unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
return max;
}
-struct pci_bus * pci_create_bus(struct device *parent,
- int bus, struct pci_ops *ops, void *sysdata)
+struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
+ struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
- int error;
+ int error, i;
struct pci_bus *b, *b2;
struct device *dev;
+ struct pci_bus_resource *bus_res, *n;
+ struct resource *res;
b = pci_alloc_bus();
if (!b)
return NULL;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev){
+ if (!dev) {
kfree(b);
return NULL;
}
@@ -1577,8 +1584,20 @@ struct pci_bus * pci_create_bus(struct device *parent,
pci_create_legacy_files(b);
b->number = b->secondary = bus;
- b->resource[0] = &ioport_resource;
- b->resource[1] = &iomem_resource;
+
+ /* Add initial resources to the bus */
+ list_for_each_entry_safe(bus_res, n, resources, list)
+ list_move_tail(&bus_res->list, &b->resources);
+
+ if (parent)
+ dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
+ else
+ printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));
+
+ pci_bus_for_each_resource(b, res, i) {
+ if (res)
+ dev_info(&b->dev, "root bus resource %pR\n", res);
+ }
return b;
@@ -1594,18 +1613,58 @@ err_out:
return NULL;
}
+struct pci_bus * __devinit pci_scan_root_bus(struct device *parent, int bus,
+ struct pci_ops *ops, void *sysdata, struct list_head *resources)
+{
+ struct pci_bus *b;
+
+ b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
+ if (!b)
+ return NULL;
+
+ b->subordinate = pci_scan_child_bus(b);
+ pci_bus_add_devices(b);
+ return b;
+}
+EXPORT_SYMBOL(pci_scan_root_bus);
+
+/* Deprecated; use pci_scan_root_bus() instead */
struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent,
int bus, struct pci_ops *ops, void *sysdata)
{
+ LIST_HEAD(resources);
struct pci_bus *b;
- b = pci_create_bus(parent, bus, ops, sysdata);
+ pci_add_resource(&resources, &ioport_resource);
+ pci_add_resource(&resources, &iomem_resource);
+ b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
if (b)
b->subordinate = pci_scan_child_bus(b);
+ else
+ pci_free_resource_list(&resources);
return b;
}
EXPORT_SYMBOL(pci_scan_bus_parented);
+struct pci_bus * __devinit pci_scan_bus(int bus, struct pci_ops *ops,
+ void *sysdata)
+{
+ LIST_HEAD(resources);
+ struct pci_bus *b;
+
+ pci_add_resource(&resources, &ioport_resource);
+ pci_add_resource(&resources, &iomem_resource);
+ b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
+ if (b) {
+ b->subordinate = pci_scan_child_bus(b);
+ pci_bus_add_devices(b);
+ } else {
+ pci_free_resource_list(&resources);
+ }
+ return b;
+}
+EXPORT_SYMBOL(pci_scan_bus);
+
#ifdef CONFIG_HOTPLUG
/**
* pci_rescan_bus - scan a PCI bus for devices.
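
The probe.c changes above replace pci_create_bus()/pci_scan_bus_parented() with resource-aware constructors. A sketch of how platform code might feed its own host-bridge apertures to pci_scan_root_bus(); the window addresses and my_* names are invented examples:

#include <linux/ioport.h>
#include <linux/pci.h>

static struct resource my_io_res = {
	.name	= "My PCI IO",
	.start	= 0x1000,
	.end	= 0xffff,
	.flags	= IORESOURCE_IO,
};

static struct resource my_mem_res = {
	.name	= "My PCI MEM",
	.start	= 0x80000000,
	.end	= 0x8fffffff,
	.flags	= IORESOURCE_MEM,
};

static struct pci_bus *my_scan_host_bridge(struct device *parent,
					   struct pci_ops *ops, void *sysdata)
{
	LIST_HEAD(resources);
	struct pci_bus *bus;

	/* describe the bridge's real apertures instead of falling back to
	 * the global ioport_resource/iomem_resource defaults */
	pci_add_resource(&resources, &my_io_res);
	pci_add_resource(&resources, &my_mem_res);

	/* scans child buses and adds the devices in one call */
	bus = pci_scan_root_bus(parent, 0, ops, sysdata, &resources);
	if (!bus)
		pci_free_resource_list(&resources);
	return bus;
}

Freeing the resource list on failure mirrors what the compatibility wrappers in the patch (pci_scan_bus() and pci_scan_bus_parented()) do.
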
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 7f87bee..ef8b18c 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -77,6 +77,7 @@ void pci_remove_bus(struct pci_bus *pci_bus)
}
EXPORT_SYMBOL(pci_remove_bus);
+static void __pci_remove_behind_bridge(struct pci_dev *dev);
/**
* pci_remove_bus_device - remove a PCI device and any children
* @dev: the device to remove
@@ -89,19 +90,41 @@ EXPORT_SYMBOL(pci_remove_bus);
* device lists, remove the /proc entry, and notify userspace
* (/sbin/hotplug).
*/
-void pci_remove_bus_device(struct pci_dev *dev)
+static void __pci_remove_bus_device(struct pci_dev *dev)
{
- pci_stop_bus_device(dev);
if (dev->subordinate) {
struct pci_bus *b = dev->subordinate;
- pci_remove_behind_bridge(dev);
+ __pci_remove_behind_bridge(dev);
pci_remove_bus(b);
dev->subordinate = NULL;
}
pci_destroy_dev(dev);
}
+void pci_remove_bus_device(struct pci_dev *dev)
+{
+ pci_stop_bus_device(dev);
+ __pci_remove_bus_device(dev);
+}
+
+static void __pci_remove_behind_bridge(struct pci_dev *dev)
+{
+ struct list_head *l, *n;
+
+ if (dev->subordinate)
+ list_for_each_safe(l, n, &dev->subordinate->devices)
+ __pci_remove_bus_device(pci_dev_b(l));
+}
+
+static void pci_stop_behind_bridge(struct pci_dev *dev)
+{
+ struct list_head *l, *n;
+
+ if (dev->subordinate)
+ list_for_each_safe(l, n, &dev->subordinate->devices)
+ pci_stop_bus_device(pci_dev_b(l));
+}
/**
* pci_remove_behind_bridge - remove all devices behind a PCI bridge
@@ -113,11 +136,8 @@ void pci_remove_bus_device(struct pci_dev *dev)
*/
void pci_remove_behind_bridge(struct pci_dev *dev)
{
- struct list_head *l, *n;
-
- if (dev->subordinate)
- list_for_each_safe(l, n, &dev->subordinate->devices)
- pci_remove_bus_device(pci_dev_b(l));
+ pci_stop_behind_bridge(dev);
+ __pci_remove_behind_bridge(dev);
}
static void pci_stop_bus_devices(struct pci_bus *bus)
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 5717509b..b66bfdb 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -85,9 +85,9 @@ void pci_update_resource(struct pci_dev *dev, int resno)
}
}
res->flags &= ~IORESOURCE_UNSET;
- dev_info(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx])\n",
- resno, res, (unsigned long long)region.start,
- (unsigned long long)region.end);
+ dev_dbg(&dev->dev, "BAR %d: set to %pR (PCI address [%#llx-%#llx])\n",
+ resno, res, (unsigned long long)region.start,
+ (unsigned long long)region.end);
}
int pci_claim_resource(struct pci_dev *dev, int resource)
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index 90832a9..1620088 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -189,7 +189,7 @@ static int pcifront_bus_read(struct pci_bus *bus, unsigned int devfn,
if (verbose_request)
dev_info(&pdev->xdev->dev,
- "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
+ "read dev=%04x:%02x:%02x.%d - offset %x size %d\n",
pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
PCI_FUNC(devfn), where, size);
@@ -228,7 +228,7 @@ static int pcifront_bus_write(struct pci_bus *bus, unsigned int devfn,
if (verbose_request)
dev_info(&pdev->xdev->dev,
- "write dev=%04x:%02x:%02x.%01x - "
+ "write dev=%04x:%02x:%02x.%d - "
"offset %x size %d val %x\n",
pci_domain_nr(bus), bus->number,
PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
@@ -432,7 +432,7 @@ static int __devinit pcifront_scan_bus(struct pcifront_device *pdev,
d = pci_scan_single_device(b, devfn);
if (d)
dev_info(&pdev->xdev->dev, "New device on "
- "%04x:%02x:%02x.%02x found.\n", domain, bus,
+ "%04x:%02x:%02x.%d found.\n", domain, bus,
PCI_SLOT(devfn), PCI_FUNC(devfn));
}
@@ -1041,7 +1041,7 @@ static int pcifront_detach_devices(struct pcifront_device *pdev)
pci_dev = pci_get_slot(pci_bus, PCI_DEVFN(slot, func));
if (!pci_dev) {
dev_dbg(&pdev->xdev->dev,
- "Cannot get PCI device %04x:%02x:%02x.%02x\n",
+ "Cannot get PCI device %04x:%02x:%02x.%d\n",
domain, bus, slot, func);
continue;
}
@@ -1049,7 +1049,7 @@ static int pcifront_detach_devices(struct pcifront_device *pdev)
pci_dev_put(pci_dev);
dev_dbg(&pdev->xdev->dev,
- "PCI device %04x:%02x:%02x.%02x removed.\n",
+ "PCI device %04x:%02x:%02x.%d removed.\n",
domain, bus, slot, func);
}
@@ -1126,14 +1126,11 @@ static const struct xenbus_device_id xenpci_ids[] = {
{""},
};
-static struct xenbus_driver xenbus_pcifront_driver = {
- .name = "pcifront",
- .owner = THIS_MODULE,
- .ids = xenpci_ids,
+static DEFINE_XENBUS_DRIVER(xenpci, "pcifront",
.probe = pcifront_xenbus_probe,
.remove = pcifront_xenbus_remove,
.otherend_changed = pcifront_backend_changed,
-};
+);
static int __init pcifront_init(void)
{
@@ -1142,12 +1139,12 @@ static int __init pcifront_init(void)
pci_frontend_registrar(1 /* enable */);
- return xenbus_register_frontend(&xenbus_pcifront_driver);
+ return xenbus_register_frontend(&xenpci_driver);
}
static void __exit pcifront_cleanup(void)
{
- xenbus_unregister_driver(&xenbus_pcifront_driver);
+ xenbus_unregister_driver(&xenpci_driver);
pci_frontend_registrar(0 /* disable */);
}
module_init(pcifront_init);
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 6e318ce..f9e3fb3 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -155,18 +155,14 @@ config PCMCIA_M8XX
This driver is also available as a module called m8xx_pcmcia.
-config PCMCIA_AU1X00
- tristate "Au1x00 pcmcia support"
- depends on MIPS_ALCHEMY && PCMCIA
-
config PCMCIA_ALCHEMY_DEVBOARD
tristate "Alchemy Db/Pb1xxx PCMCIA socket services"
depends on MIPS_ALCHEMY && PCMCIA
select 64BIT_PHYS_ADDR
help
 	  Enable this driver if you want PCMCIA support on your Alchemy
- Db1000, Db/Pb1100, Db/Pb1500, Db/Pb1550, Db/Pb1200 board.
- NOT suitable for the PB1000!
+ Db1000, Db/Pb1100, Db/Pb1500, Db/Pb1550, Db/Pb1200, DB1300
+ board. NOT suitable for the PB1000!
This driver is also available as a module called db1xxx_ss.ko
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 29935ea..ec543a4 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -29,7 +29,6 @@ obj-$(CONFIG_PCMCIA_SA1100) += sa11xx_base.o sa1100_cs.o
obj-$(CONFIG_PCMCIA_SA1111) += sa11xx_base.o sa1111_cs.o
obj-$(CONFIG_M32R_PCC) += m32r_pcc.o
obj-$(CONFIG_M32R_CFC) += m32r_cfc.o
-obj-$(CONFIG_PCMCIA_AU1X00) += au1x00_ss.o
obj-$(CONFIG_PCMCIA_BCM63XX) += bcm63xx_pcmcia.o
obj-$(CONFIG_PCMCIA_VRC4171) += vrc4171_card.o
obj-$(CONFIG_PCMCIA_VRC4173) += vrc4173_cardu.o
@@ -39,9 +38,6 @@ obj-$(CONFIG_AT91_CF) += at91_cf.o
obj-$(CONFIG_ELECTRA_CF) += electra_cf.o
obj-$(CONFIG_PCMCIA_ALCHEMY_DEVBOARD) += db1xxx_ss.o
-au1x00_ss-y += au1000_generic.o
-au1x00_ss-$(CONFIG_MIPS_PB1000) += au1000_pb1x00.o
-
sa1111_cs-y += sa1111_generic.o
sa1111_cs-$(CONFIG_ASSABET_NEPONSET) += sa1100_neponset.o
sa1111_cs-$(CONFIG_SA1100_BADGE4) += sa1100_badge4.o
diff --git a/drivers/pcmcia/au1000_generic.c b/drivers/pcmcia/au1000_generic.c
deleted file mode 100644
index 95dd7c6..0000000
--- a/drivers/pcmcia/au1000_generic.c
+++ /dev/null
@@ -1,545 +0,0 @@
-/*
- *
- * Alchemy Semi Au1000 pcmcia driver
- *
- * Copyright 2001-2003 MontaVista Software Inc.
- * Author: MontaVista Software, Inc.
- * ppopov@embeddedalley.com or source@mvista.com
- *
- * Copyright 2004 Pete Popov, Embedded Alley Solutions, Inc.
- * Updated the driver to 2.6. Followed the sa11xx API and largely
- * copied many of the hardware independent functions.
- *
- * ########################################################################
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * ########################################################################
- *
- *
- */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/init.h>
-#include <linux/cpufreq.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/notifier.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/mutex.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/system.h>
-
-#include <asm/mach-au1x00/au1000.h>
-#include "au1000_generic.h"
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Pete Popov <ppopov@embeddedalley.com>");
-MODULE_DESCRIPTION("Linux PCMCIA Card Services: Au1x00 Socket Controller");
-
-#if 0
-#define debug(x,args...) printk(KERN_DEBUG "%s: " x, __func__ , ##args)
-#else
-#define debug(x,args...)
-#endif
-
-#define MAP_SIZE 0x100000
-extern struct au1000_pcmcia_socket au1000_pcmcia_socket[];
-#define PCMCIA_SOCKET(x) (au1000_pcmcia_socket + (x))
-#define to_au1000_socket(x) container_of(x, struct au1000_pcmcia_socket, socket)
-
-/* Some boards like to support CF cards as IDE root devices, so they
- * grab pcmcia sockets directly.
- */
-u32 *pcmcia_base_vaddrs[2];
-extern const unsigned long mips_io_port_base;
-
-static DEFINE_MUTEX(pcmcia_sockets_lock);
-
-static int (*au1x00_pcmcia_hw_init[])(struct device *dev) = {
- au1x_board_init,
-};
-
-static int
-au1x00_pcmcia_skt_state(struct au1000_pcmcia_socket *skt)
-{
- struct pcmcia_state state;
- unsigned int stat;
-
- memset(&state, 0, sizeof(struct pcmcia_state));
-
- skt->ops->socket_state(skt, &state);
-
- stat = state.detect ? SS_DETECT : 0;
- stat |= state.ready ? SS_READY : 0;
- stat |= state.wrprot ? SS_WRPROT : 0;
- stat |= state.vs_3v ? SS_3VCARD : 0;
- stat |= state.vs_Xv ? SS_XVCARD : 0;
- stat |= skt->cs_state.Vcc ? SS_POWERON : 0;
-
- if (skt->cs_state.flags & SS_IOCARD)
- stat |= state.bvd1 ? SS_STSCHG : 0;
- else {
- if (state.bvd1 == 0)
- stat |= SS_BATDEAD;
- else if (state.bvd2 == 0)
- stat |= SS_BATWARN;
- }
- return stat;
-}
-
-/*
- * au100_pcmcia_config_skt
- *
- * Convert PCMCIA socket state to our socket configure structure.
- */
-static int
-au1x00_pcmcia_config_skt(struct au1000_pcmcia_socket *skt, socket_state_t *state)
-{
- int ret;
-
- ret = skt->ops->configure_socket(skt, state);
- if (ret == 0) {
- skt->cs_state = *state;
- }
-
- if (ret < 0)
- debug("unable to configure socket %d\n", skt->nr);
-
- return ret;
-}
-
-/* au1x00_pcmcia_sock_init()
- *
- * (Re-)Initialise the socket, turning on status interrupts
- * and PCMCIA bus. This must wait for power to stabilise
- * so that the card status signals report correctly.
- *
- * Returns: 0
- */
-static int au1x00_pcmcia_sock_init(struct pcmcia_socket *sock)
-{
- struct au1000_pcmcia_socket *skt = to_au1000_socket(sock);
-
- debug("initializing socket %u\n", skt->nr);
-
- skt->ops->socket_init(skt);
- return 0;
-}
-
-/*
- * au1x00_pcmcia_suspend()
- *
- * Remove power on the socket, disable IRQs from the card.
- * Turn off status interrupts, and disable the PCMCIA bus.
- *
- * Returns: 0
- */
-static int au1x00_pcmcia_suspend(struct pcmcia_socket *sock)
-{
- struct au1000_pcmcia_socket *skt = to_au1000_socket(sock);
-
- debug("suspending socket %u\n", skt->nr);
-
- skt->ops->socket_suspend(skt);
-
- return 0;
-}
-
-static DEFINE_SPINLOCK(status_lock);
-
-/*
- * au1x00_check_status()
- */
-static void au1x00_check_status(struct au1000_pcmcia_socket *skt)
-{
- unsigned int events;
-
- debug("entering PCMCIA monitoring thread\n");
-
- do {
- unsigned int status;
- unsigned long flags;
-
- status = au1x00_pcmcia_skt_state(skt);
-
- spin_lock_irqsave(&status_lock, flags);
- events = (status ^ skt->status) & skt->cs_state.csc_mask;
- skt->status = status;
- spin_unlock_irqrestore(&status_lock, flags);
-
- debug("events: %s%s%s%s%s%s\n",
- events == 0 ? "<NONE>" : "",
- events & SS_DETECT ? "DETECT " : "",
- events & SS_READY ? "READY " : "",
- events & SS_BATDEAD ? "BATDEAD " : "",
- events & SS_BATWARN ? "BATWARN " : "",
- events & SS_STSCHG ? "STSCHG " : "");
-
- if (events)
- pcmcia_parse_events(&skt->socket, events);
- } while (events);
-}
-
-/*
- * au1x00_pcmcia_poll_event()
- * Let's poll for events in addition to IRQs since IRQ only is unreliable...
- */
-static void au1x00_pcmcia_poll_event(unsigned long dummy)
-{
- struct au1000_pcmcia_socket *skt = (struct au1000_pcmcia_socket *)dummy;
- debug("polling for events\n");
-
- mod_timer(&skt->poll_timer, jiffies + AU1000_PCMCIA_POLL_PERIOD);
-
- au1x00_check_status(skt);
-}
-
-/* au1x00_pcmcia_get_status()
- *
- * From the sa11xx_core.c:
- * Implements the get_status() operation for the in-kernel PCMCIA
- * service (formerly SS_GetStatus in Card Services). Essentially just
- * fills in bits in `status' according to internal driver state or
- * the value of the voltage detect chipselect register.
- *
- * As a debugging note, during card startup, the PCMCIA core issues
- * three set_socket() commands in a row the first with RESET deasserted,
- * the second with RESET asserted, and the last with RESET deasserted
- * again. Following the third set_socket(), a get_status() command will
- * be issued. The kernel is looking for the SS_READY flag (see
- * setup_socket(), reset_socket(), and unreset_socket() in cs.c).
- *
- * Returns: 0
- */
-static int
-au1x00_pcmcia_get_status(struct pcmcia_socket *sock, unsigned int *status)
-{
- struct au1000_pcmcia_socket *skt = to_au1000_socket(sock);
-
- skt->status = au1x00_pcmcia_skt_state(skt);
- *status = skt->status;
-
- return 0;
-}
-
-/* au1x00_pcmcia_set_socket()
- * Implements the set_socket() operation for the in-kernel PCMCIA
- * service (formerly SS_SetSocket in Card Services). We more or
- * less punt all of this work and let the kernel handle the details
- * of power configuration, reset, &c. We also record the value of
- * `state' in order to regurgitate it to the PCMCIA core later.
- *
- * Returns: 0
- */
-static int
-au1x00_pcmcia_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
-{
- struct au1000_pcmcia_socket *skt = to_au1000_socket(sock);
-
- debug("for sock %u\n", skt->nr);
-
- debug("\tmask: %s%s%s%s%s%s\n\tflags: %s%s%s%s%s%s\n",
- (state->csc_mask==0)?"<NONE>":"",
- (state->csc_mask&SS_DETECT)?"DETECT ":"",
- (state->csc_mask&SS_READY)?"READY ":"",
- (state->csc_mask&SS_BATDEAD)?"BATDEAD ":"",
- (state->csc_mask&SS_BATWARN)?"BATWARN ":"",
- (state->csc_mask&SS_STSCHG)?"STSCHG ":"",
- (state->flags==0)?"<NONE>":"",
- (state->flags&SS_PWR_AUTO)?"PWR_AUTO ":"",
- (state->flags&SS_IOCARD)?"IOCARD ":"",
- (state->flags&SS_RESET)?"RESET ":"",
- (state->flags&SS_SPKR_ENA)?"SPKR_ENA ":"",
- (state->flags&SS_OUTPUT_ENA)?"OUTPUT_ENA ":"");
- debug("\tVcc %d Vpp %d irq %d\n",
- state->Vcc, state->Vpp, state->io_irq);
-
- return au1x00_pcmcia_config_skt(skt, state);
-}
-
-int
-au1x00_pcmcia_set_io_map(struct pcmcia_socket *sock, struct pccard_io_map *map)
-{
- struct au1000_pcmcia_socket *skt = to_au1000_socket(sock);
- unsigned int speed;
-
- if(map->map>=MAX_IO_WIN){
- debug("map (%d) out of range\n", map->map);
- return -1;
- }
-
- if(map->flags&MAP_ACTIVE){
- speed=(map->speed>0)?map->speed:AU1000_PCMCIA_IO_SPEED;
- skt->spd_io[map->map] = speed;
- }
-
- map->start=(unsigned int)(u32)skt->virt_io;
- map->stop=map->start+MAP_SIZE;
- return 0;
-
-} /* au1x00_pcmcia_set_io_map() */
-
-
-static int
-au1x00_pcmcia_set_mem_map(struct pcmcia_socket *sock, struct pccard_mem_map *map)
-{
- struct au1000_pcmcia_socket *skt = to_au1000_socket(sock);
- unsigned short speed = map->speed;
-
- if(map->map>=MAX_WIN){
- debug("map (%d) out of range\n", map->map);
- return -1;
- }
-
- if (map->flags & MAP_ATTRIB) {
- skt->spd_attr[map->map] = speed;
- skt->spd_mem[map->map] = 0;
- } else {
- skt->spd_attr[map->map] = 0;
- skt->spd_mem[map->map] = speed;
- }
-
- if (map->flags & MAP_ATTRIB) {
- map->static_start = skt->phys_attr + map->card_start;
- }
- else {
- map->static_start = skt->phys_mem + map->card_start;
- }
-
- debug("set_mem_map %d start %08lx card_start %08x\n",
- map->map, map->static_start, map->card_start);
- return 0;
-
-} /* au1x00_pcmcia_set_mem_map() */
-
-static struct pccard_operations au1x00_pcmcia_operations = {
- .init = au1x00_pcmcia_sock_init,
- .suspend = au1x00_pcmcia_suspend,
- .get_status = au1x00_pcmcia_get_status,
- .set_socket = au1x00_pcmcia_set_socket,
- .set_io_map = au1x00_pcmcia_set_io_map,
- .set_mem_map = au1x00_pcmcia_set_mem_map,
-};
-
-static const char *skt_names[] = {
- "PCMCIA socket 0",
- "PCMCIA socket 1",
-};
-
-struct skt_dev_info {
- int nskt;
-};
-
-int au1x00_pcmcia_socket_probe(struct device *dev, struct pcmcia_low_level *ops, int first, int nr)
-{
- struct skt_dev_info *sinfo;
- struct au1000_pcmcia_socket *skt;
- int ret, i;
-
- sinfo = kzalloc(sizeof(struct skt_dev_info), GFP_KERNEL);
- if (!sinfo) {
- ret = -ENOMEM;
- goto out;
- }
-
- sinfo->nskt = nr;
-
- /*
- * Initialise the per-socket structure.
- */
- for (i = 0; i < nr; i++) {
- skt = PCMCIA_SOCKET(i);
- memset(skt, 0, sizeof(*skt));
-
- skt->socket.resource_ops = &pccard_static_ops;
- skt->socket.ops = &au1x00_pcmcia_operations;
- skt->socket.owner = ops->owner;
- skt->socket.dev.parent = dev;
-
- init_timer(&skt->poll_timer);
- skt->poll_timer.function = au1x00_pcmcia_poll_event;
- skt->poll_timer.data = (unsigned long)skt;
- skt->poll_timer.expires = jiffies + AU1000_PCMCIA_POLL_PERIOD;
-
- skt->nr = first + i;
- skt->irq = 255;
- skt->dev = dev;
- skt->ops = ops;
-
- skt->res_skt.name = skt_names[skt->nr];
- skt->res_io.name = "io";
- skt->res_io.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
- skt->res_mem.name = "memory";
- skt->res_mem.flags = IORESOURCE_MEM;
- skt->res_attr.name = "attribute";
- skt->res_attr.flags = IORESOURCE_MEM;
-
- /*
- * PCMCIA client drivers use the inb/outb macros to access the
- * IO registers. Since mips_io_port_base is added to the
- * access address of the mips implementation of inb/outb,
- * we need to subtract it here because we want to access the
- * I/O or MEM address directly, without going through this
- * "mips_io_port_base" mechanism.
- */
- if (i == 0) {
- skt->virt_io = (void *)
- (ioremap((phys_t)AU1X_SOCK0_IO, 0x1000) -
- (u32)mips_io_port_base);
- skt->phys_attr = AU1X_SOCK0_PHYS_ATTR;
- skt->phys_mem = AU1X_SOCK0_PHYS_MEM;
- }
- else {
- skt->virt_io = (void *)
- (ioremap((phys_t)AU1X_SOCK1_IO, 0x1000) -
- (u32)mips_io_port_base);
- skt->phys_attr = AU1X_SOCK1_PHYS_ATTR;
- skt->phys_mem = AU1X_SOCK1_PHYS_MEM;
- }
- pcmcia_base_vaddrs[i] = (u32 *)skt->virt_io;
- ret = ops->hw_init(skt);
-
- skt->socket.features = SS_CAP_STATIC_MAP|SS_CAP_PCCARD;
- skt->socket.irq_mask = 0;
- skt->socket.map_size = MAP_SIZE;
- skt->socket.pci_irq = skt->irq;
- skt->socket.io_offset = (unsigned long)skt->virt_io;
-
- skt->status = au1x00_pcmcia_skt_state(skt);
-
- ret = pcmcia_register_socket(&skt->socket);
- if (ret)
- goto out_err;
-
- WARN_ON(skt->socket.sock != i);
-
- add_timer(&skt->poll_timer);
- }
-
- dev_set_drvdata(dev, sinfo);
- return 0;
-
-
-out_err:
- ops->hw_shutdown(skt);
- while (i-- > 0) {
- skt = PCMCIA_SOCKET(i);
-
- del_timer_sync(&skt->poll_timer);
- pcmcia_unregister_socket(&skt->socket);
- if (i == 0) {
- iounmap(skt->virt_io + (u32)mips_io_port_base);
- skt->virt_io = NULL;
- }
-#ifndef CONFIG_MIPS_XXS1500
- else {
- iounmap(skt->virt_io + (u32)mips_io_port_base);
- skt->virt_io = NULL;
- }
-#endif
- ops->hw_shutdown(skt);
-
- }
- kfree(sinfo);
-out:
- return ret;
-}
-
-int au1x00_drv_pcmcia_remove(struct platform_device *dev)
-{
- struct skt_dev_info *sinfo = platform_get_drvdata(dev);
- int i;
-
- mutex_lock(&pcmcia_sockets_lock);
- platform_set_drvdata(dev, NULL);
-
- for (i = 0; i < sinfo->nskt; i++) {
- struct au1000_pcmcia_socket *skt = PCMCIA_SOCKET(i);
-
- del_timer_sync(&skt->poll_timer);
- pcmcia_unregister_socket(&skt->socket);
- skt->ops->hw_shutdown(skt);
- au1x00_pcmcia_config_skt(skt, &dead_socket);
- iounmap(skt->virt_io + (u32)mips_io_port_base);
- skt->virt_io = NULL;
- }
-
- kfree(sinfo);
- mutex_unlock(&pcmcia_sockets_lock);
- return 0;
-}
-
-
-/*
- * PCMCIA "Driver" API
- */
-
-static int au1x00_drv_pcmcia_probe(struct platform_device *dev)
-{
- int i, ret = -ENODEV;
-
- mutex_lock(&pcmcia_sockets_lock);
- for (i=0; i < ARRAY_SIZE(au1x00_pcmcia_hw_init); i++) {
- ret = au1x00_pcmcia_hw_init[i](&dev->dev);
- if (ret == 0)
- break;
- }
- mutex_unlock(&pcmcia_sockets_lock);
- return ret;
-}
-
-static struct platform_driver au1x00_pcmcia_driver = {
- .driver = {
- .name = "au1x00-pcmcia",
- .owner = THIS_MODULE,
- },
- .probe = au1x00_drv_pcmcia_probe,
- .remove = au1x00_drv_pcmcia_remove,
-};
-
-
-/* au1x00_pcmcia_init()
- *
- * This routine performs low-level PCMCIA initialization and then
- * registers this socket driver with Card Services.
- *
- * Returns: 0 on success, -ve error code on failure
- */
-static int __init au1x00_pcmcia_init(void)
-{
- int error = 0;
- error = platform_driver_register(&au1x00_pcmcia_driver);
- return error;
-}
-
-/* au1x00_pcmcia_exit()
- * Invokes the low-level kernel service to free IRQs associated with this
- * socket controller and reset GPIO edge detection.
- */
-static void __exit au1x00_pcmcia_exit(void)
-{
- platform_driver_unregister(&au1x00_pcmcia_driver);
-}
-
-module_init(au1x00_pcmcia_init);
-module_exit(au1x00_pcmcia_exit);
diff --git a/drivers/pcmcia/au1000_generic.h b/drivers/pcmcia/au1000_generic.h
deleted file mode 100644
index 5c36bda..0000000
--- a/drivers/pcmcia/au1000_generic.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Alchemy Semi Au1000 pcmcia driver include file
- *
- * Copyright 2001 MontaVista Software Inc.
- * Author: MontaVista Software, Inc.
- * ppopov@mvista.com or source@mvista.com
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-#ifndef __ASM_AU1000_PCMCIA_H
-#define __ASM_AU1000_PCMCIA_H
-
-/* include the world */
-
-#include <pcmcia/ss.h>
-#include <pcmcia/cistpl.h>
-#include "cs_internal.h"
-
-#define AU1000_PCMCIA_POLL_PERIOD (2*HZ)
-#define AU1000_PCMCIA_IO_SPEED (255)
-#define AU1000_PCMCIA_MEM_SPEED (300)
-
-#define AU1X_SOCK0_IO 0xF00000000ULL
-#define AU1X_SOCK0_PHYS_ATTR 0xF40000000ULL
-#define AU1X_SOCK0_PHYS_MEM 0xF80000000ULL
-
-/* pcmcia socket 1 needs external glue logic so the memory map
- * differs from board to board.
- */
-#if defined(CONFIG_MIPS_PB1000)
-#define AU1X_SOCK1_IO 0xF08000000ULL
-#define AU1X_SOCK1_PHYS_ATTR 0xF48000000ULL
-#define AU1X_SOCK1_PHYS_MEM 0xF88000000ULL
-#endif
-
-struct pcmcia_state {
- unsigned detect: 1,
- ready: 1,
- wrprot: 1,
- bvd1: 1,
- bvd2: 1,
- vs_3v: 1,
- vs_Xv: 1;
-};
-
-struct pcmcia_configure {
- unsigned sock: 8,
- vcc: 8,
- vpp: 8,
- output: 1,
- speaker: 1,
- reset: 1;
-};
-
-struct pcmcia_irqs {
- int sock;
- int irq;
- const char *str;
-};
-
-
-struct au1000_pcmcia_socket {
- struct pcmcia_socket socket;
-
- /*
- * Info from low level handler
- */
- struct device *dev;
- unsigned int nr;
- unsigned int irq;
-
- /*
- * Core PCMCIA state
- */
- struct pcmcia_low_level *ops;
-
- unsigned int status;
- socket_state_t cs_state;
-
- unsigned short spd_io[MAX_IO_WIN];
- unsigned short spd_mem[MAX_WIN];
- unsigned short spd_attr[MAX_WIN];
-
- struct resource res_skt;
- struct resource res_io;
- struct resource res_mem;
- struct resource res_attr;
-
- void * virt_io;
- unsigned int phys_io;
- unsigned int phys_attr;
- unsigned int phys_mem;
- unsigned short speed_io, speed_attr, speed_mem;
-
- unsigned int irq_state;
-
- struct timer_list poll_timer;
-};
-
-struct pcmcia_low_level {
- struct module *owner;
-
- int (*hw_init)(struct au1000_pcmcia_socket *);
- void (*hw_shutdown)(struct au1000_pcmcia_socket *);
-
- void (*socket_state)(struct au1000_pcmcia_socket *, struct pcmcia_state *);
- int (*configure_socket)(struct au1000_pcmcia_socket *, struct socket_state_t *);
-
- /*
- * Enable card status IRQs on (re-)initialisation. This can
- * be called at initialisation, power management event, or
- * pcmcia event.
- */
- void (*socket_init)(struct au1000_pcmcia_socket *);
-
- /*
- * Disable card status IRQs and PCMCIA bus on suspend.
- */
- void (*socket_suspend)(struct au1000_pcmcia_socket *);
-};
-
-extern int au1x_board_init(struct device *dev);
-
-#endif /* __ASM_AU1000_PCMCIA_H */
diff --git a/drivers/pcmcia/au1000_pb1x00.c b/drivers/pcmcia/au1000_pb1x00.c
deleted file mode 100644
index b239664..0000000
--- a/drivers/pcmcia/au1000_pb1x00.c
+++ /dev/null
@@ -1,294 +0,0 @@
-/*
- *
- * Alchemy Semi Pb1000 boards specific pcmcia routines.
- *
- * Copyright 2002 MontaVista Software Inc.
- * Author: MontaVista Software, Inc.
- * ppopov@mvista.com or source@mvista.com
- *
- * ########################################################################
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/ioport.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/proc_fs.h>
-#include <linux/types.h>
-
-#include <pcmcia/ss.h>
-#include <pcmcia/cistpl.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/system.h>
-
-#include <asm/au1000.h>
-#include <asm/au1000_pcmcia.h>
-
-#define debug(fmt, arg...) do { } while (0)
-
-#include <asm/pb1000.h>
-#define PCMCIA_IRQ AU1000_GPIO_15
-
-static int pb1x00_pcmcia_init(struct pcmcia_init *init)
-{
- u16 pcr;
- pcr = PCR_SLOT_0_RST | PCR_SLOT_1_RST;
-
- au_writel(0x8000, PB1000_MDR); /* clear pcmcia interrupt */
- au_sync_delay(100);
- au_writel(0x4000, PB1000_MDR); /* enable pcmcia interrupt */
- au_sync();
-
- pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,0);
- pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,1);
- au_writel(pcr, PB1000_PCR);
- au_sync_delay(20);
-
- return PCMCIA_NUM_SOCKS;
-}
-
-static int pb1x00_pcmcia_shutdown(void)
-{
- u16 pcr;
- pcr = PCR_SLOT_0_RST | PCR_SLOT_1_RST;
- pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,0);
- pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,1);
- au_writel(pcr, PB1000_PCR);
- au_sync_delay(20);
- return 0;
-}
-
-static int
-pb1x00_pcmcia_socket_state(unsigned sock, struct pcmcia_state *state)
-{
- u32 inserted0, inserted1;
- u16 vs0, vs1;
-
- vs0 = vs1 = (u16)au_readl(PB1000_ACR1);
- inserted0 = !(vs0 & (ACR1_SLOT_0_CD1 | ACR1_SLOT_0_CD2));
- inserted1 = !(vs1 & (ACR1_SLOT_1_CD1 | ACR1_SLOT_1_CD2));
- vs0 = (vs0 >> 4) & 0x3;
- vs1 = (vs1 >> 12) & 0x3;
-
- state->ready = 0;
- state->vs_Xv = 0;
- state->vs_3v = 0;
- state->detect = 0;
-
- if (sock == 0) {
- if (inserted0) {
- switch (vs0) {
- case 0:
- case 2:
- state->vs_3v=1;
- break;
- case 3: /* 5V */
- break;
- default:
- /* return without setting 'detect' */
- printk(KERN_ERR "pb1x00 bad VS (%d)\n",
- vs0);
- return 0;
- }
- state->detect = 1;
- }
- }
- else {
- if (inserted1) {
- switch (vs1) {
- case 0:
- case 2:
- state->vs_3v=1;
- break;
- case 3: /* 5V */
- break;
- default:
- /* return without setting 'detect' */
- printk(KERN_ERR "pb1x00 bad VS (%d)\n",
- vs1);
- return 0;
- }
- state->detect = 1;
- }
- }
-
- if (state->detect) {
- state->ready = 1;
- }
-
- state->bvd1=1;
- state->bvd2=1;
- state->wrprot=0;
- return 1;
-}
-
-
-static int pb1x00_pcmcia_get_irq_info(struct pcmcia_irq_info *info)
-{
-
- if(info->sock > PCMCIA_MAX_SOCK) return -1;
-
- /*
- * Even in the case of the Pb1000, both sockets are connected
- * to the same irq line.
- */
- info->irq = PCMCIA_IRQ;
-
- return 0;
-}
-
-
-static int
-pb1x00_pcmcia_configure_socket(const struct pcmcia_configure *configure)
-{
- u16 pcr;
-
- if(configure->sock > PCMCIA_MAX_SOCK) return -1;
-
- pcr = au_readl(PB1000_PCR);
-
- if (configure->sock == 0) {
- pcr &= ~(PCR_SLOT_0_VCC0 | PCR_SLOT_0_VCC1 |
- PCR_SLOT_0_VPP0 | PCR_SLOT_0_VPP1);
- }
- else {
- pcr &= ~(PCR_SLOT_1_VCC0 | PCR_SLOT_1_VCC1 |
- PCR_SLOT_1_VPP0 | PCR_SLOT_1_VPP1);
- }
-
- pcr &= ~PCR_SLOT_0_RST;
- debug("Vcc %dV Vpp %dV, pcr %x\n",
- configure->vcc, configure->vpp, pcr);
- switch(configure->vcc){
- case 0: /* Vcc 0 */
- switch(configure->vpp) {
- case 0:
- pcr |= SET_VCC_VPP(VCC_HIZ,VPP_GND,
- configure->sock);
- break;
- case 12:
- pcr |= SET_VCC_VPP(VCC_HIZ,VPP_12V,
- configure->sock);
- break;
- case 50:
- pcr |= SET_VCC_VPP(VCC_HIZ,VPP_5V,
- configure->sock);
- break;
- case 33:
- pcr |= SET_VCC_VPP(VCC_HIZ,VPP_3V,
- configure->sock);
- break;
- default:
- pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,
- configure->sock);
- printk("%s: bad Vcc/Vpp (%d:%d)\n",
- __func__,
- configure->vcc,
- configure->vpp);
- break;
- }
- break;
- case 50: /* Vcc 5V */
- switch(configure->vpp) {
- case 0:
- pcr |= SET_VCC_VPP(VCC_5V,VPP_GND,
- configure->sock);
- break;
- case 50:
- pcr |= SET_VCC_VPP(VCC_5V,VPP_5V,
- configure->sock);
- break;
- case 12:
- pcr |= SET_VCC_VPP(VCC_5V,VPP_12V,
- configure->sock);
- break;
- case 33:
- pcr |= SET_VCC_VPP(VCC_5V,VPP_3V,
- configure->sock);
- break;
- default:
- pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,
- configure->sock);
- printk("%s: bad Vcc/Vpp (%d:%d)\n",
- __func__,
- configure->vcc,
- configure->vpp);
- break;
- }
- break;
- case 33: /* Vcc 3.3V */
- switch(configure->vpp) {
- case 0:
- pcr |= SET_VCC_VPP(VCC_3V,VPP_GND,
- configure->sock);
- break;
- case 50:
- pcr |= SET_VCC_VPP(VCC_3V,VPP_5V,
- configure->sock);
- break;
- case 12:
- pcr |= SET_VCC_VPP(VCC_3V,VPP_12V,
- configure->sock);
- break;
- case 33:
- pcr |= SET_VCC_VPP(VCC_3V,VPP_3V,
- configure->sock);
- break;
- default:
- pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,
- configure->sock);
- printk("%s: bad Vcc/Vpp (%d:%d)\n",
- __func__,
- configure->vcc,
- configure->vpp);
- break;
- }
- break;
- default: /* what's this ? */
- pcr |= SET_VCC_VPP(VCC_HIZ,VPP_HIZ,configure->sock);
- printk(KERN_ERR "%s: bad Vcc %d\n",
- __func__, configure->vcc);
- break;
- }
-
- if (configure->sock == 0) {
- pcr &= ~(PCR_SLOT_0_RST);
- if (configure->reset)
- pcr |= PCR_SLOT_0_RST;
- }
- else {
- pcr &= ~(PCR_SLOT_1_RST);
- if (configure->reset)
- pcr |= PCR_SLOT_1_RST;
- }
- au_writel(pcr, PB1000_PCR);
- au_sync_delay(300);
-
- return 0;
-}
-
-
-struct pcmcia_low_level pb1x00_pcmcia_ops = {
- pb1x00_pcmcia_init,
- pb1x00_pcmcia_shutdown,
- pb1x00_pcmcia_socket_state,
- pb1x00_pcmcia_get_irq_info,
- pb1x00_pcmcia_configure_socket
-};
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index 3e49df6..5b7c227 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -7,7 +7,7 @@
/* This is a fairly generic PCMCIA socket driver suitable for the
* following Alchemy Development boards:
- * Db1000, Db/Pb1500, Db/Pb1100, Db/Pb1550, Db/Pb1200.
+ * Db1000, Db/Pb1500, Db/Pb1100, Db/Pb1550, Db/Pb1200, Db1300
*
* The Db1000 is used as a reference: Per-socket card-, carddetect- and
* statuschange IRQs connected to SoC GPIOs, control and status register
@@ -18,6 +18,7 @@
* - Pb1100/Pb1500: single socket only; voltage key bits VS are
* at STATUS[5:4] (instead of STATUS[1:0]).
* - Au1200-based: additional card-eject irqs, irqs not gpios!
+ * - Db1300: Db1200-like, no pwr ctrl, single socket (#1).
*/
#include <linux/delay.h>
@@ -59,11 +60,17 @@ struct db1x_pcmcia_sock {
#define BOARD_TYPE_DEFAULT 0 /* most boards */
#define BOARD_TYPE_DB1200 1 /* IRQs aren't gpios */
#define BOARD_TYPE_PB1100 2 /* VS bits slightly different */
+#define BOARD_TYPE_DB1300 3 /* no power control */
int board_type;
};
#define to_db1x_socket(x) container_of(x, struct db1x_pcmcia_sock, socket)
+static int db1300_card_inserted(struct db1x_pcmcia_sock *sock)
+{
+ return bcsr_read(BCSR_SIGSTAT) & (1 << 8);
+}
+
/* DB/PB1200: check CPLD SIGSTATUS register bit 10/12 */
static int db1200_card_inserted(struct db1x_pcmcia_sock *sock)
{
@@ -84,6 +91,8 @@ static int db1x_card_inserted(struct db1x_pcmcia_sock *sock)
switch (sock->board_type) {
case BOARD_TYPE_DB1200:
return db1200_card_inserted(sock);
+ case BOARD_TYPE_DB1300:
+ return db1300_card_inserted(sock);
default:
return db1000_card_inserted(sock);
}
@@ -160,7 +169,8 @@ static int db1x_pcmcia_setup_irqs(struct db1x_pcmcia_sock *sock)
* ejection handler have been registered and the currently
* active one disabled.
*/
- if (sock->board_type == BOARD_TYPE_DB1200) {
+ if ((sock->board_type == BOARD_TYPE_DB1200) ||
+ (sock->board_type == BOARD_TYPE_DB1300)) {
ret = request_irq(sock->insert_irq, db1200_pcmcia_cdirq,
IRQF_DISABLED, "pcmcia_insert", sock);
if (ret)
@@ -174,7 +184,7 @@ static int db1x_pcmcia_setup_irqs(struct db1x_pcmcia_sock *sock)
}
/* enable the currently silent one */
- if (db1200_card_inserted(sock))
+ if (db1x_card_inserted(sock))
enable_irq(sock->eject_irq);
else
enable_irq(sock->insert_irq);
@@ -270,7 +280,8 @@ static int db1x_pcmcia_configure(struct pcmcia_socket *skt,
}
/* create new voltage code */
- cr_set |= ((v << 2) | p) << (sock->nr * 8);
+ if (sock->board_type != BOARD_TYPE_DB1300)
+ cr_set |= ((v << 2) | p) << (sock->nr * 8);
changed = state->flags ^ sock->old_flags;
@@ -343,6 +354,10 @@ static int db1x_pcmcia_get_status(struct pcmcia_socket *skt,
/* if Vcc is not zero, we have applied power to a card */
status |= GET_VCC(cr, sock->nr) ? SS_POWERON : 0;
+ /* DB1300: power always on, but don't tell when no card present */
+ if ((sock->board_type == BOARD_TYPE_DB1300) && (status & SS_DETECT))
+ status = SS_POWERON | SS_3VCARD | SS_DETECT;
+
/* reset de-asserted? then we're ready */
status |= (GET_RESET(cr, sock->nr)) ? SS_READY : SS_RESET;
@@ -419,6 +434,9 @@ static int __devinit db1x_pcmcia_socket_probe(struct platform_device *pdev)
case BCSR_WHOAMI_PB1200 ... BCSR_WHOAMI_DB1200:
sock->board_type = BOARD_TYPE_DB1200;
break;
+ case BCSR_WHOAMI_DB1300:
+ sock->board_type = BOARD_TYPE_DB1300;
+ break;
default:
printk(KERN_INFO "db1xxx-ss: unknown board %d!\n", bid);
ret = -ENODEV;
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 749c2a1..1932029 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -1269,10 +1269,8 @@ static int pcmcia_bus_add(struct pcmcia_socket *skt)
static int pcmcia_bus_early_resume(struct pcmcia_socket *skt)
{
- if (!verify_cis_cache(skt)) {
- pcmcia_put_socket(skt);
+ if (!verify_cis_cache(skt))
return 0;
- }
dev_dbg(&skt->dev, "cis mismatch - different card\n");
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index a87e272..64d433e 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -328,21 +328,15 @@ static int pxa2xx_drv_pcmcia_probe(struct platform_device *dev)
goto err1;
}
- if (ret) {
- while (--i >= 0)
- soc_pcmcia_remove_one(&sinfo->skt[i]);
- kfree(sinfo);
- clk_put(clk);
- } else {
- pxa2xx_configure_sockets(&dev->dev);
- dev_set_drvdata(&dev->dev, sinfo);
- }
+ pxa2xx_configure_sockets(&dev->dev);
+ dev_set_drvdata(&dev->dev, sinfo);
return 0;
err1:
while (--i >= 0)
soc_pcmcia_remove_one(&sinfo->skt[i]);
+ clk_put(clk);
kfree(sinfo);
err0:
return ret;
diff --git a/drivers/pcmcia/pxa2xx_cm_x255.c b/drivers/pcmcia/pxa2xx_cm_x255.c
index 0b4f946..31ab6ddf 100644
--- a/drivers/pcmcia/pxa2xx_cm_x255.c
+++ b/drivers/pcmcia/pxa2xx_cm_x255.c
@@ -16,8 +16,6 @@
#include <linux/gpio.h>
#include <linux/export.h>
-#include <asm/mach-types.h>
-
#include "soc_common.h"
#define GPIO_PCMCIA_SKTSEL (54)
@@ -27,15 +25,15 @@
#define GPIO_PCMCIA_S1_RDYINT (8)
#define GPIO_PCMCIA_RESET (9)
-#define PCMCIA_S0_CD_VALID IRQ_GPIO(GPIO_PCMCIA_S0_CD_VALID)
-#define PCMCIA_S1_CD_VALID IRQ_GPIO(GPIO_PCMCIA_S1_CD_VALID)
-#define PCMCIA_S0_RDYINT IRQ_GPIO(GPIO_PCMCIA_S0_RDYINT)
-#define PCMCIA_S1_RDYINT IRQ_GPIO(GPIO_PCMCIA_S1_RDYINT)
+#define PCMCIA_S0_CD_VALID gpio_to_irq(GPIO_PCMCIA_S0_CD_VALID)
+#define PCMCIA_S1_CD_VALID gpio_to_irq(GPIO_PCMCIA_S1_CD_VALID)
+#define PCMCIA_S0_RDYINT gpio_to_irq(GPIO_PCMCIA_S0_RDYINT)
+#define PCMCIA_S1_RDYINT gpio_to_irq(GPIO_PCMCIA_S1_RDYINT)
static struct pcmcia_irqs irqs[] = {
- { 0, PCMCIA_S0_CD_VALID, "PCMCIA0 CD" },
- { 1, PCMCIA_S1_CD_VALID, "PCMCIA1 CD" },
+ { .sock = 0, .str = "PCMCIA0 CD" },
+ { .sock = 1, .str = "PCMCIA1 CD" },
};
static int cmx255_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
@@ -46,6 +44,8 @@ static int cmx255_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
gpio_direction_output(GPIO_PCMCIA_RESET, 0);
skt->socket.pci_irq = skt->nr == 0 ? PCMCIA_S0_RDYINT : PCMCIA_S1_RDYINT;
+ irqs[0].irq = PCMCIA_S0_CD_VALID;
+ irqs[1].irq = PCMCIA_S1_CD_VALID;
ret = soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
if (!ret)
gpio_free(GPIO_PCMCIA_RESET);
diff --git a/drivers/pcmcia/pxa2xx_cm_x270.c b/drivers/pcmcia/pxa2xx_cm_x270.c
index 923f315..3dc7621 100644
--- a/drivers/pcmcia/pxa2xx_cm_x270.c
+++ b/drivers/pcmcia/pxa2xx_cm_x270.c
@@ -16,20 +16,18 @@
#include <linux/gpio.h>
#include <linux/export.h>
-#include <asm/mach-types.h>
-
#include "soc_common.h"
#define GPIO_PCMCIA_S0_CD_VALID (84)
#define GPIO_PCMCIA_S0_RDYINT (82)
#define GPIO_PCMCIA_RESET (53)
-#define PCMCIA_S0_CD_VALID IRQ_GPIO(GPIO_PCMCIA_S0_CD_VALID)
-#define PCMCIA_S0_RDYINT IRQ_GPIO(GPIO_PCMCIA_S0_RDYINT)
+#define PCMCIA_S0_CD_VALID gpio_to_irq(GPIO_PCMCIA_S0_CD_VALID)
+#define PCMCIA_S0_RDYINT gpio_to_irq(GPIO_PCMCIA_S0_RDYINT)
static struct pcmcia_irqs irqs[] = {
- { 0, PCMCIA_S0_CD_VALID, "PCMCIA0 CD" },
+ { .sock = 0, .str = "PCMCIA0 CD" },
};
static int cmx270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
@@ -40,6 +38,7 @@ static int cmx270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
gpio_direction_output(GPIO_PCMCIA_RESET, 0);
skt->socket.pci_irq = PCMCIA_S0_RDYINT;
+ irqs[0].irq = PCMCIA_S0_CD_VALID;
ret = soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
if (!ret)
gpio_free(GPIO_PCMCIA_RESET);
diff --git a/drivers/pcmcia/pxa2xx_e740.c b/drivers/pcmcia/pxa2xx_e740.c
index 8bfbd4d..17cd2ce 100644
--- a/drivers/pcmcia/pxa2xx_e740.c
+++ b/drivers/pcmcia/pxa2xx_e740.c
@@ -26,20 +26,23 @@
static struct pcmcia_irqs cd_irqs[] = {
{
.sock = 0,
- .irq = IRQ_GPIO(GPIO_E740_PCMCIA_CD0),
.str = "CF card detect"
},
{
.sock = 1,
- .irq = IRQ_GPIO(GPIO_E740_PCMCIA_CD1),
.str = "Wifi switch"
},
};
static int e740_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
- skt->socket.pci_irq = skt->nr == 0 ? IRQ_GPIO(GPIO_E740_PCMCIA_RDY0) :
- IRQ_GPIO(GPIO_E740_PCMCIA_RDY1);
+ if (skt->nr == 0)
+ skt->socket.pci_irq = gpio_to_irq(GPIO_E740_PCMCIA_RDY0);
+ else
+ skt->socket.pci_irq = gpio_to_irq(GPIO_E740_PCMCIA_RDY1);
+
+ cd_irqs[0].irq = gpio_to_irq(GPIO_E740_PCMCIA_CD0);
+ cd_irqs[1].irq = gpio_to_irq(GPIO_E740_PCMCIA_CD1);
return soc_pcmcia_request_irqs(skt, &cd_irqs[skt->nr], 1);
}
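The change repeated across these PXA board files replaces the compile-time IRQ_GPIO() macro with gpio_to_irq(), which is a function call and therefore cannot appear in a static initializer; the .irq fields are instead filled in from the hw_init() hook. A minimal sketch of the pattern, with a hypothetical board name and GPIO number, not part of the patch itself:

/* illustrative only -- board name and GPIO number are made up */
#define GPIO_MYBOARD_CD		11

static struct pcmcia_irqs cd_irqs[] = {
	{ .sock = 0, .str = "myboard CD" },	/* .irq set at runtime */
};

static int myboard_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
	/* gpio_to_irq() must be evaluated at init time, not build time */
	cd_irqs[0].irq = gpio_to_irq(GPIO_MYBOARD_CD);
	return soc_pcmcia_request_irqs(skt, cd_irqs, ARRAY_SIZE(cd_irqs));
}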
diff --git a/drivers/pcmcia/pxa2xx_palmld.c b/drivers/pcmcia/pxa2xx_palmld.c
index d589ad1..6a8e011 100644
--- a/drivers/pcmcia/pxa2xx_palmld.c
+++ b/drivers/pcmcia/pxa2xx_palmld.c
@@ -33,7 +33,7 @@ static int palmld_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
ret = gpio_request_array(palmld_pcmcia_gpios,
ARRAY_SIZE(palmld_pcmcia_gpios));
- skt->socket.pci_irq = IRQ_GPIO(GPIO_NR_PALMLD_PCMCIA_READY);
+ skt->socket.pci_irq = gpio_to_irq(GPIO_NR_PALMLD_PCMCIA_READY);
return ret;
}
diff --git a/drivers/pcmcia/pxa2xx_palmtc.c b/drivers/pcmcia/pxa2xx_palmtc.c
index 9c6a04b..9e38de7 100644
--- a/drivers/pcmcia/pxa2xx_palmtc.c
+++ b/drivers/pcmcia/pxa2xx_palmtc.c
@@ -37,7 +37,7 @@ static int palmtc_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
ret = gpio_request_array(palmtc_pcmcia_gpios,
ARRAY_SIZE(palmtc_pcmcia_gpios));
- skt->socket.pci_irq = IRQ_GPIO(GPIO_NR_PALMTC_PCMCIA_READY);
+ skt->socket.pci_irq = gpio_to_irq(GPIO_NR_PALMTC_PCMCIA_READY);
return ret;
}
diff --git a/drivers/pcmcia/pxa2xx_stargate2.c b/drivers/pcmcia/pxa2xx_stargate2.c
index 9396222..6c2366b 100644
--- a/drivers/pcmcia/pxa2xx_stargate2.c
+++ b/drivers/pcmcia/pxa2xx_stargate2.c
@@ -34,7 +34,7 @@
#define SG2_S0_GPIO_READY 81
static struct pcmcia_irqs irqs[] = {
- { 0, IRQ_GPIO(SG2_S0_GPIO_DETECT), "PCMCIA0 CD" },
+ {.sock = 0, .str = "PCMCIA0 CD" },
};
static struct gpio sg2_pcmcia_gpios[] = {
@@ -44,7 +44,9 @@ static struct gpio sg2_pcmcia_gpios[] = {
static int sg2_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
{
- skt->socket.pci_irq = IRQ_GPIO(SG2_S0_GPIO_READY);
+ skt->socket.pci_irq = gpio_to_irq(SG2_S0_GPIO_READY);
+ irqs[0].irq = gpio_to_irq(SG2_S0_GPIO_DETECT);
+
return soc_pcmcia_request_irqs(skt, irqs, ARRAY_SIZE(irqs));
}
diff --git a/drivers/pcmcia/pxa2xx_trizeps4.c b/drivers/pcmcia/pxa2xx_trizeps4.c
index 57ddb96..7c33f89 100644
--- a/drivers/pcmcia/pxa2xx_trizeps4.c
+++ b/drivers/pcmcia/pxa2xx_trizeps4.c
@@ -30,7 +30,7 @@
extern void board_pcmcia_power(int power);
static struct pcmcia_irqs irqs[] = {
- { 0, IRQ_GPIO(GPIO_PCD), "cs0_cd" }
+ { .sock = 0, .str = "cs0_cd" }
/* on other baseboards we can have more inputs */
};
@@ -53,7 +53,8 @@ static int trizeps_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
gpio_free(GPIO_PRDY);
return -EINVAL;
}
- skt->socket.pci_irq = IRQ_GPIO(GPIO_PRDY);
+ skt->socket.pci_irq = gpio_to_irq(GPIO_PRDY);
+ irqs[0].irq = gpio_to_irq(GPIO_PCD);
break;
default:
break;
diff --git a/drivers/pcmcia/pxa2xx_vpac270.c b/drivers/pcmcia/pxa2xx_vpac270.c
index 66ab92c..61b17d2 100644
--- a/drivers/pcmcia/pxa2xx_vpac270.c
+++ b/drivers/pcmcia/pxa2xx_vpac270.c
@@ -38,12 +38,10 @@ static struct gpio vpac270_cf_gpios[] = {
static struct pcmcia_irqs cd_irqs[] = {
{
.sock = 0,
- .irq = IRQ_GPIO(GPIO84_VPAC270_PCMCIA_CD),
.str = "PCMCIA CD"
},
{
.sock = 1,
- .irq = IRQ_GPIO(GPIO17_VPAC270_CF_CD),
.str = "CF CD"
},
};
@@ -57,6 +55,7 @@ static int vpac270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
ARRAY_SIZE(vpac270_pcmcia_gpios));
skt->socket.pci_irq = gpio_to_irq(GPIO35_VPAC270_PCMCIA_RDY);
+ cd_irqs[0].irq = gpio_to_irq(GPIO84_VPAC270_PCMCIA_CD);
if (!ret)
ret = soc_pcmcia_request_irqs(skt, &cd_irqs[0], 1);
@@ -65,6 +64,7 @@ static int vpac270_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
ARRAY_SIZE(vpac270_cf_gpios));
skt->socket.pci_irq = gpio_to_irq(GPIO12_VPAC270_CF_RDY);
+ cd_irqs[1].irq = gpio_to_irq(GPIO17_VPAC270_CF_CD);
if (!ret)
ret = soc_pcmcia_request_irqs(skt, &cd_irqs[1], 1);
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index 5986690..27f2fe3 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -205,7 +205,8 @@ static int __devexit pcmcia_remove(struct sa1111_dev *dev)
dev_set_drvdata(&dev->dev, NULL);
- for (; next = s->next, s; s = next) {
+ for (; s; s = next) {
+ next = s->next;
soc_pcmcia_remove_one(&s->soc);
kfree(s);
}
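The loop rewritten above previously read s->next through the comma operator before testing s, so a NULL s was dereferenced on the final iteration; the fix tests s first and saves the successor inside the body. The same safe-walk shape on a generic, hypothetical singly linked list, purely for illustration:

/* illustrative only -- needs <linux/slab.h> for kfree() */
struct node {
	struct node *next;
};

static void free_all(struct node *s)
{
	struct node *next;

	for (; s; s = next) {	/* check s before dereferencing it */
		next = s->next;	/* remember the successor before freeing s */
		kfree(s);
	}
}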

diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 9dc565c..849c0c1 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -24,15 +24,15 @@
#include "yenta_socket.h"
#include "i82365.h"
-static int disable_clkrun;
+static bool disable_clkrun;
module_param(disable_clkrun, bool, 0444);
MODULE_PARM_DESC(disable_clkrun, "If PC card doesn't function properly, please try this option");
-static int isa_probe = 1;
+static bool isa_probe = 1;
module_param(isa_probe, bool, 0444);
MODULE_PARM_DESC(isa_probe, "If set ISA interrupts are probed (default). Set to N to disable probing");
-static int pwr_irqs_off;
+static bool pwr_irqs_off;
module_param(pwr_irqs_off, bool, 0644);
MODULE_PARM_DESC(pwr_irqs_off, "Force IRQs off during power-on of slot. Use only when seeing IRQ storms!");
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index e17e2f8..afaf885 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -12,7 +12,10 @@ menu "Pin controllers"
depends on PINCTRL
config PINMUX
- bool "Support pinmux controllers"
+ bool "Support pin multiplexing controllers"
+
+config PINCONF
+ bool "Support pin configuration controllers"
config DEBUG_PINCTRL
bool "Debug PINCTRL calls"
@@ -20,16 +23,25 @@ config DEBUG_PINCTRL
help
Say Y here to add some extra checks and diagnostics to PINCTRL calls.
-config PINMUX_SIRF
- bool "CSR SiRFprimaII pinmux driver"
+config PINCTRL_SIRF
+ bool "CSR SiRFprimaII pin controller driver"
depends on ARCH_PRIMA2
select PINMUX
-config PINMUX_U300
- bool "U300 pinmux driver"
+config PINCTRL_U300
+ bool "U300 pin controller driver"
depends on ARCH_U300
select PINMUX
+config PINCTRL_COH901
+ bool "ST-Ericsson U300 COH 901 335/571 GPIO"
+ depends on GPIOLIB && ARCH_U300 && PINMUX_U300
+ help
+ Say yes here to support the GPIO interface on ST-Ericsson U300.
+ The names of the two IP block variants supported are
+ COH 901 335 and COH 901 571/3. They contain 3, 5 or 7
+ ports of 8 GPIO pins each.
+
endmenu
endif
diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile
index bdc548a..827601c 100644
--- a/drivers/pinctrl/Makefile
+++ b/drivers/pinctrl/Makefile
@@ -1,8 +1,10 @@
# generic pinmux support
-ccflags-$(CONFIG_DEBUG_PINMUX) += -DDEBUG
+ccflags-$(CONFIG_DEBUG_PINCTRL) += -DDEBUG
obj-$(CONFIG_PINCTRL) += core.o
obj-$(CONFIG_PINMUX) += pinmux.o
-obj-$(CONFIG_PINMUX_SIRF) += pinmux-sirf.o
-obj-$(CONFIG_PINMUX_U300) += pinmux-u300.o
+obj-$(CONFIG_PINCONF) += pinconf.o
+obj-$(CONFIG_PINCTRL_SIRF) += pinctrl-sirf.o
+obj-$(CONFIG_PINCTRL_U300) += pinctrl-u300.o
+obj-$(CONFIG_PINCTRL_COH901) += pinctrl-coh901.o
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index eadef9e..894cd5e 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -28,17 +28,12 @@
#include <linux/pinctrl/machine.h>
#include "core.h"
#include "pinmux.h"
+#include "pinconf.h"
/* Global list of pin control devices */
static DEFINE_MUTEX(pinctrldev_list_mutex);
static LIST_HEAD(pinctrldev_list);
-static void pinctrl_dev_release(struct device *dev)
-{
- struct pinctrl_dev *pctldev = dev_get_drvdata(dev);
- kfree(pctldev);
-}
-
const char *pinctrl_dev_get_name(struct pinctrl_dev *pctldev)
{
/* We're not allowed to register devices without name */
@@ -70,14 +65,14 @@ struct pinctrl_dev *get_pinctrl_dev_from_dev(struct device *dev,
mutex_lock(&pinctrldev_list_mutex);
list_for_each_entry(pctldev, &pinctrldev_list, node) {
- if (dev && &pctldev->dev == dev) {
+ if (dev && pctldev->dev == dev) {
/* Matched on device pointer */
found = true;
break;
}
if (devname &&
- !strcmp(dev_name(&pctldev->dev), devname)) {
+ !strcmp(dev_name(pctldev->dev), devname)) {
/* Matched on device name */
found = true;
break;
@@ -88,7 +83,7 @@ struct pinctrl_dev *get_pinctrl_dev_from_dev(struct device *dev,
return found ? pctldev : NULL;
}
-struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, int pin)
+struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, unsigned int pin)
{
struct pin_desc *pindesc;
unsigned long flags;
@@ -101,6 +96,31 @@ struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, int pin)
}
/**
+ * pin_get_from_name() - look up a pin number from a name
+ * @pctldev: the pin control device to lookup the pin on
+ * @name: the name of the pin to look up
+ */
+int pin_get_from_name(struct pinctrl_dev *pctldev, const char *name)
+{
+ unsigned i, pin;
+
+ /* The pin number can be retrieved from the pin controller descriptor */
+ for (i = 0; i < pctldev->desc->npins; i++) {
+ struct pin_desc *desc;
+
+ pin = pctldev->desc->pins[i].number;
+ desc = pin_desc_get(pctldev, pin);
+ /* Pin space may be sparse */
+ if (desc == NULL)
+ continue;
+ if (desc->name && !strcmp(name, desc->name))
+ return pin;
+ }
+
+ return -EINVAL;
+}
+
+/**
* pin_is_valid() - check if pin exists on controller
* @pctldev: the pin control device to check the pin on
* @pin: pin to check, use the local pin controller index number
@@ -139,6 +159,8 @@ static void pinctrl_free_pindescs(struct pinctrl_dev *pctldev,
if (pindesc != NULL) {
radix_tree_delete(&pctldev->pin_desc_tree,
pins[i].number);
+ if (pindesc->dynamic_name)
+ kfree(pindesc->name);
}
kfree(pindesc);
}
@@ -160,19 +182,27 @@ static int pinctrl_register_one_pin(struct pinctrl_dev *pctldev,
pindesc = kzalloc(sizeof(*pindesc), GFP_KERNEL);
if (pindesc == NULL)
return -ENOMEM;
+
spin_lock_init(&pindesc->lock);
/* Set owner */
pindesc->pctldev = pctldev;
/* Copy basic pin info */
- pindesc->name = name;
+ if (name) {
+ pindesc->name = name;
+ } else {
+ pindesc->name = kasprintf(GFP_KERNEL, "PIN%u", number);
+ if (pindesc->name == NULL)
+ return -ENOMEM;
+ pindesc->dynamic_name = true;
+ }
spin_lock(&pctldev->pin_desc_tree_lock);
radix_tree_insert(&pctldev->pin_desc_tree, number, pindesc);
spin_unlock(&pctldev->pin_desc_tree_lock);
pr_debug("registered pin %d (%s) on %s\n",
- number, name ? name : "(unnamed)", pctldev->desc->name);
+ number, pindesc->name, pctldev->desc->name);
return 0;
}
@@ -284,21 +314,52 @@ void pinctrl_remove_gpio_range(struct pinctrl_dev *pctldev,
mutex_unlock(&pctldev->gpio_ranges_lock);
}
+/**
+ * pinctrl_get_group_selector() - returns the group selector for a group
+ * @pctldev: the pin controller handling the group
+ * @pin_group: the pin group to look up
+ */
+int pinctrl_get_group_selector(struct pinctrl_dev *pctldev,
+ const char *pin_group)
+{
+ const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
+ unsigned group_selector = 0;
+
+ while (pctlops->list_groups(pctldev, group_selector) >= 0) {
+ const char *gname = pctlops->get_group_name(pctldev,
+ group_selector);
+ if (!strcmp(gname, pin_group)) {
+ dev_dbg(pctldev->dev,
+ "found group selector %u for %s\n",
+ group_selector,
+ pin_group);
+ return group_selector;
+ }
+
+ group_selector++;
+ }
+
+ dev_err(pctldev->dev, "does not have pin group %s\n",
+ pin_group);
+
+ return -EINVAL;
+}
+
#ifdef CONFIG_DEBUG_FS
static int pinctrl_pins_show(struct seq_file *s, void *what)
{
struct pinctrl_dev *pctldev = s->private;
const struct pinctrl_ops *ops = pctldev->desc->pctlops;
- unsigned pin;
+ unsigned i, pin;
seq_printf(s, "registered pins: %d\n", pctldev->desc->npins);
- seq_printf(s, "max pin number: %d\n", pctldev->desc->maxpin);
- /* The highest pin number need to be included in the loop, thus <= */
- for (pin = 0; pin <= pctldev->desc->maxpin; pin++) {
+ /* The pin number can be retrieved from the pin controller descriptor */
+ for (i = 0; i < pctldev->desc->npins; i++) {
struct pin_desc *desc;
+ pin = pctldev->desc->pins[i].number;
desc = pin_desc_get(pctldev, pin);
/* Pin space may be sparse */
if (desc == NULL)
@@ -363,8 +424,11 @@ static int pinctrl_gpioranges_show(struct seq_file *s, void *what)
/* Loop over the ranges */
mutex_lock(&pctldev->gpio_ranges_lock);
list_for_each_entry(range, &pctldev->gpio_ranges, node) {
- seq_printf(s, "%u: %s [%u - %u]\n", range->id, range->name,
- range->base, (range->base + range->npins - 1));
+ seq_printf(s, "%u: %s GPIOS [%u - %u] PINS [%u - %u]\n",
+ range->id, range->name,
+ range->base, (range->base + range->npins - 1),
+ range->pin_base,
+ (range->pin_base + range->npins - 1));
}
mutex_unlock(&pctldev->gpio_ranges_lock);
@@ -375,11 +439,15 @@ static int pinctrl_devices_show(struct seq_file *s, void *what)
{
struct pinctrl_dev *pctldev;
- seq_puts(s, "name [pinmux]\n");
+ seq_puts(s, "name [pinmux] [pinconf]\n");
mutex_lock(&pinctrldev_list_mutex);
list_for_each_entry(pctldev, &pinctrldev_list, node) {
seq_printf(s, "%s ", pctldev->desc->name);
if (pctldev->desc->pmxops)
+ seq_puts(s, "yes ");
+ else
+ seq_puts(s, "no ");
+ if (pctldev->desc->confops)
seq_puts(s, "yes");
else
seq_puts(s, "no");
@@ -442,13 +510,15 @@ static struct dentry *debugfs_root;
static void pinctrl_init_device_debugfs(struct pinctrl_dev *pctldev)
{
- static struct dentry *device_root;
+ struct dentry *device_root;
- device_root = debugfs_create_dir(dev_name(&pctldev->dev),
+ device_root = debugfs_create_dir(dev_name(pctldev->dev),
debugfs_root);
+ pctldev->device_root = device_root;
+
if (IS_ERR(device_root) || !device_root) {
pr_warn("failed to create debugfs directory for %s\n",
- dev_name(&pctldev->dev));
+ dev_name(pctldev->dev));
return;
}
debugfs_create_file("pins", S_IFREG | S_IRUGO,
@@ -458,6 +528,12 @@ static void pinctrl_init_device_debugfs(struct pinctrl_dev *pctldev)
debugfs_create_file("gpio-ranges", S_IFREG | S_IRUGO,
device_root, pctldev, &pinctrl_gpioranges_ops);
pinmux_init_device_debugfs(device_root, pctldev);
+ pinconf_init_device_debugfs(device_root, pctldev);
+}
+
+static void pinctrl_remove_device_debugfs(struct pinctrl_dev *pctldev)
+{
+ debugfs_remove_recursive(pctldev->device_root);
}
static void pinctrl_init_debugfs(void)
@@ -484,6 +560,10 @@ static void pinctrl_init_debugfs(void)
{
}
+static void pinctrl_remove_device_debugfs(struct pinctrl_dev *pctldev)
+{
+}
+
#endif
/**
@@ -495,7 +575,6 @@ static void pinctrl_init_debugfs(void)
struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
struct device *dev, void *driver_data)
{
- static atomic_t pinmux_no = ATOMIC_INIT(0);
struct pinctrl_dev *pctldev;
int ret;
@@ -504,16 +583,6 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
if (pctldesc->name == NULL)
return NULL;
- /* If we're implementing pinmuxing, check the ops for sanity */
- if (pctldesc->pmxops) {
- ret = pinmux_check_ops(pctldesc->pmxops);
- if (ret) {
- pr_err("%s pinmux ops lacks necessary functions\n",
- pctldesc->name);
- return NULL;
- }
- }
-
pctldev = kzalloc(sizeof(struct pinctrl_dev), GFP_KERNEL);
if (pctldev == NULL)
return NULL;
@@ -526,18 +595,27 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
spin_lock_init(&pctldev->pin_desc_tree_lock);
INIT_LIST_HEAD(&pctldev->gpio_ranges);
mutex_init(&pctldev->gpio_ranges_lock);
+ pctldev->dev = dev;
- /* Register device */
- pctldev->dev.parent = dev;
- dev_set_name(&pctldev->dev, "pinctrl.%d",
- atomic_inc_return(&pinmux_no) - 1);
- pctldev->dev.release = pinctrl_dev_release;
- ret = device_register(&pctldev->dev);
- if (ret != 0) {
- pr_err("error in device registration\n");
- goto out_reg_dev_err;
+ /* If we're implementing pinmuxing, check the ops for sanity */
+ if (pctldesc->pmxops) {
+ ret = pinmux_check_ops(pctldev);
+ if (ret) {
+ pr_err("%s pinmux ops lacks necessary functions\n",
+ pctldesc->name);
+ goto out_err;
+ }
+ }
+
+ /* If we're implementing pinconfig, check the ops for sanity */
+ if (pctldesc->confops) {
+ ret = pinconf_check_ops(pctldev);
+ if (ret) {
+ pr_err("%s pin config ops lacks necessary functions\n",
+ pctldesc->name);
+ goto out_err;
+ }
}
- dev_set_drvdata(&pctldev->dev, pctldev);
/* Register all the pins */
pr_debug("try to register %d pins on %s...\n",
@@ -547,7 +625,7 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
pr_err("error during pin registration\n");
pinctrl_free_pindescs(pctldev, pctldesc->pins,
pctldesc->npins);
- goto out_reg_pins_err;
+ goto out_err;
}
pinctrl_init_device_debugfs(pctldev);
@@ -557,10 +635,8 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
pinmux_hog_maps(pctldev);
return pctldev;
-out_reg_pins_err:
- device_del(&pctldev->dev);
-out_reg_dev_err:
- put_device(&pctldev->dev);
+out_err:
+ kfree(pctldev);
return NULL;
}
EXPORT_SYMBOL_GPL(pinctrl_register);
@@ -576,6 +652,7 @@ void pinctrl_unregister(struct pinctrl_dev *pctldev)
if (pctldev == NULL)
return;
+ pinctrl_remove_device_debugfs(pctldev);
pinmux_unhog_maps(pctldev);
/* TODO: check that no pinmuxes are still active? */
mutex_lock(&pinctrldev_list_mutex);
@@ -584,7 +661,7 @@ void pinctrl_unregister(struct pinctrl_dev *pctldev)
/* Destroy descriptor tree */
pinctrl_free_pindescs(pctldev, pctldev->desc->pins,
pctldev->desc->npins);
- device_unregister(&pctldev->dev);
+ kfree(pctldev);
}
EXPORT_SYMBOL_GPL(pinctrl_unregister);
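Two behavioural changes in core.c above: pins may now be registered without a name (the core synthesizes "PINn" and frees it again on unregistration), and pin_get_from_name() lets the config code address pins by their datasheet name. A hypothetical driver pin table relying on both, as a sketch rather than anything from this patch:

/* illustrative only -- pin numbers and names are made up */
static const struct pinctrl_pin_desc foo_pins[] = {
	PINCTRL_PIN(0, "GPIO0_A"),	/* explicitly named */
	PINCTRL_PIN(1, NULL),		/* core will name this "PIN1" */
	PINCTRL_PIN(8, "SPI0_CLK"),	/* pin space may be sparse */
};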
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
index 472fa13..cfa86da 100644
--- a/drivers/pinctrl/core.h
+++ b/drivers/pinctrl/core.h
@@ -9,6 +9,10 @@
* License terms: GNU General Public License (GPL) version 2
*/
+#include <linux/pinctrl/pinconf.h>
+
+struct pinctrl_gpio_range;
+
/**
* struct pinctrl_dev - pin control class device
* @node: node to include this pin controller in the global pin controller list
@@ -34,9 +38,12 @@ struct pinctrl_dev {
spinlock_t pin_desc_tree_lock;
struct list_head gpio_ranges;
struct mutex gpio_ranges_lock;
- struct device dev;
+ struct device *dev;
struct module *owner;
void *driver_data;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *device_root;
+#endif
#ifdef CONFIG_PINMUX
struct mutex pinmux_hogs_lock;
struct list_head pinmux_hogs;
@@ -48,6 +55,7 @@ struct pinctrl_dev {
* @pctldev: corresponding pin control device
* @name: a name for the pin, e.g. the name of the pin/pad/finger on a
* datasheet or such
+ * @dynamic_name: if the name of this pin was dynamically allocated
* @lock: a lock to protect the descriptor structure
* @mux_requested: whether the pin is already requested by pinmux or not
* @mux_function: a named muxing function for the pin that will be passed to
@@ -56,6 +64,7 @@ struct pinctrl_dev {
struct pin_desc {
struct pinctrl_dev *pctldev;
const char *name;
+ bool dynamic_name;
spinlock_t lock;
/* These fields only added when supporting pinmux drivers */
#ifdef CONFIG_PINMUX
@@ -65,7 +74,10 @@ struct pin_desc {
struct pinctrl_dev *get_pinctrl_dev_from_dev(struct device *dev,
const char *dev_name);
-struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, int pin);
+struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev, unsigned int pin);
+int pin_get_from_name(struct pinctrl_dev *pctldev, const char *name);
int pinctrl_get_device_gpio_range(unsigned gpio,
struct pinctrl_dev **outdev,
struct pinctrl_gpio_range **outrange);
+int pinctrl_get_group_selector(struct pinctrl_dev *pctldev,
+ const char *pin_group);
diff --git a/drivers/pinctrl/pinconf.c b/drivers/pinctrl/pinconf.c
new file mode 100644
index 0000000..9fb7545
--- /dev/null
+++ b/drivers/pinctrl/pinconf.c
@@ -0,0 +1,328 @@
+/*
+ * Core driver for the pin config portions of the pin control subsystem
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#define pr_fmt(fmt) "pinconfig core: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinconf.h>
+#include "core.h"
+#include "pinconf.h"
+
+int pin_config_get_for_pin(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long *config)
+{
+ const struct pinconf_ops *ops = pctldev->desc->confops;
+
+ if (!ops || !ops->pin_config_get) {
+ dev_err(pctldev->dev, "cannot get pin configuration, missing "
+ "pin_config_get() function in driver\n");
+ return -EINVAL;
+ }
+
+ return ops->pin_config_get(pctldev, pin, config);
+}
+
+/**
+ * pin_config_get() - get the configuration of a single pin parameter
+ * @dev_name: name of the pin controller device for this pin
+ * @name: name of the pin to get the config for
+ * @config: the config pointed to by this argument will be filled in with the
+ * current pin state, it can be used directly by drivers as a numeral, or
+ * it can be dereferenced to any struct.
+ */
+int pin_config_get(const char *dev_name, const char *name,
+ unsigned long *config)
+{
+ struct pinctrl_dev *pctldev;
+ int pin;
+
+ pctldev = get_pinctrl_dev_from_dev(NULL, dev_name);
+ if (!pctldev)
+ return -EINVAL;
+
+ pin = pin_get_from_name(pctldev, name);
+ if (pin < 0)
+ return pin;
+
+ return pin_config_get_for_pin(pctldev, pin, config);
+}
+EXPORT_SYMBOL(pin_config_get);
+
+int pin_config_set_for_pin(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long config)
+{
+ const struct pinconf_ops *ops = pctldev->desc->confops;
+ int ret;
+
+ if (!ops || !ops->pin_config_set) {
+ dev_err(pctldev->dev, "cannot configure pin, missing "
+ "config function in driver\n");
+ return -EINVAL;
+ }
+
+ ret = ops->pin_config_set(pctldev, pin, config);
+ if (ret) {
+ dev_err(pctldev->dev,
+ "unable to set pin configuration on pin %d\n", pin);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * pin_config_set() - set the configuration of a single pin parameter
+ * @dev_name: name of pin controller device for this pin
+ * @name: name of the pin to set the config for
+ * @config: the config in this argument will contain the desired pin state, it
+ * can be used directly by drivers as a numeral, or it can be dereferenced
+ * to any struct.
+ */
+int pin_config_set(const char *dev_name, const char *name,
+ unsigned long config)
+{
+ struct pinctrl_dev *pctldev;
+ int pin;
+
+ pctldev = get_pinctrl_dev_from_dev(NULL, dev_name);
+ if (!pctldev)
+ return -EINVAL;
+
+ pin = pin_get_from_name(pctldev, name);
+ if (pin < 0)
+ return pin;
+
+ return pin_config_set_for_pin(pctldev, pin, config);
+}
+EXPORT_SYMBOL(pin_config_set);
+
+int pin_config_group_get(const char *dev_name, const char *pin_group,
+ unsigned long *config)
+{
+ struct pinctrl_dev *pctldev;
+ const struct pinconf_ops *ops;
+ int selector;
+
+ pctldev = get_pinctrl_dev_from_dev(NULL, dev_name);
+ if (!pctldev)
+ return -EINVAL;
+ ops = pctldev->desc->confops;
+
+ if (!ops || !ops->pin_config_group_get) {
+ dev_err(pctldev->dev, "cannot get configuration for pin "
+ "group, missing group config get function in "
+ "driver\n");
+ return -EINVAL;
+ }
+
+ selector = pinctrl_get_group_selector(pctldev, pin_group);
+ if (selector < 0)
+ return selector;
+
+ return ops->pin_config_group_get(pctldev, selector, config);
+}
+EXPORT_SYMBOL(pin_config_group_get);
+
+
+int pin_config_group_set(const char *dev_name, const char *pin_group,
+ unsigned long config)
+{
+ struct pinctrl_dev *pctldev;
+ const struct pinconf_ops *ops;
+ const struct pinctrl_ops *pctlops;
+ int selector;
+ const unsigned *pins;
+ unsigned num_pins;
+ int ret;
+ int i;
+
+ pctldev = get_pinctrl_dev_from_dev(NULL, dev_name);
+ if (!pctldev)
+ return -EINVAL;
+ ops = pctldev->desc->confops;
+ pctlops = pctldev->desc->pctlops;
+
+ if (!ops || (!ops->pin_config_group_set && !ops->pin_config_set)) {
+ dev_err(pctldev->dev, "cannot configure pin group, missing "
+ "config function in driver\n");
+ return -EINVAL;
+ }
+
+ selector = pinctrl_get_group_selector(pctldev, pin_group);
+ if (selector < 0)
+ return selector;
+
+ ret = pctlops->get_group_pins(pctldev, selector, &pins, &num_pins);
+ if (ret) {
+ dev_err(pctldev->dev, "cannot configure pin group, error "
+ "getting pins\n");
+ return ret;
+ }
+
+ /*
+ * If the pin controller supports handling entire groups we use that
+ * capability.
+ */
+ if (ops->pin_config_group_set) {
+ ret = ops->pin_config_group_set(pctldev, selector, config);
+ /*
+ * If the pin controller prefers that a certain group be handled
+ * pin-by-pin as well, it returns -EAGAIN.
+ */
+ if (ret != -EAGAIN)
+ return ret;
+ }
+
+ /*
+ * If the controller cannot handle entire groups, we configure each pin
+ * individually.
+ */
+ if (!ops->pin_config_set)
+ return 0;
+
+ for (i = 0; i < num_pins; i++) {
+ ret = ops->pin_config_set(pctldev, pins[i], config);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(pin_config_group_set);
+
+int pinconf_check_ops(struct pinctrl_dev *pctldev)
+{
+ const struct pinconf_ops *ops = pctldev->desc->confops;
+
+ /* We must be able to read out pin status */
+ if (!ops->pin_config_get && !ops->pin_config_group_get)
+ return -EINVAL;
+ /* We have to be able to config the pins in SOME way */
+ if (!ops->pin_config_set && !ops->pin_config_group_set)
+ return -EINVAL;
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static void pinconf_dump_pin(struct pinctrl_dev *pctldev,
+ struct seq_file *s, int pin)
+{
+ const struct pinconf_ops *ops = pctldev->desc->confops;
+
+ if (ops && ops->pin_config_dbg_show)
+ ops->pin_config_dbg_show(pctldev, s, pin);
+}
+
+static int pinconf_pins_show(struct seq_file *s, void *what)
+{
+ struct pinctrl_dev *pctldev = s->private;
+ unsigned i, pin;
+
+ seq_puts(s, "Pin config settings per pin\n");
+ seq_puts(s, "Format: pin (name): pinmux setting array\n");
+
+ /* The pin number can be retrieved from the pin controller descriptor */
+ for (i = 0; i < pctldev->desc->npins; i++) {
+ struct pin_desc *desc;
+
+ pin = pctldev->desc->pins[i].number;
+ desc = pin_desc_get(pctldev, pin);
+ /* Skip if we cannot search the pin */
+ if (desc == NULL)
+ continue;
+
+ seq_printf(s, "pin %d (%s):", pin,
+ desc->name ? desc->name : "unnamed");
+
+ pinconf_dump_pin(pctldev, s, pin);
+
+ seq_printf(s, "\n");
+ }
+
+ return 0;
+}
+
+static void pinconf_dump_group(struct pinctrl_dev *pctldev,
+ struct seq_file *s, unsigned selector,
+ const char *gname)
+{
+ const struct pinconf_ops *ops = pctldev->desc->confops;
+
+ if (ops && ops->pin_config_group_dbg_show)
+ ops->pin_config_group_dbg_show(pctldev, s, selector);
+}
+
+static int pinconf_groups_show(struct seq_file *s, void *what)
+{
+ struct pinctrl_dev *pctldev = s->private;
+ const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
+ const struct pinconf_ops *ops = pctldev->desc->confops;
+ unsigned selector = 0;
+
+ if (!ops || !ops->pin_config_group_get)
+ return 0;
+
+ seq_puts(s, "Pin config settings per pin group\n");
+ seq_puts(s, "Format: group (name): pinmux setting array\n");
+
+ while (pctlops->list_groups(pctldev, selector) >= 0) {
+ const char *gname = pctlops->get_group_name(pctldev, selector);
+
+ seq_printf(s, "%u (%s):", selector, gname);
+ pinconf_dump_group(pctldev, s, selector, gname);
+ selector++;
+ }
+
+ return 0;
+}
+
+static int pinconf_pins_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pinconf_pins_show, inode->i_private);
+}
+
+static int pinconf_groups_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pinconf_groups_show, inode->i_private);
+}
+
+static const struct file_operations pinconf_pins_ops = {
+ .open = pinconf_pins_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations pinconf_groups_ops = {
+ .open = pinconf_groups_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void pinconf_init_device_debugfs(struct dentry *devroot,
+ struct pinctrl_dev *pctldev)
+{
+ debugfs_create_file("pinconf-pins", S_IFREG | S_IRUGO,
+ devroot, pctldev, &pinconf_pins_ops);
+ debugfs_create_file("pinconf-groups", S_IFREG | S_IRUGO,
+ devroot, pctldev, &pinconf_groups_ops);
+}
+
+#endif
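A sketch of how board or driver code might call the new pin config API introduced in pinconf.c above; the controller name, pin name, group name and config value are all hypothetical, and the meaning of the config word is entirely defined by the pin controller driver:

/* illustrative only */
static int foo_board_pinconf_setup(void)
{
	int ret;

	/* configure a single pin, addressed by name */
	ret = pin_config_set("pinctrl-foo", "GPIO12", 0x04);
	if (ret)
		return ret;

	/* configure a whole group; if the driver returns -EAGAIN the
	 * core falls back to per-pin pin_config_set() calls */
	return pin_config_group_set("pinctrl-foo", "i2c0_grp", 0x04);
}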
diff --git a/drivers/pinctrl/pinconf.h b/drivers/pinctrl/pinconf.h
new file mode 100644
index 0000000..006b77f
--- /dev/null
+++ b/drivers/pinctrl/pinconf.h
@@ -0,0 +1,36 @@
+/*
+ * Internal interface between the core pin control system and the
+ * pin config portions
+ *
+ * Copyright (C) 2011 ST-Ericsson SA
+ * Written on behalf of Linaro for ST-Ericsson
+ * Based on bits of regulator core, gpio core and clk core
+ *
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ *
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#ifdef CONFIG_PINCONF
+
+int pinconf_check_ops(struct pinctrl_dev *pctldev);
+void pinconf_init_device_debugfs(struct dentry *devroot,
+ struct pinctrl_dev *pctldev);
+int pin_config_get_for_pin(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long *config);
+int pin_config_set_for_pin(struct pinctrl_dev *pctldev, unsigned pin,
+ unsigned long config);
+
+#else
+
+static inline int pinconf_check_ops(struct pinctrl_dev *pctldev)
+{
+ return 0;
+}
+
+static inline void pinconf_init_device_debugfs(struct dentry *devroot,
+ struct pinctrl_dev *pctldev)
+{
+}
+
+#endif
diff --git a/drivers/gpio/gpio-u300.c b/drivers/pinctrl/pinctrl-coh901.c
index 4035778..69fb707 100644
--- a/drivers/gpio/gpio-u300.c
+++ b/drivers/pinctrl/pinctrl-coh901.c
@@ -22,6 +22,7 @@
#include <linux/gpio.h>
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/pinctrl/pinmux.h>
#include <mach/gpio-u300.h>
/*
@@ -351,6 +352,24 @@ static inline struct u300_gpio *to_u300_gpio(struct gpio_chip *chip)
return container_of(chip, struct u300_gpio, chip);
}
+static int u300_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ /*
+ * Map back to global GPIO space and request muxing, the direction
+ * parameter does not matter for this controller.
+ */
+ int gpio = chip->base + offset;
+
+ return pinmux_request_gpio(gpio);
+}
+
+static void u300_gpio_free(struct gpio_chip *chip, unsigned offset)
+{
+ int gpio = chip->base + offset;
+
+ pinmux_free_gpio(gpio);
+}
+
static int u300_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct u300_gpio *gpio = to_u300_gpio(chip);
@@ -483,6 +502,8 @@ static int u300_gpio_config(struct gpio_chip *chip, unsigned offset,
static struct gpio_chip u300_gpio_chip = {
.label = "u300-gpio-chip",
.owner = THIS_MODULE,
+ .request = u300_gpio_request,
+ .free = u300_gpio_free,
.get = u300_gpio_get,
.set = u300_gpio_set,
.direction_input = u300_gpio_direction_input,
diff --git a/drivers/pinctrl/pinmux-sirf.c b/drivers/pinctrl/pinctrl-sirf.c
index d76cae6..6b3534c 100644
--- a/drivers/pinctrl/pinmux-sirf.c
+++ b/drivers/pinctrl/pinctrl-sirf.c
@@ -463,7 +463,7 @@ static const struct sirfsoc_padmux spi1_padmux = {
.funcval = BIT(8),
};
-static const unsigned spi1_pins[] = { 33, 34, 35, 36 };
+static const unsigned spi1_pins[] = { 43, 44, 45, 46 };
static const struct sirfsoc_muxmask sdmmc1_muxmask[] = {
{
@@ -1067,7 +1067,7 @@ static int sirfsoc_pinmux_request_gpio(struct pinctrl_dev *pmxdev,
spmx = pinctrl_dev_get_drvdata(pmxdev);
muxval = readl(spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(group));
- muxval = muxval | (1 << offset);
+ muxval = muxval | (1 << (offset - range->pin_base));
writel(muxval, spmx->gpio_virtbase + SIRFSOC_GPIO_PAD_EN(group));
return 0;
@@ -1086,7 +1086,6 @@ static struct pinctrl_desc sirfsoc_pinmux_desc = {
.name = DRIVER_NAME,
.pins = sirfsoc_pads,
.npins = ARRAY_SIZE(sirfsoc_pads),
- .maxpin = SIRFSOC_NUM_PADS - 1,
.pctlops = &sirfsoc_pctrl_ops,
.pmxops = &sirfsoc_pinmux_ops,
.owner = THIS_MODULE,
@@ -1100,21 +1099,25 @@ static struct pinctrl_gpio_range sirfsoc_gpio_ranges[] = {
.name = "sirfsoc-gpio*",
.id = 0,
.base = 0,
+ .pin_base = 0,
.npins = 32,
}, {
.name = "sirfsoc-gpio*",
.id = 1,
.base = 32,
+ .pin_base = 32,
.npins = 32,
}, {
.name = "sirfsoc-gpio*",
.id = 2,
.base = 64,
+ .pin_base = 64,
.npins = 32,
}, {
.name = "sirfsoc-gpio*",
.id = 3,
.base = 96,
+ .pin_base = 96,
.npins = 19,
},
};
diff --git a/drivers/pinctrl/pinmux-u300.c b/drivers/pinctrl/pinctrl-u300.c
index 4858a64..c8d02f1 100644
--- a/drivers/pinctrl/pinmux-u300.c
+++ b/drivers/pinctrl/pinctrl-u300.c
@@ -940,20 +940,23 @@ static void u300_pmx_endisable(struct u300_pmx *upmx, unsigned selector,
{
u16 regval, val, mask;
int i;
+ const struct u300_pmx_mask *upmx_mask;
+ upmx_mask = u300_pmx_functions[selector].mask;
for (i = 0; i < ARRAY_SIZE(u300_pmx_registers); i++) {
if (enable)
- val = u300_pmx_functions[selector].mask->bits;
+ val = upmx_mask->bits;
else
val = 0;
- mask = u300_pmx_functions[selector].mask->mask;
+ mask = upmx_mask->mask;
if (mask != 0) {
regval = readw(upmx->virtbase + u300_pmx_registers[i]);
regval &= ~mask;
regval |= val;
writew(regval, upmx->virtbase + u300_pmx_registers[i]);
}
+ upmx_mask++;
}
}
@@ -1016,21 +1019,35 @@ static struct pinmux_ops u300_pmx_ops = {
};
/*
- * FIXME: this will be set to sane values as this driver engulfs
- * drivers/gpio/gpio-u300.c and we really know this stuff.
+ * GPIO ranges handled by the application-side COH901XXX GPIO controller
+ * Very many pins can be converted into GPIO pins, but we only list those
+ * that are useful in practice to cut down on tables.
*/
-static struct pinctrl_gpio_range u300_gpio_range = {
- .name = "COH901*",
- .id = 0,
- .base = 0,
- .npins = 64,
+#define U300_GPIO_RANGE(a, b, c) { .name = "COH901XXX", .id = a, .base = a, \
+ .pin_base = b, .npins = c }
+
+static struct pinctrl_gpio_range u300_gpio_ranges[] = {
+ U300_GPIO_RANGE(10, 426, 1),
+ U300_GPIO_RANGE(11, 180, 1),
+ U300_GPIO_RANGE(12, 165, 1), /* MS/MMC card insertion */
+ U300_GPIO_RANGE(13, 179, 1),
+ U300_GPIO_RANGE(14, 178, 1),
+ U300_GPIO_RANGE(16, 194, 1),
+ U300_GPIO_RANGE(17, 193, 1),
+ U300_GPIO_RANGE(18, 192, 1),
+ U300_GPIO_RANGE(19, 191, 1),
+ U300_GPIO_RANGE(20, 186, 1),
+ U300_GPIO_RANGE(21, 185, 1),
+ U300_GPIO_RANGE(22, 184, 1),
+ U300_GPIO_RANGE(23, 183, 1),
+ U300_GPIO_RANGE(24, 182, 1),
+ U300_GPIO_RANGE(25, 181, 1),
};
static struct pinctrl_desc u300_pmx_desc = {
.name = DRIVER_NAME,
.pins = u300_pads,
.npins = ARRAY_SIZE(u300_pads),
- .maxpin = U300_NUM_PADS-1,
.pctlops = &u300_pctrl_ops,
.pmxops = &u300_pmx_ops,
.owner = THIS_MODULE,
@@ -1038,9 +1055,10 @@ static struct pinctrl_desc u300_pmx_desc = {
static int __init u300_pmx_probe(struct platform_device *pdev)
{
- int ret;
struct u300_pmx *upmx;
struct resource *res;
+ int ret;
+ int i;
/* Create state holders etc for this driver */
upmx = devm_kzalloc(&pdev->dev, sizeof(*upmx), GFP_KERNEL);
@@ -1077,7 +1095,8 @@ static int __init u300_pmx_probe(struct platform_device *pdev)
}
/* We will handle a range of GPIO pins */
- pinctrl_add_gpio_range(upmx->pctl, &u300_gpio_range);
+ for (i = 0; i < ARRAY_SIZE(u300_gpio_ranges); i++)
+ pinctrl_add_gpio_range(upmx->pctl, &u300_gpio_ranges[i]);
platform_set_drvdata(pdev, upmx);
@@ -1099,8 +1118,10 @@ out_no_resource:
static int __exit u300_pmx_remove(struct platform_device *pdev)
{
struct u300_pmx *upmx = platform_get_drvdata(pdev);
+ int i;
- pinctrl_remove_gpio_range(upmx->pctl, &u300_gpio_range);
+ for (i = 0; i < ARRAY_SIZE(u300_gpio_ranges); i++)
+ pinctrl_remove_gpio_range(upmx->pctl, &u300_gpio_ranges[i]);
pinctrl_unregister(upmx->pctl);
iounmap(upmx->virtbase);
release_mem_region(upmx->phybase, upmx->physize);
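With the new pin_base member a GPIO range no longer has to be numerically identical to the pin range it maps onto; the core converts with pin = gpio - range->base + range->pin_base, as seen in pinmux.c below. A numeric sketch with invented values, not taken from this patch:

/* illustrative only -- numbers are made up */
static struct pinctrl_gpio_range example_range = {
	.name = "example-gpio",
	.id = 0,
	.base = 32,		/* first GPIO number in gpiolib space */
	.pin_base = 160,	/* matching pin number on the controller */
	.npins = 8,
};

/* e.g. GPIO 35 maps to pin 35 - 32 + 160 = 163 */
static unsigned example_gpio_to_pin(unsigned gpio)
{
	return gpio - example_range.base + example_range.pin_base;
}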
diff --git a/drivers/pinctrl/pinmux.c b/drivers/pinctrl/pinmux.c
index a5467f8..7c3193f 100644
--- a/drivers/pinctrl/pinmux.c
+++ b/drivers/pinctrl/pinmux.c
@@ -21,6 +21,7 @@
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
+#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
@@ -32,12 +33,8 @@
static DEFINE_MUTEX(pinmux_list_mutex);
static LIST_HEAD(pinmux_list);
-/* List of pinmux hogs */
-static DEFINE_MUTEX(pinmux_hoglist_mutex);
-static LIST_HEAD(pinmux_hoglist);
-
-/* Global pinmux maps, we allow one set only */
-static struct pinmux_map const *pinmux_maps;
+/* Global pinmux maps */
+static struct pinmux_map *pinmux_maps;
static unsigned pinmux_maps_num;
/**
@@ -56,11 +53,6 @@ struct pinmux_group {
* @dev: the device using this pinmux
* @usecount: the number of active users of this mux setting, used to keep
* track of nested use cases
- * @pins: an array of discrete physical pins used in this mapping, taken
- * from the global pin enumeration space (copied from pinmux map)
- * @num_pins: the number of pins in this mapping array, i.e. the number of
- * elements in .pins so we can iterate over that array (copied from
- * pinmux map)
* @pctldev: pin control device handling this pinmux
* @func_selector: the function selector for the pinmux device handling
* this pinmux
@@ -98,41 +90,35 @@ struct pinmux_hog {
* @function: a functional name to give to this pin, passed to the driver
* so it knows what function to mux in, e.g. the string "gpioNN"
* means that you want to mux in the pin for use as GPIO number NN
- * @gpio: if this request concerns a single GPIO pin
* @gpio_range: the range matching the GPIO pin if this is a request for a
* single GPIO pin
*/
static int pin_request(struct pinctrl_dev *pctldev,
- int pin, const char *function, bool gpio,
+ int pin, const char *function,
struct pinctrl_gpio_range *gpio_range)
{
struct pin_desc *desc;
const struct pinmux_ops *ops = pctldev->desc->pmxops;
int status = -EINVAL;
- dev_dbg(&pctldev->dev, "request pin %d for %s\n", pin, function);
-
- if (!pin_is_valid(pctldev, pin)) {
- dev_err(&pctldev->dev, "pin is invalid\n");
- return -EINVAL;
- }
-
- if (!function) {
- dev_err(&pctldev->dev, "no function name given\n");
- return -EINVAL;
- }
+ dev_dbg(pctldev->dev, "request pin %d for %s\n", pin, function);
desc = pin_desc_get(pctldev, pin);
if (desc == NULL) {
- dev_err(&pctldev->dev,
+ dev_err(pctldev->dev,
"pin is not registered so it cannot be requested\n");
goto out;
}
+ if (!function) {
+ dev_err(pctldev->dev, "no function name given\n");
+ return -EINVAL;
+ }
+
spin_lock(&desc->lock);
if (desc->mux_function) {
spin_unlock(&desc->lock);
- dev_err(&pctldev->dev,
+ dev_err(pctldev->dev,
"pin already requested\n");
goto out;
}
@@ -141,7 +127,7 @@ static int pin_request(struct pinctrl_dev *pctldev,
/* Let each pin increase references to this module */
if (!try_module_get(pctldev->owner)) {
- dev_err(&pctldev->dev,
+ dev_err(pctldev->dev,
"could not increase module refcount for pin %d\n",
pin);
status = -EINVAL;
@@ -152,7 +138,7 @@ static int pin_request(struct pinctrl_dev *pctldev,
* If there is no kind of request function for the pin we just assume
* we got it by default and proceed.
*/
- if (gpio && ops->gpio_request_enable)
+ if (gpio_range && ops->gpio_request_enable)
/* This requests and enables a single GPIO pin */
status = ops->gpio_request_enable(pctldev, gpio_range, pin);
else if (ops->request)
@@ -161,8 +147,7 @@ static int pin_request(struct pinctrl_dev *pctldev,
status = 0;
if (status)
- dev_err(&pctldev->dev, "->request on device %s failed "
- "for pin %d\n",
+ dev_err(pctldev->dev, "->request on device %s failed for pin %d\n",
pctldev->desc->name, pin);
out_free_pin:
if (status) {
@@ -172,7 +157,7 @@ out_free_pin:
}
out:
if (status)
- dev_err(&pctldev->dev, "pin-%d (%s) status %d\n",
+ dev_err(pctldev->dev, "pin-%d (%s) status %d\n",
pin, function ? : "?", status);
return status;
@@ -182,34 +167,52 @@ out:
* pin_free() - release a single muxed in pin so something else can be muxed
* @pctldev: pin controller device handling this pin
* @pin: the pin to free
- * @free_func: whether to free the pin's assigned function name string
+ * @gpio_range: the range matching the GPIO pin if this is a request for a
+ * single GPIO pin
+ *
+ * This function returns a pointer to the function name in use. This is used
+ * for callers that dynamically allocate a function name so it can be freed
+ * once the pin is free. This is done for GPIO request functions.
*/
-static void pin_free(struct pinctrl_dev *pctldev, int pin, int free_func)
+static const char *pin_free(struct pinctrl_dev *pctldev, int pin,
+ struct pinctrl_gpio_range *gpio_range)
{
const struct pinmux_ops *ops = pctldev->desc->pmxops;
struct pin_desc *desc;
+ const char *func;
desc = pin_desc_get(pctldev, pin);
if (desc == NULL) {
- dev_err(&pctldev->dev,
+ dev_err(pctldev->dev,
"pin is not registered so it cannot be freed\n");
- return;
+ return NULL;
}
- if (ops->free)
+ /*
+ * If there is no kind of request function for the pin we just assume
+ * we got it by default and proceed.
+ */
+ if (gpio_range && ops->gpio_disable_free)
+ ops->gpio_disable_free(pctldev, gpio_range, pin);
+ else if (ops->free)
ops->free(pctldev, pin);
spin_lock(&desc->lock);
- if (free_func)
- kfree(desc->mux_function);
+ func = desc->mux_function;
desc->mux_function = NULL;
spin_unlock(&desc->lock);
module_put(pctldev->owner);
+
+ return func;
}
/**
* pinmux_request_gpio() - request a single pin to be muxed in as GPIO
* @gpio: the GPIO pin number from the GPIO subsystem number space
+ *
+ * This function should *ONLY* be used from gpiolib-based GPIO drivers,
+ * as part of their gpio_request() semantics, platforms and individual drivers
+ * shall *NOT* request GPIO pins to be muxed in.
*/
int pinmux_request_gpio(unsigned gpio)
{
@@ -225,7 +228,7 @@ int pinmux_request_gpio(unsigned gpio)
return -EINVAL;
/* Convert to the pin controllers number space */
- pin = gpio - range->base;
+ pin = gpio - range->base + range->pin_base;
/* Conjure some name stating what chip and pin this is taken by */
snprintf(gpiostr, 15, "%s:%d", range->name, gpio);
@@ -234,7 +237,7 @@ int pinmux_request_gpio(unsigned gpio)
if (!function)
return -EINVAL;
- ret = pin_request(pctldev, pin, function, true, range);
+ ret = pin_request(pctldev, pin, function, range);
if (ret < 0)
kfree(function);
@@ -245,6 +248,10 @@ EXPORT_SYMBOL_GPL(pinmux_request_gpio);
/**
* pinmux_free_gpio() - free a single pin, currently used as GPIO
* @gpio: the GPIO pin number from the GPIO subsystem number space
+ *
+ * This function should *ONLY* be used from gpiolib-based GPIO drivers,
+ * as part of their gpio_free() semantics, platforms and individual drivers
+ * shall *NOT* request GPIO pins to be muxed out.
*/
void pinmux_free_gpio(unsigned gpio)
{
@@ -252,56 +259,110 @@ void pinmux_free_gpio(unsigned gpio)
struct pinctrl_gpio_range *range;
int ret;
int pin;
+ const char *func;
ret = pinctrl_get_device_gpio_range(gpio, &pctldev, &range);
if (ret)
return;
/* Convert to the pin controllers number space */
- pin = gpio - range->base;
+ pin = gpio - range->base + range->pin_base;
- pin_free(pctldev, pin, true);
+ func = pin_free(pctldev, pin, range);
+ kfree(func);
}
EXPORT_SYMBOL_GPL(pinmux_free_gpio);
+static int pinmux_gpio_direction(unsigned gpio, bool input)
+{
+ struct pinctrl_dev *pctldev;
+ struct pinctrl_gpio_range *range;
+ const struct pinmux_ops *ops;
+ int ret;
+ int pin;
+
+ ret = pinctrl_get_device_gpio_range(gpio, &pctldev, &range);
+ if (ret)
+ return ret;
+
+ ops = pctldev->desc->pmxops;
+
+ /* Convert to the pin controllers number space */
+ pin = gpio - range->base + range->pin_base;
+
+ if (ops->gpio_set_direction)
+ ret = ops->gpio_set_direction(pctldev, range, pin, input);
+ else
+ ret = 0;
+
+ return ret;
+}
+
+/**
+ * pinmux_gpio_direction_input() - request a GPIO pin to go into input mode
+ * @gpio: the GPIO pin number from the GPIO subsystem number space
+ *
+ * This function should *ONLY* be used from gpiolib-based GPIO drivers,
+ * as part of their gpio_direction_input() semantics, platforms and individual
+ * drivers shall *NOT* touch pinmux GPIO calls.
+ */
+int pinmux_gpio_direction_input(unsigned gpio)
+{
+ return pinmux_gpio_direction(gpio, true);
+}
+EXPORT_SYMBOL_GPL(pinmux_gpio_direction_input);
+
+/**
+ * pinmux_gpio_direction_output() - request a GPIO pin to go into output mode
+ * @gpio: the GPIO pin number from the GPIO subsystem number space
+ *
+ * This function should *ONLY* be used from gpiolib-based GPIO drivers,
+ * as part of their gpio_direction_output() semantics, platforms and individual
+ * drivers shall *NOT* touch pinmux GPIO calls.
+ */
+int pinmux_gpio_direction_output(unsigned gpio)
+{
+ return pinmux_gpio_direction(gpio, false);
+}
+EXPORT_SYMBOL_GPL(pinmux_gpio_direction_output);
+
/**
* pinmux_register_mappings() - register a set of pinmux mappings
- * @maps: the pinmux mappings table to register
+ * @maps: the pinmux mappings table to register, this should be marked with
+ * __initdata so it can be discarded after boot, this function will
+ * perform a shallow copy for the mapping entries.
* @num_maps: the number of maps in the mapping table
*
* Only call this once during initialization of your machine, the function is
* tagged as __init and won't be callable after init has completed. The map
* passed into this function will be owned by the pinmux core and cannot be
- * free:d.
+ * freed.
*/
int __init pinmux_register_mappings(struct pinmux_map const *maps,
unsigned num_maps)
{
+ void *tmp_maps;
int i;
- if (pinmux_maps != NULL) {
- pr_err("pinmux mappings already registered, you can only "
- "register one set of maps\n");
- return -EINVAL;
- }
-
pr_debug("add %d pinmux maps\n", num_maps);
+
+ /* First sanity check the new mapping */
for (i = 0; i < num_maps; i++) {
- /* Sanity check the mapping */
if (!maps[i].name) {
- pr_err("failed to register map %d: "
- "no map name given\n", i);
+ pr_err("failed to register map %d: no map name given\n",
+ i);
return -EINVAL;
}
+
if (!maps[i].ctrl_dev && !maps[i].ctrl_dev_name) {
- pr_err("failed to register map %s (%d): "
- "no pin control device given\n",
+ pr_err("failed to register map %s (%d): no pin control device given\n",
maps[i].name, i);
return -EINVAL;
}
+
if (!maps[i].function) {
- pr_err("failed to register map %s (%d): "
- "no function ID given\n", maps[i].name, i);
+ pr_err("failed to register map %s (%d): no function ID given\n",
+ maps[i].name, i);
return -EINVAL;
}
@@ -315,14 +376,35 @@ int __init pinmux_register_mappings(struct pinmux_map const *maps,
maps[i].function);
}
- pinmux_maps = maps;
- pinmux_maps_num = num_maps;
+ /*
+ * Make a copy of the map array - string pointers will end up in the
+ * kernel const section anyway so these do not need to be deep copied.
+ */
+ if (!pinmux_maps_num) {
+ /* On first call, just copy them */
+ tmp_maps = kmemdup(maps,
+ sizeof(struct pinmux_map) * num_maps,
+ GFP_KERNEL);
+ if (!tmp_maps)
+ return -ENOMEM;
+ } else {
+ /* Subsequent calls, reallocate array to new size */
+ size_t oldsize = sizeof(struct pinmux_map) * pinmux_maps_num;
+ size_t newsize = sizeof(struct pinmux_map) * num_maps;
+
+ tmp_maps = krealloc(pinmux_maps, oldsize + newsize, GFP_KERNEL);
+ if (!tmp_maps)
+ return -ENOMEM;
+ memcpy((tmp_maps + oldsize), maps, newsize);
+ }
+ pinmux_maps = tmp_maps;
+ pinmux_maps_num += num_maps;
return 0;
}
/**
- * acquire_pins() - acquire all the pins for a certain funcion on a pinmux
+ * acquire_pins() - acquire all the pins for a certain function on a pinmux
* @pctldev: the device to take the pins on
* @func_selector: the function selector to acquire the pins for
* @group_selector: the group selector containing the pins to acquire
@@ -345,22 +427,21 @@ static int acquire_pins(struct pinctrl_dev *pctldev,
if (ret)
return ret;
- dev_dbg(&pctldev->dev, "requesting the %u pins from group %u\n",
+ dev_dbg(pctldev->dev, "requesting the %u pins from group %u\n",
num_pins, group_selector);
/* Try to allocate all pins in this group, one by one */
for (i = 0; i < num_pins; i++) {
- ret = pin_request(pctldev, pins[i], func, false, NULL);
+ ret = pin_request(pctldev, pins[i], func, NULL);
if (ret) {
- dev_err(&pctldev->dev,
- "could not get pin %d for function %s "
- "on device %s - conflicting mux mappings?\n",
+ dev_err(pctldev->dev,
+ "could not get pin %d for function %s on device %s - conflicting mux mappings?\n",
pins[i], func ? : "(undefined)",
pinctrl_dev_get_name(pctldev));
/* On error release all taken pins */
i--; /* this pin just failed */
for (; i >= 0; i--)
- pin_free(pctldev, pins[i], false);
+ pin_free(pctldev, pins[i], NULL);
return -ENODEV;
}
}
@@ -369,7 +450,7 @@ static int acquire_pins(struct pinctrl_dev *pctldev,
/**
* release_pins() - release pins taken by earlier acquirement
- * @pctldev: the device to free the pinx on
+ * @pctldev: the device to free the pins on
* @group_selector: the group selector containing the pins to free
*/
static void release_pins(struct pinctrl_dev *pctldev,
@@ -384,44 +465,12 @@ static void release_pins(struct pinctrl_dev *pctldev,
ret = pctlops->get_group_pins(pctldev, group_selector,
&pins, &num_pins);
if (ret) {
- dev_err(&pctldev->dev, "could not get pins to release for "
- "group selector %d\n",
+ dev_err(pctldev->dev, "could not get pins to release for group selector %d\n",
group_selector);
return;
}
for (i = 0; i < num_pins; i++)
- pin_free(pctldev, pins[i], false);
-}
-
-/**
- * pinmux_get_group_selector() - returns the group selector for a group
- * @pctldev: the pin controller handling the group
- * @pin_group: the pin group to look up
- */
-static int pinmux_get_group_selector(struct pinctrl_dev *pctldev,
- const char *pin_group)
-{
- const struct pinctrl_ops *pctlops = pctldev->desc->pctlops;
- unsigned group_selector = 0;
-
- while (pctlops->list_groups(pctldev, group_selector) >= 0) {
- const char *gname = pctlops->get_group_name(pctldev,
- group_selector);
- if (!strcmp(gname, pin_group)) {
- dev_dbg(&pctldev->dev,
- "found group selector %u for %s\n",
- group_selector,
- pin_group);
- return group_selector;
- }
-
- group_selector++;
- }
-
- dev_err(&pctldev->dev, "does not have pin group %s\n",
- pin_group);
-
- return -EINVAL;
+ pin_free(pctldev, pins[i], NULL);
}
/**
@@ -465,20 +514,18 @@ static int pinmux_check_pin_group(struct pinctrl_dev *pctldev,
return ret;
if (num_groups < 1)
return -EINVAL;
- ret = pinmux_get_group_selector(pctldev, groups[0]);
+ ret = pinctrl_get_group_selector(pctldev, groups[0]);
if (ret < 0) {
- dev_err(&pctldev->dev,
- "function %s wants group %s but the pin "
- "controller does not seem to have that group\n",
+ dev_err(pctldev->dev,
+ "function %s wants group %s but the pin controller does not seem to have that group\n",
pmxops->get_function_name(pctldev, func_selector),
groups[0]);
return ret;
}
if (num_groups > 1)
- dev_dbg(&pctldev->dev,
- "function %s support more than one group, "
- "default-selecting first group %s (%d)\n",
+ dev_dbg(pctldev->dev,
+ "function %s support more than one group, default-selecting first group %s (%d)\n",
pmxops->get_function_name(pctldev, func_selector),
groups[0],
ret);
@@ -486,13 +533,13 @@ static int pinmux_check_pin_group(struct pinctrl_dev *pctldev,
return ret;
}
- dev_dbg(&pctldev->dev,
+ dev_dbg(pctldev->dev,
"check if we have pin group %s on controller %s\n",
pin_group, pinctrl_dev_get_name(pctldev));
- ret = pinmux_get_group_selector(pctldev, pin_group);
+ ret = pinctrl_get_group_selector(pctldev, pin_group);
if (ret < 0) {
- dev_dbg(&pctldev->dev,
+ dev_dbg(pctldev->dev,
"%s does not support pin group %s with function %s\n",
pinctrl_dev_get_name(pctldev),
pin_group,
@@ -569,11 +616,9 @@ static int pinmux_enable_muxmap(struct pinctrl_dev *pctldev,
*/
if (pmx->pctldev && pmx->pctldev != pctldev) {
- dev_err(&pctldev->dev,
- "different pin control devices given for device %s, "
- "function %s\n",
- devname,
- map->function);
+ dev_err(pctldev->dev,
+ "different pin control devices given for device %s, function %s\n",
+ devname, map->function);
return -EINVAL;
}
pmx->dev = dev;
@@ -592,7 +637,7 @@ static int pinmux_enable_muxmap(struct pinctrl_dev *pctldev,
*/
if (pmx->func_selector != UINT_MAX &&
pmx->func_selector != func_selector) {
- dev_err(&pctldev->dev,
+ dev_err(pctldev->dev,
"dual function defines in the map for device %s\n",
devname);
return -EINVAL;
@@ -637,7 +682,6 @@ static void pinmux_free_groups(struct pinmux *pmx)
*/
struct pinmux *pinmux_get(struct device *dev, const char *name)
{
-
struct pinmux_map const *map = NULL;
struct pinctrl_dev *pctldev = NULL;
const char *devname = NULL;
@@ -687,8 +731,7 @@ struct pinmux *pinmux_get(struct device *dev, const char *name)
else if (map->ctrl_dev_name)
devname = map->ctrl_dev_name;
- pr_warning("could not find a pinctrl device for pinmux "
- "function %s, fishy, they shall all have one\n",
+ pr_warning("could not find a pinctrl device for pinmux function %s, fishy, they shall all have one\n",
map->function);
pr_warning("given pinctrl device name: %s",
devname ? devname : "UNDEFINED");
@@ -698,7 +741,7 @@ struct pinmux *pinmux_get(struct device *dev, const char *name)
}
pr_debug("in map, found pctldev %s to handle function %s",
- dev_name(&pctldev->dev), map->function);
+ dev_name(pctldev->dev), map->function);
/*
@@ -846,8 +889,11 @@ void pinmux_disable(struct pinmux *pmx)
}
EXPORT_SYMBOL_GPL(pinmux_disable);
-int pinmux_check_ops(const struct pinmux_ops *ops)
+int pinmux_check_ops(struct pinctrl_dev *pctldev)
{
+ const struct pinmux_ops *ops = pctldev->desc->pmxops;
+ unsigned selector = 0;
+
/* Check that we implement required operations */
if (!ops->list_functions ||
!ops->get_function_name ||
@@ -856,6 +902,18 @@ int pinmux_check_ops(const struct pinmux_ops *ops)
!ops->disable)
return -EINVAL;
+ /* Check that all functions registered have names */
+ while (ops->list_functions(pctldev, selector) >= 0) {
+ const char *fname = ops->get_function_name(pctldev,
+ selector);
+ if (!fname) {
+ pr_err("pinmux ops has no name for function%u\n",
+ selector);
+ return -EINVAL;
+ }
+ selector++;
+ }
+
return 0;
}
@@ -874,8 +932,8 @@ static int pinmux_hog_map(struct pinctrl_dev *pctldev,
* without any problems, so then we can hog pinmuxes for
* all devices that just want a static pin mux at this point.
*/
- dev_err(&pctldev->dev, "map %s wants to hog a non-system "
- "pinmux, this is not going to work\n", map->name);
+ dev_err(pctldev->dev, "map %s wants to hog a non-system pinmux, this is not going to work\n",
+ map->name);
return -EINVAL;
}
@@ -886,7 +944,7 @@ static int pinmux_hog_map(struct pinctrl_dev *pctldev,
pmx = pinmux_get(NULL, map->name);
if (IS_ERR(pmx)) {
kfree(hog);
- dev_err(&pctldev->dev,
+ dev_err(pctldev->dev,
"could not get the %s pinmux mapping for hogging\n",
map->name);
return PTR_ERR(pmx);
@@ -896,7 +954,7 @@ static int pinmux_hog_map(struct pinctrl_dev *pctldev,
if (ret) {
pinmux_put(pmx);
kfree(hog);
- dev_err(&pctldev->dev,
+ dev_err(pctldev->dev,
"could not enable the %s pinmux mapping for hogging\n",
map->name);
return ret;
@@ -905,7 +963,7 @@ static int pinmux_hog_map(struct pinctrl_dev *pctldev,
hog->map = map;
hog->pmx = pmx;
- dev_info(&pctldev->dev, "hogged map %s, function %s\n", map->name,
+ dev_info(pctldev->dev, "hogged map %s, function %s\n", map->name,
map->function);
mutex_lock(&pctldev->pinmux_hogs_lock);
list_add(&hog->node, &pctldev->pinmux_hogs);
@@ -924,7 +982,7 @@ static int pinmux_hog_map(struct pinctrl_dev *pctldev,
*/
int pinmux_hog_maps(struct pinctrl_dev *pctldev)
{
- struct device *dev = &pctldev->dev;
+ struct device *dev = pctldev->dev;
const char *devname = dev_name(dev);
int ret;
int i;
@@ -935,9 +993,12 @@ int pinmux_hog_maps(struct pinctrl_dev *pctldev)
for (i = 0; i < pinmux_maps_num; i++) {
struct pinmux_map const *map = &pinmux_maps[i];
- if (((map->ctrl_dev == dev) ||
- !strcmp(map->ctrl_dev_name, devname)) &&
- map->hog_on_boot) {
+ if (!map->hog_on_boot)
+ continue;
+
+ if ((map->ctrl_dev == dev) ||
+ (map->ctrl_dev_name &&
+ !strcmp(map->ctrl_dev_name, devname))) {
/* OK time to hog! */
ret = pinmux_hog_map(pctldev, map);
if (ret)
@@ -948,7 +1009,7 @@ int pinmux_hog_maps(struct pinctrl_dev *pctldev)
}
/**
- * pinmux_hog_maps() - unhog specific map entries on controller device
+ * pinmux_unhog_maps() - unhog specific map entries on controller device
* @pctldev: the pin control device to unhog entries on
*/
void pinmux_unhog_maps(struct pinctrl_dev *pctldev)
@@ -1005,18 +1066,19 @@ static int pinmux_functions_show(struct seq_file *s, void *what)
static int pinmux_pins_show(struct seq_file *s, void *what)
{
struct pinctrl_dev *pctldev = s->private;
- unsigned pin;
+ unsigned i, pin;
seq_puts(s, "Pinmux settings per pin\n");
seq_puts(s, "Format: pin (name): pinmuxfunction\n");
- /* The highest pin number need to be included in the loop, thus <= */
- for (pin = 0; pin <= pctldev->desc->maxpin; pin++) {
+ /* The pin number can be retrieved from the pin controller descriptor */
+ for (i = 0; i < pctldev->desc->npins; i++) {
struct pin_desc *desc;
+ pin = pctldev->desc->pins[i].number;
desc = pin_desc_get(pctldev, pin);
- /* Pin space may be sparse */
+ /* Skip pins we cannot look up (pin space may be sparse) */
if (desc == NULL)
continue;
@@ -1063,13 +1125,15 @@ static int pinmux_show(struct seq_file *s, void *what)
seq_printf(s, "device: %s function: %s (%u),",
pinctrl_dev_get_name(pmx->pctldev),
- pmxops->get_function_name(pctldev, pmx->func_selector),
+ pmxops->get_function_name(pctldev,
+ pmx->func_selector),
pmx->func_selector);
seq_printf(s, " groups: [");
list_for_each_entry(grp, &pmx->groups, node) {
seq_printf(s, " %s (%u)",
- pctlops->get_group_name(pctldev, grp->group_selector),
+ pctlops->get_group_name(pctldev,
+ grp->group_selector),
grp->group_selector);
}
seq_printf(s, " ]");
diff --git a/drivers/pinctrl/pinmux.h b/drivers/pinctrl/pinmux.h
index 844500b..97f5222 100644
--- a/drivers/pinctrl/pinmux.h
+++ b/drivers/pinctrl/pinmux.h
@@ -12,7 +12,7 @@
*/
#ifdef CONFIG_PINMUX
-int pinmux_check_ops(const struct pinmux_ops *ops);
+int pinmux_check_ops(struct pinctrl_dev *pctldev);
void pinmux_init_device_debugfs(struct dentry *devroot,
struct pinctrl_dev *pctldev);
void pinmux_init_debugfs(struct dentry *subsys_root);
@@ -21,7 +21,7 @@ void pinmux_unhog_maps(struct pinctrl_dev *pctldev);
#else
-static inline int pinmux_check_ops(const struct pinmux_ops *ops)
+static inline int pinmux_check_ops(struct pinctrl_dev *pctldev)
{
return 0;
}
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 7f43cf8..15dbd8c 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -143,6 +143,30 @@ config FUJITSU_LAPTOP_DEBUG
If you are not sure, say N here.
+config FUJITSU_TABLET
+ tristate "Fujitsu Tablet Extras"
+ depends on ACPI
+ depends on INPUT
+ ---help---
+ This is a driver for tablets built by Fujitsu:
+
+ * Lifebook P1510/P1610/P1620/Txxxx
+ * Stylistic ST5xxx
+ * Possibly other Fujitsu tablet models
+
+ It adds support for the panel buttons, docking station detection,
+ tablet/notebook mode detection for convertibles and
+ orientation detection for docked slates.
+
+ If you have a Fujitsu convertible or slate, say Y or M here.
+
+config AMILO_RFKILL
+ tristate "Fujitsu-Siemens Amilo rfkill support"
+ depends on RFKILL
+ ---help---
+ This is a driver for enabling wifi on some Fujitsu-Siemens Amilo
+ laptops.
+
config TC1100_WMI
tristate "HP Compaq TC1100 Tablet WMI Extras (EXPERIMENTAL)"
depends on !X86_64
@@ -639,7 +663,7 @@ config ACPI_CMPC
config INTEL_SCU_IPC
bool "Intel SCU IPC Support"
- depends on X86_MRST
+ depends on X86_INTEL_MID
default y
---help---
IPC is used to bridge the communications between kernel and SCU on
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 293a320..d328f21 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -17,12 +17,14 @@ obj-$(CONFIG_ACER_WMI) += acer-wmi.o
obj-$(CONFIG_ACERHDF) += acerhdf.o
obj-$(CONFIG_HP_ACCEL) += hp_accel.o
obj-$(CONFIG_HP_WMI) += hp-wmi.o
+obj-$(CONFIG_AMILO_RFKILL) += amilo-rfkill.o
obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o
obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o
obj-$(CONFIG_IDEAPAD_LAPTOP) += ideapad-laptop.o
obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
obj-$(CONFIG_SENSORS_HDAPS) += hdaps.o
obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o
+obj-$(CONFIG_FUJITSU_TABLET) += fujitsu-tablet.o
obj-$(CONFIG_PANASONIC_LAPTOP) += panasonic-laptop.o
obj-$(CONFIG_INTEL_MENLOW) += intel_menlow.o
obj-$(CONFIG_ACPI_WMI) += wmi.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index b848277..1e5290b 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -679,6 +679,32 @@ static acpi_status AMW0_find_mailled(void)
return AE_OK;
}
+static int AMW0_set_cap_acpi_check_device_found;
+
+static acpi_status AMW0_set_cap_acpi_check_device_cb(acpi_handle handle,
+ u32 level, void *context, void **retval)
+{
+ AMW0_set_cap_acpi_check_device_found = 1;
+ return AE_OK;
+}
+
+static const struct acpi_device_id norfkill_ids[] = {
+ { "VPC2004", 0},
+ { "IBM0068", 0},
+ { "LEN0068", 0},
+ { "", 0},
+};
+
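+/*
+ * Scan the ACPI namespace for devices listed in norfkill_ids; if one is
+ * present, do not advertise wireless (rfkill) capability from acer-wmi.
+ */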
+static int AMW0_set_cap_acpi_check_device(void)
+{
+ const struct acpi_device_id *id;
+
+ for (id = norfkill_ids; id->id[0]; id++)
+ acpi_get_devices(id->id, AMW0_set_cap_acpi_check_device_cb,
+ NULL, NULL);
+ return AMW0_set_cap_acpi_check_device_found;
+}
+
static acpi_status AMW0_set_capabilities(void)
{
struct wmab_args args;
@@ -692,7 +718,9 @@ static acpi_status AMW0_set_capabilities(void)
* work.
*/
if (wmi_has_guid(AMW0_GUID2)) {
- interface->capability |= ACER_CAP_WIRELESS;
+ if ((quirks != &quirk_unknown) ||
+ !AMW0_set_cap_acpi_check_device())
+ interface->capability |= ACER_CAP_WIRELESS;
return AE_OK;
}
diff --git a/drivers/platform/x86/amilo-rfkill.c b/drivers/platform/x86/amilo-rfkill.c
new file mode 100644
index 0000000..19170bb
--- /dev/null
+++ b/drivers/platform/x86/amilo-rfkill.c
@@ -0,0 +1,173 @@
+/*
+ * Support for rfkill on some Fujitsu-Siemens Amilo laptops.
+ * Copyright 2011 Ben Hutchings.
+ *
+ * Based in part on the fsam7440 driver, which is:
+ * Copyright 2005 Alejandro Vidal Mata & Javier Vidal Mata.
+ * and on the fsaa1655g driver, which is:
+ * Copyright 2006 Martin Večeřa.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/dmi.h>
+#include <linux/i8042.h>
+#include <linux/io.h>
+#include <linux/moduleparam.h>
+#include <linux/platform_device.h>
+#include <linux/rfkill.h>
+
+/*
+ * These values were obtained from disassembling and debugging the
+ * PM.exe program installed in the Fujitsu-Siemens AMILO A1655G
+ */
+#define A1655_WIFI_COMMAND 0x10C5
+#define A1655_WIFI_ON 0x25
+#define A1655_WIFI_OFF 0x45
+
+static int amilo_a1655_rfkill_set_block(void *data, bool blocked)
+{
+ u8 param = blocked ? A1655_WIFI_OFF : A1655_WIFI_ON;
+ int rc;
+
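+ /* The wireless state is toggled with a single i8042 controller command */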
+ i8042_lock_chip();
+ rc = i8042_command(&param, A1655_WIFI_COMMAND);
+ i8042_unlock_chip();
+ return rc;
+}
+
+static const struct rfkill_ops amilo_a1655_rfkill_ops = {
+ .set_block = amilo_a1655_rfkill_set_block
+};
+
+/*
+ * These values were obtained from disassembling the PM.exe program
+ * installed in the Fujitsu-Siemens AMILO M 7440
+ */
+#define M7440_PORT1 0x118f
+#define M7440_PORT2 0x118e
+#define M7440_RADIO_ON1 0x12
+#define M7440_RADIO_ON2 0x80
+#define M7440_RADIO_OFF1 0x10
+#define M7440_RADIO_OFF2 0x00
+
+static int amilo_m7440_rfkill_set_block(void *data, bool blocked)
+{
+ u8 val1 = blocked ? M7440_RADIO_OFF1 : M7440_RADIO_ON1;
+ u8 val2 = blocked ? M7440_RADIO_OFF2 : M7440_RADIO_ON2;
+
+ outb(val1, M7440_PORT1);
+ outb(val2, M7440_PORT2);
+
+ /* Check whether the state has changed correctly */
+ if (inb(M7440_PORT1) != val1 || inb(M7440_PORT2) != val2)
+ return -EIO;
+
+ return 0;
+}
+
+static const struct rfkill_ops amilo_m7440_rfkill_ops = {
+ .set_block = amilo_m7440_rfkill_set_block
+};
+
+static const struct dmi_system_id __devinitdata amilo_rfkill_id_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_BOARD_NAME, "AMILO A1655"),
+ },
+ .driver_data = (void *)&amilo_a1655_rfkill_ops
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
+ DMI_MATCH(DMI_BOARD_NAME, "AMILO M7440"),
+ },
+ .driver_data = (void *)&amilo_m7440_rfkill_ops
+ },
+ {}
+};
+
+static struct platform_device *amilo_rfkill_pdev;
+static struct rfkill *amilo_rfkill_dev;
+
+static int __devinit amilo_rfkill_probe(struct platform_device *device)
+{
+ const struct dmi_system_id *system_id =
+ dmi_first_match(amilo_rfkill_id_table);
+ int rc;
+
+ amilo_rfkill_dev = rfkill_alloc(KBUILD_MODNAME, &device->dev,
+ RFKILL_TYPE_WLAN,
+ system_id->driver_data, NULL);
+ if (!amilo_rfkill_dev)
+ return -ENOMEM;
+
+ rc = rfkill_register(amilo_rfkill_dev);
+ if (rc)
+ goto fail;
+
+ return 0;
+
+fail:
+ rfkill_destroy(amilo_rfkill_dev);
+ return rc;
+}
+
+static int amilo_rfkill_remove(struct platform_device *device)
+{
+ rfkill_unregister(amilo_rfkill_dev);
+ rfkill_destroy(amilo_rfkill_dev);
+ return 0;
+}
+
+static struct platform_driver amilo_rfkill_driver = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = amilo_rfkill_probe,
+ .remove = amilo_rfkill_remove,
+};
+
+static int __init amilo_rfkill_init(void)
+{
+ int rc;
+
+ if (dmi_first_match(amilo_rfkill_id_table) == NULL)
+ return -ENODEV;
+
+ rc = platform_driver_register(&amilo_rfkill_driver);
+ if (rc)
+ return rc;
+
+ amilo_rfkill_pdev = platform_device_register_simple(KBUILD_MODNAME, -1,
+ NULL, 0);
+ if (IS_ERR(amilo_rfkill_pdev)) {
+ rc = PTR_ERR(amilo_rfkill_pdev);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ platform_driver_unregister(&amilo_rfkill_driver);
+ return rc;
+}
+
+static void __exit amilo_rfkill_exit(void)
+{
+ platform_device_unregister(amilo_rfkill_pdev);
+ platform_driver_unregister(&amilo_rfkill_driver);
+}
+
+MODULE_AUTHOR("Ben Hutchings <ben@decadent.org.uk>");
+MODULE_LICENSE("GPL");
+MODULE_DEVICE_TABLE(dmi, amilo_rfkill_id_table);
+
+module_init(amilo_rfkill_init);
+module_exit(amilo_rfkill_exit);
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index edaccad..b7944f9 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -1477,7 +1477,7 @@ static struct attribute *asus_attributes[] = {
NULL
};
-static mode_t asus_sysfs_is_visible(struct kobject *kobj,
+static umode_t asus_sysfs_is_visible(struct kobject *kobj,
struct attribute *attr,
int idx)
{
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index d1049ee..72d731c 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -992,7 +992,7 @@ static struct attribute *hwmon_attributes[] = {
NULL
};
-static mode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
+static umode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj,
struct attribute *attr, int idx)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -1357,7 +1357,7 @@ static struct attribute *platform_attributes[] = {
NULL
};
-static mode_t asus_sysfs_is_visible(struct kobject *kobj,
+static umode_t asus_sysfs_is_visible(struct kobject *kobj,
struct attribute *attr, int idx)
{
struct device *dev = container_of(kobj, struct device, kobj);
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index d9312b3..6f966d6 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -1053,7 +1053,7 @@ static const struct file_operations disp_proc_fops = {
};
static int
-asus_proc_add(char *name, const struct file_operations *proc_fops, mode_t mode,
+asus_proc_add(char *name, const struct file_operations *proc_fops, umode_t mode,
struct acpi_device *device)
{
struct proc_dir_entry *proc;
@@ -1072,7 +1072,7 @@ asus_proc_add(char *name, const struct file_operations *proc_fops, mode_t mode,
static int asus_hotk_add_fs(struct acpi_device *device)
{
struct proc_dir_entry *proc;
- mode_t mode;
+ umode_t mode;
if ((asus_uid == 0) && (asus_gid == 0)) {
mode = S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP;
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 8877b83..d967344 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -189,7 +189,7 @@ struct compal_data{
/* =============== */
/* General globals */
/* =============== */
-static int force;
+static bool force;
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Force driver load, ignore DMI data");
diff --git a/drivers/platform/x86/fujitsu-tablet.c b/drivers/platform/x86/fujitsu-tablet.c
new file mode 100644
index 0000000..580d80a
--- /dev/null
+++ b/drivers/platform/x86/fujitsu-tablet.c
@@ -0,0 +1,478 @@
+/*
+ * Copyright (C) 2006-2012 Robert Gerlach <khnz@gmx.de>
+ * Copyright (C) 2005-2006 Jan Rychter <jan@rychter.com>
+ *
+ * You can redistribute and/or modify this program under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
+ * Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/input.h>
+#include <linux/delay.h>
+#include <linux/dmi.h>
+
+#define MODULENAME "fujitsu-tablet"
+
+#define ACPI_FUJITSU_CLASS "fujitsu"
+
+#define INVERT_TABLET_MODE_BIT 0x01
+#define FORCE_TABLET_MODE_IF_UNDOCK 0x02
+
+#define KEYMAP_LEN 16
+
+static const struct acpi_device_id fujitsu_ids[] = {
+ { .id = "FUJ02BD" },
+ { .id = "FUJ02BF" },
+ { .id = "" }
+};
+
+struct fujitsu_config {
+ unsigned short keymap[KEYMAP_LEN];
+ unsigned int quirks;
+};
+
+static unsigned short keymap_Lifebook_Tseries[KEYMAP_LEN] __initconst = {
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_SCROLLDOWN,
+ KEY_SCROLLUP,
+ KEY_DIRECTION,
+ KEY_LEFTCTRL,
+ KEY_BRIGHTNESSUP,
+ KEY_BRIGHTNESSDOWN,
+ KEY_BRIGHTNESS_ZERO,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_LEFTALT
+};
+
+static unsigned short keymap_Lifebook_U810[KEYMAP_LEN] __initconst = {
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_PROG1,
+ KEY_PROG2,
+ KEY_DIRECTION,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_UP,
+ KEY_DOWN,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_LEFTCTRL,
+ KEY_LEFTALT
+};
+
+static unsigned short keymap_Stylistic_Tseries[KEYMAP_LEN] __initconst = {
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_PRINT,
+ KEY_BACKSPACE,
+ KEY_SPACE,
+ KEY_ENTER,
+ KEY_BRIGHTNESSUP,
+ KEY_BRIGHTNESSDOWN,
+ KEY_DOWN,
+ KEY_UP,
+ KEY_SCROLLUP,
+ KEY_SCROLLDOWN,
+ KEY_LEFTCTRL,
+ KEY_LEFTALT
+};
+
+static unsigned short keymap_Stylistic_ST5xxx[KEYMAP_LEN] __initconst = {
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_RESERVED,
+ KEY_MAIL,
+ KEY_DIRECTION,
+ KEY_ESC,
+ KEY_ENTER,
+ KEY_BRIGHTNESSUP,
+ KEY_BRIGHTNESSDOWN,
+ KEY_DOWN,
+ KEY_UP,
+ KEY_SCROLLUP,
+ KEY_SCROLLDOWN,
+ KEY_LEFTCTRL,
+ KEY_LEFTALT
+};
+
+static struct {
+ struct input_dev *idev;
+ struct fujitsu_config config;
+ unsigned long prev_keymask;
+
+ char phys[21];
+
+ int irq;
+ int io_base;
+ int io_length;
+} fujitsu;
+
+static u8 fujitsu_ack(void)
+{
+ return inb(fujitsu.io_base + 2);
+}
+
+static u8 fujitsu_status(void)
+{
+ return inb(fujitsu.io_base + 6);
+}
+
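+/*
+ * Registers are accessed through an index/data pair: write the register
+ * index to io_base and read the value back from io_base + 4.
+ */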
+static u8 fujitsu_read_register(const u8 addr)
+{
+ outb(addr, fujitsu.io_base);
+ return inb(fujitsu.io_base + 4);
+}
+
+static void fujitsu_send_state(void)
+{
+ int state;
+ int dock, tablet_mode;
+
+ state = fujitsu_read_register(0xdd);
+
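+ /* Register 0xdd: bit 1 = docked, bit 0 = tablet mode */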
+ dock = state & 0x02;
+
+ if ((fujitsu.config.quirks & FORCE_TABLET_MODE_IF_UNDOCK) && (!dock)) {
+ tablet_mode = 1;
+ } else {
+ tablet_mode = state & 0x01;
+ if (fujitsu.config.quirks & INVERT_TABLET_MODE_BIT)
+ tablet_mode = !tablet_mode;
+ }
+
+ input_report_switch(fujitsu.idev, SW_DOCK, dock);
+ input_report_switch(fujitsu.idev, SW_TABLET_MODE, tablet_mode);
+ input_sync(fujitsu.idev);
+}
+
+static void fujitsu_reset(void)
+{
+ int timeout = 50;
+
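+ /* Acknowledge any pending event and wait (up to ~1 s) for status bit 0x02 to clear */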
+ fujitsu_ack();
+
+ while ((fujitsu_status() & 0x02) && (--timeout))
+ msleep(20);
+
+ fujitsu_send_state();
+}
+
+static int __devinit input_fujitsu_setup(struct device *parent,
+ const char *name, const char *phys)
+{
+ struct input_dev *idev;
+ int error;
+ int i;
+
+ idev = input_allocate_device();
+ if (!idev)
+ return -ENOMEM;
+
+ idev->dev.parent = parent;
+ idev->phys = phys;
+ idev->name = name;
+ idev->id.bustype = BUS_HOST;
+ idev->id.vendor = 0x1734; /* Fujitsu Siemens Computer GmbH */
+ idev->id.product = 0x0001;
+ idev->id.version = 0x0101;
+
+ idev->keycode = fujitsu.config.keymap;
+ idev->keycodesize = sizeof(fujitsu.config.keymap[0]);
+ idev->keycodemax = ARRAY_SIZE(fujitsu.config.keymap);
+
+ __set_bit(EV_REP, idev->evbit);
+
+ for (i = 0; i < ARRAY_SIZE(fujitsu.config.keymap); i++)
+ if (fujitsu.config.keymap[i])
+ input_set_capability(idev, EV_KEY, fujitsu.config.keymap[i]);
+
+ input_set_capability(idev, EV_MSC, MSC_SCAN);
+
+ input_set_capability(idev, EV_SW, SW_DOCK);
+ input_set_capability(idev, EV_SW, SW_TABLET_MODE);
+
+ error = input_register_device(idev);
+ if (error) {
+ input_free_device(idev);
+ return error;
+ }
+
+ fujitsu.idev = idev;
+ return 0;
+}
+
+static void input_fujitsu_remove(void)
+{
+ input_unregister_device(fujitsu.idev);
+}
+
+static irqreturn_t fujitsu_interrupt(int irq, void *dev_id)
+{
+ unsigned long keymask, changed;
+ unsigned int keycode;
+ int pressed;
+ int i;
+
+ if (unlikely(!(fujitsu_status() & 0x01)))
+ return IRQ_NONE;
+
+ fujitsu_send_state();
+
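+ /* Registers 0xde/0xdf hold a 16-bit button mask; bits are active-low, so invert */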
+ keymask = fujitsu_read_register(0xde);
+ keymask |= fujitsu_read_register(0xdf) << 8;
+ keymask ^= 0xffff;
+
+ changed = keymask ^ fujitsu.prev_keymask;
+ if (changed) {
+ fujitsu.prev_keymask = keymask;
+
+ for_each_set_bit(i, &changed, KEYMAP_LEN) {
+ keycode = fujitsu.config.keymap[i];
+ pressed = keymask & changed & BIT(i);
+
+ if (pressed)
+ input_event(fujitsu.idev, EV_MSC, MSC_SCAN, i);
+
+ input_report_key(fujitsu.idev, keycode, pressed);
+ input_sync(fujitsu.idev);
+ }
+ }
+
+ fujitsu_ack();
+ return IRQ_HANDLED;
+}
+
+static int __devinit fujitsu_dmi_default(const struct dmi_system_id *dmi)
+{
+ printk(KERN_INFO MODULENAME ": %s\n", dmi->ident);
+ memcpy(fujitsu.config.keymap, dmi->driver_data,
+ sizeof(fujitsu.config.keymap));
+ return 1;
+}
+
+static int __devinit fujitsu_dmi_stylistic(const struct dmi_system_id *dmi)
+{
+ fujitsu_dmi_default(dmi);
+ fujitsu.config.quirks |= FORCE_TABLET_MODE_IF_UNDOCK;
+ fujitsu.config.quirks |= INVERT_TABLET_MODE_BIT;
+ return 1;
+}
+
+static struct dmi_system_id dmi_ids[] __initconst = {
+ {
+ .callback = fujitsu_dmi_default,
+ .ident = "Fujitsu Siemens P/T Series",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK")
+ },
+ .driver_data = keymap_Lifebook_Tseries
+ },
+ {
+ .callback = fujitsu_dmi_default,
+ .ident = "Fujitsu Lifebook T Series",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook T")
+ },
+ .driver_data = keymap_Lifebook_Tseries
+ },
+ {
+ .callback = fujitsu_dmi_stylistic,
+ .ident = "Fujitsu Siemens Stylistic T Series",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Stylistic T")
+ },
+ .driver_data = keymap_Stylistic_Tseries
+ },
+ {
+ .callback = fujitsu_dmi_default,
+ .ident = "Fujitsu LifeBook U810",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook U810")
+ },
+ .driver_data = keymap_Lifebook_U810
+ },
+ {
+ .callback = fujitsu_dmi_stylistic,
+ .ident = "Fujitsu Siemens Stylistic ST5xxx Series",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "STYLISTIC ST5")
+ },
+ .driver_data = keymap_Stylistic_ST5xxx
+ },
+ {
+ .callback = fujitsu_dmi_stylistic,
+ .ident = "Fujitsu Siemens Stylistic ST5xxx Series",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Stylistic ST5")
+ },
+ .driver_data = keymap_Stylistic_ST5xxx
+ },
+ {
+ .callback = fujitsu_dmi_default,
+ .ident = "Unknown (using defaults)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, ""),
+ DMI_MATCH(DMI_PRODUCT_NAME, "")
+ },
+ .driver_data = keymap_Lifebook_Tseries
+ },
+ { NULL }
+};
+
+static acpi_status __devinit
+fujitsu_walk_resources(struct acpi_resource *res, void *data)
+{
+ switch (res->type) {
+ case ACPI_RESOURCE_TYPE_IRQ:
+ fujitsu.irq = res->data.irq.interrupts[0];
+ return AE_OK;
+
+ case ACPI_RESOURCE_TYPE_IO:
+ fujitsu.io_base = res->data.io.minimum;
+ fujitsu.io_length = res->data.io.address_length;
+ return AE_OK;
+
+ case ACPI_RESOURCE_TYPE_END_TAG:
+ if (fujitsu.irq && fujitsu.io_base)
+ return AE_OK;
+ else
+ return AE_NOT_FOUND;
+
+ default:
+ return AE_ERROR;
+ }
+}
+
+static int __devinit acpi_fujitsu_add(struct acpi_device *adev)
+{
+ acpi_status status;
+ int error;
+
+ if (!adev)
+ return -EINVAL;
+
+ status = acpi_walk_resources(adev->handle, METHOD_NAME__CRS,
+ fujitsu_walk_resources, NULL);
+ if (ACPI_FAILURE(status) || !fujitsu.irq || !fujitsu.io_base)
+ return -ENODEV;
+
+ sprintf(acpi_device_name(adev), "Fujitsu %s", acpi_device_hid(adev));
+ sprintf(acpi_device_class(adev), "%s", ACPI_FUJITSU_CLASS);
+
+ snprintf(fujitsu.phys, sizeof(fujitsu.phys),
+ "%s/input0", acpi_device_hid(adev));
+
+ error = input_fujitsu_setup(&adev->dev,
+ acpi_device_name(adev), fujitsu.phys);
+ if (error)
+ return error;
+
+ if (!request_region(fujitsu.io_base, fujitsu.io_length, MODULENAME)) {
+ input_fujitsu_remove();
+ return -EBUSY;
+ }
+
+ fujitsu_reset();
+
+ error = request_irq(fujitsu.irq, fujitsu_interrupt,
+ IRQF_SHARED, MODULENAME, fujitsu_interrupt);
+ if (error) {
+ release_region(fujitsu.io_base, fujitsu.io_length);
+ input_fujitsu_remove();
+ return error;
+ }
+
+ return 0;
+}
+
+static int __devexit acpi_fujitsu_remove(struct acpi_device *adev, int type)
+{
+ free_irq(fujitsu.irq, fujitsu_interrupt);
+ release_region(fujitsu.io_base, fujitsu.io_length);
+ input_fujitsu_remove();
+ return 0;
+}
+
+static int acpi_fujitsu_resume(struct acpi_device *adev)
+{
+ fujitsu_reset();
+ return 0;
+}
+
+static struct acpi_driver acpi_fujitsu_driver = {
+ .name = MODULENAME,
+ .class = "hotkey",
+ .ids = fujitsu_ids,
+ .ops = {
+ .add = acpi_fujitsu_add,
+ .remove = acpi_fujitsu_remove,
+ .resume = acpi_fujitsu_resume,
+ }
+};
+
+static int __init fujitsu_module_init(void)
+{
+ int error;
+
+ dmi_check_system(dmi_ids);
+
+ error = acpi_bus_register_driver(&acpi_fujitsu_driver);
+ if (error)
+ return error;
+
+ return 0;
+}
+
+static void __exit fujitsu_module_exit(void)
+{
+ acpi_bus_unregister_driver(&acpi_fujitsu_driver);
+}
+
+module_init(fujitsu_module_init);
+module_exit(fujitsu_module_exit);
+
+MODULE_AUTHOR("Robert Gerlach <khnz@gmx.de>");
+MODULE_DESCRIPTION("Fujitsu tablet pc extras driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("2.4");
+
+MODULE_DEVICE_TABLE(acpi, fujitsu_ids);
diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
index 811d436..7481146 100644
--- a/drivers/platform/x86/ibm_rtl.c
+++ b/drivers/platform/x86/ibm_rtl.c
@@ -28,12 +28,13 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/io.h>
-#include <linux/sysdev.h>
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/mutex.h>
#include <asm/bios_ebda.h>
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
+
static bool force;
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Force driver load, ignore DMI data");
@@ -84,19 +85,6 @@ static void __iomem *rtl_cmd_addr;
static u8 rtl_cmd_type;
static u8 rtl_cmd_width;
-#ifndef readq
-static inline __u64 readq(const volatile void __iomem *addr)
-{
- const volatile u32 __iomem *p = addr;
- u32 low, high;
-
- low = readl(p);
- high = readl(p + 1);
-
- return low + ((u64)high << 32);
-}
-#endif
-
static void __iomem *rtl_port_map(phys_addr_t addr, unsigned long len)
{
if (rtl_cmd_type == RTL_ADDR_TYPE_MMIO)
@@ -165,22 +153,22 @@ static int ibm_rtl_write(u8 value)
return ret;
}
-static ssize_t rtl_show_version(struct sysdev_class * dev,
- struct sysdev_class_attribute *attr,
+static ssize_t rtl_show_version(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", (int)ioread8(&rtl_table->version));
}
-static ssize_t rtl_show_state(struct sysdev_class *dev,
- struct sysdev_class_attribute *attr,
+static ssize_t rtl_show_state(struct device *dev,
+ struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", ioread8(&rtl_table->rt_status));
}
-static ssize_t rtl_set_state(struct sysdev_class *dev,
- struct sysdev_class_attribute *attr,
+static ssize_t rtl_set_state(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
@@ -205,27 +193,28 @@ static ssize_t rtl_set_state(struct sysdev_class *dev,
return ret;
}
-static struct sysdev_class class_rtl = {
+static struct bus_type rtl_subsys = {
.name = "ibm_rtl",
+ .dev_name = "ibm_rtl",
};
-static SYSDEV_CLASS_ATTR(version, S_IRUGO, rtl_show_version, NULL);
-static SYSDEV_CLASS_ATTR(state, 0600, rtl_show_state, rtl_set_state);
+static DEVICE_ATTR(version, S_IRUGO, rtl_show_version, NULL);
+static DEVICE_ATTR(state, 0600, rtl_show_state, rtl_set_state);
-static struct sysdev_class_attribute *rtl_attributes[] = {
- &attr_version,
- &attr_state,
+static struct device_attribute *rtl_attributes[] = {
+ &dev_attr_version,
+ &dev_attr_state,
NULL
};
static int rtl_setup_sysfs(void) {
int ret, i;
- ret = sysdev_class_register(&class_rtl);
+ ret = subsys_system_register(&rtl_subsys, NULL);
if (!ret) {
for (i = 0; rtl_attributes[i]; i ++)
- sysdev_class_create_file(&class_rtl, rtl_attributes[i]);
+ device_create_file(rtl_subsys.dev_root, rtl_attributes[i]);
}
return ret;
}
@@ -233,8 +222,8 @@ static int rtl_setup_sysfs(void) {
static void rtl_teardown_sysfs(void) {
int i;
for (i = 0; rtl_attributes[i]; i ++)
- sysdev_class_remove_file(&class_rtl, rtl_attributes[i]);
- sysdev_class_unregister(&class_rtl);
+ device_remove_file(rtl_subsys.dev_root, rtl_attributes[i]);
+ bus_unregister(&rtl_subsys);
}
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index a36addf..ac902f7 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -368,7 +368,7 @@ static struct attribute *ideapad_attributes[] = {
NULL
};
-static mode_t ideapad_is_visible(struct kobject *kobj,
+static umode_t ideapad_is_visible(struct kobject *kobj,
struct attribute *attr,
int idx)
{
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index 809a3ae..88a98cf 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -77,6 +77,8 @@
#include <asm/processor.h>
#include "intel_ips.h"
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
+
#define PCI_DEVICE_ID_INTEL_THERMAL_SENSOR 0x3b32
/*
@@ -344,19 +346,6 @@ struct ips_driver {
static bool
ips_gpu_turbo_enabled(struct ips_driver *ips);
-#ifndef readq
-static inline __u64 readq(const volatile void __iomem *addr)
-{
- const volatile u32 __iomem *p = addr;
- u32 low, high;
-
- low = readl(p);
- high = readl(p + 1);
-
- return low + ((u64)high << 32);
-}
-#endif
-
/**
* ips_cpu_busy - is CPU busy?
* @ips: IPS driver struct
diff --git a/drivers/platform/x86/intel_menlow.c b/drivers/platform/x86/intel_menlow.c
index abddc83..3271ac8 100644
--- a/drivers/platform/x86/intel_menlow.c
+++ b/drivers/platform/x86/intel_menlow.c
@@ -389,7 +389,7 @@ static ssize_t bios_enabled_show(struct device *dev,
return sprintf(buf, "%s\n", bios_enabled ? "enabled" : "disabled");
}
-static int intel_menlow_add_one_attribute(char *name, int mode, void *show,
+static int intel_menlow_add_one_attribute(char *name, umode_t mode, void *show,
void *store, struct device *dev,
acpi_handle handle)
{
diff --git a/drivers/platform/x86/intel_oaktrail.c b/drivers/platform/x86/intel_oaktrail.c
index 7f88c79..6ee0b5c 100644
--- a/drivers/platform/x86/intel_oaktrail.c
+++ b/drivers/platform/x86/intel_oaktrail.c
@@ -95,7 +95,7 @@
#define OT_EC_BL_CONTROL_ON_DATA 0x1A
-static int force;
+static bool force;
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Force driver load, ignore DMI data");
diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c
index 48870e5..f00d0d1 100644
--- a/drivers/platform/x86/intel_scu_ipc.c
+++ b/drivers/platform/x86/intel_scu_ipc.c
@@ -19,7 +19,7 @@
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/init.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index f204643..bb51321 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -89,7 +89,7 @@ static int msi_laptop_resume(struct platform_device *device);
#define MSI_STANDARD_EC_DEVICES_EXISTS_ADDRESS 0x2f
-static int force;
+static bool force;
module_param(force, bool, 0);
MODULE_PARM_DESC(force, "Force driver load, ignore DMI data");
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index 05be30e..ffff8b4 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -562,8 +562,8 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device)
num_sifr = acpi_pcc_get_sqty(device);
- if (num_sifr > 255) {
- ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "num_sifr too large"));
+ if (num_sifr < 0 || num_sifr > 255) {
+ ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "num_sifr out of range"));
return -ENODEV;
}
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index 09e26bf..fd73ea8 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -228,12 +228,12 @@ static struct platform_device *sdev;
static struct rfkill *rfk;
static bool has_stepping_quirk;
-static int force;
+static bool force;
module_param(force, bool, 0);
MODULE_PARM_DESC(force,
"Disable the DMI check and forces the driver to be loaded");
-static int debug;
+static bool debug;
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug enabled or not");
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 7b82868..ea0c607 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -297,7 +297,7 @@ struct ibm_init_struct {
char param[32];
int (*init) (struct ibm_init_struct *);
- mode_t base_procfs_mode;
+ umode_t base_procfs_mode;
struct ibm_struct *data;
};
@@ -378,13 +378,13 @@ static unsigned int bright_maxlvl; /* 0 = unknown */
#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
static int dbg_wlswemul;
-static int tpacpi_wlsw_emulstate;
+static bool tpacpi_wlsw_emulstate;
static int dbg_bluetoothemul;
-static int tpacpi_bluetooth_emulstate;
+static bool tpacpi_bluetooth_emulstate;
static int dbg_wwanemul;
-static int tpacpi_wwan_emulstate;
+static bool tpacpi_wwan_emulstate;
static int dbg_uwbemul;
-static int tpacpi_uwb_emulstate;
+static bool tpacpi_uwb_emulstate;
#endif
@@ -2456,8 +2456,9 @@ static int hotkey_kthread(void *data)
u32 poll_mask, event_mask;
unsigned int si, so;
unsigned long t;
- unsigned int change_detector, must_reset;
+ unsigned int change_detector;
unsigned int poll_freq;
+ bool was_frozen;
mutex_lock(&hotkey_thread_mutex);
@@ -2488,14 +2489,14 @@ static int hotkey_kthread(void *data)
t = 100; /* should never happen... */
}
t = msleep_interruptible(t);
- if (unlikely(kthread_should_stop()))
+ if (unlikely(kthread_freezable_should_stop(&was_frozen)))
break;
- must_reset = try_to_freeze();
- if (t > 0 && !must_reset)
+
+ if (t > 0 && !was_frozen)
continue;
mutex_lock(&hotkey_thread_data_mutex);
- if (must_reset || hotkey_config_change != change_detector) {
+ if (was_frozen || hotkey_config_change != change_detector) {
/* forget old state on thaw or config change */
si = so;
t = 0;
@@ -2528,10 +2529,6 @@ exit:
static void hotkey_poll_stop_sync(void)
{
if (tpacpi_hotkey_task) {
- if (frozen(tpacpi_hotkey_task) ||
- freezing(tpacpi_hotkey_task))
- thaw_process(tpacpi_hotkey_task);
-
kthread_stop(tpacpi_hotkey_task);
tpacpi_hotkey_task = NULL;
mutex_lock(&hotkey_thread_mutex);
@@ -6447,7 +6444,7 @@ static struct ibm_struct brightness_driver_data = {
static int alsa_index = ~((1 << (SNDRV_CARDS - 3)) - 1); /* last three slots */
static char *alsa_id = "ThinkPadEC";
-static int alsa_enable = SNDRV_DEFAULT_ENABLE1;
+static bool alsa_enable = SNDRV_DEFAULT_ENABLE1;
struct tpacpi_alsa_data {
struct snd_card *card;
@@ -6490,7 +6487,7 @@ static enum tpacpi_volume_access_mode volume_mode =
TPACPI_VOL_MODE_MAX;
static enum tpacpi_volume_capabilities volume_capabilities;
-static int volume_control_allowed;
+static bool volume_control_allowed;
/*
* Used to syncronize writers to TP_EC_AUDIO and
@@ -7268,7 +7265,7 @@ enum fan_control_commands {
* and also watchdog cmd */
};
-static int fan_control_allowed;
+static bool fan_control_allowed;
static enum fan_status_access_mode fan_status_access_mode;
static enum fan_control_access_mode fan_control_access_mode;
@@ -8440,7 +8437,7 @@ static struct proc_dir_entry *proc_dir;
* Module and infrastructure proble, init and exit handling
*/
-static int force_load;
+static bool force_load;
#ifdef CONFIG_THINKPAD_ACPI_DEBUG
static const char * __init str_supported(int is_supported)
@@ -8542,7 +8539,7 @@ static int __init ibm_init(struct ibm_init_struct *iibm)
"%s installed\n", ibm->name);
if (ibm->read) {
- mode_t mode = iibm->base_procfs_mode;
+ umode_t mode = iibm->base_procfs_mode;
if (!mode)
mode = S_IRUGO;
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index a134c26..42a4dcc 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -82,12 +82,12 @@ struct wmi_block {
#define ACPI_WMI_STRING 0x4 /* GUID takes & returns a string */
#define ACPI_WMI_EVENT 0x8 /* GUID is an event */
-static int debug_event;
+static bool debug_event;
module_param(debug_event, bool, 0444);
MODULE_PARM_DESC(debug_event,
"Log WMI Events [0/1]");
-static int debug_dump_wdg;
+static bool debug_dump_wdg;
module_param(debug_dump_wdg, bool, 0444);
MODULE_PARM_DESC(debug_dump_wdg,
"Dump available WMI interfaces [0/1]");
diff --git a/drivers/pnp/quirks.c b/drivers/pnp/quirks.c
index dfbd5a6..258fef2 100644
--- a/drivers/pnp/quirks.c
+++ b/drivers/pnp/quirks.c
@@ -295,6 +295,45 @@ static void quirk_system_pci_resources(struct pnp_dev *dev)
}
}
+#ifdef CONFIG_AMD_NB
+
+#include <asm/amd_nb.h>
+
+static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
+{
+ resource_size_t start, end;
+ struct pnp_resource *pnp_res;
+ struct resource *res;
+ struct resource mmconfig_res, *mmconfig;
+
+ mmconfig = amd_get_mmconfig_range(&mmconfig_res);
+ if (!mmconfig)
+ return;
+
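+ /*
+ * If a PNP resource overlaps the MMCONFIG area but does not cover it
+ * completely, also reserve the uncovered head and/or tail of the area.
+ */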
+ list_for_each_entry(pnp_res, &dev->resources, list) {
+ res = &pnp_res->res;
+ if (res->end < mmconfig->start || res->start > mmconfig->end ||
+ (res->start == mmconfig->start && res->end == mmconfig->end))
+ continue;
+
+ dev_info(&dev->dev, FW_BUG
+ "%pR covers only part of AMD MMCONFIG area %pR; adding more reservations\n",
+ res, mmconfig);
+ if (mmconfig->start < res->start) {
+ start = mmconfig->start;
+ end = res->start - 1;
+ pnp_add_mem_resource(dev, start, end, 0);
+ }
+ if (mmconfig->end > res->end) {
+ start = res->end + 1;
+ end = mmconfig->end;
+ pnp_add_mem_resource(dev, start, end, 0);
+ }
+ break;
+ }
+}
+#endif
+
/*
* PnP Quirks
* Cards or devices that need some tweaking due to incomplete resource info
@@ -322,6 +361,9 @@ static struct pnp_fixup pnp_fixups[] = {
/* PnP resources that might overlap PCI BARs */
{"PNP0c01", quirk_system_pci_resources},
{"PNP0c02", quirk_system_pci_resources},
+#ifdef CONFIG_AMD_NB
+ {"PNP0c01", quirk_amd_mmconfig_area},
+#endif
{""}
};
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 9f88641..3a8daf8 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -116,12 +116,12 @@ config BATTERY_WM97XX
help
Say Y to enable support for battery measured by WM97xx aux port.
-config BATTERY_BQ20Z75
- tristate "TI BQ20z75 gas gauge"
+config BATTERY_SBS
+ tristate "SBS Compliant gas gauge"
depends on I2C
help
- Say Y to include support for TI BQ20z75 SBS-compliant
- gas gauge and protection IC.
+ Say Y to include support for SBS battery driver for SBS-compliant
+ gas gauges.
config BATTERY_BQ27x00
tristate "BQ27x00 battery driver"
@@ -150,6 +150,14 @@ config BATTERY_DA9030
Say Y here to enable support for batteries charger integrated into
DA9030 PMIC.
+config BATTERY_DA9052
+ tristate "Dialog DA9052 Battery"
+ depends on PMIC_DA9052
+ depends on BROKEN
+ help
+ Say Y here to enable support for the battery charger integrated into
+ the DA9052 PMIC.
+
config BATTERY_MAX17040
tristate "Maxim MAX17040 Fuel Gauge"
depends on I2C
@@ -226,6 +234,12 @@ config CHARGER_TWL4030
help
Say Y here to enable support for TWL4030 Battery Charge Interface.
+config CHARGER_LP8727
+ tristate "National Semiconductor LP8727 charger driver"
+ depends on I2C
+ help
+ Say Y here to enable support for the LP8727 charger driver.
+
config CHARGER_GPIO
tristate "GPIO charger"
depends on GPIOLIB
@@ -236,6 +250,16 @@ config CHARGER_GPIO
This driver can be build as a module. If so, the module will be
called gpio-charger.
+config CHARGER_MANAGER
+ bool "Battery charger manager for multiple chargers"
+ depends on REGULATOR && RTC_CLASS
+ help
+ Say Y to enable charger-manager support, which allows multiple
+ chargers attached to a battery and multiple batteries attached to a
+ system. The charger-manager can also monitor charging status at
+ runtime and during suspend-to-RAM by waking up the system periodically
+ with the help of suspend_again support.
+
config CHARGER_MAX8997
tristate "Maxim MAX8997/MAX8966 PMIC battery charger driver"
depends on MFD_MAX8997 && REGULATOR_MAX8997
diff --git a/drivers/power/Makefile b/drivers/power/Makefile
index b4af13d..e429008 100644
--- a/drivers/power/Makefile
+++ b/drivers/power/Makefile
@@ -22,9 +22,10 @@ obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o
obj-$(CONFIG_BATTERY_TOSA) += tosa_battery.o
obj-$(CONFIG_BATTERY_COLLIE) += collie_battery.o
obj-$(CONFIG_BATTERY_WM97XX) += wm97xx_battery.o
-obj-$(CONFIG_BATTERY_BQ20Z75) += bq20z75.o
+obj-$(CONFIG_BATTERY_SBS) += sbs-battery.o
obj-$(CONFIG_BATTERY_BQ27x00) += bq27x00_battery.o
obj-$(CONFIG_BATTERY_DA9030) += da9030_battery.o
+obj-$(CONFIG_BATTERY_DA9052) += da9052-battery.o
obj-$(CONFIG_BATTERY_MAX17040) += max17040_battery.o
obj-$(CONFIG_BATTERY_MAX17042) += max17042_battery.o
obj-$(CONFIG_BATTERY_Z2) += z2_battery.o
@@ -35,6 +36,8 @@ obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o
obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o
obj-$(CONFIG_CHARGER_MAX8903) += max8903_charger.o
obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o
+obj-$(CONFIG_CHARGER_LP8727) += lp8727_charger.o
obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o
+obj-$(CONFIG_CHARGER_MANAGER) += charger-manager.o
obj-$(CONFIG_CHARGER_MAX8997) += max8997_charger.o
obj-$(CONFIG_CHARGER_MAX8998) += max8998_charger.o
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index bb16f5b..1ed6ea0 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -54,12 +54,17 @@
#define BQ27000_REG_RSOC 0x0B /* Relative State-of-Charge */
#define BQ27000_REG_ILMD 0x76 /* Initial last measured discharge */
-#define BQ27000_FLAG_CHGS BIT(7)
+#define BQ27000_FLAG_EDVF BIT(0) /* Final End-of-Discharge-Voltage flag */
+#define BQ27000_FLAG_EDV1 BIT(1) /* First End-of-Discharge-Voltage flag */
+#define BQ27000_FLAG_CI BIT(4) /* Capacity Inaccurate flag */
#define BQ27000_FLAG_FC BIT(5)
+#define BQ27000_FLAG_CHGS BIT(7) /* Charge state flag */
#define BQ27500_REG_SOC 0x2C
#define BQ27500_REG_DCAP 0x3C /* Design capacity */
#define BQ27500_FLAG_DSC BIT(0)
+#define BQ27500_FLAG_SOCF BIT(1) /* State-of-Charge threshold final */
+#define BQ27500_FLAG_SOC1 BIT(2) /* State-of-Charge threshold 1 */
#define BQ27500_FLAG_FC BIT(9)
#define BQ27000_RS 20 /* Resistor sense */
@@ -79,9 +84,8 @@ struct bq27x00_reg_cache {
int charge_full;
int cycle_count;
int capacity;
+ int energy;
int flags;
-
- int current_now;
};
struct bq27x00_device_info {
@@ -108,6 +112,7 @@ static enum power_supply_property bq27x00_battery_props[] = {
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_LEVEL,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
@@ -149,7 +154,7 @@ static int bq27x00_battery_read_rsoc(struct bq27x00_device_info *di)
rsoc = bq27x00_read(di, BQ27000_REG_RSOC, true);
if (rsoc < 0)
- dev_err(di->dev, "error reading relative State-of-Charge\n");
+ dev_dbg(di->dev, "error reading relative State-of-Charge\n");
return rsoc;
}
@@ -164,7 +169,8 @@ static int bq27x00_battery_read_charge(struct bq27x00_device_info *di, u8 reg)
charge = bq27x00_read(di, reg, false);
if (charge < 0) {
- dev_err(di->dev, "error reading nominal available capacity\n");
+ dev_dbg(di->dev, "error reading charge register %02x: %d\n",
+ reg, charge);
return charge;
}
@@ -208,7 +214,7 @@ static int bq27x00_battery_read_ilmd(struct bq27x00_device_info *di)
ilmd = bq27x00_read(di, BQ27000_REG_ILMD, true);
if (ilmd < 0) {
- dev_err(di->dev, "error reading initial last measured discharge\n");
+ dev_dbg(di->dev, "error reading initial last measured discharge\n");
return ilmd;
}
@@ -221,6 +227,50 @@ static int bq27x00_battery_read_ilmd(struct bq27x00_device_info *di)
}
/*
+ * Return the battery Available energy in µWh
+ * Or < 0 if something fails.
+ */
+static int bq27x00_battery_read_energy(struct bq27x00_device_info *di)
+{
+ int ae;
+
+ ae = bq27x00_read(di, BQ27x00_REG_AE, false);
+ if (ae < 0) {
+ dev_dbg(di->dev, "error reading available energy\n");
+ return ae;
+ }
+
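+ /* bq27500 reports mWh; on older chips the unit depends on the sense resistor */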
+ if (di->chip == BQ27500)
+ ae *= 1000;
+ else
+ ae = ae * 29200 / BQ27000_RS;
+
+ return ae;
+}
+
+/*
+ * Return the battery temperature in tenths of a degree Celsius
+ * Or < 0 if something fails.
+ */
+static int bq27x00_battery_read_temperature(struct bq27x00_device_info *di)
+{
+ int temp;
+
+ temp = bq27x00_read(di, BQ27x00_REG_TEMP, false);
+ if (temp < 0) {
+ dev_err(di->dev, "error reading temperature\n");
+ return temp;
+ }
+
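+ /* Convert to tenths of a degree Celsius: 0.1 K units on bq27500, 0.25 K otherwise */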
+ if (di->chip == BQ27500)
+ temp -= 2731;
+ else
+ temp = ((temp * 5) - 5463) / 2;
+
+ return temp;
+}
+
+/*
* Return the battery Cycle count total
* Or < 0 if something fails.
*/
@@ -245,7 +295,8 @@ static int bq27x00_battery_read_time(struct bq27x00_device_info *di, u8 reg)
tval = bq27x00_read(di, reg, false);
if (tval < 0) {
- dev_err(di->dev, "error reading register %02x: %d\n", reg, tval);
+ dev_dbg(di->dev, "error reading time register %02x: %d\n",
+ reg, tval);
return tval;
}
@@ -260,27 +311,33 @@ static void bq27x00_update(struct bq27x00_device_info *di)
struct bq27x00_reg_cache cache = {0, };
bool is_bq27500 = di->chip == BQ27500;
- cache.flags = bq27x00_read(di, BQ27x00_REG_FLAGS, is_bq27500);
+ cache.flags = bq27x00_read(di, BQ27x00_REG_FLAGS, !is_bq27500);
if (cache.flags >= 0) {
- cache.capacity = bq27x00_battery_read_rsoc(di);
- cache.temperature = bq27x00_read(di, BQ27x00_REG_TEMP, false);
- cache.time_to_empty = bq27x00_battery_read_time(di, BQ27x00_REG_TTE);
- cache.time_to_empty_avg = bq27x00_battery_read_time(di, BQ27x00_REG_TTECP);
- cache.time_to_full = bq27x00_battery_read_time(di, BQ27x00_REG_TTF);
- cache.charge_full = bq27x00_battery_read_lmd(di);
+ if (!is_bq27500 && (cache.flags & BQ27000_FLAG_CI)) {
+ dev_info(di->dev, "battery is not calibrated! ignoring capacity values\n");
+ cache.capacity = -ENODATA;
+ cache.energy = -ENODATA;
+ cache.time_to_empty = -ENODATA;
+ cache.time_to_empty_avg = -ENODATA;
+ cache.time_to_full = -ENODATA;
+ cache.charge_full = -ENODATA;
+ } else {
+ cache.capacity = bq27x00_battery_read_rsoc(di);
+ cache.energy = bq27x00_battery_read_energy(di);
+ cache.time_to_empty = bq27x00_battery_read_time(di, BQ27x00_REG_TTE);
+ cache.time_to_empty_avg = bq27x00_battery_read_time(di, BQ27x00_REG_TTECP);
+ cache.time_to_full = bq27x00_battery_read_time(di, BQ27x00_REG_TTF);
+ cache.charge_full = bq27x00_battery_read_lmd(di);
+ }
+ cache.temperature = bq27x00_battery_read_temperature(di);
cache.cycle_count = bq27x00_battery_read_cyct(di);
- if (!is_bq27500)
- cache.current_now = bq27x00_read(di, BQ27x00_REG_AI, false);
-
/* We only have to read charge design full once */
if (di->charge_design_full <= 0)
di->charge_design_full = bq27x00_battery_read_ilmd(di);
}
- /* Ignore current_now which is a snapshot of the current battery state
- * and is likely to be different even between two consecutive reads */
- if (memcmp(&di->cache, &cache, sizeof(cache) - sizeof(int)) != 0) {
+ if (memcmp(&di->cache, &cache, sizeof(cache)) != 0) {
di->cache = cache;
power_supply_changed(&di->bat);
}
@@ -302,25 +359,6 @@ static void bq27x00_battery_poll(struct work_struct *work)
}
}
-
-/*
- * Return the battery temperature in tenths of degree Celsius
- * Or < 0 if something fails.
- */
-static int bq27x00_battery_temperature(struct bq27x00_device_info *di,
- union power_supply_propval *val)
-{
- if (di->cache.temperature < 0)
- return di->cache.temperature;
-
- if (di->chip == BQ27500)
- val->intval = di->cache.temperature - 2731;
- else
- val->intval = ((di->cache.temperature * 5) - 5463) / 2;
-
- return 0;
-}
-
/*
* Return the battery average current in µA
* Note that current can be negative signed as well
@@ -330,20 +368,20 @@ static int bq27x00_battery_current(struct bq27x00_device_info *di,
union power_supply_propval *val)
{
int curr;
+ int flags;
- if (di->chip == BQ27500)
- curr = bq27x00_read(di, BQ27x00_REG_AI, false);
- else
- curr = di->cache.current_now;
-
- if (curr < 0)
+ curr = bq27x00_read(di, BQ27x00_REG_AI, false);
+ if (curr < 0) {
+ dev_err(di->dev, "error reading current\n");
return curr;
+ }
if (di->chip == BQ27500) {
/* bq27500 returns signed value */
val->intval = (int)((s16)curr) * 1000;
} else {
- if (di->cache.flags & BQ27000_FLAG_CHGS) {
+ flags = bq27x00_read(di, BQ27x00_REG_FLAGS, false);
+ if (flags & BQ27000_FLAG_CHGS) {
dev_dbg(di->dev, "negative current!\n");
curr = -curr;
}
@@ -382,50 +420,56 @@ static int bq27x00_battery_status(struct bq27x00_device_info *di,
return 0;
}
-/*
- * Return the battery Voltage in milivolts
- * Or < 0 if something fails.
- */
-static int bq27x00_battery_voltage(struct bq27x00_device_info *di,
+static int bq27x00_battery_capacity_level(struct bq27x00_device_info *di,
union power_supply_propval *val)
{
- int volt;
+ int level;
- volt = bq27x00_read(di, BQ27x00_REG_VOLT, false);
- if (volt < 0)
- return volt;
+ if (di->chip == BQ27500) {
+ if (di->cache.flags & BQ27500_FLAG_FC)
+ level = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+ else if (di->cache.flags & BQ27500_FLAG_SOC1)
+ level = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+ else if (di->cache.flags & BQ27500_FLAG_SOCF)
+ level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+ else
+ level = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+ } else {
+ if (di->cache.flags & BQ27000_FLAG_FC)
+ level = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+ else if (di->cache.flags & BQ27000_FLAG_EDV1)
+ level = POWER_SUPPLY_CAPACITY_LEVEL_LOW;
+ else if (di->cache.flags & BQ27000_FLAG_EDVF)
+ level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
+ else
+ level = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
+ }
- val->intval = volt * 1000;
+ val->intval = level;
return 0;
}
/*
- * Return the battery Available energy in µWh
+ * Return the battery Voltage in millivolts
* Or < 0 if something fails.
*/
-static int bq27x00_battery_energy(struct bq27x00_device_info *di,
+static int bq27x00_battery_voltage(struct bq27x00_device_info *di,
union power_supply_propval *val)
{
- int ae;
+ int volt;
- ae = bq27x00_read(di, BQ27x00_REG_AE, false);
- if (ae < 0) {
- dev_err(di->dev, "error reading available energy\n");
- return ae;
+ volt = bq27x00_read(di, BQ27x00_REG_VOLT, false);
+ if (volt < 0) {
+ dev_err(di->dev, "error reading voltage\n");
+ return volt;
}
- if (di->chip == BQ27500)
- ae *= 1000;
- else
- ae = ae * 29200 / BQ27000_RS;
-
- val->intval = ae;
+ val->intval = volt * 1000;
return 0;
}
-
static int bq27x00_simple_value(int value,
union power_supply_propval *val)
{
@@ -473,8 +517,11 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CAPACITY:
ret = bq27x00_simple_value(di->cache.capacity, val);
break;
+ case POWER_SUPPLY_PROP_CAPACITY_LEVEL:
+ ret = bq27x00_battery_capacity_level(di, val);
+ break;
case POWER_SUPPLY_PROP_TEMP:
- ret = bq27x00_battery_temperature(di, val);
+ ret = bq27x00_simple_value(di->cache.temperature, val);
break;
case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
ret = bq27x00_simple_value(di->cache.time_to_empty, val);
@@ -501,7 +548,7 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
ret = bq27x00_simple_value(di->cache.cycle_count, val);
break;
case POWER_SUPPLY_PROP_ENERGY_NOW:
- ret = bq27x00_battery_energy(di, val);
+ ret = bq27x00_simple_value(di->cache.energy, val);
break;
default:
return -EINVAL;
@@ -546,6 +593,14 @@ static int bq27x00_powersupply_init(struct bq27x00_device_info *di)
static void bq27x00_powersupply_unregister(struct bq27x00_device_info *di)
{
+ /*
+ * power_supply_unregister calls bq27x00_battery_get_property, which
+ * calls bq27x00_battery_poll.
+ * Make sure that bq27x00_battery_poll will not call
+ * schedule_delayed_work again after unregister (which would cause an oops).
+ */
+ poll_interval = 0;
+
cancel_delayed_work_sync(&di->work);
power_supply_unregister(&di->bat);
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
new file mode 100644
index 0000000..88fd971
--- /dev/null
+++ b/drivers/power/charger-manager.c
@@ -0,0 +1,1072 @@
+/*
+ * Copyright (C) 2011 Samsung Electronics Co., Ltd.
+ * MyungJoo Ham <myungjoo.ham@samsung.com>
+ *
+ * This driver enables monitoring of battery health and control of the
+ * charger during suspend-to-mem.
+ * Charger manager depends on other devices, so register it later than
+ * the devices it depends on.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+**/
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/rtc.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+#include <linux/power/charger-manager.h>
+#include <linux/regulator/consumer.h>
+
+/*
+ * Regard a delay of CM_JIFFIES_SMALL jiffies as small enough to ignore,
+ * so that delayed works scheduled with CM_JIFFIES_SMALL effectively run
+ * without any delay.
+ */
+#define CM_JIFFIES_SMALL (2)
+
+/* If y is valid (> 0) and smaller than x, do x = y */
+#define CM_MIN_VALID(x, y) x = (((y > 0) && ((x) > (y))) ? (y) : (x))
+
+/*
+ * Regard CM_RTC_SMALL (seconds) as small enough to ignore the error in
+ * invoking the RTC alarm. It should be 2 or larger.
+ */
+#define CM_RTC_SMALL (2)
+
+#define UEVENT_BUF_SIZE 32
+
+static LIST_HEAD(cm_list);
+static DEFINE_MUTEX(cm_list_mtx);
+
+/* About in-suspend (suspend-again) monitoring */
+static struct rtc_device *rtc_dev;
+/*
+ * Backup RTC alarm
+ * Save the wakeup alarm before entering suspend-to-RAM
+ */
+static struct rtc_wkalrm rtc_wkalarm_save;
+/* Backup RTC alarm time in terms of seconds since 01-01-1970 00:00:00 */
+static unsigned long rtc_wkalarm_save_time;
+static bool cm_suspended;
+static bool cm_rtc_set;
+static unsigned long cm_suspend_duration_ms;
+
+/* Global charger-manager description */
+static struct charger_global_desc *g_desc; /* init with setup_charger_manager */
+
+/**
+ * is_batt_present - See if the battery is present in place.
+ * @cm: the Charger Manager representing the battery.
+ */
+static bool is_batt_present(struct charger_manager *cm)
+{
+ union power_supply_propval val;
+ bool present = false;
+ int i, ret;
+
+ switch (cm->desc->battery_present) {
+ case CM_FUEL_GAUGE:
+ ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+ POWER_SUPPLY_PROP_PRESENT, &val);
+ if (ret == 0 && val.intval)
+ present = true;
+ break;
+ case CM_CHARGER_STAT:
+ for (i = 0; cm->charger_stat[i]; i++) {
+ ret = cm->charger_stat[i]->get_property(
+ cm->charger_stat[i],
+ POWER_SUPPLY_PROP_PRESENT, &val);
+ if (ret == 0 && val.intval) {
+ present = true;
+ break;
+ }
+ }
+ break;
+ }
+
+ return present;
+}
+
+/**
+ * is_ext_pwr_online - See if an external power source is attached to charge
+ * @cm: the Charger Manager representing the battery.
+ *
+ * Returns true if at least one of the chargers of the battery has an external
+ * power source attached to charge the battery regardless of whether it is
+ * actually charging or not.
+ */
+static bool is_ext_pwr_online(struct charger_manager *cm)
+{
+ union power_supply_propval val;
+ bool online = false;
+ int i, ret;
+
+ /* If at least one of them has one, it's yes. */
+ for (i = 0; cm->charger_stat[i]; i++) {
+ ret = cm->charger_stat[i]->get_property(
+ cm->charger_stat[i],
+ POWER_SUPPLY_PROP_ONLINE, &val);
+ if (ret == 0 && val.intval) {
+ online = true;
+ break;
+ }
+ }
+
+ return online;
+}
+
+/**
+ * get_batt_uV - Get the voltage level of the battery
+ * @cm: the Charger Manager representing the battery.
+ * @uV: the voltage level returned.
+ *
+ * Returns 0 if there is no error.
+ * Returns a negative value on error.
+ */
+static int get_batt_uV(struct charger_manager *cm, int *uV)
+{
+ union power_supply_propval val;
+ int ret;
+
+ if (cm->fuel_gauge)
+ ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW, &val);
+ else
+ return -ENODEV;
+
+ if (ret)
+ return ret;
+
+ *uV = val.intval;
+ return 0;
+}
+
+/**
+ * is_charging - Returns true if the battery is being charged.
+ * @cm: the Charger Manager representing the battery.
+ */
+static bool is_charging(struct charger_manager *cm)
+{
+ int i, ret;
+ bool charging = false;
+ union power_supply_propval val;
+
+ /* If there is no battery, it cannot be charged */
+ if (!is_batt_present(cm))
+ return false;
+
+ /* If at least one of the charger is charging, return yes */
+ for (i = 0; cm->charger_stat[i]; i++) {
+ /* 1. The charger should not be DISABLED */
+ if (cm->emergency_stop)
+ continue;
+ if (!cm->charger_enabled)
+ continue;
+
+ /* 2. The charger should be online (ext-power) */
+ ret = cm->charger_stat[i]->get_property(
+ cm->charger_stat[i],
+ POWER_SUPPLY_PROP_ONLINE, &val);
+ if (ret) {
+ dev_warn(cm->dev, "Cannot read ONLINE value from %s.\n",
+ cm->desc->psy_charger_stat[i]);
+ continue;
+ }
+ if (val.intval == 0)
+ continue;
+
+ /*
+ * 3. The charger should not be FULL, DISCHARGING,
+ * or NOT_CHARGING.
+ */
+ ret = cm->charger_stat[i]->get_property(
+ cm->charger_stat[i],
+ POWER_SUPPLY_PROP_STATUS, &val);
+ if (ret) {
+ dev_warn(cm->dev, "Cannot read STATUS value from %s.\n",
+ cm->desc->psy_charger_stat[i]);
+ continue;
+ }
+ if (val.intval == POWER_SUPPLY_STATUS_FULL ||
+ val.intval == POWER_SUPPLY_STATUS_DISCHARGING ||
+ val.intval == POWER_SUPPLY_STATUS_NOT_CHARGING)
+ continue;
+
+ /* Then, this is charging. */
+ charging = true;
+ break;
+ }
+
+ return charging;
+}
+
+/**
+ * is_polling_required - Return true if polling is required for this CM.
+ * @cm: the Charger Manager representing the battery.
+ */
+static bool is_polling_required(struct charger_manager *cm)
+{
+ switch (cm->desc->polling_mode) {
+ case CM_POLL_DISABLE:
+ return false;
+ case CM_POLL_ALWAYS:
+ return true;
+ case CM_POLL_EXTERNAL_POWER_ONLY:
+ return is_ext_pwr_online(cm);
+ case CM_POLL_CHARGING_ONLY:
+ return is_charging(cm);
+ default:
+ dev_warn(cm->dev, "Incorrect polling_mode (%d)\n",
+ cm->desc->polling_mode);
+ }
+
+ return false;
+}
+
+/**
+ * try_charger_enable - Enable/Disable chargers altogether
+ * @cm: the Charger Manager representing the battery.
+ * @enable: true: enable / false: disable
+ *
+ * Note that Charger Manager keeps the chargers enabled regardless of whether
+ * they are actually charging (e.g., the battery may be full or there may be
+ * no external power source), except when CM needs to disable them forcibly
+ * because of an emergency, e.g., when the battery is overheated or too cold.
+ */
+static int try_charger_enable(struct charger_manager *cm, bool enable)
+{
+ int err = 0, i;
+ struct charger_desc *desc = cm->desc;
+
+ /* Ignore redundant commands */
+ if (enable && cm->charger_enabled)
+ return 0;
+ if (!enable && !cm->charger_enabled)
+ return 0;
+
+ if (enable) {
+ if (cm->emergency_stop)
+ return -EAGAIN;
+ err = regulator_bulk_enable(desc->num_charger_regulators,
+ desc->charger_regulators);
+ } else {
+ /*
+ * Abnormal battery state - Stop charging forcibly,
+ * even if the charger was enabled elsewhere
+ */
+ err = regulator_bulk_disable(desc->num_charger_regulators,
+ desc->charger_regulators);
+
+ for (i = 0; i < desc->num_charger_regulators; i++) {
+ if (regulator_is_enabled(
+ desc->charger_regulators[i].consumer)) {
+ regulator_force_disable(
+ desc->charger_regulators[i].consumer);
+ dev_warn(cm->dev,
+ "Disable regulator(%s) forcibly.\n",
+ desc->charger_regulators[i].supply);
+ }
+ }
+ }
+
+ if (!err)
+ cm->charger_enabled = enable;
+
+ return err;
+}
+
+/**
+ * uevent_notify - Let users know something has changed.
+ * @cm: the Charger Manager representing the battery.
+ * @event: the event string.
+ *
+ * A NULL @event means that uevent_notify is being called from the resume
+ * path; cm_suspended must already be reset to false at that point so that
+ * the event recorded during suspend can be delivered to users. While
+ * suspended, uevent_notify does not notify users directly but only records
+ * the most recent event, which is then delivered after resume.
+ */
+static void uevent_notify(struct charger_manager *cm, const char *event)
+{
+ static char env_str[UEVENT_BUF_SIZE + 1] = "";
+ static char env_str_save[UEVENT_BUF_SIZE + 1] = "";
+
+ if (cm_suspended) {
+ /* Nothing in suspended-event buffer */
+ if (env_str_save[0] == 0) {
+ if (!strncmp(env_str, event, UEVENT_BUF_SIZE))
+ return; /* status not changed */
+ strncpy(env_str_save, event, UEVENT_BUF_SIZE);
+ return;
+ }
+
+ if (!strncmp(env_str_save, event, UEVENT_BUF_SIZE))
+ return; /* Duplicated. */
+ else
+ strncpy(env_str_save, event, UEVENT_BUF_SIZE);
+
+ return;
+ }
+
+ if (event == NULL) {
+ /* No messages pending */
+ if (!env_str_save[0])
+ return;
+
+ strncpy(env_str, env_str_save, UEVENT_BUF_SIZE);
+ kobject_uevent(&cm->dev->kobj, KOBJ_CHANGE);
+ env_str_save[0] = 0;
+
+ return;
+ }
+
+ /* status not changed */
+ if (!strncmp(env_str, event, UEVENT_BUF_SIZE))
+ return;
+
+ /* save the status and notify the update */
+ strncpy(env_str, event, UEVENT_BUF_SIZE);
+ kobject_uevent(&cm->dev->kobj, KOBJ_CHANGE);
+
+ dev_info(cm->dev, "%s\n", event);
+}
+
+/**
+ * _cm_monitor - Monitor the temperature and return true for exceptions.
+ * @cm: the Charger Manager representing the battery.
+ *
+ * Returns true if there is an event to notify for the battery.
+ * (True if the status of "emergency_stop" changes)
+ */
+static bool _cm_monitor(struct charger_manager *cm)
+{
+ struct charger_desc *desc = cm->desc;
+ int temp = desc->temperature_out_of_range(&cm->last_temp_mC);
+
+ dev_dbg(cm->dev, "monitoring (%2.2d.%3.3dC)\n",
+ cm->last_temp_mC / 1000, cm->last_temp_mC % 1000);
+
+ /* State unchanged: already stopped or already charging */
+ if (!!temp == !!cm->emergency_stop)
+ return false;
+
+ if (temp) {
+ cm->emergency_stop = temp;
+ if (!try_charger_enable(cm, false)) {
+ if (temp > 0)
+ uevent_notify(cm, "OVERHEAT");
+ else
+ uevent_notify(cm, "COLD");
+ }
+ } else {
+ cm->emergency_stop = 0;
+ if (!try_charger_enable(cm, true))
+ uevent_notify(cm, "CHARGING");
+ }
+
+ return true;
+}
+
+/**
+ * cm_monitor - Monitor every battery.
+ *
+ * Returns true if there is an event to notify from any of the batteries.
+ * (True if the status of "emergency_stop" changes)
+ */
+static bool cm_monitor(void)
+{
+ bool stop = false;
+ struct charger_manager *cm;
+
+ mutex_lock(&cm_list_mtx);
+
+ list_for_each_entry(cm, &cm_list, entry)
+ stop = stop || _cm_monitor(cm);
+
+ mutex_unlock(&cm_list_mtx);
+
+ return stop;
+}
+
+static int charger_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct charger_manager *cm = container_of(psy,
+ struct charger_manager, charger_psy);
+ struct charger_desc *desc = cm->desc;
+ int i, ret = 0, uV;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (is_charging(cm))
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ else if (is_ext_pwr_online(cm))
+ val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ else
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ if (cm->emergency_stop > 0)
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else if (cm->emergency_stop < 0)
+ val->intval = POWER_SUPPLY_HEALTH_COLD;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ if (is_batt_present(cm))
+ val->intval = 1;
+ else
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ ret = get_batt_uV(cm, &i);
+ val->intval = i;
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+ POWER_SUPPLY_PROP_CURRENT_NOW, val);
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ /* in tenths of a degree Celsius */
+ if (cm->last_temp_mC == INT_MIN)
+ desc->temperature_out_of_range(&cm->last_temp_mC);
+ val->intval = cm->last_temp_mC / 100;
+ if (!desc->measure_battery_temp)
+ ret = -ENODEV;
+ break;
+ case POWER_SUPPLY_PROP_TEMP_AMBIENT:
+ /* in tenths of a degree Celsius */
+ if (cm->last_temp_mC == INT_MIN)
+ desc->temperature_out_of_range(&cm->last_temp_mC);
+ val->intval = cm->last_temp_mC / 100;
+ if (desc->measure_battery_temp)
+ ret = -ENODEV;
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ if (!cm->fuel_gauge) {
+ ret = -ENODEV;
+ break;
+ }
+
+ if (!is_batt_present(cm)) {
+ /* There is no battery. Assume 100% */
+ val->intval = 100;
+ break;
+ }
+
+ ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+ POWER_SUPPLY_PROP_CAPACITY, val);
+ if (ret)
+ break;
+
+ if (val->intval > 100) {
+ val->intval = 100;
+ break;
+ }
+ if (val->intval < 0)
+ val->intval = 0;
+
+ /* Do not adjust SOC while charging: the measured voltage is inflated */
+ if (is_charging(cm))
+ break;
+
+ /*
+ * If the capacity value is inconsistent, calibrate it based on
+ * the battery voltage values and the thresholds given in desc
+ */
+ ret = get_batt_uV(cm, &uV);
+ if (ret) {
+ /* Voltage information not available. No calibration */
+ ret = 0;
+ break;
+ }
+
+ if (desc->fullbatt_uV > 0 && uV >= desc->fullbatt_uV &&
+ !is_charging(cm)) {
+ val->intval = 100;
+ break;
+ }
+
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ if (is_ext_pwr_online(cm))
+ val->intval = 1;
+ else
+ val->intval = 0;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ if (cm->fuel_gauge) {
+ if (cm->fuel_gauge->get_property(cm->fuel_gauge,
+ POWER_SUPPLY_PROP_CHARGE_FULL, val) == 0)
+ break;
+ }
+
+ if (is_ext_pwr_online(cm)) {
+ /* Not full if it's charging. */
+ if (is_charging(cm)) {
+ val->intval = 0;
+ break;
+ }
+ /*
+ * Full if it's powered but not charging and
+ * not forcibly stopped by an emergency
+ */
+ if (!cm->emergency_stop) {
+ val->intval = 1;
+ break;
+ }
+ }
+
+ /* Full if it's over the fullbatt voltage */
+ ret = get_batt_uV(cm, &uV);
+ if (!ret && desc->fullbatt_uV > 0 && uV >= desc->fullbatt_uV &&
+ !is_charging(cm)) {
+ val->intval = 1;
+ break;
+ }
+
+ /* Full if the cap is 100 */
+ if (cm->fuel_gauge) {
+ ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+ POWER_SUPPLY_PROP_CAPACITY, val);
+ if (!ret && val->intval >= 100 && !is_charging(cm)) {
+ val->intval = 1;
+ break;
+ }
+ }
+
+ val->intval = 0;
+ ret = 0;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ if (is_charging(cm)) {
+ ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ val);
+ if (ret) {
+ val->intval = 1;
+ ret = 0;
+ } else {
+ /* If CHARGE_NOW is supplied, use it */
+ val->intval = (val->intval > 0) ?
+ val->intval : 1;
+ }
+ } else {
+ val->intval = 0;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ return ret;
+}
+
+#define NUM_CHARGER_PSY_OPTIONAL (4)
+static enum power_supply_property default_charger_props[] = {
+ /* Guaranteed to provide */
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ /*
+ * Optional properties are:
+ * POWER_SUPPLY_PROP_CHARGE_NOW,
+ * POWER_SUPPLY_PROP_CURRENT_NOW,
+ * POWER_SUPPLY_PROP_TEMP, and
+ * POWER_SUPPLY_PROP_TEMP_AMBIENT,
+ */
+};
+
+static struct power_supply psy_default = {
+ .name = "battery",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = default_charger_props,
+ .num_properties = ARRAY_SIZE(default_charger_props),
+ .get_property = charger_get_property,
+};
+
+/**
+ * cm_setup_timer - Set up a wakeup alarm for in-suspend monitoring
+ * (suspend_again).
+ *
+ * Returns true if the alarm is set for Charger Manager to use.
+ * Returns false if
+ * cm_setup_timer fails to set an alarm,
+ * cm_setup_timer does not need to set an alarm for Charger Manager,
+ * or an alarm previously configured is to be used.
+ */
+static bool cm_setup_timer(void)
+{
+ struct charger_manager *cm;
+ unsigned int wakeup_ms = UINT_MAX;
+ bool ret = false;
+
+ mutex_lock(&cm_list_mtx);
+
+ list_for_each_entry(cm, &cm_list, entry) {
+ /* Skip if polling is not required for this CM */
+ if (!is_polling_required(cm) && !cm->emergency_stop)
+ continue;
+ if (cm->desc->polling_interval_ms == 0)
+ continue;
+ CM_MIN_VALID(wakeup_ms, cm->desc->polling_interval_ms);
+ }
+
+ mutex_unlock(&cm_list_mtx);
+
+ if (wakeup_ms < UINT_MAX && wakeup_ms > 0) {
+ pr_info("Charger Manager wakeup timer: %u ms.\n", wakeup_ms);
+ if (rtc_dev) {
+ struct rtc_wkalrm tmp;
+ unsigned long time, now;
+ unsigned long add = DIV_ROUND_UP(wakeup_ms, 1000);
+
+ /*
+ * Set alarm with the polling interval (wakeup_ms)
+ * except when rtc_wkalarm_save comes first.
+ * However, the alarm time should be NOW +
+ * CM_RTC_SMALL or later.
+ */
+ tmp.enabled = 1;
+ rtc_read_time(rtc_dev, &tmp.time);
+ rtc_tm_to_time(&tmp.time, &now);
+ if (add < CM_RTC_SMALL)
+ add = CM_RTC_SMALL;
+ time = now + add;
+
+ ret = true;
+
+ if (rtc_wkalarm_save.enabled &&
+ rtc_wkalarm_save_time &&
+ rtc_wkalarm_save_time < time) {
+ if (rtc_wkalarm_save_time < now + CM_RTC_SMALL)
+ time = now + CM_RTC_SMALL;
+ else
+ time = rtc_wkalarm_save_time;
+
+ /* The timer is not appointed by CM */
+ ret = false;
+ }
+
+ pr_info("Waking up after %lu secs.\n",
+ time - now);
+
+ rtc_time_to_tm(time, &tmp.time);
+ rtc_set_alarm(rtc_dev, &tmp);
+ cm_suspend_duration_ms += wakeup_ms;
+ return ret;
+ }
+ }
+
+ if (rtc_dev)
+ rtc_set_alarm(rtc_dev, &rtc_wkalarm_save);
+ return false;
+}
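+
+/*
+ * Illustrative sketch, not part of this patch: cm_setup_timer() relies on
+ * CM_RTC_SMALL and CM_MIN_VALID, which are assumed to be defined in
+ * charger-manager.h.  Definitions consistent with the usage above could be:
+ *
+ *	#define CM_RTC_SMALL	(2)	/- seconds (hypothetical value) -/
+ *	#define CM_MIN_VALID(min, new)	\
+ *		((min) = ((new) < (min)) ? (new) : (min))
+ *
+ * With two managers polling every 30000 ms and 5000 ms, wakeup_ms becomes
+ * 5000 and the RTC alarm is armed roughly 5 seconds from now.
+ */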
+
+/**
+ * cm_suspend_again - Determine whether to suspend again or not
+ *
+ * Returns true if the system should be suspended again
+ * Returns false if the system should be woken up
+ */
+bool cm_suspend_again(void)
+{
+ struct charger_manager *cm;
+ bool ret = false;
+
+ if (!g_desc || !g_desc->rtc_only_wakeup || !g_desc->rtc_only_wakeup() ||
+ !cm_rtc_set)
+ return false;
+
+ if (cm_monitor())
+ goto out;
+
+ ret = true;
+ mutex_lock(&cm_list_mtx);
+ list_for_each_entry(cm, &cm_list, entry) {
+ if (cm->status_save_ext_pwr_inserted != is_ext_pwr_online(cm) ||
+ cm->status_save_batt != is_batt_present(cm))
+ ret = false;
+ }
+ mutex_unlock(&cm_list_mtx);
+
+ cm_rtc_set = cm_setup_timer();
+out:
+ /* Wake up if the timer not appointed by CM is about to go off */
+ if (rtc_wkalarm_save.enabled) {
+ unsigned long now;
+ struct rtc_time tmp;
+
+ rtc_read_time(rtc_dev, &tmp);
+ rtc_tm_to_time(&tmp, &now);
+
+ if (rtc_wkalarm_save_time &&
+ now + CM_RTC_SMALL >= rtc_wkalarm_save_time)
+ return false;
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(cm_suspend_again);
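+
+/*
+ * Illustrative usage, an assumption rather than part of this patch: a
+ * platform can feed cm_suspend_again() into the suspend_again callback of
+ * its platform_suspend_ops so that a wakeup caused only by the CM RTC alarm
+ * puts the system straight back to sleep.  All my_platform_* names below
+ * are hypothetical.
+ *
+ *	static bool my_platform_suspend_again(void)
+ *	{
+ *		return cm_suspend_again();
+ *	}
+ *
+ *	static const struct platform_suspend_ops my_platform_suspend_ops = {
+ *		.valid		= suspend_valid_only_mem,
+ *		.enter		= my_platform_suspend_enter,
+ *		.suspend_again	= my_platform_suspend_again,
+ *	};
+ */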
+
+/**
+ * setup_charger_manager - initialize charger_global_desc data
+ * @gd: pointer to instance of charger_global_desc
+ */
+int setup_charger_manager(struct charger_global_desc *gd)
+{
+ if (!gd)
+ return -EINVAL;
+
+ if (rtc_dev)
+ rtc_class_close(rtc_dev);
+ rtc_dev = NULL;
+ g_desc = NULL;
+
+ if (!gd->rtc_only_wakeup) {
+ pr_err("The callback rtc_only_wakeup is not given.\n");
+ return -EINVAL;
+ }
+
+ if (gd->rtc_name) {
+ rtc_dev = rtc_class_open(gd->rtc_name);
+ if (IS_ERR_OR_NULL(rtc_dev)) {
+ rtc_dev = NULL;
+ /* Retry at probe. RTC may not be registered yet */
+ }
+ } else {
+ pr_warn("No wakeup timer is given for charger manager."
+ "In-suspend monitoring won't work.\n");
+ }
+
+ g_desc = gd;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(setup_charger_manager);
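+
+/*
+ * Illustrative board-file usage, an assumption rather than part of this
+ * patch; the my_* names are hypothetical:
+ *
+ *	static bool my_rtc_only_wakeup(void)
+ *	{
+ *		/- platform-specific check that only the RTC woke us up -/
+ *		return my_board_wakeup_was_rtc_only();
+ *	}
+ *
+ *	static struct charger_global_desc my_cm_global_desc = {
+ *		.rtc_name	 = "rtc0",
+ *		.rtc_only_wakeup = my_rtc_only_wakeup,
+ *	};
+ *
+ *	/- called once during board init, before charger-manager probes -/
+ *	setup_charger_manager(&my_cm_global_desc);
+ */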
+
+static int charger_manager_probe(struct platform_device *pdev)
+{
+ struct charger_desc *desc = dev_get_platdata(&pdev->dev);
+ struct charger_manager *cm;
+ int ret = 0, i = 0;
+ union power_supply_propval val;
+
+ if (g_desc && !rtc_dev && g_desc->rtc_name) {
+ rtc_dev = rtc_class_open(g_desc->rtc_name);
+ if (IS_ERR_OR_NULL(rtc_dev)) {
+ rtc_dev = NULL;
+ dev_err(&pdev->dev, "Cannot get RTC %s.\n",
+ g_desc->rtc_name);
+ ret = -ENODEV;
+ goto err_alloc;
+ }
+ }
+
+ if (!desc) {
+ dev_err(&pdev->dev, "No platform data (desc) found.\n");
+ ret = -ENODEV;
+ goto err_alloc;
+ }
+
+ cm = kzalloc(sizeof(struct charger_manager), GFP_KERNEL);
+ if (!cm) {
+ dev_err(&pdev->dev, "Cannot allocate memory.\n");
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+
+ /* Basic Values. Unspecified are Null or 0 */
+ cm->dev = &pdev->dev;
+ cm->desc = kzalloc(sizeof(struct charger_desc), GFP_KERNEL);
+ if (!cm->desc) {
+ dev_err(&pdev->dev, "Cannot allocate memory.\n");
+ ret = -ENOMEM;
+ goto err_alloc_desc;
+ }
+ memcpy(cm->desc, desc, sizeof(struct charger_desc));
+ cm->last_temp_mC = INT_MIN; /* denotes "unmeasured, yet" */
+
+ if (!desc->charger_regulators || desc->num_charger_regulators < 1) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "charger_regulators undefined.\n");
+ goto err_no_charger;
+ }
+
+ if (!desc->psy_charger_stat || !desc->psy_charger_stat[0]) {
+ dev_err(&pdev->dev, "No power supply defined.\n");
+ ret = -EINVAL;
+ goto err_no_charger_stat;
+ }
+
+ /* Counting index only */
+ while (desc->psy_charger_stat[i])
+ i++;
+
+ cm->charger_stat = kzalloc(sizeof(struct power_supply *) * (i + 1),
+ GFP_KERNEL);
+ if (!cm->charger_stat) {
+ ret = -ENOMEM;
+ goto err_no_charger_stat;
+ }
+
+ for (i = 0; desc->psy_charger_stat[i]; i++) {
+ cm->charger_stat[i] = power_supply_get_by_name(
+ desc->psy_charger_stat[i]);
+ if (!cm->charger_stat[i]) {
+ dev_err(&pdev->dev, "Cannot find power supply "
+ "\"%s\"\n",
+ desc->psy_charger_stat[i]);
+ ret = -ENODEV;
+ goto err_chg_stat;
+ }
+ }
+
+ cm->fuel_gauge = power_supply_get_by_name(desc->psy_fuel_gauge);
+ if (!cm->fuel_gauge) {
+ dev_err(&pdev->dev, "Cannot find power supply \"%s\"\n",
+ desc->psy_fuel_gauge);
+ ret = -ENODEV;
+ goto err_chg_stat;
+ }
+
+ if (desc->polling_interval_ms == 0 ||
+ msecs_to_jiffies(desc->polling_interval_ms) <= CM_JIFFIES_SMALL) {
+ dev_err(&pdev->dev, "polling_interval_ms is too small\n");
+ ret = -EINVAL;
+ goto err_chg_stat;
+ }
+
+ if (!desc->temperature_out_of_range) {
+ dev_err(&pdev->dev, "there is no temperature_out_of_range\n");
+ ret = -EINVAL;
+ goto err_chg_stat;
+ }
+
+ platform_set_drvdata(pdev, cm);
+
+ memcpy(&cm->charger_psy, &psy_default,
+ sizeof(psy_default));
+ if (!desc->psy_name) {
+ strncpy(cm->psy_name_buf, psy_default.name,
+ PSY_NAME_MAX);
+ } else {
+ strncpy(cm->psy_name_buf, desc->psy_name, PSY_NAME_MAX);
+ }
+ cm->charger_psy.name = cm->psy_name_buf;
+
+ /* Allocate for psy properties because they may vary */
+ cm->charger_psy.properties = kzalloc(sizeof(enum power_supply_property)
+ * (ARRAY_SIZE(default_charger_props) +
+ NUM_CHARGER_PSY_OPTIONAL),
+ GFP_KERNEL);
+ if (!cm->charger_psy.properties) {
+ dev_err(&pdev->dev, "Cannot allocate for psy properties.\n");
+ ret = -ENOMEM;
+ goto err_chg_stat;
+ }
+ memcpy(cm->charger_psy.properties, default_charger_props,
+ sizeof(enum power_supply_property) *
+ ARRAY_SIZE(default_charger_props));
+ cm->charger_psy.num_properties = psy_default.num_properties;
+
+ /* Find which optional psy-properties are available */
+ if (!cm->fuel_gauge->get_property(cm->fuel_gauge,
+ POWER_SUPPLY_PROP_CHARGE_NOW, &val)) {
+ cm->charger_psy.properties[cm->charger_psy.num_properties] =
+ POWER_SUPPLY_PROP_CHARGE_NOW;
+ cm->charger_psy.num_properties++;
+ }
+ if (!cm->fuel_gauge->get_property(cm->fuel_gauge,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
+ &val)) {
+ cm->charger_psy.properties[cm->charger_psy.num_properties] =
+ POWER_SUPPLY_PROP_CURRENT_NOW;
+ cm->charger_psy.num_properties++;
+ }
+ if (!desc->measure_battery_temp) {
+ cm->charger_psy.properties[cm->charger_psy.num_properties] =
+ POWER_SUPPLY_PROP_TEMP_AMBIENT;
+ cm->charger_psy.num_properties++;
+ }
+ if (desc->measure_battery_temp) {
+ cm->charger_psy.properties[cm->charger_psy.num_properties] =
+ POWER_SUPPLY_PROP_TEMP;
+ cm->charger_psy.num_properties++;
+ }
+
+ ret = power_supply_register(NULL, &cm->charger_psy);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot register charger-manager with"
+ " name \"%s\".\n", cm->charger_psy.name);
+ goto err_register;
+ }
+
+ ret = regulator_bulk_get(&pdev->dev, desc->num_charger_regulators,
+ desc->charger_regulators);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot get charger regulators.\n");
+ goto err_bulk_get;
+ }
+
+ ret = try_charger_enable(cm, true);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot enable charger regulators\n");
+ goto err_chg_enable;
+ }
+
+ /* Add to the list */
+ mutex_lock(&cm_list_mtx);
+ list_add(&cm->entry, &cm_list);
+ mutex_unlock(&cm_list_mtx);
+
+ return 0;
+
+err_chg_enable:
+ if (desc->charger_regulators)
+ regulator_bulk_free(desc->num_charger_regulators,
+ desc->charger_regulators);
+err_bulk_get:
+ power_supply_unregister(&cm->charger_psy);
+err_register:
+ kfree(cm->charger_psy.properties);
+err_chg_stat:
+ kfree(cm->charger_stat);
+err_no_charger_stat:
+err_no_charger:
+ kfree(cm->desc);
+err_alloc_desc:
+ kfree(cm);
+err_alloc:
+ return ret;
+}
+
+static int __devexit charger_manager_remove(struct platform_device *pdev)
+{
+ struct charger_manager *cm = platform_get_drvdata(pdev);
+ struct charger_desc *desc = cm->desc;
+
+ /* Remove from the list */
+ mutex_lock(&cm_list_mtx);
+ list_del(&cm->entry);
+ mutex_unlock(&cm_list_mtx);
+
+ if (desc->charger_regulators)
+ regulator_bulk_free(desc->num_charger_regulators,
+ desc->charger_regulators);
+
+ power_supply_unregister(&cm->charger_psy);
+ kfree(cm->charger_psy.properties);
+ kfree(cm->charger_stat);
+ kfree(cm->desc);
+ kfree(cm);
+
+ return 0;
+}
+
+static const struct platform_device_id charger_manager_id[] = {
+ { "charger-manager", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, charger_manager_id);
+
+static int cm_suspend_prepare(struct device *dev)
+{
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+ struct charger_manager *cm = platform_get_drvdata(pdev);
+
+ if (!cm_suspended) {
+ if (rtc_dev) {
+ struct rtc_time tmp;
+ unsigned long now;
+
+ rtc_read_alarm(rtc_dev, &rtc_wkalarm_save);
+ rtc_read_time(rtc_dev, &tmp);
+
+ if (rtc_wkalarm_save.enabled) {
+ rtc_tm_to_time(&rtc_wkalarm_save.time,
+ &rtc_wkalarm_save_time);
+ rtc_tm_to_time(&tmp, &now);
+ if (now > rtc_wkalarm_save_time)
+ rtc_wkalarm_save_time = 0;
+ } else {
+ rtc_wkalarm_save_time = 0;
+ }
+ }
+ cm_suspended = true;
+ }
+
+ cm->status_save_ext_pwr_inserted = is_ext_pwr_online(cm);
+ cm->status_save_batt = is_batt_present(cm);
+
+ if (!cm_rtc_set) {
+ cm_suspend_duration_ms = 0;
+ cm_rtc_set = cm_setup_timer();
+ }
+
+ return 0;
+}
+
+static void cm_suspend_complete(struct device *dev)
+{
+ struct platform_device *pdev = container_of(dev, struct platform_device,
+ dev);
+ struct charger_manager *cm = platform_get_drvdata(pdev);
+
+ if (cm_suspended) {
+ if (rtc_dev) {
+ struct rtc_wkalrm tmp;
+
+ rtc_read_alarm(rtc_dev, &tmp);
+ rtc_wkalarm_save.pending = tmp.pending;
+ rtc_set_alarm(rtc_dev, &rtc_wkalarm_save);
+ }
+ cm_suspended = false;
+ cm_rtc_set = false;
+ }
+
+ uevent_notify(cm, NULL);
+}
+
+static const struct dev_pm_ops charger_manager_pm = {
+ .prepare = cm_suspend_prepare,
+ .complete = cm_suspend_complete,
+};
+
+static struct platform_driver charger_manager_driver = {
+ .driver = {
+ .name = "charger-manager",
+ .owner = THIS_MODULE,
+ .pm = &charger_manager_pm,
+ },
+ .probe = charger_manager_probe,
+ .remove = __devexit_p(charger_manager_remove),
+ .id_table = charger_manager_id,
+};
+
+static int __init charger_manager_init(void)
+{
+ return platform_driver_register(&charger_manager_driver);
+}
+late_initcall(charger_manager_init);
+
+static void __exit charger_manager_cleanup(void)
+{
+ platform_driver_unregister(&charger_manager_driver);
+}
+module_exit(charger_manager_cleanup);
+
+MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
+MODULE_DESCRIPTION("Charger Manager");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/collie_battery.c b/drivers/power/collie_battery.c
index 548d263..74c6b23 100644
--- a/drivers/power/collie_battery.c
+++ b/drivers/power/collie_battery.c
@@ -146,7 +146,7 @@ static void collie_bat_external_power_changed(struct power_supply *psy)
static irqreturn_t collie_bat_gpio_isr(int irq, void *data)
{
- pr_info("collie_bat_gpio irq: %d\n", gpio_get_value(irq_to_gpio(irq)));
+ pr_info("collie_bat_gpio irq\n");
schedule_work(&bat_work);
return IRQ_HANDLED;
}
@@ -277,18 +277,13 @@ static struct collie_bat collie_bat_bu = {
.adc_temp_divider = -1,
};
-static struct {
- int gpio;
- char *name;
- bool output;
- int value;
-} gpios[] = {
- { COLLIE_GPIO_CO, "main battery full", 0, 0 },
- { COLLIE_GPIO_MAIN_BAT_LOW, "main battery low", 0, 0 },
- { COLLIE_GPIO_CHARGE_ON, "main charge on", 1, 0 },
- { COLLIE_GPIO_MBAT_ON, "main battery", 1, 0 },
- { COLLIE_GPIO_TMP_ON, "main battery temp", 1, 0 },
- { COLLIE_GPIO_BBAT_ON, "backup battery", 1, 0 },
+static struct gpio collie_batt_gpios[] = {
+ { COLLIE_GPIO_CO, GPIOF_IN, "main battery full" },
+ { COLLIE_GPIO_MAIN_BAT_LOW, GPIOF_IN, "main battery low" },
+ { COLLIE_GPIO_CHARGE_ON, GPIOF_OUT_INIT_LOW, "main charge on" },
+ { COLLIE_GPIO_MBAT_ON, GPIOF_OUT_INIT_LOW, "main battery" },
+ { COLLIE_GPIO_TMP_ON, GPIOF_OUT_INIT_LOW, "main battery temp" },
+ { COLLIE_GPIO_BBAT_ON, GPIOF_OUT_INIT_LOW, "backup battery" },
};
#ifdef CONFIG_PM
@@ -313,29 +308,16 @@ static int collie_bat_resume(struct ucb1x00_dev *dev)
static int __devinit collie_bat_probe(struct ucb1x00_dev *dev)
{
int ret;
- int i;
if (!machine_is_collie())
return -ENODEV;
ucb = dev->ucb;
- for (i = 0; i < ARRAY_SIZE(gpios); i++) {
- ret = gpio_request(gpios[i].gpio, gpios[i].name);
- if (ret) {
- i--;
- goto err_gpio;
- }
-
- if (gpios[i].output)
- ret = gpio_direction_output(gpios[i].gpio,
- gpios[i].value);
- else
- ret = gpio_direction_input(gpios[i].gpio);
-
- if (ret)
- goto err_gpio;
- }
+ ret = gpio_request_array(collie_batt_gpios,
+ ARRAY_SIZE(collie_batt_gpios));
+ if (ret)
+ return ret;
mutex_init(&collie_bat_main.work_lock);
@@ -363,19 +345,12 @@ err_psy_reg_main:
/* see comment in collie_bat_remove */
cancel_work_sync(&bat_work);
-
- i--;
-err_gpio:
- for (; i >= 0; i--)
- gpio_free(gpios[i].gpio);
-
+ gpio_free_array(collie_batt_gpios, ARRAY_SIZE(collie_batt_gpios));
return ret;
}
static void __devexit collie_bat_remove(struct ucb1x00_dev *dev)
{
- int i;
-
free_irq(gpio_to_irq(COLLIE_GPIO_CO), &collie_bat_main);
power_supply_unregister(&collie_bat_bu.psy);
@@ -387,9 +362,7 @@ static void __devexit collie_bat_remove(struct ucb1x00_dev *dev)
* unregistered now.
*/
cancel_work_sync(&bat_work);
-
- for (i = ARRAY_SIZE(gpios) - 1; i >= 0; i--)
- gpio_free(gpios[i].gpio);
+ gpio_free_array(collie_batt_gpios, ARRAY_SIZE(collie_batt_gpios));
}
static struct ucb1x00_driver collie_bat_driver = {
diff --git a/drivers/power/da9030_battery.c b/drivers/power/da9030_battery.c
index d2c793cf..3fd3e95 100644
--- a/drivers/power/da9030_battery.c
+++ b/drivers/power/da9030_battery.c
@@ -588,18 +588,7 @@ static struct platform_driver da903x_battery_driver = {
.remove = da9030_battery_remove,
};
-static int da903x_battery_init(void)
-{
- return platform_driver_register(&da903x_battery_driver);
-}
-
-static void da903x_battery_exit(void)
-{
- platform_driver_unregister(&da903x_battery_driver);
-}
-
-module_init(da903x_battery_init);
-module_exit(da903x_battery_exit);
+module_platform_driver(da903x_battery_driver);
MODULE_DESCRIPTION("DA9030 battery charger driver");
MODULE_AUTHOR("Mike Rapoport, CompuLab");
diff --git a/drivers/power/da9052-battery.c b/drivers/power/da9052-battery.c
new file mode 100644
index 0000000..e8ea47a
--- /dev/null
+++ b/drivers/power/da9052-battery.c
@@ -0,0 +1,664 @@
+/*
+ * Battery Driver for Dialog DA9052 PMICs
+ *
+ * Copyright(c) 2011 Dialog Semiconductor Ltd.
+ *
+ * Author: David Dajun Chen <dchen@diasemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/delay.h>
+#include <linux/freezer.h>
+#include <linux/fs.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/uaccess.h>
+#include <linux/platform_device.h>
+#include <linux/power_supply.h>
+
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/pdata.h>
+#include <linux/mfd/da9052/reg.h>
+
+/* STATIC CONFIGURATION */
+#define DA9052_BAT_CUTOFF_VOLT 2800
+#define DA9052_BAT_TSH 62000
+#define DA9052_BAT_LOW_CAP 4
+#define DA9052_AVG_SZ 4
+#define DA9052_VC_TBL_SZ 68
+#define DA9052_VC_TBL_REF_SZ 3
+
+#define DA9052_ISET_USB_MASK 0x0F
+#define DA9052_CHG_USB_ILIM_MASK 0x40
+#define DA9052_CHG_LIM_COLS 16
+
+#define DA9052_MEAN(x, y) ((x + y) / 2)
+
+enum charger_type_enum {
+ DA9052_NOCHARGER = 1,
+ DA9052_CHARGER,
+};
+
+static const u16 da9052_chg_current_lim[2][DA9052_CHG_LIM_COLS] = {
+ {70, 80, 90, 100, 110, 120, 400, 450,
+ 500, 550, 600, 650, 700, 900, 1100, 1300},
+ {80, 90, 100, 110, 120, 400, 450, 500,
+ 550, 600, 800, 1000, 1200, 1400, 1600, 1800},
+};
+
+static const u16 vc_tbl_ref[3] = {10, 25, 40};
+/* Lookup table for voltage vs capacity */
+static u32 const vc_tbl[3][68][2] = {
+ /* For temperature 10 degree Celsius */
+ {
+ {4082, 100}, {4036, 98},
+ {4020, 96}, {4008, 95},
+ {3997, 93}, {3983, 91},
+ {3964, 90}, {3943, 88},
+ {3926, 87}, {3912, 85},
+ {3900, 84}, {3890, 82},
+ {3881, 80}, {3873, 79},
+ {3865, 77}, {3857, 76},
+ {3848, 74}, {3839, 73},
+ {3829, 71}, {3820, 70},
+ {3811, 68}, {3802, 67},
+ {3794, 65}, {3785, 64},
+ {3778, 62}, {3770, 61},
+ {3763, 59}, {3756, 58},
+ {3750, 56}, {3744, 55},
+ {3738, 53}, {3732, 52},
+ {3727, 50}, {3722, 49},
+ {3717, 47}, {3712, 46},
+ {3708, 44}, {3703, 43},
+ {3700, 41}, {3696, 40},
+ {3693, 38}, {3691, 37},
+ {3688, 35}, {3686, 34},
+ {3683, 32}, {3681, 31},
+ {3678, 29}, {3675, 28},
+ {3672, 26}, {3669, 25},
+ {3665, 23}, {3661, 22},
+ {3656, 21}, {3651, 19},
+ {3645, 18}, {3639, 16},
+ {3631, 15}, {3622, 13},
+ {3611, 12}, {3600, 10},
+ {3587, 9}, {3572, 7},
+ {3548, 6}, {3503, 5},
+ {3420, 3}, {3268, 2},
+ {2992, 1}, {2746, 0}
+ },
+ /* For temperature 25 degree Celsius */
+ {
+ {4102, 100}, {4065, 98},
+ {4048, 96}, {4034, 95},
+ {4021, 93}, {4011, 92},
+ {4001, 90}, {3986, 88},
+ {3968, 87}, {3952, 85},
+ {3938, 84}, {3926, 82},
+ {3916, 81}, {3908, 79},
+ {3900, 77}, {3892, 76},
+ {3883, 74}, {3874, 73},
+ {3864, 71}, {3855, 70},
+ {3846, 68}, {3836, 67},
+ {3827, 65}, {3819, 64},
+ {3810, 62}, {3801, 61},
+ {3793, 59}, {3786, 58},
+ {3778, 56}, {3772, 55},
+ {3765, 53}, {3759, 52},
+ {3754, 50}, {3748, 49},
+ {3743, 47}, {3738, 46},
+ {3733, 44}, {3728, 43},
+ {3724, 41}, {3720, 40},
+ {3716, 38}, {3712, 37},
+ {3709, 35}, {3706, 34},
+ {3703, 33}, {3701, 31},
+ {3698, 30}, {3696, 28},
+ {3693, 27}, {3690, 25},
+ {3687, 24}, {3683, 22},
+ {3680, 21}, {3675, 19},
+ {3671, 18}, {3666, 17},
+ {3660, 15}, {3654, 14},
+ {3647, 12}, {3639, 11},
+ {3630, 9}, {3621, 8},
+ {3613, 6}, {3606, 5},
+ {3597, 4}, {3582, 2},
+ {3546, 1}, {2747, 0}
+ },
+ /* For temperature 40 degree Celsius */
+ {
+ {4114, 100}, {4081, 98},
+ {4065, 96}, {4050, 95},
+ {4036, 93}, {4024, 92},
+ {4013, 90}, {4002, 88},
+ {3990, 87}, {3976, 85},
+ {3962, 84}, {3950, 82},
+ {3939, 81}, {3930, 79},
+ {3921, 77}, {3912, 76},
+ {3902, 74}, {3893, 73},
+ {3883, 71}, {3874, 70},
+ {3865, 68}, {3856, 67},
+ {3847, 65}, {3838, 64},
+ {3829, 62}, {3820, 61},
+ {3812, 59}, {3803, 58},
+ {3795, 56}, {3787, 55},
+ {3780, 53}, {3773, 52},
+ {3767, 50}, {3761, 49},
+ {3756, 47}, {3751, 46},
+ {3746, 44}, {3741, 43},
+ {3736, 41}, {3732, 40},
+ {3728, 38}, {3724, 37},
+ {3720, 35}, {3716, 34},
+ {3713, 33}, {3710, 31},
+ {3707, 30}, {3704, 28},
+ {3701, 27}, {3698, 25},
+ {3695, 24}, {3691, 22},
+ {3686, 21}, {3681, 19},
+ {3676, 18}, {3671, 17},
+ {3666, 15}, {3661, 14},
+ {3655, 12}, {3648, 11},
+ {3640, 9}, {3632, 8},
+ {3622, 6}, {3616, 5},
+ {3611, 4}, {3604, 2},
+ {3594, 1}, {2747, 0}
+ }
+};
+
+struct da9052_battery {
+ struct da9052 *da9052;
+ struct power_supply psy;
+ struct notifier_block nb;
+ int charger_type;
+ int status;
+ int health;
+};
+
+static inline int volt_reg_to_mV(int value)
+{
+ return ((value * 1000) / 512) + 2500;
+}
+
+static inline int ichg_reg_to_mA(int value)
+{
+ return (value * 3900) / 1000;
+}
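+
+/*
+ * Worked examples for the conversions above, assuming a 10-bit VBAT ADC
+ * (0..1023), which matches the 2500..~4500 mV range implied by the formula:
+ *	volt_reg_to_mV(0)    = 2500 mV
+ *	volt_reg_to_mV(1023) = (1023 * 1000) / 512 + 2500 = 4498 mV
+ *	ichg_reg_to_mA(100)  = (100 * 3900) / 1000        =  390 mA
+ */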
+
+static int da9052_read_chgend_current(struct da9052_battery *bat,
+ int *current_mA)
+{
+ int ret;
+
+ if (bat->status == POWER_SUPPLY_STATUS_DISCHARGING)
+ return -EINVAL;
+
+ ret = da9052_reg_read(bat->da9052, DA9052_ICHG_END_REG);
+ if (ret < 0)
+ return ret;
+
+ *current_mA = ichg_reg_to_mA(ret & DA9052_ICHGEND_ICHGEND);
+
+ return 0;
+}
+
+static int da9052_read_chg_current(struct da9052_battery *bat, int *current_mA)
+{
+ int ret;
+
+ if (bat->status == POWER_SUPPLY_STATUS_DISCHARGING)
+ return -EINVAL;
+
+ ret = da9052_reg_read(bat->da9052, DA9052_ICHG_AV_REG);
+ if (ret < 0)
+ return ret;
+
+ *current_mA = ichg_reg_to_mA(ret & DA9052_ICHGAV_ICHGAV);
+
+ return 0;
+}
+
+static int da9052_bat_check_status(struct da9052_battery *bat, int *status)
+{
+ u8 v[2] = {0, 0};
+ u8 bat_status;
+ u8 chg_end;
+ int ret;
+ int chg_current;
+ int chg_end_current;
+ bool dcinsel;
+ bool dcindet;
+ bool vbussel;
+ bool vbusdet;
+ bool dc;
+ bool vbus;
+
+ ret = da9052_group_read(bat->da9052, DA9052_STATUS_A_REG, 2, v);
+ if (ret < 0)
+ return ret;
+
+ bat_status = v[0];
+ chg_end = v[1];
+
+ dcinsel = bat_status & DA9052_STATUSA_DCINSEL;
+ dcindet = bat_status & DA9052_STATUSA_DCINDET;
+ vbussel = bat_status & DA9052_STATUSA_VBUSSEL;
+ vbusdet = bat_status & DA9052_STATUSA_VBUSDET;
+ dc = dcinsel && dcindet;
+ vbus = vbussel && vbusdet;
+
+ /* Preference to WALL(DCIN) charger unit */
+ if (dc || vbus) {
+ bat->charger_type = DA9052_CHARGER;
+
+ /* If the charging end flag is set and the charging current is
+ * greater than the charging end limit, the battery is charging
+ */
+ if ((chg_end & DA9052_STATUSB_CHGEND) != 0) {
+ ret = da9052_read_chg_current(bat, &chg_current);
+ if (ret < 0)
+ return ret;
+ ret = da9052_read_chgend_current(bat, &chg_end_current);
+ if (ret < 0)
+ return ret;
+
+ if (chg_current >= chg_end_current)
+ bat->status = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ bat->status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ } else {
+ /* If the charging end flag is cleared, the battery
+ * is charging
+ */
+ bat->status = POWER_SUPPLY_STATUS_CHARGING;
+ }
+ } else if (dcindet || vbusdet) {
+ bat->charger_type = DA9052_CHARGER;
+ bat->status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ } else {
+ bat->charger_type = DA9052_NOCHARGER;
+ bat->status = POWER_SUPPLY_STATUS_DISCHARGING;
+ }
+
+ if (status != NULL)
+ *status = bat->status;
+ return 0;
+}
+
+static int da9052_bat_read_volt(struct da9052_battery *bat, int *volt_mV)
+{
+ int volt;
+
+ volt = da9052_adc_manual_read(bat->da9052, DA9052_ADC_MAN_MUXSEL_VBAT);
+ if (volt < 0)
+ return volt;
+
+ *volt_mV = volt_reg_to_mV(volt);
+
+ return 0;
+}
+
+static int da9052_bat_check_presence(struct da9052_battery *bat, int *illegal)
+{
+ int bat_temp;
+
+ bat_temp = da9052_adc_read_temp(bat->da9052);
+ if (bat_temp < 0)
+ return bat_temp;
+
+ if (bat_temp > DA9052_BAT_TSH)
+ *illegal = 1;
+ else
+ *illegal = 0;
+
+ return 0;
+}
+
+static int da9052_bat_interpolate(int vbat_lower, int vbat_upper,
+ int level_lower, int level_upper,
+ int bat_voltage)
+{
+ int tmp;
+
+ tmp = ((level_upper - level_lower) * 1000) / (vbat_upper - vbat_lower);
+ tmp = level_lower + (((bat_voltage - vbat_lower) * tmp) / 1000);
+
+ return tmp;
+}
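+
+/*
+ * Worked example for the linear interpolation above, using the two adjacent
+ * 25 degree Celsius entries {3597 mV, 4%} and {3582 mV, 2%} with a measured
+ * 3590 mV:
+ *	slope  = ((4 - 2) * 1000) / (3597 - 3582)  = 133
+ *	result = 2 + ((3590 - 3582) * 133) / 1000  = 3   (i.e. about 3%)
+ */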
+
+static unsigned char da9052_determine_vc_tbl_index(unsigned char adc_temp)
+{
+ int i;
+
+ if (adc_temp <= vc_tbl_ref[0])
+ return 0;
+
+ if (adc_temp > vc_tbl_ref[DA9052_VC_TBL_REF_SZ - 1])
+ return DA9052_VC_TBL_REF_SZ - 1;
+
+ /* Pick the reference temperature closest to adc_temp */
+ for (i = 0; i < DA9052_VC_TBL_REF_SZ - 1; i++) {
+ if ((adc_temp > vc_tbl_ref[i]) &&
+ (adc_temp <= DA9052_MEAN(vc_tbl_ref[i], vc_tbl_ref[i + 1])))
+ return i;
+ if ((adc_temp > DA9052_MEAN(vc_tbl_ref[i], vc_tbl_ref[i + 1]))
+ && (adc_temp <= vc_tbl_ref[i + 1]))
+ return i + 1;
+ }
+
+ /* Unreachable due to the range checks above, but silences gcc */
+ return DA9052_VC_TBL_REF_SZ - 1;
+}
+
+static int da9052_bat_read_capacity(struct da9052_battery *bat, int *capacity)
+{
+ int adc_temp;
+ int bat_voltage;
+ int vbat_lower;
+ int vbat_upper;
+ int level_upper;
+ int level_lower;
+ int ret;
+ int flag;
+ int i = 0;
+ int j;
+
+ ret = da9052_bat_read_volt(bat, &bat_voltage);
+ if (ret < 0)
+ return ret;
+
+ adc_temp = da9052_adc_read_temp(bat->da9052);
+ if (adc_temp < 0)
+ return adc_temp;
+
+ i = da9052_determine_vc_tbl_index(adc_temp);
+
+ if (bat_voltage >= vc_tbl[i][0][0]) {
+ *capacity = 100;
+ return 0;
+ }
+ if (bat_voltage <= vc_tbl[i][DA9052_VC_TBL_SZ - 1][0]) {
+ *capacity = 0;
+ return 0;
+ }
+ flag = 0;
+
+ for (j = 0; j < (DA9052_VC_TBL_SZ-1); j++) {
+ if ((bat_voltage <= vc_tbl[i][j][0]) &&
+ (bat_voltage >= vc_tbl[i][j + 1][0])) {
+ vbat_upper = vc_tbl[i][j][0];
+ vbat_lower = vc_tbl[i][j + 1][0];
+ level_upper = vc_tbl[i][j][1];
+ level_lower = vc_tbl[i][j + 1][1];
+ flag = 1;
+ break;
+ }
+ }
+ if (!flag)
+ return -EIO;
+
+ *capacity = da9052_bat_interpolate(vbat_lower, vbat_upper, level_lower,
+ level_upper, bat_voltage);
+
+ return 0;
+}
+
+static int da9052_bat_check_health(struct da9052_battery *bat, int *health)
+{
+ int ret;
+ int bat_illegal;
+ int capacity;
+
+ ret = da9052_bat_check_presence(bat, &bat_illegal);
+ if (ret < 0)
+ return ret;
+
+ if (bat_illegal) {
+ bat->health = POWER_SUPPLY_HEALTH_UNKNOWN;
+ return 0;
+ }
+
+ if (bat->health != POWER_SUPPLY_HEALTH_OVERHEAT) {
+ ret = da9052_bat_read_capacity(bat, &capacity);
+ if (ret < 0)
+ return ret;
+ if (capacity < DA9052_BAT_LOW_CAP)
+ bat->health = POWER_SUPPLY_HEALTH_DEAD;
+ else
+ bat->health = POWER_SUPPLY_HEALTH_GOOD;
+ }
+
+ *health = bat->health;
+
+ return 0;
+}
+
+static irqreturn_t da9052_bat_irq(int irq, void *data)
+{
+ struct da9052_battery *bat = data;
+
+ irq -= bat->da9052->irq_base;
+
+ if (irq == DA9052_IRQ_CHGEND)
+ bat->status = POWER_SUPPLY_STATUS_FULL;
+ else
+ da9052_bat_check_status(bat, NULL);
+
+ if (irq == DA9052_IRQ_CHGEND || irq == DA9052_IRQ_DCIN ||
+ irq == DA9052_IRQ_VBUS || irq == DA9052_IRQ_TBAT) {
+ power_supply_changed(&bat->psy);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int da9052_USB_current_notifier(struct notifier_block *nb,
+ unsigned long events, void *data)
+{
+ u8 row;
+ u8 col;
+ int *current_mA = data;
+ int ret;
+ struct da9052_battery *bat = container_of(nb, struct da9052_battery,
+ nb);
+
+ if (bat->status == POWER_SUPPLY_STATUS_DISCHARGING)
+ return -EPERM;
+
+ ret = da9052_reg_read(bat->da9052, DA9052_CHGBUCK_REG);
+ if (ret & DA9052_CHG_USB_ILIM_MASK)
+ return -EPERM;
+
+ if (bat->da9052->chip_id == DA9052)
+ row = 0;
+ else
+ row = 1;
+
+ if (*current_mA < da9052_chg_current_lim[row][0] ||
+ *current_mA > da9052_chg_current_lim[row][DA9052_CHG_LIM_COLS - 1])
+ return -EINVAL;
+
+ for (col = 0; col <= DA9052_CHG_LIM_COLS - 1 ; col++) {
+ if (*current_mA <= da9052_chg_current_lim[row][col])
+ break;
+ }
+
+ return da9052_reg_update(bat->da9052, DA9052_ISET_REG,
+ DA9052_ISET_USB_MASK, col);
+}
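+
+/*
+ * Worked example for the lookup above: on a DA9052 (row 0 of
+ * da9052_chg_current_lim), a request of *current_mA == 500 stops the loop
+ * at col == 8, the first entry >= 500 mA, and that column index is written
+ * into the ISET_USB field via DA9052_ISET_USB_MASK.
+ */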
+
+static int da9052_bat_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ int ret;
+ int illegal;
+ struct da9052_battery *bat = container_of(psy, struct da9052_battery,
+ psy);
+
+ ret = da9052_bat_check_presence(bat, &illegal);
+ if (ret < 0)
+ return ret;
+
+ if (illegal && psp != POWER_SUPPLY_PROP_PRESENT)
+ return -ENODEV;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ ret = da9052_bat_check_status(bat, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_ONLINE:
+ val->intval =
+ (bat->charger_type == DA9052_NOCHARGER) ? 0 : 1;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ ret = da9052_bat_check_presence(bat, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ ret = da9052_bat_check_health(bat, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
+ val->intval = DA9052_BAT_CUTOFF_VOLT * 1000;
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_AVG:
+ ret = da9052_bat_read_volt(bat, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CURRENT_AVG:
+ ret = da9052_read_chg_current(bat, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ ret = da9052_bat_read_capacity(bat, &val->intval);
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ val->intval = da9052_adc_read_temp(bat->da9052);
+ ret = val->intval;
+ break;
+ case POWER_SUPPLY_PROP_TECHNOLOGY:
+ val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return ret;
+}
+
+static enum power_supply_property da9052_bat_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_ONLINE,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
+ POWER_SUPPLY_PROP_VOLTAGE_AVG,
+ POWER_SUPPLY_PROP_CURRENT_AVG,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TECHNOLOGY,
+};
+
+static struct power_supply template_battery = {
+ .name = "da9052-bat",
+ .type = POWER_SUPPLY_TYPE_BATTERY,
+ .properties = da9052_bat_props,
+ .num_properties = ARRAY_SIZE(da9052_bat_props),
+ .get_property = da9052_bat_get_property,
+};
+
+static const char *const da9052_bat_irqs[] = {
+ "BATT TEMP",
+ "DCIN DET",
+ "DCIN REM",
+ "VBUS DET",
+ "VBUS REM",
+ "CHG END",
+};
+
+static s32 __devinit da9052_bat_probe(struct platform_device *pdev)
+{
+ struct da9052_pdata *pdata;
+ struct da9052_battery *bat;
+ int ret;
+ int irq;
+ int i;
+
+ bat = kzalloc(sizeof(struct da9052_battery), GFP_KERNEL);
+ if (!bat)
+ return -ENOMEM;
+
+ bat->da9052 = dev_get_drvdata(pdev->dev.parent);
+ bat->psy = template_battery;
+ bat->charger_type = DA9052_NOCHARGER;
+ bat->status = POWER_SUPPLY_STATUS_UNKNOWN;
+ bat->health = POWER_SUPPLY_HEALTH_UNKNOWN;
+ bat->nb.notifier_call = da9052_USB_current_notifier;
+
+ pdata = bat->da9052->dev->platform_data;
+ if (pdata != NULL && pdata->use_for_apm)
+ bat->psy.use_for_apm = pdata->use_for_apm;
+ else
+ bat->psy.use_for_apm = 1;
+
+ for (i = 0; i < ARRAY_SIZE(da9052_bat_irqs); i++) {
+ irq = platform_get_irq_byname(pdev, da9052_bat_irqs[i]);
+ ret = request_threaded_irq(bat->da9052->irq_base + irq,
+ NULL, da9052_bat_irq,
+ IRQF_TRIGGER_LOW | IRQF_ONESHOT,
+ da9052_bat_irqs[i], bat);
+ if (ret != 0) {
+ dev_err(bat->da9052->dev,
+ "DA9052 failed to request %s IRQ %d: %d\n",
+ da9052_bat_irqs[i], irq, ret);
+ goto err;
+ }
+ }
+
+ /* Needed by da9052_bat_remove(), which uses platform_get_drvdata() */
+ platform_set_drvdata(pdev, bat);
+
+ ret = power_supply_register(&pdev->dev, &bat->psy);
+ if (ret)
+ goto err;
+
+ return 0;
+
+err:
+ /* Free only the IRQs that were successfully requested */
+ while (--i >= 0) {
+ irq = platform_get_irq_byname(pdev, da9052_bat_irqs[i]);
+ free_irq(bat->da9052->irq_base + irq, bat);
+ }
+ kfree(bat);
+ return ret;
+}
+
+static int __devexit da9052_bat_remove(struct platform_device *pdev)
+{
+ int i;
+ int irq;
+ struct da9052_battery *bat = platform_get_drvdata(pdev);
+
+ for (i = 0; i < ARRAY_SIZE(da9052_bat_irqs); i++) {
+ irq = platform_get_irq_byname(pdev, da9052_bat_irqs[i]);
+ free_irq(bat->da9052->irq_base + irq, bat);
+ }
+ power_supply_unregister(&bat->psy);
+
+ return 0;
+}
+
+static struct platform_driver da9052_bat_driver = {
+ .probe = da9052_bat_probe,
+ .remove = __devexit_p(da9052_bat_remove),
+ .driver = {
+ .name = "da9052-bat",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init da9052_bat_init(void)
+{
+ return platform_driver_register(&da9052_bat_driver);
+}
+module_init(da9052_bat_init);
+
+static void __exit da9052_bat_exit(void)
+{
+ platform_driver_unregister(&da9052_bat_driver);
+}
+module_exit(da9052_bat_exit);
+
+MODULE_DESCRIPTION("DA9052 BAT Device Driver");
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9052-bat");
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c
index f2c9cc3..076e211 100644
--- a/drivers/power/ds2760_battery.c
+++ b/drivers/power/ds2760_battery.c
@@ -64,7 +64,7 @@ static unsigned int cache_time = 1000;
module_param(cache_time, uint, 0644);
MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
-static unsigned int pmod_enabled;
+static bool pmod_enabled;
module_param(pmod_enabled, bool, 0644);
MODULE_PARM_DESC(pmod_enabled, "PMOD enable bit");
@@ -95,7 +95,11 @@ static int rated_capacities[] = {
2880, /* Samsung */
2880, /* BYD */
2880, /* Lishen */
- 2880 /* NEC */
+ 2880, /* NEC */
+#ifdef CONFIG_MACH_H4700
+ 0,
+ 3600, /* HP iPAQ hx4700 3.7V 3600mAh (359114-001) */
+#endif
};
/* array is level at temps 0°C, 10°C, 20°C, 30°C, 40°C
@@ -637,18 +641,7 @@ static struct platform_driver ds2760_battery_driver = {
.resume = ds2760_battery_resume,
};
-static int __init ds2760_battery_init(void)
-{
- return platform_driver_register(&ds2760_battery_driver);
-}
-
-static void __exit ds2760_battery_exit(void)
-{
- platform_driver_unregister(&ds2760_battery_driver);
-}
-
-module_init(ds2760_battery_init);
-module_exit(ds2760_battery_exit);
+module_platform_driver(ds2760_battery_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Szabolcs Gyurko <szabolcs.gyurko@tlt.hu>, "
diff --git a/drivers/power/ds2780_battery.c b/drivers/power/ds2780_battery.c
index 91a783d..de31cae 100644
--- a/drivers/power/ds2780_battery.c
+++ b/drivers/power/ds2780_battery.c
@@ -841,29 +841,17 @@ static int __devexit ds2780_battery_remove(struct platform_device *pdev)
return 0;
}
-MODULE_ALIAS("platform:ds2780-battery");
-
static struct platform_driver ds2780_battery_driver = {
.driver = {
.name = "ds2780-battery",
},
.probe = ds2780_battery_probe,
- .remove = ds2780_battery_remove,
+ .remove = __devexit_p(ds2780_battery_remove),
};
-static int __init ds2780_battery_init(void)
-{
- return platform_driver_register(&ds2780_battery_driver);
-}
-
-static void __exit ds2780_battery_exit(void)
-{
- platform_driver_unregister(&ds2780_battery_driver);
-}
-
-module_init(ds2780_battery_init);
-module_exit(ds2780_battery_exit);
+module_platform_driver(ds2780_battery_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Clifton Barnes <cabarnes@indesign-llc.com>");
MODULE_DESCRIPTION("Maxim/Dallas DS2780 Stand-Alone Fuel Gauage IC driver");
+MODULE_ALIAS("platform:ds2780-battery");
diff --git a/drivers/power/gpio-charger.c b/drivers/power/gpio-charger.c
index a64b885..8672c91 100644
--- a/drivers/power/gpio-charger.c
+++ b/drivers/power/gpio-charger.c
@@ -185,17 +185,7 @@ static struct platform_driver gpio_charger_driver = {
},
};
-static int __init gpio_charger_init(void)
-{
- return platform_driver_register(&gpio_charger_driver);
-}
-module_init(gpio_charger_init);
-
-static void __exit gpio_charger_exit(void)
-{
- platform_driver_unregister(&gpio_charger_driver);
-}
-module_exit(gpio_charger_exit);
+module_platform_driver(gpio_charger_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Driver for chargers which report their online status through a GPIO");
diff --git a/drivers/power/intel_mid_battery.c b/drivers/power/intel_mid_battery.c
index 01fa671..d096497 100644
--- a/drivers/power/intel_mid_battery.c
+++ b/drivers/power/intel_mid_battery.c
@@ -779,18 +779,7 @@ static struct platform_driver platform_pmic_battery_driver = {
.remove = __devexit_p(platform_pmic_battery_remove),
};
-static int __init platform_pmic_battery_module_init(void)
-{
- return platform_driver_register(&platform_pmic_battery_driver);
-}
-
-static void __exit platform_pmic_battery_module_exit(void)
-{
- platform_driver_unregister(&platform_pmic_battery_driver);
-}
-
-module_init(platform_pmic_battery_module_init);
-module_exit(platform_pmic_battery_module_exit);
+module_platform_driver(platform_pmic_battery_driver);
MODULE_AUTHOR("Nithish Mahalingam <nithish.mahalingam@intel.com>");
MODULE_DESCRIPTION("Intel Moorestown PMIC Battery Driver");
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index f6d72b4..b806667 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -79,7 +79,7 @@ static void isp1704_charger_set_power(struct isp1704_charger *isp, bool on)
{
struct isp1704_charger_data *board = isp->dev->platform_data;
- if (board->set_power)
+ if (board && board->set_power)
board->set_power(on);
}
@@ -494,17 +494,7 @@ static struct platform_driver isp1704_charger_driver = {
.remove = __devexit_p(isp1704_charger_remove),
};
-static int __init isp1704_charger_init(void)
-{
- return platform_driver_register(&isp1704_charger_driver);
-}
-module_init(isp1704_charger_init);
-
-static void __exit isp1704_charger_exit(void)
-{
- platform_driver_unregister(&isp1704_charger_driver);
-}
-module_exit(isp1704_charger_exit);
+module_platform_driver(isp1704_charger_driver);
MODULE_ALIAS("platform:isp1704_charger");
MODULE_AUTHOR("Nokia Corporation");
diff --git a/drivers/power/jz4740-battery.c b/drivers/power/jz4740-battery.c
index 763f894..8dbc7bf 100644
--- a/drivers/power/jz4740-battery.c
+++ b/drivers/power/jz4740-battery.c
@@ -67,7 +67,7 @@ static irqreturn_t jz_battery_irq_handler(int irq, void *devid)
static long jz_battery_read_voltage(struct jz_battery *battery)
{
- unsigned long t;
+ long t;
unsigned long val;
long voltage;
@@ -441,17 +441,7 @@ static struct platform_driver jz_battery_driver = {
},
};
-static int __init jz_battery_init(void)
-{
- return platform_driver_register(&jz_battery_driver);
-}
-module_init(jz_battery_init);
-
-static void __exit jz_battery_exit(void)
-{
- platform_driver_unregister(&jz_battery_driver);
-}
-module_exit(jz_battery_exit);
+module_platform_driver(jz_battery_driver);
MODULE_ALIAS("platform:jz4740-battery");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/lp8727_charger.c b/drivers/power/lp8727_charger.c
new file mode 100644
index 0000000..c53dd12
--- /dev/null
+++ b/drivers/power/lp8727_charger.c
@@ -0,0 +1,495 @@
+/*
+ * Driver for LP8727 Micro/Mini USB IC with integrated charger
+ *
+ * Copyright (C) 2011 National Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/power_supply.h>
+#include <linux/lp8727.h>
+
+#define DEBOUNCE_MSEC 270
+
+/* Registers */
+#define CTRL1 0x1
+#define CTRL2 0x2
+#define SWCTRL 0x3
+#define INT1 0x4
+#define INT2 0x5
+#define STATUS1 0x6
+#define STATUS2 0x7
+#define CHGCTRL2 0x9
+
+/* CTRL1 register */
+#define CP_EN (1 << 0)
+#define ADC_EN (1 << 1)
+#define ID200_EN (1 << 4)
+
+/* CTRL2 register */
+#define CHGDET_EN (1 << 1)
+#define INT_EN (1 << 6)
+
+/* SWCTRL register */
+#define SW_DM1_DM (0x0 << 0)
+#define SW_DM1_U1 (0x1 << 0)
+#define SW_DM1_HiZ (0x7 << 0)
+#define SW_DP2_DP (0x0 << 3)
+#define SW_DP2_U2 (0x1 << 3)
+#define SW_DP2_HiZ (0x7 << 3)
+
+/* INT1 register */
+#define IDNO (0xF << 0)
+#define VBUS (1 << 4)
+
+/* STATUS1 register */
+#define CHGSTAT (3 << 4)
+#define CHPORT (1 << 6)
+#define DCPORT (1 << 7)
+
+/* STATUS2 register */
+#define TEMP_STAT (3 << 5)
+
+enum lp8727_dev_id {
+ ID_NONE,
+ ID_TA,
+ ID_DEDICATED_CHG,
+ ID_USB_CHG,
+ ID_USB_DS,
+ ID_MAX,
+};
+
+enum lp8727_chg_stat {
+ PRECHG,
+ CC,
+ CV,
+ EOC,
+};
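+
+/*
+ * The charge state is reported in STATUS1 bits [5:4] (CHGSTAT) and maps onto
+ * enum lp8727_chg_stat above.  For example, STATUS1 == 0x30 decodes as
+ * (0x30 & CHGSTAT) >> 4 == 3 == EOC, i.e. end of charge (battery full).
+ */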
+
+struct lp8727_psy {
+ struct power_supply ac;
+ struct power_supply usb;
+ struct power_supply batt;
+};
+
+struct lp8727_chg {
+ struct device *dev;
+ struct i2c_client *client;
+ struct mutex xfer_lock;
+ struct delayed_work work;
+ struct workqueue_struct *irqthread;
+ struct lp8727_platform_data *pdata;
+ struct lp8727_psy *psy;
+ struct lp8727_chg_param *chg_parm;
+ enum lp8727_dev_id devid;
+};
+
+static int lp8727_i2c_read(struct lp8727_chg *pchg, u8 reg, u8 *data, u8 len)
+{
+ s32 ret;
+
+ mutex_lock(&pchg->xfer_lock);
+ ret = i2c_smbus_read_i2c_block_data(pchg->client, reg, len, data);
+ mutex_unlock(&pchg->xfer_lock);
+
+ return (ret != len) ? -EIO : 0;
+}
+
+static int lp8727_i2c_write(struct lp8727_chg *pchg, u8 reg, u8 *data, u8 len)
+{
+ s32 ret;
+
+ mutex_lock(&pchg->xfer_lock);
+ ret = i2c_smbus_write_i2c_block_data(pchg->client, reg, len, data);
+ mutex_unlock(&pchg->xfer_lock);
+
+ return ret;
+}
+
+static inline int lp8727_i2c_read_byte(struct lp8727_chg *pchg, u8 reg,
+ u8 *data)
+{
+ return lp8727_i2c_read(pchg, reg, data, 1);
+}
+
+static inline int lp8727_i2c_write_byte(struct lp8727_chg *pchg, u8 reg,
+ u8 *data)
+{
+ return lp8727_i2c_write(pchg, reg, data, 1);
+}
+
+static int lp8727_is_charger_attached(const char *name, int id)
+{
+ if (name) {
+ if (!strcmp(name, "ac"))
+ return (id == ID_TA || id == ID_DEDICATED_CHG) ? 1 : 0;
+ else if (!strcmp(name, "usb"))
+ return (id == ID_USB_CHG) ? 1 : 0;
+ }
+
+ return (id >= ID_TA && id <= ID_USB_CHG) ? 1 : 0;
+}
+
+static void lp8727_init_device(struct lp8727_chg *pchg)
+{
+ u8 val;
+
+ val = ID200_EN | ADC_EN | CP_EN;
+ if (lp8727_i2c_write_byte(pchg, CTRL1, &val))
+ dev_err(pchg->dev, "i2c write err : addr=0x%.2x\n", CTRL1);
+
+ val = INT_EN | CHGDET_EN;
+ if (lp8727_i2c_write_byte(pchg, CTRL2, &val))
+ dev_err(pchg->dev, "i2c write err : addr=0x%.2x\n", CTRL2);
+}
+
+static int lp8727_is_dedicated_charger(struct lp8727_chg *pchg)
+{
+ u8 val;
+ lp8727_i2c_read_byte(pchg, STATUS1, &val);
+ return (val & DCPORT);
+}
+
+static int lp8727_is_usb_charger(struct lp8727_chg *pchg)
+{
+ u8 val;
+ lp8727_i2c_read_byte(pchg, STATUS1, &val);
+ return (val & CHPORT);
+}
+
+static void lp8727_ctrl_switch(struct lp8727_chg *pchg, u8 sw)
+{
+ u8 val = sw;
+ lp8727_i2c_write_byte(pchg, SWCTRL, &val);
+}
+
+static void lp8727_id_detection(struct lp8727_chg *pchg, u8 id, int vbusin)
+{
+ u8 devid = ID_NONE;
+ u8 swctrl = SW_DM1_HiZ | SW_DP2_HiZ;
+
+ switch (id) {
+ case 0x5:
+ devid = ID_TA;
+ pchg->chg_parm = &pchg->pdata->ac;
+ break;
+ case 0xB:
+ if (lp8727_is_dedicated_charger(pchg)) {
+ pchg->chg_parm = &pchg->pdata->ac;
+ devid = ID_DEDICATED_CHG;
+ } else if (lp8727_is_usb_charger(pchg)) {
+ pchg->chg_parm = &pchg->pdata->usb;
+ devid = ID_USB_CHG;
+ swctrl = SW_DM1_DM | SW_DP2_DP;
+ } else if (vbusin) {
+ devid = ID_USB_DS;
+ swctrl = SW_DM1_DM | SW_DP2_DP;
+ }
+ break;
+ default:
+ devid = ID_NONE;
+ pchg->chg_parm = NULL;
+ break;
+ }
+
+ pchg->devid = devid;
+ lp8727_ctrl_switch(pchg, swctrl);
+}
+
+static void lp8727_enable_chgdet(struct lp8727_chg *pchg)
+{
+ u8 val;
+
+ lp8727_i2c_read_byte(pchg, CTRL2, &val);
+ val |= CHGDET_EN;
+ lp8727_i2c_write_byte(pchg, CTRL2, &val);
+}
+
+static void lp8727_delayed_func(struct work_struct *_work)
+{
+ u8 intstat[2], idno, vbus;
+ struct lp8727_chg *pchg =
+ container_of(_work, struct lp8727_chg, work.work);
+
+ if (lp8727_i2c_read(pchg, INT1, intstat, 2)) {
+ dev_err(pchg->dev, "can not read INT registers\n");
+ return;
+ }
+
+ idno = intstat[0] & IDNO;
+ vbus = intstat[0] & VBUS;
+
+ lp8727_id_detection(pchg, idno, vbus);
+ lp8727_enable_chgdet(pchg);
+
+ power_supply_changed(&pchg->psy->ac);
+ power_supply_changed(&pchg->psy->usb);
+ power_supply_changed(&pchg->psy->batt);
+}
+
+static irqreturn_t lp8727_isr_func(int irq, void *ptr)
+{
+ struct lp8727_chg *pchg = ptr;
+ unsigned long delay = msecs_to_jiffies(DEBOUNCE_MSEC);
+
+ queue_delayed_work(pchg->irqthread, &pchg->work, delay);
+
+ return IRQ_HANDLED;
+}
+
+static void lp8727_intr_config(struct lp8727_chg *pchg)
+{
+ INIT_DELAYED_WORK(&pchg->work, lp8727_delayed_func);
+
+ pchg->irqthread = create_singlethread_workqueue("lp8727-irqthd");
+ if (!pchg->irqthread)
+ dev_err(pchg->dev, "can not create thread for lp8727\n");
+
+ if (request_threaded_irq(pchg->client->irq,
+ NULL,
+ lp8727_isr_func,
+ IRQF_TRIGGER_FALLING, "lp8727_irq", pchg)) {
+ dev_err(pchg->dev, "lp8727 irq can not be registered\n");
+ }
+}
+
+static enum power_supply_property lp8727_charger_prop[] = {
+ POWER_SUPPLY_PROP_ONLINE,
+};
+
+static enum power_supply_property lp8727_battery_prop[] = {
+ POWER_SUPPLY_PROP_STATUS,
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_PRESENT,
+ POWER_SUPPLY_PROP_VOLTAGE_NOW,
+ POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_TEMP,
+};
+
+static char *battery_supplied_to[] = {
+ "main_batt",
+};
+
+static int lp8727_charger_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct lp8727_chg *pchg = dev_get_drvdata(psy->dev->parent);
+
+ if (psp == POWER_SUPPLY_PROP_ONLINE)
+ val->intval = lp8727_is_charger_attached(psy->name,
+ pchg->devid);
+
+ return 0;
+}
+
+static int lp8727_battery_get_property(struct power_supply *psy,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct lp8727_chg *pchg = dev_get_drvdata(psy->dev->parent);
+ u8 read;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ if (lp8727_is_charger_attached(psy->name, pchg->devid)) {
+ lp8727_i2c_read_byte(pchg, STATUS1, &read);
+ if (((read & CHGSTAT) >> 4) == EOC)
+ val->intval = POWER_SUPPLY_STATUS_FULL;
+ else
+ val->intval = POWER_SUPPLY_STATUS_CHARGING;
+ } else {
+ val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
+ }
+ break;
+ case POWER_SUPPLY_PROP_HEALTH:
+ lp8727_i2c_read_byte(pchg, STATUS2, &read);
+ read = (read & TEMP_STAT) >> 5;
+ if (read >= 0x1 && read <= 0x3)
+ val->intval = POWER_SUPPLY_HEALTH_OVERHEAT;
+ else
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ if (pchg->pdata->get_batt_present)
+ val->intval = pchg->pdata->get_batt_present();
+ break;
+ case POWER_SUPPLY_PROP_VOLTAGE_NOW:
+ if (pchg->pdata->get_batt_level)
+ val->intval = pchg->pdata->get_batt_level();
+ break;
+ case POWER_SUPPLY_PROP_CAPACITY:
+ if (pchg->pdata->get_batt_capacity)
+ val->intval = pchg->pdata->get_batt_capacity();
+ break;
+ case POWER_SUPPLY_PROP_TEMP:
+ if (pchg->pdata->get_batt_temp)
+ val->intval = pchg->pdata->get_batt_temp();
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static void lp8727_charger_changed(struct power_supply *psy)
+{
+ struct lp8727_chg *pchg = dev_get_drvdata(psy->dev->parent);
+ u8 val;
+ u8 eoc_level, ichg;
+
+ if (lp8727_is_charger_attached(psy->name, pchg->devid)) {
+ if (pchg->chg_parm) {
+ eoc_level = pchg->chg_parm->eoc_level;
+ ichg = pchg->chg_parm->ichg;
+ val = (ichg << 4) | eoc_level;
+ lp8727_i2c_write_byte(pchg, CHGCTRL2, &val);
+ }
+ }
+}
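+
+/*
+ * Worked example for the CHGCTRL2 encoding above: with chg_parm->ichg == 0xB
+ * and chg_parm->eoc_level == 0x3, val becomes (0xB << 4) | 0x3 == 0xB3, so
+ * the charge current setting lands in the upper nibble and the end-of-charge
+ * level in the lower nibble.
+ */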
+
+static int lp8727_register_psy(struct lp8727_chg *pchg)
+{
+ struct lp8727_psy *psy;
+
+ psy = kzalloc(sizeof(*psy), GFP_KERNEL);
+ if (!psy)
+ goto err_mem;
+
+ pchg->psy = psy;
+
+ psy->ac.name = "ac";
+ psy->ac.type = POWER_SUPPLY_TYPE_MAINS;
+ psy->ac.properties = lp8727_charger_prop;
+ psy->ac.num_properties = ARRAY_SIZE(lp8727_charger_prop);
+ psy->ac.get_property = lp8727_charger_get_property;
+ psy->ac.supplied_to = battery_supplied_to;
+ psy->ac.num_supplicants = ARRAY_SIZE(battery_supplied_to);
+
+ if (power_supply_register(pchg->dev, &psy->ac))
+ goto err_psy;
+
+ psy->usb.name = "usb";
+ psy->usb.type = POWER_SUPPLY_TYPE_USB;
+ psy->usb.properties = lp8727_charger_prop;
+ psy->usb.num_properties = ARRAY_SIZE(lp8727_charger_prop);
+ psy->usb.get_property = lp8727_charger_get_property;
+ psy->usb.supplied_to = battery_supplied_to;
+ psy->usb.num_supplicants = ARRAY_SIZE(battery_supplied_to);
+
+ if (power_supply_register(pchg->dev, &psy->usb))
+ goto err_psy;
+
+ psy->batt.name = "main_batt";
+ psy->batt.type = POWER_SUPPLY_TYPE_BATTERY;
+ psy->batt.properties = lp8727_battery_prop;
+ psy->batt.num_properties = ARRAY_SIZE(lp8727_battery_prop);
+ psy->batt.get_property = lp8727_battery_get_property;
+ psy->batt.external_power_changed = lp8727_charger_changed;
+
+ if (power_supply_register(pchg->dev, &psy->batt))
+ goto err_psy;
+
+ return 0;
+
+err_mem:
+ return -ENOMEM;
+err_psy:
+ kfree(psy);
+ return -EPERM;
+}
+
+static void lp8727_unregister_psy(struct lp8727_chg *pchg)
+{
+ struct lp8727_psy *psy = pchg->psy;
+
+ if (!psy)
+ return;
+
+ power_supply_unregister(&psy->ac);
+ power_supply_unregister(&psy->usb);
+ power_supply_unregister(&psy->batt);
+ kfree(psy);
+}
+
+static int lp8727_probe(struct i2c_client *cl, const struct i2c_device_id *id)
+{
+ struct lp8727_chg *pchg;
+ int ret;
+
+ if (!i2c_check_functionality(cl->adapter, I2C_FUNC_SMBUS_I2C_BLOCK))
+ return -EIO;
+
+ pchg = kzalloc(sizeof(*pchg), GFP_KERNEL);
+ if (!pchg)
+ return -ENOMEM;
+
+ pchg->client = cl;
+ pchg->dev = &cl->dev;
+ pchg->pdata = cl->dev.platform_data;
+ i2c_set_clientdata(cl, pchg);
+
+ mutex_init(&pchg->xfer_lock);
+
+ lp8727_init_device(pchg);
+ lp8727_intr_config(pchg);
+
+ ret = lp8727_register_psy(pchg);
+ if (ret)
+ dev_err(pchg->dev,
+ "can not register power supplies. err=%d", ret);
+
+ return 0;
+}
+
+static int __devexit lp8727_remove(struct i2c_client *cl)
+{
+ struct lp8727_chg *pchg = i2c_get_clientdata(cl);
+
+ lp8727_unregister_psy(pchg);
+ free_irq(pchg->client->irq, pchg);
+ flush_workqueue(pchg->irqthread);
+ destroy_workqueue(pchg->irqthread);
+ kfree(pchg);
+ return 0;
+}
+
+static const struct i2c_device_id lp8727_ids[] = {
+ {"lp8727", 0},
+ { }
+};
+
+static struct i2c_driver lp8727_driver = {
+ .driver = {
+ .name = "lp8727",
+ },
+ .probe = lp8727_probe,
+ .remove = __devexit_p(lp8727_remove),
+ .id_table = lp8727_ids,
+};
+
+static int __init lp8727_init(void)
+{
+ return i2c_add_driver(&lp8727_driver);
+}
+
+static void __exit lp8727_exit(void)
+{
+ i2c_del_driver(&lp8727_driver);
+}
+
+module_init(lp8727_init);
+module_exit(lp8727_exit);
+
+MODULE_DESCRIPTION("National Semiconductor LP8727 charger driver");
+MODULE_AUTHOR("Woogyom Kim <milo.kim@ti.com>, Daniel Jeong <daniel.jeong@ti.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
index 9f0183c..86acee2 100644
--- a/drivers/power/max17042_battery.c
+++ b/drivers/power/max17042_battery.c
@@ -85,55 +85,79 @@ static int max17042_get_property(struct power_supply *psy,
{
struct max17042_chip *chip = container_of(psy,
struct max17042_chip, battery);
+ int ret;
switch (psp) {
case POWER_SUPPLY_PROP_PRESENT:
- val->intval = max17042_read_reg(chip->client,
- MAX17042_STATUS);
- if (val->intval & MAX17042_STATUS_BattAbsent)
+ ret = max17042_read_reg(chip->client, MAX17042_STATUS);
+ if (ret < 0)
+ return ret;
+
+ if (ret & MAX17042_STATUS_BattAbsent)
val->intval = 0;
else
val->intval = 1;
break;
case POWER_SUPPLY_PROP_CYCLE_COUNT:
- val->intval = max17042_read_reg(chip->client,
- MAX17042_Cycles);
+ ret = max17042_read_reg(chip->client, MAX17042_Cycles);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret;
break;
case POWER_SUPPLY_PROP_VOLTAGE_MAX:
- val->intval = max17042_read_reg(chip->client,
- MAX17042_MinMaxVolt);
- val->intval >>= 8;
+ ret = max17042_read_reg(chip->client, MAX17042_MinMaxVolt);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret >> 8;
val->intval *= 20000; /* Units of LSB = 20mV */
break;
case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
- val->intval = max17042_read_reg(chip->client,
- MAX17042_V_empty);
- val->intval >>= 7;
+ ret = max17042_read_reg(chip->client, MAX17042_V_empty);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret >> 7;
val->intval *= 10000; /* Units of LSB = 10mV */
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
- val->intval = max17042_read_reg(chip->client,
- MAX17042_VCELL) * 83; /* 1000 / 12 = 83 */
+ ret = max17042_read_reg(chip->client, MAX17042_VCELL);
+ if (ret < 0)
+ return ret;
+
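+ /* VCELL LSB is 625/8 uV, so the result is in microvolts */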
+ val->intval = ret * 625 / 8;
break;
case POWER_SUPPLY_PROP_VOLTAGE_AVG:
- val->intval = max17042_read_reg(chip->client,
- MAX17042_AvgVCELL) * 83;
+ ret = max17042_read_reg(chip->client, MAX17042_AvgVCELL);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret * 625 / 8;
break;
case POWER_SUPPLY_PROP_CAPACITY:
- val->intval = max17042_read_reg(chip->client,
- MAX17042_SOC) / 256;
+ ret = max17042_read_reg(chip->client, MAX17042_SOC);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret >> 8;
break;
case POWER_SUPPLY_PROP_CHARGE_FULL:
- val->intval = max17042_read_reg(chip->client,
- MAX17042_RepSOC);
- if ((val->intval / 256) >= MAX17042_BATTERY_FULL)
+ ret = max17042_read_reg(chip->client, MAX17042_RepSOC);
+ if (ret < 0)
+ return ret;
+
+ if ((ret >> 8) >= MAX17042_BATTERY_FULL)
val->intval = 1;
- else if (val->intval >= 0)
+ else if (ret >= 0)
val->intval = 0;
break;
case POWER_SUPPLY_PROP_TEMP:
- val->intval = max17042_read_reg(chip->client,
- MAX17042_TEMP);
+ ret = max17042_read_reg(chip->client, MAX17042_TEMP);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret;
/* The value is signed. */
if (val->intval & 0x8000) {
val->intval = (0x7fff & ~val->intval) + 1;
@@ -145,24 +169,30 @@ static int max17042_get_property(struct power_supply *psy,
break;
case POWER_SUPPLY_PROP_CURRENT_NOW:
if (chip->pdata->enable_current_sense) {
- val->intval = max17042_read_reg(chip->client,
- MAX17042_Current);
+ ret = max17042_read_reg(chip->client, MAX17042_Current);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret;
if (val->intval & 0x8000) {
/* Negative */
val->intval = ~val->intval & 0x7fff;
val->intval++;
val->intval *= -1;
}
- val->intval >>= 4;
- val->intval *= 1000000 * 25 / chip->pdata->r_sns;
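+ /* current LSB is 1.5625uV / r_sns (micro-ohms), giving uA */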
+ val->intval *= 1562500 / chip->pdata->r_sns;
} else {
return -EINVAL;
}
break;
case POWER_SUPPLY_PROP_CURRENT_AVG:
if (chip->pdata->enable_current_sense) {
- val->intval = max17042_read_reg(chip->client,
- MAX17042_AvgCurrent);
+ ret = max17042_read_reg(chip->client,
+ MAX17042_AvgCurrent);
+ if (ret < 0)
+ return ret;
+
+ val->intval = ret;
if (val->intval & 0x8000) {
/* Negative */
val->intval = ~val->intval & 0x7fff;
@@ -210,6 +240,9 @@ static int __devinit max17042_probe(struct i2c_client *client,
if (!chip->pdata->enable_current_sense)
chip->battery.num_properties -= 2;
+ if (chip->pdata->r_sns == 0)
+ chip->pdata->r_sns = MAX17042_DEFAULT_SNS_RESISTOR;
+
ret = power_supply_register(&client->dev, &chip->battery);
if (ret) {
dev_err(&client->dev, "failed: power supply register\n");
@@ -226,9 +259,6 @@ static int __devinit max17042_probe(struct i2c_client *client,
max17042_write_reg(client, MAX17042_CGAIN, 0x0000);
max17042_write_reg(client, MAX17042_MiscCFG, 0x0003);
max17042_write_reg(client, MAX17042_LearnCFG, 0x0007);
- } else {
- if (chip->pdata->r_sns == 0)
- chip->pdata->r_sns = MAX17042_DEFAULT_SNS_RESISTOR;
}
return 0;
diff --git a/drivers/power/max8903_charger.c b/drivers/power/max8903_charger.c
index 2595145..3e23f43 100644
--- a/drivers/power/max8903_charger.c
+++ b/drivers/power/max8903_charger.c
@@ -374,19 +374,9 @@ static struct platform_driver max8903_driver = {
},
};
-static int __init max8903_init(void)
-{
- return platform_driver_register(&max8903_driver);
-}
-module_init(max8903_init);
-
-static void __exit max8903_exit(void)
-{
- platform_driver_unregister(&max8903_driver);
-}
-module_exit(max8903_exit);
+module_platform_driver(max8903_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MAX8903 Charger Driver");
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
-MODULE_ALIAS("max8903-charger");
+MODULE_ALIAS("platform:max8903-charger");
diff --git a/drivers/power/max8925_power.c b/drivers/power/max8925_power.c
index a70e16d..daa333b 100644
--- a/drivers/power/max8925_power.c
+++ b/drivers/power/max8925_power.c
@@ -78,6 +78,8 @@ struct max8925_power_info {
unsigned batt_detect:1; /* detecing MB by ID pin */
unsigned topoff_threshold:2;
unsigned fast_charge:3;
+ unsigned no_temp_support:1;
+ unsigned no_insert_detect:1;
int (*set_charger) (int);
};
@@ -116,17 +118,7 @@ static irqreturn_t max8925_charger_handler(int irq, void *data)
case MAX8925_IRQ_VCHG_DC_F:
info->ac_online = 0;
__set_charger(info, 0);
- dev_dbg(chip->dev, "Adapter is removal\n");
- break;
- case MAX8925_IRQ_VCHG_USB_R:
- info->usb_online = 1;
- __set_charger(info, 1);
- dev_dbg(chip->dev, "USB inserted\n");
- break;
- case MAX8925_IRQ_VCHG_USB_F:
- info->usb_online = 0;
- __set_charger(info, 0);
- dev_dbg(chip->dev, "USB is removal\n");
+ dev_dbg(chip->dev, "Adapter removed\n");
break;
case MAX8925_IRQ_VCHG_THM_OK_F:
/* Battery is not ready yet */
@@ -168,27 +160,33 @@ static irqreturn_t max8925_charger_handler(int irq, void *data)
static int start_measure(struct max8925_power_info *info, int type)
{
unsigned char buf[2] = {0, 0};
+ int meas_cmd;
int meas_reg = 0, ret;
switch (type) {
case MEASURE_VCHG:
+ meas_cmd = MAX8925_CMD_VCHG;
meas_reg = MAX8925_ADC_VCHG;
break;
case MEASURE_VBBATT:
+ meas_cmd = MAX8925_CMD_VBBATT;
meas_reg = MAX8925_ADC_VBBATT;
break;
case MEASURE_VMBATT:
+ meas_cmd = MAX8925_CMD_VMBATT;
meas_reg = MAX8925_ADC_VMBATT;
break;
case MEASURE_ISNS:
+ meas_cmd = MAX8925_CMD_ISNS;
meas_reg = MAX8925_ADC_ISNS;
break;
default:
return -EINVAL;
}
+ max8925_reg_write(info->adc, meas_cmd, 0);
max8925_bulk_read(info->adc, meas_reg, 2, buf);
- ret = (buf[0] << 4) | (buf[1] >> 4);
+ ret = ((buf[0]<<8) | buf[1]) >> 4;
return ret;
}
@@ -208,7 +206,7 @@ static int max8925_ac_get_prop(struct power_supply *psy,
if (info->ac_online) {
ret = start_measure(info, MEASURE_VCHG);
if (ret >= 0) {
- val->intval = ret << 1; /* unit is mV */
+ val->intval = ret * 2000; /* unit is uV */
goto out;
}
}
@@ -242,7 +240,7 @@ static int max8925_usb_get_prop(struct power_supply *psy,
if (info->usb_online) {
ret = start_measure(info, MEASURE_VCHG);
if (ret >= 0) {
- val->intval = ret << 1; /* unit is mV */
+ val->intval = ret * 2000; /* unit is uV */
goto out;
}
}
@@ -266,7 +264,6 @@ static int max8925_bat_get_prop(struct power_supply *psy,
union power_supply_propval *val)
{
struct max8925_power_info *info = dev_get_drvdata(psy->dev->parent);
- long long int tmp = 0;
int ret = 0;
switch (psp) {
@@ -277,7 +274,7 @@ static int max8925_bat_get_prop(struct power_supply *psy,
if (info->bat_online) {
ret = start_measure(info, MEASURE_VMBATT);
if (ret >= 0) {
- val->intval = ret << 1; /* unit is mV */
+ val->intval = ret * 2000; /* unit is uV */
ret = 0;
break;
}
@@ -288,8 +285,8 @@ static int max8925_bat_get_prop(struct power_supply *psy,
if (info->bat_online) {
ret = start_measure(info, MEASURE_ISNS);
if (ret >= 0) {
- tmp = (long long int)ret * 6250 / 4096 - 3125;
- ret = (int)tmp;
+ /* assume r_sns is 0.02 ohm */
+ ret = ((ret * 6250) - 3125) /* uA */;
val->intval = 0;
if (ret > 0)
val->intval = ret; /* unit is mA */
@@ -365,13 +362,14 @@ static __devinit int max8925_init_charger(struct max8925_chip *chip,
int ret;
REQUEST_IRQ(MAX8925_IRQ_VCHG_DC_OVP, "ac-ovp");
- REQUEST_IRQ(MAX8925_IRQ_VCHG_DC_F, "ac-remove");
- REQUEST_IRQ(MAX8925_IRQ_VCHG_DC_R, "ac-insert");
- REQUEST_IRQ(MAX8925_IRQ_VCHG_USB_OVP, "usb-ovp");
- REQUEST_IRQ(MAX8925_IRQ_VCHG_USB_F, "usb-remove");
- REQUEST_IRQ(MAX8925_IRQ_VCHG_USB_R, "usb-insert");
- REQUEST_IRQ(MAX8925_IRQ_VCHG_THM_OK_R, "batt-temp-in-range");
- REQUEST_IRQ(MAX8925_IRQ_VCHG_THM_OK_F, "batt-temp-out-range");
+ if (!info->no_insert_detect) {
+ REQUEST_IRQ(MAX8925_IRQ_VCHG_DC_F, "ac-remove");
+ REQUEST_IRQ(MAX8925_IRQ_VCHG_DC_R, "ac-insert");
+ }
+ if (!info->no_temp_support) {
+ REQUEST_IRQ(MAX8925_IRQ_VCHG_THM_OK_R, "batt-temp-in-range");
+ REQUEST_IRQ(MAX8925_IRQ_VCHG_THM_OK_F, "batt-temp-out-range");
+ }
REQUEST_IRQ(MAX8925_IRQ_VCHG_SYSLOW_F, "vsys-high");
REQUEST_IRQ(MAX8925_IRQ_VCHG_SYSLOW_R, "vsys-low");
REQUEST_IRQ(MAX8925_IRQ_VCHG_RST, "charger-reset");
@@ -379,9 +377,15 @@ static __devinit int max8925_init_charger(struct max8925_chip *chip,
REQUEST_IRQ(MAX8925_IRQ_VCHG_TOPOFF, "charger-topoff");
REQUEST_IRQ(MAX8925_IRQ_VCHG_TMR_FAULT, "charger-timer-expire");
- info->ac_online = 0;
info->usb_online = 0;
info->bat_online = 0;
+
+ /* check for power - can miss interrupt at boot time */
+ if (start_measure(info, MEASURE_VCHG) * 2000 > 500000)
+ info->ac_online = 1;
+ else
+ info->ac_online = 0;
+
ret = max8925_reg_read(info->gpm, MAX8925_CHG_STATUS);
if (ret >= 0) {
/*
@@ -449,6 +453,8 @@ static __devinit int max8925_power_probe(struct platform_device *pdev)
info->ac.properties = max8925_ac_props;
info->ac.num_properties = ARRAY_SIZE(max8925_ac_props);
info->ac.get_property = max8925_ac_get_prop;
+ info->ac.supplied_to = pdata->supplied_to;
+ info->ac.num_supplicants = pdata->num_supplicants;
ret = power_supply_register(&pdev->dev, &info->ac);
if (ret)
goto out;
@@ -459,6 +465,9 @@ static __devinit int max8925_power_probe(struct platform_device *pdev)
info->usb.properties = max8925_usb_props;
info->usb.num_properties = ARRAY_SIZE(max8925_usb_props);
info->usb.get_property = max8925_usb_get_prop;
+ info->usb.supplied_to = pdata->supplied_to;
+ info->usb.num_supplicants = pdata->num_supplicants;
+
ret = power_supply_register(&pdev->dev, &info->usb);
if (ret)
goto out_usb;
@@ -478,6 +487,8 @@ static __devinit int max8925_power_probe(struct platform_device *pdev)
info->topoff_threshold = pdata->topoff_threshold;
info->fast_charge = pdata->fast_charge;
info->set_charger = pdata->set_charger;
+ info->no_temp_support = pdata->no_temp_support;
+ info->no_insert_detect = pdata->no_insert_detect;
max8925_init_charger(chip, info);
return 0;
@@ -512,17 +523,7 @@ static struct platform_driver max8925_power_driver = {
},
};
-static int __init max8925_power_init(void)
-{
- return platform_driver_register(&max8925_power_driver);
-}
-module_init(max8925_power_init);
-
-static void __exit max8925_power_exit(void)
-{
- platform_driver_unregister(&max8925_power_driver);
-}
-module_exit(max8925_power_exit);
+module_platform_driver(max8925_power_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Power supply driver for MAX8925");
diff --git a/drivers/power/max8997_charger.c b/drivers/power/max8997_charger.c
index a23317d..6e88c5d 100644
--- a/drivers/power/max8997_charger.c
+++ b/drivers/power/max8997_charger.c
@@ -19,7 +19,6 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <linux/module.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -98,7 +97,7 @@ static __devinit int max8997_battery_probe(struct platform_device *pdev)
return -EINVAL;
if (pdata->eoc_mA) {
- u8 val = (pdata->eoc_mA - 50) / 10;
+ int val = (pdata->eoc_mA - 50) / 10;
if (val < 0)
val = 0;
if (val > 0xf)
@@ -179,6 +178,7 @@ static int __devexit max8997_battery_remove(struct platform_device *pdev)
static const struct platform_device_id max8997_battery_id[] = {
{ "max8997-battery", 0 },
+ { }
};
static struct platform_driver max8997_battery_driver = {
diff --git a/drivers/power/max8998_charger.c b/drivers/power/max8998_charger.c
index 93e3bb4..9b3f2bf 100644
--- a/drivers/power/max8998_charger.c
+++ b/drivers/power/max8998_charger.c
@@ -154,6 +154,7 @@ static __devinit int max8998_battery_probe(struct platform_device *pdev)
case 0:
dev_dbg(max8998->dev,
"Full Timeout not set: leave it unchanged.\n");
+ break;
default:
dev_err(max8998->dev, "Invalid Full Timeout value\n");
ret = -EINVAL;
@@ -190,6 +191,7 @@ static int __devexit max8998_battery_remove(struct platform_device *pdev)
static const struct platform_device_id max8998_battery_id[] = {
{ "max8998-battery", TYPE_MAX8998 },
+ { }
};
static struct platform_driver max8998_battery_driver = {
@@ -202,17 +204,7 @@ static struct platform_driver max8998_battery_driver = {
.id_table = max8998_battery_id,
};
-static int __init max8998_battery_init(void)
-{
- return platform_driver_register(&max8998_battery_driver);
-}
-module_init(max8998_battery_init);
-
-static void __exit max8998_battery_cleanup(void)
-{
- platform_driver_unregister(&max8998_battery_driver);
-}
-module_exit(max8998_battery_cleanup);
+module_platform_driver(max8998_battery_driver);
MODULE_DESCRIPTION("MAXIM 8998 battery control driver");
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c
index 0b0ff3a..7385092 100644
--- a/drivers/power/olpc_battery.c
+++ b/drivers/power/olpc_battery.c
@@ -519,29 +519,35 @@ static struct device_attribute olpc_bat_error = {
* Initialisation
*********************************************************************/
-static struct platform_device *bat_pdev;
-
static struct power_supply olpc_bat = {
+ .name = "olpc-battery",
.get_property = olpc_bat_get_property,
.use_for_apm = 1,
};
-void olpc_battery_trigger_uevent(unsigned long cause)
+static int olpc_battery_suspend(struct platform_device *pdev,
+ pm_message_t state)
{
- if (cause & EC_SCI_SRC_ACPWR)
- kobject_uevent(&olpc_ac.dev->kobj, KOBJ_CHANGE);
- if (cause & (EC_SCI_SRC_BATERR|EC_SCI_SRC_BATSOC|EC_SCI_SRC_BATTERY))
- kobject_uevent(&olpc_bat.dev->kobj, KOBJ_CHANGE);
+ if (device_may_wakeup(olpc_ac.dev))
+ olpc_ec_wakeup_set(EC_SCI_SRC_ACPWR);
+ else
+ olpc_ec_wakeup_clear(EC_SCI_SRC_ACPWR);
+
+ if (device_may_wakeup(olpc_bat.dev))
+ olpc_ec_wakeup_set(EC_SCI_SRC_BATTERY | EC_SCI_SRC_BATSOC
+ | EC_SCI_SRC_BATERR);
+ else
+ olpc_ec_wakeup_clear(EC_SCI_SRC_BATTERY | EC_SCI_SRC_BATSOC
+ | EC_SCI_SRC_BATERR);
+
+ return 0;
}
-static int __init olpc_bat_init(void)
+static int __devinit olpc_battery_probe(struct platform_device *pdev)
{
- int ret = 0;
+ int ret;
uint8_t status;
- if (!olpc_platform_info.ecver)
- return -ENXIO;
-
/*
* We've seen a number of EC protocol changes; this driver requires
* the latest EC protocol, supported by 0x44 and above.
@@ -558,15 +564,10 @@ static int __init olpc_bat_init(void)
/* Ignore the status. It doesn't actually matter */
- bat_pdev = platform_device_register_simple("olpc-battery", 0, NULL, 0);
- if (IS_ERR(bat_pdev))
- return PTR_ERR(bat_pdev);
-
- ret = power_supply_register(&bat_pdev->dev, &olpc_ac);
+ ret = power_supply_register(&pdev->dev, &olpc_ac);
if (ret)
- goto ac_failed;
+ return ret;
- olpc_bat.name = bat_pdev->name;
if (olpc_board_at_least(olpc_board_pre(0xd0))) { /* XO-1.5 */
olpc_bat.properties = olpc_xo15_bat_props;
olpc_bat.num_properties = ARRAY_SIZE(olpc_xo15_bat_props);
@@ -575,7 +576,7 @@ static int __init olpc_bat_init(void)
olpc_bat.num_properties = ARRAY_SIZE(olpc_xo1_bat_props);
}
- ret = power_supply_register(&bat_pdev->dev, &olpc_bat);
+ ret = power_supply_register(&pdev->dev, &olpc_bat);
if (ret)
goto battery_failed;
@@ -587,7 +588,12 @@ static int __init olpc_bat_init(void)
if (ret)
goto error_failed;
- goto success;
+ if (olpc_ec_wakeup_available()) {
+ device_set_wakeup_capable(olpc_ac.dev, true);
+ device_set_wakeup_capable(olpc_bat.dev, true);
+ }
+
+ return 0;
error_failed:
device_remove_bin_file(olpc_bat.dev, &olpc_bat_eeprom);
@@ -595,23 +601,36 @@ eeprom_failed:
power_supply_unregister(&olpc_bat);
battery_failed:
power_supply_unregister(&olpc_ac);
-ac_failed:
- platform_device_unregister(bat_pdev);
-success:
return ret;
}
-static void __exit olpc_bat_exit(void)
+static int __devexit olpc_battery_remove(struct platform_device *pdev)
{
device_remove_file(olpc_bat.dev, &olpc_bat_error);
device_remove_bin_file(olpc_bat.dev, &olpc_bat_eeprom);
power_supply_unregister(&olpc_bat);
power_supply_unregister(&olpc_ac);
- platform_device_unregister(bat_pdev);
+ return 0;
}
-module_init(olpc_bat_init);
-module_exit(olpc_bat_exit);
+static const struct of_device_id olpc_battery_ids[] __devinitconst = {
+ { .compatible = "olpc,xo1-battery" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, olpc_battery_ids);
+
+static struct platform_driver olpc_battery_driver = {
+ .driver = {
+ .name = "olpc-battery",
+ .owner = THIS_MODULE,
+ .of_match_table = olpc_battery_ids,
+ },
+ .probe = olpc_battery_probe,
+ .remove = __devexit_p(olpc_battery_remove),
+ .suspend = olpc_battery_suspend,
+};
+
+module_platform_driver(olpc_battery_driver);
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_LICENSE("GPL");
diff --git a/drivers/power/pcf50633-charger.c b/drivers/power/pcf50633-charger.c
index 4fa52e1..3d1e9ef 100644
--- a/drivers/power/pcf50633-charger.c
+++ b/drivers/power/pcf50633-charger.c
@@ -474,17 +474,7 @@ static struct platform_driver pcf50633_mbc_driver = {
.remove = __devexit_p(pcf50633_mbc_remove),
};
-static int __init pcf50633_mbc_init(void)
-{
- return platform_driver_register(&pcf50633_mbc_driver);
-}
-module_init(pcf50633_mbc_init);
-
-static void __exit pcf50633_mbc_exit(void)
-{
- platform_driver_unregister(&pcf50633_mbc_driver);
-}
-module_exit(pcf50633_mbc_exit);
+module_platform_driver(pcf50633_mbc_driver);
MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
MODULE_DESCRIPTION("PCF50633 mbc driver");
diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
index 69f8aa3..fd49689 100644
--- a/drivers/power/pda_power.c
+++ b/drivers/power/pda_power.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/interrupt.h>
+#include <linux/notifier.h>
#include <linux/power_supply.h>
#include <linux/pda_power.h>
#include <linux/regulator/consumer.h>
@@ -40,7 +41,9 @@ static int polling;
#ifdef CONFIG_USB_OTG_UTILS
static struct otg_transceiver *transceiver;
+static struct notifier_block otg_nb;
#endif
+
static struct regulator *ac_draw;
enum {
@@ -222,7 +225,42 @@ static void polling_timer_func(unsigned long unused)
#ifdef CONFIG_USB_OTG_UTILS
static int otg_is_usb_online(void)
{
- return (transceiver->state == OTG_STATE_B_PERIPHERAL);
+ return (transceiver->last_event == USB_EVENT_VBUS ||
+ transceiver->last_event == USB_EVENT_ENUMERATED);
+}
+
+static int otg_is_ac_online(void)
+{
+ return (transceiver->last_event == USB_EVENT_CHARGER);
+}
+
+static int otg_handle_notification(struct notifier_block *nb,
+ unsigned long event, void *unused)
+{
+ switch (event) {
+ case USB_EVENT_CHARGER:
+ ac_status = PDA_PSY_TO_CHANGE;
+ break;
+ case USB_EVENT_VBUS:
+ case USB_EVENT_ENUMERATED:
+ usb_status = PDA_PSY_TO_CHANGE;
+ break;
+ case USB_EVENT_NONE:
+ ac_status = PDA_PSY_TO_CHANGE;
+ usb_status = PDA_PSY_TO_CHANGE;
+ break;
+ default:
+ return NOTIFY_OK;
+ }
+
+ /*
+ * Wait a bit before reading ac/usb line status and setting charger,
+ * because ac/usb status readings may lag behind the interrupt.
+ */
+ mod_timer(&charger_timer,
+ jiffies + msecs_to_jiffies(pdata->wait_for_status));
+
+ return NOTIFY_OK;
}
#endif
@@ -282,6 +320,16 @@ static int pda_power_probe(struct platform_device *pdev)
ret = PTR_ERR(ac_draw);
}
+#ifdef CONFIG_USB_OTG_UTILS
+ transceiver = otg_get_transceiver();
+ if (transceiver && !pdata->is_usb_online) {
+ pdata->is_usb_online = otg_is_usb_online;
+ }
+ if (transceiver && !pdata->is_ac_online) {
+ pdata->is_ac_online = otg_is_ac_online;
+ }
+#endif
+
if (pdata->is_ac_online) {
ret = power_supply_register(&pdev->dev, &pda_psy_ac);
if (ret) {
@@ -303,13 +351,6 @@ static int pda_power_probe(struct platform_device *pdev)
}
}
-#ifdef CONFIG_USB_OTG_UTILS
- transceiver = otg_get_transceiver();
- if (transceiver && !pdata->is_usb_online) {
- pdata->is_usb_online = otg_is_usb_online;
- }
-#endif
-
if (pdata->is_usb_online) {
ret = power_supply_register(&pdev->dev, &pda_psy_usb);
if (ret) {
@@ -331,6 +372,18 @@ static int pda_power_probe(struct platform_device *pdev)
}
}
+#ifdef CONFIG_USB_OTG_UTILS
+ if (transceiver && pdata->use_otg_notifier) {
+ otg_nb.notifier_call = otg_handle_notification;
+ ret = otg_register_notifier(transceiver, &otg_nb);
+ if (ret) {
+ dev_err(dev, "failure to register otg notifier\n");
+ goto otg_reg_notifier_failed;
+ }
+ polling = 0;
+ }
+#endif
+
if (polling) {
dev_dbg(dev, "will poll for status\n");
setup_timer(&polling_timer, polling_timer_func, 0);
@@ -343,6 +396,11 @@ static int pda_power_probe(struct platform_device *pdev)
return 0;
+#ifdef CONFIG_USB_OTG_UTILS
+otg_reg_notifier_failed:
+ if (pdata->is_usb_online && usb_irq)
+ free_irq(usb_irq->start, &pda_psy_usb);
+#endif
usb_irq_failed:
if (pdata->is_usb_online)
power_supply_unregister(&pda_psy_usb);
@@ -440,8 +498,6 @@ static int pda_power_resume(struct platform_device *pdev)
#define pda_power_resume NULL
#endif /* CONFIG_PM */
-MODULE_ALIAS("platform:pda-power");
-
static struct platform_driver pda_power_pdrv = {
.driver = {
.name = "pda-power",
@@ -452,17 +508,8 @@ static struct platform_driver pda_power_pdrv = {
.resume = pda_power_resume,
};
-static int __init pda_power_init(void)
-{
- return platform_driver_register(&pda_power_pdrv);
-}
+module_platform_driver(pda_power_pdrv);
-static void __exit pda_power_exit(void)
-{
- platform_driver_unregister(&pda_power_pdrv);
-}
-
-module_init(pda_power_init);
-module_exit(pda_power_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Anton Vorontsov <cbou@mail.ru>");
+MODULE_ALIAS("platform:pda-power");
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 329b46b..6ad6127 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -98,7 +98,9 @@ static int __power_supply_is_system_supplied(struct device *dev, void *data)
{
union power_supply_propval ret = {0,};
struct power_supply *psy = dev_get_drvdata(dev);
+ unsigned int *count = data;
+ (*count)++;
if (psy->type != POWER_SUPPLY_TYPE_BATTERY) {
if (psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &ret))
return 0;
@@ -111,10 +113,18 @@ static int __power_supply_is_system_supplied(struct device *dev, void *data)
int power_supply_is_system_supplied(void)
{
int error;
+ unsigned int count = 0;
- error = class_for_each_device(power_supply_class, NULL, NULL,
+ error = class_for_each_device(power_supply_class, NULL, &count,
__power_supply_is_system_supplied);
+ /*
+ * If no power class device was found at all, most probably we are
+ * running on a desktop system, so assume we are on mains power.
+ */
+ if (count == 0)
+ return 1;
+
return error;
}
EXPORT_SYMBOL_GPL(power_supply_is_system_supplied);
@@ -147,6 +157,12 @@ struct power_supply *power_supply_get_by_name(char *name)
}
EXPORT_SYMBOL_GPL(power_supply_get_by_name);
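+/* create a sysfs "powers" link from the supply to the device it powers */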
+int power_supply_powers(struct power_supply *psy, struct device *dev)
+{
+ return sysfs_create_link(&psy->dev->kobj, &dev->kobj, "powers");
+}
+EXPORT_SYMBOL_GPL(power_supply_powers);
+
static void power_supply_dev_release(struct device *dev)
{
pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
@@ -202,6 +218,7 @@ EXPORT_SYMBOL_GPL(power_supply_register);
void power_supply_unregister(struct power_supply *psy)
{
cancel_work_sync(&psy->changed_work);
+ sysfs_remove_link(&psy->dev->kobj, "powers");
power_supply_remove_triggers(psy);
device_unregister(psy->dev);
}
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index e15d4c9..b52b57c 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -43,7 +43,7 @@ static ssize_t power_supply_show_property(struct device *dev,
struct device_attribute *attr,
char *buf) {
static char *type_text[] = {
- "Battery", "UPS", "Mains", "USB",
+ "Unknown", "Battery", "UPS", "Mains", "USB",
"USB_DCP", "USB_CDP", "USB_ACA"
};
static char *status_text[] = {
@@ -63,6 +63,9 @@ static ssize_t power_supply_show_property(struct device *dev,
static char *capacity_level_text[] = {
"Unknown", "Critical", "Low", "Normal", "High", "Full"
};
+ static char *scope_text[] = {
+ "Unknown", "System", "Device"
+ };
ssize_t ret = 0;
struct power_supply *psy = dev_get_drvdata(dev);
const ptrdiff_t off = attr - power_supply_attrs;
@@ -78,8 +81,8 @@ static ssize_t power_supply_show_property(struct device *dev,
dev_dbg(dev, "driver has no data for `%s' property\n",
attr->attr.name);
else if (ret != -ENODEV)
- dev_err(dev, "driver failed to report `%s' property\n",
- attr->attr.name);
+ dev_err(dev, "driver failed to report `%s' property: %zd\n",
+ attr->attr.name, ret);
return ret;
}
@@ -95,6 +98,8 @@ static ssize_t power_supply_show_property(struct device *dev,
return sprintf(buf, "%s\n", capacity_level_text[value.intval]);
else if (off == POWER_SUPPLY_PROP_TYPE)
return sprintf(buf, "%s\n", type_text[value.intval]);
+ else if (off == POWER_SUPPLY_PROP_SCOPE)
+ return sprintf(buf, "%s\n", scope_text[value.intval]);
else if (off >= POWER_SUPPLY_PROP_MODEL_NAME)
return sprintf(buf, "%s\n", value.strval);
@@ -167,6 +172,7 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(time_to_full_now),
POWER_SUPPLY_ATTR(time_to_full_avg),
POWER_SUPPLY_ATTR(type),
+ POWER_SUPPLY_ATTR(scope),
/* Properties of type `const char *' */
POWER_SUPPLY_ATTR(model_name),
POWER_SUPPLY_ATTR(manufacturer),
@@ -176,13 +182,13 @@ static struct device_attribute power_supply_attrs[] = {
static struct attribute *
__power_supply_attrs[ARRAY_SIZE(power_supply_attrs) + 1];
-static mode_t power_supply_attr_is_visible(struct kobject *kobj,
+static umode_t power_supply_attr_is_visible(struct kobject *kobj,
struct attribute *attr,
int attrno)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct power_supply *psy = dev_get_drvdata(dev);
- mode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
+ umode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
int i;
if (attrno == POWER_SUPPLY_PROP_TYPE)
diff --git a/drivers/power/s3c_adc_battery.c b/drivers/power/s3c_adc_battery.c
index d32d0d7..8b804a5 100644
--- a/drivers/power/s3c_adc_battery.c
+++ b/drivers/power/s3c_adc_battery.c
@@ -47,6 +47,22 @@ static void s3c_adc_bat_ext_power_changed(struct power_supply *psy)
msecs_to_jiffies(JITTER_DELAY));
}
+static int gather_samples(struct s3c_adc_client *client, int num, int channel)
+{
+ int value, i;
+
+ /* default to 1 if nothing is set */
+ if (num < 1)
+ num = 1;
+
+ value = 0;
+ for (i = 0; i < num; i++)
+ value += s3c_adc_read(client, channel);
+ value /= num;
+
+ return value;
+}
+
static enum power_supply_property s3c_adc_backup_bat_props[] = {
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_VOLTAGE_MIN,
@@ -67,7 +83,8 @@ static int s3c_adc_backup_bat_get_property(struct power_supply *psy,
if (bat->volt_value < 0 ||
jiffies_to_msecs(jiffies - bat->timestamp) >
BAT_POLL_INTERVAL) {
- bat->volt_value = s3c_adc_read(bat->client,
+ bat->volt_value = gather_samples(bat->client,
+ bat->pdata->backup_volt_samples,
bat->pdata->backup_volt_channel);
bat->volt_value *= bat->pdata->backup_volt_mult;
bat->timestamp = jiffies;
@@ -139,9 +156,11 @@ static int s3c_adc_bat_get_property(struct power_supply *psy,
if (bat->volt_value < 0 || bat->cur_value < 0 ||
jiffies_to_msecs(jiffies - bat->timestamp) >
BAT_POLL_INTERVAL) {
- bat->volt_value = s3c_adc_read(bat->client,
+ bat->volt_value = gather_samples(bat->client,
+ bat->pdata->volt_samples,
bat->pdata->volt_channel) * bat->pdata->volt_mult;
- bat->cur_value = s3c_adc_read(bat->client,
+ bat->cur_value = gather_samples(bat->client,
+ bat->pdata->current_samples,
bat->pdata->current_channel) * bat->pdata->current_mult;
bat->timestamp = jiffies;
}
@@ -421,17 +440,7 @@ static struct platform_driver s3c_adc_bat_driver = {
.resume = s3c_adc_bat_resume,
};
-static int __init s3c_adc_bat_init(void)
-{
- return platform_driver_register(&s3c_adc_bat_driver);
-}
-module_init(s3c_adc_bat_init);
-
-static void __exit s3c_adc_bat_exit(void)
-{
- platform_driver_unregister(&s3c_adc_bat_driver);
-}
-module_exit(s3c_adc_bat_exit);
+module_platform_driver(s3c_adc_bat_driver);
MODULE_AUTHOR("Vasily Khoruzhick <anarsoul@gmail.com>");
MODULE_DESCRIPTION("iPAQ H1930/H1940/RX1950 battery controller driver");
diff --git a/drivers/power/bq20z75.c b/drivers/power/sbs-battery.c
index 9c5e5be..9ff8af0 100644
--- a/drivers/power/bq20z75.c
+++ b/drivers/power/sbs-battery.c
@@ -1,5 +1,5 @@
/*
- * Gas Gauge driver for TI's BQ20Z75
+ * Gas Gauge driver for SBS Compliant Batteries
*
* Copyright (c) 2010, NVIDIA Corporation.
*
@@ -28,7 +28,7 @@
#include <linux/interrupt.h>
#include <linux/gpio.h>
-#include <linux/power/bq20z75.h>
+#include <linux/power/sbs-battery.h>
enum {
REG_MANUFACTURER_DATA,
@@ -53,7 +53,7 @@ enum {
/* Battery Mode defines */
#define BATTERY_MODE_OFFSET 0x03
#define BATTERY_MODE_MASK 0x8000
-enum bq20z75_battery_mode {
+enum sbs_battery_mode {
BATTERY_MODE_AMPS,
BATTERY_MODE_WATTS
};
@@ -67,62 +67,56 @@ enum bq20z75_battery_mode {
#define BATTERY_FULL_CHARGED 0x20
#define BATTERY_FULL_DISCHARGED 0x10
-#define BQ20Z75_DATA(_psp, _addr, _min_value, _max_value) { \
+#define SBS_DATA(_psp, _addr, _min_value, _max_value) { \
.psp = _psp, \
.addr = _addr, \
.min_value = _min_value, \
.max_value = _max_value, \
}
-static const struct bq20z75_device_data {
+static const struct chip_data {
enum power_supply_property psp;
u8 addr;
int min_value;
int max_value;
-} bq20z75_data[] = {
+} sbs_data[] = {
[REG_MANUFACTURER_DATA] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_PRESENT, 0x00, 0, 65535),
+ SBS_DATA(POWER_SUPPLY_PROP_PRESENT, 0x00, 0, 65535),
[REG_TEMPERATURE] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_TEMP, 0x08, 0, 65535),
+ SBS_DATA(POWER_SUPPLY_PROP_TEMP, 0x08, 0, 65535),
[REG_VOLTAGE] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_VOLTAGE_NOW, 0x09, 0, 20000),
+ SBS_DATA(POWER_SUPPLY_PROP_VOLTAGE_NOW, 0x09, 0, 20000),
[REG_CURRENT] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_CURRENT_NOW, 0x0A, -32768,
- 32767),
+ SBS_DATA(POWER_SUPPLY_PROP_CURRENT_NOW, 0x0A, -32768, 32767),
[REG_CAPACITY] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0E, 0, 100),
+ SBS_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0E, 0, 100),
[REG_REMAINING_CAPACITY] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_ENERGY_NOW, 0x0F, 0, 65535),
+ SBS_DATA(POWER_SUPPLY_PROP_ENERGY_NOW, 0x0F, 0, 65535),
[REG_REMAINING_CAPACITY_CHARGE] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_CHARGE_NOW, 0x0F, 0, 65535),
+ SBS_DATA(POWER_SUPPLY_PROP_CHARGE_NOW, 0x0F, 0, 65535),
[REG_FULL_CHARGE_CAPACITY] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_ENERGY_FULL, 0x10, 0, 65535),
+ SBS_DATA(POWER_SUPPLY_PROP_ENERGY_FULL, 0x10, 0, 65535),
[REG_FULL_CHARGE_CAPACITY_CHARGE] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_CHARGE_FULL, 0x10, 0, 65535),
+ SBS_DATA(POWER_SUPPLY_PROP_CHARGE_FULL, 0x10, 0, 65535),
[REG_TIME_TO_EMPTY] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, 0x12, 0,
- 65535),
+ SBS_DATA(POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, 0x12, 0, 65535),
[REG_TIME_TO_FULL] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_TIME_TO_FULL_AVG, 0x13, 0,
- 65535),
+ SBS_DATA(POWER_SUPPLY_PROP_TIME_TO_FULL_AVG, 0x13, 0, 65535),
[REG_STATUS] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_STATUS, 0x16, 0, 65535),
+ SBS_DATA(POWER_SUPPLY_PROP_STATUS, 0x16, 0, 65535),
[REG_CYCLE_COUNT] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_CYCLE_COUNT, 0x17, 0, 65535),
+ SBS_DATA(POWER_SUPPLY_PROP_CYCLE_COUNT, 0x17, 0, 65535),
[REG_DESIGN_CAPACITY] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, 0x18, 0,
- 65535),
+ SBS_DATA(POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, 0x18, 0, 65535),
[REG_DESIGN_CAPACITY_CHARGE] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, 0x18, 0,
- 65535),
+ SBS_DATA(POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, 0x18, 0, 65535),
[REG_DESIGN_VOLTAGE] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, 0x19, 0,
- 65535),
+ SBS_DATA(POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, 0x19, 0, 65535),
[REG_SERIAL_NUMBER] =
- BQ20Z75_DATA(POWER_SUPPLY_PROP_SERIAL_NUMBER, 0x1C, 0, 65535),
+ SBS_DATA(POWER_SUPPLY_PROP_SERIAL_NUMBER, 0x1C, 0, 65535),
};
-static enum power_supply_property bq20z75_properties[] = {
+static enum power_supply_property sbs_properties[] = {
POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_HEALTH,
POWER_SUPPLY_PROP_PRESENT,
@@ -144,10 +138,10 @@ static enum power_supply_property bq20z75_properties[] = {
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
};
-struct bq20z75_info {
+struct sbs_info {
struct i2c_client *client;
struct power_supply power_supply;
- struct bq20z75_platform_data *pdata;
+ struct sbs_platform_data *pdata;
bool is_present;
bool gpio_detect;
bool enable_detection;
@@ -158,14 +152,14 @@ struct bq20z75_info {
int ignore_changes;
};
-static int bq20z75_read_word_data(struct i2c_client *client, u8 address)
+static int sbs_read_word_data(struct i2c_client *client, u8 address)
{
- struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
+ struct sbs_info *chip = i2c_get_clientdata(client);
s32 ret = 0;
int retries = 1;
- if (bq20z75_device->pdata)
- retries = max(bq20z75_device->pdata->i2c_retry_count + 1, 1);
+ if (chip->pdata)
+ retries = max(chip->pdata->i2c_retry_count + 1, 1);
while (retries > 0) {
ret = i2c_smbus_read_word_data(client, address);
@@ -184,15 +178,15 @@ static int bq20z75_read_word_data(struct i2c_client *client, u8 address)
return le16_to_cpu(ret);
}
-static int bq20z75_write_word_data(struct i2c_client *client, u8 address,
+static int sbs_write_word_data(struct i2c_client *client, u8 address,
u16 value)
{
- struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
+ struct sbs_info *chip = i2c_get_clientdata(client);
s32 ret = 0;
int retries = 1;
- if (bq20z75_device->pdata)
- retries = max(bq20z75_device->pdata->i2c_retry_count + 1, 1);
+ if (chip->pdata)
+ retries = max(chip->pdata->i2c_retry_count + 1, 1);
while (retries > 0) {
ret = i2c_smbus_write_word_data(client, address,
@@ -212,44 +206,41 @@ static int bq20z75_write_word_data(struct i2c_client *client, u8 address,
return 0;
}
-static int bq20z75_get_battery_presence_and_health(
+static int sbs_get_battery_presence_and_health(
struct i2c_client *client, enum power_supply_property psp,
union power_supply_propval *val)
{
s32 ret;
- struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
+ struct sbs_info *chip = i2c_get_clientdata(client);
if (psp == POWER_SUPPLY_PROP_PRESENT &&
- bq20z75_device->gpio_detect) {
- ret = gpio_get_value(
- bq20z75_device->pdata->battery_detect);
- if (ret == bq20z75_device->pdata->battery_detect_present)
+ chip->gpio_detect) {
+ ret = gpio_get_value(chip->pdata->battery_detect);
+ if (ret == chip->pdata->battery_detect_present)
val->intval = 1;
else
val->intval = 0;
- bq20z75_device->is_present = val->intval;
+ chip->is_present = val->intval;
return ret;
}
/* Write to ManufacturerAccess with
* ManufacturerAccess command and then
* read the status */
- ret = bq20z75_write_word_data(client,
- bq20z75_data[REG_MANUFACTURER_DATA].addr,
- MANUFACTURER_ACCESS_STATUS);
+ ret = sbs_write_word_data(client, sbs_data[REG_MANUFACTURER_DATA].addr,
+ MANUFACTURER_ACCESS_STATUS);
if (ret < 0) {
if (psp == POWER_SUPPLY_PROP_PRESENT)
val->intval = 0; /* battery removed */
return ret;
}
- ret = bq20z75_read_word_data(client,
- bq20z75_data[REG_MANUFACTURER_DATA].addr);
+ ret = sbs_read_word_data(client, sbs_data[REG_MANUFACTURER_DATA].addr);
if (ret < 0)
return ret;
- if (ret < bq20z75_data[REG_MANUFACTURER_DATA].min_value ||
- ret > bq20z75_data[REG_MANUFACTURER_DATA].max_value) {
+ if (ret < sbs_data[REG_MANUFACTURER_DATA].min_value ||
+ ret > sbs_data[REG_MANUFACTURER_DATA].max_value) {
val->intval = 0;
return 0;
}
@@ -279,24 +270,23 @@ static int bq20z75_get_battery_presence_and_health(
return 0;
}
-static int bq20z75_get_battery_property(struct i2c_client *client,
+static int sbs_get_battery_property(struct i2c_client *client,
int reg_offset, enum power_supply_property psp,
union power_supply_propval *val)
{
- struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
+ struct sbs_info *chip = i2c_get_clientdata(client);
s32 ret;
- ret = bq20z75_read_word_data(client,
- bq20z75_data[reg_offset].addr);
+ ret = sbs_read_word_data(client, sbs_data[reg_offset].addr);
if (ret < 0)
return ret;
/* returned values are 16 bit */
- if (bq20z75_data[reg_offset].min_value < 0)
+ if (sbs_data[reg_offset].min_value < 0)
ret = (s16)ret;
- if (ret >= bq20z75_data[reg_offset].min_value &&
- ret <= bq20z75_data[reg_offset].max_value) {
+ if (ret >= sbs_data[reg_offset].min_value &&
+ ret <= sbs_data[reg_offset].max_value) {
val->intval = ret;
if (psp != POWER_SUPPLY_PROP_STATUS)
return 0;
@@ -310,12 +300,12 @@ static int bq20z75_get_battery_property(struct i2c_client *client,
else
val->intval = POWER_SUPPLY_STATUS_CHARGING;
- if (bq20z75_device->poll_time == 0)
- bq20z75_device->last_state = val->intval;
- else if (bq20z75_device->last_state != val->intval) {
- cancel_delayed_work_sync(&bq20z75_device->work);
- power_supply_changed(&bq20z75_device->power_supply);
- bq20z75_device->poll_time = 0;
+ if (chip->poll_time == 0)
+ chip->last_state = val->intval;
+ else if (chip->last_state != val->intval) {
+ cancel_delayed_work_sync(&chip->work);
+ power_supply_changed(&chip->power_supply);
+ chip->poll_time = 0;
}
} else {
if (psp == POWER_SUPPLY_PROP_STATUS)
@@ -327,7 +317,7 @@ static int bq20z75_get_battery_property(struct i2c_client *client,
return 0;
}
-static void bq20z75_unit_adjustment(struct i2c_client *client,
+static void sbs_unit_adjustment(struct i2c_client *client,
enum power_supply_property psp, union power_supply_propval *val)
{
#define BASE_UNIT_CONVERSION 1000
@@ -338,7 +328,7 @@ static void bq20z75_unit_adjustment(struct i2c_client *client,
case POWER_SUPPLY_PROP_ENERGY_NOW:
case POWER_SUPPLY_PROP_ENERGY_FULL:
case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN:
- /* bq20z75 provides energy in units of 10mWh.
+ /* sbs provides energy in units of 10mWh.
* Convert to µWh
*/
val->intval *= BATTERY_MODE_CAP_MULT_WATT;
@@ -354,7 +344,7 @@ static void bq20z75_unit_adjustment(struct i2c_client *client,
break;
case POWER_SUPPLY_PROP_TEMP:
- /* bq20z75 provides battery temperature in 0.1K
+ /* sbs provides battery temperature in 0.1K
* so convert it to 0.1°C
*/
val->intval -= TEMP_KELVIN_TO_CELSIUS;
@@ -362,7 +352,7 @@ static void bq20z75_unit_adjustment(struct i2c_client *client,
case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
- /* bq20z75 provides time to empty and time to full in minutes.
+ /* sbs provides time to empty and time to full in minutes.
* Convert to seconds
*/
val->intval *= TIME_UNIT_CONVERSION;
@@ -374,13 +364,12 @@ static void bq20z75_unit_adjustment(struct i2c_client *client,
}
}
-static enum bq20z75_battery_mode
-bq20z75_set_battery_mode(struct i2c_client *client,
- enum bq20z75_battery_mode mode)
+static enum sbs_battery_mode sbs_set_battery_mode(struct i2c_client *client,
+ enum sbs_battery_mode mode)
{
int ret, original_val;
- original_val = bq20z75_read_word_data(client, BATTERY_MODE_OFFSET);
+ original_val = sbs_read_word_data(client, BATTERY_MODE_OFFSET);
if (original_val < 0)
return original_val;
@@ -392,68 +381,67 @@ bq20z75_set_battery_mode(struct i2c_client *client,
else
ret = original_val | BATTERY_MODE_MASK;
- ret = bq20z75_write_word_data(client, BATTERY_MODE_OFFSET, ret);
+ ret = sbs_write_word_data(client, BATTERY_MODE_OFFSET, ret);
if (ret < 0)
return ret;
return original_val & BATTERY_MODE_MASK;
}
-static int bq20z75_get_battery_capacity(struct i2c_client *client,
+static int sbs_get_battery_capacity(struct i2c_client *client,
int reg_offset, enum power_supply_property psp,
union power_supply_propval *val)
{
s32 ret;
- enum bq20z75_battery_mode mode = BATTERY_MODE_WATTS;
+ enum sbs_battery_mode mode = BATTERY_MODE_WATTS;
if (power_supply_is_amp_property(psp))
mode = BATTERY_MODE_AMPS;
- mode = bq20z75_set_battery_mode(client, mode);
+ mode = sbs_set_battery_mode(client, mode);
if (mode < 0)
return mode;
- ret = bq20z75_read_word_data(client, bq20z75_data[reg_offset].addr);
+ ret = sbs_read_word_data(client, sbs_data[reg_offset].addr);
if (ret < 0)
return ret;
if (psp == POWER_SUPPLY_PROP_CAPACITY) {
- /* bq20z75 spec says that this can be >100 %
+ /* sbs spec says that this can be >100 %
* even if max value is 100 % */
val->intval = min(ret, 100);
} else
val->intval = ret;
- ret = bq20z75_set_battery_mode(client, mode);
+ ret = sbs_set_battery_mode(client, mode);
if (ret < 0)
return ret;
return 0;
}
-static char bq20z75_serial[5];
-static int bq20z75_get_battery_serial_number(struct i2c_client *client,
+static char sbs_serial[5];
+static int sbs_get_battery_serial_number(struct i2c_client *client,
union power_supply_propval *val)
{
int ret;
- ret = bq20z75_read_word_data(client,
- bq20z75_data[REG_SERIAL_NUMBER].addr);
+ ret = sbs_read_word_data(client, sbs_data[REG_SERIAL_NUMBER].addr);
if (ret < 0)
return ret;
- ret = sprintf(bq20z75_serial, "%04x", ret);
- val->strval = bq20z75_serial;
+ ret = sprintf(sbs_serial, "%04x", ret);
+ val->strval = sbs_serial;
return 0;
}
-static int bq20z75_get_property_index(struct i2c_client *client,
+static int sbs_get_property_index(struct i2c_client *client,
enum power_supply_property psp)
{
int count;
- for (count = 0; count < ARRAY_SIZE(bq20z75_data); count++)
- if (psp == bq20z75_data[count].psp)
+ for (count = 0; count < ARRAY_SIZE(sbs_data); count++)
+ if (psp == sbs_data[count].psp)
return count;
dev_warn(&client->dev,
@@ -462,19 +450,19 @@ static int bq20z75_get_property_index(struct i2c_client *client,
return -EINVAL;
}
-static int bq20z75_get_property(struct power_supply *psy,
+static int sbs_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
int ret = 0;
- struct bq20z75_info *bq20z75_device = container_of(psy,
- struct bq20z75_info, power_supply);
- struct i2c_client *client = bq20z75_device->client;
+ struct sbs_info *chip = container_of(psy,
+ struct sbs_info, power_supply);
+ struct i2c_client *client = chip->client;
switch (psp) {
case POWER_SUPPLY_PROP_PRESENT:
case POWER_SUPPLY_PROP_HEALTH:
- ret = bq20z75_get_battery_presence_and_health(client, psp, val);
+ ret = sbs_get_battery_presence_and_health(client, psp, val);
if (psp == POWER_SUPPLY_PROP_PRESENT)
return 0;
break;
@@ -490,15 +478,15 @@ static int bq20z75_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CHARGE_FULL:
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
case POWER_SUPPLY_PROP_CAPACITY:
- ret = bq20z75_get_property_index(client, psp);
+ ret = sbs_get_property_index(client, psp);
if (ret < 0)
break;
- ret = bq20z75_get_battery_capacity(client, ret, psp, val);
+ ret = sbs_get_battery_capacity(client, ret, psp, val);
break;
case POWER_SUPPLY_PROP_SERIAL_NUMBER:
- ret = bq20z75_get_battery_serial_number(client, val);
+ ret = sbs_get_battery_serial_number(client, val);
break;
case POWER_SUPPLY_PROP_STATUS:
@@ -509,11 +497,11 @@ static int bq20z75_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
- ret = bq20z75_get_property_index(client, psp);
+ ret = sbs_get_property_index(client, psp);
if (ret < 0)
break;
- ret = bq20z75_get_battery_property(client, ret, psp, val);
+ ret = sbs_get_battery_property(client, ret, psp, val);
break;
default:
@@ -522,25 +510,25 @@ static int bq20z75_get_property(struct power_supply *psy,
return -EINVAL;
}
- if (!bq20z75_device->enable_detection)
+ if (!chip->enable_detection)
goto done;
- if (!bq20z75_device->gpio_detect &&
- bq20z75_device->is_present != (ret >= 0)) {
- bq20z75_device->is_present = (ret >= 0);
- power_supply_changed(&bq20z75_device->power_supply);
+ if (!chip->gpio_detect &&
+ chip->is_present != (ret >= 0)) {
+ chip->is_present = (ret >= 0);
+ power_supply_changed(&chip->power_supply);
}
done:
if (!ret) {
/* Convert units to match requirements for power supply class */
- bq20z75_unit_adjustment(client, psp, val);
+ sbs_unit_adjustment(client, psp, val);
}
dev_dbg(&client->dev,
"%s: property = %d, value = %x\n", __func__, psp, val->intval);
- if (ret && bq20z75_device->is_present)
+ if (ret && chip->is_present)
return ret;
/* battery not present, so return NODATA for properties */
@@ -550,7 +538,7 @@ done:
return 0;
}
-static irqreturn_t bq20z75_irq(int irq, void *devid)
+static irqreturn_t sbs_irq(int irq, void *devid)
{
struct power_supply *battery = devid;
@@ -559,36 +547,35 @@ static irqreturn_t bq20z75_irq(int irq, void *devid)
return IRQ_HANDLED;
}
-static void bq20z75_external_power_changed(struct power_supply *psy)
+static void sbs_external_power_changed(struct power_supply *psy)
{
- struct bq20z75_info *bq20z75_device;
+ struct sbs_info *chip;
- bq20z75_device = container_of(psy, struct bq20z75_info, power_supply);
+ chip = container_of(psy, struct sbs_info, power_supply);
- if (bq20z75_device->ignore_changes > 0) {
- bq20z75_device->ignore_changes--;
+ if (chip->ignore_changes > 0) {
+ chip->ignore_changes--;
return;
}
/* cancel outstanding work */
- cancel_delayed_work_sync(&bq20z75_device->work);
+ cancel_delayed_work_sync(&chip->work);
- schedule_delayed_work(&bq20z75_device->work, HZ);
- bq20z75_device->poll_time = bq20z75_device->pdata->poll_retry_count;
+ schedule_delayed_work(&chip->work, HZ);
+ chip->poll_time = chip->pdata->poll_retry_count;
}
-static void bq20z75_delayed_work(struct work_struct *work)
+static void sbs_delayed_work(struct work_struct *work)
{
- struct bq20z75_info *bq20z75_device;
+ struct sbs_info *chip;
s32 ret;
- bq20z75_device = container_of(work, struct bq20z75_info, work.work);
+ chip = container_of(work, struct sbs_info, work.work);
- ret = bq20z75_read_word_data(bq20z75_device->client,
- bq20z75_data[REG_STATUS].addr);
+ ret = sbs_read_word_data(chip->client, sbs_data[REG_STATUS].addr);
/* if the read failed, give up on this work */
if (ret < 0) {
- bq20z75_device->poll_time = 0;
+ chip->poll_time = 0;
return;
}
@@ -601,62 +588,145 @@ static void bq20z75_delayed_work(struct work_struct *work)
else
ret = POWER_SUPPLY_STATUS_CHARGING;
- if (bq20z75_device->last_state != ret) {
- bq20z75_device->poll_time = 0;
- power_supply_changed(&bq20z75_device->power_supply);
+ if (chip->last_state != ret) {
+ chip->poll_time = 0;
+ power_supply_changed(&chip->power_supply);
return;
}
- if (bq20z75_device->poll_time > 0) {
- schedule_delayed_work(&bq20z75_device->work, HZ);
- bq20z75_device->poll_time--;
+ if (chip->poll_time > 0) {
+ schedule_delayed_work(&chip->work, HZ);
+ chip->poll_time--;
return;
}
}
-static int __devinit bq20z75_probe(struct i2c_client *client,
+#if defined(CONFIG_OF)
+
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+
+static const struct of_device_id sbs_dt_ids[] = {
+ { .compatible = "sbs,sbs-battery" },
+ { .compatible = "ti,bq20z75" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sbs_dt_ids);
+
+static struct sbs_platform_data *sbs_of_populate_pdata(
+ struct i2c_client *client)
+{
+ struct device_node *of_node = client->dev.of_node;
+ struct sbs_platform_data *pdata = client->dev.platform_data;
+ enum of_gpio_flags gpio_flags;
+ int rc;
+ u32 prop;
+
+ /* verify this driver matches this device */
+ if (!of_node)
+ return NULL;
+
+ /* if platform data is set, honor it */
+ if (pdata)
+ return pdata;
+
+ /* first make sure at least one property is set, otherwise
+ * it won't change behavior from running without pdata.
+ */
+ if (!of_get_property(of_node, "sbs,i2c-retry-count", NULL) &&
+ !of_get_property(of_node, "sbs,poll-retry-count", NULL) &&
+ !of_get_property(of_node, "sbs,battery-detect-gpios", NULL))
+ goto of_out;
+
+ pdata = devm_kzalloc(&client->dev, sizeof(struct sbs_platform_data),
+ GFP_KERNEL);
+ if (!pdata)
+ goto of_out;
+
+ rc = of_property_read_u32(of_node, "sbs,i2c-retry-count", &prop);
+ if (!rc)
+ pdata->i2c_retry_count = prop;
+
+ rc = of_property_read_u32(of_node, "sbs,poll-retry-count", &prop);
+ if (!rc)
+ pdata->poll_retry_count = prop;
+
+ if (!of_get_property(of_node, "sbs,battery-detect-gpios", NULL)) {
+ pdata->battery_detect = -1;
+ goto of_out;
+ }
+
+ pdata->battery_detect = of_get_named_gpio_flags(of_node,
+ "sbs,battery-detect-gpios", 0, &gpio_flags);
+
+ if (gpio_flags & OF_GPIO_ACTIVE_LOW)
+ pdata->battery_detect_present = 0;
+ else
+ pdata->battery_detect_present = 1;
+
+of_out:
+ return pdata;
+}
+#else
+#define sbs_dt_ids NULL
+static struct sbs_platform_data *sbs_of_populate_pdata(
+ struct i2c_client *client)
+{
+ return client->dev.platform_data;
+}
+#endif
+
+static int __devinit sbs_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- struct bq20z75_info *bq20z75_device;
- struct bq20z75_platform_data *pdata = client->dev.platform_data;
+ struct sbs_info *chip;
+ struct sbs_platform_data *pdata = client->dev.platform_data;
int rc;
int irq;
+ char *name;
- bq20z75_device = kzalloc(sizeof(struct bq20z75_info), GFP_KERNEL);
- if (!bq20z75_device)
+ name = kasprintf(GFP_KERNEL, "sbs-%s", dev_name(&client->dev));
+ if (!name) {
+ dev_err(&client->dev, "Failed to allocate device name\n");
return -ENOMEM;
+ }
+
+ chip = kzalloc(sizeof(struct sbs_info), GFP_KERNEL);
+ if (!chip) {
+ rc = -ENOMEM;
+ goto exit_free_name;
+ }
- bq20z75_device->client = client;
- bq20z75_device->enable_detection = false;
- bq20z75_device->gpio_detect = false;
- bq20z75_device->power_supply.name = "battery";
- bq20z75_device->power_supply.type = POWER_SUPPLY_TYPE_BATTERY;
- bq20z75_device->power_supply.properties = bq20z75_properties;
- bq20z75_device->power_supply.num_properties =
- ARRAY_SIZE(bq20z75_properties);
- bq20z75_device->power_supply.get_property = bq20z75_get_property;
+ chip->client = client;
+ chip->enable_detection = false;
+ chip->gpio_detect = false;
+ chip->power_supply.name = name;
+ chip->power_supply.type = POWER_SUPPLY_TYPE_BATTERY;
+ chip->power_supply.properties = sbs_properties;
+ chip->power_supply.num_properties = ARRAY_SIZE(sbs_properties);
+ chip->power_supply.get_property = sbs_get_property;
/* ignore first notification of external change, it is generated
* from the power_supply_register call back
*/
- bq20z75_device->ignore_changes = 1;
- bq20z75_device->last_state = POWER_SUPPLY_STATUS_UNKNOWN;
- bq20z75_device->power_supply.external_power_changed =
- bq20z75_external_power_changed;
+ chip->ignore_changes = 1;
+ chip->last_state = POWER_SUPPLY_STATUS_UNKNOWN;
+ chip->power_supply.external_power_changed = sbs_external_power_changed;
+
+ pdata = sbs_of_populate_pdata(client);
if (pdata) {
- bq20z75_device->gpio_detect =
- gpio_is_valid(pdata->battery_detect);
- bq20z75_device->pdata = pdata;
+ chip->gpio_detect = gpio_is_valid(pdata->battery_detect);
+ chip->pdata = pdata;
}
- i2c_set_clientdata(client, bq20z75_device);
+ i2c_set_clientdata(client, chip);
- if (!bq20z75_device->gpio_detect)
+ if (!chip->gpio_detect)
goto skip_gpio;
rc = gpio_request(pdata->battery_detect, dev_name(&client->dev));
if (rc) {
dev_warn(&client->dev, "Failed to request gpio: %d\n", rc);
- bq20z75_device->gpio_detect = false;
+ chip->gpio_detect = false;
goto skip_gpio;
}
@@ -664,7 +734,7 @@ static int __devinit bq20z75_probe(struct i2c_client *client,
if (rc) {
dev_warn(&client->dev, "Failed to get gpio as input: %d\n", rc);
gpio_free(pdata->battery_detect);
- bq20z75_device->gpio_detect = false;
+ chip->gpio_detect = false;
goto skip_gpio;
}
@@ -672,25 +742,25 @@ static int __devinit bq20z75_probe(struct i2c_client *client,
if (irq <= 0) {
dev_warn(&client->dev, "Failed to get gpio as irq: %d\n", irq);
gpio_free(pdata->battery_detect);
- bq20z75_device->gpio_detect = false;
+ chip->gpio_detect = false;
goto skip_gpio;
}
- rc = request_irq(irq, bq20z75_irq,
+ rc = request_irq(irq, sbs_irq,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- dev_name(&client->dev), &bq20z75_device->power_supply);
+ dev_name(&client->dev), &chip->power_supply);
if (rc) {
dev_warn(&client->dev, "Failed to request irq: %d\n", rc);
gpio_free(pdata->battery_detect);
- bq20z75_device->gpio_detect = false;
+ chip->gpio_detect = false;
goto skip_gpio;
}
- bq20z75_device->irq = irq;
+ chip->irq = irq;
skip_gpio:
- rc = power_supply_register(&client->dev, &bq20z75_device->power_supply);
+ rc = power_supply_register(&client->dev, &chip->power_supply);
if (rc) {
dev_err(&client->dev,
"%s: Failed to register power supply\n", __func__);
@@ -700,95 +770,100 @@ skip_gpio:
dev_info(&client->dev,
"%s: battery gas gauge device registered\n", client->name);
- INIT_DELAYED_WORK(&bq20z75_device->work, bq20z75_delayed_work);
+ INIT_DELAYED_WORK(&chip->work, sbs_delayed_work);
- bq20z75_device->enable_detection = true;
+ chip->enable_detection = true;
return 0;
exit_psupply:
- if (bq20z75_device->irq)
- free_irq(bq20z75_device->irq, &bq20z75_device->power_supply);
- if (bq20z75_device->gpio_detect)
+ if (chip->irq)
+ free_irq(chip->irq, &chip->power_supply);
+ if (chip->gpio_detect)
gpio_free(pdata->battery_detect);
- kfree(bq20z75_device);
+ kfree(chip);
+
+exit_free_name:
+ kfree(name);
return rc;
}
-static int __devexit bq20z75_remove(struct i2c_client *client)
+static int __devexit sbs_remove(struct i2c_client *client)
{
- struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
+ struct sbs_info *chip = i2c_get_clientdata(client);
- if (bq20z75_device->irq)
- free_irq(bq20z75_device->irq, &bq20z75_device->power_supply);
- if (bq20z75_device->gpio_detect)
- gpio_free(bq20z75_device->pdata->battery_detect);
+ if (chip->irq)
+ free_irq(chip->irq, &chip->power_supply);
+ if (chip->gpio_detect)
+ gpio_free(chip->pdata->battery_detect);
- power_supply_unregister(&bq20z75_device->power_supply);
+ power_supply_unregister(&chip->power_supply);
- cancel_delayed_work_sync(&bq20z75_device->work);
+ cancel_delayed_work_sync(&chip->work);
- kfree(bq20z75_device);
- bq20z75_device = NULL;
+ kfree(chip->power_supply.name);
+ kfree(chip);
+ chip = NULL;
return 0;
}
#if defined CONFIG_PM
-static int bq20z75_suspend(struct i2c_client *client,
+static int sbs_suspend(struct i2c_client *client,
pm_message_t state)
{
- struct bq20z75_info *bq20z75_device = i2c_get_clientdata(client);
+ struct sbs_info *chip = i2c_get_clientdata(client);
s32 ret;
- if (bq20z75_device->poll_time > 0)
- cancel_delayed_work_sync(&bq20z75_device->work);
+ if (chip->poll_time > 0)
+ cancel_delayed_work_sync(&chip->work);
/* write to manufacturer access with sleep command */
- ret = bq20z75_write_word_data(client,
- bq20z75_data[REG_MANUFACTURER_DATA].addr,
+ ret = sbs_write_word_data(client, sbs_data[REG_MANUFACTURER_DATA].addr,
MANUFACTURER_ACCESS_SLEEP);
- if (bq20z75_device->is_present && ret < 0)
+ if (chip->is_present && ret < 0)
return ret;
return 0;
}
#else
-#define bq20z75_suspend NULL
+#define sbs_suspend NULL
#endif
-/* any smbus transaction will wake up bq20z75 */
-#define bq20z75_resume NULL
+/* any SMBus transaction will wake up the SBS device */
+#define sbs_resume NULL
-static const struct i2c_device_id bq20z75_id[] = {
+static const struct i2c_device_id sbs_id[] = {
{ "bq20z75", 0 },
+ { "sbs-battery", 1 },
{}
};
-MODULE_DEVICE_TABLE(i2c, bq20z75_id);
-
-static struct i2c_driver bq20z75_battery_driver = {
- .probe = bq20z75_probe,
- .remove = __devexit_p(bq20z75_remove),
- .suspend = bq20z75_suspend,
- .resume = bq20z75_resume,
- .id_table = bq20z75_id,
+MODULE_DEVICE_TABLE(i2c, sbs_id);
+
+static struct i2c_driver sbs_battery_driver = {
+ .probe = sbs_probe,
+ .remove = __devexit_p(sbs_remove),
+ .suspend = sbs_suspend,
+ .resume = sbs_resume,
+ .id_table = sbs_id,
.driver = {
- .name = "bq20z75-battery",
+ .name = "sbs-battery",
+ .of_match_table = sbs_dt_ids,
},
};
-static int __init bq20z75_battery_init(void)
+static int __init sbs_battery_init(void)
{
- return i2c_add_driver(&bq20z75_battery_driver);
+ return i2c_add_driver(&sbs_battery_driver);
}
-module_init(bq20z75_battery_init);
+module_init(sbs_battery_init);
-static void __exit bq20z75_battery_exit(void)
+static void __exit sbs_battery_exit(void)
{
- i2c_del_driver(&bq20z75_battery_driver);
+ i2c_del_driver(&sbs_battery_driver);
}
-module_exit(bq20z75_battery_exit);
+module_exit(sbs_battery_exit);
-MODULE_DESCRIPTION("BQ20z75 battery monitor driver");
+MODULE_DESCRIPTION("SBS battery monitor driver");
MODULE_LICENSE("GPL");
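
The sbs_dt_ids table referenced by the new .of_match_table entry is defined earlier in this patch (not shown in this hunk); a minimal sketch of the shape such an OF match table takes, with illustrative (assumed) compatible strings:

#include <linux/of.h>

#ifdef CONFIG_OF
static const struct of_device_id sbs_dt_ids[] = {
	{ .compatible = "sbs,sbs-battery" },	/* assumed compatible string */
	{ .compatible = "ti,bq20z75" },		/* assumed compatible string */
	{ }
};
MODULE_DEVICE_TABLE(of, sbs_dt_ids);
#endif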
diff --git a/drivers/power/tosa_battery.c b/drivers/power/tosa_battery.c
index 53f0d35..28bbe7e 100644
--- a/drivers/power/tosa_battery.c
+++ b/drivers/power/tosa_battery.c
@@ -307,25 +307,20 @@ static struct tosa_bat tosa_bat_bu = {
.adc_temp_divider = -1,
};
-static struct {
- int gpio;
- char *name;
- bool output;
- int value;
-} gpios[] = {
- { TOSA_GPIO_CHARGE_OFF, "main charge off", 1, 1 },
- { TOSA_GPIO_CHARGE_OFF_JC, "jacket charge off", 1, 1 },
- { TOSA_GPIO_BAT_SW_ON, "battery switch", 1, 0 },
- { TOSA_GPIO_BAT0_V_ON, "main battery", 1, 0 },
- { TOSA_GPIO_BAT1_V_ON, "jacket battery", 1, 0 },
- { TOSA_GPIO_BAT1_TH_ON, "main battery temp", 1, 0 },
- { TOSA_GPIO_BAT0_TH_ON, "jacket battery temp", 1, 0 },
- { TOSA_GPIO_BU_CHRG_ON, "backup battery", 1, 0 },
- { TOSA_GPIO_BAT0_CRG, "main battery full", 0, 0 },
- { TOSA_GPIO_BAT1_CRG, "jacket battery full", 0, 0 },
- { TOSA_GPIO_BAT0_LOW, "main battery low", 0, 0 },
- { TOSA_GPIO_BAT1_LOW, "jacket battery low", 0, 0 },
- { TOSA_GPIO_JACKET_DETECT, "jacket detect", 0, 0 },
+static struct gpio tosa_bat_gpios[] = {
+ { TOSA_GPIO_CHARGE_OFF, GPIOF_OUT_INIT_HIGH, "main charge off" },
+ { TOSA_GPIO_CHARGE_OFF_JC, GPIOF_OUT_INIT_HIGH, "jacket charge off" },
+ { TOSA_GPIO_BAT_SW_ON, GPIOF_OUT_INIT_LOW, "battery switch" },
+ { TOSA_GPIO_BAT0_V_ON, GPIOF_OUT_INIT_LOW, "main battery" },
+ { TOSA_GPIO_BAT1_V_ON, GPIOF_OUT_INIT_LOW, "jacket battery" },
+ { TOSA_GPIO_BAT1_TH_ON, GPIOF_OUT_INIT_LOW, "main battery temp" },
+ { TOSA_GPIO_BAT0_TH_ON, GPIOF_OUT_INIT_LOW, "jacket battery temp" },
+ { TOSA_GPIO_BU_CHRG_ON, GPIOF_OUT_INIT_LOW, "backup battery" },
+ { TOSA_GPIO_BAT0_CRG, GPIOF_IN, "main battery full" },
+ { TOSA_GPIO_BAT1_CRG, GPIOF_IN, "jacket battery full" },
+ { TOSA_GPIO_BAT0_LOW, GPIOF_IN, "main battery low" },
+ { TOSA_GPIO_BAT1_LOW, GPIOF_IN, "jacket battery low" },
+ { TOSA_GPIO_JACKET_DETECT, GPIOF_IN, "jacket detect" },
};
#ifdef CONFIG_PM
@@ -350,27 +345,13 @@ static int tosa_bat_resume(struct platform_device *dev)
static int __devinit tosa_bat_probe(struct platform_device *dev)
{
int ret;
- int i;
if (!machine_is_tosa())
return -ENODEV;
- for (i = 0; i < ARRAY_SIZE(gpios); i++) {
- ret = gpio_request(gpios[i].gpio, gpios[i].name);
- if (ret) {
- i--;
- goto err_gpio;
- }
-
- if (gpios[i].output)
- ret = gpio_direction_output(gpios[i].gpio,
- gpios[i].value);
- else
- ret = gpio_direction_input(gpios[i].gpio);
-
- if (ret)
- goto err_gpio;
- }
+ ret = gpio_request_array(tosa_bat_gpios, ARRAY_SIZE(tosa_bat_gpios));
+ if (ret)
+ return ret;
mutex_init(&tosa_bat_main.work_lock);
mutex_init(&tosa_bat_jacket.work_lock);
@@ -424,18 +405,12 @@ err_psy_reg_main:
/* see comment in tosa_bat_remove */
cancel_work_sync(&bat_work);
- i--;
-err_gpio:
- for (; i >= 0; i--)
- gpio_free(gpios[i].gpio);
-
+ gpio_free_array(tosa_bat_gpios, ARRAY_SIZE(tosa_bat_gpios));
return ret;
}
static int __devexit tosa_bat_remove(struct platform_device *dev)
{
- int i;
-
free_irq(gpio_to_irq(TOSA_GPIO_JACKET_DETECT), &tosa_bat_jacket);
free_irq(gpio_to_irq(TOSA_GPIO_BAT1_CRG), &tosa_bat_jacket);
free_irq(gpio_to_irq(TOSA_GPIO_BAT0_CRG), &tosa_bat_main);
@@ -450,10 +425,7 @@ static int __devexit tosa_bat_remove(struct platform_device *dev)
* unregistered now.
*/
cancel_work_sync(&bat_work);
-
- for (i = ARRAY_SIZE(gpios) - 1; i >= 0; i--)
- gpio_free(gpios[i].gpio);
-
+ gpio_free_array(tosa_bat_gpios, ARRAY_SIZE(tosa_bat_gpios));
return 0;
}
@@ -466,18 +438,7 @@ static struct platform_driver tosa_bat_driver = {
.resume = tosa_bat_resume,
};
-static int __init tosa_bat_init(void)
-{
- return platform_driver_register(&tosa_bat_driver);
-}
-
-static void __exit tosa_bat_exit(void)
-{
- platform_driver_unregister(&tosa_bat_driver);
-}
-
-module_init(tosa_bat_init);
-module_exit(tosa_bat_exit);
+module_platform_driver(tosa_bat_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dmitry Baryshkov");
diff --git a/drivers/power/wm831x_backup.c b/drivers/power/wm831x_backup.c
index e648cbe..6243e69 100644
--- a/drivers/power/wm831x_backup.c
+++ b/drivers/power/wm831x_backup.c
@@ -226,17 +226,7 @@ static struct platform_driver wm831x_backup_driver = {
},
};
-static int __init wm831x_backup_init(void)
-{
- return platform_driver_register(&wm831x_backup_driver);
-}
-module_init(wm831x_backup_init);
-
-static void __exit wm831x_backup_exit(void)
-{
- platform_driver_unregister(&wm831x_backup_driver);
-}
-module_exit(wm831x_backup_exit);
+module_platform_driver(wm831x_backup_driver);
MODULE_DESCRIPTION("Backup battery charger driver for WM831x PMICs");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c
index 6cc2ca6..987332b 100644
--- a/drivers/power/wm831x_power.c
+++ b/drivers/power/wm831x_power.c
@@ -27,6 +27,7 @@ struct wm831x_power {
char wall_name[20];
char usb_name[20];
char battery_name[20];
+ bool have_battery;
};
static int wm831x_power_check_online(struct wm831x *wm831x, int supply,
@@ -449,7 +450,8 @@ static irqreturn_t wm831x_bat_irq(int irq, void *data)
/* The battery charger is autonomous so we don't need to do
* anything except kick user space */
- power_supply_changed(&wm831x_power->battery);
+ if (wm831x_power->have_battery)
+ power_supply_changed(&wm831x_power->battery);
return IRQ_HANDLED;
}
@@ -479,7 +481,8 @@ static irqreturn_t wm831x_pwr_src_irq(int irq, void *data)
dev_dbg(wm831x->dev, "Power source changed\n");
/* Just notify for everything - little harm in overnotifying. */
- power_supply_changed(&wm831x_power->battery);
+ if (wm831x_power->have_battery)
+ power_supply_changed(&wm831x_power->battery);
power_supply_changed(&wm831x_power->usb);
power_supply_changed(&wm831x_power->wall);
@@ -537,15 +540,6 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
if (ret)
goto err_kmalloc;
- battery->name = power->battery_name;
- battery->properties = wm831x_bat_props;
- battery->num_properties = ARRAY_SIZE(wm831x_bat_props);
- battery->get_property = wm831x_bat_get_prop;
- battery->use_for_apm = 1;
- ret = power_supply_register(&pdev->dev, battery);
- if (ret)
- goto err_wall;
-
usb->name = power->usb_name,
usb->type = POWER_SUPPLY_TYPE_USB;
usb->properties = wm831x_usb_props;
@@ -553,7 +547,23 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
usb->get_property = wm831x_usb_get_prop;
ret = power_supply_register(&pdev->dev, usb);
if (ret)
- goto err_battery;
+ goto err_wall;
+
+ ret = wm831x_reg_read(wm831x, WM831X_CHARGER_CONTROL_1);
+ if (ret < 0)
+ goto err_wall;
+ power->have_battery = ret & WM831X_CHG_ENA;
+
+ if (power->have_battery) {
+ battery->name = power->battery_name;
+ battery->properties = wm831x_bat_props;
+ battery->num_properties = ARRAY_SIZE(wm831x_bat_props);
+ battery->get_property = wm831x_bat_get_prop;
+ battery->use_for_apm = 1;
+ ret = power_supply_register(&pdev->dev, battery);
+ if (ret)
+ goto err_usb;
+ }
irq = platform_get_irq_byname(pdev, "SYSLO");
ret = request_threaded_irq(irq, NULL, wm831x_syslo_irq,
@@ -562,7 +572,7 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(&pdev->dev, "Failed to request SYSLO IRQ %d: %d\n",
irq, ret);
- goto err_usb;
+ goto err_battery;
}
irq = platform_get_irq_byname(pdev, "PWR SRC");
@@ -601,10 +611,11 @@ err_bat_irq:
err_syslo:
irq = platform_get_irq_byname(pdev, "SYSLO");
free_irq(irq, power);
+err_battery:
+ if (power->have_battery)
+ power_supply_unregister(battery);
err_usb:
power_supply_unregister(usb);
-err_battery:
- power_supply_unregister(battery);
err_wall:
power_supply_unregister(wall);
err_kmalloc:
@@ -628,7 +639,8 @@ static __devexit int wm831x_power_remove(struct platform_device *pdev)
irq = platform_get_irq_byname(pdev, "SYSLO");
free_irq(irq, wm831x_power);
- power_supply_unregister(&wm831x_power->battery);
+ if (wm831x_power->have_battery)
+ power_supply_unregister(&wm831x_power->battery);
power_supply_unregister(&wm831x_power->wall);
power_supply_unregister(&wm831x_power->usb);
kfree(wm831x_power);
@@ -643,17 +655,7 @@ static struct platform_driver wm831x_power_driver = {
},
};
-static int __init wm831x_power_init(void)
-{
- return platform_driver_register(&wm831x_power_driver);
-}
-module_init(wm831x_power_init);
-
-static void __exit wm831x_power_exit(void)
-{
- platform_driver_unregister(&wm831x_power_driver);
-}
-module_exit(wm831x_power_exit);
+module_platform_driver(wm831x_power_driver);
MODULE_DESCRIPTION("Power supply driver for WM831x PMICs");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
diff --git a/drivers/power/wm8350_power.c b/drivers/power/wm8350_power.c
index 0693902..fae04d3 100644
--- a/drivers/power/wm8350_power.c
+++ b/drivers/power/wm8350_power.c
@@ -522,17 +522,7 @@ static struct platform_driver wm8350_power_driver = {
},
};
-static int __init wm8350_power_init(void)
-{
- return platform_driver_register(&wm8350_power_driver);
-}
-module_init(wm8350_power_init);
-
-static void __exit wm8350_power_exit(void)
-{
- platform_driver_unregister(&wm8350_power_driver);
-}
-module_exit(wm8350_power_exit);
+module_platform_driver(wm8350_power_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Power supply driver for WM8350");
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c
index 156559e..d2d4c08 100644
--- a/drivers/power/wm97xx_battery.c
+++ b/drivers/power/wm97xx_battery.c
@@ -25,9 +25,8 @@
#include <linux/irq.h>
#include <linux/slab.h>
-static DEFINE_MUTEX(bat_lock);
static struct work_struct bat_work;
-static struct mutex work_lock;
+static DEFINE_MUTEX(work_lock);
static int bat_status = POWER_SUPPLY_STATUS_UNKNOWN;
static enum power_supply_property *prop;
@@ -181,8 +180,6 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev)
if (dev->id != -1)
return -EINVAL;
- mutex_init(&work_lock);
-
if (!pdata) {
dev_err(&dev->dev, "No platform_data supplied\n");
return -EINVAL;
@@ -196,7 +193,7 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev)
if (ret)
goto err2;
ret = request_irq(gpio_to_irq(pdata->charge_gpio),
- wm97xx_chrg_irq, IRQF_DISABLED,
+ wm97xx_chrg_irq, 0,
"AC Detect", dev);
if (ret)
goto err2;
@@ -291,18 +288,7 @@ static struct platform_driver wm97xx_bat_driver = {
.remove = __devexit_p(wm97xx_bat_remove),
};
-static int __init wm97xx_bat_init(void)
-{
- return platform_driver_register(&wm97xx_bat_driver);
-}
-
-static void __exit wm97xx_bat_exit(void)
-{
- platform_driver_unregister(&wm97xx_bat_driver);
-}
-
-module_init(wm97xx_bat_init);
-module_exit(wm97xx_bat_exit);
+module_platform_driver(wm97xx_bat_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
diff --git a/drivers/power/z2_battery.c b/drivers/power/z2_battery.c
index d119c38..636ebb2 100644
--- a/drivers/power/z2_battery.c
+++ b/drivers/power/z2_battery.c
@@ -218,7 +218,7 @@ static int __devinit z2_batt_probe(struct i2c_client *client,
irq_set_irq_type(gpio_to_irq(info->charge_gpio),
IRQ_TYPE_EDGE_BOTH);
ret = request_irq(gpio_to_irq(info->charge_gpio),
- z2_charge_switch_irq, IRQF_DISABLED,
+ z2_charge_switch_irq, 0,
"AC Detect", charger);
if (ret)
goto err3;
@@ -313,7 +313,7 @@ static struct i2c_driver z2_batt_driver = {
.pm = Z2_BATTERY_PM_OPS
},
.probe = z2_batt_probe,
- .remove = z2_batt_remove,
+ .remove = __devexit_p(z2_batt_remove),
.id_table = z2_batt_id,
};
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index 2baadd2..98fbe62 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -369,9 +369,9 @@ static int __init pps_init(void)
int err;
pps_class = class_create(THIS_MODULE, "pps");
- if (!pps_class) {
+ if (IS_ERR(pps_class)) {
pr_err("failed to allocate class\n");
- return -ENOMEM;
+ return PTR_ERR(pps_class);
}
pps_class->dev_attrs = pps_attrs;
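
class_create() reports failure with an ERR_PTR-encoded pointer, never NULL, which is why the corrected check uses IS_ERR()/PTR_ERR(); the general pattern, with an illustrative class name:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>

static struct class *demo_class;

static int __init demo_init(void)
{
	demo_class = class_create(THIS_MODULE, "demo");	/* "demo" is illustrative */
	if (IS_ERR(demo_class))
		return PTR_ERR(demo_class);	/* propagate the errno encoded in the pointer */
	return 0;
}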
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 691b1ab..30d2072 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -410,13 +410,14 @@ static void tsi721_db_dpc(struct work_struct *work)
*/
mport = priv->mport;
- wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE));
- rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE));
+ wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
+ rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE)) % IDB_QSIZE;
while (wr_ptr != rd_ptr) {
idb_entry = (u64 *)(priv->idb_base +
(TSI721_IDB_ENTRY_SIZE * rd_ptr));
rd_ptr++;
+ rd_ptr %= IDB_QSIZE;
idb.msg = *idb_entry;
*idb_entry = 0;
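
The tsi721 fix keeps the inbound-doorbell read and write pointers inside the queue by reducing them modulo IDB_QSIZE; the wrap pattern in isolation, with an assumed queue depth:

/* minimal sketch of the read-pointer wrap used above */
#define DEMO_QSIZE 512				/* assumed queue depth */

static unsigned int demo_next_rd(unsigned int rd_ptr)
{
	rd_ptr++;
	rd_ptr %= DEMO_QSIZE;			/* wrap back to 0 at the end of the ring */
	return rd_ptr;
}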
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c
index ca0d608..28b81ae 100644
--- a/drivers/regulator/88pm8607.c
+++ b/drivers/regulator/88pm8607.c
@@ -196,7 +196,7 @@ static const unsigned int LDO12_suspend_table[] = {
};
static const unsigned int LDO13_table[] = {
- 1300000, 1800000, 2000000, 2500000, 2800000, 3000000, 0, 0,
+ 1200000, 1300000, 1800000, 2000000, 2500000, 2800000, 3000000, 0,
};
static const unsigned int LDO13_suspend_table[] = {
@@ -389,10 +389,10 @@ static struct pm8607_regulator_info pm8607_regulator_info[] = {
PM8607_LDO( 7, LDO7, 0, 3, SUPPLIES_EN12, 1),
PM8607_LDO( 8, LDO8, 0, 3, SUPPLIES_EN12, 2),
PM8607_LDO( 9, LDO9, 0, 3, SUPPLIES_EN12, 3),
- PM8607_LDO(10, LDO10, 0, 3, SUPPLIES_EN12, 4),
+ PM8607_LDO(10, LDO10, 0, 4, SUPPLIES_EN12, 4),
PM8607_LDO(12, LDO12, 0, 4, SUPPLIES_EN12, 5),
PM8607_LDO(13, VIBRATOR_SET, 1, 3, VIBRATOR_SET, 0),
- PM8607_LDO(14, LDO14, 0, 4, SUPPLIES_EN12, 6),
+ PM8607_LDO(14, LDO14, 0, 3, SUPPLIES_EN12, 6),
};
static int __devinit pm8607_regulator_probe(struct platform_device *pdev)
@@ -427,7 +427,7 @@ static int __devinit pm8607_regulator_probe(struct platform_device *pdev)
/* replace driver_data with info */
info->regulator = regulator_register(&info->desc, &pdev->dev,
- pdata, info);
+ pdata, info, NULL);
if (IS_ERR(info->regulator)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
info->desc.name);
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 9713b1b..7a61b17 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -93,6 +93,7 @@ config REGULATOR_MAX1586
config REGULATOR_MAX8649
tristate "Maxim 8649 voltage regulator"
depends on I2C
+ select REGMAP_I2C
help
This driver controls a Maxim 8649 voltage output regulator via
I2C bus.
@@ -177,6 +178,13 @@ config REGULATOR_DA903X
Say y here to support the BUCKs and LDOs regulators found on
Dialog Semiconductor DA9030/DA9034 PMIC.
+config REGULATOR_DA9052
+ tristate "Dialog DA9052/DA9053 regulators"
+ depends on PMIC_DA9052
+ help
+ This driver supports the voltage regulators of DA9052-BC and
+ DA9053-AA/Bx PMIC.
+
config REGULATOR_PCF50633
tristate "PCF50633 regulator driver"
depends on MFD_PCF50633
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 93a6318..503bac8 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_REGULATOR) += core.o dummy.o
+obj-$(CONFIG_OF) += of_regulator.o
obj-$(CONFIG_REGULATOR_FIXED_VOLTAGE) += fixed.o
obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o
obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o
@@ -29,6 +30,7 @@ obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o
obj-$(CONFIG_REGULATOR_WM8994) += wm8994-regulator.o
obj-$(CONFIG_REGULATOR_TPS6586X) += tps6586x-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
+obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o
obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o
diff --git a/drivers/regulator/aat2870-regulator.c b/drivers/regulator/aat2870-regulator.c
index 298c6c6..685ad43 100644
--- a/drivers/regulator/aat2870-regulator.c
+++ b/drivers/regulator/aat2870-regulator.c
@@ -63,7 +63,7 @@ static int aat2870_ldo_set_voltage_sel(struct regulator_dev *rdev,
struct aat2870_data *aat2870 = dev_get_drvdata(ri->pdev->dev.parent);
return aat2870->update(aat2870, ri->voltage_addr, ri->voltage_mask,
- (selector << ri->voltage_shift) & ri->voltage_mask);
+ selector << ri->voltage_shift);
}
static int aat2870_ldo_get_voltage_sel(struct regulator_dev *rdev)
@@ -188,7 +188,7 @@ static int aat2870_regulator_probe(struct platform_device *pdev)
ri->pdev = pdev;
rdev = regulator_register(&ri->desc, &pdev->dev,
- pdev->dev.platform_data, ri);
+ pdev->dev.platform_data, ri, NULL);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "Failed to register regulator %s\n",
ri->desc.name);
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 585e494..042271a 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -634,7 +634,7 @@ static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
rdev = regulator_register(&ab3100_regulator_desc[i],
&pdev->dev,
&plfdata->reg_constraints[i],
- reg);
+ reg, NULL);
if (IS_ERR(rdev)) {
err = PTR_ERR(rdev);
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index 6e1ae69..c9b92531 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -16,8 +16,8 @@
#include <linux/module.h>
#include <linux/err.h>
#include <linux/platform_device.h>
-#include <linux/mfd/ab8500.h>
#include <linux/mfd/abx500.h>
+#include <linux/mfd/abx500/ab8500.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/ab8500.h>
@@ -822,7 +822,7 @@ static __devinit int ab8500_regulator_probe(struct platform_device *pdev)
/* register regulator with framework */
info->regulator = regulator_register(&info->desc, &pdev->dev,
- &pdata->regulator[i], info);
+ &pdata->regulator[i], info, NULL);
if (IS_ERR(info->regulator)) {
err = PTR_ERR(info->regulator);
dev_err(&pdev->dev, "failed to register regulator %s\n",
diff --git a/drivers/regulator/ad5398.c b/drivers/regulator/ad5398.c
index a4be416..483c809 100644
--- a/drivers/regulator/ad5398.c
+++ b/drivers/regulator/ad5398.c
@@ -233,7 +233,7 @@ static int __devinit ad5398_probe(struct i2c_client *client,
chip->current_mask = (chip->current_level - 1) << chip->current_offset;
chip->rdev = regulator_register(&ad5398_reg, &client->dev,
- init_data, chip);
+ init_data, chip, NULL);
if (IS_ERR(chip->rdev)) {
ret = PTR_ERR(chip->rdev);
dev_err(&client->dev, "failed to register %s %s\n",
diff --git a/drivers/regulator/bq24022.c b/drivers/regulator/bq24022.c
index e24d1b7..9fab6d1 100644
--- a/drivers/regulator/bq24022.c
+++ b/drivers/regulator/bq24022.c
@@ -107,7 +107,7 @@ static int __init bq24022_probe(struct platform_device *pdev)
ret = gpio_direction_output(pdata->gpio_nce, 1);
bq24022 = regulator_register(&bq24022_desc, &pdev->dev,
- pdata->init_data, pdata);
+ pdata->init_data, pdata, NULL);
if (IS_ERR(bq24022)) {
dev_dbg(&pdev->dev, "couldn't register regulator\n");
ret = PTR_ERR(bq24022);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 938398f..e9a83f8 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -25,6 +25,8 @@
#include <linux/mutex.h>
#include <linux/suspend.h>
#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/regulator/of_regulator.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
@@ -132,6 +134,33 @@ static struct regulator *get_device_regulator(struct device *dev)
return NULL;
}
+/**
+ * of_get_regulator - get a regulator device node based on supply name
+ * @dev: Device pointer for the consumer (of regulator) device
+ * @supply: regulator supply name
+ *
+ * Extract the regulator device node corresponding to the supply name.
+ * Returns the device node corresponding to the regulator if found, else
+ * returns NULL.
+ */
+static struct device_node *of_get_regulator(struct device *dev, const char *supply)
+{
+ struct device_node *regnode = NULL;
+ char prop_name[32]; /* 32 is max size of property name */
+
+ dev_dbg(dev, "Looking up %s-supply from device tree\n", supply);
+
+ snprintf(prop_name, 32, "%s-supply", supply);
+ regnode = of_parse_phandle(dev->of_node, prop_name, 0);
+
+ if (!regnode) {
+ dev_warn(dev, "%s property in node %s references invalid phandle",
+ prop_name, dev->of_node->full_name);
+ return NULL;
+ }
+ return regnode;
+}
+
/* Platform voltage constraint check */
static int regulator_check_voltage(struct regulator_dev *rdev,
int *min_uV, int *max_uV)
@@ -883,8 +912,12 @@ static int set_machine_constraints(struct regulator_dev *rdev,
int ret = 0;
struct regulator_ops *ops = rdev->desc->ops;
- rdev->constraints = kmemdup(constraints, sizeof(*constraints),
- GFP_KERNEL);
+ if (constraints)
+ rdev->constraints = kmemdup(constraints, sizeof(*constraints),
+ GFP_KERNEL);
+ else
+ rdev->constraints = kzalloc(sizeof(*constraints),
+ GFP_KERNEL);
if (!rdev->constraints)
return -ENOMEM;
@@ -893,7 +926,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
goto out;
/* do we need to setup our suspend state */
- if (constraints->initial_state) {
+ if (rdev->constraints->initial_state) {
ret = suspend_prepare(rdev, rdev->constraints->initial_state);
if (ret < 0) {
rdev_err(rdev, "failed to set suspend state\n");
@@ -901,7 +934,7 @@ static int set_machine_constraints(struct regulator_dev *rdev,
}
}
- if (constraints->initial_mode) {
+ if (rdev->constraints->initial_mode) {
if (!ops->set_mode) {
rdev_err(rdev, "no set_mode operation\n");
ret = -EINVAL;
@@ -952,9 +985,8 @@ static int set_supply(struct regulator_dev *rdev,
rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev));
rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY");
- if (IS_ERR(rdev->supply)) {
- err = PTR_ERR(rdev->supply);
- rdev->supply = NULL;
+ if (rdev->supply == NULL) {
+ err = -ENOMEM;
return err;
}
@@ -1148,6 +1180,30 @@ static int _regulator_get_enable_time(struct regulator_dev *rdev)
return rdev->desc->ops->enable_time(rdev);
}
+static struct regulator_dev *regulator_dev_lookup(struct device *dev,
+ const char *supply)
+{
+ struct regulator_dev *r;
+ struct device_node *node;
+
+ /* first do a dt based lookup */
+ if (dev && dev->of_node) {
+ node = of_get_regulator(dev, supply);
+ if (node)
+ list_for_each_entry(r, &regulator_list, list)
+ if (r->dev.parent &&
+ node == r->dev.of_node)
+ return r;
+ }
+
+ /* if not found, try doing it non-dt way */
+ list_for_each_entry(r, &regulator_list, list)
+ if (strcmp(rdev_get_name(r), supply) == 0)
+ return r;
+
+ return NULL;
+}
+
/* Internal regulator request function */
static struct regulator *_regulator_get(struct device *dev, const char *id,
int exclusive)
@@ -1168,6 +1224,10 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
mutex_lock(&regulator_list_mutex);
+ rdev = regulator_dev_lookup(dev, id);
+ if (rdev)
+ goto found;
+
list_for_each_entry(map, &regulator_map_list, list) {
/* If the mapping has a device set up it must match */
if (map->dev_name &&
@@ -1221,6 +1281,7 @@ found:
if (regulator == NULL) {
regulator = ERR_PTR(-ENOMEM);
module_put(rdev->owner);
+ goto out;
}
rdev->open_count++;
@@ -1726,6 +1787,7 @@ int regulator_is_supported_voltage(struct regulator *regulator,
return 0;
}
+EXPORT_SYMBOL_GPL(regulator_is_supported_voltage);
static int _regulator_do_set_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV)
@@ -2429,6 +2491,43 @@ err:
EXPORT_SYMBOL_GPL(regulator_bulk_disable);
/**
+ * regulator_bulk_force_disable - force disable multiple regulator consumers
+ *
+ * @num_consumers: Number of consumers
+ * @consumers: Consumer data; clients are stored here.
+ * @return 0 on success, an errno on failure
+ *
+ * This convenience API allows consumers to forcibly disable multiple regulator
+ * clients in a single API call.
+ * NOTE: This should be used for situations when device damage will
+ * likely occur if the regulators are not disabled (e.g. over temp).
+ * Although regulator_force_disable() can return an error for some of the
+ * consumers, the function is still called for every consumer in the list.
+ */
+int regulator_bulk_force_disable(int num_consumers,
+ struct regulator_bulk_data *consumers)
+{
+ int i;
+ int ret;
+
+ for (i = 0; i < num_consumers; i++)
+ consumers[i].ret =
+ regulator_force_disable(consumers[i].consumer);
+
+ for (i = 0; i < num_consumers; i++) {
+ if (consumers[i].ret != 0) {
+ ret = consumers[i].ret;
+ goto out;
+ }
+ }
+
+ return 0;
+out:
+ return ret;
+}
+EXPORT_SYMBOL_GPL(regulator_bulk_force_disable);
+
+/**
* regulator_bulk_free - free multiple regulator consumers
*
* @num_consumers: Number of consumers
@@ -2503,7 +2602,8 @@ static int add_regulator_attributes(struct regulator_dev *rdev)
int status = 0;
/* some attributes need specific methods to be displayed */
- if (ops->get_voltage || ops->get_voltage_sel) {
+ if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) ||
+ (ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0)) {
status = device_create_file(dev, &dev_attr_microvolts);
if (status < 0)
return status;
@@ -2631,17 +2731,21 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
* @dev: struct device for the regulator
* @init_data: platform provided init data, passed through by driver
* @driver_data: private regulator data
+ * @of_node: OpenFirmware node to parse for device tree bindings (may be
+ * NULL).
*
* Called by regulator drivers to register a regulator.
* Returns 0 on success.
*/
struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
struct device *dev, const struct regulator_init_data *init_data,
- void *driver_data)
+ void *driver_data, struct device_node *of_node)
{
+ const struct regulation_constraints *constraints = NULL;
static atomic_t regulator_no = ATOMIC_INIT(0);
struct regulator_dev *rdev;
int ret, i;
+ const char *supply = NULL;
if (regulator_desc == NULL)
return ERR_PTR(-EINVAL);
@@ -2653,9 +2757,6 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
regulator_desc->type != REGULATOR_CURRENT)
return ERR_PTR(-EINVAL);
- if (!init_data)
- return ERR_PTR(-EINVAL);
-
/* Only one of each should be implemented */
WARN_ON(regulator_desc->ops->get_voltage &&
regulator_desc->ops->get_voltage_sel);
@@ -2688,7 +2789,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
INIT_DELAYED_WORK(&rdev->disable_work, regulator_disable_work);
	/* perform any regulator specific init */
- if (init_data->regulator_init) {
+ if (init_data && init_data->regulator_init) {
ret = init_data->regulator_init(rdev->reg_data);
if (ret < 0)
goto clean;
@@ -2696,6 +2797,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
/* register with sysfs */
rdev->dev.class = &regulator_class;
+ rdev->dev.of_node = of_node;
rdev->dev.parent = dev;
dev_set_name(&rdev->dev, "regulator.%d",
atomic_inc_return(&regulator_no) - 1);
@@ -2708,7 +2810,10 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
dev_set_drvdata(&rdev->dev, rdev);
/* set regulator constraints */
- ret = set_machine_constraints(rdev, &init_data->constraints);
+ if (init_data)
+ constraints = &init_data->constraints;
+
+ ret = set_machine_constraints(rdev, constraints);
if (ret < 0)
goto scrub;
@@ -2717,21 +2822,18 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
if (ret < 0)
goto scrub;
- if (init_data->supply_regulator) {
+ if (init_data && init_data->supply_regulator)
+ supply = init_data->supply_regulator;
+ else if (regulator_desc->supply_name)
+ supply = regulator_desc->supply_name;
+
+ if (supply) {
struct regulator_dev *r;
- int found = 0;
- list_for_each_entry(r, &regulator_list, list) {
- if (strcmp(rdev_get_name(r),
- init_data->supply_regulator) == 0) {
- found = 1;
- break;
- }
- }
+ r = regulator_dev_lookup(dev, supply);
- if (!found) {
- dev_err(dev, "Failed to find supply %s\n",
- init_data->supply_regulator);
+ if (!r) {
+ dev_err(dev, "Failed to find supply %s\n", supply);
ret = -ENODEV;
goto scrub;
}
@@ -2739,18 +2841,28 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
ret = set_supply(rdev, r);
if (ret < 0)
goto scrub;
+
+ /* Enable supply if rail is enabled */
+ if (rdev->desc->ops->is_enabled &&
+ rdev->desc->ops->is_enabled(rdev)) {
+ ret = regulator_enable(rdev->supply);
+ if (ret < 0)
+ goto scrub;
+ }
}
/* add consumers devices */
- for (i = 0; i < init_data->num_consumer_supplies; i++) {
- ret = set_consumer_device_supply(rdev,
- init_data->consumer_supplies[i].dev,
- init_data->consumer_supplies[i].dev_name,
- init_data->consumer_supplies[i].supply);
- if (ret < 0) {
- dev_err(dev, "Failed to set supply %s\n",
+ if (init_data) {
+ for (i = 0; i < init_data->num_consumer_supplies; i++) {
+ ret = set_consumer_device_supply(rdev,
+ init_data->consumer_supplies[i].dev,
+ init_data->consumer_supplies[i].dev_name,
init_data->consumer_supplies[i].supply);
- goto unset_supplies;
+ if (ret < 0) {
+ dev_err(dev, "Failed to set supply %s\n",
+ init_data->consumer_supplies[i].supply);
+ goto unset_supplies;
+ }
}
}
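
A minimal consumer-side sketch of the regulator_bulk_force_disable() helper introduced above, assuming two hypothetical supply names:

#include <linux/kernel.h>
#include <linux/regulator/consumer.h>

static struct regulator_bulk_data demo_supplies[] = {
	{ .supply = "vdd" },	/* hypothetical supply names */
	{ .supply = "vio" },
};

static void demo_emergency_off(struct device *dev)
{
	if (regulator_bulk_get(dev, ARRAY_SIZE(demo_supplies), demo_supplies))
		return;
	/* force everything off, e.g. on an over-temperature event */
	regulator_bulk_force_disable(ARRAY_SIZE(demo_supplies), demo_supplies);
	regulator_bulk_free(ARRAY_SIZE(demo_supplies), demo_supplies);
}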
diff --git a/drivers/regulator/da903x.c b/drivers/regulator/da903x.c
index e23ddfa..8dbc54d 100644
--- a/drivers/regulator/da903x.c
+++ b/drivers/regulator/da903x.c
@@ -537,7 +537,7 @@ static int __devinit da903x_regulator_probe(struct platform_device *pdev)
ri->desc.ops = &da9030_regulator_ldo1_15_ops;
rdev = regulator_register(&ri->desc, &pdev->dev,
- pdev->dev.platform_data, ri);
+ pdev->dev.platform_data, ri, NULL);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
ri->desc.name);
diff --git a/drivers/regulator/da9052-regulator.c b/drivers/regulator/da9052-regulator.c
new file mode 100644
index 0000000..09915e8
--- /dev/null
+++ b/drivers/regulator/da9052-regulator.c
@@ -0,0 +1,610 @@
+/*
+* da9052-regulator.c: Regulator driver for DA9052
+*
+* Copyright(c) 2011 Dialog Semiconductor Ltd.
+*
+* Author: David Dajun Chen <dchen@diasemi.com>
+*
+* This program is free software; you can redistribute it and/or modify
+* it under the terms of the GNU General Public License as published by
+* the Free Software Foundation; either version 2 of the License, or
+* (at your option) any later version.
+*
+*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+#include <linux/mfd/da9052/da9052.h>
+#include <linux/mfd/da9052/reg.h>
+#include <linux/mfd/da9052/pdata.h>
+
+/* Buck step size */
+#define DA9052_BUCK_PERI_3uV_STEP 100000
+#define DA9052_BUCK_PERI_REG_MAP_UPTO_3uV 24
+#define DA9052_CONST_3uV 3000000
+
+#define DA9052_MIN_UA 0
+#define DA9052_MAX_UA 3
+#define DA9052_CURRENT_RANGE 4
+
+/* Bit masks */
+#define DA9052_BUCK_ILIM_MASK_EVEN 0x0c
+#define DA9052_BUCK_ILIM_MASK_ODD 0xc0
+
+static const u32 da9052_current_limits[3][4] = {
+ {700000, 800000, 1000000, 1200000}, /* DA9052-BC BUCKs */
+ {1600000, 2000000, 2400000, 3000000}, /* DA9053-AA/Bx BUCK-CORE */
+ {800000, 1000000, 1200000, 1500000}, /* DA9053-AA/Bx BUCK-PRO,
+ * BUCK-MEM and BUCK-PERI
+ */
+};
+
+struct da9052_regulator_info {
+ struct regulator_desc reg_desc;
+ int step_uV;
+ int min_uV;
+ int max_uV;
+ unsigned char volt_shift;
+ unsigned char en_bit;
+ unsigned char activate_bit;
+};
+
+struct da9052_regulator {
+ struct da9052 *da9052;
+ struct da9052_regulator_info *info;
+ struct regulator_dev *rdev;
+};
+
+static int verify_range(struct da9052_regulator_info *info,
+ int min_uV, int max_uV)
+{
+ if (min_uV > info->max_uV || max_uV < info->min_uV)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int da9052_regulator_enable(struct regulator_dev *rdev)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int offset = rdev_get_id(rdev);
+
+ return da9052_reg_update(regulator->da9052,
+ DA9052_BUCKCORE_REG + offset,
+ 1 << info->en_bit, 1 << info->en_bit);
+}
+
+static int da9052_regulator_disable(struct regulator_dev *rdev)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int offset = rdev_get_id(rdev);
+
+ return da9052_reg_update(regulator->da9052,
+ DA9052_BUCKCORE_REG + offset,
+ 1 << info->en_bit, 0);
+}
+
+static int da9052_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int offset = rdev_get_id(rdev);
+ int ret;
+
+ ret = da9052_reg_read(regulator->da9052, DA9052_BUCKCORE_REG + offset);
+ if (ret < 0)
+ return ret;
+
+ return ret & (1 << info->en_bit);
+}
+
+static int da9052_dcdc_get_current_limit(struct regulator_dev *rdev)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ int offset = rdev_get_id(rdev);
+ int ret, row = 2;
+
+ ret = da9052_reg_read(regulator->da9052, DA9052_BUCKA_REG + offset/2);
+ if (ret < 0)
+ return ret;
+
+ /* Determine the even or odd position of the buck current limit
+ * register field
+ */
+ if (offset % 2 == 0)
+ ret = (ret & DA9052_BUCK_ILIM_MASK_EVEN) >> 2;
+ else
+ ret = (ret & DA9052_BUCK_ILIM_MASK_ODD) >> 6;
+
+ /* Select the appropriate current limit range */
+ if (regulator->da9052->chip_id == DA9052)
+ row = 0;
+ else if (offset == 0)
+ row = 1;
+
+ return da9052_current_limits[row][ret];
+}
+
+static int da9052_dcdc_set_current_limit(struct regulator_dev *rdev, int min_uA,
+ int max_uA)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ int offset = rdev_get_id(rdev);
+ int reg_val = 0;
+ int i, row = 2;
+
+ /* Select the appropriate current limit range */
+ if (regulator->da9052->chip_id == DA9052)
+ row = 0;
+ else if (offset == 0)
+ row = 1;
+
+ if (min_uA > da9052_current_limits[row][DA9052_MAX_UA] ||
+ max_uA < da9052_current_limits[row][DA9052_MIN_UA])
+ return -EINVAL;
+
+ for (i = 0; i < DA9052_CURRENT_RANGE; i++) {
+ if (min_uA <= da9052_current_limits[row][i]) {
+ reg_val = i;
+ break;
+ }
+ }
+
+ /* Determine the even or odd position of the buck current limit
+ * register field
+ */
+ if (offset % 2 == 0)
+ return da9052_reg_update(regulator->da9052,
+ DA9052_BUCKA_REG + offset/2,
+ DA9052_BUCK_ILIM_MASK_EVEN,
+ reg_val << 2);
+ else
+ return da9052_reg_update(regulator->da9052,
+ DA9052_BUCKA_REG + offset/2,
+ DA9052_BUCK_ILIM_MASK_ODD,
+ reg_val << 6);
+}
+
+static int da9052_list_buckperi_voltage(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int volt_uV;
+
+ if ((regulator->da9052->chip_id == DA9052) &&
+ (selector >= DA9052_BUCK_PERI_REG_MAP_UPTO_3uV)) {
+ volt_uV = ((DA9052_BUCK_PERI_REG_MAP_UPTO_3uV * info->step_uV)
+ + info->min_uV);
+ volt_uV += (selector - DA9052_BUCK_PERI_REG_MAP_UPTO_3uV)
+ * (DA9052_BUCK_PERI_3uV_STEP);
+ } else
+ volt_uV = (selector * info->step_uV) + info->min_uV;
+
+ if (volt_uV > info->max_uV)
+ return -EINVAL;
+
+ return volt_uV;
+}
+
+static int da9052_list_voltage(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int volt_uV;
+
+ volt_uV = info->min_uV + info->step_uV * selector;
+
+ if (volt_uV > info->max_uV)
+ return -EINVAL;
+
+ return volt_uV;
+}
+
+static int da9052_regulator_set_voltage_int(struct regulator_dev *rdev,
+ int min_uV, int max_uV,
+ unsigned int *selector)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int offset = rdev_get_id(rdev);
+ int ret;
+
+ ret = verify_range(info, min_uV, max_uV);
+ if (ret < 0)
+ return ret;
+
+ if (min_uV < info->min_uV)
+ min_uV = info->min_uV;
+
+ *selector = DIV_ROUND_UP(min_uV - info->min_uV, info->step_uV);
+
+ ret = da9052_list_voltage(rdev, *selector);
+ if (ret < 0)
+ return ret;
+
+ return da9052_reg_update(regulator->da9052,
+ DA9052_BUCKCORE_REG + offset,
+ (1 << info->volt_shift) - 1, *selector);
+}
+
+static int da9052_set_ldo_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV,
+ unsigned int *selector)
+{
+ return da9052_regulator_set_voltage_int(rdev, min_uV, max_uV, selector);
+}
+
+static int da9052_set_ldo5_6_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV,
+ unsigned int *selector)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int ret;
+
+ ret = da9052_regulator_set_voltage_int(rdev, min_uV, max_uV, selector);
+ if (ret < 0)
+ return ret;
+
+	/* Some LDOs are DVC-controlled, which requires enabling
+	 * the LDO activate bit to implement the changes on the
+ * LDO output.
+ */
+ return da9052_reg_update(regulator->da9052, DA9052_SUPPLY_REG,
+ info->activate_bit, info->activate_bit);
+}
+
+static int da9052_set_dcdc_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV,
+ unsigned int *selector)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int ret;
+
+ ret = da9052_regulator_set_voltage_int(rdev, min_uV, max_uV, selector);
+ if (ret < 0)
+ return ret;
+
+	/* Some DCDCs are DVC-controlled, which requires enabling
+	 * the DCDC activate bit to implement the changes on the
+ * DCDC output.
+ */
+ return da9052_reg_update(regulator->da9052, DA9052_SUPPLY_REG,
+ info->activate_bit, info->activate_bit);
+}
+
+static int da9052_get_regulator_voltage_sel(struct regulator_dev *rdev)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int offset = rdev_get_id(rdev);
+ int ret;
+
+ ret = da9052_reg_read(regulator->da9052, DA9052_BUCKCORE_REG + offset);
+ if (ret < 0)
+ return ret;
+
+ ret &= ((1 << info->volt_shift) - 1);
+
+ return ret;
+}
+
+static int da9052_set_buckperi_voltage(struct regulator_dev *rdev, int min_uV,
+ int max_uV, unsigned int *selector)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int offset = rdev_get_id(rdev);
+ int ret;
+
+ ret = verify_range(info, min_uV, max_uV);
+ if (ret < 0)
+ return ret;
+
+ if (min_uV < info->min_uV)
+ min_uV = info->min_uV;
+
+ if ((regulator->da9052->chip_id == DA9052) &&
+ (min_uV >= DA9052_CONST_3uV))
+ *selector = DA9052_BUCK_PERI_REG_MAP_UPTO_3uV +
+ DIV_ROUND_UP(min_uV - DA9052_CONST_3uV,
+ DA9052_BUCK_PERI_3uV_STEP);
+ else
+ *selector = DIV_ROUND_UP(min_uV - info->min_uV, info->step_uV);
+
+ ret = da9052_list_buckperi_voltage(rdev, *selector);
+ if (ret < 0)
+ return ret;
+
+ return da9052_reg_update(regulator->da9052,
+ DA9052_BUCKCORE_REG + offset,
+ (1 << info->volt_shift) - 1, *selector);
+}
+
+static int da9052_get_buckperi_voltage_sel(struct regulator_dev *rdev)
+{
+ struct da9052_regulator *regulator = rdev_get_drvdata(rdev);
+ struct da9052_regulator_info *info = regulator->info;
+ int offset = rdev_get_id(rdev);
+ int ret;
+
+ ret = da9052_reg_read(regulator->da9052, DA9052_BUCKCORE_REG + offset);
+ if (ret < 0)
+ return ret;
+
+ ret &= ((1 << info->volt_shift) - 1);
+
+ return ret;
+}
+
+static struct regulator_ops da9052_buckperi_ops = {
+ .list_voltage = da9052_list_buckperi_voltage,
+ .get_voltage_sel = da9052_get_buckperi_voltage_sel,
+ .set_voltage = da9052_set_buckperi_voltage,
+
+ .get_current_limit = da9052_dcdc_get_current_limit,
+ .set_current_limit = da9052_dcdc_set_current_limit,
+
+ .is_enabled = da9052_regulator_is_enabled,
+ .enable = da9052_regulator_enable,
+ .disable = da9052_regulator_disable,
+};
+
+static struct regulator_ops da9052_dcdc_ops = {
+ .set_voltage = da9052_set_dcdc_voltage,
+ .get_current_limit = da9052_dcdc_get_current_limit,
+ .set_current_limit = da9052_dcdc_set_current_limit,
+
+ .list_voltage = da9052_list_voltage,
+ .get_voltage_sel = da9052_get_regulator_voltage_sel,
+ .is_enabled = da9052_regulator_is_enabled,
+ .enable = da9052_regulator_enable,
+ .disable = da9052_regulator_disable,
+};
+
+static struct regulator_ops da9052_ldo5_6_ops = {
+ .set_voltage = da9052_set_ldo5_6_voltage,
+
+ .list_voltage = da9052_list_voltage,
+ .get_voltage_sel = da9052_get_regulator_voltage_sel,
+ .is_enabled = da9052_regulator_is_enabled,
+ .enable = da9052_regulator_enable,
+ .disable = da9052_regulator_disable,
+};
+
+static struct regulator_ops da9052_ldo_ops = {
+ .set_voltage = da9052_set_ldo_voltage,
+
+ .list_voltage = da9052_list_voltage,
+ .get_voltage_sel = da9052_get_regulator_voltage_sel,
+ .is_enabled = da9052_regulator_is_enabled,
+ .enable = da9052_regulator_enable,
+ .disable = da9052_regulator_disable,
+};
+
+#define DA9052_LDO5_6(_id, step, min, max, sbits, ebits, abits) \
+{\
+ .reg_desc = {\
+ .name = "LDO" #_id,\
+ .ops = &da9052_ldo5_6_ops,\
+ .type = REGULATOR_VOLTAGE,\
+ .id = _id,\
+ .n_voltages = (max - min) / step + 1, \
+ .owner = THIS_MODULE,\
+ },\
+ .min_uV = (min) * 1000,\
+ .max_uV = (max) * 1000,\
+ .step_uV = (step) * 1000,\
+ .volt_shift = (sbits),\
+ .en_bit = (ebits),\
+ .activate_bit = (abits),\
+}
+
+#define DA9052_LDO(_id, step, min, max, sbits, ebits, abits) \
+{\
+ .reg_desc = {\
+ .name = "LDO" #_id,\
+ .ops = &da9052_ldo_ops,\
+ .type = REGULATOR_VOLTAGE,\
+ .id = _id,\
+ .n_voltages = (max - min) / step + 1, \
+ .owner = THIS_MODULE,\
+ },\
+ .min_uV = (min) * 1000,\
+ .max_uV = (max) * 1000,\
+ .step_uV = (step) * 1000,\
+ .volt_shift = (sbits),\
+ .en_bit = (ebits),\
+ .activate_bit = (abits),\
+}
+
+#define DA9052_DCDC(_id, step, min, max, sbits, ebits, abits) \
+{\
+ .reg_desc = {\
+ .name = "BUCK" #_id,\
+ .ops = &da9052_dcdc_ops,\
+ .type = REGULATOR_VOLTAGE,\
+ .id = _id,\
+ .n_voltages = (max - min) / step + 1, \
+ .owner = THIS_MODULE,\
+ },\
+ .min_uV = (min) * 1000,\
+ .max_uV = (max) * 1000,\
+ .step_uV = (step) * 1000,\
+ .volt_shift = (sbits),\
+ .en_bit = (ebits),\
+ .activate_bit = (abits),\
+}
+
+#define DA9052_BUCKPERI(_id, step, min, max, sbits, ebits, abits) \
+{\
+ .reg_desc = {\
+ .name = "BUCK" #_id,\
+ .ops = &da9052_buckperi_ops,\
+ .type = REGULATOR_VOLTAGE,\
+ .id = _id,\
+ .n_voltages = (max - min) / step + 1, \
+ .owner = THIS_MODULE,\
+ },\
+ .min_uV = (min) * 1000,\
+ .max_uV = (max) * 1000,\
+ .step_uV = (step) * 1000,\
+ .volt_shift = (sbits),\
+ .en_bit = (ebits),\
+ .activate_bit = (abits),\
+}
+
+static struct da9052_regulator_info da9052_regulator_info[] = {
+ /* Buck1 - 4 */
+ DA9052_DCDC(0, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO),
+ DA9052_DCDC(1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO),
+ DA9052_DCDC(2, 25, 925, 2500, 6, 6, DA9052_SUPPLY_VBMEMGO),
+ DA9052_BUCKPERI(3, 50, 1800, 3600, 5, 6, 0),
+	/* LDO1 - LDO10 */
+ DA9052_LDO(4, 50, 600, 1800, 5, 6, 0),
+ DA9052_LDO5_6(5, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO),
+ DA9052_LDO5_6(6, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO),
+ DA9052_LDO(7, 25, 1725, 3300, 6, 6, 0),
+ DA9052_LDO(8, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(9, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(10, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(11, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(12, 50, 1250, 3650, 6, 6, 0),
+ DA9052_LDO(13, 50, 1200, 3600, 6, 6, 0),
+};
+
+static struct da9052_regulator_info da9053_regulator_info[] = {
+ /* Buck1 - 4 */
+ DA9052_DCDC(0, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBCOREGO),
+ DA9052_DCDC(1, 25, 500, 2075, 6, 6, DA9052_SUPPLY_VBPROGO),
+ DA9052_DCDC(2, 25, 925, 2500, 6, 6, DA9052_SUPPLY_VBMEMGO),
+ DA9052_BUCKPERI(3, 25, 925, 2500, 6, 6, 0),
+	/* LDO1 - LDO10 */
+ DA9052_LDO(4, 50, 600, 1800, 5, 6, 0),
+ DA9052_LDO5_6(5, 25, 600, 1800, 6, 6, DA9052_SUPPLY_VLDO2GO),
+ DA9052_LDO5_6(6, 25, 1725, 3300, 6, 6, DA9052_SUPPLY_VLDO3GO),
+ DA9052_LDO(7, 25, 1725, 3300, 6, 6, 0),
+ DA9052_LDO(8, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(9, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(10, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(11, 50, 1200, 3600, 6, 6, 0),
+ DA9052_LDO(12, 50, 1250, 3650, 6, 6, 0),
+ DA9052_LDO(13, 50, 1200, 3600, 6, 6, 0),
+};
+
+static inline struct da9052_regulator_info *find_regulator_info(u8 chip_id,
+ int id)
+{
+ struct da9052_regulator_info *info;
+ int i;
+
+ switch (chip_id) {
+ case DA9052:
+ for (i = 0; i < ARRAY_SIZE(da9052_regulator_info); i++) {
+ info = &da9052_regulator_info[i];
+ if (info->reg_desc.id == id)
+ return info;
+ }
+ break;
+ case DA9053_AA:
+ case DA9053_BA:
+ case DA9053_BB:
+ for (i = 0; i < ARRAY_SIZE(da9053_regulator_info); i++) {
+ info = &da9053_regulator_info[i];
+ if (info->reg_desc.id == id)
+ return info;
+ }
+ break;
+ }
+
+ return NULL;
+}
+
+static int __devinit da9052_regulator_probe(struct platform_device *pdev)
+{
+ struct da9052_regulator *regulator;
+ struct da9052 *da9052;
+ struct da9052_pdata *pdata;
+ int ret;
+
+ regulator = devm_kzalloc(&pdev->dev, sizeof(struct da9052_regulator),
+ GFP_KERNEL);
+ if (!regulator)
+ return -ENOMEM;
+
+ da9052 = dev_get_drvdata(pdev->dev.parent);
+ pdata = da9052->dev->platform_data;
+ regulator->da9052 = da9052;
+
+ regulator->info = find_regulator_info(regulator->da9052->chip_id,
+ pdev->id);
+ if (regulator->info == NULL) {
+ dev_err(&pdev->dev, "invalid regulator ID specified\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ regulator->rdev = regulator_register(&regulator->info->reg_desc,
+ &pdev->dev,
+ pdata->regulators[pdev->id],
+ regulator, NULL);
+ if (IS_ERR(regulator->rdev)) {
+ dev_err(&pdev->dev, "failed to register regulator %s\n",
+ regulator->info->reg_desc.name);
+ ret = PTR_ERR(regulator->rdev);
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, regulator);
+
+ return 0;
+err:
+ devm_kfree(&pdev->dev, regulator);
+ return ret;
+}
+
+static int __devexit da9052_regulator_remove(struct platform_device *pdev)
+{
+ struct da9052_regulator *regulator = platform_get_drvdata(pdev);
+
+ regulator_unregister(regulator->rdev);
+ devm_kfree(&pdev->dev, regulator);
+
+ return 0;
+}
+
+static struct platform_driver da9052_regulator_driver = {
+ .probe = da9052_regulator_probe,
+ .remove = __devexit_p(da9052_regulator_remove),
+ .driver = {
+ .name = "da9052-regulator",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init da9052_regulator_init(void)
+{
+ return platform_driver_register(&da9052_regulator_driver);
+}
+subsys_initcall(da9052_regulator_init);
+
+static void __exit da9052_regulator_exit(void)
+{
+ platform_driver_unregister(&da9052_regulator_driver);
+}
+module_exit(da9052_regulator_exit);
+
+MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
+MODULE_DESCRIPTION("Power Regulator driver for Dialog DA9052 PMIC");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:da9052-regulator");
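
The DA9052 voltage selection above is a linear selector-to-microvolt mapping (with a coarser 100 mV step above 3.0 V on the DA9052 BUCK-PERI rail); a standalone sketch of the forward and inverse mapping used by da9052_list_voltage() and da9052_regulator_set_voltage_int(), using the BUCK1 values from the table as illustrative constants:

#include <linux/kernel.h>

/* illustrative values: 0.5 V minimum, 25 mV step, as for BUCK1 above */
#define DEMO_MIN_UV	500000
#define DEMO_STEP_UV	 25000

static int demo_list_voltage(unsigned int selector)
{
	return DEMO_MIN_UV + DEMO_STEP_UV * selector;	/* selector -> microvolts */
}

static unsigned int demo_select(int min_uV)
{
	/* microvolts -> smallest selector whose voltage is >= min_uV */
	return DIV_ROUND_UP(min_uV - DEMO_MIN_UV, DEMO_STEP_UV);
}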
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index 7832975..515443f 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -486,7 +486,7 @@ static int __devinit db8500_regulator_probe(struct platform_device *pdev)
/* register with the regulator framework */
info->rdev = regulator_register(&info->desc, &pdev->dev,
- init_data, info);
+ init_data, info, NULL);
if (IS_ERR(info->rdev)) {
err = PTR_ERR(info->rdev);
dev_err(&pdev->dev, "failed to register %s: err %i\n",
diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c
index b8f5205..0ee00de 100644
--- a/drivers/regulator/dummy.c
+++ b/drivers/regulator/dummy.c
@@ -42,7 +42,7 @@ static int __devinit dummy_regulator_probe(struct platform_device *pdev)
int ret;
dummy_regulator_rdev = regulator_register(&dummy_desc, NULL,
- &dummy_initdata, NULL);
+ &dummy_initdata, NULL, NULL);
if (IS_ERR(dummy_regulator_rdev)) {
ret = PTR_ERR(dummy_regulator_rdev);
pr_err("Failed to register regulator: %d\n", ret);
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index 21ecf21..e24e3a1 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -27,6 +27,10 @@
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/regulator/of_regulator.h>
+#include <linux/regulator/machine.h>
struct fixed_voltage_data {
struct regulator_desc desc;
@@ -38,6 +42,58 @@ struct fixed_voltage_data {
bool is_enabled;
};
+
+/**
+ * of_get_fixed_voltage_config - extract fixed_voltage_config structure info
+ * @dev: device requesting for fixed_voltage_config
+ *
+ * Populates the fixed_voltage_config structure by extracting data from the
+ * device tree node; returns a pointer to the populated structure, or NULL if
+ * memory allocation fails.
+ */
+static struct fixed_voltage_config *
+of_get_fixed_voltage_config(struct device *dev)
+{
+ struct fixed_voltage_config *config;
+ struct device_node *np = dev->of_node;
+ const __be32 *delay;
+ struct regulator_init_data *init_data;
+
+ config = devm_kzalloc(dev, sizeof(struct fixed_voltage_config),
+ GFP_KERNEL);
+ if (!config)
+ return NULL;
+
+ config->init_data = of_get_regulator_init_data(dev, dev->of_node);
+ if (!config->init_data)
+ return NULL;
+
+ init_data = config->init_data;
+ init_data->constraints.apply_uV = 0;
+
+ config->supply_name = init_data->constraints.name;
+ if (init_data->constraints.min_uV == init_data->constraints.max_uV) {
+ config->microvolts = init_data->constraints.min_uV;
+ } else {
+ dev_err(dev,
+ "Fixed regulator specified with variable voltages\n");
+ return NULL;
+ }
+
+ if (init_data->constraints.boot_on)
+ config->enabled_at_boot = true;
+
+ config->gpio = of_get_named_gpio(np, "gpio", 0);
+ delay = of_get_property(np, "startup-delay-us", NULL);
+ if (delay)
+ config->startup_delay = be32_to_cpu(*delay);
+
+ if (of_find_property(np, "enable-active-high", NULL))
+ config->enable_high = true;
+
+ return config;
+}
+
static int fixed_voltage_is_enabled(struct regulator_dev *dev)
{
struct fixed_voltage_data *data = rdev_get_drvdata(dev);
@@ -80,7 +136,10 @@ static int fixed_voltage_get_voltage(struct regulator_dev *dev)
{
struct fixed_voltage_data *data = rdev_get_drvdata(dev);
- return data->microvolts;
+ if (data->microvolts)
+ return data->microvolts;
+ else
+ return -EINVAL;
}
static int fixed_voltage_list_voltage(struct regulator_dev *dev,
@@ -105,10 +164,18 @@ static struct regulator_ops fixed_voltage_ops = {
static int __devinit reg_fixed_voltage_probe(struct platform_device *pdev)
{
- struct fixed_voltage_config *config = pdev->dev.platform_data;
+ struct fixed_voltage_config *config;
struct fixed_voltage_data *drvdata;
int ret;
+ if (pdev->dev.of_node)
+ config = of_get_fixed_voltage_config(&pdev->dev);
+ else
+ config = pdev->dev.platform_data;
+
+ if (!config)
+ return -ENOMEM;
+
drvdata = kzalloc(sizeof(struct fixed_voltage_data), GFP_KERNEL);
if (drvdata == NULL) {
dev_err(&pdev->dev, "Failed to allocate device data\n");
@@ -180,7 +247,8 @@ static int __devinit reg_fixed_voltage_probe(struct platform_device *pdev)
}
drvdata->dev = regulator_register(&drvdata->desc, &pdev->dev,
- config->init_data, drvdata);
+ config->init_data, drvdata,
+ pdev->dev.of_node);
if (IS_ERR(drvdata->dev)) {
ret = PTR_ERR(drvdata->dev);
dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
@@ -217,12 +285,23 @@ static int __devexit reg_fixed_voltage_remove(struct platform_device *pdev)
return 0;
}
+#if defined(CONFIG_OF)
+static const struct of_device_id fixed_of_match[] __devinitconst = {
+ { .compatible = "regulator-fixed", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, fixed_of_match);
+#else
+#define fixed_of_match NULL
+#endif
+
static struct platform_driver regulator_fixed_voltage_driver = {
.probe = reg_fixed_voltage_probe,
.remove = __devexit_p(reg_fixed_voltage_remove),
.driver = {
.name = "reg-fixed-voltage",
.owner = THIS_MODULE,
+ .of_match_table = fixed_of_match,
},
};
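
With this change the fixed regulator accepts either the new device-tree binding or the existing platform data; a minimal board-file sketch of the platform-data path, with all names and values illustrative:

#include <linux/platform_device.h>
#include <linux/regulator/fixed.h>
#include <linux/regulator/machine.h>

static struct regulator_init_data demo_fixed_init_data = {
	.constraints = {
		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
	},
};

static struct fixed_voltage_config demo_fixed_config = {
	.supply_name	= "vcc-demo",		/* hypothetical rail name */
	.microvolts	= 3300000,
	.gpio		= 42,			/* hypothetical enable GPIO */
	.enable_high	= 1,
	.startup_delay	= 1000,			/* us */
	.init_data	= &demo_fixed_init_data,
};

static struct platform_device demo_fixed_regulator = {
	.name	= "reg-fixed-voltage",
	.id	= 0,
	.dev	= {
		.platform_data = &demo_fixed_config,
	},
};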
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index f0acf52..42e1cb1 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -284,7 +284,7 @@ static int __devinit gpio_regulator_probe(struct platform_device *pdev)
drvdata->state = state;
drvdata->dev = regulator_register(&drvdata->desc, &pdev->dev,
- config->init_data, drvdata);
+ config->init_data, drvdata, NULL);
if (IS_ERR(drvdata->dev)) {
ret = PTR_ERR(drvdata->dev);
dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
diff --git a/drivers/regulator/isl6271a-regulator.c b/drivers/regulator/isl6271a-regulator.c
index e4b3592..c1a456c 100644
--- a/drivers/regulator/isl6271a-regulator.c
+++ b/drivers/regulator/isl6271a-regulator.c
@@ -170,7 +170,7 @@ static int __devinit isl6271a_probe(struct i2c_client *i2c,
for (i = 0; i < 3; i++) {
pmic->rdev[i] = regulator_register(&isl_rd[i], &i2c->dev,
- init_data, pmic);
+ init_data, pmic, NULL);
if (IS_ERR(pmic->rdev[i])) {
dev_err(&i2c->dev, "failed to register %s\n", id->name);
err = PTR_ERR(pmic->rdev[i]);
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 72b16b5..0cfabd3 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -451,7 +451,7 @@ static int __devinit setup_regulators(struct lp3971 *lp3971,
for (i = 0; i < pdata->num_regulators; i++) {
struct lp3971_regulator_subdev *reg = &pdata->regulators[i];
lp3971->rdev[i] = regulator_register(&regulators[reg->id],
- lp3971->dev, reg->initdata, lp3971);
+ lp3971->dev, reg->initdata, lp3971, NULL);
if (IS_ERR(lp3971->rdev[i])) {
err = PTR_ERR(lp3971->rdev[i]);
diff --git a/drivers/regulator/lp3972.c b/drivers/regulator/lp3972.c
index fbc5e37..49a15ee 100644
--- a/drivers/regulator/lp3972.c
+++ b/drivers/regulator/lp3972.c
@@ -555,7 +555,7 @@ static int __devinit setup_regulators(struct lp3972 *lp3972,
for (i = 0; i < pdata->num_regulators; i++) {
struct lp3972_regulator_subdev *reg = &pdata->regulators[i];
lp3972->rdev[i] = regulator_register(&regulators[reg->id],
- lp3972->dev, reg->initdata, lp3972);
+ lp3972->dev, reg->initdata, lp3972, NULL);
if (IS_ERR(lp3972->rdev[i])) {
err = PTR_ERR(lp3972->rdev[i]);
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index 3f49512..40e7a4d 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -214,7 +214,7 @@ static int __devinit max1586_pmic_probe(struct i2c_client *client,
}
rdev[i] = regulator_register(&max1586_reg[id], &client->dev,
pdata->subdevs[i].platform_data,
- max1586);
+ max1586, NULL);
if (IS_ERR(rdev[i])) {
ret = PTR_ERR(rdev[i]);
dev_err(&client->dev, "failed to register %s\n",
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index 1062cf9..d0e1180 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -16,6 +16,7 @@
#include <linux/regulator/driver.h>
#include <linux/slab.h>
#include <linux/regulator/max8649.h>
+#include <linux/regmap.h>
#define MAX8649_DCDC_VMIN 750000 /* uV */
#define MAX8649_DCDC_VMAX 1380000 /* uV */
@@ -49,9 +50,8 @@
struct max8649_regulator_info {
struct regulator_dev *regulator;
- struct i2c_client *i2c;
struct device *dev;
- struct mutex io_lock;
+ struct regmap *regmap;
int vol_reg;
unsigned mode:2; /* bit[1:0] = VID1, VID0 */
@@ -63,71 +63,6 @@ struct max8649_regulator_info {
/* I2C operations */
-static inline int max8649_read_device(struct i2c_client *i2c,
- int reg, int bytes, void *dest)
-{
- unsigned char data;
- int ret;
-
- data = (unsigned char)reg;
- ret = i2c_master_send(i2c, &data, 1);
- if (ret < 0)
- return ret;
- ret = i2c_master_recv(i2c, dest, bytes);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-static inline int max8649_write_device(struct i2c_client *i2c,
- int reg, int bytes, void *src)
-{
- unsigned char buf[bytes + 1];
- int ret;
-
- buf[0] = (unsigned char)reg;
- memcpy(&buf[1], src, bytes);
-
- ret = i2c_master_send(i2c, buf, bytes + 1);
- if (ret < 0)
- return ret;
- return 0;
-}
-
-static int max8649_reg_read(struct i2c_client *i2c, int reg)
-{
- struct max8649_regulator_info *info = i2c_get_clientdata(i2c);
- unsigned char data;
- int ret;
-
- mutex_lock(&info->io_lock);
- ret = max8649_read_device(i2c, reg, 1, &data);
- mutex_unlock(&info->io_lock);
-
- if (ret < 0)
- return ret;
- return (int)data;
-}
-
-static int max8649_set_bits(struct i2c_client *i2c, int reg,
- unsigned char mask, unsigned char data)
-{
- struct max8649_regulator_info *info = i2c_get_clientdata(i2c);
- unsigned char value;
- int ret;
-
- mutex_lock(&info->io_lock);
- ret = max8649_read_device(i2c, reg, 1, &value);
- if (ret < 0)
- goto out;
- value &= ~mask;
- value |= data;
- ret = max8649_write_device(i2c, reg, 1, &value);
-out:
- mutex_unlock(&info->io_lock);
- return ret;
-}
-
static inline int check_range(int min_uV, int max_uV)
{
if ((min_uV < MAX8649_DCDC_VMIN) || (max_uV > MAX8649_DCDC_VMAX)
@@ -144,13 +79,14 @@ static int max8649_list_voltage(struct regulator_dev *rdev, unsigned index)
static int max8649_get_voltage(struct regulator_dev *rdev)
{
struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
+ unsigned int val;
unsigned char data;
int ret;
- ret = max8649_reg_read(info->i2c, info->vol_reg);
- if (ret < 0)
+ ret = regmap_read(info->regmap, info->vol_reg, &val);
+ if (ret != 0)
return ret;
- data = (unsigned char)ret & MAX8649_VOL_MASK;
+ data = (unsigned char)val & MAX8649_VOL_MASK;
return max8649_list_voltage(rdev, data);
}
@@ -170,14 +106,14 @@ static int max8649_set_voltage(struct regulator_dev *rdev,
mask = MAX8649_VOL_MASK;
*selector = data & mask;
- return max8649_set_bits(info->i2c, info->vol_reg, mask, data);
+ return regmap_update_bits(info->regmap, info->vol_reg, mask, data);
}
/* EN_PD means pulldown on EN input */
static int max8649_enable(struct regulator_dev *rdev)
{
struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
- return max8649_set_bits(info->i2c, MAX8649_CONTROL, MAX8649_EN_PD, 0);
+ return regmap_update_bits(info->regmap, MAX8649_CONTROL, MAX8649_EN_PD, 0);
}
/*
@@ -187,38 +123,40 @@ static int max8649_enable(struct regulator_dev *rdev)
static int max8649_disable(struct regulator_dev *rdev)
{
struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
- return max8649_set_bits(info->i2c, MAX8649_CONTROL, MAX8649_EN_PD,
+ return regmap_update_bits(info->regmap, MAX8649_CONTROL, MAX8649_EN_PD,
MAX8649_EN_PD);
}
static int max8649_is_enabled(struct regulator_dev *rdev)
{
struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
+ unsigned int val;
int ret;
- ret = max8649_reg_read(info->i2c, MAX8649_CONTROL);
- if (ret < 0)
+ ret = regmap_read(info->regmap, MAX8649_CONTROL, &val);
+ if (ret != 0)
return ret;
- return !((unsigned char)ret & MAX8649_EN_PD);
+ return !((unsigned char)val & MAX8649_EN_PD);
}
static int max8649_enable_time(struct regulator_dev *rdev)
{
struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
int voltage, rate, ret;
+ unsigned int val;
/* get voltage */
- ret = max8649_reg_read(info->i2c, info->vol_reg);
- if (ret < 0)
+ ret = regmap_read(info->regmap, info->vol_reg, &val);
+ if (ret != 0)
return ret;
- ret &= MAX8649_VOL_MASK;
- voltage = max8649_list_voltage(rdev, (unsigned char)ret); /* uV */
+ val &= MAX8649_VOL_MASK;
+ voltage = max8649_list_voltage(rdev, (unsigned char)val); /* uV */
/* get rate */
- ret = max8649_reg_read(info->i2c, MAX8649_RAMP);
- if (ret < 0)
+ ret = regmap_read(info->regmap, MAX8649_RAMP, &val);
+ if (ret != 0)
return ret;
- ret = (ret & MAX8649_RAMP_MASK) >> 5;
+ ret = (val & MAX8649_RAMP_MASK) >> 5;
rate = (32 * 1000) >> ret; /* uV/uS */
return DIV_ROUND_UP(voltage, rate);
@@ -230,12 +168,12 @@ static int max8649_set_mode(struct regulator_dev *rdev, unsigned int mode)
switch (mode) {
case REGULATOR_MODE_FAST:
- max8649_set_bits(info->i2c, info->vol_reg, MAX8649_FORCE_PWM,
- MAX8649_FORCE_PWM);
+ regmap_update_bits(info->regmap, info->vol_reg, MAX8649_FORCE_PWM,
+ MAX8649_FORCE_PWM);
break;
case REGULATOR_MODE_NORMAL:
- max8649_set_bits(info->i2c, info->vol_reg,
- MAX8649_FORCE_PWM, 0);
+ regmap_update_bits(info->regmap, info->vol_reg,
+ MAX8649_FORCE_PWM, 0);
break;
default:
return -EINVAL;
@@ -246,10 +184,13 @@ static int max8649_set_mode(struct regulator_dev *rdev, unsigned int mode)
static unsigned int max8649_get_mode(struct regulator_dev *rdev)
{
struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
+ unsigned int val;
int ret;
- ret = max8649_reg_read(info->i2c, info->vol_reg);
- if (ret & MAX8649_FORCE_PWM)
+ ret = regmap_read(info->regmap, info->vol_reg, &val);
+ if (ret != 0)
+ return ret;
+ if (val & MAX8649_FORCE_PWM)
return REGULATOR_MODE_FAST;
return REGULATOR_MODE_NORMAL;
}
@@ -275,11 +216,17 @@ static struct regulator_desc dcdc_desc = {
.owner = THIS_MODULE,
};
+static struct regmap_config max8649_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
static int __devinit max8649_regulator_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct max8649_platform_data *pdata = client->dev.platform_data;
struct max8649_regulator_info *info = NULL;
+ unsigned int val;
unsigned char data;
int ret;
@@ -289,9 +236,14 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
return -ENOMEM;
}
- info->i2c = client;
+ info->regmap = regmap_init_i2c(client, &max8649_regmap_config);
+ if (IS_ERR(info->regmap)) {
+ ret = PTR_ERR(info->regmap);
+ dev_err(&client->dev, "Failed to allocate register map: %d\n", ret);
+ goto fail;
+ }
+
info->dev = &client->dev;
- mutex_init(&info->io_lock);
i2c_set_clientdata(client, info);
info->mode = pdata->mode;
@@ -312,8 +264,8 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
break;
}
- ret = max8649_reg_read(info->i2c, MAX8649_CHIP_ID1);
- if (ret < 0) {
+ ret = regmap_read(info->regmap, MAX8649_CHIP_ID1, &val);
+ if (ret != 0) {
dev_err(info->dev, "Failed to detect ID of MAX8649:%d\n",
ret);
goto out;
@@ -321,33 +273,33 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
dev_info(info->dev, "Detected MAX8649 (ID:%x)\n", ret);
/* enable VID0 & VID1 */
- max8649_set_bits(info->i2c, MAX8649_CONTROL, MAX8649_VID_MASK, 0);
+ regmap_update_bits(info->regmap, MAX8649_CONTROL, MAX8649_VID_MASK, 0);
/* enable/disable external clock synchronization */
info->extclk = pdata->extclk;
data = (info->extclk) ? MAX8649_SYNC_EXTCLK : 0;
- max8649_set_bits(info->i2c, info->vol_reg, MAX8649_SYNC_EXTCLK, data);
+ regmap_update_bits(info->regmap, info->vol_reg, MAX8649_SYNC_EXTCLK, data);
if (info->extclk) {
/* set external clock frequency */
info->extclk_freq = pdata->extclk_freq;
- max8649_set_bits(info->i2c, MAX8649_SYNC, MAX8649_EXT_MASK,
- info->extclk_freq << 6);
+ regmap_update_bits(info->regmap, MAX8649_SYNC, MAX8649_EXT_MASK,
+ info->extclk_freq << 6);
}
if (pdata->ramp_timing) {
info->ramp_timing = pdata->ramp_timing;
- max8649_set_bits(info->i2c, MAX8649_RAMP, MAX8649_RAMP_MASK,
- info->ramp_timing << 5);
+ regmap_update_bits(info->regmap, MAX8649_RAMP, MAX8649_RAMP_MASK,
+ info->ramp_timing << 5);
}
info->ramp_down = pdata->ramp_down;
if (info->ramp_down) {
- max8649_set_bits(info->i2c, MAX8649_RAMP, MAX8649_RAMP_DOWN,
- MAX8649_RAMP_DOWN);
+ regmap_update_bits(info->regmap, MAX8649_RAMP, MAX8649_RAMP_DOWN,
+ MAX8649_RAMP_DOWN);
}
info->regulator = regulator_register(&dcdc_desc, &client->dev,
- pdata->regulator, info);
+ pdata->regulator, info, NULL);
if (IS_ERR(info->regulator)) {
dev_err(info->dev, "failed to register regulator %s\n",
dcdc_desc.name);
@@ -358,6 +310,8 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
dev_info(info->dev, "Max8649 regulator device is detected.\n");
return 0;
out:
+ regmap_exit(info->regmap);
+fail:
kfree(info);
return ret;
}
@@ -369,6 +323,7 @@ static int __devexit max8649_regulator_remove(struct i2c_client *client)
if (info) {
if (info->regulator)
regulator_unregister(info->regulator);
+ regmap_exit(info->regmap);
kfree(info);
}
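
The max8649 hunks above drop the driver's hand-rolled I2C read/modify/write helpers and io_lock in favour of regmap. A condensed sketch of the idiom (illustration only; foo_* names hypothetical, error handling trimmed): regmap_read() returns 0 on success and delivers the register value through a caller-provided unsigned int, so the old "negative means error, otherwise the value" convention no longer applies, and regmap_update_bits() takes an explicit mask and value and serialises accesses internally.

static struct regmap_config foo_regmap_config = {	/* hypothetical */
	.reg_bits = 8,
	.val_bits = 8,
};

static int foo_read_modify(struct i2c_client *client)
{
	struct regmap *map;
	unsigned int val;
	int ret;

	map = regmap_init_i2c(client, &foo_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* 0 on success; the register value comes back through &val. */
	ret = regmap_read(map, 0x00, &val);
	if (ret != 0)
		goto out;

	/* Read-modify-write under regmap's own locking. */
	ret = regmap_update_bits(map, 0x00, 0x01, 0x01);
out:
	regmap_exit(map);
	return ret;
}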
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
index 33f5d9a..a838e66 100644
--- a/drivers/regulator/max8660.c
+++ b/drivers/regulator/max8660.c
@@ -449,7 +449,7 @@ static int __devinit max8660_probe(struct i2c_client *client,
rdev[i] = regulator_register(&max8660_reg[id], &client->dev,
pdata->subdevs[i].platform_data,
- max8660);
+ max8660, NULL);
if (IS_ERR(rdev[i])) {
ret = PTR_ERR(rdev[i]);
dev_err(&client->dev, "failed to register %s\n",
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c
index cc9ec0e..cc290d3 100644
--- a/drivers/regulator/max8925-regulator.c
+++ b/drivers/regulator/max8925-regulator.c
@@ -24,9 +24,13 @@
#define SD1_DVM_SHIFT 5 /* SDCTL1 bit5 */
#define SD1_DVM_EN 6 /* SDV1 bit 6 */
-/* bit definitions in SD & LDO control registers */
-#define OUT_ENABLE 0x1f /* Power U/D sequence as I2C */
-#define OUT_DISABLE 0x1e /* Power U/D sequence as I2C */
+/* bit definitions in LDO control registers */
+#define LDO_SEQ_I2C 0x7 /* Power U/D by i2c */
+#define LDO_SEQ_MASK 0x7 /* Power U/D sequence mask */
+#define LDO_SEQ_SHIFT 2 /* Power U/D sequence offset */
+#define LDO_I2C_EN 0x1 /* Enable by i2c */
+#define LDO_I2C_EN_MASK 0x1 /* Enable mask by i2c */
+#define LDO_I2C_EN_SHIFT 0 /* Enable offset by i2c */
struct max8925_regulator_info {
struct regulator_desc desc;
@@ -40,7 +44,6 @@ struct max8925_regulator_info {
int vol_reg;
int vol_shift;
int vol_nbits;
- int enable_bit;
int enable_reg;
};
@@ -98,8 +101,10 @@ static int max8925_enable(struct regulator_dev *rdev)
struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
return max8925_set_bits(info->i2c, info->enable_reg,
- OUT_ENABLE << info->enable_bit,
- OUT_ENABLE << info->enable_bit);
+ LDO_SEQ_MASK << LDO_SEQ_SHIFT |
+ LDO_I2C_EN_MASK << LDO_I2C_EN_SHIFT,
+ LDO_SEQ_I2C << LDO_SEQ_SHIFT |
+ LDO_I2C_EN << LDO_I2C_EN_SHIFT);
}
static int max8925_disable(struct regulator_dev *rdev)
@@ -107,20 +112,24 @@ static int max8925_disable(struct regulator_dev *rdev)
struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
return max8925_set_bits(info->i2c, info->enable_reg,
- OUT_ENABLE << info->enable_bit,
- OUT_DISABLE << info->enable_bit);
+ LDO_SEQ_MASK << LDO_SEQ_SHIFT |
+ LDO_I2C_EN_MASK << LDO_I2C_EN_SHIFT,
+ LDO_SEQ_I2C << LDO_SEQ_SHIFT);
}
static int max8925_is_enabled(struct regulator_dev *rdev)
{
struct max8925_regulator_info *info = rdev_get_drvdata(rdev);
- int ret;
+ int ldo_seq, ret;
ret = max8925_reg_read(info->i2c, info->enable_reg);
if (ret < 0)
return ret;
-
- return ret & (1 << info->enable_bit);
+ ldo_seq = (ret >> LDO_SEQ_SHIFT) & LDO_SEQ_MASK;
+ if (ldo_seq != LDO_SEQ_I2C)
+ return 1;
+ else
+ return ret & (LDO_I2C_EN_MASK << LDO_I2C_EN_SHIFT);
}
static int max8925_set_dvm_voltage(struct regulator_dev *rdev, int uV)
@@ -188,7 +197,6 @@ static struct regulator_ops max8925_regulator_ldo_ops = {
.vol_shift = 0, \
.vol_nbits = 6, \
.enable_reg = MAX8925_SDCTL##_id, \
- .enable_bit = 0, \
}
#define MAX8925_LDO(_id, min, max, step) \
@@ -207,7 +215,6 @@ static struct regulator_ops max8925_regulator_ldo_ops = {
.vol_shift = 0, \
.vol_nbits = 6, \
.enable_reg = MAX8925_LDOCTL##_id, \
- .enable_bit = 0, \
}
static struct max8925_regulator_info max8925_regulator_info[] = {
@@ -266,7 +273,7 @@ static int __devinit max8925_regulator_probe(struct platform_device *pdev)
ri->chip = chip;
rdev = regulator_register(&ri->desc, &pdev->dev,
- pdata->regulator[pdev->id], ri);
+ pdata->regulator[pdev->id], ri, NULL);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
ri->desc.name);
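
Working through the new max8925 definitions above: LDO_SEQ_MASK << LDO_SEQ_SHIFT is 0x1c and LDO_I2C_EN_MASK << LDO_I2C_EN_SHIFT is 0x01, so both enable and disable operate under mask 0x1d; enable writes 0x1d (sequence = i2c, enable bit set) while disable writes 0x1c (sequence = i2c, enable bit clear). A standalone check of that arithmetic (illustration only, not part of the patch):

#include <stdio.h>

#define LDO_SEQ_I2C		0x7
#define LDO_SEQ_MASK		0x7
#define LDO_SEQ_SHIFT		2
#define LDO_I2C_EN		0x1
#define LDO_I2C_EN_MASK		0x1
#define LDO_I2C_EN_SHIFT	0

int main(void)
{
	unsigned mask = LDO_SEQ_MASK << LDO_SEQ_SHIFT |
			LDO_I2C_EN_MASK << LDO_I2C_EN_SHIFT;
	unsigned enable = LDO_SEQ_I2C << LDO_SEQ_SHIFT |
			  LDO_I2C_EN << LDO_I2C_EN_SHIFT;
	unsigned disable = LDO_SEQ_I2C << LDO_SEQ_SHIFT;

	/* prints: mask=0x1d enable=0x1d disable=0x1c */
	printf("mask=0x%02x enable=0x%02x disable=0x%02x\n",
	       mask, enable, disable);
	return 0;
}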
diff --git a/drivers/regulator/max8952.c b/drivers/regulator/max8952.c
index 3883d85..75d8940 100644
--- a/drivers/regulator/max8952.c
+++ b/drivers/regulator/max8952.c
@@ -208,7 +208,7 @@ static int __devinit max8952_pmic_probe(struct i2c_client *client,
max8952->pdata = pdata;
max8952->rdev = regulator_register(&regulator, max8952->dev,
- &pdata->reg_data, max8952);
+ &pdata->reg_data, max8952, NULL);
if (IS_ERR(max8952->rdev)) {
ret = PTR_ERR(max8952->rdev);
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c
index 6176129..d26e864 100644
--- a/drivers/regulator/max8997.c
+++ b/drivers/regulator/max8997.c
@@ -1146,7 +1146,7 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev)
regulators[id].n_voltages = 16;
rdev[i] = regulator_register(&regulators[id], max8997->dev,
- pdata->regulators[i].initdata, max8997);
+ pdata->regulators[i].initdata, max8997, NULL);
if (IS_ERR(rdev[i])) {
ret = PTR_ERR(rdev[i]);
dev_err(max8997->dev, "regulator init failed for %d\n",
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c
index 41a1495..2d38c24 100644
--- a/drivers/regulator/max8998.c
+++ b/drivers/regulator/max8998.c
@@ -847,7 +847,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev)
regulators[index].n_voltages = count;
}
rdev[i] = regulator_register(&regulators[index], max8998->dev,
- pdata->regulators[i].initdata, max8998);
+ pdata->regulators[i].initdata, max8998, NULL);
if (IS_ERR(rdev[i])) {
ret = PTR_ERR(rdev[i]);
dev_err(max8998->dev, "regulator init failed\n");
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index 8479082..8e9b90a 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -344,7 +344,7 @@ static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "%s id %d\n", __func__, pdev->id);
- priv = kzalloc(sizeof(*priv) +
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv) +
pdata->num_regulators * sizeof(priv->regulators[0]),
GFP_KERNEL);
if (!priv)
@@ -357,7 +357,7 @@ static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
init_data = &pdata->regulators[i];
priv->regulators[i] = regulator_register(
&mc13783_regulators[init_data->id].desc,
- &pdev->dev, init_data->init_data, priv);
+ &pdev->dev, init_data->init_data, priv, NULL);
if (IS_ERR(priv->regulators[i])) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
@@ -374,8 +374,6 @@ err:
while (--i >= 0)
regulator_unregister(priv->regulators[i]);
- kfree(priv);
-
return ret;
}
@@ -391,7 +389,6 @@ static int __devexit mc13783_regulator_remove(struct platform_device *pdev)
for (i = 0; i < pdata->num_regulators; i++)
regulator_unregister(priv->regulators[i]);
- kfree(priv);
return 0;
}
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
index 023d17d..e8cfc99 100644
--- a/drivers/regulator/mc13892-regulator.c
+++ b/drivers/regulator/mc13892-regulator.c
@@ -527,18 +527,27 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
struct mc13xxx *mc13892 = dev_get_drvdata(pdev->dev.parent);
struct mc13xxx_regulator_platform_data *pdata =
dev_get_platdata(&pdev->dev);
- struct mc13xxx_regulator_init_data *init_data;
+ struct mc13xxx_regulator_init_data *mc13xxx_data;
int i, ret;
+ int num_regulators = 0;
u32 val;
- priv = kzalloc(sizeof(*priv) +
- pdata->num_regulators * sizeof(priv->regulators[0]),
+ num_regulators = mc13xxx_get_num_regulators_dt(pdev);
+ if (num_regulators <= 0 && pdata)
+ num_regulators = pdata->num_regulators;
+ if (num_regulators <= 0)
+ return -EINVAL;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv) +
+ num_regulators * sizeof(priv->regulators[0]),
GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ priv->num_regulators = num_regulators;
priv->mc13xxx_regulators = mc13892_regulators;
priv->mc13xxx = mc13892;
+ platform_set_drvdata(pdev, priv);
mc13xxx_lock(mc13892);
ret = mc13xxx_reg_read(mc13892, MC13892_REVISION, &val);
@@ -569,11 +578,27 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
= mc13892_vcam_set_mode;
mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
= mc13892_vcam_get_mode;
- for (i = 0; i < pdata->num_regulators; i++) {
- init_data = &pdata->regulators[i];
+
+ mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
+ ARRAY_SIZE(mc13892_regulators));
+ for (i = 0; i < num_regulators; i++) {
+ struct regulator_init_data *init_data;
+ struct regulator_desc *desc;
+ struct device_node *node = NULL;
+ int id;
+
+ if (mc13xxx_data) {
+ id = mc13xxx_data[i].id;
+ init_data = mc13xxx_data[i].init_data;
+ node = mc13xxx_data[i].node;
+ } else {
+ id = pdata->regulators[i].id;
+ init_data = pdata->regulators[i].init_data;
+ }
+ desc = &mc13892_regulators[id].desc;
+
priv->regulators[i] = regulator_register(
- &mc13892_regulators[init_data->id].desc,
- &pdev->dev, init_data->init_data, priv);
+ desc, &pdev->dev, init_data, priv, node);
if (IS_ERR(priv->regulators[i])) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
@@ -583,8 +608,6 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev)
}
}
- platform_set_drvdata(pdev, priv);
-
return 0;
err:
while (--i >= 0)
@@ -592,7 +615,6 @@ err:
err_free:
mc13xxx_unlock(mc13892);
- kfree(priv);
return ret;
}
@@ -600,16 +622,13 @@ err_free:
static int __devexit mc13892_regulator_remove(struct platform_device *pdev)
{
struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
- struct mc13xxx_regulator_platform_data *pdata =
- dev_get_platdata(&pdev->dev);
int i;
platform_set_drvdata(pdev, NULL);
- for (i = 0; i < pdata->num_regulators; i++)
+ for (i = 0; i < priv->num_regulators; i++)
regulator_unregister(priv->regulators[i]);
- kfree(priv);
return 0;
}
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index 6532853..62dcd0a 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -18,12 +18,14 @@
#include <linux/mfd/mc13xxx.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/driver.h>
+#include <linux/regulator/of_regulator.h>
#include <linux/platform_device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/module.h>
+#include <linux/of.h>
#include "mc13xxx.h"
static int mc13xxx_regulator_enable(struct regulator_dev *rdev)
@@ -236,6 +238,63 @@ int mc13xxx_sw_regulator_is_enabled(struct regulator_dev *rdev)
}
EXPORT_SYMBOL_GPL(mc13xxx_sw_regulator_is_enabled);
+#ifdef CONFIG_OF
+int __devinit mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
+{
+ struct device_node *parent, *child;
+ int num = 0;
+
+ of_node_get(pdev->dev.parent->of_node);
+ parent = of_find_node_by_name(pdev->dev.parent->of_node, "regulators");
+ if (!parent)
+ return -ENODEV;
+
+ for_each_child_of_node(parent, child)
+ num++;
+
+ return num;
+}
+EXPORT_SYMBOL_GPL(mc13xxx_get_num_regulators_dt);
+
+struct mc13xxx_regulator_init_data * __devinit mc13xxx_parse_regulators_dt(
+ struct platform_device *pdev, struct mc13xxx_regulator *regulators,
+ int num_regulators)
+{
+ struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev);
+ struct mc13xxx_regulator_init_data *data, *p;
+ struct device_node *parent, *child;
+ int i;
+
+ of_node_get(pdev->dev.parent->of_node);
+ parent = of_find_node_by_name(pdev->dev.parent->of_node, "regulators");
+ if (!parent)
+ return NULL;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data) * priv->num_regulators,
+ GFP_KERNEL);
+ if (!data)
+ return NULL;
+ p = data;
+
+ for_each_child_of_node(parent, child) {
+ for (i = 0; i < num_regulators; i++) {
+ if (!of_node_cmp(child->name,
+ regulators[i].desc.name)) {
+ p->id = i;
+ p->init_data = of_get_regulator_init_data(
+ &pdev->dev, child);
+ p->node = child;
+ p++;
+ break;
+ }
+ }
+ }
+
+ return data;
+}
+EXPORT_SYMBOL_GPL(mc13xxx_parse_regulators_dt);
+#endif
+
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yong Shen <yong.shen@linaro.org>");
MODULE_DESCRIPTION("Regulator Driver for Freescale MC13xxx PMIC");
diff --git a/drivers/regulator/mc13xxx.h b/drivers/regulator/mc13xxx.h
index 2775826..b3961c6 100644
--- a/drivers/regulator/mc13xxx.h
+++ b/drivers/regulator/mc13xxx.h
@@ -29,6 +29,7 @@ struct mc13xxx_regulator_priv {
struct mc13xxx *mc13xxx;
u32 powermisc_pwgt_state;
struct mc13xxx_regulator *mc13xxx_regulators;
+ int num_regulators;
struct regulator_dev *regulators[];
};
@@ -42,13 +43,32 @@ extern int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev,
int min_uV, int max_uV, unsigned *selector);
extern int mc13xxx_fixed_regulator_get_voltage(struct regulator_dev *rdev);
+#ifdef CONFIG_OF
+extern int mc13xxx_get_num_regulators_dt(struct platform_device *pdev);
+extern struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
+ struct platform_device *pdev, struct mc13xxx_regulator *regulators,
+ int num_regulators);
+#else
+static inline int mc13xxx_get_num_regulators_dt(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+
+static inline struct mc13xxx_regulator_init_data *mc13xxx_parse_regulators_dt(
+ struct platform_device *pdev, struct mc13xxx_regulator *regulators,
+ int num_regulators)
+{
+ return NULL;
+}
+#endif
+
extern struct regulator_ops mc13xxx_regulator_ops;
extern struct regulator_ops mc13xxx_fixed_regulator_ops;
#define MC13xxx_DEFINE(prefix, _name, _reg, _vsel_reg, _voltages, _ops) \
[prefix ## _name] = { \
.desc = { \
- .name = #prefix "_" #_name, \
+ .name = #_name, \
.n_voltages = ARRAY_SIZE(_voltages), \
.ops = &_ops, \
.type = REGULATOR_VOLTAGE, \
@@ -66,7 +86,7 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops;
#define MC13xxx_FIXED_DEFINE(prefix, _name, _reg, _voltages, _ops) \
[prefix ## _name] = { \
.desc = { \
- .name = #prefix "_" #_name, \
+ .name = #_name, \
.n_voltages = ARRAY_SIZE(_voltages), \
.ops = &_ops, \
.type = REGULATOR_VOLTAGE, \
@@ -81,7 +101,7 @@ extern struct regulator_ops mc13xxx_fixed_regulator_ops;
#define MC13xxx_GPO_DEFINE(prefix, _name, _reg, _voltages, _ops) \
[prefix ## _name] = { \
.desc = { \
- .name = #prefix "_" #_name, \
+ .name = #_name, \
.n_voltages = ARRAY_SIZE(_voltages), \
.ops = &_ops, \
.type = REGULATOR_VOLTAGE, \
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
new file mode 100644
index 0000000..679734d
--- /dev/null
+++ b/drivers/regulator/of_regulator.c
@@ -0,0 +1,87 @@
+/*
+ * OF helpers for regulator framework
+ *
+ * Copyright (C) 2011 Texas Instruments, Inc.
+ * Rajendra Nayak <rnayak@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/regulator/machine.h>
+
+static void of_get_regulation_constraints(struct device_node *np,
+ struct regulator_init_data **init_data)
+{
+ const __be32 *min_uV, *max_uV, *uV_offset;
+ const __be32 *min_uA, *max_uA;
+ struct regulation_constraints *constraints = &(*init_data)->constraints;
+
+ constraints->name = of_get_property(np, "regulator-name", NULL);
+
+ min_uV = of_get_property(np, "regulator-min-microvolt", NULL);
+ if (min_uV)
+ constraints->min_uV = be32_to_cpu(*min_uV);
+ max_uV = of_get_property(np, "regulator-max-microvolt", NULL);
+ if (max_uV)
+ constraints->max_uV = be32_to_cpu(*max_uV);
+
+ /* Voltage change possible? */
+ if (constraints->min_uV != constraints->max_uV)
+ constraints->valid_ops_mask |= REGULATOR_CHANGE_VOLTAGE;
+ /* Only one voltage? Then make sure it's set. */
+ if (min_uV && max_uV && constraints->min_uV == constraints->max_uV)
+ constraints->apply_uV = true;
+
+ uV_offset = of_get_property(np, "regulator-microvolt-offset", NULL);
+ if (uV_offset)
+ constraints->uV_offset = be32_to_cpu(*uV_offset);
+ min_uA = of_get_property(np, "regulator-min-microamp", NULL);
+ if (min_uA)
+ constraints->min_uA = be32_to_cpu(*min_uA);
+ max_uA = of_get_property(np, "regulator-max-microamp", NULL);
+ if (max_uA)
+ constraints->max_uA = be32_to_cpu(*max_uA);
+
+ /* Current change possible? */
+ if (constraints->min_uA != constraints->max_uA)
+ constraints->valid_ops_mask |= REGULATOR_CHANGE_CURRENT;
+
+ if (of_find_property(np, "regulator-boot-on", NULL))
+ constraints->boot_on = true;
+
+ if (of_find_property(np, "regulator-always-on", NULL))
+ constraints->always_on = true;
+ else /* status change should be possible if not always on. */
+ constraints->valid_ops_mask |= REGULATOR_CHANGE_STATUS;
+}
+
+/**
+ * of_get_regulator_init_data - extract regulator_init_data structure info
+ * @dev: device requesting regulator_init_data
+ * @node: device node to parse for regulator properties
+ *
+ * Populates regulator_init_data structure by extracting data from device
+ * tree node, returns a pointer to the populated structure or NULL if memory
+ * alloc fails.
+ */
+struct regulator_init_data *of_get_regulator_init_data(struct device *dev,
+ struct device_node *node)
+{
+ struct regulator_init_data *init_data;
+
+ if (!node)
+ return NULL;
+
+ init_data = devm_kzalloc(dev, sizeof(*init_data), GFP_KERNEL);
+ if (!init_data)
+ return NULL; /* Out of memory? */
+
+ of_get_regulation_constraints(node, &init_data);
+ return init_data;
+}
+EXPORT_SYMBOL_GPL(of_get_regulator_init_data);
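
The new of_regulator.c helper is consumed the way the mc13892 hunks above do: look up the regulator's child node, build a struct regulator_init_data from its properties, and pass both to regulator_register(). A minimal hedged sketch (illustration only; foo_desc and foo_probe are hypothetical):

static int __devinit foo_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;	/* regulator node */
	struct regulator_init_data *init_data;
	struct regulator_dev *rdev;

	init_data = of_get_regulator_init_data(&pdev->dev, np);
	if (!init_data)
		return -EINVAL;

	/* Constraints such as regulator-min/max-microvolt, regulator-boot-on
	 * and regulator-always-on are now filled in from the device tree. */
	rdev = regulator_register(&foo_desc, &pdev->dev, init_data, NULL, np);
	if (IS_ERR(rdev))
		return PTR_ERR(rdev);

	platform_set_drvdata(pdev, rdev);
	return 0;
}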
diff --git a/drivers/regulator/pcap-regulator.c b/drivers/regulator/pcap-regulator.c
index 31f6e11..a5aab1b 100644
--- a/drivers/regulator/pcap-regulator.c
+++ b/drivers/regulator/pcap-regulator.c
@@ -277,7 +277,7 @@ static int __devinit pcap_regulator_probe(struct platform_device *pdev)
void *pcap = dev_get_drvdata(pdev->dev.parent);
rdev = regulator_register(&pcap_regulators[pdev->id], &pdev->dev,
- pdev->dev.platform_data, pcap);
+ pdev->dev.platform_data, pcap, NULL);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
diff --git a/drivers/regulator/pcf50633-regulator.c b/drivers/regulator/pcf50633-regulator.c
index 69a11d9..1d1c310 100644
--- a/drivers/regulator/pcf50633-regulator.c
+++ b/drivers/regulator/pcf50633-regulator.c
@@ -320,7 +320,7 @@ static int __devinit pcf50633_regulator_probe(struct platform_device *pdev)
pcf = dev_to_pcf50633(pdev->dev.parent);
rdev = regulator_register(&regulators[pdev->id], &pdev->dev,
- pdev->dev.platform_data, pcf);
+ pdev->dev.platform_data, pcf, NULL);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
diff --git a/drivers/regulator/tps6105x-regulator.c b/drivers/regulator/tps6105x-regulator.c
index 1011873..d9278da 100644
--- a/drivers/regulator/tps6105x-regulator.c
+++ b/drivers/regulator/tps6105x-regulator.c
@@ -151,7 +151,8 @@ static int __devinit tps6105x_regulator_probe(struct platform_device *pdev)
/* Register regulator with framework */
tps6105x->regulator = regulator_register(&tps6105x_regulator_desc,
&tps6105x->client->dev,
- pdata->regulator_data, tps6105x);
+ pdata->regulator_data, tps6105x,
+ NULL);
if (IS_ERR(tps6105x->regulator)) {
ret = PTR_ERR(tps6105x->regulator);
dev_err(&tps6105x->client->dev,
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index 9fb4c7b..18d61a0 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -152,48 +152,21 @@ struct tps_driver_data {
u8 core_regulator;
};
-static int tps_65023_set_bits(struct tps_pmic *tps, u8 reg, u8 mask)
-{
- return regmap_update_bits(tps->regmap, reg, mask, mask);
-}
-
-static int tps_65023_clear_bits(struct tps_pmic *tps, u8 reg, u8 mask)
-{
- return regmap_update_bits(tps->regmap, reg, mask, 0);
-}
-
-static int tps_65023_reg_read(struct tps_pmic *tps, u8 reg)
-{
- unsigned int val;
- int ret;
-
- ret = regmap_read(tps->regmap, reg, &val);
-
- if (ret != 0)
- return ret;
- else
- return val;
-}
-
-static int tps_65023_reg_write(struct tps_pmic *tps, u8 reg, u8 val)
-{
- return regmap_write(tps->regmap, reg, val);
-}
-
static int tps65023_dcdc_is_enabled(struct regulator_dev *dev)
{
struct tps_pmic *tps = rdev_get_drvdata(dev);
int data, dcdc = rdev_get_id(dev);
+ int ret;
u8 shift;
if (dcdc < TPS65023_DCDC_1 || dcdc > TPS65023_DCDC_3)
return -EINVAL;
shift = TPS65023_NUM_REGULATOR - dcdc;
- data = tps_65023_reg_read(tps, TPS65023_REG_REG_CTRL);
+ ret = regmap_read(tps->regmap, TPS65023_REG_REG_CTRL, &data);
- if (data < 0)
- return data;
+ if (ret != 0)
+ return ret;
else
return (data & 1<<shift) ? 1 : 0;
}
@@ -202,16 +175,17 @@ static int tps65023_ldo_is_enabled(struct regulator_dev *dev)
{
struct tps_pmic *tps = rdev_get_drvdata(dev);
int data, ldo = rdev_get_id(dev);
+ int ret;
u8 shift;
if (ldo < TPS65023_LDO_1 || ldo > TPS65023_LDO_2)
return -EINVAL;
shift = (ldo == TPS65023_LDO_1 ? 1 : 2);
- data = tps_65023_reg_read(tps, TPS65023_REG_REG_CTRL);
+ ret = regmap_read(tps->regmap, TPS65023_REG_REG_CTRL, &data);
- if (data < 0)
- return data;
+ if (ret != 0)
+ return ret;
else
return (data & 1<<shift) ? 1 : 0;
}
@@ -226,7 +200,7 @@ static int tps65023_dcdc_enable(struct regulator_dev *dev)
return -EINVAL;
shift = TPS65023_NUM_REGULATOR - dcdc;
- return tps_65023_set_bits(tps, TPS65023_REG_REG_CTRL, 1 << shift);
+ return regmap_update_bits(tps->regmap, TPS65023_REG_REG_CTRL, 1 << shift, 1 << shift);
}
static int tps65023_dcdc_disable(struct regulator_dev *dev)
@@ -239,7 +213,7 @@ static int tps65023_dcdc_disable(struct regulator_dev *dev)
return -EINVAL;
shift = TPS65023_NUM_REGULATOR - dcdc;
- return tps_65023_clear_bits(tps, TPS65023_REG_REG_CTRL, 1 << shift);
+ return regmap_update_bits(tps->regmap, TPS65023_REG_REG_CTRL, 1 << shift, 0);
}
static int tps65023_ldo_enable(struct regulator_dev *dev)
@@ -252,7 +226,7 @@ static int tps65023_ldo_enable(struct regulator_dev *dev)
return -EINVAL;
shift = (ldo == TPS65023_LDO_1 ? 1 : 2);
- return tps_65023_set_bits(tps, TPS65023_REG_REG_CTRL, 1 << shift);
+ return regmap_update_bits(tps->regmap, TPS65023_REG_REG_CTRL, 1 << shift, 1 << shift);
}
static int tps65023_ldo_disable(struct regulator_dev *dev)
@@ -265,21 +239,22 @@ static int tps65023_ldo_disable(struct regulator_dev *dev)
return -EINVAL;
shift = (ldo == TPS65023_LDO_1 ? 1 : 2);
- return tps_65023_clear_bits(tps, TPS65023_REG_REG_CTRL, 1 << shift);
+ return regmap_update_bits(tps->regmap, TPS65023_REG_REG_CTRL, 1 << shift, 0);
}
static int tps65023_dcdc_get_voltage(struct regulator_dev *dev)
{
struct tps_pmic *tps = rdev_get_drvdata(dev);
+ int ret;
int data, dcdc = rdev_get_id(dev);
if (dcdc < TPS65023_DCDC_1 || dcdc > TPS65023_DCDC_3)
return -EINVAL;
if (dcdc == tps->core_regulator) {
- data = tps_65023_reg_read(tps, TPS65023_REG_DEF_CORE);
- if (data < 0)
- return data;
+ ret = regmap_read(tps->regmap, TPS65023_REG_DEF_CORE, &data);
+ if (ret != 0)
+ return ret;
data &= (tps->info[dcdc]->table_len - 1);
return tps->info[dcdc]->table[data] * 1000;
} else
@@ -318,13 +293,13 @@ static int tps65023_dcdc_set_voltage(struct regulator_dev *dev,
if (vsel == tps->info[dcdc]->table_len)
goto failed;
- ret = tps_65023_reg_write(tps, TPS65023_REG_DEF_CORE, vsel);
+ ret = regmap_write(tps->regmap, TPS65023_REG_DEF_CORE, vsel);
/* Tell the chip that we have changed the value in DEFCORE
* and its time to update the core voltage
*/
- tps_65023_set_bits(tps, TPS65023_REG_CON_CTRL2,
- TPS65023_REG_CTRL2_GO);
+ regmap_update_bits(tps->regmap, TPS65023_REG_CON_CTRL2,
+ TPS65023_REG_CTRL2_GO, TPS65023_REG_CTRL2_GO);
return ret;
@@ -336,13 +311,14 @@ static int tps65023_ldo_get_voltage(struct regulator_dev *dev)
{
struct tps_pmic *tps = rdev_get_drvdata(dev);
int data, ldo = rdev_get_id(dev);
+ int ret;
if (ldo < TPS65023_LDO_1 || ldo > TPS65023_LDO_2)
return -EINVAL;
- data = tps_65023_reg_read(tps, TPS65023_REG_LDO_CTRL);
- if (data < 0)
- return data;
+ ret = regmap_read(tps->regmap, TPS65023_REG_LDO_CTRL, &data);
+ if (ret != 0)
+ return ret;
data >>= (TPS65023_LDO_CTRL_LDOx_SHIFT(ldo - TPS65023_LDO_1));
data &= (tps->info[ldo]->table_len - 1);
@@ -354,6 +330,7 @@ static int tps65023_ldo_set_voltage(struct regulator_dev *dev,
{
struct tps_pmic *tps = rdev_get_drvdata(dev);
int data, vsel, ldo = rdev_get_id(dev);
+ int ret;
if (ldo < TPS65023_LDO_1 || ldo > TPS65023_LDO_2)
return -EINVAL;
@@ -377,13 +354,13 @@ static int tps65023_ldo_set_voltage(struct regulator_dev *dev,
*selector = vsel;
- data = tps_65023_reg_read(tps, TPS65023_REG_LDO_CTRL);
- if (data < 0)
- return data;
+ ret = regmap_read(tps->regmap, TPS65023_REG_LDO_CTRL, &data);
+ if (ret != 0)
+ return ret;
data &= TPS65023_LDO_CTRL_LDOx_MASK(ldo - TPS65023_LDO_1);
data |= (vsel << (TPS65023_LDO_CTRL_LDOx_SHIFT(ldo - TPS65023_LDO_1)));
- return tps_65023_reg_write(tps, TPS65023_REG_LDO_CTRL, data);
+ return regmap_write(tps->regmap, TPS65023_REG_LDO_CTRL, data);
}
static int tps65023_dcdc_list_voltage(struct regulator_dev *dev,
@@ -496,7 +473,7 @@ static int __devinit tps_65023_probe(struct i2c_client *client,
/* Register the regulators */
rdev = regulator_register(&tps->desc[i], &client->dev,
- init_data, tps);
+ init_data, tps, NULL);
if (IS_ERR(rdev)) {
dev_err(&client->dev, "failed to register %s\n",
id->name);
@@ -511,12 +488,12 @@ static int __devinit tps_65023_probe(struct i2c_client *client,
i2c_set_clientdata(client, tps);
/* Enable setting output voltage by I2C */
- tps_65023_clear_bits(tps, TPS65023_REG_CON_CTRL2,
- TPS65023_REG_CTRL2_CORE_ADJ);
+ regmap_update_bits(tps->regmap, TPS65023_REG_CON_CTRL2,
+ TPS65023_REG_CTRL2_CORE_ADJ, TPS65023_REG_CTRL2_CORE_ADJ);
/* Enable setting output voltage by I2C */
- tps_65023_clear_bits(tps, TPS65023_REG_CON_CTRL2,
- TPS65023_REG_CTRL2_CORE_ADJ);
+ regmap_update_bits(tps->regmap, TPS65023_REG_CON_CTRL2,
+ TPS65023_REG_CTRL2_CORE_ADJ, TPS65023_REG_CTRL2_CORE_ADJ);
return 0;
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index bdef703..0b63ef7 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -599,7 +599,7 @@ int tps6507x_pmic_probe(struct platform_device *pdev)
tps->desc[i].owner = THIS_MODULE;
rdev = regulator_register(&tps->desc[i],
- tps6507x_dev->dev, init_data, tps);
+ tps6507x_dev->dev, init_data, tps, NULL);
if (IS_ERR(rdev)) {
dev_err(tps6507x_dev->dev,
"failed to register %s regulator\n",
diff --git a/drivers/regulator/tps6524x-regulator.c b/drivers/regulator/tps6524x-regulator.c
index 9166aa0..2e94686 100644
--- a/drivers/regulator/tps6524x-regulator.c
+++ b/drivers/regulator/tps6524x-regulator.c
@@ -481,7 +481,7 @@ static int set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
if (i >= info->n_voltages)
i = info->n_voltages - 1;
- *selector = info->voltages[i];
+ *selector = i;
return write_field(hw, &info->voltage, i);
}
@@ -651,7 +651,7 @@ static int __devinit pmic_probe(struct spi_device *spi)
hw->desc[i].n_voltages = 1;
hw->rdev[i] = regulator_register(&hw->desc[i], dev,
- init_data, hw);
+ init_data, hw, NULL);
if (IS_ERR(hw->rdev[i])) {
ret = PTR_ERR(hw->rdev[i]);
hw->rdev[i] = NULL;
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index 14b9389..c75fb20 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -396,7 +396,7 @@ static int __devinit tps6586x_regulator_probe(struct platform_device *pdev)
return err;
rdev = regulator_register(&ri->desc, &pdev->dev,
- pdev->dev.platform_data, ri);
+ pdev->dev.platform_data, ri, NULL);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register regulator %s\n",
ri->desc.name);
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index b552aae..40ecf516 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -25,30 +25,6 @@
#include <linux/gpio.h>
#include <linux/mfd/tps65910.h>
-#define TPS65910_REG_VRTC 0
-#define TPS65910_REG_VIO 1
-#define TPS65910_REG_VDD1 2
-#define TPS65910_REG_VDD2 3
-#define TPS65910_REG_VDD3 4
-#define TPS65910_REG_VDIG1 5
-#define TPS65910_REG_VDIG2 6
-#define TPS65910_REG_VPLL 7
-#define TPS65910_REG_VDAC 8
-#define TPS65910_REG_VAUX1 9
-#define TPS65910_REG_VAUX2 10
-#define TPS65910_REG_VAUX33 11
-#define TPS65910_REG_VMMC 12
-
-#define TPS65911_REG_VDDCTRL 4
-#define TPS65911_REG_LDO1 5
-#define TPS65911_REG_LDO2 6
-#define TPS65911_REG_LDO3 7
-#define TPS65911_REG_LDO4 8
-#define TPS65911_REG_LDO5 9
-#define TPS65911_REG_LDO6 10
-#define TPS65911_REG_LDO7 11
-#define TPS65911_REG_LDO8 12
-
#define TPS65910_SUPPLY_STATE_ENABLED 0x1
/* supported VIO voltages in millivolts */
@@ -686,7 +662,7 @@ static int tps65910_set_voltage_dcdc(struct regulator_dev *dev,
tps65910_reg_write(pmic, TPS65910_VDD2_OP, vsel);
break;
case TPS65911_REG_VDDCTRL:
- vsel = selector;
+ vsel = selector + 3;
tps65910_reg_write(pmic, TPS65911_VDDCTRL_OP, vsel);
}
@@ -885,8 +861,6 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
if (!pmic_plat_data)
return -EINVAL;
- reg_data = pmic_plat_data->tps65910_pmic_init_data;
-
pmic = kzalloc(sizeof(*pmic), GFP_KERNEL);
if (!pmic)
return -ENOMEM;
@@ -937,7 +911,16 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
goto err_free_info;
}
- for (i = 0; i < pmic->num_regulators; i++, info++, reg_data++) {
+ for (i = 0; i < pmic->num_regulators && i < TPS65910_NUM_REGS;
+ i++, info++) {
+
+ reg_data = pmic_plat_data->tps65910_pmic_init_data[i];
+
+ /* Regulator API handles empty constraints but not NULL
+ * constraints */
+ if (!reg_data)
+ continue;
+
/* Register the regulators */
pmic->info[i] = info;
@@ -965,7 +948,7 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
pmic->desc[i].owner = THIS_MODULE;
rdev = regulator_register(&pmic->desc[i],
- tps65910->dev, reg_data, pmic);
+ tps65910->dev, reg_data, pmic, NULL);
if (IS_ERR(rdev)) {
dev_err(tps65910->dev,
"failed to register %s regulator\n",
diff --git a/drivers/regulator/tps65912-regulator.c b/drivers/regulator/tps65912-regulator.c
index 39d4a17..da00d88 100644
--- a/drivers/regulator/tps65912-regulator.c
+++ b/drivers/regulator/tps65912-regulator.c
@@ -727,7 +727,7 @@ static __devinit int tps65912_probe(struct platform_device *pdev)
pmic->desc[i].owner = THIS_MODULE;
range = tps65912_get_range(pmic, i);
rdev = regulator_register(&pmic->desc[i],
- tps65912->dev, reg_data, pmic);
+ tps65912->dev, reg_data, pmic, NULL);
if (IS_ERR(rdev)) {
dev_err(tps65912->dev,
"failed to register %s regulator\n",
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 11cc308..181a2cf 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -1112,7 +1112,7 @@ static int __devinit twlreg_probe(struct platform_device *pdev)
break;
}
- rdev = regulator_register(&info->desc, &pdev->dev, initdata, info);
+ rdev = regulator_register(&info->desc, &pdev->dev, initdata, info, NULL);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "can't register %s, %ld\n",
info->desc.name, PTR_ERR(rdev));
diff --git a/drivers/regulator/userspace-consumer.c b/drivers/regulator/userspace-consumer.c
index fc66551..518667e 100644
--- a/drivers/regulator/userspace-consumer.c
+++ b/drivers/regulator/userspace-consumer.c
@@ -185,18 +185,7 @@ static struct platform_driver regulator_userspace_consumer_driver = {
},
};
-
-static int __init regulator_userspace_consumer_init(void)
-{
- return platform_driver_register(&regulator_userspace_consumer_driver);
-}
-module_init(regulator_userspace_consumer_init);
-
-static void __exit regulator_userspace_consumer_exit(void)
-{
- platform_driver_unregister(&regulator_userspace_consumer_driver);
-}
-module_exit(regulator_userspace_consumer_exit);
+module_platform_driver(regulator_userspace_consumer_driver);
MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>");
MODULE_DESCRIPTION("Userspace consumer for voltage and current regulators");
diff --git a/drivers/regulator/virtual.c b/drivers/regulator/virtual.c
index 858c1f8..ee0b161 100644
--- a/drivers/regulator/virtual.c
+++ b/drivers/regulator/virtual.c
@@ -352,17 +352,7 @@ static struct platform_driver regulator_virtual_consumer_driver = {
},
};
-static int __init regulator_virtual_consumer_init(void)
-{
- return platform_driver_register(&regulator_virtual_consumer_driver);
-}
-module_init(regulator_virtual_consumer_init);
-
-static void __exit regulator_virtual_consumer_exit(void)
-{
- platform_driver_unregister(&regulator_virtual_consumer_driver);
-}
-module_exit(regulator_virtual_consumer_exit);
+module_platform_driver(regulator_virtual_consumer_driver);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("Virtual regulator consumer");
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index bd3531d..4904a40 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -511,7 +511,8 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
if (pdata == NULL || pdata->dcdc[id] == NULL)
return -ENODEV;
- dcdc = kzalloc(sizeof(struct wm831x_dcdc), GFP_KERNEL);
+ dcdc = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_dcdc),
+ GFP_KERNEL);
if (dcdc == NULL) {
dev_err(&pdev->dev, "Unable to allocate private data\n");
return -ENOMEM;
@@ -553,7 +554,7 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
wm831x_buckv_dvs_init(dcdc, pdata->dcdc[id]->driver_data);
dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev,
- pdata->dcdc[id], dcdc);
+ pdata->dcdc[id], dcdc, NULL);
if (IS_ERR(dcdc->regulator)) {
ret = PTR_ERR(dcdc->regulator);
dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n",
@@ -590,7 +591,6 @@ err_regulator:
err:
if (dcdc->dvs_gpio)
gpio_free(dcdc->dvs_gpio);
- kfree(dcdc);
return ret;
}
@@ -605,7 +605,6 @@ static __devexit int wm831x_buckv_remove(struct platform_device *pdev)
regulator_unregister(dcdc->regulator);
if (dcdc->dvs_gpio)
gpio_free(dcdc->dvs_gpio);
- kfree(dcdc);
return 0;
}
@@ -722,7 +721,8 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev)
if (pdata == NULL || pdata->dcdc[id] == NULL)
return -ENODEV;
- dcdc = kzalloc(sizeof(struct wm831x_dcdc), GFP_KERNEL);
+ dcdc = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_dcdc),
+ GFP_KERNEL);
if (dcdc == NULL) {
dev_err(&pdev->dev, "Unable to allocate private data\n");
return -ENOMEM;
@@ -747,7 +747,7 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev)
dcdc->desc.owner = THIS_MODULE;
dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev,
- pdata->dcdc[id], dcdc);
+ pdata->dcdc[id], dcdc, NULL);
if (IS_ERR(dcdc->regulator)) {
ret = PTR_ERR(dcdc->regulator);
dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n",
@@ -771,7 +771,6 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev)
err_regulator:
regulator_unregister(dcdc->regulator);
err:
- kfree(dcdc);
return ret;
}
@@ -783,7 +782,6 @@ static __devexit int wm831x_buckp_remove(struct platform_device *pdev)
free_irq(platform_get_irq_byname(pdev, "UV"), dcdc);
regulator_unregister(dcdc->regulator);
- kfree(dcdc);
return 0;
}
@@ -874,7 +872,7 @@ static __devinit int wm831x_boostp_probe(struct platform_device *pdev)
dcdc->desc.owner = THIS_MODULE;
dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev,
- pdata->dcdc[id], dcdc);
+ pdata->dcdc[id], dcdc, NULL);
if (IS_ERR(dcdc->regulator)) {
ret = PTR_ERR(dcdc->regulator);
dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n",
@@ -973,7 +971,7 @@ static __devinit int wm831x_epe_probe(struct platform_device *pdev)
dcdc->desc.owner = THIS_MODULE;
dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev,
- pdata->epe[id], dcdc);
+ pdata->epe[id], dcdc, NULL);
if (IS_ERR(dcdc->regulator)) {
ret = PTR_ERR(dcdc->regulator);
dev_err(wm831x->dev, "Failed to register EPE%d: %d\n",
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index 01f27c7..634aac3 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -162,7 +162,8 @@ static __devinit int wm831x_isink_probe(struct platform_device *pdev)
if (pdata == NULL || pdata->isink[id] == NULL)
return -ENODEV;
- isink = kzalloc(sizeof(struct wm831x_isink), GFP_KERNEL);
+ isink = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_isink),
+ GFP_KERNEL);
if (isink == NULL) {
dev_err(&pdev->dev, "Unable to allocate private data\n");
return -ENOMEM;
@@ -189,7 +190,7 @@ static __devinit int wm831x_isink_probe(struct platform_device *pdev)
isink->desc.owner = THIS_MODULE;
isink->regulator = regulator_register(&isink->desc, &pdev->dev,
- pdata->isink[id], isink);
+ pdata->isink[id], isink, NULL);
if (IS_ERR(isink->regulator)) {
ret = PTR_ERR(isink->regulator);
dev_err(wm831x->dev, "Failed to register ISINK%d: %d\n",
@@ -213,7 +214,6 @@ static __devinit int wm831x_isink_probe(struct platform_device *pdev)
err_regulator:
regulator_unregister(isink->regulator);
err:
- kfree(isink);
return ret;
}
@@ -226,7 +226,6 @@ static __devexit int wm831x_isink_remove(struct platform_device *pdev)
free_irq(platform_get_irq(pdev, 0), isink);
regulator_unregister(isink->regulator);
- kfree(isink);
return 0;
}
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 6709710..f1e4ab0 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -326,7 +326,7 @@ static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev)
if (pdata == NULL || pdata->ldo[id] == NULL)
return -ENODEV;
- ldo = kzalloc(sizeof(struct wm831x_ldo), GFP_KERNEL);
+ ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ldo), GFP_KERNEL);
if (ldo == NULL) {
dev_err(&pdev->dev, "Unable to allocate private data\n");
return -ENOMEM;
@@ -351,7 +351,7 @@ static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev)
ldo->desc.owner = THIS_MODULE;
ldo->regulator = regulator_register(&ldo->desc, &pdev->dev,
- pdata->ldo[id], ldo);
+ pdata->ldo[id], ldo, NULL);
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm831x->dev, "Failed to register LDO%d: %d\n",
@@ -376,7 +376,6 @@ static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev)
err_regulator:
regulator_unregister(ldo->regulator);
err:
- kfree(ldo);
return ret;
}
@@ -388,7 +387,6 @@ static __devexit int wm831x_gp_ldo_remove(struct platform_device *pdev)
free_irq(platform_get_irq_byname(pdev, "UV"), ldo);
regulator_unregister(ldo->regulator);
- kfree(ldo);
return 0;
}
@@ -596,7 +594,7 @@ static __devinit int wm831x_aldo_probe(struct platform_device *pdev)
if (pdata == NULL || pdata->ldo[id] == NULL)
return -ENODEV;
- ldo = kzalloc(sizeof(struct wm831x_ldo), GFP_KERNEL);
+ ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ldo), GFP_KERNEL);
if (ldo == NULL) {
dev_err(&pdev->dev, "Unable to allocate private data\n");
return -ENOMEM;
@@ -621,7 +619,7 @@ static __devinit int wm831x_aldo_probe(struct platform_device *pdev)
ldo->desc.owner = THIS_MODULE;
ldo->regulator = regulator_register(&ldo->desc, &pdev->dev,
- pdata->ldo[id], ldo);
+ pdata->ldo[id], ldo, NULL);
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm831x->dev, "Failed to register LDO%d: %d\n",
@@ -645,7 +643,6 @@ static __devinit int wm831x_aldo_probe(struct platform_device *pdev)
err_regulator:
regulator_unregister(ldo->regulator);
err:
- kfree(ldo);
return ret;
}
@@ -655,7 +652,6 @@ static __devexit int wm831x_aldo_remove(struct platform_device *pdev)
free_irq(platform_get_irq_byname(pdev, "UV"), ldo);
regulator_unregister(ldo->regulator);
- kfree(ldo);
return 0;
}
@@ -793,7 +789,7 @@ static __devinit int wm831x_alive_ldo_probe(struct platform_device *pdev)
if (pdata == NULL || pdata->ldo[id] == NULL)
return -ENODEV;
- ldo = kzalloc(sizeof(struct wm831x_ldo), GFP_KERNEL);
+ ldo = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_ldo), GFP_KERNEL);
if (ldo == NULL) {
dev_err(&pdev->dev, "Unable to allocate private data\n");
return -ENOMEM;
@@ -818,7 +814,7 @@ static __devinit int wm831x_alive_ldo_probe(struct platform_device *pdev)
ldo->desc.owner = THIS_MODULE;
ldo->regulator = regulator_register(&ldo->desc, &pdev->dev,
- pdata->ldo[id], ldo);
+ pdata->ldo[id], ldo, NULL);
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm831x->dev, "Failed to register LDO%d: %d\n",
@@ -831,7 +827,6 @@ static __devinit int wm831x_alive_ldo_probe(struct platform_device *pdev)
return 0;
err:
- kfree(ldo);
return ret;
}
@@ -840,7 +835,6 @@ static __devexit int wm831x_alive_ldo_remove(struct platform_device *pdev)
struct wm831x_ldo *ldo = platform_get_drvdata(pdev);
regulator_unregister(ldo->regulator);
- kfree(ldo);
return 0;
}
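
The wm831x hunks swap kzalloc()/kfree() for devm_kzalloc(), tying the allocation to the device's lifetime so the explicit kfree() calls in the error and remove paths can go. A minimal sketch of the pattern (illustration only, not part of the patch; foo_* names hypothetical):

static __devinit int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;	/* hypothetical */

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* No kfree() on any exit path: the allocation is released
	 * automatically when probe fails or the device is unbound. */
	platform_set_drvdata(pdev, priv);
	return 0;
}

static __devexit int foo_remove(struct platform_device *pdev)
{
	/* priv is freed by the driver core, nothing to do here. */
	return 0;
}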
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index 1bcb22c..6894009 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -1428,7 +1428,7 @@ static int wm8350_regulator_probe(struct platform_device *pdev)
/* register regulator */
rdev = regulator_register(&wm8350_reg[pdev->id], &pdev->dev,
pdev->dev.platform_data,
- dev_get_drvdata(&pdev->dev));
+ dev_get_drvdata(&pdev->dev), NULL);
if (IS_ERR(rdev)) {
dev_err(&pdev->dev, "failed to register %s\n",
wm8350_reg[pdev->id].name);
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index 71632dd..706f395 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -326,7 +326,7 @@ static int __devinit wm8400_regulator_probe(struct platform_device *pdev)
struct regulator_dev *rdev;
rdev = regulator_register(&regulators[pdev->id], &pdev->dev,
- pdev->dev.platform_data, wm8400);
+ pdev->dev.platform_data, wm8400, NULL);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
index b87bf5c..435e335 100644
--- a/drivers/regulator/wm8994-regulator.c
+++ b/drivers/regulator/wm8994-regulator.c
@@ -269,7 +269,7 @@ static __devinit int wm8994_ldo_probe(struct platform_device *pdev)
ldo->is_enabled = true;
ldo->regulator = regulator_register(&wm8994_ldo_desc[id], &pdev->dev,
- pdata->ldo[id].init_data, ldo);
+ pdata->ldo[id].init_data, ldo, NULL);
if (IS_ERR(ldo->regulator)) {
ret = PTR_ERR(ldo->regulator);
dev_err(wm8994->dev, "Failed to register LDO%d: %d\n",
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 53eb4e5..3a125b8 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -498,9 +498,9 @@ config RTC_DRV_CMOS
will be called rtc-cmos.
config RTC_DRV_VRTC
- tristate "Virtual RTC for Moorestown platforms"
- depends on X86_MRST
- default y if X86_MRST
+ tristate "Virtual RTC for Intel MID platforms"
+ depends on X86_INTEL_MID
+ default y if X86_INTEL_MID
help
Say "yes" here to get direct support for the real time clock
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 8e28625..8a1c031 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -228,11 +228,11 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
alarm->time.tm_hour = now.tm_hour;
/* For simplicity, only support date rollover for now */
- if (alarm->time.tm_mday == -1) {
+ if (alarm->time.tm_mday < 1 || alarm->time.tm_mday > 31) {
alarm->time.tm_mday = now.tm_mday;
missing = day;
}
- if (alarm->time.tm_mon == -1) {
+ if ((unsigned)alarm->time.tm_mon >= 12) {
alarm->time.tm_mon = now.tm_mon;
if (missing == none)
missing = month;
diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c
index 64b847b..f04761e 100644
--- a/drivers/rtc/rtc-88pm860x.c
+++ b/drivers/rtc/rtc-88pm860x.c
@@ -410,17 +410,7 @@ static struct platform_driver pm860x_rtc_driver = {
.remove = __devexit_p(pm860x_rtc_remove),
};
-static int __init pm860x_rtc_init(void)
-{
- return platform_driver_register(&pm860x_rtc_driver);
-}
-module_init(pm860x_rtc_init);
-
-static void __exit pm860x_rtc_exit(void)
-{
- platform_driver_unregister(&pm860x_rtc_driver);
-}
-module_exit(pm860x_rtc_exit);
+module_platform_driver(pm860x_rtc_driver);
MODULE_DESCRIPTION("Marvell 88PM860x RTC driver");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
diff --git a/drivers/rtc/rtc-ab8500.c b/drivers/rtc/rtc-ab8500.c
index e346705..4bcf9ca 100644
--- a/drivers/rtc/rtc-ab8500.c
+++ b/drivers/rtc/rtc-ab8500.c
@@ -15,7 +15,7 @@
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/mfd/abx500.h>
-#include <linux/mfd/ab8500.h>
+#include <linux/mfd/abx500/ab8500.h>
#include <linux/delay.h>
#define AB8500_RTC_SOFF_STAT_REG 0x00
@@ -90,7 +90,7 @@ static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
/* Early AB8500 chips will not clear the rtc read request bit */
if (abx500_get_chip_id(dev) == 0) {
- msleep(1);
+ usleep_range(1000, 1000);
} else {
/* Wait for some cycles after enabling the rtc read in ab8500 */
while (time_before(jiffies, timeout)) {
@@ -102,7 +102,7 @@ static int ab8500_rtc_read_time(struct device *dev, struct rtc_time *tm)
if (!(value & RTC_READ_REQUEST))
break;
- msleep(1);
+ usleep_range(1000, 5000);
}
}
@@ -258,6 +258,109 @@ static int ab8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
return ab8500_rtc_irq_enable(dev, alarm->enabled);
}
+
+static int ab8500_rtc_set_calibration(struct device *dev, int calibration)
+{
+ int retval;
+ u8 rtccal = 0;
+
+ /*
+ * Check that the calibration value (which is in units of 0.5
+ * parts-per-million) is in the AB8500's range for RtcCalibration
+ * register. -128 (0x80) is not permitted because the AB8500 uses
+ * a sign-bit rather than two's complement, so 0x80 is just another
+ * representation of zero.
+ */
+ if ((calibration < -127) || (calibration > 127)) {
+ dev_err(dev, "RtcCalibration value outside permitted range\n");
+ return -EINVAL;
+ }
+
+ /*
+	 * The AB8500 uses sign (in bit7) and magnitude (in bits0-6)
+	 * representation, so the value needs to be converted to that form
+	 * before being written into the RtcCalibration register...
+ */
+ if (calibration >= 0)
+ rtccal = 0x7F & calibration;
+ else
+ rtccal = ~(calibration - 1) | 0x80;
+
+ retval = abx500_set_register_interruptible(dev, AB8500_RTC,
+ AB8500_RTC_CALIB_REG, rtccal);
+
+ return retval;
+}
+
+static int ab8500_rtc_get_calibration(struct device *dev, int *calibration)
+{
+ int retval;
+ u8 rtccal = 0;
+
+ retval = abx500_get_register_interruptible(dev, AB8500_RTC,
+ AB8500_RTC_CALIB_REG, &rtccal);
+ if (retval >= 0) {
+ /*
+		 * The AB8500 uses sign (in bit7) and magnitude (in bits0-6)
+		 * representation, so the value read from the RtcCalibration
+		 * register needs converting into a two's complement signed value...
+ */
+ if (rtccal & 0x80)
+ *calibration = 0 - (rtccal & 0x7F);
+ else
+ *calibration = 0x7F & rtccal;
+ }
+
+ return retval;
+}
+
+static ssize_t ab8500_sysfs_store_rtc_calibration(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int retval;
+ int calibration = 0;
+
+ if (sscanf(buf, " %i ", &calibration) != 1) {
+ dev_err(dev, "Failed to store RTC calibration attribute\n");
+ return -EINVAL;
+ }
+
+ retval = ab8500_rtc_set_calibration(dev, calibration);
+
+ return retval ? retval : count;
+}
+
+static ssize_t ab8500_sysfs_show_rtc_calibration(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int retval = 0;
+ int calibration = 0;
+
+ retval = ab8500_rtc_get_calibration(dev, &calibration);
+ if (retval < 0) {
+ dev_err(dev, "Failed to read RTC calibration attribute\n");
+ sprintf(buf, "0\n");
+ return retval;
+ }
+
+ return sprintf(buf, "%d\n", calibration);
+}
+
+static DEVICE_ATTR(rtc_calibration, S_IRUGO | S_IWUSR,
+ ab8500_sysfs_show_rtc_calibration,
+ ab8500_sysfs_store_rtc_calibration);
+
+static int ab8500_sysfs_rtc_register(struct device *dev)
+{
+ return device_create_file(dev, &dev_attr_rtc_calibration);
+}
+
+static void ab8500_sysfs_rtc_unregister(struct device *dev)
+{
+ device_remove_file(dev, &dev_attr_rtc_calibration);
+}
+
static irqreturn_t rtc_alarm_handler(int irq, void *data)
{
struct rtc_device *rtc = data;
@@ -295,7 +398,7 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
return err;
/* Wait for reset by the PorRtc */
- msleep(1);
+ usleep_range(1000, 5000);
err = abx500_get_register_interruptible(&pdev->dev, AB8500_RTC,
AB8500_RTC_STAT_REG, &rtc_ctrl);
@@ -308,6 +411,8 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
return -ENODEV;
}
+ device_init_wakeup(&pdev->dev, true);
+
rtc = rtc_device_register("ab8500-rtc", &pdev->dev, &ab8500_rtc_ops,
THIS_MODULE);
if (IS_ERR(rtc)) {
@@ -316,8 +421,8 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
return err;
}
- err = request_threaded_irq(irq, NULL, rtc_alarm_handler, 0,
- "ab8500-rtc", rtc);
+ err = request_threaded_irq(irq, NULL, rtc_alarm_handler,
+ IRQF_NO_SUSPEND, "ab8500-rtc", rtc);
if (err < 0) {
rtc_device_unregister(rtc);
return err;
@@ -325,6 +430,13 @@ static int __devinit ab8500_rtc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, rtc);
+
+ err = ab8500_sysfs_rtc_register(&pdev->dev);
+ if (err) {
+ dev_err(&pdev->dev, "sysfs RTC failed to register\n");
+ return err;
+ }
+
return 0;
}
@@ -333,6 +445,8 @@ static int __devexit ab8500_rtc_remove(struct platform_device *pdev)
struct rtc_device *rtc = platform_get_drvdata(pdev);
int irq = platform_get_irq_byname(pdev, "ALARM");
+ ab8500_sysfs_rtc_unregister(&pdev->dev);
+
free_irq(irq, rtc);
rtc_device_unregister(rtc);
platform_set_drvdata(pdev, NULL);
@@ -349,18 +463,8 @@ static struct platform_driver ab8500_rtc_driver = {
.remove = __devexit_p(ab8500_rtc_remove),
};
-static int __init ab8500_rtc_init(void)
-{
- return platform_driver_register(&ab8500_rtc_driver);
-}
-
-static void __exit ab8500_rtc_exit(void)
-{
- platform_driver_unregister(&ab8500_rtc_driver);
-}
+module_platform_driver(ab8500_rtc_driver);
-module_init(ab8500_rtc_init);
-module_exit(ab8500_rtc_exit);
MODULE_AUTHOR("Virupax Sadashivpetimath <virupax.sadashivpetimath@stericsson.com>");
MODULE_DESCRIPTION("AB8500 RTC Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index e39b77a..dc474bc 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -32,11 +32,17 @@
#include <mach/at91_rtc.h>
+#define at91_rtc_read(field) \
+ __raw_readl(at91_rtc_regs + field)
+#define at91_rtc_write(field, val) \
+ __raw_writel((val), at91_rtc_regs + field)
#define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */
static DECLARE_COMPLETION(at91_rtc_updated);
static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
+static void __iomem *at91_rtc_regs;
+static int irq;
/*
* Decode time/date into rtc_time structure
@@ -48,10 +54,10 @@ static void at91_rtc_decodetime(unsigned int timereg, unsigned int calreg,
/* must read twice in case it changes */
do {
- time = at91_sys_read(timereg);
- date = at91_sys_read(calreg);
- } while ((time != at91_sys_read(timereg)) ||
- (date != at91_sys_read(calreg)));
+ time = at91_rtc_read(timereg);
+ date = at91_rtc_read(calreg);
+ } while ((time != at91_rtc_read(timereg)) ||
+ (date != at91_rtc_read(calreg)));
tm->tm_sec = bcd2bin((time & AT91_RTC_SEC) >> 0);
tm->tm_min = bcd2bin((time & AT91_RTC_MIN) >> 8);
@@ -98,19 +104,19 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
tm->tm_hour, tm->tm_min, tm->tm_sec);
/* Stop Time/Calendar from counting */
- cr = at91_sys_read(AT91_RTC_CR);
- at91_sys_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM);
+ cr = at91_rtc_read(AT91_RTC_CR);
+ at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM);
- at91_sys_write(AT91_RTC_IER, AT91_RTC_ACKUPD);
+ at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD);
wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */
- at91_sys_write(AT91_RTC_IDR, AT91_RTC_ACKUPD);
+ at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD);
- at91_sys_write(AT91_RTC_TIMR,
+ at91_rtc_write(AT91_RTC_TIMR,
bin2bcd(tm->tm_sec) << 0
| bin2bcd(tm->tm_min) << 8
| bin2bcd(tm->tm_hour) << 16);
- at91_sys_write(AT91_RTC_CALR,
+ at91_rtc_write(AT91_RTC_CALR,
bin2bcd((tm->tm_year + 1900) / 100) /* century */
| bin2bcd(tm->tm_year % 100) << 8 /* year */
| bin2bcd(tm->tm_mon + 1) << 16 /* tm_mon starts at zero */
@@ -118,8 +124,8 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
| bin2bcd(tm->tm_mday) << 24);
/* Restart Time/Calendar */
- cr = at91_sys_read(AT91_RTC_CR);
- at91_sys_write(AT91_RTC_CR, cr & ~(AT91_RTC_UPDCAL | AT91_RTC_UPDTIM));
+ cr = at91_rtc_read(AT91_RTC_CR);
+ at91_rtc_write(AT91_RTC_CR, cr & ~(AT91_RTC_UPDCAL | AT91_RTC_UPDTIM));
return 0;
}
@@ -135,7 +141,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
tm->tm_year = at91_alarm_year - 1900;
- alrm->enabled = (at91_sys_read(AT91_RTC_IMR) & AT91_RTC_ALARM)
+ alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM)
? 1 : 0;
pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
@@ -160,20 +166,20 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
tm.tm_min = alrm->time.tm_min;
tm.tm_sec = alrm->time.tm_sec;
- at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM);
- at91_sys_write(AT91_RTC_TIMALR,
+ at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
+ at91_rtc_write(AT91_RTC_TIMALR,
bin2bcd(tm.tm_sec) << 0
| bin2bcd(tm.tm_min) << 8
| bin2bcd(tm.tm_hour) << 16
| AT91_RTC_HOUREN | AT91_RTC_MINEN | AT91_RTC_SECEN);
- at91_sys_write(AT91_RTC_CALALR,
+ at91_rtc_write(AT91_RTC_CALALR,
bin2bcd(tm.tm_mon + 1) << 16 /* tm_mon starts at zero */
| bin2bcd(tm.tm_mday) << 24
| AT91_RTC_DATEEN | AT91_RTC_MTHEN);
if (alrm->enabled) {
- at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
- at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
+ at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
+ at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
}
pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
@@ -188,10 +194,10 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
pr_debug("%s(): cmd=%08x\n", __func__, enabled);
if (enabled) {
- at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
- at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
+ at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
+ at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
} else
- at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM);
+ at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
return 0;
}
@@ -200,7 +206,7 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
*/
static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
{
- unsigned long imr = at91_sys_read(AT91_RTC_IMR);
+ unsigned long imr = at91_rtc_read(AT91_RTC_IMR);
seq_printf(seq, "update_IRQ\t: %s\n",
(imr & AT91_RTC_ACKUPD) ? "yes" : "no");
@@ -220,7 +226,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
unsigned int rtsr;
unsigned long events = 0;
- rtsr = at91_sys_read(AT91_RTC_SR) & at91_sys_read(AT91_RTC_IMR);
+ rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read(AT91_RTC_IMR);
if (rtsr) { /* this interrupt is shared! Is it ours? */
if (rtsr & AT91_RTC_ALARM)
events |= (RTC_AF | RTC_IRQF);
@@ -229,7 +235,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
if (rtsr & AT91_RTC_ACKUPD)
complete(&at91_rtc_updated);
- at91_sys_write(AT91_RTC_SCCR, rtsr); /* clear status reg */
+ at91_rtc_write(AT91_RTC_SCCR, rtsr); /* clear status reg */
rtc_update_irq(rtc, 1, events);
@@ -256,22 +262,41 @@ static const struct rtc_class_ops at91_rtc_ops = {
static int __init at91_rtc_probe(struct platform_device *pdev)
{
struct rtc_device *rtc;
- int ret;
+ struct resource *regs;
+ int ret = 0;
- at91_sys_write(AT91_RTC_CR, 0);
- at91_sys_write(AT91_RTC_MR, 0); /* 24 hour mode */
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs) {
+ dev_err(&pdev->dev, "no mmio resource defined\n");
+ return -ENXIO;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no irq resource defined\n");
+ return -ENXIO;
+ }
+
+ at91_rtc_regs = ioremap(regs->start, resource_size(regs));
+ if (!at91_rtc_regs) {
+ dev_err(&pdev->dev, "failed to map registers, aborting.\n");
+ return -ENOMEM;
+ }
+
+ at91_rtc_write(AT91_RTC_CR, 0);
+ at91_rtc_write(AT91_RTC_MR, 0); /* 24 hour mode */
/* Disable all interrupts */
- at91_sys_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
+ at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
AT91_RTC_SECEV | AT91_RTC_TIMEV |
AT91_RTC_CALEV);
- ret = request_irq(AT91_ID_SYS, at91_rtc_interrupt,
+ ret = request_irq(irq, at91_rtc_interrupt,
IRQF_SHARED,
"at91_rtc", pdev);
if (ret) {
printk(KERN_ERR "at91_rtc: IRQ %d already in use.\n",
- AT91_ID_SYS);
+ irq);
return ret;
}
@@ -284,7 +309,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
rtc = rtc_device_register(pdev->name, &pdev->dev,
&at91_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc)) {
- free_irq(AT91_ID_SYS, pdev);
+ free_irq(irq, pdev);
return PTR_ERR(rtc);
}
platform_set_drvdata(pdev, rtc);
@@ -301,10 +326,10 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
struct rtc_device *rtc = platform_get_drvdata(pdev);
/* Disable all interrupts */
- at91_sys_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
+ at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
AT91_RTC_SECEV | AT91_RTC_TIMEV |
AT91_RTC_CALEV);
- free_irq(AT91_ID_SYS, pdev);
+ free_irq(irq, pdev);
rtc_device_unregister(rtc);
platform_set_drvdata(pdev, NULL);
@@ -323,13 +348,13 @@ static int at91_rtc_suspend(struct device *dev)
/* this IRQ is shared with DBGU and other hardware which isn't
* necessarily doing PM like we are...
*/
- at91_rtc_imr = at91_sys_read(AT91_RTC_IMR)
+ at91_rtc_imr = at91_rtc_read(AT91_RTC_IMR)
& (AT91_RTC_ALARM|AT91_RTC_SECEV);
if (at91_rtc_imr) {
if (device_may_wakeup(dev))
- enable_irq_wake(AT91_ID_SYS);
+ enable_irq_wake(irq);
else
- at91_sys_write(AT91_RTC_IDR, at91_rtc_imr);
+ at91_rtc_write(AT91_RTC_IDR, at91_rtc_imr);
}
return 0;
}
@@ -338,9 +363,9 @@ static int at91_rtc_resume(struct device *dev)
{
if (at91_rtc_imr) {
if (device_may_wakeup(dev))
- disable_irq_wake(AT91_ID_SYS);
+ disable_irq_wake(irq);
else
- at91_sys_write(AT91_RTC_IER, at91_rtc_imr);
+ at91_rtc_write(AT91_RTC_IER, at91_rtc_imr);
}
return 0;
}
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index a3ad957..ee3c122 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -307,8 +307,12 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 1);
platform_set_drvdata(pdev, rtc);
- rtc->rtt = (void __force __iomem *) (AT91_VA_BASE_SYS - AT91_BASE_SYS);
- rtc->rtt += r->start;
+ rtc->rtt = ioremap(r->start, resource_size(r));
+ if (!rtc->rtt) {
+ dev_err(&pdev->dev, "failed to map registers, aborting.\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
mr = rtt_readl(rtc, MR);
@@ -326,7 +330,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
&at91_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc->rtcdev)) {
ret = PTR_ERR(rtc->rtcdev);
- goto fail;
+ goto fail_register;
}
/* register irq handler after we know what name we'll use */
@@ -351,6 +355,8 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
return 0;
+fail_register:
+ iounmap(rtc->rtt);
fail:
platform_set_drvdata(pdev, NULL);
kfree(rtc);
@@ -371,6 +377,7 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
rtc_device_unregister(rtc->rtcdev);
+ iounmap(rtc->rtt);
platform_set_drvdata(pdev, NULL);
kfree(rtc);
return 0;
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index 90d8662..abfc1a0 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -456,18 +456,7 @@ static struct platform_driver bfin_rtc_driver = {
.resume = bfin_rtc_resume,
};
-static int __init bfin_rtc_init(void)
-{
- return platform_driver_register(&bfin_rtc_driver);
-}
-
-static void __exit bfin_rtc_exit(void)
-{
- platform_driver_unregister(&bfin_rtc_driver);
-}
-
-module_init(bfin_rtc_init);
-module_exit(bfin_rtc_exit);
+module_platform_driver(bfin_rtc_driver);
MODULE_DESCRIPTION("Blackfin On-Chip Real Time Clock Driver");
MODULE_AUTHOR("Mike Frysinger <vapier@gentoo.org>");
diff --git a/drivers/rtc/rtc-bq4802.c b/drivers/rtc/rtc-bq4802.c
index 128270c..bf612ef 100644
--- a/drivers/rtc/rtc-bq4802.c
+++ b/drivers/rtc/rtc-bq4802.c
@@ -218,15 +218,4 @@ static struct platform_driver bq4802_driver = {
.remove = __devexit_p(bq4802_remove),
};
-static int __init bq4802_init(void)
-{
- return platform_driver_register(&bq4802_driver);
-}
-
-static void __exit bq4802_exit(void)
-{
- platform_driver_unregister(&bq4802_driver);
-}
-
-module_init(bq4802_init);
-module_exit(bq4802_exit);
+module_platform_driver(bq4802_driver);
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 05beb6c..d7782aa 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -164,7 +164,7 @@ static inline unsigned char cmos_read_bank2(unsigned char addr)
static inline void cmos_write_bank2(unsigned char val, unsigned char addr)
{
outb(addr, RTC_PORT(2));
- outb(val, RTC_PORT(2));
+ outb(val, RTC_PORT(3));
}
#else
diff --git a/drivers/rtc/rtc-dm355evm.c b/drivers/rtc/rtc-dm355evm.c
index 2322c43..d4457af 100644
--- a/drivers/rtc/rtc-dm355evm.c
+++ b/drivers/rtc/rtc-dm355evm.c
@@ -161,16 +161,6 @@ static struct platform_driver rtc_dm355evm_driver = {
},
};
-static int __init dm355evm_rtc_init(void)
-{
- return platform_driver_register(&rtc_dm355evm_driver);
-}
-module_init(dm355evm_rtc_init);
-
-static void __exit dm355evm_rtc_exit(void)
-{
- platform_driver_unregister(&rtc_dm355evm_driver);
-}
-module_exit(dm355evm_rtc_exit);
+module_platform_driver(rtc_dm355evm_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-ds1286.c b/drivers/rtc/rtc-ds1286.c
index 68e6caf..990c3ff 100644
--- a/drivers/rtc/rtc-ds1286.c
+++ b/drivers/rtc/rtc-ds1286.c
@@ -396,21 +396,10 @@ static struct platform_driver ds1286_platform_driver = {
.remove = __devexit_p(ds1286_remove),
};
-static int __init ds1286_init(void)
-{
- return platform_driver_register(&ds1286_platform_driver);
-}
-
-static void __exit ds1286_exit(void)
-{
- platform_driver_unregister(&ds1286_platform_driver);
-}
+module_platform_driver(ds1286_platform_driver);
MODULE_AUTHOR("Thomas Bogendoerfer <tsbogend@alpha.franken.de>");
MODULE_DESCRIPTION("DS1286 RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:rtc-ds1286");
-
-module_init(ds1286_init);
-module_exit(ds1286_exit);
diff --git a/drivers/rtc/rtc-ds1511.c b/drivers/rtc/rtc-ds1511.c
index 586c244..761f36b 100644
--- a/drivers/rtc/rtc-ds1511.c
+++ b/drivers/rtc/rtc-ds1511.c
@@ -580,20 +580,7 @@ static struct platform_driver ds1511_rtc_driver = {
},
};
- static int __init
-ds1511_rtc_init(void)
-{
- return platform_driver_register(&ds1511_rtc_driver);
-}
-
- static void __exit
-ds1511_rtc_exit(void)
-{
- platform_driver_unregister(&ds1511_rtc_driver);
-}
-
-module_init(ds1511_rtc_init);
-module_exit(ds1511_rtc_exit);
+module_platform_driver(ds1511_rtc_driver);
MODULE_AUTHOR("Andrew Sharp <andy.sharp@lsi.com>");
MODULE_DESCRIPTION("Dallas DS1511 RTC driver");
diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c
index 1350029..6f0a1b5 100644
--- a/drivers/rtc/rtc-ds1553.c
+++ b/drivers/rtc/rtc-ds1553.c
@@ -361,18 +361,7 @@ static struct platform_driver ds1553_rtc_driver = {
},
};
-static __init int ds1553_init(void)
-{
- return platform_driver_register(&ds1553_rtc_driver);
-}
-
-static __exit void ds1553_exit(void)
-{
- platform_driver_unregister(&ds1553_rtc_driver);
-}
-
-module_init(ds1553_init);
-module_exit(ds1553_exit);
+module_platform_driver(ds1553_rtc_driver);
MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
MODULE_DESCRIPTION("Dallas DS1553 RTC driver");
diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c
index e3e0f92..7611266 100644
--- a/drivers/rtc/rtc-ds1742.c
+++ b/drivers/rtc/rtc-ds1742.c
@@ -240,18 +240,7 @@ static struct platform_driver ds1742_rtc_driver = {
},
};
-static __init int ds1742_init(void)
-{
- return platform_driver_register(&ds1742_rtc_driver);
-}
-
-static __exit void ds1742_exit(void)
-{
- platform_driver_unregister(&ds1742_rtc_driver);
-}
-
-module_init(ds1742_init);
-module_exit(ds1742_exit);
+module_platform_driver(ds1742_rtc_driver);
MODULE_AUTHOR("Atsushi Nemoto <anemo@mba.ocn.ne.jp>");
MODULE_DESCRIPTION("Dallas DS1742 RTC driver");
diff --git a/drivers/rtc/rtc-jz4740.c b/drivers/rtc/rtc-jz4740.c
index b647363..05ab227 100644
--- a/drivers/rtc/rtc-jz4740.c
+++ b/drivers/rtc/rtc-jz4740.c
@@ -345,7 +345,7 @@ static const struct dev_pm_ops jz4740_pm_ops = {
#define JZ4740_RTC_PM_OPS NULL
#endif /* CONFIG_PM */
-struct platform_driver jz4740_rtc_driver = {
+static struct platform_driver jz4740_rtc_driver = {
.probe = jz4740_rtc_probe,
.remove = __devexit_p(jz4740_rtc_remove),
.driver = {
@@ -355,17 +355,7 @@ struct platform_driver jz4740_rtc_driver = {
},
};
-static int __init jz4740_rtc_init(void)
-{
- return platform_driver_register(&jz4740_rtc_driver);
-}
-module_init(jz4740_rtc_init);
-
-static void __exit jz4740_rtc_exit(void)
-{
- platform_driver_unregister(&jz4740_rtc_driver);
-}
-module_exit(jz4740_rtc_exit);
+module_platform_driver(jz4740_rtc_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_LICENSE("GPL");
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c
index ae16250..ecc1713 100644
--- a/drivers/rtc/rtc-lpc32xx.c
+++ b/drivers/rtc/rtc-lpc32xx.c
@@ -396,17 +396,7 @@ static struct platform_driver lpc32xx_rtc_driver = {
},
};
-static int __init lpc32xx_rtc_init(void)
-{
- return platform_driver_register(&lpc32xx_rtc_driver);
-}
-module_init(lpc32xx_rtc_init);
-
-static void __exit lpc32xx_rtc_exit(void)
-{
- platform_driver_unregister(&lpc32xx_rtc_driver);
-}
-module_exit(lpc32xx_rtc_exit);
+module_platform_driver(lpc32xx_rtc_driver);
MODULE_AUTHOR("Kevin Wells <wellsk40@gmail.com");
MODULE_DESCRIPTION("RTC driver for the LPC32xx SoC");
diff --git a/drivers/rtc/rtc-m41t93.c b/drivers/rtc/rtc-m41t93.c
index 7317d3b..ef71132 100644
--- a/drivers/rtc/rtc-m41t93.c
+++ b/drivers/rtc/rtc-m41t93.c
@@ -200,7 +200,6 @@ static int __devexit m41t93_remove(struct spi_device *spi)
static struct spi_driver m41t93_driver = {
.driver = {
.name = "rtc-m41t93",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = m41t93_probe,
diff --git a/drivers/rtc/rtc-m41t94.c b/drivers/rtc/rtc-m41t94.c
index e259ed7..2a4721f 100644
--- a/drivers/rtc/rtc-m41t94.c
+++ b/drivers/rtc/rtc-m41t94.c
@@ -147,7 +147,6 @@ static int __devexit m41t94_remove(struct spi_device *spi)
static struct spi_driver m41t94_driver = {
.driver = {
.name = "rtc-m41t94",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = m41t94_probe,
diff --git a/drivers/rtc/rtc-m48t35.c b/drivers/rtc/rtc-m48t35.c
index 8e2a24e..f9e3b35 100644
--- a/drivers/rtc/rtc-m48t35.c
+++ b/drivers/rtc/rtc-m48t35.c
@@ -216,21 +216,10 @@ static struct platform_driver m48t35_platform_driver = {
.remove = __devexit_p(m48t35_remove),
};
-static int __init m48t35_init(void)
-{
- return platform_driver_register(&m48t35_platform_driver);
-}
-
-static void __exit m48t35_exit(void)
-{
- platform_driver_unregister(&m48t35_platform_driver);
-}
+module_platform_driver(m48t35_platform_driver);
MODULE_AUTHOR("Thomas Bogendoerfer <tsbogend@alpha.franken.de>");
MODULE_DESCRIPTION("M48T35 RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:rtc-m48t35");
-
-module_init(m48t35_init);
-module_exit(m48t35_exit);
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index 2836538..30ebfec 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -530,18 +530,7 @@ static struct platform_driver m48t59_rtc_driver = {
.remove = __devexit_p(m48t59_rtc_remove),
};
-static int __init m48t59_rtc_init(void)
-{
- return platform_driver_register(&m48t59_rtc_driver);
-}
-
-static void __exit m48t59_rtc_exit(void)
-{
- platform_driver_unregister(&m48t59_rtc_driver);
-}
-
-module_init(m48t59_rtc_init);
-module_exit(m48t59_rtc_exit);
+module_platform_driver(m48t59_rtc_driver);
MODULE_AUTHOR("Mark Zhan <rongkai.zhan@windriver.com>");
MODULE_DESCRIPTION("M48T59/M48T02/M48T08 RTC driver");
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
index f981287..863fb33 100644
--- a/drivers/rtc/rtc-m48t86.c
+++ b/drivers/rtc/rtc-m48t86.c
@@ -185,21 +185,10 @@ static struct platform_driver m48t86_rtc_platform_driver = {
.remove = __devexit_p(m48t86_rtc_remove),
};
-static int __init m48t86_rtc_init(void)
-{
- return platform_driver_register(&m48t86_rtc_platform_driver);
-}
-
-static void __exit m48t86_rtc_exit(void)
-{
- platform_driver_unregister(&m48t86_rtc_platform_driver);
-}
+module_platform_driver(m48t86_rtc_platform_driver);
MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
MODULE_DESCRIPTION("M48T86 RTC driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:rtc-m48t86");
-
-module_init(m48t86_rtc_init);
-module_exit(m48t86_rtc_exit);
diff --git a/drivers/rtc/rtc-max6902.c b/drivers/rtc/rtc-max6902.c
index 0ec3f58..1f6b3cc 100644
--- a/drivers/rtc/rtc-max6902.c
+++ b/drivers/rtc/rtc-max6902.c
@@ -154,7 +154,6 @@ static int __devexit max6902_remove(struct spi_device *spi)
static struct spi_driver max6902_driver = {
.driver = {
.name = "rtc-max6902",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = max6902_probe,
diff --git a/drivers/rtc/rtc-max8925.c b/drivers/rtc/rtc-max8925.c
index 3bc046f..2d71943 100644
--- a/drivers/rtc/rtc-max8925.c
+++ b/drivers/rtc/rtc-max8925.c
@@ -261,6 +261,8 @@ static int __devinit max8925_rtc_probe(struct platform_device *pdev)
/* XXX - isn't this redundant? */
platform_set_drvdata(pdev, info);
+ device_init_wakeup(&pdev->dev, 1);
+
info->rtc_dev = rtc_device_register("max8925-rtc", &pdev->dev,
&max8925_rtc_ops, THIS_MODULE);
ret = PTR_ERR(info->rtc_dev);
@@ -290,26 +292,40 @@ static int __devexit max8925_rtc_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int max8925_rtc_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
+
+ if (device_may_wakeup(dev))
+ chip->wakeup_flag |= 1 << MAX8925_IRQ_RTC_ALARM0;
+ return 0;
+}
+static int max8925_rtc_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
+
+ if (device_may_wakeup(dev))
+ chip->wakeup_flag &= ~(1 << MAX8925_IRQ_RTC_ALARM0);
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(max8925_rtc_pm_ops, max8925_rtc_suspend, max8925_rtc_resume);
+
static struct platform_driver max8925_rtc_driver = {
.driver = {
.name = "max8925-rtc",
.owner = THIS_MODULE,
+ .pm = &max8925_rtc_pm_ops,
},
.probe = max8925_rtc_probe,
.remove = __devexit_p(max8925_rtc_remove),
};
-static int __init max8925_rtc_init(void)
-{
- return platform_driver_register(&max8925_rtc_driver);
-}
-module_init(max8925_rtc_init);
-
-static void __exit max8925_rtc_exit(void)
-{
- platform_driver_unregister(&max8925_rtc_driver);
-}
-module_exit(max8925_rtc_exit);
+module_platform_driver(max8925_rtc_driver);
MODULE_DESCRIPTION("Maxim MAX8925 RTC driver");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
diff --git a/drivers/rtc/rtc-max8998.c b/drivers/rtc/rtc-max8998.c
index 2e48aa6..7196f43 100644
--- a/drivers/rtc/rtc-max8998.c
+++ b/drivers/rtc/rtc-max8998.c
@@ -327,17 +327,7 @@ static struct platform_driver max8998_rtc_driver = {
.id_table = max8998_rtc_id,
};
-static int __init max8998_rtc_init(void)
-{
- return platform_driver_register(&max8998_rtc_driver);
-}
-module_init(max8998_rtc_init);
-
-static void __exit max8998_rtc_exit(void)
-{
- platform_driver_unregister(&max8998_rtc_driver);
-}
-module_exit(max8998_rtc_exit);
+module_platform_driver(max8998_rtc_driver);
MODULE_AUTHOR("Minkyu Kang <mk7.kang@samsung.com>");
MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
diff --git a/drivers/rtc/rtc-mc13xxx.c b/drivers/rtc/rtc-mc13xxx.c
index 9d0c3b4..546f685 100644
--- a/drivers/rtc/rtc-mc13xxx.c
+++ b/drivers/rtc/rtc-mc13xxx.c
@@ -399,7 +399,7 @@ static int __exit mc13xxx_rtc_remove(struct platform_device *pdev)
return 0;
}
-const struct platform_device_id mc13xxx_rtc_idtable[] = {
+static const struct platform_device_id mc13xxx_rtc_idtable[] = {
{
.name = "mc13783-rtc",
}, {
diff --git a/drivers/rtc/rtc-mpc5121.c b/drivers/rtc/rtc-mpc5121.c
index da60915..9d3cacc 100644
--- a/drivers/rtc/rtc-mpc5121.c
+++ b/drivers/rtc/rtc-mpc5121.c
@@ -418,17 +418,7 @@ static struct platform_driver mpc5121_rtc_driver = {
.remove = __devexit_p(mpc5121_rtc_remove),
};
-static int __init mpc5121_rtc_init(void)
-{
- return platform_driver_register(&mpc5121_rtc_driver);
-}
-module_init(mpc5121_rtc_init);
-
-static void __exit mpc5121_rtc_exit(void)
-{
- platform_driver_unregister(&mpc5121_rtc_driver);
-}
-module_exit(mpc5121_rtc_exit);
+module_platform_driver(mpc5121_rtc_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("John Rigby <jcrigby@gmail.com>");
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index bb21f44..6cd6c72 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -537,18 +537,7 @@ static struct platform_driver vrtc_mrst_platform_driver = {
}
};
-static int __init vrtc_mrst_init(void)
-{
- return platform_driver_register(&vrtc_mrst_platform_driver);
-}
-
-static void __exit vrtc_mrst_exit(void)
-{
- platform_driver_unregister(&vrtc_mrst_platform_driver);
-}
-
-module_init(vrtc_mrst_init);
-module_exit(vrtc_mrst_exit);
+module_platform_driver(vrtc_mrst_platform_driver);
MODULE_AUTHOR("Jacob Pan; Feng Tang");
MODULE_DESCRIPTION("Driver for Moorestown virtual RTC");
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 39e41fb..5e1d64e 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -155,7 +155,6 @@ static int rtc_update_alarm(struct device *dev, struct rtc_time *alrm)
{
struct rtc_time alarm_tm, now_tm;
unsigned long now, time;
- int ret;
struct platform_device *pdev = to_platform_device(dev);
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
void __iomem *ioaddr = pdata->ioaddr;
@@ -168,21 +167,33 @@ static int rtc_update_alarm(struct device *dev, struct rtc_time *alrm)
alarm_tm.tm_hour = alrm->tm_hour;
alarm_tm.tm_min = alrm->tm_min;
alarm_tm.tm_sec = alrm->tm_sec;
- rtc_tm_to_time(&now_tm, &now);
rtc_tm_to_time(&alarm_tm, &time);
- if (time < now) {
- time += 60 * 60 * 24;
- rtc_time_to_tm(time, &alarm_tm);
- }
-
- ret = rtc_tm_to_time(&alarm_tm, &time);
-
/* clear all the interrupt status bits */
writew(readw(ioaddr + RTC_RTCISR), ioaddr + RTC_RTCISR);
set_alarm_or_time(dev, MXC_RTC_ALARM, time);
- return ret;
+ return 0;
+}
+
+static void mxc_rtc_irq_enable(struct device *dev, unsigned int bit,
+ unsigned int enabled)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
+ void __iomem *ioaddr = pdata->ioaddr;
+ u32 reg;
+
+ spin_lock_irq(&pdata->rtc->irq_lock);
+ reg = readw(ioaddr + RTC_RTCIENR);
+
+ if (enabled)
+ reg |= bit;
+ else
+ reg &= ~bit;
+
+ writew(reg, ioaddr + RTC_RTCIENR);
+ spin_unlock_irq(&pdata->rtc->irq_lock);
}
/* This function is the RTC interrupt service routine. */
@@ -199,13 +210,12 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
/* clear interrupt sources */
writew(status, ioaddr + RTC_RTCISR);
- /* clear alarm interrupt if it has occurred */
- if (status & RTC_ALM_BIT)
- status &= ~RTC_ALM_BIT;
-
/* update irq data & counter */
- if (status & RTC_ALM_BIT)
+ if (status & RTC_ALM_BIT) {
events |= (RTC_AF | RTC_IRQF);
+ /* RTC alarm should be one-shot */
+ mxc_rtc_irq_enable(&pdev->dev, RTC_ALM_BIT, 0);
+ }
if (status & RTC_1HZ_BIT)
events |= (RTC_UF | RTC_IRQF);
@@ -213,9 +223,6 @@ static irqreturn_t mxc_rtc_interrupt(int irq, void *dev_id)
if (status & PIT_ALL_ON)
events |= (RTC_PF | RTC_IRQF);
- if ((status & RTC_ALM_BIT) && rtc_valid_tm(&pdata->g_rtc_alarm))
- rtc_update_alarm(&pdev->dev, &pdata->g_rtc_alarm);
-
rtc_update_irq(pdata->rtc, 1, events);
spin_unlock_irq(&pdata->rtc->irq_lock);
@@ -242,26 +249,6 @@ static void mxc_rtc_release(struct device *dev)
spin_unlock_irq(&pdata->rtc->irq_lock);
}
-static void mxc_rtc_irq_enable(struct device *dev, unsigned int bit,
- unsigned int enabled)
-{
- struct platform_device *pdev = to_platform_device(dev);
- struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
- void __iomem *ioaddr = pdata->ioaddr;
- u32 reg;
-
- spin_lock_irq(&pdata->rtc->irq_lock);
- reg = readw(ioaddr + RTC_RTCIENR);
-
- if (enabled)
- reg |= bit;
- else
- reg &= ~bit;
-
- writew(reg, ioaddr + RTC_RTCIENR);
- spin_unlock_irq(&pdata->rtc->irq_lock);
-}
-
static int mxc_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
mxc_rtc_irq_enable(dev, RTC_ALM_BIT, enabled);
@@ -290,6 +277,17 @@ static int mxc_rtc_read_time(struct device *dev, struct rtc_time *tm)
*/
static int mxc_rtc_set_mmss(struct device *dev, unsigned long time)
{
+ /*
+ * TTC_DAYR register is 9-bit in MX1 SoC, save time and day of year only
+ */
+ if (cpu_is_mx1()) {
+ struct rtc_time tm;
+
+ rtc_time_to_tm(time, &tm);
+ tm.tm_year = 70;
+ rtc_tm_to_time(&tm, &time);
+ }
+
/* Avoid roll-over from reading the different registers */
do {
set_alarm_or_time(dev, MXC_RTC_TIME, time);
@@ -324,21 +322,7 @@ static int mxc_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
int ret;
- if (rtc_valid_tm(&alrm->time)) {
- if (alrm->time.tm_sec > 59 ||
- alrm->time.tm_hour > 23 ||
- alrm->time.tm_min > 59)
- return -EINVAL;
-
- ret = rtc_update_alarm(dev, &alrm->time);
- } else {
- ret = rtc_valid_tm(&alrm->time);
- if (ret)
- return ret;
-
- ret = rtc_update_alarm(dev, &alrm->time);
- }
-
+ ret = rtc_update_alarm(dev, &alrm->time);
if (ret)
return ret;
@@ -424,6 +408,9 @@ static int __init mxc_rtc_probe(struct platform_device *pdev)
pdata->irq = -1;
}
+ if (pdata->irq >= 0)
+ device_init_wakeup(&pdev->dev, 1);
+
rtc = rtc_device_register(pdev->name, &pdev->dev, &mxc_rtc_ops,
THIS_MODULE);
if (IS_ERR(rtc)) {
@@ -459,9 +446,39 @@ static int __exit mxc_rtc_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int mxc_rtc_suspend(struct device *dev)
+{
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ enable_irq_wake(pdata->irq);
+
+ return 0;
+}
+
+static int mxc_rtc_resume(struct device *dev)
+{
+ struct rtc_plat_data *pdata = dev_get_drvdata(dev);
+
+ if (device_may_wakeup(dev))
+ disable_irq_wake(pdata->irq);
+
+ return 0;
+}
+
+static struct dev_pm_ops mxc_rtc_pm_ops = {
+ .suspend = mxc_rtc_suspend,
+ .resume = mxc_rtc_resume,
+};
+#endif
+
static struct platform_driver mxc_rtc_driver = {
.driver = {
.name = "mxc_rtc",
+#ifdef CONFIG_PM
+ .pm = &mxc_rtc_pm_ops,
+#endif
.owner = THIS_MODULE,
},
.remove = __exit_p(mxc_rtc_remove),
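The mxc wakeup support open-codes a struct dev_pm_ops under #ifdef CONFIG_PM, while the max8925 hunk earlier in this series reaches the same end with SIMPLE_DEV_PM_OPS. Assuming that helper is usable here as well (it only wires the callbacks up when CONFIG_PM_SLEEP is set), an equivalent sketch would be:

	#ifdef CONFIG_PM_SLEEP
	static int mxc_rtc_suspend(struct device *dev)
	{
		struct rtc_plat_data *pdata = dev_get_drvdata(dev);

		if (device_may_wakeup(dev))
			enable_irq_wake(pdata->irq);
		return 0;
	}

	static int mxc_rtc_resume(struct device *dev)
	{
		struct rtc_plat_data *pdata = dev_get_drvdata(dev);

		if (device_may_wakeup(dev))
			disable_irq_wake(pdata->irq);
		return 0;
	}
	#endif

	static SIMPLE_DEV_PM_OPS(mxc_rtc_pm_ops, mxc_rtc_suspend, mxc_rtc_resume);

	/* .driver.pm can then point at &mxc_rtc_pm_ops without an #ifdef. */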
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index 2ee3bbf..b46c400 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -340,7 +340,6 @@ static int __devexit pcf2123_remove(struct spi_device *spi)
static struct spi_driver pcf2123_driver = {
.driver = {
.name = "rtc-pcf2123",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = pcf2123_probe,
diff --git a/drivers/rtc/rtc-pcf50633.c b/drivers/rtc/rtc-pcf50633.c
index 0c42389..a20202f 100644
--- a/drivers/rtc/rtc-pcf50633.c
+++ b/drivers/rtc/rtc-pcf50633.c
@@ -294,17 +294,7 @@ static struct platform_driver pcf50633_rtc_driver = {
.remove = __devexit_p(pcf50633_rtc_remove),
};
-static int __init pcf50633_rtc_init(void)
-{
- return platform_driver_register(&pcf50633_rtc_driver);
-}
-module_init(pcf50633_rtc_init);
-
-static void __exit pcf50633_rtc_exit(void)
-{
- platform_driver_unregister(&pcf50633_rtc_driver);
-}
-module_exit(pcf50633_rtc_exit);
+module_platform_driver(pcf50633_rtc_driver);
MODULE_DESCRIPTION("PCF50633 RTC driver");
MODULE_AUTHOR("Balaji Rao <balajirrao@openmoko.org>");
diff --git a/drivers/rtc/rtc-pl030.c b/drivers/rtc/rtc-pl030.c
index 1d28d44..02111fe 100644
--- a/drivers/rtc/rtc-pl030.c
+++ b/drivers/rtc/rtc-pl030.c
@@ -174,6 +174,8 @@ static struct amba_id pl030_ids[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, pl030_ids);
+
static struct amba_driver pl030_driver = {
.drv = {
.name = "rtc-pl030",
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index ff1b84b..a952c8d 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -420,6 +420,8 @@ static struct amba_id pl031_ids[] = {
{0, 0},
};
+MODULE_DEVICE_TABLE(amba, pl031_ids);
+
static struct amba_driver pl031_driver = {
.drv = {
.name = "rtc-pl031",
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index d420e9d..9f1d6bc 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -532,17 +532,7 @@ static struct platform_driver pm8xxx_rtc_driver = {
},
};
-static int __init pm8xxx_rtc_init(void)
-{
- return platform_driver_register(&pm8xxx_rtc_driver);
-}
-module_init(pm8xxx_rtc_init);
-
-static void __exit pm8xxx_rtc_exit(void)
-{
- platform_driver_unregister(&pm8xxx_rtc_driver);
-}
-module_exit(pm8xxx_rtc_exit);
+module_platform_driver(pm8xxx_rtc_driver);
MODULE_ALIAS("platform:rtc-pm8xxx");
MODULE_DESCRIPTION("PMIC8xxx RTC driver");
diff --git a/drivers/rtc/rtc-puv3.c b/drivers/rtc/rtc-puv3.c
index e4b6880..ab0acae 100644
--- a/drivers/rtc/rtc-puv3.c
+++ b/drivers/rtc/rtc-puv3.c
@@ -164,7 +164,7 @@ static int puv3_rtc_open(struct device *dev)
int ret;
ret = request_irq(puv3_rtc_alarmno, puv3_rtc_alarmirq,
- IRQF_DISABLED, "pkunity-rtc alarm", rtc_dev);
+ 0, "pkunity-rtc alarm", rtc_dev);
if (ret) {
dev_err(dev, "IRQ%d error %d\n", puv3_rtc_alarmno, ret);
@@ -172,7 +172,7 @@ static int puv3_rtc_open(struct device *dev)
}
ret = request_irq(puv3_rtc_tickno, puv3_rtc_tickirq,
- IRQF_DISABLED, "pkunity-rtc tick", rtc_dev);
+ 0, "pkunity-rtc tick", rtc_dev);
if (ret) {
dev_err(dev, "IRQ%d error %d\n", puv3_rtc_tickno, ret);
@@ -326,7 +326,7 @@ static int puv3_rtc_resume(struct platform_device *pdev)
#define puv3_rtc_resume NULL
#endif
-static struct platform_driver puv3_rtcdrv = {
+static struct platform_driver puv3_rtc_driver = {
.probe = puv3_rtc_probe,
.remove = __devexit_p(puv3_rtc_remove),
.suspend = puv3_rtc_suspend,
@@ -337,21 +337,7 @@ static struct platform_driver puv3_rtcdrv = {
}
};
-static char __initdata banner[] = "PKUnity-v3 RTC, (c) 2009 PKUnity Co.\n";
-
-static int __init puv3_rtc_init(void)
-{
- printk(banner);
- return platform_driver_register(&puv3_rtcdrv);
-}
-
-static void __exit puv3_rtc_exit(void)
-{
- platform_driver_unregister(&puv3_rtcdrv);
-}
-
-module_init(puv3_rtc_init);
-module_exit(puv3_rtc_exit);
+module_platform_driver(puv3_rtc_driver);
MODULE_DESCRIPTION("RTC Driver for the PKUnity v3 chip");
MODULE_AUTHOR("Hu Dongliang");
diff --git a/drivers/rtc/rtc-r9701.c b/drivers/rtc/rtc-r9701.c
index 9beba49c..2853c2a 100644
--- a/drivers/rtc/rtc-r9701.c
+++ b/drivers/rtc/rtc-r9701.c
@@ -125,6 +125,13 @@ static int __devinit r9701_probe(struct spi_device *spi)
unsigned char tmp;
int res;
+ tmp = R100CNT;
+ res = read_regs(&spi->dev, &tmp, 1);
+ if (res || tmp != 0x20) {
+ dev_err(&spi->dev, "cannot read RTC register\n");
+ return -ENODEV;
+ }
+
rtc = rtc_device_register("r9701",
&spi->dev, &r9701_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc))
@@ -132,13 +139,6 @@ static int __devinit r9701_probe(struct spi_device *spi)
dev_set_drvdata(&spi->dev, rtc);
- tmp = R100CNT;
- res = read_regs(&spi->dev, &tmp, 1);
- if (res || tmp != 0x20) {
- rtc_device_unregister(rtc);
- return res;
- }
-
return 0;
}
diff --git a/drivers/rtc/rtc-rs5c348.c b/drivers/rtc/rtc-rs5c348.c
index 971bc8e..ce2ca85 100644
--- a/drivers/rtc/rtc-rs5c348.c
+++ b/drivers/rtc/rtc-rs5c348.c
@@ -229,7 +229,6 @@ static int __devexit rs5c348_remove(struct spi_device *spi)
static struct spi_driver rs5c348_driver = {
.driver = {
.name = "rtc-rs5c348",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = rs5c348_probe,
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 5b979d9..aef40bd 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -25,6 +25,7 @@
#include <linux/clk.h>
#include <linux/log2.h>
#include <linux/slab.h>
+#include <linux/of.h>
#include <mach/hardware.h>
#include <asm/uaccess.h>
@@ -507,7 +508,13 @@ static int __devinit s3c_rtc_probe(struct platform_device *pdev)
goto err_nortc;
}
- s3c_rtc_cpu_type = platform_get_device_id(pdev)->driver_data;
+#ifdef CONFIG_OF
+ if (pdev->dev.of_node)
+ s3c_rtc_cpu_type = of_device_is_compatible(pdev->dev.of_node,
+ "samsung,s3c6410-rtc") ? TYPE_S3C64XX : TYPE_S3C2410;
+ else
+#endif
+ s3c_rtc_cpu_type = platform_get_device_id(pdev)->driver_data;
/* Check RTC Time */
@@ -629,6 +636,17 @@ static int s3c_rtc_resume(struct platform_device *pdev)
#define s3c_rtc_resume NULL
#endif
+#ifdef CONFIG_OF
+static const struct of_device_id s3c_rtc_dt_match[] = {
+ { .compatible = "samsung,s3c2410-rtc" },
+ { .compatible = "samsung,s3c6410-rtc" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, s3c_rtc_dt_match);
+#else
+#define s3c_rtc_dt_match NULL
+#endif
+
static struct platform_device_id s3c_rtc_driver_ids[] = {
{
.name = "s3c2410-rtc",
@@ -651,24 +669,11 @@ static struct platform_driver s3c_rtc_driver = {
.driver = {
.name = "s3c-rtc",
.owner = THIS_MODULE,
+ .of_match_table = s3c_rtc_dt_match,
},
};
-static char __initdata banner[] = "S3C24XX RTC, (c) 2004,2006 Simtec Electronics\n";
-
-static int __init s3c_rtc_init(void)
-{
- printk(banner);
- return platform_driver_register(&s3c_rtc_driver);
-}
-
-static void __exit s3c_rtc_exit(void)
-{
- platform_driver_unregister(&s3c_rtc_driver);
-}
-
-module_init(s3c_rtc_init);
-module_exit(s3c_rtc_exit);
+module_platform_driver(s3c_rtc_driver);
MODULE_DESCRIPTION("Samsung S3C RTC Driver");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 0b40bb8..cb9a585 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -36,7 +36,6 @@
#ifdef CONFIG_ARCH_PXA
#include <mach/regs-rtc.h>
-#include <mach/regs-ost.h>
#endif
#define RTC_DEF_DIVIDER (32768 - 1)
@@ -188,8 +187,6 @@ static void sa1100_rtc_release(struct device *dev)
{
spin_lock_irq(&sa1100_rtc_lock);
RTSR = 0;
- OIER &= ~OIER_E1;
- OSSR = OSSR_M1;
spin_unlock_irq(&sa1100_rtc_lock);
free_irq(IRQ_RTCAlrm, dev);
@@ -369,18 +366,7 @@ static struct platform_driver sa1100_rtc_driver = {
},
};
-static int __init sa1100_rtc_init(void)
-{
- return platform_driver_register(&sa1100_rtc_driver);
-}
-
-static void __exit sa1100_rtc_exit(void)
-{
- platform_driver_unregister(&sa1100_rtc_driver);
-}
-
-module_init(sa1100_rtc_init);
-module_exit(sa1100_rtc_exit);
+module_platform_driver(sa1100_rtc_driver);
MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
MODULE_DESCRIPTION("SA11x0/PXA2xx Realtime Clock Driver (RTC)");
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index 893bac2..19a28a6 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -516,17 +516,7 @@ static struct platform_driver spear_rtc_driver = {
},
};
-static int __init rtc_init(void)
-{
- return platform_driver_register(&spear_rtc_driver);
-}
-module_init(rtc_init);
-
-static void __exit rtc_exit(void)
-{
- platform_driver_unregister(&spear_rtc_driver);
-}
-module_exit(rtc_exit);
+module_platform_driver(spear_rtc_driver);
MODULE_ALIAS("platform:rtc-spear");
MODULE_AUTHOR("Rajeev Kumar <rajeev-dlh.kumar@st.com>");
diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c
index ed3e9b5..7621116 100644
--- a/drivers/rtc/rtc-stk17ta8.c
+++ b/drivers/rtc/rtc-stk17ta8.c
@@ -370,18 +370,7 @@ static struct platform_driver stk17ta8_rtc_driver = {
},
};
-static __init int stk17ta8_init(void)
-{
- return platform_driver_register(&stk17ta8_rtc_driver);
-}
-
-static __exit void stk17ta8_exit(void)
-{
- platform_driver_unregister(&stk17ta8_rtc_driver);
-}
-
-module_init(stk17ta8_init);
-module_exit(stk17ta8_exit);
+module_platform_driver(stk17ta8_rtc_driver);
MODULE_AUTHOR("Thomas Hommel <thomas.hommel@ge.com>");
MODULE_DESCRIPTION("Simtek STK17TA8 RTC driver");
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c
index 7315068..1028786 100644
--- a/drivers/rtc/rtc-stmp3xxx.c
+++ b/drivers/rtc/rtc-stmp3xxx.c
@@ -276,18 +276,7 @@ static struct platform_driver stmp3xxx_rtcdrv = {
},
};
-static int __init stmp3xxx_rtc_init(void)
-{
- return platform_driver_register(&stmp3xxx_rtcdrv);
-}
-
-static void __exit stmp3xxx_rtc_exit(void)
-{
- platform_driver_unregister(&stmp3xxx_rtcdrv);
-}
-
-module_init(stmp3xxx_rtc_init);
-module_exit(stmp3xxx_rtc_exit);
+module_platform_driver(stmp3xxx_rtcdrv);
MODULE_DESCRIPTION("STMP3xxx RTC Driver");
MODULE_AUTHOR("dmitry pervushin <dpervushin@embeddedalley.com> and "
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 20687d5..d43b4f6 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -550,6 +550,11 @@ static int twl_rtc_resume(struct platform_device *pdev)
#define twl_rtc_resume NULL
#endif
+static const struct of_device_id twl_rtc_of_match[] = {
+ {.compatible = "ti,twl4030-rtc", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, twl_rtc_of_match);
MODULE_ALIAS("platform:twl_rtc");
static struct platform_driver twl4030rtc_driver = {
@@ -559,8 +564,9 @@ static struct platform_driver twl4030rtc_driver = {
.suspend = twl_rtc_suspend,
.resume = twl_rtc_resume,
.driver = {
- .owner = THIS_MODULE,
- .name = "twl_rtc",
+ .owner = THIS_MODULE,
+ .name = "twl_rtc",
+ .of_match_table = twl_rtc_of_match,
},
};
diff --git a/drivers/rtc/rtc-v3020.c b/drivers/rtc/rtc-v3020.c
index f71c3ce..bca5d67 100644
--- a/drivers/rtc/rtc-v3020.c
+++ b/drivers/rtc/rtc-v3020.c
@@ -393,18 +393,7 @@ static struct platform_driver rtc_device_driver = {
},
};
-static __init int v3020_init(void)
-{
- return platform_driver_register(&rtc_device_driver);
-}
-
-static __exit void v3020_exit(void)
-{
- platform_driver_unregister(&rtc_device_driver);
-}
-
-module_init(v3020_init);
-module_exit(v3020_exit);
+module_platform_driver(rtc_device_driver);
MODULE_DESCRIPTION("V3020 RTC");
MODULE_AUTHOR("Raphael Assenat");
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index c5698cd..fcbfdda 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -405,15 +405,4 @@ static struct platform_driver rtc_platform_driver = {
},
};
-static int __init vr41xx_rtc_init(void)
-{
- return platform_driver_register(&rtc_platform_driver);
-}
-
-static void __exit vr41xx_rtc_exit(void)
-{
- platform_driver_unregister(&rtc_platform_driver);
-}
-
-module_init(vr41xx_rtc_init);
-module_exit(vr41xx_rtc_exit);
+module_platform_driver(rtc_platform_driver);
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c
index f93f412..9e94fb1 100644
--- a/drivers/rtc/rtc-vt8500.c
+++ b/drivers/rtc/rtc-vt8500.c
@@ -311,17 +311,7 @@ static struct platform_driver vt8500_rtc_driver = {
},
};
-static int __init vt8500_rtc_init(void)
-{
- return platform_driver_register(&vt8500_rtc_driver);
-}
-module_init(vt8500_rtc_init);
-
-static void __exit vt8500_rtc_exit(void)
-{
- platform_driver_unregister(&vt8500_rtc_driver);
-}
-module_exit(vt8500_rtc_exit);
+module_platform_driver(vt8500_rtc_driver);
MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>");
MODULE_DESCRIPTION("VIA VT8500 SoC Realtime Clock Driver (RTC)");
diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
index bdc909b..3b6e6a6 100644
--- a/drivers/rtc/rtc-wm831x.c
+++ b/drivers/rtc/rtc-wm831x.c
@@ -324,15 +324,6 @@ static irqreturn_t wm831x_alm_irq(int irq, void *data)
return IRQ_HANDLED;
}
-static irqreturn_t wm831x_per_irq(int irq, void *data)
-{
- struct wm831x_rtc *wm831x_rtc = data;
-
- rtc_update_irq(wm831x_rtc->rtc, 1, RTC_IRQF | RTC_UF);
-
- return IRQ_HANDLED;
-}
-
static const struct rtc_class_ops wm831x_rtc_ops = {
.read_time = wm831x_rtc_readtime,
.set_mmss = wm831x_rtc_set_mmss,
@@ -405,11 +396,10 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
{
struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
struct wm831x_rtc *wm831x_rtc;
- int per_irq = platform_get_irq_byname(pdev, "PER");
int alm_irq = platform_get_irq_byname(pdev, "ALM");
int ret = 0;
- wm831x_rtc = kzalloc(sizeof(*wm831x_rtc), GFP_KERNEL);
+ wm831x_rtc = devm_kzalloc(&pdev->dev, sizeof(*wm831x_rtc), GFP_KERNEL);
if (wm831x_rtc == NULL)
return -ENOMEM;
@@ -433,14 +423,6 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
goto err;
}
- ret = request_threaded_irq(per_irq, NULL, wm831x_per_irq,
- IRQF_TRIGGER_RISING, "RTC period",
- wm831x_rtc);
- if (ret != 0) {
- dev_err(&pdev->dev, "Failed to request periodic IRQ %d: %d\n",
- per_irq, ret);
- }
-
ret = request_threaded_irq(alm_irq, NULL, wm831x_alm_irq,
IRQF_TRIGGER_RISING, "RTC alarm",
wm831x_rtc);
@@ -452,20 +434,16 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
return 0;
err:
- kfree(wm831x_rtc);
return ret;
}
static int __devexit wm831x_rtc_remove(struct platform_device *pdev)
{
struct wm831x_rtc *wm831x_rtc = platform_get_drvdata(pdev);
- int per_irq = platform_get_irq_byname(pdev, "PER");
int alm_irq = platform_get_irq_byname(pdev, "ALM");
free_irq(alm_irq, wm831x_rtc);
- free_irq(per_irq, wm831x_rtc);
rtc_device_unregister(wm831x_rtc->rtc);
- kfree(wm831x_rtc);
return 0;
}
@@ -490,17 +468,7 @@ static struct platform_driver wm831x_rtc_driver = {
},
};
-static int __init wm831x_rtc_init(void)
-{
- return platform_driver_register(&wm831x_rtc_driver);
-}
-module_init(wm831x_rtc_init);
-
-static void __exit wm831x_rtc_exit(void)
-{
- platform_driver_unregister(&wm831x_rtc_driver);
-}
-module_exit(wm831x_rtc_exit);
+module_platform_driver(wm831x_rtc_driver);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("RTC driver for the WM831x series PMICs");
diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
index 6642142..c2e52d1 100644
--- a/drivers/rtc/rtc-wm8350.c
+++ b/drivers/rtc/rtc-wm8350.c
@@ -486,17 +486,7 @@ static struct platform_driver wm8350_rtc_driver = {
},
};
-static int __init wm8350_rtc_init(void)
-{
- return platform_driver_register(&wm8350_rtc_driver);
-}
-module_init(wm8350_rtc_init);
-
-static void __exit wm8350_rtc_exit(void)
-{
- platform_driver_unregister(&wm8350_rtc_driver);
-}
-module_exit(wm8350_rtc_exit);
+module_platform_driver(wm8350_rtc_driver);
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("RTC driver for the WM8350");
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 65894f0..110137e 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -17,7 +17,6 @@
#include <linux/ctype.h>
#include <linux/major.h>
#include <linux/slab.h>
-#include <linux/buffer_head.h>
#include <linux/hdreg.h>
#include <linux/async.h>
#include <linux/mutex.h>
@@ -1073,7 +1072,7 @@ static const struct file_operations dasd_stats_global_fops = {
static void dasd_profile_init(struct dasd_profile *profile,
struct dentry *base_dentry)
{
- mode_t mode;
+ umode_t mode;
struct dentry *pde;
if (!base_dentry)
@@ -1112,7 +1111,7 @@ static void dasd_statistics_removeroot(void)
static void dasd_statistics_createroot(void)
{
- mode_t mode;
+ umode_t mode;
struct dentry *pde;
dasd_debugfs_root_entry = NULL;
@@ -3262,6 +3261,12 @@ void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
device->path_data.tbvpm |= eventlpm;
dasd_schedule_device_bh(device);
}
+ if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
+ DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+ "Pathgroup re-established\n");
+ if (device->discipline->kick_validate)
+ device->discipline->kick_validate(device);
+ }
}
dasd_put_device(device);
}
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 87a0cf1..0326571 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -1718,7 +1718,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
erp->startdev = device;
erp->memdev = device;
erp->magic = default_erp->magic;
- erp->expires = 0;
+ erp->expires = default_erp->expires;
erp->retries = 256;
erp->buildclk = get_clock();
erp->status = DASD_CQR_FILLED;
@@ -2363,7 +2363,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
erp->memdev = device;
erp->block = cqr->block;
erp->magic = cqr->magic;
- erp->expires = 0;
+ erp->expires = cqr->expires;
erp->retries = 256;
erp->buildclk = get_clock();
erp->status = DASD_CQR_FILLED;
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index c388eda..b3beed5 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -189,14 +189,12 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
unsigned long flags;
struct alias_server *server, *newserver;
struct alias_lcu *lcu, *newlcu;
- int is_lcu_known;
struct dasd_uid uid;
private = (struct dasd_eckd_private *) device->private;
device->discipline->get_uid(device, &uid);
spin_lock_irqsave(&aliastree.lock, flags);
- is_lcu_known = 1;
server = _find_server(&uid);
if (!server) {
spin_unlock_irqrestore(&aliastree.lock, flags);
@@ -208,7 +206,6 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
if (!server) {
list_add(&newserver->server, &aliastree.serverlist);
server = newserver;
- is_lcu_known = 0;
} else {
/* someone was faster */
_free_server(newserver);
@@ -226,12 +223,10 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
if (!lcu) {
list_add(&newlcu->lcu, &server->lculist);
lcu = newlcu;
- is_lcu_known = 0;
} else {
/* someone was faster */
_free_lcu(newlcu);
}
- is_lcu_known = 0;
}
spin_lock(&lcu->lock);
list_add(&device->alias_list, &lcu->inactive_devices);
@@ -239,64 +234,7 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
spin_unlock(&lcu->lock);
spin_unlock_irqrestore(&aliastree.lock, flags);
- return is_lcu_known;
-}
-
-/*
- * The first device to be registered on an LCU will have to do
- * some additional setup steps to configure that LCU on the
- * storage server. All further devices should wait with their
- * initialization until the first device is done.
- * To synchronize this work, the first device will call
- * dasd_alias_lcu_setup_complete when it is done, and all
- * other devices will wait for it with dasd_alias_wait_for_lcu_setup.
- */
-void dasd_alias_lcu_setup_complete(struct dasd_device *device)
-{
- unsigned long flags;
- struct alias_server *server;
- struct alias_lcu *lcu;
- struct dasd_uid uid;
-
- device->discipline->get_uid(device, &uid);
- lcu = NULL;
- spin_lock_irqsave(&aliastree.lock, flags);
- server = _find_server(&uid);
- if (server)
- lcu = _find_lcu(server, &uid);
- spin_unlock_irqrestore(&aliastree.lock, flags);
- if (!lcu) {
- DBF_EVENT_DEVID(DBF_ERR, device->cdev,
- "could not find lcu for %04x %02x",
- uid.ssid, uid.real_unit_addr);
- WARN_ON(1);
- return;
- }
- complete_all(&lcu->lcu_setup);
-}
-
-void dasd_alias_wait_for_lcu_setup(struct dasd_device *device)
-{
- unsigned long flags;
- struct alias_server *server;
- struct alias_lcu *lcu;
- struct dasd_uid uid;
-
- device->discipline->get_uid(device, &uid);
- lcu = NULL;
- spin_lock_irqsave(&aliastree.lock, flags);
- server = _find_server(&uid);
- if (server)
- lcu = _find_lcu(server, &uid);
- spin_unlock_irqrestore(&aliastree.lock, flags);
- if (!lcu) {
- DBF_EVENT_DEVID(DBF_ERR, device->cdev,
- "could not find lcu for %04x %02x",
- uid.ssid, uid.real_unit_addr);
- WARN_ON(1);
- return;
- }
- wait_for_completion(&lcu->lcu_setup);
+ return 0;
}
/*
@@ -705,6 +643,16 @@ struct dasd_device *dasd_alias_get_start_dev(struct dasd_device *base_device)
if (lcu->pav == NO_PAV ||
lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
return NULL;
+ if (unlikely(!(private->features.feature[8] & 0x01))) {
+ /*
+ * PAV enabled but prefix not, very unlikely
+ * seems to be a lost pathgroup
+ * use base device to do IO
+ */
+ DBF_DEV_EVENT(DBF_ERR, base_device, "%s",
+ "Prefix not enabled with PAV enabled\n");
+ return NULL;
+ }
spin_lock_irqsave(&lcu->lock, flags);
alias_device = group->next;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 6ab2968..2617b1e 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -18,12 +18,12 @@
#include <linux/hdreg.h> /* HDIO_GETGEO */
#include <linux/bio.h>
#include <linux/module.h>
+#include <linux/compat.h>
#include <linux/init.h>
#include <asm/debug.h>
#include <asm/idals.h>
#include <asm/ebcdic.h>
-#include <asm/compat.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/cio.h>
@@ -752,24 +752,13 @@ dasd_eckd_cdl_reclen(int recid)
return sizes_trk0[recid];
return LABEL_SIZE;
}
-
-/*
- * Generate device unique id that specifies the physical device.
- */
-static int dasd_eckd_generate_uid(struct dasd_device *device)
+/* create unique id from private structure. */
+static void create_uid(struct dasd_eckd_private *private)
{
- struct dasd_eckd_private *private;
- struct dasd_uid *uid;
int count;
- unsigned long flags;
+ struct dasd_uid *uid;
- private = (struct dasd_eckd_private *) device->private;
- if (!private)
- return -ENODEV;
- if (!private->ned || !private->gneq)
- return -ENODEV;
uid = &private->uid;
- spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
memset(uid, 0, sizeof(struct dasd_uid));
memcpy(uid->vendor, private->ned->HDA_manufacturer,
sizeof(uid->vendor) - 1);
@@ -792,6 +781,23 @@ static int dasd_eckd_generate_uid(struct dasd_device *device)
private->vdsneq->uit[count]);
}
}
+}
+
+/*
+ * Generate device unique id that specifies the physical device.
+ */
+static int dasd_eckd_generate_uid(struct dasd_device *device)
+{
+ struct dasd_eckd_private *private;
+ unsigned long flags;
+
+ private = (struct dasd_eckd_private *) device->private;
+ if (!private)
+ return -ENODEV;
+ if (!private->ned || !private->gneq)
+ return -ENODEV;
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ create_uid(private);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
return 0;
}
@@ -811,6 +817,21 @@ static int dasd_eckd_get_uid(struct dasd_device *device, struct dasd_uid *uid)
return -EINVAL;
}
+/*
+ * compare device UID with data of a given dasd_eckd_private structure
+ * return 0 for match
+ */
+static int dasd_eckd_compare_path_uid(struct dasd_device *device,
+ struct dasd_eckd_private *private)
+{
+ struct dasd_uid device_uid;
+
+ create_uid(private);
+ dasd_eckd_get_uid(device, &device_uid);
+
+ return memcmp(&device_uid, &private->uid, sizeof(struct dasd_uid));
+}
+
static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
struct dasd_ccw_req *cqr,
__u8 *rcd_buffer,
@@ -1005,59 +1026,120 @@ static int dasd_eckd_read_conf(struct dasd_device *device)
int conf_len, conf_data_saved;
int rc;
__u8 lpm, opm;
- struct dasd_eckd_private *private;
+ struct dasd_eckd_private *private, path_private;
struct dasd_path *path_data;
+ struct dasd_uid *uid;
+ char print_path_uid[60], print_device_uid[60];
private = (struct dasd_eckd_private *) device->private;
path_data = &device->path_data;
opm = ccw_device_get_path_mask(device->cdev);
- lpm = 0x80;
conf_data_saved = 0;
/* get configuration data per operational path */
for (lpm = 0x80; lpm; lpm>>= 1) {
- if (lpm & opm) {
- rc = dasd_eckd_read_conf_lpm(device, &conf_data,
- &conf_len, lpm);
- if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */
- DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
- "Read configuration data returned "
- "error %d", rc);
- return rc;
- }
- if (conf_data == NULL) {
- DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
- "No configuration data "
- "retrieved");
- /* no further analysis possible */
- path_data->opm |= lpm;
- continue; /* no error */
+ if (!(lpm & opm))
+ continue;
+ rc = dasd_eckd_read_conf_lpm(device, &conf_data,
+ &conf_len, lpm);
+ if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "Read configuration data returned "
+ "error %d", rc);
+ return rc;
+ }
+ if (conf_data == NULL) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "No configuration data "
+ "retrieved");
+ /* no further analysis possible */
+ path_data->opm |= lpm;
+ continue; /* no error */
+ }
+ /* save first valid configuration data */
+ if (!conf_data_saved) {
+ kfree(private->conf_data);
+ private->conf_data = conf_data;
+ private->conf_len = conf_len;
+ if (dasd_eckd_identify_conf_parts(private)) {
+ private->conf_data = NULL;
+ private->conf_len = 0;
+ kfree(conf_data);
+ continue;
}
- /* save first valid configuration data */
- if (!conf_data_saved) {
- kfree(private->conf_data);
- private->conf_data = conf_data;
- private->conf_len = conf_len;
- if (dasd_eckd_identify_conf_parts(private)) {
- private->conf_data = NULL;
- private->conf_len = 0;
- kfree(conf_data);
- continue;
- }
- conf_data_saved++;
+ /*
+ * build device UID so that other path data
+ * can be compared to it
+ */
+ dasd_eckd_generate_uid(device);
+ conf_data_saved++;
+ } else {
+ path_private.conf_data = conf_data;
+ path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
+ if (dasd_eckd_identify_conf_parts(
+ &path_private)) {
+ path_private.conf_data = NULL;
+ path_private.conf_len = 0;
+ kfree(conf_data);
+ continue;
}
- switch (dasd_eckd_path_access(conf_data, conf_len)) {
- case 0x02:
- path_data->npm |= lpm;
- break;
- case 0x03:
- path_data->ppm |= lpm;
- break;
+
+ if (dasd_eckd_compare_path_uid(
+ device, &path_private)) {
+ uid = &path_private.uid;
+ if (strlen(uid->vduit) > 0)
+ snprintf(print_path_uid,
+ sizeof(print_path_uid),
+ "%s.%s.%04x.%02x.%s",
+ uid->vendor, uid->serial,
+ uid->ssid, uid->real_unit_addr,
+ uid->vduit);
+ else
+ snprintf(print_path_uid,
+ sizeof(print_path_uid),
+ "%s.%s.%04x.%02x",
+ uid->vendor, uid->serial,
+ uid->ssid,
+ uid->real_unit_addr);
+ uid = &private->uid;
+ if (strlen(uid->vduit) > 0)
+ snprintf(print_device_uid,
+ sizeof(print_device_uid),
+ "%s.%s.%04x.%02x.%s",
+ uid->vendor, uid->serial,
+ uid->ssid, uid->real_unit_addr,
+ uid->vduit);
+ else
+ snprintf(print_device_uid,
+ sizeof(print_device_uid),
+ "%s.%s.%04x.%02x",
+ uid->vendor, uid->serial,
+ uid->ssid,
+ uid->real_unit_addr);
+ dev_err(&device->cdev->dev,
+ "Not all channel paths lead to "
+ "the same device, path %02X leads to "
+ "device %s instead of %s\n", lpm,
+ print_path_uid, print_device_uid);
+ return -EINVAL;
}
- path_data->opm |= lpm;
- if (conf_data != private->conf_data)
- kfree(conf_data);
+
+ path_private.conf_data = NULL;
+ path_private.conf_len = 0;
+ }
+ switch (dasd_eckd_path_access(conf_data, conf_len)) {
+ case 0x02:
+ path_data->npm |= lpm;
+ break;
+ case 0x03:
+ path_data->ppm |= lpm;
+ break;
}
+ path_data->opm |= lpm;
+
+ if (conf_data != private->conf_data)
+ kfree(conf_data);
}
+
return 0;
}
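
The mismatch branch above formats the path UID and the device UID with two nearly identical snprintf chains before printing the error. A small standalone sketch of that "vendor.serial.ssid.unitaddr[.vduit]" formatting, using a hypothetical helper and a mock UID struct rather than the driver's own types:

/*
 * Hypothetical helper sketch: render a DASD UID as
 * "vendor.serial.ssid.unitaddr[.vduit]". Field names mirror the driver's
 * struct dasd_uid, but this is a standalone mock, not kernel code.
 */
#include <stdio.h>
#include <string.h>

struct uid_mock {
	char vendor[4];
	char serial[15];
	unsigned short ssid;
	unsigned char real_unit_addr;
	char vduit[33];
};

static void format_uid(char *buf, size_t len, const struct uid_mock *uid)
{
	if (strlen(uid->vduit) > 0)
		snprintf(buf, len, "%s.%s.%04x.%02x.%s",
			 uid->vendor, uid->serial, uid->ssid,
			 uid->real_unit_addr, uid->vduit);
	else
		snprintf(buf, len, "%s.%s.%04x.%02x",
			 uid->vendor, uid->serial, uid->ssid,
			 uid->real_unit_addr);
}

int main(void)
{
	struct uid_mock uid = { "IBM", "75000000092461", 0x4711, 0x02, "" };
	char buf[60];

	format_uid(buf, sizeof(buf), &uid);
	printf("%s\n", buf);	/* prints IBM.75000000092461.4711.02 */
	return 0;
}
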
@@ -1090,12 +1172,61 @@ static int verify_fcx_max_data(struct dasd_device *device, __u8 lpm)
return 0;
}
+static int rebuild_device_uid(struct dasd_device *device,
+ struct path_verification_work_data *data)
+{
+ struct dasd_eckd_private *private;
+ struct dasd_path *path_data;
+ __u8 lpm, opm;
+ int rc;
+
+ rc = -ENODEV;
+ private = (struct dasd_eckd_private *) device->private;
+ path_data = &device->path_data;
+ opm = device->path_data.opm;
+
+ for (lpm = 0x80; lpm; lpm >>= 1) {
+ if (!(lpm & opm))
+ continue;
+ memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
+ memset(&data->cqr, 0, sizeof(data->cqr));
+ data->cqr.cpaddr = &data->ccw;
+ rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
+ data->rcd_buffer,
+ lpm);
+
+ if (rc) {
+ if (rc == -EOPNOTSUPP) /* -EOPNOTSUPP is ok */
+ continue;
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev,
+ "Read configuration data "
+ "returned error %d", rc);
+ break;
+ }
+ memcpy(private->conf_data, data->rcd_buffer,
+ DASD_ECKD_RCD_DATA_SIZE);
+ if (dasd_eckd_identify_conf_parts(private)) {
+ rc = -ENODEV;
+ } else /* first valid path is enough */
+ break;
+ }
+
+ if (!rc)
+ rc = dasd_eckd_generate_uid(device);
+
+ return rc;
+}
+
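rebuild_device_uid, like dasd_eckd_read_conf above, walks the eight possible channel paths with the usual logical-path-mask idiom: start at bit 0x80 and skip any bit that is not set in the operational path mask. A tiny standalone sketch of that loop (the mask value is chosen purely for illustration):

/*
 * Sketch of the channel-path mask walk used throughout this file:
 * bit 0x80 is the first path, bit 0x01 the last; bits not set in the
 * operational path mask (opm) are skipped. Standalone mock, not kernel code.
 */
#include <stdio.h>

int main(void)
{
	unsigned char opm = 0xC4;	/* example: paths 0, 1 and 5 operational */
	unsigned char lpm;
	int pos = 0;

	for (lpm = 0x80; lpm; lpm >>= 1, pos++) {
		if (!(lpm & opm))
			continue;	/* path not operational, skip it */
		printf("checking path %d (mask %02x)\n", pos, lpm);
	}
	return 0;
}
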
static void do_path_verification_work(struct work_struct *work)
{
struct path_verification_work_data *data;
struct dasd_device *device;
+ struct dasd_eckd_private path_private;
+ struct dasd_uid *uid;
+ __u8 path_rcd_buf[DASD_ECKD_RCD_DATA_SIZE];
__u8 lpm, opm, npm, ppm, epm;
unsigned long flags;
+ char print_uid[60];
int rc;
data = container_of(work, struct path_verification_work_data, worker);
@@ -1112,64 +1243,129 @@ static void do_path_verification_work(struct work_struct *work)
ppm = 0;
epm = 0;
for (lpm = 0x80; lpm; lpm >>= 1) {
- if (lpm & data->tbvpm) {
- memset(data->rcd_buffer, 0, sizeof(data->rcd_buffer));
- memset(&data->cqr, 0, sizeof(data->cqr));
- data->cqr.cpaddr = &data->ccw;
- rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
- data->rcd_buffer,
- lpm);
- if (!rc) {
- switch (dasd_eckd_path_access(data->rcd_buffer,
- DASD_ECKD_RCD_DATA_SIZE)) {
- case 0x02:
- npm |= lpm;
- break;
- case 0x03:
- ppm |= lpm;
- break;
- }
- opm |= lpm;
- } else if (rc == -EOPNOTSUPP) {
- DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
- "path verification: No configuration "
- "data retrieved");
- opm |= lpm;
- } else if (rc == -EAGAIN) {
- DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ if (!(lpm & data->tbvpm))
+ continue;
+ memset(&data->rcd_buffer, 0, sizeof(data->rcd_buffer));
+ memset(&data->cqr, 0, sizeof(data->cqr));
+ data->cqr.cpaddr = &data->ccw;
+ rc = dasd_eckd_read_conf_immediately(device, &data->cqr,
+ data->rcd_buffer,
+ lpm);
+ if (!rc) {
+ switch (dasd_eckd_path_access(data->rcd_buffer,
+ DASD_ECKD_RCD_DATA_SIZE)) {
+ case 0x02:
+ npm |= lpm;
+ break;
+ case 0x03:
+ ppm |= lpm;
+ break;
+ }
+ opm |= lpm;
+ } else if (rc == -EOPNOTSUPP) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "path verification: No configuration "
+ "data retrieved");
+ opm |= lpm;
+ } else if (rc == -EAGAIN) {
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"path verification: device is stopped,"
" try again later");
- epm |= lpm;
- } else {
- dev_warn(&device->cdev->dev,
- "Reading device feature codes failed "
- "(rc=%d) for new path %x\n", rc, lpm);
- continue;
- }
- if (verify_fcx_max_data(device, lpm)) {
+ epm |= lpm;
+ } else {
+ dev_warn(&device->cdev->dev,
+ "Reading device feature codes failed "
+ "(rc=%d) for new path %x\n", rc, lpm);
+ continue;
+ }
+ if (verify_fcx_max_data(device, lpm)) {
+ opm &= ~lpm;
+ npm &= ~lpm;
+ ppm &= ~lpm;
+ continue;
+ }
+
+ /*
+ * save conf_data for the later comparison, since
+ * rebuild_device_uid may change the original data
+ */
+ memcpy(&path_rcd_buf, data->rcd_buffer,
+ DASD_ECKD_RCD_DATA_SIZE);
+ path_private.conf_data = (void *) &path_rcd_buf;
+ path_private.conf_len = DASD_ECKD_RCD_DATA_SIZE;
+ if (dasd_eckd_identify_conf_parts(&path_private)) {
+ path_private.conf_data = NULL;
+ path_private.conf_len = 0;
+ continue;
+ }
+
+ /*
+ * compare the path UID with the device UID only if at least
+ * one valid path is left; otherwise the device UID may have
+ * changed and the first working path UID will be used as
+ * the device UID
+ */
+ if (device->path_data.opm &&
+ dasd_eckd_compare_path_uid(device, &path_private)) {
+ /*
+ * the comparison was not successful; rebuild the
+ * device UID from at least one known path in case
+ * a z/VM hyperswap command has changed the device,
+ * then compare again
+ *
+ * if either the rebuild or the recompare fails,
+ * the path cannot be used
+ */
+ if (rebuild_device_uid(device, data) ||
+ dasd_eckd_compare_path_uid(
+ device, &path_private)) {
+ uid = &path_private.uid;
+ if (strlen(uid->vduit) > 0)
+ snprintf(print_uid, sizeof(print_uid),
+ "%s.%s.%04x.%02x.%s",
+ uid->vendor, uid->serial,
+ uid->ssid, uid->real_unit_addr,
+ uid->vduit);
+ else
+ snprintf(print_uid, sizeof(print_uid),
+ "%s.%s.%04x.%02x",
+ uid->vendor, uid->serial,
+ uid->ssid,
+ uid->real_unit_addr);
+ dev_err(&device->cdev->dev,
+ "The newly added channel path %02X "
+ "will not be used because it leads "
+ "to a different device %s\n",
+ lpm, print_uid);
opm &= ~lpm;
npm &= ~lpm;
ppm &= ~lpm;
+ continue;
}
}
+
+ /*
+ * There is a small chance that a path is lost again between
+ * above path verification and the following modification of
+ * the device opm mask. We could avoid that race here by using
+ * yet another path mask, but we rather deal with this unlikely
+ * situation in dasd_start_IO.
+ */
+ spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
+ if (!device->path_data.opm && opm) {
+ device->path_data.opm = opm;
+ dasd_generic_path_operational(device);
+ } else
+ device->path_data.opm |= opm;
+ device->path_data.npm |= npm;
+ device->path_data.ppm |= ppm;
+ device->path_data.tbvpm |= epm;
+ spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
}
- /*
- * There is a small chance that a path is lost again between
- * above path verification and the following modification of
- * the device opm mask. We could avoid that race here by using
- * yet another path mask, but we rather deal with this unlikely
- * situation in dasd_start_IO.
- */
- spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
- if (!device->path_data.opm && opm) {
- device->path_data.opm = opm;
- dasd_generic_path_operational(device);
- } else
- device->path_data.opm |= opm;
- device->path_data.npm |= npm;
- device->path_data.ppm |= ppm;
- device->path_data.tbvpm |= epm;
- spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
dasd_put_device(device);
if (data->isglobal)
@@ -1338,6 +1534,10 @@ static void dasd_eckd_validate_server(struct dasd_device *device)
struct dasd_eckd_private *private;
int enable_pav;
+ private = (struct dasd_eckd_private *) device->private;
+ if (private->uid.type == UA_BASE_PAV_ALIAS ||
+ private->uid.type == UA_HYPER_PAV_ALIAS)
+ return;
if (dasd_nopav || MACHINE_IS_VM)
enable_pav = 0;
else
@@ -1346,11 +1546,28 @@ static void dasd_eckd_validate_server(struct dasd_device *device)
/* maybe the requested feature is not available on the server,
* therefore just report the error and go ahead */
- private = (struct dasd_eckd_private *) device->private;
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
"returned rc=%d", private->uid.ssid, rc);
}
+/*
+ * worker to run a validate server in case of a lost pathgroup
+ */
+static void dasd_eckd_do_validate_server(struct work_struct *work)
+{
+ struct dasd_device *device = container_of(work, struct dasd_device,
+ kick_validate);
+ dasd_eckd_validate_server(device);
+ dasd_put_device(device);
+}
+
+static void dasd_eckd_kick_validate_server(struct dasd_device *device)
+{
+ dasd_get_device(device);
+ /* queue call to do_validate_server to the kernel event daemon. */
+ schedule_work(&device->kick_validate);
+}
+
static u32 get_fcx_max_data(struct dasd_device *device)
{
#if defined(CONFIG_64BIT)
@@ -1392,10 +1609,13 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
struct dasd_eckd_private *private;
struct dasd_block *block;
struct dasd_uid temp_uid;
- int is_known, rc, i;
+ int rc, i;
int readonly;
unsigned long value;
+ /* set up work item for validate server */
+ INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
+
if (!ccw_device_is_pathgroup(device->cdev)) {
dev_warn(&device->cdev->dev,
"A channel path group could not be established\n");
@@ -1441,11 +1661,6 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
device->default_expires = value;
}
- /* Generate device unique id */
- rc = dasd_eckd_generate_uid(device);
- if (rc)
- goto out_err1;
-
dasd_eckd_get_uid(device, &temp_uid);
if (temp_uid.type == UA_BASE_DEVICE) {
block = dasd_alloc_block();
@@ -1460,22 +1675,12 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
block->base = device;
}
- /* register lcu with alias handling, enable PAV if this is a new lcu */
- is_known = dasd_alias_make_device_known_to_lcu(device);
- if (is_known < 0) {
- rc = is_known;
+ /* register lcu with alias handling, enable PAV */
+ rc = dasd_alias_make_device_known_to_lcu(device);
+ if (rc)
goto out_err2;
- }
- /*
- * dasd_eckd_validate_server is done on the first device that
- * is found for an LCU. All later other devices have to wait
- * for it, so they will read the correct feature codes.
- */
- if (!is_known) {
- dasd_eckd_validate_server(device);
- dasd_alias_lcu_setup_complete(device);
- } else
- dasd_alias_wait_for_lcu_setup(device);
+
+ dasd_eckd_validate_server(device);
/* device may report different configuration data after LCU setup */
rc = dasd_eckd_read_conf(device);
@@ -2206,7 +2411,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
sizeof(struct PFX_eckd_data));
} else {
if (define_extent(ccw++, cqr->data, first_trk,
- last_trk, cmd, startdev) == -EAGAIN) {
+ last_trk, cmd, basedev) == -EAGAIN) {
/* Clock not in sync and XRC is enabled.
* Try again later.
*/
@@ -3907,7 +4112,7 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
{
struct dasd_eckd_private *private;
struct dasd_eckd_characteristics temp_rdc_data;
- int is_known, rc;
+ int rc;
struct dasd_uid temp_uid;
unsigned long flags;
@@ -3930,14 +4135,10 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
goto out_err;
/* register lcu with alias handling, enable PAV if this is a new lcu */
- is_known = dasd_alias_make_device_known_to_lcu(device);
- if (is_known < 0)
- return is_known;
- if (!is_known) {
- dasd_eckd_validate_server(device);
- dasd_alias_lcu_setup_complete(device);
- } else
- dasd_alias_wait_for_lcu_setup(device);
+ rc = dasd_alias_make_device_known_to_lcu(device);
+ if (rc)
+ return rc;
+ dasd_eckd_validate_server(device);
/* RE-Read Configuration Data */
rc = dasd_eckd_read_conf(device);
@@ -4079,6 +4280,7 @@ static struct dasd_discipline dasd_eckd_discipline = {
.restore = dasd_eckd_restore_device,
.reload = dasd_eckd_reload_device,
.get_uid = dasd_eckd_get_uid,
+ .kick_validate = dasd_eckd_kick_validate_server,
};
static int __init
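
The new kick_validate callback follows the driver's usual deferral pattern: take a device reference, schedule the work item, and let the worker run validate_server and drop the reference. A rough userspace sketch of that hand-off, with a pthread standing in for schedule_work() and a plain counter standing in for the device reference count (illustrative only):

/*
 * Sketch of the "kick" pattern added above: the kicker grabs a reference and
 * queues the work; the worker runs the job and drops the reference. A joined
 * pthread stands in for schedule_work(); refcounting is a mock counter.
 */
#include <pthread.h>
#include <stdio.h>

struct device_mock {
	int refcount;
};

static void get_device(struct device_mock *d) { __sync_fetch_and_add(&d->refcount, 1); }
static void put_device(struct device_mock *d) { __sync_fetch_and_sub(&d->refcount, 1); }

static void validate_server(struct device_mock *d)
{
	printf("validate server, refcount=%d\n", d->refcount);
}

static void *do_validate_server(void *arg)	/* the work function */
{
	struct device_mock *d = arg;

	validate_server(d);
	put_device(d);				/* release the kicker's reference */
	return NULL;
}

static void kick_validate_server(struct device_mock *d)
{
	pthread_t t;

	get_device(d);				/* keep the device alive for the worker */
	pthread_create(&t, NULL, do_validate_server, d);
	pthread_join(t, NULL);			/* joined here only to keep the demo simple */
}

int main(void)
{
	struct device_mock dev = { .refcount = 1 };

	kick_validate_server(&dev);
	printf("final refcount=%d\n", dev.refcount);
	return 0;
}
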
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index afe8c33..33a6743 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -355,6 +355,7 @@ struct dasd_discipline {
int (*reload) (struct dasd_device *);
int (*get_uid) (struct dasd_device *, struct dasd_uid *);
+ void (*kick_validate) (struct dasd_device *);
};
extern struct dasd_discipline *dasd_diag_discipline_pointer;
@@ -455,6 +456,7 @@ struct dasd_device {
struct work_struct kick_work;
struct work_struct restore_device;
struct work_struct reload_device;
+ struct work_struct kick_validate;
struct timer_list timer;
debug_info_t *debug_area;
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index f1a2016..792c69e 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -13,6 +13,7 @@
#define KMSG_COMPONENT "dasd"
#include <linux/interrupt.h>
+#include <linux/compat.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/blkpg.h>
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 98f3e4a..690c333 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -36,7 +36,7 @@
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/hdreg.h> /* HDIO_GETGEO */
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/bio.h>
#include <linux/suspend.h>
#include <linux/platform_device.h>
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 934458a..e71a50d 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -87,6 +87,7 @@ struct raw3215_info {
struct tty_struct *tty; /* pointer to tty structure if present */
struct raw3215_req *queued_read; /* pointer to queued read requests */
struct raw3215_req *queued_write;/* pointer to queued write requests */
+ struct tasklet_struct tlet; /* tasklet to invoke tty_wakeup */
wait_queue_head_t empty_wait; /* wait queue for flushing */
struct timer_list timer; /* timer for delayed output */
int line_pos; /* position on the line (for tabs) */
@@ -334,19 +335,23 @@ static inline void raw3215_try_io(struct raw3215_info *raw)
}
/*
+ * Call tty_wakeup from tasklet context
+ */
+static void raw3215_wakeup(unsigned long data)
+{
+ struct raw3215_info *raw = (struct raw3215_info *) data;
+ tty_wakeup(raw->tty);
+}
+
+/*
* Try to start the next IO and wake up processes waiting on the tty.
*/
static void raw3215_next_io(struct raw3215_info *raw)
{
- struct tty_struct *tty;
-
raw3215_mk_write_req(raw);
raw3215_try_io(raw);
- tty = raw->tty;
- if (tty != NULL &&
- RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE) {
- tty_wakeup(tty);
- }
+ if (raw->tty && RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE)
+ tasklet_schedule(&raw->tlet);
}
/*
@@ -682,6 +687,7 @@ static int raw3215_probe (struct ccw_device *cdev)
return -ENOMEM;
}
init_waitqueue_head(&raw->empty_wait);
+ tasklet_init(&raw->tlet, raw3215_wakeup, (unsigned long) raw);
dev_set_drvdata(&cdev->dev, raw);
cdev->handler = raw3215_irq;
@@ -901,6 +907,7 @@ static int __init con3215_init(void)
raw->flags |= RAW3215_FIXED;
init_waitqueue_head(&raw->empty_wait);
+ tasklet_init(&raw->tlet, raw3215_wakeup, (unsigned long) raw);
/* Request the console irq */
if (raw3215_startup(raw) != 0) {
@@ -966,6 +973,7 @@ static void tty3215_close(struct tty_struct *tty, struct file * filp)
tty->closing = 1;
/* Shutdown the terminal */
raw3215_shutdown(raw);
+ tasklet_kill(&raw->tlet);
tty->closing = 0;
raw->tty = NULL;
}
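
The con3215 change above moves tty_wakeup() out of the interrupt path: raw3215_next_io only schedules a tasklet, and the tasklet performs the wakeup later. A minimal userspace sketch of that deferral, with an atomic flag and an explicit bottom-half call standing in for tasklet_schedule() and the tasklet itself (illustrative only):

/*
 * Sketch of the deferral introduced in con3215.c: the interrupt-context path
 * only marks that a wakeup is needed; a separate bottom half performs it.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int wakeup_pending;

static void fake_tty_wakeup(void)
{
	printf("tty wakeup\n");
}

/* would run in interrupt context: must stay short, so only set the flag */
static void next_io_irq_path(void)
{
	atomic_store(&wakeup_pending, 1);	/* tasklet_schedule() stand-in */
}

/* would run as the tasklet (softirq context) */
static void bottom_half(void)
{
	if (atomic_exchange(&wakeup_pending, 0))
		fake_tty_wakeup();
}

int main(void)
{
	next_io_irq_path();
	bottom_half();
	return 0;
}
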
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index e712981..9117045 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -11,6 +11,7 @@
#include <linux/console.h>
#include <linux/init.h>
#include <linux/interrupt.h>
+#include <linux/compat.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index e5cb924..f3b8bb8 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -75,7 +75,7 @@ static LIST_HEAD(raw3270_devices);
static int raw3270_registered;
/* Module parameters */
-static int tubxcorrect = 0;
+static bool tubxcorrect = 0;
module_param(tubxcorrect, bool, 0);
/*
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 95b909a..3c03c10 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -11,7 +11,7 @@
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/cpu.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/workqueue.h>
#include <asm/smp.h>
@@ -31,14 +31,14 @@ static struct work_struct sclp_cpu_change_work;
static void sclp_cpu_capability_notify(struct work_struct *work)
{
int cpu;
- struct sys_device *sysdev;
+ struct device *dev;
s390_adjust_jiffies();
pr_warning("cpu capability changed.\n");
get_online_cpus();
for_each_online_cpu(cpu) {
- sysdev = get_cpu_sysdev(cpu);
- kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
+ dev = get_cpu_device(cpu);
+ kobject_uevent(&dev->kobj, KOBJ_CHANGE);
}
put_online_cpus();
}
diff --git a/drivers/s390/char/tape_class.h b/drivers/s390/char/tape_class.h
index 9e32780..ba2092f 100644
--- a/drivers/s390/char/tape_class.h
+++ b/drivers/s390/char/tape_class.h
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/major.h>
-#include <linux/kobj_map.h>
#include <linux/cdev.h>
#include <linux/device.h>
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c
index 75bde6a..89c03e6 100644
--- a/drivers/s390/char/vmcp.c
+++ b/drivers/s390/char/vmcp.c
@@ -13,6 +13,7 @@
#include <linux/fs.h>
#include <linux/init.h>
+#include <linux/compat.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
index 11312f4..2211277 100644
--- a/drivers/s390/char/vmwatchdog.c
+++ b/drivers/s390/char/vmwatchdog.c
@@ -28,9 +28,9 @@
#define MAX_CMDLEN 240
#define MIN_INTERVAL 15
static char vmwdt_cmd[MAX_CMDLEN] = "IPL";
-static int vmwdt_conceal;
+static bool vmwdt_conceal;
-static int vmwdt_nowayout = WATCHDOG_NOWAYOUT;
+static bool vmwdt_nowayout = WATCHDOG_NOWAYOUT;
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 0c87b0f..8f9a1a3 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -8,6 +8,7 @@
*/
#include <linux/slab.h>
+#include <linux/compat.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/uaccess.h>
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 2985eb4..204ca72 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -98,7 +98,7 @@ enum cmb_format {
* enum cmb_format.
*/
static int format = CMF_AUTODETECT;
-module_param(format, bool, 0444);
+module_param(format, bint, 0444);
/**
* struct cmb_operations - functions to use depending on cmb_format
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 3ef8d07..770a740 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -167,7 +167,7 @@ again:
DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
- 0, -1, -1, q->irq_ptr->int_parm);
+ q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
return 0;
}
@@ -215,7 +215,7 @@ again:
DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
- 0, -1, -1, q->irq_ptr->int_parm);
+ q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
return 0;
}
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 2acc01f..452989a 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -22,12 +22,9 @@
static struct kmem_cache *qdio_q_cache;
static struct kmem_cache *qdio_aob_cache;
-struct qaob *qdio_allocate_aob()
+struct qaob *qdio_allocate_aob(void)
{
- struct qaob *aob;
-
- aob = kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
- return aob;
+ return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(qdio_allocate_aob);
@@ -180,7 +177,8 @@ static void setup_queues(struct qdio_irq *irq_ptr,
setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
q->is_input_q = 1;
- q->u.in.queue_start_poll = qdio_init->queue_start_poll[i];
+ q->u.in.queue_start_poll = qdio_init->queue_start_poll_array ?
+ qdio_init->queue_start_poll_array[i] : NULL;
setup_storage_lists(q, irq_ptr, input_sbal_array, i);
input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c
index dd47378..077b7d1 100644
--- a/drivers/s390/crypto/zcrypt_pcixcc.c
+++ b/drivers/s390/crypto/zcrypt_pcixcc.c
@@ -56,11 +56,6 @@
#define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
#define PCIXCC_MAX_XCRB_MESSAGE_SIZE (12*1024)
-#define PCIXCC_MAX_XCRB_RESPONSE_SIZE PCIXCC_MAX_XCRB_MESSAGE_SIZE
-#define PCIXCC_MAX_XCRB_DATA_SIZE (11*1024)
-#define PCIXCC_MAX_XCRB_REPLY_SIZE (5*1024)
-
-#define PCIXCC_MAX_RESPONSE_SIZE PCIXCC_MAX_XCRB_RESPONSE_SIZE
#define PCIXCC_CLEANUP_TIME (15*HZ)
@@ -265,7 +260,7 @@ static int ICACRT_msg_to_type6CRT_msgX(struct zcrypt_device *zdev,
* @ap_msg: pointer to AP message
* @xcRB: pointer to user input data
*
- * Returns 0 on success or -EFAULT.
+ * Returns 0 on success or -EFAULT, -EINVAL.
*/
struct type86_fmt2_msg {
struct type86_hdr hdr;
@@ -295,19 +290,12 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
CEIL4(xcRB->request_control_blk_length) +
xcRB->request_data_length;
if (ap_msg->length > PCIXCC_MAX_XCRB_MESSAGE_SIZE)
- return -EFAULT;
- if (CEIL4(xcRB->reply_control_blk_length) > PCIXCC_MAX_XCRB_REPLY_SIZE)
- return -EFAULT;
- if (CEIL4(xcRB->reply_data_length) > PCIXCC_MAX_XCRB_DATA_SIZE)
- return -EFAULT;
- replylen = CEIL4(xcRB->reply_control_blk_length) +
- CEIL4(xcRB->reply_data_length) +
- sizeof(struct type86_fmt2_msg);
- if (replylen > PCIXCC_MAX_XCRB_RESPONSE_SIZE) {
- xcRB->reply_control_blk_length = PCIXCC_MAX_XCRB_RESPONSE_SIZE -
- (sizeof(struct type86_fmt2_msg) +
- CEIL4(xcRB->reply_data_length));
- }
+ return -EINVAL;
+ replylen = sizeof(struct type86_fmt2_msg) +
+ CEIL4(xcRB->reply_control_blk_length) +
+ xcRB->reply_data_length;
+ if (replylen > PCIXCC_MAX_XCRB_MESSAGE_SIZE)
+ return -EINVAL;
/* prepare type6 header */
msg->hdr = static_type6_hdrX;
@@ -326,7 +314,7 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
return -EFAULT;
if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) >
xcRB->request_control_blk_length)
- return -EFAULT;
+ return -EINVAL;
function_code = ((unsigned char *)&msg->cprbx) + msg->cprbx.cprb_len;
memcpy(msg->hdr.function_code, function_code, sizeof(msg->hdr.function_code));
@@ -678,7 +666,7 @@ static void zcrypt_pcixcc_receive(struct ap_device *ap_dev,
break;
case PCIXCC_RESPONSE_TYPE_XCRB:
length = t86r->fmt2.offset2 + t86r->fmt2.count2;
- length = min(PCIXCC_MAX_XCRB_RESPONSE_SIZE, length);
+ length = min(PCIXCC_MAX_XCRB_MESSAGE_SIZE, length);
memcpy(msg->message, reply->message, length);
break;
default:
@@ -1043,7 +1031,7 @@ static int zcrypt_pcixcc_probe(struct ap_device *ap_dev)
struct zcrypt_device *zdev;
int rc = 0;
- zdev = zcrypt_device_alloc(PCIXCC_MAX_RESPONSE_SIZE);
+ zdev = zcrypt_device_alloc(PCIXCC_MAX_XCRB_MESSAGE_SIZE);
if (!zdev)
return -ENOMEM;
zdev->ap_dev = ap_dev;
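
The zcrypt_pcixcc hunks above drop the separate reply-size constants and bound the whole reply (type86 header plus 4-byte-aligned control block plus data part) against the single PCIXCC_MAX_XCRB_MESSAGE_SIZE, returning -EINVAL instead of -EFAULT for oversized requests. A standalone sketch of that check; the header size and error value are illustrative stand-ins:

/*
 * Sketch of the reply-size check after the change: total reply length is the
 * type86 header plus the 4-byte-aligned control block plus the data part,
 * and anything above the single message limit is rejected with -EINVAL.
 */
#include <stdio.h>

#define CEIL4(x)			(((x) + 3) & ~3u)
#define MAX_XCRB_MESSAGE_SIZE		(12 * 1024)
#define TYPE86_FMT2_HDR_SIZE		40	/* illustrative header size */

static int check_reply_len(unsigned int reply_cb_len, unsigned int reply_data_len)
{
	unsigned int replylen = TYPE86_FMT2_HDR_SIZE +
				CEIL4(reply_cb_len) + reply_data_len;

	if (replylen > MAX_XCRB_MESSAGE_SIZE)
		return -22;	/* stands in for -EINVAL */
	return 0;
}

int main(void)
{
	printf("small reply: %d\n", check_reply_len(512, 2048));	/* 0   */
	printf("huge reply:  %d\n", check_reply_len(8192, 8192));	/* -22 */
	return 0;
}
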
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 94f49ff..7bc1955 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -198,7 +198,7 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev,
goto out;
vq = vring_new_virtqueue(config->num, KVM_S390_VIRTIO_RING_ALIGN,
- vdev, (void *) config->address,
+ vdev, true, (void *) config->address,
kvm_notify, callback, name);
if (!vq) {
err = -ENOMEM;
@@ -263,6 +263,11 @@ error:
return PTR_ERR(vqs[i]);
}
+static const char *kvm_bus_name(struct virtio_device *vdev)
+{
+ return "";
+}
+
/*
* The config ops structure as defined by virtio config
*/
@@ -276,6 +281,7 @@ static struct virtio_config_ops kvm_vq_configspace_ops = {
.reset = kvm_reset,
.find_vqs = kvm_find_vqs,
.del_vqs = kvm_del_vqs,
+ .bus_name = kvm_bus_name,
};
/*
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index b6a6356..8160591 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -63,6 +63,7 @@
#include <asm/io.h>
#include <asm/uaccess.h>
+#include <asm/ebcdic.h>
#include <net/iucv/iucv.h>
#include "fsm.h"
@@ -75,7 +76,7 @@ MODULE_DESCRIPTION ("Linux for S/390 IUCV network driver");
* Debug Facility stuff
*/
#define IUCV_DBF_SETUP_NAME "iucv_setup"
-#define IUCV_DBF_SETUP_LEN 32
+#define IUCV_DBF_SETUP_LEN 64
#define IUCV_DBF_SETUP_PAGES 2
#define IUCV_DBF_SETUP_NR_AREAS 1
#define IUCV_DBF_SETUP_LEVEL 3
@@ -226,6 +227,7 @@ struct iucv_connection {
struct net_device *netdev;
struct connection_profile prof;
char userid[9];
+ char userdata[17];
};
/**
@@ -263,7 +265,7 @@ struct ll_header {
};
#define NETIUCV_HDRLEN (sizeof(struct ll_header))
-#define NETIUCV_BUFSIZE_MAX 32768
+#define NETIUCV_BUFSIZE_MAX 65537
#define NETIUCV_BUFSIZE_DEFAULT NETIUCV_BUFSIZE_MAX
#define NETIUCV_MTU_MAX (NETIUCV_BUFSIZE_MAX - NETIUCV_HDRLEN)
#define NETIUCV_MTU_DEFAULT 9216
@@ -288,7 +290,12 @@ static inline int netiucv_test_and_set_busy(struct net_device *dev)
return test_and_set_bit(0, &priv->tbusy);
}
-static u8 iucvMagic[16] = {
+static u8 iucvMagic_ascii[16] = {
+ 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+ 0x30, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20
+};
+
+static u8 iucvMagic_ebcdic[16] = {
0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40,
0xF0, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40
};
@@ -301,18 +308,38 @@ static u8 iucvMagic[16] = {
*
* @returns The printable string (static data!!)
*/
-static char *netiucv_printname(char *name)
+static char *netiucv_printname(char *name, int len)
{
- static char tmp[9];
+ static char tmp[17];
char *p = tmp;
- memcpy(tmp, name, 8);
- tmp[8] = '\0';
- while (*p && (!isspace(*p)))
+ memcpy(tmp, name, len);
+ tmp[len] = '\0';
+ while (*p && ((p - tmp) < len) && (!isspace(*p)))
p++;
*p = '\0';
return tmp;
}
+static char *netiucv_printuser(struct iucv_connection *conn)
+{
+ static char tmp_uid[9];
+ static char tmp_udat[17];
+ static char buf[100];
+
+ if (memcmp(conn->userdata, iucvMagic_ebcdic, 16)) {
+ tmp_uid[8] = '\0';
+ tmp_udat[16] = '\0';
+ memcpy(tmp_uid, conn->userid, 8);
+ memcpy(tmp_uid, netiucv_printname(tmp_uid, 8), 8);
+ memcpy(tmp_udat, conn->userdata, 16);
+ EBCASC(tmp_udat, 16);
+ memcpy(tmp_udat, netiucv_printname(tmp_udat, 16), 16);
+ sprintf(buf, "%s.%s", tmp_uid, tmp_udat);
+ return buf;
+ } else
+ return netiucv_printname(conn->userid, 8);
+}
+
/**
* States of the interface statemachine.
*/
@@ -563,15 +590,18 @@ static int netiucv_callback_connreq(struct iucv_path *path,
{
struct iucv_connection *conn = path->private;
struct iucv_event ev;
+ static char tmp_user[9];
+ static char tmp_udat[17];
int rc;
- if (memcmp(iucvMagic, ipuser, 16))
- /* ipuser must match iucvMagic. */
- return -EINVAL;
rc = -EINVAL;
+ memcpy(tmp_user, netiucv_printname(ipvmid, 8), 8);
+ memcpy(tmp_udat, ipuser, 16);
+ EBCASC(tmp_udat, 16);
read_lock_bh(&iucv_connection_rwlock);
list_for_each_entry(conn, &iucv_connection_list, list) {
- if (strncmp(ipvmid, conn->userid, 8))
+ if (strncmp(ipvmid, conn->userid, 8) ||
+ strncmp(ipuser, conn->userdata, 16))
continue;
/* Found a matching connection for this path. */
conn->path = path;
@@ -580,6 +610,8 @@ static int netiucv_callback_connreq(struct iucv_path *path,
fsm_event(conn->fsm, CONN_EVENT_CONN_REQ, &ev);
rc = 0;
}
+ IUCV_DBF_TEXT_(setup, 2, "Connection requested for %s.%s\n",
+ tmp_user, netiucv_printname(tmp_udat, 16));
read_unlock_bh(&iucv_connection_rwlock);
return rc;
}
@@ -816,7 +848,7 @@ static void conn_action_connaccept(fsm_instance *fi, int event, void *arg)
conn->path = path;
path->msglim = NETIUCV_QUEUELEN_DEFAULT;
path->flags = 0;
- rc = iucv_path_accept(path, &netiucv_handler, NULL, conn);
+ rc = iucv_path_accept(path, &netiucv_handler, conn->userdata , conn);
if (rc) {
IUCV_DBF_TEXT_(setup, 2, "rc %d from iucv_accept", rc);
return;
@@ -854,7 +886,7 @@ static void conn_action_conntimsev(fsm_instance *fi, int event, void *arg)
IUCV_DBF_TEXT(trace, 3, __func__);
fsm_deltimer(&conn->timer);
- iucv_path_sever(conn->path, NULL);
+ iucv_path_sever(conn->path, conn->userdata);
fsm_newstate(fi, CONN_STATE_STARTWAIT);
}
@@ -867,9 +899,9 @@ static void conn_action_connsever(fsm_instance *fi, int event, void *arg)
IUCV_DBF_TEXT(trace, 3, __func__);
fsm_deltimer(&conn->timer);
- iucv_path_sever(conn->path, NULL);
- dev_info(privptr->dev, "The peer interface of the IUCV device"
- " has closed the connection\n");
+ iucv_path_sever(conn->path, conn->userdata);
+ dev_info(privptr->dev, "The peer z/VM guest %s has closed the "
+ "connection\n", netiucv_printuser(conn));
IUCV_DBF_TEXT(data, 2,
"conn_action_connsever: Remote dropped connection\n");
fsm_newstate(fi, CONN_STATE_STARTWAIT);
@@ -886,8 +918,6 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
IUCV_DBF_TEXT(trace, 3, __func__);
fsm_newstate(fi, CONN_STATE_STARTWAIT);
- IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n",
- netdev->name, conn->userid);
/*
* We must set the state before calling iucv_connect because the
@@ -897,8 +927,11 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
fsm_newstate(fi, CONN_STATE_SETUPWAIT);
conn->path = iucv_path_alloc(NETIUCV_QUEUELEN_DEFAULT, 0, GFP_KERNEL);
+ IUCV_DBF_TEXT_(setup, 2, "%s: connecting to %s ...\n",
+ netdev->name, netiucv_printuser(conn));
+
rc = iucv_path_connect(conn->path, &netiucv_handler, conn->userid,
- NULL, iucvMagic, conn);
+ NULL, conn->userdata, conn);
switch (rc) {
case 0:
netdev->tx_queue_len = conn->path->msglim;
@@ -908,13 +941,13 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
case 11:
dev_warn(privptr->dev,
"The IUCV device failed to connect to z/VM guest %s\n",
- netiucv_printname(conn->userid));
+ netiucv_printname(conn->userid, 8));
fsm_newstate(fi, CONN_STATE_STARTWAIT);
break;
case 12:
dev_warn(privptr->dev,
"The IUCV device failed to connect to the peer on z/VM"
- " guest %s\n", netiucv_printname(conn->userid));
+ " guest %s\n", netiucv_printname(conn->userid, 8));
fsm_newstate(fi, CONN_STATE_STARTWAIT);
break;
case 13:
@@ -927,7 +960,7 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg)
dev_err(privptr->dev,
"z/VM guest %s has too many IUCV connections"
" to connect with the IUCV device\n",
- netiucv_printname(conn->userid));
+ netiucv_printname(conn->userid, 8));
fsm_newstate(fi, CONN_STATE_CONNERR);
break;
case 15:
@@ -972,7 +1005,7 @@ static void conn_action_stop(fsm_instance *fi, int event, void *arg)
netiucv_purge_skb_queue(&conn->collect_queue);
if (conn->path) {
IUCV_DBF_TEXT(trace, 5, "calling iucv_path_sever\n");
- iucv_path_sever(conn->path, iucvMagic);
+ iucv_path_sever(conn->path, conn->userdata);
kfree(conn->path);
conn->path = NULL;
}
@@ -1090,7 +1123,8 @@ dev_action_connup(fsm_instance *fi, int event, void *arg)
fsm_newstate(fi, DEV_STATE_RUNNING);
dev_info(privptr->dev,
"The IUCV device has been connected"
- " successfully to %s\n", privptr->conn->userid);
+ " successfully to %s\n",
+ netiucv_printuser(privptr->conn));
IUCV_DBF_TEXT(setup, 3,
"connection is up and running\n");
break;
@@ -1452,45 +1486,72 @@ static ssize_t user_show(struct device *dev, struct device_attribute *attr,
struct netiucv_priv *priv = dev_get_drvdata(dev);
IUCV_DBF_TEXT(trace, 5, __func__);
- return sprintf(buf, "%s\n", netiucv_printname(priv->conn->userid));
+ return sprintf(buf, "%s\n", netiucv_printuser(priv->conn));
}
-static ssize_t user_write(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
+static int netiucv_check_user(const char *buf, size_t count, char *username,
+ char *userdata)
{
- struct netiucv_priv *priv = dev_get_drvdata(dev);
- struct net_device *ndev = priv->conn->netdev;
- char *p;
- char *tmp;
- char username[9];
- int i;
- struct iucv_connection *cp;
+ const char *p;
+ int i;
- IUCV_DBF_TEXT(trace, 3, __func__);
- if (count > 9) {
- IUCV_DBF_TEXT_(setup, 2,
- "%d is length of username\n", (int) count);
+ p = strchr(buf, '.');
+ if ((p && ((count > 26) ||
+ ((p - buf) > 8) ||
+ (buf + count - p > 18))) ||
+ (!p && (count > 9))) {
+ IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
return -EINVAL;
}
- tmp = strsep((char **) &buf, "\n");
- for (i = 0, p = tmp; i < 8 && *p; i++, p++) {
- if (isalnum(*p) || (*p == '$')) {
- username[i]= toupper(*p);
+ for (i = 0, p = buf; i < 8 && *p && *p != '.'; i++, p++) {
+ if (isalnum(*p) || *p == '$') {
+ username[i] = toupper(*p);
continue;
}
- if (*p == '\n') {
+ if (*p == '\n')
/* trailing lf, grr */
break;
- }
IUCV_DBF_TEXT_(setup, 2,
- "username: invalid character %c\n", *p);
+ "conn_write: invalid character %02x\n", *p);
return -EINVAL;
}
while (i < 8)
username[i++] = ' ';
username[8] = '\0';
+ if (*p == '.') {
+ p++;
+ for (i = 0; i < 16 && *p; i++, p++) {
+ if (*p == '\n')
+ break;
+ userdata[i] = toupper(*p);
+ }
+ while (i > 0 && i < 16)
+ userdata[i++] = ' ';
+ } else
+ memcpy(userdata, iucvMagic_ascii, 16);
+ userdata[16] = '\0';
+ ASCEBC(userdata, 16);
+
+ return 0;
+}
+
+static ssize_t user_write(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct netiucv_priv *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = priv->conn->netdev;
+ char username[9];
+ char userdata[17];
+ int rc;
+ struct iucv_connection *cp;
+
+ IUCV_DBF_TEXT(trace, 3, __func__);
+ rc = netiucv_check_user(buf, count, username, userdata);
+ if (rc)
+ return rc;
+
if (memcmp(username, priv->conn->userid, 9) &&
(ndev->flags & (IFF_UP | IFF_RUNNING))) {
/* username changed while the interface is active. */
@@ -1499,15 +1560,17 @@ static ssize_t user_write(struct device *dev, struct device_attribute *attr,
}
read_lock_bh(&iucv_connection_rwlock);
list_for_each_entry(cp, &iucv_connection_list, list) {
- if (!strncmp(username, cp->userid, 9) && cp->netdev != ndev) {
+ if (!strncmp(username, cp->userid, 9) &&
+ !strncmp(userdata, cp->userdata, 17) && cp->netdev != ndev) {
read_unlock_bh(&iucv_connection_rwlock);
- IUCV_DBF_TEXT_(setup, 2, "user_write: Connection "
- "to %s already exists\n", username);
+ IUCV_DBF_TEXT_(setup, 2, "user_write: Connection to %s "
+ "already exists\n", netiucv_printuser(cp));
return -EEXIST;
}
}
read_unlock_bh(&iucv_connection_rwlock);
memcpy(priv->conn->userid, username, 9);
+ memcpy(priv->conn->userdata, userdata, 17);
return count;
}
@@ -1537,7 +1600,8 @@ static ssize_t buffer_write (struct device *dev, struct device_attribute *attr,
bs1 = simple_strtoul(buf, &e, 0);
if (e && (!isspace(*e))) {
- IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %c\n", *e);
+ IUCV_DBF_TEXT_(setup, 2, "buffer_write: invalid char %02x\n",
+ *e);
return -EINVAL;
}
if (bs1 > NETIUCV_BUFSIZE_MAX) {
@@ -1864,7 +1928,8 @@ static void netiucv_unregister_device(struct device *dev)
* Add it to the list of netiucv connections;
*/
static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
- char *username)
+ char *username,
+ char *userdata)
{
struct iucv_connection *conn;
@@ -1893,6 +1958,8 @@ static struct iucv_connection *netiucv_new_connection(struct net_device *dev,
fsm_settimer(conn->fsm, &conn->timer);
fsm_newstate(conn->fsm, CONN_STATE_INVALID);
+ if (userdata)
+ memcpy(conn->userdata, userdata, 17);
if (username) {
memcpy(conn->userid, username, 9);
fsm_newstate(conn->fsm, CONN_STATE_STOPPED);
@@ -1919,6 +1986,7 @@ out:
*/
static void netiucv_remove_connection(struct iucv_connection *conn)
{
+
IUCV_DBF_TEXT(trace, 3, __func__);
write_lock_bh(&iucv_connection_rwlock);
list_del_init(&conn->list);
@@ -1926,7 +1994,7 @@ static void netiucv_remove_connection(struct iucv_connection *conn)
fsm_deltimer(&conn->timer);
netiucv_purge_skb_queue(&conn->collect_queue);
if (conn->path) {
- iucv_path_sever(conn->path, iucvMagic);
+ iucv_path_sever(conn->path, conn->userdata);
kfree(conn->path);
conn->path = NULL;
}
@@ -1985,7 +2053,7 @@ static void netiucv_setup_netdevice(struct net_device *dev)
/**
* Allocate and initialize everything of a net device.
*/
-static struct net_device *netiucv_init_netdevice(char *username)
+static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
{
struct netiucv_priv *privptr;
struct net_device *dev;
@@ -2004,7 +2072,7 @@ static struct net_device *netiucv_init_netdevice(char *username)
if (!privptr->fsm)
goto out_netdev;
- privptr->conn = netiucv_new_connection(dev, username);
+ privptr->conn = netiucv_new_connection(dev, username, userdata);
if (!privptr->conn) {
IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_new_connection\n");
goto out_fsm;
@@ -2022,47 +2090,31 @@ out_netdev:
static ssize_t conn_write(struct device_driver *drv,
const char *buf, size_t count)
{
- const char *p;
char username[9];
- int i, rc;
+ char userdata[17];
+ int rc;
struct net_device *dev;
struct netiucv_priv *priv;
struct iucv_connection *cp;
IUCV_DBF_TEXT(trace, 3, __func__);
- if (count>9) {
- IUCV_DBF_TEXT(setup, 2, "conn_write: too long\n");
- return -EINVAL;
- }
-
- for (i = 0, p = buf; i < 8 && *p; i++, p++) {
- if (isalnum(*p) || *p == '$') {
- username[i] = toupper(*p);
- continue;
- }
- if (*p == '\n')
- /* trailing lf, grr */
- break;
- IUCV_DBF_TEXT_(setup, 2,
- "conn_write: invalid character %c\n", *p);
- return -EINVAL;
- }
- while (i < 8)
- username[i++] = ' ';
- username[8] = '\0';
+ rc = netiucv_check_user(buf, count, username, userdata);
+ if (rc)
+ return rc;
read_lock_bh(&iucv_connection_rwlock);
list_for_each_entry(cp, &iucv_connection_list, list) {
- if (!strncmp(username, cp->userid, 9)) {
+ if (!strncmp(username, cp->userid, 9) &&
+ !strncmp(userdata, cp->userdata, 17)) {
read_unlock_bh(&iucv_connection_rwlock);
- IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection "
- "to %s already exists\n", username);
+ IUCV_DBF_TEXT_(setup, 2, "conn_write: Connection to %s "
+ "already exists\n", netiucv_printuser(cp));
return -EEXIST;
}
}
read_unlock_bh(&iucv_connection_rwlock);
- dev = netiucv_init_netdevice(username);
+ dev = netiucv_init_netdevice(username, userdata);
if (!dev) {
IUCV_DBF_TEXT(setup, 2, "NULL from netiucv_init_netdevice\n");
return -ENODEV;
@@ -2083,8 +2135,9 @@ static ssize_t conn_write(struct device_driver *drv,
if (rc)
goto out_unreg;
- dev_info(priv->dev, "The IUCV interface to %s has been"
- " established successfully\n", netiucv_printname(username));
+ dev_info(priv->dev, "The IUCV interface to %s has been established "
+ "successfully\n",
+ netiucv_printuser(priv->conn));
return count;
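
netiucv_check_user above splits the sysfs input into an up-to-8-character z/VM user ID and an optional 16-character userdata string separated by a dot, uppercasing and blank-padding both. A simplified standalone sketch of that parsing; the iucvMagic default for missing userdata, the overall length limits and the ASCII-to-EBCDIC conversion are deliberately omitted:

/*
 * Sketch of the "USERID.userdata" parsing added for netiucv: up to 8
 * alphanumeric (or '$') characters before the dot, up to 16 after it,
 * both uppercased and blank-padded. EBCDIC conversion is left out.
 */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

static int parse_user(const char *buf, char username[9], char userdata[17])
{
	const char *p = buf;
	int i;

	for (i = 0; i < 8 && *p && *p != '.' && *p != '\n'; i++, p++) {
		if (!isalnum((unsigned char)*p) && *p != '$')
			return -1;	/* the driver returns -EINVAL here */
		username[i] = toupper((unsigned char)*p);
	}
	while (i < 8)
		username[i++] = ' ';
	username[8] = '\0';

	memset(userdata, ' ', 16);
	if (*p == '.') {
		for (i = 0, p++; i < 16 && *p && *p != '\n'; i++, p++)
			userdata[i] = toupper((unsigned char)*p);
	}
	userdata[16] = '\0';
	return 0;
}

int main(void)
{
	char uid[9], udat[17];

	if (!parse_user("lnxuser1.routera\n", uid, udat))
		printf("'%s' / '%s'\n", uid, udat);	/* 'LNXUSER1' / 'ROUTERA' padded to 16 */
	return 0;
}
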
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index fff57de..9c3f38d 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -66,7 +66,7 @@ static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
struct qeth_qdio_out_buffer *buf,
enum qeth_qdio_buffer_states newbufstate);
-
+static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
static inline const char *qeth_get_cardname(struct qeth_card *card)
{
@@ -363,6 +363,9 @@ static inline enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q,
int bidx, int forced_cleanup)
{
+ if (q->card->options.cq != QETH_CQ_ENABLED)
+ return;
+
if (q->bufs[bidx]->next_pending != NULL) {
struct qeth_qdio_out_buffer *head = q->bufs[bidx];
struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;
@@ -390,6 +393,13 @@ static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q,
}
}
+ if (forced_cleanup && (atomic_read(&(q->bufs[bidx]->state)) ==
+ QETH_QDIO_BUF_HANDLED_DELAYED)) {
+ /* for recovery situations */
+ q->bufs[bidx]->aob = q->bufstates[bidx].aob;
+ qeth_init_qdio_out_buf(q, bidx);
+ QETH_CARD_TEXT(q->card, 2, "clprecov");
+ }
}
@@ -412,7 +422,6 @@ static inline void qeth_qdio_handle_aob(struct qeth_card *card,
notification = TX_NOTIFY_OK;
} else {
BUG_ON(atomic_read(&buffer->state) != QETH_QDIO_BUF_PENDING);
-
atomic_set(&buffer->state, QETH_QDIO_BUF_IN_CQ);
notification = TX_NOTIFY_DELAYED_OK;
}
@@ -425,7 +434,8 @@ static inline void qeth_qdio_handle_aob(struct qeth_card *card,
buffer->aob = NULL;
qeth_clear_output_buffer(buffer->q, buffer,
- QETH_QDIO_BUF_HANDLED_DELAYED);
+ QETH_QDIO_BUF_HANDLED_DELAYED);
+
/* from here on: do not touch buffer anymore */
qdio_release_aob(aob);
}
@@ -1113,11 +1123,25 @@ out:
static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
{
struct sk_buff *skb;
+ struct iucv_sock *iucv;
+ int notify_general_error = 0;
+
+ if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
+ notify_general_error = 1;
+
+ /* release may never happen from within CQ tasklet scope */
+ BUG_ON(atomic_read(&buf->state) == QETH_QDIO_BUF_IN_CQ);
skb = skb_dequeue(&buf->skb_list);
while (skb) {
QETH_CARD_TEXT(buf->q->card, 5, "skbr");
QETH_CARD_TEXT_(buf->q->card, 5, "%lx", (long) skb);
+ if (notify_general_error && skb->protocol == ETH_P_AF_IUCV) {
+ if (skb->sk) {
+ iucv = iucv_sk(skb->sk);
+ iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR);
+ }
+ }
atomic_dec(&skb->users);
dev_kfree_skb_any(skb);
skb = skb_dequeue(&buf->skb_list);
@@ -1160,7 +1184,7 @@ static void qeth_clear_outq_buffers(struct qeth_qdio_out_q *q, int free)
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
if (!q->bufs[j])
continue;
- qeth_cleanup_handled_pending(q, j, free);
+ qeth_cleanup_handled_pending(q, j, 1);
qeth_clear_output_buffer(q, q->bufs[j], QETH_QDIO_BUF_EMPTY);
if (free) {
kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
@@ -1207,7 +1231,7 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
qeth_free_cq(card);
cancel_delayed_work_sync(&card->buffer_reclaim_work);
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j)
- kfree_skb(card->qdio.in_q->bufs[j].rx_skb);
+ dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
kfree(card->qdio.in_q);
card->qdio.in_q = NULL;
/* inbound buffer pool */
@@ -1329,6 +1353,7 @@ static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
static void qeth_start_kernel_thread(struct work_struct *work)
{
+ struct task_struct *ts;
struct qeth_card *card = container_of(work, struct qeth_card,
kernel_thread_starter);
QETH_CARD_TEXT(card , 2, "strthrd");
@@ -1336,9 +1361,15 @@ static void qeth_start_kernel_thread(struct work_struct *work)
if (card->read.state != CH_STATE_UP &&
card->write.state != CH_STATE_UP)
return;
- if (qeth_do_start_thread(card, QETH_RECOVER_THREAD))
- kthread_run(card->discipline.recover, (void *) card,
+ if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
+ ts = kthread_run(card->discipline.recover, (void *)card,
"qeth_recover");
+ if (IS_ERR(ts)) {
+ qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
+ qeth_clear_thread_running_bit(card,
+ QETH_RECOVER_THREAD);
+ }
+ }
}
static int qeth_setup_card(struct qeth_card *card)
@@ -4521,7 +4552,7 @@ static int qeth_qdio_establish(struct qeth_card *card)
init_data.no_output_qs = card->qdio.no_out_queues;
init_data.input_handler = card->discipline.input_handler;
init_data.output_handler = card->discipline.output_handler;
- init_data.queue_start_poll = queue_start_poll;
+ init_data.queue_start_poll_array = queue_start_poll;
init_data.int_parm = (unsigned long) card;
init_data.input_sbal_addr_array = (void **) in_sbal_ptrs;
init_data.output_sbal_addr_array = (void **) out_sbal_ptrs;
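
qeth_start_kernel_thread now captures the kthread_run() result and, if it is an error pointer, clears the recovery-thread start and running bits again instead of silently losing the failure. A small sketch of the error-pointer convention involved; IS_ERR and friends are re-implemented here only for a userspace demo:

/*
 * Sketch of the error-pointer check added around kthread_run(): the kernel
 * encodes small negative errnos in the pointer value, so a failed start must
 * be detected with IS_ERR() and the already-set state bits rolled back.
 */
#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)
#define PTR_ERR(ptr)	((long)(ptr))

static void *fake_kthread_run(int fail)
{
	static int dummy_task;

	return fail ? ERR_PTR(-12 /* -ENOMEM */) : &dummy_task;
}

int main(void)
{
	void *ts = fake_kthread_run(1);

	if (IS_ERR(ts)) {
		printf("thread start failed: %ld, rolling back state bits\n",
		       PTR_ERR(ts));
		/* here the driver clears the start and running bits again */
	}
	return 0;
}
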
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index a21ae3d..c129671 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -301,21 +301,21 @@ static void qeth_l2_process_vlans(struct qeth_card *card)
spin_unlock_bh(&card->vlanlock);
}
-static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct qeth_card *card = dev->ml_priv;
struct qeth_vlan_vid *id;
QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
if (!vid)
- return;
+ return 0;
if (card->info.type == QETH_CARD_TYPE_OSM) {
QETH_CARD_TEXT(card, 3, "aidOSM");
- return;
+ return 0;
}
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
QETH_CARD_TEXT(card, 3, "aidREC");
- return;
+ return 0;
}
id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
if (id) {
@@ -324,10 +324,13 @@ static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
spin_lock_bh(&card->vlanlock);
list_add_tail(&id->list, &card->vid_list);
spin_unlock_bh(&card->vlanlock);
+ } else {
+ return -ENOMEM;
}
+ return 0;
}
-static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct qeth_vlan_vid *id, *tmpid = NULL;
struct qeth_card *card = dev->ml_priv;
@@ -335,11 +338,11 @@ static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
if (card->info.type == QETH_CARD_TYPE_OSM) {
QETH_CARD_TEXT(card, 3, "kidOSM");
- return;
+ return 0;
}
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
QETH_CARD_TEXT(card, 3, "kidREC");
- return;
+ return 0;
}
spin_lock_bh(&card->vlanlock);
list_for_each_entry(id, &card->vid_list, list) {
@@ -355,6 +358,7 @@ static void qeth_l2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
kfree(tmpid);
}
qeth_l2_set_multicast_list(card->dev);
+ return 0;
}
static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
@@ -1169,6 +1173,7 @@ static void __exit qeth_l2_exit(void)
static void qeth_l2_shutdown(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ qeth_set_allowed_threads(card, 0, 1);
if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
qeth_qdio_clear_card(card, 0);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 4d5307d..9648e4e 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1869,15 +1869,15 @@ static void qeth_l3_free_vlan_addresses(struct qeth_card *card,
qeth_l3_free_vlan_addresses6(card, vid);
}
-static void qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l3_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
struct qeth_card *card = dev->ml_priv;
set_bit(vid, card->active_vlans);
- return;
+ return 0;
}
-static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
struct qeth_card *card = dev->ml_priv;
unsigned long flags;
@@ -1885,7 +1885,7 @@ static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) {
QETH_CARD_TEXT(card, 3, "kidREC");
- return;
+ return 0;
}
spin_lock_irqsave(&card->vlanlock, flags);
/* unregister IP addresses of vlan device */
@@ -1893,6 +1893,7 @@ static void qeth_l3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
clear_bit(vid, card->active_vlans);
spin_unlock_irqrestore(&card->vlanlock, flags);
qeth_l3_set_multicast_list(card->dev);
+ return 0;
}
static inline int qeth_l3_rebuild_skb(struct qeth_card *card,
@@ -2759,7 +2760,7 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb)
rcu_read_lock();
dst = skb_dst(skb);
if (dst)
- n = dst_get_neighbour(dst);
+ n = dst_get_neighbour_noref(dst);
if (n) {
cast_type = n->type;
rcu_read_unlock();
@@ -2855,7 +2856,7 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr,
rcu_read_lock();
dst = skb_dst(skb);
if (dst)
- n = dst_get_neighbour(dst);
+ n = dst_get_neighbour_noref(dst);
if (ipv == 4) {
/* IPv4 */
hdr->hdr.l3.flags = qeth_l3_get_qeth_hdr_flags4(cast_type);
@@ -3209,7 +3210,8 @@ static int qeth_l3_stop(struct net_device *dev)
return 0;
}
-static u32 qeth_l3_fix_features(struct net_device *dev, u32 features)
+static netdev_features_t qeth_l3_fix_features(struct net_device *dev,
+ netdev_features_t features)
{
struct qeth_card *card = dev->ml_priv;
@@ -3223,7 +3225,8 @@ static u32 qeth_l3_fix_features(struct net_device *dev, u32 features)
return features;
}
-static int qeth_l3_set_features(struct net_device *dev, u32 features)
+static int qeth_l3_set_features(struct net_device *dev,
+ netdev_features_t features)
{
struct qeth_card *card = dev->ml_priv;
u32 changed = dev->features ^ features;
@@ -3489,14 +3492,13 @@ contin:
else
netif_carrier_off(card->dev);
if (recover_flag == CARD_STATE_RECOVER) {
+ rtnl_lock();
if (recovery_mode)
__qeth_l3_open(card->dev);
- else {
- rtnl_lock();
+ else
dev_open(card->dev);
- rtnl_unlock();
- }
qeth_l3_set_multicast_list(card->dev);
+ rtnl_unlock();
}
/* let user_space know that device is online */
kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
@@ -3542,6 +3544,11 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
card->info.hwtrap = 1;
}
qeth_l3_stop_card(card, recovery_mode);
+ if ((card->options.cq == QETH_CQ_ENABLED) && card->dev) {
+ rtnl_lock();
+ call_netdevice_notifiers(NETDEV_REBOOT, card->dev);
+ rtnl_unlock();
+ }
rc = ccw_device_set_offline(CARD_DDEV(card));
rc2 = ccw_device_set_offline(CARD_WDEV(card));
rc3 = ccw_device_set_offline(CARD_RDEV(card));
@@ -3596,6 +3603,7 @@ static int qeth_l3_recover(void *ptr)
static void qeth_l3_shutdown(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ qeth_set_allowed_threads(card, 0, 1);
if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
qeth_qdio_clear_card(card, 0);
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c
index 303dde0..fab2c25 100644
--- a/drivers/s390/scsi/zfcp_cfdc.c
+++ b/drivers/s390/scsi/zfcp_cfdc.c
@@ -11,6 +11,7 @@
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 06ea3bc..16570aa 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -830,16 +830,11 @@ config SCSI_ISCI
tristate "Intel(R) C600 Series Chipset SAS Controller"
depends on PCI && SCSI
depends on X86
- # (temporary): known alpha quality driver
- depends on EXPERIMENTAL
select SCSI_SAS_LIBSAS
- select SCSI_SAS_HOST_SMP
---help---
This driver supports the 6Gb/s SAS capabilities of the storage
control unit found in the Intel(R) C600 series chipset.
- The experimental tag will be removed after the driver exits alpha
-
config SCSI_GENERIC_NCR5380
tristate "Generic NCR5380/53c400 SCSI PIO support"
depends on ISA && SCSI
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 8a0b330..0bd38da 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -650,6 +650,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
AAC_OPT_NEW_COMM) ?
(dev->scsi_host_ptr->max_sectors << 9) :
65536)) {
+ kfree(usg);
rcode = -EINVAL;
goto cleanup;
}
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 195823a..ed119ce 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -102,7 +102,7 @@ static int setup_dmaspeed[MAXBOARDS] __initdata = { -1, -1, -1, -1 };
*/
#if defined(MODULE)
-static int isapnp = 0;
+static bool isapnp = 0;
static int aha1542[] = {0x330, 11, 4, -1};
module_param_array(aha1542, int, NULL, 0);
module_param(isapnp, bool, 0);
diff --git a/drivers/scsi/aic7xxx/aicasm/aicasm.c b/drivers/scsi/aic7xxx/aicasm/aicasm.c
index e4a7787..2e3117a 100644
--- a/drivers/scsi/aic7xxx/aicasm/aicasm.c
+++ b/drivers/scsi/aic7xxx/aicasm/aicasm.c
@@ -1,5 +1,5 @@
/*
- * Aic7xxx SCSI host adapter firmware asssembler
+ * Aic7xxx SCSI host adapter firmware assembler
*
* Copyright (c) 1997, 1998, 2000, 2001 Justin T. Gibbs.
* Copyright (c) 2001, 2002 Adaptec Inc.
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 8b002f6..33c8f09 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -733,7 +733,7 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep);
}
-mode_t be2iscsi_attr_is_visible(int param_type, int param)
+umode_t be2iscsi_attr_is_visible(int param_type, int param)
{
switch (param_type) {
case ISCSI_HOST_PARAM:
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index 4a1f2e3..5c45be1 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -26,7 +26,7 @@
#define BE2_IPV4 0x1
#define BE2_IPV6 0x10
-mode_t be2iscsi_attr_is_visible(int param_type, int param);
+umode_t be2iscsi_attr_is_visible(int param_type, int param);
void beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
struct beiscsi_offload_params *params);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 379c696..375756f 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -325,9 +325,9 @@ static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
}
-static mode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
+static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
{
- int rc;
+ umode_t rc;
switch (type) {
case ISCSI_BOOT_TGT_NAME:
@@ -348,9 +348,9 @@ static mode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
return rc;
}
-static mode_t beiscsi_ini_get_attr_visibility(void *data, int type)
+static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
{
- int rc;
+ umode_t rc;
switch (type) {
case ISCSI_BOOT_INI_INITIATOR_NAME:
@@ -364,9 +364,9 @@ static mode_t beiscsi_ini_get_attr_visibility(void *data, int type)
}
-static mode_t beiscsi_eth_get_attr_visibility(void *data, int type)
+static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
{
- int rc;
+ umode_t rc;
switch (type) {
case ISCSI_BOOT_ETH_FLAGS:
@@ -1105,7 +1105,6 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
struct be_status_bhs *sts_bhs =
(struct be_status_bhs *)io_task->cmd_bhs;
struct iscsi_conn *conn = beiscsi_conn->conn;
- unsigned int sense_len;
unsigned char *sense;
u32 resid = 0, exp_cmdsn, max_cmdsn;
u8 rsp, status, flags;
@@ -1153,9 +1152,11 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
}
if (status == SAM_STAT_CHECK_CONDITION) {
+ u16 sense_len;
unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
+
sense = sts_bhs->sense_info + sizeof(unsigned short);
- sense_len = cpu_to_be16(*slen);
+ sense_len = be16_to_cpu(*slen);
memcpy(task->sc->sense_buffer, sense,
min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
}
diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h
index 7b3d235..b5a1595 100644
--- a/drivers/scsi/bfa/bfa_defs.h
+++ b/drivers/scsi/bfa/bfa_defs.h
@@ -902,7 +902,7 @@ struct sfp_mem_s {
union sfp_xcvr_e10g_code_u {
u8 b;
struct {
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
u8 e10g_unall:1; /* 10G Ethernet compliance */
u8 e10g_lrm:1;
u8 e10g_lr:1;
@@ -982,7 +982,7 @@ union sfp_xcvr_fc2_code_u {
union sfp_xcvr_fc3_code_u {
u8 b;
struct {
-#ifdef __BIGENDIAN
+#ifdef __BIG_ENDIAN
u8 rsv4:1;
u8 mb800:1; /* 800 Mbytes/sec */
u8 mb1600:1; /* 1600 Mbytes/sec */
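__BIGENDIAN is not a macro the kernel's byteorder headers provide, so unless it was supplied elsewhere in the build these big-endian bitfield branches were never compiled in; __BIG_ENDIAN is the guard <asm/byteorder.h> actually defines (exactly one of __BIG_ENDIAN / __LITTLE_ENDIAN in kernel code). A compressed sketch of the pattern, with a hypothetical field layout:

#include <linux/types.h>
#include <asm/byteorder.h>	/* defines __BIG_ENDIAN or __LITTLE_ENDIAN */

/* Declare from the MSB down on big-endian and from the LSB up on
 * little-endian so the bitfield view 'r' lines up with the raw byte 'b'.
 */
union example_code_u {
	u8 b;
	struct {
#ifdef __BIG_ENDIAN
		u8 top:1;
		u8 rest:7;
#else
		u8 rest:7;
		u8 top:1;
#endif
	} r;
};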
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 863c6ba..cb07c62 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -34,22 +34,22 @@
struct bfa_iocfc_intr_attr_s {
u8 coalesce; /* enable/disable coalescing */
u8 rsvd[3];
- __be16 latency; /* latency in microseconds */
- __be16 delay; /* delay in microseconds */
+ __be16 latency; /* latency in microseconds */
+ __be16 delay; /* delay in microseconds */
};
/*
* IOC firmware configuraton
*/
struct bfa_iocfc_fwcfg_s {
- u16 num_fabrics; /* number of fabrics */
- u16 num_lports; /* number of local lports */
- u16 num_rports; /* number of remote ports */
- u16 num_ioim_reqs; /* number of IO reqs */
- u16 num_tskim_reqs; /* task management requests */
- u16 num_fwtio_reqs; /* number of TM IO reqs in FW */
- u16 num_fcxp_reqs; /* unassisted FC exchanges */
- u16 num_uf_bufs; /* unsolicited recv buffers */
+ u16 num_fabrics; /* number of fabrics */
+ u16 num_lports; /* number of local lports */
+ u16 num_rports; /* number of remote ports */
+ u16 num_ioim_reqs; /* number of IO reqs */
+ u16 num_tskim_reqs; /* task management requests */
+ u16 num_fwtio_reqs; /* number of TM IO reqs in FW */
+ u16 num_fcxp_reqs; /* unassisted FC exchanges */
+ u16 num_uf_bufs; /* unsolicited recv buffers */
u8 num_cqs;
u8 fw_tick_res; /* FW clock resolution in ms */
u8 rsvd[2];
@@ -57,19 +57,19 @@ struct bfa_iocfc_fwcfg_s {
#pragma pack()
struct bfa_iocfc_drvcfg_s {
- u16 num_reqq_elems; /* number of req queue elements */
- u16 num_rspq_elems; /* number of rsp queue elements */
- u16 num_sgpgs; /* number of total SG pages */
- u16 num_sboot_tgts; /* number of SAN boot targets */
- u16 num_sboot_luns; /* number of SAN boot luns */
- u16 ioc_recover; /* IOC recovery mode */
- u16 min_cfg; /* minimum configuration */
- u16 path_tov; /* device path timeout */
- u16 num_tio_reqs; /*!< number of TM IO reqs */
+ u16 num_reqq_elems; /* number of req queue elements */
+ u16 num_rspq_elems; /* number of rsp queue elements */
+ u16 num_sgpgs; /* number of total SG pages */
+ u16 num_sboot_tgts; /* number of SAN boot targets */
+ u16 num_sboot_luns; /* number of SAN boot luns */
+ u16 ioc_recover; /* IOC recovery mode */
+ u16 min_cfg; /* minimum configuration */
+ u16 path_tov; /* device path timeout */
+ u16 num_tio_reqs; /* number of TM IO reqs */
u8 port_mode;
u8 rsvd_a;
- bfa_boolean_t delay_comp; /* delay completion of
- failed inflight IOs */
+ bfa_boolean_t delay_comp; /* delay completion of failed
+ * inflight IOs */
u16 num_ttsk_reqs; /* TM task management requests */
u32 rsvd;
};
@@ -101,8 +101,8 @@ struct bfa_fw_ioim_stats_s {
u32 fw_frm_drop; /* f/w drop the frame */
u32 rec_timeout; /* FW rec timed out */
- u32 error_rec; /* FW sending rec on
- * an error condition*/
+ u32 error_rec; /* FW sending rec on
+ * an error condition*/
u32 wait_for_si; /* FW wait for SI */
u32 rec_rsp_inval; /* REC rsp invalid */
u32 seqr_io_abort; /* target does not know cmd so abort */
@@ -124,9 +124,9 @@ struct bfa_fw_ioim_stats_s {
u32 unexp_fcp_rsp; /* fcp response in wrong state */
u32 fcp_rsp_under_run; /* fcp rsp IO underrun */
- u32 fcp_rsp_under_run_wr; /* fcp rsp IO underrun for write */
+ u32 fcp_rsp_under_run_wr; /* fcp rsp IO underrun for write */
u32 fcp_rsp_under_run_err; /* fcp rsp IO underrun error */
- u32 fcp_rsp_resid_inval; /* invalid residue */
+ u32 fcp_rsp_resid_inval; /* invalid residue */
u32 fcp_rsp_over_run; /* fcp rsp IO overrun */
u32 fcp_rsp_over_run_err; /* fcp rsp IO overrun error */
u32 fcp_rsp_proto_err; /* protocol error in fcp rsp */
@@ -142,21 +142,20 @@ struct bfa_fw_ioim_stats_s {
u32 ioh_hit_class2_event; /* IOH hit class2 */
u32 ioh_miss_other_event; /* IOH miss other */
u32 ioh_seq_cnt_err_event; /* IOH seq cnt error */
- u32 ioh_len_err_event; /* IOH len error - fcp_dl !=
- * bytes xfered */
+ u32 ioh_len_err_event; /* IOH len error - fcp_dl !=
+ * bytes xfered */
u32 ioh_seq_len_err_event; /* IOH seq len error */
u32 ioh_data_oor_event; /* Data out of range */
u32 ioh_ro_ooo_event; /* Relative offset out of range */
u32 ioh_cpu_owned_event; /* IOH hit -iost owned by f/w */
u32 ioh_unexp_frame_event; /* unexpected frame received
- * count */
+ * count */
u32 ioh_err_int; /* IOH error int during data-phase
- * for scsi write
- */
+ * for scsi write */
};
struct bfa_fw_tio_stats_s {
- u32 tio_conf_proc; /* TIO CONF processed */
+ u32 tio_conf_proc; /* TIO CONF processed */
u32 tio_conf_drop; /* TIO CONF dropped */
u32 tio_cleanup_req; /* TIO cleanup requested */
u32 tio_cleanup_comp; /* TIO cleanup completed */
@@ -164,34 +163,36 @@ struct bfa_fw_tio_stats_s {
u32 tio_abort_rsp_comp; /* TIO abort rsp completed */
u32 tio_abts_req; /* TIO ABTS requested */
u32 tio_abts_ack; /* TIO ABTS ack-ed */
- u32 tio_abts_ack_nocomp; /* TIO ABTS ack-ed but not completed */
+ u32 tio_abts_ack_nocomp;/* TIO ABTS ack-ed but not completed */
u32 tio_abts_tmo; /* TIO ABTS timeout */
u32 tio_snsdata_dma; /* TIO sense data DMA */
- u32 tio_rxwchan_wait; /* TIO waiting for RX wait channel */
- u32 tio_rxwchan_avail; /* TIO RX wait channel available */
+ u32 tio_rxwchan_wait; /* TIO waiting for RX wait channel */
+ u32 tio_rxwchan_avail; /* TIO RX wait channel available */
u32 tio_hit_bls; /* TIO IOH BLS event */
u32 tio_uf_recv; /* TIO received UF */
- u32 tio_rd_invalid_sm; /* TIO read reqst in wrong state machine */
- u32 tio_wr_invalid_sm;/* TIO write reqst in wrong state machine */
+ u32 tio_rd_invalid_sm; /* TIO read reqst in wrong state machine */
+ u32 tio_wr_invalid_sm; /* TIO write reqst in wrong state machine */
- u32 ds_rxwchan_wait; /* DS waiting for RX wait channel */
- u32 ds_rxwchan_avail; /* DS RX wait channel available */
+ u32 ds_rxwchan_wait; /* DS waiting for RX wait channel */
+ u32 ds_rxwchan_avail; /* DS RX wait channel available */
u32 ds_unaligned_rd; /* DS unaligned read */
- u32 ds_rdcomp_invalid_sm; /* DS read completed in wrong state machine */
- u32 ds_wrcomp_invalid_sm; /* DS write completed in wrong state machine */
+ u32 ds_rdcomp_invalid_sm; /* DS read completed in wrong state
+ * machine */
+ u32 ds_wrcomp_invalid_sm; /* DS write completed in wrong state
+ * machine */
u32 ds_flush_req; /* DS flush requested */
u32 ds_flush_comp; /* DS flush completed */
u32 ds_xfrdy_exp; /* DS XFER_RDY expired */
u32 ds_seq_cnt_err; /* DS seq cnt error */
u32 ds_seq_len_err; /* DS seq len error */
u32 ds_data_oor; /* DS data out of order */
- u32 ds_hit_bls; /* DS hit BLS */
+ u32 ds_hit_bls; /* DS hit BLS */
u32 ds_edtov_timer_exp; /* DS edtov expired */
u32 ds_cpu_owned; /* DS cpu owned */
u32 ds_hit_class2; /* DS hit class2 */
u32 ds_length_err; /* DS length error */
u32 ds_ro_ooo_err; /* DS relative offset out-of-order error */
- u32 ds_rectov_timer_exp; /* DS rectov expired */
+ u32 ds_rectov_timer_exp;/* DS rectov expired */
u32 ds_unexp_fr_err; /* DS unexp frame error */
};
@@ -208,119 +209,119 @@ struct bfa_fw_io_stats_s {
*/
struct bfa_fw_port_fpg_stats_s {
- u32 intr_evt;
- u32 intr;
- u32 intr_excess;
- u32 intr_cause0;
- u32 intr_other;
- u32 intr_other_ign;
- u32 sig_lost;
- u32 sig_regained;
- u32 sync_lost;
- u32 sync_to;
- u32 sync_regained;
- u32 div2_overflow;
- u32 div2_underflow;
- u32 efifo_overflow;
- u32 efifo_underflow;
- u32 idle_rx;
- u32 lrr_rx;
- u32 lr_rx;
- u32 ols_rx;
- u32 nos_rx;
- u32 lip_rx;
- u32 arbf0_rx;
- u32 arb_rx;
- u32 mrk_rx;
- u32 const_mrk_rx;
- u32 prim_unknown;
+ u32 intr_evt;
+ u32 intr;
+ u32 intr_excess;
+ u32 intr_cause0;
+ u32 intr_other;
+ u32 intr_other_ign;
+ u32 sig_lost;
+ u32 sig_regained;
+ u32 sync_lost;
+ u32 sync_to;
+ u32 sync_regained;
+ u32 div2_overflow;
+ u32 div2_underflow;
+ u32 efifo_overflow;
+ u32 efifo_underflow;
+ u32 idle_rx;
+ u32 lrr_rx;
+ u32 lr_rx;
+ u32 ols_rx;
+ u32 nos_rx;
+ u32 lip_rx;
+ u32 arbf0_rx;
+ u32 arb_rx;
+ u32 mrk_rx;
+ u32 const_mrk_rx;
+ u32 prim_unknown;
};
struct bfa_fw_port_lksm_stats_s {
- u32 hwsm_success; /* hwsm state machine success */
- u32 hwsm_fails; /* hwsm fails */
- u32 hwsm_wdtov; /* hwsm timed out */
- u32 swsm_success; /* swsm success */
- u32 swsm_fails; /* swsm fails */
- u32 swsm_wdtov; /* swsm timed out */
- u32 busybufs; /* link init failed due to busybuf */
- u32 buf_waits; /* bufwait state entries */
- u32 link_fails; /* link failures */
- u32 psp_errors; /* primitive sequence protocol errors */
- u32 lr_unexp; /* No. of times LR rx-ed unexpectedly */
- u32 lrr_unexp; /* No. of times LRR rx-ed unexpectedly */
- u32 lr_tx; /* No. of times LR tx started */
- u32 lrr_tx; /* No. of times LRR tx started */
- u32 ols_tx; /* No. of times OLS tx started */
- u32 nos_tx; /* No. of times NOS tx started */
- u32 hwsm_lrr_rx; /* No. of times LRR rx-ed by HWSM */
- u32 hwsm_lr_rx; /* No. of times LR rx-ed by HWSM */
- u32 bbsc_lr; /* LKSM LR tx for credit recovery */
+ u32 hwsm_success; /* hwsm state machine success */
+ u32 hwsm_fails; /* hwsm fails */
+ u32 hwsm_wdtov; /* hwsm timed out */
+ u32 swsm_success; /* swsm success */
+ u32 swsm_fails; /* swsm fails */
+ u32 swsm_wdtov; /* swsm timed out */
+ u32 busybufs; /* link init failed due to busybuf */
+ u32 buf_waits; /* bufwait state entries */
+ u32 link_fails; /* link failures */
+ u32 psp_errors; /* primitive sequence protocol errors */
+ u32 lr_unexp; /* No. of times LR rx-ed unexpectedly */
+ u32 lrr_unexp; /* No. of times LRR rx-ed unexpectedly */
+ u32 lr_tx; /* No. of times LR tx started */
+ u32 lrr_tx; /* No. of times LRR tx started */
+ u32 ols_tx; /* No. of times OLS tx started */
+ u32 nos_tx; /* No. of times NOS tx started */
+ u32 hwsm_lrr_rx; /* No. of times LRR rx-ed by HWSM */
+ u32 hwsm_lr_rx; /* No. of times LR rx-ed by HWSM */
+ u32 bbsc_lr; /* LKSM LR tx for credit recovery */
};
struct bfa_fw_port_snsm_stats_s {
- u32 hwsm_success; /* Successful hwsm terminations */
- u32 hwsm_fails; /* hwsm fail count */
- u32 hwsm_wdtov; /* hwsm timed out */
- u32 swsm_success; /* swsm success */
- u32 swsm_wdtov; /* swsm timed out */
- u32 error_resets; /* error resets initiated by upsm */
- u32 sync_lost; /* Sync loss count */
- u32 sig_lost; /* Signal loss count */
- u32 asn8g_attempts; /* SNSM HWSM at 8Gbps attempts */
+ u32 hwsm_success; /* Successful hwsm terminations */
+ u32 hwsm_fails; /* hwsm fail count */
+ u32 hwsm_wdtov; /* hwsm timed out */
+ u32 swsm_success; /* swsm success */
+ u32 swsm_wdtov; /* swsm timed out */
+ u32 error_resets; /* error resets initiated by upsm */
+ u32 sync_lost; /* Sync loss count */
+ u32 sig_lost; /* Signal loss count */
+ u32 asn8g_attempts; /* SNSM HWSM at 8Gbps attempts */
};
struct bfa_fw_port_physm_stats_s {
- u32 module_inserts; /* Module insert count */
- u32 module_xtracts; /* Module extracts count */
- u32 module_invalids; /* Invalid module inserted count */
- u32 module_read_ign; /* Module validation status ignored */
- u32 laser_faults; /* Laser fault count */
- u32 rsvd;
+ u32 module_inserts; /* Module insert count */
+ u32 module_xtracts; /* Module extracts count */
+ u32 module_invalids; /* Invalid module inserted count */
+ u32 module_read_ign; /* Module validation status ignored */
+ u32 laser_faults; /* Laser fault count */
+ u32 rsvd;
};
struct bfa_fw_fip_stats_s {
- u32 vlan_req; /* vlan discovery requests */
- u32 vlan_notify; /* vlan notifications */
- u32 vlan_err; /* vlan response error */
- u32 vlan_timeouts; /* vlan disvoery timeouts */
- u32 vlan_invalids; /* invalid vlan in discovery advert. */
- u32 disc_req; /* Discovery solicit requests */
- u32 disc_rsp; /* Discovery solicit response */
- u32 disc_err; /* Discovery advt. parse errors */
- u32 disc_unsol; /* Discovery unsolicited */
- u32 disc_timeouts; /* Discovery timeouts */
- u32 disc_fcf_unavail; /* Discovery FCF Not Avail. */
- u32 linksvc_unsupp; /* Unsupported link service req */
- u32 linksvc_err; /* Parse error in link service req */
- u32 logo_req; /* FIP logos received */
- u32 clrvlink_req; /* Clear virtual link req */
- u32 op_unsupp; /* Unsupported FIP operation */
- u32 untagged; /* Untagged frames (ignored) */
- u32 invalid_version; /* Invalid FIP version */
+ u32 vlan_req; /* vlan discovery requests */
+ u32 vlan_notify; /* vlan notifications */
+ u32 vlan_err; /* vlan response error */
+ u32 vlan_timeouts; /* vlan discovery timeouts */
+ u32 vlan_invalids; /* invalid vlan in discovery advert. */
+ u32 disc_req; /* Discovery solicit requests */
+ u32 disc_rsp; /* Discovery solicit response */
+ u32 disc_err; /* Discovery advt. parse errors */
+ u32 disc_unsol; /* Discovery unsolicited */
+ u32 disc_timeouts; /* Discovery timeouts */
+ u32 disc_fcf_unavail; /* Discovery FCF Not Avail. */
+ u32 linksvc_unsupp; /* Unsupported link service req */
+ u32 linksvc_err; /* Parse error in link service req */
+ u32 logo_req; /* FIP logos received */
+ u32 clrvlink_req; /* Clear virtual link req */
+ u32 op_unsupp; /* Unsupported FIP operation */
+ u32 untagged; /* Untagged frames (ignored) */
+ u32 invalid_version; /* Invalid FIP version */
};
struct bfa_fw_lps_stats_s {
- u32 mac_invalids; /* Invalid mac assigned */
- u32 rsvd;
+ u32 mac_invalids; /* Invalid mac assigned */
+ u32 rsvd;
};
struct bfa_fw_fcoe_stats_s {
- u32 cee_linkups; /* CEE link up count */
- u32 cee_linkdns; /* CEE link down count */
- u32 fip_linkups; /* FIP link up count */
- u32 fip_linkdns; /* FIP link up count */
- u32 fip_fails; /* FIP fail count */
- u32 mac_invalids; /* Invalid mac assigned */
+ u32 cee_linkups; /* CEE link up count */
+ u32 cee_linkdns; /* CEE link down count */
+ u32 fip_linkups; /* FIP link up count */
+ u32 fip_linkdns; /* FIP link up count */
+ u32 fip_fails; /* FIP fail count */
+ u32 mac_invalids; /* Invalid mac assigned */
};
/*
* IOC firmware FCoE port stats
*/
struct bfa_fw_fcoe_port_stats_s {
- struct bfa_fw_fcoe_stats_s fcoe_stats;
- struct bfa_fw_fip_stats_s fip_stats;
+ struct bfa_fw_fcoe_stats_s fcoe_stats;
+ struct bfa_fw_fip_stats_s fip_stats;
};
/*
@@ -335,8 +336,8 @@ struct bfa_fw_fc_uport_stats_s {
* IOC firmware FC port stats
*/
union bfa_fw_fc_port_stats_s {
- struct bfa_fw_fc_uport_stats_s fc_stats;
- struct bfa_fw_fcoe_port_stats_s fcoe_stats;
+ struct bfa_fw_fc_uport_stats_s fc_stats;
+ struct bfa_fw_fcoe_port_stats_s fcoe_stats;
};
/*
@@ -366,25 +367,25 @@ struct bfa_fw_lpsm_stats_s {
*/
struct bfa_fw_trunk_stats_s {
u32 emt_recvd; /* Trunk EMT received */
- u32 emt_accepted; /* Trunk EMT Accepted */
- u32 emt_rejected; /* Trunk EMT rejected */
+ u32 emt_accepted; /* Trunk EMT Accepted */
+ u32 emt_rejected; /* Trunk EMT rejected */
u32 etp_recvd; /* Trunk ETP received */
- u32 etp_accepted; /* Trunk ETP Accepted */
- u32 etp_rejected; /* Trunk ETP rejected */
+ u32 etp_accepted; /* Trunk ETP Accepted */
+ u32 etp_rejected; /* Trunk ETP rejected */
u32 lr_recvd; /* Trunk LR received */
- u32 rsvd; /* padding for 64 bit alignment */
+ u32 rsvd; /* padding for 64 bit alignment */
};
struct bfa_fw_advsm_stats_s {
u32 flogi_sent; /* Flogi sent */
u32 flogi_acc_recvd; /* Flogi Acc received */
u32 flogi_rjt_recvd; /* Flogi rejects received */
- u32 flogi_retries; /* Flogi retries */
+ u32 flogi_retries; /* Flogi retries */
u32 elp_recvd; /* ELP received */
- u32 elp_accepted; /* ELP Accepted */
- u32 elp_rejected; /* ELP rejected */
- u32 elp_dropped; /* ELP dropped */
+ u32 elp_accepted; /* ELP Accepted */
+ u32 elp_rejected; /* ELP rejected */
+ u32 elp_dropped; /* ELP dropped */
};
/*
@@ -521,7 +522,7 @@ struct bfa_qos_vc_attr_s {
u16 total_vc_count; /* Total VC Count */
u16 shared_credit;
u32 elp_opmode_flags;
- struct bfa_qos_vc_info_s vc_info[BFA_QOS_MAX_VC]; /* as many as
+ struct bfa_qos_vc_info_s vc_info[BFA_QOS_MAX_VC]; /* as many as
* total_vc_count */
};
@@ -531,16 +532,16 @@ struct bfa_qos_vc_attr_s {
struct bfa_qos_stats_s {
u32 flogi_sent; /* QoS Flogi sent */
u32 flogi_acc_recvd; /* QoS Flogi Acc received */
- u32 flogi_rjt_recvd; /* QoS Flogi rejects received */
+ u32 flogi_rjt_recvd; /* QoS Flogi rejects received */
u32 flogi_retries; /* QoS Flogi retries */
u32 elp_recvd; /* QoS ELP received */
u32 elp_accepted; /* QoS ELP Accepted */
- u32 elp_rejected; /* QoS ELP rejected */
- u32 elp_dropped; /* QoS ELP dropped */
+ u32 elp_rejected; /* QoS ELP rejected */
+ u32 elp_dropped; /* QoS ELP dropped */
- u32 qos_rscn_recvd; /* QoS RSCN received */
- u32 rsvd; /* padding for 64 bit alignment */
+ u32 qos_rscn_recvd; /* QoS RSCN received */
+ u32 rsvd; /* padding for 64 bit alignment */
};
/*
@@ -548,9 +549,9 @@ struct bfa_qos_stats_s {
*/
struct bfa_fcoe_stats_s {
u64 secs_reset; /* Seconds since stats reset */
- u64 cee_linkups; /* CEE link up */
+ u64 cee_linkups; /* CEE link up */
u64 cee_linkdns; /* CEE link down */
- u64 fip_linkups; /* FIP link up */
+ u64 fip_linkups; /* FIP link up */
u64 fip_linkdns; /* FIP link down */
u64 fip_fails; /* FIP failures */
u64 mac_invalids; /* Invalid mac assignments */
@@ -560,38 +561,38 @@ struct bfa_fcoe_stats_s {
u64 vlan_timeouts; /* Vlan request timeouts */
u64 vlan_invalids; /* Vlan invalids */
u64 disc_req; /* Discovery requests */
- u64 disc_rsp; /* Discovery responses */
+ u64 disc_rsp; /* Discovery responses */
u64 disc_err; /* Discovery error frames */
u64 disc_unsol; /* Discovery unsolicited */
u64 disc_timeouts; /* Discovery timeouts */
u64 disc_fcf_unavail; /* Discovery FCF not avail */
- u64 linksvc_unsupp; /* FIP link service req unsupp. */
- u64 linksvc_err; /* FIP link service req errors */
+ u64 linksvc_unsupp; /* FIP link service req unsupp */
+ u64 linksvc_err; /* FIP link service req errors */
u64 logo_req; /* FIP logos received */
- u64 clrvlink_req; /* Clear virtual link requests */
+ u64 clrvlink_req; /* Clear virtual link requests */
u64 op_unsupp; /* FIP operation unsupp. */
- u64 untagged; /* FIP untagged frames */
+ u64 untagged; /* FIP untagged frames */
u64 txf_ucast; /* Tx FCoE unicast frames */
- u64 txf_ucast_vlan; /* Tx FCoE unicast vlan frames */
+ u64 txf_ucast_vlan; /* Tx FCoE unicast vlan frames */
u64 txf_ucast_octets; /* Tx FCoE unicast octets */
u64 txf_mcast; /* Tx FCoE multicast frames */
- u64 txf_mcast_vlan; /* Tx FCoE multicast vlan frames */
+ u64 txf_mcast_vlan; /* Tx FCoE multicast vlan frames */
u64 txf_mcast_octets; /* Tx FCoE multicast octets */
u64 txf_bcast; /* Tx FCoE broadcast frames */
- u64 txf_bcast_vlan; /* Tx FCoE broadcast vlan frames */
+ u64 txf_bcast_vlan; /* Tx FCoE broadcast vlan frames */
u64 txf_bcast_octets; /* Tx FCoE broadcast octets */
- u64 txf_timeout; /* Tx timeouts */
+ u64 txf_timeout; /* Tx timeouts */
u64 txf_parity_errors; /* Transmit parity err */
- u64 txf_fid_parity_errors; /* Transmit FID parity err */
+ u64 txf_fid_parity_errors; /* Transmit FID parity err */
u64 rxf_ucast_octets; /* Rx FCoE unicast octets */
u64 rxf_ucast; /* Rx FCoE unicast frames */
- u64 rxf_ucast_vlan; /* Rx FCoE unicast vlan frames */
+ u64 rxf_ucast_vlan; /* Rx FCoE unicast vlan frames */
u64 rxf_mcast_octets; /* Rx FCoE multicast octets */
u64 rxf_mcast; /* Rx FCoE multicast frames */
- u64 rxf_mcast_vlan; /* Rx FCoE multicast vlan frames */
+ u64 rxf_mcast_vlan; /* Rx FCoE multicast vlan frames */
u64 rxf_bcast_octets; /* Rx FCoE broadcast octets */
u64 rxf_bcast; /* Rx FCoE broadcast frames */
- u64 rxf_bcast_vlan; /* Rx FCoE broadcast vlan frames */
+ u64 rxf_bcast_vlan; /* Rx FCoE broadcast vlan frames */
};
/*
@@ -672,12 +673,7 @@ struct bfa_itnim_iostats_s {
u32 tm_iocdowns; /* TM cleaned-up due to IOC down */
u32 tm_cleanups; /* TM cleanup requests */
u32 tm_cleanup_comps; /* TM cleanup completions */
- u32 lm_lun_across_sg; /* LM lun is across sg data buf */
- u32 lm_lun_not_sup; /* LM lun not supported */
- u32 lm_rpl_data_changed; /* LM report-lun data changed */
- u32 lm_wire_residue_changed; /* LM report-lun rsp residue changed */
- u32 lm_small_buf_addresidue; /* LM buf smaller than reported cnt */
- u32 lm_lun_not_rdy; /* LM lun not ready */
+ u32 rsvd[6];
};
/* Modify char* port_stt[] in bfal_port.c if a new state was added */
@@ -852,12 +848,12 @@ struct bfa_port_cfg_s {
u8 tx_bbcredit; /* transmit buffer credits */
u8 ratelimit; /* ratelimit enabled or not */
u8 trl_def_speed; /* ratelimit default speed */
- u8 bb_scn; /* BB_SCN value from FLOGI Exchg */
- u8 bb_scn_state; /* Config state of BB_SCN */
- u8 faa_state; /* FAA enabled/disabled */
- u8 rsvd[1];
- u16 path_tov; /* device path timeout */
- u16 q_depth; /* SCSI Queue depth */
+ u8 bb_scn; /* BB_SCN value from FLOGI Exchg */
+ u8 bb_scn_state; /* Config state of BB_SCN */
+ u8 faa_state; /* FAA enabled/disabled */
+ u8 rsvd[1];
+ u16 path_tov; /* device path timeout */
+ u16 q_depth; /* SCSI Queue depth */
};
#pragma pack()
@@ -868,20 +864,21 @@ struct bfa_port_attr_s {
/*
* Static fields
*/
- wwn_t nwwn; /* node wwn */
- wwn_t pwwn; /* port wwn */
- wwn_t factorynwwn; /* factory node wwn */
- wwn_t factorypwwn; /* factory port wwn */
- enum fc_cos cos_supported; /* supported class of services */
- u32 rsvd;
+ wwn_t nwwn; /* node wwn */
+ wwn_t pwwn; /* port wwn */
+ wwn_t factorynwwn; /* factory node wwn */
+ wwn_t factorypwwn; /* factory port wwn */
+ enum fc_cos cos_supported; /* supported class of
+ * services */
+ u32 rsvd;
struct fc_symname_s port_symname; /* port symbolic name */
- enum bfa_port_speed speed_supported; /* supported speeds */
- bfa_boolean_t pbind_enabled;
+ enum bfa_port_speed speed_supported; /* supported speeds */
+ bfa_boolean_t pbind_enabled;
/*
* Configured values
*/
- struct bfa_port_cfg_s pport_cfg; /* pport cfg */
+ struct bfa_port_cfg_s pport_cfg; /* pport cfg */
/*
* Dynamic field - info from BFA
@@ -890,19 +887,20 @@ struct bfa_port_attr_s {
enum bfa_port_speed speed; /* current speed */
enum bfa_port_topology topology; /* current topology */
bfa_boolean_t beacon; /* current beacon status */
- bfa_boolean_t link_e2e_beacon; /* link beacon is on */
- bfa_boolean_t bbsc_op_status; /* fc credit recovery oper state */
+ bfa_boolean_t link_e2e_beacon; /* link beacon is on */
+ bfa_boolean_t bbsc_op_status; /* fc credit recovery oper
+ * state */
/*
* Dynamic field - info from FCS
*/
- u32 pid; /* port ID */
+ u32 pid; /* port ID */
enum bfa_port_type port_type; /* current topology */
- u32 loopback; /* external loopback */
- u32 authfail; /* auth fail state */
+ u32 loopback; /* external loopback */
+ u32 authfail; /* auth fail state */
/* FCoE specific */
- u16 fcoe_vlan;
+ u16 fcoe_vlan;
u8 rsvd1[2];
};
@@ -910,48 +908,48 @@ struct bfa_port_attr_s {
* Port FCP mappings.
*/
struct bfa_port_fcpmap_s {
- char osdevname[256];
+ char osdevname[256];
u32 bus;
u32 target;
u32 oslun;
u32 fcid;
- wwn_t nwwn;
- wwn_t pwwn;
+ wwn_t nwwn;
+ wwn_t pwwn;
u64 fcplun;
- char luid[256];
+ char luid[256];
};
/*
* Port RNID info.
*/
struct bfa_port_rnid_s {
- wwn_t wwn;
+ wwn_t wwn;
u32 unittype;
u32 portid;
u32 attached_nodes_num;
u16 ip_version;
u16 udp_port;
- u8 ipaddr[16];
+ u8 ipaddr[16];
u16 rsvd;
u16 topologydiscoveryflags;
};
#pragma pack(1)
struct bfa_fcport_fcf_s {
- wwn_t name; /* FCF name */
- wwn_t fabric_name; /* Fabric Name */
- u8 fipenabled; /* FIP enabled or not */
- u8 fipfailed; /* FIP failed or not */
- u8 resv[2];
- u8 pri; /* FCF priority */
- u8 version; /* FIP version used */
- u8 available; /* Available for login */
- u8 fka_disabled; /* FKA is disabled */
- u8 maxsz_verified; /* FCoE max size verified */
- u8 fc_map[3]; /* FC map */
- __be16 vlan; /* FCoE vlan tag/priority */
- u32 fka_adv_per; /* FIP ka advert. period */
- mac_t mac; /* FCF mac */
+ wwn_t name; /* FCF name */
+ wwn_t fabric_name; /* Fabric Name */
+ u8 fipenabled; /* FIP enabled or not */
+ u8 fipfailed; /* FIP failed or not */
+ u8 resv[2];
+ u8 pri; /* FCF priority */
+ u8 version; /* FIP version used */
+ u8 available; /* Available for login */
+ u8 fka_disabled; /* FKA is disabled */
+ u8 maxsz_verified; /* FCoE max size verified */
+ u8 fc_map[3]; /* FC map */
+ __be16 vlan; /* FCoE vlan tag/priority */
+ u32 fka_adv_per; /* FIP ka advert. period */
+ mac_t mac; /* FCF mac */
};
/*
@@ -981,7 +979,7 @@ struct bfa_port_link_s {
u8 linkstate_rsn; /* bfa_port_linkstate_rsn_t */
u8 topology; /* P2P/LOOP bfa_port_topology */
u8 speed; /* Link speed (1/2/4/8 G) */
- u32 linkstate_opt; /* Linkstate optional data (debug) */
+ u32 linkstate_opt; /* Linkstate optional data (debug) */
u8 trunked; /* Trunked or not (1 or 0) */
u8 resvd[3];
struct bfa_qos_attr_s qos_attr; /* QoS Attributes */
@@ -1035,7 +1033,7 @@ struct bfa_rport_hal_stats_s {
u32 sm_fwc_del; /* fw create: delete events */
u32 sm_fwc_off; /* fw create: offline events */
u32 sm_fwc_hwf; /* fw create: IOC down */
- u32 sm_fwc_unexp; /* fw create: exception events*/
+ u32 sm_fwc_unexp; /* fw create: exception events*/
u32 sm_on_off; /* online: offline events */
u32 sm_on_del; /* online: delete events */
u32 sm_on_hwf; /* online: IOC down events */
@@ -1043,25 +1041,25 @@ struct bfa_rport_hal_stats_s {
u32 sm_fwd_rsp; /* fw delete: fw responses */
u32 sm_fwd_del; /* fw delete: delete events */
u32 sm_fwd_hwf; /* fw delete: IOC down events */
- u32 sm_fwd_unexp; /* fw delete: exception events*/
+ u32 sm_fwd_unexp; /* fw delete: exception events*/
u32 sm_off_del; /* offline: delete events */
u32 sm_off_on; /* offline: online events */
u32 sm_off_hwf; /* offline: IOC down events */
- u32 sm_off_unexp; /* offline: exception events */
- u32 sm_del_fwrsp; /* delete: fw responses */
+ u32 sm_off_unexp; /* offline: exception events */
+ u32 sm_del_fwrsp; /* delete: fw responses */
u32 sm_del_hwf; /* delete: IOC down events */
- u32 sm_del_unexp; /* delete: exception events */
- u32 sm_delp_fwrsp; /* delete pend: fw responses */
+ u32 sm_del_unexp; /* delete: exception events */
+ u32 sm_delp_fwrsp; /* delete pend: fw responses */
u32 sm_delp_hwf; /* delete pend: IOC downs */
- u32 sm_delp_unexp; /* delete pend: exceptions */
- u32 sm_offp_fwrsp; /* off-pending: fw responses */
+ u32 sm_delp_unexp; /* delete pend: exceptions */
+ u32 sm_offp_fwrsp; /* off-pending: fw responses */
u32 sm_offp_del; /* off-pending: deletes */
u32 sm_offp_hwf; /* off-pending: IOC downs */
- u32 sm_offp_unexp; /* off-pending: exceptions */
+ u32 sm_offp_unexp; /* off-pending: exceptions */
u32 sm_iocd_off; /* IOC down: offline events */
u32 sm_iocd_del; /* IOC down: delete events */
u32 sm_iocd_on; /* IOC down: online events */
- u32 sm_iocd_unexp; /* IOC down: exceptions */
+ u32 sm_iocd_unexp; /* IOC down: exceptions */
u32 rsvd;
};
#pragma pack(1)
@@ -1069,9 +1067,9 @@ struct bfa_rport_hal_stats_s {
* Rport's QoS attributes
*/
struct bfa_rport_qos_attr_s {
- u8 qos_priority; /* rport's QoS priority */
- u8 rsvd[3];
- u32 qos_flow_id; /* QoS flow Id */
+ u8 qos_priority; /* rport's QoS priority */
+ u8 rsvd[3];
+ u32 qos_flow_id; /* QoS flow Id */
};
#pragma pack()
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index 50b6a1c..8d0b88f 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -56,161 +56,6 @@ struct scsi_cdb_s {
#define SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocarion length */
-#define SCSI_SENSE_CUR_ERR 0x70
-#define SCSI_SENSE_DEF_ERR 0x71
-
-/*
- * SCSI additional sense codes
- */
-#define SCSI_ASC_LUN_NOT_READY 0x04
-#define SCSI_ASC_LUN_NOT_SUPPORTED 0x25
-#define SCSI_ASC_TOCC 0x3F
-
-/*
- * SCSI additional sense code qualifiers
- */
-#define SCSI_ASCQ_MAN_INTR_REQ 0x03 /* manual intervention req */
-#define SCSI_ASCQ_RL_DATA_CHANGED 0x0E /* report luns data changed */
-
-/*
- * Methods of reporting informational exceptions
- */
-#define SCSI_MP_IEC_UNIT_ATTN 0x2 /* generate unit attention */
-
-struct scsi_report_luns_data_s {
- u32 lun_list_length; /* length of LUN list length */
- u32 reserved;
- struct scsi_lun lun[1]; /* first LUN in lun list */
-};
-
-struct scsi_inquiry_vendor_s {
- u8 vendor_id[8];
-};
-
-struct scsi_inquiry_prodid_s {
- u8 product_id[16];
-};
-
-struct scsi_inquiry_prodrev_s {
- u8 product_rev[4];
-};
-
-struct scsi_inquiry_data_s {
-#ifdef __BIG_ENDIAN
- u8 peripheral_qual:3; /* peripheral qualifier */
- u8 device_type:5; /* peripheral device type */
- u8 rmb:1; /* removable medium bit */
- u8 device_type_mod:7; /* device type modifier */
- u8 version;
- u8 aenc:1; /* async evt notification capability */
- u8 trm_iop:1; /* terminate I/O process */
- u8 norm_aca:1; /* normal ACA supported */
- u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
- u8 rsp_data_format:4;
- u8 additional_len;
- u8 sccs:1;
- u8 reserved1:7;
- u8 reserved2:1;
- u8 enc_serv:1; /* enclosure service component */
- u8 reserved3:1;
- u8 multi_port:1; /* multi-port device */
- u8 m_chngr:1; /* device in medium transport element */
- u8 ack_req_q:1; /* SIP specific bit */
- u8 addr32:1; /* SIP specific bit */
- u8 addr16:1; /* SIP specific bit */
- u8 rel_adr:1; /* relative address */
- u8 w_bus32:1;
- u8 w_bus16:1;
- u8 synchronous:1;
- u8 linked_commands:1;
- u8 trans_dis:1;
- u8 cmd_queue:1; /* command queueing supported */
- u8 soft_reset:1; /* soft reset alternative (VS) */
-#else
- u8 device_type:5; /* peripheral device type */
- u8 peripheral_qual:3; /* peripheral qualifier */
- u8 device_type_mod:7; /* device type modifier */
- u8 rmb:1; /* removable medium bit */
- u8 version;
- u8 rsp_data_format:4;
- u8 hi_support:1; /* SCSI-3: supports REPORT LUNS */
- u8 norm_aca:1; /* normal ACA supported */
- u8 terminate_iop:1;/* terminate I/O process */
- u8 aenc:1; /* async evt notification capability */
- u8 additional_len;
- u8 reserved1:7;
- u8 sccs:1;
- u8 addr16:1; /* SIP specific bit */
- u8 addr32:1; /* SIP specific bit */
- u8 ack_req_q:1; /* SIP specific bit */
- u8 m_chngr:1; /* device in medium transport element */
- u8 multi_port:1; /* multi-port device */
- u8 reserved3:1; /* TBD - Vendor Specific */
- u8 enc_serv:1; /* enclosure service component */
- u8 reserved2:1;
- u8 soft_seset:1; /* soft reset alternative (VS) */
- u8 cmd_queue:1; /* command queueing supported */
- u8 trans_dis:1;
- u8 linked_commands:1;
- u8 synchronous:1;
- u8 w_bus16:1;
- u8 w_bus32:1;
- u8 rel_adr:1; /* relative address */
-#endif
- struct scsi_inquiry_vendor_s vendor_id;
- struct scsi_inquiry_prodid_s product_id;
- struct scsi_inquiry_prodrev_s product_rev;
- u8 vendor_specific[20];
- u8 reserved4[40];
-};
-
-/*
- * SCSI sense data format
- */
-struct scsi_sense_s {
-#ifdef __BIG_ENDIAN
- u8 valid:1;
- u8 rsp_code:7;
-#else
- u8 rsp_code:7;
- u8 valid:1;
-#endif
- u8 seg_num;
-#ifdef __BIG_ENDIAN
- u8 file_mark:1;
- u8 eom:1; /* end of media */
- u8 ili:1; /* incorrect length indicator */
- u8 reserved:1;
- u8 sense_key:4;
-#else
- u8 sense_key:4;
- u8 reserved:1;
- u8 ili:1; /* incorrect length indicator */
- u8 eom:1; /* end of media */
- u8 file_mark:1;
-#endif
- u8 information[4]; /* device-type or cmd specific info */
- u8 add_sense_length; /* additional sense length */
- u8 command_info[4];/* command specific information */
- u8 asc; /* additional sense code */
- u8 ascq; /* additional sense code qualifier */
- u8 fru_code; /* field replaceable unit code */
-#ifdef __BIG_ENDIAN
- u8 sksv:1; /* sense key specific valid */
- u8 c_d:1; /* command/data bit */
- u8 res1:2;
- u8 bpv:1; /* bit pointer valid */
- u8 bpointer:3; /* bit pointer */
-#else
- u8 bpointer:3; /* bit pointer */
- u8 bpv:1; /* bit pointer valid */
- u8 res1:2;
- u8 c_d:1; /* command/data bit */
- u8 sksv:1; /* sense key specific valid */
-#endif
- u8 fpointer[2]; /* field pointer */
-};
-
/*
* Fibre Channel Header Structure (FCHS) definition
*/
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index e07bd47..f0f80e2 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -24,8 +24,6 @@ BFA_TRC_FILE(HAL, FCPIM);
* BFA ITNIM Related definitions
*/
static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
-static bfa_boolean_t bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim);
-static bfa_boolean_t bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim);
static void bfa_ioim_lm_init(struct bfa_s *bfa);
#define BFA_ITNIM_FROM_TAG(_fcpim, _tag) \
@@ -60,14 +58,6 @@ static void bfa_ioim_lm_init(struct bfa_s *bfa);
} \
} while (0)
-#define bfa_ioim_rp_wwn(__ioim) \
- (((struct bfa_fcs_rport_s *) \
- (__ioim)->itnim->rport->rport_drv)->pwwn)
-
-#define bfa_ioim_lp_wwn(__ioim) \
- ((BFA_LPS_FROM_TAG(BFA_LPS_MOD((__ioim)->bfa), \
- (__ioim)->itnim->rport->rport_info.lp_tag))->pwwn) \
-
#define bfa_itnim_sler_cb(__itnim) do { \
if ((__itnim)->bfa->fcs) \
bfa_cb_itnim_sler((__itnim)->ditn); \
@@ -77,13 +67,6 @@ static void bfa_ioim_lm_init(struct bfa_s *bfa);
} \
} while (0)
-enum bfa_ioim_lm_status {
- BFA_IOIM_LM_PRESENT = 1,
- BFA_IOIM_LM_LUN_NOT_SUP = 2,
- BFA_IOIM_LM_RPL_DATA_CHANGED = 3,
- BFA_IOIM_LM_LUN_NOT_RDY = 4,
-};
-
enum bfa_ioim_lm_ua_status {
BFA_IOIM_LM_UA_RESET = 0,
BFA_IOIM_LM_UA_SET = 1,
@@ -145,9 +128,6 @@ enum bfa_ioim_event {
BFA_IOIM_SM_TMDONE = 16, /* IO cleanup from tskim */
BFA_IOIM_SM_HWFAIL = 17, /* IOC h/w failure event */
BFA_IOIM_SM_IOTOV = 18, /* ITN offline TOV */
- BFA_IOIM_SM_LM_LUN_NOT_SUP = 19,/* lunmask lun not supported */
- BFA_IOIM_SM_LM_RPL_DC = 20, /* lunmask report-lun data changed */
- BFA_IOIM_SM_LM_LUN_NOT_RDY = 21,/* lunmask lun not ready */
};
@@ -245,9 +225,6 @@ static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
static bfa_boolean_t bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
-static void __bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete);
-static void __bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete);
-static void __bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete);
/*
* forward declaration of BFA IO state machine
@@ -445,12 +422,6 @@ bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
- bfa_fcpim_add_iostats(lstats, rstats, lm_lun_across_sg);
- bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_sup);
- bfa_fcpim_add_iostats(lstats, rstats, lm_rpl_data_changed);
- bfa_fcpim_add_iostats(lstats, rstats, lm_wire_residue_changed);
- bfa_fcpim_add_iostats(lstats, rstats, lm_small_buf_addresidue);
- bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_rdy);
}
bfa_status_t
@@ -1580,27 +1551,6 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
__bfa_cb_ioim_abort, ioim);
break;
- case BFA_IOIM_SM_LM_LUN_NOT_SUP:
- bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
- bfa_ioim_move_to_comp_q(ioim);
- bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
- __bfa_cb_ioim_lm_lun_not_sup, ioim);
- break;
-
- case BFA_IOIM_SM_LM_RPL_DC:
- bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
- bfa_ioim_move_to_comp_q(ioim);
- bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
- __bfa_cb_ioim_lm_rpl_dc, ioim);
- break;
-
- case BFA_IOIM_SM_LM_LUN_NOT_RDY:
- bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
- bfa_ioim_move_to_comp_q(ioim);
- bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
- __bfa_cb_ioim_lm_lun_not_rdy, ioim);
- break;
-
default:
bfa_sm_fault(ioim->bfa, event);
}
@@ -2160,243 +2110,6 @@ bfa_ioim_lm_init(struct bfa_s *bfa)
}
}
-/*
- * Validate LUN for LUN masking
- */
-static enum bfa_ioim_lm_status
-bfa_ioim_lm_check(struct bfa_ioim_s *ioim, struct bfa_lps_s *lps,
- struct bfa_rport_s *rp, struct scsi_lun lun)
-{
- u8 i;
- struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
- struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
- struct scsi_cdb_s *cdb = (struct scsi_cdb_s *)cmnd->cmnd;
-
- if ((cdb->scsi_cdb[0] == REPORT_LUNS) &&
- (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
- return BFA_IOIM_LM_PRESENT;
- }
-
- for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
-
- if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
- continue;
-
- if ((scsilun_to_int((struct scsi_lun *)&lun_list[i].lun) ==
- scsilun_to_int((struct scsi_lun *)&lun))
- && (rp->rport_tag == lun_list[i].rp_tag)
- && ((u8)ioim->itnim->rport->rport_info.lp_tag ==
- lun_list[i].lp_tag)) {
- bfa_trc(ioim->bfa, lun_list[i].rp_tag);
- bfa_trc(ioim->bfa, lun_list[i].lp_tag);
- bfa_trc(ioim->bfa, scsilun_to_int(
- (struct scsi_lun *)&lun_list[i].lun));
-
- if ((lun_list[i].ua == BFA_IOIM_LM_UA_SET) &&
- ((cdb->scsi_cdb[0] != INQUIRY) ||
- (cdb->scsi_cdb[0] != REPORT_LUNS))) {
- lun_list[i].ua = BFA_IOIM_LM_UA_RESET;
- return BFA_IOIM_LM_RPL_DATA_CHANGED;
- }
-
- if (cdb->scsi_cdb[0] == REPORT_LUNS)
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
-
- return BFA_IOIM_LM_PRESENT;
- }
- }
-
- if ((cdb->scsi_cdb[0] == INQUIRY) &&
- (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
- ioim->proc_rsp_data = bfa_ioim_lm_proc_inq_data;
- return BFA_IOIM_LM_PRESENT;
- }
-
- if (cdb->scsi_cdb[0] == TEST_UNIT_READY)
- return BFA_IOIM_LM_LUN_NOT_RDY;
-
- return BFA_IOIM_LM_LUN_NOT_SUP;
-}
-
-static bfa_boolean_t
-bfa_ioim_lm_proc_rsp_data_dummy(struct bfa_ioim_s *ioim)
-{
- return BFA_TRUE;
-}
-
-static void
-bfa_ioim_lm_fetch_lun(struct bfa_ioim_s *ioim, u8 *rl_data, int offset,
- int buf_lun_cnt)
-{
- struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
- struct scsi_lun *lun_data = (struct scsi_lun *)(rl_data + offset);
- struct scsi_lun lun;
- int i, j;
-
- bfa_trc(ioim->bfa, buf_lun_cnt);
- for (j = 0; j < buf_lun_cnt; j++) {
- lun = *((struct scsi_lun *)(lun_data + j));
- for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
- if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
- continue;
- if ((lun_list[i].rp_wwn == bfa_ioim_rp_wwn(ioim)) &&
- (lun_list[i].lp_wwn == bfa_ioim_lp_wwn(ioim)) &&
- (scsilun_to_int((struct scsi_lun *)&lun_list[i].lun)
- == scsilun_to_int((struct scsi_lun *)&lun))) {
- lun_list[i].state = BFA_IOIM_LUN_MASK_FETCHED;
- break;
- }
- } /* next lun in mask DB */
- } /* next lun in buf */
-}
-
-static int
-bfa_ioim_lm_update_lun_sg(struct bfa_ioim_s *ioim, u32 *pgdlen,
- struct scsi_report_luns_data_s *rl)
-{
- struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
- struct scatterlist *sg = scsi_sglist(cmnd);
- struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
- struct scsi_lun *prev_rl_data = NULL, *base_rl_data;
- int i, j, sgeid, lun_fetched_cnt = 0, prev_sg_len = 0, base_count;
- int lun_across_sg_bytes, bytes_from_next_buf;
- u64 last_lun, temp_last_lun;
-
- /* fetch luns from the first sg element */
- bfa_ioim_lm_fetch_lun(ioim, (u8 *)(rl->lun), 0,
- (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1);
-
- /* fetch luns from multiple sg elements */
- scsi_for_each_sg(cmnd, sg, scsi_sg_count(cmnd), sgeid) {
- if (sgeid == 0) {
- prev_sg_len = sg_dma_len(sg);
- prev_rl_data = (struct scsi_lun *)
- phys_to_virt(sg_dma_address(sg));
- continue;
- }
-
- /* if the buf is having more data */
- lun_across_sg_bytes = prev_sg_len % sizeof(struct scsi_lun);
- if (lun_across_sg_bytes) {
- bfa_trc(ioim->bfa, lun_across_sg_bytes);
- bfa_stats(ioim->itnim, lm_lun_across_sg);
- bytes_from_next_buf = sizeof(struct scsi_lun) -
- lun_across_sg_bytes;
-
- /* from next buf take higher bytes */
- temp_last_lun = *((u64 *)
- phys_to_virt(sg_dma_address(sg)));
- last_lun |= temp_last_lun >>
- (lun_across_sg_bytes * BITS_PER_BYTE);
-
- /* from prev buf take higher bytes */
- temp_last_lun = *((u64 *)(prev_rl_data +
- (prev_sg_len - lun_across_sg_bytes)));
- temp_last_lun >>= bytes_from_next_buf * BITS_PER_BYTE;
- last_lun = last_lun | (temp_last_lun <<
- (bytes_from_next_buf * BITS_PER_BYTE));
-
- bfa_ioim_lm_fetch_lun(ioim, (u8 *)&last_lun, 0, 1);
- } else
- bytes_from_next_buf = 0;
-
- *pgdlen += sg_dma_len(sg);
- prev_sg_len = sg_dma_len(sg);
- prev_rl_data = (struct scsi_lun *)
- phys_to_virt(sg_dma_address(sg));
- bfa_ioim_lm_fetch_lun(ioim, (u8 *)prev_rl_data,
- bytes_from_next_buf,
- sg_dma_len(sg) / sizeof(struct scsi_lun));
- }
-
- /* update the report luns data - based on fetched luns */
- sg = scsi_sglist(cmnd);
- base_rl_data = (struct scsi_lun *)rl->lun;
- base_count = (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1;
- for (i = 0, j = 0; i < MAX_LUN_MASK_CFG; i++) {
- if (lun_list[i].state == BFA_IOIM_LUN_MASK_FETCHED) {
- base_rl_data[j] = lun_list[i].lun;
- lun_list[i].state = BFA_IOIM_LUN_MASK_ACTIVE;
- j++;
- lun_fetched_cnt++;
- }
-
- if (j > base_count) {
- j = 0;
- sg = sg_next(sg);
- base_rl_data = (struct scsi_lun *)
- phys_to_virt(sg_dma_address(sg));
- base_count = sg_dma_len(sg) / sizeof(struct scsi_lun);
- }
- }
-
- bfa_trc(ioim->bfa, lun_fetched_cnt);
- return lun_fetched_cnt;
-}
-
-static bfa_boolean_t
-bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim)
-{
- struct scsi_inquiry_data_s *inq;
- struct scatterlist *sg = scsi_sglist((struct scsi_cmnd *)ioim->dio);
-
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
- inq = (struct scsi_inquiry_data_s *)phys_to_virt(sg_dma_address(sg));
-
- bfa_trc(ioim->bfa, inq->device_type);
- inq->peripheral_qual = SCSI_INQ_PQ_NOT_CON;
- return 0;
-}
-
-static bfa_boolean_t
-bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim)
-{
- struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
- struct scatterlist *sg = scsi_sglist(cmnd);
- struct bfi_ioim_rsp_s *m;
- struct scsi_report_luns_data_s *rl = NULL;
- int lun_count = 0, lun_fetched_cnt = 0;
- u32 residue, pgdlen = 0;
-
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
- if (bfa_get_lun_mask_status(ioim->bfa) != BFA_LUNMASK_ENABLED)
- return BFA_TRUE;
-
- m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
- if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION)
- return BFA_TRUE;
-
- pgdlen = sg_dma_len(sg);
- bfa_trc(ioim->bfa, pgdlen);
- rl = (struct scsi_report_luns_data_s *)phys_to_virt(sg_dma_address(sg));
- lun_count = cpu_to_be32(rl->lun_list_length) / sizeof(struct scsi_lun);
- lun_fetched_cnt = bfa_ioim_lm_update_lun_sg(ioim, &pgdlen, rl);
-
- if (lun_count == lun_fetched_cnt)
- return BFA_TRUE;
-
- bfa_trc(ioim->bfa, lun_count);
- bfa_trc(ioim->bfa, lun_fetched_cnt);
- bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
-
- if (be32_to_cpu(rl->lun_list_length) <= pgdlen)
- rl->lun_list_length = be32_to_cpu(lun_fetched_cnt) *
- sizeof(struct scsi_lun);
- else
- bfa_stats(ioim->itnim, lm_small_buf_addresidue);
-
- bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
- bfa_trc(ioim->bfa, be32_to_cpu(m->residue));
-
- residue = be32_to_cpu(m->residue);
- residue += (lun_count - lun_fetched_cnt) * sizeof(struct scsi_lun);
- bfa_stats(ioim->itnim, lm_wire_residue_changed);
- m->residue = be32_to_cpu(residue);
- bfa_trc(ioim->bfa, ioim->nsges);
- return BFA_FALSE;
-}
-
static void
__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
{
@@ -2454,83 +2167,6 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
m->scsi_status, sns_len, snsinfo, residue);
}
-static void
-__bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete)
-{
- struct bfa_ioim_s *ioim = cbarg;
- int sns_len = 0xD;
- u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
- struct scsi_sense_s *snsinfo;
-
- if (!complete) {
- bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
- return;
- }
-
- snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
- ioim->fcpim->fcp, ioim->iotag);
- snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
- snsinfo->add_sense_length = 0xa;
- snsinfo->asc = SCSI_ASC_LUN_NOT_SUPPORTED;
- snsinfo->sense_key = ILLEGAL_REQUEST;
- bfa_trc(ioim->bfa, residue);
- bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
- SCSI_STATUS_CHECK_CONDITION, sns_len,
- (u8 *)snsinfo, residue);
-}
-
-static void
-__bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete)
-{
- struct bfa_ioim_s *ioim = cbarg;
- int sns_len = 0xD;
- u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
- struct scsi_sense_s *snsinfo;
-
- if (!complete) {
- bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
- return;
- }
-
- snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
- ioim->iotag);
- snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
- snsinfo->sense_key = SCSI_MP_IEC_UNIT_ATTN;
- snsinfo->asc = SCSI_ASC_TOCC;
- snsinfo->add_sense_length = 0x6;
- snsinfo->ascq = SCSI_ASCQ_RL_DATA_CHANGED;
- bfa_trc(ioim->bfa, residue);
- bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
- SCSI_STATUS_CHECK_CONDITION, sns_len,
- (u8 *)snsinfo, residue);
-}
-
-static void
-__bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete)
-{
- struct bfa_ioim_s *ioim = cbarg;
- int sns_len = 0xD;
- u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
- struct scsi_sense_s *snsinfo;
-
- if (!complete) {
- bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
- return;
- }
-
- snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
- ioim->fcpim->fcp, ioim->iotag);
- snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
- snsinfo->add_sense_length = 0xa;
- snsinfo->sense_key = NOT_READY;
- snsinfo->asc = SCSI_ASC_LUN_NOT_READY;
- snsinfo->ascq = SCSI_ASCQ_MAN_INTR_REQ;
- bfa_trc(ioim->bfa, residue);
- bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
- SCSI_STATUS_CHECK_CONDITION, sns_len,
- (u8 *)snsinfo, residue);
-}
-
void
bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
u16 rp_tag, u8 lp_tag)
@@ -2647,7 +2283,8 @@ bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
if (port) {
*pwwn = port->port_cfg.pwwn;
rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
- rp = rp_fcs->bfa_rport;
+ if (rp_fcs)
+ rp = rp_fcs->bfa_rport;
}
lunm_list = bfa_get_lun_mask_list(bfa);
@@ -2715,7 +2352,8 @@ bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
if (port) {
*pwwn = port->port_cfg.pwwn;
rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
- rp = rp_fcs->bfa_rport;
+ if (rp_fcs)
+ rp = rp_fcs->bfa_rport;
}
}
@@ -2757,7 +2395,6 @@ __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
return;
}
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
0, 0, NULL, 0);
}
@@ -2773,7 +2410,6 @@ __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
return;
}
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
0, 0, NULL, 0);
}
@@ -2788,7 +2424,6 @@ __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
return;
}
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
}
@@ -3132,7 +2767,6 @@ bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
ioim->bfa = fcpim->bfa;
ioim->fcpim = fcpim;
ioim->iosp = iosp;
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
INIT_LIST_HEAD(&ioim->sgpg_q);
bfa_reqq_winit(&ioim->iosp->reqq_wait,
bfa_ioim_qresume, ioim);
@@ -3170,7 +2804,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
evt = BFA_IOIM_SM_DONE;
else
evt = BFA_IOIM_SM_COMP;
- ioim->proc_rsp_data(ioim);
break;
case BFI_IOIM_STS_TIMEDOUT:
@@ -3206,7 +2839,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
if (rsp->abort_tag != ioim->abort_tag) {
bfa_trc(ioim->bfa, rsp->abort_tag);
bfa_trc(ioim->bfa, ioim->abort_tag);
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
return;
}
@@ -3225,7 +2857,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
WARN_ON(1);
}
- ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
bfa_sm_send_event(ioim, evt);
}
@@ -3244,15 +2875,7 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
bfa_ioim_cb_profile_comp(fcpim, ioim);
- if (bfa_get_lun_mask_status(bfa) != BFA_LUNMASK_ENABLED) {
- bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
- return;
- }
-
- if (ioim->proc_rsp_data(ioim) == BFA_TRUE)
- bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
- else
- bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP);
+ bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
}
/*
@@ -3364,35 +2987,6 @@ bfa_ioim_free(struct bfa_ioim_s *ioim)
void
bfa_ioim_start(struct bfa_ioim_s *ioim)
{
- struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
- struct bfa_lps_s *lps;
- enum bfa_ioim_lm_status status;
- struct scsi_lun scsilun;
-
- if (bfa_get_lun_mask_status(ioim->bfa) == BFA_LUNMASK_ENABLED) {
- lps = BFA_IOIM_TO_LPS(ioim);
- int_to_scsilun(cmnd->device->lun, &scsilun);
- status = bfa_ioim_lm_check(ioim, lps,
- ioim->itnim->rport, scsilun);
- if (status == BFA_IOIM_LM_LUN_NOT_RDY) {
- bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_RDY);
- bfa_stats(ioim->itnim, lm_lun_not_rdy);
- return;
- }
-
- if (status == BFA_IOIM_LM_LUN_NOT_SUP) {
- bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_SUP);
- bfa_stats(ioim->itnim, lm_lun_not_sup);
- return;
- }
-
- if (status == BFA_IOIM_LM_RPL_DATA_CHANGED) {
- bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_RPL_DC);
- bfa_stats(ioim->itnim, lm_rpl_data_changed);
- return;
- }
- }
-
bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
/*
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index 1080bcb..36f26da 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -110,7 +110,6 @@ struct bfad_ioim_s;
struct bfad_tskim_s;
typedef void (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim);
-typedef bfa_boolean_t (*bfa_ioim_lm_proc_rsp_data_t) (struct bfa_ioim_s *ioim);
struct bfa_fcpim_s {
struct bfa_s *bfa;
@@ -124,7 +123,6 @@ struct bfa_fcpim_s {
u32 path_tov;
u16 q_depth;
u8 reqq; /* Request queue to be used */
- u8 lun_masking_pending;
struct list_head itnim_q; /* queue of active itnim */
struct list_head ioim_resfree_q; /* IOs waiting for f/w */
struct list_head ioim_comp_q; /* IO global comp Q */
@@ -181,7 +179,6 @@ struct bfa_ioim_s {
u8 reqq; /* Request queue for I/O */
u8 mode; /* IO is passthrough or not */
u64 start_time; /* IO's Profile start val */
- bfa_ioim_lm_proc_rsp_data_t proc_rsp_data; /* RSP data adjust */
};
struct bfa_ioim_sp_s {
@@ -261,10 +258,6 @@ struct bfa_itnim_s {
(__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET; \
} while (0)
-#define BFA_IOIM_TO_LPS(__ioim) \
- BFA_LPS_FROM_TAG(BFA_LPS_MOD(__ioim->bfa), \
- __ioim->itnim->rport->rport_info.lp_tag)
-
static inline bfa_boolean_t
bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
{
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 1ac5aec..eca7ab7 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -3727,11 +3727,11 @@ bfa_sfp_media_get(struct bfa_sfp_s *sfp)
(xmtr_tech & SFP_XMTR_TECH_SA))
*media = BFA_SFP_MEDIA_SW;
/* Check 10G Ethernet Compilance code */
- else if (e10g.b & 0x10)
+ else if (e10g.r.e10g_sr)
*media = BFA_SFP_MEDIA_SW;
- else if (e10g.b & 0x60)
+ else if (e10g.r.e10g_lrm && e10g.r.e10g_lr)
*media = BFA_SFP_MEDIA_LW;
- else if (e10g.r.e10g_unall & 0x80)
+ else if (e10g.r.e10g_unall)
*media = BFA_SFP_MEDIA_UNKNOWN;
else
bfa_trc(sfp, 0);
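With the unions above now guarded by __BIG_ENDIAN, bfa_sfp_media_get() can test named compliance bits instead of hand-written masks such as 0x10 or 0x60, which is what this hunk switches to. A generic, hypothetical illustration of the same idea (the field names and widths below are made up, not the SFP compliance-code layout):

#include <linux/types.h>

union status_u {
	u8 b;
	struct {		/* declaration order is picked by the endian
				 * guard in real code; kept flat for brevity */
		u8 fault:1;
		u8 ready:1;
		u8 rsvd:6;
	} r;
};

static int link_ready(union status_u s)
{
	return s.r.ready;	/* intent is explicit; no magic mask needed */
}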
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index 95adb86..b52cbb6 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -582,11 +582,6 @@ void bfa_cb_rport_qos_scn_prio(void *rport,
#define BFA_LP_TAG_INVALID 0xff
void bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
void bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
-bfa_boolean_t bfa_rport_lunmask_active(struct bfa_rport_s *rp);
-wwn_t bfa_rport_get_pwwn(struct bfa_s *bfa, struct bfa_rport_s *rp);
-struct bfa_rport_s *bfa_rport_get_by_wwn(struct bfa_s *bfa, u16 vf_id,
- wwn_t *lpwwn, wwn_t rpwwn);
-void *bfa_cb_get_rp_by_wwn(void *arg, u16 vf_id, wwn_t *lpwwn, wwn_t rpwwn);
/*
* bfa fcxp API functions
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 66fb725..404fd10 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -674,6 +674,7 @@ bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
spin_lock_irqsave(&bfad->bfad_lock, flags);
bfa_fcs_vport_start(&vport->fcs_vport);
+ list_add_tail(&vport->list_entry, &bfad->vport_list);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
return BFA_STATUS_OK;
@@ -1404,6 +1405,7 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
bfad->ref_count = 0;
bfad->pport.bfad = bfad;
INIT_LIST_HEAD(&bfad->pbc_vport_list);
+ INIT_LIST_HEAD(&bfad->vport_list);
/* Setup the debugfs node for this bfad */
if (bfa_debugfs_enable)
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 9d95844..1938fe0 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -491,7 +491,7 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
free_scsi_host:
bfad_scsi_host_free(bfad, im_port);
-
+ list_del(&vport->list_entry);
kfree(vport);
return 0;
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index 06fc00c..530de2b 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -2394,6 +2394,21 @@ out:
return 0;
}
+/* Function to reset the LUN SCAN mode */
+static void
+bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
+{
+ struct bfad_im_port_s *pport_im = bfad->pport.im_port;
+ struct bfad_vport_s *vport = NULL;
+
+ /* Set the scsi device LUN SCAN flags for base port */
+ bfad_reset_sdev_bflags(pport_im, lunmask_cfg);
+
+ /* Set the scsi device LUN SCAN flags for the vports */
+ list_for_each_entry(vport, &bfad->vport_list, list_entry)
+ bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
+}
+
int
bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
{
@@ -2401,11 +2416,17 @@ bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
unsigned long flags;
spin_lock_irqsave(&bfad->bfad_lock, flags);
- if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE)
+ if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) {
iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
- else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE)
+ /* Set the LUN Scanning mode to be Sequential scan */
+ if (iocmd->status == BFA_STATUS_OK)
+ bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
+ } else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) {
iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
- else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
+ /* Set the LUN Scanning mode to default REPORT_LUNS scan */
+ if (iocmd->status == BFA_STATUS_OK)
+ bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
+ } else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
spin_unlock_irqrestore(&bfad->bfad_lock, flags);
return 0;
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index dee1a09..439c012 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -472,7 +472,7 @@ static const struct file_operations bfad_debugfs_op_regwr = {
struct bfad_debugfs_entry {
const char *name;
- mode_t mode;
+ umode_t mode;
const struct file_operations *fops;
};
@@ -557,8 +557,7 @@ bfad_debugfs_exit(struct bfad_port_s *port)
}
}
- /*
- * Remove the pci_dev debugfs directory for the port */
+ /* Remove the pci_dev debugfs directory for the port */
if (port->port_debugfs_root) {
debugfs_remove(port->port_debugfs_root);
port->port_debugfs_root = NULL;
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 5e19a5f..dc5b9d9 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -43,6 +43,7 @@
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_bsg_fc.h>
+#include <scsi/scsi_devinfo.h>
#include "bfa_modules.h"
#include "bfa_fcs.h"
@@ -227,6 +228,7 @@ struct bfad_s {
struct list_head active_aen_q;
struct bfa_aen_entry_s aen_list[BFA_AEN_MAX_ENTRY];
spinlock_t bfad_aen_spinlock;
+ struct list_head vport_list;
};
/* BFAD state machine events */
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index e5db649..3153923 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -918,16 +918,70 @@ bfad_get_itnim(struct bfad_im_port_s *im_port, int id)
}
/*
+ * Function is invoked from the SCSI Host Template slave_alloc() entry point.
+ * Has the logic to query the LUN Mask database to check if this LUN needs to
+ * be made visible to the SCSI mid-layer or not.
+ *
+ * Returns BFA_STATUS_OK if this LUN needs to be added to the OS stack.
+ * Returns -ENXIO to notify SCSI mid-layer to not add this LUN to the OS stack.
+ */
+static int
+bfad_im_check_if_make_lun_visible(struct scsi_device *sdev,
+ struct fc_rport *rport)
+{
+ struct bfad_itnim_data_s *itnim_data =
+ (struct bfad_itnim_data_s *) rport->dd_data;
+ struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa;
+ struct bfa_rport_s *bfa_rport = itnim_data->itnim->bfa_itnim->rport;
+ struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(bfa);
+ int i = 0, ret = -ENXIO;
+
+ for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+ if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE &&
+ scsilun_to_int(&lun_list[i].lun) == sdev->lun &&
+ lun_list[i].rp_tag == bfa_rport->rport_tag &&
+ lun_list[i].lp_tag == (u8)bfa_rport->rport_info.lp_tag) {
+ ret = BFA_STATUS_OK;
+ break;
+ }
+ }
+ return ret;
+}
+
+/*
* Scsi_Host template entry slave_alloc
*/
static int
bfad_im_slave_alloc(struct scsi_device *sdev)
{
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+ struct bfad_itnim_data_s *itnim_data =
+ (struct bfad_itnim_data_s *) rport->dd_data;
+ struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa;
if (!rport || fc_remote_port_chkready(rport))
return -ENXIO;
+ if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED) {
+ /*
+ * We should not mask LUN 0 - since this will translate
+ * to no LUN / TARGET for the SCSI ml, resulting in no scan.
+ */
+ if (sdev->lun == 0) {
+ sdev->sdev_bflags |= BLIST_NOREPORTLUN |
+ BLIST_SPARSELUN;
+ goto done;
+ }
+
+ /*
+ * Query LUN Mask configuration - to expose this LUN
+ * to the SCSI mid-layer or to mask it.
+ */
+ if (bfad_im_check_if_make_lun_visible(sdev, rport) !=
+ BFA_STATUS_OK)
+ return -ENXIO;
+ }
+done:
sdev->hostdata = rport->dd_data;
return 0;
@@ -1037,6 +1091,8 @@ bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
&& (fc_rport->scsi_target_id < MAX_FCP_TARGET))
itnim->scsi_tgt_id = fc_rport->scsi_target_id;
+ itnim->channel = fc_rport->channel;
+
return;
}
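
The slave_alloc() hook added above is the standard place for a low-level driver to veto a LUN before the SCSI mid-layer instantiates it: returning -ENXIO keeps the device off the OS stack, returning 0 lets the scan proceed. A minimal sketch of that pattern, assuming a hypothetical mask lookup (the example_ names below are illustrative and not part of the bfa driver):

#include <linux/errno.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>

static bool example_lun_is_masked(struct scsi_device *sdev)
{
	/* consult a driver-private LUN mask table here */
	return false;
}

static int example_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;			/* remote port not usable */

	if (sdev->lun != 0 && example_lun_is_masked(sdev))
		return -ENXIO;			/* hide this LUN from the OS */

	sdev->hostdata = rport->dd_data;
	return 0;				/* expose this LUN */
}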
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 004b6cf..0814367 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -91,6 +91,7 @@ struct bfad_itnim_s {
struct fc_rport *fc_rport;
struct bfa_itnim_s *bfa_itnim;
u16 scsi_tgt_id;
+ u16 channel;
u16 queue_work;
unsigned long last_ramp_up_time;
unsigned long last_queue_full_time;
@@ -166,4 +167,30 @@ irqreturn_t bfad_intx(int irq, void *dev_id);
int bfad_im_bsg_request(struct fc_bsg_job *job);
int bfad_im_bsg_timeout(struct fc_bsg_job *job);
+/*
+ * Macro to set the SCSI device sdev_bflags - sdev_bflags are used by the
+ * SCSI mid-layer to choose LUN Scanning mode REPORT_LUNS vs. Sequential Scan
+ *
+ * Internally iterates over all the ITNIMs that are part of the im_port and sets the
+ * sdev_bflags for the scsi_device associated with LUN #0.
+ */
+#define bfad_reset_sdev_bflags(__im_port, __lunmask_cfg) do { \
+ struct scsi_device *__sdev = NULL; \
+ struct bfad_itnim_s *__itnim = NULL; \
+ u32 scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN; \
+ list_for_each_entry(__itnim, &((__im_port)->itnim_mapped_list), \
+ list_entry) { \
+ __sdev = scsi_device_lookup((__im_port)->shost, \
+ __itnim->channel, \
+ __itnim->scsi_tgt_id, 0); \
+ if (__sdev) { \
+ if ((__lunmask_cfg) == BFA_TRUE) \
+ __sdev->sdev_bflags |= scan_flags; \
+ else \
+ __sdev->sdev_bflags &= ~scan_flags; \
+ scsi_device_put(__sdev); \
+ } \
+ } \
+} while (0)
+
#endif
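
The bfad_reset_sdev_bflags() macro above is what the IOCMD_FCPIM_LUNMASK_ENABLE/DISABLE paths earlier in this patch use to flip the scanning mode of the LUN 0 scsi_device on every mapped ITNIM. A short usage sketch, assuming im_port points at a valid struct bfad_im_port_s and lunmask_enabled reflects the requested state:

/* Setting the flags forces sequential scan; clearing them restores the
 * default REPORT_LUNS based scan.
 */
if (lunmask_enabled)
	bfad_reset_sdev_bflags(im_port, BFA_TRUE);	/* BLIST_NOREPORTLUN | BLIST_SPARSELUN */
else
	bfad_reset_sdev_bflags(im_port, BFA_FALSE);	/* clear the flags again */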
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index d1e6971..1a44b45 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -2177,7 +2177,7 @@ static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
return 0;
}
-static mode_t bnx2i_attr_is_visible(int param_type, int param)
+static umode_t bnx2i_attr_is_visible(int param_type, int param)
{
switch (param_type) {
case ISCSI_HOST_PARAM:
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
index 000294a..36739da 100644
--- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
+++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c
@@ -966,7 +966,7 @@ static int init_act_open(struct cxgbi_sock *csk)
csk->saddr.sin_addr.s_addr = chba->ipv4addr;
csk->rss_qid = 0;
- csk->l2t = t3_l2t_get(t3dev, dst_get_neighbour(dst), ndev);
+ csk->l2t = t3_l2t_get(t3dev, dst, ndev);
if (!csk->l2t) {
pr_err("NO l2t available.\n");
return -EINVAL;
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index ac7a9b1..5a4a3bf 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1127,6 +1127,7 @@ static int init_act_open(struct cxgbi_sock *csk)
struct net_device *ndev = cdev->ports[csk->port_id];
struct port_info *pi = netdev_priv(ndev);
struct sk_buff *skb = NULL;
+ struct neighbour *n;
unsigned int step;
log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
@@ -1141,7 +1142,12 @@ static int init_act_open(struct cxgbi_sock *csk)
cxgbi_sock_set_flag(csk, CTPF_HAS_ATID);
cxgbi_sock_get(csk);
- csk->l2t = cxgb4_l2t_get(lldi->l2t, dst_get_neighbour(csk->dst), ndev, 0);
+ n = dst_get_neighbour_noref(csk->dst);
+ if (!n) {
+ pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
+ goto rel_resource;
+ }
+ csk->l2t = cxgb4_l2t_get(lldi->l2t, n, ndev, 0);
if (!csk->l2t) {
pr_err("%s, cannot alloc l2t.\n", ndev->name);
goto rel_resource;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index c10f74a..d3ff9cd 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -472,6 +472,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
struct net_device *ndev;
struct cxgbi_device *cdev;
struct rtable *rt = NULL;
+ struct neighbour *n;
struct flowi4 fl4;
struct cxgbi_sock *csk = NULL;
unsigned int mtu = 0;
@@ -493,7 +494,12 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
goto err_out;
}
dst = &rt->dst;
- ndev = dst_get_neighbour(dst)->dev;
+ n = dst_get_neighbour_noref(dst);
+ if (!n) {
+ err = -ENODEV;
+ goto rel_rt;
+ }
+ ndev = n->dev;
if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
pr_info("multi-cast route %pI4, port %u, dev %s.\n",
@@ -507,7 +513,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
ndev = ip_dev_find(&init_net, daddr->sin_addr.s_addr);
mtu = ndev->mtu;
pr_info("rt dev %s, loopback -> %s, mtu %u.\n",
- dst_get_neighbour(dst)->dev->name, ndev->name, mtu);
+ n->dev->name, ndev->name, mtu);
}
cdev = cxgbi_device_find_by_netdev(ndev, &port);
@@ -1862,8 +1868,9 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC);
if (!tdata->skb) {
- pr_warn("alloc skb %u+%u, opcode 0x%x failed.\n",
- cdev->skb_tx_rsvd, headroom, opcode);
+ struct cxgbi_sock *csk = cconn->cep->csk;
+ struct net_device *ndev = cdev->ports[csk->port_id];
+ ndev->stats.tx_dropped++;
return -ENOMEM;
}
@@ -2569,7 +2576,7 @@ void cxgbi_iscsi_cleanup(struct iscsi_transport *itp,
}
EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup);
-mode_t cxgbi_attr_is_visible(int param_type, int param)
+umode_t cxgbi_attr_is_visible(int param_type, int param)
{
switch (param_type) {
case ISCSI_HOST_PARAM:
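
The conversions to dst_get_neighbour_noref() above follow the pattern required once neighbour pointers stopped carrying a reference: the pointer can now be NULL and is only stable inside an RCU read-side section, so callers must check it and pin anything they want to keep. A minimal sketch of that pattern (illustrative only, not taken from this patch):

#include <net/dst.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>

/* Resolve the egress device of a route without holding a neighbour
 * reference; returns a held net_device (caller must dev_put()) or NULL.
 */
static struct net_device *example_route_dev(struct dst_entry *dst)
{
	struct neighbour *n;
	struct net_device *ndev = NULL;

	rcu_read_lock();
	n = dst_get_neighbour_noref(dst);
	if (n) {
		ndev = n->dev;
		dev_hold(ndev);		/* keep the device past the RCU section */
	}
	rcu_read_unlock();

	return ndev;
}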
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 20c88279..80fa99b 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -709,7 +709,7 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *);
void cxgbi_cleanup_task(struct iscsi_task *task);
-mode_t cxgbi_attr_is_visible(int param_type, int param);
+umode_t cxgbi_attr_is_visible(int param_type, int param);
void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *);
int cxgbi_set_conn_param(struct iscsi_cls_conn *,
enum iscsi_param, char *, int);
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index f5b718d..13aeca3 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -546,7 +546,7 @@ static struct ParameterData __devinitdata cfg_data[] = {
* command line overrides will be used. If set to 1 then safe and
* slow settings will be used.
*/
-static int use_safe_settings = 0;
+static bool use_safe_settings = 0;
module_param_named(safe, use_safe_settings, bool, 0);
MODULE_PARM_DESC(safe, "Use safe and slow settings only. Default: false");
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index 23149b9..48e46f5 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -28,7 +28,6 @@
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(scsi_dh_list);
-static int scsi_dh_list_idx = 1;
static struct scsi_device_handler *get_device_handler(const char *name)
{
@@ -45,21 +44,6 @@ static struct scsi_device_handler *get_device_handler(const char *name)
return found;
}
-static struct scsi_device_handler *get_device_handler_by_idx(int idx)
-{
- struct scsi_device_handler *tmp, *found = NULL;
-
- spin_lock(&list_lock);
- list_for_each_entry(tmp, &scsi_dh_list, list) {
- if (tmp->idx == idx) {
- found = tmp;
- break;
- }
- }
- spin_unlock(&list_lock);
- return found;
-}
-
/*
* device_handler_match_function - Match a device handler to a device
* @sdev - SCSI device to be tested
@@ -84,23 +68,6 @@ device_handler_match_function(struct scsi_device *sdev)
}
/*
- * device_handler_match_devlist - Match a device handler to a device
- * @sdev - SCSI device to be tested
- *
- * Tests @sdev against all device_handler registered in the devlist.
- * Returns the found device handler or NULL if not found.
- */
-static struct scsi_device_handler *
-device_handler_match_devlist(struct scsi_device *sdev)
-{
- int idx;
-
- idx = scsi_get_device_flags_keyed(sdev, sdev->vendor, sdev->model,
- SCSI_DEVINFO_DH);
- return get_device_handler_by_idx(idx);
-}
-
-/*
* device_handler_match - Attach a device handler to a device
* @scsi_dh - The device handler to match against or NULL
* @sdev - SCSI device to be tested against @scsi_dh
@@ -116,8 +83,6 @@ device_handler_match(struct scsi_device_handler *scsi_dh,
struct scsi_device_handler *found_dh;
found_dh = device_handler_match_function(sdev);
- if (!found_dh)
- found_dh = device_handler_match_devlist(sdev);
if (scsi_dh && found_dh != scsi_dh)
found_dh = NULL;
@@ -361,25 +326,14 @@ static int scsi_dh_notifier_remove(struct device *dev, void *data)
*/
int scsi_register_device_handler(struct scsi_device_handler *scsi_dh)
{
- int i;
if (get_device_handler(scsi_dh->name))
return -EBUSY;
spin_lock(&list_lock);
- scsi_dh->idx = scsi_dh_list_idx++;
list_add(&scsi_dh->list, &scsi_dh_list);
spin_unlock(&list_lock);
- for (i = 0; scsi_dh->devlist && scsi_dh->devlist[i].vendor; i++) {
- scsi_dev_info_list_add_keyed(0,
- scsi_dh->devlist[i].vendor,
- scsi_dh->devlist[i].model,
- NULL,
- scsi_dh->idx,
- SCSI_DEVINFO_DH);
- }
-
bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_add);
printk(KERN_INFO "%s: device handler registered\n", scsi_dh->name);
@@ -396,7 +350,6 @@ EXPORT_SYMBOL_GPL(scsi_register_device_handler);
*/
int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
{
- int i;
if (!get_device_handler(scsi_dh->name))
return -ENODEV;
@@ -404,12 +357,6 @@ int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh)
bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh,
scsi_dh_notifier_remove);
- for (i = 0; scsi_dh->devlist && scsi_dh->devlist[i].vendor; i++) {
- scsi_dev_info_list_del_keyed(scsi_dh->devlist[i].vendor,
- scsi_dh->devlist[i].model,
- SCSI_DEVINFO_DH);
- }
-
spin_lock(&list_lock);
list_del(&scsi_dh->list);
spin_unlock(&list_lock);
@@ -588,10 +535,6 @@ static int __init scsi_dh_init(void)
{
int r;
- r = scsi_dev_info_add_list(SCSI_DEVINFO_DH, "SCSI Device Handler");
- if (r)
- return r;
-
r = bus_register_notifier(&scsi_bus_type, &scsi_dh_nb);
if (!r)
@@ -606,7 +549,6 @@ static void __exit scsi_dh_exit(void)
bus_for_each_dev(&scsi_bus_type, NULL, NULL,
scsi_dh_sysfs_attr_remove);
bus_unregister_notifier(&scsi_bus_type, &scsi_dh_nb);
- scsi_dev_info_remove_list(SCSI_DEVINFO_DH);
}
module_init(scsi_dh_init);
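
With the keyed devlist support removed above, a device handler is matched exclusively through its ->match() callback; the clariion, hp_sw and rdac hunks that follow each add one. A hedged sketch of what such a callback and its registration look like (the example_ handler below is hypothetical, and the optional attach/detach/activate methods are omitted):

#include <linux/module.h>
#include <scsi/scsi_device.h>

static bool example_dh_match(struct scsi_device *sdev)
{
	/* typically: skip TPGS/ALUA capable devices, then compare the
	 * INQUIRY vendor/model strings against a private table */
	return !strncmp(sdev->vendor, "ACME    ", 8) &&
	       !strncmp(sdev->model, "WIDGET", 6);
}

static struct scsi_device_handler example_dh = {
	.name	= "example_dh",
	.module	= THIS_MODULE,
	.match	= example_dh_match,
};

/* scsi_register_device_handler(&example_dh) now relies on ->match()
 * alone -- there is no per-handler devlist left to register. */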
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 4ef0212..04c5cea 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -466,6 +466,11 @@ static int alua_check_sense(struct scsi_device *sdev,
* Power On, Reset, or Bus Device Reset, just retry.
*/
return ADD_TO_MLQUEUE;
+ if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01)
+ /*
+ * Mode Parameters Changed
+ */
+ return ADD_TO_MLQUEUE;
if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06)
/*
* ALUA state changed
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 591186c..e1c8be0 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -629,6 +629,24 @@ static const struct scsi_dh_devlist clariion_dev_list[] = {
{NULL, NULL},
};
+static bool clariion_match(struct scsi_device *sdev)
+{
+ int i;
+
+ if (scsi_device_tpgs(sdev))
+ return false;
+
+ for (i = 0; clariion_dev_list[i].vendor; i++) {
+ if (!strncmp(sdev->vendor, clariion_dev_list[i].vendor,
+ strlen(clariion_dev_list[i].vendor)) &&
+ !strncmp(sdev->model, clariion_dev_list[i].model,
+ strlen(clariion_dev_list[i].model))) {
+ return true;
+ }
+ }
+ return false;
+}
+
static int clariion_bus_attach(struct scsi_device *sdev);
static void clariion_bus_detach(struct scsi_device *sdev);
@@ -642,6 +660,7 @@ static struct scsi_device_handler clariion_dh = {
.activate = clariion_activate,
.prep_fn = clariion_prep_fn,
.set_params = clariion_set_params,
+ .match = clariion_match,
};
static int clariion_bus_attach(struct scsi_device *sdev)
diff --git a/drivers/scsi/device_handler/scsi_dh_hp_sw.c b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
index 0f86a18..084062b 100644
--- a/drivers/scsi/device_handler/scsi_dh_hp_sw.c
+++ b/drivers/scsi/device_handler/scsi_dh_hp_sw.c
@@ -320,6 +320,24 @@ static const struct scsi_dh_devlist hp_sw_dh_data_list[] = {
{NULL, NULL},
};
+static bool hp_sw_match(struct scsi_device *sdev)
+{
+ int i;
+
+ if (scsi_device_tpgs(sdev))
+ return false;
+
+ for (i = 0; hp_sw_dh_data_list[i].vendor; i++) {
+ if (!strncmp(sdev->vendor, hp_sw_dh_data_list[i].vendor,
+ strlen(hp_sw_dh_data_list[i].vendor)) &&
+ !strncmp(sdev->model, hp_sw_dh_data_list[i].model,
+ strlen(hp_sw_dh_data_list[i].model))) {
+ return true;
+ }
+ }
+ return false;
+}
+
static int hp_sw_bus_attach(struct scsi_device *sdev);
static void hp_sw_bus_detach(struct scsi_device *sdev);
@@ -331,6 +349,7 @@ static struct scsi_device_handler hp_sw_dh = {
.detach = hp_sw_bus_detach,
.activate = hp_sw_activate,
.prep_fn = hp_sw_prep_fn,
+ .match = hp_sw_match,
};
static int hp_sw_bus_attach(struct scsi_device *sdev)
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 1d31279..20c4557 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -364,10 +364,7 @@ static void release_controller(struct kref *kref)
struct rdac_controller *ctlr;
ctlr = container_of(kref, struct rdac_controller, kref);
- flush_workqueue(kmpath_rdacd);
- spin_lock(&list_lock);
list_del(&ctlr->node);
- spin_unlock(&list_lock);
kfree(ctlr);
}
@@ -376,20 +373,17 @@ static struct rdac_controller *get_controller(int index, char *array_name,
{
struct rdac_controller *ctlr, *tmp;
- spin_lock(&list_lock);
-
list_for_each_entry(tmp, &ctlr_list, node) {
if ((memcmp(tmp->array_id, array_id, UNIQUE_ID_LEN) == 0) &&
(tmp->index == index) &&
(tmp->host == sdev->host)) {
kref_get(&tmp->kref);
- spin_unlock(&list_lock);
return tmp;
}
}
ctlr = kmalloc(sizeof(*ctlr), GFP_ATOMIC);
if (!ctlr)
- goto done;
+ return NULL;
/* initialize fields of controller */
memcpy(ctlr->array_id, array_id, UNIQUE_ID_LEN);
@@ -405,8 +399,7 @@ static struct rdac_controller *get_controller(int index, char *array_name,
INIT_WORK(&ctlr->ms_work, send_mode_select);
INIT_LIST_HEAD(&ctlr->ms_head);
list_add(&ctlr->node, &ctlr_list);
-done:
- spin_unlock(&list_lock);
+
return ctlr;
}
@@ -517,9 +510,12 @@ static int initialize_controller(struct scsi_device *sdev,
index = 0;
else
index = 1;
+
+ spin_lock(&list_lock);
h->ctlr = get_controller(index, array_name, array_id, sdev);
if (!h->ctlr)
err = SCSI_DH_RES_TEMP_UNAVAIL;
+ spin_unlock(&list_lock);
}
return err;
}
@@ -820,6 +816,24 @@ static const struct scsi_dh_devlist rdac_dev_list[] = {
{NULL, NULL},
};
+static bool rdac_match(struct scsi_device *sdev)
+{
+ int i;
+
+ if (scsi_device_tpgs(sdev))
+ return false;
+
+ for (i = 0; rdac_dev_list[i].vendor; i++) {
+ if (!strncmp(sdev->vendor, rdac_dev_list[i].vendor,
+ strlen(rdac_dev_list[i].vendor)) &&
+ !strncmp(sdev->model, rdac_dev_list[i].model,
+ strlen(rdac_dev_list[i].model))) {
+ return true;
+ }
+ }
+ return false;
+}
+
static int rdac_bus_attach(struct scsi_device *sdev);
static void rdac_bus_detach(struct scsi_device *sdev);
@@ -832,6 +846,7 @@ static struct scsi_device_handler rdac_dh = {
.attach = rdac_bus_attach,
.detach = rdac_bus_detach,
.activate = rdac_activate,
+ .match = rdac_match,
};
static int rdac_bus_attach(struct scsi_device *sdev)
@@ -887,7 +902,9 @@ static int rdac_bus_attach(struct scsi_device *sdev)
return 0;
clean_ctlr:
+ spin_lock(&list_lock);
kref_put(&h->ctlr->kref, release_controller);
+ spin_unlock(&list_lock);
failed:
kfree(scsi_dh_data);
@@ -902,14 +919,19 @@ static void rdac_bus_detach( struct scsi_device *sdev )
struct rdac_dh_data *h;
unsigned long flags;
- spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
scsi_dh_data = sdev->scsi_dh_data;
+ h = (struct rdac_dh_data *) scsi_dh_data->buf;
+ if (h->ctlr && h->ctlr->ms_queued)
+ flush_workqueue(kmpath_rdacd);
+
+ spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
sdev->scsi_dh_data = NULL;
spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
- h = (struct rdac_dh_data *) scsi_dh_data->buf;
+ spin_lock(&list_lock);
if (h->ctlr)
kref_put(&h->ctlr->kref, release_controller);
+ spin_unlock(&list_lock);
kfree(scsi_dh_data);
module_put(THIS_MODULE);
sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", RDAC_NAME);
@@ -934,6 +956,8 @@ static int __init rdac_init(void)
if (!kmpath_rdacd) {
scsi_unregister_device_handler(&rdac_dh);
printk(KERN_ERR "kmpath_rdacd creation failed.\n");
+
+ r = -EINVAL;
}
done:
return r;
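
The rdac hunks above move list_lock out of get_controller()/release_controller() and into the callers, so the final kref_put() and the list_del() it triggers run under the same lock the lookup uses; a concurrent get_controller() can then never return a controller whose refcount has already dropped to zero. Reduced to its essentials (names as in the patch, surrounding code elided):

spin_lock(&list_lock);
kref_put(&h->ctlr->kref, release_controller);	/* may list_del() and kfree() */
spin_unlock(&list_lock);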
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 8d67467..e959960 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -58,7 +58,11 @@ module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \
"Direct Data Placement (DDP).");
-DEFINE_MUTEX(fcoe_config_mutex);
+unsigned int fcoe_debug_logging;
+module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+
+static DEFINE_MUTEX(fcoe_config_mutex);
static struct workqueue_struct *fcoe_wq;
@@ -67,8 +71,8 @@ static DECLARE_COMPLETION(fcoe_flush_completion);
/* fcoe host list */
/* must only by accessed under the RTNL mutex */
-LIST_HEAD(fcoe_hostlist);
-DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
+static LIST_HEAD(fcoe_hostlist);
+static DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
/* Function Prototypes */
static int fcoe_reset(struct Scsi_Host *);
@@ -157,7 +161,7 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = {
.lport_set_port_id = fcoe_set_port_id,
};
-struct fc_function_template fcoe_nport_fc_functions = {
+static struct fc_function_template fcoe_nport_fc_functions = {
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
@@ -197,7 +201,7 @@ struct fc_function_template fcoe_nport_fc_functions = {
.bsg_request = fc_lport_bsg_request,
};
-struct fc_function_template fcoe_vport_fc_functions = {
+static struct fc_function_template fcoe_vport_fc_functions = {
.show_host_node_name = 1,
.show_host_port_name = 1,
.show_host_supported_classes = 1,
@@ -433,7 +437,7 @@ static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
*
* Caller must be holding the RTNL mutex
*/
-void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
+static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
{
struct net_device *netdev = fcoe->netdev;
struct fcoe_ctlr *fip = &fcoe->ctlr;
@@ -748,7 +752,7 @@ static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
*
* Returns: True for read types I/O, otherwise returns false.
*/
-bool fcoe_oem_match(struct fc_frame *fp)
+static bool fcoe_oem_match(struct fc_frame *fp)
{
struct fc_frame_header *fh = fc_frame_header_get(fp);
struct fcp_cmnd *fcp;
@@ -756,11 +760,12 @@ bool fcoe_oem_match(struct fc_frame *fp)
if (fc_fcp_is_read(fr_fsp(fp)) &&
(fr_fsp(fp)->data_len > fcoe_ddp_min))
return true;
- else if (!(ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)) {
+ else if ((fr_fsp(fp) == NULL) &&
+ (fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) &&
+ (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)) {
fcp = fc_frame_payload_get(fp, sizeof(*fcp));
- if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN &&
- fcp && (ntohl(fcp->fc_dl) > fcoe_ddp_min) &&
- (fcp->fc_flags & FCP_CFL_WRDATA))
+ if ((fcp->fc_flags & FCP_CFL_WRDATA) &&
+ (ntohl(fcp->fc_dl) > fcoe_ddp_min))
return true;
}
return false;
@@ -1106,7 +1111,7 @@ static int __init fcoe_if_init(void)
*
* Returns: 0 on success
*/
-int __exit fcoe_if_exit(void)
+static int __exit fcoe_if_exit(void)
{
fc_release_transport(fcoe_nport_scsi_transport);
fc_release_transport(fcoe_vport_scsi_transport);
@@ -1295,7 +1300,7 @@ static inline unsigned int fcoe_select_cpu(void)
*
* Returns: 0 for success
*/
-int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
+static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
struct packet_type *ptype, struct net_device *olddev)
{
struct fc_lport *lport;
@@ -1451,7 +1456,7 @@ static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
*
* Return: 0 for success
*/
-int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
+static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
{
int wlen;
u32 crc;
@@ -1671,8 +1676,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
skb->dev ? skb->dev->name : "<NULL>");
port = lport_priv(lport);
- if (skb_is_nonlinear(skb))
- skb_linearize(skb); /* not ideal */
+ skb_linearize(skb); /* the skb_is_nonlinear() check is done inside skb_linearize() */
/*
* Frame length checks and setting up the header pointers
@@ -1728,7 +1732,7 @@ drop:
*
* Return: 0 for success
*/
-int fcoe_percpu_receive_thread(void *arg)
+static int fcoe_percpu_receive_thread(void *arg)
{
struct fcoe_percpu_s *p = arg;
struct sk_buff *skb;
@@ -2146,7 +2150,7 @@ out_nortnl:
* Returns: 0 if the ethtool query was successful
* -1 if the ethtool query failed
*/
-int fcoe_link_speed_update(struct fc_lport *lport)
+static int fcoe_link_speed_update(struct fc_lport *lport)
{
struct net_device *netdev = fcoe_netdev(lport);
struct ethtool_cmd ecmd;
@@ -2180,7 +2184,7 @@ int fcoe_link_speed_update(struct fc_lport *lport)
* Returns: 0 if link is UP and OK, -1 if not
*
*/
-int fcoe_link_ok(struct fc_lport *lport)
+static int fcoe_link_ok(struct fc_lport *lport)
{
struct net_device *netdev = fcoe_netdev(lport);
@@ -2200,7 +2204,7 @@ int fcoe_link_ok(struct fc_lport *lport)
* there no packets that will be handled by the lport, but also that any
* threads already handling packet have returned.
*/
-void fcoe_percpu_clean(struct fc_lport *lport)
+static void fcoe_percpu_clean(struct fc_lport *lport)
{
struct fcoe_percpu_s *pp;
struct fcoe_rcv_info *fr;
@@ -2251,7 +2255,7 @@ void fcoe_percpu_clean(struct fc_lport *lport)
*
* Returns: Always 0 (return value required by FC transport template)
*/
-int fcoe_reset(struct Scsi_Host *shost)
+static int fcoe_reset(struct Scsi_Host *shost)
{
struct fc_lport *lport = shost_priv(shost);
struct fcoe_port *port = lport_priv(lport);
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 6c6884b..bcc89e6 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -40,9 +40,7 @@
#define FCOE_MIN_XID 0x0000 /* the min xid supported by fcoe_sw */
#define FCOE_MAX_XID 0x0FFF /* the max xid supported by fcoe_sw */
-unsigned int fcoe_debug_logging;
-module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+extern unsigned int fcoe_debug_logging;
#define FCOE_LOGGING 0x01 /* General logging, not categorized */
#define FCOE_NETDEV_LOGGING 0x02 /* Netdevice logging */
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
index d969855..d3e4d7c 100644
--- a/drivers/scsi/gdth.h
+++ b/drivers/scsi/gdth.h
@@ -359,7 +359,7 @@ typedef struct {
u32 cmd_buff_addr2; /* physical address of cmd buffer 1 */
u32 cmd_buff_u_addr2; /* reserved for 64 bit addressing */
u32 cmd_buff_indx2; /* cmd buf addr1 unique identifier */
- u32 cmd_buff_size; /* size of each cmd bufer in bytes */
+ u32 cmd_buff_size; /* size of each cmd buffer in bytes */
u32 reserved1;
u32 reserved2;
} __attribute__((packed)) gdth_perf_modes;
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 865d452..b96962c 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -293,12 +293,14 @@ static u32 unresettable_controller[] = {
0x3215103C, /* Smart Array E200i */
0x3237103C, /* Smart Array E500 */
0x323D103C, /* Smart Array P700m */
+ 0x40800E11, /* Smart Array 5i */
0x409C0E11, /* Smart Array 6400 */
0x409D0E11, /* Smart Array 6400 EM */
};
/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
+ 0x40800E11, /* Smart Array 5i */
/* Exclude 640x boards. These are two pci devices in one slot
* which share a battery backed cache module. One controls the
* cache, the other accesses the cache through the one that controls
@@ -4072,10 +4074,10 @@ static int hpsa_request_irq(struct ctlr_info *h,
if (h->msix_vector || h->msi_vector)
rc = request_irq(h->intr[h->intr_mode], msixhandler,
- IRQF_DISABLED, h->devname, h);
+ 0, h->devname, h);
else
rc = request_irq(h->intr[h->intr_mode], intxhandler,
- IRQF_DISABLED, h->devname, h);
+ IRQF_SHARED, h->devname, h);
if (rc) {
dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
h->intr[h->intr_mode], h->devname);
@@ -4269,7 +4271,9 @@ static void stop_controller_lockup_detector(struct ctlr_info *h)
remove_ctlr_from_lockup_detector_list(h);
/* If the list of ctlr's to monitor is empty, stop the thread */
if (list_empty(&hpsa_ctlr_list)) {
+ spin_unlock_irqrestore(&lockup_detector_lock, flags);
kthread_stop(hpsa_lockup_detector);
+ spin_lock_irqsave(&lockup_detector_lock, flags);
hpsa_lockup_detector = NULL;
}
spin_unlock_irqrestore(&lockup_detector_lock, flags);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index fd860d9..b538f08 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -4613,11 +4613,13 @@ static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
ENTER;
ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
- dev_err(&ioa_cfg->pdev->dev,
- "Adapter being reset as a result of error recovery.\n");
+ if (!ioa_cfg->in_reset_reload) {
+ dev_err(&ioa_cfg->pdev->dev,
+ "Adapter being reset as a result of error recovery.\n");
- if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
- ioa_cfg->sdt_state = GET_DUMP;
+ if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
+ ioa_cfg->sdt_state = GET_DUMP;
+ }
rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
@@ -4907,7 +4909,7 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
struct ipr_ioa_cfg *ioa_cfg;
struct ipr_resource_entry *res;
struct ipr_cmd_pkt *cmd_pkt;
- u32 ioasc;
+ u32 ioasc, int_reg;
int op_found = 0;
ENTER;
@@ -4920,7 +4922,17 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
*/
if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
return FAILED;
- if (!res || !ipr_is_gscsi(res))
+ if (!res)
+ return FAILED;
+
+ /*
+ * If we are aborting a timed-out op, chances are that the timeout was caused
+ * by an EEH error that has not yet been detected. In such cases, reading a register will
+ * trigger the EEH recovery infrastructure.
+ */
+ int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
+
+ if (!ipr_is_gscsi(res))
return FAILED;
list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
@@ -7638,8 +7650,12 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
**/
static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
ENTER;
- pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
+ if (ioa_cfg->cfg_locked)
+ pci_cfg_access_unlock(ioa_cfg->pdev);
+ ioa_cfg->cfg_locked = 0;
ipr_cmd->job_step = ipr_reset_restore_cfg_space;
LEAVE;
return IPR_RC_JOB_CONTINUE;
@@ -7660,8 +7676,6 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
int rc = PCIBIOS_SUCCESSFUL;
ENTER;
- pci_block_user_cfg_access(ioa_cfg->pdev);
-
if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
writel(IPR_UPROCI_SIS64_START_BIST,
ioa_cfg->regs.set_uproc_interrupt_reg32);
@@ -7673,7 +7687,9 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
rc = IPR_RC_JOB_RETURN;
} else {
- pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
+ if (ioa_cfg->cfg_locked)
+ pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
+ ioa_cfg->cfg_locked = 0;
ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
rc = IPR_RC_JOB_CONTINUE;
}
@@ -7716,7 +7732,6 @@ static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
struct pci_dev *pdev = ioa_cfg->pdev;
ENTER;
- pci_block_user_cfg_access(pdev);
pci_set_pcie_reset_state(pdev, pcie_warm_reset);
ipr_cmd->job_step = ipr_reset_slot_reset_done;
ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
@@ -7725,6 +7740,56 @@ static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
}
/**
+ * ipr_reset_block_config_access_wait - Wait for permission to block config access
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: This attempts to block config access to the IOA.
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ int rc = IPR_RC_JOB_CONTINUE;
+
+ if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
+ ioa_cfg->cfg_locked = 1;
+ ipr_cmd->job_step = ioa_cfg->reset;
+ } else {
+ if (ipr_cmd->u.time_left) {
+ rc = IPR_RC_JOB_RETURN;
+ ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
+ ipr_reset_start_timer(ipr_cmd,
+ IPR_CHECK_FOR_RESET_TIMEOUT);
+ } else {
+ ipr_cmd->job_step = ioa_cfg->reset;
+ dev_err(&ioa_cfg->pdev->dev,
+ "Timed out waiting to lock config access. Resetting anyway.\n");
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * ipr_reset_block_config_access - Block config access to the IOA
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: This attempts to block config access to the IOA
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE
+ **/
+static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
+{
+ ipr_cmd->ioa_cfg->cfg_locked = 0;
+ ipr_cmd->job_step = ipr_reset_block_config_access_wait;
+ ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
+ return IPR_RC_JOB_CONTINUE;
+}
+
+/**
* ipr_reset_allowed - Query whether or not IOA can be reset
* @ioa_cfg: ioa config struct
*
@@ -7763,7 +7828,7 @@ static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
} else {
- ipr_cmd->job_step = ioa_cfg->reset;
+ ipr_cmd->job_step = ipr_reset_block_config_access;
rc = IPR_RC_JOB_CONTINUE;
}
@@ -7796,7 +7861,7 @@ static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
} else {
- ipr_cmd->job_step = ioa_cfg->reset;
+ ipr_cmd->job_step = ipr_reset_block_config_access;
}
ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
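
The new ipr_reset_block_config_access*() job steps above replace the old unconditional pci_block_user_cfg_access() calls with pci_cfg_access_trylock(), retried from the reset timer so the job never blocks, and balanced by pci_cfg_access_unlock() only when the lock was actually taken. The shape of that pattern, reduced to a sketch (the example_ helpers are illustrative, not part of the ipr driver):

#include <linux/errno.h>
#include <linux/pci.h>

static int example_block_cfg_access(struct pci_dev *pdev, u8 *cfg_locked,
				    u32 *time_left)
{
	if (pci_cfg_access_trylock(pdev)) {
		*cfg_locked = 1;
		return 0;		/* safe to start BIST / slot reset */
	}
	if (*time_left) {
		*time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
		return -EAGAIN;		/* re-arm the reset timer and retry */
	}
	return 0;			/* timed out: reset anyway, unlocked */
}

static void example_unblock_cfg_access(struct pci_dev *pdev, u8 *cfg_locked)
{
	if (*cfg_locked)
		pci_cfg_access_unlock(pdev);
	*cfg_locked = 0;
}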
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index ac84736..b13f9cc 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1387,6 +1387,7 @@ struct ipr_ioa_cfg {
u8 msi_received:1;
u8 sis64:1;
u8 dump_timeout:1;
+ u8 cfg_locked:1;
u8 revid;
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 218f71a..d77891e 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -4494,7 +4494,7 @@ ips_init_scb(ips_ha_t * ha, ips_scb_t * scb)
/* */
/* Initialize a CCB to default values */
/* */
-/* ASSUMED to be callled from within a lock */
+/* ASSUMED to be called from within a lock */
/* */
/****************************************************************************/
static ips_scb_t *
diff --git a/drivers/scsi/isci/firmware/Makefile b/drivers/scsi/isci/firmware/Makefile
deleted file mode 100644
index 5f54461..0000000
--- a/drivers/scsi/isci/firmware/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-# Makefile for create_fw
-#
-CC=gcc
-CFLAGS=-c -Wall -O2 -g
-LDFLAGS=
-SOURCES=create_fw.c
-OBJECTS=$(SOURCES:.cpp=.o)
-EXECUTABLE=create_fw
-
-all: $(SOURCES) $(EXECUTABLE)
-
-$(EXECUTABLE): $(OBJECTS)
- $(CC) $(LDFLAGS) $(OBJECTS) -o $@
-
-.c.o:
- $(CC) $(CFLAGS) $< -O $@
-
-clean:
- rm -f *.o $(EXECUTABLE)
diff --git a/drivers/scsi/isci/firmware/README b/drivers/scsi/isci/firmware/README
deleted file mode 100644
index 8056d2b..0000000
--- a/drivers/scsi/isci/firmware/README
+++ /dev/null
@@ -1,36 +0,0 @@
-This defines the temporary binary blow we are to pass to the SCU
-driver to emulate the binary firmware that we will eventually be
-able to access via NVRAM on the SCU controller.
-
-The current size of the binary blob is expected to be 149 bytes or larger
-
-Header Types:
-0x1: Phy Masks
-0x2: Phy Gens
-0x3: SAS Addrs
-0xff: End of Data
-
-ID string - u8[12]: "#SCU MAGIC#\0"
-Version - u8: 1
-SubVersion - u8: 0
-
-Header Type - u8: 0x1
-Size - u8: 8
-Phy Mask - u32[8]
-
-Header Type - u8: 0x2
-Size - u8: 8
-Phy Gen - u32[8]
-
-Header Type - u8: 0x3
-Size - u8: 8
-Sas Addr - u64[8]
-
-Header Type - u8: 0xf
-
-
-==============================================================================
-
-Place isci_firmware.bin in /lib/firmware
-Be sure to recreate the initramfs image to include the firmware.
-
diff --git a/drivers/scsi/isci/firmware/create_fw.c b/drivers/scsi/isci/firmware/create_fw.c
deleted file mode 100644
index c7a2887..0000000
--- a/drivers/scsi/isci/firmware/create_fw.c
+++ /dev/null
@@ -1,99 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <string.h>
-#include <errno.h>
-#include <asm/types.h>
-#include <strings.h>
-#include <stdint.h>
-
-#include "create_fw.h"
-#include "../probe_roms.h"
-
-int write_blob(struct isci_orom *isci_orom)
-{
- FILE *fd;
- int err;
- size_t count;
-
- fd = fopen(blob_name, "w+");
- if (!fd) {
- perror("Open file for write failed");
- fclose(fd);
- return -EIO;
- }
-
- count = fwrite(isci_orom, sizeof(struct isci_orom), 1, fd);
- if (count != 1) {
- perror("Write data failed");
- fclose(fd);
- return -EIO;
- }
-
- fclose(fd);
-
- return 0;
-}
-
-void set_binary_values(struct isci_orom *isci_orom)
-{
- int ctrl_idx, phy_idx, port_idx;
-
- /* setting OROM signature */
- strncpy(isci_orom->hdr.signature, sig, strlen(sig));
- isci_orom->hdr.version = version;
- isci_orom->hdr.total_block_length = sizeof(struct isci_orom);
- isci_orom->hdr.hdr_length = sizeof(struct sci_bios_oem_param_block_hdr);
- isci_orom->hdr.num_elements = num_elements;
-
- for (ctrl_idx = 0; ctrl_idx < 2; ctrl_idx++) {
- isci_orom->ctrl[ctrl_idx].controller.mode_type = mode_type;
- isci_orom->ctrl[ctrl_idx].controller.max_concurrent_dev_spin_up =
- max_num_concurrent_dev_spin_up;
- isci_orom->ctrl[ctrl_idx].controller.do_enable_ssc =
- enable_ssc;
-
- for (port_idx = 0; port_idx < 4; port_idx++)
- isci_orom->ctrl[ctrl_idx].ports[port_idx].phy_mask =
- phy_mask[ctrl_idx][port_idx];
-
- for (phy_idx = 0; phy_idx < 4; phy_idx++) {
- isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.high =
- (__u32)(sas_addr[ctrl_idx][phy_idx] >> 32);
- isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.low =
- (__u32)(sas_addr[ctrl_idx][phy_idx]);
-
- isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control0 =
- afe_tx_amp_control0;
- isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control1 =
- afe_tx_amp_control1;
- isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control2 =
- afe_tx_amp_control2;
- isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control3 =
- afe_tx_amp_control3;
- }
- }
-}
-
-int main(void)
-{
- int err;
- struct isci_orom *isci_orom;
-
- isci_orom = malloc(sizeof(struct isci_orom));
- memset(isci_orom, 0, sizeof(struct isci_orom));
-
- set_binary_values(isci_orom);
-
- err = write_blob(isci_orom);
- if (err < 0) {
- free(isci_orom);
- return err;
- }
-
- free(isci_orom);
- return 0;
-}
diff --git a/drivers/scsi/isci/firmware/create_fw.h b/drivers/scsi/isci/firmware/create_fw.h
deleted file mode 100644
index 5f29882..0000000
--- a/drivers/scsi/isci/firmware/create_fw.h
+++ /dev/null
@@ -1,77 +0,0 @@
-#ifndef _CREATE_FW_H_
-#define _CREATE_FW_H_
-#include "../probe_roms.h"
-
-
-/* we are configuring for 2 SCUs */
-static const int num_elements = 2;
-
-/*
- * For all defined arrays:
- * elements 0-3 are for SCU0, ports 0-3
- * elements 4-7 are for SCU1, ports 0-3
- *
- * valid configurations for one SCU are:
- * P0 P1 P2 P3
- * ----------------
- * 0xF,0x0,0x0,0x0 # 1 x4 port
- * 0x3,0x0,0x4,0x8 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are each x1
- * # ports
- * 0x1,0x2,0xC,0x0 # Phys 0 and 1 are each x1 ports, phy 2 and phy 3 are a x2
- * # port
- * 0x3,0x0,0xC,0x0 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are a x2 port
- * 0x1,0x2,0x4,0x8 # Each phy is a x1 port (this is the default configuration)
- *
- * if there is a port/phy on which you do not wish to override the default
- * values, use the value assigned to UNINIT_PARAM (255).
- */
-
-/* discovery mode type (port auto config mode by default ) */
-
-/*
- * if there is a port/phy on which you do not wish to override the default
- * values, use the value "0000000000000000". SAS address of zero's is
- * considered invalid and will not be used.
- */
-#ifdef MPC
-static const int mode_type = SCIC_PORT_MANUAL_CONFIGURATION_MODE;
-static const __u8 phy_mask[2][4] = { {1, 2, 4, 8},
- {1, 2, 4, 8} };
-static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFFF0000001ULL,
- 0x5FCFFFFFF0000002ULL,
- 0x5FCFFFFFF0000003ULL,
- 0x5FCFFFFFF0000004ULL },
- { 0x5FCFFFFFF0000005ULL,
- 0x5FCFFFFFF0000006ULL,
- 0x5FCFFFFFF0000007ULL,
- 0x5FCFFFFFF0000008ULL } };
-#else /* APC (default) */
-static const int mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
-static const __u8 phy_mask[2][4];
-static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFF00000001ULL,
- 0x5FCFFFFF00000001ULL,
- 0x5FCFFFFF00000001ULL,
- 0x5FCFFFFF00000001ULL },
- { 0x5FCFFFFF00000002ULL,
- 0x5FCFFFFF00000002ULL,
- 0x5FCFFFFF00000002ULL,
- 0x5FCFFFFF00000002ULL } };
-#endif
-
-/* Maximum number of concurrent device spin up */
-static const int max_num_concurrent_dev_spin_up = 1;
-
-/* enable of ssc operation */
-static const int enable_ssc;
-
-/* AFE_TX_AMP_CONTROL */
-static const unsigned int afe_tx_amp_control0 = 0x000bdd08;
-static const unsigned int afe_tx_amp_control1 = 0x000ffc00;
-static const unsigned int afe_tx_amp_control2 = 0x000b7c09;
-static const unsigned int afe_tx_amp_control3 = 0x000afc6e;
-
-static const char blob_name[] = "isci_firmware.bin";
-static const char sig[] = "ISCUOEMB";
-static const unsigned char version = 0x10;
-
-#endif
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index e7fe9c4..418391b 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -899,7 +899,8 @@ static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
*/
if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
(iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
- (iphy->is_in_link_training == true && is_phy_starting(iphy))) {
+ (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
+ (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask)) {
is_controller_start_complete = false;
break;
}
@@ -1666,6 +1667,9 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
/* Default to no SSC operation. */
ihost->oem_parameters.controller.do_enable_ssc = false;
+ /* Default to short cables on all phys. */
+ ihost->oem_parameters.controller.cable_selection_mask = 0;
+
/* Initialize all of the port parameter information to narrow ports. */
for (index = 0; index < SCI_MAX_PORTS; index++) {
ihost->oem_parameters.ports[index].phy_mask = 0;
@@ -1673,8 +1677,9 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
/* Initialize all of the phy parameter information. */
for (index = 0; index < SCI_MAX_PHYS; index++) {
- /* Default to 6G (i.e. Gen 3) for now. */
- ihost->user_parameters.phys[index].max_speed_generation = 3;
+ /* Default to 3G (i.e. Gen 2). */
+ ihost->user_parameters.phys[index].max_speed_generation =
+ SCIC_SDS_PARM_GEN2_SPEED;
/* the frequencies cannot be 0 */
ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
@@ -1694,7 +1699,7 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
ihost->user_parameters.ssp_inactivity_timeout = 5;
ihost->user_parameters.stp_max_occupancy_timeout = 5;
ihost->user_parameters.ssp_max_occupancy_timeout = 20;
- ihost->user_parameters.no_outbound_task_timeout = 20;
+ ihost->user_parameters.no_outbound_task_timeout = 2;
}
static void controller_timeout(unsigned long data)
@@ -1759,7 +1764,7 @@ static enum sci_status sci_controller_construct(struct isci_host *ihost,
return sci_controller_reset(ihost);
}
-int sci_oem_parameters_validate(struct sci_oem_params *oem)
+int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version)
{
int i;
@@ -1791,18 +1796,63 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem)
oem->controller.max_concurr_spin_up < 1)
return -EINVAL;
+ if (oem->controller.do_enable_ssc) {
+ if (version < ISCI_ROM_VER_1_1 && oem->controller.do_enable_ssc != 1)
+ return -EINVAL;
+
+ if (version >= ISCI_ROM_VER_1_1) {
+ u8 test = oem->controller.ssc_sata_tx_spread_level;
+
+ switch (test) {
+ case 0:
+ case 2:
+ case 3:
+ case 6:
+ case 7:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ test = oem->controller.ssc_sas_tx_spread_level;
+ if (oem->controller.ssc_sas_tx_type == 0) {
+ switch (test) {
+ case 0:
+ case 2:
+ case 3:
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else if (oem->controller.ssc_sas_tx_type == 1) {
+ switch (test) {
+ case 0:
+ case 3:
+ case 6:
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ }
+ }
+
return 0;
}
static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
{
u32 state = ihost->sm.current_state_id;
+ struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
if (state == SCIC_RESET ||
state == SCIC_INITIALIZING ||
state == SCIC_INITIALIZED) {
+ u8 oem_version = pci_info->orom ? pci_info->orom->hdr.version :
+ ISCI_ROM_VER_1_0;
- if (sci_oem_parameters_validate(&ihost->oem_parameters))
+ if (sci_oem_parameters_validate(&ihost->oem_parameters,
+ oem_version))
return SCI_FAILURE_INVALID_PARAMETER_VALUE;
return SCI_SUCCESS;
@@ -1857,6 +1907,31 @@ static void power_control_timeout(unsigned long data)
ihost->power_control.phys_waiting--;
ihost->power_control.phys_granted_power++;
sci_phy_consume_power_handler(iphy);
+
+ if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
+ u8 j;
+
+ for (j = 0; j < SCI_MAX_PHYS; j++) {
+ struct isci_phy *requester = ihost->power_control.requesters[j];
+
+ /*
+ * Search the power_control queue to see if there are other phys
+ * attached to the same remote device. If found, take all of
+ * them out of await_sas_power state.
+ */
+ if (requester != NULL && requester != iphy) {
+ u8 other = memcmp(requester->frame_rcvd.iaf.sas_addr,
+ iphy->frame_rcvd.iaf.sas_addr,
+ sizeof(requester->frame_rcvd.iaf.sas_addr));
+
+ if (other == 0) {
+ ihost->power_control.requesters[j] = NULL;
+ ihost->power_control.phys_waiting--;
+ sci_phy_consume_power_handler(requester);
+ }
+ }
+ }
+ }
}
/*
@@ -1891,9 +1966,34 @@ void sci_controller_power_control_queue_insert(struct isci_host *ihost,
ihost->power_control.timer_started = true;
} else {
- /* Add the phy in the waiting list */
- ihost->power_control.requesters[iphy->phy_index] = iphy;
- ihost->power_control.phys_waiting++;
+ /*
+ * If there are phys attached to the same SAS address as this phy that are
+ * already in the READY state, this phy does not need to wait.
+ */
+ u8 i;
+ struct isci_phy *current_phy;
+
+ for (i = 0; i < SCI_MAX_PHYS; i++) {
+ u8 other;
+ current_phy = &ihost->phys[i];
+
+ other = memcmp(current_phy->frame_rcvd.iaf.sas_addr,
+ iphy->frame_rcvd.iaf.sas_addr,
+ sizeof(current_phy->frame_rcvd.iaf.sas_addr));
+
+ if (current_phy->sm.current_state_id == SCI_PHY_READY &&
+ current_phy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS &&
+ other == 0) {
+ sci_phy_consume_power_handler(iphy);
+ break;
+ }
+ }
+
+ if (i == SCI_MAX_PHYS) {
+ /* Add the phy in the waiting list */
+ ihost->power_control.requesters[iphy->phy_index] = iphy;
+ ihost->power_control.phys_waiting++;
+ }
}
}
@@ -1908,162 +2008,250 @@ void sci_controller_power_control_queue_remove(struct isci_host *ihost,
ihost->power_control.requesters[iphy->phy_index] = NULL;
}
+static int is_long_cable(int phy, unsigned char selection_byte)
+{
+ return !!(selection_byte & (1 << phy));
+}
+
+static int is_medium_cable(int phy, unsigned char selection_byte)
+{
+ return !!(selection_byte & (1 << (phy + 4)));
+}
+
+static enum cable_selections decode_selection_byte(
+ int phy,
+ unsigned char selection_byte)
+{
+ return ((selection_byte & (1 << phy)) ? 1 : 0)
+ + (selection_byte & (1 << (phy + 4)) ? 2 : 0);
+}
+
+static unsigned char *to_cable_select(struct isci_host *ihost)
+{
+ if (is_cable_select_overridden())
+ return ((unsigned char *)&cable_selection_override)
+ + ihost->id;
+ else
+ return &ihost->oem_parameters.controller.cable_selection_mask;
+}
+
+enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy)
+{
+ return decode_selection_byte(phy, *to_cable_select(ihost));
+}
+
+char *lookup_cable_names(enum cable_selections selection)
+{
+ static char *cable_names[] = {
+ [short_cable] = "short",
+ [long_cable] = "long",
+ [medium_cable] = "medium",
+ [undefined_cable] = "<undefined, assumed long>" /* bit 0==1 */
+ };
+ return (selection <= undefined_cable) ? cable_names[selection]
+ : cable_names[undefined_cable];
+}
+
#define AFE_REGISTER_WRITE_DELAY 10
-/* Initialize the AFE for this phy index. We need to read the AFE setup from
- * the OEM parameters
- */
static void sci_controller_afe_initialization(struct isci_host *ihost)
{
+ struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
const struct sci_oem_params *oem = &ihost->oem_parameters;
struct pci_dev *pdev = ihost->pdev;
u32 afe_status;
u32 phy_id;
+ unsigned char cable_selection_mask = *to_cable_select(ihost);
/* Clear DFX Status registers */
- writel(0x0081000f, &ihost->scu_registers->afe.afe_dfx_master_control0);
+ writel(0x0081000f, &afe->afe_dfx_master_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
- if (is_b0(pdev)) {
+ if (is_b0(pdev) || is_c0(pdev) || is_c1(pdev)) {
/* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
- * Timer, PM Stagger Timer */
- writel(0x0007BFFF, &ihost->scu_registers->afe.afe_pmsn_master_control2);
+ * Timer, PM Stagger Timer
+ */
+ writel(0x0007FFFF, &afe->afe_pmsn_master_control2);
udelay(AFE_REGISTER_WRITE_DELAY);
}
/* Configure bias currents to normal */
if (is_a2(pdev))
- writel(0x00005A00, &ihost->scu_registers->afe.afe_bias_control);
+ writel(0x00005A00, &afe->afe_bias_control);
else if (is_b0(pdev) || is_c0(pdev))
- writel(0x00005F00, &ihost->scu_registers->afe.afe_bias_control);
+ writel(0x00005F00, &afe->afe_bias_control);
+ else if (is_c1(pdev))
+ writel(0x00005500, &afe->afe_bias_control);
udelay(AFE_REGISTER_WRITE_DELAY);
/* Enable PLL */
- if (is_b0(pdev) || is_c0(pdev))
- writel(0x80040A08, &ihost->scu_registers->afe.afe_pll_control0);
- else
- writel(0x80040908, &ihost->scu_registers->afe.afe_pll_control0);
+ if (is_a2(pdev))
+ writel(0x80040908, &afe->afe_pll_control0);
+ else if (is_b0(pdev) || is_c0(pdev))
+ writel(0x80040A08, &afe->afe_pll_control0);
+ else if (is_c1(pdev)) {
+ writel(0x80000B08, &afe->afe_pll_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ writel(0x00000B08, &afe->afe_pll_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ writel(0x80000B08, &afe->afe_pll_control0);
+ }
udelay(AFE_REGISTER_WRITE_DELAY);
/* Wait for the PLL to lock */
do {
- afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status);
+ afe_status = readl(&afe->afe_common_block_status);
udelay(AFE_REGISTER_WRITE_DELAY);
} while ((afe_status & 0x00001000) == 0);
if (is_a2(pdev)) {
- /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
- writel(0x7bcc96ad, &ihost->scu_registers->afe.afe_pmsn_master_control0);
+ /* Shorten SAS SNW lock time (RxLock timer value from 76
+ * us to 50 us)
+ */
+ writel(0x7bcc96ad, &afe->afe_pmsn_master_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
}
for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
+ struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_id];
const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
+ int cable_length_long =
+ is_long_cable(phy_id, cable_selection_mask);
+ int cable_length_medium =
+ is_medium_cable(phy_id, cable_selection_mask);
- if (is_b0(pdev)) {
- /* Configure transmitter SSC parameters */
- writel(0x00030000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
+ if (is_a2(pdev)) {
+ /* All defaults, except the Receive Word
+ * Alignment/Comma Detect Enable....(0xe800)
+ */
+ writel(0x00004512, &xcvr->afe_xcvr_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(0x0050100F, &xcvr->afe_xcvr_control1);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+ } else if (is_b0(pdev)) {
+ /* Configure transmitter SSC parameters */
+ writel(0x00030000, &xcvr->afe_tx_ssc_control);
udelay(AFE_REGISTER_WRITE_DELAY);
} else if (is_c0(pdev)) {
- /* Configure transmitter SSC parameters */
- writel(0x0003000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
+ /* Configure transmitter SSC parameters */
+ writel(0x00010202, &xcvr->afe_tx_ssc_control);
udelay(AFE_REGISTER_WRITE_DELAY);
- /*
- * All defaults, except the Receive Word Alignament/Comma Detect
- * Enable....(0xe800) */
- writel(0x00004500, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+ /* All defaults, except the Receive Word
+ * Alignment/Comma Detect Enable....(0xe800)
+ */
+ writel(0x00014500, &xcvr->afe_xcvr_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
- } else {
- /*
- * All defaults, except the Receive Word Alignament/Comma Detect
- * Enable....(0xe800) */
- writel(0x00004512, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+ } else if (is_c1(pdev)) {
+ /* Configure transmitter SSC parameters */
+ writel(0x00010202, &xcvr->afe_tx_ssc_control);
udelay(AFE_REGISTER_WRITE_DELAY);
- writel(0x0050100F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
+ /* All defaults, except the Receive Word
+ * Alignment/Comma Detect Enable....(0xe800)
+ */
+ writel(0x0001C500, &xcvr->afe_xcvr_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
}
- /*
- * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
- * & increase TX int & ext bias 20%....(0xe85c) */
+ /* Power up TX and RX out from power down (PWRDNTX and
+ * PWRDNRX) & increase TX int & ext bias 20%....(0xe85c)
+ */
if (is_a2(pdev))
- writel(0x000003F0, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ writel(0x000003F0, &xcvr->afe_channel_control);
else if (is_b0(pdev)) {
- /* Power down TX and RX (PWRDNTX and PWRDNRX) */
- writel(0x000003D7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ writel(0x000003D7, &xcvr->afe_channel_control);
udelay(AFE_REGISTER_WRITE_DELAY);
- /*
- * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
- * & increase TX int & ext bias 20%....(0xe85c) */
- writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
- } else {
- writel(0x000001E7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ writel(0x000003D4, &xcvr->afe_channel_control);
+ } else if (is_c0(pdev)) {
+ writel(0x000001E7, &xcvr->afe_channel_control);
udelay(AFE_REGISTER_WRITE_DELAY);
- /*
- * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
- * & increase TX int & ext bias 20%....(0xe85c) */
- writel(0x000001E4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+ writel(0x000001E4, &xcvr->afe_channel_control);
+ } else if (is_c1(pdev)) {
+ writel(cable_length_long ? 0x000002F7 : 0x000001F7,
+ &xcvr->afe_channel_control);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(cable_length_long ? 0x000002F4 : 0x000001F4,
+ &xcvr->afe_channel_control);
}
udelay(AFE_REGISTER_WRITE_DELAY);
if (is_a2(pdev)) {
/* Enable TX equalization (0xe824) */
- writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+ writel(0x00040000, &xcvr->afe_tx_control);
udelay(AFE_REGISTER_WRITE_DELAY);
}
- /*
- * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
- * RDD=0x0(RX Detect Enabled) ....(0xe800) */
- writel(0x00004100, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+ if (is_a2(pdev) || is_b0(pdev))
+ /* RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0,
+ * TPD=0x0(TX Power On), RDD=0x0(RX Detect
+ * Enabled) ....(0xe800)
+ */
+ writel(0x00004100, &xcvr->afe_xcvr_control0);
+ else if (is_c0(pdev))
+ writel(0x00014100, &xcvr->afe_xcvr_control0);
+ else if (is_c1(pdev))
+ writel(0x0001C100, &xcvr->afe_xcvr_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
/* Leave DFE/FFE on */
if (is_a2(pdev))
- writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+ writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
else if (is_b0(pdev)) {
- writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+ writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
/* Enable TX equalization (0xe824) */
- writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
- } else {
- writel(0x0140DF0F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1);
+ writel(0x00040000, &xcvr->afe_tx_control);
+ } else if (is_c0(pdev)) {
+ writel(0x01400C0F, &xcvr->afe_rx_ssc_control1);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(0x3F6F103F, &xcvr->afe_rx_ssc_control0);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ /* Enable TX equalization (0xe824) */
+ writel(0x00040000, &xcvr->afe_tx_control);
+ } else if (is_c1(pdev)) {
+ writel(cable_length_long ? 0x01500C0C :
+ cable_length_medium ? 0x01400C0D : 0x02400C0D,
+ &xcvr->afe_xcvr_control1);
+ udelay(AFE_REGISTER_WRITE_DELAY);
+
+ writel(0x000003E0, &xcvr->afe_dfx_rx_control1);
udelay(AFE_REGISTER_WRITE_DELAY);
- writel(0x3F6F103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+ writel(cable_length_long ? 0x33091C1F :
+ cable_length_medium ? 0x3315181F : 0x2B17161F,
+ &xcvr->afe_rx_ssc_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
/* Enable TX equalization (0xe824) */
- writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+ writel(0x00040000, &xcvr->afe_tx_control);
}
udelay(AFE_REGISTER_WRITE_DELAY);
- writel(oem_phy->afe_tx_amp_control0,
- &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
+ writel(oem_phy->afe_tx_amp_control0, &xcvr->afe_tx_amp_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
- writel(oem_phy->afe_tx_amp_control1,
- &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
+ writel(oem_phy->afe_tx_amp_control1, &xcvr->afe_tx_amp_control1);
udelay(AFE_REGISTER_WRITE_DELAY);
- writel(oem_phy->afe_tx_amp_control2,
- &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
+ writel(oem_phy->afe_tx_amp_control2, &xcvr->afe_tx_amp_control2);
udelay(AFE_REGISTER_WRITE_DELAY);
- writel(oem_phy->afe_tx_amp_control3,
- &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
+ writel(oem_phy->afe_tx_amp_control3, &xcvr->afe_tx_amp_control3);
udelay(AFE_REGISTER_WRITE_DELAY);
}
/* Transfer control to the PEs */
- writel(0x00010f00, &ihost->scu_registers->afe.afe_dfx_master_control0);
+ writel(0x00010f00, &afe->afe_dfx_master_control0);
udelay(AFE_REGISTER_WRITE_DELAY);
}
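
decode_selection_byte() above packs two bits per phy into the per-controller cable selection byte: bit <phy> selects a long cable and bit <phy + 4> a medium one, which maps each phy onto one of the four enum cable_selections values. A short worked example (illustrative only):

/* selection byte 0x21 == binary 0010 0001:
 *   phy 0: bit 0 set,  bit 4 clear -> 1 -> long_cable
 *   phy 1: bit 1 clear, bit 5 set  -> 2 -> medium_cable
 *   phy 2 and phy 3: both bits clear -> 0 -> short_cable
 * Setting both bits for a phy yields 3 (undefined_cable), which
 * lookup_cable_names() reports as "<undefined, assumed long>".
 */
unsigned char sel = 0x21;
enum cable_selections phy0 = decode_selection_byte(0, sel);	/* long_cable */
enum cable_selections phy1 = decode_selection_byte(1, sel);	/* medium_cable */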
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 646051a..5477f0f 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -435,11 +435,36 @@ static inline bool is_b0(struct pci_dev *pdev)
static inline bool is_c0(struct pci_dev *pdev)
{
- if (pdev->revision >= 5)
+ if (pdev->revision == 5)
return true;
return false;
}
+static inline bool is_c1(struct pci_dev *pdev)
+{
+ if (pdev->revision >= 6)
+ return true;
+ return false;
+}
+
+enum cable_selections {
+ short_cable = 0,
+ long_cable = 1,
+ medium_cable = 2,
+ undefined_cable = 3
+};
+
+#define CABLE_OVERRIDE_DISABLED (0x10000)
+
+static inline int is_cable_select_overridden(void)
+{
+ return cable_selection_override < CABLE_OVERRIDE_DISABLED;
+}
+
+enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy);
+void validate_cable_selections(struct isci_host *ihost);
+char *lookup_cable_names(enum cable_selections);
+
/* set hw control for 'activity', even though active enclosures seem to drive
* the activity led on their own. Skip setting FSENG control on 'status' due
* to unexpected operation and 'error' due to not being a supported automatic
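
The cable_selections enum and decode_cable_selection() prototype above pair with the cable_selection_mask layout documented later in probe_roms.h (bits 7-4 mark a phy as medium, bits 3-0 as long, both clear means short). A hedged sketch of such a decode; the real decode_cable_selection() also honors the cable_selection_override module parameter, and the helper name here is an assumption:

/* Sketch: decode one phy's cable class from the 8-bit OEM selection mask.
 * Bits 7-4: phy3..phy0 medium; bits 3-0: phy3..phy0 long; both clear: short.
 */
enum cable_selections {
	short_cable = 0,
	long_cable = 1,
	medium_cable = 2,
	undefined_cable = 3
};

static enum cable_selections decode_selection(unsigned char mask, int phy)
{
	int is_long = (mask >> phy) & 1;		/* bits 3-0 */
	int is_medium = (mask >> (phy + 4)) & 1;	/* bits 7-4 */

	if (is_long && is_medium)
		return undefined_cable;	/* setting both is undefined */
	if (is_medium)
		return medium_cable;
	if (is_long)
		return long_cable;
	return short_cable;
}

For the documented example value 0x84, this yields phy3 medium, phy2 long, and phy1/phy0 short.
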
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index a97edab..17c4c2c 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -65,7 +65,7 @@
#include "probe_roms.h"
#define MAJ 1
-#define MIN 0
+#define MIN 1
#define BUILD 0
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD)
@@ -94,7 +94,7 @@ MODULE_DEVICE_TABLE(pci, isci_id_table);
/* linux isci specific settings */
-unsigned char no_outbound_task_to = 20;
+unsigned char no_outbound_task_to = 2;
module_param(no_outbound_task_to, byte, 0);
MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)");
@@ -114,7 +114,7 @@ u16 stp_inactive_to = 5;
module_param(stp_inactive_to, ushort, 0);
MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)");
-unsigned char phy_gen = 3;
+unsigned char phy_gen = SCIC_SDS_PARM_GEN2_SPEED;
module_param(phy_gen, byte, 0);
MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
@@ -122,6 +122,14 @@ unsigned char max_concurr_spinup;
module_param(max_concurr_spinup, byte, 0);
MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
+uint cable_selection_override = CABLE_OVERRIDE_DISABLED;
+module_param(cable_selection_override, uint, 0);
+
+MODULE_PARM_DESC(cable_selection_override,
+ "This field indicates length of the SAS/SATA cable between "
+ "host and device. If any bits > 15 are set (default) "
+ "indicates \"use platform defaults\"");
+
static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
{
struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
@@ -412,6 +420,14 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
return NULL;
isci_host->shost = shost;
+ dev_info(&pdev->dev, "%sSCU controller %d: phy 3-0 cables: "
+ "{%s, %s, %s, %s}\n",
+ (is_cable_select_overridden() ? "* " : ""), isci_host->id,
+ lookup_cable_names(decode_cable_selection(isci_host, 3)),
+ lookup_cable_names(decode_cable_selection(isci_host, 2)),
+ lookup_cable_names(decode_cable_selection(isci_host, 1)),
+ lookup_cable_names(decode_cable_selection(isci_host, 0)));
+
err = isci_host_init(isci_host);
if (err)
goto err_shost;
@@ -466,7 +482,8 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
orom = isci_request_oprom(pdev);
for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) {
- if (sci_oem_parameters_validate(&orom->ctrl[i])) {
+ if (sci_oem_parameters_validate(&orom->ctrl[i],
+ orom->hdr.version)) {
dev_warn(&pdev->dev,
"[%d]: invalid oem parameters detected, falling back to firmware\n", i);
devm_kfree(&pdev->dev, orom);
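
The probe-time dev_info above prints a human-readable name per phy through lookup_cable_names(). A plausible stand-in for that mapping (the actual table lives in the driver and may differ):

/* Sketch only: map a cable_selections value (0..3, as in host.h above)
 * to a printable name.
 */
static const char *cable_names[] = {
	"short", "long", "medium", "<undefined>"
};

static const char *lookup_cable_name(unsigned int selection)
{
	return (selection < 4) ? cable_names[selection] : "<undefined>";
}
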
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
index 8efeb6b..234ab46 100644
--- a/drivers/scsi/isci/isci.h
+++ b/drivers/scsi/isci/isci.h
@@ -480,6 +480,7 @@ extern u16 ssp_inactive_to;
extern u16 stp_inactive_to;
extern unsigned char phy_gen;
extern unsigned char max_concurr_spinup;
+extern uint cable_selection_override;
irqreturn_t isci_msix_isr(int vec, void *data);
irqreturn_t isci_intx_isr(int vec, void *data);
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index 35f50c2..fe18acf 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -91,22 +91,23 @@ sci_phy_transport_layer_initialization(struct isci_phy *iphy,
static enum sci_status
sci_phy_link_layer_initialization(struct isci_phy *iphy,
- struct scu_link_layer_registers __iomem *reg)
+ struct scu_link_layer_registers __iomem *llr)
{
struct isci_host *ihost = iphy->owning_port->owning_controller;
+ struct sci_phy_user_params *phy_user;
+ struct sci_phy_oem_params *phy_oem;
int phy_idx = iphy->phy_index;
- struct sci_phy_user_params *phy_user = &ihost->user_parameters.phys[phy_idx];
- struct sci_phy_oem_params *phy_oem =
- &ihost->oem_parameters.phys[phy_idx];
- u32 phy_configuration;
struct sci_phy_cap phy_cap;
+ u32 phy_configuration;
u32 parity_check = 0;
u32 parity_count = 0;
u32 llctl, link_rate;
u32 clksm_value = 0;
u32 sp_timeouts = 0;
- iphy->link_layer_registers = reg;
+ phy_user = &ihost->user_parameters.phys[phy_idx];
+ phy_oem = &ihost->oem_parameters.phys[phy_idx];
+ iphy->link_layer_registers = llr;
/* Set our IDENTIFY frame data */
#define SCI_END_DEVICE 0x01
@@ -116,32 +117,26 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) |
SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) |
SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE),
- &iphy->link_layer_registers->transmit_identification);
+ &llr->transmit_identification);
/* Write the device SAS Address */
- writel(0xFEDCBA98,
- &iphy->link_layer_registers->sas_device_name_high);
- writel(phy_idx, &iphy->link_layer_registers->sas_device_name_low);
+ writel(0xFEDCBA98, &llr->sas_device_name_high);
+ writel(phy_idx, &llr->sas_device_name_low);
/* Write the source SAS Address */
- writel(phy_oem->sas_address.high,
- &iphy->link_layer_registers->source_sas_address_high);
- writel(phy_oem->sas_address.low,
- &iphy->link_layer_registers->source_sas_address_low);
+ writel(phy_oem->sas_address.high, &llr->source_sas_address_high);
+ writel(phy_oem->sas_address.low, &llr->source_sas_address_low);
/* Clear and Set the PHY Identifier */
- writel(0, &iphy->link_layer_registers->identify_frame_phy_id);
- writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx),
- &iphy->link_layer_registers->identify_frame_phy_id);
+ writel(0, &llr->identify_frame_phy_id);
+ writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx), &llr->identify_frame_phy_id);
/* Change the initial state of the phy configuration register */
- phy_configuration =
- readl(&iphy->link_layer_registers->phy_configuration);
+ phy_configuration = readl(&llr->phy_configuration);
/* Hold OOB state machine in reset */
phy_configuration |= SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
- writel(phy_configuration,
- &iphy->link_layer_registers->phy_configuration);
+ writel(phy_configuration, &llr->phy_configuration);
/* Configure the SNW capabilities */
phy_cap.all = 0;
@@ -149,15 +144,64 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
phy_cap.gen3_no_ssc = 1;
phy_cap.gen2_no_ssc = 1;
phy_cap.gen1_no_ssc = 1;
- if (ihost->oem_parameters.controller.do_enable_ssc == true) {
+ if (ihost->oem_parameters.controller.do_enable_ssc) {
+ struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
+ struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_idx];
+ struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
+ bool en_sas = false;
+ bool en_sata = false;
+ u32 sas_type = 0;
+ u32 sata_spread = 0x2;
+ u32 sas_spread = 0x2;
+
phy_cap.gen3_ssc = 1;
phy_cap.gen2_ssc = 1;
phy_cap.gen1_ssc = 1;
+
+ if (pci_info->orom->hdr.version < ISCI_ROM_VER_1_1)
+ en_sas = en_sata = true;
+ else {
+ sata_spread = ihost->oem_parameters.controller.ssc_sata_tx_spread_level;
+ sas_spread = ihost->oem_parameters.controller.ssc_sas_tx_spread_level;
+
+ if (sata_spread)
+ en_sata = true;
+
+ if (sas_spread) {
+ en_sas = true;
+ sas_type = ihost->oem_parameters.controller.ssc_sas_tx_type;
+ }
+
+ }
+
+ if (en_sas) {
+ u32 reg;
+
+ reg = readl(&xcvr->afe_xcvr_control0);
+ reg |= (0x00100000 | (sas_type << 19));
+ writel(reg, &xcvr->afe_xcvr_control0);
+
+ reg = readl(&xcvr->afe_tx_ssc_control);
+ reg |= sas_spread << 8;
+ writel(reg, &xcvr->afe_tx_ssc_control);
+ }
+
+ if (en_sata) {
+ u32 reg;
+
+ reg = readl(&xcvr->afe_tx_ssc_control);
+ reg |= sata_spread;
+ writel(reg, &xcvr->afe_tx_ssc_control);
+
+ reg = readl(&llr->stp_control);
+ reg |= 1 << 12;
+ writel(reg, &llr->stp_control);
+ }
}
- /*
- * The SAS specification indicates that the phy_capabilities that
- * are transmitted shall have an even parity. Calculate the parity. */
+ /* The SAS specification indicates that the phy_capabilities that
+ * are transmitted shall have an even parity. Calculate the parity.
+ */
parity_check = phy_cap.all;
while (parity_check != 0) {
if (parity_check & 0x1)
@@ -165,20 +209,20 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
parity_check >>= 1;
}
- /*
- * If parity indicates there are an odd number of bits set, then
- * set the parity bit to 1 in the phy capabilities. */
+ /* If parity indicates there are an odd number of bits set, then
+ * set the parity bit to 1 in the phy capabilities.
+ */
if ((parity_count % 2) != 0)
phy_cap.parity = 1;
- writel(phy_cap.all, &iphy->link_layer_registers->phy_capabilities);
+ writel(phy_cap.all, &llr->phy_capabilities);
/* Set the enable spinup period but disable the ability to send
* notify enable spinup
*/
writel(SCU_ENSPINUP_GEN_VAL(COUNT,
phy_user->notify_enable_spin_up_insertion_frequency),
- &iphy->link_layer_registers->notify_enable_spinup_control);
+ &llr->notify_enable_spinup_control);
/* Write the ALIGN Insertion Frequency for connected phy and
* independent of connected state
@@ -189,11 +233,13 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL,
phy_user->align_insertion_frequency);
- writel(clksm_value, &iphy->link_layer_registers->clock_skew_management);
+ writel(clksm_value, &llr->clock_skew_management);
- /* @todo Provide a way to write this register correctly */
- writel(0x02108421,
- &iphy->link_layer_registers->afe_lookup_table_control);
+ if (is_c0(ihost->pdev) || is_c1(ihost->pdev)) {
+ writel(0x04210400, &llr->afe_lookup_table_control);
+ writel(0x020A7C05, &llr->sas_primitive_timeout);
+ } else
+ writel(0x02108421, &llr->afe_lookup_table_control);
llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT,
(u8)ihost->user_parameters.no_outbound_task_timeout);
@@ -210,9 +256,9 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
break;
}
llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);
- writel(llctl, &iphy->link_layer_registers->link_layer_control);
+ writel(llctl, &llr->link_layer_control);
- sp_timeouts = readl(&iphy->link_layer_registers->sas_phy_timeouts);
+ sp_timeouts = readl(&llr->sas_phy_timeouts);
/* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */
sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF);
@@ -222,20 +268,23 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
*/
sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B);
- writel(sp_timeouts, &iphy->link_layer_registers->sas_phy_timeouts);
+ writel(sp_timeouts, &llr->sas_phy_timeouts);
if (is_a2(ihost->pdev)) {
- /* Program the max ARB time for the PHY to 700us so we inter-operate with
- * the PMC expander which shuts down PHYs if the expander PHY generates too
- * many breaks. This time value will guarantee that the initiator PHY will
- * generate the break.
+ /* Program the max ARB time for the PHY to 700us so we
+ * inter-operate with the PMC expander which shuts down
+ * PHYs if the expander PHY generates too many breaks.
+ * This time value will guarantee that the initiator PHY
+ * will generate the break.
*/
writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME,
- &iphy->link_layer_registers->maximum_arbitration_wait_timer_timeout);
+ &llr->maximum_arbitration_wait_timer_timeout);
}
- /* Disable link layer hang detection, rely on the OS timeout for I/O timeouts. */
- writel(0, &iphy->link_layer_registers->link_layer_hang_detection_timeout);
+ /* Disable link layer hang detection, rely on the OS timeout for
+ * I/O timeouts.
+ */
+ writel(0, &llr->link_layer_hang_detection_timeout);
/* We can exit the initial state to the stopped state */
sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
@@ -1049,24 +1098,25 @@ static void scu_link_layer_stop_protocol_engine(
writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control);
}
-/**
- *
- *
- * This method will start the OOB/SN state machine for this struct isci_phy object.
- */
-static void scu_link_layer_start_oob(
- struct isci_phy *iphy)
+static void scu_link_layer_start_oob(struct isci_phy *iphy)
{
- u32 scu_sas_pcfg_value;
-
- scu_sas_pcfg_value =
- readl(&iphy->link_layer_registers->phy_configuration);
- scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
- scu_sas_pcfg_value &=
- ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
- SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
- writel(scu_sas_pcfg_value,
- &iphy->link_layer_registers->phy_configuration);
+ struct scu_link_layer_registers __iomem *ll = iphy->link_layer_registers;
+ u32 val;
+
+ /** Reset OOB sequence - start */
+ val = readl(&ll->phy_configuration);
+ val &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
+ SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
+ writel(val, &ll->phy_configuration);
+ readl(&ll->phy_configuration); /* flush */
+ /** Reset OOB sequence - end */
+
+ /** Start OOB sequence - start */
+ val = readl(&ll->phy_configuration);
+ val |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+ writel(val, &ll->phy_configuration);
+ readl(&ll->phy_configuration); /* flush */
+ /** Start OOB sequence - end */
}
/**
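
The link-layer initialization above computes even parity over the SNW capabilities word before writing it, as required by the SAS specification. A standalone sketch of that parity step, equivalent to the loop in sci_phy_link_layer_initialization():

#include <stdint.h>
#include <stdbool.h>

/* Return true if 'cap' needs the parity bit set so the transmitted
 * phy_capabilities word ends up with an even number of 1 bits.
 */
static bool needs_parity_bit(uint32_t cap)
{
	unsigned int ones = 0;

	while (cap) {
		ones += cap & 1;
		cap >>= 1;
	}
	return (ones % 2) != 0;	/* odd count -> set the parity bit */
}
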
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index ac7f277..7c6ac58 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -114,7 +114,7 @@ static u32 sci_port_get_phys(struct isci_port *iport)
* value is returned if the specified port is not valid. When this value is
* returned, no data is copied to the properties output parameter.
*/
-static enum sci_status sci_port_get_properties(struct isci_port *iport,
+enum sci_status sci_port_get_properties(struct isci_port *iport,
struct sci_port_properties *prop)
{
if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
@@ -647,19 +647,26 @@ void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
}
}
-static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy,
- bool do_notify_user)
+static void sci_port_resume_phy(struct isci_port *iport, struct isci_phy *iphy)
+{
+ sci_phy_resume(iphy);
+ iport->enabled_phy_mask |= 1 << iphy->phy_index;
+}
+
+static void sci_port_activate_phy(struct isci_port *iport,
+ struct isci_phy *iphy,
+ u8 flags)
{
struct isci_host *ihost = iport->owning_controller;
- if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA)
+ if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA && (flags & PF_RESUME))
sci_phy_resume(iphy);
iport->active_phy_mask |= 1 << iphy->phy_index;
sci_controller_clear_invalid_phy(ihost, iphy);
- if (do_notify_user == true)
+ if (flags & PF_NOTIFY)
isci_port_link_up(ihost, iport, iphy);
}
@@ -669,14 +676,19 @@ void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
struct isci_host *ihost = iport->owning_controller;
iport->active_phy_mask &= ~(1 << iphy->phy_index);
+ iport->enabled_phy_mask &= ~(1 << iphy->phy_index);
if (!iport->active_phy_mask)
iport->last_active_phy = iphy->phy_index;
iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
- /* Re-assign the phy back to the LP as if it were a narrow port */
- writel(iphy->phy_index,
- &iport->port_pe_configuration_register[iphy->phy_index]);
+ /* Re-assign the phy back to the LP as if it were a narrow port for APC
+ * mode. For MPC mode, the phy will remain in the port.
+ */
+ if (iport->owning_controller->oem_parameters.controller.mode_type ==
+ SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE)
+ writel(iphy->phy_index,
+ &iport->port_pe_configuration_register[iphy->phy_index]);
if (do_notify_user == true)
isci_port_link_down(ihost, iphy, iport);
@@ -701,18 +713,16 @@ static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *i
* sci_port_general_link_up_handler - phy can be assigned to port?
* @sci_port: sci_port object for which has a phy that has gone link up.
* @sci_phy: This is the struct isci_phy object that has gone link up.
- * @do_notify_user: This parameter specifies whether to inform the user (via
- * sci_port_link_up()) as to the fact that a new phy as become ready.
+ * @flags: PF_RESUME, PF_NOTIFY to sci_port_activate_phy
*
- * Determine if this phy can be assigned to this
- * port . If the phy is not a valid PHY for
- * this port then the function will notify the user. A PHY can only be
- * part of a port if it's attached SAS ADDRESS is the same as all other PHYs in
- * the same port. none
+ * Determine if this phy can be assigned to this port. If the phy is
+ * not a valid PHY for this port then the function will notify the user.
+ * A PHY can only be part of a port if its attached SAS address is the
+ * same as that of all other PHYs in the same port.
*/
static void sci_port_general_link_up_handler(struct isci_port *iport,
- struct isci_phy *iphy,
- bool do_notify_user)
+ struct isci_phy *iphy,
+ u8 flags)
{
struct sci_sas_address port_sas_address;
struct sci_sas_address phy_sas_address;
@@ -730,7 +740,7 @@ static void sci_port_general_link_up_handler(struct isci_port *iport,
iport->active_phy_mask == 0) {
struct sci_base_state_machine *sm = &iport->sm;
- sci_port_activate_phy(iport, iphy, do_notify_user);
+ sci_port_activate_phy(iport, iphy, flags);
if (sm->current_state_id == SCI_PORT_RESETTING)
port_state_machine_change(iport, SCI_PORT_READY);
} else
@@ -781,11 +791,16 @@ bool sci_port_link_detected(
struct isci_phy *iphy)
{
if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
- (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) &&
- sci_port_is_wide(iport)) {
- sci_port_invalid_link_up(iport, iphy);
-
- return false;
+ (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA)) {
+ if (sci_port_is_wide(iport)) {
+ sci_port_invalid_link_up(iport, iphy);
+ return false;
+ } else {
+ struct isci_host *ihost = iport->owning_controller;
+ struct isci_port *dst_port = &(ihost->ports[iphy->phy_index]);
+ writel(iphy->phy_index,
+ &dst_port->port_pe_configuration_register[iphy->phy_index]);
+ }
}
return true;
@@ -975,6 +990,13 @@ static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine
}
}
+static void scic_sds_port_ready_substate_waiting_exit(
+ struct sci_base_state_machine *sm)
+{
+ struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+ sci_port_resume_port_task_scheduler(iport);
+}
+
static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
{
u32 index;
@@ -988,13 +1010,13 @@ static void sci_port_ready_substate_operational_enter(struct sci_base_state_mach
writel(iport->physical_port_index,
&iport->port_pe_configuration_register[
iport->phy_table[index]->phy_index]);
+ if (((iport->active_phy_mask^iport->enabled_phy_mask) & (1 << index)) != 0)
+ sci_port_resume_phy(iport, iport->phy_table[index]);
}
}
sci_port_update_viit_entry(iport);
- sci_port_resume_port_task_scheduler(iport);
-
/*
* Post the dummy task for the port so the hardware can schedule
* io correctly
@@ -1061,20 +1083,9 @@ static void sci_port_ready_substate_configuring_enter(struct sci_base_state_mach
if (iport->active_phy_mask == 0) {
isci_port_not_ready(ihost, iport);
- port_state_machine_change(iport,
- SCI_PORT_SUB_WAITING);
- } else if (iport->started_request_count == 0)
- port_state_machine_change(iport,
- SCI_PORT_SUB_OPERATIONAL);
-}
-
-static void sci_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm)
-{
- struct isci_port *iport = container_of(sm, typeof(*iport), sm);
-
- sci_port_suspend_port_task_scheduler(iport);
- if (iport->ready_exit)
- sci_port_invalidate_dummy_remote_node(iport);
+ port_state_machine_change(iport, SCI_PORT_SUB_WAITING);
+ } else
+ port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL);
}
enum sci_status sci_port_start(struct isci_port *iport)
@@ -1252,7 +1263,7 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
if (status != SCI_SUCCESS)
return status;
- sci_port_general_link_up_handler(iport, iphy, true);
+ sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);
@@ -1262,7 +1273,7 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
if (status != SCI_SUCCESS)
return status;
- sci_port_general_link_up_handler(iport, iphy, true);
+ sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY);
/* Re-enter the configuring state since this may be the last phy in
* the port.
@@ -1338,13 +1349,13 @@ enum sci_status sci_port_link_up(struct isci_port *iport,
/* Since this is the first phy going link up for the port we
* can just enable it and continue
*/
- sci_port_activate_phy(iport, iphy, true);
+ sci_port_activate_phy(iport, iphy, PF_NOTIFY|PF_RESUME);
port_state_machine_change(iport,
SCI_PORT_SUB_OPERATIONAL);
return SCI_SUCCESS;
case SCI_PORT_SUB_OPERATIONAL:
- sci_port_general_link_up_handler(iport, iphy, true);
+ sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
return SCI_SUCCESS;
case SCI_PORT_RESETTING:
/* TODO We should make sure that the phy that has gone
@@ -1361,7 +1372,7 @@ enum sci_status sci_port_link_up(struct isci_port *iport,
/* In the resetting state we don't notify the user regarding
* link up and link down notifications.
*/
- sci_port_general_link_up_handler(iport, iphy, false);
+ sci_port_general_link_up_handler(iport, iphy, PF_RESUME);
return SCI_SUCCESS;
default:
dev_warn(sciport_to_dev(iport),
@@ -1584,14 +1595,14 @@ static const struct sci_base_state sci_port_state_table[] = {
},
[SCI_PORT_SUB_WAITING] = {
.enter_state = sci_port_ready_substate_waiting_enter,
+ .exit_state = scic_sds_port_ready_substate_waiting_exit,
},
[SCI_PORT_SUB_OPERATIONAL] = {
.enter_state = sci_port_ready_substate_operational_enter,
.exit_state = sci_port_ready_substate_operational_exit
},
[SCI_PORT_SUB_CONFIGURING] = {
- .enter_state = sci_port_ready_substate_configuring_enter,
- .exit_state = sci_port_ready_substate_configuring_exit
+ .enter_state = sci_port_ready_substate_configuring_enter
},
[SCI_PORT_RESETTING] = {
.exit_state = sci_port_resetting_state_exit
@@ -1609,6 +1620,7 @@ void sci_port_construct(struct isci_port *iport, u8 index,
iport->logical_port_index = SCIC_SDS_DUMMY_PORT;
iport->physical_port_index = index;
iport->active_phy_mask = 0;
+ iport->enabled_phy_mask = 0;
iport->last_active_phy = 0;
iport->ready_exit = false;
diff --git a/drivers/scsi/isci/port.h b/drivers/scsi/isci/port.h
index cb5ffbc..0811609 100644
--- a/drivers/scsi/isci/port.h
+++ b/drivers/scsi/isci/port.h
@@ -63,6 +63,9 @@
#define SCIC_SDS_DUMMY_PORT 0xFF
+#define PF_NOTIFY (1 << 0)
+#define PF_RESUME (1 << 1)
+
struct isci_phy;
struct isci_host;
@@ -83,6 +86,8 @@ enum isci_status {
* @logical_port_index: software port index
* @physical_port_index: hardware port index
* @active_phy_mask: identifies phy members
+ * @enabled_phy_mask: mask of phys that have been resumed and
+ * are already part of the port
* @reserved_tag:
* @reserved_rni: reserved RNI for port task scheduler workaround
* @started_request_count: reference count for outstanding commands
@@ -104,6 +109,7 @@ struct isci_port {
u8 logical_port_index;
u8 physical_port_index;
u8 active_phy_mask;
+ u8 enabled_phy_mask;
u8 last_active_phy;
u16 reserved_rni;
u16 reserved_tag;
@@ -250,6 +256,10 @@ bool sci_port_link_detected(
struct isci_port *iport,
struct isci_phy *iphy);
+enum sci_status sci_port_get_properties(
+ struct isci_port *iport,
+ struct sci_port_properties *prop);
+
enum sci_status sci_port_link_up(struct isci_port *iport,
struct isci_phy *iphy);
enum sci_status sci_port_link_down(struct isci_port *iport,
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index 38a99d2..6d1e954 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -57,7 +57,7 @@
#define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT (10)
#define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT (10)
-#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (100)
+#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION (250)
enum SCIC_SDS_APC_ACTIVITY {
SCIC_SDS_APC_SKIP_PHY,
@@ -466,6 +466,23 @@ sci_apc_agent_validate_phy_configuration(struct isci_host *ihost,
return sci_port_configuration_agent_validate_ports(ihost, port_agent);
}
+/*
+ * This routine will restart the automatic port configuration timeout
+ * timer for the next time period. This could be caused by either a link
+ * down event or a link up event where we cannot yet tell to which port
+ * a phy belongs.
+ */
+static void sci_apc_agent_start_timer(
+ struct sci_port_configuration_agent *port_agent,
+ u32 timeout)
+{
+ if (port_agent->timer_pending)
+ sci_del_timer(&port_agent->timer);
+
+ port_agent->timer_pending = true;
+ sci_mod_timer(&port_agent->timer, timeout);
+}
+
static void sci_apc_agent_configure_ports(struct isci_host *ihost,
struct sci_port_configuration_agent *port_agent,
struct isci_phy *iphy,
@@ -565,17 +582,8 @@ static void sci_apc_agent_configure_ports(struct isci_host *ihost,
break;
case SCIC_SDS_APC_START_TIMER:
- /*
- * This can occur for either a link down event, or a link
- * up event where we cannot yet tell the port to which a
- * phy belongs.
- */
- if (port_agent->timer_pending)
- sci_del_timer(&port_agent->timer);
-
- port_agent->timer_pending = true;
- sci_mod_timer(&port_agent->timer,
- SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
+ sci_apc_agent_start_timer(port_agent,
+ SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
break;
case SCIC_SDS_APC_SKIP_PHY:
@@ -607,7 +615,8 @@ static void sci_apc_agent_link_up(struct isci_host *ihost,
if (!iport) {
/* the phy is not part of this port */
port_agent->phy_ready_mask |= 1 << phy_index;
- sci_apc_agent_configure_ports(ihost, port_agent, iphy, true);
+ sci_apc_agent_start_timer(port_agent,
+ SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
} else {
/* the phy is already part of the port */
u32 port_state = iport->sm.current_state_id;
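
The new sci_apc_agent_start_timer() helper factors out the "cancel if pending, then re-arm" pattern that both the link-up path and the SCIC_SDS_APC_START_TIMER case now share. A generic sketch of that pattern, using placeholder timer helpers rather than the SCI core's API:

/* Sketch of the restart-timer pattern factored out above. */
struct sketch_timer {
	int pending;
};

static void sketch_del_timer(struct sketch_timer *t)
{
	t->pending = 0;			/* cancel the stale instance */
}

static void sketch_mod_timer(struct sketch_timer *t, unsigned long timeout)
{
	(void)timeout;
	t->pending = 1;			/* re-armed for the next period */
}

static void restart_timer(struct sketch_timer *t, unsigned long timeout)
{
	if (t->pending)
		sketch_del_timer(t);
	sketch_mod_timer(t, timeout);
}
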
diff --git a/drivers/scsi/isci/probe_roms.c b/drivers/scsi/isci/probe_roms.c
index b5f4341..9b8117b 100644
--- a/drivers/scsi/isci/probe_roms.c
+++ b/drivers/scsi/isci/probe_roms.c
@@ -147,7 +147,7 @@ struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmw
memcpy(orom, fw->data, fw->size);
- if (is_c0(pdev))
+ if (is_c0(pdev) || is_c1(pdev))
goto out;
/*
diff --git a/drivers/scsi/isci/probe_roms.h b/drivers/scsi/isci/probe_roms.h
index 2c75248..bb0e9d4 100644
--- a/drivers/scsi/isci/probe_roms.h
+++ b/drivers/scsi/isci/probe_roms.h
@@ -152,7 +152,7 @@ struct sci_user_parameters {
#define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4
struct sci_oem_params;
-int sci_oem_parameters_validate(struct sci_oem_params *oem);
+int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version);
struct isci_orom;
struct isci_orom *isci_request_oprom(struct pci_dev *pdev);
@@ -191,6 +191,11 @@ struct isci_oem_hdr {
0x1a, 0x04, 0xc6)
#define ISCI_EFI_VAR_NAME "RstScuO"
+#define ISCI_ROM_VER_1_0 0x10
+#define ISCI_ROM_VER_1_1 0x11
+#define ISCI_ROM_VER_1_3 0x13
+#define ISCI_ROM_VER_LATEST ISCI_ROM_VER_1_3
+
/* Allowed PORT configuration modes APC Automatic PORT configuration mode is
* defined by the OEM configuration parameters providing no PHY_MASK parameters
* for any PORT. i.e. There are no phys assigned to any of the ports at start.
@@ -220,8 +225,86 @@ struct sci_oem_params {
struct {
uint8_t mode_type;
uint8_t max_concurr_spin_up;
- uint8_t do_enable_ssc;
- uint8_t reserved;
+ /*
+ * This bitfield indicates the OEM's desired default Tx
+ * Spread Spectrum Clocking (SSC) settings for SATA and SAS.
+ * NOTE: Default SSC Modulation Frequency is 31.5KHz.
+ */
+ union {
+ struct {
+ /*
+ * NOTE: Max spread for SATA is +0 / -5000 PPM.
+ * Down-spreading SSC (only method allowed for SATA):
+ * SATA SSC Tx Disabled = 0x0
+ * SATA SSC Tx at +0 / -1419 PPM Spread = 0x2
+ * SATA SSC Tx at +0 / -2129 PPM Spread = 0x3
+ * SATA SSC Tx at +0 / -4257 PPM Spread = 0x6
+ * SATA SSC Tx at +0 / -4967 PPM Spread = 0x7
+ */
+ uint8_t ssc_sata_tx_spread_level:4;
+ /*
+ * SAS SSC Tx Disabled = 0x0
+ *
+ * NOTE: Max spread for SAS down-spreading +0 /
+ * -2300 PPM
+ * Down-spreading SSC:
+ * SAS SSC Tx at +0 / -1419 PPM Spread = 0x2
+ * SAS SSC Tx at +0 / -2129 PPM Spread = 0x3
+ *
+ * NOTE: Max spread for SAS center-spreading +2300 /
+ * -2300 PPM
+ * Center-spreading SSC:
+ * SAS SSC Tx at +1064 / -1064 PPM Spread = 0x3
+ * SAS SSC Tx at +2129 / -2129 PPM Spread = 0x6
+ */
+ uint8_t ssc_sas_tx_spread_level:3;
+ /*
+ * NOTE: Refer to the SSC section of the SAS 2.x
+ * Specification for proper setting of this field.
+ * For standard SAS Initiator SAS PHY operation it
+ * should be 0 for Down-spreading.
+ * SAS SSC Tx spread type:
+ * Down-spreading SSC = 0
+ * Center-spreading SSC = 1
+ */
+ uint8_t ssc_sas_tx_type:1;
+ };
+ uint8_t do_enable_ssc;
+ };
+ /*
+	 * This field indicates the length of the SAS/SATA cable between
+	 * host and device.
+	 * This field is used to relate the analog parameters of the
+	 * phy in the silicon to the length of the cable.
+	 * Supported cable attenuation levels:
+	 * "short" - up to 3m, "medium" - 3m to 6m, and "long" - more
+	 * than 6m.
+ *
+ * This is bit mask field:
+ *
+ * BIT: (MSB) 7 6 5 4
+ * ASSIGNMENT: <phy3><phy2><phy1><phy0> - Medium cable
+ * length assignment
+ * BIT: 3 2 1 0 (LSB)
+ * ASSIGNMENT: <phy3><phy2><phy1><phy0> - Long cable length
+ * assignment
+ *
+ * BITS 7-4 are set when the cable length is assigned to medium
+ * BITS 3-0 are set when the cable length is assigned to long
+ *
+ * The BIT positions are clear when the cable length is
+ * assigned to short.
+ *
+ * Setting the bits for both long and medium cable length is
+ * undefined.
+ *
+ * A value of 0x84 would assign
+ * phy3 - medium
+ * phy2 - long
+ * phy1 - short
+ * phy0 - short
+ */
+ uint8_t cable_selection_mask;
} controller;
struct {
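
The anonymous union above lets newer OEM tables express separate SATA/SAS SSC spread levels while older tables keep addressing the same byte as the legacy do_enable_ssc flag. A standalone sketch of the same layout; bit-field packing is compiler-dependent, and this mirrors the little-endian, LSB-first allocation the driver relies on (anonymous struct members need C11 or a GNU extension):

#include <stdint.h>
#include <stdio.h>

/* Sketch of the SSC byte: low nibble = SATA spread level, next 3 bits =
 * SAS spread level, top bit = SAS spread type (0 down, 1 center).
 */
union ssc_byte {
	struct {
		uint8_t ssc_sata_tx_spread_level:4;
		uint8_t ssc_sas_tx_spread_level:3;
		uint8_t ssc_sas_tx_type:1;
	};
	uint8_t do_enable_ssc;	/* legacy view: non-zero enables SSC */
};

int main(void)
{
	union ssc_byte b = { .do_enable_ssc = 0x32 };

	/* 0x32 -> SATA spread 0x2, SAS spread 0x3, down-spreading */
	printf("sata=%u sas=%u type=%u\n",
	       b.ssc_sata_tx_spread_level,
	       b.ssc_sas_tx_spread_level,
	       b.ssc_sas_tx_type);
	return 0;
}
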
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index b207cd3..dd74b6c 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -53,6 +53,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <scsi/sas.h>
+#include <linux/bitops.h>
#include "isci.h"
#include "port.h"
#include "remote_device.h"
@@ -1101,6 +1102,7 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
struct isci_remote_device *idev)
{
enum sci_status status;
+ struct sci_port_properties properties;
struct domain_device *dev = idev->domain_dev;
sci_remote_device_construct(iport, idev);
@@ -1110,6 +1112,11 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
* entries will be needed to store the remote node.
*/
idev->is_direct_attached = true;
+
+ sci_port_get_properties(iport, &properties);
+ /* Get accurate port width from port's phy mask for a DA device. */
+ idev->device_port_width = hweight32(properties.phy_mask);
+
status = sci_controller_allocate_remote_node_context(iport->owning_controller,
idev,
&idev->rnc.remote_node_index);
@@ -1125,9 +1132,6 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
idev->connection_rate = sci_port_get_max_allowed_speed(iport);
- /* / @todo Should I assign the port width by reading all of the phys on the port? */
- idev->device_port_width = 1;
-
return SCI_SUCCESS;
}
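
The direct-attached construct path now derives the device port width from the port's phy mask instead of hard-coding 1; hweight32() is simply a population count. A user-space equivalent for illustration:

#include <stdint.h>

/* Population count of a 32-bit phy mask, i.e. the port width used for a
 * direct-attached device (stand-in for hweight32()).
 */
static unsigned int phy_mask_width(uint32_t phy_mask)
{
	unsigned int width = 0;

	while (phy_mask) {
		phy_mask &= phy_mask - 1;	/* clear lowest set bit */
		width++;
	}
	return width;
}

For example, a phy_mask of 0x3 (phys 0 and 1) yields a width of 2.
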
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 66ad3dc..f5a3f7d 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -496,7 +496,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
}
}
- isci_print_tmf(tmf);
+ isci_print_tmf(ihost, tmf);
if (tmf->status == SCI_SUCCESS)
ret = TMF_RESP_FUNC_COMPLETE;
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index bc78c0a..1b27b37 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -106,7 +106,6 @@ struct isci_tmf {
} resp;
unsigned char lun[8];
u16 io_tag;
- struct isci_remote_device *device;
enum isci_tmf_function_codes tmf_code;
int status;
@@ -120,10 +119,10 @@ struct isci_tmf {
};
-static inline void isci_print_tmf(struct isci_tmf *tmf)
+static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf)
{
if (SAS_PROTOCOL_SATA == tmf->proto)
- dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
+ dev_dbg(&ihost->pdev->dev,
"%s: status = %x\n"
"tmf->resp.d2h_fis.status = %x\n"
"tmf->resp.d2h_fis.error = %x\n",
@@ -132,7 +131,7 @@ static inline void isci_print_tmf(struct isci_tmf *tmf)
tmf->resp.d2h_fis.status,
tmf->resp.d2h_fis.error);
else
- dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
+ dev_dbg(&ihost->pdev->dev,
"%s: status = %x\n"
"tmf->resp.resp_iu.data_present = %x\n"
"tmf->resp.resp_iu.status = %x\n"
diff --git a/drivers/scsi/iscsi_boot_sysfs.c b/drivers/scsi/iscsi_boot_sysfs.c
index 89700cb..14c1c8f 100644
--- a/drivers/scsi/iscsi_boot_sysfs.c
+++ b/drivers/scsi/iscsi_boot_sysfs.c
@@ -112,7 +112,7 @@ static struct attribute *target_attrs[] = {
NULL
};
-static mode_t iscsi_boot_tgt_attr_is_visible(struct kobject *kobj,
+static umode_t iscsi_boot_tgt_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
struct iscsi_boot_kobj *boot_kobj =
@@ -193,7 +193,7 @@ static struct attribute *ethernet_attrs[] = {
NULL
};
-static mode_t iscsi_boot_eth_attr_is_visible(struct kobject *kobj,
+static umode_t iscsi_boot_eth_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
struct iscsi_boot_kobj *boot_kobj =
@@ -265,7 +265,7 @@ static struct attribute *initiator_attrs[] = {
NULL
};
-static mode_t iscsi_boot_ini_attr_is_visible(struct kobject *kobj,
+static umode_t iscsi_boot_ini_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
struct iscsi_boot_kobj *boot_kobj =
@@ -306,7 +306,7 @@ iscsi_boot_create_kobj(struct iscsi_boot_kset *boot_kset,
struct attribute_group *attr_group,
const char *name, int index, void *data,
ssize_t (*show) (void *data, int type, char *buf),
- mode_t (*is_visible) (void *data, int type),
+ umode_t (*is_visible) (void *data, int type),
void (*release) (void *data))
{
struct iscsi_boot_kobj *boot_kobj;
@@ -369,7 +369,7 @@ struct iscsi_boot_kobj *
iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index,
void *data,
ssize_t (*show) (void *data, int type, char *buf),
- mode_t (*is_visible) (void *data, int type),
+ umode_t (*is_visible) (void *data, int type),
void (*release) (void *data))
{
return iscsi_boot_create_kobj(boot_kset, &iscsi_boot_target_attr_group,
@@ -394,7 +394,7 @@ struct iscsi_boot_kobj *
iscsi_boot_create_initiator(struct iscsi_boot_kset *boot_kset, int index,
void *data,
ssize_t (*show) (void *data, int type, char *buf),
- mode_t (*is_visible) (void *data, int type),
+ umode_t (*is_visible) (void *data, int type),
void (*release) (void *data))
{
return iscsi_boot_create_kobj(boot_kset,
@@ -420,7 +420,7 @@ struct iscsi_boot_kobj *
iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index,
void *data,
ssize_t (*show) (void *data, int type, char *buf),
- mode_t (*is_visible) (void *data, int type),
+ umode_t (*is_visible) (void *data, int type),
void (*release) (void *data))
{
return iscsi_boot_create_kobj(boot_kset,
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 7c34d8e..db47158 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -873,7 +873,7 @@ static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
iscsi_host_free(shost);
}
-static mode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param)
+static umode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param)
{
switch (param_type) {
case ISCSI_HOST_PARAM:
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 08e26d4..27cfb0c 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -1,6 +1,6 @@
/* jazz_esp.c: ESP front-end for MIPS JAZZ systems.
*
- * Copyright (C) 2007 Thomas Bogendrfer (tsbogend@alpha.frankende)
+ * Copyright (C) 2007 Thomas Bogendörfer (tsbogend@alpha.frankende)
*/
#include <linux/kernel.h>
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 7269e92..1d1b0c9 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -61,7 +61,7 @@ static void fc_disc_restart(struct fc_disc *);
* Locking Note: This function expects that the lport mutex is locked before
* calling it.
*/
-void fc_disc_stop_rports(struct fc_disc *disc)
+static void fc_disc_stop_rports(struct fc_disc *disc)
{
struct fc_lport *lport;
struct fc_rport_priv *rdata;
@@ -682,7 +682,7 @@ static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
* fc_disc_stop() - Stop discovery for a given lport
* @lport: The local port that discovery should stop on
*/
-void fc_disc_stop(struct fc_lport *lport)
+static void fc_disc_stop(struct fc_lport *lport)
{
struct fc_disc *disc = &lport->disc;
@@ -698,7 +698,7 @@ void fc_disc_stop(struct fc_lport *lport)
* This function will block until discovery has been
* completely stopped and all rports have been deleted.
*/
-void fc_disc_stop_final(struct fc_lport *lport)
+static void fc_disc_stop_final(struct fc_lport *lport)
{
fc_disc_stop(lport);
lport->tt.rport_flush_queue();
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index fb9161d..e17a28d 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -28,6 +28,7 @@
#include <scsi/fc/fc_els.h>
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
+#include "fc_libfc.h"
/**
* fc_elsct_send() - Send an ELS or CT frame
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 9de9db2..4d70d96 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -91,7 +91,7 @@ struct fc_exch_pool {
* It manages the allocation of exchange IDs.
*/
struct fc_exch_mgr {
- struct fc_exch_pool *pool;
+ struct fc_exch_pool __percpu *pool;
mempool_t *ep_pool;
enum fc_class class;
struct kref kref;
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 221875e..f607314 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -155,6 +155,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
fsp->xfer_ddp = FC_XID_UNKNOWN;
atomic_set(&fsp->ref_cnt, 1);
init_timer(&fsp->timer);
+ fsp->timer.data = (unsigned long)fsp;
INIT_LIST_HEAD(&fsp->list);
spin_lock_init(&fsp->scsi_pkt_lock);
}
@@ -1850,9 +1851,6 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
}
put_cpu();
- init_timer(&fsp->timer);
- fsp->timer.data = (unsigned long)fsp;
-
/*
* send it to the lower layer
* if we get -1 return then put the request in the pending
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index e77094a..83750eb 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -677,7 +677,8 @@ EXPORT_SYMBOL(fc_set_mfs);
* @lport: The local port receiving the event
* @event: The discovery event
*/
-void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
+static void fc_lport_disc_callback(struct fc_lport *lport,
+ enum fc_disc_event event)
{
switch (event) {
case DISC_EV_SUCCESS:
@@ -1568,7 +1569,7 @@ EXPORT_SYMBOL(fc_lport_flogi_resp);
* Locking Note: The lport lock is expected to be held before calling
* this routine.
*/
-void fc_lport_enter_flogi(struct fc_lport *lport)
+static void fc_lport_enter_flogi(struct fc_lport *lport)
{
struct fc_frame *fp;
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index b9e4348..83aa1ef 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -391,7 +391,7 @@ static void fc_rport_work(struct work_struct *work)
* If it appears we are already logged in, ADISC is used to verify
* the setup.
*/
-int fc_rport_login(struct fc_rport_priv *rdata)
+static int fc_rport_login(struct fc_rport_priv *rdata)
{
mutex_lock(&rdata->rp_mutex);
@@ -451,7 +451,7 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
* function will hold the rport lock, call an _enter_*
* function and then unlock the rport.
*/
-int fc_rport_logoff(struct fc_rport_priv *rdata)
+static int fc_rport_logoff(struct fc_rport_priv *rdata)
{
mutex_lock(&rdata->rp_mutex);
@@ -653,8 +653,8 @@ static int fc_rport_login_complete(struct fc_rport_priv *rdata,
* @fp: The FLOGI response frame
* @rp_arg: The remote port that received the FLOGI response
*/
-void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
- void *rp_arg)
+static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
+ void *rp_arg)
{
struct fc_rport_priv *rdata = rp_arg;
struct fc_lport *lport = rdata->local_port;
@@ -1520,7 +1520,7 @@ reject:
*
* Locking Note: Called with the lport lock held.
*/
-void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
+static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
{
struct fc_seq_els_data els_data;
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index bb4c8e0..825f930 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -247,18 +247,6 @@ struct lpfc_stats {
uint32_t fcpLocalErr;
};
-enum sysfs_mbox_state {
- SMBOX_IDLE,
- SMBOX_WRITING,
- SMBOX_READING
-};
-
-struct lpfc_sysfs_mbox {
- enum sysfs_mbox_state state;
- size_t offset;
- struct lpfcMboxq * mbox;
-};
-
struct lpfc_hba;
@@ -783,8 +771,6 @@ struct lpfc_hba {
uint64_t bg_apptag_err_cnt;
uint64_t bg_reftag_err_cnt;
- struct lpfc_sysfs_mbox sysfs_mbox;
-
/* fastpath list. */
spinlock_t scsi_buf_list_lock;
struct list_head lpfc_scsi_buf_list;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index d0ebaeb..f6697cb 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -351,10 +351,23 @@ lpfc_fwrev_show(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
+ uint32_t if_type;
+ uint8_t sli_family;
char fwrev[32];
+ int len;
lpfc_decode_firmware_rev(phba, fwrev, 1);
- return snprintf(buf, PAGE_SIZE, "%s, sli-%d\n", fwrev, phba->sli_rev);
+ if_type = phba->sli4_hba.pc_sli4_params.if_type;
+ sli_family = phba->sli4_hba.pc_sli4_params.sli_family;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ len = snprintf(buf, PAGE_SIZE, "%s, sli-%d\n",
+ fwrev, phba->sli_rev);
+ else
+ len = snprintf(buf, PAGE_SIZE, "%s, sli-%d:%d:%x\n",
+ fwrev, phba->sli_rev, if_type, sli_family);
+
+ return len;
}
/**
@@ -488,6 +501,34 @@ lpfc_link_state_show(struct device *dev, struct device_attribute *attr,
}
/**
+ * lpfc_sli4_protocol_show - Return the fip mode of the HBA
+ * @dev: class unused variable.
+ * @attr: device attribute, not used.
+ * @buf: on return contains the module description text.
+ *
+ * Returns: size of formatted string.
+ **/
+static ssize_t
+lpfc_sli4_protocol_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ return snprintf(buf, PAGE_SIZE, "fc\n");
+
+ if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL) {
+ if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_GE)
+ return snprintf(buf, PAGE_SIZE, "fcoe\n");
+ if (phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC)
+ return snprintf(buf, PAGE_SIZE, "fc\n");
+ }
+ return snprintf(buf, PAGE_SIZE, "unknown\n");
+}
+
+/**
* lpfc_link_state_store - Transition the link_state on an HBA port
* @dev: class device that is converted into a Scsi_host.
* @attr: device attribute, not used.
@@ -773,7 +814,12 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
* the readiness after performing a firmware reset.
*
* Returns:
- * zero for success
+ * zero for success, -EPERM when the port lacks the privilege to perform the
+ * reset, -EIO when the port times out while recovering from the reset.
+ *
+ * Note:
+ * As the caller interprets the return code by value, be careful when making
+ * changes or additions to the return codes.
**/
int
lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
@@ -826,9 +872,11 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
{
struct completion online_compl;
struct pci_dev *pdev = phba->pcidev;
+ uint32_t before_fc_flag;
+ uint32_t sriov_nr_virtfn;
uint32_t reg_val;
- int status = 0;
- int rc;
+ int status = 0, rc = 0;
+ int job_posted = 1, sriov_err;
if (!phba->cfg_enable_hba_reset)
return -EACCES;
@@ -838,6 +886,10 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
LPFC_SLI_INTF_IF_TYPE_2))
return -EPERM;
+ /* Keep state if we need to restore back */
+ before_fc_flag = phba->pport->fc_flag;
+ sriov_nr_virtfn = phba->cfg_sriov_nr_virtfn;
+
/* Disable SR-IOV virtual functions if enabled */
if (phba->cfg_sriov_nr_virtfn) {
pci_disable_sriov(pdev);
@@ -869,21 +921,44 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
/* delay driver action following IF_TYPE_2 reset */
rc = lpfc_sli4_pdev_status_reg_wait(phba);
- if (rc)
+ if (rc == -EPERM) {
+		/* no privilege for reset, restore if needed */
+ if (before_fc_flag & FC_OFFLINE_MODE)
+ goto out;
+ } else if (rc == -EIO) {
+ /* reset failed, there is nothing more we can do */
return rc;
+ }
+
+ /* keep the original port state */
+ if (before_fc_flag & FC_OFFLINE_MODE)
+ goto out;
init_completion(&online_compl);
- rc = lpfc_workq_post_event(phba, &status, &online_compl,
- LPFC_EVT_ONLINE);
- if (rc == 0)
- return -ENOMEM;
+ job_posted = lpfc_workq_post_event(phba, &status, &online_compl,
+ LPFC_EVT_ONLINE);
+ if (!job_posted)
+ goto out;
wait_for_completion(&online_compl);
- if (status != 0)
- return -EIO;
+out:
+ /* in any case, restore the virtual functions enabled as before */
+ if (sriov_nr_virtfn) {
+ sriov_err =
+ lpfc_sli_probe_sriov_nr_virtfn(phba, sriov_nr_virtfn);
+ if (!sriov_err)
+ phba->cfg_sriov_nr_virtfn = sriov_nr_virtfn;
+ }
- return 0;
+ /* return proper error code */
+ if (!rc) {
+ if (!job_posted)
+ rc = -ENOMEM;
+ else if (status)
+ rc = -EIO;
+ }
+ return rc;
}
/**
@@ -955,33 +1030,38 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
struct lpfc_hba *phba = vport->phba;
struct completion online_compl;
- int status=0;
+ char *board_mode_str = NULL;
+ int status = 0;
int rc;
- if (!phba->cfg_enable_hba_reset)
- return -EACCES;
+ if (!phba->cfg_enable_hba_reset) {
+ status = -EACCES;
+ goto board_mode_out;
+ }
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "3050 lpfc_board_mode set to %s\n", buf);
+ "3050 lpfc_board_mode set to %s\n", buf);
init_completion(&online_compl);
if(strncmp(buf, "online", sizeof("online") - 1) == 0) {
rc = lpfc_workq_post_event(phba, &status, &online_compl,
LPFC_EVT_ONLINE);
- if (rc == 0)
- return -ENOMEM;
+ if (rc == 0) {
+ status = -ENOMEM;
+ goto board_mode_out;
+ }
wait_for_completion(&online_compl);
} else if (strncmp(buf, "offline", sizeof("offline") - 1) == 0)
status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
else if (strncmp(buf, "warm", sizeof("warm") - 1) == 0)
if (phba->sli_rev == LPFC_SLI_REV4)
- return -EINVAL;
+ status = -EINVAL;
else
status = lpfc_do_offline(phba, LPFC_EVT_WARM_START);
else if (strncmp(buf, "error", sizeof("error") - 1) == 0)
if (phba->sli_rev == LPFC_SLI_REV4)
- return -EINVAL;
+ status = -EINVAL;
else
status = lpfc_do_offline(phba, LPFC_EVT_KILL);
else if (strncmp(buf, "dump", sizeof("dump") - 1) == 0)
@@ -991,12 +1071,21 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
else if (strncmp(buf, "dv_reset", sizeof("dv_reset") - 1) == 0)
status = lpfc_sli4_pdev_reg_request(phba, LPFC_DV_RESET);
else
- return -EINVAL;
+ status = -EINVAL;
+board_mode_out:
if (!status)
return strlen(buf);
- else
+ else {
+ board_mode_str = strchr(buf, '\n');
+ if (board_mode_str)
+ *board_mode_str = '\0';
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "3097 Failed \"%s\", status(%d), "
+ "fc_flag(x%x)\n",
+ buf, status, phba->pport->fc_flag);
return status;
+ }
}
/**
@@ -1942,6 +2031,7 @@ static DEVICE_ATTR(lpfc_fips_rev, S_IRUGO, lpfc_fips_rev_show, NULL);
static DEVICE_ATTR(lpfc_dss, S_IRUGO, lpfc_dss_show, NULL);
static DEVICE_ATTR(lpfc_sriov_hw_max_virtfn, S_IRUGO,
lpfc_sriov_hw_max_virtfn_show, NULL);
+static DEVICE_ATTR(protocol, S_IRUGO, lpfc_sli4_protocol_show, NULL);
static char *lpfc_soft_wwn_key = "C99G71SL8032A";
@@ -2687,6 +2777,14 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr,
if (val >= 0 && val <= 6) {
prev_val = phba->cfg_topology;
phba->cfg_topology = val;
+ if (phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G &&
+ val == 4) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
+ "3113 Loop mode not supported at speed %d\n",
+ phba->cfg_link_speed);
+ phba->cfg_topology = prev_val;
+ return -EINVAL;
+ }
if (nolip)
return strlen(buf);
@@ -3132,6 +3230,14 @@ lpfc_link_speed_store(struct device *dev, struct device_attribute *attr,
val);
return -EINVAL;
}
+ if (val == LPFC_USER_LINK_SPEED_16G &&
+ phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3112 lpfc_link_speed attribute cannot be set "
+ "to %d. Speed is not supported in loop mode.\n",
+ val);
+ return -EINVAL;
+ }
if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
(LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
prev_val = phba->cfg_link_speed;
@@ -3176,6 +3282,13 @@ lpfc_param_show(link_speed)
static int
lpfc_link_speed_init(struct lpfc_hba *phba, int val)
{
+ if (val == LPFC_USER_LINK_SPEED_16G && phba->cfg_topology == 4) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3111 lpfc_link_speed of %d cannot "
+ "support loop mode, setting topology to default.\n",
+ val);
+ phba->cfg_topology = 0;
+ }
if ((val >= 0) && (val <= LPFC_USER_LINK_SPEED_MAX) &&
(LPFC_USER_LINK_SPEED_BITMAP & (1 << val))) {
phba->cfg_link_speed = val;
@@ -3830,6 +3943,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_fips_rev,
&dev_attr_lpfc_dss,
&dev_attr_lpfc_sriov_hw_max_virtfn,
+ &dev_attr_protocol,
NULL,
};
@@ -3988,23 +4102,6 @@ static struct bin_attribute sysfs_ctlreg_attr = {
};
/**
- * sysfs_mbox_idle - frees the sysfs mailbox
- * @phba: lpfc_hba pointer
- **/
-static void
-sysfs_mbox_idle(struct lpfc_hba *phba)
-{
- phba->sysfs_mbox.state = SMBOX_IDLE;
- phba->sysfs_mbox.offset = 0;
-
- if (phba->sysfs_mbox.mbox) {
- mempool_free(phba->sysfs_mbox.mbox,
- phba->mbox_mem_pool);
- phba->sysfs_mbox.mbox = NULL;
- }
-}
-
-/**
* sysfs_mbox_write - Write method for writing information via mbox
* @filp: open sysfs file
* @kobj: kernel kobject that contains the kernel class device.
@@ -4014,71 +4111,18 @@ sysfs_mbox_idle(struct lpfc_hba *phba)
* @count: bytes to transfer.
*
* Description:
- * Accessed via /sys/class/scsi_host/hostxxx/mbox.
- * Uses the sysfs mbox to send buf contents to the adapter.
+ * Deprecated function. All mailbox access from user space is performed via the
+ * bsg interface.
*
* Returns:
- * -ERANGE off and count combo out of range
- * -EINVAL off, count or buff address invalid
- * zero if count is zero
- * -EPERM adapter is offline
- * -ENOMEM failed to allocate memory for the mail box
- * -EAGAIN offset, state or mbox is NULL
- * count number of bytes transferred
+ * -EPERM operation not permitted
**/
static ssize_t
sysfs_mbox_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
- struct lpfcMboxq *mbox = NULL;
-
- if ((count + off) > MAILBOX_CMD_SIZE)
- return -ERANGE;
-
- if (off % 4 || count % 4 || (unsigned long)buf % 4)
- return -EINVAL;
-
- if (count == 0)
- return 0;
-
- if (off == 0) {
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
- return -ENOMEM;
- memset(mbox, 0, sizeof (LPFC_MBOXQ_t));
- }
-
- spin_lock_irq(&phba->hbalock);
-
- if (off == 0) {
- if (phba->sysfs_mbox.mbox)
- mempool_free(mbox, phba->mbox_mem_pool);
- else
- phba->sysfs_mbox.mbox = mbox;
- phba->sysfs_mbox.state = SMBOX_WRITING;
- } else {
- if (phba->sysfs_mbox.state != SMBOX_WRITING ||
- phba->sysfs_mbox.offset != off ||
- phba->sysfs_mbox.mbox == NULL) {
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return -EAGAIN;
- }
- }
-
- memcpy((uint8_t *) &phba->sysfs_mbox.mbox->u.mb + off,
- buf, count);
-
- phba->sysfs_mbox.offset = off + count;
-
- spin_unlock_irq(&phba->hbalock);
-
- return count;
+ return -EPERM;
}
/**
@@ -4091,201 +4135,18 @@ sysfs_mbox_write(struct file *filp, struct kobject *kobj,
* @count: bytes to transfer.
*
* Description:
- * Accessed via /sys/class/scsi_host/hostxxx/mbox.
- * Uses the sysfs mbox to receive data from to the adapter.
+ * Deprecated function. All mailbox access from user space is performed via the
+ * bsg interface.
*
* Returns:
- * -ERANGE off greater than mailbox command size
- * -EINVAL off, count or buff address invalid
- * zero if off and count are zero
- * -EACCES adapter over temp
- * -EPERM garbage can value to catch a multitude of errors
- * -EAGAIN management IO not permitted, state or off error
- * -ETIME mailbox timeout
- * -ENODEV mailbox error
- * count number of bytes transferred
+ * -EPERM operation not permitted
**/
static ssize_t
sysfs_mbox_read(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buf, loff_t off, size_t count)
{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct Scsi_Host *shost = class_to_shost(dev);
- struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
- struct lpfc_hba *phba = vport->phba;
- LPFC_MBOXQ_t *mboxq;
- MAILBOX_t *pmb;
- uint32_t mbox_tmo;
- int rc;
-
- if (off > MAILBOX_CMD_SIZE)
- return -ERANGE;
-
- if ((count + off) > MAILBOX_CMD_SIZE)
- count = MAILBOX_CMD_SIZE - off;
-
- if (off % 4 || count % 4 || (unsigned long)buf % 4)
- return -EINVAL;
-
- if (off && count == 0)
- return 0;
-
- spin_lock_irq(&phba->hbalock);
-
- if (phba->over_temp_state == HBA_OVER_TEMP) {
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return -EACCES;
- }
-
- if (off == 0 &&
- phba->sysfs_mbox.state == SMBOX_WRITING &&
- phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
- mboxq = (LPFC_MBOXQ_t *)&phba->sysfs_mbox.mbox;
- pmb = &mboxq->u.mb;
- switch (pmb->mbxCommand) {
- /* Offline only */
- case MBX_INIT_LINK:
- case MBX_DOWN_LINK:
- case MBX_CONFIG_LINK:
- case MBX_CONFIG_RING:
- case MBX_RESET_RING:
- case MBX_UNREG_LOGIN:
- case MBX_CLEAR_LA:
- case MBX_DUMP_CONTEXT:
- case MBX_RUN_DIAGS:
- case MBX_RESTART:
- case MBX_SET_MASK:
- case MBX_SET_DEBUG:
- if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
- printk(KERN_WARNING "mbox_read:Command 0x%x "
- "is illegal in on-line state\n",
- pmb->mbxCommand);
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return -EPERM;
- }
- case MBX_WRITE_NV:
- case MBX_WRITE_VPARMS:
- case MBX_LOAD_SM:
- case MBX_READ_NV:
- case MBX_READ_CONFIG:
- case MBX_READ_RCONFIG:
- case MBX_READ_STATUS:
- case MBX_READ_XRI:
- case MBX_READ_REV:
- case MBX_READ_LNK_STAT:
- case MBX_DUMP_MEMORY:
- case MBX_DOWN_LOAD:
- case MBX_UPDATE_CFG:
- case MBX_KILL_BOARD:
- case MBX_LOAD_AREA:
- case MBX_LOAD_EXP_ROM:
- case MBX_BEACON:
- case MBX_DEL_LD_ENTRY:
- case MBX_SET_VARIABLE:
- case MBX_WRITE_WWN:
- case MBX_PORT_CAPABILITIES:
- case MBX_PORT_IOV_CONTROL:
- break;
- case MBX_SECURITY_MGMT:
- case MBX_AUTH_PORT:
- if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
- printk(KERN_WARNING "mbox_read:Command 0x%x "
- "is not permitted\n", pmb->mbxCommand);
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return -EPERM;
- }
- break;
- case MBX_READ_SPARM64:
- case MBX_READ_TOPOLOGY:
- case MBX_REG_LOGIN:
- case MBX_REG_LOGIN64:
- case MBX_CONFIG_PORT:
- case MBX_RUN_BIU_DIAG:
- printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
- pmb->mbxCommand);
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return -EPERM;
- default:
- printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
- pmb->mbxCommand);
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return -EPERM;
- }
-
- /* If HBA encountered an error attention, allow only DUMP
- * or RESTART mailbox commands until the HBA is restarted.
- */
- if (phba->pport->stopped &&
- pmb->mbxCommand != MBX_DUMP_MEMORY &&
- pmb->mbxCommand != MBX_RESTART &&
- pmb->mbxCommand != MBX_WRITE_VPARMS &&
- pmb->mbxCommand != MBX_WRITE_WWN)
- lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
- "1259 mbox: Issued mailbox cmd "
- "0x%x while in stopped state.\n",
- pmb->mbxCommand);
-
- phba->sysfs_mbox.mbox->vport = vport;
-
- /* Don't allow mailbox commands to be sent when blocked
- * or when in the middle of discovery
- */
- if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return -EAGAIN;
- }
-
- if ((vport->fc_flag & FC_OFFLINE_MODE) ||
- (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
-
- spin_unlock_irq(&phba->hbalock);
- rc = lpfc_sli_issue_mbox (phba,
- phba->sysfs_mbox.mbox,
- MBX_POLL);
- spin_lock_irq(&phba->hbalock);
-
- } else {
- spin_unlock_irq(&phba->hbalock);
- mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
- rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
- spin_lock_irq(&phba->hbalock);
- }
-
- if (rc != MBX_SUCCESS) {
- if (rc == MBX_TIMEOUT) {
- phba->sysfs_mbox.mbox = NULL;
- }
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
- }
- phba->sysfs_mbox.state = SMBOX_READING;
- }
- else if (phba->sysfs_mbox.offset != off ||
- phba->sysfs_mbox.state != SMBOX_READING) {
- printk(KERN_WARNING "mbox_read: Bad State\n");
- sysfs_mbox_idle(phba);
- spin_unlock_irq(&phba->hbalock);
- return -EAGAIN;
- }
-
- memcpy(buf, (uint8_t *) &pmb + off, count);
-
- phba->sysfs_mbox.offset = off + count;
-
- if (phba->sysfs_mbox.offset == MAILBOX_CMD_SIZE)
- sysfs_mbox_idle(phba);
-
- spin_unlock_irq(&phba->hbalock);
-
- return count;
+ return -EPERM;
}
static struct bin_attribute sysfs_mbox_attr = {
@@ -4429,8 +4290,13 @@ lpfc_get_host_port_state(struct Scsi_Host *shost)
case LPFC_LINK_UP:
case LPFC_CLEAR_LA:
case LPFC_HBA_READY:
- /* Links up, beyond this port_type reports state */
- fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ /* Link is up; report port state based on vport readiness */
+ if (vport->port_state < LPFC_VPORT_READY)
+ fc_host_port_state(shost) =
+ FC_PORTSTATE_BYPASSED;
+ else
+ fc_host_port_state(shost) =
+ FC_PORTSTATE_ONLINE;
break;
case LPFC_HBA_ERROR:
fc_host_port_state(shost) = FC_PORTSTATE_ERROR;
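
For illustration only (not part of the patch): the lpfc_attr.c hunk above splits the link-up case of lpfc_get_host_port_state() so that a vport which has not yet reached LPFC_VPORT_READY is reported as FC_PORTSTATE_BYPASSED rather than unconditionally ONLINE. A minimal standalone C sketch of that decision, with simplified enums standing in for the driver's types:

/* Standalone editorial sketch -- simplified stand-ins for the driver's enums. */
#include <stdio.h>

enum vport_state  { VPORT_UNKNOWN, VPORT_FLOGI, VPORT_READY };  /* illustrative ordering */
enum fc_portstate { PORTSTATE_BYPASSED, PORTSTATE_ONLINE };

/* Link is up: a vport still short of READY is reported as bypassed. */
static enum fc_portstate report_link_up_state(enum vport_state vs)
{
	return (vs < VPORT_READY) ? PORTSTATE_BYPASSED : PORTSTATE_ONLINE;
}

int main(void)
{
	printf("%d %d\n", report_link_up_state(VPORT_FLOGI),   /* 0: bypassed */
			  report_link_up_state(VPORT_READY));   /* 1: online   */
	return 0;
}
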
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 6760c69..56a86ba 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -916,9 +916,11 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
} else {
switch (cmd) {
case ELX_LOOPBACK_DATA:
- diag_cmd_data_free(phba,
- (struct lpfc_dmabufext *)
- dmabuf);
+ if (phba->sli_rev <
+ LPFC_SLI_REV4)
+ diag_cmd_data_free(phba,
+ (struct lpfc_dmabufext
+ *)dmabuf);
break;
case ELX_LOOPBACK_XRI_SETUP:
if ((phba->sli_rev ==
@@ -1000,7 +1002,8 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
error_ct_unsol_exit:
if (!list_empty(&head))
list_del(&head);
- if (evt_req_id == SLI_CT_ELX_LOOPBACK)
+ if ((phba->sli_rev < LPFC_SLI_REV4) &&
+ (evt_req_id == SLI_CT_ELX_LOOPBACK))
return 0;
return 1;
}
@@ -1566,7 +1569,7 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
struct diag_mode_set *loopback_mode;
uint32_t link_flags;
uint32_t timeout;
- LPFC_MBOXQ_t *pmboxq;
+ LPFC_MBOXQ_t *pmboxq = NULL;
int mbxstatus = MBX_SUCCESS;
int i = 0;
int rc = 0;
@@ -1615,7 +1618,6 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
rc = -ETIMEDOUT;
goto loopback_mode_exit;
}
-
msleep(10);
}
@@ -1635,7 +1637,9 @@ lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
rc = -ENODEV;
else {
+ spin_lock_irq(&phba->hbalock);
phba->link_flag |= LS_LOOPBACK_MODE;
+ spin_unlock_irq(&phba->hbalock);
/* wait for the link attention interrupt */
msleep(100);
@@ -1659,7 +1663,7 @@ loopback_mode_exit:
/*
* Let SLI layer release mboxq if mbox command completed after timeout.
*/
- if (mbxstatus != MBX_TIMEOUT)
+ if (pmboxq && mbxstatus != MBX_TIMEOUT)
mempool_free(pmboxq, phba->mbox_mem_pool);
job_error:
@@ -1700,11 +1704,16 @@ lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
rc = -ENOMEM;
goto link_diag_state_set_out;
}
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
+ diag, phba->sli4_hba.lnk_info.lnk_tp,
+ phba->sli4_hba.lnk_info.lnk_no);
+
link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
- phba->sli4_hba.link_state.number);
+ phba->sli4_hba.lnk_info.lnk_no);
bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
- phba->sli4_hba.link_state.type);
+ phba->sli4_hba.lnk_info.lnk_tp);
if (diag)
bf_set(lpfc_mbx_set_diag_state_diag,
&link_diag_state->u.req, 1);
@@ -1727,6 +1736,79 @@ link_diag_state_set_out:
}
/**
+ * lpfc_sli4_bsg_set_internal_loopback - set sli4 internal loopback diagnostic
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is responsible for issuing an SLI4 mailbox command to set
+ * up the internal loopback diagnostic.
+ */
+static int
+lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *pmboxq;
+ uint32_t req_len, alloc_len;
+ struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
+ int mbxstatus = MBX_SUCCESS, rc = 0;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq)
+ return -ENOMEM;
+ req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+ LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
+ req_len, LPFC_SLI4_MBX_EMBED);
+ if (alloc_len != req_len) {
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return -ENOMEM;
+ }
+ link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
+ bf_set(lpfc_mbx_set_diag_state_link_num,
+ &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no);
+ bf_set(lpfc_mbx_set_diag_state_link_type,
+ &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
+ bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
+ LPFC_DIAG_LOOPBACK_TYPE_SERDES);
+
+ mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
+ if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3127 Failed setup loopback mode mailbox "
+ "command, rc:x%x, status:x%x\n", mbxstatus,
+ pmboxq->u.mb.mbxStatus);
+ rc = -ENODEV;
+ }
+ if (pmboxq && (mbxstatus != MBX_TIMEOUT))
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
+ * @phba: Pointer to HBA context object.
+ *
+ * This function sets up the SLI4 FC port registrations for a diagnostic run,
+ * which includes all the rpis, the vfi, and the vpi.
+ */
+static int
+lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
+{
+ int rc;
+
+ if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3136 Port still had vfi registered: "
+ "mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
+ phba->pport->fc_myDID, phba->fcf.fcfi,
+ phba->sli4_hba.vfi_ids[phba->pport->vfi],
+ phba->vpi_ids[phba->pport->vpi]);
+ return -EINVAL;
+ }
+ rc = lpfc_issue_reg_vfi(phba->pport);
+ return rc;
+}
+
+/**
* lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
* @phba: Pointer to HBA context object.
* @job: LPFC_BSG_VENDOR_DIAG_MODE
@@ -1738,10 +1820,8 @@ static int
lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
{
struct diag_mode_set *loopback_mode;
- uint32_t link_flags, timeout, req_len, alloc_len;
- struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
- LPFC_MBOXQ_t *pmboxq = NULL;
- int mbxstatus = MBX_SUCCESS, i, rc = 0;
+ uint32_t link_flags, timeout;
+ int i, rc = 0;
/* no data to return just the return code */
job->reply->reply_payload_rcv_len = 0;
@@ -1762,65 +1842,100 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
if (rc)
goto job_error;
+ /* indicate we are in loopback diagnostic mode */
+ spin_lock_irq(&phba->hbalock);
+ phba->link_flag |= LS_LOOPBACK_MODE;
+ spin_unlock_irq(&phba->hbalock);
+
+ /* reset port to start from scratch */
+ rc = lpfc_selective_reset(phba);
+ if (rc)
+ goto job_error;
+
/* bring the link to diagnostic mode */
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3129 Bring link to diagnostic state.\n");
loopback_mode = (struct diag_mode_set *)
job->request->rqst_data.h_vendor.vendor_cmd;
link_flags = loopback_mode->type;
timeout = loopback_mode->timeout * 100;
rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
- if (rc)
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3130 Failed to bring link to diagnostic "
+ "state, rc:x%x\n", rc);
goto loopback_mode_exit;
+ }
/* wait for link down before proceeding */
i = 0;
while (phba->link_state != LPFC_LINK_DOWN) {
if (i++ > timeout) {
rc = -ETIMEDOUT;
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3131 Timeout waiting for link to "
+ "diagnostic mode, timeout:%d ms\n",
+ timeout * 10);
goto loopback_mode_exit;
}
msleep(10);
}
+
/* set up loopback mode */
- pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!pmboxq) {
- rc = -ENOMEM;
- goto loopback_mode_exit;
- }
- req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
- sizeof(struct lpfc_sli4_cfg_mhdr));
- alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
- LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
- req_len, LPFC_SLI4_MBX_EMBED);
- if (alloc_len != req_len) {
- rc = -ENOMEM;
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3132 Set up loopback mode:x%x\n", link_flags);
+
+ if (link_flags == INTERNAL_LOOP_BACK)
+ rc = lpfc_sli4_bsg_set_internal_loopback(phba);
+ else if (link_flags == EXTERNAL_LOOP_BACK)
+ rc = lpfc_hba_init_link_fc_topology(phba,
+ FLAGS_TOPOLOGY_MODE_PT_PT,
+ MBX_NOWAIT);
+ else {
+ rc = -EINVAL;
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "3141 Loopback mode:x%x not supported\n",
+ link_flags);
goto loopback_mode_exit;
}
- link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
- bf_set(lpfc_mbx_set_diag_state_link_num,
- &link_diag_loopback->u.req, phba->sli4_hba.link_state.number);
- bf_set(lpfc_mbx_set_diag_state_link_type,
- &link_diag_loopback->u.req, phba->sli4_hba.link_state.type);
- if (link_flags == INTERNAL_LOOP_BACK)
- bf_set(lpfc_mbx_set_diag_lpbk_type,
- &link_diag_loopback->u.req,
- LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
- else
- bf_set(lpfc_mbx_set_diag_lpbk_type,
- &link_diag_loopback->u.req,
- LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL);
- mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
- if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
- rc = -ENODEV;
- else {
- phba->link_flag |= LS_LOOPBACK_MODE;
+ if (!rc) {
/* wait for the link attention interrupt */
msleep(100);
i = 0;
+ while (phba->link_state < LPFC_LINK_UP) {
+ if (i++ > timeout) {
+ rc = -ETIMEDOUT;
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3137 Timeout waiting for link up "
+ "in loopback mode, timeout:%d ms\n",
+ timeout * 10);
+ break;
+ }
+ msleep(10);
+ }
+ }
+
+ /* port resource registration setup for loopback diagnostic */
+ if (!rc) {
+ /* set up a non-zero myDID for loopback test */
+ phba->pport->fc_myDID = 1;
+ rc = lpfc_sli4_diag_fcport_reg_setup(phba);
+ } else
+ goto loopback_mode_exit;
+
+ if (!rc) {
+ /* wait for the port to become ready */
+ msleep(100);
+ i = 0;
while (phba->link_state != LPFC_HBA_READY) {
if (i++ > timeout) {
rc = -ETIMEDOUT;
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3133 Timeout waiting for port "
+ "loopback mode ready, timeout:%d ms\n",
+ timeout * 10);
break;
}
msleep(10);
@@ -1828,14 +1943,14 @@ lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
}
loopback_mode_exit:
+ /* clear loopback diagnostic mode */
+ if (rc) {
+ spin_lock_irq(&phba->hbalock);
+ phba->link_flag &= ~LS_LOOPBACK_MODE;
+ spin_unlock_irq(&phba->hbalock);
+ }
lpfc_bsg_diag_mode_exit(phba);
- /*
- * Let SLI layer release mboxq if mbox command completed after timeout.
- */
- if (pmboxq && (mbxstatus != MBX_TIMEOUT))
- mempool_free(pmboxq, phba->mbox_mem_pool);
-
job_error:
/* make error code available to userspace */
job->reply->result = rc;
@@ -1879,7 +1994,6 @@ lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
rc = -ENODEV;
return rc;
-
}
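
For illustration only (not part of the patch): the rework of lpfc_sli4_bsg_diag_loopback_mode() above replaces the open-coded LINK_DIAG_LOOPBACK mailbox with a staged bring-up: selective reset, link into diagnostic state, internal (SERDES) or external (point-to-point) loopback, then port resource registration with a non-zero DID. A compressed standalone C sketch of that ordering; the stub functions are stand-ins for the driver helpers named in the hunk and for its polling loops:

/* Standalone editorial sketch -- models the ordering of the SLI-4 loopback bring-up. */
enum { LOOPBACK_INTERNAL, LOOPBACK_EXTERNAL };

static int selective_reset(void)          { return 0; }           /* lpfc_selective_reset()           */
static int set_link_diag_state(int on)    { (void)on; return 0; } /* ..._set_link_diag_state()        */
static int set_internal_loopback(void)    { return 0; }           /* ..._set_internal_loopback()      */
static int init_link_pt2pt(void)          { return 0; }           /* lpfc_hba_init_link_fc_topology() */
static int reg_diag_port_resources(void)  { return 0; }           /* ..._diag_fcport_reg_setup()      */
static int wait_link_down(void)           { return 0; }           /* 10 ms polling loops              */
static int wait_link_up(void)             { return 0; }
static int wait_hba_ready(void)           { return 0; }

static int sli4_loopback_bringup(int type)
{
	int rc;

	rc = selective_reset();                  /* start from scratch */
	if (!rc) rc = set_link_diag_state(1);
	if (!rc) rc = wait_link_down();          /* link must drop before loopback setup */
	if (!rc) rc = (type == LOOPBACK_INTERNAL) ? set_internal_loopback()
						  : init_link_pt2pt();
	if (!rc) rc = wait_link_up();            /* wait for the link attention */
	if (!rc) rc = reg_diag_port_resources(); /* vfi/vpi/rpi with a non-zero DID */
	if (!rc) rc = wait_hba_ready();
	return rc;                               /* caller clears LS_LOOPBACK_MODE on error */
}

int main(void) { return sli4_loopback_bringup(LOOPBACK_INTERNAL); }
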
/**
@@ -1895,7 +2009,9 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
struct Scsi_Host *shost;
struct lpfc_vport *vport;
struct lpfc_hba *phba;
- int rc;
+ struct diag_mode_set *loopback_mode_end_cmd;
+ uint32_t timeout;
+ int rc, i;
shost = job->shost;
if (!shost)
@@ -1913,11 +2029,47 @@ lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
LPFC_SLI_INTF_IF_TYPE_2)
return -ENODEV;
+ /* clear loopback diagnostic mode */
+ spin_lock_irq(&phba->hbalock);
+ phba->link_flag &= ~LS_LOOPBACK_MODE;
+ spin_unlock_irq(&phba->hbalock);
+ loopback_mode_end_cmd = (struct diag_mode_set *)
+ job->request->rqst_data.h_vendor.vendor_cmd;
+ timeout = loopback_mode_end_cmd->timeout * 100;
+
rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+ "3139 Failed to bring link to diagnostic "
+ "state, rc:x%x\n", rc);
+ goto loopback_mode_end_exit;
+ }
- if (!rc)
- rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+ /* wait for link down before proceeding */
+ i = 0;
+ while (phba->link_state != LPFC_LINK_DOWN) {
+ if (i++ > timeout) {
+ rc = -ETIMEDOUT;
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3140 Timeout waiting for link to "
+ "diagnostic mode_end, timeout:%d ms\n",
+ timeout * 10);
+ /* there is nothing much we can do here */
+ break;
+ }
+ msleep(10);
+ }
+
+ /* reset port resource registrations */
+ rc = lpfc_selective_reset(phba);
+ phba->pport->fc_myDID = 0;
+loopback_mode_end_exit:
+ /* make return code available to userspace */
+ job->reply->result = rc;
+ /* complete the job back to userspace if no error */
+ if (rc == 0)
+ job->job_done(job);
return rc;
}
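
For illustration only (not part of the patch): both the bring-up above and the new lpfc_sli4_bsg_diag_mode_end() poll the link state in 10 ms steps until a target state is reached or the caller-supplied timeout (given in 10 ms ticks) expires. The open-coded loops follow one pattern, shown here as a standalone userspace C sketch; usleep() stands in for the kernel's msleep():

/* Standalone editorial sketch -- the 10 ms polling loop used by the diagnostic paths. */
#include <unistd.h>

static int read_link_state(void) { return 0; }   /* stub for phba->link_state */

/* Poll until read_link_state() == target or 'timeout' 10 ms ticks elapse. */
static int wait_for_link_state(int target, unsigned int timeout)
{
	unsigned int i = 0;

	while (read_link_state() != target) {
		if (i++ > timeout)
			return -1;               /* the driver returns -ETIMEDOUT */
		usleep(10 * 1000);               /* msleep(10) in the driver */
	}
	return 0;
}

int main(void) { return wait_for_link_state(0, 1); }
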
@@ -2012,9 +2164,9 @@ lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
}
run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
- phba->sli4_hba.link_state.number);
+ phba->sli4_hba.lnk_info.lnk_no);
bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
- phba->sli4_hba.link_state.type);
+ phba->sli4_hba.lnk_info.lnk_tp);
bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
link_diag_test_cmd->test_id);
bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
@@ -2091,10 +2243,18 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
if (!mbox)
return -ENOMEM;
- if (phba->sli_rev == LPFC_SLI_REV4)
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
+ (uint8_t *)&phba->pport->fc_sparam,
+ mbox, *rpi);
+ else {
*rpi = lpfc_sli4_alloc_rpi(phba);
- status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
- (uint8_t *)&phba->pport->fc_sparam, mbox, *rpi);
+ status = lpfc_reg_rpi(phba, phba->pport->vpi,
+ phba->pport->fc_myDID,
+ (uint8_t *)&phba->pport->fc_sparam,
+ mbox, *rpi);
+ }
+
if (status) {
mempool_free(mbox, phba->mbox_mem_pool);
if (phba->sli_rev == LPFC_SLI_REV4)
@@ -2117,7 +2277,8 @@ static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
return -ENODEV;
}
- *rpi = mbox->u.mb.un.varWords[0];
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ *rpi = mbox->u.mb.un.varWords[0];
lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
kfree(dmabuff);
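
For illustration only (not part of the patch): the hunk above makes lpfcdiag_loop_self_reg() obtain the RPI differently per SLI revision: on SLI-3 the HBA assigns the RPI and the driver reads it back from the mailbox completion (varWords[0]), while on SLI-4 the driver allocates the RPI up front with lpfc_sli4_alloc_rpi() and registers it against the vport's vpi. A reduced standalone C sketch of that branch; the stubs stand in for the mailbox plumbing:

/* Standalone editorial sketch -- picking the RPI per SLI revision. */
#include <stdint.h>

#define SLI_REV4 4

static uint16_t alloc_rpi_sli4(void) { return 42; }                      /* stub */
static int issue_reg_rpi(uint16_t vpi, uint16_t rpi, uint16_t *cmpl_rpi)
{
	(void)vpi; (void)rpi;
	*cmpl_rpi = 7;                                                    /* stub */
	return 0;
}

static int diag_self_reg(int sli_rev, uint16_t vpi, uint16_t *rpi)
{
	uint16_t mbox_rpi;
	int rc;

	if (sli_rev < SLI_REV4) {
		/* SLI-3: the HBA assigns the RPI; read it from the completion. */
		rc = issue_reg_rpi(0, 0, &mbox_rpi);
		if (!rc)
			*rpi = mbox_rpi;
	} else {
		/* SLI-4: allocate the RPI first and register it under the vpi. */
		*rpi = alloc_rpi_sli4();
		rc = issue_reg_rpi(vpi, *rpi, &mbox_rpi);
	}
	return rc;
}

int main(void) { uint16_t rpi; return diag_self_reg(SLI_REV4, 1, &rpi); }
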
@@ -2142,7 +2303,12 @@ static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
if (mbox == NULL)
return -ENOMEM;
- lpfc_unreg_login(phba, 0, rpi, mbox);
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ lpfc_unreg_login(phba, 0, rpi, mbox);
+ else
+ lpfc_unreg_login(phba, phba->pport->vpi,
+ phba->sli4_hba.rpi_ids[rpi], mbox);
+
status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
@@ -2630,15 +2796,15 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
uint32_t full_size;
size_t segment_len = 0, segment_offset = 0, current_offset = 0;
uint16_t rpi = 0;
- struct lpfc_iocbq *cmdiocbq, *rspiocbq;
- IOCB_t *cmd, *rsp;
+ struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
+ IOCB_t *cmd, *rsp = NULL;
struct lpfc_sli_ct_request *ctreq;
struct lpfc_dmabuf *txbmp;
struct ulp_bde64 *txbpl = NULL;
struct lpfc_dmabufext *txbuffer = NULL;
struct list_head head;
struct lpfc_dmabuf *curr;
- uint16_t txxri, rxxri;
+ uint16_t txxri = 0, rxxri;
uint32_t num_bde;
uint8_t *ptr = NULL, *rx_databuf = NULL;
int rc = 0;
@@ -2665,7 +2831,6 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
rc = -EINVAL;
goto loopback_test_exit;
}
-
diag_mode = (struct diag_mode_test *)
job->request->rqst_data.h_vendor.vendor_cmd;
@@ -2720,18 +2885,19 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
if (rc)
goto loopback_test_exit;
- rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
- if (rc) {
- lpfcdiag_loop_self_unreg(phba, rpi);
- goto loopback_test_exit;
- }
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
+ if (rc) {
+ lpfcdiag_loop_self_unreg(phba, rpi);
+ goto loopback_test_exit;
+ }
- rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
- if (rc) {
- lpfcdiag_loop_self_unreg(phba, rpi);
- goto loopback_test_exit;
+ rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
+ if (rc) {
+ lpfcdiag_loop_self_unreg(phba, rpi);
+ goto loopback_test_exit;
+ }
}
-
evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
SLI_CT_ELX_LOOPBACK);
if (!evt) {
@@ -2746,7 +2912,8 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
cmdiocbq = lpfc_sli_get_iocbq(phba);
- rspiocbq = lpfc_sli_get_iocbq(phba);
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ rspiocbq = lpfc_sli_get_iocbq(phba);
txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
if (txbmp) {
@@ -2759,14 +2926,18 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
}
}
- if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer ||
- !txbmp->virt) {
+ if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
+ rc = -ENOMEM;
+ goto err_loopback_test_exit;
+ }
+ if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
rc = -ENOMEM;
goto err_loopback_test_exit;
}
cmd = &cmdiocbq->iocb;
- rsp = &rspiocbq->iocb;
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ rsp = &rspiocbq->iocb;
INIT_LIST_HEAD(&head);
list_add_tail(&head, &txbuffer->dma.list);
@@ -2796,7 +2967,6 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
list_del(&head);
/* Build the XMIT_SEQUENCE iocb */
-
num_bde = (uint32_t)txbuffer->flag;
cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
@@ -2813,16 +2983,27 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
cmd->ulpBdeCount = 1;
cmd->ulpLe = 1;
cmd->ulpClass = CLASS3;
- cmd->ulpContext = txxri;
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ cmd->ulpContext = txxri;
+ } else {
+ cmd->un.xseq64.bdl.ulpIoTag32 = 0;
+ cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
+ cmdiocbq->context3 = txbmp;
+ cmdiocbq->sli4_xritag = NO_XRI;
+ cmd->unsli3.rcvsli3.ox_id = 0xffff;
+ }
cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
cmdiocbq->vport = phba->pport;
-
iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
rspiocbq, (phba->fc_ratov * 2) +
LPFC_DRVR_TIMEOUT);
- if ((iocb_stat != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) {
+ if ((iocb_stat != IOCB_SUCCESS) || ((phba->sli_rev < LPFC_SLI_REV4) &&
+ (rsp->ulpStatus != IOCB_SUCCESS))) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "3126 Failed loopback test issue iocb: "
+ "iocb_stat:x%x\n", iocb_stat);
rc = -EIO;
goto err_loopback_test_exit;
}
@@ -2832,9 +3013,12 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
evt->wq, !list_empty(&evt->events_to_see),
((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
evt->waiting = 0;
- if (list_empty(&evt->events_to_see))
+ if (list_empty(&evt->events_to_see)) {
rc = (time_left) ? -EINTR : -ETIMEDOUT;
- else {
+ lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
+ "3125 Not receiving unsolicited event, "
+ "rc:x%x\n", rc);
+ } else {
spin_lock_irqsave(&phba->ct_ev_lock, flags);
list_move(evt->events_to_see.prev, &evt->events_to_get);
evdat = list_entry(evt->events_to_get.prev,
@@ -2891,7 +3075,7 @@ loopback_test_exit:
job->reply->result = rc;
job->dd_data = NULL;
/* complete the job back to userspace if no error */
- if (rc == 0)
+ if (rc == IOCB_SUCCESS)
job->job_done(job);
return rc;
}
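
For illustration only (not part of the patch): in the loopback run above, the response IOCB and the tx/rx XRI setup are only needed for pre-SLI-4 ports, so the allocation and every later dereference of rsp/rspiocbq are now guarded by the same sli_rev test. The guard pattern, reduced to a standalone C sketch:

/* Standalone editorial sketch -- only allocate and inspect the response object
 * when the hardware revision actually provides one.
 */
#include <stdlib.h>

#define SLI_REV4     4
#define IOCB_SUCCESS 0

struct rsp_iocb { int ulpStatus; };

static int loopback_issue(int sli_rev)
{
	struct rsp_iocb *rsp = NULL;
	int iocb_stat = IOCB_SUCCESS;            /* stand-in for the issue-and-wait call */

	if (sli_rev < SLI_REV4)
		rsp = calloc(1, sizeof(*rsp));   /* response IOCB: pre-SLI-4 only */

	/* Only dereference rsp when it was allocated for this revision. */
	if (iocb_stat != IOCB_SUCCESS ||
	    (sli_rev < SLI_REV4 && rsp && rsp->ulpStatus != IOCB_SUCCESS)) {
		free(rsp);
		return -1;
	}
	free(rsp);
	return 0;
}

int main(void) { return loopback_issue(SLI_REV4); }
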
@@ -3078,7 +3262,9 @@ static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
&& (mb->un.varWords[1] == 1)) {
phba->wait_4_mlo_maint_flg = 1;
} else if (mb->un.varWords[0] == SETVAR_MLORST) {
+ spin_lock_irq(&phba->hbalock);
phba->link_flag &= ~LS_LOOPBACK_MODE;
+ spin_unlock_irq(&phba->hbalock);
phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
}
break;
@@ -3140,6 +3326,9 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
unsigned long flags;
uint32_t size;
int rc = 0;
+ struct lpfc_dmabuf *dmabuf;
+ struct lpfc_sli_config_mbox *sli_cfg_mbx;
+ uint8_t *pmbx;
spin_lock_irqsave(&phba->ct_ev_lock, flags);
dd_data = pmboxq->context1;
@@ -3156,7 +3345,19 @@ lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
*/
pmb = (uint8_t *)&pmboxq->u.mb;
pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
+ /* Copy the byte swapped response mailbox back to the user */
memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
+ /* if there is any non-embedded extended data copy that too */
+ dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
+ sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
+ if (!bsg_bf_get(lpfc_mbox_hdr_emb,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
+ pmbx = (uint8_t *)dmabuf->virt;
+ /* byte swap the extended data following the mailbox command */
+ lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
+ &pmbx[sizeof(MAILBOX_t)],
+ sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
+ }
job = dd_data->context_un.mbox.set_job;
if (job) {
@@ -3519,6 +3720,18 @@ lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* state change */
phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
+ /*
+ * Non-embedded mailbox subcommand data gets byte swapped here because
+ * the lower level driver code only does the first 64 mailbox words.
+ */
+ if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
+ &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
+ (nemb_tp == nemb_mse))
+ lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
+ &pmbx[sizeof(MAILBOX_t)],
+ sli_cfg_mbx->un.sli_config_emb0_subsys.
+ mse[0].buf_len);
+
rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
@@ -3575,7 +3788,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
&sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
- "2953 Handled SLI_CONFIG(mse) wr, "
+ "2953 Failed SLI_CONFIG(mse) wr, "
"ext_buf_cnt(%d) out of range(%d)\n",
ext_buf_cnt,
LPFC_MBX_SLI_CONFIG_MAX_MSE);
@@ -3593,7 +3806,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
- "2954 Handled SLI_CONFIG(hbd) wr, "
+ "2954 Failed SLI_CONFIG(hbd) wr, "
"ext_buf_cnt(%d) out of range(%d)\n",
ext_buf_cnt,
LPFC_MBX_SLI_CONFIG_MAX_HBD);
@@ -3687,6 +3900,7 @@ lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
"2956 Failed to issue SLI_CONFIG ext-buffer "
"maibox command, rc:x%x\n", rc);
rc = -EPIPE;
+ goto job_error;
}
/* wait for additoinal external buffers */
@@ -3721,7 +3935,7 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
uint32_t opcode;
int rc = SLI_CONFIG_NOT_HANDLED;
- /* state change */
+ /* state change on new multi-buffer pass-through mailbox command */
phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
@@ -3752,18 +3966,36 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
break;
default:
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
- "2959 Not handled SLI_CONFIG "
+ "2959 Reject SLI_CONFIG "
"subsys_fcoe, opcode:x%x\n",
opcode);
- rc = SLI_CONFIG_NOT_HANDLED;
+ rc = -EPERM;
+ break;
+ }
+ } else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
+ switch (opcode) {
+ case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3106 Handled SLI_CONFIG "
+ "subsys_fcoe, opcode:x%x\n",
+ opcode);
+ rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
+ nemb_mse, dmabuf);
+ break;
+ default:
+ lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
+ "3107 Reject SLI_CONFIG "
+ "subsys_fcoe, opcode:x%x\n",
+ opcode);
+ rc = -EPERM;
break;
}
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
- "2977 Handled SLI_CONFIG "
+ "2977 Reject SLI_CONFIG "
"subsys:x%d, opcode:x%x\n",
subsys, opcode);
- rc = SLI_CONFIG_NOT_HANDLED;
+ rc = -EPERM;
}
} else {
subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
@@ -3799,12 +4031,17 @@ lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
}
} else {
lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
- "2978 Handled SLI_CONFIG "
+ "2978 Not handled SLI_CONFIG "
"subsys:x%d, opcode:x%x\n",
subsys, opcode);
rc = SLI_CONFIG_NOT_HANDLED;
}
}
+
+ /* reset state when a new multi-buffer mailbox command is not handled */
+ if (rc != SLI_CONFIG_HANDLED)
+ phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
+
return rc;
}
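
For illustration only (not part of the patch): lpfc_bsg_handle_sli_cfg_mbox() above now moves the extended-buffer context to LPFC_BSG_MBOX_HOST when a new multi-buffer command arrives and drops it back to LPFC_BSG_MBOX_IDLE whenever the command is rejected or not handled, so an unknown subsystem or opcode cannot leave the state machine stuck. A reduced standalone C sketch of that entry/exit discipline:

/* Standalone editorial sketch -- unwind the pass-through state when the
 * multi-buffer command is not taken.
 */
enum bsg_mbox_state { MBOX_IDLE, MBOX_HOST, MBOX_PORT };

#define SLI_CONFIG_HANDLED     1
#define SLI_CONFIG_NOT_HANDLED 0

static enum bsg_mbox_state state = MBOX_IDLE;

static int dispatch(int known_opcode)             /* stand-in for the opcode switch */
{
	return known_opcode ? SLI_CONFIG_HANDLED : -1;   /* -EPERM in the driver */
}

static int handle_sli_cfg_mbox(int known_opcode)
{
	int rc;

	state = MBOX_HOST;                        /* new multi-buffer command */
	rc = dispatch(known_opcode);
	if (rc != SLI_CONFIG_HANDLED)
		state = MBOX_IDLE;                /* rejected or not handled: unwind */
	return rc;
}

int main(void) { handle_sli_cfg_mbox(0); return state; }   /* back to MBOX_IDLE (0) */
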
@@ -4262,11 +4499,8 @@ lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
/* extended mailbox commands will need an extended buffer */
if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
- /* any data for the device? */
- if (mbox_req->inExtWLen) {
- from = pmbx;
- ext = from + sizeof(MAILBOX_t);
- }
+ from = pmbx;
+ ext = from + sizeof(MAILBOX_t);
pmboxq->context2 = ext;
pmboxq->in_ext_byte_len =
mbox_req->inExtWLen * sizeof(uint32_t);
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index c8c2b47..edfe61f 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -96,7 +96,7 @@ struct get_mgmt_rev {
};
#define MANAGEMENT_MAJOR_REV 1
-#define MANAGEMENT_MINOR_REV 0
+#define MANAGEMENT_MINOR_REV 1
/* the MgmtRevInfo structure */
struct MgmtRevInfo {
@@ -248,6 +248,7 @@ struct lpfc_sli_config_emb1_subsys {
#define COMN_OPCODE_WRITE_OBJECT 0xAC
#define COMN_OPCODE_READ_OBJECT_LIST 0xAD
#define COMN_OPCODE_DELETE_OBJECT 0xAE
+#define COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES 0x79
uint32_t timeout;
uint32_t request_length;
uint32_t word9;
diff --git a/drivers/scsi/lpfc/lpfc_compat.h b/drivers/scsi/lpfc/lpfc_compat.h
index 75e2e56..c88e556 100644
--- a/drivers/scsi/lpfc/lpfc_compat.h
+++ b/drivers/scsi/lpfc/lpfc_compat.h
@@ -1,7 +1,7 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
- * Copyright (C) 2004-2005 Emulex. All rights reserved. *
+ * Copyright (C) 2004-2011 Emulex. All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
* www.emulex.com *
* *
@@ -82,7 +82,8 @@ lpfc_memcpy_from_slim( void *dest, void __iomem *src, unsigned int bytes)
static inline void
lpfc_memcpy_to_slim( void __iomem *dest, void *src, unsigned int bytes)
{
- __iowrite32_copy(dest, src, bytes);
+ /* convert bytes in argument list to word count for copy function */
+ __iowrite32_copy(dest, src, bytes / sizeof(uint32_t));
}
static inline void
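
For illustration only (not part of the patch): the lpfc_compat.h hunk above fixes lpfc_memcpy_to_slim() to pass a 32-bit word count to __iowrite32_copy(), whose length argument is in words, not bytes; passing bytes would copy four times the intended amount into the SLIM window. The conversion, shown as a standalone C sketch against a plain word-copy stand-in:

/* Standalone editorial sketch -- a copy routine whose length is a count of
 * 32-bit words, and a byte-oriented wrapper that converts before calling it.
 */
#include <stdint.h>
#include <stddef.h>

static void iowrite32_copy(void *to, const void *from, size_t count_in_words)
{
	const uint32_t *s = from;
	uint32_t *d = to;

	while (count_in_words--)
		*d++ = *s++;
}

static void memcpy_to_slim(void *dest, const void *src, unsigned int bytes)
{
	/* convert bytes in the argument list to a word count for the copy */
	iowrite32_copy(dest, src, bytes / sizeof(uint32_t));
}

int main(void)
{
	uint32_t src[2] = { 1, 2 }, dst[2] = { 0, 0 };

	memcpy_to_slim(dst, src, sizeof(src));
	return (dst[0] == 1 && dst[1] == 2) ? 0 : 1;
}
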
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 60f9534..26924b7 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -26,7 +26,7 @@ void lpfc_sli_read_link_ste(struct lpfc_hba *);
void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t);
void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
-int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *);
+int lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *, struct lpfcMboxq *);
void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
@@ -78,6 +78,7 @@ void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_unregister_vfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
@@ -106,7 +107,7 @@ void lpfc_cleanup(struct lpfc_vport *);
void lpfc_disc_timeout(unsigned long);
struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
-
+struct lpfc_nodelist *lpfc_findnode_rpi(struct lpfc_vport *, uint16_t);
void lpfc_worker_wake_up(struct lpfc_hba *);
int lpfc_workq_post_event(struct lpfc_hba *, void *, void *, uint32_t);
int lpfc_do_work(void *);
@@ -453,3 +454,11 @@ int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int);
uint16_t lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *);
int lpfc_sli4_queue_create(struct lpfc_hba *);
void lpfc_sli4_queue_destroy(struct lpfc_hba *);
+void lpfc_sli4_abts_err_handler(struct lpfc_hba *, struct lpfc_nodelist *,
+ struct sli4_wcqe_xri_aborted *);
+int lpfc_hba_init_link_fc_topology(struct lpfc_hba *, uint32_t, uint32_t);
+int lpfc_issue_reg_vfi(struct lpfc_vport *);
+int lpfc_issue_unreg_vfi(struct lpfc_vport *);
+int lpfc_selective_reset(struct lpfc_hba *);
+int lpfc_sli4_read_config(struct lpfc_hba *phba);
+int lpfc_scsi_buf_update(struct lpfc_hba *phba);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index 2838259..3587a3f 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1997,7 +1997,8 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
/* Get slow-path event queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Slow-path EQ information:\n");
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ if (phba->sli4_hba.sp_eq) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tEQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2006,12 +2007,17 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.sp_eq->entry_size,
phba->sli4_hba.sp_eq->host_index,
phba->sli4_hba.sp_eq->hba_index);
+ }
/* Get fast-path event queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Fast-path EQ information:\n");
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) {
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ if (phba->sli4_hba.fp_eq) {
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+ fcp_qidx++) {
+ if (phba->sli4_hba.fp_eq[fcp_qidx]) {
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tEQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2020,16 +2026,19 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.fp_eq[fcp_qidx]->entry_size,
phba->sli4_hba.fp_eq[fcp_qidx]->host_index,
phba->sli4_hba.fp_eq[fcp_qidx]->hba_index);
+ }
+ }
}
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
/* Get mailbox complete queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Slow-path MBX CQ information:\n");
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ if (phba->sli4_hba.mbx_cq) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated EQID[%02d]:\n",
phba->sli4_hba.mbx_cq->assoc_qid);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tCQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2038,14 +2047,16 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.mbx_cq->entry_size,
phba->sli4_hba.mbx_cq->host_index,
phba->sli4_hba.mbx_cq->hba_index);
+ }
/* Get slow-path complete queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Slow-path ELS CQ information:\n");
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ if (phba->sli4_hba.els_cq) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated EQID[%02d]:\n",
phba->sli4_hba.els_cq->assoc_qid);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tCQID [%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2054,16 +2065,21 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.els_cq->entry_size,
phba->sli4_hba.els_cq->host_index,
phba->sli4_hba.els_cq->hba_index);
+ }
/* Get fast-path complete queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Fast-path FCP CQ information:\n");
fcp_qidx = 0;
- do {
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ if (phba->sli4_hba.fcp_cq) {
+ do {
+ if (phba->sli4_hba.fcp_cq[fcp_qidx]) {
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated EQID[%02d]:\n",
phba->sli4_hba.fcp_cq[fcp_qidx]->assoc_qid);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tCQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2072,16 +2088,20 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.fcp_cq[fcp_qidx]->entry_size,
phba->sli4_hba.fcp_cq[fcp_qidx]->host_index,
phba->sli4_hba.fcp_cq[fcp_qidx]->hba_index);
- } while (++fcp_qidx < phba->cfg_fcp_eq_count);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ }
+ } while (++fcp_qidx < phba->cfg_fcp_eq_count);
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
+ }
/* Get mailbox queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Slow-path MBX MQ information:\n");
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ if (phba->sli4_hba.mbx_wq) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated CQID[%02d]:\n",
phba->sli4_hba.mbx_wq->assoc_qid);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tWQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2090,14 +2110,16 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.mbx_wq->entry_size,
phba->sli4_hba.mbx_wq->host_index,
phba->sli4_hba.mbx_wq->hba_index);
+ }
/* Get slow-path work queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Slow-path ELS WQ information:\n");
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ if (phba->sli4_hba.els_wq) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated CQID[%02d]:\n",
phba->sli4_hba.els_wq->assoc_qid);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tWQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n\n",
@@ -2106,15 +2128,22 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.els_wq->entry_size,
phba->sli4_hba.els_wq->host_index,
phba->sli4_hba.els_wq->hba_index);
+ }
/* Get fast-path work queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Fast-path FCP WQ information:\n");
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++) {
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ if (phba->sli4_hba.fcp_wq) {
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
+ fcp_qidx++) {
+ if (!phba->sli4_hba.fcp_wq[fcp_qidx])
+ continue;
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated CQID[%02d]:\n",
phba->sli4_hba.fcp_wq[fcp_qidx]->assoc_qid);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tWQID[%02d], "
"QE-COUNT[%04d], WQE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2123,16 +2152,19 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.fcp_wq[fcp_qidx]->entry_size,
phba->sli4_hba.fcp_wq[fcp_qidx]->host_index,
phba->sli4_hba.fcp_wq[fcp_qidx]->hba_index);
+ }
+ len += snprintf(pbuffer+len,
+ LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
}
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len, "\n");
/* Get receive queue information */
len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Slow-path RQ information:\n");
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ if (phba->sli4_hba.hdr_rq && phba->sli4_hba.dat_rq) {
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"Associated CQID[%02d]:\n",
phba->sli4_hba.hdr_rq->assoc_qid);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tHQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2141,7 +2173,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.hdr_rq->entry_size,
phba->sli4_hba.hdr_rq->host_index,
phba->sli4_hba.hdr_rq->hba_index);
- len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
+ len += snprintf(pbuffer+len, LPFC_QUE_INFO_GET_BUF_SIZE-len,
"\tDQID[%02d], "
"QE-COUNT[%04d], QE-SIZE[%04d], "
"HOST-INDEX[%04d], PORT-INDEX[%04d]\n",
@@ -2150,7 +2182,7 @@ lpfc_idiag_queinfo_read(struct file *file, char __user *buf, size_t nbytes,
phba->sli4_hba.dat_rq->entry_size,
phba->sli4_hba.dat_rq->host_index,
phba->sli4_hba.dat_rq->hba_index);
-
+ }
return simple_read_from_buffer(buf, nbytes, ppos, pbuffer, len);
}
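
For illustration only (not part of the patch): the lpfc_debugfs.c hunks above add NULL checks around every SLI-4 queue pointer before it is formatted, since some of the queues may never have been created on a given port. The guarded-snprintf pattern, reduced to a standalone C sketch with a bounded buffer:

/* Standalone editorial sketch -- skip optional queues that were never created
 * instead of dereferencing a NULL pointer while formatting the report.
 */
#include <stdio.h>

struct queue { int id, entry_count; };

static int print_queue(char *buf, size_t size, int len,
		       const char *name, const struct queue *q)
{
	if (len < 0 || (size_t)len >= size)
		return len;                      /* buffer already full */
	len += snprintf(buf + len, size - len, "%s:\n", name);
	if (!q)                                  /* queue not set up on this port */
		return len;
	if ((size_t)len >= size)
		return len;
	return len + snprintf(buf + len, size - len,
			      "\tQID[%02d], QE-COUNT[%04d]\n",
			      q->id, q->entry_count);
}

int main(void)
{
	char report[256];
	struct queue els_wq = { 7, 256 };
	int len = print_queue(report, sizeof(report), 0, "Slow-path ELS WQ", &els_wq);

	len = print_queue(report, sizeof(report), len, "Fast-path FCP WQ", NULL);
	fputs(report, stdout);
	return len ? 0 : 1;
}
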
@@ -2360,7 +2392,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
switch (quetp) {
case LPFC_IDIAG_EQ:
/* Slow-path event queue */
- if (phba->sli4_hba.sp_eq->queue_id == queid) {
+ if (phba->sli4_hba.sp_eq &&
+ phba->sli4_hba.sp_eq->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
phba->sli4_hba.sp_eq, index, count);
@@ -2370,23 +2403,29 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check;
}
/* Fast-path event queue */
- for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
- if (phba->sli4_hba.fp_eq[qidx]->queue_id == queid) {
- /* Sanity check */
- rc = lpfc_idiag_que_param_check(
+ if (phba->sli4_hba.fp_eq) {
+ for (qidx = 0; qidx < phba->cfg_fcp_eq_count; qidx++) {
+ if (phba->sli4_hba.fp_eq[qidx] &&
+ phba->sli4_hba.fp_eq[qidx]->queue_id ==
+ queid) {
+ /* Sanity check */
+ rc = lpfc_idiag_que_param_check(
phba->sli4_hba.fp_eq[qidx],
index, count);
- if (rc)
- goto error_out;
- idiag.ptr_private = phba->sli4_hba.fp_eq[qidx];
- goto pass_check;
+ if (rc)
+ goto error_out;
+ idiag.ptr_private =
+ phba->sli4_hba.fp_eq[qidx];
+ goto pass_check;
+ }
}
}
goto error_out;
break;
case LPFC_IDIAG_CQ:
/* MBX complete queue */
- if (phba->sli4_hba.mbx_cq->queue_id == queid) {
+ if (phba->sli4_hba.mbx_cq &&
+ phba->sli4_hba.mbx_cq->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
phba->sli4_hba.mbx_cq, index, count);
@@ -2396,7 +2435,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check;
}
/* ELS complete queue */
- if (phba->sli4_hba.els_cq->queue_id == queid) {
+ if (phba->sli4_hba.els_cq &&
+ phba->sli4_hba.els_cq->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
phba->sli4_hba.els_cq, index, count);
@@ -2406,25 +2446,30 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check;
}
/* FCP complete queue */
- qidx = 0;
- do {
- if (phba->sli4_hba.fcp_cq[qidx]->queue_id == queid) {
- /* Sanity check */
- rc = lpfc_idiag_que_param_check(
+ if (phba->sli4_hba.fcp_cq) {
+ qidx = 0;
+ do {
+ if (phba->sli4_hba.fcp_cq[qidx] &&
+ phba->sli4_hba.fcp_cq[qidx]->queue_id ==
+ queid) {
+ /* Sanity check */
+ rc = lpfc_idiag_que_param_check(
phba->sli4_hba.fcp_cq[qidx],
index, count);
- if (rc)
- goto error_out;
- idiag.ptr_private =
+ if (rc)
+ goto error_out;
+ idiag.ptr_private =
phba->sli4_hba.fcp_cq[qidx];
- goto pass_check;
- }
- } while (++qidx < phba->cfg_fcp_eq_count);
+ goto pass_check;
+ }
+ } while (++qidx < phba->cfg_fcp_eq_count);
+ }
goto error_out;
break;
case LPFC_IDIAG_MQ:
/* MBX work queue */
- if (phba->sli4_hba.mbx_wq->queue_id == queid) {
+ if (phba->sli4_hba.mbx_wq &&
+ phba->sli4_hba.mbx_wq->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
phba->sli4_hba.mbx_wq, index, count);
@@ -2433,10 +2478,12 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
idiag.ptr_private = phba->sli4_hba.mbx_wq;
goto pass_check;
}
+ goto error_out;
break;
case LPFC_IDIAG_WQ:
/* ELS work queue */
- if (phba->sli4_hba.els_wq->queue_id == queid) {
+ if (phba->sli4_hba.els_wq &&
+ phba->sli4_hba.els_wq->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
phba->sli4_hba.els_wq, index, count);
@@ -2446,24 +2493,30 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check;
}
/* FCP work queue */
- for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) {
- if (phba->sli4_hba.fcp_wq[qidx]->queue_id == queid) {
- /* Sanity check */
- rc = lpfc_idiag_que_param_check(
+ if (phba->sli4_hba.fcp_wq) {
+ for (qidx = 0; qidx < phba->cfg_fcp_wq_count; qidx++) {
+ if (!phba->sli4_hba.fcp_wq[qidx])
+ continue;
+ if (phba->sli4_hba.fcp_wq[qidx]->queue_id ==
+ queid) {
+ /* Sanity check */
+ rc = lpfc_idiag_que_param_check(
phba->sli4_hba.fcp_wq[qidx],
index, count);
- if (rc)
- goto error_out;
- idiag.ptr_private =
- phba->sli4_hba.fcp_wq[qidx];
- goto pass_check;
+ if (rc)
+ goto error_out;
+ idiag.ptr_private =
+ phba->sli4_hba.fcp_wq[qidx];
+ goto pass_check;
+ }
}
}
goto error_out;
break;
case LPFC_IDIAG_RQ:
/* HDR queue */
- if (phba->sli4_hba.hdr_rq->queue_id == queid) {
+ if (phba->sli4_hba.hdr_rq &&
+ phba->sli4_hba.hdr_rq->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
phba->sli4_hba.hdr_rq, index, count);
@@ -2473,7 +2526,8 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
goto pass_check;
}
/* DAT queue */
- if (phba->sli4_hba.dat_rq->queue_id == queid) {
+ if (phba->sli4_hba.dat_rq &&
+ phba->sli4_hba.dat_rq->queue_id == queid) {
/* Sanity check */
rc = lpfc_idiag_que_param_check(
phba->sli4_hba.dat_rq, index, count);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 445826a..7afc757 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -421,13 +421,13 @@ fail:
* @vport: pointer to a host virtual N_Port data structure.
*
* This routine issues a REG_VFI mailbox for the vfi, vpi, fcfi triplet for
- * the @vport. This mailbox command is necessary for FCoE only.
+ * the @vport. This mailbox command is necessary for SLI4 ports only.
*
* Return code
* 0 - successfully issued REG_VFI for @vport
* A failure code otherwise.
**/
-static int
+int
lpfc_issue_reg_vfi(struct lpfc_vport *vport)
{
struct lpfc_hba *phba = vport->phba;
@@ -438,10 +438,14 @@ lpfc_issue_reg_vfi(struct lpfc_vport *vport)
int rc = 0;
sp = &phba->fc_fabparam;
- ndlp = lpfc_findnode_did(vport, Fabric_DID);
- if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
- rc = -ENODEV;
- goto fail;
+ /* move forward in case of SLI4 FC port loopback test */
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ !(phba->link_flag & LS_LOOPBACK_MODE)) {
+ ndlp = lpfc_findnode_did(vport, Fabric_DID);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ rc = -ENODEV;
+ goto fail;
+ }
}
dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
@@ -487,6 +491,54 @@ fail:
}
/**
+ * lpfc_issue_unreg_vfi - Unregister VFI for this vport's fabric login
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues a UNREG_VFI mailbox with the vfi, vpi, fcfi triplet for
+ * the @vport. This mailbox command is necessary for SLI4 ports only.
+ *
+ * Return code
+ * 0 - successfully issued UNREG_VFI for @vport
+ * A failure code otherwise.
+ **/
+int
+lpfc_issue_unreg_vfi(struct lpfc_vport *vport)
+{
+ struct lpfc_hba *phba = vport->phba;
+ struct Scsi_Host *shost;
+ LPFC_MBOXQ_t *mboxq;
+ int rc;
+
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+ "2556 UNREG_VFI mbox allocation failed"
+ "HBA state x%x\n", phba->pport->port_state);
+ return -ENOMEM;
+ }
+
+ lpfc_unreg_vfi(mboxq, vport);
+ mboxq->vport = vport;
+ mboxq->mbox_cmpl = lpfc_unregister_vfi_cmpl;
+
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+ if (rc == MBX_NOT_FINISHED) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+ "2557 UNREG_VFI issue mbox failed rc x%x "
+ "HBA state x%x\n",
+ rc, phba->pport->port_state);
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return -EIO;
+ }
+
+ shost = lpfc_shost_from_vport(vport);
+ spin_lock_irq(shost->host_lock);
+ vport->fc_flag &= ~FC_VFI_REGISTERED;
+ spin_unlock_irq(shost->host_lock);
+ return 0;
+}
+
+/**
* lpfc_check_clean_addr_bit - Check whether assigned FCID is clean.
* @vport: pointer to a host virtual N_Port data structure.
* @sp: pointer to service parameter data structure.
@@ -615,7 +667,9 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
"1816 FLOGI NPIV supported, "
"response data 0x%x\n",
sp->cmn.response_multiple_NPort);
+ spin_lock_irq(&phba->hbalock);
phba->link_flag |= LS_NPIV_FAB_SUPPORTED;
+ spin_unlock_irq(&phba->hbalock);
} else {
/* Because we asked f/w for NPIV it still expects us
to call reg_vnpid atleast for the physcial host */
@@ -623,7 +677,9 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
LOG_ELS | LOG_VPORT,
"1817 Fabric does not support NPIV "
"- configuring single port mode.\n");
+ spin_lock_irq(&phba->hbalock);
phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
+ spin_unlock_irq(&phba->hbalock);
}
}
@@ -686,11 +742,16 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
lpfc_do_scr_ns_plogi(phba, vport);
} else if (vport->fc_flag & FC_VFI_REGISTERED)
lpfc_issue_init_vpi(vport);
- else
+ else {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "3135 Need register VFI: (x%x/%x)\n",
+ vport->fc_prevDID, vport->fc_myDID);
lpfc_issue_reg_vfi(vport);
+ }
}
return 0;
}
+
/**
* lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
* @vport: pointer to a host virtual N_Port data structure.
@@ -907,17 +968,16 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
* LPFC_MAX_DISC_THREADS (32). Scanning in the case of no
* alpa map would take too long otherwise.
*/
- if (phba->alpa_map[0] == 0) {
+ if (phba->alpa_map[0] == 0)
vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS;
- if ((phba->sli_rev == LPFC_SLI_REV4) &&
- (!(vport->fc_flag & FC_VFI_REGISTERED) ||
- (vport->fc_prevDID != vport->fc_myDID))) {
- if (vport->fc_flag & FC_VFI_REGISTERED)
- lpfc_sli4_unreg_all_rpis(vport);
- lpfc_issue_reg_vfi(vport);
- lpfc_nlp_put(ndlp);
- goto out;
- }
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ (!(vport->fc_flag & FC_VFI_REGISTERED) ||
+ (vport->fc_prevDID != vport->fc_myDID))) {
+ if (vport->fc_flag & FC_VFI_REGISTERED)
+ lpfc_sli4_unreg_all_rpis(vport);
+ lpfc_issue_reg_vfi(vport);
+ lpfc_nlp_put(ndlp);
+ goto out;
}
goto flogifail;
}
@@ -1075,6 +1135,7 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Setup CSPs accordingly for Fabric */
sp->cmn.e_d_tov = 0;
sp->cmn.w2.r_a_tov = 0;
+ sp->cmn.virtual_fabric_support = 0;
sp->cls1.classValid = 0;
sp->cls2.seqDelivery = 1;
sp->cls3.seqDelivery = 1;
@@ -1163,8 +1224,7 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
icmd = &iocb->iocb;
- if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR &&
- icmd->un.elsreq64.bdl.ulpIoTag32) {
+ if (icmd->ulpCommand == CMD_ELS_REQUEST64_CR) {
ndlp = (struct lpfc_nodelist *)(iocb->context1);
if (ndlp && NLP_CHK_NODE_ACT(ndlp) &&
(ndlp->nlp_DID == Fabric_DID))
@@ -3066,17 +3126,22 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
if (did == FDMI_DID)
retry = 1;
- if (((cmd == ELS_CMD_FLOGI) || (cmd == ELS_CMD_FDISC)) &&
+ if ((cmd == ELS_CMD_FLOGI) &&
(phba->fc_topology != LPFC_TOPOLOGY_LOOP) &&
!lpfc_error_lost_link(irsp)) {
/* FLOGI retry policy */
retry = 1;
- /* retry forever */
+ /* retry FLOGI forever */
maxretry = 0;
if (cmdiocb->retry >= 100)
delay = 5000;
else if (cmdiocb->retry >= 32)
delay = 1000;
+ } else if ((cmd == ELS_CMD_FDISC) && !lpfc_error_lost_link(irsp)) {
+ /* retry FDISCs every second up to devloss */
+ retry = 1;
+ maxretry = vport->cfg_devloss_tmo;
+ delay = 1000;
}
cmdiocb->retry++;
@@ -3389,11 +3454,17 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/*
* The driver received a LOGO from the rport and has ACK'd it.
- * At this point, the driver is done so release the IOCB and
- * remove the ndlp reference.
+ * At this point, the driver is done so release the IOCB
*/
lpfc_els_free_iocb(phba, cmdiocb);
- lpfc_nlp_put(ndlp);
+
+ /*
+ * Remove the ndlp reference if it's a fabric node that has
+ * sent us an unsolicited LOGO.
+ */
+ if (ndlp->nlp_type & NLP_FABRIC)
+ lpfc_nlp_put(ndlp);
+
return;
}
@@ -4867,23 +4938,31 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
sizeof(struct lpfc_name));
if (!rc) {
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox)
+ if (phba->sli_rev < LPFC_SLI_REV4) {
+ mbox = mempool_alloc(phba->mbox_mem_pool,
+ GFP_KERNEL);
+ if (!mbox)
+ return 1;
+ lpfc_linkdown(phba);
+ lpfc_init_link(phba, mbox,
+ phba->cfg_topology,
+ phba->cfg_link_speed);
+ mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
+ mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+ mbox->vport = vport;
+ rc = lpfc_sli_issue_mbox(phba, mbox,
+ MBX_NOWAIT);
+ lpfc_set_loopback_flag(phba);
+ if (rc == MBX_NOT_FINISHED)
+ mempool_free(mbox, phba->mbox_mem_pool);
return 1;
-
- lpfc_linkdown(phba);
- lpfc_init_link(phba, mbox,
- phba->cfg_topology,
- phba->cfg_link_speed);
- mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->vport = vport;
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- lpfc_set_loopback_flag(phba);
- if (rc == MBX_NOT_FINISHED) {
- mempool_free(mbox, phba->mbox_mem_pool);
+ } else {
+ /* abort the flogi coming back to ourselves
+ * due to external loopback on the port.
+ */
+ lpfc_els_abort_flogi(phba);
+ return 0;
}
- return 1;
} else if (rc > 0) { /* greater than */
spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_PT2PT_PLOGI;
@@ -5838,8 +5917,12 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
vport->fc_myDID = vport->fc_prevDID;
if (phba->sli_rev < LPFC_SLI_REV4)
lpfc_issue_fabric_reglogin(vport);
- else
+ else {
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "3138 Need register VFI: (x%x/%x)\n",
+ vport->fc_prevDID, vport->fc_myDID);
lpfc_issue_reg_vfi(vport);
+ }
}
}
return 0;
@@ -6596,56 +6679,6 @@ dropit:
}
/**
- * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
- * @phba: pointer to lpfc hba data structure.
- * @vpi: host virtual N_Port identifier.
- *
- * This routine finds a vport on a HBA (referred by @phba) through a
- * @vpi. The function walks the HBA's vport list and returns the address
- * of the vport with the matching @vpi.
- *
- * Return code
- * NULL - No vport with the matching @vpi found
- * Otherwise - Address to the vport with the matching @vpi.
- **/
-struct lpfc_vport *
-lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
-{
- struct lpfc_vport *vport;
- unsigned long flags;
- int i = 0;
-
- /* The physical ports are always vpi 0 - translate is unnecessary. */
- if (vpi > 0) {
- /*
- * Translate the physical vpi to the logical vpi. The
- * vport stores the logical vpi.
- */
- for (i = 0; i < phba->max_vpi; i++) {
- if (vpi == phba->vpi_ids[i])
- break;
- }
-
- if (i >= phba->max_vpi) {
- lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
- "2936 Could not find Vport mapped "
- "to vpi %d\n", vpi);
- return NULL;
- }
- }
-
- spin_lock_irqsave(&phba->hbalock, flags);
- list_for_each_entry(vport, &phba->port_list, listentry) {
- if (vport->vpi == i) {
- spin_unlock_irqrestore(&phba->hbalock, flags);
- return vport;
- }
- }
- spin_unlock_irqrestore(&phba->hbalock, flags);
- return NULL;
-}
-
-/**
* lpfc_els_unsol_event - Process an unsolicited event from an els sli ring
* @phba: pointer to lpfc hba data structure.
* @pring: pointer to a SLI ring.
@@ -7281,6 +7314,7 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Setup CSPs accordingly for Fabric */
sp->cmn.e_d_tov = 0;
sp->cmn.w2.r_a_tov = 0;
+ sp->cmn.virtual_fabric_support = 0;
sp->cls1.classValid = 0;
sp->cls2.seqDelivery = 1;
sp->cls3.seqDelivery = 1;
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 091f68e..678a4b1 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1074,6 +1074,12 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
mempool_free(pmb, phba->mbox_mem_pool);
+ /* don't perform discovery for SLI4 loopback diagnostic test */
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ !(phba->hba_flag & HBA_FCOE_MODE) &&
+ (phba->link_flag & LS_LOOPBACK_MODE))
+ return;
+
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP &&
vport->fc_flag & FC_PUBLIC_LOOP &&
!(vport->fc_flag & FC_LBIT)) {
@@ -2646,9 +2652,14 @@ lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
struct lpfc_vport *vport = mboxq->vport;
- /* VFI not supported on interface type 0, just do the flogi */
- if (mboxq->u.mb.mbxStatus && (bf_get(lpfc_sli_intf_if_type,
- &phba->sli4_hba.sli_intf) != LPFC_SLI_INTF_IF_TYPE_0)) {
+ /*
+ * VFI not supported on interface type 0, just do the flogi
+ * Also continue if the VFI is in use - just use the same one.
+ */
+ if (mboxq->u.mb.mbxStatus &&
+ (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_0) &&
+ mboxq->u.mb.mbxStatus != MBX_VFI_IN_USE) {
lpfc_printf_vlog(vport, KERN_ERR,
LOG_MBOX,
"2891 Init VFI mailbox failed 0x%x\n",
@@ -2842,10 +2853,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_disc_list_loopmap(vport);
/* Start discovery */
lpfc_disc_start(vport);
- goto fail_free_mem;
+ goto out_free_mem;
}
lpfc_vport_set_state(vport, FC_VPORT_FAILED);
- goto fail_free_mem;
+ goto out_free_mem;
}
/* The VPI is implicitly registered when the VFI is registered */
spin_lock_irq(shost->host_lock);
@@ -2855,10 +2866,16 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
spin_unlock_irq(shost->host_lock);
+ /* In case of SLI4 FC loopback test, we are ready */
+ if ((phba->sli_rev == LPFC_SLI_REV4) &&
+ (phba->link_flag & LS_LOOPBACK_MODE)) {
+ phba->link_state = LPFC_HBA_READY;
+ goto out_free_mem;
+ }
+
if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
/* For private loop just start discovery and we are done. */
if ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) &&
- (phba->alpa_map[0] == 0) &&
!(vport->fc_flag & FC_PUBLIC_LOOP)) {
/* Use loop map to make discovery list */
lpfc_disc_list_loopmap(vport);
@@ -2870,7 +2887,7 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
}
}
-fail_free_mem:
+out_free_mem:
mempool_free(mboxq, phba->mbox_mem_pool);
lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
kfree(dmabuf);
@@ -2923,6 +2940,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
{
struct lpfc_vport *vport = phba->pport;
LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
+ struct Scsi_Host *shost;
int i;
struct lpfc_dmabuf *mp;
int rc;
@@ -2946,6 +2964,7 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la);
phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED;
+ shost = lpfc_shost_from_vport(vport);
if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
phba->sli3_options &= ~LPFC_SLI3_NPIV_ENABLED;
@@ -2957,8 +2976,11 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
"1309 Link Up Event npiv not supported in loop "
"topology\n");
/* Get Loop Map information */
- if (bf_get(lpfc_mbx_read_top_il, la))
+ if (bf_get(lpfc_mbx_read_top_il, la)) {
+ spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_LBIT;
+ spin_unlock_irq(shost->host_lock);
+ }
vport->fc_myDID = bf_get(lpfc_mbx_read_top_alpa_granted, la);
i = la->lilpBde64.tus.f.bdeSize;
@@ -3003,11 +3025,13 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la)
} else {
if (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)) {
if (phba->max_vpi && phba->cfg_enable_npiv &&
- (phba->sli_rev == 3))
+ (phba->sli_rev >= LPFC_SLI_REV3))
phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
}
vport->fc_myDID = phba->fc_pref_DID;
+ spin_lock_irq(shost->host_lock);
vport->fc_flag |= FC_LBIT;
+ spin_unlock_irq(shost->host_lock);
}
spin_unlock_irq(&phba->hbalock);
@@ -3224,15 +3248,14 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
} else if (bf_get(lpfc_mbx_read_top_att_type, la) ==
LPFC_ATT_LINK_DOWN) {
phba->fc_stat.LinkDown++;
- if (phba->link_flag & LS_LOOPBACK_MODE) {
+ if (phba->link_flag & LS_LOOPBACK_MODE)
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1308 Link Down Event in loop back mode "
"x%x received "
"Data: x%x x%x x%x\n",
la->eventTag, phba->fc_eventTag,
phba->pport->port_state, vport->fc_flag);
- }
- else {
+ else
lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
"1305 Link Down Event x%x received "
"Data: x%x x%x x%x x%x x%x\n",
@@ -3240,7 +3263,6 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
phba->pport->port_state, vport->fc_flag,
bf_get(lpfc_mbx_read_top_mm, la),
bf_get(lpfc_mbx_read_top_fa, la));
- }
lpfc_mbx_issue_link_down(phba);
}
if ((bf_get(lpfc_mbx_read_top_mm, la)) &&
@@ -3594,6 +3616,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
MAILBOX_t *mb = &pmb->u.mb;
struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
struct lpfc_nodelist *ndlp;
+ struct Scsi_Host *shost;
ndlp = (struct lpfc_nodelist *) pmb->context2;
pmb->context1 = NULL;
@@ -3639,8 +3662,12 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
* vport discovery */
if (!(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
lpfc_start_fdiscs(phba);
- else
+ else {
+ shost = lpfc_shost_from_vport(vport);
+ spin_lock_irq(shost->host_lock);
vport->fc_flag &= ~FC_LOGO_RCVD_DID_CHNG ;
+ spin_unlock_irq(shost->host_lock);
+ }
lpfc_do_scr_ns_plogi(phba, vport);
}
@@ -5353,6 +5380,73 @@ lpfc_findnode_wwpn(struct lpfc_vport *vport, struct lpfc_name *wwpn)
return ndlp;
}
+/*
+ * This routine looks up the ndlp lists for the given RPI. If the RPI
+ * is found, the routine returns the node element list pointer;
+ * otherwise it returns NULL.
+ */
+struct lpfc_nodelist *
+lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi)
+{
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
+ struct lpfc_nodelist *ndlp;
+
+ spin_lock_irq(shost->host_lock);
+ ndlp = __lpfc_findnode_rpi(vport, rpi);
+ spin_unlock_irq(shost->host_lock);
+ return ndlp;
+}
+
+/**
+ * lpfc_find_vport_by_vpid - Find a vport on a HBA through vport identifier
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: the physical host virtual N_Port identifier.
+ *
+ * This routine finds a vport on a HBA (referred by @phba) through a
+ * @vpi. The function walks the HBA's vport list and returns the address
+ * of the vport with the matching @vpi.
+ *
+ * Return code
+ * NULL - No vport with the matching @vpi found
+ * Otherwise - Address to the vport with the matching @vpi.
+ **/
+struct lpfc_vport *
+lpfc_find_vport_by_vpid(struct lpfc_hba *phba, uint16_t vpi)
+{
+ struct lpfc_vport *vport;
+ unsigned long flags;
+ int i = 0;
+
+ /* The physical ports are always vpi 0 - translation is unnecessary. */
+ if (vpi > 0) {
+ /*
+ * Translate the physical vpi to the logical vpi. The
+ * vport stores the logical vpi.
+ */
+ for (i = 0; i < phba->max_vpi; i++) {
+ if (vpi == phba->vpi_ids[i])
+ break;
+ }
+
+ if (i >= phba->max_vpi) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
+ "2936 Could not find Vport mapped "
+ "to vpi %d\n", vpi);
+ return NULL;
+ }
+ }
+
+ spin_lock_irqsave(&phba->hbalock, flags);
+ list_for_each_entry(vport, &phba->port_list, listentry) {
+ if (vport->vpi == i) {
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return vport;
+ }
+ }
+ spin_unlock_irqrestore(&phba->hbalock, flags);
+ return NULL;
+}
+
void
lpfc_nlp_init(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
uint32_t did)
@@ -5599,7 +5693,7 @@ out:
*
* This function frees memory associated with the mailbox command.
*/
-static void
+void
lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
struct lpfc_vport *vport = mboxq->vport;
@@ -5651,7 +5745,6 @@ lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
int
lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
{
- LPFC_MBOXQ_t *mbox;
struct lpfc_vport **vports;
struct lpfc_nodelist *ndlp;
struct Scsi_Host *shost;
@@ -5687,35 +5780,9 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba)
/* Cleanup any outstanding ELS commands */
lpfc_els_flush_all_cmd(phba);
- /* Unregister VFI */
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mbox) {
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
- "2556 UNREG_VFI mbox allocation failed"
- "HBA state x%x\n", phba->pport->port_state);
- return -ENOMEM;
- }
-
- lpfc_unreg_vfi(mbox, phba->pport);
- mbox->vport = phba->pport;
- mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;
-
- rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
- if (rc == MBX_NOT_FINISHED) {
- lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
- "2557 UNREG_VFI issue mbox failed rc x%x "
- "HBA state x%x\n",
- rc, phba->pport->port_state);
- mempool_free(mbox, phba->mbox_mem_pool);
- return -EIO;
- }
-
- shost = lpfc_shost_from_vport(phba->pport);
- spin_lock_irq(shost->host_lock);
- phba->pport->fc_flag &= ~FC_VFI_REGISTERED;
- spin_unlock_irq(shost->host_lock);
-
- return 0;
+ /* Unregister the physical port VFI */
+ rc = lpfc_issue_unreg_vfi(phba->pport);
+ return rc;
}
/**
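For illustration only: the two lookup helpers added above (lpfc_find_vport_by_vpid and lpfc_findnode_rpi) are naturally chained when an asynchronous event carries only a (vpi, rpi) pair, which is exactly what lpfc_sli_abts_err_handler further down in this diff does. A minimal sketch under that assumption; the wrapper name is invented for the example and is not part of the patch:

/* Hypothetical helper: map a (vpi, rpi) pair back to its active node. */
static struct lpfc_nodelist *
example_resolve_node(struct lpfc_hba *phba, uint16_t vpi, uint16_t rpi)
{
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;

	vport = lpfc_find_vport_by_vpid(phba, vpi);	/* NULL if vpi unknown */
	if (!vport)
		return NULL;
	ndlp = lpfc_findnode_rpi(vport, rpi);		/* NULL if rpi unknown */
	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
		return NULL;
	return ndlp;
}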
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 046edc4..7245bea 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -349,6 +349,12 @@ struct csp {
* Word 1 Bit 31 in FLOGI response is clean address bit
*/
#define clean_address_bit request_multiple_Nport /* Word 1, bit 31 */
+/*
+ * Word 1 Bit 30 in common service parameter is overloaded.
+ * Word 1 Bit 30 in FLOGI request is Virtual Fabrics
+ * Word 1 Bit 30 in PLOGI request is random offset
+ */
+#define virtual_fabric_support randomOffset /* Word 1, bit 30 */
#ifdef __BIG_ENDIAN_BITFIELD
uint16_t request_multiple_Nport:1; /* FC Word 1, bit 31 */
uint16_t randomOffset:1; /* FC Word 1, bit 30 */
@@ -1852,8 +1858,8 @@ typedef struct {
uint8_t fabric_AL_PA; /* If using a Fabric Assigned AL_PA */
#endif
-#define FLAGS_LOCAL_LB 0x01 /* link_flags (=1) ENDEC loopback */
#define FLAGS_TOPOLOGY_MODE_LOOP_PT 0x00 /* Attempt loop then pt-pt */
+#define FLAGS_LOCAL_LB 0x01 /* link_flags (=1) ENDEC loopback */
#define FLAGS_TOPOLOGY_MODE_PT_PT 0x02 /* Attempt pt-pt only */
#define FLAGS_TOPOLOGY_MODE_LOOP 0x04 /* Attempt loop only */
#define FLAGS_TOPOLOGY_MODE_PT_LOOP 0x06 /* Attempt pt-pt then loop */
@@ -2819,7 +2825,8 @@ typedef struct {
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t rsvd1 : 19; /* Reserved */
uint32_t cdss : 1; /* Configure Data Security SLI */
- uint32_t rsvd2 : 3; /* Reserved */
+ uint32_t casabt : 1; /* Configure async abts status notice */
+ uint32_t rsvd2 : 2; /* Reserved */
uint32_t cbg : 1; /* Configure BlockGuard */
uint32_t cmv : 1; /* Configure Max VPIs */
uint32_t ccrp : 1; /* Config Command Ring Polling */
@@ -2839,14 +2846,16 @@ typedef struct {
uint32_t ccrp : 1; /* Config Command Ring Polling */
uint32_t cmv : 1; /* Configure Max VPIs */
uint32_t cbg : 1; /* Configure BlockGuard */
- uint32_t rsvd2 : 3; /* Reserved */
+ uint32_t rsvd2 : 2; /* Reserved */
+ uint32_t casabt : 1; /* Configure async abts status notice */
uint32_t cdss : 1; /* Configure Data Security SLI */
uint32_t rsvd1 : 19; /* Reserved */
#endif
#ifdef __BIG_ENDIAN_BITFIELD
uint32_t rsvd3 : 19; /* Reserved */
uint32_t gdss : 1; /* Configure Data Security SLI */
- uint32_t rsvd4 : 3; /* Reserved */
+ uint32_t gasabt : 1; /* Grant async abts status notice */
+ uint32_t rsvd4 : 2; /* Reserved */
uint32_t gbg : 1; /* Grant BlockGuard */
uint32_t gmv : 1; /* Grant Max VPIs */
uint32_t gcrp : 1; /* Grant Command Ring Polling */
@@ -2866,7 +2875,8 @@ typedef struct {
uint32_t gcrp : 1; /* Grant Command Ring Polling */
uint32_t gmv : 1; /* Grant Max VPIs */
uint32_t gbg : 1; /* Grant BlockGuard */
- uint32_t rsvd4 : 3; /* Reserved */
+ uint32_t rsvd4 : 2; /* Reserved */
+ uint32_t gasabt : 1; /* Grant async abts status notice */
uint32_t gdss : 1; /* Configure Data Security SLI */
uint32_t rsvd3 : 19; /* Reserved */
#endif
@@ -3465,6 +3475,7 @@ typedef struct {
} ASYNCSTAT_FIELDS;
#define ASYNC_TEMP_WARN 0x100
#define ASYNC_TEMP_SAFE 0x101
+#define ASYNC_STATUS_CN 0x102
/* IOCB Command template for CMD_IOCB_RCV_ELS64_CX (0xB7)
or CMD_IOCB_RCV_SEQ64_CX (0xB5) */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 98d2152..e5bfa7f 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -1351,11 +1351,11 @@ struct lpfc_mbx_set_link_diag_loopback {
struct {
uint32_t word0;
#define lpfc_mbx_set_diag_lpbk_type_SHIFT 0
-#define lpfc_mbx_set_diag_lpbk_type_MASK 0x00000001
+#define lpfc_mbx_set_diag_lpbk_type_MASK 0x00000003
#define lpfc_mbx_set_diag_lpbk_type_WORD word0
#define LPFC_DIAG_LOOPBACK_TYPE_DISABLE 0x0
#define LPFC_DIAG_LOOPBACK_TYPE_INTERNAL 0x1
-#define LPFC_DIAG_LOOPBACK_TYPE_EXTERNAL 0x2
+#define LPFC_DIAG_LOOPBACK_TYPE_SERDES 0x2
#define lpfc_mbx_set_diag_lpbk_link_num_SHIFT 16
#define lpfc_mbx_set_diag_lpbk_link_num_MASK 0x0000003F
#define lpfc_mbx_set_diag_lpbk_link_num_WORD word0
@@ -1830,6 +1830,8 @@ struct lpfc_mbx_init_vfi {
#define lpfc_init_vfi_hop_count_MASK 0x000000FF
#define lpfc_init_vfi_hop_count_WORD word4
};
+#define MBX_VFI_IN_USE 0x9F02
+
struct lpfc_mbx_reg_vfi {
uint32_t word1;
@@ -2104,6 +2106,8 @@ struct lpfc_mbx_read_config {
#define lpfc_mbx_rd_conf_lnk_type_SHIFT 6
#define lpfc_mbx_rd_conf_lnk_type_MASK 0x00000003
#define lpfc_mbx_rd_conf_lnk_type_WORD word2
+#define LPFC_LNK_TYPE_GE 0
+#define LPFC_LNK_TYPE_FC 1
#define lpfc_mbx_rd_conf_lnk_ldv_SHIFT 8
#define lpfc_mbx_rd_conf_lnk_ldv_MASK 0x00000001
#define lpfc_mbx_rd_conf_lnk_ldv_WORD word2
@@ -3320,6 +3324,9 @@ struct wqe_rctl_dfctl {
#define wqe_la_SHIFT 3
#define wqe_la_MASK 0x000000001
#define wqe_la_WORD word5
+#define wqe_xo_SHIFT 6
+#define wqe_xo_MASK 0x000000001
+#define wqe_xo_WORD word5
#define wqe_ls_SHIFT 7
#define wqe_ls_MASK 0x000000001
#define wqe_ls_WORD word5
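A note on the loopback defines above: widening lpfc_mbx_set_diag_lpbk_type_MASK from 0x1 to 0x3 is what allows the new SERDES type (value 2) to be encoded at all. A minimal sketch of programming the request word, assuming a payload pointer named link_diag_loopback that the surrounding (omitted) mailbox setup would provide:

/* Sketch only: select SerDes loopback on the link number read earlier
 * from READ_CONFIG (phba->sli4_hba.lnk_info.lnk_no).
 */
bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
       LPFC_DIAG_LOOPBACK_TYPE_SERDES);
bf_set(lpfc_mbx_set_diag_lpbk_link_num, &link_diag_loopback->u.req,
       phba->sli4_hba.lnk_info.lnk_no);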
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 55bc4fc..dfea2da 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -62,7 +62,6 @@ static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
-static int lpfc_sli4_read_config(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_sgl_list(struct lpfc_hba *);
static int lpfc_init_sgl_list(struct lpfc_hba *);
@@ -475,27 +474,6 @@ lpfc_config_port_post(struct lpfc_hba *phba)
/* Get the default values for Model Name and Description */
lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
- if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_16G)
- || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G)
- && !(phba->lmt & LMT_1Gb))
- || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G)
- && !(phba->lmt & LMT_2Gb))
- || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G)
- && !(phba->lmt & LMT_4Gb))
- || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G)
- && !(phba->lmt & LMT_8Gb))
- || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G)
- && !(phba->lmt & LMT_10Gb))
- || ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G)
- && !(phba->lmt & LMT_16Gb))) {
- /* Reset link speed to auto */
- lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
- "1302 Invalid speed for this board: "
- "Reset link speed to auto: x%x\n",
- phba->cfg_link_speed);
- phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
- }
-
phba->link_state = LPFC_LINK_DOWN;
/* Only process IOCBs on ELS ring till hba_state is READY */
@@ -585,28 +563,10 @@ lpfc_config_port_post(struct lpfc_hba *phba)
return -EIO;
}
} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
- lpfc_init_link(phba, pmb, phba->cfg_topology,
- phba->cfg_link_speed);
- pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- lpfc_set_loopback_flag(phba);
- rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
- if (rc != MBX_SUCCESS) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0454 Adapter failed to init, mbxCmd x%x "
- "INIT_LINK, mbxStatus x%x\n",
- mb->mbxCommand, mb->mbxStatus);
-
- /* Clear all interrupt enable conditions */
- writel(0, phba->HCregaddr);
- readl(phba->HCregaddr); /* flush */
- /* Clear all pending interrupts */
- writel(0xffffffff, phba->HAregaddr);
- readl(phba->HAregaddr); /* flush */
- phba->link_state = LPFC_HBA_ERROR;
- if (rc != MBX_BUSY)
- mempool_free(pmb, phba->mbox_mem_pool);
- return -EIO;
- }
+ mempool_free(pmb, phba->mbox_mem_pool);
+ rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+ if (rc)
+ return rc;
}
/* MBOX buffer will be freed in mbox compl */
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -668,6 +628,28 @@ lpfc_config_port_post(struct lpfc_hba *phba)
int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
+ return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
+}
+
+/**
+ * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
+ * @phba: pointer to lpfc hba data structure.
+ * @fc_topology: desired fc topology.
+ * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
+ *
+ * This routine will issue the INIT_LINK mailbox command call.
+ * It is available to other drivers through the lpfc_hba data
+ * structure for use as a delayed link up mechanism with the
+ * module parameter lpfc_suppress_link_up.
+ *
+ * Return code
+ * 0 - success
+ * Any other value - error
+ **/
+int
+lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
+ uint32_t flag)
+{
struct lpfc_vport *vport = phba->pport;
LPFC_MBOXQ_t *pmb;
MAILBOX_t *mb;
@@ -681,9 +663,30 @@ lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
mb = &pmb->u.mb;
pmb->vport = vport;
- lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
+ if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
+ ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
+ !(phba->lmt & LMT_1Gb)) ||
+ ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
+ !(phba->lmt & LMT_2Gb)) ||
+ ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
+ !(phba->lmt & LMT_4Gb)) ||
+ ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
+ !(phba->lmt & LMT_8Gb)) ||
+ ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
+ !(phba->lmt & LMT_10Gb)) ||
+ ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
+ !(phba->lmt & LMT_16Gb))) {
+ /* Reset link speed to auto */
+ lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
+ "1302 Invalid speed for this board:%d "
+ "Reset link speed to auto.\n",
+ phba->cfg_link_speed);
+ phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
+ }
+ lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- lpfc_set_loopback_flag(phba);
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ lpfc_set_loopback_flag(phba);
rc = lpfc_sli_issue_mbox(phba, pmb, flag);
if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -1437,7 +1440,10 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
uint32_t event_data;
struct Scsi_Host *shost;
uint32_t if_type;
- struct lpfc_register portstat_reg;
+ struct lpfc_register portstat_reg = {0};
+ uint32_t reg_err1, reg_err2;
+ uint32_t uerrlo_reg, uemasklo_reg;
+ uint32_t pci_rd_rc1, pci_rd_rc2;
int rc;
/* If the pci channel is offline, ignore possible errors, since
@@ -1449,38 +1455,52 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
if (!phba->cfg_enable_hba_reset)
return;
- /* Send an internal error event to mgmt application */
- lpfc_board_errevt_to_mgmt(phba);
-
- /* For now, the actual action for SLI4 device handling is not
- * specified yet, just treated it as adaptor hardware failure
- */
- event_data = FC_REG_DUMP_EVENT;
- shost = lpfc_shost_from_vport(vport);
- fc_host_post_vendor_event(shost, fc_get_event_number(),
- sizeof(event_data), (char *) &event_data,
- SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
-
if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
switch (if_type) {
case LPFC_SLI_INTF_IF_TYPE_0:
+ pci_rd_rc1 = lpfc_readl(
+ phba->sli4_hba.u.if_type0.UERRLOregaddr,
+ &uerrlo_reg);
+ pci_rd_rc2 = lpfc_readl(
+ phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
+ &uemasklo_reg);
+ /* consider PCI bus read error as pci_channel_offline */
+ if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
+ return;
lpfc_sli4_offline_eratt(phba);
break;
case LPFC_SLI_INTF_IF_TYPE_2:
- portstat_reg.word0 =
- readl(phba->sli4_hba.u.if_type2.STATUSregaddr);
-
+ pci_rd_rc1 = lpfc_readl(
+ phba->sli4_hba.u.if_type2.STATUSregaddr,
+ &portstat_reg.word0);
+ /* consider PCI bus read error as pci_channel_offline */
+ if (pci_rd_rc1 == -EIO)
+ return;
+ reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
+ reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
/* TODO: Register for Overtemp async events. */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2889 Port Overtemperature event, "
- "taking port\n");
+ "taking port offline\n");
spin_lock_irq(&phba->hbalock);
phba->over_temp_state = HBA_OVER_TEMP;
spin_unlock_irq(&phba->hbalock);
lpfc_sli4_offline_eratt(phba);
- return;
+ break;
}
+ if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+ reg_err2 == SLIPORT_ERR2_REG_FW_RESTART)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3143 Port Down: Firmware Restarted\n");
+ else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+ reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3144 Port Down: Debug Dump\n");
+ else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+ reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3145 Port Down: Provisioning\n");
/*
* On error status condition, driver need to wait for port
* ready before performing reset.
@@ -1489,14 +1509,19 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
if (!rc) {
/* need reset: attempt for port recovery */
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2887 Port Error: Attempting "
- "Port Recovery\n");
+ "2887 Reset Needed: Attempting Port "
+ "Recovery...\n");
lpfc_offline_prep(phba);
lpfc_offline(phba);
lpfc_sli_brdrestart(phba);
if (lpfc_online(phba) == 0) {
lpfc_unblock_mgmt_io(phba);
- return;
+ /* don't report event on forced debug dump */
+ if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
+ reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
+ return;
+ else
+ break;
}
/* fall through for not able to recover */
}
@@ -1506,6 +1531,16 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba)
default:
break;
}
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "3123 Report dump event to upper layer\n");
+ /* Send an internal error event to mgmt application */
+ lpfc_board_errevt_to_mgmt(phba);
+
+ event_data = FC_REG_DUMP_EVENT;
+ shost = lpfc_shost_from_vport(vport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(event_data), (char *) &event_data,
+ SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
}
/**
@@ -2674,6 +2709,32 @@ lpfc_offline(struct lpfc_hba *phba)
}
/**
+ * lpfc_scsi_buf_update - Update the scsi_buffers that are already allocated.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine goes through all the SCSI buffers in the system and updates
+ * the physical XRI assigned to each buffer, because these may change after
+ * any firmware reset.
+ *
+ * Return codes
+ * 0 - successful (for now, it always returns 0)
+ **/
+int
+lpfc_scsi_buf_update(struct lpfc_hba *phba)
+{
+ struct lpfc_scsi_buf *sb, *sb_next;
+
+ spin_lock_irq(&phba->hbalock);
+ spin_lock(&phba->scsi_buf_list_lock);
+ list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list)
+ sb->cur_iocbq.sli4_xritag =
+ phba->sli4_hba.xri_ids[sb->cur_iocbq.sli4_lxritag];
+ spin_unlock(&phba->scsi_buf_list_lock);
+ spin_unlock_irq(&phba->hbalock);
+ return 0;
+}
+
+/**
* lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
* @phba: pointer to lpfc hba data structure.
*
@@ -5040,15 +5101,8 @@ lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
struct lpfc_rpi_hdr *rpi_hdr;
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
- /*
- * If the SLI4 port supports extents, posting the rpi header isn't
- * required. Set the expected maximum count and let the actual value
- * get set when extents are fully allocated.
- */
- if (!phba->sli4_hba.rpi_hdrs_in_use) {
- phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
return rc;
- }
if (phba->sli4_hba.extents_in_use)
return -EIO;
@@ -5942,7 +5996,7 @@ lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
* -ENOMEM - No available memory
* -EIO - The mailbox failed to complete successfully.
**/
-static int
+int
lpfc_sli4_read_config(struct lpfc_hba *phba)
{
LPFC_MBOXQ_t *pmb;
@@ -5974,6 +6028,20 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
rc = -EIO;
} else {
rd_config = &pmb->u.mqe.un.rd_config;
+ if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
+ phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
+ phba->sli4_hba.lnk_info.lnk_tp =
+ bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
+ phba->sli4_hba.lnk_info.lnk_no =
+ bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3081 lnk_type:%d, lnk_numb:%d\n",
+ phba->sli4_hba.lnk_info.lnk_tp,
+ phba->sli4_hba.lnk_info.lnk_no);
+ } else
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3082 Mailbox (x%x) returned ldv:x0\n",
+ bf_get(lpfc_mqe_command, &pmb->u.mqe));
phba->sli4_hba.extents_in_use =
bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
phba->sli4_hba.max_cfg_param.max_xri =
@@ -6462,6 +6530,7 @@ out_free_fcp_wq:
phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
}
kfree(phba->sli4_hba.fcp_wq);
+ phba->sli4_hba.fcp_wq = NULL;
out_free_els_wq:
lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
phba->sli4_hba.els_wq = NULL;
@@ -6474,6 +6543,7 @@ out_free_fcp_cq:
phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
}
kfree(phba->sli4_hba.fcp_cq);
+ phba->sli4_hba.fcp_cq = NULL;
out_free_els_cq:
lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
phba->sli4_hba.els_cq = NULL;
@@ -6486,6 +6556,7 @@ out_free_fp_eq:
phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
}
kfree(phba->sli4_hba.fp_eq);
+ phba->sli4_hba.fp_eq = NULL;
out_free_sp_eq:
lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
phba->sli4_hba.sp_eq = NULL;
@@ -6519,8 +6590,10 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
phba->sli4_hba.els_wq = NULL;
/* Release FCP work queue */
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
- lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
+ if (phba->sli4_hba.fcp_wq != NULL)
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count;
+ fcp_qidx++)
+ lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
kfree(phba->sli4_hba.fcp_wq);
phba->sli4_hba.fcp_wq = NULL;
@@ -6540,15 +6613,18 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
/* Release FCP response complete queue */
fcp_qidx = 0;
- do
- lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
- while (++fcp_qidx < phba->cfg_fcp_eq_count);
+ if (phba->sli4_hba.fcp_cq != NULL)
+ do
+ lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
+ while (++fcp_qidx < phba->cfg_fcp_eq_count);
kfree(phba->sli4_hba.fcp_cq);
phba->sli4_hba.fcp_cq = NULL;
/* Release fast-path event queue */
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
- lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
+ if (phba->sli4_hba.fp_eq != NULL)
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+ fcp_qidx++)
+ lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
kfree(phba->sli4_hba.fp_eq);
phba->sli4_hba.fp_eq = NULL;
@@ -6601,11 +6677,18 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.sp_eq->queue_id);
/* Set up fast-path event queue */
+ if (phba->cfg_fcp_eq_count && !phba->sli4_hba.fp_eq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3147 Fast-path EQs not allocated\n");
+ rc = -ENOMEM;
+ goto out_destroy_sp_eq;
+ }
for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0522 Fast-path EQ (%d) not "
"allocated\n", fcp_eqidx);
+ rc = -ENOMEM;
goto out_destroy_fp_eq;
}
rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
@@ -6630,6 +6713,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
if (!phba->sli4_hba.mbx_cq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0528 Mailbox CQ not allocated\n");
+ rc = -ENOMEM;
goto out_destroy_fp_eq;
}
rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
@@ -6649,6 +6733,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
if (!phba->sli4_hba.els_cq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0530 ELS CQ not allocated\n");
+ rc = -ENOMEM;
goto out_destroy_mbx_cq;
}
rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
@@ -6665,12 +6750,20 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.sp_eq->queue_id);
/* Set up fast-path FCP Response Complete Queue */
+ if (!phba->sli4_hba.fcp_cq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3148 Fast-path FCP CQ array not "
+ "allocated\n");
+ rc = -ENOMEM;
+ goto out_destroy_els_cq;
+ }
fcp_cqidx = 0;
do {
if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0526 Fast-path FCP CQ (%d) not "
"allocated\n", fcp_cqidx);
+ rc = -ENOMEM;
goto out_destroy_fcp_cq;
}
if (phba->cfg_fcp_eq_count)
@@ -6709,6 +6802,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
if (!phba->sli4_hba.mbx_wq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0538 Slow-path MQ not allocated\n");
+ rc = -ENOMEM;
goto out_destroy_fcp_cq;
}
rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
@@ -6728,6 +6822,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
if (!phba->sli4_hba.els_wq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0536 Slow-path ELS WQ not allocated\n");
+ rc = -ENOMEM;
goto out_destroy_mbx_wq;
}
rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
@@ -6744,11 +6839,19 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
phba->sli4_hba.els_cq->queue_id);
/* Set up fast-path FCP Work Queue */
+ if (!phba->sli4_hba.fcp_wq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3149 Fast-path FCP WQ array not "
+ "allocated\n");
+ rc = -ENOMEM;
+ goto out_destroy_els_wq;
+ }
for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0534 Fast-path FCP WQ (%d) not "
"allocated\n", fcp_wqidx);
+ rc = -ENOMEM;
goto out_destroy_fcp_wq;
}
rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
@@ -6779,6 +6882,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"0540 Receive Queue not allocated\n");
+ rc = -ENOMEM;
goto out_destroy_fcp_wq;
}
@@ -6805,18 +6909,21 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
out_destroy_fcp_wq:
for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
+out_destroy_els_wq:
lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
out_destroy_mbx_wq:
lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
out_destroy_fcp_cq:
for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
+out_destroy_els_cq:
lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
out_destroy_mbx_cq:
lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
out_destroy_fp_eq:
for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
+out_destroy_sp_eq:
lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
out_error:
return rc;
@@ -6853,13 +6960,18 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
/* Unset ELS complete queue */
lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
/* Unset FCP response complete queue */
- fcp_qidx = 0;
- do {
- lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
- } while (++fcp_qidx < phba->cfg_fcp_eq_count);
+ if (phba->sli4_hba.fcp_cq) {
+ fcp_qidx = 0;
+ do {
+ lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
+ } while (++fcp_qidx < phba->cfg_fcp_eq_count);
+ }
/* Unset fast-path event queue */
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
- lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
+ if (phba->sli4_hba.fp_eq) {
+ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count;
+ fcp_qidx++)
+ lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
+ }
/* Unset slow-path event queue */
lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
}
@@ -7398,22 +7510,25 @@ out:
static void
lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
{
- struct pci_dev *pdev;
-
- /* Obtain PCI device reference */
- if (!phba->pcidev)
- return;
- else
- pdev = phba->pcidev;
-
- /* Free coherent DMA memory allocated */
-
- /* Unmap I/O memory space */
- iounmap(phba->sli4_hba.drbl_regs_memmap_p);
- iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
- iounmap(phba->sli4_hba.conf_regs_memmap_p);
+ uint32_t if_type;
+ if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
- return;
+ switch (if_type) {
+ case LPFC_SLI_INTF_IF_TYPE_0:
+ iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+ iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+ iounmap(phba->sli4_hba.conf_regs_memmap_p);
+ break;
+ case LPFC_SLI_INTF_IF_TYPE_2:
+ iounmap(phba->sli4_hba.conf_regs_memmap_p);
+ break;
+ case LPFC_SLI_INTF_IF_TYPE_1:
+ default:
+ dev_printk(KERN_ERR, &phba->pcidev->dev,
+ "FATAL - unsupported SLI4 interface type - %d\n",
+ if_type);
+ break;
+ }
}
/**
@@ -9198,12 +9313,15 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
/* Perform post initialization setup */
lpfc_post_init_setup(phba);
- /* check for firmware upgrade or downgrade */
- snprintf(file_name, 16, "%s.grp", phba->ModelName);
- error = request_firmware(&fw, file_name, &phba->pcidev->dev);
- if (!error) {
- lpfc_write_firmware(phba, fw);
- release_firmware(fw);
+ /* check for firmware upgrade or downgrade (if_type 2 only) */
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
+ LPFC_SLI_INTF_IF_TYPE_2) {
+ snprintf(file_name, 16, "%s.grp", phba->ModelName);
+ error = request_firmware(&fw, file_name, &phba->pcidev->dev);
+ if (!error) {
+ lpfc_write_firmware(phba, fw);
+ release_firmware(fw);
+ }
}
/* Check if there are static vports to be created. */
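One practical consequence of the lpfc_hba_init_link split above is that a caller can now pick the topology explicitly instead of inheriting phba->cfg_topology. A hedged sketch of such a call (the caller, and its rc variable, are assumed and not part of this patch):

/* Sketch only: force a point-to-point bring-up, e.g. for a diagnostic
 * path that must not negotiate loop topology.
 */
rc = lpfc_hba_init_link_fc_topology(phba, FLAGS_TOPOLOGY_MODE_PT_PT,
				    MBX_NOWAIT);
if (rc)
	return rc;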
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 2ebc7d2..20336f0 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1293,6 +1293,10 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
phba->sli_rev = LPFC_SLI_REV2;
mb->un.varCfgPort.sli_mode = phba->sli_rev;
+ /* If this is an SLI3 port, configure async status notification. */
+ if (phba->sli_rev == LPFC_SLI_REV3)
+ mb->un.varCfgPort.casabt = 1;
+
/* Now setup pcb */
phba->pcb->type = TYPE_NATIVE_SLI2;
phba->pcb->feature = FEATURE_INITIAL_SLI2;
@@ -2129,6 +2133,14 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX,
+ "3134 Register VFI, mydid:x%x, fcfi:%d, "
+ " vfi:%d, vpi:%d, fc_pname:%x%x\n",
+ vport->fc_myDID,
+ vport->phba->fcf.fcfi,
+ vport->phba->sli4_hba.vfi_ids[vport->vfi],
+ vport->phba->vpi_ids[vport->vpi],
+ reg_vfi->wwn[0], reg_vfi->wwn[1]);
}
/**
@@ -2175,16 +2187,15 @@ lpfc_unreg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
}
/**
- * lpfc_dump_fcoe_param - Dump config region 23 to get FCoe parameters.
+ * lpfc_sli4_dump_cfg_rg23 - Dump sli4 port config region 23
* @phba: pointer to the hba structure containing.
* @mbox: pointer to lpfc mbox command to initialize.
*
- * This function create a SLI4 dump mailbox command to dump FCoE
- * parameters stored in region 23.
+ * This function creates a SLI4 dump mailbox command to dump config
+ * region 23.
**/
int
-lpfc_dump_fcoe_param(struct lpfc_hba *phba,
- struct lpfcMboxq *mbox)
+lpfc_sli4_dump_cfg_rg23(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
{
struct lpfc_dmabuf *mp = NULL;
MAILBOX_t *mb;
@@ -2198,9 +2209,9 @@ lpfc_dump_fcoe_param(struct lpfc_hba *phba,
if (!mp || !mp->virt) {
kfree(mp);
- /* dump_fcoe_param failed to allocate memory */
+ /* dump config region 23 failed to allocate memory */
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
- "2569 lpfc_dump_fcoe_param: memory"
+ "2569 lpfc dump config region 23: memory"
" allocation failed\n");
return 1;
}
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index 10d5b5e..ade763d3 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -389,7 +389,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
{
struct hbq_dmabuf *hbqbp;
- hbqbp = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
+ hbqbp = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
if (!hbqbp)
return NULL;
@@ -441,7 +441,7 @@ lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
{
struct hbq_dmabuf *dma_buf;
- dma_buf = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
+ dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
if (!dma_buf)
return NULL;
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 2ddd02f..e8bb005 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -783,6 +783,14 @@ lpfc_device_rm_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
static uint32_t
+lpfc_device_recov_unused_node(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp,
+ void *arg, uint32_t evt)
+{
+ return ndlp->nlp_state;
+}
+
+static uint32_t
lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
void *arg, uint32_t evt)
{
@@ -2147,7 +2155,7 @@ static uint32_t (*lpfc_disc_action[NLP_STE_MAX_STATE * NLP_EVT_MAX_EVENT])
lpfc_disc_illegal, /* CMPL_ADISC */
lpfc_disc_illegal, /* CMPL_REG_LOGIN */
lpfc_device_rm_unused_node, /* DEVICE_RM */
- lpfc_disc_illegal, /* DEVICE_RECOVERY */
+ lpfc_device_recov_unused_node, /* DEVICE_RECOVERY */
lpfc_rcv_plogi_plogi_issue, /* RCV_PLOGI PLOGI_ISSUE */
lpfc_rcv_prli_plogi_issue, /* RCV_PRLI */
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 2e1e54e..c60f5d0 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -681,8 +681,10 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
rrq_empty = list_empty(&phba->active_rrq_list);
spin_unlock_irqrestore(&phba->hbalock, iflag);
- if (ndlp)
+ if (ndlp) {
lpfc_set_rrq_active(phba, ndlp, xri, rxid, 1);
+ lpfc_sli4_abts_err_handler(phba, ndlp, axri);
+ }
lpfc_release_scsi_buf_s4(phba, psb);
if (rrq_empty)
lpfc_worker_wake_up(phba);
@@ -2911,8 +2913,8 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
int_to_scsilun(lpfc_cmd->pCmd->device->lun,
&lpfc_cmd->fcp_cmnd->fcp_lun);
- memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, 16);
-
+ memset(&fcp_cmnd->fcpCdb[0], 0, LPFC_FCP_CDB_LEN);
+ memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, scsi_cmnd->cmd_len);
if (scsi_populate_tag_msg(scsi_cmnd, tag)) {
switch (tag[0]) {
case HEAD_OF_QUEUE_TAG:
@@ -3236,6 +3238,15 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
cmnd->result = err;
goto out_fail_command;
}
+ /*
+ * Do not let the mid-layer retry I/O too fast. If an I/O is retried
+ * without waiting a bit then indicate that the device is busy.
+ */
+ if (cmnd->retries &&
+ time_before(jiffies, (cmnd->jiffies_at_alloc +
+ msecs_to_jiffies(LPFC_RETRY_PAUSE *
+ cmnd->retries))))
+ return SCSI_MLQUEUE_DEVICE_BUSY;
ndlp = rdata->pnode;
if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) &&
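A short worked note on the retry throttle added to lpfc_queuecommand_lck above: with LPFC_RETRY_PAUSE defined as 300 in lpfc_scsi.h (treated as milliseconds here), the n-th retry of a command is refused with SCSI_MLQUEUE_DEVICE_BUSY until roughly n * 300 ms after the command was first allocated. The same check, condensed for readability:

/* Condensed sketch: a command on its 2nd retry is held off until at
 * least ~600 ms after cmnd->jiffies_at_alloc.
 */
unsigned long earliest = cmnd->jiffies_at_alloc +
			 msecs_to_jiffies(LPFC_RETRY_PAUSE * cmnd->retries);
if (cmnd->retries && time_before(jiffies, earliest))
	return SCSI_MLQUEUE_DEVICE_BUSY;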
diff --git a/drivers/scsi/lpfc/lpfc_scsi.h b/drivers/scsi/lpfc/lpfc_scsi.h
index ce645b2..9075a08 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.h
+++ b/drivers/scsi/lpfc/lpfc_scsi.h
@@ -21,6 +21,7 @@
#include <asm/byteorder.h>
struct lpfc_hba;
+#define LPFC_FCP_CDB_LEN 16
#define list_remove_head(list, entry, type, member) \
do { \
@@ -102,7 +103,7 @@ struct fcp_cmnd {
#define WRITE_DATA 0x01 /* Bit 0 */
#define READ_DATA 0x02 /* Bit 1 */
- uint8_t fcpCdb[16]; /* SRB cdb field is copied here */
+ uint8_t fcpCdb[LPFC_FCP_CDB_LEN]; /* SRB cdb field is copied here */
uint32_t fcpDl; /* Total transfer length */
};
@@ -153,5 +154,5 @@ struct lpfc_scsi_buf {
#define LPFC_SCSI_DMA_EXT_SIZE 264
#define LPFC_BPL_SIZE 1024
-
+#define LPFC_RETRY_PAUSE 300
#define MDAC_DIRECT_CMD 0x22
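The LPFC_FCP_CDB_LEN define above pairs with the lpfc_scsi.c change earlier in this diff: only scsi_cmnd->cmd_len bytes are copied into the FCP CDB and the rest of the 16-byte field is zeroed, so short CDBs no longer carry stale trailing bytes. A condensed sketch of that prep-path copy, assuming a 6-byte CDB:

/* Sketch: for cmd_len == 6, bytes 0-5 come from the midlayer and
 * bytes 6-15 are guaranteed to be zero on the wire.
 */
memset(&fcp_cmnd->fcpCdb[0], 0, LPFC_FCP_CDB_LEN);
memcpy(&fcp_cmnd->fcpCdb[0], scsi_cmnd->cmnd, scsi_cmnd->cmd_len);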
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 4d4104f..23a2759 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -89,15 +89,20 @@ lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
static uint32_t
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
{
- union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
+ union lpfc_wqe *temp_wqe;
struct lpfc_register doorbell;
uint32_t host_index;
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return -ENOMEM;
+ temp_wqe = q->qe[q->host_index].wqe;
+
/* If the host has not yet processed the next entry then we are done */
if (((q->host_index + 1) % q->entry_count) == q->hba_index)
return -ENOMEM;
/* set consumption flag every once in a while */
- if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
+ if (!((q->host_index + 1) % q->entry_repost))
bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
@@ -134,6 +139,10 @@ lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
uint32_t released = 0;
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return 0;
+
if (q->hba_index == index)
return 0;
do {
@@ -158,10 +167,15 @@ lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
static uint32_t
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
- struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
+ struct lpfc_mqe *temp_mqe;
struct lpfc_register doorbell;
uint32_t host_index;
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return -ENOMEM;
+ temp_mqe = q->qe[q->host_index].mqe;
+
/* If the host has not yet processed the next entry then we are done */
if (((q->host_index + 1) % q->entry_count) == q->hba_index)
return -ENOMEM;
@@ -195,6 +209,10 @@ lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return 0;
+
/* Clear the mailbox pointer for completion */
q->phba->mbox = NULL;
q->hba_index = ((q->hba_index + 1) % q->entry_count);
@@ -213,7 +231,12 @@ lpfc_sli4_mq_release(struct lpfc_queue *q)
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
- struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
+ struct lpfc_eqe *eqe;
+
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return NULL;
+ eqe = q->qe[q->hba_index].eqe;
/* If the next EQE is not valid then we are done */
if (!bf_get_le32(lpfc_eqe_valid, eqe))
@@ -248,6 +271,10 @@ lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
struct lpfc_eqe *temp_eqe;
struct lpfc_register doorbell;
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return 0;
+
/* while there are valid entries */
while (q->hba_index != q->host_index) {
temp_eqe = q->qe[q->host_index].eqe;
@@ -288,6 +315,10 @@ lpfc_sli4_cq_get(struct lpfc_queue *q)
{
struct lpfc_cqe *cqe;
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return NULL;
+
/* If the next CQE is not valid then we are done */
if (!bf_get_le32(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
return NULL;
@@ -322,6 +353,9 @@ lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
struct lpfc_cqe *temp_qe;
struct lpfc_register doorbell;
+ /* sanity check on queue memory */
+ if (unlikely(!q))
+ return 0;
/* while there are valid entries */
while (q->hba_index != q->host_index) {
temp_qe = q->qe[q->host_index].cqe;
@@ -359,11 +393,17 @@ static int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
- struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
- struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
+ struct lpfc_rqe *temp_hrqe;
+ struct lpfc_rqe *temp_drqe;
struct lpfc_register doorbell;
int put_index = hq->host_index;
+ /* sanity check on queue memory */
+ if (unlikely(!hq) || unlikely(!dq))
+ return -ENOMEM;
+ temp_hrqe = hq->qe[hq->host_index].rqe;
+ temp_drqe = dq->qe[dq->host_index].rqe;
+
if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
return -EINVAL;
if (hq->host_index != dq->host_index)
@@ -402,6 +442,10 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
+ /* sanity check on queue memory */
+ if (unlikely(!hq) || unlikely(!dq))
+ return 0;
+
if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
return 0;
hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
@@ -3575,8 +3619,8 @@ lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
* lpfc_reset_barrier - Make HBA ready for HBA reset
* @phba: Pointer to HBA context object.
*
- * This function is called before resetting an HBA. This
- * function requests HBA to quiesce DMAs before a reset.
+ * This function is called with hbalock held before resetting an HBA; it
+ * requests the HBA to quiesce DMAs before the reset.
**/
void lpfc_reset_barrier(struct lpfc_hba *phba)
{
@@ -3851,7 +3895,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
uint16_t cfg_value;
- uint8_t qindx;
/* Reset HBA */
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -3867,19 +3910,6 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
psli->sli_flag &= ~(LPFC_PROCESS_LA);
phba->fcf.fcf_flag = 0;
- /* Clean up the child queue list for the CQs */
- list_del_init(&phba->sli4_hba.mbx_wq->list);
- list_del_init(&phba->sli4_hba.els_wq->list);
- list_del_init(&phba->sli4_hba.hdr_rq->list);
- list_del_init(&phba->sli4_hba.dat_rq->list);
- list_del_init(&phba->sli4_hba.mbx_cq->list);
- list_del_init(&phba->sli4_hba.els_cq->list);
- for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
- list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
- qindx = 0;
- do
- list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
- while (++qindx < phba->cfg_fcp_eq_count);
spin_unlock_irq(&phba->hbalock);
/* Now physically reset the device */
@@ -3892,6 +3922,7 @@ lpfc_sli4_brdreset(struct lpfc_hba *phba)
~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
/* Perform FCoE PCI function reset */
+ lpfc_sli4_queue_destroy(phba);
lpfc_pci_function_reset(phba);
/* Restore PCI cmd register */
@@ -4339,6 +4370,11 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
spin_unlock_irq(&phba->hbalock);
done = 1;
+
+ if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
+ (pmb->u.mb.un.varCfgPort.gasabt == 0))
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "3110 Port did not grant ASABT\n");
}
}
if (!done) {
@@ -4551,9 +4587,9 @@ lpfc_sli_hba_setup_error:
* data structure.
**/
static int
-lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
- LPFC_MBOXQ_t *mboxq)
+lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
{
+ LPFC_MBOXQ_t *mboxq;
struct lpfc_dmabuf *mp;
struct lpfc_mqe *mqe;
uint32_t data_length;
@@ -4565,10 +4601,16 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
- mqe = &mboxq->u.mqe;
- if (lpfc_dump_fcoe_param(phba, mboxq))
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
return -ENOMEM;
+ mqe = &mboxq->u.mqe;
+ if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
+ rc = -ENOMEM;
+ goto out_free_mboxq;
+ }
+
mp = (struct lpfc_dmabuf *) mboxq->context1;
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
@@ -4596,19 +4638,25 @@ lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
if (rc) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
- return -EIO;
+ rc = -EIO;
+ goto out_free_mboxq;
}
data_length = mqe->un.mb_words[5];
if (data_length > DMP_RGN23_SIZE) {
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
- return -EIO;
+ rc = -EIO;
+ goto out_free_mboxq;
}
lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
lpfc_mbuf_free(phba, mp->virt, mp->phys);
kfree(mp);
- return 0;
+ rc = 0;
+
+out_free_mboxq:
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ return rc;
}
/**
@@ -4706,7 +4754,6 @@ static int
lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
{
LPFC_MBOXQ_t *mboxq;
- struct lpfc_mbx_read_config *rd_config;
struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
struct lpfc_controller_attribute *cntl_attr;
struct lpfc_mbx_get_port_name *get_port_name;
@@ -4724,33 +4771,11 @@ lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!mboxq)
return -ENOMEM;
-
/* obtain link type and link number via READ_CONFIG */
- lpfc_read_config(phba, mboxq);
- rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- if (rc == MBX_SUCCESS) {
- rd_config = &mboxq->u.mqe.un.rd_config;
- if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
- phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
- phba->sli4_hba.lnk_info.lnk_tp =
- bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
- phba->sli4_hba.lnk_info.lnk_no =
- bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "3081 lnk_type:%d, lnk_numb:%d\n",
- phba->sli4_hba.lnk_info.lnk_tp,
- phba->sli4_hba.lnk_info.lnk_no);
- goto retrieve_ppname;
- } else
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "3082 Mailbox (x%x) returned ldv:x0\n",
- bf_get(lpfc_mqe_command,
- &mboxq->u.mqe));
- } else
- lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
- "3083 Mailbox (x%x) failed, status:x%x\n",
- bf_get(lpfc_mqe_command, &mboxq->u.mqe),
- bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+ phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
+ lpfc_sli4_read_config(phba);
+ if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
+ goto retrieve_ppname;
/* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
@@ -4875,14 +4900,19 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
fcp_eqidx = 0;
- do
- lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
- LPFC_QUEUE_REARM);
- while (++fcp_eqidx < phba->cfg_fcp_eq_count);
+ if (phba->sli4_hba.fcp_cq) {
+ do
+ lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
+ LPFC_QUEUE_REARM);
+ while (++fcp_eqidx < phba->cfg_fcp_eq_count);
+ }
lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
- for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
- lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
- LPFC_QUEUE_REARM);
+ if (phba->sli4_hba.fp_eq) {
+ for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count;
+ fcp_eqidx++)
+ lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
+ LPFC_QUEUE_REARM);
+ }
}
/**
@@ -5457,6 +5487,8 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
uint16_t count, base;
unsigned long longs;
+ if (!phba->sli4_hba.rpi_hdrs_in_use)
+ phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
if (phba->sli4_hba.extents_in_use) {
/*
* The port supports resource extents. The XRI, VPI, VFI, RPI
@@ -5538,9 +5570,10 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
* need any action - just exit.
*/
if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
- LPFC_IDX_RSRC_RDY)
- return 0;
-
+ LPFC_IDX_RSRC_RDY) {
+ lpfc_sli4_dealloc_resource_identifiers(phba);
+ lpfc_sli4_remove_rpis(phba);
+ }
/* RPIs. */
count = phba->sli4_hba.max_cfg_param.max_rpi;
base = phba->sli4_hba.max_cfg_param.rpi_base;
@@ -5880,14 +5913,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
if (!mboxq)
return -ENOMEM;
- /*
- * Continue initialization with default values even if driver failed
- * to read FCoE param config regions
- */
- if (lpfc_sli4_read_fcoe_params(phba, mboxq))
- lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
- "2570 Failed to read FCoE parameters\n");
-
/* Issue READ_REV to collect vpd and FW information. */
vpd_size = SLI4_PAGE_SIZE;
vpd = kzalloc(vpd_size, GFP_KERNEL);
@@ -5925,6 +5950,16 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
}
/*
+ * Continue initialization with default values even if driver failed
+ * to read FCoE param config regions; only read the parameters if the
+ * board is FCoE.
+ */
+ if (phba->hba_flag & HBA_FCOE_MODE &&
+ lpfc_sli4_read_fcoe_params(phba))
+ lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
+ "2570 Failed to read FCoE parameters\n");
+
+ /*
* Retrieve sli4 device physical port name, failure of doing it
* is considered as non-fatal.
*/
@@ -6044,6 +6079,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
"rc = x%x\n", rc);
goto out_free_mbox;
}
+ /* update physical xri mappings in the scsi buffers */
+ lpfc_scsi_buf_update(phba);
/* Read the port's service parameters. */
rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
@@ -6205,7 +6242,11 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
rc = 0;
phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
&mboxq->u.mqe.un.reg_fcfi);
+
+ /* Check if the port is configured to be disabled */
+ lpfc_sli_read_link_ste(phba);
}
+
/*
* The port is ready, set the host's link state to LINK_DOWN
* in preparation for link interrupts.
@@ -6213,10 +6254,25 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
spin_lock_irq(&phba->hbalock);
phba->link_state = LPFC_LINK_DOWN;
spin_unlock_irq(&phba->hbalock);
- if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
- rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
- if (rc)
+ if (!(phba->hba_flag & HBA_FCOE_MODE) &&
+ (phba->hba_flag & LINK_DISABLED)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
+ "3103 Adapter Link is disabled.\n");
+ lpfc_down_link(phba, mboxq);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_SLI,
+ "3104 Adapter failed to issue "
+ "DOWN_LINK mbox cmd, rc:x%x\n", rc);
goto out_unset_queue;
+ }
+ } else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
+ /* don't perform init_link on SLI4 FC port loopback test */
+ if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
+ rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
+ if (rc)
+ goto out_unset_queue;
+ }
}
mempool_free(mboxq, phba->mbox_mem_pool);
return rc;
@@ -7487,6 +7543,7 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
struct ulp_bde64 *bpl = NULL;
struct ulp_bde64 bde;
struct sli4_sge *sgl = NULL;
+ struct lpfc_dmabuf *dmabuf;
IOCB_t *icmd;
int numBdes = 0;
int i = 0;
@@ -7505,9 +7562,12 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
* have not been byteswapped yet so there is no
* need to swap them back.
*/
- bpl = (struct ulp_bde64 *)
- ((struct lpfc_dmabuf *)piocbq->context3)->virt;
+ if (piocbq->context3)
+ dmabuf = (struct lpfc_dmabuf *)piocbq->context3;
+ else
+ return xritag;
+ bpl = (struct ulp_bde64 *)dmabuf->virt;
if (!bpl)
return xritag;
@@ -7616,6 +7676,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
int numBdes, i;
struct ulp_bde64 bde;
struct lpfc_nodelist *ndlp;
+ uint32_t *pcmd;
+ uint32_t if_type;
fip = phba->hba_flag & HBA_FIP_SUPPORT;
/* The fcp commands will set command type */
@@ -7669,6 +7731,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
iocbq->iocb.ulpCommand);
return IOCB_ERROR;
}
+
wqe->els_req.payload_len = xmit_len;
/* Els_reguest64 has a TMO */
bf_set(wqe_tmo, &wqe->els_req.wqe_com,
@@ -7683,9 +7746,28 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
/* CCP CCPE PV PRI in word10 were set in the memcpy */
- if (command_type == ELS_COMMAND_FIP) {
+ if (command_type == ELS_COMMAND_FIP)
els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
>> LPFC_FIP_ELS_ID_SHIFT);
+ pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
+ iocbq->context2)->virt);
+ if_type = bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf);
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
+ if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
+ *pcmd == ELS_CMD_SCR ||
+ *pcmd == ELS_CMD_PLOGI)) {
+ bf_set(els_req64_sp, &wqe->els_req, 1);
+ bf_set(els_req64_sid, &wqe->els_req,
+ iocbq->vport->fc_myDID);
+ bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ phba->vpi_ids[phba->pport->vpi]);
+ } else if (iocbq->context1) {
+ bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+ }
}
bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
@@ -7704,6 +7786,8 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
/* The entire sequence is transmitted for this IOCB */
xmit_len = total_len;
cmnd = CMD_XMIT_SEQUENCE64_CR;
+ if (phba->link_flag & LS_LOOPBACK_MODE)
+ bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
case CMD_XMIT_SEQUENCE64_CR:
/* word3 iocb=io_tag32 wqe=reserved */
wqe->xmit_sequence.rsvd3 = 0;
@@ -7846,6 +7930,16 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
+ pcmd = (uint32_t *) (((struct lpfc_dmabuf *)
+ iocbq->context2)->virt);
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
+ bf_set(els_req64_sp, &wqe->els_req, 1);
+ bf_set(els_req64_sid, &wqe->els_req,
+ iocbq->vport->fc_myDID);
+ bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
+ bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+ phba->vpi_ids[phba->pport->vpi]);
+ }
command_type = OTHER_COMMAND;
break;
case CMD_CLOSE_XRI_CN:
@@ -8037,6 +8131,8 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
*/
if (piocb->iocb_flag & LPFC_IO_FCP)
piocb->fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
+ if (unlikely(!phba->sli4_hba.fcp_wq))
+ return IOCB_ERROR;
if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx],
&wqe))
return IOCB_ERROR;
@@ -8173,6 +8269,137 @@ lpfc_extra_ring_setup( struct lpfc_hba *phba)
return 0;
}
+/* lpfc_sli_abts_recover_port - Recover a port that failed an ABTS.
+ * @vport: pointer to virtual port object.
+ * @ndlp: nodelist pointer for the impacted rport.
+ *
+ * The driver calls this routine in response to an XRI ABORT CQE
+ * event from the port. In this event, the driver is required to
+ * recover its login to the rport even though its login may be valid
+ * from the driver's perspective. The failed ABTS notice from the
+ * port indicates the rport is not responding.
+ */
+static void
+lpfc_sli_abts_recover_port(struct lpfc_vport *vport,
+ struct lpfc_nodelist *ndlp)
+{
+ struct Scsi_Host *shost;
+ struct lpfc_hba *phba;
+ unsigned long flags = 0;
+
+ shost = lpfc_shost_from_vport(vport);
+ phba = vport->phba;
+ if (ndlp->nlp_state != NLP_STE_MAPPED_NODE) {
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_SLI, "3093 No rport recovery needed. "
+ "rport in state 0x%x\n",
+ ndlp->nlp_state);
+ return;
+ }
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3094 Start rport recovery on shost id 0x%x "
+ "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x "
+ "flags 0x%x\n",
+ shost->host_no, ndlp->nlp_DID,
+ vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state,
+ ndlp->nlp_flag);
+ /*
+ * The rport is not responding. Don't attempt ADISC recovery.
+ * Remove the FCP-2 flag to force a PLOGI.
+ */
+ spin_lock_irqsave(shost->host_lock, flags);
+ ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ lpfc_disc_state_machine(vport, ndlp, NULL,
+ NLP_EVT_DEVICE_RECOVERY);
+ lpfc_cancel_retry_delay_tmo(vport, ndlp);
+ spin_lock_irqsave(shost->host_lock, flags);
+ ndlp->nlp_flag |= NLP_NPR_2B_DISC;
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ lpfc_disc_start(vport);
+}
+
+/* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to iocb object.
+ *
+ * The async_event handler calls this routine when it receives
+ * an ASYNC_STATUS_CN event from the port. The port generates
+ * this event when an Abort Sequence request to an rport fails
+ * twice in succession. The abort could be originated by the
+ * driver or by the port. The ABTS could have been for an ELS
+ * or FCP IO. The port only generates this event when an ABTS
+ * fails to complete after one retry.
+ */
+static void
+lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
+ struct lpfc_iocbq *iocbq)
+{
+ struct lpfc_nodelist *ndlp = NULL;
+ uint16_t rpi = 0, vpi = 0;
+ struct lpfc_vport *vport = NULL;
+
+ /* The rpi in the ulpContext is vport-sensitive. */
+ vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
+ rpi = iocbq->iocb.ulpContext;
+
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3092 Port generated ABTS async event "
+ "on vpi %d rpi %d status 0x%x\n",
+ vpi, rpi, iocbq->iocb.ulpStatus);
+
+ vport = lpfc_find_vport_by_vpid(phba, vpi);
+ if (!vport)
+ goto err_exit;
+ ndlp = lpfc_findnode_rpi(vport, rpi);
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+ goto err_exit;
+
+ if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
+ lpfc_sli_abts_recover_port(vport, ndlp);
+ return;
+
+ err_exit:
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3095 Event Context not found, no "
+ "action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
+ vpi, rpi, iocbq->iocb.ulpStatus,
+ iocbq->iocb.ulpContext);
+}
+
+/**
+ * lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
+ * @phba: pointer to HBA context object.
+ * @ndlp: nodelist pointer for the impacted rport.
+ * @axri: pointer to the wcqe containing the failed exchange.
+ *
+ * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
+ * port. The port generates this event when an abort exchange request to an
+ * rport fails twice in succession with no reply. The abort could be originated
+ * by the driver or by the port. The ABTS could have been for an ELS or FCP IO.
+ */
+void
+lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
+ struct lpfc_nodelist *ndlp,
+ struct sli4_wcqe_xri_aborted *axri)
+{
+ struct lpfc_vport *vport;
+
+ if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3115 Node Context not found, driver "
+ "ignoring abts err event\n");
+ return;
+ }
+ vport = ndlp->vport;
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3116 Port generated FCP XRI ABORT event on "
+ "vpi %d rpi %d xri x%x status 0x%x\n",
+ ndlp->vport->vpi, ndlp->nlp_rpi,
+ bf_get(lpfc_wcqe_xa_xri, axri),
+ bf_get(lpfc_wcqe_xa_status, axri));
+
+ if (bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT)
+ lpfc_sli_abts_recover_port(vport, ndlp);
+}
+
/**
* lpfc_sli_async_event_handler - ASYNC iocb handler function
* @phba: Pointer to HBA context object.
@@ -8192,63 +8419,58 @@ lpfc_sli_async_event_handler(struct lpfc_hba * phba,
{
IOCB_t *icmd;
uint16_t evt_code;
- uint16_t temp;
struct temp_event temp_event_data;
struct Scsi_Host *shost;
uint32_t *iocb_w;
icmd = &iocbq->iocb;
evt_code = icmd->un.asyncstat.evt_code;
- temp = icmd->ulpContext;
- if ((evt_code != ASYNC_TEMP_WARN) &&
- (evt_code != ASYNC_TEMP_SAFE)) {
+ switch (evt_code) {
+ case ASYNC_TEMP_WARN:
+ case ASYNC_TEMP_SAFE:
+ temp_event_data.data = (uint32_t) icmd->ulpContext;
+ temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
+ if (evt_code == ASYNC_TEMP_WARN) {
+ temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
+ lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
+ "0347 Adapter is very hot, please take "
+ "corrective action. temperature : %d Celsius\n",
+ (uint32_t) icmd->ulpContext);
+ } else {
+ temp_event_data.event_code = LPFC_NORMAL_TEMP;
+ lpfc_printf_log(phba, KERN_ERR, LOG_TEMP,
+ "0340 Adapter temperature is OK now. "
+ "temperature : %d Celsius\n",
+ (uint32_t) icmd->ulpContext);
+ }
+
+ /* Send temperature change event to applications */
+ shost = lpfc_shost_from_vport(phba->pport);
+ fc_host_post_vendor_event(shost, fc_get_event_number(),
+ sizeof(temp_event_data), (char *) &temp_event_data,
+ LPFC_NL_VENDOR_ID);
+ break;
+ case ASYNC_STATUS_CN:
+ lpfc_sli_abts_err_handler(phba, iocbq);
+ break;
+ default:
iocb_w = (uint32_t *) icmd;
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_SLI,
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0346 Ring %d handler: unexpected ASYNC_STATUS"
" evt_code 0x%x\n"
"W0 0x%08x W1 0x%08x W2 0x%08x W3 0x%08x\n"
"W4 0x%08x W5 0x%08x W6 0x%08x W7 0x%08x\n"
"W8 0x%08x W9 0x%08x W10 0x%08x W11 0x%08x\n"
"W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
- pring->ringno,
- icmd->un.asyncstat.evt_code,
+ pring->ringno, icmd->un.asyncstat.evt_code,
iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
- return;
- }
- temp_event_data.data = (uint32_t)temp;
- temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
- if (evt_code == ASYNC_TEMP_WARN) {
- temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_TEMP,
- "0347 Adapter is very hot, please take "
- "corrective action. temperature : %d Celsius\n",
- temp);
- }
- if (evt_code == ASYNC_TEMP_SAFE) {
- temp_event_data.event_code = LPFC_NORMAL_TEMP;
- lpfc_printf_log(phba,
- KERN_ERR,
- LOG_TEMP,
- "0340 Adapter temperature is OK now. "
- "temperature : %d Celsius\n",
- temp);
+ break;
}
-
- /* Send temperature change event to applications */
- shost = lpfc_shost_from_vport(phba->pport);
- fc_host_post_vendor_event(shost, fc_get_event_number(),
- sizeof(temp_event_data), (char *) &temp_event_data,
- LPFC_NL_VENDOR_ID);
-
}
@@ -8823,12 +9045,14 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
{
IOCB_t *irsp = &rspiocb->iocb;
uint16_t abort_iotag, abort_context;
- struct lpfc_iocbq *abort_iocb;
- struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
-
- abort_iocb = NULL;
+ struct lpfc_iocbq *abort_iocb = NULL;
if (irsp->ulpStatus) {
+
+ /*
+ * Assume that the port already completed and returned, or
+ * will return the iocb. Just log the message.
+ */
abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
@@ -8846,68 +9070,15 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
*/
abort_iocb = phba->sli.iocbq_lookup[abort_context];
- /*
- * If the iocb is not found in Firmware queue the iocb
- * might have completed already. Do not free it again.
- */
- if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
- if (irsp->un.ulpWord[4] != IOERR_NO_XRI) {
- spin_unlock_irq(&phba->hbalock);
- lpfc_sli_release_iocbq(phba, cmdiocb);
- return;
- }
- /* For SLI4 the ulpContext field for abort IOCB
- * holds the iotag of the IOCB being aborted so
- * the local abort_context needs to be reset to
- * match the aborted IOCBs ulpContext.
- */
- if (abort_iocb && phba->sli_rev == LPFC_SLI_REV4)
- abort_context = abort_iocb->iocb.ulpContext;
- }
-
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
"0327 Cannot abort els iocb %p "
"with tag %x context %x, abort status %x, "
"abort code %x\n",
abort_iocb, abort_iotag, abort_context,
irsp->ulpStatus, irsp->un.ulpWord[4]);
- /*
- * make sure we have the right iocbq before taking it
- * off the txcmplq and try to call completion routine.
- */
- if (!abort_iocb ||
- abort_iocb->iocb.ulpContext != abort_context ||
- (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
- spin_unlock_irq(&phba->hbalock);
- else if (phba->sli_rev < LPFC_SLI_REV4) {
- /*
- * leave the SLI4 aborted command on the txcmplq
- * list and the command complete WCQE's XB bit
- * will tell whether the SGL (XRI) can be released
- * immediately or to the aborted SGL list for the
- * following abort XRI from the HBA.
- */
- list_del_init(&abort_iocb->list);
- if (abort_iocb->iocb_flag & LPFC_IO_ON_Q) {
- abort_iocb->iocb_flag &= ~LPFC_IO_ON_Q;
- pring->txcmplq_cnt--;
- }
- /* Firmware could still be in progress of DMAing
- * payload, so don't free data buffer till after
- * a hbeat.
- */
- abort_iocb->iocb_flag |= LPFC_DELAY_MEM_FREE;
- abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
- spin_unlock_irq(&phba->hbalock);
-
- abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
- abort_iocb->iocb.un.ulpWord[4] = IOERR_ABORT_REQUESTED;
- (abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
- } else
- spin_unlock_irq(&phba->hbalock);
+ spin_unlock_irq(&phba->hbalock);
}
-
lpfc_sli_release_iocbq(phba, cmdiocb);
return;
}
@@ -9258,6 +9429,14 @@ void
lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_iocbq *rspiocb)
{
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3096 ABORT_XRI_CN completing on xri x%x "
+ "original iotag x%x, abort cmd iotag x%x "
+ "status 0x%x, reason 0x%x\n",
+ cmdiocb->iocb.un.acxri.abortContextTag,
+ cmdiocb->iocb.un.acxri.abortIoTag,
+ cmdiocb->iotag, rspiocb->iocb.ulpStatus,
+ rspiocb->iocb.un.ulpWord[4]);
lpfc_sli_release_iocbq(phba, cmdiocb);
return;
}
@@ -9771,7 +9950,7 @@ lpfc_sli4_eratt_read(struct lpfc_hba *phba)
phba->work_status[1] =
readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2885 Port Error Detected: "
+ "2885 Port Status Event: "
"port status reg 0x%x, "
"port smphr reg 0x%x, "
"error 1=0x%x, error 2=0x%x\n",
@@ -10777,6 +10956,9 @@ static void
lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
struct lpfc_wcqe_release *wcqe)
{
+ /* sanity check on queue memory */
+ if (unlikely(!phba->sli4_hba.els_wq))
+ return;
/* Check for the slow-path ELS work queue */
if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
@@ -10866,6 +11048,10 @@ lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
uint32_t status, rq_id;
unsigned long iflags;
+ /* sanity check on queue memory */
+ if (unlikely(!hrq) || unlikely(!drq))
+ return workposted;
+
if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
else
@@ -11000,6 +11186,9 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
/* Search for completion queue pointer matching this cqid */
speq = phba->sli4_hba.sp_eq;
+ /* sanity check on queue memory */
+ if (unlikely(!speq))
+ return;
list_for_each_entry(childq, &speq->child_list, list) {
if (childq->queue_id == cqid) {
cq = childq;
@@ -11241,12 +11430,18 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
return;
}
+ if (unlikely(!phba->sli4_hba.fcp_cq)) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3146 Fast-path completion queues "
+ "do not exist\n");
+ return;
+ }
cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
if (unlikely(!cq)) {
if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"0367 Fast-path completion queue "
- "does not exist\n");
+ "(%d) does not exist\n", fcp_cqidx);
return;
}
@@ -11417,6 +11612,8 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
/* Get to the EQ struct associated with this vector */
fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
+ if (unlikely(!fpeq))
+ return IRQ_NONE;
/* Check device state for handling interrupt */
if (unlikely(lpfc_intr_state_check(phba))) {
@@ -11635,6 +11832,9 @@ lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
uint16_t dmult;
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+ /* sanity check on queue memory */
+ if (!eq)
+ return -ENODEV;
if (!phba->sli4_hba.pc_sli4_params.supported)
hw_page_size = SLI4_PAGE_SIZE;
@@ -11751,6 +11951,9 @@ lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
union lpfc_sli4_cfg_shdr *shdr;
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+ /* sanity check on queue memory */
+ if (!cq || !eq)
+ return -ENODEV;
if (!phba->sli4_hba.pc_sli4_params.supported)
hw_page_size = SLI4_PAGE_SIZE;
@@ -11933,6 +12136,9 @@ lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
union lpfc_sli4_cfg_shdr *shdr;
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+ /* sanity check on queue memory */
+ if (!mq || !cq)
+ return -ENODEV;
if (!phba->sli4_hba.pc_sli4_params.supported)
hw_page_size = SLI4_PAGE_SIZE;
@@ -12083,6 +12289,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
struct dma_address *page;
+ /* sanity check on queue memory */
+ if (!wq || !cq)
+ return -ENODEV;
if (!phba->sli4_hba.pc_sli4_params.supported)
hw_page_size = SLI4_PAGE_SIZE;
@@ -12151,6 +12360,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
wq->subtype = subtype;
wq->host_index = 0;
wq->hba_index = 0;
+ wq->entry_repost = LPFC_RELEASE_NOTIFICATION_INTERVAL;
/* link the wq onto the parent cq child list */
list_add_tail(&wq->list, &cq->child_list);
@@ -12174,6 +12384,9 @@ lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno)
{
uint32_t cnt;
+ /* sanity check on queue memory */
+ if (!rq)
+ return;
cnt = lpfc_hbq_defs[qno]->entry_count;
/* Recalc repost for RQs based on buffers initially posted */
@@ -12219,6 +12432,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
union lpfc_sli4_cfg_shdr *shdr;
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+ /* sanity check on queue memory */
+ if (!hrq || !drq || !cq)
+ return -ENODEV;
if (!phba->sli4_hba.pc_sli4_params.supported)
hw_page_size = SLI4_PAGE_SIZE;
@@ -12420,6 +12636,7 @@ lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ /* sanity check on queue memory */
if (!eq)
return -ENODEV;
mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -12475,6 +12692,7 @@ lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ /* sanity check on queue memory */
if (!cq)
return -ENODEV;
mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -12528,6 +12746,7 @@ lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ /* sanity check on queue memory */
if (!mq)
return -ENODEV;
mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -12581,6 +12800,7 @@ lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ /* sanity check on queue memory */
if (!wq)
return -ENODEV;
mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -12634,6 +12854,7 @@ lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ /* sanity check on queue memory */
if (!hrq || !drq)
return -ENODEV;
mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
@@ -15252,45 +15473,42 @@ lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
}
/**
- * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
+ * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
* @phba: pointer to lpfc hba data structure.
+ * @rgn23_data: pointer to configure region 23 data.
*
- * This function read region 23 and parse TLV for port status to
- * decide if the user disaled the port. If the TLV indicates the
- * port is disabled, the hba_flag is set accordingly.
+ * This function gets SLI3 port configure region 23 data through memory dump
+ * mailbox command. When it successfully retrieves data, the size of the data
+ * will be returned, otherwise, 0 will be returned.
**/
-void
-lpfc_sli_read_link_ste(struct lpfc_hba *phba)
+static uint32_t
+lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
{
LPFC_MBOXQ_t *pmb = NULL;
MAILBOX_t *mb;
- uint8_t *rgn23_data = NULL;
- uint32_t offset = 0, data_size, sub_tlv_len, tlv_offset;
+ uint32_t offset = 0;
int rc;
+ if (!rgn23_data)
+ return 0;
+
pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
if (!pmb) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2600 lpfc_sli_read_serdes_param failed to"
- " allocate mailbox memory\n");
- goto out;
+ "2600 failed to allocate mailbox memory\n");
+ return 0;
}
mb = &pmb->u.mb;
- /* Get adapter Region 23 data */
- rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
- if (!rgn23_data)
- goto out;
-
do {
lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
if (rc != MBX_SUCCESS) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2601 lpfc_sli_read_link_ste failed to"
- " read config region 23 rc 0x%x Status 0x%x\n",
- rc, mb->mbxStatus);
+ "2601 failed to read config "
+ "region 23, rc 0x%x Status 0x%x\n",
+ rc, mb->mbxStatus);
mb->un.varDmp.word_cnt = 0;
}
/*
@@ -15303,13 +15521,96 @@ lpfc_sli_read_link_ste(struct lpfc_hba *phba)
mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
- rgn23_data + offset,
- mb->un.varDmp.word_cnt);
+ rgn23_data + offset,
+ mb->un.varDmp.word_cnt);
offset += mb->un.varDmp.word_cnt;
} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
- data_size = offset;
- offset = 0;
+ mempool_free(pmb, phba->mbox_mem_pool);
+ return offset;
+}
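
For illustration, the accumulation loop above boils down to the following standalone sketch; read_chunk() is a hypothetical stand-in for the lpfc_dump_mem()/lpfc_sli_issue_mbox() round trip and is stubbed here only so the unit compiles, and RGN23_SIZE stands in for DMP_RGN23_SIZE.

/* Minimal sketch of the chunked region 23 dump above; read_chunk() is a
 * made-up stand-in for the DMP_REGION_23 mailbox command and simply
 * simulates a 100-byte region. It returns the number of bytes copied,
 * 0 on error or when the region is exhausted.
 */
#include <stdint.h>
#include <string.h>

#define RGN23_SIZE 252			/* stand-in for DMP_RGN23_SIZE */

static uint32_t read_chunk(uint32_t offset, uint8_t *dst, uint32_t avail)
{
	static const uint8_t fake_region[100] = { 0x42 };
	uint32_t left = offset < sizeof(fake_region) ?
			(uint32_t)sizeof(fake_region) - offset : 0;
	uint32_t n = left < avail ? left : avail;

	memcpy(dst, fake_region + offset, n);
	return n;
}

/* Mirrors the helper above: returns the number of bytes retrieved, 0 on
 * failure, and never writes past the caller's RGN23_SIZE buffer.
 */
uint32_t get_region23(uint8_t *rgn23_data)
{
	uint32_t offset = 0, got;

	if (!rgn23_data)
		return 0;
	do {
		got = read_chunk(offset, rgn23_data + offset,
				 RGN23_SIZE - offset);
		offset += got;
	} while (got && offset < RGN23_SIZE);

	return offset;
}
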
+
+/**
+ * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
+ * @phba: pointer to lpfc hba data structure.
+ * @rgn23_data: pointer to configure region 23 data.
+ *
+ * This function gets SLI4 port configure region 23 data through memory dump
+ * mailbox command. When it successfully retrieves data, the size of the data
+ * will be returned, otherwise, 0 will be returned.
+ **/
+static uint32_t
+lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
+{
+ LPFC_MBOXQ_t *mboxq = NULL;
+ struct lpfc_dmabuf *mp = NULL;
+ struct lpfc_mqe *mqe;
+ uint32_t data_length = 0;
+ int rc;
+
+ if (!rgn23_data)
+ return 0;
+
+ mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3105 failed to allocate mailbox memory\n");
+ return 0;
+ }
+
+ if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
+ goto out;
+ mqe = &mboxq->u.mqe;
+ mp = (struct lpfc_dmabuf *) mboxq->context1;
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc)
+ goto out;
+ data_length = mqe->un.mb_words[5];
+ if (data_length == 0)
+ goto out;
+ if (data_length > DMP_RGN23_SIZE) {
+ data_length = 0;
+ goto out;
+ }
+ lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
+out:
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ if (mp) {
+ lpfc_mbuf_free(phba, mp->virt, mp->phys);
+ kfree(mp);
+ }
+ return data_length;
+}
+
+/**
+ * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function reads region 23 and parses the TLV for port status to
+ * decide if the user disabled the port. If the TLV indicates the
+ * port is disabled, the hba_flag is set accordingly.
+ **/
+void
+lpfc_sli_read_link_ste(struct lpfc_hba *phba)
+{
+ uint8_t *rgn23_data = NULL;
+ uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
+ uint32_t offset = 0;
+
+ /* Get adapter Region 23 data */
+ rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
+ if (!rgn23_data)
+ goto out;
+
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
+ else {
+ if_type = bf_get(lpfc_sli_intf_if_type,
+ &phba->sli4_hba.sli_intf);
+ if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
+ goto out;
+ data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
+ }
if (!data_size)
goto out;
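
The TLV parse that follows this hunk (not shown here) scans rgn23_data for the port-state record. As a rough, hypothetical illustration of that scan-until-match pattern only -- the byte layout below is invented and is not the real region 23 record format:

/* Illustrative walk over {type, length, value[]} records; TLV_END and the
 * one-byte type/length fields are assumptions for this sketch, not the
 * actual region 23 encoding parsed by lpfc_sli_read_link_ste().
 */
#include <stdint.h>
#include <stddef.h>

#define TLV_END 0xff			/* hypothetical terminator type */

const uint8_t *find_tlv(const uint8_t *buf, size_t size, uint8_t wanted)
{
	size_t off = 0;

	while (off + 2 <= size) {
		uint8_t type = buf[off];
		uint8_t len = buf[off + 1];

		if (type == TLV_END)
			break;
		if (off + 2 + len > size)
			break;		/* truncated record, stop */
		if (type == wanted)
			return buf + off + 2;	/* points at the value */
		off += 2 + len;
	}
	return NULL;			/* not found */
}
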
@@ -15373,9 +15674,8 @@ lpfc_sli_read_link_ste(struct lpfc_hba *phba)
goto out;
}
}
+
out:
- if (pmb)
- mempool_free(pmb, phba->mbox_mem_pool);
kfree(rgn23_data);
return;
}
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index d5cffd8..3f266e2 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -291,7 +291,7 @@ struct lpfc_bmbx {
#define LPFC_RQE_SIZE 8
#define LPFC_EQE_DEF_COUNT 1024
-#define LPFC_CQE_DEF_COUNT 256
+#define LPFC_CQE_DEF_COUNT 1024
#define LPFC_WQE_DEF_COUNT 256
#define LPFC_MQE_DEF_COUNT 16
#define LPFC_RQE_DEF_COUNT 512
@@ -420,7 +420,16 @@ struct lpfc_sli4_hba {
void __iomem *STATUSregaddr;
void __iomem *CTRLregaddr;
void __iomem *ERR1regaddr;
+#define SLIPORT_ERR1_REG_ERR_CODE_1 0x1
+#define SLIPORT_ERR1_REG_ERR_CODE_2 0x2
void __iomem *ERR2regaddr;
+#define SLIPORT_ERR2_REG_FW_RESTART 0x0
+#define SLIPORT_ERR2_REG_FUNC_PROVISON 0x1
+#define SLIPORT_ERR2_REG_FORCED_DUMP 0x2
+#define SLIPORT_ERR2_REG_FAILURE_EQ 0x3
+#define SLIPORT_ERR2_REG_FAILURE_CQ 0x4
+#define SLIPORT_ERR2_REG_FAILURE_BUS 0x5
+#define SLIPORT_ERR2_REG_FAILURE_RQ 0x6
} if_type2;
} u;
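
The SLIPORT_ERR2_REG_* codes added above identify why the port raised the error logged by lpfc_sli4_eratt_read() earlier in this patch. A small, hypothetical decode helper (not part of the driver) shows how the raw error 2 value would map to a reason:

/* Hypothetical helper mapping the SLIPORT_ERR2_REG_* values above to a
 * readable reason string; the codes mirror the defines in this hunk.
 */
const char *sliport_err2_to_str(unsigned int err2)
{
	switch (err2) {
	case 0x0: return "firmware restart";	   /* ..._FW_RESTART */
	case 0x1: return "function provisioning"; /* ..._FUNC_PROVISON */
	case 0x2: return "forced dump";		   /* ..._FORCED_DUMP */
	case 0x3: return "EQ failure";		   /* ..._FAILURE_EQ */
	case 0x4: return "CQ failure";		   /* ..._FAILURE_CQ */
	case 0x5: return "bus failure";		   /* ..._FAILURE_BUS */
	case 0x6: return "RQ failure";		   /* ..._FAILURE_RQ */
	default:  return "unknown";
	}
}
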
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index b0630e3..dd044d0 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.27"
+#define LPFC_DRIVER_VERSION "8.3.28"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index cff6ca6..0fe188e 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -774,10 +774,10 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
return NULL;
spin_lock_irq(&phba->hbalock);
list_for_each_entry(port_iterator, &phba->port_list, listentry) {
+ if (port_iterator->load_flag & FC_UNLOADING)
+ continue;
if (!scsi_host_get(lpfc_shost_from_vport(port_iterator))) {
- if (!(port_iterator->load_flag & FC_UNLOADING))
- lpfc_printf_vlog(port_iterator, KERN_ERR,
- LOG_VPORT,
+ lpfc_printf_vlog(port_iterator, KERN_ERR, LOG_VPORT,
"1801 Create vport work array FAILED: "
"cannot do scsi_host_get\n");
continue;
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index 590ce1e..70eb1f7 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -25,6 +25,7 @@
#include <asm/dma.h>
#include <asm/macints.h>
#include <asm/macintosh.h>
+#include <asm/mac_via.h>
#include <scsi/scsi_host.h>
@@ -149,7 +150,7 @@ static inline int mac_esp_wait_for_dreq(struct esp *esp)
do {
if (mep->pdma_regs == NULL) {
- if (mac_irq_pending(IRQ_MAC_SCSIDRQ))
+ if (via2_scsi_drq_pending())
return 0;
} else {
if (nubus_readl(mep->pdma_regs) & 0x200)
@@ -564,8 +565,7 @@ static int __devinit esp_mac_probe(struct platform_device *dev)
esp_chips[dev->id] = esp;
mb();
if (esp_chips[!dev->id] == NULL) {
- err = request_irq(host->irq, mac_scsi_esp_intr, 0,
- "Mac ESP", NULL);
+ err = request_irq(host->irq, mac_scsi_esp_intr, 0, "ESP", NULL);
if (err < 0) {
esp_chips[dev->id] = NULL;
goto fail_free_priv;
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index af3a6af..2bccfbe 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -291,8 +291,7 @@ int __init macscsi_detect(struct scsi_host_template * tpnt)
((struct NCR5380_hostdata *)instance->hostdata)->ctrl = 0;
if (instance->irq != SCSI_IRQ_NONE)
- if (request_irq(instance->irq, NCR5380_intr, IRQ_FLG_SLOW,
- "ncr5380", instance)) {
+ if (request_irq(instance->irq, NCR5380_intr, 0, "ncr5380", instance)) {
printk(KERN_WARNING "scsi%d: IRQ%d not free, interrupts disabled\n",
instance->host_no, instance->irq);
instance->irq = SCSI_IRQ_NONE;
@@ -340,9 +339,6 @@ static void mac_scsi_reset_boot(struct Scsi_Host *instance)
printk(KERN_INFO "Macintosh SCSI: resetting the SCSI bus..." );
- /* switch off SCSI IRQ - catch an interrupt without IRQ bit set else */
- disable_irq(IRQ_MAC_SCSI);
-
/* get in phase */
NCR5380_write( TARGET_COMMAND_REG,
PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) ));
@@ -358,9 +354,6 @@ static void mac_scsi_reset_boot(struct Scsi_Host *instance)
for( end = jiffies + AFTER_RESET_DELAY; time_before(jiffies, end); )
barrier();
- /* switch on SCSI IRQ again */
- enable_irq(IRQ_MAC_SCSI);
-
printk(KERN_INFO " done\n" );
}
#endif
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 5c17764..15eefa1 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -306,19 +306,22 @@ mega_query_adapter(adapter_t *adapter)
adapter->host->sg_tablesize = adapter->sglen;
- /* use HP firmware and bios version encoding */
+ /* use HP firmware and bios version encoding
+ Note: fw_version[0|1] and bios_version[0|1] were originally shifted
+ right 8 bits, which always made them zero, so the 0 is now hardcoded
+ to silence sparse warnings. */
if (adapter->product_info.subsysvid == HP_SUBSYS_VID) {
sprintf (adapter->fw_version, "%c%d%d.%d%d",
adapter->product_info.fw_version[2],
- adapter->product_info.fw_version[1] >> 8,
+ 0,
adapter->product_info.fw_version[1] & 0x0f,
- adapter->product_info.fw_version[0] >> 8,
+ 0,
adapter->product_info.fw_version[0] & 0x0f);
sprintf (adapter->bios_version, "%c%d%d.%d%d",
adapter->product_info.bios_version[2],
- adapter->product_info.bios_version[1] >> 8,
+ 0,
adapter->product_info.bios_version[1] & 0x0f,
- adapter->product_info.bios_version[0] >> 8,
+ 0,
adapter->product_info.bios_version[0] & 0x0f);
} else {
memcpy(adapter->fw_version,
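
With the zeros hardcoded as above, the HP encoding yields strings of the form <letter>0<digit>.0<digit>. A standalone illustration (the product_info byte values below are made up):

/* Shows the string produced by the HP encoding above; the sample bytes are
 * invented, not real firmware data. Each array element is a u8, so the old
 * ">> 8" terms were always zero, which is why 0 is now passed directly.
 */
#include <stdio.h>

int main(void)
{
	unsigned char fw_version[4] = { 0x04, 0x21, 'A', 0x00 };
	char buf[16];

	snprintf(buf, sizeof(buf), "%c%d%d.%d%d",
		 fw_version[2],
		 0,			/* was fw_version[1] >> 8 */
		 fw_version[1] & 0x0f,
		 0,			/* was fw_version[0] >> 8 */
		 fw_version[0] & 0x0f);
	printf("%s\n", buf);		/* prints "A01.04" */
	return 0;
}
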
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index dd94c7d..e5f416f 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "00.00.06.12-rc1"
-#define MEGASAS_RELDATE "Oct. 5, 2011"
-#define MEGASAS_EXT_VERSION "Wed. Oct. 5 17:00:00 PDT 2011"
+#define MEGASAS_VERSION "00.00.06.14-rc1"
+#define MEGASAS_RELDATE "Jan. 6, 2012"
+#define MEGASAS_EXT_VERSION "Fri. Jan. 6 17:00:00 PDT 2012"
/*
* Device IDs
@@ -773,7 +773,6 @@ struct megasas_ctrl_info {
#define MFI_OB_INTR_STATUS_MASK 0x00000002
#define MFI_POLL_TIMEOUT_SECS 60
-#define MEGASAS_COMPLETION_TIMER_INTERVAL (HZ/10)
#define MFI_REPLY_1078_MESSAGE_INTERRUPT 0x80000000
#define MFI_REPLY_GEN2_MESSAGE_INTERRUPT 0x00000001
@@ -1353,7 +1352,6 @@ struct megasas_instance {
u32 mfiStatus;
u32 last_seq_num;
- struct timer_list io_completion_timer;
struct list_head internal_reset_pending_q;
/* Ptr to hba specific information */
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 29a994f..8b300be 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
- * Version : v00.00.06.12-rc1
+ * Version : v00.00.06.14-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
@@ -59,14 +59,6 @@
#include "megaraid_sas.h"
/*
- * poll_mode_io:1- schedule complete completion from q cmd
- */
-static unsigned int poll_mode_io;
-module_param_named(poll_mode_io, poll_mode_io, int, 0);
-MODULE_PARM_DESC(poll_mode_io,
- "Complete cmds from IO path, (default=0)");
-
-/*
* Number of sectors per IO command
* Will be set in megasas_init_mfi if user does not provide
*/
@@ -1439,11 +1431,6 @@ megasas_build_and_issue_cmd(struct megasas_instance *instance,
instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
cmd->frame_count-1, instance->reg_set);
- /*
- * Check if we have pend cmds to be completed
- */
- if (poll_mode_io && atomic_read(&instance->fw_outstanding))
- tasklet_schedule(&instance->isr_tasklet);
return 0;
out_return_cmd:
@@ -3370,47 +3357,6 @@ fail_fw_init:
return -EINVAL;
}
-/**
- * megasas_start_timer - Initializes a timer object
- * @instance: Adapter soft state
- * @timer: timer object to be initialized
- * @fn: timer function
- * @interval: time interval between timer function call
- */
-static inline void
-megasas_start_timer(struct megasas_instance *instance,
- struct timer_list *timer,
- void *fn, unsigned long interval)
-{
- init_timer(timer);
- timer->expires = jiffies + interval;
- timer->data = (unsigned long)instance;
- timer->function = fn;
- add_timer(timer);
-}
-
-/**
- * megasas_io_completion_timer - Timer fn
- * @instance_addr: Address of adapter soft state
- *
- * Schedules tasklet for cmd completion
- * if poll_mode_io is set
- */
-static void
-megasas_io_completion_timer(unsigned long instance_addr)
-{
- struct megasas_instance *instance =
- (struct megasas_instance *)instance_addr;
-
- if (atomic_read(&instance->fw_outstanding))
- tasklet_schedule(&instance->isr_tasklet);
-
- /* Restart timer */
- if (poll_mode_io)
- mod_timer(&instance->io_completion_timer,
- jiffies + MEGASAS_COMPLETION_TIMER_INTERVAL);
-}
-
static u32
megasas_init_adapter_mfi(struct megasas_instance *instance)
{
@@ -3638,11 +3584,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
- /* Initialize the cmd completion timer */
- if (poll_mode_io)
- megasas_start_timer(instance, &instance->io_completion_timer,
- megasas_io_completion_timer,
- MEGASAS_COMPLETION_TIMER_INTERVAL);
return 0;
fail_init_adapter:
@@ -4369,9 +4310,6 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
host = instance->host;
instance->unload = 1;
- if (poll_mode_io)
- del_timer_sync(&instance->io_completion_timer);
-
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
@@ -4511,12 +4449,6 @@ megasas_resume(struct pci_dev *pdev)
}
instance->instancet->enable_intr(instance->reg_set);
-
- /* Initialize the cmd completion timer */
- if (poll_mode_io)
- megasas_start_timer(instance, &instance->io_completion_timer,
- megasas_io_completion_timer,
- MEGASAS_COMPLETION_TIMER_INTERVAL);
instance->unload = 0;
/*
@@ -4570,9 +4502,6 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev)
host = instance->host;
fusion = instance->ctrl_context;
- if (poll_mode_io)
- del_timer_sync(&instance->io_completion_timer);
-
scsi_remove_host(instance->host);
megasas_flush_cache(instance);
megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
@@ -4773,6 +4702,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
cmd->frame->hdr.context = cmd->index;
cmd->frame->hdr.pad_0 = 0;
+ cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 |
+ MFI_FRAME_SENSE64);
/*
* The management interface between applications and the fw uses
@@ -5219,60 +5150,6 @@ megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t coun
static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
megasas_sysfs_set_dbg_lvl);
-static ssize_t
-megasas_sysfs_show_poll_mode_io(struct device_driver *dd, char *buf)
-{
- return sprintf(buf, "%u\n", poll_mode_io);
-}
-
-static ssize_t
-megasas_sysfs_set_poll_mode_io(struct device_driver *dd,
- const char *buf, size_t count)
-{
- int retval = count;
- int tmp = poll_mode_io;
- int i;
- struct megasas_instance *instance;
-
- if (sscanf(buf, "%u", &poll_mode_io) < 1) {
- printk(KERN_ERR "megasas: could not set poll_mode_io\n");
- retval = -EINVAL;
- }
-
- /*
- * Check if poll_mode_io is already set or is same as previous value
- */
- if ((tmp && poll_mode_io) || (tmp == poll_mode_io))
- goto out;
-
- if (poll_mode_io) {
- /*
- * Start timers for all adapters
- */
- for (i = 0; i < megasas_mgmt_info.max_index; i++) {
- instance = megasas_mgmt_info.instance[i];
- if (instance) {
- megasas_start_timer(instance,
- &instance->io_completion_timer,
- megasas_io_completion_timer,
- MEGASAS_COMPLETION_TIMER_INTERVAL);
- }
- }
- } else {
- /*
- * Delete timers for all adapters
- */
- for (i = 0; i < megasas_mgmt_info.max_index; i++) {
- instance = megasas_mgmt_info.instance[i];
- if (instance)
- del_timer_sync(&instance->io_completion_timer);
- }
- }
-
-out:
- return retval;
-}
-
static void
megasas_aen_polling(struct work_struct *work)
{
@@ -5502,11 +5379,6 @@ megasas_aen_polling(struct work_struct *work)
kfree(ev);
}
-
-static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUSR,
- megasas_sysfs_show_poll_mode_io,
- megasas_sysfs_set_poll_mode_io);
-
/**
* megasas_init - Driver load entry point
*/
@@ -5566,11 +5438,6 @@ static int __init megasas_init(void)
if (rval)
goto err_dcf_dbg_lvl;
rval = driver_create_file(&megasas_pci_driver.driver,
- &driver_attr_poll_mode_io);
- if (rval)
- goto err_dcf_poll_mode_io;
-
- rval = driver_create_file(&megasas_pci_driver.driver,
&driver_attr_support_device_change);
if (rval)
goto err_dcf_support_device_change;
@@ -5579,10 +5446,6 @@ static int __init megasas_init(void)
err_dcf_support_device_change:
driver_remove_file(&megasas_pci_driver.driver,
- &driver_attr_poll_mode_io);
-
-err_dcf_poll_mode_io:
- driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_dbg_lvl);
err_dcf_dbg_lvl:
driver_remove_file(&megasas_pci_driver.driver,
@@ -5607,8 +5470,6 @@ err_pcidrv:
static void __exit megasas_exit(void)
{
driver_remove_file(&megasas_pci_driver.driver,
- &driver_attr_poll_mode_io);
- driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_dbg_lvl);
driver_remove_file(&megasas_pci_driver.driver,
&driver_attr_support_poll_for_event);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 5255dd6..294abb0 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -282,7 +282,9 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
else {
*pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
if ((raid->level >= 5) &&
- (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER))
+ ((instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) ||
+ (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER &&
+ raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
else if (raid->level == 1) {
/* Get alternate Pd. */
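
The reworked condition above is equivalent to "RAID level >= 5, and either the controller is not an Invader or regTypeReqOnRead is not REGION_TYPE_UNUSED". A boolean sketch of that reduction (the two flags are illustrative stand-ins for the PCI device check and the regTypeReqOnRead comparison):

/* Reduction of the regLockFlags test above; is_invader and reg_type_unused
 * stand in for the PCI_DEVICE_ID_LSI_INVADER check and
 * raid->regTypeReqOnRead == REGION_TYPE_UNUSED respectively.
 */
#include <stdbool.h>

bool needs_exclusive_region_lock(int raid_level, bool is_invader,
				 bool reg_type_unused)
{
	/* (!A || (A && !B)) simplifies to (!A || !B) */
	return raid_level >= 5 && (!is_invader || !reg_type_unused);
}
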
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index 8dc1b32..a01f0aa 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -8,7 +8,7 @@
* scatter/gather formats.
* Creation Date: June 21, 2006
*
- * mpi2.h Version: 02.00.20
+ * mpi2.h Version: 02.00.22
*
* Version History
* ---------------
@@ -69,6 +69,8 @@
* 02-23-11 02.00.19 Bumped MPI2_HEADER_VERSION_UNIT.
* Added MPI2_FUNCTION_SEND_HOST_MESSAGE.
* 03-09-11 02.00.20 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-25-11 02.00.21 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-24-11 02.00.22 Bumped MPI2_HEADER_VERSION_UNIT.
* --------------------------------------------------------------------------
*/
@@ -94,7 +96,7 @@
#define MPI2_VERSION_02_00 (0x0200)
/* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT (0x14)
+#define MPI2_HEADER_VERSION_UNIT (0x16)
#define MPI2_HEADER_VERSION_DEV (0x00)
#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
@@ -1073,8 +1075,10 @@ typedef struct _MPI2_IEEE_SGE_UNION
#define MPI2_IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02)
#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
/* IEEE Simple Element only */
-#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR (0x03)
+#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR (0x03)
/* IEEE Chain Element only */
+#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR \
+ (MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR) /* typo in name */
/****************************************************************************
* IEEE SGE operation Macros
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index cfd95b4..3a023da 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -6,7 +6,7 @@
* Title: MPI Configuration messages and pages
* Creation Date: November 10, 2006
*
- * mpi2_cnfg.h Version: 02.00.19
+ * mpi2_cnfg.h Version: 02.00.21
*
* Version History
* ---------------
@@ -140,6 +140,13 @@
* Added SASNotifyPrimitiveMasks field to
* MPI2_CONFIG_PAGE_IOC_7.
* 03-09-11 02.00.19 Fixed IO Unit Page 10 (to match the spec).
+ * 05-25-11 02.00.20 Cleaned up a few comments.
+ * 08-24-11 02.00.21 Marked the IO Unit Page 7 PowerManagementCapabilities
+ * for PCIe link as obsolete.
+ * Added SpinupFlags field containing a Disable Spin-up
+ * bit to the MPI2_SAS_IOUNIT4_SPINUP_GROUP fields of
+ * SAS IO Unit Page 4.
+
* --------------------------------------------------------------------------
*/
@@ -904,8 +911,8 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
#define MPI2_IOUNITPAGE7_PMCAP_12_5_PCT_IOCSPEED (0x00000400)
#define MPI2_IOUNITPAGE7_PMCAP_25_0_PCT_IOCSPEED (0x00000200)
#define MPI2_IOUNITPAGE7_PMCAP_50_0_PCT_IOCSPEED (0x00000100)
-#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE (0x00000008)
-#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE (0x00000004)
+#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE (0x00000008) /* obsolete */
+#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE (0x00000004) /* obsolete */
/* defines for IO Unit Page 7 IOCTemperatureUnits field */
#define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT (0x00)
@@ -1970,10 +1977,14 @@ typedef struct _MPI2_SAS_IOUNIT4_SPINUP_GROUP
{
U8 MaxTargetSpinup; /* 0x00 */
U8 SpinupDelay; /* 0x01 */
- U16 Reserved1; /* 0x02 */
+ U8 SpinupFlags; /* 0x02 */
+ U8 Reserved1; /* 0x03 */
} MPI2_SAS_IOUNIT4_SPINUP_GROUP, MPI2_POINTER PTR_MPI2_SAS_IOUNIT4_SPINUP_GROUP,
Mpi2SasIOUnit4SpinupGroup_t, MPI2_POINTER pMpi2SasIOUnit4SpinupGroup_t;
+/* defines for SAS IO Unit Page 4 SpinupFlags */
+#define MPI2_SASIOUNIT4_SPINUP_DISABLE_FLAG (0x01)
+
/*
* Host code (drivers, BIOS, utilities, etc.) should leave this define set to
* one and check the value returned for NumPhys at runtime.
@@ -2321,13 +2332,12 @@ typedef struct _MPI2_CONFIG_PAGE_EXPANDER_1
/* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
-/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
-
/* values for SAS Expander Page 1 DiscoveryInfo field */
#define MPI2_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04)
#define MPI2_SAS_EXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02)
#define MPI2_SAS_EXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01)
+/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
/****************************************************************************
* SAS Device Config Pages
@@ -2447,6 +2457,8 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_0
#define MPI2_SASPHY0_PAGEVERSION (0x03)
+/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
+
/* use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */
/* use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */
@@ -2454,12 +2466,10 @@ typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_0
/* values for SAS PHY Page 0 Flags field */
#define MPI2_SAS_PHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01)
-/* use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
+/* use MPI2_SAS_PHYINFO_ for the PhyInfo field */
/* use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
-/* use MPI2_SAS_PHYINFO_ for the PhyInfo field */
-
/* SAS PHY Page 1 */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index 93d9b69..9a925c0 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -6,7 +6,7 @@
* Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
* Creation Date: October 11, 2006
*
- * mpi2_ioc.h Version: 02.00.17
+ * mpi2_ioc.h Version: 02.00.19
*
* Version History
* ---------------
@@ -110,6 +110,13 @@
* Added Temperature Threshold Event.
* Added Host Message Event.
* Added Send Host Message request and reply.
+ * 05-25-11 02.00.18 For Extended Image Header, added
+ * MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC and
+ * MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC defines.
+ * Deprecated MPI2_EXT_IMAGE_TYPE_MAX define.
+ * 08-24-11 02.00.19 Added PhysicalPort field to
+ * MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure.
+ * Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete.
* --------------------------------------------------------------------------
*/
@@ -578,7 +585,7 @@ typedef struct _MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE
{
U16 TaskTag; /* 0x00 */
U8 ReasonCode; /* 0x02 */
- U8 Reserved1; /* 0x03 */
+ U8 PhysicalPort; /* 0x03 */
U8 ASC; /* 0x04 */
U8 ASCQ; /* 0x05 */
U16 DevHandle; /* 0x06 */
@@ -1366,16 +1373,18 @@ typedef struct _MPI2_EXT_IMAGE_HEADER
#define MPI2_EXT_IMAGE_HEADER_SIZE (0x40)
/* defines for the ImageType field */
-#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00)
-#define MPI2_EXT_IMAGE_TYPE_FW (0x01)
-#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03)
-#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04)
-#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05)
-#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06)
-#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07)
-#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08)
-
-#define MPI2_EXT_IMAGE_TYPE_MAX (MPI2_EXT_IMAGE_TYPE_MEGARAID)
+#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00)
+#define MPI2_EXT_IMAGE_TYPE_FW (0x01)
+#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03)
+#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04)
+#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05)
+#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06)
+#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07)
+#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08)
+#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF)
+#define MPI2_EXT_IMAGE_TYPE_MAX \
+ (MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC) /* deprecated */
@@ -1568,7 +1577,7 @@ typedef struct _MPI2_PWR_MGMT_CONTROL_REQUEST {
/* defines for the Feature field */
#define MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND (0x01)
#define MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION (0x02)
-#define MPI2_PM_CONTROL_FEATURE_PCIE_LINK (0x03)
+#define MPI2_PM_CONTROL_FEATURE_PCIE_LINK (0x03) /* obsolete */
#define MPI2_PM_CONTROL_FEATURE_IOC_SPEED (0x04)
#define MPI2_PM_CONTROL_FEATURE_MIN_PRODUCT_SPECIFIC (0x80)
#define MPI2_PM_CONTROL_FEATURE_MAX_PRODUCT_SPECIFIC (0xFF)
@@ -1597,14 +1606,14 @@ typedef struct _MPI2_PWR_MGMT_CONTROL_REQUEST {
/* parameter usage for the MPI2_PM_CONTROL_FEATURE_PCIE_LINK Feature */
/* Parameter1 indicates desired PCIe link speed using these defines */
-#define MPI2_PM_CONTROL_PARAM1_PCIE_2_5_GBPS (0x00)
-#define MPI2_PM_CONTROL_PARAM1_PCIE_5_0_GBPS (0x01)
-#define MPI2_PM_CONTROL_PARAM1_PCIE_8_0_GBPS (0x02)
+#define MPI2_PM_CONTROL_PARAM1_PCIE_2_5_GBPS (0x00) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_5_0_GBPS (0x01) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_8_0_GBPS (0x02) /* obsolete */
/* Parameter2 indicates desired PCIe link width using these defines */
-#define MPI2_PM_CONTROL_PARAM2_WIDTH_X1 (0x01)
-#define MPI2_PM_CONTROL_PARAM2_WIDTH_X2 (0x02)
-#define MPI2_PM_CONTROL_PARAM2_WIDTH_X4 (0x04)
-#define MPI2_PM_CONTROL_PARAM2_WIDTH_X8 (0x08)
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X1 (0x01) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X2 (0x02) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X4 (0x04) /* obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X8 (0x08) /* obsolete */
/* Parameter3 and Parameter4 are reserved */
/* parameter usage for the MPI2_PM_CONTROL_FEATURE_IOC_SPEED Feature */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
index bd61a7b..0601612 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
@@ -6,7 +6,7 @@
* Title: MPI Integrated RAID messages and structures
* Creation Date: April 26, 2007
*
- * mpi2_raid.h Version: 02.00.05
+ * mpi2_raid.h Version: 02.00.06
*
* Version History
* ---------------
@@ -23,6 +23,10 @@
* 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of
* VolumeCreationFlags and marked the old one as obsolete.
* 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define.
+ * 08-24-10 02.00.06 Added MPI2_RAID_ACTION_COMPATIBILITY_CHECK along with
+ * related structures and defines.
+ * Added product-specific range to RAID Action values.
+
* --------------------------------------------------------------------------
*/
@@ -176,7 +180,9 @@ typedef struct _MPI2_RAID_ACTION_REQUEST
#define MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED (0x20)
#define MPI2_RAID_ACTION_START_RAID_FUNCTION (0x21)
#define MPI2_RAID_ACTION_STOP_RAID_FUNCTION (0x22)
-
+#define MPI2_RAID_ACTION_COMPATIBILITY_CHECK (0x23)
+#define MPI2_RAID_ACTION_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI2_RAID_ACTION_MAX_PRODUCT_SPECIFIC (0xFF)
/* RAID Volume Creation Structure */
@@ -244,6 +250,23 @@ typedef struct _MPI2_RAID_ONLINE_CAPACITY_EXPANSION
Mpi2RaidOnlineCapacityExpansion_t,
MPI2_POINTER pMpi2RaidOnlineCapacityExpansion_t;
+/* RAID Compatibility Input Structure */
+
+typedef struct _MPI2_RAID_COMPATIBILITY_INPUT_STRUCT {
+ U16 SourceDevHandle; /* 0x00 */
+ U16 CandidateDevHandle; /* 0x02 */
+ U32 Flags; /* 0x04 */
+ U32 Reserved1; /* 0x08 */
+ U32 Reserved2; /* 0x0C */
+} MPI2_RAID_COMPATIBILITY_INPUT_STRUCT,
+MPI2_POINTER PTR_MPI2_RAID_COMPATIBILITY_INPUT_STRUCT,
+Mpi2RaidCompatibilityInputStruct_t,
+MPI2_POINTER pMpi2RaidCompatibilityInputStruct_t;
+
+/* defines for RAID Compatibility Structure Flags field */
+#define MPI2_RAID_COMPAT_SOURCE_IS_VOLUME_FLAG (0x00000002)
+#define MPI2_RAID_COMPAT_REPORT_SOURCE_INFO_FLAG (0x00000001)
+
/* RAID Volume Indicator Structure */
@@ -263,15 +286,45 @@ typedef struct _MPI2_RAID_VOL_INDICATOR
#define MPI2_RAID_VOL_FLAGS_OP_RESYNC (0x00000003)
#define MPI2_RAID_VOL_FLAGS_OP_MDC (0x00000004)
+/* RAID Compatibility Result Structure */
+
+typedef struct _MPI2_RAID_COMPATIBILITY_RESULT_STRUCT {
+ U8 State; /* 0x00 */
+ U8 Reserved1; /* 0x01 */
+ U16 Reserved2; /* 0x02 */
+ U32 GenericAttributes; /* 0x04 */
+ U32 OEMSpecificAttributes; /* 0x08 */
+ U32 Reserved3; /* 0x0C */
+ U32 Reserved4; /* 0x10 */
+} MPI2_RAID_COMPATIBILITY_RESULT_STRUCT,
+MPI2_POINTER PTR_MPI2_RAID_COMPATIBILITY_RESULT_STRUCT,
+Mpi2RaidCompatibilityResultStruct_t,
+MPI2_POINTER pMpi2RaidCompatibilityResultStruct_t;
+
+/* defines for RAID Compatibility Result Structure State field */
+#define MPI2_RAID_COMPAT_STATE_COMPATIBLE (0x00)
+#define MPI2_RAID_COMPAT_STATE_NOT_COMPATIBLE (0x01)
+
+/* defines for RAID Compatibility Result Structure GenericAttributes field */
+#define MPI2_RAID_COMPAT_GENATTRIB_4K_SECTOR (0x00000010)
+
+#define MPI2_RAID_COMPAT_GENATTRIB_MEDIA_MASK (0x0000000C)
+#define MPI2_RAID_COMPAT_GENATTRIB_SOLID_STATE_DRIVE (0x00000008)
+#define MPI2_RAID_COMPAT_GENATTRIB_HARD_DISK_DRIVE (0x00000004)
+
+#define MPI2_RAID_COMPAT_GENATTRIB_PROTOCOL_MASK (0x00000003)
+#define MPI2_RAID_COMPAT_GENATTRIB_SAS_PROTOCOL (0x00000002)
+#define MPI2_RAID_COMPAT_GENATTRIB_SATA_PROTOCOL (0x00000001)
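
The GenericAttributes masks above decode as separate bit fields. A standalone sketch of how a consumer might pick them apart (the defines are copied from this header; the sample attribute word is invented):

/* Decodes the MPI2_RAID_COMPAT_GENATTRIB_* fields from a GenericAttributes
 * word; the defines mirror the values added above, the sample value is
 * made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define MPI2_RAID_COMPAT_GENATTRIB_4K_SECTOR		(0x00000010)
#define MPI2_RAID_COMPAT_GENATTRIB_MEDIA_MASK		(0x0000000C)
#define MPI2_RAID_COMPAT_GENATTRIB_SOLID_STATE_DRIVE	(0x00000008)
#define MPI2_RAID_COMPAT_GENATTRIB_PROTOCOL_MASK	(0x00000003)
#define MPI2_RAID_COMPAT_GENATTRIB_SAS_PROTOCOL		(0x00000002)

int main(void)
{
	uint32_t attr = 0x0000001A;	/* sample: 4K sector, SSD, SAS */

	printf("4K sector: %s\n",
	       (attr & MPI2_RAID_COMPAT_GENATTRIB_4K_SECTOR) ? "yes" : "no");
	printf("SSD media: %s\n",
	       ((attr & MPI2_RAID_COMPAT_GENATTRIB_MEDIA_MASK) ==
		MPI2_RAID_COMPAT_GENATTRIB_SOLID_STATE_DRIVE) ? "yes" : "no");
	printf("SAS protocol: %s\n",
	       ((attr & MPI2_RAID_COMPAT_GENATTRIB_PROTOCOL_MASK) ==
		MPI2_RAID_COMPAT_GENATTRIB_SAS_PROTOCOL) ? "yes" : "no");
	return 0;
}
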
/* RAID Action Reply ActionData union */
typedef union _MPI2_RAID_ACTION_REPLY_DATA
{
- U32 Word[5];
- MPI2_RAID_VOL_INDICATOR RaidVolumeIndicator;
- U16 VolDevHandle;
- U8 VolumeState;
- U8 PhysDiskNum;
+ U32 Word[5];
+ MPI2_RAID_VOL_INDICATOR RaidVolumeIndicator;
+ U16 VolDevHandle;
+ U8 VolumeState;
+ U8 PhysDiskNum;
+ MPI2_RAID_COMPATIBILITY_RESULT_STRUCT RaidCompatibilityResult;
} MPI2_RAID_ACTION_REPLY_DATA, MPI2_POINTER PTR_MPI2_RAID_ACTION_REPLY_DATA,
Mpi2RaidActionReplyData_t, MPI2_POINTER pMpi2RaidActionReplyData_t;
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
index 2a4bced..3cbe677 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
@@ -6,7 +6,7 @@
* Title: MPI diagnostic tool structures and definitions
* Creation Date: March 26, 2007
*
- * mpi2_tool.h Version: 02.00.06
+ * mpi2_tool.h Version: 02.00.07
*
* Version History
* ---------------
@@ -25,6 +25,8 @@
* 05-12-10 02.00.05 Added Diagnostic Data Upload tool.
* 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer
* Post Request.
+ * 05-25-11 02.00.07 Added Flags field and related defines to
+ * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST.
* --------------------------------------------------------------------------
*/
@@ -181,7 +183,7 @@ typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST {
U8 DevIndex; /* 0x14 */
U8 Action; /* 0x15 */
U8 SGLFlags; /* 0x16 */
- U8 Reserved7; /* 0x17 */
+ U8 Flags; /* 0x17 */
U16 TxDataLength; /* 0x18 */
U16 RxDataLength; /* 0x1A */
U32 Reserved8; /* 0x1C */
@@ -205,6 +207,9 @@ typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST {
/* use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+/* values for the Flags field */
+#define MPI2_TOOL_ISTWI_FLAG_AUTO_RESERVE_RELEASE (0x80)
+#define MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK (0x07)
/* Toolbox ISTWI Read Write Tool reply message */
typedef struct _MPI2_TOOLBOX_ISTWI_REPLY {
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index beda04a..a78036f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -57,6 +57,7 @@
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/time.h>
+#include <linux/kthread.h>
#include <linux/aer.h>
#include "mpt2sas_base.h"
@@ -65,6 +66,8 @@ static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
+#define MAX_HBA_QUEUE_DEPTH 30000
+#define MAX_CHAIN_DEPTH 100000
static int max_queue_depth = -1;
module_param(max_queue_depth, int, 0);
MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
@@ -89,19 +92,6 @@ static int disable_discovery = -1;
module_param(disable_discovery, int, 0);
MODULE_PARM_DESC(disable_discovery, " disable discovery ");
-
-/* diag_buffer_enable is bitwise
- * bit 0 set = TRACE
- * bit 1 set = SNAPSHOT
- * bit 2 set = EXTENDED
- *
- * Either bit can be set, or both
- */
-static int diag_buffer_enable;
-module_param(diag_buffer_enable, int, 0);
-MODULE_PARM_DESC(diag_buffer_enable, " post diag buffers "
- "(TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
-
/**
* _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
*
@@ -120,10 +110,34 @@ _scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
ioc->fwfault_debug = mpt2sas_fwfault_debug;
return 0;
}
+
module_param_call(mpt2sas_fwfault_debug, _scsih_set_fwfault_debug,
param_get_int, &mpt2sas_fwfault_debug, 0644);
/**
+ * mpt2sas_remove_dead_ioc_func - kthread context to remove dead ioc
+ * @arg: input argument, used to derive ioc
+ *
+ * Return 0 if controller is removed from pci subsystem.
+ * Return -1 for other case.
+ */
+static int mpt2sas_remove_dead_ioc_func(void *arg)
+{
+ struct MPT2SAS_ADAPTER *ioc = (struct MPT2SAS_ADAPTER *)arg;
+ struct pci_dev *pdev;
+
+ if (!ioc)
+ return -1;
+
+ pdev = ioc->pdev;
+ if (!pdev)
+ return -1;
+ pci_remove_bus_device(pdev);
+ return 0;
+}
+
+
+/**
* _base_fault_reset_work - workq handling ioc fault conditions
* @work: input argument, used to derive ioc
* Context: sleep.
@@ -138,6 +152,7 @@ _base_fault_reset_work(struct work_struct *work)
unsigned long flags;
u32 doorbell;
int rc;
+ struct task_struct *p;
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
if (ioc->shost_recovery)
@@ -145,6 +160,39 @@ _base_fault_reset_work(struct work_struct *work)
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
doorbell = mpt2sas_base_get_iocstate(ioc, 0);
+ if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
+ printk(MPT2SAS_INFO_FMT "%s : SAS host is non-operational !!!!\n",
+ ioc->name, __func__);
+
+ /*
+ * Call _scsih_flush_pending_cmds callback so that we flush all
+ * pending commands back to OS. This call is required to avoid
+ * deadlock at block layer. Dead IOC will fail to do diag reset,
+ * and this call is safe since dead ioc will never return any
+ * command back from HW.
+ */
+ ioc->schedule_dead_ioc_flush_running_cmds(ioc);
+ /*
+ * Set remove_host flag early since kernel thread will
+ * take some time to execute.
+ */
+ ioc->remove_host = 1;
+ /*Remove the Dead Host */
+ p = kthread_run(mpt2sas_remove_dead_ioc_func, ioc,
+ "mpt2sas_dead_ioc_%d", ioc->id);
+ if (IS_ERR(p)) {
+ printk(MPT2SAS_ERR_FMT
+ "%s: Running mpt2sas_dead_ioc thread failed !!!!\n",
+ ioc->name, __func__);
+ } else {
+ printk(MPT2SAS_ERR_FMT
+ "%s: Running mpt2sas_dead_ioc thread success !!!!\n",
+ ioc->name, __func__);
+ }
+
+ return; /* don't rearm timer */
+ }
+
if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
FORCE_BIG_HAMMER);
@@ -1346,7 +1394,7 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
if (_base_check_enable_msix(ioc) != 0)
goto try_ioapic;
- ioc->reply_queue_count = min_t(u8, ioc->cpu_count,
+ ioc->reply_queue_count = min_t(int, ioc->cpu_count,
ioc->msix_vector_count);
entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
@@ -1916,6 +1964,10 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
MPT2SAS_INTEL_RMS2LL040_BRANDING);
break;
+ case MPT2SAS_INTEL_RAMSDALE_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RAMSDALE_BRANDING);
+ break;
default:
break;
}
@@ -1925,6 +1977,22 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
MPT2SAS_INTEL_RS25GB008_BRANDING);
break;
+ case MPT2SAS_INTEL_RMS25JB080_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS25JB080_BRANDING);
+ break;
+ case MPT2SAS_INTEL_RMS25JB040_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS25JB040_BRANDING);
+ break;
+ case MPT2SAS_INTEL_RMS25KB080_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS25KB080_BRANDING);
+ break;
+ case MPT2SAS_INTEL_RMS25KB040_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS25KB040_BRANDING);
+ break;
default:
break;
}
@@ -2311,8 +2379,6 @@ _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
}
if (ioc->chain_dma_pool)
pci_pool_destroy(ioc->chain_dma_pool);
- }
- if (ioc->chain_lookup) {
free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
ioc->chain_lookup = NULL;
}
@@ -2330,9 +2396,7 @@ static int
_base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
{
struct mpt2sas_facts *facts;
- u32 queue_size, queue_diff;
u16 max_sge_elements;
- u16 num_of_reply_frames;
u16 chains_needed_per_io;
u32 sz, total_sz, reply_post_free_sz;
u32 retry_sz;
@@ -2359,7 +2423,8 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
max_request_credit = (max_queue_depth < facts->RequestCredit)
? max_queue_depth : facts->RequestCredit;
else
- max_request_credit = facts->RequestCredit;
+ max_request_credit = min_t(u16, facts->RequestCredit,
+ MAX_HBA_QUEUE_DEPTH);
ioc->hba_queue_depth = max_request_credit;
ioc->hi_priority_depth = facts->HighPriorityCredit;
@@ -2400,50 +2465,25 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
}
ioc->chains_needed_per_io = chains_needed_per_io;
- /* reply free queue sizing - taking into account for events */
- num_of_reply_frames = ioc->hba_queue_depth + 32;
-
- /* number of replies frames can't be a multiple of 16 */
- /* decrease number of reply frames by 1 */
- if (!(num_of_reply_frames % 16))
- num_of_reply_frames--;
-
- /* calculate number of reply free queue entries
- * (must be multiple of 16)
- */
-
- /* (we know reply_free_queue_depth is not a multiple of 16) */
- queue_size = num_of_reply_frames;
- queue_size += 16 - (queue_size % 16);
- ioc->reply_free_queue_depth = queue_size;
-
- /* reply descriptor post queue sizing */
- /* this size should be the number of request frames + number of reply
- * frames
- */
-
- queue_size = ioc->hba_queue_depth + num_of_reply_frames + 1;
- /* round up to 16 byte boundary */
- if (queue_size % 16)
- queue_size += 16 - (queue_size % 16);
-
- /* check against IOC maximum reply post queue depth */
- if (queue_size > facts->MaxReplyDescriptorPostQueueDepth) {
- queue_diff = queue_size -
- facts->MaxReplyDescriptorPostQueueDepth;
-
- /* round queue_diff up to multiple of 16 */
- if (queue_diff % 16)
- queue_diff += 16 - (queue_diff % 16);
+ /* reply free queue sizing - taking into account for 64 FW events */
+ ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
- /* adjust hba_queue_depth, reply_free_queue_depth,
- * and queue_size
- */
- ioc->hba_queue_depth -= (queue_diff / 2);
- ioc->reply_free_queue_depth -= (queue_diff / 2);
- queue_size = facts->MaxReplyDescriptorPostQueueDepth;
+ /* align the reply post queue on the next 16 count boundary */
+ if (!(ioc->reply_free_queue_depth % 16))
+ ioc->reply_post_queue_depth = ioc->reply_free_queue_depth + 16;
+ else
+ ioc->reply_post_queue_depth = ioc->reply_free_queue_depth +
+ 32 - (ioc->reply_free_queue_depth % 16);
+ if (ioc->reply_post_queue_depth >
+ facts->MaxReplyDescriptorPostQueueDepth) {
+ ioc->reply_post_queue_depth = min_t(u16,
+ (facts->MaxReplyDescriptorPostQueueDepth -
+ (facts->MaxReplyDescriptorPostQueueDepth % 16)),
+ (ioc->hba_queue_depth - (ioc->hba_queue_depth % 16)));
+ ioc->reply_free_queue_depth = ioc->reply_post_queue_depth - 16;
+ ioc->hba_queue_depth = ioc->reply_free_queue_depth - 64;
}
- ioc->reply_post_queue_depth = queue_size;
+
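
As a worked example of the sizing above (the depth is illustrative, not a real RequestCredit value): with hba_queue_depth = 1000, reply_free_queue_depth becomes 1064; since 1064 % 16 == 8, reply_post_queue_depth is padded to 1088. Note the modulo test needs the parenthesized form used below (see the corrected "% 16" check above). The same arithmetic in isolation:

/* Standalone check of the reply queue sizing arithmetic above; the input
 * depth is a made-up example, not a real IOC RequestCredit value.
 */
#include <stdio.h>

int main(void)
{
	unsigned int hba_queue_depth = 1000;
	unsigned int reply_free, reply_post;

	/* head room for 64 firmware events */
	reply_free = hba_queue_depth + 64;

	/* post queue: at least 16 deeper, aligned to a 16-entry boundary */
	if (!(reply_free % 16))
		reply_post = reply_free + 16;
	else
		reply_post = reply_free + 32 - (reply_free % 16);

	printf("reply_free=%u reply_post=%u\n", reply_free, reply_post);
	/* prints: reply_free=1064 reply_post=1088 */
	return 0;
}
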
dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
"sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
@@ -2529,15 +2569,12 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
"depth(%d)\n", ioc->name, ioc->request,
ioc->scsiio_depth));
- /* loop till the allocation succeeds */
- do {
- sz = ioc->chain_depth * sizeof(struct chain_tracker);
- ioc->chain_pages = get_order(sz);
- ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
- GFP_KERNEL, ioc->chain_pages);
- if (ioc->chain_lookup == NULL)
- ioc->chain_depth -= 100;
- } while (ioc->chain_lookup == NULL);
+ ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
+ sz = ioc->chain_depth * sizeof(struct chain_tracker);
+ ioc->chain_pages = get_order(sz);
+
+ ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
+ GFP_KERNEL, ioc->chain_pages);
ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
ioc->request_sz, 16, 0);
if (!ioc->chain_dma_pool) {
@@ -3136,8 +3173,8 @@ mpt2sas_base_sas_iounit_control(struct MPT2SAS_ADAPTER *ioc,
if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
ioc->ioc_link_reset_in_progress = 1;
- mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->base_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
msecs_to_jiffies(10000));
if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
@@ -3238,8 +3275,8 @@ mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc,
request = mpt2sas_base_get_msg_frame(ioc, smid);
ioc->base_cmds.smid = smid;
memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
- mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->base_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
msecs_to_jiffies(10000));
if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
@@ -3746,8 +3783,8 @@ _base_event_notification(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
mpi_request->EventMasks[i] =
cpu_to_le32(ioc->event_masks[i]);
- mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->base_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
printk(MPT2SAS_ERR_FMT "%s: timeout\n",
@@ -4062,7 +4099,8 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
ioc->reply_free[i] = cpu_to_le32(reply_address);
/* initialize reply queues */
- _base_assign_reply_queues(ioc);
+ if (ioc->is_driver_loading)
+ _base_assign_reply_queues(ioc);
/* initialize Reply Post Free Queue */
reply_post_free = (long)ioc->reply_post_free;
@@ -4110,24 +4148,17 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
if (ioc->is_driver_loading) {
-
-
-
- ioc->wait_for_discovery_to_complete =
- _base_determine_wait_on_discovery(ioc);
- return r; /* scan_start and scan_finished support */
- }
-
-
- if (ioc->wait_for_discovery_to_complete && ioc->is_warpdrive) {
- if (ioc->manu_pg10.OEMIdentifier == 0x80) {
+ if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
+ == 0x80) {
hide_flag = (u8) (ioc->manu_pg10.OEMSpecificFlags0 &
MFG_PAGE10_HIDE_SSDS_MASK);
if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
ioc->mfg_pg10_hide_flag = hide_flag;
}
+ ioc->wait_for_discovery_to_complete =
+ _base_determine_wait_on_discovery(ioc);
+ return r; /* scan_start and scan_finished support */
}
-
r = _base_send_port_enable(ioc, sleep_flag);
if (r)
return r;
@@ -4206,7 +4237,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
r = mpt2sas_base_map_resources(ioc);
if (r)
- return r;
+ goto out_free_resources;
if (ioc->is_warpdrive) {
ioc->reply_post_host_index[0] =
@@ -4517,7 +4548,7 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
printk(MPT2SAS_ERR_FMT "%s: pci error recovery reset\n",
ioc->name, __func__);
r = 0;
- goto out;
+ goto out_unlocked;
}
if (mpt2sas_fwfault_debug)
@@ -4573,6 +4604,7 @@ mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
mutex_unlock(&ioc->reset_in_progress_mutex);
+ out_unlocked:
dtmprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: exit\n", ioc->name,
__func__));
return r;
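
The mpt2sas_base.c hunks above simplify the reply-queue sizing to a closed form (reply_free_queue_depth = hba_queue_depth + 64, with the post queue rounded up onto a 16-entry boundary) and move init_completion() ahead of mpt2sas_base_put_smid_default(), closing the window where a fast reply could signal a completion that had not yet been (re)initialized. A minimal user-space sketch of the rounding arithmetic, using a made-up queue depth (not driver code):

    #include <stdio.h>

    int main(void)
    {
        unsigned int hba_queue_depth = 500;             /* example value */
        unsigned int reply_free = hba_queue_depth + 64; /* + 64 FW events */
        unsigned int reply_post;

        /* round up to the next 16 boundary, then add one group of 16 */
        reply_post = (reply_free + 15u) / 16u * 16u + 16u;

        printf("reply_free=%u reply_post=%u\n", reply_free, reply_post);
        return 0;
    }
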
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 3c3babc..c7459fd 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,8 +69,8 @@
#define MPT2SAS_DRIVER_NAME "mpt2sas"
#define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
#define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION "10.100.00.00"
-#define MPT2SAS_MAJOR_VERSION 10
+#define MPT2SAS_DRIVER_VERSION "12.100.00.00"
+#define MPT2SAS_MAJOR_VERSION 12
#define MPT2SAS_MINOR_VERSION 100
#define MPT2SAS_BUILD_VERSION 00
#define MPT2SAS_RELEASE_VERSION 00
@@ -157,20 +157,33 @@
/*
* Intel HBA branding
*/
+#define MPT2SAS_INTEL_RMS25JB080_BRANDING \
+ "Intel(R) Integrated RAID Module RMS25JB080"
+#define MPT2SAS_INTEL_RMS25JB040_BRANDING \
+ "Intel(R) Integrated RAID Module RMS25JB040"
+#define MPT2SAS_INTEL_RMS25KB080_BRANDING \
+ "Intel(R) Integrated RAID Module RMS25KB080"
+#define MPT2SAS_INTEL_RMS25KB040_BRANDING \
+ "Intel(R) Integrated RAID Module RMS25KB040"
#define MPT2SAS_INTEL_RMS2LL080_BRANDING \
"Intel Integrated RAID Module RMS2LL080"
#define MPT2SAS_INTEL_RMS2LL040_BRANDING \
"Intel Integrated RAID Module RMS2LL040"
#define MPT2SAS_INTEL_RS25GB008_BRANDING \
"Intel(R) RAID Controller RS25GB008"
-
+#define MPT2SAS_INTEL_RAMSDALE_BRANDING \
+ "Intel 720 Series SSD"
/*
* Intel HBA SSDIDs
*/
+#define MPT2SAS_INTEL_RMS25JB080_SSDID 0x3516
+#define MPT2SAS_INTEL_RMS25JB040_SSDID 0x3517
+#define MPT2SAS_INTEL_RMS25KB080_SSDID 0x3518
+#define MPT2SAS_INTEL_RMS25KB040_SSDID 0x3519
#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E
#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F
#define MPT2SAS_INTEL_RS25GB008_SSDID 0x3000
-
+#define MPT2SAS_INTEL_RAMSDALE_SSDID 0x3700
/*
* HP HBA branding
@@ -373,6 +386,7 @@ struct _sas_device {
* @percent_complete: resync percent complete
* @direct_io_enabled: Whether direct io to PDs are allowed or not
* @stripe_exponent: X where 2powX is the stripe sz in blocks
+ * @block_exponent: X where 2powX is the block sz in bytes
* @max_lba: Maximum number of LBA in the volume
* @stripe_sz: Stripe Size of the volume
* @device_info: Device info of the volume member disk
@@ -394,6 +408,7 @@ struct _raid_device {
u8 percent_complete;
u8 direct_io_enabled;
u8 stripe_exponent;
+ u8 block_exponent;
u64 max_lba;
u32 stripe_sz;
u32 device_info;
@@ -623,6 +638,7 @@ enum mutex_type {
TM_MUTEX_ON = 1,
};
+typedef void (*MPT2SAS_FLUSH_RUNNING_CMDS)(struct MPT2SAS_ADAPTER *ioc);
/**
* struct MPT2SAS_ADAPTER - per adapter struct
* @list: ioc_list
@@ -665,6 +681,7 @@ enum mutex_type {
* @msix_vector_count: number msix vectors
* @cpu_msix_table: table for mapping cpus to msix index
* @cpu_msix_table_sz: table size
+ * @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands
* @scsi_io_cb_idx: shost generated commands
* @tm_cb_idx: task management commands
* @scsih_cb_idx: scsih internal commands
@@ -816,6 +833,7 @@ struct MPT2SAS_ADAPTER {
resource_size_t **reply_post_host_index;
u16 cpu_msix_table_sz;
u32 ioc_reset_count;
+ MPT2SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
/* internal commands, callback index */
u8 scsi_io_cb_idx;
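
The header additions above introduce a flush hook: MPT2SAS_FLUSH_RUNNING_CMDS is a function-pointer type and schedule_dead_ioc_flush_running_cmds is the adapter field that the SCSI layer fills in at probe time (see the scsih hunk further down), so the base driver can flush outstanding commands when it declares the IOC dead. A standalone sketch of the pattern, with illustrative names only (not driver code):

    #include <stdio.h>

    struct adapter;
    typedef void (*flush_running_cmds_fn)(struct adapter *ioc);

    struct adapter {
        const char *name;
        flush_running_cmds_fn schedule_dead_ioc_flush_running_cmds;
    };

    static void scsih_flush_running_cmds(struct adapter *ioc)
    {
        printf("%s: flushing outstanding commands\n", ioc->name);
    }

    int main(void)
    {
        struct adapter ioc = { .name = "ioc0" };

        /* probe path: install the hook */
        ioc.schedule_dead_ioc_flush_running_cmds = scsih_flush_running_cmds;

        /* fault path: invoke it if present */
        if (ioc.schedule_dead_ioc_flush_running_cmds)
            ioc.schedule_dead_ioc_flush_running_cmds(&ioc);
        return 0;
    }
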
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index aabcb91..7fceb89 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -818,6 +818,7 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
_ctl_display_some_debug(ioc, smid, "ctl_request", NULL);
#endif
+ init_completion(&ioc->ctl_cmds.done);
switch (mpi_request->Function) {
case MPI2_FUNCTION_SCSI_IO_REQUEST:
case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
@@ -903,7 +904,6 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
timeout = MPT2_IOCTL_DEFAULT_TIMEOUT;
else
timeout = karg.timeout;
- init_completion(&ioc->ctl_cmds.done);
timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
timeout*HZ);
if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
@@ -1477,8 +1477,8 @@ _ctl_diag_register_2(struct MPT2SAS_ADAPTER *ioc,
mpi_request->ProductSpecific[i] =
cpu_to_le32(ioc->product_specific[buffer_type][i]);
- mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->ctl_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);
@@ -1821,8 +1821,8 @@ _ctl_send_release(struct MPT2SAS_ADAPTER *ioc, u8 buffer_type, u8 *issue_reset)
mpi_request->VF_ID = 0; /* TODO */
mpi_request->VP_ID = 0;
- mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->ctl_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);
@@ -2095,8 +2095,8 @@ _ctl_diag_read_buffer(void __user *arg, enum block_state state)
mpi_request->VF_ID = 0; /* TODO */
mpi_request->VP_ID = 0;
- mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->ctl_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
MPT2_IOCTL_DEFAULT_TIMEOUT*HZ);
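
The ctl hunks, like the base and transport hunks, move init_completion() ahead of mpt2sas_base_put_smid_default(): the wait object must be armed before the request is handed to the firmware, otherwise a reply that completes immediately can signal a completion that is re-initialized afterwards, and the waiter times out. A user-space pthread sketch of the same ordering rule (not kernel code; struct completion here is a stand-in built on a condition variable):

    #include <pthread.h>
    #include <stdio.h>

    struct completion {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int done;
    };

    static void init_completion(struct completion *c)
    {
        pthread_mutex_init(&c->lock, NULL);
        pthread_cond_init(&c->cond, NULL);
        c->done = 0;
    }

    static void complete(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_signal(&c->cond);
        pthread_mutex_unlock(&c->lock);
    }

    static void wait_for_completion(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        while (!c->done)
            pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
    }

    static struct completion done;

    static void *firmware_reply(void *arg)
    {
        (void)arg;
        complete(&done);        /* may run immediately after the post */
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        init_completion(&done);                          /* arm the wait first ... */
        pthread_create(&t, NULL, firmware_reply, NULL);  /* ... then post the request */
        wait_for_completion(&done);
        pthread_join(&t, NULL);
        printf("completed\n");
        return 0;
    }
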
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index d570573..193e33e 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -99,7 +99,7 @@ MODULE_PARM_DESC(logging_level, " bits for enabling additional logging info "
static ushort max_sectors = 0xFFFF;
module_param(max_sectors, ushort, 0);
-MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 8192 default=8192");
+MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");
/* scsi-mid layer global parameter is max_report_luns, which is 511 */
#define MPT2SAS_MAX_LUN (16895)
@@ -612,13 +612,17 @@ _scsih_sas_device_add(struct MPT2SAS_ADAPTER *ioc,
if (!mpt2sas_transport_port_add(ioc, sas_device->handle,
sas_device->sas_address_parent)) {
_scsih_sas_device_remove(ioc, sas_device);
- } else if (!sas_device->starget) {
- if (!ioc->is_driver_loading)
- mpt2sas_transport_port_remove(ioc,
- sas_device->sas_address,
- sas_device->sas_address_parent);
- _scsih_sas_device_remove(ioc, sas_device);
- }
+ } else if (!sas_device->starget) {
+ /* When async scanning is enabled, it's not possible to remove
+ * devices while scanning is turned on due to an oops in
+ * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
+ */
+ if (!ioc->is_driver_loading)
+ mpt2sas_transport_port_remove(ioc,
+ sas_device->sas_address,
+ sas_device->sas_address_parent);
+ _scsih_sas_device_remove(ioc, sas_device);
+ }
}
/**
@@ -1007,8 +1011,8 @@ _scsih_get_chain_buffer_tracker(struct MPT2SAS_ADAPTER *ioc, u16 smid)
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
if (list_empty(&ioc->free_chain_list)) {
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- printk(MPT2SAS_WARN_FMT "chain buffers not available\n",
- ioc->name);
+ dfailprintk(ioc, printk(MPT2SAS_WARN_FMT "chain buffers not "
+ "available\n", ioc->name));
return NULL;
}
chain_req = list_entry(ioc->free_chain_list.next,
@@ -1449,7 +1453,7 @@ _scsih_slave_destroy(struct scsi_device *sdev)
spin_lock_irqsave(&ioc->sas_device_lock, flags);
sas_device = mpt2sas_scsih_sas_device_find_by_sas_address(ioc,
sas_target_priv_data->sas_address);
- if (sas_device)
+ if (sas_device && !sas_target_priv_data->num_luns)
sas_device->starget = NULL;
spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
}
@@ -1776,11 +1780,9 @@ _scsih_init_warpdrive_properties(struct MPT2SAS_ADAPTER *ioc,
Mpi2ConfigReply_t mpi_reply;
u16 sz;
u8 num_pds, count;
- u64 mb = 1024 * 1024;
- u64 tb_2 = 2 * mb * mb;
- u64 capacity;
- u32 stripe_sz;
- u8 i, stripe_exp;
+ unsigned long stripe_sz, block_sz;
+ u8 stripe_exp, block_exp;
+ u64 dev_max_lba;
if (!ioc->is_warpdrive)
return;
@@ -1844,51 +1846,57 @@ _scsih_init_warpdrive_properties(struct MPT2SAS_ADAPTER *ioc,
vol_pg0->PhysDisk[count].PhysDiskNum);
goto out_error;
}
+ /* Disable direct I/O if member drive lba exceeds 4 bytes */
+ dev_max_lba = le64_to_cpu(pd_pg0.DeviceMaxLBA);
+ if (dev_max_lba >> 32) {
+ printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is "
+ "disabled for the drive with handle(0x%04x) member"
+ "handle (0x%04x) unsupported max lba 0x%016llx\n",
+ ioc->name, raid_device->handle,
+ le16_to_cpu(pd_pg0.DevHandle),
+ (unsigned long long)dev_max_lba);
+ goto out_error;
+ }
+
raid_device->pd_handle[count] = le16_to_cpu(pd_pg0.DevHandle);
}
/*
* Assumption for WD: Direct I/O is not supported if the volume is
- * not RAID0, if the stripe size is not 64KB, if the block size is
- * not 512 and if the volume size is >2TB
+ * not RAID0
*/
- if (raid_device->volume_type != MPI2_RAID_VOL_TYPE_RAID0 ||
- le16_to_cpu(vol_pg0->BlockSize) != 512) {
+ if (raid_device->volume_type != MPI2_RAID_VOL_TYPE_RAID0) {
printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
"for the drive with handle(0x%04x): type=%d, "
"s_sz=%uK, blk_size=%u\n", ioc->name,
raid_device->handle, raid_device->volume_type,
- le32_to_cpu(vol_pg0->StripeSize)/2,
+ (le32_to_cpu(vol_pg0->StripeSize) *
+ le16_to_cpu(vol_pg0->BlockSize)) / 1024,
le16_to_cpu(vol_pg0->BlockSize));
goto out_error;
}
- capacity = (u64) le16_to_cpu(vol_pg0->BlockSize) *
- (le64_to_cpu(vol_pg0->MaxLBA) + 1);
-
- if (capacity > tb_2) {
+ stripe_sz = le32_to_cpu(vol_pg0->StripeSize);
+ stripe_exp = find_first_bit(&stripe_sz, 32);
+ if (stripe_exp == 32) {
printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
- "for the drive with handle(0x%04x) since drive sz > 2TB\n",
- ioc->name, raid_device->handle);
+ "for the drive with handle(0x%04x) invalid stripe sz %uK\n",
+ ioc->name, raid_device->handle,
+ (le32_to_cpu(vol_pg0->StripeSize) *
+ le16_to_cpu(vol_pg0->BlockSize)) / 1024);
goto out_error;
}
-
- stripe_sz = le32_to_cpu(vol_pg0->StripeSize);
- stripe_exp = 0;
- for (i = 0; i < 32; i++) {
- if (stripe_sz & 1)
- break;
- stripe_exp++;
- stripe_sz >>= 1;
- }
- if (i == 32) {
+ raid_device->stripe_exponent = stripe_exp;
+ block_sz = le16_to_cpu(vol_pg0->BlockSize);
+ block_exp = find_first_bit(&block_sz, 16);
+ if (block_exp == 16) {
printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is disabled "
- "for the drive with handle(0x%04x) invalid stripe sz %uK\n",
+ "for the drive with handle(0x%04x) invalid block sz %u\n",
ioc->name, raid_device->handle,
- le32_to_cpu(vol_pg0->StripeSize)/2);
+ le16_to_cpu(vol_pg0->BlockSize));
goto out_error;
}
- raid_device->stripe_exponent = stripe_exp;
+ raid_device->block_exponent = block_exp;
raid_device->direct_io_enabled = 1;
printk(MPT2SAS_INFO_FMT "WarpDrive : Direct IO is Enabled for the drive"
@@ -3804,8 +3812,9 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
{
u32 v_lba, p_lba, stripe_off, stripe_unit, column, io_size;
u32 stripe_sz, stripe_exp;
- u8 num_pds, *cdb_ptr, *tmp_ptr, *lba_ptr1, *lba_ptr2;
+ u8 num_pds, *cdb_ptr, i;
u8 cdb0 = scmd->cmnd[0];
+ u64 v_llba;
/*
* Try Direct I/O to RAID member disks
@@ -3816,15 +3825,11 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
if ((cdb0 < READ_16) || !(cdb_ptr[2] | cdb_ptr[3] | cdb_ptr[4]
| cdb_ptr[5])) {
- io_size = scsi_bufflen(scmd) >> 9;
+ io_size = scsi_bufflen(scmd) >>
+ raid_device->block_exponent;
+ i = (cdb0 < READ_16) ? 2 : 6;
/* get virtual lba */
- lba_ptr1 = lba_ptr2 = (cdb0 < READ_16) ? &cdb_ptr[2] :
- &cdb_ptr[6];
- tmp_ptr = (u8 *)&v_lba + 3;
- *tmp_ptr-- = *lba_ptr1++;
- *tmp_ptr-- = *lba_ptr1++;
- *tmp_ptr-- = *lba_ptr1++;
- *tmp_ptr = *lba_ptr1;
+ v_lba = be32_to_cpu(*(__be32 *)(&cdb_ptr[i]));
if (((u64)v_lba + (u64)io_size - 1) <=
(u32)raid_device->max_lba) {
@@ -3843,11 +3848,39 @@ _scsih_setup_direct_io(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
mpi_request->DevHandle =
cpu_to_le16(raid_device->
pd_handle[column]);
- tmp_ptr = (u8 *)&p_lba + 3;
- *lba_ptr2++ = *tmp_ptr--;
- *lba_ptr2++ = *tmp_ptr--;
- *lba_ptr2++ = *tmp_ptr--;
- *lba_ptr2 = *tmp_ptr;
+ (*(__be32 *)(&cdb_ptr[i])) =
+ cpu_to_be32(p_lba);
+ /*
+ * WD: To indicate this I/O is directI/O
+ */
+ _scsih_scsi_direct_io_set(ioc, smid, 1);
+ }
+ }
+ } else {
+ io_size = scsi_bufflen(scmd) >>
+ raid_device->block_exponent;
+ /* get virtual lba */
+ v_llba = be64_to_cpu(*(__be64 *)(&cdb_ptr[2]));
+
+ if ((v_llba + (u64)io_size - 1) <=
+ raid_device->max_lba) {
+ stripe_sz = raid_device->stripe_sz;
+ stripe_exp = raid_device->stripe_exponent;
+ stripe_off = (u32) (v_llba & (stripe_sz - 1));
+
+ /* Check whether IO falls within a stripe */
+ if ((stripe_off + io_size) <= stripe_sz) {
+ num_pds = raid_device->num_pds;
+ p_lba = (u32)(v_llba >> stripe_exp);
+ stripe_unit = p_lba / num_pds;
+ column = p_lba % num_pds;
+ p_lba = (stripe_unit << stripe_exp) +
+ stripe_off;
+ mpi_request->DevHandle =
+ cpu_to_le16(raid_device->
+ pd_handle[column]);
+ (*(__be64 *)(&cdb_ptr[2])) =
+ cpu_to_be64((u64)p_lba);
/*
* WD: To indicate this I/O is directI/O
*/
@@ -4403,11 +4436,14 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
scmd->result = DID_NO_CONNECT << 16;
goto out;
}
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
/*
* WARPDRIVE: If direct_io is set then it is directIO,
* the failed direct I/O should be redirected to volume
*/
- if (_scsih_scsi_direct_io_get(ioc, smid)) {
+ if (_scsih_scsi_direct_io_get(ioc, smid) &&
+ ((ioc_status & MPI2_IOCSTATUS_MASK)
+ != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
ioc->scsi_lookup[smid - 1].scmd = scmd;
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
@@ -4441,7 +4477,6 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
- ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
else
@@ -4485,6 +4520,8 @@ _scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
scmd->result = DID_TRANSPORT_DISRUPTED << 16;
goto out;
}
+ scmd->result = DID_SOFT_ERROR << 16;
+ break;
case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
scmd->result = DID_RESET << 16;
@@ -6714,6 +6751,7 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
} else
sas_target_priv_data = NULL;
raid_device->responding = 1;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
starget_printk(KERN_INFO, raid_device->starget,
"handle(0x%04x), wwid(0x%016llx)\n", handle,
(unsigned long long)raid_device->wwid);
@@ -6724,16 +6762,16 @@ _scsih_mark_responding_raid_device(struct MPT2SAS_ADAPTER *ioc, u64 wwid,
*/
_scsih_init_warpdrive_properties(ioc, raid_device);
if (raid_device->handle == handle)
- goto out;
+ return;
printk(KERN_INFO "\thandle changed from(0x%04x)!!!\n",
raid_device->handle);
raid_device->handle = handle;
if (sas_target_priv_data)
sas_target_priv_data->handle = handle;
- goto out;
+ return;
}
}
- out:
+
spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
}
@@ -7418,7 +7456,7 @@ static struct scsi_host_template scsih_driver_template = {
.can_queue = 1,
.this_id = -1,
.sg_tablesize = MPT2SAS_SG_DEPTH,
- .max_sectors = 8192,
+ .max_sectors = 32767,
.cmd_per_lun = 7,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = mpt2sas_host_attrs,
@@ -7928,6 +7966,7 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
ioc->logging_level = logging_level;
+ ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
/* misc semaphores and spin locks */
mutex_init(&ioc->reset_in_progress_mutex);
spin_lock_init(&ioc->ioc_reset_in_progress_lock);
@@ -7958,11 +7997,11 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
printk(MPT2SAS_WARN_FMT "Invalid value %d passed "
"for max_sectors, range is 64 to 8192. Assigning "
"value of 64.\n", ioc->name, max_sectors);
- } else if (max_sectors > 8192) {
- shost->max_sectors = 8192;
+ } else if (max_sectors > 32767) {
+ shost->max_sectors = 32767;
printk(MPT2SAS_WARN_FMT "Invalid value %d passed "
"for max_sectors, range is 64 to 8192. Assigning "
- "default value of 8192.\n", ioc->name,
+ "default value of 32767.\n", ioc->name,
max_sectors);
} else {
shost->max_sectors = max_sectors & 0xFFFE;
@@ -8000,7 +8039,6 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto out_attach_fail;
}
- scsi_scan_host(shost);
if (ioc->is_warpdrive) {
if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
ioc->hide_drives = 0;
@@ -8014,8 +8052,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
} else
ioc->hide_drives = 0;
+ scsi_scan_host(shost);
- _scsih_probe_devices(ioc);
return 0;
out_attach_fail:
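
The WarpDrive changes in the scsih hunks above extend direct I/O to 16-byte CDBs and replace the byte-by-byte LBA copy with be32/be64 conversions; the mapping from a virtual RAID0 LBA to a member disk and physical LBA is the usual stripe arithmetic driven by stripe_exponent and num_pds. A standalone sketch of that mapping, with made-up example values (not driver code):

    #include <stdio.h>
    #include <stdint.h>
    #include <strings.h>    /* ffs() */

    int main(void)
    {
        uint32_t stripe_sz  = 128;              /* stripe size in blocks (power of two) */
        uint32_t stripe_exp = ffs(stripe_sz) - 1;
        uint8_t  num_pds    = 4;                /* member disks */
        uint64_t v_lba      = 100000;           /* virtual LBA taken from the CDB */

        uint32_t stripe_off  = (uint32_t)(v_lba & (stripe_sz - 1));
        uint32_t stripe_no   = (uint32_t)(v_lba >> stripe_exp);
        uint32_t column      = stripe_no % num_pds;     /* which member disk */
        uint32_t stripe_unit = stripe_no / num_pds;     /* stripe index on that disk */
        uint32_t p_lba       = (stripe_unit << stripe_exp) + stripe_off;

        printf("disk %u, physical LBA %u\n", column, p_lba);
        return 0;
    }
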
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 2307322..8310474 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -398,8 +398,8 @@ _transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "report_manufacture - "
"send to sas_addr(0x%016llx)\n", ioc->name,
(unsigned long long)sas_address));
- mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->transport_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
10*HZ);
@@ -1184,8 +1184,8 @@ _transport_get_expander_phy_error_log(struct MPT2SAS_ADAPTER *ioc,
dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "phy_error_log - "
"send to sas_addr(0x%016llx), phy(%d)\n", ioc->name,
(unsigned long long)phy->identify.sas_address, phy->number));
- mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->transport_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
10*HZ);
@@ -1509,8 +1509,9 @@ _transport_expander_phy_control(struct MPT2SAS_ADAPTER *ioc,
"send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n", ioc->name,
(unsigned long long)phy->identify.sas_address, phy->number,
phy_operation));
- mpt2sas_base_put_smid_default(ioc, smid);
+
init_completion(&ioc->transport_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
10*HZ);
@@ -1949,8 +1950,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
dtransportprintk(ioc, printk(MPT2SAS_INFO_FMT "%s - "
"sending smp request\n", ioc->name, __func__));
- mpt2sas_base_put_smid_default(ioc, smid);
init_completion(&ioc->transport_cmds.done);
+ mpt2sas_base_put_smid_default(ioc, smid);
timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
10*HZ);
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index f6a50c9..0029249 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -59,11 +59,11 @@ MODULE_PARM_DESC(trans_mode, "transfer mode (0: BIOS(default) 1: Async 2: Ultra2
#define ASYNC_MODE 1
#define ULTRA20M_MODE 2
-static int auto_param = 0; /* default: ON */
+static bool auto_param = 0; /* default: ON */
module_param (auto_param, bool, 0);
MODULE_PARM_DESC(auto_param, "AutoParameter mode (0: ON(default) 1: OFF)");
-static int disc_priv = 1; /* default: OFF */
+static bool disc_priv = 1; /* default: OFF */
module_param (disc_priv, bool, 0);
MODULE_PARM_DESC(disc_priv, "disconnection privilege mode (0: ON 1: OFF(default))");
diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
index b31a8e3..d4ed9eb 100644
--- a/drivers/scsi/osd/osd_uld.c
+++ b/drivers/scsi/osd/osd_uld.c
@@ -69,10 +69,10 @@
#ifndef SCSI_OSD_MAJOR
# define SCSI_OSD_MAJOR 260
#endif
-#define SCSI_OSD_MAX_MINOR 64
+#define SCSI_OSD_MAX_MINOR MINORMASK
static const char osd_name[] = "osd";
-static const char *osd_version_string = "open-osd 0.2.0";
+static const char *osd_version_string = "open-osd 0.2.1";
MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>");
MODULE_DESCRIPTION("open-osd Upper-Layer-Driver osd.ko");
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index ca86721..b61a753 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -70,7 +70,7 @@ module_param(nsp_burst_mode, int, 0);
MODULE_PARM_DESC(nsp_burst_mode, "Burst transfer mode (0=io8, 1=io32, 2=mem32(default))");
/* Release IO ports after configuration? */
-static int free_ports = 0;
+static bool free_ports = 0;
module_param(free_ports, bool, 0);
MODULE_PARM_DESC(free_ports, "Release IO ports after configuration? (default: 0 (=no))");
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 5163edb..ea8a0b4 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -4105,7 +4105,7 @@ static long pmcraid_chr_ioctl(
hdr = kmalloc(sizeof(struct pmcraid_ioctl_header), GFP_KERNEL);
if (!hdr) {
- pmcraid_err("faile to allocate memory for ioctl header\n");
+ pmcraid_err("failed to allocate memory for ioctl header\n");
return -ENOMEM;
}
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 6465dae..9f41b3b 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -107,7 +107,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
}
- return -EINVAL;
+ return count;
}
static struct bin_attribute sysfs_fw_dump_attr = {
@@ -387,7 +387,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
break;
case 3:
if (ha->optrom_state != QLA_SWRITING)
- return -ENOMEM;
+ return -EINVAL;
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7068,
@@ -667,7 +667,7 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
dev, adr, len, opt);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7074,
- "Unable to write EDC (%x) %02x:%04x:%02x:%02hhx\n",
+ "Unable to write EDC (%x) %02x:%04x:%02x:%02x:%02hhx\n",
rval, dev, adr, opt, len, buf[8]);
return -EIO;
}
@@ -724,7 +724,7 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
dev, adr, len, opt);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_info, vha, 0x7075,
- "Unable to write EDC status (%x) %02x:%04x:%02x.\n",
+ "Unable to write EDC status (%x) %02x:%04x:%02x:%02x.\n",
rval, dev, adr, opt, len);
return -EIO;
}
@@ -1036,8 +1036,7 @@ qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
vha->device_flags & DFLG_NO_CABLE)
len = snprintf(buf, PAGE_SIZE, "Link Down\n");
else if (atomic_read(&vha->loop_state) != LOOP_READY ||
- test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
- test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
+ qla2x00_reset_active(vha))
len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");
else {
len = snprintf(buf, PAGE_SIZE, "Link Up - ");
@@ -1359,8 +1358,7 @@ qla2x00_thermal_temp_show(struct device *dev,
return snprintf(buf, PAGE_SIZE, "\n");
temp = frac = 0;
- if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
- test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
+ if (qla2x00_reset_active(vha))
ql_log(ql_log_warn, vha, 0x707b,
"ISP reset active.\n");
else if (!vha->hw->flags.eeh_busy)
@@ -1379,8 +1377,7 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
int rval = QLA_FUNCTION_FAILED;
uint16_t state[5];
- if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
- test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
+ if (qla2x00_reset_active(vha))
ql_log(ql_log_warn, vha, 0x707c,
"ISP reset active.\n");
else if (!vha->hw->flags.eeh_busy)
@@ -1693,9 +1690,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
if (IS_FWI2_CAPABLE(ha)) {
rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
} else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
- !test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) &&
- !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
- !ha->dpc_active) {
+ !qla2x00_reset_active(vha) && !ha->dpc_active) {
/* Must be in a 'READY' state for statistics retrieval. */
rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
stats, stats_dma);
@@ -1971,8 +1966,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
"Queue delete failed.\n");
}
- scsi_host_put(vha->host);
ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
+ scsi_host_put(vha->host);
return 0;
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 8b641a8..1682e2e 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -31,6 +31,7 @@ qla2x00_get_ctx_bsg_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size)
memset(sp, 0, sizeof(*sp));
sp->fcport = fcport;
sp->ctx = ctx;
+ ctx->iocbs = 1;
done:
return sp;
}
@@ -102,18 +103,11 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
bsg_job->reply->reply_payload_rcv_len = 0;
- if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))) {
+ if (!(IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) || IS_QLA82XX(ha))) {
ret = -EINVAL;
goto exit_fcp_prio_cfg;
}
- if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
- test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
- ret = -EBUSY;
- goto exit_fcp_prio_cfg;
- }
-
/* Get the sub command */
oper = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
@@ -389,6 +383,20 @@ done:
return rval;
}
+inline uint16_t
+qla24xx_calc_ct_iocbs(uint16_t dsds)
+{
+ uint16_t iocbs;
+
+ iocbs = 1;
+ if (dsds > 2) {
+ iocbs += (dsds - 2) / 5;
+ if ((dsds - 2) % 5)
+ iocbs++;
+ }
+ return iocbs;
+}
+
static int
qla2x00_process_ct(struct fc_bsg_job *bsg_job)
{
@@ -489,6 +497,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
ct = sp->ctx;
ct->type = SRB_CT_CMD;
ct->name = "bsg_ct";
+ ct->iocbs = qla24xx_calc_ct_iocbs(req_sg_cnt + rsp_sg_cnt);
ct->u.bsg_job = bsg_job;
ql_dbg(ql_dbg_user, vha, 0x7016,
@@ -630,13 +639,6 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
dma_addr_t rsp_data_dma;
uint32_t rsp_data_len;
- if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
- test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
- ql_log(ql_log_warn, vha, 0x7018, "Abort active or needed.\n");
- return -EBUSY;
- }
-
if (!vha->flags.online) {
ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
return -EIO;
@@ -858,13 +860,6 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
int rval = 0;
uint32_t flag;
- if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
- test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
- ql_log(ql_log_warn, vha, 0x702e, "Abort active or needed.\n");
- return -EBUSY;
- }
-
if (!IS_QLA84XX(ha)) {
ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
return -EINVAL;
@@ -906,11 +901,6 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
uint32_t flag;
uint32_t fw_ver;
- if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
- test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
- return -EBUSY;
-
if (!IS_QLA84XX(ha)) {
ql_dbg(ql_dbg_user, vha, 0x7032,
"Not 84xx, exiting.\n");
@@ -1020,14 +1010,6 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
uint32_t data_len = 0;
uint32_t dma_direction = DMA_NONE;
- if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
- test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
- ql_log(ql_log_warn, vha, 0x7039,
- "Abort active or needed.\n");
- return -EBUSY;
- }
-
if (!IS_QLA84XX(ha)) {
ql_log(ql_log_warn, vha, 0x703a,
"Not 84xx, exiting.\n");
@@ -1230,13 +1212,6 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
bsg_job->reply->reply_payload_rcv_len = 0;
- if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
- test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
- test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
- ql_log(ql_log_warn, vha, 0x7045, "abort active or needed.\n");
- return -EBUSY;
- }
-
if (!IS_IIDMA_CAPABLE(vha->hw)) {
ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
return -EINVAL;
@@ -1652,8 +1627,17 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
vha = shost_priv(host);
}
+ if (qla2x00_reset_active(vha)) {
+ ql_dbg(ql_dbg_user, vha, 0x709f,
+ "BSG: ISP abort active/needed -- cmd=%d.\n",
+ bsg_job->request->msgcode);
+ bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_job->job_done(bsg_job);
+ return -EBUSY;
+ }
+
ql_dbg(ql_dbg_user, vha, 0x7000,
- "Entered %s msgcode=%d.\n", __func__, bsg_job->request->msgcode);
+ "Entered %s msgcode=0x%x.\n", __func__, bsg_job->request->msgcode);
switch (bsg_job->request->msgcode) {
case FC_BSG_RPT_ELS:
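
qla24xx_calc_ct_iocbs(), added above, sizes a CT pass-through request: the command IOCB itself carries two data-segment descriptors and each Continuation Type 1 IOCB carries five more. A standalone sketch of the same ceiling arithmetic (not driver code):

    #include <stdio.h>
    #include <stdint.h>

    static uint16_t calc_ct_iocbs(uint16_t dsds)
    {
        uint16_t iocbs = 1;                     /* command IOCB holds 2 DSDs */

        if (dsds > 2)
            iocbs += (dsds - 2 + 4) / 5;        /* ceil((dsds - 2) / 5) continuations */
        return iocbs;
    }

    int main(void)
    {
        uint16_t dsds;

        for (dsds = 1; dsds <= 12; dsds++)
            printf("%2u dsds -> %u iocbs\n",
                   (unsigned)dsds, (unsigned)calc_ct_iocbs(dsds));
        return 0;
    }
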
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index f3cddd5..45cbf0b 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,15 +11,18 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x0116 | |
+ * | Module Init and Probe | 0x0116 | 0xfa |
* | Mailbox commands | 0x112b | |
- * | Device Discovery | 0x2083 | |
- * | Queue Command and IO tracing | 0x302e | 0x3008 |
+ * | Device Discovery | 0x2084 | |
+ * | Queue Command and IO tracing | 0x302f | 0x3008,0x302d, |
+ * | | | 0x302e |
* | DPC Thread | 0x401c | |
- * | Async Events | 0x5059 | |
- * | Timer Routines | 0x6010 | 0x600e,0x600f |
- * | User Space Interactions | 0x709d | |
- * | Task Management | 0x8041 | 0x800b |
+ * | Async Events | 0x5057 | 0x5052 |
+ * | Timer Routines | 0x6011 | 0x600e,0x600f |
+ * | User Space Interactions | 0x709e | 0x7018,0x702e |
+ * | | | 0x7039,0x7045 |
+ * | Task Management | 0x803c | 0x8025-0x8026 |
+ * | | | 0x800b,0x8039 |
* | AER/EEH | 0x900f | |
* | Virtual Port | 0xa007 | |
* | ISP82XX Specific | 0xb052 | |
@@ -368,7 +371,7 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
memcpy(iter_reg, ha->fce, ntohl(fcec->size));
- return iter_reg;
+ return (char *)iter_reg + ntohl(fcec->size);
}
static inline void *
@@ -1650,6 +1653,15 @@ qla81xx_fw_dump_failed:
/****************************************************************************/
/* Driver Debug Functions. */
/****************************************************************************/
+
+static inline int
+ql_mask_match(uint32_t level)
+{
+ if (ql2xextended_error_logging == 1)
+ ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
+ return (level & ql2xextended_error_logging) == level;
+}
+
/*
* This function is for formatting and logging debug information.
* It is to be used when vha is available. It formats the message
@@ -1664,34 +1676,31 @@ qla81xx_fw_dump_failed:
* msg: The message to be displayed.
*/
void
-ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
-
- char pbuf[QL_DBG_BUF_LEN];
- va_list ap;
- uint32_t len;
- struct pci_dev *pdev = NULL;
-
- memset(pbuf, 0, QL_DBG_BUF_LEN);
-
- va_start(ap, msg);
-
- if ((level & ql2xextended_error_logging) == level) {
- if (vha != NULL) {
- pdev = vha->hw->pdev;
- /* <module-name> <pci-name> <msg-id>:<host> Message */
- sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR,
- dev_name(&(pdev->dev)), id + ql_dbg_offset,
- vha->host_no);
- } else
- sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
- "0000:00:00.0", id + ql_dbg_offset);
-
- len = strlen(pbuf);
- vsprintf(pbuf+len, msg, ap);
- pr_warning("%s", pbuf);
+ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+
+ if (!ql_mask_match(level))
+ return;
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ if (vha != NULL) {
+ const struct pci_dev *pdev = vha->hw->pdev;
+ /* <module-name> <pci-name> <msg-id>:<host> Message */
+ pr_warn("%s [%s]-%04x:%ld: %pV",
+ QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
+ vha->host_no, &vaf);
+ } else {
+ pr_warn("%s [%s]-%04x: : %pV",
+ QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
}
- va_end(ap);
+ va_end(va);
}
@@ -1710,31 +1719,27 @@ ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
* msg: The message to be displayed.
*/
void
-ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) {
-
- char pbuf[QL_DBG_BUF_LEN];
- va_list ap;
- uint32_t len;
+ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
+ const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
if (pdev == NULL)
return;
+ if (!ql_mask_match(level))
+ return;
- memset(pbuf, 0, QL_DBG_BUF_LEN);
-
- va_start(ap, msg);
-
- if ((level & ql2xextended_error_logging) == level) {
- /* <module-name> <dev-name>:<msg-id> Message */
- sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
- dev_name(&(pdev->dev)), id + ql_dbg_offset);
+ va_start(va, fmt);
- len = strlen(pbuf);
- vsprintf(pbuf+len, msg, ap);
- pr_warning("%s", pbuf);
- }
+ vaf.fmt = fmt;
+ vaf.va = &va;
- va_end(ap);
+ /* <module-name> <dev-name>:<msg-id> Message */
+ pr_warn("%s [%s]-%04x: : %pV",
+ QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, &vaf);
+ va_end(va);
}
/*
@@ -1751,47 +1756,47 @@ ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) {
* msg: The message to be displayed.
*/
void
-ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
-
- char pbuf[QL_DBG_BUF_LEN];
- va_list ap;
- uint32_t len;
- struct pci_dev *pdev = NULL;
-
- memset(pbuf, 0, QL_DBG_BUF_LEN);
-
- va_start(ap, msg);
-
- if (level <= ql_errlev) {
- if (vha != NULL) {
- pdev = vha->hw->pdev;
- /* <module-name> <msg-id>:<host> Message */
- sprintf(pbuf, "%s [%s]-%04x:%ld: ", QL_MSGHDR,
- dev_name(&(pdev->dev)), id, vha->host_no);
- } else
- sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
- "0000:00:00.0", id);
+ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char pbuf[128];
- len = strlen(pbuf);
- vsprintf(pbuf+len, msg, ap);
+ if (level > ql_errlev)
+ return;
- switch (level) {
- case 0: /* FATAL LOG */
- pr_crit("%s", pbuf);
- break;
- case 1:
- pr_err("%s", pbuf);
- break;
- case 2:
- pr_warn("%s", pbuf);
- break;
- default:
- pr_info("%s", pbuf);
- break;
- }
+ if (vha != NULL) {
+ const struct pci_dev *pdev = vha->hw->pdev;
+ /* <module-name> <msg-id>:<host> Message */
+ snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ",
+ QL_MSGHDR, dev_name(&(pdev->dev)), id, vha->host_no);
+ } else {
+ snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
+ QL_MSGHDR, "0000:00:00.0", id);
+ }
+ pbuf[sizeof(pbuf) - 1] = 0;
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ switch (level) {
+ case 0: /* FATAL LOG */
+ pr_crit("%s%pV", pbuf, &vaf);
+ break;
+ case 1:
+ pr_err("%s%pV", pbuf, &vaf);
+ break;
+ case 2:
+ pr_warn("%s%pV", pbuf, &vaf);
+ break;
+ default:
+ pr_info("%s%pV", pbuf, &vaf);
+ break;
}
- va_end(ap);
+ va_end(va);
}
/*
@@ -1809,43 +1814,44 @@ ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, char *msg, ...) {
* msg: The message to be displayed.
*/
void
-ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id, char *msg, ...) {
-
- char pbuf[QL_DBG_BUF_LEN];
- va_list ap;
- uint32_t len;
+ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
+ const char *fmt, ...)
+{
+ va_list va;
+ struct va_format vaf;
+ char pbuf[128];
if (pdev == NULL)
return;
+ if (level > ql_errlev)
+ return;
- memset(pbuf, 0, QL_DBG_BUF_LEN);
-
- va_start(ap, msg);
-
- if (level <= ql_errlev) {
- /* <module-name> <dev-name>:<msg-id> Message */
- sprintf(pbuf, "%s [%s]-%04x: : ", QL_MSGHDR,
- dev_name(&(pdev->dev)), id);
-
- len = strlen(pbuf);
- vsprintf(pbuf+len, msg, ap);
- switch (level) {
- case 0: /* FATAL LOG */
- pr_crit("%s", pbuf);
- break;
- case 1:
- pr_err("%s", pbuf);
- break;
- case 2:
- pr_warn("%s", pbuf);
- break;
- default:
- pr_info("%s", pbuf);
- break;
- }
+ /* <module-name> <dev-name>:<msg-id> Message */
+ snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
+ QL_MSGHDR, dev_name(&(pdev->dev)), id);
+ pbuf[sizeof(pbuf) - 1] = 0;
+
+ va_start(va, fmt);
+
+ vaf.fmt = fmt;
+ vaf.va = &va;
+
+ switch (level) {
+ case 0: /* FATAL LOG */
+ pr_crit("%s%pV", pbuf, &vaf);
+ break;
+ case 1:
+ pr_err("%s%pV", pbuf, &vaf);
+ break;
+ case 2:
+ pr_warn("%s%pV", pbuf, &vaf);
+ break;
+ default:
+ pr_info("%s%pV", pbuf, &vaf);
+ break;
}
- va_end(ap);
+ va_end(va);
}
void
@@ -1858,20 +1864,20 @@ ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
uint16_t __iomem *mbx_reg;
- if ((level & ql2xextended_error_logging) == level) {
-
- if (IS_QLA82XX(ha))
- mbx_reg = &reg82->mailbox_in[0];
- else if (IS_FWI2_CAPABLE(ha))
- mbx_reg = &reg24->mailbox0;
- else
- mbx_reg = MAILBOX_REG(ha, reg, 0);
+ if (!ql_mask_match(level))
+ return;
- ql_dbg(level, vha, id, "Mailbox registers:\n");
- for (i = 0; i < 6; i++)
- ql_dbg(level, vha, id,
- "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
- }
+ if (IS_QLA82XX(ha))
+ mbx_reg = &reg82->mailbox_in[0];
+ else if (IS_FWI2_CAPABLE(ha))
+ mbx_reg = &reg24->mailbox0;
+ else
+ mbx_reg = MAILBOX_REG(ha, reg, 0);
+
+ ql_dbg(level, vha, id, "Mailbox registers:\n");
+ for (i = 0; i < 6; i++)
+ ql_dbg(level, vha, id,
+ "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
}
@@ -1881,24 +1887,25 @@ ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
{
uint32_t cnt;
uint8_t c;
- if ((level & ql2xextended_error_logging) == level) {
-
- ql_dbg(level, vha, id, " 0 1 2 3 4 5 6 7 8 "
- "9 Ah Bh Ch Dh Eh Fh\n");
- ql_dbg(level, vha, id, "----------------------------------"
- "----------------------------\n");
-
- ql_dbg(level, vha, id, "");
- for (cnt = 0; cnt < size;) {
- c = *b++;
- printk("%02x", (uint32_t) c);
- cnt++;
- if (!(cnt % 16))
- printk("\n");
- else
- printk(" ");
- }
- if (cnt % 16)
- ql_dbg(level, vha, id, "\n");
+
+ if (!ql_mask_match(level))
+ return;
+
+ ql_dbg(level, vha, id, " 0 1 2 3 4 5 6 7 8 "
+ "9 Ah Bh Ch Dh Eh Fh\n");
+ ql_dbg(level, vha, id, "----------------------------------"
+ "----------------------------\n");
+
+ ql_dbg(level, vha, id, " ");
+ for (cnt = 0; cnt < size;) {
+ c = *b++;
+ printk("%02x", (uint32_t) c);
+ cnt++;
+ if (!(cnt % 16))
+ printk("\n");
+ else
+ printk(" ");
}
+ if (cnt % 16)
+ ql_dbg(level, vha, id, "\n");
}
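
The logging rewrite above drops the fixed QL_DBG_BUF_LEN stack buffer and vsprintf() in favour of forwarding the caller's format through struct va_format and the kernel's %pV printk extension, removing the overflow risk and an extra copy. A user-space sketch of the same forwarding idea using vprintf() (not kernel code; %pV itself exists only in the kernel's printk):

    #include <stdarg.h>
    #include <stdio.h>

    static void __attribute__((format(printf, 2, 3)))
    drv_log(const char *prefix, const char *fmt, ...)
    {
        va_list ap;

        printf("%s: ", prefix);
        va_start(ap, fmt);
        vprintf(fmt, ap);       /* no intermediate buffer, no overflow risk */
        va_end(ap);
    }

    int main(void)
    {
        drv_log("qla2xxx [0000:00:00.0]-0001", "link state=%d\n", 3);
        return 0;
    }
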
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 98a377b..5f1b6d9 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -232,6 +232,7 @@ struct qla2xxx_fw_dump {
};
#define QL_MSGHDR "qla2xxx"
+#define QL_DBG_DEFAULT1_MASK 0x1e400000
#define ql_log_fatal 0 /* display fatal errors */
#define ql_log_warn 1 /* display critical errors */
@@ -244,15 +245,15 @@ struct qla2xxx_fw_dump {
extern int ql_errlev;
-void
-ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, char *, ...);
-void
-ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_dbg(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_dbg_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
-void
-ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, char *, ...);
-void
-ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_log(uint32_t, scsi_qla_host_t *vha, int32_t, const char *fmt, ...);
+void __attribute__((format (printf, 4, 5)))
+ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
/* Debug Levels */
/* The 0x40000000 is the max value any debug level can have
@@ -275,5 +276,3 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, char *, ...);
#define ql_dbg_misc 0x00010000 /* For dumping everything that is not
* not covered by upper categories
*/
-
-#define QL_DBG_BUF_LEN 512
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index fcf052c..af1003f 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -44,6 +44,7 @@
* ISP2100 HBAs.
*/
#define MAILBOX_REGISTER_COUNT_2100 8
+#define MAILBOX_REGISTER_COUNT_2200 24
#define MAILBOX_REGISTER_COUNT 32
#define QLA2200A_RISC_ROM_VER 4
@@ -271,6 +272,7 @@ struct srb_iocb {
struct srb_ctx {
uint16_t type;
char *name;
+ int iocbs;
union {
struct srb_iocb *iocb_cmd;
struct fc_bsg_job *bsg_job;
@@ -2244,6 +2246,7 @@ struct isp_operations {
int (*get_flash_version) (struct scsi_qla_host *, void *);
int (*start_scsi) (srb_t *);
int (*abort_isp) (struct scsi_qla_host *);
+ int (*iospace_config)(struct qla_hw_data*);
};
/* MSI-X Support *************************************************************/
@@ -2978,10 +2981,6 @@ typedef struct scsi_qla_host {
atomic_dec(&__vha->vref_count); \
} while (0)
-
-#define qla_printk(level, ha, format, arg...) \
- dev_printk(level , &((ha)->pdev->dev) , format , ## arg)
-
/*
* qla2x00 local function return status codes
*/
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index c0c11af..408679b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -572,7 +572,7 @@ extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *,
size_t, char *);
extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *);
extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *);
-extern void qla82xx_start_iocbs(srb_t *);
+extern void qla82xx_start_iocbs(scsi_qla_host_t *);
extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *);
extern int qla82xx_check_md_needed(scsi_qla_host_t *);
extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 37937aa..4aea4ae 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -758,7 +758,7 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
"GA_NXT Send SNS failed (%d).\n", rval);
} else if (sns_cmd->p.gan_data[8] != 0x80 ||
sns_cmd->p.gan_data[9] != 0x02) {
- ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207d,
+ ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2084,
"GA_NXT failed, rejected request ga_nxt_rsp:\n");
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
sns_cmd->p.gan_data, 16);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 54ea68c..1fa067e 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -111,6 +111,7 @@ qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
memset(sp, 0, sizeof(*sp));
sp->fcport = fcport;
sp->ctx = ctx;
+ ctx->iocbs = 1;
ctx->u.iocb_cmd = iocb;
iocb->free = qla2x00_ctx_sp_free;
@@ -154,8 +155,8 @@ qla2x00_async_iocb_timeout(srb_t *sp)
struct srb_ctx *ctx = sp->ctx;
ql_dbg(ql_dbg_disc, fcport->vha, 0x2071,
- "Async-%s timeout - portid=%02x%02x%02x.\n",
- ctx->name, fcport->d_id.b.domain, fcport->d_id.b.area,
+ "Async-%s timeout - hdl=%x portid=%02x%02x%02x.\n",
+ ctx->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
fcport->flags &= ~FCF_ASYNC_SENT;
@@ -211,9 +212,10 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
goto done_free_sp;
ql_dbg(ql_dbg_disc, vha, 0x2072,
- "Async-login - loopid=%x portid=%02x%02x%02x retries=%d.\n",
- fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa, fcport->login_retry);
+ "Async-login - hdl=%x, loopid=%x portid=%02x%02x%02x "
+ "retries=%d.\n", sp->handle, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ fcport->login_retry);
return rval;
done_free_sp:
@@ -258,9 +260,9 @@ qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
goto done_free_sp;
ql_dbg(ql_dbg_disc, vha, 0x2070,
- "Async-logout - loop-id=%x portid=%02x%02x%02x.\n",
- fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa);
+ "Async-logout - hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
return rval;
done_free_sp:
@@ -308,9 +310,9 @@ qla2x00_async_adisc(struct scsi_qla_host *vha, fc_port_t *fcport,
goto done_free_sp;
ql_dbg(ql_dbg_disc, vha, 0x206f,
- "Async-adisc - loopid=%x portid=%02x%02x%02x.\n",
- fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa);
+ "Async-adisc - hdl=%x loopid=%x portid=%02x%02x%02x.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
return rval;
done_free_sp:
@@ -360,9 +362,9 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun,
goto done_free_sp;
ql_dbg(ql_dbg_taskm, vha, 0x802f,
- "Async-tmf loop-id=%x portid=%02x%02x%02x.\n",
- fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa);
+ "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n",
+ sp->handle, fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
return rval;
done_free_sp:
@@ -514,7 +516,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
set_bit(0, ha->req_qid_map);
set_bit(0, ha->rsp_qid_map);
- ql_log(ql_log_info, vha, 0x0040,
+ ql_dbg(ql_dbg_init, vha, 0x0040,
"Configuring PCI space...\n");
rval = ha->isp_ops->pci_config(vha);
if (rval) {
@@ -533,7 +535,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
}
ha->isp_ops->get_flash_version(vha, req->ring);
- ql_log(ql_log_info, vha, 0x0061,
+ ql_dbg(ql_dbg_init, vha, 0x0061,
"Configure NVRAM parameters...\n");
ha->isp_ops->nvram_config(vha);
@@ -550,7 +552,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
return QLA_FUNCTION_FAILED;
}
- ql_log(ql_log_info, vha, 0x0078,
+ ql_dbg(ql_dbg_init, vha, 0x0078,
"Verifying loaded RISC code...\n");
if (qla2x00_isp_firmware(vha) != QLA_SUCCESS) {
@@ -1294,7 +1296,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
ha->flags.fce_enabled = 0;
goto try_eft;
}
- ql_log(ql_log_info, vha, 0x00c0,
+ ql_dbg(ql_dbg_init, vha, 0x00c0,
"Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024);
fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE;
@@ -1321,7 +1323,7 @@ try_eft:
tc_dma);
goto cont_alloc;
}
- ql_log(ql_log_info, vha, 0x00c3,
+ ql_dbg(ql_dbg_init, vha, 0x00c3,
"Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024);
eft_size = EFT_SIZE;
@@ -1358,7 +1360,7 @@ cont_alloc:
}
return;
}
- ql_log(ql_log_info, vha, 0x00c5,
+ ql_dbg(ql_dbg_init, vha, 0x00c5,
"Allocated (%d KB) for firmware dump.\n", dump_size / 1024);
ha->fw_dump_len = dump_size;
@@ -1929,7 +1931,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
rval = qla84xx_init_chip(vha);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn,
- vha, 0x8026,
+ vha, 0x8007,
"Init chip failed.\n");
break;
}
@@ -1938,7 +1940,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
cs84xx_time = jiffies - cs84xx_time;
wtime += cs84xx_time;
mtime += cs84xx_time;
- ql_dbg(ql_dbg_taskm, vha, 0x8025,
+ ql_dbg(ql_dbg_taskm, vha, 0x8008,
"Increasing wait time by %ld. "
"New time %ld.\n", cs84xx_time,
wtime);
@@ -1981,16 +1983,13 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
/* Delay for a while */
msleep(500);
-
- ql_dbg(ql_dbg_taskm, vha, 0x8039,
- "fw_state=%x curr time=%lx.\n", state[0], jiffies);
} while (1);
ql_dbg(ql_dbg_taskm, vha, 0x803a,
"fw_state=%x (%x, %x, %x, %x) " "curr time=%lx.\n", state[0],
state[1], state[2], state[3], state[4], jiffies);
- if (rval) {
+ if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
ql_log(ql_log_warn, vha, 0x803b,
"Firmware ready **** FAILED ****.\n");
}
@@ -2386,7 +2385,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
* internal driver logging.
*/
if (nv->host_p[0] & BIT_7)
- ql2xextended_error_logging = 0x7fffffff;
+ ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
ha->flags.disable_risc_code_load = ((nv->host_p[0] & BIT_4) ? 1 : 0);
/* Always load RISC code on non ISP2[12]00 chips. */
if (!IS_QLA2100(ha) && !IS_QLA2200(ha))
@@ -4188,7 +4187,8 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
spin_unlock_irqrestore(&ha->vport_slock, flags);
} else {
- ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n");
+ ql_log(ql_log_warn, vha, 0x8023, "%s **** FAILED ****.\n",
+ __func__);
}
return(status);
@@ -4638,7 +4638,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
struct req_que *req = ha->req_q_map[0];
ql_dbg(ql_dbg_init, vha, 0x008b,
- "Loading firmware from flash (%x).\n", faddr);
+ "FW: Loading firmware from flash (%x).\n", faddr);
rval = QLA_SUCCESS;
@@ -4836,8 +4836,8 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
return QLA_FUNCTION_FAILED;
}
- ql_log(ql_log_info, vha, 0x0092,
- "Loading via request-firmware.\n");
+ ql_dbg(ql_dbg_init, vha, 0x0092,
+ "FW: Loading via request-firmware.\n");
rval = QLA_SUCCESS;
@@ -5425,7 +5425,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
if ((vha->device_flags & DFLG_NO_CABLE))
status = 0;
- ql_log(ql_log_info, vha, 0x803d,
+ ql_log(ql_log_info, vha, 0x8000,
"Configure loop done, status = 0x%x.\n", status);
}
@@ -5458,7 +5458,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
ha->fce_dma, ha->fce_bufs, ha->fce_mb,
&ha->fce_bufs);
if (rval) {
- ql_log(ql_log_warn, vha, 0x803e,
+ ql_log(ql_log_warn, vha, 0x8001,
"Unable to reinitialize FCE (%d).\n",
rval);
ha->flags.fce_enabled = 0;
@@ -5470,7 +5470,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
rval = qla2x00_enable_eft_trace(vha,
ha->eft_dma, EFT_NUM_BUFFERS);
if (rval) {
- ql_log(ql_log_warn, vha, 0x803f,
+ ql_log(ql_log_warn, vha, 0x8010,
"Unable to reinitialize EFT (%d).\n",
rval);
}
@@ -5478,7 +5478,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
}
if (!status) {
- ql_dbg(ql_dbg_taskm, vha, 0x8040,
+ ql_dbg(ql_dbg_taskm, vha, 0x8011,
"qla82xx_restart_isp succeeded.\n");
spin_lock_irqsave(&ha->vport_slock, flags);
@@ -5496,7 +5496,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha)
spin_unlock_irqrestore(&ha->vport_slock, flags);
} else {
- ql_log(ql_log_warn, vha, 0x8041,
+ ql_log(ql_log_warn, vha, 0x8016,
"qla82xx_restart_isp **** FAILED ****.\n");
}
@@ -5643,13 +5643,26 @@ qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *vha, fc_port_t *fcport)
if (priority < 0)
return QLA_FUNCTION_FAILED;
+ if (IS_QLA82XX(vha->hw)) {
+ fcport->fcp_prio = priority & 0xf;
+ return QLA_SUCCESS;
+ }
+
ret = qla24xx_set_fcp_prio(vha, fcport->loop_id, priority, mb);
- if (ret == QLA_SUCCESS)
- fcport->fcp_prio = priority;
- else
+ if (ret == QLA_SUCCESS) {
+ if (fcport->fcp_prio != priority)
+ ql_dbg(ql_dbg_user, vha, 0x709e,
+ "Updated FCP_CMND priority - value=%d loop_id=%d "
+ "port_id=%02x%02x%02x.\n", priority,
+ fcport->loop_id, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa);
+ fcport->fcp_prio = priority & 0xf;
+ } else
ql_dbg(ql_dbg_user, vha, 0x704f,
- "Unable to activate fcp priority, ret=0x%x.\n", ret);
-
+ "Unable to update FCP_CMND priority - ret=0x%x for "
+ "loop_id=%d port_id=%02x%02x%02x.\n", ret, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
+ fcport->d_id.b.al_pa);
return ret;
}
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 9902834..7cc4f36 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -131,3 +131,16 @@ qla2x00_hba_err_chk_enabled(srb_t *sp)
}
return 0;
}
+
+static inline int
+qla2x00_reset_active(scsi_qla_host_t *vha)
+{
+ scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
+
+ /* Test appropriate base-vha and vha flags. */
+ return test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
+ test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
+ test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
+ test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
+}
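
qla2x00_reset_active(), added above, folds the test_bit() checks that were previously open-coded at each call site (see the qla_attr.c and qla_bsg.c hunks) into one inline predicate over the base-vha and vha DPC flags. A standalone sketch of the consolidation, with made-up flag values (not driver code):

    #include <stdio.h>

    #define ISP_ABORT_NEEDED  (1u << 0)
    #define ABORT_ISP_ACTIVE  (1u << 1)
    #define ISP_ABORT_RETRY   (1u << 2)

    static int reset_active(unsigned long base_flags, unsigned long vha_flags)
    {
        /* any abort/reset state on the base port, or on this vport */
        return (base_flags & (ISP_ABORT_NEEDED | ABORT_ISP_ACTIVE |
                              ISP_ABORT_RETRY)) ||
               (vha_flags & (ISP_ABORT_NEEDED | ABORT_ISP_ACTIVE));
    }

    int main(void)
    {
        printf("%d\n", reset_active(ABORT_ISP_ACTIVE, 0));     /* 1 */
        printf("%d\n", reset_active(0, 0));                    /* 0 */
        return 0;
    }
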
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index a4b267e..55a9676 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -11,8 +11,6 @@
#include <scsi/scsi_tcq.h>
-static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
-
static void qla25xx_set_que(srb_t *, struct rsp_que **);
/**
* qla2x00_get_cmd_direction() - Determine control_flag data direction.
@@ -468,6 +466,42 @@ queuing_error:
}
/**
+ * qla2x00_start_iocbs() - Execute the IOCB command
+ */
+static void
+qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
+{
+ struct qla_hw_data *ha = vha->hw;
+ device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
+ struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
+
+ if (IS_QLA82XX(ha)) {
+ qla82xx_start_iocbs(vha);
+ } else {
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ /* Set chip new ring index. */
+ if (ha->mqenable) {
+ WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
+ RD_REG_DWORD(&ioreg->hccr);
+ } else if (IS_FWI2_CAPABLE(ha)) {
+ WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
+ RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+ } else {
+ WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
+ req->ring_index);
+ RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+ }
+ }
+}
+
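
The non-82xx half of qla2x00_start_iocbs() is the usual circular request-ring advance followed by a doorbell write; the wrap rule in isolation, as a standalone sketch with an arbitrary ring size:

#include <stdio.h>
#include <stdint.h>

struct ring_model {
        uint16_t ring_index;    /* producer index         */
        uint16_t length;        /* number of ring entries */
};

/* Same wrap rule as qla2x00_start_iocbs(): advance, wrap to slot 0 at the end. */
static void advance_ring(struct ring_model *r)
{
        r->ring_index++;
        if (r->ring_index == r->length)
                r->ring_index = 0;      /* ring_ptr would be reset to ring[0] here */
}

int main(void)
{
        struct ring_model r = { .ring_index = 126, .length = 128 };

        for (int i = 0; i < 4; i++) {
                advance_ring(&r);
                printf("ring_index=%u\n", r.ring_index);  /* 127, 0, 1, 2 */
        }
        return 0;
}
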
+/**
* qla2x00_marker() - Send a marker IOCB to the firmware.
* @ha: HA context
* @loop_id: loop ID
@@ -489,6 +523,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
mrk24 = NULL;
+ req = ha->req_q_map[0];
mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
if (mrk == NULL) {
ql_log(ql_log_warn, base_vha, 0x3026,
@@ -515,7 +550,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
}
wmb();
- qla2x00_isp_cmd(vha, req);
+ qla2x00_start_iocbs(vha, req);
return (QLA_SUCCESS);
}
@@ -536,89 +571,140 @@ qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
}
/**
- * qla2x00_isp_cmd() - Modify the request ring pointer.
- * @ha: HA context
+ * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
+ * Continuation Type 1 IOCBs to allocate.
+ *
+ * @dsds: number of data segment descriptors needed

*
- * Note: The caller must hold the hardware lock before calling this routine.
+ * Returns the number of IOCB entries needed to store @dsds.
*/
-static void
-qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
+inline uint16_t
+qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
- struct qla_hw_data *ha = vha->hw;
- device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
- struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
+ uint16_t iocbs;
- ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x302d,
- "IOCB data:\n");
- ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
- (uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE);
+ iocbs = 1;
+ if (dsds > 1) {
+ iocbs += (dsds - 1) / 5;
+ if ((dsds - 1) % 5)
+ iocbs++;
+ }
+ return iocbs;
+}
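
As a quick check of the formula: the Command Type 7 IOCB itself carries one data segment and each Continuation Type 1 IOCB carries five more, so e.g. 11 segments need 1 + ceil(10/5) = 3 entries. A standalone version:

#include <stdio.h>
#include <stdint.h>

/* Same formula as qla24xx_calc_iocbs(): 1 DSD in the command IOCB, 5 per continuation. */
static uint16_t calc_iocbs(uint16_t dsds)
{
        uint16_t iocbs = 1;

        if (dsds > 1) {
                iocbs += (dsds - 1) / 5;
                if ((dsds - 1) % 5)
                        iocbs++;
        }
        return iocbs;
}

int main(void)
{
        printf("%u %u %u %u\n", calc_iocbs(1), calc_iocbs(6),
               calc_iocbs(7), calc_iocbs(11));   /* 1 2 3 3 */
        return 0;
}
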
- /* Adjust ring index. */
- req->ring_index++;
- if (req->ring_index == req->length) {
- req->ring_index = 0;
- req->ring_ptr = req->ring;
- } else
- req->ring_ptr++;
+static inline int
+qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
+ uint16_t tot_dsds)
+{
+ uint32_t *cur_dsd = NULL;
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ struct scsi_cmnd *cmd;
+ struct scatterlist *cur_seg;
+ uint32_t *dsd_seg;
+ void *next_dsd;
+ uint8_t avail_dsds;
+ uint8_t first_iocb = 1;
+ uint32_t dsd_list_len;
+ struct dsd_dma *dsd_ptr;
+ struct ct6_dsd *ctx;
- /* Set chip new ring index. */
- if (IS_QLA82XX(ha)) {
- uint32_t dbval = 0x04 | (ha->portnum << 5);
+ cmd = sp->cmd;
- /* write, read and verify logic */
- dbval = dbval | (req->id << 8) | (req->ring_index << 16);
- if (ql2xdbwr)
- qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
- else {
- WRT_REG_DWORD(
- (unsigned long __iomem *)ha->nxdb_wr_ptr,
- dbval);
- wmb();
- while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
- WRT_REG_DWORD((unsigned long __iomem *)
- ha->nxdb_wr_ptr, dbval);
- wmb();
- }
- }
- } else if (ha->mqenable) {
- /* Set chip new ring index. */
- WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
- RD_REG_DWORD(&ioreg->hccr);
- } else {
- if (IS_FWI2_CAPABLE(ha)) {
- WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
- RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+ /* Update entry type to indicate Command Type 3 IOCB */
+ *((uint32_t *)(&cmd_pkt->entry_type)) =
+ __constant_cpu_to_le32(COMMAND_TYPE_6);
+
+ /* No data transfer */
+ if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
+ cmd_pkt->byte_count = __constant_cpu_to_le32(0);
+ return 0;
+ }
+
+ vha = sp->fcport->vha;
+ ha = vha->hw;
+
+ /* Set transfer direction */
+ if (cmd->sc_data_direction == DMA_TO_DEVICE) {
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_WRITE_DATA);
+ ha->qla_stats.output_bytes += scsi_bufflen(cmd);
+ } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
+ cmd_pkt->control_flags =
+ __constant_cpu_to_le16(CF_READ_DATA);
+ ha->qla_stats.input_bytes += scsi_bufflen(cmd);
+ }
+
+ cur_seg = scsi_sglist(cmd);
+ ctx = sp->ctx;
+
+ while (tot_dsds) {
+ avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
+ QLA_DSDS_PER_IOCB : tot_dsds;
+ tot_dsds -= avail_dsds;
+ dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
+
+ dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
+ struct dsd_dma, list);
+ next_dsd = dsd_ptr->dsd_addr;
+ list_del(&dsd_ptr->list);
+ ha->gbl_dsd_avail--;
+ list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
+ ctx->dsd_use_cnt++;
+ ha->gbl_dsd_inuse++;
+
+ if (first_iocb) {
+ first_iocb = 0;
+ dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
+ *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+ *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+ cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
} else {
- WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
- req->ring_index);
- RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+ *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
+ *cur_dsd++ = cpu_to_le32(dsd_list_len);
+ }
+ cur_dsd = (uint32_t *)next_dsd;
+ while (avail_dsds) {
+ dma_addr_t sle_dma;
+
+ sle_dma = sg_dma_address(cur_seg);
+ *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+ *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
+ cur_seg = sg_next(cur_seg);
+ avail_dsds--;
}
}
+ /* Null termination */
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ *cur_dsd++ = 0;
+ cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
+ return 0;
}
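
Each chained DSD list built above holds up to QLA_DSDS_PER_IOCB segment descriptors plus one extra slot used for chaining/termination, hence the (avail_dsds + 1) * QLA_DSD_SIZE allocation length. A standalone sizing check; both constants are assumed values for illustration (a 3-dword descriptor), not taken from this patch:

#include <stdio.h>
#include <stdint.h>

/* Assumed values for illustration; the real constants live in qla_def.h. */
#define QLA_DSDS_PER_IOCB 37
#define QLA_DSD_SIZE      12    /* one 3-dword segment descriptor */

int main(void)
{
        uint16_t tot_dsds = 80;  /* arbitrary scatter/gather count */

        while (tot_dsds) {
                uint16_t avail = tot_dsds > QLA_DSDS_PER_IOCB ?
                                 QLA_DSDS_PER_IOCB : tot_dsds;
                uint32_t list_len = (avail + 1) * QLA_DSD_SIZE;

                /* One dma_pool list per iteration, as in the loop above. */
                printf("list: %u segments, %u bytes\n", avail, list_len);
                tot_dsds -= avail;
        }
        return 0;
}
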
-/**
- * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
- * Continuation Type 1 IOCBs to allocate.
+/*
+ * qla24xx_calc_dsd_lists() - Determine number of DSD list required
+ * for Command Type 6.
*
* @dsds: number of data segment decriptors needed
*
- * Returns the number of IOCB entries needed to store @dsds.
+ * Returns the number of dsd list needed to store @dsds.
*/
inline uint16_t
-qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
+qla24xx_calc_dsd_lists(uint16_t dsds)
{
- uint16_t iocbs;
+ uint16_t dsd_lists = 0;
- iocbs = 1;
- if (dsds > 1) {
- iocbs += (dsds - 1) / 5;
- if ((dsds - 1) % 5)
- iocbs++;
- }
- return iocbs;
+ dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
+ if (dsds % QLA_DSDS_PER_IOCB)
+ dsd_lists++;
+ return dsd_lists;
}
+
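
qla24xx_calc_dsd_lists() is a plain ceiling division, i.e. the number of chained DSD lists the Type 6 path will consume; a standalone check, again with QLA_DSDS_PER_IOCB assumed to be 37 purely for the example:

#include <stdio.h>
#include <stdint.h>

#define QLA_DSDS_PER_IOCB 37   /* assumed value for illustration only */

/* Same ceiling division as qla24xx_calc_dsd_lists(). */
static uint16_t calc_dsd_lists(uint16_t dsds)
{
        uint16_t dsd_lists = dsds / QLA_DSDS_PER_IOCB;

        if (dsds % QLA_DSDS_PER_IOCB)
                dsd_lists++;
        return dsd_lists;
}

int main(void)
{
        printf("%u %u %u\n", calc_dsd_lists(1), calc_dsd_lists(37),
               calc_dsd_lists(38));    /* 1 1 2 */
        return 0;
}
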
/**
* qla24xx_build_scsi_iocbs() - Build IOCB command utilizing Command Type 7
* IOCB types.
@@ -945,6 +1031,7 @@ alloc_and_fill:
*cur_dsd++ = 0;
return 0;
}
+
static int
qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
uint16_t tot_dsds)
@@ -1004,7 +1091,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
sle_dma = sg_dma_address(sg);
ql_dbg(ql_dbg_io, vha, 0x300a,
"sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
- cur_dsd, i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
+ i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
sp->cmd);
*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
@@ -1731,6 +1818,7 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
uint32_t index, handle;
request_t *pkt;
uint16_t cnt, req_cnt;
+ struct srb_ctx *ctx;
pkt = NULL;
req_cnt = 1;
@@ -1759,6 +1847,12 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
req->outstanding_cmds[handle] = sp;
sp->handle = handle;
+ /* Adjust entry-counts as needed. */
+ if (sp->ctx) {
+ ctx = sp->ctx;
+ req_cnt = ctx->iocbs;
+ }
+
skip_cmd_array:
/* Check for room on request queue. */
if (req->cnt < req_cnt) {
@@ -1793,42 +1887,6 @@ queuing_error:
}
static void
-qla2x00_start_iocbs(srb_t *sp)
-{
- struct qla_hw_data *ha = sp->fcport->vha->hw;
- struct req_que *req = ha->req_q_map[0];
- device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
- struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
-
- if (IS_QLA82XX(ha)) {
- qla82xx_start_iocbs(sp);
- } else {
- /* Adjust ring index. */
- req->ring_index++;
- if (req->ring_index == req->length) {
- req->ring_index = 0;
- req->ring_ptr = req->ring;
- } else
- req->ring_ptr++;
-
- /* Set chip new ring index. */
- if (ha->mqenable) {
- WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
- RD_REG_DWORD(&ioreg->hccr);
- } else if (IS_QLA82XX(ha)) {
- qla82xx_start_iocbs(sp);
- } else if (IS_FWI2_CAPABLE(ha)) {
- WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
- RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
- } else {
- WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp),
- req->ring_index);
- RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
- }
- }
-}
-
-static void
qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
struct srb_ctx *ctx = sp->ctx;
@@ -2160,6 +2218,381 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
ct_iocb->entry_count = entry_count;
}
+/*
+ * qla82xx_start_scsi() - Send a SCSI command to the ISP
+ * @sp: command to send to the ISP
+ *
+ * Returns non-zero if a failure occurred, else zero.
+ */
+int
+qla82xx_start_scsi(srb_t *sp)
+{
+ int ret, nseg;
+ unsigned long flags;
+ struct scsi_cmnd *cmd;
+ uint32_t *clr_ptr;
+ uint32_t index;
+ uint32_t handle;
+ uint16_t cnt;
+ uint16_t req_cnt;
+ uint16_t tot_dsds;
+ struct device_reg_82xx __iomem *reg;
+ uint32_t dbval;
+ uint32_t *fcp_dl;
+ uint8_t additional_cdb_len;
+ struct ct6_dsd *ctx;
+ struct scsi_qla_host *vha = sp->fcport->vha;
+ struct qla_hw_data *ha = vha->hw;
+ struct req_que *req = NULL;
+ struct rsp_que *rsp = NULL;
+ char tag[2];
+
+ /* Setup device pointers. */
+ ret = 0;
+ reg = &ha->iobase->isp82;
+ cmd = sp->cmd;
+ req = vha->req;
+ rsp = ha->rsp_q_map[0];
+
+ /* So we know we haven't pci_map'ed anything yet */
+ tot_dsds = 0;
+
+ dbval = 0x04 | (ha->portnum << 5);
+
+ /* Send marker if required */
+ if (vha->marker_needed != 0) {
+ if (qla2x00_marker(vha, req,
+ rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x300c,
+ "qla2x00_marker failed for cmd=%p.\n", cmd);
+ return QLA_FUNCTION_FAILED;
+ }
+ vha->marker_needed = 0;
+ }
+
+ /* Acquire ring specific lock */
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ /* Check for room in outstanding command list. */
+ handle = req->current_outstanding_cmd;
+ for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ handle++;
+ if (handle == MAX_OUTSTANDING_COMMANDS)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ break;
+ }
+ if (index == MAX_OUTSTANDING_COMMANDS)
+ goto queuing_error;
+
+ /* Map the sg table so we have an accurate count of sg entries needed */
+ if (scsi_sg_count(cmd)) {
+ nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
+ scsi_sg_count(cmd), cmd->sc_data_direction);
+ if (unlikely(!nseg))
+ goto queuing_error;
+ } else
+ nseg = 0;
+
+ tot_dsds = nseg;
+
+ if (tot_dsds > ql2xshiftctondsd) {
+ struct cmd_type_6 *cmd_pkt;
+ uint16_t more_dsd_lists = 0;
+ struct dsd_dma *dsd_ptr;
+ uint16_t i;
+
+ more_dsd_lists = qla24xx_calc_dsd_lists(tot_dsds);
+ if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
+ ql_dbg(ql_dbg_io, vha, 0x300d,
+ "Num of DSD list %d is than %d for cmd=%p.\n",
+ more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
+ cmd);
+ goto queuing_error;
+ }
+
+ if (more_dsd_lists <= ha->gbl_dsd_avail)
+ goto sufficient_dsds;
+ else
+ more_dsd_lists -= ha->gbl_dsd_avail;
+
+ for (i = 0; i < more_dsd_lists; i++) {
+ dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
+ if (!dsd_ptr) {
+ ql_log(ql_log_fatal, vha, 0x300e,
+ "Failed to allocate memory for dsd_dma "
+ "for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+
+ dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
+ GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
+ if (!dsd_ptr->dsd_addr) {
+ kfree(dsd_ptr);
+ ql_log(ql_log_fatal, vha, 0x300f,
+ "Failed to allocate memory for dsd_addr "
+ "for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+ list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
+ ha->gbl_dsd_avail++;
+ }
+
+sufficient_dsds:
+ req_cnt = 1;
+
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+ &reg->req_q_out[0]);
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ }
+
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+
+ ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
+ if (!sp->ctx) {
+ ql_log(ql_log_fatal, vha, 0x3010,
+ "Failed to allocate ctx for cmd=%p.\n", cmd);
+ goto queuing_error;
+ }
+ memset(ctx, 0, sizeof(struct ct6_dsd));
+ ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
+ GFP_ATOMIC, &ctx->fcp_cmnd_dma);
+ if (!ctx->fcp_cmnd) {
+ ql_log(ql_log_fatal, vha, 0x3011,
+ "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
+ goto queuing_error_fcp_cmnd;
+ }
+
+ /* Initialize the DSD list and dma handle */
+ INIT_LIST_HEAD(&ctx->dsd_list);
+ ctx->dsd_use_cnt = 0;
+
+ if (cmd->cmd_len > 16) {
+ additional_cdb_len = cmd->cmd_len - 16;
+ if ((cmd->cmd_len % 4) != 0) {
+ /* SCSI command bigger than 16 bytes must be
+ * multiple of 4
+ */
+ ql_log(ql_log_warn, vha, 0x3012,
+ "scsi cmd len %d not multiple of 4 "
+ "for cmd=%p.\n", cmd->cmd_len, cmd);
+ goto queuing_error_fcp_cmnd;
+ }
+ ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
+ } else {
+ additional_cdb_len = 0;
+ ctx->fcp_cmnd_len = 12 + 16 + 4;
+ }
+
+ cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ /* Zero out remaining portion of packet. */
+ /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->fcport->vp_idx;
+
+ /* Build IOCB segments */
+ if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
+ goto queuing_error_fcp_cmnd;
+
+ int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
+
+ /* build FCP_CMND IU */
+ memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
+ int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
+ ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
+
+ if (cmd->sc_data_direction == DMA_TO_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 1;
+ else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
+ ctx->fcp_cmnd->additional_cdb_len |= 2;
+
+ /*
+ * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
+ */
+ if (scsi_populate_tag_msg(cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ ctx->fcp_cmnd->task_attribute =
+ TSK_HEAD_OF_QUEUE;
+ break;
+ case ORDERED_QUEUE_TAG:
+ ctx->fcp_cmnd->task_attribute =
+ TSK_ORDERED;
+ break;
+ }
+ }
+
+ /* Populate the FCP_PRIO. */
+ if (ha->flags.fcp_prio_enabled)
+ ctx->fcp_cmnd->task_attribute |=
+ sp->fcport->fcp_prio << 3;
+
+ memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
+
+ fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
+ additional_cdb_len);
+ *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
+
+ cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
+ cmd_pkt->fcp_cmnd_dseg_address[0] =
+ cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
+ cmd_pkt->fcp_cmnd_dseg_address[1] =
+ cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
+
+ sp->flags |= SRB_FCP_CMND_DMA_VALID;
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ /* Specify response queue number where
+ * completion should happen
+ */
+ cmd_pkt->entry_status = (uint8_t) rsp->id;
+ } else {
+ struct cmd_type_7 *cmd_pkt;
+ req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+ if (req->cnt < (req_cnt + 2)) {
+ cnt = (uint16_t)RD_REG_DWORD_RELAXED(
+ &reg->req_q_out[0]);
+ if (req->ring_index < cnt)
+ req->cnt = cnt - req->ring_index;
+ else
+ req->cnt = req->length -
+ (req->ring_index - cnt);
+ }
+ if (req->cnt < (req_cnt + 2))
+ goto queuing_error;
+
+ cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
+ cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+ /* Zero out remaining portion of packet. */
+ /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
+ clr_ptr = (uint32_t *)cmd_pkt + 2;
+ memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+ cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+
+ /* Set NPORT-ID and LUN number*/
+ cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+ cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+ cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+ cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+ cmd_pkt->vp_index = sp->fcport->vp_idx;
+
+ int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
+ host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
+ sizeof(cmd_pkt->lun));
+
+ /*
+ * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
+ */
+ if (scsi_populate_tag_msg(cmd, tag)) {
+ switch (tag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ cmd_pkt->task = TSK_HEAD_OF_QUEUE;
+ break;
+ case ORDERED_QUEUE_TAG:
+ cmd_pkt->task = TSK_ORDERED;
+ break;
+ }
+ }
+
+ /* Populate the FCP_PRIO. */
+ if (ha->flags.fcp_prio_enabled)
+ cmd_pkt->task |= sp->fcport->fcp_prio << 3;
+
+ /* Load SCSI command packet. */
+ memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
+ host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
+
+ cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
+
+ /* Build IOCB segments */
+ qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
+
+ /* Set total data segment count. */
+ cmd_pkt->entry_count = (uint8_t)req_cnt;
+ /* Specify response queue number where
+ * completion should happen.
+ */
+ cmd_pkt->entry_status = (uint8_t) rsp->id;
+
+ }
+ /* Build command packet. */
+ req->current_outstanding_cmd = handle;
+ req->outstanding_cmds[handle] = sp;
+ sp->handle = handle;
+ sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
+ req->cnt -= req_cnt;
+ wmb();
+
+ /* Adjust ring index. */
+ req->ring_index++;
+ if (req->ring_index == req->length) {
+ req->ring_index = 0;
+ req->ring_ptr = req->ring;
+ } else
+ req->ring_ptr++;
+
+ sp->flags |= SRB_DMA_VALID;
+
+ /* Set chip new ring index. */
+ /* write, read and verify logic */
+ dbval = dbval | (req->id << 8) | (req->ring_index << 16);
+ if (ql2xdbwr)
+ qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
+ else {
+ WRT_REG_DWORD(
+ (unsigned long __iomem *)ha->nxdb_wr_ptr,
+ dbval);
+ wmb();
+ while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+ WRT_REG_DWORD(
+ (unsigned long __iomem *)ha->nxdb_wr_ptr,
+ dbval);
+ wmb();
+ }
+ }
+
+ /* Manage unprocessed RIO/ZIO commands in response queue. */
+ if (vha->flags.process_response_queue &&
+ rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+ qla24xx_process_response_queue(vha, rsp);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ return QLA_SUCCESS;
+
+queuing_error_fcp_cmnd:
+ dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
+queuing_error:
+ if (tot_dsds)
+ scsi_dma_unmap(cmd);
+
+ if (sp->ctx) {
+ mempool_free(sp->ctx, ha->ctx_mempool);
+ sp->ctx = NULL;
+ }
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return QLA_FUNCTION_FAILED;
+}
+
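
The 82xx doorbell value written at the end of qla82xx_start_scsi() packs four fields into one 32-bit word: the 0x04 command, the port number at bit 5, the request-queue id at bit 8 and the new ring index at bit 16. A standalone packing sketch with made-up field values:

#include <stdio.h>
#include <stdint.h>

/* Same packing as qla82xx_start_scsi(): cmd | port<<5 | req_id<<8 | index<<16. */
static uint32_t pack_dbval(uint8_t portnum, uint8_t req_id, uint16_t ring_index)
{
        uint32_t dbval = 0x04 | (portnum << 5);

        dbval |= (req_id << 8) | ((uint32_t)ring_index << 16);
        return dbval;
}

int main(void)
{
        /* port 2, request queue 0, ring index 0x2a -> 0x002a0044 */
        printf("0x%08x\n", pack_dbval(2, 0, 0x2a));
        return 0;
}
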
int
qla2x00_start_sp(srb_t *sp)
{
@@ -2196,8 +2629,8 @@ qla2x00_start_sp(srb_t *sp)
break;
case SRB_CT_CMD:
IS_FWI2_CAPABLE(ha) ?
- qla24xx_ct_iocb(sp, pkt) :
- qla2x00_ct_iocb(sp, pkt);
+ qla24xx_ct_iocb(sp, pkt) :
+ qla2x00_ct_iocb(sp, pkt);
break;
case SRB_ADISC_CMD:
IS_FWI2_CAPABLE(ha) ?
@@ -2212,7 +2645,7 @@ qla2x00_start_sp(srb_t *sp)
}
wmb();
- qla2x00_start_iocbs(sp);
+ qla2x00_start_iocbs(sp->fcport->vha, ha->req_q_map[0]);
done:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return rval;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 7b91b29..349843e 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -242,32 +242,34 @@ static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
uint16_t cnt;
+ uint32_t mboxes;
uint16_t __iomem *wptr;
struct qla_hw_data *ha = vha->hw;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
+ /* Read all mbox registers? */
+ mboxes = (1 << ha->mbx_count) - 1;
+ if (!ha->mcp)
+ ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
+ else
+ mboxes = ha->mcp->in_mb;
+
/* Load return mailbox registers. */
ha->flags.mbox_int = 1;
ha->mailbox_out[0] = mb0;
+ mboxes >>= 1;
wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
for (cnt = 1; cnt < ha->mbx_count; cnt++) {
if (IS_QLA2200(ha) && cnt == 8)
wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
- if (cnt == 4 || cnt == 5)
+ if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
- else
+ else if (mboxes & BIT_0)
ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
wptr++;
- }
-
- if (ha->mcp) {
- ql_dbg(ql_dbg_async, vha, 0x5000,
- "Got mbx completion. cmd=%x.\n", ha->mcp->mb[0]);
- } else {
- ql_dbg(ql_dbg_async, vha, 0x5001,
- "MBX pointer ERROR.\n");
+ mboxes >>= 1;
}
}
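
With the new mboxes bookkeeping, only the mailbox registers named in the command's in_mb mask are read back instead of every register up to mbx_count; a userspace model of the walk (register contents and mask are invented for the demo):

#include <stdio.h>
#include <stdint.h>

#define BIT_0 0x1

int main(void)
{
        uint16_t regs[8] = { 0x4000, 1, 2, 3, 4, 5, 6, 7 }; /* fake mailbox registers */
        uint16_t mailbox_out[8] = { 0 };
        uint32_t mboxes = 0x0b;   /* pretend in_mb asked for mb0, mb1 and mb3 */

        mailbox_out[0] = regs[0]; /* mb0 is always captured (passed in as mb0) */
        mboxes >>= 1;
        for (int cnt = 1; cnt < 8; cnt++) {
                if (mboxes & BIT_0)
                        mailbox_out[cnt] = regs[cnt];  /* RD_REG_WORD(wptr) */
                mboxes >>= 1;
        }

        for (int cnt = 0; cnt < 8; cnt++)
                printf("mb%d=0x%x\n", cnt, mailbox_out[cnt]); /* mb0, mb1, mb3 captured */
        return 0;
}
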
@@ -298,7 +300,7 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
return;
ql_dbg(ql_dbg_async, vha, 0x5022,
- "Inter-Driver Commucation %s -- ACK timeout=%d.\n",
+ "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
vha->host_no, event[aen & 0xff], timeout);
rval = qla2x00_post_idc_ack_work(vha, mb);
@@ -453,7 +455,7 @@ skip_rio:
break;
case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
- ql_log(ql_log_info, vha, 0x5009,
+ ql_dbg(ql_dbg_async, vha, 0x5009,
"LIP occurred (%x).\n", mb[1]);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -487,7 +489,7 @@ skip_rio:
ha->link_data_rate = mb[1];
}
- ql_log(ql_log_info, vha, 0x500a,
+ ql_dbg(ql_dbg_async, vha, 0x500a,
"LOOP UP detected (%s Gbps).\n", link_speed);
vha->flags.management_server_logged_in = 0;
@@ -497,7 +499,7 @@ skip_rio:
case MBA_LOOP_DOWN: /* Loop Down Event */
mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
- ql_log(ql_log_info, vha, 0x500b,
+ ql_dbg(ql_dbg_async, vha, 0x500b,
"LOOP DOWN detected (%x %x %x %x).\n",
mb[1], mb[2], mb[3], mbx);
@@ -519,7 +521,7 @@ skip_rio:
break;
case MBA_LIP_RESET: /* LIP reset occurred */
- ql_log(ql_log_info, vha, 0x500c,
+ ql_dbg(ql_dbg_async, vha, 0x500c,
"LIP reset occurred (%x).\n", mb[1]);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -587,7 +589,7 @@ skip_rio:
if (IS_QLA2100(ha))
break;
- ql_log(ql_log_info, vha, 0x500f,
+ ql_dbg(ql_dbg_async, vha, 0x500f,
"Configuration change detected: value=%x.\n", mb[1]);
if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
@@ -920,15 +922,15 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
QLA_LOGIO_LOGIN_RETRIED : 0;
if (mbx->entry_status) {
ql_dbg(ql_dbg_async, vha, 0x5043,
- "Async-%s error entry - portid=%02x%02x%02x "
+ "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
"entry-status=%x status=%x state-flag=%x "
- "status-flags=%x.\n",
- type, fcport->d_id.b.domain, fcport->d_id.b.area,
+ "status-flags=%x.\n", type, sp->handle,
+ fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa, mbx->entry_status,
le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
le16_to_cpu(mbx->status_flags));
- ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5057,
+ ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
(uint8_t *)mbx, sizeof(*mbx));
goto logio_done;
@@ -940,9 +942,10 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
status = 0;
if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
ql_dbg(ql_dbg_async, vha, 0x5045,
- "Async-%s complete - portid=%02x%02x%02x mbx1=%x.\n",
- type, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa, le16_to_cpu(mbx->mb1));
+ "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
+ type, sp->handle, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ le16_to_cpu(mbx->mb1));
data[0] = MBS_COMMAND_COMPLETE;
if (ctx->type == SRB_LOGIN_CMD) {
@@ -968,11 +971,10 @@ qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
}
ql_log(ql_log_warn, vha, 0x5046,
- "Async-%s failed - portid=%02x%02x%02x status=%x "
- "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n",
- type, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa, status,
- le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
+ "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
+ "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
+ fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
le16_to_cpu(mbx->mb7));
@@ -1036,7 +1038,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
bsg_job->reply->result = DID_ERROR << 16;
bsg_job->reply->reply_payload_rcv_len = 0;
}
- ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5058,
+ ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
(uint8_t *)pkt, sizeof(*pkt));
} else {
bsg_job->reply->result = DID_OK << 16;
@@ -1111,9 +1113,9 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
le16_to_cpu(((struct els_sts_entry_24xx*)pkt)->total_byte_count);
ql_log(ql_log_info, vha, 0x503f,
- "ELS-CT pass-through-%s error comp_status-status=0x%x "
+ "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
"error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
- type, comp_status, fw_status[1], fw_status[2],
+ type, sp->handle, comp_status, fw_status[1], fw_status[2],
le16_to_cpu(((struct els_sts_entry_24xx *)
pkt)->total_byte_count));
fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
@@ -1121,9 +1123,9 @@ qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
}
else {
ql_log(ql_log_info, vha, 0x5040,
- "ELS-CT pass-through-%s error comp_status-status=0x%x "
+ "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
"error subcode 1=0x%x error subcode 2=0x%x.\n",
- type, comp_status,
+ type, sp->handle, comp_status,
le16_to_cpu(((struct els_sts_entry_24xx *)
pkt)->error_subcode_1),
le16_to_cpu(((struct els_sts_entry_24xx *)
@@ -1184,11 +1186,12 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
QLA_LOGIO_LOGIN_RETRIED : 0;
if (logio->entry_status) {
ql_log(ql_log_warn, vha, 0x5034,
- "Async-%s error entry - "
+ "Async-%s error entry - hdl=%x"
"portid=%02x%02x%02x entry-status=%x.\n",
- type, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa, logio->entry_status);
- ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5059,
+ type, sp->handle, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ logio->entry_status);
+ ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
(uint8_t *)logio, sizeof(*logio));
goto logio_done;
@@ -1196,10 +1199,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
ql_dbg(ql_dbg_async, vha, 0x5036,
- "Async-%s complete - portid=%02x%02x%02x "
- "iop0=%x.\n",
- type, fcport->d_id.b.domain, fcport->d_id.b.area,
- fcport->d_id.b.al_pa,
+ "Async-%s complete - hdl=%x portid=%02x%02x%02x "
+ "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa,
le32_to_cpu(logio->io_parameter[0]));
data[0] = MBS_COMMAND_COMPLETE;
@@ -1238,9 +1240,8 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
}
ql_dbg(ql_dbg_async, vha, 0x5037,
- "Async-%s failed - portid=%02x%02x%02x comp=%x "
- "iop0=%x iop1=%x.\n",
- type, fcport->d_id.b.domain,
+ "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
+ "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
fcport->d_id.b.area, fcport->d_id.b.al_pa,
le16_to_cpu(logio->comp_status),
le32_to_cpu(logio->io_parameter[0]),
@@ -1274,25 +1275,25 @@ qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
if (sts->entry_status) {
ql_log(ql_log_warn, vha, 0x5038,
- "Async-%s error - entry-status(%x).\n",
- type, sts->entry_status);
+ "Async-%s error - hdl=%x entry-status(%x).\n",
+ type, sp->handle, sts->entry_status);
} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
ql_log(ql_log_warn, vha, 0x5039,
- "Async-%s error - completion status(%x).\n",
- type, sts->comp_status);
+ "Async-%s error - hdl=%x completion status(%x).\n",
+ type, sp->handle, sts->comp_status);
} else if (!(le16_to_cpu(sts->scsi_status) &
SS_RESPONSE_INFO_LEN_VALID)) {
ql_log(ql_log_warn, vha, 0x503a,
- "Async-%s error - no response info(%x).\n",
- type, sts->scsi_status);
+ "Async-%s error - hdl=%x no response info(%x).\n",
+ type, sp->handle, sts->scsi_status);
} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
ql_log(ql_log_warn, vha, 0x503b,
- "Async-%s error - not enough response(%d).\n",
- type, sts->rsp_data_len);
+ "Async-%s error - hdl=%x not enough response(%d).\n",
+ type, sp->handle, sts->rsp_data_len);
} else if (sts->data[3]) {
ql_log(ql_log_warn, vha, 0x503c,
- "Async-%s error - response(%x).\n",
- type, sts->data[3]);
+ "Async-%s error - hdl=%x response(%x).\n",
+ type, sp->handle, sts->data[3]);
} else {
error = 0;
}
@@ -1337,9 +1338,6 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
}
if (pkt->entry_status != 0) {
- ql_log(ql_log_warn, vha, 0x5035,
- "Process error entry.\n");
-
qla2x00_error_entry(vha, rsp, pkt);
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
wmb();
@@ -1391,7 +1389,6 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
}
static inline void
-
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
uint32_t sense_len, struct rsp_que *rsp)
{
@@ -1413,13 +1410,14 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
if (sp->request_sense_length != 0)
rsp->status_srb = sp;
- ql_dbg(ql_dbg_io, vha, 0x301c,
- "Check condition Sense data, scsi(%ld:%d:%d:%d) cmd=%p.\n",
- sp->fcport->vha->host_no, cp->device->channel, cp->device->id,
- cp->device->lun, cp);
- if (sense_len)
+ if (sense_len) {
+ ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
+ "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
+ sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
+ cp);
ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
cp->sense_buffer, sense_len);
+ }
}
struct scsi_dif_tuple {
@@ -1506,7 +1504,7 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
}
if (k != blocks_done) {
- qla_printk(KERN_WARNING, sp->fcport->vha->hw,
+ ql_log(ql_log_warn, vha, 0x302f,
"unexpected tag values tag:lba=%x:%llx)\n",
e_ref_tag, (unsigned long long)lba_s);
return 1;
@@ -1611,7 +1609,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
sp = NULL;
if (sp == NULL) {
- ql_log(ql_log_warn, vha, 0x3017,
+ ql_dbg(ql_dbg_io, vha, 0x3017,
"Invalid status handle (0x%x).\n", sts->handle);
if (IS_QLA82XX(ha))
@@ -1623,7 +1621,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
}
cp = sp->cmd;
if (cp == NULL) {
- ql_log(ql_log_warn, vha, 0x3018,
+ ql_dbg(ql_dbg_io, vha, 0x3018,
"Command already returned (0x%x/%p).\n",
sts->handle, sp);
@@ -1670,7 +1668,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
par_sense_len -= rsp_info_len;
}
if (rsp_info_len > 3 && rsp_info[3]) {
- ql_log(ql_log_warn, vha, 0x3019,
+ ql_dbg(ql_dbg_io, vha, 0x3019,
"FCP I/O protocol failure (0x%x/0x%x).\n",
rsp_info_len, rsp_info[3]);
@@ -1701,7 +1699,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
if (!lscsi_status &&
((unsigned)(scsi_bufflen(cp) - resid) <
cp->underflow)) {
- ql_log(ql_log_warn, vha, 0x301a,
+ ql_dbg(ql_dbg_io, vha, 0x301a,
"Mid-layer underflow "
"detected (0x%x of 0x%x bytes).\n",
resid, scsi_bufflen(cp));
@@ -1713,7 +1711,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
cp->result = DID_OK << 16 | lscsi_status;
if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
- ql_log(ql_log_warn, vha, 0x301b,
+ ql_dbg(ql_dbg_io, vha, 0x301b,
"QUEUE FULL detected.\n");
break;
}
@@ -1735,7 +1733,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
scsi_set_resid(cp, resid);
if (scsi_status & SS_RESIDUAL_UNDER) {
if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
- ql_log(ql_log_warn, vha, 0x301d,
+ ql_dbg(ql_dbg_io, vha, 0x301d,
"Dropped frame(s) detected "
"(0x%x of 0x%x bytes).\n",
resid, scsi_bufflen(cp));
@@ -1747,7 +1745,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
if (!lscsi_status &&
((unsigned)(scsi_bufflen(cp) - resid) <
cp->underflow)) {
- ql_log(ql_log_warn, vha, 0x301e,
+ ql_dbg(ql_dbg_io, vha, 0x301e,
"Mid-layer underflow "
"detected (0x%x of 0x%x bytes).\n",
resid, scsi_bufflen(cp));
@@ -1756,7 +1754,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
break;
}
} else {
- ql_log(ql_log_warn, vha, 0x301f,
+ ql_dbg(ql_dbg_io, vha, 0x301f,
"Dropped frame(s) detected (0x%x "
"of 0x%x bytes).\n", resid, scsi_bufflen(cp));
@@ -1774,7 +1772,7 @@ check_scsi_status:
*/
if (lscsi_status != 0) {
if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
- ql_log(ql_log_warn, vha, 0x3020,
+ ql_dbg(ql_dbg_io, vha, 0x3020,
"QUEUE FULL detected.\n");
logit = 1;
break;
@@ -1838,10 +1836,15 @@ out:
if (logit)
ql_dbg(ql_dbg_io, vha, 0x3022,
"FCP command status: 0x%x-0x%x (0x%x) "
- "oxid=0x%x cdb=%02x%02x%02x len=0x%x "
+ "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
+ "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
"rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
- comp_status, scsi_status, cp->result, ox_id, cp->cmnd[0],
- cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len,
+ comp_status, scsi_status, cp->result, vha->host_no,
+ cp->device->id, cp->device->lun, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
+ cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
+ cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
+ cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
resid_len, fw_resid_len);
if (rsp->status_srb == NULL)
@@ -1899,6 +1902,45 @@ qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
}
}
+static int
+qla2x00_free_sp_ctx(scsi_qla_host_t *vha, srb_t *sp)
+{
+ struct qla_hw_data *ha = vha->hw;
+ struct srb_ctx *ctx;
+
+ if (!sp->ctx)
+ return 1;
+
+ ctx = sp->ctx;
+
+ if (ctx->type == SRB_LOGIN_CMD ||
+ ctx->type == SRB_LOGOUT_CMD ||
+ ctx->type == SRB_TM_CMD) {
+ ctx->u.iocb_cmd->done(sp);
+ return 0;
+ } else if (ctx->type == SRB_ADISC_CMD) {
+ ctx->u.iocb_cmd->free(sp);
+ return 0;
+ } else {
+ struct fc_bsg_job *bsg_job;
+
+ bsg_job = ctx->u.bsg_job;
+ if (ctx->type == SRB_ELS_CMD_HST ||
+ ctx->type == SRB_CT_CMD)
+ kfree(sp->fcport);
+
+ bsg_job->reply->reply_data.ctels_reply.status =
+ FC_CTELS_STATUS_OK;
+ bsg_job->reply->result = DID_ERROR << 16;
+ bsg_job->reply->reply_payload_rcv_len = 0;
+ kfree(sp->ctx);
+ mempool_free(sp, ha->srb_mempool);
+ bsg_job->job_done(bsg_job);
+ return 0;
+ }
+ return 1;
+}
+
/**
* qla2x00_error_entry() - Process an error entry.
* @ha: SCSI driver HA context
@@ -1909,7 +1951,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
srb_t *sp;
struct qla_hw_data *ha = vha->hw;
- uint32_t handle = LSW(pkt->handle);
+ const char func[] = "ERROR-IOCB";
uint16_t que = MSW(pkt->handle);
struct req_que *req = ha->req_q_map[que];
@@ -1932,28 +1974,20 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
ql_dbg(ql_dbg_async, vha, 0x502f,
"UNKNOWN flag error.\n");
- /* Validate handle. */
- if (handle < MAX_OUTSTANDING_COMMANDS)
- sp = req->outstanding_cmds[handle];
- else
- sp = NULL;
-
+ sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (sp) {
- /* Free outstanding command slot. */
- req->outstanding_cmds[handle] = NULL;
-
- /* Bad payload or header */
- if (pkt->entry_status &
- (RF_INV_E_ORDER | RF_INV_E_COUNT |
- RF_INV_E_PARAM | RF_INV_E_TYPE)) {
- sp->cmd->result = DID_ERROR << 16;
- } else if (pkt->entry_status & RF_BUSY) {
- sp->cmd->result = DID_BUS_BUSY << 16;
- } else {
- sp->cmd->result = DID_ERROR << 16;
+ if (qla2x00_free_sp_ctx(vha, sp)) {
+ if (pkt->entry_status &
+ (RF_INV_E_ORDER | RF_INV_E_COUNT |
+ RF_INV_E_PARAM | RF_INV_E_TYPE)) {
+ sp->cmd->result = DID_ERROR << 16;
+ } else if (pkt->entry_status & RF_BUSY) {
+ sp->cmd->result = DID_BUS_BUSY << 16;
+ } else {
+ sp->cmd->result = DID_ERROR << 16;
+ }
+ qla2x00_sp_compl(ha, sp);
}
- qla2x00_sp_compl(ha, sp);
-
} else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7
|| pkt->entry_type == COMMAND_TYPE_6) {
@@ -1977,26 +2011,30 @@ static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
uint16_t cnt;
+ uint32_t mboxes;
uint16_t __iomem *wptr;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
+ /* Read all mbox registers? */
+ mboxes = (1 << ha->mbx_count) - 1;
+ if (!ha->mcp)
+ ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
+ else
+ mboxes = ha->mcp->in_mb;
+
/* Load return mailbox registers. */
ha->flags.mbox_int = 1;
ha->mailbox_out[0] = mb0;
+ mboxes >>= 1;
wptr = (uint16_t __iomem *)&reg->mailbox1;
for (cnt = 1; cnt < ha->mbx_count; cnt++) {
- ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
- wptr++;
- }
+ if (mboxes & BIT_0)
+ ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
- if (ha->mcp) {
- ql_dbg(ql_dbg_async, vha, 0x504d,
- "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
- } else {
- ql_dbg(ql_dbg_async, vha, 0x504e,
- "MBX pointer ERROR.\n");
+ mboxes >>= 1;
+ wptr++;
}
}
@@ -2025,9 +2063,6 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
}
if (pkt->entry_status != 0) {
- ql_dbg(ql_dbg_async, vha, 0x5029,
- "Process error entry.\n");
-
qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
((response_t *)pkt)->signature = RESPONSE_PROCESSED;
wmb();
@@ -2055,7 +2090,6 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
break;
case CT_IOCB_TYPE:
qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
- clear_bit(MBX_INTERRUPT, &vha->hw->mbx_cmd_flags);
break;
case ELS_IOCB_TYPE:
qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 82a3353..08f1d01 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -342,6 +342,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ /* Allow next mbx cmd to come in. */
+ complete(&ha->mbx_cmd_comp);
if (ha->isp_ops->abort_isp(vha)) {
/* Failed. retry later. */
set_bit(ISP_ABORT_NEEDED,
@@ -350,6 +352,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
clear_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
ql_dbg(ql_dbg_mbx, base_vha, 0x101f,
"Finished abort_isp.\n");
+ goto mbx_done;
}
}
}
@@ -358,6 +361,7 @@ premature_exit:
/* Allow next mbx cmd to come in. */
complete(&ha->mbx_cmd_comp);
+mbx_done:
if (rval) {
ql_dbg(ql_dbg_mbx, base_vha, 0x1020,
"**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, cmd=%x ****.\n",
@@ -2581,7 +2585,8 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_mbx, vha, 0x10a1, "Entered %s.\n", __func__);
mcp->mb[0] = MBC_STOP_FIRMWARE;
- mcp->out_mb = MBX_0;
+ mcp->mb[1] = 0;
+ mcp->out_mb = MBX_1|MBX_0;
mcp->in_mb = MBX_0;
mcp->tov = 5;
mcp->flags = 0;
@@ -2887,7 +2892,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
if (vp_idx == 0 && (MSB(stat) != 1))
goto reg_needed;
- if (MSB(stat) == 1) {
+ if (MSB(stat) != 0) {
ql_dbg(ql_dbg_mbx, vha, 0x10ba,
"Could not acquire ID for VP[%d].\n", vp_idx);
return;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 0355493..270ba31 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -369,7 +369,7 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
ql_dbg(ql_dbg_p3p, vha, 0xb000,
"%s: Written crbwin (0x%x) "
"!= Read crbwin (0x%x), off=0x%lx.\n",
- ha->crb_win, win_read, *off);
+ __func__, ha->crb_win, win_read, *off);
}
*off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase;
}
@@ -409,7 +409,7 @@ qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off)
}
/* strange address given */
ql_dbg(ql_dbg_p3p, vha, 0xb001,
- "%x: Warning: unm_nic_pci_set_crbwindow "
+ "%s: Warning: unm_nic_pci_set_crbwindow "
"called with an unknown address(%llx).\n",
QLA2XXX_DRIVER_NAME, off);
return off;
@@ -1055,7 +1055,7 @@ ql82xx_rom_lock_d(struct qla_hw_data *ha)
"ROM lock failed.\n");
return -1;
}
- return 0;;
+ return 0;
}
static int
@@ -1165,19 +1165,6 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff);
else
qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff);
-
- /* reset ms */
- val = qla82xx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
- val |= (1 << 1);
- qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
- msleep(20);
-
- /* unreset ms */
- val = qla82xx_rd_32(ha, QLA82XX_CRB_QDR_NET + 0xe4);
- val &= ~(1 << 1);
- qla82xx_wr_32(ha, QLA82XX_CRB_QDR_NET + 0xe4, val);
- msleep(20);
-
qla82xx_rom_unlock(ha);
/* Read the signature value from the flash.
@@ -1711,12 +1698,12 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006,
"nx_pci_base=%p iobase=%p "
"max_req_queues=%d msix_count=%d.\n",
- ha->nx_pcibase, ha->iobase,
+ (void *)ha->nx_pcibase, ha->iobase,
ha->max_req_queues, ha->msix_count);
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010,
"nx_pci_base=%p iobase=%p "
"max_req_queues=%d msix_count=%d.\n",
- ha->nx_pcibase, ha->iobase,
+ (void *)ha->nx_pcibase, ha->iobase,
ha->max_req_queues, ha->msix_count);
return 0;
@@ -1744,7 +1731,7 @@ qla82xx_pci_config(scsi_qla_host_t *vha)
ret = pci_set_mwi(ha->pdev);
ha->chip_revision = ha->pdev->revision;
ql_dbg(ql_dbg_init, vha, 0x0043,
- "Chip revision:%ld.\n",
+ "Chip revision:%d.\n",
ha->chip_revision);
return 0;
}
@@ -2023,13 +2010,9 @@ qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
wptr++;
}
- if (ha->mcp) {
- ql_dbg(ql_dbg_async, vha, 0x5052,
- "Got mailbox completion. cmd=%x.\n", ha->mcp->mb[0]);
- } else {
+ if (!ha->mcp)
ql_dbg(ql_dbg_async, vha, 0x5053,
"MBX pointer ERROR.\n");
- }
}
/*
@@ -2543,484 +2526,6 @@ qla82xx_start_firmware(scsi_qla_host_t *vha)
return qla82xx_check_rcvpeg_state(ha);
}
-static inline int
-qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
- uint16_t tot_dsds)
-{
- uint32_t *cur_dsd = NULL;
- scsi_qla_host_t *vha;
- struct qla_hw_data *ha;
- struct scsi_cmnd *cmd;
- struct scatterlist *cur_seg;
- uint32_t *dsd_seg;
- void *next_dsd;
- uint8_t avail_dsds;
- uint8_t first_iocb = 1;
- uint32_t dsd_list_len;
- struct dsd_dma *dsd_ptr;
- struct ct6_dsd *ctx;
-
- cmd = sp->cmd;
-
- /* Update entry type to indicate Command Type 3 IOCB */
- *((uint32_t *)(&cmd_pkt->entry_type)) =
- __constant_cpu_to_le32(COMMAND_TYPE_6);
-
- /* No data transfer */
- if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
- cmd_pkt->byte_count = __constant_cpu_to_le32(0);
- return 0;
- }
-
- vha = sp->fcport->vha;
- ha = vha->hw;
-
- /* Set transfer direction */
- if (cmd->sc_data_direction == DMA_TO_DEVICE) {
- cmd_pkt->control_flags =
- __constant_cpu_to_le16(CF_WRITE_DATA);
- ha->qla_stats.output_bytes += scsi_bufflen(cmd);
- } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
- cmd_pkt->control_flags =
- __constant_cpu_to_le16(CF_READ_DATA);
- ha->qla_stats.input_bytes += scsi_bufflen(cmd);
- }
-
- cur_seg = scsi_sglist(cmd);
- ctx = sp->ctx;
-
- while (tot_dsds) {
- avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
- QLA_DSDS_PER_IOCB : tot_dsds;
- tot_dsds -= avail_dsds;
- dsd_list_len = (avail_dsds + 1) * QLA_DSD_SIZE;
-
- dsd_ptr = list_first_entry(&ha->gbl_dsd_list,
- struct dsd_dma, list);
- next_dsd = dsd_ptr->dsd_addr;
- list_del(&dsd_ptr->list);
- ha->gbl_dsd_avail--;
- list_add_tail(&dsd_ptr->list, &ctx->dsd_list);
- ctx->dsd_use_cnt++;
- ha->gbl_dsd_inuse++;
-
- if (first_iocb) {
- first_iocb = 0;
- dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address;
- *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
- *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
- cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len);
- } else {
- *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma));
- *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma));
- *cur_dsd++ = cpu_to_le32(dsd_list_len);
- }
- cur_dsd = (uint32_t *)next_dsd;
- while (avail_dsds) {
- dma_addr_t sle_dma;
-
- sle_dma = sg_dma_address(cur_seg);
- *cur_dsd++ = cpu_to_le32(LSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(MSD(sle_dma));
- *cur_dsd++ = cpu_to_le32(sg_dma_len(cur_seg));
- cur_seg = sg_next(cur_seg);
- avail_dsds--;
- }
- }
-
- /* Null termination */
- *cur_dsd++ = 0;
- *cur_dsd++ = 0;
- *cur_dsd++ = 0;
- cmd_pkt->control_flags |= CF_DATA_SEG_DESCR_ENABLE;
- return 0;
-}
-
-/*
- * qla82xx_calc_dsd_lists() - Determine number of DSD list required
- * for Command Type 6.
- *
- * @dsds: number of data segment decriptors needed
- *
- * Returns the number of dsd list needed to store @dsds.
- */
-inline uint16_t
-qla82xx_calc_dsd_lists(uint16_t dsds)
-{
- uint16_t dsd_lists = 0;
-
- dsd_lists = (dsds/QLA_DSDS_PER_IOCB);
- if (dsds % QLA_DSDS_PER_IOCB)
- dsd_lists++;
- return dsd_lists;
-}
-
-/*
- * qla82xx_start_scsi() - Send a SCSI command to the ISP
- * @sp: command to send to the ISP
- *
- * Returns non-zero if a failure occurred, else zero.
- */
-int
-qla82xx_start_scsi(srb_t *sp)
-{
- int ret, nseg;
- unsigned long flags;
- struct scsi_cmnd *cmd;
- uint32_t *clr_ptr;
- uint32_t index;
- uint32_t handle;
- uint16_t cnt;
- uint16_t req_cnt;
- uint16_t tot_dsds;
- struct device_reg_82xx __iomem *reg;
- uint32_t dbval;
- uint32_t *fcp_dl;
- uint8_t additional_cdb_len;
- struct ct6_dsd *ctx;
- struct scsi_qla_host *vha = sp->fcport->vha;
- struct qla_hw_data *ha = vha->hw;
- struct req_que *req = NULL;
- struct rsp_que *rsp = NULL;
- char tag[2];
-
- /* Setup device pointers. */
- ret = 0;
- reg = &ha->iobase->isp82;
- cmd = sp->cmd;
- req = vha->req;
- rsp = ha->rsp_q_map[0];
-
- /* So we know we haven't pci_map'ed anything yet */
- tot_dsds = 0;
-
- dbval = 0x04 | (ha->portnum << 5);
-
- /* Send marker if required */
- if (vha->marker_needed != 0) {
- if (qla2x00_marker(vha, req,
- rsp, 0, 0, MK_SYNC_ALL) != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x300c,
- "qla2x00_marker failed for cmd=%p.\n", cmd);
- return QLA_FUNCTION_FAILED;
- }
- vha->marker_needed = 0;
- }
-
- /* Acquire ring specific lock */
- spin_lock_irqsave(&ha->hardware_lock, flags);
-
- /* Check for room in outstanding command list. */
- handle = req->current_outstanding_cmd;
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
- handle++;
- if (handle == MAX_OUTSTANDING_COMMANDS)
- handle = 1;
- if (!req->outstanding_cmds[handle])
- break;
- }
- if (index == MAX_OUTSTANDING_COMMANDS)
- goto queuing_error;
-
- /* Map the sg table so we have an accurate count of sg entries needed */
- if (scsi_sg_count(cmd)) {
- nseg = dma_map_sg(&ha->pdev->dev, scsi_sglist(cmd),
- scsi_sg_count(cmd), cmd->sc_data_direction);
- if (unlikely(!nseg))
- goto queuing_error;
- } else
- nseg = 0;
-
- tot_dsds = nseg;
-
- if (tot_dsds > ql2xshiftctondsd) {
- struct cmd_type_6 *cmd_pkt;
- uint16_t more_dsd_lists = 0;
- struct dsd_dma *dsd_ptr;
- uint16_t i;
-
- more_dsd_lists = qla82xx_calc_dsd_lists(tot_dsds);
- if ((more_dsd_lists + ha->gbl_dsd_inuse) >= NUM_DSD_CHAIN) {
- ql_dbg(ql_dbg_io, vha, 0x300d,
- "Num of DSD list %d is than %d for cmd=%p.\n",
- more_dsd_lists + ha->gbl_dsd_inuse, NUM_DSD_CHAIN,
- cmd);
- goto queuing_error;
- }
-
- if (more_dsd_lists <= ha->gbl_dsd_avail)
- goto sufficient_dsds;
- else
- more_dsd_lists -= ha->gbl_dsd_avail;
-
- for (i = 0; i < more_dsd_lists; i++) {
- dsd_ptr = kzalloc(sizeof(struct dsd_dma), GFP_ATOMIC);
- if (!dsd_ptr) {
- ql_log(ql_log_fatal, vha, 0x300e,
- "Failed to allocate memory for dsd_dma "
- "for cmd=%p.\n", cmd);
- goto queuing_error;
- }
-
- dsd_ptr->dsd_addr = dma_pool_alloc(ha->dl_dma_pool,
- GFP_ATOMIC, &dsd_ptr->dsd_list_dma);
- if (!dsd_ptr->dsd_addr) {
- kfree(dsd_ptr);
- ql_log(ql_log_fatal, vha, 0x300f,
- "Failed to allocate memory for dsd_addr "
- "for cmd=%p.\n", cmd);
- goto queuing_error;
- }
- list_add_tail(&dsd_ptr->list, &ha->gbl_dsd_list);
- ha->gbl_dsd_avail++;
- }
-
-sufficient_dsds:
- req_cnt = 1;
-
- if (req->cnt < (req_cnt + 2)) {
- cnt = (uint16_t)RD_REG_DWORD_RELAXED(
- &reg->req_q_out[0]);
- if (req->ring_index < cnt)
- req->cnt = cnt - req->ring_index;
- else
- req->cnt = req->length -
- (req->ring_index - cnt);
- }
-
- if (req->cnt < (req_cnt + 2))
- goto queuing_error;
-
- ctx = sp->ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
- if (!sp->ctx) {
- ql_log(ql_log_fatal, vha, 0x3010,
- "Failed to allocate ctx for cmd=%p.\n", cmd);
- goto queuing_error;
- }
- memset(ctx, 0, sizeof(struct ct6_dsd));
- ctx->fcp_cmnd = dma_pool_alloc(ha->fcp_cmnd_dma_pool,
- GFP_ATOMIC, &ctx->fcp_cmnd_dma);
- if (!ctx->fcp_cmnd) {
- ql_log(ql_log_fatal, vha, 0x3011,
- "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
- goto queuing_error_fcp_cmnd;
- }
-
- /* Initialize the DSD list and dma handle */
- INIT_LIST_HEAD(&ctx->dsd_list);
- ctx->dsd_use_cnt = 0;
-
- if (cmd->cmd_len > 16) {
- additional_cdb_len = cmd->cmd_len - 16;
- if ((cmd->cmd_len % 4) != 0) {
- /* SCSI command bigger than 16 bytes must be
- * multiple of 4
- */
- ql_log(ql_log_warn, vha, 0x3012,
- "scsi cmd len %d not multiple of 4 "
- "for cmd=%p.\n", cmd->cmd_len, cmd);
- goto queuing_error_fcp_cmnd;
- }
- ctx->fcp_cmnd_len = 12 + cmd->cmd_len + 4;
- } else {
- additional_cdb_len = 0;
- ctx->fcp_cmnd_len = 12 + 16 + 4;
- }
-
- cmd_pkt = (struct cmd_type_6 *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
-
- /* Zero out remaining portion of packet. */
- /* tagged queuing modifier -- default is TSK_SIMPLE (0). */
- clr_ptr = (uint32_t *)cmd_pkt + 2;
- memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
- cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
-
- /* Set NPORT-ID and LUN number*/
- cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
- cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
- cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
- cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
- cmd_pkt->vp_index = sp->fcport->vp_idx;
-
- /* Build IOCB segments */
- if (qla2xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
- goto queuing_error_fcp_cmnd;
-
- int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
- host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
-
- /* build FCP_CMND IU */
- memset(ctx->fcp_cmnd, 0, sizeof(struct fcp_cmnd));
- int_to_scsilun(sp->cmd->device->lun, &ctx->fcp_cmnd->lun);
- ctx->fcp_cmnd->additional_cdb_len = additional_cdb_len;
-
- if (cmd->sc_data_direction == DMA_TO_DEVICE)
- ctx->fcp_cmnd->additional_cdb_len |= 1;
- else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
- ctx->fcp_cmnd->additional_cdb_len |= 2;
-
- /*
- * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
- */
- if (scsi_populate_tag_msg(cmd, tag)) {
- switch (tag[0]) {
- case HEAD_OF_QUEUE_TAG:
- ctx->fcp_cmnd->task_attribute =
- TSK_HEAD_OF_QUEUE;
- break;
- case ORDERED_QUEUE_TAG:
- ctx->fcp_cmnd->task_attribute =
- TSK_ORDERED;
- break;
- }
- }
-
- memcpy(ctx->fcp_cmnd->cdb, cmd->cmnd, cmd->cmd_len);
-
- fcp_dl = (uint32_t *)(ctx->fcp_cmnd->cdb + 16 +
- additional_cdb_len);
- *fcp_dl = htonl((uint32_t)scsi_bufflen(cmd));
-
- cmd_pkt->fcp_cmnd_dseg_len = cpu_to_le16(ctx->fcp_cmnd_len);
- cmd_pkt->fcp_cmnd_dseg_address[0] =
- cpu_to_le32(LSD(ctx->fcp_cmnd_dma));
- cmd_pkt->fcp_cmnd_dseg_address[1] =
- cpu_to_le32(MSD(ctx->fcp_cmnd_dma));
-
- sp->flags |= SRB_FCP_CMND_DMA_VALID;
- cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
- /* Set total data segment count. */
- cmd_pkt->entry_count = (uint8_t)req_cnt;
- /* Specify response queue number where
- * completion should happen
- */
- cmd_pkt->entry_status = (uint8_t) rsp->id;
- } else {
- struct cmd_type_7 *cmd_pkt;
- req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
- if (req->cnt < (req_cnt + 2)) {
- cnt = (uint16_t)RD_REG_DWORD_RELAXED(
- &reg->req_q_out[0]);
- if (req->ring_index < cnt)
- req->cnt = cnt - req->ring_index;
- else
- req->cnt = req->length -
- (req->ring_index - cnt);
- }
- if (req->cnt < (req_cnt + 2))
- goto queuing_error;
-
- cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
- cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
-
- /* Zero out remaining portion of packet. */
- /* tagged queuing modifier -- default is TSK_SIMPLE (0).*/
- clr_ptr = (uint32_t *)cmd_pkt + 2;
- memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
- cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
-
- /* Set NPORT-ID and LUN number*/
- cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
- cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
- cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
- cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
- cmd_pkt->vp_index = sp->fcport->vp_idx;
-
- int_to_scsilun(sp->cmd->device->lun, &cmd_pkt->lun);
- host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
- sizeof(cmd_pkt->lun));
-
- /*
- * Update tagged queuing modifier -- default is TSK_SIMPLE (0).
- */
- if (scsi_populate_tag_msg(cmd, tag)) {
- switch (tag[0]) {
- case HEAD_OF_QUEUE_TAG:
- cmd_pkt->task = TSK_HEAD_OF_QUEUE;
- break;
- case ORDERED_QUEUE_TAG:
- cmd_pkt->task = TSK_ORDERED;
- break;
- }
- }
-
- /* Load SCSI command packet. */
- memcpy(cmd_pkt->fcp_cdb, cmd->cmnd, cmd->cmd_len);
- host_to_fcp_swap(cmd_pkt->fcp_cdb, sizeof(cmd_pkt->fcp_cdb));
-
- cmd_pkt->byte_count = cpu_to_le32((uint32_t)scsi_bufflen(cmd));
-
- /* Build IOCB segments */
- qla24xx_build_scsi_iocbs(sp, cmd_pkt, tot_dsds);
-
- /* Set total data segment count. */
- cmd_pkt->entry_count = (uint8_t)req_cnt;
- /* Specify response queue number where
- * completion should happen.
- */
- cmd_pkt->entry_status = (uint8_t) rsp->id;
-
- }
- /* Build command packet. */
- req->current_outstanding_cmd = handle;
- req->outstanding_cmds[handle] = sp;
- sp->handle = handle;
- sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
- req->cnt -= req_cnt;
- wmb();
-
- /* Adjust ring index. */
- req->ring_index++;
- if (req->ring_index == req->length) {
- req->ring_index = 0;
- req->ring_ptr = req->ring;
- } else
- req->ring_ptr++;
-
- sp->flags |= SRB_DMA_VALID;
-
- /* Set chip new ring index. */
- /* write, read and verify logic */
- dbval = dbval | (req->id << 8) | (req->ring_index << 16);
- if (ql2xdbwr)
- qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
- else {
- WRT_REG_DWORD(
- (unsigned long __iomem *)ha->nxdb_wr_ptr,
- dbval);
- wmb();
- while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
- WRT_REG_DWORD(
- (unsigned long __iomem *)ha->nxdb_wr_ptr,
- dbval);
- wmb();
- }
- }
-
- /* Manage unprocessed RIO/ZIO commands in response queue. */
- if (vha->flags.process_response_queue &&
- rsp->ring_ptr->signature != RESPONSE_PROCESSED)
- qla24xx_process_response_queue(vha, rsp);
-
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- return QLA_SUCCESS;
-
-queuing_error_fcp_cmnd:
- dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd, ctx->fcp_cmnd_dma);
-queuing_error:
- if (tot_dsds)
- scsi_dma_unmap(cmd);
-
- if (sp->ctx) {
- mempool_free(sp->ctx, ha->ctx_mempool);
- sp->ctx = NULL;
- }
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
- return QLA_FUNCTION_FAILED;
-}
-
static uint32_t *
qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
uint32_t length)
@@ -3272,9 +2777,9 @@ qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
}
void
-qla82xx_start_iocbs(srb_t *sp)
+qla82xx_start_iocbs(scsi_qla_host_t *vha)
{
- struct qla_hw_data *ha = sp->fcport->vha->hw;
+ struct qla_hw_data *ha = vha->hw;
struct req_que *req = ha->req_q_map[0];
struct device_reg_82xx __iomem *reg;
uint32_t dbval;
@@ -3659,11 +3164,10 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
qla82xx_md_free(vha);
/* ALlocate MiniDump resources */
qla82xx_md_prep(vha);
- } else
- ql_log(ql_log_info, vha, 0xb02e,
- "Firmware dump available to retrieve\n",
- vha->host_no);
- }
+ }
+ } else
+ ql_log(ql_log_info, vha, 0xb02e,
+ "Firmware dump available to retrieve\n");
}
return rval;
}
@@ -3758,7 +3262,6 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
switch (dev_state) {
case QLA82XX_DEV_READY:
- qla82xx_check_md_needed(vha);
ha->flags.isp82xx_reset_owner = 0;
goto exit;
case QLA82XX_DEV_COLD:
@@ -3876,7 +3379,7 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
QLA82XX_CRB_PEG_NET_3 + 0x3c),
qla82xx_rd_32(ha,
QLA82XX_CRB_PEG_NET_4 + 0x3c));
- if (LSW(MSB(halt_status)) == 0x67)
+ if (((halt_status & 0x1fffff00) >> 8) == 0x67)
ql_log(ql_log_warn, vha, 0xb052,
"Firmware aborted with "
"error code 0x00006700. Device is "
@@ -4067,7 +3570,7 @@ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha)
}
}
ql_dbg(ql_dbg_p3p, vha, 0xb027,
- "%s status=%d.\n", status);
+ "%s: status=%d.\n", __func__, status);
return status;
}
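
On the qla82xx_watchdog() hunk above: assuming MSB() extracts bits 8-15 and LSW() the low 16 bits, as in the driver's usual byte/word helpers, the old test matched any status with 0x67 in byte 1, while the new mask additionally requires bits 16-28 to be clear, so only the genuine 0x00006700 abort code fires the warning. A quick comparison:

#include <stdio.h>
#include <stdint.h>

/* Assumed to match the driver's byte/word helpers in qla_def.h. */
#define MSB(x)  ((uint8_t)((uint16_t)(x) >> 8))
#define LSW(x)  ((uint16_t)(x))

static int old_check(uint32_t halt_status)
{
        return LSW(MSB(halt_status)) == 0x67;
}

static int new_check(uint32_t halt_status)
{
        return ((halt_status & 0x1fffff00) >> 8) == 0x67;
}

int main(void)
{
        /* Genuine 0x00006700 abort code vs. an unrelated status with 0x67 in byte 1. */
        printf("0x00006700: old=%d new=%d\n", old_check(0x00006700), new_check(0x00006700));
        printf("0x12346700: old=%d new=%d\n", old_check(0x12346700), new_check(0x12346700));
        return 0;
}
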
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index f9e5b85..036030c 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -83,6 +83,9 @@ MODULE_PARM_DESC(ql2xextended_error_logging,
"\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n"
"\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n"
"\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
+ "\t\t0x1e400000 - Preferred value for capturing essential "
+ "debug information (equivalent to old "
+ "ql2xextended_error_logging=1).\n"
"\t\tDo LOGICAL OR of the value to enable more than one level");
int ql2xshiftctondsd = 6;
@@ -199,7 +202,7 @@ int ql2xmdcapmask = 0x1F;
module_param(ql2xmdcapmask, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdcapmask,
"Set the Minidump driver capture mask level. "
- "Default is 0x7F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
+ "Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");
int ql2xmdenable = 1;
module_param(ql2xmdenable, int, S_IRUGO);
@@ -622,6 +625,12 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
cmd->result = DID_NO_CONNECT << 16;
goto qc24_fail_command;
}
+
+ if (!fcport) {
+ cmd->result = DID_NO_CONNECT << 16;
+ goto qc24_fail_command;
+ }
+
if (atomic_read(&fcport->state) != FCS_ONLINE) {
if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
@@ -847,14 +856,10 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
int wait = 0;
struct qla_hw_data *ha = vha->hw;
- ql_dbg(ql_dbg_taskm, vha, 0x8000,
- "Entered %s for cmd=%p.\n", __func__, cmd);
if (!CMD_SP(cmd))
return SUCCESS;
ret = fc_block_scsi_eh(cmd);
- ql_dbg(ql_dbg_taskm, vha, 0x8001,
- "Return value of fc_block_scsi_eh=%d.\n", ret);
if (ret != 0)
return ret;
ret = SUCCESS;
@@ -870,18 +875,20 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
}
ql_dbg(ql_dbg_taskm, vha, 0x8002,
- "Aborting sp=%p cmd=%p from RISC ", sp, cmd);
+ "Aborting from RISC nexus=%ld:%d:%d sp=%p cmd=%p\n",
+ vha->host_no, id, lun, sp, cmd);
/* Get a reference to the sp and drop the lock.*/
sp_get(sp);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (ha->isp_ops->abort_command(sp)) {
+ ret = FAILED;
ql_dbg(ql_dbg_taskm, vha, 0x8003,
- "Abort command mbx failed for cmd=%p.\n", cmd);
+ "Abort command mbx failed cmd=%p.\n", cmd);
} else {
ql_dbg(ql_dbg_taskm, vha, 0x8004,
- "Abort command mbx success.\n");
+ "Abort command mbx success cmd=%p.\n", cmd);
wait = 1;
}
@@ -897,13 +904,14 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
if (wait) {
if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x8006,
- "Abort handler timed out for cmd=%p.\n", cmd);
+ "Abort handler timed out cmd=%p.\n", cmd);
ret = FAILED;
}
}
ql_log(ql_log_info, vha, 0x801c,
- "Abort command issued -- %d %x.\n", wait, ret);
+ "Abort command issued nexus=%ld:%d:%d -- %d %x.\n",
+ vha->host_no, id, lun, wait, ret);
return ret;
}
@@ -972,19 +980,15 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
int err;
if (!fcport) {
- ql_log(ql_log_warn, vha, 0x8007,
- "fcport is NULL.\n");
return FAILED;
}
err = fc_block_scsi_eh(cmd);
- ql_dbg(ql_dbg_taskm, vha, 0x8008,
- "fc_block_scsi_eh ret=%d.\n", err);
if (err != 0)
return err;
ql_log(ql_log_info, vha, 0x8009,
- "%s RESET ISSUED for id %d lun %d cmd=%p.\n", name,
+ "%s RESET ISSUED nexus=%ld:%d:%d cmd=%p.\n", name, vha->host_no,
cmd->device->id, cmd->device->lun, cmd);
err = 0;
@@ -1009,15 +1013,16 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
}
ql_log(ql_log_info, vha, 0x800e,
- "%s RESET SUCCEEDED for id %d lun %d cmd=%p.\n", name,
- cmd->device->id, cmd->device->lun, cmd);
+ "%s RESET SUCCEEDED nexus:%ld:%d:%d cmd=%p.\n", name,
+ vha->host_no, cmd->device->id, cmd->device->lun, cmd);
return SUCCESS;
eh_reset_failed:
ql_log(ql_log_info, vha, 0x800f,
- "%s RESET FAILED: %s for id %d lun %d cmd=%p.\n", name,
- reset_errors[err], cmd->device->id, cmd->device->lun);
+ "%s RESET FAILED: %s nexus=%ld:%d:%d cmd=%p.\n", name,
+ reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
+ cmd);
return FAILED;
}
@@ -1068,20 +1073,16 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
lun = cmd->device->lun;
if (!fcport) {
- ql_log(ql_log_warn, vha, 0x8010,
- "fcport is NULL.\n");
return ret;
}
ret = fc_block_scsi_eh(cmd);
- ql_dbg(ql_dbg_taskm, vha, 0x8011,
- "fc_block_scsi_eh ret=%d.\n", ret);
if (ret != 0)
return ret;
ret = FAILED;
ql_log(ql_log_info, vha, 0x8012,
- "BUS RESET ISSUED for id %d lun %d.\n", id, lun);
+ "BUS RESET ISSUED nexus=%ld:%d%d.\n", vha->host_no, id, lun);
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
ql_log(ql_log_fatal, vha, 0x8013,
@@ -1105,7 +1106,8 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
eh_bus_reset_done:
ql_log(ql_log_warn, vha, 0x802b,
- "BUS RESET %s.\n", (ret == FAILED) ? "FAILED" : "SUCCEDED");
+ "BUS RESET %s nexus=%ld:%d:%d.\n",
+ (ret == FAILED) ? "FAILED" : "SUCCEDED", vha->host_no, id, lun);
return ret;
}
@@ -1129,7 +1131,6 @@ static int
qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
{
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
- fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
struct qla_hw_data *ha = vha->hw;
int ret = FAILED;
unsigned int id, lun;
@@ -1138,21 +1139,8 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
id = cmd->device->id;
lun = cmd->device->lun;
- if (!fcport) {
- ql_log(ql_log_warn, vha, 0x8016,
- "fcport is NULL.\n");
- return ret;
- }
-
- ret = fc_block_scsi_eh(cmd);
- ql_dbg(ql_dbg_taskm, vha, 0x8017,
- "fc_block_scsi_eh ret=%d.\n", ret);
- if (ret != 0)
- return ret;
- ret = FAILED;
-
ql_log(ql_log_info, vha, 0x8018,
- "ADAPTER RESET ISSUED for id %d lun %d.\n", id, lun);
+ "ADAPTER RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);
if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
goto eh_host_reset_lock;
@@ -1193,8 +1181,9 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
ret = SUCCESS;
eh_host_reset_lock:
- qla_printk(KERN_INFO, ha, "%s: reset %s.\n", __func__,
- (ret == FAILED) ? "failed" : "succeeded");
+ ql_log(ql_log_info, vha, 0x8017,
+ "ADAPTER RESET %s nexus=%ld:%d:%d.\n",
+ (ret == FAILED) ? "FAILED" : "SUCCEEDED", vha->host_no, id, lun);
return ret;
}
@@ -1344,10 +1333,8 @@ static void qla2x00_handle_queue_full(struct scsi_device *sdev, int qdepth)
return;
ql_dbg(ql_dbg_io, fcport->vha, 0x3029,
- "Queue depth adjusted-down "
- "to %d for scsi(%ld:%d:%d:%d).\n",
- sdev->queue_depth, fcport->vha->host_no,
- sdev->channel, sdev->id, sdev->lun);
+ "Queue depth adjusted-down to %d for nexus=%ld:%d:%d.\n",
+ sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun);
}
static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
@@ -1369,10 +1356,8 @@ static void qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, int qdepth)
scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, qdepth);
ql_dbg(ql_dbg_io, vha, 0x302a,
- "Queue depth adjusted-up to %d for "
- "scsi(%ld:%d:%d:%d).\n",
- sdev->queue_depth, fcport->vha->host_no,
- sdev->channel, sdev->id, sdev->lun);
+ "Queue depth adjusted-up to %d for nexus=%ld:%d:%d.\n",
+ sdev->queue_depth, fcport->vha->host_no, sdev->id, sdev->lun);
}
static int
@@ -1496,6 +1481,118 @@ qla24xx_disable_intrs(struct qla_hw_data *ha)
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
+static int
+qla2x00_iospace_config(struct qla_hw_data *ha)
+{
+ resource_size_t pio;
+ uint16_t msix;
+ int cpus;
+
+ if (IS_QLA82XX(ha))
+ return qla82xx_iospace_config(ha);
+
+ if (pci_request_selected_regions(ha->pdev, ha->bars,
+ QLA2XXX_DRIVER_NAME)) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
+ "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+ if (!(ha->bars & 1))
+ goto skip_pio;
+
+ /* We only need PIO for Flash operations on ISP2312 v2 chips. */
+ pio = pci_resource_start(ha->pdev, 0);
+ if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
+ if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
+ ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
+ "Invalid pci I/O region size (%s).\n",
+ pci_name(ha->pdev));
+ pio = 0;
+ }
+ } else {
+ ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
+ "Region #0 no a PIO resource (%s).\n",
+ pci_name(ha->pdev));
+ pio = 0;
+ }
+ ha->pio_address = pio;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
+ "PIO address=%llu.\n",
+ (unsigned long long)ha->pio_address);
+
+skip_pio:
+ /* Use MMIO operations for all accesses. */
+ if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
+ "Region #1 not an MMIO resource (%s), aborting.\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+ if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
+ "Invalid PCI mem region size (%s), aborting.\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
+ if (!ha->iobase) {
+ ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
+ "Cannot remap MMIO (%s), aborting.\n",
+ pci_name(ha->pdev));
+ goto iospace_error_exit;
+ }
+
+ /* Determine queue resources */
+ ha->max_req_queues = ha->max_rsp_queues = 1;
+ if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
+ (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
+ (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
+ goto mqiobase_exit;
+
+ ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
+ pci_resource_len(ha->pdev, 3));
+ if (ha->mqiobase) {
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
+ "MQIO Base=%p.\n", ha->mqiobase);
+ /* Read MSIX vector size of the board */
+ pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
+ ha->msix_count = msix;
+ /* Max queues are bounded by available msix vectors */
+ /* queue 0 uses two msix vectors */
+ if (ql2xmultique_tag) {
+ cpus = num_online_cpus();
+ ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
+ (cpus + 1) : (ha->msix_count - 1);
+ ha->max_req_queues = 2;
+ } else if (ql2xmaxqueues > 1) {
+ ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
+ QLA_MQ_SIZE : ql2xmaxqueues;
+ ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
+ "QoS mode set, max no of request queues:%d.\n",
+ ha->max_req_queues);
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
+ "QoS mode set, max no of request queues:%d.\n",
+ ha->max_req_queues);
+ }
+ ql_log_pci(ql_log_info, ha->pdev, 0x001a,
+ "MSI-X vector count: %d.\n", msix);
+ } else
+ ql_log_pci(ql_log_info, ha->pdev, 0x001b,
+ "BAR 3 not enabled.\n");
+
+mqiobase_exit:
+ ha->msix_count = ha->max_rsp_queues + 1;
+ ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
+ "MSIX Count:%d.\n", ha->msix_count);
+ return (0);
+
+iospace_error_exit:
+ return (-ENOMEM);
+}
+
+
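
Editor's note: the relocated qla2x00_iospace_config() keeps the existing queue-sizing rule: queue counts are bounded by the MSI-X vector count read from PCI config space, and queue 0 consumes two vectors. With ql2xmultique_tag set, the response-queue bound reduces to the arithmetic below (sketch of the rule only):

    #include <linux/kernel.h>

    /* Response queues are limited by min(online CPUs + 1, MSI-X vectors - 1),
     * because queue 0 needs two vectors and every other queue needs one. */
    static unsigned int bounded_rsp_queues(unsigned int msix_count,
                                           unsigned int online_cpus)
    {
            return min(online_cpus + 1, msix_count - 1);
    }
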
static struct isp_operations qla2100_isp_ops = {
.pci_config = qla2100_pci_config,
.reset_chip = qla2x00_reset_chip,
@@ -1530,6 +1627,7 @@ static struct isp_operations qla2100_isp_ops = {
.get_flash_version = qla2x00_get_flash_version,
.start_scsi = qla2x00_start_scsi,
.abort_isp = qla2x00_abort_isp,
+ .iospace_config = qla2x00_iospace_config,
};
static struct isp_operations qla2300_isp_ops = {
@@ -1566,6 +1664,7 @@ static struct isp_operations qla2300_isp_ops = {
.get_flash_version = qla2x00_get_flash_version,
.start_scsi = qla2x00_start_scsi,
.abort_isp = qla2x00_abort_isp,
+ .iospace_config = qla2x00_iospace_config,
};
static struct isp_operations qla24xx_isp_ops = {
@@ -1602,6 +1701,7 @@ static struct isp_operations qla24xx_isp_ops = {
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_start_scsi,
.abort_isp = qla2x00_abort_isp,
+ .iospace_config = qla2x00_iospace_config,
};
static struct isp_operations qla25xx_isp_ops = {
@@ -1638,6 +1738,7 @@ static struct isp_operations qla25xx_isp_ops = {
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
.abort_isp = qla2x00_abort_isp,
+ .iospace_config = qla2x00_iospace_config,
};
static struct isp_operations qla81xx_isp_ops = {
@@ -1674,6 +1775,7 @@ static struct isp_operations qla81xx_isp_ops = {
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla24xx_dif_start_scsi,
.abort_isp = qla2x00_abort_isp,
+ .iospace_config = qla2x00_iospace_config,
};
static struct isp_operations qla82xx_isp_ops = {
@@ -1710,6 +1812,7 @@ static struct isp_operations qla82xx_isp_ops = {
.get_flash_version = qla24xx_get_flash_version,
.start_scsi = qla82xx_start_scsi,
.abort_isp = qla82xx_abort_isp,
+ .iospace_config = qla82xx_iospace_config,
};
static inline void
@@ -1819,121 +1922,10 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
else
ha->flags.port0 = 0;
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x000b,
- "device_type=0x%x port=%d fw_srisc_address=%p.\n",
+ "device_type=0x%x port=%d fw_srisc_address=0x%x.\n",
ha->device_type, ha->flags.port0, ha->fw_srisc_address);
}
-static int
-qla2x00_iospace_config(struct qla_hw_data *ha)
-{
- resource_size_t pio;
- uint16_t msix;
- int cpus;
-
- if (IS_QLA82XX(ha))
- return qla82xx_iospace_config(ha);
-
- if (pci_request_selected_regions(ha->pdev, ha->bars,
- QLA2XXX_DRIVER_NAME)) {
- ql_log_pci(ql_log_fatal, ha->pdev, 0x0011,
- "Failed to reserve PIO/MMIO regions (%s), aborting.\n",
- pci_name(ha->pdev));
- goto iospace_error_exit;
- }
- if (!(ha->bars & 1))
- goto skip_pio;
-
- /* We only need PIO for Flash operations on ISP2312 v2 chips. */
- pio = pci_resource_start(ha->pdev, 0);
- if (pci_resource_flags(ha->pdev, 0) & IORESOURCE_IO) {
- if (pci_resource_len(ha->pdev, 0) < MIN_IOBASE_LEN) {
- ql_log_pci(ql_log_warn, ha->pdev, 0x0012,
- "Invalid pci I/O region size (%s).\n",
- pci_name(ha->pdev));
- pio = 0;
- }
- } else {
- ql_log_pci(ql_log_warn, ha->pdev, 0x0013,
- "Region #0 no a PIO resource (%s).\n",
- pci_name(ha->pdev));
- pio = 0;
- }
- ha->pio_address = pio;
- ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0014,
- "PIO address=%p.\n",
- ha->pio_address);
-
-skip_pio:
- /* Use MMIO operations for all accesses. */
- if (!(pci_resource_flags(ha->pdev, 1) & IORESOURCE_MEM)) {
- ql_log_pci(ql_log_fatal, ha->pdev, 0x0015,
- "Region #1 not an MMIO resource (%s), aborting.\n",
- pci_name(ha->pdev));
- goto iospace_error_exit;
- }
- if (pci_resource_len(ha->pdev, 1) < MIN_IOBASE_LEN) {
- ql_log_pci(ql_log_fatal, ha->pdev, 0x0016,
- "Invalid PCI mem region size (%s), aborting.\n",
- pci_name(ha->pdev));
- goto iospace_error_exit;
- }
-
- ha->iobase = ioremap(pci_resource_start(ha->pdev, 1), MIN_IOBASE_LEN);
- if (!ha->iobase) {
- ql_log_pci(ql_log_fatal, ha->pdev, 0x0017,
- "Cannot remap MMIO (%s), aborting.\n",
- pci_name(ha->pdev));
- goto iospace_error_exit;
- }
-
- /* Determine queue resources */
- ha->max_req_queues = ha->max_rsp_queues = 1;
- if ((ql2xmaxqueues <= 1 && !ql2xmultique_tag) ||
- (ql2xmaxqueues > 1 && ql2xmultique_tag) ||
- (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
- goto mqiobase_exit;
-
- ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
- pci_resource_len(ha->pdev, 3));
- if (ha->mqiobase) {
- ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0018,
- "MQIO Base=%p.\n", ha->mqiobase);
- /* Read MSIX vector size of the board */
- pci_read_config_word(ha->pdev, QLA_PCI_MSIX_CONTROL, &msix);
- ha->msix_count = msix;
- /* Max queues are bounded by available msix vectors */
- /* queue 0 uses two msix vectors */
- if (ql2xmultique_tag) {
- cpus = num_online_cpus();
- ha->max_rsp_queues = (ha->msix_count - 1 > cpus) ?
- (cpus + 1) : (ha->msix_count - 1);
- ha->max_req_queues = 2;
- } else if (ql2xmaxqueues > 1) {
- ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
- QLA_MQ_SIZE : ql2xmaxqueues;
- ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc008,
- "QoS mode set, max no of request queues:%d.\n",
- ha->max_req_queues);
- ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0019,
- "QoS mode set, max no of request queues:%d.\n",
- ha->max_req_queues);
- }
- ql_log_pci(ql_log_info, ha->pdev, 0x001a,
- "MSI-X vector count: %d.\n", msix);
- } else
- ql_log_pci(ql_log_info, ha->pdev, 0x001b,
- "BAR 3 not enabled.\n");
-
-mqiobase_exit:
- ha->msix_count = ha->max_rsp_queues + 1;
- ql_dbg_pci(ql_dbg_init, ha->pdev, 0x001c,
- "MSIX Count:%d.\n", ha->msix_count);
- return (0);
-
-iospace_error_exit:
- return (-ENOMEM);
-}
-
static void
qla2xxx_scan_start(struct Scsi_Host *shost)
{
@@ -2032,14 +2024,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
pdev->needs_freset = 1;
}
- /* Configure PCI I/O space */
- ret = qla2x00_iospace_config(ha);
- if (ret)
- goto probe_hw_failed;
-
- ql_log_pci(ql_log_info, pdev, 0x001d,
- "Found an ISP%04X irq %d iobase 0x%p.\n",
- pdev->device, pdev->irq, ha->iobase);
ha->prev_topology = 0;
ha->init_cb_size = sizeof(init_cb_t);
ha->link_data_rate = PORT_SPEED_UNKNOWN;
@@ -2060,7 +2044,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->nvram_data_off = ~0;
ha->isp_ops = &qla2100_isp_ops;
} else if (IS_QLA2200(ha)) {
- ha->mbx_count = MAILBOX_REGISTER_COUNT;
+ ha->mbx_count = MAILBOX_REGISTER_COUNT_2200;
req_length = REQUEST_ENTRY_CNT_2200;
rsp_length = RESPONSE_ENTRY_CNT_2100;
ha->max_loop_id = SNS_LAST_LOOP_ID_2100;
@@ -2152,6 +2136,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
"flash_data_off=%d, nvram_conf_off=%d, nvram_data_off=%d.\n",
ha->isp_ops, ha->flash_conf_off, ha->flash_data_off,
ha->nvram_conf_off, ha->nvram_data_off);
+
+ /* Configure PCI I/O space */
+ ret = ha->isp_ops->iospace_config(ha);
+ if (ret)
+ goto probe_hw_failed;
+
+ ql_log_pci(ql_log_info, pdev, 0x001d,
+ "Found an ISP%04X irq %d iobase 0x%p.\n",
+ pdev->device, pdev->irq, ha->iobase);
mutex_init(&ha->vport_lock);
init_completion(&ha->mbx_cmd_comp);
complete(&ha->mbx_cmd_comp);
@@ -2227,7 +2220,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ql_dbg(ql_dbg_init, base_vha, 0x0033,
"max_id=%d this_id=%d "
"cmd_per_len=%d unique_id=%d max_cmd_len=%d max_channel=%d "
- "max_lun=%d transportt=%p, vendor_id=%d.\n", host->max_id,
+ "max_lun=%d transportt=%p, vendor_id=%llu.\n", host->max_id,
host->this_id, host->cmd_per_lun, host->unique_id,
host->max_cmd_len, host->max_channel, host->max_lun,
host->transportt, sht->vendor_id);
@@ -2382,9 +2375,6 @@ skip_dpc:
qla2x00_dfs_setup(base_vha);
- ql_log(ql_log_info, base_vha, 0x00fa,
- "QLogic Fibre Channed HBA Driver: %s.\n",
- qla2x00_version_str);
ql_log(ql_log_info, base_vha, 0x00fb,
"QLogic %s - %s.\n",
ha->model_number, ha->model_desc ? ha->model_desc : "");
@@ -2833,7 +2823,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
if (!ha->sns_cmd)
goto fail_dma_pool;
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0026,
- "sns_cmd.\n", ha->sns_cmd);
+ "sns_cmd: %p.\n", ha->sns_cmd);
} else {
/* Get consistent memory allocated for MS IOCB */
ha->ms_iocb = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
@@ -3460,27 +3450,21 @@ qla2x00_do_dpc(void *data)
schedule();
__set_current_state(TASK_RUNNING);
- ql_dbg(ql_dbg_dpc, base_vha, 0x4001,
- "DPC handler waking up.\n");
- ql_dbg(ql_dbg_dpc, base_vha, 0x4002,
- "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
-
- /* Initialization not yet finished. Don't do anything yet. */
- if (!base_vha->flags.init_done)
- continue;
+ if (!base_vha->flags.init_done || ha->flags.mbox_busy)
+ goto end_loop;
if (ha->flags.eeh_busy) {
ql_dbg(ql_dbg_dpc, base_vha, 0x4003,
"eeh_busy=%d.\n", ha->flags.eeh_busy);
- continue;
+ goto end_loop;
}
ha->dpc_active = 1;
- if (ha->flags.mbox_busy) {
- ha->dpc_active = 0;
- continue;
- }
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4001,
+ "DPC handler waking up.\n");
+ ql_dbg(ql_dbg_dpc, base_vha, 0x4002,
+ "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
qla2x00_do_work(base_vha);
@@ -3622,6 +3606,7 @@ qla2x00_do_dpc(void *data)
qla2x00_do_dpc_all_vps(base_vha);
ha->dpc_active = 0;
+end_loop:
set_current_state(TASK_INTERRUPTIBLE);
} /* End of while(1) */
__set_current_state(TASK_RUNNING);
@@ -3705,16 +3690,6 @@ qla2x00_sp_free_dma(srb_t *sp)
sp->flags &= ~SRB_CRC_CTX_DMA_VALID;
}
- CMD_SP(cmd) = NULL;
-}
-
-static void
-qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
-{
- struct scsi_cmnd *cmd = sp->cmd;
-
- qla2x00_sp_free_dma(sp);
-
if (sp->flags & SRB_FCP_CMND_DMA_VALID) {
struct ct6_dsd *ctx = sp->ctx;
dma_pool_free(ha->fcp_cmnd_dma_pool, ctx->fcp_cmnd,
@@ -3726,6 +3701,15 @@ qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
sp->ctx = NULL;
}
+ CMD_SP(cmd) = NULL;
+}
+
+static void
+qla2x00_sp_final_compl(struct qla_hw_data *ha, srb_t *sp)
+{
+ struct scsi_cmnd *cmd = sp->cmd;
+
+ qla2x00_sp_free_dma(sp);
mempool_free(sp, ha->srb_mempool);
cmd->scsi_done(cmd);
}
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index eff1356..16bc728 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -904,8 +904,9 @@ no_flash_data:
}
done:
ql_dbg(ql_dbg_init, vha, 0x004d,
- "FDT[%x]: (0x%x/0x%x) erase=0x%x "
- "pr=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
+ "FDT[%s]: (0x%x/0x%x) erase=0x%x "
+ "pr=%x wrtd=0x%x blk=0x%x.\n",
+ loc, mid, fid,
ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd,
ha->fdt_wrt_disable, ha->fdt_block_size);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 23f33a6..29d780c 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.03.07.12-k"
+#define QLA2XXX_VERSION "8.03.07.13-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 3
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
index af62c3c..8d58ae2 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.c
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -20,12 +20,12 @@ void qla4xxx_dump_buffer(void *b, uint32_t size)
printk("------------------------------------------------------------"
"--\n");
for (cnt = 0; cnt < size; c++) {
- printk(KERN_INFO "%02x", *c);
+ printk("%02x", *c);
if (!(++cnt % 16))
- printk(KERN_INFO "\n");
+ printk("\n");
else
- printk(KERN_INFO " ");
+ printk(" ");
}
printk(KERN_INFO "\n");
}
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index fd5edc6..bfe6854 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -150,6 +150,8 @@
#define QL4_SESS_RECOVERY_TMO 120 /* iSCSI session */
/* recovery timeout */
+#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8))
+#define LSW(x) ((uint16_t)(x))
#define LSDW(x) ((u32)((u64)(x)))
#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
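
Editor's note: ql4_def.h now carries the same MSB()/LSW() helpers the qla2xxx side relies on: MSB() extracts bits 8..15 of a 16-bit value and LSW() the low 16 bits of a wider one. A few worked values with these definitions:

    /*   MSB(0xabcd)        == 0xab
     *   LSW(0x12345678)    == 0x5678
     *   LSW(MSB(0x6789))   == 0x0067   (the combination used by the
     *                                   0x67 halt-status checks below)
     */
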
@@ -177,6 +179,7 @@
#define LOGIN_TOV 12
#define MAX_RESET_HA_RETRIES 2
+#define FW_ALIVE_WAIT_TOV 3
#define CMD_SP(Cmnd) ((Cmnd)->SCp.ptr)
@@ -670,6 +673,7 @@ struct scsi_qla_host {
uint16_t pri_ddb_idx;
uint16_t sec_ddb_idx;
int is_reset;
+ uint16_t temperature;
};
struct ql4_task_data {
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 4ac07f8..7825c14 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -752,7 +752,7 @@ struct dev_db_entry {
uint8_t res4[0x36]; /* 8A-BF */
uint8_t iscsi_name[0xE0]; /* C0-19F : xxzzy Make this a
* pointer to a string so we
- * don't have to reserve soooo
+ * don't have to reserve so
* much RAM */
uint8_t link_local_ipv6_addr[0x10]; /* 1A0-1AF */
uint8_t res5[0x10]; /* 1B0-1BF */
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 1bdfa81..90614f3 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -697,6 +697,9 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)
writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
&ha->reg->ctrl_status);
readl(&ha->reg->ctrl_status);
+ writel(set_rmask(CSR_SCSI_COMPLETION_INTR),
+ &ha->reg->ctrl_status);
+ readl(&ha->reg->ctrl_status);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) {
DEBUG2(printk("scsi%ld: %s: Get firmware "
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 827e930..9582886 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -123,13 +123,13 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
if (!srb) {
- DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Status Entry invalid "
- "handle 0x%x, sp=%p. This cmd may have already "
- "been completed.\n", ha->host_no, __func__,
- le32_to_cpu(sts_entry->handle), srb));
- ql4_printk(KERN_WARNING, ha, "%s invalid status entry:"
- " handle=0x%0x\n", __func__, sts_entry->handle);
- set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ ql4_printk(KERN_WARNING, ha, "%s invalid status entry: "
+ "handle=0x%0x, srb=%p\n", __func__,
+ sts_entry->handle, srb);
+ if (is_qla8022(ha))
+ set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
+ else
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
return;
}
@@ -563,7 +563,11 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
case MBOX_ASTS_DHCP_LEASE_EXPIRED:
DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
"Reset HA\n", ha->host_no, mbox_status));
- set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ if (is_qla8022(ha))
+ set_bit(DPC_RESET_HA_FW_CONTEXT,
+ &ha->dpc_flags);
+ else
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
break;
case MBOX_ASTS_LINK_UP:
@@ -617,9 +621,13 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
(mbox_sts[2] == ACB_STATE_ACQUIRING)))
set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
- (mbox_sts[2] == ACB_STATE_VALID))
- set_bit(DPC_RESET_HA, &ha->dpc_flags);
- else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED))
+ (mbox_sts[2] == ACB_STATE_VALID)) {
+ if (is_qla8022(ha))
+ set_bit(DPC_RESET_HA_FW_CONTEXT,
+ &ha->dpc_flags);
+ else
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ } else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED))
complete(&ha->disable_acb_comp);
break;
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index c259378..e1e66a4 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -219,6 +219,13 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
ha->mailbox_timeout_count++;
mbx_sts[0] = (-1);
set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ if (is_qla8022(ha)) {
+ ql4_printk(KERN_INFO, ha,
+ "disabling pause transmit on port 0 & 1.\n");
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0 |
+ CRB_NIU_XG_PAUSE_CTL_P1);
+ }
goto mbox_exit;
}
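
Editor's note: the same "disable pause transmit" sequence appears in three ISP82xx error paths of this patch (the mailbox timeout above and the temperature/heartbeat failure paths in ql4_os.c further down). Factored out, it is a single CRB register write plus a log line; a sketch using the driver's own accessor and the constants defined in ql4_nx.h:

    /* Sketch: write the NIU pause-control bits for both ports, as done in
     * the error paths of this patch before kicking off recovery. */
    static void qla4_8xxx_disable_pause_tx(struct scsi_qla_host *ha)
    {
            ql4_printk(KERN_INFO, ha,
                       "disabling pause transmit on port 0 & 1.\n");
            qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
                            CRB_NIU_XG_PAUSE_CTL_P0 | CRB_NIU_XG_PAUSE_CTL_P1);
    }
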
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index f484ff4..65253df 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -10,6 +10,8 @@
#include "ql4_def.h"
#include "ql4_glbl.h"
+#include <asm-generic/io-64-nonatomic-lo-hi.h>
+
#define MASK(n) DMA_BIT_MASK(n)
#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff))
#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff))
@@ -655,27 +657,6 @@ static int qla4_8xxx_pci_is_same_window(struct scsi_qla_host *ha,
return 0;
}
-#ifndef readq
-static inline __u64 readq(const volatile void __iomem *addr)
-{
- const volatile u32 __iomem *p = addr;
- u32 low, high;
-
- low = readl(p);
- high = readl(p + 1);
-
- return low + ((u64)high << 32);
-}
-#endif
-
-#ifndef writeq
-static inline void writeq(__u64 val, volatile void __iomem *addr)
-{
- writel(val, addr);
- writel(val >> 32, addr+4);
-}
-#endif
-
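
Editor's note: the open-coded readq()/writeq() fallbacks are dropped in favor of <asm-generic/io-64-nonatomic-lo-hi.h>, which supplies the same semantics on platforms without native 64-bit MMIO: split each access into two 32-bit operations, low word first. Roughly what the removed helpers (and the lo-hi header) boil down to:

    #include <linux/io.h>
    #include <linux/types.h>

    static inline u64 readq_lo_hi(const volatile void __iomem *addr)
    {
            u32 low  = readl(addr);         /* low 32 bits first */
            u32 high = readl(addr + 4);     /* then the high 32 bits */

            return low | ((u64)high << 32);
    }

    static inline void writeq_lo_hi(u64 val, volatile void __iomem *addr)
    {
            writel((u32)val, addr);
            writel((u32)(val >> 32), addr + 4);
    }
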
static int qla4_8xxx_pci_mem_read_direct(struct scsi_qla_host *ha,
u64 off, void *data, int size)
{
@@ -1792,8 +1773,11 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
int rval = QLA_SUCCESS;
unsigned long dev_init_timeout;
- if (!test_bit(AF_INIT_DONE, &ha->flags))
+ if (!test_bit(AF_INIT_DONE, &ha->flags)) {
+ qla4_8xxx_idc_lock(ha);
qla4_8xxx_set_drv_active(ha);
+ qla4_8xxx_idc_unlock(ha);
+ }
dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
ql4_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state,
@@ -1802,8 +1786,8 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
/* wait for 30 seconds for device to go ready */
dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
+ qla4_8xxx_idc_lock(ha);
while (1) {
- qla4_8xxx_idc_lock(ha);
if (time_after_eq(jiffies, dev_init_timeout)) {
ql4_printk(KERN_WARNING, ha, "Device init failed!\n");
@@ -1819,15 +1803,14 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
/* NOTE: Make sure idc unlocked upon exit of switch statement */
switch (dev_state) {
case QLA82XX_DEV_READY:
- qla4_8xxx_idc_unlock(ha);
goto exit;
case QLA82XX_DEV_COLD:
rval = qla4_8xxx_device_bootstrap(ha);
- qla4_8xxx_idc_unlock(ha);
goto exit;
case QLA82XX_DEV_INITIALIZING:
qla4_8xxx_idc_unlock(ha);
msleep(1000);
+ qla4_8xxx_idc_lock(ha);
break;
case QLA82XX_DEV_NEED_RESET:
if (!ql4xdontresethba) {
@@ -1836,38 +1819,48 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
* reset handler */
dev_init_timeout = jiffies +
(ha->nx_dev_init_timeout * HZ);
+ } else {
+ qla4_8xxx_idc_unlock(ha);
+ msleep(1000);
+ qla4_8xxx_idc_lock(ha);
}
- qla4_8xxx_idc_unlock(ha);
break;
case QLA82XX_DEV_NEED_QUIESCENT:
- qla4_8xxx_idc_unlock(ha);
/* idc locked/unlocked in handler */
qla4_8xxx_need_qsnt_handler(ha);
- qla4_8xxx_idc_lock(ha);
- /* fall thru needs idc_locked */
+ break;
case QLA82XX_DEV_QUIESCENT:
qla4_8xxx_idc_unlock(ha);
msleep(1000);
+ qla4_8xxx_idc_lock(ha);
break;
case QLA82XX_DEV_FAILED:
qla4_8xxx_idc_unlock(ha);
qla4xxx_dead_adapter_cleanup(ha);
rval = QLA_ERROR;
+ qla4_8xxx_idc_lock(ha);
goto exit;
default:
qla4_8xxx_idc_unlock(ha);
qla4xxx_dead_adapter_cleanup(ha);
rval = QLA_ERROR;
+ qla4_8xxx_idc_lock(ha);
goto exit;
}
}
exit:
+ qla4_8xxx_idc_unlock(ha);
return rval;
}
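
Editor's note: the rework above changes the IDC locking discipline in the device-state loop. The lock is taken once before the loop, dropped only around the msleep() calls and the quiescent handler, and released a single time at the exit label instead of on every switch arm. Reduced to a generic skeleton (a mutex stands in for the driver's IDC hardware lock, and read_dev_state() is a stand-in, not a driver symbol):

    #include <linux/delay.h>
    #include <linux/mutex.h>

    static int wait_for_ready(struct mutex *idc_lock,
                              int (*read_dev_state)(void))
    {
            mutex_lock(idc_lock);
            while (read_dev_state() != 0 /* READY */) {
                    /* never sleep with the lock held */
                    mutex_unlock(idc_lock);
                    msleep(1000);
                    mutex_lock(idc_lock);
            }
            mutex_unlock(idc_lock);         /* single unlock on the way out */
            return 0;
    }
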
int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
{
int retval;
+
+ /* clear the interrupt */
+ writel(0, &ha->qla4_8xxx_reg->host_int);
+ readl(&ha->qla4_8xxx_reg->host_int);
+
retval = qla4_8xxx_device_state_handler(ha);
if (retval == QLA_SUCCESS && !test_bit(AF_INIT_DONE, &ha->flags))
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index 35376a1..dc45ac9 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -19,12 +19,28 @@
#define PHAN_PEG_RCV_INITIALIZED 0xff01
/*CRB_RELATED*/
-#define QLA82XX_CRB_BASE QLA82XX_CAM_RAM(0x200)
-#define QLA82XX_REG(X) (QLA82XX_CRB_BASE+(X))
-
+#define QLA82XX_CRB_BASE (QLA82XX_CAM_RAM(0x200))
+#define QLA82XX_REG(X) (QLA82XX_CRB_BASE+(X))
#define CRB_CMDPEG_STATE QLA82XX_REG(0x50)
#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
+#define CRB_TEMP_STATE QLA82XX_REG(0x1b4)
+
+#define qla82xx_get_temp_val(x) ((x) >> 16)
+#define qla82xx_get_temp_state(x) ((x) & 0xffff)
+#define qla82xx_encode_temp(val, state) (((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+ QLA82XX_TEMP_NORMAL = 0x1, /* Normal operating range */
+ QLA82XX_TEMP_WARN, /* Sound alert, temperature getting high */
+ QLA82XX_TEMP_PANIC /* Fatal error, hardware has shut down. */
+};
+
+#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
+#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
#define QLA82XX_HW_H0_CH_HUB_ADR 0x05
#define QLA82XX_HW_H1_CH_HUB_ADR 0x0E
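
Editor's note: CRB_TEMP_STATE packs two fields into one 32-bit register: the temperature reading in the upper 16 bits and the state code (NORMAL/WARN/PANIC from the enum above) in the lower 16, which is exactly what the new get_temp_val/get_temp_state macros pull apart. A worked value under that layout:

    /* For a register value of 0x004b0002:
     *   qla82xx_get_temp_val(0x004b0002)   == 0x4b  -> 75 degrees C
     *   qla82xx_get_temp_state(0x004b0002) == 0x2   -> QLA82XX_TEMP_WARN
     *   qla82xx_encode_temp(0x4b, 0x2)     == 0x004b0002 (round trip)
     */
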
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 4169c8b..ce6d3b7 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -35,43 +35,44 @@ static struct kmem_cache *srb_cachep;
int ql4xdisablesysfsboot = 1;
module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdisablesysfsboot,
- "Set to disable exporting boot targets to sysfs\n"
- " 0 - Export boot targets\n"
- " 1 - Do not export boot targets (Default)");
+ " Set to disable exporting boot targets to sysfs.\n"
+ "\t\t 0 - Export boot targets\n"
+ "\t\t 1 - Do not export boot targets (Default)");
int ql4xdontresethba = 0;
module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xdontresethba,
- "Don't reset the HBA for driver recovery \n"
- " 0 - It will reset HBA (Default)\n"
- " 1 - It will NOT reset HBA");
+ " Don't reset the HBA for driver recovery.\n"
+ "\t\t 0 - It will reset HBA (Default)\n"
+ "\t\t 1 - It will NOT reset HBA");
-int ql4xextended_error_logging = 0; /* 0 = off, 1 = log errors */
+int ql4xextended_error_logging;
module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xextended_error_logging,
- "Option to enable extended error logging, "
- "Default is 0 - no logging, 1 - debug logging");
+ " Option to enable extended error logging.\n"
+ "\t\t 0 - no logging (Default)\n"
+ "\t\t 2 - debug logging");
int ql4xenablemsix = 1;
module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql4xenablemsix,
- "Set to enable MSI or MSI-X interrupt mechanism.\n"
- " 0 = enable INTx interrupt mechanism.\n"
- " 1 = enable MSI-X interrupt mechanism (Default).\n"
- " 2 = enable MSI interrupt mechanism.");
+ " Set to enable MSI or MSI-X interrupt mechanism.\n"
+ "\t\t 0 = enable INTx interrupt mechanism.\n"
+ "\t\t 1 = enable MSI-X interrupt mechanism (Default).\n"
+ "\t\t 2 = enable MSI interrupt mechanism.");
#define QL4_DEF_QDEPTH 32
static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql4xmaxqdepth,
- "Maximum queue depth to report for target devices.\n"
- " Default: 32.");
+ " Maximum queue depth to report for target devices.\n"
+ "\t\t Default: 32.");
static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
MODULE_PARM_DESC(ql4xsess_recovery_tmo,
"Target Session Recovery Timeout.\n"
- " Default: 120 sec.");
+ "\t\t Default: 120 sec.");
static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
/*
@@ -128,7 +129,7 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd);
static int qla4xxx_slave_alloc(struct scsi_device *device);
static int qla4xxx_slave_configure(struct scsi_device *device);
static void qla4xxx_slave_destroy(struct scsi_device *sdev);
-static mode_t ql4_attr_is_visible(int param_type, int param);
+static umode_t ql4_attr_is_visible(int param_type, int param);
static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
@@ -197,7 +198,7 @@ static struct iscsi_transport qla4xxx_iscsi_transport = {
static struct scsi_transport_template *qla4xxx_scsi_transport;
-static mode_t ql4_attr_is_visible(int param_type, int param)
+static umode_t ql4_attr_is_visible(int param_type, int param)
{
switch (param_type) {
case ISCSI_HOST_PARAM:
@@ -935,7 +936,16 @@ qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len)
goto exit_init_fw_cb;
}
- qla4xxx_disable_acb(ha);
+ rval = qla4xxx_disable_acb(ha);
+ if (rval != QLA_SUCCESS) {
+ ql4_printk(KERN_ERR, ha, "%s: disable acb mbx failed\n",
+ __func__);
+ rval = -EIO;
+ goto exit_init_fw_cb;
+ }
+
+ wait_for_completion_timeout(&ha->disable_acb_comp,
+ DISABLE_ACB_TOV * HZ);
qla4xxx_initcb_to_acb(init_fw_cb);
@@ -1621,7 +1631,9 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
/* Update timers after login */
ddb_entry->default_relogin_timeout =
- le16_to_cpu(fw_ddb_entry->def_timeout);
+ (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
+ (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
+ le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
ddb_entry->default_time2wait =
le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
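
Editor's note: the relogin-timeout hunks in this patch all apply the same sanity bound to the firmware-supplied def_timeout: it is only trusted when it lies strictly between LOGIN_TOV and ten times LOGIN_TOV, and falls back to LOGIN_TOV otherwise. Written out once as a helper (sketch; LOGIN_TOV is 12 seconds per ql4_def.h):

    #include <linux/types.h>

    #define LOGIN_TOV 12    /* seconds, from ql4_def.h */

    static u16 sane_def_timeout(u16 fw_def_timeout)
    {
            if (fw_def_timeout > LOGIN_TOV && fw_def_timeout < LOGIN_TOV * 10)
                    return fw_def_timeout;  /* 13..119 s accepted as-is */
            return LOGIN_TOV;               /* everything else falls back to 12 s */
    }
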
@@ -1961,14 +1973,51 @@ mem_alloc_error_exit:
}
/**
+ * qla4_8xxx_check_temp - Check the ISP82XX temperature.
+ * @ha: adapter block pointer.
+ *
+ * Note: The caller should not hold the idc lock.
+ **/
+static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
+{
+ uint32_t temp, temp_state, temp_val;
+ int status = QLA_SUCCESS;
+
+ temp = qla4_8xxx_rd_32(ha, CRB_TEMP_STATE);
+
+ temp_state = qla82xx_get_temp_state(temp);
+ temp_val = qla82xx_get_temp_val(temp);
+
+ if (temp_state == QLA82XX_TEMP_PANIC) {
+ ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
+ " exceeds maximum allowed. Hardware has been shut"
+ " down.\n", temp_val);
+ status = QLA_ERROR;
+ } else if (temp_state == QLA82XX_TEMP_WARN) {
+ if (ha->temperature == QLA82XX_TEMP_NORMAL)
+ ql4_printk(KERN_WARNING, ha, "Device temperature %d"
+ " degrees C exceeds operating range."
+ " Immediate action needed.\n", temp_val);
+ } else {
+ if (ha->temperature == QLA82XX_TEMP_WARN)
+ ql4_printk(KERN_INFO, ha, "Device temperature is"
+ " now %d degrees C in normal range.\n",
+ temp_val);
+ }
+ ha->temperature = temp_state;
+ return status;
+}
+
+/**
* qla4_8xxx_check_fw_alive - Check firmware health
* @ha: Pointer to host adapter structure.
*
* Context: Interrupt
**/
-static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
+static int qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
{
- uint32_t fw_heartbeat_counter, halt_status;
+ uint32_t fw_heartbeat_counter;
+ int status = QLA_SUCCESS;
fw_heartbeat_counter = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
/* If PEG_ALIVE_COUNTER is 0xffffffff, AER/EEH is in progress, ignore */
@@ -1976,7 +2025,7 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Device in frozen "
"state, QLA82XX_PEG_ALIVE_COUNTER is 0xffffffff\n",
ha->host_no, __func__));
- return;
+ return status;
}
if (ha->fw_heartbeat_counter == fw_heartbeat_counter) {
@@ -1984,8 +2033,6 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
/* FW not alive after 2 seconds */
if (ha->seconds_since_last_heartbeat == 2) {
ha->seconds_since_last_heartbeat = 0;
- halt_status = qla4_8xxx_rd_32(ha,
- QLA82XX_PEG_HALT_STATUS1);
ql4_printk(KERN_INFO, ha,
"scsi(%ld): %s, Dumping hw/fw registers:\n "
@@ -1993,7 +2040,9 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
" 0x%x,\n PEG_NET_0_PC: 0x%x, PEG_NET_1_PC:"
" 0x%x,\n PEG_NET_2_PC: 0x%x, PEG_NET_3_PC:"
" 0x%x,\n PEG_NET_4_PC: 0x%x\n",
- ha->host_no, __func__, halt_status,
+ ha->host_no, __func__,
+ qla4_8xxx_rd_32(ha,
+ QLA82XX_PEG_HALT_STATUS1),
qla4_8xxx_rd_32(ha,
QLA82XX_PEG_HALT_STATUS2),
qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_0 +
@@ -2006,24 +2055,13 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
0x3c),
qla4_8xxx_rd_32(ha, QLA82XX_CRB_PEG_NET_4 +
0x3c));
-
- /* Since we cannot change dev_state in interrupt
- * context, set appropriate DPC flag then wakeup
- * DPC */
- if (halt_status & HALT_STATUS_UNRECOVERABLE)
- set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
- else {
- printk("scsi%ld: %s: detect abort needed!\n",
- ha->host_no, __func__);
- set_bit(DPC_RESET_HA, &ha->dpc_flags);
- }
- qla4xxx_wake_dpc(ha);
- qla4xxx_mailbox_premature_completion(ha);
+ status = QLA_ERROR;
}
} else
ha->seconds_since_last_heartbeat = 0;
ha->fw_heartbeat_counter = fw_heartbeat_counter;
+ return status;
}
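
Editor's note: qla4_8xxx_check_fw_alive() is converted from acting on a hung firmware itself to merely reporting it. It samples PEG_ALIVE_COUNTER once per call, and if the counter has not advanced for two consecutive samples it returns an error so the caller (the watchdog, or the recovery path elsewhere in this patch) decides what to do. The detection logic, reduced to a generic sketch (read_alive_counter() is a stand-in, not a driver symbol):

    #include <linux/errno.h>
    #include <linux/types.h>

    static int fw_alive_check(u32 *last, u32 *stalled,
                              u32 (*read_alive_counter)(void))
    {
            u32 now = read_alive_counter();
            int ret = 0;

            if (now == *last) {
                    if (++(*stalled) >= 2)  /* no heartbeat for ~2 samples */
                            ret = -EIO;
            } else {
                    *stalled = 0;
            }
            *last = now;
            return ret;
    }
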
/**
@@ -2034,22 +2072,29 @@ static void qla4_8xxx_check_fw_alive(struct scsi_qla_host *ha)
**/
void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
{
- uint32_t dev_state;
-
- dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ uint32_t dev_state, halt_status;
/* don't poll if reset is going on */
if (!(test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
- if (dev_state == QLA82XX_DEV_NEED_RESET &&
+ dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+
+ if (qla4_8xxx_check_temp(ha)) {
+ ql4_printk(KERN_INFO, ha, "disabling pause"
+ " transmit on port 0 & 1.\n");
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0 |
+ CRB_NIU_XG_PAUSE_CTL_P1);
+ set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
+ qla4xxx_wake_dpc(ha);
+ } else if (dev_state == QLA82XX_DEV_NEED_RESET &&
!test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
if (!ql4xdontresethba) {
ql4_printk(KERN_INFO, ha, "%s: HW State: "
"NEED RESET!\n", __func__);
set_bit(DPC_RESET_HA, &ha->dpc_flags);
qla4xxx_wake_dpc(ha);
- qla4xxx_mailbox_premature_completion(ha);
}
} else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
!test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags)) {
@@ -2059,12 +2104,41 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
qla4xxx_wake_dpc(ha);
} else {
/* Check firmware health */
- qla4_8xxx_check_fw_alive(ha);
+ if (qla4_8xxx_check_fw_alive(ha)) {
+ ql4_printk(KERN_INFO, ha, "disabling pause"
+ " transmit on port 0 & 1.\n");
+ qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+ CRB_NIU_XG_PAUSE_CTL_P0 |
+ CRB_NIU_XG_PAUSE_CTL_P1);
+ halt_status = qla4_8xxx_rd_32(ha,
+ QLA82XX_PEG_HALT_STATUS1);
+
+ if (LSW(MSB(halt_status)) == 0x67)
+ ql4_printk(KERN_ERR, ha, "%s:"
+ " Firmware aborted with"
+ " error code 0x00006700."
+ " Device is being reset\n",
+ __func__);
+
+ /* Since we cannot change dev_state in interrupt
+ * context, set appropriate DPC flag then wakeup
+ * DPC */
+ if (halt_status & HALT_STATUS_UNRECOVERABLE)
+ set_bit(DPC_HA_UNRECOVERABLE,
+ &ha->dpc_flags);
+ else {
+ ql4_printk(KERN_INFO, ha, "%s: detect "
+ "abort needed!\n", __func__);
+ set_bit(DPC_RESET_HA, &ha->dpc_flags);
+ }
+ qla4xxx_mailbox_premature_completion(ha);
+ qla4xxx_wake_dpc(ha);
+ }
}
}
}
-void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
{
struct iscsi_session *sess;
struct ddb_entry *ddb_entry;
@@ -2414,6 +2488,8 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
{
int status = QLA_ERROR;
uint8_t reset_chip = 0;
+ uint32_t dev_state;
+ unsigned long wait;
/* Stall incoming I/O until we are done */
scsi_block_requests(ha->host);
@@ -2464,8 +2540,29 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
* or if stop_firmware fails for ISP-82xx.
* This is the default case for ISP-4xxx */
if (!is_qla8022(ha) || reset_chip) {
+ if (!is_qla8022(ha))
+ goto chip_reset;
+
+ /* Check if 82XX firmware is alive or not
+ * We may have arrived here from NEED_RESET
+ * detection only */
+ if (test_bit(AF_FW_RECOVERY, &ha->flags))
+ goto chip_reset;
+
+ wait = jiffies + (FW_ALIVE_WAIT_TOV * HZ);
+ while (time_before(jiffies, wait)) {
+ if (qla4_8xxx_check_fw_alive(ha)) {
+ qla4xxx_mailbox_premature_completion(ha);
+ break;
+ }
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(HZ);
+ }
+
if (!test_bit(AF_FW_RECOVERY, &ha->flags))
qla4xxx_cmd_wait(ha);
+chip_reset:
qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -2501,6 +2598,25 @@ recover_ha_init_adapter:
* Since we don't want to block the DPC for too long
* with multiple resets in the same thread,
* utilize DPC to retry */
+ if (is_qla8022(ha)) {
+ qla4_8xxx_idc_lock(ha);
+ dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
+ qla4_8xxx_idc_unlock(ha);
+ if (dev_state == QLA82XX_DEV_FAILED) {
+ ql4_printk(KERN_INFO, ha, "%s: don't retry "
+ "recover adapter. H/W is in Failed "
+ "state\n", __func__);
+ qla4xxx_dead_adapter_cleanup(ha);
+ clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
+ clear_bit(DPC_RESET_HA, &ha->dpc_flags);
+ clear_bit(DPC_RESET_HA_FW_CONTEXT,
+ &ha->dpc_flags);
+ status = QLA_ERROR;
+
+ goto exit_recover;
+ }
+ }
+
if (!test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags)) {
ha->retry_reset_ha_cnt = MAX_RESET_HA_RETRIES;
DEBUG2(printk("scsi%ld: recover adapter - retrying "
@@ -2539,6 +2655,7 @@ recover_ha_init_adapter:
clear_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags);
}
+exit_recover:
ha->adapter_error_count++;
if (test_bit(AF_ONLINE, &ha->flags))
@@ -2806,6 +2923,7 @@ dpc_post_reset_ha:
**/
static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
{
+ qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
/* Turn-off interrupts on the card. */
@@ -3039,7 +3157,7 @@ static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf)
return rc;
}
-static mode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
+static umode_t qla4xxx_eth_get_attr_visibility(void *data, int type)
{
int rc;
@@ -3073,7 +3191,7 @@ static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf)
return rc;
}
-static mode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
+static umode_t qla4xxx_ini_get_attr_visibility(void *data, int type)
{
int rc;
@@ -3160,7 +3278,7 @@ static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf)
return qla4xxx_show_boot_tgt_info(boot_sess, type, buf);
}
-static mode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
+static umode_t qla4xxx_tgt_get_attr_visibility(void *data, int type)
{
int rc;
@@ -3768,16 +3886,14 @@ exit_check:
return ret;
}
-static void qla4xxx_free_nt_list(struct list_head *list_nt)
+static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
{
- struct qla_ddb_index *nt_ddb_idx, *nt_ddb_idx_tmp;
+ struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
- /* Free up the normaltargets list */
- list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
- list_del_init(&nt_ddb_idx->list);
- vfree(nt_ddb_idx);
+ list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
+ list_del_init(&ddb_idx->list);
+ vfree(ddb_idx);
}
-
}
static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
@@ -3826,6 +3942,8 @@ static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry)
{
+ uint16_t def_timeout;
+
ddb_entry->ddb_type = FLASH_DDB;
ddb_entry->fw_ddb_index = INVALID_ENTRY;
ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
@@ -3836,9 +3954,10 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
atomic_set(&ddb_entry->relogin_timer, 0);
atomic_set(&ddb_entry->relogin_retry_count, 0);
-
+ def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
ddb_entry->default_relogin_timeout =
- le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
+ (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
+ def_timeout : LOGIN_TOV;
ddb_entry->default_time2wait =
le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
}
@@ -3876,7 +3995,6 @@ static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
ip_state == IP_ADDRSTATE_DEPRICATED ||
ip_state == IP_ADDRSTATE_DISABLING)
ip_idx[idx] = -1;
-
}
/* Break if all IP states checked */
@@ -3889,58 +4007,37 @@ static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
} while (time_after(wtime, jiffies));
}
-void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
+static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
+ struct list_head *list_st)
{
+ struct qla_ddb_index *st_ddb_idx;
int max_ddbs;
+ int fw_idx_size;
+ struct dev_db_entry *fw_ddb_entry;
+ dma_addr_t fw_ddb_dma;
int ret;
uint32_t idx = 0, next_idx = 0;
uint32_t state = 0, conn_err = 0;
- uint16_t conn_id;
- struct dev_db_entry *fw_ddb_entry;
- struct ddb_entry *ddb_entry = NULL;
- dma_addr_t fw_ddb_dma;
- struct iscsi_cls_session *cls_sess;
- struct iscsi_session *sess;
- struct iscsi_cls_conn *cls_conn;
- struct iscsi_endpoint *ep;
- uint16_t cmds_max = 32, tmo = 0;
- uint32_t initial_cmdsn = 0;
- struct list_head list_st, list_nt; /* List of sendtargets */
- struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
- int fw_idx_size;
- unsigned long wtime;
- struct qla_ddb_index *nt_ddb_idx;
-
- if (!test_bit(AF_LINK_UP, &ha->flags)) {
- set_bit(AF_BUILD_DDB_LIST, &ha->flags);
- ha->is_reset = is_reset;
- return;
- }
- max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
- MAX_DEV_DB_ENTRIES;
+ uint16_t conn_id = 0;
fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
&fw_ddb_dma);
if (fw_ddb_entry == NULL) {
DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
- goto exit_ddb_list;
+ goto exit_st_list;
}
- INIT_LIST_HEAD(&list_st);
- INIT_LIST_HEAD(&list_nt);
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
fw_idx_size = sizeof(struct qla_ddb_index);
for (idx = 0; idx < max_ddbs; idx = next_idx) {
- ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
- fw_ddb_dma, NULL,
- &next_idx, &state, &conn_err,
- NULL, &conn_id);
+ ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
+ NULL, &next_idx, &state,
+ &conn_err, NULL, &conn_id);
if (ret == QLA_ERROR)
break;
- if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
- goto continue_next_st;
-
/* Check if ST, add to the list_st */
if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
goto continue_next_st;
@@ -3951,59 +4048,155 @@ void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
st_ddb_idx->fw_ddb_idx = idx;
- list_add_tail(&st_ddb_idx->list, &list_st);
+ list_add_tail(&st_ddb_idx->list, list_st);
continue_next_st:
if (next_idx == 0)
break;
}
- /* Before issuing conn open mbox, ensure all IPs states are configured
- * Note, conn open fails if IPs are not configured
+exit_st_list:
+ if (fw_ddb_entry)
+ dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
+/**
+ * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
+ * @ha: pointer to adapter structure
+ * @list_ddb: List from which failed ddb to be removed
+ *
+ * Iterate over the list of DDBs and find and remove DDBs that are either in
+ * no connection active state or failed state
+ **/
+static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
+ struct list_head *list_ddb)
+{
+ struct qla_ddb_index *ddb_idx, *ddb_idx_tmp;
+ uint32_t next_idx = 0;
+ uint32_t state = 0, conn_err = 0;
+ int ret;
+
+ list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
+ ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
+ NULL, 0, NULL, &next_idx, &state,
+ &conn_err, NULL, NULL);
+ if (ret == QLA_ERROR)
+ continue;
+
+ if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+ state == DDB_DS_SESSION_FAILED) {
+ list_del_init(&ddb_idx->list);
+ vfree(ddb_idx);
+ }
+ }
+}
+
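
Editor's note: both the renamed qla4xxx_free_ddb_list() and the new qla4xxx_remove_failed_ddb() above delete entries while walking the list, which is why they use list_for_each_entry_safe(): the _safe variant caches the next node before the loop body runs, so list_del_init() plus vfree() on the current entry cannot break the traversal. The same pattern in a self-contained form:

    #include <linux/list.h>
    #include <linux/types.h>
    #include <linux/vmalloc.h>

    struct idx_node {
            struct list_head list;
            u16 fw_ddb_idx;
    };

    static void free_idx_list(struct list_head *head)
    {
            struct idx_node *node, *tmp;

            /* 'tmp' already points at the next entry when 'node' is freed */
            list_for_each_entry_safe(node, tmp, head, list) {
                    list_del_init(&node->list);
                    vfree(node);
            }
    }
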
+static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
+ struct dev_db_entry *fw_ddb_entry,
+ int is_reset)
+{
+ struct iscsi_cls_session *cls_sess;
+ struct iscsi_session *sess;
+ struct iscsi_cls_conn *cls_conn;
+ struct iscsi_endpoint *ep;
+ uint16_t cmds_max = 32;
+ uint16_t conn_id = 0;
+ uint32_t initial_cmdsn = 0;
+ int ret = QLA_SUCCESS;
+
+ struct ddb_entry *ddb_entry = NULL;
+
+ /* Create session object, with INVALID_ENTRY,
+ * the target_id would get set when we issue the login
*/
- qla4xxx_wait_for_ip_configuration(ha);
+ cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
+ cmds_max, sizeof(struct ddb_entry),
+ sizeof(struct ql4_task_data),
+ initial_cmdsn, INVALID_ENTRY);
+ if (!cls_sess) {
+ ret = QLA_ERROR;
+ goto exit_setup;
+ }
- /* Go thru the STs and fire the sendtargets by issuing conn open mbx */
- list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
- qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
+ /*
+ * iscsi_session_setup increments the driver reference
+ * count which wouldn't let the driver to be unloaded.
+ * so calling module_put function to decrement the
+ * reference count.
+ **/
+ module_put(qla4xxx_iscsi_transport.owner);
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ddb_entry->sess = cls_sess;
+
+ cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
+ memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
+ sizeof(struct dev_db_entry));
+
+ qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
+
+ cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
+
+ if (!cls_conn) {
+ ret = QLA_ERROR;
+ goto exit_setup;
}
- /* Wait to ensure all sendtargets are done for min 12 sec wait */
- tmo = ((ha->def_timeout < LOGIN_TOV) ? LOGIN_TOV : ha->def_timeout);
- DEBUG2(ql4_printk(KERN_INFO, ha,
- "Default time to wait for build ddb %d\n", tmo));
+ ddb_entry->conn = cls_conn;
- wtime = jiffies + (HZ * tmo);
- do {
- list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st,
- list) {
- ret = qla4xxx_get_fwddb_entry(ha,
- st_ddb_idx->fw_ddb_idx,
- NULL, 0, NULL, &next_idx,
- &state, &conn_err, NULL,
- NULL);
- if (ret == QLA_ERROR)
- continue;
+ /* Setup ep, for displaying attributes in sysfs */
+ ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
+ if (ep) {
+ ep->conn = cls_conn;
+ cls_conn->ep = ep;
+ } else {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
+ ret = QLA_ERROR;
+ goto exit_setup;
+ }
- if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
- state == DDB_DS_SESSION_FAILED) {
- list_del_init(&st_ddb_idx->list);
- vfree(st_ddb_idx);
- }
- }
- schedule_timeout_uninterruptible(HZ / 10);
- } while (time_after(wtime, jiffies));
+ /* Update sess/conn params */
+ qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
- /* Free up the sendtargets list */
- list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
- list_del_init(&st_ddb_idx->list);
- vfree(st_ddb_idx);
+ if (is_reset == RESET_ADAPTER) {
+ iscsi_block_session(cls_sess);
+ /* Use the relogin path to discover new devices
+ * by short-circuiting the logic of setting
+ * timer to relogin - instead set the flags
+ * to initiate login right away.
+ */
+ set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+ set_bit(DF_RELOGIN, &ddb_entry->flags);
}
+exit_setup:
+ return ret;
+}
+
+static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
+ struct list_head *list_nt, int is_reset)
+{
+ struct dev_db_entry *fw_ddb_entry;
+ dma_addr_t fw_ddb_dma;
+ int max_ddbs;
+ int fw_idx_size;
+ int ret;
+ uint32_t idx = 0, next_idx = 0;
+ uint32_t state = 0, conn_err = 0;
+ uint16_t conn_id = 0;
+ struct qla_ddb_index *nt_ddb_idx;
+
+ fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+ &fw_ddb_dma);
+ if (fw_ddb_entry == NULL) {
+ DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+ goto exit_nt_list;
+ }
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+ fw_idx_size = sizeof(struct qla_ddb_index);
+
for (idx = 0; idx < max_ddbs; idx = next_idx) {
- ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
- fw_ddb_dma, NULL,
- &next_idx, &state, &conn_err,
- NULL, &conn_id);
+ ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
+ NULL, &next_idx, &state,
+ &conn_err, NULL, &conn_id);
if (ret == QLA_ERROR)
break;
@@ -4014,107 +4207,113 @@ continue_next_st:
if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
goto continue_next_nt;
- if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
- state == DDB_DS_SESSION_FAILED) {
- DEBUG2(ql4_printk(KERN_INFO, ha,
- "Adding DDB to session = 0x%x\n",
- idx));
- if (is_reset == INIT_ADAPTER) {
- nt_ddb_idx = vmalloc(fw_idx_size);
- if (!nt_ddb_idx)
- break;
-
- nt_ddb_idx->fw_ddb_idx = idx;
-
- memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
- sizeof(struct dev_db_entry));
-
- if (qla4xxx_is_flash_ddb_exists(ha, &list_nt,
- fw_ddb_entry) == QLA_SUCCESS) {
- vfree(nt_ddb_idx);
- goto continue_next_nt;
- }
- list_add_tail(&nt_ddb_idx->list, &list_nt);
- } else if (is_reset == RESET_ADAPTER) {
- if (qla4xxx_is_session_exists(ha,
- fw_ddb_entry) == QLA_SUCCESS)
- goto continue_next_nt;
- }
+ if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
+ state == DDB_DS_SESSION_FAILED))
+ goto continue_next_nt;
- /* Create session object, with INVALID_ENTRY,
- * the targer_id would get set when we issue the login
- */
- cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport,
- ha->host, cmds_max,
- sizeof(struct ddb_entry),
- sizeof(struct ql4_task_data),
- initial_cmdsn, INVALID_ENTRY);
- if (!cls_sess)
- goto exit_ddb_list;
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Adding DDB to session = 0x%x\n", idx));
+ if (is_reset == INIT_ADAPTER) {
+ nt_ddb_idx = vmalloc(fw_idx_size);
+ if (!nt_ddb_idx)
+ break;
- /*
- * iscsi_session_setup increments the driver reference
- * count which wouldn't let the driver to be unloaded.
- * so calling module_put function to decrement the
- * reference count.
- **/
- module_put(qla4xxx_iscsi_transport.owner);
- sess = cls_sess->dd_data;
- ddb_entry = sess->dd_data;
- ddb_entry->sess = cls_sess;
+ nt_ddb_idx->fw_ddb_idx = idx;
- cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
- memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
+ memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
sizeof(struct dev_db_entry));
- qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
-
- cls_conn = iscsi_conn_setup(cls_sess,
- sizeof(struct qla_conn),
- conn_id);
- if (!cls_conn)
- goto exit_ddb_list;
-
- ddb_entry->conn = cls_conn;
-
- /* Setup ep, for displaying attributes in sysfs */
- ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
- if (ep) {
- ep->conn = cls_conn;
- cls_conn->ep = ep;
- } else {
- DEBUG2(ql4_printk(KERN_ERR, ha,
- "Unable to get ep\n"));
- }
-
- /* Update sess/conn params */
- qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess,
- cls_conn);
-
- if (is_reset == RESET_ADAPTER) {
- iscsi_block_session(cls_sess);
- /* Use the relogin path to discover new devices
- * by short-circuting the logic of setting
- * timer to relogin - instead set the flags
- * to initiate login right away.
- */
- set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
- set_bit(DF_RELOGIN, &ddb_entry->flags);
+ if (qla4xxx_is_flash_ddb_exists(ha, list_nt,
+ fw_ddb_entry) == QLA_SUCCESS) {
+ vfree(nt_ddb_idx);
+ goto continue_next_nt;
}
+ list_add_tail(&nt_ddb_idx->list, list_nt);
+ } else if (is_reset == RESET_ADAPTER) {
+ if (qla4xxx_is_session_exists(ha, fw_ddb_entry) ==
+ QLA_SUCCESS)
+ goto continue_next_nt;
}
+
+ ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset);
+ if (ret == QLA_ERROR)
+ goto exit_nt_list;
+
continue_next_nt:
if (next_idx == 0)
break;
}
-exit_ddb_list:
- qla4xxx_free_nt_list(&list_nt);
+
+exit_nt_list:
if (fw_ddb_entry)
dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
+/**
+ * qla4xxx_build_ddb_list - Build ddb list and setup sessions
+ * @ha: pointer to adapter structure
+ * @is_reset: Is this init path or reset path
+ *
+ * Create a list of sendtargets (st) from firmware DDBs, issue send targets
+ * using connection open, then create the list of normal targets (nt)
+ * from firmware DDBs. Based on the nt list, set up session and connection
+ * objects.
+ **/
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
+{
+ uint16_t tmo = 0;
+ struct list_head list_st, list_nt;
+ struct qla_ddb_index *st_ddb_idx, *st_ddb_idx_tmp;
+ unsigned long wtime;
+
+ if (!test_bit(AF_LINK_UP, &ha->flags)) {
+ set_bit(AF_BUILD_DDB_LIST, &ha->flags);
+ ha->is_reset = is_reset;
+ return;
+ }
+
+ INIT_LIST_HEAD(&list_st);
+ INIT_LIST_HEAD(&list_nt);
+
+ qla4xxx_build_st_list(ha, &list_st);
+
+ /* Before issuing the conn open mbox, ensure all IP states are configured.
+ * Note: conn open fails if the IPs are not configured.
+ */
+ qla4xxx_wait_for_ip_configuration(ha);
+
+ /* Go through the STs and fire the sendtargets by issuing the conn open mbx */
+ list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
+ qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
+ }
+
+ /* Wait for all sendtargets to complete; wait a minimum of 12 sec */
+ tmo = ((ha->def_timeout > LOGIN_TOV) &&
+ (ha->def_timeout < LOGIN_TOV * 10) ?
+ ha->def_timeout : LOGIN_TOV);
+
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "Default time to wait for build ddb %d\n", tmo));
+
+ wtime = jiffies + (HZ * tmo);
+ do {
+ if (list_empty(&list_st))
+ break;
+
+ qla4xxx_remove_failed_ddb(ha, &list_st);
+ schedule_timeout_uninterruptible(HZ / 10);
+ } while (time_after(wtime, jiffies));
+
+ /* Free up the sendtargets list */
+ qla4xxx_free_ddb_list(&list_st);
+
+ qla4xxx_build_nt_list(ha, &list_nt, is_reset);
+
+ qla4xxx_free_ddb_list(&list_nt);
qla4xxx_free_ddb_index(ha);
}
-
/**
* qla4xxx_probe_adapter - callback function to probe HBA
* @pdev: pointer to pci_dev structure
@@ -4816,6 +5015,20 @@ static int qla4xxx_eh_target_reset(struct scsi_cmnd *cmd)
}
/**
+ * qla4xxx_is_eh_active - check if error handler is running
+ * @shost: Pointer to SCSI Host struct
+ *
+ * This routine determines whether the host reset was invoked from the
+ * error-handler (EH) path or from an application such as sg_reset.
+ **/
+static int qla4xxx_is_eh_active(struct Scsi_Host *shost)
+{
+ if (shost->shost_state == SHOST_RECOVERY)
+ return 1;
+ return 0;
+}
+
+/**
* qla4xxx_eh_host_reset - kernel callback
* @cmd: Pointer to Linux's SCSI command structure
*
@@ -4832,6 +5045,11 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd)
if (ql4xdontresethba) {
DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
ha->host_no, __func__));
+
+ /* Clear outstanding srb in queues */
+ if (qla4xxx_is_eh_active(cmd->device->host))
+ qla4xxx_abort_active_cmds(ha, DID_ABORT << 16);
+
return FAILED;
}
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 5254e57..133989b 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k9"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k12"
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index dc6131e..5f84a14 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1812,7 +1812,7 @@ int scsi_error_handler(void *data)
* what we need to do to get it up and online again (if we can).
* If we fail, we end up taking the thing offline.
*/
- if (scsi_autopm_get_host(shost) != 0) {
+ if (!shost->eh_noresume && scsi_autopm_get_host(shost) != 0) {
SCSI_LOG_ERROR_RECOVERY(1,
printk(KERN_ERR "Error handler scsi_eh_%d "
"unable to autoresume\n",
@@ -1833,7 +1833,8 @@ int scsi_error_handler(void *data)
* which are still online.
*/
scsi_restart_operations(shost);
- scsi_autopm_put_host(shost);
+ if (!shost->eh_noresume)
+ scsi_autopm_put_host(shost);
set_current_state(TASK_INTERRUPTIBLE);
}
__set_current_state(TASK_RUNNING);
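
The scsi_error.c hunk above makes the error-handler thread skip scsi_autopm_get_host()/scsi_autopm_put_host() when shost->eh_noresume is set. Below is a minimal sketch of how a low-level driver that manages runtime PM itself might set that flag at host allocation time; the probe function and host template are hypothetical.

#include <linux/errno.h>
#include <scsi/scsi_host.h>

static int example_probe_host(struct device *dev,
			      struct scsi_host_template *sht)
{
	struct Scsi_Host *shost;

	shost = scsi_host_alloc(sht, 0);
	if (!shost)
		return -ENOMEM;

	/*
	 * Skip scsi_autopm_get_host()/scsi_autopm_put_host() in the
	 * error handler; this driver resumes the hardware itself.
	 */
	shost->eh_noresume = 1;

	return scsi_add_host(shost, dev);
}
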
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f85cfa6..b2c95db 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1316,15 +1316,10 @@ static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
}
if (scsi_target_is_busy(starget)) {
- if (list_empty(&sdev->starved_entry))
- list_add_tail(&sdev->starved_entry,
- &shost->starved_list);
+ list_move_tail(&sdev->starved_entry, &shost->starved_list);
return 0;
}
- /* We're OK to process the command, so we can't be starved */
- if (!list_empty(&sdev->starved_entry))
- list_del_init(&sdev->starved_entry);
return 1;
}
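
The scsi_lib.c hunk above collapses the list_empty()/list_add_tail() and list_del_init() pair into a single list_move_tail(). The toy example below shows why that is safe: moving an entry that currently points at itself (initialised but not on any list) is harmless, so the emptiness check is unnecessary as long as the entry is maintained with list_del_init().

#include <linux/list.h>

struct example_dev {
	struct list_head starved_entry;
};

static void example(struct list_head *starved_list, struct example_dev *sdev)
{
	/* Entry must have been initialised (or kept with list_del_init). */
	INIT_LIST_HEAD(&sdev->starved_entry);

	/* Works whether or not the entry is already on starved_list. */
	list_move_tail(&sdev->starved_entry, starved_list);

	/* Removing it again leaves it self-pointing, ready for reuse. */
	list_del_init(&sdev->starved_entry);
}
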
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c
index 44f76e8..c77628a 100644
--- a/drivers/scsi/scsi_netlink.c
+++ b/drivers/scsi/scsi_netlink.c
@@ -112,7 +112,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
goto next_msg;
}
- if (security_netlink_recv(skb, CAP_SYS_ADMIN)) {
+ if (!capable(CAP_SYS_ADMIN)) {
err = -EPERM;
goto next_msg;
}
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index d329f8b..c467064 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -7,6 +7,7 @@
#include <linux/pm_runtime.h>
#include <linux/export.h>
+#include <linux/async.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -49,8 +50,22 @@ static int scsi_bus_suspend_common(struct device *dev, pm_message_t msg)
{
int err = 0;
- if (scsi_is_sdev_device(dev))
+ if (scsi_is_sdev_device(dev)) {
+ /*
+ * sd is the only high-level SCSI driver to implement runtime
+ * PM, and sd treats runtime suspend, system suspend, and
+ * system hibernate identically (but not system freeze).
+ */
+ if (pm_runtime_suspended(dev)) {
+ if (msg.event == PM_EVENT_SUSPEND ||
+ msg.event == PM_EVENT_HIBERNATE)
+ return 0; /* already suspended */
+
+ /* wake up device so that FREEZE will succeed */
+ pm_runtime_resume(dev);
+ }
err = scsi_dev_type_suspend(dev, msg);
+ }
return err;
}
@@ -58,8 +73,17 @@ static int scsi_bus_resume_common(struct device *dev)
{
int err = 0;
- if (scsi_is_sdev_device(dev))
+ if (scsi_is_sdev_device(dev)) {
+ /*
+ * The parent device may have been runtime suspended as soon as
+ * it was woken up during system resume.
+ *
+ * Resume it on behalf of its child.
+ */
+ pm_runtime_get_sync(dev->parent);
err = scsi_dev_type_resume(dev);
+ pm_runtime_put_sync(dev->parent);
+ }
if (err == 0) {
pm_runtime_disable(dev);
@@ -69,6 +93,19 @@ static int scsi_bus_resume_common(struct device *dev)
return err;
}
+static int scsi_bus_prepare(struct device *dev)
+{
+ if (scsi_is_sdev_device(dev)) {
+ /* sd probing uses async_schedule. Wait until it finishes. */
+ async_synchronize_full();
+
+ } else if (scsi_is_host_device(dev)) {
+ /* Wait until async scanning is finished */
+ scsi_complete_async_scans();
+ }
+ return 0;
+}
+
static int scsi_bus_suspend(struct device *dev)
{
return scsi_bus_suspend_common(dev, PMSG_SUSPEND);
@@ -87,6 +124,7 @@ static int scsi_bus_poweroff(struct device *dev)
#else /* CONFIG_PM_SLEEP */
#define scsi_bus_resume_common NULL
+#define scsi_bus_prepare NULL
#define scsi_bus_suspend NULL
#define scsi_bus_freeze NULL
#define scsi_bus_poweroff NULL
@@ -195,6 +233,7 @@ void scsi_autopm_put_host(struct Scsi_Host *shost)
#endif /* CONFIG_PM_RUNTIME */
const struct dev_pm_ops scsi_bus_pm_ops = {
+ .prepare = scsi_bus_prepare,
.suspend = scsi_bus_suspend,
.resume = scsi_bus_resume_common,
.freeze = scsi_bus_freeze,
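
The scsi_pm.c hunks above add a .prepare callback so asynchronous probing and scanning are flushed before the suspend callbacks run. A generic sketch of that dev_pm_ops pattern follows; the bus name and callbacks are illustrative only.

#include <linux/device.h>
#include <linux/async.h>
#include <linux/pm.h>

static int example_bus_prepare(struct device *dev)
{
	/* Make sure async probing scheduled earlier has completed. */
	async_synchronize_full();
	return 0;
}

static int example_bus_suspend(struct device *dev)
{
	/* By the time we get here, no async probe can race with us. */
	return 0;
}

static const struct dev_pm_ops example_bus_pm_ops = {
	.prepare = example_bus_prepare,
	.suspend = example_bus_suspend,
};
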
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 2a58895..be4fa6d 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -45,7 +45,6 @@ static inline void scsi_log_completion(struct scsi_cmnd *cmd, int disposition)
enum {
SCSI_DEVINFO_GLOBAL = 0,
SCSI_DEVINFO_SPI,
- SCSI_DEVINFO_DH,
};
extern int scsi_get_device_flags(struct scsi_device *sdev,
@@ -110,6 +109,7 @@ extern void scsi_exit_procfs(void);
#endif /* CONFIG_PROC_FS */
/* scsi_scan.c */
+extern int scsi_complete_async_scans(void);
extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int,
unsigned int, unsigned int, int);
extern void scsi_forget_host(struct Scsi_Host *);
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index b3c6d95..29c4c04 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -297,7 +297,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
kfree(sdev);
goto out;
}
- blk_get_queue(sdev->request_queue);
+ WARN_ON_ONCE(!blk_get_queue(sdev->request_queue));
sdev->request_queue->queuedata = sdev;
scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
@@ -1815,6 +1815,7 @@ static void scsi_finish_async_scan(struct async_scan_data *data)
}
spin_unlock(&async_scan_lock);
+ scsi_autopm_put_host(shost);
scsi_host_put(shost);
kfree(data);
}
@@ -1841,7 +1842,6 @@ static int do_scan_async(void *_data)
do_scsi_scan_host(shost);
scsi_finish_async_scan(data);
- scsi_autopm_put_host(shost);
return 0;
}
@@ -1869,7 +1869,7 @@ void scsi_scan_host(struct Scsi_Host *shost)
p = kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no);
if (IS_ERR(p))
do_scan_async(data);
- /* scsi_autopm_put_host(shost) is called in do_scan_async() */
+ /* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */
}
EXPORT_SYMBOL(scsi_scan_host);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 1b21491..f59d4a0 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3048,7 +3048,8 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
spin_lock_irqsave(shost->host_lock, flags);
rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
- FC_RPORT_DEVLOSS_PENDING);
+ FC_RPORT_DEVLOSS_PENDING |
+ FC_RPORT_DEVLOSS_CALLBK_DONE);
spin_unlock_irqrestore(shost->host_lock, flags);
/* ensure any stgt delete functions are done */
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 96029e6..cfd4914 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -328,7 +328,7 @@ iscsi_iface_net_attr(iface, vlan_enabled, ISCSI_NET_PARAM_VLAN_ENABLED);
iscsi_iface_net_attr(iface, mtu, ISCSI_NET_PARAM_MTU);
iscsi_iface_net_attr(iface, port, ISCSI_NET_PARAM_PORT);
-static mode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
+static umode_t iscsi_iface_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -1030,6 +1030,7 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport,
return NULL;
session->transport = transport;
+ session->creator = -1;
session->recovery_tmo = 120;
session->state = ISCSI_SESSION_FREE;
INIT_DELAYED_WORK(&session->recovery_work, session_recovery_timedout);
@@ -1634,8 +1635,9 @@ EXPORT_SYMBOL_GPL(iscsi_session_event);
static int
iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
- struct iscsi_uevent *ev, uint32_t initial_cmdsn,
- uint16_t cmds_max, uint16_t queue_depth)
+ struct iscsi_uevent *ev, pid_t pid,
+ uint32_t initial_cmdsn, uint16_t cmds_max,
+ uint16_t queue_depth)
{
struct iscsi_transport *transport = priv->iscsi_transport;
struct iscsi_cls_session *session;
@@ -1646,6 +1648,7 @@ iscsi_if_create_session(struct iscsi_internal *priv, struct iscsi_endpoint *ep,
if (!session)
return -ENOMEM;
+ session->creator = pid;
shost = iscsi_session_to_shost(session);
ev->r.c_session_ret.host_no = shost->host_no;
ev->r.c_session_ret.sid = session->sid;
@@ -1938,6 +1941,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
switch (nlh->nlmsg_type) {
case ISCSI_UEVENT_CREATE_SESSION:
err = iscsi_if_create_session(priv, ep, ev,
+ NETLINK_CREDS(skb)->pid,
ev->u.c_session.initial_cmdsn,
ev->u.c_session.cmds_max,
ev->u.c_session.queue_depth);
@@ -1950,6 +1954,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
}
err = iscsi_if_create_session(priv, ep, ev,
+ NETLINK_CREDS(skb)->pid,
ev->u.c_bound_session.initial_cmdsn,
ev->u.c_bound_session.cmds_max,
ev->u.c_bound_session.queue_depth);
@@ -2199,7 +2204,7 @@ static struct attribute *iscsi_conn_attrs[] = {
NULL,
};
-static mode_t iscsi_conn_attr_is_visible(struct kobject *kobj,
+static umode_t iscsi_conn_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
struct device *cdev = container_of(kobj, struct device, kobj);
@@ -2298,6 +2303,15 @@ show_priv_session_state(struct device *dev, struct device_attribute *attr,
}
static ISCSI_CLASS_ATTR(priv_sess, state, S_IRUGO, show_priv_session_state,
NULL);
+static ssize_t
+show_priv_session_creator(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
+ return sprintf(buf, "%d\n", session->creator);
+}
+static ISCSI_CLASS_ATTR(priv_sess, creator, S_IRUGO, show_priv_session_creator,
+ NULL);
#define iscsi_priv_session_attr_show(field, format) \
static ssize_t \
@@ -2367,10 +2381,11 @@ static struct attribute *iscsi_session_attrs[] = {
&dev_attr_sess_targetalias.attr,
&dev_attr_priv_sess_recovery_tmo.attr,
&dev_attr_priv_sess_state.attr,
+ &dev_attr_priv_sess_creator.attr,
NULL,
};
-static mode_t iscsi_session_attr_is_visible(struct kobject *kobj,
+static umode_t iscsi_session_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
struct device *cdev = container_of(kobj, struct device, kobj);
@@ -2424,6 +2439,8 @@ static mode_t iscsi_session_attr_is_visible(struct kobject *kobj,
return S_IRUGO | S_IWUSR;
else if (attr == &dev_attr_priv_sess_state.attr)
return S_IRUGO;
+ else if (attr == &dev_attr_priv_sess_creator.attr)
+ return S_IRUGO;
else {
WARN_ONCE(1, "Invalid session attr");
return 0;
@@ -2468,7 +2485,7 @@ static struct attribute *iscsi_host_attrs[] = {
NULL,
};
-static mode_t iscsi_host_attr_is_visible(struct kobject *kobj,
+static umode_t iscsi_host_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
struct device *cdev = container_of(kobj, struct device, kobj);
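
Two changes recur in the scsi_transport_iscsi.c hunks above: the sysfs .is_visible callbacks now return umode_t instead of mode_t, and a read-only "creator" attribute exposes the pid captured from NETLINK_CREDS() at session creation. The sketch below mirrors both in a self-contained attribute group; the structure and attribute names are illustrative.

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/sysfs.h>
#include <linux/stat.h>

struct example_session {
	struct device dev;
	pid_t creator;		/* pid recorded when the session was created */
};

static ssize_t show_creator(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct example_session *sess =
		container_of(dev, struct example_session, dev);

	return sprintf(buf, "%d\n", sess->creator);
}
static DEVICE_ATTR(creator, S_IRUGO, show_creator, NULL);

static struct attribute *example_attrs[] = {
	&dev_attr_creator.attr,
	NULL,
};

/* Note the umode_t return type required by newer kernels. */
static umode_t example_attr_is_visible(struct kobject *kobj,
				       struct attribute *attr, int i)
{
	if (attr == &dev_attr_creator.attr)
		return S_IRUGO;
	return 0;
}

static const struct attribute_group example_attr_group = {
	.attrs = example_attrs,
	.is_visible = example_attr_is_visible,
};
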
diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c
index 5fbeadd..a2715c3 100644
--- a/drivers/scsi/scsi_transport_spi.c
+++ b/drivers/scsi/scsi_transport_spi.c
@@ -1434,7 +1434,7 @@ static int spi_host_configure(struct transport_container *tc,
(si->f->show_##name ? S_IRUGO : 0) | \
(si->f->set_##name ? S_IWUSR : 0)
-static mode_t target_attribute_is_visible(struct kobject *kobj,
+static umode_t target_attribute_is_visible(struct kobject *kobj,
struct attribute *attr, int i)
{
struct device *cdev = container_of(kobj, struct device, kobj);
diff --git a/drivers/scsi/scsicam.c b/drivers/scsi/scsicam.c
index 6803b1e..92d24d6 100644
--- a/drivers/scsi/scsicam.c
+++ b/drivers/scsi/scsicam.c
@@ -16,7 +16,6 @@
#include <linux/genhd.h>
#include <linux/kernel.h>
#include <linux/blkdev.h>
-#include <linux/buffer_head.h>
#include <asm/unaligned.h>
#include <scsi/scsicam.h>
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index fa3a591..c691fb5 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -50,6 +50,7 @@
#include <linux/string_helpers.h>
#include <linux/async.h>
#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
@@ -1074,6 +1075,10 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, "
"cmd=0x%x\n", disk->disk_name, cmd));
+ error = scsi_verify_blk_ioctl(bdev, cmd);
+ if (error < 0)
+ return error;
+
/*
* If we are in the middle of error recovery, don't let anyone
* else try and use this device. Also, if error recovery fails, it
@@ -1096,7 +1101,7 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode,
error = scsi_ioctl(sdp, cmd, p);
break;
default:
- error = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, p);
+ error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p);
if (error != -ENOTTY)
break;
error = scsi_ioctl(sdp, cmd, p);
@@ -1266,6 +1271,11 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
+ int ret;
+
+ ret = scsi_verify_blk_ioctl(bdev, cmd);
+ if (ret < 0)
+ return ret;
/*
* If we are in the middle of error recovery, don't let anyone
@@ -1277,8 +1287,6 @@ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode,
return -ENODEV;
if (sdev->host->hostt->compat_ioctl) {
- int ret;
-
ret = sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg);
return ret;
@@ -2741,6 +2749,9 @@ static void sd_shutdown(struct device *dev)
if (!sdkp)
return; /* this can happen */
+ if (pm_runtime_suspended(dev))
+ goto exit;
+
if (sdkp->WCE) {
sd_printk(KERN_NOTICE, sdkp, "Synchronizing SCSI cache\n");
sd_sync_cache(sdkp);
@@ -2751,6 +2762,7 @@ static void sd_shutdown(struct device *dev)
sd_start_stop_device(sdkp, 0);
}
+exit:
scsi_disk_put(sdkp);
}
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 0cb39ff..f8fb2d6 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -408,7 +408,7 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
kunmap_atomic(sdt, KM_USER0);
}
- bio->bi_flags |= BIO_MAPPED_INTEGRITY;
+ bio->bi_flags |= (1 << BIO_MAPPED_INTEGRITY);
}
return 0;
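
The sd_dif.c fix above turns a bit number into a bit mask before OR-ing it into bi_flags. The plain-C toy below shows the difference; the bit number is illustrative.

#include <stdio.h>

#define EXAMPLE_FLAG_BIT 10		/* a bit *number*, like BIO_MAPPED_INTEGRITY */

int main(void)
{
	unsigned long flags = 0;

	flags |= EXAMPLE_FLAG_BIT;		/* wrong: sets bits 1 and 3 (0xa) */
	printf("buggy:   %#lx\n", flags);

	flags = 0;
	flags |= 1UL << EXAMPLE_FLAG_BIT;	/* right: sets bit 10 (0x400) */
	printf("correct: %#lx\n", flags);

	return 0;
}
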
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 441a1c5..eacd46b 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -2325,16 +2325,15 @@ static struct sg_proc_leaf sg_proc_leaf_arr[] = {
static int
sg_proc_init(void)
{
- int k, mask;
int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
- struct sg_proc_leaf * leaf;
+ int k;
sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
if (!sg_proc_sgp)
return 1;
for (k = 0; k < num_leaves; ++k) {
- leaf = &sg_proc_leaf_arr[k];
- mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
+ struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k];
+ umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
}
return 0;
@@ -2369,16 +2368,15 @@ static ssize_t
sg_proc_write_adio(struct file *filp, const char __user *buffer,
size_t count, loff_t *off)
{
- int num;
- char buff[11];
+ int err;
+ unsigned long num;
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
- num = (count < 10) ? count : 10;
- if (copy_from_user(buff, buffer, num))
- return -EFAULT;
- buff[num] = '\0';
- sg_allow_dio = simple_strtoul(buff, NULL, 10) ? 1 : 0;
+ err = kstrtoul_from_user(buffer, count, 0, &num);
+ if (err)
+ return err;
+ sg_allow_dio = num ? 1 : 0;
return count;
}
@@ -2391,17 +2389,15 @@ static ssize_t
sg_proc_write_dressz(struct file *filp, const char __user *buffer,
size_t count, loff_t *off)
{
- int num;
+ int err;
unsigned long k = ULONG_MAX;
- char buff[11];
if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
return -EACCES;
- num = (count < 10) ? count : 10;
- if (copy_from_user(buff, buffer, num))
- return -EFAULT;
- buff[num] = '\0';
- k = simple_strtoul(buff, NULL, 10);
+
+ err = kstrtoul_from_user(buffer, count, 0, &k);
+ if (err)
+ return err;
if (k <= 1048576) { /* limit "big buff" to 1 MB */
sg_big_buff = k;
return count;
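
The sg.c hunks above replace the bounded copy_from_user()/simple_strtoul() sequence with kstrtoul_from_user(), which copies, NUL-terminates and parses in one call and rejects malformed input. A minimal sketch of that pattern in a write handler follows; the variable is illustrative (the 1 MB cap matches the dressz handler above).

#include <linux/kernel.h>
#include <linux/uaccess.h>
#include <linux/fs.h>

static unsigned long example_bufsz = 32768;

static ssize_t example_write(struct file *filp, const char __user *buffer,
			     size_t count, loff_t *off)
{
	unsigned long val;
	int err;

	/* base 0 accepts decimal, 0x-prefixed hex and 0-prefixed octal */
	err = kstrtoul_from_user(buffer, count, 0, &val);
	if (err)
		return err;

	if (val > 1048576)		/* limit "big buff" to 1 MB */
		return -EINVAL;

	example_bufsz = val;
	return count;
}
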
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index 9acc2b2..cf51432 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -51,7 +51,7 @@
#include "53c700.h"
-MODULE_AUTHOR("Thomas Bogendrfer");
+MODULE_AUTHOR("Thomas Bogendörfer");
MODULE_DESCRIPTION("SNI RM 53c710 SCSI Driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:snirm_53c710");
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index b4543f5..36d1ed7 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -839,6 +839,10 @@ static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
struct sym_lcb *lp = sym_lp(tp, sdev->lun);
unsigned long flags;
+ /* if slave_alloc returned before allocating a sym_lcb, return */
+ if (!lp)
+ return;
+
spin_lock_irqsave(np->s.host->host_lock, flags);
if (lp->busy_itlq || lp->busy_itl) {
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index a18996d..7264116 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -1144,7 +1144,7 @@ static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
*
* These are statically allocated. Trying to be clever was not worth it.
*
- * Dynamic allocation can fail, and we can't go deeep into the memory
+ * Dynamic allocation can fail, and we can't go deep into the memory
* allocator, since we're a SCSI driver, and trying too hard to allocate
* memory might generate disk I/O. We also don't want to fail disk I/O
* in that case because we can't get an allocation - the I/O could be
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index 67e272a..7139ad2 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -7,11 +7,4 @@ obj-$(CONFIG_HAVE_CLK) += clk/
obj-$(CONFIG_MAPLE) += maple/
obj-$(CONFIG_SUPERHYWAY) += superhyway/
obj-$(CONFIG_GENERIC_GPIO) += pfc.o
-
-#
-# For the moment we only use this framework for ARM-based SH/R-Mobile
-# platforms and generic SH. SH-based SH-Mobile platforms are still using
-# an older framework that is pending up-porting, at which point this
-# special casing can go away.
-#
-obj-$(CONFIG_SUPERH)$(CONFIG_ARCH_SHMOBILE) += pm_runtime.o
+obj-y += pm_runtime.o
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c
index db257a3..7715de2 100644
--- a/drivers/sh/clk/core.c
+++ b/drivers/sh/clk/core.c
@@ -355,7 +355,7 @@ static int clk_establish_mapping(struct clk *clk)
*/
if (!clk->parent) {
clk->mapping = &dummy_mapping;
- return 0;
+ goto out;
}
/*
@@ -384,6 +384,9 @@ static int clk_establish_mapping(struct clk *clk)
}
clk->mapping = mapping;
+out:
+ clk->mapped_reg = clk->mapping->base;
+ clk->mapped_reg += (phys_addr_t)clk->enable_reg - clk->mapping->phys;
return 0;
}
@@ -402,10 +405,12 @@ static void clk_teardown_mapping(struct clk *clk)
/* Nothing to do */
if (mapping == &dummy_mapping)
- return;
+ goto out;
kref_put(&mapping->ref, clk_destroy_mapping);
clk->mapping = NULL;
+out:
+ clk->mapped_reg = NULL;
}
int clk_register(struct clk *clk)
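
The clk/core.c hunk above derives clk->mapped_reg from the shared mapping: the virtual address is the mapping base plus the register's offset from the window's physical base. A small sketch of that translation, with illustrative structure names:

#include <linux/io.h>
#include <linux/types.h>

struct example_mapping {
	void __iomem *base;	/* ioremapped virtual base */
	phys_addr_t phys;	/* physical base of the window */
	unsigned long len;
};

static void __iomem *example_map_reg(struct example_mapping *map,
				     phys_addr_t reg)
{
	/* reg must lie inside [phys, phys + len) for this to be valid */
	return map->base + (reg - map->phys);
}
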
diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c
index 82dd6fb..92d314a 100644
--- a/drivers/sh/clk/cpg.c
+++ b/drivers/sh/clk/cpg.c
@@ -15,15 +15,15 @@
static int sh_clk_mstp32_enable(struct clk *clk)
{
- __raw_writel(__raw_readl(clk->enable_reg) & ~(1 << clk->enable_bit),
- clk->enable_reg);
+ iowrite32(ioread32(clk->mapped_reg) & ~(1 << clk->enable_bit),
+ clk->mapped_reg);
return 0;
}
static void sh_clk_mstp32_disable(struct clk *clk)
{
- __raw_writel(__raw_readl(clk->enable_reg) | (1 << clk->enable_bit),
- clk->enable_reg);
+ iowrite32(ioread32(clk->mapped_reg) | (1 << clk->enable_bit),
+ clk->mapped_reg);
}
static struct clk_ops sh_clk_mstp32_clk_ops = {
@@ -72,7 +72,7 @@ static unsigned long sh_clk_div6_recalc(struct clk *clk)
clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
table, NULL);
- idx = __raw_readl(clk->enable_reg) & 0x003f;
+ idx = ioread32(clk->mapped_reg) & 0x003f;
return clk->freq_table[idx].frequency;
}
@@ -98,10 +98,10 @@ static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
if (ret < 0)
return ret;
- value = __raw_readl(clk->enable_reg) &
+ value = ioread32(clk->mapped_reg) &
~(((1 << clk->src_width) - 1) << clk->src_shift);
- __raw_writel(value | (i << clk->src_shift), clk->enable_reg);
+ iowrite32(value | (i << clk->src_shift), clk->mapped_reg);
/* Rebuild the frequency table */
clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
@@ -119,10 +119,10 @@ static int sh_clk_div6_set_rate(struct clk *clk, unsigned long rate)
if (idx < 0)
return idx;
- value = __raw_readl(clk->enable_reg);
+ value = ioread32(clk->mapped_reg);
value &= ~0x3f;
value |= idx;
- __raw_writel(value, clk->enable_reg);
+ iowrite32(value, clk->mapped_reg);
return 0;
}
@@ -133,9 +133,9 @@ static int sh_clk_div6_enable(struct clk *clk)
ret = sh_clk_div6_set_rate(clk, clk->rate);
if (ret == 0) {
- value = __raw_readl(clk->enable_reg);
+ value = ioread32(clk->mapped_reg);
value &= ~0x100; /* clear stop bit to enable clock */
- __raw_writel(value, clk->enable_reg);
+ iowrite32(value, clk->mapped_reg);
}
return ret;
}
@@ -144,10 +144,10 @@ static void sh_clk_div6_disable(struct clk *clk)
{
unsigned long value;
- value = __raw_readl(clk->enable_reg);
+ value = ioread32(clk->mapped_reg);
value |= 0x100; /* stop clock */
value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */
- __raw_writel(value, clk->enable_reg);
+ iowrite32(value, clk->mapped_reg);
}
static struct clk_ops sh_clk_div6_clk_ops = {
@@ -167,6 +167,38 @@ static struct clk_ops sh_clk_div6_reparent_clk_ops = {
.set_parent = sh_clk_div6_set_parent,
};
+static int __init sh_clk_init_parent(struct clk *clk)
+{
+ u32 val;
+
+ if (clk->parent)
+ return 0;
+
+ if (!clk->parent_table || !clk->parent_num)
+ return 0;
+
+ if (!clk->src_width) {
+ pr_err("sh_clk_init_parent: cannot select parent clock\n");
+ return -EINVAL;
+ }
+
+ val = (ioread32(clk->mapped_reg) >> clk->src_shift);
+ val &= (1 << clk->src_width) - 1;
+
+ if (val >= clk->parent_num) {
+ pr_err("sh_clk_init_parent: parent table size failed\n");
+ return -EINVAL;
+ }
+
+ clk_reparent(clk, clk->parent_table[val]);
+ if (!clk->parent) {
+ pr_err("sh_clk_init_parent: unable to set parent");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
struct clk_ops *ops)
{
@@ -190,8 +222,11 @@ static int __init sh_clk_div6_register_ops(struct clk *clks, int nr,
clkp->ops = ops;
clkp->freq_table = freq_table + (k * freq_table_size);
clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
-
ret = clk_register(clkp);
+ if (ret < 0)
+ break;
+
+ ret = sh_clk_init_parent(clkp);
}
return ret;
@@ -217,7 +252,7 @@ static unsigned long sh_clk_div4_recalc(struct clk *clk)
clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
table, &clk->arch_flags);
- idx = (__raw_readl(clk->enable_reg) >> clk->enable_bit) & 0x000f;
+ idx = (ioread32(clk->mapped_reg) >> clk->enable_bit) & 0x000f;
return clk->freq_table[idx].frequency;
}
@@ -235,15 +270,15 @@ static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
*/
if (parent->flags & CLK_ENABLE_ON_INIT)
- value = __raw_readl(clk->enable_reg) & ~(1 << 7);
+ value = ioread32(clk->mapped_reg) & ~(1 << 7);
else
- value = __raw_readl(clk->enable_reg) | (1 << 7);
+ value = ioread32(clk->mapped_reg) | (1 << 7);
ret = clk_reparent(clk, parent);
if (ret < 0)
return ret;
- __raw_writel(value, clk->enable_reg);
+ iowrite32(value, clk->mapped_reg);
 /* Rebuild the frequency table */
clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
@@ -260,10 +295,10 @@ static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate)
if (idx < 0)
return idx;
- value = __raw_readl(clk->enable_reg);
+ value = ioread32(clk->mapped_reg);
value &= ~(0xf << clk->enable_bit);
value |= (idx << clk->enable_bit);
- __raw_writel(value, clk->enable_reg);
+ iowrite32(value, clk->mapped_reg);
if (d4t->kick)
d4t->kick(clk);
@@ -273,13 +308,13 @@ static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate)
static int sh_clk_div4_enable(struct clk *clk)
{
- __raw_writel(__raw_readl(clk->enable_reg) & ~(1 << 8), clk->enable_reg);
+ iowrite32(ioread32(clk->mapped_reg) & ~(1 << 8), clk->mapped_reg);
return 0;
}
static void sh_clk_div4_disable(struct clk *clk)
{
- __raw_writel(__raw_readl(clk->enable_reg) | (1 << 8), clk->enable_reg);
+ iowrite32(ioread32(clk->mapped_reg) | (1 << 8), clk->mapped_reg);
}
static struct clk_ops sh_clk_div4_clk_ops = {
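
sh_clk_init_parent(), added above, reads the parent-select field back from the hardware and uses it to index the parent table. A pared-down sketch of that readback; the structure here is illustrative, not the real struct clk.

#include <linux/io.h>
#include <linux/types.h>

struct example_clk {
	void __iomem *mapped_reg;
	unsigned int src_shift;
	unsigned int src_width;
	struct example_clk **parent_table;
	unsigned int parent_num;
};

static struct example_clk *example_read_parent(struct example_clk *clk)
{
	u32 val;

	/* Extract src_width bits at src_shift from the mapped register. */
	val = ioread32(clk->mapped_reg) >> clk->src_shift;
	val &= (1 << clk->src_width) - 1;

	if (val >= clk->parent_num)
		return NULL;	/* hardware points past the table */

	return clk->parent_table[val];
}
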
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index 8b7a141..e53e449 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -25,7 +25,7 @@
#include <linux/stat.h>
#include <linux/interrupt.h>
#include <linux/sh_intc.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/list.h>
#include <linux/spinlock.h>
@@ -354,6 +354,8 @@ int __init register_intc_controller(struct intc_desc *desc)
if (desc->force_enable)
intc_enable_disable_enum(desc, d, desc->force_enable, 1);
+ d->skip_suspend = desc->skip_syscore_suspend;
+
nr_intc_controllers++;
return 0;
@@ -386,6 +388,9 @@ static int intc_suspend(void)
list_for_each_entry(d, &intc_list, list) {
int irq;
+ if (d->skip_suspend)
+ continue;
+
/* enable wakeup irqs belonging to this intc controller */
for_each_active_irq(irq) {
struct irq_data *data;
@@ -409,6 +414,9 @@ static void intc_resume(void)
list_for_each_entry(d, &intc_list, list) {
int irq;
+ if (d->skip_suspend)
+ continue;
+
for_each_active_irq(irq) {
struct irq_data *data;
struct irq_chip *chip;
@@ -434,46 +442,47 @@ struct syscore_ops intc_syscore_ops = {
.resume = intc_resume,
};
-struct sysdev_class intc_sysdev_class = {
+struct bus_type intc_subsys = {
.name = "intc",
+ .dev_name = "intc",
};
static ssize_t
-show_intc_name(struct sys_device *dev, struct sysdev_attribute *attr, char *buf)
+show_intc_name(struct device *dev, struct device_attribute *attr, char *buf)
{
struct intc_desc_int *d;
- d = container_of(dev, struct intc_desc_int, sysdev);
+ d = container_of(dev, struct intc_desc_int, dev);
return sprintf(buf, "%s\n", d->chip.name);
}
-static SYSDEV_ATTR(name, S_IRUGO, show_intc_name, NULL);
+static DEVICE_ATTR(name, S_IRUGO, show_intc_name, NULL);
-static int __init register_intc_sysdevs(void)
+static int __init register_intc_devs(void)
{
struct intc_desc_int *d;
int error;
register_syscore_ops(&intc_syscore_ops);
- error = sysdev_class_register(&intc_sysdev_class);
+ error = subsys_system_register(&intc_subsys, NULL);
if (!error) {
list_for_each_entry(d, &intc_list, list) {
- d->sysdev.id = d->index;
- d->sysdev.cls = &intc_sysdev_class;
- error = sysdev_register(&d->sysdev);
+ d->dev.id = d->index;
+ d->dev.bus = &intc_subsys;
+ error = device_register(&d->dev);
if (error == 0)
- error = sysdev_create_file(&d->sysdev,
- &attr_name);
+ error = device_create_file(&d->dev,
+ &dev_attr_name);
if (error)
break;
}
}
if (error)
- pr_err("sysdev registration error\n");
+ pr_err("device registration error\n");
return error;
}
-device_initcall(register_intc_sysdevs);
+device_initcall(register_intc_devs);
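
The intc/core.c conversion above replaces sysdev_class/sysdev_register with a bus_type registered through subsys_system_register() plus ordinary device and device_attribute calls. A condensed, illustrative sketch of that registration sequence:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/stat.h>
#include <linux/init.h>

static struct bus_type example_subsys = {
	.name = "example",
	.dev_name = "example",
};

static ssize_t show_name(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	return sprintf(buf, "%s\n", dev_name(dev));
}
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);

static struct device example_dev;

static int __init example_register(void)
{
	int error;

	error = subsys_system_register(&example_subsys, NULL);
	if (error)
		return error;

	example_dev.id = 0;
	example_dev.bus = &example_subsys;
	error = device_register(&example_dev);
	if (!error)
		error = device_create_file(&example_dev, &dev_attr_name);
	return error;
}
device_initcall(example_register);
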
diff --git a/drivers/sh/intc/internals.h b/drivers/sh/intc/internals.h
index 5b93485..b0e9155 100644
--- a/drivers/sh/intc/internals.h
+++ b/drivers/sh/intc/internals.h
@@ -4,7 +4,7 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/radix-tree.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
@@ -51,7 +51,7 @@ struct intc_subgroup_entry {
struct intc_desc_int {
struct list_head list;
- struct sys_device sysdev;
+ struct device dev;
struct radix_tree_root tree;
raw_spinlock_t lock;
unsigned int index;
@@ -67,6 +67,7 @@ struct intc_desc_int {
struct intc_window *window;
unsigned int nr_windows;
struct irq_chip chip;
+ bool skip_suspend;
};
@@ -157,7 +158,7 @@ void _intc_enable(struct irq_data *data, unsigned long handle);
extern struct list_head intc_list;
extern raw_spinlock_t intc_big_lock;
extern unsigned int nr_intc_controllers;
-extern struct sysdev_class intc_sysdev_class;
+extern struct bus_type intc_subsys;
unsigned int intc_get_dfl_prio_level(void);
unsigned int intc_get_prio_level(unsigned int irq);
diff --git a/drivers/sh/intc/userimask.c b/drivers/sh/intc/userimask.c
index 56bf933..e649cea 100644
--- a/drivers/sh/intc/userimask.c
+++ b/drivers/sh/intc/userimask.c
@@ -10,7 +10,7 @@
#define pr_fmt(fmt) "intc: " fmt
#include <linux/errno.h>
-#include <linux/sysdev.h>
+#include <linux/device.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/stat.h>
@@ -20,15 +20,15 @@
static void __iomem *uimask;
static ssize_t
-show_intc_userimask(struct sysdev_class *cls,
- struct sysdev_class_attribute *attr, char *buf)
+show_intc_userimask(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf);
}
static ssize_t
-store_intc_userimask(struct sysdev_class *cls,
- struct sysdev_class_attribute *attr,
+store_intc_userimask(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t count)
{
unsigned long level;
@@ -55,8 +55,8 @@ store_intc_userimask(struct sysdev_class *cls,
return count;
}
-static SYSDEV_CLASS_ATTR(userimask, S_IRUSR | S_IWUSR,
- show_intc_userimask, store_intc_userimask);
+static DEVICE_ATTR(userimask, S_IRUSR | S_IWUSR,
+ show_intc_userimask, store_intc_userimask);
static int __init userimask_sysdev_init(void)
@@ -64,7 +64,7 @@ static int __init userimask_sysdev_init(void)
if (unlikely(!uimask))
return -ENXIO;
- return sysdev_class_create_file(&intc_sysdev_class, &attr_userimask);
+ return device_create_file(intc_subsys.dev_root, &dev_attr_userimask);
}
late_initcall(userimask_sysdev_init);
diff --git a/drivers/sh/pfc.c b/drivers/sh/pfc.c
index e67fe17..522c6c4 100644
--- a/drivers/sh/pfc.c
+++ b/drivers/sh/pfc.c
@@ -19,6 +19,75 @@
#include <linux/irq.h>
#include <linux/bitops.h>
#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/ioport.h>
+
+static void pfc_iounmap(struct pinmux_info *pip)
+{
+ int k;
+
+ for (k = 0; k < pip->num_resources; k++)
+ if (pip->window[k].virt)
+ iounmap(pip->window[k].virt);
+
+ kfree(pip->window);
+ pip->window = NULL;
+}
+
+static int pfc_ioremap(struct pinmux_info *pip)
+{
+ struct resource *res;
+ int k;
+
+ if (!pip->num_resources)
+ return 0;
+
+ pip->window = kzalloc(pip->num_resources * sizeof(*pip->window),
+ GFP_NOWAIT);
+ if (!pip->window)
+ goto err1;
+
+ for (k = 0; k < pip->num_resources; k++) {
+ res = pip->resource + k;
+ WARN_ON(resource_type(res) != IORESOURCE_MEM);
+ pip->window[k].phys = res->start;
+ pip->window[k].size = resource_size(res);
+ pip->window[k].virt = ioremap_nocache(res->start,
+ resource_size(res));
+ if (!pip->window[k].virt)
+ goto err2;
+ }
+
+ return 0;
+
+err2:
+ pfc_iounmap(pip);
+err1:
+ return -1;
+}
+
+static void __iomem *pfc_phys_to_virt(struct pinmux_info *pip,
+ unsigned long address)
+{
+ struct pfc_window *window;
+ int k;
+
+ /* scan through physical windows and convert address */
+ for (k = 0; k < pip->num_resources; k++) {
+ window = pip->window + k;
+
+ if (address < window->phys)
+ continue;
+
+ if (address >= (window->phys + window->size))
+ continue;
+
+ return window->virt + (address - window->phys);
+ }
+
+ /* no windows defined, register must be 1:1 mapped virt:phys */
+ return (void __iomem *)address;
+}
static int enum_in_range(pinmux_enum_t enum_id, struct pinmux_range *r)
{
@@ -31,41 +100,54 @@ static int enum_in_range(pinmux_enum_t enum_id, struct pinmux_range *r)
return 1;
}
-static unsigned long gpio_read_raw_reg(unsigned long reg,
+static unsigned long gpio_read_raw_reg(void __iomem *mapped_reg,
unsigned long reg_width)
{
switch (reg_width) {
case 8:
- return __raw_readb(reg);
+ return ioread8(mapped_reg);
case 16:
- return __raw_readw(reg);
+ return ioread16(mapped_reg);
case 32:
- return __raw_readl(reg);
+ return ioread32(mapped_reg);
}
BUG();
return 0;
}
-static void gpio_write_raw_reg(unsigned long reg,
+static void gpio_write_raw_reg(void __iomem *mapped_reg,
unsigned long reg_width,
unsigned long data)
{
switch (reg_width) {
case 8:
- __raw_writeb(data, reg);
+ iowrite8(data, mapped_reg);
return;
case 16:
- __raw_writew(data, reg);
+ iowrite16(data, mapped_reg);
return;
case 32:
- __raw_writel(data, reg);
+ iowrite32(data, mapped_reg);
return;
}
BUG();
}
+static int gpio_read_bit(struct pinmux_data_reg *dr,
+ unsigned long in_pos)
+{
+ unsigned long pos;
+
+ pos = dr->reg_width - (in_pos + 1);
+
+ pr_debug("read_bit: addr = %lx, pos = %ld, "
+ "r_width = %ld\n", dr->reg, pos, dr->reg_width);
+
+ return (gpio_read_raw_reg(dr->mapped_reg, dr->reg_width) >> pos) & 1;
+}
+
static void gpio_write_bit(struct pinmux_data_reg *dr,
unsigned long in_pos, unsigned long value)
{
@@ -82,53 +164,72 @@ static void gpio_write_bit(struct pinmux_data_reg *dr,
else
clear_bit(pos, &dr->reg_shadow);
- gpio_write_raw_reg(dr->reg, dr->reg_width, dr->reg_shadow);
+ gpio_write_raw_reg(dr->mapped_reg, dr->reg_width, dr->reg_shadow);
}
-static int gpio_read_reg(unsigned long reg, unsigned long reg_width,
- unsigned long field_width, unsigned long in_pos)
+static void config_reg_helper(struct pinmux_info *gpioc,
+ struct pinmux_cfg_reg *crp,
+ unsigned long in_pos,
+ void __iomem **mapped_regp,
+ unsigned long *maskp,
+ unsigned long *posp)
{
- unsigned long data, mask, pos;
+ int k;
+
+ *mapped_regp = pfc_phys_to_virt(gpioc, crp->reg);
- data = 0;
- mask = (1 << field_width) - 1;
- pos = reg_width - ((in_pos + 1) * field_width);
+ if (crp->field_width) {
+ *maskp = (1 << crp->field_width) - 1;
+ *posp = crp->reg_width - ((in_pos + 1) * crp->field_width);
+ } else {
+ *maskp = (1 << crp->var_field_width[in_pos]) - 1;
+ *posp = crp->reg_width;
+ for (k = 0; k <= in_pos; k++)
+ *posp -= crp->var_field_width[k];
+ }
+}
- pr_debug("read_reg: addr = %lx, pos = %ld, "
+static int read_config_reg(struct pinmux_info *gpioc,
+ struct pinmux_cfg_reg *crp,
+ unsigned long field)
+{
+ void __iomem *mapped_reg;
+ unsigned long mask, pos;
+
+ config_reg_helper(gpioc, crp, field, &mapped_reg, &mask, &pos);
+
+ pr_debug("read_reg: addr = %lx, field = %ld, "
"r_width = %ld, f_width = %ld\n",
- reg, pos, reg_width, field_width);
+ crp->reg, field, crp->reg_width, crp->field_width);
- data = gpio_read_raw_reg(reg, reg_width);
- return (data >> pos) & mask;
+ return (gpio_read_raw_reg(mapped_reg, crp->reg_width) >> pos) & mask;
}
-static void gpio_write_reg(unsigned long reg, unsigned long reg_width,
- unsigned long field_width, unsigned long in_pos,
- unsigned long value)
+static void write_config_reg(struct pinmux_info *gpioc,
+ struct pinmux_cfg_reg *crp,
+ unsigned long field, unsigned long value)
{
- unsigned long mask, pos;
+ void __iomem *mapped_reg;
+ unsigned long mask, pos, data;
- mask = (1 << field_width) - 1;
- pos = reg_width - ((in_pos + 1) * field_width);
+ config_reg_helper(gpioc, crp, field, &mapped_reg, &mask, &pos);
- pr_debug("write_reg addr = %lx, value = %ld, pos = %ld, "
+ pr_debug("write_reg addr = %lx, value = %ld, field = %ld, "
"r_width = %ld, f_width = %ld\n",
- reg, value, pos, reg_width, field_width);
+ crp->reg, value, field, crp->reg_width, crp->field_width);
mask = ~(mask << pos);
value = value << pos;
- switch (reg_width) {
- case 8:
- __raw_writeb((__raw_readb(reg) & mask) | value, reg);
- break;
- case 16:
- __raw_writew((__raw_readw(reg) & mask) | value, reg);
- break;
- case 32:
- __raw_writel((__raw_readl(reg) & mask) | value, reg);
- break;
- }
+ data = gpio_read_raw_reg(mapped_reg, crp->reg_width);
+ data &= mask;
+ data |= value;
+
+ if (gpioc->unlock_reg)
+ gpio_write_raw_reg(pfc_phys_to_virt(gpioc, gpioc->unlock_reg),
+ 32, ~data);
+
+ gpio_write_raw_reg(mapped_reg, crp->reg_width, data);
}
static int setup_data_reg(struct pinmux_info *gpioc, unsigned gpio)
@@ -147,6 +248,8 @@ static int setup_data_reg(struct pinmux_info *gpioc, unsigned gpio)
if (!data_reg->reg_width)
break;
+ data_reg->mapped_reg = pfc_phys_to_virt(gpioc, data_reg->reg);
+
for (n = 0; n < data_reg->reg_width; n++) {
if (data_reg->enum_ids[n] == gpiop->enum_id) {
gpiop->flags &= ~PINMUX_FLAG_DREG;
@@ -179,7 +282,8 @@ static void setup_data_regs(struct pinmux_info *gpioc)
if (!drp->reg_width)
break;
- drp->reg_shadow = gpio_read_raw_reg(drp->reg, drp->reg_width);
+ drp->reg_shadow = gpio_read_raw_reg(drp->mapped_reg,
+ drp->reg_width);
k++;
}
}
@@ -201,12 +305,13 @@ static int get_data_reg(struct pinmux_info *gpioc, unsigned gpio,
}
static int get_config_reg(struct pinmux_info *gpioc, pinmux_enum_t enum_id,
- struct pinmux_cfg_reg **crp, int *indexp,
+ struct pinmux_cfg_reg **crp,
+ int *fieldp, int *valuep,
unsigned long **cntp)
{
struct pinmux_cfg_reg *config_reg;
- unsigned long r_width, f_width;
- int k, n;
+ unsigned long r_width, f_width, curr_width, ncomb;
+ int k, m, n, pos, bit_pos;
k = 0;
while (1) {
@@ -217,13 +322,27 @@ static int get_config_reg(struct pinmux_info *gpioc, pinmux_enum_t enum_id,
if (!r_width)
break;
- for (n = 0; n < (r_width / f_width) * (1 << f_width); n++) {
- if (config_reg->enum_ids[n] == enum_id) {
- *crp = config_reg;
- *indexp = n;
- *cntp = &config_reg->cnt[n / (1 << f_width)];
- return 0;
+
+ pos = 0;
+ m = 0;
+ for (bit_pos = 0; bit_pos < r_width; bit_pos += curr_width) {
+ if (f_width)
+ curr_width = f_width;
+ else
+ curr_width = config_reg->var_field_width[m];
+
+ ncomb = 1 << curr_width;
+ for (n = 0; n < ncomb; n++) {
+ if (config_reg->enum_ids[pos + n] == enum_id) {
+ *crp = config_reg;
+ *fieldp = m;
+ *valuep = n;
+ *cntp = &config_reg->cnt[m];
+ return 0;
+ }
}
+ pos += ncomb;
+ m++;
}
k++;
}
@@ -261,36 +380,6 @@ static int get_gpio_enum_id(struct pinmux_info *gpioc, unsigned gpio,
return -1;
}
-static void write_config_reg(struct pinmux_info *gpioc,
- struct pinmux_cfg_reg *crp,
- int index)
-{
- unsigned long ncomb, pos, value;
-
- ncomb = 1 << crp->field_width;
- pos = index / ncomb;
- value = index % ncomb;
-
- gpio_write_reg(crp->reg, crp->reg_width, crp->field_width, pos, value);
-}
-
-static int check_config_reg(struct pinmux_info *gpioc,
- struct pinmux_cfg_reg *crp,
- int index)
-{
- unsigned long ncomb, pos, value;
-
- ncomb = 1 << crp->field_width;
- pos = index / ncomb;
- value = index % ncomb;
-
- if (gpio_read_reg(crp->reg, crp->reg_width,
- crp->field_width, pos) == value)
- return 0;
-
- return -1;
-}
-
enum { GPIO_CFG_DRYRUN, GPIO_CFG_REQ, GPIO_CFG_FREE };
static int pinmux_config_gpio(struct pinmux_info *gpioc, unsigned gpio,
@@ -299,7 +388,7 @@ static int pinmux_config_gpio(struct pinmux_info *gpioc, unsigned gpio,
struct pinmux_cfg_reg *cr = NULL;
pinmux_enum_t enum_id;
struct pinmux_range *range;
- int in_range, pos, index;
+ int in_range, pos, field, value;
unsigned long *cntp;
switch (pinmux_type) {
@@ -330,7 +419,8 @@ static int pinmux_config_gpio(struct pinmux_info *gpioc, unsigned gpio,
pos = 0;
enum_id = 0;
- index = 0;
+ field = 0;
+ value = 0;
while (1) {
pos = get_gpio_enum_id(gpioc, gpio, pos, &enum_id);
if (pos <= 0)
@@ -377,17 +467,19 @@ static int pinmux_config_gpio(struct pinmux_info *gpioc, unsigned gpio,
if (!in_range)
continue;
- if (get_config_reg(gpioc, enum_id, &cr, &index, &cntp) != 0)
+ if (get_config_reg(gpioc, enum_id, &cr,
+ &field, &value, &cntp) != 0)
goto out_err;
switch (cfg_mode) {
case GPIO_CFG_DRYRUN:
- if (!*cntp || !check_config_reg(gpioc, cr, index))
+ if (!*cntp ||
+ (read_config_reg(gpioc, cr, field) != value))
continue;
break;
case GPIO_CFG_REQ:
- write_config_reg(gpioc, cr, index);
+ write_config_reg(gpioc, cr, field, value);
*cntp = *cntp + 1;
break;
@@ -564,7 +656,7 @@ static int sh_gpio_get_value(struct pinmux_info *gpioc, unsigned gpio)
if (!gpioc || get_data_reg(gpioc, gpio, &dr, &bit) != 0)
return -EINVAL;
- return gpio_read_reg(dr->reg, dr->reg_width, 1, bit);
+ return gpio_read_bit(dr, bit);
}
static int sh_gpio_get(struct gpio_chip *chip, unsigned offset)
@@ -606,10 +698,15 @@ static int sh_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
int register_pinmux(struct pinmux_info *pip)
{
struct gpio_chip *chip = &pip->chip;
+ int ret;
pr_info("%s handling gpio %d -> %d\n",
pip->name, pip->first_gpio, pip->last_gpio);
+ ret = pfc_ioremap(pip);
+ if (ret < 0)
+ return ret;
+
setup_data_regs(pip);
chip->request = sh_gpio_request;
@@ -627,12 +724,16 @@ int register_pinmux(struct pinmux_info *pip)
chip->base = pip->first_gpio;
chip->ngpio = (pip->last_gpio - pip->first_gpio) + 1;
- return gpiochip_add(chip);
+ ret = gpiochip_add(chip);
+ if (ret < 0)
+ pfc_iounmap(pip);
+
+ return ret;
}
int unregister_pinmux(struct pinmux_info *pip)
{
pr_info("%s deregistering\n", pip->name);
-
+ pfc_iounmap(pip);
return gpiochip_remove(&pip->chip);
}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 8ba4510..8293658 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -87,12 +87,12 @@ config SPI_BFIN_SPORT
Enable support for a SPI bus via the Blackfin SPORT peripheral.
config SPI_AU1550
- tristate "Au1550/Au12x0 SPI Controller"
+ tristate "Au1550/Au1200/Au1300 SPI Controller"
depends on MIPS_ALCHEMY && EXPERIMENTAL
select SPI_BITBANG
help
If you say yes to this option, support will be included for the
- Au1550 SPI controller (may also work with Au1200,Au1210,Au1250).
+ PSC SPI controller found on Au1550, Au1200 and Au1300 series.
config SPI_BITBANG
tristate "Utilities for Bitbanging SPI masters"
@@ -174,8 +174,7 @@ config SPI_LM70_LLP
config SPI_MPC52xx
tristate "Freescale MPC52xx SPI (non-PSC) controller support"
- depends on PPC_MPC52xx && SPI
- select SPI_MASTER_OF
+ depends on PPC_MPC52xx
help
 This driver supports the MPC52xx SPI controller in master SPI
mode.
@@ -300,7 +299,7 @@ config SPI_S3C24XX_FIQ
config SPI_S3C64XX
tristate "Samsung S3C64XX series type SPI"
- depends on (ARCH_S3C64XX || ARCH_S5P64X0)
+ depends on (ARCH_S3C64XX || ARCH_S5P64X0 || ARCH_EXYNOS)
select S3C64XX_DMA if ARCH_S3C64XX
help
SPI driver for Samsung S3C64XX and newer SoCs.
@@ -333,8 +332,7 @@ config SPI_STMP3XXX
config SPI_TEGRA
tristate "Nvidia Tegra SPI controller"
- depends on ARCH_TEGRA
- select TEGRA_SYSTEM_DMA
+ depends on ARCH_TEGRA && TEGRA_SYSTEM_DMA
help
SPI driver for NVidia Tegra SoCs
@@ -346,14 +344,14 @@ config SPI_TI_SSP
serial port.
config SPI_TOPCLIFF_PCH
- tristate "Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH SPI controller"
+ tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) SPI"
depends on PCI
help
SPI driver for the Topcliff PCH (Platform Controller Hub) SPI bus
used in some x86 embedded processors.
- This driver also supports the ML7213, a companion chip for the
- Atom E6xx series and compatible with the Intel EG20T PCH.
+ This driver also supports the ML7213/ML7223/ML7831, a companion chip
+ for the Atom E6xx series and compatible with the Intel EG20T PCH.
config SPI_TXX9
tristate "Toshiba TXx9 SPI controller"
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index e743a45..8418eb0 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -131,7 +131,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
rxchan = dws->rxchan;
/* 2. Prepare the TX dma transfer */
- txconf.direction = DMA_TO_DEVICE;
+ txconf.direction = DMA_MEM_TO_DEV;
txconf.dst_addr = dws->dma_addr;
txconf.dst_maxburst = LNW_DMA_MSIZE_16;
txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -147,13 +147,13 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
txdesc = txchan->device->device_prep_slave_sg(txchan,
&dws->tx_sgl,
1,
- DMA_TO_DEVICE,
+ DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
txdesc->callback = dw_spi_dma_done;
txdesc->callback_param = dws;
/* 3. Prepare the RX dma transfer */
- rxconf.direction = DMA_FROM_DEVICE;
+ rxconf.direction = DMA_DEV_TO_MEM;
rxconf.src_addr = dws->dma_addr;
rxconf.src_maxburst = LNW_DMA_MSIZE_16;
rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -169,7 +169,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
&dws->rx_sgl,
1,
- DMA_FROM_DEVICE,
+ DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
rxdesc->callback = dw_spi_dma_done;
rxdesc->callback_param = dws;
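
The SPI DMA hunks above (and the ep93xx/pl022 ones that follow) switch from enum dma_data_direction to the dmaengine-specific enum dma_transfer_direction for slave configs and device_prep_slave_sg(). A minimal TX-side sketch of that usage; channel setup and scatterlist mapping are elided, and the helper name is illustrative.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static struct dma_async_tx_descriptor *
example_prep_tx(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int nents, dma_addr_t fifo_addr)
{
	struct dma_slave_config conf = {
		.direction = DMA_MEM_TO_DEV,	/* memory -> peripheral FIFO */
		.dst_addr = fifo_addr,
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
	};

	if (dmaengine_slave_config(chan, &conf))
		return NULL;

	/* The prep call also takes a dma_transfer_direction value. */
	return chan->device->device_prep_slave_sg(chan, sgl, nents,
						  DMA_MEM_TO_DEV,
						  DMA_PREP_INTERRUPT |
						  DMA_CTRL_ACK);
}
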
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index 0a282e5..d46e55c 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -551,6 +551,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
struct dma_async_tx_descriptor *txd;
enum dma_slave_buswidth buswidth;
struct dma_slave_config conf;
+ enum dma_transfer_direction slave_dirn;
struct scatterlist *sg;
struct sg_table *sgt;
struct dma_chan *chan;
@@ -573,6 +574,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
conf.src_addr = espi->sspdr_phys;
conf.src_addr_width = buswidth;
+ slave_dirn = DMA_DEV_TO_MEM;
} else {
chan = espi->dma_tx;
buf = t->tx_buf;
@@ -580,6 +582,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
conf.dst_addr = espi->sspdr_phys;
conf.dst_addr_width = buswidth;
+ slave_dirn = DMA_MEM_TO_DEV;
}
ret = dmaengine_slave_config(chan, &conf);
@@ -631,7 +634,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
return ERR_PTR(-ENOMEM);
txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents,
- dir, DMA_CTRL_ACK);
+ slave_dirn, DMA_CTRL_ACK);
if (!txd) {
dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
return ERR_PTR(-ENOMEM);
@@ -979,7 +982,7 @@ static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
dma_cap_set(DMA_SLAVE, mask);
espi->dma_rx_data.port = EP93XX_DMA_SSP;
- espi->dma_rx_data.direction = DMA_FROM_DEVICE;
+ espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
espi->dma_rx_data.name = "ep93xx-spi-rx";
espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
@@ -990,7 +993,7 @@ static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
}
espi->dma_tx_data.port = EP93XX_DMA_SSP;
- espi->dma_tx_data.direction = DMA_TO_DEVICE;
+ espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
espi->dma_tx_data.name = "ep93xx-spi-tx";
espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 322be7a..0b0dfb7 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -121,6 +121,7 @@ struct omap2_mcspi {
/* SPI1 has 4 channels, while SPI2 has 2 */
struct omap2_mcspi_dma *dma_channels;
struct device *dev;
+ struct workqueue_struct *wq;
};
struct omap2_mcspi_cs {
@@ -143,8 +144,6 @@ struct omap2_mcspi_regs {
static struct omap2_mcspi_regs omap2_mcspi_ctx[OMAP2_MCSPI_MAX_CTRL];
-static struct workqueue_struct *omap2_mcspi_wq;
-
#define MOD_REG_BIT(val, mask, set) do { \
if (set) \
val |= mask; \
@@ -1043,7 +1042,7 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
spin_lock_irqsave(&mcspi->lock, flags);
list_add_tail(&m->queue, &mcspi->msg_queue);
- queue_work(omap2_mcspi_wq, &mcspi->work);
+ queue_work(mcspi->wq, &mcspi->work);
spin_unlock_irqrestore(&mcspi->lock, flags);
return 0;
@@ -1088,6 +1087,7 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
struct omap2_mcspi *mcspi;
struct resource *r;
int status = 0, i;
+ char wq_name[20];
master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
if (master == NULL) {
@@ -1111,10 +1111,17 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
mcspi = spi_master_get_devdata(master);
mcspi->master = master;
+ sprintf(wq_name, "omap2_mcspi/%d", master->bus_num);
+ mcspi->wq = alloc_workqueue(wq_name, WQ_MEM_RECLAIM, 1);
+ if (mcspi->wq == NULL) {
+ status = -ENOMEM;
+ goto free_master;
+ }
+
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (r == NULL) {
status = -ENODEV;
- goto err1;
+ goto free_master;
}
r->start += pdata->regs_offset;
@@ -1123,14 +1130,14 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
if (!request_mem_region(r->start, resource_size(r),
dev_name(&pdev->dev))) {
status = -EBUSY;
- goto err1;
+ goto free_master;
}
mcspi->base = ioremap(r->start, resource_size(r));
if (!mcspi->base) {
dev_dbg(&pdev->dev, "can't ioremap MCSPI\n");
status = -ENOMEM;
- goto err2;
+ goto release_region;
}
mcspi->dev = &pdev->dev;
@@ -1145,7 +1152,7 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
GFP_KERNEL);
if (mcspi->dma_channels == NULL)
- goto err2;
+ goto unmap_io;
for (i = 0; i < master->num_chipselect; i++) {
char dma_ch_name[14];
@@ -1175,25 +1182,33 @@ static int __init omap2_mcspi_probe(struct platform_device *pdev)
mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start;
}
+ if (status < 0)
+ goto dma_chnl_free;
+
pm_runtime_enable(&pdev->dev);
if (status || omap2_mcspi_master_setup(mcspi) < 0)
- goto err3;
+ goto disable_pm;
status = spi_register_master(master);
if (status < 0)
- goto err4;
+ goto err_spi_register;
return status;
-err4:
+err_spi_register:
spi_master_put(master);
-err3:
+disable_pm:
+ pm_runtime_disable(&pdev->dev);
+dma_chnl_free:
kfree(mcspi->dma_channels);
-err2:
- release_mem_region(r->start, resource_size(r));
+unmap_io:
iounmap(mcspi->base);
-err1:
+release_region:
+ release_mem_region(r->start, resource_size(r));
+free_master:
+ kfree(master);
+ platform_set_drvdata(pdev, NULL);
return status;
}
@@ -1210,6 +1225,7 @@ static int __exit omap2_mcspi_remove(struct platform_device *pdev)
dma_channels = mcspi->dma_channels;
omap2_mcspi_disable_clocks(mcspi);
+ pm_runtime_disable(&pdev->dev);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(r->start, resource_size(r));
@@ -1217,6 +1233,8 @@ static int __exit omap2_mcspi_remove(struct platform_device *pdev)
spi_unregister_master(master);
iounmap(base);
kfree(dma_channels);
+ destroy_workqueue(mcspi->wq);
+ platform_set_drvdata(pdev, NULL);
return 0;
}
@@ -1275,10 +1293,6 @@ static struct platform_driver omap2_mcspi_driver = {
static int __init omap2_mcspi_init(void)
{
- omap2_mcspi_wq = create_singlethread_workqueue(
- omap2_mcspi_driver.driver.name);
- if (omap2_mcspi_wq == NULL)
- return -1;
return platform_driver_probe(&omap2_mcspi_driver, omap2_mcspi_probe);
}
subsys_initcall(omap2_mcspi_init);
@@ -1287,7 +1301,6 @@ static void __exit omap2_mcspi_exit(void)
{
platform_driver_unregister(&omap2_mcspi_driver);
- destroy_workqueue(omap2_mcspi_wq);
}
module_exit(omap2_mcspi_exit);
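
The omap2-mcspi hunks above replace the driver-wide workqueue with one allocated per controller in probe and destroyed in remove. A generic sketch of that pattern; the driver structure and names are illustrative.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/errno.h>

struct example_ctrl {
	struct workqueue_struct *wq;
	struct work_struct work;
	int bus_num;
};

static int example_probe(struct example_ctrl *ctrl)
{
	char wq_name[20];

	/* One named, WQ_MEM_RECLAIM workqueue per controller instance. */
	snprintf(wq_name, sizeof(wq_name), "example_wq/%d", ctrl->bus_num);
	ctrl->wq = alloc_workqueue(wq_name, WQ_MEM_RECLAIM, 1);
	if (!ctrl->wq)
		return -ENOMEM;
	return 0;
}

static void example_kick(struct example_ctrl *ctrl)
{
	queue_work(ctrl->wq, &ctrl->work);	/* per-controller, not global */
}

static void example_remove(struct example_ctrl *ctrl)
{
	destroy_workqueue(ctrl->wq);
}
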
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 5559b22..f37ad22 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -340,6 +340,10 @@ struct vendor_data {
* @cur_msg: Pointer to current spi_message being processed
* @cur_transfer: Pointer to current spi_transfer
 * @cur_chip: pointer to current client's chip (assigned from controller_state)
+ * @next_msg_cs_active: the next message in the queue has been examined
+ * and it was found that it uses the same chip select as the previous
+ * message, so we left it active after the previous transfer, and it's
+ * active already.
* @tx: current position in TX buffer to be read
* @tx_end: end position in TX buffer to be read
* @rx: current position in RX buffer to be written
@@ -373,6 +377,7 @@ struct pl022 {
struct spi_message *cur_msg;
struct spi_transfer *cur_transfer;
struct chip_data *cur_chip;
+ bool next_msg_cs_active;
void *tx;
void *tx_end;
void *rx;
@@ -445,23 +450,9 @@ static void giveback(struct pl022 *pl022)
struct spi_transfer *last_transfer;
unsigned long flags;
struct spi_message *msg;
- void (*curr_cs_control) (u32 command);
+ pl022->next_msg_cs_active = false;
- /*
- * This local reference to the chip select function
- * is needed because we set curr_chip to NULL
- * as a step toward termininating the message.
- */
- curr_cs_control = pl022->cur_chip->cs_control;
- spin_lock_irqsave(&pl022->queue_lock, flags);
- msg = pl022->cur_msg;
- pl022->cur_msg = NULL;
- pl022->cur_transfer = NULL;
- pl022->cur_chip = NULL;
- queue_work(pl022->workqueue, &pl022->pump_messages);
- spin_unlock_irqrestore(&pl022->queue_lock, flags);
-
- last_transfer = list_entry(msg->transfers.prev,
+ last_transfer = list_entry(pl022->cur_msg->transfers.prev,
struct spi_transfer,
transfer_list);
@@ -473,18 +464,13 @@ static void giveback(struct pl022 *pl022)
*/
udelay(last_transfer->delay_usecs);
- /*
- * Drop chip select UNLESS cs_change is true or we are returning
- * a message with an error, or next message is for another chip
- */
- if (!last_transfer->cs_change)
- curr_cs_control(SSP_CHIP_DESELECT);
- else {
+ if (!last_transfer->cs_change) {
struct spi_message *next_msg;
- /* Holding of cs was hinted, but we need to make sure
- * the next message is for the same chip. Don't waste
- * time with the following tests unless this was hinted.
+ /*
+ * cs_change was not set. We can keep the chip select
+ * enabled if there is a message in the queue and it is
+ * for the same spi device.
*
* We cannot postpone this until pump_messages, because
* after calling msg->complete (below) the driver that
@@ -501,19 +487,29 @@ static void giveback(struct pl022 *pl022)
struct spi_message, queue);
spin_unlock_irqrestore(&pl022->queue_lock, flags);
- /* see if the next and current messages point
- * to the same chip
+ /*
+ * see if the next and current messages point
+ * to the same spi device.
*/
- if (next_msg && next_msg->spi != msg->spi)
+ if (next_msg && next_msg->spi != pl022->cur_msg->spi)
next_msg = NULL;
- if (!next_msg || msg->state == STATE_ERROR)
- curr_cs_control(SSP_CHIP_DESELECT);
+ if (!next_msg || pl022->cur_msg->state == STATE_ERROR)
+ pl022->cur_chip->cs_control(SSP_CHIP_DESELECT);
+ else
+ pl022->next_msg_cs_active = true;
}
+
+ spin_lock_irqsave(&pl022->queue_lock, flags);
+ msg = pl022->cur_msg;
+ pl022->cur_msg = NULL;
+ pl022->cur_transfer = NULL;
+ pl022->cur_chip = NULL;
+ queue_work(pl022->workqueue, &pl022->pump_messages);
+ spin_unlock_irqrestore(&pl022->queue_lock, flags);
+
msg->state = NULL;
if (msg->complete)
msg->complete(msg->context);
- /* This message is completed, so let's turn off the clocks & power */
- pm_runtime_put(&pl022->adev->dev);
}
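For context on the cs_change handling above: the chip select can now stay asserted between back-to-back messages to the same device, provided the earlier message's last transfer leaves cs_change at 0 and the next message is already on the queue when giveback() runs. A client-side sketch using the generic SPI API; the helper is hypothetical and m1/m2 are messages the caller has already built with spi_message_init()/spi_message_add_tail():

#include <linux/completion.h>
#include <linux/spi/spi.h>

static void example_msg_complete(void *ctx)
{
	complete(ctx);
}

/*
 * Illustrative only: queue two messages for the same spi_device with
 * spi_async(). If neither message's last transfer sets cs_change and the
 * second message is already queued when the first completes, the pl022
 * logic above can leave the chip select asserted between them.
 */
static int example_two_messages(struct spi_device *spi,
				struct spi_message *m1,
				struct spi_message *m2)
{
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	m2->complete = example_msg_complete;
	m2->context = &done;

	ret = spi_async(spi, m1);
	if (ret)
		return ret;
	ret = spi_async(spi, m2);
	if (ret)
		return ret;

	wait_for_completion(&done);
	return m2->status;
}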
/**
@@ -904,11 +900,11 @@ static int configure_dma(struct pl022 *pl022)
{
struct dma_slave_config rx_conf = {
.src_addr = SSP_DR(pl022->phybase),
- .direction = DMA_FROM_DEVICE,
+ .direction = DMA_DEV_TO_MEM,
};
struct dma_slave_config tx_conf = {
.dst_addr = SSP_DR(pl022->phybase),
- .direction = DMA_TO_DEVICE,
+ .direction = DMA_MEM_TO_DEV,
};
unsigned int pages;
int ret;
@@ -1045,7 +1041,7 @@ static int configure_dma(struct pl022 *pl022)
rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
pl022->sgt_rx.sgl,
rx_sglen,
- DMA_FROM_DEVICE,
+ DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!rxdesc)
goto err_rxdesc;
@@ -1053,7 +1049,7 @@ static int configure_dma(struct pl022 *pl022)
txdesc = txchan->device->device_prep_slave_sg(txchan,
pl022->sgt_tx.sgl,
tx_sglen,
- DMA_TO_DEVICE,
+ DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!txdesc)
goto err_txdesc;
@@ -1087,7 +1083,7 @@ err_alloc_rx_sg:
return -ENOMEM;
}
-static int __init pl022_dma_probe(struct pl022 *pl022)
+static int __devinit pl022_dma_probe(struct pl022 *pl022)
{
dma_cap_mask_t mask;
@@ -1244,9 +1240,9 @@ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
if ((pl022->tx == pl022->tx_end) && (flag == 0)) {
flag = 1;
- /* Disable Transmit interrupt */
- writew(readw(SSP_IMSC(pl022->virtbase)) &
- (~SSP_IMSC_MASK_TXIM),
+ /* Disable Transmit interrupt, enable receive interrupt */
+ writew((readw(SSP_IMSC(pl022->virtbase)) &
+ ~SSP_IMSC_MASK_TXIM) | SSP_IMSC_MASK_RXIM,
SSP_IMSC(pl022->virtbase));
}
@@ -1352,7 +1348,7 @@ static void pump_transfers(unsigned long data)
*/
udelay(previous->delay_usecs);
- /* Drop chip select only if cs_change is requested */
+ /* Reselect chip select only if cs_change was requested */
if (previous->cs_change)
pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
} else {
@@ -1379,15 +1375,22 @@ static void pump_transfers(unsigned long data)
}
err_config_dma:
- writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
+ /* enable all interrupts except RX */
+ writew(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM, SSP_IMSC(pl022->virtbase));
}
static void do_interrupt_dma_transfer(struct pl022 *pl022)
{
- u32 irqflags = ENABLE_ALL_INTERRUPTS;
+ /*
+ * Default is to enable all interrupts except RX;
+ * the RX interrupt is enabled once TX is complete
+ */
+ u32 irqflags = ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM;
+
+ /* Enable target chip, if not already active */
+ if (!pl022->next_msg_cs_active)
+ pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
- /* Enable target chip */
- pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
/* Error path */
pl022->cur_msg->state = STATE_ERROR;
@@ -1442,7 +1445,8 @@ static void do_polling_transfer(struct pl022 *pl022)
} else {
/* STATE_START */
message->state = STATE_RUNNING;
- pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
+ if (!pl022->next_msg_cs_active)
+ pl022->cur_chip->cs_control(SSP_CHIP_SELECT);
}
/* Configuration Changing Per Transfer */
@@ -1504,14 +1508,28 @@ static void pump_messages(struct work_struct *work)
struct pl022 *pl022 =
container_of(work, struct pl022, pump_messages);
unsigned long flags;
+ bool was_busy = false;
/* Lock queue and check for queue work */
spin_lock_irqsave(&pl022->queue_lock, flags);
if (list_empty(&pl022->queue) || !pl022->running) {
+ if (pl022->busy) {
+ /* nothing more to do - disable spi/ssp and power off */
+ writew((readw(SSP_CR1(pl022->virtbase)) &
+ (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
+
+ if (pl022->master_info->autosuspend_delay > 0) {
+ pm_runtime_mark_last_busy(&pl022->adev->dev);
+ pm_runtime_put_autosuspend(&pl022->adev->dev);
+ } else {
+ pm_runtime_put(&pl022->adev->dev);
+ }
+ }
pl022->busy = false;
spin_unlock_irqrestore(&pl022->queue_lock, flags);
return;
}
+
/* Make sure we are not already running a message */
if (pl022->cur_msg) {
spin_unlock_irqrestore(&pl022->queue_lock, flags);
@@ -1522,7 +1540,10 @@ static void pump_messages(struct work_struct *work)
list_entry(pl022->queue.next, struct spi_message, queue);
list_del_init(&pl022->cur_msg->queue);
- pl022->busy = true;
+ if (pl022->busy)
+ was_busy = true;
+ else
+ pl022->busy = true;
spin_unlock_irqrestore(&pl022->queue_lock, flags);
/* Initial message state */
@@ -1532,12 +1553,14 @@ static void pump_messages(struct work_struct *work)
/* Setup the SPI using the per chip configuration */
pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi);
- /*
- * We enable the core voltage and clocks here, then the clocks
- * and core will be disabled when giveback() is called in each method
- * (poll/interrupt/DMA)
- */
- pm_runtime_get_sync(&pl022->adev->dev);
+ if (!was_busy)
+ /*
+ * We enable the core voltage and clocks here, then the clocks
+ * and core will be disabled when this workqueue is run again
+ * and there is no more work to be done.
+ */
+ pm_runtime_get_sync(&pl022->adev->dev);
+
restore_state(pl022);
flush(pl022);
@@ -1582,6 +1605,7 @@ static int start_queue(struct pl022 *pl022)
pl022->cur_msg = NULL;
pl022->cur_transfer = NULL;
pl022->cur_chip = NULL;
+ pl022->next_msg_cs_active = false;
spin_unlock_irqrestore(&pl022->queue_lock, flags);
queue_work(pl022->workqueue, &pl022->pump_messages);
@@ -1881,7 +1905,7 @@ static int pl022_setup(struct spi_device *spi)
{
struct pl022_config_chip const *chip_info;
struct chip_data *chip;
- struct ssp_clock_params clk_freq = {0, };
+ struct ssp_clock_params clk_freq = { .cpsdvsr = 0, .scr = 0};
int status = 0;
struct pl022 *pl022 = spi_master_get_devdata(spi->master);
unsigned int bits = spi->bits_per_word;
@@ -2231,7 +2255,17 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id)
dev_dbg(dev, "probe succeeded\n");
/* let runtime pm put suspend */
- pm_runtime_put(dev);
+ if (platform_info->autosuspend_delay > 0) {
+ dev_info(&adev->dev,
+ "will use autosuspend for runtime pm, delay %dms\n",
+ platform_info->autosuspend_delay);
+ pm_runtime_set_autosuspend_delay(dev,
+ platform_info->autosuspend_delay);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_put_autosuspend(dev);
+ } else {
+ pm_runtime_put(dev);
+ }
return 0;
err_spi_register:
@@ -2305,11 +2339,6 @@ static int pl022_suspend(struct device *dev)
return status;
}
- amba_vcore_enable(pl022->adev);
- amba_pclk_enable(pl022->adev);
- load_ssp_default_config(pl022);
- amba_pclk_disable(pl022->adev);
- amba_vcore_disable(pl022->adev);
dev_dbg(dev, "suspended\n");
return 0;
}
@@ -2432,6 +2461,8 @@ static struct amba_id pl022_ids[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, pl022_ids);
+
static struct amba_driver pl022_driver = {
.drv = {
.name = "ssp-pl022",
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 019a716..dcf7e10 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -971,6 +971,7 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
struct s3c64xx_spi_info *sci;
struct spi_master *master;
int ret;
+ char clk_name[16];
if (pdev->id < 0) {
dev_err(&pdev->dev,
@@ -984,11 +985,6 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
}
sci = pdev->dev.platform_data;
- if (!sci->src_clk_name) {
- dev_err(&pdev->dev,
- "Board init must call s3c64xx_spi_set_info()\n");
- return -EINVAL;
- }
/* Check for availability of necessary resource */
@@ -1073,17 +1069,17 @@ static int __init s3c64xx_spi_probe(struct platform_device *pdev)
goto err4;
}
- sdd->src_clk = clk_get(&pdev->dev, sci->src_clk_name);
+ sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
+ sdd->src_clk = clk_get(&pdev->dev, clk_name);
if (IS_ERR(sdd->src_clk)) {
dev_err(&pdev->dev,
- "Unable to acquire clock '%s'\n", sci->src_clk_name);
+ "Unable to acquire clock '%s'\n", clk_name);
ret = PTR_ERR(sdd->src_clk);
goto err5;
}
if (clk_enable(sdd->src_clk)) {
- dev_err(&pdev->dev, "Couldn't enable clock '%s'\n",
- sci->src_clk_name);
+ dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
ret = -EBUSY;
goto err6;
}
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 6a80749..10182eb 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -1,7 +1,7 @@
/*
* SPI bus driver for the Topcliff PCH used by Intel SoCs
*
- * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -95,16 +95,18 @@
#define PCH_CLOCK_HZ 50000000
#define PCH_MAX_SPBR 1023
-/* Definition for ML7213 by OKI SEMICONDUCTOR */
+/* Definition for ML7213/ML7223/ML7831 by LAPIS Semiconductor */
#define PCI_VENDOR_ID_ROHM 0x10DB
#define PCI_DEVICE_ID_ML7213_SPI 0x802c
#define PCI_DEVICE_ID_ML7223_SPI 0x800F
+#define PCI_DEVICE_ID_ML7831_SPI 0x8816
/*
 * Set the maximum number of SPI instances
* Intel EG20T PCH : 1ch
- * OKI SEMICONDUCTOR ML7213 IOH : 2ch
- * OKI SEMICONDUCTOR ML7223 IOH : 1ch
+ * LAPIS Semiconductor ML7213 IOH : 2ch
+ * LAPIS Semiconductor ML7223 IOH : 1ch
+ * LAPIS Semiconductor ML7831 IOH : 1ch
*/
#define PCH_SPI_MAX_DEV 2
@@ -218,6 +220,7 @@ static struct pci_device_id pch_spi_pcidev_id[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_GE_SPI), 1, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_SPI), 2, },
{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_SPI), 1, },
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_SPI), 1, },
{ }
};
@@ -1076,7 +1079,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
}
sg = dma->sg_rx_p;
desc_rx = dma->chan_rx->device->device_prep_slave_sg(dma->chan_rx, sg,
- num, DMA_FROM_DEVICE,
+ num, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx) {
dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n",
@@ -1121,7 +1124,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
}
sg = dma->sg_tx_p;
desc_tx = dma->chan_tx->device->device_prep_slave_sg(dma->chan_tx,
- sg, num, DMA_TO_DEVICE,
+ sg, num, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx) {
dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n",
@@ -1717,7 +1720,7 @@ static int pch_spi_resume(struct pci_dev *pdev)
#endif
-static struct pci_driver pch_spi_pcidev = {
+static struct pci_driver pch_spi_pcidev_driver = {
.name = "pch_spi",
.id_table = pch_spi_pcidev_id,
.probe = pch_spi_probe,
@@ -1733,7 +1736,7 @@ static int __init pch_spi_init(void)
if (ret)
return ret;
- ret = pci_register_driver(&pch_spi_pcidev);
+ ret = pci_register_driver(&pch_spi_pcidev_driver);
if (ret)
return ret;
@@ -1743,7 +1746,7 @@ module_init(pch_spi_init);
static void __exit pch_spi_exit(void)
{
- pci_unregister_driver(&pch_spi_pcidev);
+ pci_unregister_driver(&pch_spi_pcidev_driver);
platform_driver_unregister(&pch_spi_pd_driver);
}
module_exit(pch_spi_exit);
@@ -1753,4 +1756,4 @@ MODULE_PARM_DESC(use_dma,
"to use DMA for data transfers pass 1 else 0; default 1");
MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR ML7xxx IOH SPI Driver");
+MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semiconductor ML7xxx IOH SPI Driver");
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 77eae99..b2ccdea 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -319,7 +319,7 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
}
spi->master = master;
- spi->dev.parent = dev;
+ spi->dev.parent = &master->dev;
spi->dev.bus = &spi_bus_type;
spi->dev.release = spidev_release;
device_initialize(&spi->dev);
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 520e828..49d2091 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -75,7 +75,7 @@ static u32 get_cfgspace_addr(struct ssb_pcicore *pc,
u32 tmp;
/* We only have one cardbus device behind the bridge. */
- if (pc->cardbusmode && (dev >= 1))
+ if (pc->cardbusmode && (dev > 1))
goto out;
if (bus == 0) {
diff --git a/drivers/ssb/pci.c b/drivers/ssb/pci.c
index 34c3bab..973223f 100644
--- a/drivers/ssb/pci.c
+++ b/drivers/ssb/pci.c
@@ -607,6 +607,29 @@ static void sprom_extract_r8(struct ssb_sprom *out, const u16 *in)
memcpy(&out->antenna_gain.ghz5, &out->antenna_gain.ghz24,
sizeof(out->antenna_gain.ghz5));
+ /* Extract FEM info */
+ SPEX(fem.ghz2.tssipos, SSB_SPROM8_FEM2G,
+ SSB_SROM8_FEM_TSSIPOS, SSB_SROM8_FEM_TSSIPOS_SHIFT);
+ SPEX(fem.ghz2.extpa_gain, SSB_SPROM8_FEM2G,
+ SSB_SROM8_FEM_EXTPA_GAIN, SSB_SROM8_FEM_EXTPA_GAIN_SHIFT);
+ SPEX(fem.ghz2.pdet_range, SSB_SPROM8_FEM2G,
+ SSB_SROM8_FEM_PDET_RANGE, SSB_SROM8_FEM_PDET_RANGE_SHIFT);
+ SPEX(fem.ghz2.tr_iso, SSB_SPROM8_FEM2G,
+ SSB_SROM8_FEM_TR_ISO, SSB_SROM8_FEM_TR_ISO_SHIFT);
+ SPEX(fem.ghz2.antswlut, SSB_SPROM8_FEM2G,
+ SSB_SROM8_FEM_ANTSWLUT, SSB_SROM8_FEM_ANTSWLUT_SHIFT);
+
+ SPEX(fem.ghz5.tssipos, SSB_SPROM8_FEM5G,
+ SSB_SROM8_FEM_TSSIPOS, SSB_SROM8_FEM_TSSIPOS_SHIFT);
+ SPEX(fem.ghz5.extpa_gain, SSB_SPROM8_FEM5G,
+ SSB_SROM8_FEM_EXTPA_GAIN, SSB_SROM8_FEM_EXTPA_GAIN_SHIFT);
+ SPEX(fem.ghz5.pdet_range, SSB_SPROM8_FEM5G,
+ SSB_SROM8_FEM_PDET_RANGE, SSB_SROM8_FEM_PDET_RANGE_SHIFT);
+ SPEX(fem.ghz5.tr_iso, SSB_SPROM8_FEM5G,
+ SSB_SROM8_FEM_TR_ISO, SSB_SROM8_FEM_TR_ISO_SHIFT);
+ SPEX(fem.ghz5.antswlut, SSB_SPROM8_FEM5G,
+ SSB_SROM8_FEM_ANTSWLUT, SSB_SROM8_FEM_ANTSWLUT_SHIFT);
+
sprom_extract_r458(out, in);
/* TODO - get remaining rev 8 stuff needed */
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 25cdff3..9e63472 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -60,8 +60,6 @@ source "drivers/staging/rts5139/Kconfig"
source "drivers/staging/frontier/Kconfig"
-source "drivers/staging/pohmelfs/Kconfig"
-
source "drivers/staging/phison/Kconfig"
source "drivers/staging/line6/Kconfig"
@@ -72,8 +70,6 @@ source "drivers/staging/octeon/Kconfig"
source "drivers/staging/serqt_usb2/Kconfig"
-source "drivers/staging/spectra/Kconfig"
-
source "drivers/staging/quatech_usb2/Kconfig"
source "drivers/staging/vt6655/Kconfig"
@@ -116,20 +112,20 @@ source "drivers/staging/bcm/Kconfig"
source "drivers/staging/ft1000/Kconfig"
-source "drivers/staging/intel_sst/Kconfig"
-
source "drivers/staging/speakup/Kconfig"
source "drivers/staging/cptm1217/Kconfig"
source "drivers/staging/ste_rmi4/Kconfig"
-source "drivers/staging/gma500/Kconfig"
-
source "drivers/staging/mei/Kconfig"
source "drivers/staging/nvec/Kconfig"
source "drivers/staging/media/Kconfig"
+source "drivers/staging/omapdrm/Kconfig"
+
+source "drivers/staging/android/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index a25f3f2..943e148 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -21,9 +21,7 @@ obj-$(CONFIG_RTL8192E) += rtl8192e/
obj-$(CONFIG_R8712U) += rtl8712/
obj-$(CONFIG_RTS_PSTOR) += rts_pstor/
obj-$(CONFIG_RTS5139) += rts5139/
-obj-$(CONFIG_SPECTRA) += spectra/
obj-$(CONFIG_TRANZPORT) += frontier/
-obj-$(CONFIG_POHMELFS) += pohmelfs/
obj-$(CONFIG_IDE_PHISON) += phison/
obj-$(CONFIG_LINE6_USB) += line6/
obj-$(CONFIG_USB_SERIAL_QUATECH2) += serqt_usb2/
@@ -50,10 +48,10 @@ obj-$(CONFIG_SBE_2T3E3) += sbe-2t3e3/
obj-$(CONFIG_USB_ENESTORAGE) += keucr/
obj-$(CONFIG_BCM_WIMAX) += bcm/
obj-$(CONFIG_FT1000) += ft1000/
-obj-$(CONFIG_SND_INTEL_SST) += intel_sst/
obj-$(CONFIG_SPEAKUP) += speakup/
obj-$(CONFIG_TOUCHSCREEN_CLEARPAD_TM1217) += cptm1217/
obj-$(CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4) += ste_rmi4/
-obj-$(CONFIG_DRM_PSB) += gma500/
obj-$(CONFIG_INTEL_MEI) += mei/
obj-$(CONFIG_MFD_NVEC) += nvec/
+obj-$(CONFIG_DRM_OMAP) += omapdrm/
+obj-$(CONFIG_ANDROID) += android/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
new file mode 100644
index 0000000..fef3580
--- /dev/null
+++ b/drivers/staging/android/Kconfig
@@ -0,0 +1,107 @@
+menu "Android"
+
+config ANDROID
+ bool "Android Drivers"
+ default n
+ ---help---
+ Enable support for various drivers needed on the Android platform
+
+if ANDROID
+
+config ANDROID_BINDER_IPC
+ bool "Android Binder IPC Driver"
+ default n
+
+config ASHMEM
+ bool "Enable the Anonymous Shared Memory Subsystem"
+ default n
+ depends on SHMEM || TINY_SHMEM
+ help
+ The ashmem subsystem is a new shared memory allocator, similar to
+ POSIX SHM but with different behavior and sporting a simpler
+ file-based API.
+
+config ANDROID_LOGGER
+ tristate "Android log driver"
+ default n
+
+config ANDROID_RAM_CONSOLE
+ bool "Android RAM buffer console"
+ depends on !S390 && !UML
+ default n
+
+config ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
+ bool "Enable verbose console messages on Android RAM console"
+ default y
+ depends on ANDROID_RAM_CONSOLE
+
+menuconfig ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ bool "Android RAM Console Enable error correction"
+ default n
+ depends on ANDROID_RAM_CONSOLE
+ depends on !ANDROID_RAM_CONSOLE_EARLY_INIT
+ select REED_SOLOMON
+ select REED_SOLOMON_ENC8
+ select REED_SOLOMON_DEC8
+
+if ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+
+config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE
+ int "Android RAM Console Data data size"
+ default 128
+ help
+ Must be a power of 2.
+
+config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE
+ int "Android RAM Console ECC size"
+ default 16
+
+config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE
+ int "Android RAM Console Symbol size"
+ default 8
+
+config ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL
+ hex "Android RAM Console Polynomial"
+ default 0x19 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 4)
+ default 0x29 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 5)
+ default 0x61 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 6)
+ default 0x89 if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 7)
+ default 0x11d if (ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE = 8)
+
+endif # ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+
+config ANDROID_RAM_CONSOLE_EARLY_INIT
+ bool "Start Android RAM console early"
+ default n
+ depends on ANDROID_RAM_CONSOLE
+
+config ANDROID_RAM_CONSOLE_EARLY_ADDR
+ hex "Android RAM console virtual address"
+ default 0
+ depends on ANDROID_RAM_CONSOLE_EARLY_INIT
+
+config ANDROID_RAM_CONSOLE_EARLY_SIZE
+ hex "Android RAM console buffer size"
+ default 0
+ depends on ANDROID_RAM_CONSOLE_EARLY_INIT
+
+config ANDROID_TIMED_OUTPUT
+ bool "Timed output class driver"
+ default y
+
+config ANDROID_TIMED_GPIO
+ tristate "Android timed gpio driver"
+ depends on GENERIC_GPIO && ANDROID_TIMED_OUTPUT
+ default n
+
+config ANDROID_LOW_MEMORY_KILLER
+ bool "Android Low Memory Killer"
+ default n
+ ---help---
+ Register processes to be killed when memory is low
+
+source "drivers/staging/android/switch/Kconfig"
+
+endif # if ANDROID
+
+endmenu
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
new file mode 100644
index 0000000..5fcc24f
--- /dev/null
+++ b/drivers/staging/android/Makefile
@@ -0,0 +1,8 @@
+obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o
+obj-$(CONFIG_ASHMEM) += ashmem.o
+obj-$(CONFIG_ANDROID_LOGGER) += logger.o
+obj-$(CONFIG_ANDROID_RAM_CONSOLE) += ram_console.o
+obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o
+obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o
+obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
+obj-$(CONFIG_ANDROID_SWITCH) += switch/
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
new file mode 100644
index 0000000..e59c5be
--- /dev/null
+++ b/drivers/staging/android/TODO
@@ -0,0 +1,10 @@
+TODO:
+ - checkpatch.pl cleanups
+ - sparse fixes
+ - rename files to be not so "generic"
+ - make sure things build as modules properly
+ - add proper arch dependencies as needed
+ - audit userspace interfaces to make sure they are sane
+
+Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
+Brian Swetland <swetland@google.com>
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
new file mode 100644
index 0000000..99052bf
--- /dev/null
+++ b/drivers/staging/android/ashmem.c
@@ -0,0 +1,752 @@
+/* mm/ashmem.c
+**
+** Anonymous Shared Memory Subsystem, ashmem
+**
+** Copyright (C) 2008 Google, Inc.
+**
+** Robert Love <rlove@google.com>
+**
+** This software is licensed under the terms of the GNU General Public
+** License version 2, as published by the Free Software Foundation, and
+** may be copied, distributed, and modified under those terms.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+*/
+
+#include <linux/module.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/security.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/uaccess.h>
+#include <linux/personality.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
+#include <linux/shmem_fs.h>
+#include "ashmem.h"
+
+#define ASHMEM_NAME_PREFIX "dev/ashmem/"
+#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
+#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
+
+/*
+ * ashmem_area - anonymous shared memory area
+ * Lifecycle: From our parent file's open() until its release()
+ * Locking: Protected by `ashmem_mutex'
+ * Big Note: Mappings do NOT pin this structure; it dies on close()
+ */
+struct ashmem_area {
+ char name[ASHMEM_FULL_NAME_LEN]; /* optional name in /proc/pid/maps */
+ struct list_head unpinned_list; /* list of this area's unpinned ranges */
+ struct file *file; /* the shmem-based backing file */
+ size_t size; /* size of the mapping, in bytes */
+ unsigned long prot_mask; /* allowed prot bits, as vm_flags */
+};
+
+/*
+ * ashmem_range - represents an interval of unpinned (evictable) pages
+ * Lifecycle: From unpin to pin
+ * Locking: Protected by `ashmem_mutex'
+ */
+struct ashmem_range {
+ struct list_head lru; /* entry in LRU list */
+ struct list_head unpinned; /* entry in its area's unpinned list */
+ struct ashmem_area *asma; /* associated area */
+ size_t pgstart; /* starting page, inclusive */
+ size_t pgend; /* ending page, inclusive */
+ unsigned int purged; /* ASHMEM_NOT or ASHMEM_WAS_PURGED */
+};
+
+/* LRU list of unpinned pages, protected by ashmem_mutex */
+static LIST_HEAD(ashmem_lru_list);
+
+/* Count of pages on our LRU list, protected by ashmem_mutex */
+static unsigned long lru_count;
+
+/*
+ * ashmem_mutex - protects the list of and each individual ashmem_area
+ *
+ * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
+ */
+static DEFINE_MUTEX(ashmem_mutex);
+
+static struct kmem_cache *ashmem_area_cachep __read_mostly;
+static struct kmem_cache *ashmem_range_cachep __read_mostly;
+
+#define range_size(range) \
+ ((range)->pgend - (range)->pgstart + 1)
+
+#define range_on_lru(range) \
+ ((range)->purged == ASHMEM_NOT_PURGED)
+
+#define page_range_subsumes_range(range, start, end) \
+ (((range)->pgstart >= (start)) && ((range)->pgend <= (end)))
+
+#define page_range_subsumed_by_range(range, start, end) \
+ (((range)->pgstart <= (start)) && ((range)->pgend >= (end)))
+
+#define page_in_range(range, page) \
+ (((range)->pgstart <= (page)) && ((range)->pgend >= (page)))
+
+#define page_range_in_range(range, start, end) \
+ (page_in_range(range, start) || page_in_range(range, end) || \
+ page_range_subsumes_range(range, start, end))
+
+#define range_before_page(range, page) \
+ ((range)->pgend < (page))
+
+#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE)
+
+static inline void lru_add(struct ashmem_range *range)
+{
+ list_add_tail(&range->lru, &ashmem_lru_list);
+ lru_count += range_size(range);
+}
+
+static inline void lru_del(struct ashmem_range *range)
+{
+ list_del(&range->lru);
+ lru_count -= range_size(range);
+}
+
+/*
+ * range_alloc - allocate and initialize a new ashmem_range structure
+ *
+ * 'asma' - associated ashmem_area
+ * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list
+ * 'purged' - initial purge value (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
+ * 'start' - starting page, inclusive
+ * 'end' - ending page, inclusive
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static int range_alloc(struct ashmem_area *asma,
+ struct ashmem_range *prev_range, unsigned int purged,
+ size_t start, size_t end)
+{
+ struct ashmem_range *range;
+
+ range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
+ if (unlikely(!range))
+ return -ENOMEM;
+
+ range->asma = asma;
+ range->pgstart = start;
+ range->pgend = end;
+ range->purged = purged;
+
+ list_add_tail(&range->unpinned, &prev_range->unpinned);
+
+ if (range_on_lru(range))
+ lru_add(range);
+
+ return 0;
+}
+
+static void range_del(struct ashmem_range *range)
+{
+ list_del(&range->unpinned);
+ if (range_on_lru(range))
+ lru_del(range);
+ kmem_cache_free(ashmem_range_cachep, range);
+}
+
+/*
+ * range_shrink - shrinks a range
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static inline void range_shrink(struct ashmem_range *range,
+ size_t start, size_t end)
+{
+ size_t pre = range_size(range);
+
+ range->pgstart = start;
+ range->pgend = end;
+
+ if (range_on_lru(range))
+ lru_count -= pre - range_size(range);
+}
+
+static int ashmem_open(struct inode *inode, struct file *file)
+{
+ struct ashmem_area *asma;
+ int ret;
+
+ ret = generic_file_open(inode, file);
+ if (unlikely(ret))
+ return ret;
+
+ asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
+ if (unlikely(!asma))
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&asma->unpinned_list);
+ memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
+ asma->prot_mask = PROT_MASK;
+ file->private_data = asma;
+
+ return 0;
+}
+
+static int ashmem_release(struct inode *ignored, struct file *file)
+{
+ struct ashmem_area *asma = file->private_data;
+ struct ashmem_range *range, *next;
+
+ mutex_lock(&ashmem_mutex);
+ list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
+ range_del(range);
+ mutex_unlock(&ashmem_mutex);
+
+ if (asma->file)
+ fput(asma->file);
+ kmem_cache_free(ashmem_area_cachep, asma);
+
+ return 0;
+}
+
+static ssize_t ashmem_read(struct file *file, char __user *buf,
+ size_t len, loff_t *pos)
+{
+ struct ashmem_area *asma = file->private_data;
+ int ret = 0;
+
+ mutex_lock(&ashmem_mutex);
+
+ /* If size is not set, or set to 0, always return EOF. */
+ if (asma->size == 0)
+ goto out;
+
+ if (!asma->file) {
+ ret = -EBADF;
+ goto out;
+ }
+
+ ret = asma->file->f_op->read(asma->file, buf, len, pos);
+ if (ret < 0)
+ goto out;
+
+ /* Update backing file pos, since f_ops->read() doesn't */
+ asma->file->f_pos = *pos;
+
+out:
+ mutex_unlock(&ashmem_mutex);
+ return ret;
+}
+
+static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
+{
+ struct ashmem_area *asma = file->private_data;
+ int ret;
+
+ mutex_lock(&ashmem_mutex);
+
+ if (asma->size == 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!asma->file) {
+ ret = -EBADF;
+ goto out;
+ }
+
+ ret = asma->file->f_op->llseek(asma->file, offset, origin);
+ if (ret < 0)
+ goto out;
+
+ /* Copy f_pos from backing file, since f_ops->llseek() sets it */
+ file->f_pos = asma->file->f_pos;
+
+out:
+ mutex_unlock(&ashmem_mutex);
+ return ret;
+}
+
+static inline unsigned long calc_vm_may_flags(unsigned long prot)
+{
+ return _calc_vm_trans(prot, PROT_READ, VM_MAYREAD) |
+ _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
+ _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC);
+}
+
+static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct ashmem_area *asma = file->private_data;
+ int ret = 0;
+
+ mutex_lock(&ashmem_mutex);
+
+ /* user needs to SET_SIZE before mapping */
+ if (unlikely(!asma->size)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* requested protection bits must match our allowed protection mask */
+ if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
+ calc_vm_prot_bits(PROT_MASK))) {
+ ret = -EPERM;
+ goto out;
+ }
+ vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);
+
+ if (!asma->file) {
+ char *name = ASHMEM_NAME_DEF;
+ struct file *vmfile;
+
+ if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
+ name = asma->name;
+
+ /* ... and allocate the backing shmem file */
+ vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
+ if (unlikely(IS_ERR(vmfile))) {
+ ret = PTR_ERR(vmfile);
+ goto out;
+ }
+ asma->file = vmfile;
+ }
+ get_file(asma->file);
+
+ /*
+ * XXX - Reworked to use shmem_zero_setup() instead of
+ * shmem_set_file while we're in staging. -jstultz
+ */
+ if (vma->vm_flags & VM_SHARED) {
+ ret = shmem_zero_setup(vma);
+ if (ret) {
+ fput(asma->file);
+ goto out;
+ }
+ }
+
+ if (vma->vm_file)
+ fput(vma->vm_file);
+ vma->vm_file = asma->file;
+ vma->vm_flags |= VM_CAN_NONLINEAR;
+
+out:
+ mutex_unlock(&ashmem_mutex);
+ return ret;
+}
+
+/*
+ * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
+ *
+ * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how
+ * many objects (pages) we have in total.
+ *
+ * 'gfp_mask' is the mask of the allocation that got us into this mess.
+ *
+ * Return value is the number of objects (pages) remaining, or -1 if we cannot
+ * proceed without risk of deadlock (due to gfp_mask).
+ *
+ * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
+ * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
+ * pages freed.
+ */
+static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
+{
+ struct ashmem_range *range, *next;
+
+ /* We might recurse into filesystem code, so bail out if necessary */
+ if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
+ return -1;
+ if (!sc->nr_to_scan)
+ return lru_count;
+
+ mutex_lock(&ashmem_mutex);
+ list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
+ struct inode *inode = range->asma->file->f_dentry->d_inode;
+ loff_t start = range->pgstart * PAGE_SIZE;
+ loff_t end = (range->pgend + 1) * PAGE_SIZE - 1;
+
+ vmtruncate_range(inode, start, end);
+ range->purged = ASHMEM_WAS_PURGED;
+ lru_del(range);
+
+ sc->nr_to_scan -= range_size(range);
+ if (sc->nr_to_scan <= 0)
+ break;
+ }
+ mutex_unlock(&ashmem_mutex);
+
+ return lru_count;
+}
+
+static struct shrinker ashmem_shrinker = {
+ .shrink = ashmem_shrink,
+ .seeks = DEFAULT_SEEKS * 4,
+};
+
+static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
+{
+ int ret = 0;
+
+ mutex_lock(&ashmem_mutex);
+
+ /* the user can only remove, not add, protection bits */
+ if (unlikely((asma->prot_mask & prot) != prot)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* does the application expect PROT_READ to imply PROT_EXEC? */
+ if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
+ prot |= PROT_EXEC;
+
+ asma->prot_mask = prot;
+
+out:
+ mutex_unlock(&ashmem_mutex);
+ return ret;
+}
+
+static int set_name(struct ashmem_area *asma, void __user *name)
+{
+ int ret = 0;
+
+ mutex_lock(&ashmem_mutex);
+
+ /* cannot change an existing mapping's name */
+ if (unlikely(asma->file)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (unlikely(copy_from_user(asma->name + ASHMEM_NAME_PREFIX_LEN,
+ name, ASHMEM_NAME_LEN)))
+ ret = -EFAULT;
+ asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0';
+
+out:
+ mutex_unlock(&ashmem_mutex);
+
+ return ret;
+}
+
+static int get_name(struct ashmem_area *asma, void __user *name)
+{
+ int ret = 0;
+
+ mutex_lock(&ashmem_mutex);
+ if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
+ size_t len;
+
+ /*
+ * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
+ * prevents us from revealing one user's stack to another.
+ */
+ len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
+ if (unlikely(copy_to_user(name,
+ asma->name + ASHMEM_NAME_PREFIX_LEN, len)))
+ ret = -EFAULT;
+ } else {
+ if (unlikely(copy_to_user(name, ASHMEM_NAME_DEF,
+ sizeof(ASHMEM_NAME_DEF))))
+ ret = -EFAULT;
+ }
+ mutex_unlock(&ashmem_mutex);
+
+ return ret;
+}
+
+/*
+ * ashmem_pin - pin the given ashmem region, returning whether it was
+ * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+{
+ struct ashmem_range *range, *next;
+ int ret = ASHMEM_NOT_PURGED;
+
+ list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
+ /* moved past last applicable page; we can short circuit */
+ if (range_before_page(range, pgstart))
+ break;
+
+ /*
+ * The user can ask us to pin pages that span multiple ranges,
+ * or to pin pages that aren't even unpinned, so this is messy.
+ *
+ * Four cases:
+ * 1. The requested range subsumes an existing range, so we
+ * just remove the entire matching range.
+ * 2. The requested range overlaps the start of an existing
+ * range, so we just update that range.
+ * 3. The requested range overlaps the end of an existing
+ * range, so we just update that range.
+ * 4. The requested range punches a hole in an existing range,
+ * so we have to update one side of the range and then
+ * create a new range for the other side.
+ */
+ if (page_range_in_range(range, pgstart, pgend)) {
+ ret |= range->purged;
+
+ /* Case #1: Easy. Just nuke the whole thing. */
+ if (page_range_subsumes_range(range, pgstart, pgend)) {
+ range_del(range);
+ continue;
+ }
+
+ /* Case #2: We overlap from the start, so adjust it */
+ if (range->pgstart >= pgstart) {
+ range_shrink(range, pgend + 1, range->pgend);
+ continue;
+ }
+
+ /* Case #3: We overlap from the rear, so adjust it */
+ if (range->pgend <= pgend) {
+ range_shrink(range, range->pgstart, pgstart-1);
+ continue;
+ }
+
+ /*
+ * Case #4: We eat a chunk out of the middle. A bit
+ * more complicated, we allocate a new range for the
+ * second half and adjust the first chunk's endpoint.
+ */
+ range_alloc(asma, range, range->purged,
+ pgend + 1, range->pgend);
+ range_shrink(range, range->pgstart, pgstart - 1);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * ashmem_unpin - unpin the given range of pages. Returns zero on success.
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+{
+ struct ashmem_range *range, *next;
+ unsigned int purged = ASHMEM_NOT_PURGED;
+
+restart:
+ list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
+ /* short circuit: this is our insertion point */
+ if (range_before_page(range, pgstart))
+ break;
+
+ /*
+ * The user can ask us to unpin pages that are already entirely
+ * or partially unpinned. We handle those two cases here.
+ */
+ if (page_range_subsumed_by_range(range, pgstart, pgend))
+ return 0;
+ if (page_range_in_range(range, pgstart, pgend)) {
+ pgstart = min_t(size_t, range->pgstart, pgstart);
+ pgend = max_t(size_t, range->pgend, pgend);
+ purged |= range->purged;
+ range_del(range);
+ goto restart;
+ }
+ }
+
+ return range_alloc(asma, range, purged, pgstart, pgend);
+}
+
+/*
+ * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
+ * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
+ size_t pgend)
+{
+ struct ashmem_range *range;
+ int ret = ASHMEM_IS_PINNED;
+
+ list_for_each_entry(range, &asma->unpinned_list, unpinned) {
+ if (range_before_page(range, pgstart))
+ break;
+ if (page_range_in_range(range, pgstart, pgend)) {
+ ret = ASHMEM_IS_UNPINNED;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
+ void __user *p)
+{
+ struct ashmem_pin pin;
+ size_t pgstart, pgend;
+ int ret = -EINVAL;
+
+ if (unlikely(!asma->file))
+ return -EINVAL;
+
+ if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
+ return -EFAULT;
+
+ /* by convention, you can pass zero for len to mean "everything onward" */
+ if (!pin.len)
+ pin.len = PAGE_ALIGN(asma->size) - pin.offset;
+
+ if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
+ return -EINVAL;
+
+ if (unlikely(((__u32) -1) - pin.offset < pin.len))
+ return -EINVAL;
+
+ if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
+ return -EINVAL;
+
+ pgstart = pin.offset / PAGE_SIZE;
+ pgend = pgstart + (pin.len / PAGE_SIZE) - 1;
+
+ mutex_lock(&ashmem_mutex);
+
+ switch (cmd) {
+ case ASHMEM_PIN:
+ ret = ashmem_pin(asma, pgstart, pgend);
+ break;
+ case ASHMEM_UNPIN:
+ ret = ashmem_unpin(asma, pgstart, pgend);
+ break;
+ case ASHMEM_GET_PIN_STATUS:
+ ret = ashmem_get_pin_status(asma, pgstart, pgend);
+ break;
+ }
+
+ mutex_unlock(&ashmem_mutex);
+
+ return ret;
+}
+
+static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct ashmem_area *asma = file->private_data;
+ long ret = -ENOTTY;
+
+ switch (cmd) {
+ case ASHMEM_SET_NAME:
+ ret = set_name(asma, (void __user *) arg);
+ break;
+ case ASHMEM_GET_NAME:
+ ret = get_name(asma, (void __user *) arg);
+ break;
+ case ASHMEM_SET_SIZE:
+ ret = -EINVAL;
+ if (!asma->file) {
+ ret = 0;
+ asma->size = (size_t) arg;
+ }
+ break;
+ case ASHMEM_GET_SIZE:
+ ret = asma->size;
+ break;
+ case ASHMEM_SET_PROT_MASK:
+ ret = set_prot_mask(asma, arg);
+ break;
+ case ASHMEM_GET_PROT_MASK:
+ ret = asma->prot_mask;
+ break;
+ case ASHMEM_PIN:
+ case ASHMEM_UNPIN:
+ case ASHMEM_GET_PIN_STATUS:
+ ret = ashmem_pin_unpin(asma, cmd, (void __user *) arg);
+ break;
+ case ASHMEM_PURGE_ALL_CACHES:
+ ret = -EPERM;
+ if (capable(CAP_SYS_ADMIN)) {
+ struct shrink_control sc = {
+ .gfp_mask = GFP_KERNEL,
+ .nr_to_scan = 0,
+ };
+ ret = ashmem_shrink(&ashmem_shrinker, &sc);
+ sc.nr_to_scan = ret;
+ ashmem_shrink(&ashmem_shrinker, &sc);
+ }
+ break;
+ }
+
+ return ret;
+}
+
+static struct file_operations ashmem_fops = {
+ .owner = THIS_MODULE,
+ .open = ashmem_open,
+ .release = ashmem_release,
+ .read = ashmem_read,
+ .llseek = ashmem_llseek,
+ .mmap = ashmem_mmap,
+ .unlocked_ioctl = ashmem_ioctl,
+ .compat_ioctl = ashmem_ioctl,
+};
+
+static struct miscdevice ashmem_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "ashmem",
+ .fops = &ashmem_fops,
+};
+
+static int __init ashmem_init(void)
+{
+ int ret;
+
+ ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
+ sizeof(struct ashmem_area),
+ 0, 0, NULL);
+ if (unlikely(!ashmem_area_cachep)) {
+ printk(KERN_ERR "ashmem: failed to create slab cache\n");
+ return -ENOMEM;
+ }
+
+ ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
+ sizeof(struct ashmem_range),
+ 0, 0, NULL);
+ if (unlikely(!ashmem_range_cachep)) {
+ printk(KERN_ERR "ashmem: failed to create slab cache\n");
+ return -ENOMEM;
+ }
+
+ ret = misc_register(&ashmem_misc);
+ if (unlikely(ret)) {
+ printk(KERN_ERR "ashmem: failed to register misc device!\n");
+ return ret;
+ }
+
+ register_shrinker(&ashmem_shrinker);
+
+ printk(KERN_INFO "ashmem: initialized\n");
+
+ return 0;
+}
+
+static void __exit ashmem_exit(void)
+{
+ int ret;
+
+ unregister_shrinker(&ashmem_shrinker);
+
+ ret = misc_deregister(&ashmem_misc);
+ if (unlikely(ret))
+ printk(KERN_ERR "ashmem: failed to unregister misc device!\n");
+
+ kmem_cache_destroy(ashmem_range_cachep);
+ kmem_cache_destroy(ashmem_area_cachep);
+
+ printk(KERN_INFO "ashmem: unloaded\n");
+}
+
+module_init(ashmem_init);
+module_exit(ashmem_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/ashmem.h b/drivers/staging/android/ashmem.h
new file mode 100644
index 0000000..1976b10
--- /dev/null
+++ b/drivers/staging/android/ashmem.h
@@ -0,0 +1,48 @@
+/*
+ * include/linux/ashmem.h
+ *
+ * Copyright 2008 Google Inc.
+ * Author: Robert Love
+ *
+ * This file is dual licensed. It may be redistributed and/or modified
+ * under the terms of the Apache 2.0 License OR version 2 of the GNU
+ * General Public License.
+ */
+
+#ifndef _LINUX_ASHMEM_H
+#define _LINUX_ASHMEM_H
+
+#include <linux/limits.h>
+#include <linux/ioctl.h>
+
+#define ASHMEM_NAME_LEN 256
+
+#define ASHMEM_NAME_DEF "dev/ashmem"
+
+/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
+#define ASHMEM_NOT_PURGED 0
+#define ASHMEM_WAS_PURGED 1
+
+/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
+#define ASHMEM_IS_UNPINNED 0
+#define ASHMEM_IS_PINNED 1
+
+struct ashmem_pin {
+ __u32 offset; /* offset into region, in bytes, page-aligned */
+ __u32 len; /* length forward from offset, in bytes, page-aligned */
+};
+
+#define __ASHMEMIOC 0x77
+
+#define ASHMEM_SET_NAME _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN])
+#define ASHMEM_GET_NAME _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN])
+#define ASHMEM_SET_SIZE _IOW(__ASHMEMIOC, 3, size_t)
+#define ASHMEM_GET_SIZE _IO(__ASHMEMIOC, 4)
+#define ASHMEM_SET_PROT_MASK _IOW(__ASHMEMIOC, 5, unsigned long)
+#define ASHMEM_GET_PROT_MASK _IO(__ASHMEMIOC, 6)
+#define ASHMEM_PIN _IOW(__ASHMEMIOC, 7, struct ashmem_pin)
+#define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin)
+#define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9)
+#define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10)
+
+#endif /* _LINUX_ASHMEM_H */
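Taken together with ashmem.c above, this header is the entire userspace-facing API. A minimal usage sketch, assuming the misc device is exposed as /dev/ashmem; the region name, size handling and mostly omitted error checks are illustrative only:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "ashmem.h"		/* the header above */

/* Illustrative only: create, name, size, map and pin an ashmem region. */
static void *example_ashmem_region(size_t size)
{
	struct ashmem_pin pin = { .offset = 0, .len = 0 };	/* len 0 = whole region */
	char name[ASHMEM_NAME_LEN] = "example-region";
	void *addr;
	int fd;

	fd = open("/dev/ashmem", O_RDWR);
	if (fd < 0)
		return NULL;

	/* Name and size can only be set before the backing file exists,
	 * i.e. before the first mmap(). */
	ioctl(fd, ASHMEM_SET_NAME, name);
	ioctl(fd, ASHMEM_SET_SIZE, size);

	addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (addr == MAP_FAILED) {
		close(fd);
		return NULL;
	}

	/* Pages start out pinned; ASHMEM_UNPIN marks them reclaimable and
	 * ASHMEM_PIN reports whether they were purged in the meantime. */
	ioctl(fd, ASHMEM_UNPIN, &pin);
	if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
		memset(addr, 0, size);	/* contents were dropped */

	return addr;
}

Note that pin/unpin only work once the region has been mapped, since that is when the backing shmem file is created, and the whole area is torn down when the last reference to the ashmem file descriptor is closed.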
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
new file mode 100644
index 0000000..f0b7e66
--- /dev/null
+++ b/drivers/staging/android/binder.c
@@ -0,0 +1,3609 @@
+/* binder.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/nsproxy.h>
+#include <linux/poll.h>
+#include <linux/debugfs.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+
+#include "binder.h"
+
+static DEFINE_MUTEX(binder_lock);
+static DEFINE_MUTEX(binder_deferred_lock);
+static DEFINE_MUTEX(binder_mmap_lock);
+
+static HLIST_HEAD(binder_procs);
+static HLIST_HEAD(binder_deferred_list);
+static HLIST_HEAD(binder_dead_nodes);
+
+static struct dentry *binder_debugfs_dir_entry_root;
+static struct dentry *binder_debugfs_dir_entry_proc;
+static struct binder_node *binder_context_mgr_node;
+static uid_t binder_context_mgr_uid = -1;
+static int binder_last_id;
+static struct workqueue_struct *binder_deferred_workqueue;
+
+#define BINDER_DEBUG_ENTRY(name) \
+static int binder_##name##_open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, binder_##name##_show, inode->i_private); \
+} \
+\
+static const struct file_operations binder_##name##_fops = { \
+ .owner = THIS_MODULE, \
+ .open = binder_##name##_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
+static int binder_proc_show(struct seq_file *m, void *unused);
+BINDER_DEBUG_ENTRY(proc);
+
+/* This is only defined in include/asm-arm/sizes.h */
+#ifndef SZ_1K
+#define SZ_1K 0x400
+#endif
+
+#ifndef SZ_4M
+#define SZ_4M 0x400000
+#endif
+
+#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
+
+#define BINDER_SMALL_BUF_SIZE (PAGE_SIZE * 64)
+
+enum {
+ BINDER_DEBUG_USER_ERROR = 1U << 0,
+ BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
+ BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
+ BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
+ BINDER_DEBUG_DEAD_BINDER = 1U << 4,
+ BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
+ BINDER_DEBUG_READ_WRITE = 1U << 6,
+ BINDER_DEBUG_USER_REFS = 1U << 7,
+ BINDER_DEBUG_THREADS = 1U << 8,
+ BINDER_DEBUG_TRANSACTION = 1U << 9,
+ BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
+ BINDER_DEBUG_FREE_BUFFER = 1U << 11,
+ BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
+ BINDER_DEBUG_BUFFER_ALLOC = 1U << 13,
+ BINDER_DEBUG_PRIORITY_CAP = 1U << 14,
+ BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15,
+};
+static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
+ BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
+module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
+
+static int binder_debug_no_lock;
+module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
+
+static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
+static int binder_stop_on_user_error;
+
+static int binder_set_stop_on_user_error(const char *val,
+ struct kernel_param *kp)
+{
+ int ret;
+ ret = param_set_int(val, kp);
+ if (binder_stop_on_user_error < 2)
+ wake_up(&binder_user_error_wait);
+ return ret;
+}
+module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
+ param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);
+
+#define binder_debug(mask, x...) \
+ do { \
+ if (binder_debug_mask & mask) \
+ printk(KERN_INFO x); \
+ } while (0)
+
+#define binder_user_error(x...) \
+ do { \
+ if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
+ printk(KERN_INFO x); \
+ if (binder_stop_on_user_error) \
+ binder_stop_on_user_error = 2; \
+ } while (0)
+
+enum binder_stat_types {
+ BINDER_STAT_PROC,
+ BINDER_STAT_THREAD,
+ BINDER_STAT_NODE,
+ BINDER_STAT_REF,
+ BINDER_STAT_DEATH,
+ BINDER_STAT_TRANSACTION,
+ BINDER_STAT_TRANSACTION_COMPLETE,
+ BINDER_STAT_COUNT
+};
+
+struct binder_stats {
+ int br[_IOC_NR(BR_FAILED_REPLY) + 1];
+ int bc[_IOC_NR(BC_DEAD_BINDER_DONE) + 1];
+ int obj_created[BINDER_STAT_COUNT];
+ int obj_deleted[BINDER_STAT_COUNT];
+};
+
+static struct binder_stats binder_stats;
+
+static inline void binder_stats_deleted(enum binder_stat_types type)
+{
+ binder_stats.obj_deleted[type]++;
+}
+
+static inline void binder_stats_created(enum binder_stat_types type)
+{
+ binder_stats.obj_created[type]++;
+}
+
+struct binder_transaction_log_entry {
+ int debug_id;
+ int call_type;
+ int from_proc;
+ int from_thread;
+ int target_handle;
+ int to_proc;
+ int to_thread;
+ int to_node;
+ int data_size;
+ int offsets_size;
+};
+struct binder_transaction_log {
+ int next;
+ int full;
+ struct binder_transaction_log_entry entry[32];
+};
+static struct binder_transaction_log binder_transaction_log;
+static struct binder_transaction_log binder_transaction_log_failed;
+
+static struct binder_transaction_log_entry *binder_transaction_log_add(
+ struct binder_transaction_log *log)
+{
+ struct binder_transaction_log_entry *e;
+ e = &log->entry[log->next];
+ memset(e, 0, sizeof(*e));
+ log->next++;
+ if (log->next == ARRAY_SIZE(log->entry)) {
+ log->next = 0;
+ log->full = 1;
+ }
+ return e;
+}
+
+struct binder_work {
+ struct list_head entry;
+ enum {
+ BINDER_WORK_TRANSACTION = 1,
+ BINDER_WORK_TRANSACTION_COMPLETE,
+ BINDER_WORK_NODE,
+ BINDER_WORK_DEAD_BINDER,
+ BINDER_WORK_DEAD_BINDER_AND_CLEAR,
+ BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
+ } type;
+};
+
+struct binder_node {
+ int debug_id;
+ struct binder_work work;
+ union {
+ struct rb_node rb_node;
+ struct hlist_node dead_node;
+ };
+ struct binder_proc *proc;
+ struct hlist_head refs;
+ int internal_strong_refs;
+ int local_weak_refs;
+ int local_strong_refs;
+ void __user *ptr;
+ void __user *cookie;
+ unsigned has_strong_ref:1;
+ unsigned pending_strong_ref:1;
+ unsigned has_weak_ref:1;
+ unsigned pending_weak_ref:1;
+ unsigned has_async_transaction:1;
+ unsigned accept_fds:1;
+ unsigned min_priority:8;
+ struct list_head async_todo;
+};
+
+struct binder_ref_death {
+ struct binder_work work;
+ void __user *cookie;
+};
+
+struct binder_ref {
+ /* Lookups needed: */
+ /* node + proc => ref (transaction) */
+ /* desc + proc => ref (transaction, inc/dec ref) */
+ /* node => refs + procs (proc exit) */
+ int debug_id;
+ struct rb_node rb_node_desc;
+ struct rb_node rb_node_node;
+ struct hlist_node node_entry;
+ struct binder_proc *proc;
+ struct binder_node *node;
+ uint32_t desc;
+ int strong;
+ int weak;
+ struct binder_ref_death *death;
+};
+
+struct binder_buffer {
+ struct list_head entry; /* free and allocated entries by address */
+ struct rb_node rb_node; /* free entry by size or allocated entry */
+ /* by address */
+ unsigned free:1;
+ unsigned allow_user_free:1;
+ unsigned async_transaction:1;
+ unsigned debug_id:29;
+
+ struct binder_transaction *transaction;
+
+ struct binder_node *target_node;
+ size_t data_size;
+ size_t offsets_size;
+ uint8_t data[0];
+};
+
+enum binder_deferred_state {
+ BINDER_DEFERRED_PUT_FILES = 0x01,
+ BINDER_DEFERRED_FLUSH = 0x02,
+ BINDER_DEFERRED_RELEASE = 0x04,
+};
+
+struct binder_proc {
+ struct hlist_node proc_node;
+ struct rb_root threads;
+ struct rb_root nodes;
+ struct rb_root refs_by_desc;
+ struct rb_root refs_by_node;
+ int pid;
+ struct vm_area_struct *vma;
+ struct task_struct *tsk;
+ struct files_struct *files;
+ struct hlist_node deferred_work_node;
+ int deferred_work;
+ void *buffer;
+ ptrdiff_t user_buffer_offset;
+
+ struct list_head buffers;
+ struct rb_root free_buffers;
+ struct rb_root allocated_buffers;
+ size_t free_async_space;
+
+ struct page **pages;
+ size_t buffer_size;
+ uint32_t buffer_free;
+ struct list_head todo;
+ wait_queue_head_t wait;
+ struct binder_stats stats;
+ struct list_head delivered_death;
+ int max_threads;
+ int requested_threads;
+ int requested_threads_started;
+ int ready_threads;
+ long default_priority;
+ struct dentry *debugfs_entry;
+};
+
+enum {
+ BINDER_LOOPER_STATE_REGISTERED = 0x01,
+ BINDER_LOOPER_STATE_ENTERED = 0x02,
+ BINDER_LOOPER_STATE_EXITED = 0x04,
+ BINDER_LOOPER_STATE_INVALID = 0x08,
+ BINDER_LOOPER_STATE_WAITING = 0x10,
+ BINDER_LOOPER_STATE_NEED_RETURN = 0x20
+};
+
+struct binder_thread {
+ struct binder_proc *proc;
+ struct rb_node rb_node;
+ int pid;
+ int looper;
+ struct binder_transaction *transaction_stack;
+ struct list_head todo;
+ uint32_t return_error; /* Write failed, return error code in read buf */
+ uint32_t return_error2; /* Write failed, return error code in read */
+ /* buffer. Used when sending a reply to a dead process that */
+ /* we are also waiting on */
+ wait_queue_head_t wait;
+ struct binder_stats stats;
+};
+
+struct binder_transaction {
+ int debug_id;
+ struct binder_work work;
+ struct binder_thread *from;
+ struct binder_transaction *from_parent;
+ struct binder_proc *to_proc;
+ struct binder_thread *to_thread;
+ struct binder_transaction *to_parent;
+ unsigned need_reply:1;
+ /* unsigned is_dead:1; */ /* not used at the moment */
+
+ struct binder_buffer *buffer;
+ unsigned int code;
+ unsigned int flags;
+ long priority;
+ long saved_priority;
+ uid_t sender_euid;
+};
+
+static void
+binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
+
+/*
+ * copied from get_unused_fd_flags
+ */
+int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
+{
+ struct files_struct *files = proc->files;
+ int fd, error;
+ struct fdtable *fdt;
+ unsigned long rlim_cur;
+ unsigned long irqs;
+
+ if (files == NULL)
+ return -ESRCH;
+
+ error = -EMFILE;
+ spin_lock(&files->file_lock);
+
+repeat:
+ fdt = files_fdtable(files);
+ fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fds,
+ files->next_fd);
+
+ /*
+ * N.B. For clone tasks sharing a files structure, this test
+ * will limit the total number of files that can be opened.
+ */
+ rlim_cur = 0;
+ if (lock_task_sighand(proc->tsk, &irqs)) {
+ rlim_cur = proc->tsk->signal->rlim[RLIMIT_NOFILE].rlim_cur;
+ unlock_task_sighand(proc->tsk, &irqs);
+ }
+ if (fd >= rlim_cur)
+ goto out;
+
+ /* Do we need to expand the fd array or fd set? */
+ error = expand_files(files, fd);
+ if (error < 0)
+ goto out;
+
+ if (error) {
+ /*
+ * If we needed to expand the fs array we
+ * might have blocked - try again.
+ */
+ error = -EMFILE;
+ goto repeat;
+ }
+
+ FD_SET(fd, fdt->open_fds);
+ if (flags & O_CLOEXEC)
+ FD_SET(fd, fdt->close_on_exec);
+ else
+ FD_CLR(fd, fdt->close_on_exec);
+ files->next_fd = fd + 1;
+#if 1
+ /* Sanity check */
+ if (fdt->fd[fd] != NULL) {
+ printk(KERN_WARNING "get_unused_fd: slot %d not NULL!\n", fd);
+ fdt->fd[fd] = NULL;
+ }
+#endif
+ error = fd;
+
+out:
+ spin_unlock(&files->file_lock);
+ return error;
+}
+
+/*
+ * copied from fd_install
+ */
+static void task_fd_install(
+ struct binder_proc *proc, unsigned int fd, struct file *file)
+{
+ struct files_struct *files = proc->files;
+ struct fdtable *fdt;
+
+ if (files == NULL)
+ return;
+
+ spin_lock(&files->file_lock);
+ fdt = files_fdtable(files);
+ BUG_ON(fdt->fd[fd] != NULL);
+ rcu_assign_pointer(fdt->fd[fd], file);
+ spin_unlock(&files->file_lock);
+}
+
+/*
+ * copied from __put_unused_fd in open.c
+ */
+static void __put_unused_fd(struct files_struct *files, unsigned int fd)
+{
+ struct fdtable *fdt = files_fdtable(files);
+ __FD_CLR(fd, fdt->open_fds);
+ if (fd < files->next_fd)
+ files->next_fd = fd;
+}
+
+/*
+ * copied from sys_close
+ */
+static long task_close_fd(struct binder_proc *proc, unsigned int fd)
+{
+ struct file *filp;
+ struct files_struct *files = proc->files;
+ struct fdtable *fdt;
+ int retval;
+
+ if (files == NULL)
+ return -ESRCH;
+
+ spin_lock(&files->file_lock);
+ fdt = files_fdtable(files);
+ if (fd >= fdt->max_fds)
+ goto out_unlock;
+ filp = fdt->fd[fd];
+ if (!filp)
+ goto out_unlock;
+ rcu_assign_pointer(fdt->fd[fd], NULL);
+ FD_CLR(fd, fdt->close_on_exec);
+ __put_unused_fd(files, fd);
+ spin_unlock(&files->file_lock);
+ retval = filp_close(filp, files);
+
+ /* can't restart close syscall because file table entry was cleared */
+ if (unlikely(retval == -ERESTARTSYS ||
+ retval == -ERESTARTNOINTR ||
+ retval == -ERESTARTNOHAND ||
+ retval == -ERESTART_RESTARTBLOCK))
+ retval = -EINTR;
+
+ return retval;
+
+out_unlock:
+ spin_unlock(&files->file_lock);
+ return -EBADF;
+}
+
+static void binder_set_nice(long nice)
+{
+ long min_nice;
+ if (can_nice(current, nice)) {
+ set_user_nice(current, nice);
+ return;
+ }
+ min_nice = 20 - current->signal->rlim[RLIMIT_NICE].rlim_cur;
+ binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+ "binder: %d: nice value %ld not allowed use "
+ "%ld instead\n", current->pid, nice, min_nice);
+ set_user_nice(current, min_nice);
+ if (min_nice < 20)
+ return;
+ binder_user_error("binder: %d RLIMIT_NICE not set\n", current->pid);
+}
+
+static size_t binder_buffer_size(struct binder_proc *proc,
+ struct binder_buffer *buffer)
+{
+ if (list_is_last(&buffer->entry, &proc->buffers))
+ return proc->buffer + proc->buffer_size - (void *)buffer->data;
+ else
+ return (size_t)list_entry(buffer->entry.next,
+ struct binder_buffer, entry) - (size_t)buffer->data;
+}
+
+static void binder_insert_free_buffer(struct binder_proc *proc,
+ struct binder_buffer *new_buffer)
+{
+ struct rb_node **p = &proc->free_buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_buffer *buffer;
+ size_t buffer_size;
+ size_t new_buffer_size;
+
+ BUG_ON(!new_buffer->free);
+
+ new_buffer_size = binder_buffer_size(proc, new_buffer);
+
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: add free buffer, size %zd, "
+ "at %p\n", proc->pid, new_buffer_size, new_buffer);
+
+ while (*p) {
+ parent = *p;
+ buffer = rb_entry(parent, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+
+ buffer_size = binder_buffer_size(proc, buffer);
+
+ if (new_buffer_size < buffer_size)
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+ }
+ rb_link_node(&new_buffer->rb_node, parent, p);
+ rb_insert_color(&new_buffer->rb_node, &proc->free_buffers);
+}
+
+static void binder_insert_allocated_buffer(struct binder_proc *proc,
+ struct binder_buffer *new_buffer)
+{
+ struct rb_node **p = &proc->allocated_buffers.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_buffer *buffer;
+
+ BUG_ON(new_buffer->free);
+
+ while (*p) {
+ parent = *p;
+ buffer = rb_entry(parent, struct binder_buffer, rb_node);
+ BUG_ON(buffer->free);
+
+ if (new_buffer < buffer)
+ p = &parent->rb_left;
+ else if (new_buffer > buffer)
+ p = &parent->rb_right;
+ else
+ BUG();
+ }
+ rb_link_node(&new_buffer->rb_node, parent, p);
+ rb_insert_color(&new_buffer->rb_node, &proc->allocated_buffers);
+}
+
+static struct binder_buffer *binder_buffer_lookup(struct binder_proc *proc,
+ void __user *user_ptr)
+{
+ struct rb_node *n = proc->allocated_buffers.rb_node;
+ struct binder_buffer *buffer;
+ struct binder_buffer *kern_ptr;
+
+ kern_ptr = user_ptr - proc->user_buffer_offset
+ - offsetof(struct binder_buffer, data);
+
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(buffer->free);
+
+ if (kern_ptr < buffer)
+ n = n->rb_left;
+ else if (kern_ptr > buffer)
+ n = n->rb_right;
+ else
+ return buffer;
+ }
+ return NULL;
+}
+
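+/*
+ * Allocate (allocate != 0) or free the physical pages backing the range
+ * [start, end) of the buffer area. Each allocated page is mapped into
+ * both the kernel address space and the client's vma.
+ */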
+static int binder_update_page_range(struct binder_proc *proc, int allocate,
+ void *start, void *end,
+ struct vm_area_struct *vma)
+{
+ void *page_addr;
+ unsigned long user_page_addr;
+ struct vm_struct tmp_area;
+ struct page **page;
+ struct mm_struct *mm;
+
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: %s pages %p-%p\n", proc->pid,
+ allocate ? "allocate" : "free", start, end);
+
+ if (end <= start)
+ return 0;
+
+ if (vma)
+ mm = NULL;
+ else
+ mm = get_task_mm(proc->tsk);
+
+ if (mm) {
+ down_write(&mm->mmap_sem);
+ vma = proc->vma;
+ if (vma && mm != vma->vm_mm) {
+ pr_err("binder: %d: vma mm and task mm mismatch\n",
+ proc->pid);
+ vma = NULL;
+ }
+ }
+
+ if (allocate == 0)
+ goto free_range;
+
+ if (vma == NULL) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf failed to "
+ "map pages in userspace, no vma\n", proc->pid);
+ goto err_no_vma;
+ }
+
+ for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
+ int ret;
+ struct page **page_array_ptr;
+ page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+
+ BUG_ON(*page);
+ *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (*page == NULL) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
+ "for page at %p\n", proc->pid, page_addr);
+ goto err_alloc_page_failed;
+ }
+ tmp_area.addr = page_addr;
+ tmp_area.size = PAGE_SIZE + PAGE_SIZE /* guard page? */;
+ page_array_ptr = page;
+ ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
+ if (ret) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
+ "to map page at %p in kernel\n",
+ proc->pid, page_addr);
+ goto err_map_kernel_failed;
+ }
+ user_page_addr =
+ (uintptr_t)page_addr + proc->user_buffer_offset;
+ ret = vm_insert_page(vma, user_page_addr, page[0]);
+ if (ret) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf failed "
+ "to map page at %lx in userspace\n",
+ proc->pid, user_page_addr);
+ goto err_vm_insert_page_failed;
+ }
+ /* vm_insert_page does not seem to increment the refcount */
+ }
+ if (mm) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return 0;
+
+free_range:
+ for (page_addr = end - PAGE_SIZE; page_addr >= start;
+ page_addr -= PAGE_SIZE) {
+ page = &proc->pages[(page_addr - proc->buffer) / PAGE_SIZE];
+ if (vma)
+ zap_page_range(vma, (uintptr_t)page_addr +
+ proc->user_buffer_offset, PAGE_SIZE, NULL);
+err_vm_insert_page_failed:
+ unmap_kernel_range((unsigned long)page_addr, PAGE_SIZE);
+err_map_kernel_failed:
+ __free_page(*page);
+ *page = NULL;
+err_alloc_page_failed:
+ ;
+ }
+err_no_vma:
+ if (mm) {
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ return -ENOMEM;
+}
+
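+/*
+ * Carve a buffer of data_size + offsets_size (each pointer-aligned) out
+ * of the best-fitting free buffer, bringing in any missing pages and
+ * returning the unused tail to the free list.
+ */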
+static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
+ size_t data_size,
+ size_t offsets_size, int is_async)
+{
+ struct rb_node *n = proc->free_buffers.rb_node;
+ struct binder_buffer *buffer;
+ size_t buffer_size;
+ struct rb_node *best_fit = NULL;
+ void *has_page_addr;
+ void *end_page_addr;
+ size_t size;
+
+ if (proc->vma == NULL) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf, no vma\n",
+ proc->pid);
+ return NULL;
+ }
+
+ size = ALIGN(data_size, sizeof(void *)) +
+ ALIGN(offsets_size, sizeof(void *));
+
+ if (size < data_size || size < offsets_size) {
+ binder_user_error("binder: %d: got transaction with invalid "
+ "size %zd-%zd\n", proc->pid, data_size, offsets_size);
+ return NULL;
+ }
+
+ if (is_async &&
+ proc->free_async_space < size + sizeof(struct binder_buffer)) {
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: binder_alloc_buf size %zd"
+ "failed, no async space left\n", proc->pid, size);
+ return NULL;
+ }
+
+ while (n) {
+ buffer = rb_entry(n, struct binder_buffer, rb_node);
+ BUG_ON(!buffer->free);
+ buffer_size = binder_buffer_size(proc, buffer);
+
+ if (size < buffer_size) {
+ best_fit = n;
+ n = n->rb_left;
+ } else if (size > buffer_size)
+ n = n->rb_right;
+ else {
+ best_fit = n;
+ break;
+ }
+ }
+ if (best_fit == NULL) {
+ printk(KERN_ERR "binder: %d: binder_alloc_buf size %zd failed, "
+ "no address space\n", proc->pid, size);
+ return NULL;
+ }
+ if (n == NULL) {
+ buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
+ buffer_size = binder_buffer_size(proc, buffer);
+ }
+
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: binder_alloc_buf size %zd got buff"
+ "er %p size %zd\n", proc->pid, size, buffer, buffer_size);
+
+ has_page_addr =
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
+ if (n == NULL) {
+ if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
+ buffer_size = size; /* no room for other buffers */
+ else
+ buffer_size = size + sizeof(struct binder_buffer);
+ }
+ end_page_addr =
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+ if (end_page_addr > has_page_addr)
+ end_page_addr = has_page_addr;
+ if (binder_update_page_range(proc, 1,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data), end_page_addr, NULL))
+ return NULL;
+
+ rb_erase(best_fit, &proc->free_buffers);
+ buffer->free = 0;
+ binder_insert_allocated_buffer(proc, buffer);
+ if (buffer_size != size) {
+ struct binder_buffer *new_buffer = (void *)buffer->data + size;
+ list_add(&new_buffer->entry, &buffer->entry);
+ new_buffer->free = 1;
+ binder_insert_free_buffer(proc, new_buffer);
+ }
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: binder_alloc_buf size %zd got "
+ "%p\n", proc->pid, size, buffer);
+ buffer->data_size = data_size;
+ buffer->offsets_size = offsets_size;
+ buffer->async_transaction = is_async;
+ if (is_async) {
+ proc->free_async_space -= size + sizeof(struct binder_buffer);
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "binder: %d: binder_alloc_buf size %zd "
+ "async free %zd\n", proc->pid, size,
+ proc->free_async_space);
+ }
+
+ return buffer;
+}
+
+static void *buffer_start_page(struct binder_buffer *buffer)
+{
+ return (void *)((uintptr_t)buffer & PAGE_MASK);
+}
+
+static void *buffer_end_page(struct binder_buffer *buffer)
+{
+ return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+}
+
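+/*
+ * Drop a free buffer that is being merged with a neighbour, releasing
+ * any pages it does not share with the adjacent buffers.
+ */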
+static void binder_delete_free_buffer(struct binder_proc *proc,
+ struct binder_buffer *buffer)
+{
+ struct binder_buffer *prev, *next = NULL;
+ int free_page_end = 1;
+ int free_page_start = 1;
+
+ BUG_ON(proc->buffers.next == &buffer->entry);
+ prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
+ BUG_ON(!prev->free);
+ if (buffer_end_page(prev) == buffer_start_page(buffer)) {
+ free_page_start = 0;
+ if (buffer_end_page(prev) == buffer_end_page(buffer))
+ free_page_end = 0;
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: merge free, buffer %p "
+ "share page with %p\n", proc->pid, buffer, prev);
+ }
+
+ if (!list_is_last(&buffer->entry, &proc->buffers)) {
+ next = list_entry(buffer->entry.next,
+ struct binder_buffer, entry);
+ if (buffer_start_page(next) == buffer_end_page(buffer)) {
+ free_page_end = 0;
+ if (buffer_start_page(next) ==
+ buffer_start_page(buffer))
+ free_page_start = 0;
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: merge free, buffer"
+ " %p share page with %p\n", proc->pid,
+ buffer, next);
+ }
+ }
+ list_del(&buffer->entry);
+ if (free_page_start || free_page_end) {
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: merge free, buffer %p do "
+ "not share page%s%s with with %p or %p\n",
+ proc->pid, buffer, free_page_start ? "" : " end",
+ free_page_end ? "" : " start", prev, next);
+ binder_update_page_range(proc, 0, free_page_start ?
+ buffer_start_page(buffer) : buffer_end_page(buffer),
+ (free_page_end ? buffer_end_page(buffer) :
+ buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+ }
+}
+
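+/*
+ * Return a buffer to the allocator: release its pages, merge it with
+ * free neighbours and insert the result into free_buffers.
+ */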
+static void binder_free_buf(struct binder_proc *proc,
+ struct binder_buffer *buffer)
+{
+ size_t size, buffer_size;
+
+ buffer_size = binder_buffer_size(proc, buffer);
+
+ size = ALIGN(buffer->data_size, sizeof(void *)) +
+ ALIGN(buffer->offsets_size, sizeof(void *));
+
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder: %d: binder_free_buf %p size %zd buffer"
+ "_size %zd\n", proc->pid, buffer, size, buffer_size);
+
+ BUG_ON(buffer->free);
+ BUG_ON(size > buffer_size);
+ BUG_ON(buffer->transaction != NULL);
+ BUG_ON((void *)buffer < proc->buffer);
+ BUG_ON((void *)buffer > proc->buffer + proc->buffer_size);
+
+ if (buffer->async_transaction) {
+ proc->free_async_space += size + sizeof(struct binder_buffer);
+
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
+ "binder: %d: binder_free_buf size %zd "
+ "async free %zd\n", proc->pid, size,
+ proc->free_async_space);
+ }
+
+ binder_update_page_range(proc, 0,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+ (void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK),
+ NULL);
+ rb_erase(&buffer->rb_node, &proc->allocated_buffers);
+ buffer->free = 1;
+ if (!list_is_last(&buffer->entry, &proc->buffers)) {
+ struct binder_buffer *next = list_entry(buffer->entry.next,
+ struct binder_buffer, entry);
+ if (next->free) {
+ rb_erase(&next->rb_node, &proc->free_buffers);
+ binder_delete_free_buffer(proc, next);
+ }
+ }
+ if (proc->buffers.next != &buffer->entry) {
+ struct binder_buffer *prev = list_entry(buffer->entry.prev,
+ struct binder_buffer, entry);
+ if (prev->free) {
+ binder_delete_free_buffer(proc, buffer);
+ rb_erase(&prev->rb_node, &proc->free_buffers);
+ buffer = prev;
+ }
+ }
+ binder_insert_free_buffer(proc, buffer);
+}
+
+static struct binder_node *binder_get_node(struct binder_proc *proc,
+ void __user *ptr)
+{
+ struct rb_node *n = proc->nodes.rb_node;
+ struct binder_node *node;
+
+ while (n) {
+ node = rb_entry(n, struct binder_node, rb_node);
+
+ if (ptr < node->ptr)
+ n = n->rb_left;
+ else if (ptr > node->ptr)
+ n = n->rb_right;
+ else
+ return node;
+ }
+ return NULL;
+}
+
+static struct binder_node *binder_new_node(struct binder_proc *proc,
+ void __user *ptr,
+ void __user *cookie)
+{
+ struct rb_node **p = &proc->nodes.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_node *node;
+
+ while (*p) {
+ parent = *p;
+ node = rb_entry(parent, struct binder_node, rb_node);
+
+ if (ptr < node->ptr)
+ p = &(*p)->rb_left;
+ else if (ptr > node->ptr)
+ p = &(*p)->rb_right;
+ else
+ return NULL;
+ }
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (node == NULL)
+ return NULL;
+ binder_stats_created(BINDER_STAT_NODE);
+ rb_link_node(&node->rb_node, parent, p);
+ rb_insert_color(&node->rb_node, &proc->nodes);
+ node->debug_id = ++binder_last_id;
+ node->proc = proc;
+ node->ptr = ptr;
+ node->cookie = cookie;
+ node->work.type = BINDER_WORK_NODE;
+ INIT_LIST_HEAD(&node->work.entry);
+ INIT_LIST_HEAD(&node->async_todo);
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: %d:%d node %d u%p c%p created\n",
+ proc->pid, current->pid, node->debug_id,
+ node->ptr, node->cookie);
+ return node;
+}
+
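+/*
+ * Take a strong or weak reference on a node. "internal" references are
+ * held through binder_ref objects, the others are local to the owning
+ * process. If user space has not yet been told about the reference, the
+ * node's work entry is queued on target_list.
+ */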
+static int binder_inc_node(struct binder_node *node, int strong, int internal,
+ struct list_head *target_list)
+{
+ if (strong) {
+ if (internal) {
+ if (target_list == NULL &&
+ node->internal_strong_refs == 0 &&
+ !(node == binder_context_mgr_node &&
+ node->has_strong_ref)) {
+ printk(KERN_ERR "binder: invalid inc strong "
+ "node for %d\n", node->debug_id);
+ return -EINVAL;
+ }
+ node->internal_strong_refs++;
+ } else
+ node->local_strong_refs++;
+ if (!node->has_strong_ref && target_list) {
+ list_del_init(&node->work.entry);
+ list_add_tail(&node->work.entry, target_list);
+ }
+ } else {
+ if (!internal)
+ node->local_weak_refs++;
+ if (!node->has_weak_ref && list_empty(&node->work.entry)) {
+ if (target_list == NULL) {
+ printk(KERN_ERR "binder: invalid inc weak node "
+ "for %d\n", node->debug_id);
+ return -EINVAL;
+ }
+ list_add_tail(&node->work.entry, target_list);
+ }
+ }
+ return 0;
+}
+
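+/*
+ * Drop a strong or weak reference on a node. When the last reference of
+ * a kind goes away the node is queued so user space can be told, or
+ * freed outright once nothing references it at all.
+ */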
+static int binder_dec_node(struct binder_node *node, int strong, int internal)
+{
+ if (strong) {
+ if (internal)
+ node->internal_strong_refs--;
+ else
+ node->local_strong_refs--;
+ if (node->local_strong_refs || node->internal_strong_refs)
+ return 0;
+ } else {
+ if (!internal)
+ node->local_weak_refs--;
+ if (node->local_weak_refs || !hlist_empty(&node->refs))
+ return 0;
+ }
+ if (node->proc && (node->has_strong_ref || node->has_weak_ref)) {
+ if (list_empty(&node->work.entry)) {
+ list_add_tail(&node->work.entry, &node->proc->todo);
+ wake_up_interruptible(&node->proc->wait);
+ }
+ } else {
+ if (hlist_empty(&node->refs) && !node->local_strong_refs &&
+ !node->local_weak_refs) {
+ list_del_init(&node->work.entry);
+ if (node->proc) {
+ rb_erase(&node->rb_node, &node->proc->nodes);
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: refless node %d deleted\n",
+ node->debug_id);
+ } else {
+ hlist_del(&node->dead_node);
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: dead node %d deleted\n",
+ node->debug_id);
+ }
+ kfree(node);
+ binder_stats_deleted(BINDER_STAT_NODE);
+ }
+ }
+
+ return 0;
+}
+
+
+static struct binder_ref *binder_get_ref(struct binder_proc *proc,
+ uint32_t desc)
+{
+ struct rb_node *n = proc->refs_by_desc.rb_node;
+ struct binder_ref *ref;
+
+ while (n) {
+ ref = rb_entry(n, struct binder_ref, rb_node_desc);
+
+ if (desc < ref->desc)
+ n = n->rb_left;
+ else if (desc > ref->desc)
+ n = n->rb_right;
+ else
+ return ref;
+ }
+ return NULL;
+}
+
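+/*
+ * Return proc's reference to the given node, creating one with the
+ * lowest unused descriptor if none exists. Descriptor 0 is reserved for
+ * the context manager node.
+ */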
+static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
+ struct binder_node *node)
+{
+ struct rb_node *n;
+ struct rb_node **p = &proc->refs_by_node.rb_node;
+ struct rb_node *parent = NULL;
+ struct binder_ref *ref, *new_ref;
+
+ while (*p) {
+ parent = *p;
+ ref = rb_entry(parent, struct binder_ref, rb_node_node);
+
+ if (node < ref->node)
+ p = &(*p)->rb_left;
+ else if (node > ref->node)
+ p = &(*p)->rb_right;
+ else
+ return ref;
+ }
+ new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+ if (new_ref == NULL)
+ return NULL;
+ binder_stats_created(BINDER_STAT_REF);
+ new_ref->debug_id = ++binder_last_id;
+ new_ref->proc = proc;
+ new_ref->node = node;
+ rb_link_node(&new_ref->rb_node_node, parent, p);
+ rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);
+
+ new_ref->desc = (node == binder_context_mgr_node) ? 0 : 1;
+ for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+ ref = rb_entry(n, struct binder_ref, rb_node_desc);
+ if (ref->desc > new_ref->desc)
+ break;
+ new_ref->desc = ref->desc + 1;
+ }
+
+ p = &proc->refs_by_desc.rb_node;
+ while (*p) {
+ parent = *p;
+ ref = rb_entry(parent, struct binder_ref, rb_node_desc);
+
+ if (new_ref->desc < ref->desc)
+ p = &(*p)->rb_left;
+ else if (new_ref->desc > ref->desc)
+ p = &(*p)->rb_right;
+ else
+ BUG();
+ }
+ rb_link_node(&new_ref->rb_node_desc, parent, p);
+ rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);
+ if (node) {
+ hlist_add_head(&new_ref->node_entry, &node->refs);
+
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: %d new ref %d desc %d for "
+ "node %d\n", proc->pid, new_ref->debug_id,
+ new_ref->desc, node->debug_id);
+ } else {
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: %d new ref %d desc %d for "
+ "dead node\n", proc->pid, new_ref->debug_id,
+ new_ref->desc);
+ }
+ return new_ref;
+}
+
+static void binder_delete_ref(struct binder_ref *ref)
+{
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: %d delete ref %d desc %d for "
+ "node %d\n", ref->proc->pid, ref->debug_id,
+ ref->desc, ref->node->debug_id);
+
+ rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
+ rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);
+ if (ref->strong)
+ binder_dec_node(ref->node, 1, 1);
+ hlist_del(&ref->node_entry);
+ binder_dec_node(ref->node, 0, 1);
+ if (ref->death) {
+ binder_debug(BINDER_DEBUG_DEAD_BINDER,
+ "binder: %d delete ref %d desc %d "
+ "has death notification\n", ref->proc->pid,
+ ref->debug_id, ref->desc);
+ list_del(&ref->death->work.entry);
+ kfree(ref->death);
+ binder_stats_deleted(BINDER_STAT_DEATH);
+ }
+ kfree(ref);
+ binder_stats_deleted(BINDER_STAT_REF);
+}
+
+static int binder_inc_ref(struct binder_ref *ref, int strong,
+ struct list_head *target_list)
+{
+ int ret;
+ if (strong) {
+ if (ref->strong == 0) {
+ ret = binder_inc_node(ref->node, 1, 1, target_list);
+ if (ret)
+ return ret;
+ }
+ ref->strong++;
+ } else {
+ if (ref->weak == 0) {
+ ret = binder_inc_node(ref->node, 0, 1, target_list);
+ if (ret)
+ return ret;
+ }
+ ref->weak++;
+ }
+ return 0;
+}
+
+
+static int binder_dec_ref(struct binder_ref *ref, int strong)
+{
+ if (strong) {
+ if (ref->strong == 0) {
+ binder_user_error("binder: %d invalid dec strong, "
+ "ref %d desc %d s %d w %d\n",
+ ref->proc->pid, ref->debug_id,
+ ref->desc, ref->strong, ref->weak);
+ return -EINVAL;
+ }
+ ref->strong--;
+ if (ref->strong == 0) {
+ int ret;
+ ret = binder_dec_node(ref->node, strong, 1);
+ if (ret)
+ return ret;
+ }
+ } else {
+ if (ref->weak == 0) {
+ binder_user_error("binder: %d invalid dec weak, "
+ "ref %d desc %d s %d w %d\n",
+ ref->proc->pid, ref->debug_id,
+ ref->desc, ref->strong, ref->weak);
+ return -EINVAL;
+ }
+ ref->weak--;
+ }
+ if (ref->strong == 0 && ref->weak == 0)
+ binder_delete_ref(ref);
+ return 0;
+}
+
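+/*
+ * Unwind one transaction from the target thread's transaction stack and
+ * free it.
+ */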
+static void binder_pop_transaction(struct binder_thread *target_thread,
+ struct binder_transaction *t)
+{
+ if (target_thread) {
+ BUG_ON(target_thread->transaction_stack != t);
+ BUG_ON(target_thread->transaction_stack->from != target_thread);
+ target_thread->transaction_stack =
+ target_thread->transaction_stack->from_parent;
+ t->from = NULL;
+ }
+ t->need_reply = 0;
+ if (t->buffer)
+ t->buffer->transaction = NULL;
+ kfree(t);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION);
+}
+
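+/*
+ * Deliver error_code to whoever is waiting for a reply to transaction t,
+ * walking up the stack of nested transactions if the immediate sender is
+ * already gone.
+ */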
+static void binder_send_failed_reply(struct binder_transaction *t,
+ uint32_t error_code)
+{
+ struct binder_thread *target_thread;
+ BUG_ON(t->flags & TF_ONE_WAY);
+ while (1) {
+ target_thread = t->from;
+ if (target_thread) {
+ if (target_thread->return_error != BR_OK &&
+ target_thread->return_error2 == BR_OK) {
+ target_thread->return_error2 =
+ target_thread->return_error;
+ target_thread->return_error = BR_OK;
+ }
+ if (target_thread->return_error == BR_OK) {
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "binder: send failed reply for "
+ "transaction %d to %d:%d\n",
+ t->debug_id, target_thread->proc->pid,
+ target_thread->pid);
+
+ binder_pop_transaction(target_thread, t);
+ target_thread->return_error = error_code;
+ wake_up_interruptible(&target_thread->wait);
+ } else {
+ printk(KERN_ERR "binder: reply failed, target "
+ "thread, %d:%d, has error code %d "
+ "already\n", target_thread->proc->pid,
+ target_thread->pid,
+ target_thread->return_error);
+ }
+ return;
+ } else {
+ struct binder_transaction *next = t->from_parent;
+
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "binder: send failed reply "
+ "for transaction %d, target dead\n",
+ t->debug_id);
+
+ binder_pop_transaction(target_thread, t);
+ if (next == NULL) {
+ binder_debug(BINDER_DEBUG_DEAD_BINDER,
+ "binder: reply failed,"
+ " no target thread at root\n");
+ return;
+ }
+ t = next;
+ binder_debug(BINDER_DEBUG_DEAD_BINDER,
+ "binder: reply failed, no target "
+ "thread -- retry %d\n", t->debug_id);
+ }
+ }
+}
+
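+/*
+ * Release the node and reference counts taken for the objects flattened
+ * into a transaction buffer. failed_at limits the walk when only part of
+ * the buffer was translated; in that case installed fds are closed too.
+ */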
+static void binder_transaction_buffer_release(struct binder_proc *proc,
+ struct binder_buffer *buffer,
+ size_t *failed_at)
+{
+ size_t *offp, *off_end;
+ int debug_id = buffer->debug_id;
+
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ "binder: %d buffer release %d, size %zd-%zd, failed at %p\n",
+ proc->pid, buffer->debug_id,
+ buffer->data_size, buffer->offsets_size, failed_at);
+
+ if (buffer->target_node)
+ binder_dec_node(buffer->target_node, 1, 0);
+
+ offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *)));
+ if (failed_at)
+ off_end = failed_at;
+ else
+ off_end = (void *)offp + buffer->offsets_size;
+ for (; offp < off_end; offp++) {
+ struct flat_binder_object *fp;
+ if (*offp > buffer->data_size - sizeof(*fp) ||
+ buffer->data_size < sizeof(*fp) ||
+ !IS_ALIGNED(*offp, sizeof(void *))) {
+ printk(KERN_ERR "binder: transaction release %d bad"
+ "offset %zd, size %zd\n", debug_id,
+ *offp, buffer->data_size);
+ continue;
+ }
+ fp = (struct flat_binder_object *)(buffer->data + *offp);
+ switch (fp->type) {
+ case BINDER_TYPE_BINDER:
+ case BINDER_TYPE_WEAK_BINDER: {
+ struct binder_node *node = binder_get_node(proc, fp->binder);
+ if (node == NULL) {
+ printk(KERN_ERR "binder: transaction release %d"
+ " bad node %p\n", debug_id, fp->binder);
+ break;
+ }
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " node %d u%p\n",
+ node->debug_id, node->ptr);
+ binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
+ } break;
+ case BINDER_TYPE_HANDLE:
+ case BINDER_TYPE_WEAK_HANDLE: {
+ struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+ if (ref == NULL) {
+ printk(KERN_ERR "binder: transaction release %d"
+ " bad handle %ld\n", debug_id,
+ fp->handle);
+ break;
+ }
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " ref %d desc %d (node %d)\n",
+ ref->debug_id, ref->desc, ref->node->debug_id);
+ binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
+ } break;
+
+ case BINDER_TYPE_FD:
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " fd %ld\n", fp->handle);
+ if (failed_at)
+ task_close_fd(proc, fp->handle);
+ break;
+
+ default:
+ printk(KERN_ERR "binder: transaction release %d bad "
+ "object type %lx\n", debug_id, fp->type);
+ break;
+ }
+ }
+}
+
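+/*
+ * Core transaction path: find the target node and thread, copy the data
+ * and offset arrays from the sender, translate every flat_binder_object
+ * (binders, handles, fds) into the target process, and queue the work on
+ * the target's todo list.
+ */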
+static void binder_transaction(struct binder_proc *proc,
+ struct binder_thread *thread,
+ struct binder_transaction_data *tr, int reply)
+{
+ struct binder_transaction *t;
+ struct binder_work *tcomplete;
+ size_t *offp, *off_end;
+ struct binder_proc *target_proc;
+ struct binder_thread *target_thread = NULL;
+ struct binder_node *target_node = NULL;
+ struct list_head *target_list;
+ wait_queue_head_t *target_wait;
+ struct binder_transaction *in_reply_to = NULL;
+ struct binder_transaction_log_entry *e;
+ uint32_t return_error;
+
+ e = binder_transaction_log_add(&binder_transaction_log);
+ e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
+ e->from_proc = proc->pid;
+ e->from_thread = thread->pid;
+ e->target_handle = tr->target.handle;
+ e->data_size = tr->data_size;
+ e->offsets_size = tr->offsets_size;
+
+ if (reply) {
+ in_reply_to = thread->transaction_stack;
+ if (in_reply_to == NULL) {
+ binder_user_error("binder: %d:%d got reply transaction "
+ "with no transaction stack\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ goto err_empty_call_stack;
+ }
+ binder_set_nice(in_reply_to->saved_priority);
+ if (in_reply_to->to_thread != thread) {
+ binder_user_error("binder: %d:%d got reply transaction "
+ "with bad transaction stack,"
+ " transaction %d has target %d:%d\n",
+ proc->pid, thread->pid, in_reply_to->debug_id,
+ in_reply_to->to_proc ?
+ in_reply_to->to_proc->pid : 0,
+ in_reply_to->to_thread ?
+ in_reply_to->to_thread->pid : 0);
+ return_error = BR_FAILED_REPLY;
+ in_reply_to = NULL;
+ goto err_bad_call_stack;
+ }
+ thread->transaction_stack = in_reply_to->to_parent;
+ target_thread = in_reply_to->from;
+ if (target_thread == NULL) {
+ return_error = BR_DEAD_REPLY;
+ goto err_dead_binder;
+ }
+ if (target_thread->transaction_stack != in_reply_to) {
+ binder_user_error("binder: %d:%d got reply transaction "
+ "with bad target transaction stack %d, "
+ "expected %d\n",
+ proc->pid, thread->pid,
+ target_thread->transaction_stack ?
+ target_thread->transaction_stack->debug_id : 0,
+ in_reply_to->debug_id);
+ return_error = BR_FAILED_REPLY;
+ in_reply_to = NULL;
+ target_thread = NULL;
+ goto err_dead_binder;
+ }
+ target_proc = target_thread->proc;
+ } else {
+ if (tr->target.handle) {
+ struct binder_ref *ref;
+ ref = binder_get_ref(proc, tr->target.handle);
+ if (ref == NULL) {
+ binder_user_error("binder: %d:%d got "
+ "transaction to invalid handle\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ goto err_invalid_target_handle;
+ }
+ target_node = ref->node;
+ } else {
+ target_node = binder_context_mgr_node;
+ if (target_node == NULL) {
+ return_error = BR_DEAD_REPLY;
+ goto err_no_context_mgr_node;
+ }
+ }
+ e->to_node = target_node->debug_id;
+ target_proc = target_node->proc;
+ if (target_proc == NULL) {
+ return_error = BR_DEAD_REPLY;
+ goto err_dead_binder;
+ }
+ if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
+ struct binder_transaction *tmp;
+ tmp = thread->transaction_stack;
+ if (tmp->to_thread != thread) {
+ binder_user_error("binder: %d:%d got new "
+ "transaction with bad transaction stack"
+ ", transaction %d has target %d:%d\n",
+ proc->pid, thread->pid, tmp->debug_id,
+ tmp->to_proc ? tmp->to_proc->pid : 0,
+ tmp->to_thread ?
+ tmp->to_thread->pid : 0);
+ return_error = BR_FAILED_REPLY;
+ goto err_bad_call_stack;
+ }
+ while (tmp) {
+ if (tmp->from && tmp->from->proc == target_proc)
+ target_thread = tmp->from;
+ tmp = tmp->from_parent;
+ }
+ }
+ }
+ if (target_thread) {
+ e->to_thread = target_thread->pid;
+ target_list = &target_thread->todo;
+ target_wait = &target_thread->wait;
+ } else {
+ target_list = &target_proc->todo;
+ target_wait = &target_proc->wait;
+ }
+ e->to_proc = target_proc->pid;
+
+ /* TODO: reuse incoming transaction for reply */
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (t == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_alloc_t_failed;
+ }
+ binder_stats_created(BINDER_STAT_TRANSACTION);
+
+ tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
+ if (tcomplete == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_alloc_tcomplete_failed;
+ }
+ binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
+
+ t->debug_id = ++binder_last_id;
+ e->debug_id = t->debug_id;
+
+ if (reply)
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ "binder: %d:%d BC_REPLY %d -> %d:%d, "
+ "data %p-%p size %zd-%zd\n",
+ proc->pid, thread->pid, t->debug_id,
+ target_proc->pid, target_thread->pid,
+ tr->data.ptr.buffer, tr->data.ptr.offsets,
+ tr->data_size, tr->offsets_size);
+ else
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ "binder: %d:%d BC_TRANSACTION %d -> "
+ "%d - node %d, data %p-%p size %zd-%zd\n",
+ proc->pid, thread->pid, t->debug_id,
+ target_proc->pid, target_node->debug_id,
+ tr->data.ptr.buffer, tr->data.ptr.offsets,
+ tr->data_size, tr->offsets_size);
+
+ if (!reply && !(tr->flags & TF_ONE_WAY))
+ t->from = thread;
+ else
+ t->from = NULL;
+ t->sender_euid = proc->tsk->cred->euid;
+ t->to_proc = target_proc;
+ t->to_thread = target_thread;
+ t->code = tr->code;
+ t->flags = tr->flags;
+ t->priority = task_nice(current);
+ t->buffer = binder_alloc_buf(target_proc, tr->data_size,
+ tr->offsets_size, !reply && (t->flags & TF_ONE_WAY));
+ if (t->buffer == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_alloc_buf_failed;
+ }
+ t->buffer->allow_user_free = 0;
+ t->buffer->debug_id = t->debug_id;
+ t->buffer->transaction = t;
+ t->buffer->target_node = target_node;
+ if (target_node)
+ binder_inc_node(target_node, 1, 0, NULL);
+
+ offp = (size_t *)(t->buffer->data + ALIGN(tr->data_size, sizeof(void *)));
+
+ if (copy_from_user(t->buffer->data, tr->data.ptr.buffer, tr->data_size)) {
+ binder_user_error("binder: %d:%d got transaction with invalid "
+ "data ptr\n", proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ goto err_copy_data_failed;
+ }
+ if (copy_from_user(offp, tr->data.ptr.offsets, tr->offsets_size)) {
+ binder_user_error("binder: %d:%d got transaction with invalid "
+ "offsets ptr\n", proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ goto err_copy_data_failed;
+ }
+ if (!IS_ALIGNED(tr->offsets_size, sizeof(size_t))) {
+ binder_user_error("binder: %d:%d got transaction with "
+ "invalid offsets size, %zd\n",
+ proc->pid, thread->pid, tr->offsets_size);
+ return_error = BR_FAILED_REPLY;
+ goto err_bad_offset;
+ }
+ off_end = (void *)offp + tr->offsets_size;
+ for (; offp < off_end; offp++) {
+ struct flat_binder_object *fp;
+ if (*offp > t->buffer->data_size - sizeof(*fp) ||
+ t->buffer->data_size < sizeof(*fp) ||
+ !IS_ALIGNED(*offp, sizeof(void *))) {
+ binder_user_error("binder: %d:%d got transaction with "
+ "invalid offset, %zd\n",
+ proc->pid, thread->pid, *offp);
+ return_error = BR_FAILED_REPLY;
+ goto err_bad_offset;
+ }
+ fp = (struct flat_binder_object *)(t->buffer->data + *offp);
+ switch (fp->type) {
+ case BINDER_TYPE_BINDER:
+ case BINDER_TYPE_WEAK_BINDER: {
+ struct binder_ref *ref;
+ struct binder_node *node = binder_get_node(proc, fp->binder);
+ if (node == NULL) {
+ node = binder_new_node(proc, fp->binder, fp->cookie);
+ if (node == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_new_node_failed;
+ }
+ node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+ node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+ }
+ if (fp->cookie != node->cookie) {
+ binder_user_error("binder: %d:%d sending u%p "
+ "node %d, cookie mismatch %p != %p\n",
+ proc->pid, thread->pid,
+ fp->binder, node->debug_id,
+ fp->cookie, node->cookie);
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_get_ref_for_node_failed;
+ }
+ ref = binder_get_ref_for_node(target_proc, node);
+ if (ref == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_get_ref_for_node_failed;
+ }
+ if (fp->type == BINDER_TYPE_BINDER)
+ fp->type = BINDER_TYPE_HANDLE;
+ else
+ fp->type = BINDER_TYPE_WEAK_HANDLE;
+ fp->handle = ref->desc;
+ binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
+ &thread->todo);
+
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " node %d u%p -> ref %d desc %d\n",
+ node->debug_id, node->ptr, ref->debug_id,
+ ref->desc);
+ } break;
+ case BINDER_TYPE_HANDLE:
+ case BINDER_TYPE_WEAK_HANDLE: {
+ struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+ if (ref == NULL) {
+ binder_user_error("binder: %d:%d got "
+ "transaction with invalid "
+ "handle, %ld\n", proc->pid,
+ thread->pid, fp->handle);
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_get_ref_failed;
+ }
+ if (ref->node->proc == target_proc) {
+ if (fp->type == BINDER_TYPE_HANDLE)
+ fp->type = BINDER_TYPE_BINDER;
+ else
+ fp->type = BINDER_TYPE_WEAK_BINDER;
+ fp->binder = ref->node->ptr;
+ fp->cookie = ref->node->cookie;
+ binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " ref %d desc %d -> node %d u%p\n",
+ ref->debug_id, ref->desc, ref->node->debug_id,
+ ref->node->ptr);
+ } else {
+ struct binder_ref *new_ref;
+ new_ref = binder_get_ref_for_node(target_proc, ref->node);
+ if (new_ref == NULL) {
+ return_error = BR_FAILED_REPLY;
+ goto err_binder_get_ref_for_node_failed;
+ }
+ fp->handle = new_ref->desc;
+ binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " ref %d desc %d -> ref %d desc %d (node %d)\n",
+ ref->debug_id, ref->desc, new_ref->debug_id,
+ new_ref->desc, ref->node->debug_id);
+ }
+ } break;
+
+ case BINDER_TYPE_FD: {
+ int target_fd;
+ struct file *file;
+
+ if (reply) {
+ if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
+ binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n",
+ proc->pid, thread->pid, fp->handle);
+ return_error = BR_FAILED_REPLY;
+ goto err_fd_not_allowed;
+ }
+ } else if (!target_node->accept_fds) {
+ binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n",
+ proc->pid, thread->pid, fp->handle);
+ return_error = BR_FAILED_REPLY;
+ goto err_fd_not_allowed;
+ }
+
+ file = fget(fp->handle);
+ if (file == NULL) {
+ binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n",
+ proc->pid, thread->pid, fp->handle);
+ return_error = BR_FAILED_REPLY;
+ goto err_fget_failed;
+ }
+ target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
+ if (target_fd < 0) {
+ fput(file);
+ return_error = BR_FAILED_REPLY;
+ goto err_get_unused_fd_failed;
+ }
+ task_fd_install(target_proc, target_fd, file);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ " fd %ld -> %d\n", fp->handle, target_fd);
+ /* TODO: fput? */
+ fp->handle = target_fd;
+ } break;
+
+ default:
+ binder_user_error("binder: %d:%d got transactio"
+ "n with invalid object type, %lx\n",
+ proc->pid, thread->pid, fp->type);
+ return_error = BR_FAILED_REPLY;
+ goto err_bad_object_type;
+ }
+ }
+ if (reply) {
+ BUG_ON(t->buffer->async_transaction != 0);
+ binder_pop_transaction(target_thread, in_reply_to);
+ } else if (!(t->flags & TF_ONE_WAY)) {
+ BUG_ON(t->buffer->async_transaction != 0);
+ t->need_reply = 1;
+ t->from_parent = thread->transaction_stack;
+ thread->transaction_stack = t;
+ } else {
+ BUG_ON(target_node == NULL);
+ BUG_ON(t->buffer->async_transaction != 1);
+ if (target_node->has_async_transaction) {
+ target_list = &target_node->async_todo;
+ target_wait = NULL;
+ } else
+ target_node->has_async_transaction = 1;
+ }
+ t->work.type = BINDER_WORK_TRANSACTION;
+ list_add_tail(&t->work.entry, target_list);
+ tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
+ list_add_tail(&tcomplete->entry, &thread->todo);
+ if (target_wait)
+ wake_up_interruptible(target_wait);
+ return;
+
+err_get_unused_fd_failed:
+err_fget_failed:
+err_fd_not_allowed:
+err_binder_get_ref_for_node_failed:
+err_binder_get_ref_failed:
+err_binder_new_node_failed:
+err_bad_object_type:
+err_bad_offset:
+err_copy_data_failed:
+ binder_transaction_buffer_release(target_proc, t->buffer, offp);
+ t->buffer->transaction = NULL;
+ binder_free_buf(target_proc, t->buffer);
+err_binder_alloc_buf_failed:
+ kfree(tcomplete);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+err_alloc_tcomplete_failed:
+ kfree(t);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION);
+err_alloc_t_failed:
+err_bad_call_stack:
+err_empty_call_stack:
+err_dead_binder:
+err_invalid_target_handle:
+err_no_context_mgr_node:
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "binder: %d:%d transaction failed %d, size %zd-%zd\n",
+ proc->pid, thread->pid, return_error,
+ tr->data_size, tr->offsets_size);
+
+ {
+ struct binder_transaction_log_entry *fe;
+ fe = binder_transaction_log_add(&binder_transaction_log_failed);
+ *fe = *e;
+ }
+
+ BUG_ON(thread->return_error != BR_OK);
+ if (in_reply_to) {
+ thread->return_error = BR_TRANSACTION_COMPLETE;
+ binder_send_failed_reply(in_reply_to, return_error);
+ } else
+ thread->return_error = return_error;
+}
+
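+/*
+ * Process BC_* commands from the write buffer of a BINDER_WRITE_READ
+ * ioctl, advancing *consumed past each command that is handled.
+ */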
+int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
+ void __user *buffer, int size, signed long *consumed)
+{
+ uint32_t cmd;
+ void __user *ptr = buffer + *consumed;
+ void __user *end = buffer + size;
+
+ while (ptr < end && thread->return_error == BR_OK) {
+ if (get_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
+ binder_stats.bc[_IOC_NR(cmd)]++;
+ proc->stats.bc[_IOC_NR(cmd)]++;
+ thread->stats.bc[_IOC_NR(cmd)]++;
+ }
+ switch (cmd) {
+ case BC_INCREFS:
+ case BC_ACQUIRE:
+ case BC_RELEASE:
+ case BC_DECREFS: {
+ uint32_t target;
+ struct binder_ref *ref;
+ const char *debug_string;
+
+ if (get_user(target, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (target == 0 && binder_context_mgr_node &&
+ (cmd == BC_INCREFS || cmd == BC_ACQUIRE)) {
+ ref = binder_get_ref_for_node(proc,
+ binder_context_mgr_node);
+ if (ref->desc != target) {
+ binder_user_error("binder: %d:"
+ "%d tried to acquire "
+ "reference to desc 0, "
+ "got %d instead\n",
+ proc->pid, thread->pid,
+ ref->desc);
+ }
+ } else
+ ref = binder_get_ref(proc, target);
+ if (ref == NULL) {
+ binder_user_error("binder: %d:%d refcou"
+ "nt change on invalid ref %d\n",
+ proc->pid, thread->pid, target);
+ break;
+ }
+ switch (cmd) {
+ case BC_INCREFS:
+ debug_string = "IncRefs";
+ binder_inc_ref(ref, 0, NULL);
+ break;
+ case BC_ACQUIRE:
+ debug_string = "Acquire";
+ binder_inc_ref(ref, 1, NULL);
+ break;
+ case BC_RELEASE:
+ debug_string = "Release";
+ binder_dec_ref(ref, 1);
+ break;
+ case BC_DECREFS:
+ default:
+ debug_string = "DecRefs";
+ binder_dec_ref(ref, 0);
+ break;
+ }
+ binder_debug(BINDER_DEBUG_USER_REFS,
+ "binder: %d:%d %s ref %d desc %d s %d w %d for node %d\n",
+ proc->pid, thread->pid, debug_string, ref->debug_id,
+ ref->desc, ref->strong, ref->weak, ref->node->debug_id);
+ break;
+ }
+ case BC_INCREFS_DONE:
+ case BC_ACQUIRE_DONE: {
+ void __user *node_ptr;
+ void *cookie;
+ struct binder_node *node;
+
+ if (get_user(node_ptr, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+ if (get_user(cookie, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+ node = binder_get_node(proc, node_ptr);
+ if (node == NULL) {
+ binder_user_error("binder: %d:%d "
+ "%s u%p no match\n",
+ proc->pid, thread->pid,
+ cmd == BC_INCREFS_DONE ?
+ "BC_INCREFS_DONE" :
+ "BC_ACQUIRE_DONE",
+ node_ptr);
+ break;
+ }
+ if (cookie != node->cookie) {
+ binder_user_error("binder: %d:%d %s u%p node %d"
+ " cookie mismatch %p != %p\n",
+ proc->pid, thread->pid,
+ cmd == BC_INCREFS_DONE ?
+ "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
+ node_ptr, node->debug_id,
+ cookie, node->cookie);
+ break;
+ }
+ if (cmd == BC_ACQUIRE_DONE) {
+ if (node->pending_strong_ref == 0) {
+ binder_user_error("binder: %d:%d "
+ "BC_ACQUIRE_DONE node %d has "
+ "no pending acquire request\n",
+ proc->pid, thread->pid,
+ node->debug_id);
+ break;
+ }
+ node->pending_strong_ref = 0;
+ } else {
+ if (node->pending_weak_ref == 0) {
+ binder_user_error("binder: %d:%d "
+ "BC_INCREFS_DONE node %d has "
+ "no pending increfs request\n",
+ proc->pid, thread->pid,
+ node->debug_id);
+ break;
+ }
+ node->pending_weak_ref = 0;
+ }
+ binder_dec_node(node, cmd == BC_ACQUIRE_DONE, 0);
+ binder_debug(BINDER_DEBUG_USER_REFS,
+ "binder: %d:%d %s node %d ls %d lw %d\n",
+ proc->pid, thread->pid,
+ cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
+ node->debug_id, node->local_strong_refs, node->local_weak_refs);
+ break;
+ }
+ case BC_ATTEMPT_ACQUIRE:
+ printk(KERN_ERR "binder: BC_ATTEMPT_ACQUIRE not supported\n");
+ return -EINVAL;
+ case BC_ACQUIRE_RESULT:
+ printk(KERN_ERR "binder: BC_ACQUIRE_RESULT not supported\n");
+ return -EINVAL;
+
+ case BC_FREE_BUFFER: {
+ void __user *data_ptr;
+ struct binder_buffer *buffer;
+
+ if (get_user(data_ptr, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+
+ buffer = binder_buffer_lookup(proc, data_ptr);
+ if (buffer == NULL) {
+ binder_user_error("binder: %d:%d "
+ "BC_FREE_BUFFER u%p no match\n",
+ proc->pid, thread->pid, data_ptr);
+ break;
+ }
+ if (!buffer->allow_user_free) {
+ binder_user_error("binder: %d:%d "
+ "BC_FREE_BUFFER u%p matched "
+ "unreturned buffer\n",
+ proc->pid, thread->pid, data_ptr);
+ break;
+ }
+ binder_debug(BINDER_DEBUG_FREE_BUFFER,
+ "binder: %d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n",
+ proc->pid, thread->pid, data_ptr, buffer->debug_id,
+ buffer->transaction ? "active" : "finished");
+
+ if (buffer->transaction) {
+ buffer->transaction->buffer = NULL;
+ buffer->transaction = NULL;
+ }
+ if (buffer->async_transaction && buffer->target_node) {
+ BUG_ON(!buffer->target_node->has_async_transaction);
+ if (list_empty(&buffer->target_node->async_todo))
+ buffer->target_node->has_async_transaction = 0;
+ else
+ list_move_tail(buffer->target_node->async_todo.next, &thread->todo);
+ }
+ binder_transaction_buffer_release(proc, buffer, NULL);
+ binder_free_buf(proc, buffer);
+ break;
+ }
+
+ case BC_TRANSACTION:
+ case BC_REPLY: {
+ struct binder_transaction_data tr;
+
+ if (copy_from_user(&tr, ptr, sizeof(tr)))
+ return -EFAULT;
+ ptr += sizeof(tr);
+ binder_transaction(proc, thread, &tr, cmd == BC_REPLY);
+ break;
+ }
+
+ case BC_REGISTER_LOOPER:
+ binder_debug(BINDER_DEBUG_THREADS,
+ "binder: %d:%d BC_REGISTER_LOOPER\n",
+ proc->pid, thread->pid);
+ if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
+ thread->looper |= BINDER_LOOPER_STATE_INVALID;
+ binder_user_error("binder: %d:%d ERROR:"
+ " BC_REGISTER_LOOPER called "
+ "after BC_ENTER_LOOPER\n",
+ proc->pid, thread->pid);
+ } else if (proc->requested_threads == 0) {
+ thread->looper |= BINDER_LOOPER_STATE_INVALID;
+ binder_user_error("binder: %d:%d ERROR:"
+ " BC_REGISTER_LOOPER called "
+ "without request\n",
+ proc->pid, thread->pid);
+ } else {
+ proc->requested_threads--;
+ proc->requested_threads_started++;
+ }
+ thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
+ break;
+ case BC_ENTER_LOOPER:
+ binder_debug(BINDER_DEBUG_THREADS,
+ "binder: %d:%d BC_ENTER_LOOPER\n",
+ proc->pid, thread->pid);
+ if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
+ thread->looper |= BINDER_LOOPER_STATE_INVALID;
+ binder_user_error("binder: %d:%d ERROR:"
+ " BC_ENTER_LOOPER called after "
+ "BC_REGISTER_LOOPER\n",
+ proc->pid, thread->pid);
+ }
+ thread->looper |= BINDER_LOOPER_STATE_ENTERED;
+ break;
+ case BC_EXIT_LOOPER:
+ binder_debug(BINDER_DEBUG_THREADS,
+ "binder: %d:%d BC_EXIT_LOOPER\n",
+ proc->pid, thread->pid);
+ thread->looper |= BINDER_LOOPER_STATE_EXITED;
+ break;
+
+ case BC_REQUEST_DEATH_NOTIFICATION:
+ case BC_CLEAR_DEATH_NOTIFICATION: {
+ uint32_t target;
+ void __user *cookie;
+ struct binder_ref *ref;
+ struct binder_ref_death *death;
+
+ if (get_user(target, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (get_user(cookie, (void __user * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+ ref = binder_get_ref(proc, target);
+ if (ref == NULL) {
+ binder_user_error("binder: %d:%d %s "
+ "invalid ref %d\n",
+ proc->pid, thread->pid,
+ cmd == BC_REQUEST_DEATH_NOTIFICATION ?
+ "BC_REQUEST_DEATH_NOTIFICATION" :
+ "BC_CLEAR_DEATH_NOTIFICATION",
+ target);
+ break;
+ }
+
+ binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
+ "binder: %d:%d %s %p ref %d desc %d s %d w %d for node %d\n",
+ proc->pid, thread->pid,
+ cmd == BC_REQUEST_DEATH_NOTIFICATION ?
+ "BC_REQUEST_DEATH_NOTIFICATION" :
+ "BC_CLEAR_DEATH_NOTIFICATION",
+ cookie, ref->debug_id, ref->desc,
+ ref->strong, ref->weak, ref->node->debug_id);
+
+ if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
+ if (ref->death) {
+ binder_user_error("binder: %d:%"
+ "d BC_REQUEST_DEATH_NOTI"
+ "FICATION death notific"
+ "ation already set\n",
+ proc->pid, thread->pid);
+ break;
+ }
+ death = kzalloc(sizeof(*death), GFP_KERNEL);
+ if (death == NULL) {
+ thread->return_error = BR_ERROR;
+ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
+ "binder: %d:%d "
+ "BC_REQUEST_DEATH_NOTIFICATION failed\n",
+ proc->pid, thread->pid);
+ break;
+ }
+ binder_stats_created(BINDER_STAT_DEATH);
+ INIT_LIST_HEAD(&death->work.entry);
+ death->cookie = cookie;
+ ref->death = death;
+ if (ref->node->proc == NULL) {
+ ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+ if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
+ list_add_tail(&ref->death->work.entry, &thread->todo);
+ } else {
+ list_add_tail(&ref->death->work.entry, &proc->todo);
+ wake_up_interruptible(&proc->wait);
+ }
+ }
+ } else {
+ if (ref->death == NULL) {
+ binder_user_error("binder: %d:%"
+ "d BC_CLEAR_DEATH_NOTIFI"
+ "CATION death notificat"
+ "ion not active\n",
+ proc->pid, thread->pid);
+ break;
+ }
+ death = ref->death;
+ if (death->cookie != cookie) {
+ binder_user_error("binder: %d:%"
+ "d BC_CLEAR_DEATH_NOTIFI"
+ "CATION death notificat"
+ "ion cookie mismatch "
+ "%p != %p\n",
+ proc->pid, thread->pid,
+ death->cookie, cookie);
+ break;
+ }
+ ref->death = NULL;
+ if (list_empty(&death->work.entry)) {
+ death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
+ if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
+ list_add_tail(&death->work.entry, &thread->todo);
+ } else {
+ list_add_tail(&death->work.entry, &proc->todo);
+ wake_up_interruptible(&proc->wait);
+ }
+ } else {
+ BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
+ death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
+ }
+ }
+ } break;
+ case BC_DEAD_BINDER_DONE: {
+ struct binder_work *w;
+ void __user *cookie;
+ struct binder_ref_death *death = NULL;
+ if (get_user(cookie, (void __user * __user *)ptr))
+ return -EFAULT;
+
+ ptr += sizeof(void *);
+ list_for_each_entry(w, &proc->delivered_death, entry) {
+ struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
+ if (tmp_death->cookie == cookie) {
+ death = tmp_death;
+ break;
+ }
+ }
+ binder_debug(BINDER_DEBUG_DEAD_BINDER,
+ "binder: %d:%d BC_DEAD_BINDER_DONE %p found %p\n",
+ proc->pid, thread->pid, cookie, death);
+ if (death == NULL) {
+ binder_user_error("binder: %d:%d BC_DEAD"
+ "_BINDER_DONE %p not found\n",
+ proc->pid, thread->pid, cookie);
+ break;
+ }
+
+ list_del_init(&death->work.entry);
+ if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
+ death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
+ if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) {
+ list_add_tail(&death->work.entry, &thread->todo);
+ } else {
+ list_add_tail(&death->work.entry, &proc->todo);
+ wake_up_interruptible(&proc->wait);
+ }
+ }
+ } break;
+
+ default:
+ printk(KERN_ERR "binder: %d:%d unknown command %d\n",
+ proc->pid, thread->pid, cmd);
+ return -EINVAL;
+ }
+ *consumed = ptr - buffer;
+ }
+ return 0;
+}
+
+void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread,
+ uint32_t cmd)
+{
+ if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
+ binder_stats.br[_IOC_NR(cmd)]++;
+ proc->stats.br[_IOC_NR(cmd)]++;
+ thread->stats.br[_IOC_NR(cmd)]++;
+ }
+}
+
+static int binder_has_proc_work(struct binder_proc *proc,
+ struct binder_thread *thread)
+{
+ return !list_empty(&proc->todo) ||
+ (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+}
+
+static int binder_has_thread_work(struct binder_thread *thread)
+{
+ return !list_empty(&thread->todo) || thread->return_error != BR_OK ||
+ (thread->looper & BINDER_LOOPER_STATE_NEED_RETURN);
+}
+
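+/*
+ * Fill the read buffer with BR_* commands for this thread, waiting on
+ * the process work queue when the thread has no work of its own;
+ * non_block turns an empty queue into -EAGAIN instead of sleeping.
+ */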
+static int binder_thread_read(struct binder_proc *proc,
+ struct binder_thread *thread,
+ void __user *buffer, int size,
+ signed long *consumed, int non_block)
+{
+ void __user *ptr = buffer + *consumed;
+ void __user *end = buffer + size;
+
+ int ret = 0;
+ int wait_for_proc_work;
+
+ if (*consumed == 0) {
+ if (put_user(BR_NOOP, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ }
+
+retry:
+ wait_for_proc_work = thread->transaction_stack == NULL &&
+ list_empty(&thread->todo);
+
+ if (thread->return_error != BR_OK && ptr < end) {
+ if (thread->return_error2 != BR_OK) {
+ if (put_user(thread->return_error2, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (ptr == end)
+ goto done;
+ thread->return_error2 = BR_OK;
+ }
+ if (put_user(thread->return_error, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ thread->return_error = BR_OK;
+ goto done;
+ }
+
+
+ thread->looper |= BINDER_LOOPER_STATE_WAITING;
+ if (wait_for_proc_work)
+ proc->ready_threads++;
+ mutex_unlock(&binder_lock);
+ if (wait_for_proc_work) {
+ if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED))) {
+ binder_user_error("binder: %d:%d ERROR: Thread waiting "
+ "for process work before calling BC_REGISTER_"
+ "LOOPER or BC_ENTER_LOOPER (state %x)\n",
+ proc->pid, thread->pid, thread->looper);
+ wait_event_interruptible(binder_user_error_wait,
+ binder_stop_on_user_error < 2);
+ }
+ binder_set_nice(proc->default_priority);
+ if (non_block) {
+ if (!binder_has_proc_work(proc, thread))
+ ret = -EAGAIN;
+ } else
+ ret = wait_event_interruptible_exclusive(proc->wait, binder_has_proc_work(proc, thread));
+ } else {
+ if (non_block) {
+ if (!binder_has_thread_work(thread))
+ ret = -EAGAIN;
+ } else
+ ret = wait_event_interruptible(thread->wait, binder_has_thread_work(thread));
+ }
+ mutex_lock(&binder_lock);
+ if (wait_for_proc_work)
+ proc->ready_threads--;
+ thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
+
+ if (ret)
+ return ret;
+
+ while (1) {
+ uint32_t cmd;
+ struct binder_transaction_data tr;
+ struct binder_work *w;
+ struct binder_transaction *t = NULL;
+
+ if (!list_empty(&thread->todo))
+ w = list_first_entry(&thread->todo, struct binder_work, entry);
+ else if (!list_empty(&proc->todo) && wait_for_proc_work)
+ w = list_first_entry(&proc->todo, struct binder_work, entry);
+ else {
+ if (ptr - buffer == 4 && !(thread->looper & BINDER_LOOPER_STATE_NEED_RETURN)) /* no data added */
+ goto retry;
+ break;
+ }
+
+ if (end - ptr < sizeof(tr) + 4)
+ break;
+
+ switch (w->type) {
+ case BINDER_WORK_TRANSACTION: {
+ t = container_of(w, struct binder_transaction, work);
+ } break;
+ case BINDER_WORK_TRANSACTION_COMPLETE: {
+ cmd = BR_TRANSACTION_COMPLETE;
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+
+ binder_stat_br(proc, thread, cmd);
+ binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
+ "binder: %d:%d BR_TRANSACTION_COMPLETE\n",
+ proc->pid, thread->pid);
+
+ list_del(&w->entry);
+ kfree(w);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+ } break;
+ case BINDER_WORK_NODE: {
+ struct binder_node *node = container_of(w, struct binder_node, work);
+ uint32_t cmd = BR_NOOP;
+ const char *cmd_name;
+ int strong = node->internal_strong_refs || node->local_strong_refs;
+ int weak = !hlist_empty(&node->refs) || node->local_weak_refs || strong;
+ if (weak && !node->has_weak_ref) {
+ cmd = BR_INCREFS;
+ cmd_name = "BR_INCREFS";
+ node->has_weak_ref = 1;
+ node->pending_weak_ref = 1;
+ node->local_weak_refs++;
+ } else if (strong && !node->has_strong_ref) {
+ cmd = BR_ACQUIRE;
+ cmd_name = "BR_ACQUIRE";
+ node->has_strong_ref = 1;
+ node->pending_strong_ref = 1;
+ node->local_strong_refs++;
+ } else if (!strong && node->has_strong_ref) {
+ cmd = BR_RELEASE;
+ cmd_name = "BR_RELEASE";
+ node->has_strong_ref = 0;
+ } else if (!weak && node->has_weak_ref) {
+ cmd = BR_DECREFS;
+ cmd_name = "BR_DECREFS";
+ node->has_weak_ref = 0;
+ }
+ if (cmd != BR_NOOP) {
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (put_user(node->ptr, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+ if (put_user(node->cookie, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+
+ binder_stat_br(proc, thread, cmd);
+ binder_debug(BINDER_DEBUG_USER_REFS,
+ "binder: %d:%d %s %d u%p c%p\n",
+ proc->pid, thread->pid, cmd_name, node->debug_id, node->ptr, node->cookie);
+ } else {
+ list_del_init(&w->entry);
+ if (!weak && !strong) {
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: %d:%d node %d u%p c%p deleted\n",
+ proc->pid, thread->pid, node->debug_id,
+ node->ptr, node->cookie);
+ rb_erase(&node->rb_node, &proc->nodes);
+ kfree(node);
+ binder_stats_deleted(BINDER_STAT_NODE);
+ } else {
+ binder_debug(BINDER_DEBUG_INTERNAL_REFS,
+ "binder: %d:%d node %d u%p c%p state unchanged\n",
+ proc->pid, thread->pid, node->debug_id, node->ptr,
+ node->cookie);
+ }
+ }
+ } break;
+ case BINDER_WORK_DEAD_BINDER:
+ case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
+ case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
+ struct binder_ref_death *death;
+ uint32_t cmd;
+
+ death = container_of(w, struct binder_ref_death, work);
+ if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
+ cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
+ else
+ cmd = BR_DEAD_BINDER;
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (put_user(death->cookie, (void * __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(void *);
+ binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
+ "binder: %d:%d %s %p\n",
+ proc->pid, thread->pid,
+ cmd == BR_DEAD_BINDER ?
+ "BR_DEAD_BINDER" :
+ "BR_CLEAR_DEATH_NOTIFICATION_DONE",
+ death->cookie);
+
+ if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
+ list_del(&w->entry);
+ kfree(death);
+ binder_stats_deleted(BINDER_STAT_DEATH);
+ } else
+ list_move(&w->entry, &proc->delivered_death);
+ if (cmd == BR_DEAD_BINDER)
+ goto done; /* DEAD_BINDER notifications can cause transactions */
+ } break;
+ }
+
+ if (!t)
+ continue;
+
+ BUG_ON(t->buffer == NULL);
+ if (t->buffer->target_node) {
+ struct binder_node *target_node = t->buffer->target_node;
+ tr.target.ptr = target_node->ptr;
+ tr.cookie = target_node->cookie;
+ t->saved_priority = task_nice(current);
+ if (t->priority < target_node->min_priority &&
+ !(t->flags & TF_ONE_WAY))
+ binder_set_nice(t->priority);
+ else if (!(t->flags & TF_ONE_WAY) ||
+ t->saved_priority > target_node->min_priority)
+ binder_set_nice(target_node->min_priority);
+ cmd = BR_TRANSACTION;
+ } else {
+ tr.target.ptr = NULL;
+ tr.cookie = NULL;
+ cmd = BR_REPLY;
+ }
+ tr.code = t->code;
+ tr.flags = t->flags;
+ tr.sender_euid = t->sender_euid;
+
+ if (t->from) {
+ struct task_struct *sender = t->from->proc->tsk;
+ tr.sender_pid = task_tgid_nr_ns(sender,
+ current->nsproxy->pid_ns);
+ } else {
+ tr.sender_pid = 0;
+ }
+
+ tr.data_size = t->buffer->data_size;
+ tr.offsets_size = t->buffer->offsets_size;
+ tr.data.ptr.buffer = (void *)t->buffer->data +
+ proc->user_buffer_offset;
+ tr.data.ptr.offsets = tr.data.ptr.buffer +
+ ALIGN(t->buffer->data_size,
+ sizeof(void *));
+
+ if (put_user(cmd, (uint32_t __user *)ptr))
+ return -EFAULT;
+ ptr += sizeof(uint32_t);
+ if (copy_to_user(ptr, &tr, sizeof(tr)))
+ return -EFAULT;
+ ptr += sizeof(tr);
+
+ binder_stat_br(proc, thread, cmd);
+ binder_debug(BINDER_DEBUG_TRANSACTION,
+ "binder: %d:%d %s %d %d:%d, cmd %d"
+ "size %zd-%zd ptr %p-%p\n",
+ proc->pid, thread->pid,
+ (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
+ "BR_REPLY",
+ t->debug_id, t->from ? t->from->proc->pid : 0,
+ t->from ? t->from->pid : 0, cmd,
+ t->buffer->data_size, t->buffer->offsets_size,
+ tr.data.ptr.buffer, tr.data.ptr.offsets);
+
+ list_del(&t->work.entry);
+ t->buffer->allow_user_free = 1;
+ if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
+ t->to_parent = thread->transaction_stack;
+ t->to_thread = thread;
+ thread->transaction_stack = t;
+ } else {
+ t->buffer->transaction = NULL;
+ kfree(t);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION);
+ }
+ break;
+ }
+
+done:
+
+ *consumed = ptr - buffer;
+ if (proc->requested_threads + proc->ready_threads == 0 &&
+ proc->requested_threads_started < proc->max_threads &&
+ (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
+ BINDER_LOOPER_STATE_ENTERED))
+ /* the user-space code fails to spawn a new thread if we leave this out */) {
+ proc->requested_threads++;
+ binder_debug(BINDER_DEBUG_THREADS,
+ "binder: %d:%d BR_SPAWN_LOOPER\n",
+ proc->pid, thread->pid);
+ if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static void binder_release_work(struct list_head *list)
+{
+ struct binder_work *w;
+ while (!list_empty(list)) {
+ w = list_first_entry(list, struct binder_work, entry);
+ list_del_init(&w->entry);
+ switch (w->type) {
+ case BINDER_WORK_TRANSACTION: {
+ struct binder_transaction *t;
+
+ t = container_of(w, struct binder_transaction, work);
+ if (t->buffer->target_node && !(t->flags & TF_ONE_WAY))
+ binder_send_failed_reply(t, BR_DEAD_REPLY);
+ } break;
+ case BINDER_WORK_TRANSACTION_COMPLETE: {
+ kfree(w);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
+ } break;
+ default:
+ break;
+ }
+ }
+
+}
+
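+/*
+ * Find the binder_thread for the calling task in proc->threads, creating
+ * it on first use.
+ */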
+static struct binder_thread *binder_get_thread(struct binder_proc *proc)
+{
+ struct binder_thread *thread = NULL;
+ struct rb_node *parent = NULL;
+ struct rb_node **p = &proc->threads.rb_node;
+
+ while (*p) {
+ parent = *p;
+ thread = rb_entry(parent, struct binder_thread, rb_node);
+
+ if (current->pid < thread->pid)
+ p = &(*p)->rb_left;
+ else if (current->pid > thread->pid)
+ p = &(*p)->rb_right;
+ else
+ break;
+ }
+ if (*p == NULL) {
+ thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+ if (thread == NULL)
+ return NULL;
+ binder_stats_created(BINDER_STAT_THREAD);
+ thread->proc = proc;
+ thread->pid = current->pid;
+ init_waitqueue_head(&thread->wait);
+ INIT_LIST_HEAD(&thread->todo);
+ rb_link_node(&thread->rb_node, parent, p);
+ rb_insert_color(&thread->rb_node, &proc->threads);
+ thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+ thread->return_error = BR_OK;
+ thread->return_error2 = BR_OK;
+ }
+ return thread;
+}
+
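+/*
+ * Tear down a binder_thread: detach it from transactions that still
+ * reference it, fail any reply the caller is waiting on and release its
+ * queued work. Returns the number of transactions still active.
+ */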
+static int binder_free_thread(struct binder_proc *proc,
+ struct binder_thread *thread)
+{
+ struct binder_transaction *t;
+ struct binder_transaction *send_reply = NULL;
+ int active_transactions = 0;
+
+ rb_erase(&thread->rb_node, &proc->threads);
+ t = thread->transaction_stack;
+ if (t && t->to_thread == thread)
+ send_reply = t;
+ while (t) {
+ active_transactions++;
+ binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
+ "binder: release %d:%d transaction %d "
+ "%s, still active\n", proc->pid, thread->pid,
+ t->debug_id,
+ (t->to_thread == thread) ? "in" : "out");
+
+ if (t->to_thread == thread) {
+ t->to_proc = NULL;
+ t->to_thread = NULL;
+ if (t->buffer) {
+ t->buffer->transaction = NULL;
+ t->buffer = NULL;
+ }
+ t = t->to_parent;
+ } else if (t->from == thread) {
+ t->from = NULL;
+ t = t->from_parent;
+ } else
+ BUG();
+ }
+ if (send_reply)
+ binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
+ binder_release_work(&thread->todo);
+ kfree(thread);
+ binder_stats_deleted(BINDER_STAT_THREAD);
+ return active_transactions;
+}
+
+static unsigned int binder_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct binder_proc *proc = filp->private_data;
+ struct binder_thread *thread = NULL;
+ int wait_for_proc_work;
+
+ mutex_lock(&binder_lock);
+ thread = binder_get_thread(proc);
+
+ wait_for_proc_work = thread->transaction_stack == NULL &&
+ list_empty(&thread->todo) && thread->return_error == BR_OK;
+ mutex_unlock(&binder_lock);
+
+ if (wait_for_proc_work) {
+ if (binder_has_proc_work(proc, thread))
+ return POLLIN;
+ poll_wait(filp, &proc->wait, wait);
+ if (binder_has_proc_work(proc, thread))
+ return POLLIN;
+ } else {
+ if (binder_has_thread_work(thread))
+ return POLLIN;
+ poll_wait(filp, &thread->wait, wait);
+ if (binder_has_thread_work(thread))
+ return POLLIN;
+ }
+ return 0;
+}
+
+static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+ struct binder_proc *proc = filp->private_data;
+ struct binder_thread *thread;
+ unsigned int size = _IOC_SIZE(cmd);
+ void __user *ubuf = (void __user *)arg;
+
+ /*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/
+
+ ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
+ if (ret)
+ return ret;
+
+ mutex_lock(&binder_lock);
+ thread = binder_get_thread(proc);
+ if (thread == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ switch (cmd) {
+ case BINDER_WRITE_READ: {
+ struct binder_write_read bwr;
+ if (size != sizeof(struct binder_write_read)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
+ ret = -EFAULT;
+ goto err;
+ }
+ binder_debug(BINDER_DEBUG_READ_WRITE,
+ "binder: %d:%d write %ld at %08lx, read %ld at %08lx\n",
+ proc->pid, thread->pid, bwr.write_size, bwr.write_buffer,
+ bwr.read_size, bwr.read_buffer);
+
+ if (bwr.write_size > 0) {
+ ret = binder_thread_write(proc, thread, (void __user *)bwr.write_buffer, bwr.write_size, &bwr.write_consumed);
+ if (ret < 0) {
+ bwr.read_consumed = 0;
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+ ret = -EFAULT;
+ goto err;
+ }
+ }
+ if (bwr.read_size > 0) {
+ ret = binder_thread_read(proc, thread, (void __user *)bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK);
+ if (!list_empty(&proc->todo))
+ wake_up_interruptible(&proc->wait);
+ if (ret < 0) {
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
+ ret = -EFAULT;
+ goto err;
+ }
+ }
+ binder_debug(BINDER_DEBUG_READ_WRITE,
+ "binder: %d:%d wrote %ld of %ld, read return %ld of %ld\n",
+ proc->pid, thread->pid, bwr.write_consumed, bwr.write_size,
+ bwr.read_consumed, bwr.read_size);
+ if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
+ ret = -EFAULT;
+ goto err;
+ }
+ break;
+ }
+ case BINDER_SET_MAX_THREADS:
+ if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
+ ret = -EINVAL;
+ goto err;
+ }
+ break;
+ case BINDER_SET_CONTEXT_MGR:
+ if (binder_context_mgr_node != NULL) {
+ printk(KERN_ERR "binder: BINDER_SET_CONTEXT_MGR already set\n");
+ ret = -EBUSY;
+ goto err;
+ }
+ if (binder_context_mgr_uid != -1) {
+ if (binder_context_mgr_uid != current->cred->euid) {
+ printk(KERN_ERR "binder: BINDER_SET_"
+ "CONTEXT_MGR bad uid %d != %d\n",
+ current->cred->euid,
+ binder_context_mgr_uid);
+ ret = -EPERM;
+ goto err;
+ }
+ } else
+ binder_context_mgr_uid = current->cred->euid;
+ binder_context_mgr_node = binder_new_node(proc, NULL, NULL);
+ if (binder_context_mgr_node == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ binder_context_mgr_node->local_weak_refs++;
+ binder_context_mgr_node->local_strong_refs++;
+ binder_context_mgr_node->has_strong_ref = 1;
+ binder_context_mgr_node->has_weak_ref = 1;
+ break;
+ case BINDER_THREAD_EXIT:
+ binder_debug(BINDER_DEBUG_THREADS, "binder: %d:%d exit\n",
+ proc->pid, thread->pid);
+ binder_free_thread(proc, thread);
+ thread = NULL;
+ break;
+ case BINDER_VERSION:
+ if (size != sizeof(struct binder_version)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &((struct binder_version *)ubuf)->protocol_version)) {
+ ret = -EINVAL;
+ goto err;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ goto err;
+ }
+ ret = 0;
+err:
+ if (thread)
+ thread->looper &= ~BINDER_LOOPER_STATE_NEED_RETURN;
+ mutex_unlock(&binder_lock);
+ wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
+ if (ret && ret != -ERESTARTSYS)
+ printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
+ return ret;
+}
+
+static void binder_vma_open(struct vm_area_struct *vma)
+{
+ struct binder_proc *proc = vma->vm_private_data;
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "binder: %d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
+ proc->pid, vma->vm_start, vma->vm_end,
+ (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
+ (unsigned long)pgprot_val(vma->vm_page_prot));
+}
+
+static void binder_vma_close(struct vm_area_struct *vma)
+{
+ struct binder_proc *proc = vma->vm_private_data;
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "binder: %d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
+ proc->pid, vma->vm_start, vma->vm_end,
+ (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
+ (unsigned long)pgprot_val(vma->vm_page_prot));
+ proc->vma = NULL;
+ binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
+}
+
+static struct vm_operations_struct binder_vm_ops = {
+ .open = binder_vma_open,
+ .close = binder_vma_close,
+};
+
+static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ int ret;
+ struct vm_struct *area;
+ struct binder_proc *proc = filp->private_data;
+ const char *failure_string;
+ struct binder_buffer *buffer;
+
+ if ((vma->vm_end - vma->vm_start) > SZ_4M)
+ vma->vm_end = vma->vm_start + SZ_4M;
+
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "binder_mmap: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
+ proc->pid, vma->vm_start, vma->vm_end,
+ (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
+ (unsigned long)pgprot_val(vma->vm_page_prot));
+
+ if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
+ ret = -EPERM;
+ failure_string = "bad vm_flags";
+ goto err_bad_arg;
+ }
+ vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
+
+ mutex_lock(&binder_mmap_lock);
+ if (proc->buffer) {
+ ret = -EBUSY;
+ failure_string = "already mapped";
+ goto err_already_mapped;
+ }
+
+ area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
+ if (area == NULL) {
+ ret = -ENOMEM;
+ failure_string = "get_vm_area";
+ goto err_get_vm_area_failed;
+ }
+ proc->buffer = area->addr;
+ proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
+ mutex_unlock(&binder_mmap_lock);
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+ if (cache_is_vipt_aliasing()) {
+ while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
+ printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
+ vma->vm_start += PAGE_SIZE;
+ }
+ }
+#endif
+ proc->pages = kzalloc(sizeof(proc->pages[0]) * ((vma->vm_end - vma->vm_start) / PAGE_SIZE), GFP_KERNEL);
+ if (proc->pages == NULL) {
+ ret = -ENOMEM;
+ failure_string = "alloc page array";
+ goto err_alloc_pages_failed;
+ }
+ proc->buffer_size = vma->vm_end - vma->vm_start;
+
+ vma->vm_ops = &binder_vm_ops;
+ vma->vm_private_data = proc;
+
+ if (binder_update_page_range(proc, 1, proc->buffer, proc->buffer + PAGE_SIZE, vma)) {
+ ret = -ENOMEM;
+ failure_string = "alloc small buf";
+ goto err_alloc_small_buf_failed;
+ }
+ buffer = proc->buffer;
+ INIT_LIST_HEAD(&proc->buffers);
+ list_add(&buffer->entry, &proc->buffers);
+ buffer->free = 1;
+ binder_insert_free_buffer(proc, buffer);
+ proc->free_async_space = proc->buffer_size / 2;
+ barrier();
+ proc->files = get_files_struct(proc->tsk);
+ proc->vma = vma;
+
+ /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n",
+ proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
+ return 0;
+
+err_alloc_small_buf_failed:
+ kfree(proc->pages);
+ proc->pages = NULL;
+err_alloc_pages_failed:
+ mutex_lock(&binder_mmap_lock);
+ vfree(proc->buffer);
+ proc->buffer = NULL;
+err_get_vm_area_failed:
+err_already_mapped:
+ mutex_unlock(&binder_mmap_lock);
+err_bad_arg:
+ printk(KERN_ERR "binder_mmap: %d %lx-%lx %s failed %d\n",
+ proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
+ return ret;
+}
+
+static int binder_open(struct inode *nodp, struct file *filp)
+{
+ struct binder_proc *proc;
+
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
+ current->group_leader->pid, current->pid);
+
+ proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+ if (proc == NULL)
+ return -ENOMEM;
+ get_task_struct(current);
+ proc->tsk = current;
+ INIT_LIST_HEAD(&proc->todo);
+ init_waitqueue_head(&proc->wait);
+ proc->default_priority = task_nice(current);
+ mutex_lock(&binder_lock);
+ binder_stats_created(BINDER_STAT_PROC);
+ hlist_add_head(&proc->proc_node, &binder_procs);
+ proc->pid = current->group_leader->pid;
+ INIT_LIST_HEAD(&proc->delivered_death);
+ filp->private_data = proc;
+ mutex_unlock(&binder_lock);
+
+ if (binder_debugfs_dir_entry_proc) {
+ char strbuf[11];
+ snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
+ proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
+ binder_debugfs_dir_entry_proc, proc, &binder_proc_fops);
+ }
+
+ return 0;
+}
+
+static int binder_flush(struct file *filp, fl_owner_t id)
+{
+ struct binder_proc *proc = filp->private_data;
+
+ binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
+
+ return 0;
+}
+
+static void binder_deferred_flush(struct binder_proc *proc)
+{
+ struct rb_node *n;
+ int wake_count = 0;
+ for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
+ struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
+ thread->looper |= BINDER_LOOPER_STATE_NEED_RETURN;
+ if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
+ wake_up_interruptible(&thread->wait);
+ wake_count++;
+ }
+ }
+ wake_up_interruptible_all(&proc->wait);
+
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "binder_flush: %d woke %d threads\n", proc->pid,
+ wake_count);
+}
+
+static int binder_release(struct inode *nodp, struct file *filp)
+{
+ struct binder_proc *proc = filp->private_data;
+ debugfs_remove(proc->debugfs_entry);
+ binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
+
+ return 0;
+}
+
+static void binder_deferred_release(struct binder_proc *proc)
+{
+ struct hlist_node *pos;
+ struct binder_transaction *t;
+ struct rb_node *n;
+ int threads, nodes, incoming_refs, outgoing_refs, buffers, active_transactions, page_count;
+
+ BUG_ON(proc->vma);
+ BUG_ON(proc->files);
+
+ hlist_del(&proc->proc_node);
+ if (binder_context_mgr_node && binder_context_mgr_node->proc == proc) {
+ binder_debug(BINDER_DEBUG_DEAD_BINDER,
+ "binder_release: %d context_mgr_node gone\n",
+ proc->pid);
+ binder_context_mgr_node = NULL;
+ }
+
+ threads = 0;
+ active_transactions = 0;
+ while ((n = rb_first(&proc->threads))) {
+ struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
+ threads++;
+ active_transactions += binder_free_thread(proc, thread);
+ }
+ nodes = 0;
+ incoming_refs = 0;
+ while ((n = rb_first(&proc->nodes))) {
+ struct binder_node *node = rb_entry(n, struct binder_node, rb_node);
+
+ nodes++;
+ rb_erase(&node->rb_node, &proc->nodes);
+ list_del_init(&node->work.entry);
+ if (hlist_empty(&node->refs)) {
+ kfree(node);
+ binder_stats_deleted(BINDER_STAT_NODE);
+ } else {
+ struct binder_ref *ref;
+ int death = 0;
+
+ node->proc = NULL;
+ node->local_strong_refs = 0;
+ node->local_weak_refs = 0;
+ hlist_add_head(&node->dead_node, &binder_dead_nodes);
+
+ hlist_for_each_entry(ref, pos, &node->refs, node_entry) {
+ incoming_refs++;
+ if (ref->death) {
+ death++;
+ if (list_empty(&ref->death->work.entry)) {
+ ref->death->work.type = BINDER_WORK_DEAD_BINDER;
+ list_add_tail(&ref->death->work.entry, &ref->proc->todo);
+ wake_up_interruptible(&ref->proc->wait);
+ } else
+ BUG();
+ }
+ }
+ binder_debug(BINDER_DEBUG_DEAD_BINDER,
+ "binder: node %d now dead, "
+ "refs %d, death %d\n", node->debug_id,
+ incoming_refs, death);
+ }
+ }
+ outgoing_refs = 0;
+ while ((n = rb_first(&proc->refs_by_desc))) {
+ struct binder_ref *ref = rb_entry(n, struct binder_ref,
+ rb_node_desc);
+ outgoing_refs++;
+ binder_delete_ref(ref);
+ }
+ binder_release_work(&proc->todo);
+ buffers = 0;
+
+ while ((n = rb_first(&proc->allocated_buffers))) {
+ struct binder_buffer *buffer = rb_entry(n, struct binder_buffer,
+ rb_node);
+ t = buffer->transaction;
+ if (t) {
+ t->buffer = NULL;
+ buffer->transaction = NULL;
+ printk(KERN_ERR "binder: release proc %d, "
+ "transaction %d, not freed\n",
+ proc->pid, t->debug_id);
+ /*BUG();*/
+ }
+ binder_free_buf(proc, buffer);
+ buffers++;
+ }
+
+ binder_stats_deleted(BINDER_STAT_PROC);
+
+ page_count = 0;
+ if (proc->pages) {
+ int i;
+ for (i = 0; i < proc->buffer_size / PAGE_SIZE; i++) {
+ if (proc->pages[i]) {
+ void *page_addr = proc->buffer + i * PAGE_SIZE;
+ binder_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "binder_release: %d: "
+ "page %d at %p not freed\n",
+ proc->pid, i,
+ page_addr);
+ unmap_kernel_range((unsigned long)page_addr,
+ PAGE_SIZE);
+ __free_page(proc->pages[i]);
+ page_count++;
+ }
+ }
+ kfree(proc->pages);
+ vfree(proc->buffer);
+ }
+
+ put_task_struct(proc->tsk);
+
+ binder_debug(BINDER_DEBUG_OPEN_CLOSE,
+ "binder_release: %d threads %d, nodes %d (ref %d), "
+ "refs %d, active transactions %d, buffers %d, "
+ "pages %d\n",
+ proc->pid, threads, nodes, incoming_refs, outgoing_refs,
+ active_transactions, buffers, page_count);
+
+ kfree(proc);
+}
+
+static void binder_deferred_func(struct work_struct *work)
+{
+ struct binder_proc *proc;
+ struct files_struct *files;
+
+ int defer;
+ do {
+ mutex_lock(&binder_lock);
+ mutex_lock(&binder_deferred_lock);
+ if (!hlist_empty(&binder_deferred_list)) {
+ proc = hlist_entry(binder_deferred_list.first,
+ struct binder_proc, deferred_work_node);
+ hlist_del_init(&proc->deferred_work_node);
+ defer = proc->deferred_work;
+ proc->deferred_work = 0;
+ } else {
+ proc = NULL;
+ defer = 0;
+ }
+ mutex_unlock(&binder_deferred_lock);
+
+ files = NULL;
+ if (defer & BINDER_DEFERRED_PUT_FILES) {
+ files = proc->files;
+ if (files)
+ proc->files = NULL;
+ }
+
+ if (defer & BINDER_DEFERRED_FLUSH)
+ binder_deferred_flush(proc);
+
+ if (defer & BINDER_DEFERRED_RELEASE)
+ binder_deferred_release(proc); /* frees proc */
+
+ mutex_unlock(&binder_lock);
+ if (files)
+ put_files_struct(files);
+ } while (proc);
+}
+static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
+
+static void
+binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
+{
+ mutex_lock(&binder_deferred_lock);
+ proc->deferred_work |= defer;
+ if (hlist_unhashed(&proc->deferred_work_node)) {
+ hlist_add_head(&proc->deferred_work_node,
+ &binder_deferred_list);
+ queue_work(binder_deferred_workqueue, &binder_deferred_work);
+ }
+ mutex_unlock(&binder_deferred_lock);
+}
+
+static void print_binder_transaction(struct seq_file *m, const char *prefix,
+ struct binder_transaction *t)
+{
+ seq_printf(m,
+ "%s %d: %p from %d:%d to %d:%d code %x flags %x pri %ld r%d",
+ prefix, t->debug_id, t,
+ t->from ? t->from->proc->pid : 0,
+ t->from ? t->from->pid : 0,
+ t->to_proc ? t->to_proc->pid : 0,
+ t->to_thread ? t->to_thread->pid : 0,
+ t->code, t->flags, t->priority, t->need_reply);
+ if (t->buffer == NULL) {
+ seq_puts(m, " buffer free\n");
+ return;
+ }
+ if (t->buffer->target_node)
+ seq_printf(m, " node %d",
+ t->buffer->target_node->debug_id);
+ seq_printf(m, " size %zd:%zd data %p\n",
+ t->buffer->data_size, t->buffer->offsets_size,
+ t->buffer->data);
+}
+
+static void print_binder_buffer(struct seq_file *m, const char *prefix,
+ struct binder_buffer *buffer)
+{
+ seq_printf(m, "%s %d: %p size %zd:%zd %s\n",
+ prefix, buffer->debug_id, buffer->data,
+ buffer->data_size, buffer->offsets_size,
+ buffer->transaction ? "active" : "delivered");
+}
+
+static void print_binder_work(struct seq_file *m, const char *prefix,
+ const char *transaction_prefix,
+ struct binder_work *w)
+{
+ struct binder_node *node;
+ struct binder_transaction *t;
+
+ switch (w->type) {
+ case BINDER_WORK_TRANSACTION:
+ t = container_of(w, struct binder_transaction, work);
+ print_binder_transaction(m, transaction_prefix, t);
+ break;
+ case BINDER_WORK_TRANSACTION_COMPLETE:
+ seq_printf(m, "%stransaction complete\n", prefix);
+ break;
+ case BINDER_WORK_NODE:
+ node = container_of(w, struct binder_node, work);
+ seq_printf(m, "%snode work %d: u%p c%p\n",
+ prefix, node->debug_id, node->ptr, node->cookie);
+ break;
+ case BINDER_WORK_DEAD_BINDER:
+ seq_printf(m, "%shas dead binder\n", prefix);
+ break;
+ case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
+ seq_printf(m, "%shas cleared dead binder\n", prefix);
+ break;
+ case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
+ seq_printf(m, "%shas cleared death notification\n", prefix);
+ break;
+ default:
+ seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
+ break;
+ }
+}
+
+static void print_binder_thread(struct seq_file *m,
+ struct binder_thread *thread,
+ int print_always)
+{
+ struct binder_transaction *t;
+ struct binder_work *w;
+ size_t start_pos = m->count;
+ size_t header_pos;
+
+ seq_printf(m, " thread %d: l %02x\n", thread->pid, thread->looper);
+ header_pos = m->count;
+ t = thread->transaction_stack;
+ while (t) {
+ if (t->from == thread) {
+ print_binder_transaction(m,
+ " outgoing transaction", t);
+ t = t->from_parent;
+ } else if (t->to_thread == thread) {
+ print_binder_transaction(m,
+ " incoming transaction", t);
+ t = t->to_parent;
+ } else {
+ print_binder_transaction(m, " bad transaction", t);
+ t = NULL;
+ }
+ }
+ list_for_each_entry(w, &thread->todo, entry) {
+ print_binder_work(m, " ", " pending transaction", w);
+ }
+ if (!print_always && m->count == header_pos)
+ m->count = start_pos;
+}
+
+static void print_binder_node(struct seq_file *m, struct binder_node *node)
+{
+ struct binder_ref *ref;
+ struct hlist_node *pos;
+ struct binder_work *w;
+ int count;
+
+ count = 0;
+ hlist_for_each_entry(ref, pos, &node->refs, node_entry)
+ count++;
+
+ seq_printf(m, " node %d: u%p c%p hs %d hw %d ls %d lw %d is %d iw %d",
+ node->debug_id, node->ptr, node->cookie,
+ node->has_strong_ref, node->has_weak_ref,
+ node->local_strong_refs, node->local_weak_refs,
+ node->internal_strong_refs, count);
+ if (count) {
+ seq_puts(m, " proc");
+ hlist_for_each_entry(ref, pos, &node->refs, node_entry)
+ seq_printf(m, " %d", ref->proc->pid);
+ }
+ seq_puts(m, "\n");
+ list_for_each_entry(w, &node->async_todo, entry)
+ print_binder_work(m, " ",
+ " pending async transaction", w);
+}
+
+static void print_binder_ref(struct seq_file *m, struct binder_ref *ref)
+{
+ seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %p\n",
+ ref->debug_id, ref->desc, ref->node->proc ? "" : "dead ",
+ ref->node->debug_id, ref->strong, ref->weak, ref->death);
+}
+
+static void print_binder_proc(struct seq_file *m,
+ struct binder_proc *proc, int print_all)
+{
+ struct binder_work *w;
+ struct rb_node *n;
+ size_t start_pos = m->count;
+ size_t header_pos;
+
+ seq_printf(m, "proc %d\n", proc->pid);
+ header_pos = m->count;
+
+ for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+ print_binder_thread(m, rb_entry(n, struct binder_thread,
+ rb_node), print_all);
+ for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
+ struct binder_node *node = rb_entry(n, struct binder_node,
+ rb_node);
+ if (print_all || node->has_async_transaction)
+ print_binder_node(m, node);
+ }
+ if (print_all) {
+ for (n = rb_first(&proc->refs_by_desc);
+ n != NULL;
+ n = rb_next(n))
+ print_binder_ref(m, rb_entry(n, struct binder_ref,
+ rb_node_desc));
+ }
+ for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
+ print_binder_buffer(m, " buffer",
+ rb_entry(n, struct binder_buffer, rb_node));
+ list_for_each_entry(w, &proc->todo, entry)
+ print_binder_work(m, " ", " pending transaction", w);
+ list_for_each_entry(w, &proc->delivered_death, entry) {
+ seq_puts(m, " has delivered dead binder\n");
+ break;
+ }
+ if (!print_all && m->count == header_pos)
+ m->count = start_pos;
+}
+
+static const char *binder_return_strings[] = {
+ "BR_ERROR",
+ "BR_OK",
+ "BR_TRANSACTION",
+ "BR_REPLY",
+ "BR_ACQUIRE_RESULT",
+ "BR_DEAD_REPLY",
+ "BR_TRANSACTION_COMPLETE",
+ "BR_INCREFS",
+ "BR_ACQUIRE",
+ "BR_RELEASE",
+ "BR_DECREFS",
+ "BR_ATTEMPT_ACQUIRE",
+ "BR_NOOP",
+ "BR_SPAWN_LOOPER",
+ "BR_FINISHED",
+ "BR_DEAD_BINDER",
+ "BR_CLEAR_DEATH_NOTIFICATION_DONE",
+ "BR_FAILED_REPLY"
+};
+
+static const char *binder_command_strings[] = {
+ "BC_TRANSACTION",
+ "BC_REPLY",
+ "BC_ACQUIRE_RESULT",
+ "BC_FREE_BUFFER",
+ "BC_INCREFS",
+ "BC_ACQUIRE",
+ "BC_RELEASE",
+ "BC_DECREFS",
+ "BC_INCREFS_DONE",
+ "BC_ACQUIRE_DONE",
+ "BC_ATTEMPT_ACQUIRE",
+ "BC_REGISTER_LOOPER",
+ "BC_ENTER_LOOPER",
+ "BC_EXIT_LOOPER",
+ "BC_REQUEST_DEATH_NOTIFICATION",
+ "BC_CLEAR_DEATH_NOTIFICATION",
+ "BC_DEAD_BINDER_DONE"
+};
+
+static const char *binder_objstat_strings[] = {
+ "proc",
+ "thread",
+ "node",
+ "ref",
+ "death",
+ "transaction",
+ "transaction_complete"
+};
+
+static void print_binder_stats(struct seq_file *m, const char *prefix,
+ struct binder_stats *stats)
+{
+ int i;
+
+ BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
+ ARRAY_SIZE(binder_command_strings));
+ for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
+ if (stats->bc[i])
+ seq_printf(m, "%s%s: %d\n", prefix,
+ binder_command_strings[i], stats->bc[i]);
+ }
+
+ BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
+ ARRAY_SIZE(binder_return_strings));
+ for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
+ if (stats->br[i])
+ seq_printf(m, "%s%s: %d\n", prefix,
+ binder_return_strings[i], stats->br[i]);
+ }
+
+ BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
+ ARRAY_SIZE(binder_objstat_strings));
+ BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
+ ARRAY_SIZE(stats->obj_deleted));
+ for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
+ if (stats->obj_created[i] || stats->obj_deleted[i])
+ seq_printf(m, "%s%s: active %d total %d\n", prefix,
+ binder_objstat_strings[i],
+ stats->obj_created[i] - stats->obj_deleted[i],
+ stats->obj_created[i]);
+ }
+}
+
+static void print_binder_proc_stats(struct seq_file *m,
+ struct binder_proc *proc)
+{
+ struct binder_work *w;
+ struct rb_node *n;
+ int count, strong, weak;
+
+ seq_printf(m, "proc %d\n", proc->pid);
+ count = 0;
+ for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
+ count++;
+ seq_printf(m, " threads: %d\n", count);
+ seq_printf(m, " requested threads: %d+%d/%d\n"
+ " ready threads %d\n"
+ " free async space %zd\n", proc->requested_threads,
+ proc->requested_threads_started, proc->max_threads,
+ proc->ready_threads, proc->free_async_space);
+ count = 0;
+ for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
+ count++;
+ seq_printf(m, " nodes: %d\n", count);
+ count = 0;
+ strong = 0;
+ weak = 0;
+ for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
+ struct binder_ref *ref = rb_entry(n, struct binder_ref,
+ rb_node_desc);
+ count++;
+ strong += ref->strong;
+ weak += ref->weak;
+ }
+ seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
+
+ count = 0;
+ for (n = rb_first(&proc->allocated_buffers); n != NULL; n = rb_next(n))
+ count++;
+ seq_printf(m, " buffers: %d\n", count);
+
+ count = 0;
+ list_for_each_entry(w, &proc->todo, entry) {
+ switch (w->type) {
+ case BINDER_WORK_TRANSACTION:
+ count++;
+ break;
+ default:
+ break;
+ }
+ }
+ seq_printf(m, " pending transactions: %d\n", count);
+
+ print_binder_stats(m, " ", &proc->stats);
+}
+
+
+static int binder_state_show(struct seq_file *m, void *unused)
+{
+ struct binder_proc *proc;
+ struct hlist_node *pos;
+ struct binder_node *node;
+ int do_lock = !binder_debug_no_lock;
+
+ if (do_lock)
+ mutex_lock(&binder_lock);
+
+ seq_puts(m, "binder state:\n");
+
+ if (!hlist_empty(&binder_dead_nodes))
+ seq_puts(m, "dead nodes:\n");
+ hlist_for_each_entry(node, pos, &binder_dead_nodes, dead_node)
+ print_binder_node(m, node);
+
+ hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+ print_binder_proc(m, proc, 1);
+ if (do_lock)
+ mutex_unlock(&binder_lock);
+ return 0;
+}
+
+static int binder_stats_show(struct seq_file *m, void *unused)
+{
+ struct binder_proc *proc;
+ struct hlist_node *pos;
+ int do_lock = !binder_debug_no_lock;
+
+ if (do_lock)
+ mutex_lock(&binder_lock);
+
+ seq_puts(m, "binder stats:\n");
+
+ print_binder_stats(m, "", &binder_stats);
+
+ hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+ print_binder_proc_stats(m, proc);
+ if (do_lock)
+ mutex_unlock(&binder_lock);
+ return 0;
+}
+
+static int binder_transactions_show(struct seq_file *m, void *unused)
+{
+ struct binder_proc *proc;
+ struct hlist_node *pos;
+ int do_lock = !binder_debug_no_lock;
+
+ if (do_lock)
+ mutex_lock(&binder_lock);
+
+ seq_puts(m, "binder transactions:\n");
+ hlist_for_each_entry(proc, pos, &binder_procs, proc_node)
+ print_binder_proc(m, proc, 0);
+ if (do_lock)
+ mutex_unlock(&binder_lock);
+ return 0;
+}
+
+static int binder_proc_show(struct seq_file *m, void *unused)
+{
+ struct binder_proc *proc = m->private;
+ int do_lock = !binder_debug_no_lock;
+
+ if (do_lock)
+ mutex_lock(&binder_lock);
+ seq_puts(m, "binder proc state:\n");
+ print_binder_proc(m, proc, 1);
+ if (do_lock)
+ mutex_unlock(&binder_lock);
+ return 0;
+}
+
+static void print_binder_transaction_log_entry(struct seq_file *m,
+ struct binder_transaction_log_entry *e)
+{
+ seq_printf(m,
+ "%d: %s from %d:%d to %d:%d node %d handle %d size %d:%d\n",
+ e->debug_id, (e->call_type == 2) ? "reply" :
+ ((e->call_type == 1) ? "async" : "call "), e->from_proc,
+ e->from_thread, e->to_proc, e->to_thread, e->to_node,
+ e->target_handle, e->data_size, e->offsets_size);
+}
+
+static int binder_transaction_log_show(struct seq_file *m, void *unused)
+{
+ struct binder_transaction_log *log = m->private;
+ int i;
+
+ if (log->full) {
+ for (i = log->next; i < ARRAY_SIZE(log->entry); i++)
+ print_binder_transaction_log_entry(m, &log->entry[i]);
+ }
+ for (i = 0; i < log->next; i++)
+ print_binder_transaction_log_entry(m, &log->entry[i]);
+ return 0;
+}
+
+static const struct file_operations binder_fops = {
+ .owner = THIS_MODULE,
+ .poll = binder_poll,
+ .unlocked_ioctl = binder_ioctl,
+ .mmap = binder_mmap,
+ .open = binder_open,
+ .flush = binder_flush,
+ .release = binder_release,
+};
+
+static struct miscdevice binder_miscdev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "binder",
+ .fops = &binder_fops
+};
+
+BINDER_DEBUG_ENTRY(state);
+BINDER_DEBUG_ENTRY(stats);
+BINDER_DEBUG_ENTRY(transactions);
+BINDER_DEBUG_ENTRY(transaction_log);
+
+static int __init binder_init(void)
+{
+ int ret;
+
+ binder_deferred_workqueue = create_singlethread_workqueue("binder");
+ if (!binder_deferred_workqueue)
+ return -ENOMEM;
+
+ binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
+ if (binder_debugfs_dir_entry_root)
+ binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
+ binder_debugfs_dir_entry_root);
+ ret = misc_register(&binder_miscdev);
+ if (binder_debugfs_dir_entry_root) {
+ debugfs_create_file("state",
+ S_IRUGO,
+ binder_debugfs_dir_entry_root,
+ NULL,
+ &binder_state_fops);
+ debugfs_create_file("stats",
+ S_IRUGO,
+ binder_debugfs_dir_entry_root,
+ NULL,
+ &binder_stats_fops);
+ debugfs_create_file("transactions",
+ S_IRUGO,
+ binder_debugfs_dir_entry_root,
+ NULL,
+ &binder_transactions_fops);
+ debugfs_create_file("transaction_log",
+ S_IRUGO,
+ binder_debugfs_dir_entry_root,
+ &binder_transaction_log,
+ &binder_transaction_log_fops);
+ debugfs_create_file("failed_transaction_log",
+ S_IRUGO,
+ binder_debugfs_dir_entry_root,
+ &binder_transaction_log_failed,
+ &binder_transaction_log_fops);
+ }
+ return ret;
+}
+
+device_initcall(binder_init);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/android/binder.h b/drivers/staging/android/binder.h
new file mode 100644
index 0000000..25ab6f2
--- /dev/null
+++ b/drivers/staging/android/binder.h
@@ -0,0 +1,330 @@
+/*
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Based on, but no longer compatible with, the original
+ * OpenBinder.org binder driver interface, which is:
+ *
+ * Copyright (c) 2005 Palmsource, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_BINDER_H
+#define _LINUX_BINDER_H
+
+#include <linux/ioctl.h>
+
+#define B_PACK_CHARS(c1, c2, c3, c4) \
+ ((((c1)<<24)) | (((c2)<<16)) | (((c3)<<8)) | (c4))
+#define B_TYPE_LARGE 0x85
+
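+/*
+ * For illustration: B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE) packs the four
+ * bytes into a single word, so BINDER_TYPE_BINDER below evaluates to
+ * 0x73622a85 ('s' = 0x73, 'b' = 0x62, '*' = 0x2a, B_TYPE_LARGE = 0x85).
+ */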
+enum {
+ BINDER_TYPE_BINDER = B_PACK_CHARS('s', 'b', '*', B_TYPE_LARGE),
+ BINDER_TYPE_WEAK_BINDER = B_PACK_CHARS('w', 'b', '*', B_TYPE_LARGE),
+ BINDER_TYPE_HANDLE = B_PACK_CHARS('s', 'h', '*', B_TYPE_LARGE),
+ BINDER_TYPE_WEAK_HANDLE = B_PACK_CHARS('w', 'h', '*', B_TYPE_LARGE),
+ BINDER_TYPE_FD = B_PACK_CHARS('f', 'd', '*', B_TYPE_LARGE),
+};
+
+enum {
+ FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
+ FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
+};
+
+/*
+ * This is the flattened representation of a Binder object for transfer
+ * between processes. The 'offsets' supplied as part of a binder transaction
+ * contains offsets into the data where these structures occur. The Binder
+ * driver takes care of re-writing the structure type and data as it moves
+ * between processes.
+ */
+struct flat_binder_object {
+ /* 8 bytes for large_flat_header. */
+ unsigned long type;
+ unsigned long flags;
+
+ /* 8 bytes of data. */
+ union {
+ void *binder; /* local object */
+ signed long handle; /* remote object */
+ };
+
+ /* extra data associated with local object */
+ void *cookie;
+};
+
+/*
+ * On 64-bit platforms where user code may run in 32-bit mode, the driver must
+ * translate the buffer (and local binder) addresses appropriately.
+ */
+
+struct binder_write_read {
+ signed long write_size; /* bytes to write */
+ signed long write_consumed; /* bytes consumed by driver */
+ unsigned long write_buffer;
+ signed long read_size; /* bytes to read */
+ signed long read_consumed; /* bytes consumed by driver */
+ unsigned long read_buffer;
+};
+
+/* Use with BINDER_VERSION, driver fills in fields. */
+struct binder_version {
+ /* driver protocol version -- increment with incompatible change */
+ signed long protocol_version;
+};
+
+/* This is the current protocol version. */
+#define BINDER_CURRENT_PROTOCOL_VERSION 7
+
+#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
+#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, int64_t)
+#define BINDER_SET_MAX_THREADS _IOW('b', 5, size_t)
+#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, int)
+#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, int)
+#define BINDER_THREAD_EXIT _IOW('b', 8, int)
+#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
+
+/*
+ * NOTE: Two special error codes you should check for when calling
+ * in to the driver are:
+ *
+ * EINTR -- The operation has been interrupted. This should be
+ * handled by retrying the ioctl() until a different error code
+ * is returned.
+ *
+ * ECONNREFUSED -- The driver is no longer accepting operations
+ * from your process. That is, the process is being destroyed.
+ * You should handle this by exiting from your process. Note
+ * that once this error code is returned, all further calls to
+ * the driver from any thread will return this same code.
+ */
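+
+/*
+ * Illustrative sketch only (the descriptor 'fd' and the 'bwr' variable are
+ * assumptions, not part of this interface): the EINTR rule above amounts to
+ * a user-space retry loop such as
+ *
+ *	do {
+ *		err = ioctl(fd, BINDER_WRITE_READ, &bwr);
+ *	} while (err < 0 && errno == EINTR);
+ */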
+
+enum transaction_flags {
+ TF_ONE_WAY = 0x01, /* this is a one-way call: async, no return */
+ TF_ROOT_OBJECT = 0x04, /* contents are the component's root object */
+ TF_STATUS_CODE = 0x08, /* contents are a 32-bit status code */
+ TF_ACCEPT_FDS = 0x10, /* allow replies with file descriptors */
+};
+
+struct binder_transaction_data {
+ /* The first two are only used for bcTRANSACTION and brTRANSACTION,
+ * identifying the target and contents of the transaction.
+ */
+ union {
+ size_t handle; /* target descriptor of command transaction */
+ void *ptr; /* target descriptor of return transaction */
+ } target;
+ void *cookie; /* target object cookie */
+ unsigned int code; /* transaction command */
+
+ /* General information about the transaction. */
+ unsigned int flags;
+ pid_t sender_pid;
+ uid_t sender_euid;
+ size_t data_size; /* number of bytes of data */
+ size_t offsets_size; /* number of bytes of offsets */
+
+ /* If this transaction is inline, the data immediately
+ * follows here; otherwise, it ends with a pointer to
+ * the data buffer.
+ */
+ union {
+ struct {
+ /* transaction data */
+ const void *buffer;
+ /* offsets from buffer to flat_binder_object structs */
+ const void *offsets;
+ } ptr;
+ uint8_t buf[8];
+ } data;
+};
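+
+/*
+ * Rough example of how the structure above might be filled in for a
+ * synchronous call; the method code and 'payload' buffer are hypothetical,
+ * and descriptor 0 is assumed to name the context manager:
+ *
+ *	struct binder_transaction_data tr;
+ *	memset(&tr, 0, sizeof(tr));
+ *	tr.target.handle = 0;
+ *	tr.code = 1;
+ *	tr.flags = TF_ACCEPT_FDS;
+ *	tr.data_size = sizeof(payload);
+ *	tr.offsets_size = 0;
+ *	tr.data.ptr.buffer = payload;
+ *	tr.data.ptr.offsets = NULL;
+ */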
+
+struct binder_ptr_cookie {
+ void *ptr;
+ void *cookie;
+};
+
+struct binder_pri_desc {
+ int priority;
+ int desc;
+};
+
+struct binder_pri_ptr_cookie {
+ int priority;
+ void *ptr;
+ void *cookie;
+};
+
+enum BinderDriverReturnProtocol {
+ BR_ERROR = _IOR('r', 0, int),
+ /*
+ * int: error code
+ */
+
+ BR_OK = _IO('r', 1),
+ /* No parameters! */
+
+ BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data),
+ BR_REPLY = _IOR('r', 3, struct binder_transaction_data),
+ /*
+ * binder_transaction_data: the received command.
+ */
+
+ BR_ACQUIRE_RESULT = _IOR('r', 4, int),
+ /*
+ * not currently supported
+ * int: 0 if the last bcATTEMPT_ACQUIRE was not successful.
+ * Else the remote object has acquired a primary reference.
+ */
+
+ BR_DEAD_REPLY = _IO('r', 5),
+ /*
+ * The target of the last transaction (either a bcTRANSACTION or
+ * a bcATTEMPT_ACQUIRE) is no longer with us. No parameters.
+ */
+
+ BR_TRANSACTION_COMPLETE = _IO('r', 6),
+ /*
+ * No parameters... always refers to the last transaction requested
+ * (including replies). Note that this will be sent even for
+ * asynchronous transactions.
+ */
+
+ BR_INCREFS = _IOR('r', 7, struct binder_ptr_cookie),
+ BR_ACQUIRE = _IOR('r', 8, struct binder_ptr_cookie),
+ BR_RELEASE = _IOR('r', 9, struct binder_ptr_cookie),
+ BR_DECREFS = _IOR('r', 10, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BR_ATTEMPT_ACQUIRE = _IOR('r', 11, struct binder_pri_ptr_cookie),
+ /*
+ * not currently supported
+ * int: priority
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BR_NOOP = _IO('r', 12),
+ /*
+ * No parameters. Do nothing and examine the next command. It exists
+ * primarily so that we can replace it with a BR_SPAWN_LOOPER command.
+ */
+
+ BR_SPAWN_LOOPER = _IO('r', 13),
+ /*
+ * No parameters. The driver has determined that a process has no
+ * threads waiting to service incoming transactions. When a process
+ * receives this command, it must spawn a new service thread and
+ * register it via bcENTER_LOOPER.
+ */
+
+ BR_FINISHED = _IO('r', 14),
+ /*
+ * not currently supported
+ * stop threadpool thread
+ */
+
+ BR_DEAD_BINDER = _IOR('r', 15, void *),
+ /*
+ * void *: cookie
+ */
+ BR_CLEAR_DEATH_NOTIFICATION_DONE = _IOR('r', 16, void *),
+ /*
+ * void *: cookie
+ */
+
+ BR_FAILED_REPLY = _IO('r', 17),
+ /*
+ * The last transaction (either a bcTRANSACTION or
+ * a bcATTEMPT_ACQUIRE) failed (e.g. out of memory). No parameters.
+ */
+};
+
+enum BinderDriverCommandProtocol {
+ BC_TRANSACTION = _IOW('c', 0, struct binder_transaction_data),
+ BC_REPLY = _IOW('c', 1, struct binder_transaction_data),
+ /*
+ * binder_transaction_data: the sent command.
+ */
+
+ BC_ACQUIRE_RESULT = _IOW('c', 2, int),
+ /*
+ * not currently supported
+ * int: 0 if the last BR_ATTEMPT_ACQUIRE was not successful.
+ * Else you have acquired a primary reference on the object.
+ */
+
+ BC_FREE_BUFFER = _IOW('c', 3, int),
+ /*
+ * void *: ptr to transaction data received on a read
+ */
+
+ BC_INCREFS = _IOW('c', 4, int),
+ BC_ACQUIRE = _IOW('c', 5, int),
+ BC_RELEASE = _IOW('c', 6, int),
+ BC_DECREFS = _IOW('c', 7, int),
+ /*
+ * int: descriptor
+ */
+
+ BC_INCREFS_DONE = _IOW('c', 8, struct binder_ptr_cookie),
+ BC_ACQUIRE_DONE = _IOW('c', 9, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie for binder
+ */
+
+ BC_ATTEMPT_ACQUIRE = _IOW('c', 10, struct binder_pri_desc),
+ /*
+ * not currently supported
+ * int: priority
+ * int: descriptor
+ */
+
+ BC_REGISTER_LOOPER = _IO('c', 11),
+ /*
+ * No parameters.
+ * Register a spawned looper thread with the device.
+ */
+
+ BC_ENTER_LOOPER = _IO('c', 12),
+ BC_EXIT_LOOPER = _IO('c', 13),
+ /*
+ * No parameters.
+ * These two commands are sent as an application-level thread
+ * enters and exits the binder loop, respectively. They are
+ * used so the binder can have an accurate count of the number
+ * of looping threads it has available.
+ */
+
+ BC_REQUEST_DEATH_NOTIFICATION = _IOW('c', 14, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie
+ */
+
+ BC_CLEAR_DEATH_NOTIFICATION = _IOW('c', 15, struct binder_ptr_cookie),
+ /*
+ * void *: ptr to binder
+ * void *: cookie
+ */
+
+ BC_DEAD_BINDER_DONE = _IOW('c', 16, void *),
+ /*
+ * void *: cookie
+ */
+};
+
+#endif /* _LINUX_BINDER_H */
+
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
new file mode 100644
index 0000000..ffc2d04
--- /dev/null
+++ b/drivers/staging/android/logger.c
@@ -0,0 +1,616 @@
+/*
+ * drivers/misc/logger.c
+ *
+ * A Logging Subsystem
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * Robert Love <rlove@google.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include "logger.h"
+
+#include <asm/ioctls.h>
+
+/*
+ * struct logger_log - represents a specific log, such as 'main' or 'radio'
+ *
+ * This structure lives from module insertion until module removal, so it does
+ * not need additional reference counting. The structure is protected by the
+ * mutex 'mutex'.
+ */
+struct logger_log {
+ unsigned char *buffer;/* the ring buffer itself */
+ struct miscdevice misc; /* misc device representing the log */
+ wait_queue_head_t wq; /* wait queue for readers */
+ struct list_head readers; /* this log's readers */
+ struct mutex mutex; /* mutex protecting buffer */
+ size_t w_off; /* current write head offset */
+ size_t head; /* new readers start here */
+ size_t size; /* size of the log */
+};
+
+/*
+ * struct logger_reader - a logging device open for reading
+ *
+ * This object lives from open to release, so we don't need additional
+ * reference counting. The structure is protected by log->mutex.
+ */
+struct logger_reader {
+ struct logger_log *log; /* associated log */
+ struct list_head list; /* entry in logger_log's list */
+ size_t r_off; /* current read head offset */
+};
+
+/* logger_offset - returns index 'n' into the log via (optimized) modulus */
+#define logger_offset(n) ((n) & (log->size - 1))
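+/*
+ * Example: with the 256K logs defined below, logger_offset(256*1024 + 12)
+ * is 12, i.e. offsets simply wrap back to the start of the buffer; this
+ * relies on log->size being a power of two.
+ */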
+
+/*
+ * file_get_log - Given a file structure, return the associated log
+ *
+ * This isn't aesthetic. We have several goals:
+ *
+ * 1) Need to quickly obtain the associated log during an I/O operation
+ * 2) Readers need to maintain state (logger_reader)
+ * 3) Writers need to be very fast (open() should be a near no-op)
+ *
+ * In the reader case, we can trivially go file->logger_reader->logger_log.
+ * For a writer, we don't want to maintain a logger_reader, so we just go
+ * file->logger_log. Thus what file->private_data points at depends on whether
+ * or not the file was opened for reading. This function hides that dirtiness.
+ */
+static inline struct logger_log *file_get_log(struct file *file)
+{
+ if (file->f_mode & FMODE_READ) {
+ struct logger_reader *reader = file->private_data;
+ return reader->log;
+ } else
+ return file->private_data;
+}
+
+/*
+ * get_entry_len - Grabs the length of the payload of the next entry starting
+ * from 'off'.
+ *
+ * Caller needs to hold log->mutex.
+ */
+static __u32 get_entry_len(struct logger_log *log, size_t off)
+{
+ __u16 val;
+
+ switch (log->size - off) {
+ case 1:
+ memcpy(&val, log->buffer + off, 1);
+ memcpy(((char *) &val) + 1, log->buffer, 1);
+ break;
+ default:
+ memcpy(&val, log->buffer + off, 2);
+ }
+
+ return sizeof(struct logger_entry) + val;
+}
+
+/*
+ * do_read_log_to_user - reads exactly 'count' bytes from 'log' into the
+ * user-space buffer 'buf'. Returns 'count' on success.
+ *
+ * Caller must hold log->mutex.
+ */
+static ssize_t do_read_log_to_user(struct logger_log *log,
+ struct logger_reader *reader,
+ char __user *buf,
+ size_t count)
+{
+ size_t len;
+
+ /*
+ * We read from the log in two disjoint operations. First, we read from
+ * the current read head offset up to 'count' bytes or to the end of
+ * the log, whichever comes first.
+ */
+ len = min(count, log->size - reader->r_off);
+ if (copy_to_user(buf, log->buffer + reader->r_off, len))
+ return -EFAULT;
+
+ /*
+ * Second, we read any remaining bytes, starting back at the head of
+ * the log.
+ */
+ if (count != len)
+ if (copy_to_user(buf + len, log->buffer, count - len))
+ return -EFAULT;
+
+ reader->r_off = logger_offset(reader->r_off + count);
+
+ return count;
+}
+
+/*
+ * logger_read - our log's read() method
+ *
+ * Behavior:
+ *
+ * - O_NONBLOCK works
+ * - If there are no log entries to read, blocks until log is written to
+ * - Atomically reads exactly one log entry
+ *
+ * Optimal read size is LOGGER_ENTRY_MAX_LEN. Will set errno to EINVAL if read
+ * buffer is insufficient to hold next entry.
+ */
+static ssize_t logger_read(struct file *file, char __user *buf,
+ size_t count, loff_t *pos)
+{
+ struct logger_reader *reader = file->private_data;
+ struct logger_log *log = reader->log;
+ ssize_t ret;
+ DEFINE_WAIT(wait);
+
+start:
+ while (1) {
+ prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);
+
+ mutex_lock(&log->mutex);
+ ret = (log->w_off == reader->r_off);
+ mutex_unlock(&log->mutex);
+ if (!ret)
+ break;
+
+ if (file->f_flags & O_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
+
+ schedule();
+ }
+
+ finish_wait(&log->wq, &wait);
+ if (ret)
+ return ret;
+
+ mutex_lock(&log->mutex);
+
+ /* is there still something to read or did we race? */
+ if (unlikely(log->w_off == reader->r_off)) {
+ mutex_unlock(&log->mutex);
+ goto start;
+ }
+
+ /* get the size of the next entry */
+ ret = get_entry_len(log, reader->r_off);
+ if (count < ret) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* get exactly one entry from the log */
+ ret = do_read_log_to_user(log, reader, buf, ret);
+
+out:
+ mutex_unlock(&log->mutex);
+
+ return ret;
+}
+
+/*
+ * get_next_entry - return the offset of the first valid entry at least 'len'
+ * bytes after 'off'.
+ *
+ * Caller must hold log->mutex.
+ */
+static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
+{
+ size_t count = 0;
+
+ do {
+ size_t nr = get_entry_len(log, off);
+ off = logger_offset(off + nr);
+ count += nr;
+ } while (count < len);
+
+ return off;
+}
+
+/*
+ * clock_interval - is a < c < b in mod-space? Put another way, does the line
+ * from a to b cross c?
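+ *
+ * For example, clock_interval(10, 3, 1) returns 1: walking forward from
+ * offset 10, wrapping past the end of the buffer to offset 3, crosses
+ * offset 1.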
+ */
+static inline int clock_interval(size_t a, size_t b, size_t c)
+{
+ if (b < a) {
+ if (a < c || b >= c)
+ return 1;
+ } else {
+ if (a < c && b >= c)
+ return 1;
+ }
+
+ return 0;
+}
+
+/*
+ * fix_up_readers - walk the list of all readers and "fix up" any who were
+ * lapped by the writer; also do the same for the default "start head".
+ * We do this by "pulling forward" the readers and start head to the first
+ * entry after the new write head.
+ *
+ * The caller needs to hold log->mutex.
+ */
+static void fix_up_readers(struct logger_log *log, size_t len)
+{
+ size_t old = log->w_off;
+ size_t new = logger_offset(old + len);
+ struct logger_reader *reader;
+
+ if (clock_interval(old, new, log->head))
+ log->head = get_next_entry(log, log->head, len);
+
+ list_for_each_entry(reader, &log->readers, list)
+ if (clock_interval(old, new, reader->r_off))
+ reader->r_off = get_next_entry(log, reader->r_off, len);
+}
+
+/*
+ * do_write_log - writes 'count' bytes from 'buf' to 'log'
+ *
+ * The caller needs to hold log->mutex.
+ */
+static void do_write_log(struct logger_log *log, const void *buf, size_t count)
+{
+ size_t len;
+
+ len = min(count, log->size - log->w_off);
+ memcpy(log->buffer + log->w_off, buf, len);
+
+ if (count != len)
+ memcpy(log->buffer, buf + len, count - len);
+
+ log->w_off = logger_offset(log->w_off + count);
+
+}
+
+/*
+ * do_write_log_from_user - writes 'count' bytes from the user-space buffer
+ * 'buf' to the log 'log'
+ *
+ * The caller needs to hold log->mutex.
+ *
+ * Returns 'count' on success, negative error code on failure.
+ */
+static ssize_t do_write_log_from_user(struct logger_log *log,
+ const void __user *buf, size_t count)
+{
+ size_t len;
+
+ len = min(count, log->size - log->w_off);
+ if (len && copy_from_user(log->buffer + log->w_off, buf, len))
+ return -EFAULT;
+
+ if (count != len)
+ if (copy_from_user(log->buffer, buf + len, count - len))
+ return -EFAULT;
+
+ log->w_off = logger_offset(log->w_off + count);
+
+ return count;
+}
+
+/*
+ * logger_aio_write - our write method, implementing support for write(),
+ * writev(), and aio_write(). Writes are our fast path, and we try to optimize
+ * them above all else.
+ */
+ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t ppos)
+{
+ struct logger_log *log = file_get_log(iocb->ki_filp);
+ size_t orig = log->w_off;
+ struct logger_entry header;
+ struct timespec now;
+ ssize_t ret = 0;
+
+ now = current_kernel_time();
+
+ header.pid = current->tgid;
+ header.tid = current->pid;
+ header.sec = now.tv_sec;
+ header.nsec = now.tv_nsec;
+ header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD);
+
+ /* null writes succeed, return zero */
+ if (unlikely(!header.len))
+ return 0;
+
+ mutex_lock(&log->mutex);
+
+ /*
+ * Fix up any readers, pulling them forward to the first readable
+ * entry after (what will be) the new write offset. We do this now
+ * because if we partially fail, we can end up with clobbered log
+ * entries that encroach on readable buffer.
+ */
+ fix_up_readers(log, sizeof(struct logger_entry) + header.len);
+
+ do_write_log(log, &header, sizeof(struct logger_entry));
+
+ while (nr_segs-- > 0) {
+ size_t len;
+ ssize_t nr;
+
+ /* figure out how much of this vector we can keep */
+ len = min_t(size_t, iov->iov_len, header.len - ret);
+
+ /* write out this segment's payload */
+ nr = do_write_log_from_user(log, iov->iov_base, len);
+ if (unlikely(nr < 0)) {
+ log->w_off = orig;
+ mutex_unlock(&log->mutex);
+ return nr;
+ }
+
+ iov++;
+ ret += nr;
+ }
+
+ mutex_unlock(&log->mutex);
+
+ /* wake up any blocked readers */
+ wake_up_interruptible(&log->wq);
+
+ return ret;
+}
+
+static struct logger_log *get_log_from_minor(int);
+
+/*
+ * logger_open - the log's open() file operation
+ *
+ * Note how near a no-op this is in the write-only case. Keep it that way!
+ */
+static int logger_open(struct inode *inode, struct file *file)
+{
+ struct logger_log *log;
+ int ret;
+
+ ret = nonseekable_open(inode, file);
+ if (ret)
+ return ret;
+
+ log = get_log_from_minor(MINOR(inode->i_rdev));
+ if (!log)
+ return -ENODEV;
+
+ if (file->f_mode & FMODE_READ) {
+ struct logger_reader *reader;
+
+ reader = kmalloc(sizeof(struct logger_reader), GFP_KERNEL);
+ if (!reader)
+ return -ENOMEM;
+
+ reader->log = log;
+ INIT_LIST_HEAD(&reader->list);
+
+ mutex_lock(&log->mutex);
+ reader->r_off = log->head;
+ list_add_tail(&reader->list, &log->readers);
+ mutex_unlock(&log->mutex);
+
+ file->private_data = reader;
+ } else
+ file->private_data = log;
+
+ return 0;
+}
+
+/*
+ * logger_release - the log's release file operation
+ *
+ * Note this is a total no-op in the write-only case. Keep it that way!
+ */
+static int logger_release(struct inode *ignored, struct file *file)
+{
+ if (file->f_mode & FMODE_READ) {
+ struct logger_reader *reader = file->private_data;
+ list_del(&reader->list);
+ kfree(reader);
+ }
+
+ return 0;
+}
+
+/*
+ * logger_poll - the log's poll file operation, for poll/select/epoll
+ *
+ * Note we always return POLLOUT, because you can always write() to the log.
+ * Note also that, strictly speaking, a return value of POLLIN does not
+ * guarantee that the log is readable without blocking, as there is a small
+ * chance that the writer can lap the reader in the interim between poll()
+ * returning and the read() request.
+ */
+static unsigned int logger_poll(struct file *file, poll_table *wait)
+{
+ struct logger_reader *reader;
+ struct logger_log *log;
+ unsigned int ret = POLLOUT | POLLWRNORM;
+
+ if (!(file->f_mode & FMODE_READ))
+ return ret;
+
+ reader = file->private_data;
+ log = reader->log;
+
+ poll_wait(file, &log->wq, wait);
+
+ mutex_lock(&log->mutex);
+ if (log->w_off != reader->r_off)
+ ret |= POLLIN | POLLRDNORM;
+ mutex_unlock(&log->mutex);
+
+ return ret;
+}
+
+static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct logger_log *log = file_get_log(file);
+ struct logger_reader *reader;
+ long ret = -ENOTTY;
+
+ mutex_lock(&log->mutex);
+
+ switch (cmd) {
+ case LOGGER_GET_LOG_BUF_SIZE:
+ ret = log->size;
+ break;
+ case LOGGER_GET_LOG_LEN:
+ if (!(file->f_mode & FMODE_READ)) {
+ ret = -EBADF;
+ break;
+ }
+ reader = file->private_data;
+ if (log->w_off >= reader->r_off)
+ ret = log->w_off - reader->r_off;
+ else
+ ret = (log->size - reader->r_off) + log->w_off;
+ break;
+ case LOGGER_GET_NEXT_ENTRY_LEN:
+ if (!(file->f_mode & FMODE_READ)) {
+ ret = -EBADF;
+ break;
+ }
+ reader = file->private_data;
+ if (log->w_off != reader->r_off)
+ ret = get_entry_len(log, reader->r_off);
+ else
+ ret = 0;
+ break;
+ case LOGGER_FLUSH_LOG:
+ if (!(file->f_mode & FMODE_WRITE)) {
+ ret = -EBADF;
+ break;
+ }
+ list_for_each_entry(reader, &log->readers, list)
+ reader->r_off = log->w_off;
+ log->head = log->w_off;
+ ret = 0;
+ break;
+ }
+
+ mutex_unlock(&log->mutex);
+
+ return ret;
+}
+
+static const struct file_operations logger_fops = {
+ .owner = THIS_MODULE,
+ .read = logger_read,
+ .aio_write = logger_aio_write,
+ .poll = logger_poll,
+ .unlocked_ioctl = logger_ioctl,
+ .compat_ioctl = logger_ioctl,
+ .open = logger_open,
+ .release = logger_release,
+};
+
+/*
+ * Defines a log structure with name 'NAME' and a size of 'SIZE' bytes, which
+ * must be a power of two, greater than LOGGER_ENTRY_MAX_LEN, and less than
+ * LONG_MAX minus LOGGER_ENTRY_MAX_LEN.
+ */
+#define DEFINE_LOGGER_DEVICE(VAR, NAME, SIZE) \
+static unsigned char _buf_ ## VAR[SIZE]; \
+static struct logger_log VAR = { \
+ .buffer = _buf_ ## VAR, \
+ .misc = { \
+ .minor = MISC_DYNAMIC_MINOR, \
+ .name = NAME, \
+ .fops = &logger_fops, \
+ .parent = NULL, \
+ }, \
+ .wq = __WAIT_QUEUE_HEAD_INITIALIZER(VAR .wq), \
+ .readers = LIST_HEAD_INIT(VAR .readers), \
+ .mutex = __MUTEX_INITIALIZER(VAR .mutex), \
+ .w_off = 0, \
+ .head = 0, \
+ .size = SIZE, \
+};
+
+DEFINE_LOGGER_DEVICE(log_main, LOGGER_LOG_MAIN, 256*1024)
+DEFINE_LOGGER_DEVICE(log_events, LOGGER_LOG_EVENTS, 256*1024)
+DEFINE_LOGGER_DEVICE(log_radio, LOGGER_LOG_RADIO, 256*1024)
+DEFINE_LOGGER_DEVICE(log_system, LOGGER_LOG_SYSTEM, 256*1024)
+
+static struct logger_log *get_log_from_minor(int minor)
+{
+ if (log_main.misc.minor == minor)
+ return &log_main;
+ if (log_events.misc.minor == minor)
+ return &log_events;
+ if (log_radio.misc.minor == minor)
+ return &log_radio;
+ if (log_system.misc.minor == minor)
+ return &log_system;
+ return NULL;
+}
+
+static int __init init_log(struct logger_log *log)
+{
+ int ret;
+
+ ret = misc_register(&log->misc);
+ if (unlikely(ret)) {
+ printk(KERN_ERR "logger: failed to register misc "
+ "device for log '%s'!\n", log->misc.name);
+ return ret;
+ }
+
+ printk(KERN_INFO "logger: created %luK log '%s'\n",
+ (unsigned long) log->size >> 10, log->misc.name);
+
+ return 0;
+}
+
+static int __init logger_init(void)
+{
+ int ret;
+
+ ret = init_log(&log_main);
+ if (unlikely(ret))
+ goto out;
+
+ ret = init_log(&log_events);
+ if (unlikely(ret))
+ goto out;
+
+ ret = init_log(&log_radio);
+ if (unlikely(ret))
+ goto out;
+
+ ret = init_log(&log_system);
+ if (unlikely(ret))
+ goto out;
+
+out:
+ return ret;
+}
+device_initcall(logger_init);
diff --git a/drivers/staging/android/logger.h b/drivers/staging/android/logger.h
new file mode 100644
index 0000000..2cb06e9
--- /dev/null
+++ b/drivers/staging/android/logger.h
@@ -0,0 +1,49 @@
+/* include/linux/logger.h
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ * Author: Robert Love <rlove@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_LOGGER_H
+#define _LINUX_LOGGER_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+struct logger_entry {
+ __u16 len; /* length of the payload */
+ __u16 __pad; /* no matter what, we get 2 bytes of padding */
+ __s32 pid; /* generating process's pid */
+ __s32 tid; /* generating process's tid */
+ __s32 sec; /* seconds since Epoch */
+ __s32 nsec; /* nanoseconds */
+ char msg[0]; /* the entry's payload */
+};
+
+#define LOGGER_LOG_RADIO "log_radio" /* radio-related messages */
+#define LOGGER_LOG_EVENTS "log_events" /* system/hardware events */
+#define LOGGER_LOG_SYSTEM "log_system" /* system/framework messages */
+#define LOGGER_LOG_MAIN "log_main" /* everything else */
+
+#define LOGGER_ENTRY_MAX_LEN (4*1024)
+#define LOGGER_ENTRY_MAX_PAYLOAD \
+ (LOGGER_ENTRY_MAX_LEN - sizeof(struct logger_entry))
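+
+/*
+ * With the 20-byte struct logger_entry above, LOGGER_ENTRY_MAX_PAYLOAD works
+ * out to 4096 - 20 = 4076 bytes of message text per entry.
+ */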
+
+#define __LOGGERIO 0xAE
+
+#define LOGGER_GET_LOG_BUF_SIZE _IO(__LOGGERIO, 1) /* size of log */
+#define LOGGER_GET_LOG_LEN _IO(__LOGGERIO, 2) /* used log len */
+#define LOGGER_GET_NEXT_ENTRY_LEN _IO(__LOGGERIO, 3) /* next entry len */
+#define LOGGER_FLUSH_LOG _IO(__LOGGERIO, 4) /* flush log */
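+
+/*
+ * Minimal reader-side sketch (the descriptor 'fd' is an assumption; it would
+ * come from opening one of the log devices above, e.g. "log_main", for
+ * reading):
+ *
+ *	long buf_size = ioctl(fd, LOGGER_GET_LOG_BUF_SIZE);
+ *	long unread   = ioctl(fd, LOGGER_GET_LOG_LEN);
+ */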
+
+#endif /* _LINUX_LOGGER_H */
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
new file mode 100644
index 0000000..efc7dc1
--- /dev/null
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -0,0 +1,222 @@
+/* drivers/misc/lowmemorykiller.c
+ *
+ * The lowmemorykiller driver lets user-space specify a set of memory thresholds
+ * where processes with a range of oom_adj values will get killed. Specify the
+ * minimum oom_adj values in /sys/module/lowmemorykiller/parameters/adj and the
+ * number of free pages in /sys/module/lowmemorykiller/parameters/minfree. Both
+ * files take a comma separated list of numbers in ascending order.
+ *
+ * For example, write "0,8" to /sys/module/lowmemorykiller/parameters/adj and
+ * "1024,4096" to /sys/module/lowmemorykiller/parameters/minfree to kill
+ * processes with an oom_adj value of 8 or higher when the free memory drops
+ * below 4096 pages and kill processes with an oom_adj value of 0 or higher
+ * when the free memory drops below 1024 pages.
+ *
+ * The driver considers memory used for caches to be free, but if a large
+ * percentage of the cached memory is locked this can be very inaccurate
+ * and processes may not get killed until the normal oom killer is triggered.
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/oom.h>
+#include <linux/sched.h>
+#include <linux/profile.h>
+#include <linux/notifier.h>
+
+static uint32_t lowmem_debug_level = 2;
+static int lowmem_adj[6] = {
+ 0,
+ 1,
+ 6,
+ 12,
+};
+static int lowmem_adj_size = 4;
+static size_t lowmem_minfree[6] = {
+ 3 * 512, /* 6MB */
+ 2 * 1024, /* 8MB */
+ 4 * 1024, /* 16MB */
+ 16 * 1024, /* 64MB */
+};
+static int lowmem_minfree_size = 4;
+
+static struct task_struct *lowmem_deathpending;
+static unsigned long lowmem_deathpending_timeout;
+
+#define lowmem_print(level, x...) \
+ do { \
+ if (lowmem_debug_level >= (level)) \
+ printk(x); \
+ } while (0)
+
+static int
+task_notify_func(struct notifier_block *self, unsigned long val, void *data);
+
+static struct notifier_block task_nb = {
+ .notifier_call = task_notify_func,
+};
+
+static int
+task_notify_func(struct notifier_block *self, unsigned long val, void *data)
+{
+ struct task_struct *task = data;
+ if (task == lowmem_deathpending) {
+ lowmem_deathpending = NULL;
+ task_handoff_unregister(&task_nb);
+ }
+ return NOTIFY_OK;
+}
+
+static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
+{
+ struct task_struct *p;
+ struct task_struct *selected = NULL;
+ int rem = 0;
+ int tasksize;
+ int i;
+ int min_adj = OOM_ADJUST_MAX + 1;
+ int selected_tasksize = 0;
+ int selected_oom_adj;
+ int array_size = ARRAY_SIZE(lowmem_adj);
+ int other_free = global_page_state(NR_FREE_PAGES);
+ int other_file = global_page_state(NR_FILE_PAGES) -
+ global_page_state(NR_SHMEM);
+
+ /*
+ * If we already have a death outstanding, then
+ * bail out right away; indicating to vmscan
+ * that we have nothing further to offer on
+ * this pass.
+ *
+ * Note: Currently you need CONFIG_PROFILING
+ * for this to work correctly.
+ */
+ if (lowmem_deathpending &&
+ time_before_eq(jiffies, lowmem_deathpending_timeout))
+ return 0;
+
+ if (lowmem_adj_size < array_size)
+ array_size = lowmem_adj_size;
+ if (lowmem_minfree_size < array_size)
+ array_size = lowmem_minfree_size;
+ for (i = 0; i < array_size; i++) {
+ if (other_free < lowmem_minfree[i] &&
+ other_file < lowmem_minfree[i]) {
+ min_adj = lowmem_adj[i];
+ break;
+ }
+ }
+ if (sc->nr_to_scan > 0)
+ lowmem_print(3, "lowmem_shrink %lu, %x, ofree %d %d, ma %d\n",
+ sc->nr_to_scan, sc->gfp_mask, other_free,
+ other_file, min_adj);
+ rem = global_page_state(NR_ACTIVE_ANON) +
+ global_page_state(NR_ACTIVE_FILE) +
+ global_page_state(NR_INACTIVE_ANON) +
+ global_page_state(NR_INACTIVE_FILE);
+ if (sc->nr_to_scan <= 0 || min_adj == OOM_ADJUST_MAX + 1) {
+ lowmem_print(5, "lowmem_shrink %lu, %x, return %d\n",
+ sc->nr_to_scan, sc->gfp_mask, rem);
+ return rem;
+ }
+ selected_oom_adj = min_adj;
+
+ read_lock(&tasklist_lock);
+ for_each_process(p) {
+ struct mm_struct *mm;
+ struct signal_struct *sig;
+ int oom_adj;
+
+ task_lock(p);
+ mm = p->mm;
+ sig = p->signal;
+ if (!mm || !sig) {
+ task_unlock(p);
+ continue;
+ }
+ oom_adj = sig->oom_adj;
+ if (oom_adj < min_adj) {
+ task_unlock(p);
+ continue;
+ }
+ tasksize = get_mm_rss(mm);
+ task_unlock(p);
+ if (tasksize <= 0)
+ continue;
+ if (selected) {
+ if (oom_adj < selected_oom_adj)
+ continue;
+ if (oom_adj == selected_oom_adj &&
+ tasksize <= selected_tasksize)
+ continue;
+ }
+ selected = p;
+ selected_tasksize = tasksize;
+ selected_oom_adj = oom_adj;
+ lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
+ p->pid, p->comm, oom_adj, tasksize);
+ }
+ if (selected) {
+ lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
+ selected->pid, selected->comm,
+ selected_oom_adj, selected_tasksize);
+ /*
+ * If CONFIG_PROFILING is off, then task_handoff_register()
+ * is a nop. In that case we don't want to stall the killer
+ * by setting lowmem_deathpending.
+ */
+#ifdef CONFIG_PROFILING
+ lowmem_deathpending = selected;
+ lowmem_deathpending_timeout = jiffies + HZ;
+ task_handoff_register(&task_nb);
+#endif
+ force_sig(SIGKILL, selected);
+ rem -= selected_tasksize;
+ }
+ lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
+ sc->nr_to_scan, sc->gfp_mask, rem);
+ read_unlock(&tasklist_lock);
+ return rem;
+}
+
+static struct shrinker lowmem_shrinker = {
+ .shrink = lowmem_shrink,
+ .seeks = DEFAULT_SEEKS * 16
+};
+
+static int __init lowmem_init(void)
+{
+ register_shrinker(&lowmem_shrinker);
+ return 0;
+}
+
+static void __exit lowmem_exit(void)
+{
+ unregister_shrinker(&lowmem_shrinker);
+}
+
+module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
+module_param_array_named(adj, lowmem_adj, int, &lowmem_adj_size,
+ S_IRUGO | S_IWUSR);
+module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
+ S_IRUGO | S_IWUSR);
+module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
+
+module_init(lowmem_init);
+module_exit(lowmem_exit);
+
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/staging/android/ram_console.c b/drivers/staging/android/ram_console.c
new file mode 100644
index 0000000..6d4d679
--- /dev/null
+++ b/drivers/staging/android/ram_console.c
@@ -0,0 +1,443 @@
+/* drivers/android/ram_console.c
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/console.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/proc_fs.h>
+#include <linux/string.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include "ram_console.h"
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+#include <linux/rslib.h>
+#endif
+
+struct ram_console_buffer {
+ uint32_t sig;
+ uint32_t start;
+ uint32_t size;
+ uint8_t data[0];
+};
+
+#define RAM_CONSOLE_SIG (0x43474244) /* DBGC */
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
+static char __initdata
+ ram_console_old_log_init_buffer[CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE];
+#endif
+static char *ram_console_old_log;
+static size_t ram_console_old_log_size;
+
+static struct ram_console_buffer *ram_console_buffer;
+static size_t ram_console_buffer_size;
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+static char *ram_console_par_buffer;
+static struct rs_control *ram_console_rs_decoder;
+static int ram_console_corrected_bytes;
+static int ram_console_bad_blocks;
+#define ECC_BLOCK_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_DATA_SIZE
+#define ECC_SIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_ECC_SIZE
+#define ECC_SYMSIZE CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_SYMBOL_SIZE
+#define ECC_POLY CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION_POLYNOMIAL
+#endif
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+static void ram_console_encode_rs8(uint8_t *data, size_t len, uint8_t *ecc)
+{
+ int i;
+ uint16_t par[ECC_SIZE];
+ /* Initialize the parity buffer */
+ memset(par, 0, sizeof(par));
+ encode_rs8(ram_console_rs_decoder, data, len, par, 0);
+ for (i = 0; i < ECC_SIZE; i++)
+ ecc[i] = par[i];
+}
+
+static int ram_console_decode_rs8(void *data, size_t len, uint8_t *ecc)
+{
+ int i;
+ uint16_t par[ECC_SIZE];
+ for (i = 0; i < ECC_SIZE; i++)
+ par[i] = ecc[i];
+ return decode_rs8(ram_console_rs_decoder, data, par, len,
+ NULL, 0, NULL, 0, NULL);
+}
+#endif
+
+static void ram_console_update(const char *s, unsigned int count)
+{
+ struct ram_console_buffer *buffer = ram_console_buffer;
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ uint8_t *buffer_end = buffer->data + ram_console_buffer_size;
+ uint8_t *block;
+ uint8_t *par;
+ int size = ECC_BLOCK_SIZE;
+#endif
+ memcpy(buffer->data + buffer->start, s, count);
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ block = buffer->data + (buffer->start & ~(ECC_BLOCK_SIZE - 1));
+ par = ram_console_par_buffer +
+ (buffer->start / ECC_BLOCK_SIZE) * ECC_SIZE;
+ do {
+ if (block + ECC_BLOCK_SIZE > buffer_end)
+ size = buffer_end - block;
+ ram_console_encode_rs8(block, size, par);
+ block += ECC_BLOCK_SIZE;
+ par += ECC_SIZE;
+ } while (block < buffer->data + buffer->start + count);
+#endif
+}
+
+static void ram_console_update_header(void)
+{
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ struct ram_console_buffer *buffer = ram_console_buffer;
+ uint8_t *par;
+ par = ram_console_par_buffer +
+ DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE;
+ ram_console_encode_rs8((uint8_t *)buffer, sizeof(*buffer), par);
+#endif
+}
+
+static void
+ram_console_write(struct console *console, const char *s, unsigned int count)
+{
+ int rem;
+ struct ram_console_buffer *buffer = ram_console_buffer;
+
+ if (count > ram_console_buffer_size) {
+ s += count - ram_console_buffer_size;
+ count = ram_console_buffer_size;
+ }
+ rem = ram_console_buffer_size - buffer->start;
+ if (rem < count) {
+ ram_console_update(s, rem);
+ s += rem;
+ count -= rem;
+ buffer->start = 0;
+ buffer->size = ram_console_buffer_size;
+ }
+ ram_console_update(s, count);
+
+ buffer->start += count;
+ if (buffer->size < ram_console_buffer_size)
+ buffer->size += count;
+ ram_console_update_header();
+}
+
+static struct console ram_console = {
+ .name = "ram",
+ .write = ram_console_write,
+ .flags = CON_PRINTBUFFER | CON_ENABLED,
+ .index = -1,
+};
+
+void ram_console_enable_console(int enabled)
+{
+ if (enabled)
+ ram_console.flags |= CON_ENABLED;
+ else
+ ram_console.flags &= ~CON_ENABLED;
+}
+
+static void __init
+ram_console_save_old(struct ram_console_buffer *buffer, const char *bootinfo,
+ char *dest)
+{
+ size_t old_log_size = buffer->size;
+ size_t bootinfo_size = 0;
+ size_t total_size = old_log_size;
+ char *ptr;
+ const char *bootinfo_label = "Boot info:\n";
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ uint8_t *block;
+ uint8_t *par;
+ char strbuf[80];
+ int strbuf_len = 0;
+
+ block = buffer->data;
+ par = ram_console_par_buffer;
+ while (block < buffer->data + buffer->size) {
+ int numerr;
+ int size = ECC_BLOCK_SIZE;
+ if (block + size > buffer->data + ram_console_buffer_size)
+ size = buffer->data + ram_console_buffer_size - block;
+ numerr = ram_console_decode_rs8(block, size, par);
+ if (numerr > 0) {
+#if 0
+ printk(KERN_INFO "ram_console: error in block %p, %d\n",
+ block, numerr);
+#endif
+ ram_console_corrected_bytes += numerr;
+ } else if (numerr < 0) {
+#if 0
+ printk(KERN_INFO "ram_console: uncorrectable error in "
+ "block %p\n", block);
+#endif
+ ram_console_bad_blocks++;
+ }
+ block += ECC_BLOCK_SIZE;
+ par += ECC_SIZE;
+ }
+ if (ram_console_corrected_bytes || ram_console_bad_blocks)
+ strbuf_len = snprintf(strbuf, sizeof(strbuf),
+ "\n%d Corrected bytes, %d unrecoverable blocks\n",
+ ram_console_corrected_bytes, ram_console_bad_blocks);
+ else
+ strbuf_len = snprintf(strbuf, sizeof(strbuf),
+ "\nNo errors detected\n");
+ if (strbuf_len >= sizeof(strbuf))
+ strbuf_len = sizeof(strbuf) - 1;
+ total_size += strbuf_len;
+#endif
+
+ if (bootinfo)
+ bootinfo_size = strlen(bootinfo) + strlen(bootinfo_label);
+ total_size += bootinfo_size;
+
+ if (dest == NULL) {
+ dest = kmalloc(total_size, GFP_KERNEL);
+ if (dest == NULL) {
+ printk(KERN_ERR
+ "ram_console: failed to allocate buffer\n");
+ return;
+ }
+ }
+
+ ram_console_old_log = dest;
+ ram_console_old_log_size = total_size;
+ memcpy(ram_console_old_log,
+ &buffer->data[buffer->start], buffer->size - buffer->start);
+ memcpy(ram_console_old_log + buffer->size - buffer->start,
+ &buffer->data[0], buffer->start);
+ ptr = ram_console_old_log + old_log_size;
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ memcpy(ptr, strbuf, strbuf_len);
+ ptr += strbuf_len;
+#endif
+ if (bootinfo) {
+ memcpy(ptr, bootinfo_label, strlen(bootinfo_label));
+ ptr += strlen(bootinfo_label);
+ memcpy(ptr, bootinfo, bootinfo_size);
+ ptr += bootinfo_size;
+ }
+}
+
+static int __init ram_console_init(struct ram_console_buffer *buffer,
+ size_t buffer_size, const char *bootinfo,
+ char *old_buf)
+{
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ int numerr;
+ uint8_t *par;
+#endif
+ ram_console_buffer = buffer;
+ ram_console_buffer_size =
+ buffer_size - sizeof(struct ram_console_buffer);
+
+ if (ram_console_buffer_size > buffer_size) {
+ pr_err("ram_console: buffer %p, invalid size %zu, "
+ "datasize %zu\n", buffer, buffer_size,
+ ram_console_buffer_size);
+ return 0;
+ }
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ERROR_CORRECTION
+ ram_console_buffer_size -= (DIV_ROUND_UP(ram_console_buffer_size,
+ ECC_BLOCK_SIZE) + 1) * ECC_SIZE;
+
+ if (ram_console_buffer_size > buffer_size) {
+ pr_err("ram_console: buffer %p, invalid size %zu, "
+ "non-ecc datasize %zu\n",
+ buffer, buffer_size, ram_console_buffer_size);
+ return 0;
+ }
+
+ ram_console_par_buffer = buffer->data + ram_console_buffer_size;
+
+ /* first consecutive root is 0
+ * primitive element to generate roots = 1
+ */
+ ram_console_rs_decoder = init_rs(ECC_SYMSIZE, ECC_POLY, 0, 1, ECC_SIZE);
+ if (ram_console_rs_decoder == NULL) {
+ printk(KERN_INFO "ram_console: init_rs failed\n");
+ return 0;
+ }
+
+ ram_console_corrected_bytes = 0;
+ ram_console_bad_blocks = 0;
+
+ par = ram_console_par_buffer +
+ DIV_ROUND_UP(ram_console_buffer_size, ECC_BLOCK_SIZE) * ECC_SIZE;
+
+ numerr = ram_console_decode_rs8(buffer, sizeof(*buffer), par);
+ if (numerr > 0) {
+ printk(KERN_INFO "ram_console: error in header, %d\n", numerr);
+ ram_console_corrected_bytes += numerr;
+ } else if (numerr < 0) {
+ printk(KERN_INFO
+ "ram_console: uncorrectable error in header\n");
+ ram_console_bad_blocks++;
+ }
+#endif
+
+ if (buffer->sig == RAM_CONSOLE_SIG) {
+ if (buffer->size > ram_console_buffer_size
+ || buffer->start > buffer->size)
+ printk(KERN_INFO "ram_console: found existing invalid "
+ "buffer, size %d, start %d\n",
+ buffer->size, buffer->start);
+ else {
+ printk(KERN_INFO "ram_console: found existing buffer, "
+ "size %d, start %d\n",
+ buffer->size, buffer->start);
+ ram_console_save_old(buffer, bootinfo, old_buf);
+ }
+ } else {
+ printk(KERN_INFO "ram_console: no valid data in buffer "
+ "(sig = 0x%08x)\n", buffer->sig);
+ }
+
+ buffer->sig = RAM_CONSOLE_SIG;
+ buffer->start = 0;
+ buffer->size = 0;
+
+ register_console(&ram_console);
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_ENABLE_VERBOSE
+ console_verbose();
+#endif
+ return 0;
+}
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
+static int __init ram_console_early_init(void)
+{
+ return ram_console_init((struct ram_console_buffer *)
+ CONFIG_ANDROID_RAM_CONSOLE_EARLY_ADDR,
+ CONFIG_ANDROID_RAM_CONSOLE_EARLY_SIZE,
+ NULL,
+ ram_console_old_log_init_buffer);
+}
+#else
+static int ram_console_driver_probe(struct platform_device *pdev)
+{
+ struct resource *res = pdev->resource;
+ size_t start;
+ size_t buffer_size;
+ void *buffer;
+ const char *bootinfo = NULL;
+ struct ram_console_platform_data *pdata = pdev->dev.platform_data;
+
+ if (res == NULL || pdev->num_resources != 1 ||
+ !(res->flags & IORESOURCE_MEM)) {
+ printk(KERN_ERR "ram_console: invalid resource, %p %d flags "
+ "%lx\n", res, pdev->num_resources, res ? res->flags : 0);
+ return -ENXIO;
+ }
+ buffer_size = res->end - res->start + 1;
+ start = res->start;
+ printk(KERN_INFO "ram_console: got buffer at %zx, size %zx\n",
+ start, buffer_size);
+ buffer = ioremap(res->start, buffer_size);
+ if (buffer == NULL) {
+ printk(KERN_ERR "ram_console: failed to map memory\n");
+ return -ENOMEM;
+ }
+
+ if (pdata)
+ bootinfo = pdata->bootinfo;
+
+ return ram_console_init(buffer, buffer_size, bootinfo, NULL/* allocate */);
+}
+
+static struct platform_driver ram_console_driver = {
+ .probe = ram_console_driver_probe,
+ .driver = {
+ .name = "ram_console",
+ },
+};
+
+static int __init ram_console_module_init(void)
+{
+ int err;
+ err = platform_driver_register(&ram_console_driver);
+ return err;
+}
+#endif
+
+static ssize_t ram_console_read_old(struct file *file, char __user *buf,
+ size_t len, loff_t *offset)
+{
+ loff_t pos = *offset;
+ ssize_t count;
+
+ if (pos >= ram_console_old_log_size)
+ return 0;
+
+ count = min(len, (size_t)(ram_console_old_log_size - pos));
+ if (copy_to_user(buf, ram_console_old_log + pos, count))
+ return -EFAULT;
+
+ *offset += count;
+ return count;
+}
+
+static const struct file_operations ram_console_file_ops = {
+ .owner = THIS_MODULE,
+ .read = ram_console_read_old,
+};
+
+static int __init ram_console_late_init(void)
+{
+ struct proc_dir_entry *entry;
+
+ if (ram_console_old_log == NULL)
+ return 0;
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
+ ram_console_old_log = kmalloc(ram_console_old_log_size, GFP_KERNEL);
+ if (ram_console_old_log == NULL) {
+ printk(KERN_ERR
+ "ram_console: failed to allocate buffer for old log\n");
+ ram_console_old_log_size = 0;
+ return 0;
+ }
+ memcpy(ram_console_old_log,
+ ram_console_old_log_init_buffer, ram_console_old_log_size);
+#endif
+ entry = create_proc_entry("last_kmsg", S_IFREG | S_IRUGO, NULL);
+ if (!entry) {
+ printk(KERN_ERR "ram_console: failed to create proc entry\n");
+ kfree(ram_console_old_log);
+ ram_console_old_log = NULL;
+ return 0;
+ }
+
+ entry->proc_fops = &ram_console_file_ops;
+ entry->size = ram_console_old_log_size;
+ return 0;
+}
+
+#ifdef CONFIG_ANDROID_RAM_CONSOLE_EARLY_INIT
+console_initcall(ram_console_early_init);
+#else
+postcore_initcall(ram_console_module_init);
+#endif
+late_initcall(ram_console_late_init);
+
diff --git a/drivers/staging/android/ram_console.h b/drivers/staging/android/ram_console.h
new file mode 100644
index 0000000..9f1125c
--- /dev/null
+++ b/drivers/staging/android/ram_console.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _INCLUDE_LINUX_PLATFORM_DATA_RAM_CONSOLE_H_
+#define _INCLUDE_LINUX_PLATFORM_DATA_RAM_CONSOLE_H_
+
+struct ram_console_platform_data {
+ const char *bootinfo;
+};
+
+#endif /* _INCLUDE_LINUX_PLATFORM_DATA_RAM_CONSOLE_H_ */
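Illustrative sketch, not part of the patch: how a board file might register the "ram_console" platform device consumed by ram_console_driver_probe() above, i.e. a single IORESOURCE_MEM resource plus the optional bootinfo string. The base address, region size, and strings are placeholders, and the header include path is assumed.

#include <linux/ioport.h>
#include <linux/platform_device.h>
#include "ram_console.h"	/* path as appropriate for the board file */

static struct resource ram_console_resource = {
	.start = 0x20000000,			/* placeholder: reserved RAM base */
	.end   = 0x20000000 + 0x10000 - 1,	/* placeholder: 64 KiB region */
	.flags = IORESOURCE_MEM,
};

static struct ram_console_platform_data ram_console_pdata = {
	.bootinfo = "board rev A",		/* placeholder string */
};

static struct platform_device ram_console_device = {
	.name		= "ram_console",	/* matches ram_console_driver above */
	.id		= -1,
	.num_resources	= 1,
	.resource	= &ram_console_resource,
	.dev		= {
		.platform_data = &ram_console_pdata,
	},
};

Board init code would then call platform_device_register(&ram_console_device).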
diff --git a/drivers/staging/android/switch/Kconfig b/drivers/staging/android/switch/Kconfig
new file mode 100644
index 0000000..36846f6
--- /dev/null
+++ b/drivers/staging/android/switch/Kconfig
@@ -0,0 +1,11 @@
+menuconfig ANDROID_SWITCH
+ tristate "Android Switch class support"
+ help
+ Say Y here to enable Android switch class support. This allows
+ user space to monitor switches via sysfs and uevent.
+
+config ANDROID_SWITCH_GPIO
+ tristate "Android GPIO Switch support"
+ depends on GENERIC_GPIO && ANDROID_SWITCH
+ help
+ Say Y here to enable GPIO based switch support.
diff --git a/drivers/staging/android/switch/Makefile b/drivers/staging/android/switch/Makefile
new file mode 100644
index 0000000..d76bfdc
--- /dev/null
+++ b/drivers/staging/android/switch/Makefile
@@ -0,0 +1,4 @@
+# Android Switch Class Driver
+obj-$(CONFIG_ANDROID_SWITCH) += switch_class.o
+obj-$(CONFIG_ANDROID_SWITCH_GPIO) += switch_gpio.o
+
diff --git a/drivers/staging/android/switch/switch.h b/drivers/staging/android/switch/switch.h
new file mode 100644
index 0000000..4fcb310
--- /dev/null
+++ b/drivers/staging/android/switch/switch.h
@@ -0,0 +1,53 @@
+/*
+ * Switch class driver
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef __LINUX_SWITCH_H__
+#define __LINUX_SWITCH_H__
+
+struct switch_dev {
+ const char *name;
+ struct device *dev;
+ int index;
+ int state;
+
+ ssize_t (*print_name)(struct switch_dev *sdev, char *buf);
+ ssize_t (*print_state)(struct switch_dev *sdev, char *buf);
+};
+
+struct gpio_switch_platform_data {
+ const char *name;
+ unsigned gpio;
+
+ /* if NULL, switch_dev.name will be printed */
+ const char *name_on;
+ const char *name_off;
+ /* if NULL, "0" or "1" will be printed */
+ const char *state_on;
+ const char *state_off;
+};
+
+extern int switch_dev_register(struct switch_dev *sdev);
+extern void switch_dev_unregister(struct switch_dev *sdev);
+
+static inline int switch_get_state(struct switch_dev *sdev)
+{
+ return sdev->state;
+}
+
+extern void switch_set_state(struct switch_dev *sdev, int state);
+
+#endif /* __LINUX_SWITCH_H__ */
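Illustrative sketch, not part of the patch: how a board file might hand gpio_switch_platform_data to the "switch-gpio" driver added later in this diff. The switch name and GPIO number are placeholders, and the header include path is assumed.

#include <linux/platform_device.h>
#include "switch.h"	/* path as appropriate for the board file */

static struct gpio_switch_platform_data hs_switch_pdata = {
	.name		= "h2w",	/* placeholder switch name */
	.gpio		= 28,		/* placeholder GPIO number */
	.state_on	= "1",
	.state_off	= "0",
};

static struct platform_device hs_switch_device = {
	.name	= "switch-gpio",	/* matches gpio_switch_driver below */
	.id	= -1,
	.dev	= {
		.platform_data = &hs_switch_pdata,
	},
};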
diff --git a/drivers/staging/android/switch/switch_class.c b/drivers/staging/android/switch/switch_class.c
new file mode 100644
index 0000000..7468044
--- /dev/null
+++ b/drivers/staging/android/switch/switch_class.c
@@ -0,0 +1,174 @@
+/*
+ * switch_class.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include "switch.h"
+
+struct class *switch_class;
+static atomic_t device_count;
+
+static ssize_t state_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct switch_dev *sdev = (struct switch_dev *)
+ dev_get_drvdata(dev);
+
+ if (sdev->print_state) {
+ int ret = sdev->print_state(sdev, buf);
+ if (ret >= 0)
+ return ret;
+ }
+ return sprintf(buf, "%d\n", sdev->state);
+}
+
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct switch_dev *sdev = (struct switch_dev *)
+ dev_get_drvdata(dev);
+
+ if (sdev->print_name) {
+ int ret = sdev->print_name(sdev, buf);
+ if (ret >= 0)
+ return ret;
+ }
+ return sprintf(buf, "%s\n", sdev->name);
+}
+
+static DEVICE_ATTR(state, S_IRUGO, state_show, NULL);
+static DEVICE_ATTR(name, S_IRUGO, name_show, NULL);
+
+void switch_set_state(struct switch_dev *sdev, int state)
+{
+ char name_buf[120];
+ char state_buf[120];
+ char *prop_buf;
+ char *envp[3];
+ int env_offset = 0;
+ int length;
+
+ if (sdev->state != state) {
+ sdev->state = state;
+
+ prop_buf = (char *)get_zeroed_page(GFP_KERNEL);
+ if (prop_buf) {
+ length = name_show(sdev->dev, NULL, prop_buf);
+ if (length > 0) {
+ if (prop_buf[length - 1] == '\n')
+ prop_buf[length - 1] = 0;
+ snprintf(name_buf, sizeof(name_buf),
+ "SWITCH_NAME=%s", prop_buf);
+ envp[env_offset++] = name_buf;
+ }
+ length = state_show(sdev->dev, NULL, prop_buf);
+ if (length > 0) {
+ if (prop_buf[length - 1] == '\n')
+ prop_buf[length - 1] = 0;
+ snprintf(state_buf, sizeof(state_buf),
+ "SWITCH_STATE=%s", prop_buf);
+ envp[env_offset++] = state_buf;
+ }
+ envp[env_offset] = NULL;
+ kobject_uevent_env(&sdev->dev->kobj, KOBJ_CHANGE, envp);
+ free_page((unsigned long)prop_buf);
+ } else {
+ printk(KERN_ERR "out of memory in switch_set_state\n");
+ kobject_uevent(&sdev->dev->kobj, KOBJ_CHANGE);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(switch_set_state);
+
+static int create_switch_class(void)
+{
+ if (!switch_class) {
+ switch_class = class_create(THIS_MODULE, "switch");
+ if (IS_ERR(switch_class))
+ return PTR_ERR(switch_class);
+ atomic_set(&device_count, 0);
+ }
+
+ return 0;
+}
+
+int switch_dev_register(struct switch_dev *sdev)
+{
+ int ret;
+
+ if (!switch_class) {
+ ret = create_switch_class();
+ if (ret < 0)
+ return ret;
+ }
+
+ sdev->index = atomic_inc_return(&device_count);
+ sdev->dev = device_create(switch_class, NULL,
+ MKDEV(0, sdev->index), NULL, sdev->name);
+ if (IS_ERR(sdev->dev))
+ return PTR_ERR(sdev->dev);
+
+ ret = device_create_file(sdev->dev, &dev_attr_state);
+ if (ret < 0)
+ goto err_create_file_1;
+ ret = device_create_file(sdev->dev, &dev_attr_name);
+ if (ret < 0)
+ goto err_create_file_2;
+
+ dev_set_drvdata(sdev->dev, sdev);
+ sdev->state = 0;
+ return 0;
+
+err_create_file_2:
+ device_remove_file(sdev->dev, &dev_attr_state);
+err_create_file_1:
+ device_destroy(switch_class, MKDEV(0, sdev->index));
+ printk(KERN_ERR "switch: Failed to register driver %s\n", sdev->name);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(switch_dev_register);
+
+void switch_dev_unregister(struct switch_dev *sdev)
+{
+ device_remove_file(sdev->dev, &dev_attr_name);
+ device_remove_file(sdev->dev, &dev_attr_state);
+ device_destroy(switch_class, MKDEV(0, sdev->index));
+ dev_set_drvdata(sdev->dev, NULL);
+}
+EXPORT_SYMBOL_GPL(switch_dev_unregister);
+
+static int __init switch_class_init(void)
+{
+ return create_switch_class();
+}
+
+static void __exit switch_class_exit(void)
+{
+ class_destroy(switch_class);
+}
+
+module_init(switch_class_init);
+module_exit(switch_class_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("Switch class driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/switch/switch_gpio.c b/drivers/staging/android/switch/switch_gpio.c
new file mode 100644
index 0000000..38b2c2f
--- /dev/null
+++ b/drivers/staging/android/switch/switch_gpio.c
@@ -0,0 +1,172 @@
+/*
+ * switch_gpio.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/gpio.h>
+#include "switch.h"
+
+struct gpio_switch_data {
+ struct switch_dev sdev;
+ unsigned gpio;
+ const char *name_on;
+ const char *name_off;
+ const char *state_on;
+ const char *state_off;
+ int irq;
+ struct work_struct work;
+};
+
+static void gpio_switch_work(struct work_struct *work)
+{
+ int state;
+ struct gpio_switch_data *data =
+ container_of(work, struct gpio_switch_data, work);
+
+ state = gpio_get_value(data->gpio);
+ switch_set_state(&data->sdev, state);
+}
+
+static irqreturn_t gpio_irq_handler(int irq, void *dev_id)
+{
+ struct gpio_switch_data *switch_data =
+ (struct gpio_switch_data *)dev_id;
+
+ schedule_work(&switch_data->work);
+ return IRQ_HANDLED;
+}
+
+static ssize_t switch_gpio_print_state(struct switch_dev *sdev, char *buf)
+{
+ struct gpio_switch_data *switch_data =
+ container_of(sdev, struct gpio_switch_data, sdev);
+ const char *state;
+ if (switch_get_state(sdev))
+ state = switch_data->state_on;
+ else
+ state = switch_data->state_off;
+
+ if (state)
+ return sprintf(buf, "%s\n", state);
+ return -1;
+}
+
+static int gpio_switch_probe(struct platform_device *pdev)
+{
+ struct gpio_switch_platform_data *pdata = pdev->dev.platform_data;
+ struct gpio_switch_data *switch_data;
+ int ret = 0;
+
+ if (!pdata)
+ return -EBUSY;
+
+ switch_data = kzalloc(sizeof(struct gpio_switch_data), GFP_KERNEL);
+ if (!switch_data)
+ return -ENOMEM;
+
+ switch_data->sdev.name = pdata->name;
+ switch_data->gpio = pdata->gpio;
+ switch_data->name_on = pdata->name_on;
+ switch_data->name_off = pdata->name_off;
+ switch_data->state_on = pdata->state_on;
+ switch_data->state_off = pdata->state_off;
+ switch_data->sdev.print_state = switch_gpio_print_state;
+
+ ret = switch_dev_register(&switch_data->sdev);
+ if (ret < 0)
+ goto err_switch_dev_register;
+
+ ret = gpio_request(switch_data->gpio, pdev->name);
+ if (ret < 0)
+ goto err_request_gpio;
+
+ ret = gpio_direction_input(switch_data->gpio);
+ if (ret < 0)
+ goto err_set_gpio_input;
+
+ INIT_WORK(&switch_data->work, gpio_switch_work);
+
+ switch_data->irq = gpio_to_irq(switch_data->gpio);
+ if (switch_data->irq < 0) {
+ ret = switch_data->irq;
+ goto err_detect_irq_num_failed;
+ }
+
+ ret = request_irq(switch_data->irq, gpio_irq_handler,
+ IRQF_TRIGGER_LOW, pdev->name, switch_data);
+ if (ret < 0)
+ goto err_request_irq;
+
+ /* Perform initial detection */
+ gpio_switch_work(&switch_data->work);
+
+ platform_set_drvdata(pdev, switch_data);
+
+ return 0;
+
+err_request_irq:
+err_detect_irq_num_failed:
+err_set_gpio_input:
+ gpio_free(switch_data->gpio);
+err_request_gpio:
+ switch_dev_unregister(&switch_data->sdev);
+err_switch_dev_register:
+ kfree(switch_data);
+
+ return ret;
+}
+
+static int __devexit gpio_switch_remove(struct platform_device *pdev)
+{
+ struct gpio_switch_data *switch_data = platform_get_drvdata(pdev);
+
+ cancel_work_sync(&switch_data->work);
+ gpio_free(switch_data->gpio);
+ switch_dev_unregister(&switch_data->sdev);
+ kfree(switch_data);
+
+ return 0;
+}
+
+static struct platform_driver gpio_switch_driver = {
+ .probe = gpio_switch_probe,
+ .remove = __devexit_p(gpio_switch_remove),
+ .driver = {
+ .name = "switch-gpio",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init gpio_switch_init(void)
+{
+ return platform_driver_register(&gpio_switch_driver);
+}
+
+static void __exit gpio_switch_exit(void)
+{
+ platform_driver_unregister(&gpio_switch_driver);
+}
+
+module_init(gpio_switch_init);
+module_exit(gpio_switch_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("GPIO Switch driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
new file mode 100644
index 0000000..a64481c
--- /dev/null
+++ b/drivers/staging/android/timed_gpio.c
@@ -0,0 +1,176 @@
+/* drivers/misc/timed_gpio.c
+ *
+ * Copyright (C) 2008 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/hrtimer.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+
+#include "timed_output.h"
+#include "timed_gpio.h"
+
+
+struct timed_gpio_data {
+ struct timed_output_dev dev;
+ struct hrtimer timer;
+ spinlock_t lock;
+ unsigned gpio;
+ int max_timeout;
+ u8 active_low;
+};
+
+static enum hrtimer_restart gpio_timer_func(struct hrtimer *timer)
+{
+ struct timed_gpio_data *data =
+ container_of(timer, struct timed_gpio_data, timer);
+
+ gpio_direction_output(data->gpio, data->active_low ? 1 : 0);
+ return HRTIMER_NORESTART;
+}
+
+static int gpio_get_time(struct timed_output_dev *dev)
+{
+ struct timed_gpio_data *data =
+ container_of(dev, struct timed_gpio_data, dev);
+
+ if (hrtimer_active(&data->timer)) {
+ ktime_t r = hrtimer_get_remaining(&data->timer);
+ struct timeval t = ktime_to_timeval(r);
+ return t.tv_sec * 1000 + t.tv_usec / 1000;
+ } else
+ return 0;
+}
+
+static void gpio_enable(struct timed_output_dev *dev, int value)
+{
+ struct timed_gpio_data *data =
+ container_of(dev, struct timed_gpio_data, dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ /* cancel previous timer and set GPIO according to value */
+ hrtimer_cancel(&data->timer);
+ gpio_direction_output(data->gpio, data->active_low ? !value : !!value);
+
+ if (value > 0) {
+ if (value > data->max_timeout)
+ value = data->max_timeout;
+
+ hrtimer_start(&data->timer,
+ ktime_set(value / 1000, (value % 1000) * 1000000),
+ HRTIMER_MODE_REL);
+ }
+
+ spin_unlock_irqrestore(&data->lock, flags);
+}
+
+static int timed_gpio_probe(struct platform_device *pdev)
+{
+ struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
+ struct timed_gpio *cur_gpio;
+ struct timed_gpio_data *gpio_data, *gpio_dat;
+ int i, j, ret = 0;
+
+ if (!pdata)
+ return -EBUSY;
+
+ gpio_data = kzalloc(sizeof(struct timed_gpio_data) * pdata->num_gpios,
+ GFP_KERNEL);
+ if (!gpio_data)
+ return -ENOMEM;
+
+ for (i = 0; i < pdata->num_gpios; i++) {
+ cur_gpio = &pdata->gpios[i];
+ gpio_dat = &gpio_data[i];
+
+ hrtimer_init(&gpio_dat->timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ gpio_dat->timer.function = gpio_timer_func;
+ spin_lock_init(&gpio_dat->lock);
+
+ gpio_dat->dev.name = cur_gpio->name;
+ gpio_dat->dev.get_time = gpio_get_time;
+ gpio_dat->dev.enable = gpio_enable;
+ ret = gpio_request(cur_gpio->gpio, cur_gpio->name);
+ if (ret >= 0) {
+ ret = timed_output_dev_register(&gpio_dat->dev);
+ if (ret < 0)
+ gpio_free(cur_gpio->gpio);
+ }
+ if (ret < 0) {
+ for (j = 0; j < i; j++) {
+ timed_output_dev_unregister(&gpio_data[j].dev);
+ gpio_free(gpio_data[j].gpio);
+ }
+ kfree(gpio_data);
+ return ret;
+ }
+
+ gpio_dat->gpio = cur_gpio->gpio;
+ gpio_dat->max_timeout = cur_gpio->max_timeout;
+ gpio_dat->active_low = cur_gpio->active_low;
+ gpio_direction_output(gpio_dat->gpio, gpio_dat->active_low);
+ }
+
+ platform_set_drvdata(pdev, gpio_data);
+
+ return 0;
+}
+
+static int timed_gpio_remove(struct platform_device *pdev)
+{
+ struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
+ struct timed_gpio_data *gpio_data = platform_get_drvdata(pdev);
+ int i;
+
+ for (i = 0; i < pdata->num_gpios; i++) {
+ timed_output_dev_unregister(&gpio_data[i].dev);
+ gpio_free(gpio_data[i].gpio);
+ }
+
+ kfree(gpio_data);
+
+ return 0;
+}
+
+static struct platform_driver timed_gpio_driver = {
+ .probe = timed_gpio_probe,
+ .remove = timed_gpio_remove,
+ .driver = {
+ .name = TIMED_GPIO_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init timed_gpio_init(void)
+{
+ return platform_driver_register(&timed_gpio_driver);
+}
+
+static void __exit timed_gpio_exit(void)
+{
+ platform_driver_unregister(&timed_gpio_driver);
+}
+
+module_init(timed_gpio_init);
+module_exit(timed_gpio_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("timed gpio driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/timed_gpio.h b/drivers/staging/android/timed_gpio.h
new file mode 100644
index 0000000..a0e15f8
--- /dev/null
+++ b/drivers/staging/android/timed_gpio.h
@@ -0,0 +1,33 @@
+/* include/linux/timed_gpio.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef _LINUX_TIMED_GPIO_H
+#define _LINUX_TIMED_GPIO_H
+
+#define TIMED_GPIO_NAME "timed-gpio"
+
+struct timed_gpio {
+ const char *name;
+ unsigned gpio;
+ int max_timeout;
+ u8 active_low;
+};
+
+struct timed_gpio_platform_data {
+ int num_gpios;
+ struct timed_gpio *gpios;
+};
+
+#endif
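Illustrative sketch, not part of the patch: the platform data a board file might pass to the "timed-gpio" driver above, e.g. for a vibrator output. The GPIO number, name, and timeout are placeholders, and the header include path is assumed.

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include "timed_gpio.h"	/* path as appropriate for the board file */

static struct timed_gpio board_timed_gpios[] = {
	{
		.name		= "vibrator",	/* placeholder output name */
		.gpio		= 57,		/* placeholder GPIO number */
		.max_timeout	= 15000,	/* ms */
		.active_low	= 0,
	},
};

static struct timed_gpio_platform_data board_timed_gpio_pdata = {
	.num_gpios	= ARRAY_SIZE(board_timed_gpios),
	.gpios		= board_timed_gpios,
};

static struct platform_device board_timed_gpio_device = {
	.name	= TIMED_GPIO_NAME,	/* "timed-gpio", matches the driver above */
	.id	= -1,
	.dev	= {
		.platform_data = &board_timed_gpio_pdata,
	},
};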
diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
new file mode 100644
index 0000000..f373422
--- /dev/null
+++ b/drivers/staging/android/timed_output.c
@@ -0,0 +1,123 @@
+/* drivers/misc/timed_output.c
+ *
+ * Copyright (C) 2009 Google, Inc.
+ * Author: Mike Lockwood <lockwood@android.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+
+#include "timed_output.h"
+
+static struct class *timed_output_class;
+static atomic_t device_count;
+
+static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct timed_output_dev *tdev = dev_get_drvdata(dev);
+ int remaining = tdev->get_time(tdev);
+
+ return sprintf(buf, "%d\n", remaining);
+}
+
+static ssize_t enable_store(
+ struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ struct timed_output_dev *tdev = dev_get_drvdata(dev);
+ int value;
+
+ if (sscanf(buf, "%d", &value) != 1)
+ return -EINVAL;
+
+ tdev->enable(tdev, value);
+
+ return size;
+}
+
+static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, enable_show, enable_store);
+
+static int create_timed_output_class(void)
+{
+ if (!timed_output_class) {
+ timed_output_class = class_create(THIS_MODULE, "timed_output");
+ if (IS_ERR(timed_output_class))
+ return PTR_ERR(timed_output_class);
+ atomic_set(&device_count, 0);
+ }
+
+ return 0;
+}
+
+int timed_output_dev_register(struct timed_output_dev *tdev)
+{
+ int ret;
+
+ if (!tdev || !tdev->name || !tdev->enable || !tdev->get_time)
+ return -EINVAL;
+
+ ret = create_timed_output_class();
+ if (ret < 0)
+ return ret;
+
+ tdev->index = atomic_inc_return(&device_count);
+ tdev->dev = device_create(timed_output_class, NULL,
+ MKDEV(0, tdev->index), NULL, tdev->name);
+ if (IS_ERR(tdev->dev))
+ return PTR_ERR(tdev->dev);
+
+ ret = device_create_file(tdev->dev, &dev_attr_enable);
+ if (ret < 0)
+ goto err_create_file;
+
+ dev_set_drvdata(tdev->dev, tdev);
+ tdev->state = 0;
+ return 0;
+
+err_create_file:
+ device_destroy(timed_output_class, MKDEV(0, tdev->index));
+ printk(KERN_ERR "timed_output: Failed to register driver %s\n",
+ tdev->name);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(timed_output_dev_register);
+
+void timed_output_dev_unregister(struct timed_output_dev *tdev)
+{
+ device_remove_file(tdev->dev, &dev_attr_enable);
+ device_destroy(timed_output_class, MKDEV(0, tdev->index));
+ dev_set_drvdata(tdev->dev, NULL);
+}
+EXPORT_SYMBOL_GPL(timed_output_dev_unregister);
+
+static int __init timed_output_init(void)
+{
+ return create_timed_output_class();
+}
+
+static void __exit timed_output_exit(void)
+{
+ class_destroy(timed_output_class);
+}
+
+module_init(timed_output_init);
+module_exit(timed_output_exit);
+
+MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
+MODULE_DESCRIPTION("timed output class driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/timed_output.h b/drivers/staging/android/timed_output.h
new file mode 100644
index 0000000..ec907ab
--- /dev/null
+++ b/drivers/staging/android/timed_output.h
@@ -0,0 +1,37 @@
+/* include/linux/timed_output.h
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+*/
+
+#ifndef _LINUX_TIMED_OUTPUT_H
+#define _LINUX_TIMED_OUTPUT_H
+
+struct timed_output_dev {
+ const char *name;
+
+ /* enable the output and set the timer */
+ void (*enable)(struct timed_output_dev *sdev, int timeout);
+
+ /* returns the current number of milliseconds remaining on the timer */
+ int (*get_time)(struct timed_output_dev *sdev);
+
+ /* private data */
+ struct device *dev;
+ int index;
+ int state;
+};
+
+extern int timed_output_dev_register(struct timed_output_dev *dev);
+extern void timed_output_dev_unregister(struct timed_output_dev *dev);
+
+#endif
diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c
index 7bb7da7..1df9586 100644
--- a/drivers/staging/asus_oled/asus_oled.c
+++ b/drivers/staging/asus_oled/asus_oled.c
@@ -201,7 +201,7 @@ static ssize_t set_enabled(struct device *dev, struct device_attribute *attr,
struct usb_interface *intf = to_usb_interface(dev);
struct asus_oled_dev *odev = usb_get_intfdata(intf);
unsigned long value;
- if (strict_strtoul(buf, 10, &value))
+ if (kstrtoul(buf, 10, &value))
return -EINVAL;
enable_oled(odev, value);
@@ -217,7 +217,7 @@ static ssize_t class_set_enabled(struct device *device,
(struct asus_oled_dev *) dev_get_drvdata(device);
unsigned long value;
- if (strict_strtoul(buf, 10, &value))
+ if (kstrtoul(buf, 10, &value))
return -EINVAL;
enable_oled(odev, value);
@@ -355,7 +355,14 @@ static void send_data(struct asus_oled_dev *odev)
static int append_values(struct asus_oled_dev *odev, uint8_t val, size_t count)
{
- while (count-- > 0 && val) {
+ odev->last_val = val;
+
+ if (val == 0) {
+ odev->buf_offs += count;
+ return 0;
+ }
+
+ while (count-- > 0) {
size_t x = odev->buf_offs % odev->width;
size_t y = odev->buf_offs / odev->width;
size_t i;
@@ -406,7 +413,6 @@ static int append_values(struct asus_oled_dev *odev, uint8_t val, size_t count)
;
}
- odev->last_val = val;
odev->buf_offs++;
}
@@ -805,10 +811,9 @@ error:
static void __exit asus_oled_exit(void)
{
+ usb_deregister(&oled_driver);
class_remove_file(oled_class, &class_attr_version.attr);
class_destroy(oled_class);
-
- usb_deregister(&oled_driver);
}
module_init(asus_oled_init);
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index 2fa658e..179707b 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -161,6 +161,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
INT Status = STATUS_FAILURE;
int timeout = 0;
IOCTL_BUFFER IoBuffer;
+ int bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Parameters Passed to control IOCTL cmd=0x%X arg=0x%lX", cmd, arg);
@@ -230,11 +231,16 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if (!temp_buff)
return -ENOMEM;
- Status = rdmalt(Adapter, (UINT)sRdmBuffer.Register,
+ bytes = rdmalt(Adapter, (UINT)sRdmBuffer.Register,
(PUINT)temp_buff, Bufflen);
- if (Status == STATUS_SUCCESS) {
- if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, IoBuffer.OutputLength))
- Status = -EFAULT;
+ if (bytes > 0) {
+ Status = STATUS_SUCCESS;
+ if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, bytes)) {
+ kfree(temp_buff);
+ return -EFAULT;
+ }
+ } else {
+ Status = bytes;
}
kfree(temp_buff);
@@ -302,7 +308,11 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if (copy_from_user(&sRdmBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength))
return -EFAULT;
- /* FIXME: don't trust user supplied length */
+ if (IoBuffer.OutputLength > USHRT_MAX ||
+ IoBuffer.OutputLength == 0) {
+ return -EINVAL;
+ }
+
temp_buff = kmalloc(IoBuffer.OutputLength, GFP_KERNEL);
if (!temp_buff)
return STATUS_FAILURE;
@@ -318,11 +328,17 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
}
uiTempVar = sRdmBuffer.Register & EEPROM_REJECT_MASK;
- Status = rdmaltWithLock(Adapter, (UINT)sRdmBuffer.Register, (PUINT)temp_buff, IoBuffer.OutputLength);
+ bytes = rdmaltWithLock(Adapter, (UINT)sRdmBuffer.Register, (PUINT)temp_buff, IoBuffer.OutputLength);
- if (Status == STATUS_SUCCESS)
- if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, IoBuffer.OutputLength))
- Status = -EFAULT;
+ if (bytes > 0) {
+ Status = STATUS_SUCCESS;
+ if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, bytes)) {
+ kfree(temp_buff);
+ return -EFAULT;
+ }
+ } else {
+ Status = bytes;
+ }
kfree(temp_buff);
break;
@@ -437,12 +453,14 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
}
}
- Status = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
-
- if (STATUS_SUCCESS != Status) {
+ bytes = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
+ if (bytes < 0) {
+ Status = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
"GPIO_MODE_REGISTER read failed");
break;
+ } else {
+ Status = STATUS_SUCCESS;
}
/* Set the gpio mode register to output */
@@ -519,12 +537,15 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
uiBit = gpio_info.uiGpioNumber;
/* Set the gpio output register */
- Status = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER,
+ bytes = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER,
(PUINT)ucRead, sizeof(UINT));
- if (Status != STATUS_SUCCESS) {
+ if (bytes < 0) {
+ Status = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM Failed\n");
return Status;
+ } else {
+ Status = STATUS_SUCCESS;
}
}
break;
@@ -590,11 +611,14 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
}
if (pgpio_multi_info[WIMAX_IDX].uiGPIOMask) {
- Status = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
+ bytes = rdmaltWithLock(Adapter, (UINT)GPIO_PIN_STATE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
- if (Status != STATUS_SUCCESS) {
+ if (bytes < 0) {
+ Status = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "RDM to GPIO_PIN_STATE_REGISTER Failed.");
return Status;
+ } else {
+ Status = STATUS_SUCCESS;
}
pgpio_multi_info[WIMAX_IDX].uiGPIOValue = (*(UINT *)ucResetValue &
@@ -605,7 +629,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
"Failed while copying Content to IOBufer for user space err:%d", Status);
- break;
+ return -EFAULT;
}
}
break;
@@ -629,11 +653,14 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if (copy_from_user(&gpio_multi_mode, IoBuffer.InputBuffer, IoBuffer.InputLength))
return -EFAULT;
- Status = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
+ bytes = rdmaltWithLock(Adapter, (UINT)GPIO_MODE_REGISTER, (PUINT)ucResetValue, sizeof(UINT));
- if (STATUS_SUCCESS != Status) {
+ if (bytes < 0) {
+ Status = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Read of GPIO_MODE_REGISTER failed");
return Status;
+ } else {
+ Status = STATUS_SUCCESS;
}
/* Validating the request */
@@ -678,7 +705,7 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
"Failed while copying Content to IOBufer for user space err:%d", Status);
- break;
+ return -EFAULT;
}
}
break;
@@ -706,9 +733,8 @@ static long bcm_char_ioctl(struct file *filp, UINT cmd, ULONG arg)
return -ENOMEM;
if (copy_from_user(pvBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength)) {
- Status = -EFAULT;
kfree(pvBuffer);
- break;
+ return -EFAULT;
}
down(&Adapter->LowPowerModeSync);
@@ -733,8 +759,7 @@ cntrlEnd:
}
case IOCTL_BCM_BUFFER_DOWNLOAD_START: {
- INT NVMAccess = down_trylock(&Adapter->NVMRdmWrmLock);
- if (NVMAccess) {
+ if (down_trylock(&Adapter->NVMRdmWrmLock)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL,
"IOCTL_BCM_CHIP_RESET not allowed as EEPROM Read/Write is in progress\n");
return -EACCES;
@@ -743,157 +768,162 @@ cntrlEnd:
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
"Starting the firmware download PID =0x%x!!!!\n", current->pid);
- if (!down_trylock(&Adapter->fw_download_sema)) {
- Adapter->bBinDownloaded = FALSE;
- Adapter->fw_download_process_pid = current->pid;
- Adapter->bCfgDownloaded = FALSE;
- Adapter->fw_download_done = FALSE;
- netif_carrier_off(Adapter->dev);
- netif_stop_queue(Adapter->dev);
- Status = reset_card_proc(Adapter);
- if (Status) {
- pr_err(PFX "%s: reset_card_proc Failed!\n", Adapter->dev->name);
- up(&Adapter->fw_download_sema);
- up(&Adapter->NVMRdmWrmLock);
- break;
- }
- mdelay(10);
- } else {
- Status = -EBUSY;
+ if (down_trylock(&Adapter->fw_download_sema))
+ return -EBUSY;
+
+ Adapter->bBinDownloaded = FALSE;
+ Adapter->fw_download_process_pid = current->pid;
+ Adapter->bCfgDownloaded = FALSE;
+ Adapter->fw_download_done = FALSE;
+ netif_carrier_off(Adapter->dev);
+ netif_stop_queue(Adapter->dev);
+ Status = reset_card_proc(Adapter);
+ if (Status) {
+ pr_err(PFX "%s: reset_card_proc Failed!\n", Adapter->dev->name);
+ up(&Adapter->fw_download_sema);
+ up(&Adapter->NVMRdmWrmLock);
+ return Status;
}
+ mdelay(10);
up(&Adapter->NVMRdmWrmLock);
- break;
+ return Status;
}
case IOCTL_BCM_BUFFER_DOWNLOAD: {
FIRMWARE_INFO *psFwInfo = NULL;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Starting the firmware download PID =0x%x!!!!\n", current->pid);
- do {
- if (!down_trylock(&Adapter->fw_download_sema)) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "Invalid way to download buffer. Use Start and then call this!!!\n");
- Status = -EINVAL;
- break;
- }
-
- /* Copy Ioctl Buffer structure */
- if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
- return -EFAULT;
+ if (!down_trylock(&Adapter->fw_download_sema)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
- "Length for FW DLD is : %lx\n", IoBuffer.InputLength);
+ "Invalid way to download buffer. Use Start and then call this!!!\n");
+ up(&Adapter->fw_download_sema);
+ Status = -EINVAL;
+ return Status;
+ }
- if (IoBuffer.InputLength > sizeof(FIRMWARE_INFO))
- return -EINVAL;
+ /* Copy Ioctl Buffer structure */
+ if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) {
+ up(&Adapter->fw_download_sema);
+ return -EFAULT;
+ }
- psFwInfo = kmalloc(sizeof(*psFwInfo), GFP_KERNEL);
- if (!psFwInfo)
- return -ENOMEM;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
+ "Length for FW DLD is : %lx\n", IoBuffer.InputLength);
- if (copy_from_user(psFwInfo, IoBuffer.InputBuffer, IoBuffer.InputLength))
- return -EFAULT;
+ if (IoBuffer.InputLength > sizeof(FIRMWARE_INFO)) {
+ up(&Adapter->fw_download_sema);
+ return -EINVAL;
+ }
- if (!psFwInfo->pvMappedFirmwareAddress ||
- (psFwInfo->u32FirmwareLength == 0)) {
+ psFwInfo = kmalloc(sizeof(*psFwInfo), GFP_KERNEL);
+ if (!psFwInfo) {
+ up(&Adapter->fw_download_sema);
+ return -ENOMEM;
+ }
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Something else is wrong %lu\n",
- psFwInfo->u32FirmwareLength);
- Status = -EINVAL;
- break;
- }
+ if (copy_from_user(psFwInfo, IoBuffer.InputBuffer, IoBuffer.InputLength)) {
+ up(&Adapter->fw_download_sema);
+ return -EFAULT;
+ }
- Status = bcm_ioctl_fw_download(Adapter, psFwInfo);
+ if (!psFwInfo->pvMappedFirmwareAddress ||
+ (psFwInfo->u32FirmwareLength == 0)) {
- if (Status != STATUS_SUCCESS) {
- if (psFwInfo->u32StartingAddress == CONFIG_BEGIN_ADDR)
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL: Configuration File Upload Failed\n");
- else
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL: Firmware File Upload Failed\n");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Something else is wrong %lu\n",
+ psFwInfo->u32FirmwareLength);
+ up(&Adapter->fw_download_sema);
+ Status = -EINVAL;
+ return Status;
+ }
- /* up(&Adapter->fw_download_sema); */
+ Status = bcm_ioctl_fw_download(Adapter, psFwInfo);
- if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
- Adapter->DriverState = DRIVER_INIT;
- Adapter->LEDInfo.bLedInitDone = FALSE;
- wake_up(&Adapter->LEDInfo.notify_led_event);
- }
- }
- break;
+ if (Status != STATUS_SUCCESS) {
+ if (psFwInfo->u32StartingAddress == CONFIG_BEGIN_ADDR)
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL: Configuration File Upload Failed\n");
+ else
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "IOCTL: Firmware File Upload Failed\n");
+
+ /* up(&Adapter->fw_download_sema); */
- } while (0);
+ if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
+ Adapter->DriverState = DRIVER_INIT;
+ Adapter->LEDInfo.bLedInitDone = FALSE;
+ wake_up(&Adapter->LEDInfo.notify_led_event);
+ }
+ }
if (Status != STATUS_SUCCESS)
up(&Adapter->fw_download_sema);
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, OSAL_DBG, DBG_LVL_ALL, "IOCTL: Firmware File Uploaded\n");
kfree(psFwInfo);
- break;
+ return Status;
}
case IOCTL_BCM_BUFFER_DOWNLOAD_STOP: {
- INT NVMAccess = down_trylock(&Adapter->NVMRdmWrmLock);
+ if (!down_trylock(&Adapter->fw_download_sema)) {
+ up(&Adapter->fw_download_sema);
+ return -EINVAL;
+ }
- if (NVMAccess) {
+ if (down_trylock(&Adapter->NVMRdmWrmLock)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0,
"FW download blocked as EEPROM Read/Write is in progress\n");
up(&Adapter->fw_download_sema);
return -EACCES;
}
- if (down_trylock(&Adapter->fw_download_sema)) {
- Adapter->bBinDownloaded = TRUE;
- Adapter->bCfgDownloaded = TRUE;
- atomic_set(&Adapter->CurrNumFreeTxDesc, 0);
- Adapter->CurrNumRecvDescs = 0;
- Adapter->downloadDDR = 0;
-
- /* setting the Mips to Run */
- Status = run_card_proc(Adapter);
+ Adapter->bBinDownloaded = TRUE;
+ Adapter->bCfgDownloaded = TRUE;
+ atomic_set(&Adapter->CurrNumFreeTxDesc, 0);
+ Adapter->CurrNumRecvDescs = 0;
+ Adapter->downloadDDR = 0;
- if (Status) {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Firm Download Failed\n");
- up(&Adapter->fw_download_sema);
- up(&Adapter->NVMRdmWrmLock);
- break;
- } else {
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
- DBG_LVL_ALL, "Firm Download Over...\n");
- }
-
- mdelay(10);
-
- /* Wait for MailBox Interrupt */
- if (StartInterruptUrb((PS_INTERFACE_ADAPTER)Adapter->pvInterfaceAdapter))
- BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Unable to send interrupt...\n");
-
- timeout = 5*HZ;
- Adapter->waiting_to_fw_download_done = FALSE;
- wait_event_timeout(Adapter->ioctl_fw_dnld_wait_queue,
- Adapter->waiting_to_fw_download_done, timeout);
- Adapter->fw_download_process_pid = INVALID_PID;
- Adapter->fw_download_done = TRUE;
- atomic_set(&Adapter->CurrNumFreeTxDesc, 0);
- Adapter->CurrNumRecvDescs = 0;
- Adapter->PrevNumRecvDescs = 0;
- atomic_set(&Adapter->cntrlpktCnt, 0);
- Adapter->LinkUpStatus = 0;
- Adapter->LinkStatus = 0;
+ /* setting the Mips to Run */
+ Status = run_card_proc(Adapter);
- if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
- Adapter->DriverState = FW_DOWNLOAD_DONE;
- wake_up(&Adapter->LEDInfo.notify_led_event);
- }
-
- if (!timeout)
- Status = -ENODEV;
+ if (Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Firm Download Failed\n");
+ up(&Adapter->fw_download_sema);
+ up(&Adapter->NVMRdmWrmLock);
+ return Status;
} else {
- Status = -EINVAL;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG,
+ DBG_LVL_ALL, "Firm Download Over...\n");
+ }
+
+ mdelay(10);
+
+ /* Wait for MailBox Interrupt */
+ if (StartInterruptUrb((PS_INTERFACE_ADAPTER)Adapter->pvInterfaceAdapter))
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Unable to send interrupt...\n");
+
+ timeout = 5*HZ;
+ Adapter->waiting_to_fw_download_done = FALSE;
+ wait_event_timeout(Adapter->ioctl_fw_dnld_wait_queue,
+ Adapter->waiting_to_fw_download_done, timeout);
+ Adapter->fw_download_process_pid = INVALID_PID;
+ Adapter->fw_download_done = TRUE;
+ atomic_set(&Adapter->CurrNumFreeTxDesc, 0);
+ Adapter->CurrNumRecvDescs = 0;
+ Adapter->PrevNumRecvDescs = 0;
+ atomic_set(&Adapter->cntrlpktCnt, 0);
+ Adapter->LinkUpStatus = 0;
+ Adapter->LinkStatus = 0;
+
+ if (Adapter->LEDInfo.led_thread_running & BCM_LED_THREAD_RUNNING_ACTIVELY) {
+ Adapter->DriverState = FW_DOWNLOAD_DONE;
+ wake_up(&Adapter->LEDInfo.notify_led_event);
}
+ if (!timeout)
+ Status = -ENODEV;
+
up(&Adapter->fw_download_sema);
up(&Adapter->NVMRdmWrmLock);
- break;
+ return Status;
}
case IOCTL_BE_BUCKET_SIZE:
@@ -969,11 +999,15 @@ cntrlEnd:
}
case IOCTL_BCM_GET_DRIVER_VERSION: {
+ ulong len;
+
/* Copy Ioctl Buffer structure */
if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
return -EFAULT;
- if (copy_to_user(IoBuffer.OutputBuffer, VER_FILEVERSION_STR, IoBuffer.OutputLength))
+ len = min_t(ulong, IoBuffer.OutputLength, strlen(VER_FILEVERSION_STR) + 1);
+
+ if (copy_to_user(IoBuffer.OutputBuffer, VER_FILEVERSION_STR, len))
return -EFAULT;
Status = STATUS_SUCCESS;
break;
@@ -985,8 +1019,7 @@ cntrlEnd:
/* Copy Ioctl Buffer structure */
if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "copy_from_user failed..\n");
- Status = -EFAULT;
- break;
+ return -EFAULT;
}
if (IoBuffer.OutputLength != sizeof(link_state)) {
@@ -1001,8 +1034,7 @@ cntrlEnd:
if (copy_to_user(IoBuffer.OutputBuffer, &link_state, min_t(size_t, sizeof(link_state), IoBuffer.OutputLength))) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy_to_user Failed..\n");
- Status = -EFAULT;
- break;
+ return -EFAULT;
}
Status = STATUS_SUCCESS;
break;
@@ -1068,8 +1100,10 @@ cntrlEnd:
GetDroppedAppCntrlPktMibs(temp_buff, pTarang);
if (Status != STATUS_FAILURE)
- if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, sizeof(S_MIBS_HOST_STATS_MIBS)))
- Status = -EFAULT;
+ if (copy_to_user(IoBuffer.OutputBuffer, temp_buff, sizeof(S_MIBS_HOST_STATS_MIBS))) {
+ kfree(temp_buff);
+ return -EFAULT;
+ }
kfree(temp_buff);
break;
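The change above frees temp_buff on the copy_to_user() failure path as well as on success. The same intent can also be expressed with a single release point, roughly as below; the struct and helper are placeholders, not the driver's real definitions.

#include <linux/slab.h>
#include <linux/uaccess.h>

struct host_stats { unsigned long good_tx, good_rx; };	/* placeholder layout */

static int give_stats_to_user(void __user *dst)
{
	struct host_stats *tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	int ret = 0;

	if (!tmp)
		return -ENOMEM;
	/* ... fill *tmp from driver state ... */
	if (copy_to_user(dst, tmp, sizeof(*tmp)))
		ret = -EFAULT;

	kfree(tmp);	/* single release point for both outcomes */
	return ret;
}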
@@ -1103,7 +1137,9 @@ cntrlEnd:
if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
return -EFAULT;
- /* FIXME: restrict length */
+ if (IoBuffer.InputLength < sizeof(ULONG) * 2)
+ return -EINVAL;
+
pvBuffer = kmalloc(IoBuffer.InputLength, GFP_KERNEL);
if (!pvBuffer)
return -ENOMEM;
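The added check rejects inputs too small to hold the two ULONG header words before kmalloc() is sized by user input. A sketch of the idea, using plain unsigned long in place of the driver's ULONG typedef:

#include <linux/slab.h>
#include <linux/err.h>

/* Reject lengths too small to carry the expected header before allocating. */
static void *alloc_wrm_buffer(unsigned long in_len)
{
	if (in_len < 2 * sizeof(unsigned long))
		return ERR_PTR(-EINVAL);
	return kmalloc(in_len, GFP_KERNEL);
}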
@@ -1111,8 +1147,7 @@ cntrlEnd:
/* Get WrmBuffer structure */
if (copy_from_user(pvBuffer, IoBuffer.InputBuffer, IoBuffer.InputLength)) {
kfree(pvBuffer);
- Status = -EFAULT;
- break;
+ return -EFAULT;
}
pBulkBuffer = (PBULKWRM_BUFFER)pvBuffer;
@@ -1242,8 +1277,7 @@ cntrlEnd:
memset(&tv1, 0, sizeof(struct timeval));
if ((Adapter->eNVMType == NVM_FLASH) && (Adapter->uiFlashLayoutMajorVersion == 0)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "The Flash Control Section is Corrupted. Hence Rejection on NVM Read/Write\n");
- Status = -EFAULT;
- break;
+ return -EFAULT;
}
if (IsFlash2x(Adapter)) {
@@ -1252,7 +1286,7 @@ cntrlEnd:
(Adapter->eActiveDSD != DSD2)) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No DSD is active..hence NVM Command is blocked");
- return STATUS_FAILURE ;
+ return STATUS_FAILURE;
}
}
@@ -1271,8 +1305,7 @@ cntrlEnd:
if ((stNVMReadWrite.uiOffset + stNVMReadWrite.uiNumBytes) > Adapter->uiNVMDSDSize) {
/* BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Can't allow access beyond NVM Size: 0x%x 0x%x\n", stNVMReadWrite.uiOffset, stNVMReadWrite.uiNumBytes); */
- Status = STATUS_FAILURE;
- break;
+ return STATUS_FAILURE;
}
pReadData = kzalloc(stNVMReadWrite.uiNumBytes, GFP_KERNEL);
@@ -1280,9 +1313,8 @@ cntrlEnd:
return -ENOMEM;
if (copy_from_user(pReadData, stNVMReadWrite.pBuffer, stNVMReadWrite.uiNumBytes)) {
- Status = -EFAULT;
kfree(pReadData);
- break;
+ return -EFAULT;
}
do_gettimeofday(&tv0);
@@ -1309,7 +1341,7 @@ cntrlEnd:
if (copy_to_user(stNVMReadWrite.pBuffer, pReadData, stNVMReadWrite.uiNumBytes)) {
kfree(pReadData);
- Status = -EFAULT;
+ return -EFAULT;
}
} else {
down(&Adapter->NVMRdmWrmLock);
@@ -1377,9 +1409,8 @@ cntrlEnd:
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, " timetaken by Write/read :%ld msec\n", (tv1.tv_sec - tv0.tv_sec)*1000 + (tv1.tv_usec - tv0.tv_usec)/1000);
kfree(pReadData);
- Status = STATUS_SUCCESS;
+ return STATUS_SUCCESS;
}
- break;
case IOCTL_BCM_FLASH2X_SECTION_READ: {
FLASH2X_READWRITE sFlash2xRead = {0};
@@ -1456,7 +1487,9 @@ cntrlEnd:
Status = copy_to_user(OutPutBuff, pReadBuff, ReadBytes);
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Copy to use failed with status :%d", Status);
- break;
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pReadBuff);
+ return -EFAULT;
}
NOB = NOB - ReadBytes;
if (NOB) {
@@ -1548,7 +1581,9 @@ cntrlEnd:
Status = copy_from_user(pWriteBuff, InputAddr, WriteBytes);
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy to user failed with status :%d", Status);
- break;
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pWriteBuff);
+ return -EFAULT;
}
BCM_DEBUG_PRINT_BUFFER(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, pWriteBuff, WriteBytes);
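Both the section-read hunk further up and this section-write hunk now drop NVMRdmWrmLock and free the staging buffer before returning -EFAULT from inside the chunked-copy loop. The same unwind written in the kernel's usual goto-cleanup style, as a simplified sketch with placeholder names:

#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

static int read_section_to_user(struct semaphore *nvm_lock,
				void __user *dst, size_t nob)
{
	void *buf;
	int ret = 0;

	down(nvm_lock);
	buf = kzalloc(nob, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_unlock;
	}
	/* ... read the flash section into buf ... */
	if (copy_to_user(dst, buf, nob))
		ret = -EFAULT;

	kfree(buf);
out_unlock:
	up(nvm_lock);
	return ret;
}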
@@ -1608,8 +1643,10 @@ cntrlEnd:
BcmGetFlash2xSectionalBitMap(Adapter, psFlash2xBitMap);
up(&Adapter->NVMRdmWrmLock);
- if (copy_to_user(IoBuffer.OutputBuffer, psFlash2xBitMap, sizeof(FLASH2X_BITMAP)))
- Status = -EFAULT;
+ if (copy_to_user(IoBuffer.OutputBuffer, psFlash2xBitMap, sizeof(FLASH2X_BITMAP))) {
+ kfree(psFlash2xBitMap);
+ return -EFAULT;
+ }
kfree(psFlash2xBitMap);
}
@@ -1627,13 +1664,13 @@ cntrlEnd:
Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
- return Status;
+ return -EFAULT;
}
Status = copy_from_user(&eFlash2xSectionVal, IoBuffer.InputBuffer, sizeof(INT));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of flash section val failed");
- return Status;
+ return -EFAULT;
}
down(&Adapter->NVMRdmWrmLock);
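This hunk and the similar ones that follow stop returning copy_from_user()'s result directly: it reports the number of bytes left uncopied, not an errno, so a partial copy would previously have surfaced to user space as a positive value. A minimal sketch of the corrected pattern; the request struct is a placeholder:

#include <linux/uaccess.h>

struct ioctl_request { void __user *in; unsigned long in_len; };	/* placeholder */

static int fetch_request(void __user *ubuf, struct ioctl_request *out)
{
	/* copy_from_user() returns bytes NOT copied; map any shortfall to -EFAULT. */
	if (copy_from_user(out, ubuf, sizeof(*out)))
		return -EFAULT;
	return 0;
}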
@@ -1677,13 +1714,13 @@ cntrlEnd:
Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed Status :%d", Status);
- return Status;
+ return -EFAULT;
}
Status = copy_from_user(&sCopySectStrut, IoBuffer.InputBuffer, sizeof(FLASH2X_COPY_SECTION));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of Copy_Section_Struct failed with Status :%d", Status);
- return Status;
+ return -EFAULT;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Source SEction :%x", sCopySectStrut.SrcSection);
@@ -1744,7 +1781,7 @@ cntrlEnd:
Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
- break;
+ return -EFAULT;
}
if (Adapter->eNVMType != NVM_FLASH) {
@@ -1783,12 +1820,12 @@ cntrlEnd:
Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of IOCTL BUFFER failed");
- return Status;
+ return -EFAULT;
}
Status = copy_from_user(&eFlash2xSectionVal, IoBuffer.InputBuffer, sizeof(INT));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy of flash section val failed");
- return Status;
+ return -EFAULT;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "Read Section :%d", eFlash2xSectionVal);
@@ -1830,8 +1867,7 @@ cntrlEnd:
/* Copy Ioctl Buffer structure */
if (copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER))) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "copy_from_user 1 failed\n");
- Status = -EFAULT;
- break;
+ return -EFAULT;
}
if (copy_from_user(&stNVMRead, IoBuffer.OutputBuffer, sizeof(NVM_READWRITE)))
@@ -1886,7 +1922,9 @@ cntrlEnd:
Status = copy_to_user(OutPutBuff, pReadBuff, ReadBytes);
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Copy to use failed with status :%d", Status);
- break;
+ up(&Adapter->NVMRdmWrmLock);
+ kfree(pReadBuff);
+ return -EFAULT;
}
NOB = NOB - ReadBytes;
if (NOB) {
@@ -1907,8 +1945,7 @@ cntrlEnd:
Status = copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER));
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "copy of Ioctl buffer is failed from user space");
- Status = -EFAULT;
- break;
+ return -EFAULT;
}
if (IoBuffer.InputLength != sizeof(unsigned long)) {
@@ -1919,8 +1956,7 @@ cntrlEnd:
Status = copy_from_user(&RxCntrlMsgBitMask, IoBuffer.InputBuffer, IoBuffer.InputLength);
if (Status) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "copy of control bit mask failed from user space");
- Status = -EFAULT;
- break;
+ return -EFAULT;
}
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, OSAL_DBG, DBG_LVL_ALL, "\n Got user defined cntrl msg bit mask :%lx", RxCntrlMsgBitMask);
pTarang->RxCntrlMsgBitMask = RxCntrlMsgBitMask;
diff --git a/drivers/staging/bcm/HandleControlPacket.c b/drivers/staging/bcm/HandleControlPacket.c
index 2b1e9e1..b058e30 100644
--- a/drivers/staging/bcm/HandleControlPacket.c
+++ b/drivers/staging/bcm/HandleControlPacket.c
@@ -1,195 +1,208 @@
/**
-@file HandleControlPacket.c
-This file contains the routines to deal with
-sending and receiving of control packets.
-*/
+ * @file HandleControlPacket.c
+ * This file contains the routines to deal with
+ * sending and receiving of control packets.
+ */
#include "headers.h"
/**
-When a control packet is received, analyze the
-"status" and call appropriate response function.
-Enqueue the control packet for Application.
-@return None
-*/
+ * When a control packet is received, analyze the
+ * "status" and call appropriate response function.
+ * Enqueue the control packet for Application.
+ * @return None
+ */
static VOID handle_rx_control_packet(PMINI_ADAPTER Adapter, struct sk_buff *skb)
{
- PPER_TARANG_DATA pTarang = NULL;
+ PPER_TARANG_DATA pTarang = NULL;
BOOLEAN HighPriorityMessage = FALSE;
- struct sk_buff * newPacket = NULL;
+ struct sk_buff *newPacket = NULL;
CHAR cntrl_msg_mask_bit = 0;
- BOOLEAN drop_pkt_flag = TRUE ;
+ BOOLEAN drop_pkt_flag = TRUE;
USHORT usStatus = *(PUSHORT)(skb->data);
if (netif_msg_pktdata(Adapter))
print_hex_dump(KERN_DEBUG, PFX "rx control: ", DUMP_PREFIX_NONE,
- 16, 1, skb->data, skb->len, 0);
-
- switch(usStatus)
- {
- case CM_RESPONSES: // 0xA0
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL, "MAC Version Seems to be Non Multi-Classifier, rejected by Driver");
- HighPriorityMessage = TRUE ;
- break;
- case CM_CONTROL_NEWDSX_MULTICLASSIFIER_RESP:
- HighPriorityMessage = TRUE ;
- if(Adapter->LinkStatus==LINKUP_DONE)
- {
- CmControlResponseMessage(Adapter,(skb->data +sizeof(USHORT)));
- }
- break;
- case LINK_CONTROL_RESP: //0xA2
- case STATUS_RSP: //0xA1
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL,"LINK_CONTROL_RESP");
- HighPriorityMessage = TRUE ;
- LinkControlResponseMessage(Adapter,(skb->data + sizeof(USHORT)));
- break;
- case STATS_POINTER_RESP: //0xA6
- HighPriorityMessage = TRUE ;
- StatisticsResponse(Adapter, (skb->data + sizeof(USHORT)));
- break;
- case IDLE_MODE_STATUS: //0xA3
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL,"IDLE_MODE_STATUS Type Message Got from F/W");
- InterfaceIdleModeRespond(Adapter, (PUINT)(skb->data +
- sizeof(USHORT)));
- HighPriorityMessage = TRUE ;
- break;
-
- case AUTH_SS_HOST_MSG:
- HighPriorityMessage = TRUE ;
- break;
-
- default:
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL,"Got Default Response");
- /* Let the Application Deal with This Packet */
- break;
+ 16, 1, skb->data, skb->len, 0);
+
+ switch (usStatus) {
+ case CM_RESPONSES: /* 0xA0 */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
+ DBG_LVL_ALL,
+ "MAC Version Seems to be Non Multi-Classifier, rejected by Driver");
+ HighPriorityMessage = TRUE;
+ break;
+ case CM_CONTROL_NEWDSX_MULTICLASSIFIER_RESP:
+ HighPriorityMessage = TRUE;
+ if (Adapter->LinkStatus == LINKUP_DONE)
+ CmControlResponseMessage(Adapter,
+ (skb->data + sizeof(USHORT)));
+ break;
+ case LINK_CONTROL_RESP: /* 0xA2 */
+ case STATUS_RSP: /* 0xA1 */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
+ DBG_LVL_ALL, "LINK_CONTROL_RESP");
+ HighPriorityMessage = TRUE;
+ LinkControlResponseMessage(Adapter,
+ (skb->data + sizeof(USHORT)));
+ break;
+ case STATS_POINTER_RESP: /* 0xA6 */
+ HighPriorityMessage = TRUE;
+ StatisticsResponse(Adapter, (skb->data + sizeof(USHORT)));
+ break;
+ case IDLE_MODE_STATUS: /* 0xA3 */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
+ DBG_LVL_ALL,
+ "IDLE_MODE_STATUS Type Message Got from F/W");
+ InterfaceIdleModeRespond(Adapter, (PUINT)(skb->data +
+ sizeof(USHORT)));
+ HighPriorityMessage = TRUE;
+ break;
+
+ case AUTH_SS_HOST_MSG:
+ HighPriorityMessage = TRUE;
+ break;
+
+ default:
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
+ DBG_LVL_ALL, "Got Default Response");
+ /* Let the Application Deal with This Packet */
+ break;
}
- //Queue The Control Packet to The Application Queues
+ /* Queue The Control Packet to The Application Queues */
down(&Adapter->RxAppControlQueuelock);
- for (pTarang = Adapter->pTarangs; pTarang; pTarang = pTarang->next)
- {
- if(Adapter->device_removed)
- {
+ for (pTarang = Adapter->pTarangs; pTarang; pTarang = pTarang->next) {
+ if (Adapter->device_removed)
break;
- }
- drop_pkt_flag = TRUE ;
+ drop_pkt_flag = TRUE;
/*
- There are cntrl msg from A0 to AC. It has been mapped to 0 to C bit in the cntrl mask.
- Also, by default AD to BF has been masked to the rest of the bits... which wil be ON by default.
- if mask bit is enable to particular pkt status, send it out to app else stop it.
- */
+		 * Control messages 0xA0 to 0xAC map to bits 0 to 0xC of the
+		 * control mask. The remaining statuses, 0xAD to 0xBF, map to
+		 * the rest of the bits, which are ON by default.
+		 * If the mask bit for a particular packet status is enabled,
+		 * send it out to the app; otherwise drop it.
+		 */
cntrl_msg_mask_bit = (usStatus & 0x1F);
- //printk("\ninew msg mask bit which is disable in mask:%X", cntrl_msg_mask_bit);
- if(pTarang->RxCntrlMsgBitMask & (1<<cntrl_msg_mask_bit))
- drop_pkt_flag = FALSE;
-
- if ((drop_pkt_flag == TRUE) || (pTarang->AppCtrlQueueLen > MAX_APP_QUEUE_LEN) ||
- ((pTarang->AppCtrlQueueLen > MAX_APP_QUEUE_LEN/2) && (HighPriorityMessage == FALSE)))
- {
+ /*
+ * printk("\ninew msg mask bit which is disable in mask:%X",
+ * cntrl_msg_mask_bit);
+ */
+ if (pTarang->RxCntrlMsgBitMask & (1 << cntrl_msg_mask_bit))
+ drop_pkt_flag = FALSE;
+
+ if ((drop_pkt_flag == TRUE) ||
+ (pTarang->AppCtrlQueueLen > MAX_APP_QUEUE_LEN)
+ || ((pTarang->AppCtrlQueueLen >
+ MAX_APP_QUEUE_LEN / 2) &&
+ (HighPriorityMessage == FALSE))) {
/*
- Assumption:-
- 1. every tarang manages it own dropped pkt statitistics
- 2. Total packet dropped per tarang will be equal to the sum of all types of dropped
- pkt by that tarang only.
-
- */
- switch(*(PUSHORT)skb->data)
- {
- case CM_RESPONSES:
- pTarang->stDroppedAppCntrlMsgs.cm_responses++;
- break;
- case CM_CONTROL_NEWDSX_MULTICLASSIFIER_RESP:
- pTarang->stDroppedAppCntrlMsgs.cm_control_newdsx_multiclassifier_resp++;
- break;
- case LINK_CONTROL_RESP:
- pTarang->stDroppedAppCntrlMsgs.link_control_resp++;
- break;
- case STATUS_RSP:
- pTarang->stDroppedAppCntrlMsgs.status_rsp++;
- break;
- case STATS_POINTER_RESP:
- pTarang->stDroppedAppCntrlMsgs.stats_pointer_resp++;
- break;
- case IDLE_MODE_STATUS:
- pTarang->stDroppedAppCntrlMsgs.idle_mode_status++ ;
- break;
- case AUTH_SS_HOST_MSG:
- pTarang->stDroppedAppCntrlMsgs.auth_ss_host_msg++ ;
- break;
+			 * Assumptions:
+			 * 1. Every tarang manages its own dropped-packet
+			 *    statistics.
+			 * 2. Total packets dropped per tarang equal the sum
+			 *    of all types of packets dropped by that tarang
+			 *    only.
+			 */
+ switch (*(PUSHORT)skb->data) {
+ case CM_RESPONSES:
+ pTarang->stDroppedAppCntrlMsgs.cm_responses++;
+ break;
+ case CM_CONTROL_NEWDSX_MULTICLASSIFIER_RESP:
+ pTarang->stDroppedAppCntrlMsgs.cm_control_newdsx_multiclassifier_resp++;
+ break;
+ case LINK_CONTROL_RESP:
+ pTarang->stDroppedAppCntrlMsgs.link_control_resp++;
+ break;
+ case STATUS_RSP:
+ pTarang->stDroppedAppCntrlMsgs.status_rsp++;
+ break;
+ case STATS_POINTER_RESP:
+ pTarang->stDroppedAppCntrlMsgs.stats_pointer_resp++;
+ break;
+ case IDLE_MODE_STATUS:
+ pTarang->stDroppedAppCntrlMsgs.idle_mode_status++;
+ break;
+ case AUTH_SS_HOST_MSG:
+ pTarang->stDroppedAppCntrlMsgs.auth_ss_host_msg++;
+ break;
default:
- pTarang->stDroppedAppCntrlMsgs.low_priority_message++ ;
- break;
+ pTarang->stDroppedAppCntrlMsgs.low_priority_message++;
+ break;
}
continue;
}
- newPacket = skb_clone(skb, GFP_KERNEL);
- if (!newPacket)
- break;
- ENQUEUEPACKET(pTarang->RxAppControlHead,pTarang->RxAppControlTail,
- newPacket);
- pTarang->AppCtrlQueueLen++;
- }
+ newPacket = skb_clone(skb, GFP_KERNEL);
+ if (!newPacket)
+ break;
+ ENQUEUEPACKET(pTarang->RxAppControlHead,
+ pTarang->RxAppControlTail, newPacket);
+ pTarang->AppCtrlQueueLen++;
+ }
up(&Adapter->RxAppControlQueuelock);
- wake_up(&Adapter->process_read_wait_queue);
- dev_kfree_skb(skb);
- BCM_DEBUG_PRINT( Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL, "After wake_up_interruptible");
+ wake_up(&Adapter->process_read_wait_queue);
+ dev_kfree_skb(skb);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL,
+ "After wake_up_interruptible");
}
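The comment in handle_rx_control_packet() above describes how statuses 0xA0 through 0xAC select bits 0 through 0xC of the per-application RxCntrlMsgBitMask. In isolation the check looks roughly like this; the helper name is illustrative:

#include <linux/types.h>

/* Statuses 0xA0..0xAC map to mask bits 0..0xC via their low five bits. */
static bool ctrl_msg_allowed(unsigned long rx_mask, unsigned short status)
{
	unsigned char bit = status & 0x1F;	/* 0xA0 -> 0, 0xA1 -> 1, ... 0xAC -> 0xC */

	return rx_mask & (1UL << bit);
}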
/**
-@ingroup ctrl_pkt_functions
-Thread to handle control pkt reception
-*/
-int control_packet_handler (PMINI_ADAPTER Adapter /**< pointer to adapter object*/
- )
+ * @ingroup ctrl_pkt_functions
+ * Thread to handle control pkt reception
+ */
+int control_packet_handler(PMINI_ADAPTER Adapter /* pointer to adapter object*/)
{
- struct sk_buff *ctrl_packet= NULL;
+ struct sk_buff *ctrl_packet = NULL;
unsigned long flags = 0;
- //struct timeval tv ;
- //int *puiBuffer = NULL ;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL, "Entering to make thread wait on control packet event!");
- while(1)
- {
+ /* struct timeval tv; */
+ /* int *puiBuffer = NULL; */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL,
+ "Entering to make thread wait on control packet event!");
+ while (1) {
wait_event_interruptible(Adapter->process_rx_cntrlpkt,
- atomic_read(&Adapter->cntrlpktCnt) ||
- Adapter->bWakeUpDevice ||
- kthread_should_stop()
- );
+ atomic_read(&Adapter->cntrlpktCnt) ||
+ Adapter->bWakeUpDevice ||
+ kthread_should_stop());
- if(kthread_should_stop())
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL, "Exiting \n");
+ if (kthread_should_stop()) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, CP_CTRL_PKT,
+ DBG_LVL_ALL, "Exiting\n");
return 0;
}
- if(TRUE == Adapter->bWakeUpDevice)
- {
+ if (TRUE == Adapter->bWakeUpDevice) {
Adapter->bWakeUpDevice = FALSE;
- if((FALSE == Adapter->bTriedToWakeUpFromlowPowerMode) &&
- ((TRUE == Adapter->IdleMode)|| (TRUE == Adapter->bShutStatus)))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, CP_CTRL_PKT, DBG_LVL_ALL, "Calling InterfaceAbortIdlemode\n");
- // Adapter->bTriedToWakeUpFromlowPowerMode = TRUE;
- InterfaceIdleModeWakeup (Adapter);
+ if ((FALSE == Adapter->bTriedToWakeUpFromlowPowerMode)
+ && ((TRUE == Adapter->IdleMode) ||
+ (TRUE == Adapter->bShutStatus))) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ CP_CTRL_PKT, DBG_LVL_ALL,
+ "Calling InterfaceAbortIdlemode\n");
+ /*
+ * Adapter->bTriedToWakeUpFromlowPowerMode
+ * = TRUE;
+ */
+ InterfaceIdleModeWakeup(Adapter);
}
continue;
}
- while(atomic_read(&Adapter->cntrlpktCnt))
- {
+ while (atomic_read(&Adapter->cntrlpktCnt)) {
spin_lock_irqsave(&Adapter->control_queue_lock, flags);
ctrl_packet = Adapter->RxControlHead;
- if(ctrl_packet)
- {
- DEQUEUEPACKET(Adapter->RxControlHead,Adapter->RxControlTail);
-// Adapter->RxControlHead=ctrl_packet->next;
+ if (ctrl_packet) {
+ DEQUEUEPACKET(Adapter->RxControlHead,
+ Adapter->RxControlTail);
+ /* Adapter->RxControlHead=ctrl_packet->next; */
}
- spin_unlock_irqrestore (&Adapter->control_queue_lock, flags);
- handle_rx_control_packet(Adapter, ctrl_packet);
+ spin_unlock_irqrestore(&Adapter->control_queue_lock,
+ flags);
+ handle_rx_control_packet(Adapter, ctrl_packet);
atomic_dec(&Adapter->cntrlpktCnt);
}
@@ -201,22 +214,22 @@ int control_packet_handler (PMINI_ADAPTER Adapter /**< pointer to adapter obje
INT flushAllAppQ(void)
{
PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
- PPER_TARANG_DATA pTarang = NULL;
+ PPER_TARANG_DATA pTarang = NULL;
struct sk_buff *PacketToDrop = NULL;
- for(pTarang = Adapter->pTarangs; pTarang; pTarang = pTarang->next)
- {
- while(pTarang->RxAppControlHead != NULL)
- {
- PacketToDrop=pTarang->RxAppControlHead;
- DEQUEUEPACKET(pTarang->RxAppControlHead,pTarang->RxAppControlTail);
+ for (pTarang = Adapter->pTarangs; pTarang; pTarang = pTarang->next) {
+ while (pTarang->RxAppControlHead != NULL) {
+ PacketToDrop = pTarang->RxAppControlHead;
+ DEQUEUEPACKET(pTarang->RxAppControlHead,
+ pTarang->RxAppControlTail);
dev_kfree_skb(PacketToDrop);
}
pTarang->AppCtrlQueueLen = 0;
- //dropped contrl packet statistics also should be reset.
- memset((PVOID)&pTarang->stDroppedAppCntrlMsgs, 0, sizeof(S_MIBS_DROPPED_APP_CNTRL_MESSAGES));
+		/* Dropped control packet statistics should also be reset. */
+ memset((PVOID)&pTarang->stDroppedAppCntrlMsgs, 0,
+ sizeof(S_MIBS_DROPPED_APP_CNTRL_MESSAGES));
}
- return STATUS_SUCCESS ;
+ return STATUS_SUCCESS;
}
diff --git a/drivers/staging/bcm/InterfaceDld.c b/drivers/staging/bcm/InterfaceDld.c
index bcd86bb..65c352f 100644
--- a/drivers/staging/bcm/InterfaceDld.c
+++ b/drivers/staging/bcm/InterfaceDld.c
@@ -62,6 +62,7 @@ int InterfaceFileReadbackFromChip(PVOID arg, struct file *flp, unsigned int on_c
static int fw_down;
INT Status = STATUS_SUCCESS;
PS_INTERFACE_ADAPTER psIntfAdapter = (PS_INTERFACE_ADAPTER)arg;
+ int bytes;
buff = kmalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_DMA);
buff_readback = kmalloc(MAX_TRANSFER_CTRL_BYTE_USB , GFP_DMA);
@@ -94,8 +95,9 @@ int InterfaceFileReadbackFromChip(PVOID arg, struct file *flp, unsigned int on_c
break;
}
- Status = InterfaceRDM(psIntfAdapter, on_chip_loc, buff_readback, len);
- if (Status) {
+ bytes = InterfaceRDM(psIntfAdapter, on_chip_loc, buff_readback, len);
+ if (bytes < 0) {
+ Status = bytes;
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "RDM of len %d Failed! %d", len, reg);
goto exit;
}
@@ -302,6 +304,7 @@ static INT buffRdbkVerify(PMINI_ADAPTER Adapter, PUCHAR mappedbuffer, UINT u32Fi
UINT len = u32FirmwareLength;
INT retval = STATUS_SUCCESS;
PUCHAR readbackbuff = kzalloc(MAX_TRANSFER_CTRL_BYTE_USB, GFP_KERNEL);
+ int bytes;
if (NULL == readbackbuff) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "MEMORY ALLOCATION FAILED");
@@ -310,9 +313,10 @@ static INT buffRdbkVerify(PMINI_ADAPTER Adapter, PUCHAR mappedbuffer, UINT u32Fi
while (u32FirmwareLength && !retval) {
len = MIN_VAL(u32FirmwareLength, MAX_TRANSFER_CTRL_BYTE_USB);
- retval = rdm(Adapter, u32StartingAddress, readbackbuff, len);
+ bytes = rdm(Adapter, u32StartingAddress, readbackbuff, len);
- if (retval) {
+ if (bytes < 0) {
+ retval = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "rdm failed with status %d", retval);
break;
}
diff --git a/drivers/staging/bcm/InterfaceIdleMode.c b/drivers/staging/bcm/InterfaceIdleMode.c
index 96fa4ea..faeb03e 100644
--- a/drivers/staging/bcm/InterfaceIdleMode.c
+++ b/drivers/staging/bcm/InterfaceIdleMode.c
@@ -46,6 +46,7 @@ int InterfaceIdleModeRespond(PMINI_ADAPTER Adapter, unsigned int* puiBuffer)
{
int status = STATUS_SUCCESS;
unsigned int uiRegRead = 0;
+ int bytes;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL,"SubType of Message :0x%X", ntohl(*puiBuffer));
@@ -77,16 +78,16 @@ int InterfaceIdleModeRespond(PMINI_ADAPTER Adapter, unsigned int* puiBuffer)
else if(Adapter->ulPowerSaveMode != DEVICE_POWERSAVE_MODE_AS_PROTOCOL_IDLE_MODE)
{
//clear on read Register
- status = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG0, &uiRegRead, sizeof(uiRegRead));
- if(status)
- {
+ bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG0, &uiRegRead, sizeof(uiRegRead));
+ if (bytes < 0) {
+ status = bytes;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "rdm failed while clearing H/W Abort Reg0");
return status;
}
//clear on read Register
- status = rdmalt (Adapter, DEVICE_INT_OUT_EP_REG1, &uiRegRead, sizeof(uiRegRead));
- if(status)
- {
+ bytes = rdmalt(Adapter, DEVICE_INT_OUT_EP_REG1, &uiRegRead, sizeof(uiRegRead));
+ if (bytes < 0) {
+ status = bytes;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "rdm failed while clearing H/W Abort Reg1");
return status;
}
@@ -117,9 +118,9 @@ int InterfaceIdleModeRespond(PMINI_ADAPTER Adapter, unsigned int* puiBuffer)
Adapter->chip_id== BCS220_3)
{
- status = rdmalt(Adapter, HPM_CONFIG_MSW, &uiRegRead, sizeof(uiRegRead));
- if(status)
- {
+ bytes = rdmalt(Adapter, HPM_CONFIG_MSW, &uiRegRead, sizeof(uiRegRead));
+ if (bytes < 0) {
+ status = bytes;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, IDLE_MODE, DBG_LVL_ALL, "rdm failed while Reading HPM_CONFIG_LDO145 Reg 0\n");
return status;
}
@@ -266,6 +267,8 @@ void InterfaceHandleShutdownModeWakeup(PMINI_ADAPTER Adapter)
{
unsigned int uiRegVal = 0;
INT Status = 0;
+ int bytes;
+
if(Adapter->ulPowerSaveMode == DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING)
{
// clear idlemode interrupt.
@@ -282,16 +285,16 @@ void InterfaceHandleShutdownModeWakeup(PMINI_ADAPTER Adapter)
{
//clear Interrupt EP registers.
- Status = rdmalt(Adapter,DEVICE_INT_OUT_EP_REG0, &uiRegVal, sizeof(uiRegVal));
- if(Status)
- {
+ bytes = rdmalt(Adapter,DEVICE_INT_OUT_EP_REG0, &uiRegVal, sizeof(uiRegVal));
+ if (bytes < 0) {
+ Status = bytes;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"RDM of DEVICE_INT_OUT_EP_REG0 failed with Err :%d", Status);
return;
}
- Status = rdmalt(Adapter,DEVICE_INT_OUT_EP_REG1, &uiRegVal, sizeof(uiRegVal));
- if(Status)
- {
+ bytes = rdmalt(Adapter,DEVICE_INT_OUT_EP_REG1, &uiRegVal, sizeof(uiRegVal));
+ if (bytes < 0) {
+ Status = bytes;
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"RDM of DEVICE_INT_OUT_EP_REG1 failed with Err :%d", Status);
return;
}
diff --git a/drivers/staging/bcm/InterfaceInit.c b/drivers/staging/bcm/InterfaceInit.c
index a09d351..8e3c586 100644
--- a/drivers/staging/bcm/InterfaceInit.c
+++ b/drivers/staging/bcm/InterfaceInit.c
@@ -68,7 +68,7 @@ static void InterfaceAdapterFree(PS_INTERFACE_ADAPTER psIntfAdapter)
static void ConfigureEndPointTypesThroughEEPROM(PMINI_ADAPTER Adapter)
{
unsigned long ulReg = 0;
- int ret;
+ int bytes;
/* Program EP2 MAX_PKT_SIZE */
ulReg = ntohl(EP2_MPS_REG);
@@ -94,8 +94,8 @@ static void ConfigureEndPointTypesThroughEEPROM(PMINI_ADAPTER Adapter)
BeceemEEPROMBulkWrite(Adapter, (PUCHAR)&ulReg, 0x140, 4, TRUE);
/* Program TX EP as interrupt(Alternate Setting) */
- ret = rdmalt(Adapter, 0x0F0110F8, (u32 *)&ulReg, sizeof(u32));
- if (ret) {
+ bytes = rdmalt(Adapter, 0x0F0110F8, (u32 *)&ulReg, sizeof(u32));
+ if (bytes < 0) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, DRV_ENTRY, DBG_LVL_ALL,
"reading of Tx EP failed\n");
return;
@@ -430,6 +430,7 @@ static int InterfaceAdapterInit(PS_INTERFACE_ADAPTER psIntfAdapter)
int usedIntOutForBulkTransfer = 0 ;
BOOLEAN bBcm16 = FALSE;
UINT uiData = 0;
+ int bytes;
/* Store the usb dev into interface adapter */
psIntfAdapter->udev = usb_get_dev(interface_to_usbdev(psIntfAdapter->interface));
@@ -438,9 +439,10 @@ static int InterfaceAdapterInit(PS_INTERFACE_ADAPTER psIntfAdapter)
psIntfAdapter->psAdapter->interface_rdm = BcmRDM;
psIntfAdapter->psAdapter->interface_wrm = BcmWRM;
- retval = rdmalt(psIntfAdapter->psAdapter, CHIP_ID_REG,
+ bytes = rdmalt(psIntfAdapter->psAdapter, CHIP_ID_REG,
(u32 *)&(psIntfAdapter->psAdapter->chip_id), sizeof(u32));
- if (retval) {
+ if (bytes < 0) {
+ retval = bytes;
BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_PRINTK, 0, 0, "CHIP ID Read Failed\n");
return retval;
}
diff --git a/drivers/staging/bcm/InterfaceMisc.c b/drivers/staging/bcm/InterfaceMisc.c
index 61f878b..2218fae 100644
--- a/drivers/staging/bcm/InterfaceMisc.c
+++ b/drivers/staging/bcm/InterfaceMisc.c
@@ -5,7 +5,7 @@ INT InterfaceRDM(PS_INTERFACE_ADAPTER psIntfAdapter,
PVOID buff,
INT len)
{
- int retval = 0;
+ int bytes;
USHORT usRetries = 0;
if (psIntfAdapter == NULL) {
@@ -30,7 +30,7 @@ INT InterfaceRDM(PS_INTERFACE_ADAPTER psIntfAdapter,
psIntfAdapter->psAdapter->DeviceAccess = TRUE;
do {
- retval = usb_control_msg(psIntfAdapter->udev,
+ bytes = usb_control_msg(psIntfAdapter->udev,
usb_rcvctrlpipe(psIntfAdapter->udev, 0),
0x02,
0xC2,
@@ -41,22 +41,20 @@ INT InterfaceRDM(PS_INTERFACE_ADAPTER psIntfAdapter,
5000);
usRetries++;
- if (-ENODEV == retval) {
+ if (-ENODEV == bytes) {
psIntfAdapter->psAdapter->device_removed = TRUE;
break;
}
- } while ((retval < 0) && (usRetries < MAX_RDM_WRM_RETIRES));
+ } while ((bytes < 0) && (usRetries < MAX_RDM_WRM_RETIRES));
- if (retval < 0) {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, RDM, DBG_LVL_ALL, "RDM failed status :%d, retires :%d", retval, usRetries);
- psIntfAdapter->psAdapter->DeviceAccess = FALSE;
- return retval;
- } else {
- BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, RDM, DBG_LVL_ALL, "RDM sent %d", retval);
- psIntfAdapter->psAdapter->DeviceAccess = FALSE;
- return STATUS_SUCCESS;
- }
+ if (bytes < 0)
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, RDM, DBG_LVL_ALL, "RDM failed status :%d, retires :%d", bytes, usRetries);
+ else
+ BCM_DEBUG_PRINT(psIntfAdapter->psAdapter, DBG_TYPE_OTHERS, RDM, DBG_LVL_ALL, "RDM sent %d", bytes);
+
+ psIntfAdapter->psAdapter->DeviceAccess = FALSE;
+ return bytes;
}
INT InterfaceWRM(PS_INTERFACE_ADAPTER psIntfAdapter,
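With this change InterfaceRDM() passes through usb_control_msg()'s result: a negative errno on failure, otherwise the number of bytes transferred. That is why callers across the series switch from checking any nonzero return to checking for a negative one. A sketch of the caller-side pattern, assuming the driver's headers and a caller that does not need the byte count:

#include "headers.h"

static int read_chip_register(PS_INTERFACE_ADAPTER psIntfAdapter,
			      unsigned int addr, u32 *value)
{
	int bytes = InterfaceRDM(psIntfAdapter, addr, value, sizeof(*value));

	if (bytes < 0)
		return bytes;		/* negative errno from usb_control_msg() */
	return STATUS_SUCCESS;		/* bytes >= 0: transfer completed */
}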
diff --git a/drivers/staging/bcm/Misc.c b/drivers/staging/bcm/Misc.c
index e9f29d5..c7725e1 100644
--- a/drivers/staging/bcm/Misc.c
+++ b/drivers/staging/bcm/Misc.c
@@ -814,6 +814,7 @@ int reset_card_proc(PMINI_ADAPTER ps_adapter)
PMINI_ADAPTER Adapter = GET_BCM_ADAPTER(gblpnetdev);
PS_INTERFACE_ADAPTER psIntfAdapter = NULL;
unsigned int value = 0, uiResetValue = 0;
+ int bytes;
psIntfAdapter = ((PS_INTERFACE_ADAPTER)(ps_adapter->pvInterfaceAdapter));
ps_adapter->bDDRInitDone = FALSE;
@@ -848,8 +849,9 @@ int reset_card_proc(PMINI_ADAPTER ps_adapter)
ps_adapter->chip_id == BCS250_BC ||
ps_adapter->chip_id == BCS220_3) {
- retval = rdmalt(ps_adapter, HPM_CONFIG_LDO145, &value, sizeof(value));
- if (retval < 0) {
+ bytes = rdmalt(ps_adapter, HPM_CONFIG_LDO145, &value, sizeof(value));
+ if (bytes < 0) {
+ retval = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "read failed with status :%d", retval);
goto err_exit;
}
@@ -862,8 +864,9 @@ int reset_card_proc(PMINI_ADAPTER ps_adapter)
}
}
} else {
- retval = rdmalt(ps_adapter, 0x0f007018, &value, sizeof(value));
- if (retval < 0) {
+ bytes = rdmalt(ps_adapter, 0x0f007018, &value, sizeof(value));
+ if (bytes < 0) {
+ retval = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "read failed with status :%d", retval);
goto err_exit;
}
@@ -925,11 +928,16 @@ err_exit:
int run_card_proc(PMINI_ADAPTER ps_adapter)
{
+ int status = STATUS_SUCCESS;
+ int bytes;
+
unsigned int value = 0;
{
- if (rdmalt(ps_adapter, CLOCK_RESET_CNTRL_REG_1, &value, sizeof(value)) < 0) {
+ bytes = rdmalt(ps_adapter, CLOCK_RESET_CNTRL_REG_1, &value, sizeof(value));
+ if (bytes < 0) {
+ status = bytes;
BCM_DEBUG_PRINT(ps_adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "%s:%d\n", __func__, __LINE__);
- return STATUS_FAILURE;
+ return status;
}
if (ps_adapter->bFlashBoot)
@@ -942,7 +950,7 @@ int run_card_proc(PMINI_ADAPTER ps_adapter)
return STATUS_FAILURE;
}
}
- return STATUS_SUCCESS;
+ return status;
}
int InitCardAndDownloadFirmware(PMINI_ADAPTER ps_adapter)
@@ -1215,6 +1223,7 @@ static unsigned char *ReadMacAddrEEPROM(PMINI_ADAPTER Adapter, ulong dwAddress)
int status = 0, i = 0;
unsigned int temp = 0;
unsigned char *pucmacaddr = kmalloc(MAC_ADDRESS_SIZE, GFP_KERNEL);
+ int bytes;
if (!pucmacaddr) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "No Buffers to Read the EEPROM Address\n");
@@ -1231,8 +1240,9 @@ static unsigned char *ReadMacAddrEEPROM(PMINI_ADAPTER Adapter, ulong dwAddress)
}
for (i = 0; i < MAC_ADDRESS_SIZE; i++) {
- status = rdmalt(Adapter, EEPROM_READ_DATA_Q_REG, &temp, sizeof(temp));
- if (status != STATUS_SUCCESS) {
+ bytes = rdmalt(Adapter, EEPROM_READ_DATA_Q_REG, &temp, sizeof(temp));
+ if (bytes < 0) {
+ status = bytes;
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "rdm Failed..\n");
kfree(pucmacaddr);
pucmacaddr = NULL;
@@ -1574,11 +1584,13 @@ void update_per_sf_desc_cnts(PMINI_ADAPTER Adapter)
{
INT iIndex = 0;
u32 uibuff[MAX_TARGET_DSX_BUFFERS];
+ int bytes;
if (!atomic_read(&Adapter->uiMBupdate))
return;
- if (rdmaltWithLock(Adapter, TARGET_SFID_TXDESC_MAP_LOC, (PUINT)uibuff, sizeof(UINT) * MAX_TARGET_DSX_BUFFERS) < 0) {
+ bytes = rdmaltWithLock(Adapter, TARGET_SFID_TXDESC_MAP_LOC, (PUINT)uibuff, sizeof(UINT) * MAX_TARGET_DSX_BUFFERS);
+ if (bytes < 0) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "rdm failed\n");
return;
}
diff --git a/drivers/staging/bcm/hostmibs.c b/drivers/staging/bcm/hostmibs.c
index c13ea5c..101c4e3 100644
--- a/drivers/staging/bcm/hostmibs.c
+++ b/drivers/staging/bcm/hostmibs.c
@@ -1,4 +1,3 @@
-
/*
* File Name: hostmibs.c
*
@@ -6,73 +5,72 @@
*
* Abstract: This file contains the routines to copy the statistics used by
* the driver to the Host MIBS structure and giving the same to Application.
- *
*/
+
#include "headers.h"
-INT ProcessGetHostMibs(PMINI_ADAPTER Adapter, S_MIBS_HOST_STATS_MIBS *pstHostMibs)
+INT ProcessGetHostMibs(PMINI_ADAPTER Adapter, S_MIBS_HOST_STATS_MIBS *pstHostMibs)
{
- S_SERVICEFLOW_ENTRY *pstServiceFlowEntry = NULL;
- S_PHS_RULE *pstPhsRule = NULL;
- S_CLASSIFIER_TABLE *pstClassifierTable = NULL;
- S_CLASSIFIER_ENTRY *pstClassifierRule = NULL;
- PPHS_DEVICE_EXTENSION pDeviceExtension = (PPHS_DEVICE_EXTENSION)&Adapter->stBCMPhsContext;
+ S_SERVICEFLOW_ENTRY *pstServiceFlowEntry = NULL;
+ S_PHS_RULE *pstPhsRule = NULL;
+ S_CLASSIFIER_TABLE *pstClassifierTable = NULL;
+ S_CLASSIFIER_ENTRY *pstClassifierRule = NULL;
+ PPHS_DEVICE_EXTENSION pDeviceExtension = (PPHS_DEVICE_EXTENSION) &Adapter->stBCMPhsContext;
- UINT nClassifierIndex = 0, nPhsTableIndex = 0,nSfIndex = 0, uiIndex = 0;
+ UINT nClassifierIndex = 0, nPhsTableIndex = 0, nSfIndex = 0, uiIndex = 0;
- if(pDeviceExtension == NULL)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, HOST_MIBS, DBG_LVL_ALL, "Invalid Device Extension\n");
+ if (pDeviceExtension == NULL) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, HOST_MIBS, DBG_LVL_ALL, "Invalid Device Extension\n");
return STATUS_FAILURE;
}
- //Copy the classifier Table
- for(nClassifierIndex=0; nClassifierIndex < MAX_CLASSIFIERS;
- nClassifierIndex++)
- {
- if(Adapter->astClassifierTable[nClassifierIndex].bUsed == TRUE)
- memcpy((PVOID)&pstHostMibs->astClassifierTable[nClassifierIndex],
- (PVOID)&Adapter->astClassifierTable[nClassifierIndex],
- sizeof(S_MIBS_CLASSIFIER_RULE));
+ /* Copy the classifier Table */
+ for (nClassifierIndex = 0; nClassifierIndex < MAX_CLASSIFIERS; nClassifierIndex++) {
+ if (Adapter->astClassifierTable[nClassifierIndex].bUsed == TRUE)
+ memcpy((PVOID) & pstHostMibs->
+ astClassifierTable[nClassifierIndex],
+ (PVOID) & Adapter->
+ astClassifierTable[nClassifierIndex],
+ sizeof(S_MIBS_CLASSIFIER_RULE));
}
- //Copy the SF Table
- for(nSfIndex=0; nSfIndex < NO_OF_QUEUES ; nSfIndex++)
- {
- if(Adapter->PackInfo[nSfIndex].bValid)
- {
- memcpy((PVOID)&pstHostMibs->astSFtable[nSfIndex],(PVOID)&Adapter->PackInfo[nSfIndex],sizeof(S_MIBS_SERVICEFLOW_TABLE));
- }
- else
- {
- //if index in not valid, don't process this for the PHS table. Go For the next entry.
- continue ;
- }
+ /* Copy the SF Table */
+ for (nSfIndex = 0; nSfIndex < NO_OF_QUEUES; nSfIndex++) {
+ if (Adapter->PackInfo[nSfIndex].bValid) {
+ memcpy((PVOID) & pstHostMibs->astSFtable[nSfIndex],
+ (PVOID) & Adapter->PackInfo[nSfIndex],
+ sizeof(S_MIBS_SERVICEFLOW_TABLE));
+ } else {
+			/* If the index is not valid,
+			 * don't process this entry for the PHS table.
+			 * Go for the next entry.
+			 */
+ continue;
+ }
- //Retrieve the SFID Entry Index for requested Service Flow
- if(PHS_INVALID_TABLE_INDEX == GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable,
- Adapter->PackInfo[nSfIndex].usVCID_Value ,&pstServiceFlowEntry))
- {
+ /* Retrieve the SFID Entry Index for requested Service Flow */
+ if (PHS_INVALID_TABLE_INDEX ==
+ GetServiceFlowEntry(pDeviceExtension->
+ pstServiceFlowPhsRulesTable,
+ Adapter->PackInfo[nSfIndex].
+ usVCID_Value, &pstServiceFlowEntry))
- continue;
- }
+ continue;
pstClassifierTable = pstServiceFlowEntry->pstClassifierTable;
-
- for(uiIndex = 0; uiIndex < MAX_PHSRULE_PER_SF; uiIndex++)
- {
+ for (uiIndex = 0; uiIndex < MAX_PHSRULE_PER_SF; uiIndex++) {
pstClassifierRule = &pstClassifierTable->stActivePhsRulesList[uiIndex];
- if(pstClassifierRule->bUsed)
- {
- pstPhsRule = pstClassifierRule->pstPhsRule;
+ if (pstClassifierRule->bUsed) {
+ pstPhsRule = pstClassifierRule->pstPhsRule;
- pstHostMibs->astPhsRulesTable[nPhsTableIndex].ulSFID = Adapter->PackInfo[nSfIndex].ulSFID;
+ pstHostMibs->astPhsRulesTable[nPhsTableIndex].
+ ulSFID = Adapter->PackInfo[nSfIndex].ulSFID;
- memcpy(&pstHostMibs->astPhsRulesTable[nPhsTableIndex].u8PHSI,
- &pstPhsRule->u8PHSI,
- sizeof(S_PHS_RULE));
+ memcpy(&pstHostMibs->
+ astPhsRulesTable[nPhsTableIndex].u8PHSI,
+ &pstPhsRule->u8PHSI, sizeof(S_PHS_RULE));
nPhsTableIndex++;
}
@@ -81,65 +79,63 @@ INT ProcessGetHostMibs(PMINI_ADAPTER Adapter, S_MIBS_HOST_STATS_MIBS *pstHostMi
}
-
- //copy other Host Statistics parameters
+ /* Copy other Host Statistics parameters */
pstHostMibs->stHostInfo.GoodTransmits = Adapter->dev->stats.tx_packets;
pstHostMibs->stHostInfo.GoodReceives = Adapter->dev->stats.rx_packets;
- pstHostMibs->stHostInfo.CurrNumFreeDesc =
- atomic_read(&Adapter->CurrNumFreeTxDesc);
+ pstHostMibs->stHostInfo.CurrNumFreeDesc = atomic_read(&Adapter->CurrNumFreeTxDesc);
pstHostMibs->stHostInfo.BEBucketSize = Adapter->BEBucketSize;
pstHostMibs->stHostInfo.rtPSBucketSize = Adapter->rtPSBucketSize;
pstHostMibs->stHostInfo.TimerActive = Adapter->TimerActive;
pstHostMibs->stHostInfo.u32TotalDSD = Adapter->u32TotalDSD;
- memcpy(pstHostMibs->stHostInfo.aTxPktSizeHist,Adapter->aTxPktSizeHist,sizeof(UINT32)*MIBS_MAX_HIST_ENTRIES);
- memcpy(pstHostMibs->stHostInfo.aRxPktSizeHist,Adapter->aRxPktSizeHist,sizeof(UINT32)*MIBS_MAX_HIST_ENTRIES);
+ memcpy(pstHostMibs->stHostInfo.aTxPktSizeHist, Adapter->aTxPktSizeHist, sizeof(UINT32) * MIBS_MAX_HIST_ENTRIES);
+ memcpy(pstHostMibs->stHostInfo.aRxPktSizeHist, Adapter->aRxPktSizeHist, sizeof(UINT32) * MIBS_MAX_HIST_ENTRIES);
return STATUS_SUCCESS;
}
-
VOID GetDroppedAppCntrlPktMibs(S_MIBS_HOST_STATS_MIBS *pstHostMibs, const PPER_TARANG_DATA pTarang)
{
memcpy(&(pstHostMibs->stDroppedAppCntrlMsgs),
- &(pTarang->stDroppedAppCntrlMsgs),sizeof(S_MIBS_DROPPED_APP_CNTRL_MESSAGES));
+ &(pTarang->stDroppedAppCntrlMsgs),
+ sizeof(S_MIBS_DROPPED_APP_CNTRL_MESSAGES));
}
-
-VOID CopyMIBSExtendedSFParameters(PMINI_ADAPTER Adapter,
- CServiceFlowParamSI *psfLocalSet, UINT uiSearchRuleIndex)
+VOID CopyMIBSExtendedSFParameters(PMINI_ADAPTER Adapter, CServiceFlowParamSI *psfLocalSet, UINT uiSearchRuleIndex)
{
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfSfid = psfLocalSet->u32SFID;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsMaxSustainedRate = psfLocalSet->u32MaxSustainedTrafficRate;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsMaxTrafficBurst = psfLocalSet->u32MaxTrafficBurst;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsMinReservedRate = psfLocalSet->u32MinReservedTrafficRate;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsToleratedJitter = psfLocalSet->u32ToleratedJitter;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsMaxLatency = psfLocalSet->u32MaximumLatency;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsFixedVsVariableSduInd = psfLocalSet->u8FixedLengthVSVariableLengthSDUIndicator;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsFixedVsVariableSduInd = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsFixedVsVariableSduInd);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSduSize = psfLocalSet->u8SDUSize;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSduSize = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSduSize);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSfSchedulingType = psfLocalSet->u8ServiceFlowSchedulingType;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSfSchedulingType = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsSfSchedulingType);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqEnable = psfLocalSet->u8ARQEnable;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqEnable = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqEnable);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqWindowSize = ntohs(psfLocalSet->u16ARQWindowSize);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqWindowSize = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqWindowSize);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockLifetime = ntohs(psfLocalSet->u16ARQBlockLifeTime);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockLifetime = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockLifetime);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqSyncLossTimeout = ntohs(psfLocalSet->u16ARQSyncLossTimeOut);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqSyncLossTimeout = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqSyncLossTimeout);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqDeliverInOrder = psfLocalSet->u8ARQDeliverInOrder;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqDeliverInOrder = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqDeliverInOrder);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqRxPurgeTimeout = ntohs(psfLocalSet->u16ARQRxPurgeTimeOut);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqRxPurgeTimeout = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqRxPurgeTimeout);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockSize = ntohs(psfLocalSet->u16ARQBlockSize);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockSize = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsArqBlockSize);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsReqTxPolicy = psfLocalSet->u8RequesttransmissionPolicy;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsReqTxPolicy = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsReqTxPolicy);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnSfCsSpecification = psfLocalSet->u8CSSpecification;
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnSfCsSpecification = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnSfCsSpecification);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsTargetSaid = ntohs(psfLocalSet->u16TargetSAID);
- Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsTargetSaid = ntohl(Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable.wmanIfCmnCpsTargetSaid);
+ S_MIBS_EXTSERVICEFLOW_PARAMETERS *t = &Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable;
+
+ t->wmanIfSfid = psfLocalSet->u32SFID;
+ t->wmanIfCmnCpsMaxSustainedRate = psfLocalSet->u32MaxSustainedTrafficRate;
+ t->wmanIfCmnCpsMaxTrafficBurst = psfLocalSet->u32MaxTrafficBurst;
+ t->wmanIfCmnCpsMinReservedRate = psfLocalSet->u32MinReservedTrafficRate;
+ t->wmanIfCmnCpsToleratedJitter = psfLocalSet->u32ToleratedJitter;
+ t->wmanIfCmnCpsMaxLatency = psfLocalSet->u32MaximumLatency;
+ t->wmanIfCmnCpsFixedVsVariableSduInd = psfLocalSet->u8FixedLengthVSVariableLengthSDUIndicator;
+ t->wmanIfCmnCpsFixedVsVariableSduInd = ntohl(t->wmanIfCmnCpsFixedVsVariableSduInd);
+ t->wmanIfCmnCpsSduSize = psfLocalSet->u8SDUSize;
+ t->wmanIfCmnCpsSduSize = ntohl(t->wmanIfCmnCpsSduSize);
+ t->wmanIfCmnCpsSfSchedulingType = psfLocalSet->u8ServiceFlowSchedulingType;
+ t->wmanIfCmnCpsSfSchedulingType = ntohl(t->wmanIfCmnCpsSfSchedulingType);
+ t->wmanIfCmnCpsArqEnable = psfLocalSet->u8ARQEnable;
+ t->wmanIfCmnCpsArqEnable = ntohl(t->wmanIfCmnCpsArqEnable);
+ t->wmanIfCmnCpsArqWindowSize = ntohs(psfLocalSet->u16ARQWindowSize);
+ t->wmanIfCmnCpsArqWindowSize = ntohl(t->wmanIfCmnCpsArqWindowSize);
+ t->wmanIfCmnCpsArqBlockLifetime = ntohs(psfLocalSet->u16ARQBlockLifeTime);
+ t->wmanIfCmnCpsArqBlockLifetime = ntohl(t->wmanIfCmnCpsArqBlockLifetime);
+ t->wmanIfCmnCpsArqSyncLossTimeout = ntohs(psfLocalSet->u16ARQSyncLossTimeOut);
+ t->wmanIfCmnCpsArqSyncLossTimeout = ntohl(t->wmanIfCmnCpsArqSyncLossTimeout);
+ t->wmanIfCmnCpsArqDeliverInOrder = psfLocalSet->u8ARQDeliverInOrder;
+ t->wmanIfCmnCpsArqDeliverInOrder = ntohl(t->wmanIfCmnCpsArqDeliverInOrder);
+ t->wmanIfCmnCpsArqRxPurgeTimeout = ntohs(psfLocalSet->u16ARQRxPurgeTimeOut);
+ t->wmanIfCmnCpsArqRxPurgeTimeout = ntohl(t->wmanIfCmnCpsArqRxPurgeTimeout);
+ t->wmanIfCmnCpsArqBlockSize = ntohs(psfLocalSet->u16ARQBlockSize);
+ t->wmanIfCmnCpsArqBlockSize = ntohl(t->wmanIfCmnCpsArqBlockSize);
+ t->wmanIfCmnCpsReqTxPolicy = psfLocalSet->u8RequesttransmissionPolicy;
+ t->wmanIfCmnCpsReqTxPolicy = ntohl(t->wmanIfCmnCpsReqTxPolicy);
+ t->wmanIfCmnSfCsSpecification = psfLocalSet->u8CSSpecification;
+ t->wmanIfCmnSfCsSpecification = ntohl(t->wmanIfCmnSfCsSpecification);
+ t->wmanIfCmnCpsTargetSaid = ntohs(psfLocalSet->u16TargetSAID);
+ t->wmanIfCmnCpsTargetSaid = ntohl(t->wmanIfCmnCpsTargetSaid);
}
diff --git a/drivers/staging/bcm/led_control.c b/drivers/staging/bcm/led_control.c
index 16e939f..c7f4886 100644
--- a/drivers/staging/bcm/led_control.c
+++ b/drivers/staging/bcm/led_control.c
@@ -5,65 +5,69 @@
static B_UINT16 CFG_CalculateChecksum(B_UINT8 *pu8Buffer, B_UINT32 u32Size)
{
- B_UINT16 u16CheckSum=0;
- while(u32Size--) {
+ B_UINT16 u16CheckSum = 0;
+ while (u32Size--) {
u16CheckSum += (B_UINT8)~(*pu8Buffer);
- pu8Buffer++;
+ pu8Buffer++;
}
return u16CheckSum;
}
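CFG_CalculateChecksum() sums the one's complement of every byte into a 16-bit accumulator. A tiny worked example of what that yields, written as if it lived next to the function in led_control.c; the sample values are purely illustrative:

static B_UINT16 example_checksum(void)
{
	B_UINT8 sample[2] = { 0x00, 0xFF };

	/* (B_UINT8)~0x00 = 0xFF and (B_UINT8)~0xFF = 0x00, so this returns 0x00FF. */
	return CFG_CalculateChecksum(sample, sizeof(sample));
}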
+
BOOLEAN IsReqGpioIsLedInNVM(PMINI_ADAPTER Adapter, UINT gpios)
{
- INT Status ;
- Status = (Adapter->gpioBitMap & gpios) ^ gpios ;
- if(Status)
+ INT Status;
+ Status = (Adapter->gpioBitMap & gpios) ^ gpios;
+ if (Status)
return FALSE;
else
return TRUE;
}
-static INT LED_Blink(PMINI_ADAPTER Adapter, UINT GPIO_Num, UCHAR uiLedIndex, ULONG timeout, INT num_of_time, LedEventInfo_t currdriverstate)
+static INT LED_Blink(PMINI_ADAPTER Adapter, UINT GPIO_Num, UCHAR uiLedIndex,
+ ULONG timeout, INT num_of_time, LedEventInfo_t currdriverstate)
{
int Status = STATUS_SUCCESS;
BOOLEAN bInfinite = FALSE;
- /*Check if num_of_time is -ve. If yes, blink led in infinite loop*/
- if(num_of_time < 0)
- {
+ /* Check if num_of_time is -ve. If yes, blink led in infinite loop */
+ if (num_of_time < 0) {
bInfinite = TRUE;
num_of_time = 1;
}
- while(num_of_time)
- {
-
- if(currdriverstate == Adapter->DriverState)
+ while (num_of_time) {
+ if (currdriverstate == Adapter->DriverState)
TURN_ON_LED(GPIO_Num, uiLedIndex);
- /*Wait for timeout after setting on the LED*/
- Status = wait_event_interruptible_timeout(Adapter->LEDInfo.notify_led_event,
- currdriverstate != Adapter->DriverState || kthread_should_stop(),
- msecs_to_jiffies(timeout));
-
- if(kthread_should_stop())
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "Led thread got signal to exit..hence exiting");
- Adapter->LEDInfo.led_thread_running= BCM_LED_THREAD_DISABLED;
+ /* Wait for timeout after setting on the LED */
+ Status = wait_event_interruptible_timeout(
+ Adapter->LEDInfo.notify_led_event,
+ currdriverstate != Adapter->DriverState ||
+ kthread_should_stop(),
+ msecs_to_jiffies(timeout));
+
+ if (kthread_should_stop()) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL,
+ "Led thread got signal to exit..hence exiting");
+ Adapter->LEDInfo.led_thread_running =
+ BCM_LED_THREAD_DISABLED;
TURN_OFF_LED(GPIO_Num, uiLedIndex);
- Status=EVENT_SIGNALED;
+ Status = EVENT_SIGNALED;
break;
}
- if(Status)
- {
+ if (Status) {
TURN_OFF_LED(GPIO_Num, uiLedIndex);
- Status=EVENT_SIGNALED;
+ Status = EVENT_SIGNALED;
break;
}
TURN_OFF_LED(GPIO_Num, uiLedIndex);
- Status = wait_event_interruptible_timeout(Adapter->LEDInfo.notify_led_event,
- currdriverstate!= Adapter->DriverState || kthread_should_stop(),
- msecs_to_jiffies(timeout));
- if(bInfinite == FALSE)
+ Status = wait_event_interruptible_timeout(
+ Adapter->LEDInfo.notify_led_event,
+ currdriverstate != Adapter->DriverState ||
+ kthread_should_stop(),
+ msecs_to_jiffies(timeout));
+ if (bInfinite == FALSE)
num_of_time--;
}
return Status;
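LED_Blink() above sleeps between toggles with wait_event_interruptible_timeout(), waking early when the driver state changes or the thread is asked to stop. Stripped of the LED details, the wait boils down to something like this; the names are placeholders:

#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>

/*
 * Sleep up to ms milliseconds; returns 0 if the timeout elapsed, a positive
 * value if *state moved away from expected (or the thread should stop), and
 * -ERESTARTSYS if a signal interrupted the wait.
 */
static long led_wait(wait_queue_head_t *wq, const int *state, int expected,
		     unsigned int ms)
{
	return wait_event_interruptible_timeout(*wq,
						*state != expected ||
						kthread_should_stop(),
						msecs_to_jiffies(ms));
}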
@@ -71,19 +75,19 @@ static INT LED_Blink(PMINI_ADAPTER Adapter, UINT GPIO_Num, UCHAR uiLedIndex, ULO
static INT ScaleRateofTransfer(ULONG rate)
{
- if(rate <= 3)
+ if (rate <= 3)
return rate;
- else if((rate > 3) && (rate <= 100))
+ else if ((rate > 3) && (rate <= 100))
return 5;
- else if((rate > 100) && (rate <= 200))
+ else if ((rate > 100) && (rate <= 200))
return 6;
- else if((rate > 200) && (rate <= 300))
+ else if ((rate > 200) && (rate <= 300))
return 7;
- else if((rate > 300) && (rate <= 400))
+ else if ((rate > 300) && (rate <= 400))
return 8;
- else if((rate > 400) && (rate <= 500))
+ else if ((rate > 400) && (rate <= 500))
return 9;
- else if((rate > 500) && (rate <= 600))
+ else if ((rate > 500) && (rate <= 600))
return 10;
else
return MAX_NUM_OF_BLINKS;
@@ -92,215 +96,232 @@ static INT ScaleRateofTransfer(ULONG rate)
static INT LED_Proportional_Blink(PMINI_ADAPTER Adapter, UCHAR GPIO_Num_tx,
- UCHAR uiTxLedIndex, UCHAR GPIO_Num_rx, UCHAR uiRxLedIndex, LedEventInfo_t currdriverstate)
+ UCHAR uiTxLedIndex, UCHAR GPIO_Num_rx, UCHAR uiRxLedIndex,
+ LedEventInfo_t currdriverstate)
{
- /* Initial values of TX and RX packets*/
+ /* Initial values of TX and RX packets */
ULONG64 Initial_num_of_packts_tx = 0, Initial_num_of_packts_rx = 0;
- /*values of TX and RX packets after 1 sec*/
+ /* values of TX and RX packets after 1 sec */
ULONG64 Final_num_of_packts_tx = 0, Final_num_of_packts_rx = 0;
- /*Rate of transfer of Tx and Rx in 1 sec*/
+ /* Rate of transfer of Tx and Rx in 1 sec */
ULONG64 rate_of_transfer_tx = 0, rate_of_transfer_rx = 0;
int Status = STATUS_SUCCESS;
INT num_of_time = 0, num_of_time_tx = 0, num_of_time_rx = 0;
UINT remDelay = 0;
BOOLEAN bBlinkBothLED = TRUE;
- //UINT GPIO_num = DISABLE_GPIO_NUM;
+ /* UINT GPIO_num = DISABLE_GPIO_NUM; */
ulong timeout = 0;
- /*Read initial value of packets sent/received */
+ /* Read initial value of packets sent/received */
Initial_num_of_packts_tx = Adapter->dev->stats.tx_packets;
Initial_num_of_packts_rx = Adapter->dev->stats.rx_packets;
- /*Scale the rate of transfer to no of blinks.*/
- num_of_time_tx= ScaleRateofTransfer((ULONG)rate_of_transfer_tx);
- num_of_time_rx= ScaleRateofTransfer((ULONG)rate_of_transfer_rx);
+ /* Scale the rate of transfer to no of blinks. */
+ num_of_time_tx = ScaleRateofTransfer((ULONG)rate_of_transfer_tx);
+ num_of_time_rx = ScaleRateofTransfer((ULONG)rate_of_transfer_rx);
- while((Adapter->device_removed == FALSE))
- {
+ while ((Adapter->device_removed == FALSE)) {
timeout = 50;
- /*Blink Tx and Rx LED when both Tx and Rx is in normal bandwidth*/
- if(bBlinkBothLED)
- {
- /*Assign minimum number of blinks of either Tx or Rx.*/
- if(num_of_time_tx > num_of_time_rx)
+ /*
+		 * Blink the Tx and Rx LEDs when both Tx and Rx are
+		 * in normal bandwidth
+ */
+ if (bBlinkBothLED) {
+ /*
+ * Assign minimum number of blinks of
+ * either Tx or Rx.
+ */
+ if (num_of_time_tx > num_of_time_rx)
num_of_time = num_of_time_rx;
else
num_of_time = num_of_time_tx;
- if(num_of_time > 0)
- {
- /*Blink both Tx and Rx LEDs*/
- if(LED_Blink(Adapter, 1<<GPIO_Num_tx, uiTxLedIndex, timeout, num_of_time,currdriverstate)
+ if (num_of_time > 0) {
+ /* Blink both Tx and Rx LEDs */
+ if (LED_Blink(Adapter, 1 << GPIO_Num_tx,
+ uiTxLedIndex, timeout,
+ num_of_time, currdriverstate)
== EVENT_SIGNALED)
- {
return EVENT_SIGNALED;
- }
- if(LED_Blink(Adapter, 1<<GPIO_Num_rx, uiRxLedIndex, timeout, num_of_time,currdriverstate)
+
+ if (LED_Blink(Adapter, 1 << GPIO_Num_rx,
+ uiRxLedIndex, timeout,
+ num_of_time, currdriverstate)
== EVENT_SIGNALED)
- {
return EVENT_SIGNALED;
- }
}
- if(num_of_time == num_of_time_tx)
- {
- /*Blink pending rate of Rx*/
- if(LED_Blink(Adapter, (1 << GPIO_Num_rx), uiRxLedIndex, timeout,
- num_of_time_rx-num_of_time,currdriverstate) == EVENT_SIGNALED)
- {
+ if (num_of_time == num_of_time_tx) {
+ /* Blink pending rate of Rx */
+ if (LED_Blink(Adapter, (1 << GPIO_Num_rx),
+ uiRxLedIndex, timeout,
+ num_of_time_rx-num_of_time,
+ currdriverstate)
+ == EVENT_SIGNALED)
return EVENT_SIGNALED;
- }
+
num_of_time = num_of_time_rx;
- }
- else
- {
- /*Blink pending rate of Tx*/
- if(LED_Blink(Adapter, 1<<GPIO_Num_tx, uiTxLedIndex, timeout,
- num_of_time_tx-num_of_time,currdriverstate) == EVENT_SIGNALED)
- {
+ } else {
+ /* Blink pending rate of Tx */
+ if (LED_Blink(Adapter, 1 << GPIO_Num_tx,
+ uiTxLedIndex, timeout,
+ num_of_time_tx-num_of_time,
+ currdriverstate)
+ == EVENT_SIGNALED)
return EVENT_SIGNALED;
- }
+
num_of_time = num_of_time_tx;
}
- }
- else
- {
- if(num_of_time == num_of_time_tx)
- {
- /*Blink pending rate of Rx*/
- if(LED_Blink(Adapter, 1<<GPIO_Num_tx, uiTxLedIndex, timeout, num_of_time,currdriverstate)
+ } else {
+ if (num_of_time == num_of_time_tx) {
+ /* Blink pending rate of Rx */
+ if (LED_Blink(Adapter, 1 << GPIO_Num_tx,
+ uiTxLedIndex, timeout,
+ num_of_time, currdriverstate)
== EVENT_SIGNALED)
- {
return EVENT_SIGNALED;
- }
- }
- else
- {
- /*Blink pending rate of Tx*/
- if(LED_Blink(Adapter, 1<<GPIO_Num_rx, uiRxLedIndex, timeout,
- num_of_time,currdriverstate) == EVENT_SIGNALED)
- {
+ } else {
+ /* Blink pending rate of Tx */
+ if (LED_Blink(Adapter, 1 << GPIO_Num_rx,
+ uiRxLedIndex, timeout,
+ num_of_time, currdriverstate)
+ == EVENT_SIGNALED)
return EVENT_SIGNALED;
- }
}
}
- /* If Tx/Rx rate is less than maximum blinks per second,
- * wait till delay completes to 1 second
- */
+
+ /*
+ * If Tx/Rx rate is less than maximum blinks per second,
+ * wait till delay completes to 1 second
+ */
remDelay = MAX_NUM_OF_BLINKS - num_of_time;
- if(remDelay > 0)
- {
- timeout= 100 * remDelay;
- Status = wait_event_interruptible_timeout(Adapter->LEDInfo.notify_led_event,
- currdriverstate!= Adapter->DriverState ||kthread_should_stop() ,
- msecs_to_jiffies (timeout));
-
- if(kthread_should_stop())
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "Led thread got signal to exit..hence exiting");
- Adapter->LEDInfo.led_thread_running= BCM_LED_THREAD_DISABLED;
+ if (remDelay > 0) {
+ timeout = 100 * remDelay;
+ Status = wait_event_interruptible_timeout(
+ Adapter->LEDInfo.notify_led_event,
+ currdriverstate != Adapter->DriverState
+ || kthread_should_stop(),
+ msecs_to_jiffies(timeout));
+
+ if (kthread_should_stop()) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ LED_DUMP_INFO, DBG_LVL_ALL,
+ "Led thread got signal to exit..hence exiting");
+ Adapter->LEDInfo.led_thread_running =
+ BCM_LED_THREAD_DISABLED;
return EVENT_SIGNALED;
}
- if(Status)
+ if (Status)
return EVENT_SIGNALED;
}
- /*Turn off both Tx and Rx LEDs before next second*/
- TURN_OFF_LED(1<<GPIO_Num_tx, uiTxLedIndex);
- TURN_OFF_LED(1<<GPIO_Num_rx, uiTxLedIndex);
+ /* Turn off both Tx and Rx LEDs before next second */
+ TURN_OFF_LED(1 << GPIO_Num_tx, uiTxLedIndex);
+ TURN_OFF_LED(1 << GPIO_Num_rx, uiTxLedIndex);
/*
- * Read the Tx & Rx packets transmission after 1 second and
- * calculate rate of transfer
- */
+ * Read the Tx & Rx packets transmission after 1 second and
+ * calculate rate of transfer
+ */
Final_num_of_packts_tx = Adapter->dev->stats.tx_packets;
Final_num_of_packts_rx = Adapter->dev->stats.rx_packets;
- rate_of_transfer_tx = Final_num_of_packts_tx - Initial_num_of_packts_tx;
- rate_of_transfer_rx = Final_num_of_packts_rx - Initial_num_of_packts_rx;
+ rate_of_transfer_tx = Final_num_of_packts_tx -
+ Initial_num_of_packts_tx;
+ rate_of_transfer_rx = Final_num_of_packts_rx -
+ Initial_num_of_packts_rx;
- /*Read initial value of packets sent/received */
+ /* Read initial value of packets sent/received */
Initial_num_of_packts_tx = Final_num_of_packts_tx;
- Initial_num_of_packts_rx = Final_num_of_packts_rx ;
+ Initial_num_of_packts_rx = Final_num_of_packts_rx;
- /*Scale the rate of transfer to no of blinks.*/
- num_of_time_tx= ScaleRateofTransfer((ULONG)rate_of_transfer_tx);
- num_of_time_rx= ScaleRateofTransfer((ULONG)rate_of_transfer_rx);
+ /* Scale the rate of transfer to no of blinks. */
+ num_of_time_tx =
+ ScaleRateofTransfer((ULONG)rate_of_transfer_tx);
+ num_of_time_rx =
+ ScaleRateofTransfer((ULONG)rate_of_transfer_rx);
}
return Status;
}
-
-//-----------------------------------------------------------------------------
-// Procedure: ValidateDSDParamsChecksum
-//
-// Description: Reads DSD Params and validates checkusm.
-//
-// Arguments:
-// Adapter - Pointer to Adapter structure.
-// ulParamOffset - Start offset of the DSD parameter to be read and validated.
-// usParamLen - Length of the DSD Parameter.
-//
-// Returns:
-// <OSAL_STATUS_CODE>
-//-----------------------------------------------------------------------------
-
-static INT ValidateDSDParamsChecksum(
- PMINI_ADAPTER Adapter,
- ULONG ulParamOffset,
- USHORT usParamLen )
+/*
+ * -----------------------------------------------------------------------------
+ * Procedure: ValidateDSDParamsChecksum
+ *
+ * Description: Reads DSD Params and validates checksum.
+ *
+ * Arguments:
+ * Adapter - Pointer to Adapter structure.
+ * ulParamOffset - Start offset of the DSD parameter to be read and
+ * validated.
+ * usParamLen - Length of the DSD Parameter.
+ *
+ * Returns:
+ * <OSAL_STATUS_CODE>
+ * -----------------------------------------------------------------------------
+ */
+static INT ValidateDSDParamsChecksum(PMINI_ADAPTER Adapter, ULONG ulParamOffset,
+ USHORT usParamLen)
{
INT Status = STATUS_SUCCESS;
- PUCHAR puBuffer = NULL;
- USHORT usChksmOrg = 0;
+ PUCHAR puBuffer = NULL;
+ USHORT usChksmOrg = 0;
USHORT usChecksumCalculated = 0;
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread:ValidateDSDParamsChecksum: 0x%lx 0x%X",ulParamOffset, usParamLen);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
+ "LED Thread:ValidateDSDParamsChecksum: 0x%lx 0x%X",
+ ulParamOffset, usParamLen);
puBuffer = kmalloc(usParamLen, GFP_KERNEL);
- if(!puBuffer)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: ValidateDSDParamsChecksum Allocation failed");
+ if (!puBuffer) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL,
+ "LED Thread: ValidateDSDParamsChecksum Allocation failed");
return -ENOMEM;
}
- //
- // Read the DSD data from the parameter offset.
- //
- if(STATUS_SUCCESS != BeceemNVMRead(Adapter,(PUINT)puBuffer,ulParamOffset,usParamLen))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: ValidateDSDParamsChecksum BeceemNVMRead failed");
- Status=STATUS_IMAGE_CHECKSUM_MISMATCH;
+ /* Read the DSD data from the parameter offset. */
+ if (STATUS_SUCCESS != BeceemNVMRead(Adapter, (PUINT)puBuffer,
+ ulParamOffset, usParamLen)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL,
+ "LED Thread: ValidateDSDParamsChecksum BeceemNVMRead failed");
+ Status = STATUS_IMAGE_CHECKSUM_MISMATCH;
goto exit;
}
- //
- // Calculate the checksum of the data read from the DSD parameter.
- //
- usChecksumCalculated = CFG_CalculateChecksum(puBuffer,usParamLen);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: usCheckSumCalculated = 0x%x\n", usChecksumCalculated);
-
- //
- // End of the DSD parameter will have a TWO bytes checksum stored in it. Read it and compare with the calculated
- // Checksum.
- //
- if(STATUS_SUCCESS != BeceemNVMRead(Adapter,(PUINT)&usChksmOrg,ulParamOffset+usParamLen,2))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: ValidateDSDParamsChecksum BeceemNVMRead failed");
- Status=STATUS_IMAGE_CHECKSUM_MISMATCH;
+ /* Calculate the checksum of the data read from the DSD parameter. */
+ usChecksumCalculated = CFG_CalculateChecksum(puBuffer, usParamLen);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
+ "LED Thread: usCheckSumCalculated = 0x%x\n",
+ usChecksumCalculated);
+
+ /*
+	 * The end of the DSD parameter has a two-byte checksum stored in it.
+	 * Read it and compare it with the calculated checksum.
+ */
+ if (STATUS_SUCCESS != BeceemNVMRead(Adapter, (PUINT)&usChksmOrg,
+ ulParamOffset+usParamLen, 2)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL,
+ "LED Thread: ValidateDSDParamsChecksum BeceemNVMRead failed");
+ Status = STATUS_IMAGE_CHECKSUM_MISMATCH;
goto exit;
}
usChksmOrg = ntohs(usChksmOrg);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: usChksmOrg = 0x%x", usChksmOrg);
-
- //
- // Compare the checksum calculated with the checksum read from DSD section
- //
- if(usChecksumCalculated ^ usChksmOrg)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: ValidateDSDParamsChecksum: Checksums don't match");
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
+ "LED Thread: usChksmOrg = 0x%x", usChksmOrg);
+
+ /*
+ * Compare the checksum calculated with the checksum read
+ * from DSD section
+ */
+ if (usChecksumCalculated ^ usChksmOrg) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL,
+ "LED Thread: ValidateDSDParamsChecksum: Checksums don't match");
Status = STATUS_IMAGE_CHECKSUM_MISMATCH;
goto exit;
}
@@ -311,523 +332,526 @@ exit:
}
-//-----------------------------------------------------------------------------
-// Procedure: ValidateHWParmStructure
-//
-// Description: Validates HW Parameters.
-//
-// Arguments:
-// Adapter - Pointer to Adapter structure.
-// ulHwParamOffset - Start offset of the HW parameter Section to be read and validated.
-//
-// Returns:
-// <OSAL_STATUS_CODE>
-//-----------------------------------------------------------------------------
-
+/*
+ * -----------------------------------------------------------------------------
+ * Procedure: ValidateHWParmStructure
+ *
+ * Description: Validates HW Parameters.
+ *
+ * Arguments:
+ * Adapter - Pointer to Adapter structure.
+ * ulHwParamOffset - Start offset of the HW parameter Section to be read
+ * and validated.
+ *
+ * Returns:
+ * <OSAL_STATUS_CODE>
+ * -----------------------------------------------------------------------------
+ */
static INT ValidateHWParmStructure(PMINI_ADAPTER Adapter, ULONG ulHwParamOffset)
{
- INT Status = STATUS_SUCCESS ;
+ INT Status = STATUS_SUCCESS;
USHORT HwParamLen = 0;
- // Add DSD start offset to the hwParamOffset to get the actual address.
+ /*
+ * Add DSD start offset to the hwParamOffset to get
+ * the actual address.
+ */
ulHwParamOffset += DSD_START_OFFSET;
- /*Read the Length of HW_PARAM structure*/
- BeceemNVMRead(Adapter,(PUINT)&HwParamLen,ulHwParamOffset,2);
+ /* Read the Length of HW_PARAM structure */
+ BeceemNVMRead(Adapter, (PUINT)&HwParamLen, ulHwParamOffset, 2);
HwParamLen = ntohs(HwParamLen);
- if(0==HwParamLen || HwParamLen > Adapter->uiNVMDSDSize)
- {
+ if (0 == HwParamLen || HwParamLen > Adapter->uiNVMDSDSize)
return STATUS_IMAGE_CHECKSUM_MISMATCH;
- }
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "LED Thread:HwParamLen = 0x%x", HwParamLen);
- Status =ValidateDSDParamsChecksum(Adapter,ulHwParamOffset,HwParamLen);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
+ "LED Thread:HwParamLen = 0x%x", HwParamLen);
+ Status = ValidateDSDParamsChecksum(Adapter, ulHwParamOffset,
+ HwParamLen);
return Status;
} /* ValidateHWParmStructure() */
-static int ReadLEDInformationFromEEPROM(PMINI_ADAPTER Adapter, UCHAR GPIO_Array[])
+static int ReadLEDInformationFromEEPROM(PMINI_ADAPTER Adapter,
+ UCHAR GPIO_Array[])
{
int Status = STATUS_SUCCESS;
- ULONG dwReadValue = 0;
- USHORT usHwParamData = 0;
- USHORT usEEPROMVersion = 0;
- UCHAR ucIndex = 0;
- UCHAR ucGPIOInfo[32] = {0};
+ ULONG dwReadValue = 0;
+ USHORT usHwParamData = 0;
+ USHORT usEEPROMVersion = 0;
+ UCHAR ucIndex = 0;
+ UCHAR ucGPIOInfo[32] = {0};
- BeceemNVMRead(Adapter,(PUINT)&usEEPROMVersion,EEPROM_VERSION_OFFSET,2);
+ BeceemNVMRead(Adapter, (PUINT)&usEEPROMVersion,
+ EEPROM_VERSION_OFFSET, 2);
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"usEEPROMVersion: Minor:0x%X Major:0x%x",usEEPROMVersion&0xFF, ((usEEPROMVersion>>8)&0xFF));
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
+ "usEEPROMVersion: Minor:0x%X Major:0x%x",
+ usEEPROMVersion&0xFF, ((usEEPROMVersion>>8)&0xFF));
- if(((usEEPROMVersion>>8)&0xFF) < EEPROM_MAP5_MAJORVERSION)
- {
- BeceemNVMRead(Adapter,(PUINT)&usHwParamData,EEPROM_HW_PARAM_POINTER_ADDRESS,2);
+ if (((usEEPROMVersion>>8)&0xFF) < EEPROM_MAP5_MAJORVERSION) {
+ BeceemNVMRead(Adapter, (PUINT)&usHwParamData,
+ EEPROM_HW_PARAM_POINTER_ADDRESS, 2);
usHwParamData = ntohs(usHwParamData);
dwReadValue = usHwParamData;
- }
- else
- {
- //
- // Validate Compatibility section and then read HW param if compatibility section is valid.
- //
+ } else {
+ /*
+ * Validate Compatibility section and then read HW param
+ * if compatibility section is valid.
+ */
Status = ValidateDSDParamsChecksum(Adapter,
- DSD_START_OFFSET,
- COMPATIBILITY_SECTION_LENGTH_MAP5);
+ DSD_START_OFFSET,
+ COMPATIBILITY_SECTION_LENGTH_MAP5);
- if(Status != STATUS_SUCCESS)
- {
+ if (Status != STATUS_SUCCESS)
return Status;
- }
- BeceemNVMRead(Adapter,(PUINT)&dwReadValue,EEPROM_HW_PARAM_POINTER_ADDRRES_MAP5,4);
+
+ BeceemNVMRead(Adapter, (PUINT)&dwReadValue,
+ EEPROM_HW_PARAM_POINTER_ADDRRES_MAP5, 4);
dwReadValue = ntohl(dwReadValue);
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: Start address of HW_PARAM structure = 0x%lx",dwReadValue);
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
+ "LED Thread: Start address of HW_PARAM structure = 0x%lx",
+ dwReadValue);
- //
- // Validate if the address read out is within the DSD.
- // Adapter->uiNVMDSDSize gives whole DSD size inclusive of Autoinit.
- // lower limit should be above DSD_START_OFFSET and
- // upper limit should be below (Adapter->uiNVMDSDSize-DSD_START_OFFSET)
- //
- if(dwReadValue < DSD_START_OFFSET ||
- dwReadValue > (Adapter->uiNVMDSDSize-DSD_START_OFFSET))
- {
+ /*
+ * Validate if the address read out is within the DSD.
+ * Adapter->uiNVMDSDSize gives whole DSD size inclusive of Autoinit.
+	 * The lower limit should be above DSD_START_OFFSET and the upper
+	 * limit below (Adapter->uiNVMDSDSize-DSD_START_OFFSET).
+ */
+ if (dwReadValue < DSD_START_OFFSET ||
+ dwReadValue > (Adapter->uiNVMDSDSize-DSD_START_OFFSET))
return STATUS_IMAGE_CHECKSUM_MISMATCH;
- }
Status = ValidateHWParmStructure(Adapter, dwReadValue);
- if(Status){
+ if (Status)
return Status;
- }
/*
- Add DSD_START_OFFSET to the offset read from the EEPROM.
- This will give the actual start HW Parameters start address.
- To read GPIO section, add GPIO offset further.
- */
-
- dwReadValue += DSD_START_OFFSET; // = start address of hw param section.
- dwReadValue += GPIO_SECTION_START_OFFSET; // = GPIO start offset within HW Param section.
-
- /* Read the GPIO values for 32 GPIOs from EEPROM and map the function
- * number to GPIO pin number to GPIO_Array
- */
- BeceemNVMRead(Adapter, (UINT *)ucGPIOInfo,dwReadValue,32);
- for(ucIndex = 0; ucIndex < 32; ucIndex++)
- {
-
- switch(ucGPIOInfo[ucIndex])
- {
- case RED_LED:
- {
- GPIO_Array[RED_LED] = ucIndex;
- Adapter->gpioBitMap |= (1<<ucIndex);
- break;
- }
- case BLUE_LED:
- {
- GPIO_Array[BLUE_LED] = ucIndex;
- Adapter->gpioBitMap |= (1<<ucIndex);
- break;
- }
- case YELLOW_LED:
- {
- GPIO_Array[YELLOW_LED] = ucIndex;
- Adapter->gpioBitMap |= (1<<ucIndex);
- break;
- }
- case GREEN_LED:
- {
- GPIO_Array[GREEN_LED] = ucIndex;
- Adapter->gpioBitMap |= (1<<ucIndex);
- break;
- }
- default:
- break;
- }
+ * Add DSD_START_OFFSET to the offset read from the EEPROM.
+	 * This will give the actual start address of the HW Parameters section.
+ * To read GPIO section, add GPIO offset further.
+ */
+
+	/* Start address of the HW param section */
+	dwReadValue += DSD_START_OFFSET;
+	/* GPIO start offset within the HW param section */
+	dwReadValue += GPIO_SECTION_START_OFFSET;
+ /*
+ * Read the GPIO values for 32 GPIOs from EEPROM and map the function
+ * number to GPIO pin number to GPIO_Array
+ */
+ BeceemNVMRead(Adapter, (UINT *)ucGPIOInfo, dwReadValue, 32);
+ for (ucIndex = 0; ucIndex < 32; ucIndex++) {
+
+ switch (ucGPIOInfo[ucIndex]) {
+ case RED_LED:
+ GPIO_Array[RED_LED] = ucIndex;
+ Adapter->gpioBitMap |= (1 << ucIndex);
+ break;
+ case BLUE_LED:
+ GPIO_Array[BLUE_LED] = ucIndex;
+ Adapter->gpioBitMap |= (1 << ucIndex);
+ break;
+ case YELLOW_LED:
+ GPIO_Array[YELLOW_LED] = ucIndex;
+ Adapter->gpioBitMap |= (1 << ucIndex);
+ break;
+ case GREEN_LED:
+ GPIO_Array[GREEN_LED] = ucIndex;
+ Adapter->gpioBitMap |= (1 << ucIndex);
+ break;
+ default:
+ break;
}
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"GPIO's bit map correspond to LED :0x%X",Adapter->gpioBitMap);
- return Status;
+
+ }
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
+ "GPIO's bit map correspond to LED :0x%X", Adapter->gpioBitMap);
+ return Status;
}
-static int ReadConfigFileStructure(PMINI_ADAPTER Adapter, BOOLEAN *bEnableThread)
+static int ReadConfigFileStructure(PMINI_ADAPTER Adapter,
+ BOOLEAN *bEnableThread)
{
int Status = STATUS_SUCCESS;
- UCHAR GPIO_Array[NUM_OF_LEDS+1]; /*Array to store GPIO numbers from EEPROM*/
+ /* Array to store GPIO numbers from EEPROM */
+ UCHAR GPIO_Array[NUM_OF_LEDS+1];
UINT uiIndex = 0;
UINT uiNum_of_LED_Type = 0;
PUCHAR puCFGData = NULL;
UCHAR bData = 0;
memset(GPIO_Array, DISABLE_GPIO_NUM, NUM_OF_LEDS+1);
- if(!Adapter->pstargetparams || IS_ERR(Adapter->pstargetparams))
- {
- BCM_DEBUG_PRINT (Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "Target Params not Avail.\n");
+ if (!Adapter->pstargetparams || IS_ERR(Adapter->pstargetparams)) {
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL, "Target Params not Avail.\n");
return -ENOENT;
}
- /*Populate GPIO_Array with GPIO numbers for LED functions*/
- /*Read the GPIO numbers from EEPROM*/
+ /* Populate GPIO_Array with GPIO numbers for LED functions */
+ /* Read the GPIO numbers from EEPROM */
Status = ReadLEDInformationFromEEPROM(Adapter, GPIO_Array);
- if(Status == STATUS_IMAGE_CHECKSUM_MISMATCH)
- {
+ if (Status == STATUS_IMAGE_CHECKSUM_MISMATCH) {
*bEnableThread = FALSE;
return STATUS_SUCCESS;
- }
- else if(Status)
- {
+ } else if (Status) {
*bEnableThread = FALSE;
return Status;
}
- /*
- * CONFIG file read successfully. Deallocate the memory of
- * uiFileNameBufferSize
- */
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: Config file read successfully\n");
+
+ /*
+ * CONFIG file read successfully. Deallocate the memory of
+ * uiFileNameBufferSize
+ */
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,
+ "LED Thread: Config file read successfully\n");
puCFGData = (PUCHAR) &Adapter->pstargetparams->HostDrvrConfig1;
/*
- * Offset for HostDrvConfig1, HostDrvConfig2, HostDrvConfig3 which
- * will have the information of LED type, LED on state for different
- * driver state and LED blink state.
- */
+ * Offset for HostDrvConfig1, HostDrvConfig2, HostDrvConfig3 which
+ * will have the information of LED type, LED on state for different
+ * driver state and LED blink state.
+ */
- for(uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++)
- {
+ for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
bData = *puCFGData;
- /*Check Bit 8 for polarity. If it is set, polarity is reverse polarity*/
- if(bData & 0x80)
- {
+ /*
+ * Check Bit 8 for polarity. If it is set,
+		 * the polarity is reversed.
+ */
+ if (bData & 0x80) {
Adapter->LEDInfo.LEDState[uiIndex].BitPolarity = 0;
- /*unset the bit 8*/
+ /* unset the bit 8 */
bData = bData & 0x7f;
}
Adapter->LEDInfo.LEDState[uiIndex].LED_Type = bData;
- if(bData <= NUM_OF_LEDS)
- Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num = GPIO_Array[bData];
+ if (bData <= NUM_OF_LEDS)
+ Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num =
+ GPIO_Array[bData];
else
- Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num = DISABLE_GPIO_NUM;
+ Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num =
+ DISABLE_GPIO_NUM;
puCFGData++;
bData = *puCFGData;
Adapter->LEDInfo.LEDState[uiIndex].LED_On_State = bData;
puCFGData++;
bData = *puCFGData;
- Adapter->LEDInfo.LEDState[uiIndex].LED_Blink_State= bData;
+ Adapter->LEDInfo.LEDState[uiIndex].LED_Blink_State = bData;
puCFGData++;
}
- /*Check if all the LED settings are disabled. If it is disabled, dont launch the LED control thread.*/
- for(uiIndex = 0; uiIndex<NUM_OF_LEDS; uiIndex++)
- {
- if((Adapter->LEDInfo.LEDState[uiIndex].LED_Type == DISABLE_GPIO_NUM) ||
- (Adapter->LEDInfo.LEDState[uiIndex].LED_Type == 0x7f) ||
+ /*
+	 * Check if all the LED settings are disabled. If they are,
+	 * don't launch the LED control thread.
+ */
+ for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
+ if ((Adapter->LEDInfo.LEDState[uiIndex].LED_Type == DISABLE_GPIO_NUM) ||
+ (Adapter->LEDInfo.LEDState[uiIndex].LED_Type == 0x7f) ||
(Adapter->LEDInfo.LEDState[uiIndex].LED_Type == 0))
uiNum_of_LED_Type++;
}
- if(uiNum_of_LED_Type >= NUM_OF_LEDS)
+ if (uiNum_of_LED_Type >= NUM_OF_LEDS)
*bEnableThread = FALSE;
return Status;
}
-//--------------------------------------------------------------------------
-// Procedure: LedGpioInit
-//
-// Description: Initializes LED GPIOs. Makes the LED GPIOs to OUTPUT mode and make the
-// initial state to be OFF.
-//
-// Arguments:
-// Adapter - Pointer to MINI_ADAPTER structure.
-//
-// Returns: VOID
-//
-//-----------------------------------------------------------------------------
+/*
+ * -----------------------------------------------------------------------------
+ * Procedure: LedGpioInit
+ *
+ * Description: Initializes LED GPIOs. Sets the LED GPIOs to OUTPUT mode
+ *		and makes the initial state OFF.
+ *
+ * Arguments:
+ * Adapter - Pointer to MINI_ADAPTER structure.
+ *
+ * Returns: VOID
+ *
+ * -----------------------------------------------------------------------------
+ */
static VOID LedGpioInit(PMINI_ADAPTER Adapter)
{
UINT uiResetValue = 0;
UINT uiIndex = 0;
/* Set all LED GPIO Mode to output mode */
- if(rdmalt(Adapter, GPIO_MODE_REGISTER, &uiResetValue, sizeof(uiResetValue)) <0)
- BCM_DEBUG_PRINT (Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: RDM Failed\n");
- for(uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++)
- {
- if(Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num != DISABLE_GPIO_NUM)
+ if (rdmalt(Adapter, GPIO_MODE_REGISTER, &uiResetValue,
+ sizeof(uiResetValue)) < 0)
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL, "LED Thread: RDM Failed\n");
+ for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
+ if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num !=
+ DISABLE_GPIO_NUM)
uiResetValue |= (1 << Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num);
- TURN_OFF_LED(1<<Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num,uiIndex);
+ TURN_OFF_LED(1 << Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num,
+ uiIndex);
}
- if(wrmalt(Adapter, GPIO_MODE_REGISTER, &uiResetValue, sizeof(uiResetValue)) < 0)
- BCM_DEBUG_PRINT (Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: WRM Failed\n");
+ if (wrmalt(Adapter, GPIO_MODE_REGISTER, &uiResetValue,
+ sizeof(uiResetValue)) < 0)
+		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL, "LED Thread: WRM Failed\n");
- Adapter->LEDInfo.bIdle_led_off = FALSE;
+ Adapter->LEDInfo.bIdle_led_off = FALSE;
}
-//-----------------------------------------------------------------------------
-static INT BcmGetGPIOPinInfo(PMINI_ADAPTER Adapter, UCHAR *GPIO_num_tx, UCHAR *GPIO_num_rx ,UCHAR *uiLedTxIndex, UCHAR *uiLedRxIndex,LedEventInfo_t currdriverstate)
+static INT BcmGetGPIOPinInfo(PMINI_ADAPTER Adapter, UCHAR *GPIO_num_tx,
+ UCHAR *GPIO_num_rx, UCHAR *uiLedTxIndex, UCHAR *uiLedRxIndex,
+ LedEventInfo_t currdriverstate)
{
UINT uiIndex = 0;
*GPIO_num_tx = DISABLE_GPIO_NUM;
*GPIO_num_rx = DISABLE_GPIO_NUM;
- for(uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++)
- {
+ for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
- if((currdriverstate == NORMAL_OPERATION)||
- (currdriverstate == IDLEMODE_EXIT)||
- (currdriverstate == FW_DOWNLOAD))
- {
- if(Adapter->LEDInfo.LEDState[uiIndex].LED_Blink_State & currdriverstate)
- {
- if(Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num != DISABLE_GPIO_NUM)
- {
- if(*GPIO_num_tx == DISABLE_GPIO_NUM)
- {
+ if ((currdriverstate == NORMAL_OPERATION) ||
+ (currdriverstate == IDLEMODE_EXIT) ||
+ (currdriverstate == FW_DOWNLOAD)) {
+ if (Adapter->LEDInfo.LEDState[uiIndex].LED_Blink_State &
+ currdriverstate) {
+ if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num
+ != DISABLE_GPIO_NUM) {
+ if (*GPIO_num_tx == DISABLE_GPIO_NUM) {
*GPIO_num_tx = Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num;
*uiLedTxIndex = uiIndex;
- }
- else
- {
+ } else {
*GPIO_num_rx = Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num;
*uiLedRxIndex = uiIndex;
}
}
}
- }
- else
- {
- if(Adapter->LEDInfo.LEDState[uiIndex].LED_On_State & currdriverstate)
- {
- if(Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num != DISABLE_GPIO_NUM)
- {
+ } else {
+ if (Adapter->LEDInfo.LEDState[uiIndex].LED_On_State
+ & currdriverstate) {
+ if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num
+ != DISABLE_GPIO_NUM) {
*GPIO_num_tx = Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num;
*uiLedTxIndex = uiIndex;
}
}
}
}
- return STATUS_SUCCESS ;
+ return STATUS_SUCCESS;
}
static VOID LEDControlThread(PMINI_ADAPTER Adapter)
{
UINT uiIndex = 0;
UCHAR GPIO_num = 0;
- UCHAR uiLedIndex = 0 ;
+ UCHAR uiLedIndex = 0;
UINT uiResetValue = 0;
LedEventInfo_t currdriverstate = 0;
ulong timeout = 0;
INT Status = 0;
- UCHAR dummyGPIONum = 0;
- UCHAR dummyIndex = 0;
+ UCHAR dummyGPIONum = 0;
+ UCHAR dummyIndex = 0;
- //currdriverstate = Adapter->DriverState;
+ /* currdriverstate = Adapter->DriverState; */
Adapter->LEDInfo.bIdleMode_tx_from_host = FALSE;
- /*Wait till event is triggered*/
- //wait_event(Adapter->LEDInfo.notify_led_event,
- // currdriverstate!= Adapter->DriverState);
+ /*
+ * Wait till event is triggered
+ *
+ * wait_event(Adapter->LEDInfo.notify_led_event,
+ * currdriverstate!= Adapter->DriverState);
+ */
- GPIO_num = DISABLE_GPIO_NUM ;
+ GPIO_num = DISABLE_GPIO_NUM;
- while(TRUE)
- {
- /*Wait till event is triggered*/
- if( (GPIO_num == DISABLE_GPIO_NUM)
+ while (TRUE) {
+ /* Wait till event is triggered */
+ if ((GPIO_num == DISABLE_GPIO_NUM)
||
- ((currdriverstate != FW_DOWNLOAD) &&
- (currdriverstate != NORMAL_OPERATION) &&
- (currdriverstate != LOWPOWER_MODE_ENTER))
- ||
- (currdriverstate == LED_THREAD_INACTIVE) )
- {
- Status = wait_event_interruptible(Adapter->LEDInfo.notify_led_event,
- currdriverstate != Adapter->DriverState || kthread_should_stop());
- }
-
- if(kthread_should_stop() || Adapter->device_removed )
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "Led thread got signal to exit..hence exiting");
- Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_DISABLED;
- TURN_OFF_LED(1<<GPIO_num, uiLedIndex);
- return ;//STATUS_FAILURE;
+ ((currdriverstate != FW_DOWNLOAD) &&
+ (currdriverstate != NORMAL_OPERATION) &&
+ (currdriverstate != LOWPOWER_MODE_ENTER))
+ ||
+ (currdriverstate == LED_THREAD_INACTIVE))
+ Status = wait_event_interruptible(
+ Adapter->LEDInfo.notify_led_event,
+ currdriverstate != Adapter->DriverState
+ || kthread_should_stop());
+
+ if (kthread_should_stop() || Adapter->device_removed) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL,
+ "Led thread got signal to exit..hence exiting");
+ Adapter->LEDInfo.led_thread_running =
+ BCM_LED_THREAD_DISABLED;
+ TURN_OFF_LED(1 << GPIO_num, uiLedIndex);
+ return; /* STATUS_FAILURE; */
}
- if(GPIO_num != DISABLE_GPIO_NUM)
- {
- TURN_OFF_LED(1<<GPIO_num, uiLedIndex);
- }
+ if (GPIO_num != DISABLE_GPIO_NUM)
+ TURN_OFF_LED(1 << GPIO_num, uiLedIndex);
- if(Adapter->LEDInfo.bLedInitDone == FALSE)
- {
+ if (Adapter->LEDInfo.bLedInitDone == FALSE) {
LedGpioInit(Adapter);
Adapter->LEDInfo.bLedInitDone = TRUE;
}
- switch(Adapter->DriverState)
- {
- case DRIVER_INIT:
- {
- currdriverstate = DRIVER_INIT;//Adapter->DriverState;
- BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum, &uiLedIndex, &dummyIndex, currdriverstate);
+ switch (Adapter->DriverState) {
+ case DRIVER_INIT:
+ currdriverstate = DRIVER_INIT;
+ /* Adapter->DriverState; */
+ BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum,
+ &uiLedIndex, &dummyIndex, currdriverstate);
+
+ if (GPIO_num != DISABLE_GPIO_NUM)
+ TURN_ON_LED(1 << GPIO_num, uiLedIndex);
- if(GPIO_num != DISABLE_GPIO_NUM)
- {
- TURN_ON_LED(1<<GPIO_num, uiLedIndex);
- }
- }
break;
- case FW_DOWNLOAD:
- {
- //BCM_DEBUG_PRINT (Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: FW_DN_DONE called\n");
- currdriverstate = FW_DOWNLOAD;
- BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum, &uiLedIndex, &dummyIndex, currdriverstate);
-
- if(GPIO_num != DISABLE_GPIO_NUM)
- {
- timeout = 50;
- LED_Blink(Adapter, 1<<GPIO_num, uiLedIndex, timeout, -1,currdriverstate);
- }
+ case FW_DOWNLOAD:
+ /*
+ * BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS,
+ * LED_DUMP_INFO, DBG_LVL_ALL,
+ * "LED Thread: FW_DN_DONE called\n");
+ */
+ currdriverstate = FW_DOWNLOAD;
+ BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum,
+ &uiLedIndex, &dummyIndex, currdriverstate);
+
+ if (GPIO_num != DISABLE_GPIO_NUM) {
+ timeout = 50;
+ LED_Blink(Adapter, 1 << GPIO_num, uiLedIndex,
+ timeout, -1, currdriverstate);
}
break;
- case FW_DOWNLOAD_DONE:
- {
- currdriverstate = FW_DOWNLOAD_DONE;
- BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum, &uiLedIndex, &dummyIndex,currdriverstate);
- if(GPIO_num != DISABLE_GPIO_NUM)
- {
- TURN_ON_LED(1<<GPIO_num, uiLedIndex);
- }
- }
+ case FW_DOWNLOAD_DONE:
+ currdriverstate = FW_DOWNLOAD_DONE;
+ BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum,
+ &uiLedIndex, &dummyIndex, currdriverstate);
+ if (GPIO_num != DISABLE_GPIO_NUM)
+ TURN_ON_LED(1 << GPIO_num, uiLedIndex);
break;
- case SHUTDOWN_EXIT:
- //no break, continue to NO_NETWORK_ENTRY state as well.
-
- case NO_NETWORK_ENTRY:
- {
- currdriverstate = NO_NETWORK_ENTRY;
- BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum, &uiLedIndex,&dummyGPIONum,currdriverstate);
- if(GPIO_num != DISABLE_GPIO_NUM)
- {
- TURN_ON_LED(1<<GPIO_num, uiLedIndex);
- }
- }
+ case SHUTDOWN_EXIT:
+ /*
+ * no break, continue to NO_NETWORK_ENTRY
+ * state as well.
+ */
+ case NO_NETWORK_ENTRY:
+ currdriverstate = NO_NETWORK_ENTRY;
+ BcmGetGPIOPinInfo(Adapter, &GPIO_num, &dummyGPIONum,
+ &uiLedIndex, &dummyGPIONum, currdriverstate);
+ if (GPIO_num != DISABLE_GPIO_NUM)
+ TURN_ON_LED(1 << GPIO_num, uiLedIndex);
break;
- case NORMAL_OPERATION:
+ case NORMAL_OPERATION:
{
UCHAR GPIO_num_tx = DISABLE_GPIO_NUM;
UCHAR GPIO_num_rx = DISABLE_GPIO_NUM;
UCHAR uiLEDTx = 0;
UCHAR uiLEDRx = 0;
currdriverstate = NORMAL_OPERATION;
- Adapter->LEDInfo.bIdle_led_off = FALSE;
-
- BcmGetGPIOPinInfo(Adapter, &GPIO_num_tx, &GPIO_num_rx, &uiLEDTx,&uiLEDRx,currdriverstate);
- if((GPIO_num_tx == DISABLE_GPIO_NUM) && (GPIO_num_rx == DISABLE_GPIO_NUM))
- {
- GPIO_num = DISABLE_GPIO_NUM ;
- }
- else
- {
- /*If single LED is selected, use same for both Tx and Rx*/
- if(GPIO_num_tx == DISABLE_GPIO_NUM)
- {
+ Adapter->LEDInfo.bIdle_led_off = FALSE;
+
+ BcmGetGPIOPinInfo(Adapter, &GPIO_num_tx,
+ &GPIO_num_rx, &uiLEDTx, &uiLEDRx,
+ currdriverstate);
+ if ((GPIO_num_tx == DISABLE_GPIO_NUM) &&
+				(GPIO_num_rx == DISABLE_GPIO_NUM)) {
+ GPIO_num = DISABLE_GPIO_NUM;
+ } else {
+ /*
+ * If single LED is selected, use same
+ * for both Tx and Rx
+ */
+ if (GPIO_num_tx == DISABLE_GPIO_NUM) {
GPIO_num_tx = GPIO_num_rx;
uiLEDTx = uiLEDRx;
- }
- else if(GPIO_num_rx == DISABLE_GPIO_NUM)
- {
+				} else if (GPIO_num_rx == DISABLE_GPIO_NUM) {
GPIO_num_rx = GPIO_num_tx;
uiLEDRx = uiLEDTx;
}
- /*Blink the LED in proportionate to Tx and Rx transmissions.*/
- LED_Proportional_Blink(Adapter, GPIO_num_tx, uiLEDTx, GPIO_num_rx, uiLEDRx,currdriverstate);
+ /*
+ * Blink the LED in proportionate
+ * to Tx and Rx transmissions.
+ */
+ LED_Proportional_Blink(Adapter,
+ GPIO_num_tx, uiLEDTx,
+ GPIO_num_rx, uiLEDRx,
+ currdriverstate);
}
}
break;
- case LOWPOWER_MODE_ENTER:
- {
- currdriverstate = LOWPOWER_MODE_ENTER;
- if( DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING == Adapter->ulPowerSaveMode)
- {
- /* Turn OFF all the LED */
- uiResetValue = 0;
- for(uiIndex =0; uiIndex < NUM_OF_LEDS; uiIndex++)
- {
- if(Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num != DISABLE_GPIO_NUM)
- TURN_OFF_LED((1<<Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num),uiIndex);
- }
-
+ case LOWPOWER_MODE_ENTER:
+ currdriverstate = LOWPOWER_MODE_ENTER;
+ if (DEVICE_POWERSAVE_MODE_AS_MANUAL_CLOCK_GATING ==
+ Adapter->ulPowerSaveMode) {
+			/* Turn OFF all the LEDs */
+ uiResetValue = 0;
+ for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
+ if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num != DISABLE_GPIO_NUM)
+ TURN_OFF_LED((1 << Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num), uiIndex);
}
- /* Turn off LED And WAKE-UP for Sendinf IDLE mode ACK */
- Adapter->LEDInfo.bLedInitDone = FALSE;
- Adapter->LEDInfo.bIdle_led_off = TRUE;
- wake_up(&Adapter->LEDInfo.idleModeSyncEvent);
- GPIO_num = DISABLE_GPIO_NUM;
- break;
- }
- case IDLEMODE_CONTINUE:
- {
- currdriverstate = IDLEMODE_CONTINUE;
- GPIO_num = DISABLE_GPIO_NUM;
+
}
+		/* Turn off LED and wake up for sending IDLE mode ACK */
+ Adapter->LEDInfo.bLedInitDone = FALSE;
+ Adapter->LEDInfo.bIdle_led_off = TRUE;
+ wake_up(&Adapter->LEDInfo.idleModeSyncEvent);
+ GPIO_num = DISABLE_GPIO_NUM;
break;
- case IDLEMODE_EXIT:
- {
- }
+ case IDLEMODE_CONTINUE:
+ currdriverstate = IDLEMODE_CONTINUE;
+ GPIO_num = DISABLE_GPIO_NUM;
break;
- case DRIVER_HALT:
- {
- currdriverstate = DRIVER_HALT;
- GPIO_num = DISABLE_GPIO_NUM;
- for(uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++)
- {
- if(Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num !=
- DISABLE_GPIO_NUM)
- TURN_OFF_LED((1<<Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num),uiIndex);
- }
- //Adapter->DriverState = DRIVER_INIT;
+ case IDLEMODE_EXIT:
+ break;
+ case DRIVER_HALT:
+ currdriverstate = DRIVER_HALT;
+ GPIO_num = DISABLE_GPIO_NUM;
+ for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
+ if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num
+ != DISABLE_GPIO_NUM)
+ TURN_OFF_LED((1 << Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num), uiIndex);
}
+ /* Adapter->DriverState = DRIVER_INIT; */
break;
- case LED_THREAD_INACTIVE :
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"InActivating LED thread...");
- currdriverstate = LED_THREAD_INACTIVE;
- Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_RUNNING_INACTIVELY ;
- Adapter->LEDInfo.bLedInitDone = FALSE ;
- //disable ALL LED
- for(uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++)
- {
- if(Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num !=
- DISABLE_GPIO_NUM)
- TURN_OFF_LED((1<<Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num),uiIndex);
- }
+ case LED_THREAD_INACTIVE:
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL, "InActivating LED thread...");
+ currdriverstate = LED_THREAD_INACTIVE;
+ Adapter->LEDInfo.led_thread_running =
+ BCM_LED_THREAD_RUNNING_INACTIVELY;
+ Adapter->LEDInfo.bLedInitDone = FALSE;
+		/* Disable all the LEDs */
+ for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
+ if (Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num
+ != DISABLE_GPIO_NUM)
+ TURN_OFF_LED((1 << Adapter->LEDInfo.LEDState[uiIndex].GPIO_Num), uiIndex);
}
break;
- case LED_THREAD_ACTIVE :
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"Activating LED thread again...");
- if(Adapter->LinkUpStatus == FALSE)
- Adapter->DriverState = NO_NETWORK_ENTRY;
- else
- Adapter->DriverState = NORMAL_OPERATION;
+ case LED_THREAD_ACTIVE:
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL, "Activating LED thread again...");
+ if (Adapter->LinkUpStatus == FALSE)
+ Adapter->DriverState = NO_NETWORK_ENTRY;
+ else
+ Adapter->DriverState = NORMAL_OPERATION;
- Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_RUNNING_ACTIVELY ;
- }
+ Adapter->LEDInfo.led_thread_running =
+ BCM_LED_THREAD_RUNNING_ACTIVELY;
+ break;
+ /* return; */
+ default:
break;
- //return;
- default:
- break;
}
}
Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_DISABLED;
@@ -839,49 +863,54 @@ int InitLedSettings(PMINI_ADAPTER Adapter)
BOOLEAN bEnableThread = TRUE;
UCHAR uiIndex = 0;
- /*Initially set BitPolarity to normal polarity. The bit 8 of LED type
- * is used to change the polarity of the LED.*/
+ /*
+	 * Initially set BitPolarity to normal polarity. Bit 8 of the LED type
+	 * is used to change the polarity of the LED.
+ * is used to change the polarity of the LED.
+ */
- for(uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++) {
+ for (uiIndex = 0; uiIndex < NUM_OF_LEDS; uiIndex++)
Adapter->LEDInfo.LEDState[uiIndex].BitPolarity = 1;
- }
- /*Read the LED settings of CONFIG file and map it to GPIO numbers in EEPROM*/
+ /*
+ * Read the LED settings of CONFIG file and map it
+ * to GPIO numbers in EEPROM
+ */
Status = ReadConfigFileStructure(Adapter, &bEnableThread);
- if(STATUS_SUCCESS != Status)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL,"LED Thread: FAILED in ReadConfigFileStructure\n");
+ if (STATUS_SUCCESS != Status) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL,
+ "LED Thread: FAILED in ReadConfigFileStructure\n");
return Status;
}
- if(Adapter->LEDInfo.led_thread_running)
- {
- if(bEnableThread)
+ if (Adapter->LEDInfo.led_thread_running) {
+ if (bEnableThread) {
;
- else
- {
+ } else {
Adapter->DriverState = DRIVER_HALT;
wake_up(&Adapter->LEDInfo.notify_led_event);
- Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_DISABLED;
+ Adapter->LEDInfo.led_thread_running =
+ BCM_LED_THREAD_DISABLED;
}
- }
-
- else if(bEnableThread)
- {
- /*Create secondary thread to handle the LEDs*/
+ } else if (bEnableThread) {
+ /* Create secondary thread to handle the LEDs */
init_waitqueue_head(&Adapter->LEDInfo.notify_led_event);
init_waitqueue_head(&Adapter->LEDInfo.idleModeSyncEvent);
- Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_RUNNING_ACTIVELY;
- Adapter->LEDInfo.bIdle_led_off = FALSE;
- Adapter->LEDInfo.led_cntrl_threadid = kthread_run((int (*)(void *))
- LEDControlThread, Adapter, "led_control_thread");
- if(IS_ERR(Adapter->LEDInfo.led_cntrl_threadid))
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_OTHERS, LED_DUMP_INFO, DBG_LVL_ALL, "Not able to spawn Kernel Thread\n");
- Adapter->LEDInfo.led_thread_running = BCM_LED_THREAD_DISABLED;
- return PTR_ERR(Adapter->LEDInfo.led_cntrl_threadid);
- }
+ Adapter->LEDInfo.led_thread_running =
+ BCM_LED_THREAD_RUNNING_ACTIVELY;
+ Adapter->LEDInfo.bIdle_led_off = FALSE;
+ Adapter->LEDInfo.led_cntrl_threadid =
+ kthread_run((int (*)(void *)) LEDControlThread,
+ Adapter, "led_control_thread");
+ if (IS_ERR(Adapter->LEDInfo.led_cntrl_threadid)) {
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, LED_DUMP_INFO,
+ DBG_LVL_ALL,
+ "Not able to spawn Kernel Thread\n");
+ Adapter->LEDInfo.led_thread_running =
+ BCM_LED_THREAD_DISABLED;
+ return PTR_ERR(Adapter->LEDInfo.led_cntrl_threadid);
+ }
}
return Status;
}
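The LED handling above hinges on one kernel idiom: a helper thread started with kthread_run() that parks on a wait queue until either the driver state changes or a stop is requested. The following minimal sketch shows that lifecycle in isolation; it is illustrative only, and names such as my_state, my_wq and my_led_thread are not part of the driver (my_state stands in for Adapter->DriverState).

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/err.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static int my_state;			/* stands in for Adapter->DriverState */
static struct task_struct *my_task;

static int my_led_thread(void *data)
{
	int seen = my_state;

	while (!kthread_should_stop()) {
		/* Sleep until the tracked state changes or a stop is requested. */
		wait_event_interruptible(my_wq,
					 seen != my_state ||
					 kthread_should_stop());
		seen = my_state;
		/* ...drive the LEDs for the new state here... */
	}
	return 0;
}

static int my_start(void)
{
	my_task = kthread_run(my_led_thread, NULL, "led_control_thread");
	return IS_ERR(my_task) ? PTR_ERR(my_task) : 0;
}

static void my_change_state(int new_state)
{
	my_state = new_state;
	wake_up(&my_wq);	/* lets wait_event_interruptible() return */
}

kthread_stop() on teardown wakes the sleeper as well, which is why the wait condition also tests kthread_should_stop(), exactly as LEDControlThread does.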
diff --git a/drivers/staging/bcm/nvm.c b/drivers/staging/bcm/nvm.c
index 3de0daf..7d703cb 100644
--- a/drivers/staging/bcm/nvm.c
+++ b/drivers/staging/bcm/nvm.c
@@ -78,7 +78,7 @@ static UCHAR ReadEEPROMStatusRegister( PMINI_ADAPTER Adapter )
{
value=0;
uiStatus = 0 ;
- rdmalt( Adapter, EEPROM_SPI_Q_STATUS1_REG,&uiStatus, sizeof(uiStatus));
+ rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &uiStatus, sizeof(uiStatus));
if(Adapter->device_removed == TRUE)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Modem has got removed hence exiting....");
@@ -93,7 +93,7 @@ static UCHAR ReadEEPROMStatusRegister( PMINI_ADAPTER Adapter )
wrmalt( Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
value =0;
- rdmalt(Adapter, EEPROM_READ_DATAQ_REG,&value, sizeof(value));
+ rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value));
uiData = (UCHAR)value;
break;
@@ -102,8 +102,8 @@ static UCHAR ReadEEPROMStatusRegister( PMINI_ADAPTER Adapter )
dwRetries-- ;
if ( dwRetries == 0 )
{
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG,&value, sizeof(value));
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS_REG,&value1, sizeof(value1));
+ rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
+ rdmalt(Adapter, EEPROM_SPI_Q_STATUS_REG, &value1, sizeof(value1));
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"0x3004 = %x 0x3008 = %x, retries = %d failed.\n",value,value1, MAX_EEPROM_RETRIES*RETRIES_PER_DELAY);
return uiData;
}
@@ -158,7 +158,7 @@ INT ReadBeceemEEPROMBulk( PMINI_ADAPTER Adapter,
{
uiStatus = 0;
- rdmalt( Adapter, EEPROM_SPI_Q_STATUS1_REG, &uiStatus, sizeof(uiStatus));
+ rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &uiStatus, sizeof(uiStatus));
if(Adapter->device_removed == TRUE)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Modem has got Removed.hence exiting from loop...");
@@ -202,8 +202,8 @@ INT ReadBeceemEEPROMBulk( PMINI_ADAPTER Adapter,
{
value=0;
value1=0;
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG,&value, sizeof(value));
- rdmalt(Adapter, EEPROM_SPI_Q_STATUS_REG,&value1, sizeof(value1));
+ rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &value, sizeof(value));
+ rdmalt(Adapter, EEPROM_SPI_Q_STATUS_REG, &value1, sizeof(value1));
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "dwNumWords %d 0x3004 = %x 0x3008 = %x retries = %d failed.\n", dwNumWords, value, value1, MAX_EEPROM_RETRIES*RETRIES_PER_DELAY);
return STATUS_FAILURE;
}
@@ -217,22 +217,22 @@ INT ReadBeceemEEPROMBulk( PMINI_ADAPTER Adapter,
pvalue = (PUCHAR)(pdwData + dwIndex);
value =0;
- rdmalt(Adapter, EEPROM_READ_DATAQ_REG,&value, sizeof(value));
+ rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value));
pvalue[0] = value;
value = 0;
- rdmalt(Adapter, EEPROM_READ_DATAQ_REG,&value, sizeof(value));
+ rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value));
pvalue[1] = value;
value =0;
- rdmalt(Adapter, EEPROM_READ_DATAQ_REG,&value, sizeof(value));
+ rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value));
pvalue[2] = value;
value = 0;
- rdmalt(Adapter, EEPROM_READ_DATAQ_REG,&value, sizeof(value));
+ rdmalt(Adapter, EEPROM_READ_DATAQ_REG, &value, sizeof(value));
pvalue[3] = value;
}
@@ -445,6 +445,7 @@ static INT BeceemFlashBulkRead(
UINT uiBytesToRead = uiNumBytes;
INT Status = 0;
UINT uiPartOffset = 0;
+ int bytes;
if(Adapter->device_removed )
{
@@ -469,9 +470,9 @@ static INT BeceemFlashBulkRead(
uiBytesToRead = MAX_RW_SIZE - (uiOffset%MAX_RW_SIZE);
uiBytesToRead = MIN(uiNumBytes,uiBytesToRead);
- if(rdm(Adapter,uiPartOffset, (PCHAR)pBuffer+uiIndex,uiBytesToRead))
- {
- Status = -1;
+ bytes = rdm(Adapter, uiPartOffset, (PCHAR)pBuffer+uiIndex, uiBytesToRead);
+ if (bytes < 0) {
+ Status = bytes;
Adapter->SelectedChip = RESET_CHIP_SELECT;
return Status;
}
@@ -488,9 +489,9 @@ static INT BeceemFlashBulkRead(
uiBytesToRead = MIN(uiNumBytes,MAX_RW_SIZE);
- if(rdm(Adapter,uiPartOffset, (PCHAR)pBuffer+uiIndex,uiBytesToRead))
- {
- Status = -1;
+ bytes = rdm(Adapter, uiPartOffset, (PCHAR)pBuffer+uiIndex, uiBytesToRead);
+ if (bytes < 0) {
+ Status = bytes;
break;
}
@@ -613,6 +614,7 @@ static INT FlashSectorErase(PMINI_ADAPTER Adapter,
UINT iIndex = 0, iRetries = 0;
UINT uiStatus = 0;
UINT value;
+ int bytes;
for(iIndex=0;iIndex<numOfSectors;iIndex++)
{
@@ -632,10 +634,11 @@ static INT FlashSectorErase(PMINI_ADAPTER Adapter,
return STATUS_FAILURE;
}
- if(rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus)) < 0 )
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Reading status of FLASH_SPI_READQ_REG fails");
- return STATUS_FAILURE;
+ bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus));
+ if (bytes < 0) {
+ uiStatus = bytes;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails");
+ return uiStatus;
}
iRetries++;
//After every try lets make the CPU free for 10 ms. generally time taken by the
@@ -679,6 +682,7 @@ static INT flashByteWrite(
UINT value;
ULONG ulData = *(PUCHAR)pData;
+ int bytes;
//
// need not write 0xFF because write requires an erase and erase will
@@ -720,10 +724,11 @@ static INT flashByteWrite(
return STATUS_FAILURE;
}
//__udelay(1);
- if(rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus)) < 0)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Reading status of FLASH_SPI_READQ_REG fails");
- return STATUS_FAILURE;
+ bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus));
+ if (bytes < 0) {
+ uiStatus = bytes;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails");
+ return uiStatus;
}
iRetries--;
if( iRetries && ((iRetries % FLASH_PER_RETRIES_DELAY) == 0))
@@ -771,6 +776,7 @@ static INT flashWrite(
UINT value;
UINT uiErasePattern[4] = {0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF};
+ int bytes;
//
// need not write 0xFFFFFFFF because write requires an erase and erase will
// make whole sector 0xFFFFFFFF.
@@ -803,10 +809,11 @@ static INT flashWrite(
return STATUS_FAILURE;
}
//__udelay(1);
- if(rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus)) < 0 )
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Reading status of FLASH_SPI_READQ_REG fails");
- return STATUS_FAILURE;
+ bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus));
+ if (bytes < 0) {
+ uiStatus = bytes;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails");
+ return uiStatus;
}
iRetries--;
@@ -849,6 +856,7 @@ static INT flashByteWriteStatus(
INT iRetries = MAX_FLASH_RETRIES * FLASH_PER_RETRIES_DELAY; //3
ULONG ulData = *(PUCHAR)pData;
UINT value;
+ int bytes;
//
// need not write 0xFFFFFFFF because write requires an erase and erase will
@@ -891,10 +899,11 @@ static INT flashByteWriteStatus(
return STATUS_FAILURE;
}
//__udelay(1);
- if(rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus)) < 0)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Reading status of FLASH_SPI_READQ_REG fails");
- return STATUS_FAILURE;
+ bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus));
+ if (bytes < 0) {
+ uiStatus = bytes;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails");
+ return uiStatus;
}
iRetries--;
@@ -935,6 +944,7 @@ static INT flashWriteStatus(
//UINT uiReadBack = 0;
UINT value;
UINT uiErasePattern[4] = {0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF,0xFFFFFFFF};
+ int bytes;
//
// need not write 0xFFFFFFFF because write requires an erase and erase will
@@ -967,10 +977,11 @@ static INT flashWriteStatus(
return STATUS_FAILURE;
}
//__udelay(1);
- if(rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus)) < 0)
- {
- BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Reading status of FLASH_SPI_READQ_REG fails");
- return STATUS_FAILURE;
+ bytes = rdmalt(Adapter, FLASH_SPI_READQ_REG, &uiStatus, sizeof(uiStatus));
+ if (bytes < 0) {
+ uiStatus = bytes;
+ BCM_DEBUG_PRINT(Adapter, DBG_TYPE_PRINTK, 0, 0, "Reading status of FLASH_SPI_READQ_REG fails");
+ return uiStatus;
}
iRetries--;
//this will ensure that in there will be no changes in the current path.
@@ -1841,7 +1852,7 @@ static INT BeceemEEPROMWritePage( PMINI_ADAPTER Adapter, UINT uiData[], UINT uiO
* What we are checking if the previous write has completed, and this
* may take time. We should wait till the Empty bit is set. */
uiStatus = 0;
- rdmalt( Adapter, EEPROM_SPI_Q_STATUS1_REG,&uiStatus, sizeof(uiStatus)) ;
+ rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &uiStatus, sizeof(uiStatus));
while ( ( uiStatus & EEPROM_WRITE_QUEUE_EMPTY ) == 0 )
{
uiRetries--;
@@ -1855,7 +1866,7 @@ static INT BeceemEEPROMWritePage( PMINI_ADAPTER Adapter, UINT uiData[], UINT uiO
msleep(1);
uiStatus = 0;
- rdmalt( Adapter, EEPROM_SPI_Q_STATUS1_REG,&uiStatus, sizeof(uiStatus)) ;
+ rdmalt(Adapter, EEPROM_SPI_Q_STATUS1_REG, &uiStatus, sizeof(uiStatus));
if(Adapter->device_removed == TRUE)
{
BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Modem got removed hence exiting from loop....");
@@ -2500,7 +2511,7 @@ static ULONG BcmReadFlashRDID(PMINI_ADAPTER Adapter)
// Read SPI READQ REG. The output will be WWXXYYZZ.
// The ID is 3Bytes long and is WWXXYY. ZZ needs to be Ignored.
//
- rdmalt(Adapter, FLASH_SPI_READQ_REG,(PUINT)&ulRDID, sizeof(ulRDID));
+ rdmalt(Adapter, FLASH_SPI_READQ_REG, (PUINT)&ulRDID, sizeof(ulRDID));
return (ulRDID >>8);
@@ -4735,8 +4746,8 @@ static INT BcmDoChipSelect(PMINI_ADAPTER Adapter, UINT offset)
Adapter->SelectedChip = ChipNum ;
//bit[13..12] will select the appropriate chip
- rdmalt(Adapter,FLASH_CONFIG_REG, &FlashConfig, 4);
- rdmalt(Adapter,FLASH_GPIO_CONFIG_REG, &GPIOConfig, 4);
+ rdmalt(Adapter, FLASH_CONFIG_REG, &FlashConfig, 4);
+ rdmalt(Adapter, FLASH_GPIO_CONFIG_REG, &GPIOConfig, 4);
{
switch(ChipNum)
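The nvm.c hunks above all follow the same error-handling change: the return value of rdm()/rdmalt() is captured in a local int and, when negative, returned as-is instead of being flattened to a generic failure code. A condensed sketch of that pattern, assuming the driver's existing rdm() helper and types (PMINI_ADAPTER, STATUS_SUCCESS); my_read_block is an illustrative name:

static int my_read_block(PMINI_ADAPTER Adapter, unsigned int offset,
			 char *buffer, unsigned int len)
{
	int bytes;

	bytes = rdm(Adapter, offset, buffer, len);	/* negative errno on failure */
	if (bytes < 0)
		return bytes;		/* propagate -EIO/-ENODEV/... unchanged */

	return STATUS_SUCCESS;		/* the driver's success code */
}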
diff --git a/drivers/staging/bcm/target_params.h b/drivers/staging/bcm/target_params.h
index 2d8b8a3..1487638 100644
--- a/drivers/staging/bcm/target_params.h
+++ b/drivers/staging/bcm/target_params.h
@@ -72,8 +72,8 @@ typedef struct _TARGET_PARAMS
// removed SHUT down related 'unused' params from here to sync 4.x and 5.x CFG files..
//BAMC Related Parameters
- //Bit 0-15 Band AMC signaling configuration: Bit 1 = 1 Enable Band AMC signaling.
- //bit 16-31 Band AMC Data configuration: Bit 16 = 1 Band AMC 2x3 support.
+ //Bit 0-15 Band AMC signaling configuration: Bit 1 = 1 – Enable Band AMC signaling.
+ //bit 16-31 Band AMC Data configuration: Bit 16 = 1 – Band AMC 2x3 support.
B_UINT32 m_u32BandAMCEnable;
} stTargetParams,TARGET_PARAMS,*PTARGET_PARAMS, STARGETPARAMS, *PSTARGETPARAMS;
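The comedi_fops.c diff that follows changes comedi_autoconfig from int to bool because module_param(..., bool, ...) expects the backing variable to be a bool, so the extern declaration in comedi_fops.h has to change with it. Roughly, the pattern looks like this (my_autoconfig is an illustrative name, not comedi's):

#include <linux/module.h>
#include <linux/moduleparam.h>

static bool my_autoconfig = true;	/* previously "int my_autoconfig = 1;" */
module_param(my_autoconfig, bool, 0444);
MODULE_PARM_DESC(my_autoconfig,
		 "automatically configure detected devices (illustrative)");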
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 5e78c77..9bcf87a 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -61,7 +61,7 @@ EXPORT_SYMBOL(comedi_debug);
module_param(comedi_debug, int, 0644);
#endif
-int comedi_autoconfig = 1;
+bool comedi_autoconfig = 1;
module_param(comedi_autoconfig, bool, 0444);
static int comedi_num_legacy_minors;
@@ -1479,10 +1479,10 @@ static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
dev_file_info = comedi_get_device_file_info(minor);
if (dev_file_info == NULL)
- return -ENODEV;
+ return -ENODEV;
dev = dev_file_info->device;
if (dev == NULL)
- return -ENODEV;
+ return -ENODEV;
mutex_lock(&dev->mutex);
if (!dev->attached) {
@@ -1556,10 +1556,10 @@ static unsigned int comedi_poll(struct file *file, poll_table * wait)
dev_file_info = comedi_get_device_file_info(minor);
if (dev_file_info == NULL)
- return -ENODEV;
+ return -ENODEV;
dev = dev_file_info->device;
if (dev == NULL)
- return -ENODEV;
+ return -ENODEV;
mutex_lock(&dev->mutex);
if (!dev->attached) {
@@ -1610,10 +1610,10 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
dev_file_info = comedi_get_device_file_info(minor);
if (dev_file_info == NULL)
- return -ENODEV;
+ return -ENODEV;
dev = dev_file_info->device;
if (dev == NULL)
- return -ENODEV;
+ return -ENODEV;
if (!dev->attached) {
DPRINTK("no driver configured on comedi%i\n", dev->minor);
@@ -1721,10 +1721,10 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
dev_file_info = comedi_get_device_file_info(minor);
if (dev_file_info == NULL)
- return -ENODEV;
+ return -ENODEV;
dev = dev_file_info->device;
if (dev == NULL)
- return -ENODEV;
+ return -ENODEV;
if (!dev->attached) {
DPRINTK("no driver configured on comedi%i\n", dev->minor);
@@ -1931,10 +1931,10 @@ static int comedi_close(struct inode *inode, struct file *file)
dev_file_info = comedi_get_device_file_info(minor);
if (dev_file_info == NULL)
- return -ENODEV;
+ return -ENODEV;
dev = dev_file_info->device;
if (dev == NULL)
- return -ENODEV;
+ return -ENODEV;
mutex_lock(&dev->mutex);
@@ -1973,10 +1973,10 @@ static int comedi_fasync(int fd, struct file *file, int on)
dev_file_info = comedi_get_device_file_info(minor);
if (dev_file_info == NULL)
- return -ENODEV;
+ return -ENODEV;
dev = dev_file_info->device;
if (dev == NULL)
- return -ENODEV;
+ return -ENODEV;
return fasync_helper(fd, file, on, &dev->async_queue);
}
@@ -2479,18 +2479,18 @@ static ssize_t store_max_read_buffer_kb(struct device *dev,
const char *buf, size_t count)
{
struct comedi_device_file_info *info = dev_get_drvdata(dev);
- unsigned long new_max_size_kb;
- uint64_t new_max_size;
+ unsigned int new_max_size_kb;
+ unsigned int new_max_size;
+ int ret;
struct comedi_subdevice *const read_subdevice =
comedi_get_read_subdevice(info);
- if (strict_strtoul(buf, 10, &new_max_size_kb))
- return -EINVAL;
- if (new_max_size_kb != (uint32_t) new_max_size_kb)
- return -EINVAL;
- new_max_size = ((uint64_t) new_max_size_kb) * bytes_per_kibi;
- if (new_max_size != (uint32_t) new_max_size)
+ ret = kstrtouint(buf, 10, &new_max_size_kb);
+ if (ret)
+ return ret;
+ if (new_max_size_kb > (UINT_MAX / bytes_per_kibi))
return -EINVAL;
+ new_max_size = new_max_size_kb * bytes_per_kibi;
mutex_lock(&info->device->mutex);
if (read_subdevice == NULL ||
@@ -2540,19 +2540,19 @@ static ssize_t store_read_buffer_kb(struct device *dev,
const char *buf, size_t count)
{
struct comedi_device_file_info *info = dev_get_drvdata(dev);
- unsigned long new_size_kb;
- uint64_t new_size;
+ unsigned int new_size_kb;
+ unsigned int new_size;
int retval;
+ int ret;
struct comedi_subdevice *const read_subdevice =
comedi_get_read_subdevice(info);
- if (strict_strtoul(buf, 10, &new_size_kb))
- return -EINVAL;
- if (new_size_kb != (uint32_t) new_size_kb)
- return -EINVAL;
- new_size = ((uint64_t) new_size_kb) * bytes_per_kibi;
- if (new_size != (uint32_t) new_size)
+ ret = kstrtouint(buf, 10, &new_size_kb);
+ if (ret)
+ return ret;
+ if (new_size_kb > (UINT_MAX / bytes_per_kibi))
return -EINVAL;
+ new_size = new_size_kb * bytes_per_kibi;
mutex_lock(&info->device->mutex);
if (read_subdevice == NULL ||
@@ -2606,18 +2606,18 @@ static ssize_t store_max_write_buffer_kb(struct device *dev,
const char *buf, size_t count)
{
struct comedi_device_file_info *info = dev_get_drvdata(dev);
- unsigned long new_max_size_kb;
- uint64_t new_max_size;
+ unsigned int new_max_size_kb;
+ unsigned int new_max_size;
+ int ret;
struct comedi_subdevice *const write_subdevice =
comedi_get_write_subdevice(info);
- if (strict_strtoul(buf, 10, &new_max_size_kb))
- return -EINVAL;
- if (new_max_size_kb != (uint32_t) new_max_size_kb)
- return -EINVAL;
- new_max_size = ((uint64_t) new_max_size_kb) * bytes_per_kibi;
- if (new_max_size != (uint32_t) new_max_size)
+ ret = kstrtouint(buf, 10, &new_max_size_kb);
+ if (ret)
+ return ret;
+ if (new_max_size_kb > (UINT_MAX / bytes_per_kibi))
return -EINVAL;
+ new_max_size = new_max_size_kb * bytes_per_kibi;
mutex_lock(&info->device->mutex);
if (write_subdevice == NULL ||
@@ -2667,19 +2667,19 @@ static ssize_t store_write_buffer_kb(struct device *dev,
const char *buf, size_t count)
{
struct comedi_device_file_info *info = dev_get_drvdata(dev);
- unsigned long new_size_kb;
- uint64_t new_size;
+ unsigned int new_size_kb;
+ unsigned int new_size;
int retval;
+ int ret;
struct comedi_subdevice *const write_subdevice =
comedi_get_write_subdevice(info);
- if (strict_strtoul(buf, 10, &new_size_kb))
- return -EINVAL;
- if (new_size_kb != (uint32_t) new_size_kb)
+ ret = kstrtouint(buf, 10, &new_size_kb);
+ if (ret)
+ return ret;
+ if (new_size_kb > (UINT_MAX / bytes_per_kibi))
return -EINVAL;
new_size = ((uint64_t) new_size_kb) * bytes_per_kibi;
- if (new_size != (uint32_t) new_size)
- return -EINVAL;
mutex_lock(&info->device->mutex);
if (write_subdevice == NULL ||
diff --git a/drivers/staging/comedi/comedi_fops.h b/drivers/staging/comedi/comedi_fops.h
index da4b4f5..006cf14 100644
--- a/drivers/staging/comedi/comedi_fops.h
+++ b/drivers/staging/comedi/comedi_fops.h
@@ -1,10 +1,11 @@
#ifndef _COMEDI_FOPS_H
#define _COMEDI_FOPS_H
+#include <linux/types.h>
extern struct class *comedi_class;
extern const struct file_operations comedi_fops;
-extern int comedi_autoconfig;
+extern bool comedi_autoconfig;
extern struct comedi_driver *comedi_drivers;
#endif /* _COMEDI_FOPS_H */
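In the comedi_fops.c sysfs store handlers above, strict_strtoul() is replaced by kstrtouint() and the old 64-bit round-trip check becomes a direct overflow guard before the kB-to-bytes multiplication. A self-contained sketch of that parse-and-clamp step, with parse_buffer_kb as an illustrative name rather than a comedi function:

#include <linux/kernel.h>
#include <linux/errno.h>

static int parse_buffer_kb(const char *buf, unsigned int bytes_per_kib,
			   unsigned int *size_bytes)
{
	unsigned int kb;
	int ret;

	ret = kstrtouint(buf, 10, &kb);		/* replaces strict_strtoul() */
	if (ret)
		return ret;
	if (kb > UINT_MAX / bytes_per_kib)	/* kB-to-bytes would overflow */
		return -EINVAL;

	*size_bytes = kb * bytes_per_kib;
	return 0;
}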
diff --git a/drivers/staging/comedi/drivers/addi-data/addi_common.c b/drivers/staging/comedi/drivers/addi-data/addi_common.c
index 6fb7594..ca5bd9b8 100644
--- a/drivers/staging/comedi/drivers/addi-data/addi_common.c
+++ b/drivers/staging/comedi/drivers/addi-data/addi_common.c
@@ -145,78 +145,77 @@ void fpu_end(void)
static DEFINE_PCI_DEVICE_TABLE(addi_apci_tbl) = {
#ifdef CONFIG_APCI_3120
- {APCI3120_BOARD_VENDOR_ID, 0x818D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI3120_BOARD_VENDOR_ID, 0x818D)},
#endif
#ifdef CONFIG_APCI_1032
- {APCI1032_BOARD_VENDOR_ID, 0x1003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI1032_BOARD_VENDOR_ID, 0x1003)},
#endif
#ifdef CONFIG_APCI_1516
- {APCI1516_BOARD_VENDOR_ID, 0x1001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI1516_BOARD_VENDOR_ID, 0x1001)},
#endif
#ifdef CONFIG_APCI_2016
- {APCI2016_BOARD_VENDOR_ID, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI2016_BOARD_VENDOR_ID, 0x1002)},
#endif
#ifdef CONFIG_APCI_2032
- {APCI2032_BOARD_VENDOR_ID, 0x1004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI2032_BOARD_VENDOR_ID, 0x1004)},
#endif
#ifdef CONFIG_APCI_2200
- {APCI2200_BOARD_VENDOR_ID, 0x1005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI2200_BOARD_VENDOR_ID, 0x1005)},
#endif
#ifdef CONFIG_APCI_1564
- {APCI1564_BOARD_VENDOR_ID, 0x1006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI1564_BOARD_VENDOR_ID, 0x1006)},
#endif
#ifdef CONFIG_APCI_1500
- {APCI1500_BOARD_VENDOR_ID, 0x80fc, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI1500_BOARD_VENDOR_ID, 0x80fc)},
#endif
#ifdef CONFIG_APCI_3001
- {APCI3120_BOARD_VENDOR_ID, 0x828D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI3120_BOARD_VENDOR_ID, 0x828D)},
#endif
#ifdef CONFIG_APCI_3501
- {APCI3501_BOARD_VENDOR_ID, 0x3001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI3501_BOARD_VENDOR_ID, 0x3001)},
#endif
#ifdef CONFIG_APCI_035
- {APCI035_BOARD_VENDOR_ID, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI035_BOARD_VENDOR_ID, 0x0300)},
#endif
#ifdef CONFIG_APCI_3200
- {APCI3200_BOARD_VENDOR_ID, 0x3000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI3200_BOARD_VENDOR_ID, 0x3000)},
#endif
#ifdef CONFIG_APCI_3300
- {APCI3200_BOARD_VENDOR_ID, 0x3007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI3200_BOARD_VENDOR_ID, 0x3007)},
#endif
#ifdef CONFIG_APCI_1710
- {APCI1710_BOARD_VENDOR_ID, APCI1710_BOARD_DEVICE_ID,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(APCI1710_BOARD_VENDOR_ID, APCI1710_BOARD_DEVICE_ID)},
#endif
#ifdef CONFIG_APCI_16XX
- {0x15B8, 0x1009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x100A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x1009)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x100A)},
#endif
#ifdef CONFIG_APCI_3XXX
- {0x15B8, 0x3010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x300F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x300E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3013, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3015, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3016, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3017, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3018, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x301A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x301B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x301C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x301D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x301E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x301F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3023, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x300B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x15B8, 0x3024, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3010)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x300F)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x300E)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3013)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3014)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3015)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3016)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3017)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3018)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3019)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x301A)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x301B)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x301C)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x301D)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x301E)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x301F)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3020)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3021)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3022)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3023)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x300B)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3002)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3003)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3004)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3024)},
#endif
{0}
};
@@ -1019,7 +1018,7 @@ static const struct addi_board boardtypes[] = {
#endif
#ifdef CONFIG_APCI_16XX
{"apci1648",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x1009,
128,
0,
@@ -1075,7 +1074,7 @@ static const struct addi_board boardtypes[] = {
i_APCI16XX_InsnBitsWriteTTLIO},
{"apci1696",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x100A,
128,
0,
@@ -1132,7 +1131,7 @@ static const struct addi_board boardtypes[] = {
#endif
#ifdef CONFIG_APCI_3XXX
{"apci3000-16",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3010,
256,
256,
@@ -1188,7 +1187,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3000-8",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x300F,
256,
256,
@@ -1244,7 +1243,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3000-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x300E,
256,
256,
@@ -1300,7 +1299,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3006-16",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3013,
256,
256,
@@ -1356,7 +1355,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3006-8",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3014,
256,
256,
@@ -1412,7 +1411,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3006-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3015,
256,
256,
@@ -1468,7 +1467,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3010-16",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3016,
256,
256,
@@ -1524,7 +1523,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3010-8",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3017,
256,
256,
@@ -1580,7 +1579,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3010-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3018,
256,
256,
@@ -1636,7 +1635,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3016-16",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3019,
256,
256,
@@ -1692,7 +1691,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3016-8",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x301A,
256,
256,
@@ -1748,7 +1747,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3016-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x301B,
256,
256,
@@ -1804,7 +1803,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3100-16-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x301C,
256,
256,
@@ -1860,7 +1859,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3100-8-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x301D,
256,
256,
@@ -1916,7 +1915,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3106-16-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x301E,
256,
256,
@@ -1972,7 +1971,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3106-8-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x301F,
256,
256,
@@ -2028,7 +2027,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3110-16-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3020,
256,
256,
@@ -2084,7 +2083,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3110-8-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3021,
256,
256,
@@ -2140,7 +2139,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3116-16-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3022,
256,
256,
@@ -2196,7 +2195,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3116-8-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3023,
256,
256,
@@ -2252,7 +2251,7 @@ static const struct addi_board boardtypes[] = {
i_APCI3XXX_InsnWriteTTLIO},
{"apci3003",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x300B,
256,
256,
@@ -2307,7 +2306,7 @@ static const struct addi_board boardtypes[] = {
NULL},
{"apci3002-16",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3002,
256,
256,
@@ -2362,7 +2361,7 @@ static const struct addi_board boardtypes[] = {
NULL},
{"apci3002-8",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3003,
256,
256,
@@ -2417,7 +2416,7 @@ static const struct addi_board boardtypes[] = {
NULL},
{"apci3002-4",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3004,
256,
256,
@@ -2472,7 +2471,7 @@ static const struct addi_board boardtypes[] = {
NULL},
{"apci3500",
- 0x15B8,
+ PCI_VENDOR_ID_ADDIDATA,
0x3024,
256,
256,
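[Editor's note, not part of the patch] The addi_common hunks above replace hand-rolled seven-field pci_device_id initializers with the PCI_DEVICE() helper from <linux/pci.h>, and the bare 0x15B8 vendor ID with PCI_VENDOR_ID_ADDIDATA. A minimal sketch of what one such entry amounts to; the table name is hypothetical and the expansion is quoted from the macro as I recall it, not from this patch:

static const struct pci_device_id example_tbl[] = {
	/*
	 * PCI_DEVICE(v, d) expands to:
	 *   .vendor = (v), .device = (d),
	 *   .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
	 * leaving class, class_mask and driver_data zeroed, so the match
	 * behaviour is identical to the old seven-field initializer.
	 */
	{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3010) },
	{ 0 }					/* terminating all-zero entry */
};
MODULE_DEVICE_TABLE(pci, example_tbl);		/* modalias info for autoloading */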
diff --git a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
index c75a1a1..f9545b0 100644
--- a/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
+++ b/drivers/staging/comedi/drivers/addi-data/hwdrv_apci3200.c
@@ -3598,7 +3598,7 @@ int i_APCI3200_InterruptHandleEos(struct comedi_device *dev)
n = comedi_buf_write_alloc(s->async,
(7 + 12) * sizeof(unsigned int));
- /* If not enougth memory available, event is set to Comedi Buffer Errror */
+ /* If not enough memory available, event is set to Comedi Buffer Error */
if (n > ((7 + 12) * sizeof(unsigned int))) {
printk("\ncomedi_buf_write_alloc n = %i", n);
s->async->events |= COMEDI_CB_ERROR;
diff --git a/drivers/staging/comedi/drivers/adl_pci7230.c b/drivers/staging/comedi/drivers/adl_pci7230.c
index 72a7258..20d5705 100644
--- a/drivers/staging/comedi/drivers/adl_pci7230.c
+++ b/drivers/staging/comedi/drivers/adl_pci7230.c
@@ -44,15 +44,7 @@ Configuration Options:
#define PCI_DEVICE_ID_PCI7230 0x7230
static DEFINE_PCI_DEVICE_TABLE(adl_pci7230_pci_table) = {
- {
- PCI_VENDOR_ID_ADLINK,
- PCI_DEVICE_ID_PCI7230,
- PCI_ANY_ID,
- PCI_ANY_ID,
- 0,
- 0,
- 0
- },
+ { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI7230) },
{0}
};
diff --git a/drivers/staging/comedi/drivers/adl_pci7296.c b/drivers/staging/comedi/drivers/adl_pci7296.c
index f28fe6b..9a232053 100644
--- a/drivers/staging/comedi/drivers/adl_pci7296.c
+++ b/drivers/staging/comedi/drivers/adl_pci7296.c
@@ -49,10 +49,8 @@ Configuration Options:
#define PCI_DEVICE_ID_PCI7296 0x7296
static DEFINE_PCI_DEVICE_TABLE(adl_pci7296_pci_table) = {
- {
- PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI7296, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI7296) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, adl_pci7296_pci_table);
diff --git a/drivers/staging/comedi/drivers/adl_pci7432.c b/drivers/staging/comedi/drivers/adl_pci7432.c
index 262da7b..86ee219 100644
--- a/drivers/staging/comedi/drivers/adl_pci7432.c
+++ b/drivers/staging/comedi/drivers/adl_pci7432.c
@@ -44,10 +44,8 @@ Configuration Options:
#define PCI_DEVICE_ID_PCI7432 0x7432
static DEFINE_PCI_DEVICE_TABLE(adl_pci7432_pci_table) = {
- {
- PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI7432, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI7432) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, adl_pci7432_pci_table);
diff --git a/drivers/staging/comedi/drivers/adl_pci8164.c b/drivers/staging/comedi/drivers/adl_pci8164.c
index 767a594..3b83d65 100644
--- a/drivers/staging/comedi/drivers/adl_pci8164.c
+++ b/drivers/staging/comedi/drivers/adl_pci8164.c
@@ -57,10 +57,8 @@ Configuration Options:
#define PCI_DEVICE_ID_PCI8164 0x8164
static DEFINE_PCI_DEVICE_TABLE(adl_pci8164_pci_table) = {
- {
- PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI8164, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI8164) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, adl_pci8164_pci_table);
diff --git a/drivers/staging/comedi/drivers/adl_pci9111.c b/drivers/staging/comedi/drivers/adl_pci9111.c
index fc48bae..2a9bd88a 100644
--- a/drivers/staging/comedi/drivers/adl_pci9111.c
+++ b/drivers/staging/comedi/drivers/adl_pci9111.c
@@ -311,10 +311,8 @@ static const struct comedi_lrange pci9111_hr_ai_range = {
};
static DEFINE_PCI_DEVICE_TABLE(pci9111_pci_table) = {
- { PCI_VENDOR_ID_ADLINK, PCI9111_HR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
- 0, 0, 0 },
- /* { PCI_VENDOR_ID_ADLINK, PCI9111_HG_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
- * 0, 0, 0 }, */
+ { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI9111_HR_DEVICE_ID) },
+ /* { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI9111_HG_DEVICE_ID) }, */
{ 0 }
};
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index da2b75b..8318c82 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -1382,16 +1382,14 @@ static int pci1710_attach(struct comedi_device *dev,
int i;
int board_index;
- printk("comedi%d: adv_pci1710: ", dev->minor);
+ dev_info(dev->hw_dev, "comedi%d: adv_pci1710:\n", dev->minor);
opt_bus = it->options[0];
opt_slot = it->options[1];
ret = alloc_private(dev, sizeof(struct pci1710_private));
- if (ret < 0) {
- printk(" - Allocation failed!\n");
+ if (ret < 0)
return -ENOMEM;
- }
/* Look for matching PCI device */
errstr = "not found!";
@@ -1436,10 +1434,10 @@ static int pci1710_attach(struct comedi_device *dev,
if (!pcidev) {
if (opt_bus || opt_slot) {
- printk(" - Card at b:s %d:%d %s\n",
- opt_bus, opt_slot, errstr);
+ dev_err(dev->hw_dev, "- Card at b:s %d:%d %s\n",
+ opt_bus, opt_slot, errstr);
} else {
- printk(" - Card %s\n", errstr);
+ dev_err(dev->hw_dev, "- Card %s\n", errstr);
}
return -EIO;
}
@@ -1450,8 +1448,8 @@ static int pci1710_attach(struct comedi_device *dev,
irq = pcidev->irq;
iobase = pci_resource_start(pcidev, 2);
- printk(", b:s:f=%d:%d:%d, io=0x%4lx", pci_bus, pci_slot, pci_func,
- iobase);
+ dev_dbg(dev->hw_dev, "b:s:f=%d:%d:%d, io=0x%4lx\n", pci_bus, pci_slot,
+ pci_func, iobase);
dev->iobase = iobase;
@@ -1471,10 +1469,8 @@ static int pci1710_attach(struct comedi_device *dev,
n_subdevices++;
ret = alloc_subdevices(dev, n_subdevices);
- if (ret < 0) {
- printk(" - Allocation failed!\n");
+ if (ret < 0)
return ret;
- }
pci1710_reset(dev);
@@ -1483,24 +1479,20 @@ static int pci1710_attach(struct comedi_device *dev,
if (request_irq(irq, interrupt_service_pci1710,
IRQF_SHARED, "Advantech PCI-1710",
dev)) {
- printk
- (", unable to allocate IRQ %d, DISABLING IT",
- irq);
+ dev_dbg(dev->hw_dev, "unable to allocate IRQ %d, DISABLING IT",
+ irq);
irq = 0; /* Can't use IRQ */
} else {
- printk(", irq=%u", irq);
+ dev_dbg(dev->hw_dev, "irq=%u", irq);
}
} else {
- printk(", IRQ disabled");
+ dev_dbg(dev->hw_dev, "IRQ disabled");
}
} else {
irq = 0;
}
dev->irq = irq;
-
- printk(".\n");
-
subdev = 0;
if (this_board->n_aichan) {
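[Editor's note, not part of the patch] The adv_pci1710 hunks above show the second pattern running through this series: bare printk() calls become dev_info()/dev_dbg()/dev_err() calls keyed to dev->hw_dev, so messages carry the device name and dev_dbg() output can be enabled per device through dynamic debug instead of always printing. A minimal sketch of the pattern; the helper name is hypothetical and only mirrors the calls above:

#include <linux/device.h>

static void example_report_irq(struct device *hw_dev, unsigned int irq, int err)
{
	if (err)
		dev_err(hw_dev, "unable to allocate irq %u\n", irq);	/* hard failure */
	else if (irq)
		dev_dbg(hw_dev, "irq=%u\n", irq);			/* debug-only detail */
	else
		dev_dbg(hw_dev, "IRQ disabled\n");
}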
diff --git a/drivers/staging/comedi/drivers/adv_pci_dio.c b/drivers/staging/comedi/drivers/adv_pci_dio.c
index 69334f6..537e585 100644
--- a/drivers/staging/comedi/drivers/adv_pci_dio.c
+++ b/drivers/staging/comedi/drivers/adv_pci_dio.c
@@ -1106,13 +1106,10 @@ static int pci_dio_attach(struct comedi_device *dev,
unsigned long iobase;
struct pci_dev *pcidev = NULL;
- printk("comedi%d: adv_pci_dio: ", dev->minor);
ret = alloc_private(dev, sizeof(struct pci_dio_private));
- if (ret < 0) {
- printk(", Error: Cann't allocate private memory!\n");
+ if (ret < 0)
return -ENOMEM;
- }
for_each_pci_dev(pcidev) {
/* loop through cards supported by this driver */
@@ -1140,19 +1137,18 @@ static int pci_dio_attach(struct comedi_device *dev,
}
if (!dev->board_ptr) {
- printk(", Error: Requested type of the card was not found!\n");
+ dev_err(dev->hw_dev, "Error: Requested type of the card was not found!\n");
return -EIO;
}
if (comedi_pci_enable(pcidev, driver_pci_dio.driver_name)) {
- printk
- (", Error: Can't enable PCI device and request regions!\n");
+ dev_err(dev->hw_dev, "Error: Can't enable PCI device and request regions!\n");
return -EIO;
}
iobase = pci_resource_start(pcidev, this_board->main_pci_region);
- printk(", b:s:f=%d:%d:%d, io=0x%4lx",
- pcidev->bus->number, PCI_SLOT(pcidev->devfn),
- PCI_FUNC(pcidev->devfn), iobase);
+ dev_dbg(dev->hw_dev, "b:s:f=%d:%d:%d, io=0x%4lx\n",
+ pcidev->bus->number, PCI_SLOT(pcidev->devfn),
+ PCI_FUNC(pcidev->devfn), iobase);
dev->iobase = iobase;
dev->board_name = this_board->name;
@@ -1177,15 +1173,10 @@ static int pci_dio_attach(struct comedi_device *dev,
}
ret = alloc_subdevices(dev, n_subdevices);
- if (ret < 0) {
- printk(", Error: Cann't allocate subdevice memory!\n");
+ if (ret < 0)
return ret;
- }
-
- printk(".\n");
subdev = 0;
-
for (i = 0; i < MAX_DI_SUBDEVS; i++)
if (this_board->sdi[i].chans) {
s = dev->subdevices + subdev;
diff --git a/drivers/staging/comedi/drivers/amplc_dio200.c b/drivers/staging/comedi/drivers/amplc_dio200.c
index 93bbe4e..566cc44 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200.c
@@ -421,12 +421,9 @@ static const struct dio200_layout_struct dio200_layouts[] = {
#ifdef CONFIG_COMEDI_PCI
static DEFINE_PCI_DEVICE_TABLE(dio200_pci_table) = {
- {
- PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI215,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI272,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI215) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI272) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, dio200_pci_table);
diff --git a/drivers/staging/comedi/drivers/amplc_pc236.c b/drivers/staging/comedi/drivers/amplc_pc236.c
index 48246cd..7972cad 100644
--- a/drivers/staging/comedi/drivers/amplc_pc236.c
+++ b/drivers/staging/comedi/drivers/amplc_pc236.c
@@ -134,10 +134,8 @@ static const struct pc236_board pc236_boards[] = {
#ifdef CONFIG_COMEDI_PCI
static DEFINE_PCI_DEVICE_TABLE(pc236_pci_table) = {
- {
- PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI236,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI236) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, pc236_pci_table);
diff --git a/drivers/staging/comedi/drivers/amplc_pc263.c b/drivers/staging/comedi/drivers/amplc_pc263.c
index 8a33880..191ac0d 100644
--- a/drivers/staging/comedi/drivers/amplc_pc263.c
+++ b/drivers/staging/comedi/drivers/amplc_pc263.c
@@ -101,10 +101,8 @@ static const struct pc263_board pc263_boards[] = {
#ifdef CONFIG_COMEDI_PCI
static DEFINE_PCI_DEVICE_TABLE(pc263_pci_table) = {
- {
- PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI263,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI263) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, pc263_pci_table);
diff --git a/drivers/staging/comedi/drivers/amplc_pci224.c b/drivers/staging/comedi/drivers/amplc_pci224.c
index 1b5ba1c..b278917 100644
--- a/drivers/staging/comedi/drivers/amplc_pci224.c
+++ b/drivers/staging/comedi/drivers/amplc_pci224.c
@@ -384,12 +384,9 @@ static const struct pci224_board pci224_boards[] = {
*/
static DEFINE_PCI_DEVICE_TABLE(pci224_pci_table) = {
- {
- PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI224,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI234,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI224) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_AMPLICON_PCI234) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, pci224_pci_table);
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index 7edeb11..5389795 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -501,12 +501,9 @@ static const struct pci230_board pci230_boards[] = {
};
static DEFINE_PCI_DEVICE_TABLE(pci230_pci_table) = {
- {
- PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_PCI230, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_PCI260, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_PCI230) },
+ { PCI_DEVICE(PCI_VENDOR_ID_AMPLICON, PCI_DEVICE_ID_PCI260) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, pci230_pci_table);
diff --git a/drivers/staging/comedi/drivers/cb_das16_cs.c b/drivers/staging/comedi/drivers/cb_das16_cs.c
index e171c56..49404f4 100644
--- a/drivers/staging/comedi/drivers/cb_das16_cs.c
+++ b/drivers/staging/comedi/drivers/cb_das16_cs.c
@@ -99,7 +99,7 @@ static struct comedi_driver driver_das16cs = {
.detach = das16cs_detach,
};
-static struct pcmcia_device *cur_dev = NULL;
+static struct pcmcia_device *cur_dev;
static const struct comedi_lrange das16cs_ai_range = { 4, {
RANGE(-10, 10),
@@ -150,7 +150,7 @@ static const struct das16cs_board *das16cs_probe(struct comedi_device *dev,
return das16cs_boards + i;
}
- printk("unknown board!\n");
+ dev_dbg(dev->hw_dev, "unknown board!\n");
return NULL;
}
@@ -163,20 +163,19 @@ static int das16cs_attach(struct comedi_device *dev,
int ret;
int i;
- printk("comedi%d: cb_das16_cs: ", dev->minor);
+ dev_dbg(dev->hw_dev, "comedi%d: cb_das16_cs: attached\n", dev->minor);
link = cur_dev; /* XXX hack */
if (!link)
return -EIO;
dev->iobase = link->resource[0]->start;
- printk("I/O base=0x%04lx ", dev->iobase);
+ dev_dbg(dev->hw_dev, "I/O base=0x%04lx\n", dev->iobase);
- printk("fingerprint:\n");
+ dev_dbg(dev->hw_dev, "fingerprint:\n");
for (i = 0; i < 48; i += 2)
- printk("%04x ", inw(dev->iobase + i));
+ dev_dbg(dev->hw_dev, "%04x\n", inw(dev->iobase + i));
- printk("\n");
ret = request_irq(link->irq, das16cs_interrupt,
IRQF_SHARED, "cb_das16_cs", dev);
@@ -185,7 +184,7 @@ static int das16cs_attach(struct comedi_device *dev,
dev->irq = link->irq;
- printk("irq=%u ", dev->irq);
+ dev_dbg(dev->hw_dev, "irq=%u\n", dev->irq);
dev->board_ptr = das16cs_probe(dev, link);
if (!dev->board_ptr)
@@ -252,14 +251,13 @@ static int das16cs_attach(struct comedi_device *dev,
s->type = COMEDI_SUBD_UNUSED;
}
- printk("attached\n");
return 1;
}
static int das16cs_detach(struct comedi_device *dev)
{
- printk("comedi%d: das16cs: remove\n", dev->minor);
+ dev_dbg(dev->hw_dev, "comedi%d: das16cs: remove\n", dev->minor);
if (dev->irq)
free_irq(dev->irq, dev);
@@ -312,7 +310,7 @@ static int das16cs_ai_rinsn(struct comedi_device *dev,
break;
}
if (to == TIMEOUT) {
- printk("cb_das16_cs: ai timeout\n");
+ dev_dbg(dev->hw_dev, "cb_das16_cs: ai timeout\n");
return -ETIME;
}
data[i] = (unsigned short)inw(dev->iobase + 0);
diff --git a/drivers/staging/comedi/drivers/cb_pcidas.c b/drivers/staging/comedi/drivers/cb_pcidas.c
index 61968a5..7e4ffcf 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas.c
@@ -565,8 +565,6 @@ static int cb_pcidas_attach(struct comedi_device *dev,
int index;
int i;
- printk("comedi%d: cb_pcidas: ", dev->minor);
-
/*
* Allocate the private structure area.
*/
@@ -576,7 +574,6 @@ static int cb_pcidas_attach(struct comedi_device *dev,
/*
* Probe the device to determine what device in the series it is.
*/
- printk("\n");
for_each_pci_dev(pcidev) {
/* is it not a computer boards card? */
@@ -600,20 +597,20 @@ static int cb_pcidas_attach(struct comedi_device *dev,
}
}
- printk("No supported ComputerBoards/MeasurementComputing card found on "
- "requested position\n");
+ dev_err(dev->hw_dev, "No supported ComputerBoards/MeasurementComputing card found on requested position\n");
return -EIO;
found:
- printk("Found %s on bus %i, slot %i\n", cb_pcidas_boards[index].name,
- pcidev->bus->number, PCI_SLOT(pcidev->devfn));
+ dev_dbg(dev->hw_dev, "Found %s on bus %i, slot %i\n",
+ cb_pcidas_boards[index].name, pcidev->bus->number,
+ PCI_SLOT(pcidev->devfn));
/*
* Enable PCI device and reserve I/O ports.
*/
if (comedi_pci_enable(pcidev, "cb_pcidas")) {
- printk(" Failed to enable PCI device and request regions\n");
+ dev_err(dev->hw_dev, "Failed to enable PCI device and request regions\n");
return -EIO;
}
/*
@@ -639,7 +636,8 @@ found:
/* get irq */
if (request_irq(devpriv->pci_dev->irq, cb_pcidas_interrupt,
IRQF_SHARED, "cb_pcidas", dev)) {
- printk(" unable to allocate irq %d\n", devpriv->pci_dev->irq);
+ dev_dbg(dev->hw_dev, "unable to allocate irq %d\n",
+ devpriv->pci_dev->irq);
return -EINVAL;
}
dev->irq = devpriv->pci_dev->irq;
@@ -768,7 +766,6 @@ found:
*/
static int cb_pcidas_detach(struct comedi_device *dev)
{
- printk("comedi%d: cb_pcidas: remove\n", dev->minor);
if (devpriv) {
if (devpriv->s5933_config) {
@@ -776,8 +773,8 @@ static int cb_pcidas_detach(struct comedi_device *dev)
outl(INTCSR_INBOX_INTR_STATUS,
devpriv->s5933_config + AMCC_OP_REG_INTCSR);
#ifdef CB_PCIDAS_DEBUG
- printk("detaching, incsr is 0x%x\n",
- inl(devpriv->s5933_config + AMCC_OP_REG_INTCSR));
+ dev_dbg(dev->hw_dev, "detaching, incsr is 0x%x\n",
+ inl(devpriv->s5933_config + AMCC_OP_REG_INTCSR));
#endif
}
}
@@ -858,7 +855,8 @@ static int ai_config_calibration_source(struct comedi_device *dev,
unsigned int source = data[1];
if (source >= num_calibration_sources) {
- printk("invalid calibration source: %i\n", source);
+ dev_err(dev->hw_dev, "invalid calibration source: %i\n",
+ source);
return -EINVAL;
}
@@ -1279,7 +1277,7 @@ static int cb_pcidas_ai_cmd(struct comedi_device *dev,
outw(bits, devpriv->control_status + ADCMUX_CONT);
#ifdef CB_PCIDAS_DEBUG
- printk("comedi: sent 0x%x to adcmux control\n", bits);
+ dev_dbg(dev->hw_dev, "comedi: sent 0x%x to adcmux control\n", bits);
#endif
/* load counters */
@@ -1306,7 +1304,8 @@ static int cb_pcidas_ai_cmd(struct comedi_device *dev,
devpriv->adc_fifo_bits |= INT_FHF; /* interrupt fifo half full */
}
#ifdef CB_PCIDAS_DEBUG
- printk("comedi: adc_fifo_bits are 0x%x\n", devpriv->adc_fifo_bits);
+ dev_dbg(dev->hw_dev, "comedi: adc_fifo_bits are 0x%x\n",
+ devpriv->adc_fifo_bits);
#endif
/* enable (and clear) interrupts */
outw(devpriv->adc_fifo_bits | EOAI | INT | LADFUL,
@@ -1332,7 +1331,7 @@ static int cb_pcidas_ai_cmd(struct comedi_device *dev,
bits |= BURSTE;
outw(bits, devpriv->control_status + TRIG_CONTSTAT);
#ifdef CB_PCIDAS_DEBUG
- printk("comedi: sent 0x%x to trig control\n", bits);
+ dev_dbg(dev->hw_dev, "comedi: sent 0x%x to trig control\n", bits);
#endif
return 0;
@@ -1549,7 +1548,8 @@ static int cb_pcidas_ao_inttrig(struct comedi_device *dev,
spin_lock_irqsave(&dev->spinlock, flags);
devpriv->adc_fifo_bits |= DAEMIE | DAHFIE;
#ifdef CB_PCIDAS_DEBUG
- printk("comedi: adc_fifo_bits are 0x%x\n", devpriv->adc_fifo_bits);
+ dev_dbg(dev->hw_dev, "comedi: adc_fifo_bits are 0x%x\n",
+ devpriv->adc_fifo_bits);
#endif
/* enable and clear interrupts */
outw(devpriv->adc_fifo_bits | DAEMI | DAHFI,
@@ -1559,7 +1559,8 @@ static int cb_pcidas_ao_inttrig(struct comedi_device *dev,
devpriv->ao_control_bits |= DAC_START | DACEN | DAC_EMPTY;
outw(devpriv->ao_control_bits, devpriv->control_status + DAC_CSR);
#ifdef CB_PCIDAS_DEBUG
- printk("comedi: sent 0x%x to dac control\n", devpriv->ao_control_bits);
+ dev_dbg(dev->hw_dev, "comedi: sent 0x%x to dac control\n",
+ devpriv->ao_control_bits);
#endif
spin_unlock_irqrestore(&dev->spinlock, flags);
@@ -1587,8 +1588,9 @@ static irqreturn_t cb_pcidas_interrupt(int irq, void *d)
s5933_status = inl(devpriv->s5933_config + AMCC_OP_REG_INTCSR);
#ifdef CB_PCIDAS_DEBUG
- printk("intcsr 0x%x\n", s5933_status);
- printk("mbef 0x%x\n", inl(devpriv->s5933_config + AMCC_OP_REG_MBEF));
+ dev_dbg(dev->hw_dev, "intcsr 0x%x\n", s5933_status);
+ dev_dbg(dev->hw_dev, "mbef 0x%x\n",
+ inl(devpriv->s5933_config + AMCC_OP_REG_MBEF));
#endif
if ((INTCSR_INTR_ASSERTED & s5933_status) == 0)
diff --git a/drivers/staging/comedi/drivers/cb_pcidas64.c b/drivers/staging/comedi/drivers/cb_pcidas64.c
index 1e32419..c9e8c47 100644
--- a/drivers/staging/comedi/drivers/cb_pcidas64.c
+++ b/drivers/staging/comedi/drivers/cb_pcidas64.c
@@ -1739,8 +1739,6 @@ static int attach(struct comedi_device *dev, struct comedi_devconfig *it)
uint32_t local_range, local_decode;
int retval;
- printk("comedi%d: cb_pcidas64\n", dev->minor);
-
/*
* Allocate the private structure area.
*/
@@ -1781,12 +1779,11 @@ static int attach(struct comedi_device *dev, struct comedi_devconfig *it)
return -EIO;
}
- printk("Found %s on bus %i, slot %i\n", board(dev)->name,
- pcidev->bus->number, PCI_SLOT(pcidev->devfn));
+ dev_dbg(dev->hw_dev, "Found %s on bus %i, slot %i\n", board(dev)->name,
+ pcidev->bus->number, PCI_SLOT(pcidev->devfn));
if (comedi_pci_enable(pcidev, driver_cb_pcidas.driver_name)) {
- printk(KERN_WARNING
- " failed to enable PCI device and request regions\n");
+ dev_warn(dev->hw_dev, "failed to enable PCI device and request regions\n");
return -EIO;
}
pci_set_master(pcidev);
@@ -1814,7 +1811,7 @@ static int attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (!priv(dev)->plx9080_iobase || !priv(dev)->main_iobase
|| !priv(dev)->dio_counter_iobase) {
- printk(" failed to remap io memory\n");
+ dev_warn(dev->hw_dev, "failed to remap io memory\n");
return -ENOMEM;
}
@@ -1850,17 +1847,19 @@ static int attach(struct comedi_device *dev, struct comedi_devconfig *it)
priv(dev)->hw_revision =
hw_revision(dev, readw(priv(dev)->main_iobase + HW_STATUS_REG));
- printk(" stc hardware revision %i\n", priv(dev)->hw_revision);
+ dev_dbg(dev->hw_dev, "stc hardware revision %i\n",
+ priv(dev)->hw_revision);
init_plx9080(dev);
init_stc_registers(dev);
/* get irq */
if (request_irq(pcidev->irq, handle_interrupt, IRQF_SHARED,
"cb_pcidas64", dev)) {
- printk(" unable to allocate irq %u\n", pcidev->irq);
+ dev_dbg(dev->hw_dev, "unable to allocate irq %u\n",
+ pcidev->irq);
return -EINVAL;
}
dev->irq = pcidev->irq;
- printk(" irq %u\n", dev->irq);
+ dev_dbg(dev->hw_dev, "irq %u\n", dev->irq);
retval = setup_subdevices(dev);
if (retval < 0)
@@ -1882,8 +1881,6 @@ static int detach(struct comedi_device *dev)
{
unsigned int i;
- printk("comedi%d: cb_pcidas: remove\n", dev->minor);
-
if (dev->irq)
free_irq(dev->irq, dev);
if (priv(dev)) {
@@ -2093,7 +2090,8 @@ static int ai_config_calibration_source(struct comedi_device *dev,
else
num_calibration_sources = 8;
if (source >= num_calibration_sources) {
- printk("invalid calibration source: %i\n", source);
+ dev_dbg(dev->hw_dev, "invalid calibration source: %i\n",
+ source);
return -EINVAL;
}
@@ -2924,7 +2922,7 @@ static void pio_drain_ai_fifo_16(struct comedi_device *dev)
}
if (num_samples < 0) {
- printk(" cb_pcidas64: bug! num_samples < 0\n");
+ dev_err(dev->hw_dev, "cb_pcidas64: bug! num_samples < 0\n");
break;
}
diff --git a/drivers/staging/comedi/drivers/cb_pcidda.c b/drivers/staging/comedi/drivers/cb_pcidda.c
index 49102b3..abba220 100644
--- a/drivers/staging/comedi/drivers/cb_pcidda.c
+++ b/drivers/staging/comedi/drivers/cb_pcidda.c
@@ -282,7 +282,6 @@ static int cb_pcidda_attach(struct comedi_device *dev,
struct pci_dev *pcidev = NULL;
int index;
- printk("comedi%d: cb_pcidda: ", dev->minor);
/*
* Allocate the private structure area.
@@ -293,7 +292,6 @@ static int cb_pcidda_attach(struct comedi_device *dev,
/*
* Probe the device to determine what device in the series it is.
*/
- printk("\n");
for_each_pci_dev(pcidev) {
if (pcidev->vendor == PCI_VENDOR_ID_CB) {
@@ -312,22 +310,21 @@ static int cb_pcidda_attach(struct comedi_device *dev,
}
}
if (!pcidev) {
- printk
- ("Not a ComputerBoards/MeasurementComputing card on requested position\n");
+ dev_err(dev->hw_dev, "Not a ComputerBoards/MeasurementComputing card on requested position\n");
return -EIO;
}
found:
devpriv->pci_dev = pcidev;
dev->board_ptr = cb_pcidda_boards + index;
/* "thisboard" macro can be used from here. */
- printk("Found %s at requested position\n", thisboard->name);
+ dev_dbg(dev->hw_dev, "Found %s at requested position\n",
+ thisboard->name);
/*
* Enable PCI device and request regions.
*/
if (comedi_pci_enable(pcidev, thisboard->name)) {
- printk
- ("cb_pcidda: failed to enable PCI device and request regions\n");
+ dev_err(dev->hw_dev, "cb_pcidda: failed to enable PCI device and request regions\n");
return -EIO;
}
@@ -377,12 +374,11 @@ found:
s = dev->subdevices + 2;
subdev_8255_init(dev, s, NULL, devpriv->digitalio + PORT2A);
- printk(" eeprom:");
+ dev_dbg(dev->hw_dev, "eeprom:\n");
for (index = 0; index < EEPROM_SIZE; index++) {
devpriv->eeprom_data[index] = cb_pcidda_read_eeprom(dev, index);
- printk(" %i:0x%x ", index, devpriv->eeprom_data[index]);
+ dev_dbg(dev->hw_dev, "%i:0x%x\n", index, devpriv->eeprom_data[index]);
}
- printk("\n");
/* set calibrations dacs */
for (index = 0; index < thisboard->ao_chans; index++)
@@ -417,8 +413,6 @@ static int cb_pcidda_detach(struct comedi_device *dev)
subdev_8255_cleanup(dev, dev->subdevices + 2);
}
- printk("comedi%d: cb_pcidda: remove\n", dev->minor);
-
return 0;
}
diff --git a/drivers/staging/comedi/drivers/cb_pcidio.c b/drivers/staging/comedi/drivers/cb_pcidio.c
index 79477a5..8f32152 100644
--- a/drivers/staging/comedi/drivers/cb_pcidio.c
+++ b/drivers/staging/comedi/drivers/cb_pcidio.c
@@ -184,8 +184,6 @@ static int pcidio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
int index;
int i;
- printk("comedi%d: cb_pcidio: \n", dev->minor);
-
/*
* Allocate the private structure area. alloc_private() is a
* convenient macro defined in comedidev.h.
@@ -223,8 +221,7 @@ static int pcidio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
}
}
- printk("No supported ComputerBoards/MeasurementComputing card found on "
- "requested position\n");
+ dev_err(dev->hw_dev, "No supported ComputerBoards/MeasurementComputing card found on requested position\n");
return -EIO;
found:
@@ -236,14 +233,12 @@ found:
dev->board_name = thisboard->name;
devpriv->pci_dev = pcidev;
- printk("Found %s on bus %i, slot %i\n", thisboard->name,
- devpriv->pci_dev->bus->number,
- PCI_SLOT(devpriv->pci_dev->devfn));
- if (comedi_pci_enable(pcidev, thisboard->name)) {
- printk
- ("cb_pcidio: failed to enable PCI device and request regions\n");
+ dev_dbg(dev->hw_dev, "Found %s on bus %i, slot %i\n", thisboard->name,
+ devpriv->pci_dev->bus->number,
+ PCI_SLOT(devpriv->pci_dev->devfn));
+ if (comedi_pci_enable(pcidev, thisboard->name))
return -EIO;
- }
+
devpriv->dio_reg_base
=
pci_resource_start(devpriv->pci_dev,
@@ -259,11 +254,10 @@ found:
for (i = 0; i < thisboard->n_8255; i++) {
subdev_8255_init(dev, dev->subdevices + i,
NULL, devpriv->dio_reg_base + i * 4);
- printk(" subdev %d: base = 0x%lx\n", i,
- devpriv->dio_reg_base + i * 4);
+ dev_dbg(dev->hw_dev, "subdev %d: base = 0x%lx\n", i,
+ devpriv->dio_reg_base + i * 4);
}
- printk("attached\n");
return 1;
}
@@ -277,7 +271,6 @@ found:
*/
static int pcidio_detach(struct comedi_device *dev)
{
- printk("comedi%d: cb_pcidio: remove\n", dev->minor);
if (devpriv) {
if (devpriv->pci_dev) {
if (devpriv->dio_reg_base)
diff --git a/drivers/staging/comedi/drivers/cb_pcimdas.c b/drivers/staging/comedi/drivers/cb_pcimdas.c
index b1b832b..8ba6942 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdas.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdas.c
@@ -212,8 +212,6 @@ static int cb_pcimdas_attach(struct comedi_device *dev,
int index;
/* int i; */
- printk("comedi%d: cb_pcimdas: ", dev->minor);
-
/*
* Allocate the private structure area.
*/
@@ -223,7 +221,6 @@ static int cb_pcimdas_attach(struct comedi_device *dev,
/*
* Probe the device to determine what device in the series it is.
*/
- printk("\n");
for_each_pci_dev(pcidev) {
/* is it not a computer boards card? */
@@ -248,26 +245,26 @@ static int cb_pcimdas_attach(struct comedi_device *dev,
}
}
- printk("No supported ComputerBoards/MeasurementComputing card found on "
- "requested position\n");
+ dev_err(dev->hw_dev, "No supported ComputerBoards/MeasurementComputing card found on requested position\n");
return -EIO;
found:
- printk("Found %s on bus %i, slot %i\n", cb_pcimdas_boards[index].name,
- pcidev->bus->number, PCI_SLOT(pcidev->devfn));
+ dev_dbg(dev->hw_dev, "Found %s on bus %i, slot %i\n",
+ cb_pcimdas_boards[index].name, pcidev->bus->number,
+ PCI_SLOT(pcidev->devfn));
/* Warn about non-tested features */
switch (thisboard->device_id) {
case 0x56:
break;
default:
- printk("THIS CARD IS UNSUPPORTED.\n"
- "PLEASE REPORT USAGE TO <mocelet@sucs.org>\n");
+ dev_dbg(dev->hw_dev, "THIS CARD IS UNSUPPORTED.\n"
+ "PLEASE REPORT USAGE TO <mocelet@sucs.org>\n");
}
if (comedi_pci_enable(pcidev, "cb_pcimdas")) {
- printk(" Failed to enable PCI device and request regions\n");
+ dev_err(dev->hw_dev, "Failed to enable PCI device and request regions\n");
return -EIO;
}
@@ -277,13 +274,11 @@ found:
devpriv->BADR3 = pci_resource_start(devpriv->pci_dev, 3);
devpriv->BADR4 = pci_resource_start(devpriv->pci_dev, 4);
-#ifdef CBPCIMDAS_DEBUG
- printk("devpriv->BADR0 = 0x%lx\n", devpriv->BADR0);
- printk("devpriv->BADR1 = 0x%lx\n", devpriv->BADR1);
- printk("devpriv->BADR2 = 0x%lx\n", devpriv->BADR2);
- printk("devpriv->BADR3 = 0x%lx\n", devpriv->BADR3);
- printk("devpriv->BADR4 = 0x%lx\n", devpriv->BADR4);
-#endif
+ dev_dbg(dev->hw_dev, "devpriv->BADR0 = 0x%lx\n", devpriv->BADR0);
+ dev_dbg(dev->hw_dev, "devpriv->BADR1 = 0x%lx\n", devpriv->BADR1);
+ dev_dbg(dev->hw_dev, "devpriv->BADR2 = 0x%lx\n", devpriv->BADR2);
+ dev_dbg(dev->hw_dev, "devpriv->BADR3 = 0x%lx\n", devpriv->BADR3);
+ dev_dbg(dev->hw_dev, "devpriv->BADR4 = 0x%lx\n", devpriv->BADR4);
/* Dont support IRQ yet */
/* get irq */
@@ -333,8 +328,6 @@ found:
else
s->type = COMEDI_SUBD_UNUSED;
- printk("attached\n");
-
return 1;
}
@@ -348,16 +341,19 @@ found:
*/
static int cb_pcimdas_detach(struct comedi_device *dev)
{
-#ifdef CBPCIMDAS_DEBUG
if (devpriv) {
- printk("devpriv->BADR0 = 0x%lx\n", devpriv->BADR0);
- printk("devpriv->BADR1 = 0x%lx\n", devpriv->BADR1);
- printk("devpriv->BADR2 = 0x%lx\n", devpriv->BADR2);
- printk("devpriv->BADR3 = 0x%lx\n", devpriv->BADR3);
- printk("devpriv->BADR4 = 0x%lx\n", devpriv->BADR4);
+ dev_dbg(dev->hw_dev, "devpriv->BADR0 = 0x%lx\n",
+ devpriv->BADR0);
+ dev_dbg(dev->hw_dev, "devpriv->BADR1 = 0x%lx\n",
+ devpriv->BADR1);
+ dev_dbg(dev->hw_dev, "devpriv->BADR2 = 0x%lx\n",
+ devpriv->BADR2);
+ dev_dbg(dev->hw_dev, "devpriv->BADR3 = 0x%lx\n",
+ devpriv->BADR3);
+ dev_dbg(dev->hw_dev, "devpriv->BADR4 = 0x%lx\n",
+ devpriv->BADR4);
}
-#endif
- printk("comedi%d: cb_pcimdas: remove\n", dev->minor);
+
if (dev->irq)
free_irq(dev->irq, dev);
if (devpriv) {
diff --git a/drivers/staging/comedi/drivers/cb_pcimdda.c b/drivers/staging/comedi/drivers/cb_pcimdda.c
index 8c981a8..40bddfa 100644
--- a/drivers/staging/comedi/drivers/cb_pcimdda.c
+++ b/drivers/staging/comedi/drivers/cb_pcimdda.c
@@ -105,7 +105,8 @@ struct board_struct {
int ao_bits;
int dio_chans;
int dio_method;
- int dio_offset; /* how many bytes into the BADR are the DIO ports */
+ /* how many bytes into the BADR are the DIO ports */
+ int dio_offset;
int regs_badrindex; /* IO Region for the control, analog output,
and DIO registers */
int reg_sz; /* number of bytes of registers in io region */
@@ -144,17 +145,18 @@ static const struct board_struct boards[] = {
/* Please add your PCI vendor ID to comedidev.h, and it will be forwarded
* upstream. */
static DEFINE_PCI_DEVICE_TABLE(pci_table) = {
- {
- PCI_VENDOR_ID_COMPUTERBOARDS, PCI_ID_PCIM_DDA06_16, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_COMPUTERBOARDS, PCI_ID_PCIM_DDA06_16) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, pci_table);
-/* this structure is for data unique to this hardware driver. If
- several hardware drivers keep similar information in this structure,
- feel free to suggest moving the variable to the struct comedi_device struct. */
+/*
+ * this structure is for data unique to this hardware driver. If
+ * several hardware drivers keep similar information in this structure,
+ * feel free to suggest moving the variable to the struct comedi_device
+ * struct.
+ */
struct board_private_struct {
unsigned long registers; /* set by probe */
unsigned long dio_registers;
@@ -335,7 +337,10 @@ static int attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (thisboard->dio_chans) {
switch (thisboard->dio_method) {
case DIO_8255:
- /* this is a straight 8255, so register us with the 8255 driver */
+ /*
+ * this is a straight 8255, so register us with
+ * the 8255 driver
+ */
subdev_8255_init(dev, s, NULL, devpriv->dio_registers);
devpriv->attached_to_8255 = 1;
break;
@@ -436,8 +441,11 @@ static int ao_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
for (i = 0; i < insn->n; i++) {
inw(devpriv->registers + chan * 2);
- /* should I set data[i] to the result of the actual read on the register
- or the cached unsigned int in devpriv->ao_readback[]? */
+ /*
+ * should I set data[i] to the result of the actual read
+ * on the register or the cached unsigned int in
+ * devpriv->ao_readback[]?
+ */
data[i] = devpriv->ao_readback[chan];
}
diff --git a/drivers/staging/comedi/drivers/contec_pci_dio.c b/drivers/staging/comedi/drivers/contec_pci_dio.c
index 871f109..e3659bd 100644
--- a/drivers/staging/comedi/drivers/contec_pci_dio.c
+++ b/drivers/staging/comedi/drivers/contec_pci_dio.c
@@ -57,10 +57,9 @@ static const struct contec_board contec_boards[] = {
#define PCI_DEVICE_ID_PIO1616L 0x8172
static DEFINE_PCI_DEVICE_TABLE(contec_pci_table) = {
- {
- PCI_VENDOR_ID_CONTEC, PCI_DEVICE_ID_PIO1616L, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, PIO1616L}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_CONTEC, PCI_DEVICE_ID_PIO1616L),
+ .driver_data = PIO1616L },
+ {0}
};
MODULE_DEVICE_TABLE(pci, contec_pci_table);
@@ -197,8 +196,8 @@ static int contec_do_insn_bits(struct comedi_device *dev,
struct comedi_insn *insn, unsigned int *data)
{
- printk("contec_do_insn_bits called\n");
- printk(" data: %d %d\n", data[0], data[1]);
+ dev_dbg(dev->hw_dev, "contec_do_insn_bits called\n");
+ dev_dbg(dev->hw_dev, "data: %d %d\n", data[0], data[1]);
if (insn->n != 2)
return -EINVAL;
@@ -206,8 +205,8 @@ static int contec_do_insn_bits(struct comedi_device *dev,
if (data[0]) {
s->state &= ~data[0];
s->state |= data[0] & data[1];
- printk(" out: %d on %lx\n", s->state,
- dev->iobase + thisboard->out_offs);
+ dev_dbg(dev->hw_dev, "out: %d on %lx\n", s->state,
+ dev->iobase + thisboard->out_offs);
outw(s->state, dev->iobase + thisboard->out_offs);
}
return 2;
@@ -218,8 +217,8 @@ static int contec_di_insn_bits(struct comedi_device *dev,
struct comedi_insn *insn, unsigned int *data)
{
- printk("contec_di_insn_bits called\n");
- printk(" data: %d %d\n", data[0], data[1]);
+ dev_dbg(dev->hw_dev, "contec_di_insn_bits called\n");
+ dev_dbg(dev->hw_dev, "data: %d %d\n", data[0], data[1]);
if (insn->n != 2)
return -EINVAL;
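[Editor's note, not part of the patch] The contec table above is the one entry in these hunks where the old seventh positional field (driver_data) was non-zero, so the conversion keeps it as an explicit .driver_data designated initializer next to PCI_DEVICE(). A sketch of the general pattern, reusing the names from the table above; the probe routine is hypothetical (these legacy comedi drivers walk the PCI bus in attach() rather than registering a pci_driver, and the table mainly feeds module autoloading):

static const struct pci_device_id example_tbl[] = {	/* hypothetical name */
	{ PCI_DEVICE(PCI_VENDOR_ID_CONTEC, PCI_DEVICE_ID_PIO1616L),
	  .driver_data = PIO1616L },	/* board index, as in the table above */
	{ 0 }
};

/* In a driver with a real probe(), the value comes back via id->driver_data: */
static int example_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
{
	unsigned long board = id->driver_data;	/* PIO1616L for this entry */

	dev_dbg(&pcidev->dev, "board index %lu\n", board);
	return 0;
}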
diff --git a/drivers/staging/comedi/drivers/daqboard2000.c b/drivers/staging/comedi/drivers/daqboard2000.c
index 82be77d..e61c6a8 100644
--- a/drivers/staging/comedi/drivers/daqboard2000.c
+++ b/drivers/staging/comedi/drivers/daqboard2000.c
@@ -325,9 +325,8 @@ static const struct daq200_boardtype boardtypes[] = {
#define this_board ((const struct daq200_boardtype *)dev->board_ptr)
static DEFINE_PCI_DEVICE_TABLE(daqboard2000_pci_table) = {
- {
- 0x1616, 0x0409, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(0x1616, 0x0409) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, daqboard2000_pci_table);
@@ -430,16 +429,14 @@ static int daqboard2000_ai_insn_read(struct comedi_device *dev,
/* Enable reading from the scanlist FIFO */
fpga->acqControl = DAQBOARD2000_SeqStartScanList;
for (timeout = 0; timeout < 20; timeout++) {
- if (fpga->acqControl & DAQBOARD2000_AcqConfigPipeFull) {
+ if (fpga->acqControl & DAQBOARD2000_AcqConfigPipeFull)
break;
- }
/* udelay(2); */
}
fpga->acqControl = DAQBOARD2000_AdcPacerEnable;
for (timeout = 0; timeout < 20; timeout++) {
- if (fpga->acqControl & DAQBOARD2000_AcqLogicScanning) {
+ if (fpga->acqControl & DAQBOARD2000_AcqLogicScanning)
break;
- }
/* udelay(2); */
}
for (timeout = 0; timeout < 20; timeout++) {
@@ -465,9 +462,8 @@ static int daqboard2000_ao_insn_read(struct comedi_device *dev,
int i;
int chan = CR_CHAN(insn->chanspec);
- for (i = 0; i < insn->n; i++) {
+ for (i = 0; i < insn->n; i++)
data[i] = devpriv->ao_readback[chan];
- }
return i;
}
@@ -490,9 +486,8 @@ static int daqboard2000_ao_insn_write(struct comedi_device *dev,
/* fpga->dacControl = (chan + 2) * 0x0010 | 0x0001; udelay(1000); */
fpga->dacSetting[chan] = data[i];
for (timeout = 0; timeout < 20; timeout++) {
- if ((fpga->dacControl & ((chan + 1) * 0x0010)) == 0) {
+ if ((fpga->dacControl & ((chan + 1) * 0x0010)) == 0)
break;
- }
/* udelay(2); */
}
devpriv->ao_readback[chan] = data[i];
@@ -507,7 +502,7 @@ static int daqboard2000_ao_insn_write(struct comedi_device *dev,
static void daqboard2000_resetLocalBus(struct comedi_device *dev)
{
- printk("daqboard2000_resetLocalBus\n");
+ dev_dbg(dev->hw_dev, "daqboard2000_resetLocalBus\n");
writel(DAQBOARD2000_SECRLocalBusHi, devpriv->plx + 0x6c);
udelay(10000);
writel(DAQBOARD2000_SECRLocalBusLo, devpriv->plx + 0x6c);
@@ -516,7 +511,7 @@ static void daqboard2000_resetLocalBus(struct comedi_device *dev)
static void daqboard2000_reloadPLX(struct comedi_device *dev)
{
- printk("daqboard2000_reloadPLX\n");
+ dev_dbg(dev->hw_dev, "daqboard2000_reloadPLX\n");
writel(DAQBOARD2000_SECRReloadLo, devpriv->plx + 0x6c);
udelay(10000);
writel(DAQBOARD2000_SECRReloadHi, devpriv->plx + 0x6c);
@@ -527,7 +522,7 @@ static void daqboard2000_reloadPLX(struct comedi_device *dev)
static void daqboard2000_pulseProgPin(struct comedi_device *dev)
{
- printk("daqboard2000_pulseProgPin 1\n");
+ dev_dbg(dev->hw_dev, "daqboard2000_pulseProgPin 1\n");
writel(DAQBOARD2000_SECRProgPinHi, devpriv->plx + 0x6c);
udelay(10000);
writel(DAQBOARD2000_SECRProgPinLo, devpriv->plx + 0x6c);
@@ -579,14 +574,14 @@ static int initialize_daqboard2000(struct comedi_device *dev,
secr = readl(devpriv->plx + 0x6c);
if (!(secr & DAQBOARD2000_EEPROM_PRESENT)) {
#ifdef DEBUG_EEPROM
- printk("no serial eeprom\n");
+ dev_dbg(dev->hw_dev, "no serial eeprom\n");
#endif
return -EIO;
}
for (retry = 0; retry < 3; retry++) {
#ifdef DEBUG_EEPROM
- printk("Programming EEPROM try %x\n", retry);
+ dev_dbg(dev->hw_dev, "Programming EEPROM try %x\n", retry);
#endif
daqboard2000_resetLocalBus(dev);
@@ -597,7 +592,8 @@ static int initialize_daqboard2000(struct comedi_device *dev,
if (cpld_array[i] == 0xff
&& cpld_array[i + 1] == 0x20) {
#ifdef DEBUG_EEPROM
- printk("Preamble found at %d\n", i);
+ dev_dbg(dev->hw_dev, "Preamble found at %d\n",
+ i);
#endif
break;
}
@@ -605,13 +601,12 @@ static int initialize_daqboard2000(struct comedi_device *dev,
for (; i < len; i += 2) {
int data =
(cpld_array[i] << 8) + cpld_array[i + 1];
- if (!daqboard2000_writeCPLD(dev, data)) {
+ if (!daqboard2000_writeCPLD(dev, data))
break;
- }
}
if (i >= len) {
#ifdef DEBUG_EEPROM
- printk("Programmed\n");
+ dev_dbg(dev->hw_dev, "Programmed\n");
#endif
daqboard2000_resetLocalBus(dev);
daqboard2000_reloadPLX(dev);
@@ -658,9 +653,8 @@ static void daqboard2000_activateReferenceDacs(struct comedi_device *dev)
/* Set the + reference dac value in the FPGA */
fpga->refDacs = 0x80 | DAQBOARD2000_PosRefDacSelect;
for (timeout = 0; timeout < 20; timeout++) {
- if ((fpga->dacControl & DAQBOARD2000_RefBusy) == 0) {
+ if ((fpga->dacControl & DAQBOARD2000_RefBusy) == 0)
break;
- }
udelay(2);
}
/* printk("DAQBOARD2000_PosRefDacSelect %d\n", timeout);*/
@@ -668,9 +662,8 @@ static void daqboard2000_activateReferenceDacs(struct comedi_device *dev)
/* Set the - reference dac value in the FPGA */
fpga->refDacs = 0x80 | DAQBOARD2000_NegRefDacSelect;
for (timeout = 0; timeout < 20; timeout++) {
- if ((fpga->dacControl & DAQBOARD2000_RefBusy) == 0) {
+ if ((fpga->dacControl & DAQBOARD2000_RefBusy) == 0)
break;
- }
udelay(2);
}
/* printk("DAQBOARD2000_NegRefDacSelect %d\n", timeout);*/
@@ -737,15 +730,13 @@ static int daqboard2000_attach(struct comedi_device *dev,
unsigned int aux_len;
int bus, slot;
- printk("comedi%d: daqboard2000:", dev->minor);
-
bus = it->options[0];
slot = it->options[1];
result = alloc_private(dev, sizeof(struct daqboard2000_private));
- if (result < 0) {
+ if (result < 0)
return -ENOMEM;
- }
+
for (card = pci_get_device(0x1616, 0x0409, NULL);
card != NULL; card = pci_get_device(0x1616, 0x0409, card)) {
if (bus || slot) {
@@ -759,10 +750,10 @@ static int daqboard2000_attach(struct comedi_device *dev,
}
if (!card) {
if (bus || slot)
- printk(" no daqboard2000 found at bus/slot: %d/%d\n",
- bus, slot);
+ dev_err(dev->hw_dev, "no daqboard2000 found at bus/slot: %d/%d\n",
+ bus, slot);
else
- printk(" no daqboard2000 found\n");
+ dev_err(dev->hw_dev, "no daqboard2000 found\n");
return -EIO;
} else {
u32 id;
@@ -772,7 +763,8 @@ static int daqboard2000_attach(struct comedi_device *dev,
subsystem_device << 16) | card->subsystem_vendor;
for (i = 0; i < n_boardtypes; i++) {
if (boardtypes[i].id == id) {
- printk(" %s", boardtypes[i].name);
+ dev_dbg(dev->hw_dev, "%s\n",
+ boardtypes[i].name);
dev->board_ptr = boardtypes + i;
}
}
@@ -786,7 +778,7 @@ static int daqboard2000_attach(struct comedi_device *dev,
result = comedi_pci_enable(card, "daqboard2000");
if (result < 0) {
- printk(" failed to enable PCI device and request regions\n");
+ dev_err(dev->hw_dev, "failed to enable PCI device and request regions\n");
return -EIO;
}
devpriv->got_regions = 1;
@@ -794,9 +786,8 @@ static int daqboard2000_attach(struct comedi_device *dev,
ioremap(pci_resource_start(card, 0), DAQBOARD2000_PLX_SIZE);
devpriv->daq =
ioremap(pci_resource_start(card, 2), DAQBOARD2000_DAQ_SIZE);
- if (!devpriv->plx || !devpriv->daq) {
+ if (!devpriv->plx || !devpriv->daq)
return -ENOMEM;
- }
result = alloc_subdevices(dev, 3);
if (result < 0)
@@ -817,7 +808,7 @@ static int daqboard2000_attach(struct comedi_device *dev,
if (aux_data && aux_len) {
result = initialize_daqboard2000(dev, aux_data, aux_len);
} else {
- printk("no FPGA initialization code, aborting\n");
+ dev_dbg(dev->hw_dev, "no FPGA initialization code, aborting\n");
result = -EIO;
}
if (result < 0)
@@ -857,30 +848,26 @@ static int daqboard2000_attach(struct comedi_device *dev,
result = subdev_8255_init(dev, s, daqboard2000_8255_cb,
(unsigned long)(dev->iobase + 0x40));
- printk("\n");
out:
return result;
}
static int daqboard2000_detach(struct comedi_device *dev)
{
- printk("comedi%d: daqboard2000: remove\n", dev->minor);
-
if (dev->subdevices)
subdev_8255_cleanup(dev, dev->subdevices + 2);
- if (dev->irq) {
+ if (dev->irq)
free_irq(dev->irq, dev);
- }
+
if (devpriv) {
if (devpriv->daq)
iounmap(devpriv->daq);
if (devpriv->plx)
iounmap(devpriv->plx);
if (devpriv->pci_dev) {
- if (devpriv->got_regions) {
+ if (devpriv->got_regions)
comedi_pci_disable(devpriv->pci_dev);
- }
pci_dev_put(devpriv->pci_dev);
}
}
diff --git a/drivers/staging/comedi/drivers/das08.c b/drivers/staging/comedi/drivers/das08.c
index 3141dc8..c2dd0ed 100644
--- a/drivers/staging/comedi/drivers/das08.c
+++ b/drivers/staging/comedi/drivers/das08.c
@@ -506,10 +506,8 @@ struct das08_board_struct das08_cs_boards[NUM_DAS08_CS_BOARDS] = {
#ifdef CONFIG_COMEDI_PCI
static DEFINE_PCI_DEVICE_TABLE(das08_pci_table) = {
- {
- PCI_VENDOR_ID_COMPUTERBOARDS, PCI_DEVICE_ID_PCIDAS08,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_COMPUTERBOARDS, PCI_DEVICE_ID_PCIDAS08) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, das08_pci_table);
diff --git a/drivers/staging/comedi/drivers/das08_cs.c b/drivers/staging/comedi/drivers/das08_cs.c
index 6d91d30..4ad398a 100644
--- a/drivers/staging/comedi/drivers/das08_cs.c
+++ b/drivers/staging/comedi/drivers/das08_cs.c
@@ -79,22 +79,20 @@ static int das08_cs_attach(struct comedi_device *dev,
if (ret < 0)
return ret;
- printk("comedi%d: das08_cs: ", dev->minor);
+ dev_info(dev->hw_dev, "comedi%d: das08_cs:\n", dev->minor);
/* deal with a pci board */
if (thisboard->bustype == pcmcia) {
if (link == NULL) {
- printk(" no pcmcia cards found\n");
+ dev_err(dev->hw_dev, "no pcmcia cards found\n");
return -EIO;
}
iobase = link->resource[0]->start;
} else {
- printk(" bug! board does not have PCMCIA bustype\n");
+ dev_err(dev->hw_dev, "bug! board does not have PCMCIA bustype\n");
return -EINVAL;
}
- printk("\n");
-
return das08_common_attach(dev, iobase);
}
diff --git a/drivers/staging/comedi/drivers/das16m1.c b/drivers/staging/comedi/drivers/das16m1.c
index a5ce3b2..5376e71 100644
--- a/drivers/staging/comedi/drivers/das16m1.c
+++ b/drivers/staging/comedi/drivers/das16m1.c
@@ -384,20 +384,20 @@ static int das16m1_cmd_exec(struct comedi_device *dev,
byte = 0;
/* if we are using external start trigger (also board dislikes having
* both start and conversion triggers external simultaneously) */
- if (cmd->start_src == TRIG_EXT && cmd->convert_src != TRIG_EXT) {
+ if (cmd->start_src == TRIG_EXT && cmd->convert_src != TRIG_EXT)
byte |= EXT_TRIG_BIT;
- }
+
outb(byte, dev->iobase + DAS16M1_CS);
/* clear interrupt bit */
outb(0, dev->iobase + DAS16M1_CLEAR_INTR);
/* enable interrupts and internal pacer */
devpriv->control_state &= ~PACER_MASK;
- if (cmd->convert_src == TRIG_TIMER) {
+ if (cmd->convert_src == TRIG_TIMER)
devpriv->control_state |= INT_PACER;
- } else {
+ else
devpriv->control_state |= EXT_PACER;
- }
+
devpriv->control_state |= INTE;
outb(devpriv->control_state, dev->iobase + DAS16M1_INTR_CONTROL);
@@ -531,9 +531,8 @@ static void munge_sample_array(short *array, unsigned int num_elements)
{
unsigned int i;
- for (i = 0; i < num_elements; i++) {
+ for (i = 0; i < num_elements; i++)
array[i] = munge_sample(array[i]);
- }
}
static void das16m1_handler(struct comedi_device *dev, unsigned int status)
@@ -668,25 +667,20 @@ static int das16m1_attach(struct comedi_device *dev,
iobase = it->options[0];
- printk("comedi%d: das16m1:", dev->minor);
-
ret = alloc_private(dev, sizeof(struct das16m1_private_struct));
if (ret < 0)
return ret;
dev->board_name = thisboard->name;
- printk(" io 0x%lx-0x%lx 0x%lx-0x%lx",
- iobase, iobase + DAS16M1_SIZE,
- iobase + DAS16M1_82C55, iobase + DAS16M1_82C55 + DAS16M1_SIZE2);
if (!request_region(iobase, DAS16M1_SIZE, driver_das16m1.driver_name)) {
- printk(" I/O port conflict\n");
+ comedi_error(dev, "I/O port conflict\n");
return -EIO;
}
if (!request_region(iobase + DAS16M1_82C55, DAS16M1_SIZE2,
driver_das16m1.driver_name)) {
release_region(iobase, DAS16M1_SIZE);
- printk(" I/O port conflict\n");
+ comedi_error(dev, "I/O port conflict\n");
return -EIO;
}
dev->iobase = iobase;
@@ -697,17 +691,17 @@ static int das16m1_attach(struct comedi_device *dev,
if (das16m1_irq_bits(irq) >= 0) {
ret = request_irq(irq, das16m1_interrupt, 0,
driver_das16m1.driver_name, dev);
- if (ret < 0) {
- printk(", irq unavailable\n");
+ if (ret < 0)
return ret;
- }
dev->irq = irq;
- printk(", irq %u\n", irq);
+ printk
+ ("irq %u\n", irq);
} else if (irq == 0) {
- printk(", no irq\n");
+ printk
+ (", no irq\n");
} else {
- printk(", invalid irq\n"
- " valid irqs are 2, 3, 5, 7, 10, 11, 12, or 15\n");
+ comedi_error(dev, "invalid irq\n"
+ " valid irqs are 2, 3, 5, 7, 10, 11, 12, or 15\n");
return -EINVAL;
}
@@ -771,7 +765,6 @@ static int das16m1_attach(struct comedi_device *dev,
static int das16m1_detach(struct comedi_device *dev)
{
- printk("comedi%d: das16m1: remove\n", dev->minor);
/* das16m1_reset(dev); */
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index a6df30b..99ada5a 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -573,22 +573,23 @@ static int das1800_init_dma(struct comedi_device *dev, unsigned int dma0,
devpriv->dma_bits |= DMA_CH7_CH5;
break;
default:
- printk(" only supports dma channels 5 through 7\n"
- " Dual dma only allows the following combinations:\n"
- " dma 5,6 / 6,7 / or 7,5\n");
+ dev_err(dev->hw_dev, " only supports dma channels 5 through 7\n"
+ " Dual dma only allows the following combinations:\n"
+ " dma 5,6 / 6,7 / or 7,5\n");
return -EINVAL;
break;
}
if (request_dma(dma0, driver_das1800.driver_name)) {
- printk(" failed to allocate dma channel %i\n", dma0);
+ dev_err(dev->hw_dev, "failed to allocate dma channel %i\n",
+ dma0);
return -EINVAL;
}
devpriv->dma0 = dma0;
devpriv->dma_current = dma0;
if (dma1) {
if (request_dma(dma1, driver_das1800.driver_name)) {
- printk(" failed to allocate dma channel %i\n",
- dma1);
+ dev_err(dev->hw_dev, "failed to allocate dma channel %i\n",
+ dma1);
return -EINVAL;
}
devpriv->dma1 = dma1;
@@ -631,20 +632,20 @@ static int das1800_attach(struct comedi_device *dev,
if (alloc_private(dev, sizeof(struct das1800_private)) < 0)
return -ENOMEM;
- printk("comedi%d: %s: io 0x%lx", dev->minor, driver_das1800.driver_name,
- iobase);
+ printk(KERN_DEBUG "comedi%d: %s: io 0x%lx", dev->minor,
+ driver_das1800.driver_name, iobase);
if (irq) {
- printk(", irq %u", irq);
+ printk(KERN_CONT ", irq %u", irq);
if (dma0) {
- printk(", dma %u", dma0);
+ printk(KERN_CONT ", dma %u", dma0);
if (dma1)
- printk(" and %u", dma1);
+ printk(KERN_CONT " and %u", dma1);
}
}
- printk("\n");
+ printk(KERN_CONT "\n");
if (iobase == 0) {
- printk(" io base address required\n");
+ dev_err(dev->hw_dev, "io base address required\n");
return -EINVAL;
}
@@ -659,7 +660,7 @@ static int das1800_attach(struct comedi_device *dev,
board = das1800_probe(dev);
if (board < 0) {
- printk(" unable to determine board type\n");
+ dev_err(dev->hw_dev, "unable to determine board type\n");
return -ENODEV;
}
@@ -683,7 +684,8 @@ static int das1800_attach(struct comedi_device *dev,
if (irq) {
if (request_irq(irq, das1800_interrupt, 0,
driver_das1800.driver_name, dev)) {
- printk(" unable to allocate irq %u\n", irq);
+ dev_dbg(dev->hw_dev, "unable to allocate irq %u\n",
+ irq);
return -EINVAL;
}
}
@@ -712,7 +714,7 @@ static int das1800_attach(struct comedi_device *dev,
devpriv->irq_dma_bits |= 0x38;
break;
default:
- printk(" irq out of range\n");
+ dev_err(dev->hw_dev, "irq out of range\n");
return -EINVAL;
break;
}
@@ -813,8 +815,8 @@ static int das1800_detach(struct comedi_device *dev)
kfree(devpriv->ai_buf1);
}
- printk("comedi%d: %s: remove\n", dev->minor,
- driver_das1800.driver_name);
+ dev_dbg(dev->hw_dev, "comedi%d: %s: remove\n", dev->minor,
+ driver_das1800.driver_name);
return 0;
};
@@ -833,8 +835,8 @@ static int das1800_probe(struct comedi_device *dev)
case 0x3:
if (board == das1801st_da || board == das1802st_da ||
board == das1701st_da || board == das1702st_da) {
- printk(" Board model: %s\n",
- das1800_boards[board].name);
+ dev_dbg(dev->hw_dev, "Board model: %s\n",
+ das1800_boards[board].name);
return board;
}
printk
@@ -843,8 +845,8 @@ static int das1800_probe(struct comedi_device *dev)
break;
case 0x4:
if (board == das1802hr_da || board == das1702hr_da) {
- printk(" Board model: %s\n",
- das1800_boards[board].name);
+ dev_dbg(dev->hw_dev, "Board model: %s\n",
+ das1800_boards[board].name);
return board;
}
printk
@@ -854,8 +856,8 @@ static int das1800_probe(struct comedi_device *dev)
case 0x5:
if (board == das1801ao || board == das1802ao ||
board == das1701ao || board == das1702ao) {
- printk(" Board model: %s\n",
- das1800_boards[board].name);
+ dev_dbg(dev->hw_dev, "Board model: %s\n",
+ das1800_boards[board].name);
return board;
}
printk
@@ -864,18 +866,19 @@ static int das1800_probe(struct comedi_device *dev)
break;
case 0x6:
if (board == das1802hr || board == das1702hr) {
- printk(" Board model: %s\n",
- das1800_boards[board].name);
+ dev_dbg(dev->hw_dev, "Board model: %s\n",
+ das1800_boards[board].name);
return board;
}
- printk(" Board model (probed, not recommended): das-1802hr\n");
+ printk
+ (" Board model (probed, not recommended): das-1802hr\n");
return das1802hr;
break;
case 0x7:
if (board == das1801st || board == das1802st ||
board == das1701st || board == das1702st) {
- printk(" Board model: %s\n",
- das1800_boards[board].name);
+ dev_dbg(dev->hw_dev, "Board model: %s\n",
+ das1800_boards[board].name);
return board;
}
printk
@@ -884,8 +887,8 @@ static int das1800_probe(struct comedi_device *dev)
break;
case 0x8:
if (board == das1801hc || board == das1802hc) {
- printk(" Board model: %s\n",
- das1800_boards[board].name);
+ dev_dbg(dev->hw_dev, "Board model: %s\n",
+ das1800_boards[board].name);
return board;
}
printk
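[Editor's note, not part of the patch] One logging detail from the das1800_attach() hunk further up: where a message is deliberately built up across several printk() calls, the patch keeps printk() but adds an explicit level on the first call and KERN_CONT on the continuations, so the fragments stay in one log record instead of being split across lines. A minimal sketch with a hypothetical function name:

#include <linux/kernel.h>

static void example_announce(int minor, unsigned long iobase,
			     unsigned int irq, unsigned int dma0)
{
	printk(KERN_DEBUG "comedi%d: das1800: io 0x%lx", minor, iobase);
	if (irq) {
		printk(KERN_CONT ", irq %u", irq);
		if (dma0)
			printk(KERN_CONT ", dma %u", dma0);
	}
	printk(KERN_CONT "\n");		/* terminate the single log line */
}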
diff --git a/drivers/staging/comedi/drivers/das6402.c b/drivers/staging/comedi/drivers/das6402.c
index 6328f52..f256841 100644
--- a/drivers/staging/comedi/drivers/das6402.c
+++ b/drivers/staging/comedi/drivers/das6402.c
@@ -171,7 +171,7 @@ static irqreturn_t intr_handler(int irq, void *d)
struct comedi_subdevice *s = dev->subdevices;
if (!dev->attached || devpriv->das6402_ignoreirq) {
- printk("das6402: BUG: spurious interrupt\n");
+ dev_warn(dev->hw_dev, "BUG: spurious interrupt\n");
return IRQ_HANDLED;
}
#ifdef DEBUG
@@ -228,9 +228,7 @@ static int das6402_ai_cancel(struct comedi_device *dev,
*/
devpriv->das6402_ignoreirq = 1;
-#ifdef DEBUG
- printk("das6402: Stopping acquisition\n");
-#endif
+ dev_dbg(dev->hw_dev, "Stopping acquisition\n");
devpriv->das6402_ignoreirq = 1;
outb_p(0x02, dev->iobase + 10); /* disable external trigging */
outw_p(SCANL, dev->iobase + 2); /* resets the card fifo */
@@ -246,10 +244,7 @@ static int das6402_ai_mode2(struct comedi_device *dev,
struct comedi_subdevice *s, comedi_trig * it)
{
devpriv->das6402_ignoreirq = 1;
-
-#ifdef DEBUG
- printk("das6402: Starting acquisition\n");
-#endif
+ dev_dbg(dev->hw_dev, "Starting acquisition\n");
outb_p(0x03, dev->iobase + 10); /* enable external trigging */
outw_p(SCANL, dev->iobase + 2); /* resets the card fifo */
outb_p(IRQ | CONVSRC | BURSTEN | INTE, dev->iobase + 9);
@@ -329,10 +324,8 @@ static int das6402_attach(struct comedi_device *dev,
if (iobase == 0)
iobase = 0x300;
- printk("comedi%d: das6402: 0x%04lx", dev->minor, iobase);
-
if (!request_region(iobase, DAS6402_SIZE, "das6402")) {
- printk(" I/O port conflict\n");
+ dev_err(dev->hw_dev, "I/O port conflict\n");
return -EIO;
}
dev->iobase = iobase;
@@ -340,14 +333,12 @@ static int das6402_attach(struct comedi_device *dev,
/* should do a probe here */
irq = it->options[0];
- printk(" ( irq = %u )", irq);
+ dev_dbg(dev->hw_dev, "( irq = %u )\n", irq);
ret = request_irq(irq, intr_handler, 0, "das6402", dev);
- if (ret < 0) {
- printk("irq conflict\n");
+ if (ret < 0)
return ret;
- }
- dev->irq = irq;
+ dev->irq = irq;
ret = alloc_private(dev, sizeof(struct das6402_private));
if (ret < 0)
return ret;
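For orientation, the logging conversions running through these comedi hunks follow one pattern: bare printk() calls are replaced with dev_err()/dev_warn()/dev_dbg() keyed to a device pointer, which gives every message a log level and a consistent device prefix. A minimal sketch of the idiom, with an invented helper and message text (not code from any driver in this series):

	#include <linux/device.h>
	#include <linux/errno.h>

	/* Hypothetical attach-time report showing the printk -> dev_*() conversion. */
	static int example_report(struct device *dev, unsigned long iobase, unsigned int irq)
	{
		/* old style: printk("io 0x%lx, irq %u\n", iobase, irq); */
		dev_info(dev, "io 0x%lx\n", iobase);	/* always-visible status */
		dev_dbg(dev, "irq %u\n", irq);		/* emitted only with DEBUG or dynamic debug */

		if (!irq) {
			dev_err(dev, "no usable irq\n");
			return -EINVAL;
		}
		return 0;
	}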
diff --git a/drivers/staging/comedi/drivers/das800.c b/drivers/staging/comedi/drivers/das800.c
index 96d41ad..6e347b4 100644
--- a/drivers/staging/comedi/drivers/das800.c
+++ b/drivers/staging/comedi/drivers/das800.c
@@ -296,47 +296,47 @@ static int das800_probe(struct comedi_device *dev)
switch (id_bits) {
case 0x0:
if (board == das800) {
- printk(" Board model: DAS-800\n");
+ dev_dbg(dev->hw_dev, "Board model: DAS-800\n");
return board;
}
if (board == ciodas800) {
- printk(" Board model: CIO-DAS800\n");
+ dev_dbg(dev->hw_dev, "Board model: CIO-DAS800\n");
return board;
}
- printk(" Board model (probed): DAS-800\n");
+ dev_dbg(dev->hw_dev, "Board model (probed): DAS-800\n");
return das800;
break;
case 0x2:
if (board == das801) {
- printk(" Board model: DAS-801\n");
+ dev_dbg(dev->hw_dev, "Board model: DAS-801\n");
return board;
}
if (board == ciodas801) {
- printk(" Board model: CIO-DAS801\n");
+ dev_dbg(dev->hw_dev, "Board model: CIO-DAS801\n");
return board;
}
- printk(" Board model (probed): DAS-801\n");
+ dev_dbg(dev->hw_dev, "Board model (probed): DAS-801\n");
return das801;
break;
case 0x3:
if (board == das802) {
- printk(" Board model: DAS-802\n");
+ dev_dbg(dev->hw_dev, "Board model: DAS-802\n");
return board;
}
if (board == ciodas802) {
- printk(" Board model: CIO-DAS802\n");
+ dev_dbg(dev->hw_dev, "Board model: CIO-DAS802\n");
return board;
}
if (board == ciodas80216) {
- printk(" Board model: CIO-DAS802/16\n");
+ dev_dbg(dev->hw_dev, "Board model: CIO-DAS802/16\n");
return board;
}
- printk(" Board model (probed): DAS-802\n");
+ dev_dbg(dev->hw_dev, "Board model (probed): DAS-802\n");
return das802;
break;
default:
- printk(" Board model: probe returned 0x%x (unknown)\n",
- id_bits);
+ dev_dbg(dev->hw_dev, "Board model: probe returned 0x%x (unknown)\n",
+ id_bits);
return board;
break;
}
@@ -466,42 +466,43 @@ static int das800_attach(struct comedi_device *dev, struct comedi_devconfig *it)
unsigned long irq_flags;
int board;
- printk("comedi%d: das800: io 0x%lx", dev->minor, iobase);
+ dev_info(dev->hw_dev, "comedi%d: das800: io 0x%lx\n", dev->minor,
+ iobase);
if (irq)
- printk(", irq %u", irq);
- printk("\n");
+ dev_dbg(dev->hw_dev, "irq %u\n", irq);
/* allocate and initialize dev->private */
if (alloc_private(dev, sizeof(struct das800_private)) < 0)
return -ENOMEM;
if (iobase == 0) {
- printk("io base address required for das800\n");
+ dev_err(dev->hw_dev, "io base address required for das800\n");
return -EINVAL;
}
/* check if io addresses are available */
if (!request_region(iobase, DAS800_SIZE, "das800")) {
- printk("I/O port conflict\n");
+ dev_err(dev->hw_dev, "I/O port conflict\n");
return -EIO;
}
dev->iobase = iobase;
board = das800_probe(dev);
if (board < 0) {
- printk("unable to determine board type\n");
+ dev_dbg(dev->hw_dev, "unable to determine board type\n");
return -ENODEV;
}
dev->board_ptr = das800_boards + board;
/* grab our IRQ */
if (irq == 1 || irq > 7) {
- printk("irq out of range\n");
+ dev_err(dev->hw_dev, "irq out of range\n");
return -EINVAL;
}
if (irq) {
if (request_irq(irq, das800_interrupt, 0, "das800", dev)) {
- printk("unable to allocate irq %u\n", irq);
+ dev_err(dev->hw_dev, "unable to allocate irq %u\n",
+ irq);
return -EINVAL;
}
}
@@ -557,7 +558,7 @@ static int das800_attach(struct comedi_device *dev, struct comedi_devconfig *it)
static int das800_detach(struct comedi_device *dev)
{
- printk("comedi%d: das800: remove\n", dev->minor);
+ dev_info(dev->hw_dev, "comedi%d: das800: remove\n", dev->minor);
/* only free stuff if it has been allocated by _attach */
if (dev->iobase)
diff --git a/drivers/staging/comedi/drivers/dt3000.c b/drivers/staging/comedi/drivers/dt3000.c
index 6170f7b..0a7979e 100644
--- a/drivers/staging/comedi/drivers/dt3000.c
+++ b/drivers/staging/comedi/drivers/dt3000.c
@@ -352,7 +352,8 @@ static int dt3k_send_cmd(struct comedi_device *dev, unsigned int cmd)
if ((status & DT3000_COMPLETION_MASK) == DT3000_NOERROR)
return 0;
- printk("dt3k_send_cmd() timeout/error status=0x%04x\n", status);
+ dev_dbg(dev->hw_dev, "dt3k_send_cmd() timeout/error status=0x%04x\n",
+ status);
return -ETIME;
}
@@ -383,7 +384,7 @@ static void dt3k_writesingle(struct comedi_device *dev, unsigned int subsys,
dt3k_send_cmd(dev, CMD_WRITESINGLE);
}
-static int debug_n_ints = 0;
+static int debug_n_ints;
/* FIXME! Assumes shared interrupt is for this card. */
/* What's this debug_n_ints stuff? Obviously needs some work... */
@@ -429,12 +430,12 @@ static char *intr_flags[] = {
static void debug_intr_flags(unsigned int flags)
{
int i;
- printk("dt3k: intr_flags:");
+ printk(KERN_DEBUG "dt3k: intr_flags:");
for (i = 0; i < 8; i++) {
if (flags & (1 << i))
- printk(" %s", intr_flags[i]);
+ printk(KERN_CONT " %s", intr_flags[i]);
}
- printk("\n");
+ printk(KERN_CONT "\n");
}
#endif
@@ -452,7 +453,7 @@ static void dt3k_ai_empty_fifo(struct comedi_device *dev,
if (count < 0)
count += AI_FIFO_DEPTH;
- printk("reading %d samples\n", count);
+ dev_dbg(dev->hw_dev, "reading %d samples\n", count);
rear = devpriv->ai_rear;
@@ -640,7 +641,7 @@ static int dt3k_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
int ret;
unsigned int mode;
- printk("dt3k_ai_cmd:\n");
+ dev_dbg(dev->hw_dev, "dt3k_ai_cmd:\n");
for (i = 0; i < cmd->chanlist_len; i++) {
chan = CR_CHAN(cmd->chanlist[i]);
range = CR_RANGE(cmd->chanlist[i]);
@@ -651,15 +652,15 @@ static int dt3k_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
aref = CR_AREF(cmd->chanlist[0]);
writew(cmd->scan_end_arg, devpriv->io_addr + DPR_Params(0));
- printk("param[0]=0x%04x\n", cmd->scan_end_arg);
+ dev_dbg(dev->hw_dev, "param[0]=0x%04x\n", cmd->scan_end_arg);
if (cmd->convert_src == TRIG_TIMER) {
divider = dt3k_ns_to_timer(50, &cmd->convert_arg,
cmd->flags & TRIG_ROUND_MASK);
writew((divider >> 16), devpriv->io_addr + DPR_Params(1));
- printk("param[1]=0x%04x\n", divider >> 16);
+ dev_dbg(dev->hw_dev, "param[1]=0x%04x\n", divider >> 16);
writew((divider & 0xffff), devpriv->io_addr + DPR_Params(2));
- printk("param[2]=0x%04x\n", divider & 0xffff);
+ dev_dbg(dev->hw_dev, "param[2]=0x%04x\n", divider & 0xffff);
} else {
/* not supported */
}
@@ -668,21 +669,21 @@ static int dt3k_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
tscandiv = dt3k_ns_to_timer(100, &cmd->scan_begin_arg,
cmd->flags & TRIG_ROUND_MASK);
writew((tscandiv >> 16), devpriv->io_addr + DPR_Params(3));
- printk("param[3]=0x%04x\n", tscandiv >> 16);
+ dev_dbg(dev->hw_dev, "param[3]=0x%04x\n", tscandiv >> 16);
writew((tscandiv & 0xffff), devpriv->io_addr + DPR_Params(4));
- printk("param[4]=0x%04x\n", tscandiv & 0xffff);
+ dev_dbg(dev->hw_dev, "param[4]=0x%04x\n", tscandiv & 0xffff);
} else {
/* not supported */
}
mode = DT3000_AD_RETRIG_INTERNAL | 0 | 0;
writew(mode, devpriv->io_addr + DPR_Params(5));
- printk("param[5]=0x%04x\n", mode);
+ dev_dbg(dev->hw_dev, "param[5]=0x%04x\n", mode);
writew(aref == AREF_DIFF, devpriv->io_addr + DPR_Params(6));
- printk("param[6]=0x%04x\n", aref == AREF_DIFF);
+ dev_dbg(dev->hw_dev, "param[6]=0x%04x\n", aref == AREF_DIFF);
writew(AI_FIFO_DEPTH / 2, devpriv->io_addr + DPR_Params(7));
- printk("param[7]=0x%04x\n", AI_FIFO_DEPTH / 2);
+ dev_dbg(dev->hw_dev, "param[7]=0x%04x\n", AI_FIFO_DEPTH / 2);
writew(SUBS_AI, devpriv->io_addr + DPR_SubSys);
ret = dt3k_send_cmd(dev, CMD_CONFIG);
@@ -848,7 +849,7 @@ static int dt3000_attach(struct comedi_device *dev, struct comedi_devconfig *it)
int bus, slot;
int ret = 0;
- printk("dt3000:");
+ dev_dbg(dev->hw_dev, "dt3000:\n");
bus = it->options[0];
slot = it->options[1];
@@ -860,7 +861,7 @@ static int dt3000_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret < 0)
return ret;
if (ret == 0) {
- printk(" no DT board found\n");
+ dev_warn(dev->hw_dev, "no DT board found\n");
return -ENODEV;
}
@@ -868,7 +869,8 @@ static int dt3000_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (request_irq(devpriv->pci_dev->irq, dt3k_interrupt, IRQF_SHARED,
"dt3000", dev)) {
- printk(" unable to allocate IRQ %u\n", devpriv->pci_dev->irq);
+ dev_err(dev->hw_dev, "unable to allocate IRQ %u\n",
+ devpriv->pci_dev->irq);
return -EINVAL;
}
dev->irq = devpriv->pci_dev->irq;
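The debug_intr_flags() hunk in this file shows the related continuation-line rule: when one logical line is assembled from several printk() calls, the first call carries the log level and the rest use KERN_CONT, otherwise each fragment starts its own message. A hedged sketch with made-up flag names:

	#include <linux/kernel.h>
	#include <linux/printk.h>

	static const char * const example_flag_names[] = {
		"adfull", "addone", "dadone", "ctrdone",
	};

	static void example_print_flags(unsigned int flags)
	{
		int i;

		printk(KERN_DEBUG "example: flags:");	/* first fragment sets the level */
		for (i = 0; i < ARRAY_SIZE(example_flag_names); i++)
			if (flags & (1 << i))
				printk(KERN_CONT " %s", example_flag_names[i]);
		printk(KERN_CONT "\n");			/* terminate the assembled line */
	}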
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
index 8d98cf4..6a79ba1 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -71,18 +71,12 @@ static struct comedi_driver driver_jr3_pci = {
};
static DEFINE_PCI_DEVICE_TABLE(jr3_pci_pci_table) = {
- {
- PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL_NEW,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_2_CHANNEL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_3_CHANNEL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_4_CHANNEL,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_1_CHANNEL_NEW) },
+ { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_2_CHANNEL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_3_CHANNEL) },
+ { PCI_DEVICE(PCI_VENDOR_ID_JR3, PCI_DEVICE_ID_JR3_4_CHANNEL) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, jr3_pci_pci_table);
@@ -378,14 +372,14 @@ static int jr3_pci_open(struct comedi_device *dev)
int i;
struct jr3_pci_dev_private *devpriv = dev->private;
- printk("jr3_pci_open\n");
+ dev_dbg(dev->hw_dev, "jr3_pci_open\n");
for (i = 0; i < devpriv->n_channels; i++) {
struct jr3_pci_subdev_private *p;
p = dev->subdevices[i].private;
if (p) {
- printk("serial: %p %d (%d)\n", p, p->serial_no,
- p->channel_no);
+ dev_dbg(dev->hw_dev, "serial: %p %d (%d)\n", p,
+ p->serial_no, p->channel_no);
}
}
return 0;
@@ -463,8 +457,8 @@ static int jr3_download_firmware(struct comedi_device *dev, const u8 * data,
break;
more = more
&& read_idm_word(data, size, &pos, &addr);
- printk("Loading#%d %4.4x bytes at %4.4x\n", i,
- count, addr);
+ dev_dbg(dev->hw_dev, "Loading#%d %4.4x bytes at %4.4x\n",
+ i, count, addr);
while (more && count > 0) {
if (addr & 0x4000) {
/* 16 bit data, never seen in real life!! */
@@ -599,24 +593,24 @@ static struct poll_delay_t jr3_pci_poll_subdevice(struct comedi_subdevice *s)
min_full_scale =
get_min_full_scales(channel);
printk("Obtained Min. Full Scales:\n");
- printk("%i ", (min_full_scale).fx);
- printk("%i ", (min_full_scale).fy);
- printk("%i ", (min_full_scale).fz);
- printk("%i ", (min_full_scale).mx);
- printk("%i ", (min_full_scale).my);
- printk("%i ", (min_full_scale).mz);
- printk("\n");
+ printk(KERN_DEBUG "%i ", (min_full_scale).fx);
+ printk(KERN_CONT "%i ", (min_full_scale).fy);
+ printk(KERN_CONT "%i ", (min_full_scale).fz);
+ printk(KERN_CONT "%i ", (min_full_scale).mx);
+ printk(KERN_CONT "%i ", (min_full_scale).my);
+ printk(KERN_CONT "%i ", (min_full_scale).mz);
+ printk(KERN_CONT "\n");
max_full_scale =
get_max_full_scales(channel);
printk("Obtained Max. Full Scales:\n");
- printk("%i ", (max_full_scale).fx);
- printk("%i ", (max_full_scale).fy);
- printk("%i ", (max_full_scale).fz);
- printk("%i ", (max_full_scale).mx);
- printk("%i ", (max_full_scale).my);
- printk("%i ", (max_full_scale).mz);
- printk("\n");
+ printk(KERN_DEBUG "%i ", (max_full_scale).fx);
+ printk(KERN_CONT "%i ", (max_full_scale).fy);
+ printk(KERN_CONT "%i ", (max_full_scale).fz);
+ printk(KERN_CONT "%i ", (max_full_scale).mx);
+ printk(KERN_CONT "%i ", (max_full_scale).my);
+ printk(KERN_CONT "%i ", (max_full_scale).mz);
+ printk(KERN_CONT "\n");
set_full_scales(channel,
max_full_scale);
@@ -779,14 +773,12 @@ static int jr3_pci_attach(struct comedi_device *dev,
int opt_bus, opt_slot, i;
struct jr3_pci_dev_private *devpriv;
- printk("comedi%d: jr3_pci\n", dev->minor);
-
opt_bus = it->options[0];
opt_slot = it->options[1];
if (sizeof(struct jr3_channel) != 0xc00) {
- printk("sizeof(struct jr3_channel) = %x [expected %x]\n",
- (unsigned)sizeof(struct jr3_channel), 0xc00);
+ dev_err(dev->hw_dev, "sizeof(struct jr3_channel) = %x [expected %x]\n",
+ (unsigned)sizeof(struct jr3_channel), 0xc00);
return -EINVAL;
}
@@ -840,7 +832,7 @@ static int jr3_pci_attach(struct comedi_device *dev,
}
}
if (!card) {
- printk(" no jr3_pci found\n");
+ dev_err(dev->hw_dev, "no jr3_pci found\n");
return -EIO;
} else {
devpriv->pci_dev = card;
@@ -875,10 +867,10 @@ static int jr3_pci_attach(struct comedi_device *dev,
p = dev->subdevices[i].private;
p->channel = &devpriv->iobase->channel[i].data;
- printk("p->channel %p %p (%tx)\n",
- p->channel, devpriv->iobase,
- ((char *)(p->channel) -
- (char *)(devpriv->iobase)));
+ dev_dbg(dev->hw_dev, "p->channel %p %p (%tx)\n",
+ p->channel, devpriv->iobase,
+ ((char *)(p->channel) -
+ (char *)(devpriv->iobase)));
p->channel_no = i;
for (j = 0; j < 8; j++) {
int k;
@@ -916,7 +908,7 @@ static int jr3_pci_attach(struct comedi_device *dev,
devpriv->iobase->channel[0].reset = 0;
result = comedi_load_firmware(dev, "jr3pci.idm", jr3_download_firmware);
- printk("Firmare load %d\n", result);
+ dev_dbg(dev->hw_dev, "Firmare load %d\n", result);
if (result < 0)
goto out;
@@ -934,9 +926,9 @@ static int jr3_pci_attach(struct comedi_device *dev,
*/
msleep_interruptible(25);
for (i = 0; i < 0x18; i++) {
- printk("%c",
- get_u16(&devpriv->iobase->channel[0].
- data.copyright[i]) >> 8);
+ dev_dbg(dev->hw_dev, "%c\n",
+ get_u16(&devpriv->iobase->channel[0].
+ data.copyright[i]) >> 8);
}
/* Start card timer */
@@ -963,7 +955,6 @@ static int jr3_pci_detach(struct comedi_device *dev)
int i;
struct jr3_pci_dev_private *devpriv = dev->private;
- printk("comedi%d: jr3_pci: remove\n", dev->minor);
if (devpriv) {
del_timer_sync(&devpriv->timer);
diff --git a/drivers/staging/comedi/drivers/ke_counter.c b/drivers/staging/comedi/drivers/ke_counter.c
index 286093b..4e9e9a0 100644
--- a/drivers/staging/comedi/drivers/ke_counter.c
+++ b/drivers/staging/comedi/drivers/ke_counter.c
@@ -52,10 +52,8 @@ static int cnt_attach(struct comedi_device *dev, struct comedi_devconfig *it);
static int cnt_detach(struct comedi_device *dev);
static DEFINE_PCI_DEVICE_TABLE(cnt_pci_table) = {
- {
- PCI_VENDOR_ID_KOLTER, CNT_CARD_DEVICE_ID, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_KOLTER, CNT_CARD_DEVICE_ID) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, cnt_pci_table);
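The device-table rewrites in jr3_pci, ke_counter and (below) me_daq rely on the PCI_DEVICE() helper, which fills in the vendor and device IDs and sets subvendor/subdevice to PCI_ANY_ID, exactly what the verbose initializers spelled out by hand. A generic sketch with placeholder IDs, not a table from this series:

	#include <linux/module.h>
	#include <linux/pci.h>

	#define EXAMPLE_VENDOR_ID	0x1234	/* illustrative IDs only */
	#define EXAMPLE_DEVICE_ID	0x5678

	static const struct pci_device_id example_pci_table[] = {
		{ PCI_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_DEVICE_ID) },
		{ 0 }				/* terminating entry */
	};
	MODULE_DEVICE_TABLE(pci, example_pci_table);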
diff --git a/drivers/staging/comedi/drivers/me_daq.c b/drivers/staging/comedi/drivers/me_daq.c
index cda4b22..8b812e4 100644
--- a/drivers/staging/comedi/drivers/me_daq.c
+++ b/drivers/staging/comedi/drivers/me_daq.c
@@ -188,12 +188,9 @@ static const struct comedi_lrange me2600_ao_range = {
};
static DEFINE_PCI_DEVICE_TABLE(me_pci_table) = {
- {
- PCI_VENDOR_ID_MEILHAUS, ME2600_DEVICE_ID, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0}, {
- PCI_VENDOR_ID_MEILHAUS, ME2000_DEVICE_ID, PCI_ANY_ID,
- PCI_ANY_ID, 0, 0, 0}, {
- 0}
+ { PCI_DEVICE(PCI_VENDOR_ID_MEILHAUS, ME2600_DEVICE_ID) },
+ { PCI_DEVICE(PCI_VENDOR_ID_MEILHAUS, ME2000_DEVICE_ID) },
+ {0}
};
MODULE_DEVICE_TABLE(pci, me_pci_table);
diff --git a/drivers/staging/comedi/drivers/ni_at_a2150.c b/drivers/staging/comedi/drivers/ni_at_a2150.c
index 32e675e..c25e44c 100644
--- a/drivers/staging/comedi/drivers/ni_at_a2150.c
+++ b/drivers/staging/comedi/drivers/ni_at_a2150.c
@@ -731,9 +731,8 @@ static int a2150_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
outw(trigger_bits, dev->iobase + TRIGGER_REG);
/* start acquisition for soft trigger */
- if (cmd->start_src == TRIG_NOW) {
+ if (cmd->start_src == TRIG_NOW)
outw(0, dev->iobase + FIFO_START_REG);
- }
#ifdef A2150_DEBUG
ni_dump_regs(dev);
#endif
@@ -860,11 +859,10 @@ static int a2150_get_timing(struct comedi_device *dev, unsigned int *period,
case TRIG_ROUND_NEAREST:
default:
/* if least upper bound is better approximation */
- if (lub - *period < *period - glb) {
+ if (lub - *period < *period - glb)
*period = lub;
- } else {
+ else
*period = glb;
- }
break;
case TRIG_ROUND_UP:
*period = lub;
diff --git a/drivers/staging/comedi/drivers/ni_daq_dio24.c b/drivers/staging/comedi/drivers/ni_daq_dio24.c
index 49b824c..c0423a8 100644
--- a/drivers/staging/comedi/drivers/ni_daq_dio24.c
+++ b/drivers/staging/comedi/drivers/ni_daq_dio24.c
@@ -52,7 +52,7 @@ the PCMCIA interface.
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
-static struct pcmcia_device *pcmcia_cur_dev = NULL;
+static struct pcmcia_device *pcmcia_cur_dev;
#define DIO24_SIZE 4 /* size of io region used by board */
@@ -133,22 +133,19 @@ static int dio24_attach(struct comedi_device *dev, struct comedi_devconfig *it)
#endif
break;
default:
- printk("bug! couldn't determine board type\n");
+ pr_err("bug! couldn't determine board type\n");
return -EINVAL;
break;
}
- printk("comedi%d: ni_daq_dio24: %s, io 0x%lx", dev->minor,
- thisboard->name, iobase);
+ pr_debug("comedi%d: ni_daq_dio24: %s, io 0x%lx", dev->minor,
+ thisboard->name, iobase);
#ifdef incomplete
- if (irq) {
- printk(", irq %u", irq);
- }
+ if (irq)
+ pr_debug("irq %u\n", irq);
#endif
- printk("\n");
-
if (iobase == 0) {
- printk("io base address is zero!\n");
+ pr_err("io base address is zero!\n");
return -EINVAL;
}
@@ -173,7 +170,7 @@ static int dio24_attach(struct comedi_device *dev, struct comedi_devconfig *it)
static int dio24_detach(struct comedi_device *dev)
{
- printk("comedi%d: ni_daq_dio24: remove\n", dev->minor);
+ dev_info(dev->hw_dev, "comedi%d: ni_daq_dio24: remove\n", dev->minor);
if (dev->subdevices)
subdev_8255_cleanup(dev, dev->subdevices + 0);
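A few of these conversions (dio24_attach, labpc_attach, parts of ni_pcimio below) use pr_err()/pr_debug() rather than dev_*(); the likely reason is that no usable hardware device pointer is available at that point in these legacy attach paths, so the plain printk wrappers with an implicit level are the fallback. Roughly, under that assumption:

	/* Define pr_fmt before any include so every pr_*() call gets the prefix. */
	#define pr_fmt(fmt) "example_drv: " fmt

	#include <linux/errno.h>
	#include <linux/printk.h>

	static int example_check_iobase(unsigned long iobase)
	{
		if (iobase == 0) {
			pr_err("io base address is zero!\n");
			return -EINVAL;
		}
		pr_debug("using io 0x%lx\n", iobase);
		return 0;
	}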
diff --git a/drivers/staging/comedi/drivers/ni_labpc_cs.c b/drivers/staging/comedi/drivers/ni_labpc_cs.c
index 832a517..ff38405 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_cs.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_cs.c
@@ -145,7 +145,7 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
irq = link->irq;
break;
default:
- printk("bug! couldn't determine board type\n");
+ pr_err("bug! couldn't determine board type\n");
return -EINVAL;
break;
}
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 9148abd..0f0d995 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -1470,7 +1470,7 @@ static void m_series_stc_writew(struct comedi_device *dev, uint16_t data,
/* FIXME: DIO_Output_Register (16 bit reg) is replaced by M_Offset_Static_Digital_Output (32 bit)
and M_Offset_SCXI_Serial_Data_Out (8 bit) */
default:
- printk("%s: bug! unhandled register=0x%x in switch.\n",
+ printk(KERN_WARNING "%s: bug! unhandled register=0x%x in switch.\n",
__func__, reg);
BUG();
return;
@@ -1505,7 +1505,7 @@ static uint16_t m_series_stc_readw(struct comedi_device *dev, int reg)
offset = M_Offset_G01_Status;
break;
default:
- printk("%s: bug! unhandled register=0x%x in switch.\n",
+ printk(KERN_WARNING "%s: bug! unhandled register=0x%x in switch.\n",
__func__, reg);
BUG();
return 0;
@@ -1547,7 +1547,7 @@ static void m_series_stc_writel(struct comedi_device *dev, uint32_t data,
offset = M_Offset_G1_Load_B;
break;
default:
- printk("%s: bug! unhandled register=0x%x in switch.\n",
+ printk(KERN_WARNING "%s: bug! unhandled register=0x%x in switch.\n",
__func__, reg);
BUG();
return;
@@ -1573,7 +1573,7 @@ static uint32_t m_series_stc_readl(struct comedi_device *dev, int reg)
offset = M_Offset_G1_Save;
break;
default:
- printk("%s: bug! unhandled register=0x%x in switch.\n",
+ printk(KERN_WARNING "%s: bug! unhandled register=0x%x in switch.\n",
__func__, reg);
BUG();
return 0;
@@ -1632,9 +1632,8 @@ static void m_series_init_eeprom_buffer(struct comedi_device *dev)
}
devpriv->serial_number = be32_to_cpu(devpriv->serial_number);
- for (i = 0; i < M_SERIES_EEPROM_SIZE; ++i) {
+ for (i = 0; i < M_SERIES_EEPROM_SIZE; ++i)
devpriv->eeprom_buffer[i] = ni_readb(Start_Cal_EEPROM + i);
- }
writel(old_iodwbsr1_bits, devpriv->mite->mite_io_addr + MITE_IODWBSR_1);
writel(old_iodwbsr_bits, devpriv->mite->mite_io_addr + MITE_IODWBSR);
@@ -1665,9 +1664,9 @@ static void init_6143(struct comedi_device *dev)
static int pcimio_detach(struct comedi_device *dev)
{
mio_common_detach(dev);
- if (dev->irq) {
+ if (dev->irq)
free_irq(dev->irq, dev);
- }
+
if (dev->private) {
mite_free_ring(devpriv->ai_mite_ring);
mite_free_ring(devpriv->ao_mite_ring);
@@ -1685,7 +1684,7 @@ static int pcimio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
int ret;
- printk("comedi%d: ni_pcimio:", dev->minor);
+ dev_info(dev->hw_dev, "comedi%d: ni_pcimio:\n", dev->minor);
ret = ni_alloc_private(dev);
if (ret < 0)
@@ -1695,7 +1694,7 @@ static int pcimio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret < 0)
return ret;
- printk(" %s", boardtype.name);
+ dev_dbg(dev->hw_dev, "%s\n", boardtype.name);
dev->board_name = boardtype.name;
if (boardtype.reg_type & ni_reg_m_series_mask) {
@@ -1712,7 +1711,7 @@ static int pcimio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
ret = mite_setup(devpriv->mite);
if (ret < 0) {
- printk(" error setting up mite\n");
+ pr_warn("error setting up mite\n");
return ret;
}
comedi_set_hw_dev(dev, &devpriv->mite->pcidev->dev);
@@ -1740,13 +1739,13 @@ static int pcimio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
dev->irq = mite_irq(devpriv->mite);
if (dev->irq == 0) {
- printk(" unknown irq (bad)\n");
+ pr_warn("unknown irq (bad)\n");
} else {
- printk(" ( irq = %u )", dev->irq);
+ pr_debug("( irq = %u )\n", dev->irq);
ret = request_irq(dev->irq, ni_E_interrupt, NI_E_IRQ_FLAGS,
DRV_NAME, dev);
if (ret < 0) {
- printk(" irq not available\n");
+ pr_warn("irq not available\n");
dev->irq = 0;
}
}
@@ -1787,7 +1786,7 @@ static int pcimio_find_device(struct comedi_device *dev, int bus, int slot)
}
}
}
- printk("no device found\n");
+ pr_warn("no device found\n");
mite_list_devices();
return -EIO;
}
diff --git a/drivers/staging/comedi/drivers/pcl816.c b/drivers/staging/comedi/drivers/pcl816.c
index 0b9bee3..96cd7ec 100644
--- a/drivers/staging/comedi/drivers/pcl816.c
+++ b/drivers/staging/comedi/drivers/pcl816.c
@@ -155,8 +155,8 @@ static int pcl816_attach(struct comedi_device *dev,
static int pcl816_detach(struct comedi_device *dev);
#ifdef unused
-static int RTC_lock = 0; /* RTC lock */
-static int RTC_timer_lock = 0; /* RTC int lock */
+static int RTC_lock; /* RTC lock */
+static int RTC_timer_lock; /* RTC int lock */
#endif
static struct comedi_driver driver_pcl816 = {
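The RTC_lock/RTC_timer_lock change here (repeated in pcl818 below, and matching dt3000's debug_n_ints and dio24's pcmcia_cur_dev earlier) drops redundant initializers: static and file-scope variables are placed in .bss and start out zero, so an explicit `= 0` or `= NULL` adds nothing. In sketch form:

	/* redundant:  static int rtc_lock = 0;  static void *example_ptr = NULL; */
	static int rtc_lock;		/* lives in .bss, implicitly zero */
	static void *example_ptr;	/* implicitly NULL */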
diff --git a/drivers/staging/comedi/drivers/pcl818.c b/drivers/staging/comedi/drivers/pcl818.c
index b45a9bd..7344a53 100644
--- a/drivers/staging/comedi/drivers/pcl818.c
+++ b/drivers/staging/comedi/drivers/pcl818.c
@@ -252,8 +252,8 @@ static int pcl818_attach(struct comedi_device *dev,
static int pcl818_detach(struct comedi_device *dev);
#ifdef unused
-static int RTC_lock = 0; /* RTC lock */
-static int RTC_timer_lock = 0; /* RTC int lock */
+static int RTC_lock; /* RTC lock */
+static int RTC_timer_lock; /* RTC int lock */
#endif
struct pcl818_board {
@@ -463,9 +463,8 @@ static int pcl818_ao_insn_read(struct comedi_device *dev,
int n;
int chan = CR_CHAN(insn->chanspec);
- for (n = 0; n < insn->n; n++) {
+ for (n = 0; n < insn->n; n++)
data[n] = devpriv->ao_readback[chan];
- }
return n;
}
@@ -571,9 +570,9 @@ conv_finish:
return IRQ_HANDLED;
}
devpriv->act_chanlist_pos++;
- if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len) {
+ if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len)
devpriv->act_chanlist_pos = 0;
- }
+
s->async->cur_chan++;
if (s->async->cur_chan >= devpriv->ai_n_chan) {
/* printk("E"); */
@@ -645,9 +644,9 @@ static irqreturn_t interrupt_pcl818_ai_mode13_dma(int irq, void *d)
comedi_buf_put(s->async, ptr[bufptr++] >> 4); /* get one sample */
devpriv->act_chanlist_pos++;
- if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len) {
+ if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len)
devpriv->act_chanlist_pos = 0;
- }
+
s->async->cur_chan++;
if (s->async->cur_chan >= devpriv->ai_n_chan) {
s->async->cur_chan = 0;
@@ -805,11 +804,10 @@ static irqreturn_t interrupt_pcl818_ai_mode13_fifo(int irq, void *d)
return IRQ_HANDLED;
}
- if (lo & 2) {
+ if (lo & 2)
len = 512;
- } else {
+ else
len = 0;
- }
for (i = 0; i < len; i++) {
lo = inb(dev->iobase + PCL818_FI_DATALO);
@@ -827,9 +825,9 @@ static irqreturn_t interrupt_pcl818_ai_mode13_fifo(int irq, void *d)
comedi_buf_put(s->async, (lo >> 4) | (inb(dev->iobase + PCL818_FI_DATAHI) << 4)); /* get one sample */
devpriv->act_chanlist_pos++;
- if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len) {
+ if (devpriv->act_chanlist_pos >= devpriv->act_chanlist_len)
devpriv->act_chanlist_pos = 0;
- }
+
s->async->cur_chan++;
if (s->async->cur_chan >= devpriv->ai_n_chan) {
s->async->cur_chan = 0;
@@ -1009,7 +1007,7 @@ static int pcl818_ai_cmd_mode(int mode, struct comedi_device *dev,
int divisor1 = 0, divisor2 = 0;
unsigned int seglen;
- printk("pcl818_ai_cmd_mode()\n");
+ dev_dbg(dev->hw_dev, "pcl818_ai_cmd_mode()\n");
if ((!dev->irq) && (!devpriv->dma_rtc)) {
comedi_error(dev, "IRQ not defined!");
return -EINVAL;
@@ -1112,7 +1110,7 @@ static int pcl818_ai_cmd_mode(int mode, struct comedi_device *dev,
break;
}
#endif
- printk("pcl818_ai_cmd_mode() end\n");
+ dev_dbg(dev->hw_dev, "pcl818_ai_cmd_mode() end\n");
return 0;
}
@@ -1309,11 +1307,9 @@ static void setup_channel_list(struct comedi_device *dev,
*/
static int check_single_ended(unsigned int port)
{
- if (inb(port + PCL818_STATUS) & 0x20) {
+ if (inb(port + PCL818_STATUS) & 0x20)
return 1;
- } else {
- return 0;
- }
+ return 0;
}
/*
@@ -1352,9 +1348,8 @@ static int ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
if (!cmd->stop_src || tmp != cmd->stop_src)
err++;
- if (err) {
+ if (err)
return 1;
- }
/* step 2: make sure trigger sources are unique and mutually compatible */
@@ -1377,9 +1372,8 @@ static int ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
if (cmd->stop_src != TRIG_NONE && cmd->stop_src != TRIG_COUNT)
err++;
- if (err) {
+ if (err)
return 2;
- }
/* step 3: make sure arguments are trivially compatible */
@@ -1421,9 +1415,8 @@ static int ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
}
}
- if (err) {
+ if (err)
return 3;
- }
/* step 4: fix up any arguments */
@@ -1438,9 +1431,8 @@ static int ai_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s,
err++;
}
- if (err) {
+ if (err)
return 4;
- }
/* step 5: complain about special chanlist considerations */
@@ -1461,7 +1453,7 @@ static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
struct comedi_cmd *cmd = &s->async->cmd;
int retval;
- printk("pcl818_ai_cmd()\n");
+ dev_dbg(dev->hw_dev, "pcl818_ai_cmd()\n");
devpriv->ai_n_chan = cmd->chanlist_len;
devpriv->ai_chanlist = cmd->chanlist;
devpriv->ai_flags = cmd->flags;
@@ -1470,17 +1462,16 @@ static int ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ai_timer1 = 0;
devpriv->ai_timer2 = 0;
- if (cmd->stop_src == TRIG_COUNT) {
+ if (cmd->stop_src == TRIG_COUNT)
devpriv->ai_scans = cmd->stop_arg;
- } else {
+ else
devpriv->ai_scans = 0;
- }
if (cmd->scan_begin_src == TRIG_FOLLOW) { /* mode 1, 3 */
if (cmd->convert_src == TRIG_TIMER) { /* mode 1 */
devpriv->ai_timer1 = cmd->convert_arg;
retval = pcl818_ai_cmd_mode(1, dev, s);
- printk("pcl818_ai_cmd() end\n");
+ dev_dbg(dev->hw_dev, "pcl818_ai_cmd() end\n");
return retval;
}
if (cmd->convert_src == TRIG_EXT) { /* mode 3 */
@@ -1499,7 +1490,7 @@ static int pcl818_ai_cancel(struct comedi_device *dev,
struct comedi_subdevice *s)
{
if (devpriv->irq_blocked > 0) {
- printk("pcl818_ai_cancel()\n");
+ dev_dbg(dev->hw_dev, "pcl818_ai_cancel()\n");
devpriv->irq_was_now_closed = 1;
switch (devpriv->ai_mode) {
@@ -1549,7 +1540,7 @@ static int pcl818_ai_cancel(struct comedi_device *dev,
}
end:
- printk("pcl818_ai_cancel() end\n");
+ dev_dbg(dev->hw_dev, "pcl818_ai_cancel() end\n");
return 0;
}
@@ -1633,11 +1624,11 @@ static int set_rtc_irq_bit(unsigned char bit)
save_flags(flags);
cli();
val = CMOS_READ(RTC_CONTROL);
- if (bit) {
+ if (bit)
val |= RTC_PIE;
- } else {
+ else
val &= ~RTC_PIE;
- }
+
CMOS_WRITE(val, RTC_CONTROL);
CMOS_READ(RTC_INTR_FLAGS);
restore_flags(flags);
@@ -1754,22 +1745,23 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it)
/* claim our I/O space */
iobase = it->options[0];
- printk("comedi%d: pcl818: board=%s, ioport=0x%03lx",
- dev->minor, this_board->name, iobase);
+ printk
+ ("comedi%d: pcl818: board=%s, ioport=0x%03lx",
+ dev->minor, this_board->name, iobase);
devpriv->io_range = this_board->io_range;
if ((this_board->fifo) && (it->options[2] == -1)) { /* we've board with FIFO and we want to use FIFO */
devpriv->io_range = PCLx1xFIFO_RANGE;
devpriv->usefifo = 1;
}
if (!request_region(iobase, devpriv->io_range, "pcl818")) {
- printk("I/O port conflict\n");
+ comedi_error(dev, "I/O port conflict\n");
return -EIO;
}
dev->iobase = iobase;
if (pcl818_check(iobase)) {
- printk(", I can't detect board. FAIL!\n");
+ comedi_error(dev, "I can't detect board. FAIL!\n");
return -EIO;
}
@@ -1793,19 +1785,18 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it)
irq);
irq = 0; /* Can't use IRQ */
} else {
- printk(", irq=%u", irq);
+ printk(KERN_DEBUG "irq=%u", irq);
}
}
}
}
dev->irq = irq;
- if (irq) {
- devpriv->irq_free = 1;
- } /* 1=we have allocated irq */
- else {
+ if (irq)
+ devpriv->irq_free = 1; /* 1=we have allocated irq */
+ else
devpriv->irq_free = 0;
- }
+
devpriv->irq_blocked = 0; /* number of subdevice which use IRQ */
devpriv->ai_mode = 0; /* mode of irq */
@@ -1825,7 +1816,7 @@ static int pcl818_attach(struct comedi_device *dev, struct comedi_devconfig *it)
"pcl818 DMA (RTC)", dev)) {
devpriv->dma_rtc = 1;
devpriv->rtc_irq = RTC_IRQ;
- printk(", dma_irq=%u", devpriv->rtc_irq);
+ printk(KERN_DEBUG "dma_irq=%u", devpriv->rtc_irq);
} else {
RTC_lock--;
if (RTC_lock == 0) {
@@ -1850,34 +1841,26 @@ no_rtc:
if (dma < 1)
goto no_dma; /* DMA disabled */
if (((1 << dma) & this_board->DMAbits) == 0) {
- printk(", DMA is out of allowed range, FAIL!\n");
+ printk(KERN_ERR "DMA is out of allowed range, FAIL!\n");
return -EINVAL; /* Bad DMA */
}
ret = request_dma(dma, "pcl818");
- if (ret) {
- printk(", unable to allocate DMA %u, FAIL!\n", dma);
+ if (ret)
return -EBUSY; /* DMA isn't free */
- }
devpriv->dma = dma;
- printk(", dma=%u", dma);
pages = 2; /* we need 16KB */
devpriv->dmabuf[0] = __get_dma_pages(GFP_KERNEL, pages);
- if (!devpriv->dmabuf[0]) {
- printk(", unable to allocate DMA buffer, FAIL!\n");
+ if (!devpriv->dmabuf[0])
/* maybe experiment with try_to_free_pages() will help .... */
return -EBUSY; /* no buffer :-( */
- }
devpriv->dmapages[0] = pages;
devpriv->hwdmaptr[0] = virt_to_bus((void *)devpriv->dmabuf[0]);
devpriv->hwdmasize[0] = (1 << pages) * PAGE_SIZE;
/* printk("%d %d %ld, ",devpriv->dmapages[0],devpriv->hwdmasize[0],PAGE_SIZE); */
if (devpriv->dma_rtc == 0) { /* we must do duble buff :-( */
devpriv->dmabuf[1] = __get_dma_pages(GFP_KERNEL, pages);
- if (!devpriv->dmabuf[1]) {
- printk
- (", unable to allocate DMA buffer, FAIL!\n");
+ if (!devpriv->dmabuf[1])
return -EBUSY;
- }
devpriv->dmapages[1] = pages;
devpriv->hwdmaptr[1] =
virt_to_bus((void *)devpriv->dmabuf[1]);
@@ -2017,11 +2000,10 @@ no_dma:
}
/* select 1/10MHz oscilator */
- if ((it->options[3] == 0) || (it->options[3] == 10)) {
+ if ((it->options[3] == 0) || (it->options[3] == 10))
devpriv->i8253_osc_base = 100;
- } else {
+ else
devpriv->i8253_osc_base = 1000;
- }
/* max sampling speed */
devpriv->ns_min = this_board->ns_min;
diff --git a/drivers/staging/comedi/drivers/pcmmio.c b/drivers/staging/comedi/drivers/pcmmio.c
index 3ad04aa..eddac00 100644
--- a/drivers/staging/comedi/drivers/pcmmio.c
+++ b/drivers/staging/comedi/drivers/pcmmio.c
@@ -371,15 +371,15 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
iobase = it->options[0];
irq[0] = it->options[1];
- printk(KERN_INFO "comedi%d: %s: io: %lx ", dev->minor, driver.driver_name,
- iobase);
+ printk(KERN_INFO "comedi%d: %s: io: %lx attaching...\n", dev->minor,
+ driver.driver_name, iobase);
dev->iobase = iobase;
if (!iobase || !request_region(iobase,
thisboard->total_iosize,
driver.driver_name)) {
- printk(KERN_ERR "I/O port conflict\n");
+ printk(KERN_ERR "comedi%d: I/O port conflict\n", dev->minor);
return -EIO;
}
@@ -394,7 +394,8 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
* convenient macro defined in comedidev.h.
*/
if (alloc_private(dev, sizeof(struct pcmmio_private)) < 0) {
- printk(KERN_ERR "cannot allocate private data structure\n");
+ printk(KERN_ERR "comedi%d: cannot allocate private data structure\n",
+ dev->minor);
return -ENOMEM;
}
@@ -417,7 +418,8 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
kcalloc(n_subdevs, sizeof(struct pcmmio_subdev_private),
GFP_KERNEL);
if (!devpriv->sprivs) {
- printk(KERN_ERR "cannot allocate subdevice private data structures\n");
+ printk(KERN_ERR "comedi%d: cannot allocate subdevice private data structures\n",
+ dev->minor);
return -ENOMEM;
}
/*
@@ -427,7 +429,8 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
* Allocate 1 AI + 1 AO + 2 DIO subdevs (24 lines per DIO)
*/
if (alloc_subdevices(dev, n_subdevs) < 0) {
- printk(KERN_ERR "cannot allocate subdevice data structures\n");
+ printk(KERN_ERR "comedi%d: cannot allocate subdevice data structures\n",
+ dev->minor);
return -ENOMEM;
}
@@ -557,14 +560,15 @@ static int pcmmio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
*/
if (irq[0]) {
- printk(KERN_DEBUG "irq: %u ", irq[0]);
+ printk(KERN_DEBUG "comedi%d: irq: %u\n", dev->minor, irq[0]);
if (thisboard->dio_num_asics == 2 && irq[1])
- printk(KERN_DEBUG "second ASIC irq: %u ", irq[1]);
+ printk(KERN_DEBUG "comedi%d: second ASIC irq: %u\n",
+ dev->minor, irq[1]);
} else {
- printk(KERN_INFO "(IRQ mode disabled) ");
+ printk(KERN_INFO "comedi%d: (IRQ mode disabled)\n", dev->minor);
}
- printk(KERN_INFO "attached\n");
+ printk(KERN_INFO "comedi%d: attached\n", dev->minor);
return 1;
}
@@ -663,7 +667,7 @@ static int pcmmio_dio_insn_bits(struct comedi_device *dev,
}
#ifdef DAMMIT_ITS_BROKEN
/* DEBUG */
- printk(KERN_DEBUG "data_out_byte %02x\n", (unsigned)byte);
+ printk("data_out_byte %02x\n", (unsigned)byte);
#endif
/* save the digital input lines for this byte.. */
s->state |= ((unsigned int)byte) << offset;
diff --git a/drivers/staging/comedi/drivers/pcmuio.c b/drivers/staging/comedi/drivers/pcmuio.c
index b2c2c89..661ba2e 100644
--- a/drivers/staging/comedi/drivers/pcmuio.c
+++ b/drivers/staging/comedi/drivers/pcmuio.c
@@ -295,15 +295,15 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
irq[0] = it->options[1];
irq[1] = it->options[2];
- printk("comedi%d: %s: io: %lx ", dev->minor, driver.driver_name,
- iobase);
+ dev_dbg(dev->hw_dev, "comedi%d: %s: io: %lx attached\n", dev->minor,
+ driver.driver_name, iobase);
dev->iobase = iobase;
if (!iobase || !request_region(iobase,
thisboard->num_asics * ASIC_IOSIZE,
driver.driver_name)) {
- printk("I/O port conflict\n");
+ dev_err(dev->hw_dev, "I/O port conflict\n");
return -EIO;
}
@@ -318,7 +318,7 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
* convenient macro defined in comedidev.h.
*/
if (alloc_private(dev, sizeof(struct pcmuio_private)) < 0) {
- printk("cannot allocate private data structure\n");
+ dev_warn(dev->hw_dev, "cannot allocate private data structure\n");
return -ENOMEM;
}
@@ -337,7 +337,7 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
kcalloc(n_subdevs, sizeof(struct pcmuio_subdev_private),
GFP_KERNEL);
if (!devpriv->sprivs) {
- printk("cannot allocate subdevice private data structures\n");
+ dev_warn(dev->hw_dev, "cannot allocate subdevice private data structures\n");
return -ENOMEM;
}
/*
@@ -348,7 +348,7 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
* 96-channel version of the board.
*/
if (alloc_subdevices(dev, n_subdevs) < 0) {
- printk("cannot allocate subdevice data structures\n");
+ dev_dbg(dev->hw_dev, "cannot allocate subdevice data structures\n");
return -ENOMEM;
}
@@ -436,14 +436,13 @@ static int pcmuio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
irqs.. */
if (irq[0]) {
- printk("irq: %u ", irq[0]);
+ dev_dbg(dev->hw_dev, "irq: %u\n", irq[0]);
if (irq[1] && thisboard->num_asics == 2)
- printk("second ASIC irq: %u ", irq[1]);
+ dev_dbg(dev->hw_dev, "second ASIC irq: %u\n", irq[1]);
} else {
- printk("(IRQ mode disabled) ");
+ dev_dbg(dev->hw_dev, "(IRQ mode disabled)\n");
}
- printk("attached\n");
return 1;
}
@@ -460,7 +459,8 @@ static int pcmuio_detach(struct comedi_device *dev)
{
int i;
- printk("comedi%d: %s: remove\n", dev->minor, driver.driver_name);
+ dev_dbg(dev->hw_dev, "comedi%d: %s: remove\n", dev->minor,
+ driver.driver_name);
if (dev->iobase)
release_region(dev->iobase, ASIC_IOSIZE * thisboard->num_asics);
@@ -501,7 +501,8 @@ static int pcmuio_dio_insn_bits(struct comedi_device *dev,
#ifdef DAMMIT_ITS_BROKEN
/* DEBUG */
- printk("write mask: %08x data: %08x\n", data[0], data[1]);
+ dev_dbg(dev->hw_dev, "write mask: %08x data: %08x\n", data[0],
+ data[1]);
#endif
s->state = 0;
@@ -537,7 +538,7 @@ static int pcmuio_dio_insn_bits(struct comedi_device *dev,
}
#ifdef DAMMIT_ITS_BROKEN
/* DEBUG */
- printk("data_out_byte %02x\n", (unsigned)byte);
+ dev_dbg(dev->hw_dev, "data_out_byte %02x\n", (unsigned)byte);
#endif
/* save the digital input lines for this byte.. */
s->state |= ((unsigned int)byte) << offset;
@@ -548,7 +549,8 @@ static int pcmuio_dio_insn_bits(struct comedi_device *dev,
#ifdef DAMMIT_ITS_BROKEN
/* DEBUG */
- printk("s->state %08x data_out %08x\n", s->state, data[1]);
+ dev_dbg(dev->hw_dev, "s->state %08x data_out %08x\n", s->state,
+ data[1]);
#endif
return 2;
@@ -951,14 +953,13 @@ pcmuio_inttrig_start_intr(struct comedi_device *dev, struct comedi_subdevice *s,
spin_lock_irqsave(&subpriv->intr.spinlock, flags);
s->async->inttrig = 0;
- if (subpriv->intr.active) {
+ if (subpriv->intr.active)
event = pcmuio_start_intr(dev, s);
- }
+
spin_unlock_irqrestore(&subpriv->intr.spinlock, flags);
- if (event) {
+ if (event)
comedi_event(dev, s);
- }
return 1;
}
@@ -1000,9 +1001,8 @@ static int pcmuio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
}
spin_unlock_irqrestore(&subpriv->intr.spinlock, flags);
- if (event) {
+ if (event)
comedi_event(dev, s);
- }
return 0;
}
diff --git a/drivers/staging/comedi/drivers/serial2002.c b/drivers/staging/comedi/drivers/serial2002.c
index ade2202..d880c2f 100644
--- a/drivers/staging/comedi/drivers/serial2002.c
+++ b/drivers/staging/comedi/drivers/serial2002.c
@@ -824,7 +824,7 @@ static int serial2002_attach(struct comedi_device *dev,
{
struct comedi_subdevice *s;
- printk("comedi%d: serial2002: ", dev->minor);
+ dev_dbg(dev->hw_dev, "comedi%d: attached\n", dev->minor);
dev->board_name = thisboard->name;
if (alloc_private(dev, sizeof(struct serial2002_private)) < 0)
return -ENOMEM;
@@ -832,7 +832,8 @@ static int serial2002_attach(struct comedi_device *dev,
dev->close = serial_2002_close;
devpriv->port = it->options[0];
devpriv->speed = it->options[1];
- printk("/dev/ttyS%d @ %d\n", devpriv->port, devpriv->speed);
+ dev_dbg(dev->hw_dev, "/dev/ttyS%d @ %d\n", devpriv->port,
+ devpriv->speed);
if (alloc_subdevices(dev, 5) < 0)
return -ENOMEM;
@@ -891,7 +892,7 @@ static int serial2002_detach(struct comedi_device *dev)
struct comedi_subdevice *s;
int i;
- printk("comedi%d: serial2002: remove\n", dev->minor);
+ dev_dbg(dev->hw_dev, "comedi%d: remove\n", dev->minor);
for (i = 0; i < 5; i++) {
s = &dev->subdevices[i];
kfree(s->maxdata_list);
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index 6144afb..ca6bcf8 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -1524,15 +1524,17 @@ static int usbdux_ao_inttrig(struct comedi_device *dev,
return -EFAULT;
down(&this_usbduxsub->sem);
+
if (!(this_usbduxsub->probed)) {
- up(&this_usbduxsub->sem);
- return -ENODEV;
+ ret = -ENODEV;
+ goto out;
}
if (trignum != 0) {
dev_err(&this_usbduxsub->interface->dev,
"comedi%d: usbdux_ao_inttrig: invalid trignum\n",
dev->minor);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (!(this_usbduxsub->ao_cmd_running)) {
this_usbduxsub->ao_cmd_running = 1;
@@ -1542,8 +1544,7 @@ static int usbdux_ao_inttrig(struct comedi_device *dev,
"comedi%d: usbdux_ao_inttrig: submitURB: "
"err=%d\n", dev->minor, ret);
this_usbduxsub->ao_cmd_running = 0;
- up(&this_usbduxsub->sem);
- return ret;
+ goto out;
}
s->async->inttrig = NULL;
} else {
@@ -1551,8 +1552,10 @@ static int usbdux_ao_inttrig(struct comedi_device *dev,
"comedi%d: ao_inttrig but acqu is already running.\n",
dev->minor);
}
+ ret = 1;
+out:
up(&this_usbduxsub->sem);
- return 1;
+ return ret;
}
static int usbdux_ao_cmdtest(struct comedi_device *dev,
@@ -2689,6 +2692,7 @@ static int usbduxsigma_attach(struct comedi_device *dev,
if (ret < 0) {
dev_err(&udev->interface->dev,
"comedi%d: no space for subdev\n", dev->minor);
+ up(&udev->sem);
up(&start_stop_sem);
return ret;
}
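The usbdux_ao_inttrig() rework above is a standard single-exit cleanup: every early return that previously had to remember to call up() on the semaphore now jumps to one out: label, so the lock is released on exactly one path. A condensed, hypothetical version of the shape (names invented; the success value 1 mirrors what the original handler returns):

	#include <linux/errno.h>
	#include <linux/semaphore.h>

	static int example_trigger(struct semaphore *sem, int probed, int trignum)
	{
		int ret;

		down(sem);
		if (!probed) {
			ret = -ENODEV;
			goto out;
		}
		if (trignum != 0) {
			ret = -EINVAL;
			goto out;
		}
		ret = 1;		/* success, as in the original handler */
	out:
		up(sem);		/* single release point for every path */
		return ret;
	}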
diff --git a/drivers/staging/crystalhd/bc_dts_defs.h b/drivers/staging/crystalhd/bc_dts_defs.h
index fde5b06..8cd51a7 100644
--- a/drivers/staging/crystalhd/bc_dts_defs.h
+++ b/drivers/staging/crystalhd/bc_dts_defs.h
@@ -296,43 +296,79 @@ enum {
vdecColourPrimariesSMPTE240M,
vdecColourPrimariesGenericFilm,
};
-
+/**
+ * @vdecRESOLUTION_CUSTOM: custom
+ * @vdecRESOLUTION_480i: 480i
+ * @vdecRESOLUTION_1080i: 1080i (1920x1080, 60i)
+ * @vdecRESOLUTION_NTSC: NTSC (720x483, 60i)
+ * @vdecRESOLUTION_480p: 480p (720x480, 60p)
+ * @vdecRESOLUTION_720p: 720p (1280x720, 60p)
+ * @vdecRESOLUTION_PAL1: PAL_1 (720x576, 50i)
+ * @vdecRESOLUTION_1080i25: 1080i25 (1920x1080, 50i)
+ * @vdecRESOLUTION_720p50: 720p50 (1280x720, 50p)
+ * @vdecRESOLUTION_576p: 576p (720x576, 50p)
+ * @vdecRESOLUTION_1080i29_97: 1080i (1920x1080, 59.94i)
+ * @vdecRESOLUTION_720p59_94: 720p (1280x720, 59.94p)
+ * @vdecRESOLUTION_SD_DVD: SD DVD (720x483, 60i)
+ * @vdecRESOLUTION_480p656: 480p (720x480, 60p),
+ * output bus width 8 bit, clock 74.25MHz
+ * @vdecRESOLUTION_1080p23_976: 1080p23_976 (1920x1080, 23.976p)
+ * @vdecRESOLUTION_720p23_976: 720p23_976 (1280x720p, 23.976p)
+ * @vdecRESOLUTION_240p29_97: 240p (1440x240, 29.97p )
+ * @vdecRESOLUTION_240p30: 240p (1440x240, 30p)
+ * @vdecRESOLUTION_288p25: 288p (1440x288p, 25p)
+ * @vdecRESOLUTION_1080p29_97: 1080p29_97 (1920x1080, 29.97p)
+ * @vdecRESOLUTION_1080p30: 1080p30 (1920x1080, 30p)
+ * @vdecRESOLUTION_1080p24: 1080p24 (1920x1080, 24p)
+ * @vdecRESOLUTION_1080p25: 1080p25 (1920x1080, 25p)
+ * @vdecRESOLUTION_720p24: 720p24 (1280x720, 25p)
+ * @vdecRESOLUTION_720p29_97: 720p29.97 (1280x720, 29.97p)
+ * @vdecRESOLUTION_480p23_976: 480p23.976 (720*480, 23.976)
+ * @vdecRESOLUTION_480p29_97: 480p29.976 (720*480, 29.97p)
+ * @vdecRESOLUTION_576p25: 576p25 (720*576, 25p)
+ * @vdecRESOLUTION_480p0: 480p (720x480, 0p)
+ * @vdecRESOLUTION_480i0: 480i (720x480, 0i)
+ * @vdecRESOLUTION_576p0: 576p (720x576, 0p)
+ * @vdecRESOLUTION_720p0: 720p (1280x720, 0p)
+ * @vdecRESOLUTION_1080p0: 1080p (1920x1080, 0p)
+ * @vdecRESOLUTION_1080i0: 1080i (1920x1080, 0i)
+ */
enum {
- vdecRESOLUTION_CUSTOM = 0x00000000, /* custom */
- vdecRESOLUTION_480i = 0x00000001, /* 480i */
- vdecRESOLUTION_1080i = 0x00000002, /* 1080i (1920x1080, 60i) */
- vdecRESOLUTION_NTSC = 0x00000003, /* NTSC (720x483, 60i) */
- vdecRESOLUTION_480p = 0x00000004, /* 480p (720x480, 60p) */
- vdecRESOLUTION_720p = 0x00000005, /* 720p (1280x720, 60p) */
- vdecRESOLUTION_PAL1 = 0x00000006, /* PAL_1 (720x576, 50i) */
- vdecRESOLUTION_1080i25 = 0x00000007, /* 1080i25 (1920x1080, 50i) */
- vdecRESOLUTION_720p50 = 0x00000008, /* 720p50 (1280x720, 50p) */
- vdecRESOLUTION_576p = 0x00000009, /* 576p (720x576, 50p) */
- vdecRESOLUTION_1080i29_97 = 0x0000000A, /* 1080i (1920x1080, 59.94i) */
- vdecRESOLUTION_720p59_94 = 0x0000000B, /* 720p (1280x720, 59.94p) */
- vdecRESOLUTION_SD_DVD = 0x0000000C, /* SD DVD (720x483, 60i) */
- vdecRESOLUTION_480p656 = 0x0000000D, /* 480p (720x480, 60p), output bus width 8 bit, clock 74.25MHz */
- vdecRESOLUTION_1080p23_976 = 0x0000000E, /* 1080p23_976 (1920x1080, 23.976p) */
- vdecRESOLUTION_720p23_976 = 0x0000000F, /* 720p23_976 (1280x720p, 23.976p) */
- vdecRESOLUTION_240p29_97 = 0x00000010, /* 240p (1440x240, 29.97p ) */
- vdecRESOLUTION_240p30 = 0x00000011, /* 240p (1440x240, 30p) */
- vdecRESOLUTION_288p25 = 0x00000012, /* 288p (1440x288p, 25p) */
- vdecRESOLUTION_1080p29_97 = 0x00000013, /* 1080p29_97 (1920x1080, 29.97p) */
- vdecRESOLUTION_1080p30 = 0x00000014, /* 1080p30 (1920x1080, 30p) */
- vdecRESOLUTION_1080p24 = 0x00000015, /* 1080p24 (1920x1080, 24p) */
- vdecRESOLUTION_1080p25 = 0x00000016, /* 1080p25 (1920x1080, 25p) */
- vdecRESOLUTION_720p24 = 0x00000017, /* 720p24 (1280x720, 25p) */
- vdecRESOLUTION_720p29_97 = 0x00000018, /* 720p29.97 (1280x720, 29.97p) */
- vdecRESOLUTION_480p23_976 = 0x00000019, /* 480p23.976 (720*480, 23.976) */
- vdecRESOLUTION_480p29_97 = 0x0000001A, /* 480p29.976 (720*480, 29.97p) */
- vdecRESOLUTION_576p25 = 0x0000001B, /* 576p25 (720*576, 25p) */
+ vdecRESOLUTION_CUSTOM = 0x00000000,
+ vdecRESOLUTION_480i = 0x00000001,
+ vdecRESOLUTION_1080i = 0x00000002,
+ vdecRESOLUTION_NTSC = 0x00000003,
+ vdecRESOLUTION_480p = 0x00000004,
+ vdecRESOLUTION_720p = 0x00000005,
+ vdecRESOLUTION_PAL1 = 0x00000006,
+ vdecRESOLUTION_1080i25 = 0x00000007,
+ vdecRESOLUTION_720p50 = 0x00000008,
+ vdecRESOLUTION_576p = 0x00000009,
+ vdecRESOLUTION_1080i29_97 = 0x0000000A,
+ vdecRESOLUTION_720p59_94 = 0x0000000B,
+ vdecRESOLUTION_SD_DVD = 0x0000000C,
+ vdecRESOLUTION_480p656 = 0x0000000D,
+ vdecRESOLUTION_1080p23_976 = 0x0000000E,
+ vdecRESOLUTION_720p23_976 = 0x0000000F,
+ vdecRESOLUTION_240p29_97 = 0x00000010,
+ vdecRESOLUTION_240p30 = 0x00000011,
+ vdecRESOLUTION_288p25 = 0x00000012,
+ vdecRESOLUTION_1080p29_97 = 0x00000013,
+ vdecRESOLUTION_1080p30 = 0x00000014,
+ vdecRESOLUTION_1080p24 = 0x00000015,
+ vdecRESOLUTION_1080p25 = 0x00000016,
+ vdecRESOLUTION_720p24 = 0x00000017,
+ vdecRESOLUTION_720p29_97 = 0x00000018,
+ vdecRESOLUTION_480p23_976 = 0x00000019,
+ vdecRESOLUTION_480p29_97 = 0x0000001A,
+ vdecRESOLUTION_576p25 = 0x0000001B,
/* For Zero Frame Rate */
- vdecRESOLUTION_480p0 = 0x0000001C, /* 480p (720x480, 0p) */
- vdecRESOLUTION_480i0 = 0x0000001D, /* 480i (720x480, 0i) */
- vdecRESOLUTION_576p0 = 0x0000001E, /* 576p (720x576, 0p) */
- vdecRESOLUTION_720p0 = 0x0000001F, /* 720p (1280x720, 0p) */
- vdecRESOLUTION_1080p0 = 0x00000020, /* 1080p (1920x1080, 0p) */
- vdecRESOLUTION_1080i0 = 0x00000021, /* 1080i (1920x1080, 0i) */
+ vdecRESOLUTION_480p0 = 0x0000001C,
+ vdecRESOLUTION_480i0 = 0x0000001D,
+ vdecRESOLUTION_576p0 = 0x0000001E,
+ vdecRESOLUTION_720p0 = 0x0000001F,
+ vdecRESOLUTION_1080p0 = 0x00000020,
+ vdecRESOLUTION_1080i0 = 0x00000021,
};
/* Bit definitions for 'flags' field */
@@ -359,14 +395,23 @@ enum _BC_OUTPUT_FORMAT {
MODE422_YUY2 = 0x1,
MODE422_UYVY = 0x2,
};
-
+/**
+ * struct BC_PIC_INFO_BLOCK
+ * @timeStamp: Timestamp
+ * @picture_number: Ordinal display number
+ * @width: pixels
+ * @height: pixels
+ * @chroma_format: 0x420, 0x422 or 0x444
+ * @n_drop: number of non-reference frames
+ * remaining to be dropped
+ */
struct BC_PIC_INFO_BLOCK {
/* Common fields. */
- uint64_t timeStamp; /* Timestamp */
- uint32_t picture_number; /* Ordinal display number */
- uint32_t width; /* pixels */
- uint32_t height; /* pixels */
- uint32_t chroma_format; /* 0x420, 0x422 or 0x444 */
+ uint64_t timeStamp;
+ uint32_t picture_number;
+ uint32_t width;
+ uint32_t height;
+ uint32_t chroma_format;
uint32_t pulldown;
uint32_t flags;
uint32_t frame_rate;
@@ -376,7 +421,8 @@ struct BC_PIC_INFO_BLOCK {
uint32_t sess_num;
uint32_t ycom;
uint32_t custom_aspect_ratio_width_height;
- uint32_t n_drop; /* number of non-reference frames remaining to be dropped */
+ uint32_t n_drop; /* number of non-reference frames
+ remaining to be dropped */
/* Protocol-specific extensions. */
union {
@@ -390,43 +436,71 @@ struct BC_PIC_INFO_BLOCK {
/*------------------------------------------------------*
* ProcOut Info *
*------------------------------------------------------*/
-/* Optional flags for ProcOut Interface.*/
+
+/**
+ * enum POUT_OPTIONAL_IN_FLAGS_ - Optional flags for ProcOut Interface.
+ * @BC_POUT_FLAGS_YV12: Copy Data in YV12 format
+ * @BC_POUT_FLAGS_STRIDE: Stride size is valid.
+ * @BC_POUT_FLAGS_SIZE: Take size information from Application
+ * @BC_POUT_FLAGS_INTERLACED: copy only half the bytes
+ * @BC_POUT_FLAGS_INTERLEAVED: interleaved frame
+ * @BC_POUT_FLAGS_FMT_CHANGE: Data is not VALID when this flag is set
+ * @BC_POUT_FLAGS_PIB_VALID: PIB Information valid
+ * @BC_POUT_FLAGS_ENCRYPTED: Data is encrypted.
+ * @BC_POUT_FLAGS_FLD_BOT: Bottom Field data
+ */
enum POUT_OPTIONAL_IN_FLAGS_ {
/* Flags from App to Device */
- BC_POUT_FLAGS_YV12 = 0x01, /* Copy Data in YV12 format */
- BC_POUT_FLAGS_STRIDE = 0x02, /* Stride size is valid. */
- BC_POUT_FLAGS_SIZE = 0x04, /* Take size information from Application */
- BC_POUT_FLAGS_INTERLACED = 0x08, /* copy only half the bytes */
- BC_POUT_FLAGS_INTERLEAVED = 0x10, /* interleaved frame */
+ BC_POUT_FLAGS_YV12 = 0x01,
+ BC_POUT_FLAGS_STRIDE = 0x02,
+ BC_POUT_FLAGS_SIZE = 0x04,
+ BC_POUT_FLAGS_INTERLACED = 0x08,
+ BC_POUT_FLAGS_INTERLEAVED = 0x10,
/* Flags from Device to APP */
- BC_POUT_FLAGS_FMT_CHANGE = 0x10000, /* Data is not VALID when this flag is set */
- BC_POUT_FLAGS_PIB_VALID = 0x20000, /* PIB Information valid */
- BC_POUT_FLAGS_ENCRYPTED = 0x40000, /* Data is encrypted. */
- BC_POUT_FLAGS_FLD_BOT = 0x80000, /* Bottom Field data */
+ BC_POUT_FLAGS_FMT_CHANGE = 0x10000,
+ BC_POUT_FLAGS_PIB_VALID = 0x20000,
+ BC_POUT_FLAGS_ENCRYPTED = 0x40000,
+ BC_POUT_FLAGS_FLD_BOT = 0x80000,
};
-typedef enum BC_STATUS(*dts_pout_callback)(void *shnd, uint32_t width, uint32_t height, uint32_t stride, void *pOut);
+typedef enum BC_STATUS(*dts_pout_callback)(void *shnd, uint32_t width,
+ uint32_t height, uint32_t stride, void *pOut);
/* Line 21 Closed Caption */
/* User Data */
#define MAX_UD_SIZE 1792 /* 1920 - 128 */
+/**
+ * struct BC_DTS_PROC_OUT
+ * @Ybuff: Caller Supplied buffer for Y data
+ * @YbuffSz: Caller Supplied Y buffer size
+ * @YBuffDoneSz: Transferred Y datasize
+ * @UVbuff: Caller Supplied buffer for UV data
+ * @UVbuffSz: Caller Supplied UV buffer size
+ * @UVBuffDoneSz: Transferred UV data size
+ * @StrideSz: Caller supplied Stride Size
+ * @PoutFlags: Call IN Flags
+ * @discCnt: Picture discontinuity count
+ * @PicInfo: Picture Information Block Data
+ * @b422Mode: Picture output Mode
+ * @bPibEnc: PIB encrypted
+ */
struct BC_DTS_PROC_OUT {
- uint8_t *Ybuff; /* Caller Supplied buffer for Y data */
- uint32_t YbuffSz; /* Caller Supplied Y buffer size */
- uint32_t YBuffDoneSz; /* Transferred Y datasize */
+ uint8_t *Ybuff;
+ uint32_t YbuffSz;
+ uint32_t YBuffDoneSz;
- uint8_t *UVbuff; /* Caller Supplied buffer for UV data */
- uint32_t UVbuffSz; /* Caller Supplied UV buffer size */
- uint32_t UVBuffDoneSz; /* Transferred UV data size */
+ uint8_t *UVbuff;
+ uint32_t UVbuffSz;
+ uint32_t UVBuffDoneSz;
- uint32_t StrideSz; /* Caller supplied Stride Size */
- uint32_t PoutFlags; /* Call IN Flags */
+ uint32_t StrideSz;
+ uint32_t PoutFlags;
- uint32_t discCnt; /* Picture discontinuity count */
+ uint32_t discCnt;
- struct BC_PIC_INFO_BLOCK PicInfo; /* Picture Information Block Data */
+ struct BC_PIC_INFO_BLOCK PicInfo;
/* Line 21 Closed Caption */
/* User Data */
@@ -436,39 +510,47 @@ struct BC_DTS_PROC_OUT {
void *hnd;
dts_pout_callback AppCallBack;
uint8_t DropFrames;
- uint8_t b422Mode; /* Picture output Mode */
- uint8_t bPibEnc; /* PIB encrypted */
+ uint8_t b422Mode;
+ uint8_t bPibEnc;
uint8_t bRevertScramble;
};
-
+/**
+ * struct BC_DTS_STATUS
+ * @ReadyListCount: Number of frames in ready list (reported by driver)
+ * @PowerStateChange: Number of active state power
+ * transitions (reported by driver)
+ * @FramesDropped: Number of frames dropped. (reported by DIL)
+ * @FramesCaptured: Number of frames captured. (reported by DIL)
+ * @FramesRepeated: Number of frames repeated. (reported by DIL)
+ * @InputCount: Times compressed video has been sent to the HW.
+ * i.e. Successful DtsProcInput() calls (reported by DIL)
+ * @InputTotalSize: Amount of compressed video that has been sent to the HW.
+ * (reported by DIL)
+ * @InputBusyCount: Times compressed video has attempted to be sent to the HW
+ * but the input FIFO was full. (reported by DIL)
+ * @PIBMissCount: Amount of times a PIB is invalid. (reported by DIL)
+ * @cpbEmptySize: supported only for H.264, specifically changed for
+ * Adobe. Report size of CPB buffer available. (reported by DIL)
+ * @NextTimeStamp: TimeStamp of the next picture that will be returned
+ * by a call to ProcOutput. Added for Adobe. Reported
+ * back from the driver
+ */
struct BC_DTS_STATUS {
- uint8_t ReadyListCount; /* Number of frames in ready list (reported by driver) */
- uint8_t FreeListCount; /* Number of frame buffers free. (reported by driver) */
- uint8_t PowerStateChange; /* Number of active state power transitions (reported by driver) */
+ uint8_t ReadyListCount;
+ uint8_t FreeListCount;
+ uint8_t PowerStateChange;
uint8_t reserved_[1];
-
- uint32_t FramesDropped; /* Number of frames dropped. (reported by DIL) */
- uint32_t FramesCaptured; /* Number of frames captured. (reported by DIL) */
- uint32_t FramesRepeated; /* Number of frames repeated. (reported by DIL) */
-
- uint32_t InputCount; /* Times compressed video has been sent to the HW.
- * i.e. Successful DtsProcInput() calls (reported by DIL) */
- uint64_t InputTotalSize; /* Amount of compressed video that has been sent to the HW.
- * (reported by DIL) */
- uint32_t InputBusyCount; /* Times compressed video has attempted to be sent to the HW
- * but the input FIFO was full. (reported by DIL) */
-
- uint32_t PIBMissCount; /* Amount of times a PIB is invalid. (reported by DIL) */
-
- uint32_t cpbEmptySize; /* supported only for H.264, specifically changed for
- * Adobe. Report size of CPB buffer available.
- * Reported by DIL */
- uint64_t NextTimeStamp; /* TimeStamp of the next picture that will be returned
- * by a call to ProcOutput. Added for Adobe. Reported
- * back from the driver */
+ uint32_t FramesDropped;
+ uint32_t FramesCaptured;
+ uint32_t FramesRepeated;
+ uint32_t InputCount;
+ uint64_t InputTotalSize;
+ uint32_t InputBusyCount;
+ uint32_t PIBMissCount;
+ uint32_t cpbEmptySize;
+ uint64_t NextTimeStamp;
uint8_t reserved__[16];
-
};
#define BC_SWAP32(_v) \
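The crystalhd hunks above convert trailing field comments into kernel-doc blocks. The format is strict: a /** opener, the struct or enum name with a short description on the first line, then one @member: line per field, with continuation text indented beneath its tag. A minimal, invented example of the layout:

	#include <linux/types.h>

	/**
	 * struct example_pic_info - per-picture metadata (illustrative only)
	 * @time_stamp: presentation timestamp
	 * @width:      width in pixels
	 * @height:     height in pixels
	 * @n_drop:     non-reference frames still to be dropped;
	 *              continuation lines are indented under their tag
	 */
	struct example_pic_info {
		u64 time_stamp;
		u32 width;
		u32 height;
		u32 n_drop;
	};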
diff --git a/drivers/staging/cxt1e1/comet.h b/drivers/staging/cxt1e1/comet.h
index 5cb3afd..e06da4a 100644
--- a/drivers/staging/cxt1e1/comet.h
+++ b/drivers/staging/cxt1e1/comet.h
@@ -1,7 +1,3 @@
-/*
- * $Id: comet.h,v 1.3 2005/09/28 00:10:07 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_COMET_H_
#define _INC_COMET_H_
@@ -23,27 +19,9 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.3 $
- * Last changed on $Date: 2005/09/28 00:10:07 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: comet.h,v $
- * Revision 1.3 2005/09/28 00:10:07 rickd
- * Add RCS header. Switch to structure usage.
- *
- * Revision 1.2 2005/04/28 23:43:03 rickd
- * Add RCS tracking heading.
- *
- *-----------------------------------------------------------------------------
*/
-#if defined(__FreeBSD__) || defined (__NetBSD__)
-#include <sys/types.h>
-#else
#include <linux/types.h>
-#endif
-
#define VINT32 volatile u_int32_t
diff --git a/drivers/staging/cxt1e1/comet_tables.c b/drivers/staging/cxt1e1/comet_tables.c
index db1293c..8493111 100644
--- a/drivers/staging/cxt1e1/comet_tables.c
+++ b/drivers/staging/cxt1e1/comet_tables.c
@@ -1,7 +1,3 @@
-/*
- * $Id: comet_tables.c,v 1.2 2005/10/17 23:55:27 rickd PMCC4_3_1B $
- */
-
/*-----------------------------------------------------------------------------
* comet_tables.c - waveform tables for the PM4351 'COMET'
*
@@ -20,28 +16,8 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.2 $
- * Last changed on $Date: 2005/10/17 23:55:27 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: comet_tables.c,v $
- * Revision 1.2 2005/10/17 23:55:27 rickd
- * Note that 75 Ohm transmit waveform is not supported on PMCC4.
- *
- * Revision 1.1 2005/09/28 00:10:05 rickd
- * Cosmetic alignment of tables for readability.
- *
- * Revision 1.0 2005/05/10 22:47:53 rickd
- * Initial revision
- *
- *-----------------------------------------------------------------------------
*/
-char SBEid_pmcc4_comet_tblc[] =
- "@(#)comet_tables.c - $Revision: 1.2 $ (c) Copyright 2004-2005 SBE, Inc.";
-
-
#include <linux/types.h>
/*****************************************************************************
diff --git a/drivers/staging/cxt1e1/comet_tables.h b/drivers/staging/cxt1e1/comet_tables.h
index 80424a2..3e2e5ba 100644
--- a/drivers/staging/cxt1e1/comet_tables.h
+++ b/drivers/staging/cxt1e1/comet_tables.h
@@ -1,7 +1,3 @@
-/*
- * $Id: comet_tables.h,v 1.5 2006/01/02 22:37:31 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_COMET_TBLS_H_
#define _INC_COMET_TBLS_H_
@@ -23,26 +19,6 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.5 $
- * Last changed on $Date: 2006/01/02 22:37:31 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: comet_tables.h,v $
- * Revision 1.5 2006/01/02 22:37:31 rickd
- * Double indexed arrays need sizings to avoid CC errors under
- * gcc 4.0.0
- *
- * Revision 1.4 2005/10/17 23:55:28 rickd
- * The 75 Ohm transmit waveform is not supported on PMCC4.
- *
- * Revision 1.3 2005/09/28 00:10:08 rickd
- * Add GNU License info. Structures moved to -C- file.
- *
- * Revision 1.2 2005/04/28 23:43:04 rickd
- * Add RCS tracking heading.
- *
- *-----------------------------------------------------------------------------
*/
diff --git a/drivers/staging/cxt1e1/libsbew.h b/drivers/staging/cxt1e1/libsbew.h
index 5c99646..4254c04 100644
--- a/drivers/staging/cxt1e1/libsbew.h
+++ b/drivers/staging/cxt1e1/libsbew.h
@@ -1,7 +1,3 @@
-/*
- * $Id: libsbew.h,v 2.1 2005/10/27 18:54:19 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_LIBSBEW_H_
#define _INC_LIBSBEW_H_
@@ -25,32 +21,8 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 2.1 $
- * Last changed on $Date: 2005/10/27 18:54:19 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: libsbew.h,v $
- * Revision 2.1 2005/10/27 18:54:19 rickd
- * Add E1PLAIN support.
- *
- * Revision 2.0 2005/09/28 00:10:08 rickd
- * Customized for PMCC4 comet-per-port design.
- *
- * Revision 1.15 2005/03/29 00:51:31 rickd
- * File imported from C1T3 port, Revision 1.15
- *-----------------------------------------------------------------------------
*/
-#ifndef __KERNEL__
-#include <sys/types.h>
-#endif
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
/********************************/
/** set driver logging level **/
/********************************/
@@ -323,7 +295,7 @@ struct sbecom_port_param
#define CFG_CH_DINV_TX 0x02
-/* Posssible resettable chipsets/functions */
+/* Possible resettable chipsets/functions */
#define RESET_DEV_TEMUX 1
#define RESET_DEV_TECT3 RESET_DEV_TEMUX
#define RESET_DEV_PLL 2
@@ -574,8 +546,4 @@ struct sbecom_port_param
extern int wancfg_set_tsioc (wcfg_t *, struct wanc1t3_ts_param *);
#endif
-#ifdef __cplusplus
-}
-#endif
-
#endif /*** _INC_LIBSBEW_H_ ***/
diff --git a/drivers/staging/cxt1e1/musycc.c b/drivers/staging/cxt1e1/musycc.c
index 5cc3423..90c0f1e 100644
--- a/drivers/staging/cxt1e1/musycc.c
+++ b/drivers/staging/cxt1e1/musycc.c
@@ -1,7 +1,3 @@
-/*
- * $Id: musycc.c,v 2.1 2007/08/15 23:32:17 rickd PMCC4_3_1B $
- */
-
unsigned int max_intcnt = 0;
unsigned int max_bh = 0;
@@ -24,53 +20,8 @@ unsigned int max_bh = 0;
* For further information, contact via email: support@onestopsystems.com
* One Stop Systems, Inc. Escondido, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 2.1 $
- * Last changed on $Date: 2007/08/15 23:32:17 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: musycc.c,v $
- * Revision 2.1 2007/08/15 23:32:17 rickd
- * Use 'if 0' instead of GNU comment delimeter to avoid line wrap induced compiler errors.
- *
- * Revision 2.0 2007/08/15 22:13:20 rickd
- * Update to printf pointer %p usage and correct some UINT to ULONG for
- * 64bit comptibility.
- *
- * Revision 1.7 2006/04/21 00:56:40 rickd
- * workqueue files now prefixed with <sbecom> prefix.
- *
- * Revision 1.6 2005/10/27 18:54:19 rickd
- * Clean out old code. Default to HDLC_FCS16, not TRANS.
- *
- * Revision 1.5 2005/10/17 23:55:28 rickd
- * Initial port of NCOMM support patches from original work found
- * in pmc_c4t1e1 as updated by NCOMM. Ref: CONFIG_SBE_PMCC4_NCOMM.
- *
- * Revision 1.4 2005/10/13 20:35:25 rickd
- * Cleanup warning for unused <flags> variable.
- *
- * Revision 1.3 2005/10/13 19:19:22 rickd
- * Disable redundant driver removal cleanup code.
- *
- * Revision 1.2 2005/10/11 18:36:16 rickd
- * Clean up warning messages caused by de-implemented some <flags> associated
- * with spin_lock() removals.
- *
- * Revision 1.1 2005/10/05 00:45:28 rickd
- * Re-enable xmit on flow-controlled and full channel to fix restart hang.
- * Add some temp spin-lock debug code (rld_spin_owner).
- *
- * Revision 1.0 2005/09/28 00:10:06 rickd
- * Initial release for C4T1E1 support. Lots of transparent
- * mode updates.
- *
- *-----------------------------------------------------------------------------
*/
-char SBEid_pmcc4_musyccc[] =
-"@(#)musycc.c - $Revision: 2.1 $ (c) Copyright 2004-2006 SBE, Inc.";
-
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/types.h>
diff --git a/drivers/staging/cxt1e1/musycc.h b/drivers/staging/cxt1e1/musycc.h
index 68f3660..cf6b54e 100644
--- a/drivers/staging/cxt1e1/musycc.h
+++ b/drivers/staging/cxt1e1/musycc.h
@@ -1,7 +1,3 @@
-/*
- * $Id: musycc.h,v 1.3 2005/09/28 00:10:08 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_MUSYCC_H_
#define _INC_MUSYCC_H_
@@ -24,36 +20,13 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.3 $
- * Last changed on $Date: 2005/09/28 00:10:08 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: musycc.h,v $
- * Revision 1.3 2005/09/28 00:10:08 rickd
- * Add GNU license info. Add PMCC4 PCI/DevIDs. Implement new
- * musycc reg&bits namings. Use PORTMAP_0 GCD grouping.
- *
- * Revision 1.2 2005/04/28 23:43:04 rickd
- * Add RCS tracking heading.
- *
- *-----------------------------------------------------------------------------
*/
-#if defined (__FreeBSD__) || defined (__NetBSD__)
-#include <sys/types.h>
-#else
#include <linux/types.h>
-#endif
#define VINT8 volatile u_int8_t
#define VINT32 volatile u_int32_t
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
#include "pmcc4_defs.h"
@@ -448,10 +421,6 @@ extern "C"
/* This must be defined on an entire channel group (Port) basis */
#define SUERM_THRESHOLD 0x1f
-#ifdef __cplusplus
-}
-#endif
-
#undef VINT32
#undef VINT8
diff --git a/drivers/staging/cxt1e1/ossiRelease.c b/drivers/staging/cxt1e1/ossiRelease.c
index a560298..f17a902 100644
--- a/drivers/staging/cxt1e1/ossiRelease.c
+++ b/drivers/staging/cxt1e1/ossiRelease.c
@@ -1,7 +1,3 @@
-/*
- * $Id: ossiRelease.c,v 1.2 2008/05/08 20:14:03 rdobbs PMCC4_3_1B $
- */
-
/*-----------------------------------------------------------------------------
* ossiRelease.c -
*
@@ -26,14 +22,8 @@
* One Stop Systems, Inc. Escondido, California U.S.A.
*
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.2 $
- * Last changed on $Date: 2008/05/08 20:14:03 $
- * Changed by $Author: rdobbs $
- *-----------------------------------------------------------------------------
*/
-
char pmcc4_OSSI_release[] = "$Release: PMCC4_3_1B, Copyright (c) 2008 One Stop Systems$";
/*** End-of-File ***/
diff --git a/drivers/staging/cxt1e1/pmc93x6_eeprom.h b/drivers/staging/cxt1e1/pmc93x6_eeprom.h
index c3ada87..96c48cb 100644
--- a/drivers/staging/cxt1e1/pmc93x6_eeprom.h
+++ b/drivers/staging/cxt1e1/pmc93x6_eeprom.h
@@ -1,7 +1,3 @@
-/*
- * $Id: pmc93x6_eeprom.h,v 1.1 2005/09/28 00:10:08 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_PMC93X6_EEPROM_H_
#define _INC_PMC93X6_EEPROM_H_
@@ -23,26 +19,9 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- *-----------------------------------------------------------------------------
- * $Log: pmc93x6_eeprom.h,v $
- * Revision 1.1 2005/09/28 00:10:08 rickd
- * pmc_verify_cksum return value is char.
- *
- * Revision 1.0 2005/05/04 17:20:51 rickd
- * Initial revision
- *
- * Revision 1.0 2005/04/22 23:48:48 rickd
- * Initial revision
- *
- *-----------------------------------------------------------------------------
*/
-#if defined (__FreeBSD__) || defined (__NetBSD__)
-#include <sys/types.h>
-#else
#include <linux/types.h>
-#endif
#ifdef __KERNEL__
diff --git a/drivers/staging/cxt1e1/pmcc4.h b/drivers/staging/cxt1e1/pmcc4.h
index e046b87..b0ed4ad 100644
--- a/drivers/staging/cxt1e1/pmcc4.h
+++ b/drivers/staging/cxt1e1/pmcc4.h
@@ -1,7 +1,3 @@
-/*
- * $Id: pmcc4.h,v 1.4 2005/11/01 19:24:48 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_PMCC4_H_
#define _INC_PMCC4_H_
@@ -23,49 +19,15 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.4 $
- * Last changed on $Date: 2005/11/01 19:24:48 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: pmcc4.h,v $
- * Revision 1.4 2005/11/01 19:24:48 rickd
- * Remove de-implement function prototypes. Several <int> to
- * <status_t> changes for consistent usage of same.
- *
- * Revision 1.3 2005/09/28 00:10:08 rickd
- * Add GNU license info. Use config params from libsbew.h
- *
- * Revision 1.2 2005/04/28 23:43:03 rickd
- * Add RCS tracking heading.
- *
- *-----------------------------------------------------------------------------
*/
-
-#if defined(__FreeBSD__) || defined(__NetBSD__)
-#include <sys/types.h>
-#else
-#ifndef __KERNEL__
-#include <sys/types.h>
-#else
#include <linux/types.h>
-#endif
-#endif
-
-
typedef int status_t;
#define SBE_DRVR_FAIL 0
#define SBE_DRVR_SUCCESS 1
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-
/********************/
/* PMCC4 memory Map */
/********************/
@@ -105,10 +67,6 @@ extern "C"
#define sbeE1errSMF 0x02
#define sbeE1CRC 0x01
-#ifdef __cplusplus
-}
-#endif
-
#ifdef __KERNEL__
/*
diff --git a/drivers/staging/cxt1e1/pmcc4_cpld.h b/drivers/staging/cxt1e1/pmcc4_cpld.h
index 6d8f033..a51209b 100644
--- a/drivers/staging/cxt1e1/pmcc4_cpld.h
+++ b/drivers/staging/cxt1e1/pmcc4_cpld.h
@@ -1,7 +1,3 @@
-/*
- * $Id: pmcc4_cpld.h,v 1.0 2005/09/28 00:10:08 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_PMCC4_CPLD_H_
#define _INC_PMCC4_CPLD_H_
@@ -23,34 +19,9 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.0 $
- * Last changed on $Date: 2005/09/28 00:10:08 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: pmcc4_cpld.h,v $
- * Revision 1.0 2005/09/28 00:10:08 rickd
- * Initial revision
- *
- *-----------------------------------------------------------------------------
*/
-
-#if defined(__FreeBSD__) || defined(__NetBSD__)
-#include <sys/types.h>
-#else
-#ifndef __KERNEL__
-#include <sys/types.h>
-#else
#include <linux/types.h>
-#endif
-#endif
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
/********************************/
/* iSPLD control chip registers */
@@ -117,8 +88,4 @@ extern "C"
#define PMCC4_CPLD_INTR_CMT_3 0x04
#define PMCC4_CPLD_INTR_CMT_4 0x08
-#ifdef __cplusplus
-}
-#endif
-
#endif /* _INC_PMCC4_CPLD_H_ */
diff --git a/drivers/staging/cxt1e1/pmcc4_defs.h b/drivers/staging/cxt1e1/pmcc4_defs.h
index a39505f..83ceae4 100644
--- a/drivers/staging/cxt1e1/pmcc4_defs.h
+++ b/drivers/staging/cxt1e1/pmcc4_defs.h
@@ -1,7 +1,3 @@
-/*
- * $Id: pmcc4_defs.h,v 1.0 2005/09/28 00:10:09 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_PMCC4_DEFS_H_
#define _INC_PMCC4_DEFS_H_
@@ -25,16 +21,6 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.0 $
- * Last changed on $Date: 2005/09/28 00:10:09 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: pmcc4_defs.h,v $
- * Revision 1.0 2005/09/28 00:10:09 rickd
- * Initial revision
- *
- *-----------------------------------------------------------------------------
*/
diff --git a/drivers/staging/cxt1e1/pmcc4_drv.c b/drivers/staging/cxt1e1/pmcc4_drv.c
index e1f07fa..8a7b3a6 100644
--- a/drivers/staging/cxt1e1/pmcc4_drv.c
+++ b/drivers/staging/cxt1e1/pmcc4_drv.c
@@ -1,8 +1,3 @@
-/*
- * $Id: pmcc4_drv.c,v 3.1 2007/08/15 23:32:17 rickd PMCC4_3_1B $
- */
-
-
/*-----------------------------------------------------------------------------
* pmcc4_drv.c -
*
@@ -22,74 +17,10 @@
* For further information, contact via email: support@onestopsystems.com
* One Stop Systems, Inc. Escondido, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 3.1 $
- * Last changed on $Date: 2007/08/15 23:32:17 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: pmcc4_drv.c,v $
- * Revision 3.1 2007/08/15 23:32:17 rickd
- * Use 'if 0' instead of GNU comment delimeter to avoid line wrap induced compiler errors.
- *
- * Revision 3.0 2007/08/15 22:19:55 rickd
- * Correct sizeof() castings and pi->regram to support 64bit compatibility.
- *
- * Revision 2.10 2006/04/21 00:56:40 rickd
- * workqueue files now prefixed with <sbecom> prefix.
- *
- * Revision 2.9 2005/11/01 19:22:49 rickd
- * Add sanity checks against max_port for ioctl functions.
- *
- * Revision 2.8 2005/10/27 18:59:25 rickd
- * Code cleanup. Default channel config to HDLC_FCS16.
- *
- * Revision 2.7 2005/10/18 18:16:30 rickd
- * Further NCOMM code repairs - (1) interrupt matrix usage inconsistent
- * for indexing into nciInterrupt[][], code missing double parameters.
- * (2) check input of ncomm interrupt registration cardID for correct
- * boundary values.
- *
- * Revision 2.6 2005/10/17 23:55:28 rickd
- * Initial port of NCOMM support patches from original work found
- * in pmc_c4t1e1 as updated by NCOMM. Ref: CONFIG_SBE_PMCC4_NCOMM.
- * Corrected NCOMMs wanpmcC4T1E1_getBaseAddress() to correctly handle
- * multiple boards.
- *
- * Revision 2.5 2005/10/13 23:01:28 rickd
- * Correct panic for illegal address reference w/in get_brdinfo on
- * first_if/last_if name acquistion under Linux 2.6
- *
- * Revision 2.4 2005/10/13 21:20:19 rickd
- * Correction of c4_cleanup() wherein next should be acquired before
- * ci_t structure is free'd.
- *
- * Revision 2.3 2005/10/13 19:20:10 rickd
- * Correct driver removal cleanup code for multiple boards.
- *
- * Revision 2.2 2005/10/11 18:34:04 rickd
- * New routine added to determine number of ports (comets) on board.
- *
- * Revision 2.1 2005/10/05 00:48:13 rickd
- * Add some RX activation trace code.
- *
- * Revision 2.0 2005/09/28 00:10:06 rickd
- * Implement 2.6 workqueue for TX/RX restart. Correction to
- * hardware register boundary checks allows expanded access of MUSYCC.
- * Implement new musycc reg&bits namings.
- *
- *-----------------------------------------------------------------------------
*/
-char OSSIid_pmcc4_drvc[] =
-"@(#)pmcc4_drv.c - $Revision: 3.1 $ (c) Copyright 2002-2007 One Stop Systems, Inc.";
-
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#if defined (__FreeBSD__) || defined (__NetBSD__)
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/errno.h>
-#else
#include <linux/types.h>
#include "pmcc4_sysdep.h"
#include <linux/errno.h>
@@ -98,7 +29,6 @@ char OSSIid_pmcc4_drvc[] =
#include <linux/timer.h> /* include for timer */
#include <linux/hdlc.h>
#include <asm/io.h>
-#endif
#include "sbecom_inline_linux.h"
#include "libsbew.h"
diff --git a/drivers/staging/cxt1e1/pmcc4_ioctls.h b/drivers/staging/cxt1e1/pmcc4_ioctls.h
index 6b8d656..56a1ee3 100644
--- a/drivers/staging/cxt1e1/pmcc4_ioctls.h
+++ b/drivers/staging/cxt1e1/pmcc4_ioctls.h
@@ -1,6 +1,3 @@
-/* RCSid: $Header: /home/rickd/projects/pmcc4/include/pmcc4_ioctls.h,v 2.0 2005/09/28 00:10:09 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_PMCC4_IOCTLS_H_
#define _INC_PMCC4_IOCTLS_H_
@@ -22,19 +19,6 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 2.0 $
- * Last changed on $Date: 2005/09/28 00:10:09 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: pmcc4_ioctls.h,v $
- * Revision 2.0 2005/09/28 00:10:09 rickd
- * Add GNU license info. Switch Ioctls to sbe_ioc.h usage.
- *
- * Revision 1.2 2005/04/28 23:43:03 rickd
- * Add RCS tracking heading.
- *
- *-----------------------------------------------------------------------------
*/
#include "sbew_ioc.h"
diff --git a/drivers/staging/cxt1e1/sbe_bid.h b/drivers/staging/cxt1e1/sbe_bid.h
index 1f49b40..abc2e55f 100644
--- a/drivers/staging/cxt1e1/sbe_bid.h
+++ b/drivers/staging/cxt1e1/sbe_bid.h
@@ -1,7 +1,3 @@
-/*
- * $Id: sbe_bid.h,v 1.0 2005/09/28 00:10:09 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_SBEBID_H_
#define _INC_SBEBID_H_
@@ -24,16 +20,6 @@
* SBE, Inc. San Ramon, California U.S.A.
*
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.0 $
- * Last changed on $Date: 2005/09/28 00:10:09 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: sbe_bid.h,v $
- * Revision 1.0 2005/09/28 00:10:09 rickd
- * Initial revision
- *
- *-----------------------------------------------------------------------------
*/
#define SBE_BID_REG 0x00000000 /* Board ID Register */
diff --git a/drivers/staging/cxt1e1/sbe_promformat.h b/drivers/staging/cxt1e1/sbe_promformat.h
index 746f81b..aad411d 100644
--- a/drivers/staging/cxt1e1/sbe_promformat.h
+++ b/drivers/staging/cxt1e1/sbe_promformat.h
@@ -1,7 +1,3 @@
-/*
- * $Id: sbe_promformat.h,v 2.2 2005/09/28 00:10:09 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_SBE_PROMFORMAT_H_
#define _INC_SBE_PROMFORMAT_H_
@@ -24,19 +20,6 @@
* SBE, Inc. San Ramon, California U.S.A.
*
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 2.2 $
- * Last changed on $Date: 2005/09/28 00:10:09 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: sbe_promformat.h,v $
- * Revision 2.2 2005/09/28 00:10:09 rickd
- * Add EEPROM sample from C4T1E1 board.
- *
- * Revision 2.1 2005/05/04 17:18:24 rickd
- * Initial CI.
- *
- *-----------------------------------------------------------------------------
*/
@@ -85,12 +68,6 @@
*
*/
-#ifdef __cplusplus
-extern "C"
-{
-#endif
-
-
#define STRUCT_OFFSET(type, symbol) ((long)&(((type *)0)->symbol))
/*------------------------------------------------------------------------
@@ -150,8 +127,4 @@ extern "C"
FLD_TYPE2 fldType2;
} PROMFORMAT;
-#ifdef __cplusplus
-}
-#endif
-
#endif /*** _INC_SBE_PROMFORMAT_H_ ***/
diff --git a/drivers/staging/cxt1e1/sbecom_inline_linux.h b/drivers/staging/cxt1e1/sbecom_inline_linux.h
index 9ea2c0c..68ed445 100644
--- a/drivers/staging/cxt1e1/sbecom_inline_linux.h
+++ b/drivers/staging/cxt1e1/sbecom_inline_linux.h
@@ -1,7 +1,3 @@
-/*
- * $Id: sbecom_inline_linux.h,v 1.2 2007/08/15 22:51:35 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_SBECOM_INLNX_H_
#define _INC_SBECOM_INLNX_H_
@@ -24,22 +20,6 @@
* For further information, contact via email: support@onestopsystems.com
* One Stop Systems, Inc. Escondido, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.2 $
- * Last changed on $Date: 2007/08/15 22:51:35 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: sbecom_inline_linux.h,v $
- * Revision 1.2 2007/08/15 22:51:35 rickd
- * Remove duplicate version.h entry.
- *
- * Revision 1.1 2007/08/15 22:50:29 rickd
- * Update linux/config for 2.6.18 and later.
- *
- * Revision 1.0 2005/09/28 00:10:09 rickd
- * Initial revision
- *
- *-----------------------------------------------------------------------------
*/
diff --git a/drivers/staging/cxt1e1/sbeproc.h b/drivers/staging/cxt1e1/sbeproc.h
index 4aa53f4..e82be6a 100644
--- a/drivers/staging/cxt1e1/sbeproc.h
+++ b/drivers/staging/cxt1e1/sbeproc.h
@@ -1,7 +1,3 @@
-/*
- * $Id: sbeproc.h,v 1.2 2005/10/17 23:55:28 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_SBEPROC_H_
#define _INC_SBEPROC_H_
@@ -23,22 +19,6 @@
* For further information, contact via email: support@sbei.com
* SBE, Inc. San Ramon, California U.S.A.
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.2 $
- * Last changed on $Date: 2005/10/17 23:55:28 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: sbeproc.h,v $
- * Revision 1.2 2005/10/17 23:55:28 rickd
- * sbecom_proc_brd_init() is an declared an __init function.
- *
- * Revision 1.1 2005/09/28 00:10:09 rickd
- * Remove unneeded inclusion of c4_private.h.
- *
- * Revision 1.0 2005/05/10 22:21:46 rickd
- * Initial check-in.
- *
- *-----------------------------------------------------------------------------
*/
diff --git a/drivers/staging/cxt1e1/sbew_ioc.h b/drivers/staging/cxt1e1/sbew_ioc.h
index 14d3719..ce9b15c 100644
--- a/drivers/staging/cxt1e1/sbew_ioc.h
+++ b/drivers/staging/cxt1e1/sbew_ioc.h
@@ -1,7 +1,3 @@
-/*
- * $Id: sbew_ioc.h,v 1.0 2005/09/28 00:10:10 rickd PMCC4_3_1B $
- */
-
#ifndef _INC_SBEWIOC_H_
#define _INC_SBEWIOC_H_
@@ -24,55 +20,9 @@
* SBE, Inc. San Ramon, California U.S.A.
*
*-----------------------------------------------------------------------------
- * RCS info:
- * RCS revision: $Revision: 1.0 $
- * Last changed on $Date: 2005/09/28 00:10:10 $
- * Changed by $Author: rickd $
- *-----------------------------------------------------------------------------
- * $Log: sbew_ioc.h,v $
- * Revision 1.0 2005/09/28 00:10:10 rickd
- * Initial revision
- *
- * Revision 1.6 2005/01/11 18:41:01 rickd
- * Add BRDADDR_GET Ioctl.
- *
- * Revision 1.5 2004/09/16 18:55:59 rickd
- * Start setting up for generic framer configuration Ioctl by switch
- * from tect3_framer_param[] to sbecom_framer_param[].
- *
- * Revision 1.4 2004/06/28 17:58:15 rickd
- * Rename IOC_TSMAP_[GS] to IOC_TSIOC_[GS] to support need for
- * multiple formats of data when setting up TimeSlots.
- *
- * Revision 1.3 2004/06/22 21:18:13 rickd
- * read_vec now() ONLY handles a single common wrt_vec array.
- *
- * Revision 1.1 2004/06/10 18:11:34 rickd
- * Add IID_GET Ioctl reference.
- *
- * Revision 1.0 2004/06/08 22:59:38 rickd
- * Initial revision
- *
- * Revision 2.0 2004/06/07 17:49:47 rickd
- * Initial library release following merge of wanc1t3/wan256 into
- * common elements for lib.
- *
- *-----------------------------------------------------------------------------
*/
-#ifndef __KERNEL__
-#include <sys/types.h>
-#endif
-#ifdef SunOS
-#include <sys/ioccom.h>
-#else
#include <linux/ioctl.h>
-#endif
-
-#ifdef __cplusplus
-extern "C"
-{
-#endif
#define SBE_LOCKFILE "/tmp/.sbewan.LCK"
@@ -128,9 +78,4 @@ extern "C"
#define SBE_IOC_MAXVEC 1
-
-#ifdef __cplusplus
-}
-#endif
-
#endif /*** _INC_SBEWIOC_H_ ***/
diff --git a/drivers/staging/et131x/et131x.c b/drivers/staging/et131x/et131x.c
index 0c1c6ca..2c4069f 100644
--- a/drivers/staging/et131x/et131x.c
+++ b/drivers/staging/et131x/et131x.c
@@ -155,7 +155,6 @@ MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver "
#define fMP_ADAPTER_FAIL_SEND_MASK 0x3ff00000
/* Some offsets in PCI config space that are actually used. */
-#define ET1310_PCI_MAX_PYLD 0x4C
#define ET1310_PCI_MAC_ADDRESS 0xA4
#define ET1310_PCI_EEPROM_STATUS 0xB2
#define ET1310_PCI_ACK_NACK 0xC0
@@ -178,9 +177,7 @@ MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver "
/* RX defines */
#define USE_FBR0 1
-
#define FBR_CHUNKS 32
-
#define MAX_DESC_PER_RING_RX 1024
/* number of RFDs - default and min */
@@ -195,7 +192,6 @@ MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver "
#endif
#define NIC_MIN_NUM_RFD 64
-
#define NUM_PACKETS_HANDLED 256
#define ALCATEL_MULTICAST_PKT 0x01000000
@@ -303,8 +299,8 @@ struct fbr_lookup {
dma_addr_t ring_physaddr;
void *mem_virtaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
dma_addr_t mem_physaddrs[MAX_DESC_PER_RING_RX / FBR_CHUNKS];
- uint64_t real_physaddr;
- uint64_t offset;
+ u64 real_physaddr;
+ u64 offset;
u32 local_full;
u32 num_entries;
u32 buffsize;
@@ -427,7 +423,6 @@ struct tx_ring {
int since_irq;
};
-/* ADAPTER defines */
/*
* Do not change these values: if changed, then change also in respective
* TXdma and Rxdma engines
@@ -576,8 +571,6 @@ struct et131x_adapter {
struct net_device_stats net_stats;
};
-/* EEPROM functions */
-
static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status)
{
u32 reg;
@@ -795,7 +788,7 @@ static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata)
return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0;
}
-int et131x_init_eeprom(struct et131x_adapter *adapter)
+static int et131x_init_eeprom(struct et131x_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
u8 eestatus;
@@ -868,7 +861,7 @@ int et131x_init_eeprom(struct et131x_adapter *adapter)
* et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310.
* @adapter: pointer to our adapter structure
*/
-void et131x_rx_dma_enable(struct et131x_adapter *adapter)
+static void et131x_rx_dma_enable(struct et131x_adapter *adapter)
{
/* Setup the receive dma configuration register for normal operation */
u32 csr = 0x2000; /* FBR1 enable */
@@ -906,7 +899,7 @@ void et131x_rx_dma_enable(struct et131x_adapter *adapter)
* et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
* @adapter: pointer to our adapter structure
*/
-void et131x_rx_dma_disable(struct et131x_adapter *adapter)
+static void et131x_rx_dma_disable(struct et131x_adapter *adapter)
{
u32 csr;
/* Setup the receive dma configuration register */
@@ -928,7 +921,7 @@ void et131x_rx_dma_disable(struct et131x_adapter *adapter)
*
* Mainly used after a return to the D0 (full-power) state from a lower state.
*/
-void et131x_tx_dma_enable(struct et131x_adapter *adapter)
+static void et131x_tx_dma_enable(struct et131x_adapter *adapter)
{
/* Setup the transmit dma configuration register for normal
* operation
@@ -948,23 +941,10 @@ static inline void add_12bit(u32 *v, int n)
}
/**
- * nic_rx_pkts - Checks the hardware for available packets
- * @adapter: pointer to our adapter
- *
- * Returns rfd, a pointer to our MPRFD.
- *
- * Checks the hardware for available packets, using completion ring
- * If packets are available, it gets an RFD from the recv_list, attaches
- * the packet to it, puts the RFD in the RecvPendList, and also returns
- * the pointer to the RFD.
- */
-/* MAC functions */
-
-/**
* et1310_config_mac_regs1 - Initialize the first part of MAC regs
* @adapter: pointer to our adapter structure
*/
-void et1310_config_mac_regs1(struct et131x_adapter *adapter)
+static void et1310_config_mac_regs1(struct et131x_adapter *adapter)
{
struct mac_regs __iomem *macregs = &adapter->regs->mac;
u32 station1;
@@ -1024,7 +1004,7 @@ void et1310_config_mac_regs1(struct et131x_adapter *adapter)
* et1310_config_mac_regs2 - Initialize the second part of MAC regs
* @adapter: pointer to our adapter structure
*/
-void et1310_config_mac_regs2(struct et131x_adapter *adapter)
+static void et1310_config_mac_regs2(struct et131x_adapter *adapter)
{
int32_t delay = 0;
struct mac_regs __iomem *mac = &adapter->regs->mac;
@@ -1105,7 +1085,7 @@ void et1310_config_mac_regs2(struct et131x_adapter *adapter)
*
* Returns 0 if the device is not in phy coma, 1 if it is in phy coma
*/
-int et1310_in_phy_coma(struct et131x_adapter *adapter)
+static int et1310_in_phy_coma(struct et131x_adapter *adapter)
{
u32 pmcsr;
@@ -1114,15 +1094,13 @@ int et1310_in_phy_coma(struct et131x_adapter *adapter)
return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0;
}
-void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
+static void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
{
struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
- uint32_t nIndex;
- uint32_t result;
- uint32_t hash1 = 0;
- uint32_t hash2 = 0;
- uint32_t hash3 = 0;
- uint32_t hash4 = 0;
+ u32 hash1 = 0;
+ u32 hash2 = 0;
+ u32 hash3 = 0;
+ u32 hash4 = 0;
u32 pm_csr;
/* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision
@@ -1131,10 +1109,13 @@ void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
* driver.
*/
if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) {
+ int i;
+
/* Loop through our multicast array and set up the device */
- for (nIndex = 0; nIndex < adapter->multicast_addr_count;
- nIndex++) {
- result = ether_crc(6, adapter->multicast_list[nIndex]);
+ for (i = 0; i < adapter->multicast_addr_count; i++) {
+ u32 result;
+
+ result = ether_crc(6, adapter->multicast_list[i]);
result = (result & 0x3F800000) >> 23;
@@ -1163,7 +1144,7 @@ void et1310_setup_device_for_multicast(struct et131x_adapter *adapter)
}
}
-void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
+static void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
{
struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
u32 uni_pf1;
@@ -1203,7 +1184,7 @@ void et1310_setup_device_for_unicast(struct et131x_adapter *adapter)
}
}
-void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
+static void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
{
struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac;
struct phy_device *phydev = adapter->phydev;
@@ -1334,7 +1315,7 @@ void et1310_config_rxmac_regs(struct et131x_adapter *adapter)
writel(0x9, &rxmac->ctrl);
}
-void et1310_config_txmac_regs(struct et131x_adapter *adapter)
+static void et1310_config_txmac_regs(struct et131x_adapter *adapter)
{
struct txmac_regs __iomem *txmac = &adapter->regs->txmac;
@@ -1348,7 +1329,7 @@ void et1310_config_txmac_regs(struct et131x_adapter *adapter)
writel(0x40, &txmac->cf_param);
}
-void et1310_config_macstat_regs(struct et131x_adapter *adapter)
+static void et1310_config_macstat_regs(struct et131x_adapter *adapter)
{
struct macstat_regs __iomem *macstat =
&adapter->regs->macstat;
@@ -1422,7 +1403,7 @@ void et1310_config_macstat_regs(struct et131x_adapter *adapter)
*
* Returns 0 on success, errno on failure (as defined in errno.h)
*/
-int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
+static int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
u8 reg, u16 *value)
{
struct mac_regs __iomem *mac = &adapter->regs->mac;
@@ -1478,7 +1459,7 @@ int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr,
return status;
}
-int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
+static int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
{
struct phy_device *phydev = adapter->phydev;
@@ -1498,7 +1479,7 @@ int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value)
*
* Return 0 on success, errno on failure (as defined in errno.h)
*/
-int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
+static int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
{
struct mac_regs __iomem *mac = &adapter->regs->mac;
struct phy_device *phydev = adapter->phydev;
@@ -1564,8 +1545,9 @@ int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
}
/* Still used from _mac for BIT_READ */
-void et1310_phy_access_mii_bit(struct et131x_adapter *adapter, u16 action,
- u16 regnum, u16 bitnum, u8 *value)
+static void et1310_phy_access_mii_bit(struct et131x_adapter *adapter,
+ u16 action, u16 regnum, u16 bitnum,
+ u8 *value)
{
u16 reg;
u16 mask = 0x0001 << bitnum;
@@ -1591,7 +1573,7 @@ void et1310_phy_access_mii_bit(struct et131x_adapter *adapter, u16 action,
}
}
-void et1310_config_flow_control(struct et131x_adapter *adapter)
+static void et1310_config_flow_control(struct et131x_adapter *adapter)
{
struct phy_device *phydev = adapter->phydev;
@@ -1632,7 +1614,7 @@ void et1310_config_flow_control(struct et131x_adapter *adapter)
* et1310_update_macstat_host_counters - Update the local copy of the statistics
* @adapter: pointer to the adapter structure
*/
-void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
+static void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
{
struct ce_stats *stats = &adapter->stats;
struct macstat_regs __iomem *macstat =
@@ -1664,7 +1646,7 @@ void et1310_update_macstat_host_counters(struct et131x_adapter *adapter)
* the statistics held in the adapter structure, checking the "wrap"
* bit for each counter.
*/
-void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
+static void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
{
u32 carry_reg1;
u32 carry_reg2;
@@ -1714,9 +1696,7 @@ void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter)
adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT;
}
-/* PHY functions */
-
-int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
+static int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
{
struct net_device *netdev = bus->priv;
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -1731,7 +1711,7 @@ int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg)
return value;
}
-int et131x_mdio_write(struct mii_bus *bus, int phy_addr, int reg, u16 value)
+static int et131x_mdio_write(struct mii_bus *bus, int phy_addr, int reg, u16 value)
{
struct net_device *netdev = bus->priv;
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -1739,7 +1719,7 @@ int et131x_mdio_write(struct mii_bus *bus, int phy_addr, int reg, u16 value)
return et131x_mii_write(adapter, reg, value);
}
-int et131x_mdio_reset(struct mii_bus *bus)
+static int et131x_mdio_reset(struct mii_bus *bus)
{
struct net_device *netdev = bus->priv;
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -1759,7 +1739,7 @@ int et131x_mdio_reset(struct mii_bus *bus)
* Can't you see that this code processed
* Phy power, phy power..
*/
-void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
+static void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
{
u16 data;
@@ -1775,7 +1755,7 @@ void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
* @adapter: pointer to our private adapter structure
*
*/
-void et131x_xcvr_init(struct et131x_adapter *adapter)
+static void et131x_xcvr_init(struct et131x_adapter *adapter)
{
u16 imr;
u16 isr;
@@ -1822,7 +1802,7 @@ void et131x_xcvr_init(struct et131x_adapter *adapter)
*
* Used to configure the global registers on the JAGCore
*/
-void et131x_configure_global_regs(struct et131x_adapter *adapter)
+static void et131x_configure_global_regs(struct et131x_adapter *adapter)
{
struct global_regs __iomem *regs = &adapter->regs->global;
@@ -1863,13 +1843,11 @@ void et131x_configure_global_regs(struct et131x_adapter *adapter)
writel(0, &regs->watchdog_timer);
}
-/* PM functions */
-
/**
* et131x_config_rx_dma_regs - Start of Rx_DMA init sequence
* @adapter: pointer to our adapter structure
*/
-void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
+static void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
{
struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
struct rx_ring *rx_local = &adapter->rx_ring;
@@ -1987,7 +1965,7 @@ void et131x_config_rx_dma_regs(struct et131x_adapter *adapter)
* Configure the transmit engine with the ring buffers we have created
* and prepare it for use.
*/
-void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
+static void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
{
struct txdma_regs __iomem *txdma = &adapter->regs->txdma;
@@ -2017,7 +1995,7 @@ void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
*
* Returns 0 on success, errno on failure (as defined in errno.h)
*/
-void et131x_adapter_setup(struct et131x_adapter *adapter)
+static void et131x_adapter_setup(struct et131x_adapter *adapter)
{
/* Configure the JAGCore */
et131x_configure_global_regs(adapter);
@@ -2044,7 +2022,7 @@ void et131x_adapter_setup(struct et131x_adapter *adapter)
* et131x_soft_reset - Issue a soft reset to the hardware, complete for ET1310
* @adapter: pointer to our private adapter structure
*/
-void et131x_soft_reset(struct et131x_adapter *adapter)
+static void et131x_soft_reset(struct et131x_adapter *adapter)
{
/* Disable MAC Core */
writel(0xc00f0000, &adapter->regs->mac.cfg1);
@@ -2062,7 +2040,7 @@ void et131x_soft_reset(struct et131x_adapter *adapter)
* Enable the appropriate interrupts on the ET131x according to our
* configuration
*/
-void et131x_enable_interrupts(struct et131x_adapter *adapter)
+static void et131x_enable_interrupts(struct et131x_adapter *adapter)
{
u32 mask;
@@ -2082,7 +2060,7 @@ void et131x_enable_interrupts(struct et131x_adapter *adapter)
*
* Block all interrupts from the et131x device at the device itself
*/
-void et131x_disable_interrupts(struct et131x_adapter *adapter)
+static void et131x_disable_interrupts(struct et131x_adapter *adapter)
{
/* Disable all global interrupts */
writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
@@ -2092,7 +2070,7 @@ void et131x_disable_interrupts(struct et131x_adapter *adapter)
* et131x_tx_dma_disable - Stop of Tx_DMA on the ET1310
* @adapter: pointer to our adapter structure
*/
-void et131x_tx_dma_disable(struct et131x_adapter *adapter)
+static void et131x_tx_dma_disable(struct et131x_adapter *adapter)
{
/* Setup the tramsmit dma configuration register */
writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT,
@@ -2103,7 +2081,7 @@ void et131x_tx_dma_disable(struct et131x_adapter *adapter)
* et131x_enable_txrx - Enable tx/rx queues
* @netdev: device to be enabled
*/
-void et131x_enable_txrx(struct net_device *netdev)
+static void et131x_enable_txrx(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -2123,7 +2101,7 @@ void et131x_enable_txrx(struct net_device *netdev)
* et131x_disable_txrx - Disable tx/rx queues
* @netdev: device to be disabled
*/
-void et131x_disable_txrx(struct net_device *netdev)
+static void et131x_disable_txrx(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -2142,7 +2120,7 @@ void et131x_disable_txrx(struct net_device *netdev)
* et131x_init_send - Initialize send data structures
* @adapter: pointer to our private adapter structure
*/
-void et131x_init_send(struct et131x_adapter *adapter)
+static void et131x_init_send(struct et131x_adapter *adapter)
{
struct tcb *tcb;
u32 ct;
@@ -2192,7 +2170,7 @@ void et131x_init_send(struct et131x_adapter *adapter)
* indicating linkup status, call the MPDisablePhyComa routine to
* restore JAGCore and gigE PHY
*/
-void et1310_enable_phy_coma(struct et131x_adapter *adapter)
+static void et1310_enable_phy_coma(struct et131x_adapter *adapter)
{
unsigned long flags;
u32 pmcsr;
@@ -2231,7 +2209,7 @@ void et1310_enable_phy_coma(struct et131x_adapter *adapter)
* et1310_disable_phy_coma - Disable the Phy Coma Mode
* @adapter: pointer to our adapter structure
*/
-void et1310_disable_phy_coma(struct et131x_adapter *adapter)
+static void et1310_disable_phy_coma(struct et131x_adapter *adapter)
{
u32 pmcsr;
@@ -2269,8 +2247,6 @@ void et1310_disable_phy_coma(struct et131x_adapter *adapter)
et131x_enable_txrx(adapter->netdev);
}
-/* RX functions */
-
static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
{
u32 tmp_free_buff_ring = *free_buff_ring;
@@ -2296,16 +2272,14 @@ static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit)
* @offset: pointer to the offset variable
* @mask: correct mask
*/
-void et131x_align_allocated_memory(struct et131x_adapter *adapter,
- uint64_t *phys_addr,
- uint64_t *offset, uint64_t mask)
+static void et131x_align_allocated_memory(struct et131x_adapter *adapter,
+ u64 *phys_addr, u64 *offset,
+ u64 mask)
{
- uint64_t new_addr;
+ u64 new_addr = *phys_addr & ~mask;
*offset = 0;
- new_addr = *phys_addr & ~mask;
-
if (new_addr != *phys_addr) {
/* Move to next aligned block */
new_addr += mask + 1;
@@ -2325,7 +2299,7 @@ void et131x_align_allocated_memory(struct et131x_adapter *adapter,
* Allocates Free buffer ring 1 for sure, free buffer ring 0 if required,
* and the Packet Status Ring.
*/
-int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
+static int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
{
u32 i, j;
u32 bufsize;
@@ -2454,8 +2428,8 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
rx_ring->fbr[1]->offset);
#endif
for (i = 0; i < (rx_ring->fbr[0]->num_entries / FBR_CHUNKS); i++) {
- u64 fbr1_offset;
u64 fbr1_tmp_physaddr;
+ u64 fbr1_offset;
u32 fbr1_align;
/* This code allocates an area of memory big enough for N
@@ -2520,8 +2494,8 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
#ifdef USE_FBR0
/* Same for FBR0 (if in use) */
for (i = 0; i < (rx_ring->fbr[1]->num_entries / FBR_CHUNKS); i++) {
- u64 fbr0_offset;
u64 fbr0_tmp_physaddr;
+ u64 fbr0_offset;
fbr_chunksize =
((FBR_CHUNKS + 1) * rx_ring->fbr[1]->buffsize) - 1;
@@ -2629,7 +2603,7 @@ int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter)
* et131x_rx_dma_memory_free - Free all memory allocated within this module.
* @adapter: pointer to our private adapter structure
*/
-void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
+static void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
{
u32 index;
u32 bufsize;
@@ -2774,7 +2748,7 @@ void et131x_rx_dma_memory_free(struct et131x_adapter *adapter)
*
* Returns 0 on success and errno on failure (as defined in errno.h)
*/
-int et131x_init_recv(struct et131x_adapter *adapter)
+static int et131x_init_recv(struct et131x_adapter *adapter)
{
int status = -ENOMEM;
struct rfd *rfd = NULL;
@@ -2824,7 +2798,7 @@ int et131x_init_recv(struct et131x_adapter *adapter)
* et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate.
* @adapter: pointer to our adapter structure
*/
-void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
+static void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
{
struct phy_device *phydev = adapter->phydev;
@@ -2918,6 +2892,17 @@ static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
}
+/**
+ * nic_rx_pkts - Checks the hardware for available packets
+ * @adapter: pointer to our adapter
+ *
+ * Returns rfd, a pointer to our MPRFD.
+ *
+ * Checks the hardware for available packets, using completion ring
+ * If packets are available, it gets an RFD from the recv_list, attaches
+ * the packet to it, puts the RFD in the RecvPendList, and also returns
+ * the pointer to the RFD.
+ */
static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
{
struct rx_ring *rx_local = &adapter->rx_ring;
@@ -3139,7 +3124,7 @@ static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter)
*
* Assumption, Rcv spinlock has been acquired.
*/
-void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
+static void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
{
struct rfd *rfd = NULL;
u32 count = 0;
@@ -3188,8 +3173,6 @@ void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
adapter->rx_ring.unfinished_receives = false;
}
-/* TX functions */
-
/**
* et131x_tx_dma_memory_alloc
* @adapter: pointer to our private adapter structure
@@ -3202,7 +3185,7 @@ void et131x_handle_recv_interrupt(struct et131x_adapter *adapter)
* memory. The device will update the "status" in memory each time it xmits a
* packet.
*/
-int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
+static int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
{
int desc_size = 0;
struct tx_ring *tx_ring = &adapter->tx_ring;
@@ -3256,7 +3239,7 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
*
* Returns 0 on success and errno on failure (as defined in errno.h).
*/
-void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
+static void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
{
int desc_size = 0;
@@ -3578,7 +3561,7 @@ static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
*
* Return 0 in almost all cases; non-zero value in extreme hard failure only
*/
-int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
+static int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
{
int status = 0;
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -3696,7 +3679,7 @@ static inline void free_send_packet(struct et131x_adapter *adapter,
*
* Assumption - Send spinlock has been acquired
*/
-void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
+static void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
{
struct tcb *tcb;
unsigned long flags;
@@ -3743,7 +3726,7 @@ void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
*
* Assumption - Send spinlock has been acquired
*/
-void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
+static void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
{
unsigned long flags;
u32 serviced;
@@ -3798,8 +3781,6 @@ void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
}
-/* ETHTOOL functions */
-
static int et131x_get_settings(struct net_device *netdev,
struct ethtool_cmd *cmd)
{
@@ -3965,18 +3946,16 @@ static struct ethtool_ops et131x_ethtool_ops = {
.get_link = ethtool_op_get_link,
};
-void et131x_set_ethtool_ops(struct net_device *netdev)
+static void et131x_set_ethtool_ops(struct net_device *netdev)
{
SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops);
}
-/* PCI functions */
-
/**
* et131x_hwaddr_init - set up the MAC Address on the ET1310
* @adapter: pointer to our private adapter structure
*/
-void et131x_hwaddr_init(struct et131x_adapter *adapter)
+static void et131x_hwaddr_init(struct et131x_adapter *adapter)
{
/* If have our default mac from init and no mac address from
* EEPROM then we need to generate the last octet and set it on the
@@ -4022,24 +4001,31 @@ void et131x_hwaddr_init(struct et131x_adapter *adapter)
static int et131x_pci_init(struct et131x_adapter *adapter,
struct pci_dev *pdev)
{
- int i;
- u8 max_payload;
- u8 read_size_reg;
+ int cap = pci_pcie_cap(pdev);
+ u16 max_payload;
+ u16 ctl;
+ int i, rc;
- if (et131x_init_eeprom(adapter) < 0)
- return -EIO;
+ rc = et131x_init_eeprom(adapter);
+ if (rc < 0)
+ goto out;
+ if (!cap) {
+ dev_err(&pdev->dev, "Missing PCIe capabilities\n");
+ goto err_out;
+ }
+
/* Let's set up the PORT LOGIC Register. First we need to know what
* the max_payload_size is
*/
- if (pci_read_config_byte(pdev, ET1310_PCI_MAX_PYLD, &max_payload)) {
+ if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCAP, &max_payload)) {
dev_err(&pdev->dev,
"Could not read PCI config space for Max Payload Size\n");
- return -EIO;
+ goto err_out;
}
/* Program the Ack/Nak latency and replay timers */
- max_payload &= 0x07; /* Only the lower 3 bits are valid */
+ max_payload &= 0x07;
if (max_payload < 2) {
static const u16 acknak[2] = { 0x76, 0xD0 };
@@ -4049,13 +4035,13 @@ static int et131x_pci_init(struct et131x_adapter *adapter,
acknak[max_payload])) {
dev_err(&pdev->dev,
"Could not write PCI config space for ACK/NAK\n");
- return -EIO;
+ goto err_out;
}
if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
replay[max_payload])) {
dev_err(&pdev->dev,
"Could not write PCI config space for Replay Timer\n");
- return -EIO;
+ goto err_out;
}
}
@@ -4065,23 +4051,22 @@ static int et131x_pci_init(struct et131x_adapter *adapter,
if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
dev_err(&pdev->dev,
"Could not write PCI config space for Latency Timers\n");
- return -EIO;
+ goto err_out;
}
/* Change the max read size to 2k */
- if (pci_read_config_byte(pdev, 0x51, &read_size_reg)) {
+ if (pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl)) {
dev_err(&pdev->dev,
"Could not read PCI config space for Max read size\n");
- return -EIO;
+ goto err_out;
}
- read_size_reg &= 0x8f;
- read_size_reg |= 0x40;
+ ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | (0x04 << 12);
- if (pci_write_config_byte(pdev, 0x51, read_size_reg)) {
+ if (pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl)) {
dev_err(&pdev->dev,
"Could not write PCI config space for Max read size\n");
- return -EIO;
+ goto err_out;
}
/* Get MAC address from config space if an eeprom exists, otherwise
@@ -4096,11 +4081,15 @@ static int et131x_pci_init(struct et131x_adapter *adapter,
if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
adapter->rom_addr + i)) {
dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
- return -EIO;
+ goto err_out;
}
}
memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN);
- return 0;
+out:
+ return rc;
+err_out:
+ rc = -EIO;
+ goto out;
}
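
The PCI init rework above drops the hard-coded config offsets (0x4C, 0x51) in favour of the device's PCI Express capability: the maximum payload size is read from PCI_EXP_DEVCAP, and the maximum read request size is programmed through PCI_EXP_DEVCTL (value 4 in bits 14:12, i.e. 2048 bytes). As a minimal sketch of an equivalent way to request the 2 KB read size, assuming the core pcie_set_readrq() helper is available in the target kernel (this is not what the patch itself does):

	/* Illustrative sketch only -- not part of this patch. */
	#include <linux/pci.h>

	static int example_set_readrq_2k(struct pci_dev *pdev)
	{
		/* pcie_set_readrq() takes the size in bytes and updates the
		 * READRQ field of PCI_EXP_DEVCTL on our behalf. */
		return pcie_set_readrq(pdev, 2048);
	}
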
/**
@@ -4110,7 +4099,7 @@ static int et131x_pci_init(struct et131x_adapter *adapter,
* The routine called when the error timer expires, to track the number of
* recurring errors.
*/
-void et131x_error_timer_handler(unsigned long data)
+static void et131x_error_timer_handler(unsigned long data)
{
struct et131x_adapter *adapter = (struct et131x_adapter *) data;
struct phy_device *phydev = adapter->phydev;
@@ -4153,7 +4142,7 @@ void et131x_error_timer_handler(unsigned long data)
*
* Allocate all the memory blocks for send, receive and others.
*/
-int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
+static int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
{
int status;
@@ -4188,7 +4177,7 @@ int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
* et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx
* @adapter: pointer to our private adapter structure
*/
-void et131x_adapter_memory_free(struct et131x_adapter *adapter)
+static void et131x_adapter_memory_free(struct et131x_adapter *adapter)
{
/* Free DMA memory */
et131x_tx_dma_memory_free(adapter);
@@ -4364,10 +4353,6 @@ static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
adapter->pdev = pci_dev_get(pdev);
adapter->netdev = netdev;
- /* Do the same for the netdev struct */
- netdev->irq = pdev->irq;
- netdev->base_addr = pci_resource_start(pdev, 0);
-
/* Initialize spinlocks here */
spin_lock_init(&adapter->lock);
spin_lock_init(&adapter->tcb_send_qlock);
@@ -4400,6 +4385,7 @@ static void __devexit et131x_pci_remove(struct pci_dev *pdev)
struct et131x_adapter *adapter = netdev_priv(netdev);
unregister_netdev(netdev);
+ phy_disconnect(adapter->phydev);
mdiobus_unregister(adapter->mii_bus);
kfree(adapter->mii_bus->irq);
mdiobus_free(adapter->mii_bus);
@@ -4417,7 +4403,7 @@ static void __devexit et131x_pci_remove(struct pci_dev *pdev)
* et131x_up - Bring up a device for use.
* @netdev: device to be opened
*/
-void et131x_up(struct net_device *netdev)
+static void et131x_up(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -4429,7 +4415,7 @@ void et131x_up(struct net_device *netdev)
* et131x_down - Bring down the device
* @netdev: device to be broght down
*/
-void et131x_down(struct net_device *netdev)
+static void et131x_down(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
@@ -4475,8 +4461,6 @@ static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
#define ET131X_PM_OPS NULL
#endif
-/* ISR functions */
-
/**
* et131x_isr - The Interrupt Service Routine for the driver.
* @irq: the IRQ on which the interrupt was received.
@@ -4573,7 +4557,7 @@ out:
* scheduled to run in a deferred context by the ISR. This is where the ISR's
* work actually gets done.
*/
-void et131x_isr_handler(struct work_struct *work)
+static void et131x_isr_handler(struct work_struct *work)
{
struct et131x_adapter *adapter =
container_of(work, struct et131x_adapter, task);
@@ -4774,8 +4758,6 @@ void et131x_isr_handler(struct work_struct *work)
et131x_enable_interrupts(adapter);
}
-/* NETDEV functions */
-
/**
* et131x_stats - Return the current device statistics.
* @netdev: device whose stats are being queried
@@ -4829,10 +4811,12 @@ static struct net_device_stats *et131x_stats(struct net_device *netdev)
*
* Returns 0 on success, errno on failure (as defined in errno.h)
*/
-int et131x_open(struct net_device *netdev)
+static int et131x_open(struct net_device *netdev)
{
- int result = 0;
struct et131x_adapter *adapter = netdev_priv(netdev);
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned int irq = pdev->irq;
+ int result;
/* Start the timer to track NIC errors */
init_timer(&adapter->error_timer);
@@ -4841,12 +4825,9 @@ int et131x_open(struct net_device *netdev)
adapter->error_timer.data = (unsigned long)adapter;
add_timer(&adapter->error_timer);
- /* Register our IRQ */
- result = request_irq(netdev->irq, et131x_isr, IRQF_SHARED,
- netdev->name, netdev);
+ result = request_irq(irq, et131x_isr, IRQF_SHARED, netdev->name, netdev);
if (result) {
- dev_err(&adapter->pdev->dev, "could not register IRQ %d\n",
- netdev->irq);
+ dev_err(&pdev->dev, "could not register IRQ %d\n", irq);
return result;
}
@@ -4863,14 +4844,14 @@ int et131x_open(struct net_device *netdev)
*
* Returns 0 on success, errno on failure (as defined in errno.h)
*/
-int et131x_close(struct net_device *netdev)
+static int et131x_close(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
et131x_down(netdev);
adapter->flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE;
- free_irq(netdev->irq, netdev);
+ free_irq(adapter->pdev->irq, netdev);
/* Stop the error timer */
return del_timer_sync(&adapter->error_timer);
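
Since netdev->irq is no longer populated during adapter init, open/close above take the interrupt line straight from the PCI device. A minimal sketch of that pairing, assuming the usual request_irq()/free_irq() semantics (the example_ function names are hypothetical):

	/* Illustrative sketch only -- not part of this patch. */
	static int example_open(struct net_device *netdev)
	{
		struct et131x_adapter *adapter = netdev_priv(netdev);
		unsigned int irq = adapter->pdev->irq;	/* line comes from the PCI core */
		int rc;

		rc = request_irq(irq, et131x_isr, IRQF_SHARED, netdev->name, netdev);
		if (rc)
			return rc;
		/* ... bring the device up ... */
		return 0;
	}

	static int example_close(struct net_device *netdev)
	{
		struct et131x_adapter *adapter = netdev_priv(netdev);

		/* ... bring the device down ... */
		free_irq(adapter->pdev->irq, netdev);	/* must match the request_irq() cookie */
		return 0;
	}
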
@@ -4905,8 +4886,8 @@ static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf,
*/
static int et131x_set_packet_filter(struct et131x_adapter *adapter)
{
+ int filter = adapter->packet_filter;
int status = 0;
- uint32_t filter = adapter->packet_filter;
u32 ctrl;
u32 pf_ctrl;
@@ -4968,7 +4949,7 @@ static int et131x_set_packet_filter(struct et131x_adapter *adapter)
static void et131x_multicast(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
- uint32_t packet_filter = 0;
+ int packet_filter;
unsigned long flags;
struct netdev_hw_addr *ha;
int i;
@@ -5249,40 +5230,6 @@ static const struct net_device_ops et131x_netdev_ops = {
};
/**
- * et131x_device_alloc
- *
- * Returns pointer to the allocated and initialized net_device struct for
- * this device.
- *
- * Create instances of net_device and wl_private for the new adapter and
- * register the device's entry points in the net_device structure.
- */
-struct net_device *et131x_device_alloc(void)
-{
- struct net_device *netdev;
-
- /* Alloc net_device and adapter structs */
- netdev = alloc_etherdev(sizeof(struct et131x_adapter));
-
- if (!netdev) {
- printk(KERN_ERR "et131x: Alloc of net_device struct failed\n");
- return NULL;
- }
-
- /*
- * Setup the function registration table (and other data) for a
- * net_device
- */
- netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
- netdev->netdev_ops = &et131x_netdev_ops;
-
- /* Poll? */
- /* netdev->poll = &et131x_poll; */
- /* netdev->poll_controller = &et131x_poll_controller; */
- return netdev;
-}
-
-/**
* et131x_pci_setup - Perform device initialization
* @pdev: a pointer to the device's pci_dev structure
* @ent: this device's entry in the pci_device_id table
@@ -5297,24 +5244,26 @@ struct net_device *et131x_device_alloc(void)
static int __devinit et131x_pci_setup(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- int result;
struct net_device *netdev;
struct et131x_adapter *adapter;
+ int rc;
int ii;
- result = pci_enable_device(pdev);
- if (result) {
+ rc = pci_enable_device(pdev);
+ if (rc < 0) {
dev_err(&pdev->dev, "pci_enable_device() failed\n");
- goto err_out;
+ goto out;
}
/* Perform some basic PCI checks */
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
dev_err(&pdev->dev, "Can't find PCI device's base address\n");
+ rc = -ENODEV;
goto err_disable;
}
- if (pci_request_regions(pdev, DRIVER_NAME)) {
+ rc = pci_request_regions(pdev, DRIVER_NAME);
+ if (rc < 0) {
dev_err(&pdev->dev, "Can't get PCI resources\n");
goto err_disable;
}
@@ -5323,46 +5272,50 @@ static int __devinit et131x_pci_setup(struct pci_dev *pdev,
/* Check the DMA addressing support of this device */
if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- result = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
- if (result) {
+ rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+ if (rc < 0) {
dev_err(&pdev->dev,
"Unable to obtain 64 bit DMA for consistent allocations\n");
goto err_release_res;
}
} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- result = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
- if (result) {
+ rc = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (rc < 0) {
dev_err(&pdev->dev,
"Unable to obtain 32 bit DMA for consistent allocations\n");
goto err_release_res;
}
} else {
dev_err(&pdev->dev, "No usable DMA addressing method\n");
- result = -EIO;
+ rc = -EIO;
goto err_release_res;
}
/* Allocate netdev and private adapter structs */
- netdev = et131x_device_alloc();
+ netdev = alloc_etherdev(sizeof(struct et131x_adapter));
if (!netdev) {
dev_err(&pdev->dev, "Couldn't alloc netdev struct\n");
- result = -ENOMEM;
+ rc = -ENOMEM;
goto err_release_res;
}
+ netdev->watchdog_timeo = ET131X_TX_TIMEOUT;
+ netdev->netdev_ops = &et131x_netdev_ops;
+
SET_NETDEV_DEV(netdev, &pdev->dev);
et131x_set_ethtool_ops(netdev);
adapter = et131x_adapter_init(netdev, pdev);
- /* Initialise the PCI setup for the device */
- et131x_pci_init(adapter, pdev);
+ rc = et131x_pci_init(adapter, pdev);
+ if (rc < 0)
+ goto err_free_dev;
/* Map the bus-relative registers to system virtual memory */
adapter->regs = pci_ioremap_bar(pdev, 0);
if (!adapter->regs) {
dev_err(&pdev->dev, "Cannot map device registers\n");
- result = -ENOMEM;
+ rc = -ENOMEM;
goto err_free_dev;
}
@@ -5376,8 +5329,8 @@ static int __devinit et131x_pci_setup(struct pci_dev *pdev,
et131x_disable_interrupts(adapter);
/* Allocate DMA memory */
- result = et131x_adapter_memory_alloc(adapter);
- if (result) {
+ rc = et131x_adapter_memory_alloc(adapter);
+ if (rc < 0) {
dev_err(&pdev->dev, "Could not alloc adapater memory (DMA)\n");
goto err_iounmap;
}
@@ -5395,6 +5348,8 @@ static int __devinit et131x_pci_setup(struct pci_dev *pdev,
adapter->boot_coma = 0;
et1310_disable_phy_coma(adapter);
+ rc = -ENOMEM;
+
/* Setup the mii_bus struct */
adapter->mii_bus = mdiobus_alloc();
if (!adapter->mii_bus) {
@@ -5418,13 +5373,14 @@ static int __devinit et131x_pci_setup(struct pci_dev *pdev,
for (ii = 0; ii < PHY_MAX_ADDR; ii++)
adapter->mii_bus->irq[ii] = PHY_POLL;
- if (mdiobus_register(adapter->mii_bus)) {
+ rc = mdiobus_register(adapter->mii_bus);
+ if (rc < 0) {
dev_err(&pdev->dev, "failed to register MII bus\n");
- mdiobus_free(adapter->mii_bus);
goto err_mdio_free_irq;
}
- if (et131x_mii_probe(netdev)) {
+ rc = et131x_mii_probe(netdev);
+ if (rc < 0) {
dev_err(&pdev->dev, "failed to probe MII bus\n");
goto err_mdio_unregister;
}
@@ -5440,10 +5396,10 @@ static int __devinit et131x_pci_setup(struct pci_dev *pdev,
*/
/* Register the net_device struct with the Linux network layer */
- result = register_netdev(netdev);
- if (result != 0) {
+ rc = register_netdev(netdev);
+ if (rc < 0) {
dev_err(&pdev->dev, "register_netdev() failed\n");
- goto err_mdio_unregister;
+ goto err_phy_disconnect;
}
/* Register the net_device struct with the PCI subsystem. Save a copy
@@ -5451,10 +5407,11 @@ static int __devinit et131x_pci_setup(struct pci_dev *pdev,
* been initialized, just in case it needs to be quickly restored.
*/
pci_set_drvdata(pdev, netdev);
- pci_save_state(adapter->pdev);
-
- return result;
+out:
+ return rc;
+err_phy_disconnect:
+ phy_disconnect(adapter->phydev);
err_mdio_unregister:
mdiobus_unregister(adapter->mii_bus);
err_mdio_free_irq:
@@ -5472,8 +5429,7 @@ err_release_res:
pci_release_regions(pdev);
err_disable:
pci_disable_device(pdev);
-err_out:
- return result;
+ goto out;
}
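
Editor's note: the probe rework above funnels every failure through a single "rc" return at the out: label, unwinding resources in reverse order of acquisition. The stripped-down skeleton below illustrates that pattern under the same assumptions (PCI probe, <linux/pci.h>); it is not the driver's actual probe.

#include <linux/pci.h>

/* Hypothetical probe skeleton showing the single-exit, reverse-unwind pattern. */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        int rc;

        rc = pci_enable_device(pdev);
        if (rc < 0)
                goto out;               /* nothing acquired yet */

        rc = pci_request_regions(pdev, "example");
        if (rc < 0)
                goto err_disable;

        /* ... further setup; each failure jumps to the label that undoes
         *     everything acquired so far ... */

out:
        return rc;

err_disable:
        pci_disable_device(pdev);
        goto out;                       /* all paths leave through 'out' */
}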
static DEFINE_PCI_DEVICE_TABLE(et131x_pci_table) = {
diff --git a/drivers/staging/frontier/alphatrack.c b/drivers/staging/frontier/alphatrack.c
index 2babb03..d8efed6 100644
--- a/drivers/staging/frontier/alphatrack.c
+++ b/drivers/staging/frontier/alphatrack.c
@@ -867,30 +867,4 @@ static struct usb_driver usb_alphatrack_driver = {
.id_table = usb_alphatrack_table,
};
-/**
- * usb_alphatrack_init
- */
-static int __init usb_alphatrack_init(void)
-{
- int retval;
-
- /* register this driver with the USB subsystem */
- retval = usb_register(&usb_alphatrack_driver);
- if (retval)
- err("usb_register failed for the " __FILE__
- " driver. Error number %d\n", retval);
-
- return retval;
-}
-
-/**
- * usb_alphatrack_exit
- */
-static void __exit usb_alphatrack_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&usb_alphatrack_driver);
-}
-
-module_init(usb_alphatrack_init);
-module_exit(usb_alphatrack_exit);
+module_usb_driver(usb_alphatrack_driver);
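
Editor's note: module_usb_driver() removes the hand-rolled init/exit boilerplate deleted above. Conceptually (simplified -- the real macro goes through module_driver() and usb_register_driver()), it expands to something like the following for a hypothetical struct usb_driver named example_driver:

/* Approximate expansion of module_usb_driver(example_driver). */
static int __init example_driver_init(void)
{
        return usb_register(&example_driver);
}
module_init(example_driver_init);

static void __exit example_driver_exit(void)
{
        usb_deregister(&example_driver);
}
module_exit(example_driver_exit);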
diff --git a/drivers/staging/frontier/tranzport.c b/drivers/staging/frontier/tranzport.c
index 8894ab1..cf47a5d 100644
--- a/drivers/staging/frontier/tranzport.c
+++ b/drivers/staging/frontier/tranzport.c
@@ -199,7 +199,7 @@ static void usb_tranzport_abort_transfers(struct usb_tranzport *dev)
struct usb_interface *intf = to_usb_interface(dev); \
struct usb_tranzport *t = usb_get_intfdata(intf); \
unsigned long temp; \
- if (strict_strtoul(buf, 10, &temp)) \
+ if (kstrtoul(buf, 10, &temp)) \
return -EINVAL; \
t->value = temp; \
return count; \
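
Editor's note: kstrtoul() is the checked replacement for the deprecated strict_strtoul(); it returns 0 on success and a negative errno (-EINVAL or -ERANGE) on bad input. The small helper below is a hypothetical illustration of the same pattern the sysfs store macro uses, not code from the patch.

#include <linux/kernel.h>

/* Sketch of the kstrtoul() call pattern used in the store method above. */
static ssize_t example_store(const char *buf, size_t count, unsigned long *value)
{
        unsigned long temp;

        if (kstrtoul(buf, 10, &temp))   /* rejects non-numeric or out-of-range input */
                return -EINVAL;
        *value = temp;
        return count;
}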
@@ -971,29 +971,4 @@ static struct usb_driver usb_tranzport_driver = {
.id_table = usb_tranzport_table,
};
-/**
- * usb_tranzport_init
- */
-static int __init usb_tranzport_init(void)
-{
- int retval;
-
- /* register this driver with the USB subsystem */
- retval = usb_register(&usb_tranzport_driver);
- if (retval)
- err("usb_register failed for the " __FILE__
- " driver. Error number %d\n", retval);
- return retval;
-}
-/**
- * usb_tranzport_exit
- */
-
-static void __exit usb_tranzport_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&usb_tranzport_driver);
-}
-
-module_init(usb_tranzport_init);
-module_exit(usb_tranzport_exit);
+module_usb_driver(usb_tranzport_driver);
diff --git a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
index b3d743a..917bbb0 100644
--- a/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-pcmcia/ft1000_hw.c
@@ -344,10 +344,10 @@ static void ft1000_reset_asic(struct net_device *dev)
}
mdelay(1);
if (info->AsicID == ELECTRABUZZ_ID) {
- // set watermark to -1 in order to not generate an interrrupt
+ // set watermark to -1 in order to not generate an interrupt
ft1000_write_reg(dev, FT1000_REG_WATERMARK, 0xffff);
} else {
- // set watermark to -1 in order to not generate an interrrupt
+ // set watermark to -1 in order to not generate an interrupt
ft1000_write_reg(dev, FT1000_REG_MAG_WATERMARK, 0xffff);
}
// clear interrupts
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
index aaf44c3..43b1d36 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_hw.c
@@ -601,7 +601,7 @@ static void ft1000_reset_asic(struct net_device *dev)
mdelay(1);
- /* set watermark to -1 in order to not generate an interrrupt */
+ /* set watermark to -1 in order to not generate an interrupt */
ft1000_write_register(ft1000dev, 0xffff, FT1000_REG_MAG_WATERMARK);
/* clear interrupts */
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
index 79482ac..84c38d5 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_usb.c
@@ -263,24 +263,4 @@ static struct usb_driver ft1000_usb_driver = {
.id_table = id_table,
};
-static int __init usb_ft1000_init(void)
-{
- int ret = 0;
-
- DEBUG("Initialize and register the driver\n");
-
- ret = usb_register(&ft1000_usb_driver);
- if (ret)
- err("usb_register failed. Error number %d", ret);
-
- return ret;
-}
-
-static void __exit usb_ft1000_exit(void)
-{
- DEBUG("Deregister the driver\n");
- usb_deregister(&ft1000_usb_driver);
-}
-
-module_init(usb_ft1000_init);
-module_exit(usb_ft1000_exit);
+module_usb_driver(ft1000_usb_driver);
diff --git a/drivers/staging/gma500/Makefile b/drivers/staging/gma500/Makefile
deleted file mode 100644
index c729868..0000000
--- a/drivers/staging/gma500/Makefile
+++ /dev/null
@@ -1,52 +0,0 @@
-#
-# KMS driver for the GMA500
-#
-ccflags-y += -Iinclude/drm
-
-psb_gfx-y += gem_glue.o \
- accel_2d.o \
- backlight.o \
- framebuffer.o \
- gem.o \
- gtt.o \
- intel_bios.o \
- intel_i2c.o \
- intel_opregion.o \
- mmu.o \
- power.o \
- psb_drv.o \
- psb_intel_display.o \
- psb_intel_lvds.o \
- psb_intel_modes.o \
- psb_intel_sdvo.o \
- psb_lid.o \
- psb_irq.o \
- psb_device.o \
- mid_bios.o
-
-psb_gfx-$(CONFIG_DRM_PSB_CDV) += cdv_device.o \
- cdv_intel_crt.o \
- cdv_intel_display.o \
- cdv_intel_hdmi.o \
- cdv_intel_lvds.o
-
-psb_gfx-$(CONFIG_DRM_PSB_MRST) += mrst_device.o \
- mrst_crtc.o \
- mrst_lvds.o \
- mrst_hdmi.o \
- mrst_hdmi_i2c.o
-
-psb_gfx-$(CONFIG_DRM_PSB_MFLD) += mdfld_device.o \
- mdfld_output.o \
- mdfld_pyr_cmd.o \
- mdfld_tmd_vid.o \
- mdfld_tpo_cmd.o \
- mdfld_tpo_vid.o \
- mdfld_dsi_pkg_sender.o \
- mdfld_dsi_dpi.o \
- mdfld_dsi_output.o \
- mdfld_dsi_dbi.o \
- mdfld_dsi_dbi_dpu.o \
- mdfld_intel_display.o
-
-obj-$(CONFIG_DRM_PSB) += psb_gfx.o
diff --git a/drivers/staging/gma500/TODO b/drivers/staging/gma500/TODO
deleted file mode 100644
index fc83615..0000000
--- a/drivers/staging/gma500/TODO
+++ /dev/null
@@ -1,15 +0,0 @@
-- Sort out the power management side. Not important for Poulsbo but
- matters for Moorestown/Medfield
-- Debug Oaktrail/Moorestown support (single pipe, no BIOS on mrst,
- some other differences)
-- Add 2D acceleration via console and DRM
-- Add scrolling acceleration using the GTT to do remapping on the main
- framebuffer.
-- HDMI testing
-- Oaktrail HDMI and other features
-- Oaktrail MIPI
-- Medfield needs a lot of further love
-
-As per kernel policy and the in the interest of the safety of various
-kittens there is no support or plans to add hooks for the closed user space
-stuff.
diff --git a/drivers/staging/gma500/displays/hdmi.h b/drivers/staging/gma500/displays/hdmi.h
deleted file mode 100644
index d58ba9b..0000000
--- a/drivers/staging/gma500/displays/hdmi.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Thomas Eaton <thomas.g.eaton@intel.com>
- * Scott Rowe <scott.m.rowe@intel.com>
- */
-
-#ifndef HDMI_H
-#define HDMI_H
-
-extern void hdmi_init(struct drm_device *dev);
-
-#endif
diff --git a/drivers/staging/gma500/displays/pyr_cmd.h b/drivers/staging/gma500/displays/pyr_cmd.h
deleted file mode 100644
index 84bae5c..0000000
--- a/drivers/staging/gma500/displays/pyr_cmd.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicensen
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Thomas Eaton <thomas.g.eaton@intel.com>
- * Scott Rowe <scott.m.rowe@intel.com>
- */
-
-#ifndef PYR_CMD_H
-#define PYR_CMD_H
-
-extern void pyr_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs);
-
-#endif
-
diff --git a/drivers/staging/gma500/displays/pyr_vid.h b/drivers/staging/gma500/displays/pyr_vid.h
deleted file mode 100644
index ce98860..0000000
--- a/drivers/staging/gma500/displays/pyr_vid.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicensen
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Thomas Eaton <thomas.g.eaton@intel.com>
- * Scott Rowe <scott.m.rowe@intel.com>
-*/
-
-#ifndef PYR_VID_H
-#define PYR_VID_H
-
-extern void pyr_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs);
-extern struct drm_display_mode *pyr_vid_get_config_mode(struct drm_device* dev);
-
-#endif
diff --git a/drivers/staging/gma500/displays/tmd_cmd.h b/drivers/staging/gma500/displays/tmd_cmd.h
deleted file mode 100644
index 641e85e..0000000
--- a/drivers/staging/gma500/displays/tmd_cmd.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicensen
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Thomas Eaton <thomas.g.eaton@intel.com>
- * Scott Rowe <scott.m.rowe@intel.com>
- */
-
-#ifndef TMD_CMD_H
-#define TMD_CMD_H
-
-extern void tmd_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs);
-extern struct drm_display_mode *tmd_cmd_get_config_mode(struct drm_device *dev);
-
-#endif
diff --git a/drivers/staging/gma500/displays/tmd_vid.h b/drivers/staging/gma500/displays/tmd_vid.h
deleted file mode 100644
index 7a5fa3b..0000000
--- a/drivers/staging/gma500/displays/tmd_vid.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Copyright (c) 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicensen
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Thomas Eaton <thomas.g.eaton@intel.com>
- * Scott Rowe <scott.m.rowe@intel.com>
-*/
-
-#ifndef TMD_VID_H
-#define TMD_VID_H
-
-extern void tmd_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs);
-extern struct drm_display_mode *tmd_vid_get_config_mode(struct drm_device *dev);
-
-#endif
diff --git a/drivers/staging/gma500/displays/tpo_cmd.h b/drivers/staging/gma500/displays/tpo_cmd.h
deleted file mode 100644
index 6105527..0000000
--- a/drivers/staging/gma500/displays/tpo_cmd.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (c) 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicensen
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Thomas Eaton <thomas.g.eaton@intel.com>
- * Scott Rowe <scott.m.rowe@intel.com>
-*/
-
-#ifndef TPO_CMD_H
-#define TPO_CMD_H
-
-extern void tpo_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs);
-/* extern struct drm_display_mode * */
-/* tpo_cmd_get_config_mode(struct drm_device *dev); */
-
-#endif
diff --git a/drivers/staging/gma500/displays/tpo_vid.h b/drivers/staging/gma500/displays/tpo_vid.h
deleted file mode 100644
index c24f057..0000000
--- a/drivers/staging/gma500/displays/tpo_vid.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright (c) 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicensen
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Thomas Eaton <thomas.g.eaton@intel.com>
- * Scott Rowe <scott.m.rowe@intel.com>
- */
-
-#ifndef TPO_VID_H
-#define TPO_VID_H
-
-extern void tpo_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs);
-
-#endif
diff --git a/drivers/staging/gma500/mdfld_device.c b/drivers/staging/gma500/mdfld_device.c
deleted file mode 100644
index f47aeb7..0000000
--- a/drivers/staging/gma500/mdfld_device.c
+++ /dev/null
@@ -1,714 +0,0 @@
-/**************************************************************************
- * Copyright (c) 2011, Intel Corporation.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- **************************************************************************/
-
-#include <linux/backlight.h>
-#include <drm/drmP.h>
-#include <drm/drm.h>
-#include "psb_reg.h"
-#include "psb_intel_reg.h"
-#include "psb_drm.h"
-#include "psb_drv.h"
-#include "mdfld_output.h"
-#include "mdfld_dsi_output.h"
-#include "mid_bios.h"
-
-/*
- * Provide the Medfield specific backlight management
- */
-
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
-
-static int mdfld_brightness;
-struct backlight_device *mdfld_backlight_device;
-
-static int mfld_set_brightness(struct backlight_device *bd)
-{
- struct drm_device *dev = bl_get_data(mdfld_backlight_device);
- struct drm_psb_private *dev_priv = dev->dev_private;
- int level = bd->props.brightness;
-
- /* Percentage 1-100% being valid */
- if (level < 1)
- level = 1;
-
- if (gma_power_begin(dev, 0)) {
- /* Calculate and set the brightness value */
- u32 adjusted_level;
-
- /* Adjust the backlight level with the percent in
- * dev_priv->blc_adj2;
- */
- adjusted_level = level * dev_priv->blc_adj2;
- adjusted_level = adjusted_level / 100;
-#if 0
-#ifndef CONFIG_MDFLD_DSI_DPU
- if(!(dev_priv->dsr_fb_update & MDFLD_DSR_MIPI_CONTROL) &&
- (dev_priv->dbi_panel_on || dev_priv->dbi_panel_on2)){
- mdfld_dsi_dbi_exit_dsr(dev,MDFLD_DSR_MIPI_CONTROL, 0, 0);
- dev_dbg(dev->dev, "Out of DSR before set brightness to %d.\n",adjusted_level);
- }
-#endif
- mdfld_dsi_brightness_control(dev, 0, adjusted_level);
-
- if ((dev_priv->dbi_panel_on2) || (dev_priv->dpi_panel_on2))
- mdfld_dsi_brightness_control(dev, 2, adjusted_level);
-#endif
- gma_power_end(dev);
- }
- mdfld_brightness = level;
- return 0;
-}
-
-int psb_get_brightness(struct backlight_device *bd)
-{
- /* return locally cached var instead of HW read (due to DPST etc.) */
- /* FIXME: ideally return actual value in case firmware fiddled with
- it */
- return mdfld_brightness;
-}
-
-static const struct backlight_ops mfld_ops = {
- .get_brightness = psb_get_brightness,
- .update_status = mfld_set_brightness,
-};
-
-static int mdfld_backlight_init(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct backlight_properties props;
- memset(&props, 0, sizeof(struct backlight_properties));
- props.max_brightness = 100;
- props.type = BACKLIGHT_PLATFORM;
-
- mdfld_backlight_device = backlight_device_register("mfld-bl",
- NULL, (void *)dev, &mfld_ops, &props);
-
- if (IS_ERR(mdfld_backlight_device))
- return PTR_ERR(mdfld_backlight_device);
-
- dev_priv->blc_adj1 = 100;
- dev_priv->blc_adj2 = 100;
- mdfld_backlight_device->props.brightness = 100;
- mdfld_backlight_device->props.max_brightness = 100;
- backlight_update_status(mdfld_backlight_device);
- dev_priv->backlight_device = mdfld_backlight_device;
- return 0;
-}
-
-#endif
-
-/*
- * Provide the Medfield specific chip logic and low level methods for
- * power management.
- */
-
-static void mdfld_init_pm(struct drm_device *dev)
-{
- /* No work needed here yet */
-}
-
-/**
- * mdfld_save_display_registers - save registers for pipe
- * @dev: our device
- * @pipe: pipe to save
- *
- * Save the pipe state of the device before we power it off. Keep everything
- * we need to put it back again
- */
-static int mdfld_save_display_registers(struct drm_device *dev, int pipe)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- int i;
-
- /* register */
- u32 dpll_reg = MRST_DPLL_A;
- u32 fp_reg = MRST_FPA0;
- u32 pipeconf_reg = PIPEACONF;
- u32 htot_reg = HTOTAL_A;
- u32 hblank_reg = HBLANK_A;
- u32 hsync_reg = HSYNC_A;
- u32 vtot_reg = VTOTAL_A;
- u32 vblank_reg = VBLANK_A;
- u32 vsync_reg = VSYNC_A;
- u32 pipesrc_reg = PIPEASRC;
- u32 dspstride_reg = DSPASTRIDE;
- u32 dsplinoff_reg = DSPALINOFF;
- u32 dsptileoff_reg = DSPATILEOFF;
- u32 dspsize_reg = DSPASIZE;
- u32 dsppos_reg = DSPAPOS;
- u32 dspsurf_reg = DSPASURF;
- u32 mipi_reg = MIPI;
- u32 dspcntr_reg = DSPACNTR;
- u32 dspstatus_reg = PIPEASTAT;
- u32 palette_reg = PALETTE_A;
-
- /* pointer to values */
- u32 *dpll_val = &dev_priv->saveDPLL_A;
- u32 *fp_val = &dev_priv->saveFPA0;
- u32 *pipeconf_val = &dev_priv->savePIPEACONF;
- u32 *htot_val = &dev_priv->saveHTOTAL_A;
- u32 *hblank_val = &dev_priv->saveHBLANK_A;
- u32 *hsync_val = &dev_priv->saveHSYNC_A;
- u32 *vtot_val = &dev_priv->saveVTOTAL_A;
- u32 *vblank_val = &dev_priv->saveVBLANK_A;
- u32 *vsync_val = &dev_priv->saveVSYNC_A;
- u32 *pipesrc_val = &dev_priv->savePIPEASRC;
- u32 *dspstride_val = &dev_priv->saveDSPASTRIDE;
- u32 *dsplinoff_val = &dev_priv->saveDSPALINOFF;
- u32 *dsptileoff_val = &dev_priv->saveDSPATILEOFF;
- u32 *dspsize_val = &dev_priv->saveDSPASIZE;
- u32 *dsppos_val = &dev_priv->saveDSPAPOS;
- u32 *dspsurf_val = &dev_priv->saveDSPASURF;
- u32 *mipi_val = &dev_priv->saveMIPI;
- u32 *dspcntr_val = &dev_priv->saveDSPACNTR;
- u32 *dspstatus_val = &dev_priv->saveDSPASTATUS;
- u32 *palette_val = dev_priv->save_palette_a;
-
- switch (pipe) {
- case 0:
- break;
- case 1:
- /* register */
- dpll_reg = MDFLD_DPLL_B;
- fp_reg = MDFLD_DPLL_DIV0;
- pipeconf_reg = PIPEBCONF;
- htot_reg = HTOTAL_B;
- hblank_reg = HBLANK_B;
- hsync_reg = HSYNC_B;
- vtot_reg = VTOTAL_B;
- vblank_reg = VBLANK_B;
- vsync_reg = VSYNC_B;
- pipesrc_reg = PIPEBSRC;
- dspstride_reg = DSPBSTRIDE;
- dsplinoff_reg = DSPBLINOFF;
- dsptileoff_reg = DSPBTILEOFF;
- dspsize_reg = DSPBSIZE;
- dsppos_reg = DSPBPOS;
- dspsurf_reg = DSPBSURF;
- dspcntr_reg = DSPBCNTR;
- dspstatus_reg = PIPEBSTAT;
- palette_reg = PALETTE_B;
-
- /* values */
- dpll_val = &dev_priv->saveDPLL_B;
- fp_val = &dev_priv->saveFPB0;
- pipeconf_val = &dev_priv->savePIPEBCONF;
- htot_val = &dev_priv->saveHTOTAL_B;
- hblank_val = &dev_priv->saveHBLANK_B;
- hsync_val = &dev_priv->saveHSYNC_B;
- vtot_val = &dev_priv->saveVTOTAL_B;
- vblank_val = &dev_priv->saveVBLANK_B;
- vsync_val = &dev_priv->saveVSYNC_B;
- pipesrc_val = &dev_priv->savePIPEBSRC;
- dspstride_val = &dev_priv->saveDSPBSTRIDE;
- dsplinoff_val = &dev_priv->saveDSPBLINOFF;
- dsptileoff_val = &dev_priv->saveDSPBTILEOFF;
- dspsize_val = &dev_priv->saveDSPBSIZE;
- dsppos_val = &dev_priv->saveDSPBPOS;
- dspsurf_val = &dev_priv->saveDSPBSURF;
- dspcntr_val = &dev_priv->saveDSPBCNTR;
- dspstatus_val = &dev_priv->saveDSPBSTATUS;
- palette_val = dev_priv->save_palette_b;
- break;
- case 2:
- /* register */
- pipeconf_reg = PIPECCONF;
- htot_reg = HTOTAL_C;
- hblank_reg = HBLANK_C;
- hsync_reg = HSYNC_C;
- vtot_reg = VTOTAL_C;
- vblank_reg = VBLANK_C;
- vsync_reg = VSYNC_C;
- pipesrc_reg = PIPECSRC;
- dspstride_reg = DSPCSTRIDE;
- dsplinoff_reg = DSPCLINOFF;
- dsptileoff_reg = DSPCTILEOFF;
- dspsize_reg = DSPCSIZE;
- dsppos_reg = DSPCPOS;
- dspsurf_reg = DSPCSURF;
- mipi_reg = MIPI_C;
- dspcntr_reg = DSPCCNTR;
- dspstatus_reg = PIPECSTAT;
- palette_reg = PALETTE_C;
-
- /* pointer to values */
- pipeconf_val = &dev_priv->savePIPECCONF;
- htot_val = &dev_priv->saveHTOTAL_C;
- hblank_val = &dev_priv->saveHBLANK_C;
- hsync_val = &dev_priv->saveHSYNC_C;
- vtot_val = &dev_priv->saveVTOTAL_C;
- vblank_val = &dev_priv->saveVBLANK_C;
- vsync_val = &dev_priv->saveVSYNC_C;
- pipesrc_val = &dev_priv->savePIPECSRC;
- dspstride_val = &dev_priv->saveDSPCSTRIDE;
- dsplinoff_val = &dev_priv->saveDSPCLINOFF;
- dsptileoff_val = &dev_priv->saveDSPCTILEOFF;
- dspsize_val = &dev_priv->saveDSPCSIZE;
- dsppos_val = &dev_priv->saveDSPCPOS;
- dspsurf_val = &dev_priv->saveDSPCSURF;
- mipi_val = &dev_priv->saveMIPI_C;
- dspcntr_val = &dev_priv->saveDSPCCNTR;
- dspstatus_val = &dev_priv->saveDSPCSTATUS;
- palette_val = dev_priv->save_palette_c;
- break;
- default:
- DRM_ERROR("%s, invalid pipe number.\n", __func__);
- return -EINVAL;
- }
-
- /* Pipe & plane A info */
- *dpll_val = PSB_RVDC32(dpll_reg);
- *fp_val = PSB_RVDC32(fp_reg);
- *pipeconf_val = PSB_RVDC32(pipeconf_reg);
- *htot_val = PSB_RVDC32(htot_reg);
- *hblank_val = PSB_RVDC32(hblank_reg);
- *hsync_val = PSB_RVDC32(hsync_reg);
- *vtot_val = PSB_RVDC32(vtot_reg);
- *vblank_val = PSB_RVDC32(vblank_reg);
- *vsync_val = PSB_RVDC32(vsync_reg);
- *pipesrc_val = PSB_RVDC32(pipesrc_reg);
- *dspstride_val = PSB_RVDC32(dspstride_reg);
- *dsplinoff_val = PSB_RVDC32(dsplinoff_reg);
- *dsptileoff_val = PSB_RVDC32(dsptileoff_reg);
- *dspsize_val = PSB_RVDC32(dspsize_reg);
- *dsppos_val = PSB_RVDC32(dsppos_reg);
- *dspsurf_val = PSB_RVDC32(dspsurf_reg);
- *dspcntr_val = PSB_RVDC32(dspcntr_reg);
- *dspstatus_val = PSB_RVDC32(dspstatus_reg);
-
- /*save palette (gamma) */
- for (i = 0; i < 256; i++)
- palette_val[i] = PSB_RVDC32(palette_reg + (i<<2));
-
- if (pipe == 1) {
- dev_priv->savePFIT_CONTROL = PSB_RVDC32(PFIT_CONTROL);
- dev_priv->savePFIT_PGM_RATIOS = PSB_RVDC32(PFIT_PGM_RATIOS);
- dev_priv->saveHDMIPHYMISCCTL = PSB_RVDC32(HDMIPHYMISCCTL);
- dev_priv->saveHDMIB_CONTROL = PSB_RVDC32(HDMIB_CONTROL);
- return 0;
- }
- *mipi_val = PSB_RVDC32(mipi_reg);
- return 0;
-}
-
-/**
- * mdfld_save_cursor_overlay_registers - save cursor overlay info
- * @dev: our device
- *
- * Save the cursor and overlay register state
- */
-static int mdfld_save_cursor_overlay_registers(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- /* Save cursor regs */
- dev_priv->saveDSPACURSOR_CTRL = PSB_RVDC32(CURACNTR);
- dev_priv->saveDSPACURSOR_BASE = PSB_RVDC32(CURABASE);
- dev_priv->saveDSPACURSOR_POS = PSB_RVDC32(CURAPOS);
-
- dev_priv->saveDSPBCURSOR_CTRL = PSB_RVDC32(CURBCNTR);
- dev_priv->saveDSPBCURSOR_BASE = PSB_RVDC32(CURBBASE);
- dev_priv->saveDSPBCURSOR_POS = PSB_RVDC32(CURBPOS);
-
- dev_priv->saveDSPCCURSOR_CTRL = PSB_RVDC32(CURCCNTR);
- dev_priv->saveDSPCCURSOR_BASE = PSB_RVDC32(CURCBASE);
- dev_priv->saveDSPCCURSOR_POS = PSB_RVDC32(CURCPOS);
-
- /* HW overlay */
- dev_priv->saveOV_OVADD = PSB_RVDC32(OV_OVADD);
- dev_priv->saveOV_OGAMC0 = PSB_RVDC32(OV_OGAMC0);
- dev_priv->saveOV_OGAMC1 = PSB_RVDC32(OV_OGAMC1);
- dev_priv->saveOV_OGAMC2 = PSB_RVDC32(OV_OGAMC2);
- dev_priv->saveOV_OGAMC3 = PSB_RVDC32(OV_OGAMC3);
- dev_priv->saveOV_OGAMC4 = PSB_RVDC32(OV_OGAMC4);
- dev_priv->saveOV_OGAMC5 = PSB_RVDC32(OV_OGAMC5);
-
- dev_priv->saveOV_OVADD_C = PSB_RVDC32(OV_OVADD + OV_C_OFFSET);
- dev_priv->saveOV_OGAMC0_C = PSB_RVDC32(OV_OGAMC0 + OV_C_OFFSET);
- dev_priv->saveOV_OGAMC1_C = PSB_RVDC32(OV_OGAMC1 + OV_C_OFFSET);
- dev_priv->saveOV_OGAMC2_C = PSB_RVDC32(OV_OGAMC2 + OV_C_OFFSET);
- dev_priv->saveOV_OGAMC3_C = PSB_RVDC32(OV_OGAMC3 + OV_C_OFFSET);
- dev_priv->saveOV_OGAMC4_C = PSB_RVDC32(OV_OGAMC4 + OV_C_OFFSET);
- dev_priv->saveOV_OGAMC5_C = PSB_RVDC32(OV_OGAMC5 + OV_C_OFFSET);
-
- return 0;
-}
-/*
- * mdfld_restore_display_registers - restore the state of a pipe
- * @dev: our device
- * @pipe: the pipe to restore
- *
- * Restore the state of a pipe to that which was saved by the register save
- * functions.
- */
-static int mdfld_restore_display_registers(struct drm_device *dev, int pipe)
-{
- /* To get panel out of ULPS mode */
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct mdfld_dsi_config *dsi_config = NULL;
- u32 i = 0;
- u32 dpll = 0;
- u32 timeout = 0;
- u32 reg_offset = 0;
-
- /* register */
- u32 dpll_reg = MRST_DPLL_A;
- u32 fp_reg = MRST_FPA0;
- u32 pipeconf_reg = PIPEACONF;
- u32 htot_reg = HTOTAL_A;
- u32 hblank_reg = HBLANK_A;
- u32 hsync_reg = HSYNC_A;
- u32 vtot_reg = VTOTAL_A;
- u32 vblank_reg = VBLANK_A;
- u32 vsync_reg = VSYNC_A;
- u32 pipesrc_reg = PIPEASRC;
- u32 dspstride_reg = DSPASTRIDE;
- u32 dsplinoff_reg = DSPALINOFF;
- u32 dsptileoff_reg = DSPATILEOFF;
- u32 dspsize_reg = DSPASIZE;
- u32 dsppos_reg = DSPAPOS;
- u32 dspsurf_reg = DSPASURF;
- u32 dspstatus_reg = PIPEASTAT;
- u32 mipi_reg = MIPI;
- u32 dspcntr_reg = DSPACNTR;
- u32 palette_reg = PALETTE_A;
-
- /* values */
- u32 dpll_val = dev_priv->saveDPLL_A & ~DPLL_VCO_ENABLE;
- u32 fp_val = dev_priv->saveFPA0;
- u32 pipeconf_val = dev_priv->savePIPEACONF;
- u32 htot_val = dev_priv->saveHTOTAL_A;
- u32 hblank_val = dev_priv->saveHBLANK_A;
- u32 hsync_val = dev_priv->saveHSYNC_A;
- u32 vtot_val = dev_priv->saveVTOTAL_A;
- u32 vblank_val = dev_priv->saveVBLANK_A;
- u32 vsync_val = dev_priv->saveVSYNC_A;
- u32 pipesrc_val = dev_priv->savePIPEASRC;
- u32 dspstride_val = dev_priv->saveDSPASTRIDE;
- u32 dsplinoff_val = dev_priv->saveDSPALINOFF;
- u32 dsptileoff_val = dev_priv->saveDSPATILEOFF;
- u32 dspsize_val = dev_priv->saveDSPASIZE;
- u32 dsppos_val = dev_priv->saveDSPAPOS;
- u32 dspsurf_val = dev_priv->saveDSPASURF;
- u32 dspstatus_val = dev_priv->saveDSPASTATUS;
- u32 mipi_val = dev_priv->saveMIPI;
- u32 dspcntr_val = dev_priv->saveDSPACNTR;
- u32 *palette_val = dev_priv->save_palette_a;
-
- switch (pipe) {
- case 0:
- dsi_config = dev_priv->dsi_configs[0];
- break;
- case 1:
- /* register */
- dpll_reg = MDFLD_DPLL_B;
- fp_reg = MDFLD_DPLL_DIV0;
- pipeconf_reg = PIPEBCONF;
- htot_reg = HTOTAL_B;
- hblank_reg = HBLANK_B;
- hsync_reg = HSYNC_B;
- vtot_reg = VTOTAL_B;
- vblank_reg = VBLANK_B;
- vsync_reg = VSYNC_B;
- pipesrc_reg = PIPEBSRC;
- dspstride_reg = DSPBSTRIDE;
- dsplinoff_reg = DSPBLINOFF;
- dsptileoff_reg = DSPBTILEOFF;
- dspsize_reg = DSPBSIZE;
- dsppos_reg = DSPBPOS;
- dspsurf_reg = DSPBSURF;
- dspcntr_reg = DSPBCNTR;
- palette_reg = PALETTE_B;
- dspstatus_reg = PIPEBSTAT;
-
- /* values */
- dpll_val = dev_priv->saveDPLL_B & ~DPLL_VCO_ENABLE;
- fp_val = dev_priv->saveFPB0;
- pipeconf_val = dev_priv->savePIPEBCONF;
- htot_val = dev_priv->saveHTOTAL_B;
- hblank_val = dev_priv->saveHBLANK_B;
- hsync_val = dev_priv->saveHSYNC_B;
- vtot_val = dev_priv->saveVTOTAL_B;
- vblank_val = dev_priv->saveVBLANK_B;
- vsync_val = dev_priv->saveVSYNC_B;
- pipesrc_val = dev_priv->savePIPEBSRC;
- dspstride_val = dev_priv->saveDSPBSTRIDE;
- dsplinoff_val = dev_priv->saveDSPBLINOFF;
- dsptileoff_val = dev_priv->saveDSPBTILEOFF;
- dspsize_val = dev_priv->saveDSPBSIZE;
- dsppos_val = dev_priv->saveDSPBPOS;
- dspsurf_val = dev_priv->saveDSPBSURF;
- dspcntr_val = dev_priv->saveDSPBCNTR;
- dspstatus_val = dev_priv->saveDSPBSTATUS;
- palette_val = dev_priv->save_palette_b;
- break;
- case 2:
- reg_offset = MIPIC_REG_OFFSET;
-
- /* register */
- pipeconf_reg = PIPECCONF;
- htot_reg = HTOTAL_C;
- hblank_reg = HBLANK_C;
- hsync_reg = HSYNC_C;
- vtot_reg = VTOTAL_C;
- vblank_reg = VBLANK_C;
- vsync_reg = VSYNC_C;
- pipesrc_reg = PIPECSRC;
- dspstride_reg = DSPCSTRIDE;
- dsplinoff_reg = DSPCLINOFF;
- dsptileoff_reg = DSPCTILEOFF;
- dspsize_reg = DSPCSIZE;
- dsppos_reg = DSPCPOS;
- dspsurf_reg = DSPCSURF;
- mipi_reg = MIPI_C;
- dspcntr_reg = DSPCCNTR;
- palette_reg = PALETTE_C;
- dspstatus_reg = PIPECSTAT;
-
- /* values */
- pipeconf_val = dev_priv->savePIPECCONF;
- htot_val = dev_priv->saveHTOTAL_C;
- hblank_val = dev_priv->saveHBLANK_C;
- hsync_val = dev_priv->saveHSYNC_C;
- vtot_val = dev_priv->saveVTOTAL_C;
- vblank_val = dev_priv->saveVBLANK_C;
- vsync_val = dev_priv->saveVSYNC_C;
- pipesrc_val = dev_priv->savePIPECSRC;
- dspstride_val = dev_priv->saveDSPCSTRIDE;
- dsplinoff_val = dev_priv->saveDSPCLINOFF;
- dsptileoff_val = dev_priv->saveDSPCTILEOFF;
- dspsize_val = dev_priv->saveDSPCSIZE;
- dsppos_val = dev_priv->saveDSPCPOS;
- dspsurf_val = dev_priv->saveDSPCSURF;
- dspstatus_val = dev_priv->saveDSPCSTATUS;
- mipi_val = dev_priv->saveMIPI_C;
- dspcntr_val = dev_priv->saveDSPCCNTR;
- palette_val = dev_priv->save_palette_c;
-
- dsi_config = dev_priv->dsi_configs[1];
- break;
- default:
- DRM_ERROR("%s, invalid pipe number.\n", __func__);
- return -EINVAL;
- }
-
- /* Make sure VGA plane is off. it initializes to on after reset!*/
- PSB_WVDC32(0x80000000, VGACNTRL);
- if (pipe == 1) {
- PSB_WVDC32(dpll_val & ~DPLL_VCO_ENABLE, dpll_reg);
- PSB_RVDC32(dpll_reg);
-
- PSB_WVDC32(fp_val, fp_reg);
- } else {
- dpll = PSB_RVDC32(dpll_reg);
-
- if (!(dpll & DPLL_VCO_ENABLE)) {
-
- /* When ungating power of DPLL, needs to wait 0.5us before enable the VCO */
- if (dpll & MDFLD_PWR_GATE_EN) {
- dpll &= ~MDFLD_PWR_GATE_EN;
- PSB_WVDC32(dpll, dpll_reg);
- udelay(500); /* FIXME: 1 ? */
- }
-
- PSB_WVDC32(fp_val, fp_reg);
- PSB_WVDC32(dpll_val, dpll_reg);
- /* FIXME_MDFLD PO - change 500 to 1 after PO */
- udelay(500);
-
- dpll_val |= DPLL_VCO_ENABLE;
- PSB_WVDC32(dpll_val, dpll_reg);
- PSB_RVDC32(dpll_reg);
-
- /* wait for DSI PLL to lock */
- while ((timeout < 20000) && !(PSB_RVDC32(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
- udelay(150);
- timeout++;
- }
-
- if (timeout == 20000) {
- DRM_ERROR("%s, can't lock DSIPLL.\n",
- __func__);
- return -EINVAL;
- }
- }
- }
- /* Restore mode */
- PSB_WVDC32(htot_val, htot_reg);
- PSB_WVDC32(hblank_val, hblank_reg);
- PSB_WVDC32(hsync_val, hsync_reg);
- PSB_WVDC32(vtot_val, vtot_reg);
- PSB_WVDC32(vblank_val, vblank_reg);
- PSB_WVDC32(vsync_val, vsync_reg);
- PSB_WVDC32(pipesrc_val, pipesrc_reg);
- PSB_WVDC32(dspstatus_val, dspstatus_reg);
-
- /* Set up the plane */
- PSB_WVDC32(dspstride_val, dspstride_reg);
- PSB_WVDC32(dsplinoff_val, dsplinoff_reg);
- PSB_WVDC32(dsptileoff_val, dsptileoff_reg);
- PSB_WVDC32(dspsize_val, dspsize_reg);
- PSB_WVDC32(dsppos_val, dsppos_reg);
- PSB_WVDC32(dspsurf_val, dspsurf_reg);
-
- if (pipe == 1) {
- PSB_WVDC32(dev_priv->savePFIT_CONTROL, PFIT_CONTROL);
- PSB_WVDC32(dev_priv->savePFIT_PGM_RATIOS, PFIT_PGM_RATIOS);
- PSB_WVDC32(dev_priv->saveHDMIPHYMISCCTL, HDMIPHYMISCCTL);
- PSB_WVDC32(dev_priv->saveHDMIB_CONTROL, HDMIB_CONTROL);
-
- } else {
- /* Set up pipe related registers */
- PSB_WVDC32(mipi_val, mipi_reg);
- /* Setup MIPI adapter + MIPI IP registers */
- mdfld_dsi_controller_init(dsi_config, pipe);
- msleep(20);
- }
- /* Enable the plane */
- PSB_WVDC32(dspcntr_val, dspcntr_reg);
- msleep(20);
- /* Enable the pipe */
- PSB_WVDC32(pipeconf_val, pipeconf_reg);
-
- for (i = 0; i < 256; i++)
- PSB_WVDC32(palette_val[i], palette_reg + (i<<2));
- if (pipe == 1)
- return 0;
- if (!mdfld_panel_dpi(dev))
- mdfld_enable_te(dev, pipe);
- return 0;
-}
-
-/**
- * mdfld_restore_cursor_overlay_registers - restore cursor
- * @dev: our device
- *
- * Restore the cursor and overlay state that was saved earlier
- */
-static int mdfld_restore_cursor_overlay_registers(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- /* Enable Cursor A */
- PSB_WVDC32(dev_priv->saveDSPACURSOR_CTRL, CURACNTR);
- PSB_WVDC32(dev_priv->saveDSPACURSOR_POS, CURAPOS);
- PSB_WVDC32(dev_priv->saveDSPACURSOR_BASE, CURABASE);
-
- PSB_WVDC32(dev_priv->saveDSPBCURSOR_CTRL, CURBCNTR);
- PSB_WVDC32(dev_priv->saveDSPBCURSOR_POS, CURBPOS);
- PSB_WVDC32(dev_priv->saveDSPBCURSOR_BASE, CURBBASE);
-
- PSB_WVDC32(dev_priv->saveDSPCCURSOR_CTRL, CURCCNTR);
- PSB_WVDC32(dev_priv->saveDSPCCURSOR_POS, CURCPOS);
- PSB_WVDC32(dev_priv->saveDSPCCURSOR_BASE, CURCBASE);
-
- /* Restore HW overlay */
- PSB_WVDC32(dev_priv->saveOV_OVADD, OV_OVADD);
- PSB_WVDC32(dev_priv->saveOV_OGAMC0, OV_OGAMC0);
- PSB_WVDC32(dev_priv->saveOV_OGAMC1, OV_OGAMC1);
- PSB_WVDC32(dev_priv->saveOV_OGAMC2, OV_OGAMC2);
- PSB_WVDC32(dev_priv->saveOV_OGAMC3, OV_OGAMC3);
- PSB_WVDC32(dev_priv->saveOV_OGAMC4, OV_OGAMC4);
- PSB_WVDC32(dev_priv->saveOV_OGAMC5, OV_OGAMC5);
-
- PSB_WVDC32(dev_priv->saveOV_OVADD_C, OV_OVADD + OV_C_OFFSET);
- PSB_WVDC32(dev_priv->saveOV_OGAMC0_C, OV_OGAMC0 + OV_C_OFFSET);
- PSB_WVDC32(dev_priv->saveOV_OGAMC1_C, OV_OGAMC1 + OV_C_OFFSET);
- PSB_WVDC32(dev_priv->saveOV_OGAMC2_C, OV_OGAMC2 + OV_C_OFFSET);
- PSB_WVDC32(dev_priv->saveOV_OGAMC3_C, OV_OGAMC3 + OV_C_OFFSET);
- PSB_WVDC32(dev_priv->saveOV_OGAMC4_C, OV_OGAMC4 + OV_C_OFFSET);
- PSB_WVDC32(dev_priv->saveOV_OGAMC5_C, OV_OGAMC5 + OV_C_OFFSET);
-
- return 0;
-}
-
-/**
- * mdfld_save_display_registers - save registers lost on suspend
- * @dev: our DRM device
- *
- * Save the state we need in order to be able to restore the interface
- * upon resume from suspend
- */
-static int mdfld_save_registers(struct drm_device *dev)
-{
- /* FIXME: We need to shut down panels here if using them
- and once the right bits are merged */
- mdfld_save_cursor_overlay_registers(dev);
- mdfld_save_display_registers(dev, 0);
- mdfld_save_display_registers(dev, 0);
- mdfld_save_display_registers(dev, 2);
- mdfld_save_display_registers(dev, 1);
- mdfld_disable_crtc(dev, 0);
- mdfld_disable_crtc(dev, 2);
- mdfld_disable_crtc(dev, 1);
- return 0;
-}
-
-/**
- * mdfld_restore_display_registers - restore lost register state
- * @dev: our DRM device
- *
- * Restore register state that was lost during suspend and resume.
- */
-static int mdfld_restore_registers(struct drm_device *dev)
-{
- mdfld_restore_display_registers(dev, 1);
- mdfld_restore_display_registers(dev, 0);
- mdfld_restore_display_registers(dev, 2);
- mdfld_restore_cursor_overlay_registers(dev);
- return 0;
-}
-
-static int mdfld_power_down(struct drm_device *dev)
-{
- /* FIXME */
- return 0;
-}
-
-static int mdfld_power_up(struct drm_device *dev)
-{
- /* FIXME */
- return 0;
-}
-
-const struct psb_ops mdfld_chip_ops = {
- .name = "Medfield",
- .accel_2d = 0,
- .pipes = 3,
- .crtcs = 2,
- .sgx_offset = MRST_SGX_OFFSET,
-
- .chip_setup = mid_chip_setup,
-
- .crtc_helper = &mdfld_helper_funcs,
- .crtc_funcs = &mdfld_intel_crtc_funcs,
-
- .output_init = mdfld_output_init,
-
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- .backlight_init = mdfld_backlight_init,
-#endif
-
- .init_pm = mdfld_init_pm,
- .save_regs = mdfld_save_registers,
- .restore_regs = mdfld_restore_registers,
- .power_down = mdfld_power_down,
- .power_up = mdfld_power_up,
-};
-
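
Editor's note: the deleted mdfld_device.c selects per-pipe register addresses and save slots through long switch statements. Purely as an illustrative sketch (not how the driver was written), the same pipe-to-register mapping can be expressed as a small lookup table; register macros are the ones used in the deleted code, the struct and function are hypothetical.

/* Illustrative per-pipe register table for the timing registers saved above. */
struct pipe_timing_regs {
        u32 pipeconf, htotal, hblank, hsync, vtotal, vblank, vsync;
};

static const struct pipe_timing_regs example_pipe_regs[] = {
        { PIPEACONF, HTOTAL_A, HBLANK_A, HSYNC_A, VTOTAL_A, VBLANK_A, VSYNC_A },
        { PIPEBCONF, HTOTAL_B, HBLANK_B, HSYNC_B, VTOTAL_B, VBLANK_B, VSYNC_B },
        { PIPECCONF, HTOTAL_C, HBLANK_C, HSYNC_C, VTOTAL_C, VBLANK_C, VSYNC_C },
};

/* Read one pipe's timing registers into a caller-provided array of 7 values. */
static void example_save_pipe_timings(int pipe, u32 *out)
{
        const struct pipe_timing_regs *r = &example_pipe_regs[pipe];

        out[0] = PSB_RVDC32(r->pipeconf);
        out[1] = PSB_RVDC32(r->htotal);
        out[2] = PSB_RVDC32(r->hblank);
        out[3] = PSB_RVDC32(r->hsync);
        out[4] = PSB_RVDC32(r->vtotal);
        out[5] = PSB_RVDC32(r->vblank);
        out[6] = PSB_RVDC32(r->vsync);
}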
diff --git a/drivers/staging/gma500/mdfld_dsi_dbi.c b/drivers/staging/gma500/mdfld_dsi_dbi.c
deleted file mode 100644
index fd211f3..0000000
--- a/drivers/staging/gma500/mdfld_dsi_dbi.c
+++ /dev/null
@@ -1,761 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * jim liu <jim.liu@intel.com>
- * Jackie Li<yaodong.li@intel.com>
- */
-
-#include "mdfld_dsi_dbi.h"
-#include "mdfld_dsi_dbi_dpu.h"
-#include "mdfld_dsi_pkg_sender.h"
-
-#include "power.h"
-#include <linux/pm_runtime.h>
-
-int enable_gfx_rtpm;
-
-extern struct drm_device *gpDrmDevice;
-extern int gfxrtdelay;
-int enter_dsr;
-struct mdfld_dsi_dbi_output *gdbi_output;
-extern bool gbgfxsuspended;
-extern int enable_gfx_rtpm;
-extern int gfxrtdelay;
-
-#define MDFLD_DSR_MAX_IDLE_COUNT 2
-
-/*
- * set refreshing area
- */
-int mdfld_dsi_dbi_update_area(struct mdfld_dsi_dbi_output *dbi_output,
- u16 x1, u16 y1, u16 x2, u16 y2)
-{
- struct mdfld_dsi_pkg_sender *sender =
- mdfld_dsi_encoder_get_pkg_sender(&dbi_output->base);
- u8 param[4];
- u8 cmd;
- int err;
-
- if (!sender) {
- WARN_ON(1);
- return -EINVAL;
- }
-
- /* Set column */
- cmd = DCS_SET_COLUMN_ADDRESS;
- param[0] = x1 >> 8;
- param[1] = x1;
- param[2] = x2 >> 8;
- param[3] = x2;
-
- err = mdfld_dsi_send_dcs(sender,
- cmd,
- param,
- 4,
- CMD_DATA_SRC_SYSTEM_MEM,
- MDFLD_DSI_QUEUE_PACKAGE);
- if (err) {
- dev_err(sender->dev->dev, "DCS 0x%x sent failed\n", cmd);
- goto err_out;
- }
-
- /* Set page */
- cmd = DCS_SET_PAGE_ADDRESS;
- param[0] = y1 >> 8;
- param[1] = y1;
- param[2] = y2 >> 8;
- param[3] = y2;
-
- err = mdfld_dsi_send_dcs(sender,
- cmd,
- param,
- 4,
- CMD_DATA_SRC_SYSTEM_MEM,
- MDFLD_DSI_QUEUE_PACKAGE);
- if (err) {
- dev_err(sender->dev->dev, "DCS 0x%x sent failed\n", cmd);
- goto err_out;
- }
-
- /*update screen*/
- err = mdfld_dsi_send_dcs(sender,
- write_mem_start,
- NULL,
- 0,
- CMD_DATA_SRC_PIPE,
- MDFLD_DSI_QUEUE_PACKAGE);
- if (err) {
- dev_err(sender->dev->dev, "DCS 0x%x sent failed\n", cmd);
- goto err_out;
- }
- mdfld_dsi_cmds_kick_out(sender);
-err_out:
- return err;
-}
-
-/*
- * set panel's power state
- */
-int mdfld_dsi_dbi_update_power(struct mdfld_dsi_dbi_output *dbi_output,
- int mode)
-{
- struct drm_device *dev = dbi_output->dev;
- struct mdfld_dsi_pkg_sender *sender =
- mdfld_dsi_encoder_get_pkg_sender(&dbi_output->base);
- u8 param = 0;
- u32 err = 0;
-
- if (!sender) {
- WARN_ON(1);
- return -EINVAL;
- }
-
- if (mode == DRM_MODE_DPMS_ON) {
- /* Exit sleep mode */
- err = mdfld_dsi_send_dcs(sender,
- DCS_EXIT_SLEEP_MODE,
- NULL,
- 0,
- CMD_DATA_SRC_SYSTEM_MEM,
- MDFLD_DSI_QUEUE_PACKAGE);
- if (err) {
- dev_err(dev->dev, "DCS 0x%x sent failed\n",
- DCS_EXIT_SLEEP_MODE);
- goto power_err;
- }
-
- /* Set display on */
- err = mdfld_dsi_send_dcs(sender,
- DCS_SET_DISPLAY_ON,
- NULL,
- 0,
- CMD_DATA_SRC_SYSTEM_MEM,
- MDFLD_DSI_QUEUE_PACKAGE);
- if (err) {
- dev_err(dev->dev, "DCS 0x%x sent failed\n",
- DCS_SET_DISPLAY_ON);
- goto power_err;
- }
-
- /* set tear effect on */
- err = mdfld_dsi_send_dcs(sender,
- DCS_SET_TEAR_ON,
- &param,
- 1,
- CMD_DATA_SRC_SYSTEM_MEM,
- MDFLD_DSI_QUEUE_PACKAGE);
- if (err) {
- dev_err(dev->dev, "DCS 0x%x sent failed\n",
- set_tear_on);
- goto power_err;
- }
-
- /**
- * FIXME: remove this later
- */
- err = mdfld_dsi_send_dcs(sender,
- DCS_WRITE_MEM_START,
- NULL,
- 0,
- CMD_DATA_SRC_PIPE,
- MDFLD_DSI_QUEUE_PACKAGE);
- if (err) {
- dev_err(dev->dev, "DCS 0x%x sent failed\n",
- DCS_WRITE_MEM_START);
- goto power_err;
- }
- } else {
- /* Set tear effect off */
- err = mdfld_dsi_send_dcs(sender,
- DCS_SET_TEAR_OFF,
- NULL,
- 0,
- CMD_DATA_SRC_SYSTEM_MEM,
- MDFLD_DSI_QUEUE_PACKAGE);
- if (err) {
- dev_err(dev->dev, "DCS 0x%x sent failed\n",
- DCS_SET_TEAR_OFF);
- goto power_err;
- }
-
- /* Turn display off */
- err = mdfld_dsi_send_dcs(sender,
- DCS_SET_DISPLAY_OFF,
- NULL,
- 0,
- CMD_DATA_SRC_SYSTEM_MEM,
- MDFLD_DSI_QUEUE_PACKAGE);
- if (err) {
- dev_err(dev->dev, "DCS 0x%x sent failed\n",
- DCS_SET_DISPLAY_OFF);
- goto power_err;
- }
-
- /* Now enter sleep mode */
- err = mdfld_dsi_send_dcs(sender,
- DCS_ENTER_SLEEP_MODE,
- NULL,
- 0,
- CMD_DATA_SRC_SYSTEM_MEM,
- MDFLD_DSI_QUEUE_PACKAGE);
- if (err) {
- dev_err(dev->dev, "DCS 0x%x sent failed\n",
- DCS_ENTER_SLEEP_MODE);
- goto power_err;
- }
- }
- mdfld_dsi_cmds_kick_out(sender);
-power_err:
- return err;
-}
-
-/*
- * send a generic DCS command with a parameter list
- */
-int mdfld_dsi_dbi_send_dcs(struct mdfld_dsi_dbi_output *dbi_output,
- u8 dcs, u8 *param, u32 num, u8 data_src)
-{
- struct mdfld_dsi_pkg_sender *sender =
- mdfld_dsi_encoder_get_pkg_sender(&dbi_output->base);
- int ret;
-
- if (!sender) {
- WARN_ON(1);
- return -EINVAL;
- }
-
- ret = mdfld_dsi_send_dcs(sender,
- dcs,
- param,
- num,
- data_src,
- MDFLD_DSI_SEND_PACKAGE);
-
- return ret;
-}
-
-/*
- * Enter DSR
- */
-void mdfld_dsi_dbi_enter_dsr(struct mdfld_dsi_dbi_output *dbi_output, int pipe)
-{
- u32 reg_val;
- struct drm_device *dev = dbi_output->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct drm_crtc *crtc = dbi_output->base.base.crtc;
- struct psb_intel_crtc *psb_crtc = (crtc) ?
- to_psb_intel_crtc(crtc) : NULL;
- u32 dpll_reg = MRST_DPLL_A;
- u32 pipeconf_reg = PIPEACONF;
- u32 dspcntr_reg = DSPACNTR;
-
- if (!dbi_output)
- return;
-
- /* FIXME check if can go */
- dev_priv->is_in_idle = true;
-
- gdbi_output = dbi_output;
- if ((dbi_output->mode_flags & MODE_SETTING_ON_GOING) ||
- (psb_crtc && psb_crtc->mode_flags & MODE_SETTING_ON_GOING))
- return;
-
- if (pipe == 2) {
- dpll_reg = MRST_DPLL_A;
- pipeconf_reg = PIPECCONF;
- dspcntr_reg = DSPCCNTR;
- }
-
- if (!gma_power_begin(dev, true)) {
- dev_err(dev->dev, "hw begin failed\n");
- return;
- }
- /* Disable te interrupts */
- mdfld_disable_te(dev, pipe);
-
- /* Disable plane */
- reg_val = REG_READ(dspcntr_reg);
- if (!(reg_val & DISPLAY_PLANE_ENABLE)) {
- REG_WRITE(dspcntr_reg, reg_val & ~DISPLAY_PLANE_ENABLE);
- REG_READ(dspcntr_reg);
- }
-
- /* Disable pipe */
- reg_val = REG_READ(pipeconf_reg);
- if (!(reg_val & DISPLAY_PLANE_ENABLE)) {
- reg_val &= ~DISPLAY_PLANE_ENABLE;
- reg_val |= (PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF);
- REG_WRITE(pipeconf_reg, reg_val);
- REG_READ(pipeconf_reg);
- mdfldWaitForPipeDisable(dev, pipe);
- }
-
- /* Disable DPLL */
- reg_val = REG_READ(dpll_reg);
- if (!(reg_val & DPLL_VCO_ENABLE)) {
- reg_val &= ~DPLL_VCO_ENABLE;
- REG_WRITE(dpll_reg, reg_val);
- REG_READ(dpll_reg);
- udelay(500);
- }
-
- gma_power_end(dev);
- dbi_output->mode_flags |= MODE_SETTING_IN_DSR;
- if (pipe == 2) {
- enter_dsr = 1;
- /* pm_schedule_suspend(&dev->pdev->dev, gfxrtdelay); */
- }
-}
-
-static void mdfld_dbi_output_exit_dsr(struct mdfld_dsi_dbi_output *dbi_output,
- int pipe)
-{
- struct drm_device *dev = dbi_output->dev;
- struct drm_crtc *crtc = dbi_output->base.base.crtc;
- struct psb_intel_crtc *psb_crtc = (crtc) ?
- to_psb_intel_crtc(crtc) : NULL;
- u32 reg_val;
- u32 dpll_reg = MRST_DPLL_A;
- u32 pipeconf_reg = PIPEACONF;
- u32 dspcntr_reg = DSPACNTR;
- u32 reg_offset = 0;
-
- /*if mode setting on-going, back off*/
- if ((dbi_output->mode_flags & MODE_SETTING_ON_GOING) ||
- (psb_crtc && psb_crtc->mode_flags & MODE_SETTING_ON_GOING))
- return;
-
- if (pipe == 2) {
- dpll_reg = MRST_DPLL_A;
- pipeconf_reg = PIPECCONF;
- dspcntr_reg = DSPCCNTR;
- reg_offset = MIPIC_REG_OFFSET;
- }
-
- if (!gma_power_begin(dev, true)) {
- dev_err(dev->dev, "hw begin failed\n");
- return;
- }
-
- /* Enable DPLL */
- reg_val = REG_READ(dpll_reg);
- if (!(reg_val & DPLL_VCO_ENABLE)) {
- if (reg_val & MDFLD_PWR_GATE_EN) {
- reg_val &= ~MDFLD_PWR_GATE_EN;
- REG_WRITE(dpll_reg, reg_val);
- REG_READ(dpll_reg);
- udelay(500);
- }
-
- reg_val |= DPLL_VCO_ENABLE;
- REG_WRITE(dpll_reg, reg_val);
- REG_READ(dpll_reg);
- udelay(500);
-
- /* Add timeout */
- while (!(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK))
- cpu_relax();
- }
-
- /* Enable pipe */
- reg_val = REG_READ(pipeconf_reg);
- if (!(reg_val & PIPEACONF_ENABLE)) {
- reg_val |= PIPEACONF_ENABLE;
- REG_WRITE(pipeconf_reg, reg_val);
- REG_READ(pipeconf_reg);
- udelay(500);
- mdfldWaitForPipeEnable(dev, pipe);
- }
-
- /* Enable plane */
- reg_val = REG_READ(dspcntr_reg);
- if (!(reg_val & DISPLAY_PLANE_ENABLE)) {
- reg_val |= DISPLAY_PLANE_ENABLE;
- REG_WRITE(dspcntr_reg, reg_val);
- REG_READ(dspcntr_reg);
- udelay(500);
- }
-
- /* Enable TE interrupt on this pipe */
- mdfld_enable_te(dev, pipe);
- gma_power_end(dev);
-
- /*clean IN_DSR flag*/
- dbi_output->mode_flags &= ~MODE_SETTING_IN_DSR;
-}
-
-/*
- * Exit from DSR
- */
-void mdfld_dsi_dbi_exit_dsr(struct drm_device *dev, u32 update_src)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct mdfld_dbi_dsr_info *dsr_info = dev_priv->dbi_dsr_info;
- struct mdfld_dsi_dbi_output **dbi_output;
- int i;
- int pipe;
-
- /* FIXME can go ? */
- dev_priv->is_in_idle = false;
- dbi_output = dsr_info->dbi_outputs;
-
-#ifdef CONFIG_PM_RUNTIME
- if (!enable_gfx_rtpm) {
-/* pm_runtime_allow(&gpDrmDevice->pdev->dev); */
-/* schedule_delayed_work(&rtpm_work, 30 * 1000);*/ /* FIXME: HZ ? */
- }
-#endif
-
- /* For each output, exit dsr */
- for (i = 0; i < dsr_info->dbi_output_num; i++) {
- /* If panel has been turned off, skip */
- if (!dbi_output[i] || !dbi_output[i]->dbi_panel_on)
- continue;
- pipe = dbi_output[i]->channel_num ? 2 : 0;
- enter_dsr = 0;
- mdfld_dbi_output_exit_dsr(dbi_output[i], pipe);
- }
- dev_priv->dsr_fb_update |= update_src;
-}
-
-static bool mdfld_dbi_is_in_dsr(struct drm_device *dev)
-{
- if (REG_READ(MRST_DPLL_A) & DPLL_VCO_ENABLE)
- return false;
- if ((REG_READ(PIPEACONF) & PIPEACONF_ENABLE) ||
- (REG_READ(PIPECCONF) & PIPEACONF_ENABLE))
- return false;
- if ((REG_READ(DSPACNTR) & DISPLAY_PLANE_ENABLE) ||
- (REG_READ(DSPCCNTR) & DISPLAY_PLANE_ENABLE))
- return false;
-
- return true;
-}
-
-/* Periodically update dbi panel */
-void mdfld_dbi_update_panel(struct drm_device *dev, int pipe)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct mdfld_dbi_dsr_info *dsr_info = dev_priv->dbi_dsr_info;
- struct mdfld_dsi_dbi_output **dbi_outputs;
- struct mdfld_dsi_dbi_output *dbi_output;
- int i;
- int can_enter_dsr = 0;
- u32 damage_mask;
-
- dbi_outputs = dsr_info->dbi_outputs;
- dbi_output = pipe ? dbi_outputs[1] : dbi_outputs[0];
-
- if (!dbi_output)
- return;
-
- if (pipe == 0)
- damage_mask = dev_priv->dsr_fb_update & MDFLD_DSR_DAMAGE_MASK_0;
- else if (pipe == 2)
- damage_mask = dev_priv->dsr_fb_update & MDFLD_DSR_DAMAGE_MASK_2;
- else
- return;
-
- /* If the FB is damaged and the panel is on, update the on-panel FB */
- if (damage_mask && dbi_output->dbi_panel_on) {
- dbi_output->dsr_fb_update_done = false;
-
- if (dbi_output->p_funcs->update_fb)
- dbi_output->p_funcs->update_fb(dbi_output, pipe);
-
- if (dev_priv->dsr_enable && dbi_output->dsr_fb_update_done)
- dev_priv->dsr_fb_update &= ~damage_mask;
-
- /* Clear the IN_DSR flag */
- dbi_output->mode_flags &= ~MODE_SETTING_IN_DSR;
-
- dbi_output->dsr_idle_count = 0;
- } else {
- dbi_output->dsr_idle_count++;
- }
-
- switch (dsr_info->dbi_output_num) {
- case 1:
- if (dbi_output->dsr_idle_count > MDFLD_DSR_MAX_IDLE_COUNT)
- can_enter_dsr = 1;
- break;
- case 2:
- if (dbi_outputs[0]->dsr_idle_count > MDFLD_DSR_MAX_IDLE_COUNT
- && dbi_outputs[1]->dsr_idle_count > MDFLD_DSR_MAX_IDLE_COUNT)
- can_enter_dsr = 1;
- break;
- default:
- DRM_ERROR("Wrong DBI output number\n");
- }
-
- /* Try to enter DSR */
- if (can_enter_dsr) {
- for (i = 0; i < dsr_info->dbi_output_num; i++) {
- if (!mdfld_dbi_is_in_dsr(dev) && dbi_outputs[i] &&
- !(dbi_outputs[i]->mode_flags & MODE_SETTING_ON_GOING)) {
- mdfld_dsi_dbi_enter_dsr(dbi_outputs[i],
- dbi_outputs[i]->channel_num ? 2 : 0);
-#if 0
- enter_dsr = 1;
- pr_err("%s: enter_dsr = 1\n", __func__);
-#endif
- }
- }
- /* Schedule a runtime PM suspend after gfxrtdelay */
-#ifdef CONFIG_GFX_RTPM
- if (!dev_priv->rpm_enabled
- || !enter_dsr
- /* || (REG_READ(HDMIB_CONTROL) & HDMIB_PORT_EN) */
- || pm_schedule_suspend(&dev->pdev->dev, gfxrtdelay))
- dev_warn(dev->dev,
- "Runtime PM schedule suspend failed, rpm %d\n",
- dev_priv->rpm_enabled);
-#endif
- }
-}
-
-int mdfld_dbi_dsr_init(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct mdfld_dbi_dsr_info *dsr_info = dev_priv->dbi_dsr_info;
-
- if (!dsr_info || IS_ERR(dsr_info)) {
- dsr_info = kzalloc(sizeof(struct mdfld_dbi_dsr_info),
- GFP_KERNEL);
- if (!dsr_info) {
- dev_err(dev->dev, "No memory\n");
- return -ENOMEM;
- }
- dev_priv->dbi_dsr_info = dsr_info;
- }
- return 0;
-}
-
-void mdfld_dbi_dsr_exit(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct mdfld_dbi_dsr_info *dsr_info = dev_priv->dbi_dsr_info;
-
- if (dsr_info) {
- kfree(dsr_info);
- dev_priv->dbi_dsr_info = NULL;
- }
-}
-
-void mdfld_dsi_controller_dbi_init(struct mdfld_dsi_config *dsi_config,
- int pipe)
-{
- struct drm_device *dev = dsi_config->dev;
- u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
- int lane_count = dsi_config->lane_count;
- u32 val = 0;
-
- dev_dbg(dev->dev, "Init DBI interface on pipe %d...\n", pipe);
-
- /* Un-ready device */
- REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000000);
-
- /* Init dsi adapter before kicking off */
- REG_WRITE((MIPIA_CONTROL_REG + reg_offset), 0x00000018);
-
- /* TODO: figure out how to setup these registers */
- REG_WRITE((MIPIA_DPHY_PARAM_REG + reg_offset), 0x150c3408);
- REG_WRITE((MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG + reg_offset),
- 0x000a0014);
- REG_WRITE((MIPIA_DBI_BW_CTRL_REG + reg_offset), 0x00000400);
- REG_WRITE((MIPIA_DBI_FIFO_THROTTLE_REG + reg_offset), 0x00000001);
- REG_WRITE((MIPIA_HS_LS_DBI_ENABLE_REG + reg_offset), 0x00000000);
-
- /* Enable all interrupts */
- REG_WRITE((MIPIA_INTR_EN_REG + reg_offset), 0xffffffff);
- /* Max value: 20 clock cycles of txclkesc */
- REG_WRITE((MIPIA_TURN_AROUND_TIMEOUT_REG + reg_offset), 0x0000001f);
- /* Min 21 txclkesc, max: ffffh */
- REG_WRITE((MIPIA_DEVICE_RESET_TIMER_REG + reg_offset), 0x0000ffff);
- /* Min: 7d0 max: 4e20 */
- REG_WRITE((MIPIA_INIT_COUNT_REG + reg_offset), 0x00000fa0);
-
- /* Set up func_prg */
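- /* DSI_FUNC_PRG: data lane count | virtual channel number | DBI colour format */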
- val |= lane_count;
- val |= (dsi_config->channel_num << DSI_DBI_VIRT_CHANNEL_OFFSET);
- val |= DSI_DBI_COLOR_FORMAT_OPTION2;
- REG_WRITE((MIPIA_DSI_FUNC_PRG_REG + reg_offset), val);
-
- REG_WRITE((MIPIA_HS_TX_TIMEOUT_REG + reg_offset), 0x3fffff);
- REG_WRITE((MIPIA_LP_RX_TIMEOUT_REG + reg_offset), 0xffff);
-
- /* De-assert dbi_stall when half of DBI FIFO is empty */
- /* REG_WRITE((MIPIA_DBI_FIFO_THROTTLE_REG + reg_offset), 0x00000000); */
-
- REG_WRITE((MIPIA_HIGH_LOW_SWITCH_COUNT_REG + reg_offset), 0x46);
- REG_WRITE((MIPIA_EOT_DISABLE_REG + reg_offset), 0x00000000);
- REG_WRITE((MIPIA_LP_BYTECLK_REG + reg_offset), 0x00000004);
- REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000001);
-}
-
-#if 0
-/*DBI encoder helper funcs*/
-static const struct drm_encoder_helper_funcs mdfld_dsi_dbi_helper_funcs = {
- .dpms = mdfld_dsi_dbi_dpms,
- .mode_fixup = mdfld_dsi_dbi_mode_fixup,
- .prepare = mdfld_dsi_dbi_prepare,
- .mode_set = mdfld_dsi_dbi_mode_set,
- .commit = mdfld_dsi_dbi_commit,
-};
-
-/*DBI encoder funcs*/
-static const struct drm_encoder_funcs mdfld_dsi_dbi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-#endif
-
-/*
- * Init DSI DBI encoder.
- * Allocate an mdfld_dsi_encoder and attach it to the given @dsi_connector.
- * Returns a pointer to the newly allocated DBI encoder, or NULL on error.
- */
-struct mdfld_dsi_encoder *mdfld_dsi_dbi_init(struct drm_device *dev,
- struct mdfld_dsi_connector *dsi_connector,
- struct panel_funcs *p_funcs)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct mdfld_dsi_dbi_output *dbi_output = NULL;
- struct mdfld_dsi_config *dsi_config;
- struct drm_connector *connector = NULL;
- struct drm_encoder *encoder = NULL;
- struct drm_display_mode *fixed_mode = NULL;
- struct psb_gtt *pg = dev_priv ? (&dev_priv->gtt) : NULL;
- struct mdfld_dbi_dpu_info *dpu_info = dev_priv ? (dev_priv->dbi_dpu_info) : NULL;
- struct mdfld_dbi_dsr_info *dsr_info = dev_priv ? (dev_priv->dbi_dsr_info) : NULL;
- u32 data = 0;
- int pipe;
- int ret;
-
- if (!pg || !dsi_connector || !p_funcs) {
- WARN_ON(1);
- return NULL;
- }
-
- dsi_config = mdfld_dsi_get_config(dsi_connector);
- pipe = dsi_connector->pipe;
-
- /* Panel hard reset */
- if (p_funcs->reset) {
- ret = p_funcs->reset(pipe);
- if (ret) {
- DRM_ERROR("Panel %d hard-reset failed\n", pipe);
- return NULL;
- }
- }
- /* Panel drvIC init */
- if (p_funcs->drv_ic_init)
- p_funcs->drv_ic_init(dsi_config, pipe);
-
- /* Panel power mode detect */
- ret = mdfld_dsi_get_power_mode(dsi_config,
- &data,
- MDFLD_DSI_HS_TRANSMISSION);
- if (ret) {
- DRM_ERROR("Panel %d get power mode failed\n", pipe);
- dsi_connector->status = connector_status_disconnected;
- } else {
- DRM_INFO("pipe %d power mode 0x%x\n", pipe, data);
- dsi_connector->status = connector_status_connected;
- }
-
- /*TODO: get panel info from DDB*/
-
- dbi_output = kzalloc(sizeof(struct mdfld_dsi_dbi_output), GFP_KERNEL);
- if (!dbi_output) {
- dev_err(dev->dev, "No memory\n");
- return NULL;
- }
-
- if (dsi_connector->pipe == 0) {
- dbi_output->channel_num = 0;
- dev_priv->dbi_output = dbi_output;
- } else if (dsi_connector->pipe == 2) {
- dbi_output->channel_num = 1;
- dev_priv->dbi_output2 = dbi_output;
- } else {
- dev_err(dev->dev, "only two DSI outputs are supported\n");
- goto out_err1;
- }
-
- dbi_output->dev = dev;
- dbi_output->p_funcs = p_funcs;
- fixed_mode = dsi_config->fixed_mode;
- dbi_output->panel_fixed_mode = fixed_mode;
-
- /* Create drm encoder object */
- connector = &dsi_connector->base.base;
- encoder = &dbi_output->base.base;
- /* Review this if we ever get MIPI-HDMI bridges or similar */
- drm_encoder_init(dev,
- encoder,
- p_funcs->encoder_funcs,
- DRM_MODE_ENCODER_LVDS);
- drm_encoder_helper_add(encoder, p_funcs->encoder_helper_funcs);
-
- /* Attach to given connector */
- drm_mode_connector_attach_encoder(connector, encoder);
-
- /* Set possible CRTCs and clones */
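- /* possible_crtcs is a bitmask of CRTC indices, possible_clones a bitmask of encoder indices */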
- if (dsi_connector->pipe) {
- encoder->possible_crtcs = (1 << 2);
- encoder->possible_clones = (1 << 1);
- } else {
- encoder->possible_crtcs = (1 << 0);
- encoder->possible_clones = (1 << 0);
- }
-
- dev_priv->dsr_fb_update = 0;
- dev_priv->dsr_enable = false;
- dev_priv->exit_idle = mdfld_dsi_dbi_exit_dsr;
-
- dbi_output->first_boot = true;
- dbi_output->mode_flags = MODE_SETTING_IN_ENCODER;
-
- /* Add this output to dpu_info if in DPU mode */
- if (dpu_info && dsi_connector->status == connector_status_connected) {
- if (dsi_connector->pipe == 0)
- dpu_info->dbi_outputs[0] = dbi_output;
- else
- dpu_info->dbi_outputs[1] = dbi_output;
-
- dpu_info->dbi_output_num++;
- } else if (dsi_connector->status == connector_status_connected) {
- /* Otherwise add this output to dsr_info */
- if (dsi_connector->pipe == 0)
- dsr_info->dbi_outputs[0] = dbi_output;
- else
- dsr_info->dbi_outputs[1] = dbi_output;
-
- dsr_info->dbi_output_num++;
- }
- return &dbi_output->base;
-out_err1:
- kfree(dbi_output);
- return NULL;
-}
diff --git a/drivers/staging/gma500/mdfld_dsi_dbi.h b/drivers/staging/gma500/mdfld_dsi_dbi.h
deleted file mode 100644
index f0fa986..0000000
--- a/drivers/staging/gma500/mdfld_dsi_dbi.h
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * jim liu <jim.liu@intel.com>
- * Jackie Li<yaodong.li@intel.com>
- */
-
-#ifndef __MDFLD_DSI_DBI_H__
-#define __MDFLD_DSI_DBI_H__
-
-#include <linux/backlight.h>
-#include <drm/drmP.h>
-#include <drm/drm.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_edid.h>
-
-#include "psb_drv.h"
-#include "psb_intel_drv.h"
-#include "psb_intel_reg.h"
-#include "power.h"
-
-#include "mdfld_dsi_output.h"
-#include "mdfld_output.h"
-
-/*
- * DBI encoder which inherits from mdfld_dsi_encoder
- */
-struct mdfld_dsi_dbi_output {
- struct mdfld_dsi_encoder base;
- struct drm_display_mode *panel_fixed_mode;
- u8 last_cmd;
- u8 lane_count;
- u8 channel_num;
- struct drm_device *dev;
-
- /* Backlight operations */
-
- /* DSR timer */
- u32 dsr_idle_count;
- bool dsr_fb_update_done;
-
- /* Mode setting flags */
- u32 mode_flags;
-
- /* Panel status */
- bool dbi_panel_on;
- bool first_boot;
- struct panel_funcs *p_funcs;
-
- /* DPU */
- u32 *dbi_cb_addr;
- u32 dbi_cb_phy;
- spinlock_t cb_lock;
- u32 cb_write;
-};
-
-#define MDFLD_DSI_DBI_OUTPUT(dsi_encoder) \
- container_of(dsi_encoder, struct mdfld_dsi_dbi_output, base)
-
-struct mdfld_dbi_dsr_info {
- int dbi_output_num;
- struct mdfld_dsi_dbi_output *dbi_outputs[2];
-
- u32 dsr_idle_count;
-};
-
-#define DBI_CB_TIMEOUT_COUNT 0xffff
-
-/* Offsets */
-#define CMD_MEM_ADDR_OFFSET 0
-
-#define CMD_DATA_SRC_SYSTEM_MEM 0
-#define CMD_DATA_SRC_PIPE 1
-
-static inline int mdfld_dsi_dbi_fifo_ready(struct mdfld_dsi_dbi_output *dbi_output)
-{
- struct drm_device *dev = dbi_output->dev;
- u32 retry = DBI_CB_TIMEOUT_COUNT;
- int reg_offset = (dbi_output->channel_num == 1) ? MIPIC_REG_OFFSET : 0;
- int ret = 0;
-
- /* Query the DBI FIFO status; bit 27 is "DBI FIFO empty" */
- while (retry) {
- if (REG_READ(MIPIA_GEN_FIFO_STAT_REG + reg_offset) & (1 << 27))
- break;
- retry--;
- }
-
- if (!retry) {
- DRM_ERROR("Timeout waiting for DBI FIFO empty\n");
- ret = -EAGAIN;
- }
- return ret;
-}
-
-static inline int mdfld_dsi_dbi_cmd_sent(struct mdfld_dsi_dbi_output *dbi_output)
-{
- struct drm_device *dev = dbi_output->dev;
- u32 retry = DBI_CB_TIMEOUT_COUNT;
- int reg_offset = (dbi_output->channel_num == 1) ? MIPIC_REG_OFFSET : 0;
- int ret = 0;
-
- /* Query the command execution status; bit 0 clears once the command has been sent */
- while (retry) {
- if (!(REG_READ(MIPIA_CMD_ADD_REG + reg_offset) & (1 << 0)))
- break;
- retry--;
- }
-
- if (!retry) {
- DRM_ERROR("Timeout waiting for DBI command status\n");
- ret = -EAGAIN;
- }
-
- return ret;
-}
-
-static inline int mdfld_dsi_dbi_cb_ready(struct mdfld_dsi_dbi_output *dbi_output)
-{
- int ret = 0;
-
- /* Query the command execution status */
- ret = mdfld_dsi_dbi_cmd_sent(dbi_output);
- if (ret) {
- DRM_ERROR("Peripheral is busy\n");
- return -EAGAIN;
- }
-
- /* Query the DBI FIFO status */
- ret = mdfld_dsi_dbi_fifo_ready(dbi_output);
- if (ret) {
- DRM_ERROR("DBI FIFO is not empty\n");
- ret = -EAGAIN;
- }
- return ret;
-}
-
-extern void mdfld_dsi_dbi_output_init(struct drm_device *dev,
- struct psb_intel_mode_device *mode_dev, int pipe);
-extern void mdfld_dsi_dbi_exit_dsr(struct drm_device *dev, u32 update_src);
-extern void mdfld_dsi_dbi_enter_dsr(struct mdfld_dsi_dbi_output *dbi_output,
- int pipe);
-extern int mdfld_dbi_dsr_init(struct drm_device *dev);
-extern void mdfld_dbi_dsr_exit(struct drm_device *dev);
-extern struct mdfld_dsi_encoder *mdfld_dsi_dbi_init(struct drm_device *dev,
- struct mdfld_dsi_connector *dsi_connector,
- struct panel_funcs *p_funcs);
-extern int mdfld_dsi_dbi_send_dcs(struct mdfld_dsi_dbi_output *dbi_output,
- u8 dcs, u8 *param, u32 num, u8 data_src);
-extern int mdfld_dsi_dbi_update_area(struct mdfld_dsi_dbi_output *dbi_output,
- u16 x1, u16 y1, u16 x2, u16 y2);
-extern int mdfld_dsi_dbi_update_power(struct mdfld_dsi_dbi_output *dbi_output,
- int mode);
-extern void mdfld_dsi_controller_dbi_init(struct mdfld_dsi_config *dsi_config,
- int pipe);
-
-#endif /*__MDFLD_DSI_DBI_H__*/
diff --git a/drivers/staging/gma500/mdfld_dsi_dbi_dpu.c b/drivers/staging/gma500/mdfld_dsi_dbi_dpu.c
deleted file mode 100644
index a4e2ff4..0000000
--- a/drivers/staging/gma500/mdfld_dsi_dbi_dpu.c
+++ /dev/null
@@ -1,778 +0,0 @@
-/*
- * Copyright © 2010-2011 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Jim Liu <jim.liu@intel.com>
- * Jackie Li<yaodong.li@intel.com>
- */
-
-#include "mdfld_dsi_dbi_dpu.h"
-#include "mdfld_dsi_dbi.h"
-
-/*
- * NOTE: all mdfld_*_damage functions must be called with dpu_update_lock held
- */
-
-static int mdfld_cursor_damage(struct mdfld_dbi_dpu_info *dpu_info,
- mdfld_plane_t plane,
- struct psb_drm_dpu_rect *damaged_rect)
-{
- int x, y;
- int new_x, new_y;
- struct psb_drm_dpu_rect *rect;
- struct psb_drm_dpu_rect *pipe_rect;
- int cursor_size;
- struct mdfld_cursor_info *cursor;
- mdfld_plane_t fb_plane;
-
- if (plane == MDFLD_CURSORA) {
- cursor = &dpu_info->cursors[0];
- x = dpu_info->cursors[0].x;
- y = dpu_info->cursors[0].y;
- cursor_size = dpu_info->cursors[0].size;
- pipe_rect = &dpu_info->damage_pipea;
- fb_plane = MDFLD_PLANEA;
- } else {
- cursor = &dpu_info->cursors[1];
- x = dpu_info->cursors[1].x;
- y = dpu_info->cursors[1].y;
- cursor_size = dpu_info->cursors[1].size;
- pipe_rect = &dpu_info->damage_pipec;
- fb_plane = MDFLD_PLANEC;
- }
- new_x = damaged_rect->x;
- new_y = damaged_rect->y;
-
- if (x == new_x && y == new_y)
- return 0;
-
- rect = &dpu_info->damaged_rects[plane];
- /* Cursor moved right (or stayed in the same column) */
- if (new_x >= x) {
- if (new_y > y) {
- rect->x = x;
- rect->y = y;
- rect->width = (new_x + cursor_size) - x;
- rect->height = (new_y + cursor_size) - y;
- goto cursor_out;
- } else {
- rect->x = x;
- rect->y = new_y;
- rect->width = (new_x + cursor_size) - x;
- rect->height = (y - new_y);
- goto cursor_out;
- }
- } else {
- if (new_y > y) {
- rect->x = new_x;
- rect->y = y;
- rect->width = (x + cursor_size) - new_x;
- rect->height = new_y - y;
- goto cursor_out;
- } else {
- rect->x = new_x;
- rect->y = new_y;
- rect->width = (x + cursor_size) - new_x;
- rect->height = (y + cursor_size) - new_y;
- }
- }
-cursor_out:
- if (new_x < 0)
- cursor->x = 0;
- else if (new_x > 864)
- cursor->x = 864;
- else
- cursor->x = new_x;
-
- if (new_y < 0)
- cursor->y = 0;
- else if (new_y > 480)
- cursor->y = 480;
- else
- cursor->y = new_y;
-
- /*
- * FIXME: this is a workaround for cursor plane update,
- * remove it later!
- */
- rect->x = 0;
- rect->y = 0;
- rect->width = 864;
- rect->height = 480;
-
- mdfld_check_boundary(dpu_info, rect);
- mdfld_dpu_region_extent(pipe_rect, rect);
-
- /* Update pending status of dpu_info */
- dpu_info->pending |= (1 << plane);
- /* Mark the fb plane as pending as well */
- dpu_info->pending |= (1 << fb_plane);
- return 0;
-}
-
-static int mdfld_fb_damage(struct mdfld_dbi_dpu_info *dpu_info,
- mdfld_plane_t plane,
- struct psb_drm_dpu_rect *damaged_rect)
-{
- struct psb_drm_dpu_rect *rect;
-
- if (plane == MDFLD_PLANEA)
- rect = &dpu_info->damage_pipea;
- else
- rect = &dpu_info->damage_pipec;
-
- mdfld_check_boundary(dpu_info, damaged_rect);
-
- /* Add fb damage area to this pipe */
- mdfld_dpu_region_extent(rect, damaged_rect);
-
- /* Update pending status of dpu_info */
- dpu_info->pending |= (1 << plane);
- return 0;
-}
-
-/* Overlay damage is not handled yet; do nothing for now */
-static int mdfld_overlay_damage(struct mdfld_dbi_dpu_info *dpu_info,
- mdfld_plane_t plane,
- struct psb_drm_dpu_rect *damaged_rect)
-{
- return 0;
-}
-
-int mdfld_dbi_dpu_report_damage(struct drm_device *dev,
- mdfld_plane_t plane,
- struct psb_drm_dpu_rect *rect)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
- int ret = 0;
-
- /* DPU not in use, no damage reporting needed */
- if (dpu_info == NULL)
- return 0;
-
- spin_lock(&dpu_info->dpu_update_lock);
-
- switch (plane) {
- case MDFLD_PLANEA:
- case MDFLD_PLANEC:
- mdfld_fb_damage(dpu_info, plane, rect);
- break;
- case MDFLD_CURSORA:
- case MDFLD_CURSORC:
- mdfld_cursor_damage(dpu_info, plane, rect);
- break;
- case MDFLD_OVERLAYA:
- case MDFLD_OVERLAYC:
- mdfld_overlay_damage(dpu_info, plane, rect);
- break;
- default:
- DRM_ERROR("Invalid plane type %d\n", plane);
- ret = -EINVAL;
- }
- spin_unlock(&dpu_info->dpu_update_lock);
- return ret;
-}
-
-int mdfld_dbi_dpu_report_fullscreen_damage(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv;
- struct mdfld_dbi_dpu_info *dpu_info;
- struct mdfld_dsi_config *dsi_config;
- struct psb_drm_dpu_rect rect;
- int i;
-
- if (!dev) {
- DRM_ERROR("Invalid parameter\n");
- return -EINVAL;
- }
-
- dev_priv = dev->dev_private;
- dpu_info = dev_priv->dbi_dpu_info;
-
- /* We may simply not be in DPU mode; nothing to do */
- if (!dpu_info)
- return -EINVAL;
-
- for (i = 0; i < dpu_info->dbi_output_num; i++) {
- dsi_config = dev_priv->dsi_configs[i];
- if (dsi_config) {
- rect.x = rect.y = 0;
- rect.width = dsi_config->fixed_mode->hdisplay;
- rect.height = dsi_config->fixed_mode->vdisplay;
- mdfld_dbi_dpu_report_damage(dev,
- i ? (MDFLD_PLANEC) : (MDFLD_PLANEA),
- &rect);
- }
- }
- /* Exit DSR state */
- mdfld_dpu_exit_dsr(dev);
- return 0;
-}
-
-int mdfld_dsi_dbi_dsr_off(struct drm_device *dev,
- struct psb_drm_dpu_rect *rect)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
-
- mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEA, rect);
-
- /* In dual display mode, report damage on the second pipe as well */
- if (dpu_info->dbi_output_num == 2)
- mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEC, rect);
-
- /* Force dsi to exit DSR mode */
- mdfld_dpu_exit_dsr(dev);
- return 0;
-}
-
-static void mdfld_dpu_cursor_plane_flush(struct mdfld_dbi_dpu_info *dpu_info,
- mdfld_plane_t plane)
-{
- struct drm_device *dev = dpu_info->dev;
- u32 curpos_reg = CURAPOS;
- u32 curbase_reg = CURABASE;
- u32 curcntr_reg = CURACNTR;
- struct mdfld_cursor_info *cursor = &dpu_info->cursors[0];
-
- if (plane == MDFLD_CURSORC) {
- curpos_reg = CURCPOS;
- curbase_reg = CURCBASE;
- curcntr_reg = CURCCNTR;
- cursor = &dpu_info->cursors[1];
- }
-
- REG_WRITE(curcntr_reg, REG_READ(curcntr_reg));
- REG_WRITE(curpos_reg,
- (((cursor->x & CURSOR_POS_MASK) << CURSOR_X_SHIFT) |
- ((cursor->y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT)));
- REG_WRITE(curbase_reg, REG_READ(curbase_reg));
-}
-
-static void mdfld_dpu_fb_plane_flush(struct mdfld_dbi_dpu_info *dpu_info,
- mdfld_plane_t plane)
-{
- u32 pipesrc_reg = PIPEASRC;
- u32 dspsize_reg = DSPASIZE;
- u32 dspoff_reg = DSPALINOFF;
- u32 dspsurf_reg = DSPASURF;
- u32 dspstride_reg = DSPASTRIDE;
- u32 stride;
- struct psb_drm_dpu_rect *rect = &dpu_info->damage_pipea;
- struct drm_device *dev = dpu_info->dev;
-
- if (plane == MDFLD_PLANEC) {
- pipesrc_reg = PIPECSRC;
- dspsize_reg = DSPCSIZE;
- dspoff_reg = DSPCLINOFF;
- dspsurf_reg = DSPCSURF;
- dspstride_reg = DSPCSTRIDE;
- rect = &dpu_info->damage_pipec;
- }
-
- stride = REG_READ(dspstride_reg);
- /* FIXME: should I do the pipe src update here? */
- REG_WRITE(pipesrc_reg, ((rect->width - 1) << 16) | (rect->height - 1));
- /* Flush plane */
- REG_WRITE(dspsize_reg, ((rect->height - 1) << 16) | (rect->width - 1));
- REG_WRITE(dspoff_reg, ((rect->x * 4) + (rect->y * stride)));
- REG_WRITE(dspsurf_reg, REG_READ(dspsurf_reg));
-
- /*
- * TODO: wait for the flip to finish and restore the pipesrc reg,
- * or the cursor will be shown at the wrong position
- */
-}
-
-static void mdfld_dpu_overlay_plane_flush(struct mdfld_dbi_dpu_info *dpu_info,
- mdfld_plane_t plane)
-{
-}
-
-/*
- * TODO: we are still in DBI normal mode; partial mode will be tried
- * later.
- */
-static int mdfld_dbi_prepare_cb(struct mdfld_dsi_dbi_output *dbi_output,
- struct mdfld_dbi_dpu_info *dpu_info, int pipe)
-{
- u8 *cb_addr = (u8 *)dbi_output->dbi_cb_addr;
- u32 *index;
- struct psb_drm_dpu_rect *rect = pipe ?
- (&dpu_info->damage_pipec) : (&dpu_info->damage_pipea);
-
- /* FIXME: lock command buffer, this may lead to a deadlock,
- as we already hold the dpu_update_lock */
- if (!spin_trylock(&dbi_output->cb_lock)) {
- DRM_ERROR("lock command buffer failed, try again\n");
- return -EAGAIN;
- }
-
- index = &dbi_output->cb_write;
-
- if (*index) {
- DRM_ERROR("DBI command buffer unclean\n");
- spin_unlock(&dbi_output->cb_lock);
- return -EAGAIN;
- }
-
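- /*
-  * Build the DCS partial-update sequence: set_column_address,
-  * set_page_address, then write_memory_start. Each command is
-  * placed in its own 8-byte slot of the command buffer, hence the
-  * explicit index values of 8 and 16 below.
-  */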
- /* Column address */
- *(cb_addr + ((*index)++)) = set_column_address;
- *(cb_addr + ((*index)++)) = rect->x >> 8;
- *(cb_addr + ((*index)++)) = rect->x;
- *(cb_addr + ((*index)++)) = (rect->x + rect->width - 1) >> 8;
- *(cb_addr + ((*index)++)) = (rect->x + rect->width - 1);
-
- *index = 8;
-
- /* Page address */
- *(cb_addr + ((*index)++)) = set_page_addr;
- *(cb_addr + ((*index)++)) = rect->y >> 8;
- *(cb_addr + ((*index)++)) = rect->y;
- *(cb_addr + ((*index)++)) = (rect->y + rect->height - 1) >> 8;
- *(cb_addr + ((*index)++)) = (rect->y + rect->height - 1);
-
- *index = 16;
-
- /* Write memory start */
- *(cb_addr + ((*index)++)) = write_mem_start;
-
- return 0;
-}
-
-static int mdfld_dbi_flush_cb(struct mdfld_dsi_dbi_output *dbi_output, int pipe)
-{
- u32 cmd_phy;
- u32 *index;
- int reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
- struct drm_device *dev;
-
- /* Validate the output before dereferencing it */
- if (!dbi_output)
- return 0;
-
- dev = dbi_output->dev;
- cmd_phy = dbi_output->dbi_cb_phy;
- index = &dbi_output->cb_write;
-
- if (*index == 0)
- return 0;
-
- REG_WRITE((MIPIA_CMD_LEN_REG + reg_offset), 0x010505);
- REG_WRITE((MIPIA_CMD_ADD_REG + reg_offset), cmd_phy | 3);
-
- *index = 0;
-
- /* FIXME: unlock command buffer */
- spin_unlock(&dbi_output->cb_lock);
- return 0;
-}
-
-static int mdfld_dpu_update_pipe(struct mdfld_dsi_dbi_output *dbi_output,
- struct mdfld_dbi_dpu_info *dpu_info, int pipe)
-{
- struct drm_device *dev = dbi_output->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- mdfld_plane_t cursor_plane = MDFLD_CURSORA;
- mdfld_plane_t fb_plane = MDFLD_PLANEA;
- mdfld_plane_t overlay_plane = MDFLD_OVERLAYA;
- int ret = 0;
- u32 plane_mask = MDFLD_PIPEA_PLANE_MASK;
-
- /* Damaged rects on this pipe */
- if (pipe) {
- cursor_plane = MDFLD_CURSORC;
- fb_plane = MDFLD_PLANEC;
- overlay_plane = MDFLD_OVERLAYC;
- plane_mask = MDFLD_PIPEC_PLANE_MASK;
- }
-
- /* Update the cursor assigned to @pipe */
- if (dpu_info->pending & (1 << cursor_plane))
- mdfld_dpu_cursor_plane_flush(dpu_info, cursor_plane);
-
- /* Update the fb plane assigned to @pipe */
- if (dpu_info->pending & (1 << fb_plane))
- mdfld_dpu_fb_plane_flush(dpu_info, fb_plane);
-
- /* TODO: update overlay */
- if (dpu_info->pending & (1 << overlay_plane))
- mdfld_dpu_overlay_plane_flush(dpu_info, overlay_plane);
-
- /* Flush damage area to panel fb */
- if (dpu_info->pending & plane_mask) {
- ret = mdfld_dbi_prepare_cb(dbi_output, dpu_info, pipe);
- /*
- * TODO: remove the dsr_enable check later; it was added
- * so that the text console could boot smoothly
- */
- /* Clean pending flags on this pipe */
- if (!ret && dev_priv->dsr_enable) {
- dpu_info->pending &= ~plane_mask;
- /* Reset overlay pipe damage rect */
- mdfld_dpu_init_damage(dpu_info, pipe);
- }
- }
- return ret;
-}
-
-static int mdfld_dpu_update_fb(struct drm_device *dev)
-{
- struct drm_crtc *crtc;
- struct psb_intel_crtc *psb_crtc;
- struct mdfld_dsi_dbi_output **dbi_output;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
- bool pipe_updated[2];
- unsigned long irq_flags;
- u32 dpll_reg = MRST_DPLL_A;
- u32 dspcntr_reg = DSPACNTR;
- u32 pipeconf_reg = PIPEACONF;
- u32 dsplinoff_reg = DSPALINOFF;
- u32 dspsurf_reg = DSPASURF;
- u32 mipi_state_reg = MIPIA_INTR_STAT_REG;
- u32 reg_offset = 0;
- int pipe;
- int i;
- int ret;
-
- dbi_output = dpu_info->dbi_outputs;
- pipe_updated[0] = pipe_updated[1] = false;
-
- if (!gma_power_begin(dev, true))
- return -EAGAIN;
-
- /* Try to prevent any new damage reports */
- if (!spin_trylock_irqsave(&dpu_info->dpu_update_lock, irq_flags))
- return -EAGAIN;
-
- for (i = 0; i < dpu_info->dbi_output_num; i++) {
- crtc = dbi_output[i]->base.base.crtc;
- psb_crtc = (crtc) ? to_psb_intel_crtc(crtc) : NULL;
-
- pipe = dbi_output[i]->channel_num ? 2 : 0;
-
- if (pipe == 2) {
- dspcntr_reg = DSPCCNTR;
- pipeconf_reg = PIPECCONF;
- dsplinoff_reg = DSPCLINOFF;
- dspsurf_reg = DSPCSURF;
- reg_offset = MIPIC_REG_OFFSET;
- }
-
- if (!(REG_READ((MIPIA_GEN_FIFO_STAT_REG + reg_offset))
- & (1 << 27)) ||
- !(REG_READ(dpll_reg) & DPLL_VCO_ENABLE) ||
- !(REG_READ(dspcntr_reg) & DISPLAY_PLANE_ENABLE) ||
- !(REG_READ(pipeconf_reg) & PIPEACONF_ENABLE)) {
- dev_err(dev->dev,
- "DBI FIFO is busy, DSI %d state %x\n",
- pipe,
- REG_READ(mipi_state_reg + reg_offset));
- continue;
- }
-
- /*
- * If the DBI output is in an exclusive state, the pipe
- * won't be updated
- */
- if (dbi_output[i]->dbi_panel_on &&
- !(dbi_output[i]->mode_flags & MODE_SETTING_ON_GOING) &&
- !(psb_crtc &&
- psb_crtc->mode_flags & MODE_SETTING_ON_GOING) &&
- !(dbi_output[i]->mode_flags & MODE_SETTING_IN_DSR)) {
- ret = mdfld_dpu_update_pipe(dbi_output[i],
- dpu_info, dbi_output[i]->channel_num ? 2 : 0);
- if (!ret)
- pipe_updated[i] = true;
- }
- }
-
- for (i = 0; i < dpu_info->dbi_output_num; i++)
- if (pipe_updated[i])
- mdfld_dbi_flush_cb(dbi_output[i],
- dbi_output[i]->channel_num ? 2 : 0);
-
- spin_unlock_irqrestore(&dpu_info->dpu_update_lock, irq_flags);
- gma_power_end(dev);
- return 0;
-}
-
-static int __mdfld_dbi_exit_dsr(struct mdfld_dsi_dbi_output *dbi_output,
- int pipe)
-{
- struct drm_device *dev;
- struct drm_crtc *crtc;
- struct psb_intel_crtc *psb_crtc;
- u32 reg_val;
- u32 dpll_reg = MRST_DPLL_A;
- u32 pipeconf_reg = PIPEACONF;
- u32 dspcntr_reg = DSPACNTR;
- u32 dspbase_reg = DSPABASE;
- u32 dspsurf_reg = DSPASURF;
- u32 reg_offset = 0;
-
- /* Check the output before dereferencing it */
- if (!dbi_output)
- return 0;
-
- dev = dbi_output->dev;
- crtc = dbi_output->base.base.crtc;
- psb_crtc = crtc ? to_psb_intel_crtc(crtc) : NULL;
-
- /* If a mode set is on-going, back off */
- if ((dbi_output->mode_flags & MODE_SETTING_ON_GOING) ||
- (psb_crtc && psb_crtc->mode_flags & MODE_SETTING_ON_GOING))
- return -EAGAIN;
-
- if (pipe == 2) {
- dpll_reg = MRST_DPLL_A;
- pipeconf_reg = PIPECCONF;
- dspcntr_reg = DSPCCNTR;
- dspbase_reg = MDFLD_DSPCBASE;
- dspsurf_reg = DSPCSURF;
-
- reg_offset = MIPIC_REG_OFFSET;
- }
-
- if (!gma_power_begin(dev, true))
- return -EAGAIN;
-
- /* Enable DPLL */
- reg_val = REG_READ(dpll_reg);
- if (!(reg_val & DPLL_VCO_ENABLE)) {
-
- if (reg_val & MDFLD_PWR_GATE_EN) {
- reg_val &= ~MDFLD_PWR_GATE_EN;
- REG_WRITE(dpll_reg, reg_val);
- REG_READ(dpll_reg);
- udelay(500);
- }
-
- reg_val |= DPLL_VCO_ENABLE;
- REG_WRITE(dpll_reg, reg_val);
- REG_READ(dpll_reg);
- udelay(500);
-
- /* FIXME: add a timeout so this cannot spin forever */
- while (!(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK))
- cpu_relax();
- }
-
- /* Enable pipe */
- reg_val = REG_READ(pipeconf_reg);
- if (!(reg_val & PIPEACONF_ENABLE)) {
- reg_val |= PIPEACONF_ENABLE;
- REG_WRITE(pipeconf_reg, reg_val);
- REG_READ(pipeconf_reg);
- udelay(500);
- mdfldWaitForPipeEnable(dev, pipe);
- }
-
- /* Enable plane */
- reg_val = REG_READ(dspcntr_reg);
- if (!(reg_val & DISPLAY_PLANE_ENABLE)) {
- reg_val |= DISPLAY_PLANE_ENABLE;
- REG_WRITE(dspcntr_reg, reg_val);
- REG_READ(dspcntr_reg);
- udelay(500);
- }
-
- gma_power_end(dev);
-
- /* Clean IN_DSR flag */
- dbi_output->mode_flags &= ~MODE_SETTING_IN_DSR;
-
- return 0;
-}
-
-int mdfld_dpu_exit_dsr(struct drm_device *dev)
-{
- struct mdfld_dsi_dbi_output **dbi_output;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
- int i;
- int pipe;
-
- dbi_output = dpu_info->dbi_outputs;
-
- for (i = 0; i < dpu_info->dbi_output_num; i++) {
- /* If this output is not in DSR mode, don't call exit dsr */
- if (dbi_output[i]->mode_flags & MODE_SETTING_IN_DSR)
- __mdfld_dbi_exit_dsr(dbi_output[i],
- dbi_output[i]->channel_num ? 2 : 0);
- }
-
- /* Enable TE interrupt */
- for (i = 0; i < dpu_info->dbi_output_num; i++) {
- /* Enable TE on the pipe whose panel is on, disable it on the other */
- pipe = dbi_output[i]->channel_num ? 2 : 0;
- if (dbi_output[i]->dbi_panel_on && pipe) {
- mdfld_disable_te(dev, 0);
- mdfld_enable_te(dev, 2);
- } else if (dbi_output[i]->dbi_panel_on && !pipe) {
- mdfld_disable_te(dev, 2);
- mdfld_enable_te(dev, 0);
- }
- }
- return 0;
-}
-
-static int mdfld_dpu_enter_dsr(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
- struct mdfld_dsi_dbi_output **dbi_output;
- int i;
-
- dbi_output = dpu_info->dbi_outputs;
-
- for (i = 0; i < dpu_info->dbi_output_num; i++) {
- /* If output is off or already in DSR state, don't re-enter */
- if (dbi_output[i]->dbi_panel_on &&
- !(dbi_output[i]->mode_flags & MODE_SETTING_IN_DSR)) {
- mdfld_dsi_dbi_enter_dsr(dbi_output[i],
- dbi_output[i]->channel_num ? 2 : 0);
- }
- }
-
- return 0;
-}
-
-static void mdfld_dbi_dpu_timer_func(unsigned long data)
-{
- struct drm_device *dev = (struct drm_device *)data;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
- struct timer_list *dpu_timer = &dpu_info->dpu_timer;
- unsigned long flags;
-
- if (dpu_info->pending) {
- dpu_info->idle_count = 0;
- /* Update panel fb with damaged area */
- mdfld_dpu_update_fb(dev);
- } else {
- dpu_info->idle_count++;
- }
-
- if (dpu_info->idle_count >= MDFLD_MAX_IDLE_COUNT) {
- mdfld_dpu_enter_dsr(dev);
- /* Returning without re-arming stops the timer */
- return;
- }
-
- spin_lock_irqsave(&dpu_info->dpu_timer_lock, flags);
- if (!timer_pending(dpu_timer)) {
- dpu_timer->expires = jiffies + MDFLD_DSR_DELAY;
- add_timer(dpu_timer);
- }
- spin_unlock_irqrestore(&dpu_info->dpu_timer_lock, flags);
-}
-
-void mdfld_dpu_update_panel(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
-
- if (dpu_info->pending) {
- dpu_info->idle_count = 0;
-
- /* Update the panel fb with the damaged area */
- mdfld_dpu_update_fb(dev);
- } else {
- dpu_info->idle_count++;
- }
-
- if (dpu_info->idle_count >= MDFLD_MAX_IDLE_COUNT) {
- /* Enter DSR */
- mdfld_dpu_enter_dsr(dev);
- }
-}
-
-static int mdfld_dbi_dpu_timer_init(struct drm_device *dev,
- struct mdfld_dbi_dpu_info *dpu_info)
-{
- struct timer_list *dpu_timer = &dpu_info->dpu_timer;
- unsigned long flags;
-
- spin_lock_init(&dpu_info->dpu_timer_lock);
- spin_lock_irqsave(&dpu_info->dpu_timer_lock, flags);
-
- init_timer(dpu_timer);
-
- dpu_timer->data = (unsigned long)dev;
- dpu_timer->function = mdfld_dbi_dpu_timer_func;
- dpu_timer->expires = jiffies + MDFLD_DSR_DELAY;
-
- spin_unlock_irqrestore(&dpu_info->dpu_timer_lock, flags);
-
- return 0;
-}
-
-void mdfld_dbi_dpu_timer_start(struct mdfld_dbi_dpu_info *dpu_info)
-{
- struct timer_list *dpu_timer = &dpu_info->dpu_timer;
- unsigned long flags;
-
- spin_lock_irqsave(&dpu_info->dpu_timer_lock, flags);
- if (!timer_pending(dpu_timer)) {
- dpu_timer->expires = jiffies + MDFLD_DSR_DELAY;
- add_timer(dpu_timer);
- }
- spin_unlock_irqrestore(&dpu_info->dpu_timer_lock, flags);
-}
-
-int mdfld_dbi_dpu_init(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
-
- if (!dpu_info || IS_ERR(dpu_info)) {
- dpu_info = kzalloc(sizeof(struct mdfld_dbi_dpu_info),
- GFP_KERNEL);
- if (!dpu_info) {
- DRM_ERROR("No memory\n");
- return -ENOMEM;
- }
- dev_priv->dbi_dpu_info = dpu_info;
- }
-
- dpu_info->dev = dev;
-
- dpu_info->cursors[0].size = MDFLD_CURSOR_SIZE;
- dpu_info->cursors[1].size = MDFLD_CURSOR_SIZE;
-
- /* Init dpu_update_lock */
- spin_lock_init(&dpu_info->dpu_update_lock);
-
- /* Init the DPU refresh timer */
- mdfld_dbi_dpu_timer_init(dev, dpu_info);
-
- /* Init the per-pipe damage areas */
- mdfld_dpu_init_damage(dpu_info, 0);
- mdfld_dpu_init_damage(dpu_info, 2);
-
- return 0;
-}
-
-void mdfld_dbi_dpu_exit(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
-
- if (!dpu_info)
- return;
-
- del_timer_sync(&dpu_info->dpu_timer);
- kfree(dpu_info);
- dev_priv->dbi_dpu_info = NULL;
-}
-
-
diff --git a/drivers/staging/gma500/mdfld_dsi_dbi_dpu.h b/drivers/staging/gma500/mdfld_dsi_dbi_dpu.h
deleted file mode 100644
index 42367ed..0000000
--- a/drivers/staging/gma500/mdfld_dsi_dbi_dpu.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * jim liu <jim.liu@intel.com>
- * Jackie Li<yaodong.li@intel.com>
- */
-
-#ifndef __MDFLD_DSI_DBI_DPU_H__
-#define __MDFLD_DSI_DBI_DPU_H__
-
-#include "mdfld_dsi_dbi.h"
-
-typedef enum {
- MDFLD_PLANEA,
- MDFLD_PLANEC,
- MDFLD_CURSORA,
- MDFLD_CURSORC,
- MDFLD_OVERLAYA,
- MDFLD_OVERLAYC,
- MDFLD_PLANE_NUM,
-} mdfld_plane_t;
-
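-/* Bitmasks over mdfld_plane_t: pipe A owns bits 0/2/4, pipe C owns bits 1/3/5 */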
-#define MDFLD_PIPEA_PLANE_MASK 0x15
-#define MDFLD_PIPEC_PLANE_MASK 0x2A
-
-struct mdfld_cursor_info {
- int x, y;
- int size;
-};
-
-#define MDFLD_CURSOR_SIZE 64
-
-/*
- * Enter DSR mode if the screen has had no update for 2 frames.
- */
-#define MDFLD_MAX_IDLE_COUNT 2
-
-struct mdfld_dbi_dpu_info {
- struct drm_device *dev;
- /* Lock */
- spinlock_t dpu_update_lock;
-
- /* Cursor position */
- struct mdfld_cursor_info cursors[2];
-
- /* Damaged area for each plane */
- struct psb_drm_dpu_rect damaged_rects[MDFLD_PLANE_NUM];
-
- /* Final damaged area */
- struct psb_drm_dpu_rect damage_pipea;
- struct psb_drm_dpu_rect damage_pipec;
-
- /* Pending */
- u32 pending;
-
- /* DPU timer */
- struct timer_list dpu_timer;
- spinlock_t dpu_timer_lock;
-
- /* DPU idle count */
- u32 idle_count;
-
- /* DSI outputs */
- struct mdfld_dsi_dbi_output *dbi_outputs[2];
- int dbi_output_num;
-};
-
-static inline int mdfld_dpu_region_extent(struct psb_drm_dpu_rect *origin,
- struct psb_drm_dpu_rect *rect)
-{
- int x1, y1, x2, y2;
-
- x1 = origin->x + origin->width;
- y1 = origin->y + origin->height;
-
- x2 = rect->x + rect->width;
- y2 = rect->y + rect->height;
-
- origin->x = min(origin->x, rect->x);
- origin->y = min(origin->y, rect->y);
- origin->width = max(x1, x2) - origin->x;
- origin->height = max(y1, y2) - origin->y;
-
- return 0;
-}
-
-static inline void mdfld_check_boundary(struct mdfld_dbi_dpu_info *dpu_info,
- struct psb_drm_dpu_rect *rect)
-{
- if (rect->x < 0)
- rect->x = 0;
- if (rect->y < 0)
- rect->y = 0;
-
- if (rect->x + rect->width > 864)
- rect->width = 864 - rect->x;
- if (rect->y + rect->height > 480)
- rect->height = 480 - rect->y;
-
- if (!rect->width)
- rect->width = 1;
- if (!rect->height)
- rect->height = 1;
-}
-
-static inline void mdfld_dpu_init_damage(struct mdfld_dbi_dpu_info *dpu_info,
- int pipe)
-{
- struct psb_drm_dpu_rect *rect;
-
- if (pipe == 0)
- rect = &dpu_info->damage_pipea;
- else
- rect = &dpu_info->damage_pipec;
-
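- /*
-  * Start from an inverted "empty" extent so that the first call to
-  * mdfld_dpu_region_extent() collapses it to the reported rect.
-  */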
- rect->x = 864;
- rect->y = 480;
- rect->width = -864;
- rect->height = -480;
-}
-
-extern int mdfld_dsi_dbi_dsr_off(struct drm_device *dev,
- struct psb_drm_dpu_rect *rect);
-extern int mdfld_dbi_dpu_report_damage(struct drm_device *dev,
- mdfld_plane_t plane,
- struct psb_drm_dpu_rect *rect);
-extern int mdfld_dbi_dpu_report_fullscreen_damage(struct drm_device *dev);
-extern int mdfld_dpu_exit_dsr(struct drm_device *dev);
-extern void mdfld_dbi_dpu_timer_start(struct mdfld_dbi_dpu_info *dpu_info);
-extern int mdfld_dbi_dpu_init(struct drm_device *dev);
-extern void mdfld_dbi_dpu_exit(struct drm_device *dev);
-extern void mdfld_dpu_update_panel(struct drm_device *dev);
-
-#endif /*__MDFLD_DSI_DBI_DPU_H__*/
diff --git a/drivers/staging/gma500/mdfld_dsi_dpi.c b/drivers/staging/gma500/mdfld_dsi_dpi.c
deleted file mode 100644
index e685f12..0000000
--- a/drivers/staging/gma500/mdfld_dsi_dpi.c
+++ /dev/null
@@ -1,805 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * jim liu <jim.liu@intel.com>
- * Jackie Li<yaodong.li@intel.com>
- */
-
-#include "mdfld_dsi_dpi.h"
-#include "mdfld_output.h"
-#include "mdfld_dsi_pkg_sender.h"
-
-
-static void mdfld_wait_for_HS_DATA_FIFO(struct drm_device *dev, u32 pipe)
-{
- u32 gen_fifo_stat_reg = MIPIA_GEN_FIFO_STAT_REG;
- int timeout = 0;
-
- if (pipe == 2)
- gen_fifo_stat_reg += MIPIC_REG_OFFSET;
-
- udelay(500);
-
- /* This will time out after approximately 2+ seconds */
- while ((timeout < 20000) && (REG_READ(gen_fifo_stat_reg) & DSI_FIFO_GEN_HS_DATA_FULL)) {
- udelay(100);
- timeout++;
- }
-
- if (timeout == 20000)
- dev_warn(dev->dev, "MIPI: HS Data FIFO was never cleared!\n");
-}
-
-static void mdfld_wait_for_HS_CTRL_FIFO(struct drm_device *dev, u32 pipe)
-{
- u32 gen_fifo_stat_reg = MIPIA_GEN_FIFO_STAT_REG;
- int timeout = 0;
-
- if (pipe == 2)
- gen_fifo_stat_reg += MIPIC_REG_OFFSET;
-
- udelay(500);
-
- /* This will time out after approximately 2+ seconds */
- while ((timeout < 20000) && (REG_READ(gen_fifo_stat_reg) & DSI_FIFO_GEN_HS_CTRL_FULL)) {
- udelay(100);
- timeout++;
- }
- if (timeout == 20000)
- dev_warn(dev->dev, "MIPI: HS CMD FIFO was never cleared!\n");
-}
-
-static void mdfld_wait_for_DPI_CTRL_FIFO(struct drm_device *dev, u32 pipe)
-{
- u32 gen_fifo_stat_reg = MIPIA_GEN_FIFO_STAT_REG;
- int timeout = 0;
-
- if (pipe == 2)
- gen_fifo_stat_reg += MIPIC_REG_OFFSET;
-
- udelay(500);
-
- /* This will time out after approximately 2+ seconds */
- while ((timeout < 20000) && ((REG_READ(gen_fifo_stat_reg) & DPI_FIFO_EMPTY)
- != DPI_FIFO_EMPTY)) {
- udelay(100);
- timeout++;
- }
-
- if (timeout == 20000)
- dev_warn(dev->dev, "MIPI: DPI FIFO was never cleared!\n");
-}
-
-static void mdfld_wait_for_SPL_PKG_SENT(struct drm_device *dev, u32 pipe)
-{
- u32 intr_stat_reg = MIPIA_INTR_STAT_REG;
- int timeout = 0;
-
- if (pipe == 2)
- intr_stat_reg += MIPIC_REG_OFFSET;
-
- udelay(500);
-
- /* This will time out after approximately 2+ seconds */
- while ((timeout < 20000) && (!(REG_READ(intr_stat_reg) & DSI_INTR_STATE_SPL_PKG_SENT))) {
- udelay(100);
- timeout++;
- }
-
- if (timeout == 20000)
- dev_warn(dev->dev, "MIPI: SPL_PKT_SENT_INTERRUPT was not sent successfully!\n");
-}
-
-
-/* ************************************************************************* *\
- * FUNCTION: mdfld_dsi_tpo_ic_init
- *
- * DESCRIPTION: This function is called only by mrst_dsi_mode_set and
- * restore_display_registers. since this function does not
- * restore_display_registers. Since this function does not
- * does!
-\* ************************************************************************* */
-void mdfld_dsi_tpo_ic_init(struct mdfld_dsi_config *dsi_config, u32 pipe)
-{
- struct drm_device *dev = dsi_config->dev;
- u32 dcsChannelNumber = dsi_config->channel_num;
- u32 gen_data_reg = MIPIA_HS_GEN_DATA_REG;
- u32 gen_ctrl_reg = MIPIA_HS_GEN_CTRL_REG;
- u32 gen_ctrl_val = GEN_LONG_WRITE;
-
- if (pipe == 2) {
- gen_data_reg = HS_GEN_DATA_REG + MIPIC_REG_OFFSET;
- gen_ctrl_reg = HS_GEN_CTRL_REG + MIPIC_REG_OFFSET;
- }
-
- gen_ctrl_val |= dcsChannelNumber << DCS_CHANNEL_NUMBER_POS;
-
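- /*
-  * Each panel command below is sent as a generic long write: the
-  * payload (command byte in the lowest byte, parameters in the bytes
-  * above it) is pushed into the HS data FIFO, then the control
-  * register is written with the word count to start transmission.
-  */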
- /* Flip page order */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x00008036);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS));
-
- /* 0xF0 */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x005a5af0);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
-
- /* Write protection key */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x005a5af1);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
-
- /* 0xFC */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x005a5afc);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
-
- /* 0xB7 */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x770000b7);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x00000044);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x05 << WORD_COUNTS_POS));
-
- /* 0xB6 */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x000a0ab6);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
-
- /* 0xF2 */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x081010f2);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x4a070708);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x000000c5);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
-
- /* 0xF8 */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x024003f8);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x01030a04);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x0e020220);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x00000004);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x0d << WORD_COUNTS_POS));
-
- /* 0xE2 */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x398fc3e2);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x0000916f);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x06 << WORD_COUNTS_POS));
-
- /* 0xB0 */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x000000b0);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x02 << WORD_COUNTS_POS));
-
- /* 0xF4 */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x240242f4);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x78ee2002);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x2a071050);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x507fee10);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x10300710);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x14 << WORD_COUNTS_POS));
-
- /* 0xBA */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x19fe07ba);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x101c0a31);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x00000010);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
-
- /* 0xBB */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x28ff07bb);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x24280a31);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x00000034);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x09 << WORD_COUNTS_POS));
-
- /* 0xFB */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x535d05fb);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x1b1a2130);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x221e180e);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x131d2120);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x535d0508);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x1c1a2131);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x231f160d);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x111b2220);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x535c2008);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x1f1d2433);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x2c251a10);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x2c34372d);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x00000023);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS));
-
- /* 0xFA */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x525c0bfa);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x1c1c232f);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x2623190e);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x18212625);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x545d0d0e);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x1e1d2333);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x26231a10);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x1a222725);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x545d280f);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x21202635);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x31292013);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x31393d33);
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x00000029);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x31 << WORD_COUNTS_POS));
-
- /* Set DM */
- mdfld_wait_for_HS_DATA_FIFO(dev, pipe);
- REG_WRITE(gen_data_reg, 0x000100f7);
- mdfld_wait_for_HS_CTRL_FIFO(dev, pipe);
- REG_WRITE(gen_ctrl_reg, gen_ctrl_val | (0x03 << WORD_COUNTS_POS));
-}
-
-static u16 mdfld_dsi_dpi_to_byte_clock_count(int pixel_clock_count,
- int num_lane, int bpp)
-{
- return (u16)((pixel_clock_count * bpp) / (num_lane * 8));
-}
-
-/*
- * Calculate the DPI timings from the given drm mode @mode.
- * Returns 0 on success.
- * FIXME: I was using proposed mode value for calculation, may need to
- * use crtc mode values later
- */
-int mdfld_dsi_dpi_timing_calculation(struct drm_display_mode *mode,
- struct mdfld_dsi_dpi_timing *dpi_timing,
- int num_lane, int bpp)
-{
- int pclk_hsync, pclk_hfp, pclk_hbp, pclk_hactive;
- int pclk_vsync, pclk_vfp, pclk_vbp, pclk_vactive;
-
- if(!mode || !dpi_timing) {
- DRM_ERROR("Invalid parameter\n");
- return -EINVAL;
- }
-
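- /*
-  * Standard DRM mode geometry: front porch = sync_start - display,
-  * sync = sync_end - sync_start, back porch = total - sync_end.
-  */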
- pclk_hactive = mode->hdisplay;
- pclk_hfp = mode->hsync_start - mode->hdisplay;
- pclk_hsync = mode->hsync_end - mode->hsync_start;
- pclk_hbp = mode->htotal - mode->hsync_end;
-
- pclk_vactive = mode->vdisplay;
- pclk_vfp = mode->vsync_start - mode->vdisplay;
- pclk_vsync = mode->vsync_end - mode->vsync_start;
- pclk_vbp = mode->vtotal - mode->vsync_end;
-
- /*
- * Byte clock counts are calculated with the following formula:
- * bclock_count = pclk_count * bpp / (num_lane * 8)
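- * e.g. 16 pixel clocks at 24 bpp over 2 lanes -> 16 * 24 / (2 * 8) = 24 byte clocks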
- */
- dpi_timing->hsync_count = mdfld_dsi_dpi_to_byte_clock_count(pclk_hsync, num_lane, bpp);
- dpi_timing->hbp_count = mdfld_dsi_dpi_to_byte_clock_count(pclk_hbp, num_lane, bpp);
- dpi_timing->hfp_count = mdfld_dsi_dpi_to_byte_clock_count(pclk_hfp, num_lane, bpp);
- dpi_timing->hactive_count = mdfld_dsi_dpi_to_byte_clock_count(pclk_hactive, num_lane, bpp);
- dpi_timing->vsync_count = mdfld_dsi_dpi_to_byte_clock_count(pclk_vsync, num_lane, bpp);
- dpi_timing->vbp_count = mdfld_dsi_dpi_to_byte_clock_count(pclk_vbp, num_lane, bpp);
- dpi_timing->vfp_count = mdfld_dsi_dpi_to_byte_clock_count(pclk_vfp, num_lane, bpp);
-
- return 0;
-}
-
-void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *dsi_config, int pipe)
-{
- struct drm_device *dev = dsi_config->dev;
- u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
- int lane_count = dsi_config->lane_count;
- struct mdfld_dsi_dpi_timing dpi_timing;
- struct drm_display_mode *mode = dsi_config->mode;
- u32 val = 0;
-
- /*un-ready device*/
- REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000000);
-
- /*init dsi adapter before kicking off*/
- REG_WRITE((MIPIA_CONTROL_REG + reg_offset), 0x00000018);
-
- /*enable all interrupts*/
- REG_WRITE((MIPIA_INTR_EN_REG + reg_offset), 0xffffffff);
-
-
- /*set up func_prg*/
- val |= lane_count;
- val |= dsi_config->channel_num << DSI_DPI_VIRT_CHANNEL_OFFSET;
-
- switch(dsi_config->bpp) {
- case 16:
- val |= DSI_DPI_COLOR_FORMAT_RGB565;
- break;
- case 18:
- val |= DSI_DPI_COLOR_FORMAT_RGB666;
- break;
- case 24:
- val |= DSI_DPI_COLOR_FORMAT_RGB888;
- break;
- default:
- DRM_ERROR("unsupported color format, bpp = %d\n", dsi_config->bpp);
- }
- REG_WRITE((MIPIA_DSI_FUNC_PRG_REG + reg_offset), val);
-
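- /* HS TX timeout is roughly one frame worth of byte clocks: vtotal * htotal * bpp / (8 * lanes) */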
- REG_WRITE((MIPIA_HS_TX_TIMEOUT_REG + reg_offset),
- (mode->vtotal * mode->htotal * dsi_config->bpp / (8 * lane_count)) & DSI_HS_TX_TIMEOUT_MASK);
- REG_WRITE((MIPIA_LP_RX_TIMEOUT_REG + reg_offset), 0xffff & DSI_LP_RX_TIMEOUT_MASK);
-
- /*max value: 20 clock cycles of txclkesc*/
- REG_WRITE((MIPIA_TURN_AROUND_TIMEOUT_REG + reg_offset), 0x14 & DSI_TURN_AROUND_TIMEOUT_MASK);
-
- /*min 21 txclkesc, max: ffffh*/
- REG_WRITE((MIPIA_DEVICE_RESET_TIMER_REG + reg_offset), 0xffff & DSI_RESET_TIMER_MASK);
-
- REG_WRITE((MIPIA_DPI_RESOLUTION_REG + reg_offset), mode->vdisplay << 16 | mode->hdisplay);
-
- /*set DPI timing registers*/
- mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing, dsi_config->lane_count, dsi_config->bpp);
-
- REG_WRITE((MIPIA_HSYNC_COUNT_REG + reg_offset), dpi_timing.hsync_count & DSI_DPI_TIMING_MASK);
- REG_WRITE((MIPIA_HBP_COUNT_REG + reg_offset), dpi_timing.hbp_count & DSI_DPI_TIMING_MASK);
- REG_WRITE((MIPIA_HFP_COUNT_REG + reg_offset), dpi_timing.hfp_count & DSI_DPI_TIMING_MASK);
- REG_WRITE((MIPIA_HACTIVE_COUNT_REG + reg_offset), dpi_timing.hactive_count & DSI_DPI_TIMING_MASK);
- REG_WRITE((MIPIA_VSYNC_COUNT_REG + reg_offset), dpi_timing.vsync_count & DSI_DPI_TIMING_MASK);
- REG_WRITE((MIPIA_VBP_COUNT_REG + reg_offset), dpi_timing.vbp_count & DSI_DPI_TIMING_MASK);
- REG_WRITE((MIPIA_VFP_COUNT_REG + reg_offset), dpi_timing.vfp_count & DSI_DPI_TIMING_MASK);
-
- REG_WRITE((MIPIA_HIGH_LOW_SWITCH_COUNT_REG + reg_offset), 0x46);
-
- /*min: 7d0 max: 4e20*/
- REG_WRITE((MIPIA_INIT_COUNT_REG + reg_offset), 0x000007d0);
-
- /*set up video mode*/
- val = 0;
- val = dsi_config->video_mode | DSI_DPI_COMPLETE_LAST_LINE;
- REG_WRITE((MIPIA_VIDEO_MODE_FORMAT_REG + reg_offset), val);
-
- REG_WRITE((MIPIA_EOT_DISABLE_REG + reg_offset), 0x00000000);
-
- REG_WRITE((MIPIA_LP_BYTECLK_REG + reg_offset), 0x00000004);
-
- /*TODO: figure out how to setup these registers*/
- REG_WRITE((MIPIA_DPHY_PARAM_REG + reg_offset), 0x150c3408);
-
- REG_WRITE((MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG + reg_offset), (0xa << 16) | 0x14);
- /*set device ready*/
- REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000001);
-}
-
-void mdfld_dsi_dpi_turn_on(struct mdfld_dsi_dpi_output *output, int pipe)
-{
- struct drm_device *dev = output->dev;
- u32 reg_offset = 0;
-
- if(output->panel_on)
- return;
-
- if(pipe)
- reg_offset = MIPIC_REG_OFFSET;
-
- /* clear special packet sent bit */
- if(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) & DSI_INTR_STATE_SPL_PKG_SENT) {
- REG_WRITE((MIPIA_INTR_STAT_REG + reg_offset), DSI_INTR_STATE_SPL_PKG_SENT);
- }
-
- /* Send the turn-on packet */
- REG_WRITE((MIPIA_DPI_CONTROL_REG + reg_offset), DSI_DPI_CTRL_HS_TURN_ON);
-
- /*wait for SPL_PKG_SENT interrupt*/
- mdfld_wait_for_SPL_PKG_SENT(dev, pipe);
-
- if(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) & DSI_INTR_STATE_SPL_PKG_SENT) {
- REG_WRITE((MIPIA_INTR_STAT_REG + reg_offset), DSI_INTR_STATE_SPL_PKG_SENT);
- }
-
- output->panel_on = 1;
-
- /* FIXME the following is disabled to WA the X slow start issue for TMD panel */
- /* if(pipe == 2) */
- /* dev_priv->dpi_panel_on2 = true; */
- /* else if (pipe == 0) */
- /* dev_priv->dpi_panel_on = true; */
-}
-
-static void mdfld_dsi_dpi_shut_down(struct mdfld_dsi_dpi_output *output, int pipe)
-{
- struct drm_device *dev = output->dev;
- u32 reg_offset = 0;
-
- /* If the output is already off, or mode setting never happened, ignore this */
- if((!output->panel_on) || output->first_boot) {
- output->first_boot = 0;
- return;
- }
-
- if(pipe)
- reg_offset = MIPIC_REG_OFFSET;
-
- /* Wait for dpi fifo to empty */
- mdfld_wait_for_DPI_CTRL_FIFO(dev, pipe);
-
- /* Clear the special packet interrupt bit if set */
- if(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) & DSI_INTR_STATE_SPL_PKG_SENT) {
- REG_WRITE((MIPIA_INTR_STAT_REG + reg_offset), DSI_INTR_STATE_SPL_PKG_SENT);
- }
-
- if(REG_READ(MIPIA_DPI_CONTROL_REG + reg_offset) == DSI_DPI_CTRL_HS_SHUTDOWN) {
- dev_warn(dev->dev, "trying to send the same package again, abort!\n");
- goto shutdown_out;
- }
-
- REG_WRITE((MIPIA_DPI_CONTROL_REG + reg_offset), DSI_DPI_CTRL_HS_SHUTDOWN);
-
-shutdown_out:
- output->panel_on = 0;
- output->first_boot = 0;
-
- /* FIXME the following is disabled to WA the X slow start issue for TMD panel */
- /* if(pipe == 2) */
- /* dev_priv->dpi_panel_on2 = false; */
- /* else if (pipe == 0) */
- /* dev_priv->dpi_panel_on = false; */
- /* #ifdef CONFIG_PM_RUNTIME*/
- /* if (drm_psb_ospm && !enable_gfx_rtpm) { */
- /* pm_runtime_allow(&gpDrmDevice->pdev->dev); */
- /* schedule_delayed_work(&dev_priv->rtpm_work, 30 * 1000); */
- /* } */
- /*if (enable_gfx_rtpm) */
- /* pm_schedule_suspend(&dev->pdev->dev, gfxrtdelay); */
- /* #endif */
-}
-
-void mdfld_dsi_dpi_set_power(struct drm_encoder *encoder, bool on)
-{
- struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
- struct mdfld_dsi_dpi_output *dpi_output = MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
- struct mdfld_dsi_config *dsi_config = mdfld_dsi_encoder_get_config(dsi_encoder);
- int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
- struct drm_device *dev = dsi_config->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- u32 mipi_reg = MIPI;
- u32 pipeconf_reg = PIPEACONF;
-
- if(pipe) {
- mipi_reg = MIPI_C;
- pipeconf_reg = PIPECCONF;
- }
-
- /* Start up display island if it was shutdown */
- if (!gma_power_begin(dev, true))
- return;
-
- if(on) {
- if (mdfld_get_panel_type(dev, pipe) == TMD_VID){
- mdfld_dsi_dpi_turn_on(dpi_output, pipe);
- } else {
- /* Enable mipi port */
- REG_WRITE(mipi_reg, (REG_READ(mipi_reg) | (1 << 31)));
- REG_READ(mipi_reg);
-
- mdfld_dsi_dpi_turn_on(dpi_output, pipe);
- mdfld_dsi_tpo_ic_init(dsi_config, pipe);
- }
-
- if(pipe == 2) {
- dev_priv->dpi_panel_on2 = true;
- }
- else {
- dev_priv->dpi_panel_on = true;
- }
-
- } else {
- if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
- mdfld_dsi_dpi_shut_down(dpi_output, pipe);
- } else {
- mdfld_dsi_dpi_shut_down(dpi_output, pipe);
- /* Disable mipi port */
- REG_WRITE(mipi_reg, (REG_READ(mipi_reg) & ~(1<<31)));
- REG_READ(mipi_reg);
- }
-
- if(pipe == 2)
- dev_priv->dpi_panel_on2 = false;
- else
- dev_priv->dpi_panel_on = false;
- }
- gma_power_end(dev);
-}
-
-void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode)
-{
- dev_dbg(encoder->dev->dev, "DPMS %s\n",
- (mode == DRM_MODE_DPMS_ON ? "on":"off"));
-
- if (mode == DRM_MODE_DPMS_ON)
- mdfld_dsi_dpi_set_power(encoder, true);
- else {
- mdfld_dsi_dpi_set_power(encoder, false);
-#if 0 /* FIXME */
-#ifdef CONFIG_PM_RUNTIME
- if (enable_gfx_rtpm)
- pm_schedule_suspend(&gpDrmDevice->pdev->dev, gfxrtdelay);
-#endif
-#endif
- }
-}
-
-bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
- struct mdfld_dsi_config *dsi_config = mdfld_dsi_encoder_get_config(dsi_encoder);
- struct drm_display_mode *fixed_mode = dsi_config->fixed_mode;
-
- if(fixed_mode) {
- adjusted_mode->hdisplay = fixed_mode->hdisplay;
- adjusted_mode->hsync_start = fixed_mode->hsync_start;
- adjusted_mode->hsync_end = fixed_mode->hsync_end;
- adjusted_mode->htotal = fixed_mode->htotal;
- adjusted_mode->vdisplay = fixed_mode->vdisplay;
- adjusted_mode->vsync_start = fixed_mode->vsync_start;
- adjusted_mode->vsync_end = fixed_mode->vsync_end;
- adjusted_mode->vtotal = fixed_mode->vtotal;
- adjusted_mode->clock = fixed_mode->clock;
- drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
- }
-
- return true;
-}
-
-void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder)
-{
- mdfld_dsi_dpi_set_power(encoder, false);
-}
-
-void mdfld_dsi_dpi_commit(struct drm_encoder *encoder)
-{
- mdfld_dsi_dpi_set_power(encoder, true);
-}
-
-void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
- struct mdfld_dsi_dpi_output *dpi_output = MDFLD_DSI_DPI_OUTPUT(dsi_encoder);
- struct mdfld_dsi_config *dsi_config = mdfld_dsi_encoder_get_config(dsi_encoder);
- struct drm_device *dev = dsi_config->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- int pipe = mdfld_dsi_encoder_get_pipe(dsi_encoder);
-
- u32 pipeconf_reg = PIPEACONF;
- u32 dspcntr_reg = DSPACNTR;
- u32 mipi_reg = MIPI;
- u32 reg_offset = 0;
-
- u32 pipeconf = dev_priv->pipeconf;
- u32 dspcntr = dev_priv->dspcntr;
- u32 mipi = MIPI_PORT_EN | PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
-
- dev_dbg(dev->dev, "set mode %dx%d on pipe %d\n",
- mode->hdisplay, mode->vdisplay, pipe);
-
- if(pipe) {
- pipeconf_reg = PIPECCONF;
- dspcntr_reg = DSPCCNTR;
- mipi_reg = MIPI_C;
- reg_offset = MIPIC_REG_OFFSET;
- } else {
- mipi |= 2;
- }
-
- if (!gma_power_begin(dev, true))
- return;
-
- /* Set up mipi port FIXME: do at init time */
- REG_WRITE(mipi_reg, mipi);
- REG_READ(mipi_reg);
-
- /* Set up DSI controller DPI interface */
- mdfld_dsi_dpi_controller_init(dsi_config, pipe);
-
- if (mdfld_get_panel_type(dev, pipe) != TMD_VID) {
- /* Turn on DPI interface */
- mdfld_dsi_dpi_turn_on(dpi_output, pipe);
- }
-
- /* Set up pipe */
- REG_WRITE(pipeconf_reg, pipeconf);
- REG_READ(pipeconf_reg);
-
- /* Set up display plane */
- REG_WRITE(dspcntr_reg, dspcntr);
- REG_READ(dspcntr_reg);
-
- msleep(20); /* FIXME: this should wait for vblank */
-
- dev_dbg(dev->dev, "State %x, power %d\n",
- REG_READ(MIPIA_INTR_STAT_REG + reg_offset),
- dpi_output->panel_on);
-
- if (mdfld_get_panel_type(dev, pipe) != TMD_VID) {
- /* Init driver ic */
- mdfld_dsi_tpo_ic_init(dsi_config, pipe);
- /* Init backlight */
- mdfld_dsi_brightness_init(dsi_config, pipe);
- }
- gma_power_end(dev);
-}
-
-
-/*
- * Init DSI DPI encoder.
- * Allocate an mdfld_dsi_encoder and attach it to the given @dsi_connector.
- * Return a pointer to the newly allocated DPI encoder, or NULL on error.
- */
-struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
- struct mdfld_dsi_connector *dsi_connector,
- struct panel_funcs *p_funcs)
-{
- struct mdfld_dsi_dpi_output *dpi_output = NULL;
- struct mdfld_dsi_config *dsi_config;
- struct drm_connector *connector = NULL;
- struct drm_encoder *encoder = NULL;
- struct drm_display_mode *fixed_mode = NULL;
- int pipe;
- u32 data;
- int ret;
-
- if (!dsi_connector || !p_funcs) {
- WARN_ON(1);
- return NULL;
- }
-
- dsi_config = mdfld_dsi_get_config(dsi_connector);
- pipe = dsi_connector->pipe;
-
- /* Panel hard-reset */
- if (p_funcs->reset) {
- ret = p_funcs->reset(pipe);
- if (ret) {
- DRM_ERROR("Panel %d hard-reset failed\n", pipe);
- return NULL;
- }
- }
-
- /* Panel drvIC init */
- if (p_funcs->drv_ic_init)
- p_funcs->drv_ic_init(dsi_config, pipe);
-
- /* Panel power mode detect */
- ret = mdfld_dsi_get_power_mode(dsi_config,
- &data,
- MDFLD_DSI_LP_TRANSMISSION);
- if (ret) {
- DRM_ERROR("Panel %d get power mode failed\n", pipe);
- dsi_connector->status = connector_status_disconnected;
- } else {
- DRM_INFO("pipe %d power mode 0x%x\n", pipe, data);
- dsi_connector->status = connector_status_connected;
- }
-
- dpi_output = kzalloc(sizeof(struct mdfld_dsi_dpi_output), GFP_KERNEL);
- if(!dpi_output) {
- dev_err(dev->dev, "No memory for dsi_dpi_output\n");
- return NULL;
- }
-
- dpi_output->panel_on = 0;
-
- dpi_output->dev = dev;
- dpi_output->p_funcs = p_funcs;
- dpi_output->first_boot = 1;
-
- /* Get fixed mode */
- dsi_config = mdfld_dsi_get_config(dsi_connector);
- fixed_mode = dsi_config->fixed_mode;
-
- /* Create drm encoder object */
- connector = &dsi_connector->base.base;
- encoder = &dpi_output->base.base;
- /*
- * On existing hardware this will be a panel of some form,
- * if future devices also have HDMI bridges this will need
- * revisiting
- */
- drm_encoder_init(dev,
- encoder,
- p_funcs->encoder_funcs,
- DRM_MODE_ENCODER_LVDS);
- drm_encoder_helper_add(encoder,
- p_funcs->encoder_helper_funcs);
-
- /* Attach to given connector */
- drm_mode_connector_attach_encoder(connector, encoder);
-
- /* Set possible crtcs and clones */
- if(dsi_connector->pipe) {
- encoder->possible_crtcs = (1 << 2);
- encoder->possible_clones = (1 << 1);
- } else {
- encoder->possible_crtcs = (1 << 0);
- encoder->possible_clones = (1 << 0);
- }
- return &dpi_output->base;
-}
-
diff --git a/drivers/staging/gma500/mdfld_dsi_dpi.h b/drivers/staging/gma500/mdfld_dsi_dpi.h
deleted file mode 100644
index ed92d45..0000000
--- a/drivers/staging/gma500/mdfld_dsi_dpi.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * jim liu <jim.liu@intel.com>
- * Jackie Li<yaodong.li@intel.com>
- */
-
-#ifndef __MDFLD_DSI_DPI_H__
-#define __MDFLD_DSI_DPI_H__
-
-#include "mdfld_dsi_output.h"
-#include "mdfld_output.h"
-
-struct mdfld_dsi_dpi_timing {
- u16 hsync_count;
- u16 hbp_count;
- u16 hfp_count;
- u16 hactive_count;
- u16 vsync_count;
- u16 vbp_count;
- u16 vfp_count;
-};
-
-struct mdfld_dsi_dpi_output {
- struct mdfld_dsi_encoder base;
- struct drm_device *dev;
-
- int panel_on;
- int first_boot;
-
- struct panel_funcs *p_funcs;
-};
-
-#define MDFLD_DSI_DPI_OUTPUT(dsi_encoder) \
- container_of(dsi_encoder, struct mdfld_dsi_dpi_output, base)
-
-extern int mdfld_dsi_dpi_timing_calculation(struct drm_display_mode *mode,
- struct mdfld_dsi_dpi_timing *dpi_timing,
- int num_lane, int bpp);
-extern struct mdfld_dsi_encoder *mdfld_dsi_dpi_init(struct drm_device *dev,
- struct mdfld_dsi_connector *dsi_connector,
- struct panel_funcs *p_funcs);
-
-/* Medfield DPI helper functions */
-extern void mdfld_dsi_dpi_dpms(struct drm_encoder *encoder, int mode);
-extern bool mdfld_dsi_dpi_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-extern void mdfld_dsi_dpi_prepare(struct drm_encoder *encoder);
-extern void mdfld_dsi_dpi_commit(struct drm_encoder *encoder);
-extern void mdfld_dsi_dpi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-extern void mdfld_dsi_dpi_turn_on(struct mdfld_dsi_dpi_output *output,
- int pipe);
-extern void mdfld_dsi_dpi_controller_init(struct mdfld_dsi_config *dsi_config,
- int pipe);
-#endif /*__MDFLD_DSI_DPI_H__*/
diff --git a/drivers/staging/gma500/mdfld_dsi_output.c b/drivers/staging/gma500/mdfld_dsi_output.c
deleted file mode 100644
index 3f979db..0000000
--- a/drivers/staging/gma500/mdfld_dsi_output.c
+++ /dev/null
@@ -1,1014 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * jim liu <jim.liu@intel.com>
- * Jackie Li<yaodong.li@intel.com>
- */
-
-#include "mdfld_dsi_output.h"
-#include "mdfld_dsi_dbi.h"
-#include "mdfld_dsi_dpi.h"
-#include "mdfld_output.h"
-#include <asm/intel_scu_ipc.h>
-#include "mdfld_dsi_pkg_sender.h"
-#include <linux/pm_runtime.h>
-#include <linux/moduleparam.h>
-
-#define MDFLD_DSI_BRIGHTNESS_MAX_LEVEL 100
-
-static int CABC_control = 1;
-static int LABC_control = 1;
-
-module_param (CABC_control, int, 0644);
-module_param (LABC_control, int, 0644);
-
-/*
- * Make these MCS commands global so that we don't need a 'movl' every time
- * we send them.
- * FIXME: these data were provided by the OEM, we should get them from the GCT.
- */
-static u32 mdfld_dbi_mcs_hysteresis[] = {
- 0x42000f57, 0x8c006400, 0xff00bf00, 0xffffffff,
- 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
- 0x38000aff, 0x82005000, 0xff00ab00, 0xffffffff,
- 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
- 0x000000ff,
-};
-
-static u32 mdfld_dbi_mcs_display_profile[] = {
- 0x50281450, 0x0000c882, 0x00000000, 0x00000000,
- 0x00000000,
-};
-
-static u32 mdfld_dbi_mcs_kbbc_profile[] = {
- 0x00ffcc60, 0x00000000, 0x00000000, 0x00000000,
-};
-
-static u32 mdfld_dbi_mcs_gamma_profile[] = {
- 0x81111158, 0x88888888, 0x88888888,
-};
-
-/*
- * write hysteresis values.
- */
-static void mdfld_dsi_write_hysteresis (struct mdfld_dsi_config *dsi_config,
- int pipe)
-{
- struct mdfld_dsi_pkg_sender *sender = mdfld_dsi_get_pkg_sender(dsi_config);
-
- if(!sender) {
- WARN_ON(1);
- return;
- }
- mdfld_dsi_send_mcs_long_hs(sender,
- mdfld_dbi_mcs_hysteresis,
- 17,
- MDFLD_DSI_SEND_PACKAGE);
-}
-
-/*
- * write display profile values.
- */
-static void mdfld_dsi_write_display_profile(struct mdfld_dsi_config *dsi_config, int pipe)
-{
- struct mdfld_dsi_pkg_sender *sender = mdfld_dsi_get_pkg_sender(dsi_config);
-
- if(!sender) {
- WARN_ON(1);
- return;
- }
- mdfld_dsi_send_mcs_long_hs(sender,
- mdfld_dbi_mcs_display_profile,
- 5,
- MDFLD_DSI_SEND_PACKAGE);
-}
-
-/*
- * write KBBC profile values.
- */
-static void mdfld_dsi_write_kbbc_profile (struct mdfld_dsi_config * dsi_config, int pipe)
-{
- struct mdfld_dsi_pkg_sender *sender = mdfld_dsi_get_pkg_sender(dsi_config);
-
- if(!sender) {
- WARN_ON(1);
- return;
- }
- mdfld_dsi_send_mcs_long_hs(sender,
- mdfld_dbi_mcs_kbbc_profile,
- 4,
- MDFLD_DSI_SEND_PACKAGE);
-}
-
-/*
- * write gamma setting.
- */
-static void mdfld_dsi_write_gamma_setting (struct mdfld_dsi_config *dsi_config, int pipe)
-{
- struct mdfld_dsi_pkg_sender *sender = mdfld_dsi_get_pkg_sender(dsi_config);
-
- if(!sender) {
- WARN_ON(1);
- return;
- }
- mdfld_dsi_send_mcs_long_hs(sender,
- mdfld_dbi_mcs_gamma_profile,
- 3,
- MDFLD_DSI_SEND_PACKAGE);
-}
-
-/*
- * Check and see if the generic control or data buffer is empty and ready.
- */
-void mdfld_dsi_gen_fifo_ready (struct drm_device *dev, u32 gen_fifo_stat_reg, u32 fifo_stat)
-{
- u32 GEN_BF_time_out_count = 0;
-
- /* Check MIPI Adapter command registers */
- for (GEN_BF_time_out_count = 0; GEN_BF_time_out_count < GEN_FB_TIME_OUT; GEN_BF_time_out_count++)
- {
- if ((REG_READ(gen_fifo_stat_reg) & fifo_stat) == fifo_stat)
- break;
- udelay (100);
- }
-
- if (GEN_BF_time_out_count == GEN_FB_TIME_OUT)
- dev_err(dev->dev,
- "mdfld_dsi_gen_fifo_ready, Timeout. gen_fifo_stat_reg = 0x%x. \n",
- gen_fifo_stat_reg);
-}
-
-/*
- * Manage the DSI MIPI keyboard and display brightness.
- * FIXME: this is exported to OSPM code; we should work out a specific
- * display interface to OSPM.
- */
-void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config, int pipe)
-{
- struct mdfld_dsi_pkg_sender *sender = mdfld_dsi_get_pkg_sender(dsi_config);
- struct drm_device *dev;
- struct drm_psb_private *dev_priv;
- u32 gen_ctrl_val;
-
- if(!sender) {
- WARN_ON(1);
- return;
- }
-
- /* Only dereference the sender after the NULL check above */
- dev = sender->dev;
- dev_priv = dev->dev_private;
- /* Set default display backlight value to 85% (0xd8)*/
- mdfld_dsi_send_mcs_short_hs(sender,
- write_display_brightness,
- 0xd8,
- 1,
- MDFLD_DSI_SEND_PACKAGE);
-
- /* Set minimum brightness setting of CABC function to 20% (0x33)*/
- mdfld_dsi_send_mcs_short_hs(sender,
- write_cabc_min_bright,
- 0x33,
- 1,
- MDFLD_DSI_SEND_PACKAGE);
-
- mdfld_dsi_write_hysteresis(dsi_config, pipe);
- mdfld_dsi_write_display_profile (dsi_config, pipe);
- mdfld_dsi_write_kbbc_profile (dsi_config, pipe);
- mdfld_dsi_write_gamma_setting (dsi_config, pipe);
-
- /* Enable backlight or/and LABC */
- gen_ctrl_val = BRIGHT_CNTL_BLOCK_ON | DISPLAY_DIMMING_ON| BACKLIGHT_ON;
- if (LABC_control == 1 || CABC_control == 1)
- gen_ctrl_val |= DISPLAY_DIMMING_ON| DISPLAY_BRIGHTNESS_AUTO | GAMMA_AUTO;
-
- if (LABC_control == 1)
- gen_ctrl_val |= AMBIENT_LIGHT_SENSE_ON;
-
- dev_priv->mipi_ctrl_display = gen_ctrl_val;
-
- mdfld_dsi_send_mcs_short_hs(sender,
- write_ctrl_display,
- (u8)gen_ctrl_val,
- 1,
- MDFLD_DSI_SEND_PACKAGE);
-
- if (CABC_control == 0)
- return;
- mdfld_dsi_send_mcs_short_hs(sender,
- write_ctrl_cabc,
- UI_IMAGE,
- 1,
- MDFLD_DSI_SEND_PACKAGE);
-}
-
-/*
- * Manage the mipi display brightness.
- * TODO: refine this interface later
- */
-void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe, int level)
-{
- struct mdfld_dsi_pkg_sender *sender;
- struct drm_psb_private *dev_priv;
- struct mdfld_dsi_config *dsi_config;
- u32 gen_ctrl_val;
- int p_type;
-
- if (!dev || (pipe != 0 && pipe != 2)) {
- DRM_ERROR("Invalid parameter\n");
- return;
- }
-
- p_type = mdfld_get_panel_type(dev, 0);
-
- dev_priv = dev->dev_private;
-
- if(pipe)
- dsi_config = dev_priv->dsi_configs[1];
- else
- dsi_config = dev_priv->dsi_configs[0];
-
- sender = mdfld_dsi_get_pkg_sender(dsi_config);
-
- if(!sender) {
- WARN_ON(1);
- return;
- }
-
- gen_ctrl_val = ((level * 0xff) / MDFLD_DSI_BRIGHTNESS_MAX_LEVEL) & 0xff;
-
- dev_dbg(dev->dev,
- "pipe = %d, gen_ctrl_val = %d. \n", pipe, gen_ctrl_val);
-
- if(p_type == TMD_VID || p_type == TMD_CMD){
- /* Set display backlight value */
- mdfld_dsi_send_mcs_short_hs(sender,
- tmd_write_display_brightness,
- (u8)gen_ctrl_val,
- 1,
- MDFLD_DSI_SEND_PACKAGE);
- } else {
- /* Set display backlight value */
- mdfld_dsi_send_mcs_short_hs(sender,
- write_display_brightness,
- (u8)gen_ctrl_val,
- 1,
- MDFLD_DSI_SEND_PACKAGE);
-
-
- /* Enable backlight control */
- if (level == 0)
- gen_ctrl_val = 0;
- else
- gen_ctrl_val = dev_priv->mipi_ctrl_display;
-
- mdfld_dsi_send_mcs_short_hs(sender,
- write_ctrl_display,
- (u8)gen_ctrl_val,
- 1,
- MDFLD_DSI_SEND_PACKAGE);
- }
-}
-
-/*
- * shut down DSI controller
- */
-void mdfld_dsi_controller_shutdown(struct mdfld_dsi_config * dsi_config, int pipe)
-{
- struct drm_device * dev;
- u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
- int retry = 100;
-
- if (!dsi_config) {
- WARN_ON(1);
- return;
- }
-
- dev = dsi_config->dev;
-
- if (!gma_power_begin(dev, true)) {
- dev_err(dev->dev, "hw begin failed\n");
- return;
- }
-
- if(!(REG_READ(MIPIA_DEVICE_READY_REG + reg_offset) & DSI_DEVICE_READY))
- goto shutdown_out;
-
- /* Send shut down package; clear the special packet sent bit first */
- if(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) & DSI_INTR_STATE_SPL_PKG_SENT) {
- REG_WRITE((MIPIA_INTR_STAT_REG + reg_offset),
- (REG_READ(MIPIA_INTR_STAT_REG + reg_offset) | DSI_INTR_STATE_SPL_PKG_SENT));
- }
-
- /*send shut down package in HS*/
- REG_WRITE((MIPIA_DPI_CONTROL_REG + reg_offset), DSI_DPI_CTRL_HS_SHUTDOWN);
-
-
- /*
- * Make sure the shutdown packet has been sent before giving up,
- * bounded by the retry counter above.
- */
- while(!(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) & DSI_INTR_STATE_SPL_PKG_SENT)) {
- retry--;
-
- if(!retry) {
- dev_err(dev->dev, "timeout\n");
- break;
- }
- }
-
- /*sleep to ensure the shutdown has finished*/
- msleep(100);
-
- /*un-ready device*/
- REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset),
- (REG_READ(MIPIA_DEVICE_READY_REG + reg_offset) & ~DSI_DEVICE_READY));
-
-shutdown_out:
- gma_power_end(dev);
-}
-
-void mdfld_dsi_controller_startup(struct mdfld_dsi_config * dsi_config, int pipe)
-{
- struct drm_device * dev;
- u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
- int retry = 100;
-
-
- if (!dsi_config) {
- WARN_ON(1);
- return;
- }
-
- dev = dsi_config->dev;
- dev_dbg(dev->dev, "starting up DSI controller on pipe %d...\n", pipe);
-
- if (!gma_power_begin(dev, true)) {
- dev_err(dev->dev, "hw begin failed\n");
- return;
- }
-
- if((REG_READ(MIPIA_DEVICE_READY_REG + reg_offset) & DSI_DEVICE_READY))
- goto startup_out;
-
- /*if config DPI, turn on DPI interface*/
- if(dsi_config->type == MDFLD_DSI_ENCODER_DPI) {
- if(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) & DSI_INTR_STATE_SPL_PKG_SENT) {
- REG_WRITE((MIPIA_INTR_STAT_REG + reg_offset), DSI_INTR_STATE_SPL_PKG_SENT);
- }
-
- REG_WRITE((MIPIA_DPI_CONTROL_REG + reg_offset), DSI_DPI_CTRL_HS_TURN_ON);
-
- /*
- * Make sure the turn-on packet has been sent before giving up,
- * bounded by the retry counter above.
- */
- while(!(REG_READ(MIPIA_INTR_STAT_REG + reg_offset) & DSI_INTR_STATE_SPL_PKG_SENT)) {
- retry--;
- if(!retry) {
- dev_err(dev->dev, "timeout\n");
- break;
- }
- }
-
- msleep(100);
- }
-
- /*set device ready*/
- REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset),
- (REG_READ(MIPIA_DEVICE_READY_REG + reg_offset) | DSI_DEVICE_READY));
-
-startup_out:
- gma_power_end(dev);
-}
-
-
-static int mdfld_dsi_get_panel_status(struct mdfld_dsi_config *dsi_config,
- u8 dcs,
- u32 *data,
- u8 transmission)
-{
- struct mdfld_dsi_pkg_sender *sender
- = mdfld_dsi_get_pkg_sender(dsi_config);
-
- if (!sender || !data) {
- DRM_ERROR("Invalid parameter\n");
- return -EINVAL;
- }
-
- if (transmission == MDFLD_DSI_HS_TRANSMISSION)
- return mdfld_dsi_read_mcs_hs(sender, dcs, data, 1);
- else if (transmission == MDFLD_DSI_LP_TRANSMISSION)
- return mdfld_dsi_read_mcs_lp(sender, dcs, data, 1);
- else
- return -EINVAL;
-}
-
-int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config,
- u32 *mode,
- u8 transmission)
-{
- if (!dsi_config || !mode) {
- DRM_ERROR("Invalid parameter\n");
- return -EINVAL;
- }
-
- return mdfld_dsi_get_panel_status(dsi_config, 0x0a, mode, transmission);
-}
-
-int mdfld_dsi_get_diagnostic_result(struct mdfld_dsi_config *dsi_config,
- u32 *result,
- u8 transmission)
-{
- if (!dsi_config || !result) {
- DRM_ERROR("Invalid parameter\n");
- return -EINVAL;
- }
-
- return mdfld_dsi_get_panel_status(dsi_config, 0x0f, result,
- transmission);
-}
-
-/*
- * NOTE: this function is used by OSPM.
- * TODO: remove it later; proper display interfaces for OSPM should be
- * worked out first.
- */
-void mdfld_dsi_controller_init(struct mdfld_dsi_config * dsi_config, int pipe)
-{
- if(!dsi_config || ((pipe != 0) && (pipe != 2))) {
- WARN_ON(1);
- return;
- }
-
- if(dsi_config->type)
- mdfld_dsi_dpi_controller_init(dsi_config, pipe);
- else
- mdfld_dsi_controller_dbi_init(dsi_config, pipe);
-}
-
-static void mdfld_dsi_connector_save(struct drm_connector * connector)
-{
-}
-
-static void mdfld_dsi_connector_restore(struct drm_connector * connector)
-{
-}
-
-static enum drm_connector_status mdfld_dsi_connector_detect(struct drm_connector * connector, bool force)
-{
- struct psb_intel_output *psb_output
- = to_psb_intel_output(connector);
- struct mdfld_dsi_connector *dsi_connector
- = MDFLD_DSI_CONNECTOR(psb_output);
- return dsi_connector->status;
-}
-
-static int mdfld_dsi_connector_set_property(struct drm_connector *connector,
- struct drm_property *property,
- uint64_t value)
-{
- struct drm_encoder *encoder = connector->encoder;
-
- if (!strcmp(property->name, "scaling mode") && encoder) {
- struct psb_intel_crtc * psb_crtc = to_psb_intel_crtc(encoder->crtc);
- bool bTransitionFromToCentered;
- uint64_t curValue;
-
- if (!psb_crtc)
- goto set_prop_error;
-
- switch (value) {
- case DRM_MODE_SCALE_FULLSCREEN:
- break;
- case DRM_MODE_SCALE_NO_SCALE:
- break;
- case DRM_MODE_SCALE_ASPECT:
- break;
- default:
- goto set_prop_error;
- }
-
- if (drm_connector_property_get_value(connector, property, &curValue))
- goto set_prop_error;
-
- if (curValue == value)
- goto set_prop_done;
-
- if (drm_connector_property_set_value(connector, property, value))
- goto set_prop_error;
-
- bTransitionFromToCentered = (curValue == DRM_MODE_SCALE_NO_SCALE) ||
- (value == DRM_MODE_SCALE_NO_SCALE);
-
- if (psb_crtc->saved_mode.hdisplay != 0 &&
- psb_crtc->saved_mode.vdisplay != 0) {
- if (bTransitionFromToCentered) {
- if (!drm_crtc_helper_set_mode(encoder->crtc, &psb_crtc->saved_mode,
- encoder->crtc->x, encoder->crtc->y, encoder->crtc->fb))
- goto set_prop_error;
- } else {
- struct drm_encoder_helper_funcs *pEncHFuncs = encoder->helper_private;
- pEncHFuncs->mode_set(encoder, &psb_crtc->saved_mode,
- &psb_crtc->saved_adjusted_mode);
- }
- }
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- } else if (!strcmp(property->name, "backlight") && encoder) {
- struct drm_psb_private *dev_priv = encoder->dev->dev_private;
- struct backlight_device *psb_bd = dev_priv->backlight_device;
- dev_dbg(encoder->dev->dev, "backlight level = %d\n", (int)value);
- if (drm_connector_property_set_value(connector, property, value))
- goto set_prop_error;
- else {
- dev_dbg(encoder->dev->dev,
- "set brightness to %d", (int)value);
- if (psb_bd) {
- psb_bd->props.brightness = value;
- backlight_update_status(psb_bd);
- }
- }
-#endif
- }
-set_prop_done:
- return 0;
-set_prop_error:
- return -1;
-}
-
-static void mdfld_dsi_connector_destroy(struct drm_connector *connector)
-{
- struct psb_intel_output * psb_output = to_psb_intel_output(connector);
- struct mdfld_dsi_connector * dsi_connector = MDFLD_DSI_CONNECTOR(psb_output);
- struct mdfld_dsi_pkg_sender * sender;
-
- if(!dsi_connector)
- return;
-
- drm_sysfs_connector_remove(connector);
- drm_connector_cleanup(connector);
-
- sender = dsi_connector->pkg_sender;
-
- mdfld_dsi_pkg_sender_destroy(sender);
-
- kfree(dsi_connector);
-}
-
-static int mdfld_dsi_connector_get_modes(struct drm_connector * connector)
-{
- struct psb_intel_output * psb_output = to_psb_intel_output(connector);
- struct mdfld_dsi_connector * dsi_connector = MDFLD_DSI_CONNECTOR(psb_output);
- struct mdfld_dsi_config * dsi_config = mdfld_dsi_get_config(dsi_connector);
- struct drm_display_mode * fixed_mode = dsi_config->fixed_mode;
- struct drm_display_mode * dup_mode = NULL;
- struct drm_device * dev = connector->dev;
-
- connector->display_info.min_vfreq = 0;
- connector->display_info.max_vfreq = 200;
- connector->display_info.min_hfreq = 0;
- connector->display_info.max_hfreq = 200;
-
- if(fixed_mode) {
- dev_dbg(dev->dev, "fixed_mode %dx%d\n",
- fixed_mode->hdisplay, fixed_mode->vdisplay);
-
- dup_mode = drm_mode_duplicate(dev, fixed_mode);
- drm_mode_probed_add(connector, dup_mode);
- return 1;
- }
- dev_err(dev->dev, "Didn't get any modes!\n");
- return 0;
-}
-
-static int mdfld_dsi_connector_mode_valid(struct drm_connector * connector, struct drm_display_mode * mode)
-{
- struct psb_intel_output * psb_output = to_psb_intel_output(connector);
- struct mdfld_dsi_connector * dsi_connector = MDFLD_DSI_CONNECTOR(psb_output);
- struct mdfld_dsi_config * dsi_config = mdfld_dsi_get_config(dsi_connector);
- struct drm_display_mode * fixed_mode = dsi_config->fixed_mode;
-
- dev_dbg(connector->dev->dev, "mode %p, fixed mode %p\n",
- mode, fixed_mode);
-
- if(mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
- if(mode->flags & DRM_MODE_FLAG_INTERLACE)
- return MODE_NO_INTERLACE;
-
- /*
- * FIXME: the current DC has no fitting unit, so reject any mode setting
- * request that doesn't match the fixed mode. A way to do up-scaling
- * (panel fitting) will be figured out later.
- */
- if(fixed_mode) {
- if(mode->hdisplay != fixed_mode->hdisplay)
- return MODE_PANEL;
-
- if(mode->vdisplay != fixed_mode->vdisplay)
- return MODE_PANEL;
- }
- dev_dbg(connector->dev->dev, "mode ok\n");
-
- return MODE_OK;
-}
-
-static void mdfld_dsi_connector_dpms(struct drm_connector *connector, int mode)
-{
-#ifdef CONFIG_PM_RUNTIME
- struct drm_device * dev = connector->dev;
- struct drm_psb_private * dev_priv = dev->dev_private;
- bool panel_on, panel_on2;
-#endif
- /* First, execute DPMS */
- drm_helper_connector_dpms(connector, mode);
-
-#ifdef CONFIG_PM_RUNTIME
- if(mdfld_panel_dpi(dev)) {
- /* DPI panel */
- panel_on = dev_priv->dpi_panel_on;
- panel_on2 = dev_priv->dpi_panel_on2;
- } else {
- /* DBI panel */
- panel_on = dev_priv->dbi_panel_on;
- panel_on2 = dev_priv->dbi_panel_on2;
- }
-
- /* Then check all display panels + monitors status */
- /* Make sure that the Display (B) sub-system isn't in i3 when
- * reading or writing the DC registers, otherwise a "Fabric error"
- * would occur during the S0i3 state. */
- if(!panel_on && !panel_on2 && !(REG_READ(HDMIB_CONTROL)
- & HDMIB_PORT_EN)) {
- /* Request rpm idle */
- if(dev_priv->rpm_enabled)
- pm_request_idle(&dev->pdev->dev);
- }
- /*
- * if rpm wasn't enabled yet, try to allow it
- * FIXME: won't enable rpm for DPI since DPI
- * CRTC setting is a little messy now.
- * Enable it later!
- */
-#if 0
- if(!dev_priv->rpm_enabled && !mdfld_panel_dpi(dev))
- ospm_runtime_pm_allow(dev);
-#endif
-#endif
-}
-
-static struct drm_encoder *mdfld_dsi_connector_best_encoder(
- struct drm_connector *connector)
-{
- struct psb_intel_output * psb_output = to_psb_intel_output(connector);
- struct mdfld_dsi_connector * dsi_connector = MDFLD_DSI_CONNECTOR(psb_output);
- struct mdfld_dsi_config * dsi_config = mdfld_dsi_get_config(dsi_connector);
- struct mdfld_dsi_encoder * encoder = NULL;
-
- if(dsi_config->type == MDFLD_DSI_ENCODER_DBI)
- encoder = dsi_config->encoders[MDFLD_DSI_ENCODER_DBI];
- else if (dsi_config->type == MDFLD_DSI_ENCODER_DPI)
- encoder = dsi_config->encoders[MDFLD_DSI_ENCODER_DPI];
-
- dev_dbg(connector->dev->dev, "get encoder %p\n", encoder);
-
- if(!encoder) {
- dev_err(connector->dev->dev,
- "Invalid encoder for type %d\n", dsi_config->type);
- return NULL;
- }
- dsi_config->encoder = encoder;
- return &encoder->base;
-}
-
-/* DSI connector funcs */
-static const struct drm_connector_funcs mdfld_dsi_connector_funcs = {
- .dpms = /*drm_helper_connector_dpms*/mdfld_dsi_connector_dpms,
- .save = mdfld_dsi_connector_save,
- .restore = mdfld_dsi_connector_restore,
- .detect = mdfld_dsi_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .set_property = mdfld_dsi_connector_set_property,
- .destroy = mdfld_dsi_connector_destroy,
-};
-
-/* DSI connector helper funcs */
-static const struct drm_connector_helper_funcs mdfld_dsi_connector_helper_funcs = {
- .get_modes = mdfld_dsi_connector_get_modes,
- .mode_valid = mdfld_dsi_connector_mode_valid,
- .best_encoder = mdfld_dsi_connector_best_encoder,
-};
-
-static int mdfld_dsi_get_default_config(struct drm_device * dev,
- struct mdfld_dsi_config * config, int pipe)
-{
- if(!dev || !config) {
- WARN_ON(1);
- return -EINVAL;
- }
-
- config->bpp = 24;
- config->type = mdfld_panel_dpi(dev);
- config->lane_count = 2;
- config->channel_num = 0;
- /*NOTE: video mode is ignored when type is MDFLD_DSI_ENCODER_DBI*/
- if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
- config->video_mode = MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_PULSE;
- } else {
- config->video_mode = MDFLD_DSI_VIDEO_BURST_MODE;
- }
-
- return 0;
-}
-
-/*
- * Returns the panel fixed mode from configuration.
- */
-struct drm_display_mode *
-mdfld_dsi_get_configuration_mode(struct mdfld_dsi_config * dsi_config, int pipe)
-{
- struct drm_device *dev = dsi_config->dev;
- struct drm_display_mode *mode;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
- bool use_gct = false;
-
- mode = kzalloc(sizeof(*mode), GFP_KERNEL);
- if (!mode) {
- dev_err(dev->dev, "Out of memory for mode\n");
- return NULL;
- }
- if (use_gct) {
- dev_dbg(dev->dev, "gct find MIPI panel.\n");
-
- mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
- mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
- mode->hsync_start = mode->hdisplay +
- ((ti->hsync_offset_hi << 8) | ti->hsync_offset_lo);
- mode->hsync_end = mode->hsync_start +
- ((ti->hsync_pulse_width_hi << 8) | ti->hsync_pulse_width_lo);
- mode->htotal = mode->hdisplay +
- ((ti->hblank_hi << 8) | ti->hblank_lo);
- mode->vsync_start = mode->vdisplay +
- ((ti->vsync_offset_hi << 8) | ti->vsync_offset_lo);
- mode->vsync_end = mode->vsync_start +
- ((ti->vsync_pulse_width_hi << 8) | ti->vsync_pulse_width_lo);
- mode->vtotal = mode->vdisplay +
- ((ti->vblank_hi << 8) | ti->vblank_lo);
- mode->clock = ti->pixel_clock * 10;
- } else {
- if(dsi_config->type == MDFLD_DSI_ENCODER_DPI) {
- if (mdfld_get_panel_type(dev, pipe) == TMD_VID) {
- mode->hdisplay = 480;
- mode->vdisplay = 854;
- mode->hsync_start = 487;
- mode->hsync_end = 490;
- mode->htotal = 499;
- mode->vsync_start = 861;
- mode->vsync_end = 865;
- mode->vtotal = 873;
- mode->clock = 33264;
- } else {
- mode->hdisplay = 864;
- mode->vdisplay = 480;
- mode->hsync_start = 873;
- mode->hsync_end = 876;
- mode->htotal = 887;
- mode->vsync_start = 487;
- mode->vsync_end = 490;
- mode->vtotal = 499;
- mode->clock = 33264;
- }
- } else if(dsi_config->type == MDFLD_DSI_ENCODER_DBI) {
- mode->hdisplay = 864;
- mode->vdisplay = 480;
- mode->hsync_start = 872;
- mode->hsync_end = 876;
- mode->htotal = 884;
- mode->vsync_start = 482;
- mode->vsync_end = 494;
- mode->vtotal = 486;
- mode->clock = 25777;
-
- }
- }
-
- drm_mode_set_name(mode);
- drm_mode_set_crtcinfo(mode, 0);
-
- mode->type |= DRM_MODE_TYPE_PREFERRED;
-
- return mode;
-}
-
-int mdfld_dsi_panel_reset(int pipe)
-{
- unsigned gpio;
- int ret = 0;
-
- switch (pipe) {
- case 0:
- gpio = 128;
- break;
- case 2:
- gpio = 34;
- break;
- default:
- DRM_ERROR("Invalid output\n");
- return -EINVAL;
- }
-
- ret = gpio_request(gpio, "gfx");
- if (ret) {
- DRM_ERROR("gpio_rqueset failed\n");
- return ret;
- }
-
- ret = gpio_direction_output(gpio, 1);
- if (ret) {
- DRM_ERROR("gpio_direction_output failed\n");
- goto gpio_error;
- }
-
- gpio_get_value(gpio);
-
-gpio_error:
- if (gpio_is_valid(gpio))
- gpio_free(gpio);
-
- return ret;
-}
-
-/*
- * MIPI output init
- * @dev: drm device
- * @pipe: pipe number, 0 or 2
- * @config: DSI configuration to use; a default one is allocated when NULL
- *
- * Initialize a MIPI output on @pipe, including creation of the DRM mode
- * objects for the DSI output.
- */
-void mdfld_dsi_output_init(struct drm_device *dev,
- int pipe,
- struct mdfld_dsi_config *config,
- struct panel_funcs* p_cmd_funcs,
- struct panel_funcs* p_vid_funcs)
-{
- struct mdfld_dsi_config * dsi_config;
- struct mdfld_dsi_connector * dsi_connector;
- struct psb_intel_output * psb_output;
- struct drm_connector * connector;
- struct mdfld_dsi_encoder * encoder;
- struct drm_psb_private * dev_priv = dev->dev_private;
- struct panel_info dsi_panel_info;
- u32 width_mm, height_mm;
-
- dev_dbg(dev->dev, "init DSI output on pipe %d\n", pipe);
-
- if(!dev || ((pipe != 0) && (pipe != 2))) {
- WARN_ON(1);
- return;
- }
-
- /*create a new connector*/
- dsi_connector = kzalloc(sizeof(struct mdfld_dsi_connector), GFP_KERNEL);
- if(!dsi_connector) {
- DRM_ERROR("No memory");
- return;
- }
-
- dsi_connector->pipe = pipe;
-
- /*set DSI config*/
- if(config) {
- dsi_config = config;
- } else {
- dsi_config = kzalloc(sizeof(struct mdfld_dsi_config), GFP_KERNEL);
- if(!dsi_config) {
- dev_err(dev->dev,
- "cannot allocate memory for DSI config\n");
- goto dsi_init_err0;
- }
-
- mdfld_dsi_get_default_config(dev, dsi_config, pipe);
- }
-
- dsi_connector->private = dsi_config;
-
- dsi_config->changed = 1;
- dsi_config->dev = dev;
-
- /* Init fixed mode basing on DSI config type */
- if(dsi_config->type == MDFLD_DSI_ENCODER_DBI) {
- dsi_config->fixed_mode = p_cmd_funcs->get_config_mode(dev);
- if(p_cmd_funcs->get_panel_info(dev, pipe, &dsi_panel_info))
- goto dsi_init_err0;
- } else if(dsi_config->type == MDFLD_DSI_ENCODER_DPI) {
- dsi_config->fixed_mode = p_vid_funcs->get_config_mode(dev);
- if(p_vid_funcs->get_panel_info(dev, pipe, &dsi_panel_info))
- goto dsi_init_err0;
- }
-
- width_mm = dsi_panel_info.width_mm;
- height_mm = dsi_panel_info.height_mm;
-
- dsi_config->mode = dsi_config->fixed_mode;
- dsi_config->connector = dsi_connector;
-
- if(!dsi_config->fixed_mode) {
- dev_err(dev->dev, "No pannel fixed mode was found\n");
- goto dsi_init_err0;
- }
-
- if(pipe && dev_priv->dsi_configs[0]) {
- dsi_config->dvr_ic_inited = 0;
- dev_priv->dsi_configs[1] = dsi_config;
- } else if(pipe == 0) {
- dsi_config->dvr_ic_inited = 1;
- dev_priv->dsi_configs[0] = dsi_config;
- } else {
- dev_err(dev->dev, "Trying to init MIPI1 before MIPI0\n");
- goto dsi_init_err0;
- }
-
- /*init drm connector object*/
- psb_output = &dsi_connector->base;
-
- psb_output->type = (pipe == 0) ? INTEL_OUTPUT_MIPI : INTEL_OUTPUT_MIPI2;
-
- connector = &psb_output->base;
- /* Revisit type if MIPI/HDMI bridges ever appear on Medfield */
- drm_connector_init(dev, connector, &mdfld_dsi_connector_funcs,
- DRM_MODE_CONNECTOR_LVDS);
- drm_connector_helper_add(connector, &mdfld_dsi_connector_helper_funcs);
-
- connector->display_info.subpixel_order = SubPixelHorizontalRGB;
- connector->display_info.width_mm = width_mm;
- connector->display_info.height_mm = height_mm;
- connector->interlace_allowed = false;
- connector->doublescan_allowed = false;
-
- /* Attach properties */
- drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property, DRM_MODE_SCALE_FULLSCREEN);
- drm_connector_attach_property(connector, dev_priv->backlight_property, MDFLD_DSI_BRIGHTNESS_MAX_LEVEL);
-
- /* Init DSI package sender on this output */
- if (mdfld_dsi_pkg_sender_init(dsi_connector, pipe)) {
- DRM_ERROR("Package Sender initialization failed on pipe %d\n", pipe);
- goto dsi_init_err0;
- }
-
- /* Init DBI & DPI encoders */
- if (p_cmd_funcs) {
- encoder = mdfld_dsi_dbi_init(dev, dsi_connector, p_cmd_funcs);
- if(!encoder) {
- dev_err(dev->dev, "Create DBI encoder failed\n");
- goto dsi_init_err1;
- }
- encoder->private = dsi_config;
- dsi_config->encoders[MDFLD_DSI_ENCODER_DBI] = encoder;
- }
-
- if(p_vid_funcs) {
- encoder = mdfld_dsi_dpi_init(dev, dsi_connector, p_vid_funcs);
- if(!encoder) {
- dev_err(dev->dev, "Create DPI encoder failed\n");
- goto dsi_init_err1;
- }
- encoder->private = dsi_config;
- dsi_config->encoders[MDFLD_DSI_ENCODER_DPI] = encoder;
- }
-
- drm_sysfs_connector_add(connector);
- return;
-
- /*TODO: add code to destroy outputs on error*/
-dsi_init_err1:
- /*destroy sender*/
- mdfld_dsi_pkg_sender_destroy(dsi_connector->pkg_sender);
-
- drm_connector_cleanup(connector);
- kfree(dsi_config->fixed_mode);
- kfree(dsi_config);
-dsi_init_err0:
- kfree(dsi_connector);
-}
diff --git a/drivers/staging/gma500/mdfld_dsi_output.h b/drivers/staging/gma500/mdfld_dsi_output.h
deleted file mode 100644
index 4699267..0000000
--- a/drivers/staging/gma500/mdfld_dsi_output.h
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * jim liu <jim.liu@intel.com>
- * Jackie Li<yaodong.li@intel.com>
- */
-
-#ifndef __MDFLD_DSI_OUTPUT_H__
-#define __MDFLD_DSI_OUTPUT_H__
-
-#include <linux/backlight.h>
-#include <drm/drmP.h>
-#include <drm/drm.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_edid.h>
-
-#include "psb_drv.h"
-#include "psb_intel_drv.h"
-#include "psb_intel_reg.h"
-#include "power.h"
-#include "mdfld_output.h"
-
-#include <asm/mrst.h>
-
-
-static inline struct mdfld_dsi_config *
- mdfld_dsi_get_config(struct mdfld_dsi_connector *connector)
-{
- if (!connector)
- return NULL;
- return (struct mdfld_dsi_config *)connector->private;
-}
-
-static inline void *mdfld_dsi_get_pkg_sender(struct mdfld_dsi_config *config)
-{
- struct mdfld_dsi_connector *dsi_connector;
-
- if (!config)
- return NULL;
-
- dsi_connector = config->connector;
-
- if (!dsi_connector)
- return NULL;
-
- return dsi_connector->pkg_sender;
-}
-
-static inline struct mdfld_dsi_config *
- mdfld_dsi_encoder_get_config(struct mdfld_dsi_encoder *encoder)
-{
- if (!encoder)
- return NULL;
- return (struct mdfld_dsi_config *)encoder->private;
-}
-
-static inline struct mdfld_dsi_connector *
- mdfld_dsi_encoder_get_connector(struct mdfld_dsi_encoder *encoder)
-{
- struct mdfld_dsi_config *config;
-
- if (!encoder)
- return NULL;
-
- config = mdfld_dsi_encoder_get_config(encoder);
- if (!config)
- return NULL;
-
- return config->connector;
-}
-
-static inline void *mdfld_dsi_encoder_get_pkg_sender(
- struct mdfld_dsi_encoder *encoder)
-{
- struct mdfld_dsi_config *dsi_config;
-
- dsi_config = mdfld_dsi_encoder_get_config(encoder);
- if (!dsi_config)
- return NULL;
-
- return mdfld_dsi_get_pkg_sender(dsi_config);
-}
-
-static inline int mdfld_dsi_encoder_get_pipe(struct mdfld_dsi_encoder *encoder)
-{
- struct mdfld_dsi_connector *connector;
-
- if (!encoder)
- return -1;
-
- connector = mdfld_dsi_encoder_get_connector(encoder);
- if (!connector)
- return -1;
-
- return connector->pipe;
-}
-
-extern void mdfld_dsi_gen_fifo_ready(struct drm_device *dev,
- u32 gen_fifo_stat_reg, u32 fifo_stat);
-extern void mdfld_dsi_brightness_init(struct mdfld_dsi_config *dsi_config,
- int pipe);
-extern void mdfld_dsi_brightness_control(struct drm_device *dev, int pipe,
- int level);
-extern void mdfld_dsi_output_init(struct drm_device *dev, int pipe,
- struct mdfld_dsi_config *config,
- struct panel_funcs *p_cmd_funcs,
- struct panel_funcs *p_vid_funcs);
-extern void mdfld_dsi_controller_init(struct mdfld_dsi_config *dsi_config,
- int pipe);
-extern int mdfld_dsi_get_power_mode(struct mdfld_dsi_config *dsi_config,
- u32 *mode,
- u8 transmission);
-extern int mdfld_dsi_get_diagnostic_result(struct mdfld_dsi_config *dsi_config,
- u32 *result,
- u8 transmission);
-extern int mdfld_dsi_panel_reset(int pipe);
-
-#endif /*__MDFLD_DSI_OUTPUT_H__*/
diff --git a/drivers/staging/gma500/mdfld_dsi_pkg_sender.c b/drivers/staging/gma500/mdfld_dsi_pkg_sender.c
deleted file mode 100644
index 9b96a5c..0000000
--- a/drivers/staging/gma500/mdfld_dsi_pkg_sender.c
+++ /dev/null
@@ -1,1484 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Jackie Li<yaodong.li@intel.com>
- */
-
-#include <linux/freezer.h>
-
-#include "mdfld_dsi_output.h"
-#include "mdfld_dsi_pkg_sender.h"
-#include "mdfld_dsi_dbi.h"
-#include "mdfld_dsi_dpi.h"
-
-#define MDFLD_DSI_DBI_FIFO_TIMEOUT 100
-#define MDFLD_DSI_MAX_RETURN_PACKET_SIZE 512
-#define MDFLD_DSI_READ_MAX_COUNT 5000
-
-static const char * const dsi_errors[] = {
- "RX SOT Error",
- "RX SOT Sync Error",
- "RX EOT Sync Error",
- "RX Escape Mode Entry Error",
- "RX LP TX Sync Error",
- "RX HS Receive Timeout Error",
- "RX False Control Error",
- "RX ECC Single Bit Error",
- "RX ECC Multibit Error",
- "RX Checksum Error",
- "RX DSI Data Type Not Recognised",
- "RX DSI VC ID Invalid",
- "TX False Control Error",
- "TX ECC Single Bit Error",
- "TX ECC Multibit Error",
- "TX Checksum Error",
- "TX DSI Data Type Not Recognised",
- "TX DSI VC ID invalid",
- "High Contention",
- "Low contention",
- "DPI FIFO Under run",
- "HS TX Timeout",
- "LP RX Timeout",
- "Turn Around ACK Timeout",
- "ACK With No Error",
- "RX Invalid TX Length",
- "RX Prot Violation",
- "HS Generic Write FIFO Full",
- "LP Generic Write FIFO Full",
- "Generic Read Data Avail",
- "Special Packet Sent",
- "Tearing Effect",
-};
-
-static int wait_for_gen_fifo_empty(struct mdfld_dsi_pkg_sender *sender,
- u32 mask)
-{
- struct drm_device *dev = sender->dev;
- u32 gen_fifo_stat_reg = sender->mipi_gen_fifo_stat_reg;
- int retry = 0xffff;
-
- while (retry--) {
- if ((mask & REG_READ(gen_fifo_stat_reg)) == mask)
- return 0;
- udelay(100);
- }
- dev_err(dev->dev, "fifo is NOT empty 0x%08x\n",
- REG_READ(gen_fifo_stat_reg));
- return -EIO;
-}
-
-static int wait_for_all_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
-{
- return wait_for_gen_fifo_empty(sender, (1 << 2) | (1 << 10) | (1 << 18)
- | (1 << 26) | (1 << 27) | (1 << 28));
-}
-
-static int wait_for_lp_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
-{
- return wait_for_gen_fifo_empty(sender, (1 << 10) | (1 << 26));
-}
-
-static int wait_for_hs_fifos_empty(struct mdfld_dsi_pkg_sender *sender)
-{
- return wait_for_gen_fifo_empty(sender, (1 << 2) | (1 << 18));
-}
-
-static int wait_for_dbi_fifo_empty(struct mdfld_dsi_pkg_sender *sender)
-{
- return wait_for_gen_fifo_empty(sender, (1 << 27));
-}
-
-static int handle_dsi_error(struct mdfld_dsi_pkg_sender *sender, u32 mask)
-{
- u32 intr_stat_reg = sender->mipi_intr_stat_reg;
- struct drm_device *dev = sender->dev;
-
- switch (mask) {
- case (1 << 0):
- case (1 << 1):
- case (1 << 2):
- case (1 << 3):
- case (1 << 4):
- case (1 << 5):
- case (1 << 6):
- case (1 << 7):
- case (1 << 8):
- case (1 << 9):
- case (1 << 10):
- case (1 << 11):
- case (1 << 12):
- case (1 << 13):
- break;
- case (1 << 14):
- /*wait for all fifo empty*/
- /*wait_for_all_fifos_empty(sender)*/;
- break;
- case (1 << 15):
- break;
- case (1 << 16):
- break;
- case (1 << 17):
- break;
- case (1 << 18):
- case (1 << 19):
- /*wait for contention recovery time*/
- /*mdelay(10);*/
- /*wait for all fifo empty*/
- if (0)
- wait_for_all_fifos_empty(sender);
- break;
- case (1 << 20):
- break;
- case (1 << 21):
- /*wait for all fifo empty*/
- /*wait_for_all_fifos_empty(sender);*/
- break;
- case (1 << 22):
- break;
- case (1 << 23):
- case (1 << 24):
- case (1 << 25):
- case (1 << 26):
- case (1 << 27):
- /* HS Gen fifo full */
- REG_WRITE(intr_stat_reg, mask);
- wait_for_hs_fifos_empty(sender);
- break;
- case (1 << 28):
- /* LP Gen fifo full */
- REG_WRITE(intr_stat_reg, mask);
- wait_for_lp_fifos_empty(sender);
- break;
- case (1 << 29):
- case (1 << 30):
- case (1 << 31):
- break;
- }
-
- if (mask & REG_READ(intr_stat_reg))
- dev_warn(dev->dev, "Cannot clean interrupt 0x%08x\n", mask);
-
- return 0;
-}
-
-static int dsi_error_handler(struct mdfld_dsi_pkg_sender *sender)
-{
- struct drm_device *dev = sender->dev;
- u32 intr_stat_reg = sender->mipi_intr_stat_reg;
- u32 mask;
- u32 intr_stat;
- int i;
- int err = 0;
-
- intr_stat = REG_READ(intr_stat_reg);
-
- for (i = 0; i < 32; i++) {
- mask = (0x00000001UL) << i;
- if (intr_stat & mask) {
- dev_dbg(dev->dev, "[DSI]: %s\n", dsi_errors[i]);
- err = handle_dsi_error(sender, mask);
- if (err)
- dev_err(dev->dev, "Cannot handle error\n");
- }
- }
- return err;
-}
-
-static inline int dbi_cmd_sent(struct mdfld_dsi_pkg_sender *sender)
-{
- struct drm_device *dev = sender->dev;
- u32 retry = 0xffff;
- u32 dbi_cmd_addr_reg = sender->mipi_cmd_addr_reg;
-
- /* Query the command execution status */
- while (retry--) {
- if (!(REG_READ(dbi_cmd_addr_reg) & (1 << 0)))
- return 0;
- }
-
- /* The retry counter was exhausted without the busy bit clearing */
- dev_err(dev->dev, "Timeout waiting for DBI Command status\n");
- return -EAGAIN;
-}
-
-/*
- * NOTE: this interface is abandoned except for the write_mem_start DCS;
- * other DCS commands are sent via the generic pkg interfaces.
- */
-static int send_dcs_pkg(struct mdfld_dsi_pkg_sender *sender,
- struct mdfld_dsi_pkg *pkg)
-{
- struct drm_device *dev = sender->dev;
- struct mdfld_dsi_dcs_pkg *dcs_pkg = &pkg->pkg.dcs_pkg;
- u32 dbi_cmd_len_reg = sender->mipi_cmd_len_reg;
- u32 dbi_cmd_addr_reg = sender->mipi_cmd_addr_reg;
- u32 cb_phy = sender->dbi_cb_phy;
- u32 index = 0;
- u8 *cb = (u8 *)sender->dbi_cb_addr;
- int i;
- int ret;
-
- if (!sender->dbi_pkg_support) {
- dev_err(dev->dev, "Trying to send DCS on a non DBI output, abort!\n");
- return -ENOTSUPP;
- }
-
- /*wait for DBI fifo empty*/
- wait_for_dbi_fifo_empty(sender);
-
- *(cb + (index++)) = dcs_pkg->cmd;
- if (dcs_pkg->param_num) {
- for (i = 0; i < dcs_pkg->param_num; i++)
- *(cb + (index++)) = *(dcs_pkg->param + i);
- }
-
- REG_WRITE(dbi_cmd_len_reg, (1 + dcs_pkg->param_num));
- REG_WRITE(dbi_cmd_addr_reg,
- (cb_phy << CMD_MEM_ADDR_OFFSET)
- | (1 << 0)
- | ((dcs_pkg->data_src == CMD_DATA_SRC_PIPE) ? (1 << 1) : 0));
-
- ret = dbi_cmd_sent(sender);
- if (ret) {
- dev_err(dev->dev, "command 0x%x not complete\n", dcs_pkg->cmd);
- return -EAGAIN;
- }
- return 0;
-}
-
-static int __send_short_pkg(struct mdfld_dsi_pkg_sender *sender,
- struct mdfld_dsi_pkg *pkg)
-{
- struct drm_device *dev = sender->dev;
- u32 hs_gen_ctrl_reg = sender->mipi_hs_gen_ctrl_reg;
- u32 lp_gen_ctrl_reg = sender->mipi_lp_gen_ctrl_reg;
- u32 gen_ctrl_val = 0;
- struct mdfld_dsi_gen_short_pkg *short_pkg = &pkg->pkg.short_pkg;
-
- gen_ctrl_val |= short_pkg->cmd << MCS_COMMANDS_POS;
- gen_ctrl_val |= 0 << DCS_CHANNEL_NUMBER_POS;
- gen_ctrl_val |= pkg->pkg_type;
- gen_ctrl_val |= short_pkg->param << MCS_PARAMETER_POS;
-
- if (pkg->transmission_type == MDFLD_DSI_HS_TRANSMISSION) {
- /* wait for hs fifo empty */
- /* wait_for_hs_fifos_empty(sender); */
- /* Send pkg */
- REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
- } else if (pkg->transmission_type == MDFLD_DSI_LP_TRANSMISSION) {
- /* wait_for_lp_fifos_empty(sender); */
- /* Send pkg*/
- REG_WRITE(lp_gen_ctrl_reg, gen_ctrl_val);
- } else {
- dev_err(dev->dev, "Unknown transmission type %d\n",
- pkg->transmission_type);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int __send_long_pkg(struct mdfld_dsi_pkg_sender *sender,
- struct mdfld_dsi_pkg *pkg)
-{
- struct drm_device *dev = sender->dev;
- u32 hs_gen_ctrl_reg = sender->mipi_hs_gen_ctrl_reg;
- u32 hs_gen_data_reg = sender->mipi_hs_gen_data_reg;
- u32 lp_gen_ctrl_reg = sender->mipi_lp_gen_ctrl_reg;
- u32 lp_gen_data_reg = sender->mipi_lp_gen_data_reg;
- u32 gen_ctrl_val = 0;
- u32 *dp;
- int i;
- struct mdfld_dsi_gen_long_pkg *long_pkg = &pkg->pkg.long_pkg;
-
- dp = long_pkg->data;
-
- /*
- * Set up word count for long pkg
- * FIXME: double check word count field.
- * currently, using the byte counts of the payload as the word count.
- * ------------------------------------------------------------
- * | DI | WC | ECC| PAYLOAD |CHECKSUM|
- * ------------------------------------------------------------
- */
- gen_ctrl_val |= (long_pkg->len << 2) << WORD_COUNTS_POS;
- gen_ctrl_val |= 0 << DCS_CHANNEL_NUMBER_POS;
- gen_ctrl_val |= pkg->pkg_type;
-
- if (pkg->transmission_type == MDFLD_DSI_HS_TRANSMISSION) {
- /* Wait for hs ctrl and data fifos to be empty */
- /* wait_for_hs_fifos_empty(sender); */
- for (i = 0; i < long_pkg->len; i++)
- REG_WRITE(hs_gen_data_reg, *(dp + i));
- REG_WRITE(hs_gen_ctrl_reg, gen_ctrl_val);
- } else if (pkg->transmission_type == MDFLD_DSI_LP_TRANSMISSION) {
- /* wait_for_lp_fifos_empty(sender); */
- for (i = 0; i < long_pkg->len; i++)
- REG_WRITE(lp_gen_data_reg, *(dp + i));
- REG_WRITE(lp_gen_ctrl_reg, gen_ctrl_val);
- } else {
- dev_err(dev->dev, "Unknown transmission type %d\n",
- pkg->transmission_type);
- return -EINVAL;
- }
-
- return 0;
-
-}
-
-static int send_mcs_short_pkg(struct mdfld_dsi_pkg_sender *sender,
- struct mdfld_dsi_pkg *pkg)
-{
- return __send_short_pkg(sender, pkg);
-}
-
-static int send_mcs_long_pkg(struct mdfld_dsi_pkg_sender *sender,
- struct mdfld_dsi_pkg *pkg)
-{
- return __send_long_pkg(sender, pkg);
-}
-
-static int send_gen_short_pkg(struct mdfld_dsi_pkg_sender *sender,
- struct mdfld_dsi_pkg *pkg)
-{
- return __send_short_pkg(sender, pkg);
-}
-
-static int send_gen_long_pkg(struct mdfld_dsi_pkg_sender *sender,
- struct mdfld_dsi_pkg *pkg)
-{
- return __send_long_pkg(sender, pkg);
-}
-
-static int send_pkg_prepare(struct mdfld_dsi_pkg_sender *sender,
- struct mdfld_dsi_pkg *pkg)
-{
- u8 cmd;
- u8 *data;
-
- switch (pkg->pkg_type) {
- case MDFLD_DSI_PKG_DCS:
- cmd = pkg->pkg.dcs_pkg.cmd;
- break;
- case MDFLD_DSI_PKG_MCS_SHORT_WRITE_0:
- case MDFLD_DSI_PKG_MCS_SHORT_WRITE_1:
- cmd = pkg->pkg.short_pkg.cmd;
- break;
- case MDFLD_DSI_PKG_MCS_LONG_WRITE:
- data = (u8 *)pkg->pkg.long_pkg.data;
- cmd = *data;
- break;
- default:
- return 0;
- }
-
- /* This prevents other package sending while doing msleep */
- sender->status = MDFLD_DSI_PKG_SENDER_BUSY;
-
- /* Check panel mode v.s. sending command */
- if ((sender->panel_mode & MDFLD_DSI_PANEL_MODE_SLEEP) &&
- cmd != exit_sleep_mode) {
- dev_err(sender->dev->dev,
- "sending 0x%x when panel sleep in\n", cmd);
- sender->status = MDFLD_DSI_PKG_SENDER_FREE;
- return -EINVAL;
- }
-
- /* Wait 120 milliseconds in case exit_sleep_mode was just sent */
- if (cmd == DCS_ENTER_SLEEP_MODE) {
- /*TODO: replace it with msleep later*/
- mdelay(120);
- }
- return 0;
-}
-
-static int send_pkg_done(struct mdfld_dsi_pkg_sender *sender,
- struct mdfld_dsi_pkg *pkg)
-{
- u8 cmd;
- u8 *data;
-
- switch (pkg->pkg_type) {
- case MDFLD_DSI_PKG_DCS:
- cmd = pkg->pkg.dcs_pkg.cmd;
- break;
- case MDFLD_DSI_PKG_MCS_SHORT_WRITE_0:
- case MDFLD_DSI_PKG_MCS_SHORT_WRITE_1:
- cmd = pkg->pkg.short_pkg.cmd;
- break;
- case MDFLD_DSI_PKG_MCS_LONG_WRITE:
- data = (u8 *)pkg->pkg.long_pkg.data;
- cmd = *data;
- break;
- default:
- return 0;
- }
-
- /* Update panel status */
- if (cmd == DCS_ENTER_SLEEP_MODE) {
- sender->panel_mode |= MDFLD_DSI_PANEL_MODE_SLEEP;
- /*TODO: replace it with msleep later*/
- mdelay(120);
- } else if (cmd == DCS_EXIT_SLEEP_MODE) {
- sender->panel_mode &= ~MDFLD_DSI_PANEL_MODE_SLEEP;
- /*TODO: replace it with msleep later*/
- mdelay(120);
- } else if (unlikely(cmd == DCS_SOFT_RESET)) {
- /*TODO: replace it with msleep later*/
- mdelay(5);
- }
- sender->status = MDFLD_DSI_PKG_SENDER_FREE;
- return 0;
-
-}
-
-static int do_send_pkg(struct mdfld_dsi_pkg_sender *sender,
- struct mdfld_dsi_pkg *pkg)
-{
- int ret;
-
- if (sender->status == MDFLD_DSI_PKG_SENDER_BUSY) {
- dev_err(sender->dev->dev, "sender is busy\n");
- return -EAGAIN;
- }
-
- ret = send_pkg_prepare(sender, pkg);
- if (ret) {
- dev_err(sender->dev->dev, "send_pkg_prepare error\n");
- return ret;
- }
-
- switch (pkg->pkg_type) {
- case MDFLD_DSI_PKG_DCS:
- ret = send_dcs_pkg(sender, pkg);
- break;
- case MDFLD_DSI_PKG_GEN_SHORT_WRITE_0:
- case MDFLD_DSI_PKG_GEN_SHORT_WRITE_1:
- case MDFLD_DSI_PKG_GEN_SHORT_WRITE_2:
- case MDFLD_DSI_PKG_GEN_READ_0:
- case MDFLD_DSI_PKG_GEN_READ_1:
- case MDFLD_DSI_PKG_GEN_READ_2:
- ret = send_gen_short_pkg(sender, pkg);
- break;
- case MDFLD_DSI_PKG_GEN_LONG_WRITE:
- ret = send_gen_long_pkg(sender, pkg);
- break;
- case MDFLD_DSI_PKG_MCS_SHORT_WRITE_0:
- case MDFLD_DSI_PKG_MCS_SHORT_WRITE_1:
- case MDFLD_DSI_PKG_MCS_READ:
- ret = send_mcs_short_pkg(sender, pkg);
- break;
- case MDFLD_DSI_PKG_MCS_LONG_WRITE:
- ret = send_mcs_long_pkg(sender, pkg);
- break;
- default:
- dev_err(sender->dev->dev, "Invalid pkg type 0x%x\n",
- pkg->pkg_type);
- ret = -EINVAL;
- }
- send_pkg_done(sender, pkg);
- return ret;
-}
-
-static int send_pkg(struct mdfld_dsi_pkg_sender *sender,
- struct mdfld_dsi_pkg *pkg)
-{
-	int err;
-
- /* Handle DSI error */
- err = dsi_error_handler(sender);
- if (err) {
- dev_err(sender->dev->dev, "Error handling failed\n");
- err = -EAGAIN;
- goto send_pkg_err;
- }
-
- /* Send pkg */
- err = do_send_pkg(sender, pkg);
- if (err) {
-		dev_err(sender->dev->dev, "sending pkg failed\n");
- err = -EAGAIN;
- goto send_pkg_err;
- }
-
- /* FIXME: should I query complete and fifo empty here? */
-send_pkg_err:
- return err;
-}
-
-static struct mdfld_dsi_pkg *pkg_sender_get_pkg_locked(
- struct mdfld_dsi_pkg_sender *sender)
-{
- struct mdfld_dsi_pkg *pkg;
-
- if (list_empty(&sender->free_list)) {
- dev_err(sender->dev->dev, "No free pkg left\n");
- return NULL;
- }
- pkg = list_first_entry(&sender->free_list, struct mdfld_dsi_pkg, entry);
- /* Detach from free list */
- list_del_init(&pkg->entry);
- return pkg;
-}
-
-static void pkg_sender_put_pkg_locked(struct mdfld_dsi_pkg_sender *sender,
- struct mdfld_dsi_pkg *pkg)
-{
- memset(pkg, 0, sizeof(struct mdfld_dsi_pkg));
- INIT_LIST_HEAD(&pkg->entry);
- list_add_tail(&pkg->entry, &sender->free_list);
-}
-
-static int mdfld_dbi_cb_init(struct mdfld_dsi_pkg_sender *sender,
- struct psb_gtt *pg, int pipe)
-{
- unsigned long phys;
- void *virt_addr = NULL;
-
- switch (pipe) {
- case 0:
- /* FIXME: Doesn't this collide with stolen space ? */
- phys = pg->gtt_phys_start - 0x1000;
- break;
- case 2:
- phys = pg->gtt_phys_start - 0x800;
- break;
- default:
- dev_err(sender->dev->dev, "Unsupported channel %d\n", pipe);
- return -EINVAL;
- }
-
- virt_addr = ioremap_nocache(phys, 0x800);
- if (!virt_addr) {
- dev_err(sender->dev->dev, "Map DBI command buffer error\n");
- return -ENOMEM;
- }
- sender->dbi_cb_phy = phys;
- sender->dbi_cb_addr = virt_addr;
- return 0;
-}
-
-static void mdfld_dbi_cb_destroy(struct mdfld_dsi_pkg_sender *sender)
-{
- if (sender && sender->dbi_cb_addr)
- iounmap(sender->dbi_cb_addr);
-}
-
-static void pkg_sender_queue_pkg(struct mdfld_dsi_pkg_sender *sender,
- struct mdfld_dsi_pkg *pkg,
- int delay)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&sender->lock, flags);
-
- if (!delay) {
- send_pkg(sender, pkg);
- pkg_sender_put_pkg_locked(sender, pkg);
- } else {
- /* Queue it */
- list_add_tail(&pkg->entry, &sender->pkg_list);
- }
- spin_unlock_irqrestore(&sender->lock, flags);
-}
-
-static void process_pkg_list(struct mdfld_dsi_pkg_sender *sender)
-{
- struct mdfld_dsi_pkg *pkg;
- unsigned long flags;
-
- spin_lock_irqsave(&sender->lock, flags);
-
- while (!list_empty(&sender->pkg_list)) {
- pkg = list_first_entry(&sender->pkg_list,
- struct mdfld_dsi_pkg, entry);
- send_pkg(sender, pkg);
- list_del_init(&pkg->entry);
- pkg_sender_put_pkg_locked(sender, pkg);
- }
-
- spin_unlock_irqrestore(&sender->lock, flags);
-}
-
-static int mdfld_dsi_send_mcs_long(struct mdfld_dsi_pkg_sender *sender,
- u32 *data, u32 len, u8 transmission, int delay)
-{
- struct mdfld_dsi_pkg *pkg;
- unsigned long flags;
-
- spin_lock_irqsave(&sender->lock, flags);
- pkg = pkg_sender_get_pkg_locked(sender);
- spin_unlock_irqrestore(&sender->lock, flags);
-
- if (!pkg) {
- dev_err(sender->dev->dev, "No memory\n");
- return -ENOMEM;
- }
- pkg->pkg_type = MDFLD_DSI_PKG_MCS_LONG_WRITE;
- pkg->transmission_type = transmission;
- pkg->pkg.long_pkg.data = data;
- pkg->pkg.long_pkg.len = len;
- INIT_LIST_HEAD(&pkg->entry);
-
- pkg_sender_queue_pkg(sender, pkg, delay);
- return 0;
-}
-
-static int mdfld_dsi_send_mcs_short(struct mdfld_dsi_pkg_sender *sender,
- u8 cmd, u8 param, u8 param_num,
- u8 transmission,
- int delay)
-{
- struct mdfld_dsi_pkg *pkg;
- unsigned long flags;
-
- spin_lock_irqsave(&sender->lock, flags);
- pkg = pkg_sender_get_pkg_locked(sender);
- spin_unlock_irqrestore(&sender->lock, flags);
-
- if (!pkg) {
- dev_err(sender->dev->dev, "No memory\n");
- return -ENOMEM;
- }
-
- if (param_num) {
- pkg->pkg_type = MDFLD_DSI_PKG_MCS_SHORT_WRITE_1;
- pkg->pkg.short_pkg.param = param;
- } else {
- pkg->pkg_type = MDFLD_DSI_PKG_MCS_SHORT_WRITE_0;
- pkg->pkg.short_pkg.param = 0;
- }
- pkg->transmission_type = transmission;
- pkg->pkg.short_pkg.cmd = cmd;
- INIT_LIST_HEAD(&pkg->entry);
-
- pkg_sender_queue_pkg(sender, pkg, delay);
- return 0;
-}
-
-static int mdfld_dsi_send_gen_short(struct mdfld_dsi_pkg_sender *sender,
- u8 param0, u8 param1, u8 param_num,
- u8 transmission,
- int delay)
-{
- struct mdfld_dsi_pkg *pkg;
- unsigned long flags;
-
- spin_lock_irqsave(&sender->lock, flags);
- pkg = pkg_sender_get_pkg_locked(sender);
- spin_unlock_irqrestore(&sender->lock, flags);
-
- if (!pkg) {
- dev_err(sender->dev->dev, "No pkg memory\n");
- return -ENOMEM;
- }
-
- switch (param_num) {
- case 0:
- pkg->pkg_type = MDFLD_DSI_PKG_GEN_SHORT_WRITE_0;
- pkg->pkg.short_pkg.cmd = 0;
- pkg->pkg.short_pkg.param = 0;
- break;
- case 1:
- pkg->pkg_type = MDFLD_DSI_PKG_GEN_SHORT_WRITE_1;
- pkg->pkg.short_pkg.cmd = param0;
- pkg->pkg.short_pkg.param = 0;
- break;
- case 2:
- pkg->pkg_type = MDFLD_DSI_PKG_GEN_SHORT_WRITE_2;
- pkg->pkg.short_pkg.cmd = param0;
- pkg->pkg.short_pkg.param = param1;
- break;
- }
-
- pkg->transmission_type = transmission;
- INIT_LIST_HEAD(&pkg->entry);
-
- pkg_sender_queue_pkg(sender, pkg, delay);
- return 0;
-}
-
-static int mdfld_dsi_send_gen_long(struct mdfld_dsi_pkg_sender *sender,
- u32 *data, u32 len, u8 transmission, int delay)
-{
- struct mdfld_dsi_pkg *pkg;
- unsigned long flags;
-
- spin_lock_irqsave(&sender->lock, flags);
- pkg = pkg_sender_get_pkg_locked(sender);
- spin_unlock_irqrestore(&sender->lock, flags);
-
- if (!pkg) {
- dev_err(sender->dev->dev, "No pkg memory\n");
- return -ENOMEM;
- }
-
- pkg->pkg_type = MDFLD_DSI_PKG_GEN_LONG_WRITE;
- pkg->transmission_type = transmission;
- pkg->pkg.long_pkg.data = data;
- pkg->pkg.long_pkg.len = len;
-
- INIT_LIST_HEAD(&pkg->entry);
-
- pkg_sender_queue_pkg(sender, pkg, delay);
-
- return 0;
-}
-
-static int __read_panel_data(struct mdfld_dsi_pkg_sender *sender,
- struct mdfld_dsi_pkg *pkg,
- u32 *data,
- u16 len)
-{
- unsigned long flags;
- struct drm_device *dev = sender->dev;
- int i;
- u32 gen_data_reg;
- int retry = MDFLD_DSI_READ_MAX_COUNT;
- u8 transmission = pkg->transmission_type;
-
-	/*
-	 * Do the read:
-	 * 0) send out the generic read request
-	 * 1) poll the read data avail interrupt
-	 * 2) read the data
-	 */
- spin_lock_irqsave(&sender->lock, flags);
-
- REG_WRITE(sender->mipi_intr_stat_reg, 1 << 29);
-
- if ((REG_READ(sender->mipi_intr_stat_reg) & (1 << 29)))
-		DRM_ERROR("Cannot clear the read data valid interrupt\n");
-
- /*send out read request*/
- send_pkg(sender, pkg);
-
- pkg_sender_put_pkg_locked(sender, pkg);
-
- /*polling read data avail interrupt*/
- while (retry && !(REG_READ(sender->mipi_intr_stat_reg) & (1 << 29))) {
- udelay(100);
- retry--;
- }
-
- if (!retry) {
- spin_unlock_irqrestore(&sender->lock, flags);
- return -ETIMEDOUT;
- }
-
- REG_WRITE(sender->mipi_intr_stat_reg, (1 << 29));
-
- /*read data*/
- if (transmission == MDFLD_DSI_HS_TRANSMISSION)
- gen_data_reg = sender->mipi_hs_gen_data_reg;
- else if (transmission == MDFLD_DSI_LP_TRANSMISSION)
- gen_data_reg = sender->mipi_lp_gen_data_reg;
- else {
- DRM_ERROR("Unknown transmission");
- spin_unlock_irqrestore(&sender->lock, flags);
- return -EINVAL;
- }
-
-	for (i = 0; i < len; i++)
- *(data + i) = REG_READ(gen_data_reg);
-
- spin_unlock_irqrestore(&sender->lock, flags);
-
- return 0;
-}
-
-static int mdfld_dsi_read_gen(struct mdfld_dsi_pkg_sender *sender,
- u8 param0,
- u8 param1,
- u8 param_num,
- u32 *data,
- u16 len,
- u8 transmission)
-{
- struct mdfld_dsi_pkg *pkg;
- unsigned long flags;
-
- spin_lock_irqsave(&sender->lock, flags);
-
- pkg = pkg_sender_get_pkg_locked(sender);
-
-	spin_unlock_irqrestore(&sender->lock, flags);
-
- if (!pkg) {
- dev_err(sender->dev->dev, "No pkg memory\n");
- return -ENOMEM;
- }
-
- switch (param_num) {
- case 0:
- pkg->pkg_type = MDFLD_DSI_PKG_GEN_READ_0;
- pkg->pkg.short_pkg.cmd = 0;
- pkg->pkg.short_pkg.param = 0;
- break;
- case 1:
- pkg->pkg_type = MDFLD_DSI_PKG_GEN_READ_1;
- pkg->pkg.short_pkg.cmd = param0;
- pkg->pkg.short_pkg.param = 0;
- break;
- case 2:
- pkg->pkg_type = MDFLD_DSI_PKG_GEN_READ_2;
- pkg->pkg.short_pkg.cmd = param0;
- pkg->pkg.short_pkg.param = param1;
- break;
- }
-
- pkg->transmission_type = transmission;
-
- INIT_LIST_HEAD(&pkg->entry);
-
- return __read_panel_data(sender, pkg, data, len);
-}
-
-static int mdfld_dsi_read_mcs(struct mdfld_dsi_pkg_sender *sender,
- u8 cmd,
- u32 *data,
- u16 len,
- u8 transmission)
-{
- struct mdfld_dsi_pkg *pkg;
- unsigned long flags;
-
- spin_lock_irqsave(&sender->lock, flags);
-
- pkg = pkg_sender_get_pkg_locked(sender);
-
- spin_unlock_irqrestore(&sender->lock, flags);
-
- if (!pkg) {
- dev_err(sender->dev->dev, "No pkg memory\n");
- return -ENOMEM;
- }
-
- pkg->pkg_type = MDFLD_DSI_PKG_MCS_READ;
- pkg->pkg.short_pkg.cmd = cmd;
- pkg->pkg.short_pkg.param = 0;
-
- pkg->transmission_type = transmission;
-
- INIT_LIST_HEAD(&pkg->entry);
-
- return __read_panel_data(sender, pkg, data, len);
-}
-
-void dsi_controller_dbi_init(struct mdfld_dsi_config * dsi_config, int pipe)
-{
- struct drm_device * dev = dsi_config->dev;
- u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
- int lane_count = dsi_config->lane_count;
- u32 val = 0;
-
- /*un-ready device*/
- REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000000);
-
- /*init dsi adapter before kicking off*/
- REG_WRITE((MIPIA_CONTROL_REG + reg_offset), 0x00000018);
-
- /*TODO: figure out how to setup these registers*/
- REG_WRITE((MIPIA_DPHY_PARAM_REG + reg_offset), 0x150c3408);
- REG_WRITE((MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG + reg_offset), 0x000a0014);
- REG_WRITE((MIPIA_DBI_BW_CTRL_REG + reg_offset), 0x00000400);
- REG_WRITE((MIPIA_DBI_FIFO_THROTTLE_REG + reg_offset), 0x00000001);
- REG_WRITE((MIPIA_HS_LS_DBI_ENABLE_REG + reg_offset), 0x00000000);
-
- /*enable all interrupts*/
- REG_WRITE((MIPIA_INTR_EN_REG + reg_offset), 0xffffffff);
- /*max value: 20 clock cycles of txclkesc*/
- REG_WRITE((MIPIA_TURN_AROUND_TIMEOUT_REG + reg_offset), 0x0000001f);
- /*min 21 txclkesc, max: ffffh*/
- REG_WRITE((MIPIA_DEVICE_RESET_TIMER_REG + reg_offset), 0x0000ffff);
- /*min: 7d0 max: 4e20*/
- REG_WRITE((MIPIA_INIT_COUNT_REG + reg_offset), 0x00000fa0);
-
- /*set up max return packet size*/
- REG_WRITE((MIPIA_MAX_RETURN_PACK_SIZE_REG + reg_offset),
- MDFLD_DSI_MAX_RETURN_PACKET_SIZE);
-
- /*set up func_prg*/
- val |= lane_count;
- val |= (dsi_config->channel_num << DSI_DBI_VIRT_CHANNEL_OFFSET);
- val |= DSI_DBI_COLOR_FORMAT_OPTION2;
- REG_WRITE((MIPIA_DSI_FUNC_PRG_REG + reg_offset), val);
-
- REG_WRITE((MIPIA_HS_TX_TIMEOUT_REG + reg_offset), 0x3fffff);
- REG_WRITE((MIPIA_LP_RX_TIMEOUT_REG + reg_offset), 0xffff);
-
- REG_WRITE((MIPIA_HIGH_LOW_SWITCH_COUNT_REG + reg_offset), 0x46);
- REG_WRITE((MIPIA_EOT_DISABLE_REG + reg_offset), 0x00000000);
- REG_WRITE((MIPIA_LP_BYTECLK_REG + reg_offset), 0x00000004);
- REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000001);
-}
-
-void dsi_controller_dpi_init(struct mdfld_dsi_config * dsi_config, int pipe)
-{
- struct drm_device * dev = dsi_config->dev;
- u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
- int lane_count = dsi_config->lane_count;
- struct mdfld_dsi_dpi_timing dpi_timing;
- struct drm_display_mode * mode = dsi_config->mode;
- u32 val = 0;
-
- /*un-ready device*/
- REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000000);
-
- /*init dsi adapter before kicking off*/
- REG_WRITE((MIPIA_CONTROL_REG + reg_offset), 0x00000018);
-
- /*enable all interrupts*/
- REG_WRITE((MIPIA_INTR_EN_REG + reg_offset), 0xffffffff);
-
- /*set up func_prg*/
- val |= lane_count;
- val |= dsi_config->channel_num << DSI_DPI_VIRT_CHANNEL_OFFSET;
-
- switch(dsi_config->bpp) {
- case 16:
- val |= DSI_DPI_COLOR_FORMAT_RGB565;
- break;
- case 18:
- val |= DSI_DPI_COLOR_FORMAT_RGB666;
- break;
- case 24:
- val |= DSI_DPI_COLOR_FORMAT_RGB888;
- break;
- default:
- DRM_ERROR("unsupported color format, bpp = %d\n", dsi_config->bpp);
- }
-
- REG_WRITE((MIPIA_DSI_FUNC_PRG_REG + reg_offset), val);
-
- REG_WRITE((MIPIA_HS_TX_TIMEOUT_REG + reg_offset),
- (mode->vtotal * mode->htotal * dsi_config->bpp / (8 * lane_count)) & DSI_HS_TX_TIMEOUT_MASK);
- REG_WRITE((MIPIA_LP_RX_TIMEOUT_REG + reg_offset), 0xffff & DSI_LP_RX_TIMEOUT_MASK);
-
- /*max value: 20 clock cycles of txclkesc*/
- REG_WRITE((MIPIA_TURN_AROUND_TIMEOUT_REG + reg_offset), 0x14 & DSI_TURN_AROUND_TIMEOUT_MASK);
-
- /*min 21 txclkesc, max: ffffh*/
- REG_WRITE((MIPIA_DEVICE_RESET_TIMER_REG + reg_offset), 0xffff & DSI_RESET_TIMER_MASK);
-
- REG_WRITE((MIPIA_DPI_RESOLUTION_REG + reg_offset), mode->vdisplay << 16 | mode->hdisplay);
-
- /*set DPI timing registers*/
- mdfld_dsi_dpi_timing_calculation(mode, &dpi_timing, dsi_config->lane_count, dsi_config->bpp);
-
- REG_WRITE((MIPIA_HSYNC_COUNT_REG + reg_offset), dpi_timing.hsync_count & DSI_DPI_TIMING_MASK);
- REG_WRITE((MIPIA_HBP_COUNT_REG + reg_offset), dpi_timing.hbp_count & DSI_DPI_TIMING_MASK);
- REG_WRITE((MIPIA_HFP_COUNT_REG + reg_offset), dpi_timing.hfp_count & DSI_DPI_TIMING_MASK);
- REG_WRITE((MIPIA_HACTIVE_COUNT_REG + reg_offset), dpi_timing.hactive_count & DSI_DPI_TIMING_MASK);
- REG_WRITE((MIPIA_VSYNC_COUNT_REG + reg_offset), dpi_timing.vsync_count & DSI_DPI_TIMING_MASK);
- REG_WRITE((MIPIA_VBP_COUNT_REG + reg_offset), dpi_timing.vbp_count & DSI_DPI_TIMING_MASK);
- REG_WRITE((MIPIA_VFP_COUNT_REG + reg_offset), dpi_timing.vfp_count & DSI_DPI_TIMING_MASK);
-
- REG_WRITE((MIPIA_HIGH_LOW_SWITCH_COUNT_REG + reg_offset), 0x46);
-
- /*min: 7d0 max: 4e20*/
- REG_WRITE((MIPIA_INIT_COUNT_REG + reg_offset), 0x000007d0);
-
- /*set up video mode*/
- val = dsi_config->video_mode | DSI_DPI_COMPLETE_LAST_LINE;
- REG_WRITE((MIPIA_VIDEO_MODE_FORMAT_REG + reg_offset), val);
-
- REG_WRITE((MIPIA_EOT_DISABLE_REG + reg_offset), 0x00000000);
-
- REG_WRITE((MIPIA_LP_BYTECLK_REG + reg_offset), 0x00000004);
-
- /*TODO: figure out how to setup these registers*/
- REG_WRITE((MIPIA_DPHY_PARAM_REG + reg_offset), 0x150c3408);
-
- REG_WRITE((MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG + reg_offset), (0xa << 16) | 0x14);
-
- /*set device ready*/
- REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000001);
-}
-
-static void dsi_controller_init(struct mdfld_dsi_config * dsi_config, int pipe)
-{
- if (!dsi_config || ((pipe != 0) && (pipe != 2))) {
- DRM_ERROR("Invalid parameters\n");
- return;
- }
-
- if (dsi_config->type == MDFLD_DSI_ENCODER_DPI)
- dsi_controller_dpi_init(dsi_config, pipe);
- else if (dsi_config->type == MDFLD_DSI_ENCODER_DBI)
- dsi_controller_dbi_init(dsi_config, pipe);
- else
- DRM_ERROR("Bad DSI encoder type\n");
-}
-
-void mdfld_dsi_cmds_kick_out(struct mdfld_dsi_pkg_sender *sender)
-{
- process_pkg_list(sender);
-}
-
-int mdfld_dsi_send_dcs(struct mdfld_dsi_pkg_sender *sender,
- u8 dcs, u8 *param, u32 param_num, u8 data_src,
- int delay)
-{
-	struct mdfld_dsi_pkg *pkg;
-	u32 cb_phy;
-	struct drm_device *dev;
-	u32 index = 0;
-	u8 *cb;
-	unsigned long flags;
-	int retry;
-	u8 *dst = NULL;
-	u32 len;
-
-	/* Check sender before dereferencing it */
-	if (!sender) {
-		WARN_ON(1);
-		return -EINVAL;
-	}
-
-	cb_phy = sender->dbi_cb_phy;
-	dev = sender->dev;
-	cb = (u8 *)sender->dbi_cb_addr;
-
- if (!sender->dbi_pkg_support) {
- dev_err(dev->dev, "No DBI pkg sending on this sender\n");
- return -ENOTSUPP;
- }
-
- if (param_num > MDFLD_MAX_DCS_PARAM) {
- dev_err(dev->dev, "Sender only supports up to %d DCS params\n",
- MDFLD_MAX_DCS_PARAM);
- return -EINVAL;
- }
-
- /*
- * If dcs is write_mem_start, send it directly using DSI adapter
- * interface
- */
- if (dcs == DCS_WRITE_MEM_START) {
- if (!spin_trylock(&sender->lock))
- return -EAGAIN;
-
-		/*
-		 * Query whether the DBI FIFO is empty;
-		 * if not, wait for it to become empty.
-		 */
- retry = MDFLD_DSI_DBI_FIFO_TIMEOUT;
- while (retry &&
- !(REG_READ(sender->mipi_gen_fifo_stat_reg) & (1 << 27))) {
- udelay(500);
- retry--;
- }
-
- /* If DBI FIFO timeout, drop this frame */
- if (!retry) {
- spin_unlock(&sender->lock);
- return 0;
- }
-
- *(cb + (index++)) = write_mem_start;
-
- REG_WRITE(sender->mipi_cmd_len_reg, 1);
- REG_WRITE(sender->mipi_cmd_addr_reg,
- cb_phy | (1 << 0) | (1 << 1));
-
- retry = MDFLD_DSI_DBI_FIFO_TIMEOUT;
- while (retry &&
- (REG_READ(sender->mipi_cmd_addr_reg) & (1 << 0))) {
- udelay(1);
- retry--;
- }
-
- spin_unlock(&sender->lock);
- return 0;
- }
-
- /* Get a free pkg */
- spin_lock_irqsave(&sender->lock, flags);
- pkg = pkg_sender_get_pkg_locked(sender);
- spin_unlock_irqrestore(&sender->lock, flags);
-
- if (!pkg) {
-		dev_err(dev->dev, "No pkg memory\n");
- return -ENOMEM;
- }
-
- dst = pkg->pkg.dcs_pkg.param;
- memcpy(dst, param, param_num);
-
- pkg->pkg_type = MDFLD_DSI_PKG_DCS;
- pkg->transmission_type = MDFLD_DSI_DCS;
- pkg->pkg.dcs_pkg.cmd = dcs;
- pkg->pkg.dcs_pkg.param_num = param_num;
- pkg->pkg.dcs_pkg.data_src = data_src;
-
- INIT_LIST_HEAD(&pkg->entry);
-
- if (param_num == 0)
- return mdfld_dsi_send_mcs_short_hs(sender, dcs, 0, 0, delay);
- else if (param_num == 1)
- return mdfld_dsi_send_mcs_short_hs(sender, dcs,
- param[0], 1, delay);
- else if (param_num > 1) {
- len = (param_num + 1) / 4;
- if ((param_num + 1) % 4)
- len++;
- return mdfld_dsi_send_mcs_long_hs(sender,
- (u32 *)&pkg->pkg.dcs_pkg, len, delay);
- }
- return 0;
-}
-
-int mdfld_dsi_send_mcs_short_hs(struct mdfld_dsi_pkg_sender *sender,
- u8 cmd, u8 param, u8 param_num, int delay)
-{
- if (!sender) {
- WARN_ON(1);
- return -EINVAL;
- }
- return mdfld_dsi_send_mcs_short(sender, cmd, param, param_num,
- MDFLD_DSI_HS_TRANSMISSION, delay);
-}
-
-int mdfld_dsi_send_mcs_short_lp(struct mdfld_dsi_pkg_sender *sender,
- u8 cmd, u8 param, u8 param_num, int delay)
-{
- if (!sender) {
- WARN_ON(1);
- return -EINVAL;
- }
- return mdfld_dsi_send_mcs_short(sender, cmd, param, param_num,
- MDFLD_DSI_LP_TRANSMISSION, delay);
-}
-
-int mdfld_dsi_send_mcs_long_hs(struct mdfld_dsi_pkg_sender *sender,
- u32 *data,
- u32 len,
- int delay)
-{
- if (!sender || !data || !len) {
- DRM_ERROR("Invalid parameters\n");
- return -EINVAL;
- }
- return mdfld_dsi_send_mcs_long(sender, data, len,
- MDFLD_DSI_HS_TRANSMISSION, delay);
-}
-
-int mdfld_dsi_send_mcs_long_lp(struct mdfld_dsi_pkg_sender *sender,
- u32 *data,
- u32 len,
- int delay)
-{
- if (!sender || !data || !len) {
- WARN_ON(1);
- return -EINVAL;
- }
- return mdfld_dsi_send_mcs_long(sender, data, len,
- MDFLD_DSI_LP_TRANSMISSION, delay);
-}
-
-int mdfld_dsi_send_gen_short_hs(struct mdfld_dsi_pkg_sender *sender,
- u8 param0, u8 param1, u8 param_num, int delay)
-{
- if (!sender) {
- WARN_ON(1);
- return -EINVAL;
- }
- return mdfld_dsi_send_gen_short(sender, param0, param1, param_num,
- MDFLD_DSI_HS_TRANSMISSION, delay);
-}
-
-int mdfld_dsi_send_gen_short_lp(struct mdfld_dsi_pkg_sender *sender,
- u8 param0, u8 param1, u8 param_num, int delay)
-{
- if (!sender || param_num < 0 || param_num > 2) {
- WARN_ON(1);
- return -EINVAL;
- }
- return mdfld_dsi_send_gen_short(sender, param0, param1, param_num,
- MDFLD_DSI_LP_TRANSMISSION, delay);
-}
-
-int mdfld_dsi_send_gen_long_hs(struct mdfld_dsi_pkg_sender *sender,
- u32 *data,
- u32 len,
- int delay)
-{
- if (!sender || !data || !len) {
- WARN_ON(1);
- return -EINVAL;
- }
- return mdfld_dsi_send_gen_long(sender, data, len,
- MDFLD_DSI_HS_TRANSMISSION, delay);
-}
-
-int mdfld_dsi_send_gen_long_lp(struct mdfld_dsi_pkg_sender *sender,
- u32 *data,
- u32 len,
- int delay)
-{
- if (!sender || !data || !len) {
- WARN_ON(1);
- return -EINVAL;
- }
- return mdfld_dsi_send_gen_long(sender, data, len,
- MDFLD_DSI_LP_TRANSMISSION, delay);
-}
-
-int mdfld_dsi_read_gen_hs(struct mdfld_dsi_pkg_sender *sender,
- u8 param0,
- u8 param1,
- u8 param_num,
- u32 *data,
- u16 len)
-{
-	if (!sender || !data || !len || param_num > 2) {
- DRM_ERROR("Invalid parameters\n");
- return -EINVAL;
- }
-
- return mdfld_dsi_read_gen(sender, param0, param1, param_num,
- data, len, MDFLD_DSI_HS_TRANSMISSION);
-
-}
-
-int mdfld_dsi_read_gen_lp(struct mdfld_dsi_pkg_sender *sender,
- u8 param0,
- u8 param1,
- u8 param_num,
- u32 *data,
- u16 len)
-{
-	if (!sender || !data || !len || param_num > 2) {
- DRM_ERROR("Invalid parameters\n");
- return -EINVAL;
- }
-
- return mdfld_dsi_read_gen(sender, param0, param1, param_num,
- data, len, MDFLD_DSI_LP_TRANSMISSION);
-}
-
-int mdfld_dsi_read_mcs_hs(struct mdfld_dsi_pkg_sender *sender,
- u8 cmd,
- u32 *data,
- u16 len)
-{
- if (!sender || !data || !len) {
- DRM_ERROR("Invalid parameters\n");
- return -EINVAL;
- }
-
- return mdfld_dsi_read_mcs(sender, cmd, data, len,
- MDFLD_DSI_HS_TRANSMISSION);
-}
-
-int mdfld_dsi_read_mcs_lp(struct mdfld_dsi_pkg_sender *sender,
- u8 cmd,
- u32 *data,
- u16 len)
-{
- if (!sender || !data || !len) {
- WARN_ON(1);
- return -EINVAL;
- }
-
- return mdfld_dsi_read_mcs(sender, cmd, data, len,
- MDFLD_DSI_LP_TRANSMISSION);
-}
-
-int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
- int pipe)
-{
-	int ret;
-	struct mdfld_dsi_pkg_sender *pkg_sender;
-	struct mdfld_dsi_config *dsi_config;
-	struct drm_device *dev;
-	struct drm_psb_private *dev_priv;
-	struct psb_gtt *pg;
-	int i;
-	struct mdfld_dsi_pkg *pkg, *tmp;
-	u32 mipi_val = 0;
-
-	/* Check dsi_connector before dereferencing it */
-	if (!dsi_connector) {
-		WARN_ON(1);
-		return -EINVAL;
-	}
-
-	dsi_config = mdfld_dsi_get_config(dsi_connector);
-	dev = dsi_config->dev;
-	dev_priv = dev->dev_private;
-	pg = &dev_priv->gtt;
-
- pkg_sender = dsi_connector->pkg_sender;
-
- if (!pkg_sender || IS_ERR(pkg_sender)) {
- pkg_sender = kzalloc(sizeof(struct mdfld_dsi_pkg_sender),
- GFP_KERNEL);
- if (!pkg_sender) {
- dev_err(dev->dev, "Create DSI pkg sender failed\n");
- return -ENOMEM;
- }
-
- dsi_connector->pkg_sender = (void *)pkg_sender;
- }
-
- pkg_sender->dev = dev;
- pkg_sender->dsi_connector = dsi_connector;
- pkg_sender->pipe = pipe;
- pkg_sender->pkg_num = 0;
- pkg_sender->panel_mode = 0;
- pkg_sender->status = MDFLD_DSI_PKG_SENDER_FREE;
-
- /* Init dbi command buffer*/
-
- if (dsi_config->type == MDFLD_DSI_ENCODER_DBI) {
- pkg_sender->dbi_pkg_support = 1;
- ret = mdfld_dbi_cb_init(pkg_sender, pg, pipe);
- if (ret) {
- dev_err(dev->dev, "DBI command buffer map failed\n");
- goto mapping_err;
- }
- }
-
- /* Init regs */
- if (pipe == 0) {
- pkg_sender->dpll_reg = MRST_DPLL_A;
- pkg_sender->dspcntr_reg = DSPACNTR;
- pkg_sender->pipeconf_reg = PIPEACONF;
- pkg_sender->dsplinoff_reg = DSPALINOFF;
- pkg_sender->dspsurf_reg = DSPASURF;
- pkg_sender->pipestat_reg = PIPEASTAT;
-
- pkg_sender->mipi_intr_stat_reg = MIPIA_INTR_STAT_REG;
- pkg_sender->mipi_lp_gen_data_reg = MIPIA_LP_GEN_DATA_REG;
- pkg_sender->mipi_hs_gen_data_reg = MIPIA_HS_GEN_DATA_REG;
- pkg_sender->mipi_lp_gen_ctrl_reg = MIPIA_LP_GEN_CTRL_REG;
- pkg_sender->mipi_hs_gen_ctrl_reg = MIPIA_HS_GEN_CTRL_REG;
- pkg_sender->mipi_gen_fifo_stat_reg = MIPIA_GEN_FIFO_STAT_REG;
- pkg_sender->mipi_data_addr_reg = MIPIA_DATA_ADD_REG;
- pkg_sender->mipi_data_len_reg = MIPIA_DATA_LEN_REG;
- pkg_sender->mipi_cmd_addr_reg = MIPIA_CMD_ADD_REG;
- pkg_sender->mipi_cmd_len_reg = MIPIA_CMD_LEN_REG;
- } else if (pipe == 2) {
- pkg_sender->dpll_reg = MRST_DPLL_A;
- pkg_sender->dspcntr_reg = DSPCCNTR;
- pkg_sender->pipeconf_reg = PIPECCONF;
- pkg_sender->dsplinoff_reg = DSPCLINOFF;
- pkg_sender->dspsurf_reg = DSPCSURF;
- pkg_sender->pipestat_reg = PIPECSTAT;
-
- pkg_sender->mipi_intr_stat_reg =
- MIPIA_INTR_STAT_REG + MIPIC_REG_OFFSET;
- pkg_sender->mipi_lp_gen_data_reg =
- MIPIA_LP_GEN_DATA_REG + MIPIC_REG_OFFSET;
- pkg_sender->mipi_hs_gen_data_reg =
- MIPIA_HS_GEN_DATA_REG + MIPIC_REG_OFFSET;
- pkg_sender->mipi_lp_gen_ctrl_reg =
- MIPIA_LP_GEN_CTRL_REG + MIPIC_REG_OFFSET;
- pkg_sender->mipi_hs_gen_ctrl_reg =
- MIPIA_HS_GEN_CTRL_REG + MIPIC_REG_OFFSET;
- pkg_sender->mipi_gen_fifo_stat_reg =
- MIPIA_GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
- pkg_sender->mipi_data_addr_reg =
- MIPIA_DATA_ADD_REG + MIPIC_REG_OFFSET;
- pkg_sender->mipi_data_len_reg =
- MIPIA_DATA_LEN_REG + MIPIC_REG_OFFSET;
- pkg_sender->mipi_cmd_addr_reg =
- MIPIA_CMD_ADD_REG + MIPIC_REG_OFFSET;
- pkg_sender->mipi_cmd_len_reg =
- MIPIA_CMD_LEN_REG + MIPIC_REG_OFFSET;
- }
-
- /* Init pkg list */
- INIT_LIST_HEAD(&pkg_sender->pkg_list);
- INIT_LIST_HEAD(&pkg_sender->free_list);
-
- spin_lock_init(&pkg_sender->lock);
-
- /* Allocate free pkg pool */
- for (i = 0; i < MDFLD_MAX_PKG_NUM; i++) {
- pkg = kzalloc(sizeof(struct mdfld_dsi_pkg), GFP_KERNEL);
- if (!pkg) {
-			dev_err(dev->dev, "Out of memory allocating pkg pool\n");
- ret = -ENOMEM;
- goto pkg_alloc_err;
- }
- INIT_LIST_HEAD(&pkg->entry);
- list_add_tail(&pkg->entry, &pkg_sender->free_list);
- }
-
-	/*
-	 * For video mode, don't enable the DPI timing output here;
-	 * it will be initialized during mode setting.
-	 */
- if (dsi_config->type == MDFLD_DSI_ENCODER_DPI)
- mipi_val = PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX;
- else if (dsi_config->type == MDFLD_DSI_ENCODER_DBI)
- mipi_val = PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX
- | TE_TRIGGER_GPIO_PIN;
- else
- DRM_ERROR("Bad DSI encoder type\n");
-
- if (pipe == 0) {
- mipi_val |= 0x2;
- REG_WRITE(MIPI, mipi_val);
- REG_READ(MIPI);
- } else if (pipe == 2) {
- REG_WRITE(MIPI_C, mipi_val);
- REG_READ(MIPI_C);
- }
-
- /*do dsi controller init*/
- dsi_controller_init(dsi_config, pipe);
-
- return 0;
-
-pkg_alloc_err:
- list_for_each_entry_safe(pkg, tmp, &pkg_sender->free_list, entry) {
- list_del(&pkg->entry);
- kfree(pkg);
- }
-
- /* Free mapped command buffer */
- mdfld_dbi_cb_destroy(pkg_sender);
-mapping_err:
- kfree(pkg_sender);
- dsi_connector->pkg_sender = NULL;
- return ret;
-}
-
-void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender)
-{
- struct mdfld_dsi_pkg *pkg, *tmp;
-
- if (!sender || IS_ERR(sender))
- return;
-
- /* Free pkg pool */
- list_for_each_entry_safe(pkg, tmp, &sender->free_list, entry) {
- list_del(&pkg->entry);
- kfree(pkg);
- }
- /* Free pkg list */
- list_for_each_entry_safe(pkg, tmp, &sender->pkg_list, entry) {
- list_del(&pkg->entry);
- kfree(pkg);
- }
- mdfld_dbi_cb_destroy(sender); /* free mapped command buffer */
- kfree(sender);
-}
diff --git a/drivers/staging/gma500/mdfld_dsi_pkg_sender.h b/drivers/staging/gma500/mdfld_dsi_pkg_sender.h
deleted file mode 100644
index f24abc7..0000000
--- a/drivers/staging/gma500/mdfld_dsi_pkg_sender.h
+++ /dev/null
@@ -1,184 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Jackie Li<yaodong.li@intel.com>
- */
-#ifndef __MDFLD_DSI_PKG_SENDER_H__
-#define __MDFLD_DSI_PKG_SENDER_H__
-
-#include <linux/kthread.h>
-
-#define MDFLD_MAX_DCS_PARAM 8
-#define MDFLD_MAX_PKG_NUM 2048
-
-enum {
- MDFLD_DSI_PKG_DCS,
- MDFLD_DSI_PKG_GEN_SHORT_WRITE_0 = 0x03,
- MDFLD_DSI_PKG_GEN_SHORT_WRITE_1 = 0x13,
- MDFLD_DSI_PKG_GEN_SHORT_WRITE_2 = 0x23,
- MDFLD_DSI_PKG_GEN_READ_0 = 0x04,
- MDFLD_DSI_PKG_GEN_READ_1 = 0x14,
- MDFLD_DSI_PKG_GEN_READ_2 = 0x24,
- MDFLD_DSI_PKG_GEN_LONG_WRITE = 0x29,
- MDFLD_DSI_PKG_MCS_SHORT_WRITE_0 = 0x05,
- MDFLD_DSI_PKG_MCS_SHORT_WRITE_1 = 0x15,
- MDFLD_DSI_PKG_MCS_READ = 0x06,
- MDFLD_DSI_PKG_MCS_LONG_WRITE = 0x39,
-};
-
-enum {
- MDFLD_DSI_LP_TRANSMISSION,
- MDFLD_DSI_HS_TRANSMISSION,
- MDFLD_DSI_DCS,
-};
-
-enum {
- MDFLD_DSI_PANEL_MODE_SLEEP = 0x1,
-};
-
-enum {
- MDFLD_DSI_PKG_SENDER_FREE = 0x0,
- MDFLD_DSI_PKG_SENDER_BUSY = 0x1,
-};
-
-enum {
- MDFLD_DSI_SEND_PACKAGE,
- MDFLD_DSI_QUEUE_PACKAGE,
-};
-
-struct mdfld_dsi_gen_short_pkg {
- u8 cmd;
- u8 param;
-};
-
-struct mdfld_dsi_gen_long_pkg {
- u32 *data;
- u32 len;
-};
-
-struct mdfld_dsi_dcs_pkg {
- u8 cmd;
- u8 param[MDFLD_MAX_DCS_PARAM];
- u32 param_num;
- u8 data_src;
-};
-
-struct mdfld_dsi_pkg {
- u8 pkg_type;
- u8 transmission_type;
-
- union {
- struct mdfld_dsi_gen_short_pkg short_pkg;
- struct mdfld_dsi_gen_long_pkg long_pkg;
- struct mdfld_dsi_dcs_pkg dcs_pkg;
- } pkg;
-
- struct list_head entry;
-};
-
-struct mdfld_dsi_pkg_sender {
- struct drm_device *dev;
- struct mdfld_dsi_connector *dsi_connector;
- u32 status;
-
- u32 panel_mode;
-
- int pipe;
-
- spinlock_t lock;
- struct list_head pkg_list;
- struct list_head free_list;
-
- u32 pkg_num;
-
- int dbi_pkg_support;
-
- u32 dbi_cb_phy;
- void *dbi_cb_addr;
-
- /* Registers */
- u32 dpll_reg;
- u32 dspcntr_reg;
- u32 pipeconf_reg;
- u32 pipestat_reg;
- u32 dsplinoff_reg;
- u32 dspsurf_reg;
-
- u32 mipi_intr_stat_reg;
- u32 mipi_lp_gen_data_reg;
- u32 mipi_hs_gen_data_reg;
- u32 mipi_lp_gen_ctrl_reg;
- u32 mipi_hs_gen_ctrl_reg;
- u32 mipi_gen_fifo_stat_reg;
- u32 mipi_data_addr_reg;
- u32 mipi_data_len_reg;
- u32 mipi_cmd_addr_reg;
- u32 mipi_cmd_len_reg;
-};
-
-/* DCS definitions */
-#define DCS_SOFT_RESET 0x01
-#define DCS_ENTER_SLEEP_MODE 0x10
-#define DCS_EXIT_SLEEP_MODE 0x11
-#define DCS_SET_DISPLAY_OFF 0x28
-#define DCS_SET_DISPLAY_ON 0x29
-#define DCS_SET_COLUMN_ADDRESS 0x2a
-#define DCS_SET_PAGE_ADDRESS 0x2b
-#define DCS_WRITE_MEM_START 0x2c
-#define DCS_SET_TEAR_OFF 0x34
-#define DCS_SET_TEAR_ON 0x35
-
-extern int mdfld_dsi_pkg_sender_init(struct mdfld_dsi_connector *dsi_connector,
- int pipe);
-extern void mdfld_dsi_pkg_sender_destroy(struct mdfld_dsi_pkg_sender *sender);
-extern int mdfld_dsi_send_dcs(struct mdfld_dsi_pkg_sender *sender, u8 dcs,
- u8 *param, u32 param_num, u8 data_src, int delay);
-extern int mdfld_dsi_send_mcs_short_hs(struct mdfld_dsi_pkg_sender *sender,
- u8 cmd, u8 param, u8 param_num, int delay);
-extern int mdfld_dsi_send_mcs_short_lp(struct mdfld_dsi_pkg_sender *sender,
- u8 cmd, u8 param, u8 param_num, int delay);
-extern int mdfld_dsi_send_mcs_long_hs(struct mdfld_dsi_pkg_sender *sender,
- u32 *data, u32 len, int delay);
-extern int mdfld_dsi_send_mcs_long_lp(struct mdfld_dsi_pkg_sender *sender,
- u32 *data, u32 len, int delay);
-extern int mdfld_dsi_send_gen_short_hs(struct mdfld_dsi_pkg_sender *sender,
- u8 param0, u8 param1, u8 param_num, int delay);
-extern int mdfld_dsi_send_gen_short_lp(struct mdfld_dsi_pkg_sender *sender,
- u8 param0, u8 param1, u8 param_num, int delay);
-extern int mdfld_dsi_send_gen_long_hs(struct mdfld_dsi_pkg_sender *sender,
- u32 *data, u32 len, int delay);
-extern int mdfld_dsi_send_gen_long_lp(struct mdfld_dsi_pkg_sender *sender,
- u32 *data, u32 len, int delay);
-
-extern int mdfld_dsi_read_gen_hs(struct mdfld_dsi_pkg_sender *sender,
- u8 param0, u8 param1, u8 param_num, u32 *data, u16 len);
-extern int mdfld_dsi_read_gen_lp(struct mdfld_dsi_pkg_sender *sender,
- u8 param0, u8 param1, u8 param_num, u32 *data, u16 len);
-extern int mdfld_dsi_read_mcs_hs(struct mdfld_dsi_pkg_sender *sender,
- u8 cmd, u32 *data, u16 len);
-extern int mdfld_dsi_read_mcs_lp(struct mdfld_dsi_pkg_sender *sender,
- u8 cmd, u32 *data, u16 len);
-
-extern void mdfld_dsi_cmds_kick_out(struct mdfld_dsi_pkg_sender *sender);
-
-#endif /* __MDFLD_DSI_PKG_SENDER_H__ */
diff --git a/drivers/staging/gma500/mdfld_intel_display.c b/drivers/staging/gma500/mdfld_intel_display.c
deleted file mode 100644
index 8eb827e..0000000
--- a/drivers/staging/gma500/mdfld_intel_display.c
+++ /dev/null
@@ -1,1404 +0,0 @@
-/*
- * Copyright © 2006-2011 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
- */
-
-#include "framebuffer.h"
-#include "psb_intel_display.h"
-#include "mdfld_dsi_dbi.h"
-#include "mdfld_dsi_dpi.h"
-#include "mdfld_dsi_dbi_dpu.h"
-
-#include <linux/pm_runtime.h>
-
-#ifdef MIN
-#undef MIN
-#endif
-
-#define MIN(x, y) (((x) < (y)) ? (x) : (y))
-
-/* Hardcoded currently */
-static int ksel = KSEL_CRYSTAL_19;
-
-extern void mdfld_save_display(struct drm_device *dev);
-extern bool gbgfxsuspended;
-
-struct psb_intel_range_t {
- int min, max;
-};
-
-struct mdfld_limit_t {
- struct psb_intel_range_t dot, m, p1;
-};
-
-struct mdfld_intel_clock_t {
- /* given values */
- int n;
- int m1, m2;
- int p1, p2;
- /* derived values */
- int dot;
- int vco;
- int m;
- int p;
-};
-
-
-
-#define COUNT_MAX 0x10000000
-
-void mdfldWaitForPipeDisable(struct drm_device *dev, int pipe)
-{
- int count, temp;
- u32 pipeconf_reg = PIPEACONF;
-
- switch (pipe) {
- case 0:
- break;
- case 1:
- pipeconf_reg = PIPEBCONF;
- break;
- case 2:
- pipeconf_reg = PIPECCONF;
- break;
- default:
- DRM_ERROR("Illegal Pipe Number. \n");
- return;
- }
-
- /* FIXME JLIU7_PO */
- psb_intel_wait_for_vblank(dev);
- return;
-
-	/* Wait for the pipe disable to take effect. */
- for (count = 0; count < COUNT_MAX; count++) {
- temp = REG_READ(pipeconf_reg);
- if ((temp & PIPEACONF_PIPE_STATE) == 0)
- break;
- }
-}
-
-void mdfldWaitForPipeEnable(struct drm_device *dev, int pipe)
-{
- int count, temp;
- u32 pipeconf_reg = PIPEACONF;
-
- switch (pipe) {
- case 0:
- break;
- case 1:
- pipeconf_reg = PIPEBCONF;
- break;
- case 2:
- pipeconf_reg = PIPECCONF;
- break;
- default:
- dev_err(dev->dev, "Illegal Pipe Number.\n");
- return;
- }
-
- /* FIXME JLIU7_PO */
- psb_intel_wait_for_vblank(dev);
- return;
-
-	/* Wait for the pipe enable to take effect. */
- for (count = 0; count < COUNT_MAX; count++) {
- temp = REG_READ(pipeconf_reg);
- if ((temp & PIPEACONF_PIPE_STATE) == 1)
- break;
- }
-}
-
-
-static int mdfld_intel_crtc_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file_priv,
- uint32_t handle,
- uint32_t width, uint32_t height)
-{
- struct drm_device *dev = crtc->dev;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_intel_crtc->pipe;
- uint32_t control = CURACNTR;
- uint32_t base = CURABASE;
- uint32_t temp;
- size_t addr = 0;
- struct gtt_range *gt;
- struct drm_gem_object *obj;
- int ret;
-
- switch (pipe) {
- case 0:
- break;
- case 1:
- control = CURBCNTR;
- base = CURBBASE;
- break;
- case 2:
- control = CURCCNTR;
- base = CURCBASE;
- break;
- default:
- dev_err(dev->dev, "Illegal Pipe Number. \n");
- return -EINVAL;
- }
-
-#if 1 /* FIXME_JLIU7 can't enable cursorB/C, HW issue. Need to remove after HW fix */
- if (pipe != 0)
- return 0;
-#endif
-	/* If we want to turn off the cursor, ignore width and height */
- if (!handle) {
- dev_dbg(dev->dev, "cursor off\n");
- /* turn off the cursor */
- temp = 0;
- temp |= CURSOR_MODE_DISABLE;
-
- if (gma_power_begin(dev, true)) {
- REG_WRITE(control, temp);
- REG_WRITE(base, 0);
- gma_power_end(dev);
- }
- /* Unpin the old GEM object */
- if (psb_intel_crtc->cursor_obj) {
- gt = container_of(psb_intel_crtc->cursor_obj,
- struct gtt_range, gem);
- psb_gtt_unpin(gt);
- drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
- psb_intel_crtc->cursor_obj = NULL;
- }
- return 0;
- }
-
- /* Currently we only support 64x64 cursors */
- if (width != 64 || height != 64) {
- DRM_ERROR("we currently only support 64x64 cursors\n");
- return -EINVAL;
- }
-
- obj = drm_gem_object_lookup(dev, file_priv, handle);
- if (!obj)
- return -ENOENT;
-
- if (obj->size < width * height * 4) {
-		dev_dbg(dev->dev, "buffer is too small\n");
- return -ENOMEM;
- }
-
- gt = container_of(obj, struct gtt_range, gem);
-
- /* Pin the memory into the GTT */
- ret = psb_gtt_pin(gt);
- if (ret) {
- dev_err(dev->dev, "Can not pin down handle 0x%x\n", handle);
- return ret;
- }
-
-
- addr = gt->offset; /* Or resource.start ??? */
-
- psb_intel_crtc->cursor_addr = addr;
-
- temp = 0;
- /* set the pipe for the cursor */
- temp |= (pipe << 28);
- temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
-
- if (gma_power_begin(dev, true)) {
- REG_WRITE(control, temp);
- REG_WRITE(base, addr);
- gma_power_end(dev);
- }
- /* unpin the old GEM object */
- if (psb_intel_crtc->cursor_obj) {
- gt = container_of(psb_intel_crtc->cursor_obj,
- struct gtt_range, gem);
- psb_gtt_unpin(gt);
- drm_gem_object_unreference(psb_intel_crtc->cursor_obj);
- psb_intel_crtc->cursor_obj = obj;
- }
- return 0;
-}
-
-static int mdfld_intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private * dev_priv = (struct drm_psb_private *)dev->dev_private;
- struct mdfld_dbi_dpu_info *dpu_info = dev_priv->dbi_dpu_info;
- struct psb_drm_dpu_rect rect;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_intel_crtc->pipe;
- uint32_t pos = CURAPOS;
- uint32_t base = CURABASE;
- uint32_t temp = 0;
- uint32_t addr;
-
- switch (pipe) {
- case 0:
- if (dpu_info) {
- rect.x = x;
- rect.y = y;
-
- mdfld_dbi_dpu_report_damage(dev, MDFLD_CURSORA, &rect);
- mdfld_dpu_exit_dsr(dev);
- } else if (!(dev_priv->dsr_fb_update & MDFLD_DSR_CURSOR_0))
- mdfld_dsi_dbi_exit_dsr(dev, MDFLD_DSR_CURSOR_0);
- break;
- case 1:
- pos = CURBPOS;
- base = CURBBASE;
- break;
- case 2:
- if (dpu_info) {
- mdfld_dbi_dpu_report_damage(dev, MDFLD_CURSORC, &rect);
- mdfld_dpu_exit_dsr(dev);
- } else if (!(dev_priv->dsr_fb_update & MDFLD_DSR_CURSOR_2))
- mdfld_dsi_dbi_exit_dsr(dev, MDFLD_DSR_CURSOR_2);
- pos = CURCPOS;
- base = CURCBASE;
- break;
- default:
- DRM_ERROR("Illegal Pipe Number. \n");
- return -EINVAL;
- }
-
-#if 1 /* FIXME_JLIU7 can't enable cursorB/C HW issue. need to remove after HW fix */
- if (pipe != 0)
- return 0;
-#endif
- if (x < 0) {
- temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT);
- x = -x;
- }
- if (y < 0) {
- temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT);
- y = -y;
- }
-
- temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT);
- temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT);
-
- addr = psb_intel_crtc->cursor_addr;
-
- if (gma_power_begin(dev, true)) {
- REG_WRITE(pos, temp);
- REG_WRITE(base, addr);
- gma_power_end(dev);
- }
-
- return 0;
-}
-
-const struct drm_crtc_funcs mdfld_intel_crtc_funcs = {
- .cursor_set = mdfld_intel_crtc_cursor_set,
- .cursor_move = mdfld_intel_crtc_cursor_move,
- .gamma_set = psb_intel_crtc_gamma_set,
- .set_config = drm_crtc_helper_set_config,
- .destroy = psb_intel_crtc_destroy,
-};
-
-static struct drm_device globle_dev;
-
-void mdfld__intel_plane_set_alpha(int enable)
-{
- struct drm_device *dev = &globle_dev;
- int dspcntr_reg = DSPACNTR;
- u32 dspcntr;
-
- dspcntr = REG_READ(dspcntr_reg);
-
- if (enable) {
- dspcntr &= ~DISPPLANE_32BPP_NO_ALPHA;
- dspcntr |= DISPPLANE_32BPP;
- } else {
- dspcntr &= ~DISPPLANE_32BPP;
- dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
- }
-
- REG_WRITE(dspcntr_reg, dspcntr);
-}
-
-int mdfld__intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb)
-{
- struct drm_device *dev = crtc->dev;
- /* struct drm_i915_master_private *master_priv; */
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- struct psb_framebuffer *psbfb = to_psb_fb(crtc->fb);
- int pipe = psb_intel_crtc->pipe;
- unsigned long start, offset;
- int dsplinoff = DSPALINOFF;
- int dspsurf = DSPASURF;
- int dspstride = DSPASTRIDE;
- int dspcntr_reg = DSPACNTR;
- u32 dspcntr;
- int ret = 0;
-
- memcpy(&globle_dev, dev, sizeof(struct drm_device));
-
- if (!gma_power_begin(dev, true))
- return 0;
-
- /* no fb bound */
- if (!crtc->fb) {
- dev_err(dev->dev, "No FB bound\n");
- goto psb_intel_pipe_cleaner;
- }
-
- switch (pipe) {
- case 0:
- dsplinoff = DSPALINOFF;
- break;
- case 1:
- dsplinoff = DSPBLINOFF;
- dspsurf = DSPBSURF;
- dspstride = DSPBSTRIDE;
- dspcntr_reg = DSPBCNTR;
- break;
- case 2:
- dsplinoff = DSPCLINOFF;
- dspsurf = DSPCSURF;
- dspstride = DSPCSTRIDE;
- dspcntr_reg = DSPCCNTR;
- break;
- default:
- dev_err(dev->dev, "Illegal Pipe Number.\n");
- return -EINVAL;
- }
-
- ret = psb_gtt_pin(psbfb->gtt);
- if (ret < 0)
- goto psb_intel_pipe_set_base_exit;
-
- start = psbfb->gtt->offset;
- offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
-
- REG_WRITE(dspstride, crtc->fb->pitch);
- dspcntr = REG_READ(dspcntr_reg);
- dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
-
- switch (crtc->fb->bits_per_pixel) {
- case 8:
- dspcntr |= DISPPLANE_8BPP;
- break;
- case 16:
- if (crtc->fb->depth == 15)
- dspcntr |= DISPPLANE_15_16BPP;
- else
- dspcntr |= DISPPLANE_16BPP;
- break;
- case 24:
- case 32:
- dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
- break;
- default:
- dev_err(dev->dev, "Unknown color depth\n");
- ret = -EINVAL;
- goto psb_intel_pipe_set_base_exit;
- }
- REG_WRITE(dspcntr_reg, dspcntr);
-
- dev_dbg(dev->dev, "Writing base %08lX %08lX %d %d\n",
- start, offset, x, y);
-
- REG_WRITE(dsplinoff, offset);
- REG_READ(dsplinoff);
- REG_WRITE(dspsurf, start);
- REG_READ(dspsurf);
-
-psb_intel_pipe_cleaner:
- /* If there was a previous display we can now unpin it */
- if (old_fb)
- psb_gtt_unpin(to_psb_fb(old_fb)->gtt);
-
-psb_intel_pipe_set_base_exit:
- gma_power_end(dev);
- return ret;
-}
-
-/**
- * Disable the pipe, plane and pll.
- *
- */
-void mdfld_disable_crtc (struct drm_device *dev, int pipe)
-{
- int dpll_reg = MRST_DPLL_A;
- int dspcntr_reg = DSPACNTR;
- int dspbase_reg = MRST_DSPABASE;
- int pipeconf_reg = PIPEACONF;
- u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
- u32 temp;
-
- switch (pipe) {
- case 0:
- break;
- case 1:
- dpll_reg = MDFLD_DPLL_B;
- dspcntr_reg = DSPBCNTR;
- dspbase_reg = DSPBSURF;
- pipeconf_reg = PIPEBCONF;
- break;
- case 2:
- dpll_reg = MRST_DPLL_A;
- dspcntr_reg = DSPCCNTR;
- dspbase_reg = MDFLD_DSPCBASE;
- pipeconf_reg = PIPECCONF;
- gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
- break;
- default:
- dev_err(dev->dev, "Illegal Pipe Number. \n");
- return;
- }
-
- if (pipe != 1)
- mdfld_dsi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
-
- /* Disable display plane */
- temp = REG_READ(dspcntr_reg);
- if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(dspcntr_reg,
- temp & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
- REG_READ(dspbase_reg);
- }
-
- /* FIXME_JLIU7 MDFLD_PO revisit */
- /* Wait for vblank for the disable to take effect */
-/* MDFLD_PO_JLIU7 psb_intel_wait_for_vblank(dev); */
-
- /* Next, disable display pipes */
- temp = REG_READ(pipeconf_reg);
- if ((temp & PIPEACONF_ENABLE) != 0) {
- temp &= ~PIPEACONF_ENABLE;
- temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
- REG_WRITE(pipeconf_reg, temp);
- REG_READ(pipeconf_reg);
-
-		/* Wait for the pipe disable to take effect. */
- mdfldWaitForPipeDisable(dev, pipe);
- }
-
- temp = REG_READ(dpll_reg);
- if (temp & DPLL_VCO_ENABLE) {
- if (((pipe != 1) && !((REG_READ(PIPEACONF) | REG_READ(PIPECCONF)) & PIPEACONF_ENABLE))
- || (pipe == 1)){
- temp &= ~(DPLL_VCO_ENABLE);
- REG_WRITE(dpll_reg, temp);
- REG_READ(dpll_reg);
- /* Wait for the clocks to turn off. */
- /* FIXME_MDFLD PO may need more delay */
- udelay(500);
-
- if (!(temp & MDFLD_PWR_GATE_EN)) {
- /* gating power of DPLL */
- REG_WRITE(dpll_reg, temp | MDFLD_PWR_GATE_EN);
- /* FIXME_MDFLD PO - change 500 to 1 after PO */
- udelay(5000);
- }
- }
- }
-
-}
-
-/**
- * Sets the power management mode of the pipe and plane.
- *
- * This code should probably grow support for turning the cursor off and back
- * on appropriately at the same time as we're turning the pipe off/on.
- */
-static void mdfld_crtc_dpms(struct drm_crtc *crtc, int mode)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- int pipe = psb_intel_crtc->pipe;
- int dpll_reg = MRST_DPLL_A;
- int dspcntr_reg = DSPACNTR;
- int dspbase_reg = MRST_DSPABASE;
- int pipeconf_reg = PIPEACONF;
- u32 pipestat_reg = PIPEASTAT;
- u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
- u32 pipeconf = dev_priv->pipeconf;
- u32 dspcntr = dev_priv->dspcntr;
- u32 mipi_enable_reg = MIPIA_DEVICE_READY_REG;
- u32 temp;
- bool enabled;
- int timeout = 0;
-
- if (!gma_power_begin(dev, true))
- return;
-
- /* Ignore if system is already in DSR and in suspended state. */
-	if (/* gbgfxsuspended */ 0 && dev_priv->dispstatus == false && mode == 3) {
-		if (dev_priv->rpm_enabled && pipe == 1) {
-			/* dev_priv->is_mipi_on = false; */
-			pm_request_idle(&dev->pdev->dev);
-		}
-		return;
-	} else if (mode == 0) {
-		/*
-		 * No need to set gbdispstatus = true in the crtc; it is set
-		 * in the encoder, e.g. in mdfld_dsi_dbi_dpms.
-		 */
-		/* gbdispstatus = true; */
-	}
-
-/* FIXME_JLIU7 MDFLD_PO replaced w/ the following function */
-/* mdfld_dbi_dpms (struct drm_device *dev, int pipe, bool enabled) */
-
- switch (pipe) {
- case 0:
- break;
- case 1:
- dpll_reg = DPLL_B;
- dspcntr_reg = DSPBCNTR;
- dspbase_reg = MRST_DSPBBASE;
- pipeconf_reg = PIPEBCONF;
- pipeconf = dev_priv->pipeconf1;
- dspcntr = dev_priv->dspcntr1;
- dpll_reg = MDFLD_DPLL_B;
- break;
- case 2:
- dpll_reg = MRST_DPLL_A;
- dspcntr_reg = DSPCCNTR;
- dspbase_reg = MDFLD_DSPCBASE;
- pipeconf_reg = PIPECCONF;
- pipestat_reg = PIPECSTAT;
- pipeconf = dev_priv->pipeconf2;
- dspcntr = dev_priv->dspcntr2;
- gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
- mipi_enable_reg = MIPIA_DEVICE_READY_REG + MIPIC_REG_OFFSET;
- break;
- default:
- dev_err(dev->dev, "Illegal Pipe Number.\n");
- return;
- }
-
- /* XXX: When our outputs are all unaware of DPMS modes other than off
- * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
- */
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- case DRM_MODE_DPMS_STANDBY:
- case DRM_MODE_DPMS_SUSPEND:
- /* Enable the DPLL */
- temp = REG_READ(dpll_reg);
-
- if ((temp & DPLL_VCO_ENABLE) == 0) {
-			/* When ungating the DPLL power, wait 0.5us before enabling the VCO */
- if (temp & MDFLD_PWR_GATE_EN) {
- temp &= ~MDFLD_PWR_GATE_EN;
- REG_WRITE(dpll_reg, temp);
- /* FIXME_MDFLD PO - change 500 to 1 after PO */
- udelay(500);
- }
-
- REG_WRITE(dpll_reg, temp);
- REG_READ(dpll_reg);
- /* FIXME_MDFLD PO - change 500 to 1 after PO */
- udelay(500);
-
- REG_WRITE(dpll_reg, temp | DPLL_VCO_ENABLE);
- REG_READ(dpll_reg);
-
- /**
- * wait for DSI PLL to lock
- * NOTE: only need to poll status of pipe 0 and pipe 1,
- * since both MIPI pipes share the same PLL.
- */
- while ((pipe != 2) && (timeout < 20000) && !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
- udelay(150);
- timeout ++;
- }
- }
-
- /* Enable the plane */
- temp = REG_READ(dspcntr_reg);
- if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
- REG_WRITE(dspcntr_reg,
- temp | DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
- }
-
- /* Enable the pipe */
- temp = REG_READ(pipeconf_reg);
- if ((temp & PIPEACONF_ENABLE) == 0) {
- REG_WRITE(pipeconf_reg, pipeconf);
-
-			/* Wait for the pipe enable to take effect. */
- mdfldWaitForPipeEnable(dev, pipe);
- }
-
- /*workaround for sighting 3741701 Random X blank display*/
- /*perform w/a in video mode only on pipe A or C*/
- if ((pipe == 0 || pipe == 2) &&
- (mdfld_panel_dpi(dev) == true)) {
- REG_WRITE(pipestat_reg, REG_READ(pipestat_reg));
- msleep(100);
- if(PIPE_VBLANK_STATUS & REG_READ(pipestat_reg)) {
- printk(KERN_ALERT "OK");
- } else {
- printk(KERN_ALERT "STUCK!!!!");
- /*shutdown controller*/
- temp = REG_READ(dspcntr_reg);
- REG_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
- /*mdfld_dsi_dpi_shut_down(dev, pipe);*/
- REG_WRITE(0xb048, 1);
- msleep(100);
- temp = REG_READ(pipeconf_reg);
- temp &= ~PIPEACONF_ENABLE;
- REG_WRITE(pipeconf_reg, temp);
- msleep(100); /*wait for pipe disable*/
- /*printk(KERN_ALERT "70008 is %x\n", REG_READ(0x70008));
- printk(KERN_ALERT "b074 is %x\n", REG_READ(0xb074));*/
- REG_WRITE(mipi_enable_reg, 0);
- msleep(100);
- printk(KERN_ALERT "70008 is %x\n", REG_READ(0x70008));
- printk(KERN_ALERT "b074 is %x\n", REG_READ(0xb074));
- REG_WRITE(0xb004, REG_READ(0xb004));
- /* try to bring the controller back up again*/
- REG_WRITE(mipi_enable_reg, 1);
- temp = REG_READ(dspcntr_reg);
- REG_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
- /*mdfld_dsi_dpi_turn_on(dev, pipe);*/
- REG_WRITE(0xb048, 2);
- msleep(100);
- temp = REG_READ(pipeconf_reg);
- temp |= PIPEACONF_ENABLE;
- REG_WRITE(pipeconf_reg, temp);
- }
- }
-
- psb_intel_crtc_load_lut(crtc);
-
- /* Give the overlay scaler a chance to enable
- if it's on this pipe */
- /* psb_intel_crtc_dpms_video(crtc, true); TODO */
-
- break;
- case DRM_MODE_DPMS_OFF:
- /* Give the overlay scaler a chance to disable
- * if it's on this pipe */
- /* psb_intel_crtc_dpms_video(crtc, FALSE); TODO */
- if (pipe != 1)
- mdfld_dsi_gen_fifo_ready (dev, gen_fifo_stat_reg, HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
-
- /* Disable the VGA plane that we never use */
- REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
-
- /* Disable display plane */
- temp = REG_READ(dspcntr_reg);
- if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
- REG_WRITE(dspcntr_reg,
- temp & ~DISPLAY_PLANE_ENABLE);
- /* Flush the plane changes */
- REG_WRITE(dspbase_reg, REG_READ(dspbase_reg));
- REG_READ(dspbase_reg);
- }
-
- /* FIXME_JLIU7 MDFLD_PO revisit */
- /* Wait for vblank for the disable to take effect */
-// MDFLD_PO_JLIU7 psb_intel_wait_for_vblank(dev);
-
- /* Next, disable display pipes */
- temp = REG_READ(pipeconf_reg);
- if ((temp & PIPEACONF_ENABLE) != 0) {
- temp &= ~PIPEACONF_ENABLE;
- temp |= PIPECONF_PLANE_OFF | PIPECONF_CURSOR_OFF;
- REG_WRITE(pipeconf_reg, temp);
-// REG_WRITE(pipeconf_reg, 0);
- REG_READ(pipeconf_reg);
-
-			/* Wait for the pipe disable to take effect. */
- mdfldWaitForPipeDisable(dev, pipe);
- }
-
- temp = REG_READ(dpll_reg);
- if (temp & DPLL_VCO_ENABLE) {
- if (((pipe != 1) && !((REG_READ(PIPEACONF) | REG_READ(PIPECCONF)) & PIPEACONF_ENABLE))
- || (pipe == 1)){
- temp &= ~(DPLL_VCO_ENABLE);
- REG_WRITE(dpll_reg, temp);
- REG_READ(dpll_reg);
- /* Wait for the clocks to turn off. */
- /* FIXME_MDFLD PO may need more delay */
- udelay(500);
-#if 0 /* MDFLD_PO_JLIU7 */
- if (!(temp & MDFLD_PWR_GATE_EN)) {
- /* gating power of DPLL */
- REG_WRITE(dpll_reg, temp | MDFLD_PWR_GATE_EN);
- /* FIXME_MDFLD PO - change 500 to 1 after PO */
- udelay(5000);
- }
-#endif /* MDFLD_PO_JLIU7 */
- }
- }
- break;
- }
-
- enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
-
-#if 0 /* JB: Add vblank support later */
- if (enabled)
- dev_priv->vblank_pipe |= (1 << pipe);
- else
- dev_priv->vblank_pipe &= ~(1 << pipe);
-#endif
-
- gma_power_end(dev);
-}
-
-
-#define MDFLD_LIMT_DPLL_19 0
-#define MDFLD_LIMT_DPLL_25 1
-#define MDFLD_LIMT_DPLL_83 2
-#define MDFLD_LIMT_DPLL_100 3
-#define MDFLD_LIMT_DSIPLL_19 4
-#define MDFLD_LIMT_DSIPLL_25 5
-#define MDFLD_LIMT_DSIPLL_83 6
-#define MDFLD_LIMT_DSIPLL_100 7
-
-#define MDFLD_DOT_MIN 19750 /* FIXME_MDFLD JLIU7 need to find out min & max for MDFLD */
-#define MDFLD_DOT_MAX 120000
-#define MDFLD_DPLL_M_MIN_19 113
-#define MDFLD_DPLL_M_MAX_19 155
-#define MDFLD_DPLL_P1_MIN_19 2
-#define MDFLD_DPLL_P1_MAX_19 10
-#define MDFLD_DPLL_M_MIN_25 101
-#define MDFLD_DPLL_M_MAX_25 130
-#define MDFLD_DPLL_P1_MIN_25 2
-#define MDFLD_DPLL_P1_MAX_25 10
-#define MDFLD_DPLL_M_MIN_83 64
-#define MDFLD_DPLL_M_MAX_83 64
-#define MDFLD_DPLL_P1_MIN_83 2
-#define MDFLD_DPLL_P1_MAX_83 2
-#define MDFLD_DPLL_M_MIN_100 64
-#define MDFLD_DPLL_M_MAX_100 64
-#define MDFLD_DPLL_P1_MIN_100 2
-#define MDFLD_DPLL_P1_MAX_100 2
-#define MDFLD_DSIPLL_M_MIN_19 131
-#define MDFLD_DSIPLL_M_MAX_19 175
-#define MDFLD_DSIPLL_P1_MIN_19 3
-#define MDFLD_DSIPLL_P1_MAX_19 8
-#define MDFLD_DSIPLL_M_MIN_25 97
-#define MDFLD_DSIPLL_M_MAX_25 140
-#define MDFLD_DSIPLL_P1_MIN_25 3
-#define MDFLD_DSIPLL_P1_MAX_25 9
-#define MDFLD_DSIPLL_M_MIN_83 33
-#define MDFLD_DSIPLL_M_MAX_83 92
-#define MDFLD_DSIPLL_P1_MIN_83 2
-#define MDFLD_DSIPLL_P1_MAX_83 3
-#define MDFLD_DSIPLL_M_MIN_100 97
-#define MDFLD_DSIPLL_M_MAX_100 140
-#define MDFLD_DSIPLL_P1_MIN_100 3
-#define MDFLD_DSIPLL_P1_MAX_100 9
-
-static const struct mdfld_limit_t mdfld_limits[] = {
- { /* MDFLD_LIMT_DPLL_19 */
- .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
- .m = {.min = MDFLD_DPLL_M_MIN_19, .max = MDFLD_DPLL_M_MAX_19},
- .p1 = {.min = MDFLD_DPLL_P1_MIN_19, .max = MDFLD_DPLL_P1_MAX_19},
- },
- { /* MDFLD_LIMT_DPLL_25 */
- .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
- .m = {.min = MDFLD_DPLL_M_MIN_25, .max = MDFLD_DPLL_M_MAX_25},
- .p1 = {.min = MDFLD_DPLL_P1_MIN_25, .max = MDFLD_DPLL_P1_MAX_25},
- },
- { /* MDFLD_LIMT_DPLL_83 */
- .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
- .m = {.min = MDFLD_DPLL_M_MIN_83, .max = MDFLD_DPLL_M_MAX_83},
- .p1 = {.min = MDFLD_DPLL_P1_MIN_83, .max = MDFLD_DPLL_P1_MAX_83},
- },
- { /* MDFLD_LIMT_DPLL_100 */
- .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
- .m = {.min = MDFLD_DPLL_M_MIN_100, .max = MDFLD_DPLL_M_MAX_100},
- .p1 = {.min = MDFLD_DPLL_P1_MIN_100, .max = MDFLD_DPLL_P1_MAX_100},
- },
- { /* MDFLD_LIMT_DSIPLL_19 */
- .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
- .m = {.min = MDFLD_DSIPLL_M_MIN_19, .max = MDFLD_DSIPLL_M_MAX_19},
- .p1 = {.min = MDFLD_DSIPLL_P1_MIN_19, .max = MDFLD_DSIPLL_P1_MAX_19},
- },
- { /* MDFLD_LIMT_DSIPLL_25 */
- .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
- .m = {.min = MDFLD_DSIPLL_M_MIN_25, .max = MDFLD_DSIPLL_M_MAX_25},
- .p1 = {.min = MDFLD_DSIPLL_P1_MIN_25, .max = MDFLD_DSIPLL_P1_MAX_25},
- },
- { /* MDFLD_LIMT_DSIPLL_83 */
- .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
- .m = {.min = MDFLD_DSIPLL_M_MIN_83, .max = MDFLD_DSIPLL_M_MAX_83},
- .p1 = {.min = MDFLD_DSIPLL_P1_MIN_83, .max = MDFLD_DSIPLL_P1_MAX_83},
- },
- { /* MDFLD_LIMT_DSIPLL_100 */
- .dot = {.min = MDFLD_DOT_MIN, .max = MDFLD_DOT_MAX},
- .m = {.min = MDFLD_DSIPLL_M_MIN_100, .max = MDFLD_DSIPLL_M_MAX_100},
- .p1 = {.min = MDFLD_DSIPLL_P1_MIN_100, .max = MDFLD_DSIPLL_P1_MAX_100},
- },
-};
-
-#define MDFLD_M_MIN 21
-#define MDFLD_M_MAX 180
-static const u32 mdfld_m_converts[] = {
-/* M configuration table from 9-bit LFSR table */
- 224, 368, 440, 220, 366, 439, 219, 365, 182, 347, /* 21 - 30 */
- 173, 342, 171, 85, 298, 149, 74, 37, 18, 265, /* 31 - 40 */
- 388, 194, 353, 432, 216, 108, 310, 155, 333, 166, /* 41 - 50 */
- 83, 41, 276, 138, 325, 162, 337, 168, 340, 170, /* 51 - 60 */
- 341, 426, 469, 234, 373, 442, 221, 110, 311, 411, /* 61 - 70 */
- 461, 486, 243, 377, 188, 350, 175, 343, 427, 213, /* 71 - 80 */
- 106, 53, 282, 397, 354, 227, 113, 56, 284, 142, /* 81 - 90 */
- 71, 35, 273, 136, 324, 418, 465, 488, 500, 506, /* 91 - 100 */
- 253, 126, 63, 287, 399, 455, 483, 241, 376, 444, /* 101 - 110 */
- 478, 495, 503, 251, 381, 446, 479, 239, 375, 443, /* 111 - 120 */
- 477, 238, 119, 315, 157, 78, 295, 147, 329, 420, /* 121 - 130 */
- 210, 105, 308, 154, 77, 38, 275, 137, 68, 290, /* 131 - 140 */
- 145, 328, 164, 82, 297, 404, 458, 485, 498, 249, /* 141 - 150 */
- 380, 190, 351, 431, 471, 235, 117, 314, 413, 206, /* 151 - 160 */
- 103, 51, 25, 12, 262, 387, 193, 96, 48, 280, /* 161 - 170 */
- 396, 198, 99, 305, 152, 76, 294, 403, 457, 228, /* 171 - 180 */
-};
-
-static const struct mdfld_limit_t *mdfld_limit(struct drm_crtc *crtc)
-{
- const struct mdfld_limit_t *limit = NULL;
- struct drm_device *dev = crtc->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI)
- || psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_MIPI2)) {
- if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
- limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_19];
- else if (ksel == KSEL_BYPASS_25)
- limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_25];
- else if ((ksel == KSEL_BYPASS_83_100) && (dev_priv->core_freq == 166))
- limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_83];
- else if ((ksel == KSEL_BYPASS_83_100) &&
- (dev_priv->core_freq == 100 || dev_priv->core_freq == 200))
- limit = &mdfld_limits[MDFLD_LIMT_DSIPLL_100];
- } else if (psb_intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
- if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
- limit = &mdfld_limits[MDFLD_LIMT_DPLL_19];
- else if (ksel == KSEL_BYPASS_25)
- limit = &mdfld_limits[MDFLD_LIMT_DPLL_25];
- else if ((ksel == KSEL_BYPASS_83_100) && (dev_priv->core_freq == 166))
- limit = &mdfld_limits[MDFLD_LIMT_DPLL_83];
- else if ((ksel == KSEL_BYPASS_83_100) &&
- (dev_priv->core_freq == 100 || dev_priv->core_freq == 200))
- limit = &mdfld_limits[MDFLD_LIMT_DPLL_100];
- } else {
- limit = NULL;
-		dev_err(dev->dev, "mdfld_limit: wrong display type\n");
- }
-
- return limit;
-}
-
-/** Derive the pixel clock for the given refclk and divisors. */
-static void mdfld_clock(int refclk, struct mdfld_intel_clock_t *clock)
-{
- clock->dot = (refclk * clock->m) / clock->p1;
-}
-
-/**
- * Returns true and fills in a set of divisors for the desired target clock
- * with the given refclk, or false if no suitable divisors are found.
- * Divisor values are the actual divisors, not the register-encoded values.
- */
-static bool
-mdfldFindBestPLL(struct drm_crtc *crtc, int target, int refclk,
- struct mdfld_intel_clock_t *best_clock)
-{
- struct mdfld_intel_clock_t clock;
- const struct mdfld_limit_t *limit = mdfld_limit(crtc);
- int err = target;
-
- memset(best_clock, 0, sizeof(*best_clock));
-
- for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) {
- for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max;
- clock.p1++) {
- int this_err;
-
- mdfld_clock(refclk, &clock);
-
- this_err = abs(clock.dot - target);
- if (this_err < err) {
- *best_clock = clock;
- err = this_err;
- }
- }
- }
- return err != target;
-}
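mdfldFindBestPLL() above is a brute-force search: for every (m, p1) pair allowed by the selected limit table it computes dot = refclk * m / p1 (see mdfld_clock()) and keeps the pair with the smallest error against the target. A rough stand-alone sketch, hard-wired to the MDFLD_LIMT_DSIPLL_100 ranges shown earlier (M 97-140, P1 3-9) for illustration; with refclk = 100000 and target = 2474592 it settles on m = 99, p1 = 4 (dot = 2475000, error 408):

/* Sketch only: equivalent of the search loop above, limits hard-wired
 * to the MDFLD_LIMT_DSIPLL_100 ranges for illustration.
 */
static bool find_best_dsipll_100(int target, int refclk, int *best_m, int *best_p1)
{
	int m, p1, best_err = target;

	for (m = 97; m <= 140; m++) {
		for (p1 = 3; p1 <= 9; p1++) {
			int err = abs((refclk * m) / p1 - target);

			if (err < best_err) {
				best_err = err;
				*best_m = m;
				*best_p1 = p1;
			}
		}
	}
	return best_err != target;
}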
-
-/**
- * Return the pipe currently connected to the panel fitter,
- * or -1 if the panel fitter is not present or not in use
- */
-static int mdfld_panel_fitter_pipe(struct drm_device *dev)
-{
- u32 pfit_control;
-
- pfit_control = REG_READ(PFIT_CONTROL);
-
- /* See if the panel fitter is in use */
- if ((pfit_control & PFIT_ENABLE) == 0)
- return -1;
- return (pfit_control >> 29) & 3;
-}
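mdfld_panel_fitter_pipe() above reports the pipe from bits 30:29 of PFIT_CONTROL when the fitter is enabled. A tiny decode example, assuming the enable flag sits in bit 31 (the usual PFIT_ENABLE position; the register value below is made up):

/* Sketch only: decode a hypothetical PFIT_CONTROL value. */
u32 pfit_control = (1u << 31) | (1u << 29);	/* enabled, pipe select = 1 */
int pipe = (pfit_control & (1u << 31)) ? (int)((pfit_control >> 29) & 3) : -1;	/* -> 1 */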
-
-static int mdfld_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode,
- int x, int y,
- struct drm_framebuffer *old_fb)
-{
- struct drm_device *dev = crtc->dev;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- struct drm_psb_private *dev_priv = dev->dev_private;
- int pipe = psb_intel_crtc->pipe;
- int fp_reg = MRST_FPA0;
- int dpll_reg = MRST_DPLL_A;
- int dspcntr_reg = DSPACNTR;
- int pipeconf_reg = PIPEACONF;
- int htot_reg = HTOTAL_A;
- int hblank_reg = HBLANK_A;
- int hsync_reg = HSYNC_A;
- int vtot_reg = VTOTAL_A;
- int vblank_reg = VBLANK_A;
- int vsync_reg = VSYNC_A;
- int dspsize_reg = DSPASIZE;
- int dsppos_reg = DSPAPOS;
- int pipesrc_reg = PIPEASRC;
- u32 *pipeconf = &dev_priv->pipeconf;
- u32 *dspcntr = &dev_priv->dspcntr;
- int refclk = 0;
- int clk_n = 0, clk_p2 = 0, clk_byte = 1, clk = 0, m_conv = 0, clk_tmp = 0;
- struct mdfld_intel_clock_t clock;
- bool ok;
- u32 dpll = 0, fp = 0;
- bool is_crt = false, is_lvds = false, is_tv = false;
- bool is_mipi = false, is_mipi2 = false, is_hdmi = false;
- struct drm_mode_config *mode_config = &dev->mode_config;
- struct psb_intel_output *psb_intel_output = NULL;
- uint64_t scalingType = DRM_MODE_SCALE_FULLSCREEN;
- struct drm_encoder *encoder;
- struct drm_connector *connector;
- int timeout = 0;
-
- dev_dbg(dev->dev, "pipe = 0x%x \n", pipe);
-
- switch (pipe) {
- case 0:
- break;
- case 1:
- fp_reg = FPB0;
- dpll_reg = DPLL_B;
- dspcntr_reg = DSPBCNTR;
- pipeconf_reg = PIPEBCONF;
- htot_reg = HTOTAL_B;
- hblank_reg = HBLANK_B;
- hsync_reg = HSYNC_B;
- vtot_reg = VTOTAL_B;
- vblank_reg = VBLANK_B;
- vsync_reg = VSYNC_B;
- dspsize_reg = DSPBSIZE;
- dsppos_reg = DSPBPOS;
- pipesrc_reg = PIPEBSRC;
- pipeconf = &dev_priv->pipeconf1;
- dspcntr = &dev_priv->dspcntr1;
- fp_reg = MDFLD_DPLL_DIV0;
- dpll_reg = MDFLD_DPLL_B;
- break;
- case 2:
- dpll_reg = MRST_DPLL_A;
- dspcntr_reg = DSPCCNTR;
- pipeconf_reg = PIPECCONF;
- htot_reg = HTOTAL_C;
- hblank_reg = HBLANK_C;
- hsync_reg = HSYNC_C;
- vtot_reg = VTOTAL_C;
- vblank_reg = VBLANK_C;
- vsync_reg = VSYNC_C;
- dspsize_reg = DSPCSIZE;
- dsppos_reg = DSPCPOS;
- pipesrc_reg = PIPECSRC;
- pipeconf = &dev_priv->pipeconf2;
- dspcntr = &dev_priv->dspcntr2;
- break;
- default:
- DRM_ERROR("Illegal Pipe Number. \n");
- return 0;
- }
-
- dev_dbg(dev->dev, "adjusted_hdisplay = %d\n",
- adjusted_mode->hdisplay);
- dev_dbg(dev->dev, "adjusted_vdisplay = %d\n",
- adjusted_mode->vdisplay);
- dev_dbg(dev->dev, "adjusted_hsync_start = %d\n",
- adjusted_mode->hsync_start);
- dev_dbg(dev->dev, "adjusted_hsync_end = %d\n",
- adjusted_mode->hsync_end);
- dev_dbg(dev->dev, "adjusted_htotal = %d\n",
- adjusted_mode->htotal);
- dev_dbg(dev->dev, "adjusted_vsync_start = %d\n",
- adjusted_mode->vsync_start);
- dev_dbg(dev->dev, "adjusted_vsync_end = %d\n",
- adjusted_mode->vsync_end);
- dev_dbg(dev->dev, "adjusted_vtotal = %d\n",
- adjusted_mode->vtotal);
- dev_dbg(dev->dev, "adjusted_clock = %d\n",
- adjusted_mode->clock);
- dev_dbg(dev->dev, "hdisplay = %d\n",
- mode->hdisplay);
- dev_dbg(dev->dev, "vdisplay = %d\n",
- mode->vdisplay);
-
- if (!gma_power_begin(dev, true))
- return 0;
-
- memcpy(&psb_intel_crtc->saved_mode, mode, sizeof(struct drm_display_mode));
- memcpy(&psb_intel_crtc->saved_adjusted_mode, adjusted_mode, sizeof(struct drm_display_mode));
-
- list_for_each_entry(connector, &mode_config->connector_list, head) {
-
- encoder = connector->encoder;
-
-		if (!encoder)
- continue;
-
- if (encoder->crtc != crtc)
- continue;
-
- psb_intel_output = to_psb_intel_output(connector);
-
- dev_dbg(dev->dev, "output->type = 0x%x \n", psb_intel_output->type);
-
- switch (psb_intel_output->type) {
- case INTEL_OUTPUT_LVDS:
- is_lvds = true;
- break;
- case INTEL_OUTPUT_TVOUT:
- is_tv = true;
- break;
- case INTEL_OUTPUT_ANALOG:
- is_crt = true;
- break;
- case INTEL_OUTPUT_MIPI:
- is_mipi = true;
- break;
- case INTEL_OUTPUT_MIPI2:
- is_mipi2 = true;
- break;
- case INTEL_OUTPUT_HDMI:
- is_hdmi = true;
- break;
- }
- }
-
- /* Disable the VGA plane that we never use */
- REG_WRITE(VGACNTRL, VGA_DISP_DISABLE);
-
- /* Disable the panel fitter if it was on our pipe */
- if (mdfld_panel_fitter_pipe(dev) == pipe)
- REG_WRITE(PFIT_CONTROL, 0);
-
- /* pipesrc and dspsize control the size that is scaled from,
- * which should always be the user's requested size.
- */
- if (pipe == 1) {
- /* FIXME: To make HDMI display with 864x480 (TPO), 480x864 (PYR) or 480x854 (TMD), set the sprite
-		 * width/height and source image size registers with the adjusted mode for pipe B. */
-
- /* The defined sprite rectangle must always be completely contained within the displayable
- * area of the screen image (frame buffer). */
- REG_WRITE(dspsize_reg, ((MIN(mode->crtc_vdisplay, adjusted_mode->crtc_vdisplay) - 1) << 16)
- | (MIN(mode->crtc_hdisplay, adjusted_mode->crtc_hdisplay) - 1));
- /* Set the CRTC with encoder mode. */
- REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16)
- | (mode->crtc_vdisplay - 1));
- } else {
- REG_WRITE(dspsize_reg, ((mode->crtc_vdisplay - 1) << 16) | (mode->crtc_hdisplay - 1));
- REG_WRITE(pipesrc_reg, ((mode->crtc_hdisplay - 1) << 16) | (mode->crtc_vdisplay - 1));
- }
-
- REG_WRITE(dsppos_reg, 0);
-
- if (psb_intel_output)
- drm_connector_property_get_value(&psb_intel_output->base,
- dev->mode_config.scaling_mode_property, &scalingType);
-
- if (scalingType == DRM_MODE_SCALE_NO_SCALE) {
- /*
-		 * Medfield doesn't have register support for centering, so
-		 * we need to adjust the h/vblank and h/vsync start/end
-		 * values to center the image.
-		 */
- int offsetX = 0, offsetY = 0;
-
- offsetX = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2;
- offsetY = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2;
-
- REG_WRITE(htot_reg, (mode->crtc_hdisplay - 1) |
- ((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(vtot_reg, (mode->crtc_vdisplay - 1) |
- ((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - offsetX - 1) |
- ((adjusted_mode->crtc_hblank_end - offsetX - 1) << 16));
- REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - offsetX - 1) |
- ((adjusted_mode->crtc_hsync_end - offsetX - 1) << 16));
- REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - offsetY - 1) |
- ((adjusted_mode->crtc_vblank_end - offsetY - 1) << 16));
- REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - offsetY - 1) |
- ((adjusted_mode->crtc_vsync_end - offsetY - 1) << 16));
- } else {
- REG_WRITE(htot_reg, (adjusted_mode->crtc_hdisplay - 1) |
- ((adjusted_mode->crtc_htotal - 1) << 16));
- REG_WRITE(vtot_reg, (adjusted_mode->crtc_vdisplay - 1) |
- ((adjusted_mode->crtc_vtotal - 1) << 16));
- REG_WRITE(hblank_reg, (adjusted_mode->crtc_hblank_start - 1) |
- ((adjusted_mode->crtc_hblank_end - 1) << 16));
- REG_WRITE(hsync_reg, (adjusted_mode->crtc_hsync_start - 1) |
- ((adjusted_mode->crtc_hsync_end - 1) << 16));
- REG_WRITE(vblank_reg, (adjusted_mode->crtc_vblank_start - 1) |
- ((adjusted_mode->crtc_vblank_end - 1) << 16));
- REG_WRITE(vsync_reg, (adjusted_mode->crtc_vsync_start - 1) |
- ((adjusted_mode->crtc_vsync_end - 1) << 16));
- }
-
- /* Flush the plane changes */
- {
- struct drm_crtc_helper_funcs *crtc_funcs =
- crtc->helper_private;
- crtc_funcs->mode_set_base(crtc, x, y, old_fb);
- }
-
- /* setup pipeconf */
- *pipeconf = PIPEACONF_ENABLE; /* FIXME_JLIU7 REG_READ(pipeconf_reg); */
-
- /* Set up the display plane register */
- *dspcntr = REG_READ(dspcntr_reg);
- *dspcntr |= pipe << DISPPLANE_SEL_PIPE_POS;
- *dspcntr |= DISPLAY_PLANE_ENABLE;
-/* MDFLD_PO_JLIU7 dspcntr |= DISPPLANE_BOTTOM; */
-/* MDFLD_PO_JLIU7 dspcntr |= DISPPLANE_GAMMA_ENABLE; */
-
-	if (is_mipi2)
-		goto mrst_crtc_mode_set_exit;
-/* FIXME JLIU7 Add MDFLD HDMI support */
-/* FIXME_MDFLD JLIU7 DSIPLL clock *= 8? */
-/* FIXME_MDFLD JLIU7 need to revisit for dual MIPI support */
- clk = adjusted_mode->clock;
-
- if (is_hdmi) {
- if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19))
- {
- refclk = 19200;
-
- if (is_mipi || is_mipi2)
- {
- clk_n = 1, clk_p2 = 8;
- } else if (is_hdmi) {
- clk_n = 1, clk_p2 = 10;
- }
- } else if (ksel == KSEL_BYPASS_25) {
- refclk = 25000;
-
- if (is_mipi || is_mipi2)
- {
- clk_n = 1, clk_p2 = 8;
- } else if (is_hdmi) {
- clk_n = 1, clk_p2 = 10;
- }
- } else if ((ksel == KSEL_BYPASS_83_100) && (dev_priv->core_freq == 166)) {
- refclk = 83000;
-
- if (is_mipi || is_mipi2)
- {
- clk_n = 4, clk_p2 = 8;
- } else if (is_hdmi) {
- clk_n = 4, clk_p2 = 10;
- }
- } else if ((ksel == KSEL_BYPASS_83_100) &&
- (dev_priv->core_freq == 100 || dev_priv->core_freq == 200)) {
- refclk = 100000;
- if (is_mipi || is_mipi2)
- {
- clk_n = 4, clk_p2 = 8;
- } else if (is_hdmi) {
- clk_n = 4, clk_p2 = 10;
- }
- }
-
- if (is_mipi)
- clk_byte = dev_priv->bpp / 8;
- else if (is_mipi2)
- clk_byte = dev_priv->bpp2 / 8;
-
- clk_tmp = clk * clk_n * clk_p2 * clk_byte;
-
- dev_dbg(dev->dev, "clk = %d, clk_n = %d, clk_p2 = %d. \n", clk, clk_n, clk_p2);
- dev_dbg(dev->dev, "adjusted_mode->clock = %d, clk_tmp = %d. \n", adjusted_mode->clock, clk_tmp);
-
- ok = mdfldFindBestPLL(crtc, clk_tmp, refclk, &clock);
-
- if (!ok) {
- dev_err(dev->dev,
-			"mdfldFindBestPLL failed in mdfld_crtc_mode_set\n");
- } else {
- m_conv = mdfld_m_converts[(clock.m - MDFLD_M_MIN)];
-
- dev_dbg(dev->dev, "dot clock = %d,"
- "m = %d, p1 = %d, m_conv = %d. \n", clock.dot, clock.m,
- clock.p1, m_conv);
- }
-
- dpll = REG_READ(dpll_reg);
-
- if (dpll & DPLL_VCO_ENABLE) {
- dpll &= ~DPLL_VCO_ENABLE;
- REG_WRITE(dpll_reg, dpll);
- REG_READ(dpll_reg);
-
- /* FIXME jliu7 check the DPLL lock bit PIPEACONF[29] */
- /* FIXME_MDFLD PO - change 500 to 1 after PO */
- udelay(500);
-
- /* reset M1, N1 & P1 */
- REG_WRITE(fp_reg, 0);
- dpll &= ~MDFLD_P1_MASK;
- REG_WRITE(dpll_reg, dpll);
- /* FIXME_MDFLD PO - change 500 to 1 after PO */
- udelay(500);
- }
-
-	/* When ungating the DPLL power we need to wait 0.5us before enabling the VCO */
- if (dpll & MDFLD_PWR_GATE_EN) {
- dpll &= ~MDFLD_PWR_GATE_EN;
- REG_WRITE(dpll_reg, dpll);
- /* FIXME_MDFLD PO - change 500 to 1 after PO */
- udelay(500);
- }
-
- dpll = 0;
-
-#if 0 /* FIXME revisit later */
- if ((ksel == KSEL_CRYSTAL_19) || (ksel == KSEL_BYPASS_19) || (ksel == KSEL_BYPASS_25)) {
- dpll &= ~MDFLD_INPUT_REF_SEL;
- } else if (ksel == KSEL_BYPASS_83_100) {
- dpll |= MDFLD_INPUT_REF_SEL;
- }
-#endif /* FIXME revisit later */
-
- if (is_hdmi)
- dpll |= MDFLD_VCO_SEL;
-
- fp = (clk_n / 2) << 16;
- fp |= m_conv;
-
- /* compute bitmask from p1 value */
- dpll |= (1 << (clock.p1 - 2)) << 17;
-
-#if 0 /* 1080p30 & 720p */
- dpll = 0x00050000;
- fp = 0x000001be;
-#endif
-#if 0 /* 480p */
- dpll = 0x02010000;
- fp = 0x000000d2;
-#endif
- } else {
-#if 0 /*DBI_TPO_480x864*/
- dpll = 0x00020000;
- fp = 0x00000156;
-#endif /* DBI_TPO_480x864 */ /* get from spec. */
-
- dpll = 0x00800000;
- fp = 0x000000c1;
-	}
-
- REG_WRITE(fp_reg, fp);
- REG_WRITE(dpll_reg, dpll);
- /* FIXME_MDFLD PO - change 500 to 1 after PO */
- udelay(500);
-
- dpll |= DPLL_VCO_ENABLE;
- REG_WRITE(dpll_reg, dpll);
- REG_READ(dpll_reg);
-
- /* wait for DSI PLL to lock */
- while ((timeout < 20000) && !(REG_READ(pipeconf_reg) & PIPECONF_DSIPLL_LOCK)) {
- udelay(150);
-		timeout++;
- }
-
- if (is_mipi)
- goto mrst_crtc_mode_set_exit;
-
- dev_dbg(dev->dev, "is_mipi = 0x%x \n", is_mipi);
-
- REG_WRITE(pipeconf_reg, *pipeconf);
- REG_READ(pipeconf_reg);
-
-	/* Wait for the pipe enable to take effect. */
-//FIXME_JLIU7 HDMI mrstWaitForPipeEnable(dev);
-
- REG_WRITE(dspcntr_reg, *dspcntr);
- psb_intel_wait_for_vblank(dev);
-
-mrst_crtc_mode_set_exit:
-
- gma_power_end(dev);
-
- return 0;
-}
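In the HDMI branch of mdfld_crtc_mode_set() above, the target handed to mdfldFindBestPLL() is the adjusted pixel clock scaled by clk_n, clk_p2 and, for MIPI outputs, the bytes per pixel. A worked example with assumed inputs (24 bpp MIPI panel, KSEL_BYPASS_83_100 at the 100 MHz reference, 25777 kHz mode clock) produces exactly the target used in the search sketch after mdfldFindBestPLL() above:

/* Sketch only: target-clock arithmetic from the branch above,
 * with hypothetical input values.
 */
int clk = 25777;			/* adjusted_mode->clock, in kHz */
int clk_n = 4, clk_p2 = 8;		/* KSEL_BYPASS_83_100, 100 MHz case */
int clk_byte = 24 / 8;			/* assumed 24 bpp MIPI panel */
int clk_tmp = clk * clk_n * clk_p2 * clk_byte;	/* = 2474592 */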
-
-static void mdfld_crtc_prepare(struct drm_crtc *crtc)
-{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
-}
-
-static void mdfld_crtc_commit(struct drm_crtc *crtc)
-{
- struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
-}
-
-static bool mdfld_crtc_mode_fixup(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- return true;
-}
-
-const struct drm_crtc_helper_funcs mdfld_helper_funcs = {
- .dpms = mdfld_crtc_dpms,
- .mode_fixup = mdfld_crtc_mode_fixup,
- .mode_set = mdfld_crtc_mode_set,
- .mode_set_base = mdfld__intel_pipe_set_base,
- .prepare = mdfld_crtc_prepare,
- .commit = mdfld_crtc_commit,
-};
diff --git a/drivers/staging/gma500/mdfld_msic.h b/drivers/staging/gma500/mdfld_msic.h
deleted file mode 100644
index a7ad6547..0000000
--- a/drivers/staging/gma500/mdfld_msic.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Jim Liu <jim.liu@intel.com>
- */
-
-#define MSIC_PCI_DEVICE_ID 0x831
-
-int msic_regsiter_driver(void);
-int msic_unregister_driver(void);
-extern void hpd_notify_um(void);
diff --git a/drivers/staging/gma500/mdfld_output.c b/drivers/staging/gma500/mdfld_output.c
deleted file mode 100644
index eabf53d..0000000
--- a/drivers/staging/gma500/mdfld_output.c
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * Copyright (c) 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Thomas Eaton <thomas.g.eaton@intel.com>
- * Scott Rowe <scott.m.rowe@intel.com>
-*/
-
-#include <linux/init.h>
-#include <linux/moduleparam.h>
-#include "mdfld_dsi_dbi.h"
-#include "mdfld_dsi_dpi.h"
-#include "mdfld_dsi_output.h"
-#include "mdfld_output.h"
-#include "mdfld_dsi_dbi_dpu.h"
-
-#include "displays/tpo_cmd.h"
-#include "displays/tpo_vid.h"
-#include "displays/tmd_cmd.h"
-#include "displays/tmd_vid.h"
-#include "displays/pyr_cmd.h"
-#include "displays/pyr_vid.h"
-/* #include "displays/hdmi.h" */
-
-static int mdfld_dual_mipi;
-static int mdfld_hdmi;
-static int mdfld_dpu;
-
-module_param(mdfld_dual_mipi, int, 0600);
-MODULE_PARM_DESC(mdfld_dual_mipi, "Enable dual MIPI configuration");
-module_param(mdfld_hdmi, int, 0600);
-MODULE_PARM_DESC(mdfld_hdmi, "Enable Medfield HDMI");
-module_param(mdfld_dpu, int, 0600);
-MODULE_PARM_DESC(mdfld_dpu, "Enable Medfield DPU");
-
-/* For now a single type per device is all we cope with */
-int mdfld_get_panel_type(struct drm_device *dev, int pipe)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- return dev_priv->panel_id;
-}
-
-int mdfld_panel_dpi(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- switch (dev_priv->panel_id) {
- case TMD_VID:
- case TPO_VID:
- case PYR_VID:
- return true;
- case TMD_CMD:
- case TPO_CMD:
- case PYR_CMD:
- default:
- return false;
- }
-}
-
-static int init_panel(struct drm_device *dev, int mipi_pipe, int p_type)
-{
- struct panel_funcs *p_cmd_funcs;
- struct panel_funcs *p_vid_funcs;
-
- /* Oh boy ... FIXME */
- p_cmd_funcs = kzalloc(sizeof(struct panel_funcs), GFP_KERNEL);
- if (p_cmd_funcs == NULL)
- return -ENODEV;
- p_vid_funcs = kzalloc(sizeof(struct panel_funcs), GFP_KERNEL);
- if (p_vid_funcs == NULL) {
- kfree(p_cmd_funcs);
- return -ENODEV;
- }
-
- switch (p_type) {
- case TPO_CMD:
- tpo_cmd_init(dev, p_cmd_funcs);
- mdfld_dsi_output_init(dev, mipi_pipe, NULL, p_cmd_funcs, NULL);
- break;
- case TPO_VID:
- tpo_vid_init(dev, p_vid_funcs);
- mdfld_dsi_output_init(dev, mipi_pipe, NULL, NULL, p_vid_funcs);
- break;
- case TMD_CMD:
- /*tmd_cmd_init(dev, p_cmd_funcs); */
- mdfld_dsi_output_init(dev, mipi_pipe, NULL, p_cmd_funcs, NULL);
- break;
- case TMD_VID:
- tmd_vid_init(dev, p_vid_funcs);
- mdfld_dsi_output_init(dev, mipi_pipe, NULL, NULL, p_vid_funcs);
- break;
- case PYR_CMD:
- pyr_cmd_init(dev, p_cmd_funcs);
- mdfld_dsi_output_init(dev, mipi_pipe, NULL, p_cmd_funcs, NULL);
- break;
- case PYR_VID:
- mdfld_dsi_output_init(dev, mipi_pipe, NULL, NULL, p_vid_funcs);
- break;
- case TPO: /* TPO panel supports both cmd & vid interfaces */
- tpo_cmd_init(dev, p_cmd_funcs);
- tpo_vid_init(dev, p_vid_funcs);
- mdfld_dsi_output_init(dev, mipi_pipe, NULL, p_cmd_funcs,
- p_vid_funcs);
- break;
- case TMD:
- break;
- case PYR:
- break;
-#if 0
- case HDMI:
- dev_dbg(dev->dev, "Initializing HDMI");
- mdfld_hdmi_init(dev, &dev_priv->mode_dev);
- break;
-#endif
- default:
- dev_err(dev->dev, "Unsupported interface %d", p_type);
- return -ENODEV;
- }
- return 0;
-}
-
-int mdfld_output_init(struct drm_device *dev)
-{
- int type;
-
- /* MIPI panel 1 */
- type = mdfld_get_panel_type(dev, 0);
- dev_info(dev->dev, "panel 1: type is %d\n", type);
- init_panel(dev, 0, type);
-
- if (mdfld_dual_mipi) {
- /* MIPI panel 2 */
- type = mdfld_get_panel_type(dev, 2);
- dev_info(dev->dev, "panel 2: type is %d\n", type);
- init_panel(dev, 2, type);
- }
- if (mdfld_hdmi)
- /* HDMI panel */
- init_panel(dev, 0, HDMI);
- return 0;
-}
-
-void mdfld_output_setup(struct drm_device *dev)
-{
- /* FIXME: this is not the right place for this stuff ! */
- if (IS_MFLD(dev)) {
- if (mdfld_dpu)
- mdfld_dbi_dpu_init(dev);
- else
- mdfld_dbi_dsr_init(dev);
- }
-}
diff --git a/drivers/staging/gma500/mdfld_output.h b/drivers/staging/gma500/mdfld_output.h
deleted file mode 100644
index daf33e7..0000000
--- a/drivers/staging/gma500/mdfld_output.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Copyright (c) 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Thomas Eaton <thomas.g.eaton@intel.com>
- * Scott Rowe <scott.m.rowe@intel.com>
-*/
-
-#ifndef MDFLD_OUTPUT_H
-#define MDFLD_OUTPUT_H
-
-int mdfld_output_init(struct drm_device *dev);
-int mdfld_panel_dpi(struct drm_device *dev);
-int mdfld_get_panel_type(struct drm_device *dev, int pipe);
-void mdfld_disable_crtc (struct drm_device *dev, int pipe);
-
-extern const struct drm_crtc_helper_funcs mdfld_helper_funcs;
-extern const struct drm_crtc_funcs mdfld_intel_crtc_funcs;
-
-extern void mdfld_output_setup(struct drm_device *dev);
-
-#endif
diff --git a/drivers/staging/gma500/mdfld_pyr_cmd.c b/drivers/staging/gma500/mdfld_pyr_cmd.c
deleted file mode 100644
index 523f2d8..0000000
--- a/drivers/staging/gma500/mdfld_pyr_cmd.c
+++ /dev/null
@@ -1,558 +0,0 @@
-/*
- * Copyright (c) 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Thomas Eaton <thomas.g.eaton@intel.com>
- * Scott Rowe <scott.m.rowe@intel.com>
-*/
-
-#include "mdfld_dsi_dbi.h"
-#include "mdfld_dsi_dpi.h"
-#include "mdfld_dsi_output.h"
-#include "mdfld_output.h"
-#include "mdfld_dsi_dbi_dpu.h"
-#include "mdfld_dsi_pkg_sender.h"
-
-#include "displays/pyr_cmd.h"
-
-static struct drm_display_mode *pyr_cmd_get_config_mode(struct drm_device *dev)
-{
- struct drm_display_mode *mode;
-
- mode = kzalloc(sizeof(*mode), GFP_KERNEL);
- if (!mode) {
- dev_err(dev->dev, "Out of memory\n");
- return NULL;
- }
-
-	mode->hdisplay = 480;
-	mode->vdisplay = 864;
-	mode->hsync_start = 487;
-	mode->hsync_end = 490;
-	mode->htotal = 499;
-	mode->vsync_start = 874;
-	mode->vsync_end = 878;
-	mode->vtotal = 886;
-	mode->clock = 25777;
-
-	dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay);
-	dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay);
-	dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start);
-	dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end);
-	dev_dbg(dev->dev, "htotal is %d\n", mode->htotal);
-	dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start);
-	dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end);
-	dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal);
-	dev_dbg(dev->dev, "clock is %d\n", mode->clock);
-
- drm_mode_set_name(mode);
- drm_mode_set_crtcinfo(mode, 0);
-
- mode->type |= DRM_MODE_TYPE_PREFERRED;
-
- return mode;
-}
-
-static bool pyr_dsi_dbi_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct drm_device *dev = encoder->dev;
- struct drm_display_mode *fixed_mode = pyr_cmd_get_config_mode(dev);
-
- if (fixed_mode) {
- adjusted_mode->hdisplay = fixed_mode->hdisplay;
- adjusted_mode->hsync_start = fixed_mode->hsync_start;
- adjusted_mode->hsync_end = fixed_mode->hsync_end;
- adjusted_mode->htotal = fixed_mode->htotal;
- adjusted_mode->vdisplay = fixed_mode->vdisplay;
- adjusted_mode->vsync_start = fixed_mode->vsync_start;
- adjusted_mode->vsync_end = fixed_mode->vsync_end;
- adjusted_mode->vtotal = fixed_mode->vtotal;
- adjusted_mode->clock = fixed_mode->clock;
- drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
- kfree(fixed_mode);
- }
- return true;
-}
-
-static void pyr_dsi_dbi_set_power(struct drm_encoder *encoder, bool on)
-{
- int ret = 0;
- struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
- struct mdfld_dsi_dbi_output *dbi_output =
- MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
- struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- u32 reg_offset = 0;
- int pipe = (dbi_output->channel_num == 0) ? 0 : 2;
-
- dev_dbg(dev->dev, "pipe %d : %s, panel on: %s\n", pipe,
- on ? "On" : "Off",
- dbi_output->dbi_panel_on ? "True" : "False");
-
- if (pipe == 2) {
- if (on)
- dev_priv->dual_mipi = true;
- else
- dev_priv->dual_mipi = false;
-
- reg_offset = MIPIC_REG_OFFSET;
- } else {
- if (!on)
- dev_priv->dual_mipi = false;
- }
-
- if (!gma_power_begin(dev, true)) {
- dev_err(dev->dev, "hw begin failed\n");
- return;
- }
-
-
- if (on) {
- if (dbi_output->dbi_panel_on)
- goto out_err;
-
- ret = mdfld_dsi_dbi_update_power(dbi_output, DRM_MODE_DPMS_ON);
- if (ret) {
- dev_err(dev->dev, "power on error\n");
- goto out_err;
- }
-
- dbi_output->dbi_panel_on = true;
-
- if (pipe == 2) {
- dev_priv->dbi_panel_on2 = true;
- } else {
- dev_priv->dbi_panel_on = true;
- mdfld_enable_te(dev, 0);
- }
- } else {
- if (!dbi_output->dbi_panel_on && !dbi_output->first_boot)
- goto out_err;
-
- dbi_output->dbi_panel_on = false;
- dbi_output->first_boot = false;
-
- if (pipe == 2) {
- dev_priv->dbi_panel_on2 = false;
- mdfld_disable_te(dev, 2);
- } else {
- dev_priv->dbi_panel_on = false;
- mdfld_disable_te(dev, 0);
-
- if (dev_priv->dbi_panel_on2)
- mdfld_enable_te(dev, 2);
- }
-
- ret = mdfld_dsi_dbi_update_power(dbi_output, DRM_MODE_DPMS_OFF);
- if (ret) {
-			dev_err(dev->dev, "power off error\n");
- goto out_err;
- }
- }
-
-out_err:
- gma_power_end(dev);
-
- if (ret)
- dev_err(dev->dev, "failed\n");
-}
-
-static void pyr_dsi_controller_dbi_init(struct mdfld_dsi_config *dsi_config,
- int pipe)
-{
- struct drm_device *dev = dsi_config->dev;
- u32 reg_offset = pipe ? MIPIC_REG_OFFSET : 0;
- int lane_count = dsi_config->lane_count;
- u32 val = 0;
-
- dev_dbg(dev->dev, "Init DBI interface on pipe %d...\n", pipe);
-
- /* Un-ready device */
- REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000000);
-
- /* Init dsi adapter before kicking off */
- REG_WRITE((MIPIA_CONTROL_REG + reg_offset), 0x00000018);
-
- /* TODO: figure out how to setup these registers */
- REG_WRITE((MIPIA_DPHY_PARAM_REG + reg_offset), 0x150c600F);
- REG_WRITE((MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG + reg_offset),
- 0x000a0014);
- REG_WRITE((MIPIA_DBI_BW_CTRL_REG + reg_offset), 0x00000400);
- REG_WRITE((MIPIA_HS_LS_DBI_ENABLE_REG + reg_offset), 0x00000000);
-
- /* Enable all interrupts */
- REG_WRITE((MIPIA_INTR_EN_REG + reg_offset), 0xffffffff);
- /* Max value: 20 clock cycles of txclkesc */
- REG_WRITE((MIPIA_TURN_AROUND_TIMEOUT_REG + reg_offset), 0x0000001f);
- /* Min 21 txclkesc, max: ffffh */
- REG_WRITE((MIPIA_DEVICE_RESET_TIMER_REG + reg_offset), 0x0000ffff);
- /* Min: 7d0 max: 4e20 */
- REG_WRITE((MIPIA_INIT_COUNT_REG + reg_offset), 0x00000fa0);
-
- /* Set up func_prg */
- val |= lane_count;
- val |= (dsi_config->channel_num << DSI_DBI_VIRT_CHANNEL_OFFSET);
- val |= DSI_DBI_COLOR_FORMAT_OPTION2;
- REG_WRITE((MIPIA_DSI_FUNC_PRG_REG + reg_offset), val);
-
- REG_WRITE((MIPIA_HS_TX_TIMEOUT_REG + reg_offset), 0x3fffff);
- REG_WRITE((MIPIA_LP_RX_TIMEOUT_REG + reg_offset), 0xffff);
-
- /* De-assert dbi_stall when half of DBI FIFO is empty */
- /* REG_WRITE((MIPIA_DBI_FIFO_THROTTLE_REG + reg_offset), 0x00000000); */
-
- REG_WRITE((MIPIA_HIGH_LOW_SWITCH_COUNT_REG + reg_offset), 0x46);
- REG_WRITE((MIPIA_EOT_DISABLE_REG + reg_offset), 0x00000002);
- REG_WRITE((MIPIA_LP_BYTECLK_REG + reg_offset), 0x00000004);
- REG_WRITE((MIPIA_DEVICE_READY_REG + reg_offset), 0x00000001);
-}
-
-static void pyr_dsi_dbi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- int ret = 0;
- struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
- struct mdfld_dsi_dbi_output *dsi_output =
- MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
- struct mdfld_dsi_config *dsi_config =
- mdfld_dsi_encoder_get_config(dsi_encoder);
- struct mdfld_dsi_connector *dsi_connector = dsi_config->connector;
- int pipe = dsi_connector->pipe;
- u8 param = 0;
-
- /* Regs */
- u32 mipi_reg = MIPI;
- u32 dspcntr_reg = DSPACNTR;
- u32 pipeconf_reg = PIPEACONF;
- u32 reg_offset = 0;
-
- /* Values */
- u32 dspcntr_val = dev_priv->dspcntr;
- u32 pipeconf_val = dev_priv->pipeconf;
- u32 h_active_area = mode->hdisplay;
- u32 v_active_area = mode->vdisplay;
- u32 mipi_val = (PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX |
- TE_TRIGGER_GPIO_PIN);
-
- dev_dbg(dev->dev, "mipi_val =0x%x\n", mipi_val);
-
- dev_dbg(dev->dev, "type %s\n", (pipe == 2) ? "MIPI2" : "MIPI");
- dev_dbg(dev->dev, "h %d v %d\n", mode->hdisplay, mode->vdisplay);
-
- if (pipe == 2) {
- mipi_reg = MIPI_C;
- dspcntr_reg = DSPCCNTR;
- pipeconf_reg = PIPECCONF;
-
- reg_offset = MIPIC_REG_OFFSET;
-
- dspcntr_val = dev_priv->dspcntr2;
- pipeconf_val = dev_priv->pipeconf2;
- } else {
- mipi_val |= 0x2; /* Two lanes for port A and C respectively */
- }
-
- if (!gma_power_begin(dev, true)) {
- dev_err(dev->dev, "hw begin failed\n");
- return;
- }
-
- /* Set up pipe related registers */
- REG_WRITE(mipi_reg, mipi_val);
- REG_READ(mipi_reg);
-
- pyr_dsi_controller_dbi_init(dsi_config, pipe);
-
- msleep(20);
-
- REG_WRITE(dspcntr_reg, dspcntr_val);
- REG_READ(dspcntr_reg);
-
- /* 20ms delay before sending exit_sleep_mode */
- msleep(20);
-
- /* Send exit_sleep_mode DCS */
- ret = mdfld_dsi_dbi_send_dcs(dsi_output, exit_sleep_mode, NULL,
- 0, CMD_DATA_SRC_SYSTEM_MEM);
- if (ret) {
-		dev_err(dev->dev, "failed to send exit_sleep_mode\n");
- goto out_err;
- }
-
-	/* Send set_tear_on DCS */
- ret = mdfld_dsi_dbi_send_dcs(dsi_output, set_tear_on,
- &param, 1, CMD_DATA_SRC_SYSTEM_MEM);
- if (ret) {
-		dev_err(dev->dev, "%s - failed to send set_tear_on\n", __func__);
- goto out_err;
- }
-
- /* Do some init stuff */
- mdfld_dsi_brightness_init(dsi_config, pipe);
- mdfld_dsi_gen_fifo_ready(dev, (MIPIA_GEN_FIFO_STAT_REG + reg_offset),
- HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
-
- REG_WRITE(pipeconf_reg, pipeconf_val | PIPEACONF_DSR);
- REG_READ(pipeconf_reg);
-
- /* TODO: this looks ugly, try to move it to CRTC mode setting */
- if (pipe == 2)
- dev_priv->pipeconf2 |= PIPEACONF_DSR;
- else
- dev_priv->pipeconf |= PIPEACONF_DSR;
-
- dev_dbg(dev->dev, "pipeconf %x\n", REG_READ(pipeconf_reg));
-
- ret = mdfld_dsi_dbi_update_area(dsi_output, 0, 0,
- h_active_area - 1, v_active_area - 1);
- if (ret) {
- dev_err(dev->dev, "update area failed\n");
- goto out_err;
- }
-
-out_err:
- gma_power_end(dev);
-
- if (ret)
- dev_err(dev->dev, "mode set failed\n");
- else
- dev_dbg(dev->dev, "mode set done successfully\n");
-}
-
-static void pyr_dsi_dbi_prepare(struct drm_encoder *encoder)
-{
- struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
- struct mdfld_dsi_dbi_output *dbi_output =
- MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
-
- dbi_output->mode_flags |= MODE_SETTING_IN_ENCODER;
- dbi_output->mode_flags &= ~MODE_SETTING_ENCODER_DONE;
-
- pyr_dsi_dbi_set_power(encoder, false);
-}
-
-static void pyr_dsi_dbi_commit(struct drm_encoder *encoder)
-{
- struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
- struct mdfld_dsi_dbi_output *dbi_output =
- MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
- struct drm_device *dev = dbi_output->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_drm_dpu_rect rect;
-
- pyr_dsi_dbi_set_power(encoder, true);
-
- dbi_output->mode_flags &= ~MODE_SETTING_IN_ENCODER;
-
- rect.x = rect.y = 0;
- rect.width = 864;
- rect.height = 480;
-
- if (dbi_output->channel_num == 1) {
- dev_priv->dsr_fb_update |= MDFLD_DSR_2D_3D_2;
- /* If DPU enabled report a fullscreen damage */
- mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEC, &rect);
- } else {
- dev_priv->dsr_fb_update |= MDFLD_DSR_2D_3D_0;
- mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEA, &rect);
- }
- dbi_output->mode_flags |= MODE_SETTING_ENCODER_DONE;
-}
-
-static void pyr_dsi_dbi_dpms(struct drm_encoder *encoder, int mode)
-{
- struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
- struct mdfld_dsi_dbi_output *dbi_output =
- MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
- struct drm_device *dev = dbi_output->dev;
-
- dev_dbg(dev->dev, "%s\n", (mode == DRM_MODE_DPMS_ON ? "on" : "off"));
-
- if (mode == DRM_MODE_DPMS_ON)
- pyr_dsi_dbi_set_power(encoder, true);
- else
- pyr_dsi_dbi_set_power(encoder, false);
-}
-
-/*
- * Update the DBI MIPI Panel Frame Buffer.
- */
-static void pyr_dsi_dbi_update_fb(struct mdfld_dsi_dbi_output *dbi_output,
- int pipe)
-{
- struct mdfld_dsi_pkg_sender *sender =
- mdfld_dsi_encoder_get_pkg_sender(&dbi_output->base);
- struct drm_device *dev = dbi_output->dev;
- struct drm_crtc *crtc = dbi_output->base.base.crtc;
- struct psb_intel_crtc *psb_crtc = (crtc) ?
- to_psb_intel_crtc(crtc) : NULL;
-
- u32 dpll_reg = MRST_DPLL_A;
- u32 dspcntr_reg = DSPACNTR;
- u32 pipeconf_reg = PIPEACONF;
- u32 dsplinoff_reg = DSPALINOFF;
- u32 dspsurf_reg = DSPASURF;
- u32 hs_gen_ctrl_reg = HS_GEN_CTRL_REG;
- u32 gen_fifo_stat_reg = GEN_FIFO_STAT_REG;
- u32 reg_offset = 0;
-
- u32 intr_status;
- u32 fifo_stat_reg_val;
- u32 dpll_reg_val;
- u32 dspcntr_reg_val;
- u32 pipeconf_reg_val;
-
- /* If mode setting on-going, back off */
- if ((dbi_output->mode_flags & MODE_SETTING_ON_GOING) ||
- (psb_crtc && psb_crtc->mode_flags & MODE_SETTING_ON_GOING) ||
- !(dbi_output->mode_flags & MODE_SETTING_ENCODER_DONE))
- return;
-
- /*
- * Look for errors here. In particular we're checking for whatever
- * error status might have appeared during the last frame transmit
- * (memory write).
- *
- * Normally, the bits we're testing here would be set infrequently,
- * if at all. However, one panel (at least) returns at least one
- * error bit on most frames. So we've disabled the kernel message
- * for now.
- *
- * Still clear whatever error bits are set, except don't clear the
- * ones that would make the Penwell DSI controller reset if we
- * cleared them.
- */
- intr_status = REG_READ(INTR_STAT_REG);
- if ((intr_status & 0x26FFFFFF) != 0) {
- /* dev_err(dev->dev, "DSI status: 0x%08X\n", intr_status); */
- intr_status &= 0x26F3FFFF;
- REG_WRITE(INTR_STAT_REG, intr_status);
- }
-
- if (pipe == 2) {
- dspcntr_reg = DSPCCNTR;
- pipeconf_reg = PIPECCONF;
- dsplinoff_reg = DSPCLINOFF;
- dspsurf_reg = DSPCSURF;
-
- hs_gen_ctrl_reg = HS_GEN_CTRL_REG + MIPIC_REG_OFFSET;
-		gen_fifo_stat_reg = GEN_FIFO_STAT_REG + MIPIC_REG_OFFSET;
-
- reg_offset = MIPIC_REG_OFFSET;
- }
-
- if (!gma_power_begin(dev, true)) {
- dev_err(dev->dev, "hw begin failed\n");
- return;
- }
-
- fifo_stat_reg_val = REG_READ(MIPIA_GEN_FIFO_STAT_REG + reg_offset);
- dpll_reg_val = REG_READ(dpll_reg);
- dspcntr_reg_val = REG_READ(dspcntr_reg);
- pipeconf_reg_val = REG_READ(pipeconf_reg);
-
- if (!(fifo_stat_reg_val & (1 << 27)) ||
- (dpll_reg_val & DPLL_VCO_ENABLE) ||
- !(dspcntr_reg_val & DISPLAY_PLANE_ENABLE) ||
- !(pipeconf_reg_val & DISPLAY_PLANE_ENABLE)) {
- goto update_fb_out0;
- }
-
- /* Refresh plane changes */
- REG_WRITE(dsplinoff_reg, REG_READ(dsplinoff_reg));
- REG_WRITE(dspsurf_reg, REG_READ(dspsurf_reg));
- REG_READ(dspsurf_reg);
-
- mdfld_dsi_send_dcs(sender,
- write_mem_start,
- NULL,
- 0,
- CMD_DATA_SRC_PIPE,
- MDFLD_DSI_SEND_PACKAGE);
-
- /*
- * The idea here is to transmit a Generic Read command after the
- * Write Memory Start/Continue commands finish. This asks for
- * the panel to return an "ACK No Errors," or (if it has errors
- * to report) an Error Report. This allows us to monitor the
- * panel's perception of the health of the DSI.
- */
- mdfld_dsi_gen_fifo_ready(dev, gen_fifo_stat_reg,
- HS_CTRL_FIFO_EMPTY | HS_DATA_FIFO_EMPTY);
- REG_WRITE(hs_gen_ctrl_reg, (1 << WORD_COUNTS_POS) | GEN_READ_0);
-
- dbi_output->dsr_fb_update_done = true;
-update_fb_out0:
- gma_power_end(dev);
-}
-
-/*
- * TODO: to be removed later; the display interfaces for power handling
- * still need to be worked out.
- */
-void pyr_dsi_adapter_init(struct mdfld_dsi_config *dsi_config, int pipe)
-{
- if (!dsi_config || (pipe != 0 && pipe != 2)) {
- WARN_ON(1);
- return;
- }
- pyr_dsi_controller_dbi_init(dsi_config, pipe);
-}
-
-static int pyr_cmd_get_panel_info(struct drm_device *dev, int pipe,
- struct panel_info *pi)
-{
- if (!dev || !pi)
- return -EINVAL;
-
- pi->width_mm = PYR_PANEL_WIDTH;
- pi->height_mm = PYR_PANEL_HEIGHT;
-
- return 0;
-}
-
-/* PYR DBI encoder helper funcs */
-static const struct drm_encoder_helper_funcs pyr_dsi_dbi_helper_funcs = {
- .dpms = pyr_dsi_dbi_dpms,
- .mode_fixup = pyr_dsi_dbi_mode_fixup,
- .prepare = pyr_dsi_dbi_prepare,
- .mode_set = pyr_dsi_dbi_mode_set,
- .commit = pyr_dsi_dbi_commit,
-};
-
-/* PYR DBI encoder funcs */
-static const struct drm_encoder_funcs mdfld_dsi_dbi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-void pyr_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs)
-{
- p_funcs->encoder_funcs = &mdfld_dsi_dbi_encoder_funcs;
- p_funcs->encoder_helper_funcs = &pyr_dsi_dbi_helper_funcs;
- p_funcs->get_config_mode = &pyr_cmd_get_config_mode;
- p_funcs->update_fb = pyr_dsi_dbi_update_fb;
- p_funcs->get_panel_info = pyr_cmd_get_panel_info;
-}
diff --git a/drivers/staging/gma500/mdfld_tmd_vid.c b/drivers/staging/gma500/mdfld_tmd_vid.c
deleted file mode 100644
index affdc09..0000000
--- a/drivers/staging/gma500/mdfld_tmd_vid.c
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Jim Liu <jim.liu@intel.com>
- * Jackie Li<yaodong.li@intel.com>
- * Gideon Eaton <eaton.
- * Scott Rowe <scott.m.rowe@intel.com>
- */
-
-#include "mdfld_dsi_dbi.h"
-#include "mdfld_dsi_dpi.h"
-#include "mdfld_dsi_output.h"
-#include "mdfld_output.h"
-
-#include "mdfld_dsi_pkg_sender.h"
-
-#include "displays/tmd_vid.h"
-
-/* FIXME: static ? */
-struct drm_display_mode *tmd_vid_get_config_mode(struct drm_device *dev)
-{
- struct drm_display_mode *mode;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
- bool use_gct = false; /*Disable GCT for now*/
-
- mode = kzalloc(sizeof(*mode), GFP_KERNEL);
- if (!mode) {
- dev_err(dev->dev, "Out of memory\n");
- return NULL;
- }
-
- if (use_gct) {
- dev_dbg(dev->dev, "gct find MIPI panel.\n");
-
- mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
- mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
- mode->hsync_start = mode->hdisplay +
- ((ti->hsync_offset_hi << 8) |
- ti->hsync_offset_lo);
- mode->hsync_end = mode->hsync_start +
- ((ti->hsync_pulse_width_hi << 8) |
- ti->hsync_pulse_width_lo);
- mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) |
- ti->hblank_lo);
- mode->vsync_start = \
- mode->vdisplay + ((ti->vsync_offset_hi << 8) |
- ti->vsync_offset_lo);
- mode->vsync_end = \
- mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
- ti->vsync_pulse_width_lo);
- mode->vtotal = mode->vdisplay +
- ((ti->vblank_hi << 8) | ti->vblank_lo);
- mode->clock = ti->pixel_clock * 10;
-
- dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay);
- dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay);
- dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start);
- dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end);
- dev_dbg(dev->dev, "htotal is %d\n", mode->htotal);
- dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start);
- dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end);
- dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal);
- dev_dbg(dev->dev, "clock is %d\n", mode->clock);
- } else {
- mode->hdisplay = 480;
- mode->vdisplay = 854;
- mode->hsync_start = 487;
- mode->hsync_end = 490;
- mode->htotal = 499;
- mode->vsync_start = 861;
- mode->vsync_end = 865;
- mode->vtotal = 873;
- mode->clock = 33264;
- }
- drm_mode_set_name(mode);
- drm_mode_set_crtcinfo(mode, 0);
-
- mode->type |= DRM_MODE_TYPE_PREFERRED;
-
- return mode;
-}
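When GCT timing data is used, tmd_vid_get_config_mode() above rebuilds each timing field from split high/low bytes, e.g. hdisplay = (hactive_hi << 8) | hactive_lo. A small reconstruction example with made-up byte values (not taken from any real GCT):

/* Sketch only: GCT hi/lo byte reconstruction with hypothetical values. */
u8 hactive_hi = 0x03, hactive_lo = 0x60;
int hdisplay = (hactive_hi << 8) | hactive_lo;	/* 0x0360 = 864 */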
-
-static int tmd_vid_get_panel_info(struct drm_device *dev,
- int pipe,
- struct panel_info *pi)
-{
- if (!dev || !pi)
- return -EINVAL;
-
- pi->width_mm = TMD_PANEL_WIDTH;
- pi->height_mm = TMD_PANEL_HEIGHT;
-
- return 0;
-}
-
-/*
- * mdfld_dsi_tmd_drv_ic_init - initialise the TMD panel driver IC
- * @dsi_config: configuration
- * @pipe: pipe to configure
- *
- * This function is called only by mrst_dsi_mode_set and
- * restore_display_registers.  Since this function does not
- * acquire the mutex, it is important that the calling function
- * does!
- */
-
-
-static void mdfld_dsi_tmd_drv_ic_init(struct mdfld_dsi_config *dsi_config,
- int pipe)
-{
- static u32 tmd_cmd_mcap_off[] = {0x000000b2};
- static u32 tmd_cmd_enable_lane_switch[] = {0x000101ef};
- static u32 tmd_cmd_set_lane_num[] = {0x006360ef};
- static u32 tmd_cmd_pushing_clock0[] = {0x00cc2fef};
- static u32 tmd_cmd_pushing_clock1[] = {0x00dd6eef};
- static u32 tmd_cmd_set_mode[] = {0x000000b3};
- static u32 tmd_cmd_set_sync_pulse_mode[] = {0x000961ef};
- static u32 tmd_cmd_set_column[] = {0x0100002a, 0x000000df};
- static u32 tmd_cmd_set_page[] = {0x0300002b, 0x00000055};
- static u32 tmd_cmd_set_video_mode[] = {0x00000153};
-	/* No auto backlight; needs to be added in the future */
- static u32 tmd_cmd_enable_backlight[] = {0x00005ab4};
- static u32 tmd_cmd_set_backlight_dimming[] = {0x00000ebd};
-
- struct mdfld_dsi_pkg_sender *sender
- = mdfld_dsi_get_pkg_sender(dsi_config);
-
- DRM_INFO("Enter mdfld init TMD MIPI display.\n");
-
- if (!sender) {
- DRM_ERROR("Cannot get sender\n");
- return;
- }
-
- if (dsi_config->dvr_ic_inited)
- return;
-
- msleep(3);
-
- mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_mcap_off, 1, 0);
- mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_enable_lane_switch, 1, 0);
- mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_set_lane_num, 1, 0);
- mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_pushing_clock0, 1, 0);
- mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_pushing_clock1, 1, 0);
- mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_set_mode, 1, 0);
- mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_set_sync_pulse_mode, 1, 0);
- mdfld_dsi_send_mcs_long_lp(sender, tmd_cmd_set_column, 2, 0);
- mdfld_dsi_send_mcs_long_lp(sender, tmd_cmd_set_page, 2, 0);
- mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_set_video_mode, 1, 0);
- mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_enable_backlight, 1, 0);
- mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_set_backlight_dimming, 1, 0);
-
- dsi_config->dvr_ic_inited = 1;
-}
-
-/* TMD DPI encoder helper funcs */
-static const struct drm_encoder_helper_funcs
- mdfld_tpo_dpi_encoder_helper_funcs = {
- .dpms = mdfld_dsi_dpi_dpms,
- .mode_fixup = mdfld_dsi_dpi_mode_fixup,
- .prepare = mdfld_dsi_dpi_prepare,
- .mode_set = mdfld_dsi_dpi_mode_set,
- .commit = mdfld_dsi_dpi_commit,
-};
-
-/* TMD DPI encoder funcs */
-static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-void tmd_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs)
-{
- if (!dev || !p_funcs) {
- dev_err(dev->dev, "Invalid parameters\n");
- return;
- }
-
- p_funcs->encoder_funcs = &mdfld_tpo_dpi_encoder_funcs;
- p_funcs->encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs;
- p_funcs->get_config_mode = &tmd_vid_get_config_mode;
- p_funcs->update_fb = NULL;
- p_funcs->get_panel_info = tmd_vid_get_panel_info;
- p_funcs->reset = mdfld_dsi_panel_reset;
- p_funcs->drv_ic_init = mdfld_dsi_tmd_drv_ic_init;
-}
diff --git a/drivers/staging/gma500/mdfld_tpo_cmd.c b/drivers/staging/gma500/mdfld_tpo_cmd.c
deleted file mode 100644
index c7f7c9c..0000000
--- a/drivers/staging/gma500/mdfld_tpo_cmd.c
+++ /dev/null
@@ -1,509 +0,0 @@
-/*
- * Copyright (c) 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * Thomas Eaton <thomas.g.eaton@intel.com>
- * Scott Rowe <scott.m.rowe@intel.com>
- */
-
-#include "mdfld_dsi_dbi.h"
-#include "mdfld_dsi_dpi.h"
-#include "mdfld_dsi_output.h"
-#include "mdfld_output.h"
-#include "mdfld_dsi_dbi_dpu.h"
-#include "mdfld_dsi_pkg_sender.h"
-
-#include "displays/tpo_cmd.h"
-
-static struct drm_display_mode *tpo_cmd_get_config_mode(struct drm_device *dev)
-{
- struct drm_display_mode *mode;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
- bool use_gct = false;
-
- mode = kzalloc(sizeof(*mode), GFP_KERNEL);
- if (!mode)
- return NULL;
-
- if (use_gct) {
- dev_dbg(dev->dev, "gct find MIPI panel.\n");
-
- mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
- mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
- mode->hsync_start = mode->hdisplay + \
- ((ti->hsync_offset_hi << 8) | \
- ti->hsync_offset_lo);
- mode->hsync_end = mode->hsync_start + \
- ((ti->hsync_pulse_width_hi << 8) | \
- ti->hsync_pulse_width_lo);
- mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
- ti->hblank_lo);
- mode->vsync_start = \
- mode->vdisplay + ((ti->vsync_offset_hi << 8) | \
- ti->vsync_offset_lo);
- mode->vsync_end = \
- mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
- ti->vsync_pulse_width_lo);
- mode->vtotal = mode->vdisplay + \
- ((ti->vblank_hi << 8) | ti->vblank_lo);
- mode->clock = ti->pixel_clock * 10;
-
- dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay);
- dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay);
- dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start);
- dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end);
- dev_dbg(dev->dev, "htotal is %d\n", mode->htotal);
- dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start);
- dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end);
- dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal);
- dev_dbg(dev->dev, "clock is %d\n", mode->clock);
- } else {
- mode->hdisplay = 864;
- mode->vdisplay = 480;
- mode->hsync_start = 872;
- mode->hsync_end = 876;
- mode->htotal = 884;
- mode->vsync_start = 482;
- mode->vsync_end = 494;
- mode->vtotal = 486;
- mode->clock = 25777;
- }
-
- drm_mode_set_name(mode);
- drm_mode_set_crtcinfo(mode, 0);
-
- mode->type |= DRM_MODE_TYPE_PREFERRED;
-
- return mode;
-}
-
-static bool mdfld_dsi_dbi_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct drm_device *dev = encoder->dev;
- struct drm_display_mode *fixed_mode = tpo_cmd_get_config_mode(dev);
-
- if (fixed_mode) {
- adjusted_mode->hdisplay = fixed_mode->hdisplay;
- adjusted_mode->hsync_start = fixed_mode->hsync_start;
- adjusted_mode->hsync_end = fixed_mode->hsync_end;
- adjusted_mode->htotal = fixed_mode->htotal;
- adjusted_mode->vdisplay = fixed_mode->vdisplay;
- adjusted_mode->vsync_start = fixed_mode->vsync_start;
- adjusted_mode->vsync_end = fixed_mode->vsync_end;
- adjusted_mode->vtotal = fixed_mode->vtotal;
- adjusted_mode->clock = fixed_mode->clock;
- drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
- kfree(fixed_mode);
- }
- return true;
-}
-
-static void mdfld_dsi_dbi_set_power(struct drm_encoder *encoder, bool on)
-{
- int ret = 0;
- struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
- struct mdfld_dsi_dbi_output *dbi_output =
- MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
- struct mdfld_dsi_config *dsi_config =
- mdfld_dsi_encoder_get_config(dsi_encoder);
- struct mdfld_dsi_pkg_sender *sender =
- mdfld_dsi_encoder_get_pkg_sender(dsi_encoder);
- struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- u32 reg_offset = 0;
- int pipe = (dbi_output->channel_num == 0) ? 0 : 2;
- u32 data = 0;
-
- dev_dbg(dev->dev, "pipe %d : %s, panel on: %s\n",
- pipe, on ? "On" : "Off",
- dbi_output->dbi_panel_on ? "True" : "False");
-
- if (pipe == 2) {
- if (on)
- dev_priv->dual_mipi = true;
- else
- dev_priv->dual_mipi = false;
- reg_offset = MIPIC_REG_OFFSET;
- } else {
- if (!on)
- dev_priv->dual_mipi = false;
- }
-
- if (!gma_power_begin(dev, true)) {
- dev_err(dev->dev, "hw begin failed\n");
- return;
- }
-
- if (on) {
- if (dbi_output->dbi_panel_on)
- goto out_err;
-
- ret = mdfld_dsi_dbi_update_power(dbi_output, DRM_MODE_DPMS_ON);
- if (ret) {
- dev_err(dev->dev, "power on error\n");
- goto out_err;
- }
-
- dbi_output->dbi_panel_on = true;
-
- if (pipe == 2)
- dev_priv->dbi_panel_on2 = true;
- else
- dev_priv->dbi_panel_on = true;
- mdfld_enable_te(dev, pipe);
- } else {
- if (!dbi_output->dbi_panel_on && !dbi_output->first_boot)
- goto out_err;
-
- dbi_output->dbi_panel_on = false;
- dbi_output->first_boot = false;
-
- if (pipe == 2)
- dev_priv->dbi_panel_on2 = false;
- else
- dev_priv->dbi_panel_on = false;
-
- mdfld_disable_te(dev, pipe);
-
- ret = mdfld_dsi_dbi_update_power(dbi_output, DRM_MODE_DPMS_OFF);
- if (ret) {
-			dev_err(dev->dev, "power off error\n");
- goto out_err;
- }
- }
-
-	/*
-	 * FIXME: this is a workaround for the TPO panel crashing after
-	 * roughly 83 DPMS on/off cycles.  The root cause is that the
-	 * booster in the driver IC crashes.  With this workaround we can
-	 * resume the driver IC once we find that the booster has faulted.
-	 */
- mdfld_dsi_get_power_mode(dsi_config,
- &data,
- MDFLD_DSI_HS_TRANSMISSION);
-
- if (on && data && !(data & (1 << 7))) {
- /* Soft reset */
- mdfld_dsi_send_dcs(sender,
- DCS_SOFT_RESET,
- NULL,
- 0,
- CMD_DATA_SRC_PIPE,
- MDFLD_DSI_SEND_PACKAGE);
-
- /* Init drvIC */
- if (dbi_output->p_funcs->drv_ic_init)
- dbi_output->p_funcs->drv_ic_init(dsi_config,
- pipe);
- }
-
-out_err:
- gma_power_end(dev);
- if (ret)
- dev_err(dev->dev, "failed\n");
-}
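The workaround at the end of mdfld_dsi_dbi_set_power() above reads the DCS power mode and tests bit 7, the booster-voltage status bit of the standard DCS get_power_mode response; if the panel was just switched on but that bit is clear, the driver soft-resets and re-initialises the panel driver IC. A minimal sketch of the same check (not part of the original source):

/* Sketch only: booster check, bit 7 of the DCS power-mode byte. */
static bool booster_has_fault(u32 power_mode)
{
	return (power_mode & (1 << 7)) == 0;
}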
-
-
-static void mdfld_dsi_dbi_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- int ret = 0;
- struct drm_device *dev = encoder->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
- struct mdfld_dsi_dbi_output *dsi_output =
- MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
- struct mdfld_dsi_config *dsi_config =
- mdfld_dsi_encoder_get_config(dsi_encoder);
- struct mdfld_dsi_connector *dsi_connector = dsi_config->connector;
- int pipe = dsi_connector->pipe;
- u8 param = 0;
-
- /* Regs */
- u32 mipi_reg = MIPI;
- u32 dspcntr_reg = DSPACNTR;
- u32 pipeconf_reg = PIPEACONF;
- u32 reg_offset = 0;
-
- /* Values */
- u32 dspcntr_val = dev_priv->dspcntr;
- u32 pipeconf_val = dev_priv->pipeconf;
- u32 h_active_area = mode->hdisplay;
- u32 v_active_area = mode->vdisplay;
- u32 mipi_val;
-
- mipi_val = (PASS_FROM_SPHY_TO_AFE | SEL_FLOPPED_HSTX |
- TE_TRIGGER_GPIO_PIN);
-
- dev_dbg(dev->dev, "mipi_val =0x%x\n", mipi_val);
-
- dev_dbg(dev->dev, "type %s\n", (pipe == 2) ? "MIPI2" : "MIPI");
- dev_dbg(dev->dev, "h %d v %d\n", mode->hdisplay, mode->vdisplay);
-
- if (pipe == 2) {
- mipi_reg = MIPI_C;
- dspcntr_reg = DSPCCNTR;
- pipeconf_reg = PIPECCONF;
-
- reg_offset = MIPIC_REG_OFFSET;
-
- dspcntr_val = dev_priv->dspcntr2;
- pipeconf_val = dev_priv->pipeconf2;
- } else {
-		mipi_val |= 0x2; /* Two lanes for port A and C respectively */
- }
-
- if (!gma_power_begin(dev, true)) {
- dev_err(dev->dev, "hw begin failed\n");
- return;
- }
-
- REG_WRITE(dspcntr_reg, dspcntr_val);
- REG_READ(dspcntr_reg);
-
- /* 20ms delay before sending exit_sleep_mode */
- msleep(20);
-
- /* Send exit_sleep_mode DCS */
- ret = mdfld_dsi_dbi_send_dcs(dsi_output, DCS_EXIT_SLEEP_MODE,
- NULL, 0, CMD_DATA_SRC_SYSTEM_MEM);
- if (ret) {
-		dev_err(dev->dev, "failed to send exit_sleep_mode\n");
- goto out_err;
- }
-
- /* Send set_tear_on DCS */
- ret = mdfld_dsi_dbi_send_dcs(dsi_output, DCS_SET_TEAR_ON,
- &param, 1, CMD_DATA_SRC_SYSTEM_MEM);
- if (ret) {
-		dev_err(dev->dev, "%s - sending set_tear_on failed\n", __func__);
- goto out_err;
- }
-
- /* Do some init stuff */
- REG_WRITE(pipeconf_reg, pipeconf_val | PIPEACONF_DSR);
- REG_READ(pipeconf_reg);
-
- /* TODO: this looks ugly, try to move it to CRTC mode setting*/
- if (pipe == 2)
- dev_priv->pipeconf2 |= PIPEACONF_DSR;
- else
- dev_priv->pipeconf |= PIPEACONF_DSR;
-
- dev_dbg(dev->dev, "pipeconf %x\n", REG_READ(pipeconf_reg));
-
- ret = mdfld_dsi_dbi_update_area(dsi_output, 0, 0,
- h_active_area - 1, v_active_area - 1);
- if (ret) {
- dev_err(dev->dev, "update area failed\n");
- goto out_err;
- }
-
-out_err:
- gma_power_end(dev);
-
- if (ret)
- dev_err(dev->dev, "mode set failed\n");
-}
-
-static void mdfld_dsi_dbi_prepare(struct drm_encoder *encoder)
-{
- struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
- struct mdfld_dsi_dbi_output *dbi_output
- = MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
-
- dbi_output->mode_flags |= MODE_SETTING_IN_ENCODER;
- dbi_output->mode_flags &= ~MODE_SETTING_ENCODER_DONE;
-
- mdfld_dsi_dbi_set_power(encoder, false);
-}
-
-static void mdfld_dsi_dbi_commit(struct drm_encoder *encoder)
-{
- struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
- struct mdfld_dsi_dbi_output *dbi_output =
- MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
- struct drm_device *dev = dbi_output->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_drm_dpu_rect rect;
-
- mdfld_dsi_dbi_set_power(encoder, true);
- dbi_output->mode_flags &= ~MODE_SETTING_IN_ENCODER;
-
- rect.x = rect.y = 0;
- rect.width = 864;
- rect.height = 480;
-
- if (dbi_output->channel_num == 1) {
- dev_priv->dsr_fb_update |= MDFLD_DSR_2D_3D_2;
-		/* If DPU is enabled, report fullscreen damage */
- mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEC, &rect);
- } else {
- dev_priv->dsr_fb_update |= MDFLD_DSR_2D_3D_0;
- mdfld_dbi_dpu_report_damage(dev, MDFLD_PLANEA, &rect);
- }
- dbi_output->mode_flags |= MODE_SETTING_ENCODER_DONE;
-}
-
-static void mdfld_dsi_dbi_dpms(struct drm_encoder *encoder, int mode)
-{
- struct mdfld_dsi_encoder *dsi_encoder = MDFLD_DSI_ENCODER(encoder);
- struct mdfld_dsi_dbi_output *dbi_output
- = MDFLD_DSI_DBI_OUTPUT(dsi_encoder);
- struct drm_device *dev = dbi_output->dev;
- struct drm_psb_private *dev_priv = dev->dev_private;
- static bool bdispoff;
-
- dev_dbg(dev->dev, "%s\n", (mode == DRM_MODE_DPMS_ON ? "on" : "off"));
-
- if (mode == DRM_MODE_DPMS_ON) {
-		/*
-		 * FIXME: in case this is wrong!
-		 * We should not need to exit DSR here to wake up the
-		 * plane/pipe/PLL; if everything goes right, hw_begin will
-		 * resume them all during set_power.
-		 */
- if (bdispoff /* FIXME && gbgfxsuspended */) {
- mdfld_dsi_dbi_exit_dsr(dev, MDFLD_DSR_2D_3D);
- bdispoff = false;
- dev_priv->dispstatus = true;
- }
-
- mdfld_dsi_dbi_set_power(encoder, true);
- /* FIXME if (gbgfxsuspended)
- gbgfxsuspended = false; */
- } else {
-		/*
-		 * This may not be the ideal place to turn runtime pm on,
-		 * since we still have a lot of CRTC turn-on work to do.
-		 */
- bdispoff = true;
- dev_priv->dispstatus = false;
- mdfld_dsi_dbi_set_power(encoder, false);
- }
-}
-
-
-/*
- * Update the DBI MIPI Panel Frame Buffer.
- */
-static void mdfld_dsi_dbi_update_fb(struct mdfld_dsi_dbi_output *dbi_output,
- int pipe)
-{
- struct mdfld_dsi_pkg_sender *sender =
- mdfld_dsi_encoder_get_pkg_sender(&dbi_output->base);
- struct drm_device *dev = dbi_output->dev;
- struct drm_crtc *crtc = dbi_output->base.base.crtc;
- struct psb_intel_crtc *psb_crtc = (crtc) ?
- to_psb_intel_crtc(crtc) : NULL;
- u32 dpll_reg = MRST_DPLL_A;
- u32 dspcntr_reg = DSPACNTR;
- u32 pipeconf_reg = PIPEACONF;
- u32 dsplinoff_reg = DSPALINOFF;
- u32 dspsurf_reg = DSPASURF;
- u32 reg_offset = 0;
-
- /* If mode setting on-going, back off */
- if ((dbi_output->mode_flags & MODE_SETTING_ON_GOING) ||
- (psb_crtc && psb_crtc->mode_flags & MODE_SETTING_ON_GOING) ||
- !(dbi_output->mode_flags & MODE_SETTING_ENCODER_DONE))
- return;
-
- if (pipe == 2) {
- dspcntr_reg = DSPCCNTR;
- pipeconf_reg = PIPECCONF;
- dsplinoff_reg = DSPCLINOFF;
- dspsurf_reg = DSPCSURF;
- reg_offset = MIPIC_REG_OFFSET;
- }
-
- if (!gma_power_begin(dev, true)) {
- dev_err(dev->dev, "hw begin failed\n");
- return;
- }
-
- /* Check DBI FIFO status */
- if (!(REG_READ(dpll_reg) & DPLL_VCO_ENABLE) ||
- !(REG_READ(dspcntr_reg) & DISPLAY_PLANE_ENABLE) ||
- !(REG_READ(pipeconf_reg) & DISPLAY_PLANE_ENABLE))
- goto update_fb_out0;
-
- /* Refresh plane changes */
- REG_WRITE(dsplinoff_reg, REG_READ(dsplinoff_reg));
- REG_WRITE(dspsurf_reg, REG_READ(dspsurf_reg));
- REG_READ(dspsurf_reg);
-
- mdfld_dsi_send_dcs(sender,
- DCS_WRITE_MEM_START,
- NULL,
- 0,
- CMD_DATA_SRC_PIPE,
- MDFLD_DSI_SEND_PACKAGE);
-
- dbi_output->dsr_fb_update_done = true;
-update_fb_out0:
- gma_power_end(dev);
-}
-
-static int tpo_cmd_get_panel_info(struct drm_device *dev,
- int pipe,
- struct panel_info *pi)
-{
- if (!dev || !pi)
- return -EINVAL;
-
- pi->width_mm = TPO_PANEL_WIDTH;
- pi->height_mm = TPO_PANEL_HEIGHT;
-
- return 0;
-}
-
-
-/* TPO DBI encoder helper funcs */
-static const struct drm_encoder_helper_funcs mdfld_dsi_dbi_helper_funcs = {
- .dpms = mdfld_dsi_dbi_dpms,
- .mode_fixup = mdfld_dsi_dbi_mode_fixup,
- .prepare = mdfld_dsi_dbi_prepare,
- .mode_set = mdfld_dsi_dbi_mode_set,
- .commit = mdfld_dsi_dbi_commit,
-};
-
-/* TPO DBI encoder funcs */
-static const struct drm_encoder_funcs mdfld_dsi_dbi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-void tpo_cmd_init(struct drm_device *dev, struct panel_funcs *p_funcs)
-{
- p_funcs->encoder_funcs = &mdfld_dsi_dbi_encoder_funcs;
- p_funcs->encoder_helper_funcs = &mdfld_dsi_dbi_helper_funcs;
- p_funcs->get_config_mode = &tpo_cmd_get_config_mode;
- p_funcs->update_fb = mdfld_dsi_dbi_update_fb;
- p_funcs->get_panel_info = tpo_cmd_get_panel_info;
- p_funcs->reset = mdfld_dsi_panel_reset;
- p_funcs->drv_ic_init = mdfld_dsi_brightness_init;
-}
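tpo_cmd_init() above only populates the panel_funcs table; the output setup code (mdfld_output_setup() in psb_drv.c further down) is then expected to allocate that table and hand it to the DSI output layer. A minimal sketch of that wiring, assuming an mdfld_dsi_output_init(dev, pipe, p_funcs) entry point that is not shown in this diff:

	/* Sketch only: the mdfld_dsi_output_init() signature is assumed. */
	static void example_setup_tpo_cmd_panel(struct drm_device *dev, int pipe)
	{
		struct panel_funcs *p_funcs = kzalloc(sizeof(*p_funcs), GFP_KERNEL);

		if (!p_funcs)
			return;

		tpo_cmd_init(dev, p_funcs);		   /* fill encoder and panel hooks */
		mdfld_dsi_output_init(dev, pipe, p_funcs); /* assumed entry point */
	}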
diff --git a/drivers/staging/gma500/mdfld_tpo_vid.c b/drivers/staging/gma500/mdfld_tpo_vid.c
deleted file mode 100644
index 9549017..0000000
--- a/drivers/staging/gma500/mdfld_tpo_vid.c
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Copyright © 2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- * jim liu <jim.liu@intel.com>
- * Jackie Li<yaodong.li@intel.com>
- */
-
-#include "mdfld_dsi_dbi.h"
-#include "mdfld_dsi_dpi.h"
-#include "mdfld_dsi_output.h"
-#include "mdfld_output.h"
-
-#include "mdfld_dsi_pkg_sender.h"
-
-#include "displays/tpo_vid.h"
-
-static struct drm_display_mode *tpo_vid_get_config_mode(struct drm_device *dev)
-{
- struct drm_display_mode *mode;
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
- bool use_gct = false;
-
- mode = kzalloc(sizeof(*mode), GFP_KERNEL);
- if (!mode) {
- dev_err(dev->dev, "out of memory\n");
- return NULL;
- }
-
- if (use_gct) {
- mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
- mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
- mode->hsync_start = mode->hdisplay + \
- ((ti->hsync_offset_hi << 8) | \
- ti->hsync_offset_lo);
- mode->hsync_end = mode->hsync_start + \
- ((ti->hsync_pulse_width_hi << 8) | \
- ti->hsync_pulse_width_lo);
- mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) | \
- ti->hblank_lo);
- mode->vsync_start = \
- mode->vdisplay + ((ti->vsync_offset_hi << 8) | \
- ti->vsync_offset_lo);
- mode->vsync_end = \
- mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
- ti->vsync_pulse_width_lo);
- mode->vtotal = mode->vdisplay + \
- ((ti->vblank_hi << 8) | ti->vblank_lo);
- mode->clock = ti->pixel_clock * 10;
-
- dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay);
- dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay);
- dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start);
- dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end);
- dev_dbg(dev->dev, "htotal is %d\n", mode->htotal);
- dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start);
- dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end);
- dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal);
- dev_dbg(dev->dev, "clock is %d\n", mode->clock);
- } else {
- mode->hdisplay = 864;
- mode->vdisplay = 480;
- mode->hsync_start = 873;
- mode->hsync_end = 876;
- mode->htotal = 887;
- mode->vsync_start = 487;
- mode->vsync_end = 490;
- mode->vtotal = 499;
- mode->clock = 33264;
- }
-
- drm_mode_set_name(mode);
- drm_mode_set_crtcinfo(mode, 0);
-
- mode->type |= DRM_MODE_TYPE_PREFERRED;
-
- return mode;
-}
-
-static int tpo_vid_get_panel_info(struct drm_device *dev,
- int pipe,
- struct panel_info *pi)
-{
- if (!dev || !pi)
- return -EINVAL;
-
- pi->width_mm = TPO_PANEL_WIDTH;
- pi->height_mm = TPO_PANEL_HEIGHT;
-
- return 0;
-}
-
-/*TPO DPI encoder helper funcs*/
-static const struct drm_encoder_helper_funcs
- mdfld_tpo_dpi_encoder_helper_funcs = {
- .dpms = mdfld_dsi_dpi_dpms,
- .mode_fixup = mdfld_dsi_dpi_mode_fixup,
- .prepare = mdfld_dsi_dpi_prepare,
- .mode_set = mdfld_dsi_dpi_mode_set,
- .commit = mdfld_dsi_dpi_commit,
-};
-
-/*TPO DPI encoder funcs*/
-static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = {
- .destroy = drm_encoder_cleanup,
-};
-
-void tpo_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs)
-{
-	if (!dev || !p_funcs) {
-		pr_err("tpo_vid_init: invalid parameters\n");
-		return;
-	}
-
- p_funcs->encoder_funcs = &mdfld_tpo_dpi_encoder_funcs;
- p_funcs->encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs;
- p_funcs->get_config_mode = &tpo_vid_get_config_mode;
- p_funcs->update_fb = NULL;
- p_funcs->get_panel_info = tpo_vid_get_panel_info;
-}
diff --git a/drivers/staging/gma500/medfield.h b/drivers/staging/gma500/medfield.h
deleted file mode 100644
index 09e9687..0000000
--- a/drivers/staging/gma500/medfield.h
+++ /dev/null
@@ -1,268 +0,0 @@
-/*
- * Copyright © 2011 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-/* Medfield DSI controller registers */
-
-#define MIPIA_DEVICE_READY_REG 0xb000
-#define MIPIA_INTR_STAT_REG 0xb004
-#define MIPIA_INTR_EN_REG 0xb008
-#define MIPIA_DSI_FUNC_PRG_REG 0xb00c
-#define MIPIA_HS_TX_TIMEOUT_REG 0xb010
-#define MIPIA_LP_RX_TIMEOUT_REG 0xb014
-#define MIPIA_TURN_AROUND_TIMEOUT_REG 0xb018
-#define MIPIA_DEVICE_RESET_TIMER_REG 0xb01c
-#define MIPIA_DPI_RESOLUTION_REG 0xb020
-#define MIPIA_DBI_FIFO_THROTTLE_REG 0xb024
-#define MIPIA_HSYNC_COUNT_REG 0xb028
-#define MIPIA_HBP_COUNT_REG 0xb02c
-#define MIPIA_HFP_COUNT_REG 0xb030
-#define MIPIA_HACTIVE_COUNT_REG 0xb034
-#define MIPIA_VSYNC_COUNT_REG 0xb038
-#define MIPIA_VBP_COUNT_REG 0xb03c
-#define MIPIA_VFP_COUNT_REG 0xb040
-#define MIPIA_HIGH_LOW_SWITCH_COUNT_REG 0xb044
-#define MIPIA_DPI_CONTROL_REG 0xb048
-#define MIPIA_DPI_DATA_REG 0xb04c
-#define MIPIA_INIT_COUNT_REG 0xb050
-#define MIPIA_MAX_RETURN_PACK_SIZE_REG 0xb054
-#define MIPIA_VIDEO_MODE_FORMAT_REG 0xb058
-#define MIPIA_EOT_DISABLE_REG 0xb05c
-#define MIPIA_LP_BYTECLK_REG 0xb060
-#define MIPIA_LP_GEN_DATA_REG 0xb064
-#define MIPIA_HS_GEN_DATA_REG 0xb068
-#define MIPIA_LP_GEN_CTRL_REG 0xb06c
-#define MIPIA_HS_GEN_CTRL_REG 0xb070
-#define MIPIA_GEN_FIFO_STAT_REG 0xb074
-#define MIPIA_HS_LS_DBI_ENABLE_REG 0xb078
-#define MIPIA_DPHY_PARAM_REG 0xb080
-#define MIPIA_DBI_BW_CTRL_REG 0xb084
-#define MIPIA_CLK_LANE_SWITCH_TIME_CNT_REG 0xb088
-
-#define DSI_DEVICE_READY (0x1)
-#define DSI_POWER_STATE_ULPS_ENTER (0x2 << 1)
-#define DSI_POWER_STATE_ULPS_EXIT (0x1 << 1)
-#define DSI_POWER_STATE_ULPS_OFFSET (0x1)
-
-
-#define DSI_ONE_DATA_LANE (0x1)
-#define DSI_TWO_DATA_LANE (0x2)
-#define DSI_THREE_DATA_LANE			(0x3)
-#define DSI_FOUR_DATA_LANE (0x4)
-#define DSI_DPI_VIRT_CHANNEL_OFFSET (0x3)
-#define DSI_DBI_VIRT_CHANNEL_OFFSET (0x5)
-#define DSI_DPI_COLOR_FORMAT_RGB565 (0x01 << 7)
-#define DSI_DPI_COLOR_FORMAT_RGB666 (0x02 << 7)
-#define DSI_DPI_COLOR_FORMAT_RGB666_UNPACK (0x03 << 7)
-#define DSI_DPI_COLOR_FORMAT_RGB888 (0x04 << 7)
-#define DSI_DBI_COLOR_FORMAT_OPTION2 (0x05 << 13)
-
-#define DSI_INTR_STATE_RXSOTERROR 1
-
-#define DSI_INTR_STATE_SPL_PKG_SENT (1 << 30)
-#define DSI_INTR_STATE_TE (1 << 31)
-
-#define DSI_HS_TX_TIMEOUT_MASK (0xffffff)
-
-#define DSI_LP_RX_TIMEOUT_MASK (0xffffff)
-
-#define DSI_TURN_AROUND_TIMEOUT_MASK (0x3f)
-
-#define DSI_RESET_TIMER_MASK (0xffff)
-
-#define DSI_DBI_FIFO_WM_HALF (0x0)
-#define DSI_DBI_FIFO_WM_QUARTER (0x1)
-#define DSI_DBI_FIFO_WM_LOW (0x2)
-
-#define DSI_DPI_TIMING_MASK (0xffff)
-
-#define DSI_INIT_TIMER_MASK (0xffff)
-
-#define DSI_DBI_RETURN_PACK_SIZE_MASK (0x3ff)
-
-#define DSI_LP_BYTECLK_MASK (0x0ffff)
-
-#define DSI_HS_CTRL_GEN_SHORT_W0 (0x03)
-#define DSI_HS_CTRL_GEN_SHORT_W1 (0x13)
-#define DSI_HS_CTRL_GEN_SHORT_W2 (0x23)
-#define DSI_HS_CTRL_GEN_R0 (0x04)
-#define DSI_HS_CTRL_GEN_R1 (0x14)
-#define DSI_HS_CTRL_GEN_R2 (0x24)
-#define DSI_HS_CTRL_GEN_LONG_W (0x29)
-#define DSI_HS_CTRL_MCS_SHORT_W0 (0x05)
-#define DSI_HS_CTRL_MCS_SHORT_W1 (0x15)
-#define DSI_HS_CTRL_MCS_R0 (0x06)
-#define DSI_HS_CTRL_MCS_LONG_W (0x39)
-#define DSI_HS_CTRL_VC_OFFSET (0x06)
-#define DSI_HS_CTRL_WC_OFFSET (0x08)
-
-#define DSI_FIFO_GEN_HS_DATA_FULL (1 << 0)
-#define DSI_FIFO_GEN_HS_DATA_HALF_EMPTY (1 << 1)
-#define DSI_FIFO_GEN_HS_DATA_EMPTY (1 << 2)
-#define DSI_FIFO_GEN_LP_DATA_FULL (1 << 8)
-#define DSI_FIFO_GEN_LP_DATA_HALF_EMPTY (1 << 9)
-#define DSI_FIFO_GEN_LP_DATA_EMPTY (1 << 10)
-#define DSI_FIFO_GEN_HS_CTRL_FULL (1 << 16)
-#define DSI_FIFO_GEN_HS_CTRL_HALF_EMPTY (1 << 17)
-#define DSI_FIFO_GEN_HS_CTRL_EMPTY (1 << 18)
-#define DSI_FIFO_GEN_LP_CTRL_FULL (1 << 24)
-#define DSI_FIFO_GEN_LP_CTRL_HALF_EMPTY (1 << 25)
-#define DSI_FIFO_GEN_LP_CTRL_EMPTY (1 << 26)
-#define DSI_FIFO_DBI_EMPTY (1 << 27)
-#define DSI_FIFO_DPI_EMPTY (1 << 28)
-
-#define DSI_DBI_HS_LP_SWITCH_MASK (0x1)
-
-#define DSI_HS_LP_SWITCH_COUNTER_OFFSET (0x0)
-#define DSI_LP_HS_SWITCH_COUNTER_OFFSET (0x16)
-
-#define DSI_DPI_CTRL_HS_SHUTDOWN (0x00000001)
-#define DSI_DPI_CTRL_HS_TURN_ON (0x00000002)
-
-/* Medfield DSI adapter registers */
-#define MIPIA_CONTROL_REG 0xb104
-#define MIPIA_DATA_ADD_REG 0xb108
-#define MIPIA_DATA_LEN_REG 0xb10c
-#define MIPIA_CMD_ADD_REG 0xb110
-#define MIPIA_CMD_LEN_REG 0xb114
-
-/*dsi power modes*/
-#define DSI_POWER_MODE_DISPLAY_ON (1 << 2)
-#define DSI_POWER_MODE_NORMAL_ON (1 << 3)
-#define DSI_POWER_MODE_SLEEP_OUT (1 << 4)
-#define DSI_POWER_MODE_PARTIAL_ON (1 << 5)
-#define DSI_POWER_MODE_IDLE_ON (1 << 6)
-
-enum {
- MDFLD_DSI_ENCODER_DBI = 0,
- MDFLD_DSI_ENCODER_DPI,
-};
-
-enum {
- MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_PULSE = 1,
- MDFLD_DSI_VIDEO_NON_BURST_MODE_SYNC_EVENTS = 2,
- MDFLD_DSI_VIDEO_BURST_MODE = 3,
-};
-
-#define DSI_DPI_COMPLETE_LAST_LINE (1 << 2)
-#define DSI_DPI_DISABLE_BTA (1 << 3)
-/* Panel types */
-enum {
- TPO_CMD,
- TPO_VID,
- TMD_CMD,
- TMD_VID,
- PYR_CMD,
- PYR_VID,
- TPO,
- TMD,
- PYR,
- HDMI,
- GCT_DETECT
-};
-
-/* Junk that belongs elsewhere */
-#define TPO_PANEL_WIDTH 84
-#define TPO_PANEL_HEIGHT 46
-#define TMD_PANEL_WIDTH 39
-#define TMD_PANEL_HEIGHT 71
-#define PYR_PANEL_WIDTH 53
-#define PYR_PANEL_HEIGHT 95
-
-/* Panel interface */
-struct panel_info {
- u32 width_mm;
- u32 height_mm;
-};
-
-struct mdfld_dsi_dbi_output;
-
-struct mdfld_dsi_connector_state {
- u32 mipi_ctrl_reg;
-};
-
-struct mdfld_dsi_encoder_state {
-
-};
-
-struct mdfld_dsi_connector {
-	/*
-	 * This is ugly, but we have to embed the psb_intel_output here
-	 * for now. FIXME: use drm_connector instead.
-	 */
- struct psb_intel_output base;
-
- int pipe;
- void *private;
- void *pkg_sender;
-
- /* Connection status */
- enum drm_connector_status status;
-};
-
-struct mdfld_dsi_encoder {
- struct drm_encoder base;
- void *private;
-};
-
-/*
- * A DSI config consists of one DSI connector and two DSI encoders.
- * DRM picks the DSI encoder based on the configuration.
- */
-struct mdfld_dsi_config {
- struct drm_device *dev;
- struct drm_display_mode *fixed_mode;
- struct drm_display_mode *mode;
-
- struct mdfld_dsi_connector *connector;
- struct mdfld_dsi_encoder *encoders[DRM_CONNECTOR_MAX_ENCODER];
- struct mdfld_dsi_encoder *encoder;
-
- int changed;
-
- int bpp;
- int type;
- int lane_count;
-	/* Virtual channel number for this encoder */
-	int channel_num;
-	/* Video mode configuration */
-	int video_mode;
-
- int dvr_ic_inited;
-};
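The encoders[] array together with the type field is what lets a single connector drive either the DBI or the DPI path. A hypothetical helper (not part of this header) showing the intended selection, assuming type holds one of the MDFLD_DSI_ENCODER_* values:

	static inline struct mdfld_dsi_encoder *
	example_dsi_config_get_encoder(struct mdfld_dsi_config *dsi_config)
	{
		/* Assumes 'type' is MDFLD_DSI_ENCODER_DBI or MDFLD_DSI_ENCODER_DPI. */
		return dsi_config->encoders[dsi_config->type];
	}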
-
-#define MDFLD_DSI_CONNECTOR(psb_output) \
- (container_of(psb_output, struct mdfld_dsi_connector, base))
-
-#define MDFLD_DSI_ENCODER(encoder) \
- (container_of(encoder, struct mdfld_dsi_encoder, base))
-
-struct panel_funcs {
- const struct drm_encoder_funcs *encoder_funcs;
- const struct drm_encoder_helper_funcs *encoder_helper_funcs;
- struct drm_display_mode *(*get_config_mode) (struct drm_device *);
- void (*update_fb) (struct mdfld_dsi_dbi_output *, int);
- int (*get_panel_info) (struct drm_device *, int, struct panel_info *);
- int (*reset)(int pipe);
- void (*drv_ic_init)(struct mdfld_dsi_config *dsi_config, int pipe);
-};
-
diff --git a/drivers/staging/gma500/psb_drm.h b/drivers/staging/gma500/psb_drm.h
deleted file mode 100644
index 0da8468..0000000
--- a/drivers/staging/gma500/psb_drm.h
+++ /dev/null
@@ -1,219 +0,0 @@
-/**************************************************************************
- * Copyright (c) 2007-2011, Intel Corporation.
- * All Rights Reserved.
- * Copyright (c) 2008, Tungsten Graphics Inc. Cedar Park, TX., USA.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- **************************************************************************/
-
-#ifndef _PSB_DRM_H_
-#define _PSB_DRM_H_
-
-#define PSB_NUM_PIPE 3
-
-#define PSB_GPU_ACCESS_READ (1ULL << 32)
-#define PSB_GPU_ACCESS_WRITE (1ULL << 33)
-#define PSB_GPU_ACCESS_MASK (PSB_GPU_ACCESS_READ | PSB_GPU_ACCESS_WRITE)
-
-#define PSB_BO_FLAG_COMMAND (1ULL << 52)
-
-/*
- * Feedback components:
- */
-
-struct drm_psb_sizes_arg {
- u32 ta_mem_size;
- u32 mmu_size;
- u32 pds_size;
- u32 rastgeom_size;
- u32 tt_size;
- u32 vram_size;
-};
-
-struct drm_psb_dpst_lut_arg {
- uint8_t lut[256];
- int output_id;
-};
-
-#define PSB_DC_CRTC_SAVE 0x01
-#define PSB_DC_CRTC_RESTORE 0x02
-#define PSB_DC_OUTPUT_SAVE 0x04
-#define PSB_DC_OUTPUT_RESTORE 0x08
-#define PSB_DC_CRTC_MASK 0x03
-#define PSB_DC_OUTPUT_MASK 0x0C
-
-struct drm_psb_dc_state_arg {
- u32 flags;
- u32 obj_id;
-};
-
-struct drm_psb_mode_operation_arg {
- u32 obj_id;
- u16 operation;
- struct drm_mode_modeinfo mode;
- void *data;
-};
-
-struct drm_psb_stolen_memory_arg {
- u32 base;
- u32 size;
-};
-
-/*Display Register Bits*/
-#define REGRWBITS_PFIT_CONTROLS (1 << 0)
-#define REGRWBITS_PFIT_AUTOSCALE_RATIOS (1 << 1)
-#define REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS (1 << 2)
-#define REGRWBITS_PIPEASRC (1 << 3)
-#define REGRWBITS_PIPEBSRC (1 << 4)
-#define REGRWBITS_VTOTAL_A (1 << 5)
-#define REGRWBITS_VTOTAL_B (1 << 6)
-#define REGRWBITS_DSPACNTR (1 << 8)
-#define REGRWBITS_DSPBCNTR (1 << 9)
-#define REGRWBITS_DSPCCNTR (1 << 10)
-
-/*Overlay Register Bits*/
-#define OV_REGRWBITS_OVADD (1 << 0)
-#define OV_REGRWBITS_OGAM_ALL (1 << 1)
-
-#define OVC_REGRWBITS_OVADD (1 << 2)
-#define OVC_REGRWBITS_OGAM_ALL (1 << 3)
-
-struct drm_psb_register_rw_arg {
- u32 b_force_hw_on;
-
- u32 display_read_mask;
- u32 display_write_mask;
-
- struct {
- u32 pfit_controls;
- u32 pfit_autoscale_ratios;
- u32 pfit_programmed_scale_ratios;
- u32 pipeasrc;
- u32 pipebsrc;
- u32 vtotal_a;
- u32 vtotal_b;
- } display;
-
- u32 overlay_read_mask;
- u32 overlay_write_mask;
-
- struct {
- u32 OVADD;
- u32 OGAMC0;
- u32 OGAMC1;
- u32 OGAMC2;
- u32 OGAMC3;
- u32 OGAMC4;
- u32 OGAMC5;
- u32 IEP_ENABLED;
- u32 IEP_BLE_MINMAX;
- u32 IEP_BSSCC_CONTROL;
- u32 b_wait_vblank;
- } overlay;
-
- u32 sprite_enable_mask;
- u32 sprite_disable_mask;
-
- struct {
- u32 dspa_control;
- u32 dspa_key_value;
- u32 dspa_key_mask;
- u32 dspc_control;
- u32 dspc_stride;
- u32 dspc_position;
- u32 dspc_linear_offset;
- u32 dspc_size;
- u32 dspc_surface;
- } sprite;
-
- u32 subpicture_enable_mask;
- u32 subpicture_disable_mask;
-};
-
-/* Controlling the kernel modesetting buffers */
-
-#define DRM_PSB_SIZES 0x07
-#define DRM_PSB_FUSE_REG 0x08
-#define DRM_PSB_DC_STATE 0x0A
-#define DRM_PSB_ADB 0x0B
-#define DRM_PSB_MODE_OPERATION 0x0C
-#define DRM_PSB_STOLEN_MEMORY 0x0D
-#define DRM_PSB_REGISTER_RW 0x0E
-
-/*
- * NOTE: add new commands here, increment the values below, and update
- * the corresponding defines where they are defined elsewhere.
- */
-
-#define DRM_PSB_GEM_CREATE 0x10
-#define DRM_PSB_2D_OP 0x11
-#define DRM_PSB_GEM_MMAP 0x12
-#define DRM_PSB_DPST 0x1B
-#define DRM_PSB_GAMMA 0x1C
-#define DRM_PSB_DPST_BL 0x1D
-#define DRM_PSB_GET_PIPE_FROM_CRTC_ID 0x1F
-
-#define PSB_MODE_OPERATION_MODE_VALID 0x01
-#define PSB_MODE_OPERATION_SET_DC_BASE 0x02
-
-struct drm_psb_get_pipe_from_crtc_id_arg {
-	/* ID of the CRTC being requested */
-	u32 crtc_id;
-
-	/* Pipe of the requested CRTC */
-	u32 pipe;
-};
-
-/* FIXME: move this into a medfield header once we are sure it isn't needed for an
- ioctl */
-struct psb_drm_dpu_rect {
- int x, y;
- int width, height;
-};
-
-struct drm_psb_gem_create {
- __u64 size;
- __u32 handle;
- __u32 flags;
-#define PSB_GEM_CREATE_STOLEN 1 /* Stolen memory can be used */
-};
-
-#define PSB_2D_OP_BUFLEN 16
-
-struct drm_psb_2d_op {
- __u32 src; /* Handles, only src supported right now */
- __u32 dst;
- __u32 mask;
- __u32 pat;
- __u32 size; /* In dwords of command */
- __u32 spare; /* And bumps array to u64 align */
- __u32 cmd[PSB_2D_OP_BUFLEN];
-};
-
-struct drm_psb_gem_mmap {
- __u32 handle;
- __u32 pad;
- /**
- * Fake offset to use for subsequent mmap call
- *
- * This is a fixed-size type for 32/64 compatibility.
- */
- __u64 offset;
-};
-
-#endif
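Because psb_drm.h defines the userspace-visible ioctl ABI, the command numbers above are normally driven through libdrm's generic command wrappers. A minimal userspace sketch for the CRTC-to-pipe query, assuming libdrm's drmCommandWriteRead() and a build environment in which the u32 types in this header resolve:

	#include <xf86drm.h>
	#include "psb_drm.h"

	/* Sketch only: returns the pipe for a CRTC id, or a negative error code. */
	static int example_psb_crtc_to_pipe(int fd, unsigned int crtc_id)
	{
		struct drm_psb_get_pipe_from_crtc_id_arg arg = { .crtc_id = crtc_id };
		int ret;

		ret = drmCommandWriteRead(fd, DRM_PSB_GET_PIPE_FROM_CRTC_ID,
					  &arg, sizeof(arg));
		return ret ? ret : (int)arg.pipe;
	}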
diff --git a/drivers/staging/gma500/psb_drv.c b/drivers/staging/gma500/psb_drv.c
deleted file mode 100644
index 986a04d..0000000
--- a/drivers/staging/gma500/psb_drv.c
+++ /dev/null
@@ -1,1229 +0,0 @@
-/**************************************************************************
- * Copyright (c) 2007-2011, Intel Corporation.
- * All Rights Reserved.
- * Copyright (c) 2008, Tungsten Graphics, Inc. Cedar Park, TX., USA.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- **************************************************************************/
-
-#include <drm/drmP.h>
-#include <drm/drm.h>
-#include "psb_drm.h"
-#include "psb_drv.h"
-#include "framebuffer.h"
-#include "psb_reg.h"
-#include "psb_intel_reg.h"
-#include "intel_bios.h"
-#include "mid_bios.h"
-#include "mdfld_dsi_dbi.h"
-#include <drm/drm_pciids.h>
-#include "power.h"
-#include <linux/cpu.h>
-#include <linux/notifier.h>
-#include <linux/spinlock.h>
-#include <linux/pm_runtime.h>
-#include <linux/module.h>
-#include <acpi/video.h>
-
-static int drm_psb_trap_pagefaults;
-
-int drm_psb_no_fb;
-
-static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
-
-MODULE_PARM_DESC(no_fb, "Disable FBdev");
-MODULE_PARM_DESC(trap_pagefaults, "Error and reset on MMU pagefaults");
-module_param_named(no_fb, drm_psb_no_fb, int, 0600);
-module_param_named(trap_pagefaults, drm_psb_trap_pagefaults, int, 0600);
-
-
-static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
- { 0x8086, 0x8108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
- { 0x8086, 0x8109, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &psb_chip_ops },
-#if defined(CONFIG_DRM_PSB_MRST)
- { 0x8086, 0x4100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
- { 0x8086, 0x4101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
- { 0x8086, 0x4102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
- { 0x8086, 0x4103, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
- { 0x8086, 0x4104, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
- { 0x8086, 0x4105, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
- { 0x8086, 0x4106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
- { 0x8086, 0x4107, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mrst_chip_ops},
-#endif
-#if defined(CONFIG_DRM_PSB_MFLD)
- { 0x8086, 0x0130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
- { 0x8086, 0x0131, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
- { 0x8086, 0x0132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
- { 0x8086, 0x0133, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
- { 0x8086, 0x0134, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
- { 0x8086, 0x0135, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
- { 0x8086, 0x0136, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
- { 0x8086, 0x0137, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &mdfld_chip_ops},
-#endif
-#if defined(CONFIG_DRM_PSB_CDV)
- { 0x8086, 0x0be0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
- { 0x8086, 0x0be1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
- { 0x8086, 0x0be2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
- { 0x8086, 0x0be3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
- { 0x8086, 0x0be4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
- { 0x8086, 0x0be5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
- { 0x8086, 0x0be6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
- { 0x8086, 0x0be7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (long) &cdv_chip_ops},
-#endif
- { 0, 0, 0}
-};
-MODULE_DEVICE_TABLE(pci, pciidlist);
-
-/*
- * Standard IOCTLs.
- */
-
-#define DRM_IOCTL_PSB_SIZES \
- DRM_IOR(DRM_PSB_SIZES + DRM_COMMAND_BASE, \
- struct drm_psb_sizes_arg)
-#define DRM_IOCTL_PSB_FUSE_REG \
- DRM_IOWR(DRM_PSB_FUSE_REG + DRM_COMMAND_BASE, uint32_t)
-#define DRM_IOCTL_PSB_DC_STATE \
- DRM_IOW(DRM_PSB_DC_STATE + DRM_COMMAND_BASE, \
- struct drm_psb_dc_state_arg)
-#define DRM_IOCTL_PSB_ADB \
- DRM_IOWR(DRM_PSB_ADB + DRM_COMMAND_BASE, uint32_t)
-#define DRM_IOCTL_PSB_MODE_OPERATION \
- DRM_IOWR(DRM_PSB_MODE_OPERATION + DRM_COMMAND_BASE, \
- struct drm_psb_mode_operation_arg)
-#define DRM_IOCTL_PSB_STOLEN_MEMORY \
- DRM_IOWR(DRM_PSB_STOLEN_MEMORY + DRM_COMMAND_BASE, \
- struct drm_psb_stolen_memory_arg)
-#define DRM_IOCTL_PSB_REGISTER_RW \
- DRM_IOWR(DRM_PSB_REGISTER_RW + DRM_COMMAND_BASE, \
- struct drm_psb_register_rw_arg)
-#define DRM_IOCTL_PSB_DPST \
- DRM_IOWR(DRM_PSB_DPST + DRM_COMMAND_BASE, \
- uint32_t)
-#define DRM_IOCTL_PSB_GAMMA \
- DRM_IOWR(DRM_PSB_GAMMA + DRM_COMMAND_BASE, \
- struct drm_psb_dpst_lut_arg)
-#define DRM_IOCTL_PSB_DPST_BL \
- DRM_IOWR(DRM_PSB_DPST_BL + DRM_COMMAND_BASE, \
- uint32_t)
-#define DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID \
- DRM_IOWR(DRM_PSB_GET_PIPE_FROM_CRTC_ID + DRM_COMMAND_BASE, \
- struct drm_psb_get_pipe_from_crtc_id_arg)
-#define DRM_IOCTL_PSB_GEM_CREATE \
- DRM_IOWR(DRM_PSB_GEM_CREATE + DRM_COMMAND_BASE, \
- struct drm_psb_gem_create)
-#define DRM_IOCTL_PSB_2D_OP \
- DRM_IOW(DRM_PSB_2D_OP + DRM_COMMAND_BASE, \
- struct drm_psb_2d_op)
-#define DRM_IOCTL_PSB_GEM_MMAP \
- DRM_IOWR(DRM_PSB_GEM_MMAP + DRM_COMMAND_BASE, \
- struct drm_psb_gem_mmap)
-
-static int psb_sizes_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-static int psb_dc_state_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-static int psb_adb_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-static int psb_register_rw_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-static int psb_dpst_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-static int psb_gamma_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-#define PSB_IOCTL_DEF(ioctl, func, flags) \
- [DRM_IOCTL_NR(ioctl) - DRM_COMMAND_BASE] = {ioctl, flags, func}
-
-static struct drm_ioctl_desc psb_ioctls[] = {
- PSB_IOCTL_DEF(DRM_IOCTL_PSB_SIZES, psb_sizes_ioctl, DRM_AUTH),
- PSB_IOCTL_DEF(DRM_IOCTL_PSB_DC_STATE, psb_dc_state_ioctl, DRM_AUTH),
- PSB_IOCTL_DEF(DRM_IOCTL_PSB_ADB, psb_adb_ioctl, DRM_AUTH),
- PSB_IOCTL_DEF(DRM_IOCTL_PSB_MODE_OPERATION, psb_mode_operation_ioctl,
- DRM_AUTH),
- PSB_IOCTL_DEF(DRM_IOCTL_PSB_STOLEN_MEMORY, psb_stolen_memory_ioctl,
- DRM_AUTH),
- PSB_IOCTL_DEF(DRM_IOCTL_PSB_REGISTER_RW, psb_register_rw_ioctl,
- DRM_AUTH),
- PSB_IOCTL_DEF(DRM_IOCTL_PSB_DPST, psb_dpst_ioctl, DRM_AUTH),
- PSB_IOCTL_DEF(DRM_IOCTL_PSB_GAMMA, psb_gamma_ioctl, DRM_AUTH),
- PSB_IOCTL_DEF(DRM_IOCTL_PSB_DPST_BL, psb_dpst_bl_ioctl, DRM_AUTH),
- PSB_IOCTL_DEF(DRM_IOCTL_PSB_GET_PIPE_FROM_CRTC_ID,
- psb_intel_get_pipe_from_crtc_id, 0),
- PSB_IOCTL_DEF(DRM_IOCTL_PSB_GEM_CREATE, psb_gem_create_ioctl,
- DRM_UNLOCKED | DRM_AUTH),
- PSB_IOCTL_DEF(DRM_IOCTL_PSB_2D_OP, psb_accel_ioctl,
-			DRM_UNLOCKED | DRM_AUTH),
- PSB_IOCTL_DEF(DRM_IOCTL_PSB_GEM_MMAP, psb_gem_mmap_ioctl,
- DRM_UNLOCKED | DRM_AUTH),
-};
-
-static void psb_lastclose(struct drm_device *dev)
-{
- return;
-}
-
-static void psb_do_takedown(struct drm_device *dev)
-{
-}
-
-static int psb_do_init(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
- struct psb_gtt *pg = &dev_priv->gtt;
-
- uint32_t stolen_gtt;
-
- int ret = -ENOMEM;
-
- if (pg->mmu_gatt_start & 0x0FFFFFFF) {
- dev_err(dev->dev, "Gatt must be 256M aligned. This is a bug.\n");
- ret = -EINVAL;
- goto out_err;
- }
-
-
- stolen_gtt = (pg->stolen_size >> PAGE_SHIFT) * 4;
- stolen_gtt = (stolen_gtt + PAGE_SIZE - 1) >> PAGE_SHIFT;
- stolen_gtt =
- (stolen_gtt < pg->gtt_pages) ? stolen_gtt : pg->gtt_pages;
-
- dev_priv->gatt_free_offset = pg->mmu_gatt_start +
- (stolen_gtt << PAGE_SHIFT) * 1024;
-
- if (1 || drm_debug) {
- uint32_t core_id = PSB_RSGX32(PSB_CR_CORE_ID);
- uint32_t core_rev = PSB_RSGX32(PSB_CR_CORE_REVISION);
- DRM_INFO("SGX core id = 0x%08x\n", core_id);
- DRM_INFO("SGX core rev major = 0x%02x, minor = 0x%02x\n",
- (core_rev & _PSB_CC_REVISION_MAJOR_MASK) >>
- _PSB_CC_REVISION_MAJOR_SHIFT,
- (core_rev & _PSB_CC_REVISION_MINOR_MASK) >>
- _PSB_CC_REVISION_MINOR_SHIFT);
- DRM_INFO
- ("SGX core rev maintenance = 0x%02x, designer = 0x%02x\n",
- (core_rev & _PSB_CC_REVISION_MAINTENANCE_MASK) >>
- _PSB_CC_REVISION_MAINTENANCE_SHIFT,
- (core_rev & _PSB_CC_REVISION_DESIGNER_MASK) >>
- _PSB_CC_REVISION_DESIGNER_SHIFT);
- }
-
-
- spin_lock_init(&dev_priv->irqmask_lock);
- spin_lock_init(&dev_priv->lock_2d);
-
- PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK0);
- PSB_WSGX32(0x00000000, PSB_CR_BIF_BANK1);
- PSB_RSGX32(PSB_CR_BIF_BANK1);
- PSB_WSGX32(PSB_RSGX32(PSB_CR_BIF_CTRL) | _PSB_MMU_ER_MASK,
- PSB_CR_BIF_CTRL);
- psb_spank(dev_priv);
-
- /* mmu_gatt ?? */
- PSB_WSGX32(pg->gatt_start, PSB_CR_BIF_TWOD_REQ_BASE);
- return 0;
-out_err:
- psb_do_takedown(dev);
- return ret;
-}
-
-static int psb_driver_unload(struct drm_device *dev)
-{
- struct drm_psb_private *dev_priv = dev->dev_private;
-
- /* Kill vblank etc here */
-
- gma_backlight_exit(dev);
-
- if (drm_psb_no_fb == 0)
- psb_modeset_cleanup(dev);
-
- if (dev_priv) {
- psb_lid_timer_takedown(dev_priv);
- gma_intel_opregion_exit(dev);
-
- if (dev_priv->ops->chip_teardown)
- dev_priv->ops->chip_teardown(dev);
- psb_do_takedown(dev);
-
-
- if (dev_priv->pf_pd) {
- psb_mmu_free_pagedir(dev_priv->pf_pd);
- dev_priv->pf_pd = NULL;
- }
- if (dev_priv->mmu) {
- struct psb_gtt *pg = &dev_priv->gtt;
-
- down_read(&pg->sem);
- psb_mmu_remove_pfn_sequence(
- psb_mmu_get_default_pd
- (dev_priv->mmu),
- pg->mmu_gatt_start,
- dev_priv->vram_stolen_size >> PAGE_SHIFT);
- up_read(&pg->sem);
- psb_mmu_driver_takedown(dev_priv->mmu);
- dev_priv->mmu = NULL;
- }
- psb_gtt_takedown(dev);
- if (dev_priv->scratch_page) {
- __free_page(dev_priv->scratch_page);
- dev_priv->scratch_page = NULL;
- }
- if (dev_priv->vdc_reg) {
- iounmap(dev_priv->vdc_reg);
- dev_priv->vdc_reg = NULL;
- }
- if (dev_priv->sgx_reg) {
- iounmap(dev_priv->sgx_reg);
- dev_priv->sgx_reg = NULL;
- }
-
- kfree(dev_priv);
- dev->dev_private = NULL;
-
- /*destroy VBT data*/
- psb_intel_destroy_bios(dev);
- }
-
- gma_power_uninit(dev);
-
- return 0;
-}
-
-
-static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
-{
- struct drm_psb_private *dev_priv;
- unsigned long resource_start;
- struct psb_gtt *pg;
- unsigned long irqflags;
- int ret = -ENOMEM;
- uint32_t tt_pages;
- struct drm_connector *connector;
- struct psb_intel_output *psb_intel_output;
-
- dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
- if (dev_priv == NULL)
- return -ENOMEM;
-
- dev_priv->ops = (struct psb_ops *)chipset;
- dev_priv->dev = dev;
- dev->dev_private = (void *) dev_priv;
-
- if (!IS_PSB(dev)) {
- if (pci_enable_msi(dev->pdev))
- dev_warn(dev->dev, "Enabling MSI failed!\n");
- }
-
- dev_priv->num_pipe = dev_priv->ops->pipes;
-
- resource_start = pci_resource_start(dev->pdev, PSB_MMIO_RESOURCE);
-
- dev_priv->vdc_reg =
- ioremap(resource_start + PSB_VDC_OFFSET, PSB_VDC_SIZE);
- if (!dev_priv->vdc_reg)
- goto out_err;
-
- dev_priv->sgx_reg = ioremap(resource_start + dev_priv->ops->sgx_offset,
- PSB_SGX_SIZE);
- if (!dev_priv->sgx_reg)
- goto out_err;
-
- ret = dev_priv->ops->chip_setup(dev);
- if (ret)
- goto out_err;
-
- /* Init OSPM support */
- gma_power_init(dev);
-
- ret = -ENOMEM;
-
- dev_priv->scratch_page = alloc_page(GFP_DMA32 | __GFP_ZERO);
- if (!dev_priv->scratch_page)
- goto out_err;
-
- set_pages_uc(dev_priv->scratch_page, 1);
-
- ret = psb_gtt_init(dev, 0);
- if (ret)
- goto out_err;
-
- dev_priv->mmu = psb_mmu_driver_init((void *)0,
- drm_psb_trap_pagefaults, 0,
- dev_priv);
- if (!dev_priv->mmu)
- goto out_err;
-
- pg = &dev_priv->gtt;
-
- tt_pages = (pg->gatt_pages < PSB_TT_PRIV0_PLIMIT) ?
- (pg->gatt_pages) : PSB_TT_PRIV0_PLIMIT;
-
-
- dev_priv->pf_pd = psb_mmu_alloc_pd(dev_priv->mmu, 1, 0);
- if (!dev_priv->pf_pd)
- goto out_err;
-
- psb_mmu_set_pd_context(psb_mmu_get_default_pd(dev_priv->mmu), 0);
- psb_mmu_set_pd_context(dev_priv->pf_pd, 1);
-
- ret = psb_do_init(dev);
- if (ret)
- return ret;
-
- PSB_WSGX32(0x20000000, PSB_CR_PDS_EXEC_BASE);
- PSB_WSGX32(0x30000000, PSB_CR_BIF_3D_REQ_BASE);
-
-/* igd_opregion_init(&dev_priv->opregion_dev); */
- acpi_video_register();
- if (dev_priv->lid_state)
- psb_lid_timer_init(dev_priv);
-
- ret = drm_vblank_init(dev, dev_priv->num_pipe);
- if (ret)
- goto out_err;
-
- /*
- * Install interrupt handlers prior to powering off SGX or else we will
- * crash.
- */
- dev_priv->vdc_irq_mask = 0;
- dev_priv->pipestat[0] = 0;
- dev_priv->pipestat[1] = 0;
- dev_priv->pipestat[2] = 0;
- spin_lock_irqsave(&dev_priv->irqmask_lock, irqflags);
- PSB_WVDC32(0xFFFFFFFF, PSB_HWSTAM);
- PSB_WVDC32(0x00000000, PSB_INT_ENABLE_R);
- PSB_WVDC32(0xFFFFFFFF, PSB_INT_MASK_R);
- spin_unlock_irqrestore(&dev_priv->irqmask_lock, irqflags);
- if (IS_PSB(dev) && drm_core_check_feature(dev, DRIVER_MODESET))
- drm_irq_install(dev);
-
- dev->vblank_disable_allowed = 1;
-
- dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-
- dev->driver->get_vblank_counter = psb_get_vblank_counter;
-
-#if defined(CONFIG_DRM_PSB_MFLD)
- /* FIXME: this is not the right place for this stuff ! */
- mdfld_output_setup(dev);
-#endif
- if (drm_psb_no_fb == 0) {
- psb_modeset_init(dev);
- psb_fbdev_init(dev);
- drm_kms_helper_poll_init(dev);
- }
-
- /* Only add backlight support if we have LVDS output */
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- head) {
- psb_intel_output = to_psb_intel_output(connector);
-
- switch (psb_intel_output->type) {
- case INTEL_OUTPUT_LVDS:
- case INTEL_OUTPUT_MIPI:
- ret = gma_backlight_init(dev);
- break;
- }
- }
-
- if (ret)
- return ret;
-
- /* Enable runtime pm at last */
- pm_runtime_set_active(&dev->pdev->dev);
- return 0;
-out_err:
- psb_driver_unload(dev);
- return ret;
-}
-
-int psb_driver_device_is_agp(struct drm_device *dev)
-{
- return 0;
-}
-
-
-static int psb_sizes_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_psb_private *dev_priv = psb_priv(dev);
- struct drm_psb_sizes_arg *arg = data;
-
- *arg = dev_priv->sizes;
- return 0;
-}
-
-static int psb_dc_state_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- uint32_t flags;
- uint32_t obj_id;
- struct drm_mode_object *obj;
- struct drm_connector *connector;
- struct drm_crtc *crtc;
- struct drm_psb_dc_state_arg *arg = data;
-
-
- /* Double check MRST case */
- if (IS_MRST(dev) || IS_MFLD(dev))
- return -EOPNOTSUPP;
-
- flags = arg->flags;
- obj_id = arg->obj_id;
-
- if (flags & PSB_DC_CRTC_MASK) {
- obj = drm_mode_object_find(dev, obj_id,
- DRM_MODE_OBJECT_CRTC);
- if (!obj) {
- dev_dbg(dev->dev, "Invalid CRTC object.\n");
- return -EINVAL;
- }
-
- crtc = obj_to_crtc(obj);
-
- mutex_lock(&dev->mode_config.mutex);
- if (drm_helper_crtc_in_use(crtc)) {
- if (flags & PSB_DC_CRTC_SAVE)
- crtc->funcs->save(crtc);
- else
- crtc->funcs->restore(crtc);
- }
- mutex_unlock(&dev->mode_config.mutex);
-
- return 0;
- } else if (flags & PSB_DC_OUTPUT_MASK) {
- obj = drm_mode_object_find(dev, obj_id,
- DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
- dev_dbg(dev->dev, "Invalid connector id.\n");
- return -EINVAL;
- }
-
- connector = obj_to_connector(obj);
- if (flags & PSB_DC_OUTPUT_SAVE)
- connector->funcs->save(connector);
- else
- connector->funcs->restore(connector);
-
- return 0;
- }
- return -EINVAL;
-}
-
-static inline void get_brightness(struct backlight_device *bd)
-{
-#ifdef CONFIG_BACKLIGHT_CLASS_DEVICE
- if (bd) {
- bd->props.brightness = bd->ops->get_brightness(bd);
- backlight_update_status(bd);
- }
-#endif
-}
-
-static int psb_dpst_bl_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_psb_private *dev_priv = psb_priv(dev);
- uint32_t *arg = data;
-
- dev_priv->blc_adj2 = *arg;
- get_brightness(dev_priv->backlight_device);
- return 0;
-}
-
-static int psb_adb_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_psb_private *dev_priv = psb_priv(dev);
- uint32_t *arg = data;
-
- dev_priv->blc_adj1 = *arg;
- get_brightness(dev_priv->backlight_device);
- return 0;
-}
-
-/* return the current mode to the dpst module */
-static int psb_dpst_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_psb_private *dev_priv = psb_priv(dev);
- uint32_t *arg = data;
- uint32_t x;
- uint32_t y;
- uint32_t reg;
-
- if (!gma_power_begin(dev, 0))
- return -EIO;
-
- reg = PSB_RVDC32(PIPEASRC);
-
- gma_power_end(dev);
-
- /* horizontal is the left 16 bits */
- x = reg >> 16;
- /* vertical is the right 16 bits */
- y = reg & 0x0000ffff;
-
- /* the values are the image size minus one */
- x++;
- y++;
-
- *arg = (x << 16) | y;
-
- return 0;
-}
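The decode above recovers the pipe source size from PIPEASRC, which packs (width - 1) in the upper 16 bits and (height - 1) in the lower 16 bits. As a worked example, using the 864x480 panel size that appears elsewhere in this series:

	/* PIPEASRC = ((864 - 1) << 16) | (480 - 1) = 0x035f01df */
	u32 reg = 0x035f01df;
	u32 x = (reg >> 16) + 1;	/* 0x35f + 1 = 864 */
	u32 y = (reg & 0xffff) + 1;	/* 0x1df + 1 = 480 */
	u32 out = (x << 16) | y;	/* 0x036001e0, returned through *arg */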
-static int psb_gamma_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_psb_dpst_lut_arg *lut_arg = data;
- struct drm_mode_object *obj;
- struct drm_crtc *crtc;
- struct drm_connector *connector;
- struct psb_intel_crtc *psb_intel_crtc;
- int i = 0;
- int32_t obj_id;
-
- obj_id = lut_arg->output_id;
- obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
- dev_dbg(dev->dev, "Invalid Connector object.\n");
- return -EINVAL;
- }
-
- connector = obj_to_connector(obj);
- crtc = connector->encoder->crtc;
- psb_intel_crtc = to_psb_intel_crtc(crtc);
-
- for (i = 0; i < 256; i++)
- psb_intel_crtc->lut_adj[i] = lut_arg->lut[i];
-
- psb_intel_crtc_load_lut(crtc);
-
- return 0;
-}
-
-static int psb_mode_operation_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- uint32_t obj_id;
- uint16_t op;
- struct drm_mode_modeinfo *umode;
- struct drm_display_mode *mode = NULL;
- struct drm_psb_mode_operation_arg *arg;
- struct drm_mode_object *obj;
- struct drm_connector *connector;
- struct drm_framebuffer *drm_fb;
- struct psb_framebuffer *psb_fb;
- struct drm_connector_helper_funcs *connector_funcs;
- int ret = 0;
- int resp = MODE_OK;
- struct drm_psb_private *dev_priv = psb_priv(dev);
-
- arg = (struct drm_psb_mode_operation_arg *)data;
- obj_id = arg->obj_id;
- op = arg->operation;
-
- switch (op) {
- case PSB_MODE_OPERATION_SET_DC_BASE:
- obj = drm_mode_object_find(dev, obj_id, DRM_MODE_OBJECT_FB);
- if (!obj) {
- dev_dbg(dev->dev, "Invalid FB id %d\n", obj_id);
- return -EINVAL;
- }
-
- drm_fb = obj_to_fb(obj);
- psb_fb = to_psb_fb(drm_fb);
-
- if (gma_power_begin(dev, 0)) {
- REG_WRITE(DSPASURF, psb_fb->gtt->offset);
- REG_READ(DSPASURF);
- gma_power_end(dev);
- } else {
- dev_priv->saveDSPASURF = psb_fb->gtt->offset;
- }
-
- return 0;
- case PSB_MODE_OPERATION_MODE_VALID:
- umode = &arg->mode;
-
- mutex_lock(&dev->mode_config.mutex);
-
- obj = drm_mode_object_find(dev, obj_id,
- DRM_MODE_OBJECT_CONNECTOR);
- if (!obj) {
- ret = -EINVAL;
- goto mode_op_out;
- }
-
- connector = obj_to_connector(obj);
-
- mode = drm_mode_create(dev);
- if (!mode) {
- ret = -ENOMEM;
- goto mode_op_out;
- }
-
- /* drm_crtc_convert_umode(mode, umode); */
- {
- mode->clock = umode->clock;
- mode->hdisplay = umode->hdisplay;
- mode->hsync_start = umode->hsync_start;
- mode->hsync_end = umode->hsync_end;
- mode->htotal = umode->htotal;
- mode->hskew = umode->hskew;
- mode->vdisplay = umode->vdisplay;
- mode->vsync_start = umode->vsync_start;
- mode->vsync_end = umode->vsync_end;
- mode->vtotal = umode->vtotal;
- mode->vscan = umode->vscan;
- mode->vrefresh = umode->vrefresh;
- mode->flags = umode->flags;
- mode->type = umode->type;
- strncpy(mode->name, umode->name, DRM_DISPLAY_MODE_LEN);
- mode->name[DRM_DISPLAY_MODE_LEN-1] = 0;
- }
-
- connector_funcs = (struct drm_connector_helper_funcs *)
- connector->helper_private;
-
- if (connector_funcs->mode_valid) {
- resp = connector_funcs->mode_valid(connector, mode);
- arg->data = (void *)resp;
- }
-
- /*do some clean up work*/
- if (mode)
- drm_mode_destroy(dev, mode);
-mode_op_out:
- mutex_unlock(&dev->mode_config.mutex);
- return ret;
-
- default:
- dev_dbg(dev->dev, "Unsupported psb mode operation\n");
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-static int psb_stolen_memory_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_psb_private *dev_priv = psb_priv(dev);
- struct drm_psb_stolen_memory_arg *arg = data;
-
- arg->base = dev_priv->stolen_base;
- arg->size = dev_priv->vram_stolen_size;
-
- return 0;
-}
-
-/* FIXME: needs Medfield changes */
-static int psb_register_rw_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_psb_private *dev_priv = psb_priv(dev);
- struct drm_psb_register_rw_arg *arg = data;
- bool usage = arg->b_force_hw_on ? true : false;
-
- if (arg->display_write_mask != 0) {
- if (gma_power_begin(dev, usage)) {
- if (arg->display_write_mask & REGRWBITS_PFIT_CONTROLS)
- PSB_WVDC32(arg->display.pfit_controls,
- PFIT_CONTROL);
- if (arg->display_write_mask &
- REGRWBITS_PFIT_AUTOSCALE_RATIOS)
- PSB_WVDC32(arg->display.pfit_autoscale_ratios,
- PFIT_AUTO_RATIOS);
- if (arg->display_write_mask &
- REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
- PSB_WVDC32(
- arg->display.pfit_programmed_scale_ratios,
- PFIT_PGM_RATIOS);
- if (arg->display_write_mask & REGRWBITS_PIPEASRC)
- PSB_WVDC32(arg->display.pipeasrc,
- PIPEASRC);
- if (arg->display_write_mask & REGRWBITS_PIPEBSRC)
- PSB_WVDC32(arg->display.pipebsrc,
- PIPEBSRC);
- if (arg->display_write_mask & REGRWBITS_VTOTAL_A)
- PSB_WVDC32(arg->display.vtotal_a,
- VTOTAL_A);
- if (arg->display_write_mask & REGRWBITS_VTOTAL_B)
- PSB_WVDC32(arg->display.vtotal_b,
- VTOTAL_B);
- gma_power_end(dev);
- } else {
- if (arg->display_write_mask & REGRWBITS_PFIT_CONTROLS)
- dev_priv->savePFIT_CONTROL =
- arg->display.pfit_controls;
- if (arg->display_write_mask &
- REGRWBITS_PFIT_AUTOSCALE_RATIOS)
- dev_priv->savePFIT_AUTO_RATIOS =
- arg->display.pfit_autoscale_ratios;
- if (arg->display_write_mask &
- REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
- dev_priv->savePFIT_PGM_RATIOS =
- arg->display.pfit_programmed_scale_ratios;
- if (arg->display_write_mask & REGRWBITS_PIPEASRC)
- dev_priv->savePIPEASRC = arg->display.pipeasrc;
- if (arg->display_write_mask & REGRWBITS_PIPEBSRC)
- dev_priv->savePIPEBSRC = arg->display.pipebsrc;
- if (arg->display_write_mask & REGRWBITS_VTOTAL_A)
- dev_priv->saveVTOTAL_A = arg->display.vtotal_a;
- if (arg->display_write_mask & REGRWBITS_VTOTAL_B)
- dev_priv->saveVTOTAL_B = arg->display.vtotal_b;
- }
- }
-
- if (arg->display_read_mask != 0) {
- if (gma_power_begin(dev, usage)) {
- if (arg->display_read_mask &
- REGRWBITS_PFIT_CONTROLS)
- arg->display.pfit_controls =
- PSB_RVDC32(PFIT_CONTROL);
- if (arg->display_read_mask &
- REGRWBITS_PFIT_AUTOSCALE_RATIOS)
- arg->display.pfit_autoscale_ratios =
- PSB_RVDC32(PFIT_AUTO_RATIOS);
- if (arg->display_read_mask &
- REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
- arg->display.pfit_programmed_scale_ratios =
- PSB_RVDC32(PFIT_PGM_RATIOS);
- if (arg->display_read_mask & REGRWBITS_PIPEASRC)
- arg->display.pipeasrc = PSB_RVDC32(PIPEASRC);
- if (arg->display_read_mask & REGRWBITS_PIPEBSRC)
- arg->display.pipebsrc = PSB_RVDC32(PIPEBSRC);
- if (arg->display_read_mask & REGRWBITS_VTOTAL_A)
- arg->display.vtotal_a = PSB_RVDC32(VTOTAL_A);
- if (arg->display_read_mask & REGRWBITS_VTOTAL_B)
- arg->display.vtotal_b = PSB_RVDC32(VTOTAL_B);
- gma_power_end(dev);
- } else {
- if (arg->display_read_mask &
- REGRWBITS_PFIT_CONTROLS)
- arg->display.pfit_controls =
- dev_priv->savePFIT_CONTROL;
- if (arg->display_read_mask &
- REGRWBITS_PFIT_AUTOSCALE_RATIOS)
- arg->display.pfit_autoscale_ratios =
- dev_priv->savePFIT_AUTO_RATIOS;
- if (arg->display_read_mask &
- REGRWBITS_PFIT_PROGRAMMED_SCALE_RATIOS)
- arg->display.pfit_programmed_scale_ratios =
- dev_priv->savePFIT_PGM_RATIOS;
- if (arg->display_read_mask & REGRWBITS_PIPEASRC)
- arg->display.pipeasrc = dev_priv->savePIPEASRC;
- if (arg->display_read_mask & REGRWBITS_PIPEBSRC)
- arg->display.pipebsrc = dev_priv->savePIPEBSRC;
- if (arg->display_read_mask & REGRWBITS_VTOTAL_A)
- arg->display.vtotal_a = dev_priv->saveVTOTAL_A;
- if (arg->display_read_mask & REGRWBITS_VTOTAL_B)
- arg->display.vtotal_b = dev_priv->saveVTOTAL_B;
- }
- }
-
- if (arg->overlay_write_mask != 0) {
- if (gma_power_begin(dev, usage)) {
- if (arg->overlay_write_mask & OV_REGRWBITS_OGAM_ALL) {
- PSB_WVDC32(arg->overlay.OGAMC5, OV_OGAMC5);
- PSB_WVDC32(arg->overlay.OGAMC4, OV_OGAMC4);
- PSB_WVDC32(arg->overlay.OGAMC3, OV_OGAMC3);
- PSB_WVDC32(arg->overlay.OGAMC2, OV_OGAMC2);
- PSB_WVDC32(arg->overlay.OGAMC1, OV_OGAMC1);
- PSB_WVDC32(arg->overlay.OGAMC0, OV_OGAMC0);
- }
- if (arg->overlay_write_mask & OVC_REGRWBITS_OGAM_ALL) {
- PSB_WVDC32(arg->overlay.OGAMC5, OVC_OGAMC5);
- PSB_WVDC32(arg->overlay.OGAMC4, OVC_OGAMC4);
- PSB_WVDC32(arg->overlay.OGAMC3, OVC_OGAMC3);
- PSB_WVDC32(arg->overlay.OGAMC2, OVC_OGAMC2);
- PSB_WVDC32(arg->overlay.OGAMC1, OVC_OGAMC1);
- PSB_WVDC32(arg->overlay.OGAMC0, OVC_OGAMC0);
- }
-
- if (arg->overlay_write_mask & OV_REGRWBITS_OVADD) {
- PSB_WVDC32(arg->overlay.OVADD, OV_OVADD);
-
- if (arg->overlay.b_wait_vblank) {
- /* Wait for 20ms.*/
- unsigned long vblank_timeout = jiffies
- + HZ/50;
- uint32_t temp;
- while (time_before_eq(jiffies,
- vblank_timeout)) {
- temp = PSB_RVDC32(OV_DOVASTA);
- if ((temp & (0x1 << 31)) != 0)
- break;
- cpu_relax();
- }
- }
- }
- if (arg->overlay_write_mask & OVC_REGRWBITS_OVADD) {
- PSB_WVDC32(arg->overlay.OVADD, OVC_OVADD);
- if (arg->overlay.b_wait_vblank) {
- /* Wait for 20ms.*/
- unsigned long vblank_timeout =
- jiffies + HZ/50;
- uint32_t temp;
- while (time_before_eq(jiffies,
- vblank_timeout)) {
- temp = PSB_RVDC32(OVC_DOVCSTA);
- if ((temp & (0x1 << 31)) != 0)
- break;
- cpu_relax();
- }
- }
- }
- gma_power_end(dev);
- } else {
- if (arg->overlay_write_mask & OV_REGRWBITS_OGAM_ALL) {
- dev_priv->saveOV_OGAMC5 = arg->overlay.OGAMC5;
- dev_priv->saveOV_OGAMC4 = arg->overlay.OGAMC4;
- dev_priv->saveOV_OGAMC3 = arg->overlay.OGAMC3;
- dev_priv->saveOV_OGAMC2 = arg->overlay.OGAMC2;
- dev_priv->saveOV_OGAMC1 = arg->overlay.OGAMC1;
- dev_priv->saveOV_OGAMC0 = arg->overlay.OGAMC0;
- }
- if (arg->overlay_write_mask & OVC_REGRWBITS_OGAM_ALL) {
- dev_priv->saveOVC_OGAMC5 = arg->overlay.OGAMC5;
- dev_priv->saveOVC_OGAMC4 = arg->overlay.OGAMC4;
- dev_priv->saveOVC_OGAMC3 = arg->overlay.OGAMC3;
- dev_priv->saveOVC_OGAMC2 = arg->overlay.OGAMC2;
- dev_priv->saveOVC_OGAMC1 = arg->overlay.OGAMC1;
- dev_priv->saveOVC_OGAMC0 = arg->overlay.OGAMC0;
- }
- if (arg->overlay_write_mask & OV_REGRWBITS_OVADD)
- dev_priv->saveOV_OVADD = arg->overlay.OVADD;
- if (arg->overlay_write_mask & OVC_REGRWBITS_OVADD)
- dev_priv->saveOVC_OVADD = arg->overlay.OVADD;
- }
- }
-
- if (arg->overlay_read_mask != 0) {
- if (gma_power_begin(dev, usage)) {
- if (arg->overlay_read_mask & OV_REGRWBITS_OGAM_ALL) {
- arg->overlay.OGAMC5 = PSB_RVDC32(OV_OGAMC5);
- arg->overlay.OGAMC4 = PSB_RVDC32(OV_OGAMC4);
- arg->overlay.OGAMC3 = PSB_RVDC32(OV_OGAMC3);
- arg->overlay.OGAMC2 = PSB_RVDC32(OV_OGAMC2);
- arg->overlay.OGAMC1 = PSB_RVDC32(OV_OGAMC1);
- arg->overlay.OGAMC0 = PSB_RVDC32(OV_OGAMC0);
- }
- if (arg->overlay_read_mask & OVC_REGRWBITS_OGAM_ALL) {
- arg->overlay.OGAMC5 = PSB_RVDC32(OVC_OGAMC5);
- arg->overlay.OGAMC4 = PSB_RVDC32(OVC_OGAMC4);
- arg->overlay.OGAMC3 = PSB_RVDC32(OVC_OGAMC3);
- arg->overlay.OGAMC2 = PSB_RVDC32(OVC_OGAMC2);
- arg->overlay.OGAMC1 = PSB_RVDC32(OVC_OGAMC1);
- arg->overlay.OGAMC0 = PSB_RVDC32(OVC_OGAMC0);
- }
- if (arg->overlay_read_mask & OV_REGRWBITS_OVADD)
- arg->overlay.OVADD = PSB_RVDC32(OV_OVADD);
- if (arg->overlay_read_mask & OVC_REGRWBITS_OVADD)
- arg->overlay.OVADD = PSB_RVDC32(OVC_OVADD);
- gma_power_end(dev);
- } else {
- if (arg->overlay_read_mask & OV_REGRWBITS_OGAM_ALL) {
- arg->overlay.OGAMC5 = dev_priv->saveOV_OGAMC5;
- arg->overlay.OGAMC4 = dev_priv->saveOV_OGAMC4;
- arg->overlay.OGAMC3 = dev_priv->saveOV_OGAMC3;
- arg->overlay.OGAMC2 = dev_priv->saveOV_OGAMC2;
- arg->overlay.OGAMC1 = dev_priv->saveOV_OGAMC1;
- arg->overlay.OGAMC0 = dev_priv->saveOV_OGAMC0;
- }
- if (arg->overlay_read_mask & OVC_REGRWBITS_OGAM_ALL) {
- arg->overlay.OGAMC5 = dev_priv->saveOVC_OGAMC5;
- arg->overlay.OGAMC4 = dev_priv->saveOVC_OGAMC4;
- arg->overlay.OGAMC3 = dev_priv->saveOVC_OGAMC3;
- arg->overlay.OGAMC2 = dev_priv->saveOVC_OGAMC2;
- arg->overlay.OGAMC1 = dev_priv->saveOVC_OGAMC1;
- arg->overlay.OGAMC0 = dev_priv->saveOVC_OGAMC0;
- }
- if (arg->overlay_read_mask & OV_REGRWBITS_OVADD)
- arg->overlay.OVADD = dev_priv->saveOV_OVADD;
- if (arg->overlay_read_mask & OVC_REGRWBITS_OVADD)
- arg->overlay.OVADD = dev_priv->saveOVC_OVADD;
- }
- }
-
- if (arg->sprite_enable_mask != 0) {
- if (gma_power_begin(dev, usage)) {
- PSB_WVDC32(0x1F3E, DSPARB);
- PSB_WVDC32(arg->sprite.dspa_control
- | PSB_RVDC32(DSPACNTR), DSPACNTR);
- PSB_WVDC32(arg->sprite.dspa_key_value, DSPAKEYVAL);
- PSB_WVDC32(arg->sprite.dspa_key_mask, DSPAKEYMASK);
- PSB_WVDC32(PSB_RVDC32(DSPASURF), DSPASURF);
- PSB_RVDC32(DSPASURF);
- PSB_WVDC32(arg->sprite.dspc_control, DSPCCNTR);
- PSB_WVDC32(arg->sprite.dspc_stride, DSPCSTRIDE);
- PSB_WVDC32(arg->sprite.dspc_position, DSPCPOS);
- PSB_WVDC32(arg->sprite.dspc_linear_offset, DSPCLINOFF);
- PSB_WVDC32(arg->sprite.dspc_size, DSPCSIZE);
- PSB_WVDC32(arg->sprite.dspc_surface, DSPCSURF);
- PSB_RVDC32(DSPCSURF);
- gma_power_end(dev);
- }
- }
-
- if (arg->sprite_disable_mask != 0) {
- if (gma_power_begin(dev, usage)) {
- PSB_WVDC32(0x3F3E, DSPARB);
- PSB_WVDC32(0x0, DSPCCNTR);
- PSB_WVDC32(arg->sprite.dspc_surface, DSPCSURF);
- PSB_RVDC32(DSPCSURF);
- gma_power_end(dev);
- }
- }
-
- if (arg->subpicture_enable_mask != 0) {
- if (gma_power_begin(dev, usage)) {
- uint32_t temp;
- if (arg->subpicture_enable_mask & REGRWBITS_DSPACNTR) {
- temp = PSB_RVDC32(DSPACNTR);
- temp &= ~DISPPLANE_PIXFORMAT_MASK;
- temp &= ~DISPPLANE_BOTTOM;
- temp |= DISPPLANE_32BPP;
- PSB_WVDC32(temp, DSPACNTR);
-
- temp = PSB_RVDC32(DSPABASE);
- PSB_WVDC32(temp, DSPABASE);
- PSB_RVDC32(DSPABASE);
- temp = PSB_RVDC32(DSPASURF);
- PSB_WVDC32(temp, DSPASURF);
- PSB_RVDC32(DSPASURF);
- }
- if (arg->subpicture_enable_mask & REGRWBITS_DSPBCNTR) {
- temp = PSB_RVDC32(DSPBCNTR);
- temp &= ~DISPPLANE_PIXFORMAT_MASK;
- temp &= ~DISPPLANE_BOTTOM;
- temp |= DISPPLANE_32BPP;
- PSB_WVDC32(temp, DSPBCNTR);
-
- temp = PSB_RVDC32(DSPBBASE);
- PSB_WVDC32(temp, DSPBBASE);
- PSB_RVDC32(DSPBBASE);
- temp = PSB_RVDC32(DSPBSURF);
- PSB_WVDC32(temp, DSPBSURF);
- PSB_RVDC32(DSPBSURF);
- }
- if (arg->subpicture_enable_mask & REGRWBITS_DSPCCNTR) {
- temp = PSB_RVDC32(DSPCCNTR);
- temp &= ~DISPPLANE_PIXFORMAT_MASK;
- temp &= ~DISPPLANE_BOTTOM;
- temp |= DISPPLANE_32BPP;
- PSB_WVDC32(temp, DSPCCNTR);
-
- temp = PSB_RVDC32(DSPCBASE);
- PSB_WVDC32(temp, DSPCBASE);
- PSB_RVDC32(DSPCBASE);
- temp = PSB_RVDC32(DSPCSURF);
- PSB_WVDC32(temp, DSPCSURF);
- PSB_RVDC32(DSPCSURF);
- }
- gma_power_end(dev);
- }
- }
-
- if (arg->subpicture_disable_mask != 0) {
- if (gma_power_begin(dev, usage)) {
- uint32_t temp;
- if (arg->subpicture_disable_mask & REGRWBITS_DSPACNTR) {
- temp = PSB_RVDC32(DSPACNTR);
- temp &= ~DISPPLANE_PIXFORMAT_MASK;
- temp |= DISPPLANE_32BPP_NO_ALPHA;
- PSB_WVDC32(temp, DSPACNTR);
-
- temp = PSB_RVDC32(DSPABASE);
- PSB_WVDC32(temp, DSPABASE);
- PSB_RVDC32(DSPABASE);
- temp = PSB_RVDC32(DSPASURF);
- PSB_WVDC32(temp, DSPASURF);
- PSB_RVDC32(DSPASURF);
- }
- if (arg->subpicture_disable_mask & REGRWBITS_DSPBCNTR) {
- temp = PSB_RVDC32(DSPBCNTR);
- temp &= ~DISPPLANE_PIXFORMAT_MASK;
- temp |= DISPPLANE_32BPP_NO_ALPHA;
- PSB_WVDC32(temp, DSPBCNTR);
-
- temp = PSB_RVDC32(DSPBBASE);
- PSB_WVDC32(temp, DSPBBASE);
- PSB_RVDC32(DSPBBASE);
- temp = PSB_RVDC32(DSPBSURF);
- PSB_WVDC32(temp, DSPBSURF);
- PSB_RVDC32(DSPBSURF);
- }
- if (arg->subpicture_disable_mask & REGRWBITS_DSPCCNTR) {
- temp = PSB_RVDC32(DSPCCNTR);
- temp &= ~DISPPLANE_PIXFORMAT_MASK;
- temp |= DISPPLANE_32BPP_NO_ALPHA;
- PSB_WVDC32(temp, DSPCCNTR);
-
- temp = PSB_RVDC32(DSPCBASE);
- PSB_WVDC32(temp, DSPCBASE);
- PSB_RVDC32(DSPCBASE);
- temp = PSB_RVDC32(DSPCSURF);
- PSB_WVDC32(temp, DSPCSURF);
- PSB_RVDC32(DSPCSURF);
- }
- gma_power_end(dev);
- }
- }
-
- return 0;
-}
-
-static int psb_driver_open(struct drm_device *dev, struct drm_file *priv)
-{
- return 0;
-}
-
-static void psb_driver_close(struct drm_device *dev, struct drm_file *priv)
-{
-}
-
-static long psb_unlocked_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg)
-{
- struct drm_file *file_priv = filp->private_data;
- struct drm_device *dev = file_priv->minor->dev;
- int ret;
-
- pm_runtime_forbid(dev->dev);
- ret = drm_ioctl(filp, cmd, arg);
- pm_runtime_allow(dev->dev);
- return ret;
- /* FIXME: do we need to wrap the other side of this */
-}
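The FIXME above asks whether the return path needs wrapping as well. As a hedged sketch only (not part of this driver), the usual alternative is to hold a runtime-PM reference for the duration of the ioctl instead of toggling forbid/allow; the function name below is hypothetical.

static long example_wrapped_ioctl(struct file *filp, unsigned int cmd,
				  unsigned long arg)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_device *dev = file_priv->minor->dev;
	long ret;

	pm_runtime_get_sync(dev->dev);	/* wake the device and keep it awake */
	ret = drm_ioctl(filp, cmd, arg);
	pm_runtime_put(dev->dev);	/* drop the reference so it may suspend again */
	return ret;
}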
-
-
-/* When a client dies:
- * - Check for and clean up flipped page state
- */
-void psb_driver_preclose(struct drm_device *dev, struct drm_file *priv)
-{
-}
-
-static void psb_remove(struct pci_dev *pdev)
-{
- struct drm_device *dev = pci_get_drvdata(pdev);
- drm_put_dev(dev);
-}
-
-static const struct dev_pm_ops psb_pm_ops = {
- .suspend = gma_power_suspend,
- .resume = gma_power_resume,
- .freeze = gma_power_suspend,
- .thaw = gma_power_resume,
- .poweroff = gma_power_suspend,
- .restore = gma_power_resume,
- .runtime_suspend = psb_runtime_suspend,
- .runtime_resume = psb_runtime_resume,
- .runtime_idle = psb_runtime_idle,
-};
-
-static struct vm_operations_struct psb_gem_vm_ops = {
- .fault = psb_gem_fault,
- .open = drm_gem_vm_open,
- .close = drm_gem_vm_close,
-};
-
-static struct drm_driver driver = {
-	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
-			   DRIVER_IRQ_VBL | DRIVER_MODESET | DRIVER_GEM,
- .load = psb_driver_load,
- .unload = psb_driver_unload,
-
- .ioctls = psb_ioctls,
- .num_ioctls = DRM_ARRAY_SIZE(psb_ioctls),
- .device_is_agp = psb_driver_device_is_agp,
- .irq_preinstall = psb_irq_preinstall,
- .irq_postinstall = psb_irq_postinstall,
- .irq_uninstall = psb_irq_uninstall,
- .irq_handler = psb_irq_handler,
- .enable_vblank = psb_enable_vblank,
- .disable_vblank = psb_disable_vblank,
- .get_vblank_counter = psb_get_vblank_counter,
- .lastclose = psb_lastclose,
- .open = psb_driver_open,
- .preclose = psb_driver_preclose,
- .postclose = psb_driver_close,
- .reclaim_buffers = drm_core_reclaim_buffers,
-
- .gem_init_object = psb_gem_init_object,
- .gem_free_object = psb_gem_free_object,
- .gem_vm_ops = &psb_gem_vm_ops,
- .dumb_create = psb_gem_dumb_create,
- .dumb_map_offset = psb_gem_dumb_map_gtt,
- .dumb_destroy = psb_gem_dumb_destroy,
-
- .fops = {
- .owner = THIS_MODULE,
- .open = drm_open,
- .release = drm_release,
- .unlocked_ioctl = psb_unlocked_ioctl,
- .mmap = drm_gem_mmap,
- .poll = drm_poll,
- .fasync = drm_fasync,
- .read = drm_read,
- },
- .name = DRIVER_NAME,
- .desc = DRIVER_DESC,
- .date = PSB_DRM_DRIVER_DATE,
- .major = PSB_DRM_DRIVER_MAJOR,
- .minor = PSB_DRM_DRIVER_MINOR,
- .patchlevel = PSB_DRM_DRIVER_PATCHLEVEL
-};
-
-static struct pci_driver psb_pci_driver = {
- .name = DRIVER_NAME,
- .id_table = pciidlist,
- .probe = psb_probe,
- .remove = psb_remove,
- .driver.pm = &psb_pm_ops,
-};
-
-static int psb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
-{
- return drm_get_pci_dev(pdev, ent, &driver);
-}
-
-static int __init psb_init(void)
-{
- return drm_pci_init(&driver, &psb_pci_driver);
-}
-
-static void __exit psb_exit(void)
-{
- drm_pci_exit(&driver, &psb_pci_driver);
-}
-
-late_initcall(psb_init);
-module_exit(psb_exit);
-
-MODULE_AUTHOR("Alan Cox <alan@linux.intel.com> and others");
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/gma500/psb_intel_sdvo.c b/drivers/staging/gma500/psb_intel_sdvo.c
deleted file mode 100644
index a4bad1a..0000000
--- a/drivers/staging/gma500/psb_intel_sdvo.c
+++ /dev/null
@@ -1,1293 +0,0 @@
-/*
- * Copyright (c) 2006-2007 Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
- */
-
-#include <linux/i2c.h>
-#include <linux/delay.h>
-/* #include <drm/drm_crtc.h> */
-#include <drm/drmP.h>
-#include "psb_drv.h"
-#include "psb_intel_drv.h"
-#include "psb_intel_reg.h"
-#include "psb_intel_sdvo_regs.h"
-
-struct psb_intel_sdvo_priv {
- struct psb_intel_i2c_chan *i2c_bus;
- int slaveaddr;
- int output_device;
-
- u16 active_outputs;
-
- struct psb_intel_sdvo_caps caps;
- int pixel_clock_min, pixel_clock_max;
-
- int save_sdvo_mult;
- u16 save_active_outputs;
- struct psb_intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
- struct psb_intel_sdvo_dtd save_output_dtd[16];
- u32 save_SDVOX;
- u8 in_out_map[4];
-
- u8 by_input_wiring;
- u32 active_device;
-};
-
-/**
- * Writes the SDVOB or SDVOC with the given value, but always writes both
- * SDVOB and SDVOC to work around apparent hardware issues (according to
- * comments in the BIOS).
- */
-void psb_intel_sdvo_write_sdvox(struct psb_intel_output *psb_intel_output,
- u32 val)
-{
- struct drm_device *dev = psb_intel_output->base.dev;
- struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
- u32 bval = val, cval = val;
- int i;
-
- if (sdvo_priv->output_device == SDVOB)
- cval = REG_READ(SDVOC);
- else
- bval = REG_READ(SDVOB);
- /*
- * Write the registers twice for luck. Sometimes,
- * writing them only once doesn't appear to 'stick'.
- * The BIOS does this too. Yay, magic
- */
- for (i = 0; i < 2; i++) {
- REG_WRITE(SDVOB, bval);
- REG_READ(SDVOB);
- REG_WRITE(SDVOC, cval);
- REG_READ(SDVOC);
- }
-}
-
-static bool psb_intel_sdvo_read_byte(
- struct psb_intel_output *psb_intel_output,
- u8 addr, u8 *ch)
-{
- struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
- u8 out_buf[2];
- u8 buf[2];
- int ret;
-
- struct i2c_msg msgs[] = {
- {
- .addr = sdvo_priv->i2c_bus->slave_addr,
- .flags = 0,
- .len = 1,
- .buf = out_buf,
- },
- {
- .addr = sdvo_priv->i2c_bus->slave_addr,
- .flags = I2C_M_RD,
- .len = 1,
- .buf = buf,
- }
- };
-
- out_buf[0] = addr;
- out_buf[1] = 0;
-
- ret = i2c_transfer(&sdvo_priv->i2c_bus->adapter, msgs, 2);
- if (ret == 2) {
- *ch = buf[0];
- return true;
- }
-
- return false;
-}
-
-static bool psb_intel_sdvo_write_byte(
- struct psb_intel_output *psb_intel_output,
- int addr, u8 ch)
-{
- u8 out_buf[2];
- struct i2c_msg msgs[] = {
- {
- .addr = psb_intel_output->i2c_bus->slave_addr,
- .flags = 0,
- .len = 2,
- .buf = out_buf,
- }
- };
-
- out_buf[0] = addr;
- out_buf[1] = ch;
-
- if (i2c_transfer(&psb_intel_output->i2c_bus->adapter, msgs, 1) == 1)
- return true;
- return false;
-}
-
-#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
-/** Mapping of command numbers to names, for debug output */
-static const struct _sdvo_cmd_name {
- u8 cmd;
- char *name;
-} sdvo_cmd_names[] = {
-SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
- SDVO_CMD_NAME_ENTRY
- (SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
- SDVO_CMD_NAME_ENTRY
- (SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
- SDVO_CMD_NAME_ENTRY
- (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
- SDVO_CMD_NAME_ENTRY
- (SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
- SDVO_CMD_NAME_ENTRY
- (SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
- SDVO_CMD_NAME_ENTRY
- (SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
- SDVO_CMD_NAME_ENTRY
- (SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
- SDVO_CMD_NAME_ENTRY
- (SDVO_CMD_SET_TV_RESOLUTION_SUPPORT),
- SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),};
-
-#define SDVO_NAME(dev_priv) \
- ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
-#define SDVO_PRIV(output) ((struct psb_intel_sdvo_priv *) (output)->dev_priv)
-
-static void psb_intel_sdvo_write_cmd(struct psb_intel_output *psb_intel_output,
- u8 cmd,
- void *args,
- int args_len)
-{
- struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
- int i;
-
- if (0) {
- printk(KERN_DEBUG "%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd);
- for (i = 0; i < args_len; i++)
- printk(KERN_CONT "%02X ", ((u8 *) args)[i]);
- for (; i < 8; i++)
- printk(KERN_CONT " ");
- for (i = 0;
- i <
- sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]);
- i++) {
- if (cmd == sdvo_cmd_names[i].cmd) {
- printk(KERN_CONT
- "(%s)", sdvo_cmd_names[i].name);
- break;
- }
- }
- if (i ==
- sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]))
- printk(KERN_CONT "(%02X)", cmd);
- printk(KERN_CONT "\n");
- }
-
- for (i = 0; i < args_len; i++) {
- psb_intel_sdvo_write_byte(psb_intel_output,
- SDVO_I2C_ARG_0 - i,
- ((u8 *) args)[i]);
- }
-
- psb_intel_sdvo_write_byte(psb_intel_output, SDVO_I2C_OPCODE, cmd);
-}
-
-static const char *const cmd_status_names[] = {
- "Power on",
- "Success",
- "Not supported",
- "Invalid arg",
- "Pending",
- "Target not specified",
- "Scaling not supported"
-};
-
-static u8 psb_intel_sdvo_read_response(
- struct psb_intel_output *psb_intel_output,
- void *response, int response_len)
-{
- struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
- int i;
- u8 status;
- u8 retry = 50;
-
- while (retry--) {
- /* Read the command response */
- for (i = 0; i < response_len; i++) {
- psb_intel_sdvo_read_byte(psb_intel_output,
- SDVO_I2C_RETURN_0 + i,
- &((u8 *) response)[i]);
- }
-
- /* read the return status */
- psb_intel_sdvo_read_byte(psb_intel_output,
- SDVO_I2C_CMD_STATUS,
- &status);
-
- if (0) {
- pr_debug("%s: R: ", SDVO_NAME(sdvo_priv));
- for (i = 0; i < response_len; i++)
- printk(KERN_CONT "%02X ", ((u8 *) response)[i]);
- for (; i < 8; i++)
- printk(" ");
- if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
- printk(KERN_CONT "(%s)",
- cmd_status_names[status]);
- else
- printk(KERN_CONT "(??? %d)", status);
- printk(KERN_CONT "\n");
- }
-
- if (status != SDVO_CMD_STATUS_PENDING)
- return status;
-
- mdelay(50);
- }
-
- return status;
-}
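For orientation, a worked view of the transaction that the two helpers above perform, written as a comment. The register numbers come from psb_intel_sdvo_regs.h further down in this patch; the example command and argument are illustrative.

/*
 * Example (illustrative): sending SDVO_CMD_SET_ACTIVE_OUTPUTS (0x05)
 * with a two-byte argument of 0x0001 results in these writes to the
 * SDVO slave, because argument byte i is written to SDVO_I2C_ARG_0 - i:
 *
 *	0x07 (SDVO_I2C_ARG_0)  <- 0x01   first argument byte
 *	0x06 (SDVO_I2C_ARG_1)  <- 0x00   second argument byte
 *	0x08 (SDVO_I2C_OPCODE) <- 0x05   opcode, which starts execution
 *
 * psb_intel_sdvo_read_response() then polls 0x09 (SDVO_I2C_CMD_STATUS)
 * until it is no longer SDVO_CMD_STATUS_PENDING and reads any result
 * bytes upward from 0x0a (SDVO_I2C_RETURN_0).
 */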
-
-int psb_intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode)
-{
- if (mode->clock >= 100000)
- return 1;
- else if (mode->clock >= 50000)
- return 2;
- else
- return 4;
-}
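A small worked example of the thresholds above (mode->clock is in kHz); the 65 MHz figure is illustrative.

/*
 * Example: a 65 MHz pixel clock (mode->clock == 65000) falls in the
 * 50000..99999 band, so the multiplier is 2. psb_intel_sdvo_mode_fixup()
 * below then reports an adjusted clock of 130000 kHz to the CRTC, while
 * the output DTD built in psb_intel_sdvo_mode_set() still carries the
 * original 65000 kHz dot clock.
 */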
-
-/**
- * Don't check the status code returned by this command: reading the
- * response would switch the bus back to the SDVO chip, which defeats the
- * purpose of doing a bus switch in the first place.
- */
-void psb_intel_sdvo_set_control_bus_switch(
- struct psb_intel_output *psb_intel_output,
- u8 target)
-{
- psb_intel_sdvo_write_cmd(psb_intel_output,
- SDVO_CMD_SET_CONTROL_BUS_SWITCH,
- &target,
- 1);
-}
-
-static bool psb_intel_sdvo_set_target_input(
- struct psb_intel_output *psb_intel_output,
- bool target_0, bool target_1)
-{
- struct psb_intel_sdvo_set_target_input_args targets = { 0 };
- u8 status;
-
- if (target_0 && target_1)
-		return false;
-
- if (target_1)
- targets.target_1 = 1;
-
- psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_INPUT,
- &targets, sizeof(targets));
-
- status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
-
- return status == SDVO_CMD_STATUS_SUCCESS;
-}
-
-/**
- * Return whether each input is trained.
- *
- * This function is making an assumption about the layout of the response,
- * which should be checked against the docs.
- */
-static bool psb_intel_sdvo_get_trained_inputs(struct psb_intel_output
- *psb_intel_output, bool *input_1,
- bool *input_2)
-{
- struct psb_intel_sdvo_get_trained_inputs_response response;
- u8 status;
-
- psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_TRAINED_INPUTS,
- NULL, 0);
- status =
- psb_intel_sdvo_read_response(psb_intel_output, &response,
- sizeof(response));
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- *input_1 = response.input0_trained;
- *input_2 = response.input1_trained;
- return true;
-}
-
-static bool psb_intel_sdvo_get_active_outputs(struct psb_intel_output
- *psb_intel_output, u16 *outputs)
-{
- u8 status;
-
- psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_GET_ACTIVE_OUTPUTS,
- NULL, 0);
- status =
- psb_intel_sdvo_read_response(psb_intel_output, outputs,
- sizeof(*outputs));
-
- return status == SDVO_CMD_STATUS_SUCCESS;
-}
-
-static bool psb_intel_sdvo_set_active_outputs(struct psb_intel_output
- *psb_intel_output, u16 outputs)
-{
- u8 status;
-
- psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_ACTIVE_OUTPUTS,
- &outputs, sizeof(outputs));
- status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
- return status == SDVO_CMD_STATUS_SUCCESS;
-}
-
-static bool psb_intel_sdvo_set_encoder_power_state(struct psb_intel_output
- *psb_intel_output, int mode)
-{
- u8 status, state = SDVO_ENCODER_STATE_ON;
-
- switch (mode) {
- case DRM_MODE_DPMS_ON:
- state = SDVO_ENCODER_STATE_ON;
- break;
- case DRM_MODE_DPMS_STANDBY:
- state = SDVO_ENCODER_STATE_STANDBY;
- break;
- case DRM_MODE_DPMS_SUSPEND:
- state = SDVO_ENCODER_STATE_SUSPEND;
- break;
- case DRM_MODE_DPMS_OFF:
- state = SDVO_ENCODER_STATE_OFF;
- break;
- }
-
- psb_intel_sdvo_write_cmd(psb_intel_output,
- SDVO_CMD_SET_ENCODER_POWER_STATE, &state,
- sizeof(state));
- status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
-
- return status == SDVO_CMD_STATUS_SUCCESS;
-}
-
-static bool psb_intel_sdvo_get_input_pixel_clock_range(struct psb_intel_output
- *psb_intel_output,
- int *clock_min,
- int *clock_max)
-{
- struct psb_intel_sdvo_pixel_clock_range clocks;
- u8 status;
-
- psb_intel_sdvo_write_cmd(psb_intel_output,
- SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE, NULL,
- 0);
-
- status =
- psb_intel_sdvo_read_response(psb_intel_output, &clocks,
- sizeof(clocks));
-
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- /* Convert the values from units of 10 kHz to kHz. */
- *clock_min = clocks.min * 10;
- *clock_max = clocks.max * 10;
-
- return true;
-}
-
-static bool psb_intel_sdvo_set_target_output(
- struct psb_intel_output *psb_intel_output,
- u16 outputs)
-{
- u8 status;
-
- psb_intel_sdvo_write_cmd(psb_intel_output, SDVO_CMD_SET_TARGET_OUTPUT,
- &outputs, sizeof(outputs));
-
- status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
- return status == SDVO_CMD_STATUS_SUCCESS;
-}
-
-static bool psb_intel_sdvo_get_timing(struct psb_intel_output *psb_intel_output,
- u8 cmd, struct psb_intel_sdvo_dtd *dtd)
-{
- u8 status;
-
- psb_intel_sdvo_write_cmd(psb_intel_output, cmd, NULL, 0);
- status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part1,
- sizeof(dtd->part1));
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, NULL, 0);
- status = psb_intel_sdvo_read_response(psb_intel_output, &dtd->part2,
- sizeof(dtd->part2));
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- return true;
-}
-
-static bool psb_intel_sdvo_get_input_timing(
- struct psb_intel_output *psb_intel_output,
- struct psb_intel_sdvo_dtd *dtd)
-{
- return psb_intel_sdvo_get_timing(psb_intel_output,
- SDVO_CMD_GET_INPUT_TIMINGS_PART1,
- dtd);
-}
-
-static bool psb_intel_sdvo_set_timing(
- struct psb_intel_output *psb_intel_output,
- u8 cmd,
- struct psb_intel_sdvo_dtd *dtd)
-{
- u8 status;
-
- psb_intel_sdvo_write_cmd(psb_intel_output, cmd, &dtd->part1,
- sizeof(dtd->part1));
- status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- psb_intel_sdvo_write_cmd(psb_intel_output, cmd + 1, &dtd->part2,
- sizeof(dtd->part2));
- status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- return true;
-}
-
-static bool psb_intel_sdvo_set_input_timing(
- struct psb_intel_output *psb_intel_output,
- struct psb_intel_sdvo_dtd *dtd)
-{
- return psb_intel_sdvo_set_timing(psb_intel_output,
- SDVO_CMD_SET_INPUT_TIMINGS_PART1,
- dtd);
-}
-
-static bool psb_intel_sdvo_set_output_timing(
- struct psb_intel_output *psb_intel_output,
- struct psb_intel_sdvo_dtd *dtd)
-{
- return psb_intel_sdvo_set_timing(psb_intel_output,
- SDVO_CMD_SET_OUTPUT_TIMINGS_PART1,
- dtd);
-}
-
-static int psb_intel_sdvo_get_clock_rate_mult(struct psb_intel_output
- *psb_intel_output)
-{
- u8 response, status;
-
- psb_intel_sdvo_write_cmd(psb_intel_output,
- SDVO_CMD_GET_CLOCK_RATE_MULT,
- NULL,
- 0);
-
- status = psb_intel_sdvo_read_response(psb_intel_output, &response, 1);
-
- if (status != SDVO_CMD_STATUS_SUCCESS) {
- DRM_DEBUG("Couldn't get SDVO clock rate multiplier\n");
- return SDVO_CLOCK_RATE_MULT_1X;
- } else {
- DRM_DEBUG("Current clock rate multiplier: %d\n", response);
- }
-
- return response;
-}
-
-static bool psb_intel_sdvo_set_clock_rate_mult(struct psb_intel_output
- *psb_intel_output, u8 val)
-{
- u8 status;
-
- psb_intel_sdvo_write_cmd(psb_intel_output,
- SDVO_CMD_SET_CLOCK_RATE_MULT,
- &val,
- 1);
-
- status = psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- return true;
-}
-
-static bool psb_sdvo_set_current_inoutmap(struct psb_intel_output *output,
- u32 in0outputmask,
- u32 in1outputmask)
-{
- u8 byArgs[4];
- u8 status;
- int i;
- struct psb_intel_sdvo_priv *sdvo_priv = output->dev_priv;
-
-	/* Set all fields of the args/ret to zero */
- memset(byArgs, 0, sizeof(byArgs));
-
-	/* Fill in the argument values */
- byArgs[0] = (u8) (in0outputmask & 0xFF);
- byArgs[1] = (u8) ((in0outputmask >> 8) & 0xFF);
- byArgs[2] = (u8) (in1outputmask & 0xFF);
- byArgs[3] = (u8) ((in1outputmask >> 8) & 0xFF);
-
-
-	/* Save the in/out map arguments so they can be restored later */
-	for (i = 0; i < 4; i++)
-		sdvo_priv->in_out_map[i] = byArgs[i];
-
- psb_intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP, byArgs, 4);
- status = psb_intel_sdvo_read_response(output, NULL, 0);
-
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
- return true;
-}
-
-
-static void psb_intel_sdvo_set_iomap(struct psb_intel_output *output)
-{
- u32 dwCurrentSDVOIn0 = 0;
- u32 dwCurrentSDVOIn1 = 0;
- u32 dwDevMask = 0;
-
-
- struct psb_intel_sdvo_priv *sdvo_priv = output->dev_priv;
-
- /* Please DO NOT change the following code. */
- /* SDVOB_IN0 or SDVOB_IN1 ==> sdvo_in0 */
- /* SDVOC_IN0 or SDVOC_IN1 ==> sdvo_in1 */
- if (sdvo_priv->by_input_wiring & (SDVOB_IN0 | SDVOC_IN0)) {
- switch (sdvo_priv->active_device) {
- case SDVO_DEVICE_LVDS:
- dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
- break;
- case SDVO_DEVICE_TMDS:
- dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
- break;
- case SDVO_DEVICE_TV:
- dwDevMask =
- SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 |
- SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB1 |
- SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
- SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
- break;
- case SDVO_DEVICE_CRT:
- dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
- break;
- }
- dwCurrentSDVOIn0 = (sdvo_priv->active_outputs & dwDevMask);
- } else if (sdvo_priv->by_input_wiring & (SDVOB_IN1 | SDVOC_IN1)) {
- switch (sdvo_priv->active_device) {
- case SDVO_DEVICE_LVDS:
- dwDevMask = SDVO_OUTPUT_LVDS0 | SDVO_OUTPUT_LVDS1;
- break;
- case SDVO_DEVICE_TMDS:
- dwDevMask = SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1;
- break;
- case SDVO_DEVICE_TV:
- dwDevMask =
- SDVO_OUTPUT_YPRPB0 | SDVO_OUTPUT_SVID0 |
- SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_YPRPB1 |
- SDVO_OUTPUT_SVID1 | SDVO_OUTPUT_CVBS1 |
- SDVO_OUTPUT_SCART0 | SDVO_OUTPUT_SCART1;
- break;
- case SDVO_DEVICE_CRT:
- dwDevMask = SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_RGB1;
- break;
- }
- dwCurrentSDVOIn1 = (sdvo_priv->active_outputs & dwDevMask);
- }
-
- psb_sdvo_set_current_inoutmap(output, dwCurrentSDVOIn0,
- dwCurrentSDVOIn1);
-}
-
-
-static bool psb_intel_sdvo_mode_fixup(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- /* Make the CRTC code factor in the SDVO pixel multiplier. The SDVO
- * device will be told of the multiplier during mode_set.
- */
- adjusted_mode->clock *= psb_intel_sdvo_get_pixel_multiplier(mode);
- return true;
-}
-
-static void psb_intel_sdvo_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- struct drm_device *dev = encoder->dev;
- struct drm_crtc *crtc = encoder->crtc;
- struct psb_intel_crtc *psb_intel_crtc = to_psb_intel_crtc(crtc);
- struct psb_intel_output *psb_intel_output =
- enc_to_psb_intel_output(encoder);
- struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
- u16 width, height;
- u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len;
- u16 h_sync_offset, v_sync_offset;
- u32 sdvox;
- struct psb_intel_sdvo_dtd output_dtd;
- int sdvo_pixel_multiply;
-
- if (!mode)
- return;
-
- psb_intel_sdvo_set_target_output(psb_intel_output, 0);
-
- width = mode->crtc_hdisplay;
- height = mode->crtc_vdisplay;
-
- /* do some mode translations */
- h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
- h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
-
- v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
- v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
-
- h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
- v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
-
- output_dtd.part1.clock = mode->clock / 10;
- output_dtd.part1.h_active = width & 0xff;
- output_dtd.part1.h_blank = h_blank_len & 0xff;
- output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) |
- ((h_blank_len >> 8) & 0xf);
- output_dtd.part1.v_active = height & 0xff;
- output_dtd.part1.v_blank = v_blank_len & 0xff;
- output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) |
- ((v_blank_len >> 8) & 0xf);
-
- output_dtd.part2.h_sync_off = h_sync_offset;
- output_dtd.part2.h_sync_width = h_sync_len & 0xff;
- output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
- (v_sync_len & 0xf);
- output_dtd.part2.sync_off_width_high =
- ((h_sync_offset & 0x300) >> 2) | ((h_sync_len & 0x300) >> 4) |
- ((v_sync_offset & 0x30) >> 2) | ((v_sync_len & 0x30) >> 4);
-
- output_dtd.part2.dtd_flags = 0x18;
- if (mode->flags & DRM_MODE_FLAG_PHSYNC)
- output_dtd.part2.dtd_flags |= 0x2;
- if (mode->flags & DRM_MODE_FLAG_PVSYNC)
- output_dtd.part2.dtd_flags |= 0x4;
-
- output_dtd.part2.sdvo_flags = 0;
- output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0;
- output_dtd.part2.reserved = 0;
-
- /* Set the output timing to the screen */
- psb_intel_sdvo_set_target_output(psb_intel_output,
- sdvo_priv->active_outputs);
-
- /* Set the input timing to the screen. Assume always input 0. */
- psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
-
- psb_intel_sdvo_set_output_timing(psb_intel_output, &output_dtd);
-
- /* We would like to use i830_sdvo_create_preferred_input_timing() to
- * provide the device with a timing it can support, if it supports that
- * feature. However, presumably we would need to adjust the CRTC to
- * output the preferred timing, and we don't support that currently.
- */
- psb_intel_sdvo_set_input_timing(psb_intel_output, &output_dtd);
-
- switch (psb_intel_sdvo_get_pixel_multiplier(mode)) {
- case 1:
- psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
- SDVO_CLOCK_RATE_MULT_1X);
- break;
- case 2:
- psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
- SDVO_CLOCK_RATE_MULT_2X);
- break;
- case 4:
- psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
- SDVO_CLOCK_RATE_MULT_4X);
- break;
- }
-
- /* Set the SDVO control regs. */
- sdvox = REG_READ(sdvo_priv->output_device);
- switch (sdvo_priv->output_device) {
- case SDVOB:
- sdvox &= SDVOB_PRESERVE_MASK;
- break;
- case SDVOC:
- sdvox &= SDVOC_PRESERVE_MASK;
- break;
- }
- sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
- if (psb_intel_crtc->pipe == 1)
- sdvox |= SDVO_PIPE_B_SELECT;
-
- sdvo_pixel_multiply = psb_intel_sdvo_get_pixel_multiplier(mode);
-
- psb_intel_sdvo_write_sdvox(psb_intel_output, sdvox);
-
- psb_intel_sdvo_set_iomap(psb_intel_output);
-}
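A worked packing example may make the bit shuffling above easier to follow; the numbers are the common 1024x768@60 VESA timing (65 MHz clock, 320-pixel horizontal blank, 38-line vertical blank) and are purely illustrative.

/*
 * Example DTD part1 packing for 1024x768@60:
 *
 *	part1.clock    = 65000 / 10   = 6500   (DTD clock is in 10 kHz units)
 *	part1.h_active = 1024 & 0xff  = 0x00
 *	part1.h_blank  = 320 & 0xff   = 0x40
 *	part1.h_high   = (4 << 4) | 1 = 0x41   (high nibbles of active/blank)
 *	part1.v_active = 768 & 0xff   = 0x00
 *	part1.v_blank  = 38 & 0xff    = 0x26
 *	part1.v_high   = (3 << 4) | 0 = 0x30
 */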
-
-static void psb_intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
-{
- struct drm_device *dev = encoder->dev;
- struct psb_intel_output *psb_intel_output =
- enc_to_psb_intel_output(encoder);
- struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
- u32 temp;
-
- if (mode != DRM_MODE_DPMS_ON) {
- psb_intel_sdvo_set_active_outputs(psb_intel_output, 0);
- if (0)
- psb_intel_sdvo_set_encoder_power_state(
- psb_intel_output,
- mode);
-
- if (mode == DRM_MODE_DPMS_OFF) {
- temp = REG_READ(sdvo_priv->output_device);
- if ((temp & SDVO_ENABLE) != 0) {
- psb_intel_sdvo_write_sdvox(psb_intel_output,
- temp &
- ~SDVO_ENABLE);
- }
- }
- } else {
- bool input1, input2;
- int i;
- u8 status;
-
- temp = REG_READ(sdvo_priv->output_device);
- if ((temp & SDVO_ENABLE) == 0)
- psb_intel_sdvo_write_sdvox(psb_intel_output,
- temp | SDVO_ENABLE);
- for (i = 0; i < 2; i++)
- psb_intel_wait_for_vblank(dev);
-
- status =
- psb_intel_sdvo_get_trained_inputs(psb_intel_output,
- &input1,
- &input2);
-
-
- /* Warn if the device reported failure to sync.
- * A lot of SDVO devices fail to notify of sync, but it's
-		 * a given that if the status is a success, we succeeded.
- */
- if (status == SDVO_CMD_STATUS_SUCCESS && !input1) {
- DRM_DEBUG
- ("First %s output reported failure to sync\n",
- SDVO_NAME(sdvo_priv));
- }
-
- if (0)
- psb_intel_sdvo_set_encoder_power_state(
- psb_intel_output,
- mode);
- psb_intel_sdvo_set_active_outputs(psb_intel_output,
- sdvo_priv->active_outputs);
- }
- return;
-}
-
-static void psb_intel_sdvo_save(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct psb_intel_output *psb_intel_output =
- to_psb_intel_output(connector);
- struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
- /*int o;*/
-
- sdvo_priv->save_sdvo_mult =
- psb_intel_sdvo_get_clock_rate_mult(psb_intel_output);
- psb_intel_sdvo_get_active_outputs(psb_intel_output,
- &sdvo_priv->save_active_outputs);
-
- if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
- psb_intel_sdvo_set_target_input(psb_intel_output,
- true,
- false);
- psb_intel_sdvo_get_input_timing(psb_intel_output,
- &sdvo_priv->save_input_dtd_1);
- }
-
- if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
- psb_intel_sdvo_set_target_input(psb_intel_output,
- false,
- true);
- psb_intel_sdvo_get_input_timing(psb_intel_output,
- &sdvo_priv->save_input_dtd_2);
- }
- sdvo_priv->save_SDVOX = REG_READ(sdvo_priv->output_device);
-
- /*TODO: save the in_out_map state*/
-}
-
-static void psb_intel_sdvo_restore(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct psb_intel_output *psb_intel_output =
- to_psb_intel_output(connector);
- struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
- /*int o;*/
- int i;
- bool input1, input2;
- u8 status;
-
- psb_intel_sdvo_set_active_outputs(psb_intel_output, 0);
-
- if (sdvo_priv->caps.sdvo_inputs_mask & 0x1) {
- psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
- psb_intel_sdvo_set_input_timing(psb_intel_output,
- &sdvo_priv->save_input_dtd_1);
- }
-
- if (sdvo_priv->caps.sdvo_inputs_mask & 0x2) {
- psb_intel_sdvo_set_target_input(psb_intel_output, false, true);
- psb_intel_sdvo_set_input_timing(psb_intel_output,
- &sdvo_priv->save_input_dtd_2);
- }
-
- psb_intel_sdvo_set_clock_rate_mult(psb_intel_output,
- sdvo_priv->save_sdvo_mult);
-
- REG_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX);
-
- if (sdvo_priv->save_SDVOX & SDVO_ENABLE) {
- for (i = 0; i < 2; i++)
- psb_intel_wait_for_vblank(dev);
- status =
- psb_intel_sdvo_get_trained_inputs(psb_intel_output,
- &input1,
- &input2);
- if (status == SDVO_CMD_STATUS_SUCCESS && !input1)
- DRM_DEBUG
- ("First %s output reported failure to sync\n",
- SDVO_NAME(sdvo_priv));
- }
-
- psb_intel_sdvo_set_active_outputs(psb_intel_output,
- sdvo_priv->save_active_outputs);
-
- /*TODO: restore in_out_map*/
- psb_intel_sdvo_write_cmd(psb_intel_output,
- SDVO_CMD_SET_IN_OUT_MAP,
- sdvo_priv->in_out_map,
- 4);
-
- psb_intel_sdvo_read_response(psb_intel_output, NULL, 0);
-}
-
-static int psb_intel_sdvo_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct psb_intel_output *psb_intel_output =
- to_psb_intel_output(connector);
- struct psb_intel_sdvo_priv *sdvo_priv = psb_intel_output->dev_priv;
-
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
- if (sdvo_priv->pixel_clock_min > mode->clock)
- return MODE_CLOCK_LOW;
-
- if (sdvo_priv->pixel_clock_max < mode->clock)
- return MODE_CLOCK_HIGH;
-
- return MODE_OK;
-}
-
-static bool psb_intel_sdvo_get_capabilities(
- struct psb_intel_output *psb_intel_output,
- struct psb_intel_sdvo_caps *caps)
-{
- u8 status;
-
- psb_intel_sdvo_write_cmd(psb_intel_output,
- SDVO_CMD_GET_DEVICE_CAPS,
- NULL,
- 0);
- status = psb_intel_sdvo_read_response(psb_intel_output,
- caps,
- sizeof(*caps));
- if (status != SDVO_CMD_STATUS_SUCCESS)
- return false;
-
- return true;
-}
-
-struct drm_connector *psb_intel_sdvo_find(struct drm_device *dev, int sdvoB)
-{
- struct drm_connector *connector = NULL;
- struct psb_intel_output *iout = NULL;
- struct psb_intel_sdvo_priv *sdvo;
-
- /* find the sdvo connector */
- list_for_each_entry(connector, &dev->mode_config.connector_list,
- head) {
- iout = to_psb_intel_output(connector);
-
- if (iout->type != INTEL_OUTPUT_SDVO)
- continue;
-
- sdvo = iout->dev_priv;
-
- if (sdvo->output_device == SDVOB && sdvoB)
- return connector;
-
- if (sdvo->output_device == SDVOC && !sdvoB)
- return connector;
-
- }
-
- return NULL;
-}
-
-int psb_intel_sdvo_supports_hotplug(struct drm_connector *connector)
-{
- u8 response[2];
- u8 status;
- struct psb_intel_output *psb_intel_output;
-
- if (!connector)
- return 0;
-
- psb_intel_output = to_psb_intel_output(connector);
-
- psb_intel_sdvo_write_cmd(psb_intel_output,
- SDVO_CMD_GET_HOT_PLUG_SUPPORT,
- NULL,
- 0);
- status = psb_intel_sdvo_read_response(psb_intel_output,
- &response,
- 2);
-
- if (response[0] != 0)
- return 1;
-
- return 0;
-}
-
-void psb_intel_sdvo_set_hotplug(struct drm_connector *connector, int on)
-{
- u8 response[2];
- u8 status;
- struct psb_intel_output *psb_intel_output =
- to_psb_intel_output(connector);
-
- psb_intel_sdvo_write_cmd(psb_intel_output,
- SDVO_CMD_GET_ACTIVE_HOT_PLUG,
- NULL,
- 0);
- psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
-
- if (on) {
- psb_intel_sdvo_write_cmd(psb_intel_output,
- SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL,
- 0);
- status = psb_intel_sdvo_read_response(psb_intel_output,
- &response,
- 2);
-
- psb_intel_sdvo_write_cmd(psb_intel_output,
- SDVO_CMD_SET_ACTIVE_HOT_PLUG,
- &response, 2);
- } else {
- response[0] = 0;
- response[1] = 0;
- psb_intel_sdvo_write_cmd(psb_intel_output,
- SDVO_CMD_SET_ACTIVE_HOT_PLUG,
- &response, 2);
- }
-
- psb_intel_sdvo_write_cmd(psb_intel_output,
- SDVO_CMD_GET_ACTIVE_HOT_PLUG,
- NULL,
- 0);
- psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
-}
-
-static enum drm_connector_status psb_intel_sdvo_detect(struct drm_connector
- *connector, bool force)
-{
- u8 response[2];
- u8 status;
- struct psb_intel_output *psb_intel_output =
- to_psb_intel_output(connector);
-
- psb_intel_sdvo_write_cmd(psb_intel_output,
- SDVO_CMD_GET_ATTACHED_DISPLAYS,
- NULL,
- 0);
- status = psb_intel_sdvo_read_response(psb_intel_output, &response, 2);
-
- DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]);
- if ((response[0] != 0) || (response[1] != 0))
- return connector_status_connected;
- else
- return connector_status_disconnected;
-}
-
-static int psb_intel_sdvo_get_modes(struct drm_connector *connector)
-{
- struct psb_intel_output *psb_intel_output =
- to_psb_intel_output(connector);
-
- /* set the bus switch and get the modes */
- psb_intel_sdvo_set_control_bus_switch(psb_intel_output,
- SDVO_CONTROL_BUS_DDC2);
- psb_intel_ddc_get_modes(psb_intel_output);
-
- if (list_empty(&connector->probed_modes))
- return 0;
- return 1;
-}
-
-static void psb_intel_sdvo_destroy(struct drm_connector *connector)
-{
- struct psb_intel_output *psb_intel_output =
- to_psb_intel_output(connector);
-
- if (psb_intel_output->i2c_bus)
- psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
- drm_sysfs_connector_remove(connector);
- drm_connector_cleanup(connector);
- kfree(psb_intel_output);
-}
-
-static const struct drm_encoder_helper_funcs psb_intel_sdvo_helper_funcs = {
- .dpms = psb_intel_sdvo_dpms,
- .mode_fixup = psb_intel_sdvo_mode_fixup,
- .prepare = psb_intel_encoder_prepare,
- .mode_set = psb_intel_sdvo_mode_set,
- .commit = psb_intel_encoder_commit,
-};
-
-static const struct drm_connector_funcs psb_intel_sdvo_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
- .save = psb_intel_sdvo_save,
- .restore = psb_intel_sdvo_restore,
- .detect = psb_intel_sdvo_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = psb_intel_sdvo_destroy,
-};
-
-static const struct drm_connector_helper_funcs
- psb_intel_sdvo_connector_helper_funcs = {
- .get_modes = psb_intel_sdvo_get_modes,
- .mode_valid = psb_intel_sdvo_mode_valid,
- .best_encoder = psb_intel_best_encoder,
-};
-
-void psb_intel_sdvo_enc_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs psb_intel_sdvo_enc_funcs = {
- .destroy = psb_intel_sdvo_enc_destroy,
-};
-
-
-void psb_intel_sdvo_init(struct drm_device *dev, int output_device)
-{
- struct drm_connector *connector;
- struct psb_intel_output *psb_intel_output;
- struct psb_intel_sdvo_priv *sdvo_priv;
- struct psb_intel_i2c_chan *i2cbus = NULL;
- int connector_type;
- u8 ch[0x40];
- int i;
- int encoder_type, output_id;
-
- psb_intel_output =
- kcalloc(sizeof(struct psb_intel_output) +
- sizeof(struct psb_intel_sdvo_priv), 1, GFP_KERNEL);
- if (!psb_intel_output)
- return;
-
- connector = &psb_intel_output->base;
-
- drm_connector_init(dev, connector, &psb_intel_sdvo_connector_funcs,
- DRM_MODE_CONNECTOR_Unknown);
- drm_connector_helper_add(connector,
- &psb_intel_sdvo_connector_helper_funcs);
- sdvo_priv = (struct psb_intel_sdvo_priv *) (psb_intel_output + 1);
- psb_intel_output->type = INTEL_OUTPUT_SDVO;
-
- connector->interlace_allowed = 0;
- connector->doublescan_allowed = 0;
-
- /* setup the DDC bus. */
- if (output_device == SDVOB)
- i2cbus =
- psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
- else
- i2cbus =
- psb_intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
-
- if (!i2cbus)
- goto err_connector;
-
- sdvo_priv->i2c_bus = i2cbus;
-
- if (output_device == SDVOB) {
- output_id = 1;
- sdvo_priv->by_input_wiring = SDVOB_IN0;
- sdvo_priv->i2c_bus->slave_addr = 0x38;
- } else {
- output_id = 2;
- sdvo_priv->i2c_bus->slave_addr = 0x39;
- }
-
- sdvo_priv->output_device = output_device;
- psb_intel_output->i2c_bus = i2cbus;
- psb_intel_output->dev_priv = sdvo_priv;
-
-
- /* Read the regs to test if we can talk to the device */
- for (i = 0; i < 0x40; i++) {
- if (!psb_intel_sdvo_read_byte(psb_intel_output, i, &ch[i])) {
- dev_dbg(dev->dev, "No SDVO device found on SDVO%c\n",
- output_device == SDVOB ? 'B' : 'C');
- goto err_i2c;
- }
- }
-
- psb_intel_sdvo_get_capabilities(psb_intel_output, &sdvo_priv->caps);
-
- memset(&sdvo_priv->active_outputs, 0,
- sizeof(sdvo_priv->active_outputs));
-
-	/* TODO: CVBS, SVID, YPRPB & SCART outputs. */
- if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) {
- sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0;
- sdvo_priv->active_device = SDVO_DEVICE_CRT;
- connector->display_info.subpixel_order =
- SubPixelHorizontalRGB;
- encoder_type = DRM_MODE_ENCODER_DAC;
- connector_type = DRM_MODE_CONNECTOR_VGA;
- } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) {
- sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1;
-		sdvo_priv->active_device = SDVO_DEVICE_CRT;
- connector->display_info.subpixel_order =
- SubPixelHorizontalRGB;
- encoder_type = DRM_MODE_ENCODER_DAC;
- connector_type = DRM_MODE_CONNECTOR_VGA;
- } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) {
- sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0;
- sdvo_priv->active_device = SDVO_DEVICE_TMDS;
- connector->display_info.subpixel_order =
- SubPixelHorizontalRGB;
- encoder_type = DRM_MODE_ENCODER_TMDS;
- connector_type = DRM_MODE_CONNECTOR_DVID;
- } else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) {
- sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1;
- sdvo_priv->active_device = SDVO_DEVICE_TMDS;
- connector->display_info.subpixel_order =
- SubPixelHorizontalRGB;
- encoder_type = DRM_MODE_ENCODER_TMDS;
- connector_type = DRM_MODE_CONNECTOR_DVID;
- } else {
- unsigned char bytes[2];
-
- memcpy(bytes, &sdvo_priv->caps.output_flags, 2);
- dev_dbg(dev->dev, "%s: No active RGB or TMDS outputs (0x%02x%02x)\n",
- SDVO_NAME(sdvo_priv), bytes[0], bytes[1]);
- goto err_i2c;
- }
-
- drm_encoder_init(dev, &psb_intel_output->enc, &psb_intel_sdvo_enc_funcs,
- encoder_type);
- drm_encoder_helper_add(&psb_intel_output->enc,
- &psb_intel_sdvo_helper_funcs);
- connector->connector_type = connector_type;
-
- drm_mode_connector_attach_encoder(&psb_intel_output->base,
- &psb_intel_output->enc);
- drm_sysfs_connector_add(connector);
-
- /* Set the input timing to the screen. Assume always input 0. */
- psb_intel_sdvo_set_target_input(psb_intel_output, true, false);
-
- psb_intel_sdvo_get_input_pixel_clock_range(psb_intel_output,
- &sdvo_priv->pixel_clock_min,
- &sdvo_priv->
- pixel_clock_max);
-
-
- dev_dbg(dev->dev, "%s device VID/DID: %02X:%02X.%02X, "
- "clock range %dMHz - %dMHz, "
- "input 1: %c, input 2: %c, "
- "output 1: %c, output 2: %c\n",
- SDVO_NAME(sdvo_priv),
- sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
- sdvo_priv->caps.device_rev_id,
- sdvo_priv->pixel_clock_min / 1000,
- sdvo_priv->pixel_clock_max / 1000,
- (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
- (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
- /* check currently supported outputs */
- sdvo_priv->caps.output_flags &
- (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
- sdvo_priv->caps.output_flags &
- (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
-
- psb_intel_output->ddc_bus = i2cbus;
-
- return;
-
-err_i2c:
- psb_intel_i2c_destroy(psb_intel_output->i2c_bus);
-err_connector:
- drm_connector_cleanup(connector);
- kfree(psb_intel_output);
-
- return;
-}
diff --git a/drivers/staging/gma500/psb_intel_sdvo_regs.h b/drivers/staging/gma500/psb_intel_sdvo_regs.h
deleted file mode 100644
index 96862ea..0000000
--- a/drivers/staging/gma500/psb_intel_sdvo_regs.h
+++ /dev/null
@@ -1,338 +0,0 @@
-/*
- * SDVO command definitions and structures.
- *
- * Copyright (c) 2008, Intel Corporation
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
- */
-
-#define SDVO_OUTPUT_FIRST (0)
-#define SDVO_OUTPUT_TMDS0 (1 << 0)
-#define SDVO_OUTPUT_RGB0 (1 << 1)
-#define SDVO_OUTPUT_CVBS0 (1 << 2)
-#define SDVO_OUTPUT_SVID0 (1 << 3)
-#define SDVO_OUTPUT_YPRPB0 (1 << 4)
-#define SDVO_OUTPUT_SCART0 (1 << 5)
-#define SDVO_OUTPUT_LVDS0 (1 << 6)
-#define SDVO_OUTPUT_TMDS1 (1 << 8)
-#define SDVO_OUTPUT_RGB1 (1 << 9)
-#define SDVO_OUTPUT_CVBS1 (1 << 10)
-#define SDVO_OUTPUT_SVID1 (1 << 11)
-#define SDVO_OUTPUT_YPRPB1 (1 << 12)
-#define SDVO_OUTPUT_SCART1 (1 << 13)
-#define SDVO_OUTPUT_LVDS1 (1 << 14)
-#define SDVO_OUTPUT_LAST (14)
-
-struct psb_intel_sdvo_caps {
- u8 vendor_id;
- u8 device_id;
- u8 device_rev_id;
- u8 sdvo_version_major;
- u8 sdvo_version_minor;
- unsigned int sdvo_inputs_mask:2;
- unsigned int smooth_scaling:1;
- unsigned int sharp_scaling:1;
- unsigned int up_scaling:1;
- unsigned int down_scaling:1;
- unsigned int stall_support:1;
- unsigned int pad:1;
- u16 output_flags;
-} __packed;
-
-/** This matches the EDID DTD structure, more or less */
-struct psb_intel_sdvo_dtd {
- struct {
- u16 clock; /**< pixel clock, in 10kHz units */
- u8 h_active; /**< lower 8 bits (pixels) */
- u8 h_blank; /**< lower 8 bits (pixels) */
- u8 h_high; /**< upper 4 bits each h_active, h_blank */
- u8 v_active; /**< lower 8 bits (lines) */
- u8 v_blank; /**< lower 8 bits (lines) */
- u8 v_high; /**< upper 4 bits each v_active, v_blank */
- } part1;
-
- struct {
- u8 h_sync_off;
- /**< lower 8 bits, from hblank start */
- u8 h_sync_width;/**< lower 8 bits (pixels) */
- /** lower 4 bits each vsync offset, vsync width */
- u8 v_sync_off_width;
- /**
- * 2 high bits of hsync offset, 2 high bits of hsync width,
- * bits 4-5 of vsync offset, and 2 high bits of vsync width.
- */
- u8 sync_off_width_high;
- u8 dtd_flags;
- u8 sdvo_flags;
-		/** bits 6-7 of vsync offset, stored at bits 6-7 of this byte */
- u8 v_sync_off_high;
- u8 reserved;
- } part2;
-} __packed;
-
-struct psb_intel_sdvo_pixel_clock_range {
- u16 min; /**< pixel clock, in 10kHz units */
- u16 max; /**< pixel clock, in 10kHz units */
-} __packed;
-
-struct psb_intel_sdvo_preferred_input_timing_args {
- u16 clock;
- u16 width;
- u16 height;
-} __packed;
-
-/* I2C registers for SDVO */
-#define SDVO_I2C_ARG_0 0x07
-#define SDVO_I2C_ARG_1 0x06
-#define SDVO_I2C_ARG_2 0x05
-#define SDVO_I2C_ARG_3 0x04
-#define SDVO_I2C_ARG_4 0x03
-#define SDVO_I2C_ARG_5 0x02
-#define SDVO_I2C_ARG_6 0x01
-#define SDVO_I2C_ARG_7 0x00
-#define SDVO_I2C_OPCODE 0x08
-#define SDVO_I2C_CMD_STATUS 0x09
-#define SDVO_I2C_RETURN_0 0x0a
-#define SDVO_I2C_RETURN_1 0x0b
-#define SDVO_I2C_RETURN_2 0x0c
-#define SDVO_I2C_RETURN_3 0x0d
-#define SDVO_I2C_RETURN_4 0x0e
-#define SDVO_I2C_RETURN_5 0x0f
-#define SDVO_I2C_RETURN_6 0x10
-#define SDVO_I2C_RETURN_7 0x11
-#define SDVO_I2C_VENDOR_BEGIN 0x20
-
-/* Status results */
-#define SDVO_CMD_STATUS_POWER_ON 0x0
-#define SDVO_CMD_STATUS_SUCCESS 0x1
-#define SDVO_CMD_STATUS_NOTSUPP 0x2
-#define SDVO_CMD_STATUS_INVALID_ARG 0x3
-#define SDVO_CMD_STATUS_PENDING 0x4
-#define SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED 0x5
-#define SDVO_CMD_STATUS_SCALING_NOT_SUPP 0x6
-
-/* SDVO commands, argument/result registers */
-
-#define SDVO_CMD_RESET 0x01
-
-/** Returns a struct psb_intel_sdvo_caps */
-#define SDVO_CMD_GET_DEVICE_CAPS 0x02
-
-#define SDVO_CMD_GET_FIRMWARE_REV 0x86
-# define SDVO_DEVICE_FIRMWARE_MINOR SDVO_I2C_RETURN_0
-# define SDVO_DEVICE_FIRMWARE_MAJOR SDVO_I2C_RETURN_1
-# define SDVO_DEVICE_FIRMWARE_PATCH SDVO_I2C_RETURN_2
-
-/**
- * Reports which inputs are trained (managed to sync).
- *
- * Devices must have trained within 2 vsyncs of a mode change.
- */
-#define SDVO_CMD_GET_TRAINED_INPUTS 0x03
-struct psb_intel_sdvo_get_trained_inputs_response {
- unsigned int input0_trained:1;
- unsigned int input1_trained:1;
- unsigned int pad:6;
-} __packed;
-
-/** Returns a struct psb_intel_sdvo_output_flags of active outputs. */
-#define SDVO_CMD_GET_ACTIVE_OUTPUTS 0x04
-
-/**
- * Sets the current set of active outputs.
- *
- * Takes a struct psb_intel_sdvo_output_flags.
- * Must be preceded by a SET_IN_OUT_MAP
- * on multi-output devices.
- */
-#define SDVO_CMD_SET_ACTIVE_OUTPUTS 0x05
-
-/**
- * Returns the current mapping of SDVO inputs to outputs on the device.
- *
- * Returns two struct psb_intel_sdvo_output_flags structures.
- */
-#define SDVO_CMD_GET_IN_OUT_MAP 0x06
-
-/**
- * Sets the current mapping of SDVO inputs to outputs on the device.
- *
- * Takes two struct psb_intel_sdvo_output_flags structures.
- */
-#define SDVO_CMD_SET_IN_OUT_MAP 0x07
-
-/**
- * Returns a struct psb_intel_sdvo_output_flags of attached displays.
- */
-#define SDVO_CMD_GET_ATTACHED_DISPLAYS 0x0b
-
-/**
- * Returns a struct psb_intel_sdvo_output_flags of displays supporting hot plugging.
- */
-#define SDVO_CMD_GET_HOT_PLUG_SUPPORT 0x0c
-
-/**
- * Takes a struct psb_intel_sdvo_output_flags.
- */
-#define SDVO_CMD_SET_ACTIVE_HOT_PLUG 0x0d
-
-/**
- * Returns a struct psb_intel_sdvo_output_flags of displays with hot plug
- * interrupts enabled.
- */
-#define SDVO_CMD_GET_ACTIVE_HOT_PLUG 0x0e
-
-#define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE 0x0f
-struct psb_intel_sdvo_get_interrupt_event_source_response {
- u16 interrupt_status;
- unsigned int ambient_light_interrupt:1;
- unsigned int pad:7;
-} __packed;
-
-/**
- * Selects which input is affected by future input commands.
- *
- * Commands affected include SET_INPUT_TIMINGS_PART[12],
- * GET_INPUT_TIMINGS_PART[12], GET_PREFERRED_INPUT_TIMINGS_PART[12],
- * GET_INPUT_PIXEL_CLOCK_RANGE, and CREATE_PREFERRED_INPUT_TIMINGS.
- */
-#define SDVO_CMD_SET_TARGET_INPUT 0x10
-struct psb_intel_sdvo_set_target_input_args {
- unsigned int target_1:1;
- unsigned int pad:7;
-} __packed;
-
-/**
- * Takes a struct psb_intel_sdvo_output_flags of which outputs are targeted by
- * future output commands.
- *
- * Affected commands include SET_OUTPUT_TIMINGS_PART[12],
- * GET_OUTPUT_TIMINGS_PART[12], and GET_OUTPUT_PIXEL_CLOCK_RANGE.
- */
-#define SDVO_CMD_SET_TARGET_OUTPUT 0x11
-
-#define SDVO_CMD_GET_INPUT_TIMINGS_PART1 0x12
-#define SDVO_CMD_GET_INPUT_TIMINGS_PART2 0x13
-#define SDVO_CMD_SET_INPUT_TIMINGS_PART1 0x14
-#define SDVO_CMD_SET_INPUT_TIMINGS_PART2 0x15
-#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART1 0x16
-#define SDVO_CMD_SET_OUTPUT_TIMINGS_PART2 0x17
-#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART1 0x18
-#define SDVO_CMD_GET_OUTPUT_TIMINGS_PART2 0x19
-/* Part 1 */
-# define SDVO_DTD_CLOCK_LOW SDVO_I2C_ARG_0
-# define SDVO_DTD_CLOCK_HIGH SDVO_I2C_ARG_1
-# define SDVO_DTD_H_ACTIVE SDVO_I2C_ARG_2
-# define SDVO_DTD_H_BLANK SDVO_I2C_ARG_3
-# define SDVO_DTD_H_HIGH SDVO_I2C_ARG_4
-# define SDVO_DTD_V_ACTIVE SDVO_I2C_ARG_5
-# define SDVO_DTD_V_BLANK SDVO_I2C_ARG_6
-# define SDVO_DTD_V_HIGH SDVO_I2C_ARG_7
-/* Part 2 */
-# define SDVO_DTD_HSYNC_OFF SDVO_I2C_ARG_0
-# define SDVO_DTD_HSYNC_WIDTH SDVO_I2C_ARG_1
-# define SDVO_DTD_VSYNC_OFF_WIDTH SDVO_I2C_ARG_2
-# define SDVO_DTD_SYNC_OFF_WIDTH_HIGH SDVO_I2C_ARG_3
-# define SDVO_DTD_DTD_FLAGS SDVO_I2C_ARG_4
-# define SDVO_DTD_DTD_FLAG_INTERLACED (1 << 7)
-# define SDVO_DTD_DTD_FLAG_STEREO_MASK (3 << 5)
-# define SDVO_DTD_DTD_FLAG_INPUT_MASK (3 << 3)
-# define SDVO_DTD_DTD_FLAG_SYNC_MASK (3 << 1)
-# define SDVO_DTD_SDVO_FLAS SDVO_I2C_ARG_5
-# define SDVO_DTD_SDVO_FLAG_STALL (1 << 7)
-# define SDVO_DTD_SDVO_FLAG_CENTERED (0 << 6)
-# define SDVO_DTD_SDVO_FLAG_UPPER_LEFT (1 << 6)
-# define SDVO_DTD_SDVO_FLAG_SCALING_MASK (3 << 4)
-# define SDVO_DTD_SDVO_FLAG_SCALING_NONE (0 << 4)
-# define SDVO_DTD_SDVO_FLAG_SCALING_SHARP (1 << 4)
-# define SDVO_DTD_SDVO_FLAG_SCALING_SMOOTH (2 << 4)
-# define SDVO_DTD_VSYNC_OFF_HIGH SDVO_I2C_ARG_6
-
-/**
- * Generates a DTD based on the given width, height, and flags.
- *
- * This will be supported by any device supporting scaling or interlaced
- * modes.
- */
-#define SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING 0x1a
-# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_LOW SDVO_I2C_ARG_0
-# define SDVO_PREFERRED_INPUT_TIMING_CLOCK_HIGH SDVO_I2C_ARG_1
-# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_LOW SDVO_I2C_ARG_2
-# define SDVO_PREFERRED_INPUT_TIMING_WIDTH_HIGH SDVO_I2C_ARG_3
-# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_LOW SDVO_I2C_ARG_4
-# define SDVO_PREFERRED_INPUT_TIMING_HEIGHT_HIGH SDVO_I2C_ARG_5
-# define SDVO_PREFERRED_INPUT_TIMING_FLAGS SDVO_I2C_ARG_6
-# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_INTERLACED (1 << 0)
-# define SDVO_PREFERRED_INPUT_TIMING_FLAGS_SCALED (1 << 1)
-
-#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1 0x1b
-#define SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2 0x1c
-
-/** Returns a struct psb_intel_sdvo_pixel_clock_range */
-#define SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE 0x1d
-/** Returns a struct psb_intel_sdvo_pixel_clock_range */
-#define SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE 0x1e
-
-/** Returns a byte bitfield containing SDVO_CLOCK_RATE_MULT_* flags */
-#define SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS 0x1f
-
-/** Returns a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
-#define SDVO_CMD_GET_CLOCK_RATE_MULT 0x20
-/** Takes a byte containing a SDVO_CLOCK_RATE_MULT_* flag */
-#define SDVO_CMD_SET_CLOCK_RATE_MULT 0x21
-# define SDVO_CLOCK_RATE_MULT_1X (1 << 0)
-# define SDVO_CLOCK_RATE_MULT_2X (1 << 1)
-# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
-
-#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
-
-#define SDVO_CMD_GET_TV_FORMAT 0x28
-
-#define SDVO_CMD_SET_TV_FORMAT 0x29
-
-#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
-#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
-#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
-# define SDVO_ENCODER_STATE_ON (1 << 0)
-# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
-# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
-# define SDVO_ENCODER_STATE_OFF (1 << 3)
-
-#define SDVO_CMD_SET_TV_RESOLUTION_SUPPORT 0x93
-
-#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
-# define SDVO_CONTROL_BUS_PROM 0x0
-# define SDVO_CONTROL_BUS_DDC1 0x1
-# define SDVO_CONTROL_BUS_DDC2 0x2
-# define SDVO_CONTROL_BUS_DDC3 0x3
-
-/* SDVO Bus & SDVO Inputs wiring details*/
-/* Bit 0: Is SDVOB connected to In0 (1 = yes, 0 = no) */
-/* Bit 1: Is SDVOB connected to In1 (1 = yes, 0 = no) */
-/* Bit 2: Is SDVOC connected to In0 (1 = yes, 0 = no) */
-/* Bit 3: Is SDVOC connected to In1 (1 = yes, 0 = no) */
-#define SDVOB_IN0 0x01
-#define SDVOB_IN1 0x02
-#define SDVOC_IN0 0x04
-#define SDVOC_IN1 0x08
-
-#define SDVO_DEVICE_NONE 0x00
-#define SDVO_DEVICE_CRT 0x01
-#define SDVO_DEVICE_TV 0x02
-#define SDVO_DEVICE_LVDS 0x04
-#define SDVO_DEVICE_TMDS 0x08
-
diff --git a/drivers/staging/hv/Kconfig b/drivers/staging/hv/Kconfig
index 072185e..60ac479 100644
--- a/drivers/staging/hv/Kconfig
+++ b/drivers/staging/hv/Kconfig
@@ -3,15 +3,3 @@ config HYPERV_STORAGE
depends on HYPERV && SCSI
help
Select this option to enable the Hyper-V virtual storage driver.
-
-config HYPERV_NET
- tristate "Microsoft Hyper-V virtual network driver"
- depends on HYPERV && NET
- help
- Select this option to enable the Hyper-V virtual network driver.
-
-config HYPERV_MOUSE
- tristate "Microsoft Hyper-V mouse driver"
- depends on HYPERV && HID
- help
- Select this option to enable the Hyper-V mouse driver.
diff --git a/drivers/staging/hv/Makefile b/drivers/staging/hv/Makefile
index 0f55cee..af95a6b 100644
--- a/drivers/staging/hv/Makefile
+++ b/drivers/staging/hv/Makefile
@@ -1,6 +1,3 @@
obj-$(CONFIG_HYPERV_STORAGE) += hv_storvsc.o
-obj-$(CONFIG_HYPERV_NET) += hv_netvsc.o
-obj-$(CONFIG_HYPERV_MOUSE) += hv_mouse.o
hv_storvsc-y := storvsc_drv.o
-hv_netvsc-y := netvsc_drv.o netvsc.o rndis_filter.o
diff --git a/drivers/staging/hv/TODO b/drivers/staging/hv/TODO
index ed4d636..dea7d92 100644
--- a/drivers/staging/hv/TODO
+++ b/drivers/staging/hv/TODO
@@ -1,7 +1,5 @@
TODO:
- - audit the network driver
- audit the scsi driver
Please send patches for this code to Greg Kroah-Hartman <gregkh@suse.de>,
-Hank Janssen <hjanssen@microsoft.com>, Haiyang Zhang <haiyangz@microsoft.com>,
-K. Y. Srinivasan <kys@microsoft.com>
+Haiyang Zhang <haiyangz@microsoft.com>, and K. Y. Srinivasan <kys@microsoft.com>
diff --git a/drivers/staging/hv/storvsc_drv.c b/drivers/staging/hv/storvsc_drv.c
index ae8c33e..eb853f7 100644
--- a/drivers/staging/hv/storvsc_drv.c
+++ b/drivers/staging/hv/storvsc_drv.c
@@ -32,6 +32,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/hyperv.h>
+#include <linux/mempool.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
@@ -42,6 +43,7 @@
#include <scsi/scsi_dbg.h>
+#define STORVSC_MIN_BUF_NR 64
#define STORVSC_RING_BUFFER_SIZE (20*PAGE_SIZE)
static int storvsc_ringbuffer_size = STORVSC_RING_BUFFER_SIZE;
@@ -78,7 +80,7 @@ MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");
/* V1 Beta 0.1 */
/* V1 RC < 2008/1/31 1.0 */
/* V1 RC > 2008/1/31 2.0 */
-#define VMSTOR_PROTOCOL_VERSION_CURRENT VMSTOR_PROTOCOL_VERSION(2, 0)
+#define VMSTOR_PROTOCOL_VERSION_CURRENT VMSTOR_PROTOCOL_VERSION(4, 2)
@@ -104,7 +106,8 @@ enum vstor_packet_operation {
VSTOR_OPERATION_END_INITIALIZATION = 8,
VSTOR_OPERATION_QUERY_PROTOCOL_VERSION = 9,
VSTOR_OPERATION_QUERY_PROPERTIES = 10,
- VSTOR_OPERATION_MAXIMUM = 10
+ VSTOR_OPERATION_ENUMERATE_BUS = 11,
+ VSTOR_OPERATION_MAXIMUM = 11
};
/*
@@ -234,8 +237,6 @@ struct vstor_packet {
#define STORVSC_MAX_CHANNELS 1
#define STORVSC_MAX_CMD_LEN 16
-struct hv_storvsc_request;
-
/* Matches Windows-end */
enum storvsc_request_type {
WRITE_TYPE,
@@ -284,9 +285,13 @@ struct storvsc_device {
struct hv_storvsc_request reset_request;
};
+struct stor_mem_pools {
+ struct kmem_cache *request_pool;
+ mempool_t *request_mempool;
+};
+
struct hv_host_device {
struct hv_device *dev;
- struct kmem_cache *request_pool;
unsigned int port;
unsigned char path;
unsigned char target;
@@ -302,6 +307,51 @@ struct storvsc_cmd_request {
struct hv_storvsc_request request;
};
+struct storvsc_scan_work {
+ struct work_struct work;
+ struct Scsi_Host *host;
+ uint lun;
+};
+
+static void storvsc_bus_scan(struct work_struct *work)
+{
+ struct storvsc_scan_work *wrk;
+ int id, order_id;
+
+ wrk = container_of(work, struct storvsc_scan_work, work);
+ for (id = 0; id < wrk->host->max_id; ++id) {
+ if (wrk->host->reverse_ordering)
+ order_id = wrk->host->max_id - id - 1;
+ else
+ order_id = id;
+
+ scsi_scan_target(&wrk->host->shost_gendev, 0,
+ order_id, SCAN_WILD_CARD, 1);
+ }
+ kfree(wrk);
+}
+
+static void storvsc_remove_lun(struct work_struct *work)
+{
+ struct storvsc_scan_work *wrk;
+ struct scsi_device *sdev;
+
+ wrk = container_of(work, struct storvsc_scan_work, work);
+ if (!scsi_host_get(wrk->host))
+ goto done;
+
+ sdev = scsi_device_lookup(wrk->host, 0, 0, wrk->lun);
+
+ if (sdev) {
+ scsi_remove_device(sdev);
+ scsi_device_put(sdev);
+ }
+ scsi_host_put(wrk->host);
+
+done:
+ kfree(wrk);
+}
+
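This hunk only defines storvsc_remove_lun(); a hedged sketch of how such a work item would typically be queued from a context that cannot sleep follows. The function name and call site are hypothetical, not part of this patch.

static void example_queue_lun_removal(struct Scsi_Host *host, uint lun)
{
	struct storvsc_scan_work *wrk;

	wrk = kmalloc(sizeof(*wrk), GFP_ATOMIC);
	if (!wrk)
		return;

	wrk->host = host;	/* host owning the LUN that went away */
	wrk->lun = lun;		/* LUN to remove once in process context */
	INIT_WORK(&wrk->work, storvsc_remove_lun);
	schedule_work(&wrk->work);
}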
static inline struct storvsc_device *get_out_stor_device(
struct hv_device *device)
{
@@ -549,11 +599,25 @@ static void storvsc_on_receive(struct hv_device *device,
struct vstor_packet *vstor_packet,
struct hv_storvsc_request *request)
{
+ struct storvsc_scan_work *work;
+ struct storvsc_device *stor_device;
+
switch (vstor_packet->operation) {
case VSTOR_OPERATION_COMPLETE_IO:
storvsc_on_io_completion(device, vstor_packet, request);
break;
+
case VSTOR_OPERATION_REMOVE_DEVICE:
+ case VSTOR_OPERATION_ENUMERATE_BUS:
+ stor_device = get_in_stor_device(device);
+ work = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
+ if (!work)
+ return;
+
+ INIT_WORK(&work->work, storvsc_bus_scan);
+ work->host = stor_device->host;
+ schedule_work(&work->work);
+ break;
default:
break;
@@ -729,19 +793,48 @@ static void storvsc_get_ide_info(struct hv_device *dev, int *target, int *path)
static int storvsc_device_alloc(struct scsi_device *sdevice)
{
- /*
- * This enables luns to be located sparsely. Otherwise, we may not
-	 * discover them.
- */
- sdevice->sdev_bflags |= BLIST_SPARSELUN | BLIST_LARGELUN;
+ struct stor_mem_pools *memp;
+ int number = STORVSC_MIN_BUF_NR;
+
+ memp = kzalloc(sizeof(struct stor_mem_pools), GFP_KERNEL);
+ if (!memp)
+ return -ENOMEM;
+
+ memp->request_pool =
+ kmem_cache_create(dev_name(&sdevice->sdev_dev),
+ sizeof(struct storvsc_cmd_request), 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+
+ if (!memp->request_pool)
+ goto err0;
+
+ memp->request_mempool = mempool_create(number, mempool_alloc_slab,
+ mempool_free_slab,
+ memp->request_pool);
+
+ if (!memp->request_mempool)
+ goto err1;
+
+ sdevice->hostdata = memp;
+
return 0;
+
+err1:
+ kmem_cache_destroy(memp->request_pool);
+
+err0:
+ kfree(memp);
+ return -ENOMEM;
}
-static int storvsc_merge_bvec(struct request_queue *q,
- struct bvec_merge_data *bmd, struct bio_vec *bvec)
+static void storvsc_device_destroy(struct scsi_device *sdevice)
{
- /* checking done by caller. */
- return bvec->bv_len;
+ struct stor_mem_pools *memp = sdevice->hostdata;
+
+ mempool_destroy(memp->request_mempool);
+ kmem_cache_destroy(memp->request_pool);
+ kfree(memp);
+ sdevice->hostdata = NULL;
}
static int storvsc_device_configure(struct scsi_device *sdevice)
@@ -751,8 +844,6 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);
- blk_queue_merge_bvec(sdevice->request_queue, storvsc_merge_bvec);
-
blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
return 0;
@@ -802,12 +893,14 @@ static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
unsigned int sg_count,
- unsigned int len)
+ unsigned int len,
+ int write)
{
int i;
int num_pages;
struct scatterlist *bounce_sgl;
struct page *page_buf;
+ unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);
num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;
@@ -819,7 +912,7 @@ static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
page_buf = alloc_page(GFP_ATOMIC);
if (!page_buf)
goto cleanup;
- sg_set_page(&bounce_sgl[i], page_buf, 0, 0);
+ sg_set_page(&bounce_sgl[i], page_buf, buf_len, 0);
}
return bounce_sgl;
@@ -833,7 +926,8 @@ cleanup:
/* Assume the original sgl has enough room */
static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
struct scatterlist *bounce_sgl,
- unsigned int orig_sgl_count)
+ unsigned int orig_sgl_count,
+ unsigned int bounce_sgl_count)
{
int i;
int j = 0;
@@ -874,6 +968,24 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
kunmap_atomic((void *)bounce_addr, KM_IRQ0);
j++;
+ /*
+ * It is possible that the number of elements
+ * in the bounce buffer may not be equal to
+ * the number of elements in the original
+ * scatter list. Handle this correctly.
+ */
+
+ if (j == bounce_sgl_count) {
+ /*
+ * We are done; cleanup and return.
+ */
+ kunmap_atomic((void *)(dest_addr -
+ orig_sgl[i].offset),
+ KM_IRQ0);
+ local_irq_restore(flags);
+ return total_copied;
+ }
+
/* if we need to use another bounce buffer */
if (destlen || i != orig_sgl_count - 1)
bounce_addr =
@@ -965,18 +1077,13 @@ static int storvsc_remove(struct hv_device *dev)
{
struct storvsc_device *stor_device = hv_get_drvdata(dev);
struct Scsi_Host *host = stor_device->host;
- struct hv_host_device *host_dev =
- (struct hv_host_device *)host->hostdata;
scsi_remove_host(host);
scsi_host_put(host);
storvsc_dev_remove(dev);
- if (host_dev->request_pool) {
- kmem_cache_destroy(host_dev->request_pool);
- host_dev->request_pool = NULL;
- }
+
return 0;
}
@@ -1014,7 +1121,7 @@ static int storvsc_host_reset(struct hv_device *device)
stor_device = get_out_stor_device(device);
if (!stor_device)
- return -ENODEV;
+ return FAILED;
request = &stor_device->reset_request;
vstor_packet = &request->vstor_packet;
@@ -1031,13 +1138,11 @@ static int storvsc_host_reset(struct hv_device *device)
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret != 0)
- goto cleanup;
+ return FAILED;
t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
- if (t == 0) {
- ret = -ETIMEDOUT;
- goto cleanup;
- }
+ if (t == 0)
+ return TIMEOUT_ERROR;
/*
@@ -1045,8 +1150,7 @@ static int storvsc_host_reset(struct hv_device *device)
* should have been flushed out and return to us
*/
-cleanup:
- return ret;
+ return SUCCESS;
}
@@ -1055,16 +1159,10 @@ cleanup:
*/
static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
{
- int ret;
- struct hv_host_device *host_dev =
- (struct hv_host_device *)scmnd->device->host->hostdata;
+ struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
struct hv_device *dev = host_dev->dev;
- ret = storvsc_host_reset(dev);
- if (ret != 0)
- return ret;
-
- return ret;
+ return storvsc_host_reset(dev);
}
@@ -1076,21 +1174,22 @@ static void storvsc_command_completion(struct hv_storvsc_request *request)
struct storvsc_cmd_request *cmd_request =
(struct storvsc_cmd_request *)request->context;
struct scsi_cmnd *scmnd = cmd_request->cmd;
- struct hv_host_device *host_dev =
- (struct hv_host_device *)scmnd->device->host->hostdata;
+ struct hv_host_device *host_dev = shost_priv(scmnd->device->host);
void (*scsi_done_fn)(struct scsi_cmnd *);
struct scsi_sense_hdr sense_hdr;
struct vmscsi_request *vm_srb;
+ struct storvsc_scan_work *wrk;
+ struct stor_mem_pools *memp = scmnd->device->hostdata;
vm_srb = &request->vstor_packet.vm_srb;
if (cmd_request->bounce_sgl_count) {
- if (vm_srb->data_in == READ_TYPE) {
+ if (vm_srb->data_in == READ_TYPE)
copy_from_bounce_buffer(scsi_sglist(scmnd),
cmd_request->bounce_sgl,
- scsi_sg_count(scmnd));
- destroy_bounce_buffer(cmd_request->bounce_sgl,
+ scsi_sg_count(scmnd),
+ cmd_request->bounce_sgl_count);
+ destroy_bounce_buffer(cmd_request->bounce_sgl,
cmd_request->bounce_sgl_count);
- }
}
/*
@@ -1103,6 +1202,29 @@ static void storvsc_command_completion(struct hv_storvsc_request *request)
else
scmnd->result = vm_srb->scsi_status;
+ /*
+ * If the LUN is invalid, remove the device.
+ */
+ if (vm_srb->srb_status == 0x20) {
+ struct storvsc_device *stor_dev;
+ struct hv_device *dev = host_dev->dev;
+ struct Scsi_Host *host;
+
+ stor_dev = get_in_stor_device(dev);
+ host = stor_dev->host;
+
+ wrk = kmalloc(sizeof(struct storvsc_scan_work),
+ GFP_ATOMIC);
+ if (!wrk) {
+ scmnd->result = DID_TARGET_FAILURE << 16;
+ } else {
+ wrk->host = host;
+ wrk->lun = vm_srb->lun;
+ INIT_WORK(&wrk->work, storvsc_remove_lun);
+ schedule_work(&wrk->work);
+ }
+ }
+
if (scmnd->result) {
if (scsi_normalize_sense(scmnd->sense_buffer,
SCSI_SENSE_BUFFERSIZE, &sense_hdr))
@@ -1120,7 +1242,7 @@ static void storvsc_command_completion(struct hv_storvsc_request *request)
scsi_done_fn(scmnd);
- kmem_cache_free(host_dev->request_pool, cmd_request);
+ mempool_free(cmd_request, memp->request_mempool);
}
static bool storvsc_check_scsi_cmd(struct scsi_cmnd *scmnd)
@@ -1131,7 +1253,7 @@ static bool storvsc_check_scsi_cmd(struct scsi_cmnd *scmnd)
switch (scsi_op) {
/* smartd sends this command, which will offline the device */
case SET_WINDOW:
- scmnd->result = DID_ERROR << 16;
+ scmnd->result = ILLEGAL_REQUEST << 16;
allowed = false;
break;
default:
@@ -1143,12 +1265,10 @@ static bool storvsc_check_scsi_cmd(struct scsi_cmnd *scmnd)
/*
* storvsc_queuecommand - Initiate command processing
*/
-static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd,
- void (*done)(struct scsi_cmnd *))
+static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
{
int ret;
- struct hv_host_device *host_dev =
- (struct hv_host_device *)scmnd->device->host->hostdata;
+ struct hv_host_device *host_dev = shost_priv(host);
struct hv_device *dev = host_dev->dev;
struct hv_storvsc_request *request;
struct storvsc_cmd_request *cmd_request;
@@ -1157,9 +1277,10 @@ static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd,
struct scatterlist *sgl;
unsigned int sg_count = 0;
struct vmscsi_request *vm_srb;
+ struct stor_mem_pools *memp = scmnd->device->hostdata;
if (storvsc_check_scsi_cmd(scmnd) == false) {
- done(scmnd);
+ scmnd->scsi_done(scmnd);
return 0;
}
@@ -1172,16 +1293,14 @@ static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd,
goto retry_request;
}
- scmnd->scsi_done = done;
-
request_size = sizeof(struct storvsc_cmd_request);
- cmd_request = kmem_cache_zalloc(host_dev->request_pool,
+ cmd_request = mempool_alloc(memp->request_mempool,
GFP_ATOMIC);
- if (!cmd_request) {
- scmnd->scsi_done = NULL;
+ if (!cmd_request)
return SCSI_MLQUEUE_DEVICE_BUSY;
- }
+
+ memset(cmd_request, 0, sizeof(struct storvsc_cmd_request));
/* Setup the cmd request */
cmd_request->bounce_sgl_count = 0;
@@ -1231,12 +1350,12 @@ static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd,
if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
cmd_request->bounce_sgl =
create_bounce_buffer(sgl, scsi_sg_count(scmnd),
- scsi_bufflen(scmnd));
+ scsi_bufflen(scmnd),
+ vm_srb->data_in);
if (!cmd_request->bounce_sgl) {
- scmnd->scsi_done = NULL;
scmnd->host_scribble = NULL;
- kmem_cache_free(host_dev->request_pool,
- cmd_request);
+ mempool_free(cmd_request,
+ memp->request_mempool);
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -1278,9 +1397,8 @@ retry_request:
destroy_bounce_buffer(cmd_request->bounce_sgl,
cmd_request->bounce_sgl_count);
- kmem_cache_free(host_dev->request_pool, cmd_request);
+ mempool_free(cmd_request, memp->request_mempool);
- scmnd->scsi_done = NULL;
scmnd->host_scribble = NULL;
ret = SCSI_MLQUEUE_DEVICE_BUSY;
@@ -1289,9 +1407,6 @@ retry_request:
return ret;
}
-static DEF_SCSI_QCMD(storvsc_queuecommand)
-
-
/* Scsi driver */
static struct scsi_host_template scsi_driver = {
.module = THIS_MODULE,
@@ -1300,6 +1415,7 @@ static struct scsi_host_template scsi_driver = {
.queuecommand = storvsc_queuecommand,
.eh_host_reset_handler = storvsc_host_reset_handler,
.slave_alloc = storvsc_device_alloc,
+ .slave_destroy = storvsc_device_destroy,
.slave_configure = storvsc_device_configure,
.cmd_per_lun = 1,
/* 64 max_queue * 1 target */
@@ -1308,14 +1424,7 @@ static struct scsi_host_template scsi_driver = {
/* no use setting to 0 since ll_blk_rw reset it to 1 */
/* currently 32 */
.sg_tablesize = MAX_MULTIPAGE_BUFFER_COUNT,
- /*
- * ENABLE_CLUSTERING allows mutiple physically contig bio_vecs to merge
- * into 1 sg element. If set, we must limit the max_segment_size to
- * PAGE_SIZE, otherwise we may get 1 sg element that represents
- * multiple
- */
- /* physically contig pfns (ie sg[x].length > PAGE_SIZE). */
- .use_clustering = ENABLE_CLUSTERING,
+ .use_clustering = DISABLE_CLUSTERING,
/* Make sure we dont get a sg segment crosses a page boundary */
.dma_boundary = PAGE_SIZE-1,
};
@@ -1360,27 +1469,17 @@ static int storvsc_probe(struct hv_device *device,
if (!host)
return -ENOMEM;
- host_dev = (struct hv_host_device *)host->hostdata;
+ host_dev = shost_priv(host);
memset(host_dev, 0, sizeof(struct hv_host_device));
host_dev->port = host->host_no;
host_dev->dev = device;
- host_dev->request_pool =
- kmem_cache_create(dev_name(&device->device),
- sizeof(struct storvsc_cmd_request), 0,
- SLAB_HWCACHE_ALIGN, NULL);
-
- if (!host_dev->request_pool) {
- scsi_host_put(host);
- return -ENOMEM;
- }
stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
if (!stor_device) {
- kmem_cache_destroy(host_dev->request_pool);
- scsi_host_put(host);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_out0;
}
stor_device->destroy = false;
@@ -1391,12 +1490,8 @@ static int storvsc_probe(struct hv_device *device,
stor_device->port_number = host->host_no;
ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size);
- if (ret) {
- kmem_cache_destroy(host_dev->request_pool);
- scsi_host_put(host);
- kfree(stor_device);
- return ret;
- }
+ if (ret)
+ goto err_out1;
if (dev_is_ide)
storvsc_get_ide_info(device, &target, &path);
@@ -1416,7 +1511,7 @@ static int storvsc_probe(struct hv_device *device,
/* Register the HBA and start the scsi bus scan */
ret = scsi_add_host(host, &device->device);
if (ret != 0)
- goto err_out;
+ goto err_out2;
if (!dev_is_ide) {
scsi_scan_host(host);
@@ -1425,21 +1520,32 @@ static int storvsc_probe(struct hv_device *device,
ret = scsi_add_device(host, 0, target, 0);
if (ret) {
scsi_remove_host(host);
- goto err_out;
+ goto err_out2;
}
return 0;
-err_out:
+err_out2:
+ /*
+ * Once we have connected with the host, we would need to
+ * invoke storvsc_dev_remove() to rollback this state and
+ * this call also frees up the stor_device; hence the jump around
+ * err_out1 label.
+ */
storvsc_dev_remove(device);
- kmem_cache_destroy(host_dev->request_pool);
+ goto err_out0;
+
+err_out1:
+ kfree(stor_device);
+
+err_out0:
scsi_host_put(host);
- return -ENODEV;
+ return ret;
}
/* The one and only one */
static struct hv_driver storvsc_drv = {
- .name = "storvsc",
+ .name = KBUILD_MODNAME,
.id_table = id_table,
.probe = storvsc_probe,
.remove = storvsc_remove,
diff --git a/drivers/staging/iio/Documentation/generic_buffer.c b/drivers/staging/iio/Documentation/generic_buffer.c
index d580953..69a05b9 100644
--- a/drivers/staging/iio/Documentation/generic_buffer.c
+++ b/drivers/staging/iio/Documentation/generic_buffer.c
@@ -28,6 +28,7 @@
#include <linux/types.h>
#include <string.h>
#include <poll.h>
+#include <endian.h>
#include "iio_utils.h"
/**
@@ -56,6 +57,13 @@ int size_from_channelarray(struct iio_channel_info *channels, int num_channels)
void print2byte(int input, struct iio_channel_info *info)
{
+ /* First swap if incorrect endian */
+
+ if (info->be)
+ input = be16toh((uint16_t)input);
+ else
+ input = le16toh((uint16_t)input);
+
/* shift before conversion to avoid sign extension
of left aligned data */
input = input >> info->shift;
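
The added branch converts the raw sample from the byte order advertised in sysfs to host order before shifting. A standalone sketch of the same conversion, using the <endian.h> helpers the patch relies on (the sample value, endian flag and shift are made up for illustration):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical raw 16-bit sample as read from the buffer. */
	uint16_t raw = 0x3412;
	int be = 1;	/* pretend the channel reported big-endian */
	int shift = 4;	/* pretend the scan type was be:s12/16>>4 */

	/* Swap to host order first, exactly as print2byte() now does. */
	uint16_t host = be ? be16toh(raw) : le16toh(raw);

	/* Then shift before any sign handling of left-aligned data. */
	printf("host-order value: 0x%04x, shifted: 0x%04x\n",
	       host, (uint16_t)(host >> shift));
	return 0;
}
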
diff --git a/drivers/staging/iio/Documentation/iio_utils.h b/drivers/staging/iio/Documentation/iio_utils.h
index 75938b2..6f3a392 100644
--- a/drivers/staging/iio/Documentation/iio_utils.h
+++ b/drivers/staging/iio/Documentation/iio_utils.h
@@ -13,6 +13,7 @@
#include <ctype.h>
#include <stdio.h>
#include <stdint.h>
+#include <dirent.h>
#define IIO_MAX_NAME_LENGTH 30
@@ -73,6 +74,7 @@ struct iio_channel_info {
unsigned bits_used;
unsigned shift;
uint64_t mask;
+ unsigned be;
unsigned is_signed;
unsigned enabled;
unsigned location;
@@ -83,6 +85,7 @@ struct iio_channel_info {
* @is_signed: output whether channel is signed
* @bytes: output how many bytes the channel storage occupies
* @mask: output a bit mask for the raw data
+ * @be: big endian
* @device_dir: the iio device directory
* @name: the channel name
* @generic_name: the channel type name
@@ -92,6 +95,7 @@ inline int iioutils_get_type(unsigned *is_signed,
unsigned *bits_used,
unsigned *shift,
uint64_t *mask,
+ unsigned *be,
const char *device_dir,
const char *name,
const char *generic_name)
@@ -100,7 +104,7 @@ inline int iioutils_get_type(unsigned *is_signed,
int ret;
DIR *dp;
char *scan_el_dir, *builtname, *builtname_generic, *filename = 0;
- char signchar;
+ char signchar, endianchar;
unsigned padint;
const struct dirent *ent;
@@ -144,9 +148,18 @@ inline int iioutils_get_type(unsigned *is_signed,
ret = -errno;
goto error_free_filename;
}
- fscanf(sysfsfp,
- "%c%u/%u>>%u", &signchar, bits_used,
- &padint, shift);
+
+ ret = fscanf(sysfsfp,
+ "%ce:%c%u/%u>>%u",
+ &endianchar,
+ &signchar,
+ bits_used,
+ &padint, shift);
+ if (ret < 0) {
+ printf("failed to pass scan type description\n");
+ return ret;
+ }
+ *be = (endianchar == 'b');
*bytes = padint / 8;
if (*bits_used == 64)
*mask = ~0;
@@ -156,6 +169,10 @@ inline int iioutils_get_type(unsigned *is_signed,
*is_signed = 1;
else
*is_signed = 0;
+ fclose(sysfsfp);
+ free(filename);
+
+ filename = 0;
}
error_free_filename:
if (filename)
@@ -386,6 +403,7 @@ inline int build_channel_array(const char *device_dir,
&current->bits_used,
&current->shift,
&current->mask,
+ &current->be,
device_dir,
current->name,
current->generic_name);
diff --git a/drivers/staging/iio/Documentation/ring.txt b/drivers/staging/iio/Documentation/ring.txt
index 7e99ef2..e338077 100644
--- a/drivers/staging/iio/Documentation/ring.txt
+++ b/drivers/staging/iio/Documentation/ring.txt
@@ -23,10 +23,6 @@ The function pointers within here are used to allow the core to handle
as much buffer functionality as possible. Note almost all of these
are optional.
-mark_in_use, unmark_in_use
- Basically indicate that not changes should be made to the buffer state that
- will effect the form of the data being captures (e.g. scan elements or length)
-
store_to
If possible, push data to the buffer.
@@ -39,8 +35,6 @@ rip_first_n
The primary buffer reading function. Note that it may well not return
as much data as requested.
-mark_param_changed
- Used to indicate that something has changed. Used in conjunction with
request_update
If parameters have changed that require reinitialization or configuration of
the buffer this will trigger it.
@@ -51,7 +45,3 @@ get_bytes_per_datum, set_bytes_per_datum
get_length / set_length
Get/set the number of complete scans that may be held by the buffer.
-is_enabled
- Query if ring buffer is in use
-enable
- Start the ring buffer.
diff --git a/drivers/staging/iio/Documentation/sysfs-bus-iio b/drivers/staging/iio/Documentation/sysfs-bus-iio
index 0d6823d..46a995d 100644
--- a/drivers/staging/iio/Documentation/sysfs-bus-iio
+++ b/drivers/staging/iio/Documentation/sysfs-bus-iio
@@ -276,6 +276,16 @@ Description:
If a discrete set of scale values are available, they
are listed in this attribute.
+What: /sys/.../in_accel_filter_low_pass_3db_frequency
+What: /sys/.../in_magn_filter_low_pass_3db_frequency
+What: /sys/.../in_anglvel_filter_low_pass_3db_frequency
+KernelVersion: 3.2
+Contact: linux-iio@vger.kernel.org
+Description:
+ If a known or controllable low pass filter is applied
+ to the underlying data channel, then this parameter
+ gives the 3dB frequency of the filter in Hz.
+
What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_raw
KernelVersion: 2.6.37
Contact: linux-iio@vger.kernel.org
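
The new in_*_filter_low_pass_3db_frequency attributes are plain sysfs files, so userspace reads the cutoff like any other IIO attribute. A minimal sketch (the device index is hypothetical and depends on which iio:deviceX exposes the channel):

#include <stdio.h>

int main(void)
{
	/* Hypothetical device path; pick the iio:deviceX with the channel. */
	const char *path =
	    "/sys/bus/iio/devices/iio:device0/in_accel_filter_low_pass_3db_frequency";
	FILE *f = fopen(path, "r");
	double hz;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%lf", &hz) == 1)
		printf("low pass 3dB frequency: %g Hz\n", hz);
	fclose(f);
	return 0;
}
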
diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig
index 4ec9118..90162aa 100644
--- a/drivers/staging/iio/Kconfig
+++ b/drivers/staging/iio/Kconfig
@@ -76,7 +76,6 @@ config IIO_DUMMY_EVGEN
config IIO_SIMPLE_DUMMY
tristate "An example driver with no hardware requirements"
- select IIO_SIMPLE_DUMMY_EVGEN if IIO_SIMPLE_DUMMY_EVENTS
help
Driver intended mainly as documentation for how to write
a driver. May also be useful for testing userspace code
diff --git a/drivers/staging/iio/accel/adis16201_core.c b/drivers/staging/iio/accel/adis16201_core.c
index 1c5dad5..d439e45 100644
--- a/drivers/staging/iio/accel/adis16201_core.c
+++ b/drivers/staging/iio/accel/adis16201_core.c
@@ -17,7 +17,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "adis16201.h"
@@ -322,8 +322,7 @@ static int adis16201_read_raw(struct iio_dev *indio_dev,
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
*val = 0;
@@ -348,10 +347,10 @@ static int adis16201_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
break;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
*val = 25;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
switch (chan->type) {
case IIO_ACCEL:
bits = 12;
@@ -388,7 +387,7 @@ static int adis16201_write_raw(struct iio_dev *indio_dev,
s16 val16;
u8 addr;
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
switch (chan->type) {
case IIO_ACCEL:
bits = 12;
@@ -408,36 +407,36 @@ static int adis16201_write_raw(struct iio_dev *indio_dev,
static struct iio_chan_spec adis16201_channels[] = {
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "supply", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_supply, ADIS16201_SCAN_SUPPLY,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT,
temp, ADIS16201_SCAN_TEMP,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
accel_x, ADIS16201_SCAN_ACC_X,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Y,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
accel_y, ADIS16201_SCAN_ACC_Y,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_aux, ADIS16201_SCAN_AUX_ADC,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
incli_x, ADIS16201_SCAN_INCLI_X,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_Y,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
incli_y, ADIS16201_SCAN_INCLI_Y,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN_SOFT_TIMESTAMP(7)
@@ -549,19 +548,9 @@ static struct spi_driver adis16201_driver = {
.probe = adis16201_probe,
.remove = __devexit_p(adis16201_remove),
};
-
-static __init int adis16201_init(void)
-{
- return spi_register_driver(&adis16201_driver);
-}
-module_init(adis16201_init);
-
-static __exit void adis16201_exit(void)
-{
- spi_unregister_driver(&adis16201_driver);
-}
-module_exit(adis16201_exit);
+module_spi_driver(adis16201_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADIS16201 Programmable Digital Vibration Sensor driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16201");
diff --git a/drivers/staging/iio/accel/adis16201_ring.c b/drivers/staging/iio/accel/adis16201_ring.c
index 0016ed3..26c610f 100644
--- a/drivers/staging/iio/accel/adis16201_ring.c
+++ b/drivers/staging/iio/accel/adis16201_ring.c
@@ -74,11 +74,11 @@ static irqreturn_t adis16201_trigger_handler(int irq, void *p)
return -ENOMEM;
}
- if (ring->scan_count)
- if (adis16201_read_ring_data(indio_dev, st->rx) >= 0)
- for (; i < ring->scan_count; i++)
- data[i] = be16_to_cpup(
- (__be16 *)&(st->rx[i*2]));
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)
+ && adis16201_read_ring_data(indio_dev, st->rx) >= 0)
+ for (; i < bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength); i++)
+ data[i] = be16_to_cpup((__be16 *)&(st->rx[i*2]));
/* Guaranteed to be aligned with 8 byte boundary */
if (ring->scan_timestamp)
@@ -116,11 +116,9 @@ int adis16201_configure_ring(struct iio_dev *indio_dev)
}
indio_dev->buffer = ring;
/* Effectively select the ring buffer implementation */
- ring->bpe = 2;
ring->scan_timestamp = true;
ring->access = &ring_sw_access_funcs;
- ring->setup_ops = &adis16201_ring_setup_ops;
- ring->owner = THIS_MODULE;
+ indio_dev->setup_ops = &adis16201_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&adis16201_trigger_handler,
diff --git a/drivers/staging/iio/accel/adis16203_core.c b/drivers/staging/iio/accel/adis16203_core.c
index 8a33374..1a5140f 100644
--- a/drivers/staging/iio/accel/adis16203_core.c
+++ b/drivers/staging/iio/accel/adis16203_core.c
@@ -17,7 +17,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "adis16203.h"
@@ -329,8 +329,7 @@ static int adis16203_read_raw(struct iio_dev *indio_dev,
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
*val = 0;
@@ -350,10 +349,10 @@ static int adis16203_read_raw(struct iio_dev *indio_dev,
default:
return -EINVAL;
}
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
*val = 25;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
bits = 14;
mutex_lock(&indio_dev->mlock);
addr = adis16203_addresses[chan->address][1];
@@ -374,26 +373,26 @@ static int adis16203_read_raw(struct iio_dev *indio_dev,
static struct iio_chan_spec adis16203_channels[] = {
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "supply", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_supply, ADIS16203_SCAN_SUPPLY,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_aux, ADIS16203_SCAN_AUX_ADC,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
incli_x, ADIS16203_SCAN_INCLI_X,
IIO_ST('s', 14, 16, 0), 0),
/* Fixme: Not what it appears to be - see data sheet */
IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_Y,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
incli_y, ADIS16203_SCAN_INCLI_Y,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT,
temp, ADIS16203_SCAN_TEMP,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN_SOFT_TIMESTAMP(5),
@@ -504,19 +503,9 @@ static struct spi_driver adis16203_driver = {
.probe = adis16203_probe,
.remove = __devexit_p(adis16203_remove),
};
-
-static __init int adis16203_init(void)
-{
- return spi_register_driver(&adis16203_driver);
-}
-module_init(adis16203_init);
-
-static __exit void adis16203_exit(void)
-{
- spi_unregister_driver(&adis16203_driver);
-}
-module_exit(adis16203_exit);
+module_spi_driver(adis16203_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADIS16203 Programmable Digital Vibration Sensor driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16203");
diff --git a/drivers/staging/iio/accel/adis16203_ring.c b/drivers/staging/iio/accel/adis16203_ring.c
index 1fdfe6f..064640d 100644
--- a/drivers/staging/iio/accel/adis16203_ring.c
+++ b/drivers/staging/iio/accel/adis16203_ring.c
@@ -74,11 +74,11 @@ static irqreturn_t adis16203_trigger_handler(int irq, void *p)
return -ENOMEM;
}
- if (ring->scan_count)
- if (adis16203_read_ring_data(&indio_dev->dev, st->rx) >= 0)
- for (; i < ring->scan_count; i++)
- data[i] = be16_to_cpup(
- (__be16 *)&(st->rx[i*2]));
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength) &&
+ adis16203_read_ring_data(&indio_dev->dev, st->rx) >= 0)
+ for (; i < bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength); i++)
+ data[i] = be16_to_cpup((__be16 *)&(st->rx[i*2]));
/* Guaranteed to be aligned with 8 byte boundary */
if (ring->scan_timestamp)
@@ -118,11 +118,9 @@ int adis16203_configure_ring(struct iio_dev *indio_dev)
}
indio_dev->buffer = ring;
/* Effectively select the ring buffer implementation */
- ring->bpe = 2;
ring->scan_timestamp = true;
ring->access = &ring_sw_access_funcs;
- ring->setup_ops = &adis16203_ring_setup_ops;
- ring->owner = THIS_MODULE;
+ indio_dev->setup_ops = &adis16203_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&adis16203_trigger_handler,
diff --git a/drivers/staging/iio/accel/adis16204_core.c b/drivers/staging/iio/accel/adis16204_core.c
index 644ac8e..fa89364 100644
--- a/drivers/staging/iio/accel/adis16204_core.c
+++ b/drivers/staging/iio/accel/adis16204_core.c
@@ -20,7 +20,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "adis16204.h"
@@ -366,7 +366,7 @@ static int adis16204_read_raw(struct iio_dev *indio_dev,
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
*val = 0;
@@ -390,12 +390,12 @@ static int adis16204_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
break;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
*val = 25;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
- case (1 << IIO_CHAN_INFO_PEAK_SEPARATE):
- if (mask == (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE)) {
+ case IIO_CHAN_INFO_CALIBBIAS:
+ case IIO_CHAN_INFO_PEAK:
+ if (mask == IIO_CHAN_INFO_CALIBBIAS) {
bits = 12;
addrind = 1;
} else { /* PEAK_SEPARATE */
@@ -428,7 +428,7 @@ static int adis16204_write_raw(struct iio_dev *indio_dev,
s16 val16;
u8 addr;
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
switch (chan->type) {
case IIO_ACCEL:
bits = 12;
@@ -445,28 +445,28 @@ static int adis16204_write_raw(struct iio_dev *indio_dev,
static struct iio_chan_spec adis16204_channels[] = {
IIO_CHAN(IIO_VOLTAGE, 0, 0, 0, "supply", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_supply, ADIS16204_SCAN_SUPPLY,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_aux, ADIS16204_SCAN_AUX_ADC,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT,
temp, ADIS16204_SCAN_TEMP,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_PEAK_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_PEAK_SEPARATE_BIT,
accel_x, ADIS16204_SCAN_ACC_X,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Y,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_PEAK_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_PEAK_SEPARATE_BIT,
accel_y, ADIS16204_SCAN_ACC_Y,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN_SOFT_TIMESTAMP(5),
@@ -577,19 +577,9 @@ static struct spi_driver adis16204_driver = {
.probe = adis16204_probe,
.remove = __devexit_p(adis16204_remove),
};
-
-static __init int adis16204_init(void)
-{
- return spi_register_driver(&adis16204_driver);
-}
-module_init(adis16204_init);
-
-static __exit void adis16204_exit(void)
-{
- spi_unregister_driver(&adis16204_driver);
-}
-module_exit(adis16204_exit);
+module_spi_driver(adis16204_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("ADIS16204 High-g Digital Impact Sensor and Recorder");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16204");
diff --git a/drivers/staging/iio/accel/adis16204_ring.c b/drivers/staging/iio/accel/adis16204_ring.c
index 6fd3d8f..4081179 100644
--- a/drivers/staging/iio/accel/adis16204_ring.c
+++ b/drivers/staging/iio/accel/adis16204_ring.c
@@ -71,11 +71,11 @@ static irqreturn_t adis16204_trigger_handler(int irq, void *p)
return -ENOMEM;
}
- if (ring->scan_count)
- if (adis16204_read_ring_data(&indio_dev->dev, st->rx) >= 0)
- for (; i < ring->scan_count; i++)
- data[i] = be16_to_cpup(
- (__be16 *)&(st->rx[i*2]));
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength) &&
+ adis16204_read_ring_data(&indio_dev->dev, st->rx) >= 0)
+ for (; i < bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength); i++)
+ data[i] = be16_to_cpup((__be16 *)&(st->rx[i*2]));
/* Guaranteed to be aligned with 8 byte boundary */
if (ring->scan_timestamp)
@@ -114,10 +114,8 @@ int adis16204_configure_ring(struct iio_dev *indio_dev)
indio_dev->buffer = ring;
/* Effectively select the ring buffer implementation */
ring->access = &ring_sw_access_funcs;
- ring->bpe = 2;
ring->scan_timestamp = true;
- ring->setup_ops = &adis16204_ring_setup_ops;
- ring->owner = THIS_MODULE;
+ indio_dev->setup_ops = &adis16204_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&adis16204_trigger_handler,
diff --git a/drivers/staging/iio/accel/adis16209_core.c b/drivers/staging/iio/accel/adis16209_core.c
index 0a8571b..a98715f 100644
--- a/drivers/staging/iio/accel/adis16209_core.c
+++ b/drivers/staging/iio/accel/adis16209_core.c
@@ -18,7 +18,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "adis16209.h"
@@ -304,7 +304,7 @@ static int adis16209_write_raw(struct iio_dev *indio_dev,
s16 val16;
u8 addr;
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
switch (chan->type) {
case IIO_ACCEL:
case IIO_INCLI:
@@ -355,8 +355,7 @@ static int adis16209_read_raw(struct iio_dev *indio_dev,
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
*val = 0;
@@ -381,10 +380,10 @@ static int adis16209_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
break;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
*val = 25;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
switch (chan->type) {
case IIO_ACCEL:
bits = 14;
@@ -410,34 +409,34 @@ static int adis16209_read_raw(struct iio_dev *indio_dev,
static struct iio_chan_spec adis16209_channels[] = {
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_supply, ADIS16209_SCAN_SUPPLY,
IIO_ST('u', 14, 16, 0), 0),
IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT,
temp, ADIS16209_SCAN_TEMP,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
accel_x, ADIS16209_SCAN_ACC_X,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Y,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
accel_y, ADIS16209_SCAN_ACC_Y,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_aux, ADIS16209_SCAN_AUX_ADC,
IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
incli_x, ADIS16209_SCAN_INCLI_X,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_INCLI, 1, 0, 0, NULL, 0, IIO_MOD_Y,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
incli_y, ADIS16209_SCAN_INCLI_Y,
IIO_ST('s', 14, 16, 0), 0),
IIO_CHAN(IIO_ROT, 0, 1, 0, NULL, 0, IIO_MOD_X,
@@ -553,19 +552,9 @@ static struct spi_driver adis16209_driver = {
.probe = adis16209_probe,
.remove = __devexit_p(adis16209_remove),
};
-
-static __init int adis16209_init(void)
-{
- return spi_register_driver(&adis16209_driver);
-}
-module_init(adis16209_init);
-
-static __exit void adis16209_exit(void)
-{
- spi_unregister_driver(&adis16209_driver);
-}
-module_exit(adis16209_exit);
+module_spi_driver(adis16209_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADIS16209 Digital Vibration Sensor driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16209");
diff --git a/drivers/staging/iio/accel/adis16209_ring.c b/drivers/staging/iio/accel/adis16209_ring.c
index d17e39d..2a6fd334 100644
--- a/drivers/staging/iio/accel/adis16209_ring.c
+++ b/drivers/staging/iio/accel/adis16209_ring.c
@@ -72,9 +72,10 @@ static irqreturn_t adis16209_trigger_handler(int irq, void *p)
return -ENOMEM;
}
- if (ring->scan_count &&
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength) &&
adis16209_read_ring_data(&indio_dev->dev, st->rx) >= 0)
- for (; i < ring->scan_count; i++)
+ for (; i < bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength); i++)
data[i] = be16_to_cpup((__be16 *)&(st->rx[i*2]));
/* Guaranteed to be aligned with 8 byte boundary */
@@ -114,10 +115,8 @@ int adis16209_configure_ring(struct iio_dev *indio_dev)
indio_dev->buffer = ring;
/* Effectively select the ring buffer implementation */
ring->access = &ring_sw_access_funcs;
- ring->bpe = 2;
ring->scan_timestamp = true;
- ring->setup_ops = &adis16209_ring_setup_ops;
- ring->owner = THIS_MODULE;
+ indio_dev->setup_ops = &adis16209_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&adis16209_trigger_handler,
diff --git a/drivers/staging/iio/accel/adis16220_core.c b/drivers/staging/iio/accel/adis16220_core.c
index 6d4503d..51a852d 100644
--- a/drivers/staging/iio/accel/adis16220_core.c
+++ b/drivers/staging/iio/accel/adis16220_core.c
@@ -167,9 +167,9 @@ static ssize_t adis16220_write_16bit(struct device *dev,
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
- long val;
+ u16 val;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtou16(buf, 10, &val);
if (ret)
goto error_ret;
ret = adis16220_spi_write_reg_16(indio_dev, this_attr->address, val);
@@ -510,17 +510,17 @@ static int adis16220_read_raw(struct iio_dev *indio_dev,
case 0:
addrind = 0;
break;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
if (chan->type == IIO_TEMP) {
*val = 25;
return IIO_VAL_INT;
}
addrind = 1;
break;
- case (1 << IIO_CHAN_INFO_PEAK_SEPARATE):
+ case IIO_CHAN_INFO_PEAK:
addrind = 2;
break;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
*val = 0;
switch (chan->type) {
case IIO_TEMP:
@@ -575,27 +575,27 @@ static const struct iio_chan_spec adis16220_channels[] = {
.indexed = 1,
.channel = 0,
.extend_name = "supply",
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = in_supply,
}, {
.type = IIO_ACCEL,
- .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_PEAK_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_PEAK_SEPARATE_BIT,
.address = accel,
}, {
.type = IIO_TEMP,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = temp,
}, {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 1,
- .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = in_1,
}, {
.type = IIO_VOLTAGE,
@@ -708,19 +708,9 @@ static struct spi_driver adis16220_driver = {
.probe = adis16220_probe,
.remove = __devexit_p(adis16220_remove),
};
-
-static __init int adis16220_init(void)
-{
- return spi_register_driver(&adis16220_driver);
-}
-module_init(adis16220_init);
-
-static __exit void adis16220_exit(void)
-{
- spi_unregister_driver(&adis16220_driver);
-}
-module_exit(adis16220_exit);
+module_spi_driver(adis16220_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADIS16220 Digital Vibration Sensor");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16220");
diff --git a/drivers/staging/iio/accel/adis16240_core.c b/drivers/staging/iio/accel/adis16240_core.c
index b8be292..17f77fe 100644
--- a/drivers/staging/iio/accel/adis16240_core.c
+++ b/drivers/staging/iio/accel/adis16240_core.c
@@ -21,7 +21,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "adis16240.h"
@@ -389,8 +389,7 @@ static int adis16240_read_raw(struct iio_dev *indio_dev,
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
*val = 0;
@@ -411,14 +410,14 @@ static int adis16240_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
break;
- case (1 << IIO_CHAN_INFO_PEAK_SCALE_SHARED):
+ case IIO_CHAN_INFO_PEAK_SCALE:
*val = 6;
*val2 = 629295;
return IIO_VAL_INT_PLUS_MICRO;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
*val = 25;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
bits = 10;
mutex_lock(&indio_dev->mlock);
addr = adis16240_addresses[chan->address][1];
@@ -432,7 +431,7 @@ static int adis16240_read_raw(struct iio_dev *indio_dev,
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_PEAK_SEPARATE):
+ case IIO_CHAN_INFO_PEAK:
bits = 10;
mutex_lock(&indio_dev->mlock);
addr = adis16240_addresses[chan->address][2];
@@ -460,7 +459,7 @@ static int adis16240_write_raw(struct iio_dev *indio_dev,
s16 val16;
u8 addr;
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
val16 = val & ((1 << bits) - 1);
addr = adis16240_addresses[chan->address][1];
return adis16240_spi_write_reg_16(indio_dev, addr, val16);
@@ -470,7 +469,7 @@ static int adis16240_write_raw(struct iio_dev *indio_dev,
static struct iio_chan_spec adis16240_channels[] = {
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "supply", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
in_supply, ADIS16240_SCAN_SUPPLY,
IIO_ST('u', 10, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0,
@@ -478,22 +477,22 @@ static struct iio_chan_spec adis16240_channels[] = {
in_aux, ADIS16240_SCAN_AUX_ADC,
IIO_ST('u', 10, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_X,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
accel_x, ADIS16240_SCAN_ACC_X,
IIO_ST('s', 10, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Y,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
accel_y, ADIS16240_SCAN_ACC_Y,
IIO_ST('s', 10, 16, 0), 0),
IIO_CHAN(IIO_ACCEL, 1, 0, 0, NULL, 0, IIO_MOD_Z,
- (1 << IIO_CHAN_INFO_SCALE_SHARED) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
accel_z, ADIS16240_SCAN_ACC_Z,
IIO_ST('s', 10, 16, 0), 0),
IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
temp, ADIS16240_SCAN_TEMP,
IIO_ST('u', 10, 16, 0), 0),
IIO_CHAN_SOFT_TIMESTAMP(6)
@@ -606,19 +605,9 @@ static struct spi_driver adis16240_driver = {
.probe = adis16240_probe,
.remove = __devexit_p(adis16240_remove),
};
-
-static __init int adis16240_init(void)
-{
- return spi_register_driver(&adis16240_driver);
-}
-module_init(adis16240_init);
-
-static __exit void adis16240_exit(void)
-{
- spi_unregister_driver(&adis16240_driver);
-}
-module_exit(adis16240_exit);
+module_spi_driver(adis16240_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices Programmable Impact Sensor and Recorder");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16240");
diff --git a/drivers/staging/iio/accel/adis16240_ring.c b/drivers/staging/iio/accel/adis16240_ring.c
index b907ca3..e23622d 100644
--- a/drivers/staging/iio/accel/adis16240_ring.c
+++ b/drivers/staging/iio/accel/adis16240_ring.c
@@ -69,9 +69,10 @@ static irqreturn_t adis16240_trigger_handler(int irq, void *p)
return -ENOMEM;
}
- if (ring->scan_count &&
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength) &&
adis16240_read_ring_data(&indio_dev->dev, st->rx) >= 0)
- for (; i < ring->scan_count; i++)
+ for (; i < bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength); i++)
data[i] = be16_to_cpup((__be16 *)&(st->rx[i*2]));
/* Guaranteed to be aligned with 8 byte boundary */
@@ -111,10 +112,8 @@ int adis16240_configure_ring(struct iio_dev *indio_dev)
indio_dev->buffer = ring;
/* Effectively select the ring buffer implementation */
ring->access = &ring_sw_access_funcs;
- ring->bpe = 2;
ring->scan_timestamp = true;
- ring->setup_ops = &adis16240_ring_setup_ops;
- ring->owner = THIS_MODULE;
+ indio_dev->setup_ops = &adis16240_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&adis16240_trigger_handler,
diff --git a/drivers/staging/iio/accel/kxsd9.c b/drivers/staging/iio/accel/kxsd9.c
index 5238503..d13d721 100644
--- a/drivers/staging/iio/accel/kxsd9.c
+++ b/drivers/staging/iio/accel/kxsd9.c
@@ -140,7 +140,7 @@ static int kxsd9_write_raw(struct iio_dev *indio_dev,
{
int ret = -EINVAL;
- if (mask == (1 << IIO_CHAN_INFO_SCALE_SHARED)) {
+ if (mask == IIO_CHAN_INFO_SCALE) {
/* Check no integer component */
if (val)
return -EINVAL;
@@ -164,7 +164,7 @@ static int kxsd9_read_raw(struct iio_dev *indio_dev,
goto error_ret;
*val = ret;
break;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
ret = spi_w8r8(st->us, KXSD9_READ(KXSD9_REG_CTRL_C));
if (ret)
goto error_ret;
@@ -181,7 +181,7 @@ error_ret:
.type = IIO_ACCEL, \
.modified = 1, \
.channel2 = IIO_MOD_##axis, \
- .info_mask = 1 << IIO_CHAN_INFO_SCALE_SHARED, \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
.address = KXSD9_REG_##axis, \
}
@@ -268,8 +268,10 @@ static int __devexit kxsd9_remove(struct spi_device *spi)
}
static const struct spi_device_id kxsd9_id[] = {
- {"kxsd9", 0}
+ {"kxsd9", 0},
+ { },
};
+MODULE_DEVICE_TABLE(spi, kxsd9_id);
static struct spi_driver kxsd9_driver = {
.driver = {
@@ -280,18 +282,7 @@ static struct spi_driver kxsd9_driver = {
.remove = __devexit_p(kxsd9_remove),
.id_table = kxsd9_id,
};
-
-static __init int kxsd9_spi_init(void)
-{
- return spi_register_driver(&kxsd9_driver);
-}
-module_init(kxsd9_spi_init);
-
-static __exit void kxsd9_spi_exit(void)
-{
- spi_unregister_driver(&kxsd9_driver);
-}
-module_exit(kxsd9_spi_exit);
+module_spi_driver(kxsd9_driver);
MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
MODULE_DESCRIPTION("Kionix KXSD9 SPI driver");
diff --git a/drivers/staging/iio/accel/lis3l02dq.h b/drivers/staging/iio/accel/lis3l02dq.h
index 7237a9a..2db383f 100644
--- a/drivers/staging/iio/accel/lis3l02dq.h
+++ b/drivers/staging/iio/accel/lis3l02dq.h
@@ -181,11 +181,6 @@ int lis3l02dq_disable_all_events(struct iio_dev *indio_dev);
void lis3l02dq_remove_trigger(struct iio_dev *indio_dev);
int lis3l02dq_probe_trigger(struct iio_dev *indio_dev);
-ssize_t lis3l02dq_read_accel_from_buffer(struct iio_buffer *buffer,
- int index,
- int *val);
-
-
int lis3l02dq_configure_buffer(struct iio_dev *indio_dev);
void lis3l02dq_unconfigure_buffer(struct iio_dev *indio_dev);
@@ -212,13 +207,6 @@ static inline int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
{
return 0;
}
-static inline ssize_t
-lis3l02dq_read_accel_from_buffer(struct iio_buffer *buffer,
- int index,
- int *val)
-{
- return 0;
-}
static int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
{
diff --git a/drivers/staging/iio/accel/lis3l02dq_core.c b/drivers/staging/iio/accel/lis3l02dq_core.c
index 559545a..376da51 100644
--- a/drivers/staging/iio/accel/lis3l02dq_core.c
+++ b/drivers/staging/iio/accel/lis3l02dq_core.c
@@ -25,7 +25,8 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../events.h"
+#include "../buffer.h"
#include "lis3l02dq.h"
@@ -226,14 +227,14 @@ static int lis3l02dq_write_raw(struct iio_dev *indio_dev,
u8 uval;
s8 sval;
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
if (val > 255 || val < -256)
return -EINVAL;
sval = val;
reg = lis3l02dq_axis_map[LIS3L02DQ_BIAS][chan->address];
ret = lis3l02dq_spi_write_reg_8(indio_dev, reg, sval);
break;
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
if (val & ~0xFF)
return -EINVAL;
uval = val;
@@ -259,23 +260,20 @@ static int lis3l02dq_read_raw(struct iio_dev *indio_dev,
case 0:
/* Take the iio_dev status lock */
mutex_lock(&indio_dev->mlock);
- if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED)
- ret = lis3l02dq_read_accel_from_buffer(indio_dev->
- buffer,
- chan->scan_index,
- val);
- else {
+ if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ ret = -EBUSY;
+ } else {
reg = lis3l02dq_axis_map
[LIS3L02DQ_ACCEL][chan->address];
ret = lis3l02dq_read_reg_s16(indio_dev, reg, val);
}
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
*val = 0;
*val2 = 9580;
return IIO_VAL_INT_PLUS_MICRO;
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
reg = lis3l02dq_axis_map[LIS3L02DQ_GAIN][chan->address];
ret = lis3l02dq_spi_read_reg_8(indio_dev, reg, &utemp);
if (ret)
@@ -284,7 +282,7 @@ static int lis3l02dq_read_raw(struct iio_dev *indio_dev,
*val = utemp;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
reg = lis3l02dq_axis_map[LIS3L02DQ_BIAS][chan->address];
ret = lis3l02dq_spi_read_reg_8(indio_dev, reg, (u8 *)&stemp);
/* to match with what previous code does */
@@ -331,11 +329,11 @@ static ssize_t lis3l02dq_write_frequency(struct device *dev,
size_t len)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- long val;
+ unsigned long val;
int ret;
u8 t;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
@@ -515,9 +513,9 @@ static irqreturn_t lis3l02dq_event_handler(int irq, void *private)
}
#define LIS3L02DQ_INFO_MASK \
- ((1 << IIO_CHAN_INFO_SCALE_SHARED) | \
- (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) | \
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE))
+ (IIO_CHAN_INFO_SCALE_SHARED_BIT | \
+ IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT)
#define LIS3L02DQ_EVENT_MASK \
(IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) | \
@@ -534,7 +532,7 @@ static struct iio_chan_spec lis3l02dq_channels[] = {
};
-static ssize_t lis3l02dq_read_event_config(struct iio_dev *indio_dev,
+static int lis3l02dq_read_event_config(struct iio_dev *indio_dev,
u64 event_code)
{
@@ -804,19 +802,9 @@ static struct spi_driver lis3l02dq_driver = {
.probe = lis3l02dq_probe,
.remove = __devexit_p(lis3l02dq_remove),
};
-
-static __init int lis3l02dq_init(void)
-{
- return spi_register_driver(&lis3l02dq_driver);
-}
-module_init(lis3l02dq_init);
-
-static __exit void lis3l02dq_exit(void)
-{
- spi_unregister_driver(&lis3l02dq_driver);
-}
-module_exit(lis3l02dq_exit);
+module_spi_driver(lis3l02dq_driver);
MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
MODULE_DESCRIPTION("ST LIS3L02DQ Accelerometer SPI driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:lis3l02dq");
diff --git a/drivers/staging/iio/accel/lis3l02dq_ring.c b/drivers/staging/iio/accel/lis3l02dq_ring.c
index 89527af..98c5c92 100644
--- a/drivers/staging/iio/accel/lis3l02dq_ring.c
+++ b/drivers/staging/iio/accel/lis3l02dq_ring.c
@@ -38,38 +38,6 @@ irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private)
return IRQ_WAKE_THREAD;
}
-/**
- * lis3l02dq_read_accel_from_buffer() individual acceleration read from buffer
- **/
-ssize_t lis3l02dq_read_accel_from_buffer(struct iio_buffer *buffer,
- int index,
- int *val)
-{
- int ret;
- s16 *data;
-
- if (!iio_scan_mask_query(buffer, index))
- return -EINVAL;
-
- if (!buffer->access->read_last)
- return -EBUSY;
-
- data = kmalloc(buffer->access->get_bytes_per_datum(buffer),
- GFP_KERNEL);
- if (data == NULL)
- return -ENOMEM;
-
- ret = buffer->access->read_last(buffer, (u8 *)data);
- if (ret)
- goto error_free_data;
- *val = data[bitmap_weight(buffer->scan_mask, index)];
-error_free_data:
-
- kfree(data);
-
- return ret;
-}
-
static const u8 read_all_tx_array[] = {
LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_L_ADDR), 0,
LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_H_ADDR), 0,
@@ -87,21 +55,21 @@ static const u8 read_all_tx_array[] = {
**/
static int lis3l02dq_read_all(struct iio_dev *indio_dev, u8 *rx_array)
{
- struct iio_buffer *buffer = indio_dev->buffer;
struct lis3l02dq_state *st = iio_priv(indio_dev);
struct spi_transfer *xfers;
struct spi_message msg;
int ret, i, j = 0;
- xfers = kzalloc((buffer->scan_count) * 2
- * sizeof(*xfers), GFP_KERNEL);
+ xfers = kcalloc(bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) * 2,
+ sizeof(*xfers), GFP_KERNEL);
if (!xfers)
return -ENOMEM;
mutex_lock(&st->buf_lock);
for (i = 0; i < ARRAY_SIZE(read_all_tx_array)/4; i++)
- if (test_bit(i, buffer->scan_mask)) {
+ if (test_bit(i, indio_dev->active_scan_mask)) {
/* lower byte */
xfers[j].tx_buf = st->tx + 2*j;
st->tx[2*j] = read_all_tx_array[i*4];
@@ -129,7 +97,8 @@ static int lis3l02dq_read_all(struct iio_dev *indio_dev, u8 *rx_array)
* values in alternate bytes
*/
spi_message_init(&msg);
- for (j = 0; j < buffer->scan_count * 2; j++)
+ for (j = 0; j < bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) * 2; j++)
spi_message_add_tail(&xfers[j], &msg);
ret = spi_sync(st->us, &msg);
@@ -145,14 +114,16 @@ static int lis3l02dq_get_buffer_element(struct iio_dev *indio_dev,
int ret, i;
u8 *rx_array ;
s16 *data = (s16 *)buf;
+ int scan_count = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength);
- rx_array = kzalloc(4 * (indio_dev->buffer->scan_count), GFP_KERNEL);
+ rx_array = kzalloc(4 * scan_count, GFP_KERNEL);
if (rx_array == NULL)
return -ENOMEM;
ret = lis3l02dq_read_all(indio_dev, rx_array);
if (ret < 0)
return ret;
- for (i = 0; i < indio_dev->buffer->scan_count; i++)
+ for (i = 0; i < scan_count; i++)
data[i] = combine_8_to_16(rx_array[i*4+1],
rx_array[i*4+3]);
kfree(rx_array);
@@ -175,7 +146,7 @@ static irqreturn_t lis3l02dq_trigger_handler(int irq, void *p)
return -ENOMEM;
}
- if (buffer->scan_count)
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
len = lis3l02dq_get_buffer_element(indio_dev, data);
/* Guaranteed to be aligned with 8 byte boundary */
@@ -363,17 +334,17 @@ static int lis3l02dq_buffer_postenable(struct iio_dev *indio_dev)
if (ret)
goto error_ret;
- if (iio_scan_mask_query(indio_dev->buffer, 0)) {
+ if (test_bit(0, indio_dev->active_scan_mask)) {
t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
oneenabled = true;
} else
t &= ~LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
- if (iio_scan_mask_query(indio_dev->buffer, 1)) {
+ if (test_bit(1, indio_dev->active_scan_mask)) {
t |= LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
oneenabled = true;
} else
t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
- if (iio_scan_mask_query(indio_dev->buffer, 2)) {
+ if (test_bit(2, indio_dev->active_scan_mask)) {
t |= LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
oneenabled = true;
} else
@@ -437,11 +408,9 @@ int lis3l02dq_configure_buffer(struct iio_dev *indio_dev)
indio_dev->buffer = buffer;
/* Effectively select the buffer implementation */
indio_dev->buffer->access = &lis3l02dq_access_funcs;
- buffer->bpe = 2;
buffer->scan_timestamp = true;
- buffer->setup_ops = &lis3l02dq_buffer_setup_ops;
- buffer->owner = THIS_MODULE;
+ indio_dev->setup_ops = &lis3l02dq_buffer_setup_ops;
/* Functions are NULL as we set handler below */
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
diff --git a/drivers/staging/iio/accel/sca3000_core.c b/drivers/staging/iio/accel/sca3000_core.c
index a44a705..49764fb 100644
--- a/drivers/staging/iio/accel/sca3000_core.c
+++ b/drivers/staging/iio/accel/sca3000_core.c
@@ -20,7 +20,8 @@
#include <linux/module.h>
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../events.h"
+#include "../buffer.h"
#include "sca3000.h"
@@ -381,13 +382,17 @@ sca3000_store_measurement_mode(struct device *dev,
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct sca3000_state *st = iio_priv(indio_dev);
int ret;
- int mask = 0x03;
- long val;
+ u8 mask = 0x03;
+ u8 val;
mutex_lock(&st->lock);
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtou8(buf, 10, &val);
if (ret)
goto error_ret;
+ if (val > 3) {
+ ret = -EINVAL;
+ goto error_ret;
+ }
ret = sca3000_read_data_short(st, SCA3000_REG_ADDR_MODE, 1);
if (ret)
goto error_ret;
@@ -424,7 +429,7 @@ static IIO_DEVICE_ATTR(measurement_mode, S_IRUGO | S_IWUSR,
static IIO_DEVICE_ATTR(revision, S_IRUGO, sca3000_show_rev, NULL, 0);
#define SCA3000_INFO_MASK \
- (1 << IIO_CHAN_INFO_SCALE_SHARED)
+ IIO_CHAN_INFO_SCALE_SHARED_BIT
#define SCA3000_EVENT_MASK \
(IIO_EV_BIT(IIO_EV_TYPE_MAG, IIO_EV_DIR_RISING))
@@ -474,7 +479,7 @@ static int sca3000_read_raw(struct iio_dev *indio_dev,
(sizeof(*val)*8 - 13);
mutex_unlock(&st->lock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
*val = 0;
if (chan->type == IIO_ACCEL)
*val2 = st->info->scale;
@@ -1161,9 +1166,9 @@ static int __devinit sca3000_probe(struct spi_device *spi)
if (ret < 0)
goto error_unregister_dev;
if (indio_dev->buffer) {
- iio_scan_mask_set(indio_dev->buffer, 0);
- iio_scan_mask_set(indio_dev->buffer, 1);
- iio_scan_mask_set(indio_dev->buffer, 2);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer, 0);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer, 1);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer, 2);
}
if (spi->irq) {
@@ -1240,6 +1245,7 @@ static const struct spi_device_id sca3000_id[] = {
{"sca3000_e05", e05},
{}
};
+MODULE_DEVICE_TABLE(spi, sca3000_id);
static struct spi_driver sca3000_driver = {
.driver = {
@@ -1250,18 +1256,7 @@ static struct spi_driver sca3000_driver = {
.remove = __devexit_p(sca3000_remove),
.id_table = sca3000_id,
};
-
-static __init int sca3000_init(void)
-{
- return spi_register_driver(&sca3000_driver);
-}
-module_init(sca3000_init);
-
-static __exit void sca3000_exit(void)
-{
- spi_unregister_driver(&sca3000_driver);
-}
-module_exit(sca3000_exit);
+module_spi_driver(sca3000_driver);
MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
MODULE_DESCRIPTION("VTI SCA3000 Series Accelerometers SPI driver");
diff --git a/drivers/staging/iio/accel/sca3000_ring.c b/drivers/staging/iio/accel/sca3000_ring.c
index 4a9a01d..6b824a1 100644
--- a/drivers/staging/iio/accel/sca3000_ring.c
+++ b/drivers/staging/iio/accel/sca3000_ring.c
@@ -20,7 +20,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_hw.h"
#include "sca3000.h"
@@ -146,7 +146,6 @@ static int sca3000_ring_get_bytes_per_datum(struct iio_buffer *r)
}
static IIO_BUFFER_ENABLE_ATTR;
-static IIO_BUFFER_BYTES_PER_DATUM_ATTR;
static IIO_BUFFER_LENGTH_ATTR;
/**
@@ -158,8 +157,7 @@ static ssize_t sca3000_query_ring_int(struct device *dev,
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret, val;
- struct iio_buffer *ring = dev_get_drvdata(dev);
- struct iio_dev *indio_dev = ring->indio_dev;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct sca3000_state *st = iio_priv(indio_dev);
mutex_lock(&st->lock);
@@ -180,8 +178,7 @@ static ssize_t sca3000_set_ring_int(struct device *dev,
const char *buf,
size_t len)
{
- struct iio_buffer *ring = dev_get_drvdata(dev);
- struct iio_dev *indio_dev = ring->indio_dev;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct sca3000_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
long val;
@@ -222,8 +219,7 @@ static ssize_t sca3000_show_buffer_scale(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct iio_buffer *ring = dev_get_drvdata(dev);
- struct iio_dev *indio_dev = ring->indio_dev;
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct sca3000_state *st = iio_priv(indio_dev);
return sprintf(buf, "0.%06d\n", 4*st->info->scale);
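
The ring sysfs attributes are now registered against the IIO device rather than a separate buffer device, so the handlers take the iio_dev straight from dev_get_drvdata() instead of bouncing through a struct iio_buffer back-pointer. Sketch assembled from the handlers above:

static ssize_t example_show_buffer_scale(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	/* was: ring = dev_get_drvdata(dev); indio_dev = ring->indio_dev; */
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct sca3000_state *st = iio_priv(indio_dev);

	return sprintf(buf, "0.%06d\n", 4 * st->info->scale);
}
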
@@ -243,7 +239,6 @@ static IIO_DEVICE_ATTR(in_accel_scale,
*/
static struct attribute *sca3000_ring_attributes[] = {
&dev_attr_length.attr,
- &dev_attr_bytes_per_datum.attr,
&dev_attr_enable.attr,
&iio_dev_attr_50_percent.dev_attr.attr,
&iio_dev_attr_75_percent.dev_attr.attr,
@@ -269,7 +264,7 @@ static struct iio_buffer *sca3000_rb_allocate(struct iio_dev *indio_dev)
buf = &ring->buf;
buf->stufftoread = 0;
buf->attrs = &sca3000_ring_attr;
- iio_buffer_init(buf, indio_dev);
+ iio_buffer_init(buf);
return buf;
}
@@ -350,7 +345,7 @@ static const struct iio_buffer_setup_ops sca3000_ring_setup_ops = {
void sca3000_register_ring_funcs(struct iio_dev *indio_dev)
{
- indio_dev->buffer->setup_ops = &sca3000_ring_setup_ops;
+ indio_dev->setup_ops = &sca3000_ring_setup_ops;
}
/**
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index 31c376b..45f4504 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -19,7 +19,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_sw.h"
#include "../trigger.h"
#include "../trigger_consumer.h"
@@ -453,25 +453,6 @@ out:
return ret;
}
-static int ad7192_scan_from_ring(struct ad7192_state *st, unsigned ch, int *val)
-{
- struct iio_buffer *ring = iio_priv_to_dev(st)->buffer;
- int ret;
- s64 dat64[2];
- u32 *dat32 = (u32 *)dat64;
-
- if (!(test_bit(ch, ring->scan_mask)))
- return -EBUSY;
-
- ret = ring->access->read_last(ring, (u8 *) &dat64);
- if (ret)
- return ret;
-
- *val = *dat32;
-
- return 0;
-}
-
static int ad7192_ring_preenable(struct iio_dev *indio_dev)
{
struct ad7192_state *st = iio_priv(indio_dev);
@@ -479,12 +460,14 @@ static int ad7192_ring_preenable(struct iio_dev *indio_dev)
size_t d_size;
unsigned channel;
- if (!ring->scan_count)
+ if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
return -EINVAL;
- channel = find_first_bit(ring->scan_mask, indio_dev->masklength);
+ channel = find_first_bit(indio_dev->active_scan_mask,
+ indio_dev->masklength);
- d_size = ring->scan_count *
+ d_size = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) *
indio_dev->channels[0].scan_type.storagebits / 8;
if (ring->scan_timestamp) {
@@ -544,7 +527,7 @@ static irqreturn_t ad7192_trigger_handler(int irq, void *p)
s64 dat64[2];
s32 *dat32 = (s32 *)dat64;
- if (ring->scan_count)
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
__ad7192_read_reg(st, 1, 1, AD7192_REG_DATA,
dat32,
indio_dev->channels[0].scan_type.realbits/8);
@@ -592,7 +575,7 @@ static int ad7192_register_ring_funcs_and_init(struct iio_dev *indio_dev)
}
/* Ring buffer functions - here trigger setup related */
- indio_dev->buffer->setup_ops = &ad7192_ring_setup_ops;
+ indio_dev->setup_ops = &ad7192_ring_setup_ops;
/* Flag that polled ring buffering is possible */
indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
@@ -626,6 +609,10 @@ static irqreturn_t ad7192_data_rdy_trig_poll(int irq, void *private)
return IRQ_HANDLED;
}
+static struct iio_trigger_ops ad7192_trigger_ops = {
+ .owner = THIS_MODULE,
+};
+
static int ad7192_probe_trigger(struct iio_dev *indio_dev)
{
struct ad7192_state *st = iio_priv(indio_dev);
@@ -638,7 +625,7 @@ static int ad7192_probe_trigger(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_ret;
}
-
+ st->trig->ops = &ad7192_trigger_ops;
ret = request_irq(st->spi->irq,
ad7192_data_rdy_trig_poll,
IRQF_TRIGGER_LOW,
@@ -650,7 +637,6 @@ static int ad7192_probe_trigger(struct iio_dev *indio_dev)
disable_irq_nosync(st->spi->irq);
st->irq_dis = true;
st->trig->dev.parent = &st->spi->dev;
- st->trig->owner = THIS_MODULE;
st->trig->private_data = indio_dev;
ret = iio_trigger_register(st->trig);
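
Trigger ownership also moves: instead of setting st->trig->owner, drivers provide a struct iio_trigger_ops (here carrying only the module owner) and attach it after allocation. A sketch of the probe-time sequence, using the allocation call as these staging drivers do; everything outside the core API names is illustrative:

static struct iio_trigger_ops example_trigger_ops = {
	.owner = THIS_MODULE,
};

static int example_probe_trigger(struct iio_dev *indio_dev,
				 struct spi_device *spi)
{
	struct iio_trigger *trig;

	trig = iio_allocate_trigger("%s-dev%d",
				    spi_get_device_id(spi)->name,
				    indio_dev->id);
	if (!trig)
		return -ENOMEM;

	trig->ops = &example_trigger_ops;	/* replaces trig->owner */
	trig->dev.parent = &spi->dev;
	trig->private_data = indio_dev;

	return iio_trigger_register(trig);
}
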
@@ -795,7 +781,7 @@ static ssize_t ad7192_set(struct device *dev,
return -EBUSY;
}
- switch (this_attr->address) {
+ switch ((u32) this_attr->address) {
case AD7192_REG_GPOCON:
if (val)
st->gpocon |= AD7192_GPOCON_BPDSW;
@@ -838,14 +824,14 @@ static struct attribute *ad7192_attributes[] = {
NULL
};
-static mode_t ad7192_attr_is_visible(struct kobject *kobj,
+static umode_t ad7192_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad7192_state *st = iio_priv(indio_dev);
- mode_t mode = attr->mode;
+ umode_t mode = attr->mode;
if ((st->devid != ID_AD7195) &&
(attr == &iio_dev_attr_ac_excitation_en.dev_attr.attr))
@@ -873,8 +859,7 @@ static int ad7192_read_raw(struct iio_dev *indio_dev,
case 0:
mutex_lock(&indio_dev->mlock);
if (iio_buffer_enabled(indio_dev))
- ret = ad7192_scan_from_ring(st,
- chan->scan_index, &smpl);
+ ret = -EBUSY;
else
ret = ad7192_read(st, chan->address,
chan->scan_type.realbits / 8, &smpl);
@@ -901,18 +886,20 @@ static int ad7192_read_raw(struct iio_dev *indio_dev,
}
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
- mutex_lock(&indio_dev->mlock);
- *val = st->scale_avail[AD7192_CONF_GAIN(st->conf)][0];
- *val2 = st->scale_avail[AD7192_CONF_GAIN(st->conf)][1];
- mutex_unlock(&indio_dev->mlock);
-
- return IIO_VAL_INT_PLUS_NANO;
-
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
- *val = 1000;
-
- return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ mutex_lock(&indio_dev->mlock);
+ *val = st->scale_avail[AD7192_CONF_GAIN(st->conf)][0];
+ *val2 = st->scale_avail[AD7192_CONF_GAIN(st->conf)][1];
+ mutex_unlock(&indio_dev->mlock);
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_TEMP:
+ *val = 1000;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
}
return -EINVAL;
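
With the *_SHARED/*_SEPARATE bit values gone from the mask argument, read_raw() switches on the plain IIO_CHAN_INFO_SCALE enum and then on the channel type, which is exactly what the restructured hunk above does for voltage versus temperature. Condensed sketch (the scale numbers are placeholders, not the AD7192 values):

static int example_read_raw(struct iio_dev *indio_dev,
			    struct iio_chan_spec const *chan,
			    int *val, int *val2, long mask)
{
	switch (mask) {
	case IIO_CHAN_INFO_SCALE:
		switch (chan->type) {
		case IIO_VOLTAGE:
			*val = 0;
			*val2 = 305180;		/* placeholder: uV per LSB */
			return IIO_VAL_INT_PLUS_MICRO;
		case IIO_TEMP:
			*val = 1000;		/* placeholder: mC per LSB */
			return IIO_VAL_INT;
		default:
			return -EINVAL;
		}
	default:
		return -EINVAL;
	}
}
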
@@ -935,7 +922,7 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
}
switch (mask) {
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
ret = -EINVAL;
for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
if (val2 == st->scale_avail[i][1]) {
@@ -992,7 +979,7 @@ static const struct iio_info ad7192_info = {
.extend_name = _name, \
.channel = _chan, \
.channel2 = _chan2, \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
.address = _address, \
.scan_index = _si, \
.scan_type = IIO_ST('s', 24, 32, 0)}
@@ -1001,7 +988,7 @@ static const struct iio_info ad7192_info = {
{ .type = IIO_VOLTAGE, \
.indexed = 1, \
.channel = _chan, \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
.address = _address, \
.scan_index = _si, \
.scan_type = IIO_ST('s', 24, 32, 0)}
@@ -1010,7 +997,7 @@ static const struct iio_info ad7192_info = {
{ .type = IIO_TEMP, \
.indexed = 1, \
.channel = _chan, \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
.address = _address, \
.scan_index = _si, \
.scan_type = IIO_ST('s', 24, 32, 0)}
@@ -1151,6 +1138,7 @@ static const struct spi_device_id ad7192_id[] = {
{"ad7195", ID_AD7195},
{}
};
+MODULE_DEVICE_TABLE(spi, ad7192_id);
static struct spi_driver ad7192_driver = {
.driver = {
@@ -1161,18 +1149,7 @@ static struct spi_driver ad7192_driver = {
.remove = __devexit_p(ad7192_remove),
.id_table = ad7192_id,
};
-
-static int __init ad7192_init(void)
-{
- return spi_register_driver(&ad7192_driver);
-}
-module_init(ad7192_init);
-
-static void __exit ad7192_exit(void)
-{
- spi_unregister_driver(&ad7192_driver);
-}
-module_exit(ad7192_exit);
+module_spi_driver(ad7192_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7190, AD7192, AD7195 ADC");
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index 372d059..7dbd681 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -18,6 +18,7 @@
#include "../iio.h"
#include "../sysfs.h"
+#include "../events.h"
#include "ad7280a.h"
@@ -455,10 +456,10 @@ static ssize_t ad7280_store_balance_timer(struct device *dev,
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad7280_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- long val;
+ unsigned long val;
int ret;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret)
return ret;
@@ -487,8 +488,8 @@ static int ad7280_channel_init(struct ad7280_state *st)
{
int dev, ch, cnt;
- st->channels = kzalloc(sizeof(*st->channels) *
- ((st->slave_num + 1) * 12 + 2), GFP_KERNEL);
+ st->channels = kcalloc((st->slave_num + 1) * 12 + 2,
+ sizeof(*st->channels), GFP_KERNEL);
if (st->channels == NULL)
return -ENOMEM;
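
kzalloc(n * size, ...) becomes kcalloc(n, size, ...), which zeroes the array the same way but lets the allocator catch multiplication overflow on the element count. Sketch:

#include <linux/slab.h>

static struct iio_chan_spec *example_alloc_channels(unsigned int nchannels)
{
	/* was: kzalloc(sizeof(*channels) * nchannels, GFP_KERNEL) */
	return kcalloc(nchannels, sizeof(struct iio_chan_spec), GFP_KERNEL);
}
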
@@ -507,7 +508,7 @@ static int ad7280_channel_init(struct ad7280_state *st)
}
st->channels[cnt].indexed = 1;
st->channels[cnt].info_mask =
- (1 << IIO_CHAN_INFO_SCALE_SHARED);
+ IIO_CHAN_INFO_SCALE_SHARED_BIT;
st->channels[cnt].address =
AD7280A_DEVADDR(dev) << 8 | ch;
st->channels[cnt].scan_index = cnt;
@@ -523,7 +524,7 @@ static int ad7280_channel_init(struct ad7280_state *st)
st->channels[cnt].channel2 = dev * 6;
st->channels[cnt].address = AD7280A_ALL_CELLS;
st->channels[cnt].indexed = 1;
- st->channels[cnt].info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED);
+ st->channels[cnt].info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT;
st->channels[cnt].scan_index = cnt;
st->channels[cnt].scan_type.sign = 'u';
st->channels[cnt].scan_type.realbits = 32;
@@ -600,7 +601,7 @@ static ssize_t ad7280_read_channel_config(struct device *dev,
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
unsigned val;
- switch (this_attr->address) {
+ switch ((u32) this_attr->address) {
case AD7280A_CELL_OVERVOLTAGE:
val = 1000 + (st->cell_threshhigh * 1568) / 100;
break;
@@ -636,7 +637,7 @@ static ssize_t ad7280_write_channel_config(struct device *dev,
if (ret)
return ret;
- switch (this_attr->address) {
+ switch ((u32) this_attr->address) {
case AD7280A_CELL_OVERVOLTAGE:
case AD7280A_CELL_UNDERVOLTAGE:
val = ((val - 1000) * 100) / 1568; /* LSB 15.68mV */
@@ -652,7 +653,7 @@ static ssize_t ad7280_write_channel_config(struct device *dev,
val = clamp(val, 0L, 0xFFL);
mutex_lock(&indio_dev->mlock);
- switch (this_attr->address) {
+ switch ((u32) this_attr->address) {
case AD7280A_CELL_OVERVOLTAGE:
st->cell_threshhigh = val;
break;
@@ -682,13 +683,13 @@ static irqreturn_t ad7280_event_handler(int irq, void *private)
unsigned *channels;
int i, ret;
- channels = kzalloc(sizeof(*channels) * st->scan_cnt, GFP_KERNEL);
+ channels = kcalloc(st->scan_cnt, sizeof(*channels), GFP_KERNEL);
if (channels == NULL)
return IRQ_HANDLED;
ret = ad7280_read_all_channels(st, st->scan_cnt, channels);
if (ret < 0)
- return IRQ_HANDLED;
+ goto out;
for (i = 0; i < st->scan_cnt; i++) {
if (((channels[i] >> 23) & 0xF) <= AD7280A_CELL_VOLTAGE_6) {
@@ -731,6 +732,7 @@ static irqreturn_t ad7280_event_handler(int irq, void *private)
}
}
+out:
kfree(channels);
return IRQ_HANDLED;
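
The event-handler change is a leak fix: on a failed read the old code returned without freeing the scratch array, whereas the new code funnels every exit through one label. Sketch of the shape, with a hypothetical helper and count:

#include <linux/interrupt.h>
#include <linux/slab.h>

#define EXAMPLE_SCAN_CNT 8		/* hypothetical */

static irqreturn_t example_event_handler(int irq, void *private)
{
	unsigned int *channels;
	int ret;

	channels = kcalloc(EXAMPLE_SCAN_CNT, sizeof(*channels), GFP_KERNEL);
	if (!channels)
		return IRQ_HANDLED;

	ret = example_read_all_channels(channels);	/* hypothetical */
	if (ret < 0)
		goto out;	/* previously returned here, leaking channels */

	/* ... inspect channels[] and push IIO events ... */
out:
	kfree(channels);
	return IRQ_HANDLED;
}
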
@@ -801,7 +803,7 @@ static int ad7280_read_raw(struct iio_dev *indio_dev,
*val = ret;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
if ((chan->address & 0xFF) <= AD7280A_CELL_VOLTAGE_6)
scale_uv = (4000 * 1000) >> AD7280A_BITS;
else
@@ -968,29 +970,18 @@ static const struct spi_device_id ad7280_id[] = {
{"ad7280a", 0},
{}
};
+MODULE_DEVICE_TABLE(spi, ad7280_id);
static struct spi_driver ad7280_driver = {
.driver = {
.name = "ad7280",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad7280_probe,
.remove = __devexit_p(ad7280_remove),
.id_table = ad7280_id,
};
-
-static int __init ad7280_init(void)
-{
- return spi_register_driver(&ad7280_driver);
-}
-module_init(ad7280_init);
-
-static void __exit ad7280_exit(void)
-{
- spi_unregister_driver(&ad7280_driver);
-}
-module_exit(ad7280_exit);
+module_spi_driver(ad7280_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7280A");
diff --git a/drivers/staging/iio/adc/ad7291.c b/drivers/staging/iio/adc/ad7291.c
index 10e79e8..0a13616 100644
--- a/drivers/staging/iio/adc/ad7291.c
+++ b/drivers/staging/iio/adc/ad7291.c
@@ -19,6 +19,7 @@
#include "../iio.h"
#include "../sysfs.h"
+#include "../events.h"
/*
* Simplified handling
@@ -500,7 +501,7 @@ static int ad7291_read_raw(struct iio_dev *indio_dev,
default:
return -EINVAL;
}
- case (1 << IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE):
+ case IIO_CHAN_INFO_AVERAGE_RAW:
ret = i2c_smbus_read_word_data(chip->client,
AD7291_T_AVERAGE);
if (ret < 0)
@@ -509,18 +510,24 @@ static int ad7291_read_raw(struct iio_dev *indio_dev,
AD7291_VALUE_MASK) << 4) >> 4;
*val = signval;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
- scale_uv = (chip->int_vref_mv * 1000) >> AD7291_BITS;
- *val = scale_uv / 1000;
- *val2 = (scale_uv % 1000) * 1000;
- return IIO_VAL_INT_PLUS_MICRO;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
- /*
- * One LSB of the ADC corresponds to 0.25 deg C.
- * The temperature reading is in 12-bit twos complement format
- */
- *val = 250;
- return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ scale_uv = (chip->int_vref_mv * 1000) >> AD7291_BITS;
+ *val = scale_uv / 1000;
+ *val2 = (scale_uv % 1000) * 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ /*
+ * One LSB of the ADC corresponds to 0.25 deg C.
+ * The temperature reading is in 12-bit twos
+ * complement format
+ */
+ *val = 250;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
default:
return -EINVAL;
}
@@ -529,7 +536,7 @@ static int ad7291_read_raw(struct iio_dev *indio_dev,
#define AD7291_VOLTAGE_CHAN(_chan) \
{ \
.type = IIO_VOLTAGE, \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
.indexed = 1, \
.channel = _chan, \
.event_mask = IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING)|\
@@ -547,8 +554,8 @@ static const struct iio_chan_spec ad7291_channels[] = {
AD7291_VOLTAGE_CHAN(7),
{
.type = IIO_TEMP,
- .info_mask = (1 << IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.indexed = 1,
.channel = 0,
.event_mask =
@@ -700,20 +707,8 @@ static struct i2c_driver ad7291_driver = {
.remove = __devexit_p(ad7291_remove),
.id_table = ad7291_id,
};
-
-static __init int ad7291_init(void)
-{
- return i2c_add_driver(&ad7291_driver);
-}
-
-static __exit void ad7291_exit(void)
-{
- i2c_del_driver(&ad7291_driver);
-}
+module_i2c_driver(ad7291_driver);
MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
MODULE_DESCRIPTION("Analog Devices AD7291 ADC driver");
MODULE_LICENSE("GPL v2");
-
-module_init(ad7291_init);
-module_exit(ad7291_exit);
diff --git a/drivers/staging/iio/adc/ad7298.h b/drivers/staging/iio/adc/ad7298.h
index 9ddf5cf..a0e5dea 100644
--- a/drivers/staging/iio/adc/ad7298.h
+++ b/drivers/staging/iio/adc/ad7298.h
@@ -54,14 +54,9 @@ struct ad7298_state {
};
#ifdef CONFIG_IIO_BUFFER
-int ad7298_scan_from_ring(struct iio_dev *indio_dev, long ch);
int ad7298_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void ad7298_ring_cleanup(struct iio_dev *indio_dev);
#else /* CONFIG_IIO_BUFFER */
-static inline int ad7298_scan_from_ring(struct iio_dev *indio_dev, long ch)
-{
- return 0;
-}
static inline int
ad7298_register_ring_funcs_and_init(struct iio_dev *indio_dev)
diff --git a/drivers/staging/iio/adc/ad7298_core.c b/drivers/staging/iio/adc/ad7298_core.c
index c1de73a..8dd6aa9 100644
--- a/drivers/staging/iio/adc/ad7298_core.c
+++ b/drivers/staging/iio/adc/ad7298_core.c
@@ -18,37 +18,37 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "ad7298.h"
static struct iio_chan_spec ad7298_channels[] = {
IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
9, AD7298_CH_TEMP, IIO_ST('s', 32, 32, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
1, 1, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
2, 2, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 3, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
3, 3, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 4, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
4, 4, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 5, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
5, 5, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 6, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
6, 6, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 7, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
7, 7, IIO_ST('u', 12, 16, 0), 0),
IIO_CHAN_SOFT_TIMESTAMP(8),
};
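
The info_mask conversions throughout this series swap the open-coded (1 << IIO_CHAN_INFO_*_SHARED/SEPARATE) shifts for the *_BIT helper macros; the bit still records whether the attribute is shared across channels, while read_raw()/write_raw() now receive the bare IIO_CHAN_INFO_* value. A channel entry written with the new macros, mirroring the table above:

static const struct iio_chan_spec example_channels[] = {
	{
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.channel = 0,
		.info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
		.scan_index = 0,
		.scan_type = IIO_ST('u', 12, 16, 0),
	},
	IIO_CHAN_SOFT_TIMESTAMP(1),
};
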
@@ -69,27 +69,28 @@ static int ad7298_scan_direct(struct ad7298_state *st, unsigned ch)
static int ad7298_scan_temp(struct ad7298_state *st, int *val)
{
int tmp, ret;
+ __be16 buf;
- tmp = cpu_to_be16(AD7298_WRITE | AD7298_TSENSE |
+ buf = cpu_to_be16(AD7298_WRITE | AD7298_TSENSE |
AD7298_TAVG | st->ext_ref);
- ret = spi_write(st->spi, (u8 *)&tmp, 2);
+ ret = spi_write(st->spi, (u8 *)&buf, 2);
if (ret)
return ret;
- tmp = 0;
+ buf = cpu_to_be16(0);
- ret = spi_write(st->spi, (u8 *)&tmp, 2);
+ ret = spi_write(st->spi, (u8 *)&buf, 2);
if (ret)
return ret;
usleep_range(101, 1000); /* sleep > 100us */
- ret = spi_read(st->spi, (u8 *)&tmp, 2);
+ ret = spi_read(st->spi, (u8 *)&buf, 2);
if (ret)
return ret;
- tmp = be16_to_cpu(tmp) & RES_MASK(AD7298_BITS);
+ tmp = be16_to_cpu(buf) & RES_MASK(AD7298_BITS);
/*
* One LSB of the ADC corresponds to 0.25 deg C.
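
The temperature read in ad7298_scan_temp() (hunk above) now stages the raw SPI bytes in a __be16 before converting, which documents the wire byte order and keeps the endianness annotations consistent for static checkers. Minimal sketch of the same read pattern (the helper name is illustrative):

#include <linux/spi/spi.h>

static int example_read_be16(struct spi_device *spi, u16 *out)
{
	__be16 buf;
	int ret;

	ret = spi_read(spi, (u8 *)&buf, 2);	/* device answers big-endian */
	if (ret)
		return ret;

	*out = be16_to_cpu(buf);		/* convert once, at the edge */
	return 0;
}
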
@@ -122,12 +123,8 @@ static int ad7298_read_raw(struct iio_dev *indio_dev,
switch (m) {
case 0:
mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev)) {
- if (chan->address == AD7298_CH_TEMP)
- ret = -ENODEV;
- else
- ret = ad7298_scan_from_ring(indio_dev,
- chan->address);
+ if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ ret = -EBUSY;
} else {
if (chan->address == AD7298_CH_TEMP)
ret = ad7298_scan_temp(st, val);
@@ -143,15 +140,20 @@ static int ad7298_read_raw(struct iio_dev *indio_dev,
*val = ret & RES_MASK(AD7298_BITS);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
- scale_uv = (st->int_vref_mv * 1000) >> AD7298_BITS;
- *val = scale_uv / 1000;
- *val2 = (scale_uv % 1000) * 1000;
- return IIO_VAL_INT_PLUS_MICRO;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
- *val = 1;
- *val2 = 0;
- return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_VOLTAGE:
+ scale_uv = (st->int_vref_mv * 1000) >> AD7298_BITS;
+ *val = scale_uv / 1000;
+ *val2 = (scale_uv % 1000) * 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ *val = 1;
+ *val2 = 0;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
}
return -EINVAL;
}
@@ -265,31 +267,19 @@ static const struct spi_device_id ad7298_id[] = {
{"ad7298", 0},
{}
};
+MODULE_DEVICE_TABLE(spi, ad7298_id);
static struct spi_driver ad7298_driver = {
.driver = {
.name = "ad7298",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad7298_probe,
.remove = __devexit_p(ad7298_remove),
.id_table = ad7298_id,
};
-
-static int __init ad7298_init(void)
-{
- return spi_register_driver(&ad7298_driver);
-}
-module_init(ad7298_init);
-
-static void __exit ad7298_exit(void)
-{
- spi_unregister_driver(&ad7298_driver);
-}
-module_exit(ad7298_exit);
+module_spi_driver(ad7298_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7298 ADC");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:ad7298");
diff --git a/drivers/staging/iio/adc/ad7298_ring.c b/drivers/staging/iio/adc/ad7298_ring.c
index 47630d5..d1a12dd 100644
--- a/drivers/staging/iio/adc/ad7298_ring.c
+++ b/drivers/staging/iio/adc/ad7298_ring.c
@@ -12,41 +12,12 @@
#include <linux/spi/spi.h>
#include "../iio.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_sw.h"
#include "../trigger_consumer.h"
#include "ad7298.h"
-int ad7298_scan_from_ring(struct iio_dev *indio_dev, long ch)
-{
- struct iio_buffer *ring = indio_dev->buffer;
- int ret;
- u16 *ring_data;
-
- if (!(test_bit(ch, ring->scan_mask))) {
- ret = -EBUSY;
- goto error_ret;
- }
-
- ring_data = kmalloc(ring->access->get_bytes_per_datum(ring),
- GFP_KERNEL);
- if (ring_data == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- ret = ring->access->read_last(ring, (u8 *) ring_data);
- if (ret)
- goto error_free_ring_data;
-
- ret = be16_to_cpu(ring_data[ch]);
-
-error_free_ring_data:
- kfree(ring_data);
-error_ret:
- return ret;
-}
-
/**
* ad7298_ring_preenable() setup the parameters of the ring before enabling
*
@@ -61,8 +32,9 @@ static int ad7298_ring_preenable(struct iio_dev *indio_dev)
size_t d_size;
int i, m;
unsigned short command;
-
- d_size = ring->scan_count * (AD7298_STORAGE_BITS / 8);
+ int scan_count = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength);
+ d_size = scan_count * (AD7298_STORAGE_BITS / 8);
if (ring->scan_timestamp) {
d_size += sizeof(s64);
@@ -79,7 +51,7 @@ static int ad7298_ring_preenable(struct iio_dev *indio_dev)
command = AD7298_WRITE | st->ext_ref;
for (i = 0, m = AD7298_CH(0); i < AD7298_MAX_CHAN; i++, m >>= 1)
- if (test_bit(i, ring->scan_mask))
+ if (test_bit(i, indio_dev->active_scan_mask))
command |= m;
st->tx_buf[0] = cpu_to_be16(command);
@@ -96,7 +68,7 @@ static int ad7298_ring_preenable(struct iio_dev *indio_dev)
spi_message_add_tail(&st->ring_xfer[0], &st->ring_msg);
spi_message_add_tail(&st->ring_xfer[1], &st->ring_msg);
- for (i = 0; i < ring->scan_count; i++) {
+ for (i = 0; i < scan_count; i++) {
st->ring_xfer[i + 2].rx_buf = &st->rx_buf[i];
st->ring_xfer[i + 2].len = 2;
st->ring_xfer[i + 2].cs_change = 1;
@@ -134,7 +106,8 @@ static irqreturn_t ad7298_trigger_handler(int irq, void *p)
&time_ns, sizeof(time_ns));
}
- for (i = 0; i < ring->scan_count; i++)
+ for (i = 0; i < bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength); i++)
buf[i] = be16_to_cpu(st->rx_buf[i]);
indio_dev->buffer->access->store_to(ring, (u8 *)buf, time_ns);
@@ -174,7 +147,7 @@ int ad7298_register_ring_funcs_and_init(struct iio_dev *indio_dev)
}
/* Ring buffer functions - here trigger setup related */
- indio_dev->buffer->setup_ops = &ad7298_ring_setup_ops;
+ indio_dev->setup_ops = &ad7298_ring_setup_ops;
indio_dev->buffer->scan_timestamp = true;
/* Flag that polled ring buffering is possible */
diff --git a/drivers/staging/iio/adc/ad7476.h b/drivers/staging/iio/adc/ad7476.h
index 6014292..27f696c 100644
--- a/drivers/staging/iio/adc/ad7476.h
+++ b/drivers/staging/iio/adc/ad7476.h
@@ -50,14 +50,9 @@ enum ad7476_supported_device_ids {
};
#ifdef CONFIG_IIO_BUFFER
-int ad7476_scan_from_ring(struct iio_dev *indio_dev);
int ad7476_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void ad7476_ring_cleanup(struct iio_dev *indio_dev);
#else /* CONFIG_IIO_BUFFER */
-static inline int ad7476_scan_from_ring(struct iio_dev *indio_dev)
-{
- return 0;
-}
static inline int
ad7476_register_ring_funcs_and_init(struct iio_dev *indio_dev)
diff --git a/drivers/staging/iio/adc/ad7476_core.c b/drivers/staging/iio/adc/ad7476_core.c
index fd79fac..0c064d1 100644
--- a/drivers/staging/iio/adc/ad7476_core.c
+++ b/drivers/staging/iio/adc/ad7476_core.c
@@ -17,7 +17,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "ad7476.h"
@@ -46,7 +46,7 @@ static int ad7476_read_raw(struct iio_dev *indio_dev,
case 0:
mutex_lock(&indio_dev->mlock);
if (iio_buffer_enabled(indio_dev))
- ret = ad7476_scan_from_ring(indio_dev);
+ ret = -EBUSY;
else
ret = ad7476_scan_direct(st);
mutex_unlock(&indio_dev->mlock);
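
Every *_scan_from_ring() helper in this patch is deleted, so a raw read while the ring is running no longer tries to peek at the last stored sample and instead fails fast with -EBUSY. Sketch of the resulting read path (the state type and direct-read helper are hypothetical):

static int example_read_single(struct iio_dev *indio_dev, int *val)
{
	struct example_state *st = iio_priv(indio_dev);	/* hypothetical */
	int ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_enabled(indio_dev))
		ret = -EBUSY;		/* buffered capture owns the device */
	else
		ret = example_scan_direct(st);	/* hypothetical */
	mutex_unlock(&indio_dev->mlock);

	if (ret < 0)
		return ret;

	*val = ret;
	return IIO_VAL_INT;
}
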
@@ -56,7 +56,7 @@ static int ad7476_read_raw(struct iio_dev *indio_dev,
*val = (ret >> st->chip_info->channel[0].scan_type.shift) &
RES_MASK(st->chip_info->channel[0].scan_type.realbits);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
scale_uv = (st->int_vref_mv * 1000)
>> st->chip_info->channel[0].scan_type.realbits;
*val = scale_uv/1000;
@@ -69,49 +69,49 @@ static int ad7476_read_raw(struct iio_dev *indio_dev,
static const struct ad7476_chip_info ad7476_chip_info_tbl[] = {
[ID_AD7466] = {
.channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('u', 12, 16, 0), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
[ID_AD7467] = {
.channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('u', 10, 16, 2), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
[ID_AD7468] = {
.channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1 , 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('u', 8, 16, 4), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
[ID_AD7475] = {
.channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('u', 12, 16, 0), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
[ID_AD7476] = {
.channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('u', 12, 16, 0), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
[ID_AD7477] = {
.channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('u', 10, 16, 2), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
[ID_AD7478] = {
.channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('u', 8, 16, 4), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
},
[ID_AD7495] = {
.channel[0] = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('u', 12, 16, 0), 0),
.channel[1] = IIO_CHAN_SOFT_TIMESTAMP(1),
.int_vref_mv = 2500,
@@ -237,31 +237,19 @@ static const struct spi_device_id ad7476_id[] = {
{"ad7495", ID_AD7495},
{}
};
+MODULE_DEVICE_TABLE(spi, ad7476_id);
static struct spi_driver ad7476_driver = {
.driver = {
.name = "ad7476",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad7476_probe,
.remove = __devexit_p(ad7476_remove),
.id_table = ad7476_id,
};
-
-static int __init ad7476_init(void)
-{
- return spi_register_driver(&ad7476_driver);
-}
-module_init(ad7476_init);
-
-static void __exit ad7476_exit(void)
-{
- spi_unregister_driver(&ad7476_driver);
-}
-module_exit(ad7476_exit);
+module_spi_driver(ad7476_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7475/6/7/8(A) AD7466/7/8 ADC");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:ad7476");
diff --git a/drivers/staging/iio/adc/ad7476_ring.c b/drivers/staging/iio/adc/ad7476_ring.c
index e82c1a4..4e298b2 100644
--- a/drivers/staging/iio/adc/ad7476_ring.c
+++ b/drivers/staging/iio/adc/ad7476_ring.c
@@ -14,36 +14,12 @@
#include <linux/spi/spi.h>
#include "../iio.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_sw.h"
#include "../trigger_consumer.h"
#include "ad7476.h"
-int ad7476_scan_from_ring(struct iio_dev *indio_dev)
-{
- struct iio_buffer *ring = indio_dev->buffer;
- int ret;
- u8 *ring_data;
-
- ring_data = kmalloc(ring->access->get_bytes_per_datum(ring),
- GFP_KERNEL);
- if (ring_data == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- ret = ring->access->read_last(ring, ring_data);
- if (ret)
- goto error_free_ring_data;
-
- ret = (ring_data[0] << 8) | ring_data[1];
-
-error_free_ring_data:
- kfree(ring_data);
-error_ret:
- return ret;
-}
-
/**
* ad7476_ring_preenable() setup the parameters of the ring before enabling
*
@@ -56,7 +32,8 @@ static int ad7476_ring_preenable(struct iio_dev *indio_dev)
struct ad7476_state *st = iio_priv(indio_dev);
struct iio_buffer *ring = indio_dev->buffer;
- st->d_size = ring->scan_count *
+ st->d_size = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) *
st->chip_info->channel[0].scan_type.storagebits / 8;
if (ring->scan_timestamp) {
@@ -137,7 +114,7 @@ int ad7476_register_ring_funcs_and_init(struct iio_dev *indio_dev)
}
/* Ring buffer functions - here trigger setup related */
- indio_dev->buffer->setup_ops = &ad7476_ring_setup_ops;
+ indio_dev->setup_ops = &ad7476_ring_setup_ops;
indio_dev->buffer->scan_timestamp = true;
/* Flag that polled ring buffering is possible */
diff --git a/drivers/staging/iio/adc/ad7606.h b/drivers/staging/iio/adc/ad7606.h
index 35018c3..10f5989 100644
--- a/drivers/staging/iio/adc/ad7606.h
+++ b/drivers/staging/iio/adc/ad7606.h
@@ -99,7 +99,6 @@ enum ad7606_supported_device_ids {
ID_AD7606_4
};
-int ad7606_scan_from_ring(struct iio_dev *indio_dev, unsigned ch);
int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void ad7606_ring_cleanup(struct iio_dev *indio_dev);
#endif /* IIO_ADC_AD7606_H_ */
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
index 54423ab..ddb7ef9 100644
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ b/drivers/staging/iio/adc/ad7606_core.c
@@ -20,7 +20,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "ad7606.h"
@@ -91,7 +91,7 @@ static int ad7606_read_raw(struct iio_dev *indio_dev,
case 0:
mutex_lock(&indio_dev->mlock);
if (iio_buffer_enabled(indio_dev))
- ret = ad7606_scan_from_ring(indio_dev, chan->address);
+ ret = -EBUSY;
else
ret = ad7606_scan_direct(indio_dev, chan->address);
mutex_unlock(&indio_dev->mlock);
@@ -100,7 +100,7 @@ static int ad7606_read_raw(struct iio_dev *indio_dev,
return ret;
*val = (short) ret;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
scale_uv = (st->range * 1000 * 2)
>> st->chip_info->channels[0].scan_type.realbits;
*val = scale_uv / 1000;
@@ -205,14 +205,14 @@ static struct attribute *ad7606_attributes[] = {
NULL,
};
-static mode_t ad7606_attr_is_visible(struct kobject *kobj,
+static umode_t ad7606_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad7606_state *st = iio_priv(indio_dev);
- mode_t mode = attr->mode;
+ umode_t mode = attr->mode;
if (!(gpio_is_valid(st->pdata->gpio_os0) &&
gpio_is_valid(st->pdata->gpio_os1) &&
diff --git a/drivers/staging/iio/adc/ad7606_par.c b/drivers/staging/iio/adc/ad7606_par.c
index 688632e..cff9756 100644
--- a/drivers/staging/iio/adc/ad7606_par.c
+++ b/drivers/staging/iio/adc/ad7606_par.c
@@ -189,4 +189,3 @@ module_exit(ad7606_cleanup);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7606 ADC");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:ad7606_par");
diff --git a/drivers/staging/iio/adc/ad7606_ring.c b/drivers/staging/iio/adc/ad7606_ring.c
index 20927fd..e8f94a1 100644
--- a/drivers/staging/iio/adc/ad7606_ring.c
+++ b/drivers/staging/iio/adc/ad7606_ring.c
@@ -12,36 +12,12 @@
#include <linux/slab.h>
#include "../iio.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_sw.h"
#include "../trigger_consumer.h"
#include "ad7606.h"
-int ad7606_scan_from_ring(struct iio_dev *indio_dev, unsigned ch)
-{
- struct iio_buffer *ring = indio_dev->buffer;
- int ret;
- u16 *ring_data;
-
- ring_data = kmalloc(ring->access->get_bytes_per_datum(ring),
- GFP_KERNEL);
- if (ring_data == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- ret = ring->access->read_last(ring, (u8 *) ring_data);
- if (ret)
- goto error_free_ring_data;
-
- ret = ring_data[ch];
-
-error_free_ring_data:
- kfree(ring_data);
-error_ret:
- return ret;
-}
-
/**
* ad7606_trigger_handler_th() th/bh of trigger launched polling to ring buffer
*
@@ -136,8 +112,6 @@ int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev)
/* Effectively select the ring buffer implementation */
indio_dev->buffer->access = &ring_sw_access_funcs;
- indio_dev->buffer->bpe =
- st->chip_info->channels[0].scan_type.storagebits / 8;
indio_dev->pollfunc = iio_alloc_pollfunc(&ad7606_trigger_handler_th_bh,
&ad7606_trigger_handler_th_bh,
0,
@@ -152,7 +126,7 @@ int ad7606_register_ring_funcs_and_init(struct iio_dev *indio_dev)
/* Ring buffer functions - here trigger setup related */
- indio_dev->buffer->setup_ops = &ad7606_ring_setup_ops;
+ indio_dev->setup_ops = &ad7606_ring_setup_ops;
indio_dev->buffer->scan_timestamp = true ;
INIT_WORK(&st->poll_work, &ad7606_poll_bh_to_ring);
diff --git a/drivers/staging/iio/adc/ad7606_spi.c b/drivers/staging/iio/adc/ad7606_spi.c
index aede1ba..237f1c4 100644
--- a/drivers/staging/iio/adc/ad7606_spi.c
+++ b/drivers/staging/iio/adc/ad7606_spi.c
@@ -97,11 +97,11 @@ static const struct spi_device_id ad7606_id[] = {
{"ad7606-4", ID_AD7606_4},
{}
};
+MODULE_DEVICE_TABLE(spi, ad7606_id);
static struct spi_driver ad7606_driver = {
.driver = {
.name = "ad7606",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
.pm = AD7606_SPI_PM_OPS,
},
@@ -109,20 +109,8 @@ static struct spi_driver ad7606_driver = {
.remove = __devexit_p(ad7606_spi_remove),
.id_table = ad7606_id,
};
-
-static int __init ad7606_spi_init(void)
-{
- return spi_register_driver(&ad7606_driver);
-}
-module_init(ad7606_spi_init);
-
-static void __exit ad7606_spi_exit(void)
-{
- spi_unregister_driver(&ad7606_driver);
-}
-module_exit(ad7606_spi_exit);
+module_spi_driver(ad7606_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7606 ADC");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:ad7606_spi");
diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c
index 7a579a1..a13e58c 100644
--- a/drivers/staging/iio/adc/ad7780.c
+++ b/drivers/staging/iio/adc/ad7780.c
@@ -114,7 +114,7 @@ static int ad7780_read_raw(struct iio_dev *indio_dev,
*val *= 128;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
scale_uv = (st->int_vref_mv * 100000)
>> (channel.scan_type.realbits - 1);
*val = scale_uv / 100000;
@@ -127,12 +127,12 @@ static int ad7780_read_raw(struct iio_dev *indio_dev,
static const struct ad7780_chip_info ad7780_chip_info_tbl[] = {
[ID_AD7780] = {
.channel = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('s', 24, 32, 8), 0),
},
[ID_AD7781] = {
.channel = IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
0, 0, IIO_ST('s', 20, 32, 12), 0),
},
};
@@ -272,29 +272,18 @@ static const struct spi_device_id ad7780_id[] = {
{"ad7781", ID_AD7781},
{}
};
+MODULE_DEVICE_TABLE(spi, ad7780_id);
static struct spi_driver ad7780_driver = {
.driver = {
.name = "ad7780",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad7780_probe,
.remove = __devexit_p(ad7780_remove),
.id_table = ad7780_id,
};
-
-static int __init ad7780_init(void)
-{
- return spi_register_driver(&ad7780_driver);
-}
-module_init(ad7780_init);
-
-static void __exit ad7780_exit(void)
-{
- spi_unregister_driver(&ad7780_driver);
-}
-module_exit(ad7780_exit);
+module_spi_driver(ad7780_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7780/1 ADC");
diff --git a/drivers/staging/iio/adc/ad7793.c b/drivers/staging/iio/adc/ad7793.c
index 999f8f7..6a058b1 100644
--- a/drivers/staging/iio/adc/ad7793.c
+++ b/drivers/staging/iio/adc/ad7793.c
@@ -20,7 +20,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_sw.h"
#include "../trigger.h"
#include "../trigger_consumer.h"
@@ -316,25 +316,6 @@ out:
return ret;
}
-static int ad7793_scan_from_ring(struct ad7793_state *st, unsigned ch, int *val)
-{
- struct iio_buffer *ring = iio_priv_to_dev(st)->buffer;
- int ret;
- s64 dat64[2];
- u32 *dat32 = (u32 *)dat64;
-
- if (!(test_bit(ch, ring->scan_mask)))
- return -EBUSY;
-
- ret = ring->access->read_last(ring, (u8 *) &dat64);
- if (ret)
- return ret;
-
- *val = *dat32;
-
- return 0;
-}
-
static int ad7793_ring_preenable(struct iio_dev *indio_dev)
{
struct ad7793_state *st = iio_priv(indio_dev);
@@ -342,14 +323,15 @@ static int ad7793_ring_preenable(struct iio_dev *indio_dev)
size_t d_size;
unsigned channel;
- if (!ring->scan_count)
+ if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
return -EINVAL;
- channel = find_first_bit(ring->scan_mask,
+ channel = find_first_bit(indio_dev->active_scan_mask,
indio_dev->masklength);
- d_size = ring->scan_count *
- indio_dev->channels[0].scan_type.storagebits / 8;
+ d_size = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) *
+ indio_dev->channels[0].scan_type.storagebits / 8;
if (ring->scan_timestamp) {
d_size += sizeof(s64);
@@ -411,7 +393,7 @@ static irqreturn_t ad7793_trigger_handler(int irq, void *p)
s64 dat64[2];
s32 *dat32 = (s32 *)dat64;
- if (ring->scan_count)
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
__ad7793_read_reg(st, 1, 1, AD7793_REG_DATA,
dat32,
indio_dev->channels[0].scan_type.realbits/8);
@@ -459,7 +441,7 @@ static int ad7793_register_ring_funcs_and_init(struct iio_dev *indio_dev)
}
/* Ring buffer functions - here trigger setup related */
- indio_dev->buffer->setup_ops = &ad7793_ring_setup_ops;
+ indio_dev->setup_ops = &ad7793_ring_setup_ops;
/* Flag that polled ring buffering is possible */
indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
@@ -493,6 +475,10 @@ static irqreturn_t ad7793_data_rdy_trig_poll(int irq, void *private)
return IRQ_HANDLED;
}
+static struct iio_trigger_ops ad7793_trigger_ops = {
+ .owner = THIS_MODULE,
+};
+
static int ad7793_probe_trigger(struct iio_dev *indio_dev)
{
struct ad7793_state *st = iio_priv(indio_dev);
@@ -505,6 +491,7 @@ static int ad7793_probe_trigger(struct iio_dev *indio_dev)
ret = -ENOMEM;
goto error_ret;
}
+ st->trig->ops = &ad7793_trigger_ops;
ret = request_irq(st->spi->irq,
ad7793_data_rdy_trig_poll,
@@ -517,7 +504,6 @@ static int ad7793_probe_trigger(struct iio_dev *indio_dev)
disable_irq_nosync(st->spi->irq);
st->irq_dis = true;
st->trig->dev.parent = &st->spi->dev;
- st->trig->owner = THIS_MODULE;
st->trig->private_data = indio_dev;
ret = iio_trigger_register(st->trig);
@@ -649,8 +635,7 @@ static int ad7793_read_raw(struct iio_dev *indio_dev,
case 0:
mutex_lock(&indio_dev->mlock);
if (iio_buffer_enabled(indio_dev))
- ret = ad7793_scan_from_ring(st,
- chan->scan_index, &smpl);
+ ret = -EBUSY;
else
ret = ad7793_read(st, chan->address,
chan->scan_type.realbits / 8, &smpl);
@@ -667,19 +652,21 @@ static int ad7793_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
- *val = st->scale_avail[(st->conf >> 8) & 0x7][0];
- *val2 = st->scale_avail[(st->conf >> 8) & 0x7][1];
-
- return IIO_VAL_INT_PLUS_NANO;
-
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_VOLTAGE:
- /* 1170mV / 2^23 * 6 */
- scale_uv = (1170ULL * 100000000ULL * 6ULL)
- >> (chan->scan_type.realbits -
- (unipolar ? 0 : 1));
+ if (chan->differential) {
+ *val = st->
+ scale_avail[(st->conf >> 8) & 0x7][0];
+ *val2 = st->
+ scale_avail[(st->conf >> 8) & 0x7][1];
+ return IIO_VAL_INT_PLUS_NANO;
+ } else {
+ /* 1170mV / 2^23 * 6 */
+ scale_uv = (1170ULL * 100000000ULL * 6ULL)
+ >> (chan->scan_type.realbits -
+ (unipolar ? 0 : 1));
+ }
break;
case IIO_TEMP:
/* Always uses unity gain and internal ref */
@@ -716,7 +703,7 @@ static int ad7793_write_raw(struct iio_dev *indio_dev,
}
switch (mask) {
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
ret = -EINVAL;
for (i = 0; i < ARRAY_SIZE(st->scale_avail); i++)
if (val2 == st->scale_avail[i][1]) {
@@ -775,7 +762,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel = 0,
.channel2 = 0,
.address = AD7793_CH_AIN1P_AIN1M,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = 0,
.scan_type = IIO_ST('s', 24, 32, 0)
},
@@ -786,7 +773,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel = 1,
.channel2 = 1,
.address = AD7793_CH_AIN2P_AIN2M,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = 1,
.scan_type = IIO_ST('s', 24, 32, 0)
},
@@ -797,7 +784,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel = 2,
.channel2 = 2,
.address = AD7793_CH_AIN3P_AIN3M,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = 2,
.scan_type = IIO_ST('s', 24, 32, 0)
},
@@ -809,7 +796,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel = 2,
.channel2 = 2,
.address = AD7793_CH_AIN1M_AIN1M,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = 2,
.scan_type = IIO_ST('s', 24, 32, 0)
},
@@ -818,7 +805,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.indexed = 1,
.channel = 0,
.address = AD7793_CH_TEMP,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.scan_index = 4,
.scan_type = IIO_ST('s', 24, 32, 0),
},
@@ -828,7 +815,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.indexed = 1,
.channel = 4,
.address = AD7793_CH_AVDD_MONITOR,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.scan_index = 5,
.scan_type = IIO_ST('s', 24, 32, 0),
},
@@ -842,7 +829,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel = 0,
.channel2 = 0,
.address = AD7793_CH_AIN1P_AIN1M,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = 0,
.scan_type = IIO_ST('s', 16, 32, 0)
},
@@ -853,7 +840,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel = 1,
.channel2 = 1,
.address = AD7793_CH_AIN2P_AIN2M,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = 1,
.scan_type = IIO_ST('s', 16, 32, 0)
},
@@ -864,7 +851,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel = 2,
.channel2 = 2,
.address = AD7793_CH_AIN3P_AIN3M,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = 2,
.scan_type = IIO_ST('s', 16, 32, 0)
},
@@ -876,7 +863,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.channel = 2,
.channel2 = 2,
.address = AD7793_CH_AIN1M_AIN1M,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = 2,
.scan_type = IIO_ST('s', 16, 32, 0)
},
@@ -885,7 +872,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.indexed = 1,
.channel = 0,
.address = AD7793_CH_TEMP,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.scan_index = 4,
.scan_type = IIO_ST('s', 16, 32, 0),
},
@@ -895,7 +882,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = {
.indexed = 1,
.channel = 4,
.address = AD7793_CH_AVDD_MONITOR,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.scan_index = 5,
.scan_type = IIO_ST('s', 16, 32, 0),
},
@@ -1034,29 +1021,18 @@ static const struct spi_device_id ad7793_id[] = {
{"ad7793", ID_AD7793},
{}
};
+MODULE_DEVICE_TABLE(spi, ad7793_id);
static struct spi_driver ad7793_driver = {
.driver = {
.name = "ad7793",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad7793_probe,
.remove = __devexit_p(ad7793_remove),
.id_table = ad7793_id,
};
-
-static int __init ad7793_init(void)
-{
- return spi_register_driver(&ad7793_driver);
-}
-module_init(ad7793_init);
-
-static void __exit ad7793_exit(void)
-{
- spi_unregister_driver(&ad7793_driver);
-}
-module_exit(ad7793_exit);
+module_spi_driver(ad7793_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7792/3 ADC");
diff --git a/drivers/staging/iio/adc/ad7816.c b/drivers/staging/iio/adc/ad7816.c
index bdb9049..52b720e 100644
--- a/drivers/staging/iio/adc/ad7816.c
+++ b/drivers/staging/iio/adc/ad7816.c
@@ -18,6 +18,7 @@
#include "../iio.h"
#include "../sysfs.h"
+#include "../events.h"
/*
* AD7816 config masks
@@ -459,28 +460,15 @@ MODULE_DEVICE_TABLE(spi, ad7816_id);
static struct spi_driver ad7816_driver = {
.driver = {
.name = "ad7816",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad7816_probe,
.remove = __devexit_p(ad7816_remove),
.id_table = ad7816_id,
};
-
-static __init int ad7816_init(void)
-{
- return spi_register_driver(&ad7816_driver);
-}
-
-static __exit void ad7816_exit(void)
-{
- spi_unregister_driver(&ad7816_driver);
-}
+module_spi_driver(ad7816_driver);
MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
MODULE_DESCRIPTION("Analog Devices AD7816/7/8 digital"
" temperature sensor driver");
MODULE_LICENSE("GPL v2");
-
-module_init(ad7816_init);
-module_exit(ad7816_exit);
diff --git a/drivers/staging/iio/adc/ad7887.h b/drivers/staging/iio/adc/ad7887.h
index 3452d18..bc53b65 100644
--- a/drivers/staging/iio/adc/ad7887.h
+++ b/drivers/staging/iio/adc/ad7887.h
@@ -83,14 +83,9 @@ enum ad7887_supported_device_ids {
};
#ifdef CONFIG_IIO_BUFFER
-int ad7887_scan_from_ring(struct ad7887_state *st, int channum);
int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void ad7887_ring_cleanup(struct iio_dev *indio_dev);
#else /* CONFIG_IIO_BUFFER */
-static inline int ad7887_scan_from_ring(struct ad7887_state *st, int channum)
-{
- return 0;
-}
static inline int
ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev)
diff --git a/drivers/staging/iio/adc/ad7887_core.c b/drivers/staging/iio/adc/ad7887_core.c
index 609dcd5..e9bbc3e 100644
--- a/drivers/staging/iio/adc/ad7887_core.c
+++ b/drivers/staging/iio/adc/ad7887_core.c
@@ -17,7 +17,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "ad7887.h"
@@ -45,7 +45,7 @@ static int ad7887_read_raw(struct iio_dev *indio_dev,
case 0:
mutex_lock(&indio_dev->mlock);
if (iio_buffer_enabled(indio_dev))
- ret = ad7887_scan_from_ring(st, 1 << chan->address);
+ ret = -EBUSY;
else
ret = ad7887_scan_direct(st, chan->address);
mutex_unlock(&indio_dev->mlock);
@@ -55,7 +55,7 @@ static int ad7887_read_raw(struct iio_dev *indio_dev,
*val = (ret >> st->chip_info->channel[0].scan_type.shift) &
RES_MASK(st->chip_info->channel[0].scan_type.realbits);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
scale_uv = (st->int_vref_mv * 1000)
>> st->chip_info->channel[0].scan_type.realbits;
*val = scale_uv/1000;
@@ -75,7 +75,7 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 1,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = 1,
.scan_index = 1,
.scan_type = IIO_ST('u', 12, 16, 0),
@@ -84,7 +84,7 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = 0,
.scan_index = 0,
.scan_type = IIO_ST('u', 12, 16, 0),
@@ -246,31 +246,19 @@ static const struct spi_device_id ad7887_id[] = {
{"ad7887", ID_AD7887},
{}
};
+MODULE_DEVICE_TABLE(spi, ad7887_id);
static struct spi_driver ad7887_driver = {
.driver = {
.name = "ad7887",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad7887_probe,
.remove = __devexit_p(ad7887_remove),
.id_table = ad7887_id,
};
-
-static int __init ad7887_init(void)
-{
- return spi_register_driver(&ad7887_driver);
-}
-module_init(ad7887_init);
-
-static void __exit ad7887_exit(void)
-{
- spi_unregister_driver(&ad7887_driver);
-}
-module_exit(ad7887_exit);
+module_spi_driver(ad7887_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7887 ADC");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:ad7887");
diff --git a/drivers/staging/iio/adc/ad7887_ring.c b/drivers/staging/iio/adc/ad7887_ring.c
index cb74cad..85076cd 100644
--- a/drivers/staging/iio/adc/ad7887_ring.c
+++ b/drivers/staging/iio/adc/ad7887_ring.c
@@ -13,46 +13,12 @@
#include <linux/spi/spi.h>
#include "../iio.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_sw.h"
#include "../trigger_consumer.h"
#include "ad7887.h"
-int ad7887_scan_from_ring(struct ad7887_state *st, int channum)
-{
- struct iio_buffer *ring = iio_priv_to_dev(st)->buffer;
- int count = 0, ret;
- u16 *ring_data;
-
- if (!(test_bit(channum, ring->scan_mask))) {
- ret = -EBUSY;
- goto error_ret;
- }
-
- ring_data = kmalloc(ring->access->get_bytes_per_datum(ring),
- GFP_KERNEL);
- if (ring_data == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- ret = ring->access->read_last(ring, (u8 *) ring_data);
- if (ret)
- goto error_free_ring_data;
-
- /* for single channel scan the result is stored with zero offset */
- if ((test_bit(1, ring->scan_mask) || test_bit(0, ring->scan_mask)) &&
- (channum == 1))
- count = 1;
-
- ret = be16_to_cpu(ring_data[count]);
-
-error_free_ring_data:
- kfree(ring_data);
-error_ret:
- return ret;
-}
-
/**
* ad7887_ring_preenable() setup the parameters of the ring before enabling
*
@@ -65,7 +31,8 @@ static int ad7887_ring_preenable(struct iio_dev *indio_dev)
struct ad7887_state *st = iio_priv(indio_dev);
struct iio_buffer *ring = indio_dev->buffer;
- st->d_size = ring->scan_count *
+ st->d_size = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) *
st->chip_info->channel[0].scan_type.storagebits / 8;
if (ring->scan_timestamp) {
@@ -80,7 +47,7 @@ static int ad7887_ring_preenable(struct iio_dev *indio_dev)
set_bytes_per_datum(indio_dev->buffer, st->d_size);
/* We know this is a single long so can 'cheat' */
- switch (*ring->scan_mask) {
+ switch (*indio_dev->active_scan_mask) {
case (1 << 0):
st->ring_msg = &st->msg[AD7887_CH0];
break;
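
For parts with only a handful of channels the active scan mask fits entirely in its first word, which is why ad7887 and ad799x dereference indio_dev->active_scan_mask directly ("cheat", per the comment above) rather than walking the bitmap. Sketch, with a hypothetical shift constant:

#define EXAMPLE_CHANNEL_SHIFT 4		/* hypothetical */

static u16 example_build_command(struct iio_dev *indio_dev, u16 config)
{
	/* safe only while masklength <= BITS_PER_LONG */
	unsigned long mask = *indio_dev->active_scan_mask;

	return config | (mask << EXAMPLE_CHANNEL_SHIFT);
}
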
@@ -121,7 +88,8 @@ static irqreturn_t ad7887_trigger_handler(int irq, void *p)
__u8 *buf;
int b_sent;
- unsigned int bytes = ring->scan_count *
+ unsigned int bytes = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) *
st->chip_info->channel[0].scan_type.storagebits / 8;
buf = kzalloc(st->d_size, GFP_KERNEL);
@@ -176,7 +144,7 @@ int ad7887_register_ring_funcs_and_init(struct iio_dev *indio_dev)
goto error_deallocate_sw_rb;
}
/* Ring buffer functions - here trigger setup related */
- indio_dev->buffer->setup_ops = &ad7887_ring_setup_ops;
+ indio_dev->setup_ops = &ad7887_ring_setup_ops;
/* Flag that polled ring buffering is possible */
indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
diff --git a/drivers/staging/iio/adc/ad799x.h b/drivers/staging/iio/adc/ad799x.h
index eda01d5..356f690 100644
--- a/drivers/staging/iio/adc/ad799x.h
+++ b/drivers/staging/iio/adc/ad799x.h
@@ -124,15 +124,9 @@ struct ad799x_platform_data {
int ad7997_8_set_scan_mode(struct ad799x_state *st, unsigned mask);
#ifdef CONFIG_AD799X_RING_BUFFER
-int ad799x_single_channel_from_ring(struct iio_dev *indio_dev, int channum);
int ad799x_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void ad799x_ring_cleanup(struct iio_dev *indio_dev);
#else /* CONFIG_AD799X_RING_BUFFER */
-int ad799x_single_channel_from_ring(struct iio_dev *indio_dev, int channum)
-{
- return -EINVAL;
-}
-
static inline int
ad799x_register_ring_funcs_and_init(struct iio_dev *indio_dev)
diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
index ee6cd79..d5b581d 100644
--- a/drivers/staging/iio/adc/ad799x_core.c
+++ b/drivers/staging/iio/adc/ad799x_core.c
@@ -35,7 +35,8 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../events.h"
+#include "../buffer.h"
#include "ad799x.h"
@@ -150,8 +151,7 @@ static int ad799x_read_raw(struct iio_dev *indio_dev,
case 0:
mutex_lock(&indio_dev->mlock);
if (iio_buffer_enabled(indio_dev))
- ret = ad799x_single_channel_from_ring(indio_dev,
- chan->scan_index);
+ ret = -EBUSY;
else
ret = ad799x_scan_direct(st, chan->scan_index);
mutex_unlock(&indio_dev->mlock);
@@ -161,7 +161,7 @@ static int ad799x_read_raw(struct iio_dev *indio_dev,
*val = (ret >> chan->scan_type.shift) &
RES_MASK(chan->scan_type.realbits);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
scale_uv = (st->int_vref_mv * 1000) >> chan->scan_type.realbits;
*val = scale_uv / 1000;
*val2 = (scale_uv % 1000) * 1000;
@@ -929,21 +929,8 @@ static struct i2c_driver ad799x_driver = {
.remove = __devexit_p(ad799x_remove),
.id_table = ad799x_id,
};
-
-static __init int ad799x_init(void)
-{
- return i2c_add_driver(&ad799x_driver);
-}
-
-static __exit void ad799x_exit(void)
-{
- i2c_del_driver(&ad799x_driver);
-}
+module_i2c_driver(ad799x_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD799x ADC");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("i2c:ad799x");
-
-module_init(ad799x_init);
-module_exit(ad799x_exit);
diff --git a/drivers/staging/iio/adc/ad799x_ring.c b/drivers/staging/iio/adc/ad799x_ring.c
index e3f4698..5dded9e 100644
--- a/drivers/staging/iio/adc/ad799x_ring.c
+++ b/drivers/staging/iio/adc/ad799x_ring.c
@@ -17,42 +17,12 @@
#include <linux/bitops.h>
#include "../iio.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_sw.h"
#include "../trigger_consumer.h"
#include "ad799x.h"
-int ad799x_single_channel_from_ring(struct iio_dev *indio_dev, int channum)
-{
- struct iio_buffer *ring = indio_dev->buffer;
- int count = 0, ret;
- u16 *ring_data;
-
- if (!(test_bit(channum, ring->scan_mask))) {
- ret = -EBUSY;
- goto error_ret;
- }
-
- ring_data = kmalloc(ring->access->get_bytes_per_datum(ring),
- GFP_KERNEL);
- if (ring_data == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- ret = ring->access->read_last(ring, (u8 *) ring_data);
- if (ret)
- goto error_free_ring_data;
- /* Need a count of channels prior to this one */
- count = bitmap_weight(ring->scan_mask, channum);
- ret = be16_to_cpu(ring_data[count]);
-
-error_free_ring_data:
- kfree(ring_data);
-error_ret:
- return ret;
-}
-
/**
* ad799x_ring_preenable() setup the parameters of the ring before enabling
*
@@ -71,9 +41,10 @@ static int ad799x_ring_preenable(struct iio_dev *indio_dev)
*/
if (st->id == ad7997 || st->id == ad7998)
- ad7997_8_set_scan_mode(st, *ring->scan_mask);
+ ad7997_8_set_scan_mode(st, *indio_dev->active_scan_mask);
- st->d_size = ring->scan_count * 2;
+ st->d_size = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) * 2;
if (ring->scan_timestamp) {
st->d_size += sizeof(s64);
@@ -115,12 +86,13 @@ static irqreturn_t ad799x_trigger_handler(int irq, void *p)
case ad7991:
case ad7995:
case ad7999:
- cmd = st->config | (*ring->scan_mask << AD799X_CHANNEL_SHIFT);
+ cmd = st->config |
+ (*indio_dev->active_scan_mask << AD799X_CHANNEL_SHIFT);
break;
case ad7992:
case ad7993:
case ad7994:
- cmd = (*ring->scan_mask << AD799X_CHANNEL_SHIFT) |
+ cmd = (*indio_dev->active_scan_mask << AD799X_CHANNEL_SHIFT) |
AD7998_CONV_RES_REG;
break;
case ad7997:
@@ -132,7 +104,8 @@ static irqreturn_t ad799x_trigger_handler(int irq, void *p)
}
b_sent = i2c_smbus_read_i2c_block_data(st->client,
- cmd, ring->scan_count * 2, rxbuf);
+ cmd, bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) * 2, rxbuf);
if (b_sent < 0)
goto done;
@@ -183,7 +156,7 @@ int ad799x_register_ring_funcs_and_init(struct iio_dev *indio_dev)
}
/* Ring buffer functions - here trigger setup related */
- indio_dev->buffer->setup_ops = &ad799x_buf_setup_ops;
+ indio_dev->setup_ops = &ad799x_buf_setup_ops;
indio_dev->buffer->scan_timestamp = true;
/* Flag that polled ring buffering is possible */
diff --git a/drivers/staging/iio/adc/adt7310.c b/drivers/staging/iio/adc/adt7310.c
index c9e0be3..eec2f32 100644
--- a/drivers/staging/iio/adc/adt7310.c
+++ b/drivers/staging/iio/adc/adt7310.c
@@ -17,7 +17,7 @@
#include "../iio.h"
#include "../sysfs.h"
-
+#include "../events.h"
/*
* ADT7310 registers definition
*/
@@ -877,28 +877,15 @@ MODULE_DEVICE_TABLE(spi, adt7310_id);
static struct spi_driver adt7310_driver = {
.driver = {
.name = "adt7310",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = adt7310_probe,
.remove = __devexit_p(adt7310_remove),
.id_table = adt7310_id,
};
-
-static __init int adt7310_init(void)
-{
- return spi_register_driver(&adt7310_driver);
-}
-
-static __exit void adt7310_exit(void)
-{
- spi_unregister_driver(&adt7310_driver);
-}
+module_spi_driver(adt7310_driver);
MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
MODULE_DESCRIPTION("Analog Devices ADT7310 digital"
" temperature sensor driver");
MODULE_LICENSE("GPL v2");
-
-module_init(adt7310_init);
-module_exit(adt7310_exit);
diff --git a/drivers/staging/iio/adc/adt7410.c b/drivers/staging/iio/adc/adt7410.c
index a289e42..c62248c 100644
--- a/drivers/staging/iio/adc/adt7410.c
+++ b/drivers/staging/iio/adc/adt7410.c
@@ -17,6 +17,7 @@
#include "../iio.h"
#include "../sysfs.h"
+#include "../events.h"
/*
* ADT7410 registers definition
@@ -844,21 +845,9 @@ static struct i2c_driver adt7410_driver = {
.remove = __devexit_p(adt7410_remove),
.id_table = adt7410_id,
};
-
-static __init int adt7410_init(void)
-{
- return i2c_add_driver(&adt7410_driver);
-}
-
-static __exit void adt7410_exit(void)
-{
- i2c_del_driver(&adt7410_driver);
-}
+module_i2c_driver(adt7410_driver);
MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
MODULE_DESCRIPTION("Analog Devices ADT7410 digital"
" temperature sensor driver");
MODULE_LICENSE("GPL v2");
-
-module_init(adt7410_init);
-module_exit(adt7410_exit);
diff --git a/drivers/staging/iio/adc/max1363.h b/drivers/staging/iio/adc/max1363.h
index cbcb08a..2cd0112 100644
--- a/drivers/staging/iio/adc/max1363.h
+++ b/drivers/staging/iio/adc/max1363.h
@@ -146,22 +146,22 @@ struct max1363_state {
};
const struct max1363_mode
-*max1363_match_mode(unsigned long *mask, const struct max1363_chip_info *ci);
+*max1363_match_mode(const unsigned long *mask,
+ const struct max1363_chip_info *ci);
int max1363_set_scan_mode(struct max1363_state *st);
#ifdef CONFIG_MAX1363_RING_BUFFER
-
-int max1363_single_channel_from_ring(const long *mask,
- struct max1363_state *st);
+int max1363_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask);
int max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev);
void max1363_ring_cleanup(struct iio_dev *indio_dev);
#else /* CONFIG_MAX1363_RING_BUFFER */
-
-int max1363_single_channel_from_ring(long mask, struct max1363_state *st)
+static inline int max1363_update_scan_mode(struct iio_dev *indio_dev,
+					    const unsigned long *scan_mask)
{
- return -EINVAL;
+ return 0;
}
static inline int
diff --git a/drivers/staging/iio/adc/max1363_core.c b/drivers/staging/iio/adc/max1363_core.c
index eb699ad..b92cb4a 100644
--- a/drivers/staging/iio/adc/max1363_core.c
+++ b/drivers/staging/iio/adc/max1363_core.c
@@ -34,7 +34,8 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../events.h"
+#include "../buffer.h"
#include "max1363.h"
@@ -147,7 +148,8 @@ static const struct max1363_mode max1363_mode_table[] = {
};
const struct max1363_mode
-*max1363_match_mode(unsigned long *mask, const struct max1363_chip_info *ci)
+*max1363_match_mode(const unsigned long *mask,
+		    const struct max1363_chip_info *ci)
{
int i;
if (mask)
@@ -189,7 +191,6 @@ static int max1363_read_single_chan(struct iio_dev *indio_dev,
int ret = 0;
s32 data;
char rxbuf[2];
- const unsigned long *mask;
struct max1363_state *st = iio_priv(indio_dev);
struct i2c_client *client = st->client;
@@ -198,46 +199,38 @@ static int max1363_read_single_chan(struct iio_dev *indio_dev,
* If monitor mode is enabled, the method for reading a single
* channel will have to be rather different and has not yet
* been implemented.
+ *
+ * Also, cannot read directly if buffered capture enabled.
*/
- if (st->monitor_on) {
+ if (st->monitor_on || iio_buffer_enabled(indio_dev)) {
ret = -EBUSY;
goto error_ret;
}
- /* If ring buffer capture is occurring, query the buffer */
- if (iio_buffer_enabled(indio_dev)) {
- mask = max1363_mode_table[chan->address].modemask;
- data = max1363_single_channel_from_ring(mask, st);
+ /* Check to see if current scan mode is correct */
+ if (st->current_mode != &max1363_mode_table[chan->address]) {
+ /* Update scan mode if needed */
+ st->current_mode = &max1363_mode_table[chan->address];
+ ret = max1363_set_scan_mode(st);
+ if (ret < 0)
+ goto error_ret;
+ }
+ if (st->chip_info->bits != 8) {
+ /* Get reading */
+ data = i2c_master_recv(client, rxbuf, 2);
if (data < 0) {
ret = data;
goto error_ret;
}
+ data = (s32)(rxbuf[1]) | ((s32)(rxbuf[0] & 0x0F)) << 8;
} else {
- /* Check to see if current scan mode is correct */
- if (st->current_mode != &max1363_mode_table[chan->address]) {
- /* Update scan mode if needed */
- st->current_mode = &max1363_mode_table[chan->address];
- ret = max1363_set_scan_mode(st);
- if (ret < 0)
- goto error_ret;
- }
- if (st->chip_info->bits != 8) {
- /* Get reading */
- data = i2c_master_recv(client, rxbuf, 2);
- if (data < 0) {
- ret = data;
- goto error_ret;
- }
- data = (s32)(rxbuf[1]) | ((s32)(rxbuf[0] & 0x0F)) << 8;
- } else {
- /* Get reading */
- data = i2c_master_recv(client, rxbuf, 1);
- if (data < 0) {
- ret = data;
- goto error_ret;
- }
- data = rxbuf[0];
+ /* Get reading */
+ data = i2c_master_recv(client, rxbuf, 1);
+ if (data < 0) {
+ ret = data;
+ goto error_ret;
}
+ data = rxbuf[0];
}
*val = data;
error_ret:
@@ -260,7 +253,7 @@ static int max1363_read_raw(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
if ((1 << (st->chip_info->bits + 1)) >
st->chip_info->int_vref_mv) {
*val = 0;
@@ -288,7 +281,7 @@ static const enum max1363_modes max1363_mode_list[] = {
#define MAX1363_EV_M \
(IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) \
| IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING))
-#define MAX1363_INFO_MASK (1 << IIO_CHAN_INFO_SCALE_SHARED)
+#define MAX1363_INFO_MASK IIO_CHAN_INFO_SCALE_SHARED_BIT
#define MAX1363_CHAN_U(num, addr, si, bits, evmask) \
{ \
.type = IIO_VOLTAGE, \
@@ -296,7 +289,13 @@ static const enum max1363_modes max1363_mode_list[] = {
.channel = num, \
.address = addr, \
.info_mask = MAX1363_INFO_MASK, \
- .scan_type = IIO_ST('u', bits, (bits > 8) ? 16 : 8, 0), \
+ .datasheet_name = "AIN"#num, \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = bits, \
+ .storagebits = (bits > 8) ? 16 : 8, \
+ .endianness = IIO_BE, \
+ }, \
.scan_index = si, \
.event_mask = evmask, \
}
@@ -311,7 +310,13 @@ static const enum max1363_modes max1363_mode_list[] = {
.channel2 = num2, \
.address = addr, \
.info_mask = MAX1363_INFO_MASK, \
- .scan_type = IIO_ST('u', bits, (bits > 8) ? 16 : 8, 0), \
+ .datasheet_name = "AIN"#num"-AIN"#num2, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = bits, \
+ .storagebits = (bits > 8) ? 16 : 8, \
+ .endianness = IIO_BE, \
+ }, \
.scan_index = si, \
.event_mask = evmask, \
}
@@ -833,6 +838,7 @@ static const struct iio_info max1363_info = {
.read_event_config = &max1363_read_event_config,
.write_event_config = &max1363_write_event_config,
.read_raw = &max1363_read_raw,
+ .update_scan_mode = &max1363_update_scan_mode,
.driver_module = THIS_MODULE,
.event_attrs = &max1363_event_attribute_group,
};
@@ -1410,20 +1416,8 @@ static struct i2c_driver max1363_driver = {
.remove = max1363_remove,
.id_table = max1363_id,
};
-
-static __init int max1363_init(void)
-{
- return i2c_add_driver(&max1363_driver);
-}
-
-static __exit void max1363_exit(void)
-{
- i2c_del_driver(&max1363_driver);
-}
+module_i2c_driver(max1363_driver);
MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>");
MODULE_DESCRIPTION("Maxim 1363 ADC");
MODULE_LICENSE("GPL v2");
-
-module_init(max1363_init);
-module_exit(max1363_exit);
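For reference, the open-coded scan_type in the channel macros above carries the same fields the old IIO_ST() helper packed, plus an endianness that IIO_ST() could not express. A side-by-side sketch (initializer fragments only, not from the patch):

/* before: packed helper, no way to state endianness */
.scan_type = IIO_ST('u', bits, (bits > 8) ? 16 : 8, 0),

/* after: explicit fields, big-endian samples declared */
.scan_type = {
	.sign = 'u',
	.realbits = bits,
	.storagebits = (bits > 8) ? 16 : 8,
	.shift = 0,
	.endianness = IIO_BE,
},
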
diff --git a/drivers/staging/iio/adc/max1363_ring.c b/drivers/staging/iio/adc/max1363_ring.c
index df6893e..f730b3f 100644
--- a/drivers/staging/iio/adc/max1363_ring.c
+++ b/drivers/staging/iio/adc/max1363_ring.c
@@ -15,88 +15,25 @@
#include <linux/bitops.h>
#include "../iio.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_sw.h"
#include "../trigger_consumer.h"
#include "max1363.h"
-int max1363_single_channel_from_ring(const long *mask, struct max1363_state *st)
-{
- struct iio_buffer *ring = iio_priv_to_dev(st)->buffer;
- int count = 0, ret, index;
- u8 *ring_data;
- index = find_first_bit(mask, MAX1363_MAX_CHANNELS);
-
- if (!(test_bit(index, st->current_mode->modemask))) {
- ret = -EBUSY;
- goto error_ret;
- }
-
- ring_data = kmalloc(ring->access->get_bytes_per_datum(ring),
- GFP_KERNEL);
- if (ring_data == NULL) {
- ret = -ENOMEM;
- goto error_ret;
- }
- ret = ring->access->read_last(ring, ring_data);
- if (ret)
- goto error_free_ring_data;
- /* Need a count of channels prior to this one */
-
- count = bitmap_weight(mask, index - 1);
- if (st->chip_info->bits != 8)
- ret = ((int)(ring_data[count*2 + 0] & 0x0F) << 8)
- + (int)(ring_data[count*2 + 1]);
- else
- ret = ring_data[count];
-
-error_free_ring_data:
- kfree(ring_data);
-error_ret:
- return ret;
-}
-
-
-/**
- * max1363_ring_preenable() - setup the parameters of the ring before enabling
- *
- * The complex nature of the setting of the nuber of bytes per datum is due
- * to this driver currently ensuring that the timestamp is stored at an 8
- * byte boundary.
- **/
-static int max1363_ring_preenable(struct iio_dev *indio_dev)
+int max1363_update_scan_mode(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask)
{
struct max1363_state *st = iio_priv(indio_dev);
- struct iio_buffer *ring = indio_dev->buffer;
- size_t d_size = 0;
- unsigned long numvals;
/*
* Need to figure out the current mode based upon the requested
* scan mask in iio_dev
*/
- st->current_mode = max1363_match_mode(ring->scan_mask,
- st->chip_info);
+ st->current_mode = max1363_match_mode(scan_mask, st->chip_info);
if (!st->current_mode)
return -EINVAL;
-
max1363_set_scan_mode(st);
-
- numvals = bitmap_weight(st->current_mode->modemask,
- indio_dev->masklength);
- if (ring->access->set_bytes_per_datum) {
- if (ring->scan_timestamp)
- d_size += sizeof(s64);
- if (st->chip_info->bits != 8)
- d_size += numvals*2;
- else
- d_size += numvals;
- if (ring->scan_timestamp && (d_size % 8))
- d_size += 8 - (d_size % 8);
- ring->access->set_bytes_per_datum(ring, d_size);
- }
-
return 0;
}
@@ -114,12 +51,14 @@ static irqreturn_t max1363_trigger_handler(int irq, void *p)
/* Ensure the timestamp is 8 byte aligned */
if (st->chip_info->bits != 8)
- d_size = numvals*2 + sizeof(s64);
+ d_size = numvals*2;
else
- d_size = numvals + sizeof(s64);
- if (d_size % sizeof(s64))
- d_size += sizeof(s64) - (d_size % sizeof(s64));
-
+ d_size = numvals;
+ if (indio_dev->buffer->scan_timestamp) {
+ d_size += sizeof(s64);
+ if (d_size % sizeof(s64))
+ d_size += sizeof(s64) - (d_size % sizeof(s64));
+ }
/* Monitor mode prevents reading. Whilst not currently implemented
* might as well have this test in here in the meantime as it does
* no harm.
@@ -139,9 +78,10 @@ static irqreturn_t max1363_trigger_handler(int irq, void *p)
time_ns = iio_get_time_ns();
- memcpy(rxbuf + d_size - sizeof(s64), &time_ns, sizeof(time_ns));
+ if (indio_dev->buffer->scan_timestamp)
+ memcpy(rxbuf + d_size - sizeof(s64), &time_ns, sizeof(time_ns));
+ iio_push_to_buffer(indio_dev->buffer, rxbuf, time_ns);
- indio_dev->buffer->access->store_to(indio_dev->buffer, rxbuf, time_ns);
done:
iio_trigger_notify_done(indio_dev->trig);
kfree(rxbuf);
@@ -151,7 +91,7 @@ done:
static const struct iio_buffer_setup_ops max1363_ring_setup_ops = {
.postenable = &iio_triggered_buffer_postenable,
- .preenable = &max1363_ring_preenable,
+ .preenable = &iio_sw_buffer_preenable,
.predisable = &iio_triggered_buffer_predisable,
};
@@ -179,7 +119,7 @@ int max1363_register_ring_funcs_and_init(struct iio_dev *indio_dev)
/* Effectively select the ring buffer implementation */
indio_dev->buffer->access = &ring_sw_access_funcs;
/* Ring buffer functions - here trigger setup related */
- indio_dev->buffer->setup_ops = &max1363_ring_setup_ops;
+ indio_dev->setup_ops = &max1363_ring_setup_ops;
/* Flag that polled ring buffering is possible */
indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
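Two handler-side changes above, conditional timestamp padding and iio_push_to_buffer() in place of buffer->access->store_to(), combine into a pattern along these lines. A minimal sketch assuming a fixed-size scan and a hypothetical device:

/* Sketch only: pad for an 8-byte-aligned timestamp, then push to the core. */
static irqreturn_t example_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	size_t d_size = 4;		/* assumed: two 16-bit samples */
	s64 time_ns;
	u8 *buf;

	if (indio_dev->buffer->scan_timestamp)
		d_size = ALIGN(d_size, sizeof(s64)) + sizeof(s64);

	buf = kzalloc(d_size, GFP_KERNEL);
	if (buf == NULL)
		goto done;

	/* ... read the enabled channels into buf ... */

	time_ns = iio_get_time_ns();
	if (indio_dev->buffer->scan_timestamp)
		memcpy(buf + d_size - sizeof(s64), &time_ns, sizeof(time_ns));

	iio_push_to_buffer(indio_dev->buffer, buf, time_ns);
	kfree(buf);
done:
	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}
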
diff --git a/drivers/staging/iio/addac/adt7316-i2c.c b/drivers/staging/iio/addac/adt7316-i2c.c
index 07d718e..2c03a39 100644
--- a/drivers/staging/iio/addac/adt7316-i2c.c
+++ b/drivers/staging/iio/addac/adt7316-i2c.c
@@ -151,21 +151,9 @@ static struct i2c_driver adt7316_driver = {
.resume = adt7316_i2c_resume,
.id_table = adt7316_i2c_id,
};
-
-static __init int adt7316_i2c_init(void)
-{
- return i2c_add_driver(&adt7316_driver);
-}
-
-static __exit void adt7316_i2c_exit(void)
-{
- i2c_del_driver(&adt7316_driver);
-}
+module_i2c_driver(adt7316_driver);
MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
MODULE_DESCRIPTION("I2C bus driver for Analog Devices ADT7316/7/9 and"
"ADT7516/7/8 digital temperature sensor, ADC and DAC");
MODULE_LICENSE("GPL v2");
-
-module_init(adt7316_i2c_init);
-module_exit(adt7316_i2c_exit);
diff --git a/drivers/staging/iio/addac/adt7316-spi.c b/drivers/staging/iio/addac/adt7316-spi.c
index 369d4d0..1ea3cd0 100644
--- a/drivers/staging/iio/addac/adt7316-spi.c
+++ b/drivers/staging/iio/addac/adt7316-spi.c
@@ -151,7 +151,6 @@ static int adt7316_spi_resume(struct spi_device *spi_dev)
static struct spi_driver adt7316_driver = {
.driver = {
.name = "adt7316",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = adt7316_spi_probe,
@@ -160,21 +159,9 @@ static struct spi_driver adt7316_driver = {
.resume = adt7316_spi_resume,
.id_table = adt7316_spi_id,
};
-
-static __init int adt7316_spi_init(void)
-{
- return spi_register_driver(&adt7316_driver);
-}
-
-static __exit void adt7316_spi_exit(void)
-{
- spi_unregister_driver(&adt7316_driver);
-}
+module_spi_driver(adt7316_driver);
MODULE_AUTHOR("Sonic Zhang <sonic.zhang@analog.com>");
MODULE_DESCRIPTION("SPI bus driver for Analog Devices ADT7316/7/8 and"
"ADT7516/7/9 digital temperature sensor, ADC and DAC");
MODULE_LICENSE("GPL v2");
-
-module_init(adt7316_spi_init);
-module_exit(adt7316_spi_exit);
diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
index 8df2470..13c3929 100644
--- a/drivers/staging/iio/addac/adt7316.c
+++ b/drivers/staging/iio/addac/adt7316.c
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include "../iio.h"
+#include "../events.h"
#include "../sysfs.h"
#include "adt7316.h"
diff --git a/drivers/staging/iio/buffer_generic.h b/drivers/staging/iio/buffer.h
index 9e8f010..6fb6e64 100644
--- a/drivers/staging/iio/buffer_generic.h
+++ b/drivers/staging/iio/buffer.h
@@ -11,7 +11,6 @@
#define _IIO_BUFFER_GENERIC_H_
#include <linux/sysfs.h>
#include "iio.h"
-#include "chrdev.h"
#ifdef CONFIG_IIO_BUFFER
@@ -19,22 +18,14 @@ struct iio_buffer;
/**
* struct iio_buffer_access_funcs - access functions for buffers.
- * @mark_in_use: reference counting, typically to prevent module removal
- * @unmark_in_use: reduce reference count when no longer using buffer
* @store_to: actually store stuff to the buffer
- * @read_last: get the last element stored
- * @read_first_n: try to get a specified number of elements (must exist)
- * @mark_param_change: notify buffer that some relevant parameter has changed
- * Often this means the underlying storage may need to
- * change.
+ * @read_first_n: try to get a specified number of bytes (must exist)
* @request_update: if a parameter change has been marked, update underlying
* storage.
* @get_bytes_per_datum:get current bytes per datum
* @set_bytes_per_datum:set number of bytes per datum
* @get_length: get number of datums in buffer
* @set_length: set number of datums in buffer
- * @is_enabled: query if buffer is currently being used
- * @enable: enable the buffer
*
* The purpose of this structure is to make the buffer element
 * modular as, even for a given driver, different use cases may require
@@ -45,85 +36,60 @@ struct iio_buffer;
* any of them not existing.
**/
struct iio_buffer_access_funcs {
- void (*mark_in_use)(struct iio_buffer *buffer);
- void (*unmark_in_use)(struct iio_buffer *buffer);
-
int (*store_to)(struct iio_buffer *buffer, u8 *data, s64 timestamp);
- int (*read_last)(struct iio_buffer *buffer, u8 *data);
int (*read_first_n)(struct iio_buffer *buffer,
size_t n,
char __user *buf);
- int (*mark_param_change)(struct iio_buffer *buffer);
int (*request_update)(struct iio_buffer *buffer);
int (*get_bytes_per_datum)(struct iio_buffer *buffer);
int (*set_bytes_per_datum)(struct iio_buffer *buffer, size_t bpd);
int (*get_length)(struct iio_buffer *buffer);
int (*set_length)(struct iio_buffer *buffer, int length);
-
- int (*is_enabled)(struct iio_buffer *buffer);
- int (*enable)(struct iio_buffer *buffer);
-};
-
-/**
- * struct iio_buffer_setup_ops - buffer setup related callbacks
- * @preenable: [DRIVER] function to run prior to marking buffer enabled
- * @postenable: [DRIVER] function to run after marking buffer enabled
- * @predisable: [DRIVER] function to run prior to marking buffer
- * disabled
- * @postdisable: [DRIVER] function to run after marking buffer disabled
- */
-struct iio_buffer_setup_ops {
- int (*preenable)(struct iio_dev *);
- int (*postenable)(struct iio_dev *);
- int (*predisable)(struct iio_dev *);
- int (*postdisable)(struct iio_dev *);
};
/**
* struct iio_buffer - general buffer structure
- * @indio_dev: industrial I/O device structure
- * @owner: module that owns the buffer (for ref counting)
* @length: [DEVICE] number of datums in buffer
* @bytes_per_datum: [DEVICE] size of individual datum including timestamp
- * @bpe: [DEVICE] size of individual channel value
* @scan_el_attrs: [DRIVER] control of scan elements if that scan mode
* control method is used
- * @scan_count: [INTERN] the number of elements in the current scan mode
* @scan_mask: [INTERN] bitmask used in masking scan mode elements
+ * @scan_index_timestamp:[INTERN] cache of the index to the timestamp
* @scan_timestamp: [INTERN] does the scan mode include a timestamp
* @access: [DRIVER] buffer access functions associated with the
* implementation.
- * @flags: [INTERN] file ops related flags including busy flag.
+ * @scan_el_dev_attr_list:[INTERN] list of scan element related attributes.
+ * @scan_el_group: [DRIVER] attribute group for those attributes not
+ * created from the iio_chan_info array.
+ * @pollq: [INTERN] wait queue to allow for polling on the buffer.
+ * @stufftoread: [INTERN] flag to indicate new data.
+ * @demux_list: [INTERN] list of operations required to demux the scan.
+ * @demux_bounce: [INTERN] buffer for doing gather from incoming scan.
**/
struct iio_buffer {
- struct iio_dev *indio_dev;
- struct module *owner;
int length;
int bytes_per_datum;
- int bpe;
struct attribute_group *scan_el_attrs;
- int scan_count;
long *scan_mask;
bool scan_timestamp;
+ unsigned scan_index_timestamp;
const struct iio_buffer_access_funcs *access;
- const struct iio_buffer_setup_ops *setup_ops;
struct list_head scan_el_dev_attr_list;
struct attribute_group scan_el_group;
wait_queue_head_t pollq;
bool stufftoread;
- unsigned long flags;
const struct attribute_group *attrs;
+ struct list_head demux_list;
+ unsigned char *demux_bounce;
};
/**
* iio_buffer_init() - Initialize the buffer structure
* @buffer: buffer to be initialized
- * @indio_dev: the iio device the buffer is assocated with
**/
-void iio_buffer_init(struct iio_buffer *buffer,
- struct iio_dev *indio_dev);
+void iio_buffer_init(struct iio_buffer *buffer);
void iio_buffer_deinit(struct iio_buffer *buffer);
@@ -140,17 +106,27 @@ static inline void __iio_update_buffer(struct iio_buffer *buffer,
buffer->length = length;
}
-int iio_scan_mask_query(struct iio_buffer *buffer, int bit);
+int iio_scan_mask_query(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer, int bit);
/**
* iio_scan_mask_set() - set particular bit in the scan mask
* @buffer: the buffer whose scan mask we are interested in
* @bit: the bit to be set.
**/
-int iio_scan_mask_set(struct iio_buffer *buffer, int bit);
+int iio_scan_mask_set(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer, int bit);
-#define to_iio_buffer(d) \
- container_of(d, struct iio_buffer, dev)
+/**
+ * iio_push_to_buffer() - push to a registered buffer.
+ * @buffer: IIO buffer structure for device
+ * @data:		Full scan.
+ * @timestamp:	Timestamp of the scan.
+ */
+int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data,
+ s64 timestamp);
+
+int iio_update_demux(struct iio_dev *indio_dev);
/**
* iio_buffer_register() - register the buffer with IIO core
@@ -180,12 +156,6 @@ ssize_t iio_buffer_write_length(struct device *dev,
const char *buf,
size_t len);
/**
- * iio_buffer_read_bytes_per_datum() - attr for number of bytes in whole datum
- **/
-ssize_t iio_buffer_read_bytes_per_datum(struct device *dev,
- struct device_attribute *attr,
- char *buf);
-/**
* iio_buffer_store_enable() - attr to turn the buffer on
**/
ssize_t iio_buffer_store_enable(struct device *dev,
@@ -201,9 +171,6 @@ ssize_t iio_buffer_show_enable(struct device *dev,
#define IIO_BUFFER_LENGTH_ATTR DEVICE_ATTR(length, S_IRUGO | S_IWUSR, \
iio_buffer_read_length, \
iio_buffer_write_length)
-#define IIO_BUFFER_BYTES_PER_DATUM_ATTR \
- DEVICE_ATTR(bytes_per_datum, S_IRUGO | S_IWUSR, \
- iio_buffer_read_bytes_per_datum, NULL)
#define IIO_BUFFER_ENABLE_ATTR DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, \
iio_buffer_show_enable, \
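struct iio_buffer_setup_ops leaves this header because both the structure and the setup_ops pointer now hang off struct iio_dev, which is what the indio_dev->setup_ops assignments in the drivers above rely on. A minimal sketch of the resulting attach step, assuming a hypothetical driver and the generic helpers already referenced in this series:

/* Sketch only: buffer callbacks attached to the iio_dev, not the buffer. */
static const struct iio_buffer_setup_ops example_setup_ops = {
	.preenable = &iio_sw_buffer_preenable,
	.postenable = &iio_triggered_buffer_postenable,
	.predisable = &iio_triggered_buffer_predisable,
};

static void example_attach_buffer(struct iio_dev *indio_dev,
				  struct iio_buffer *buffer)
{
	indio_dev->buffer = buffer;
	indio_dev->buffer->access = &ring_sw_access_funcs;
	indio_dev->setup_ops = &example_setup_ops;
	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
}
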
diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
index a761ca9..b73007d 100644
--- a/drivers/staging/iio/cdc/ad7150.c
+++ b/drivers/staging/iio/cdc/ad7150.c
@@ -15,7 +15,7 @@
#include "../iio.h"
#include "../sysfs.h"
-
+#include "../events.h"
/*
* AD7150 registers definition
*/
@@ -111,7 +111,7 @@ static int ad7150_read_raw(struct iio_dev *indio_dev,
return ret;
*val = swab16(ret);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE):
+ case IIO_CHAN_INFO_AVERAGE_RAW:
ret = i2c_smbus_read_word_data(chip->client,
ad7150_addresses[chan->channel][1]);
if (ret < 0)
@@ -429,7 +429,7 @@ static const struct iio_chan_spec ad7150_channels[] = {
.type = IIO_CAPACITANCE,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE_BIT,
.event_mask =
IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) |
IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING) |
@@ -441,7 +441,7 @@ static const struct iio_chan_spec ad7150_channels[] = {
.type = IIO_CAPACITANCE,
.indexed = 1,
.channel = 1,
- .info_mask = (1 << IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE_BIT,
.event_mask =
IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) |
IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING) |
@@ -657,20 +657,8 @@ static struct i2c_driver ad7150_driver = {
.remove = __devexit_p(ad7150_remove),
.id_table = ad7150_id,
};
-
-static __init int ad7150_init(void)
-{
- return i2c_add_driver(&ad7150_driver);
-}
-
-static __exit void ad7150_exit(void)
-{
- i2c_del_driver(&ad7150_driver);
-}
+module_i2c_driver(ad7150_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices AD7150/1/6 capacitive sensor driver");
MODULE_LICENSE("GPL v2");
-
-module_init(ad7150_init);
-module_exit(ad7150_exit);
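The conversion above shows the split used throughout the rest of the series: channel declarations keep the bitmask macros (*_SEPARATE_BIT / *_SHARED_BIT), while read_raw()/write_raw() switch on the plain IIO_CHAN_INFO_* values the core now passes in. A minimal sketch with a hypothetical channel:

static const struct iio_chan_spec example_channels[] = {
	{
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.channel = 0,
		/* declaration side: still a bitmask */
		.info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT |
			     IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
	},
};

static int example_read_raw(struct iio_dev *indio_dev,
			    struct iio_chan_spec const *chan,
			    int *val, int *val2, long mask)
{
	switch (mask) {
	case 0:				/* raw reading */
		*val = 0;		/* ... fetch from hardware ... */
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_SCALE:	/* handler side: plain enum value */
		*val = 1;
		*val2 = 0;
		return IIO_VAL_INT_PLUS_MICRO;
	default:
		return -EINVAL;
	}
}
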
diff --git a/drivers/staging/iio/cdc/ad7152.c b/drivers/staging/iio/cdc/ad7152.c
index 662584d..fdb83c3 100644
--- a/drivers/staging/iio/cdc/ad7152.c
+++ b/drivers/staging/iio/cdc/ad7152.c
@@ -259,7 +259,7 @@ static int ad7152_write_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
if (val != 1) {
ret = -EINVAL;
goto out;
@@ -276,7 +276,7 @@ static int ad7152_write_raw(struct iio_dev *indio_dev,
ret = 0;
break;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
if ((val < 0) | (val > 0xFFFF)) {
ret = -EINVAL;
goto out;
@@ -289,7 +289,7 @@ static int ad7152_write_raw(struct iio_dev *indio_dev,
ret = 0;
break;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
if (val != 0) {
ret = -EINVAL;
goto out;
@@ -372,7 +372,7 @@ static int ad7152_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT;
break;
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
ret = i2c_smbus_read_word_data(chip->client,
ad7152_addresses[chan->channel][AD7152_GAIN]);
@@ -384,7 +384,7 @@ static int ad7152_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT_PLUS_MICRO;
break;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
ret = i2c_smbus_read_word_data(chip->client,
ad7152_addresses[chan->channel][AD7152_OFFS]);
if (ret < 0)
@@ -393,7 +393,7 @@ static int ad7152_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT;
break;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
ret = i2c_smbus_read_byte_data(chip->client,
ad7152_addresses[chan->channel][AD7152_SETUP]);
if (ret < 0)
@@ -416,7 +416,7 @@ static int ad7152_write_raw_get_fmt(struct iio_dev *indio_dev,
long mask)
{
switch (mask) {
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
return IIO_VAL_INT_PLUS_NANO;
default:
return IIO_VAL_INT_PLUS_MICRO;
@@ -436,34 +436,34 @@ static const struct iio_chan_spec ad7152_channels[] = {
.type = IIO_CAPACITANCE,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
}, {
.type = IIO_CAPACITANCE,
.differential = 1,
.indexed = 1,
.channel = 0,
.channel2 = 2,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
}, {
.type = IIO_CAPACITANCE,
.indexed = 1,
.channel = 1,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
}, {
.type = IIO_CAPACITANCE,
.differential = 1,
.indexed = 1,
.channel = 1,
.channel2 = 3,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
}
};
/*
@@ -540,20 +540,8 @@ static struct i2c_driver ad7152_driver = {
.remove = __devexit_p(ad7152_remove),
.id_table = ad7152_id,
};
-
-static __init int ad7152_init(void)
-{
- return i2c_add_driver(&ad7152_driver);
-}
-
-static __exit void ad7152_exit(void)
-{
- i2c_del_driver(&ad7152_driver);
-}
+module_i2c_driver(ad7152_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices AD7152/3 capacitive sensor driver");
MODULE_LICENSE("GPL v2");
-
-module_init(ad7152_init);
-module_exit(ad7152_exit);
diff --git a/drivers/staging/iio/cdc/ad7746.c b/drivers/staging/iio/cdc/ad7746.c
index 2867943..40b8512 100644
--- a/drivers/staging/iio/cdc/ad7746.c
+++ b/drivers/staging/iio/cdc/ad7746.c
@@ -123,7 +123,7 @@ static const struct iio_chan_spec ad7746_channels[] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = AD7746_REG_VT_DATA_HIGH << 8 |
AD7746_VTSETUP_VTMD_EXT_VIN,
},
@@ -132,7 +132,7 @@ static const struct iio_chan_spec ad7746_channels[] = {
.indexed = 1,
.channel = 1,
.extend_name = "supply",
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = AD7746_REG_VT_DATA_HIGH << 8 |
AD7746_VTSETUP_VTMD_VDD_MON,
},
@@ -156,10 +156,10 @@ static const struct iio_chan_spec ad7746_channels[] = {
.type = IIO_CAPACITANCE,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED) |
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SHARED_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = AD7746_REG_CAP_DATA_HIGH << 8,
},
[CIN1_DIFF] = {
@@ -168,10 +168,10 @@ static const struct iio_chan_spec ad7746_channels[] = {
.indexed = 1,
.channel = 0,
.channel2 = 2,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED) |
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SHARED_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = AD7746_REG_CAP_DATA_HIGH << 8 |
AD7746_CAPSETUP_CAPDIFF
},
@@ -179,10 +179,10 @@ static const struct iio_chan_spec ad7746_channels[] = {
.type = IIO_CAPACITANCE,
.indexed = 1,
.channel = 1,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED) |
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SHARED_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = AD7746_REG_CAP_DATA_HIGH << 8 |
AD7746_CAPSETUP_CIN2,
},
@@ -192,10 +192,10 @@ static const struct iio_chan_spec ad7746_channels[] = {
.indexed = 1,
.channel = 1,
.channel2 = 3,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) |
- (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED) |
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SHARED_BIT |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = AD7746_REG_CAP_DATA_HIGH << 8 |
AD7746_CAPSETUP_CAPDIFF | AD7746_CAPSETUP_CIN2,
}
@@ -477,7 +477,7 @@ static int ad7746_write_raw(struct iio_dev *indio_dev,
mutex_lock(&indio_dev->mlock);
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
if (val != 1) {
ret = -EINVAL;
goto out;
@@ -503,7 +503,7 @@ static int ad7746_write_raw(struct iio_dev *indio_dev,
ret = 0;
break;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED):
+ case IIO_CHAN_INFO_CALIBBIAS:
if ((val < 0) | (val > 0xFFFF)) {
ret = -EINVAL;
goto out;
@@ -515,7 +515,7 @@ static int ad7746_write_raw(struct iio_dev *indio_dev,
ret = 0;
break;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
if ((val < 0) | (val > 43008000)) { /* 21pF */
ret = -EINVAL;
goto out;
@@ -612,7 +612,7 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT;
break;
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
switch (chan->type) {
case IIO_CAPACITANCE:
reg = AD7746_REG_CAP_GAINH;
@@ -634,7 +634,7 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT_PLUS_MICRO;
break;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SHARED):
+ case IIO_CHAN_INFO_CALIBBIAS:
ret = i2c_smbus_read_word_data(chip->client,
AD7746_REG_CAP_OFFH);
if (ret < 0)
@@ -643,13 +643,13 @@ static int ad7746_read_raw(struct iio_dev *indio_dev,
ret = IIO_VAL_INT;
break;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
*val = AD7746_CAPDAC_DACP(chip->capdac[chan->channel]
[chan->differential]) * 338646;
ret = IIO_VAL_INT;
break;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_CAPACITANCE:
/* 8.192pf / 2^24 */
@@ -788,20 +788,8 @@ static struct i2c_driver ad7746_driver = {
.remove = __devexit_p(ad7746_remove),
.id_table = ad7746_id,
};
-
-static __init int ad7746_init(void)
-{
- return i2c_add_driver(&ad7746_driver);
-}
-
-static __exit void ad7746_exit(void)
-{
- i2c_del_driver(&ad7746_driver);
-}
+module_i2c_driver(ad7746_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7746/5/7 capacitive sensor driver");
MODULE_LICENSE("GPL v2");
-
-module_init(ad7746_init);
-module_exit(ad7746_exit);
diff --git a/drivers/staging/iio/chrdev.h b/drivers/staging/iio/chrdev.h
deleted file mode 100644
index d8e736f..0000000
--- a/drivers/staging/iio/chrdev.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* The industrial I/O core - character device related
- *
- * Copyright (c) 2008 Jonathan Cameron
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- */
-
-#ifndef _IIO_CHRDEV_H_
-#define _IIO_CHRDEV_H_
-
-/**
- * struct iio_event_data - The actual event being pushed to userspace
- * @id: event identifier
- * @timestamp: best estimate of time of event occurrence (often from
- * the interrupt handler)
- */
-struct iio_event_data {
- u64 id;
- s64 timestamp;
-};
-
-#define IIO_GET_EVENT_FD_IOCTL _IOR('i', 0x90, int)
-#endif
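struct iio_event_data and IIO_GET_EVENT_FD_IOCTL do not disappear with chrdev.h; the "../events.h" include added to the event-capable drivers earlier in this diff is presumably where they now live. For orientation only, userspace consumes that structure roughly like the sketch below; the device path and the lack of error handling are assumptions, not from the patch:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct iio_event_data {		/* mirrors the struct removed above */
	uint64_t id;
	int64_t timestamp;
};

#define IIO_GET_EVENT_FD_IOCTL _IOR('i', 0x90, int)

int main(void)
{
	struct iio_event_data ev;
	int fd = open("/dev/iio:device0", O_RDONLY);	/* assumed path */
	int event_fd;

	ioctl(fd, IIO_GET_EVENT_FD_IOCTL, &event_fd);
	while (read(event_fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("event id %llx at %lld ns\n",
		       (unsigned long long)ev.id, (long long)ev.timestamp);
	return 0;
}
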
diff --git a/drivers/staging/iio/dac/Kconfig b/drivers/staging/iio/dac/Kconfig
index fac8549..13e2797 100644
--- a/drivers/staging/iio/dac/Kconfig
+++ b/drivers/staging/iio/dac/Kconfig
@@ -24,6 +24,29 @@ config AD5360
To compile this driver as module choose M here: the module will be called
ad5360.
+config AD5380
+ tristate "Analog Devices AD5380/81/82/83/84/90/91/92 DAC driver"
+ depends on (SPI_MASTER || I2C)
+ select REGMAP_I2C if I2C
+ select REGMAP_SPI if SPI_MASTER
+ help
+ Say yes here to build support for Analog Devices AD5380, AD5381,
+ AD5382, AD5383, AD5384, AD5390, AD5391, AD5392 multi-channel
+ Digital to Analog Converters (DAC).
+
+ To compile this driver as module choose M here: the module will be called
+ ad5380.
+
+config AD5421
+ tristate "Analog Devices AD5421 DAC driver"
+ depends on SPI
+ help
+ Say yes here to build support for Analog Devices AD5421 loop-powered
+	  digital-to-analog converters (DAC).
+
+ To compile this driver as module choose M here: the module will be called
+ ad5421.
+
config AD5624R_SPI
tristate "Analog Devices AD5624/44/64R DAC spi driver"
depends on SPI
@@ -37,7 +60,7 @@ config AD5446
help
Say yes here to build support for Analog Devices AD5444, AD5446,
AD5512A, AD5542A, AD5543, AD5553, AD5601, AD5611, AD5620, AD5621,
- AD5640, AD5660 DACs.
+ AD5640, AD5660, AD5662 DACs.
To compile this driver as a module, choose M here: the
module will be called ad5446.
@@ -52,12 +75,22 @@ config AD5504
To compile this driver as a module, choose M here: the
module will be called ad5504.
+config AD5764
+ tristate "Analog Devices AD5764/64R/44/44R DAC driver"
+ depends on SPI_MASTER
+ help
+ Say yes here to build support for Analog Devices AD5764, AD5764R, AD5744,
+	  AD5744R Digital to Analog Converters.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5764.
+
config AD5791
- tristate "Analog Devices AD5760/AD5780/AD5781/AD5791 DAC SPI driver"
+ tristate "Analog Devices AD5760/AD5780/AD5781/AD5790/AD5791 DAC SPI driver"
depends on SPI
help
Say yes here to build support for Analog Devices AD5760, AD5780,
- AD5781, AD5791 High Resolution Voltage Output Digital to
+ AD5781, AD5790, AD5791 High Resolution Voltage Output Digital to
Analog Converter.
To compile this driver as a module, choose M here: the
diff --git a/drivers/staging/iio/dac/Makefile b/drivers/staging/iio/dac/Makefile
index 07b6f5e..8ab1d26 100644
--- a/drivers/staging/iio/dac/Makefile
+++ b/drivers/staging/iio/dac/Makefile
@@ -3,10 +3,13 @@
#
obj-$(CONFIG_AD5360) += ad5360.o
+obj-$(CONFIG_AD5380) += ad5380.o
+obj-$(CONFIG_AD5421) += ad5421.o
obj-$(CONFIG_AD5624R_SPI) += ad5624r_spi.o
obj-$(CONFIG_AD5064) += ad5064.o
obj-$(CONFIG_AD5504) += ad5504.o
obj-$(CONFIG_AD5446) += ad5446.o
+obj-$(CONFIG_AD5764) += ad5764.o
obj-$(CONFIG_AD5791) += ad5791.o
obj-$(CONFIG_AD5686) += ad5686.o
obj-$(CONFIG_MAX517) += max517.o
diff --git a/drivers/staging/iio/dac/ad5064.c b/drivers/staging/iio/dac/ad5064.c
index 24279f2..049a855 100644
--- a/drivers/staging/iio/dac/ad5064.c
+++ b/drivers/staging/iio/dac/ad5064.c
@@ -91,7 +91,7 @@ enum ad5064_type {
.indexed = 1, \
.output = 1, \
.channel = (chan), \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
.address = AD5064_ADDR_DAC(chan), \
.scan_type = IIO_ST('u', (bits), 16, 20 - (bits)) \
}
@@ -280,14 +280,14 @@ static int ad5064_read_raw(struct iio_dev *indio_dev,
long m)
{
struct ad5064_state *st = iio_priv(indio_dev);
- unsigned long scale_uv;
unsigned int vref;
+ int scale_uv;
switch (m) {
case 0:
*val = st->dac_cache[chan->channel];
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
vref = st->chip_info->shared_vref ? 0 : chan->channel;
scale_uv = regulator_get_voltage(st->vref_reg[vref].consumer);
if (scale_uv < 0)
@@ -445,18 +445,7 @@ static struct spi_driver ad5064_driver = {
.remove = __devexit_p(ad5064_remove),
.id_table = ad5064_id,
};
-
-static __init int ad5064_spi_init(void)
-{
- return spi_register_driver(&ad5064_driver);
-}
-module_init(ad5064_spi_init);
-
-static __exit void ad5064_spi_exit(void)
-{
- spi_unregister_driver(&ad5064_driver);
-}
-module_exit(ad5064_spi_exit);
+module_spi_driver(ad5064_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Analog Devices AD5064/64-1/44/24 DAC");
diff --git a/drivers/staging/iio/dac/ad5360.c b/drivers/staging/iio/dac/ad5360.c
index 72d0f3f..710b256af 100644
--- a/drivers/staging/iio/dac/ad5360.c
+++ b/drivers/staging/iio/dac/ad5360.c
@@ -103,10 +103,10 @@ enum ad5360_type {
.type = IIO_VOLTAGE, \
.indexed = 1, \
.output = 1, \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE) | \
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) | \
- (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) | \
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT | \
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT, \
.scan_type = IIO_ST('u', (bits), 16, 16 - (bits)) \
}
@@ -326,21 +326,21 @@ static int ad5360_write_raw(struct iio_dev *indio_dev,
return ad5360_write(indio_dev, AD5360_CMD_WRITE_DATA,
chan->address, val, chan->scan_type.shift);
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
if (val >= max_val || val < 0)
return -EINVAL;
return ad5360_write(indio_dev, AD5360_CMD_WRITE_OFFSET,
chan->address, val, chan->scan_type.shift);
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
if (val >= max_val || val < 0)
return -EINVAL;
return ad5360_write(indio_dev, AD5360_CMD_WRITE_GAIN,
chan->address, val, chan->scan_type.shift);
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
if (val <= -max_val || val > 0)
return -EINVAL;
@@ -371,8 +371,8 @@ static int ad5360_read_raw(struct iio_dev *indio_dev,
long m)
{
struct ad5360_state *st = iio_priv(indio_dev);
- unsigned long scale_uv;
unsigned int ofs_index;
+ int scale_uv;
int ret;
switch (m) {
@@ -383,7 +383,7 @@ static int ad5360_read_raw(struct iio_dev *indio_dev,
return ret;
*val = ret >> chan->scan_type.shift;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
/* vout = 4 * vref * dac_code */
scale_uv = ad5360_get_channel_vref(st, chan->channel) * 4 * 100;
if (scale_uv < 0)
@@ -393,21 +393,21 @@ static int ad5360_read_raw(struct iio_dev *indio_dev,
*val = scale_uv / 100000;
*val2 = (scale_uv % 100000) * 10;
return IIO_VAL_INT_PLUS_MICRO;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
ret = ad5360_read(indio_dev, AD5360_READBACK_OFFSET,
chan->address);
if (ret < 0)
return ret;
*val = ret;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
ret = ad5360_read(indio_dev, AD5360_READBACK_GAIN,
chan->address);
if (ret < 0)
return ret;
*val = ret;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
ofs_index = ad5360_get_channel_vref_index(st, chan->channel);
ret = ad5360_read(indio_dev, AD5360_READBACK_SF,
AD5360_REG_SF_OFS(ofs_index));
@@ -563,18 +563,7 @@ static struct spi_driver ad5360_driver = {
.remove = __devexit_p(ad5360_remove),
.id_table = ad5360_ids,
};
-
-static __init int ad5360_spi_init(void)
-{
- return spi_register_driver(&ad5360_driver);
-}
-module_init(ad5360_spi_init);
-
-static __exit void ad5360_spi_exit(void)
-{
- spi_unregister_driver(&ad5360_driver);
-}
-module_exit(ad5360_spi_exit);
+module_spi_driver(ad5360_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("Analog Devices AD5360/61/62/63/70/71/72/73 DAC");
diff --git a/drivers/staging/iio/dac/ad5380.c b/drivers/staging/iio/dac/ad5380.c
new file mode 100644
index 0000000..eff97ae0
--- /dev/null
+++ b/drivers/staging/iio/dac/ad5380.c
@@ -0,0 +1,676 @@
+/*
+ * Analog Devices AD5380, AD5381, AD5382, AD5383, AD5384, AD5390, AD5391,
+ * AD5392 multi-channel Digital to Analog Converters driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "dac.h"
+
+
+#define AD5380_REG_DATA(x) (((x) << 2) | 3)
+#define AD5380_REG_OFFSET(x) (((x) << 2) | 2)
+#define AD5380_REG_GAIN(x) (((x) << 2) | 1)
+#define AD5380_REG_SF_PWR_DOWN (8 << 2)
+#define AD5380_REG_SF_PWR_UP (9 << 2)
+#define AD5380_REG_SF_CTRL (12 << 2)
+
+#define AD5380_CTRL_PWR_DOWN_MODE_OFFSET 13
+#define AD5380_CTRL_INT_VREF_2V5 BIT(12)
+#define AD5380_CTRL_INT_VREF_EN BIT(10)
+
+/**
+ * struct ad5380_chip_info - chip specific information
+ * @channel_template: channel specification template
+ * @num_channels: number of channels
+ * @int_vref: internal vref in uV
+ */
+
+struct ad5380_chip_info {
+ struct iio_chan_spec channel_template;
+ unsigned int num_channels;
+ unsigned int int_vref;
+};
+
+/**
+ * struct ad5380_state - driver instance specific data
+ * @regmap: regmap instance used by the device
+ * @chip_info: chip model specific constants, available modes etc
+ * @vref_reg: vref supply regulator
+ * @vref:		actual reference voltage used in uV
+ * @pwr_down: whether the chip is currently in power down mode
+ */
+
+struct ad5380_state {
+ struct regmap *regmap;
+ const struct ad5380_chip_info *chip_info;
+ struct regulator *vref_reg;
+ int vref;
+ bool pwr_down;
+};
+
+enum ad5380_type {
+ ID_AD5380_3,
+ ID_AD5380_5,
+ ID_AD5381_3,
+ ID_AD5381_5,
+ ID_AD5382_3,
+ ID_AD5382_5,
+ ID_AD5383_3,
+ ID_AD5383_5,
+ ID_AD5390_3,
+ ID_AD5390_5,
+ ID_AD5391_3,
+ ID_AD5391_5,
+ ID_AD5392_3,
+ ID_AD5392_5,
+};
+
+#define AD5380_CHANNEL(_bits) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT | \
+ IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT, \
+ .scan_type = IIO_ST('u', (_bits), 16, 14 - (_bits)) \
+}
+
+static const struct ad5380_chip_info ad5380_chip_info_tbl[] = {
+ [ID_AD5380_3] = {
+ .channel_template = AD5380_CHANNEL(14),
+ .num_channels = 40,
+ .int_vref = 1250000,
+ },
+ [ID_AD5380_5] = {
+ .channel_template = AD5380_CHANNEL(14),
+ .num_channels = 40,
+ .int_vref = 2500000,
+ },
+ [ID_AD5381_3] = {
+ .channel_template = AD5380_CHANNEL(12),
+ .num_channels = 16,
+ .int_vref = 1250000,
+ },
+ [ID_AD5381_5] = {
+ .channel_template = AD5380_CHANNEL(12),
+ .num_channels = 16,
+ .int_vref = 2500000,
+ },
+ [ID_AD5382_3] = {
+ .channel_template = AD5380_CHANNEL(14),
+ .num_channels = 32,
+ .int_vref = 1250000,
+ },
+ [ID_AD5382_5] = {
+ .channel_template = AD5380_CHANNEL(14),
+ .num_channels = 32,
+ .int_vref = 2500000,
+ },
+ [ID_AD5383_3] = {
+ .channel_template = AD5380_CHANNEL(12),
+ .num_channels = 32,
+ .int_vref = 1250000,
+ },
+ [ID_AD5383_5] = {
+ .channel_template = AD5380_CHANNEL(12),
+ .num_channels = 32,
+ .int_vref = 2500000,
+ },
+ [ID_AD5390_3] = {
+ .channel_template = AD5380_CHANNEL(14),
+ .num_channels = 16,
+ .int_vref = 1250000,
+ },
+ [ID_AD5390_5] = {
+ .channel_template = AD5380_CHANNEL(14),
+ .num_channels = 16,
+ .int_vref = 2500000,
+ },
+ [ID_AD5391_3] = {
+ .channel_template = AD5380_CHANNEL(12),
+ .num_channels = 16,
+ .int_vref = 1250000,
+ },
+ [ID_AD5391_5] = {
+ .channel_template = AD5380_CHANNEL(12),
+ .num_channels = 16,
+ .int_vref = 2500000,
+ },
+ [ID_AD5392_3] = {
+ .channel_template = AD5380_CHANNEL(14),
+ .num_channels = 8,
+ .int_vref = 1250000,
+ },
+ [ID_AD5392_5] = {
+ .channel_template = AD5380_CHANNEL(14),
+ .num_channels = 8,
+ .int_vref = 2500000,
+ },
+};
+
+static ssize_t ad5380_read_dac_powerdown(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5380_state *st = iio_priv(indio_dev);
+
+ return sprintf(buf, "%d\n", st->pwr_down);
+}
+
+static ssize_t ad5380_write_dac_powerdown(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5380_state *st = iio_priv(indio_dev);
+ bool pwr_down;
+ int ret;
+
+ ret = strtobool(buf, &pwr_down);
+ if (ret)
+ return ret;
+
+ mutex_lock(&indio_dev->mlock);
+
+ if (pwr_down)
+ ret = regmap_write(st->regmap, AD5380_REG_SF_PWR_DOWN, 0);
+ else
+ ret = regmap_write(st->regmap, AD5380_REG_SF_PWR_UP, 0);
+
+ st->pwr_down = pwr_down;
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret ? ret : len;
+}
+
+static IIO_DEVICE_ATTR(out_voltage_powerdown,
+ S_IRUGO | S_IWUSR,
+ ad5380_read_dac_powerdown,
+ ad5380_write_dac_powerdown, 0);
+
+static const char ad5380_powerdown_modes[][15] = {
+ [0] = "100kohm_to_gnd",
+ [1] = "three_state",
+};
+
+static ssize_t ad5380_read_powerdown_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5380_state *st = iio_priv(indio_dev);
+ unsigned int mode;
+ int ret;
+
+ ret = regmap_read(st->regmap, AD5380_REG_SF_CTRL, &mode);
+ if (ret)
+ return ret;
+
+ mode = (mode >> AD5380_CTRL_PWR_DOWN_MODE_OFFSET) & 1;
+
+ return sprintf(buf, "%s\n", ad5380_powerdown_modes[mode]);
+}
+
+static ssize_t ad5380_write_powerdown_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5380_state *st = iio_priv(indio_dev);
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(ad5380_powerdown_modes); ++i) {
+ if (sysfs_streq(buf, ad5380_powerdown_modes[i]))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(ad5380_powerdown_modes))
+ return -EINVAL;
+
+ ret = regmap_update_bits(st->regmap, AD5380_REG_SF_CTRL,
+ 1 << AD5380_CTRL_PWR_DOWN_MODE_OFFSET,
+ i << AD5380_CTRL_PWR_DOWN_MODE_OFFSET);
+
+ return ret ? ret : len;
+}
+
+static IIO_DEVICE_ATTR(out_voltage_powerdown_mode,
+ S_IRUGO | S_IWUSR,
+ ad5380_read_powerdown_mode,
+ ad5380_write_powerdown_mode, 0);
+
+static IIO_CONST_ATTR(out_voltage_powerdown_mode_available,
+ "100kohm_to_gnd three_state");
+
+static struct attribute *ad5380_attributes[] = {
+ &iio_dev_attr_out_voltage_powerdown.dev_attr.attr,
+ &iio_dev_attr_out_voltage_powerdown_mode.dev_attr.attr,
+ &iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ad5380_attribute_group = {
+ .attrs = ad5380_attributes,
+};
+
+static unsigned int ad5380_info_to_reg(struct iio_chan_spec const *chan,
+ long info)
+{
+ switch (info) {
+ case 0:
+ return AD5380_REG_DATA(chan->address);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return AD5380_REG_OFFSET(chan->address);
+ case IIO_CHAN_INFO_CALIBSCALE:
+ return AD5380_REG_GAIN(chan->address);
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ad5380_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long info)
+{
+ const unsigned int max_val = (1 << chan->scan_type.realbits);
+ struct ad5380_state *st = iio_priv(indio_dev);
+
+ switch (info) {
+ case 0:
+ case IIO_CHAN_INFO_CALIBSCALE:
+ if (val >= max_val || val < 0)
+ return -EINVAL;
+
+ return regmap_write(st->regmap,
+ ad5380_info_to_reg(chan, info),
+ val << chan->scan_type.shift);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ val += (1 << chan->scan_type.realbits) / 2;
+ if (val >= max_val || val < 0)
+ return -EINVAL;
+
+ return regmap_write(st->regmap,
+ AD5380_REG_OFFSET(chan->address),
+ val << chan->scan_type.shift);
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+static int ad5380_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long info)
+{
+ struct ad5380_state *st = iio_priv(indio_dev);
+ unsigned long scale_uv;
+ int ret;
+
+ switch (info) {
+ case 0:
+ case IIO_CHAN_INFO_CALIBSCALE:
+ ret = regmap_read(st->regmap, ad5380_info_to_reg(chan, info),
+ val);
+ if (ret)
+ return ret;
+ *val >>= chan->scan_type.shift;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ ret = regmap_read(st->regmap, AD5380_REG_OFFSET(chan->address),
+ val);
+ if (ret)
+ return ret;
+ *val >>= chan->scan_type.shift;
+		*val -= (1 << chan->scan_type.realbits) / 2;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ scale_uv = ((2 * st->vref) >> chan->scan_type.realbits) * 100;
+ *val = scale_uv / 100000;
+ *val2 = (scale_uv % 100000) * 10;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info ad5380_info = {
+ .read_raw = ad5380_read_raw,
+ .write_raw = ad5380_write_raw,
+ .attrs = &ad5380_attribute_group,
+ .driver_module = THIS_MODULE,
+};
+
+static int __devinit ad5380_alloc_channels(struct iio_dev *indio_dev)
+{
+ struct ad5380_state *st = iio_priv(indio_dev);
+ struct iio_chan_spec *channels;
+ unsigned int i;
+
+	channels = kcalloc(st->chip_info->num_channels,
+			   sizeof(struct iio_chan_spec), GFP_KERNEL);
+
+ if (!channels)
+ return -ENOMEM;
+
+ for (i = 0; i < st->chip_info->num_channels; ++i) {
+ channels[i] = st->chip_info->channel_template;
+ channels[i].channel = i;
+ channels[i].address = i;
+ }
+
+ indio_dev->channels = channels;
+
+ return 0;
+}
+
+static int __devinit ad5380_probe(struct device *dev, struct regmap *regmap,
+ enum ad5380_type type, const char *name)
+{
+ struct iio_dev *indio_dev;
+ struct ad5380_state *st;
+ unsigned int ctrl = 0;
+ int ret;
+
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
+ dev_err(dev, "Failed to allocate iio device\n");
+ ret = -ENOMEM;
+ goto error_regmap_exit;
+ }
+
+ st = iio_priv(indio_dev);
+ dev_set_drvdata(dev, indio_dev);
+
+ st->chip_info = &ad5380_chip_info_tbl[type];
+ st->regmap = regmap;
+
+ indio_dev->dev.parent = dev;
+ indio_dev->name = name;
+ indio_dev->info = &ad5380_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->num_channels = st->chip_info->num_channels;
+
+ ret = ad5380_alloc_channels(indio_dev);
+ if (ret) {
+ dev_err(dev, "Failed to allocate channel spec: %d\n", ret);
+ goto error_free;
+ }
+
+ if (st->chip_info->int_vref == 2500000)
+ ctrl |= AD5380_CTRL_INT_VREF_2V5;
+
+ st->vref_reg = regulator_get(dev, "vref");
+ if (!IS_ERR(st->vref_reg)) {
+ ret = regulator_enable(st->vref_reg);
+ if (ret) {
+ dev_err(dev, "Failed to enable vref regulators: %d\n",
+ ret);
+ goto error_free_reg;
+ }
+
+ st->vref = regulator_get_voltage(st->vref_reg);
+ } else {
+ st->vref = st->chip_info->int_vref;
+ ctrl |= AD5380_CTRL_INT_VREF_EN;
+ }
+
+ ret = regmap_write(st->regmap, AD5380_REG_SF_CTRL, ctrl);
+ if (ret) {
+ dev_err(dev, "Failed to write to device: %d\n", ret);
+ goto error_disable_reg;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(dev, "Failed to register iio device: %d\n", ret);
+ goto error_disable_reg;
+ }
+
+ return 0;
+
+error_disable_reg:
+ if (!IS_ERR(st->vref_reg))
+ regulator_disable(st->vref_reg);
+error_free_reg:
+ if (!IS_ERR(st->vref_reg))
+ regulator_put(st->vref_reg);
+
+ kfree(indio_dev->channels);
+error_free:
+ iio_free_device(indio_dev);
+error_regmap_exit:
+ regmap_exit(regmap);
+
+ return ret;
+}
+
+static int __devexit ad5380_remove(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct ad5380_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ kfree(indio_dev->channels);
+
+ if (!IS_ERR(st->vref_reg)) {
+ regulator_disable(st->vref_reg);
+ regulator_put(st->vref_reg);
+ }
+
+ regmap_exit(st->regmap);
+ iio_free_device(indio_dev);
+
+ return 0;
+}
+
+static bool ad5380_reg_false(struct device *dev, unsigned int reg)
+{
+ return false;
+}
+
+static const struct regmap_config ad5380_regmap_config = {
+ .reg_bits = 10,
+ .val_bits = 14,
+
+ .max_register = AD5380_REG_DATA(40),
+ .cache_type = REGCACHE_RBTREE,
+
+ .volatile_reg = ad5380_reg_false,
+ .readable_reg = ad5380_reg_false,
+};
+
+#if IS_ENABLED(CONFIG_SPI_MASTER)
+
+static int __devinit ad5380_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+ struct regmap *regmap;
+
+ regmap = regmap_init_spi(spi, &ad5380_regmap_config);
+
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return ad5380_probe(&spi->dev, regmap, id->driver_data, id->name);
+}
+
+static int __devexit ad5380_spi_remove(struct spi_device *spi)
+{
+ return ad5380_remove(&spi->dev);
+}
+
+static const struct spi_device_id ad5380_spi_ids[] = {
+ { "ad5380-3", ID_AD5380_3 },
+ { "ad5380-5", ID_AD5380_5 },
+ { "ad5381-3", ID_AD5381_3 },
+ { "ad5381-5", ID_AD5381_5 },
+ { "ad5382-3", ID_AD5382_3 },
+ { "ad5382-5", ID_AD5382_5 },
+ { "ad5383-3", ID_AD5383_3 },
+ { "ad5383-5", ID_AD5383_5 },
+ { "ad5384-3", ID_AD5380_3 },
+ { "ad5384-5", ID_AD5380_5 },
+ { "ad5390-3", ID_AD5390_3 },
+ { "ad5390-5", ID_AD5390_5 },
+ { "ad5391-3", ID_AD5391_3 },
+ { "ad5391-5", ID_AD5391_5 },
+ { "ad5392-3", ID_AD5392_3 },
+ { "ad5392-5", ID_AD5392_5 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ad5380_spi_ids);
+
+static struct spi_driver ad5380_spi_driver = {
+ .driver = {
+ .name = "ad5380",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5380_spi_probe,
+ .remove = __devexit_p(ad5380_spi_remove),
+ .id_table = ad5380_spi_ids,
+};
+
+static inline int ad5380_spi_register_driver(void)
+{
+ return spi_register_driver(&ad5380_spi_driver);
+}
+
+static inline void ad5380_spi_unregister_driver(void)
+{
+ spi_unregister_driver(&ad5380_spi_driver);
+}
+
+#else
+
+static inline int ad5380_spi_register_driver(void)
+{
+ return 0;
+}
+
+static inline void ad5380_spi_unregister_driver(void)
+{
+}
+
+#endif
+
+#if IS_ENABLED(CONFIG_I2C)
+
+static int __devinit ad5380_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct regmap *regmap;
+
+ regmap = regmap_init_i2c(i2c, &ad5380_regmap_config);
+
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ return ad5380_probe(&i2c->dev, regmap, id->driver_data, id->name);
+}
+
+static int __devexit ad5380_i2c_remove(struct i2c_client *i2c)
+{
+ return ad5380_remove(&i2c->dev);
+}
+
+static const struct i2c_device_id ad5380_i2c_ids[] = {
+ { "ad5380-3", ID_AD5380_3 },
+ { "ad5380-5", ID_AD5380_5 },
+ { "ad5381-3", ID_AD5381_3 },
+ { "ad5381-5", ID_AD5381_5 },
+ { "ad5382-3", ID_AD5382_3 },
+ { "ad5382-5", ID_AD5382_5 },
+ { "ad5383-3", ID_AD5383_3 },
+ { "ad5383-5", ID_AD5383_5 },
+ { "ad5384-3", ID_AD5380_3 },
+ { "ad5384-5", ID_AD5380_5 },
+ { "ad5390-3", ID_AD5390_3 },
+ { "ad5390-5", ID_AD5390_5 },
+ { "ad5391-3", ID_AD5391_3 },
+ { "ad5391-5", ID_AD5391_5 },
+ { "ad5392-3", ID_AD5392_3 },
+ { "ad5392-5", ID_AD5392_5 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, ad5380_i2c_ids);
+
+static struct i2c_driver ad5380_i2c_driver = {
+ .driver = {
+ .name = "ad5380",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5380_i2c_probe,
+ .remove = __devexit_p(ad5380_i2c_remove),
+ .id_table = ad5380_i2c_ids,
+};
+
+static inline int ad5380_i2c_register_driver(void)
+{
+ return i2c_add_driver(&ad5380_i2c_driver);
+}
+
+static inline void ad5380_i2c_unregister_driver(void)
+{
+ i2c_del_driver(&ad5380_i2c_driver);
+}
+
+#else
+
+static inline int ad5380_i2c_register_driver(void)
+{
+ return 0;
+}
+
+static inline void ad5380_i2c_unregister_driver(void)
+{
+}
+
+#endif
+
+static int __init ad5380_spi_init(void)
+{
+ int ret;
+
+ ret = ad5380_spi_register_driver();
+ if (ret)
+ return ret;
+
+ ret = ad5380_i2c_register_driver();
+ if (ret) {
+ ad5380_spi_unregister_driver();
+ return ret;
+ }
+
+ return 0;
+}
+module_init(ad5380_spi_init);
+
+static void __exit ad5380_spi_exit(void)
+{
+ ad5380_i2c_unregister_driver();
+ ad5380_spi_unregister_driver();
+
+}
+module_exit(ad5380_spi_exit);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices AD5380/81/82/83/84/90/91/92 DAC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/dac/ad5421.c b/drivers/staging/iio/dac/ad5421.c
new file mode 100644
index 0000000..71ee868
--- /dev/null
+++ b/drivers/staging/iio/dac/ad5421.c
@@ -0,0 +1,555 @@
+/*
+ * AD5421 Digital to analog converters driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "../events.h"
+#include "dac.h"
+#include "ad5421.h"
+
+
+#define AD5421_REG_DAC_DATA 0x1
+#define AD5421_REG_CTRL 0x2
+#define AD5421_REG_OFFSET 0x3
+#define AD5421_REG_GAIN 0x4
+/* Load DAC and fault share the same register number. Writing to it issues a
+ * DAC load command, reading from it returns the fault status register. */
+#define AD5421_REG_LOAD_DAC 0x5
+#define AD5421_REG_FAULT 0x5
+#define AD5421_REG_FORCE_ALARM_CURRENT 0x6
+#define AD5421_REG_RESET 0x7
+#define AD5421_REG_START_CONVERSION 0x8
+#define AD5421_REG_NOOP 0x9
+
+#define AD5421_CTRL_WATCHDOG_DISABLE BIT(12)
+#define AD5421_CTRL_AUTO_FAULT_READBACK BIT(11)
+#define AD5421_CTRL_MIN_CURRENT BIT(9)
+#define AD5421_CTRL_ADC_SOURCE_TEMP BIT(8)
+#define AD5421_CTRL_ADC_ENABLE BIT(7)
+#define AD5421_CTRL_PWR_DOWN_INT_VREF BIT(6)
+
+#define AD5421_FAULT_SPI BIT(15)
+#define AD5421_FAULT_PEC BIT(14)
+#define AD5421_FAULT_OVER_CURRENT BIT(13)
+#define AD5421_FAULT_UNDER_CURRENT BIT(12)
+#define AD5421_FAULT_TEMP_OVER_140 BIT(11)
+#define AD5421_FAULT_TEMP_OVER_100 BIT(10)
+#define AD5421_FAULT_UNDER_VOLTAGE_6V BIT(9)
+#define AD5421_FAULT_UNDER_VOLTAGE_12V BIT(8)
+
+/* These bits will cause the fault pin to go high */
+#define AD5421_FAULT_TRIGGER_IRQ \
+ (AD5421_FAULT_SPI | AD5421_FAULT_PEC | AD5421_FAULT_OVER_CURRENT | \
+ AD5421_FAULT_UNDER_CURRENT | AD5421_FAULT_TEMP_OVER_140)
+
+/**
+ * struct ad5421_state - driver instance specific data
+ * @spi: spi_device
+ * @ctrl: control register cache
+ * @current_range: current range which the device is configured for
+ * @data: spi transfer buffers
+ * @fault_mask: software masking of events
+ */
+struct ad5421_state {
+ struct spi_device *spi;
+ unsigned int ctrl;
+ enum ad5421_current_range current_range;
+ unsigned int fault_mask;
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ union {
+ u32 d32;
+ u8 d8[4];
+ } data[2] ____cacheline_aligned;
+};
+
+static const struct iio_chan_spec ad5421_channels[] = {
+ {
+ .type = IIO_CURRENT,
+ .indexed = 1,
+ .output = 1,
+ .channel = 0,
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_OFFSET_SHARED_BIT |
+ IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
+ .scan_type = IIO_ST('u', 16, 16, 0),
+ .event_mask = IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_RISING) |
+ IIO_EV_BIT(IIO_EV_TYPE_THRESH, IIO_EV_DIR_FALLING),
+ },
+ {
+ .type = IIO_TEMP,
+ .channel = -1,
+		.event_mask = IIO_EV_BIT(IIO_EV_TYPE_MAG, IIO_EV_DIR_RISING),
+ },
+};
+
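+/*
+ * Register accesses are 24-bit frames: the register address occupies the top
+ * byte (with bit 23 acting as the read flag) and the 16-bit value the lower
+ * two bytes. The frame is assembled in a big-endian 32-bit buffer and only
+ * its lower three bytes are shifted out.
+ */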
+static int ad5421_write_unlocked(struct iio_dev *indio_dev,
+ unsigned int reg, unsigned int val)
+{
+ struct ad5421_state *st = iio_priv(indio_dev);
+
+ st->data[0].d32 = cpu_to_be32((reg << 16) | val);
+
+ return spi_write(st->spi, &st->data[0].d8[1], 3);
+}
+
+static int ad5421_write(struct iio_dev *indio_dev, unsigned int reg,
+ unsigned int val)
+{
+ int ret;
+
+ mutex_lock(&indio_dev->mlock);
+ ret = ad5421_write_unlocked(indio_dev, reg, val);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
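+/*
+ * A read is split into two transfers: the first sends the read command (bit
+ * 23 set plus the register address), the second clocks the 16-bit result back
+ * out of the device. cs_change toggles chip select in between so the command
+ * is latched before the result is read.
+ */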
+static int ad5421_read(struct iio_dev *indio_dev, unsigned int reg)
+{
+ struct ad5421_state *st = iio_priv(indio_dev);
+ struct spi_message m;
+ int ret;
+ struct spi_transfer t[] = {
+ {
+ .tx_buf = &st->data[0].d8[1],
+ .len = 3,
+ .cs_change = 1,
+ }, {
+ .rx_buf = &st->data[1].d8[1],
+ .len = 3,
+ },
+ };
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t[0], &m);
+ spi_message_add_tail(&t[1], &m);
+
+ mutex_lock(&indio_dev->mlock);
+
+ st->data[0].d32 = cpu_to_be32((1 << 23) | (reg << 16));
+
+ ret = spi_sync(st->spi, &m);
+ if (ret >= 0)
+ ret = be32_to_cpu(st->data[1].d32) & 0xffff;
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static int ad5421_update_ctrl(struct iio_dev *indio_dev, unsigned int set,
+ unsigned int clr)
+{
+ struct ad5421_state *st = iio_priv(indio_dev);
+	int ret;
+
+ mutex_lock(&indio_dev->mlock);
+
+ st->ctrl &= ~clr;
+ st->ctrl |= set;
+
+ ret = ad5421_write_unlocked(indio_dev, AD5421_REG_CTRL, st->ctrl);
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static irqreturn_t ad5421_fault_handler(int irq, void *data)
+{
+ struct iio_dev *indio_dev = data;
+ struct ad5421_state *st = iio_priv(indio_dev);
+ unsigned int fault;
+ unsigned int old_fault = 0;
+ unsigned int events;
+
+ fault = ad5421_read(indio_dev, AD5421_REG_FAULT);
+ if (!fault)
+ return IRQ_NONE;
+
+	/* If we had a fault, this might mean that the DAC has lost its state
+	 * and has been reset. Make sure the control register actually contains
+	 * what we expect it to contain. Otherwise the watchdog might be
+	 * enabled and we would get watchdog timeout faults, which would render
+	 * the DAC unusable. */
+ ad5421_update_ctrl(indio_dev, 0, 0);
+
+
+	/* The fault pin stays high as long as a fault condition is present and
+	 * it is not possible to mask fault conditions. For certain fault
+	 * conditions, such as over-temperature, it takes some time until the
+	 * fault condition disappears. If we exited the interrupt handler
+	 * immediately after handling the event it would be entered again
+	 * instantly. Thus we fall back to polling as long as we detect that
+	 * an interrupt condition is still present.
+	 */
+ do {
+		/* 0xffff is an invalid value for the register and will only be
+		 * read if there has been a communication error */
+ if (fault == 0xffff)
+ fault = 0;
+
+ /* we are only interested in new events */
+ events = (old_fault ^ fault) & fault;
+ events &= st->fault_mask;
+
+ if (events & AD5421_FAULT_OVER_CURRENT) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_CURRENT,
+ 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns());
+ }
+
+ if (events & AD5421_FAULT_UNDER_CURRENT) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_CURRENT,
+ 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_FALLING),
+ iio_get_time_ns());
+ }
+
+ if (events & AD5421_FAULT_TEMP_OVER_140) {
+ iio_push_event(indio_dev,
+ IIO_UNMOD_EVENT_CODE(IIO_TEMP,
+ 0,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_DIR_RISING),
+ iio_get_time_ns());
+ }
+
+ old_fault = fault;
+ fault = ad5421_read(indio_dev, AD5421_REG_FAULT);
+
+ /* still active? go to sleep for some time */
+ if (fault & AD5421_FAULT_TRIGGER_IRQ)
+ msleep(1000);
+
+ } while (fault & AD5421_FAULT_TRIGGER_IRQ);
+
+
+ return IRQ_HANDLED;
+}
+
+static void ad5421_get_current_min_max(struct ad5421_state *st,
+ unsigned int *min, unsigned int *max)
+{
+ /* The current range is configured using external pins, which are
+ * usually hard-wired and not run-time switchable. */
+ switch (st->current_range) {
+ case AD5421_CURRENT_RANGE_4mA_20mA:
+ *min = 4000;
+ *max = 20000;
+ break;
+ case AD5421_CURRENT_RANGE_3mA8_21mA:
+ *min = 3800;
+ *max = 21000;
+ break;
+ case AD5421_CURRENT_RANGE_3mA2_24mA:
+ *min = 3200;
+ *max = 24000;
+ break;
+ default:
+ *min = 0;
+ *max = 1;
+ break;
+ }
+}
+
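+/*
+ * Offset is reported in DAC codes and scale in milliamps per code, so
+ * (raw + offset) * scale yields the output current, mapping code 0 to the
+ * lower end of the configured current range.
+ */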
+static inline unsigned int ad5421_get_offset(struct ad5421_state *st)
+{
+ unsigned int min, max;
+
+ ad5421_get_current_min_max(st, &min, &max);
+ return (min * (1 << 16)) / (max - min);
+}
+
+static inline unsigned int ad5421_get_scale(struct ad5421_state *st)
+{
+ unsigned int min, max;
+
+ ad5421_get_current_min_max(st, &min, &max);
+ return ((max - min) * 1000) / (1 << 16);
+}
+
+static int ad5421_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long m)
+{
+ struct ad5421_state *st = iio_priv(indio_dev);
+ int ret;
+
+ if (chan->type != IIO_CURRENT)
+ return -EINVAL;
+
+ switch (m) {
+ case 0:
+ ret = ad5421_read(indio_dev, AD5421_REG_DAC_DATA);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ *val2 = ad5421_get_scale(st);
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = ad5421_get_offset(st);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ ret = ad5421_read(indio_dev, AD5421_REG_OFFSET);
+ if (ret < 0)
+ return ret;
+ *val = ret - 32768;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ ret = ad5421_read(indio_dev, AD5421_REG_GAIN);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+static int ad5421_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+ const unsigned int max_val = 1 << 16;
+
+ switch (mask) {
+ case 0:
+ if (val >= max_val || val < 0)
+ return -EINVAL;
+
+ return ad5421_write(indio_dev, AD5421_REG_DAC_DATA, val);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ val += 32768;
+ if (val >= max_val || val < 0)
+ return -EINVAL;
+
+ return ad5421_write(indio_dev, AD5421_REG_OFFSET, val);
+ case IIO_CHAN_INFO_CALIBSCALE:
+ if (val >= max_val || val < 0)
+ return -EINVAL;
+
+ return ad5421_write(indio_dev, AD5421_REG_GAIN, val);
+ default:
+ break;
+ }
+
+ return -EINVAL;
+}
+
+static int ad5421_write_event_config(struct iio_dev *indio_dev,
+ u64 event_code, int state)
+{
+ struct ad5421_state *st = iio_priv(indio_dev);
+ unsigned int mask;
+
+ switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
+ case IIO_CURRENT:
+ if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
+ IIO_EV_DIR_RISING)
+ mask = AD5421_FAULT_OVER_CURRENT;
+ else
+ mask = AD5421_FAULT_UNDER_CURRENT;
+ break;
+ case IIO_TEMP:
+ mask = AD5421_FAULT_TEMP_OVER_140;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&indio_dev->mlock);
+ if (state)
+ st->fault_mask |= mask;
+ else
+ st->fault_mask &= ~mask;
+ mutex_unlock(&indio_dev->mlock);
+
+ return 0;
+}
+
+static int ad5421_read_event_config(struct iio_dev *indio_dev,
+ u64 event_code)
+{
+ struct ad5421_state *st = iio_priv(indio_dev);
+ unsigned int mask;
+
+ switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
+ case IIO_CURRENT:
+ if (IIO_EVENT_CODE_EXTRACT_DIR(event_code) ==
+ IIO_EV_DIR_RISING)
+ mask = AD5421_FAULT_OVER_CURRENT;
+ else
+ mask = AD5421_FAULT_UNDER_CURRENT;
+ break;
+ case IIO_TEMP:
+ mask = AD5421_FAULT_TEMP_OVER_140;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return (bool)(st->fault_mask & mask);
+}
+
+static int ad5421_read_event_value(struct iio_dev *indio_dev, u64 event_code,
+ int *val)
+{
+ int ret;
+
+ switch (IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(event_code)) {
+ case IIO_CURRENT:
+ ret = ad5421_read(indio_dev, AD5421_REG_DAC_DATA);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ break;
+ case IIO_TEMP:
+ *val = 140000;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct iio_info ad5421_info = {
+ .read_raw = ad5421_read_raw,
+ .write_raw = ad5421_write_raw,
+ .read_event_config = ad5421_read_event_config,
+ .write_event_config = ad5421_write_event_config,
+ .read_event_value = ad5421_read_event_value,
+ .driver_module = THIS_MODULE,
+};
+
+static int __devinit ad5421_probe(struct spi_device *spi)
+{
+ struct ad5421_platform_data *pdata = dev_get_platdata(&spi->dev);
+ struct iio_dev *indio_dev;
+ struct ad5421_state *st;
+ int ret;
+
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
+ dev_err(&spi->dev, "Failed to allocate iio device\n");
+ return -ENOMEM;
+ }
+
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+
+ st->spi = spi;
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = "ad5421";
+ indio_dev->info = &ad5421_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = ad5421_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ad5421_channels);
+
+ st->ctrl = AD5421_CTRL_WATCHDOG_DISABLE |
+ AD5421_CTRL_AUTO_FAULT_READBACK;
+
+ if (pdata) {
+ st->current_range = pdata->current_range;
+ if (pdata->external_vref)
+ st->ctrl |= AD5421_CTRL_PWR_DOWN_INT_VREF;
+ } else {
+ st->current_range = AD5421_CURRENT_RANGE_4mA_20mA;
+ }
+
+ /* write initial ctrl register value */
+ ad5421_update_ctrl(indio_dev, 0, 0);
+
+ if (spi->irq) {
+ ret = request_threaded_irq(spi->irq,
+ NULL,
+ ad5421_fault_handler,
+ IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
+ "ad5421 fault",
+ indio_dev);
+ if (ret)
+ goto error_free;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to register iio device: %d\n", ret);
+ goto error_free_irq;
+ }
+
+ return 0;
+
+error_free_irq:
+ if (spi->irq)
+ free_irq(spi->irq, indio_dev);
+error_free:
+ iio_free_device(indio_dev);
+
+ return ret;
+}
+
+static int __devexit ad5421_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+
+ iio_device_unregister(indio_dev);
+ if (spi->irq)
+ free_irq(spi->irq, indio_dev);
+ iio_free_device(indio_dev);
+
+ return 0;
+}
+
+static struct spi_driver ad5421_driver = {
+ .driver = {
+ .name = "ad5421",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5421_probe,
+ .remove = __devexit_p(ad5421_remove),
+};
+
+static __init int ad5421_init(void)
+{
+ return spi_register_driver(&ad5421_driver);
+}
+module_init(ad5421_init);
+
+static __exit void ad5421_exit(void)
+{
+ spi_unregister_driver(&ad5421_driver);
+}
+module_exit(ad5421_exit);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices AD5421 DAC");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:ad5421");
diff --git a/drivers/staging/iio/dac/ad5421.h b/drivers/staging/iio/dac/ad5421.h
new file mode 100644
index 0000000..cd2bb84
--- /dev/null
+++ b/drivers/staging/iio/dac/ad5421.h
@@ -0,0 +1,32 @@
+#ifndef __IIO_DAC_AD5421_H__
+#define __IIO_DAC_AD5421_H__
+
+/*
+ * TODO: This file needs to go into include/linux/iio
+ */
+
+/**
+ * enum ad5421_current_range - Current range the AD5421 is configured for.
+ * @AD5421_CURRENT_RANGE_4mA_20mA: 4 mA to 20 mA (RANGE1,0 pins = 00)
+ * @AD5421_CURRENT_RANGE_3mA8_21mA: 3.8 mA to 21 mA (RANGE1,0 pins = x1)
+ * @AD5421_CURRENT_RANGE_3mA2_24mA: 3.2 mA to 24 mA (RANGE1,0 pins = 10)
+ */
+
+enum ad5421_current_range {
+ AD5421_CURRENT_RANGE_4mA_20mA,
+ AD5421_CURRENT_RANGE_3mA8_21mA,
+ AD5421_CURRENT_RANGE_3mA2_24mA,
+};
+
+/**
+ * struct ad5421_platform_data - AD5421 DAC driver platform data
+ * @external_vref: whether an external reference voltage is used or not
+ * @current_range: Current range the AD5421 is configured for
+ */
+
+struct ad5421_platform_data {
+ bool external_vref;
+ enum ad5421_current_range current_range;
+};
+
+#endif
diff --git a/drivers/staging/iio/dac/ad5446.c b/drivers/staging/iio/dac/ad5446.c
index e1c204d..693e748 100644
--- a/drivers/staging/iio/dac/ad5446.c
+++ b/drivers/staging/iio/dac/ad5446.c
@@ -26,19 +26,17 @@
static void ad5446_store_sample(struct ad5446_state *st, unsigned val)
{
- st->data.d16 = cpu_to_be16(AD5446_LOAD |
- (val << st->chip_info->left_shift));
+ st->data.d16 = cpu_to_be16(AD5446_LOAD | val);
}
static void ad5542_store_sample(struct ad5446_state *st, unsigned val)
{
- st->data.d16 = cpu_to_be16(val << st->chip_info->left_shift);
+ st->data.d16 = cpu_to_be16(val);
}
static void ad5620_store_sample(struct ad5446_state *st, unsigned val)
{
- st->data.d16 = cpu_to_be16(AD5620_LOAD |
- (val << st->chip_info->left_shift));
+ st->data.d16 = cpu_to_be16(AD5620_LOAD | val);
}
static void ad5660_store_sample(struct ad5446_state *st, unsigned val)
@@ -63,50 +61,6 @@ static void ad5660_store_pwr_down(struct ad5446_state *st, unsigned mode)
st->data.d24[2] = val & 0xFF;
}
-static ssize_t ad5446_write(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5446_state *st = iio_priv(indio_dev);
- int ret;
- long val;
-
- ret = strict_strtol(buf, 10, &val);
- if (ret)
- goto error_ret;
-
- if (val > RES_MASK(st->chip_info->bits)) {
- ret = -EINVAL;
- goto error_ret;
- }
-
- mutex_lock(&indio_dev->mlock);
- st->cached_val = val;
- st->chip_info->store_sample(st, val);
- ret = spi_sync(st->spi, &st->msg);
- mutex_unlock(&indio_dev->mlock);
-
-error_ret:
- return ret ? ret : len;
-}
-
-static IIO_DEV_ATTR_OUT_RAW(0, ad5446_write, 0);
-
-static ssize_t ad5446_show_scale(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5446_state *st = iio_priv(indio_dev);
- /* Corresponds to Vref / 2^(bits) */
- unsigned int scale_uv = (st->vref_mv * 1000) >> st->chip_info->bits;
-
- return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
-}
-static IIO_DEVICE_ATTR(out_voltage_scale, S_IRUGO, ad5446_show_scale, NULL, 0);
-
static ssize_t ad5446_write_powerdown_mode(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
@@ -189,22 +143,20 @@ static IIO_DEVICE_ATTR(out_voltage0_powerdown, S_IRUGO | S_IWUSR,
ad5446_write_dac_powerdown, 0);
static struct attribute *ad5446_attributes[] = {
- &iio_dev_attr_out_voltage0_raw.dev_attr.attr,
- &iio_dev_attr_out_voltage_scale.dev_attr.attr,
&iio_dev_attr_out_voltage0_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage_powerdown_mode.dev_attr.attr,
&iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr,
NULL,
};
-static mode_t ad5446_attr_is_visible(struct kobject *kobj,
+static umode_t ad5446_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5446_state *st = iio_priv(indio_dev);
- mode_t mode = attr->mode;
+ umode_t mode = attr->mode;
if (!st->chip_info->store_pwr_down &&
(attr == &iio_dev_attr_out_voltage0_powerdown.dev_attr.attr ||
@@ -223,121 +175,148 @@ static const struct attribute_group ad5446_attribute_group = {
.is_visible = ad5446_attr_is_visible,
};
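+/*
+ * The macro arguments are the DAC resolution in bits, the width of the word
+ * shifted out on the wire and the left shift needed to align the sample
+ * within that word.
+ */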
+#define AD5446_CHANNEL(bits, storage, shift) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = 0, \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
+ .scan_type = IIO_ST('u', (bits), (storage), (shift)) \
+}
+
static const struct ad5446_chip_info ad5446_chip_info_tbl[] = {
[ID_AD5444] = {
- .bits = 12,
- .storagebits = 16,
- .left_shift = 2,
+ .channel = AD5446_CHANNEL(12, 16, 2),
.store_sample = ad5446_store_sample,
},
[ID_AD5446] = {
- .bits = 14,
- .storagebits = 16,
- .left_shift = 0,
+ .channel = AD5446_CHANNEL(14, 16, 0),
.store_sample = ad5446_store_sample,
},
[ID_AD5541A] = {
- .bits = 16,
- .storagebits = 16,
- .left_shift = 0,
+ .channel = AD5446_CHANNEL(16, 16, 0),
.store_sample = ad5542_store_sample,
},
[ID_AD5542A] = {
- .bits = 16,
- .storagebits = 16,
- .left_shift = 0,
+ .channel = AD5446_CHANNEL(16, 16, 0),
.store_sample = ad5542_store_sample,
},
[ID_AD5543] = {
- .bits = 16,
- .storagebits = 16,
- .left_shift = 0,
+ .channel = AD5446_CHANNEL(16, 16, 0),
.store_sample = ad5542_store_sample,
},
[ID_AD5512A] = {
- .bits = 12,
- .storagebits = 16,
- .left_shift = 4,
+ .channel = AD5446_CHANNEL(12, 16, 4),
.store_sample = ad5542_store_sample,
},
[ID_AD5553] = {
- .bits = 14,
- .storagebits = 16,
- .left_shift = 0,
+ .channel = AD5446_CHANNEL(14, 16, 0),
.store_sample = ad5542_store_sample,
},
[ID_AD5601] = {
- .bits = 8,
- .storagebits = 16,
- .left_shift = 6,
+ .channel = AD5446_CHANNEL(8, 16, 6),
.store_sample = ad5542_store_sample,
.store_pwr_down = ad5620_store_pwr_down,
},
[ID_AD5611] = {
- .bits = 10,
- .storagebits = 16,
- .left_shift = 4,
+ .channel = AD5446_CHANNEL(10, 16, 4),
.store_sample = ad5542_store_sample,
.store_pwr_down = ad5620_store_pwr_down,
},
[ID_AD5621] = {
- .bits = 12,
- .storagebits = 16,
- .left_shift = 2,
+ .channel = AD5446_CHANNEL(12, 16, 2),
.store_sample = ad5542_store_sample,
.store_pwr_down = ad5620_store_pwr_down,
},
[ID_AD5620_2500] = {
- .bits = 12,
- .storagebits = 16,
- .left_shift = 2,
+ .channel = AD5446_CHANNEL(12, 16, 2),
.int_vref_mv = 2500,
.store_sample = ad5620_store_sample,
.store_pwr_down = ad5620_store_pwr_down,
},
[ID_AD5620_1250] = {
- .bits = 12,
- .storagebits = 16,
- .left_shift = 2,
+ .channel = AD5446_CHANNEL(12, 16, 2),
.int_vref_mv = 1250,
.store_sample = ad5620_store_sample,
.store_pwr_down = ad5620_store_pwr_down,
},
[ID_AD5640_2500] = {
- .bits = 14,
- .storagebits = 16,
- .left_shift = 0,
+ .channel = AD5446_CHANNEL(14, 16, 0),
.int_vref_mv = 2500,
.store_sample = ad5620_store_sample,
.store_pwr_down = ad5620_store_pwr_down,
},
[ID_AD5640_1250] = {
- .bits = 14,
- .storagebits = 16,
- .left_shift = 0,
+ .channel = AD5446_CHANNEL(14, 16, 0),
.int_vref_mv = 1250,
.store_sample = ad5620_store_sample,
.store_pwr_down = ad5620_store_pwr_down,
},
[ID_AD5660_2500] = {
- .bits = 16,
- .storagebits = 24,
- .left_shift = 0,
+ .channel = AD5446_CHANNEL(16, 16, 0),
.int_vref_mv = 2500,
.store_sample = ad5660_store_sample,
.store_pwr_down = ad5660_store_pwr_down,
},
[ID_AD5660_1250] = {
- .bits = 16,
- .storagebits = 24,
- .left_shift = 0,
+ .channel = AD5446_CHANNEL(16, 16, 0),
.int_vref_mv = 1250,
.store_sample = ad5660_store_sample,
.store_pwr_down = ad5660_store_pwr_down,
},
};
+static int ad5446_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
+{
+ struct ad5446_state *st = iio_priv(indio_dev);
+ unsigned long scale_uv;
+
+ switch (m) {
+ case IIO_CHAN_INFO_SCALE:
+ scale_uv = (st->vref_mv * 1000) >> chan->scan_type.realbits;
+ *val = scale_uv / 1000;
+ *val2 = (scale_uv % 1000) * 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+
+ }
+ return -EINVAL;
+}
+
+static int ad5446_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct ad5446_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case 0:
+ if (val >= (1 << chan->scan_type.realbits) || val < 0)
+ return -EINVAL;
+
+ val <<= chan->scan_type.shift;
+ mutex_lock(&indio_dev->mlock);
+ st->cached_val = val;
+ st->chip_info->store_sample(st, val);
+ ret = spi_sync(st->spi, &st->msg);
+ mutex_unlock(&indio_dev->mlock);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
static const struct iio_info ad5446_info = {
+ .read_raw = ad5446_read_raw,
+ .write_raw = ad5446_write_raw,
.attrs = &ad5446_attribute_group,
.driver_module = THIS_MODULE,
};
@@ -376,11 +355,13 @@ static int __devinit ad5446_probe(struct spi_device *spi)
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->info = &ad5446_info;
indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = &st->chip_info->channel;
+ indio_dev->num_channels = 1;
/* Setup default message */
st->xfer.tx_buf = &st->data;
- st->xfer.len = st->chip_info->storagebits / 8;
+ st->xfer.len = st->chip_info->channel.scan_type.storagebits / 8;
spi_message_init(&st->msg);
spi_message_add_tail(&st->xfer, &st->msg);
@@ -454,31 +435,19 @@ static const struct spi_device_id ad5446_id[] = {
{"ad5660-1250", ID_AD5660_1250},
{}
};
+MODULE_DEVICE_TABLE(spi, ad5446_id);
static struct spi_driver ad5446_driver = {
.driver = {
.name = "ad5446",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad5446_probe,
.remove = __devexit_p(ad5446_remove),
.id_table = ad5446_id,
};
-
-static int __init ad5446_init(void)
-{
- return spi_register_driver(&ad5446_driver);
-}
-module_init(ad5446_init);
-
-static void __exit ad5446_exit(void)
-{
- spi_unregister_driver(&ad5446_driver);
-}
-module_exit(ad5446_exit);
+module_spi_driver(ad5446_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD5444/AD5446 DAC");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:ad5446");
diff --git a/drivers/staging/iio/dac/ad5446.h b/drivers/staging/iio/dac/ad5446.h
index 7118d65..4ea3476 100644
--- a/drivers/staging/iio/dac/ad5446.h
+++ b/drivers/staging/iio/dac/ad5446.h
@@ -25,8 +25,6 @@
#define AD5660_PWRDWN_100k (0x2 << 16) /* Power-down: 100kOhm to GND */
#define AD5660_PWRDWN_TRISTATE (0x3 << 16) /* Power-down: Three-state */
-#define RES_MASK(bits) ((1 << (bits)) - 1)
-
#define MODE_PWRDWN_1k 0x1
#define MODE_PWRDWN_100k 0x2
#define MODE_PWRDWN_TRISTATE 0x3
@@ -62,18 +60,14 @@ struct ad5446_state {
/**
* struct ad5446_chip_info - chip specific information
- * @bits: accuracy of the DAC in bits
- * @storagebits: number of bits written to the DAC
- * @left_shift: number of bits the datum must be shifted
+ * @channel: channel spec for the DAC
* @int_vref_mv: AD5620/40/60: the internal reference voltage
* @store_sample: chip specific helper function to store the datum
 * @store_pwr_down:	chip specific helper function to store the powerdown cmd
*/
struct ad5446_chip_info {
- u8 bits;
- u8 storagebits;
- u8 left_shift;
+ struct iio_chan_spec channel;
u16 int_vref_mv;
void (*store_sample) (struct ad5446_state *st, unsigned val);
void (*store_pwr_down) (struct ad5446_state *st, unsigned mode);
diff --git a/drivers/staging/iio/dac/ad5504.c b/drivers/staging/iio/dac/ad5504.c
index 60dd640..bc17205 100644
--- a/drivers/staging/iio/dac/ad5504.c
+++ b/drivers/staging/iio/dac/ad5504.c
@@ -18,9 +18,27 @@
#include "../iio.h"
#include "../sysfs.h"
+#include "../events.h"
#include "dac.h"
#include "ad5504.h"
+#define AD5504_CHANNEL(_chan) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (_chan), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
+ .address = AD5504_ADDR_DAC(_chan), \
+ .scan_type = IIO_ST('u', 12, 16, 0), \
+}
+
+static const struct iio_chan_spec ad5504_channels[] = {
+ AD5504_CHANNEL(0),
+ AD5504_CHANNEL(1),
+ AD5504_CHANNEL(2),
+ AD5504_CHANNEL(3),
+};
+
static int ad5504_spi_write(struct spi_device *spi, u8 addr, u16 val)
{
u16 tmp = cpu_to_be16(AD5504_CMD_WRITE |
@@ -30,13 +48,14 @@ static int ad5504_spi_write(struct spi_device *spi, u8 addr, u16 val)
return spi_write(spi, (u8 *)&tmp, 2);
}
-static int ad5504_spi_read(struct spi_device *spi, u8 addr, u16 *val)
+static int ad5504_spi_read(struct spi_device *spi, u8 addr)
{
u16 tmp = cpu_to_be16(AD5504_CMD_READ | AD5504_ADDR(addr));
+ u16 val;
int ret;
struct spi_transfer t = {
.tx_buf = &tmp,
- .rx_buf = val,
+ .rx_buf = &val,
.len = 2,
};
struct spi_message m;
@@ -45,44 +64,61 @@ static int ad5504_spi_read(struct spi_device *spi, u8 addr, u16 *val)
spi_message_add_tail(&t, &m);
ret = spi_sync(spi, &m);
- *val = be16_to_cpu(*val) & AD5504_RES_MASK;
+ if (ret < 0)
+ return ret;
- return ret;
+ return be16_to_cpu(val) & AD5504_RES_MASK;
}
-static ssize_t ad5504_write_dac(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+static int ad5504_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5504_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- long readin;
+ unsigned long scale_uv;
int ret;
- ret = strict_strtol(buf, 10, &readin);
- if (ret)
- return ret;
+ switch (m) {
+ case 0:
+ ret = ad5504_spi_read(st->spi, chan->address);
+ if (ret < 0)
+ return ret;
- ret = ad5504_spi_write(st->spi, this_attr->address, readin);
- return ret ? ret : len;
+ *val = ret;
+
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ scale_uv = (st->vref_mv * 1000) >> chan->scan_type.realbits;
+ *val = scale_uv / 1000;
+ *val2 = (scale_uv % 1000) * 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+
+ }
+ return -EINVAL;
}
-static ssize_t ad5504_read_dac(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+static int ad5504_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5504_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
- u16 val;
- ret = ad5504_spi_read(st->spi, this_attr->address, &val);
- if (ret)
- return ret;
+ switch (mask) {
+ case 0:
+ if (val >= (1 << chan->scan_type.realbits) || val < 0)
+ return -EINVAL;
+
+ return ad5504_spi_write(st->spi, chan->address, val);
+ default:
+ ret = -EINVAL;
+ }
- return sprintf(buf, "%d\n", val);
+ return -EINVAL;
}
static ssize_t ad5504_read_powerdown_mode(struct device *dev,
@@ -157,32 +193,6 @@ static ssize_t ad5504_write_dac_powerdown(struct device *dev,
return ret ? ret : len;
}
-static ssize_t ad5504_show_scale(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5504_state *st = iio_priv(indio_dev);
- /* Corresponds to Vref / 2^(bits) */
- unsigned int scale_uv = (st->vref_mv * 1000) >> AD5505_BITS;
-
- return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
-}
-static IIO_DEVICE_ATTR(out_voltage_scale, S_IRUGO, ad5504_show_scale, NULL, 0);
-
-#define IIO_DEV_ATTR_OUT_RW_RAW(_num, _show, _store, _addr) \
- IIO_DEVICE_ATTR(out_voltage##_num##_raw, \
- S_IRUGO | S_IWUSR, _show, _store, _addr)
-
-static IIO_DEV_ATTR_OUT_RW_RAW(0, ad5504_read_dac,
- ad5504_write_dac, AD5504_ADDR_DAC0);
-static IIO_DEV_ATTR_OUT_RW_RAW(1, ad5504_read_dac,
- ad5504_write_dac, AD5504_ADDR_DAC1);
-static IIO_DEV_ATTR_OUT_RW_RAW(2, ad5504_read_dac,
- ad5504_write_dac, AD5504_ADDR_DAC2);
-static IIO_DEV_ATTR_OUT_RW_RAW(3, ad5504_read_dac,
- ad5504_write_dac, AD5504_ADDR_DAC3);
-
static IIO_DEVICE_ATTR(out_voltage_powerdown_mode, S_IRUGO |
S_IWUSR, ad5504_read_powerdown_mode,
ad5504_write_powerdown_mode, 0);
@@ -203,17 +213,12 @@ static IIO_DEV_ATTR_DAC_POWERDOWN(3, ad5504_read_dac_powerdown,
ad5504_write_dac_powerdown, 3);
static struct attribute *ad5504_attributes[] = {
- &iio_dev_attr_out_voltage0_raw.dev_attr.attr,
- &iio_dev_attr_out_voltage1_raw.dev_attr.attr,
- &iio_dev_attr_out_voltage2_raw.dev_attr.attr,
- &iio_dev_attr_out_voltage3_raw.dev_attr.attr,
&iio_dev_attr_out_voltage0_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage1_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage2_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage3_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage_powerdown_mode.dev_attr.attr,
&iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr,
- &iio_dev_attr_out_voltage_scale.dev_attr.attr,
NULL,
};
@@ -222,11 +227,9 @@ static const struct attribute_group ad5504_attribute_group = {
};
static struct attribute *ad5501_attributes[] = {
- &iio_dev_attr_out_voltage0_raw.dev_attr.attr,
&iio_dev_attr_out_voltage0_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage_powerdown_mode.dev_attr.attr,
&iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr,
- &iio_dev_attr_out_voltage_scale.dev_attr.attr,
NULL,
};
@@ -261,12 +264,16 @@ static irqreturn_t ad5504_event_handler(int irq, void *private)
}
static const struct iio_info ad5504_info = {
+ .write_raw = ad5504_write_raw,
+ .read_raw = ad5504_read_raw,
.attrs = &ad5504_attribute_group,
.event_attrs = &ad5504_ev_attribute_group,
.driver_module = THIS_MODULE,
};
static const struct iio_info ad5501_info = {
+ .write_raw = ad5504_write_raw,
+ .read_raw = ad5504_read_raw,
.attrs = &ad5501_attribute_group,
.event_attrs = &ad5504_ev_attribute_group,
.driver_module = THIS_MODULE,
@@ -307,10 +314,14 @@ static int __devinit ad5504_probe(struct spi_device *spi)
st->spi = spi;
indio_dev->dev.parent = &spi->dev;
indio_dev->name = spi_get_device_id(st->spi)->name;
- if (spi_get_device_id(st->spi)->driver_data == ID_AD5501)
+ if (spi_get_device_id(st->spi)->driver_data == ID_AD5501) {
indio_dev->info = &ad5501_info;
- else
+ indio_dev->num_channels = 1;
+ } else {
indio_dev->info = &ad5504_info;
+ indio_dev->num_channels = 4;
+ }
+ indio_dev->channels = ad5504_channels;
indio_dev->modes = INDIO_DIRECT_MODE;
if (spi->irq) {
@@ -367,6 +378,7 @@ static const struct spi_device_id ad5504_id[] = {
{"ad5501", ID_AD5501},
{}
};
+MODULE_DEVICE_TABLE(spi, ad5504_id);
static struct spi_driver ad5504_driver = {
.driver = {
@@ -377,18 +389,7 @@ static struct spi_driver ad5504_driver = {
.remove = __devexit_p(ad5504_remove),
.id_table = ad5504_id,
};
-
-static __init int ad5504_spi_init(void)
-{
- return spi_register_driver(&ad5504_driver);
-}
-module_init(ad5504_spi_init);
-
-static __exit void ad5504_spi_exit(void)
-{
- spi_unregister_driver(&ad5504_driver);
-}
-module_exit(ad5504_spi_exit);
+module_spi_driver(ad5504_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
 MODULE_DESCRIPTION("Analog Devices AD5504/AD5501 DAC");
diff --git a/drivers/staging/iio/dac/ad5504.h b/drivers/staging/iio/dac/ad5504.h
index 85beb1d..afe0952 100644
--- a/drivers/staging/iio/dac/ad5504.h
+++ b/drivers/staging/iio/dac/ad5504.h
@@ -18,10 +18,7 @@
/* Registers */
#define AD5504_ADDR_NOOP 0
-#define AD5504_ADDR_DAC0 1
-#define AD5504_ADDR_DAC1 2
-#define AD5504_ADDR_DAC2 3
-#define AD5504_ADDR_DAC3 4
+#define AD5504_ADDR_DAC(x) ((x) + 1)
#define AD5504_ADDR_ALL_DAC 5
#define AD5504_ADDR_CTRL 7
diff --git a/drivers/staging/iio/dac/ad5624r.h b/drivers/staging/iio/dac/ad5624r.h
index b71c6a0..5dca302 100644
--- a/drivers/staging/iio/dac/ad5624r.h
+++ b/drivers/staging/iio/dac/ad5624r.h
@@ -32,12 +32,12 @@
/**
* struct ad5624r_chip_info - chip specific information
- * @bits: accuracy of the DAC in bits
+ * @channels: channel spec for the DAC
* @int_vref_mv: AD5620/40/60: the internal reference voltage
*/
struct ad5624r_chip_info {
- u8 bits;
+ const struct iio_chan_spec *channels;
u16 int_vref_mv;
};
diff --git a/drivers/staging/iio/dac/ad5624r_spi.c b/drivers/staging/iio/dac/ad5624r_spi.c
index 284d879..10c7484 100644
--- a/drivers/staging/iio/dac/ad5624r_spi.c
+++ b/drivers/staging/iio/dac/ad5624r_spi.c
@@ -21,29 +21,51 @@
#include "dac.h"
#include "ad5624r.h"
+#define AD5624R_CHANNEL(_chan, _bits) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (_chan), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
+ .address = (_chan), \
+ .scan_type = IIO_ST('u', (_bits), 16, 16 - (_bits)), \
+}
+
+#define DECLARE_AD5624R_CHANNELS(_name, _bits) \
+ const struct iio_chan_spec _name##_channels[] = { \
+ AD5624R_CHANNEL(0, _bits), \
+ AD5624R_CHANNEL(1, _bits), \
+ AD5624R_CHANNEL(2, _bits), \
+ AD5624R_CHANNEL(3, _bits), \
+}
+
+static DECLARE_AD5624R_CHANNELS(ad5624r, 12);
+static DECLARE_AD5624R_CHANNELS(ad5644r, 14);
+static DECLARE_AD5624R_CHANNELS(ad5664r, 16);
+
static const struct ad5624r_chip_info ad5624r_chip_info_tbl[] = {
[ID_AD5624R3] = {
- .bits = 12,
- .int_vref_mv = 1250,
- },
- [ID_AD5644R3] = {
- .bits = 14,
- .int_vref_mv = 1250,
- },
- [ID_AD5664R3] = {
- .bits = 16,
+ .channels = ad5624r_channels,
.int_vref_mv = 1250,
},
[ID_AD5624R5] = {
- .bits = 12,
+ .channels = ad5624r_channels,
.int_vref_mv = 2500,
},
+ [ID_AD5644R3] = {
+ .channels = ad5644r_channels,
+ .int_vref_mv = 1250,
+ },
[ID_AD5644R5] = {
- .bits = 14,
+ .channels = ad5644r_channels,
.int_vref_mv = 2500,
},
+ [ID_AD5664R3] = {
+ .channels = ad5664r_channels,
+ .int_vref_mv = 1250,
+ },
[ID_AD5664R5] = {
- .bits = 16,
+ .channels = ad5664r_channels,
.int_vref_mv = 2500,
},
};
@@ -70,24 +92,49 @@ static int ad5624r_spi_write(struct spi_device *spi,
return spi_write(spi, msg, 3);
}
-static ssize_t ad5624r_write_dac(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
+static int ad5624r_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val,
+ int *val2,
+ long m)
{
- long readin;
- int ret;
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad5624r_state *st = iio_priv(indio_dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
+ unsigned long scale_uv;
- ret = strict_strtol(buf, 10, &readin);
- if (ret)
- return ret;
+ switch (m) {
+ case IIO_CHAN_INFO_SCALE:
+ scale_uv = (st->vref_mv * 1000) >> chan->scan_type.realbits;
+ *val = scale_uv / 1000;
+ *val2 = (scale_uv % 1000) * 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
- ret = ad5624r_spi_write(st->us, AD5624R_CMD_WRITE_INPUT_N_UPDATE_N,
- this_attr->address, readin,
- st->chip_info->bits);
- return ret ? ret : len;
+ }
+ return -EINVAL;
+}
+
+static int ad5624r_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
+{
+ struct ad5624r_state *st = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case 0:
+ if (val >= (1 << chan->scan_type.realbits) || val < 0)
+ return -EINVAL;
+
+ return ad5624r_spi_write(st->us,
+ AD5624R_CMD_WRITE_INPUT_N_UPDATE_N,
+ chan->address, val,
+ chan->scan_type.shift);
+ default:
+ ret = -EINVAL;
+ }
+
+ return -EINVAL;
}
static ssize_t ad5624r_read_powerdown_mode(struct device *dev,
@@ -161,24 +208,6 @@ static ssize_t ad5624r_write_dac_powerdown(struct device *dev,
return ret ? ret : len;
}
-static ssize_t ad5624r_show_scale(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct ad5624r_state *st = iio_priv(indio_dev);
- /* Corresponds to Vref / 2^(bits) */
- unsigned int scale_uv = (st->vref_mv * 1000) >> st->chip_info->bits;
-
- return sprintf(buf, "%d.%03d\n", scale_uv / 1000, scale_uv % 1000);
-}
-static IIO_DEVICE_ATTR(out_voltage_scale, S_IRUGO, ad5624r_show_scale, NULL, 0);
-
-static IIO_DEV_ATTR_OUT_RAW(0, ad5624r_write_dac, AD5624R_ADDR_DAC0);
-static IIO_DEV_ATTR_OUT_RAW(1, ad5624r_write_dac, AD5624R_ADDR_DAC1);
-static IIO_DEV_ATTR_OUT_RAW(2, ad5624r_write_dac, AD5624R_ADDR_DAC2);
-static IIO_DEV_ATTR_OUT_RAW(3, ad5624r_write_dac, AD5624R_ADDR_DAC3);
-
static IIO_DEVICE_ATTR(out_voltage_powerdown_mode, S_IRUGO |
S_IWUSR, ad5624r_read_powerdown_mode,
ad5624r_write_powerdown_mode, 0);
@@ -200,17 +229,12 @@ static IIO_DEV_ATTR_DAC_POWERDOWN(3, ad5624r_read_dac_powerdown,
ad5624r_write_dac_powerdown, 3);
static struct attribute *ad5624r_attributes[] = {
- &iio_dev_attr_out_voltage0_raw.dev_attr.attr,
- &iio_dev_attr_out_voltage1_raw.dev_attr.attr,
- &iio_dev_attr_out_voltage2_raw.dev_attr.attr,
- &iio_dev_attr_out_voltage3_raw.dev_attr.attr,
&iio_dev_attr_out_voltage0_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage1_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage2_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage3_powerdown.dev_attr.attr,
&iio_dev_attr_out_voltage_powerdown_mode.dev_attr.attr,
&iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr,
- &iio_dev_attr_out_voltage_scale.dev_attr.attr,
NULL,
};
@@ -219,6 +243,8 @@ static const struct attribute_group ad5624r_attribute_group = {
};
static const struct iio_info ad5624r_info = {
+ .write_raw = ad5624r_write_raw,
+ .read_raw = ad5624r_read_raw,
.attrs = &ad5624r_attribute_group,
.driver_module = THIS_MODULE,
};
@@ -259,6 +285,8 @@ static int __devinit ad5624r_probe(struct spi_device *spi)
indio_dev->name = spi_get_device_id(spi)->name;
indio_dev->info = &ad5624r_info;
indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = st->chip_info->channels;
+ indio_dev->num_channels = AD5624R_DAC_CHANNELS;
ret = ad5624r_spi_write(spi, AD5624R_CMD_INTERNAL_REFER_SETUP, 0,
!!voltage_uv, 16);
@@ -307,6 +335,7 @@ static const struct spi_device_id ad5624r_id[] = {
{"ad5664r5", ID_AD5664R5},
{}
};
+MODULE_DEVICE_TABLE(spi, ad5624r_id);
static struct spi_driver ad5624r_driver = {
.driver = {
@@ -317,18 +346,7 @@ static struct spi_driver ad5624r_driver = {
.remove = __devexit_p(ad5624r_remove),
.id_table = ad5624r_id,
};
-
-static __init int ad5624r_spi_init(void)
-{
- return spi_register_driver(&ad5624r_driver);
-}
-module_init(ad5624r_spi_init);
-
-static __exit void ad5624r_spi_exit(void)
-{
- spi_unregister_driver(&ad5624r_driver);
-}
-module_exit(ad5624r_spi_exit);
+module_spi_driver(ad5624r_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices AD5624/44/64R DAC spi driver");
diff --git a/drivers/staging/iio/dac/ad5686.c b/drivers/staging/iio/dac/ad5686.c
index 974c6f5..ce2d619 100644
--- a/drivers/staging/iio/dac/ad5686.c
+++ b/drivers/staging/iio/dac/ad5686.c
@@ -99,7 +99,7 @@ enum ad5686_supported_device_ids {
.indexed = 1, \
.output = 1, \
.channel = chan, \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
.address = AD5686_ADDR_DAC(chan), \
.scan_type = IIO_ST('u', bits, 16, shift) \
}
@@ -306,7 +306,7 @@ static int ad5686_read_raw(struct iio_dev *indio_dev,
*val = ret;
return IIO_VAL_INT;
break;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
scale_uv = (st->vref_mv * 100000)
>> (chan->scan_type.realbits);
*val = scale_uv / 100000;
@@ -437,6 +437,7 @@ static const struct spi_device_id ad5686_id[] = {
{"ad5686", ID_AD5686},
{}
};
+MODULE_DEVICE_TABLE(spi, ad5686_id);
static struct spi_driver ad5686_driver = {
.driver = {
@@ -447,18 +448,7 @@ static struct spi_driver ad5686_driver = {
.remove = __devexit_p(ad5686_remove),
.id_table = ad5686_id,
};
-
-static __init int ad5686_spi_init(void)
-{
- return spi_register_driver(&ad5686_driver);
-}
-module_init(ad5686_spi_init);
-
-static __exit void ad5686_spi_exit(void)
-{
- spi_unregister_driver(&ad5686_driver);
-}
-module_exit(ad5686_spi_exit);
+module_spi_driver(ad5686_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD5686/85/84 DAC");
diff --git a/drivers/staging/iio/dac/ad5764.c b/drivers/staging/iio/dac/ad5764.c
new file mode 100644
index 0000000..ff91480
--- /dev/null
+++ b/drivers/staging/iio/dac/ad5764.c
@@ -0,0 +1,393 @@
+/*
+ * Analog devices AD5764, AD5764R, AD5744, AD5744R quad-channel
+ * Digital to Analog Converters driver
+ *
+ * Copyright 2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+#include <linux/regulator/consumer.h>
+
+#include "../iio.h"
+#include "../sysfs.h"
+#include "dac.h"
+
+#define AD5764_REG_SF_NOP 0x0
+#define AD5764_REG_SF_CONFIG 0x1
+#define AD5764_REG_SF_CLEAR 0x4
+#define AD5764_REG_SF_LOAD 0x5
+#define AD5764_REG_DATA(x) ((2 << 3) | (x))
+#define AD5764_REG_COARSE_GAIN(x) ((3 << 3) | (x))
+#define AD5764_REG_FINE_GAIN(x) ((4 << 3) | (x))
+#define AD5764_REG_OFFSET(x) ((5 << 3) | (x))
+
+#define AD5764_NUM_CHANNELS 4
+
+/**
+ * struct ad5764_chip_info - chip specific information
+ * @int_vref: Value of the internal reference voltage in uV - 0 if external
+ * reference voltage is used
+ * @channels:	channel specification
+ */
+
+struct ad5764_chip_info {
+ unsigned long int_vref;
+ const struct iio_chan_spec *channels;
+};
+
+/**
+ * struct ad5764_state - driver instance specific data
+ * @spi: spi_device
+ * @chip_info: chip info
+ * @vref_reg: vref supply regulators
+ * @data: spi transfer buffers
+ */
+
+struct ad5764_state {
+ struct spi_device *spi;
+ const struct ad5764_chip_info *chip_info;
+ struct regulator_bulk_data vref_reg[2];
+
+ /*
+ * DMA (thus cache coherency maintenance) requires the
+ * transfer buffers to live in their own cache lines.
+ */
+ union {
+ __be32 d32;
+ u8 d8[4];
+ } data[2] ____cacheline_aligned;
+};
+
+enum ad5764_type {
+ ID_AD5744,
+ ID_AD5744R,
+ ID_AD5764,
+ ID_AD5764R,
+};
+
+#define AD5764_CHANNEL(_chan, _bits) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (_chan), \
+ .address = (_chan), \
+ .info_mask = IIO_CHAN_INFO_OFFSET_SHARED_BIT | \
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT, \
+ .scan_type = IIO_ST('u', (_bits), 16, 16 - (_bits)) \
+}
+
+#define DECLARE_AD5764_CHANNELS(_name, _bits) \
+const struct iio_chan_spec _name##_channels[] = { \
+ AD5764_CHANNEL(0, (_bits)), \
+ AD5764_CHANNEL(1, (_bits)), \
+ AD5764_CHANNEL(2, (_bits)), \
+ AD5764_CHANNEL(3, (_bits)), \
+};
+
+static DECLARE_AD5764_CHANNELS(ad5764, 16);
+static DECLARE_AD5764_CHANNELS(ad5744, 14);
+
+static const struct ad5764_chip_info ad5764_chip_infos[] = {
+ [ID_AD5744] = {
+ .int_vref = 0,
+ .channels = ad5744_channels,
+ },
+ [ID_AD5744R] = {
+ .int_vref = 5000000,
+ .channels = ad5744_channels,
+ },
+ [ID_AD5764] = {
+ .int_vref = 0,
+ .channels = ad5764_channels,
+ },
+ [ID_AD5764R] = {
+ .int_vref = 5000000,
+ .channels = ad5764_channels,
+ },
+};
+
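+/*
+ * Each register access is a 24-bit frame: the register address goes into the
+ * top byte (bit 23 selects read), the 16-bit data into the lower two bytes.
+ * The frame is built in a big-endian 32-bit buffer and only its lower three
+ * bytes are transferred.
+ */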
+static int ad5764_write(struct iio_dev *indio_dev, unsigned int reg,
+ unsigned int val)
+{
+ struct ad5764_state *st = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&indio_dev->mlock);
+ st->data[0].d32 = cpu_to_be32((reg << 16) | val);
+
+ ret = spi_write(st->spi, &st->data[0].d8[1], 3);
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static int ad5764_read(struct iio_dev *indio_dev, unsigned int reg,
+ unsigned int *val)
+{
+ struct ad5764_state *st = iio_priv(indio_dev);
+ struct spi_message m;
+ int ret;
+ struct spi_transfer t[] = {
+ {
+ .tx_buf = &st->data[0].d8[1],
+ .len = 3,
+ .cs_change = 1,
+ }, {
+ .rx_buf = &st->data[1].d8[1],
+ .len = 3,
+ },
+ };
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t[0], &m);
+ spi_message_add_tail(&t[1], &m);
+
+ mutex_lock(&indio_dev->mlock);
+
+ st->data[0].d32 = cpu_to_be32((1 << 23) | (reg << 16));
+
+ ret = spi_sync(st->spi, &m);
+ if (ret >= 0)
+ *val = be32_to_cpu(st->data[1].d32) & 0xffff;
+
+ mutex_unlock(&indio_dev->mlock);
+
+ return ret;
+}
+
+static int ad5764_chan_info_to_reg(struct iio_chan_spec const *chan, long info)
+{
+ switch (info) {
+ case 0:
+ return AD5764_REG_DATA(chan->address);
+ case IIO_CHAN_INFO_CALIBBIAS:
+ return AD5764_REG_OFFSET(chan->address);
+ case IIO_CHAN_INFO_CALIBSCALE:
+ return AD5764_REG_FINE_GAIN(chan->address);
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int ad5764_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long info)
+{
+ const int max_val = (1 << chan->scan_type.realbits);
+ unsigned int reg;
+
+ switch (info) {
+ case 0:
+ if (val >= max_val || val < 0)
+ return -EINVAL;
+ val <<= chan->scan_type.shift;
+ break;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ if (val >= 128 || val < -128)
+ return -EINVAL;
+ break;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ if (val >= 32 || val < -32)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ reg = ad5764_chan_info_to_reg(chan, info);
+ return ad5764_write(indio_dev, reg, (u16)val);
+}
+
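+/*
+ * The R variants provide a fixed internal reference. On the external
+ * reference parts each supply feeds a pair of channels (vrefAB for channels
+ * 0 and 1, vrefCD for channels 2 and 3), hence the channel / 2 lookup.
+ */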
+static int ad5764_get_channel_vref(struct ad5764_state *st,
+ unsigned int channel)
+{
+ if (st->chip_info->int_vref)
+ return st->chip_info->int_vref;
+ else
+ return regulator_get_voltage(st->vref_reg[channel / 2].consumer);
+}
+
+static int ad5764_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val, int *val2, long info)
+{
+ struct ad5764_state *st = iio_priv(indio_dev);
+ unsigned long scale_uv;
+ unsigned int reg;
+ int vref;
+ int ret;
+
+ switch (info) {
+ case 0:
+ reg = AD5764_REG_DATA(chan->address);
+ ret = ad5764_read(indio_dev, reg, val);
+ if (ret < 0)
+ return ret;
+ *val >>= chan->scan_type.shift;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ reg = AD5764_REG_OFFSET(chan->address);
+ ret = ad5764_read(indio_dev, reg, val);
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(*val, 7);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_CALIBSCALE:
+ reg = AD5764_REG_FINE_GAIN(chan->address);
+ ret = ad5764_read(indio_dev, reg, val);
+ if (ret < 0)
+ return ret;
+ *val = sign_extend32(*val, 5);
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+		/* vout = 4 * vref * ((dac_code / 65536) - 0.5) */
+ vref = ad5764_get_channel_vref(st, chan->channel);
+ if (vref < 0)
+ return vref;
+
+ scale_uv = (vref * 4 * 100) >> chan->scan_type.realbits;
+ *val = scale_uv / 100000;
+ *val2 = (scale_uv % 100000) * 10;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_OFFSET:
+ *val = -(1 << chan->scan_type.realbits) / 2;
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info ad5764_info = {
+ .read_raw = ad5764_read_raw,
+ .write_raw = ad5764_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int __devinit ad5764_probe(struct spi_device *spi)
+{
+ enum ad5764_type type = spi_get_device_id(spi)->driver_data;
+ struct iio_dev *indio_dev;
+ struct ad5764_state *st;
+ int ret;
+
+ indio_dev = iio_allocate_device(sizeof(*st));
+ if (indio_dev == NULL) {
+ dev_err(&spi->dev, "Failed to allocate iio device\n");
+ return -ENOMEM;
+ }
+
+ st = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+
+ st->spi = spi;
+ st->chip_info = &ad5764_chip_infos[type];
+
+ indio_dev->dev.parent = &spi->dev;
+ indio_dev->name = spi_get_device_id(spi)->name;
+ indio_dev->info = &ad5764_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->num_channels = AD5764_NUM_CHANNELS;
+ indio_dev->channels = st->chip_info->channels;
+
+ if (st->chip_info->int_vref == 0) {
+ st->vref_reg[0].supply = "vrefAB";
+ st->vref_reg[1].supply = "vrefCD";
+
+ ret = regulator_bulk_get(&st->spi->dev,
+ ARRAY_SIZE(st->vref_reg), st->vref_reg);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to request vref regulators: %d\n",
+ ret);
+ goto error_free;
+ }
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(st->vref_reg),
+ st->vref_reg);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to enable vref regulators: %d\n",
+ ret);
+ goto error_free_reg;
+ }
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&spi->dev, "Failed to register iio device: %d\n", ret);
+ goto error_disable_reg;
+ }
+
+ return 0;
+
+error_disable_reg:
+ if (st->chip_info->int_vref == 0)
+ regulator_bulk_disable(ARRAY_SIZE(st->vref_reg), st->vref_reg);
+error_free_reg:
+ if (st->chip_info->int_vref == 0)
+ regulator_bulk_free(ARRAY_SIZE(st->vref_reg), st->vref_reg);
+error_free:
+ iio_free_device(indio_dev);
+
+ return ret;
+}
+
+static int __devexit ad5764_remove(struct spi_device *spi)
+{
+ struct iio_dev *indio_dev = spi_get_drvdata(spi);
+ struct ad5764_state *st = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ if (st->chip_info->int_vref == 0) {
+ regulator_bulk_disable(ARRAY_SIZE(st->vref_reg), st->vref_reg);
+ regulator_bulk_free(ARRAY_SIZE(st->vref_reg), st->vref_reg);
+ }
+
+ iio_free_device(indio_dev);
+
+ return 0;
+}
+
+static const struct spi_device_id ad5764_ids[] = {
+ { "ad5744", ID_AD5744 },
+ { "ad5744r", ID_AD5744R },
+ { "ad5764", ID_AD5764 },
+ { "ad5764r", ID_AD5764R },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ad5764_ids);
+
+static struct spi_driver ad5764_driver = {
+ .driver = {
+ .name = "ad5764",
+ .owner = THIS_MODULE,
+ },
+ .probe = ad5764_probe,
+ .remove = __devexit_p(ad5764_remove),
+ .id_table = ad5764_ids,
+};
+
+static int __init ad5764_spi_init(void)
+{
+ return spi_register_driver(&ad5764_driver);
+}
+module_init(ad5764_spi_init);
+
+static void __exit ad5764_spi_exit(void)
+{
+ spi_unregister_driver(&ad5764_driver);
+}
+module_exit(ad5764_spi_exit);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices AD5744/AD5744R/AD5764/AD5764R DAC");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/dac/ad5791.c b/drivers/staging/iio/dac/ad5791.c
index 6fbca8d..ac45636 100644
--- a/drivers/staging/iio/dac/ad5791.c
+++ b/drivers/staging/iio/dac/ad5791.c
@@ -1,5 +1,6 @@
/*
- * AD5760, AD5780, AD5781, AD5791 Voltage Output Digital to Analog Converter
+ * AD5760, AD5780, AD5781, AD5790, AD5791 Voltage Output Digital to Analog
+ * Converter
*
* Copyright 2011 Analog Devices Inc.
*
@@ -77,8 +78,8 @@ static int ad5791_spi_read(struct spi_device *spi, u8 addr, u32 *val)
.indexed = 1, \
.address = AD5791_ADDR_DAC0, \
.channel = 0, \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED) | \
- (1 << IIO_CHAN_INFO_OFFSET_SHARED), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT | \
+ IIO_CHAN_INFO_OFFSET_SHARED_BIT, \
.scan_type = IIO_ST('u', bits, 24, shift) \
}
@@ -237,11 +238,11 @@ static int ad5791_read_raw(struct iio_dev *indio_dev,
*val &= AD5791_DAC_MASK;
*val >>= chan->scan_type.shift;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
*val = 0;
*val2 = (((u64)st->vref_mv) * 1000000ULL) >> chan->scan_type.realbits;
return IIO_VAL_INT_PLUS_MICRO;
- case (1 << IIO_CHAN_INFO_OFFSET_SHARED):
+ case IIO_CHAN_INFO_OFFSET:
val64 = (((u64)st->vref_neg_mv) << chan->scan_type.realbits);
do_div(val64, st->vref_mv);
*val = -val64;
@@ -397,9 +398,11 @@ static const struct spi_device_id ad5791_id[] = {
{"ad5760", ID_AD5760},
{"ad5780", ID_AD5780},
{"ad5781", ID_AD5781},
+ {"ad5790", ID_AD5791},
{"ad5791", ID_AD5791},
{}
};
+MODULE_DEVICE_TABLE(spi, ad5791_id);
static struct spi_driver ad5791_driver = {
.driver = {
@@ -410,19 +413,8 @@ static struct spi_driver ad5791_driver = {
.remove = __devexit_p(ad5791_remove),
.id_table = ad5791_id,
};
-
-static __init int ad5791_spi_init(void)
-{
- return spi_register_driver(&ad5791_driver);
-}
-module_init(ad5791_spi_init);
-
-static __exit void ad5791_spi_exit(void)
-{
- spi_unregister_driver(&ad5791_driver);
-}
-module_exit(ad5791_spi_exit);
+module_spi_driver(ad5791_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
-MODULE_DESCRIPTION("Analog Devices AD5760/AD5780/AD5781/AD5791 DAC");
+MODULE_DESCRIPTION("Analog Devices AD5760/AD5780/AD5781/AD5790/AD5791 DAC");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/dac/max517.c b/drivers/staging/iio/dac/max517.c
index adfbd20..a4df6d7 100644
--- a/drivers/staging/iio/dac/max517.c
+++ b/drivers/staging/iio/dac/max517.c
@@ -280,20 +280,8 @@ static struct i2c_driver max517_driver = {
.resume = max517_resume,
.id_table = max517_id,
};
-
-static int __init max517_init(void)
-{
- return i2c_add_driver(&max517_driver);
-}
-
-static void __exit max517_exit(void)
-{
- i2c_del_driver(&max517_driver);
-}
+module_i2c_driver(max517_driver);
MODULE_AUTHOR("Roland Stigge <stigge@antcom.de>");
MODULE_DESCRIPTION("MAX517/MAX518/MAX519 8-bit DAC");
MODULE_LICENSE("GPL");
-
-module_init(max517_init);
-module_exit(max517_exit);
diff --git a/drivers/staging/iio/dds/ad5930.c b/drivers/staging/iio/dds/ad5930.c
index f5e368b..9c32d1b 100644
--- a/drivers/staging/iio/dds/ad5930.c
+++ b/drivers/staging/iio/dds/ad5930.c
@@ -143,19 +143,9 @@ static struct spi_driver ad5930_driver = {
.probe = ad5930_probe,
.remove = __devexit_p(ad5930_remove),
};
-
-static __init int ad5930_spi_init(void)
-{
- return spi_register_driver(&ad5930_driver);
-}
-module_init(ad5930_spi_init);
-
-static __exit void ad5930_spi_exit(void)
-{
- spi_unregister_driver(&ad5930_driver);
-}
-module_exit(ad5930_spi_exit);
+module_spi_driver(ad5930_driver);
MODULE_AUTHOR("Cliff Cai");
MODULE_DESCRIPTION("Analog Devices ad5930 driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/staging/iio/dds/ad9832.c b/drivers/staging/iio/dds/ad9832.c
index 9b4ff60..2ccf25d 100644
--- a/drivers/staging/iio/dds/ad9832.c
+++ b/drivers/staging/iio/dds/ad9832.c
@@ -88,7 +88,7 @@ static ssize_t ad9832_write(struct device *dev,
goto error_ret;
mutex_lock(&indio_dev->mlock);
- switch (this_attr->address) {
+ switch ((u32) this_attr->address) {
case AD9832_FREQ0HM:
case AD9832_FREQ1HM:
ret = ad9832_write_frequency(st, this_attr->address, val);
@@ -344,31 +344,19 @@ static const struct spi_device_id ad9832_id[] = {
{"ad9835", 0},
{}
};
+MODULE_DEVICE_TABLE(spi, ad9832_id);
static struct spi_driver ad9832_driver = {
.driver = {
.name = "ad9832",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad9832_probe,
.remove = __devexit_p(ad9832_remove),
.id_table = ad9832_id,
};
-
-static int __init ad9832_init(void)
-{
- return spi_register_driver(&ad9832_driver);
-}
-module_init(ad9832_init);
-
-static void __exit ad9832_exit(void)
-{
- spi_unregister_driver(&ad9832_driver);
-}
-module_exit(ad9832_exit);
+module_spi_driver(ad9832_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD9832/AD9835 DDS");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:ad9832");
diff --git a/drivers/staging/iio/dds/ad9834.c b/drivers/staging/iio/dds/ad9834.c
index c468f69..5e67104 100644
--- a/drivers/staging/iio/dds/ad9834.c
+++ b/drivers/staging/iio/dds/ad9834.c
@@ -77,7 +77,7 @@ static ssize_t ad9834_write(struct device *dev,
goto error_ret;
mutex_lock(&indio_dev->mlock);
- switch (this_attr->address) {
+ switch ((u32) this_attr->address) {
case AD9834_REG_FREQ0:
case AD9834_REG_FREQ1:
ret = ad9834_write_frequency(st, this_attr->address, val);
@@ -153,7 +153,7 @@ static ssize_t ad9834_store_wavetype(struct device *dev,
mutex_lock(&indio_dev->mlock);
- switch (this_attr->address) {
+ switch ((u32) this_attr->address) {
case 0:
if (sysfs_streq(buf, "sine")) {
st->control &= ~AD9834_MODE;
@@ -281,14 +281,14 @@ static struct attribute *ad9834_attributes[] = {
NULL,
};
-static mode_t ad9834_attr_is_visible(struct kobject *kobj,
+static umode_t ad9834_attr_is_visible(struct kobject *kobj,
struct attribute *attr, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct ad9834_state *st = iio_priv(indio_dev);
- mode_t mode = attr->mode;
+ umode_t mode = attr->mode;
if (((st->devid == ID_AD9833) || (st->devid == ID_AD9837)) &&
((attr == &iio_dev_attr_dds0_out1_enable.dev_attr.attr) ||
@@ -435,31 +435,19 @@ static const struct spi_device_id ad9834_id[] = {
{"ad9838", ID_AD9838},
{}
};
+MODULE_DEVICE_TABLE(spi, ad9834_id);
static struct spi_driver ad9834_driver = {
.driver = {
.name = "ad9834",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = ad9834_probe,
.remove = __devexit_p(ad9834_remove),
.id_table = ad9834_id,
};
-
-static int __init ad9834_init(void)
-{
- return spi_register_driver(&ad9834_driver);
-}
-module_init(ad9834_init);
-
-static void __exit ad9834_exit(void)
-{
- spi_unregister_driver(&ad9834_driver);
-}
-module_exit(ad9834_exit);
+module_spi_driver(ad9834_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD9833/AD9834/AD9837/AD9838 DDS");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:ad9834");
diff --git a/drivers/staging/iio/dds/ad9850.c b/drivers/staging/iio/dds/ad9850.c
index a14771b..f4f731b 100644
--- a/drivers/staging/iio/dds/ad9850.c
+++ b/drivers/staging/iio/dds/ad9850.c
@@ -129,19 +129,9 @@ static struct spi_driver ad9850_driver = {
.probe = ad9850_probe,
.remove = __devexit_p(ad9850_remove),
};
-
-static __init int ad9850_spi_init(void)
-{
- return spi_register_driver(&ad9850_driver);
-}
-module_init(ad9850_spi_init);
-
-static __exit void ad9850_spi_exit(void)
-{
- spi_unregister_driver(&ad9850_driver);
-}
-module_exit(ad9850_spi_exit);
+module_spi_driver(ad9850_driver);
MODULE_AUTHOR("Cliff Cai");
MODULE_DESCRIPTION("Analog Devices ad9850 driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/staging/iio/dds/ad9852.c b/drivers/staging/iio/dds/ad9852.c
index cfceaa6..554266c 100644
--- a/drivers/staging/iio/dds/ad9852.c
+++ b/drivers/staging/iio/dds/ad9852.c
@@ -280,19 +280,9 @@ static struct spi_driver ad9852_driver = {
.probe = ad9852_probe,
.remove = __devexit_p(ad9852_remove),
};
-
-static __init int ad9852_spi_init(void)
-{
- return spi_register_driver(&ad9852_driver);
-}
-module_init(ad9852_spi_init);
-
-static __exit void ad9852_spi_exit(void)
-{
- spi_unregister_driver(&ad9852_driver);
-}
-module_exit(ad9852_spi_exit);
+module_spi_driver(ad9852_driver);
MODULE_AUTHOR("Cliff Cai");
MODULE_DESCRIPTION("Analog Devices ad9852 driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/staging/iio/dds/ad9910.c b/drivers/staging/iio/dds/ad9910.c
index da83d2b..3985766 100644
--- a/drivers/staging/iio/dds/ad9910.c
+++ b/drivers/staging/iio/dds/ad9910.c
@@ -413,19 +413,9 @@ static struct spi_driver ad9910_driver = {
.probe = ad9910_probe,
.remove = __devexit_p(ad9910_remove),
};
-
-static __init int ad9910_spi_init(void)
-{
- return spi_register_driver(&ad9910_driver);
-}
-module_init(ad9910_spi_init);
-
-static __exit void ad9910_spi_exit(void)
-{
- spi_unregister_driver(&ad9910_driver);
-}
-module_exit(ad9910_spi_exit);
+module_spi_driver(ad9910_driver);
MODULE_AUTHOR("Cliff Cai");
MODULE_DESCRIPTION("Analog Devices ad9910 driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/staging/iio/dds/ad9951.c b/drivers/staging/iio/dds/ad9951.c
index 20c1825..4d15004 100644
--- a/drivers/staging/iio/dds/ad9951.c
+++ b/drivers/staging/iio/dds/ad9951.c
@@ -224,19 +224,9 @@ static struct spi_driver ad9951_driver = {
.probe = ad9951_probe,
.remove = __devexit_p(ad9951_remove),
};
-
-static __init int ad9951_spi_init(void)
-{
- return spi_register_driver(&ad9951_driver);
-}
-module_init(ad9951_spi_init);
-
-static __exit void ad9951_spi_exit(void)
-{
- spi_unregister_driver(&ad9951_driver);
-}
-module_exit(ad9951_spi_exit);
+module_spi_driver(ad9951_driver);
MODULE_AUTHOR("Cliff Cai");
MODULE_DESCRIPTION("Analog Devices ad9951 driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:" DRV_NAME);
diff --git a/drivers/staging/iio/events.h b/drivers/staging/iio/events.h
new file mode 100644
index 0000000..bfb6340
--- /dev/null
+++ b/drivers/staging/iio/events.h
@@ -0,0 +1,103 @@
+/* The industrial I/O - event passing to userspace
+ *
+ * Copyright (c) 2008-2011 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#ifndef _IIO_EVENTS_H_
+#define _IIO_EVENTS_H_
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include "types.h"
+
+/**
+ * struct iio_event_data - The actual event being pushed to userspace
+ * @id: event identifier
+ * @timestamp: best estimate of time of event occurrence (often from
+ * the interrupt handler)
+ */
+struct iio_event_data {
+ __u64 id;
+ __s64 timestamp;
+};
+
+#define IIO_GET_EVENT_FD_IOCTL _IOR('i', 0x90, int)
+
+enum iio_event_type {
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_TYPE_MAG,
+ IIO_EV_TYPE_ROC,
+ IIO_EV_TYPE_THRESH_ADAPTIVE,
+ IIO_EV_TYPE_MAG_ADAPTIVE,
+};
+
+enum iio_event_direction {
+ IIO_EV_DIR_EITHER,
+ IIO_EV_DIR_RISING,
+ IIO_EV_DIR_FALLING,
+};
+
+/**
+ * IIO_EVENT_CODE() - create event identifier
+ * @chan_type: Type of the channel. Should be one of enum iio_chan_type.
+ * @diff: Whether the event is for a differential channel or not.
+ * @modifier: Modifier for the channel. Should be one of enum iio_modifier.
+ * @direction: Direction of the event. One of enum iio_event_direction.
+ * @type: Type of the event. Should be one of enum iio_event_type.
+ * @chan: Channel number for non-differential channels.
+ * @chan1: First channel number for differential channels.
+ * @chan2: Second channel number for differential channels.
+ */
+
+#define IIO_EVENT_CODE(chan_type, diff, modifier, direction, \
+ type, chan, chan1, chan2) \
+ (((u64)type << 56) | ((u64)diff << 55) | \
+ ((u64)direction << 48) | ((u64)modifier << 40) | \
+ ((u64)chan_type << 32) | (((u16)chan2) << 16) | ((u16)chan1) | \
+ ((u16)chan))
+
+
+#define IIO_EV_DIR_MAX 4
+#define IIO_EV_BIT(type, direction) \
+ (1 << (type*IIO_EV_DIR_MAX + direction))
+
+/**
+ * IIO_MOD_EVENT_CODE() - create event identifier for modified channels
+ * @chan_type: Type of the channel. Should be one of enum iio_chan_type.
+ * @number: Channel number.
+ * @modifier: Modifier for the channel. Should be one of enum iio_modifier.
+ * @type: Type of the event. Should be one of enum iio_event_type.
+ * @direction: Direction of the event. One of enum iio_event_direction.
+ */
+
+#define IIO_MOD_EVENT_CODE(chan_type, number, modifier, \
+ type, direction) \
+ IIO_EVENT_CODE(chan_type, 0, modifier, direction, type, number, 0, 0)
+
+/**
+ * IIO_UNMOD_EVENT_CODE() - create event identifier for unmodified channels
+ * @chan_type: Type of the channel. Should be one of enum iio_chan_type.
+ * @number: Channel number.
+ * @type: Type of the event. Should be one of enum iio_event_type.
+ * @direction: Direction of the event. One of enum iio_event_direction.
+ */
+
+#define IIO_UNMOD_EVENT_CODE(chan_type, number, type, direction) \
+ IIO_EVENT_CODE(chan_type, 0, 0, direction, type, number, 0, 0)
+
+#define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF)
+
+#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0xCF)
+
+#define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF)
+
+/* Event code number extraction depends on which type of event we have.
+ * Perhaps review this function in the future. */
+#define IIO_EVENT_CODE_EXTRACT_NUM(mask) ((__s16)(mask & 0xFFFF))
+
+#define IIO_EVENT_CODE_EXTRACT_MODIFIER(mask) ((mask >> 40) & 0xFF)
+
+#endif
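/* Sketch, not part of the patch: how a driver might build and decode an event
 * identifier with the helpers added above.  Assumes events.h is included and
 * that enum iio_chan_type (e.g. IIO_VOLTAGE) comes from the local types.h. */
static void example_event_code(void)
{
	/* threshold event, rising direction, on unmodified voltage channel 2 */
	u64 code = IIO_UNMOD_EVENT_CODE(IIO_VOLTAGE, 2,
					IIO_EV_TYPE_THRESH,
					IIO_EV_DIR_RISING);

	/* the extract macros recover the individual fields */
	int type = IIO_EVENT_CODE_EXTRACT_TYPE(code);	/* IIO_EV_TYPE_THRESH */
	int dir = IIO_EVENT_CODE_EXTRACT_DIR(code);	/* IIO_EV_DIR_RISING */
	int chan = IIO_EVENT_CODE_EXTRACT_NUM(code);	/* 2 */

	(void)type; (void)dir; (void)chan;
}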
diff --git a/drivers/staging/iio/gyro/Kconfig b/drivers/staging/iio/gyro/Kconfig
index 22aea5b..ea295b2 100644
--- a/drivers/staging/iio/gyro/Kconfig
+++ b/drivers/staging/iio/gyro/Kconfig
@@ -37,11 +37,11 @@ config ADIS16260
will be called adis16260.
config ADXRS450
- tristate "Analog Devices ADXRS450 Digital Output Gyroscope SPI driver"
+ tristate "Analog Devices ADXRS450/3 Digital Output Gyroscope SPI driver"
depends on SPI
help
- Say yes here to build support for Analog Devices ADXRS450 programmable
- digital output gyroscope.
+ Say yes here to build support for Analog Devices ADXRS450 and ADXRS453
+ programmable digital output gyroscopes.
This driver can also be built as a module. If so, the module
will be called adxrs450.
diff --git a/drivers/staging/iio/gyro/adis16060_core.c b/drivers/staging/iio/gyro/adis16060_core.c
index ff1b5a8..c0ca709 100644
--- a/drivers/staging/iio/gyro/adis16060_core.c
+++ b/drivers/staging/iio/gyro/adis16060_core.c
@@ -98,11 +98,11 @@ static int adis16060_read_raw(struct iio_dev *indio_dev,
mutex_unlock(&indio_dev->mlock);
*val = tval;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
*val = -7;
*val2 = 461117;
return IIO_VAL_INT_PLUS_MICRO;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
*val = 0;
*val2 = 34000;
return IIO_VAL_INT_PLUS_MICRO;
@@ -136,8 +136,8 @@ static const struct iio_chan_spec adis16060_channels[] = {
.type = IIO_TEMP,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = ADIS16060_TEMP_OUT,
}
};
diff --git a/drivers/staging/iio/gyro/adis16080_core.c b/drivers/staging/iio/gyro/adis16080_core.c
index 5d7a906..1815490 100644
--- a/drivers/staging/iio/gyro/adis16080_core.c
+++ b/drivers/staging/iio/gyro/adis16080_core.c
@@ -189,19 +189,9 @@ static struct spi_driver adis16080_driver = {
.probe = adis16080_probe,
.remove = __devexit_p(adis16080_remove),
};
-
-static __init int adis16080_init(void)
-{
- return spi_register_driver(&adis16080_driver);
-}
-module_init(adis16080_init);
-
-static __exit void adis16080_exit(void)
-{
- spi_unregister_driver(&adis16080_driver);
-}
-module_exit(adis16080_exit);
+module_spi_driver(adis16080_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADIS16080/100 Yaw Rate Gyroscope Driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16080");
diff --git a/drivers/staging/iio/gyro/adis16130_core.c b/drivers/staging/iio/gyro/adis16130_core.c
index 749240d..947eb86 100644
--- a/drivers/staging/iio/gyro/adis16130_core.c
+++ b/drivers/staging/iio/gyro/adis16130_core.c
@@ -168,19 +168,9 @@ static struct spi_driver adis16130_driver = {
.probe = adis16130_probe,
.remove = __devexit_p(adis16130_remove),
};
-
-static __init int adis16130_init(void)
-{
- return spi_register_driver(&adis16130_driver);
-}
-module_init(adis16130_init);
-
-static __exit void adis16130_exit(void)
-{
- spi_unregister_driver(&adis16130_driver);
-}
-module_exit(adis16130_exit);
+module_spi_driver(adis16130_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADIS16130 High Precision Angular Rate");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:adis16130");
diff --git a/drivers/staging/iio/gyro/adis16260_core.c b/drivers/staging/iio/gyro/adis16260_core.c
index aaa3967..8f6af47 100644
--- a/drivers/staging/iio/gyro/adis16260_core.c
+++ b/drivers/staging/iio/gyro/adis16260_core.c
@@ -20,7 +20,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "adis16260.h"
@@ -390,9 +390,9 @@ enum adis16260_channel {
#define ADIS16260_GYRO_CHANNEL_SET(axis, mod) \
struct iio_chan_spec adis16260_channels_##axis[] = { \
IIO_CHAN(IIO_ANGL_VEL, 1, 0, 0, NULL, 0, mod, \
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) | \
- (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) | \
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE), \
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT | \
+ IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT | \
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
gyro, ADIS16260_SCAN_GYRO, \
IIO_ST('s', 14, 16, 0), 0), \
IIO_CHAN(IIO_ANGL, 1, 0, 0, NULL, 0, mod, \
@@ -400,16 +400,16 @@ enum adis16260_channel {
angle, ADIS16260_SCAN_ANGL, \
IIO_ST('u', 14, 16, 0), 0), \
IIO_CHAN(IIO_TEMP, 0, 1, 0, NULL, 0, 0, \
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) | \
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE), \
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT | \
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
temp, ADIS16260_SCAN_TEMP, \
IIO_ST('u', 12, 16, 0), 0), \
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "supply", 0, 0, \
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE), \
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
in_supply, ADIS16260_SCAN_SUPPLY, \
IIO_ST('u', 12, 16, 0), 0), \
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, NULL, 1, 0, \
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE), \
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
in_aux, ADIS16260_SCAN_AUX_ADC, \
IIO_ST('u', 12, 16, 0), 0), \
IIO_CHAN_SOFT_TIMESTAMP(5) \
@@ -464,8 +464,7 @@ static int adis16260_read_raw(struct iio_dev *indio_dev,
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_ANGL_VEL:
*val = 0;
@@ -489,10 +488,10 @@ static int adis16260_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
break;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
*val = 25;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
switch (chan->type) {
case IIO_ANGL_VEL:
bits = 12;
@@ -512,7 +511,7 @@ static int adis16260_read_raw(struct iio_dev *indio_dev,
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
switch (chan->type) {
case IIO_ANGL_VEL:
bits = 12;
@@ -544,11 +543,11 @@ static int adis16260_write_raw(struct iio_dev *indio_dev,
s16 val16;
u8 addr;
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
val16 = val & ((1 << bits) - 1);
addr = adis16260_addresses[chan->address][1];
return adis16260_spi_write_reg_16(indio_dev, addr, val16);
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
val16 = val & ((1 << bits) - 1);
addr = adis16260_addresses[chan->address][2];
return adis16260_spi_write_reg_16(indio_dev, addr, val16);
@@ -633,11 +632,16 @@ static int __devinit adis16260_probe(struct spi_device *spi)
}
if (indio_dev->buffer) {
/* Set default scan mode */
- iio_scan_mask_set(indio_dev->buffer, ADIS16260_SCAN_SUPPLY);
- iio_scan_mask_set(indio_dev->buffer, ADIS16260_SCAN_GYRO);
- iio_scan_mask_set(indio_dev->buffer, ADIS16260_SCAN_AUX_ADC);
- iio_scan_mask_set(indio_dev->buffer, ADIS16260_SCAN_TEMP);
- iio_scan_mask_set(indio_dev->buffer, ADIS16260_SCAN_ANGL);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer,
+ ADIS16260_SCAN_SUPPLY);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer,
+ ADIS16260_SCAN_GYRO);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer,
+ ADIS16260_SCAN_AUX_ADC);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer,
+ ADIS16260_SCAN_TEMP);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer,
+ ADIS16260_SCAN_ANGL);
}
if (spi->irq) {
ret = adis16260_probe_trigger(indio_dev);
@@ -701,6 +705,7 @@ static const struct spi_device_id adis16260_id[] = {
{"adis16251", 1},
{}
};
+MODULE_DEVICE_TABLE(spi, adis16260_id);
static struct spi_driver adis16260_driver = {
.driver = {
@@ -711,18 +716,7 @@ static struct spi_driver adis16260_driver = {
.remove = __devexit_p(adis16260_remove),
.id_table = adis16260_id,
};
-
-static __init int adis16260_init(void)
-{
- return spi_register_driver(&adis16260_driver);
-}
-module_init(adis16260_init);
-
-static __exit void adis16260_exit(void)
-{
- spi_unregister_driver(&adis16260_driver);
-}
-module_exit(adis16260_exit);
+module_spi_driver(adis16260_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADIS16260/5 Digital Gyroscope Sensor");
diff --git a/drivers/staging/iio/gyro/adis16260_ring.c b/drivers/staging/iio/gyro/adis16260_ring.c
index 52a9e78..699a615 100644
--- a/drivers/staging/iio/gyro/adis16260_ring.c
+++ b/drivers/staging/iio/gyro/adis16260_ring.c
@@ -74,9 +74,10 @@ static irqreturn_t adis16260_trigger_handler(int irq, void *p)
return -ENOMEM;
}
- if (ring->scan_count &&
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength) &&
adis16260_read_ring_data(&indio_dev->dev, st->rx) >= 0)
- for (; i < ring->scan_count; i++)
+ for (; i < bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength); i++)
data[i] = be16_to_cpup((__be16 *)&(st->rx[i*2]));
/* Guaranteed to be aligned with 8 byte boundary */
@@ -116,10 +117,8 @@ int adis16260_configure_ring(struct iio_dev *indio_dev)
indio_dev->buffer = ring;
/* Effectively select the ring buffer implementation */
ring->access = &ring_sw_access_funcs;
- ring->bpe = 2;
ring->scan_timestamp = true;
- ring->setup_ops = &adis16260_ring_setup_ops;
- ring->owner = THIS_MODULE;
+ indio_dev->setup_ops = &adis16260_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&adis16260_trigger_handler,
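/* Sketch, not part of the patch: iio_buffer no longer carries a scan_count;
 * the enabled-channel count is derived from the device-wide active_scan_mask,
 * as the trigger handler above now does.  Assuming linux/bitmap.h, a driver
 * could wrap the idiom in a small helper (name is hypothetical): */
static inline int example_scan_count(struct iio_dev *indio_dev)
{
	/* number of channels currently enabled in the scan */
	return bitmap_weight(indio_dev->active_scan_mask,
			     indio_dev->masklength);
}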
diff --git a/drivers/staging/iio/gyro/adxrs450.h b/drivers/staging/iio/gyro/adxrs450.h
index b6b6828..af0c870 100644
--- a/drivers/staging/iio/gyro/adxrs450.h
+++ b/drivers/staging/iio/gyro/adxrs450.h
@@ -39,6 +39,11 @@
#define ADXRS450_GET_ST(a) ((a >> 26) & 0x3)
+enum {
+ ID_ADXRS450,
+ ID_ADXRS453,
+};
+
/**
* struct adxrs450_state - device instance specific data
* @us: actual spi_device
diff --git a/drivers/staging/iio/gyro/adxrs450_core.c b/drivers/staging/iio/gyro/adxrs450_core.c
index 3c3ef79..15e2496 100644
--- a/drivers/staging/iio/gyro/adxrs450_core.c
+++ b/drivers/staging/iio/gyro/adxrs450_core.c
@@ -1,5 +1,5 @@
/*
- * ADXRS450 Digital Output Gyroscope Driver
+ * ADXRS450/ADXRS453 Digital Output Gyroscope Driver
*
* Copyright 2011 Analog Devices Inc.
*
@@ -243,7 +243,7 @@ static int adxrs450_write_raw(struct iio_dev *indio_dev,
{
int ret;
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
ret = adxrs450_spi_write_reg_16(indio_dev,
ADXRS450_DNC1,
val & 0x3FF);
@@ -263,7 +263,7 @@ static int adxrs450_read_raw(struct iio_dev *indio_dev,
{
int ret;
s16 t;
- u16 ut;
+
switch (mask) {
case 0:
switch (chan->type) {
@@ -276,10 +276,10 @@ static int adxrs450_read_raw(struct iio_dev *indio_dev,
break;
case IIO_TEMP:
ret = adxrs450_spi_read_reg_16(indio_dev,
- ADXRS450_TEMP1, &ut);
+ ADXRS450_TEMP1, &t);
if (ret)
break;
- *val = ut;
+ *val = (t >> 6) + 225;
ret = IIO_VAL_INT;
break;
default:
@@ -287,13 +287,34 @@ static int adxrs450_read_raw(struct iio_dev *indio_dev,
break;
}
break;
- case (1 << IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_ANGL_VEL:
+ *val = 0;
+ *val2 = 218166;
+ return IIO_VAL_INT_PLUS_NANO;
+ case IIO_TEMP:
+ *val = 200;
+ *val2 = 0;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW:
ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_QUAD1, &t);
if (ret)
break;
*val = t;
ret = IIO_VAL_INT;
break;
+ case IIO_CHAN_INFO_CALIBBIAS:
+ ret = adxrs450_spi_read_reg_16(indio_dev, ADXRS450_DNC1, &t);
+ if (ret)
+ break;
+ *val = t;
+ ret = IIO_VAL_INT;
+ break;
default:
ret = -EINVAL;
break;
@@ -302,18 +323,36 @@ static int adxrs450_read_raw(struct iio_dev *indio_dev,
return ret;
}
-static const struct iio_chan_spec adxrs450_channels[] = {
- {
- .type = IIO_ANGL_VEL,
- .modified = 1,
- .channel2 = IIO_MOD_Z,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SEPARATE)
- }, {
- .type = IIO_TEMP,
- .indexed = 1,
- .channel = 0,
- }
+static const struct iio_chan_spec adxrs450_channels[2][2] = {
+ [ID_ADXRS450] = {
+ {
+ .type = IIO_ANGL_VEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_Z,
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
+ }, {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
+ }
+ },
+ [ID_ADXRS453] = {
+ {
+ .type = IIO_ANGL_VEL,
+ .modified = 1,
+ .channel2 = IIO_MOD_Z,
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SEPARATE_BIT,
+ }, {
+ .type = IIO_TEMP,
+ .indexed = 1,
+ .channel = 0,
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
+ }
+ },
};
static const struct iio_info adxrs450_info = {
@@ -343,7 +382,8 @@ static int __devinit adxrs450_probe(struct spi_device *spi)
indio_dev->dev.parent = &spi->dev;
indio_dev->info = &adxrs450_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = adxrs450_channels;
+ indio_dev->channels =
+ adxrs450_channels[spi_get_device_id(spi)->driver_data];
indio_dev->num_channels = ARRAY_SIZE(adxrs450_channels);
indio_dev->name = spi->dev.driver->name;
@@ -373,6 +413,13 @@ static int adxrs450_remove(struct spi_device *spi)
return 0;
}
+static const struct spi_device_id adxrs450_id[] = {
+ {"adxrs450", ID_ADXRS450},
+ {"adxrs453", ID_ADXRS453},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, adxrs450_id);
+
static struct spi_driver adxrs450_driver = {
.driver = {
.name = "adxrs450",
@@ -380,20 +427,10 @@ static struct spi_driver adxrs450_driver = {
},
.probe = adxrs450_probe,
.remove = __devexit_p(adxrs450_remove),
+ .id_table = adxrs450_id,
};
-
-static __init int adxrs450_init(void)
-{
- return spi_register_driver(&adxrs450_driver);
-}
-module_init(adxrs450_init);
-
-static __exit void adxrs450_exit(void)
-{
- spi_unregister_driver(&adxrs450_driver);
-}
-module_exit(adxrs450_exit);
+module_spi_driver(adxrs450_driver);
MODULE_AUTHOR("Cliff Cai <cliff.cai@xxxxxxxxxx>");
-MODULE_DESCRIPTION("Analog Devices ADXRS450 Gyroscope SPI driver");
+MODULE_DESCRIPTION("Analog Devices ADXRS450/ADXRS453 Gyroscope SPI driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/iio/iio.h b/drivers/staging/iio/iio.h
index f3d88cd..be6ced3 100644
--- a/drivers/staging/iio/iio.h
+++ b/drivers/staging/iio/iio.h
@@ -7,13 +7,12 @@
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*/
-
#ifndef _INDUSTRIAL_IO_H_
#define _INDUSTRIAL_IO_H_
#include <linux/device.h>
#include <linux/cdev.h>
-
+#include "types.h"
/* IIO TODO LIST */
/*
* Provide means of adjusting timer accuracy.
@@ -25,62 +24,64 @@ enum iio_data_type {
IIO_PROCESSED,
};
-enum iio_chan_type {
- /* real channel types */
- IIO_VOLTAGE,
- IIO_CURRENT,
- IIO_POWER,
- IIO_ACCEL,
- IIO_ANGL_VEL,
- IIO_MAGN,
- IIO_LIGHT,
- IIO_INTENSITY,
- IIO_PROXIMITY,
- IIO_TEMP,
- IIO_INCLI,
- IIO_ROT,
- IIO_ANGL,
- IIO_TIMESTAMP,
- IIO_CAPACITANCE,
-};
-
-enum iio_modifier {
- IIO_NO_MOD,
- IIO_MOD_X,
- IIO_MOD_Y,
- IIO_MOD_Z,
- IIO_MOD_X_AND_Y,
- IIO_MOD_X_ANX_Z,
- IIO_MOD_Y_AND_Z,
- IIO_MOD_X_AND_Y_AND_Z,
- IIO_MOD_X_OR_Y,
- IIO_MOD_X_OR_Z,
- IIO_MOD_Y_OR_Z,
- IIO_MOD_X_OR_Y_OR_Z,
- IIO_MOD_LIGHT_BOTH,
- IIO_MOD_LIGHT_IR,
-};
-
/* Could add the raw attributes as well - allowing buffer only devices */
enum iio_chan_info_enum {
- IIO_CHAN_INFO_SCALE_SHARED,
- IIO_CHAN_INFO_SCALE_SEPARATE,
- IIO_CHAN_INFO_OFFSET_SHARED,
- IIO_CHAN_INFO_OFFSET_SEPARATE,
- IIO_CHAN_INFO_CALIBSCALE_SHARED,
- IIO_CHAN_INFO_CALIBSCALE_SEPARATE,
- IIO_CHAN_INFO_CALIBBIAS_SHARED,
- IIO_CHAN_INFO_CALIBBIAS_SEPARATE,
- IIO_CHAN_INFO_PEAK_SHARED,
- IIO_CHAN_INFO_PEAK_SEPARATE,
- IIO_CHAN_INFO_PEAK_SCALE_SHARED,
- IIO_CHAN_INFO_PEAK_SCALE_SEPARATE,
- IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SHARED,
- IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SEPARATE,
- IIO_CHAN_INFO_AVERAGE_RAW_SHARED,
- IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE,
+ /* 0 is reserved for raw attributes */
+ IIO_CHAN_INFO_SCALE = 1,
+ IIO_CHAN_INFO_OFFSET,
+ IIO_CHAN_INFO_CALIBSCALE,
+ IIO_CHAN_INFO_CALIBBIAS,
+ IIO_CHAN_INFO_PEAK,
+ IIO_CHAN_INFO_PEAK_SCALE,
+ IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW,
+ IIO_CHAN_INFO_AVERAGE_RAW,
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY,
};
+#define IIO_CHAN_INFO_SHARED_BIT(type) BIT(type*2)
+#define IIO_CHAN_INFO_SEPARATE_BIT(type) BIT(type*2 + 1)
+
+#define IIO_CHAN_INFO_SCALE_SEPARATE_BIT \
+ IIO_CHAN_INFO_SEPARATE_BIT(IIO_CHAN_INFO_SCALE)
+#define IIO_CHAN_INFO_SCALE_SHARED_BIT \
+ IIO_CHAN_INFO_SHARED_BIT(IIO_CHAN_INFO_SCALE)
+#define IIO_CHAN_INFO_OFFSET_SEPARATE_BIT \
+ IIO_CHAN_INFO_SEPARATE_BIT(IIO_CHAN_INFO_OFFSET)
+#define IIO_CHAN_INFO_OFFSET_SHARED_BIT \
+ IIO_CHAN_INFO_SHARED_BIT(IIO_CHAN_INFO_OFFSET)
+#define IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT \
+ IIO_CHAN_INFO_SEPARATE_BIT(IIO_CHAN_INFO_CALIBSCALE)
+#define IIO_CHAN_INFO_CALIBSCALE_SHARED_BIT \
+ IIO_CHAN_INFO_SHARED_BIT(IIO_CHAN_INFO_CALIBSCALE)
+#define IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT \
+ IIO_CHAN_INFO_SEPARATE_BIT(IIO_CHAN_INFO_CALIBBIAS)
+#define IIO_CHAN_INFO_CALIBBIAS_SHARED_BIT \
+ IIO_CHAN_INFO_SHARED_BIT(IIO_CHAN_INFO_CALIBBIAS)
+#define IIO_CHAN_INFO_PEAK_SEPARATE_BIT \
+ IIO_CHAN_INFO_SEPARATE_BIT(IIO_CHAN_INFO_PEAK)
+#define IIO_CHAN_INFO_PEAK_SHARED_BIT \
+ IIO_CHAN_INFO_SHARED_BIT(IIO_CHAN_INFO_PEAK)
+#define IIO_CHAN_INFO_PEAK_SCALE_SEPARATE_BIT \
+ IIO_CHAN_INFO_SEPARATE_BIT(IIO_CHAN_INFO_PEAK_SCALE)
+#define IIO_CHAN_INFO_PEAK_SCALE_SHARED_BIT \
+ IIO_CHAN_INFO_SHARED_BIT(IIO_CHAN_INFO_PEAK_SCALE)
+#define IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SEPARATE_BIT \
+ IIO_CHAN_INFO_SEPARATE_BIT( \
+ IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW)
+#define IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SHARED_BIT \
+ IIO_CHAN_INFO_SHARED_BIT( \
+ IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW)
+#define IIO_CHAN_INFO_AVERAGE_RAW_SEPARATE_BIT \
+ IIO_CHAN_INFO_SEPARATE_BIT(IIO_CHAN_INFO_AVERAGE_RAW)
+#define IIO_CHAN_INFO_AVERAGE_RAW_SHARED_BIT \
+ IIO_CHAN_INFO_SHARED_BIT(IIO_CHAN_INFO_AVERAGE_RAW)
+#define IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT \
+ IIO_CHAN_INFO_SHARED_BIT( \
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY)
+#define IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SEPARATE_BIT \
+ IIO_CHAN_INFO_SEPARATE_BIT( \
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY)
+
enum iio_endian {
IIO_CPU,
IIO_BE,
@@ -109,6 +110,10 @@ enum iio_endian {
* @extend_name: Allows labeling of channel attributes with an
* informative name. Note this has no effect on codes etc,
* unlike modifiers.
+ * @datasheet_name: A name used in the in-kernel mapping of channels. It should
+ * correspond to the first name that the channel is referred
+ * to by in the datasheet (e.g. IND), or the nearest
+ * possible compound name (e.g. IND-INC).
* @processed_val: Flag to specify the data access attribute should be
* *_input rather than *_raw.
* @modified: Does a modifier apply to this channel. What these are
@@ -137,6 +142,7 @@ struct iio_chan_spec {
long info_mask;
long event_mask;
char *extend_name;
+ const char *datasheet_name;
unsigned processed_val:1;
unsigned modified:1;
unsigned indexed:1;
@@ -261,7 +267,23 @@ struct iio_info {
int val);
int (*validate_trigger)(struct iio_dev *indio_dev,
struct iio_trigger *trig);
+ int (*update_scan_mode)(struct iio_dev *indio_dev,
+ const unsigned long *scan_mask);
+};
+/**
+ * struct iio_buffer_setup_ops - buffer setup related callbacks
+ * @preenable: [DRIVER] function to run prior to marking buffer enabled
+ * @postenable: [DRIVER] function to run after marking buffer enabled
+ * @predisable: [DRIVER] function to run prior to marking buffer
+ * disabled
+ * @postdisable: [DRIVER] function to run after marking buffer disabled
+ */
+struct iio_buffer_setup_ops {
+ int (*preenable)(struct iio_dev *);
+ int (*postenable)(struct iio_dev *);
+ int (*predisable)(struct iio_dev *);
+ int (*postdisable)(struct iio_dev *);
};
/**
@@ -278,6 +300,7 @@ struct iio_info {
* @available_scan_masks: [DRIVER] optional array of allowed bitmasks
* @masklength: [INTERN] the length of the mask established from
* channels
+ * @active_scan_mask: [INTERN] union of all scan masks requested by buffers
* @trig: [INTERN] current device trigger (buffer modes)
* @pollfunc: [DRIVER] function run on trigger being received
* @channels: [DRIVER] channel specification structure table
@@ -290,6 +313,7 @@ struct iio_info {
* @chrdev: [INTERN] associated character device
* @groups: [INTERN] attribute groups
* @groupcounter: [INTERN] index of next attribute group
+ * @flags: [INTERN] file ops related flags including busy flag.
**/
struct iio_dev {
int id;
@@ -305,6 +329,7 @@ struct iio_dev {
unsigned long *available_scan_masks;
unsigned masklength;
+ unsigned long *active_scan_mask;
struct iio_trigger *trig;
struct iio_poll_func *pollfunc;
@@ -315,13 +340,24 @@ struct iio_dev {
struct attribute_group chan_attr_group;
const char *name;
const struct iio_info *info;
+ const struct iio_buffer_setup_ops *setup_ops;
struct cdev chrdev;
#define IIO_MAX_GROUPS 6
const struct attribute_group *groups[IIO_MAX_GROUPS + 1];
int groupcounter;
+
+ unsigned long flags;
};
/**
+ * iio_find_channel_from_si() - get channel from its scan index
+ * @indio_dev: device
+ * @si: scan index to match
+ */
+const struct iio_chan_spec
+*iio_find_channel_from_si(struct iio_dev *indio_dev, int si);
+
+/**
* iio_device_register() - register a device with the IIO subsystem
* @indio_dev: Device structure filled by the device driver
**/
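/* Sketch, not part of the patch: under the new scheme a channel advertises its
 * attributes in info_mask with the *_SHARED_BIT/*_SEPARATE_BIT masks, while
 * (*read_raw)() and (*write_raw)() receive the plain enum iio_chan_info_enum
 * value in mask, so drivers switch on that (as the conversions throughout this
 * diff do).  Hypothetical example: */
static const struct iio_chan_spec example_channel = {
	.type = IIO_VOLTAGE,
	.indexed = 1,
	.channel = 0,
	.info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
		     IIO_CHAN_INFO_OFFSET_SEPARATE_BIT,
};

static int example_read_raw(struct iio_dev *indio_dev,
			    struct iio_chan_spec const *chan,
			    int *val, int *val2, long mask)
{
	switch (mask) {
	case IIO_CHAN_INFO_SCALE:	/* enum value, not the _BIT mask */
		*val = 0;
		*val2 = 1000;
		return IIO_VAL_INT_PLUS_MICRO;
	case IIO_CHAN_INFO_OFFSET:
		*val = 7;
		return IIO_VAL_INT;
	default:
		return -EINVAL;
	}
}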
diff --git a/drivers/staging/iio/iio_core.h b/drivers/staging/iio/iio_core.h
index 36159e0..107cfb1 100644
--- a/drivers/staging/iio/iio_core.h
+++ b/drivers/staging/iio/iio_core.h
@@ -33,9 +33,6 @@ int __iio_add_chan_devattr(const char *postfix,
#ifdef CONFIG_IIO_BUFFER
struct poll_table_struct;
-int iio_chrdev_buffer_open(struct iio_dev *indio_dev);
-void iio_chrdev_buffer_release(struct iio_dev *indio_dev);
-
unsigned int iio_buffer_poll(struct file *filp,
struct poll_table_struct *wait);
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
@@ -47,14 +44,6 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
#else
-static inline int iio_chrdev_buffer_open(struct iio_dev *indio_dev)
-{
- return -EINVAL;
-}
-
-static inline void iio_chrdev_buffer_release(struct iio_dev *indio_dev)
-{}
-
#define iio_buffer_poll_addr NULL
#define iio_buffer_read_first_n_outer_addr NULL
diff --git a/drivers/staging/iio/iio_core_trigger.h b/drivers/staging/iio/iio_core_trigger.h
index 523c288..6f7c56f 100644
--- a/drivers/staging/iio/iio_core_trigger.h
+++ b/drivers/staging/iio/iio_core_trigger.h
@@ -13,8 +13,7 @@
* iio_device_register_trigger_consumer() - set up an iio_dev to use triggers
* @indio_dev: iio_dev associated with the device that will consume the trigger
**/
-
-int iio_device_register_trigger_consumer(struct iio_dev *indio_dev);
+void iio_device_register_trigger_consumer(struct iio_dev *indio_dev);
/**
* iio_device_unregister_trigger_consumer() - reverse the registration process
diff --git a/drivers/staging/iio/iio_dummy_evgen.c b/drivers/staging/iio/iio_dummy_evgen.c
index da657d1..cdbf289 100644
--- a/drivers/staging/iio/iio_dummy_evgen.c
+++ b/drivers/staging/iio/iio_dummy_evgen.c
@@ -102,6 +102,10 @@ static int iio_dummy_evgen_create(void)
int iio_dummy_evgen_get_irq(void)
{
int i, ret = 0;
+
+ if (iio_evgen == NULL)
+ return -ENODEV;
+
mutex_lock(&iio_evgen->lock);
for (i = 0; i < IIO_EVENTGEN_NO; i++)
if (iio_evgen->inuse[i] == false) {
diff --git a/drivers/staging/iio/iio_simple_dummy.c b/drivers/staging/iio/iio_simple_dummy.c
index af0c992..e3a9457 100644
--- a/drivers/staging/iio/iio_simple_dummy.c
+++ b/drivers/staging/iio/iio_simple_dummy.c
@@ -21,7 +21,8 @@
#include "iio.h"
#include "sysfs.h"
-#include "buffer_generic.h"
+#include "events.h"
+#include "buffer.h"
#include "iio_simple_dummy.h"
/*
@@ -76,13 +77,13 @@ static struct iio_chan_spec iio_dummy_channels[] = {
* Offset for userspace to apply prior to scale
* when converting to standard units (microvolts)
*/
- (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
+ IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
/*
* in_voltage0_scale
* Multiplier for userspace to apply post offset
* when converting to standard units (microvolts)
*/
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
/* The ordering of elements in the buffer via an enum */
.scan_index = voltage0,
.scan_type = { /* Description of storage in buffer */
@@ -117,7 +118,7 @@ static struct iio_chan_spec iio_dummy_channels[] = {
* Shared version of scale - shared by differential
* input channels of type IIO_VOLTAGE.
*/
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = diffvoltage1m2,
.scan_type = { /* Description of storage in buffer */
.sign = 's', /* signed */
@@ -134,7 +135,7 @@ static struct iio_chan_spec iio_dummy_channels[] = {
.channel = 3,
.channel2 = 4,
.info_mask =
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
.scan_index = diffvoltage3m4,
.scan_type = {
.sign = 's',
@@ -159,7 +160,7 @@ static struct iio_chan_spec iio_dummy_channels[] = {
* seeing the readings. Typically part of hardware
* calibration.
*/
- (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE),
+ IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT,
.scan_index = accelx,
.scan_type = { /* Description of storage in buffer */
.sign = 's', /* signed */
@@ -228,29 +229,32 @@ static int iio_dummy_read_raw(struct iio_dev *indio_dev,
break;
}
break;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
/* only single ended adc -> 7 */
*val = 7;
ret = IIO_VAL_INT;
break;
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
- /* only single ended adc -> 0.001333 */
- *val = 0;
- *val2 = 1333;
- ret = IIO_VAL_INT_PLUS_MICRO;
- break;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
- /* all differential adc channels -> 0.000001344 */
- *val = 0;
- *val2 = 1344;
- ret = IIO_VAL_INT_PLUS_NANO;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->differential) {
+ case 0:
+ /* only single ended adc -> 0.001333 */
+ *val = 0;
+ *val2 = 1333;
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ case 1:
+ /* all differential adc channels -> 0.000001344 */
+ *val = 0;
+ *val2 = 1344;
+ ret = IIO_VAL_INT_PLUS_NANO;
+ }
break;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
/* only the acceleration axis - read from cache */
*val = st->accel_calibbias;
ret = IIO_VAL_INT;
break;
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
*val = st->accel_calibscale->val;
*val2 = st->accel_calibscale->val2;
ret = IIO_VAL_INT_PLUS_MICRO;
@@ -295,7 +299,7 @@ static int iio_dummy_write_raw(struct iio_dev *indio_dev,
st->dac_val = val;
mutex_unlock(&st->lock);
return 0;
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
mutex_lock(&st->lock);
/* Compare against table - hard matching here */
for (i = 0; i < ARRAY_SIZE(dummy_scales); i++)
@@ -514,7 +518,8 @@ static __init int iio_dummy_init(void)
return -EINVAL;
}
/* Fake a bus */
- iio_dummy_devs = kzalloc(sizeof(*iio_dummy_devs)*instances, GFP_KERNEL);
+ iio_dummy_devs = kcalloc(instances, sizeof(*iio_dummy_devs),
+ GFP_KERNEL);
/* Here we have no actual device so call probe */
for (i = 0; i < instances; i++) {
ret = iio_dummy_probe(i);
diff --git a/drivers/staging/iio/iio_simple_dummy_buffer.c b/drivers/staging/iio/iio_simple_dummy_buffer.c
index edad0e7..d6a1c0e 100644
--- a/drivers/staging/iio/iio_simple_dummy_buffer.c
+++ b/drivers/staging/iio/iio_simple_dummy_buffer.c
@@ -57,7 +57,7 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
if (data == NULL)
return -ENOMEM;
- if (buffer->scan_count) {
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) {
/*
* Three common options here:
* hardware scans: certain combinations of channels make
@@ -75,7 +75,10 @@ static irqreturn_t iio_simple_dummy_trigger_h(int irq, void *p)
* in the constant table fakedata.
*/
int i, j;
- for (i = 0, j = 0; i < buffer->scan_count; i++) {
+ for (i = 0, j = 0;
+ i < bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength);
+ i++) {
j = find_next_bit(buffer->scan_mask,
indio_dev->masklength, j + 1);
/* random access read form the 'device' */
@@ -142,8 +145,6 @@ int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev)
/* Tell the core how to access the buffer */
buffer->access = &kfifo_access_funcs;
- /* Number of bytes per element */
- buffer->bpe = 2;
/* Enable timestamps by default */
buffer->scan_timestamp = true;
@@ -151,8 +152,7 @@ int iio_simple_dummy_configure_buffer(struct iio_dev *indio_dev)
* Tell the core what device type specific functions should
* be run on either side of buffer capture enable / disable.
*/
- buffer->setup_ops = &iio_simple_dummy_buffer_setup_ops;
- buffer->owner = THIS_MODULE;
+ indio_dev->setup_ops = &iio_simple_dummy_buffer_setup_ops;
/*
* Configure a polling function.
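/* Sketch, not part of the patch: with the setup callbacks moved from the
 * buffer to the device (see struct iio_buffer_setup_ops in iio.h above), a
 * driver declares them once and points the device at them.  Names here are
 * hypothetical: */
static int example_postenable(struct iio_dev *indio_dev)
{
	/* start capture hardware once the buffer is marked enabled */
	return 0;
}

static const struct iio_buffer_setup_ops example_setup_ops = {
	.postenable = example_postenable,
};

/* then, in probe: indio_dev->setup_ops = &example_setup_ops; */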
diff --git a/drivers/staging/iio/iio_simple_dummy_events.c b/drivers/staging/iio/iio_simple_dummy_events.c
index 9f00cff..449c7a5 100644
--- a/drivers/staging/iio/iio_simple_dummy_events.c
+++ b/drivers/staging/iio/iio_simple_dummy_events.c
@@ -14,6 +14,7 @@
#include "iio.h"
#include "sysfs.h"
+#include "events.h"
#include "iio_simple_dummy.h"
/* Evgen 'fakes' interrupt events for this example */
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 1086e0b..9a2ca55 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -21,7 +21,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "../ring_sw.h"
#include "ad5933.h"
@@ -113,10 +113,10 @@ static struct iio_chan_spec ad5933_channels[] = {
0, AD5933_REG_TEMP_DATA, IIO_ST('s', 14, 16, 0), 0),
/* Ring Channels */
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "real_raw", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
AD5933_REG_REAL_DATA, 0, IIO_ST('s', 16, 16, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "imag_raw", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
AD5933_REG_IMAG_DATA, 1, IIO_ST('s', 16, 16, 0), 0),
};
@@ -329,7 +329,7 @@ static ssize_t ad5933_show(struct device *dev,
int ret = 0, len = 0;
mutex_lock(&indio_dev->mlock);
- switch (this_attr->address) {
+ switch ((u32) this_attr->address) {
case AD5933_OUT_RANGE:
len = sprintf(buf, "%d\n",
st->range_avail[(st->ctrl_hb >> 1) & 0x3]);
@@ -380,7 +380,7 @@ static ssize_t ad5933_store(struct device *dev,
}
mutex_lock(&indio_dev->mlock);
- switch (this_attr->address) {
+ switch ((u32) this_attr->address) {
case AD5933_OUT_RANGE:
for (i = 0; i < 4; i++)
if (val == st->range_avail[i]) {
@@ -537,14 +537,14 @@ static const struct iio_info ad5933_info = {
static int ad5933_ring_preenable(struct iio_dev *indio_dev)
{
struct ad5933_state *st = iio_priv(indio_dev);
- struct iio_buffer *ring = indio_dev->buffer;
size_t d_size;
int ret;
- if (!ring->scan_count)
+ if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
return -EINVAL;
- d_size = ring->scan_count *
+ d_size = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength) *
ad5933_channels[1].scan_type.storagebits / 8;
if (indio_dev->buffer->access->set_bytes_per_datum)
@@ -611,7 +611,7 @@ static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
indio_dev->buffer->access = &ring_sw_access_funcs;
/* Ring buffer functions - here trigger setup related */
- indio_dev->buffer->setup_ops = &ad5933_ring_setup_ops;
+ indio_dev->setup_ops = &ad5933_ring_setup_ops;
indio_dev->modes |= INDIO_BUFFER_HARDWARE;
@@ -640,12 +640,14 @@ static void ad5933_work(struct work_struct *work)
ad5933_i2c_read(st->client, AD5933_REG_STATUS, 1, &status);
if (status & AD5933_STAT_DATA_VALID) {
+ int scan_count = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength);
ad5933_i2c_read(st->client,
- test_bit(1, ring->scan_mask) ?
+ test_bit(1, indio_dev->active_scan_mask) ?
AD5933_REG_REAL_DATA : AD5933_REG_IMAG_DATA,
- ring->scan_count * 2, (u8 *)buf);
+ scan_count * 2, (u8 *)buf);
- if (ring->scan_count == 2) {
+ if (scan_count == 2) {
buf[0] = be16_to_cpu(buf[0]);
buf[1] = be16_to_cpu(buf[1]);
} else {
@@ -734,8 +736,8 @@ static int __devinit ad5933_probe(struct i2c_client *client,
goto error_unreg_ring;
/* enable both REAL and IMAG channels by default */
- iio_scan_mask_set(indio_dev->buffer, 0);
- iio_scan_mask_set(indio_dev->buffer, 1);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer, 0);
+ iio_scan_mask_set(indio_dev, indio_dev->buffer, 1);
ret = ad5933_setup(st);
if (ret)
@@ -796,18 +798,7 @@ static struct i2c_driver ad5933_driver = {
.remove = __devexit_p(ad5933_remove),
.id_table = ad5933_id,
};
-
-static __init int ad5933_init(void)
-{
- return i2c_add_driver(&ad5933_driver);
-}
-module_init(ad5933_init);
-
-static __exit void ad5933_exit(void)
-{
- i2c_del_driver(&ad5933_driver);
-}
-module_exit(ad5933_exit);
+module_i2c_driver(ad5933_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD5933 Impedance Conv. Network Analyzer");
diff --git a/drivers/staging/iio/imu/adis16400.h b/drivers/staging/iio/imu/adis16400.h
index f3546ee..83d133e 100644
--- a/drivers/staging/iio/imu/adis16400.h
+++ b/drivers/staging/iio/imu/adis16400.h
@@ -148,12 +148,14 @@ struct adis16400_chip_info {
* @tx: transmit buffer
* @rx: receive buffer
* @buf_lock: mutex to protect tx and rx
+ * @filt_int: integer part of requested filter frequency
**/
struct adis16400_state {
struct spi_device *us;
struct iio_trigger *trig;
struct mutex buf_lock;
struct adis16400_chip_info *variant;
+ int filt_int;
u8 tx[ADIS16400_MAX_TX] ____cacheline_aligned;
u8 rx[ADIS16400_MAX_RX] ____cacheline_aligned;
diff --git a/drivers/staging/iio/imu/adis16400_core.c b/drivers/staging/iio/imu/adis16400_core.c
index d082a37..e73ad78 100644
--- a/drivers/staging/iio/imu/adis16400_core.c
+++ b/drivers/staging/iio/imu/adis16400_core.c
@@ -28,7 +28,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "adis16400.h"
enum adis16400_chip_variant {
@@ -161,25 +161,65 @@ error_ret:
return ret;
}
+static int adis16400_get_freq(struct iio_dev *indio_dev)
+{
+ u16 t;
+ int sps, ret;
+
+ ret = adis16400_spi_read_reg_16(indio_dev, ADIS16400_SMPL_PRD, &t);
+ if (ret < 0)
+ return ret;
+ sps = (t & ADIS16400_SMPL_PRD_TIME_BASE) ? 53 : 1638;
+ sps /= (t & ADIS16400_SMPL_PRD_DIV_MASK) + 1;
+
+ return sps;
+}
+
static ssize_t adis16400_read_frequency(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
int ret, len = 0;
- u16 t;
- int sps;
- ret = adis16400_spi_read_reg_16(indio_dev,
- ADIS16400_SMPL_PRD,
- &t);
- if (ret)
+ ret = adis16400_get_freq(indio_dev);
+ if (ret < 0)
return ret;
- sps = (t & ADIS16400_SMPL_PRD_TIME_BASE) ? 53 : 1638;
- sps /= (t & ADIS16400_SMPL_PRD_DIV_MASK) + 1;
- len = sprintf(buf, "%d SPS\n", sps);
+ len = sprintf(buf, "%d SPS\n", ret);
return len;
}
+static const unsigned adis16400_3db_divisors[] = {
+ [0] = 2, /* Special case */
+ [1] = 5,
+ [2] = 10,
+ [3] = 50,
+ [4] = 200,
+};
+
+static int adis16400_set_filter(struct iio_dev *indio_dev, int sps, int val)
+{
+ int i, ret;
+ u16 val16;
+ for (i = ARRAY_SIZE(adis16400_3db_divisors) - 1; i >= 0; i--)
+ if (sps/adis16400_3db_divisors[i] > val)
+ break;
+ if (i == -1)
+ ret = -EINVAL;
+ else {
+ ret = adis16400_spi_read_reg_16(indio_dev,
+ ADIS16400_SENS_AVG,
+ &val16);
+ if (ret < 0)
+ goto error_ret;
+
+ ret = adis16400_spi_write_reg_16(indio_dev,
+ ADIS16400_SENS_AVG,
+ (val16 & ~0x03) | i);
+ }
+error_ret:
+ return ret;
+}
+
static ssize_t adis16400_write_frequency(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -210,6 +250,7 @@ static ssize_t adis16400_write_frequency(struct device *dev,
ADIS16400_SMPL_PRD,
t);
+ /* Also update the filter */
mutex_unlock(&indio_dev->mlock);
return ret ? ret : len;
@@ -455,22 +496,39 @@ static u8 adis16400_addresses[17][2] = {
[incli_y] = { ADIS16300_ROLL_OUT }
};
+
static int adis16400_write_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int val,
int val2,
long mask)
{
- int ret;
+ struct adis16400_state *st = iio_priv(indio_dev);
+ int ret, sps;
switch (mask) {
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
mutex_lock(&indio_dev->mlock);
ret = adis16400_spi_write_reg_16(indio_dev,
adis16400_addresses[chan->address][1],
val);
mutex_unlock(&indio_dev->mlock);
return ret;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ /* Need to cache values so we can update if the frequency
+ changes */
+ mutex_lock(&indio_dev->mlock);
+ st->filt_int = val;
+ /* Work out update to current value */
+ sps = adis16400_get_freq(indio_dev);
+ if (sps < 0) {
+ mutex_unlock(&indio_dev->mlock);
+ return sps;
+ }
+
+ ret = adis16400_set_filter(indio_dev, sps, val);
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
default:
return -EINVAL;
}
@@ -504,8 +562,7 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
*val = val16;
mutex_unlock(&indio_dev->mlock);
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
switch (chan->type) {
case IIO_ANGL_VEL:
*val = 0;
@@ -533,7 +590,7 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
default:
return -EINVAL;
}
- case (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE):
+ case IIO_CHAN_INFO_CALIBBIAS:
mutex_lock(&indio_dev->mlock);
ret = adis16400_spi_read_reg_16(indio_dev,
adis16400_addresses[chan->address][1],
@@ -544,11 +601,29 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
val16 = ((val16 & 0xFFF) << 4) >> 4;
*val = val16;
return IIO_VAL_INT;
- case (1 << IIO_CHAN_INFO_OFFSET_SEPARATE):
+ case IIO_CHAN_INFO_OFFSET:
/* currently only temperature */
*val = 198;
*val2 = 160000;
return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY:
+ mutex_lock(&indio_dev->mlock);
+ /* Need both the number of taps and the sampling frequency */
+ ret = adis16400_spi_read_reg_16(indio_dev,
+ ADIS16400_SENS_AVG,
+ &val16);
+ if (ret < 0) {
+ mutex_unlock(&indio_dev->mlock);
+ return ret;
+ }
+ ret = adis16400_get_freq(indio_dev);
+ if (ret > 0)
+ *val = ret/adis16400_3db_divisors[val16 & 0x03];
+ *val2 = 0;
+ mutex_unlock(&indio_dev->mlock);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT_PLUS_MICRO;
default:
return -EINVAL;
}
@@ -560,7 +635,7 @@ static struct iio_chan_spec adis16400_channels[] = {
.indexed = 1,
.channel = 0,
.extend_name = "supply",
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = in_supply,
.scan_index = ADIS16400_SCAN_SUPPLY,
.scan_type = IIO_ST('u', 14, 16, 0)
@@ -568,8 +643,9 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_x,
.scan_index = ADIS16400_SCAN_GYRO_X,
.scan_type = IIO_ST('s', 14, 16, 0)
@@ -577,8 +653,9 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_Y,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_y,
.scan_index = ADIS16400_SCAN_GYRO_Y,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -586,8 +663,9 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_Z,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_z,
.scan_index = ADIS16400_SCAN_GYRO_Z,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -595,8 +673,9 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_x,
.scan_index = ADIS16400_SCAN_ACC_X,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -604,8 +683,9 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_Y,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_y,
.scan_index = ADIS16400_SCAN_ACC_Y,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -613,8 +693,9 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_Z,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_z,
.scan_index = ADIS16400_SCAN_ACC_Z,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -622,7 +703,8 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_MAGN,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = magn_x,
.scan_index = ADIS16400_SCAN_MAGN_X,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -630,7 +712,8 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_MAGN,
.modified = 1,
.channel2 = IIO_MOD_Y,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = magn_y,
.scan_index = ADIS16400_SCAN_MAGN_Y,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -638,7 +721,8 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_MAGN,
.modified = 1,
.channel2 = IIO_MOD_Z,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = magn_z,
.scan_index = ADIS16400_SCAN_MAGN_Z,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -646,8 +730,8 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_TEMP,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = temp,
.scan_index = ADIS16400_SCAN_TEMP,
.scan_type = IIO_ST('s', 12, 16, 0),
@@ -655,7 +739,7 @@ static struct iio_chan_spec adis16400_channels[] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 1,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = in1,
.scan_index = ADIS16400_SCAN_ADC_0,
.scan_type = IIO_ST('s', 12, 16, 0),
@@ -669,7 +753,7 @@ static struct iio_chan_spec adis16350_channels[] = {
.indexed = 1,
.channel = 0,
.extend_name = "supply",
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = in_supply,
.scan_index = ADIS16400_SCAN_SUPPLY,
.scan_type = IIO_ST('u', 12, 16, 0)
@@ -677,8 +761,9 @@ static struct iio_chan_spec adis16350_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_x,
.scan_index = ADIS16400_SCAN_GYRO_X,
.scan_type = IIO_ST('s', 14, 16, 0)
@@ -686,8 +771,9 @@ static struct iio_chan_spec adis16350_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_Y,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_y,
.scan_index = ADIS16400_SCAN_GYRO_Y,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -695,8 +781,9 @@ static struct iio_chan_spec adis16350_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_Z,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_z,
.scan_index = ADIS16400_SCAN_GYRO_Z,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -704,8 +791,9 @@ static struct iio_chan_spec adis16350_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_x,
.scan_index = ADIS16400_SCAN_ACC_X,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -713,8 +801,9 @@ static struct iio_chan_spec adis16350_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_Y,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_y,
.scan_index = ADIS16400_SCAN_ACC_Y,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -722,8 +811,9 @@ static struct iio_chan_spec adis16350_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_Z,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_z,
.scan_index = ADIS16400_SCAN_ACC_Z,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -732,8 +822,9 @@ static struct iio_chan_spec adis16350_channels[] = {
.indexed = 1,
.channel = 0,
.extend_name = "x",
- .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = temp0,
.scan_index = ADIS16350_SCAN_TEMP_X,
.scan_type = IIO_ST('s', 12, 16, 0),
@@ -742,8 +833,9 @@ static struct iio_chan_spec adis16350_channels[] = {
.indexed = 1,
.channel = 1,
.extend_name = "y",
- .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = temp1,
.scan_index = ADIS16350_SCAN_TEMP_Y,
.scan_type = IIO_ST('s', 12, 16, 0),
@@ -752,8 +844,8 @@ static struct iio_chan_spec adis16350_channels[] = {
.indexed = 1,
.channel = 2,
.extend_name = "z",
- .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = temp2,
.scan_index = ADIS16350_SCAN_TEMP_Z,
.scan_type = IIO_ST('s', 12, 16, 0),
@@ -761,7 +853,7 @@ static struct iio_chan_spec adis16350_channels[] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 1,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = in1,
.scan_index = ADIS16350_SCAN_ADC_0,
.scan_type = IIO_ST('s', 12, 16, 0),
@@ -775,7 +867,7 @@ static struct iio_chan_spec adis16300_channels[] = {
.indexed = 1,
.channel = 0,
.extend_name = "supply",
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = in_supply,
.scan_index = ADIS16400_SCAN_SUPPLY,
.scan_type = IIO_ST('u', 12, 16, 0)
@@ -783,8 +875,9 @@ static struct iio_chan_spec adis16300_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_x,
.scan_index = ADIS16400_SCAN_GYRO_X,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -792,8 +885,9 @@ static struct iio_chan_spec adis16300_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_x,
.scan_index = ADIS16400_SCAN_ACC_X,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -801,8 +895,9 @@ static struct iio_chan_spec adis16300_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_Y,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_y,
.scan_index = ADIS16400_SCAN_ACC_Y,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -810,8 +905,9 @@ static struct iio_chan_spec adis16300_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_Z,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_z,
.scan_index = ADIS16400_SCAN_ACC_Z,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -819,8 +915,8 @@ static struct iio_chan_spec adis16300_channels[] = {
.type = IIO_TEMP,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_OFFSET_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_OFFSET_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = temp,
.scan_index = ADIS16400_SCAN_TEMP,
.scan_type = IIO_ST('s', 12, 16, 0),
@@ -828,7 +924,7 @@ static struct iio_chan_spec adis16300_channels[] = {
.type = IIO_VOLTAGE,
.indexed = 1,
.channel = 1,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT,
.address = in1,
.scan_index = ADIS16350_SCAN_ADC_0,
.scan_type = IIO_ST('s', 12, 16, 0),
@@ -836,7 +932,7 @@ static struct iio_chan_spec adis16300_channels[] = {
.type = IIO_INCLI,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = incli_x,
.scan_index = ADIS16300_SCAN_INCLI_X,
.scan_type = IIO_ST('s', 13, 16, 0),
@@ -844,7 +940,7 @@ static struct iio_chan_spec adis16300_channels[] = {
.type = IIO_INCLI,
.modified = 1,
.channel2 = IIO_MOD_Y,
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = incli_y,
.scan_index = ADIS16300_SCAN_INCLI_Y,
.scan_type = IIO_ST('s', 13, 16, 0),
@@ -857,8 +953,9 @@ static const struct iio_chan_spec adis16334_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_x,
.scan_index = ADIS16400_SCAN_GYRO_X,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -866,8 +963,9 @@ static const struct iio_chan_spec adis16334_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_Y,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_y,
.scan_index = ADIS16400_SCAN_GYRO_Y,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -875,8 +973,9 @@ static const struct iio_chan_spec adis16334_channels[] = {
.type = IIO_ANGL_VEL,
.modified = 1,
.channel2 = IIO_MOD_Z,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = gyro_z,
.scan_index = ADIS16400_SCAN_GYRO_Z,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -884,8 +983,9 @@ static const struct iio_chan_spec adis16334_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_X,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_x,
.scan_index = ADIS16400_SCAN_ACC_X,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -893,8 +993,9 @@ static const struct iio_chan_spec adis16334_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_Y,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_y,
.scan_index = ADIS16400_SCAN_ACC_Y,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -902,8 +1003,9 @@ static const struct iio_chan_spec adis16334_channels[] = {
.type = IIO_ACCEL,
.modified = 1,
.channel2 = IIO_MOD_Z,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT |
+ IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY_SHARED_BIT,
.address = accel_z,
.scan_index = ADIS16400_SCAN_ACC_Z,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -911,8 +1013,8 @@ static const struct iio_chan_spec adis16334_channels[] = {
.type = IIO_TEMP,
.indexed = 1,
.channel = 0,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBBIAS_SEPARATE) |
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ .info_mask = IIO_CHAN_INFO_CALIBBIAS_SEPARATE_BIT |
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
.address = accel_z,
.scan_index = ADIS16400_SCAN_ACC_Z,
.scan_type = IIO_ST('s', 14, 16, 0),
@@ -1118,6 +1220,7 @@ static const struct spi_device_id adis16400_id[] = {
{"adis16405", ADIS16400},
{}
};
+MODULE_DEVICE_TABLE(spi, adis16400_id);
static struct spi_driver adis16400_driver = {
.driver = {
@@ -1128,18 +1231,7 @@ static struct spi_driver adis16400_driver = {
.probe = adis16400_probe,
.remove = __devexit_p(adis16400_remove),
};
-
-static __init int adis16400_init(void)
-{
- return spi_register_driver(&adis16400_driver);
-}
-module_init(adis16400_init);
-
-static __exit void adis16400_exit(void)
-{
- spi_unregister_driver(&adis16400_driver);
-}
-module_exit(adis16400_exit);
+module_spi_driver(adis16400_driver);
MODULE_AUTHOR("Manuel Stahl <manuel.stahl@iis.fraunhofer.de>");
MODULE_DESCRIPTION("Analog Devices ADIS16400/5 IMU SPI driver");
diff --git a/drivers/staging/iio/imu/adis16400_ring.c b/drivers/staging/iio/imu/adis16400_ring.c
index fd886bf..ac22de5 100644
--- a/drivers/staging/iio/imu/adis16400_ring.c
+++ b/drivers/staging/iio/imu/adis16400_ring.c
@@ -79,14 +79,16 @@ static int adis16350_spi_read_all(struct device *dev, u8 *rx)
struct spi_message msg;
int i, j = 0, ret;
struct spi_transfer *xfers;
+ int scan_count = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength);
- xfers = kzalloc(sizeof(*xfers)*indio_dev->buffer->scan_count + 1,
+ xfers = kzalloc(sizeof(*xfers)*(scan_count + 1),
GFP_KERNEL);
if (xfers == NULL)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(read_all_tx_array); i++)
- if (test_bit(i, indio_dev->buffer->scan_mask)) {
+ if (test_bit(i, indio_dev->active_scan_mask)) {
xfers[j].tx_buf = &read_all_tx_array[i];
xfers[j].bits_per_word = 16;
xfers[j].len = 2;
@@ -97,7 +99,7 @@ static int adis16350_spi_read_all(struct device *dev, u8 *rx)
xfers[j].len = 2;
spi_message_init(&msg);
- for (j = 0; j < indio_dev->buffer->scan_count + 1; j++)
+ for (j = 0; j < scan_count + 1; j++)
spi_message_add_tail(&xfers[j], &msg);
ret = spi_sync(st->us, &msg);
@@ -119,26 +121,27 @@ static irqreturn_t adis16400_trigger_handler(int irq, void *p)
s16 *data;
size_t datasize = ring->access->get_bytes_per_datum(ring);
/* Assumption that long is enough for maximum channels */
- unsigned long mask = *ring->scan_mask;
-
+ unsigned long mask = *indio_dev->active_scan_mask;
+ int scan_count = bitmap_weight(indio_dev->active_scan_mask,
+ indio_dev->masklength);
data = kmalloc(datasize , GFP_KERNEL);
if (data == NULL) {
dev_err(&st->us->dev, "memory alloc failed in ring bh");
return -ENOMEM;
}
- if (ring->scan_count) {
+ if (scan_count) {
if (st->variant->flags & ADIS16400_NO_BURST) {
ret = adis16350_spi_read_all(&indio_dev->dev, st->rx);
if (ret < 0)
goto err;
- for (; i < ring->scan_count; i++)
+ for (; i < scan_count; i++)
data[i] = *(s16 *)(st->rx + i*2);
} else {
ret = adis16400_spi_read_burst(&indio_dev->dev, st->rx);
if (ret < 0)
goto err;
- for (; i < indio_dev->buffer->scan_count; i++) {
+ for (; i < scan_count; i++) {
j = __ffs(mask);
mask &= ~(1 << j);
data[i] = be16_to_cpup(
@@ -186,10 +189,8 @@ int adis16400_configure_ring(struct iio_dev *indio_dev)
indio_dev->buffer = ring;
/* Effectively select the ring buffer implementation */
ring->access = &ring_sw_access_funcs;
- ring->bpe = 2;
ring->scan_timestamp = true;
- ring->setup_ops = &adis16400_ring_setup_ops;
- ring->owner = THIS_MODULE;
+ indio_dev->setup_ops = &adis16400_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&adis16400_trigger_handler,
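The ring code above now derives the number of enabled scan elements from active_scan_mask with bitmap_weight() instead of reading a cached scan_count. A minimal stand-alone sketch of the same calculation, using a plain unsigned long as the mask (hypothetical bit values, user-space C for illustration only):

    #include <stdio.h>

    int main(void)
    {
            /* bits 0, 2 and 5 enabled -> three scan elements */
            unsigned long active_scan_mask = (1UL << 0) | (1UL << 2) | (1UL << 5);

            /* __builtin_popcountl() stands in for the kernel's bitmap_weight() */
            int scan_count = __builtin_popcountl(active_scan_mask);

            printf("scan_count = %d\n", scan_count);        /* prints 3 */
            return 0;
    }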
diff --git a/drivers/staging/iio/industrialio-buffer.c b/drivers/staging/iio/industrialio-buffer.c
index 9df0ce8..d7b1e9e 100644
--- a/drivers/staging/iio/industrialio-buffer.c
+++ b/drivers/staging/iio/industrialio-buffer.c
@@ -24,7 +24,7 @@
#include "iio.h"
#include "iio_core.h"
#include "sysfs.h"
-#include "buffer_generic.h"
+#include "buffer.h"
static const char * const iio_endian_prefix[] = {
[IIO_BE] = "be",
@@ -43,7 +43,7 @@ ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
struct iio_dev *indio_dev = filp->private_data;
struct iio_buffer *rb = indio_dev->buffer;
- if (!rb->access->read_first_n)
+ if (!rb || !rb->access->read_first_n)
return -EINVAL;
return rb->access->read_first_n(rb, n, buf);
}
@@ -64,28 +64,9 @@ unsigned int iio_buffer_poll(struct file *filp,
return 0;
}
-int iio_chrdev_buffer_open(struct iio_dev *indio_dev)
+void iio_buffer_init(struct iio_buffer *buffer)
{
- struct iio_buffer *rb = indio_dev->buffer;
- if (!rb)
- return -EINVAL;
- if (rb->access->mark_in_use)
- rb->access->mark_in_use(rb);
- return 0;
-}
-
-void iio_chrdev_buffer_release(struct iio_dev *indio_dev)
-{
- struct iio_buffer *rb = indio_dev->buffer;
-
- clear_bit(IIO_BUSY_BIT_POS, &rb->flags);
- if (rb->access->unmark_in_use)
- rb->access->unmark_in_use(rb);
-}
-
-void iio_buffer_init(struct iio_buffer *buffer, struct iio_dev *indio_dev)
-{
- buffer->indio_dev = indio_dev;
+ INIT_LIST_HEAD(&buffer->demux_list);
init_waitqueue_head(&buffer->pollq);
}
EXPORT_SYMBOL(iio_buffer_init);
@@ -126,17 +107,15 @@ static ssize_t iio_scan_el_show(struct device *dev,
int ret;
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- ret = iio_scan_mask_query(indio_dev->buffer,
- to_iio_dev_attr(attr)->address);
- if (ret < 0)
- return ret;
+ ret = test_bit(to_iio_dev_attr(attr)->address,
+ indio_dev->buffer->scan_mask);
+
return sprintf(buf, "%d\n", ret);
}
static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
clear_bit(bit, buffer->scan_mask);
- buffer->scan_count--;
return 0;
}
@@ -153,11 +132,11 @@ static ssize_t iio_scan_el_store(struct device *dev,
state = !(buf[0] == '0');
mutex_lock(&indio_dev->mlock);
- if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ if (iio_buffer_enabled(indio_dev)) {
ret = -EBUSY;
goto error_ret;
}
- ret = iio_scan_mask_query(buffer, this_attr->address);
+ ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
if (ret < 0)
goto error_ret;
if (!state && ret) {
@@ -165,7 +144,7 @@ static ssize_t iio_scan_el_store(struct device *dev,
if (ret)
goto error_ret;
} else if (state && !ret) {
- ret = iio_scan_mask_set(buffer, this_attr->address);
+ ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
if (ret)
goto error_ret;
}
@@ -173,7 +152,7 @@ static ssize_t iio_scan_el_store(struct device *dev,
error_ret:
mutex_unlock(&indio_dev->mlock);
- return ret ? ret : len;
+ return ret < 0 ? ret : len;
}
@@ -196,7 +175,7 @@ static ssize_t iio_scan_el_ts_store(struct device *dev,
state = !(buf[0] == '0');
mutex_lock(&indio_dev->mlock);
- if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
+ if (iio_buffer_enabled(indio_dev)) {
ret = -EBUSY;
goto error_ret;
}
@@ -311,12 +290,14 @@ int iio_buffer_register(struct iio_dev *indio_dev,
if (ret < 0)
goto error_cleanup_dynamic;
attrcount += ret;
+ if (channels[i].type == IIO_TIMESTAMP)
+ buffer->scan_index_timestamp =
+ channels[i].scan_index;
}
if (indio_dev->masklength && buffer->scan_mask == NULL) {
- buffer->scan_mask
- = kzalloc(sizeof(*buffer->scan_mask)*
- BITS_TO_LONGS(indio_dev->masklength),
- GFP_KERNEL);
+ buffer->scan_mask = kcalloc(BITS_TO_LONGS(indio_dev->masklength),
+ sizeof(*buffer->scan_mask),
+ GFP_KERNEL);
if (buffer->scan_mask == NULL) {
ret = -ENOMEM;
goto error_cleanup_dynamic;
@@ -326,10 +307,9 @@ int iio_buffer_register(struct iio_dev *indio_dev,
buffer->scan_el_group.name = iio_scan_elements_group_name;
- buffer->scan_el_group.attrs
- = kzalloc(sizeof(buffer->scan_el_group.attrs[0])*
- (attrcount + 1),
- GFP_KERNEL);
+ buffer->scan_el_group.attrs = kcalloc(attrcount + 1,
+ sizeof(buffer->scan_el_group.attrs[0]),
+ GFP_KERNEL);
if (buffer->scan_el_group.attrs == NULL) {
ret = -ENOMEM;
goto error_free_scan_mask;
@@ -395,31 +375,20 @@ ssize_t iio_buffer_write_length(struct device *dev,
if (val == buffer->access->get_length(buffer))
return len;
- if (buffer->access->set_length) {
- buffer->access->set_length(buffer, val);
- if (buffer->access->mark_param_change)
- buffer->access->mark_param_change(buffer);
+ mutex_lock(&indio_dev->mlock);
+ if (iio_buffer_enabled(indio_dev)) {
+ ret = -EBUSY;
+ } else {
+ if (buffer->access->set_length)
+ buffer->access->set_length(buffer, val);
+ ret = 0;
}
+ mutex_unlock(&indio_dev->mlock);
- return len;
+ return ret ? ret : len;
}
EXPORT_SYMBOL(iio_buffer_write_length);
-ssize_t iio_buffer_read_bytes_per_datum(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev *indio_dev = dev_get_drvdata(dev);
- struct iio_buffer *buffer = indio_dev->buffer;
-
- if (buffer->access->get_bytes_per_datum)
- return sprintf(buf, "%d\n",
- buffer->access->get_bytes_per_datum(buffer));
-
- return 0;
-}
-EXPORT_SYMBOL(iio_buffer_read_bytes_per_datum);
-
ssize_t iio_buffer_store_enable(struct device *dev,
struct device_attribute *attr,
const char *buf,
@@ -434,14 +403,14 @@ ssize_t iio_buffer_store_enable(struct device *dev,
mutex_lock(&indio_dev->mlock);
previous_mode = indio_dev->currentmode;
requested_state = !(buf[0] == '0');
- current_state = !!(previous_mode & INDIO_ALL_BUFFER_MODES);
+ current_state = iio_buffer_enabled(indio_dev);
if (current_state == requested_state) {
printk(KERN_INFO "iio-buffer, current state requested again\n");
goto done;
}
if (requested_state) {
- if (buffer->setup_ops->preenable) {
- ret = buffer->setup_ops->preenable(indio_dev);
+ if (indio_dev->setup_ops->preenable) {
+ ret = indio_dev->setup_ops->preenable(indio_dev);
if (ret) {
printk(KERN_ERR
"Buffer not started:"
@@ -458,16 +427,12 @@ ssize_t iio_buffer_store_enable(struct device *dev,
goto error_ret;
}
}
- if (buffer->access->mark_in_use)
- buffer->access->mark_in_use(buffer);
/* Definitely possible for devices to support both of these.*/
if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
if (!indio_dev->trig) {
printk(KERN_INFO
"Buffer not started: no trigger\n");
ret = -EINVAL;
- if (buffer->access->unmark_in_use)
- buffer->access->unmark_in_use(buffer);
goto error_ret;
}
indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
@@ -478,32 +443,28 @@ ssize_t iio_buffer_store_enable(struct device *dev,
goto error_ret;
}
- if (buffer->setup_ops->postenable) {
- ret = buffer->setup_ops->postenable(indio_dev);
+ if (indio_dev->setup_ops->postenable) {
+ ret = indio_dev->setup_ops->postenable(indio_dev);
if (ret) {
printk(KERN_INFO
"Buffer not started:"
"postenable failed\n");
- if (buffer->access->unmark_in_use)
- buffer->access->unmark_in_use(buffer);
indio_dev->currentmode = previous_mode;
- if (buffer->setup_ops->postdisable)
- buffer->setup_ops->
+ if (indio_dev->setup_ops->postdisable)
+ indio_dev->setup_ops->
postdisable(indio_dev);
goto error_ret;
}
}
} else {
- if (buffer->setup_ops->predisable) {
- ret = buffer->setup_ops->predisable(indio_dev);
+ if (indio_dev->setup_ops->predisable) {
+ ret = indio_dev->setup_ops->predisable(indio_dev);
if (ret)
goto error_ret;
}
- if (buffer->access->unmark_in_use)
- buffer->access->unmark_in_use(buffer);
indio_dev->currentmode = INDIO_DIRECT_MODE;
- if (buffer->setup_ops->postdisable) {
- ret = buffer->setup_ops->postdisable(indio_dev);
+ if (indio_dev->setup_ops->postdisable) {
+ ret = indio_dev->setup_ops->postdisable(indio_dev);
if (ret)
goto error_ret;
}
@@ -523,37 +484,10 @@ ssize_t iio_buffer_show_enable(struct device *dev,
char *buf)
{
struct iio_dev *indio_dev = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", !!(indio_dev->currentmode
- & INDIO_ALL_BUFFER_MODES));
+ return sprintf(buf, "%d\n", iio_buffer_enabled(indio_dev));
}
EXPORT_SYMBOL(iio_buffer_show_enable);
-int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
-{
- struct iio_buffer *buffer = indio_dev->buffer;
- size_t size;
- dev_dbg(&indio_dev->dev, "%s\n", __func__);
- /* Check if there are any scan elements enabled, if not fail*/
- if (!(buffer->scan_count || buffer->scan_timestamp))
- return -EINVAL;
- if (buffer->scan_timestamp)
- if (buffer->scan_count)
- /* Timestamp (aligned to s64) and data */
- size = (((buffer->scan_count * buffer->bpe)
- + sizeof(s64) - 1)
- & ~(sizeof(s64) - 1))
- + sizeof(s64);
- else /* Timestamp only */
- size = sizeof(s64);
- else /* Data only */
- size = buffer->scan_count * buffer->bpe;
- buffer->access->set_bytes_per_datum(buffer, size);
-
- return 0;
-}
-EXPORT_SYMBOL(iio_sw_buffer_preenable);
-
-
/* note NULL used as error indicator as it doesn't make sense. */
static unsigned long *iio_scan_mask_match(unsigned long *av_masks,
unsigned int masklength,
@@ -569,14 +503,57 @@ static unsigned long *iio_scan_mask_match(unsigned long *av_masks,
return NULL;
}
+int iio_sw_buffer_preenable(struct iio_dev *indio_dev)
+{
+ struct iio_buffer *buffer = indio_dev->buffer;
+ const struct iio_chan_spec *ch;
+ unsigned bytes = 0;
+ int length, i;
+ dev_dbg(&indio_dev->dev, "%s\n", __func__);
+
+ /* How much space will the demuxed element take? */
+ for_each_set_bit(i, buffer->scan_mask,
+ indio_dev->masklength) {
+ ch = iio_find_channel_from_si(indio_dev, i);
+ length = ch->scan_type.storagebits/8;
+ bytes = ALIGN(bytes, length);
+ bytes += length;
+ }
+ if (buffer->scan_timestamp) {
+ ch = iio_find_channel_from_si(indio_dev,
+ buffer->scan_index_timestamp);
+ length = ch->scan_type.storagebits/8;
+ bytes = ALIGN(bytes, length);
+ bytes += length;
+ }
+ buffer->access->set_bytes_per_datum(buffer, bytes);
+
+ /* What scan mask do we actually have? */
+ if (indio_dev->available_scan_masks)
+ indio_dev->active_scan_mask =
+ iio_scan_mask_match(indio_dev->available_scan_masks,
+ indio_dev->masklength,
+ buffer->scan_mask);
+ else
+ indio_dev->active_scan_mask = buffer->scan_mask;
+ iio_update_demux(indio_dev);
+
+ if (indio_dev->info->update_scan_mode)
+ return indio_dev->info
+ ->update_scan_mode(indio_dev,
+ indio_dev->active_scan_mask);
+ return 0;
+}
+EXPORT_SYMBOL(iio_sw_buffer_preenable);
+
/**
* iio_scan_mask_set() - set particular bit in the scan mask
* @buffer: the buffer whose scan mask we are interested in
* @bit: the bit to be set.
**/
-int iio_scan_mask_set(struct iio_buffer *buffer, int bit)
+int iio_scan_mask_set(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer, int bit)
{
- struct iio_dev *indio_dev = buffer->indio_dev;
unsigned long *mask;
unsigned long *trialmask;
@@ -604,7 +581,6 @@ int iio_scan_mask_set(struct iio_buffer *buffer, int bit)
}
}
bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
- buffer->scan_count++;
kfree(trialmask);
@@ -612,25 +588,147 @@ int iio_scan_mask_set(struct iio_buffer *buffer, int bit)
};
EXPORT_SYMBOL_GPL(iio_scan_mask_set);
-int iio_scan_mask_query(struct iio_buffer *buffer, int bit)
+int iio_scan_mask_query(struct iio_dev *indio_dev,
+ struct iio_buffer *buffer, int bit)
{
- struct iio_dev *indio_dev = buffer->indio_dev;
- long *mask;
-
if (bit > indio_dev->masklength)
return -EINVAL;
if (!buffer->scan_mask)
return 0;
- if (indio_dev->available_scan_masks)
- mask = iio_scan_mask_match(indio_dev->available_scan_masks,
- indio_dev->masklength,
- buffer->scan_mask);
- else
- mask = buffer->scan_mask;
- if (!mask)
- return 0;
- return test_bit(bit, mask);
+ return test_bit(bit, buffer->scan_mask);
};
EXPORT_SYMBOL_GPL(iio_scan_mask_query);
+
+/**
+ * struct iio_demux_table - table describing demux memcpy ops
+ * @from: index to copy from
+ * @to: index to copy to
+ * @length: how many bytes to copy
+ * @l: list head used for management
+ */
+struct iio_demux_table {
+ unsigned from;
+ unsigned to;
+ unsigned length;
+ struct list_head l;
+};
+
+static unsigned char *iio_demux(struct iio_buffer *buffer,
+ unsigned char *datain)
+{
+ struct iio_demux_table *t;
+
+ if (list_empty(&buffer->demux_list))
+ return datain;
+ list_for_each_entry(t, &buffer->demux_list, l)
+ memcpy(buffer->demux_bounce + t->to,
+ datain + t->from, t->length);
+
+ return buffer->demux_bounce;
+}
+
+int iio_push_to_buffer(struct iio_buffer *buffer, unsigned char *data,
+ s64 timestamp)
+{
+ unsigned char *dataout = iio_demux(buffer, data);
+
+ return buffer->access->store_to(buffer, dataout, timestamp);
+}
+EXPORT_SYMBOL_GPL(iio_push_to_buffer);
+
+int iio_update_demux(struct iio_dev *indio_dev)
+{
+ const struct iio_chan_spec *ch;
+ struct iio_buffer *buffer = indio_dev->buffer;
+ int ret, in_ind = -1, out_ind, length;
+ unsigned in_loc = 0, out_loc = 0;
+ struct iio_demux_table *p, *q;
+
+ /* Clear out any old demux */
+ list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
+ list_del(&p->l);
+ kfree(p);
+ }
+ kfree(buffer->demux_bounce);
+ buffer->demux_bounce = NULL;
+
+ /* First work out which scan mode we will actually have */
+ if (bitmap_equal(indio_dev->active_scan_mask,
+ buffer->scan_mask,
+ indio_dev->masklength))
+ return 0;
+
+ /* Now we have the two masks, work from least sig and build up sizes */
+ for_each_set_bit(out_ind,
+ indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ in_ind = find_next_bit(indio_dev->active_scan_mask,
+ indio_dev->masklength,
+ in_ind + 1);
+ while (in_ind != out_ind) {
+ in_ind = find_next_bit(indio_dev->active_scan_mask,
+ indio_dev->masklength,
+ in_ind + 1);
+ ch = iio_find_channel_from_si(indio_dev, in_ind);
+ length = ch->scan_type.storagebits/8;
+ /* Make sure we are aligned */
+ in_loc += length;
+ if (in_loc % length)
+ in_loc += length - in_loc % length;
+ }
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (p == NULL) {
+ ret = -ENOMEM;
+ goto error_clear_mux_table;
+ }
+ ch = iio_find_channel_from_si(indio_dev, in_ind);
+ length = ch->scan_type.storagebits/8;
+ if (out_loc % length)
+ out_loc += length - out_loc % length;
+ if (in_loc % length)
+ in_loc += length - in_loc % length;
+ p->from = in_loc;
+ p->to = out_loc;
+ p->length = length;
+ list_add_tail(&p->l, &buffer->demux_list);
+ out_loc += length;
+ in_loc += length;
+ }
+ /* Relies on scan_timestamp being last */
+ if (buffer->scan_timestamp) {
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (p == NULL) {
+ ret = -ENOMEM;
+ goto error_clear_mux_table;
+ }
+ ch = iio_find_channel_from_si(indio_dev,
+ buffer->scan_index_timestamp);
+ length = ch->scan_type.storagebits/8;
+ if (out_loc % length)
+ out_loc += length - out_loc % length;
+ if (in_loc % length)
+ in_loc += length - in_loc % length;
+ p->from = in_loc;
+ p->to = out_loc;
+ p->length = length;
+ list_add_tail(&p->l, &buffer->demux_list);
+ out_loc += length;
+ in_loc += length;
+ }
+ buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
+ if (buffer->demux_bounce == NULL) {
+ ret = -ENOMEM;
+ goto error_clear_mux_table;
+ }
+ return 0;
+
+error_clear_mux_table:
+ list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
+ list_del(&p->l);
+ kfree(p);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(iio_update_demux);
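iio_update_demux() above places each enabled channel at an offset aligned to its own storage size, then appends the timestamp the same way. A stand-alone sketch of that alignment rule (hypothetical channel widths, user-space C for illustration only):

    #include <stdio.h>

    /* Round 'bytes' up to the next multiple of 'length', as the demux code does */
    static unsigned align_up(unsigned bytes, unsigned length)
    {
            if (bytes % length)
                    bytes += length - bytes % length;
            return bytes;
    }

    int main(void)
    {
            /* e.g. two 16-bit channels followed by a 64-bit timestamp */
            unsigned storage[] = { 2, 2, 8 };
            unsigned out_loc = 0, i;

            for (i = 0; i < 3; i++) {
                    out_loc = align_up(out_loc, storage[i]);
                    printf("element %u at offset %u\n", i, out_loc);
                    out_loc += storage[i];
            }
            /* offsets come out as 0, 2 and 8, i.e. 16 bytes per datum */
            return 0;
    }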
diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c
index aec9311..19f897f 100644
--- a/drivers/staging/iio/industrialio-core.c
+++ b/drivers/staging/iio/industrialio-core.c
@@ -25,8 +25,8 @@
#include "iio.h"
#include "iio_core.h"
#include "iio_core_trigger.h"
-#include "chrdev.h"
#include "sysfs.h"
+#include "events.h"
/* IDA to assign each registered device a unique id*/
static DEFINE_IDA(iio_ida);
@@ -77,17 +77,29 @@ static const char * const iio_modifier_names[] = {
/* relies on pairs of these shared then separate */
static const char * const iio_chan_info_postfix[] = {
- [IIO_CHAN_INFO_SCALE_SHARED/2] = "scale",
- [IIO_CHAN_INFO_OFFSET_SHARED/2] = "offset",
- [IIO_CHAN_INFO_CALIBSCALE_SHARED/2] = "calibscale",
- [IIO_CHAN_INFO_CALIBBIAS_SHARED/2] = "calibbias",
- [IIO_CHAN_INFO_PEAK_SHARED/2] = "peak_raw",
- [IIO_CHAN_INFO_PEAK_SCALE_SHARED/2] = "peak_scale",
- [IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW_SHARED/2]
- = "quadrature_correction_raw",
- [IIO_CHAN_INFO_AVERAGE_RAW_SHARED/2] = "mean_raw",
+ [IIO_CHAN_INFO_SCALE] = "scale",
+ [IIO_CHAN_INFO_OFFSET] = "offset",
+ [IIO_CHAN_INFO_CALIBSCALE] = "calibscale",
+ [IIO_CHAN_INFO_CALIBBIAS] = "calibbias",
+ [IIO_CHAN_INFO_PEAK] = "peak_raw",
+ [IIO_CHAN_INFO_PEAK_SCALE] = "peak_scale",
+ [IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW] = "quadrature_correction_raw",
+ [IIO_CHAN_INFO_AVERAGE_RAW] = "mean_raw",
+ [IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY]
+ = "filter_low_pass_3db_frequency",
};
+const struct iio_chan_spec
+*iio_find_channel_from_si(struct iio_dev *indio_dev, int si)
+{
+ int i;
+
+ for (i = 0; i < indio_dev->num_channels; i++)
+ if (indio_dev->channels[i].scan_index == si)
+ return &indio_dev->channels[i];
+ return NULL;
+}
+
/**
* struct iio_detected_event_list - list element for events that have occurred
* @list: linked list header
@@ -169,8 +181,11 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
{
struct iio_event_interface *ev_int = filep->private_data;
struct iio_detected_event_list *el;
+ size_t len = sizeof(el->ev);
int ret;
- size_t len;
+
+ if (count < len)
+ return -EINVAL;
mutex_lock(&ev_int->event_list_lock);
if (list_empty(&ev_int->det_events)) {
@@ -192,7 +207,6 @@ static ssize_t iio_event_chrdev_read(struct file *filep,
el = list_first_entry(&ev_int->det_events,
struct iio_detected_event_list,
list);
- len = sizeof el->ev;
if (copy_to_user(buf, &(el->ev), len)) {
ret = -EFAULT;
goto error_mutex_unlock;
@@ -415,7 +429,7 @@ int __iio_device_attr_init(struct device_attribute *dev_attr,
sysfs_attr_init(&dev_attr->attr);
/* Build up postfix of <extend_name>_<modifier>_postfix */
- if (chan->modified) {
+ if (chan->modified && !generic) {
if (chan->extend_name)
full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
iio_modifier_names[chan
@@ -600,7 +614,7 @@ static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
chan,
&iio_read_channel_info,
&iio_write_channel_info,
- (1 << i),
+ i/2,
!(i%2),
&indio_dev->dev,
&indio_dev->channel_attr_list);
@@ -665,10 +679,9 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
if (indio_dev->name)
attrcount++;
- indio_dev->chan_attr_group.attrs
- = kzalloc(sizeof(indio_dev->chan_attr_group.attrs[0])*
- (attrcount + 1),
- GFP_KERNEL);
+ indio_dev->chan_attr_group.attrs = kcalloc(attrcount + 1,
+ sizeof(indio_dev->chan_attr_group.attrs[0]),
+ GFP_KERNEL);
if (indio_dev->chan_attr_group.attrs == NULL) {
ret = -ENOMEM;
goto error_clear_attrs;
@@ -788,6 +801,9 @@ static ssize_t iio_ev_value_store(struct device *dev,
unsigned long val;
int ret;
+ if (!indio_dev->info->write_event_value)
+ return -EINVAL;
+
ret = strict_strtoul(buf, 10, &val);
if (ret)
return ret;
@@ -958,10 +974,9 @@ static int iio_device_register_eventset(struct iio_dev *indio_dev)
}
indio_dev->event_interface->group.name = iio_event_group_name;
- indio_dev->event_interface->group.attrs =
- kzalloc(sizeof(indio_dev->event_interface->group.attrs[0])
- *(attrcount + 1),
- GFP_KERNEL);
+ indio_dev->event_interface->group.attrs = kcalloc(attrcount + 1,
+ sizeof(indio_dev->event_interface->group.attrs[0]),
+ GFP_KERNEL);
if (indio_dev->event_interface->group.attrs == NULL) {
ret = -ENOMEM;
goto error_free_setup_event_lines;
@@ -1068,9 +1083,13 @@ static int iio_chrdev_open(struct inode *inode, struct file *filp)
{
struct iio_dev *indio_dev = container_of(inode->i_cdev,
struct iio_dev, chrdev);
+
+ if (test_and_set_bit(IIO_BUSY_BIT_POS, &indio_dev->flags))
+ return -EBUSY;
+
filp->private_data = indio_dev;
- return iio_chrdev_buffer_open(indio_dev);
+ return 0;
}
/**
@@ -1078,8 +1097,9 @@ static int iio_chrdev_open(struct inode *inode, struct file *filp)
**/
static int iio_chrdev_release(struct inode *inode, struct file *filp)
{
- iio_chrdev_buffer_release(container_of(inode->i_cdev,
- struct iio_dev, chrdev));
+ struct iio_dev *indio_dev = container_of(inode->i_cdev,
+ struct iio_dev, chrdev);
+ clear_bit(IIO_BUSY_BIT_POS, &indio_dev->flags);
return 0;
}
diff --git a/drivers/staging/iio/industrialio-trigger.c b/drivers/staging/iio/industrialio-trigger.c
index 2c626e0..47ecadd 100644
--- a/drivers/staging/iio/industrialio-trigger.c
+++ b/drivers/staging/iio/industrialio-trigger.c
@@ -159,13 +159,12 @@ EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll);
void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time)
{
int i;
- if (!trig->use_count) {
+ if (!trig->use_count)
for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++)
if (trig->subirqs[i].enabled) {
trig->use_count++;
handle_nested_irq(trig->subirq_base + i);
}
- }
}
EXPORT_SYMBOL(iio_trigger_poll_chained);
@@ -173,10 +172,9 @@ void iio_trigger_notify_done(struct iio_trigger *trig)
{
trig->use_count--;
if (trig->use_count == 0 && trig->ops && trig->ops->try_reenable)
- if (trig->ops->try_reenable(trig)) {
+ if (trig->ops->try_reenable(trig))
/* Missed an interrupt so launch new poll now */
iio_trigger_poll(trig, 0);
- }
}
EXPORT_SYMBOL(iio_trigger_notify_done);
@@ -222,8 +220,16 @@ static int iio_trigger_attach_poll_func(struct iio_trigger *trig,
ret = request_threaded_irq(pf->irq, pf->h, pf->thread,
pf->type, pf->name,
pf);
- if (trig->ops && trig->ops->set_trigger_state && notinuse)
+ if (ret < 0) {
+ module_put(pf->indio_dev->info->driver_module);
+ return ret;
+ }
+
+ if (trig->ops && trig->ops->set_trigger_state && notinuse) {
ret = trig->ops->set_trigger_state(trig, true);
+ if (ret < 0)
+ module_put(pf->indio_dev->info->driver_module);
+ }
return ret;
}
@@ -295,7 +301,7 @@ void iio_dealloc_pollfunc(struct iio_poll_func *pf)
EXPORT_SYMBOL_GPL(iio_dealloc_pollfunc);
/**
- * iio_trigger_read_currrent() - trigger consumer sysfs query which trigger
+ * iio_trigger_read_current() - trigger consumer sysfs query which trigger
*
* For trigger consumers the current_trigger interface allows the trigger
* used by the device to be queried.
@@ -336,6 +342,8 @@ static ssize_t iio_trigger_write_current(struct device *dev,
mutex_unlock(&indio_dev->mlock);
trig = iio_trigger_find_by_name(buf, len);
+ if (oldtrig == trig)
+ return len;
if (trig && indio_dev->info->validate_trigger) {
ret = indio_dev->info->validate_trigger(indio_dev, trig);
@@ -473,12 +481,10 @@ void iio_free_trigger(struct iio_trigger *trig)
}
EXPORT_SYMBOL(iio_free_trigger);
-int iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
+void iio_device_register_trigger_consumer(struct iio_dev *indio_dev)
{
indio_dev->groups[indio_dev->groupcounter++] =
&iio_trigger_consumer_attr_group;
-
- return 0;
}
void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
@@ -490,18 +496,14 @@ void iio_device_unregister_trigger_consumer(struct iio_dev *indio_dev)
int iio_triggered_buffer_postenable(struct iio_dev *indio_dev)
{
- return indio_dev->trig
- ? iio_trigger_attach_poll_func(indio_dev->trig,
- indio_dev->pollfunc)
- : 0;
+ return iio_trigger_attach_poll_func(indio_dev->trig,
+ indio_dev->pollfunc);
}
EXPORT_SYMBOL(iio_triggered_buffer_postenable);
int iio_triggered_buffer_predisable(struct iio_dev *indio_dev)
{
- return indio_dev->trig
- ? iio_trigger_dettach_poll_func(indio_dev->trig,
- indio_dev->pollfunc)
- : 0;
+ return iio_trigger_dettach_poll_func(indio_dev->trig,
+ indio_dev->pollfunc);
}
EXPORT_SYMBOL(iio_triggered_buffer_predisable);
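The attach_poll_func change above makes sure the module reference taken for the device driver is dropped again on every failure path. The general shape, sketched with placeholder names (owner and do_setup() are hypothetical; the real code uses request_threaded_irq() and set_trigger_state() as the setup steps):

    if (!try_module_get(owner))             /* take the reference up front   */
            return -ENODEV;

    ret = do_setup();                       /* e.g. request_threaded_irq()   */
    if (ret < 0) {
            module_put(owner);              /* undo the reference on failure */
            return ret;
    }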
diff --git a/drivers/staging/iio/kfifo_buf.c b/drivers/staging/iio/kfifo_buf.c
index e8c234b..e1e9c06 100644
--- a/drivers/staging/iio/kfifo_buf.c
+++ b/drivers/staging/iio/kfifo_buf.c
@@ -11,9 +11,7 @@
struct iio_kfifo {
struct iio_buffer buffer;
struct kfifo kf;
- int use_count;
int update_needed;
- struct mutex use_lock;
};
#define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
@@ -33,54 +31,25 @@ static int iio_request_update_kfifo(struct iio_buffer *r)
int ret = 0;
struct iio_kfifo *buf = iio_to_kfifo(r);
- mutex_lock(&buf->use_lock);
if (!buf->update_needed)
goto error_ret;
- if (buf->use_count) {
- ret = -EAGAIN;
- goto error_ret;
- }
kfifo_free(&buf->kf);
ret = __iio_allocate_kfifo(buf, buf->buffer.bytes_per_datum,
buf->buffer.length);
error_ret:
- mutex_unlock(&buf->use_lock);
return ret;
}
-static void iio_mark_kfifo_in_use(struct iio_buffer *r)
-{
- struct iio_kfifo *buf = iio_to_kfifo(r);
- mutex_lock(&buf->use_lock);
- buf->use_count++;
- mutex_unlock(&buf->use_lock);
-}
-
-static void iio_unmark_kfifo_in_use(struct iio_buffer *r)
-{
- struct iio_kfifo *buf = iio_to_kfifo(r);
- mutex_lock(&buf->use_lock);
- buf->use_count--;
- mutex_unlock(&buf->use_lock);
-}
-
static int iio_get_length_kfifo(struct iio_buffer *r)
{
return r->length;
}
-static inline void __iio_init_kfifo(struct iio_kfifo *kf)
-{
- mutex_init(&kf->use_lock);
-}
-
static IIO_BUFFER_ENABLE_ATTR;
-static IIO_BUFFER_BYTES_PER_DATUM_ATTR;
static IIO_BUFFER_LENGTH_ATTR;
static struct attribute *iio_kfifo_attributes[] = {
&dev_attr_length.attr,
- &dev_attr_bytes_per_datum.attr,
&dev_attr_enable.attr,
NULL,
};
@@ -98,9 +67,8 @@ struct iio_buffer *iio_kfifo_allocate(struct iio_dev *indio_dev)
if (!kf)
return NULL;
kf->update_needed = true;
- iio_buffer_init(&kf->buffer, indio_dev);
+ iio_buffer_init(&kf->buffer);
kf->buffer.attrs = &iio_kfifo_attribute_group;
- __iio_init_kfifo(kf);
return &kf->buffer;
}
@@ -111,20 +79,19 @@ static int iio_get_bytes_per_datum_kfifo(struct iio_buffer *r)
return r->bytes_per_datum;
}
-static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
+static int iio_mark_update_needed_kfifo(struct iio_buffer *r)
{
- if (r->bytes_per_datum != bpd) {
- r->bytes_per_datum = bpd;
- if (r->access->mark_param_change)
- r->access->mark_param_change(r);
- }
+ struct iio_kfifo *kf = iio_to_kfifo(r);
+ kf->update_needed = true;
return 0;
}
-static int iio_mark_update_needed_kfifo(struct iio_buffer *r)
+static int iio_set_bytes_per_datum_kfifo(struct iio_buffer *r, size_t bpd)
{
- struct iio_kfifo *kf = iio_to_kfifo(r);
- kf->update_needed = true;
+ if (r->bytes_per_datum != bpd) {
+ r->bytes_per_datum = bpd;
+ iio_mark_update_needed_kfifo(r);
+ }
return 0;
}
@@ -132,8 +99,7 @@ static int iio_set_length_kfifo(struct iio_buffer *r, int length)
{
if (r->length != length) {
r->length = length;
- if (r->access->mark_param_change)
- r->access->mark_param_change(r);
+ iio_mark_update_needed_kfifo(r);
}
return 0;
}
@@ -150,16 +116,9 @@ static int iio_store_to_kfifo(struct iio_buffer *r,
{
int ret;
struct iio_kfifo *kf = iio_to_kfifo(r);
- u8 *datal = kmalloc(r->bytes_per_datum, GFP_KERNEL);
- memcpy(datal, data, r->bytes_per_datum - sizeof(timestamp));
- memcpy(datal + r->bytes_per_datum - sizeof(timestamp),
- &timestamp, sizeof(timestamp));
ret = kfifo_in(&kf->kf, data, r->bytes_per_datum);
- if (ret != r->bytes_per_datum) {
- kfree(datal);
+ if (ret != r->bytes_per_datum)
return -EBUSY;
- }
- kfree(datal);
return 0;
}
@@ -169,17 +128,18 @@ static int iio_read_first_n_kfifo(struct iio_buffer *r,
int ret, copied;
struct iio_kfifo *kf = iio_to_kfifo(r);
- ret = kfifo_to_user(&kf->kf, buf, r->bytes_per_datum*n, &copied);
+ if (n < r->bytes_per_datum)
+ return -EINVAL;
+
+ n = rounddown(n, r->bytes_per_datum);
+ ret = kfifo_to_user(&kf->kf, buf, n, &copied);
return copied;
}
const struct iio_buffer_access_funcs kfifo_access_funcs = {
- .mark_in_use = &iio_mark_kfifo_in_use,
- .unmark_in_use = &iio_unmark_kfifo_in_use,
.store_to = &iio_store_to_kfifo,
.read_first_n = &iio_read_first_n_kfifo,
- .mark_param_change = &iio_mark_update_needed_kfifo,
.request_update = &iio_request_update_kfifo,
.get_bytes_per_datum = &iio_get_bytes_per_datum_kfifo,
.set_bytes_per_datum = &iio_set_bytes_per_datum_kfifo,
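iio_read_first_n_kfifo() above now rejects reads shorter than one datum and trims the request to a whole number of records before calling kfifo_to_user(). A stand-alone sketch of the rounding (user-space C; rounddown_to() mirrors the kernel's rounddown() helper, values are examples only):

    #include <stdio.h>

    /* Largest multiple of 'step' that is <= n */
    static size_t rounddown_to(size_t n, size_t step)
    {
            return n - (n % step);
    }

    int main(void)
    {
            size_t bytes_per_datum = 6;     /* e.g. three 16-bit channels   */
            size_t n = 100;                 /* user asked for 100 bytes     */

            if (n < bytes_per_datum)
                    return 1;               /* the driver returns -EINVAL   */

            n = rounddown_to(n, bytes_per_datum);
            printf("reading %zu bytes (%zu records)\n", n, n / bytes_per_datum);
            return 0;
    }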
diff --git a/drivers/staging/iio/kfifo_buf.h b/drivers/staging/iio/kfifo_buf.h
index a15598b..cc2bd9a 100644
--- a/drivers/staging/iio/kfifo_buf.h
+++ b/drivers/staging/iio/kfifo_buf.h
@@ -1,7 +1,7 @@
#include <linux/kfifo.h>
#include "iio.h"
-#include "buffer_generic.h"
+#include "buffer.h"
extern const struct iio_buffer_access_funcs kfifo_access_funcs;
diff --git a/drivers/staging/iio/light/isl29018.c b/drivers/staging/iio/light/isl29018.c
index 9dc9e63..849d6a5 100644
--- a/drivers/staging/iio/light/isl29018.c
+++ b/drivers/staging/iio/light/isl29018.c
@@ -362,8 +362,7 @@ static int isl29018_write_raw(struct iio_dev *indio_dev,
int ret = -EINVAL;
mutex_lock(&chip->lock);
- if (mask == (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE) &&
- chan->type == IIO_LIGHT) {
+ if (mask == IIO_CHAN_INFO_CALIBSCALE && chan->type == IIO_LIGHT) {
chip->lux_scale = val;
ret = 0;
}
@@ -402,7 +401,7 @@ static int isl29018_read_raw(struct iio_dev *indio_dev,
if (!ret)
ret = IIO_VAL_INT;
break;
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
if (chan->type == IIO_LIGHT) {
*val = chip->lux_scale;
ret = IIO_VAL_INT;
@@ -421,7 +420,7 @@ static const struct iio_chan_spec isl29018_channels[] = {
.indexed = 1,
.channel = 0,
.processed_val = IIO_PROCESSED,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT,
}, {
.type = IIO_INTENSITY,
.modified = 1,
@@ -603,19 +602,7 @@ static struct i2c_driver isl29018_driver = {
.remove = __devexit_p(isl29018_remove),
.id_table = isl29018_id,
};
-
-static int __init isl29018_init(void)
-{
- return i2c_add_driver(&isl29018_driver);
-}
-
-static void __exit isl29018_exit(void)
-{
- i2c_del_driver(&isl29018_driver);
-}
-
-module_init(isl29018_init);
-module_exit(isl29018_exit);
+module_i2c_driver(isl29018_driver);
MODULE_DESCRIPTION("ISL29018 Ambient Light Sensor driver");
MODULE_LICENSE("GPL");
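After this series the mask handed to read_raw()/write_raw() is the plain IIO_CHAN_INFO_* enum value rather than a shared/separate bit, which is why the case labels above lose their (1 << ...) wrappers. A minimal sketch of the resulting callback shape (example_read_channel() and the scale value are hypothetical):

    static int example_read_raw(struct iio_dev *indio_dev,
                                struct iio_chan_spec const *chan,
                                int *val, int *val2, long mask)
    {
            switch (mask) {
            case 0:                          /* raw reading                     */
                    *val = example_read_channel(chan->address);
                    return IIO_VAL_INT;
            case IIO_CHAN_INFO_SCALE:        /* was (1 << ..._SCALE_SEPARATE)   */
                    *val = 0;
                    *val2 = 1000;            /* made-up scale, for illustration */
                    return IIO_VAL_INT_PLUS_MICRO;
            default:
                    return -EINVAL;
            }
    }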
diff --git a/drivers/staging/iio/light/tsl2563.c b/drivers/staging/iio/light/tsl2563.c
index 7e984bc..ffca85e 100644
--- a/drivers/staging/iio/light/tsl2563.c
+++ b/drivers/staging/iio/light/tsl2563.c
@@ -37,6 +37,7 @@
#include "../iio.h"
#include "../sysfs.h"
+#include "../events.h"
#include "tsl2563.h"
/* Use this many bits for fraction part. */
@@ -226,6 +227,8 @@ static int tsl2563_read_id(struct tsl2563_chip *chip, u8 *id)
if (ret < 0)
return ret;
+ *id = ret;
+
return 0;
}
@@ -510,7 +513,7 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev,
}
break;
- case (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE):
+ case IIO_CHAN_INFO_CALIBSCALE:
if (chan->channel == 0)
*val = calib_to_sysfs(chip->calib0);
else
@@ -536,7 +539,7 @@ static const struct iio_chan_spec tsl2563_channels[] = {
.type = IIO_INTENSITY,
.modified = 1,
.channel2 = IIO_MOD_LIGHT_BOTH,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE),
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT,
.event_mask = (IIO_EV_BIT(IIO_EV_TYPE_THRESH,
IIO_EV_DIR_RISING) |
IIO_EV_BIT(IIO_EV_TYPE_THRESH,
@@ -544,8 +547,8 @@ static const struct iio_chan_spec tsl2563_channels[] = {
}, {
.type = IIO_INTENSITY,
.modified = 1,
- .channel2 = IIO_MOD_LIGHT_BOTH,
- .info_mask = (1 << IIO_CHAN_INFO_CALIBSCALE_SEPARATE),
+ .channel2 = IIO_MOD_LIGHT_IR,
+ .info_mask = IIO_CHAN_INFO_CALIBSCALE_SEPARATE_BIT,
}
};
@@ -866,20 +869,8 @@ static struct i2c_driver tsl2563_i2c_driver = {
.remove = __devexit_p(tsl2563_remove),
.id_table = tsl2563_id,
};
-
-static int __init tsl2563_init(void)
-{
- return i2c_add_driver(&tsl2563_i2c_driver);
-}
-
-static void __exit tsl2563_exit(void)
-{
- i2c_del_driver(&tsl2563_i2c_driver);
-}
+module_i2c_driver(tsl2563_i2c_driver);
MODULE_AUTHOR("Nokia Corporation");
MODULE_DESCRIPTION("tsl2563 light sensor driver");
MODULE_LICENSE("GPL");
-
-module_init(tsl2563_init);
-module_exit(tsl2563_exit);
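The tsl2563_read_id() fix above stores the SMBus return value into *id instead of throwing it away: i2c_smbus_read_byte_data() returns either a negative errno or the byte that was read. The pattern, sketched (the 0x0A register address is made up for the example):

    static int example_read_id(struct i2c_client *client, u8 *id)
    {
            int ret = i2c_smbus_read_byte_data(client, 0x0A /* ID register */);

            if (ret < 0)            /* negative return is an errno    */
                    return ret;

            *id = ret;              /* otherwise the low byte is data */
            return 0;
    }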
diff --git a/drivers/staging/iio/light/tsl2583.c b/drivers/staging/iio/light/tsl2583.c
index 80f77cf..5b6455a 100644
--- a/drivers/staging/iio/light/tsl2583.c
+++ b/drivers/staging/iio/light/tsl2583.c
@@ -194,6 +194,7 @@ static int taos_get_lux(struct iio_dev *indio_dev)
{
u16 ch0, ch1; /* separated ch0/ch1 data from device */
u32 lux; /* raw lux calculated from device data */
+ u64 lux64;
u32 ratio;
u8 buf[5];
struct taos_lux *p;
@@ -297,9 +298,19 @@ static int taos_get_lux(struct iio_dev *indio_dev)
lux = (lux + (chip->als_time_scale >> 1)) /
chip->als_time_scale;
- /* adjust for active gain scale */
- lux >>= 13; /* tables have factor of 8192 builtin for accuracy */
- lux = (lux * chip->taos_settings.als_gain_trim + 500) / 1000;
+ /* Adjust for active gain scale.
+ * The taos_device_lux tables above have a factor of 8192 built in,
+ * so we need to shift right.
+ * User-specified gain provides a multiplier.
+ * Apply user-specified gain before shifting right to retain precision.
+ * Use 64 bits to avoid overflow on multiplication.
+ * Then go back to 32 bits before division to avoid using div_u64().
+ */
+ lux64 = lux;
+ lux64 = lux64 * chip->taos_settings.als_gain_trim;
+ lux64 >>= 13;
+ lux = lux64;
+ lux = (lux + 500) / 1000;
if (lux > TSL258X_LUX_CALC_OVER_FLOW) { /* check for overflow */
return_max:
lux = TSL258X_LUX_CALC_OVER_FLOW;
@@ -933,19 +944,7 @@ static struct i2c_driver taos_driver = {
.probe = taos_probe,
.remove = __devexit_p(taos_remove),
};
-
-static int __init taos_init(void)
-{
- return i2c_add_driver(&taos_driver);
-}
-
-static void __exit taos_exit(void)
-{
- i2c_del_driver(&taos_driver);
-}
-
-module_init(taos_init);
-module_exit(taos_exit);
+module_i2c_driver(taos_driver);
MODULE_AUTHOR("J. August Brenner<jbrenner@taosinc.com>");
MODULE_DESCRIPTION("TAOS tsl2583 ambient light sensor driver");
diff --git a/drivers/staging/iio/magnetometer/ak8975.c b/drivers/staging/iio/magnetometer/ak8975.c
index 8b01712..3158f12 100644
--- a/drivers/staging/iio/magnetometer/ak8975.c
+++ b/drivers/staging/iio/magnetometer/ak8975.c
@@ -431,7 +431,7 @@ static int ak8975_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case 0:
return ak8975_read_axis(indio_dev, chan->address, val);
- case (1 << IIO_CHAN_INFO_SCALE_SEPARATE):
+ case IIO_CHAN_INFO_SCALE:
*val = data->raw_to_gauss[chan->address];
return IIO_VAL_INT;
}
@@ -443,7 +443,7 @@ static int ak8975_read_raw(struct iio_dev *indio_dev,
.type = IIO_MAGN, \
.modified = 1, \
.channel2 = IIO_MOD_##axis, \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SEPARATE), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \
.address = index, \
}
@@ -572,19 +572,7 @@ static struct i2c_driver ak8975_driver = {
.remove = __devexit_p(ak8975_remove),
.id_table = ak8975_id,
};
-
-static int __init ak8975_init(void)
-{
- return i2c_add_driver(&ak8975_driver);
-}
-
-static void __exit ak8975_exit(void)
-{
- i2c_del_driver(&ak8975_driver);
-}
-
-module_init(ak8975_init);
-module_exit(ak8975_exit);
+module_i2c_driver(ak8975_driver);
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_DESCRIPTION("AK8975 magnetometer driver");
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c
index fc9ee97..f2e85a9 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.c
+++ b/drivers/staging/iio/magnetometer/hmc5843.c
@@ -463,7 +463,7 @@ static int hmc5843_read_raw(struct iio_dev *indio_dev,
return hmc5843_read_measurement(indio_dev,
chan->address,
val);
- case (1 << IIO_CHAN_INFO_SCALE_SHARED):
+ case IIO_CHAN_INFO_SCALE:
*val = 0;
*val2 = hmc5843_regval_to_nanoscale[data->range];
return IIO_VAL_INT_PLUS_NANO;
@@ -476,7 +476,7 @@ static int hmc5843_read_raw(struct iio_dev *indio_dev,
.type = IIO_MAGN, \
.modified = 1, \
.channel2 = IIO_MOD_##axis, \
- .info_mask = (1 << IIO_CHAN_INFO_SCALE_SHARED), \
+ .info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT, \
.address = add \
}
@@ -605,6 +605,7 @@ static const struct i2c_device_id hmc5843_id[] = {
{ "hmc5843", 0 },
{ }
};
+MODULE_DEVICE_TABLE(i2c, hmc5843_id);
static struct i2c_driver hmc5843_driver = {
.driver = {
@@ -618,20 +619,8 @@ static struct i2c_driver hmc5843_driver = {
.suspend = hmc5843_suspend,
.resume = hmc5843_resume,
};
-
-static int __init hmc5843_init(void)
-{
- return i2c_add_driver(&hmc5843_driver);
-}
-
-static void __exit hmc5843_exit(void)
-{
- i2c_del_driver(&hmc5843_driver);
-}
+module_i2c_driver(hmc5843_driver);
MODULE_AUTHOR("Shubhrajyoti Datta <shubhrajyoti@ti.com");
MODULE_DESCRIPTION("HMC5843 driver");
MODULE_LICENSE("GPL");
-
-module_init(hmc5843_init);
-module_exit(hmc5843_exit);
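The MODULE_DEVICE_TABLE() additions in this series export the ID tables so modprobe can autoload the modules when a matching device turns up. For an I2C table like hmc5843_id above, the effect is roughly the same as hand-writing one alias per entry:

    /* roughly equivalent to: */
    MODULE_ALIAS("i2c:hmc5843");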
diff --git a/drivers/staging/iio/meter/ade7753.c b/drivers/staging/iio/meter/ade7753.c
index 940fef6..57baac6 100644
--- a/drivers/staging/iio/meter/ade7753.c
+++ b/drivers/staging/iio/meter/ade7753.c
@@ -577,19 +577,9 @@ static struct spi_driver ade7753_driver = {
.probe = ade7753_probe,
.remove = __devexit_p(ade7753_remove),
};
-
-static __init int ade7753_init(void)
-{
- return spi_register_driver(&ade7753_driver);
-}
-module_init(ade7753_init);
-
-static __exit void ade7753_exit(void)
-{
- spi_unregister_driver(&ade7753_driver);
-}
-module_exit(ade7753_exit);
+module_spi_driver(ade7753_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADE7753/6 Single-Phase Multifunction Meter");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:ade7753");
diff --git a/drivers/staging/iio/meter/ade7754.c b/drivers/staging/iio/meter/ade7754.c
index 33f0d32..8d81c920 100644
--- a/drivers/staging/iio/meter/ade7754.c
+++ b/drivers/staging/iio/meter/ade7754.c
@@ -600,19 +600,9 @@ static struct spi_driver ade7754_driver = {
.probe = ade7754_probe,
.remove = __devexit_p(ade7754_remove),
};
-
-static __init int ade7754_init(void)
-{
- return spi_register_driver(&ade7754_driver);
-}
-module_init(ade7754_init);
-
-static __exit void ade7754_exit(void)
-{
- spi_unregister_driver(&ade7754_driver);
-}
-module_exit(ade7754_exit);
+module_spi_driver(ade7754_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADE7754 Polyphase Multifunction Energy Metering IC Driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:ad7754");
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
index c5dafbd..dcb2029 100644
--- a/drivers/staging/iio/meter/ade7758_core.c
+++ b/drivers/staging/iio/meter/ade7758_core.c
@@ -20,7 +20,7 @@
#include "../iio.h"
#include "../sysfs.h"
-#include "../buffer_generic.h"
+#include "../buffer.h"
#include "meter.h"
#include "ade7758.h"
@@ -663,63 +663,63 @@ static const struct attribute_group ade7758_attribute_group = {
static struct iio_chan_spec ade7758_channels[] = {
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "raw", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_A, AD7758_VOLTAGE),
0, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_CURRENT, 0, 1, 0, "raw", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_A, AD7758_CURRENT),
1, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_POWER, 0, 1, 0, "apparent_raw", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_A, AD7758_APP_PWR),
2, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_POWER, 0, 1, 0, "active_raw", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_A, AD7758_ACT_PWR),
3, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_POWER, 0, 1, 0, "reactive_raw", 0, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_A, AD7758_REACT_PWR),
4, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "raw", 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_B, AD7758_VOLTAGE),
5, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_CURRENT, 0, 1, 0, "raw", 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_B, AD7758_CURRENT),
6, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_POWER, 0, 1, 0, "apparent_raw", 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_B, AD7758_APP_PWR),
7, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_POWER, 0, 1, 0, "active_raw", 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_B, AD7758_ACT_PWR),
8, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_POWER, 0, 1, 0, "reactive_raw", 1, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_B, AD7758_REACT_PWR),
9, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_VOLTAGE, 0, 1, 0, "raw", 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_C, AD7758_VOLTAGE),
10, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_CURRENT, 0, 1, 0, "raw", 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_C, AD7758_CURRENT),
11, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_POWER, 0, 1, 0, "apparent_raw", 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_C, AD7758_APP_PWR),
12, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_POWER, 0, 1, 0, "active_raw", 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_C, AD7758_ACT_PWR),
13, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN(IIO_POWER, 0, 1, 0, "reactive_raw", 2, 0,
- (1 << IIO_CHAN_INFO_SCALE_SHARED),
+ IIO_CHAN_INFO_SCALE_SHARED_BIT,
AD7758_WT(AD7758_PHASE_C, AD7758_REACT_PWR),
14, IIO_ST('s', 24, 32, 0), 0),
IIO_CHAN_SOFT_TIMESTAMP(15),
@@ -746,12 +746,12 @@ static int __devinit ade7758_probe(struct spi_device *spi)
spi_set_drvdata(spi, indio_dev);
/* Allocate the comms buffers */
- st->rx = kzalloc(sizeof(*st->rx)*ADE7758_MAX_RX, GFP_KERNEL);
+ st->rx = kcalloc(ADE7758_MAX_RX, sizeof(*st->rx), GFP_KERNEL);
if (st->rx == NULL) {
ret = -ENOMEM;
goto error_free_dev;
}
- st->tx = kzalloc(sizeof(*st->tx)*ADE7758_MAX_TX, GFP_KERNEL);
+ st->tx = kcalloc(ADE7758_MAX_TX, sizeof(*st->tx), GFP_KERNEL);
if (st->tx == NULL) {
ret = -ENOMEM;
goto error_free_rx;
@@ -843,6 +843,7 @@ static const struct spi_device_id ade7758_id[] = {
{"ade7758", 0},
{}
};
+MODULE_DEVICE_TABLE(spi, ade7758_id);
static struct spi_driver ade7758_driver = {
.driver = {
@@ -853,18 +854,7 @@ static struct spi_driver ade7758_driver = {
.remove = __devexit_p(ade7758_remove),
.id_table = ade7758_id,
};
-
-static __init int ade7758_init(void)
-{
- return spi_register_driver(&ade7758_driver);
-}
-module_init(ade7758_init);
-
-static __exit void ade7758_exit(void)
-{
- spi_unregister_driver(&ade7758_driver);
-}
-module_exit(ade7758_exit);
+module_spi_driver(ade7758_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADE7758 Polyphase Multifunction Energy Metering IC Driver");
diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
index 00fa2ac..f29f2b2 100644
--- a/drivers/staging/iio/meter/ade7758_ring.c
+++ b/drivers/staging/iio/meter/ade7758_ring.c
@@ -67,7 +67,7 @@ static irqreturn_t ade7758_trigger_handler(int irq, void *p)
s64 dat64[2];
u32 *dat32 = (u32 *)dat64;
- if (ring->scan_count)
+ if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
if (ade7758_spi_read_burst(&indio_dev->dev) >= 0)
*dat32 = get_unaligned_be32(&st->rx_buf[5]) & 0xFFFFFF;
@@ -96,10 +96,11 @@ static int ade7758_ring_preenable(struct iio_dev *indio_dev)
size_t d_size;
unsigned channel;
- if (!ring->scan_count)
+ if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
return -EINVAL;
- channel = find_first_bit(ring->scan_mask, indio_dev->masklength);
+ channel = find_first_bit(indio_dev->active_scan_mask,
+ indio_dev->masklength);
d_size = st->ade7758_ring_channels[channel].scan_type.storagebits / 8;
@@ -145,8 +146,7 @@ int ade7758_configure_ring(struct iio_dev *indio_dev)
/* Effectively select the ring buffer implementation */
indio_dev->buffer->access = &ring_sw_access_funcs;
- indio_dev->buffer->setup_ops = &ade7758_ring_setup_ops;
- indio_dev->buffer->owner = THIS_MODULE;
+ indio_dev->setup_ops = &ade7758_ring_setup_ops;
indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
&ade7758_trigger_handler,
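With the cached scan_count gone, "any channels enabled?" becomes a bitmap_empty() test and "which channel?" a find_first_bit() walk over active_scan_mask, as in the preenable hunk above. Condensed kernel-style sketch (indio_dev as in the surrounding code):

    /* no scan elements enabled at all? */
    if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
            return -EINVAL;

    /* index of the lowest-numbered enabled scan element */
    channel = find_first_bit(indio_dev->active_scan_mask,
                             indio_dev->masklength);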
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c
index b691f10..0beab47 100644
--- a/drivers/staging/iio/meter/ade7759.c
+++ b/drivers/staging/iio/meter/ade7759.c
@@ -521,19 +521,9 @@ static struct spi_driver ade7759_driver = {
.probe = ade7759_probe,
.remove = __devexit_p(ade7759_remove),
};
-
-static __init int ade7759_init(void)
-{
- return spi_register_driver(&ade7759_driver);
-}
-module_init(ade7759_init);
-
-static __exit void ade7759_exit(void)
-{
- spi_unregister_driver(&ade7759_driver);
-}
-module_exit(ade7759_exit);
+module_spi_driver(ade7759_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADE7759 Active Energy Metering IC Driver");
MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:ad7759");
diff --git a/drivers/staging/iio/meter/ade7854-i2c.c b/drivers/staging/iio/meter/ade7854-i2c.c
index cbca3fd..1e1faa0 100644
--- a/drivers/staging/iio/meter/ade7854-i2c.c
+++ b/drivers/staging/iio/meter/ade7854-i2c.c
@@ -253,19 +253,7 @@ static struct i2c_driver ade7854_i2c_driver = {
.remove = __devexit_p(ade7854_i2c_remove),
.id_table = ade7854_id,
};
-
-static __init int ade7854_i2c_init(void)
-{
- return i2c_add_driver(&ade7854_i2c_driver);
-}
-module_init(ade7854_i2c_init);
-
-static __exit void ade7854_i2c_exit(void)
-{
- i2c_del_driver(&ade7854_i2c_driver);
-}
-module_exit(ade7854_i2c_exit);
-
+module_i2c_driver(ade7854_i2c_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADE7854/58/68/78 Polyphase Multifunction Energy Metering IC I2C Driver");
diff --git a/drivers/staging/iio/meter/ade7854-spi.c b/drivers/staging/iio/meter/ade7854-spi.c
index cfa23ba..8112186 100644
--- a/drivers/staging/iio/meter/ade7854-spi.c
+++ b/drivers/staging/iio/meter/ade7854-spi.c
@@ -343,6 +343,7 @@ static const struct spi_device_id ade7854_id[] = {
{ "ade7878", 0 },
{ }
};
+MODULE_DEVICE_TABLE(spi, ade7854_id);
static struct spi_driver ade7854_driver = {
.driver = {
@@ -353,18 +354,7 @@ static struct spi_driver ade7854_driver = {
.remove = __devexit_p(ade7854_spi_remove),
.id_table = ade7854_id,
};
-
-static __init int ade7854_init(void)
-{
- return spi_register_driver(&ade7854_driver);
-}
-module_init(ade7854_init);
-
-static __exit void ade7854_exit(void)
-{
- spi_unregister_driver(&ade7854_driver);
-}
-module_exit(ade7854_exit);
+module_spi_driver(ade7854_driver);
MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
MODULE_DESCRIPTION("Analog Devices ADE7854/58/68/78 SPI Driver");
diff --git a/drivers/staging/iio/resolver/ad2s1200.c b/drivers/staging/iio/resolver/ad2s1200.c
index d7ad46a..d8ce854 100644
--- a/drivers/staging/iio/resolver/ad2s1200.c
+++ b/drivers/staging/iio/resolver/ad2s1200.c
@@ -160,6 +160,7 @@ static const struct spi_device_id ad2s1200_id[] = {
{ "ad2s1205" },
{}
};
+MODULE_DEVICE_TABLE(spi, ad2s1200_id);
static struct spi_driver ad2s1200_driver = {
.driver = {
@@ -170,18 +171,7 @@ static struct spi_driver ad2s1200_driver = {
.remove = __devexit_p(ad2s1200_remove),
.id_table = ad2s1200_id,
};
-
-static __init int ad2s1200_spi_init(void)
-{
- return spi_register_driver(&ad2s1200_driver);
-}
-module_init(ad2s1200_spi_init);
-
-static __exit void ad2s1200_spi_exit(void)
-{
- spi_unregister_driver(&ad2s1200_driver);
-}
-module_exit(ad2s1200_spi_exit);
+module_spi_driver(ad2s1200_driver);
MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>");
MODULE_DESCRIPTION("Analog Devices AD2S1200/1205 Resolver to Digital SPI driver");
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index 6401a62..c439fcf 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -749,6 +749,7 @@ static const struct spi_device_id ad2s1210_id[] = {
{ "ad2s1210" },
{}
};
+MODULE_DEVICE_TABLE(spi, ad2s1210_id);
static struct spi_driver ad2s1210_driver = {
.driver = {
@@ -759,18 +760,7 @@ static struct spi_driver ad2s1210_driver = {
.remove = __devexit_p(ad2s1210_remove),
.id_table = ad2s1210_id,
};
-
-static __init int ad2s1210_spi_init(void)
-{
- return spi_register_driver(&ad2s1210_driver);
-}
-module_init(ad2s1210_spi_init);
-
-static __exit void ad2s1210_spi_exit(void)
-{
- spi_unregister_driver(&ad2s1210_driver);
-}
-module_exit(ad2s1210_spi_exit);
+module_spi_driver(ad2s1210_driver);
MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>");
MODULE_DESCRIPTION("Analog Devices AD2S1210 Resolver to Digital SPI driver");
diff --git a/drivers/staging/iio/resolver/ad2s90.c b/drivers/staging/iio/resolver/ad2s90.c
index a9200d9..2a86f58 100644
--- a/drivers/staging/iio/resolver/ad2s90.c
+++ b/drivers/staging/iio/resolver/ad2s90.c
@@ -109,6 +109,7 @@ static const struct spi_device_id ad2s90_id[] = {
{ "ad2s90" },
{}
};
+MODULE_DEVICE_TABLE(spi, ad2s90_id);
static struct spi_driver ad2s90_driver = {
.driver = {
@@ -119,18 +120,7 @@ static struct spi_driver ad2s90_driver = {
.remove = __devexit_p(ad2s90_remove),
.id_table = ad2s90_id,
};
-
-static __init int ad2s90_spi_init(void)
-{
- return spi_register_driver(&ad2s90_driver);
-}
-module_init(ad2s90_spi_init);
-
-static __exit void ad2s90_spi_exit(void)
-{
- spi_unregister_driver(&ad2s90_driver);
-}
-module_exit(ad2s90_spi_exit);
+module_spi_driver(ad2s90_driver);
MODULE_AUTHOR("Graff Yang <graff.yang@gmail.com>");
MODULE_DESCRIPTION("Analog Devices AD2S90 Resolver to Digital SPI driver");
diff --git a/drivers/staging/iio/ring_sw.c b/drivers/staging/iio/ring_sw.c
index 66a34ad..3e24ec4 100644
--- a/drivers/staging/iio/ring_sw.c
+++ b/drivers/staging/iio/ring_sw.c
@@ -23,11 +23,8 @@
* @data: the ring buffer memory
* @read_p: read pointer (oldest available)
* @write_p: write pointer
- * @last_written_p: read pointer (newest available)
* @half_p: half buffer length behind write_p (event generation)
- * @use_count: reference count to prevent resizing when in use
* @update_needed: flag to indicated change in size requested
- * @use_lock: lock to prevent change in size when in use
*
* Note that the first element of all ring buffers must be a
* struct iio_buffer.
@@ -37,12 +34,9 @@ struct iio_sw_ring_buffer {
unsigned char *data;
unsigned char *read_p;
unsigned char *write_p;
- unsigned char *last_written_p;
/* used to act as a point at which to signal an event */
unsigned char *half_p;
- int use_count;
int update_needed;
- spinlock_t use_lock;
};
#define iio_to_sw_ring(r) container_of(r, struct iio_sw_ring_buffer, buf)
@@ -56,38 +50,15 @@ static inline int __iio_allocate_sw_ring_buffer(struct iio_sw_ring_buffer *ring,
ring->data = kmalloc(length*ring->buf.bytes_per_datum, GFP_ATOMIC);
ring->read_p = NULL;
ring->write_p = NULL;
- ring->last_written_p = NULL;
ring->half_p = NULL;
return ring->data ? 0 : -ENOMEM;
}
-static inline void __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
-{
- spin_lock_init(&ring->use_lock);
-}
-
static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
kfree(ring->data);
}
-static void iio_mark_sw_rb_in_use(struct iio_buffer *r)
-{
- struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
- spin_lock(&ring->use_lock);
- ring->use_count++;
- spin_unlock(&ring->use_lock);
-}
-
-static void iio_unmark_sw_rb_in_use(struct iio_buffer *r)
-{
- struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
- spin_lock(&ring->use_lock);
- ring->use_count--;
- spin_unlock(&ring->use_lock);
-}
-
-
/* Ring buffer related functionality */
/* Store to ring is typically called in the bh of a data ready interrupt handler
* in the device driver */
@@ -115,7 +86,6 @@ static int iio_store_to_sw_ring(struct iio_sw_ring_buffer *ring,
* Always valid as either points to latest or second latest value.
* Before this runs it is null and read attempts fail with -EAGAIN.
*/
- ring->last_written_p = ring->write_p;
barrier();
/* temp_ptr used to ensure we never have an invalid pointer
* it may be slightly lagging, but never invalid
@@ -174,6 +144,7 @@ static int iio_read_first_n_sw_rb(struct iio_buffer *r,
u8 *initial_read_p, *initial_write_p, *current_read_p, *end_read_p;
u8 *data;
int ret, max_copied, bytes_to_rip, dead_offset;
+ size_t data_available, buffer_size;
/* A userspace program has probably made an error if it tries to
* read something that is not a whole number of bpds.
@@ -186,9 +157,11 @@ static int iio_read_first_n_sw_rb(struct iio_buffer *r,
n, ring->buf.bytes_per_datum);
goto error_ret;
}
+
+ buffer_size = ring->buf.bytes_per_datum*ring->buf.length;
+
/* Limit size to whole of ring buffer */
- bytes_to_rip = min((size_t)(ring->buf.bytes_per_datum*ring->buf.length),
- n);
+ bytes_to_rip = min_t(size_t, buffer_size, n);
data = kmalloc(bytes_to_rip, GFP_KERNEL);
if (data == NULL) {
@@ -217,38 +190,24 @@ static int iio_read_first_n_sw_rb(struct iio_buffer *r,
goto error_free_data_cpy;
}
- if (initial_write_p >= initial_read_p + bytes_to_rip) {
- /* write_p is greater than necessary, all is easy */
- max_copied = bytes_to_rip;
- memcpy(data, initial_read_p, max_copied);
- end_read_p = initial_read_p + max_copied;
- } else if (initial_write_p > initial_read_p) {
- /*not enough data to cpy */
- max_copied = initial_write_p - initial_read_p;
+ if (initial_write_p >= initial_read_p)
+ data_available = initial_write_p - initial_read_p;
+ else
+ data_available = buffer_size - (initial_read_p - initial_write_p);
+
+ if (data_available < bytes_to_rip)
+ bytes_to_rip = data_available;
+
+ if (initial_read_p + bytes_to_rip >= ring->data + buffer_size) {
+ max_copied = ring->data + buffer_size - initial_read_p;
memcpy(data, initial_read_p, max_copied);
- end_read_p = initial_write_p;
+ memcpy(data + max_copied, ring->data, bytes_to_rip - max_copied);
+ end_read_p = ring->data + bytes_to_rip - max_copied;
} else {
- /* going through 'end' of ring buffer */
- max_copied = ring->data
- + ring->buf.length*ring->buf.bytes_per_datum - initial_read_p;
- memcpy(data, initial_read_p, max_copied);
- /* possible we are done if we align precisely with end */
- if (max_copied == bytes_to_rip)
- end_read_p = ring->data;
- else if (initial_write_p
- > ring->data + bytes_to_rip - max_copied) {
- /* enough data to finish */
- memcpy(data + max_copied, ring->data,
- bytes_to_rip - max_copied);
- max_copied = bytes_to_rip;
- end_read_p = ring->data + (bytes_to_rip - max_copied);
- } else { /* not enough data */
- memcpy(data + max_copied, ring->data,
- initial_write_p - ring->data);
- max_copied += initial_write_p - ring->data;
- end_read_p = initial_write_p;
- }
+ memcpy(data, initial_read_p, bytes_to_rip);
+ end_read_p = initial_read_p + bytes_to_rip;
}
+
/* Now to verify which section was cleanly copied - i.e. how far
* read pointer has been pushed */
current_read_p = ring->read_p;
@@ -256,15 +215,14 @@ static int iio_read_first_n_sw_rb(struct iio_buffer *r,
if (initial_read_p <= current_read_p)
dead_offset = current_read_p - initial_read_p;
else
- dead_offset = ring->buf.length*ring->buf.bytes_per_datum
- - (initial_read_p - current_read_p);
+ dead_offset = buffer_size - (initial_read_p - current_read_p);
/* possible issue if the initial write has been lapped or indeed
* the point we were reading to has been passed */
/* No valid data read.
* In this case the read pointer is already correct having been
* pushed further than we would look. */
- if (max_copied - dead_offset < 0) {
+ if (bytes_to_rip - dead_offset < 0) {
ret = 0;
goto error_free_data_cpy;
}
@@ -280,7 +238,7 @@ static int iio_read_first_n_sw_rb(struct iio_buffer *r,
while (ring->read_p != end_read_p)
ring->read_p = end_read_p;
- ret = max_copied - dead_offset;
+ ret = bytes_to_rip - dead_offset;
if (copy_to_user(buf, data + dead_offset, ret)) {
ret = -EFAULT;
@@ -305,52 +263,18 @@ static int iio_store_to_sw_rb(struct iio_buffer *r,
return iio_store_to_sw_ring(ring, data, timestamp);
}
-static int iio_read_last_from_sw_ring(struct iio_sw_ring_buffer *ring,
- unsigned char *data)
-{
- unsigned char *last_written_p_copy;
-
- iio_mark_sw_rb_in_use(&ring->buf);
-again:
- barrier();
- last_written_p_copy = ring->last_written_p;
- barrier(); /*unnessecary? */
- /* Check there is anything here */
- if (last_written_p_copy == NULL)
- return -EAGAIN;
- memcpy(data, last_written_p_copy, ring->buf.bytes_per_datum);
-
- if (unlikely(ring->last_written_p != last_written_p_copy))
- goto again;
-
- iio_unmark_sw_rb_in_use(&ring->buf);
- return 0;
-}
-
-static int iio_read_last_from_sw_rb(struct iio_buffer *r,
- unsigned char *data)
-{
- return iio_read_last_from_sw_ring(iio_to_sw_ring(r), data);
-}
-
static int iio_request_update_sw_rb(struct iio_buffer *r)
{
int ret = 0;
struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
r->stufftoread = false;
- spin_lock(&ring->use_lock);
if (!ring->update_needed)
goto error_ret;
- if (ring->use_count) {
- ret = -EAGAIN;
- goto error_ret;
- }
__iio_free_sw_ring_buffer(ring);
ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bytes_per_datum,
ring->buf.length);
error_ret:
- spin_unlock(&ring->use_lock);
return ret;
}
@@ -360,12 +284,18 @@ static int iio_get_bytes_per_datum_sw_rb(struct iio_buffer *r)
return ring->buf.bytes_per_datum;
}
+static int iio_mark_update_needed_sw_rb(struct iio_buffer *r)
+{
+ struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
+ ring->update_needed = true;
+ return 0;
+}
+
static int iio_set_bytes_per_datum_sw_rb(struct iio_buffer *r, size_t bpd)
{
if (r->bytes_per_datum != bpd) {
r->bytes_per_datum = bpd;
- if (r->access->mark_param_change)
- r->access->mark_param_change(r);
+ iio_mark_update_needed_sw_rb(r);
}
return 0;
}
@@ -379,27 +309,17 @@ static int iio_set_length_sw_rb(struct iio_buffer *r, int length)
{
if (r->length != length) {
r->length = length;
- if (r->access->mark_param_change)
- r->access->mark_param_change(r);
+ iio_mark_update_needed_sw_rb(r);
}
return 0;
}
-static int iio_mark_update_needed_sw_rb(struct iio_buffer *r)
-{
- struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
- ring->update_needed = true;
- return 0;
-}
-
static IIO_BUFFER_ENABLE_ATTR;
-static IIO_BUFFER_BYTES_PER_DATUM_ATTR;
static IIO_BUFFER_LENGTH_ATTR;
/* Standard set of ring buffer attributes */
static struct attribute *iio_ring_attributes[] = {
&dev_attr_length.attr,
- &dev_attr_bytes_per_datum.attr,
&dev_attr_enable.attr,
NULL,
};
@@ -419,8 +339,7 @@ struct iio_buffer *iio_sw_rb_allocate(struct iio_dev *indio_dev)
return NULL;
ring->update_needed = true;
buf = &ring->buf;
- iio_buffer_init(buf, indio_dev);
- __iio_init_sw_ring_buffer(ring);
+ iio_buffer_init(buf);
buf->attrs = &iio_ring_attribute_group;
return buf;
@@ -434,12 +353,8 @@ void iio_sw_rb_free(struct iio_buffer *r)
EXPORT_SYMBOL(iio_sw_rb_free);
const struct iio_buffer_access_funcs ring_sw_access_funcs = {
- .mark_in_use = &iio_mark_sw_rb_in_use,
- .unmark_in_use = &iio_unmark_sw_rb_in_use,
.store_to = &iio_store_to_sw_rb,
- .read_last = &iio_read_last_from_sw_rb,
.read_first_n = &iio_read_first_n_sw_rb,
- .mark_param_change = &iio_mark_update_needed_sw_rb,
.request_update = &iio_request_update_sw_rb,
.get_bytes_per_datum = &iio_get_bytes_per_datum_sw_rb,
.set_bytes_per_datum = &iio_set_bytes_per_datum_sw_rb,
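The rewritten read path above replaces the old three-way branch with one "clamp to available data, then copy with at most one wrap" step. A standalone sketch of that arithmetic (plain userspace C with snapshot pointers passed in; the kernel version additionally re-checks read_p afterwards to discard any data the writer overtook):

#include <stddef.h>
#include <string.h>

/* Copy up to n bytes out of a circular buffer [data, data + buffer_size),
 * starting at read_p, where write_p marks the end of valid data.
 * Returns the number of bytes copied. */
static size_t ring_copy(unsigned char *out, size_t n,
			const unsigned char *data, size_t buffer_size,
			const unsigned char *read_p,
			const unsigned char *write_p)
{
	size_t available, first_chunk;

	if (write_p >= read_p)
		available = write_p - read_p;
	else
		available = buffer_size - (read_p - write_p);

	if (n > available)
		n = available;

	first_chunk = data + buffer_size - read_p;	/* bytes before the wrap point */
	if (n <= first_chunk) {
		memcpy(out, read_p, n);
	} else {
		memcpy(out, read_p, first_chunk);
		memcpy(out + first_chunk, data, n - first_chunk);
	}
	return n;
}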
diff --git a/drivers/staging/iio/ring_sw.h b/drivers/staging/iio/ring_sw.h
index a3e1578..e6a6e2c 100644
--- a/drivers/staging/iio/ring_sw.h
+++ b/drivers/staging/iio/ring_sw.h
@@ -23,7 +23,7 @@
#ifndef _IIO_RING_SW_H_
#define _IIO_RING_SW_H_
-#include "buffer_generic.h"
+#include "buffer.h"
/**
* ring_sw_access_funcs - access functions for a software ring buffer
diff --git a/drivers/staging/iio/sysfs.h b/drivers/staging/iio/sysfs.h
index 868952b..bfedb73 100644
--- a/drivers/staging/iio/sysfs.h
+++ b/drivers/staging/iio/sysfs.h
@@ -114,47 +114,4 @@ struct iio_const_attr {
#define IIO_CONST_ATTR_TEMP_SCALE(_string) \
IIO_CONST_ATTR(in_temp_scale, _string)
-enum iio_event_type {
- IIO_EV_TYPE_THRESH,
- IIO_EV_TYPE_MAG,
- IIO_EV_TYPE_ROC,
- IIO_EV_TYPE_THRESH_ADAPTIVE,
- IIO_EV_TYPE_MAG_ADAPTIVE,
-};
-
-enum iio_event_direction {
- IIO_EV_DIR_EITHER,
- IIO_EV_DIR_RISING,
- IIO_EV_DIR_FALLING,
-};
-
-#define IIO_EVENT_CODE(chan_type, diff, modifier, direction, \
- type, chan, chan1, chan2) \
- (((u64)type << 56) | ((u64)diff << 55) | \
- ((u64)direction << 48) | ((u64)modifier << 40) | \
- ((u64)chan_type << 32) | (chan2 << 16) | chan1 | chan)
-
-#define IIO_EV_DIR_MAX 4
-#define IIO_EV_BIT(type, direction) \
- (1 << (type*IIO_EV_DIR_MAX + direction))
-
-#define IIO_MOD_EVENT_CODE(channelclass, number, modifier, \
- type, direction) \
- IIO_EVENT_CODE(channelclass, 0, modifier, direction, type, number, 0, 0)
-
-#define IIO_UNMOD_EVENT_CODE(channelclass, number, type, direction) \
- IIO_EVENT_CODE(channelclass, 0, 0, direction, type, number, 0, 0)
-
-#define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF)
-
-#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0xCF)
-
-#define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF)
-
-/* Event code number extraction depends on which type of event we have.
- * Perhaps review this function in the future*/
-#define IIO_EVENT_CODE_EXTRACT_NUM(mask) (mask & 0xFFFF)
-
-#define IIO_EVENT_CODE_EXTRACT_MODIFIER(mask) ((mask >> 40) & 0xFF)
-
#endif /* _INDUSTRIAL_IO_SYSFS_H_ */
diff --git a/drivers/staging/iio/trigger.h b/drivers/staging/iio/trigger.h
index 5cc42a6..1cfca23 100644
--- a/drivers/staging/iio/trigger.h
+++ b/drivers/staging/iio/trigger.h
@@ -46,7 +46,6 @@ struct iio_trigger_ops {
* @private_data: [DRIVER] device specific data
* @list: [INTERN] used in maintenance of global trigger list
* @alloc_list: [DRIVER] used for driver specific trigger list
- * @owner: [DRIVER] used to monitor usage count of the trigger.
* @use_count: use count for the trigger
* @subirq_chip: [INTERN] associate 'virtual' irq chip.
* @subirq_base: [INTERN] base number for irqs provided by trigger.
@@ -63,7 +62,6 @@ struct iio_trigger {
void *private_data;
struct list_head list;
struct list_head alloc_list;
- struct module *owner;
int use_count;
struct irq_chip subirq_chip;
diff --git a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
index d35d085..bd7416b 100644
--- a/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
+++ b/drivers/staging/iio/trigger/iio-trig-periodic-rtc.c
@@ -125,7 +125,6 @@ static int iio_trig_periodic_rtc_probe(struct platform_device *dev)
goto error_put_trigger_and_remove_from_list;
}
trig->private_data = trig_info;
- trig->owner = THIS_MODULE;
trig->ops = &iio_prtc_trigger_ops;
/* RTC access */
trig_info->rtc
diff --git a/drivers/staging/iio/types.h b/drivers/staging/iio/types.h
new file mode 100644
index 0000000..b7d2647
--- /dev/null
+++ b/drivers/staging/iio/types.h
@@ -0,0 +1,49 @@
+/* industrial I/O data types needed both in and out of kernel
+ *
+ * Copyright (c) 2008 Jonathan Cameron
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#ifndef _IIO_TYPES_H_
+#define _IIO_TYPES_H_
+
+enum iio_chan_type {
+ /* real channel types */
+ IIO_VOLTAGE,
+ IIO_CURRENT,
+ IIO_POWER,
+ IIO_ACCEL,
+ IIO_ANGL_VEL,
+ IIO_MAGN,
+ IIO_LIGHT,
+ IIO_INTENSITY,
+ IIO_PROXIMITY,
+ IIO_TEMP,
+ IIO_INCLI,
+ IIO_ROT,
+ IIO_ANGL,
+ IIO_TIMESTAMP,
+ IIO_CAPACITANCE,
+};
+
+enum iio_modifier {
+ IIO_NO_MOD,
+ IIO_MOD_X,
+ IIO_MOD_Y,
+ IIO_MOD_Z,
+ IIO_MOD_X_AND_Y,
+ IIO_MOD_X_AND_Z,
+ IIO_MOD_Y_AND_Z,
+ IIO_MOD_X_AND_Y_AND_Z,
+ IIO_MOD_X_OR_Y,
+ IIO_MOD_X_OR_Z,
+ IIO_MOD_Y_OR_Z,
+ IIO_MOD_X_OR_Y_OR_Z,
+ IIO_MOD_LIGHT_BOTH,
+ IIO_MOD_LIGHT_IR,
+};
+
+#endif /* _IIO_TYPES_H_ */
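The enums in this new header are consumed by struct iio_chan_spec channel tables elsewhere in the tree. A hedged sketch of such a table (field names as used by the staging IIO core of this era; the modifier in .channel2 only applies when .modified is set):

#include "iio.h"	/* staging IIO core header declaring struct iio_chan_spec */

/* One indexed voltage channel plus one X-axis acceleration channel. */
static const struct iio_chan_spec example_channels[] = {
	{
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.channel = 0,
	},
	{
		.type = IIO_ACCEL,
		.modified = 1,
		.channel2 = IIO_MOD_X,
	},
};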
diff --git a/drivers/staging/intel_sst/Kconfig b/drivers/staging/intel_sst/Kconfig
deleted file mode 100644
index 8239107..0000000
--- a/drivers/staging/intel_sst/Kconfig
+++ /dev/null
@@ -1,19 +0,0 @@
-config SND_INTEL_SST
- tristate "Intel SST (LPE) Driver"
- depends on X86 && INTEL_SCU_IPC
- default n
- help
- Say Y here to include support for the Intel(R) MID SST DSP driver
- On other PC platforms if you are unsure answer 'N'
-
-config SND_INTELMID
- tristate "Intel MID sound card driver"
- depends on SOUND && SND
- select SND_PCM
- select SND_SEQUENCER
- select SND_JACK
- depends on SND_INTEL_SST
- default n
- help
- Say Y here to include support for the Intel(R) MID sound card driver
- On other PC platforms if you are unsure answer 'N'
diff --git a/drivers/staging/intel_sst/Makefile b/drivers/staging/intel_sst/Makefile
deleted file mode 100644
index 9eb7c15..0000000
--- a/drivers/staging/intel_sst/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-#
-# Makefile for Intel MID Audio drivers
-#
-snd-intel-sst-y := intel_sst.o intel_sst_ipc.o intel_sst_stream.o intel_sst_drv_interface.o intel_sst_dsp.o intel_sst_pvt.o intel_sst_stream_encoded.o intel_sst_app_interface.o
-snd-intelmid-y := intelmid.o intelmid_msic_control.o intelmid_ctrl.o intelmid_pvt.o intelmid_v0_control.o intelmid_v1_control.o intelmid_v2_control.o
-obj-$(CONFIG_SND_INTEL_SST) += snd-intel-sst.o
-obj-$(CONFIG_SND_INTELMID) += snd-intelmid.o
diff --git a/drivers/staging/intel_sst/TODO b/drivers/staging/intel_sst/TODO
deleted file mode 100644
index c733d70..0000000
--- a/drivers/staging/intel_sst/TODO
+++ /dev/null
@@ -1,13 +0,0 @@
-TODO
-----
-
-Get the memrar driver cleaned up and upstream (dependency blocking SST)
-Replace long/short press with two virtual buttons
-Review the printks and kill off any left over ST_ERR: messages
-Review the misc device ioctls for 32/64bit safety and sanity
-Review the misc device ioctls for size safety depending on config and decide
- if space/unused areas should be left
-What the sound folks turn up on full review
-Using the ALSA frameworks properly
-
-
diff --git a/drivers/staging/intel_sst/intel_sst.c b/drivers/staging/intel_sst/intel_sst.c
deleted file mode 100644
index ff9aaec..0000000
--- a/drivers/staging/intel_sst/intel_sst.c
+++ /dev/null
@@ -1,649 +0,0 @@
-/*
- * intel_sst.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This driver exposes the audio engine functionalities to the ALSA
- * and middleware.
- *
- * This file contains all init functions
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/fs.h>
-#include <linux/interrupt.h>
-#include <linux/firmware.h>
-#include <linux/miscdevice.h>
-#include <linux/pm_runtime.h>
-#include <linux/module.h>
-#include <asm/mrst.h>
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intel_sst_fw_ipc.h"
-#include "intel_sst_common.h"
-
-
-MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
-MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
-MODULE_AUTHOR("Dharageswari R <dharageswari.r@intel.com>");
-MODULE_AUTHOR("KP Jeeja <jeeja.kp@intel.com>");
-MODULE_DESCRIPTION("Intel (R) SST(R) Audio Engine Driver");
-MODULE_LICENSE("GPL v2");
-MODULE_VERSION(SST_DRIVER_VERSION);
-
-struct intel_sst_drv *sst_drv_ctx;
-static struct mutex drv_ctx_lock;
-struct class *sst_class;
-
-/* fops Routines */
-static const struct file_operations intel_sst_fops = {
- .owner = THIS_MODULE,
- .open = intel_sst_open,
- .release = intel_sst_release,
- .read = intel_sst_read,
- .write = intel_sst_write,
- .unlocked_ioctl = intel_sst_ioctl,
- .mmap = intel_sst_mmap,
- .aio_read = intel_sst_aio_read,
- .aio_write = intel_sst_aio_write,
-};
-static const struct file_operations intel_sst_fops_cntrl = {
- .owner = THIS_MODULE,
- .open = intel_sst_open_cntrl,
- .release = intel_sst_release_cntrl,
- .unlocked_ioctl = intel_sst_ioctl,
-};
-
-static struct miscdevice lpe_dev = {
- .minor = MISC_DYNAMIC_MINOR,/* dynamic allocation */
- .name = "intel_sst",/* /dev/intel_sst */
- .fops = &intel_sst_fops
-};
-
-
-static struct miscdevice lpe_ctrl = {
- .minor = MISC_DYNAMIC_MINOR,/* dynamic allocation */
- .name = "intel_sst_ctrl",/* /dev/intel_sst_ctrl */
- .fops = &intel_sst_fops_cntrl
-};
-
-/**
-* intel_sst_interrupt - Interrupt service routine for SST
-*
-* @irq: irq number of interrupt
-* @context: pointer to device structre
-*
-* This function is called by OS when SST device raises
-* an interrupt. This will be result of write in IPC register
-* Source can be busy or done interrupt
-*/
-static irqreturn_t intel_sst_interrupt(int irq, void *context)
-{
- union interrupt_reg isr;
- union ipc_header header;
- union interrupt_reg imr;
- struct intel_sst_drv *drv = (struct intel_sst_drv *) context;
- unsigned int size = 0, str_id;
- struct stream_info *stream ;
-
- /* Do not handle interrupt in suspended state */
- if (drv->sst_state == SST_SUSPENDED)
- return IRQ_NONE;
- /* Interrupt arrived, check src */
- isr.full = sst_shim_read(drv->shim, SST_ISRX);
-
- if (isr.part.busy_interrupt) {
- header.full = sst_shim_read(drv->shim, SST_IPCD);
- if (header.part.msg_id == IPC_SST_PERIOD_ELAPSED) {
- sst_clear_interrupt();
- str_id = header.part.str_id;
- stream = &sst_drv_ctx->streams[str_id];
- if (stream->period_elapsed)
- stream->period_elapsed(stream->pcm_substream);
- return IRQ_HANDLED;
- }
- if (header.part.large)
- size = header.part.data;
- if (header.part.msg_id & REPLY_MSG) {
- sst_drv_ctx->ipc_process_msg.header = header;
- memcpy_fromio(sst_drv_ctx->ipc_process_msg.mailbox,
- drv->mailbox + SST_MAILBOX_RCV, size);
- queue_work(sst_drv_ctx->process_msg_wq,
- &sst_drv_ctx->ipc_process_msg.wq);
- } else {
- sst_drv_ctx->ipc_process_reply.header = header;
- memcpy_fromio(sst_drv_ctx->ipc_process_reply.mailbox,
- drv->mailbox + SST_MAILBOX_RCV, size);
- queue_work(sst_drv_ctx->process_reply_wq,
- &sst_drv_ctx->ipc_process_reply.wq);
- }
- /* mask busy inetrrupt */
- imr.full = sst_shim_read(drv->shim, SST_IMRX);
- imr.part.busy_interrupt = 1;
- sst_shim_write(sst_drv_ctx->shim, SST_IMRX, imr.full);
- return IRQ_HANDLED;
- } else if (isr.part.done_interrupt) {
- /* Clear done bit */
- header.full = sst_shim_read(drv->shim, SST_IPCX);
- header.part.done = 0;
- sst_shim_write(sst_drv_ctx->shim, SST_IPCX, header.full);
- /* write 1 to clear status register */;
- isr.part.done_interrupt = 1;
- /* dummy register for shim workaround */
- sst_shim_write(sst_drv_ctx->shim, SST_ISRX, isr.full);
- queue_work(sst_drv_ctx->post_msg_wq,
- &sst_drv_ctx->ipc_post_msg.wq);
- return IRQ_HANDLED;
- } else
- return IRQ_NONE;
-
-}
-
-
-/*
-* intel_sst_probe - PCI probe function
-*
-* @pci: PCI device structure
-* @pci_id: PCI device ID structure
-*
-* This function is called by OS when a device is found
-* This enables the device, interrupt etc
-*/
-static int __devinit intel_sst_probe(struct pci_dev *pci,
- const struct pci_device_id *pci_id)
-{
- int i, ret = 0;
-
- pr_debug("Probe for DID %x\n", pci->device);
- mutex_lock(&drv_ctx_lock);
- if (sst_drv_ctx) {
- pr_err("Only one sst handle is supported\n");
- mutex_unlock(&drv_ctx_lock);
- return -EBUSY;
- }
-
- sst_drv_ctx = kzalloc(sizeof(*sst_drv_ctx), GFP_KERNEL);
- if (!sst_drv_ctx) {
- pr_err("malloc fail\n");
- mutex_unlock(&drv_ctx_lock);
- return -ENOMEM;
- }
- mutex_unlock(&drv_ctx_lock);
-
- sst_drv_ctx->pci_id = pci->device;
-
- mutex_init(&sst_drv_ctx->stream_lock);
- mutex_init(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->pmic_state = SND_MAD_UN_INIT;
-
- sst_drv_ctx->stream_cnt = 0;
- sst_drv_ctx->encoded_cnt = 0;
- sst_drv_ctx->am_cnt = 0;
- sst_drv_ctx->pb_streams = 0;
- sst_drv_ctx->cp_streams = 0;
- sst_drv_ctx->unique_id = 0;
- sst_drv_ctx->pmic_port_instance = SST_DEFAULT_PMIC_PORT;
-
- INIT_LIST_HEAD(&sst_drv_ctx->ipc_dispatch_list);
- INIT_WORK(&sst_drv_ctx->ipc_post_msg.wq, sst_post_message);
- INIT_WORK(&sst_drv_ctx->ipc_process_msg.wq, sst_process_message);
- INIT_WORK(&sst_drv_ctx->ipc_process_reply.wq, sst_process_reply);
- INIT_WORK(&sst_drv_ctx->mad_ops.wq, sst_process_mad_ops);
- init_waitqueue_head(&sst_drv_ctx->wait_queue);
-
- sst_drv_ctx->mad_wq = create_workqueue("sst_mad_wq");
- if (!sst_drv_ctx->mad_wq)
- goto do_free_drv_ctx;
- sst_drv_ctx->post_msg_wq = create_workqueue("sst_post_msg_wq");
- if (!sst_drv_ctx->post_msg_wq)
- goto free_mad_wq;
- sst_drv_ctx->process_msg_wq = create_workqueue("sst_process_msg_wqq");
- if (!sst_drv_ctx->process_msg_wq)
- goto free_post_msg_wq;
- sst_drv_ctx->process_reply_wq = create_workqueue("sst_proces_reply_wq");
- if (!sst_drv_ctx->process_reply_wq)
- goto free_process_msg_wq;
-
- for (i = 0; i < MAX_ACTIVE_STREAM; i++) {
- sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
- sst_drv_ctx->alloc_block[i].ops_block.condition = false;
- }
- spin_lock_init(&sst_drv_ctx->list_spin_lock);
-
- sst_drv_ctx->max_streams = pci_id->driver_data;
- pr_debug("Got drv data max stream %d\n",
- sst_drv_ctx->max_streams);
- for (i = 1; i <= sst_drv_ctx->max_streams; i++) {
- struct stream_info *stream = &sst_drv_ctx->streams[i];
- INIT_LIST_HEAD(&stream->bufs);
- mutex_init(&stream->lock);
- spin_lock_init(&stream->pcm_lock);
- }
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
- sst_drv_ctx->mmap_mem = NULL;
- sst_drv_ctx->mmap_len = SST_MMAP_PAGES * PAGE_SIZE;
- while (sst_drv_ctx->mmap_len > 0) {
- sst_drv_ctx->mmap_mem =
- kzalloc(sst_drv_ctx->mmap_len, GFP_KERNEL);
- if (sst_drv_ctx->mmap_mem) {
- pr_debug("Got memory %p size 0x%x\n",
- sst_drv_ctx->mmap_mem,
- sst_drv_ctx->mmap_len);
- break;
- }
- if (sst_drv_ctx->mmap_len < (SST_MMAP_STEP*PAGE_SIZE)) {
- pr_err("mem alloc fail...abort!!\n");
- ret = -ENOMEM;
- goto free_process_reply_wq;
- }
- sst_drv_ctx->mmap_len -= (SST_MMAP_STEP * PAGE_SIZE);
- pr_debug("mem alloc failed...trying %d\n",
- sst_drv_ctx->mmap_len);
- }
- }
-
- /* Init the device */
- ret = pci_enable_device(pci);
- if (ret) {
- pr_err("device can't be enabled\n");
- goto do_free_mem;
- }
- sst_drv_ctx->pci = pci_dev_get(pci);
- ret = pci_request_regions(pci, SST_DRV_NAME);
- if (ret)
- goto do_disable_device;
- /* map registers */
- /* SST Shim */
- sst_drv_ctx->shim_phy_add = pci_resource_start(pci, 1);
- sst_drv_ctx->shim = pci_ioremap_bar(pci, 1);
- if (!sst_drv_ctx->shim)
- goto do_release_regions;
- pr_debug("SST Shim Ptr %p\n", sst_drv_ctx->shim);
-
- /* Shared SRAM */
- sst_drv_ctx->mailbox = pci_ioremap_bar(pci, 2);
- if (!sst_drv_ctx->mailbox)
- goto do_unmap_shim;
- pr_debug("SRAM Ptr %p\n", sst_drv_ctx->mailbox);
-
- /* IRAM */
- sst_drv_ctx->iram = pci_ioremap_bar(pci, 3);
- if (!sst_drv_ctx->iram)
- goto do_unmap_sram;
- pr_debug("IRAM Ptr %p\n", sst_drv_ctx->iram);
-
- /* DRAM */
- sst_drv_ctx->dram = pci_ioremap_bar(pci, 4);
- if (!sst_drv_ctx->dram)
- goto do_unmap_iram;
- pr_debug("DRAM Ptr %p\n", sst_drv_ctx->dram);
-
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_UN_INIT;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- /* Register the ISR */
- ret = request_irq(pci->irq, intel_sst_interrupt,
- IRQF_SHARED, SST_DRV_NAME, sst_drv_ctx);
- if (ret)
- goto do_unmap_dram;
- pr_debug("Registered IRQ 0x%x\n", pci->irq);
-
- /*Register LPE Control as misc driver*/
- ret = misc_register(&lpe_ctrl);
- if (ret) {
- pr_err("couldn't register control device\n");
- goto do_free_irq;
- }
-
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
- ret = misc_register(&lpe_dev);
- if (ret) {
- pr_err("couldn't register LPE device\n");
- goto do_free_misc;
- }
- } else if (sst_drv_ctx->pci_id == SST_MFLD_PCI_ID) {
- u32 csr;
-
- /*allocate mem for fw context save during suspend*/
- sst_drv_ctx->fw_cntx = kzalloc(FW_CONTEXT_MEM, GFP_KERNEL);
- if (!sst_drv_ctx->fw_cntx) {
- ret = -ENOMEM;
- goto do_free_misc;
- }
- /*setting zero as that is valid mem to restore*/
- sst_drv_ctx->fw_cntx_size = 0;
-
- /*set lpe start clock and ram size*/
- csr = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr |= 0x30060; /*remove the clock ratio after fw fix*/
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr);
- }
- sst_drv_ctx->lpe_stalled = 0;
- pci_set_drvdata(pci, sst_drv_ctx);
- pm_runtime_allow(&pci->dev);
- pm_runtime_put_noidle(&pci->dev);
- pr_debug("...successfully done!!!\n");
- return ret;
-
-do_free_misc:
- misc_deregister(&lpe_ctrl);
-do_free_irq:
- free_irq(pci->irq, sst_drv_ctx);
-do_unmap_dram:
- iounmap(sst_drv_ctx->dram);
-do_unmap_iram:
- iounmap(sst_drv_ctx->iram);
-do_unmap_sram:
- iounmap(sst_drv_ctx->mailbox);
-do_unmap_shim:
- iounmap(sst_drv_ctx->shim);
-do_release_regions:
- pci_release_regions(pci);
-do_disable_device:
- pci_disable_device(pci);
-do_free_mem:
- kfree(sst_drv_ctx->mmap_mem);
-free_process_reply_wq:
- destroy_workqueue(sst_drv_ctx->process_reply_wq);
-free_process_msg_wq:
- destroy_workqueue(sst_drv_ctx->process_msg_wq);
-free_post_msg_wq:
- destroy_workqueue(sst_drv_ctx->post_msg_wq);
-free_mad_wq:
- destroy_workqueue(sst_drv_ctx->mad_wq);
-do_free_drv_ctx:
- kfree(sst_drv_ctx);
- sst_drv_ctx = NULL;
- pr_err("Probe failed with %d\n", ret);
- return ret;
-}
-
-/**
-* intel_sst_remove - PCI remove function
-*
-* @pci: PCI device structure
-*
-* This function is called by OS when a device is unloaded
-* This frees the interrupt etc
-*/
-static void __devexit intel_sst_remove(struct pci_dev *pci)
-{
- pm_runtime_get_noresume(&pci->dev);
- pm_runtime_forbid(&pci->dev);
- pci_dev_put(sst_drv_ctx->pci);
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_UN_INIT;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- misc_deregister(&lpe_ctrl);
- free_irq(pci->irq, sst_drv_ctx);
- iounmap(sst_drv_ctx->dram);
- iounmap(sst_drv_ctx->iram);
- iounmap(sst_drv_ctx->mailbox);
- iounmap(sst_drv_ctx->shim);
- sst_drv_ctx->pmic_state = SND_MAD_UN_INIT;
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
- misc_deregister(&lpe_dev);
- kfree(sst_drv_ctx->mmap_mem);
- } else
- kfree(sst_drv_ctx->fw_cntx);
- flush_scheduled_work();
- destroy_workqueue(sst_drv_ctx->process_reply_wq);
- destroy_workqueue(sst_drv_ctx->process_msg_wq);
- destroy_workqueue(sst_drv_ctx->post_msg_wq);
- destroy_workqueue(sst_drv_ctx->mad_wq);
- kfree(pci_get_drvdata(pci));
- sst_drv_ctx = NULL;
- pci_release_regions(pci);
- pci_disable_device(pci);
- pci_set_drvdata(pci, NULL);
-}
-
-void sst_save_dsp_context(void)
-{
- struct snd_sst_ctxt_params fw_context;
- unsigned int pvt_id, i;
- struct ipc_post *msg = NULL;
-
- /*check cpu type*/
- if (sst_drv_ctx->pci_id != SST_MFLD_PCI_ID)
- return;
- /*not supported for rest*/
- if (sst_drv_ctx->sst_state != SST_FW_RUNNING) {
- pr_debug("fw not running no context save ...\n");
- return;
- }
-
- /*send msg to fw*/
- if (sst_create_large_msg(&msg))
- return;
- pvt_id = sst_assign_pvt_id(sst_drv_ctx);
- i = sst_get_block_stream(sst_drv_ctx);
- sst_drv_ctx->alloc_block[i].sst_id = pvt_id;
- sst_fill_header(&msg->header, IPC_IA_GET_FW_CTXT, 1, pvt_id);
- msg->header.part.data = sizeof(fw_context) + sizeof(u32);
- fw_context.address = virt_to_phys((void *)sst_drv_ctx->fw_cntx);
- fw_context.size = FW_CONTEXT_MEM;
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32),
- &fw_context, sizeof(fw_context));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- /*wait for reply*/
- if (sst_wait_timeout(sst_drv_ctx, &sst_drv_ctx->alloc_block[i]))
- pr_debug("err fw context save timeout ...\n");
- sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
- pr_debug("fw context saved ...\n");
- return;
-}
-
-/* Power Management */
-/*
-* intel_sst_suspend - PCI suspend function
-*
-* @pci: PCI device structure
-* @state: PM message
-*
-* This function is called by OS when a power event occurs
-*/
-int intel_sst_suspend(struct pci_dev *pci, pm_message_t state)
-{
- union config_status_reg csr;
-
- pr_debug("intel_sst_suspend called\n");
-
- if (sst_drv_ctx->stream_cnt) {
- pr_err("active streams,not able to suspend\n");
- return -EBUSY;
- }
- /*save fw context*/
- sst_save_dsp_context();
- /*Assert RESET on LPE Processor*/
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.full = csr.full | 0x2;
- /* Move the SST state to Suspended */
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_SUSPENDED;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
- mutex_unlock(&sst_drv_ctx->sst_lock);
- pci_set_drvdata(pci, sst_drv_ctx);
- pci_save_state(pci);
- pci_disable_device(pci);
- pci_set_power_state(pci, PCI_D3hot);
- return 0;
-}
-
-/**
-* intel_sst_resume - PCI resume function
-*
-* @pci: PCI device structure
-*
-* This function is called by OS when a power event occurs
-*/
-int intel_sst_resume(struct pci_dev *pci)
-{
- int ret = 0;
-
- pr_debug("intel_sst_resume called\n");
- if (sst_drv_ctx->sst_state != SST_SUSPENDED) {
- pr_err("SST is not in suspended state\n");
- return 0;
- }
- sst_drv_ctx = pci_get_drvdata(pci);
- pci_set_power_state(pci, PCI_D0);
- pci_restore_state(pci);
- ret = pci_enable_device(pci);
- if (ret)
- pr_err("device can't be enabled\n");
-
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_UN_INIT;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- return 0;
-}
-
-/* The runtime_suspend/resume is pretty much similar to the legacy
- * suspend/resume with the noted exception below:
- * The PCI core takes care of taking the system through D3hot and
- * restoring it back to D0 and so there is no need to duplicate
- * that here.
- */
-static int intel_sst_runtime_suspend(struct device *dev)
-{
- union config_status_reg csr;
-
- pr_debug("intel_sst_runtime_suspend called\n");
- if (sst_drv_ctx->stream_cnt) {
- pr_err("active streams,not able to suspend\n");
- return -EBUSY;
- }
- /*save fw context*/
- sst_save_dsp_context();
- /*Assert RESET on LPE Processor*/
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.full = csr.full | 0x2;
- /* Move the SST state to Suspended */
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_SUSPENDED;
-
- /* Only needed by Medfield */
- if (sst_drv_ctx->pci_id != SST_MRST_PCI_ID)
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
- mutex_unlock(&sst_drv_ctx->sst_lock);
- return 0;
-}
-
-static int intel_sst_runtime_resume(struct device *dev)
-{
-
- pr_debug("intel_sst_runtime_resume called\n");
- if (sst_drv_ctx->sst_state != SST_SUSPENDED) {
- pr_err("SST is not in suspended state\n");
- return 0;
- }
-
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_UN_INIT;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- return 0;
-}
-
-static int intel_sst_runtime_idle(struct device *dev)
-{
- pr_debug("runtime_idle called\n");
- if (sst_drv_ctx->stream_cnt == 0 && sst_drv_ctx->am_cnt == 0)
- pm_schedule_suspend(dev, SST_SUSPEND_DELAY);
- return -EBUSY;
-}
-
-static const struct dev_pm_ops intel_sst_pm = {
- .runtime_suspend = intel_sst_runtime_suspend,
- .runtime_resume = intel_sst_runtime_resume,
- .runtime_idle = intel_sst_runtime_idle,
-};
-
-/* PCI Routines */
-static struct pci_device_id intel_sst_ids[] = {
- { PCI_VDEVICE(INTEL, SST_MRST_PCI_ID), 3},
- { PCI_VDEVICE(INTEL, SST_MFLD_PCI_ID), 6},
- { 0, }
-};
-MODULE_DEVICE_TABLE(pci, intel_sst_ids);
-
-static struct pci_driver driver = {
- .name = SST_DRV_NAME,
- .id_table = intel_sst_ids,
- .probe = intel_sst_probe,
- .remove = __devexit_p(intel_sst_remove),
-#ifdef CONFIG_PM
- .suspend = intel_sst_suspend,
- .resume = intel_sst_resume,
- .driver = {
- .pm = &intel_sst_pm,
- },
-#endif
-};
-
-/**
-* intel_sst_init - Module init function
-*
-* Registers with PCI
-* Registers with /dev
-* Init all data strutures
-*/
-static int __init intel_sst_init(void)
-{
- /* Init all variables, data structure etc....*/
- int ret = 0;
- pr_debug("INFO: ******** SST DRIVER loading.. Ver: %s\n",
- SST_DRIVER_VERSION);
-
- mutex_init(&drv_ctx_lock);
- /* Register with PCI */
- ret = pci_register_driver(&driver);
- if (ret)
- pr_err("PCI register failed\n");
- return ret;
-}
-
-/**
-* intel_sst_exit - Module exit function
-*
-* Unregisters with PCI
-* Unregisters with /dev
-* Frees all data strutures
-*/
-static void __exit intel_sst_exit(void)
-{
- pci_unregister_driver(&driver);
-
- pr_debug("driver unloaded\n");
- sst_drv_ctx = NULL;
- return;
-}
-
-module_init(intel_sst_init);
-module_exit(intel_sst_exit);
diff --git a/drivers/staging/intel_sst/intel_sst.h b/drivers/staging/intel_sst/intel_sst.h
deleted file mode 100644
index 4ad2829..0000000
--- a/drivers/staging/intel_sst/intel_sst.h
+++ /dev/null
@@ -1,162 +0,0 @@
-#ifndef __INTEL_SST_H__
-#define __INTEL_SST_H__
-/*
- * intel_sst.h - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corporation
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This driver exposes the audio engine functionalities to the ALSA
- * and middleware.
- * This file is shared between the SST and MAD drivers
- */
-#include "intel_sst_ioctl.h"
-#include <sound/jack.h>
-
-#define SST_CARD_NAMES "intel_mid_card"
-
-#define MFLD_MAX_HW_CH 4
-/* control list Pmic & Lpe */
-/* Input controls */
-enum port_status {
- ACTIVATE = 1,
- DEACTIVATE,
-};
-
-/* Card states */
-enum sst_card_states {
- SND_CARD_UN_INIT = 0,
- SND_CARD_INIT_DONE,
-};
-
-enum sst_controls {
- SST_SND_ALLOC = 0x1000,
- SST_SND_PAUSE = 0x1001,
- SST_SND_RESUME = 0x1002,
- SST_SND_DROP = 0x1003,
- SST_SND_FREE = 0x1004,
- SST_SND_BUFFER_POINTER = 0x1005,
- SST_SND_STREAM_INIT = 0x1006,
- SST_SND_START = 0x1007,
- SST_SND_STREAM_PROCESS = 0x1008,
- SST_MAX_CONTROLS = 0x1008,
- SST_CONTROL_BASE = 0x1000,
- SST_ENABLE_RX_TIME_SLOT = 0x1009,
-};
-
-enum SND_CARDS {
- SND_FS = 0,
- SND_MX,
- SND_NC,
- SND_MSIC
-};
-
-struct pcm_stream_info {
- int str_id;
- void *mad_substream;
- void (*period_elapsed) (void *mad_substream);
- unsigned long long buffer_ptr;
- int sfreq;
-};
-
-struct snd_pmic_ops {
- int card_status;
- int master_mute;
- int num_channel;
- int input_dev_id;
- int mute_status;
- struct mutex lock;
- int pb_on, pbhs_on;
- int cap_on;
- int output_dev_id;
- int lineout_dev_id, line_out_names_cnt;
- int prev_lineout_dev_id;
- bool jack_interrupt_status;
- int (*set_input_dev) (u8 value);
- int (*set_output_dev) (u8 value);
- int (*set_lineout_dev) (u8 value);
- int (*set_mute) (int dev_id, u8 value);
- int (*get_mute) (int dev_id, u8 *value);
-
- int (*set_vol) (int dev_id, int value);
- int (*get_vol) (int dev_id, int *value);
-
- int (*init_card) (void);
- int (*set_pcm_audio_params)
- (int sfreq, int word_size , int num_channel);
- int (*set_pcm_voice_params) (void);
- int (*set_voice_port) (int status);
- int (*set_audio_port) (int status);
-
- int (*power_up_pmic_pb) (unsigned int port);
- int (*power_up_pmic_cp) (unsigned int port);
- int (*power_down_pmic_pb) (unsigned int device);
- int (*power_down_pmic_cp) (unsigned int device);
- int (*power_down_pmic) (void);
- void (*pmic_irq_cb) (void *cb_data, u8 value);
- void (*pmic_irq_enable)(void *data);
- int (*pmic_jack_enable) (void);
- int (*pmic_get_mic_bias)(void *intelmaddata);
- int (*pmic_set_headset_state)(int state);
-
- unsigned int hw_dmic_map[MFLD_MAX_HW_CH];
- unsigned int available_dmics;
- int (*set_hw_dmic_route) (u8 index);
-
- int gpio_amp;
-};
-
-extern void sst_mad_send_jack_report(struct snd_jack *jack,
- int buttonpressevent,
- int status);
-
-
-int intemad_set_headset_state(int state);
-int intelmad_get_mic_bias(void);
-
-struct intel_sst_pcm_control {
- int (*open) (struct snd_sst_params *str_param);
- int (*device_control) (int cmd, void *arg);
- int (*close) (unsigned int str_id);
-};
-struct intel_sst_card_ops {
- char *module_name;
- unsigned int vendor_id;
- struct intel_sst_pcm_control *pcm_control;
- struct snd_pmic_ops *scard_ops;
-};
-
-/* modified for generic access */
-struct sc_reg_access {
- u16 reg_addr;
- u8 value;
- u8 mask;
-};
-enum sc_reg_access_type {
- PMIC_READ = 0,
- PMIC_WRITE,
- PMIC_READ_MODIFY,
-};
-
-int register_sst_card(struct intel_sst_card_ops *card);
-void unregister_sst_card(struct intel_sst_card_ops *card);
-#endif /* __INTEL_SST_H__ */
diff --git a/drivers/staging/intel_sst/intel_sst_app_interface.c b/drivers/staging/intel_sst/intel_sst_app_interface.c
deleted file mode 100644
index 93b41a2..0000000
--- a/drivers/staging/intel_sst/intel_sst_app_interface.c
+++ /dev/null
@@ -1,1460 +0,0 @@
-/*
- * intel_sst_interface.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * Jeeja KP <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This driver exposes the audio engine functionalities to the ALSA
- * and middleware.
- * Upper layer interfaces (MAD driver, MMF) to SST driver
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/fs.h>
-#include <linux/uio.h>
-#include <linux/aio.h>
-#include <linux/uaccess.h>
-#include <linux/firmware.h>
-#include <linux/pm_runtime.h>
-#include <linux/ioctl.h>
-#ifdef CONFIG_MRST_RAR_HANDLER
-#include <linux/rar_register.h>
-#include "../../../drivers/staging/memrar/memrar.h"
-#endif
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intel_sst_fw_ipc.h"
-#include "intel_sst_common.h"
-
-#define AM_MODULE 1
-#define STREAM_MODULE 0
-
-
-/**
-* intel_sst_check_device - checks SST device
-*
-* This utility function checks the state of SST device and downlaods FW if
-* not done, or resumes the device if suspended
-*/
-
-static int intel_sst_check_device(void)
-{
- int retval = 0;
- if (sst_drv_ctx->pmic_state != SND_MAD_INIT_DONE) {
- pr_warn("Sound card not available\n");
- return -EIO;
- }
- if (sst_drv_ctx->sst_state == SST_SUSPENDED) {
- pr_debug("Resuming from Suspended state\n");
- retval = intel_sst_resume(sst_drv_ctx->pci);
- if (retval) {
- pr_debug("Resume Failed= %#x,abort\n", retval);
- return retval;
- }
- }
-
- if (sst_drv_ctx->sst_state == SST_UN_INIT) {
- /* FW is not downloaded */
- retval = sst_download_fw();
- if (retval)
- return -ENODEV;
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
- retval = sst_drv_ctx->rx_time_slot_status;
- if (retval != RX_TIMESLOT_UNINIT
- && sst_drv_ctx->pmic_vendor != SND_NC)
- sst_enable_rx_timeslot(retval);
- }
- }
- return 0;
-}
-
-/**
- * intel_sst_open - opens a handle to driver
- *
- * @i_node: inode structure
- * @file_ptr:pointer to file
- *
- * This function is called by OS when a user space component
- * tries to get a driver handle. Only one handle at a time
- * will be allowed
- */
-int intel_sst_open(struct inode *i_node, struct file *file_ptr)
-{
- unsigned int retval;
-
- mutex_lock(&sst_drv_ctx->stream_lock);
- pm_runtime_get_sync(&sst_drv_ctx->pci->dev);
- retval = intel_sst_check_device();
- if (retval) {
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- return retval;
- }
-
- if (sst_drv_ctx->encoded_cnt < MAX_ENC_STREAM) {
- struct ioctl_pvt_data *data =
- kzalloc(sizeof(struct ioctl_pvt_data), GFP_KERNEL);
- if (!data) {
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- return -ENOMEM;
- }
-
- sst_drv_ctx->encoded_cnt++;
- mutex_unlock(&sst_drv_ctx->stream_lock);
- data->pvt_id = sst_assign_pvt_id(sst_drv_ctx);
- data->str_id = 0;
- file_ptr->private_data = (void *)data;
- pr_debug("pvt_id handle = %d!\n", data->pvt_id);
- } else {
- retval = -EUSERS;
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- }
- return retval;
-}
-
-/**
- * intel_sst_open_cntrl - opens a handle to driver
- *
- * @i_node: inode structure
- * @file_ptr:pointer to file
- *
- * This function is called by OS when a user space component
- * tries to get a driver handle to /dev/intel_sst_control.
- * Only one handle at a time will be allowed
- * This is for control operations only
- */
-int intel_sst_open_cntrl(struct inode *i_node, struct file *file_ptr)
-{
- unsigned int retval;
-
- /* audio manager open */
- mutex_lock(&sst_drv_ctx->stream_lock);
- pm_runtime_get_sync(&sst_drv_ctx->pci->dev);
- retval = intel_sst_check_device();
- if (retval) {
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- return retval;
- }
-
- if (sst_drv_ctx->am_cnt < MAX_AM_HANDLES) {
- sst_drv_ctx->am_cnt++;
- pr_debug("AM handle opened...\n");
- file_ptr->private_data = NULL;
- } else {
- retval = -EACCES;
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- }
-
- mutex_unlock(&sst_drv_ctx->stream_lock);
- return retval;
-}
-
-/**
- * intel_sst_release - releases a handle to driver
- *
- * @i_node: inode structure
- * @file_ptr: pointer to file
- *
- * This function is called by OS when a user space component
- * tries to release a driver handle.
- */
-int intel_sst_release(struct inode *i_node, struct file *file_ptr)
-{
- struct ioctl_pvt_data *data = file_ptr->private_data;
-
- pr_debug("Release called, closing app handle\n");
- mutex_lock(&sst_drv_ctx->stream_lock);
- sst_drv_ctx->encoded_cnt--;
- sst_drv_ctx->stream_cnt--;
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- free_stream_context(data->str_id);
- kfree(data);
- return 0;
-}
-
-int intel_sst_release_cntrl(struct inode *i_node, struct file *file_ptr)
-{
- /* audio manager close */
- mutex_lock(&sst_drv_ctx->stream_lock);
- sst_drv_ctx->am_cnt--;
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- pr_debug("AM handle closed\n");
- return 0;
-}
-
-/**
-* intel_sst_mmap - mmaps a kernel buffer to user space for copying data
-*
-* @vma: vm area structure instance
-* @file_ptr: pointer to file
-*
-* This function is called by OS when a user space component
-* tries to get mmap memory from driver
-*/
-int intel_sst_mmap(struct file *file_ptr, struct vm_area_struct *vma)
-{
- int retval, length;
- struct ioctl_pvt_data *data =
- (struct ioctl_pvt_data *)file_ptr->private_data;
- int str_id = data->str_id;
- void *mem_area;
-
- retval = sst_validate_strid(str_id);
- if (retval)
- return -EINVAL;
-
- length = vma->vm_end - vma->vm_start;
- pr_debug("called for stream %d length 0x%x\n", str_id, length);
-
- if (length > sst_drv_ctx->mmap_len)
- return -ENOMEM;
- if (!sst_drv_ctx->mmap_mem)
- return -EIO;
-
- /* round it up to the page boundary */
- /*mem_area = (void *)((((unsigned long)sst_drv_ctx->mmap_mem)
- + PAGE_SIZE - 1) & PAGE_MASK);*/
- mem_area = (void *) PAGE_ALIGN((unsigned int) sst_drv_ctx->mmap_mem);
-
- /* map the whole physically contiguous area in one piece */
- retval = remap_pfn_range(vma,
- vma->vm_start,
- virt_to_phys((void *)mem_area) >> PAGE_SHIFT,
- length,
- vma->vm_page_prot);
- if (retval)
- sst_drv_ctx->streams[str_id].mmapped = false;
- else
- sst_drv_ctx->streams[str_id].mmapped = true;
-
- pr_debug("mmap ret 0x%x\n", retval);
- return retval;
-}
-
-/* sets mmap data buffers to play/capture*/
-static int intel_sst_mmap_play_capture(u32 str_id,
- struct snd_sst_mmap_buffs *mmap_buf)
-{
- struct sst_stream_bufs *bufs;
- int retval, i;
- struct stream_info *stream;
- struct snd_sst_mmap_buff_entry *buf_entry;
- struct snd_sst_mmap_buff_entry *tmp_buf;
-
- pr_debug("called for str_id %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return -EINVAL;
-
- stream = &sst_drv_ctx->streams[str_id];
- if (stream->mmapped != true)
- return -EIO;
-
- if (stream->status == STREAM_UN_INIT ||
- stream->status == STREAM_DECODE) {
- return -EBADRQC;
- }
- stream->curr_bytes = 0;
- stream->cumm_bytes = 0;
-
- tmp_buf = kcalloc(mmap_buf->entries, sizeof(*tmp_buf), GFP_KERNEL);
- if (!tmp_buf)
- return -ENOMEM;
- if (copy_from_user(tmp_buf, (void __user *)mmap_buf->buff,
- mmap_buf->entries * sizeof(*tmp_buf))) {
- retval = -EFAULT;
- goto out_free;
- }
-
- pr_debug("new buffers count %d status %d\n",
- mmap_buf->entries, stream->status);
- buf_entry = tmp_buf;
- for (i = 0; i < mmap_buf->entries; i++) {
- bufs = kzalloc(sizeof(*bufs), GFP_KERNEL);
- if (!bufs) {
- retval = -ENOMEM;
- goto out_free;
- }
- bufs->size = buf_entry->size;
- bufs->offset = buf_entry->offset;
- bufs->addr = sst_drv_ctx->mmap_mem;
- bufs->in_use = false;
- buf_entry++;
- /* locking here */
- mutex_lock(&stream->lock);
- list_add_tail(&bufs->node, &stream->bufs);
- mutex_unlock(&stream->lock);
- }
-
- mutex_lock(&stream->lock);
- stream->data_blk.condition = false;
- stream->data_blk.ret_code = 0;
- if (stream->status == STREAM_INIT &&
- stream->prev != STREAM_UN_INIT &&
- stream->need_draining != true) {
- stream->prev = stream->status;
- stream->status = STREAM_RUNNING;
- if (stream->ops == STREAM_OPS_PLAYBACK) {
- if (sst_play_frame(str_id) < 0) {
- pr_warn("play frames fail\n");
- mutex_unlock(&stream->lock);
- retval = -EIO;
- goto out_free;
- }
- } else if (stream->ops == STREAM_OPS_CAPTURE) {
- if (sst_capture_frame(str_id) < 0) {
- pr_warn("capture frame fail\n");
- mutex_unlock(&stream->lock);
- retval = -EIO;
- goto out_free;
- }
- }
- }
- mutex_unlock(&stream->lock);
- /* Block the call for reply */
- if (!list_empty(&stream->bufs)) {
- stream->data_blk.on = true;
- retval = sst_wait_interruptible(sst_drv_ctx,
- &stream->data_blk);
- }
-
- if (retval >= 0)
- retval = stream->cumm_bytes;
- pr_debug("end of play/rec ioctl bytes = %d!!\n", retval);
-
-out_free:
- kfree(tmp_buf);
- return retval;
-}
-
-/*sets user data buffers to play/capture*/
-static int intel_sst_play_capture(struct stream_info *stream, int str_id)
-{
- int retval;
-
- stream->data_blk.ret_code = 0;
- stream->data_blk.on = true;
- stream->data_blk.condition = false;
-
- mutex_lock(&stream->lock);
- if (stream->status == STREAM_INIT && stream->prev != STREAM_UN_INIT) {
- /* stream is started */
- stream->prev = stream->status;
- stream->status = STREAM_RUNNING;
- }
-
- if (stream->status == STREAM_INIT && stream->prev == STREAM_UN_INIT) {
- /* stream is not started yet */
- pr_debug("Stream isn't in started state %d, prev %d\n",
- stream->status, stream->prev);
- } else if ((stream->status == STREAM_RUNNING ||
- stream->status == STREAM_PAUSED) &&
- stream->need_draining != true) {
- /* stream is started */
- if (stream->ops == STREAM_OPS_PLAYBACK ||
- stream->ops == STREAM_OPS_PLAYBACK_DRM) {
- if (sst_play_frame(str_id) < 0) {
- pr_warn("play frames failed\n");
- mutex_unlock(&stream->lock);
- return -EIO;
- }
- } else if (stream->ops == STREAM_OPS_CAPTURE) {
- if (sst_capture_frame(str_id) < 0) {
- pr_warn("capture frames failed\n");
- mutex_unlock(&stream->lock);
- return -EIO;
- }
- }
- } else {
- mutex_unlock(&stream->lock);
- return -EIO;
- }
- mutex_unlock(&stream->lock);
- /* Block the call for reply */
-
- retval = sst_wait_interruptible(sst_drv_ctx, &stream->data_blk);
- if (retval) {
- stream->status = STREAM_INIT;
- pr_debug("wait returned error...\n");
- }
- return retval;
-}
-
-/* fills kernel list with buffer addresses for SST DSP driver to process*/
-static int snd_sst_fill_kernel_list(struct stream_info *stream,
- const struct iovec *iovec, unsigned long nr_segs,
- struct list_head *copy_to_list)
-{
- struct sst_stream_bufs *stream_bufs;
- unsigned long index, mmap_len;
- unsigned char __user *bufp;
- unsigned long size, copied_size;
- int retval = 0, add_to_list = 0;
- static int sent_offset;
- static unsigned long sent_index;
-
-#ifdef CONFIG_MRST_RAR_HANDLER
- if (stream->ops == STREAM_OPS_PLAYBACK_DRM) {
- for (index = stream->sg_index; index < nr_segs; index++) {
- __u32 rar_handle;
- struct sst_stream_bufs *stream_bufs =
- kzalloc(sizeof(*stream_bufs), GFP_KERNEL);
-
- stream->sg_index = index;
- if (!stream_bufs)
- return -ENOMEM;
- if (copy_from_user((void *) &rar_handle,
- iovec[index].iov_base,
- sizeof(__u32))) {
- kfree(stream_bufs);
- return -EFAULT;
- }
- stream_bufs->addr = (char *)rar_handle;
- stream_bufs->in_use = false;
- stream_bufs->size = iovec[0].iov_len;
- /* locking here */
- mutex_lock(&stream->lock);
- list_add_tail(&stream_bufs->node, &stream->bufs);
- mutex_unlock(&stream->lock);
- }
- stream->sg_index = index;
- return retval;
- }
-#endif
- stream_bufs = kzalloc(sizeof(*stream_bufs), GFP_KERNEL);
- if (!stream_bufs)
- return -ENOMEM;
- stream_bufs->addr = sst_drv_ctx->mmap_mem;
- mmap_len = sst_drv_ctx->mmap_len;
- bufp = stream->cur_ptr;
-
- copied_size = 0;
-
- if (!stream->sg_index)
- sent_index = sent_offset = 0;
-
- for (index = stream->sg_index; index < nr_segs; index++) {
- stream->sg_index = index;
- if (!stream->cur_ptr)
- bufp = iovec[index].iov_base;
-
- size = ((unsigned long)iovec[index].iov_base
- + iovec[index].iov_len) - (unsigned long) bufp;
-
- if ((copied_size + size) > mmap_len)
- size = mmap_len - copied_size;
-
-
- if (stream->ops == STREAM_OPS_PLAYBACK) {
- if (copy_from_user((void *)
- (stream_bufs->addr + copied_size),
- bufp, size)) {
- /* Clean up the list and return error code */
- retval = -EFAULT;
- break;
- }
- } else if (stream->ops == STREAM_OPS_CAPTURE) {
- struct snd_sst_user_cap_list *entry =
- kzalloc(sizeof(*entry), GFP_KERNEL);
-
- if (!entry) {
- kfree(stream_bufs);
- return -ENOMEM;
- }
- entry->iov_index = index;
- entry->iov_offset = (unsigned long) bufp -
- (unsigned long)iovec[index].iov_base;
- entry->offset = copied_size;
- entry->size = size;
- list_add_tail(&entry->node, copy_to_list);
- }
-
- stream->cur_ptr = bufp + size;
-
- if (((unsigned long)iovec[index].iov_base
- + iovec[index].iov_len) <
- ((unsigned long)iovec[index].iov_base)) {
- pr_debug("Buffer overflows\n");
- kfree(stream_bufs);
- return -EINVAL;
- }
-
- if (((unsigned long)iovec[index].iov_base
- + iovec[index].iov_len) ==
- (unsigned long)stream->cur_ptr) {
- stream->cur_ptr = NULL;
- stream->sg_index++;
- }
-
- copied_size += size;
- pr_debug("copied_size - %lx\n", copied_size);
- if ((copied_size >= mmap_len) ||
- (stream->sg_index == nr_segs)) {
- add_to_list = 1;
- }
-
- if (add_to_list) {
- stream_bufs->in_use = false;
- stream_bufs->size = copied_size;
- /* locking here */
- mutex_lock(&stream->lock);
- list_add_tail(&stream_bufs->node, &stream->bufs);
- mutex_unlock(&stream->lock);
- break;
- }
- }
- return retval;
-}
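-/*
- * Illustrative example (not part of the original code): for a playback
- * write of two 4 KB iovecs with mmap_len >= 8 KB, the loop above copies
- * iovec[0] to stream_bufs->addr + 0 and iovec[1] to stream_bufs->addr + 4096,
- * then queues a single stream_bufs node with size = 8192. For capture, no
- * user data is copied here; instead two snd_sst_user_cap_list entries
- * (iov_index 0/1, offset 0/4096) are queued on copy_to_list for
- * snd_sst_copy_userbuf_capture() to use once the DSP has filled the buffer.
- */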
-
-/* This function copies the captured data returned from SST DSP engine
- * to the user buffers*/
-static int snd_sst_copy_userbuf_capture(struct stream_info *stream,
- const struct iovec *iovec,
- struct list_head *copy_to_list)
-{
- struct snd_sst_user_cap_list *entry, *_entry;
- struct sst_stream_bufs *kbufs = NULL, *_kbufs;
- int retval = 0;
-
- /* copy sent buffers */
- pr_debug("capture stream copying to user now...\n");
- list_for_each_entry_safe(kbufs, _kbufs, &stream->bufs, node) {
- if (kbufs->in_use == true) {
- /* copy to user */
- list_for_each_entry_safe(entry, _entry,
- copy_to_list, node) {
- if (copy_to_user(iovec[entry->iov_index].iov_base + entry->iov_offset,
- kbufs->addr + entry->offset,
- entry->size)) {
- /* Clean up the list and return error */
- retval = -EFAULT;
- break;
- }
- list_del(&entry->node);
- kfree(entry);
- }
- }
- }
- pr_debug("end of cap copy\n");
- return retval;
-}
-
-/*
- * snd_sst_userbufs_play_cap - constructs the list from user buffers
- *
- * @iovec: pointer to iovec structure
- * @nr_segs: number of entries in the iovec structure
- * @str_id: stream id
- * @stream: pointer to stream_info structure
- *
- * This function will traverse the user list and copy the data to the kernel
- * space buffers.
- */
-static int snd_sst_userbufs_play_cap(const struct iovec *iovec,
- unsigned long nr_segs, unsigned int str_id,
- struct stream_info *stream)
-{
- int retval;
- LIST_HEAD(copy_to_list);
-
-
- retval = snd_sst_fill_kernel_list(stream, iovec, nr_segs,
- &copy_to_list);
-
- retval = intel_sst_play_capture(stream, str_id);
- if (retval < 0)
- return retval;
-
- if (stream->ops == STREAM_OPS_CAPTURE) {
- retval = snd_sst_copy_userbuf_capture(stream, iovec,
- &copy_to_list);
- }
- return retval;
-}
-
-/* Common handler for read/write of user buffers,
-   called from the read/write system calls */
-static int intel_sst_read_write(unsigned int str_id, char __user *buf,
- size_t count)
-{
- int retval;
- struct stream_info *stream;
- struct iovec iovec;
- unsigned long nr_segs;
-
- retval = sst_validate_strid(str_id);
- if (retval)
- return -EINVAL;
- stream = &sst_drv_ctx->streams[str_id];
- if (stream->mmapped == true) {
- pr_warn("user write and stream is mapped\n");
- return -EIO;
- }
- if (!count)
- return -EINVAL;
- stream->curr_bytes = 0;
- stream->cumm_bytes = 0;
- /* copy user buf details */
- pr_debug("new buffers %p, copy size %d, status %d\n" ,
- buf, (int) count, (int) stream->status);
-
- stream->buf_type = SST_BUF_USER_STATIC;
- iovec.iov_base = buf;
- iovec.iov_len = count;
- nr_segs = 1;
-
- do {
- retval = snd_sst_userbufs_play_cap(
- &iovec, nr_segs, str_id, stream);
- if (retval < 0)
- break;
-
- } while (stream->sg_index < nr_segs);
-
- stream->sg_index = 0;
- stream->cur_ptr = NULL;
- if (retval >= 0)
- retval = stream->cumm_bytes;
- pr_debug("end of play/rec bytes = %d!!\n", retval);
- return retval;
-}
-
-/*
- * intel_sst_write - This function is called when the user tries to play out data
- *
- * @file_ptr: pointer to file
- * @buf: user buffer to be played out
- * @count: size of the buffer
- * @offset: offset to start from
- *
- * writes the encoded data into DSP
- */
-int intel_sst_write(struct file *file_ptr, const char __user *buf,
- size_t count, loff_t *offset)
-{
- struct ioctl_pvt_data *data = file_ptr->private_data;
- int str_id = data->str_id;
- struct stream_info *stream = &sst_drv_ctx->streams[str_id];
-
- pr_debug("called for %d\n", str_id);
- if (stream->status == STREAM_UN_INIT ||
- stream->status == STREAM_DECODE) {
- return -EBADRQC;
- }
- return intel_sst_read_write(str_id, (char __user *)buf, count);
-}
-
-/*
- * intel_sst_aio_write - write buffers
- *
- * @kiocb: pointer to a structure containing file pointer
- * @iov: list of user buffers to be played out
- * @nr_segs: number of entries
- * @offset: offset to start from
- *
- * This function is called when user tries to play out multiple data buffers
- */
-ssize_t intel_sst_aio_write(struct kiocb *kiocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t offset)
-{
- int retval;
- struct ioctl_pvt_data *data = kiocb->ki_filp->private_data;
- int str_id = data->str_id;
- struct stream_info *stream;
-
- pr_debug("entry - %ld\n", nr_segs);
-
- if (is_sync_kiocb(kiocb) == false)
- return -EINVAL;
-
- pr_debug("called for str_id %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return -EINVAL;
- stream = &sst_drv_ctx->streams[str_id];
- if (stream->mmapped == true)
- return -EIO;
- if (stream->status == STREAM_UN_INIT ||
- stream->status == STREAM_DECODE) {
- return -EBADRQC;
- }
- stream->curr_bytes = 0;
- stream->cumm_bytes = 0;
- pr_debug("new segs %ld, offset %d, status %d\n" ,
- nr_segs, (int) offset, (int) stream->status);
- stream->buf_type = SST_BUF_USER_STATIC;
- do {
- retval = snd_sst_userbufs_play_cap(iov, nr_segs,
- str_id, stream);
- if (retval < 0)
- break;
-
- } while (stream->sg_index < nr_segs);
-
- stream->sg_index = 0;
- stream->cur_ptr = NULL;
- if (retval >= 0)
- retval = stream->cumm_bytes;
- pr_debug("end of play/rec bytes = %d!!\n", retval);
- return retval;
-}
-
-/*
- * intel_sst_read - read the encoded data
- *
- * @file_ptr: pointer to file
- * @buf: user buffer to be filled with captured data
- * @count: size of the buffer
- * @offset: offset to start from
- *
- * This function is called when user tries to capture data
- */
-int intel_sst_read(struct file *file_ptr, char __user *buf,
- size_t count, loff_t *offset)
-{
- struct ioctl_pvt_data *data = file_ptr->private_data;
- int str_id = data->str_id;
- struct stream_info *stream = &sst_drv_ctx->streams[str_id];
-
- pr_debug("called for %d\n", str_id);
- if (stream->status == STREAM_UN_INIT ||
- stream->status == STREAM_DECODE)
- return -EBADRQC;
- return intel_sst_read_write(str_id, buf, count);
-}
-
-/*
- * intel_sst_aio_read - aio read
- *
- * @kiocb: pointer to a structure containing file pointer
- * @iov: list of user buffers to be filled with captured data
- * @nr_segs: number of entries
- * @offset: offset to start from
- *
- * This function is called when user tries to capture out multiple data buffers
- */
-ssize_t intel_sst_aio_read(struct kiocb *kiocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t offset)
-{
- int retval;
- struct ioctl_pvt_data *data = kiocb->ki_filp->private_data;
- int str_id = data->str_id;
- struct stream_info *stream;
-
- pr_debug("entry - %ld\n", nr_segs);
-
- if (is_sync_kiocb(kiocb) == false) {
- pr_debug("aio_read from user space is not allowed\n");
- return -EINVAL;
- }
-
- pr_debug("called for str_id %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return -EINVAL;
- stream = &sst_drv_ctx->streams[str_id];
- if (stream->mmapped == true)
- return -EIO;
- if (stream->status == STREAM_UN_INIT ||
- stream->status == STREAM_DECODE)
- return -EBADRQC;
- stream->curr_bytes = 0;
- stream->cumm_bytes = 0;
-
- pr_debug("new segs %ld, offset %d, status %d\n" ,
- nr_segs, (int) offset, (int) stream->status);
- stream->buf_type = SST_BUF_USER_STATIC;
- do {
- retval = snd_sst_userbufs_play_cap(iov, nr_segs,
- str_id, stream);
- if (retval < 0)
- break;
-
- } while (stream->sg_index < nr_segs);
-
- stream->sg_index = 0;
- stream->cur_ptr = NULL;
- if (retval >= 0)
- retval = stream->cumm_bytes;
- pr_debug("end of play/rec bytes = %d!!\n", retval);
- return retval;
-}
-
-/* sst_print_stream_params - prints the stream parameters (debug fn)*/
-static void sst_print_stream_params(struct snd_sst_get_stream_params *get_prm)
-{
- pr_debug("codec params:result = %d\n",
- get_prm->codec_params.result);
- pr_debug("codec params:stream = %d\n",
- get_prm->codec_params.stream_id);
- pr_debug("codec params:codec = %d\n",
- get_prm->codec_params.codec);
- pr_debug("codec params:ops = %d\n",
- get_prm->codec_params.ops);
- pr_debug("codec params:stream_type = %d\n",
- get_prm->codec_params.stream_type);
- pr_debug("pcmparams:sfreq = %d\n",
- get_prm->pcm_params.sfreq);
- pr_debug("pcmparams:num_chan = %d\n",
- get_prm->pcm_params.num_chan);
- pr_debug("pcmparams:pcm_wd_sz = %d\n",
- get_prm->pcm_params.pcm_wd_sz);
- return;
-}
-
-/**
- * sst_create_algo_ipc - create ipc msg for algorithm parameters
- *
- * @algo_params: Algorithm parameters
- * @msg: post msg pointer
- *
- * This function is called to create ipc msg
- */
-int sst_create_algo_ipc(struct snd_ppp_params *algo_params,
- struct ipc_post **msg)
-{
- if (sst_create_large_msg(msg))
- return -ENOMEM;
- sst_fill_header(&(*msg)->header,
- IPC_IA_ALG_PARAMS, 1, algo_params->str_id);
- (*msg)->header.part.data = sizeof(u32) +
- sizeof(*algo_params) + algo_params->size;
- memcpy((*msg)->mailbox_data, &(*msg)->header, sizeof(u32));
- memcpy((*msg)->mailbox_data + sizeof(u32),
- algo_params, sizeof(*algo_params));
- return 0;
-}
-
-/**
- * sst_send_algo_ipc - send ipc msg for algorithm parameters
- *
- * @msg: post msg pointer
- *
- * This function is called to send ipc msg
- */
-int sst_send_algo_ipc(struct ipc_post **msg)
-{
- sst_drv_ctx->ppp_params_blk.condition = false;
- sst_drv_ctx->ppp_params_blk.ret_code = 0;
- sst_drv_ctx->ppp_params_blk.on = true;
- sst_drv_ctx->ppp_params_blk.data = NULL;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&(*msg)->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- return sst_wait_interruptible_timeout(sst_drv_ctx,
- &sst_drv_ctx->ppp_params_blk, SST_BLOCK_TIMEOUT);
-}
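-/*
- * Note on usage (descriptive only): sst_create_algo_ipc() and
- * sst_send_algo_ipc() are used as a pair, as in the SNDRV_SST_SET_ALGO and
- * SNDRV_SST_GET_ALGO paths below: build the large message, copy the caller's
- * parameter payload into the mailbox after the snd_ppp_params header, then
- * send and block on ppp_params_blk until the FW replies or SST_BLOCK_TIMEOUT
- * expires.
- */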
-
-/**
- * intel_sst_ioctl_dsp - receives the device ioctl's
- *
- * @cmd:Ioctl cmd
- * @arg:data
- *
- * This function is called when a user space component
- * sends a DSP Ioctl to SST driver
- */
-long intel_sst_ioctl_dsp(unsigned int cmd, unsigned long arg)
-{
- int retval = 0;
- struct snd_ppp_params algo_params;
- struct snd_ppp_params *algo_params_copied;
- struct ipc_post *msg;
-
- switch (_IOC_NR(cmd)) {
- case _IOC_NR(SNDRV_SST_SET_ALGO):
- if (copy_from_user(&algo_params, (void __user *)arg,
- sizeof(algo_params)))
- return -EFAULT;
- if (algo_params.size > SST_MAILBOX_SIZE)
- return -EMSGSIZE;
-
- pr_debug("Algo ID %d Str id %d Enable %d Size %d\n",
- algo_params.algo_id, algo_params.str_id,
- algo_params.enable, algo_params.size);
- retval = sst_create_algo_ipc(&algo_params, &msg);
- if (retval)
- break;
- algo_params.reserved = 0;
- if (copy_from_user(msg->mailbox_data + sizeof(algo_params),
- algo_params.params, algo_params.size)) {
- kfree(msg->mailbox_data);
- kfree(msg);
- return -EFAULT;
- }
-
- retval = sst_send_algo_ipc(&msg);
- if (retval) {
- pr_debug("Error in sst_set_algo = %d\n", retval);
- retval = -EIO;
- }
- break;
-
- case _IOC_NR(SNDRV_SST_GET_ALGO):
- if (copy_from_user(&algo_params, (void __user *)arg,
- sizeof(algo_params)))
- return -EFAULT;
- pr_debug("Algo ID %d Str id %d Enable %d Size %d\n",
- algo_params.algo_id, algo_params.str_id,
- algo_params.enable, algo_params.size);
- retval = sst_create_algo_ipc(&algo_params, &msg);
- if (retval)
- break;
- algo_params.reserved = 1;
- retval = sst_send_algo_ipc(&msg);
- if (retval) {
- pr_debug("Error in sst_get_algo = %d\n", retval);
- retval = -EIO;
- break;
- }
- algo_params_copied = (struct snd_ppp_params *)
- sst_drv_ctx->ppp_params_blk.data;
- if (algo_params_copied->size > algo_params.size) {
- pr_debug("mem insufficient to copy\n");
- retval = -EMSGSIZE;
- goto free_mem;
- } else {
- char __user *tmp;
-
- if (copy_to_user(algo_params.params,
- algo_params_copied->params,
- algo_params_copied->size)) {
- retval = -EFAULT;
- goto free_mem;
- }
- tmp = (char __user *)arg + offsetof(
- struct snd_ppp_params, size);
- if (copy_to_user(tmp, &algo_params_copied->size,
- sizeof(__u32))) {
- retval = -EFAULT;
- goto free_mem;
- }
-
- }
-free_mem:
- kfree(algo_params_copied->params);
- kfree(algo_params_copied);
- break;
- }
- return retval;
-}
-
-
-int sst_ioctl_tuning_params(unsigned long arg)
-{
- struct snd_sst_tuning_params params;
- struct ipc_post *msg;
-
- if (copy_from_user(&params, (void __user *)arg, sizeof(params)))
- return -EFAULT;
- if (params.size > SST_MAILBOX_SIZE)
- return -ENOMEM;
- pr_debug("Parameter %d, Stream %d, Size %d\n", params.type,
- params.str_id, params.size);
- if (sst_create_large_msg(&msg))
- return -ENOMEM;
-
- sst_fill_header(&msg->header, IPC_IA_TUNING_PARAMS, 1, params.str_id);
- msg->header.part.data = sizeof(u32) + sizeof(params) + params.size;
- memcpy(msg->mailbox_data, &msg->header.full, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), &params, sizeof(params));
- if (copy_from_user(msg->mailbox_data + sizeof(params),
- (void __user *)(unsigned long)params.addr,
- params.size)) {
- kfree(msg->mailbox_data);
- kfree(msg);
- return -EFAULT;
- }
- return sst_send_algo_ipc(&msg);
-}
-/**
- * intel_sst_ioctl - receives the device ioctl's
- * @file_ptr:pointer to file
- * @cmd:Ioctl cmd
- * @arg:data
- *
- * This function is called by OS when a user space component
- * sends an Ioctl to SST driver
- */
-long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd, unsigned long arg)
-{
- int retval = 0;
- struct ioctl_pvt_data *data = NULL;
- int str_id = 0, minor = 0;
-
- data = file_ptr->private_data;
- if (data) {
- minor = 0;
- str_id = data->str_id;
- } else
- minor = 1;
-
- if (sst_drv_ctx->sst_state != SST_FW_RUNNING)
- return -EBUSY;
-
- switch (_IOC_NR(cmd)) {
- case _IOC_NR(SNDRV_SST_STREAM_PAUSE):
- pr_debug("IOCTL_PAUSE received for %d!\n", str_id);
- if (minor != STREAM_MODULE) {
- retval = -EBADRQC;
- break;
- }
- retval = sst_pause_stream(str_id);
- break;
-
- case _IOC_NR(SNDRV_SST_STREAM_RESUME):
- pr_debug("SNDRV_SST_IOCTL_RESUME received!\n");
- if (minor != STREAM_MODULE) {
- retval = -EBADRQC;
- break;
- }
- retval = sst_resume_stream(str_id);
- break;
-
- case _IOC_NR(SNDRV_SST_STREAM_SET_PARAMS): {
- struct snd_sst_params str_param;
-
- pr_debug("IOCTL_SET_PARAMS received!\n");
- if (minor != STREAM_MODULE) {
- retval = -EBADRQC;
- break;
- }
-
- if (copy_from_user(&str_param, (void __user *)arg,
- sizeof(str_param))) {
- retval = -EFAULT;
- break;
- }
-
- if (!str_id) {
-
- retval = sst_get_stream(&str_param);
- if (retval > 0) {
- struct stream_info *str_info;
- char __user *dest;
-
- sst_drv_ctx->stream_cnt++;
- data->str_id = retval;
- str_info = &sst_drv_ctx->streams[retval];
- str_info->src = SST_DRV;
- dest = (char __user *)arg + offsetof(struct snd_sst_params, stream_id);
- retval = copy_to_user(dest, &retval, sizeof(__u32));
- if (retval)
- retval = -EFAULT;
- } else {
- if (retval == -SST_ERR_INVALID_PARAMS)
- retval = -EINVAL;
- }
- } else {
- pr_debug("SET_STREAM_PARAMS received!\n");
- /* allocated set params only */
- retval = sst_set_stream_param(str_id, &str_param);
- /* Block the call for reply */
- if (!retval) {
- int sfreq = 0, word_size = 0, num_channel = 0;
- sfreq = str_param.sparams.uc.pcm_params.sfreq;
- word_size = str_param.sparams.uc.pcm_params.pcm_wd_sz;
- num_channel = str_param.sparams.uc.pcm_params.num_chan;
- if (str_param.ops == STREAM_OPS_CAPTURE) {
- sst_drv_ctx->scard_ops->set_pcm_audio_params(
- sfreq, word_size, num_channel);
- }
- }
- }
- break;
- }
- case _IOC_NR(SNDRV_SST_SET_VOL): {
- struct snd_sst_vol set_vol;
-
- if (copy_from_user(&set_vol, (void __user *)arg,
- sizeof(set_vol))) {
- pr_debug("copy failed\n");
- retval = -EFAULT;
- break;
- }
- pr_debug("SET_VOLUME received for %d!\n",
- set_vol.stream_id);
- if (minor == STREAM_MODULE && set_vol.stream_id == 0) {
- pr_debug("invalid operation!\n");
- retval = -EPERM;
- break;
- }
- retval = sst_set_vol(&set_vol);
- break;
- }
- case _IOC_NR(SNDRV_SST_GET_VOL): {
- struct snd_sst_vol get_vol;
-
- if (copy_from_user(&get_vol, (void __user *)arg,
- sizeof(get_vol))) {
- retval = -EFAULT;
- break;
- }
- pr_debug("IOCTL_GET_VOLUME received for stream = %d!\n",
- get_vol.stream_id);
- if (minor == STREAM_MODULE && get_vol.stream_id == 0) {
- pr_debug("invalid operation!\n");
- retval = -EPERM;
- break;
- }
- retval = sst_get_vol(&get_vol);
- if (retval) {
- retval = -EIO;
- break;
- }
- pr_debug("id:%d\n, vol:%d, ramp_dur:%d, ramp_type:%d\n",
- get_vol.stream_id, get_vol.volume,
- get_vol.ramp_duration, get_vol.ramp_type);
- if (copy_to_user((struct snd_sst_vol __user *)arg,
- &get_vol, sizeof(get_vol))) {
- retval = -EFAULT;
- break;
- }
- /*sst_print_get_vol_info(str_id, &get_vol);*/
- break;
- }
-
- case _IOC_NR(SNDRV_SST_MUTE): {
- struct snd_sst_mute set_mute;
-
- if (copy_from_user(&set_mute, (void __user *)arg,
- sizeof(set_mute))) {
- retval = -EFAULT;
- break;
- }
- pr_debug("SNDRV_SST_SET_VOLUME received for %d!\n",
- set_mute.stream_id);
- if (minor == STREAM_MODULE && set_mute.stream_id == 0) {
- retval = -EPERM;
- break;
- }
- retval = sst_set_mute(&set_mute);
- break;
- }
- case _IOC_NR(SNDRV_SST_STREAM_GET_PARAMS): {
- struct snd_sst_get_stream_params get_params;
-
- pr_debug("IOCTL_GET_PARAMS received!\n");
- if (minor != 0) {
- retval = -EBADRQC;
- break;
- }
-
- retval = sst_get_stream_params(str_id, &get_params);
- if (retval) {
- retval = -EIO;
- break;
- }
- if (copy_to_user((struct snd_sst_get_stream_params __user *)arg,
- &get_params, sizeof(get_params))) {
- retval = -EFAULT;
- break;
- }
- sst_print_stream_params(&get_params);
- break;
- }
-
- case _IOC_NR(SNDRV_SST_MMAP_PLAY):
- case _IOC_NR(SNDRV_SST_MMAP_CAPTURE): {
- struct snd_sst_mmap_buffs mmap_buf;
-
- pr_debug("SNDRV_SST_MMAP_PLAY/CAPTURE received!\n");
- if (minor != STREAM_MODULE) {
- retval = -EBADRQC;
- break;
- }
- if (copy_from_user(&mmap_buf, (void __user *)arg,
- sizeof(mmap_buf))) {
- retval = -EFAULT;
- break;
- }
- retval = intel_sst_mmap_play_capture(str_id, &mmap_buf);
- break;
- }
- case _IOC_NR(SNDRV_SST_STREAM_DROP):
- pr_debug("SNDRV_SST_IOCTL_DROP received!\n");
- if (minor != STREAM_MODULE) {
- retval = -EINVAL;
- break;
- }
- retval = sst_drop_stream(str_id);
- break;
-
- case _IOC_NR(SNDRV_SST_STREAM_GET_TSTAMP): {
- struct snd_sst_tstamp tstamp = {0};
- unsigned long long time, freq, mod;
-
- pr_debug("SNDRV_SST_STREAM_GET_TSTAMP received!\n");
- if (minor != STREAM_MODULE) {
- retval = -EBADRQC;
- break;
- }
- memcpy_fromio(&tstamp,
- sst_drv_ctx->mailbox + SST_TIME_STAMP + str_id * sizeof(tstamp),
- sizeof(tstamp));
- time = tstamp.samples_rendered;
- freq = (unsigned long long) tstamp.sampling_frequency;
- time = time * 1000; /* converting it to ms */
- mod = do_div(time, freq);
- if (copy_to_user((void __user *)arg, &time,
- sizeof(unsigned long long)))
- retval = -EFAULT;
- break;
- }
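- /*
- * Worked example for the conversion in the case above: with 48000
- * samples rendered at a sampling frequency of 48000 Hz,
- * time = 48000 * 1000 / 48000 = 1000 ms of audio played.
- */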
-
- case _IOC_NR(SNDRV_SST_STREAM_START):{
- struct stream_info *stream;
-
- pr_debug("SNDRV_SST_STREAM_START received!\n");
- if (minor != STREAM_MODULE) {
- retval = -EINVAL;
- break;
- }
- retval = sst_validate_strid(str_id);
- if (retval)
- break;
- stream = &sst_drv_ctx->streams[str_id];
- mutex_lock(&stream->lock);
- if (stream->status == STREAM_INIT &&
- stream->need_draining != true) {
- stream->prev = stream->status;
- stream->status = STREAM_RUNNING;
- if (stream->ops == STREAM_OPS_PLAYBACK ||
- stream->ops == STREAM_OPS_PLAYBACK_DRM) {
- retval = sst_play_frame(str_id);
- } else if (stream->ops == STREAM_OPS_CAPTURE)
- retval = sst_capture_frame(str_id);
- else {
- retval = -EINVAL;
- mutex_unlock(&stream->lock);
- break;
- }
- if (retval < 0) {
- stream->status = STREAM_INIT;
- mutex_unlock(&stream->lock);
- break;
- }
- } else {
- retval = -EINVAL;
- }
- mutex_unlock(&stream->lock);
- break;
- }
-
- case _IOC_NR(SNDRV_SST_SET_TARGET_DEVICE): {
- struct snd_sst_target_device target_device;
-
- pr_debug("SET_TARGET_DEVICE received!\n");
- if (copy_from_user(&target_device, (void __user *)arg,
- sizeof(target_device))) {
- retval = -EFAULT;
- break;
- }
- if (minor != AM_MODULE) {
- retval = -EBADRQC;
- break;
- }
- retval = sst_target_device_select(&target_device);
- break;
- }
-
- case _IOC_NR(SNDRV_SST_DRIVER_INFO): {
- struct snd_sst_driver_info info;
-
- pr_debug("SNDRV_SST_DRIVER_INFO received\n");
- info.version = SST_VERSION_NUM;
- /* hard coded for now; should be obtained dynamically later */
- info.active_pcm_streams = sst_drv_ctx->stream_cnt -
- sst_drv_ctx->encoded_cnt;
- info.active_enc_streams = sst_drv_ctx->encoded_cnt;
- info.max_pcm_streams = MAX_ACTIVE_STREAM - MAX_ENC_STREAM;
- info.max_enc_streams = MAX_ENC_STREAM;
- info.buf_per_stream = sst_drv_ctx->mmap_len;
- if (copy_to_user((void __user *)arg, &info,
- sizeof(info)))
- retval = -EFAULT;
- break;
- }
-
- case _IOC_NR(SNDRV_SST_STREAM_DECODE): {
- struct snd_sst_dbufs param;
- struct snd_sst_dbufs dbufs_local;
- struct snd_sst_buffs ibufs, obufs;
- struct snd_sst_buff_entry *ibuf_tmp, *obuf_tmp;
- char __user *dest;
-
- pr_debug("SNDRV_SST_STREAM_DECODE received\n");
- if (minor != STREAM_MODULE) {
- retval = -EBADRQC;
- break;
- }
- if (copy_from_user(&param, (void __user *)arg,
- sizeof(param))) {
- retval = -EFAULT;
- break;
- }
-
- dbufs_local.input_bytes_consumed = param.input_bytes_consumed;
- dbufs_local.output_bytes_produced =
- param.output_bytes_produced;
-
- if (copy_from_user(&ibufs, (void __user *)param.ibufs, sizeof(ibufs))) {
- retval = -EFAULT;
- break;
- }
- if (copy_from_user(&obufs, (void __user *)param.obufs, sizeof(obufs))) {
- retval = -EFAULT;
- break;
- }
-
- ibuf_tmp = kcalloc(ibufs.entries, sizeof(*ibuf_tmp), GFP_KERNEL);
- obuf_tmp = kcalloc(obufs.entries, sizeof(*obuf_tmp), GFP_KERNEL);
- if (!ibuf_tmp || !obuf_tmp) {
- retval = -ENOMEM;
- goto free_iobufs;
- }
-
- if (copy_from_user(ibuf_tmp, (void __user *)ibufs.buff_entry,
- ibufs.entries * sizeof(*ibuf_tmp))) {
- retval = -EFAULT;
- goto free_iobufs;
- }
- ibufs.buff_entry = ibuf_tmp;
- dbufs_local.ibufs = &ibufs;
-
- if (copy_from_user(obuf_tmp, (void __user *)obufs.buff_entry,
- obufs.entries * sizeof(*obuf_tmp))) {
- retval = -EFAULT;
- goto free_iobufs;
- }
- obufs.buff_entry = obuf_tmp;
- dbufs_local.obufs = &obufs;
-
- retval = sst_decode(str_id, &dbufs_local);
- if (retval) {
- retval = -EAGAIN;
- goto free_iobufs;
- }
-
- dest = (char __user *)arg + offsetof(struct snd_sst_dbufs, input_bytes_consumed);
- if (copy_to_user(dest,
- &dbufs_local.input_bytes_consumed,
- sizeof(unsigned long long))) {
- retval = -EFAULT;
- goto free_iobufs;
- }
-
- dest = (char __user *)arg + offsetof(struct snd_sst_dbufs, output_bytes_produced);
- if (copy_to_user(dest,
- &dbufs_local.output_bytes_produced,
- sizeof(unsigned long long))) {
- retval = -EFAULT;
- goto free_iobufs;
- }
-free_iobufs:
- kfree(ibuf_tmp);
- kfree(obuf_tmp);
- break;
- }
-
- case _IOC_NR(SNDRV_SST_STREAM_DRAIN):
- pr_debug("SNDRV_SST_STREAM_DRAIN received\n");
- if (minor != STREAM_MODULE) {
- retval = -EINVAL;
- break;
- }
- retval = sst_drain_stream(str_id);
- break;
-
- case _IOC_NR(SNDRV_SST_STREAM_BYTES_DECODED): {
- unsigned long long __user *bytes = (unsigned long long __user *)arg;
- struct snd_sst_tstamp tstamp = {0};
-
- pr_debug("STREAM_BYTES_DECODED received!\n");
- if (minor != STREAM_MODULE) {
- retval = -EINVAL;
- break;
- }
- memcpy_fromio(&tstamp,
- sst_drv_ctx->mailbox + SST_TIME_STAMP + str_id * sizeof(tstamp),
- sizeof(tstamp));
- if (copy_to_user(bytes, &tstamp.bytes_processed,
- sizeof(*bytes)))
- retval = -EFAULT;
- break;
- }
- case _IOC_NR(SNDRV_SST_FW_INFO): {
- struct snd_sst_fw_info *fw_info;
-
- pr_debug("SNDRV_SST_FW_INFO received\n");
-
- fw_info = kzalloc(sizeof(*fw_info), GFP_ATOMIC);
- if (!fw_info) {
- retval = -ENOMEM;
- break;
- }
- retval = sst_get_fw_info(fw_info);
- if (retval) {
- retval = -EIO;
- kfree(fw_info);
- break;
- }
- if (copy_to_user((struct snd_sst_dbufs __user *)arg,
- fw_info, sizeof(*fw_info))) {
- kfree(fw_info);
- retval = -EFAULT;
- break;
- }
- /*sst_print_fw_info(fw_info);*/
- kfree(fw_info);
- break;
- }
- case _IOC_NR(SNDRV_SST_GET_ALGO):
- case _IOC_NR(SNDRV_SST_SET_ALGO):
- if (minor != AM_MODULE) {
- retval = -EBADRQC;
- break;
- }
- retval = intel_sst_ioctl_dsp(cmd, arg);
- break;
-
- case _IOC_NR(SNDRV_SST_TUNING_PARAMS):
- if (minor != AM_MODULE) {
- retval = -EBADRQC;
- break;
- }
- retval = sst_ioctl_tuning_params(arg);
- break;
-
- default:
- retval = -EINVAL;
- }
- pr_debug("intel_sst_ioctl:complete ret code = %d\n", retval);
- return retval;
-}
-
diff --git a/drivers/staging/intel_sst/intel_sst_common.h b/drivers/staging/intel_sst/intel_sst_common.h
deleted file mode 100644
index 870981b..0000000
--- a/drivers/staging/intel_sst/intel_sst_common.h
+++ /dev/null
@@ -1,623 +0,0 @@
-#ifndef __INTEL_SST_COMMON_H__
-#define __INTEL_SST_COMMON_H__
-/*
- * intel_sst_common.h - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corporation
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * Common private declarations for SST
- */
-
-#define SST_DRIVER_VERSION "1.2.17"
-#define SST_VERSION_NUM 0x1217
-
-/* driver names */
-#define SST_DRV_NAME "intel_sst_driver"
-#define SST_MRST_PCI_ID 0x080A
-#define SST_MFLD_PCI_ID 0x082F
-#define PCI_ID_LENGTH 4
-#define SST_SUSPEND_DELAY 2000
-#define FW_CONTEXT_MEM (64*1024)
-
-enum sst_states {
- SST_FW_LOADED = 1,
- SST_FW_RUNNING,
- SST_UN_INIT,
- SST_ERROR,
- SST_SUSPENDED
-};
-
-#define MAX_ACTIVE_STREAM 3
-#define MAX_ENC_STREAM 1
-#define MAX_AM_HANDLES 1
-#define ALLOC_TIMEOUT 5000
-/* SST numbers */
-#define SST_BLOCK_TIMEOUT 5000
-#define TARGET_DEV_BLOCK_TIMEOUT 5000
-
-#define BLOCK_UNINIT -1
-#define RX_TIMESLOT_UNINIT -1
-
-/* SST register map */
-#define SST_CSR 0x00
-#define SST_PISR 0x08
-#define SST_PIMR 0x10
-#define SST_ISRX 0x18
-#define SST_IMRX 0x28
-#define SST_IPCX 0x38 /* IPC IA-SST */
-#define SST_IPCD 0x40 /* IPC SST-IA */
-#define SST_ISRD 0x20 /* dummy register for shim workaround */
-#define SST_SHIM_SIZE 0X44
-
-#define SPI_MODE_ENABLE_BASE_ADDR 0xffae4000
-#define FW_SIGNATURE_SIZE 4
-
-/* PMIC and SST hardware states */
-enum sst_mad_states {
- SND_MAD_UN_INIT = 0,
- SND_MAD_INIT_DONE,
-};
-
-/* stream states */
-enum sst_stream_states {
- STREAM_UN_INIT = 0, /* Freed/Not used stream */
- STREAM_RUNNING = 1, /* Running */
- STREAM_PAUSED = 2, /* Paused stream */
- STREAM_DECODE = 3, /* stream is in decoding only state */
- STREAM_INIT = 4, /* stream init, waiting for data */
-};
-
-
-enum sst_ram_type {
- SST_IRAM = 1,
- SST_DRAM = 2,
-};
-/* SST shim registers to structure mapping */
-union config_status_reg {
- struct {
- u32 mfld_strb:1;
- u32 sst_reset:1;
- u32 hw_rsvd:3;
- u32 sst_clk:2;
- u32 bypass:3;
- u32 run_stall:1;
- u32 rsvd1:2;
- u32 strb_cntr_rst:1;
- u32 rsvd:18;
- } part;
- u32 full;
-};
-
-union interrupt_reg {
- struct {
- u32 done_interrupt:1;
- u32 busy_interrupt:1;
- u32 rsvd:30;
- } part;
- u32 full;
-};
-
-union sst_pisr_reg {
- struct {
- u32 pssp0:1;
- u32 pssp1:1;
- u32 rsvd0:3;
- u32 dmac:1;
- u32 rsvd1:26;
- } part;
- u32 full;
-};
-
-union sst_pimr_reg {
- struct {
- u32 ssp0:1;
- u32 ssp1:1;
- u32 rsvd0:3;
- u32 dmac:1;
- u32 rsvd1:10;
- u32 ssp0_sc:1;
- u32 ssp1_sc:1;
- u32 rsvd2:3;
- u32 dmac_sc:1;
- u32 rsvd3:10;
- } part;
- u32 full;
-};
-
-
-struct sst_stream_bufs {
- struct list_head node;
- u32 size;
- const char *addr;
- u32 data_copied;
- bool in_use;
- u32 offset;
-};
-
-struct snd_sst_user_cap_list {
- unsigned int iov_index; /* index of iov */
- unsigned long iov_offset; /* offset in iov */
- unsigned long offset; /* offset in kmem */
- unsigned long size; /* size copied */
- struct list_head node;
-};
-/*
- * This structure is used to block a user/fw data call until the
- * corresponding fw/user call completes
- */
-struct sst_block {
- bool condition; /* condition for blocking check */
- int ret_code; /* ret code when block is released */
- void *data; /* data to be passed with the block, if any */
- bool on;
-};
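-/*
- * Typical block/wake usage (illustrative sketch, assuming process context):
- * the sender arms the block, posts its IPC message and sleeps; the FW reply
- * path fills ret_code/condition and wakes it:
- *
- *	block->condition = false;
- *	block->ret_code = 0;
- *	block->on = true;
- *	... queue and post the IPC message ...
- *	retval = sst_wait_interruptible_timeout(sst_drv_ctx, block,
- *						SST_BLOCK_TIMEOUT);
- */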
-
-enum snd_sst_buf_type {
- SST_BUF_USER_STATIC = 1,
- SST_BUF_USER_DYNAMIC,
- SST_BUF_MMAP_STATIC,
- SST_BUF_MMAP_DYNAMIC,
-};
-
-enum snd_src {
- SST_DRV = 1,
- MAD_DRV = 2
-};
-
-/**
- * struct stream_info - structure that holds the stream information
- *
- * @status : stream current state
- * @prev : stream prev state
- * @codec : stream codec
- * @sst_id : stream id
- * @ops : stream operation pb/cp/drm...
- * @bufs: stream buffer list
- * @lock : stream mutex for protecting state
- * @pcm_lock : spinlock for pcm path only
- * @mmapped : is stream mmapped
- * @sg_index : current stream user buffer index
- * @cur_ptr : stream user buffer pointer
- * @buf_entry : current user buffer
- * @data_blk : stream block for data operations
- * @ctrl_blk : stream block for ctrl operations
- * @buf_type : stream user buffer type
- * @pcm_substream : PCM substream
- * @period_elapsed : PCM period elapsed callback
- * @sfreq : stream sampling freq
- * @decode_ibuf : Decoded i/p buffers pointer
- * @decode_obuf : Decoded o/p buffers pointer
- * @decode_isize : Decoded i/p buffers size
- * @decode_osize : Decoded o/p buffers size
- * @decode_ibuf_type : Decoded i/p buffer type
- * @decode_obuf_type : Decoded o/p buffer type
- * @idecode_alloc : Decode alloc index
- * @need_draining : stream set for drain
- * @str_type : stream type
- * @curr_bytes : current bytes decoded
- * @cumm_bytes : cumulative bytes decoded
- * @src : stream source
- * @device : output device type (medfield only)
- * @pcm_slot : pcm slot value
- */
-struct stream_info {
- unsigned int status;
- unsigned int prev;
- u8 codec;
- unsigned int sst_id;
- unsigned int ops;
- struct list_head bufs;
- struct mutex lock; /* mutex */
- spinlock_t pcm_lock;
- bool mmapped;
- unsigned int sg_index; /* current buf Index */
- unsigned char __user *cur_ptr; /* Current static bufs */
- struct snd_sst_buf_entry __user *buf_entry;
- struct sst_block data_blk; /* stream ops block */
- struct sst_block ctrl_blk; /* stream control cmd block */
- enum snd_sst_buf_type buf_type;
- void *pcm_substream;
- void (*period_elapsed) (void *pcm_substream);
- unsigned int sfreq;
- void *decode_ibuf, *decode_obuf;
- unsigned int decode_isize, decode_osize;
- u8 decode_ibuf_type, decode_obuf_type;
- unsigned int idecode_alloc;
- unsigned int need_draining;
- unsigned int str_type;
- u32 curr_bytes;
- u32 cumm_bytes;
- u32 src;
- enum snd_sst_audio_device_type device;
- u8 pcm_slot;
-};
-
-/*
- * struct stream_alloc_block - this structure is used for blocking the user's
- * alloc calls until the FW responds to those alloc calls
- *
- * @sst_id : session id of blocked stream
- * @ops_block : ops block structure
- */
-struct stream_alloc_block {
- int sst_id; /* session id of blocked stream */
- struct sst_block ops_block; /* ops block structure */
-};
-
-#define SST_FW_SIGN "$SST"
-#define SST_FW_LIB_SIGN "$LIB"
-
-/*
- * struct fw_header - FW file headers
- *
- * @signature : FW signature
- * @file_size : size of fw minus this header
- * @modules : # of modules
- * @file_format : version of header format
- * @reserved : reserved fields
- */
-struct fw_header {
- unsigned char signature[FW_SIGNATURE_SIZE]; /* FW signature */
- u32 file_size; /* size of fw minus this header */
- u32 modules; /* # of modules */
- u32 file_format; /* version of header format */
- u32 reserved[4];
-};
-
-struct fw_module_header {
- unsigned char signature[FW_SIGNATURE_SIZE]; /* module signature */
- u32 mod_size; /* size of module */
- u32 blocks; /* # of blocks */
- u32 type; /* codec type, pp lib */
- u32 entry_point;
-};
-
-struct dma_block_info {
- enum sst_ram_type type; /* IRAM/DRAM */
- u32 size; /* Bytes */
- u32 ram_offset; /* Offset in I/DRAM */
- u32 rsvd; /* Reserved field */
-};
-
-struct ioctl_pvt_data {
- int str_id;
- int pvt_id;
-};
-
-struct sst_ipc_msg_wq {
- union ipc_header header;
- char mailbox[SST_MAILBOX_SIZE];
- struct work_struct wq;
-};
-
-struct mad_ops_wq {
- int stream_id;
- enum sst_controls control_op;
- struct work_struct wq;
-
-};
-
-#define SST_MMAP_PAGES (640*1024 / PAGE_SIZE)
-#define SST_MMAP_STEP (40*1024 / PAGE_SIZE)
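-/*
- * With the common 4 KB PAGE_SIZE this evaluates to 160 pages of mmap memory
- * in total, handed out in 10-page (40 KB) steps.
- */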
-
-/**
- * struct intel_sst_drv - SST driver context structure
- *
- * @pmic_state : pmic state
- * @pmic_vendor : pmic vendor detected
- * @sst_state : current sst device state
- * @pci_id : PCI device id loaded
- * @shim : SST shim pointer
- * @mailbox : SST mailbox pointer
- * @iram : SST IRAM pointer
- * @dram : SST DRAM pointer
- * @shim_phy_add : SST shim phy addr
- * @ipc_dispatch_list : ipc messages dispatched
- * @ipc_post_msg_wq : wq to post IPC messages context
- * @ipc_process_msg : wq to process msgs from FW context
- * @ipc_process_reply : wq to process reply from FW context
- * @ipc_post_msg : wq to post reply from FW context
- * @mad_ops : MAD driver operations registered
- * @mad_wq : MAD driver wq
- * @post_msg_wq : wq to post IPC messages
- * @process_msg_wq : wq to process msgs from FW
- * @process_reply_wq : wq to process reply from FW
- * @streams : sst stream contexts
- * @alloc_block : block structure for alloc
- * @tgt_dev_blk : block structure for target device
- * @fw_info_blk : block structure for fw info block
- * @vol_info_blk : block structure for vol info block
- * @mute_info_blk : block structure for mute info block
- * @hs_info_blk : block structure for hs info block
- * @list_lock : sst driver list lock (deprecated)
- * @list_spin_lock : sst driver spin lock block
- * @scard_ops : sst card ops
- * @pci : sst pci device structure
- * @active_streams : sst active streams
- * @sst_lock : sst device lock
- * @stream_lock : sst stream lock
- * @unique_id : sst unique id
- * @stream_cnt : total sst active stream count
- * @pb_streams : total active pb streams
- * @cp_streams : total active cp streams
- * @lpe_stalled : lpe stall status
- * @pmic_port_instance : active pmic port instance
- * @rx_time_slot_status : active rx slot
- * @lpaudio_start : lpaudio status
- * @audio_start : audio status
- * @devt_d : pointer to /dev/lpe node
- * @devt_c : pointer to /dev/lpe_ctrl node
- * @max_streams : max streams allowed
- */
-struct intel_sst_drv {
- bool pmic_state;
- int pmic_vendor;
- int sst_state;
- unsigned int pci_id;
- void __iomem *shim;
- void __iomem *mailbox;
- void __iomem *iram;
- void __iomem *dram;
- unsigned int shim_phy_add;
- struct list_head ipc_dispatch_list;
- struct work_struct ipc_post_msg_wq;
- struct sst_ipc_msg_wq ipc_process_msg;
- struct sst_ipc_msg_wq ipc_process_reply;
- struct sst_ipc_msg_wq ipc_post_msg;
- struct mad_ops_wq mad_ops;
- wait_queue_head_t wait_queue;
- struct workqueue_struct *mad_wq;
- struct workqueue_struct *post_msg_wq;
- struct workqueue_struct *process_msg_wq;
- struct workqueue_struct *process_reply_wq;
-
- struct stream_info streams[MAX_NUM_STREAMS];
- struct stream_alloc_block alloc_block[MAX_ACTIVE_STREAM];
- struct sst_block tgt_dev_blk, fw_info_blk, ppp_params_blk,
- vol_info_blk, mute_info_blk, hs_info_blk;
- struct mutex list_lock;/* mutex for IPC list locking */
- spinlock_t list_spin_lock; /* mutex for IPC list locking */
- struct snd_pmic_ops *scard_ops;
- struct pci_dev *pci;
- int active_streams[MAX_NUM_STREAMS];
- void *mmap_mem;
- struct mutex sst_lock;
- struct mutex stream_lock;
- unsigned int mmap_len;
- unsigned int unique_id;
- unsigned int stream_cnt; /* total streams */
- unsigned int encoded_cnt; /* encoded streams only */
- unsigned int am_cnt;
- unsigned int pb_streams; /* pb streams active */
- unsigned int cp_streams; /* cp streams active */
- unsigned int lpe_stalled; /* LPE is stalled or not */
- unsigned int pmic_port_instance; /*pmic port instance*/
- int rx_time_slot_status;
- unsigned int lpaudio_start;
- /* 1 - LPA stream(MP3 pb) in progress*/
- unsigned int audio_start;
- dev_t devt_d, devt_c;
- unsigned int max_streams;
- unsigned int *fw_cntx;
- unsigned int fw_cntx_size;
-
- unsigned int fw_downloaded;
-};
-
-extern struct intel_sst_drv *sst_drv_ctx;
-
-#define CHIP_REV_REG 0xff108000
-#define CHIP_REV_ADDR 0x78
-
-/* misc definitions */
-#define FW_DWNL_ID 0xFF
-#define LOOP1 0x11111111
-#define LOOP2 0x22222222
-#define LOOP3 0x33333333
-#define LOOP4 0x44444444
-
-#define SST_DEFAULT_PMIC_PORT 1 /*audio port*/
-/* NOTE: status will have +ve for good cases and -ve for error ones */
-#define MAX_STREAM_FIELD 255
-
-int sst_alloc_stream(char *params, unsigned int stream_ops, u8 codec,
- unsigned int session_id);
-int sst_alloc_stream_response(unsigned int str_id,
- struct snd_sst_alloc_response *response);
-int sst_stalled(void);
-int sst_pause_stream(int id);
-int sst_resume_stream(int id);
-int sst_enable_rx_timeslot(int status);
-int sst_drop_stream(int id);
-int sst_free_stream(int id);
-int sst_start_stream(int streamID);
-int sst_play_frame(int streamID);
-int sst_pcm_play_frame(int str_id, struct sst_stream_bufs *sst_buf);
-int sst_capture_frame(int streamID);
-int sst_set_stream_param(int streamID, struct snd_sst_params *str_param);
-int sst_target_device_select(struct snd_sst_target_device *target_device);
-int sst_decode(int str_id, struct snd_sst_dbufs *dbufs);
-int sst_get_decoded_bytes(int str_id, unsigned long long *bytes);
-int sst_get_fw_info(struct snd_sst_fw_info *info);
-int sst_get_stream_params(int str_id,
- struct snd_sst_get_stream_params *get_params);
-int sst_get_stream(struct snd_sst_params *str_param);
-int sst_get_stream_allocated(struct snd_sst_params *str_param,
- struct snd_sst_lib_download **lib_dnld);
-int sst_drain_stream(int str_id);
-int sst_get_vol(struct snd_sst_vol *set_vol);
-int sst_set_vol(struct snd_sst_vol *set_vol);
-int sst_set_mute(struct snd_sst_mute *set_mute);
-
-
-void sst_post_message(struct work_struct *work);
-void sst_process_message(struct work_struct *work);
-void sst_process_reply(struct work_struct *work);
-void sst_process_mad_ops(struct work_struct *work);
-void sst_process_mad_jack_detection(struct work_struct *work);
-
-long intel_sst_ioctl(struct file *file_ptr, unsigned int cmd,
- unsigned long arg);
-int intel_sst_open(struct inode *i_node, struct file *file_ptr);
-int intel_sst_open_cntrl(struct inode *i_node, struct file *file_ptr);
-int intel_sst_release(struct inode *i_node, struct file *file_ptr);
-int intel_sst_release_cntrl(struct inode *i_node, struct file *file_ptr);
-int intel_sst_read(struct file *file_ptr, char __user *buf,
- size_t count, loff_t *ppos);
-int intel_sst_write(struct file *file_ptr, const char __user *buf,
- size_t count, loff_t *ppos);
-int intel_sst_mmap(struct file *fp, struct vm_area_struct *vma);
-ssize_t intel_sst_aio_write(struct kiocb *kiocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t offset);
-ssize_t intel_sst_aio_read(struct kiocb *kiocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t offset);
-
-int sst_load_fw(const struct firmware *fw, void *context);
-int sst_load_library(struct snd_sst_lib_download *lib, u8 ops);
-int sst_spi_mode_enable(void);
-int sst_get_block_stream(struct intel_sst_drv *sst_drv_ctx);
-
-int sst_wait_interruptible(struct intel_sst_drv *sst_drv_ctx,
- struct sst_block *block);
-int sst_wait_interruptible_timeout(struct intel_sst_drv *sst_drv_ctx,
- struct sst_block *block, int timeout);
-int sst_wait_timeout(struct intel_sst_drv *sst_drv_ctx,
- struct stream_alloc_block *block);
-int sst_create_large_msg(struct ipc_post **arg);
-int sst_create_short_msg(struct ipc_post **arg);
-void sst_wake_up_alloc_block(struct intel_sst_drv *sst_drv_ctx,
- u8 sst_id, int status, void *data);
-void sst_clear_interrupt(void);
-int intel_sst_resume(struct pci_dev *pci);
-int sst_download_fw(void);
-void free_stream_context(unsigned int str_id);
-void sst_clean_stream(struct stream_info *stream);
-
-/*
- * sst_fill_header - inline to fill sst header
- *
- * @header : ipc header
- * @msg : IPC message to be sent
- * @large : is ipc large msg
- * @str_id : stream id
- *
- * this function is an inline function that sets the headers before
- * sending a message
- */
-static inline void sst_fill_header(union ipc_header *header,
- int msg, int large, int str_id)
-{
- header->part.msg_id = msg;
- header->part.str_id = str_id;
- header->part.large = large;
- header->part.done = 0;
- header->part.busy = 1;
- header->part.data = 0;
-}
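-/*
- * Illustrative example (mirroring sst_ioctl_tuning_params()): for a large
- * message the header word is followed by the payload in the mailbox, so a
- * sender typically does:
- *
- *	sst_fill_header(&msg->header, IPC_IA_TUNING_PARAMS, 1, str_id);
- *	msg->header.part.data = sizeof(u32) + payload_size;
- *	memcpy(msg->mailbox_data, &msg->header.full, sizeof(u32));
- *	memcpy(msg->mailbox_data + sizeof(u32), payload, payload_size);
- *
- * where payload/payload_size are placeholders for the caller's data.
- */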
-
-/*
- * sst_assign_pvt_id - assign a pvt id for stream
- *
- * @sst_drv_ctx : driver context
- *
- * this inline function assigns a private id for calls that don't have a stream
- * context yet, should be called with lock held
- */
-static inline unsigned int sst_assign_pvt_id(struct intel_sst_drv *sst_drv_ctx)
-{
- sst_drv_ctx->unique_id++;
- if (sst_drv_ctx->unique_id >= MAX_NUM_STREAMS)
- sst_drv_ctx->unique_id = 1;
- return sst_drv_ctx->unique_id;
-}
-
-/*
- * sst_init_stream - this function initializes the stream context
- *
- * @stream : stream structure
- * @codec : codec for stream
- * @sst_id : stream id
- * @ops : stream operation
- * @slot : stream pcm slot
- * @device : device type
- *
- * this inline function initializes the stream context for an allocated stream
- */
-static inline void sst_init_stream(struct stream_info *stream,
- int codec, int sst_id, int ops, u8 slot,
- enum snd_sst_audio_device_type device)
-{
- stream->status = STREAM_INIT;
- stream->prev = STREAM_UN_INIT;
- stream->codec = codec;
- stream->sst_id = sst_id;
- stream->str_type = 0;
- stream->ops = ops;
- stream->data_blk.on = false;
- stream->data_blk.condition = false;
- stream->data_blk.ret_code = 0;
- stream->data_blk.data = NULL;
- stream->ctrl_blk.on = false;
- stream->ctrl_blk.condition = false;
- stream->ctrl_blk.ret_code = 0;
- stream->ctrl_blk.data = NULL;
- stream->need_draining = false;
- stream->decode_ibuf = NULL;
- stream->decode_isize = 0;
- stream->mmapped = false;
- stream->pcm_slot = slot;
- stream->device = device;
-}
-
-
-/*
- * sst_validate_strid - this function validates the stream id
- *
- * @str_id : stream id to be validated
- *
- * returns 0 if valid stream
- */
-static inline int sst_validate_strid(int str_id)
-{
- if (str_id <= 0 || str_id > sst_drv_ctx->max_streams) {
- pr_err("SST ERR: invalid stream id : %d MAX_STREAMS:%d\n",
- str_id, sst_drv_ctx->max_streams);
- return -EINVAL;
- } else
- return 0;
-}
-
-static inline int sst_shim_write(void __iomem *addr, int offset, int value)
-{
-
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
- writel(value, addr + SST_ISRD); /*dummy*/
- writel(value, addr + offset);
- return 0;
-}
-
-static inline int sst_shim_read(void __iomem *addr, int offset)
-{
- return readl(addr + offset);
-}
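-/*
- * Example read-modify-write of a shim register using the register unions
- * above (illustrative sketch, not a sequence taken from this driver):
- *
- *	union config_status_reg csr;
- *
- *	csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- *	csr.part.run_stall = 1;
- *	sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
- */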
-#endif /* __INTEL_SST_COMMON_H__ */
diff --git a/drivers/staging/intel_sst/intel_sst_drv_interface.c b/drivers/staging/intel_sst/intel_sst_drv_interface.c
deleted file mode 100644
index 22bd29c..0000000
--- a/drivers/staging/intel_sst/intel_sst_drv_interface.c
+++ /dev/null
@@ -1,564 +0,0 @@
-/*
- * intel_sst_interface.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * This driver exposes the audio engine functionalities to the ALSA
- * and middleware.
- * Upper layer interfaces (MAD driver, MMF) to SST driver
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/delay.h>
-#include <linux/pci.h>
-#include <linux/fs.h>
-#include <linux/firmware.h>
-#include <linux/pm_runtime.h>
-#include <linux/export.h>
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intel_sst_fw_ipc.h"
-#include "intel_sst_common.h"
-
-
-/*
- * sst_download_fw - download the audio firmware to DSP
- *
- * This function is called when the FW needs to be downloaded to SST DSP engine
- */
-int sst_download_fw(void)
-{
- int retval;
- const struct firmware *fw_sst;
- char name[20];
-
- if (sst_drv_ctx->sst_state != SST_UN_INIT)
- return -EPERM;
-
- /* Reloading the firmware is not needed on MRST */
- if ((sst_drv_ctx->pci_id == SST_MRST_PCI_ID) && sst_drv_ctx->fw_downloaded) {
- pr_debug("FW already downloaded, skip for MRST platform\n");
- sst_drv_ctx->sst_state = SST_FW_RUNNING;
- return 0;
- }
-
- snprintf(name, sizeof(name), "%s%04x%s", "fw_sst_",
- sst_drv_ctx->pci_id, ".bin");
-
- pr_debug("Downloading %s FW now...\n", name);
- retval = request_firmware(&fw_sst, name, &sst_drv_ctx->pci->dev);
- if (retval) {
- pr_err("request fw failed %d\n", retval);
- return retval;
- }
- sst_drv_ctx->alloc_block[0].sst_id = FW_DWNL_ID;
- sst_drv_ctx->alloc_block[0].ops_block.condition = false;
- retval = sst_load_fw(fw_sst, NULL);
- if (retval)
- goto end_restore;
-
- retval = sst_wait_timeout(sst_drv_ctx, &sst_drv_ctx->alloc_block[0]);
- if (retval)
- pr_err("fw download failed %d\n" , retval);
- else
- sst_drv_ctx->fw_downloaded = 1;
-
-end_restore:
- release_firmware(fw_sst);
- sst_drv_ctx->alloc_block[0].sst_id = BLOCK_UNINIT;
- return retval;
-}
-
-
-/*
- * sst_stalled - this function checks if the lpe is in stalled state
- */
-int sst_stalled(void)
-{
- int retry = 1000;
- int retval = -1;
-
- while (retry) {
- if (!sst_drv_ctx->lpe_stalled)
- return 0;
- /*wait for time and re-check*/
- msleep(1);
-
- retry--;
- }
- pr_debug("in Stalled State\n");
- return retval;
-}
-
-void free_stream_context(unsigned int str_id)
-{
- struct stream_info *stream;
-
- if (!sst_validate_strid(str_id)) {
- /* str_id is valid, so stream is allocated */
- stream = &sst_drv_ctx->streams[str_id];
- if (sst_free_stream(str_id))
- sst_clean_stream(&sst_drv_ctx->streams[str_id]);
- if (stream->ops == STREAM_OPS_PLAYBACK ||
- stream->ops == STREAM_OPS_PLAYBACK_DRM) {
- sst_drv_ctx->pb_streams--;
- if (sst_drv_ctx->pci_id == SST_MFLD_PCI_ID)
- sst_drv_ctx->scard_ops->power_down_pmic_pb(
- stream->device);
- else {
- if (sst_drv_ctx->pb_streams == 0)
- sst_drv_ctx->scard_ops->
- power_down_pmic_pb(stream->device);
- }
- } else if (stream->ops == STREAM_OPS_CAPTURE) {
- sst_drv_ctx->cp_streams--;
- if (sst_drv_ctx->cp_streams == 0)
- sst_drv_ctx->scard_ops->power_down_pmic_cp(
- stream->device);
- }
- if (sst_drv_ctx->pb_streams == 0
- && sst_drv_ctx->cp_streams == 0)
- sst_drv_ctx->scard_ops->power_down_pmic();
- }
-}
-
-/*
- * sst_get_stream_allocated - this function gets a stream allocated with
- * the given params
- *
- * @str_param : stream params
- * @lib_dnld : pointer to pointer of lib download struct
- *
- * This creates a new stream id for a stream; in case a library needs to be
- * downloaded to the DSP, it downloads it as well
- */
-int sst_get_stream_allocated(struct snd_sst_params *str_param,
- struct snd_sst_lib_download **lib_dnld)
-{
- int retval, str_id;
- struct stream_info *str_info;
-
- retval = sst_alloc_stream((char *) &str_param->sparams, str_param->ops,
- str_param->codec, str_param->device_type);
- if (retval < 0) {
- pr_err("sst_alloc_stream failed %d\n", retval);
- return retval;
- }
- pr_debug("Stream allocated %d\n", retval);
- str_id = retval;
- str_info = &sst_drv_ctx->streams[str_id];
- /* Block the call for reply */
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
- if ((retval != 0) || (str_info->ctrl_blk.ret_code != 0)) {
- pr_debug("FW alloc failed retval %d, ret_code %d\n",
- retval, str_info->ctrl_blk.ret_code);
- str_id = -str_info->ctrl_blk.ret_code; /*return error*/
- *lib_dnld = str_info->ctrl_blk.data;
- sst_clean_stream(str_info);
- } else
- pr_debug("FW Stream allocated success\n");
- return str_id; /*will ret either error (in above if) or correct str id*/
-}
-
-/*
- * sst_get_sfreq - this function returns the frequency of the stream
- *
- * @str_param : stream params
- */
-static int sst_get_sfreq(struct snd_sst_params *str_param)
-{
- switch (str_param->codec) {
- case SST_CODEC_TYPE_PCM:
- return 48000; /*str_param->sparams.uc.pcm_params.sfreq;*/
- case SST_CODEC_TYPE_MP3:
- return str_param->sparams.uc.mp3_params.sfreq;
- case SST_CODEC_TYPE_AAC:
- return str_param->sparams.uc.aac_params.sfreq;
- case SST_CODEC_TYPE_WMA9:
- return str_param->sparams.uc.wma_params.sfreq;
- default:
- return 0;
- }
-}
-
-/*
- * sst_get_stream - this function prepares for stream allocation
- *
- * @str_param : stream param
- */
-int sst_get_stream(struct snd_sst_params *str_param)
-{
- int i, retval;
- struct stream_info *str_info;
- struct snd_sst_lib_download *lib_dnld;
-
- /* stream is not allocated, we are allocating */
- retval = sst_get_stream_allocated(str_param, &lib_dnld);
- if (retval == -(SST_LIB_ERR_LIB_DNLD_REQUIRED)) {
- /* codec download is required */
- struct snd_sst_alloc_response *response;
-
- pr_debug("Codec is required.... trying that\n");
- if (lib_dnld == NULL) {
- pr_err("lib download null!!! abort\n");
- return -EIO;
- }
- i = sst_get_block_stream(sst_drv_ctx);
- pr_debug("alloc block allocated = %d\n", i);
- if (i < 0) {
- kfree(lib_dnld);
- return -ENOMEM;
- }
- response = sst_drv_ctx->alloc_block[i].ops_block.data;
- retval = sst_load_library(lib_dnld, str_param->ops);
- kfree(lib_dnld);
-
- sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
- if (!retval) {
- pr_debug("codec was downloaded successfully\n");
-
- retval = sst_get_stream_allocated(str_param, &lib_dnld);
- if (retval <= 0)
- goto err;
-
- pr_debug("Alloc done stream id %d\n", retval);
- } else {
- pr_debug("codec download failed\n");
- retval = -EIO;
- goto err;
- }
- } else if (retval <= 0)
- goto err;
- /*else
- set_port_params(str_param, str_param->ops);*/
-
- /* store sampling freq */
- str_info = &sst_drv_ctx->streams[retval];
- str_info->sfreq = sst_get_sfreq(str_param);
-
- /* power on the analog, if reqd */
- if (str_param->ops == STREAM_OPS_PLAYBACK ||
- str_param->ops == STREAM_OPS_PLAYBACK_DRM) {
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
- sst_drv_ctx->scard_ops->power_up_pmic_pb(
- sst_drv_ctx->pmic_port_instance);
- else
- sst_drv_ctx->scard_ops->power_up_pmic_pb(
- str_info->device);
- /*Only if the playback is MP3 - Send a message*/
- sst_drv_ctx->pb_streams++;
- } else if (str_param->ops == STREAM_OPS_CAPTURE) {
-
- sst_drv_ctx->scard_ops->power_up_pmic_cp(
- sst_drv_ctx->pmic_port_instance);
- /* Send a message if not sent already */
- sst_drv_ctx->cp_streams++;
- }
-
-err:
- return retval;
-}
-
-void sst_process_mad_ops(struct work_struct *work)
-{
-
- struct mad_ops_wq *mad_ops =
- container_of(work, struct mad_ops_wq, wq);
- int retval = 0;
-
- switch (mad_ops->control_op) {
- case SST_SND_PAUSE:
- retval = sst_pause_stream(mad_ops->stream_id);
- break;
- case SST_SND_RESUME:
- retval = sst_resume_stream(mad_ops->stream_id);
- break;
- case SST_SND_DROP:
- retval = sst_drop_stream(mad_ops->stream_id);
- break;
- case SST_SND_START:
- pr_debug("SST Debug: start stream\n");
- retval = sst_start_stream(mad_ops->stream_id);
- break;
- case SST_SND_STREAM_PROCESS:
- pr_debug("play/capt frames...\n");
- break;
- default:
- pr_err(" wrong control_ops reported\n");
- }
- return;
-}
-
-void send_intial_rx_timeslot(void)
-{
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID &&
- sst_drv_ctx->rx_time_slot_status != RX_TIMESLOT_UNINIT
- && sst_drv_ctx->pmic_vendor != SND_NC)
- sst_enable_rx_timeslot(sst_drv_ctx->rx_time_slot_status);
-}
-
-/*
- * sst_open_pcm_stream - Open PCM interface
- *
- * @str_param: parameters of pcm stream
- *
- * This function is called by MID sound card driver to open
- * a new pcm interface
- */
-int sst_open_pcm_stream(struct snd_sst_params *str_param)
-{
- struct stream_info *str_info;
- int retval;
-
- pm_runtime_get_sync(&sst_drv_ctx->pci->dev);
-
- if (sst_drv_ctx->sst_state == SST_SUSPENDED) {
- /* LPE is suspended, resume it before proceeding*/
- pr_debug("Resuming from Suspended state\n");
- retval = intel_sst_resume(sst_drv_ctx->pci);
- if (retval) {
- pr_err("Resume Failed = %#x, abort\n", retval);
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- return retval;
- }
- }
- if (sst_drv_ctx->sst_state == SST_UN_INIT) {
- /* FW is not downloaded */
- pr_debug("DSP Downloading FW now...\n");
- retval = sst_download_fw();
- if (retval) {
- pr_err("FW download fail %x, abort\n", retval);
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- return retval;
- }
- send_intial_rx_timeslot();
- }
-
- if (!str_param) {
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- return -EINVAL;
- }
-
- retval = sst_get_stream(str_param);
- if (retval > 0) {
- sst_drv_ctx->stream_cnt++;
- str_info = &sst_drv_ctx->streams[retval];
- str_info->src = MAD_DRV;
- } else
- pm_runtime_put(&sst_drv_ctx->pci->dev);
-
- return retval;
-}
-
-/*
- * sst_close_pcm_stream - Close PCM interface
- *
- * @str_id: stream id to be closed
- *
- * This function is called by MID sound card driver to close
- * an existing pcm interface
- */
-int sst_close_pcm_stream(unsigned int str_id)
-{
- struct stream_info *stream;
-
- pr_debug("sst: stream free called\n");
- if (sst_validate_strid(str_id))
- return -EINVAL;
- stream = &sst_drv_ctx->streams[str_id];
- free_stream_context(str_id);
- stream->pcm_substream = NULL;
- stream->status = STREAM_UN_INIT;
- stream->period_elapsed = NULL;
- sst_drv_ctx->stream_cnt--;
- pr_debug("sst: will call runtime put now\n");
- pm_runtime_put(&sst_drv_ctx->pci->dev);
- return 0;
-}
-
-/*
- * sst_device_control - Set Control params
- *
- * @cmd: control cmd to be set
- * @arg: command argument
- *
- * This function is called by MID sound card driver to set
- * SST/Sound card controls for an opened stream.
- * This is registered with MID driver
- */
-int sst_device_control(int cmd, void *arg)
-{
- int retval = 0, str_id = 0;
-
- switch (cmd) {
- case SST_SND_PAUSE:
- case SST_SND_RESUME:
- case SST_SND_DROP:
- case SST_SND_START:
- sst_drv_ctx->mad_ops.control_op = cmd;
- sst_drv_ctx->mad_ops.stream_id = *(int *)arg;
- queue_work(sst_drv_ctx->mad_wq, &sst_drv_ctx->mad_ops.wq);
- break;
-
- case SST_SND_STREAM_INIT: {
- struct pcm_stream_info *str_info;
- struct stream_info *stream;
-
- pr_debug("stream init called\n");
- str_info = (struct pcm_stream_info *)arg;
- str_id = str_info->str_id;
- retval = sst_validate_strid(str_id);
- if (retval)
- break;
-
- stream = &sst_drv_ctx->streams[str_id];
- pr_debug("setting the period ptrs\n");
- stream->pcm_substream = str_info->mad_substream;
- stream->period_elapsed = str_info->period_elapsed;
- stream->sfreq = str_info->sfreq;
- stream->prev = stream->status;
- stream->status = STREAM_INIT;
- break;
- }
-
- case SST_SND_BUFFER_POINTER: {
- struct pcm_stream_info *stream_info;
- struct snd_sst_tstamp fw_tstamp = {0,};
- struct stream_info *stream;
-
-
- stream_info = (struct pcm_stream_info *)arg;
- str_id = stream_info->str_id;
- retval = sst_validate_strid(str_id);
- if (retval)
- break;
- stream = &sst_drv_ctx->streams[str_id];
-
- if (!stream->pcm_substream)
- break;
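-		/*
-		 * The firmware keeps one snd_sst_tstamp record per stream in
-		 * the mailbox, starting at SST_TIME_STAMP and indexed by
-		 * stream id; that per-stream slot is copied out below.
-		 */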
- memcpy_fromio(&fw_tstamp,
- ((void *)(sst_drv_ctx->mailbox + SST_TIME_STAMP)
- +(str_id * sizeof(fw_tstamp))),
- sizeof(fw_tstamp));
-
- pr_debug("Pointer Query on strid = %d ops %d\n",
- str_id, stream->ops);
-
- if (stream->ops == STREAM_OPS_PLAYBACK)
- stream_info->buffer_ptr = fw_tstamp.samples_rendered;
- else
- stream_info->buffer_ptr = fw_tstamp.samples_processed;
- pr_debug("Samples rendered = %llu, buffer ptr %llu\n",
- fw_tstamp.samples_rendered, stream_info->buffer_ptr);
- break;
- }
- case SST_ENABLE_RX_TIME_SLOT: {
- int status = *(int *)arg;
-		sst_drv_ctx->rx_time_slot_status = status;
- sst_enable_rx_timeslot(status);
- break;
- }
- default:
- /* Illegal case */
- pr_warn("illegal req\n");
- return -EINVAL;
- }
-
- return retval;
-}
-
-
-struct intel_sst_pcm_control pcm_ops = {
- .open = sst_open_pcm_stream,
- .device_control = sst_device_control,
- .close = sst_close_pcm_stream,
-};
-
-struct intel_sst_card_ops sst_pmic_ops = {
- .pcm_control = &pcm_ops,
-};
-
-/*
- * register_sst_card - function for sound card to register
- *
- * @card: pointer to structure of operations
- *
- * This function is called when the card driver loads and is ready for registration
- */
-int register_sst_card(struct intel_sst_card_ops *card)
-{
- if (!sst_drv_ctx) {
-		pr_err("No SST driver registered, card registration rejected\n");
- return -ENODEV;
- }
-
- if (!card || !card->module_name) {
- pr_err("Null Pointer Passed\n");
- return -EINVAL;
- }
- if (sst_drv_ctx->pmic_state == SND_MAD_UN_INIT) {
- /* register this driver */
- if ((strncmp(SST_CARD_NAMES, card->module_name,
- strlen(SST_CARD_NAMES))) == 0) {
- sst_drv_ctx->pmic_vendor = card->vendor_id;
- sst_drv_ctx->scard_ops = card->scard_ops;
- sst_pmic_ops.module_name = card->module_name;
- sst_drv_ctx->pmic_state = SND_MAD_INIT_DONE;
- sst_drv_ctx->rx_time_slot_status = 0; /*default AMIC*/
- card->pcm_control = sst_pmic_ops.pcm_control;
- return 0;
- } else {
-			pr_err("strncmp fail %s\n", card->module_name);
- return -EINVAL;
- }
-
- } else {
- /* already registered a driver */
-		pr_err("Repeated registration request, denied\n");
- return -EBADRQC;
- }
- /* The ASoC code doesn't set scard_ops */
- if (sst_drv_ctx->scard_ops)
- sst_drv_ctx->scard_ops->card_status = SND_CARD_UN_INIT;
- return 0;
-}
-EXPORT_SYMBOL_GPL(register_sst_card);
-
-/*
- * unregister_sst_card- function for sound card to un-register
- *
- * @card: pointer to structure of operations
- *
- * This function is called when card driver unloads
- */
-void unregister_sst_card(struct intel_sst_card_ops *card)
-{
- if (sst_pmic_ops.pcm_control == card->pcm_control) {
- /* unreg */
- sst_pmic_ops.module_name = "";
- sst_drv_ctx->pmic_state = SND_MAD_UN_INIT;
- pr_debug("Unregistered %s\n", card->module_name);
- }
- return;
-}
-EXPORT_SYMBOL_GPL(unregister_sst_card);
diff --git a/drivers/staging/intel_sst/intel_sst_dsp.c b/drivers/staging/intel_sst/intel_sst_dsp.c
deleted file mode 100644
index 426d2b9..0000000
--- a/drivers/staging/intel_sst/intel_sst_dsp.c
+++ /dev/null
@@ -1,496 +0,0 @@
-/*
- * intel_sst_dsp.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This driver exposes the audio engine functionalities to the ALSA
- * and middleware.
- *
- * This file contains all dsp controlling functions like firmware download,
- * setting/resetting dsp cores, etc
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/fs.h>
-#include <linux/firmware.h>
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intel_sst_fw_ipc.h"
-#include "intel_sst_common.h"
-
-
-/**
- * intel_sst_reset_dsp_mrst - Resetting SST DSP
- *
- * This resets the DSP in case of MRST platforms
- */
-static int intel_sst_reset_dsp_mrst(void)
-{
- union config_status_reg csr;
-
- pr_debug("Resetting the DSP in mrst\n");
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.full |= 0x382;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.part.strb_cntr_rst = 0;
- csr.part.run_stall = 0x1;
- csr.part.bypass = 0x7;
- csr.part.sst_reset = 0x1;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
- return 0;
-}
-
-/**
- * intel_sst_reset_dsp_medfield - Resetting SST DSP
- *
- * This resets the DSP in case of Medfield platforms
- */
-static int intel_sst_reset_dsp_medfield(void)
-{
- union config_status_reg csr;
-
- pr_debug("Resetting the DSP in medfield\n");
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.full |= 0x382;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
-
- return 0;
-}
-
-/**
- * sst_start_mrst - Start the SST DSP processor
- *
- * This starts the DSP on MRST platforms
- */
-static int sst_start_mrst(void)
-{
- union config_status_reg csr;
-
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.part.bypass = 0;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
- csr.part.run_stall = 0;
- csr.part.sst_reset = 0;
- csr.part.strb_cntr_rst = 1;
- pr_debug("Setting SST to execute_mrst 0x%x\n", csr.full);
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
-
- return 0;
-}
-
-/**
- * sst_start_medfield - Start the SST DSP processor
- *
- * This starts the DSP on Medfield platforms
- */
-static int sst_start_medfield(void)
-{
- union config_status_reg csr;
-
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.part.bypass = 0;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.part.mfld_strb = 1;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.part.run_stall = 0;
- csr.part.sst_reset = 0;
- pr_debug("Starting the DSP_medfld %x\n", csr.full);
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
- pr_debug("Starting the DSP_medfld\n");
-
- return 0;
-}
-
-/**
- * sst_parse_module - Parse audio FW modules
- *
- * @module: FW module header
- *
- * Parses modules that need to be placed in SST IRAM and DRAM
- * returns error or 0 if module sizes are proper
- */
-static int sst_parse_module(struct fw_module_header *module)
-{
- struct dma_block_info *block;
- u32 count;
- void __iomem *ram;
-
- pr_debug("module sign %s size %x blocks %x type %x\n",
- module->signature, module->mod_size,
- module->blocks, module->type);
- pr_debug("module entrypoint 0x%x\n", module->entry_point);
-
- block = (void *)module + sizeof(*module);
-
- for (count = 0; count < module->blocks; count++) {
- if (block->size <= 0) {
- pr_err("block size invalid\n");
- return -EINVAL;
- }
- switch (block->type) {
- case SST_IRAM:
- ram = sst_drv_ctx->iram;
- break;
- case SST_DRAM:
- ram = sst_drv_ctx->dram;
- break;
- default:
-			pr_err("wrong ram type 0x%x in block 0x%x\n",
- block->type, count);
- return -EINVAL;
- }
- memcpy_toio(ram + block->ram_offset,
- (void *)block + sizeof(*block), block->size);
- block = (void *)block + sizeof(*block) + block->size;
- }
- return 0;
-}
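-
-/*
- * As implied by sst_parse_module() above and sst_parse_fw_image() below,
- * the FW image is laid out as:
- *
- *	struct fw_header
- *	struct fw_module_header			(module 0)
- *		struct dma_block_info + payload	(block 0)
- *		struct dma_block_info + payload	(block 1)
- *		...
- *	struct fw_module_header			(module 1)
- *		...
- */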
-
-/**
- * sst_parse_fw_image - parse and load FW
- *
- * @sst_fw: pointer to audio fw
- *
- * This function is called to parse and download the FW image
- */
-static int sst_parse_fw_image(const struct firmware *sst_fw)
-{
- struct fw_header *header;
- u32 count;
- int ret_val;
- struct fw_module_header *module;
-
- BUG_ON(!sst_fw);
-
- /* Read the header information from the data pointer */
- header = (struct fw_header *)sst_fw->data;
-
- /* verify FW */
- if ((strncmp(header->signature, SST_FW_SIGN, 4) != 0) ||
- (sst_fw->size != header->file_size + sizeof(*header))) {
- /* Invalid FW signature */
- pr_err("Invalid FW sign/filesize mismatch\n");
- return -EINVAL;
- }
- pr_debug("header sign=%s size=%x modules=%x fmt=%x size=%x\n",
- header->signature, header->file_size, header->modules,
- header->file_format, sizeof(*header));
- module = (void *)sst_fw->data + sizeof(*header);
- for (count = 0; count < header->modules; count++) {
- /* module */
- ret_val = sst_parse_module(module);
- if (ret_val)
- return ret_val;
-		module = (void *)module + sizeof(*module) + module->mod_size;
- }
-
- return 0;
-}
-
-/**
- * sst_load_fw - function to load FW into DSP
- *
- * @fw: Pointer to driver loaded FW
- * @context: driver context
- *
- * This function is called by OS when the FW is loaded into kernel
- */
-int sst_load_fw(const struct firmware *fw, void *context)
-{
-	int ret_val = -EINVAL; /* in case of an unknown pci_id */
-
- pr_debug("load_fw called\n");
- BUG_ON(!fw);
-
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
- ret_val = intel_sst_reset_dsp_mrst();
- else if (sst_drv_ctx->pci_id == SST_MFLD_PCI_ID)
- ret_val = intel_sst_reset_dsp_medfield();
- if (ret_val)
- return ret_val;
-
- ret_val = sst_parse_fw_image(fw);
- if (ret_val)
- return ret_val;
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_FW_LOADED;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- /* 7. ask scu to reset the bypass bits */
- /* 8.bring sst out of reset */
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
- ret_val = sst_start_mrst();
- else if (sst_drv_ctx->pci_id == SST_MFLD_PCI_ID)
- ret_val = sst_start_medfield();
- if (ret_val)
- return ret_val;
-
-	pr_debug("fw loaded successfully\n");
- return ret_val;
-}
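-
-/*
- * Summary of the load sequence above: reset/stall the DSP for the given
- * platform, copy each FW module into IRAM/DRAM via sst_parse_fw_image(),
- * mark the driver state SST_FW_LOADED, then release reset/stall so the
- * DSP starts executing the downloaded image.
- */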
-
-/* This function is called when any codec/post-processing library
- * needs to be downloaded */
-static int sst_download_library(const struct firmware *fw_lib,
- struct snd_sst_lib_download_info *lib)
-{
- /* send IPC message and wait */
- int i;
- u8 pvt_id;
- struct ipc_post *msg = NULL;
- union config_status_reg csr;
- struct snd_sst_str_type str_type = {0};
- int retval = 0;
-
- if (sst_create_large_msg(&msg))
- return -ENOMEM;
-
- pvt_id = sst_assign_pvt_id(sst_drv_ctx);
- i = sst_get_block_stream(sst_drv_ctx);
- pr_debug("alloc block allocated = %d, pvt_id %d\n", i, pvt_id);
- if (i < 0) {
- kfree(msg);
- return -ENOMEM;
- }
- sst_drv_ctx->alloc_block[i].sst_id = pvt_id;
- sst_fill_header(&msg->header, IPC_IA_PREP_LIB_DNLD, 1, pvt_id);
- msg->header.part.data = sizeof(u32) + sizeof(str_type);
- str_type.codec_type = lib->dload_lib.lib_info.lib_type;
- /*str_type.pvt_id = pvt_id;*/
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), &str_type, sizeof(str_type));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_timeout(sst_drv_ctx, &sst_drv_ctx->alloc_block[i]);
- if (retval) {
- /* error */
- sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
-		pr_err("Prep codec download failed %d\n",
- retval);
- return -EIO;
- }
- pr_debug("FW responded, ready for download now...\n");
- /* downloading on success */
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_FW_LOADED;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- csr.full = readl(sst_drv_ctx->shim + SST_CSR);
- csr.part.run_stall = 1;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
-
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.part.bypass = 0x7;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
-
- sst_parse_fw_image(fw_lib);
-
- /* set the FW to running again */
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.part.bypass = 0x0;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
-
- csr.full = sst_shim_read(sst_drv_ctx->shim, SST_CSR);
- csr.part.run_stall = 0;
- sst_shim_write(sst_drv_ctx->shim, SST_CSR, csr.full);
-
- /* send download complete and wait */
- if (sst_create_large_msg(&msg)) {
- sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
- return -ENOMEM;
- }
-
- sst_fill_header(&msg->header, IPC_IA_LIB_DNLD_CMPLT, 1, pvt_id);
- sst_drv_ctx->alloc_block[i].sst_id = pvt_id;
- msg->header.part.data = sizeof(u32) + sizeof(*lib);
- lib->pvt_id = pvt_id;
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), lib, sizeof(*lib));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- pr_debug("Waiting for FW response Download complete\n");
- sst_drv_ctx->alloc_block[i].ops_block.condition = false;
- retval = sst_wait_timeout(sst_drv_ctx, &sst_drv_ctx->alloc_block[i]);
- if (retval) {
- /* error */
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_UN_INIT;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
- return -EIO;
- }
-
- pr_debug("FW success on Download complete\n");
- sst_drv_ctx->alloc_block[i].sst_id = BLOCK_UNINIT;
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_FW_RUNNING;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- return 0;
-
-}
-
-/* This function validates the codec/post-processing library before it
- * is set for download to the SST DSP */
-static int sst_validate_library(const struct firmware *fw_lib,
- struct lib_slot_info *slot,
- u32 *entry_point)
-{
- struct fw_header *header;
- struct fw_module_header *module;
- struct dma_block_info *block;
- unsigned int n_blk, isize = 0, dsize = 0;
- int err = 0;
-
- header = (struct fw_header *)fw_lib->data;
- if (header->modules != 1) {
-		pr_err("Module count mismatch, expected a single module\n");
- err = -EINVAL;
- goto exit;
- }
- module = (void *)fw_lib->data + sizeof(*header);
- *entry_point = module->entry_point;
- pr_debug("Module entry point 0x%x\n", *entry_point);
- pr_debug("Module Sign %s, Size 0x%x, Blocks 0x%x Type 0x%x\n",
- module->signature, module->mod_size,
- module->blocks, module->type);
-
- block = (void *)module + sizeof(*module);
- for (n_blk = 0; n_blk < module->blocks; n_blk++) {
- switch (block->type) {
- case SST_IRAM:
- isize += block->size;
- break;
- case SST_DRAM:
- dsize += block->size;
- break;
- default:
- pr_err("Invalid block type for 0x%x\n", n_blk);
- err = -EINVAL;
- goto exit;
- }
- block = (void *)block + sizeof(*block) + block->size;
- }
- if (isize > slot->iram_size || dsize > slot->dram_size) {
- pr_err("library exceeds size allocated\n");
- err = -EINVAL;
- goto exit;
- } else
- pr_debug("Library is safe for download...\n");
-
-	pr_debug("lib iram 0x%x, dram 0x%x, slot iram 0x%x, dram 0x%x\n",
-			isize, dsize, slot->iram_size, slot->dram_size);
-exit:
- return err;
-
-}
-
-/* This function is called when the FW requests a particular library
- * download; it prepares the library for download */
-int sst_load_library(struct snd_sst_lib_download *lib, u8 ops)
-{
- char buf[20];
- const char *type, *dir;
- int len = 0, error = 0;
- u32 entry_point;
- const struct firmware *fw_lib;
- struct snd_sst_lib_download_info dload_info = {{{0},},};
-
- memset(buf, 0, sizeof(buf));
-
- pr_debug("Lib Type 0x%x, Slot 0x%x, ops 0x%x\n",
- lib->lib_info.lib_type, lib->slot_info.slot_num, ops);
- pr_debug("Version 0x%x, name %s, caps 0x%x media type 0x%x\n",
- lib->lib_info.lib_version, lib->lib_info.lib_name,
- lib->lib_info.lib_caps, lib->lib_info.media_type);
-
- pr_debug("IRAM Size 0x%x, offset 0x%x\n",
- lib->slot_info.iram_size, lib->slot_info.iram_offset);
- pr_debug("DRAM Size 0x%x, offset 0x%x\n",
- lib->slot_info.dram_size, lib->slot_info.dram_offset);
-
- switch (lib->lib_info.lib_type) {
- case SST_CODEC_TYPE_MP3:
- type = "mp3_";
- break;
- case SST_CODEC_TYPE_AAC:
- type = "aac_";
- break;
- case SST_CODEC_TYPE_AACP:
- type = "aac_v1_";
- break;
- case SST_CODEC_TYPE_eAACP:
- type = "aac_v2_";
- break;
- case SST_CODEC_TYPE_WMA9:
- type = "wma9_";
- break;
- default:
- pr_err("Invalid codec type\n");
- error = -EINVAL;
- goto wake;
- }
-
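-	/*
-	 * The library file name is built as "<codec>_<dir><slot>.bin",
-	 * e.g. "mp3_dec_1.bin" for an MP3 decoder library in slot 1.
-	 */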
- if (ops == STREAM_OPS_CAPTURE)
- dir = "enc_";
- else
- dir = "dec_";
- len = strlen(type) + strlen(dir);
- strncpy(buf, type, sizeof(buf)-1);
- strncpy(buf + strlen(type), dir, sizeof(buf)-strlen(type)-1);
- len += snprintf(buf + len, sizeof(buf) - len, "%d",
- lib->slot_info.slot_num);
- len += snprintf(buf + len, sizeof(buf) - len, ".bin");
-
- pr_debug("Requesting %s\n", buf);
-
- error = request_firmware(&fw_lib, buf, &sst_drv_ctx->pci->dev);
- if (error) {
- pr_err("library load failed %d\n", error);
- goto wake;
- }
- error = sst_validate_library(fw_lib, &lib->slot_info, &entry_point);
- if (error)
- goto wake_free;
-
- lib->mod_entry_pt = entry_point;
- memcpy(&dload_info.dload_lib, lib, sizeof(*lib));
- error = sst_download_library(fw_lib, &dload_info);
- if (error)
- goto wake_free;
-
- /* lib is downloaded and init send alloc again */
- pr_debug("Library is downloaded now...\n");
-wake_free:
- /* sst_wake_up_alloc_block(sst_drv_ctx, pvt_id, error, NULL); */
- release_firmware(fw_lib);
-wake:
- return error;
-}
-
diff --git a/drivers/staging/intel_sst/intel_sst_fw_ipc.h b/drivers/staging/intel_sst/intel_sst_fw_ipc.h
deleted file mode 100644
index 5d0cc56..0000000
--- a/drivers/staging/intel_sst/intel_sst_fw_ipc.h
+++ /dev/null
@@ -1,416 +0,0 @@
-#ifndef __INTEL_SST_FW_IPC_H__
-#define __INTEL_SST_FW_IPC_H__
-/*
-* intel_sst_fw_ipc.h - Intel SST Driver for audio engine
-*
-* Copyright (C) 2008-10 Intel Corporation
-* Author: Vinod Koul <vinod.koul@intel.com>
-* Harsha Priya <priya.harsha@intel.com>
-* Dharageswari R <dharageswari.r@intel.com>
-* KP Jeeja <jeeja.kp@intel.com>
-* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-*
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License as published by
-* the Free Software Foundation; version 2 of the License.
-*
-* This program is distributed in the hope that it will be useful, but
-* WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-* General Public License for more details.
-*
-* You should have received a copy of the GNU General Public License along
-* with this program; if not, write to the Free Software Foundation, Inc.,
-* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
-*
-* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-*
-* This driver exposes the audio engine functionalities to the ALSA
-* and middleware.
-* This file has definitions shared between the firmware and driver
-*/
-
-#define MAX_NUM_STREAMS_MRST 3
-#define MAX_NUM_STREAMS_MFLD 6
-#define MAX_NUM_STREAMS 6
-#define MAX_DBG_RW_BYTES 80
-#define MAX_NUM_SCATTER_BUFFERS 8
-#define MAX_LOOP_BACK_DWORDS 8
-/* IPC base address and mailbox, timestamp offsets */
-#define SST_MAILBOX_SIZE 0x0400
-#define SST_MAILBOX_SEND 0x0000
-#define SST_MAILBOX_RCV 0x0804
-#define SST_TIME_STAMP 0x1800
-#define SST_RESERVED_OFFSET 0x1A00
-#define SST_CHEKPOINT_OFFSET 0x1C00
-#define REPLY_MSG 0x80
-
-/* Message ID's for IPC messages */
-/* Bits B7: SST or IA/SC ; B6-B4: Msg Category; B3-B0: Msg Type */
-
-/* I2L Firmware/Codec Download msgs */
-#define IPC_IA_PREP_LIB_DNLD 0x01
-#define IPC_IA_LIB_DNLD_CMPLT 0x02
-
-#define IPC_IA_SET_PMIC_TYPE 0x03
-#define IPC_IA_GET_FW_VERSION 0x04
-#define IPC_IA_GET_FW_BUILD_INF 0x05
-#define IPC_IA_GET_FW_INFO 0x06
-#define IPC_IA_GET_FW_CTXT 0x07
-#define IPC_IA_SET_FW_CTXT 0x08
-
-/* I2L Codec Config/control msgs */
-#define IPC_IA_SET_CODEC_PARAMS 0x10
-#define IPC_IA_GET_CODEC_PARAMS 0x11
-#define IPC_IA_SET_PPP_PARAMS 0x12
-#define IPC_IA_GET_PPP_PARAMS 0x13
-#define IPC_IA_PLAY_FRAMES 0x14
-#define IPC_IA_CAPT_FRAMES 0x15
-#define IPC_IA_PLAY_VOICE 0x16
-#define IPC_IA_CAPT_VOICE 0x17
-#define IPC_IA_DECODE_FRAMES 0x18
-
-#define IPC_IA_ALG_PARAMS 0x1A
-#define IPC_IA_TUNING_PARAMS 0x1B
-
-/* I2L Stream config/control msgs */
-#define IPC_IA_ALLOC_STREAM 0x20 /* Allocate a stream ID */
-#define IPC_IA_FREE_STREAM 0x21 /* Free the stream ID */
-#define IPC_IA_SET_STREAM_PARAMS 0x22
-#define IPC_IA_GET_STREAM_PARAMS 0x23
-#define IPC_IA_PAUSE_STREAM 0x24
-#define IPC_IA_RESUME_STREAM 0x25
-#define IPC_IA_DROP_STREAM 0x26
-#define IPC_IA_DRAIN_STREAM 0x27 /* Short msg with str_id */
-#define IPC_IA_TARGET_DEV_SELECT 0x28
-#define IPC_IA_CONTROL_ROUTING 0x29
-
-#define IPC_IA_SET_STREAM_VOL 0x2A /*Vol for stream, pre mixer */
-#define IPC_IA_GET_STREAM_VOL 0x2B
-#define IPC_IA_SET_STREAM_MUTE 0x2C
-#define IPC_IA_GET_STREAM_MUTE 0x2D
-#define IPC_IA_ENABLE_RX_TIME_SLOT 0x2E /* Enable Rx time slot 0 or 1 */
-
-#define IPC_IA_START_STREAM 0x30 /* Short msg with str_id */
-
-/* Debug msgs */
-#define IPC_IA_DBG_MEM_READ 0x40
-#define IPC_IA_DBG_MEM_WRITE 0x41
-#define IPC_IA_DBG_LOOP_BACK 0x42
-
-/* L2I Firmware/Codec Download msgs */
-#define IPC_IA_FW_INIT_CMPLT 0x81
-#define IPC_IA_LPE_GETTING_STALLED 0x82
-#define IPC_IA_LPE_UNSTALLED 0x83
-
-/* L2I Codec Config/control msgs */
-#define IPC_SST_GET_PLAY_FRAMES 0x90 /* Request IA more data */
-#define IPC_SST_GET_CAPT_FRAMES 0x91 /* Request IA more data */
-#define IPC_SST_BUF_UNDER_RUN 0x92 /* PB Under run and stopped */
-#define IPC_SST_BUF_OVER_RUN 0x93 /* CAP Over run and stopped */
-#define IPC_SST_DRAIN_END 0x94 /* PB Drain complete and stopped */
-#define IPC_SST_CHNGE_SSP_PARAMS 0x95 /* PB SSP parameters changed */
-#define IPC_SST_STREAM_PROCESS_FATAL_ERR 0x96/* error in processing a stream */
-#define IPC_SST_PERIOD_ELAPSED 0x97 /* period elapsed */
-#define IPC_IA_TARGET_DEV_CHNGD 0x98 /* Target device changed */
-
-#define IPC_SST_ERROR_EVENT 0x99 /* Buffer over run occurred */
-/* L2S messages */
-#define IPC_SC_DDR_LINK_UP 0xC0
-#define IPC_SC_DDR_LINK_DOWN 0xC1
-#define IPC_SC_SET_LPECLK_REQ 0xC2
-#define IPC_SC_SSP_BIT_BANG 0xC3
-
-/* L2I Error reporting msgs */
-#define IPC_IA_MEM_ALLOC_FAIL 0xE0
-#define IPC_IA_PROC_ERR 0xE1 /* error in processing a
- stream can be used by playback and
- capture modules */
-
-/* L2I Debug msgs */
-#define IPC_IA_PRINT_STRING 0xF0
-
-
-
-/* Command Response or Acknowledge message to any IPC message will have
- * same message ID and stream ID information which is sent.
- * There is no specific Ack message ID. The data field is used as response
- * meaning.
- */
-enum ackData {
- IPC_ACK_SUCCESS = 0,
- IPC_ACK_FAILURE
-};
-
-
-enum sst_error_codes {
- /* Error code,response to msgId: Description */
- /* Common error codes */
- SST_SUCCESS = 0, /* Success */
- SST_ERR_INVALID_STREAM_ID = 1,
- SST_ERR_INVALID_MSG_ID = 2,
- SST_ERR_INVALID_STREAM_OP = 3,
- SST_ERR_INVALID_PARAMS = 4,
- SST_ERR_INVALID_CODEC = 5,
- SST_ERR_INVALID_MEDIA_TYPE = 6,
- SST_ERR_STREAM_ERR = 7,
-
- /* IPC specific error codes */
- SST_IPC_ERR_CALL_BACK_NOT_REGD = 8,
- SST_IPC_ERR_STREAM_NOT_ALLOCATED = 9,
- SST_IPC_ERR_STREAM_ALLOC_FAILED = 10,
- SST_IPC_ERR_GET_STREAM_FAILED = 11,
- SST_ERR_MOD_NOT_AVAIL = 12,
- SST_ERR_MOD_DNLD_RQD = 13,
- SST_ERR_STREAM_STOPPED = 14,
- SST_ERR_STREAM_IN_USE = 15,
-
- /* Capture specific error codes */
- SST_CAP_ERR_INCMPLTE_CAPTURE_MSG = 16,
- SST_CAP_ERR_CAPTURE_FAIL = 17,
- SST_CAP_ERR_GET_DDR_NEW_SGLIST = 18,
- SST_CAP_ERR_UNDER_RUN = 19,
- SST_CAP_ERR_OVERFLOW = 20,
-
- /* Playback specific error codes*/
- SST_PB_ERR_INCMPLTE_PLAY_MSG = 21,
- SST_PB_ERR_PLAY_FAIL = 22,
- SST_PB_ERR_GET_DDR_NEW_SGLIST = 23,
-
- /* Codec manager specific error codes */
- SST_LIB_ERR_LIB_DNLD_REQUIRED = 24,
- SST_LIB_ERR_LIB_NOT_SUPPORTED = 25,
-
- /* Library manager specific error codes */
- SST_SCC_ERR_PREP_DNLD_FAILED = 26,
- SST_SCC_ERR_LIB_DNLD_RES_FAILED = 27,
- /* Scheduler specific error codes */
- SST_SCH_ERR_FAIL = 28,
-
- /* DMA specific error codes */
- SST_DMA_ERR_NO_CHNL_AVAILABLE = 29,
- SST_DMA_ERR_INVALID_INPUT_PARAMS = 30,
- SST_DMA_ERR_CHNL_ALREADY_SUSPENDED = 31,
- SST_DMA_ERR_CHNL_ALREADY_STARTED = 32,
- SST_DMA_ERR_CHNL_NOT_ENABLED = 33,
- SST_DMA_ERR_TRANSFER_FAILED = 34,
-
- SST_SSP_ERR_ALREADY_ENABLED = 35,
- SST_SSP_ERR_ALREADY_DISABLED = 36,
- SST_SSP_ERR_NOT_INITIALIZED = 37,
- SST_SSP_ERR_SRAM_NO_DMA_DATA = 38,
-
- /* Other error codes */
- SST_ERR_MOD_INIT_FAIL = 39,
-
- /* FW init error codes */
- SST_RDR_ERR_IO_DEV_SEL_NOT_ALLOWED = 40,
- SST_RDR_ERR_ROUTE_ALREADY_STARTED = 41,
- SST_RDR_ERR_IO_DEV_SEL_FAILED = 42,
- SST_RDR_PREP_CODEC_DNLD_FAILED = 43,
-
- /* Memory debug error codes */
- SST_ERR_DBG_MEM_READ_FAIL = 44,
- SST_ERR_DBG_MEM_WRITE_FAIL = 45,
- SST_ERR_INSUFFICIENT_INPUT_SG_LIST = 46,
- SST_ERR_INSUFFICIENT_OUTPUT_SG_LIST = 47,
-
- SST_ERR_BUFFER_NOT_AVAILABLE = 48,
- SST_ERR_BUFFER_NOT_ALLOCATED = 49,
- SST_ERR_INVALID_REGION_TYPE = 50,
- SST_ERR_NULL_PTR = 51,
- SST_ERR_INVALID_BUFFER_SIZE = 52,
- SST_ERR_INVALID_BUFFER_INDEX = 53,
-
- /*IIPC specific error codes */
- SST_IIPC_QUEUE_FULL = 54,
- SST_IIPC_ERR_MSG_SND_FAILED = 55,
- SST_PB_ERR_UNDERRUN_OCCURED = 56,
- SST_RDR_INSUFFICIENT_MIXER_BUFFER = 57,
- SST_INVALID_TIME_SLOTS = 58,
-};
-
-enum dbg_mem_data_type {
- /* Data type of debug read/write */
- DATA_TYPE_U32,
- DATA_TYPE_U16,
- DATA_TYPE_U8,
-};
-
-/* CAUTION NOTE: All IPC message bodies must be a multiple of 32 bits. */
-
-/* IPC Header */
-union ipc_header {
- struct {
- u32 msg_id:8; /* Message ID - Max 256 Message Types */
- u32 str_id:5;
- u32 large:1; /* Large Message if large = 1 */
- u32 reserved:2; /* Reserved for future use */
- u32 data:14; /* Ack/Info for msg, size of msg in Mailbox */
- u32 done:1; /* bit 30 */
- u32 busy:1; /* bit 31 */
- } part;
- u32 full;
-} __attribute__ ((packed));
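-
-/*
- * Illustrative encoding, derived from the bit layout above (and assuming
- * the poster sets the busy bit, as sst_post_message() implies): a short
- * IPC_IA_PAUSE_STREAM (0x24) posted for stream 3 with no mailbox payload
- * would carry msg_id = 0x24, str_id = 3, large = 0, data = 0, done = 0
- * and busy = 1, i.e. header.full == 0x80000324.
- */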
-
-/* Firmware build info */
-struct sst_fw_build_info {
- unsigned char date[16]; /* Firmware build date */
- unsigned char time[16]; /* Firmware build time */
-} __attribute__ ((packed));
-
-struct ipc_header_fw_init {
- struct snd_sst_fw_version fw_version;/* Firmware version details */
- struct sst_fw_build_info build_info;
- u16 result; /* Fw init result */
- u8 module_id; /* Module ID in case of error */
- u8 debug_info; /* Debug info from Module ID in case of fail */
-} __attribute__ ((packed));
-
-/* Address and size info of a frame buffer in DDR */
-struct sst_address_info {
- u32 addr; /* Address at IA */
- u32 size; /* Size of the buffer */
-} __attribute__ ((packed));
-
-/* Time stamp */
-struct snd_sst_tstamp {
- u64 samples_processed;/* capture - data in DDR */
- u64 samples_rendered;/* playback - data rendered */
- u64 bytes_processed;/* bytes decoded or encoded */
- u32 sampling_frequency;/* eg: 48000, 44100 */
- u32 dma_base_address;/* DMA base address */
- u16 dma_channel_no;/* DMA Channel used for the data transfer*/
- u16 reserved;/* 32 bit alignment */
-};
-
-/* Frame info to play or capture */
-struct sst_frame_info {
- u16 num_entries; /* number of entries to follow */
- u16 rsrvd;
- struct sst_address_info addr[MAX_NUM_SCATTER_BUFFERS];
-} __attribute__ ((packed));
-
-/* Frames info for decode */
-struct snd_sst_decode_info {
- unsigned long long input_bytes_consumed;
- unsigned long long output_bytes_produced;
- struct sst_frame_info frames_in;
- struct sst_frame_info frames_out;
-} __attribute__ ((packed));
-
-/* SST to IA print debug message*/
-struct ipc_sst_ia_print_params {
- u32 string_size;/* Max value is 160 */
- u8 prt_string[160];/* Null terminated Char string */
-} __attribute__ ((packed));
-
-/* Voice data message */
-struct snd_sst_voice_data {
- u16 num_bytes;/* Number of valid voice data bytes */
- u8 pcm_wd_size;/* 0=8 bit, 1=16 bit 2=32 bit */
- u8 reserved;/* Reserved */
- u8 voice_data_buf[0];/* Voice data buffer in bytes, little endian */
-} __attribute__ ((packed));
-
-/* SST to IA memory read debug message */
-struct ipc_sst_ia_dbg_mem_rw {
- u16 num_bytes;/* Maximum of MAX_DBG_RW_BYTES */
- u16 data_type;/* enum: dbg_mem_data_type */
- u32 address; /* Memory address of data memory of data_type */
- u8 rw_bytes[MAX_DBG_RW_BYTES];/* Maximum of 64 bytes can be RW */
-} __attribute__ ((packed));
-
-struct ipc_sst_ia_dbg_loop_back {
- u16 num_dwords; /* Maximum of MAX_DBG_RW_BYTES */
- u16 increment_val;/* Increments dwords by this value, 0- no increment */
- u32 lpbk_dwords[MAX_LOOP_BACK_DWORDS];/* Maximum of 8 dwords loopback */
-} __attribute__ ((packed));
-
-/* Stream type params structure for Alloc stream */
-struct snd_sst_str_type {
- u8 codec_type; /* Codec type */
- u8 str_type; /* 1 = voice 2 = music */
- u8 operation; /* Playback or Capture */
- u8 protected_str; /* 0=Non DRM, 1=DRM */
- u8 time_slots;
- u8 reserved; /* Reserved */
- u16 result; /* Result used for acknowledgment */
-} __attribute__ ((packed));
-
-/* Library info structure */
-struct module_info {
- u32 lib_version;
- u32 lib_type;/*TBD- KLOCKWORK u8 lib_type;*/
- u32 media_type;
- u8 lib_name[12];
- u32 lib_caps;
- unsigned char b_date[16]; /* Lib build date */
- unsigned char b_time[16]; /* Lib build time */
-} __attribute__ ((packed));
-
-/* Library slot info */
-struct lib_slot_info {
- u8 slot_num; /* 1 or 2 */
- u8 reserved1;
- u16 reserved2;
- u32 iram_size; /* slot size in IRAM */
- u32 dram_size; /* slot size in DRAM */
- u32 iram_offset; /* starting offset of slot in IRAM */
- u32 dram_offset; /* starting offset of slot in DRAM */
-} __attribute__ ((packed));
-
-struct snd_sst_lib_download {
- struct module_info lib_info; /* library info type, capabilities etc */
- struct lib_slot_info slot_info; /* slot info to be downloaded */
- u32 mod_entry_pt;
-};
-
-struct snd_sst_lib_download_info {
- struct snd_sst_lib_download dload_lib;
- u16 result; /* Result used for acknowledgment */
- u8 pvt_id; /* Private ID */
- u8 reserved; /* for alignment */
-};
-
-/* Alloc stream params structure */
-struct snd_sst_alloc_params {
- struct snd_sst_str_type str_type;
- struct snd_sst_stream_params stream_params;
-};
-
-struct snd_sst_fw_get_stream_params {
- struct snd_sst_stream_params codec_params;
- struct snd_sst_pmic_config pcm_params;
-};
-
-/* Alloc stream response message */
-struct snd_sst_alloc_response {
- struct snd_sst_str_type str_type; /* Stream type for allocation */
- struct snd_sst_lib_download lib_dnld; /* Valid only for codec dnld */
-};
-
-/* Drop response */
-struct snd_sst_drop_response {
- u32 result;
- u32 bytes;
-};
-
-/* CSV Voice call routing structure */
-struct snd_sst_control_routing {
- u8 control; /* 0=start, 1=Stop */
- u8 reserved[3]; /* Reserved- for 32 bit alignment */
-};
-
-
-struct ipc_post {
- struct list_head node;
- union ipc_header header; /* driver specific */
- char *mailbox_data;
-};
-
-struct snd_sst_ctxt_params {
- u32 address; /* Physical Address in DDR where the context is stored */
- u32 size; /* size of the context */
-};
-#endif /* __INTEL_SST_FW_IPC_H__ */
diff --git a/drivers/staging/intel_sst/intel_sst_ioctl.h b/drivers/staging/intel_sst/intel_sst_ioctl.h
deleted file mode 100644
index 5da5ee0..0000000
--- a/drivers/staging/intel_sst/intel_sst_ioctl.h
+++ /dev/null
@@ -1,440 +0,0 @@
-#ifndef __INTEL_SST_IOCTL_H__
-#define __INTEL_SST_IOCTL_H__
-/*
- * intel_sst_ioctl.h - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corporation
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This file defines all sst ioctls
- */
-
-/* codec and post/pre processing related info */
-
-#include <linux/types.h>
-
-enum sst_codec_types {
-/* AUDIO/MUSIC CODEC Type Definitions */
- SST_CODEC_TYPE_UNKNOWN = 0,
- SST_CODEC_TYPE_PCM, /* Pass through Audio codec */
- SST_CODEC_TYPE_MP3,
- SST_CODEC_TYPE_MP24,
- SST_CODEC_TYPE_AAC,
- SST_CODEC_TYPE_AACP,
- SST_CODEC_TYPE_eAACP,
- SST_CODEC_TYPE_WMA9,
- SST_CODEC_TYPE_WMA10,
- SST_CODEC_TYPE_WMA10P,
- SST_CODEC_TYPE_RA,
- SST_CODEC_TYPE_DDAC3,
- SST_CODEC_TYPE_STEREO_TRUE_HD,
- SST_CODEC_TYPE_STEREO_HD_PLUS,
-
- /* VOICE CODEC Type Definitions */
- SST_CODEC_TYPE_VOICE_PCM = 0x21, /* Pass through voice codec */
-};
-
-enum sst_algo_types {
- SST_CODEC_SRC = 0x64,
- SST_CODEC_MIXER = 0x65,
- SST_CODEC_DOWN_MIXER = 0x66,
- SST_CODEC_VOLUME_CONTROL = 0x67,
- SST_CODEC_OEM1 = 0xC8,
- SST_CODEC_OEM2 = 0xC9,
-};
-
-enum snd_sst_stream_ops {
- STREAM_OPS_PLAYBACK = 0, /* Decode */
- STREAM_OPS_CAPTURE, /* Encode */
- STREAM_OPS_PLAYBACK_DRM, /* Play Audio/Voice */
- STREAM_OPS_PLAYBACK_ALERT, /* Play Audio/Voice */
- STREAM_OPS_CAPTURE_VOICE_CALL, /* CSV Voice recording */
-};
-
-enum stream_mode {
- SST_STREAM_MODE_NONE = 0,
- SST_STREAM_MODE_DNR = 1,
- SST_STREAM_MODE_FNF = 2,
- SST_STREAM_MODE_CAPTURE = 3
-};
-
-enum stream_type {
- SST_STREAM_TYPE_NONE = 0,
- SST_STREAM_TYPE_MUSIC = 1,
- SST_STREAM_TYPE_NORMAL = 2,
- SST_STREAM_TYPE_LONG_PB = 3,
- SST_STREAM_TYPE_LOW_LATENCY = 4,
-};
-
-enum snd_sst_audio_device_type {
- SND_SST_DEVICE_HEADSET = 1,
- SND_SST_DEVICE_IHF,
- SND_SST_DEVICE_VIBRA,
- SND_SST_DEVICE_HAPTIC,
- SND_SST_DEVICE_CAPTURE,
-};
-
-/* Firmware Version info */
-struct snd_sst_fw_version {
- __u8 build; /* build number*/
- __u8 minor; /* minor number*/
- __u8 major; /* major number*/
- __u8 type; /* build type */
-};
-
-/* Port info structure */
-struct snd_sst_port_info {
- __u16 port_type;
- __u16 reserved;
-};
-
-/* Mixer info structure */
-struct snd_sst_mix_info {
- __u16 max_streams;
- __u16 reserved;
-};
-
-/* PCM Parameters */
-struct snd_pcm_params {
- __u16 codec; /* codec type */
- __u8 num_chan; /* 1=Mono, 2=Stereo */
- __u8 pcm_wd_sz; /* 16/24 - bit*/
- __u32 reserved; /* Bitrate in bits per second */
- __u32 sfreq; /* Sampling rate in Hz */
- __u32 ring_buffer_size;
- __u32 period_count; /* period elapsed in samples*/
- __u32 ring_buffer_addr;
-};
-
-/* MP3 Music Parameters Message */
-struct snd_mp3_params {
- __u16 codec;
- __u8 num_chan; /* 1=Mono, 2=Stereo */
- __u8 pcm_wd_sz; /* 16/24 - bit*/
- __u32 brate; /* Use the hard coded value. */
-	__u32 brate; /* Use the hard coded value. */
-	__u32 sfreq; /* Sampling freq e.g. 8000, 44100, 48000 */
- __u8 crc_check; /* crc_check - disable (0) or enable (1) */
- __u8 op_align; /* op align 0- 16 bit, 1- MSB, 2 LSB*/
- __u16 reserved; /* Unused */
-};
-
-#define AAC_BIT_STREAM_ADTS 0
-#define AAC_BIT_STREAM_ADIF 1
-#define AAC_BIT_STREAM_RAW 2
-
-/* AAC Music Parameters Message */
-struct snd_aac_params {
- __u16 codec;
- __u8 num_chan; /* 1=Mono, 2=Stereo*/
- __u8 pcm_wd_sz; /* 16/24 - bit*/
- __u32 brate;
-	__u32 sfreq; /* Sampling freq e.g. 8000, 44100, 48000 */
- __u32 aac_srate; /* Plain AAC decoder operating sample rate */
- __u8 mpg_id; /* 0=MPEG-2, 1=MPEG-4 */
- __u8 bs_format; /* input bit stream format adts=0, adif=1, raw=2 */
- __u8 aac_profile; /* 0=Main Profile, 1=LC profile, 3=SSR profile */
- __u8 ext_chl; /* No.of external channels */
- __u8 aot; /* Audio object type. 1=Main , 2=LC , 3=SSR, 4=SBR*/
- __u8 op_align; /* output alignment 0=16 bit , 1=MSB, 2= LSB align */
- __u8 brate_type; /* 0=CBR, 1=VBR */
- __u8 crc_check; /* crc check 0= disable, 1=enable */
- __s8 bit_stream_format[8]; /* input bit stream format adts/adif/raw */
- __u8 jstereo; /* Joint stereo Flag */
- __u8 sbr_present; /* 1 = SBR Present, 0 = SBR absent, for RAW */
- __u8 downsample; /* 1 = Downsampling ON, 0 = Downsampling OFF */
- __u8 num_syntc_elems; /* 1- Mono/stereo, 0 - Dual Mono, 0 - for raw */
-	__s8 syntc_id[2]; /* 0 for ID_SCE(Dual Mono), -1 for raw */
- __s8 syntc_tag[2]; /* raw - -1 and 0 -16 for rest of the streams */
- __u8 pce_present; /* Flag. 1- present 0 - not present, for RAW */
- __u8 sbr_type; /* sbr_type: 0-plain aac, 1-aac-v1, 2-aac-v2 */
- __u8 outchmode; /*0- mono, 1-stereo, 2-dual mono 3-Parametric stereo */
- __u8 ps_present;
-};
-
-/* WMA Music Parameters Message */
-struct snd_wma_params {
- __u16 codec;
- __u8 num_chan; /* 1=Mono, 2=Stereo */
- __u8 pcm_wd_sz; /* 16/24 - bit*/
- __u32 brate; /* Use the hard coded value. */
-	__u32 sfreq; /* Sampling freq e.g. 8000, 44100, 48000 */
- __u32 channel_mask; /* Channel Mask */
- __u16 format_tag; /* Format Tag */
- __u16 block_align; /* packet size */
- __u16 wma_encode_opt;/* Encoder option */
- __u8 op_align; /* op align 0- 16 bit, 1- MSB, 2 LSB */
- __u8 pcm_src; /* input pcm bit width */
-};
-
-/* Pre processing param structure */
-struct snd_prp_params {
- __u32 reserved; /* No pre-processing defined yet */
-};
-
-/* Pre and post processing params structure */
-struct snd_ppp_params {
- __u8 algo_id;/* Post/Pre processing algorithm ID */
- __u8 str_id; /*Only 5 bits used 0 - 31 are valid*/
- __u8 enable; /* 0= disable, 1= enable*/
- __u8 reserved;
- __u32 size; /*Size of parameters for all blocks*/
- void *params;
-} __attribute__ ((packed));
-
-struct snd_sst_postproc_info {
- __u32 src_min; /* Supported SRC Min sampling freq */
- __u32 src_max; /* Supported SRC Max sampling freq */
- __u8 src; /* 0=Not supported, 1=Supported */
- __u8 bass_boost; /* 0=Not Supported, 1=Supported */
- __u8 stereo_widening; /* 0=Not Supported, 1=Supported */
- __u8 volume_control; /* 0=Not Supported, 1=Supported */
- __s16 min_vol; /* Minimum value of Volume in dB */
- __s16 max_vol; /* Maximum value of Volume in dB */
- __u8 mute_control; /* 0=No Mute, 1=Mute */
- __u8 reserved1;
- __u16 reserved2;
-};
-
-/* pre processing Capability info structure */
-struct snd_sst_prp_info {
- __s16 min_vol; /* Minimum value of Volume in dB */
- __s16 max_vol; /* Maximum value of Volume in dB */
- __u8 volume_control; /* 0=Not Supported, 1=Supported */
- __u8 reserved1; /* for 32 bit alignment */
- __u16 reserved2; /* for 32 bit alignment */
-} __attribute__ ((packed));
-
-/*Pre / Post processing algorithms support*/
-struct snd_sst_ppp_info {
- __u32 src:1; /* 0=Not supported, 1=Supported */
- __u32 mixer:1; /* 0=Not supported, 1=Supported */
- __u32 volume_control:1; /* 0=Not Supported, 1=Supported */
- __u32 mute_control:1; /* 0=Not Supported, 1=Supported */
- __u32 anc:1; /* 0=Not Supported, 1=Supported */
- __u32 side_tone:1; /* 0=Not Supported, 1=Supported */
- __u32 dc_removal:1; /* 0=Not Supported, 1=Supported */
- __u32 equalizer:1; /* 0=Not Supported, 1=Supported */
- __u32 spkr_prot:1; /* 0=Not Supported, 1=Supported */
- __u32 bass_boost:1; /* 0=Not Supported, 1=Supported */
- __u32 stereo_widening:1;/* 0=Not Supported, 1=Supported */
- __u32 rsvd1:21;
- __u32 rsvd2;
-};
-
-/* Firmware capabilities info */
-struct snd_sst_fw_info {
- struct snd_sst_fw_version fw_version; /* Firmware version */
- __u8 audio_codecs_supported[8]; /* Codecs supported by FW */
- __u32 recommend_min_duration; /* Min duration for Lowpower Playback */
- __u8 max_pcm_streams_supported; /* Max num of PCM streams supported */
- __u8 max_enc_streams_supported; /* Max number of Encoded streams */
- __u16 reserved; /* 32 bit alignment*/
- struct snd_sst_ppp_info ppp_info; /* pre_processing mod cap info */
- struct snd_sst_postproc_info pop_info; /* Post processing cap info*/
- struct snd_sst_port_info port_info[3]; /* Port info */
- struct snd_sst_mix_info mix_info;/* Mixer info */
- __u32 min_input_buf; /* minmum i/p buffer for decode */
-};
-
-/* Codec params structure */
-union snd_sst_codec_params {
- struct snd_pcm_params pcm_params;
- struct snd_mp3_params mp3_params;
- struct snd_aac_params aac_params;
- struct snd_wma_params wma_params;
-};
-
-
-struct snd_sst_stream_params {
- union snd_sst_codec_params uc;
-} __attribute__ ((packed));
-
-struct snd_sst_params {
- __u32 result;
- __u32 stream_id;
- __u8 codec;
- __u8 ops;
- __u8 stream_type;
- __u8 device_type;
- struct snd_sst_stream_params sparams;
-};
-
-struct snd_sst_vol {
- __u32 stream_id;
- __s32 volume;
- __u32 ramp_duration;
- __u32 ramp_type; /* Ramp type, default=0 */
-};
-
-struct snd_sst_mute {
- __u32 stream_id;
- __u32 mute;
-};
-
-/* ioctl related stuff here */
-struct snd_sst_pmic_config {
- __u32 sfreq; /* Sampling rate in Hz */
- __u16 num_chan; /* Mono =1 or Stereo =2 */
- __u16 pcm_wd_sz; /* Number of bits per sample */
-} __attribute__ ((packed));
-
-struct snd_sst_get_stream_params {
- struct snd_sst_params codec_params;
- struct snd_sst_pmic_config pcm_params;
-};
-
-enum snd_sst_target_type {
- SND_SST_TARGET_PMIC = 1,
- SND_SST_TARGET_LPE,
- SND_SST_TARGET_MODEM,
- SND_SST_TARGET_BT,
- SND_SST_TARGET_FM,
- SND_SST_TARGET_NONE,
-};
-
-enum snd_sst_device_type {
- SND_SST_DEVICE_SSP = 1,
- SND_SST_DEVICE_PCM,
- SND_SST_DEVICE_OTHER,
-};
-
-enum snd_sst_device_mode {
-
- SND_SST_DEV_MODE_PCM_MODE1 = 1, /*(16-bit word, bit-length frame sync)*/
- SND_SST_DEV_MODE_PCM_MODE2,
- SND_SST_DEV_MODE_PCM_MODE3,
- SND_SST_DEV_MODE_PCM_MODE4_RIGHT_JUSTIFIED,
- SND_SST_DEV_MODE_PCM_MODE4_LEFT_JUSTIFIED,
- SND_SST_DEV_MODE_PCM_MODE4_I2S, /*(I2S mode, 16-bit words)*/
- SND_SST_DEV_MODE_PCM_MODE5,
- SND_SST_DEV_MODE_PCM_MODE6,
-};
-
-enum snd_sst_port_action {
- SND_SST_PORT_PREPARE = 1,
- SND_SST_PORT_ACTIVATE,
-};
-
-/* Target selection per device structure */
-struct snd_sst_slot_info {
- __u8 mix_enable; /* Mixer enable or disable */
- __u8 device_type;
- __u8 device_instance; /* 0, 1, 2 */
- __u8 target_device;
- __u16 target_sink;
- __u8 slot[2];
- __u8 master;
- __u8 action;
- __u8 device_mode;
- __u8 reserved;
- struct snd_sst_pmic_config pcm_params;
-} __attribute__ ((packed));
-
-#define SST_MAX_TARGET_DEVICES 3
-/* Target device list structure */
-struct snd_sst_target_device {
- __u32 device_route;
- struct snd_sst_slot_info devices[SST_MAX_TARGET_DEVICES];
-} __attribute__ ((packed));
-
-struct snd_sst_driver_info {
- __u32 version; /* Version of the driver */
- __u32 active_pcm_streams;
- __u32 active_enc_streams;
- __u32 max_pcm_streams;
- __u32 max_enc_streams;
- __u32 buf_per_stream;
-};
-
-enum snd_sst_buff_type {
- SST_BUF_USER = 1,
- SST_BUF_MMAP,
- SST_BUF_RAR,
-};
-
-struct snd_sst_mmap_buff_entry {
- unsigned int offset;
- unsigned int size;
-};
-
-struct snd_sst_mmap_buffs {
- unsigned int entries;
- enum snd_sst_buff_type type;
- struct snd_sst_mmap_buff_entry *buff;
-};
-
-struct snd_sst_buff_entry {
- void *buffer;
- unsigned int size;
-};
-
-struct snd_sst_buffs {
- unsigned int entries;
- __u8 type;
- struct snd_sst_buff_entry *buff_entry;
-};
-
-struct snd_sst_dbufs {
- unsigned long long input_bytes_consumed;
- unsigned long long output_bytes_produced;
- struct snd_sst_buffs *ibufs;
- struct snd_sst_buffs *obufs;
-};
-
-struct snd_sst_tuning_params {
- __u8 type;
- __u8 str_id;
- __u8 size;
- __u8 rsvd;
- __aligned_u64 addr;
-} __attribute__ ((packed));
-/*IOCTL defined here */
-/*SST MMF IOCTLS only */
-#define SNDRV_SST_STREAM_SET_PARAMS _IOR('L', 0x00, \
- struct snd_sst_stream_params *)
-#define SNDRV_SST_STREAM_GET_PARAMS _IOWR('L', 0x01, \
- struct snd_sst_get_stream_params *)
-#define SNDRV_SST_STREAM_GET_TSTAMP _IOWR('L', 0x02, __u64 *)
-#define SNDRV_SST_STREAM_DECODE _IOWR('L', 0x03, struct snd_sst_dbufs *)
-#define SNDRV_SST_STREAM_BYTES_DECODED _IOWR('L', 0x04, __u64 *)
-#define SNDRV_SST_STREAM_START _IO('A', 0x42)
-#define SNDRV_SST_STREAM_DROP _IO('A', 0x43)
-#define SNDRV_SST_STREAM_DRAIN _IO('A', 0x44)
-#define SNDRV_SST_STREAM_PAUSE _IOW('A', 0x45, int)
-#define SNDRV_SST_STREAM_RESUME _IO('A', 0x47)
-#define SNDRV_SST_MMAP_PLAY _IOW('L', 0x05, struct snd_sst_mmap_buffs *)
-#define SNDRV_SST_MMAP_CAPTURE _IOW('L', 0x06, struct snd_sst_mmap_buffs *)
-/*SST common ioctls */
-#define SNDRV_SST_DRIVER_INFO _IOR('L', 0x10, struct snd_sst_driver_info *)
-#define SNDRV_SST_SET_VOL _IOW('L', 0x11, struct snd_sst_vol *)
-#define SNDRV_SST_GET_VOL _IOW('L', 0x12, struct snd_sst_vol *)
-#define SNDRV_SST_MUTE _IOW('L', 0x13, struct snd_sst_mute *)
-/* AM ioctls only */
-#define SNDRV_SST_FW_INFO _IOR('L', 0x20, struct snd_sst_fw_info *)
-#define SNDRV_SST_SET_TARGET_DEVICE _IOW('L', 0x21, \
- struct snd_sst_target_device *)
-/*DSP Ioctls on /dev/intel_sst_ctrl only*/
-#define SNDRV_SST_SET_ALGO _IOW('L', 0x30, struct snd_ppp_params *)
-#define SNDRV_SST_GET_ALGO _IOWR('L', 0x31, struct snd_ppp_params *)
-#define SNDRV_SST_TUNING_PARAMS _IOW('L', 0x32, struct snd_sst_tuning_params *)
-
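-/*
- * Hypothetical usage sketch: a userspace client that has opened an SST
- * device node as "fd" might set the volume on stream 1 roughly as
- *
- *	struct snd_sst_vol vol = { .stream_id = 1, .volume = -10,
- *				   .ramp_duration = 0, .ramp_type = 0 };
- *	if (ioctl(fd, SNDRV_SST_SET_VOL, &vol) < 0)
- *		perror("SNDRV_SST_SET_VOL");
- */
-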
-#endif /* __INTEL_SST_IOCTL_H__ */
diff --git a/drivers/staging/intel_sst/intel_sst_ipc.c b/drivers/staging/intel_sst/intel_sst_ipc.c
deleted file mode 100644
index 5c3444f..0000000
--- a/drivers/staging/intel_sst/intel_sst_ipc.c
+++ /dev/null
@@ -1,774 +0,0 @@
-/*
- * intel_sst_ipc.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corporation
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This file defines all ipc functions
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/firmware.h>
-#include <linux/sched.h>
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intel_sst_fw_ipc.h"
-#include "intel_sst_common.h"
-
-/*
- * sst_send_sound_card_type - send sound card type
- *
- * this function sends the sound card type to sst dsp engine
- */
-static void sst_send_sound_card_type(void)
-{
- struct ipc_post *msg = NULL;
-
- if (sst_create_short_msg(&msg))
- return;
-
- sst_fill_header(&msg->header, IPC_IA_SET_PMIC_TYPE, 0, 0);
- msg->header.part.data = sst_drv_ctx->pmic_vendor;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- return;
-}
-
-/**
-* sst_post_message - Posts message to SST
-*
-* @work: Pointer to work structure
-*
-* This function is called by any component in driver which
-* wants to send an IPC message. This will post message only if
-* busy bit is free
-*/
-void sst_post_message(struct work_struct *work)
-{
- struct ipc_post *msg;
- union ipc_header header;
- union interrupt_reg imr;
- int retval = 0;
- imr.full = 0;
-
- /*To check if LPE is in stalled state.*/
- retval = sst_stalled();
- if (retval < 0) {
- pr_err("in stalled state\n");
- return;
- }
- pr_debug("post message called\n");
- spin_lock(&sst_drv_ctx->list_spin_lock);
-
- /* check list */
- if (list_empty(&sst_drv_ctx->ipc_dispatch_list)) {
- /* list is empty, mask imr */
- pr_debug("Empty msg queue... masking\n");
- imr.full = readl(sst_drv_ctx->shim + SST_IMRX);
- imr.part.done_interrupt = 1;
- /* dummy register for shim workaround */
- sst_shim_write(sst_drv_ctx->shim, SST_IMRX, imr.full);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- return;
- }
-
- /* check busy bit */
- header.full = sst_shim_read(sst_drv_ctx->shim, SST_IPCX);
- if (header.part.busy) {
- /* busy, unmask */
- pr_debug("Busy not free... unmasking\n");
- imr.full = readl(sst_drv_ctx->shim + SST_IMRX);
- imr.part.done_interrupt = 0;
- /* dummy register for shim workaround */
- sst_shim_write(sst_drv_ctx->shim, SST_IMRX, imr.full);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- return;
- }
- /* copy msg from list */
- msg = list_entry(sst_drv_ctx->ipc_dispatch_list.next,
- struct ipc_post, node);
- list_del(&msg->node);
- pr_debug("Post message: header = %x\n", msg->header.full);
- pr_debug("size: = %x\n", msg->header.part.data);
- if (msg->header.part.large)
- memcpy_toio(sst_drv_ctx->mailbox + SST_MAILBOX_SEND,
- msg->mailbox_data, msg->header.part.data);
- /* dummy register for shim workaround */
-
- sst_shim_write(sst_drv_ctx->shim, SST_IPCX, msg->header.full);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
-
- kfree(msg->mailbox_data);
- kfree(msg);
- return;
-}
-
-/*
- * sst_clear_interrupt - clear the SST FW interrupt
- *
- * This function clears the interrupt register after the interrupt
- * bottom half is complete allowing next interrupt to arrive
- */
-void sst_clear_interrupt(void)
-{
- union interrupt_reg isr;
- union interrupt_reg imr;
- union ipc_header clear_ipc;
-
- imr.full = sst_shim_read(sst_drv_ctx->shim, SST_IMRX);
- isr.full = sst_shim_read(sst_drv_ctx->shim, SST_ISRX);
-	/* write 1 to clear */
- isr.part.busy_interrupt = 1;
- sst_shim_write(sst_drv_ctx->shim, SST_ISRX, isr.full);
- /* Set IA done bit */
- clear_ipc.full = sst_shim_read(sst_drv_ctx->shim, SST_IPCD);
- clear_ipc.part.busy = 0;
- clear_ipc.part.done = 1;
- clear_ipc.part.data = IPC_ACK_SUCCESS;
- sst_shim_write(sst_drv_ctx->shim, SST_IPCD, clear_ipc.full);
- /* un mask busy interrupt */
- imr.part.busy_interrupt = 0;
- sst_shim_write(sst_drv_ctx->shim, SST_IMRX, imr.full);
-}
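-
-/*
- * Together, sst_post_message() and sst_clear_interrupt() implement the
- * IPC doorbell handshake: the IA posts outbound messages through IPCX
- * only while its busy bit is clear (masking/unmasking the done interrupt
- * in IMRX as the dispatch list empties or backs up), and acknowledges
- * inbound FW messages by clearing the busy status in ISRX and setting
- * done/IPC_ACK_SUCCESS in IPCD before unmasking the busy interrupt.
- */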
-
-void sst_restore_fw_context(void)
-{
- struct snd_sst_ctxt_params fw_context;
- struct ipc_post *msg = NULL;
-
- pr_debug("restore_fw_context\n");
-	/* check cpu type: not supported for the rest */
-	if (sst_drv_ctx->pci_id != SST_MFLD_PCI_ID)
-		return;
-	/* nothing to restore */
-	if (!sst_drv_ctx->fw_cntx_size)
-		return;
- pr_debug("restoring context......\n");
- /*send msg to fw*/
- if (sst_create_large_msg(&msg))
- return;
-
- sst_fill_header(&msg->header, IPC_IA_SET_FW_CTXT, 1, 0);
- msg->header.part.data = sizeof(fw_context) + sizeof(u32);
- fw_context.address = virt_to_phys((void *)sst_drv_ctx->fw_cntx);
- fw_context.size = sst_drv_ctx->fw_cntx_size;
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32),
- &fw_context, sizeof(fw_context));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- return;
-}
-/*
- * process_fw_init - process the FW init msg
- *
- * @msg: IPC message from FW
- *
- * This function processes the FW init msg from FW
- * marks FW state and prints debug info of loaded FW
- */
-int process_fw_init(struct sst_ipc_msg_wq *msg)
-{
- struct ipc_header_fw_init *init =
- (struct ipc_header_fw_init *)msg->mailbox;
- int retval = 0;
-
- pr_debug("*** FW Init msg came***\n");
- if (init->result) {
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_ERROR;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- pr_debug("FW Init failed, Error %x\n", init->result);
- pr_err("FW Init failed, Error %x\n", init->result);
- retval = -init->result;
- return retval;
- }
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID)
- sst_send_sound_card_type();
- mutex_lock(&sst_drv_ctx->sst_lock);
- sst_drv_ctx->sst_state = SST_FW_RUNNING;
- sst_drv_ctx->lpe_stalled = 0;
- mutex_unlock(&sst_drv_ctx->sst_lock);
- pr_debug("FW Version %02x.%02x.%02x\n", init->fw_version.major,
- init->fw_version.minor, init->fw_version.build);
- pr_debug("Build Type %x\n", init->fw_version.type);
- pr_debug(" Build date %s Time %s\n",
- init->build_info.date, init->build_info.time);
- sst_wake_up_alloc_block(sst_drv_ctx, FW_DWNL_ID, retval, NULL);
- sst_restore_fw_context();
- return retval;
-}
-/**
-* sst_process_message - Processes message from SST
-*
-* @work: Pointer to work structure
-*
-* This function is scheduled by ISR
-* It take a msg from process_queue and does action based on msg
-*/
-void sst_process_message(struct work_struct *work)
-{
- struct sst_ipc_msg_wq *msg =
- container_of(work, struct sst_ipc_msg_wq, wq);
- int str_id = msg->header.part.str_id;
-
- pr_debug("IPC process for %x\n", msg->header.full);
-
- /* based on msg in list call respective handler */
- switch (msg->header.part.msg_id) {
- case IPC_SST_BUF_UNDER_RUN:
- case IPC_SST_BUF_OVER_RUN:
- if (sst_validate_strid(str_id)) {
- pr_err("stream id %d invalid\n", str_id);
- break;
- }
- pr_err("Buffer under/overrun for %d\n",
- msg->header.part.str_id);
-		pr_err("Got under/over run, not sending data...ignoring\n");
- break;
-
- case IPC_SST_GET_PLAY_FRAMES:
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
- struct stream_info *stream ;
-
- if (sst_validate_strid(str_id)) {
- pr_err("strid %d invalid\n", str_id);
- break;
- }
- /* call sst_play_frame */
- stream = &sst_drv_ctx->streams[str_id];
- pr_debug("sst_play_frames for %d\n",
- msg->header.part.str_id);
- mutex_lock(&sst_drv_ctx->streams[str_id].lock);
- sst_play_frame(msg->header.part.str_id);
- mutex_unlock(&sst_drv_ctx->streams[str_id].lock);
- break;
-		} else
-			pr_err("sst_play_frames for Penwell!!\n");
-		break;
-
- case IPC_SST_GET_CAPT_FRAMES:
- if (sst_drv_ctx->pci_id == SST_MRST_PCI_ID) {
- struct stream_info *stream;
- /* call sst_capture_frame */
- if (sst_validate_strid(str_id)) {
- pr_err("str id %d invalid\n", str_id);
- break;
- }
- stream = &sst_drv_ctx->streams[str_id];
- pr_debug("sst_capture_frames for %d\n",
- msg->header.part.str_id);
- mutex_lock(&stream->lock);
- if (stream->mmapped == false &&
- stream->src == SST_DRV) {
- pr_debug("waking up block for copy.\n");
- stream->data_blk.ret_code = 0;
- stream->data_blk.condition = true;
- stream->data_blk.on = false;
- wake_up(&sst_drv_ctx->wait_queue);
- } else
- sst_capture_frame(msg->header.part.str_id);
- mutex_unlock(&stream->lock);
- } else
-			pr_err("sst_capture_frames for Penwell!!\n");
- break;
-
- case IPC_IA_PRINT_STRING:
- pr_debug("been asked to print something by fw\n");
- /* TBD */
- break;
-
- case IPC_IA_FW_INIT_CMPLT: {
- /* send next data to FW */
- process_fw_init(msg);
- break;
- }
-
- case IPC_SST_STREAM_PROCESS_FATAL_ERR:
- if (sst_validate_strid(str_id)) {
- pr_err("stream id %d invalid\n", str_id);
- break;
- }
- pr_err("codec fatal error %x stream %d...\n",
- msg->header.full, msg->header.part.str_id);
- pr_err("Dropping the stream\n");
- sst_drop_stream(msg->header.part.str_id);
- break;
- case IPC_IA_LPE_GETTING_STALLED:
- sst_drv_ctx->lpe_stalled = 1;
- break;
- case IPC_IA_LPE_UNSTALLED:
- sst_drv_ctx->lpe_stalled = 0;
- break;
- default:
- /* Illegal case */
- pr_err("Unhandled msg %x header %x\n",
- msg->header.part.msg_id, msg->header.full);
- }
- sst_clear_interrupt();
- return;
-}
-
-/**
-* sst_process_reply - Processes reply message from SST
-*
-* @work: Pointer to work structure
-*
-* This function is scheduled by the ISR.
-* It takes a reply msg from the response_queue and
-* handles it accordingly
-*/
-void sst_process_reply(struct work_struct *work)
-{
- struct sst_ipc_msg_wq *msg =
- container_of(work, struct sst_ipc_msg_wq, wq);
-
- int str_id = msg->header.part.str_id;
- struct stream_info *str_info;
-
- switch (msg->header.part.msg_id) {
- case IPC_IA_TARGET_DEV_SELECT:
- if (!msg->header.part.data) {
- sst_drv_ctx->tgt_dev_blk.ret_code = 0;
- } else {
- pr_err(" Msg %x reply error %x\n",
- msg->header.part.msg_id, msg->header.part.data);
- sst_drv_ctx->tgt_dev_blk.ret_code =
- -msg->header.part.data;
- }
-
- if (sst_drv_ctx->tgt_dev_blk.on == true) {
- sst_drv_ctx->tgt_dev_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- case IPC_IA_ALG_PARAMS: {
- pr_debug("sst:IPC_ALG_PARAMS response %x\n", msg->header.full);
- pr_debug("sst: data value %x\n", msg->header.part.data);
- pr_debug("sst: large value %x\n", msg->header.part.large);
-
- if (!msg->header.part.large) {
- if (!msg->header.part.data) {
- pr_debug("sst: alg set success\n");
- sst_drv_ctx->ppp_params_blk.ret_code = 0;
- } else {
- pr_debug("sst: alg set failed\n");
- sst_drv_ctx->ppp_params_blk.ret_code =
- -msg->header.part.data;
- }
-
- } else if (msg->header.part.data) {
- struct snd_ppp_params *mailbox_params, *get_params;
- char *params;
-
- pr_debug("sst: alg get success\n");
- mailbox_params = (struct snd_ppp_params *)msg->mailbox;
- get_params = kzalloc(sizeof(*get_params), GFP_KERNEL);
- if (get_params == NULL) {
- pr_err("sst: out of memory for ALG PARAMS");
- break;
- }
- memcpy_fromio(get_params, mailbox_params,
- sizeof(*get_params));
- get_params->params = kzalloc(mailbox_params->size,
- GFP_KERNEL);
- if (get_params->params == NULL) {
- kfree(get_params);
- pr_err("sst: out of memory for ALG PARAMS block");
- break;
- }
- params = msg->mailbox;
- params = params + sizeof(*mailbox_params) - sizeof(u32);
- memcpy_fromio(get_params->params, params,
- get_params->size);
- sst_drv_ctx->ppp_params_blk.ret_code = 0;
- sst_drv_ctx->ppp_params_blk.data = get_params;
- }
-
- if (sst_drv_ctx->ppp_params_blk.on == true) {
- sst_drv_ctx->ppp_params_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- }
-
- case IPC_IA_TUNING_PARAMS: {
- pr_debug("sst:IPC_TUNING_PARAMS resp: %x\n", msg->header.full);
- pr_debug("data value %x\n", msg->header.part.data);
- if (msg->header.part.large) {
- pr_debug("alg set failed\n");
- sst_drv_ctx->ppp_params_blk.ret_code =
- -msg->header.part.data;
- } else {
- pr_debug("alg set success\n");
- sst_drv_ctx->ppp_params_blk.ret_code = 0;
- }
- if (sst_drv_ctx->ppp_params_blk.on == true) {
- sst_drv_ctx->ppp_params_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- }
-
- case IPC_IA_GET_FW_INFO: {
- struct snd_sst_fw_info *fw_info =
- (struct snd_sst_fw_info *)msg->mailbox;
- if (msg->header.part.large) {
- int major = fw_info->fw_version.major;
- int minor = fw_info->fw_version.minor;
- int build = fw_info->fw_version.build;
- pr_debug("Msg succeeded %x\n",
- msg->header.part.msg_id);
- pr_debug("INFO: ***FW*** = %02d.%02d.%02d\n",
- major, minor, build);
- memcpy_fromio(sst_drv_ctx->fw_info_blk.data,
- ((struct snd_sst_fw_info *)(msg->mailbox)),
- sizeof(struct snd_sst_fw_info));
- sst_drv_ctx->fw_info_blk.ret_code = 0;
- } else {
- pr_err(" Msg %x reply error %x\n",
- msg->header.part.msg_id, msg->header.part.data);
- sst_drv_ctx->fw_info_blk.ret_code =
- -msg->header.part.data;
- }
- if (sst_drv_ctx->fw_info_blk.on == true) {
- pr_debug("Memcopy succeeded\n");
- sst_drv_ctx->fw_info_blk.on = false;
- sst_drv_ctx->fw_info_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- }
- case IPC_IA_SET_STREAM_MUTE:
- if (!msg->header.part.data) {
- pr_debug("Msg succeeded %x\n",
- msg->header.part.msg_id);
- sst_drv_ctx->mute_info_blk.ret_code = 0;
- } else {
- pr_err(" Msg %x reply error %x\n",
- msg->header.part.msg_id, msg->header.part.data);
- sst_drv_ctx->mute_info_blk.ret_code =
- -msg->header.part.data;
-
- }
- if (sst_drv_ctx->mute_info_blk.on == true) {
- sst_drv_ctx->mute_info_blk.on = false;
- sst_drv_ctx->mute_info_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- case IPC_IA_SET_STREAM_VOL:
- if (!msg->header.part.data) {
- pr_debug("Msg succeeded %x\n",
- msg->header.part.msg_id);
- sst_drv_ctx->vol_info_blk.ret_code = 0;
- } else {
- pr_err(" Msg %x reply error %x\n",
- msg->header.part.msg_id,
- msg->header.part.data);
- sst_drv_ctx->vol_info_blk.ret_code =
- -msg->header.part.data;
-
- }
-
- if (sst_drv_ctx->vol_info_blk.on == true) {
- sst_drv_ctx->vol_info_blk.on = false;
- sst_drv_ctx->vol_info_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- case IPC_IA_GET_STREAM_VOL:
- if (msg->header.part.large) {
- pr_debug("Large Msg Received Successfully\n");
- pr_debug("Msg succeeded %x\n",
- msg->header.part.msg_id);
- memcpy_fromio(sst_drv_ctx->vol_info_blk.data,
- (void *) msg->mailbox,
- sizeof(struct snd_sst_vol));
- sst_drv_ctx->vol_info_blk.ret_code = 0;
- } else {
- pr_err("Msg %x reply error %x\n",
- msg->header.part.msg_id, msg->header.part.data);
- sst_drv_ctx->vol_info_blk.ret_code =
- -msg->header.part.data;
- }
- if (sst_drv_ctx->vol_info_blk.on == true) {
- sst_drv_ctx->vol_info_blk.on = false;
- sst_drv_ctx->vol_info_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
-
- case IPC_IA_GET_STREAM_PARAMS:
- if (sst_validate_strid(str_id)) {
- pr_err("stream id %d invalid\n", str_id);
- break;
- }
- str_info = &sst_drv_ctx->streams[str_id];
- if (msg->header.part.large) {
- pr_debug("Get stream large success\n");
- memcpy_fromio(str_info->ctrl_blk.data,
- ((void *)(msg->mailbox)),
- sizeof(struct snd_sst_fw_get_stream_params));
- str_info->ctrl_blk.ret_code = 0;
- } else {
- pr_err("Msg %x reply error %x\n",
- msg->header.part.msg_id, msg->header.part.data);
- str_info->ctrl_blk.ret_code = -msg->header.part.data;
- }
- if (str_info->ctrl_blk.on == true) {
- str_info->ctrl_blk.on = false;
- str_info->ctrl_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- case IPC_IA_DECODE_FRAMES:
- if (sst_validate_strid(str_id)) {
- pr_err("stream id %d invalid\n", str_id);
- break;
- }
- str_info = &sst_drv_ctx->streams[str_id];
- if (msg->header.part.large) {
- pr_debug("Msg succeeded %x\n",
- msg->header.part.msg_id);
- memcpy_fromio(str_info->data_blk.data,
- ((void *)(msg->mailbox)),
- sizeof(struct snd_sst_decode_info));
- str_info->data_blk.ret_code = 0;
- } else {
- pr_err("Msg %x reply error %x\n",
- msg->header.part.msg_id, msg->header.part.data);
- str_info->data_blk.ret_code = -msg->header.part.data;
- }
- if (str_info->data_blk.on == true) {
- str_info->data_blk.on = false;
- str_info->data_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- case IPC_IA_DRAIN_STREAM:
- if (sst_validate_strid(str_id)) {
- pr_err("stream id %d invalid\n", str_id);
- break;
- }
- str_info = &sst_drv_ctx->streams[str_id];
- if (!msg->header.part.data) {
- pr_debug("Msg succeeded %x\n",
- msg->header.part.msg_id);
- str_info->ctrl_blk.ret_code = 0;
-
- } else {
- pr_err(" Msg %x reply error %x\n",
- msg->header.part.msg_id, msg->header.part.data);
- str_info->ctrl_blk.ret_code = -msg->header.part.data;
-
- }
- str_info = &sst_drv_ctx->streams[str_id];
- if (str_info->data_blk.on == true) {
- str_info->data_blk.on = false;
- str_info->data_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
-
- case IPC_IA_DROP_STREAM:
- if (sst_validate_strid(str_id)) {
- pr_err("str id %d invalid\n", str_id);
- break;
- }
- str_info = &sst_drv_ctx->streams[str_id];
- if (msg->header.part.large) {
- struct snd_sst_drop_response *drop_resp =
- (struct snd_sst_drop_response *)msg->mailbox;
-
- pr_debug("Drop ret bytes %x\n", drop_resp->bytes);
-
- str_info->curr_bytes = drop_resp->bytes;
- str_info->ctrl_blk.ret_code = 0;
- } else {
- pr_err(" Msg %x reply error %x\n",
- msg->header.part.msg_id, msg->header.part.data);
- str_info->ctrl_blk.ret_code = -msg->header.part.data;
- }
- if (str_info->ctrl_blk.on == true) {
- str_info->ctrl_blk.on = false;
- str_info->ctrl_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- case IPC_IA_ENABLE_RX_TIME_SLOT:
- if (!msg->header.part.data) {
- pr_debug("RX_TIME_SLOT success\n");
- sst_drv_ctx->hs_info_blk.ret_code = 0;
- } else {
- pr_err(" Msg %x reply error %x\n",
- msg->header.part.msg_id,
- msg->header.part.data);
- sst_drv_ctx->hs_info_blk.ret_code =
- -msg->header.part.data;
- }
- if (sst_drv_ctx->hs_info_blk.on == true) {
- sst_drv_ctx->hs_info_blk.on = false;
- sst_drv_ctx->hs_info_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- case IPC_IA_PAUSE_STREAM:
- case IPC_IA_RESUME_STREAM:
- case IPC_IA_SET_STREAM_PARAMS:
- str_info = &sst_drv_ctx->streams[str_id];
- if (!msg->header.part.data) {
- pr_debug("Msg succeeded %x\n",
- msg->header.part.msg_id);
- str_info->ctrl_blk.ret_code = 0;
- } else {
- pr_err(" Msg %x reply error %x\n",
- msg->header.part.msg_id,
- msg->header.part.data);
- str_info->ctrl_blk.ret_code = -msg->header.part.data;
- }
- if (sst_validate_strid(str_id)) {
- pr_err(" stream id %d invalid\n", str_id);
- break;
- }
-
- if (str_info->ctrl_blk.on == true) {
- str_info->ctrl_blk.on = false;
- str_info->ctrl_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
-
- case IPC_IA_FREE_STREAM:
- str_info = &sst_drv_ctx->streams[str_id];
- if (!msg->header.part.data) {
- pr_debug("Stream %d freed\n", str_id);
- } else {
- pr_err("Free for %d ret error %x\n",
- str_id, msg->header.part.data);
- }
- if (str_info->ctrl_blk.on == true) {
- str_info->ctrl_blk.on = false;
- str_info->ctrl_blk.condition = true;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- break;
- case IPC_IA_ALLOC_STREAM: {
- /* map to stream, call play */
- struct snd_sst_alloc_response *resp =
- (struct snd_sst_alloc_response *)msg->mailbox;
- if (resp->str_type.result)
- pr_err("error alloc stream = %x\n",
- resp->str_type.result);
- sst_alloc_stream_response(str_id, resp);
- break;
- }
-
- case IPC_IA_PLAY_FRAMES:
- case IPC_IA_CAPT_FRAMES:
- if (sst_validate_strid(str_id)) {
- pr_err("stream id %d invalid\n", str_id);
- break;
- }
- pr_debug("Ack for play/capt frames received\n");
- break;
-
- case IPC_IA_PREP_LIB_DNLD: {
- struct snd_sst_str_type *str_type =
- (struct snd_sst_str_type *)msg->mailbox;
- pr_debug("Prep Lib download %x\n",
- msg->header.part.msg_id);
- if (str_type->result)
- pr_err("Prep lib download %x\n", str_type->result);
- else
- pr_debug("Can download codec now...\n");
- sst_wake_up_alloc_block(sst_drv_ctx, str_id,
- str_type->result, NULL);
- break;
- }
-
- case IPC_IA_LIB_DNLD_CMPLT: {
- struct snd_sst_lib_download_info *resp =
- (struct snd_sst_lib_download_info *)msg->mailbox;
- int retval = resp->result;
-
- pr_debug("Lib downloaded %x\n", msg->header.part.msg_id);
- if (resp->result) {
- pr_err("err in lib dload %x\n", resp->result);
- } else {
- pr_debug("Codec download complete...\n");
- pr_debug("codec Type %d Ver %d Built %s: %s\n",
- resp->dload_lib.lib_info.lib_type,
- resp->dload_lib.lib_info.lib_version,
- resp->dload_lib.lib_info.b_date,
- resp->dload_lib.lib_info.b_time);
- }
- sst_wake_up_alloc_block(sst_drv_ctx, str_id,
- retval, NULL);
- break;
- }
-
- case IPC_IA_GET_FW_VERSION: {
- struct ipc_header_fw_init *version =
- (struct ipc_header_fw_init *)msg->mailbox;
- int major = version->fw_version.major;
- int minor = version->fw_version.minor;
- int build = version->fw_version.build;
- dev_info(&sst_drv_ctx->pci->dev,
- "INFO: ***LOADED SST FW VERSION*** = %02d.%02d.%02d\n",
- major, minor, build);
- break;
- }
- case IPC_IA_GET_FW_BUILD_INF: {
- struct sst_fw_build_info *build =
- (struct sst_fw_build_info *)msg->mailbox;
- pr_debug("Build date:%sTime:%s", build->date, build->time);
- break;
- }
- case IPC_IA_SET_PMIC_TYPE:
- break;
- case IPC_IA_START_STREAM:
- pr_debug("reply for START STREAM %x\n", msg->header.full);
- break;
-
- case IPC_IA_GET_FW_CTXT:
- pr_debug("reply for get fw ctxt %x\n", msg->header.full);
- if (msg->header.part.data)
- sst_drv_ctx->fw_cntx_size = 0;
- else
- sst_drv_ctx->fw_cntx_size = *sst_drv_ctx->fw_cntx;
- pr_debug("fw copied data %x\n", sst_drv_ctx->fw_cntx_size);
- sst_wake_up_alloc_block(
- sst_drv_ctx, str_id, msg->header.part.data, NULL);
- break;
- default:
- /* Illegal case */
- pr_err("process reply:default = %x\n", msg->header.full);
- }
- sst_clear_interrupt();
- return;
-}
diff --git a/drivers/staging/intel_sst/intel_sst_pvt.c b/drivers/staging/intel_sst/intel_sst_pvt.c
deleted file mode 100644
index e034bea..0000000
--- a/drivers/staging/intel_sst/intel_sst_pvt.c
+++ /dev/null
@@ -1,313 +0,0 @@
-/*
- * intel_sst_pvt.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This driver exposes the audio engine functionalities to the ALSA
- * and middleware.
- *
- * This file contains all private functions
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/fs.h>
-#include <linux/firmware.h>
-#include <linux/sched.h>
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intel_sst_fw_ipc.h"
-#include "intel_sst_common.h"
-
-/*
- * sst_get_block_stream - get a new block stream
- *
- * @sst_drv_ctx: Driver context structure
- *
- * This function assigns a block for calls that do not have a stream context yet.
- * The blocks are used for waiting on the firmware's response to an operation.
- * Should be called with the stream lock held.
- */
-int sst_get_block_stream(struct intel_sst_drv *sst_drv_ctx)
-{
- int i;
-
- for (i = 0; i < MAX_ACTIVE_STREAM; i++) {
- if (sst_drv_ctx->alloc_block[i].sst_id == BLOCK_UNINIT) {
- sst_drv_ctx->alloc_block[i].ops_block.condition = false;
- sst_drv_ctx->alloc_block[i].ops_block.ret_code = 0;
- sst_drv_ctx->alloc_block[i].sst_id = 0;
- break;
- }
- }
- if (i == MAX_ACTIVE_STREAM) {
- pr_err("max alloc_stream reached\n");
- i = -EBUSY; /* active stream limit reached */
- }
- return i;
-}
-
-/*
- * sst_wait_interruptible - wait on event
- *
- * @sst_drv_ctx: Driver context
- * @block: Driver block to wait on
- *
- * This function waits without a timeout (and is interruptible) for a
- * given block event
- */
-int sst_wait_interruptible(struct intel_sst_drv *sst_drv_ctx,
- struct sst_block *block)
-{
- int retval = 0;
-
- if (!wait_event_interruptible(sst_drv_ctx->wait_queue,
- block->condition)) {
- /* event wake */
- if (block->ret_code < 0) {
- pr_err("stream failed %d\n", block->ret_code);
- retval = -EBUSY;
- } else {
- pr_debug("event up\n");
- retval = 0;
- }
- } else {
- pr_err("signal interrupted\n");
- retval = -EINTR;
- }
- return retval;
-
-}
-
-
-/*
- * sst_wait_interruptible_timeout - interruptible wait on event with timeout
- *
- * @sst_drv_ctx: Driver context
- * @block: Driver block to wait on
- * @timeout: timeout, in milliseconds, to wait for
- *
- * This function waits with a timeout value (and is interruptible) on a
- * given block event
- */
-int sst_wait_interruptible_timeout(
- struct intel_sst_drv *sst_drv_ctx,
- struct sst_block *block, int timeout)
-{
- int retval = 0;
-
- pr_debug("sst_wait_interruptible_timeout - waiting....\n");
- if (wait_event_interruptible_timeout(sst_drv_ctx->wait_queue,
- block->condition,
- msecs_to_jiffies(timeout))) {
- if (block->ret_code < 0)
- pr_err("stream failed %d\n", block->ret_code);
- else
- pr_debug("event up\n");
- retval = block->ret_code;
- } else {
- block->on = false;
- pr_err("timeout occurred...\n");
- /* Setting the firmware state to uninit so that the
- firmware gets re-downloaded on the next request;
- the firmware not responding for 5 sec is treated as
- an unrecoverable FW error.
- sst_drv_ctx->sst_state = SST_UN_INIT;*/
- retval = -EBUSY;
- }
- return retval;
-
-}
-
-
-/*
- * sst_wait_timeout - wait on event for timeout
- *
- * @sst_drv_ctx: Driver context
- * @block: Driver block to wait on
- *
- * This function waits with a timeout value (and is not interruptible) on a
- * given block event
- */
-int sst_wait_timeout(struct intel_sst_drv *sst_drv_ctx,
- struct stream_alloc_block *block)
-{
- int retval = 0;
-
- /* NOTE:
- Observed that FW processes the alloc msg and replies even
- before the alloc thread has finished execution */
- pr_debug("waiting for %x, condition %x\n",
- block->sst_id, block->ops_block.condition);
- if (wait_event_interruptible_timeout(sst_drv_ctx->wait_queue,
- block->ops_block.condition,
- msecs_to_jiffies(SST_BLOCK_TIMEOUT))) {
- /* event wake */
- pr_debug("Event wake %x\n", block->ops_block.condition);
- pr_debug("message ret: %d\n", block->ops_block.ret_code);
- retval = block->ops_block.ret_code;
- } else {
- block->ops_block.on = false;
- pr_err("Wait timed-out %x\n", block->ops_block.condition);
- /* Setting the firmware state to uninit so that the
- firmware gets re-downloaded on the next request;
- the firmware not responding for 5 sec is treated as
- an unrecoverable FW error.
- sst_drv_ctx->sst_state = SST_UN_INIT;*/
- retval = -EBUSY;
- }
- return retval;
-
-}
-
-/*
- * sst_create_large_msg - create a large IPC message
- *
- * @arg: ipc message
- *
- * this function allocates structures to send a large message to the firmware
- */
-int sst_create_large_msg(struct ipc_post **arg)
-{
- struct ipc_post *msg;
-
- msg = kzalloc(sizeof(struct ipc_post), GFP_ATOMIC);
- if (!msg) {
- pr_err("kzalloc msg failed\n");
- return -ENOMEM;
- }
-
- msg->mailbox_data = kzalloc(SST_MAILBOX_SIZE, GFP_ATOMIC);
- if (!msg->mailbox_data) {
- kfree(msg);
- pr_err("kzalloc mailbox_data failed");
- return -ENOMEM;
- }
- *arg = msg;
- return 0;
-}
-
-/*
- * sst_create_short_msg - create a short IPC message
- *
- * @arg: ipc message
- *
- * this function allocates structures to send a short message to the firmware
- */
-int sst_create_short_msg(struct ipc_post **arg)
-{
- struct ipc_post *msg;
-
- msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
- if (!msg) {
- pr_err("kzalloc msg failed\n");
- return -ENOMEM;
- }
- msg->mailbox_data = NULL;
- *arg = msg;
- return 0;
-}
-
-/*
- * sst_clean_stream - clean the stream context
- *
- * @stream: stream structure
- *
- * This function resets the stream context and
- * should be called when the stream is freed
- */
-void sst_clean_stream(struct stream_info *stream)
-{
- struct sst_stream_bufs *bufs = NULL, *_bufs;
- stream->status = STREAM_UN_INIT;
- stream->prev = STREAM_UN_INIT;
- mutex_lock(&stream->lock);
- list_for_each_entry_safe(bufs, _bufs, &stream->bufs, node) {
- list_del(&bufs->node);
- kfree(bufs);
- }
- mutex_unlock(&stream->lock);
-
- if (stream->ops != STREAM_OPS_PLAYBACK_DRM)
- kfree(stream->decode_ibuf);
-}
-
-/*
- * sst_wake_up_alloc_block - wake up waiting block
- *
- * @sst_drv_ctx: Driver context
- * @sst_id: stream id
- * @status: status of wakeup
- * @data: data pointer of wakeup
- *
- * This function wakes up a sleeping block event based on the response
- */
-void sst_wake_up_alloc_block(struct intel_sst_drv *sst_drv_ctx,
- u8 sst_id, int status, void *data)
-{
- int i;
-
- /* Unblock with retval code */
- for (i = 0; i < MAX_ACTIVE_STREAM; i++) {
- if (sst_id == sst_drv_ctx->alloc_block[i].sst_id) {
- sst_drv_ctx->alloc_block[i].ops_block.condition = true;
- sst_drv_ctx->alloc_block[i].ops_block.ret_code = status;
- sst_drv_ctx->alloc_block[i].ops_block.data = data;
- wake_up(&sst_drv_ctx->wait_queue);
- break;
- }
- }
-}
-
-/*
- * sst_enable_rx_timeslot - Send msg to enable the RX time slot
- * @status: rx timeslot to be enabled
- *
- * This function is called when the RX timeslot is required to be enabled
- */
-int sst_enable_rx_timeslot(int status)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
-
- if (sst_create_short_msg(&msg)) {
- pr_err("mem allocation failed\n");
- return -ENOMEM;
- }
- pr_debug("ipc message sending: ENABLE_RX_TIME_SLOT\n");
- sst_fill_header(&msg->header, IPC_IA_ENABLE_RX_TIME_SLOT, 0, 0);
- msg->header.part.data = status;
- sst_drv_ctx->hs_info_blk.condition = false;
- sst_drv_ctx->hs_info_blk.ret_code = 0;
- sst_drv_ctx->hs_info_blk.on = true;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node,
- &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &sst_drv_ctx->hs_info_blk, SST_BLOCK_TIMEOUT);
- return retval;
-}
-
diff --git a/drivers/staging/intel_sst/intel_sst_stream.c b/drivers/staging/intel_sst/intel_sst_stream.c
deleted file mode 100644
index be4565e..0000000
--- a/drivers/staging/intel_sst/intel_sst_stream.c
+++ /dev/null
@@ -1,583 +0,0 @@
-/*
- * intel_sst_stream.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This file contains the stream operations of SST driver
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/firmware.h>
-#include <linux/sched.h>
-#include <linux/delay.h>
-#include "intel_sst_ioctl.h"
-#include "intel_sst.h"
-#include "intel_sst_fw_ipc.h"
-#include "intel_sst_common.h"
-
-/*
- * sst_check_device_type - Check the Medfield device type
- *
- * @device: Device to be checked
- * @num_chan: Number of channels queried
- * @pcm_slot: slot to be enabled for this device
- *
- * This checks the device against the map and calculates the pcm_slot value
- */
-int sst_check_device_type(u32 device, u32 num_chan, u32 *pcm_slot)
-{
- if (device >= MAX_NUM_STREAMS_MFLD) {
- pr_debug("device type invalid %d\n", device);
- return -EINVAL;
- }
- if (sst_drv_ctx->streams[device].status == STREAM_UN_INIT) {
- if (device == SND_SST_DEVICE_VIBRA && num_chan == 1)
- *pcm_slot = 0x10;
- else if (device == SND_SST_DEVICE_HAPTIC && num_chan == 1)
- *pcm_slot = 0x20;
- else if (device == SND_SST_DEVICE_IHF && num_chan == 1)
- *pcm_slot = 0x04;
- else if (device == SND_SST_DEVICE_IHF && num_chan == 2)
- *pcm_slot = 0x0C;
- else if (device == SND_SST_DEVICE_HEADSET && num_chan == 1)
- *pcm_slot = 0x01;
- else if (device == SND_SST_DEVICE_HEADSET && num_chan == 2)
- *pcm_slot = 0x03;
- else if (device == SND_SST_DEVICE_CAPTURE && num_chan == 1)
- *pcm_slot = 0x01;
- else if (device == SND_SST_DEVICE_CAPTURE && num_chan == 2)
- *pcm_slot = 0x03;
- else if (device == SND_SST_DEVICE_CAPTURE && num_chan == 3)
- *pcm_slot = 0x07;
- else if (device == SND_SST_DEVICE_CAPTURE && num_chan == 4)
- *pcm_slot = 0x0F;
- else if (device == SND_SST_DEVICE_CAPTURE && num_chan > 4)
- *pcm_slot = 0x1F;
- else {
- pr_debug("No condition satisfied.. ret err\n");
- return -EINVAL;
- }
- } else {
- pr_debug("this stream state is not uni-init, is %d\n",
- sst_drv_ctx->streams[device].status);
- return -EBADRQC;
- }
- pr_debug("returning slot %x\n", *pcm_slot);
- return 0;
-}
-/**
- * get_mrst_stream_id - gets a new stream id for use
- *
- * This function searches the current streams and allocates an empty stream.
- * The stream_lock must be held before calling this
- */
-static int get_mrst_stream_id(void)
-{
- int i;
-
- for (i = 1; i <= MAX_NUM_STREAMS_MRST; i++) {
- if (sst_drv_ctx->streams[i].status == STREAM_UN_INIT)
- return i;
- }
- pr_debug("Didn't find empty stream for mrst\n");
- return -EBUSY;
-}
-
-/**
- * sst_alloc_stream - Send msg for a new stream ID
- *
- * @params: stream params
- * @stream_ops: operation of stream PB/capture
- * @codec: codec for stream
- * @device: device stream to be allocated for
- *
- * This function is called by any function which wants to start
- * a new stream. It also checks whether an idle stream exists and,
- * if so, assigns that idle stream id to this request
- */
-int sst_alloc_stream(char *params, unsigned int stream_ops,
- u8 codec, unsigned int device)
-{
- struct ipc_post *msg = NULL;
- struct snd_sst_alloc_params alloc_param;
- unsigned int pcm_slot = 0, num_ch;
- int str_id;
- struct snd_sst_stream_params *sparams;
- struct stream_info *str_info;
-
- pr_debug("SST DBG:entering sst_alloc_stream\n");
- pr_debug("SST DBG:%d %d %d\n", stream_ops, codec, device);
-
- BUG_ON(!params);
- sparams = (struct snd_sst_stream_params *)params;
- num_ch = sparams->uc.pcm_params.num_chan;
- /*check the device type*/
- if (sst_drv_ctx->pci_id == SST_MFLD_PCI_ID) {
- if (sst_check_device_type(device, num_ch, &pcm_slot))
- return -EINVAL;
- mutex_lock(&sst_drv_ctx->stream_lock);
- str_id = device;
- mutex_unlock(&sst_drv_ctx->stream_lock);
- pr_debug("SST_DBG: slot %x\n", pcm_slot);
- } else {
- mutex_lock(&sst_drv_ctx->stream_lock);
- str_id = get_mrst_stream_id();
- mutex_unlock(&sst_drv_ctx->stream_lock);
- if (str_id <= 0)
- return -EBUSY;
- }
- /*allocate device type context*/
- sst_init_stream(&sst_drv_ctx->streams[str_id], codec,
- str_id, stream_ops, pcm_slot, device);
- /* send msg to FW to allocate a stream */
- if (sst_create_large_msg(&msg))
- return -ENOMEM;
-
- sst_fill_header(&msg->header, IPC_IA_ALLOC_STREAM, 1, str_id);
- msg->header.part.data = sizeof(alloc_param) + sizeof(u32);
- alloc_param.str_type.codec_type = codec;
- alloc_param.str_type.str_type = SST_STREAM_TYPE_MUSIC;
- alloc_param.str_type.operation = stream_ops;
- alloc_param.str_type.protected_str = 0; /* non drm */
- alloc_param.str_type.time_slots = pcm_slot;
- alloc_param.str_type.result = alloc_param.str_type.reserved = 0;
- memcpy(&alloc_param.stream_params, params,
- sizeof(struct snd_sst_stream_params));
-
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), &alloc_param,
- sizeof(alloc_param));
- str_info = &sst_drv_ctx->streams[str_id];
- str_info->ctrl_blk.condition = false;
- str_info->ctrl_blk.ret_code = 0;
- str_info->ctrl_blk.on = true;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- pr_debug("SST DBG:alloc stream done\n");
- return str_id;
-}
-
-
-/*
- * sst_alloc_stream_response - process alloc reply
- *
- * @str_id: stream id for which the stream has been allocated
- * @resp: the stream response from firmware
- *
- * This function is called when the firmware responds to a stream allocation
- * request
- */
-int sst_alloc_stream_response(unsigned int str_id,
- struct snd_sst_alloc_response *resp)
-{
- int retval = 0;
- struct stream_info *str_info;
- struct snd_sst_lib_download *lib_dnld;
-
- pr_debug("SST DEBUG: stream number given = %d\n", str_id);
- str_info = &sst_drv_ctx->streams[str_id];
- if (resp->str_type.result == SST_LIB_ERR_LIB_DNLD_REQUIRED) {
- lib_dnld = kzalloc(sizeof(*lib_dnld), GFP_KERNEL);
- if (lib_dnld)
- memcpy(lib_dnld, &resp->lib_dnld, sizeof(*lib_dnld));
- } else
- lib_dnld = NULL;
- if (str_info->ctrl_blk.on == true) {
- str_info->ctrl_blk.on = false;
- str_info->ctrl_blk.data = lib_dnld;
- str_info->ctrl_blk.condition = true;
- str_info->ctrl_blk.ret_code = resp->str_type.result;
- pr_debug("SST DEBUG: sst_alloc_stream_response: waking up.\n");
- wake_up(&sst_drv_ctx->wait_queue);
- }
- return retval;
-}
-
-
-/**
-* sst_get_fw_info - Send msg to query for firmware configurations
-* @info: out param that holds the firmware configurations
-*
-* This function is called when the firmware configurations are queried for
-*/
-int sst_get_fw_info(struct snd_sst_fw_info *info)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
-
- pr_debug("SST DBG:sst_get_fw_info called\n");
-
- if (sst_create_short_msg(&msg)) {
- pr_err("SST ERR: message creation failed\n");
- return -ENOMEM;
- }
-
- sst_fill_header(&msg->header, IPC_IA_GET_FW_INFO, 0, 0);
- sst_drv_ctx->fw_info_blk.condition = false;
- sst_drv_ctx->fw_info_blk.ret_code = 0;
- sst_drv_ctx->fw_info_blk.on = true;
- sst_drv_ctx->fw_info_blk.data = info;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &sst_drv_ctx->fw_info_blk, SST_BLOCK_TIMEOUT);
- if (retval) {
- pr_err("SST ERR: error in fw_info = %d\n", retval);
- retval = -EIO;
- }
- return retval;
-}
-
-
-/**
-* sst_start_stream - Send msg for starting a stream
-* @str_id: stream ID
-*
-* This function is called by any function which wants to start
-* an initialized stream.
-*/
-int sst_start_stream(int str_id)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
- struct stream_info *str_info;
-
- pr_debug("sst_start_stream for %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
- str_info = &sst_drv_ctx->streams[str_id];
- if (str_info->status != STREAM_INIT)
- return -EBADRQC;
- if (sst_create_short_msg(&msg))
- return -ENOMEM;
-
- sst_fill_header(&msg->header, IPC_IA_START_STREAM, 0, str_id);
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- return retval;
-}
-
-/*
- * sst_pause_stream - Send msg for pausing a stream
- * @str_id: stream ID
- *
- * This function is called by any function which wants to pause
- * an already running stream.
- */
-int sst_pause_stream(int str_id)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
- struct stream_info *str_info;
-
- pr_debug("SST DBG:sst_pause_stream for %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
- str_info = &sst_drv_ctx->streams[str_id];
- if (str_info->status == STREAM_PAUSED)
- return 0;
- if (str_info->status == STREAM_RUNNING ||
- str_info->status == STREAM_INIT) {
- if (str_info->prev == STREAM_UN_INIT)
- return -EBADRQC;
- if (str_info->ctrl_blk.on == true) {
- pr_err("SST ERR: control path is in use\n");
- return -EINVAL;
- }
- if (sst_create_short_msg(&msg))
- return -ENOMEM;
-
- sst_fill_header(&msg->header, IPC_IA_PAUSE_STREAM, 0, str_id);
- str_info->ctrl_blk.condition = false;
- str_info->ctrl_blk.ret_code = 0;
- str_info->ctrl_blk.on = true;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node,
- &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
- if (retval == 0) {
- str_info->prev = str_info->status;
- str_info->status = STREAM_PAUSED;
- } else if (retval == SST_ERR_INVALID_STREAM_ID) {
- retval = -EINVAL;
- mutex_lock(&sst_drv_ctx->stream_lock);
- sst_clean_stream(str_info);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- }
- } else {
- retval = -EBADRQC;
- pr_err("SST ERR: BADQRC for stream\n");
- }
-
- return retval;
-}
-
-/**
- * sst_resume_stream - Send msg for resuming stream
- * @str_id: stream ID
- *
- * This function is called by any function which wants to resume
- * an already paused stream.
- */
-int sst_resume_stream(int str_id)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
- struct stream_info *str_info;
-
- pr_debug("SST DBG:sst_resume_stream for %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
- str_info = &sst_drv_ctx->streams[str_id];
- if (str_info->status == STREAM_RUNNING)
- return 0;
- if (str_info->status == STREAM_PAUSED) {
- if (str_info->ctrl_blk.on == true) {
- pr_err("SST ERR: control path in use\n");
- return -EINVAL;
- }
- if (sst_create_short_msg(&msg)) {
- pr_err("SST ERR: mem allocation failed\n");
- return -ENOMEM;
- }
- sst_fill_header(&msg->header, IPC_IA_RESUME_STREAM, 0, str_id);
- str_info->ctrl_blk.condition = false;
- str_info->ctrl_blk.ret_code = 0;
- str_info->ctrl_blk.on = true;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node,
- &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
- if (!retval) {
- if (str_info->prev == STREAM_RUNNING)
- str_info->status = STREAM_RUNNING;
- else
- str_info->status = STREAM_INIT;
- str_info->prev = STREAM_PAUSED;
- } else if (retval == -SST_ERR_INVALID_STREAM_ID) {
- retval = -EINVAL;
- mutex_lock(&sst_drv_ctx->stream_lock);
- sst_clean_stream(str_info);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- }
- } else {
- retval = -EBADRQC;
- pr_err("SST ERR: BADQRC for stream\n");
- }
-
- return retval;
-}
-
-
-/**
- * sst_drop_stream - Send msg for stopping stream
- * @str_id: stream ID
- *
- * This function is called by any function which wants to stop
- * a stream.
- */
-int sst_drop_stream(int str_id)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
- struct sst_stream_bufs *bufs = NULL, *_bufs;
- struct stream_info *str_info;
-
- pr_debug("SST DBG:sst_drop_stream for %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
- str_info = &sst_drv_ctx->streams[str_id];
-
- if (str_info->status != STREAM_UN_INIT &&
- str_info->status != STREAM_DECODE) {
- if (str_info->ctrl_blk.on == true) {
- pr_err("SST ERR: control path in use\n");
- return -EINVAL;
- }
- if (sst_create_short_msg(&msg)) {
- pr_err("SST ERR: mem allocation failed\n");
- return -ENOMEM;
- }
- sst_fill_header(&msg->header, IPC_IA_DROP_STREAM, 0, str_id);
- str_info->ctrl_blk.condition = false;
- str_info->ctrl_blk.ret_code = 0;
- str_info->ctrl_blk.on = true;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node,
- &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
- if (!retval) {
- pr_debug("SST DBG:drop success\n");
- str_info->prev = STREAM_UN_INIT;
- str_info->status = STREAM_INIT;
- if (str_info->src != MAD_DRV) {
- mutex_lock(&str_info->lock);
- list_for_each_entry_safe(bufs, _bufs,
- &str_info->bufs, node) {
- list_del(&bufs->node);
- kfree(bufs);
- }
- mutex_unlock(&str_info->lock);
- }
- str_info->cumm_bytes += str_info->curr_bytes;
- } else if (retval == -SST_ERR_INVALID_STREAM_ID) {
- retval = -EINVAL;
- mutex_lock(&sst_drv_ctx->stream_lock);
- sst_clean_stream(str_info);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- }
- if (str_info->data_blk.on == true) {
- str_info->data_blk.condition = true;
- str_info->data_blk.ret_code = retval;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- } else {
- retval = -EBADRQC;
- pr_err("SST ERR: BADQRC for stream\n");
- }
- return retval;
-}
-
-/**
-* sst_drain_stream - Send msg for draining stream
-* @str_id: stream ID
-*
-* This function is called by any function which wants to drain
-* a stream.
-*/
-int sst_drain_stream(int str_id)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
- struct stream_info *str_info;
-
- pr_debug("SST DBG:sst_drain_stream for %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
- str_info = &sst_drv_ctx->streams[str_id];
-
- if (str_info->status != STREAM_RUNNING &&
- str_info->status != STREAM_INIT &&
- str_info->status != STREAM_PAUSED) {
- pr_err("SST ERR: BADQRC for stream = %d\n",
- str_info->status);
- return -EBADRQC;
- }
-
- if (str_info->status == STREAM_INIT) {
- if (sst_create_short_msg(&msg)) {
- pr_err("SST ERR: mem allocation failed\n");
- return -ENOMEM;
- }
- sst_fill_header(&msg->header, IPC_IA_DRAIN_STREAM, 0, str_id);
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- } else
- str_info->need_draining = true;
- str_info->data_blk.condition = false;
- str_info->data_blk.ret_code = 0;
- str_info->data_blk.on = true;
- retval = sst_wait_interruptible(sst_drv_ctx, &str_info->data_blk);
- str_info->need_draining = false;
- return retval;
-}
-
-/**
- * sst_free_stream - Frees a stream
- * @str_id: stream ID
- *
- * This function is called by any function which wants to free
- * a stream.
- */
-int sst_free_stream(int str_id)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
- struct stream_info *str_info;
-
- pr_debug("SST DBG:sst_free_stream for %d\n", str_id);
-
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
- str_info = &sst_drv_ctx->streams[str_id];
-
- if (str_info->status != STREAM_UN_INIT) {
- if (sst_create_short_msg(&msg)) {
- pr_err("SST ERR: mem allocation failed\n");
- return -ENOMEM;
- }
- sst_fill_header(&msg->header, IPC_IA_FREE_STREAM, 0, str_id);
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- str_info->prev = str_info->status;
- str_info->status = STREAM_UN_INIT;
- if (str_info->data_blk.on == true) {
- str_info->data_blk.condition = true;
- str_info->data_blk.ret_code = 0;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- str_info->data_blk.on = true;
- str_info->data_blk.condition = false;
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
- pr_debug("wait for free returned %d\n", retval);
- msleep(100);
- mutex_lock(&sst_drv_ctx->stream_lock);
- sst_clean_stream(str_info);
- mutex_unlock(&sst_drv_ctx->stream_lock);
- pr_debug("SST DBG:Stream freed\n");
- } else {
- retval = -EBADRQC;
- pr_debug("SST DBG:BADQRC for stream\n");
- }
-
- return retval;
-}
-
-
diff --git a/drivers/staging/intel_sst/intel_sst_stream_encoded.c b/drivers/staging/intel_sst/intel_sst_stream_encoded.c
deleted file mode 100644
index 2be58c5..0000000
--- a/drivers/staging/intel_sst/intel_sst_stream_encoded.c
+++ /dev/null
@@ -1,1273 +0,0 @@
-/*
- * intel_sst_stream_encoded.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This file contains the stream operations of SST driver
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/syscalls.h>
-#include <linux/firmware.h>
-#include <linux/sched.h>
-#ifdef CONFIG_MRST_RAR_HANDLER
-#include <linux/rar_register.h>
-#include "../memrar/memrar.h"
-#endif
-#include "intel_sst_ioctl.h"
-#include "intel_sst.h"
-#include "intel_sst_fw_ipc.h"
-#include "intel_sst_common.h"
-/**
-* sst_get_stream_params - Send msg to query for stream parameters
-* @str_id: stream id for which the parameters are queried
-* @get_params: out parameter to which the parameters are copied
-*
-* This function is called when the stream parameters are queried for
-*/
-int sst_get_stream_params(int str_id,
- struct snd_sst_get_stream_params *get_params)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
- struct stream_info *str_info;
- struct snd_sst_fw_get_stream_params *fw_params;
-
- pr_debug("get_stream for %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
-
- str_info = &sst_drv_ctx->streams[str_id];
- if (str_info->status != STREAM_UN_INIT) {
- if (str_info->ctrl_blk.on == true) {
- pr_err("control path in use\n");
- return -EINVAL;
- }
- if (sst_create_short_msg(&msg)) {
- pr_err("message creation failed\n");
- return -ENOMEM;
- }
- fw_params = kzalloc(sizeof(*fw_params), GFP_ATOMIC);
- if (!fw_params) {
- pr_err("mem allocation failed\n");
- kfree(msg);
- return -ENOMEM;
- }
-
- sst_fill_header(&msg->header, IPC_IA_GET_STREAM_PARAMS,
- 0, str_id);
- str_info->ctrl_blk.condition = false;
- str_info->ctrl_blk.ret_code = 0;
- str_info->ctrl_blk.on = true;
- str_info->ctrl_blk.data = (void *) fw_params;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
- if (retval) {
- get_params->codec_params.result = retval;
- kfree(fw_params);
- return -EIO;
- }
- memcpy(&get_params->pcm_params, &fw_params->pcm_params,
- sizeof(fw_params->pcm_params));
- memcpy(&get_params->codec_params.sparams,
- &fw_params->codec_params,
- sizeof(fw_params->codec_params));
- get_params->codec_params.result = 0;
- get_params->codec_params.stream_id = str_id;
- get_params->codec_params.codec = str_info->codec;
- get_params->codec_params.ops = str_info->ops;
- get_params->codec_params.stream_type = str_info->str_type;
- kfree(fw_params);
- } else {
- pr_debug("Stream is not in the init state\n");
- }
- return retval;
-}
-
-/**
- * sst_set_stream_param - Send msg for setting stream parameters
- *
- * @str_id: stream id
- * @str_param: stream params
- *
- * This function sets stream params during runtime
- */
-int sst_set_stream_param(int str_id, struct snd_sst_params *str_param)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
- struct stream_info *str_info;
-
- BUG_ON(!str_param);
- if (sst_drv_ctx->streams[str_id].ops != str_param->ops) {
- pr_err("Invalid operation\n");
- return -EINVAL;
- }
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
- pr_debug("set_stream for %d\n", str_id);
- str_info = &sst_drv_ctx->streams[str_id];
- if (sst_drv_ctx->streams[str_id].status == STREAM_INIT) {
- if (str_info->ctrl_blk.on == true) {
- pr_err("control path in use\n");
- return -EAGAIN;
- }
- if (sst_create_large_msg(&msg))
- return -ENOMEM;
-
- sst_fill_header(&msg->header,
- IPC_IA_SET_STREAM_PARAMS, 1, str_id);
- str_info->ctrl_blk.condition = false;
- str_info->ctrl_blk.ret_code = 0;
- str_info->ctrl_blk.on = true;
- msg->header.part.data = sizeof(u32) +
- sizeof(str_param->sparams);
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), &str_param->sparams,
- sizeof(str_param->sparams));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &str_info->ctrl_blk, SST_BLOCK_TIMEOUT);
- if (retval < 0) {
- retval = -EIO;
- sst_clean_stream(str_info);
- }
- } else {
- retval = -EBADRQC;
- pr_err("BADQRC for stream\n");
- }
- return retval;
-}
-
-/**
-* sst_get_vol - This function gets the premix gain or the gain of a stream
-*
-* @get_vol: this is an output param through which the volume
-* structure is passed back to user
-*
-* This function is called when the premix gain or stream gain is queried for
-*/
-int sst_get_vol(struct snd_sst_vol *get_vol)
-{
- int retval = 0;
- struct ipc_post *msg = NULL;
- struct snd_sst_vol *fw_get_vol;
- int str_id = get_vol->stream_id;
-
- pr_debug("get vol called\n");
-
- if (sst_create_short_msg(&msg))
- return -ENOMEM;
-
- sst_fill_header(&msg->header,
- IPC_IA_GET_STREAM_VOL, 0, str_id);
- sst_drv_ctx->vol_info_blk.condition = false;
- sst_drv_ctx->vol_info_blk.ret_code = 0;
- sst_drv_ctx->vol_info_blk.on = true;
- fw_get_vol = kzalloc(sizeof(*fw_get_vol), GFP_ATOMIC);
- if (!fw_get_vol) {
- pr_err("mem allocation failed\n");
- kfree(msg);
- return -ENOMEM;
- }
- sst_drv_ctx->vol_info_blk.data = (void *)fw_get_vol;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &sst_drv_ctx->vol_info_blk, SST_BLOCK_TIMEOUT);
- if (retval)
- retval = -EIO;
- else {
- pr_debug("stream id %d\n", fw_get_vol->stream_id);
- pr_debug("volume %d\n", fw_get_vol->volume);
- pr_debug("ramp duration %d\n", fw_get_vol->ramp_duration);
- pr_debug("ramp_type %d\n", fw_get_vol->ramp_type);
- memcpy(get_vol, fw_get_vol, sizeof(*fw_get_vol));
- }
- return retval;
-}
-
-/**
-* sst_set_vol - This function sets the premix gain or the gain of a stream
-*
-* @set_vol: this holds the volume structure that needs to be set
-*
-* This function is called when premix gain or stream gain is requested to be set
-*/
-int sst_set_vol(struct snd_sst_vol *set_vol)
-{
-
- int retval = 0;
- struct ipc_post *msg = NULL;
-
- pr_debug("set vol called\n");
-
- if (sst_create_large_msg(&msg)) {
- pr_err("message creation failed\n");
- return -ENOMEM;
- }
- sst_fill_header(&msg->header, IPC_IA_SET_STREAM_VOL, 1,
- set_vol->stream_id);
-
- msg->header.part.data = sizeof(u32) + sizeof(*set_vol);
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), set_vol, sizeof(*set_vol));
- sst_drv_ctx->vol_info_blk.condition = false;
- sst_drv_ctx->vol_info_blk.ret_code = 0;
- sst_drv_ctx->vol_info_blk.on = true;
- sst_drv_ctx->vol_info_blk.data = set_vol;
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &sst_drv_ctx->vol_info_blk, SST_BLOCK_TIMEOUT);
- if (retval) {
- pr_err("error in set_vol = %d\n", retval);
- retval = -EIO;
- }
- return retval;
-}
-
-/**
-* sst_set_mute - This function sets premix mute or soft mute of a stream
-*
-* @set_mute: this holds the mute structure that needs to be set
-*
-* This function is called when premix mute or stream mute is requested to be set
-*/
-int sst_set_mute(struct snd_sst_mute *set_mute)
-{
-
- int retval = 0;
- struct ipc_post *msg = NULL;
-
- pr_debug("set mute called\n");
-
- if (sst_create_large_msg(&msg)) {
- pr_err("message creation failed\n");
- return -ENOMEM;
- }
- sst_fill_header(&msg->header, IPC_IA_SET_STREAM_MUTE, 1,
- set_mute->stream_id);
- sst_drv_ctx->mute_info_blk.condition = false;
- sst_drv_ctx->mute_info_blk.ret_code = 0;
- sst_drv_ctx->mute_info_blk.on = true;
- sst_drv_ctx->mute_info_blk.data = set_mute;
-
- msg->header.part.data = sizeof(u32) + sizeof(*set_mute);
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), set_mute,
- sizeof(*set_mute));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &sst_drv_ctx->mute_info_blk, SST_BLOCK_TIMEOUT);
- if (retval) {
- pr_err("error in set_mute = %d\n", retval);
- retval = -EIO;
- }
- return retval;
-}
-
-int sst_prepare_target(struct snd_sst_slot_info *slot)
-{
- if (slot->target_device == SND_SST_TARGET_PMIC
- && slot->device_instance == 1) {
- /*music mode*/
- if (sst_drv_ctx->pmic_port_instance == 0)
- sst_drv_ctx->scard_ops->set_voice_port(
- DEACTIVATE);
- } else if ((slot->target_device == SND_SST_TARGET_PMIC ||
- slot->target_device == SND_SST_TARGET_MODEM) &&
- slot->device_instance == 0) {
- /*voip mode where pcm0 is active*/
- if (sst_drv_ctx->pmic_port_instance == 1)
- sst_drv_ctx->scard_ops->set_audio_port(
- DEACTIVATE);
- }
- return 0;
-}
-
-int sst_activate_target(struct snd_sst_slot_info *slot)
-{
- if (slot->target_device == SND_SST_TARGET_PMIC &&
- slot->device_instance == 1) {
- /*music mode*/
- sst_drv_ctx->pmic_port_instance = 1;
- sst_drv_ctx->scard_ops->set_audio_port(ACTIVATE);
- sst_drv_ctx->scard_ops->set_pcm_audio_params(
- slot->pcm_params.sfreq,
- slot->pcm_params.pcm_wd_sz,
- slot->pcm_params.num_chan);
- if (sst_drv_ctx->pb_streams)
- sst_drv_ctx->scard_ops->power_up_pmic_pb(1);
- if (sst_drv_ctx->cp_streams)
- sst_drv_ctx->scard_ops->power_up_pmic_cp(1);
- } else if ((slot->target_device == SND_SST_TARGET_PMIC ||
- slot->target_device == SND_SST_TARGET_MODEM) &&
- slot->device_instance == 0) {
- /*voip mode where pcm0 is active*/
- sst_drv_ctx->pmic_port_instance = 0;
- sst_drv_ctx->scard_ops->set_voice_port(
- ACTIVATE);
- sst_drv_ctx->scard_ops->power_up_pmic_pb(0);
- /*sst_drv_ctx->scard_ops->power_up_pmic_cp(0);*/
- }
- return 0;
-}
-
-int sst_parse_target(struct snd_sst_slot_info *slot)
-{
- int retval = 0;
-
- if (slot->action == SND_SST_PORT_ACTIVATE &&
- slot->device_type == SND_SST_DEVICE_PCM) {
- retval = sst_activate_target(slot);
- if (retval)
- pr_err("SST_Activate_target_fail\n");
- else
- pr_err("SST_Activate_target_pass\n");
- } else if (slot->action == SND_SST_PORT_PREPARE &&
- slot->device_type == SND_SST_DEVICE_PCM) {
- retval = sst_prepare_target(slot);
- if (retval)
- pr_err("SST_prepare_target_fail\n");
- else
- pr_err("SST_prepare_target_pass\n");
- } else {
- pr_err("slot_action : %d, device_type: %d\n",
- slot->action, slot->device_type);
- }
- return retval;
-}
-
-int sst_send_target(struct snd_sst_target_device *target)
-{
- int retval;
- struct ipc_post *msg;
-
- if (sst_create_large_msg(&msg)) {
- pr_err("message creation failed\n");
- return -ENOMEM;
- }
- sst_fill_header(&msg->header, IPC_IA_TARGET_DEV_SELECT, 1, 0);
- sst_drv_ctx->tgt_dev_blk.condition = false;
- sst_drv_ctx->tgt_dev_blk.ret_code = 0;
- sst_drv_ctx->tgt_dev_blk.on = true;
-
- msg->header.part.data = sizeof(u32) + sizeof(*target);
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), target,
- sizeof(*target));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- pr_debug("message sent- waiting\n");
- retval = sst_wait_interruptible_timeout(sst_drv_ctx,
- &sst_drv_ctx->tgt_dev_blk, TARGET_DEV_BLOCK_TIMEOUT);
- if (retval)
- pr_err("target device ipc failed = 0x%x\n", retval);
- return retval;
-
-}
-
-int sst_target_device_validate(struct snd_sst_target_device *target)
-{
- int retval = 0;
- int i;
-
- for (i = 0; i < SST_MAX_TARGET_DEVICES; i++) {
- if (target->devices[i].device_type == SND_SST_DEVICE_PCM) {
- /*pcm device, check params*/
- if (target->devices[i].device_instance == 1) {
- if ((target->devices[i].device_mode !=
- SND_SST_DEV_MODE_PCM_MODE4_I2S) &&
- (target->devices[i].device_mode !=
- SND_SST_DEV_MODE_PCM_MODE4_RIGHT_JUSTIFIED)
- && (target->devices[i].device_mode !=
- SND_SST_DEV_MODE_PCM_MODE1))
- goto err;
- } else if (target->devices[i].device_instance == 0) {
- if ((target->devices[i].device_mode !=
- SND_SST_DEV_MODE_PCM_MODE2)
- && (target->devices[i].device_mode !=
- SND_SST_DEV_MODE_PCM_MODE4_I2S)
- && (target->devices[i].device_mode !=
- SND_SST_DEV_MODE_PCM_MODE1))
- goto err;
- if (target->devices[i].pcm_params.sfreq != 8000
- || target->devices[i].pcm_params.num_chan != 1
- || target->devices[i].pcm_params.pcm_wd_sz !=
- 16)
- goto err;
- } else {
-err:
- pr_err("i/p params incorrect\n");
- return -EINVAL;
- }
- }
- }
- return retval;
-}
-
-/**
- * sst_target_device_select - This function sets the target device configurations
- *
- * @target: this parameter holds the configurations to be set
- *
- * This function is called when the user layer wants to change the target
- * device's configurations
- */
-
-int sst_target_device_select(struct snd_sst_target_device *target)
-{
- int retval, i, prepare_count = 0;
-
- pr_debug("Target Device Select\n");
-
- if (target->device_route < 0 || target->device_route > 2) {
- pr_err("device route is invalid\n");
- return -EINVAL;
- }
-
- if (target->device_route != 0) {
- pr_err("Unsupported config\n");
- return -EIO;
- }
- retval = sst_target_device_validate(target);
- if (retval)
- return retval;
-
- retval = sst_send_target(target);
- if (retval)
- return retval;
- for (i = 0; i < SST_MAX_TARGET_DEVICES; i++) {
- if (target->devices[i].action == SND_SST_PORT_ACTIVATE) {
- pr_debug("activate called in %d\n", i);
- retval = sst_parse_target(&target->devices[i]);
- if (retval)
- return retval;
- } else if (target->devices[i].action == SND_SST_PORT_PREPARE) {
- pr_debug("PREPARE in %d, Forwarding\n", i);
- retval = sst_parse_target(&target->devices[i]);
- if (retval) {
- pr_err("Parse Target fail %d\n", retval);
- return retval;
- }
- pr_debug("Parse Target successful %d\n", retval);
- if (target->devices[i].device_type ==
- SND_SST_DEVICE_PCM)
- prepare_count++;
- }
- }
- if (target->devices[0].action == SND_SST_PORT_PREPARE &&
- prepare_count == 0)
- sst_drv_ctx->scard_ops->power_down_pmic();
-
- return retval;
-}
-#ifdef CONFIG_MRST_RAR_HANDLER
-/*This function gets the physical address of the secure memory from the handle*/
-static inline int sst_get_RAR(struct RAR_buffer *buffers, int count)
-{
- int retval = 0, rar_status = 0;
-
- rar_status = rar_handle_to_bus(buffers, count);
-
- if (count != rar_status) {
- pr_err("The rar CALL Failed");
- retval = -EIO;
- }
- if (buffers->info.type != RAR_TYPE_AUDIO) {
- pr_err("Invalid RAR type\n");
- return -EINVAL;
- }
- return retval;
-}
-
-#endif
-
-/* This function creates the scatter-gather list to be sent to the firmware
-for capture/playback data */
-static int sst_create_sg_list(struct stream_info *stream,
- struct sst_frame_info *sg_list)
-{
- struct sst_stream_bufs *kbufs = NULL;
-#ifdef CONFIG_MRST_RAR_HANDLER
- struct RAR_buffer rar_buffers;
- int retval = 0;
-#endif
- int i = 0;
- list_for_each_entry(kbufs, &stream->bufs, node) {
- if (kbufs->in_use == false) {
-#ifdef CONFIG_MRST_RAR_HANDLER
- if (stream->ops == STREAM_OPS_PLAYBACK_DRM) {
- pr_debug("DRM playback handling\n");
- rar_buffers.info.handle = (__u32)kbufs->addr;
- rar_buffers.info.size = kbufs->size;
- pr_debug("rar handle 0x%x size=0x%x\n",
- rar_buffers.info.handle,
- rar_buffers.info.size);
- retval = sst_get_RAR(&rar_buffers, 1);
-
- if (retval)
- return retval;
- sg_list->addr[i].addr = rar_buffers.bus_address;
- /* rar_buffers.info.size; */
- sg_list->addr[i].size = (__u32)kbufs->size;
- pr_debug("phyaddr[%d] 0x%x Size:0x%x\n"
- , i, sg_list->addr[i].addr,
- sg_list->addr[i].size);
- }
-#endif
- if (stream->ops != STREAM_OPS_PLAYBACK_DRM) {
- sg_list->addr[i].addr =
- virt_to_phys((void *)
- kbufs->addr + kbufs->offset);
- sg_list->addr[i].size = kbufs->size;
- pr_debug("phyaddr[%d]:0x%x Size:0x%x\n"
- , i , sg_list->addr[i].addr, kbufs->size);
- }
- stream->curr_bytes += sg_list->addr[i].size;
- kbufs->in_use = true;
- i++;
- }
- if (i >= MAX_NUM_SCATTER_BUFFERS)
- break;
- }
-
- sg_list->num_entries = i;
- pr_debug("sg list entries = %d\n", sg_list->num_entries);
- return i;
-}
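
A reading aid for the removed helper above: sst_create_sg_list() packs not-yet-used kernel buffers into a bounded scatter-gather table and marks them in use. The following is a standalone sketch of that chunking logic only; the types sg_entry, kbuf and the constant MAX_ENTRIES are simplified stand-ins for the driver-private sst_frame_info, sst_stream_bufs and MAX_NUM_SCATTER_BUFFERS.

#include <stdio.h>

#define MAX_ENTRIES 8			/* stands in for MAX_NUM_SCATTER_BUFFERS */

struct sg_entry { unsigned long addr; unsigned int size; };
struct kbuf { unsigned long phys; unsigned int size; int in_use; };

/* Pack unused buffers into 'list', at most MAX_ENTRIES of them. */
static int build_sg_list(struct kbuf *bufs, int nbufs, struct sg_entry *list)
{
	int filled = 0, n;

	for (n = 0; n < nbufs && filled < MAX_ENTRIES; n++) {
		if (bufs[n].in_use)
			continue;
		list[filled].addr = bufs[n].phys;
		list[filled].size = bufs[n].size;
		bufs[n].in_use = 1;	/* claimed for this frame message */
		filled++;
	}
	return filled;			/* entry count, like sg_list->num_entries */
}

int main(void)
{
	struct kbuf bufs[3] = {
		{ 0x1000, 4096, 0 }, { 0x2000, 4096, 1 }, { 0x3000, 2048, 0 },
	};
	struct sg_entry list[MAX_ENTRIES];
	int n = build_sg_list(bufs, 3, list);

	printf("entries = %d, first addr = 0x%lx\n", n, list[0].addr);
	return 0;
}
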
-
-
-/**
- * sst_play_frame - Send msg for sending stream frames
- *
- * @str_id: ID of stream
- *
- * This function is called to send data to be played out
- * to the firmware
- */
-int sst_play_frame(int str_id)
-{
- int i = 0, retval = 0;
- struct ipc_post *msg = NULL;
- struct sst_frame_info sg_list = {0};
- struct sst_stream_bufs *kbufs = NULL, *_kbufs;
- struct stream_info *stream;
-
- pr_debug("play frame for %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
-
- stream = &sst_drv_ctx->streams[str_id];
- /* clear prev sent buffers */
- list_for_each_entry_safe(kbufs, _kbufs, &stream->bufs, node) {
- if (kbufs->in_use == true) {
- spin_lock(&stream->pcm_lock);
- list_del(&kbufs->node);
- spin_unlock(&stream->pcm_lock);
- kfree(kbufs);
- }
- }
- /* update bytes sent */
- stream->cumm_bytes += stream->curr_bytes;
- stream->curr_bytes = 0;
- if (list_empty(&stream->bufs)) {
- /* no user buffer available */
- pr_debug("Null buffer stream status %d\n", stream->status);
- stream->prev = stream->status;
- stream->status = STREAM_INIT;
- pr_debug("new stream status = %d\n", stream->status);
- if (stream->need_draining == true) {
- pr_debug("draining stream\n");
- if (sst_create_short_msg(&msg)) {
- pr_err("mem allocation failed\n");
- return -ENOMEM;
- }
- sst_fill_header(&msg->header, IPC_IA_DRAIN_STREAM,
- 0, str_id);
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node,
- &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- } else if (stream->data_blk.on == true) {
- pr_debug("user list empty.. wake\n");
- /* unblock */
- stream->data_blk.ret_code = 0;
- stream->data_blk.condition = true;
- stream->data_blk.on = false;
- wake_up(&sst_drv_ctx->wait_queue);
- }
- return 0;
- }
-
- /* create list */
- i = sst_create_sg_list(stream, &sg_list);
-
- /* post msg */
- if (sst_create_large_msg(&msg))
- return -ENOMEM;
-
- sst_fill_header(&msg->header, IPC_IA_PLAY_FRAMES, 1, str_id);
- msg->header.part.data = sizeof(u32) + sizeof(sg_list);
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), &sg_list, sizeof(sg_list));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- return 0;
-
-}
-
-/**
- * sst_capture_frame - Send msg for capturing stream frames
- *
- * @str_id: ID of stream
- *
- * This function is called to capture data from the firmware
- */
-int sst_capture_frame(int str_id)
-{
- int i = 0, retval = 0;
- struct ipc_post *msg = NULL;
- struct sst_frame_info sg_list = {0};
- struct sst_stream_bufs *kbufs = NULL, *_kbufs;
- struct stream_info *stream;
-
-
- pr_debug("capture frame for %d\n", str_id);
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
- stream = &sst_drv_ctx->streams[str_id];
- /* clear prev sent buffers */
- list_for_each_entry_safe(kbufs, _kbufs, &stream->bufs, node) {
- if (kbufs->in_use == true) {
- list_del(&kbufs->node);
- kfree(kbufs);
- pr_debug("del node\n");
- }
- }
- if (list_empty(&stream->bufs)) {
- /* no user buffer available */
- pr_debug("Null buffer!!!!stream status %d\n",
- stream->status);
- stream->prev = stream->status;
- stream->status = STREAM_INIT;
- pr_debug("new stream status = %d\n",
- stream->status);
- if (stream->data_blk.on == true) {
- pr_debug("user list empty.. wake\n");
- /* unblock */
- stream->data_blk.ret_code = 0;
- stream->data_blk.condition = true;
- stream->data_blk.on = false;
- wake_up(&sst_drv_ctx->wait_queue);
-
- }
- return 0;
- }
- /* create new sg list */
- i = sst_create_sg_list(stream, &sg_list);
-
- /* post msg */
- if (sst_create_large_msg(&msg))
- return -ENOMEM;
-
- sst_fill_header(&msg->header, IPC_IA_CAPT_FRAMES, 1, str_id);
- msg->header.part.data = sizeof(u32) + sizeof(sg_list);
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), &sg_list, sizeof(sg_list));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
-
-
-	/* update bytes received */
- stream->cumm_bytes += stream->curr_bytes;
- stream->curr_bytes = 0;
-
- pr_debug("Cum bytes = %d\n", stream->cumm_bytes);
- return 0;
-}
-
-/*This function is used to calculate the minimum size of input buffers given*/
-static unsigned int calculate_min_size(struct snd_sst_buffs *bufs)
-{
- int i, min_val = bufs->buff_entry[0].size;
- for (i = 1 ; i < bufs->entries; i++) {
- if (bufs->buff_entry[i].size < min_val)
- min_val = bufs->buff_entry[i].size;
- }
- pr_debug("min_val = %d\n", min_val);
- return min_val;
-}
-
-static unsigned int calculate_max_size(struct snd_sst_buffs *bufs)
-{
- int i, max_val = bufs->buff_entry[0].size;
- for (i = 1 ; i < bufs->entries; i++) {
- if (bufs->buff_entry[i].size > max_val)
- max_val = bufs->buff_entry[i].size;
- }
- pr_debug("max_val = %d\n", max_val);
- return max_val;
-}
-
-/*This function is used to allocate input and output buffers to be sent to
-the firmware that will take encoded data and return decoded data*/
-static int sst_allocate_decode_buf(struct stream_info *str_info,
- struct snd_sst_dbufs *dbufs,
- unsigned int cum_input_given,
- unsigned int cum_output_given)
-{
-#ifdef CONFIG_MRST_RAR_HANDLER
- if (str_info->ops == STREAM_OPS_PLAYBACK_DRM) {
-
- if (dbufs->ibufs->type == SST_BUF_RAR &&
- dbufs->obufs->type == SST_BUF_RAR) {
- if (dbufs->ibufs->entries == dbufs->obufs->entries)
- return 0;
- else {
-				pr_err("RAR entries don't match\n");
- return -EINVAL;
- }
- } else
- str_info->decode_osize = cum_output_given;
- return 0;
-
- }
-#endif
- if (!str_info->decode_ibuf) {
- pr_debug("no i/p buffers, trying full size\n");
- str_info->decode_isize = cum_input_given;
- str_info->decode_ibuf = kzalloc(str_info->decode_isize,
- GFP_KERNEL);
- str_info->idecode_alloc = str_info->decode_isize;
- }
- if (!str_info->decode_ibuf) {
- pr_debug("buff alloc failed, try max size\n");
- str_info->decode_isize = calculate_max_size(dbufs->ibufs);
- str_info->decode_ibuf = kzalloc(
- str_info->decode_isize, GFP_KERNEL);
- str_info->idecode_alloc = str_info->decode_isize;
- }
- if (!str_info->decode_ibuf) {
- pr_debug("buff alloc failed, try min size\n");
- str_info->decode_isize = calculate_min_size(dbufs->ibufs);
- str_info->decode_ibuf = kzalloc(str_info->decode_isize,
- GFP_KERNEL);
- if (!str_info->decode_ibuf) {
- pr_err("mem allocation failed\n");
- return -ENOMEM;
- }
- str_info->idecode_alloc = str_info->decode_isize;
- }
- str_info->decode_osize = cum_output_given;
- if (str_info->decode_osize > sst_drv_ctx->mmap_len)
- str_info->decode_osize = sst_drv_ctx->mmap_len;
- return 0;
-}
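
sst_allocate_decode_buf() above retries the input-buffer allocation with shrinking sizes: the full cumulative input size, then the largest single entry, then the smallest. A minimal userspace sketch of that fallback pattern follows; calloc stands in for kzalloc and the sizes passed in main() are hypothetical.

#include <stdio.h>
#include <stdlib.h>

/* Try the full size first, then the largest entry, then the smallest. */
static void *alloc_with_fallback(size_t full, size_t max_entry,
				 size_t min_entry, size_t *got)
{
	size_t try_sizes[3] = { full, max_entry, min_entry };
	void *buf = NULL;
	int i;

	for (i = 0; i < 3 && !buf; i++) {
		buf = calloc(1, try_sizes[i]);
		if (buf)
			*got = try_sizes[i];
	}
	return buf;			/* NULL: even the minimum size failed */
}

int main(void)
{
	size_t got = 0;
	void *buf = alloc_with_fallback(64 * 1024, 16 * 1024, 4 * 1024, &got);

	printf("allocated %zu bytes\n", buf ? got : 0);
	free(buf);
	return 0;
}
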
-
-/*This function is used to send the message to firmware to decode the data*/
-static int sst_send_decode_mess(int str_id, struct stream_info *str_info,
- struct snd_sst_decode_info *dec_info)
-{
- struct ipc_post *msg = NULL;
- int retval = 0;
-
-	pr_debug("SST DBG:sst_send_decode_mess called\n");
-
- if (str_info->decode_ibuf_type == SST_BUF_RAR) {
-#ifdef CONFIG_MRST_RAR_HANDLER
- dec_info->frames_in.addr[0].addr =
- (unsigned long)str_info->decode_ibuf;
- dec_info->frames_in.addr[0].size =
- str_info->decode_isize;
-#endif
-
- } else {
- dec_info->frames_in.addr[0].addr = virt_to_phys((void *)
- str_info->decode_ibuf);
- dec_info->frames_in.addr[0].size = str_info->decode_isize;
- }
-
-
- if (str_info->decode_obuf_type == SST_BUF_RAR) {
-#ifdef CONFIG_MRST_RAR_HANDLER
- dec_info->frames_out.addr[0].addr =
- (unsigned long)str_info->decode_obuf;
- dec_info->frames_out.addr[0].size = str_info->decode_osize;
-#endif
-
- } else {
- dec_info->frames_out.addr[0].addr = virt_to_phys((void *)
- str_info->decode_obuf) ;
- dec_info->frames_out.addr[0].size = str_info->decode_osize;
- }
-
- dec_info->frames_in.num_entries = 1;
- dec_info->frames_out.num_entries = 1;
- dec_info->frames_in.rsrvd = 0;
- dec_info->frames_out.rsrvd = 0;
- dec_info->input_bytes_consumed = 0;
- dec_info->output_bytes_produced = 0;
- if (sst_create_large_msg(&msg)) {
- pr_err("message creation failed\n");
- return -ENOMEM;
- }
-
- sst_fill_header(&msg->header, IPC_IA_DECODE_FRAMES, 1, str_id);
- msg->header.part.data = sizeof(u32) + sizeof(*dec_info);
- memcpy(msg->mailbox_data, &msg->header, sizeof(u32));
- memcpy(msg->mailbox_data + sizeof(u32), dec_info,
- sizeof(*dec_info));
- spin_lock(&sst_drv_ctx->list_spin_lock);
- list_add_tail(&msg->node, &sst_drv_ctx->ipc_dispatch_list);
- spin_unlock(&sst_drv_ctx->list_spin_lock);
- str_info->data_blk.condition = false;
- str_info->data_blk.ret_code = 0;
- str_info->data_blk.on = true;
- str_info->data_blk.data = dec_info;
- sst_post_message(&sst_drv_ctx->ipc_post_msg_wq);
- retval = sst_wait_interruptible(sst_drv_ctx, &str_info->data_blk);
- return retval;
-}
-
-#ifdef CONFIG_MRST_RAR_HANDLER
-static int sst_prepare_input_buffers_rar(struct stream_info *str_info,
- struct snd_sst_dbufs *dbufs,
- int *input_index, int *in_copied,
- int *input_index_valid_size, int *new_entry_flag)
-{
-	int retval = 0, i = *input_index;
-
- if (str_info->ops == STREAM_OPS_PLAYBACK_DRM) {
- struct RAR_buffer rar_buffers;
- __u32 info;
- retval = copy_from_user((void *) &info,
- dbufs->ibufs->buff_entry[i].buffer,
- sizeof(__u32));
- if (retval) {
- pr_err("cpy from user fail\n");
- return -EAGAIN;
- }
- rar_buffers.info.type = dbufs->ibufs->type;
- rar_buffers.info.size = dbufs->ibufs->buff_entry[i].size;
- rar_buffers.info.handle = info;
- pr_debug("rar in DnR(input buffer function)=0x%x size=0x%x",
- rar_buffers.info.handle,
- rar_buffers.info.size);
- retval = sst_get_RAR(&rar_buffers, 1);
- if (retval) {
- pr_debug("SST ERR: RAR API failed\n");
- return retval;
- }
- str_info->decode_ibuf =
- (void *) ((unsigned long) rar_buffers.bus_address);
-		pr_debug("RAR buf addr in DnR (input buffer function) 0x%lx\n",
-				(unsigned long) str_info->decode_ibuf);
-		pr_debug("rar in DnR decode function/output b_add rar = 0x%lx\n",
-				(unsigned long) rar_buffers.bus_address);
- *input_index = i + 1;
- str_info->decode_isize = dbufs->ibufs->buff_entry[i].size;
- str_info->decode_ibuf_type = dbufs->ibufs->type;
- *in_copied = str_info->decode_isize;
- }
- return retval;
-}
-#endif
-/*This function is used to prepare the kernel input buffers with contents
-before sending for decode*/
-static int sst_prepare_input_buffers(struct stream_info *str_info,
- struct snd_sst_dbufs *dbufs,
- int *input_index, int *in_copied,
- int *input_index_valid_size, int *new_entry_flag)
-{
- int i, cpy_size, retval = 0;
-
- pr_debug("input_index = %d, input entries = %d\n",
- *input_index, dbufs->ibufs->entries);
- for (i = *input_index; i < dbufs->ibufs->entries; i++) {
-#ifdef CONFIG_MRST_RAR_HANDLER
- retval = sst_prepare_input_buffers_rar(str_info,
- dbufs, input_index, in_copied,
- input_index_valid_size, new_entry_flag);
- if (retval) {
- pr_err("In prepare input buffers for RAR\n");
- return -EIO;
- }
-#endif
- *input_index = i;
- if (*input_index_valid_size == 0)
- *input_index_valid_size =
- dbufs->ibufs->buff_entry[i].size;
-		pr_debug("input addr = %p, size = %d\n",
- dbufs->ibufs->buff_entry[i].buffer,
- *input_index_valid_size);
- pr_debug("decode_isize = %d, in_copied %d\n",
- str_info->decode_isize, *in_copied);
- if (*input_index_valid_size <=
- (str_info->decode_isize - *in_copied))
- cpy_size = *input_index_valid_size;
- else
- cpy_size = str_info->decode_isize - *in_copied;
-
- pr_debug("cpy size = %d\n", cpy_size);
- if (!dbufs->ibufs->buff_entry[i].buffer) {
- pr_err("i/p buffer is null\n");
- return -EINVAL;
- }
- pr_debug("Try copy To %p, From %p, size %d\n",
- str_info->decode_ibuf + *in_copied,
- dbufs->ibufs->buff_entry[i].buffer, cpy_size);
-
- retval =
- copy_from_user((void *)(str_info->decode_ibuf + *in_copied),
- (void *) dbufs->ibufs->buff_entry[i].buffer,
- cpy_size);
- if (retval) {
- pr_err("copy from user failed\n");
- return -EIO;
- }
- *in_copied += cpy_size;
- *input_index_valid_size -= cpy_size;
- pr_debug("in buff size = %d, in_copied = %d\n",
- *input_index_valid_size, *in_copied);
- if (*input_index_valid_size != 0) {
- pr_debug("more input buffers left\n");
- dbufs->ibufs->buff_entry[i].buffer += cpy_size;
- break;
- }
- if (*in_copied == str_info->decode_isize &&
- *input_index_valid_size == 0 &&
- (i+1) <= dbufs->ibufs->entries) {
- pr_debug("all input buffers copied\n");
- *new_entry_flag = true;
- *input_index = i + 1;
- break;
- }
- }
- return retval;
-}
-
-/* This function is used to copy the decoded data from kernel buffers to
-the user output buffers with contents after decode*/
-static int sst_prepare_output_buffers(struct stream_info *str_info,
- struct snd_sst_dbufs *dbufs,
- int *output_index, int output_size,
- int *out_copied)
-
-{
- int i, cpy_size, retval = 0;
- pr_debug("output_index = %d, output entries = %d\n",
- *output_index,
- dbufs->obufs->entries);
- for (i = *output_index; i < dbufs->obufs->entries; i++) {
- *output_index = i;
- pr_debug("output addr = %p, size = %d\n",
- dbufs->obufs->buff_entry[i].buffer,
- dbufs->obufs->buff_entry[i].size);
- pr_debug("output_size = %d, out_copied = %d\n",
- output_size, *out_copied);
- if (dbufs->obufs->buff_entry[i].size <
- (output_size - *out_copied))
- cpy_size = dbufs->obufs->buff_entry[i].size;
- else
- cpy_size = output_size - *out_copied;
- pr_debug("cpy size = %d\n", cpy_size);
- pr_debug("Try copy To: %p, From %p, size %d\n",
- dbufs->obufs->buff_entry[i].buffer,
- sst_drv_ctx->mmap_mem + *out_copied,
- cpy_size);
- retval = copy_to_user(dbufs->obufs->buff_entry[i].buffer,
- sst_drv_ctx->mmap_mem + *out_copied,
- cpy_size);
- if (retval) {
- pr_err("copy to user failed\n");
- return -EIO;
- } else
- pr_debug("copy to user passed\n");
- *out_copied += cpy_size;
- dbufs->obufs->buff_entry[i].size -= cpy_size;
- pr_debug("o/p buff size %d, out_copied %d\n",
- dbufs->obufs->buff_entry[i].size, *out_copied);
- if (dbufs->obufs->buff_entry[i].size != 0) {
- *output_index = i;
- dbufs->obufs->buff_entry[i].buffer += cpy_size;
- break;
- } else if (*out_copied == output_size) {
- *output_index = i + 1;
- break;
- }
- }
- return retval;
-}
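
sst_prepare_output_buffers() above spreads the decoded byte count across the user-supplied output entries, remembering a partially filled entry for the next round. A plain-memory sketch of that splitting logic follows; memcpy stands in for copy_to_user and out_entry for the driver's snd_sst_buff_entry, with made-up data in main().

#include <stdio.h>
#include <string.h>

struct out_entry { unsigned char *buf; size_t size; };

/* Spread 'total' bytes from src across the output entries, starting at *idx. */
static size_t copy_out(struct out_entry *ents, int nents, int *idx,
		       const unsigned char *src, size_t total)
{
	size_t copied = 0;

	while (*idx < nents && copied < total) {
		size_t chunk = ents[*idx].size;

		if (chunk > total - copied)
			chunk = total - copied;
		memcpy(ents[*idx].buf, src + copied, chunk);	/* copy_to_user in the driver */
		copied += chunk;
		ents[*idx].buf += chunk;
		ents[*idx].size -= chunk;
		if (ents[*idx].size == 0)
			(*idx)++;	/* this entry is full, move on */
	}
	return copied;
}

int main(void)
{
	unsigned char src[6] = "hello", a[4], b[4];
	struct out_entry ents[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
	int idx = 0;

	printf("copied %zu bytes, idx now %d\n",
	       copy_out(ents, 2, &idx, src, sizeof(src)), idx);
	return 0;
}
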
-
-/**
- * sst_decode - Send msg for decoding frames
- *
- * @str_id: ID of stream
- * @dbufs: param that holds the user input and output buffers and size
- *
- * This function is called to decode data from the firmware
- */
-int sst_decode(int str_id, struct snd_sst_dbufs *dbufs)
-{
- int retval = 0, i;
- unsigned long long total_input = 0 , total_output = 0;
- unsigned int cum_input_given = 0 , cum_output_given = 0;
- int copy_in_done = false, copy_out_done = false;
- int input_index = 0, output_index = 0;
- int input_index_valid_size = 0;
- int in_copied, out_copied;
- int new_entry_flag;
- u64 output_size;
- struct stream_info *str_info;
- struct snd_sst_decode_info dec_info;
- unsigned long long input_bytes, output_bytes;
-
- sst_drv_ctx->scard_ops->power_down_pmic();
- pr_debug("Powering_down_PMIC...\n");
-
- retval = sst_validate_strid(str_id);
- if (retval)
- return retval;
-
- str_info = &sst_drv_ctx->streams[str_id];
- if (str_info->status != STREAM_INIT) {
- pr_err("invalid stream state = %d\n",
- str_info->status);
- return -EINVAL;
- }
-
- str_info->prev = str_info->status;
- str_info->status = STREAM_DECODE;
-
- for (i = 0; i < dbufs->ibufs->entries; i++)
- cum_input_given += dbufs->ibufs->buff_entry[i].size;
- for (i = 0; i < dbufs->obufs->entries; i++)
- cum_output_given += dbufs->obufs->buff_entry[i].size;
-
- /* input and output buffer allocation */
- retval = sst_allocate_decode_buf(str_info, dbufs,
- cum_input_given, cum_output_given);
- if (retval) {
- pr_err("mem allocation failed, abort!!!\n");
- retval = -ENOMEM;
- goto finish;
- }
-
- str_info->decode_isize = str_info->idecode_alloc;
- str_info->decode_ibuf_type = dbufs->ibufs->type;
- str_info->decode_obuf_type = dbufs->obufs->type;
-
- while ((copy_out_done == false) && (copy_in_done == false)) {
- in_copied = 0;
- new_entry_flag = false;
- retval = sst_prepare_input_buffers(str_info,\
- dbufs, &input_index, &in_copied,
- &input_index_valid_size, &new_entry_flag);
- if (retval) {
- pr_err("prepare in buffers failed\n");
- goto finish;
- }
-
- if (str_info->ops != STREAM_OPS_PLAYBACK_DRM)
- str_info->decode_obuf = sst_drv_ctx->mmap_mem;
-
-#ifdef CONFIG_MRST_RAR_HANDLER
- else {
- if (dbufs->obufs->type == SST_BUF_RAR) {
- struct RAR_buffer rar_buffers;
- __u32 info;
-
- pr_debug("DRM");
- retval = copy_from_user((void *) &info,
- dbufs->obufs->
- buff_entry[output_index].buffer,
- sizeof(__u32));
-
- rar_buffers.info.size = dbufs->obufs->
- buff_entry[output_index].size;
- rar_buffers.info.handle = info;
- retval = sst_get_RAR(&rar_buffers, 1);
- if (retval)
- return retval;
-
- str_info->decode_obuf = (void *)((unsigned long)
- rar_buffers.bus_address);
- str_info->decode_osize = dbufs->obufs->
- buff_entry[output_index].size;
- str_info->decode_obuf_type = dbufs->obufs->type;
- pr_debug("DRM handling\n");
-				pr_debug("o/p_add=0x%lx Size=0x%x\n",
- (unsigned long) str_info->decode_obuf,
- str_info->decode_osize);
- } else {
- str_info->decode_obuf = sst_drv_ctx->mmap_mem;
- str_info->decode_osize = dbufs->obufs->
- buff_entry[output_index].size;
-
- }
- }
-#endif
- if (str_info->ops != STREAM_OPS_PLAYBACK_DRM) {
- if (str_info->decode_isize > in_copied) {
- str_info->decode_isize = in_copied;
- pr_debug("i/p size = %d\n",
- str_info->decode_isize);
- }
- }
-
-
- retval = sst_send_decode_mess(str_id, str_info, &dec_info);
- if (retval || dec_info.input_bytes_consumed == 0) {
- pr_err("SST ERR: mess failed or no input consumed\n");
- goto finish;
- }
- input_bytes = dec_info.input_bytes_consumed;
- output_bytes = dec_info.output_bytes_produced;
-
- pr_debug("in_copied=%d, con=%lld, prod=%lld\n",
- in_copied, input_bytes, output_bytes);
- if (dbufs->obufs->type == SST_BUF_RAR) {
- output_index += 1;
- if (output_index == dbufs->obufs->entries) {
- copy_in_done = true;
- pr_debug("all i/p cpy done\n");
- }
- total_output += output_bytes;
- } else {
- out_copied = 0;
- output_size = output_bytes;
- retval = sst_prepare_output_buffers(str_info, dbufs,
- &output_index, output_size, &out_copied);
- if (retval) {
- pr_err("prep out buff fail\n");
- goto finish;
- }
- if (str_info->ops != STREAM_OPS_PLAYBACK_DRM) {
- if (in_copied != input_bytes) {
- int bytes_left = in_copied -
- input_bytes;
- pr_debug("bytes %d\n",
- bytes_left);
- if (new_entry_flag == true)
- input_index--;
- while (bytes_left) {
- struct snd_sst_buffs *ibufs;
- struct snd_sst_buff_entry
- *buff_entry;
- unsigned int size_sent;
-
- ibufs = dbufs->ibufs;
- buff_entry =
- &ibufs->buff_entry[input_index];
- size_sent = buff_entry->size -\
- input_index_valid_size;
- if (bytes_left == size_sent) {
- bytes_left = 0;
- } else if (bytes_left <
- size_sent) {
- buff_entry->buffer +=
- (size_sent -
- bytes_left);
- buff_entry->size -=
- (size_sent -
- bytes_left);
- bytes_left = 0;
- } else {
- bytes_left -= size_sent;
- input_index--;
- input_index_valid_size =
- 0;
- }
- }
-
- }
- }
-
- total_output += out_copied;
- if (str_info->decode_osize != out_copied) {
- str_info->decode_osize -= out_copied;
- pr_debug("output size modified = %d\n",
- str_info->decode_osize);
- }
- }
- total_input += input_bytes;
-
- if (str_info->ops == STREAM_OPS_PLAYBACK_DRM) {
- if (total_input == cum_input_given)
- copy_in_done = true;
- copy_out_done = true;
-
- } else {
- if (total_output == cum_output_given) {
- copy_out_done = true;
- pr_debug("all o/p cpy done\n");
- }
-
- if (total_input == cum_input_given) {
- copy_in_done = true;
- pr_debug("all i/p cpy done\n");
- }
- }
-
- pr_debug("copy_out = %d, copy_in = %d\n",
- copy_out_done, copy_in_done);
- }
-
-finish:
- dbufs->input_bytes_consumed = total_input;
- dbufs->output_bytes_produced = total_output;
- str_info->status = str_info->prev;
- str_info->prev = STREAM_DECODE;
- kfree(str_info->decode_ibuf);
- str_info->decode_ibuf = NULL;
- return retval;
-}
diff --git a/drivers/staging/intel_sst/intelmid.c b/drivers/staging/intel_sst/intelmid.c
deleted file mode 100644
index 492b660..0000000
--- a/drivers/staging/intel_sst/intelmid.c
+++ /dev/null
@@ -1,1022 +0,0 @@
-/*
- * intelmid.c - Intel Sound card driver for MID
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Harsha Priya <priya.harsha@intel.com>
- * Vinod Koul <vinod.koul@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * ALSA driver for Intel MID sound card chipset
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/interrupt.h>
-#include <linux/sched.h>
-#include <linux/firmware.h>
-#include <linux/input.h>
-#include <sound/control.h>
-#include <asm/mrst.h>
-#include <sound/pcm.h>
-#include <sound/jack.h>
-#include <sound/pcm_params.h>
-#include <sound/initval.h>
-#include <linux/gpio.h>
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intel_sst_fw_ipc.h"
-#include "intel_sst_common.h"
-#include "intelmid_snd_control.h"
-#include "intelmid_adc_control.h"
-#include "intelmid.h"
-
-MODULE_AUTHOR("Vinod Koul <vinod.koul@intel.com>");
-MODULE_AUTHOR("Harsha Priya <priya.harsha@intel.com>");
-MODULE_AUTHOR("Dharageswari R <dharageswari.r@intel.com>");
-MODULE_AUTHOR("KP Jeeja <jeeja.kp@intel.com>");
-MODULE_DESCRIPTION("Intel MAD Sound card driver");
-MODULE_LICENSE("GPL v2");
-MODULE_SUPPORTED_DEVICE("{Intel,Intel_MAD}");
-
-
-static int card_index = SNDRV_DEFAULT_IDX1;/* Index 0-MAX */
-static char *card_id = SNDRV_DEFAULT_STR1; /* ID for this card */
-
-module_param(card_index, int, 0444);
-MODULE_PARM_DESC(card_index, "Index value for INTELMAD soundcard.");
-module_param(card_id, charp, 0444);
-MODULE_PARM_DESC(card_id, "ID string for INTELMAD soundcard.");
-
-int sst_card_vendor_id;
-int intelmid_audio_interrupt_enable;/*checkpatch fix*/
-struct snd_intelmad *intelmad_drv;
-
-#define INFO(_cpu_id, _irq_cache, _size) \
- ((kernel_ulong_t)&(struct snd_intelmad_probe_info) { \
- .cpu_id = (_cpu_id), \
- .irq_cache = (_irq_cache), \
- .size = (_size), \
- })
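
The INFO() macro above stores per-device data in the id table by taking the address of a file-scope compound literal and casting it into driver_data; snd_intelmad_probe() later casts it back (see the id table and probe function further down). The sketch below shows the same packing pattern in isolation; probe_info and device_id are simplified stand-ins, and driver_data is typed as a plain pointer here so the example stays strictly standard C (the kernel's kernel_ulong_t cast is the same idea).

#include <stdio.h>

struct probe_info { unsigned int cpu_id, irq_cache, size; };
struct device_id { const char *name; const void *driver_data; };

/* Same trick as INFO() above: a file-scope compound literal has static
 * storage duration, so its address can sit in the id table. */
#define PACK_INFO(_cpu, _irq, _sz)					\
	((const void *)&(struct probe_info){ .cpu_id = (_cpu),		\
		.irq_cache = (_irq), .size = (_sz) })

static const struct device_id ids[] = {
	{ "pmic_audio", PACK_INFO(1, 0xFFFFEFF8, 1) },
	{ "msic_audio", PACK_INFO(2, 0xFFFF7FCD, 1) },
};

int main(void)
{
	/* probe() recovers the per-device data by casting driver_data back */
	const struct probe_info *info = ids[1].driver_data;

	printf("%s: cpu_id=%u irq_cache=0x%x\n", ids[1].name,
	       info->cpu_id, info->irq_cache);
	return 0;
}
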
-/* Data path functionalities */
-static struct snd_pcm_hardware snd_intelmad_stream = {
- .info = (SNDRV_PCM_INFO_INTERLEAVED |
- SNDRV_PCM_INFO_DOUBLE |
- SNDRV_PCM_INFO_PAUSE |
- SNDRV_PCM_INFO_RESUME |
- SNDRV_PCM_INFO_MMAP|
- SNDRV_PCM_INFO_MMAP_VALID |
- SNDRV_PCM_INFO_BLOCK_TRANSFER |
- SNDRV_PCM_INFO_SYNC_START),
- .formats = (SNDRV_PCM_FMTBIT_S16 | SNDRV_PCM_FMTBIT_U16 |
- SNDRV_PCM_FMTBIT_S24 | SNDRV_PCM_FMTBIT_U24 |
- SNDRV_PCM_FMTBIT_S32 | SNDRV_PCM_FMTBIT_U32),
- .rates = (SNDRV_PCM_RATE_8000|
- SNDRV_PCM_RATE_44100 |
- SNDRV_PCM_RATE_48000),
- .rate_min = MIN_RATE,
-
- .rate_max = MAX_RATE,
- .channels_min = MIN_CHANNEL,
- .channels_max = MAX_CHANNEL_AMIC,
- .buffer_bytes_max = MAX_BUFFER,
- .period_bytes_min = MIN_PERIOD_BYTES,
- .period_bytes_max = MAX_PERIOD_BYTES,
- .periods_min = MIN_PERIODS,
- .periods_max = MAX_PERIODS,
- .fifo_size = FIFO_SIZE,
-};
-
-
-/**
- * snd_intelmad_pcm_trigger - stream activities are handled here
- *
- * @substream: substream for which the stream function is called
- * @cmd: the stream command requested from the upper layer
- *
- * This function is called whenever a stream activity is invoked
- */
-static int snd_intelmad_pcm_trigger(struct snd_pcm_substream *substream,
- int cmd)
-{
- int ret_val = 0, str_id;
- struct snd_intelmad *intelmaddata;
- struct mad_stream_pvt *stream;
- struct intel_sst_pcm_control *sst_ops;
-
- WARN_ON(!substream);
-
- intelmaddata = snd_pcm_substream_chip(substream);
- stream = substream->runtime->private_data;
-
- WARN_ON(!intelmaddata->sstdrv_ops);
- WARN_ON(!intelmaddata->sstdrv_ops->scard_ops);
- sst_ops = intelmaddata->sstdrv_ops->pcm_control;
- str_id = stream->stream_info.str_id;
-
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- pr_debug("Trigger Start\n");
- ret_val = sst_ops->device_control(SST_SND_START, &str_id);
- if (ret_val)
- return ret_val;
- stream->stream_status = RUNNING;
- stream->substream = substream;
- break;
- case SNDRV_PCM_TRIGGER_STOP:
- pr_debug("in stop\n");
- ret_val = sst_ops->device_control(SST_SND_DROP, &str_id);
- if (ret_val)
- return ret_val;
- stream->stream_status = DROPPED;
- break;
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- pr_debug("in pause\n");
- ret_val = sst_ops->device_control(SST_SND_PAUSE, &str_id);
- if (ret_val)
- return ret_val;
- stream->stream_status = PAUSED;
- break;
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- pr_debug("in pause release\n");
- ret_val = sst_ops->device_control(SST_SND_RESUME, &str_id);
- if (ret_val)
- return ret_val;
- stream->stream_status = RUNNING;
- break;
- default:
- return -EINVAL;
- }
- return ret_val;
-}
-
-/**
- * snd_intelmad_pcm_prepare - internal preparation before starting a stream
- *
- * @substream: substream for which the function is called
- *
- * This function is called when a stream is started for internal preparation.
- */
-static int snd_intelmad_pcm_prepare(struct snd_pcm_substream *substream)
-{
- struct mad_stream_pvt *stream;
- int ret_val = 0;
- struct snd_intelmad *intelmaddata;
-
- pr_debug("pcm_prepare called\n");
-
- WARN_ON(!substream);
- stream = substream->runtime->private_data;
- intelmaddata = snd_pcm_substream_chip(substream);
- pr_debug("pb cnt = %d cap cnt = %d\n",\
- intelmaddata->playback_cnt,
- intelmaddata->capture_cnt);
-
- if (stream->stream_info.str_id) {
- pr_debug("Prepare called for already set stream\n");
- ret_val = intelmaddata->sstdrv_ops->pcm_control->device_control(
- SST_SND_DROP, &stream->stream_info.str_id);
- return ret_val;
- }
-
- ret_val = snd_intelmad_alloc_stream(substream);
- if (ret_val < 0)
- return ret_val;
- stream->dbg_cum_bytes = 0;
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- intelmaddata->playback_cnt++;
- else
- intelmaddata->capture_cnt++;
- /* return back the stream id */
- snprintf(substream->pcm->id, sizeof(substream->pcm->id),
- "%d", stream->stream_info.str_id);
- pr_debug("stream id to user = %s\n",
- substream->pcm->id);
-
- ret_val = snd_intelmad_init_stream(substream);
- if (ret_val)
- return ret_val;
- substream->runtime->hw.info = SNDRV_PCM_INFO_BLOCK_TRANSFER;
- return ret_val;
-}
-
-static int snd_intelmad_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *hw_params)
-{
- int ret_val;
-
- pr_debug("snd_intelmad_hw_params called\n");
- ret_val = snd_pcm_lib_malloc_pages(substream,
- params_buffer_bytes(hw_params));
- memset(substream->runtime->dma_area, 0,
- params_buffer_bytes(hw_params));
-
- return ret_val;
-}
-
-static int snd_intelmad_hw_free(struct snd_pcm_substream *substream)
-{
- pr_debug("snd_intelmad_hw_free called\n");
- return snd_pcm_lib_free_pages(substream);
-}
-
-/**
- * snd_intelmad_pcm_pointer- to send the current buffer pointer processed by hw
- *
- * @substream: substream for which the function is called
- *
- * This function is called by ALSA framework to get the current hw buffer ptr
- * when a period is elapsed
- */
-static snd_pcm_uframes_t snd_intelmad_pcm_pointer
- (struct snd_pcm_substream *substream)
-{
- /* struct snd_pcm_runtime *runtime = substream->runtime; */
- struct mad_stream_pvt *stream;
- struct snd_intelmad *intelmaddata;
- int ret_val;
-
- WARN_ON(!substream);
-
- intelmaddata = snd_pcm_substream_chip(substream);
- stream = substream->runtime->private_data;
- if (stream->stream_status == INIT)
- return 0;
-
- ret_val = intelmaddata->sstdrv_ops->pcm_control->device_control(
- SST_SND_BUFFER_POINTER, &stream->stream_info);
- if (ret_val) {
- pr_err("error code = 0x%x\n", ret_val);
- return ret_val;
- }
- pr_debug("samples reported out 0x%llx\n",
- stream->stream_info.buffer_ptr);
- pr_debug("Frame bits:: %d period_count :: %d\n",
- (int)substream->runtime->frame_bits,
- (int)substream->runtime->period_size);
-
- return stream->stream_info.buffer_ptr;
-
-}
-
-/**
- * snd_intelmad_close - to free parameters when the stream is stopped
- *
- * @substream: substream for which the function is called
- *
- * This function is called by ALSA framework when stream is stopped
- */
-static int snd_intelmad_close(struct snd_pcm_substream *substream)
-{
- struct snd_intelmad *intelmaddata;
- struct mad_stream_pvt *stream;
- int ret_val = 0, str_id;
-
- WARN_ON(!substream);
-
- stream = substream->runtime->private_data;
- str_id = stream->stream_info.str_id;
-
- pr_debug("sst: snd_intelmad_close called for %d\n", str_id);
- intelmaddata = snd_pcm_substream_chip(substream);
-
- pr_debug("str id = %d\n", stream->stream_info.str_id);
- if (stream->stream_info.str_id) {
- /* SST API to actually stop/free the stream */
- ret_val = intelmaddata->sstdrv_ops->pcm_control->close(str_id);
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- intelmaddata->playback_cnt--;
- else
- intelmaddata->capture_cnt--;
- }
- pr_debug("snd_intelmad_close : pb cnt = %d cap cnt = %d\n",
- intelmaddata->playback_cnt, intelmaddata->capture_cnt);
- kfree(substream->runtime->private_data);
- return ret_val;
-}
-
-/**
- * snd_intelmad_open- to set runtime parameters during stream start
- *
- * @substream: substream for which the function is called
- * @type: audio device type
- *
- * This function is called by ALSA framework when stream is started
- */
-static int snd_intelmad_open(struct snd_pcm_substream *substream,
- enum snd_sst_audio_device_type type)
-{
- struct snd_intelmad *intelmaddata;
- struct snd_pcm_runtime *runtime;
- struct mad_stream_pvt *stream;
-
- WARN_ON(!substream);
-
- pr_debug("snd_intelmad_open called\n");
-
- intelmaddata = snd_pcm_substream_chip(substream);
- runtime = substream->runtime;
- /* set the runtime hw parameter with local snd_pcm_hardware struct */
- runtime->hw = snd_intelmad_stream;
- if (intelmaddata->cpu_id == CPU_CHIP_LINCROFT) {
- /*
- * MRST firmware currently denies stereo recording requests.
- */
- if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
- runtime->hw.formats = (SNDRV_PCM_FMTBIT_S16 |
- SNDRV_PCM_FMTBIT_U16);
- runtime->hw.channels_max = 1;
- }
- }
- if (intelmaddata->cpu_id == CPU_CHIP_PENWELL) {
- runtime->hw = snd_intelmad_stream;
- runtime->hw.rates = SNDRV_PCM_RATE_48000;
- runtime->hw.rate_min = MAX_RATE;
- runtime->hw.formats = (SNDRV_PCM_FMTBIT_S24 |
- SNDRV_PCM_FMTBIT_U24);
- if (intelmaddata->sstdrv_ops->scard_ops->input_dev_id == AMIC)
- runtime->hw.channels_max = MAX_CHANNEL_AMIC;
- else
- runtime->hw.channels_max = MAX_CHANNEL_DMIC;
-
- }
-	/* setup the internal data structure stream pointers based on it being
-	   a playback or capture stream */
- stream = kzalloc(sizeof(*stream), GFP_KERNEL);
- if (!stream)
- return -ENOMEM;
- stream->stream_info.str_id = 0;
- stream->device = type;
- stream->stream_status = INIT;
- runtime->private_data = stream;
- return snd_pcm_hw_constraint_integer(runtime,
- SNDRV_PCM_HW_PARAM_PERIODS);
-}
-
-static int snd_intelmad_headset_open(struct snd_pcm_substream *substream)
-{
- return snd_intelmad_open(substream, SND_SST_DEVICE_HEADSET);
-}
-
-static int snd_intelmad_ihf_open(struct snd_pcm_substream *substream)
-{
- return snd_intelmad_open(substream, SND_SST_DEVICE_IHF);
-}
-
-static int snd_intelmad_vibra_open(struct snd_pcm_substream *substream)
-{
- return snd_intelmad_open(substream, SND_SST_DEVICE_VIBRA);
-}
-
-static int snd_intelmad_haptic_open(struct snd_pcm_substream *substream)
-{
- return snd_intelmad_open(substream, SND_SST_DEVICE_HAPTIC);
-}
-
-static struct snd_pcm_ops snd_intelmad_headset_ops = {
- .open = snd_intelmad_headset_open,
- .close = snd_intelmad_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_intelmad_hw_params,
- .hw_free = snd_intelmad_hw_free,
- .prepare = snd_intelmad_pcm_prepare,
- .trigger = snd_intelmad_pcm_trigger,
- .pointer = snd_intelmad_pcm_pointer,
-};
-
-static struct snd_pcm_ops snd_intelmad_ihf_ops = {
- .open = snd_intelmad_ihf_open,
- .close = snd_intelmad_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_intelmad_hw_params,
- .hw_free = snd_intelmad_hw_free,
- .prepare = snd_intelmad_pcm_prepare,
- .trigger = snd_intelmad_pcm_trigger,
- .pointer = snd_intelmad_pcm_pointer,
-};
-
-static struct snd_pcm_ops snd_intelmad_vibra_ops = {
- .open = snd_intelmad_vibra_open,
- .close = snd_intelmad_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_intelmad_hw_params,
- .hw_free = snd_intelmad_hw_free,
- .prepare = snd_intelmad_pcm_prepare,
- .trigger = snd_intelmad_pcm_trigger,
- .pointer = snd_intelmad_pcm_pointer,
-};
-
-static struct snd_pcm_ops snd_intelmad_haptic_ops = {
- .open = snd_intelmad_haptic_open,
- .close = snd_intelmad_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_intelmad_hw_params,
- .hw_free = snd_intelmad_hw_free,
- .prepare = snd_intelmad_pcm_prepare,
- .trigger = snd_intelmad_pcm_trigger,
- .pointer = snd_intelmad_pcm_pointer,
-};
-
-static struct snd_pcm_ops snd_intelmad_capture_ops = {
- .open = snd_intelmad_headset_open,
- .close = snd_intelmad_close,
- .ioctl = snd_pcm_lib_ioctl,
- .hw_params = snd_intelmad_hw_params,
- .hw_free = snd_intelmad_hw_free,
- .prepare = snd_intelmad_pcm_prepare,
- .trigger = snd_intelmad_pcm_trigger,
- .pointer = snd_intelmad_pcm_pointer,
-};
-
-int intelmad_get_mic_bias(void)
-{
- struct snd_pmic_ops *pmic_ops;
-
- if (!intelmad_drv || !intelmad_drv->sstdrv_ops)
- return -ENODEV;
- pmic_ops = intelmad_drv->sstdrv_ops->scard_ops;
- if (pmic_ops && pmic_ops->pmic_get_mic_bias)
- return pmic_ops->pmic_get_mic_bias(intelmad_drv);
- else
- return -ENODEV;
-}
-EXPORT_SYMBOL_GPL(intelmad_get_mic_bias);
-
-int intelmad_set_headset_state(int state)
-{
- struct snd_pmic_ops *pmic_ops;
-
- if (!intelmad_drv || !intelmad_drv->sstdrv_ops)
- return -ENODEV;
- pmic_ops = intelmad_drv->sstdrv_ops->scard_ops;
- if (pmic_ops && pmic_ops->pmic_set_headset_state)
- return pmic_ops->pmic_set_headset_state(state);
- else
- return -ENODEV;
-}
-EXPORT_SYMBOL_GPL(intelmad_set_headset_state);
-
-void sst_process_mad_jack_detection(struct work_struct *work)
-{
- u8 interrupt_status;
- struct mad_jack_msg_wq *mad_jack_detect =
- container_of(work, struct mad_jack_msg_wq, wq);
-
- struct snd_intelmad *intelmaddata =
- mad_jack_detect->intelmaddata;
-
- if (!intelmaddata)
- return;
-
- interrupt_status = mad_jack_detect->intsts;
- if (intelmaddata->sstdrv_ops && intelmaddata->sstdrv_ops->scard_ops
- && intelmaddata->sstdrv_ops->scard_ops->pmic_irq_cb) {
- intelmaddata->sstdrv_ops->scard_ops->pmic_irq_cb(
- (void *)intelmaddata, interrupt_status);
- intelmaddata->sstdrv_ops->scard_ops->pmic_jack_enable();
- }
- kfree(mad_jack_detect);
-}
-/**
- * snd_intelmad_intr_handler- interrupt handler
- *
- * @irq : irq number of the interrupt received
- * @dev: device context
- *
- * This function is called when an interrupt is raised at the sound card
- */
-static irqreturn_t snd_intelmad_intr_handler(int irq, void *dev)
-{
- struct snd_intelmad *intelmaddata =
- (struct snd_intelmad *)dev;
- u8 interrupt_status;
- struct mad_jack_msg_wq *mad_jack_msg;
- memcpy_fromio(&interrupt_status,
- ((void *)(intelmaddata->int_base)),
- sizeof(u8));
-
-	mad_jack_msg = kzalloc(sizeof(*mad_jack_msg), GFP_ATOMIC);
-	if (!mad_jack_msg)
-		return IRQ_HANDLED;
-	mad_jack_msg->intsts = interrupt_status;
- mad_jack_msg->intelmaddata = intelmaddata;
- INIT_WORK(&mad_jack_msg->wq, sst_process_mad_jack_detection);
- queue_work(intelmaddata->mad_jack_wq, &mad_jack_msg->wq);
-
- return IRQ_HANDLED;
-}
-
-void sst_mad_send_jack_report(struct snd_jack *jack,
- int buttonpressevent , int status)
-{
-
- if (!jack) {
- pr_debug("MAD error jack empty\n");
-
- } else {
- snd_jack_report(jack, status);
- /* button pressed and released */
- if (buttonpressevent)
- snd_jack_report(jack, 0);
- pr_debug("MAD sending jack report Done !!!\n");
- }
-}
-
-static int __devinit snd_intelmad_register_irq(
- struct snd_intelmad *intelmaddata, unsigned int regbase,
- unsigned int regsize)
-{
- int ret_val;
- char *drv_name;
-
- pr_debug("irq reg regbase 0x%x, regsize 0x%x\n",
- regbase, regsize);
- intelmaddata->int_base = ioremap_nocache(regbase, regsize);
- if (!intelmaddata->int_base)
- pr_err("Mapping of cache failed\n");
- pr_debug("irq = 0x%x\n", intelmaddata->irq);
- if (intelmaddata->cpu_id == CPU_CHIP_PENWELL)
- drv_name = DRIVER_NAME_MFLD;
- else
- drv_name = DRIVER_NAME_MRST;
- ret_val = request_irq(intelmaddata->irq,
- snd_intelmad_intr_handler,
- IRQF_SHARED, drv_name,
- intelmaddata);
- if (ret_val)
- pr_err("cannot register IRQ\n");
- return ret_val;
-}
-
-static int __devinit snd_intelmad_sst_register(
- struct snd_intelmad *intelmaddata)
-{
- int ret_val = 0;
- struct snd_pmic_ops *intelmad_vendor_ops[MAX_VENDORS] = {
- &snd_pmic_ops_fs,
- &snd_pmic_ops_mx,
- &snd_pmic_ops_nc,
- &snd_msic_ops
- };
-
- struct sc_reg_access vendor_addr = {0x00, 0x00, 0x00};
-
- if (intelmaddata->cpu_id == CPU_CHIP_LINCROFT) {
- ret_val = sst_sc_reg_access(&vendor_addr, PMIC_READ, 1);
- if (ret_val)
- return ret_val;
- sst_card_vendor_id = (vendor_addr.value & (MASK2|MASK1|MASK0));
-		pr_debug("original and extracted vendor id = 0x%x %d\n",
- vendor_addr.value, sst_card_vendor_id);
- if (sst_card_vendor_id < 0 || sst_card_vendor_id > 2) {
- pr_err("vendor card not supported!!\n");
- return -EIO;
- }
- } else
- sst_card_vendor_id = 0x3;
-
- intelmaddata->sstdrv_ops->module_name = SST_CARD_NAMES;
- intelmaddata->sstdrv_ops->vendor_id = sst_card_vendor_id;
- BUG_ON(!intelmad_vendor_ops[sst_card_vendor_id]);
- intelmaddata->sstdrv_ops->scard_ops =
- intelmad_vendor_ops[sst_card_vendor_id];
-
- if (intelmaddata->cpu_id == CPU_CHIP_PENWELL) {
- intelmaddata->sstdrv_ops->scard_ops->pb_on = 0;
- intelmaddata->sstdrv_ops->scard_ops->cap_on = 0;
- intelmaddata->sstdrv_ops->scard_ops->input_dev_id = DMIC;
- intelmaddata->sstdrv_ops->scard_ops->output_dev_id =
- STEREO_HEADPHONE;
- intelmaddata->sstdrv_ops->scard_ops->lineout_dev_id = NONE;
- }
-
- /* registering with SST driver to get access to SST APIs to use */
- ret_val = register_sst_card(intelmaddata->sstdrv_ops);
- if (ret_val) {
- pr_err("sst card registration failed\n");
- return ret_val;
- }
- sst_card_vendor_id = intelmaddata->sstdrv_ops->vendor_id;
- intelmaddata->pmic_status = PMIC_UNINIT;
- return ret_val;
-}
-
-static void snd_intelmad_page_free(struct snd_pcm *pcm)
-{
- snd_pcm_lib_preallocate_free_for_all(pcm);
-}
-/* Driver Init/exit functionalities */
-/**
- * snd_intelmad_pcm_new - to setup pcm for the card
- *
- * @card: pointer to the sound card structure
- * @intelmaddata: pointer to internal context
- * @pb: playback count for this card
- * @cap: capture count for this card
- * @index: device index
- *
- * This function is called from probe function to set up pcm params
- * and functions
- */
-static int __devinit snd_intelmad_pcm_new(struct snd_card *card,
- struct snd_intelmad *intelmaddata,
- unsigned int pb, unsigned int cap, unsigned int index)
-{
- int ret_val = 0;
- struct snd_pcm *pcm;
- char name[32] = INTEL_MAD;
- struct snd_pcm_ops *pb_ops = NULL, *cap_ops = NULL;
-
- pr_debug("called for pb %d, cp %d, idx %d\n", pb, cap, index);
- ret_val = snd_pcm_new(card, name, index, pb, cap, &pcm);
- if (ret_val)
- return ret_val;
- /* setup the ops for playback and capture streams */
- switch (index) {
- case 0:
- pb_ops = &snd_intelmad_headset_ops;
- cap_ops = &snd_intelmad_capture_ops;
- break;
- case 1:
- pb_ops = &snd_intelmad_ihf_ops;
- cap_ops = &snd_intelmad_capture_ops;
- break;
- case 2:
- pb_ops = &snd_intelmad_vibra_ops;
- cap_ops = &snd_intelmad_capture_ops;
- break;
- case 3:
- pb_ops = &snd_intelmad_haptic_ops;
- cap_ops = &snd_intelmad_capture_ops;
- break;
- }
- if (pb)
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, pb_ops);
- if (cap)
- snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, cap_ops);
- /* setup private data which can be retrieved when required */
- pcm->private_data = intelmaddata;
- pcm->private_free = snd_intelmad_page_free;
- pcm->info_flags = 0;
- strncpy(pcm->name, card->shortname, strlen(card->shortname));
- /* allocate dma pages for ALSA stream operations */
- snd_pcm_lib_preallocate_pages_for_all(pcm,
- SNDRV_DMA_TYPE_CONTINUOUS,
- snd_dma_continuous_data(GFP_KERNEL),
- MIN_BUFFER, MAX_BUFFER);
- return ret_val;
-}
-
-static int __devinit snd_intelmad_pcm(struct snd_card *card,
- struct snd_intelmad *intelmaddata)
-{
- int ret_val = 0;
-
- WARN_ON(!card);
- WARN_ON(!intelmaddata);
- pr_debug("snd_intelmad_pcm called\n");
- ret_val = snd_intelmad_pcm_new(card, intelmaddata, 1, 1, 0);
- if (intelmaddata->cpu_id == CPU_CHIP_LINCROFT)
- return ret_val;
- ret_val = snd_intelmad_pcm_new(card, intelmaddata, 1, 0, 1);
- if (ret_val)
- return ret_val;
- ret_val = snd_intelmad_pcm_new(card, intelmaddata, 1, 0, 2);
- if (ret_val)
- return ret_val;
- return snd_intelmad_pcm_new(card, intelmaddata, 1, 0, 3);
-}
-
-/**
- * snd_intelmad_jack- to setup jack settings of the card
- *
- * @intelmaddata: pointer to internal context
- *
- * This function is called to send jack events
- */
-static int snd_intelmad_jack(struct snd_intelmad *intelmaddata)
-{
- struct snd_jack *jack;
- int retval;
-
- pr_debug("snd_intelmad_jack called\n");
- jack = &intelmaddata->jack[0].jack;
- snd_jack_set_key(jack, SND_JACK_BTN_0, KEY_PHONE);
- retval = snd_jack_new(intelmaddata->card, "Intel(R) MID Audio Jack",
- SND_JACK_HEADPHONE | SND_JACK_HEADSET |
- SW_JACK_PHYSICAL_INSERT | SND_JACK_BTN_0
- | SND_JACK_BTN_1, &jack);
- pr_debug("snd_intelmad_jack called\n");
- if (retval < 0)
- return retval;
- snd_jack_report(jack, 0);
-
- jack->private_data = jack;
- intelmaddata->jack[0].jack = *jack;
-
- return retval;
-}
-
-/**
- * snd_intelmad_mixer- to setup mixer settings of the card
- *
- * @intelmaddata: pointer to internal context
- *
- * This function is called from probe function to set up mixer controls
- */
-static int __devinit snd_intelmad_mixer(struct snd_intelmad *intelmaddata)
-{
- struct snd_card *card;
- unsigned int idx;
- int ret_val = 0, max_controls = 0;
- char *mixername = "IntelMAD Controls";
- struct snd_kcontrol_new *controls;
-
- WARN_ON(!intelmaddata);
-
- card = intelmaddata->card;
- strncpy(card->mixername, mixername, sizeof(card->mixername)-1);
- /* add all widget controls and expose the same */
- if (intelmaddata->cpu_id == CPU_CHIP_PENWELL) {
- max_controls = MAX_CTRL_MFLD;
- controls = snd_intelmad_controls_mfld;
- } else {
- max_controls = MAX_CTRL_MRST;
- controls = snd_intelmad_controls_mrst;
- }
- for (idx = 0; idx < max_controls; idx++) {
- ret_val = snd_ctl_add(card,
- snd_ctl_new1(&controls[idx],
- intelmaddata));
- pr_debug("mixer[idx]=%d added\n", idx);
- if (ret_val) {
- pr_err("in adding of control index = %d\n", idx);
- break;
- }
- }
- return ret_val;
-}
-
-static int snd_intelmad_dev_free(struct snd_device *device)
-{
- struct snd_intelmad *intelmaddata;
-
- WARN_ON(!device);
-
- intelmaddata = device->device_data;
-
- pr_debug("snd_intelmad_dev_free called\n");
- unregister_sst_card(intelmaddata->sstdrv_ops);
-
- /* free allocated memory for internal context */
- destroy_workqueue(intelmaddata->mad_jack_wq);
- device->device_data = NULL;
- kfree(intelmaddata->sstdrv_ops);
- kfree(intelmaddata);
-
- return 0;
-}
-
-static int __devinit snd_intelmad_create(
- struct snd_intelmad *intelmaddata,
- struct snd_card *card)
-{
- int ret_val;
- static struct snd_device_ops ops = {
- .dev_free = snd_intelmad_dev_free,
- };
-
- WARN_ON(!intelmaddata);
- WARN_ON(!card);
- /* ALSA api to register for the device */
- ret_val = snd_device_new(card, SNDRV_DEV_LOWLEVEL, intelmaddata, &ops);
- return ret_val;
-}
-
-/**
- * snd_intelmad_probe - function registered for init
- * @pdev: pointer to the device structure
- *
- * This function is called when the device is initialized
- */
-int __devinit snd_intelmad_probe(struct platform_device *pdev)
-{
- struct snd_card *card;
- int ret_val;
- struct snd_intelmad *intelmaddata;
- const struct platform_device_id *id = platform_get_device_id(pdev);
- struct snd_intelmad_probe_info *info = (void *)id->driver_data;
-
- pr_debug("probe for %s cpu_id %d\n", pdev->name, info->cpu_id);
-	pr_debug("irq_cache %x of size %x\n", info->irq_cache, info->size);
- if (!strcmp(pdev->name, DRIVER_NAME_MRST))
- pr_debug("detected MRST\n");
- else if (!strcmp(pdev->name, DRIVER_NAME_MFLD))
- pr_debug("detected MFLD\n");
- else {
- pr_err("detected unknown device abort!!\n");
- return -EIO;
- }
- if ((info->cpu_id < CPU_CHIP_LINCROFT) ||
- (info->cpu_id > CPU_CHIP_PENWELL)) {
- pr_err("detected unknown cpu_id abort!!\n");
- return -EIO;
- }
- /* allocate memory for saving internal context and working */
- intelmaddata = kzalloc(sizeof(*intelmaddata), GFP_KERNEL);
- if (!intelmaddata) {
-		pr_debug("mem allocation fail\n");
- return -ENOMEM;
- }
- intelmad_drv = intelmaddata;
-
- /* allocate memory for LPE API set */
- intelmaddata->sstdrv_ops = kzalloc(sizeof(struct intel_sst_card_ops),
- GFP_KERNEL);
- if (!intelmaddata->sstdrv_ops) {
- pr_err("mem allocation for ops fail\n");
- kfree(intelmaddata);
- return -ENOMEM;
- }
-
- intelmaddata->cpu_id = info->cpu_id;
- /* create a card instance with ALSA framework */
- ret_val = snd_card_create(card_index, card_id, THIS_MODULE, 0, &card);
- if (ret_val) {
- pr_err("snd_card_create fail\n");
- goto free_allocs;
- }
-
- intelmaddata->pdev = pdev;
- intelmaddata->irq = platform_get_irq(pdev, 0);
- platform_set_drvdata(pdev, intelmaddata);
- intelmaddata->card = card;
- intelmaddata->card_id = card_id;
- intelmaddata->card_index = card_index;
- intelmaddata->master_mute = UNMUTE;
- intelmaddata->playback_cnt = intelmaddata->capture_cnt = 0;
- strncpy(card->driver, INTEL_MAD, strlen(INTEL_MAD));
- strncpy(card->shortname, INTEL_MAD, strlen(INTEL_MAD));
-
- intelmaddata->sstdrv_ops->module_name = SST_CARD_NAMES;
- /* registering with LPE driver to get access to SST APIs to use */
- ret_val = snd_intelmad_sst_register(intelmaddata);
- if (ret_val) {
- pr_err("snd_intelmad_sst_register failed\n");
- goto set_null_data;
- }
-
- intelmaddata->pmic_status = PMIC_INIT;
-
- ret_val = snd_intelmad_pcm(card, intelmaddata);
- if (ret_val) {
- pr_err("snd_intelmad_pcm failed\n");
- goto free_sst;
- }
-
- ret_val = snd_intelmad_mixer(intelmaddata);
- if (ret_val) {
- pr_err("snd_intelmad_mixer failed\n");
- goto free_card;
- }
-
- ret_val = snd_intelmad_jack(intelmaddata);
- if (ret_val) {
- pr_err("snd_intelmad_jack failed\n");
- goto free_card;
- }
- intelmaddata->adc_address = mid_initialize_adc();
-
- /*create work queue for jack interrupt*/
- INIT_WORK(&intelmaddata->mad_jack_msg.wq,
- sst_process_mad_jack_detection);
-
- intelmaddata->mad_jack_wq = create_workqueue("sst_mad_jack_wq");
- if (!intelmaddata->mad_jack_wq)
- goto free_card;
-
- ret_val = snd_intelmad_register_irq(intelmaddata,
- info->irq_cache, info->size);
- if (ret_val) {
- pr_err("snd_intelmad_register_irq fail\n");
- goto free_mad_jack_wq;
- }
-
- /* internal function call to register device with ALSA */
- ret_val = snd_intelmad_create(intelmaddata, card);
- if (ret_val) {
- pr_err("snd_intelmad_create failed\n");
- goto set_pvt_data;
- }
- card->private_data = &intelmaddata;
- snd_card_set_dev(card, &pdev->dev);
- ret_val = snd_card_register(card);
- if (ret_val) {
- pr_err("snd_card_register failed\n");
- goto set_pvt_data;
- }
- if (pdev->dev.platform_data) {
- int gpio_amp = *(int *)pdev->dev.platform_data;
- if (gpio_request_one(gpio_amp, GPIOF_OUT_INIT_LOW, "amp power"))
- gpio_amp = 0;
- intelmaddata->sstdrv_ops->scard_ops->gpio_amp = gpio_amp;
- }
-
- pr_debug("snd_intelmad_probe complete\n");
- return ret_val;
-
-set_pvt_data:
- card->private_data = NULL;
-free_mad_jack_wq:
- destroy_workqueue(intelmaddata->mad_jack_wq);
-free_card:
- snd_card_free(intelmaddata->card);
-free_sst:
- unregister_sst_card(intelmaddata->sstdrv_ops);
-set_null_data:
- platform_set_drvdata(pdev, NULL);
-free_allocs:
- pr_err("probe failed\n");
- snd_card_free(card);
- kfree(intelmaddata->sstdrv_ops);
- kfree(intelmaddata);
- return ret_val;
-}
-
-
-static int snd_intelmad_remove(struct platform_device *pdev)
-{
- struct snd_intelmad *intelmaddata = platform_get_drvdata(pdev);
-
- if (intelmaddata) {
- if (intelmaddata->sstdrv_ops->scard_ops->gpio_amp)
- gpio_free(intelmaddata->sstdrv_ops->scard_ops->gpio_amp);
- free_irq(intelmaddata->irq, intelmaddata);
- snd_card_free(intelmaddata->card);
- }
- intelmad_drv = NULL;
- platform_set_drvdata(pdev, NULL);
- return 0;
-}
-
-/*********************************************************************
- * Driver initialization and exit
- *********************************************************************/
-static const struct platform_device_id snd_intelmad_ids[] = {
- {DRIVER_NAME_MRST, INFO(CPU_CHIP_LINCROFT, AUDINT_BASE, 1)},
- {DRIVER_NAME_MFLD, INFO(CPU_CHIP_PENWELL, 0xFFFF7FCD, 1)},
- {"", 0},
-
-};
-
-static struct platform_driver snd_intelmad_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = "intel_mid_sound_card",
- },
- .id_table = snd_intelmad_ids,
- .probe = snd_intelmad_probe,
- .remove = __devexit_p(snd_intelmad_remove),
-};
-
-/*
- * alsa_card_intelmad_init- driver init function
- *
- * This function is called when driver module is inserted
- */
-static int __init alsa_card_intelmad_init(void)
-{
- pr_debug("mad_init called\n");
- return platform_driver_register(&snd_intelmad_driver);
-}
-
-/**
- * alsa_card_intelmad_exit- driver exit function
- *
- * This function is called when driver module is removed
- */
-static void __exit alsa_card_intelmad_exit(void)
-{
- pr_debug("mad_exit called\n");
- return platform_driver_unregister(&snd_intelmad_driver);
-}
-
-module_init(alsa_card_intelmad_init)
-module_exit(alsa_card_intelmad_exit)
-
diff --git a/drivers/staging/intel_sst/intelmid.h b/drivers/staging/intel_sst/intelmid.h
deleted file mode 100644
index 14a7ba0..0000000
--- a/drivers/staging/intel_sst/intelmid.h
+++ /dev/null
@@ -1,209 +0,0 @@
-/*
- * intelmid.h - Intel Sound card driver for MID
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Harsha Priya <priya.harsha@intel.com>
- * Vinod Koul <vinod.koul@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * ALSA driver header for Intel MAD chipset
- */
-#ifndef __INTELMID_H
-#define __INTELMID_H
-
-#include <linux/time.h>
-#include <sound/jack.h>
-
-#define DRIVER_NAME_MFLD "msic_audio"
-#define DRIVER_NAME_MRST "pmic_audio"
-#define DRIVER_NAME "intelmid_audio"
-#define PMIC_SOUND_IRQ_TYPE_MASK (1 << 15)
-#define AUDINT_BASE (0xFFFFEFF8 + (6 * sizeof(u8)))
-#define REG_IRQ
-/* values #defined */
-/* will differ for different hw - to be taken from config */
-#define MAX_DEVICES 1
-#define MIN_RATE 8000
-#define MAX_RATE 48000
-#define MAX_BUFFER (800*1024) /* for PCM */
-#define MIN_BUFFER (800*1024)
-#define MAX_PERIODS (1024*2)
-#define MIN_PERIODS 2
-#define MAX_PERIOD_BYTES MAX_BUFFER
-#define MIN_PERIOD_BYTES 32
-/*#define MIN_PERIOD_BYTES 160*/
-#define MAX_MUTE 1
-#define MIN_MUTE 0
-#define MONO_CNTL 1
-#define STEREO_CNTL 2
-#define MIN_CHANNEL 1
-#define MAX_CHANNEL_AMIC 2
-#define MAX_CHANNEL_DMIC 5
-#define FIFO_SIZE 0 /* fifo not being used */
-#define INTEL_MAD "Intel MAD"
-#define MAX_CTRL_MRST 8
-#define MAX_CTRL_MFLD 7
-#define MAX_CTRL 8
-#define MAX_VENDORS 4
-/* TODO +6 db */
-#define MAX_VOL 64
-/* TODO -57 db */
-#define MIN_VOL 0
-#define PLAYBACK_COUNT 1
-#define CAPTURE_COUNT 1
-#define ADC_ONE_LSB_MULTIPLIER 2346
-
-#define MID_JACK_HS_LONG_PRESS SND_JACK_BTN_0
-#define MID_JACK_HS_SHORT_PRESS SND_JACK_BTN_1
-
-extern int sst_card_vendor_id;
-
-struct mad_jack {
- struct snd_jack jack;
- int jack_status;
- int jack_dev_state;
- struct timeval buttonpressed;
- struct timeval buttonreleased;
-};
-
-struct mad_jack_msg_wq {
- u8 intsts;
- struct snd_intelmad *intelmaddata;
- struct work_struct wq;
-
-};
-
-struct snd_intelmad_probe_info {
- unsigned int cpu_id;
- unsigned int irq_cache;
- unsigned int size;
-};
-
-/**
- * struct snd_intelmad - intelmad driver structure
- *
- * @card: ptr to the card details
- * @card_index: sound card index
- * @card_id: sound card id detected
- * @sstdrv_ops: ptr to sst driver ops
- * @pdev: ptr to platform device
- * @irq: interrupt number detected
- * @pmic_status: Device status of sound card
- * @int_base: ptr to MMIO interrupt region
- * @output_sel: device selected as o/p
- * @input_sel: device selected as i/p
- * @master_mute: master mute status
- * @jack: jack status
- * @playback_cnt: active pb streams
- * @capture_cnt: active cp streams
- * @mad_jack_msg: wq struct for jack interrupt processing
- * @mad_jack_wq: wq for jack interrupt processing
- * @jack_prev_state: Previous state of jack detected
- * @cpu_id: current cpu id loaded for
- */
-struct snd_intelmad {
- struct snd_card *card; /* ptr to the card details */
- int card_index;/* card index */
- char *card_id; /* card id */
- struct intel_sst_card_ops *sstdrv_ops;/* ptr to sst driver ops */
- struct platform_device *pdev;
- int irq;
- int pmic_status;
- void __iomem *int_base;
- int output_sel;
- int input_sel;
- int lineout_sel;
- int master_mute;
- struct mad_jack jack[4];
- int playback_cnt;
- int capture_cnt;
- u16 adc_address;
- struct mad_jack_msg_wq mad_jack_msg;
- struct workqueue_struct *mad_jack_wq;
- u8 jack_prev_state;
- unsigned int cpu_id;
-};
-
-struct snd_control_val {
- int playback_vol_max;
- int playback_vol_min;
- int capture_vol_max;
- int capture_vol_min;
- int master_vol_max;
- int master_vol_min;
-};
-
-struct mad_stream_pvt {
- int stream_status;
- int stream_ops;
- struct snd_pcm_substream *substream;
- struct pcm_stream_info stream_info;
- ssize_t dbg_cum_bytes;
- enum snd_sst_device_type device;
-};
-
-enum mad_drv_status {
- INIT = 1,
- STARTED,
- RUNNING,
- PAUSED,
- DROPPED,
-};
-
-enum mad_pmic_status {
- PMIC_UNINIT = 1,
- PMIC_INIT,
-};
-enum _widget_ctrl {
- OUTPUT_SEL = 1,
- INPUT_SEL,
- PLAYBACK_VOL,
- PLAYBACK_MUTE,
- CAPTURE_VOL,
- CAPTURE_MUTE,
- MASTER_VOL,
- MASTER_MUTE
-};
-enum _widget_ctrl_mfld {
- LINEOUT_SEL_MFLD = 3,
-};
-enum hw_chs {
- HW_CH0 = 0,
- HW_CH1,
- HW_CH2,
- HW_CH3
-};
-
-void period_elapsed(void *mad_substream);
-int snd_intelmad_alloc_stream(struct snd_pcm_substream *substream);
-int snd_intelmad_init_stream(struct snd_pcm_substream *substream);
-
-int sst_sc_reg_access(struct sc_reg_access *sc_access,
- int type, int num_val);
-#define CPU_CHIP_LINCROFT 1 /* System running lincroft */
-#define CPU_CHIP_PENWELL 2 /* System running penwell */
-
-extern struct snd_control_val intelmad_ctrl_val[];
-extern struct snd_kcontrol_new snd_intelmad_controls_mrst[];
-extern struct snd_kcontrol_new snd_intelmad_controls_mfld[];
-extern struct snd_pmic_ops *intelmad_vendor_ops[];
-void sst_mad_send_jack_report(struct snd_jack *jack,
- int buttonpressevent , int status);
-
-#endif /* __INTELMID_H */
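For context, the MIN_/MAX_ rate, buffer and period limits defined above are the kind of values an ALSA PCM driver normally advertises through struct snd_pcm_hardware. The descriptor below is only an illustrative sketch built from those defines; the flag and format choices are assumptions, not the removed PCM code.

#include <sound/pcm.h>

/* Illustrative only: how the limits above would typically be exposed. */
static const struct snd_pcm_hardware intelmad_pcm_hw_sketch = {
	.info			= SNDRV_PCM_INFO_INTERLEAVED |
				  SNDRV_PCM_INFO_BLOCK_TRANSFER,
	.formats		= SNDRV_PCM_FMTBIT_S16_LE,	/* assumed format */
	.rate_min		= MIN_RATE,		/* 8000 Hz */
	.rate_max		= MAX_RATE,		/* 48000 Hz */
	.channels_min		= MIN_CHANNEL,
	.channels_max		= MAX_CHANNEL_AMIC,
	.buffer_bytes_max	= MAX_BUFFER,
	.period_bytes_min	= MIN_PERIOD_BYTES,
	.period_bytes_max	= MAX_PERIOD_BYTES,
	.periods_min		= MIN_PERIODS,
	.periods_max		= MAX_PERIODS,
	.fifo_size		= FIFO_SIZE,		/* fifo not used */
};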
diff --git a/drivers/staging/intel_sst/intelmid_adc_control.h b/drivers/staging/intel_sst/intelmid_adc_control.h
deleted file mode 100644
index 65d5c39..0000000
--- a/drivers/staging/intel_sst/intelmid_adc_control.h
+++ /dev/null
@@ -1,193 +0,0 @@
-#ifndef __INTELMID_ADC_CONTROL_H__
-#define __INTELMID_ADC_CONTROL_H__
-/*
- * intelmid_adc_control.h - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corporation
- * Authors: R Durgadadoss <r.durgadoss@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * Common private ADC declarations for SST
- */
-
-
-#define MSIC_ADC1CNTL1 0x1C0
-#define MSIC_ADC_ENBL 0x10
-#define MSIC_ADC_START 0x08
-
-#define MSIC_ADC1CNTL3 0x1C2
-#define MSIC_ADCTHERM_ENBL 0x04
-#define MSIC_ADCRRDATA_ENBL 0x05
-
-#define MSIC_STOPBIT_MASK 16
-#define MSIC_ADCTHERM_MASK 4
-
-#define ADC_CHANLS_MAX 15 /* Number of ADC channels */
-#define ADC_LOOP_MAX (ADC_CHANLS_MAX - 1)
-
-/* ADC channel code values */
-#define AUDIO_DETECT_CODE 0x06
-
-/* ADC base addresses */
-#define ADC_CHNL_START_ADDR 0x1C5 /* increments by 1 */
-#define ADC_DATA_START_ADDR 0x1D4 /* increments by 2 */
-
-
-/**
- * configure_adc - enables/disables the ADC for conversion
- * @val: zero disables the ADC, non-zero enables it
- *
- * Enable/Disable the ADC depending on the argument
- *
- * Can sleep
- */
-static inline int configure_adc(int val)
-{
- int ret;
- struct sc_reg_access sc_access = {0,};
-
-
- sc_access.reg_addr = MSIC_ADC1CNTL1;
- ret = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
- if (ret)
- return ret;
-
- if (val)
- /* Enable and start the ADC */
- sc_access.value |= (MSIC_ADC_ENBL | MSIC_ADC_START);
- else
- /* Just stop the ADC */
- sc_access.value &= (~MSIC_ADC_START);
- sc_access.reg_addr = MSIC_ADC1CNTL1;
- return sst_sc_reg_access(&sc_access, PMIC_WRITE, 1);
-}
-
-/**
- * reset_stopbit - sets the stop bit to 0 on the given channel
- * @addr: address of the channel
- *
- * Can sleep
- */
-static inline int reset_stopbit(uint16_t addr)
-{
- int ret;
- struct sc_reg_access sc_access = {0,};
- sc_access.reg_addr = addr;
- ret = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
- if (ret)
- return ret;
- /* Set the stop bit to zero */
- sc_access.reg_addr = addr;
- sc_access.value = (sc_access.value) & 0xEF;
- return sst_sc_reg_access(&sc_access, PMIC_WRITE, 1);
-}
-
-/**
- * find_free_channel - finds an empty channel for conversion
- *
- * If the ADC is not enabled then start using 0th channel
- * itself. Otherwise find an empty channel by looking for a
- * channel whose stop bit is set to 1. Returns the index of the
- * first free channel on success or an error code on failure.
- *
- * Context: can sleep
- *
- */
-static inline int find_free_channel(void)
-{
- int ret;
- int i;
-
- struct sc_reg_access sc_access = {0,};
-
- /* check whether ADC is enabled */
- sc_access.reg_addr = MSIC_ADC1CNTL1;
- ret = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
- if (ret)
- return ret;
-
- if ((sc_access.value & MSIC_ADC_ENBL) == 0)
- return 0;
-
- /* ADC is already enabled; Looking for an empty channel */
- for (i = 0; i < ADC_CHANLS_MAX; i++) {
-
- sc_access.reg_addr = ADC_CHNL_START_ADDR + i;
- ret = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
- if (ret)
- return ret;
-
- if (sc_access.value & MSIC_STOPBIT_MASK) {
- ret = i;
- break;
- }
- }
- return (ret > ADC_LOOP_MAX) ? (-EINVAL) : ret;
-}
-
-/**
- * mid_initialize_adc - initialize the ADC
- *
- * Initialize an ADC channel for audio jack detection and return the
- * address of its data register. Can sleep.
- */
-static inline int mid_initialize_adc(void)
-{
- int base_addr, chnl_addr;
- int ret;
- static int channel_index;
- struct sc_reg_access sc_access = {0,};
-
- /* Index of the first channel in which the stop bit is set */
- channel_index = find_free_channel();
- if (channel_index < 0) {
-		pr_err("No free ADC channels\n");
- return channel_index;
- }
-
- base_addr = ADC_CHNL_START_ADDR + channel_index;
-
- if (!(channel_index == 0 || channel_index == ADC_LOOP_MAX)) {
- /* Reset stop bit for channels other than 0 and 12 */
- ret = reset_stopbit(base_addr);
- if (ret)
- return ret;
-
- /* Index of the first free channel */
- base_addr++;
- channel_index++;
- }
-
- /* Since this is the last channel, set the stop bit
-	to 1 by ORing AUDIO_DETECT_CODE with 0x10 */
- sc_access.reg_addr = base_addr;
- sc_access.value = AUDIO_DETECT_CODE | 0x10;
- ret = sst_sc_reg_access(&sc_access, PMIC_WRITE, 1);
- if (ret) {
- pr_err("unable to enable ADC");
- return ret;
- }
-
- chnl_addr = ADC_DATA_START_ADDR + 2 * channel_index;
- pr_debug("mid_initialize : %x", chnl_addr);
- configure_adc(1);
- return chnl_addr;
-}
-#endif
-
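Taken together, the inline helpers above form a small one-shot read path: find_free_channel() picks a slot, mid_initialize_adc() programs it with AUDIO_DETECT_CODE plus the stop bit and returns the matching data-register address, and the caller then reads two registers to assemble a 10-bit sample. A minimal usage sketch, assuming the same sst_sc_reg_access() interface and the surrounding driver headers:

/* Illustrative only, not part of the removed header. */
static int read_audio_detect_sample(void)
{
	struct sc_reg_access hi = {0,};
	struct sc_reg_access lo = {0,};
	int chnl_addr = mid_initialize_adc();

	if (chnl_addr < 0)
		return chnl_addr;

	hi.reg_addr = chnl_addr;	/* upper 8 bits of the result */
	lo.reg_addr = chnl_addr + 1;	/* lower 2 bits of the result */
	if (sst_sc_reg_access(&hi, PMIC_READ, 1) ||
	    sst_sc_reg_access(&lo, PMIC_READ, 1))
		return -EIO;

	return (hi.value << 2) | (lo.value & 0x03);	/* 10-bit sample */
}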
diff --git a/drivers/staging/intel_sst/intelmid_ctrl.c b/drivers/staging/intel_sst/intelmid_ctrl.c
deleted file mode 100644
index 19ec474..0000000
--- a/drivers/staging/intel_sst/intelmid_ctrl.c
+++ /dev/null
@@ -1,921 +0,0 @@
-/*
- * intelmid_ctrl.c - Intel Sound card driver for MID
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Harsha Priya <priya.harsha@intel.com>
- * Vinod Koul <vinod.koul@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * ALSA driver handling mixer controls for Intel MAD chipset
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <sound/core.h>
-#include <sound/control.h>
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intelmid_snd_control.h"
-#include "intelmid.h"
-
-#define HW_CH_BASE 4
-
-
-#define HW_CH_0 "Hw1"
-#define HW_CH_1 "Hw2"
-#define HW_CH_2 "Hw3"
-#define HW_CH_3 "Hw4"
-
-static char *router_dmics[] = { "DMIC1",
- "DMIC2",
- "DMIC3",
- "DMIC4",
- "DMIC5",
- "DMIC6"
- };
-
-static char *out_names_mrst[] = {"Headphones",
- "Internal speakers"};
-static char *in_names_mrst[] = {"AMIC",
- "DMIC",
- "HS_MIC"};
-static char *line_out_names_mfld[] = {"Headset",
- "IHF ",
- "Vibra1 ",
- "Vibra2 ",
- "NONE "};
-static char *out_names_mfld[] = {"Headset ",
- "EarPiece "};
-static char *in_names_mfld[] = {"AMIC",
- "DMIC"};
-
-struct snd_control_val intelmad_ctrl_val[MAX_VENDORS] = {
- {
- .playback_vol_max = 63,
- .playback_vol_min = 0,
- .capture_vol_max = 63,
- .capture_vol_min = 0,
- },
- {
- .playback_vol_max = 0,
- .playback_vol_min = -31,
- .capture_vol_max = 0,
- .capture_vol_min = -20,
- },
- {
- .playback_vol_max = 0,
- .playback_vol_min = -31,
- .capture_vol_max = 0,
- .capture_vol_min = -31,
- .master_vol_max = 0,
- .master_vol_min = -126,
- },
-};
-
-/* control path functionalities */
-
-static inline int snd_intelmad_volume_info(struct snd_ctl_elem_info *uinfo,
- int control_type, int max, int min)
-{
- WARN_ON(!uinfo);
-
- uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
- uinfo->count = control_type;
- uinfo->value.integer.min = min;
- uinfo->value.integer.max = max;
- return 0;
-}
-
-/**
-* snd_intelmad_mute_info - provides information about the mute controls
-*
-* @kcontrol: pointer to the control
-* @uinfo: pointer to the structure where the control's info needs
-* to be filled
-*
-* This function is called when a mixer application requests for control's info
-*/
-static int snd_intelmad_mute_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- WARN_ON(!uinfo);
- WARN_ON(!kcontrol);
-
- /* set up the mute as a boolean mono control with min-max values */
- uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
- uinfo->count = MONO_CNTL;
- uinfo->value.integer.min = MIN_MUTE;
- uinfo->value.integer.max = MAX_MUTE;
- return 0;
-}
-
-/**
-* snd_intelmad_capture_volume_info - provides info about the volume control
-*
-* @kcontrol: pointer to the control
-* @uinfo: pointer to the structure where the control's info needs
-* to be filled
-*
-* This function is called when a mixer application requests for control's info
-*/
-static int snd_intelmad_capture_volume_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- snd_intelmad_volume_info(uinfo, MONO_CNTL,
- intelmad_ctrl_val[sst_card_vendor_id].capture_vol_max,
- intelmad_ctrl_val[sst_card_vendor_id].capture_vol_min);
- return 0;
-}
-
-/**
-* snd_intelmad_playback_volume_info - provides info about the volume control
-*
-* @kcontrol: pointer to the control
-* @uinfo: pointer to the structure where the control's info needs
-* to be filled
-*
-* This function is called when a mixer application requests for control's info
-*/
-static int snd_intelmad_playback_volume_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- snd_intelmad_volume_info(uinfo, STEREO_CNTL,
- intelmad_ctrl_val[sst_card_vendor_id].playback_vol_max,
- intelmad_ctrl_val[sst_card_vendor_id].playback_vol_min);
- return 0;
-}
-
-static int snd_intelmad_master_volume_info(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- snd_intelmad_volume_info(uinfo, STEREO_CNTL,
- intelmad_ctrl_val[sst_card_vendor_id].master_vol_max,
- intelmad_ctrl_val[sst_card_vendor_id].master_vol_min);
- return 0;
-}
-
-/**
-* snd_intelmad_device_info_mrst - provides information about the devices available
-*
-* @kcontrol: pointer to the control
-* @uinfo: pointer to the structure where the device's info needs
-* to be filled
-*
-* This function is called when a mixer application requests for device's info
-*/
-static int snd_intelmad_device_info_mrst(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
-
- WARN_ON(!kcontrol);
- WARN_ON(!uinfo);
-
- /* setup device select as drop down controls with different values */
- if (kcontrol->id.numid == OUTPUT_SEL)
- uinfo->value.enumerated.items = ARRAY_SIZE(out_names_mrst);
- else
- uinfo->value.enumerated.items = ARRAY_SIZE(in_names_mrst);
- uinfo->count = MONO_CNTL;
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = 1;
- if (kcontrol->id.numid == OUTPUT_SEL)
- strncpy(uinfo->value.enumerated.name,
- out_names_mrst[uinfo->value.enumerated.item],
- sizeof(uinfo->value.enumerated.name)-1);
- else
- strncpy(uinfo->value.enumerated.name,
- in_names_mrst[uinfo->value.enumerated.item],
- sizeof(uinfo->value.enumerated.name)-1);
- return 0;
-}
-
-static int snd_intelmad_device_info_mfld(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- struct snd_pmic_ops *scard_ops;
- struct snd_intelmad *intelmaddata;
-
- WARN_ON(!kcontrol);
- WARN_ON(!uinfo);
-
- intelmaddata = kcontrol->private_data;
-
- WARN_ON(!intelmaddata->sstdrv_ops);
-
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
- /* setup device select as drop down controls with different values */
- if (kcontrol->id.numid == OUTPUT_SEL)
- uinfo->value.enumerated.items = ARRAY_SIZE(out_names_mfld);
- else if (kcontrol->id.numid == INPUT_SEL)
- uinfo->value.enumerated.items = ARRAY_SIZE(in_names_mfld);
- else if (kcontrol->id.numid == LINEOUT_SEL_MFLD) {
- uinfo->value.enumerated.items = ARRAY_SIZE(line_out_names_mfld);
- scard_ops->line_out_names_cnt = uinfo->value.enumerated.items;
- } else
- return -EINVAL;
- uinfo->count = MONO_CNTL;
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
-
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item = 1;
- if (kcontrol->id.numid == OUTPUT_SEL)
- strncpy(uinfo->value.enumerated.name,
- out_names_mfld[uinfo->value.enumerated.item],
- sizeof(uinfo->value.enumerated.name)-1);
- else if (kcontrol->id.numid == INPUT_SEL)
- strncpy(uinfo->value.enumerated.name,
- in_names_mfld[uinfo->value.enumerated.item],
- sizeof(uinfo->value.enumerated.name)-1);
- else if (kcontrol->id.numid == LINEOUT_SEL_MFLD)
- strncpy(uinfo->value.enumerated.name,
- line_out_names_mfld[uinfo->value.enumerated.item],
- sizeof(uinfo->value.enumerated.name)-1);
- else
- return -EINVAL;
- return 0;
-}
-
-/**
-* snd_intelmad_volume_get - gets the current volume for the control
-*
-* @kcontrol: pointer to the control
-* @uval: pointer to the structure where the control's info needs
-* to be filled
-*
-* This function is called when .get function of a control is invoked from app
-*/
-static int snd_intelmad_volume_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *uval)
-{
- int ret_val = 0, cntl_list[2] = {0,};
- int value = 0;
- struct snd_intelmad *intelmaddata;
- struct snd_pmic_ops *scard_ops;
-
- pr_debug("snd_intelmad_volume_get called\n");
-
- WARN_ON(!uval);
- WARN_ON(!kcontrol);
-
- intelmaddata = kcontrol->private_data;
-
- WARN_ON(!intelmaddata->sstdrv_ops);
-
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
-
- WARN_ON(!scard_ops);
-
- switch (kcontrol->id.numid) {
- case PLAYBACK_VOL:
- cntl_list[0] = PMIC_SND_RIGHT_PB_VOL;
- cntl_list[1] = PMIC_SND_LEFT_PB_VOL;
- break;
-
- case CAPTURE_VOL:
- cntl_list[0] = PMIC_SND_CAPTURE_VOL;
- break;
-
- case MASTER_VOL:
- cntl_list[0] = PMIC_SND_RIGHT_MASTER_VOL;
- cntl_list[1] = PMIC_SND_LEFT_MASTER_VOL;
- break;
- default:
- return -EINVAL;
- }
-
- ret_val = scard_ops->get_vol(cntl_list[0], &value);
- uval->value.integer.value[0] = value;
-
- if (ret_val)
- return ret_val;
-
- if (kcontrol->id.numid == PLAYBACK_VOL ||
- kcontrol->id.numid == MASTER_VOL) {
- ret_val = scard_ops->get_vol(cntl_list[1], &value);
- uval->value.integer.value[1] = value;
- }
- return ret_val;
-}
-
-/**
-* snd_intelmad_mute_get - gets the current mute status for the control
-*
-* @kcontrol: pointer to the control
-* @uval: pointer to the structure where the control's info needs
-* to be filled
-*
-* This function is called when .get function of a control is invoked from app
-*/
-static int snd_intelmad_mute_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *uval)
-{
-
- int cntl_list = 0, ret_val = 0;
- u8 value = 0;
- struct snd_intelmad *intelmaddata;
- struct snd_pmic_ops *scard_ops;
-
- pr_debug("Mute_get called\n");
-
- WARN_ON(!uval);
- WARN_ON(!kcontrol);
-
- intelmaddata = kcontrol->private_data;
-
- WARN_ON(!intelmaddata->sstdrv_ops);
-
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
-
- WARN_ON(!scard_ops);
-
- switch (kcontrol->id.numid) {
- case PLAYBACK_MUTE:
- if (intelmaddata->output_sel == STEREO_HEADPHONE)
- cntl_list = PMIC_SND_LEFT_HP_MUTE;
- else if ((intelmaddata->output_sel == INTERNAL_SPKR) ||
- (intelmaddata->output_sel == MONO_EARPIECE))
- cntl_list = PMIC_SND_LEFT_SPEAKER_MUTE;
- break;
-
- case CAPTURE_MUTE:
- if (intelmaddata->input_sel == DMIC)
- cntl_list = PMIC_SND_DMIC_MUTE;
- else if (intelmaddata->input_sel == AMIC)
- cntl_list = PMIC_SND_AMIC_MUTE;
- else if (intelmaddata->input_sel == HS_MIC)
- cntl_list = PMIC_SND_HP_MIC_MUTE;
- break;
- case MASTER_MUTE:
- uval->value.integer.value[0] = intelmaddata->master_mute;
- return 0;
- default:
- return -EINVAL;
- }
-
- ret_val = scard_ops->get_mute(cntl_list, &value);
- uval->value.integer.value[0] = value;
- return ret_val;
-}
-
-/**
-* snd_intelmad_volume_set - sets the volume control's info
-*
-* @kcontrol: pointer to the control
-* @uval: pointer to the structure where the control's info is
-* available to be set
-*
-* This function is called when .set function of a control is invoked from app
-*/
-static int snd_intelmad_volume_set(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *uval)
-{
-
- int ret_val, cntl_list[2] = {0,};
- struct snd_intelmad *intelmaddata;
- struct snd_pmic_ops *scard_ops;
-
- pr_debug("volume set called:%ld %ld\n",
- uval->value.integer.value[0],
- uval->value.integer.value[1]);
-
- WARN_ON(!uval);
- WARN_ON(!kcontrol);
-
- intelmaddata = kcontrol->private_data;
-
- WARN_ON(!intelmaddata->sstdrv_ops);
-
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
-
- WARN_ON(!scard_ops);
-
- switch (kcontrol->id.numid) {
- case PLAYBACK_VOL:
- cntl_list[0] = PMIC_SND_LEFT_PB_VOL;
- cntl_list[1] = PMIC_SND_RIGHT_PB_VOL;
- break;
-
- case CAPTURE_VOL:
- cntl_list[0] = PMIC_SND_CAPTURE_VOL;
- break;
-
- case MASTER_VOL:
- cntl_list[0] = PMIC_SND_LEFT_MASTER_VOL;
- cntl_list[1] = PMIC_SND_RIGHT_MASTER_VOL;
- break;
-
- default:
- return -EINVAL;
- }
-
- ret_val = scard_ops->set_vol(cntl_list[0],
- uval->value.integer.value[0]);
- if (ret_val)
- return ret_val;
-
- if (kcontrol->id.numid == PLAYBACK_VOL ||
- kcontrol->id.numid == MASTER_VOL)
- ret_val = scard_ops->set_vol(cntl_list[1],
- uval->value.integer.value[1]);
- return ret_val;
-}
-
-/**
-* snd_intelmad_mute_set - sets the mute control's info
-*
-* @kcontrol: pointer to the control
-* @uval: pointer to the structure where the control's info is
-* available to be set
-*
-* This function is called when .set function of a control is invoked from app
-*/
-static int snd_intelmad_mute_set(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *uval)
-{
- int cntl_list[2] = {0,}, ret_val;
- struct snd_intelmad *intelmaddata;
- struct snd_pmic_ops *scard_ops;
-
- pr_debug("snd_intelmad_mute_set called\n");
-
- WARN_ON(!uval);
- WARN_ON(!kcontrol);
-
- intelmaddata = kcontrol->private_data;
-
- WARN_ON(!intelmaddata->sstdrv_ops);
-
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
-
- WARN_ON(!scard_ops);
-
- kcontrol->private_value = uval->value.integer.value[0];
-
- switch (kcontrol->id.numid) {
- case PLAYBACK_MUTE:
- if (intelmaddata->output_sel == STEREO_HEADPHONE) {
- cntl_list[0] = PMIC_SND_LEFT_HP_MUTE;
- cntl_list[1] = PMIC_SND_RIGHT_HP_MUTE;
- } else if ((intelmaddata->output_sel == INTERNAL_SPKR) ||
- (intelmaddata->output_sel == MONO_EARPIECE)) {
- cntl_list[0] = PMIC_SND_LEFT_SPEAKER_MUTE;
- cntl_list[1] = PMIC_SND_RIGHT_SPEAKER_MUTE;
- }
- break;
-
- case CAPTURE_MUTE:/*based on sel device mute the i/p dev*/
- if (intelmaddata->input_sel == DMIC)
- cntl_list[0] = PMIC_SND_DMIC_MUTE;
- else if (intelmaddata->input_sel == AMIC)
- cntl_list[0] = PMIC_SND_AMIC_MUTE;
- else if (intelmaddata->input_sel == HS_MIC)
- cntl_list[0] = PMIC_SND_HP_MIC_MUTE;
- break;
- case MASTER_MUTE:
- cntl_list[0] = PMIC_SND_MUTE_ALL;
- intelmaddata->master_mute = uval->value.integer.value[0];
- break;
- default:
- return -EINVAL;
- }
-
- ret_val = scard_ops->set_mute(cntl_list[0],
- uval->value.integer.value[0]);
- if (ret_val)
- return ret_val;
-
- if (kcontrol->id.numid == PLAYBACK_MUTE)
- ret_val = scard_ops->set_mute(cntl_list[1],
- uval->value.integer.value[0]);
- return ret_val;
-}
-
-/**
-* snd_intelmad_device_get - get the device select control's info
-*
-* @kcontrol: pointer to the control
-* @uval: pointer to the structure where the control's info is
-* to be filled
-*
-* This function is called when .get function of a control is invoked from app
-*/
-static int snd_intelmad_device_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *uval)
-{
- struct snd_intelmad *intelmaddata;
- struct snd_pmic_ops *scard_ops;
- pr_debug("device_get called\n");
-
- WARN_ON(!uval);
- WARN_ON(!kcontrol);
-
- intelmaddata = kcontrol->private_data;
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
- if (intelmaddata->cpu_id == CPU_CHIP_PENWELL) {
- if (kcontrol->id.numid == OUTPUT_SEL)
- uval->value.enumerated.item[0] =
- scard_ops->output_dev_id;
- else if (kcontrol->id.numid == INPUT_SEL)
- uval->value.enumerated.item[0] =
- scard_ops->input_dev_id;
- else if (kcontrol->id.numid == LINEOUT_SEL_MFLD)
- uval->value.enumerated.item[0] =
- scard_ops->lineout_dev_id;
- else
- return -EINVAL;
- } else if (intelmaddata->cpu_id == CPU_CHIP_LINCROFT) {
- if (kcontrol->id.numid == OUTPUT_SEL)
- /* There is a mismatch here.
- * ALSA expects 1 for internal speaker.
- * But internally, we may give 2 for internal speaker.
- */
- if (scard_ops->output_dev_id == MONO_EARPIECE ||
- scard_ops->output_dev_id == INTERNAL_SPKR)
- uval->value.enumerated.item[0] = MONO_EARPIECE;
- else if (scard_ops->output_dev_id == STEREO_HEADPHONE)
- uval->value.enumerated.item[0] =
- STEREO_HEADPHONE;
- else
- return -EINVAL;
- else if (kcontrol->id.numid == INPUT_SEL)
- uval->value.enumerated.item[0] =
- scard_ops->input_dev_id;
- else
- return -EINVAL;
- } else
- uval->value.enumerated.item[0] = kcontrol->private_value;
- return 0;
-}
-
-/**
-* snd_intelmad_device_set - set the device select control's info
-*
-* @kcontrol: pointer to the control
-* @uval: pointer to the structure where the control's info is
-* available to be set
-*
-* This function is called when .set function of a control is invoked from app
-*/
-static int snd_intelmad_device_set(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *uval)
-{
- struct snd_intelmad *intelmaddata;
- struct snd_pmic_ops *scard_ops;
- int ret_val = 0, vendor, status;
- struct intel_sst_pcm_control *pcm_control;
-
- pr_debug("snd_intelmad_device_set called\n");
-
- WARN_ON(!uval);
- WARN_ON(!kcontrol);
- status = -1;
-
- intelmaddata = kcontrol->private_data;
-
- WARN_ON(!intelmaddata->sstdrv_ops);
-
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
-
- WARN_ON(!scard_ops);
-
- /* store value with driver */
- kcontrol->private_value = uval->value.enumerated.item[0];
-
- switch (kcontrol->id.numid) {
- case OUTPUT_SEL:
- ret_val = scard_ops->set_output_dev(
- uval->value.enumerated.item[0]);
- intelmaddata->output_sel = uval->value.enumerated.item[0];
- break;
- case INPUT_SEL:
- vendor = intelmaddata->sstdrv_ops->vendor_id;
- if ((vendor == SND_MX) || (vendor == SND_FS)) {
- pcm_control = intelmaddata->sstdrv_ops->pcm_control;
- if (uval->value.enumerated.item[0] == HS_MIC)
- status = 1;
- else
- status = 0;
- pcm_control->device_control(
- SST_ENABLE_RX_TIME_SLOT, &status);
- }
- ret_val = scard_ops->set_input_dev(
- uval->value.enumerated.item[0]);
- intelmaddata->input_sel = uval->value.enumerated.item[0];
- break;
- case LINEOUT_SEL_MFLD:
- ret_val = scard_ops->set_lineout_dev(
- uval->value.enumerated.item[0]);
- intelmaddata->lineout_sel = uval->value.enumerated.item[0];
- break;
- default:
- return -EINVAL;
- }
- kcontrol->private_value = uval->value.enumerated.item[0];
- return ret_val;
-}
-
-static int snd_intelmad_device_dmic_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *uval)
-{
- struct snd_intelmad *intelmaddata;
- struct snd_pmic_ops *scard_ops;
-
- WARN_ON(!uval);
- WARN_ON(!kcontrol);
-
- intelmaddata = kcontrol->private_data;
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
-
- if (scard_ops->input_dev_id != DMIC) {
- pr_debug("input dev = 0x%x\n", scard_ops->input_dev_id);
- return 0;
- }
-
- if (intelmaddata->cpu_id == CPU_CHIP_PENWELL)
- uval->value.enumerated.item[0] = kcontrol->private_value;
- else
-		pr_debug("CPU id = 0x%x is invalid.\n",
- intelmaddata->cpu_id);
- return 0;
-}
-
-void msic_set_bit(u8 index, unsigned int *available_dmics)
-{
- *available_dmics |= (1 << index);
-}
-
-void msic_clear_bit(u8 index, unsigned int *available_dmics)
-{
- *available_dmics &= ~(1 << index);
-}
-
-int msic_is_set_bit(u8 index, unsigned int *available_dmics)
-{
- int ret_val;
-
- ret_val = (*available_dmics & (1 << index));
- return ret_val;
-}
-
-static int snd_intelmad_device_dmic_set(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *uval)
-{
- struct snd_intelmad *intelmaddata;
- struct snd_pmic_ops *scard_ops;
- int i, dmic_index;
- unsigned int available_dmics;
- int jump_count;
- int max_dmics = ARRAY_SIZE(router_dmics);
-
- WARN_ON(!uval);
- WARN_ON(!kcontrol);
-
- intelmaddata = kcontrol->private_data;
- WARN_ON(!intelmaddata->sstdrv_ops);
-
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
- WARN_ON(!scard_ops);
-
- if (scard_ops->input_dev_id != DMIC) {
- pr_debug("input dev = 0x%x\n", scard_ops->input_dev_id);
- return 0;
- }
-
- available_dmics = scard_ops->available_dmics;
-
- if (kcontrol->private_value > uval->value.enumerated.item[0]) {
- pr_debug("jump count -1.\n");
- jump_count = -1;
- } else {
- pr_debug("jump count 1.\n");
- jump_count = 1;
- }
-
- dmic_index = uval->value.enumerated.item[0];
- pr_debug("set function. dmic_index = %d, avl_dmic = 0x%x\n",
- dmic_index, available_dmics);
- for (i = 0; i < max_dmics; i++) {
- pr_debug("set function. loop index = 0x%x. dmic_index = 0x%x\n",
- i, dmic_index);
- if (!msic_is_set_bit(dmic_index, &available_dmics)) {
- msic_clear_bit(kcontrol->private_value,
- &available_dmics);
- msic_set_bit(dmic_index, &available_dmics);
- kcontrol->private_value = dmic_index;
- scard_ops->available_dmics = available_dmics;
- scard_ops->hw_dmic_map[kcontrol->id.numid-HW_CH_BASE] =
- kcontrol->private_value;
- scard_ops->set_hw_dmic_route
- (kcontrol->id.numid-HW_CH_BASE);
- return 0;
- }
-
- dmic_index += jump_count;
-
- if (dmic_index > (max_dmics - 1) && jump_count == 1) {
-			pr_debug("Resetting the dmic index to 0.\n");
- dmic_index = 0;
- } else if (dmic_index == -1 && jump_count == -1) {
- pr_debug("Resetting the dmic index to 5.\n");
- dmic_index = max_dmics - 1;
- }
- }
-
- return -EINVAL;
-}
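The put handler above is, at its core, a circular search over a six-entry DMIC bitmask: starting from the requested index it walks forward or backward (depending on which way the user moved the control), wraps at either end, and claims the first DMIC whose bit is still clear before reprogramming the hardware route. A stripped-down sketch of just that search, with a hypothetical helper name and the same bit semantics as msic_is_set_bit():

/* Illustrative only: find the first unclaimed DMIC, walking in 'step'
 * direction (+1 or -1) from 'start' and wrapping around 'count' slots. */
static int find_unclaimed_dmic(unsigned int in_use, int start, int step,
			       int count)
{
	int i, idx = start;

	for (i = 0; i < count; i++) {
		if (!(in_use & (1 << idx)))
			return idx;
		idx += step;
		if (idx >= count)
			idx = 0;
		else if (idx < 0)
			idx = count - 1;
	}
	return -EINVAL;	/* every DMIC already routed */
}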
-
-static int snd_intelmad_device_dmic_info_mfld(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_info *uinfo)
-{
- struct snd_intelmad *intelmaddata;
- struct snd_pmic_ops *scard_ops;
-
- uinfo->count = MONO_CNTL;
- uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
- uinfo->value.enumerated.items = ARRAY_SIZE(router_dmics);
-
- intelmaddata = kcontrol->private_data;
- WARN_ON(!intelmaddata->sstdrv_ops);
-
- scard_ops = intelmaddata->sstdrv_ops->scard_ops;
- WARN_ON(!scard_ops);
-
- if (uinfo->value.enumerated.item >= uinfo->value.enumerated.items)
- uinfo->value.enumerated.item =
- uinfo->value.enumerated.items - 1;
-
- strncpy(uinfo->value.enumerated.name,
- router_dmics[uinfo->value.enumerated.item],
- sizeof(uinfo->value.enumerated.name)-1);
-
-
- msic_set_bit(kcontrol->private_value, &scard_ops->available_dmics);
- pr_debug("info function. avl_dmic = 0x%x",
- scard_ops->available_dmics);
-
- scard_ops->hw_dmic_map[kcontrol->id.numid-HW_CH_BASE] =
- kcontrol->private_value;
-
- return 0;
-}
-
-struct snd_kcontrol_new snd_intelmad_controls_mrst[MAX_CTRL] __devinitdata = {
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Playback Source",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_device_info_mrst,
- .get = snd_intelmad_device_get,
- .put = snd_intelmad_device_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Capture Source",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_device_info_mrst,
- .get = snd_intelmad_device_get,
- .put = snd_intelmad_device_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Playback Volume",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_playback_volume_info,
- .get = snd_intelmad_volume_get,
- .put = snd_intelmad_volume_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Playback Switch",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_mute_info,
- .get = snd_intelmad_mute_get,
- .put = snd_intelmad_mute_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Capture Volume",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_capture_volume_info,
- .get = snd_intelmad_volume_get,
- .put = snd_intelmad_volume_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Capture Switch",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_mute_info,
- .get = snd_intelmad_mute_get,
- .put = snd_intelmad_mute_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Master Playback Volume",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_master_volume_info,
- .get = snd_intelmad_volume_get,
- .put = snd_intelmad_volume_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Master Playback Switch",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_mute_info,
- .get = snd_intelmad_mute_get,
- .put = snd_intelmad_mute_set,
- .private_value = 0,
-},
-};
-
-struct snd_kcontrol_new
-snd_intelmad_controls_mfld[MAX_CTRL_MFLD] __devinitdata = {
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Playback Source",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_device_info_mfld,
- .get = snd_intelmad_device_get,
- .put = snd_intelmad_device_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "PCM Capture Source",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_device_info_mfld,
- .get = snd_intelmad_device_get,
- .put = snd_intelmad_device_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = "Line out",
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_device_info_mfld,
- .get = snd_intelmad_device_get,
- .put = snd_intelmad_device_set,
- .private_value = 0,
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = HW_CH_0,
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_device_dmic_info_mfld,
- .get = snd_intelmad_device_dmic_get,
- .put = snd_intelmad_device_dmic_set,
- .private_value = 0
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = HW_CH_1,
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_device_dmic_info_mfld,
- .get = snd_intelmad_device_dmic_get,
- .put = snd_intelmad_device_dmic_set,
- .private_value = 1
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = HW_CH_2,
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_device_dmic_info_mfld,
- .get = snd_intelmad_device_dmic_get,
- .put = snd_intelmad_device_dmic_set,
- .private_value = 2
-},
-{
- .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
- .name = HW_CH_3,
- .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
- .info = snd_intelmad_device_dmic_info_mfld,
- .get = snd_intelmad_device_dmic_get,
- .put = snd_intelmad_device_dmic_set,
- .private_value = 3
-}
-};
-
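These arrays are the usual input to ALSA's control registration loop; the card setup code (not part of this hunk) would typically wrap each entry with snd_ctl_new1() and hand it to snd_ctl_add(), roughly as in this sketch:

/* Illustrative registration loop; 'intelmaddata' is the driver context
 * handed back to the handlers via kcontrol->private_data. */
static int register_mrst_controls_sketch(struct snd_card *card,
					 struct snd_intelmad *intelmaddata)
{
	int i, ret;

	for (i = 0; i < MAX_CTRL_MRST; i++) {
		ret = snd_ctl_add(card,
				  snd_ctl_new1(&snd_intelmad_controls_mrst[i],
					       intelmaddata));
		if (ret)
			return ret;
	}
	return 0;
}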
diff --git a/drivers/staging/intel_sst/intelmid_msic_control.c b/drivers/staging/intel_sst/intelmid_msic_control.c
deleted file mode 100644
index 70cdb16..0000000
--- a/drivers/staging/intel_sst/intelmid_msic_control.c
+++ /dev/null
@@ -1,1047 +0,0 @@
-/*
- * intelmid_msic_control.c - Intel Sound card driver for MID
- *
- * Copyright (C) 2010 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This file contains the control operations of msic vendors
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/file.h>
-#include <linux/delay.h>
-#include <sound/control.h>
-#include "intel_sst.h"
-#include <linux/input.h>
-#include "intelmid_snd_control.h"
-#include "intelmid.h"
-
-#define AUDIOMUX12 0x24c
-#define AUDIOMUX34 0x24d
-
-static int msic_init_card(void)
-{
- struct sc_reg_access sc_access[] = {
- /* dmic configuration */
- {0x241, 0x85, 0},
- {0x242, 0x02, 0},
- /* audio paths config */
- {0x24C, 0x10, 0},
- {0x24D, 0x32, 0},
- /* PCM2 interface slots */
- /* preconfigured slots for 0-5 both tx, rx */
- {0x272, 0x10, 0},
- {0x273, 0x32, 0},
- {0x274, 0xFF, 0},
- {0x275, 0x10, 0},
- {0x276, 0x32, 0},
- {0x277, 0x54, 0},
- /*Sinc5 decimator*/
- {0x24E, 0x28, 0},
- /*TI vibra w/a settings*/
- {0x384, 0x80, 0},
- {0x385, 0x80, 0},
- {0x267, 0x00, 0},
- {0x261, 0x00, 0},
- /* pcm port setting */
- {0x278, 0x00, 0},
- {0x27B, 0x01, 0},
- {0x27C, 0x0a, 0},
- /* Set vol HSLRVOLCTRL, IHFVOL */
- {0x259, 0x08, 0},
- {0x25A, 0x08, 0},
- {0x25B, 0x08, 0},
- {0x25C, 0x08, 0},
- /* HSEPRXCTRL Enable the headset left and right FIR filters */
- {0x250, 0x30, 0},
- /* HSMIXER */
- {0x256, 0x11, 0},
- /* amic configuration */
- {0x249, 0x01, 0x0},
- {0x24A, 0x01, 0x0},
- /* unmask ocaudio/accdet interrupts */
- {0x1d, 0x00, 0x00},
- {0x1e, 0x00, 0x00},
- };
- snd_msic_ops.card_status = SND_CARD_INIT_DONE;
- sst_sc_reg_access(sc_access, PMIC_WRITE, 28);
- snd_msic_ops.pb_on = 0;
- snd_msic_ops.pbhs_on = 0;
- snd_msic_ops.cap_on = 0;
- snd_msic_ops.input_dev_id = DMIC; /*def dev*/
- snd_msic_ops.output_dev_id = STEREO_HEADPHONE;
- snd_msic_ops.jack_interrupt_status = false;
- pr_debug("msic init complete!!\n");
- return 0;
-}
-static int msic_line_out_restore(u8 value)
-{
- struct sc_reg_access hs_drv_en[] = {
- {0x25d, 0x03, 0x03},
- };
- struct sc_reg_access ep_drv_en[] = {
- {0x25d, 0x40, 0x40},
- };
- struct sc_reg_access ihf_drv_en[] = {
- {0x25d, 0x0c, 0x0c},
- };
- struct sc_reg_access vib1_drv_en[] = {
- {0x25d, 0x10, 0x10},
- };
- struct sc_reg_access vib2_drv_en[] = {
- {0x25d, 0x20, 0x20},
- };
- struct sc_reg_access pmode_enable[] = {
- {0x381, 0x10, 0x10},
- };
- int retval = 0;
-
- pr_debug("msic_lineout_restore_lineout_dev:%d\n", value);
-
- switch (value) {
- case HEADSET:
- pr_debug("Selecting Lineout-HEADSET-restore\n");
- if (snd_msic_ops.output_dev_id == STEREO_HEADPHONE)
- retval = sst_sc_reg_access(hs_drv_en,
- PMIC_READ_MODIFY, 1);
- else
- retval = sst_sc_reg_access(ep_drv_en,
- PMIC_READ_MODIFY, 1);
- break;
- case IHF:
- pr_debug("Selecting Lineout-IHF-restore\n");
- retval = sst_sc_reg_access(ihf_drv_en, PMIC_READ_MODIFY, 1);
- if (retval)
- return retval;
- retval = sst_sc_reg_access(pmode_enable, PMIC_READ_MODIFY, 1);
- break;
- case VIBRA1:
- pr_debug("Selecting Lineout-Vibra1-restore\n");
- retval = sst_sc_reg_access(vib1_drv_en, PMIC_READ_MODIFY, 1);
- break;
- case VIBRA2:
- pr_debug("Selecting Lineout-VIBRA2-restore\n");
- retval = sst_sc_reg_access(vib2_drv_en, PMIC_READ_MODIFY, 1);
- break;
- case NONE:
- pr_debug("Selecting Lineout-NONE-restore\n");
- break;
- default:
- return -EINVAL;
- }
- return retval;
-}
-static int msic_get_lineout_prvstate(void)
-{
- struct sc_reg_access hs_ihf_drv[2] = {
- {0x257, 0x0, 0x0},
- {0x25d, 0x0, 0x0},
- };
- struct sc_reg_access vib1drv[2] = {
- {0x264, 0x0, 0x0},
- {0x25D, 0x0, 0x0},
- };
- struct sc_reg_access vib2drv[2] = {
- {0x26A, 0x0, 0x0},
- {0x25D, 0x0, 0x0},
- };
- int retval = 0, drv_en, dac_en, dev_id, mask;
- for (dev_id = 0; dev_id < snd_msic_ops.line_out_names_cnt; dev_id++) {
- switch (dev_id) {
- case HEADSET:
- pr_debug("msic_get_lineout_prvs_state: HEADSET\n");
- sst_sc_reg_access(hs_ihf_drv, PMIC_READ, 2);
-
- mask = (MASK0|MASK1);
- dac_en = (hs_ihf_drv[0].value) & mask;
-
- mask = ((MASK0|MASK1)|MASK6);
- drv_en = (hs_ihf_drv[1].value) & mask;
-
- if (dac_en && (!drv_en)) {
- snd_msic_ops.prev_lineout_dev_id = HEADSET;
- return retval;
- }
- break;
- case IHF:
- pr_debug("msic_get_lineout_prvstate: IHF\n");
- sst_sc_reg_access(hs_ihf_drv, PMIC_READ, 2);
-
- mask = (MASK2 | MASK3);
- dac_en = (hs_ihf_drv[0].value) & mask;
-
- mask = (MASK2 | MASK3);
- drv_en = (hs_ihf_drv[1].value) & mask;
-
- if (dac_en && (!drv_en)) {
- snd_msic_ops.prev_lineout_dev_id = IHF;
- return retval;
- }
- break;
- case VIBRA1:
- pr_debug("msic_get_lineout_prvstate: vibra1\n");
- sst_sc_reg_access(vib1drv, PMIC_READ, 2);
-
- mask = MASK1;
- dac_en = (vib1drv[0].value) & mask;
-
- mask = MASK4;
- drv_en = (vib1drv[1].value) & mask;
-
- if (dac_en && (!drv_en)) {
- snd_msic_ops.prev_lineout_dev_id = VIBRA1;
- return retval;
- }
- break;
- case VIBRA2:
- pr_debug("msic_get_lineout_prvstate: vibra2\n");
- sst_sc_reg_access(vib2drv, PMIC_READ, 2);
-
- mask = MASK1;
- dac_en = (vib2drv[0].value) & mask;
-
- mask = MASK5;
- drv_en = ((vib2drv[1].value) & mask);
-
- if (dac_en && (!drv_en)) {
- snd_msic_ops.prev_lineout_dev_id = VIBRA2;
- return retval;
- }
- break;
- case NONE:
- pr_debug("msic_get_lineout_prvstate: NONE\n");
- snd_msic_ops.prev_lineout_dev_id = NONE;
- return retval;
- default:
- pr_debug("Invalid device id\n");
- snd_msic_ops.prev_lineout_dev_id = NONE;
- return -EINVAL;
- }
- }
- return retval;
-}
-static int msic_set_selected_lineout_dev(u8 value)
-{
- struct sc_reg_access lout_hs[] = {
- {0x25e, 0x33, 0xFF},
- {0x25d, 0x0, 0x43},
- };
- struct sc_reg_access lout_ihf[] = {
- {0x25e, 0x55, 0xff},
- {0x25d, 0x0, 0x0c},
- };
- struct sc_reg_access lout_vibra1[] = {
-
- {0x25e, 0x61, 0xff},
- {0x25d, 0x0, 0x10},
- };
- struct sc_reg_access lout_vibra2[] = {
-
- {0x25e, 0x16, 0xff},
- {0x25d, 0x0, 0x20},
- };
- struct sc_reg_access lout_def[] = {
- {0x25e, 0x66, 0x0},
- };
- struct sc_reg_access pmode_disable[] = {
- {0x381, 0x00, 0x10},
- };
- struct sc_reg_access pmode_enable[] = {
- {0x381, 0x10, 0x10},
- };
- int retval = 0;
-
- pr_debug("msic_set_selected_lineout_dev:%d\n", value);
- msic_get_lineout_prvstate();
- msic_line_out_restore(snd_msic_ops.prev_lineout_dev_id);
- snd_msic_ops.lineout_dev_id = value;
-
- switch (value) {
- case HEADSET:
- pr_debug("Selecting Lineout-HEADSET\n");
- if (snd_msic_ops.pb_on)
- retval = sst_sc_reg_access(lout_hs,
- PMIC_READ_MODIFY, 2);
- if (retval)
- return retval;
- retval = sst_sc_reg_access(pmode_disable,
- PMIC_READ_MODIFY, 1);
- break;
- case IHF:
- pr_debug("Selecting Lineout-IHF\n");
- if (snd_msic_ops.pb_on)
- retval = sst_sc_reg_access(lout_ihf,
- PMIC_READ_MODIFY, 2);
- if (retval)
- return retval;
- retval = sst_sc_reg_access(pmode_enable,
- PMIC_READ_MODIFY, 1);
- break;
- case VIBRA1:
- pr_debug("Selecting Lineout-Vibra1\n");
- if (snd_msic_ops.pb_on)
- retval = sst_sc_reg_access(lout_vibra1,
- PMIC_READ_MODIFY, 2);
- if (retval)
- return retval;
- retval = sst_sc_reg_access(pmode_disable,
- PMIC_READ_MODIFY, 1);
- break;
- case VIBRA2:
- pr_debug("Selecting Lineout-VIBRA2\n");
- if (snd_msic_ops.pb_on)
- retval = sst_sc_reg_access(lout_vibra2,
- PMIC_READ_MODIFY, 2);
- if (retval)
- return retval;
- retval = sst_sc_reg_access(pmode_disable,
- PMIC_READ_MODIFY, 1);
- break;
- case NONE:
- pr_debug("Selecting Lineout-NONE\n");
- retval = sst_sc_reg_access(lout_def,
- PMIC_WRITE, 1);
- if (retval)
- return retval;
- retval = sst_sc_reg_access(pmode_disable,
- PMIC_READ_MODIFY, 1);
- break;
- default:
- return -EINVAL;
- }
- return retval;
-}
-
-
-static int msic_power_up_pb(unsigned int device)
-{
- struct sc_reg_access vaud[] = {
- /* turn on the audio power supplies */
- {0x0DB, 0x07, 0},
- };
- struct sc_reg_access pll[] = {
- /* turn on PLL */
- {0x240, 0x20, 0},
- };
- struct sc_reg_access vhs[] = {
- /* VHSP */
- {0x0DC, 0x3D, 0},
- /* VHSN */
- {0x0DD, 0x3F, 0},
- };
- struct sc_reg_access hsdac[] = {
- {0x382, 0x40, 0x40},
- /* disable driver */
- {0x25D, 0x0, 0x43},
- /* DAC CONFIG ; both HP, LP on */
- {0x257, 0x03, 0x03},
- };
- struct sc_reg_access hs_filter[] = {
- /* HSEPRXCTRL Enable the headset left and right FIR filters */
- {0x250, 0x30, 0},
- /* HSMIXER */
- {0x256, 0x11, 0},
- };
- struct sc_reg_access hs_enable[] = {
- /* enable driver */
- {0x25D, 0x3, 0x3},
- {0x26C, 0x0, 0x2},
- /* unmute the headset */
- { 0x259, 0x80, 0x80},
- { 0x25A, 0x80, 0x80},
- };
- struct sc_reg_access vihf[] = {
- /* VIHF ON */
- {0x0C9, 0x27, 0x00},
- };
- struct sc_reg_access ihf_filter[] = {
- /* disable driver */
- {0x25D, 0x00, 0x0C},
- /*Filer DAC enable*/
- {0x251, 0x03, 0x03},
- {0x257, 0x0C, 0x0C},
- };
- struct sc_reg_access ihf_en[] = {
- /*enable drv*/
- {0x25D, 0x0C, 0x0c},
- };
- struct sc_reg_access ihf_unmute[] = {
-		/* unmute the IHF */
- {0x25B, 0x80, 0x80},
- {0x25C, 0x80, 0x80},
- };
- struct sc_reg_access epdac[] = {
- /* disable driver */
- {0x25D, 0x0, 0x43},
- /* DAC CONFIG ; both HP, LP on */
- {0x257, 0x03, 0x03},
- };
- struct sc_reg_access ep_enable[] = {
- /* enable driver */
- {0x25D, 0x40, 0x40},
- /* unmute the headset */
- { 0x259, 0x80, 0x80},
- { 0x25A, 0x80, 0x80},
- };
- struct sc_reg_access vib1_en[] = {
- /* enable driver, ADC */
- {0x25D, 0x10, 0x10},
- {0x264, 0x02, 0x82},
- };
- struct sc_reg_access vib2_en[] = {
- /* enable driver, ADC */
- {0x25D, 0x20, 0x20},
- {0x26A, 0x02, 0x82},
- };
- struct sc_reg_access pcm2_en[] = {
- /* enable pcm 2 */
- {0x27C, 0x1, 0x1},
- };
- int retval = 0;
-
- if (snd_msic_ops.card_status == SND_CARD_UN_INIT) {
- retval = msic_init_card();
- if (retval)
- return retval;
- }
-
- pr_debug("powering up pb.... Device %d\n", device);
- sst_sc_reg_access(vaud, PMIC_WRITE, 1);
- msleep(1);
- sst_sc_reg_access(pll, PMIC_WRITE, 1);
- msleep(1);
- switch (device) {
- case SND_SST_DEVICE_HEADSET:
- snd_msic_ops.pb_on = 1;
- snd_msic_ops.pbhs_on = 1;
- if (snd_msic_ops.output_dev_id == STEREO_HEADPHONE) {
- sst_sc_reg_access(vhs, PMIC_WRITE, 2);
- sst_sc_reg_access(hsdac, PMIC_READ_MODIFY, 3);
- sst_sc_reg_access(hs_filter, PMIC_WRITE, 2);
- sst_sc_reg_access(hs_enable, PMIC_READ_MODIFY, 4);
- } else {
- sst_sc_reg_access(epdac, PMIC_READ_MODIFY, 2);
- sst_sc_reg_access(hs_filter, PMIC_WRITE, 2);
- sst_sc_reg_access(ep_enable, PMIC_READ_MODIFY, 3);
- }
- if (snd_msic_ops.lineout_dev_id == HEADSET)
- msic_set_selected_lineout_dev(HEADSET);
- break;
- case SND_SST_DEVICE_IHF:
- snd_msic_ops.pb_on = 1;
- sst_sc_reg_access(vihf, PMIC_WRITE, 1);
- sst_sc_reg_access(ihf_filter, PMIC_READ_MODIFY, 3);
- sst_sc_reg_access(ihf_en, PMIC_READ_MODIFY, 1);
- sst_sc_reg_access(ihf_unmute, PMIC_READ_MODIFY, 2);
- if (snd_msic_ops.lineout_dev_id == IHF)
- msic_set_selected_lineout_dev(IHF);
- break;
-
- case SND_SST_DEVICE_VIBRA:
- snd_msic_ops.pb_on = 1;
- sst_sc_reg_access(vib1_en, PMIC_READ_MODIFY, 2);
- if (snd_msic_ops.lineout_dev_id == VIBRA1)
- msic_set_selected_lineout_dev(VIBRA1);
- break;
-
- case SND_SST_DEVICE_HAPTIC:
- snd_msic_ops.pb_on = 1;
- sst_sc_reg_access(vib2_en, PMIC_READ_MODIFY, 2);
- if (snd_msic_ops.lineout_dev_id == VIBRA2)
- msic_set_selected_lineout_dev(VIBRA2);
- break;
-
- default:
- pr_warn("Wrong Device %d, selected %d\n",
- device, snd_msic_ops.output_dev_id);
- }
- return sst_sc_reg_access(pcm2_en, PMIC_READ_MODIFY, 1);
-}
-
-static int msic_power_up_cp(unsigned int device)
-{
- struct sc_reg_access vaud[] = {
- /* turn on the audio power supplies */
- {0x0DB, 0x07, 0},
- };
- struct sc_reg_access pll[] = {
- /* turn on PLL */
- {0x240, 0x20, 0},
- };
- struct sc_reg_access dmic_bias[] = {
- /* Turn on AMIC supply */
- {0x247, 0xA0, 0xA0},
- };
- struct sc_reg_access dmic[] = {
- /* mic demux enable */
- {0x245, 0x3F, 0x3F},
- {0x246, 0x07, 0x07},
-
- };
- struct sc_reg_access amic_bias[] = {
- /* Turn on AMIC supply */
- {0x247, 0xFC, 0xFC},
- };
- struct sc_reg_access amic[] = {
- /*MIC EN*/
- {0x249, 0x01, 0x01},
- {0x24A, 0x01, 0x01},
- /*ADC EN*/
- {0x248, 0x05, 0x0F},
-
- };
- struct sc_reg_access pcm2[] = {
- /* enable pcm 2 */
- {0x27C, 0x1, 0x1},
- };
- struct sc_reg_access tx_on[] = {
-		/* wait for mic to stabilize before turning on audio channels */
- {0x24F, 0x3C, 0x0},
- };
- int retval = 0;
-
- if (snd_msic_ops.card_status == SND_CARD_UN_INIT) {
- retval = msic_init_card();
- if (retval)
- return retval;
- }
-
- pr_debug("powering up cp....%d\n", snd_msic_ops.input_dev_id);
- sst_sc_reg_access(vaud, PMIC_WRITE, 1);
-	msleep(500); /* FIXME: need optimized value here */
- sst_sc_reg_access(pll, PMIC_WRITE, 1);
- msleep(1);
- snd_msic_ops.cap_on = 1;
- if (snd_msic_ops.input_dev_id == AMIC) {
- sst_sc_reg_access(amic_bias, PMIC_READ_MODIFY, 1);
- msleep(1);
- sst_sc_reg_access(amic, PMIC_READ_MODIFY, 3);
- } else {
- sst_sc_reg_access(dmic_bias, PMIC_READ_MODIFY, 1);
- msleep(1);
- sst_sc_reg_access(dmic, PMIC_READ_MODIFY, 2);
- }
- msleep(1);
- sst_sc_reg_access(tx_on, PMIC_WRITE, 1);
- return sst_sc_reg_access(pcm2, PMIC_READ_MODIFY, 1);
-}
-
-static int msic_power_down(void)
-{
- struct sc_reg_access power_dn[] = {
- /* VHSP */
- {0x0DC, 0xC4, 0},
- /* VHSN */
- {0x0DD, 0x04, 0},
- /* VIHF */
- {0x0C9, 0x24, 0},
- };
- struct sc_reg_access pll[] = {
- /* turn off PLL*/
- {0x240, 0x00, 0x0},
- };
- struct sc_reg_access vaud[] = {
- /* turn off VAUD*/
- {0x0DB, 0x04, 0},
- };
-
- pr_debug("powering dn msic\n");
- snd_msic_ops.pbhs_on = 0;
- snd_msic_ops.pb_on = 0;
- snd_msic_ops.cap_on = 0;
- sst_sc_reg_access(power_dn, PMIC_WRITE, 3);
- msleep(1);
- sst_sc_reg_access(pll, PMIC_WRITE, 1);
- msleep(1);
- sst_sc_reg_access(vaud, PMIC_WRITE, 1);
- return 0;
-}
-
-static int msic_power_down_pb(unsigned int device)
-{
- struct sc_reg_access drv_enable[] = {
- {0x25D, 0x00, 0x00},
- };
- struct sc_reg_access hs_mute[] = {
- {0x259, 0x80, 0x80},
- {0x25A, 0x80, 0x80},
- {0x26C, 0x02, 0x02},
- };
- struct sc_reg_access hs_off[] = {
- {0x257, 0x00, 0x03},
- {0x250, 0x00, 0x30},
- {0x382, 0x00, 0x40},
- };
- struct sc_reg_access ihf_mute[] = {
- {0x25B, 0x80, 0x80},
- {0x25C, 0x80, 0x80},
- };
- struct sc_reg_access ihf_off[] = {
- {0x257, 0x00, 0x0C},
- {0x251, 0x00, 0x03},
- };
- struct sc_reg_access vib1_off[] = {
- {0x264, 0x00, 0x82},
- };
- struct sc_reg_access vib2_off[] = {
- {0x26A, 0x00, 0x82},
- };
- struct sc_reg_access lout_off[] = {
- {0x25e, 0x66, 0x00},
- };
- struct sc_reg_access pmode_disable[] = {
- {0x381, 0x00, 0x10},
- };
-
-
-
- pr_debug("powering dn pb for device %d\n", device);
- switch (device) {
- case SND_SST_DEVICE_HEADSET:
- snd_msic_ops.pbhs_on = 0;
- sst_sc_reg_access(hs_mute, PMIC_READ_MODIFY, 3);
- drv_enable[0].mask = 0x43;
- sst_sc_reg_access(drv_enable, PMIC_READ_MODIFY, 1);
- sst_sc_reg_access(hs_off, PMIC_READ_MODIFY, 3);
- if (snd_msic_ops.lineout_dev_id == HEADSET)
- sst_sc_reg_access(lout_off, PMIC_WRITE, 1);
- break;
-
- case SND_SST_DEVICE_IHF:
- sst_sc_reg_access(ihf_mute, PMIC_READ_MODIFY, 2);
- drv_enable[0].mask = 0x0C;
- sst_sc_reg_access(drv_enable, PMIC_READ_MODIFY, 1);
- sst_sc_reg_access(ihf_off, PMIC_READ_MODIFY, 2);
- if (snd_msic_ops.lineout_dev_id == IHF) {
- sst_sc_reg_access(lout_off, PMIC_WRITE, 1);
- sst_sc_reg_access(pmode_disable, PMIC_READ_MODIFY, 1);
- }
- break;
-
- case SND_SST_DEVICE_VIBRA:
- sst_sc_reg_access(vib1_off, PMIC_READ_MODIFY, 1);
- drv_enable[0].mask = 0x10;
- sst_sc_reg_access(drv_enable, PMIC_READ_MODIFY, 1);
- if (snd_msic_ops.lineout_dev_id == VIBRA1)
- sst_sc_reg_access(lout_off, PMIC_WRITE, 1);
- break;
-
- case SND_SST_DEVICE_HAPTIC:
- sst_sc_reg_access(vib2_off, PMIC_READ_MODIFY, 1);
- drv_enable[0].mask = 0x20;
- sst_sc_reg_access(drv_enable, PMIC_READ_MODIFY, 1);
- if (snd_msic_ops.lineout_dev_id == VIBRA2)
- sst_sc_reg_access(lout_off, PMIC_WRITE, 1);
- break;
- }
- return 0;
-}
-
-static int msic_power_down_cp(unsigned int device)
-{
- struct sc_reg_access dmic[] = {
- {0x247, 0x00, 0xA0},
- {0x245, 0x00, 0x38},
- {0x246, 0x00, 0x07},
- };
- struct sc_reg_access amic[] = {
- {0x248, 0x00, 0x05},
- {0x249, 0x00, 0x01},
- {0x24A, 0x00, 0x01},
- {0x247, 0x00, 0xA3},
- };
- struct sc_reg_access tx_off[] = {
- {0x24F, 0x00, 0x3C},
- };
-
- pr_debug("powering dn cp....\n");
- snd_msic_ops.cap_on = 0;
- sst_sc_reg_access(tx_off, PMIC_READ_MODIFY, 1);
- if (snd_msic_ops.input_dev_id == DMIC)
- sst_sc_reg_access(dmic, PMIC_READ_MODIFY, 3);
- else
- sst_sc_reg_access(amic, PMIC_READ_MODIFY, 4);
- return 0;
-}
-
-static int msic_set_selected_output_dev(u8 value)
-{
- int retval = 0;
-
- pr_debug("msic set selected output:%d\n", value);
- snd_msic_ops.output_dev_id = value;
- if (snd_msic_ops.pbhs_on)
- msic_power_up_pb(SND_SST_DEVICE_HEADSET);
- return retval;
-}
-
-static int msic_set_selected_input_dev(u8 value)
-{
-
- struct sc_reg_access sc_access_dmic[] = {
- {0x24C, 0x10, 0x0},
- };
- struct sc_reg_access sc_access_amic[] = {
- {0x24C, 0x76, 0x0},
-
- };
- int retval = 0;
-
- pr_debug("msic_set_selected_input_dev:%d\n", value);
- snd_msic_ops.input_dev_id = value;
- switch (value) {
- case AMIC:
- pr_debug("Selecting AMIC1\n");
- retval = sst_sc_reg_access(sc_access_amic, PMIC_WRITE, 1);
- break;
- case DMIC:
- pr_debug("Selecting DMIC1\n");
- retval = sst_sc_reg_access(sc_access_dmic, PMIC_WRITE, 1);
- break;
- default:
- return -EINVAL;
-
- }
- if (snd_msic_ops.cap_on)
- retval = msic_power_up_cp(SND_SST_DEVICE_CAPTURE);
- return retval;
-}
-
-static int msic_set_hw_dmic_route(u8 hw_ch_index)
-{
- struct sc_reg_access sc_access_router;
- int retval = -EINVAL;
-
- switch (hw_ch_index) {
- case HW_CH0:
- sc_access_router.reg_addr = AUDIOMUX12;
- sc_access_router.value = snd_msic_ops.hw_dmic_map[0];
- sc_access_router.mask = (MASK2 | MASK1 | MASK0);
- pr_debug("hw_ch0. value = 0x%x\n",
- sc_access_router.value);
- retval = sst_sc_reg_access(&sc_access_router,
- PMIC_READ_MODIFY, 1);
- break;
-
- case HW_CH1:
- sc_access_router.reg_addr = AUDIOMUX12;
- sc_access_router.value = (snd_msic_ops.hw_dmic_map[1]) << 4;
- sc_access_router.mask = (MASK6 | MASK5 | MASK4);
- pr_debug("### hw_ch1. value = 0x%x\n",
- sc_access_router.value);
- retval = sst_sc_reg_access(&sc_access_router,
- PMIC_READ_MODIFY, 1);
- break;
-
- case HW_CH2:
- sc_access_router.reg_addr = AUDIOMUX34;
- sc_access_router.value = snd_msic_ops.hw_dmic_map[2];
- sc_access_router.mask = (MASK2 | MASK1 | MASK0);
- pr_debug("hw_ch2. value = 0x%x\n",
- sc_access_router.value);
- retval = sst_sc_reg_access(&sc_access_router,
- PMIC_READ_MODIFY, 1);
- break;
-
- case HW_CH3:
- sc_access_router.reg_addr = AUDIOMUX34;
- sc_access_router.value = (snd_msic_ops.hw_dmic_map[3]) << 4;
- sc_access_router.mask = (MASK6 | MASK5 | MASK4);
- pr_debug("hw_ch3. value = 0x%x\n",
- sc_access_router.value);
- retval = sst_sc_reg_access(&sc_access_router,
- PMIC_READ_MODIFY, 1);
- break;
- }
-
- return retval;
-}
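The switch above encodes a regular layout: AUDIOMUX12 carries hardware channels 0 and 1 and AUDIOMUX34 carries channels 2 and 3, with the even channel in the low nibble (MASK2..MASK0) and the odd channel in the high nibble (MASK6..MASK4). An equivalent table-free formulation, shown only as a sketch of that mapping:

/* Illustrative only: route hw channel 'ch' (0..3) to the DMIC recorded
 * in snd_msic_ops.hw_dmic_map[ch]. */
static int msic_route_dmic_sketch(u8 ch)
{
	struct sc_reg_access mux = {
		.reg_addr = (ch < HW_CH2) ? AUDIOMUX12 : AUDIOMUX34,
		.value    = snd_msic_ops.hw_dmic_map[ch],
		.mask     = MASK2 | MASK1 | MASK0,
	};

	if (ch & 1) {		/* odd channels live in the high nibble */
		mux.value <<= 4;
		mux.mask = MASK6 | MASK5 | MASK4;
	}
	return sst_sc_reg_access(&mux, PMIC_READ_MODIFY, 1);
}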
-
-
-static int msic_set_pcm_voice_params(void)
-{
- return 0;
-}
-
-static int msic_set_pcm_audio_params(int sfreq, int word_size, int num_channel)
-{
- return 0;
-}
-
-static int msic_set_audio_port(int status)
-{
- return 0;
-}
-
-static int msic_set_voice_port(int status)
-{
- return 0;
-}
-
-static int msic_set_mute(int dev_id, u8 value)
-{
- return 0;
-}
-
-static int msic_set_vol(int dev_id, int value)
-{
- return 0;
-}
-
-static int msic_get_mute(int dev_id, u8 *value)
-{
- return 0;
-}
-
-static int msic_get_vol(int dev_id, int *value)
-{
- return 0;
-}
-
-static int msic_set_headset_state(int state)
-{
- struct sc_reg_access hs_enable[] = {
- {0x25D, 0x03, 0x03},
- };
-
- if (state)
- /*enable*/
- sst_sc_reg_access(hs_enable, PMIC_READ_MODIFY, 1);
- else {
- hs_enable[0].value = 0;
- sst_sc_reg_access(hs_enable, PMIC_READ_MODIFY, 1);
- }
- return 0;
-}
-
-static int msic_enable_mic_bias(void)
-{
- struct sc_reg_access jack_interrupt_reg[] = {
- {0x0DB, 0x07, 0x00},
-
- };
- struct sc_reg_access jack_bias_reg[] = {
- {0x247, 0x0C, 0x0C},
- };
-
- sst_sc_reg_access(jack_interrupt_reg, PMIC_WRITE, 1);
- sst_sc_reg_access(jack_bias_reg, PMIC_READ_MODIFY, 1);
- return 0;
-}
-
-static int msic_disable_mic_bias(void)
-{
- if (snd_msic_ops.jack_interrupt_status == true)
- return 0;
- if (!(snd_msic_ops.pb_on || snd_msic_ops.cap_on))
- msic_power_down();
- return 0;
-}
-
-static int msic_disable_jack_btn(void)
-{
- struct sc_reg_access btn_disable[] = {
- {0x26C, 0x00, 0x01}
- };
-
- if (!(snd_msic_ops.pb_on || snd_msic_ops.cap_on))
- msic_power_down();
- snd_msic_ops.jack_interrupt_status = false;
- return sst_sc_reg_access(btn_disable, PMIC_READ_MODIFY, 1);
-}
-
-static int msic_enable_jack_btn(void)
-{
- struct sc_reg_access btn_enable[] = {
- {0x26b, 0x77, 0x00},
- {0x26C, 0x01, 0x00},
- };
- return sst_sc_reg_access(btn_enable, PMIC_WRITE, 2);
-}
-static int msic_convert_adc_to_mvolt(unsigned int mic_bias)
-{
- return (ADC_ONE_LSB_MULTIPLIER * mic_bias) / 1000;
-}
-int msic_get_headset_state(int mic_bias)
-{
- struct sc_reg_access msic_hs_toggle[] = {
- {0x070, 0x00, 0x01},
- };
- if (mic_bias >= 0 && mic_bias < 400) {
-
- pr_debug("Detected Headphone!!!\n");
- sst_sc_reg_access(msic_hs_toggle, PMIC_READ_MODIFY, 1);
-
- } else if (mic_bias > 400 && mic_bias < 650) {
-
- pr_debug("Detected American headset\n");
- msic_hs_toggle[0].value = 0x01;
- sst_sc_reg_access(msic_hs_toggle, PMIC_READ_MODIFY, 1);
-
- } else if (mic_bias >= 650 && mic_bias < 2000) {
-
- pr_debug("Detected Headset!!!\n");
- sst_sc_reg_access(msic_hs_toggle, PMIC_READ_MODIFY, 1);
- /*power on jack and btn*/
- snd_msic_ops.jack_interrupt_status = true;
- msic_enable_jack_btn();
- msic_enable_mic_bias();
- return SND_JACK_HEADSET;
-
- } else
- pr_debug("Detected Open Cable!!!\n");
-
- return SND_JACK_HEADPHONE;
-}
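With ADC_ONE_LSB_MULTIPLIER = 2346 (microvolts per ADC step), msic_convert_adc_to_mvolt() turns the raw 10-bit reading into millivolts; for example a code of 277 gives (2346 * 277) / 1000, about 650 mV, which is exactly the headset boundary used above. The classification then reduces to the threshold bands hard-coded in msic_get_headset_state(), summarized in this sketch (return values only; the register toggles are omitted):

/* Illustrative summary of the detection bands above (mV of mic bias). */
static int classify_accessory_sketch(int mv)
{
	if (mv >= 0 && mv < 400)
		return SND_JACK_HEADPHONE;	/* plain headphone */
	if (mv > 400 && mv < 650)
		return SND_JACK_HEADPHONE;	/* "American" headset wiring */
	if (mv >= 650 && mv < 2000)
		return SND_JACK_HEADSET;	/* mic present: enable bias + buttons */
	return SND_JACK_HEADPHONE;		/* treated as open cable */
}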
-
-static int msic_get_mic_bias(void *arg)
-{
- struct snd_intelmad *intelmad_drv = (struct snd_intelmad *)arg;
- u16 adc_adr = intelmad_drv->adc_address;
- u16 adc_val;
- int ret;
- struct sc_reg_access adc_ctrl3[2] = {
- {0x1C2, 0x05, 0x0},
- };
-
- struct sc_reg_access audio_adc_reg1 = {0,};
- struct sc_reg_access audio_adc_reg2 = {0,};
-
- msic_enable_mic_bias();
- /* Enable the msic for conversion before reading */
- ret = sst_sc_reg_access(adc_ctrl3, PMIC_WRITE, 1);
- if (ret)
- return ret;
- adc_ctrl3[0].value = 0x04;
- /* Re-toggle the RRDATARD bit */
- ret = sst_sc_reg_access(adc_ctrl3, PMIC_WRITE, 1);
- if (ret)
- return ret;
-
- audio_adc_reg1.reg_addr = adc_adr;
- /* Read the higher bits of data */
- msleep(1000);
- ret = sst_sc_reg_access(&audio_adc_reg1, PMIC_READ, 1);
- if (ret)
- return ret;
- pr_debug("adc read value %x", audio_adc_reg1.value);
-
-	/* Shift bits to accommodate the lower two data bits */
- adc_val = (audio_adc_reg1.value << 2);
- adc_adr++;
-	audio_adc_reg2.reg_addr = adc_adr;
- ret = sst_sc_reg_access(&audio_adc_reg2, PMIC_READ, 1);
- if (ret)
- return ret;
- pr_debug("adc read value %x", audio_adc_reg2.value);
-
- /* Adding lower two bits to the higher bits */
-	audio_adc_reg2.value &= 0x03;
- adc_val += audio_adc_reg2.value;
-
- pr_debug("ADC value 0x%x", adc_val);
- msic_disable_mic_bias();
- return adc_val;
-}
-
-static void msic_pmic_irq_cb(void *cb_data, u8 intsts)
-{
- struct mad_jack *mjack = NULL;
- unsigned int present = 0, jack_event_flag = 0, buttonpressflag = 0;
- struct snd_intelmad *intelmaddata = cb_data;
- int retval = 0;
-
- pr_debug("value returned = 0x%x\n", intsts);
-
- if (snd_msic_ops.card_status == SND_CARD_UN_INIT) {
- retval = msic_init_card();
- if (retval)
- return;
- }
-
- mjack = &intelmaddata->jack[0];
- if (intsts & 0x1) {
- pr_debug("MAD short_push detected\n");
- present = SND_JACK_BTN_0;
- jack_event_flag = buttonpressflag = 1;
- mjack->jack.type = SND_JACK_BTN_0;
- mjack->jack.key[0] = BTN_0 ;
- }
-
- if (intsts & 0x2) {
- pr_debug(":MAD long_push detected\n");
- jack_event_flag = buttonpressflag = 1;
- mjack->jack.type = present = SND_JACK_BTN_1;
- mjack->jack.key[1] = BTN_1;
- }
-
- if (intsts & 0x4) {
- unsigned int mic_bias;
- jack_event_flag = 1;
- buttonpressflag = 0;
- mic_bias = msic_get_mic_bias(intelmaddata);
- pr_debug("mic_bias = %d\n", mic_bias);
- mic_bias = msic_convert_adc_to_mvolt(mic_bias);
- pr_debug("mic_bias after conversion = %d mV\n", mic_bias);
- mjack->jack_dev_state = msic_get_headset_state(mic_bias);
- mjack->jack.type = present = mjack->jack_dev_state;
- }
-
- if (intsts & 0x8) {
- mjack->jack.type = mjack->jack_dev_state;
- present = 0;
- jack_event_flag = 1;
- buttonpressflag = 0;
- msic_disable_jack_btn();
- msic_disable_mic_bias();
- }
- if (jack_event_flag)
- sst_mad_send_jack_report(&mjack->jack,
- buttonpressflag, present);
-}
-
-
-
-struct snd_pmic_ops snd_msic_ops = {
- .set_input_dev = msic_set_selected_input_dev,
- .set_output_dev = msic_set_selected_output_dev,
- .set_lineout_dev = msic_set_selected_lineout_dev,
- .set_hw_dmic_route = msic_set_hw_dmic_route,
- .set_mute = msic_set_mute,
- .get_mute = msic_get_mute,
- .set_vol = msic_set_vol,
- .get_vol = msic_get_vol,
- .init_card = msic_init_card,
- .set_pcm_audio_params = msic_set_pcm_audio_params,
- .set_pcm_voice_params = msic_set_pcm_voice_params,
- .set_voice_port = msic_set_voice_port,
- .set_audio_port = msic_set_audio_port,
- .power_up_pmic_pb = msic_power_up_pb,
- .power_up_pmic_cp = msic_power_up_cp,
- .power_down_pmic_pb = msic_power_down_pb,
- .power_down_pmic_cp = msic_power_down_cp,
- .power_down_pmic = msic_power_down,
- .pmic_irq_cb = msic_pmic_irq_cb,
- .pmic_jack_enable = msic_enable_mic_bias,
- .pmic_get_mic_bias = msic_get_mic_bias,
- .pmic_set_headset_state = msic_set_headset_state,
-};
diff --git a/drivers/staging/intel_sst/intelmid_pvt.c b/drivers/staging/intel_sst/intelmid_pvt.c
deleted file mode 100644
index 90e0e64..0000000
--- a/drivers/staging/intel_sst/intelmid_pvt.c
+++ /dev/null
@@ -1,173 +0,0 @@
-/*
- * intelmid_pvt.c - Intel Sound card driver for MID
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Harsha Priya <priya.harsha@intel.com>
- * Vinod Koul <vinod.koul@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- * ALSA driver for Intel MID sound card chipset - holding private functions
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/io.h>
-#include <asm/intel_scu_ipc.h>
-#include <sound/core.h>
-#include <sound/control.h>
-#include <sound/pcm.h>
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intelmid_snd_control.h"
-#include "intelmid.h"
-
-
-void period_elapsed(void *mad_substream)
-{
- struct snd_pcm_substream *substream = mad_substream;
- struct mad_stream_pvt *stream;
-
-
-
- if (!substream || !substream->runtime)
- return;
- stream = substream->runtime->private_data;
- if (!stream)
- return;
-
- if (stream->stream_status != RUNNING)
- return;
- pr_debug("calling period elapsed\n");
- snd_pcm_period_elapsed(substream);
- return;
-}
-
-
-int snd_intelmad_alloc_stream(struct snd_pcm_substream *substream)
-{
- struct snd_intelmad *intelmaddata = snd_pcm_substream_chip(substream);
- struct mad_stream_pvt *stream = substream->runtime->private_data;
- struct snd_sst_stream_params param = {{{0,},},};
- struct snd_sst_params str_params = {0};
- int ret_val;
-
- /* set codec params and inform SST driver the same */
-
- param.uc.pcm_params.codec = SST_CODEC_TYPE_PCM;
- param.uc.pcm_params.num_chan = (u8) substream->runtime->channels;
- param.uc.pcm_params.pcm_wd_sz = substream->runtime->sample_bits;
- param.uc.pcm_params.reserved = 0;
- param.uc.pcm_params.sfreq = substream->runtime->rate;
- param.uc.pcm_params.ring_buffer_size =
- snd_pcm_lib_buffer_bytes(substream);
- param.uc.pcm_params.period_count = substream->runtime->period_size;
- param.uc.pcm_params.ring_buffer_addr =
- virt_to_phys(substream->runtime->dma_area);
- pr_debug("period_cnt = %d\n", param.uc.pcm_params.period_count);
- pr_debug("sfreq= %d, wd_sz = %d\n",
- param.uc.pcm_params.sfreq, param.uc.pcm_params.pcm_wd_sz);
-
- str_params.sparams = param;
- str_params.codec = SST_CODEC_TYPE_PCM;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- str_params.ops = STREAM_OPS_PLAYBACK;
- pr_debug("Playbck stream,Device %d\n", stream->device);
- } else {
- str_params.ops = STREAM_OPS_CAPTURE;
- stream->device = SND_SST_DEVICE_CAPTURE;
- pr_debug("Capture stream,Device %d\n", stream->device);
- }
- str_params.device_type = stream->device;
- ret_val = intelmaddata->sstdrv_ops->pcm_control->open(&str_params);
- pr_debug("sst: SST_SND_PLAY/CAPTURE ret_val = %x\n", ret_val);
- if (ret_val < 0)
- return ret_val;
-
- stream->stream_info.str_id = ret_val;
- stream->stream_status = INIT;
- stream->stream_info.buffer_ptr = 0;
- pr_debug("str id : %d\n", stream->stream_info.str_id);
-
- return ret_val;
-}
-
-int snd_intelmad_init_stream(struct snd_pcm_substream *substream)
-{
- struct mad_stream_pvt *stream = substream->runtime->private_data;
- struct snd_intelmad *intelmaddata = snd_pcm_substream_chip(substream);
- int ret_val;
-
- pr_debug("setting buffer ptr param\n");
- stream->stream_info.period_elapsed = period_elapsed;
- stream->stream_info.mad_substream = substream;
- stream->stream_info.buffer_ptr = 0;
- stream->stream_info.sfreq = substream->runtime->rate;
- ret_val = intelmaddata->sstdrv_ops->pcm_control->device_control(
- SST_SND_STREAM_INIT, &stream->stream_info);
- if (ret_val)
- pr_err("control_set ret error %d\n", ret_val);
- return ret_val;
-
-}
-
-
-/**
- * sst_sc_reg_access - IPC read/write wrapper
- *
- * @sc_access: array of data, addresses and masks
- * @type: operation type
- * @num_val: number of registers to operate on
- *
- * Read/write/read-modify operations on registers accessed through the SCU
- * (sound card and a few SST DSP registers that are not accessible to the IA)
- */
-int sst_sc_reg_access(struct sc_reg_access *sc_access,
- int type, int num_val)
-{
- int i, retval = 0;
- if (type == PMIC_WRITE) {
- for (i = 0; i < num_val; i++) {
- retval = intel_scu_ipc_iowrite8(sc_access[i].reg_addr,
- sc_access[i].value);
- if (retval)
- goto err;
- }
- } else if (type == PMIC_READ) {
- for (i = 0; i < num_val; i++) {
- retval = intel_scu_ipc_ioread8(sc_access[i].reg_addr,
- &(sc_access[i].value));
- if (retval)
- goto err;
- }
- } else {
- for (i = 0; i < num_val; i++) {
- retval = intel_scu_ipc_update_register(
- sc_access[i].reg_addr, sc_access[i].value,
- sc_access[i].mask);
- if (retval)
- goto err;
- }
- }
- return 0;
-err:
- pr_err("IPC failed for cmd %d, %d\n", retval, type);
- pr_err("reg:0x%2x addr:0x%2x\n",
- sc_access[i].reg_addr, sc_access[i].value);
- return retval;
-}
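For reference, the calling convention of sst_sc_reg_access() above is shown in a
minimal sketch. This is illustrative only: the helper name example_mute_headphone()
is hypothetical, while struct sc_reg_access (reg_addr, value, mask), PMIC_READ_MODIFY
and register 0x18E (the AUD15 headphone control in the fs driver below) are taken
from the deleted sources.

static int example_mute_headphone(void)
{
	/* {reg_addr, value, mask}: set bit 7 of AUD15 (0x18E) to mute the output */
	struct sc_reg_access sc_access[] = {
		{0x18E, 0x80, 0x80},
	};

	/* PMIC_READ_MODIFY rewrites only the bits selected by the mask */
	return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
}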
diff --git a/drivers/staging/intel_sst/intelmid_snd_control.h b/drivers/staging/intel_sst/intelmid_snd_control.h
deleted file mode 100644
index 06ad3a1..0000000
--- a/drivers/staging/intel_sst/intelmid_snd_control.h
+++ /dev/null
@@ -1,123 +0,0 @@
-#ifndef __INTELMID_SND_CTRL_H__
-#define __INTELMID_SND_CTRL_H__
-/*
- * intelmid_snd_control.h - Intel Sound card driver for MID
- *
- * Copyright (C) 2008-10 Intel Corporation
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This file defines all snd control functions
- */
-
-/*
-Mask bits
-*/
-#define MASK0 0x01 /* 0000 0001 */
-#define MASK1 0x02 /* 0000 0010 */
-#define MASK2 0x04 /* 0000 0100 */
-#define MASK3 0x08 /* 0000 1000 */
-#define MASK4 0x10 /* 0001 0000 */
-#define MASK5 0x20 /* 0010 0000 */
-#define MASK6 0x40 /* 0100 0000 */
-#define MASK7 0x80 /* 1000 0000 */
-/*
-value bits
-*/
-#define VALUE0 0x01 /* 0000 0001 */
-#define VALUE1 0x02 /* 0000 0010 */
-#define VALUE2 0x04 /* 0000 0100 */
-#define VALUE3 0x08 /* 0000 1000 */
-#define VALUE4 0x10 /* 0001 0000 */
-#define VALUE5 0x20 /* 0010 0000 */
-#define VALUE6 0x40 /* 0100 0000 */
-#define VALUE7 0x80 /* 1000 0000 */
-
-#define MUTE 0 /* ALSA Passes 0 for mute */
-#define UNMUTE 1 /* ALSA Passes 1 for unmute */
-
-#define MAX_VOL_PMIC_VENDOR0 0x3f /* max vol in dB for stereo & voice DAC */
-#define MIN_VOL_PMIC_VENDOR0 0 /* min vol in dB for stereo & voice DAC */
-/* Head phone volume control */
-#define MAX_HP_VOL_PMIC_VENDOR1 6 /* max volume in dB for HP */
-#define MIN_HP_VOL_PMIC_VENDOR1 (-84) /* min volume in dB for HP */
-#define MAX_HP_VOL_INDX_PMIC_VENDOR1 40 /* Number of HP volume control values */
-
-/* Mono Earpiece Volume control */
-#define MAX_EP_VOL_PMIC_VENDOR1 0 /* max volume in dB for EP */
-#define MIN_EP_VOL_PMIC_VENDOR1 (-75) /* min volume in dB for EP */
-#define MAX_EP_VOL_INDX_PMIC_VENDOR1 32 /* Number of EP volume control values */
-
-int sst_sc_reg_access(struct sc_reg_access *sc_access,
- int type, int num_val);
-extern struct snd_pmic_ops snd_pmic_ops_fs;
-extern struct snd_pmic_ops snd_pmic_ops_mx;
-extern struct snd_pmic_ops snd_pmic_ops_nc;
-extern struct snd_pmic_ops snd_msic_ops;
-
-/* device */
-enum SND_INPUT_DEVICE {
- AMIC,
- DMIC,
- HS_MIC,
- IN_UNDEFINED
-};
-enum SND_LINE_OUT_DEVICE {
- HEADSET,
- IHF,
- VIBRA1,
- VIBRA2,
- NONE,
-};
-
-enum SND_OUTPUT_DEVICE {
- STEREO_HEADPHONE,
- MONO_EARPIECE,
-
- INTERNAL_SPKR,
- RECEIVER,
- OUT_UNDEFINED
-};
-
-enum pmic_controls {
- PMIC_SND_HP_MIC_MUTE = 0x0001,
- PMIC_SND_AMIC_MUTE = 0x0002,
- PMIC_SND_DMIC_MUTE = 0x0003,
- PMIC_SND_CAPTURE_VOL = 0x0004,
-/* Output controls */
- PMIC_SND_LEFT_PB_VOL = 0x0010,
- PMIC_SND_RIGHT_PB_VOL = 0x0011,
- PMIC_SND_LEFT_HP_MUTE = 0x0012,
- PMIC_SND_RIGHT_HP_MUTE = 0x0013,
- PMIC_SND_LEFT_SPEAKER_MUTE = 0x0014,
- PMIC_SND_RIGHT_SPEAKER_MUTE = 0x0015,
- PMIC_SND_RECEIVER_VOL = 0x0016,
- PMIC_SND_RECEIVER_MUTE = 0x0017,
- PMIC_SND_LEFT_MASTER_VOL = 0x0018,
- PMIC_SND_RIGHT_MASTER_VOL = 0x0019,
-/* Other controls */
- PMIC_SND_MUTE_ALL = 0x0020,
- PMIC_MAX_CONTROLS = 0x0020,
-};
-
-#endif /* __INTELMID_SND_CTRL_H__ */
-
-
diff --git a/drivers/staging/intel_sst/intelmid_v0_control.c b/drivers/staging/intel_sst/intelmid_v0_control.c
deleted file mode 100644
index b8dfdb9..0000000
--- a/drivers/staging/intel_sst/intelmid_v0_control.c
+++ /dev/null
@@ -1,866 +0,0 @@
-/*
- * intelmid_v0_control.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corporation
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This file contains the control operations of vendor 1
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/file.h>
-#include <sound/control.h>
-#include "intel_sst.h"
-#include "intelmid_snd_control.h"
-#include "intelmid.h"
-
-enum _reg_v1 {
- VOICEPORT1 = 0x180,
- VOICEPORT2 = 0x181,
- AUDIOPORT1 = 0x182,
- AUDIOPORT2 = 0x183,
- MISCVOICECTRL = 0x184,
- MISCAUDCTRL = 0x185,
- DMICCTRL1 = 0x186,
- AUDIOBIAS = 0x187,
- MICCTRL = 0x188,
- MICLICTRL1 = 0x189,
- MICLICTRL2 = 0x18A,
- MICLICTRL3 = 0x18B,
- VOICEDACCTRL1 = 0x18C,
- STEREOADCCTRL = 0x18D,
- AUD15 = 0x18E,
- AUD16 = 0x18F,
- AUD17 = 0x190,
- AUD18 = 0x191,
- RMIXOUTSEL = 0x192,
- ANALOGLBR = 0x193,
- ANALOGLBL = 0x194,
- POWERCTRL1 = 0x195,
- POWERCTRL2 = 0x196,
- HEADSETDETECTINT = 0x197,
- HEADSETDETECTINTMASK = 0x198,
- TRIMENABLE = 0x199,
-};
-
-int rev_id = 0x20;
-static bool jack_det_enabled;
-
-/**
- * fs_init_card - initialize the sound card
- *
- * This initializes the audio paths to known values for this sound card
- */
-static int fs_init_card(void)
-{
- struct sc_reg_access sc_access[] = {
- {0x180, 0x00, 0x0},
- {0x181, 0x00, 0x0},
- {0x182, 0xF8, 0x0},
- {0x183, 0x08, 0x0},
- {0x184, 0x00, 0x0},
- {0x185, 0x40, 0x0},
- {0x186, 0x06, 0x0},
- {0x187, 0x80, 0x0},
- {0x188, 0x40, 0x0},
- {0x189, 0x39, 0x0},
- {0x18a, 0x39, 0x0},
- {0x18b, 0x1F, 0x0},
- {0x18c, 0x00, 0x0},
- {0x18d, 0x00, 0x0},
- {0x18e, 0x39, 0x0},
- {0x18f, 0x39, 0x0},
- {0x190, 0x39, 0x0},
- {0x191, 0x11, 0x0},
- {0x192, 0x0E, 0x0},
- {0x193, 0x00, 0x0},
- {0x194, 0x00, 0x0},
- {0x195, 0x00, 0x0},
- {0x196, 0x7C, 0x0},
- {0x197, 0x00, 0x0},
- {0x198, 0x0B, 0x0},
- {0x199, 0x00, 0x0},
- {0x037, 0x3F, 0x0},
- };
-
- snd_pmic_ops_fs.card_status = SND_CARD_INIT_DONE;
- snd_pmic_ops_fs.master_mute = UNMUTE;
- snd_pmic_ops_fs.mute_status = UNMUTE;
- snd_pmic_ops_fs.num_channel = 2;
- return sst_sc_reg_access(sc_access, PMIC_WRITE, 27);
-}
-
-static int fs_enable_audiodac(int value)
-{
- struct sc_reg_access sc_access[3];
- sc_access[0].reg_addr = AUD16;
- sc_access[1].reg_addr = AUD17;
- sc_access[2].reg_addr = AUD15;
- sc_access[0].mask = sc_access[1].mask = sc_access[2].mask = MASK7;
-
- if (snd_pmic_ops_fs.mute_status == MUTE)
- return 0;
- if (value == MUTE) {
- sc_access[0].value = sc_access[1].value =
- sc_access[2].value = 0x80;
-
- } else {
- sc_access[0].value = sc_access[1].value =
- sc_access[2].value = 0x0;
- }
- if (snd_pmic_ops_fs.num_channel == 1)
- sc_access[1].value = sc_access[2].value = 0x80;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 3);
-
-}
-
-static int fs_power_up_pb(unsigned int port)
-{
- struct sc_reg_access sc_access[] = {
- {AUDIOBIAS, 0x00, MASK7},
- {POWERCTRL1, 0xC6, 0xC6},
- {POWERCTRL2, 0x30, 0x30},
-
- };
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
- retval = fs_enable_audiodac(MUTE);
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 3);
-
- if (retval)
- return retval;
-
- pr_debug("in fs power up pb\n");
- return fs_enable_audiodac(UNMUTE);
-}
-
-static int fs_power_down_pb(unsigned int device)
-{
- struct sc_reg_access sc_access[] = {
- {POWERCTRL1, 0x00, 0xC6},
- {POWERCTRL2, 0x00, 0x30},
- };
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
- retval = fs_enable_audiodac(MUTE);
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-
- if (retval)
- return retval;
-
- pr_debug("in fsl power down pb\n");
- return fs_enable_audiodac(UNMUTE);
-}
-
-static int fs_power_up_cp(unsigned int port)
-{
- struct sc_reg_access sc_access[] = {
- /* NOTE: power up the A ADC only, as turning on the V ADC causes noise */
- {POWERCTRL2, 0x32, 0x32},
- {AUDIOBIAS, 0x00, MASK7},
- };
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-}
-
-static int fs_power_down_cp(unsigned int device)
-{
- struct sc_reg_access sc_access[] = {
- {POWERCTRL2, 0x00, 0x03},
- };
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
-}
-
-static int fs_power_down(void)
-{
- int retval = 0;
- struct sc_reg_access sc_access[] = {
- {AUDIOBIAS, MASK7, MASK7},
- };
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
-}
-
-static int fs_set_pcm_voice_params(void)
-{
- struct sc_reg_access sc_access[] = {
- {0x180, 0xA0, 0},
- {0x181, 0x04, 0},
- {0x182, 0x0, 0},
- {0x183, 0x0, 0},
- {0x184, 0x18, 0},
- {0x185, 0x40, 0},
- {0x186, 0x06, 0},
- {0x187, 0x0, 0},
- {0x188, 0x10, 0},
- {0x189, 0x39, 0},
- {0x18a, 0x39, 0},
- {0x18b, 0x02, 0},
- {0x18c, 0x0, 0},
- {0x18d, 0x0, 0},
- {0x18e, 0x39, 0},
- {0x18f, 0x0, 0},
- {0x190, 0x0, 0},
- {0x191, 0x20, 0},
- {0x192, 0x20, 0},
- {0x193, 0x0, 0},
- {0x194, 0x0, 0},
- {0x195, 0x06, 0},
- {0x196, 0x25, 0},
- {0x197, 0x0, 0},
- {0x198, 0xF, 0},
- {0x199, 0x0, 0},
- };
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
- return sst_sc_reg_access(sc_access, PMIC_WRITE, 26);
-}
-
-static int fs_set_audio_port(int status)
-{
- struct sc_reg_access sc_access[2];
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
- if (status == DEACTIVATE) {
- /* Deactivate audio port-tristate and power */
- sc_access[0].value = 0x00;
- sc_access[0].mask = MASK6|MASK7;
- sc_access[0].reg_addr = AUDIOPORT1;
- sc_access[1].value = 0x00;
- sc_access[1].mask = MASK4|MASK5;
- sc_access[1].reg_addr = POWERCTRL2;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
- } else if (status == ACTIVATE) {
- /* activate audio port */
- sc_access[0].value = 0xC0;
- sc_access[0].mask = MASK6|MASK7;
- sc_access[0].reg_addr = AUDIOPORT1;
- sc_access[1].value = 0x30;
- sc_access[1].mask = MASK4|MASK5;
- sc_access[1].reg_addr = POWERCTRL2;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
- } else
- return -EINVAL;
-}
-
-static int fs_set_voice_port(int status)
-{
- struct sc_reg_access sc_access[2];
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
- if (status == DEACTIVATE) {
- /* Deactivate audio port-tristate and power */
- sc_access[0].value = 0x00;
- sc_access[0].mask = MASK6|MASK7;
- sc_access[0].reg_addr = VOICEPORT1;
- sc_access[1].value = 0x00;
- sc_access[1].mask = MASK0|MASK1;
- sc_access[1].reg_addr = POWERCTRL2;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
- } else if (status == ACTIVATE) {
- /* activate audio port */
- sc_access[0].value = 0xC0;
- sc_access[0].mask = MASK6|MASK7;
- sc_access[0].reg_addr = VOICEPORT1;
- sc_access[1].value = 0x03;
- sc_access[1].mask = MASK0|MASK1;
- sc_access[1].reg_addr = POWERCTRL2;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
- } else
- return -EINVAL;
-}
-
-static int fs_set_pcm_audio_params(int sfreq, int word_size, int num_channel)
-{
- u8 config1 = 0;
- struct sc_reg_access sc_access[4];
- int retval = 0, num_value = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
- switch (sfreq) {
- case 8000:
- config1 = 0x00;
- break;
- case 11025:
- config1 = 0x01;
- break;
- case 12000:
- config1 = 0x02;
- break;
- case 16000:
- config1 = 0x03;
- break;
- case 22050:
- config1 = 0x04;
- break;
- case 24000:
- config1 = 0x05;
- break;
- case 26000:
- config1 = 0x06;
- break;
- case 32000:
- config1 = 0x07;
- break;
- case 44100:
- config1 = 0x08;
- break;
- case 48000:
- config1 = 0x09;
- break;
- }
- snd_pmic_ops_fs.num_channel = num_channel;
- if (snd_pmic_ops_fs.num_channel == 1) {
- sc_access[0].reg_addr = AUD17;
- sc_access[1].reg_addr = AUD15;
- sc_access[0].mask = sc_access[1].mask = MASK7;
- sc_access[0].value = sc_access[1].value = 0x80;
- sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-
- } else {
- sc_access[0].reg_addr = AUD17;
- sc_access[1].reg_addr = AUD15;
- sc_access[0].mask = sc_access[1].mask = MASK7;
- sc_access[0].value = sc_access[1].value = 0x00;
- sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-
- }
- pr_debug("sfreq:%d,Register value = %x\n", sfreq, config1);
-
- if (word_size == 24) {
- sc_access[0].reg_addr = AUDIOPORT1;
- sc_access[0].mask = MASK0|MASK1|MASK2|MASK3;
- sc_access[0].value = 0xFB;
-
-
- sc_access[1].reg_addr = AUDIOPORT2;
- sc_access[1].value = config1 | 0x10;
- sc_access[1].mask = MASK0 | MASK1 | MASK2 | MASK3
- | MASK4 | MASK5 | MASK6;
-
- sc_access[2].reg_addr = MISCAUDCTRL;
- sc_access[2].value = 0x02;
- sc_access[2].mask = 0x02;
-
- num_value = 3;
-
- } else {
-
- sc_access[0].reg_addr = AUDIOPORT2;
- sc_access[0].value = config1;
- sc_access[0].mask = MASK0|MASK1|MASK2|MASK3;
-
- sc_access[1].reg_addr = MISCAUDCTRL;
- sc_access[1].value = 0x00;
- sc_access[1].mask = 0x02;
- num_value = 2;
- }
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, num_value);
-
-}
-
-static int fs_set_selected_input_dev(u8 value)
-{
- struct sc_reg_access sc_access_dmic[] = {
- {MICCTRL, 0x81, 0xf7},
- {MICLICTRL3, 0x00, 0xE0},
- };
- struct sc_reg_access sc_access_mic[] = {
- {MICCTRL, 0x40, MASK2|MASK4|MASK5|MASK6|MASK7},
- {MICLICTRL3, 0x00, 0xE0},
- };
- struct sc_reg_access sc_access_hsmic[] = {
- {MICCTRL, 0x10, MASK2|MASK4|MASK5|MASK6|MASK7},
- {MICLICTRL3, 0x00, 0xE0},
- };
-
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
-
- switch (value) {
- case AMIC:
- pr_debug("Selecting amic not supported in mono cfg\n");
- return sst_sc_reg_access(sc_access_mic, PMIC_READ_MODIFY, 2);
- break;
-
- case HS_MIC:
- pr_debug("Selecting hsmic\n");
- return sst_sc_reg_access(sc_access_hsmic,
- PMIC_READ_MODIFY, 2);
- break;
-
- case DMIC:
- pr_debug("Selecting dmic\n");
- return sst_sc_reg_access(sc_access_dmic, PMIC_READ_MODIFY, 2);
- break;
-
- default:
- return -EINVAL;
-
- }
-}
-
-static int fs_set_selected_output_dev(u8 value)
-{
- struct sc_reg_access sc_access_hp[] = {
- {0x191, 0x11, 0x0},
- {0x192, 0x0E, 0x0},
- };
- struct sc_reg_access sc_access_is[] = {
- {0x191, 0x17, 0xFF},
- {0x192, 0x08, 0xFF},
- };
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
-
- switch (value) {
- case STEREO_HEADPHONE:
- pr_debug("SST DBG:Selecting headphone\n");
- return sst_sc_reg_access(sc_access_hp, PMIC_WRITE, 2);
- break;
- case MONO_EARPIECE:
- case INTERNAL_SPKR:
- pr_debug("SST DBG:Selecting internal spkr\n");
- return sst_sc_reg_access(sc_access_is, PMIC_READ_MODIFY, 2);
- break;
-
- default:
- return -EINVAL;
-
- }
-}
-
-static int fs_set_mute(int dev_id, u8 value)
-{
- struct sc_reg_access sc_access[6] = {{0,},};
- int reg_num = 0;
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
-
-
- pr_debug("dev_id:0x%x value:0x%x\n", dev_id, value);
- switch (dev_id) {
- case PMIC_SND_DMIC_MUTE:
- sc_access[0].reg_addr = MICCTRL;
- sc_access[1].reg_addr = MICLICTRL1;
- sc_access[2].reg_addr = MICLICTRL2;
- sc_access[0].mask = MASK5;
- sc_access[1].mask = sc_access[2].mask = MASK6;
- if (value == MUTE) {
- sc_access[0].value = 0x20;
- sc_access[2].value = sc_access[1].value = 0x40;
- } else
- sc_access[0].value = sc_access[1].value
- = sc_access[2].value = 0x0;
- reg_num = 3;
- break;
- case PMIC_SND_HP_MIC_MUTE:
- case PMIC_SND_AMIC_MUTE:
- sc_access[0].reg_addr = MICLICTRL1;
- sc_access[1].reg_addr = MICLICTRL2;
- sc_access[0].mask = sc_access[1].mask = MASK6;
- if (value == MUTE)
- sc_access[0].value = sc_access[1].value = 0x40;
- else
- sc_access[0].value = sc_access[1].value = 0x0;
- reg_num = 2;
- break;
- case PMIC_SND_LEFT_SPEAKER_MUTE:
- case PMIC_SND_LEFT_HP_MUTE:
- sc_access[0].reg_addr = AUD16;
- sc_access[1].reg_addr = AUD15;
-
- sc_access[0].mask = sc_access[1].mask = MASK7;
- if (value == MUTE)
- sc_access[0].value = sc_access[1].value = 0x80;
- else
- sc_access[0].value = sc_access[1].value = 0x0;
- reg_num = 2;
- snd_pmic_ops_fs.mute_status = value;
- break;
- case PMIC_SND_RIGHT_HP_MUTE:
- case PMIC_SND_RIGHT_SPEAKER_MUTE:
- sc_access[0].reg_addr = AUD17;
- sc_access[1].reg_addr = AUD15;
- sc_access[0].mask = sc_access[1].mask = MASK7;
- if (value == MUTE)
- sc_access[0].value = sc_access[1].value = 0x80;
- else
- sc_access[0].value = sc_access[1].value = 0x0;
- snd_pmic_ops_fs.mute_status = value;
- if (snd_pmic_ops_fs.num_channel == 1)
- sc_access[0].value = sc_access[1].value = 0x80;
- reg_num = 2;
- break;
- case PMIC_SND_MUTE_ALL:
- sc_access[0].reg_addr = AUD16;
- sc_access[1].reg_addr = AUD17;
- sc_access[2].reg_addr = AUD15;
- sc_access[3].reg_addr = MICCTRL;
- sc_access[4].reg_addr = MICLICTRL1;
- sc_access[5].reg_addr = MICLICTRL2;
- sc_access[0].mask = sc_access[1].mask =
- sc_access[2].mask = MASK7;
- sc_access[3].mask = MASK5;
- sc_access[4].mask = sc_access[5].mask = MASK6;
-
- if (value == MUTE) {
- sc_access[0].value =
- sc_access[1].value = sc_access[2].value = 0x80;
- sc_access[3].value = 0x20;
- sc_access[4].value = sc_access[5].value = 0x40;
-
- } else {
- sc_access[0].value = sc_access[1].value =
- sc_access[2].value = sc_access[3].value =
- sc_access[4].value = sc_access[5].value = 0x0;
- }
- if (snd_pmic_ops_fs.num_channel == 1)
- sc_access[1].value = sc_access[2].value = 0x80;
- reg_num = 6;
- snd_pmic_ops_fs.mute_status = value;
- snd_pmic_ops_fs.master_mute = value;
- break;
-
- }
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, reg_num);
-}
-
-static int fs_set_vol(int dev_id, int value)
-{
- struct sc_reg_access sc_acces, sc_access[4] = {{0},};
- int reg_num = 0;
- int retval = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
-
- switch (dev_id) {
- case PMIC_SND_LEFT_PB_VOL:
- pr_debug("PMIC_SND_LEFT_PB_VOL:%d\n", value);
- sc_access[0].value = sc_access[1].value = value;
- sc_access[0].reg_addr = AUD16;
- sc_access[1].reg_addr = AUD15;
- sc_access[0].mask = sc_access[1].mask =
- (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
- reg_num = 2;
- break;
-
- case PMIC_SND_RIGHT_PB_VOL:
- pr_debug("PMIC_SND_RIGHT_PB_VOL:%d\n", value);
- sc_access[0].value = sc_access[1].value = value;
- sc_access[0].reg_addr = AUD17;
- sc_access[1].reg_addr = AUD15;
- sc_access[0].mask = sc_access[1].mask =
- (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
- if (snd_pmic_ops_fs.num_channel == 1) {
- sc_access[0].value = sc_access[1].value = 0x80;
- sc_access[0].mask = sc_access[1].mask = MASK7;
- }
- reg_num = 2;
- break;
- case PMIC_SND_CAPTURE_VOL:
- pr_debug("PMIC_SND_CAPTURE_VOL:%d\n", value);
- sc_access[0].reg_addr = MICLICTRL1;
- sc_access[1].reg_addr = MICLICTRL2;
- sc_access[2].reg_addr = DMICCTRL1;
- sc_access[2].value = value;
- sc_access[0].value = sc_access[1].value = value;
- sc_acces.reg_addr = MICLICTRL3;
- sc_acces.value = value;
- sc_acces.mask = (MASK0|MASK1|MASK2|MASK3|MASK5|MASK6|MASK7);
- retval = sst_sc_reg_access(&sc_acces, PMIC_READ_MODIFY, 1);
- sc_access[0].mask = sc_access[1].mask =
- sc_access[2].mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
- reg_num = 3;
- break;
-
- default:
- return -EINVAL;
- }
-
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, reg_num);
-}
-
-static int fs_get_mute(int dev_id, u8 *value)
-{
- struct sc_reg_access sc_access[6] = {{0,},};
-
- int retval = 0, temp_value = 0, mask = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
-
- switch (dev_id) {
-
- case PMIC_SND_AMIC_MUTE:
- case PMIC_SND_HP_MIC_MUTE:
- sc_access[0].reg_addr = MICLICTRL1;
- mask = MASK6;
- retval = sst_sc_reg_access(sc_access, PMIC_READ, 1);
- if (sc_access[0].value & mask)
- *value = MUTE;
- else
- *value = UNMUTE;
- break;
- case PMIC_SND_DMIC_MUTE:
- sc_access[0].reg_addr = MICCTRL;
- mask = MASK5;
- retval = sst_sc_reg_access(sc_access, PMIC_READ, 1);
- temp_value = (sc_access[0].value & mask);
- if (temp_value == 0)
- *value = UNMUTE;
- else
- *value = MUTE;
- break;
-
- case PMIC_SND_LEFT_HP_MUTE:
- case PMIC_SND_LEFT_SPEAKER_MUTE:
- sc_access[0].reg_addr = AUD16;
- mask = MASK7;
- retval = sst_sc_reg_access(sc_access, PMIC_READ, 1);
- temp_value = sc_access[0].value & mask;
- if (temp_value == 0)
- *value = UNMUTE;
- else
- *value = MUTE;
- break;
- case PMIC_SND_RIGHT_HP_MUTE:
- case PMIC_SND_RIGHT_SPEAKER_MUTE:
- sc_access[0].reg_addr = AUD17;
- mask = MASK7;
- retval = sst_sc_reg_access(sc_access, PMIC_READ, 1);
- temp_value = sc_access[0].value & mask;
- if (temp_value == 0)
- *value = UNMUTE;
- else
- *value = MUTE;
- break;
- default:
- return -EINVAL;
- }
-
- return retval;
-}
-
-static int fs_get_vol(int dev_id, int *value)
-{
- struct sc_reg_access sc_access = {0,};
- int retval = 0, mask = 0;
-
- if (snd_pmic_ops_fs.card_status == SND_CARD_UN_INIT)
- retval = fs_init_card();
- if (retval)
- return retval;
-
- switch (dev_id) {
- case PMIC_SND_CAPTURE_VOL:
- pr_debug("PMIC_SND_CAPTURE_VOL\n");
- sc_access.reg_addr = MICLICTRL1;
- mask = (MASK5|MASK4|MASK3|MASK2|MASK1|MASK0);
- break;
- case PMIC_SND_LEFT_PB_VOL:
- pr_debug("PMIC_SND_LEFT_PB_VOL\n");
- sc_access.reg_addr = AUD16;
- mask = (MASK5|MASK4|MASK3|MASK2|MASK1|MASK0);
- break;
- case PMIC_SND_RIGHT_PB_VOL:
- pr_debug("PMIC_SND_RT_PB_VOL\n");
- sc_access.reg_addr = AUD17;
- mask = (MASK5|MASK4|MASK3|MASK2|MASK1|MASK0);
- break;
- default:
- return -EINVAL;
- }
-
- retval = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
- pr_debug("value read = 0x%x\n", sc_access.value);
- *value = (int) (sc_access.value & mask);
- pr_debug("value returned = 0x%x\n", *value);
- return retval;
-}
-
-static void fs_pmic_irq_enable(void *data)
-{
- struct snd_intelmad *intelmaddata = data;
- struct sc_reg_access sc_access[] = {
- {0x187, 0x00, MASK7},
- {0x188, 0x10, MASK4},
- {0x18b, 0x10, MASK4},
- };
-
- struct sc_reg_access sc_access_write[] = {
- {0x198, 0x00, 0x0},
- };
- pr_debug("Audio interrupt enable\n");
- sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 3);
- sst_sc_reg_access(sc_access_write, PMIC_WRITE, 1);
-
- intelmaddata->jack[0].jack_status = 0;
- /*intelmaddata->jack[1].jack_status = 0;*/
-
- jack_det_enabled = true;
- return;
-}
-
-static void fs_pmic_irq_cb(void *cb_data, u8 value)
-{
- struct mad_jack *mjack = NULL;
- struct snd_intelmad *intelmaddata = cb_data;
- unsigned int present = 0, jack_event_flag = 0, buttonpressflag = 0;
-
- mjack = &intelmaddata->jack[0];
-
- if (value & 0x4) {
- if (!jack_det_enabled)
- fs_pmic_irq_enable(intelmaddata);
-
- /* send headphone detect */
- pr_debug(":MAD headphone %d\n", value & 0x4);
- present = !(mjack->jack_status);
- mjack->jack_status = present;
- jack_event_flag = 1;
- mjack->jack.type = SND_JACK_HEADPHONE;
- }
-
- if (value & 0x2) {
- /* send short push */
- pr_debug(":MAD short push %d\n", value & 0x2);
- present = 1;
- jack_event_flag = 1;
- buttonpressflag = 1;
- mjack->jack.type = MID_JACK_HS_SHORT_PRESS;
- }
-
- if (value & 0x1) {
- /* send long push */
- pr_debug(":MAD long push %d\n", value & 0x1);
- present = 1;
- jack_event_flag = 1;
- buttonpressflag = 1;
- mjack->jack.type = MID_JACK_HS_LONG_PRESS;
- }
-
- if (value & 0x8) {
- if (!jack_det_enabled)
- fs_pmic_irq_enable(intelmaddata);
- /* send headset detect */
- pr_debug(":MAD headset = %d\n", value & 0x8);
- present = !(mjack->jack_status);
- mjack->jack_status = present;
- jack_event_flag = 1;
- mjack->jack.type = SND_JACK_HEADSET;
- }
-
-
- if (jack_event_flag)
- sst_mad_send_jack_report(&mjack->jack,
- buttonpressflag, present);
-
- return;
-}
-static int fs_jack_enable(void)
-{
- return 0;
-}
-
-struct snd_pmic_ops snd_pmic_ops_fs = {
- .set_input_dev = fs_set_selected_input_dev,
- .set_output_dev = fs_set_selected_output_dev,
- .set_mute = fs_set_mute,
- .get_mute = fs_get_mute,
- .set_vol = fs_set_vol,
- .get_vol = fs_get_vol,
- .init_card = fs_init_card,
- .set_pcm_audio_params = fs_set_pcm_audio_params,
- .set_pcm_voice_params = fs_set_pcm_voice_params,
- .set_voice_port = fs_set_voice_port,
- .set_audio_port = fs_set_audio_port,
- .power_up_pmic_pb = fs_power_up_pb,
- .power_up_pmic_cp = fs_power_up_cp,
- .power_down_pmic_pb = fs_power_down_pb,
- .power_down_pmic_cp = fs_power_down_cp,
- .power_down_pmic = fs_power_down,
- .pmic_irq_cb = fs_pmic_irq_cb,
- /*
- * Jack detection enabling
- * need be delayed till first IRQ happen.
- */
- .pmic_irq_enable = NULL,
- .pmic_jack_enable = fs_jack_enable,
-};
diff --git a/drivers/staging/intel_sst/intelmid_v1_control.c b/drivers/staging/intel_sst/intelmid_v1_control.c
deleted file mode 100644
index 9d00728..0000000
--- a/drivers/staging/intel_sst/intelmid_v1_control.c
+++ /dev/null
@@ -1,978 +0,0 @@
-/* intelmid_v1_control.c - Intel SST Driver for audio engine
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This file contains the control operations of vendor 2
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/file.h>
-#include <asm/mrst.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/control.h>
-#include <sound/initval.h>
-#include "intel_sst.h"
-#include "intel_sst_ioctl.h"
-#include "intelmid.h"
-#include "intelmid_snd_control.h"
-
-#include <linux/gpio.h>
-#define KOSKI_VOICE_CODEC_ENABLE 46
-
-enum _reg_v2 {
-
- MASTER_CLOCK_PRESCALAR = 0x205,
- SET_MASTER_AND_LR_CLK1 = 0x20b,
- SET_MASTER_AND_LR_CLK2 = 0x20c,
- MASTER_MODE_AND_DATA_DELAY = 0x20d,
- DIGITAL_INTERFACE_TO_DAI2 = 0x20e,
- CLK_AND_FS1 = 0x208,
- CLK_AND_FS2 = 0x209,
- DAI2_TO_DAC_HP = 0x210,
- HP_OP_SINGLE_ENDED = 0x224,
- ENABLE_OPDEV_CTRL = 0x226,
- ENABLE_DEV_AND_USE_XTAL = 0x227,
-
- /* Max audio subsystem (PQ49) MAX 8921 */
- AS_IP_MODE_CTL = 0xF9,
- AS_LEFT_SPKR_VOL_CTL = 0xFA, /* Mono Earpiece volume control */
- AS_RIGHT_SPKR_VOL_CTL = 0xFB,
- AS_LEFT_HP_VOL_CTL = 0xFC,
- AS_RIGHT_HP_VOL_CTL = 0xFD,
- AS_OP_MIX_CTL = 0xFE,
- AS_CONFIG = 0xFF,
-
- /* Headphone volume control & mute registers */
- VOL_CTRL_LT = 0x21c,
- VOL_CTRL_RT = 0x21d,
-
-};
-/**
- * mx_init_card - initialize the sound card
- *
- * This initializes the audio paths to known values for this sound card
- */
-static int mx_init_card(void)
-{
- struct sc_reg_access sc_access[] = {
- {0x200, 0x80, 0x00},
- {0x201, 0xC0, 0x00},
- {0x202, 0x00, 0x00},
- {0x203, 0x00, 0x00},
- {0x204, 0x02, 0x00},
- {0x205, 0x10, 0x00},
- {0x206, 0x60, 0x00},
- {0x207, 0x00, 0x00},
- {0x208, 0x90, 0x00},
- {0x209, 0x51, 0x00},
- {0x20a, 0x00, 0x00},
- {0x20b, 0x10, 0x00},
- {0x20c, 0x00, 0x00},
- {0x20d, 0x00, 0x00},
- {0x20e, 0x21, 0x00},
- {0x20f, 0x00, 0x00},
- {0x210, 0x84, 0x00},
- {0x211, 0xB3, 0x00},
- {0x212, 0x00, 0x00},
- {0x213, 0x00, 0x00},
- {0x214, 0x41, 0x00},
- {0x215, 0x00, 0x00},
- {0x216, 0x00, 0x00},
- {0x217, 0x00, 0x00},
- {0x218, 0x03, 0x00},
- {0x219, 0x03, 0x00},
- {0x21a, 0x00, 0x00},
- {0x21b, 0x00, 0x00},
- {0x21c, 0x00, 0x00},
- {0x21d, 0x00, 0x00},
- {0x21e, 0x00, 0x00},
- {0x21f, 0x00, 0x00},
- {0x220, 0x20, 0x00},
- {0x221, 0x20, 0x00},
- {0x222, 0x51, 0x00},
- {0x223, 0x20, 0x00},
- {0x224, 0x04, 0x00},
- {0x225, 0x80, 0x00},
- {0x226, 0x0F, 0x00},
- {0x227, 0x08, 0x00},
- {0xf9, 0x40, 0x00},
- {0xfa, 0x1f, 0x00},
- {0xfb, 0x1f, 0x00},
- {0xfc, 0x1f, 0x00},
- {0xfd, 0x1f, 0x00},
- {0xfe, 0x00, 0x00},
- {0xff, 0x0c, 0x00},
- };
- snd_pmic_ops_mx.card_status = SND_CARD_INIT_DONE;
- snd_pmic_ops_mx.num_channel = 2;
- snd_pmic_ops_mx.master_mute = UNMUTE;
- snd_pmic_ops_mx.mute_status = UNMUTE;
- return sst_sc_reg_access(sc_access, PMIC_WRITE, 47);
-}
-
-static int mx_enable_audiodac(int value)
-{
- struct sc_reg_access sc_access[3];
- int mute_val = 0;
- int mute_val1 = 0;
- int retval = 0;
-
- sc_access[0].reg_addr = AS_LEFT_HP_VOL_CTL;
- sc_access[1].reg_addr = AS_RIGHT_HP_VOL_CTL;
-
- if (value == UNMUTE) {
- mute_val = 0x1F;
- mute_val1 = 0x00;
- } else {
- mute_val = 0x00;
- mute_val1 = 0x40;
- }
- sc_access[0].mask = sc_access[1].mask = MASK0|MASK1|MASK2|MASK3|MASK4;
- sc_access[0].value = sc_access[1].value = (u8)mute_val;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
- if (retval)
- return retval;
- pr_debug("mute status = %d\n", snd_pmic_ops_mx.mute_status);
- if (snd_pmic_ops_mx.mute_status == MUTE ||
- snd_pmic_ops_mx.master_mute == MUTE)
- return retval;
-
- sc_access[0].reg_addr = VOL_CTRL_LT;
- sc_access[1].reg_addr = VOL_CTRL_RT;
- sc_access[0].mask = sc_access[1].mask = MASK6;
- sc_access[0].value = sc_access[1].value = mute_val1;
- if (snd_pmic_ops_mx.num_channel == 1)
- sc_access[1].value = 0x40;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-}
-
-static int mx_power_up_pb(unsigned int port)
-{
-
- int retval = 0;
- struct sc_reg_access sc_access[3];
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
- retval = mx_enable_audiodac(MUTE);
- if (retval)
- return retval;
-
- msleep(10);
-
- sc_access[0].reg_addr = AS_CONFIG;
- sc_access[0].mask = MASK7;
- sc_access[0].value = 0x80;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- if (retval)
- return retval;
-
- sc_access[0].reg_addr = ENABLE_OPDEV_CTRL;
- sc_access[0].mask = 0xff;
- sc_access[0].value = 0x3C;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- if (retval)
- return retval;
-
- sc_access[0].reg_addr = ENABLE_DEV_AND_USE_XTAL;
- sc_access[0].mask = 0x80;
- sc_access[0].value = 0x80;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- if (retval)
- return retval;
-
- return mx_enable_audiodac(UNMUTE);
-}
-
-static int mx_power_down_pb(unsigned int device)
-{
- struct sc_reg_access sc_access[3];
- int retval = 0;
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
-
- retval = mx_enable_audiodac(MUTE);
- if (retval)
- return retval;
-
- sc_access[0].reg_addr = ENABLE_OPDEV_CTRL;
- sc_access[0].mask = MASK3|MASK2;
- sc_access[0].value = 0x00;
-
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- if (retval)
- return retval;
-
- return mx_enable_audiodac(UNMUTE);
-}
-
-static int mx_power_up_cp(unsigned int port)
-{
- int retval = 0;
- struct sc_reg_access sc_access[] = {
- {ENABLE_DEV_AND_USE_XTAL, 0x80, MASK7},
- {ENABLE_OPDEV_CTRL, 0x3, 0x3},
- };
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
-
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-}
-
-static int mx_power_down_cp(unsigned int device)
-{
- struct sc_reg_access sc_access[] = {
- {ENABLE_OPDEV_CTRL, 0x00, MASK1|MASK0},
- };
- int retval = 0;
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
-
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
-}
-
-static int mx_power_down(void)
-{
- int retval = 0;
- struct sc_reg_access sc_access[3];
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
-
- retval = mx_enable_audiodac(MUTE);
- if (retval)
- return retval;
-
- sc_access[0].reg_addr = AS_CONFIG;
- sc_access[0].mask = MASK7;
- sc_access[0].value = 0x00;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- if (retval)
- return retval;
-
- sc_access[0].reg_addr = ENABLE_DEV_AND_USE_XTAL;
- sc_access[0].mask = MASK7;
- sc_access[0].value = 0x00;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- if (retval)
- return retval;
-
- sc_access[0].reg_addr = ENABLE_OPDEV_CTRL;
- sc_access[0].mask = MASK3|MASK2;
- sc_access[0].value = 0x00;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- if (retval)
- return retval;
-
- return mx_enable_audiodac(UNMUTE);
-}
-
-static int mx_set_pcm_voice_params(void)
-{
- int retval = 0;
- struct sc_reg_access sc_access[] = {
- {0x200, 0x80, 0x00},
- {0x201, 0xC0, 0x00},
- {0x202, 0x00, 0x00},
- {0x203, 0x00, 0x00},
- {0x204, 0x0e, 0x00},
- {0x205, 0x20, 0x00},
- {0x206, 0x8f, 0x00},
- {0x207, 0x21, 0x00},
- {0x208, 0x18, 0x00},
- {0x209, 0x32, 0x00},
- {0x20a, 0x00, 0x00},
- {0x20b, 0x5A, 0x00},
- {0x20c, 0xBE, 0x00},/* 0x00 -> 0xBE Koski */
- {0x20d, 0x00, 0x00}, /* DAI2 'off' */
- {0x20e, 0x40, 0x00},
- {0x20f, 0x00, 0x00},
- {0x210, 0x84, 0x00},
- {0x211, 0x33, 0x00}, /* Voice filter */
- {0x212, 0x00, 0x00},
- {0x213, 0x00, 0x00},
- {0x214, 0x41, 0x00},
- {0x215, 0x00, 0x00},
- {0x216, 0x00, 0x00},
- {0x217, 0x20, 0x00},
- {0x218, 0x00, 0x00},
- {0x219, 0x00, 0x00},
- {0x21a, 0x40, 0x00},
- {0x21b, 0x40, 0x00},
- {0x21c, 0x09, 0x00},
- {0x21d, 0x09, 0x00},
- {0x21e, 0x00, 0x00},
- {0x21f, 0x00, 0x00},
- {0x220, 0x00, 0x00}, /* Microphone configurations */
- {0x221, 0x00, 0x00}, /* Microphone configurations */
- {0x222, 0x50, 0x00}, /* Microphone configurations */
- {0x223, 0x21, 0x00}, /* Microphone configurations */
- {0x224, 0x00, 0x00},
- {0x225, 0x80, 0x00},
- {0xf9, 0x40, 0x00},
- {0xfa, 0x19, 0x00},
- {0xfb, 0x19, 0x00},
- {0xfc, 0x12, 0x00},
- {0xfd, 0x12, 0x00},
- {0xfe, 0x00, 0x00},
- };
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
- pr_debug("SST DBG:mx_set_pcm_voice_params called\n");
- return sst_sc_reg_access(sc_access, PMIC_WRITE, 44);
-}
-
-static int mx_set_pcm_audio_params(int sfreq, int word_size, int num_channel)
-{
- int retval = 0;
-
- int config1 = 0, config2 = 0, filter = 0xB3;
- struct sc_reg_access sc_access[5];
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
-
- switch (sfreq) {
- case 8000:
- config1 = 0x10;
- config2 = 0x00;
- filter = 0x33;
- break;
- case 11025:
- config1 = 0x16;
- config2 = 0x0d;
- break;
- case 12000:
- config1 = 0x18;
- config2 = 0x00;
- break;
- case 16000:
- config1 = 0x20;
- config2 = 0x00;
- break;
- case 22050:
- config1 = 0x2c;
- config2 = 0x1a;
- break;
- case 24000:
- config1 = 0x30;
- config2 = 0x00;
- break;
- case 32000:
- config1 = 0x40;
- config2 = 0x00;
- break;
- case 44100:
- config1 = 0x58;
- config2 = 0x33;
- break;
- case 48000:
- config1 = 0x60;
- config2 = 0x00;
- break;
- }
- snd_pmic_ops_mx.num_channel = num_channel;
- /*mute the right channel if MONO*/
- if (snd_pmic_ops_mx.num_channel == 1) {
- sc_access[0].reg_addr = VOL_CTRL_RT;
- sc_access[0].value = 0x40;
- sc_access[0].mask = MASK6;
-
- sc_access[1].reg_addr = 0x224;
- sc_access[1].value = 0x05;
- sc_access[1].mask = MASK0|MASK1|MASK2;
-
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
- if (retval)
- return retval;
- } else {
- sc_access[0].reg_addr = VOL_CTRL_RT;
- sc_access[0].value = 0x00;
- sc_access[0].mask = MASK6;
-
- sc_access[1].reg_addr = 0x224;
- sc_access[1].value = 0x04;
- sc_access[1].mask = MASK0|MASK1|MASK2;
-
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
- if (retval)
- return retval;
- }
- sc_access[0].reg_addr = 0x206;
- sc_access[0].value = config1;
- sc_access[1].reg_addr = 0x207;
- sc_access[1].value = config2;
-
- if (word_size == 16) {
- sc_access[2].value = 0x51;
- sc_access[3].value = 0x31;
- } else if (word_size == 24) {
- sc_access[2].value = 0x52;
- sc_access[3].value = 0x92;
- }
-
- sc_access[2].reg_addr = 0x209;
- sc_access[3].reg_addr = 0x20e;
-
- sc_access[4].reg_addr = 0x211;
- sc_access[4].value = filter;
-
- return sst_sc_reg_access(sc_access, PMIC_WRITE, 5);
-}
-
-static int mx_set_selected_output_dev(u8 dev_id)
-{
- struct sc_reg_access sc_access[2];
- int num_reg = 0;
- int retval = 0;
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
-
- pr_debug("mx_set_selected_output_dev dev_id:0x%x\n", dev_id);
- snd_pmic_ops_mx.output_dev_id = dev_id;
- switch (dev_id) {
- case STEREO_HEADPHONE:
- sc_access[0].reg_addr = 0xFF;
- sc_access[0].value = 0x8C;
- sc_access[0].mask =
- MASK2|MASK3|MASK5|MASK6|MASK4;
-
- num_reg = 1;
- break;
- case MONO_EARPIECE:
- case INTERNAL_SPKR:
- sc_access[0].reg_addr = 0xFF;
- sc_access[0].value = 0xb0;
- sc_access[0].mask = MASK2|MASK3|MASK5|MASK6|MASK4;
-
- num_reg = 1;
- break;
- case RECEIVER:
- pr_debug("RECEIVER Koski selected\n");
-
- /* configuration - AS enable, receiver enable */
- sc_access[0].reg_addr = 0xFF;
- sc_access[0].value = 0x81;
- sc_access[0].mask = 0xff;
-
- num_reg = 1;
- break;
- default:
- pr_err("Not a valid output dev\n");
- return 0;
- }
- return sst_sc_reg_access(sc_access, PMIC_WRITE, num_reg);
-}
-
-
-static int mx_set_voice_port(int status)
-{
- int retval = 0;
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
- if (status == ACTIVATE)
- retval = mx_set_pcm_voice_params();
-
- return retval;
-}
-
-static int mx_set_audio_port(int status)
-{
- return 0;
-}
-
-static int mx_set_selected_input_dev(u8 dev_id)
-{
- struct sc_reg_access sc_access[2];
- int num_reg = 0;
- int retval = 0;
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
- snd_pmic_ops_mx.input_dev_id = dev_id;
- pr_debug("mx_set_selected_input_dev dev_id:0x%x\n", dev_id);
-
- switch (dev_id) {
- case AMIC:
- sc_access[0].reg_addr = 0x223;
- sc_access[0].value = 0x00;
- sc_access[0].mask = MASK7|MASK6|MASK5|MASK4|MASK0;
- sc_access[1].reg_addr = 0x222;
- sc_access[1].value = 0x50;
- sc_access[1].mask = MASK7|MASK6|MASK5|MASK4;
- num_reg = 2;
- break;
-
- case HS_MIC:
- sc_access[0].reg_addr = 0x223;
- sc_access[0].value = 0x20;
- sc_access[0].mask = MASK7|MASK6|MASK5|MASK4|MASK0;
- sc_access[1].reg_addr = 0x222;
- sc_access[1].value = 0x51;
- sc_access[1].mask = MASK7|MASK6|MASK5|MASK4;
- num_reg = 2;
- break;
- case DMIC:
- sc_access[1].reg_addr = 0x222;
- sc_access[1].value = 0x00;
- sc_access[1].mask = MASK7|MASK6|MASK5|MASK4|MASK0;
- sc_access[0].reg_addr = 0x223;
- sc_access[0].value = 0x20;
- sc_access[0].mask = MASK7|MASK6|MASK5|MASK4|MASK0;
- num_reg = 2;
- break;
- }
- return sst_sc_reg_access(sc_access, PMIC_WRITE, num_reg);
-}
-
-static int mx_set_mute(int dev_id, u8 value)
-{
- struct sc_reg_access sc_access[5];
- int num_reg = 0;
- int retval = 0;
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
-
-
- pr_debug("set_mute dev_id:0x%x , value:%d\n", dev_id, value);
-
- switch (dev_id) {
- case PMIC_SND_DMIC_MUTE:
- case PMIC_SND_AMIC_MUTE:
- case PMIC_SND_HP_MIC_MUTE:
- sc_access[0].reg_addr = 0x220;
- sc_access[1].reg_addr = 0x221;
- sc_access[2].reg_addr = 0x223;
- if (value == MUTE) {
- sc_access[0].value = 0x00;
- sc_access[1].value = 0x00;
- if (snd_pmic_ops_mx.input_dev_id == DMIC)
- sc_access[2].value = 0x00;
- else
- sc_access[2].value = 0x20;
- } else {
- sc_access[0].value = 0x20;
- sc_access[1].value = 0x20;
- if (snd_pmic_ops_mx.input_dev_id == DMIC)
- sc_access[2].value = 0x20;
- else
- sc_access[2].value = 0x00;
- }
- sc_access[0].mask = MASK5|MASK6;
- sc_access[1].mask = MASK5|MASK6;
- sc_access[2].mask = MASK5|MASK6;
- num_reg = 3;
- break;
- case PMIC_SND_LEFT_SPEAKER_MUTE:
- case PMIC_SND_LEFT_HP_MUTE:
- sc_access[0].reg_addr = VOL_CTRL_LT;
- if (value == MUTE)
- sc_access[0].value = 0x40;
- else
- sc_access[0].value = 0x00;
- sc_access[0].mask = MASK6;
- num_reg = 1;
- snd_pmic_ops_mx.mute_status = value;
- break;
- case PMIC_SND_RIGHT_SPEAKER_MUTE:
- case PMIC_SND_RIGHT_HP_MUTE:
- sc_access[0].reg_addr = VOL_CTRL_RT;
- if (snd_pmic_ops_mx.num_channel == 1)
- value = MUTE;
- if (value == MUTE)
- sc_access[0].value = 0x40;
- else
- sc_access[0].value = 0x00;
- sc_access[0].mask = MASK6;
- num_reg = 1;
- snd_pmic_ops_mx.mute_status = value;
- break;
- case PMIC_SND_MUTE_ALL:
- sc_access[0].reg_addr = VOL_CTRL_RT;
- sc_access[1].reg_addr = VOL_CTRL_LT;
- sc_access[2].reg_addr = 0x220;
- sc_access[3].reg_addr = 0x221;
- sc_access[4].reg_addr = 0x223;
- snd_pmic_ops_mx.master_mute = value;
- if (value == MUTE) {
- sc_access[0].value = sc_access[1].value = 0x40;
- sc_access[2].value = 0x00;
- sc_access[3].value = 0x00;
- if (snd_pmic_ops_mx.input_dev_id == DMIC)
- sc_access[4].value = 0x00;
- else
- sc_access[4].value = 0x20;
-
- } else {
- sc_access[0].value = sc_access[1].value = 0x00;
- sc_access[2].value = sc_access[3].value = 0x20;
- sc_access[4].value = 0x20;
- if (snd_pmic_ops_mx.input_dev_id == DMIC)
- sc_access[4].value = 0x20;
- else
- sc_access[4].value = 0x00;
-
-
- }
- if (snd_pmic_ops_mx.num_channel == 1)
- sc_access[0].value = 0x40;
- sc_access[0].mask = sc_access[1].mask = MASK6;
- sc_access[2].mask = MASK5|MASK6;
- sc_access[3].mask = MASK5|MASK6|MASK2|MASK4;
- sc_access[4].mask = MASK5|MASK6|MASK4;
-
- num_reg = 5;
- break;
- case PMIC_SND_RECEIVER_MUTE:
- sc_access[0].reg_addr = VOL_CTRL_RT;
- if (value == MUTE)
- sc_access[0].value = 0x40;
- else
- sc_access[0].value = 0x00;
- sc_access[0].mask = MASK6;
- num_reg = 1;
- break;
- }
-
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, num_reg);
-}
-
-static int mx_set_vol(int dev_id, int value)
-{
- struct sc_reg_access sc_access[2] = {{0},};
- int num_reg = 0;
- int retval = 0;
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
- pr_debug("set_vol dev_id:0x%x ,value:%d\n", dev_id, value);
- switch (dev_id) {
- case PMIC_SND_RECEIVER_VOL:
- return 0;
- break;
- case PMIC_SND_CAPTURE_VOL:
- sc_access[0].reg_addr = 0x220;
- sc_access[1].reg_addr = 0x221;
- sc_access[0].value = sc_access[1].value = -value;
- sc_access[0].mask = sc_access[1].mask =
- (MASK0|MASK1|MASK2|MASK3|MASK4);
- num_reg = 2;
- break;
- case PMIC_SND_LEFT_PB_VOL:
- sc_access[0].value = -value;
- sc_access[0].reg_addr = VOL_CTRL_LT;
- sc_access[0].mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
- num_reg = 1;
- break;
- case PMIC_SND_RIGHT_PB_VOL:
- sc_access[0].value = -value;
- sc_access[0].reg_addr = VOL_CTRL_RT;
- sc_access[0].mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
- if (snd_pmic_ops_mx.num_channel == 1) {
- sc_access[0].value = 0x40;
- sc_access[0].mask = MASK6;
- sc_access[0].reg_addr = VOL_CTRL_RT;
- }
- num_reg = 1;
- break;
- }
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, num_reg);
-}
-
-static int mx_get_mute(int dev_id, u8 *value)
-{
- struct sc_reg_access sc_access[4] = {{0},};
- int retval = 0, num_reg = 0, mask = 0;
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
- switch (dev_id) {
- case PMIC_SND_DMIC_MUTE:
- case PMIC_SND_AMIC_MUTE:
- case PMIC_SND_HP_MIC_MUTE:
- sc_access[0].reg_addr = 0x220;
- mask = MASK5|MASK6;
- num_reg = 1;
- retval = sst_sc_reg_access(sc_access, PMIC_READ, num_reg);
- if (retval)
- return retval;
- *value = sc_access[0].value & mask;
- if (*value)
- *value = UNMUTE;
- else
- *value = MUTE;
- return retval;
- case PMIC_SND_LEFT_HP_MUTE:
- case PMIC_SND_LEFT_SPEAKER_MUTE:
- sc_access[0].reg_addr = VOL_CTRL_LT;
- num_reg = 1;
- mask = MASK6;
- break;
- case PMIC_SND_RIGHT_HP_MUTE:
- case PMIC_SND_RIGHT_SPEAKER_MUTE:
- sc_access[0].reg_addr = VOL_CTRL_RT;
- num_reg = 1;
- mask = MASK6;
- break;
- }
- retval = sst_sc_reg_access(sc_access, PMIC_READ, num_reg);
- if (retval)
- return retval;
- *value = sc_access[0].value & mask;
- if (*value)
- *value = MUTE;
- else
- *value = UNMUTE;
- return retval;
-}
-
-static int mx_get_vol(int dev_id, int *value)
-{
- struct sc_reg_access sc_access = {0,};
- int retval = 0, mask = 0, num_reg = 0;
-
- if (snd_pmic_ops_mx.card_status == SND_CARD_UN_INIT) {
- retval = mx_init_card();
- if (retval)
- return retval;
- }
- switch (dev_id) {
- case PMIC_SND_CAPTURE_VOL:
- sc_access.reg_addr = 0x220;
- mask = MASK0|MASK1|MASK2|MASK3|MASK4;
- num_reg = 1;
- break;
- case PMIC_SND_LEFT_PB_VOL:
- sc_access.reg_addr = VOL_CTRL_LT;
- mask = MASK0|MASK1|MASK2|MASK3|MASK4|MASK5;
- num_reg = 1;
- break;
- case PMIC_SND_RIGHT_PB_VOL:
- sc_access.reg_addr = VOL_CTRL_RT;
- mask = MASK0|MASK1|MASK2|MASK3|MASK4|MASK5;
- num_reg = 1;
- break;
- }
- retval = sst_sc_reg_access(&sc_access, PMIC_READ, num_reg);
- if (retval)
- return retval;
- *value = -(sc_access.value & mask);
- pr_debug("get volume value extracted %d\n", *value);
- return retval;
-}
-
-static u8 mx_get_jack_status(void)
-{
- struct sc_reg_access sc_access_read = {0,};
-
- sc_access_read.reg_addr = 0x201;
- sst_sc_reg_access(&sc_access_read, PMIC_READ, 1);
- pr_debug("value returned = 0x%x\n", sc_access_read.value);
- return sc_access_read.value;
-}
-
-static void mx_pmic_irq_enable(void *data)
-{
- struct snd_intelmad *intelmaddata = data;
-
- intelmaddata->jack_prev_state = 0xc0;
- return;
-}
-
-static void mx_pmic_irq_cb(void *cb_data, u8 intsts)
-{
- u8 jack_cur_status, jack_prev_state = 0;
- struct mad_jack *mjack = NULL;
- unsigned int present = 0, jack_event_flag = 0, buttonpressflag = 0;
- time_t timediff;
- struct snd_intelmad *intelmaddata = cb_data;
-
- mjack = &intelmaddata->jack[0];
- if (intsts & 0x2) {
- jack_cur_status = mx_get_jack_status();
- jack_prev_state = intelmaddata->jack_prev_state;
- if ((jack_prev_state == 0xc0) && (jack_cur_status == 0x40)) {
- /*headset insert detected. */
- pr_debug("MAD headset inserted\n");
- present = 1;
- jack_event_flag = 1;
- mjack->jack_status = 1;
- mjack->jack.type = SND_JACK_HEADSET;
- }
-
- if ((jack_prev_state == 0xc0) && (jack_cur_status == 0x00)) {
- /* headphone insert detected. */
- pr_debug("MAD headphone inserted\n");
- present = 1;
- jack_event_flag = 1;
- mjack->jack.type = SND_JACK_HEADPHONE;
- }
-
- if ((jack_prev_state == 0x40) && (jack_cur_status == 0xc0)) {
- /* headset remove detected. */
- pr_debug("MAD headset removed\n");
-
- present = 0;
- jack_event_flag = 1;
- mjack->jack_status = 0;
- mjack->jack.type = SND_JACK_HEADSET;
- }
-
- if ((jack_prev_state == 0x00) && (jack_cur_status == 0xc0)) {
- /* headphone remove detected. */
- pr_debug("MAD headphone removed\n");
- present = 0;
- jack_event_flag = 1;
- mjack->jack.type = SND_JACK_HEADPHONE;
- }
-
- if ((jack_prev_state == 0x40) && (jack_cur_status == 0x00)) {
- /* button pressed */
- do_gettimeofday(&mjack->buttonpressed);
- pr_debug("MAD button press detected\n");
- }
-
- if ((jack_prev_state == 0x00) && (jack_cur_status == 0x40)) {
- if (mjack->jack_status) {
- /*button pressed */
- do_gettimeofday(
- &mjack->buttonreleased);
- /*button pressed */
- pr_debug("MAD Button Released detected\n");
- timediff = mjack->buttonreleased.tv_sec -
- mjack->buttonpressed.tv_sec;
- buttonpressflag = 1;
-
- if (timediff > 1) {
- pr_debug("MAD long press dtd\n");
- /* send headphone detect/undetect */
- present = 1;
- jack_event_flag = 1;
- mjack->jack.type =
- MID_JACK_HS_LONG_PRESS;
- } else {
- pr_debug("MAD short press dtd\n");
- /* send headphone detect/undetect */
- present = 1;
- jack_event_flag = 1;
- mjack->jack.type =
- MID_JACK_HS_SHORT_PRESS;
- }
- } else {
-				/* Workaround for a Maxim hw issue:
-				 * 0x00 to 0x40 is not a valid transition
-				 * for headset insertion. */
- /*headset insert detected. */
- pr_debug("MAD headset inserted\n");
- present = 1;
- jack_event_flag = 1;
- mjack->jack_status = 1;
- mjack->jack.type = SND_JACK_HEADSET;
- }
- }
- intelmaddata->jack_prev_state = jack_cur_status;
- pr_debug("mx_pmic_irq_cb prv_state= 0x%x\n",
- intelmaddata->jack_prev_state);
- }
-
- if (jack_event_flag)
- sst_mad_send_jack_report(&mjack->jack,
- buttonpressflag, present);
-}
-static int mx_jack_enable(void)
-{
- return 0;
-}
-
-struct snd_pmic_ops snd_pmic_ops_mx = {
- .set_input_dev = mx_set_selected_input_dev,
- .set_output_dev = mx_set_selected_output_dev,
- .set_mute = mx_set_mute,
- .get_mute = mx_get_mute,
- .set_vol = mx_set_vol,
- .get_vol = mx_get_vol,
- .init_card = mx_init_card,
- .set_pcm_audio_params = mx_set_pcm_audio_params,
- .set_pcm_voice_params = mx_set_pcm_voice_params,
- .set_voice_port = mx_set_voice_port,
- .set_audio_port = mx_set_audio_port,
- .power_up_pmic_pb = mx_power_up_pb,
- .power_up_pmic_cp = mx_power_up_cp,
- .power_down_pmic_pb = mx_power_down_pb,
- .power_down_pmic_cp = mx_power_down_cp,
- .power_down_pmic = mx_power_down,
- .pmic_irq_cb = mx_pmic_irq_cb,
- .pmic_irq_enable = mx_pmic_irq_enable,
- .pmic_jack_enable = mx_jack_enable,
-};
-
diff --git a/drivers/staging/intel_sst/intelmid_v2_control.c b/drivers/staging/intel_sst/intelmid_v2_control.c
deleted file mode 100644
index 46ab55e..0000000
--- a/drivers/staging/intel_sst/intelmid_v2_control.c
+++ /dev/null
@@ -1,1156 +0,0 @@
-/*
- * intelmid_v2_control.c - Intel Sound card driver for MID
- *
- * Copyright (C) 2008-10 Intel Corp
- * Authors: Vinod Koul <vinod.koul@intel.com>
- * Harsha Priya <priya.harsha@intel.com>
- * KP Jeeja <jeeja.kp@intel.com>
- * Dharageswari R <dharageswari.r@intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This file contains the control operations of vendor 3
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/gpio.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/file.h>
-#include <sound/control.h>
-#include "intel_sst.h"
-#include "intelmid_snd_control.h"
-#include "intelmid.h"
-
-enum reg_v3 {
- VAUDIOCNT = 0x51,
- VOICEPORT1 = 0x100,
- VOICEPORT2 = 0x101,
- AUDIOPORT1 = 0x102,
- AUDIOPORT2 = 0x103,
- ADCSAMPLERATE = 0x104,
- DMICCTRL1 = 0x105,
- DMICCTRL2 = 0x106,
- MICCTRL = 0x107,
- MICSELVOL = 0x108,
- LILSEL = 0x109,
- LIRSEL = 0x10a,
- VOICEVOL = 0x10b,
- AUDIOLVOL = 0x10c,
- AUDIORVOL = 0x10d,
- LMUTE = 0x10e,
- RMUTE = 0x10f,
- POWERCTRL1 = 0x110,
- POWERCTRL2 = 0x111,
- DRVPOWERCTRL = 0x112,
- VREFPLL = 0x113,
- PCMBUFCTRL = 0x114,
- SOFTMUTE = 0x115,
- DTMFPATH = 0x116,
- DTMFVOL = 0x117,
- DTMFFREQ = 0x118,
- DTMFHFREQ = 0x119,
- DTMFLFREQ = 0x11a,
- DTMFCTRL = 0x11b,
- DTMFASON = 0x11c,
- DTMFASOFF = 0x11d,
- DTMFASINUM = 0x11e,
- CLASSDVOL = 0x11f,
- VOICEDACAVOL = 0x120,
- AUDDACAVOL = 0x121,
- LOMUTEVOL = 0x122,
- HPLVOL = 0x123,
- HPRVOL = 0x124,
- MONOVOL = 0x125,
- LINEOUTMIXVOL = 0x126,
- EPMIXVOL = 0x127,
- LINEOUTLSEL = 0x128,
- LINEOUTRSEL = 0x129,
- EPMIXOUTSEL = 0x12a,
- HPLMIXSEL = 0x12b,
- HPRMIXSEL = 0x12c,
- LOANTIPOP = 0x12d,
- AUXDBNC = 0x12f,
-};
-
-static void nc_set_amp_power(int power)
-{
- if (snd_pmic_ops_nc.gpio_amp)
- gpio_set_value(snd_pmic_ops_nc.gpio_amp, power);
-}
-
-/****
- * nc_init_card - initialize the sound card
- *
- * This initializes the audio paths to known values for this sound card
- */
-static int nc_init_card(void)
-{
- struct sc_reg_access sc_access[] = {
- {VAUDIOCNT, 0x25, 0},
- {VOICEPORT1, 0x00, 0},
- {VOICEPORT2, 0x00, 0},
- {AUDIOPORT1, 0x98, 0},
- {AUDIOPORT2, 0x09, 0},
- {AUDIOLVOL, 0x00, 0},
- {AUDIORVOL, 0x00, 0},
- {LMUTE, 0x03, 0},
- {RMUTE, 0x03, 0},
- {POWERCTRL1, 0x00, 0},
- {POWERCTRL2, 0x00, 0},
- {DRVPOWERCTRL, 0x00, 0},
- {VREFPLL, 0x10, 0},
- {HPLMIXSEL, 0xee, 0},
- {HPRMIXSEL, 0xf6, 0},
- {PCMBUFCTRL, 0x0, 0},
- {VOICEVOL, 0x0e, 0},
- {HPLVOL, 0x06, 0},
- {HPRVOL, 0x06, 0},
- {MICCTRL, 0x51, 0x00},
- {ADCSAMPLERATE, 0x8B, 0x00},
- {MICSELVOL, 0x5B, 0x00},
- {LILSEL, 0x06, 0},
- {LIRSEL, 0x46, 0},
- {LOANTIPOP, 0x00, 0},
- {DMICCTRL1, 0x40, 0},
- {AUXDBNC, 0xff, 0},
- };
- snd_pmic_ops_nc.card_status = SND_CARD_INIT_DONE;
- snd_pmic_ops_nc.master_mute = UNMUTE;
- snd_pmic_ops_nc.mute_status = UNMUTE;
- sst_sc_reg_access(sc_access, PMIC_WRITE, 27);
- mutex_init(&snd_pmic_ops_nc.lock);
- pr_debug("init complete!!\n");
- return 0;
-}
-
-static int nc_enable_audiodac(int value)
-{
- struct sc_reg_access sc_access[3];
- int mute_val = 0;
-
- if (snd_pmic_ops_nc.mute_status == MUTE)
- return 0;
-
- if (((snd_pmic_ops_nc.output_dev_id == MONO_EARPIECE) ||
- (snd_pmic_ops_nc.output_dev_id == INTERNAL_SPKR)) &&
- (value == UNMUTE))
- return 0;
- if (value == UNMUTE) {
-		/* unmute: clear bit 2 of the mute registers */
- mute_val = 0x00;
- } else {
-		/* mute: set bit 2 of the mute registers */
- mute_val = 0x04;
-
- }
- sc_access[0].reg_addr = LMUTE;
- sc_access[1].reg_addr = RMUTE;
- sc_access[0].mask = sc_access[1].mask = MASK2;
- sc_access[0].value = sc_access[1].value = mute_val;
-
- if (snd_pmic_ops_nc.num_channel == 1)
- sc_access[1].value = 0x04;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-
-}
-
-static int nc_power_up_pb(unsigned int port)
-{
- struct sc_reg_access sc_access[7];
- int retval = 0;
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
- if (port == 0xFF)
- return 0;
- mutex_lock(&snd_pmic_ops_nc.lock);
- nc_enable_audiodac(MUTE);
- msleep(30);
-
- pr_debug("powering up pb....\n");
-
- sc_access[0].reg_addr = VAUDIOCNT;
- sc_access[0].value = 0x27;
- sc_access[0].mask = 0x27;
- sc_access[1].reg_addr = VREFPLL;
- if (port == 0) {
- sc_access[1].value = 0x3A;
- sc_access[1].mask = 0x3A;
- } else if (port == 1) {
- sc_access[1].value = 0x35;
- sc_access[1].mask = 0x35;
- }
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-
-
-
- sc_access[0].reg_addr = POWERCTRL1;
- if (port == 0) {
- sc_access[0].value = 0x40;
- sc_access[0].mask = 0x40;
- } else if (port == 1) {
- sc_access[0].value = 0x01;
- sc_access[0].mask = 0x01;
- }
- sc_access[1].reg_addr = POWERCTRL2;
- sc_access[1].value = 0x0C;
- sc_access[1].mask = 0x0C;
-
- sc_access[2].reg_addr = DRVPOWERCTRL;
- sc_access[2].value = 0x86;
- sc_access[2].mask = 0x86;
-
- sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 3);
-
- msleep(30);
-
- snd_pmic_ops_nc.pb_on = 1;
-
- /*
- * There is a mismatch between Playback Sources and the enumerated
-	 * values of output sources. This mismatch causes the ALSA upper
-	 * layer to send item 1 for Internal Speaker, while the expected
-	 * enumeration is 2. For now, treat MONO_EARPIECE and INTERNAL_SPKR
-	 * identically and power up the needed resources.
- */
- if (snd_pmic_ops_nc.output_dev_id == MONO_EARPIECE ||
- snd_pmic_ops_nc.output_dev_id == INTERNAL_SPKR)
- nc_set_amp_power(1);
- nc_enable_audiodac(UNMUTE);
- mutex_unlock(&snd_pmic_ops_nc.lock);
- return 0;
-}
-
-static int nc_power_up_cp(unsigned int port)
-{
- struct sc_reg_access sc_access[5];
- int retval = 0;
-
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
-
- pr_debug("powering up cp....\n");
-
- if (port == 0xFF)
- return 0;
- sc_access[0].reg_addr = VAUDIOCNT;
- sc_access[0].value = 0x27;
- sc_access[0].mask = 0x27;
- sc_access[1].reg_addr = VREFPLL;
- if (port == 0) {
- sc_access[1].value = 0x3E;
- sc_access[1].mask = 0x3E;
- } else if (port == 1) {
- sc_access[1].value = 0x35;
- sc_access[1].mask = 0x35;
- }
-
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-
-
- sc_access[0].reg_addr = POWERCTRL1;
- if (port == 0) {
- sc_access[0].value = 0xB4;
- sc_access[0].mask = 0xB4;
- } else if (port == 1) {
- sc_access[0].value = 0xBF;
- sc_access[0].mask = 0xBF;
- }
- sc_access[1].reg_addr = POWERCTRL2;
- if (port == 0) {
- sc_access[1].value = 0x0C;
- sc_access[1].mask = 0x0C;
- } else if (port == 1) {
- sc_access[1].value = 0x02;
- sc_access[1].mask = 0x02;
- }
-
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-
-}
-
-static int nc_power_down(void)
-{
- int retval = 0;
- struct sc_reg_access sc_access[5];
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
- nc_enable_audiodac(MUTE);
-
-
- pr_debug("powering dn nc_power_down ....\n");
-
- if (snd_pmic_ops_nc.output_dev_id == MONO_EARPIECE ||
- snd_pmic_ops_nc.output_dev_id == INTERNAL_SPKR)
- nc_set_amp_power(0);
-
- msleep(30);
-
- sc_access[0].reg_addr = DRVPOWERCTRL;
- sc_access[0].value = 0x00;
- sc_access[0].mask = 0x00;
-
- sst_sc_reg_access(sc_access, PMIC_WRITE, 1);
-
- sc_access[0].reg_addr = POWERCTRL1;
- sc_access[0].value = 0x00;
- sc_access[0].mask = 0x00;
-
- sc_access[1].reg_addr = POWERCTRL2;
- sc_access[1].value = 0x00;
- sc_access[1].mask = 0x00;
-
-
-
- sst_sc_reg_access(sc_access, PMIC_WRITE, 2);
-
- msleep(30);
- sc_access[0].reg_addr = VREFPLL;
- sc_access[0].value = 0x10;
- sc_access[0].mask = 0x10;
-
- sc_access[1].reg_addr = VAUDIOCNT;
- sc_access[1].value = 0x25;
- sc_access[1].mask = 0x25;
-
-
- retval = sst_sc_reg_access(sc_access, PMIC_WRITE, 2);
-
- msleep(30);
- return nc_enable_audiodac(UNMUTE);
-}
-
-static int nc_power_down_pb(unsigned int device)
-{
-
- int retval = 0;
- struct sc_reg_access sc_access[5];
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- pr_debug("powering dn pb....\n");
- mutex_lock(&snd_pmic_ops_nc.lock);
- nc_enable_audiodac(MUTE);
-
-
- msleep(30);
-
-
- sc_access[0].reg_addr = DRVPOWERCTRL;
- sc_access[0].value = 0x00;
- sc_access[0].mask = 0x00;
-
- sst_sc_reg_access(sc_access, PMIC_WRITE, 1);
-
- msleep(30);
-
- sc_access[0].reg_addr = POWERCTRL1;
- sc_access[0].value = 0x00;
- sc_access[0].mask = 0x41;
-
- sc_access[1].reg_addr = POWERCTRL2;
- sc_access[1].value = 0x00;
- sc_access[1].mask = 0x0C;
-
- sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2);
-
- msleep(30);
-
- snd_pmic_ops_nc.pb_on = 0;
-
- nc_enable_audiodac(UNMUTE);
- mutex_unlock(&snd_pmic_ops_nc.lock);
- return 0;
-}
-
-static int nc_power_down_cp(unsigned int device)
-{
- struct sc_reg_access sc_access[] = {
- {POWERCTRL1, 0x00, 0xBE},
- {POWERCTRL2, 0x00, 0x02},
- };
- int retval = 0;
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- pr_debug("powering dn cp....\n");
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
-}
-
-static int nc_set_pcm_voice_params(void)
-{
- struct sc_reg_access sc_access[] = {
- {0x100, 0xD5, 0},
- {0x101, 0x08, 0},
- {0x104, 0x03, 0},
- {0x107, 0x10, 0},
- {0x10B, 0x0E, 0},
- {0x10E, 0x03, 0},
- {0x10F, 0x03, 0},
- {0x114, 0x13, 0},
- {0x115, 0x00, 0},
- {0x128, 0xFE, 0},
- {0x129, 0xFE, 0},
- {0x12A, 0xFE, 0},
- {0x12B, 0xDE, 0},
- {0x12C, 0xDE, 0},
- };
- int retval = 0;
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- sst_sc_reg_access(sc_access, PMIC_WRITE, 14);
- pr_debug("Voice parameters set successfully!!\n");
- return 0;
-}
-
-
-static int nc_set_pcm_audio_params(int sfreq, int word_size, int num_channel)
-{
- int config2 = 0;
- struct sc_reg_access sc_access;
- int retval = 0;
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- switch (sfreq) {
- case 8000:
- config2 = 0x00;
- break;
- case 11025:
- config2 = 0x01;
- break;
- case 12000:
- config2 = 0x02;
- break;
- case 16000:
- config2 = 0x03;
- break;
- case 22050:
- config2 = 0x04;
- break;
- case 24000:
- config2 = 0x05;
- break;
- case 32000:
- config2 = 0x07;
- break;
- case 44100:
- config2 = 0x08;
- break;
- case 48000:
- config2 = 0x09;
- break;
- }
-
- snd_pmic_ops_nc.num_channel = num_channel;
- if (snd_pmic_ops_nc.num_channel == 1) {
-
- sc_access.value = 0x07;
- sc_access.reg_addr = RMUTE;
- pr_debug("RIGHT_HP_MUTE value%d\n", sc_access.value);
- sc_access.mask = MASK2;
- sst_sc_reg_access(&sc_access, PMIC_READ_MODIFY, 1);
- } else {
- sc_access.value = 0x00;
- sc_access.reg_addr = RMUTE;
- pr_debug("RIGHT_HP_MUTE value %d\n", sc_access.value);
- sc_access.mask = MASK2;
- sst_sc_reg_access(&sc_access, PMIC_READ_MODIFY, 1);
-
-
- }
-
- pr_debug("word_size = %d\n", word_size);
-
- if (word_size == 24) {
- sc_access.reg_addr = AUDIOPORT2;
- sc_access.value = config2 | 0x10;
- sc_access.mask = 0x1F;
- } else {
- sc_access.value = config2;
- sc_access.mask = 0x1F;
- sc_access.reg_addr = AUDIOPORT2;
- }
- sst_sc_reg_access(&sc_access, PMIC_READ_MODIFY, 1);
-
- pr_debug("word_size = %d\n", word_size);
- sc_access.reg_addr = AUDIOPORT1;
- sc_access.mask = MASK5|MASK4|MASK1|MASK0;
- if (word_size == 16)
- sc_access.value = 0x98;
- else if (word_size == 24)
- sc_access.value = 0xAB;
-
- return sst_sc_reg_access(&sc_access, PMIC_READ_MODIFY, 1);
-
-
-
-}
-
-static int nc_set_selected_output_dev(u8 value)
-{
- struct sc_reg_access sc_access_HP[] = {
- {LMUTE, 0x02, 0x06},
- {RMUTE, 0x02, 0x06},
- {DRVPOWERCTRL, 0x06, 0x06},
- };
- struct sc_reg_access sc_access_IS[] = {
- {LMUTE, 0x04, 0x06},
- {RMUTE, 0x04, 0x06},
- {DRVPOWERCTRL, 0x00, 0x06},
- };
- int retval = 0;
-
- snd_pmic_ops_nc.output_dev_id = value;
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
- pr_debug("nc set selected output:%d\n", value);
- mutex_lock(&snd_pmic_ops_nc.lock);
- switch (value) {
- case STEREO_HEADPHONE:
- if (snd_pmic_ops_nc.pb_on)
- sst_sc_reg_access(sc_access_HP+2, PMIC_WRITE, 1);
- retval = sst_sc_reg_access(sc_access_HP, PMIC_WRITE, 2);
- nc_set_amp_power(0);
- break;
- case MONO_EARPIECE:
- case INTERNAL_SPKR:
- retval = sst_sc_reg_access(sc_access_IS, PMIC_WRITE, 3);
- if (snd_pmic_ops_nc.pb_on)
- nc_set_amp_power(1);
- break;
- default:
- pr_err("rcvd illegal request: %d\n", value);
- mutex_unlock(&snd_pmic_ops_nc.lock);
- return -EINVAL;
- }
- mutex_unlock(&snd_pmic_ops_nc.lock);
- return retval;
-}
-
-static int nc_audio_init(void)
-{
- struct sc_reg_access sc_acces, sc_access[] = {
- {0x100, 0x00, 0},
- {0x101, 0x00, 0},
- {0x104, 0x8B, 0},
- {0x107, 0x11, 0},
- {0x10B, 0x0E, 0},
- {0x114, 0x00, 0},
- {0x115, 0x00, 0},
- {0x128, 0x00, 0},
- {0x129, 0x00, 0},
- {0x12A, 0x00, 0},
- {0x12B, 0xee, 0},
- {0x12C, 0xf6, 0},
- };
-
- sst_sc_reg_access(sc_access, PMIC_WRITE, 12);
- pr_debug("Audio Init successfully!!\n");
-
- /*set output device */
- nc_set_selected_output_dev(snd_pmic_ops_nc.output_dev_id);
-
- if (snd_pmic_ops_nc.num_channel == 1) {
- sc_acces.value = 0x07;
- sc_acces.reg_addr = RMUTE;
- pr_debug("RIGHT_HP_MUTE value%d\n", sc_acces.value);
- sc_acces.mask = MASK2;
- sst_sc_reg_access(&sc_acces, PMIC_READ_MODIFY, 1);
- } else {
- sc_acces.value = 0x00;
- sc_acces.reg_addr = RMUTE;
- pr_debug("RIGHT_HP_MUTE value%d\n", sc_acces.value);
- sc_acces.mask = MASK2;
- sst_sc_reg_access(&sc_acces, PMIC_READ_MODIFY, 1);
- }
-
- return 0;
-}
-
-static int nc_set_audio_port(int status)
-{
- struct sc_reg_access sc_access[2] = {{0,},};
- int retval = 0;
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- if (status == DEACTIVATE) {
- /* Deactivate audio port-tristate and power */
- sc_access[0].value = 0x00;
- sc_access[0].mask = MASK4|MASK5;
- sc_access[0].reg_addr = AUDIOPORT1;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- } else if (status == ACTIVATE) {
- /* activate audio port */
- nc_audio_init();
- sc_access[0].value = 0x10;
- sc_access[0].mask = MASK4|MASK5 ;
- sc_access[0].reg_addr = AUDIOPORT1;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- } else
- return -EINVAL;
-
-}
-
-static int nc_set_voice_port(int status)
-{
- struct sc_reg_access sc_access[2] = {{0,},};
- int retval = 0;
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- if (status == DEACTIVATE) {
-		/* Deactivate voice port */
- sc_access[0].value = 0x00;
- sc_access[0].mask = MASK4;
- sc_access[0].reg_addr = VOICEPORT1;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- } else if (status == ACTIVATE) {
-		/* Activate voice port */
- nc_set_pcm_voice_params();
- sc_access[0].value = 0x10;
- sc_access[0].mask = MASK4;
- sc_access[0].reg_addr = VOICEPORT1;
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- } else
- return -EINVAL;
-}
-
-static int nc_set_mute(int dev_id, u8 value)
-{
- struct sc_reg_access sc_access[3];
- u8 mute_val, cap_mute;
- int retval = 0;
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- pr_debug("set device id::%d, value %d\n", dev_id, value);
-
- switch (dev_id) {
- case PMIC_SND_MUTE_ALL:
- pr_debug("PMIC_SND_MUTE_ALL value %d\n", value);
- snd_pmic_ops_nc.mute_status = value;
- snd_pmic_ops_nc.master_mute = value;
- if (value == UNMUTE) {
- /* unmute the system, set the 7th bit to zero */
- mute_val = cap_mute = 0x00;
- } else {
- /* MUTE:Set the seventh bit */
- mute_val = 0x80;
- cap_mute = 0x40;
- }
- sc_access[0].reg_addr = AUDIOLVOL;
- sc_access[1].reg_addr = AUDIORVOL;
- sc_access[0].mask = sc_access[1].mask = MASK7;
- sc_access[0].value = sc_access[1].value = mute_val;
- if (snd_pmic_ops_nc.num_channel == 1)
- sc_access[1].value = 0x80;
- if (!sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 2)) {
- sc_access[0].reg_addr = 0x109;
- sc_access[1].reg_addr = 0x10a;
- sc_access[2].reg_addr = 0x105;
- sc_access[0].mask = sc_access[1].mask =
- sc_access[2].mask = MASK6;
- sc_access[0].value = sc_access[1].value =
- sc_access[2].value = cap_mute;
-
- if ((snd_pmic_ops_nc.input_dev_id == AMIC) ||
- (snd_pmic_ops_nc.input_dev_id == DMIC))
- sc_access[1].value = 0x40;
- if (snd_pmic_ops_nc.input_dev_id == HS_MIC)
- sc_access[0].value = 0x40;
- retval = sst_sc_reg_access(sc_access,
- PMIC_READ_MODIFY, 3);
- }
- break;
- case PMIC_SND_HP_MIC_MUTE:
- pr_debug("PMIC_SND_HPMIC_MUTE value %d\n", value);
- if (value == UNMUTE) {
-			/* unmute: clear bit 6 */
- sc_access[0].value = 0x00;
- } else {
-			/* mute: set bit 6 */
- sc_access[0].value = 0x40;
- }
- sc_access[0].reg_addr = LIRSEL;
- sc_access[0].mask = MASK6;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- break;
- case PMIC_SND_AMIC_MUTE:
- pr_debug("PMIC_SND_AMIC_MUTE value %d\n", value);
- if (value == UNMUTE) {
-			/* unmute: clear bit 6 */
- sc_access[0].value = 0x00;
- } else {
-			/* mute: set bit 6 */
- sc_access[0].value = 0x40;
- }
- sc_access[0].reg_addr = LILSEL;
- sc_access[0].mask = MASK6;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- break;
-
- case PMIC_SND_DMIC_MUTE:
- pr_debug("INPUT_MUTE_DMIC value%d\n", value);
- if (value == UNMUTE) {
-			/* unmute: clear bit 6 in both registers */
- sc_access[1].value = 0x00;
- sc_access[0].value = 0x00;
- } else {
-			/* mute: set bit 6 in both registers */
- sc_access[1].value = 0x40;
- sc_access[0].value = 0x40;
- }
- sc_access[0].reg_addr = DMICCTRL1;
- sc_access[0].mask = MASK6;
- sc_access[1].reg_addr = LILSEL;
- sc_access[1].mask = MASK6;
- retval = sst_sc_reg_access(sc_access,
- PMIC_READ_MODIFY, 2);
- break;
-
- case PMIC_SND_LEFT_HP_MUTE:
- case PMIC_SND_RIGHT_HP_MUTE:
- snd_pmic_ops_nc.mute_status = value;
- if (value == UNMUTE)
- sc_access[0].value = 0x0;
- else
- sc_access[0].value = 0x04;
-
- if (dev_id == PMIC_SND_LEFT_HP_MUTE) {
- sc_access[0].reg_addr = LMUTE;
- pr_debug("LEFT_HP_MUTE value %d\n",
- sc_access[0].value);
- } else {
- if (snd_pmic_ops_nc.num_channel == 1)
- sc_access[0].value = 0x04;
- sc_access[0].reg_addr = RMUTE;
- pr_debug("RIGHT_HP_MUTE value %d\n",
- sc_access[0].value);
- }
- sc_access[0].mask = MASK2;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- break;
- case PMIC_SND_LEFT_SPEAKER_MUTE:
- case PMIC_SND_RIGHT_SPEAKER_MUTE:
- if (value == UNMUTE)
- sc_access[0].value = 0x00;
- else
- sc_access[0].value = 0x03;
- sc_access[0].reg_addr = LMUTE;
- pr_debug("SPEAKER_MUTE %d\n", sc_access[0].value);
- sc_access[0].mask = MASK1;
- retval = sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, 1);
- break;
- default:
- return -EINVAL;
- }
- return retval ;
-
-}
-
-static int nc_set_vol(int dev_id, int value)
-{
- struct sc_reg_access sc_access[3];
- int retval = 0, entries = 0;
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- pr_debug("set volume:%d\n", dev_id);
- switch (dev_id) {
- case PMIC_SND_CAPTURE_VOL:
- pr_debug("PMIC_SND_CAPTURE_VOL:value::%d\n", value);
- sc_access[0].value = sc_access[1].value =
- sc_access[2].value = -value;
- sc_access[0].mask = sc_access[1].mask = sc_access[2].mask =
- (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
- sc_access[0].reg_addr = 0x10a;
- sc_access[1].reg_addr = 0x109;
- sc_access[2].reg_addr = 0x105;
- entries = 3;
- break;
-
- case PMIC_SND_LEFT_PB_VOL:
- pr_debug("PMIC_SND_LEFT_HP_VOL %d\n", value);
- sc_access[0].value = -value;
- sc_access[0].reg_addr = HPLVOL;
- sc_access[0].mask = (MASK0|MASK1|MASK2|MASK3|MASK4);
- entries = 1;
- break;
-
- case PMIC_SND_RIGHT_PB_VOL:
- pr_debug("PMIC_SND_RIGHT_HP_VOL value %d\n", value);
- if (snd_pmic_ops_nc.num_channel == 1) {
- sc_access[0].value = 0x04;
- sc_access[0].reg_addr = RMUTE;
- sc_access[0].mask = MASK2;
- } else {
- sc_access[0].value = -value;
- sc_access[0].reg_addr = HPRVOL;
- sc_access[0].mask = (MASK0|MASK1|MASK2|MASK3|MASK4);
- }
- entries = 1;
- break;
-
- case PMIC_SND_LEFT_MASTER_VOL:
- pr_debug("PMIC_SND_LEFT_MASTER_VOL value %d\n", value);
- sc_access[0].value = -value;
- sc_access[0].reg_addr = AUDIOLVOL;
- sc_access[0].mask =
- (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6);
- entries = 1;
- break;
-
- case PMIC_SND_RIGHT_MASTER_VOL:
- pr_debug("PMIC_SND_RIGHT_MASTER_VOL value %d\n", value);
- sc_access[0].value = -value;
- sc_access[0].reg_addr = AUDIORVOL;
- sc_access[0].mask =
- (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6);
- entries = 1;
- break;
-
- default:
- return -EINVAL;
-
- }
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, entries);
-}
-
-static int nc_set_selected_input_dev(u8 value)
-{
- struct sc_reg_access sc_access[6];
- u8 num_val;
- int retval = 0;
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
- snd_pmic_ops_nc.input_dev_id = value;
-
- pr_debug("nc set selected input:%d\n", value);
-
- switch (value) {
- case AMIC:
- pr_debug("Selecting AMIC\n");
- sc_access[0].reg_addr = 0x107;
- sc_access[0].value = 0x40;
- sc_access[0].mask = MASK6|MASK3|MASK1|MASK0;
- sc_access[1].reg_addr = 0x10a;
- sc_access[1].value = 0x40;
- sc_access[1].mask = MASK6;
- sc_access[2].reg_addr = 0x109;
- sc_access[2].value = 0x00;
- sc_access[2].mask = MASK6;
- sc_access[3].reg_addr = 0x105;
- sc_access[3].value = 0x40;
- sc_access[3].mask = MASK6;
- num_val = 4;
- break;
-
- case HS_MIC:
- pr_debug("Selecting HS_MIC\n");
- sc_access[0].reg_addr = MICCTRL;
- sc_access[0].mask = MASK6|MASK3|MASK1|MASK0;
- sc_access[0].value = 0x00;
- sc_access[1].reg_addr = 0x109;
- sc_access[1].mask = MASK6;
- sc_access[1].value = 0x40;
- sc_access[2].reg_addr = 0x10a;
- sc_access[2].mask = MASK6;
- sc_access[2].value = 0x00;
- sc_access[3].reg_addr = 0x105;
- sc_access[3].value = 0x40;
- sc_access[3].mask = MASK6;
- sc_access[4].reg_addr = ADCSAMPLERATE;
- sc_access[4].mask = MASK7|MASK6|MASK5|MASK4|MASK3;
- sc_access[4].value = 0xc8;
- num_val = 5;
- break;
-
- case DMIC:
- pr_debug("DMIC\n");
- sc_access[0].reg_addr = MICCTRL;
- sc_access[0].mask = MASK6|MASK3|MASK1|MASK0;
- sc_access[0].value = 0x0B;
- sc_access[1].reg_addr = 0x105;
- sc_access[1].value = 0x80;
- sc_access[1].mask = MASK7|MASK6;
- sc_access[2].reg_addr = 0x10a;
- sc_access[2].value = 0x40;
- sc_access[2].mask = MASK6;
- sc_access[3].reg_addr = LILSEL;
- sc_access[3].mask = MASK6;
- sc_access[3].value = 0x00;
- sc_access[4].reg_addr = ADCSAMPLERATE;
- sc_access[4].mask = MASK7|MASK6|MASK5|MASK4|MASK3;
- sc_access[4].value = 0x33;
- num_val = 5;
- break;
- default:
- return -EINVAL;
- }
- return sst_sc_reg_access(sc_access, PMIC_READ_MODIFY, num_val);
-}
-
-static int nc_get_mute(int dev_id, u8 *value)
-{
- int retval = 0, mask = 0;
- struct sc_reg_access sc_access = {0,};
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- pr_debug("get mute::%d\n", dev_id);
-
- switch (dev_id) {
- case PMIC_SND_AMIC_MUTE:
- pr_debug("PMIC_SND_INPUT_MUTE_MIC1\n");
- sc_access.reg_addr = LILSEL;
- mask = MASK6;
- break;
- case PMIC_SND_HP_MIC_MUTE:
- pr_debug("PMIC_SND_INPUT_MUTE_MIC2\n");
- sc_access.reg_addr = LIRSEL;
- mask = MASK6;
- break;
- case PMIC_SND_LEFT_HP_MUTE:
- case PMIC_SND_RIGHT_HP_MUTE:
- mask = MASK2;
- pr_debug("PMIC_SN_LEFT/RIGHT_HP_MUTE\n");
- if (dev_id == PMIC_SND_RIGHT_HP_MUTE)
- sc_access.reg_addr = RMUTE;
- else
- sc_access.reg_addr = LMUTE;
- break;
-
- case PMIC_SND_LEFT_SPEAKER_MUTE:
- pr_debug("PMIC_MONO_EARPIECE_MUTE\n");
- sc_access.reg_addr = RMUTE;
- mask = MASK1;
- break;
- case PMIC_SND_DMIC_MUTE:
- pr_debug("PMIC_SND_INPUT_MUTE_DMIC\n");
- sc_access.reg_addr = 0x105;
- mask = MASK6;
- break;
- default:
- return -EINVAL;
-
- }
- retval = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
- pr_debug("reg value = %d\n", sc_access.value);
- if (retval)
- return retval;
- *value = (sc_access.value) & mask;
- pr_debug("masked value = %d\n", *value);
- if (*value)
- *value = 0;
- else
- *value = 1;
- pr_debug("value returned = 0x%x\n", *value);
- return retval;
-}
-
-static int nc_get_vol(int dev_id, int *value)
-{
- int retval = 0, mask = 0;
- struct sc_reg_access sc_access = {0,};
-
- if (snd_pmic_ops_nc.card_status == SND_CARD_UN_INIT)
- retval = nc_init_card();
- if (retval)
- return retval;
-
- switch (dev_id) {
- case PMIC_SND_CAPTURE_VOL:
- pr_debug("PMIC_SND_INPUT_CAPTURE_VOL\n");
- sc_access.reg_addr = LILSEL;
- mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5);
- break;
-
- case PMIC_SND_LEFT_MASTER_VOL:
- pr_debug("GET_VOLUME_PMIC_LEFT_MASTER_VOL\n");
- sc_access.reg_addr = AUDIOLVOL;
- mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6);
- break;
-
- case PMIC_SND_RIGHT_MASTER_VOL:
- pr_debug("GET_VOLUME_PMIC_RIGHT_MASTER_VOL\n");
- sc_access.reg_addr = AUDIORVOL;
- mask = (MASK0|MASK1|MASK2|MASK3|MASK4|MASK5|MASK6);
- break;
-
- case PMIC_SND_RIGHT_PB_VOL:
- pr_debug("GET_VOLUME_PMIC_RIGHT_HP_VOL\n");
- sc_access.reg_addr = HPRVOL;
- mask = (MASK0|MASK1|MASK2|MASK3|MASK4);
- break;
-
- case PMIC_SND_LEFT_PB_VOL:
- pr_debug("GET_VOLUME_PMIC_LEFT_HP_VOL\n");
- sc_access.reg_addr = HPLVOL;
- mask = (MASK0|MASK1|MASK2|MASK3|MASK4);
- break;
-
- default:
- return -EINVAL;
-
- }
- retval = sst_sc_reg_access(&sc_access, PMIC_READ, 1);
- pr_debug("value read = 0x%x\n", sc_access.value);
- *value = -((sc_access.value) & mask);
- pr_debug("get vol value returned = %d\n", *value);
- return retval;
-}
-
-static void hp_automute(enum snd_jack_types type, int present)
-{
- u8 in = DMIC;
- u8 out = INTERNAL_SPKR;
- if (present) {
- if (type == SND_JACK_HEADSET)
- in = HS_MIC;
- out = STEREO_HEADPHONE;
- }
- nc_set_selected_input_dev(in);
- nc_set_selected_output_dev(out);
-}
-
-static void nc_pmic_irq_cb(void *cb_data, u8 intsts)
-{
- u8 value = 0;
- struct mad_jack *mjack = NULL;
- unsigned int present = 0, jack_event_flag = 0, buttonpressflag = 0;
- struct snd_intelmad *intelmaddata = cb_data;
- struct sc_reg_access sc_access_read = {0,};
-
- sc_access_read.reg_addr = 0x132;
- sst_sc_reg_access(&sc_access_read, PMIC_READ, 1);
- value = (sc_access_read.value);
- pr_debug("value returned = 0x%x\n", value);
-
- mjack = &intelmaddata->jack[0];
- if (intsts & 0x1) {
- pr_debug("SST DBG:MAD headset detected\n");
- /* send headset detect/undetect */
- present = (value == 0x1) ? 3 : 0;
- jack_event_flag = 1;
- mjack->jack.type = SND_JACK_HEADSET;
- hp_automute(SND_JACK_HEADSET, present);
- }
-
- if (intsts & 0x2) {
- pr_debug(":MAD headphone detected\n");
- /* send headphone detect/undetect */
- present = (value == 0x2) ? 1 : 0;
- jack_event_flag = 1;
- mjack->jack.type = SND_JACK_HEADPHONE;
- hp_automute(SND_JACK_HEADPHONE, present);
- }
-
- if (intsts & 0x4) {
- pr_debug("MAD short push detected\n");
- /* send short push */
- present = 1;
- jack_event_flag = 1;
- buttonpressflag = 1;
- mjack->jack.type = MID_JACK_HS_SHORT_PRESS;
- }
-
- if (intsts & 0x8) {
- pr_debug(":MAD long push detected\n");
- /* send long push */
- present = 1;
- jack_event_flag = 1;
- buttonpressflag = 1;
- mjack->jack.type = MID_JACK_HS_LONG_PRESS;
- }
-
- if (jack_event_flag)
- sst_mad_send_jack_report(&mjack->jack,
- buttonpressflag, present);
-}
-static int nc_jack_enable(void)
-{
- return 0;
-}
-
-struct snd_pmic_ops snd_pmic_ops_nc = {
- .input_dev_id = DMIC,
- .output_dev_id = INTERNAL_SPKR,
- .set_input_dev = nc_set_selected_input_dev,
- .set_output_dev = nc_set_selected_output_dev,
- .set_mute = nc_set_mute,
- .get_mute = nc_get_mute,
- .set_vol = nc_set_vol,
- .get_vol = nc_get_vol,
- .init_card = nc_init_card,
- .set_pcm_audio_params = nc_set_pcm_audio_params,
- .set_pcm_voice_params = nc_set_pcm_voice_params,
- .set_voice_port = nc_set_voice_port,
- .set_audio_port = nc_set_audio_port,
- .power_up_pmic_pb = nc_power_up_pb,
- .power_up_pmic_cp = nc_power_up_cp,
- .power_down_pmic_pb = nc_power_down_pb,
- .power_down_pmic_cp = nc_power_down_cp,
- .power_down_pmic = nc_power_down,
- .pmic_irq_cb = nc_pmic_irq_cb,
- .pmic_jack_enable = nc_jack_enable,
-};
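For orientation: both vendor-control files removed above plug into the intelmid core through the same struct snd_pmic_ops vtable. The sketch below shows how such a vtable could be driven for playback; example_start_playback() and its call ordering are illustrative assumptions, only the ops fields come from the deleted code.

/* Illustrative sketch only: drive a snd_pmic_ops vtable for playback.
 * example_start_playback() is hypothetical; init_card, set_audio_port
 * and power_up_pmic_pb are fields of the ops tables removed above. */
static int example_start_playback(struct snd_pmic_ops *ops, unsigned int port)
{
	int ret;

	ret = ops->init_card();			/* bring the codec to a known state */
	if (ret)
		return ret;

	ret = ops->set_audio_port(ACTIVATE);	/* enable the audio port */
	if (ret)
		return ret;

	return ops->power_up_pmic_pb(port);	/* power up the playback path */
}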
diff --git a/drivers/staging/keucr/smilmain.c b/drivers/staging/keucr/smilmain.c
index 31f7813..cc49038 100644
--- a/drivers/staging/keucr/smilmain.c
+++ b/drivers/staging/keucr/smilmain.c
@@ -148,7 +148,7 @@ int Media_D_ReadSector(struct us_data *us, DWORD start,WORD count,BYTE *buf)
{
WORD len, bn;
- //if (Check_D_MediaPower()) ; b 6250 don't care
+	//if (Check_D_MediaPower()) ; on the 6250 we don't care
// return(ErrCode);
//if (Check_D_MediaFmt(fdoExt)) ;
// return(ErrCode);
@@ -594,7 +594,7 @@ int Media_D_OneSectWriteFlush(PFDO_DEVICE_EXTENSION fdoExt)
// if (Check_D_CardStsChg())
// MediaChange = ERROR;
// //usleep(56*1024);
-// if ((!Check_D_CntPower())&&(!MediaChange)) // power & Media SQ change, h return success
+// if ((!Check_D_CntPower())&&(!MediaChange)) // power present & media unchanged, then return success
// return(SMSUCCESS);
// //usleep(56*1024);
//
diff --git a/drivers/staging/keucr/usb.c b/drivers/staging/keucr/usb.c
index 66aad3a..4833034 100644
--- a/drivers/staging/keucr/usb.c
+++ b/drivers/staging/keucr/usb.c
@@ -701,26 +701,4 @@ static struct usb_driver usb_storage_driver = {
.soft_unbind = 1,
};
-//----- usb_stor_init() ---------------------
-static int __init usb_stor_init(void)
-{
- int retval;
- pr_info("usb --- usb_stor_init start\n");
-
- retval = usb_register(&usb_storage_driver);
- if (retval == 0)
- pr_info("ENE USB Mass Storage support registered.\n");
-
- return retval;
-}
-
-//----- usb_stor_exit() ---------------------
-static void __exit usb_stor_exit(void)
-{
- pr_info("usb --- usb_stor_exit\n");
-
- usb_deregister(&usb_storage_driver) ;
-}
-
-module_init(usb_stor_init);
-module_exit(usb_stor_exit);
+module_usb_driver(usb_storage_driver);
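The hunk above swaps the hand-written init/exit pair for the module_usb_driver() helper. As a hedged sketch (not the exact macro expansion from <linux/usb.h>), the generated boilerplate is roughly equivalent to the removed code, minus the pr_info() messages:

/* Rough equivalent of module_usb_driver(usb_storage_driver);
 * an approximation for illustration, not the kernel's exact macro. */
static int __init usb_storage_driver_init(void)
{
	return usb_register(&usb_storage_driver);
}
module_init(usb_storage_driver_init);

static void __exit usb_storage_driver_exit(void)
{
	usb_deregister(&usb_storage_driver);
}
module_exit(usb_storage_driver_exit);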
diff --git a/drivers/staging/line6/Makefile b/drivers/staging/line6/Makefile
index de6bd12..34a2dda 100644
--- a/drivers/staging/line6/Makefile
+++ b/drivers/staging/line6/Makefile
@@ -12,4 +12,5 @@ line6usb-y := \
playback.o \
pod.o \
toneport.o \
- variax.o
+ variax.o \
+ podhd.o
diff --git a/drivers/staging/line6/capture.c b/drivers/staging/line6/capture.c
index 9647154..127f952 100644
--- a/drivers/staging/line6/capture.c
+++ b/drivers/staging/line6/capture.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -192,6 +193,31 @@ void line6_capture_check_period(struct snd_line6_pcm *line6pcm, int length)
}
}
+int line6_alloc_capture_buffer(struct snd_line6_pcm *line6pcm)
+{
+ /* We may be invoked multiple times in a row so allocate once only */
+ if (line6pcm->buffer_in)
+ return 0;
+
+ line6pcm->buffer_in =
+ kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
+ line6pcm->max_packet_size, GFP_KERNEL);
+
+ if (!line6pcm->buffer_in) {
+ dev_err(line6pcm->line6->ifcdev,
+ "cannot malloc capture buffer\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void line6_free_capture_buffer(struct snd_line6_pcm *line6pcm)
+{
+ kfree(line6pcm->buffer_in);
+ line6pcm->buffer_in = NULL;
+}
+
/*
* Callback for completed capture URB.
*/
@@ -243,11 +269,7 @@ static void audio_in_callback(struct urb *urb)
length += fsize;
/* the following assumes LINE6_ISO_PACKETS == 1: */
-#if LINE6_BACKUP_MONITOR_SIGNAL
- memcpy(line6pcm->prev_fbuf, fbuf, fsize);
-#else
line6pcm->prev_fbuf = fbuf;
-#endif
line6pcm->prev_fsize = fsize;
#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
@@ -319,6 +341,13 @@ static int snd_line6_capture_hw_params(struct snd_pcm_substream *substream,
}
/* -- [FD] end */
+ if ((line6pcm->flags & MASK_CAPTURE) == 0) {
+ ret = line6_alloc_capture_buffer(line6pcm);
+
+ if (ret < 0)
+ return ret;
+ }
+
ret = snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(hw_params));
if (ret < 0)
@@ -331,6 +360,13 @@ static int snd_line6_capture_hw_params(struct snd_pcm_substream *substream,
/* hw_free capture callback */
static int snd_line6_capture_hw_free(struct snd_pcm_substream *substream)
{
+ struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
+
+ if ((line6pcm->flags & MASK_CAPTURE) == 0) {
+ line6_unlink_wait_clear_audio_in_urbs(line6pcm);
+ line6_free_capture_buffer(line6pcm);
+ }
+
return snd_pcm_lib_free_pages(substream);
}
diff --git a/drivers/staging/line6/capture.h b/drivers/staging/line6/capture.h
index a7509fb..366cbaa 100644
--- a/drivers/staging/line6/capture.h
+++ b/drivers/staging/line6/capture.h
@@ -19,11 +19,13 @@
extern struct snd_pcm_ops snd_line6_capture_ops;
+extern int line6_alloc_capture_buffer(struct snd_line6_pcm *line6pcm);
extern void line6_capture_copy(struct snd_line6_pcm *line6pcm, char *fbuf,
int fsize);
extern void line6_capture_check_period(struct snd_line6_pcm *line6pcm,
int length);
extern int line6_create_audio_in_urbs(struct snd_line6_pcm *line6pcm);
+extern void line6_free_capture_buffer(struct snd_line6_pcm *line6pcm);
extern int line6_submit_audio_in_all_urbs(struct snd_line6_pcm *line6pcm);
extern void line6_unlink_audio_in_urbs(struct snd_line6_pcm *line6pcm);
extern void line6_unlink_wait_clear_audio_in_urbs(struct snd_line6_pcm
diff --git a/drivers/staging/line6/driver.c b/drivers/staging/line6/driver.c
index 851b762..6a1959e 100644
--- a/drivers/staging/line6/driver.c
+++ b/drivers/staging/line6/driver.c
@@ -21,6 +21,7 @@
#include "midi.h"
#include "playback.h"
#include "pod.h"
+#include "podhd.h"
#include "revision.h"
#include "toneport.h"
#include "usbdefs.h"
@@ -37,6 +38,8 @@ static const struct usb_device_id line6_id_table[] = {
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_BASSPODXTPRO)},
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_GUITARPORT)},
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_POCKETPOD)},
+ {USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODHD300)},
+ {USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODHD500)},
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODSTUDIO_GX)},
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODSTUDIO_UX1)},
{USB_DEVICE(LINE6_VENDOR_ID, LINE6_DEVID_PODSTUDIO_UX2)},
@@ -56,23 +59,25 @@ MODULE_DEVICE_TABLE(usb, line6_id_table);
/* *INDENT-OFF* */
static struct line6_properties line6_properties_table[] = {
- { "BassPODxt", "BassPODxt", LINE6_BIT_BASSPODXT, LINE6_BIT_CONTROL_PCM_HWMON },
- { "BassPODxtLive", "BassPODxt Live", LINE6_BIT_BASSPODXTLIVE, LINE6_BIT_CONTROL_PCM_HWMON },
- { "BassPODxtPro", "BassPODxt Pro", LINE6_BIT_BASSPODXTPRO, LINE6_BIT_CONTROL_PCM_HWMON },
- { "GuitarPort", "GuitarPort", LINE6_BIT_GUITARPORT, LINE6_BIT_PCM },
- { "PocketPOD", "Pocket POD", LINE6_BIT_POCKETPOD, LINE6_BIT_CONTROL },
- { "PODStudioGX", "POD Studio GX", LINE6_BIT_PODSTUDIO_GX, LINE6_BIT_PCM },
- { "PODStudioUX1", "POD Studio UX1", LINE6_BIT_PODSTUDIO_UX1, LINE6_BIT_PCM },
- { "PODStudioUX2", "POD Studio UX2", LINE6_BIT_PODSTUDIO_UX2, LINE6_BIT_PCM },
- { "PODX3", "POD X3", LINE6_BIT_PODX3, LINE6_BIT_PCM },
- { "PODX3Live", "POD X3 Live", LINE6_BIT_PODX3LIVE, LINE6_BIT_PCM },
- { "PODxt", "PODxt", LINE6_BIT_PODXT, LINE6_BIT_CONTROL_PCM_HWMON },
- { "PODxtLive", "PODxt Live", LINE6_BIT_PODXTLIVE, LINE6_BIT_CONTROL_PCM_HWMON },
- { "PODxtPro", "PODxt Pro", LINE6_BIT_PODXTPRO, LINE6_BIT_CONTROL_PCM_HWMON },
- { "TonePortGX", "TonePort GX", LINE6_BIT_TONEPORT_GX, LINE6_BIT_PCM },
- { "TonePortUX1", "TonePort UX1", LINE6_BIT_TONEPORT_UX1, LINE6_BIT_PCM },
- { "TonePortUX2", "TonePort UX2", LINE6_BIT_TONEPORT_UX2, LINE6_BIT_PCM },
- { "Variax", "Variax Workbench", LINE6_BIT_VARIAX, LINE6_BIT_CONTROL }
+ { LINE6_BIT_BASSPODXT, "BassPODxt", "BassPODxt", LINE6_BIT_CONTROL_PCM_HWMON },
+ { LINE6_BIT_BASSPODXTLIVE, "BassPODxtLive", "BassPODxt Live", LINE6_BIT_CONTROL_PCM_HWMON },
+ { LINE6_BIT_BASSPODXTPRO, "BassPODxtPro", "BassPODxt Pro", LINE6_BIT_CONTROL_PCM_HWMON },
+ { LINE6_BIT_GUITARPORT, "GuitarPort", "GuitarPort", LINE6_BIT_PCM },
+ { LINE6_BIT_POCKETPOD, "PocketPOD", "Pocket POD", LINE6_BIT_CONTROL },
+ { LINE6_BIT_PODHD300, "PODHD300", "POD HD300", LINE6_BIT_CONTROL_PCM_HWMON },
+ { LINE6_BIT_PODHD500, "PODHD500", "POD HD500", LINE6_BIT_CONTROL_PCM_HWMON },
+ { LINE6_BIT_PODSTUDIO_GX, "PODStudioGX", "POD Studio GX", LINE6_BIT_PCM },
+ { LINE6_BIT_PODSTUDIO_UX1, "PODStudioUX1", "POD Studio UX1", LINE6_BIT_PCM },
+ { LINE6_BIT_PODSTUDIO_UX2, "PODStudioUX2", "POD Studio UX2", LINE6_BIT_PCM },
+ { LINE6_BIT_PODX3, "PODX3", "POD X3", LINE6_BIT_PCM },
+ { LINE6_BIT_PODX3LIVE, "PODX3Live", "POD X3 Live", LINE6_BIT_PCM },
+ { LINE6_BIT_PODXT, "PODxt", "PODxt", LINE6_BIT_CONTROL_PCM_HWMON },
+ { LINE6_BIT_PODXTLIVE, "PODxtLive", "PODxt Live", LINE6_BIT_CONTROL_PCM_HWMON },
+ { LINE6_BIT_PODXTPRO, "PODxtPro", "PODxt Pro", LINE6_BIT_CONTROL_PCM_HWMON },
+ { LINE6_BIT_TONEPORT_GX, "TonePortGX", "TonePort GX", LINE6_BIT_PCM },
+ { LINE6_BIT_TONEPORT_UX1, "TonePortUX1", "TonePort UX1", LINE6_BIT_PCM },
+ { LINE6_BIT_TONEPORT_UX2, "TonePortUX2", "TonePort UX2", LINE6_BIT_PCM },
+ { LINE6_BIT_VARIAX, "Variax", "Variax Workbench", LINE6_BIT_CONTROL },
};
/* *INDENT-ON* */
@@ -437,6 +442,10 @@ static void line6_data_received(struct urb *urb)
line6);
break;
+ case LINE6_DEVID_PODHD300:
+ case LINE6_DEVID_PODHD500:
+ break; /* let userspace handle MIDI */
+
case LINE6_DEVID_PODXTLIVE:
switch (line6->interface_number) {
case PODXTLIVE_INTERFACE_POD:
@@ -720,8 +729,8 @@ static int line6_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
int devtype;
- struct usb_device *usbdev = NULL;
- struct usb_line6 *line6 = NULL;
+ struct usb_device *usbdev;
+ struct usb_line6 *line6;
const struct line6_properties *properties;
int devnum;
int interface_number, alternate = 0;
@@ -794,6 +803,7 @@ static int line6_probe(struct usb_interface *interface,
}
break;
+ case LINE6_DEVID_PODHD500:
case LINE6_DEVID_PODX3:
case LINE6_DEVID_PODX3LIVE:
switch (interface_number) {
@@ -812,6 +822,7 @@ static int line6_probe(struct usb_interface *interface,
case LINE6_DEVID_BASSPODXTPRO:
case LINE6_DEVID_PODXT:
case LINE6_DEVID_PODXTPRO:
+ case LINE6_DEVID_PODHD300:
alternate = 5;
break;
@@ -865,6 +876,18 @@ static int line6_probe(struct usb_interface *interface,
ep_write = 0x03;
break;
+ case LINE6_DEVID_PODHD300:
+ size = sizeof(struct usb_line6_podhd);
+ ep_read = 0x84;
+ ep_write = 0x03;
+ break;
+
+ case LINE6_DEVID_PODHD500:
+ size = sizeof(struct usb_line6_podhd);
+ ep_read = 0x81;
+ ep_write = 0x01;
+ break;
+
case LINE6_DEVID_POCKETPOD:
size = sizeof(struct usb_line6_pod);
ep_read = 0x82;
@@ -923,7 +946,7 @@ static int line6_probe(struct usb_interface *interface,
}
if (size == 0) {
- dev_err(line6->ifcdev,
+ dev_err(&interface->dev,
"driver bug: interface data size not set\n");
ret = -ENODEV;
goto err_put;
@@ -1017,6 +1040,12 @@ static int line6_probe(struct usb_interface *interface,
ret = line6_pod_init(interface, (struct usb_line6_pod *)line6);
break;
+ case LINE6_DEVID_PODHD300:
+ case LINE6_DEVID_PODHD500:
+ ret = line6_podhd_init(interface,
+ (struct usb_line6_podhd *)line6);
+ break;
+
case LINE6_DEVID_PODXTLIVE:
switch (interface_number) {
case PODXTLIVE_INTERFACE_POD:
@@ -1139,6 +1168,11 @@ static void line6_disconnect(struct usb_interface *interface)
line6_pod_disconnect(interface);
break;
+ case LINE6_DEVID_PODHD300:
+ case LINE6_DEVID_PODHD500:
+ line6_podhd_disconnect(interface);
+ break;
+
case LINE6_DEVID_PODXTLIVE:
switch (interface_number) {
case PODXTLIVE_INTERFACE_POD:
diff --git a/drivers/staging/line6/driver.h b/drivers/staging/line6/driver.h
index 553192f..117bf99 100644
--- a/drivers/staging/line6/driver.h
+++ b/drivers/staging/line6/driver.h
@@ -88,6 +88,11 @@ static const int SYSEX_EXTRA_SIZE = sizeof(line6_midi_id) + 4;
*/
struct line6_properties {
/**
+ Bit identifying this device in the line6usb driver.
+ */
+ int device_bit;
+
+ /**
Card id string (maximum 16 characters).
This can be used to address the device in ALSA programs as
"default:CARD=<id>"
@@ -100,11 +105,6 @@ struct line6_properties {
const char *name;
/**
- Bit identifying this device in the line6usb driver.
- */
- int device_bit;
-
- /**
Bit vector defining this device's capabilities in the
line6usb driver.
*/
diff --git a/drivers/staging/line6/midi.c b/drivers/staging/line6/midi.c
index e554a2d..13d0293 100644
--- a/drivers/staging/line6/midi.c
+++ b/drivers/staging/line6/midi.c
@@ -135,7 +135,7 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data,
line6_write_hexdump(line6, 'S', data, length);
#endif
- transfer_buffer = kmalloc(length, GFP_ATOMIC);
+ transfer_buffer = kmemdup(data, length, GFP_ATOMIC);
if (transfer_buffer == NULL) {
usb_free_urb(urb);
@@ -143,7 +143,6 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data,
return -ENOMEM;
}
- memcpy(transfer_buffer, data, length);
usb_fill_int_urb(urb, line6->usbdev,
usb_sndbulkpipe(line6->usbdev,
line6->ep_control_write),
@@ -173,6 +172,8 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data,
break;
case LINE6_DEVID_VARIAX:
+ case LINE6_DEVID_PODHD300:
+ case LINE6_DEVID_PODHD500:
break;
default:
@@ -307,10 +308,10 @@ static ssize_t midi_set_midi_mask_transmit(struct device *dev,
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6 *line6 = usb_get_intfdata(interface);
- unsigned long value;
+ unsigned short value;
int ret;
- ret = strict_strtoul(buf, 10, &value);
+ ret = kstrtou16(buf, 10, &value);
if (ret)
return ret;
@@ -339,10 +340,10 @@ static ssize_t midi_set_midi_mask_receive(struct device *dev,
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6 *line6 = usb_get_intfdata(interface);
- unsigned long value;
+ unsigned short value;
int ret;
- ret = strict_strtoul(buf, 10, &value);
+ ret = kstrtou16(buf, 10, &value);
if (ret)
return ret;
@@ -391,16 +392,32 @@ int line6_init_midi(struct usb_line6 *line6)
return -ENOMEM;
err = line6_midibuf_init(&line6midi->midibuf_in, MIDI_BUFFER_SIZE, 0);
- if (err < 0)
+ if (err < 0) {
+ kfree(line6midi);
return err;
+ }
err = line6_midibuf_init(&line6midi->midibuf_out, MIDI_BUFFER_SIZE, 1);
- if (err < 0)
+ if (err < 0) {
+ kfree(line6midi->midibuf_in.buf);
+ kfree(line6midi);
return err;
+ }
line6midi->line6 = line6;
- line6midi->midi_mask_transmit = 1;
- line6midi->midi_mask_receive = 4;
+
+ switch(line6->product) {
+ case LINE6_DEVID_PODHD300:
+ case LINE6_DEVID_PODHD500:
+ line6midi->midi_mask_transmit = 1;
+ line6midi->midi_mask_receive = 1;
+ break;
+
+ default:
+ line6midi->midi_mask_transmit = 1;
+ line6midi->midi_mask_receive = 4;
+ }
+
line6->line6midi = line6midi;
err = snd_device_new(line6->card, SNDRV_DEV_RAWMIDI, line6midi,
diff --git a/drivers/staging/line6/midi.h b/drivers/staging/line6/midi.h
index b73a025..4a9e9f9 100644
--- a/drivers/staging/line6/midi.h
+++ b/drivers/staging/line6/midi.h
@@ -57,12 +57,12 @@ struct snd_line6_midi {
/**
Bit mask for output MIDI channels.
*/
- int midi_mask_transmit;
+ unsigned short midi_mask_transmit;
/**
Bit mask for input MIDI channels.
*/
- int midi_mask_receive;
+ unsigned short midi_mask_receive;
/**
Buffer for incoming MIDI stream.
diff --git a/drivers/staging/line6/pcm.c b/drivers/staging/line6/pcm.c
index 9d4c8a6..37675e6 100644
--- a/drivers/staging/line6/pcm.c
+++ b/drivers/staging/line6/pcm.c
@@ -86,31 +86,22 @@ static DEVICE_ATTR(impulse_period, S_IWUSR | S_IRUGO, pcm_get_impulse_period,
#endif
+static bool test_flags(unsigned long flags0, unsigned long flags1,
+ unsigned long mask)
+{
+ return ((flags0 & mask) == 0) && ((flags1 & mask) != 0);
+}
+
int line6_pcm_start(struct snd_line6_pcm *line6pcm, int channels)
{
unsigned long flags_old =
__sync_fetch_and_or(&line6pcm->flags, channels);
unsigned long flags_new = flags_old | channels;
int err = 0;
-
-#if LINE6_BACKUP_MONITOR_SIGNAL
- if (!(line6pcm->line6->properties->capabilities & LINE6_BIT_HWMON)) {
- line6pcm->prev_fbuf =
- kmalloc(LINE6_ISO_PACKETS * line6pcm->max_packet_size,
- GFP_KERNEL);
-
- if (!line6pcm->prev_fbuf) {
- dev_err(line6pcm->line6->ifcdev,
- "cannot malloc monitor buffer\n");
- return -ENOMEM;
- }
- }
-#else
+
line6pcm->prev_fbuf = NULL;
-#endif
- if (((flags_old & MASK_CAPTURE) == 0) &&
- ((flags_new & MASK_CAPTURE) != 0)) {
+ if (test_flags(flags_old, flags_new, MASK_CAPTURE)) {
/*
Waiting for completion of active URBs in the stop handler is
a bug, we therefore report an error if capturing is restarted
@@ -119,54 +110,47 @@ int line6_pcm_start(struct snd_line6_pcm *line6pcm, int channels)
if (line6pcm->active_urb_in | line6pcm->unlink_urb_in)
return -EBUSY;
- line6pcm->buffer_in =
- kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
- line6pcm->max_packet_size, GFP_KERNEL);
+ if (!(flags_new & MASK_PCM_ALSA_CAPTURE)) {
+ err = line6_alloc_capture_buffer(line6pcm);
- if (!line6pcm->buffer_in) {
- dev_err(line6pcm->line6->ifcdev,
- "cannot malloc capture buffer\n");
- return -ENOMEM;
+ if (err < 0)
+ goto pcm_start_error;
}
line6pcm->count_in = 0;
line6pcm->prev_fsize = 0;
err = line6_submit_audio_in_all_urbs(line6pcm);
- if (err < 0) {
- __sync_fetch_and_and(&line6pcm->flags, ~channels);
- return err;
- }
+ if (err < 0)
+ goto pcm_start_error;
}
- if (((flags_old & MASK_PLAYBACK) == 0) &&
- ((flags_new & MASK_PLAYBACK) != 0)) {
+ if (test_flags(flags_old, flags_new, MASK_PLAYBACK)) {
/*
See comment above regarding PCM restart.
*/
if (line6pcm->active_urb_out | line6pcm->unlink_urb_out)
return -EBUSY;
- line6pcm->buffer_out =
- kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
- line6pcm->max_packet_size, GFP_KERNEL);
+ if (!(flags_new & MASK_PCM_ALSA_PLAYBACK)) {
+ err = line6_alloc_playback_buffer(line6pcm);
- if (!line6pcm->buffer_out) {
- dev_err(line6pcm->line6->ifcdev,
- "cannot malloc playback buffer\n");
- return -ENOMEM;
+ if (err < 0)
+ goto pcm_start_error;
}
line6pcm->count_out = 0;
err = line6_submit_audio_out_all_urbs(line6pcm);
- if (err < 0) {
- __sync_fetch_and_and(&line6pcm->flags, ~channels);
- return err;
- }
+ if (err < 0)
+ goto pcm_start_error;
}
return 0;
+
+pcm_start_error:
+ __sync_fetch_and_and(&line6pcm->flags, ~channels);
+ return err;
}
int line6_pcm_stop(struct snd_line6_pcm *line6pcm, int channels)
@@ -175,22 +159,19 @@ int line6_pcm_stop(struct snd_line6_pcm *line6pcm, int channels)
__sync_fetch_and_and(&line6pcm->flags, ~channels);
unsigned long flags_new = flags_old & ~channels;
- if (((flags_old & MASK_CAPTURE) != 0) &&
- ((flags_new & MASK_CAPTURE) == 0)) {
+ if (test_flags(flags_new, flags_old, MASK_CAPTURE)) {
line6_unlink_audio_in_urbs(line6pcm);
- kfree(line6pcm->buffer_in);
- line6pcm->buffer_in = NULL;
+
+ if (!(flags_old & MASK_PCM_ALSA_CAPTURE))
+ line6_free_capture_buffer(line6pcm);
}
- if (((flags_old & MASK_PLAYBACK) != 0) &&
- ((flags_new & MASK_PLAYBACK) == 0)) {
+ if (test_flags(flags_new, flags_old, MASK_PLAYBACK)) {
line6_unlink_audio_out_urbs(line6pcm);
- kfree(line6pcm->buffer_out);
- line6pcm->buffer_out = NULL;
+
+ if (!(flags_old & MASK_PCM_ALSA_PLAYBACK))
+ line6_free_playback_buffer(line6pcm);
}
-#if LINE6_BACKUP_MONITOR_SIGNAL
- kfree(line6pcm->prev_fbuf);
-#endif
return 0;
}
@@ -403,10 +384,12 @@ int line6_init_pcm(struct usb_line6 *line6,
case LINE6_DEVID_PODXT:
case LINE6_DEVID_PODXTLIVE:
case LINE6_DEVID_PODXTPRO:
+ case LINE6_DEVID_PODHD300:
ep_read = 0x82;
ep_write = 0x01;
break;
+ case LINE6_DEVID_PODHD500:
case LINE6_DEVID_PODX3:
case LINE6_DEVID_PODX3LIVE:
ep_read = 0x86;
@@ -451,9 +434,14 @@ int line6_init_pcm(struct usb_line6 *line6,
line6pcm->line6 = line6;
line6pcm->ep_audio_read = ep_read;
line6pcm->ep_audio_write = ep_write;
- line6pcm->max_packet_size = usb_maxpacket(line6->usbdev,
- usb_rcvintpipe(line6->usbdev,
- ep_read), 0);
+
+ /* Read and write buffers are sized identically, so choose minimum */
+ line6pcm->max_packet_size = min(
+ usb_maxpacket(line6->usbdev,
+ usb_rcvisocpipe(line6->usbdev, ep_read), 0),
+ usb_maxpacket(line6->usbdev,
+ usb_sndisocpipe(line6->usbdev, ep_write), 1));
+
line6pcm->properties = properties;
line6->line6pcm = line6pcm;
@@ -508,6 +496,23 @@ int snd_line6_prepare(struct snd_pcm_substream *substream)
{
struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
+ switch (substream->stream) {
+ case SNDRV_PCM_STREAM_PLAYBACK:
+ if ((line6pcm->flags & MASK_PLAYBACK) == 0)
+ line6_unlink_wait_clear_audio_out_urbs(line6pcm);
+
+ break;
+
+ case SNDRV_PCM_STREAM_CAPTURE:
+ if ((line6pcm->flags & MASK_CAPTURE) == 0)
+ line6_unlink_wait_clear_audio_in_urbs(line6pcm);
+
+ break;
+
+ default:
+ MISSING_CASE;
+ }
+
if (!test_and_set_bit(BIT_PREPARED, &line6pcm->flags)) {
line6pcm->count_out = 0;
line6pcm->pos_out = 0;
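A note on the new test_flags() helper added to pcm.c above: it reports a clear-to-set transition of the bits selected by mask, which is why line6_pcm_start() passes (flags_old, flags_new) and line6_pcm_stop() swaps the arguments. A self-contained demonstration follows; the MASK_CAPTURE value is a placeholder, not the driver's real bit layout.

/* Standalone demonstration of the test_flags() semantics introduced above.
 * MASK_CAPTURE is given a placeholder value for the example only. */
#include <stdbool.h>
#include <stdio.h>

#define MASK_CAPTURE 0x03UL	/* placeholder, not the driver's definition */

static bool test_flags(unsigned long flags0, unsigned long flags1,
		       unsigned long mask)
{
	return ((flags0 & mask) == 0) && ((flags1 & mask) != 0);
}

int main(void)
{
	unsigned long stopped = 0;		/* capture bits clear */
	unsigned long running = MASK_CAPTURE;	/* capture bits set   */

	/* start path: first capture user arrives, bits go 0 -> 1 */
	printf("transition: %d\n", test_flags(stopped, running, MASK_CAPTURE));
	/* no transition: bits already set, helper stays quiet */
	printf("no change:  %d\n", test_flags(running, running, MASK_CAPTURE));
	return 0;
}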
diff --git a/drivers/staging/line6/pcm.h b/drivers/staging/line6/pcm.h
index 77055b3..55d8297 100644
--- a/drivers/staging/line6/pcm.h
+++ b/drivers/staging/line6/pcm.h
@@ -39,9 +39,6 @@
#define LINE6_IMPULSE_DEFAULT_PERIOD 100
#endif
-#define LINE6_BACKUP_MONITOR_SIGNAL 0
-#define LINE6_REUSE_DMA_AREA_FOR_PLAYBACK 0
-
/*
Get substream from Line6 PCM data structure
*/
@@ -149,11 +146,6 @@ struct snd_line6_pcm {
unsigned char *buffer_in;
/**
- Temporary buffer index for playback.
- */
- int index_out;
-
- /**
Previously captured frame (for software monitoring).
*/
unsigned char *prev_fbuf;
diff --git a/drivers/staging/line6/playback.c b/drivers/staging/line6/playback.c
index 10c5438..4152db2 100644
--- a/drivers/staging/line6/playback.c
+++ b/drivers/staging/line6/playback.c
@@ -9,6 +9,7 @@
*
*/
+#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
@@ -191,13 +192,10 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
urb_frames = urb_size / bytes_per_frame;
urb_out->transfer_buffer =
line6pcm->buffer_out +
- line6pcm->max_packet_size * line6pcm->index_out;
+ index * LINE6_ISO_PACKETS * line6pcm->max_packet_size;
urb_out->transfer_buffer_length = urb_size;
urb_out->context = line6pcm;
- if (++line6pcm->index_out == LINE6_ISO_BUFFERS)
- line6pcm->index_out = 0;
-
if (test_bit(BIT_PCM_ALSA_PLAYBACK, &line6pcm->flags) &&
!test_bit(BIT_PAUSE_PLAYBACK, &line6pcm->flags)) {
struct snd_pcm_runtime *runtime =
@@ -222,18 +220,10 @@ static int submit_audio_out_urb(struct snd_line6_pcm *line6pcm)
} else
dev_err(line6pcm->line6->ifcdev, "driver bug: len = %d\n", len); /* this is somewhat paranoid */
} else {
-#if LINE6_REUSE_DMA_AREA_FOR_PLAYBACK
- /* set the buffer pointer */
- urb_out->transfer_buffer =
- runtime->dma_area +
- line6pcm->pos_out * bytes_per_frame;
-#else
- /* copy data */
memcpy(urb_out->transfer_buffer,
runtime->dma_area +
line6pcm->pos_out * bytes_per_frame,
urb_out->transfer_buffer_length);
-#endif
}
line6pcm->pos_out += urb_frames;
@@ -361,6 +351,31 @@ void line6_unlink_wait_clear_audio_out_urbs(struct snd_line6_pcm *line6pcm)
wait_clear_audio_out_urbs(line6pcm);
}
+int line6_alloc_playback_buffer(struct snd_line6_pcm *line6pcm)
+{
+ /* We may be invoked multiple times in a row so allocate once only */
+ if (line6pcm->buffer_out)
+ return 0;
+
+ line6pcm->buffer_out =
+ kmalloc(LINE6_ISO_BUFFERS * LINE6_ISO_PACKETS *
+ line6pcm->max_packet_size, GFP_KERNEL);
+
+ if (!line6pcm->buffer_out) {
+ dev_err(line6pcm->line6->ifcdev,
+ "cannot malloc playback buffer\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void line6_free_playback_buffer(struct snd_line6_pcm *line6pcm)
+{
+ kfree(line6pcm->buffer_out);
+ line6pcm->buffer_out = NULL;
+}
+
/*
Callback for completed playback URB.
*/
@@ -469,6 +484,13 @@ static int snd_line6_playback_hw_params(struct snd_pcm_substream *substream,
}
/* -- [FD] end */
+ if ((line6pcm->flags & MASK_PLAYBACK) == 0) {
+ ret = line6_alloc_playback_buffer(line6pcm);
+
+ if (ret < 0)
+ return ret;
+ }
+
ret = snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(hw_params));
if (ret < 0)
@@ -481,6 +503,13 @@ static int snd_line6_playback_hw_params(struct snd_pcm_substream *substream,
/* hw_free playback callback */
static int snd_line6_playback_hw_free(struct snd_pcm_substream *substream)
{
+ struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);
+
+ if ((line6pcm->flags & MASK_PLAYBACK) == 0) {
+ line6_unlink_wait_clear_audio_out_urbs(line6pcm);
+ line6_free_playback_buffer(line6pcm);
+ }
+
return snd_pcm_lib_free_pages(substream);
}
diff --git a/drivers/staging/line6/playback.h b/drivers/staging/line6/playback.h
index f2fc8c0..02487ff 100644
--- a/drivers/staging/line6/playback.h
+++ b/drivers/staging/line6/playback.h
@@ -29,7 +29,9 @@
extern struct snd_pcm_ops snd_line6_playback_ops;
+extern int line6_alloc_playback_buffer(struct snd_line6_pcm *line6pcm);
extern int line6_create_audio_out_urbs(struct snd_line6_pcm *line6pcm);
+extern void line6_free_playback_buffer(struct snd_line6_pcm *line6pcm);
extern int line6_submit_audio_out_all_urbs(struct snd_line6_pcm *line6pcm);
extern void line6_unlink_audio_out_urbs(struct snd_line6_pcm *line6pcm);
extern void line6_unlink_wait_clear_audio_out_urbs(struct snd_line6_pcm
diff --git a/drivers/staging/line6/pod.c b/drivers/staging/line6/pod.c
index d9b3021..4dadc57 100644
--- a/drivers/staging/line6/pod.c
+++ b/drivers/staging/line6/pod.c
@@ -1149,14 +1149,10 @@ static struct snd_kcontrol_new pod_control_monitor = {
static void pod_destruct(struct usb_interface *interface)
{
struct usb_line6_pod *pod = usb_get_intfdata(interface);
- struct usb_line6 *line6;
if (pod == NULL)
return;
- line6 = &pod->line6;
- if (line6 == NULL)
- return;
- line6_cleanup_audio(line6);
+ line6_cleanup_audio(&pod->line6);
del_timer(&pod->startup_timer);
cancel_work_sync(&pod->startup_work);
diff --git a/drivers/staging/line6/podhd.c b/drivers/staging/line6/podhd.c
new file mode 100644
index 0000000..7ef4543
--- /dev/null
+++ b/drivers/staging/line6/podhd.c
@@ -0,0 +1,154 @@
+/*
+ * Line6 Pod HD
+ *
+ * Copyright (C) 2011 Stefan Hajnoczi <stefanha@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ *
+ */
+
+#include <sound/core.h>
+#include <sound/pcm.h>
+
+#include "audio.h"
+#include "driver.h"
+#include "pcm.h"
+#include "podhd.h"
+
+#define PODHD_BYTES_PER_FRAME 6 /* 24bit audio (stereo) */
+
+static struct snd_ratden podhd_ratden = {
+ .num_min = 48000,
+ .num_max = 48000,
+ .num_step = 1,
+ .den = 1,
+};
+
+static struct line6_pcm_properties podhd_pcm_properties = {
+ .snd_line6_playback_hw = {
+ .info = (SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID |
+ SNDRV_PCM_INFO_PAUSE |
+#ifdef CONFIG_PM
+ SNDRV_PCM_INFO_RESUME |
+#endif
+ SNDRV_PCM_INFO_SYNC_START),
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .channels_min = 2,
+ .channels_max = 2,
+ .buffer_bytes_max = 60000,
+ .period_bytes_min = 64,
+ .period_bytes_max = 8192,
+ .periods_min = 1,
+ .periods_max = 1024},
+ .snd_line6_capture_hw = {
+ .info = (SNDRV_PCM_INFO_MMAP |
+ SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER |
+ SNDRV_PCM_INFO_MMAP_VALID |
+#ifdef CONFIG_PM
+ SNDRV_PCM_INFO_RESUME |
+#endif
+ SNDRV_PCM_INFO_SYNC_START),
+ .formats = SNDRV_PCM_FMTBIT_S24_3LE,
+ .rates = SNDRV_PCM_RATE_48000,
+ .rate_min = 48000,
+ .rate_max = 48000,
+ .channels_min = 2,
+ .channels_max = 2,
+ .buffer_bytes_max = 60000,
+ .period_bytes_min = 64,
+ .period_bytes_max = 8192,
+ .periods_min = 1,
+ .periods_max = 1024},
+ .snd_line6_rates = {
+ .nrats = 1,
+ .rats = &podhd_ratden},
+ .bytes_per_frame = PODHD_BYTES_PER_FRAME
+};
+
+/*
+ POD HD destructor.
+*/
+static void podhd_destruct(struct usb_interface *interface)
+{
+ struct usb_line6_podhd *podhd = usb_get_intfdata(interface);
+
+ if (podhd == NULL)
+ return;
+ line6_cleanup_audio(&podhd->line6);
+}
+
+/*
+ Try to init POD HD device.
+*/
+static int podhd_try_init(struct usb_interface *interface,
+ struct usb_line6_podhd *podhd)
+{
+ int err;
+ struct usb_line6 *line6 = &podhd->line6;
+
+ if ((interface == NULL) || (podhd == NULL))
+ return -ENODEV;
+
+ /* initialize audio system: */
+ err = line6_init_audio(line6);
+ if (err < 0)
+ return err;
+
+ /* initialize MIDI subsystem: */
+ err = line6_init_midi(line6);
+ if (err < 0)
+ return err;
+
+ /* initialize PCM subsystem: */
+ err = line6_init_pcm(line6, &podhd_pcm_properties);
+ if (err < 0)
+ return err;
+
+ /* register USB audio system: */
+ err = line6_register_audio(line6);
+ return err;
+}
+
+/*
+ Init POD HD device (and clean up in case of failure).
+*/
+int line6_podhd_init(struct usb_interface *interface,
+ struct usb_line6_podhd *podhd)
+{
+ int err = podhd_try_init(interface, podhd);
+
+ if (err < 0)
+ podhd_destruct(interface);
+
+ return err;
+}
+
+/*
+ POD HD device disconnected.
+*/
+void line6_podhd_disconnect(struct usb_interface *interface)
+{
+ struct usb_line6_podhd *podhd;
+
+ if (interface == NULL)
+ return;
+ podhd = usb_get_intfdata(interface);
+
+ if (podhd != NULL) {
+ struct snd_line6_pcm *line6pcm = podhd->line6.line6pcm;
+
+ if (line6pcm != NULL)
+ line6_pcm_disconnect(line6pcm);
+ }
+
+ podhd_destruct(interface);
+}
diff --git a/drivers/staging/line6/podhd.h b/drivers/staging/line6/podhd.h
new file mode 100644
index 0000000..652f740
--- /dev/null
+++ b/drivers/staging/line6/podhd.h
@@ -0,0 +1,30 @@
+/*
+ * Line6 Pod HD
+ *
+ * Copyright (C) 2011 Stefan Hajnoczi <stefanha@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ *
+ */
+
+#ifndef PODHD_H
+#define PODHD_H
+
+#include <linux/usb.h>
+
+#include "driver.h"
+
+struct usb_line6_podhd {
+ /**
+ Generic Line6 USB data.
+ */
+ struct usb_line6 line6;
+};
+
+extern void line6_podhd_disconnect(struct usb_interface *interface);
+extern int line6_podhd_init(struct usb_interface *interface,
+ struct usb_line6_podhd *podhd);
+
+#endif /* PODHD_H */
diff --git a/drivers/staging/line6/revision.h b/drivers/staging/line6/revision.h
index 350d0df..b4eee2b 100644
--- a/drivers/staging/line6/revision.h
+++ b/drivers/staging/line6/revision.h
@@ -1,4 +1,4 @@
#ifndef DRIVER_REVISION
/* current subversion revision */
-#define DRIVER_REVISION " (revision 690)"
+#define DRIVER_REVISION " (904)"
#endif
diff --git a/drivers/staging/line6/toneport.c b/drivers/staging/line6/toneport.c
index 879e699..f310578 100644
--- a/drivers/staging/line6/toneport.c
+++ b/drivers/staging/line6/toneport.c
@@ -295,14 +295,10 @@ static struct snd_kcontrol_new toneport_control_source = {
static void toneport_destruct(struct usb_interface *interface)
{
struct usb_line6_toneport *toneport = usb_get_intfdata(interface);
- struct usb_line6 *line6;
if (toneport == NULL)
return;
- line6 = &toneport->line6;
- if (line6 == NULL)
- return;
- line6_cleanup_audio(line6);
+ line6_cleanup_audio(&toneport->line6);
}
/*
diff --git a/drivers/staging/line6/usbdefs.h b/drivers/staging/line6/usbdefs.h
index c6dffe6..aff9e5c 100644
--- a/drivers/staging/line6/usbdefs.h
+++ b/drivers/staging/line6/usbdefs.h
@@ -24,6 +24,8 @@
#define LINE6_DEVID_BASSPODXTPRO 0x4252
#define LINE6_DEVID_GUITARPORT 0x4750
#define LINE6_DEVID_POCKETPOD 0x5051
+#define LINE6_DEVID_PODHD300 0x5057
+#define LINE6_DEVID_PODHD500 0x414D
#define LINE6_DEVID_PODSTUDIO_GX 0x4153
#define LINE6_DEVID_PODSTUDIO_UX1 0x4150
#define LINE6_DEVID_PODSTUDIO_UX2 0x4151
@@ -37,48 +39,71 @@
#define LINE6_DEVID_TONEPORT_UX2 0x4142
#define LINE6_DEVID_VARIAX 0x534d
-#define LINE6_BIT_BASSPODXT (1 << 0)
-#define LINE6_BIT_BASSPODXTLIVE (1 << 1)
-#define LINE6_BIT_BASSPODXTPRO (1 << 2)
-#define LINE6_BIT_GUITARPORT (1 << 3)
-#define LINE6_BIT_POCKETPOD (1 << 4)
-#define LINE6_BIT_PODSTUDIO_GX (1 << 5)
-#define LINE6_BIT_PODSTUDIO_UX1 (1 << 6)
-#define LINE6_BIT_PODSTUDIO_UX2 (1 << 7)
-#define LINE6_BIT_PODX3 (1 << 8)
-#define LINE6_BIT_PODX3LIVE (1 << 9)
-#define LINE6_BIT_PODXT (1 << 10)
-#define LINE6_BIT_PODXTLIVE (1 << 11)
-#define LINE6_BIT_PODXTPRO (1 << 12)
-#define LINE6_BIT_TONEPORT_GX (1 << 13)
-#define LINE6_BIT_TONEPORT_UX1 (1 << 14)
-#define LINE6_BIT_TONEPORT_UX2 (1 << 15)
-#define LINE6_BIT_VARIAX (1 << 16)
+enum {
+ LINE6_ID_BASSPODXT,
+ LINE6_ID_BASSPODXTLIVE,
+ LINE6_ID_BASSPODXTPRO,
+ LINE6_ID_GUITARPORT,
+ LINE6_ID_POCKETPOD,
+ LINE6_ID_PODHD300,
+ LINE6_ID_PODHD500,
+ LINE6_ID_PODSTUDIO_GX,
+ LINE6_ID_PODSTUDIO_UX1,
+ LINE6_ID_PODSTUDIO_UX2,
+ LINE6_ID_PODX3,
+ LINE6_ID_PODX3LIVE,
+ LINE6_ID_PODXT,
+ LINE6_ID_PODXTLIVE,
+ LINE6_ID_PODXTPRO,
+ LINE6_ID_TONEPORT_GX,
+ LINE6_ID_TONEPORT_UX1,
+ LINE6_ID_TONEPORT_UX2,
+ LINE6_ID_VARIAX
+};
-#define LINE6_BITS_PRO (LINE6_BIT_BASSPODXTPRO | \
- LINE6_BIT_PODXTPRO)
-#define LINE6_BITS_LIVE (LINE6_BIT_BASSPODXTLIVE | \
- LINE6_BIT_PODXTLIVE | \
- LINE6_BIT_PODX3LIVE)
-#define LINE6_BITS_PODXTALL (LINE6_BIT_PODXT | \
- LINE6_BIT_PODXTLIVE | \
- LINE6_BIT_PODXTPRO)
-#define LINE6_BITS_BASSPODXTALL (LINE6_BIT_BASSPODXT | \
- LINE6_BIT_BASSPODXTLIVE | \
- LINE6_BIT_BASSPODXTPRO)
+#define LINE6_BIT(x) LINE6_BIT_ ## x = 1 << LINE6_ID_ ## x
+
+enum {
+ LINE6_BIT(BASSPODXT),
+ LINE6_BIT(BASSPODXTLIVE),
+ LINE6_BIT(BASSPODXTPRO),
+ LINE6_BIT(GUITARPORT),
+ LINE6_BIT(POCKETPOD),
+ LINE6_BIT(PODHD300),
+ LINE6_BIT(PODHD500),
+ LINE6_BIT(PODSTUDIO_GX),
+ LINE6_BIT(PODSTUDIO_UX1),
+ LINE6_BIT(PODSTUDIO_UX2),
+ LINE6_BIT(PODX3),
+ LINE6_BIT(PODX3LIVE),
+ LINE6_BIT(PODXT),
+ LINE6_BIT(PODXTLIVE),
+ LINE6_BIT(PODXTPRO),
+ LINE6_BIT(TONEPORT_GX),
+ LINE6_BIT(TONEPORT_UX1),
+ LINE6_BIT(TONEPORT_UX2),
+ LINE6_BIT(VARIAX),
+
+ LINE6_BITS_PRO = LINE6_BIT_BASSPODXTPRO | LINE6_BIT_PODXTPRO,
+ LINE6_BITS_LIVE = LINE6_BIT_BASSPODXTLIVE | LINE6_BIT_PODXTLIVE | LINE6_BIT_PODX3LIVE,
+ LINE6_BITS_PODXTALL = LINE6_BIT_PODXT | LINE6_BIT_PODXTLIVE | LINE6_BIT_PODXTPRO,
+ LINE6_BITS_PODX3ALL = LINE6_BIT_PODX3 | LINE6_BIT_PODX3LIVE,
+ LINE6_BITS_PODHDALL = LINE6_BIT_PODHD300 | LINE6_BIT_PODHD500,
+ LINE6_BITS_BASSPODXTALL = LINE6_BIT_BASSPODXT | LINE6_BIT_BASSPODXTLIVE | LINE6_BIT_BASSPODXTPRO
+};
/* device supports settings parameter via USB */
-#define LINE6_BIT_CONTROL (1 << 0)
+#define LINE6_BIT_CONTROL (1 << 0)
/* device supports PCM input/output via USB */
-#define LINE6_BIT_PCM (1 << 1)
+#define LINE6_BIT_PCM (1 << 1)
/* device support hardware monitoring */
-#define LINE6_BIT_HWMON (1 << 2)
+#define LINE6_BIT_HWMON (1 << 2)
#define LINE6_BIT_CONTROL_PCM_HWMON (LINE6_BIT_CONTROL | \
LINE6_BIT_PCM | \
LINE6_BIT_HWMON)
-#define LINE6_FALLBACK_INTERVAL 10
-#define LINE6_FALLBACK_MAXPACKETSIZE 16
+#define LINE6_FALLBACK_INTERVAL 10
+#define LINE6_FALLBACK_MAXPACKETSIZE 16
#endif
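
The LINE6_BIT() macro introduced above pastes tokens so that every LINE6_BIT_<name> enumerator is defined as 1 << LINE6_ID_<name>, which keeps the bit masks in lockstep with the device-ID enum when new devices such as the POD HD models are added. A standalone illustration of the same technique (the ID_*/BIT_* names here are made up, not part of the driver):

#include <stdio.h>

enum { ID_FOO, ID_BAR, ID_BAZ };

/* Same token-pasting trick as LINE6_BIT(): BIT_x = 1 << ID_x */
#define DEF_BIT(x)	BIT_ ## x = 1 << ID_ ## x

enum { DEF_BIT(FOO), DEF_BIT(BAR), DEF_BIT(BAZ) };

int main(void)
{
	/* prints 1 2 4: each mask tracks its ID automatically */
	printf("%d %d %d\n", BIT_FOO, BIT_BAR, BIT_BAZ);
	return 0;
}
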
diff --git a/drivers/staging/line6/variax.c b/drivers/staging/line6/variax.c
index 81241cd..d366222 100644
--- a/drivers/staging/line6/variax.c
+++ b/drivers/staging/line6/variax.c
@@ -572,14 +572,10 @@ static DEVICE_ATTR(raw2, S_IWUSR, line6_nop_read, variax_set_raw2);
static void variax_destruct(struct usb_interface *interface)
{
struct usb_line6_variax *variax = usb_get_intfdata(interface);
- struct usb_line6 *line6;
if (variax == NULL)
return;
- line6 = &variax->line6;
- if (line6 == NULL)
- return;
- line6_cleanup_audio(line6);
+ line6_cleanup_audio(&variax->line6);
del_timer(&variax->startup_timer1);
del_timer(&variax->startup_timer2);
diff --git a/drivers/staging/media/go7007/go7007-usb.c b/drivers/staging/media/go7007/go7007-usb.c
index cffb0b3..5443e25 100644
--- a/drivers/staging/media/go7007/go7007-usb.c
+++ b/drivers/staging/media/go7007/go7007-usb.c
@@ -1278,17 +1278,5 @@ static struct usb_driver go7007_usb_driver = {
.id_table = go7007_usb_id_table,
};
-static int __init go7007_usb_init(void)
-{
- return usb_register(&go7007_usb_driver);
-}
-
-static void __exit go7007_usb_cleanup(void)
-{
- usb_deregister(&go7007_usb_driver);
-}
-
-module_init(go7007_usb_init);
-module_exit(go7007_usb_cleanup);
-
+module_usb_driver(go7007_usb_driver);
MODULE_LICENSE("GPL v2");
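
module_usb_driver() (declared in <linux/usb.h>) generates the registration boilerplate that this hunk and the later lirc hunks delete by hand. Roughly, it expands to the pattern below (an approximate sketch; my_driver is a placeholder for go7007_usb_driver and friends):

#include <linux/module.h>
#include <linux/usb.h>

static struct usb_driver my_driver = {
	.name = "my_driver",		/* placeholder driver */
};

/* What module_usb_driver(my_driver); boils down to: */
static int __init my_driver_init(void)
{
	return usb_register(&my_driver);
}
module_init(my_driver_init);

static void __exit my_driver_exit(void)
{
	usb_deregister(&my_driver);
}
module_exit(my_driver_exit);

MODULE_LICENSE("GPL");
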
diff --git a/drivers/staging/media/go7007/snd-go7007.c b/drivers/staging/media/go7007/snd-go7007.c
index deac938..d071c83 100644
--- a/drivers/staging/media/go7007/snd-go7007.c
+++ b/drivers/staging/media/go7007/snd-go7007.c
@@ -38,7 +38,7 @@
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
-static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
+static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;
module_param_array(index, int, NULL, 0444);
module_param_array(id, charp, NULL, 0444);
diff --git a/drivers/staging/media/lirc/lirc_bt829.c b/drivers/staging/media/lirc/lirc_bt829.c
index c5a0d27..4d20e9f 100644
--- a/drivers/staging/media/lirc/lirc_bt829.c
+++ b/drivers/staging/media/lirc/lirc_bt829.c
@@ -53,7 +53,7 @@ static unsigned char do_get_bits(void);
#define DRIVER_NAME "lirc_bt829"
-static int debug;
+static bool debug;
#define dprintk(fmt, args...) \
do { \
if (debug) \
diff --git a/drivers/staging/media/lirc/lirc_igorplugusb.c b/drivers/staging/media/lirc/lirc_igorplugusb.c
index 0dc2c2b..7a25017 100644
--- a/drivers/staging/media/lirc/lirc_igorplugusb.c
+++ b/drivers/staging/media/lirc/lirc_igorplugusb.c
@@ -62,9 +62,9 @@
/* debugging support */
#ifdef CONFIG_USB_DEBUG
-static int debug = 1;
+static bool debug = 1;
#else
-static int debug;
+static bool debug;
#endif
#define dprintk(fmt, args...) \
@@ -541,26 +541,7 @@ static struct usb_driver igorplugusb_remote_driver = {
.id_table = igorplugusb_remote_id_table
};
-static int __init igorplugusb_remote_init(void)
-{
- int ret = 0;
-
- dprintk(DRIVER_NAME ": loaded, debug mode enabled\n");
-
- ret = usb_register(&igorplugusb_remote_driver);
- if (ret)
- printk(KERN_ERR DRIVER_NAME ": usb register failed!\n");
-
- return ret;
-}
-
-static void __exit igorplugusb_remote_exit(void)
-{
- usb_deregister(&igorplugusb_remote_driver);
-}
-
-module_init(igorplugusb_remote_init);
-module_exit(igorplugusb_remote_exit);
+module_usb_driver(igorplugusb_remote_driver);
#include <linux/vermagic.h>
MODULE_INFO(vermagic, VERMAGIC_STRING);
diff --git a/drivers/staging/media/lirc/lirc_imon.c b/drivers/staging/media/lirc/lirc_imon.c
index c1654b1..5f7f8cd 100644
--- a/drivers/staging/media/lirc/lirc_imon.c
+++ b/drivers/staging/media/lirc/lirc_imon.c
@@ -1025,26 +1025,4 @@ static int imon_resume(struct usb_interface *intf)
return rc;
}
-static int __init imon_init(void)
-{
- int rc;
-
- printk(KERN_INFO MOD_NAME ": " MOD_DESC ", v" MOD_VERSION "\n");
-
- rc = usb_register(&imon_driver);
- if (rc) {
- err("%s: usb register failed(%d)", __func__, rc);
- return -ENODEV;
- }
-
- return 0;
-}
-
-static void __exit imon_exit(void)
-{
- usb_deregister(&imon_driver);
- printk(KERN_INFO MOD_NAME ": module removed. Goodbye!\n");
-}
-
-module_init(imon_init);
-module_exit(imon_exit);
+module_usb_driver(imon_driver);
diff --git a/drivers/staging/media/lirc/lirc_parallel.c b/drivers/staging/media/lirc/lirc_parallel.c
index 792aac0..dd2bca7 100644
--- a/drivers/staging/media/lirc/lirc_parallel.c
+++ b/drivers/staging/media/lirc/lirc_parallel.c
@@ -63,8 +63,8 @@
/*** Global Variables ***/
-static int debug;
-static int check_pselecd;
+static bool debug;
+static bool check_pselecd;
unsigned int irq = LIRC_IRQ;
unsigned int io = LIRC_PORT;
@@ -752,4 +752,4 @@ module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable debugging messages");
module_param(check_pselecd, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Check for printer (default: 0)");
+MODULE_PARM_DESC(check_pselecd, "Check for printer (default: 0)");
diff --git a/drivers/staging/media/lirc/lirc_sasem.c b/drivers/staging/media/lirc/lirc_sasem.c
index a2d18b0..7855baa 100644
--- a/drivers/staging/media/lirc/lirc_sasem.c
+++ b/drivers/staging/media/lirc/lirc_sasem.c
@@ -913,27 +913,4 @@ static void sasem_disconnect(struct usb_interface *interface)
mutex_unlock(&disconnect_lock);
}
-static int __init sasem_init(void)
-{
- int rc;
-
- printk(KERN_INFO MOD_DESC ", v" MOD_VERSION "\n");
- printk(KERN_INFO MOD_AUTHOR "\n");
-
- rc = usb_register(&sasem_driver);
- if (rc < 0) {
- err("%s: usb register failed (%d)", __func__, rc);
- return -ENODEV;
- }
- return 0;
-}
-
-static void __exit sasem_exit(void)
-{
- usb_deregister(&sasem_driver);
- printk(KERN_INFO "module removed. Goodbye!\n");
-}
-
-
-module_init(sasem_init);
-module_exit(sasem_exit);
+module_usb_driver(sasem_driver);
diff --git a/drivers/staging/media/lirc/lirc_serial.c b/drivers/staging/media/lirc/lirc_serial.c
index 0ca308a..8dd8897 100644
--- a/drivers/staging/media/lirc/lirc_serial.c
+++ b/drivers/staging/media/lirc/lirc_serial.c
@@ -107,13 +107,13 @@ struct lirc_serial {
static int type;
static int io;
static int irq;
-static int iommap;
+static bool iommap;
static int ioshift;
-static int softcarrier = 1;
-static int share_irq;
-static int debug;
+static bool softcarrier = 1;
+static bool share_irq;
+static bool debug;
static int sense = -1; /* -1 = auto, 0 = active high, 1 = active low */
-static int txsense; /* 0 = active high, 1 = active low */
+static bool txsense; /* 0 = active high, 1 = active low */
#define dprintk(fmt, args...) \
do { \
diff --git a/drivers/staging/media/lirc/lirc_sir.c b/drivers/staging/media/lirc/lirc_sir.c
index 6903d39..c94382b 100644
--- a/drivers/staging/media/lirc/lirc_sir.c
+++ b/drivers/staging/media/lirc/lirc_sir.c
@@ -173,7 +173,7 @@ static DEFINE_SPINLOCK(hardware_lock);
static int rx_buf[RBUF_LEN];
static unsigned int rx_tail, rx_head;
-static int debug;
+static bool debug;
#define dprintk(fmt, args...) \
do { \
if (debug) \
diff --git a/drivers/staging/media/lirc/lirc_ttusbir.c b/drivers/staging/media/lirc/lirc_ttusbir.c
index e4b329b..7950887 100644
--- a/drivers/staging/media/lirc/lirc_ttusbir.c
+++ b/drivers/staging/media/lirc/lirc_ttusbir.c
@@ -372,24 +372,4 @@ static void disconnect(struct usb_interface *intf)
kfree(ttusbir);
}
-static int ttusbir_init_module(void)
-{
- int result;
-
- DPRINTK(KERN_DEBUG "Module ttusbir init\n");
-
- /* register this driver with the USB subsystem */
- result = usb_register(&usb_driver);
- if (result)
- err("usb_register failed. Error number %d", result);
- return result;
-}
-
-static void ttusbir_exit_module(void)
-{
- printk(KERN_DEBUG "Module ttusbir exit\n");
- usb_deregister(&usb_driver);
-}
-
-module_init(ttusbir_init_module);
-module_exit(ttusbir_exit_module);
+module_usb_driver(usb_driver);
diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
index 0302d82..76ea4a8 100644
--- a/drivers/staging/media/lirc/lirc_zilog.c
+++ b/drivers/staging/media/lirc/lirc_zilog.c
@@ -155,8 +155,8 @@ static struct mutex tx_data_lock;
#define zilog_info(s, args...) printk(KERN_INFO KBUILD_MODNAME ": " s, ## args)
/* module parameters */
-static int debug; /* debug output */
-static int tx_only; /* only handle the IR Tx function */
+static bool debug; /* debug output */
+static bool tx_only; /* only handle the IR Tx function */
static int minor = -1; /* minor number */
#define dprintk(fmt, args...) \
diff --git a/drivers/staging/mei/init.c b/drivers/staging/mei/init.c
index 8bf3479..4ac3696 100644
--- a/drivers/staging/mei/init.c
+++ b/drivers/staging/mei/init.c
@@ -38,7 +38,6 @@ void mei_io_list_init(struct mei_io_list *list)
{
/* initialize our queue list */
INIT_LIST_HEAD(&list->mei_cb.cb_list);
- list->status = 0;
}
/**
@@ -49,22 +48,15 @@ void mei_io_list_init(struct mei_io_list *list)
*/
void mei_io_list_flush(struct mei_io_list *list, struct mei_cl *cl)
{
- struct mei_cl_cb *cb_pos = NULL;
- struct mei_cl_cb *cb_next = NULL;
+ struct mei_cl_cb *pos;
+ struct mei_cl_cb *next;
- if (list->status != 0)
- return;
-
- if (list_empty(&list->mei_cb.cb_list))
- return;
-
- list_for_each_entry_safe(cb_pos, cb_next,
- &list->mei_cb.cb_list, cb_list) {
- if (cb_pos) {
+ list_for_each_entry_safe(pos, next, &list->mei_cb.cb_list, cb_list) {
+ if (pos->file_private) {
struct mei_cl *cl_tmp;
- cl_tmp = (struct mei_cl *)cb_pos->file_private;
+ cl_tmp = (struct mei_cl *)pos->file_private;
if (mei_cl_cmp_id(cl, cl_tmp))
- list_del(&cb_pos->cb_list);
+ list_del(&pos->cb_list);
}
}
}
@@ -338,16 +330,10 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
}
}
/* remove all waiting requests */
- if (dev->write_list.status == 0 &&
- !list_empty(&dev->write_list.mei_cb.cb_list)) {
- list_for_each_entry_safe(cb_pos, cb_next,
- &dev->write_list.mei_cb.cb_list, cb_list) {
- if (cb_pos) {
- list_del(&cb_pos->cb_list);
- mei_free_cb_private(cb_pos);
- cb_pos = NULL;
- }
- }
+ list_for_each_entry_safe(cb_pos, cb_next,
+ &dev->write_list.mei_cb.cb_list, cb_list) {
+ list_del(&cb_pos->cb_list);
+ mei_free_cb_private(cb_pos);
}
}
@@ -380,8 +366,7 @@ void mei_host_start_message(struct mei_device *dev)
host_start_req->host_version.major_version = HBM_MAJOR_VERSION;
host_start_req->host_version.minor_version = HBM_MINOR_VERSION;
dev->recvd_msg = false;
- if (!mei_write_message(dev, mei_hdr,
- (unsigned char *) (host_start_req),
+ if (!mei_write_message(dev, mei_hdr, (unsigned char *)host_start_req,
mei_hdr->length)) {
dev_dbg(&dev->pdev->dev, "write send version message to FW fail.\n");
dev->mei_state = MEI_RESETING;
@@ -414,8 +399,7 @@ void mei_host_enum_clients_message(struct mei_device *dev)
host_enum_req = (struct hbm_host_enum_request *) &dev->wr_msg_buf[1];
memset(host_enum_req, 0, sizeof(struct hbm_host_enum_request));
host_enum_req->cmd.cmd = HOST_ENUM_REQ_CMD;
- if (!mei_write_message(dev, mei_hdr,
- (unsigned char *) (host_enum_req),
+ if (!mei_write_message(dev, mei_hdr, (unsigned char *)host_enum_req,
mei_hdr->length)) {
dev->mei_state = MEI_RESETING;
dev_dbg(&dev->pdev->dev, "write send enumeration request message to FW fail.\n");
@@ -605,15 +589,10 @@ void mei_host_init_iamthif(struct mei_device *dev)
return;
}
- /* Do not render the system unusable when iamthif_mtu is not equal to
- the value received from ME.
- Assign iamthif_mtu to the value received from ME in order to solve the
- hardware macro incompatibility. */
+ /* Assign iamthif_mtu to the value received from ME */
- dev_dbg(&dev->pdev->dev, "[DEFAULT] IAMTHIF = %d\n", dev->iamthif_mtu);
dev->iamthif_mtu = dev->me_clients[i].props.max_msg_length;
- dev_dbg(&dev->pdev->dev,
- "IAMTHIF = %d\n",
+ dev_dbg(&dev->pdev->dev, "IAMTHIF_MTU = %d\n",
dev->me_clients[i].props.max_msg_length);
kfree(dev->iamthif_msg_buf);
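
The guards removed throughout the mei hunks (the list->status checks and the explicit list_empty() tests before iterating) are redundant: list_for_each_entry_safe() simply runs zero iterations on an empty list, and its cached next pointer makes deleting the current entry safe. A generic kernel-style sketch of the pattern (struct my_cb and my_flush() are illustrative names, not mei's):

#include <linux/list.h>
#include <linux/slab.h>

struct my_cb {			/* stands in for struct mei_cl_cb */
	struct list_head list;
	void *owner;
};

/* Drop every callback that belongs to @owner; no emptiness check needed. */
static void my_flush(struct list_head *head, void *owner)
{
	struct my_cb *pos, *next;

	list_for_each_entry_safe(pos, next, head, list) {
		if (pos->owner != owner)
			continue;
		list_del(&pos->list);	/* safe: 'next' was cached before this */
		kfree(pos);
	}
}
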
diff --git a/drivers/staging/mei/interface.c b/drivers/staging/mei/interface.c
index a65dacf..eb5df7f 100644
--- a/drivers/staging/mei/interface.c
+++ b/drivers/staging/mei/interface.c
@@ -128,9 +128,9 @@ int mei_count_empty_write_slots(struct mei_device *dev)
* returns 1 if success, 0 - otherwise.
*/
int mei_write_message(struct mei_device *dev,
- struct mei_msg_hdr *header,
- unsigned char *write_buffer,
- unsigned long write_length)
+ struct mei_msg_hdr *header,
+ unsigned char *write_buffer,
+ unsigned long write_length)
{
u32 temp_msg = 0;
unsigned long bytes_written = 0;
@@ -216,7 +216,7 @@ int mei_count_full_read_slots(struct mei_device *dev)
* @buffer_length: message size will be read
*/
void mei_read_slots(struct mei_device *dev,
- unsigned char *buffer, unsigned long buffer_length)
+ unsigned char *buffer, unsigned long buffer_length)
{
u32 i = 0;
unsigned char temp_buf[sizeof(u32)];
diff --git a/drivers/staging/mei/interface.h b/drivers/staging/mei/interface.h
index 7bd38ae..aeae511 100644
--- a/drivers/staging/mei/interface.h
+++ b/drivers/staging/mei/interface.h
@@ -52,6 +52,17 @@ int mei_wd_send(struct mei_device *dev);
int mei_wd_stop(struct mei_device *dev, bool preserve);
bool mei_wd_host_init(struct mei_device *dev);
void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout);
+/*
+ * mei_watchdog_register - Registering watchdog interface
+ * once we got connection to the WD Client
+ * @dev - mei device
+ */
+void mei_watchdog_register(struct mei_device *dev);
+/*
+ * mei_watchdog_unregister - Unregistering watchdog interface
+ * @dev - mei device
+ */
+void mei_watchdog_unregister(struct mei_device *dev);
int mei_flow_ctrl_reduce(struct mei_device *dev, struct mei_cl *cl);
diff --git a/drivers/staging/mei/interrupt.c b/drivers/staging/mei/interrupt.c
index 882d106..3544fee 100644
--- a/drivers/staging/mei/interrupt.c
+++ b/drivers/staging/mei/interrupt.c
@@ -198,8 +198,7 @@ static int mei_irq_thread_read_client_message(struct mei_io_list *complete_list,
unsigned char *buffer = NULL;
dev_dbg(&dev->pdev->dev, "start client msg\n");
- if (!(dev->read_list.status == 0 &&
- !list_empty(&dev->read_list.mei_cb.cb_list)))
+ if (list_empty(&dev->read_list.mei_cb.cb_list))
goto quit;
list_for_each_entry_safe(cb_pos, cb_next,
@@ -210,9 +209,6 @@ static int mei_irq_thread_read_client_message(struct mei_io_list *complete_list,
buffer = (unsigned char *)
(cb_pos->response_buffer.data +
cb_pos->information);
- BUG_ON(cb_pos->response_buffer.size <
- mei_hdr->length +
- cb_pos->information);
if (cb_pos->response_buffer.size <
mei_hdr->length + cb_pos->information) {
@@ -390,24 +386,10 @@ static void mei_client_connect_response(struct mei_device *dev,
/* if WD or iamthif client treat specially */
if (is_treat_specially_client(&(dev->wd_cl), rs)) {
- dev_dbg(&dev->pdev->dev, "dev->wd_timeout =%d.\n",
- dev->wd_timeout);
-
- dev->wd_due_counter = (dev->wd_timeout) ? 1 : 0;
-
dev_dbg(&dev->pdev->dev, "successfully connected to WD client.\n");
+ mei_watchdog_register(dev);
- /* Registering watchdog interface device once we got connection
- to the WD Client
- */
- if (watchdog_register_device(&amt_wd_dev)) {
- printk(KERN_ERR "mei: unable to register watchdog device.\n");
- dev->wd_interface_reg = false;
- } else {
- dev_dbg(&dev->pdev->dev, "successfully register watchdog interface.\n");
- dev->wd_interface_reg = true;
- }
-
+ /* next step in the state machine */
mei_host_init_iamthif(dev);
return;
}
@@ -416,22 +398,20 @@ static void mei_client_connect_response(struct mei_device *dev,
dev->iamthif_state = MEI_IAMTHIF_IDLE;
return;
}
- if (!dev->ctrl_rd_list.status &&
- !list_empty(&dev->ctrl_rd_list.mei_cb.cb_list)) {
- list_for_each_entry_safe(cb_pos, cb_next,
- &dev->ctrl_rd_list.mei_cb.cb_list, cb_list) {
- cl = (struct mei_cl *)cb_pos->file_private;
- if (!cl) {
+ list_for_each_entry_safe(cb_pos, cb_next,
+ &dev->ctrl_rd_list.mei_cb.cb_list, cb_list) {
+
+ cl = (struct mei_cl *)cb_pos->file_private;
+ if (!cl) {
+ list_del(&cb_pos->cb_list);
+ return;
+ }
+ if (MEI_IOCTL == cb_pos->major_file_operations) {
+ if (is_treat_specially_client(cl, rs)) {
list_del(&cb_pos->cb_list);
- return;
- }
- if (MEI_IOCTL == cb_pos->major_file_operations) {
- if (is_treat_specially_client(cl, rs)) {
- list_del(&cb_pos->cb_list);
- cl->status = 0;
- cl->timer_count = 0;
- break;
- }
+ cl->status = 0;
+ cl->timer_count = 0;
+ break;
}
}
}
@@ -458,29 +438,26 @@ static void mei_client_disconnect_response(struct mei_device *dev,
rs->host_addr,
rs->status);
- if (!dev->ctrl_rd_list.status &&
- !list_empty(&dev->ctrl_rd_list.mei_cb.cb_list)) {
- list_for_each_entry_safe(cb_pos, cb_next,
- &dev->ctrl_rd_list.mei_cb.cb_list, cb_list) {
- cl = (struct mei_cl *)cb_pos->file_private;
+ list_for_each_entry_safe(cb_pos, cb_next,
+ &dev->ctrl_rd_list.mei_cb.cb_list, cb_list) {
+ cl = (struct mei_cl *)cb_pos->file_private;
- if (!cl) {
- list_del(&cb_pos->cb_list);
- return;
- }
+ if (!cl) {
+ list_del(&cb_pos->cb_list);
+ return;
+ }
- dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in ctrl_rd_list.\n");
- if (cl->host_client_id == rs->host_addr &&
- cl->me_client_id == rs->me_addr) {
+ dev_dbg(&dev->pdev->dev, "list_for_each_entry_safe in ctrl_rd_list.\n");
+ if (cl->host_client_id == rs->host_addr &&
+ cl->me_client_id == rs->me_addr) {
- list_del(&cb_pos->cb_list);
- if (!rs->status)
- cl->state = MEI_FILE_DISCONNECTED;
+ list_del(&cb_pos->cb_list);
+ if (!rs->status)
+ cl->state = MEI_FILE_DISCONNECTED;
- cl->status = 0;
- cl->timer_count = 0;
- break;
- }
+ cl->status = 0;
+ cl->timer_count = 0;
+ break;
}
}
}
@@ -718,7 +695,7 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
case CLIENT_DISCONNECT_RES_CMD:
disconnect_res =
(struct hbm_client_connect_response *) mei_msg;
- mei_client_disconnect_response(dev, disconnect_res);
+ mei_client_disconnect_response(dev, disconnect_res);
dev_dbg(&dev->pdev->dev, "client disconnect response message received.\n");
wake_up(&dev->wait_recvd_msg);
break;
@@ -736,7 +713,7 @@ static void mei_irq_thread_read_bus_message(struct mei_device *dev,
mei_reset(dev, 1);
return;
}
- if (dev->me_clients[dev->me_client_presentation_num]
+ if (dev->me_clients[dev->me_client_presentation_num]
.client_id == props_res->address) {
dev->me_clients[dev->me_client_presentation_num].props
@@ -1228,7 +1205,7 @@ static int mei_irq_thread_write_handler(struct mei_io_list *cmpl_list,
{
struct mei_cl *cl;
- struct mei_cl_cb *cb_pos = NULL, *cb_next = NULL;
+ struct mei_cl_cb *pos = NULL, *next = NULL;
struct mei_io_list *list;
int ret;
@@ -1241,36 +1218,31 @@ static int mei_irq_thread_write_handler(struct mei_io_list *cmpl_list,
dev_dbg(&dev->pdev->dev, "complete all waiting for write cb.\n");
list = &dev->write_waiting_list;
- if (!list->status && !list_empty(&list->mei_cb.cb_list)) {
- list_for_each_entry_safe(cb_pos, cb_next,
- &list->mei_cb.cb_list, cb_list) {
- cl = (struct mei_cl *)cb_pos->file_private;
- if (cl) {
- cl->status = 0;
- list_del(&cb_pos->cb_list);
- if (MEI_WRITING == cl->writing_state &&
- (cb_pos->major_file_operations ==
- MEI_WRITE) &&
- (cl != &dev->iamthif_cl)) {
- dev_dbg(&dev->pdev->dev,
- "MEI WRITE COMPLETE\n");
- cl->writing_state =
- MEI_WRITE_COMPLETE;
- list_add_tail(&cb_pos->cb_list,
- &cmpl_list->mei_cb.cb_list);
- }
- if (cl == &dev->iamthif_cl) {
- dev_dbg(&dev->pdev->dev, "check iamthif flow control.\n");
- if (dev->iamthif_flow_control_pending) {
- ret =
- _mei_irq_thread_iamthif_read(
- dev, slots);
- if (ret)
- return ret;
- }
- }
+ list_for_each_entry_safe(pos, next,
+ &list->mei_cb.cb_list, cb_list) {
+ cl = (struct mei_cl *)pos->file_private;
+ if (cl == NULL)
+ continue;
+
+ cl->status = 0;
+ list_del(&pos->cb_list);
+ if (MEI_WRITING == cl->writing_state &&
+ (pos->major_file_operations == MEI_WRITE) &&
+ (cl != &dev->iamthif_cl)) {
+ dev_dbg(&dev->pdev->dev,
+ "MEI WRITE COMPLETE\n");
+ cl->writing_state = MEI_WRITE_COMPLETE;
+ list_add_tail(&pos->cb_list,
+ &cmpl_list->mei_cb.cb_list);
+ }
+ if (cl == &dev->iamthif_cl) {
+ dev_dbg(&dev->pdev->dev, "check iamthif flow control.\n");
+ if (dev->iamthif_flow_control_pending) {
+ ret = _mei_irq_thread_iamthif_read(
+ dev, slots);
+ if (ret)
+ return ret;
}
-
}
}
@@ -1317,101 +1289,88 @@ static int mei_irq_thread_write_handler(struct mei_io_list *cmpl_list,
return ~ENODEV;
/* complete control write list CB */
- if (!dev->ctrl_wr_list.status) {
- /* complete control write list CB */
- dev_dbg(&dev->pdev->dev, "complete control write list cb.\n");
- list_for_each_entry_safe(cb_pos, cb_next,
+ dev_dbg(&dev->pdev->dev, "complete control write list cb.\n");
+ list_for_each_entry_safe(pos, next,
&dev->ctrl_wr_list.mei_cb.cb_list, cb_list) {
- cl = (struct mei_cl *)
- cb_pos->file_private;
- if (!cl) {
- list_del(&cb_pos->cb_list);
- return -ENODEV;
- }
- switch (cb_pos->major_file_operations) {
- case MEI_CLOSE:
- /* send disconnect message */
- ret = _mei_irq_thread_close(dev, slots,
- cb_pos, cl, cmpl_list);
- if (ret)
- return ret;
-
- break;
- case MEI_READ:
- /* send flow control message */
- ret = _mei_irq_thread_read(dev, slots,
- cb_pos, cl, cmpl_list);
- if (ret)
- return ret;
+ cl = (struct mei_cl *) pos->file_private;
+ if (!cl) {
+ list_del(&pos->cb_list);
+ return -ENODEV;
+ }
+ switch (pos->major_file_operations) {
+ case MEI_CLOSE:
+ /* send disconnect message */
+ ret = _mei_irq_thread_close(dev, slots, pos, cl, cmpl_list);
+ if (ret)
+ return ret;
- break;
- case MEI_IOCTL:
- /* connect message */
- if (!mei_other_client_is_connecting(dev,
- cl))
- continue;
- ret = _mei_irq_thread_ioctl(dev, slots,
- cb_pos, cl, cmpl_list);
- if (ret)
- return ret;
+ break;
+ case MEI_READ:
+ /* send flow control message */
+ ret = _mei_irq_thread_read(dev, slots, pos, cl, cmpl_list);
+ if (ret)
+ return ret;
- break;
+ break;
+ case MEI_IOCTL:
+ /* connect message */
+ if (mei_other_client_is_connecting(dev, cl))
+ continue;
+ ret = _mei_irq_thread_ioctl(dev, slots, pos, cl, cmpl_list);
+ if (ret)
+ return ret;
- default:
- BUG();
- }
+ break;
+ default:
+ BUG();
}
+
}
/* complete write list CB */
- if (!dev->write_list.status &&
- !list_empty(&dev->write_list.mei_cb.cb_list)) {
- dev_dbg(&dev->pdev->dev, "complete write list cb.\n");
- list_for_each_entry_safe(cb_pos, cb_next,
- &dev->write_list.mei_cb.cb_list, cb_list) {
- cl = (struct mei_cl *)cb_pos->file_private;
-
- if (cl) {
- if (cl != &dev->iamthif_cl) {
- if (!mei_flow_ctrl_creds(dev,
- cl)) {
- dev_dbg(&dev->pdev->dev,
- "No flow control"
- " credentials for client"
- " %d, not sending.\n",
- cl->host_client_id);
- continue;
- }
- ret = _mei_irq_thread_cmpl(dev, slots,
- cb_pos,
- cl, cmpl_list);
- if (ret)
- return ret;
-
- } else if (cl == &dev->iamthif_cl) {
- /* IAMTHIF IOCTL */
- dev_dbg(&dev->pdev->dev, "complete amthi write cb.\n");
- if (!mei_flow_ctrl_creds(dev,
- cl)) {
- dev_dbg(&dev->pdev->dev,
- "No flow control"
- " credentials for amthi"
- " client %d.\n",
- cl->host_client_id);
- continue;
- }
- ret = _mei_irq_thread_cmpl_iamthif(dev,
- slots,
- cb_pos,
- cl,
- cmpl_list);
- if (ret)
- return ret;
-
- }
+ dev_dbg(&dev->pdev->dev, "complete write list cb.\n");
+ list_for_each_entry_safe(pos, next,
+ &dev->write_list.mei_cb.cb_list, cb_list) {
+ cl = (struct mei_cl *)pos->file_private;
+ if (cl == NULL)
+ continue;
+
+ if (cl != &dev->iamthif_cl) {
+ if (!mei_flow_ctrl_creds(dev, cl)) {
+ dev_dbg(&dev->pdev->dev,
+ "No flow control"
+ " credentials for client"
+ " %d, not sending.\n",
+ cl->host_client_id);
+ continue;
+ }
+ ret = _mei_irq_thread_cmpl(dev, slots,
+ pos,
+ cl, cmpl_list);
+ if (ret)
+ return ret;
+
+ } else if (cl == &dev->iamthif_cl) {
+ /* IAMTHIF IOCTL */
+ dev_dbg(&dev->pdev->dev, "complete amthi write cb.\n");
+ if (!mei_flow_ctrl_creds(dev, cl)) {
+ dev_dbg(&dev->pdev->dev,
+ "No flow control"
+ " credentials for amthi"
+ " client %d.\n",
+ cl->host_client_id);
+ continue;
}
+ ret = _mei_irq_thread_cmpl_iamthif(dev,
+ slots,
+ pos,
+ cl,
+ cmpl_list);
+ if (ret)
+ return ret;
}
+
}
return 0;
}
@@ -1502,18 +1461,13 @@ void mei_timer(struct work_struct *work)
amthi_complete_list = &dev->amthi_read_complete_list.
mei_cb.cb_list;
- if (!list_empty(amthi_complete_list)) {
-
- list_for_each_entry_safe(cb_pos, cb_next,
- amthi_complete_list,
- cb_list) {
+ list_for_each_entry_safe(cb_pos, cb_next, amthi_complete_list, cb_list) {
- cl_pos = cb_pos->file_object->private_data;
+ cl_pos = cb_pos->file_object->private_data;
- /* Finding the AMTHI entry. */
- if (cl_pos == &dev->iamthif_cl)
- list_del(&cb_pos->cb_list);
- }
+ /* Finding the AMTHI entry. */
+ if (cl_pos == &dev->iamthif_cl)
+ list_del(&cb_pos->cb_list);
}
if (dev->iamthif_current_cb)
mei_free_cb_private(dev->iamthif_current_cb);
@@ -1527,8 +1481,8 @@ void mei_timer(struct work_struct *work)
}
}
out:
- schedule_delayed_work(&dev->timer_work, 2 * HZ);
- mutex_unlock(&dev->device_lock);
+ schedule_delayed_work(&dev->timer_work, 2 * HZ);
+ mutex_unlock(&dev->device_lock);
}
/**
@@ -1624,7 +1578,7 @@ end:
wake_up_interruptible(&dev->wait_recvd_msg);
bus_message_received = false;
}
- if (complete_list.status || list_empty(&complete_list.mei_cb.cb_list))
+ if (list_empty(&complete_list.mei_cb.cb_list))
return IRQ_HANDLED;
diff --git a/drivers/staging/mei/iorw.c b/drivers/staging/mei/iorw.c
index 8a61d12..0752ead 100644
--- a/drivers/staging/mei/iorw.c
+++ b/drivers/staging/mei/iorw.c
@@ -228,18 +228,15 @@ struct mei_cl_cb *find_amthi_read_list_entry(
struct file *file)
{
struct mei_cl *cl_temp;
- struct mei_cl_cb *cb_pos = NULL;
- struct mei_cl_cb *cb_next = NULL;
-
- if (!dev->amthi_read_complete_list.status &&
- !list_empty(&dev->amthi_read_complete_list.mei_cb.cb_list)) {
- list_for_each_entry_safe(cb_pos, cb_next,
- &dev->amthi_read_complete_list.mei_cb.cb_list, cb_list) {
- cl_temp = (struct mei_cl *)cb_pos->file_private;
- if (cl_temp && cl_temp == &dev->iamthif_cl &&
- cb_pos->file_object == file)
- return cb_pos;
- }
+ struct mei_cl_cb *pos = NULL;
+ struct mei_cl_cb *next = NULL;
+
+ list_for_each_entry_safe(pos, next,
+ &dev->amthi_read_complete_list.mei_cb.cb_list, cb_list) {
+ cl_temp = (struct mei_cl *)pos->file_private;
+ if (cl_temp && cl_temp == &dev->iamthif_cl &&
+ pos->file_object == file)
+ return pos;
}
return NULL;
}
@@ -262,7 +259,7 @@ struct mei_cl_cb *find_amthi_read_list_entry(
* negative on failure.
*/
int amthi_read(struct mei_device *dev, struct file *file,
- char __user *ubuf, size_t length, loff_t *offset)
+ char __user *ubuf, size_t length, loff_t *offset)
{
int rets;
int wait_ret;
@@ -334,8 +331,7 @@ int amthi_read(struct mei_device *dev, struct file *file,
}
}
/* if the whole message will fit remove it from the list */
- if (cb->information >= *offset &&
- length >= (cb->information - *offset))
+ if (cb->information >= *offset && length >= (cb->information - *offset))
list_del(&cb->cb_list);
else if (cb->information > 0 && cb->information <= *offset) {
/* end of the message has been reached */
@@ -356,9 +352,7 @@ int amthi_read(struct mei_device *dev, struct file *file,
* the information may be longer */
length = min_t(size_t, length, (cb->information - *offset));
- if (copy_to_user(ubuf,
- cb->response_buffer.data + *offset,
- length))
+ if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length))
rets = -EFAULT;
else {
rets = length;
@@ -427,7 +421,7 @@ int mei_start_read(struct mei_device *dev, struct mei_cl *cl)
cb->response_buffer.size = dev->me_clients[i].props.max_msg_length;
cb->response_buffer.data =
- kmalloc(cb->response_buffer.size, GFP_KERNEL);
+ kmalloc(cb->response_buffer.size, GFP_KERNEL);
if (!cb->response_buffer.data) {
rets = -ENOMEM;
goto unlock;
@@ -448,8 +442,7 @@ int mei_start_read(struct mei_device *dev, struct mei_cl *cl)
&dev->read_list.mei_cb.cb_list);
}
} else {
- list_add_tail(&cb->cb_list,
- &dev->ctrl_wr_list.mei_cb.cb_list);
+ list_add_tail(&cb->cb_list, &dev->ctrl_wr_list.mei_cb.cb_list);
}
return rets;
unlock:
@@ -482,7 +475,7 @@ int amthi_write(struct mei_device *dev, struct mei_cl_cb *cb)
dev->iamthif_ioctl = true;
dev->iamthif_msg_buf_size = cb->request_buffer.size;
memcpy(dev->iamthif_msg_buf, cb->request_buffer.data,
- cb->request_buffer.size);
+ cb->request_buffer.size);
ret = mei_flow_ctrl_creds(dev, &dev->iamthif_cl);
if (ret < 0)
@@ -534,8 +527,7 @@ int amthi_write(struct mei_device *dev, struct mei_cl_cb *cb)
dev_dbg(&dev->pdev->dev, "No flow control credentials, "
"so add iamthif cb to write list.\n");
- list_add_tail(&cb->cb_list,
- &dev->write_list.mei_cb.cb_list);
+ list_add_tail(&cb->cb_list, &dev->write_list.mei_cb.cb_list);
}
return 0;
}
@@ -550,8 +542,8 @@ int amthi_write(struct mei_device *dev, struct mei_cl_cb *cb)
void mei_run_next_iamthif_cmd(struct mei_device *dev)
{
struct mei_cl *cl_tmp;
- struct mei_cl_cb *cb_pos = NULL;
- struct mei_cl_cb *cb_next = NULL;
+ struct mei_cl_cb *pos = NULL;
+ struct mei_cl_cb *next = NULL;
int status;
if (!dev)
@@ -565,25 +557,22 @@ void mei_run_next_iamthif_cmd(struct mei_device *dev)
dev->iamthif_timer = 0;
dev->iamthif_file_object = NULL;
- if (dev->amthi_cmd_list.status == 0 &&
- !list_empty(&dev->amthi_cmd_list.mei_cb.cb_list)) {
- dev_dbg(&dev->pdev->dev, "complete amthi cmd_list cb.\n");
-
- list_for_each_entry_safe(cb_pos, cb_next,
- &dev->amthi_cmd_list.mei_cb.cb_list, cb_list) {
- list_del(&cb_pos->cb_list);
- cl_tmp = (struct mei_cl *)cb_pos->file_private;
-
- if (cl_tmp && cl_tmp == &dev->iamthif_cl) {
- status = amthi_write(dev, cb_pos);
- if (status) {
- dev_dbg(&dev->pdev->dev,
- "amthi write failed status = %d\n",
- status);
- return;
- }
- break;
+ dev_dbg(&dev->pdev->dev, "complete amthi cmd_list cb.\n");
+
+ list_for_each_entry_safe(pos, next,
+ &dev->amthi_cmd_list.mei_cb.cb_list, cb_list) {
+ list_del(&pos->cb_list);
+ cl_tmp = (struct mei_cl *)pos->file_private;
+
+ if (cl_tmp && cl_tmp == &dev->iamthif_cl) {
+ status = amthi_write(dev, pos);
+ if (status) {
+ dev_dbg(&dev->pdev->dev,
+ "amthi write failed status = %d\n",
+ status);
+ return;
}
+ break;
}
}
}
diff --git a/drivers/staging/mei/main.c b/drivers/staging/mei/main.c
index eb05c36..1e1a9f9 100644
--- a/drivers/staging/mei/main.c
+++ b/drivers/staging/mei/main.c
@@ -33,6 +33,7 @@
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
#include "mei_dev.h"
#include "mei.h"
@@ -51,18 +52,10 @@ static char mei_driver_name[] = MEI_DRIVER_NAME;
static const char mei_driver_string[] = "Intel(R) Management Engine Interface";
static const char mei_driver_version[] = MEI_DRIVER_VERSION;
-/* mei char device for registration */
-static struct cdev mei_cdev;
-
-/* major number for device */
-static int mei_major;
/* The device pointer */
/* Currently this driver works as long as there is only a single AMT device. */
struct pci_dev *mei_device;
-static struct class *mei_class;
-
-
/* mei_pci_tbl - PCI Device ID Table */
static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
@@ -105,173 +98,6 @@ MODULE_DEVICE_TABLE(pci, mei_pci_tbl);
static DEFINE_MUTEX(mei_mutex);
-/**
- * mei_probe - Device Initialization Routine
- *
- * @pdev: PCI device structure
- * @ent: entry in kcs_pci_tbl
- *
- * returns 0 on success, <0 on failure.
- */
-static int __devinit mei_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct mei_device *dev;
- int err;
-
- mutex_lock(&mei_mutex);
- if (mei_device) {
- err = -EEXIST;
- goto end;
- }
- /* enable pci dev */
- err = pci_enable_device(pdev);
- if (err) {
- printk(KERN_ERR "mei: Failed to enable pci device.\n");
- goto end;
- }
- /* set PCI host mastering */
- pci_set_master(pdev);
- /* pci request regions for mei driver */
- err = pci_request_regions(pdev, mei_driver_name);
- if (err) {
- printk(KERN_ERR "mei: Failed to get pci regions.\n");
- goto disable_device;
- }
- /* allocates and initializes the mei dev structure */
- dev = mei_device_init(pdev);
- if (!dev) {
- err = -ENOMEM;
- goto release_regions;
- }
- /* mapping IO device memory */
- dev->mem_addr = pci_iomap(pdev, 0, 0);
- if (!dev->mem_addr) {
- printk(KERN_ERR "mei: mapping I/O device memory failure.\n");
- err = -ENOMEM;
- goto free_device;
- }
- pci_enable_msi(pdev);
-
- /* request and enable interrupt */
- if (pci_dev_msi_enabled(pdev))
- err = request_threaded_irq(pdev->irq,
- NULL,
- mei_interrupt_thread_handler,
- 0, mei_driver_name, dev);
- else
- err = request_threaded_irq(pdev->irq,
- mei_interrupt_quick_handler,
- mei_interrupt_thread_handler,
- IRQF_SHARED, mei_driver_name, dev);
-
- if (err) {
- printk(KERN_ERR "mei: request_threaded_irq failure. irq = %d\n",
- pdev->irq);
- goto unmap_memory;
- }
- INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
- if (mei_hw_init(dev)) {
- printk(KERN_ERR "mei: Init hw failure.\n");
- err = -ENODEV;
- goto release_irq;
- }
- mei_device = pdev;
- pci_set_drvdata(pdev, dev);
- schedule_delayed_work(&dev->timer_work, HZ);
-
- mutex_unlock(&mei_mutex);
-
- pr_debug("mei: Driver initialization successful.\n");
-
- return 0;
-
-release_irq:
- /* disable interrupts */
- dev->host_hw_state = mei_hcsr_read(dev);
- mei_disable_interrupts(dev);
- flush_scheduled_work();
- free_irq(pdev->irq, dev);
- pci_disable_msi(pdev);
-unmap_memory:
- pci_iounmap(pdev, dev->mem_addr);
-free_device:
- kfree(dev);
-release_regions:
- pci_release_regions(pdev);
-disable_device:
- pci_disable_device(pdev);
-end:
- mutex_unlock(&mei_mutex);
- printk(KERN_ERR "mei: Driver initialization failed.\n");
- return err;
-}
-
-/**
- * mei_remove - Device Removal Routine
- *
- * @pdev: PCI device structure
- *
- * mei_remove is called by the PCI subsystem to alert the driver
- * that it should release a PCI device.
- */
-static void __devexit mei_remove(struct pci_dev *pdev)
-{
- struct mei_device *dev;
-
- if (mei_device != pdev)
- return;
-
- dev = pci_get_drvdata(pdev);
- if (!dev)
- return;
-
- mutex_lock(&dev->device_lock);
-
- mei_wd_stop(dev, false);
-
- mei_device = NULL;
-
- if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
- dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
- mei_disconnect_host_client(dev, &dev->iamthif_cl);
- }
- if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
- dev->wd_cl.state = MEI_FILE_DISCONNECTING;
- mei_disconnect_host_client(dev, &dev->wd_cl);
- }
-
- /* Unregistering watchdog device */
- if (dev->wd_interface_reg)
- watchdog_unregister_device(&amt_wd_dev);
-
- /* remove entry if already in list */
- dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
- mei_remove_client_from_file_list(dev, dev->wd_cl.host_client_id);
- mei_remove_client_from_file_list(dev, dev->iamthif_cl.host_client_id);
-
- dev->iamthif_current_cb = NULL;
- dev->me_clients_num = 0;
-
- mutex_unlock(&dev->device_lock);
-
- flush_scheduled_work();
-
- /* disable interrupts */
- mei_disable_interrupts(dev);
-
- free_irq(pdev->irq, dev);
- pci_disable_msi(pdev);
- pci_set_drvdata(pdev, NULL);
-
- if (dev->mem_addr)
- pci_iounmap(pdev, dev->mem_addr);
-
- kfree(dev);
-
- pci_release_regions(pdev);
- pci_disable_device(pdev);
-}
/**
* mei_clear_list - removes all callbacks associated with file
@@ -372,21 +198,17 @@ static struct mei_cl_cb *find_read_list_entry(
struct mei_device *dev,
struct mei_cl *cl)
{
- struct mei_cl_cb *cb_pos = NULL;
- struct mei_cl_cb *cb_next = NULL;
-
- if (!dev->read_list.status &&
- !list_empty(&dev->read_list.mei_cb.cb_list)) {
+ struct mei_cl_cb *pos = NULL;
+ struct mei_cl_cb *next = NULL;
- dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
- list_for_each_entry_safe(cb_pos, cb_next,
- &dev->read_list.mei_cb.cb_list, cb_list) {
- struct mei_cl *cl_temp;
- cl_temp = (struct mei_cl *)cb_pos->file_private;
+ dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
+ list_for_each_entry_safe(pos, next,
+ &dev->read_list.mei_cb.cb_list, cb_list) {
+ struct mei_cl *cl_temp;
+ cl_temp = (struct mei_cl *)pos->file_private;
- if (mei_cl_cmp_id(cl, cl_temp))
- return cb_pos;
- }
+ if (mei_cl_cmp_id(cl, cl_temp))
+ return pos;
}
return NULL;
}
@@ -402,15 +224,16 @@ static struct mei_cl_cb *find_read_list_entry(
static int mei_open(struct inode *inode, struct file *file)
{
struct mei_cl *cl;
- int if_num = iminor(inode), err;
struct mei_device *dev;
+ unsigned long cl_id;
+ int err;
err = -ENODEV;
if (!mei_device)
goto out;
dev = pci_get_drvdata(mei_device);
- if (if_num != MEI_MINOR_NUMBER || !dev)
+ if (!dev)
goto out;
mutex_lock(&dev->device_lock);
@@ -429,14 +252,16 @@ static int mei_open(struct inode *inode, struct file *file)
if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT)
goto out_unlock;
- cl->host_client_id = find_first_zero_bit(dev->host_clients_map,
- MEI_CLIENTS_MAX);
- if (cl->host_client_id > MEI_CLIENTS_MAX)
+ cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
+ if (cl_id >= MEI_CLIENTS_MAX)
goto out_unlock;
+ cl->host_client_id = cl_id;
+
dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id);
dev->open_handle_count++;
+
list_add_tail(&cl->link, &dev->file_list);
set_bit(cl->host_client_id, dev->host_clients_map);
@@ -446,7 +271,7 @@ static int mei_open(struct inode *inode, struct file *file)
file->private_data = cl;
mutex_unlock(&dev->device_lock);
- return 0;
+ return nonseekable_open(inode, file);
out_unlock:
mutex_unlock(&dev->device_lock);
@@ -492,8 +317,7 @@ static int mei_release(struct inode *inode, struct file *file)
cl->me_client_id);
if (dev->open_handle_count > 0) {
- clear_bit(cl->host_client_id,
- dev->host_clients_map);
+ clear_bit(cl->host_client_id, dev->host_clients_map);
dev->open_handle_count--;
}
mei_remove_client_from_file_list(dev, cl->host_client_id);
@@ -554,7 +378,7 @@ static int mei_release(struct inode *inode, struct file *file)
* returns >=0 data length on success , <0 on error
*/
static ssize_t mei_read(struct file *file, char __user *ubuf,
- size_t length, loff_t *offset)
+ size_t length, loff_t *offset)
{
struct mei_cl *cl = file->private_data;
struct mei_cl_cb *cb_pos = NULL;
@@ -673,9 +497,7 @@ copy_buffer:
/* information size may be longer */
length = min_t(size_t, length, (cb->information - *offset));
- if (copy_to_user(ubuf,
- cb->response_buffer.data + *offset,
- length)) {
+ if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
rets = -EFAULT;
goto free;
}
@@ -711,7 +533,7 @@ out:
* returns >=0 data length on success , <0 on error
*/
static ssize_t mei_write(struct file *file, const char __user *ubuf,
- size_t length, loff_t *offset)
+ size_t length, loff_t *offset)
{
struct mei_cl *cl = file->private_data;
struct mei_cl_cb *write_cb = NULL;
@@ -762,8 +584,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
cl->read_cb = NULL;
cl->read_pending = 0;
}
- } else if (cl->reading_state == MEI_IDLE &&
- !cl->read_pending)
+ } else if (cl->reading_state == MEI_IDLE && !cl->read_pending)
*offset = 0;
@@ -1034,7 +855,7 @@ out:
*/
#ifdef CONFIG_COMPAT
static long mei_compat_ioctl(struct file *file,
- unsigned int cmd, unsigned long data)
+ unsigned int cmd, unsigned long data)
{
return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
@@ -1090,6 +911,206 @@ out:
return mask;
}
+/*
+ * file operations structure will be used for mei char device.
+ */
+static const struct file_operations mei_fops = {
+ .owner = THIS_MODULE,
+ .read = mei_read,
+ .unlocked_ioctl = mei_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mei_compat_ioctl,
+#endif
+ .open = mei_open,
+ .release = mei_release,
+ .write = mei_write,
+ .poll = mei_poll,
+ .llseek = no_llseek
+};
+
+
+/*
+ * Misc Device Struct
+ */
+static struct miscdevice mei_misc_device = {
+ .name = MEI_DRIVER_NAME,
+ .fops = &mei_fops,
+ .minor = MISC_DYNAMIC_MINOR,
+};
+
+/**
+ * mei_probe - Device Initialization Routine
+ *
+ * @pdev: PCI device structure
+ * @ent: entry in kcs_pci_tbl
+ *
+ * returns 0 on success, <0 on failure.
+ */
+static int __devinit mei_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct mei_device *dev;
+ int err;
+
+ mutex_lock(&mei_mutex);
+ if (mei_device) {
+ err = -EEXIST;
+ goto end;
+ }
+ /* enable pci dev */
+ err = pci_enable_device(pdev);
+ if (err) {
+ printk(KERN_ERR "mei: Failed to enable pci device.\n");
+ goto end;
+ }
+ /* set PCI host mastering */
+ pci_set_master(pdev);
+ /* pci request regions for mei driver */
+ err = pci_request_regions(pdev, mei_driver_name);
+ if (err) {
+ printk(KERN_ERR "mei: Failed to get pci regions.\n");
+ goto disable_device;
+ }
+ /* allocates and initializes the mei dev structure */
+ dev = mei_device_init(pdev);
+ if (!dev) {
+ err = -ENOMEM;
+ goto release_regions;
+ }
+ /* mapping IO device memory */
+ dev->mem_addr = pci_iomap(pdev, 0, 0);
+ if (!dev->mem_addr) {
+ printk(KERN_ERR "mei: mapping I/O device memory failure.\n");
+ err = -ENOMEM;
+ goto free_device;
+ }
+ pci_enable_msi(pdev);
+
+ /* request and enable interrupt */
+ if (pci_dev_msi_enabled(pdev))
+ err = request_threaded_irq(pdev->irq,
+ NULL,
+ mei_interrupt_thread_handler,
+ 0, mei_driver_name, dev);
+ else
+ err = request_threaded_irq(pdev->irq,
+ mei_interrupt_quick_handler,
+ mei_interrupt_thread_handler,
+ IRQF_SHARED, mei_driver_name, dev);
+
+ if (err) {
+ printk(KERN_ERR "mei: request_threaded_irq failure. irq = %d\n",
+ pdev->irq);
+ goto unmap_memory;
+ }
+ INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
+ if (mei_hw_init(dev)) {
+ printk(KERN_ERR "mei: Init hw failure.\n");
+ err = -ENODEV;
+ goto release_irq;
+ }
+
+ err = misc_register(&mei_misc_device);
+ if (err)
+ goto release_irq;
+
+ mei_device = pdev;
+ pci_set_drvdata(pdev, dev);
+
+
+ schedule_delayed_work(&dev->timer_work, HZ);
+
+ mutex_unlock(&mei_mutex);
+
+ pr_debug("mei: Driver initialization successful.\n");
+
+ return 0;
+
+release_irq:
+ /* disable interrupts */
+ dev->host_hw_state = mei_hcsr_read(dev);
+ mei_disable_interrupts(dev);
+ flush_scheduled_work();
+ free_irq(pdev->irq, dev);
+ pci_disable_msi(pdev);
+unmap_memory:
+ pci_iounmap(pdev, dev->mem_addr);
+free_device:
+ kfree(dev);
+release_regions:
+ pci_release_regions(pdev);
+disable_device:
+ pci_disable_device(pdev);
+end:
+ mutex_unlock(&mei_mutex);
+ printk(KERN_ERR "mei: Driver initialization failed.\n");
+ return err;
+}
+
+/**
+ * mei_remove - Device Removal Routine
+ *
+ * @pdev: PCI device structure
+ *
+ * mei_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.
+ */
+static void __devexit mei_remove(struct pci_dev *pdev)
+{
+ struct mei_device *dev;
+
+ if (mei_device != pdev)
+ return;
+
+ dev = pci_get_drvdata(pdev);
+ if (!dev)
+ return;
+
+ mutex_lock(&dev->device_lock);
+
+ mei_wd_stop(dev, false);
+
+ mei_device = NULL;
+
+ if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
+ dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
+ mei_disconnect_host_client(dev, &dev->iamthif_cl);
+ }
+ if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
+ dev->wd_cl.state = MEI_FILE_DISCONNECTING;
+ mei_disconnect_host_client(dev, &dev->wd_cl);
+ }
+
+ /* Unregistering watchdog device */
+ mei_watchdog_unregister(dev);
+
+ /* remove entry if already in list */
+ dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
+ mei_remove_client_from_file_list(dev, dev->wd_cl.host_client_id);
+ mei_remove_client_from_file_list(dev, dev->iamthif_cl.host_client_id);
+
+ dev->iamthif_current_cb = NULL;
+ dev->me_clients_num = 0;
+
+ mutex_unlock(&dev->device_lock);
+
+ flush_scheduled_work();
+
+ /* disable interrupts */
+ mei_disable_interrupts(dev);
+
+ free_irq(pdev->irq, dev);
+ pci_disable_msi(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ if (dev->mem_addr)
+ pci_iounmap(pdev, dev->mem_addr);
+
+ kfree(dev);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
#ifdef CONFIG_PM
static int mei_pci_suspend(struct device *device)
{
@@ -1173,131 +1194,6 @@ static struct pci_driver mei_driver = {
.driver.pm = MEI_PM_OPS,
};
-/*
- * file operations structure will be used for mei char device.
- */
-static const struct file_operations mei_fops = {
- .owner = THIS_MODULE,
- .read = mei_read,
- .unlocked_ioctl = mei_ioctl,
-#ifdef CONFIG_COMPAT
- .compat_ioctl = mei_compat_ioctl,
-#endif
- .open = mei_open,
- .release = mei_release,
- .write = mei_write,
- .poll = mei_poll,
-};
-
-/**
- * mei_registration_cdev - sets up the cdev structure for mei device.
- *
- * @dev: char device struct
- * @hminor: minor number for registration char device
- * @fops: file operations structure
- *
- * returns 0 on success, <0 on failure.
- */
-static int mei_registration_cdev(struct cdev *dev, int hminor,
- const struct file_operations *fops)
-{
- int ret, devno = MKDEV(mei_major, hminor);
-
- cdev_init(dev, fops);
- dev->owner = THIS_MODULE;
- ret = cdev_add(dev, devno, 1);
- /* Fail gracefully if need be */
- if (ret)
- printk(KERN_ERR "mei: Error %d registering mei device %d\n",
- ret, hminor);
- return ret;
-}
-
-/**
- * mei_register_cdev - registers mei char device
- *
- * returns 0 on success, <0 on failure.
- */
-static int mei_register_cdev(void)
-{
- int ret;
- dev_t dev;
-
- /* registration of char devices */
- ret = alloc_chrdev_region(&dev, MEI_MINORS_BASE, MEI_MINORS_COUNT,
- MEI_DRIVER_NAME);
- if (ret) {
- printk(KERN_ERR "mei: Error allocating char device region.\n");
- return ret;
- }
-
- mei_major = MAJOR(dev);
-
- ret = mei_registration_cdev(&mei_cdev, MEI_MINOR_NUMBER,
- &mei_fops);
- if (ret)
- unregister_chrdev_region(MKDEV(mei_major, MEI_MINORS_BASE),
- MEI_MINORS_COUNT);
-
- return ret;
-}
-
-/**
- * mei_unregister_cdev - unregisters mei char device
- */
-static void mei_unregister_cdev(void)
-{
- cdev_del(&mei_cdev);
- unregister_chrdev_region(MKDEV(mei_major, MEI_MINORS_BASE),
- MEI_MINORS_COUNT);
-}
-
-/**
- * mei_sysfs_device_create - adds device entry to sysfs
- *
- * returns 0 on success, <0 on failure.
- */
-static int mei_sysfs_device_create(void)
-{
- struct class *class;
- void *tmphdev;
- int err;
-
- class = class_create(THIS_MODULE, MEI_DRIVER_NAME);
- if (IS_ERR(class)) {
- err = PTR_ERR(class);
- printk(KERN_ERR "mei: Error creating mei class.\n");
- goto err_out;
- }
-
- tmphdev = device_create(class, NULL, mei_cdev.dev, NULL,
- MEI_DEV_NAME);
- if (IS_ERR(tmphdev)) {
- err = PTR_ERR(tmphdev);
- goto err_destroy;
- }
-
- mei_class = class;
- return 0;
-
-err_destroy:
- class_destroy(class);
-err_out:
- return err;
-}
-
-/**
- * mei_sysfs_device_remove - unregisters the device entry on sysfs
- */
-static void mei_sysfs_device_remove(void)
-{
- if (IS_ERR_OR_NULL(mei_class))
- return;
-
- device_destroy(mei_class, mei_cdev.dev);
- class_destroy(mei_class);
-}
-
/**
* mei_init_module - Driver Registration Routine
*
@@ -1314,26 +1210,9 @@ static int __init mei_init_module(void)
mei_driver_string, mei_driver_version);
/* init pci module */
ret = pci_register_driver(&mei_driver);
- if (ret < 0) {
+ if (ret < 0)
printk(KERN_ERR "mei: Error registering driver.\n");
- goto end;
- }
- ret = mei_register_cdev();
- if (ret)
- goto unregister_pci;
-
- ret = mei_sysfs_device_create();
- if (ret)
- goto unregister_cdev;
-
- return ret;
-
-unregister_cdev:
- mei_unregister_cdev();
-unregister_pci:
- pci_unregister_driver(&mei_driver);
-end:
return ret;
}
@@ -1347,8 +1226,7 @@ module_init(mei_init_module);
*/
static void __exit mei_exit_module(void)
{
- mei_sysfs_device_remove();
- mei_unregister_cdev();
+ misc_deregister(&mei_misc_device);
pci_unregister_driver(&mei_driver);
pr_debug("mei: Driver unloaded successfully.\n");
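
The cdev/class/sysfs plumbing deleted from main.c is replaced by the misc-device registration added to mei_probe() and mei_exit_module(). For reference, the minimal shape of that pattern (a generic sketch with placeholder names, not mei's code):

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

static const struct file_operations my_fops = {
	.owner	= THIS_MODULE,
	.llseek	= no_llseek,
};

static struct miscdevice my_misc = {
	.minor	= MISC_DYNAMIC_MINOR,	/* let the kernel pick the minor */
	.name	= "mydev",		/* node appears as /dev/mydev */
	.fops	= &my_fops,
};

static int __init my_init(void)
{
	return misc_register(&my_misc);
}
module_init(my_init);

static void __exit my_exit(void)
{
	misc_deregister(&my_misc);
}
module_exit(my_exit);

MODULE_LICENSE("GPL");
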
diff --git a/drivers/staging/mei/mei.txt b/drivers/staging/mei/mei.txt
index 17302ad..516bfe7 100644
--- a/drivers/staging/mei/mei.txt
+++ b/drivers/staging/mei/mei.txt
@@ -1,78 +1,74 @@
-Intel MEI
+Intel(R) Management Engine Interface (Intel(R) MEI)
=======================
Introduction
=======================
-The Intel Management Engine (Intel ME) is an isolated and
-protected computing resource (Coprocessor) residing inside
-Intel chipsets. The Intel ME provides support for computer/IT
-management features.
-The Feature set depends on the Intel chipset SKU.
+The Intel Management Engine (Intel ME) is an isolated and protected computing
+resource (Co-processor) residing inside certain Intel chipsets. The Intel ME
+provides support for computer/IT management features. The feature set
+depends on the Intel chipset SKU.
-The Intel Management Engine Interface (Intel MEI, previously known
-as HECI) is the interface between the Host and Intel ME.
-This interface is exposed to the host as a PCI device.
-The Intel MEI Driver is in charge of the communication channel
-between a host application and the ME feature.
+The Intel Management Engine Interface (Intel MEI, previously known as HECI)
+is the interface between the Host and Intel ME. This interface is exposed
+to the host as a PCI device. The Intel MEI Driver is in charge of the
+communication channel between a host application and the Intel ME feature.
-Each Intel ME feature (Intel ME Client) is addressed by
-GUID/UUID and each feature defines its own protocol.
-The protocol is message-based with a header and payload up to
-512 bytes.
+Each Intel ME feature (Intel ME Client) is addressed by a GUID/UUID and
+each client has its own protocol. The protocol is message-based with a
+header and payload up to 512 bytes.
-[place holder to URL to protocol definitions]
-
-Prominent usage of the Interface is to communicate with
-Intel Active Management Technology (Intel AMT)
-implemented in firmware running on the Intel ME.
+Prominent usage of the Intel ME Interface is to communicate with Intel(R)
+Active Management Technology (Intel AMT) implemented in firmware running on
+the Intel ME.
Intel AMT provides the ability to manage a host remotely out-of-band (OOB)
-even when the host processor has crashed or is in a sleep state.
+even when the operating system running on the host processor has crashed or
+is in a sleep state.
Some examples of Intel AMT usage are:
- Monitoring hardware state and platform components
- - Remote power off/on (useful for green computing or overnight IT maintenance)
+ - Remote power off/on (useful for green computing or overnight IT
+ maintenance)
- OS updates
- Storage of useful platform information such as software assets
- - built-in hardware KVM
- - selective network isolation of Ethernet and IP protocol flows based on
- policies set by a remote management console
+ - Built-in hardware KVM
+ - Selective network isolation of Ethernet and IP protocol flows based
+ on policies set by a remote management console
- IDE device redirection from remote management console
Intel AMT (OOB) communication is based on SOAP (deprecated
-starting with Release 6.0) over HTTP/HTTPS or WS-Management protocol
-over HTTP and HTTPS that are received from a remote
-management console application.
+starting with Release 6.0) over HTTP/S or WS-Management protocol over
+HTTP/S that are received from a remote management console application.
For more information about Intel AMT:
-http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide/WordDocuments/aboutintelamt.htm
-
+http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide
-MEI Driver
+Intel MEI Driver
=======================
-The driver exposes a character device called /dev/mei.
+The driver exposes a misc device called /dev/mei.
-An application maintains communication with an ME feature while
-/dev/mei is open. The binding to a specific features is performed
-by calling MEI_CONNECT_CLIENT_IOCTL, which passes the desired UUID.
-The number of instances of an ME feature that can be opened
-at the same time depends on the ME feature, but most of the
+An application maintains communication with an Intel ME feature while
+/dev/mei is open. The binding to a specific feature is performed by calling
+IOCTL_MEI_CONNECT_CLIENT, which passes the desired UUID.
+The number of instances of an Intel ME feature that can be opened
+at the same time depends on the Intel ME feature, but most of the
features allow only a single instance.
-
-The Intel AMT Host Interface (AMTHI) feature requires multiple
-simultaneous user applications, therefore the MEI driver handles
+The Intel AMT Host Interface (Intel AMTHI) feature supports multiple
+simultaneous user applications. Therefore, the Intel MEI driver handles
this internally by maintaining request queues for the applications.
-The driver is oblivious to data that are passed between
+The driver is oblivious to the data that is passed between the firmware
+feature and the host application.
-Because some of the ME features can change the system
-configuration, the driver by default allows only privileged
+Because some of the Intel ME features can change the system
+configuration, the driver by default allows only a privileged
user to access it.
-A Code snippet for application communicating with AMTHI client:
+A code snippet for an application communicating with the
+Intel AMTHI client:
struct mei_connect_client_data data;
fd = open(MEI_DEVICE);
@@ -80,7 +76,7 @@ A Code snippet for application communicating with AMTHI client:
ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &data);
- printf(“Ver=%d, MaxLen=%ld\n”,
+ printf("Ver=%d, MaxLen=%ld\n",
data.d.in_client_uuid.protocol_version,
data.d.in_client_uuid.max_msg_length);
@@ -95,76 +91,106 @@ A Code snippet for application communicating with AMTHI client:
[...]
close(fd);
-ME Applications:
+IOCTL:
+======
+The Intel MEI Driver supports the following IOCTL command:
+ IOCTL_MEI_CONNECT_CLIENT Connect to firmware Feature (client).
+
+ usage:
+ struct mei_connect_client_data clientData;
+ ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &clientData);
+
+ inputs:
+ mei_connect_client_data struct contains the following
+ input field:
+
+ in_client_uuid - UUID of the FW feature to connect to.
+ outputs:
+ out_client_properties - Client Properties: MTU and Protocol Version.
+
+ error returns:
+ EINVAL Wrong IOCTL Number
+ ENODEV Device or Connection is not initialized or ready.
+ (e.g. Wrong UUID)
+ ENOMEM Unable to allocate memory for client internal data.
+ EFAULT Fatal Error (e.g. Unable to access user input data)
+ EBUSY Connection Already Open
+
+ Notes:
+ max_msg_length (MTU) in client properties describes the maximum
+ data that can be sent or received. (e.g. if MTU=2K, requests of up to
+ 2K bytes can be sent and responses of up to 2K bytes received).
+
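	A minimal sketch of the full connect/write/read flow described
	above (error handling omitted; SOME_FEATURE_UUID and the helper
	function name are placeholders, not part of the driver API; the
	field layout follows the snippet earlier in this document):

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include "mei.h"	/* driver header with IOCTL_MEI_CONNECT_CLIENT */

	static void talk_to_feature(void)
	{
		struct mei_connect_client_data data;
		unsigned char req[128], rsp[512];
		int fd;

		fd = open("/dev/mei", O_RDWR);

		memset(&data, 0, sizeof(data));
		/* SOME_FEATURE_UUID stands in for the firmware client UUID */
		data.d.in_client_uuid = SOME_FEATURE_UUID;
		ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &data);

		/* requests and responses are bounded by max_msg_length (MTU) */
		write(fd, req, sizeof(req));
		read(fd, rsp, sizeof(rsp));

		close(fd);
	}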
+Intel ME Applications:
==============
1) Intel Local Management Service (Intel LMS)
- Applications running locally on the platform communicate with
- Intel AMT Release 2.0 and later releases in the same way
- that network applications do via SOAP over HTTP (deprecated
- starting with Release 6.0) or with WS-Management over SOAP over
- HTTP. which means that some Intel AMT feature can be access
- from a local application using same Network interface as for
- remote application.
-
- When a local application sends a message addressed to the local
- Intel AMT host name, the Local Manageability Service (LMS),
- which listens for traffic directed to the host name, intercepts
- the message and routes it to the Intel Management Engine Interface.
+
+ Applications running locally on the platform communicate with Intel AMT Release
+ 2.0 and later releases in the same way that network applications do via SOAP
+ over HTTP (deprecated starting with Release 6.0) or with WS-Management over
+ SOAP over HTTP. This means that some Intel AMT features can be accessed from a
+ local application using the same network interface as a remote application
+ communicating with Intel AMT over the network.
+
+ When a local application sends a message addressed to the local Intel AMT host
+ name, the Intel LMS, which listens for traffic directed to the host name,
+ intercepts the message and routes it to the Intel MEI.
For more information:
- http://software.intel.com/sites/manageability/AMT_Implementation_and_
- Reference_Guide/WordDocuments/localaccess1.htm
-
- The LMS opens a connection using the MEI driver to the LMS
- FW feature using a defined UUID and then communicates with the
- feature using a protocol
- called Intel(R) AMT Port Forwarding Protocol (APF protocol).
- The protocol is used to maintain multiple sessions with
- Intel AMT from a single application.
- See the protocol specification in
- the Intel(R) AMT Implementation and Reference Guide
- http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide/HTMLDocuments/MPSDocuments/Intel%20AMT%20Port%20Forwarding%20Protocol%20Reference%20Manual.pdf
-
- 2) Intel AMT Remote configuration using a Local Agent:
+ http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide
+ Under "About Intel AMT" => "Local Access"
+
+ For downloading Intel LMS:
+ http://software.intel.com/en-us/articles/download-the-latest-intel-amt-open-source-drivers/
+
+ The Intel LMS opens a connection using the Intel MEI driver to the Intel LMS
+ firmware feature using a defined UUID and then communicates with the feature
+ using a protocol called Intel AMT Port Forwarding Protocol (Intel APF protocol).
+ The protocol is used to maintain multiple sessions with Intel AMT from a
+ single application.
+
+ See the protocol specification in the Intel AMT Software Development Kit (SDK)
+ http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide
+ Under "SDK Resources" => "Intel(R) vPro(TM) Gateway(MPS)"
+ => "Information for Intel(R) vPro(TM) Gateway Developers"
+ => "Description of the Intel AMT Port Forwarding (APF)Protocol"
+
+ 2) Intel AMT Remote configuration using a Local Agent
A Local Agent enables IT personnel to configure Intel AMT out-of-the-box
- without requiring installing additional data to enable setup.
- The remote configuration process may involve an ISV-developed remote
- configuration agent that runs on the host.
+ without requiring installation of additional data to enable setup. The remote
+ configuration process may involve an ISV-developed remote configuration
+ agent that runs on the host.
For more information:
- http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide/WordDocuments/remoteconfigurationwithalocalagent.htm
+ http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide
+ Under "Setup and Configuration of Intel AMT" =>
+ "SDK Tools Supporting Setup and Configuration" =>
+ "Using the Local Agent Sample"
+
+ An open source Intel AMT configuration utility, implementing a local agent
+ that accesses the Intel MEI driver, can be found here:
+ http://software.intel.com/en-us/articles/download-the-latest-intel-amt-open-source-drivers/
- How the Local Agent Works (including Command structs):
- http://software.intel.com/sites/manageability/AMT_Implementation_and_Reference_Guide/WordDocuments/howthelocalagentsampleworks.htm
Intel AMT OS Health Watchdog:
=============================
The Intel AMT Watchdog is an OS Health (Hang/Crash) watchdog.
Whenever the OS hangs or crashes, Intel AMT will send an event
-to whoever subscribed to this event. This mechanism means that
-IT knows when a platform crashes even when there is a hard failure
-on the host.
-The AMT Watchdog is composed of two parts:
- 1) FW Feature - that receives the heartbeats
- and sends an event when the heartbeats stop.
- 2) MEI driver – connects to the watchdog (WD) feature,
- configures the watchdog and sends the heartbeats.
-
-The MEI driver configures the Watchdog to expire by default
-every 120sec unless set by the user using module parameters.
-The Driver then sends heartbeats every 2sec.
+to any subscriber to this event. This mechanism means that
+IT knows when a platform crashes even when there is a hard failure on the host.
-If WD feature does not exist (i.e. the connection failed),
-the MEI driver will disable the sending of heartbeats.
+The Intel AMT Watchdog is composed of two parts:
+ 1) Firmware feature - receives the heartbeats
+ and sends an event when the heartbeats stop.
+ 2) Intel MEI driver - connects to the watchdog feature, configures the
+ watchdog and sends the heartbeats.
-Module Parameters
-=================
-watchdog_timeout - the user can use this module parameter
-to change the watchdog timeout setting.
+The Intel MEI driver uses the kernel watchdog to configure the Intel AMT
+Watchdog and to send heartbeats to it. The default timeout of the
+watchdog is 120 seconds.
-This value sets the Intel AMT watchdog timeout interval in seconds;
-the default value is 120sec.
-in order to disable the watchdog activites set the value to 0.
-Normal values should be between 120 and 65535
+If the Intel AMT Watchdog feature does not exist (i.e. the connection failed),
+the Intel MEI driver will disable the sending of heartbeats.
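Because the watchdog is registered through the kernel watchdog framework,
user space can drive it through the standard watchdog character device.
A minimal sketch, assuming the Intel AMT watchdog is the one exposed as
/dev/watchdog (error handling omitted):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/watchdog.h>

	static void ping_amt_watchdog(void)
	{
		int fd = open("/dev/watchdog", O_WRONLY);
		int timeout = 120;	/* seconds */

		/* only honoured if the driver advertises WDIOF_SETTIMEOUT */
		ioctl(fd, WDIOC_SETTIMEOUT, &timeout);
		/* read back the timeout actually in effect */
		ioctl(fd, WDIOC_GETTIMEOUT, &timeout);
		/* send a heartbeat */
		ioctl(fd, WDIOC_KEEPALIVE, 0);

		/* 'V' ("magic close") disarms the watchdog on close() for
		 * drivers that support it; otherwise closing may count as
		 * a missed heartbeat */
		write(fd, "V", 1);
		close(fd);
	}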
Supported Chipsets:
==================
diff --git a/drivers/staging/mei/mei_dev.h b/drivers/staging/mei/mei_dev.h
index af4b1af..82bacfc 100644
--- a/drivers/staging/mei/mei_dev.h
+++ b/drivers/staging/mei/mei_dev.h
@@ -23,13 +23,6 @@
#include "hw.h"
/*
- * MEI Char Driver Minors
- */
-#define MEI_MINORS_BASE 1
-#define MEI_MINORS_COUNT 1
-#define MEI_MINOR_NUMBER 1
-
-/*
* watch dog definition
*/
#define MEI_WATCHDOG_DATA_SIZE 16
@@ -42,11 +35,6 @@
*/
extern struct pci_dev *mei_device;
-/*
- * AMT Watchdog Device
- */
-#define INTEL_AMT_WATCHDOG_ID "INTCAMT"
-extern struct watchdog_device amt_wd_dev;
/*
* AMTHI Client UUID
@@ -175,7 +163,6 @@ struct mei_cl {
struct mei_io_list {
struct mei_cl_cb mei_cb;
- int status;
};
/* MEI private device struct */
diff --git a/drivers/staging/mei/wd.c b/drivers/staging/mei/wd.c
index ffca7ca..8094941 100644
--- a/drivers/staging/mei/wd.c
+++ b/drivers/staging/mei/wd.c
@@ -35,12 +35,16 @@ const u8 mei_wd_state_independence_msg[3][4] = {
{0x07, 0x02, 0x01, 0x10}
};
+/*
+ * AMT Watchdog Device
+ */
+#define INTEL_AMT_WATCHDOG_ID "INTCAMT"
+
/* UUIDs for AMT F/W clients */
const uuid_le mei_wd_guid = UUID_LE(0x05B79A6F, 0x4628, 0x4D7F, 0x89,
0x9D, 0xA9, 0x15, 0x14, 0xCB,
0x32, 0xAB);
-
void mei_wd_set_start_timeout(struct mei_device *dev, u16 timeout)
{
dev_dbg(&dev->pdev->dev, "timeout=%d.\n", timeout);
@@ -331,14 +335,14 @@ static int mei_wd_ops_set_timeout(struct watchdog_device *wd_dev, unsigned int t
/*
* Watchdog Device structs
*/
-const struct watchdog_ops wd_ops = {
+static const struct watchdog_ops wd_ops = {
.owner = THIS_MODULE,
.start = mei_wd_ops_start,
.stop = mei_wd_ops_stop,
.ping = mei_wd_ops_ping,
.set_timeout = mei_wd_ops_set_timeout,
};
-const struct watchdog_info wd_info = {
+static const struct watchdog_info wd_info = {
.identity = INTEL_AMT_WATCHDOG_ID,
.options = WDIOF_KEEPALIVEPING,
};
@@ -352,3 +356,25 @@ struct watchdog_device amt_wd_dev = {
};
+void mei_watchdog_register(struct mei_device *dev)
+{
+ dev_dbg(&dev->pdev->dev, "dev->wd_timeout =%d.\n", dev->wd_timeout);
+
+ dev->wd_due_counter = !!dev->wd_timeout;
+
+ if (watchdog_register_device(&amt_wd_dev)) {
+ dev_err(&dev->pdev->dev, "unable to register watchdog device.\n");
+ dev->wd_interface_reg = false;
+ } else {
+ dev_dbg(&dev->pdev->dev, "successfully register watchdog interface.\n");
+ dev->wd_interface_reg = true;
+ }
+}
+
+void mei_watchdog_unregister(struct mei_device *dev)
+{
+ if (dev->wd_interface_reg)
+ watchdog_unregister_device(&amt_wd_dev);
+ dev->wd_interface_reg = false;
+}
+
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index e06b867..fafdfa2 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -27,6 +27,8 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
#include <linux/list.h>
#include <linux/mfd/core.h>
#include <linux/mutex.h>
@@ -727,8 +729,24 @@ static int __devinit tegra_nvec_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, nvec);
nvec->dev = &pdev->dev;
- nvec->gpio = pdata->gpio;
- nvec->i2c_addr = pdata->i2c_addr;
+
+ if (pdata) {
+ nvec->gpio = pdata->gpio;
+ nvec->i2c_addr = pdata->i2c_addr;
+ } else if (nvec->dev->of_node) {
+ nvec->gpio = of_get_named_gpio(nvec->dev->of_node, "request-gpios", 0);
+ if (nvec->gpio < 0) {
+ dev_err(&pdev->dev, "no gpio specified");
+ goto failed;
+ }
+ if (of_property_read_u32(nvec->dev->of_node, "slave-addr", &nvec->i2c_addr)) {
+ dev_err(&pdev->dev, "no i2c address specified");
+ goto failed;
+ }
+ } else {
+ dev_err(&pdev->dev, "no platform data\n");
+ goto failed;
+ }
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -893,6 +911,13 @@ static int tegra_nvec_resume(struct platform_device *pdev)
#define tegra_nvec_resume NULL
#endif
+/* Match table for of_platform binding */
+static const struct of_device_id nvidia_nvec_of_match[] __devinitconst = {
+ { .compatible = "nvidia,nvec", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, nvidia_nvec_of_match);
+
static struct platform_driver nvec_device_driver = {
.probe = tegra_nvec_probe,
.remove = __devexit_p(tegra_nvec_remove),
@@ -901,6 +926,7 @@ static struct platform_driver nvec_device_driver = {
.driver = {
.name = "nvec",
.owner = THIS_MODULE,
+ .of_match_table = nvidia_nvec_of_match,
}
};
diff --git a/drivers/staging/octeon/Makefile b/drivers/staging/octeon/Makefile
index fc850ba..9012dee 100644
--- a/drivers/staging/octeon/Makefile
+++ b/drivers/staging/octeon/Makefile
@@ -20,9 +20,4 @@ octeon-ethernet-y += ethernet-sgmii.o
octeon-ethernet-y += ethernet-spi.o
octeon-ethernet-y += ethernet-tx.o
octeon-ethernet-y += ethernet-xaui.o
-octeon-ethernet-y += cvmx-pko.o cvmx-spi.o cvmx-cmd-queue.o \
- cvmx-helper-board.o cvmx-helper.o cvmx-helper-xaui.o \
- cvmx-helper-rgmii.o cvmx-helper-sgmii.o cvmx-helper-npi.o \
- cvmx-helper-loop.o cvmx-helper-spi.o cvmx-helper-util.o \
- cvmx-interrupt-decodes.o cvmx-interrupt-rsl.o
diff --git a/drivers/staging/octeon/cvmx-address.h b/drivers/staging/octeon/cvmx-address.h
deleted file mode 100644
index 3c74d82..0000000
--- a/drivers/staging/octeon/cvmx-address.h
+++ /dev/null
@@ -1,274 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2009 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- * Typedefs and defines for working with Octeon physical addresses.
- *
- */
-#ifndef __CVMX_ADDRESS_H__
-#define __CVMX_ADDRESS_H__
-
-#if 0
-typedef enum {
- CVMX_MIPS_SPACE_XKSEG = 3LL,
- CVMX_MIPS_SPACE_XKPHYS = 2LL,
- CVMX_MIPS_SPACE_XSSEG = 1LL,
- CVMX_MIPS_SPACE_XUSEG = 0LL
-} cvmx_mips_space_t;
-#endif
-
-typedef enum {
- CVMX_MIPS_XKSEG_SPACE_KSEG0 = 0LL,
- CVMX_MIPS_XKSEG_SPACE_KSEG1 = 1LL,
- CVMX_MIPS_XKSEG_SPACE_SSEG = 2LL,
- CVMX_MIPS_XKSEG_SPACE_KSEG3 = 3LL
-} cvmx_mips_xkseg_space_t;
-
-/* decodes <14:13> of a kseg3 window address */
-typedef enum {
- CVMX_ADD_WIN_SCR = 0L,
- /* see cvmx_add_win_dma_dec_t for further decode */
- CVMX_ADD_WIN_DMA = 1L,
- CVMX_ADD_WIN_UNUSED = 2L,
- CVMX_ADD_WIN_UNUSED2 = 3L
-} cvmx_add_win_dec_t;
-
-/* decode within DMA space */
-typedef enum {
- /*
- * Add store data to the write buffer entry, allocating it if
- * necessary.
- */
- CVMX_ADD_WIN_DMA_ADD = 0L,
- /* send out the write buffer entry to DRAM */
- CVMX_ADD_WIN_DMA_SENDMEM = 1L,
- /* store data must be normal DRAM memory space address in this case */
- /* send out the write buffer entry as an IOBDMA command */
- CVMX_ADD_WIN_DMA_SENDDMA = 2L,
- /* see CVMX_ADD_WIN_DMA_SEND_DEC for data contents */
- /* send out the write buffer entry as an IO write */
- CVMX_ADD_WIN_DMA_SENDIO = 3L,
- /* store data must be normal IO space address in this case */
- /* send out a single-tick command on the NCB bus */
- CVMX_ADD_WIN_DMA_SENDSINGLE = 4L,
- /* no write buffer data needed/used */
-} cvmx_add_win_dma_dec_t;
-
-/*
- * Physical Address Decode
- *
- * Octeon-I HW never interprets this X (<39:36> reserved
- * for future expansion), software should set to 0.
- *
- * - 0x0 XXX0 0000 0000 to DRAM Cached
- * - 0x0 XXX0 0FFF FFFF
- *
- * - 0x0 XXX0 1000 0000 to Boot Bus Uncached (Converted to 0x1 00X0 1000 0000
- * - 0x0 XXX0 1FFF FFFF + EJTAG to 0x1 00X0 1FFF FFFF)
- *
- * - 0x0 XXX0 2000 0000 to DRAM Cached
- * - 0x0 XXXF FFFF FFFF
- *
- * - 0x1 00X0 0000 0000 to Boot Bus Uncached
- * - 0x1 00XF FFFF FFFF
- *
- * - 0x1 01X0 0000 0000 to Other NCB Uncached
- * - 0x1 FFXF FFFF FFFF devices
- *
- * Decode of all Octeon addresses
- */
-typedef union {
-
- uint64_t u64;
- /* mapped or unmapped virtual address */
- struct {
- uint64_t R:2;
- uint64_t offset:62;
- } sva;
-
- /* mapped USEG virtual addresses (typically) */
- struct {
- uint64_t zeroes:33;
- uint64_t offset:31;
- } suseg;
-
- /* mapped or unmapped virtual address */
- struct {
- uint64_t ones:33;
- uint64_t sp:2;
- uint64_t offset:29;
- } sxkseg;
-
- /*
- * physical address accessed through xkphys unmapped virtual
- * address.
- */
- struct {
- uint64_t R:2; /* CVMX_MIPS_SPACE_XKPHYS in this case */
- uint64_t cca:3; /* ignored by octeon */
- uint64_t mbz:10;
- uint64_t pa:49; /* physical address */
- } sxkphys;
-
- /* physical address */
- struct {
- uint64_t mbz:15;
- /* if set, the address is uncached and resides on MCB bus */
- uint64_t is_io:1;
- /*
- * the hardware ignores this field when is_io==0, else
- * device ID.
- */
- uint64_t did:8;
- /* the hardware ignores <39:36> in Octeon I */
- uint64_t unaddr:4;
- uint64_t offset:36;
- } sphys;
-
- /* physical mem address */
- struct {
- /* techically, <47:40> are dont-cares */
- uint64_t zeroes:24;
- /* the hardware ignores <39:36> in Octeon I */
- uint64_t unaddr:4;
- uint64_t offset:36;
- } smem;
-
- /* physical IO address */
- struct {
- uint64_t mem_region:2;
- uint64_t mbz:13;
- /* 1 in this case */
- uint64_t is_io:1;
- /*
- * The hardware ignores this field when is_io==0, else
- * device ID.
- */
- uint64_t did:8;
- /* the hardware ignores <39:36> in Octeon I */
- uint64_t unaddr:4;
- uint64_t offset:36;
- } sio;
-
- /*
- * Scratchpad virtual address - accessed through a window at
- * the end of kseg3
- */
- struct {
- uint64_t ones:49;
- /* CVMX_ADD_WIN_SCR (0) in this case */
- cvmx_add_win_dec_t csrdec:2;
- uint64_t addr:13;
- } sscr;
-
- /* there should only be stores to IOBDMA space, no loads */
- /*
- * IOBDMA virtual address - accessed through a window at the
- * end of kseg3
- */
- struct {
- uint64_t ones:49;
- uint64_t csrdec:2; /* CVMX_ADD_WIN_DMA (1) in this case */
- uint64_t unused2:3;
- uint64_t type:3;
- uint64_t addr:7;
- } sdma;
-
- struct {
- uint64_t didspace:24;
- uint64_t unused:40;
- } sfilldidspace;
-
-} cvmx_addr_t;
-
-/* These macros for used by 32 bit applications */
-
-#define CVMX_MIPS32_SPACE_KSEG0 1l
-#define CVMX_ADD_SEG32(segment, add) \
- (((int32_t)segment << 31) | (int32_t)(add))
-
-/*
- * Currently all IOs are performed using XKPHYS addressing. Linux uses
- * the CvmMemCtl register to enable XKPHYS addressing to IO space from
- * user mode. Future OSes may need to change the upper bits of IO
- * addresses. The following define controls the upper two bits for all
- * IO addresses generated by the simple executive library.
- */
-#define CVMX_IO_SEG CVMX_MIPS_SPACE_XKPHYS
-
-/* These macros simplify the process of creating common IO addresses */
-#define CVMX_ADD_SEG(segment, add) ((((uint64_t)segment) << 62) | (add))
-#ifndef CVMX_ADD_IO_SEG
-#define CVMX_ADD_IO_SEG(add) CVMX_ADD_SEG(CVMX_IO_SEG, (add))
-#endif
-#define CVMX_ADDR_DIDSPACE(did) (((CVMX_IO_SEG) << 22) | ((1ULL) << 8) | (did))
-#define CVMX_ADDR_DID(did) (CVMX_ADDR_DIDSPACE(did) << 40)
-#define CVMX_FULL_DID(did, subdid) (((did) << 3) | (subdid))
-
- /* from include/ncb_rsl_id.v */
-#define CVMX_OCT_DID_MIS 0ULL /* misc stuff */
-#define CVMX_OCT_DID_GMX0 1ULL
-#define CVMX_OCT_DID_GMX1 2ULL
-#define CVMX_OCT_DID_PCI 3ULL
-#define CVMX_OCT_DID_KEY 4ULL
-#define CVMX_OCT_DID_FPA 5ULL
-#define CVMX_OCT_DID_DFA 6ULL
-#define CVMX_OCT_DID_ZIP 7ULL
-#define CVMX_OCT_DID_RNG 8ULL
-#define CVMX_OCT_DID_IPD 9ULL
-#define CVMX_OCT_DID_PKT 10ULL
-#define CVMX_OCT_DID_TIM 11ULL
-#define CVMX_OCT_DID_TAG 12ULL
- /* the rest are not on the IO bus */
-#define CVMX_OCT_DID_L2C 16ULL
-#define CVMX_OCT_DID_LMC 17ULL
-#define CVMX_OCT_DID_SPX0 18ULL
-#define CVMX_OCT_DID_SPX1 19ULL
-#define CVMX_OCT_DID_PIP 20ULL
-#define CVMX_OCT_DID_ASX0 22ULL
-#define CVMX_OCT_DID_ASX1 23ULL
-#define CVMX_OCT_DID_IOB 30ULL
-
-#define CVMX_OCT_DID_PKT_SEND CVMX_FULL_DID(CVMX_OCT_DID_PKT, 2ULL)
-#define CVMX_OCT_DID_TAG_SWTAG CVMX_FULL_DID(CVMX_OCT_DID_TAG, 0ULL)
-#define CVMX_OCT_DID_TAG_TAG1 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 1ULL)
-#define CVMX_OCT_DID_TAG_TAG2 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 2ULL)
-#define CVMX_OCT_DID_TAG_TAG3 CVMX_FULL_DID(CVMX_OCT_DID_TAG, 3ULL)
-#define CVMX_OCT_DID_TAG_NULL_RD CVMX_FULL_DID(CVMX_OCT_DID_TAG, 4ULL)
-#define CVMX_OCT_DID_TAG_CSR CVMX_FULL_DID(CVMX_OCT_DID_TAG, 7ULL)
-#define CVMX_OCT_DID_FAU_FAI CVMX_FULL_DID(CVMX_OCT_DID_IOB, 0ULL)
-#define CVMX_OCT_DID_TIM_CSR CVMX_FULL_DID(CVMX_OCT_DID_TIM, 0ULL)
-#define CVMX_OCT_DID_KEY_RW CVMX_FULL_DID(CVMX_OCT_DID_KEY, 0ULL)
-#define CVMX_OCT_DID_PCI_6 CVMX_FULL_DID(CVMX_OCT_DID_PCI, 6ULL)
-#define CVMX_OCT_DID_MIS_BOO CVMX_FULL_DID(CVMX_OCT_DID_MIS, 0ULL)
-#define CVMX_OCT_DID_PCI_RML CVMX_FULL_DID(CVMX_OCT_DID_PCI, 0ULL)
-#define CVMX_OCT_DID_IPD_CSR CVMX_FULL_DID(CVMX_OCT_DID_IPD, 7ULL)
-#define CVMX_OCT_DID_DFA_CSR CVMX_FULL_DID(CVMX_OCT_DID_DFA, 7ULL)
-#define CVMX_OCT_DID_MIS_CSR CVMX_FULL_DID(CVMX_OCT_DID_MIS, 7ULL)
-#define CVMX_OCT_DID_ZIP_CSR CVMX_FULL_DID(CVMX_OCT_DID_ZIP, 0ULL)
-
-#endif /* __CVMX_ADDRESS_H__ */
diff --git a/drivers/staging/octeon/cvmx-asxx-defs.h b/drivers/staging/octeon/cvmx-asxx-defs.h
deleted file mode 100644
index 91415a8..0000000
--- a/drivers/staging/octeon/cvmx-asxx-defs.h
+++ /dev/null
@@ -1,475 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_ASXX_DEFS_H__
-#define __CVMX_ASXX_DEFS_H__
-
-#define CVMX_ASXX_GMII_RX_CLK_SET(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000180ull + (((block_id) & 0) * 0x8000000ull))
-#define CVMX_ASXX_GMII_RX_DAT_SET(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000188ull + (((block_id) & 0) * 0x8000000ull))
-#define CVMX_ASXX_INT_EN(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000018ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_INT_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000010ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_MII_RX_DAT_SET(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000190ull + (((block_id) & 0) * 0x8000000ull))
-#define CVMX_ASXX_PRT_LOOP(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000040ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RLD_BYPASS(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000248ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RLD_BYPASS_SETTING(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000250ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RLD_COMP(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000220ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RLD_DATA_DRV(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000218ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RLD_FCRAM_MODE(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000210ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RLD_NCTL_STRONG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000230ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RLD_NCTL_WEAK(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000240ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RLD_PCTL_STRONG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000228ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RLD_PCTL_WEAK(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000238ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RLD_SETTING(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000258ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RX_CLK_SETX(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000020ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RX_PRT_EN(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000000ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RX_WOL(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000100ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RX_WOL_MSK(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000108ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RX_WOL_POWOK(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000118ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_RX_WOL_SIG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000110ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_TX_CLK_SETX(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000048ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_TX_COMP_BYP(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000068ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_TX_HI_WATERX(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000080ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_ASXX_TX_PRT_EN(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000008ull + (((block_id) & 1) * 0x8000000ull))
-
-union cvmx_asxx_gmii_rx_clk_set {
- uint64_t u64;
- struct cvmx_asxx_gmii_rx_clk_set_s {
- uint64_t reserved_5_63:59;
- uint64_t setting:5;
- } s;
- struct cvmx_asxx_gmii_rx_clk_set_s cn30xx;
- struct cvmx_asxx_gmii_rx_clk_set_s cn31xx;
- struct cvmx_asxx_gmii_rx_clk_set_s cn50xx;
-};
-
-union cvmx_asxx_gmii_rx_dat_set {
- uint64_t u64;
- struct cvmx_asxx_gmii_rx_dat_set_s {
- uint64_t reserved_5_63:59;
- uint64_t setting:5;
- } s;
- struct cvmx_asxx_gmii_rx_dat_set_s cn30xx;
- struct cvmx_asxx_gmii_rx_dat_set_s cn31xx;
- struct cvmx_asxx_gmii_rx_dat_set_s cn50xx;
-};
-
-union cvmx_asxx_int_en {
- uint64_t u64;
- struct cvmx_asxx_int_en_s {
- uint64_t reserved_12_63:52;
- uint64_t txpsh:4;
- uint64_t txpop:4;
- uint64_t ovrflw:4;
- } s;
- struct cvmx_asxx_int_en_cn30xx {
- uint64_t reserved_11_63:53;
- uint64_t txpsh:3;
- uint64_t reserved_7_7:1;
- uint64_t txpop:3;
- uint64_t reserved_3_3:1;
- uint64_t ovrflw:3;
- } cn30xx;
- struct cvmx_asxx_int_en_cn30xx cn31xx;
- struct cvmx_asxx_int_en_s cn38xx;
- struct cvmx_asxx_int_en_s cn38xxp2;
- struct cvmx_asxx_int_en_cn30xx cn50xx;
- struct cvmx_asxx_int_en_s cn58xx;
- struct cvmx_asxx_int_en_s cn58xxp1;
-};
-
-union cvmx_asxx_int_reg {
- uint64_t u64;
- struct cvmx_asxx_int_reg_s {
- uint64_t reserved_12_63:52;
- uint64_t txpsh:4;
- uint64_t txpop:4;
- uint64_t ovrflw:4;
- } s;
- struct cvmx_asxx_int_reg_cn30xx {
- uint64_t reserved_11_63:53;
- uint64_t txpsh:3;
- uint64_t reserved_7_7:1;
- uint64_t txpop:3;
- uint64_t reserved_3_3:1;
- uint64_t ovrflw:3;
- } cn30xx;
- struct cvmx_asxx_int_reg_cn30xx cn31xx;
- struct cvmx_asxx_int_reg_s cn38xx;
- struct cvmx_asxx_int_reg_s cn38xxp2;
- struct cvmx_asxx_int_reg_cn30xx cn50xx;
- struct cvmx_asxx_int_reg_s cn58xx;
- struct cvmx_asxx_int_reg_s cn58xxp1;
-};
-
-union cvmx_asxx_mii_rx_dat_set {
- uint64_t u64;
- struct cvmx_asxx_mii_rx_dat_set_s {
- uint64_t reserved_5_63:59;
- uint64_t setting:5;
- } s;
- struct cvmx_asxx_mii_rx_dat_set_s cn30xx;
- struct cvmx_asxx_mii_rx_dat_set_s cn50xx;
-};
-
-union cvmx_asxx_prt_loop {
- uint64_t u64;
- struct cvmx_asxx_prt_loop_s {
- uint64_t reserved_8_63:56;
- uint64_t ext_loop:4;
- uint64_t int_loop:4;
- } s;
- struct cvmx_asxx_prt_loop_cn30xx {
- uint64_t reserved_7_63:57;
- uint64_t ext_loop:3;
- uint64_t reserved_3_3:1;
- uint64_t int_loop:3;
- } cn30xx;
- struct cvmx_asxx_prt_loop_cn30xx cn31xx;
- struct cvmx_asxx_prt_loop_s cn38xx;
- struct cvmx_asxx_prt_loop_s cn38xxp2;
- struct cvmx_asxx_prt_loop_cn30xx cn50xx;
- struct cvmx_asxx_prt_loop_s cn58xx;
- struct cvmx_asxx_prt_loop_s cn58xxp1;
-};
-
-union cvmx_asxx_rld_bypass {
- uint64_t u64;
- struct cvmx_asxx_rld_bypass_s {
- uint64_t reserved_1_63:63;
- uint64_t bypass:1;
- } s;
- struct cvmx_asxx_rld_bypass_s cn38xx;
- struct cvmx_asxx_rld_bypass_s cn38xxp2;
- struct cvmx_asxx_rld_bypass_s cn58xx;
- struct cvmx_asxx_rld_bypass_s cn58xxp1;
-};
-
-union cvmx_asxx_rld_bypass_setting {
- uint64_t u64;
- struct cvmx_asxx_rld_bypass_setting_s {
- uint64_t reserved_5_63:59;
- uint64_t setting:5;
- } s;
- struct cvmx_asxx_rld_bypass_setting_s cn38xx;
- struct cvmx_asxx_rld_bypass_setting_s cn38xxp2;
- struct cvmx_asxx_rld_bypass_setting_s cn58xx;
- struct cvmx_asxx_rld_bypass_setting_s cn58xxp1;
-};
-
-union cvmx_asxx_rld_comp {
- uint64_t u64;
- struct cvmx_asxx_rld_comp_s {
- uint64_t reserved_9_63:55;
- uint64_t pctl:5;
- uint64_t nctl:4;
- } s;
- struct cvmx_asxx_rld_comp_cn38xx {
- uint64_t reserved_8_63:56;
- uint64_t pctl:4;
- uint64_t nctl:4;
- } cn38xx;
- struct cvmx_asxx_rld_comp_cn38xx cn38xxp2;
- struct cvmx_asxx_rld_comp_s cn58xx;
- struct cvmx_asxx_rld_comp_s cn58xxp1;
-};
-
-union cvmx_asxx_rld_data_drv {
- uint64_t u64;
- struct cvmx_asxx_rld_data_drv_s {
- uint64_t reserved_8_63:56;
- uint64_t pctl:4;
- uint64_t nctl:4;
- } s;
- struct cvmx_asxx_rld_data_drv_s cn38xx;
- struct cvmx_asxx_rld_data_drv_s cn38xxp2;
- struct cvmx_asxx_rld_data_drv_s cn58xx;
- struct cvmx_asxx_rld_data_drv_s cn58xxp1;
-};
-
-union cvmx_asxx_rld_fcram_mode {
- uint64_t u64;
- struct cvmx_asxx_rld_fcram_mode_s {
- uint64_t reserved_1_63:63;
- uint64_t mode:1;
- } s;
- struct cvmx_asxx_rld_fcram_mode_s cn38xx;
- struct cvmx_asxx_rld_fcram_mode_s cn38xxp2;
-};
-
-union cvmx_asxx_rld_nctl_strong {
- uint64_t u64;
- struct cvmx_asxx_rld_nctl_strong_s {
- uint64_t reserved_5_63:59;
- uint64_t nctl:5;
- } s;
- struct cvmx_asxx_rld_nctl_strong_s cn38xx;
- struct cvmx_asxx_rld_nctl_strong_s cn38xxp2;
- struct cvmx_asxx_rld_nctl_strong_s cn58xx;
- struct cvmx_asxx_rld_nctl_strong_s cn58xxp1;
-};
-
-union cvmx_asxx_rld_nctl_weak {
- uint64_t u64;
- struct cvmx_asxx_rld_nctl_weak_s {
- uint64_t reserved_5_63:59;
- uint64_t nctl:5;
- } s;
- struct cvmx_asxx_rld_nctl_weak_s cn38xx;
- struct cvmx_asxx_rld_nctl_weak_s cn38xxp2;
- struct cvmx_asxx_rld_nctl_weak_s cn58xx;
- struct cvmx_asxx_rld_nctl_weak_s cn58xxp1;
-};
-
-union cvmx_asxx_rld_pctl_strong {
- uint64_t u64;
- struct cvmx_asxx_rld_pctl_strong_s {
- uint64_t reserved_5_63:59;
- uint64_t pctl:5;
- } s;
- struct cvmx_asxx_rld_pctl_strong_s cn38xx;
- struct cvmx_asxx_rld_pctl_strong_s cn38xxp2;
- struct cvmx_asxx_rld_pctl_strong_s cn58xx;
- struct cvmx_asxx_rld_pctl_strong_s cn58xxp1;
-};
-
-union cvmx_asxx_rld_pctl_weak {
- uint64_t u64;
- struct cvmx_asxx_rld_pctl_weak_s {
- uint64_t reserved_5_63:59;
- uint64_t pctl:5;
- } s;
- struct cvmx_asxx_rld_pctl_weak_s cn38xx;
- struct cvmx_asxx_rld_pctl_weak_s cn38xxp2;
- struct cvmx_asxx_rld_pctl_weak_s cn58xx;
- struct cvmx_asxx_rld_pctl_weak_s cn58xxp1;
-};
-
-union cvmx_asxx_rld_setting {
- uint64_t u64;
- struct cvmx_asxx_rld_setting_s {
- uint64_t reserved_13_63:51;
- uint64_t dfaset:5;
- uint64_t dfalag:1;
- uint64_t dfalead:1;
- uint64_t dfalock:1;
- uint64_t setting:5;
- } s;
- struct cvmx_asxx_rld_setting_cn38xx {
- uint64_t reserved_5_63:59;
- uint64_t setting:5;
- } cn38xx;
- struct cvmx_asxx_rld_setting_cn38xx cn38xxp2;
- struct cvmx_asxx_rld_setting_s cn58xx;
- struct cvmx_asxx_rld_setting_s cn58xxp1;
-};
-
-union cvmx_asxx_rx_clk_setx {
- uint64_t u64;
- struct cvmx_asxx_rx_clk_setx_s {
- uint64_t reserved_5_63:59;
- uint64_t setting:5;
- } s;
- struct cvmx_asxx_rx_clk_setx_s cn30xx;
- struct cvmx_asxx_rx_clk_setx_s cn31xx;
- struct cvmx_asxx_rx_clk_setx_s cn38xx;
- struct cvmx_asxx_rx_clk_setx_s cn38xxp2;
- struct cvmx_asxx_rx_clk_setx_s cn50xx;
- struct cvmx_asxx_rx_clk_setx_s cn58xx;
- struct cvmx_asxx_rx_clk_setx_s cn58xxp1;
-};
-
-union cvmx_asxx_rx_prt_en {
- uint64_t u64;
- struct cvmx_asxx_rx_prt_en_s {
- uint64_t reserved_4_63:60;
- uint64_t prt_en:4;
- } s;
- struct cvmx_asxx_rx_prt_en_cn30xx {
- uint64_t reserved_3_63:61;
- uint64_t prt_en:3;
- } cn30xx;
- struct cvmx_asxx_rx_prt_en_cn30xx cn31xx;
- struct cvmx_asxx_rx_prt_en_s cn38xx;
- struct cvmx_asxx_rx_prt_en_s cn38xxp2;
- struct cvmx_asxx_rx_prt_en_cn30xx cn50xx;
- struct cvmx_asxx_rx_prt_en_s cn58xx;
- struct cvmx_asxx_rx_prt_en_s cn58xxp1;
-};
-
-union cvmx_asxx_rx_wol {
- uint64_t u64;
- struct cvmx_asxx_rx_wol_s {
- uint64_t reserved_2_63:62;
- uint64_t status:1;
- uint64_t enable:1;
- } s;
- struct cvmx_asxx_rx_wol_s cn38xx;
- struct cvmx_asxx_rx_wol_s cn38xxp2;
-};
-
-union cvmx_asxx_rx_wol_msk {
- uint64_t u64;
- struct cvmx_asxx_rx_wol_msk_s {
- uint64_t msk:64;
- } s;
- struct cvmx_asxx_rx_wol_msk_s cn38xx;
- struct cvmx_asxx_rx_wol_msk_s cn38xxp2;
-};
-
-union cvmx_asxx_rx_wol_powok {
- uint64_t u64;
- struct cvmx_asxx_rx_wol_powok_s {
- uint64_t reserved_1_63:63;
- uint64_t powerok:1;
- } s;
- struct cvmx_asxx_rx_wol_powok_s cn38xx;
- struct cvmx_asxx_rx_wol_powok_s cn38xxp2;
-};
-
-union cvmx_asxx_rx_wol_sig {
- uint64_t u64;
- struct cvmx_asxx_rx_wol_sig_s {
- uint64_t reserved_32_63:32;
- uint64_t sig:32;
- } s;
- struct cvmx_asxx_rx_wol_sig_s cn38xx;
- struct cvmx_asxx_rx_wol_sig_s cn38xxp2;
-};
-
-union cvmx_asxx_tx_clk_setx {
- uint64_t u64;
- struct cvmx_asxx_tx_clk_setx_s {
- uint64_t reserved_5_63:59;
- uint64_t setting:5;
- } s;
- struct cvmx_asxx_tx_clk_setx_s cn30xx;
- struct cvmx_asxx_tx_clk_setx_s cn31xx;
- struct cvmx_asxx_tx_clk_setx_s cn38xx;
- struct cvmx_asxx_tx_clk_setx_s cn38xxp2;
- struct cvmx_asxx_tx_clk_setx_s cn50xx;
- struct cvmx_asxx_tx_clk_setx_s cn58xx;
- struct cvmx_asxx_tx_clk_setx_s cn58xxp1;
-};
-
-union cvmx_asxx_tx_comp_byp {
- uint64_t u64;
- struct cvmx_asxx_tx_comp_byp_s {
- uint64_t reserved_0_63:64;
- } s;
- struct cvmx_asxx_tx_comp_byp_cn30xx {
- uint64_t reserved_9_63:55;
- uint64_t bypass:1;
- uint64_t pctl:4;
- uint64_t nctl:4;
- } cn30xx;
- struct cvmx_asxx_tx_comp_byp_cn30xx cn31xx;
- struct cvmx_asxx_tx_comp_byp_cn38xx {
- uint64_t reserved_8_63:56;
- uint64_t pctl:4;
- uint64_t nctl:4;
- } cn38xx;
- struct cvmx_asxx_tx_comp_byp_cn38xx cn38xxp2;
- struct cvmx_asxx_tx_comp_byp_cn50xx {
- uint64_t reserved_17_63:47;
- uint64_t bypass:1;
- uint64_t reserved_13_15:3;
- uint64_t pctl:5;
- uint64_t reserved_5_7:3;
- uint64_t nctl:5;
- } cn50xx;
- struct cvmx_asxx_tx_comp_byp_cn58xx {
- uint64_t reserved_13_63:51;
- uint64_t pctl:5;
- uint64_t reserved_5_7:3;
- uint64_t nctl:5;
- } cn58xx;
- struct cvmx_asxx_tx_comp_byp_cn58xx cn58xxp1;
-};
-
-union cvmx_asxx_tx_hi_waterx {
- uint64_t u64;
- struct cvmx_asxx_tx_hi_waterx_s {
- uint64_t reserved_4_63:60;
- uint64_t mark:4;
- } s;
- struct cvmx_asxx_tx_hi_waterx_cn30xx {
- uint64_t reserved_3_63:61;
- uint64_t mark:3;
- } cn30xx;
- struct cvmx_asxx_tx_hi_waterx_cn30xx cn31xx;
- struct cvmx_asxx_tx_hi_waterx_s cn38xx;
- struct cvmx_asxx_tx_hi_waterx_s cn38xxp2;
- struct cvmx_asxx_tx_hi_waterx_cn30xx cn50xx;
- struct cvmx_asxx_tx_hi_waterx_s cn58xx;
- struct cvmx_asxx_tx_hi_waterx_s cn58xxp1;
-};
-
-union cvmx_asxx_tx_prt_en {
- uint64_t u64;
- struct cvmx_asxx_tx_prt_en_s {
- uint64_t reserved_4_63:60;
- uint64_t prt_en:4;
- } s;
- struct cvmx_asxx_tx_prt_en_cn30xx {
- uint64_t reserved_3_63:61;
- uint64_t prt_en:3;
- } cn30xx;
- struct cvmx_asxx_tx_prt_en_cn30xx cn31xx;
- struct cvmx_asxx_tx_prt_en_s cn38xx;
- struct cvmx_asxx_tx_prt_en_s cn38xxp2;
- struct cvmx_asxx_tx_prt_en_cn30xx cn50xx;
- struct cvmx_asxx_tx_prt_en_s cn58xx;
- struct cvmx_asxx_tx_prt_en_s cn58xxp1;
-};
-
-#endif
diff --git a/drivers/staging/octeon/cvmx-cmd-queue.c b/drivers/staging/octeon/cvmx-cmd-queue.c
deleted file mode 100644
index e9809d3..0000000
--- a/drivers/staging/octeon/cvmx-cmd-queue.c
+++ /dev/null
@@ -1,306 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- * Support functions for managing command queues used for
- * various hardware blocks.
- */
-
-#include <linux/kernel.h>
-
-#include <asm/octeon/octeon.h>
-
-#include "cvmx-config.h"
-#include "cvmx-fpa.h"
-#include "cvmx-cmd-queue.h"
-
-#include <asm/octeon/cvmx-npei-defs.h>
-#include <asm/octeon/cvmx-pexp-defs.h>
-#include "cvmx-pko-defs.h"
-
-/**
- * This application uses this pointer to access the global queue
- * state. It points to a bootmem named block.
- */
-__cvmx_cmd_queue_all_state_t *__cvmx_cmd_queue_state_ptr;
-
-/**
- * Initialize the Global queue state pointer.
- *
- * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
- */
-static cvmx_cmd_queue_result_t __cvmx_cmd_queue_init_state_ptr(void)
-{
- char *alloc_name = "cvmx_cmd_queues";
-#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
- extern uint64_t octeon_reserve32_memory;
-#endif
-
- if (likely(__cvmx_cmd_queue_state_ptr))
- return CVMX_CMD_QUEUE_SUCCESS;
-
-#if defined(CONFIG_CAVIUM_RESERVE32) && CONFIG_CAVIUM_RESERVE32
- if (octeon_reserve32_memory)
- __cvmx_cmd_queue_state_ptr =
- cvmx_bootmem_alloc_named_range(sizeof(*__cvmx_cmd_queue_state_ptr),
- octeon_reserve32_memory,
- octeon_reserve32_memory +
- (CONFIG_CAVIUM_RESERVE32 <<
- 20) - 1, 128, alloc_name);
- else
-#endif
- __cvmx_cmd_queue_state_ptr =
- cvmx_bootmem_alloc_named(sizeof(*__cvmx_cmd_queue_state_ptr),
- 128,
- alloc_name);
- if (__cvmx_cmd_queue_state_ptr)
- memset(__cvmx_cmd_queue_state_ptr, 0,
- sizeof(*__cvmx_cmd_queue_state_ptr));
- else {
- struct cvmx_bootmem_named_block_desc *block_desc =
- cvmx_bootmem_find_named_block(alloc_name);
- if (block_desc)
- __cvmx_cmd_queue_state_ptr =
- cvmx_phys_to_ptr(block_desc->base_addr);
- else {
- cvmx_dprintf
- ("ERROR: cvmx_cmd_queue_initialize: Unable to get named block %s.\n",
- alloc_name);
- return CVMX_CMD_QUEUE_NO_MEMORY;
- }
- }
- return CVMX_CMD_QUEUE_SUCCESS;
-}
-
-/**
- * Initialize a command queue for use. The initial FPA buffer is
- * allocated and the hardware unit is configured to point to the
- * new command queue.
- *
- * @queue_id: Hardware command queue to initialize.
- * @max_depth: Maximum outstanding commands that can be queued.
- * @fpa_pool: FPA pool the command queues should come from.
- * @pool_size: Size of each buffer in the FPA pool (bytes)
- *
- * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
- */
-cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
- int max_depth, int fpa_pool,
- int pool_size)
-{
- __cvmx_cmd_queue_state_t *qstate;
- cvmx_cmd_queue_result_t result = __cvmx_cmd_queue_init_state_ptr();
- if (result != CVMX_CMD_QUEUE_SUCCESS)
- return result;
-
- qstate = __cvmx_cmd_queue_get_state(queue_id);
- if (qstate == NULL)
- return CVMX_CMD_QUEUE_INVALID_PARAM;
-
- /*
- * We artificially limit max_depth to 1<<20 words. It is an
- * arbitrary limit.
- */
- if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH) {
- if ((max_depth < 0) || (max_depth > 1 << 20))
- return CVMX_CMD_QUEUE_INVALID_PARAM;
- } else if (max_depth != 0)
- return CVMX_CMD_QUEUE_INVALID_PARAM;
-
- if ((fpa_pool < 0) || (fpa_pool > 7))
- return CVMX_CMD_QUEUE_INVALID_PARAM;
- if ((pool_size < 128) || (pool_size > 65536))
- return CVMX_CMD_QUEUE_INVALID_PARAM;
-
- /* See if someone else has already initialized the queue */
- if (qstate->base_ptr_div128) {
- if (max_depth != (int)qstate->max_depth) {
- cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
- "Queue already initialized with different "
- "max_depth (%d).\n",
- (int)qstate->max_depth);
- return CVMX_CMD_QUEUE_INVALID_PARAM;
- }
- if (fpa_pool != qstate->fpa_pool) {
- cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
- "Queue already initialized with different "
- "FPA pool (%u).\n",
- qstate->fpa_pool);
- return CVMX_CMD_QUEUE_INVALID_PARAM;
- }
- if ((pool_size >> 3) - 1 != qstate->pool_size_m1) {
- cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
- "Queue already initialized with different "
- "FPA pool size (%u).\n",
- (qstate->pool_size_m1 + 1) << 3);
- return CVMX_CMD_QUEUE_INVALID_PARAM;
- }
- CVMX_SYNCWS;
- return CVMX_CMD_QUEUE_ALREADY_SETUP;
- } else {
- union cvmx_fpa_ctl_status status;
- void *buffer;
-
- status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
- if (!status.s.enb) {
- cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
- "FPA is not enabled.\n");
- return CVMX_CMD_QUEUE_NO_MEMORY;
- }
- buffer = cvmx_fpa_alloc(fpa_pool);
- if (buffer == NULL) {
- cvmx_dprintf("ERROR: cvmx_cmd_queue_initialize: "
- "Unable to allocate initial buffer.\n");
- return CVMX_CMD_QUEUE_NO_MEMORY;
- }
-
- memset(qstate, 0, sizeof(*qstate));
- qstate->max_depth = max_depth;
- qstate->fpa_pool = fpa_pool;
- qstate->pool_size_m1 = (pool_size >> 3) - 1;
- qstate->base_ptr_div128 = cvmx_ptr_to_phys(buffer) / 128;
- /*
- * We zeroed the now serving field so we need to also
- * zero the ticket.
- */
- __cvmx_cmd_queue_state_ptr->
- ticket[__cvmx_cmd_queue_get_index(queue_id)] = 0;
- CVMX_SYNCWS;
- return CVMX_CMD_QUEUE_SUCCESS;
- }
-}
-
-/**
- * Shutdown a queue a free it's command buffers to the FPA. The
- * hardware connected to the queue must be stopped before this
- * function is called.
- *
- * @queue_id: Queue to shutdown
- *
- * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
- */
-cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id)
-{
- __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
- if (qptr == NULL) {
- cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Unable to "
- "get queue information.\n");
- return CVMX_CMD_QUEUE_INVALID_PARAM;
- }
-
- if (cvmx_cmd_queue_length(queue_id) > 0) {
- cvmx_dprintf("ERROR: cvmx_cmd_queue_shutdown: Queue still "
- "has data in it.\n");
- return CVMX_CMD_QUEUE_FULL;
- }
-
- __cvmx_cmd_queue_lock(queue_id, qptr);
- if (qptr->base_ptr_div128) {
- cvmx_fpa_free(cvmx_phys_to_ptr
- ((uint64_t) qptr->base_ptr_div128 << 7),
- qptr->fpa_pool, 0);
- qptr->base_ptr_div128 = 0;
- }
- __cvmx_cmd_queue_unlock(qptr);
-
- return CVMX_CMD_QUEUE_SUCCESS;
-}
-
-/**
- * Return the number of command words pending in the queue. This
- * function may be relatively slow for some hardware units.
- *
- * @queue_id: Hardware command queue to query
- *
- * Returns Number of outstanding commands
- */
-int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id)
-{
- if (CVMX_ENABLE_PARAMETER_CHECKING) {
- if (__cvmx_cmd_queue_get_state(queue_id) == NULL)
- return CVMX_CMD_QUEUE_INVALID_PARAM;
- }
-
- /*
- * The cast is here so gcc with check that all values in the
- * cvmx_cmd_queue_id_t enumeration are here.
- */
- switch ((cvmx_cmd_queue_id_t) (queue_id & 0xff0000)) {
- case CVMX_CMD_QUEUE_PKO_BASE:
- /*
- * FIXME: Need atomic lock on
- * CVMX_PKO_REG_READ_IDX. Right now we are normally
- * called with the queue lock, so that is a SLIGHT
- * amount of protection.
- */
- cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue_id & 0xffff);
- if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
- union cvmx_pko_mem_debug9 debug9;
- debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
- return debug9.cn38xx.doorbell;
- } else {
- union cvmx_pko_mem_debug8 debug8;
- debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
- return debug8.cn58xx.doorbell;
- }
- case CVMX_CMD_QUEUE_ZIP:
- case CVMX_CMD_QUEUE_DFA:
- case CVMX_CMD_QUEUE_RAID:
- /* FIXME: Implement other lengths */
- return 0;
- case CVMX_CMD_QUEUE_DMA_BASE:
- {
- union cvmx_npei_dmax_counts dmax_counts;
- dmax_counts.u64 =
- cvmx_read_csr(CVMX_PEXP_NPEI_DMAX_COUNTS
- (queue_id & 0x7));
- return dmax_counts.s.dbell;
- }
- case CVMX_CMD_QUEUE_END:
- return CVMX_CMD_QUEUE_INVALID_PARAM;
- }
- return CVMX_CMD_QUEUE_INVALID_PARAM;
-}
-
-/**
- * Return the command buffer to be written to. The purpose of this
- * function is to allow CVMX routine access t othe low level buffer
- * for initial hardware setup. User applications should not call this
- * function directly.
- *
- * @queue_id: Command queue to query
- *
- * Returns Command buffer or NULL on failure
- */
-void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id)
-{
- __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
- if (qptr && qptr->base_ptr_div128)
- return cvmx_phys_to_ptr((uint64_t) qptr->base_ptr_div128 << 7);
- else
- return NULL;
-}
diff --git a/drivers/staging/octeon/cvmx-cmd-queue.h b/drivers/staging/octeon/cvmx-cmd-queue.h
deleted file mode 100644
index 614653b..0000000
--- a/drivers/staging/octeon/cvmx-cmd-queue.h
+++ /dev/null
@@ -1,617 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- *
- * Support functions for managing command queues used for
- * various hardware blocks.
- *
- * The common command queue infrastructure abstracts out the
- * software necessary for adding to Octeon's chained queue
- * structures. These structures are used for commands to the
- * PKO, ZIP, DFA, RAID, and DMA engine blocks. Although each
- * hardware unit takes commands and CSRs of different types,
- * they all use basic linked command buffers to store the
- * pending request. In general, users of the CVMX API don't
- * call cvmx-cmd-queue functions directly. Instead the hardware
- * unit specific wrapper should be used. The wrappers perform
- * unit specific validation and CSR writes to submit the
- * commands.
- *
- * Even though most software will never directly interact with
- * cvmx-cmd-queue, knowledge of its internal working can help
- * in diagnosing performance problems and help with debugging.
- *
- * Command queue pointers are stored in a global named block
- * called "cvmx_cmd_queues". Except for the PKO queues, each
- * hardware queue is stored in its own cache line to reduce SMP
- * contention on spin locks. The PKO queues are stored such that
- * every 16th queue is next to each other in memory. This scheme
- * allows for queues being in separate cache lines when there
- * are low number of queues per port. With 16 queues per port,
- * the first queue for each port is in the same cache area. The
- * second queues for each port are in another area, etc. This
- * allows software to implement very efficient lockless PKO with
- * 16 queues per port using a minimum of cache lines per core.
- * All queues for a given core will be isolated in the same
- * cache area.
- *
- * In addition to the memory pointer layout, cvmx-cmd-queue
- * provides an optimized fair ll/sc locking mechanism for the
- * queues. The lock uses a "ticket / now serving" model to
- * maintain fair order on contended locks. In addition, it uses
- * predicted locking time to limit cache contention. When a core
- * know it must wait in line for a lock, it spins on the
- * internal cycle counter to completely eliminate any causes of
- * bus traffic.
- *
- */
-
-#ifndef __CVMX_CMD_QUEUE_H__
-#define __CVMX_CMD_QUEUE_H__
-
-#include <linux/prefetch.h>
-
-#include "cvmx-fpa.h"
-/**
- * By default we disable the max depth support. Most programs
- * don't use it and it slows down the command queue processing
- * significantly.
- */
-#ifndef CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH
-#define CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH 0
-#endif
-
-/**
- * Enumeration representing all hardware blocks that use command
- * queues. Each hardware block has up to 65536 sub identifiers for
- * multiple command queues. Not all chips support all hardware
- * units.
- */
-typedef enum {
- CVMX_CMD_QUEUE_PKO_BASE = 0x00000,
-
-#define CVMX_CMD_QUEUE_PKO(queue) \
- ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_PKO_BASE + (0xffff&(queue))))
-
- CVMX_CMD_QUEUE_ZIP = 0x10000,
- CVMX_CMD_QUEUE_DFA = 0x20000,
- CVMX_CMD_QUEUE_RAID = 0x30000,
- CVMX_CMD_QUEUE_DMA_BASE = 0x40000,
-
-#define CVMX_CMD_QUEUE_DMA(queue) \
- ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_DMA_BASE + (0xffff&(queue))))
-
- CVMX_CMD_QUEUE_END = 0x50000,
-} cvmx_cmd_queue_id_t;
-
-/**
- * Command write operations can fail if the command queue needs
- * a new buffer and the associated FPA pool is empty. It can also
- * fail if the number of queued command words reaches the maximum
- * set at initialization.
- */
-typedef enum {
- CVMX_CMD_QUEUE_SUCCESS = 0,
- CVMX_CMD_QUEUE_NO_MEMORY = -1,
- CVMX_CMD_QUEUE_FULL = -2,
- CVMX_CMD_QUEUE_INVALID_PARAM = -3,
- CVMX_CMD_QUEUE_ALREADY_SETUP = -4,
-} cvmx_cmd_queue_result_t;
-
-typedef struct {
- /* You have lock when this is your ticket */
- uint8_t now_serving;
- uint64_t unused1:24;
- /* Maximum outstanding command words */
- uint32_t max_depth;
- /* FPA pool buffers come from */
- uint64_t fpa_pool:3;
- /* Top of command buffer pointer shifted 7 */
- uint64_t base_ptr_div128:29;
- uint64_t unused2:6;
- /* FPA buffer size in 64bit words minus 1 */
- uint64_t pool_size_m1:13;
- /* Number of commands already used in buffer */
- uint64_t index:13;
-} __cvmx_cmd_queue_state_t;
-
-/**
- * This structure contains the global state of all command queues.
- * It is stored in a bootmem named block and shared by all
- * applications running on Octeon. Tickets are stored in a differnet
- * cahce line that queue information to reduce the contention on the
- * ll/sc used to get a ticket. If this is not the case, the update
- * of queue state causes the ll/sc to fail quite often.
- */
-typedef struct {
- uint64_t ticket[(CVMX_CMD_QUEUE_END >> 16) * 256];
- __cvmx_cmd_queue_state_t state[(CVMX_CMD_QUEUE_END >> 16) * 256];
-} __cvmx_cmd_queue_all_state_t;
-
-/**
- * Initialize a command queue for use. The initial FPA buffer is
- * allocated and the hardware unit is configured to point to the
- * new command queue.
- *
- * @queue_id: Hardware command queue to initialize.
- * @max_depth: Maximum outstanding commands that can be queued.
- * @fpa_pool: FPA pool the command queues should come from.
- * @pool_size: Size of each buffer in the FPA pool (bytes)
- *
- * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
- */
-cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
- int max_depth, int fpa_pool,
- int pool_size);
-
-/**
- * Shutdown a queue a free it's command buffers to the FPA. The
- * hardware connected to the queue must be stopped before this
- * function is called.
- *
- * @queue_id: Queue to shutdown
- *
- * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
- */
-cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id);
-
-/**
- * Return the number of command words pending in the queue. This
- * function may be relatively slow for some hardware units.
- *
- * @queue_id: Hardware command queue to query
- *
- * Returns Number of outstanding commands
- */
-int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id);
-
-/**
- * Return the command buffer to be written to. The purpose of this
- * function is to allow CVMX routines access to the low level buffer
- * for initial hardware setup. User applications should not call this
- * function directly.
- *
- * @queue_id: Command queue to query
- *
- * Returns Command buffer or NULL on failure
- */
-void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id);
-
-/**
- * Get the index into the state arrays for the supplied queue id.
- *
- * @queue_id: Queue ID to get an index for
- *
- * Returns Index into the state arrays
- */
-static inline int __cvmx_cmd_queue_get_index(cvmx_cmd_queue_id_t queue_id)
-{
- /*
- * Warning: This code currently only works with devices that
- * have 256 queues or fewer. Devices with more than 16 queues
- * are laid out in memory to allow cores quick access to
- * every 16th queue. This reduces cache thrashing when you are
- * running 16 queues per port to support lockless operation.
- */
- int unit = queue_id >> 16;
- int q = (queue_id >> 4) & 0xf;
- int core = queue_id & 0xf;
- return unit * 256 + core * 16 + q;
-}
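As a worked example of the layout described in the comment above (the queue number is chosen purely for illustration):

CVMX_CMD_QUEUE_DMA(0x12) == 0x40012  =>  unit = 4, q = 1, core = 2
index = 4 * 256 + 2 * 16 + 1 = 1057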
-
-/**
- * Lock the supplied queue so nobody else is updating it at the same
- * time as us.
- *
- * @queue_id: Queue ID to lock
- * @qptr: Pointer to the queue's global state
- */
-static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id,
- __cvmx_cmd_queue_state_t *qptr)
-{
- extern __cvmx_cmd_queue_all_state_t
- *__cvmx_cmd_queue_state_ptr;
- int tmp;
- int my_ticket;
- prefetch(qptr);
- asm volatile (
- ".set push\n"
- ".set noreorder\n"
- "1:\n"
- /* Atomic add one to ticket_ptr */
- "ll %[my_ticket], %[ticket_ptr]\n"
- /* and store the original value */
- "li %[ticket], 1\n"
- /* in my_ticket */
- "baddu %[ticket], %[my_ticket]\n"
- "sc %[ticket], %[ticket_ptr]\n"
- "beqz %[ticket], 1b\n"
- " nop\n"
- /* Load the current now_serving ticket */
- "lbu %[ticket], %[now_serving]\n"
- "2:\n"
- /* Jump out if now_serving == my_ticket */
- "beq %[ticket], %[my_ticket], 4f\n"
- /* Find out how many tickets are in front of me */
- " subu %[ticket], %[my_ticket], %[ticket]\n"
- /* Use tickets in front of me minus one to delay */
- "subu %[ticket], 1\n"
- /* Delay will be ((tickets in front)-1)*32 loops */
- "cins %[ticket], %[ticket], 5, 7\n"
- "3:\n"
- /* Loop here until our ticket might be up */
- "bnez %[ticket], 3b\n"
- " subu %[ticket], 1\n"
- /* Jump back up to check our ticket again */
- "b 2b\n"
- /* Load the current now_serving ticket */
- " lbu %[ticket], %[now_serving]\n"
- "4:\n"
- ".set pop\n" :
- [ticket_ptr] "=m"(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
- [now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp),
- [my_ticket] "=r"(my_ticket)
- );
-}
-
-/**
- * Unlock the queue, flushing all writes.
- *
- * @qptr: Queue to unlock
- */
-static inline void __cvmx_cmd_queue_unlock(__cvmx_cmd_queue_state_t *qptr)
-{
- qptr->now_serving++;
- CVMX_SYNCWS;
-}
-
-/**
- * Get the queue state structure for the given queue id
- *
- * @queue_id: Queue id to get
- *
- * Returns Queue structure or NULL on failure
- */
-static inline __cvmx_cmd_queue_state_t
- *__cvmx_cmd_queue_get_state(cvmx_cmd_queue_id_t queue_id)
-{
- extern __cvmx_cmd_queue_all_state_t
- *__cvmx_cmd_queue_state_ptr;
- return &__cvmx_cmd_queue_state_ptr->
- state[__cvmx_cmd_queue_get_index(queue_id)];
-}
-
-/**
- * Write an arbitrary number of command words to a command queue.
- * This is a generic function; the fixed number of command word
- * functions yield higher performance.
- *
- * @queue_id: Hardware command queue to write to
- * @use_locking:
- * Use internal locking to ensure exclusive access for queue
- * updates. If you don't use this locking you must ensure
- * exclusivity some other way. Locking is strongly recommended.
- * @cmd_count: Number of command words to write
- * @cmds: Array of commands to write
- *
- * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
- */
-static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write(cvmx_cmd_queue_id_t
- queue_id,
- int use_locking,
- int cmd_count,
- uint64_t *cmds)
-{
- __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
-
- /* Make sure nobody else is updating the same queue */
- if (likely(use_locking))
- __cvmx_cmd_queue_lock(queue_id, qptr);
-
- /*
- * If a maximum queue length was specified, make sure we don't
- * exceed it. The command is allowed as long as any part of it
- * would land below the limit.
- */
- if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && unlikely(qptr->max_depth)) {
- if (unlikely
- (cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth)) {
- if (likely(use_locking))
- __cvmx_cmd_queue_unlock(qptr);
- return CVMX_CMD_QUEUE_FULL;
- }
- }
-
- /*
- * Normally there is plenty of room in the current buffer for
- * the command.
- */
- if (likely(qptr->index + cmd_count < qptr->pool_size_m1)) {
- uint64_t *ptr =
- (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
- base_ptr_div128 << 7);
- ptr += qptr->index;
- qptr->index += cmd_count;
- while (cmd_count--)
- *ptr++ = *cmds++;
- } else {
- uint64_t *ptr;
- int count;
- /*
- * We need a new command buffer. Fail if there isn't
- * one available.
- */
- uint64_t *new_buffer =
- (uint64_t *) cvmx_fpa_alloc(qptr->fpa_pool);
- if (unlikely(new_buffer == NULL)) {
- if (likely(use_locking))
- __cvmx_cmd_queue_unlock(qptr);
- return CVMX_CMD_QUEUE_NO_MEMORY;
- }
- ptr =
- (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
- base_ptr_div128 << 7);
- /*
- * Figure out how many command words will fit in this
- * buffer. One location will be needed for the next
- * buffer pointer.
- */
- count = qptr->pool_size_m1 - qptr->index;
- ptr += qptr->index;
- cmd_count -= count;
- while (count--)
- *ptr++ = *cmds++;
- *ptr = cvmx_ptr_to_phys(new_buffer);
- /*
- * The current buffer is full and has a link to the
- * next buffer. Time to write the rest of the commands
- * into the new buffer.
- */
- qptr->base_ptr_div128 = *ptr >> 7;
- qptr->index = cmd_count;
- ptr = new_buffer;
- while (cmd_count--)
- *ptr++ = *cmds++;
- }
-
- /* All updates are complete. Release the lock and return */
- if (likely(use_locking))
- __cvmx_cmd_queue_unlock(qptr);
- return CVMX_CMD_QUEUE_SUCCESS;
-}
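A minimal usage sketch for the generic write path above. The queue, command words and depth are hypothetical; the FPA pool macros are the ones defined in cvmx-config.h, and in practice the initialization is done once at startup:

static cvmx_cmd_queue_result_t my_pko_example(void)
{
	uint64_t cmds[2] = { 0x1122334455667788ull, 0 };	/* hypothetical command words */
	cvmx_cmd_queue_result_t r;

	/* One-time setup: hand the queue its command buffers from FPA pool 2. */
	r = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO(0), 0 /* max_depth */,
				      CVMX_FPA_OUTPUT_BUFFER_POOL,
				      CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE);
	if (r != CVMX_CMD_QUEUE_SUCCESS)
		return r;

	/* Queue two command words with internal locking enabled. */
	return cvmx_cmd_queue_write(CVMX_CMD_QUEUE_PKO(0), 1, 2, cmds);
}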
-
-/**
- * Simple function to write two command words to a command
- * queue.
- *
- * @queue_id: Hardware command queue to write to
- * @use_locking:
- * Use internal locking to ensure exclusive access for queue
- * updates. If you don't use this locking you must ensure
- * exclusivity some other way. Locking is strongly recommended.
- * @cmd1: Command
- * @cmd2: Command
- *
- * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
- */
-static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write2(cvmx_cmd_queue_id_t
- queue_id,
- int use_locking,
- uint64_t cmd1,
- uint64_t cmd2)
-{
- __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
-
- /* Make sure nobody else is updating the same queue */
- if (likely(use_locking))
- __cvmx_cmd_queue_lock(queue_id, qptr);
-
- /*
- * If a maximum queue length was specified, make sure we don't
- * exceed it. The command is allowed as long as any part of it
- * would land below the limit.
- */
- if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && unlikely(qptr->max_depth)) {
- if (unlikely
- (cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth)) {
- if (likely(use_locking))
- __cvmx_cmd_queue_unlock(qptr);
- return CVMX_CMD_QUEUE_FULL;
- }
- }
-
- /*
- * Normally there is plenty of room in the current buffer for
- * the command.
- */
- if (likely(qptr->index + 2 < qptr->pool_size_m1)) {
- uint64_t *ptr =
- (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
- base_ptr_div128 << 7);
- ptr += qptr->index;
- qptr->index += 2;
- ptr[0] = cmd1;
- ptr[1] = cmd2;
- } else {
- uint64_t *ptr;
- /*
- * Figure out how many command words will fit in this
- * buffer. One location will be needed for the next
- * buffer pointer.
- */
- int count = qptr->pool_size_m1 - qptr->index;
- /*
- * We need a new command buffer. Fail if there isn't
- * one available.
- */
- uint64_t *new_buffer =
- (uint64_t *) cvmx_fpa_alloc(qptr->fpa_pool);
- if (unlikely(new_buffer == NULL)) {
- if (likely(use_locking))
- __cvmx_cmd_queue_unlock(qptr);
- return CVMX_CMD_QUEUE_NO_MEMORY;
- }
- count--;
- ptr =
- (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
- base_ptr_div128 << 7);
- ptr += qptr->index;
- *ptr++ = cmd1;
- if (likely(count))
- *ptr++ = cmd2;
- *ptr = cvmx_ptr_to_phys(new_buffer);
- /*
- * The current buffer is full and has a link to the
- * next buffer. Time to write the rest of the commands
- * into the new buffer.
- */
- qptr->base_ptr_div128 = *ptr >> 7;
- qptr->index = 0;
- if (unlikely(count == 0)) {
- qptr->index = 1;
- new_buffer[0] = cmd2;
- }
- }
-
- /* All updates are complete. Release the lock and return */
- if (likely(use_locking))
- __cvmx_cmd_queue_unlock(qptr);
- return CVMX_CMD_QUEUE_SUCCESS;
-}
-
-/**
- * Simple function to write three command words to a command
- * queue.
- *
- * @queue_id: Hardware command queue to write to
- * @use_locking:
- * Use internal locking to ensure exclusive access for queue
- * updates. If you don't use this locking you must ensure
- * exclusivity some other way. Locking is strongly recommended.
- * @cmd1: Command
- * @cmd2: Command
- * @cmd3: Command
- *
- * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
- */
-static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write3(cvmx_cmd_queue_id_t
- queue_id,
- int use_locking,
- uint64_t cmd1,
- uint64_t cmd2,
- uint64_t cmd3)
-{
- __cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);
-
- /* Make sure nobody else is updating the same queue */
- if (likely(use_locking))
- __cvmx_cmd_queue_lock(queue_id, qptr);
-
- /*
- * If a maximum queue length was specified, make sure we don't
- * exceed it. The command is allowed as long as any part of it
- * would land below the limit.
- */
- if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && unlikely(qptr->max_depth)) {
- if (unlikely
- (cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth)) {
- if (likely(use_locking))
- __cvmx_cmd_queue_unlock(qptr);
- return CVMX_CMD_QUEUE_FULL;
- }
- }
-
- /*
- * Normally there is plenty of room in the current buffer for
- * the command.
- */
- if (likely(qptr->index + 3 < qptr->pool_size_m1)) {
- uint64_t *ptr =
- (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
- base_ptr_div128 << 7);
- ptr += qptr->index;
- qptr->index += 3;
- ptr[0] = cmd1;
- ptr[1] = cmd2;
- ptr[2] = cmd3;
- } else {
- uint64_t *ptr;
- /*
- * Figure out how many command words will fit in this
- * buffer. One location will be needed for the next
- * buffer pointer.
- */
- int count = qptr->pool_size_m1 - qptr->index;
- /*
- * We need a new command buffer. Fail if there isn't
- * one available.
- */
- uint64_t *new_buffer =
- (uint64_t *) cvmx_fpa_alloc(qptr->fpa_pool);
- if (unlikely(new_buffer == NULL)) {
- if (likely(use_locking))
- __cvmx_cmd_queue_unlock(qptr);
- return CVMX_CMD_QUEUE_NO_MEMORY;
- }
- count--;
- ptr =
- (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
- base_ptr_div128 << 7);
- ptr += qptr->index;
- *ptr++ = cmd1;
- if (count) {
- *ptr++ = cmd2;
- if (count > 1)
- *ptr++ = cmd3;
- }
- *ptr = cvmx_ptr_to_phys(new_buffer);
- /*
- * The current buffer is full and has a link to the
- * next buffer. Time to write the rest of the commands
- * into the new buffer.
- */
- qptr->base_ptr_div128 = *ptr >> 7;
- qptr->index = 0;
- ptr = new_buffer;
- if (count == 0) {
- *ptr++ = cmd2;
- qptr->index++;
- }
- if (count < 2) {
- *ptr++ = cmd3;
- qptr->index++;
- }
- }
-
- /* All updates are complete. Release the lock and return */
- if (likely(use_locking))
- __cvmx_cmd_queue_unlock(qptr);
- return CVMX_CMD_QUEUE_SUCCESS;
-}
-
-#endif /* __CVMX_CMD_QUEUE_H__ */
diff --git a/drivers/staging/octeon/cvmx-config.h b/drivers/staging/octeon/cvmx-config.h
deleted file mode 100644
index 078a520..0000000
--- a/drivers/staging/octeon/cvmx-config.h
+++ /dev/null
@@ -1,169 +0,0 @@
-#ifndef __CVMX_CONFIG_H__
-#define __CVMX_CONFIG_H__
-
-/************************* Config Specific Defines ************************/
-#define CVMX_LLM_NUM_PORTS 1
-#define CVMX_NULL_POINTER_PROTECT 1
-#define CVMX_ENABLE_DEBUG_PRINTS 1
-/* PKO queues per port for interface 0 (ports 0-15) */
-#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 1
-/* PKO queues per port for interface 1 (ports 16-31) */
-#define CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 1
-/* Limit on the number of PKO ports enabled for interface 0 */
-#define CVMX_PKO_MAX_PORTS_INTERFACE0 CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0
-/* Limit on the number of PKO ports enabled for interface 1 */
-#define CVMX_PKO_MAX_PORTS_INTERFACE1 CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1
-/* PKO queues per port for PCI (ports 32-35) */
-#define CVMX_PKO_QUEUES_PER_PORT_PCI 1
-/* PKO queues per port for Loop devices (ports 36-39) */
-#define CVMX_PKO_QUEUES_PER_PORT_LOOP 1
-
-/************************* FPA allocation *********************************/
-/* Pool sizes in bytes, must be multiple of a cache line */
-#define CVMX_FPA_POOL_0_SIZE (16 * CVMX_CACHE_LINE_SIZE)
-#define CVMX_FPA_POOL_1_SIZE (1 * CVMX_CACHE_LINE_SIZE)
-#define CVMX_FPA_POOL_2_SIZE (8 * CVMX_CACHE_LINE_SIZE)
-#define CVMX_FPA_POOL_3_SIZE (0 * CVMX_CACHE_LINE_SIZE)
-#define CVMX_FPA_POOL_4_SIZE (0 * CVMX_CACHE_LINE_SIZE)
-#define CVMX_FPA_POOL_5_SIZE (0 * CVMX_CACHE_LINE_SIZE)
-#define CVMX_FPA_POOL_6_SIZE (0 * CVMX_CACHE_LINE_SIZE)
-#define CVMX_FPA_POOL_7_SIZE (0 * CVMX_CACHE_LINE_SIZE)
-
-/* Pools in use */
-/* Packet buffers */
-#define CVMX_FPA_PACKET_POOL (0)
-#define CVMX_FPA_PACKET_POOL_SIZE CVMX_FPA_POOL_0_SIZE
-/* Work queue entries */
-#define CVMX_FPA_WQE_POOL (1)
-#define CVMX_FPA_WQE_POOL_SIZE CVMX_FPA_POOL_1_SIZE
-/* PKO queue command buffers */
-#define CVMX_FPA_OUTPUT_BUFFER_POOL (2)
-#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE CVMX_FPA_POOL_2_SIZE
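For a rough sense of scale, illustrative arithmetic only, assuming CVMX_CACHE_LINE_SIZE has the usual OCTEON value of 128 bytes (it is defined elsewhere):

CVMX_FPA_PACKET_POOL_SIZE        = 16 * 128 = 2048 bytes per packet buffer
CVMX_FPA_WQE_POOL_SIZE           =  1 * 128 =  128 bytes per work queue entry
CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE =  8 * 128 = 1024 bytes per PKO command buffer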
-
-/************************* FAU allocation ********************************/
-/* The fetch and add registers are allocated here. They are arranged
- * in order of descending size so that all alignment constraints are
- * automatically met. The enums are linked so that the following enum
- * continues allocating where the previous one left off, so the
- * numbering within each enum always starts with zero. The macros
- * take care of the address increment size, so the values entered
- * always increase by 1. FAU registers are accessed with byte
- * addresses.
- */
-
-#define CVMX_FAU_REG_64_ADDR(x) ((x << 3) + CVMX_FAU_REG_64_START)
-typedef enum {
- CVMX_FAU_REG_64_START = 0,
- CVMX_FAU_REG_64_END = CVMX_FAU_REG_64_ADDR(0),
-} cvmx_fau_reg_64_t;
-
-#define CVMX_FAU_REG_32_ADDR(x) ((x << 2) + CVMX_FAU_REG_32_START)
-typedef enum {
- CVMX_FAU_REG_32_START = CVMX_FAU_REG_64_END,
- CVMX_FAU_REG_32_END = CVMX_FAU_REG_32_ADDR(0),
-} cvmx_fau_reg_32_t;
-
-#define CVMX_FAU_REG_16_ADDR(x) ((x << 1) + CVMX_FAU_REG_16_START)
-typedef enum {
- CVMX_FAU_REG_16_START = CVMX_FAU_REG_32_END,
- CVMX_FAU_REG_16_END = CVMX_FAU_REG_16_ADDR(0),
-} cvmx_fau_reg_16_t;
-
-#define CVMX_FAU_REG_8_ADDR(x) ((x) + CVMX_FAU_REG_8_START)
-typedef enum {
- CVMX_FAU_REG_8_START = CVMX_FAU_REG_16_END,
- CVMX_FAU_REG_8_END = CVMX_FAU_REG_8_ADDR(0),
-} cvmx_fau_reg_8_t;
-
-/*
- * The name CVMX_FAU_REG_AVAIL_BASE is provided to indicate the first
- * available FAU address that is not allocated in cvmx-config.h. This
- * is 64 bit aligned.
- */
-#define CVMX_FAU_REG_AVAIL_BASE ((CVMX_FAU_REG_8_END + 0x7) & (~0x7ULL))
-#define CVMX_FAU_REG_END (2048)
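A sketch of how the cvmx_fau_reg_64_t enum above would look after a driver claims two hypothetical 64-bit counters. New entries go before the *_END member, so the END value, and with it the START of the next size class, moves up automatically:

typedef enum {
	CVMX_FAU_REG_64_START = 0,
	MY_FAU_TX_PACKETS = CVMX_FAU_REG_64_ADDR(0),	/* hypothetical counter at byte 0 */
	MY_FAU_TX_BYTES = CVMX_FAU_REG_64_ADDR(1),	/* hypothetical counter at byte 8 */
	CVMX_FAU_REG_64_END = CVMX_FAU_REG_64_ADDR(2),	/* 32-bit registers then start at 16 */
} cvmx_fau_reg_64_t;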
-
-/********************** scratch memory allocation *************************/
-/* Scratchpad memory allocation. Note that these are byte memory
- * addresses. Some uses of scratchpad (IOBDMA for example) require
- * the use of 8-byte aligned addresses, so proper alignment needs to
- * be taken into account.
- */
-/* Generic scratch iobdma area */
-#define CVMX_SCR_SCRATCH (0)
-/* First location available after cvmx-config.h allocated region. */
-#define CVMX_SCR_REG_AVAIL_BASE (8)
-
-/*
- * CVMX_HELPER_FIRST_MBUFF_SKIP is the number of bytes to reserve
- * before the beginning of the packet. If necessary, override the
- * default here. See the IPD section of the hardware manual for MBUFF
- * SKIP details.
- */
-#define CVMX_HELPER_FIRST_MBUFF_SKIP 184
-
-/*
- * CVMX_HELPER_NOT_FIRST_MBUFF_SKIP is the number of bytes to reserve
- * in each chained packet element. If necessary, override the default
- * here.
- */
-#define CVMX_HELPER_NOT_FIRST_MBUFF_SKIP 0
-
-/*
- * CVMX_HELPER_ENABLE_BACK_PRESSURE controls whether back pressure is
- * enabled for all input ports. When enabled, the IPD sends
- * backpressure to all ports whenever Octeon's FPA pools run low on
- * packet buffers or work queue entries. Even when this is off, it is
- * still possible to get backpressure from individual hardware ports.
- * When configuring backpressure, also check
- * CVMX_HELPER_DISABLE_*_BACKPRESSURE below. If necessary, override
- * the default here.
- */
-#define CVMX_HELPER_ENABLE_BACK_PRESSURE 1
-
-/*
- * CVMX_HELPER_ENABLE_IPD controls whether the IPD is enabled in the helper
- * function. Once it is enabled the hardware starts accepting
- * packets. You might want to skip the IPD enable if configuration
- * changes are needed beyond the default helper setup. If necessary,
- * override the default here.
- */
-#define CVMX_HELPER_ENABLE_IPD 0
-
-/*
- * CVMX_HELPER_INPUT_TAG_TYPE selects the type of tag that the IPD assigns
- * to incoming packets.
- */
-#define CVMX_HELPER_INPUT_TAG_TYPE CVMX_POW_TAG_TYPE_ORDERED
-
-#define CVMX_ENABLE_PARAMETER_CHECKING 0
-
-/*
- * The following select which fields are used by the PIP to generate
- * the tag on INPUT
- * 0: don't include
- * 1: include
- */
-#define CVMX_HELPER_INPUT_TAG_IPV6_SRC_IP 0
-#define CVMX_HELPER_INPUT_TAG_IPV6_DST_IP 0
-#define CVMX_HELPER_INPUT_TAG_IPV6_SRC_PORT 0
-#define CVMX_HELPER_INPUT_TAG_IPV6_DST_PORT 0
-#define CVMX_HELPER_INPUT_TAG_IPV6_NEXT_HEADER 0
-#define CVMX_HELPER_INPUT_TAG_IPV4_SRC_IP 0
-#define CVMX_HELPER_INPUT_TAG_IPV4_DST_IP 0
-#define CVMX_HELPER_INPUT_TAG_IPV4_SRC_PORT 0
-#define CVMX_HELPER_INPUT_TAG_IPV4_DST_PORT 0
-#define CVMX_HELPER_INPUT_TAG_IPV4_PROTOCOL 0
-#define CVMX_HELPER_INPUT_TAG_INPUT_PORT 1
-
-/* Select skip mode for input ports */
-#define CVMX_HELPER_INPUT_PORT_SKIP_MODE CVMX_PIP_PORT_CFG_MODE_SKIPL2
-
-/*
- * Force backpressure to be disabled. This overrides all other
- * backpressure configuration.
- */
-#define CVMX_HELPER_DISABLE_RGMII_BACKPRESSURE 0
-
-#endif /* __CVMX_CONFIG_H__ */
-
diff --git a/drivers/staging/octeon/cvmx-dbg-defs.h b/drivers/staging/octeon/cvmx-dbg-defs.h
deleted file mode 100644
index abbf42d..0000000
--- a/drivers/staging/octeon/cvmx-dbg-defs.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_DBG_DEFS_H__
-#define __CVMX_DBG_DEFS_H__
-
-#define CVMX_DBG_DATA \
- CVMX_ADD_IO_SEG(0x00011F00000001E8ull)
-
-union cvmx_dbg_data {
- uint64_t u64;
- struct cvmx_dbg_data_s {
- uint64_t reserved_23_63:41;
- uint64_t c_mul:5;
- uint64_t dsel_ext:1;
- uint64_t data:17;
- } s;
- struct cvmx_dbg_data_cn30xx {
- uint64_t reserved_31_63:33;
- uint64_t pll_mul:3;
- uint64_t reserved_23_27:5;
- uint64_t c_mul:5;
- uint64_t dsel_ext:1;
- uint64_t data:17;
- } cn30xx;
- struct cvmx_dbg_data_cn30xx cn31xx;
- struct cvmx_dbg_data_cn38xx {
- uint64_t reserved_29_63:35;
- uint64_t d_mul:4;
- uint64_t dclk_mul2:1;
- uint64_t cclk_div2:1;
- uint64_t c_mul:5;
- uint64_t dsel_ext:1;
- uint64_t data:17;
- } cn38xx;
- struct cvmx_dbg_data_cn38xx cn38xxp2;
- struct cvmx_dbg_data_cn30xx cn50xx;
- struct cvmx_dbg_data_cn58xx {
- uint64_t reserved_29_63:35;
- uint64_t rem:6;
- uint64_t c_mul:5;
- uint64_t dsel_ext:1;
- uint64_t data:17;
- } cn58xx;
- struct cvmx_dbg_data_cn58xx cn58xxp1;
-};
-
-#endif
diff --git a/drivers/staging/octeon/cvmx-fau.h b/drivers/staging/octeon/cvmx-fau.h
deleted file mode 100644
index a6939fc..0000000
--- a/drivers/staging/octeon/cvmx-fau.h
+++ /dev/null
@@ -1,597 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- * Interface to the hardware Fetch and Add Unit.
- */
-
-#ifndef __CVMX_FAU_H__
-#define __CVMX_FAU_H__
-
-/*
- * Octeon Fetch and Add Unit (FAU)
- */
-
-#define CVMX_FAU_LOAD_IO_ADDRESS cvmx_build_io_address(0x1e, 0)
-#define CVMX_FAU_BITS_SCRADDR 63, 56
-#define CVMX_FAU_BITS_LEN 55, 48
-#define CVMX_FAU_BITS_INEVAL 35, 14
-#define CVMX_FAU_BITS_TAGWAIT 13, 13
-#define CVMX_FAU_BITS_NOADD 13, 13
-#define CVMX_FAU_BITS_SIZE 12, 11
-#define CVMX_FAU_BITS_REGISTER 10, 0
-
-typedef enum {
- CVMX_FAU_OP_SIZE_8 = 0,
- CVMX_FAU_OP_SIZE_16 = 1,
- CVMX_FAU_OP_SIZE_32 = 2,
- CVMX_FAU_OP_SIZE_64 = 3
-} cvmx_fau_op_size_t;
-
-/**
- * Tagwait return definition. If a timeout occurs, the error
- * bit will be set. Otherwise the value of the register before
- * the update will be returned.
- */
-typedef struct {
- uint64_t error:1;
- int64_t value:63;
-} cvmx_fau_tagwait64_t;
-
-/**
- * Tagwait return definition. If a timeout occurs, the error
- * bit will be set. Otherwise the value of the register before
- * the update will be returned.
- */
-typedef struct {
- uint64_t error:1;
- int32_t value:31;
-} cvmx_fau_tagwait32_t;
-
-/**
- * Tagwait return definition. If a timeout occurs, the error
- * bit will be set. Otherwise the value of the register before
- * the update will be returned.
- */
-typedef struct {
- uint64_t error:1;
- int16_t value:15;
-} cvmx_fau_tagwait16_t;
-
-/**
- * Tagwait return definition. If a timeout occurs, the error
- * bit will be set. Otherwise the value of the register before
- * the update will be returned.
- */
-typedef struct {
- uint64_t error:1;
- int8_t value:7;
-} cvmx_fau_tagwait8_t;
-
-/**
- * Asynchronous tagwait return definition. If a timeout occurs,
- * the error bit will be set. Otherwise the value of the
- * register before the update will be returned.
- */
-typedef union {
- uint64_t u64;
- struct {
- uint64_t invalid:1;
- uint64_t data:63; /* unpredictable if invalid is set */
- } s;
-} cvmx_fau_async_tagwait_result_t;
-
-/**
- * Builds a store I/O address for writing to the FAU
- *
- * @noadd: 0 = Store value is atomically added to the current value
- * 1 = Store value is atomically written over the current value
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
- * - Step by 4 for 32 bit access.
- * - Step by 8 for 64 bit access.
- * Returns Address to store for atomic update
- */
-static inline uint64_t __cvmx_fau_store_address(uint64_t noadd, uint64_t reg)
-{
- return CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
- cvmx_build_bits(CVMX_FAU_BITS_NOADD, noadd) |
- cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
-}
-
-/**
- * Builds an I/O address for accessing the FAU
- *
- * @tagwait: Should the atomic add wait for the current tag switch
- * operation to complete.
- * - 0 = Don't wait
- * - 1 = Wait for tag switch to complete
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
- * - Step by 4 for 32 bit access.
- * - Step by 8 for 64 bit access.
- * @value: Signed value to add.
- * Note: When performing 32 and 64 bit access, only the low
- * 22 bits are available.
- * Returns Address to read from for atomic update
- */
-static inline uint64_t __cvmx_fau_atomic_address(uint64_t tagwait, uint64_t reg,
- int64_t value)
-{
- return CVMX_ADD_IO_SEG(CVMX_FAU_LOAD_IO_ADDRESS) |
- cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
- cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
- cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
-}
-
-/**
- * Perform an atomic 64 bit add
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
- * @value: Signed value to add.
- * Note: Only the low 22 bits are available.
- * Returns Value of the register before the update
- */
-static inline int64_t cvmx_fau_fetch_and_add64(cvmx_fau_reg_64_t reg,
- int64_t value)
-{
- return cvmx_read64_int64(__cvmx_fau_atomic_address(0, reg, value));
-}
-
-/**
- * Perform an atomic 32 bit add
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
- * @value: Signed value to add.
- * Note: Only the low 22 bits are available.
- * Returns Value of the register before the update
- */
-static inline int32_t cvmx_fau_fetch_and_add32(cvmx_fau_reg_32_t reg,
- int32_t value)
-{
- return cvmx_read64_int32(__cvmx_fau_atomic_address(0, reg, value));
-}
-
-/**
- * Perform an atomic 16 bit add
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
- * @value: Signed value to add.
- * Returns Value of the register before the update
- */
-static inline int16_t cvmx_fau_fetch_and_add16(cvmx_fau_reg_16_t reg,
- int16_t value)
-{
- return cvmx_read64_int16(__cvmx_fau_atomic_address(0, reg, value));
-}
-
-/**
- * Perform an atomic 8 bit add
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * @value: Signed value to add.
- * Returns Value of the register before the update
- */
-static inline int8_t cvmx_fau_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
-{
- return cvmx_read64_int8(__cvmx_fau_atomic_address(0, reg, value));
-}
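A minimal usage sketch for the synchronous forms above. The register name is hypothetical and would normally be allocated in cvmx-config.h; here it is simply placed at CVMX_FAU_REG_AVAIL_BASE, the first unallocated 64-bit aligned slot:

/* Hypothetical 64-bit counter at the first free FAU register. */
#define MY_FAU_PACKET_COUNT ((cvmx_fau_reg_64_t)CVMX_FAU_REG_AVAIL_BASE)

static inline int64_t count_packet(void)
{
	/* Atomically add 1; the returned value is the count before the add. */
	return cvmx_fau_fetch_and_add64(MY_FAU_PACKET_COUNT, 1);
}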
-
-/**
- * Perform an atomic 64 bit add after the current tag switch
- * completes
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
- * @value: Signed value to add.
- * Note: Only the low 22 bits are available.
- * Returns If a timeout occurs, the error bit will be set. Otherwise
- * the value of the register before the update will be
- * returned
- */
-static inline cvmx_fau_tagwait64_t
-cvmx_fau_tagwait_fetch_and_add64(cvmx_fau_reg_64_t reg, int64_t value)
-{
- union {
- uint64_t i64;
- cvmx_fau_tagwait64_t t;
- } result;
- result.i64 =
- cvmx_read64_int64(__cvmx_fau_atomic_address(1, reg, value));
- return result.t;
-}
-
-/**
- * Perform an atomic 32 bit add after the current tag switch
- * completes
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
- * @value: Signed value to add.
- * Note: Only the low 22 bits are available.
- * Returns If a timeout occurs, the error bit will be set. Otherwise
- * the value of the register before the update will be
- * returned
- */
-static inline cvmx_fau_tagwait32_t
-cvmx_fau_tagwait_fetch_and_add32(cvmx_fau_reg_32_t reg, int32_t value)
-{
- union {
- uint64_t i32;
- cvmx_fau_tagwait32_t t;
- } result;
- result.i32 =
- cvmx_read64_int32(__cvmx_fau_atomic_address(1, reg, value));
- return result.t;
-}
-
-/**
- * Perform an atomic 16 bit add after the current tag switch
- * completes
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
- * @value: Signed value to add.
- * Returns If a timeout occurs, the error bit will be set. Otherwise
- * the value of the register before the update will be
- * returned
- */
-static inline cvmx_fau_tagwait16_t
-cvmx_fau_tagwait_fetch_and_add16(cvmx_fau_reg_16_t reg, int16_t value)
-{
- union {
- uint64_t i16;
- cvmx_fau_tagwait16_t t;
- } result;
- result.i16 =
- cvmx_read64_int16(__cvmx_fau_atomic_address(1, reg, value));
- return result.t;
-}
-
-/**
- * Perform an atomic 8 bit add after the current tag switch
- * completes
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * @value: Signed value to add.
- * Returns If a timeout occurs, the error bit will be set. Otherwise
- * the value of the register before the update will be
- * returned
- */
-static inline cvmx_fau_tagwait8_t
-cvmx_fau_tagwait_fetch_and_add8(cvmx_fau_reg_8_t reg, int8_t value)
-{
- union {
- uint64_t i8;
- cvmx_fau_tagwait8_t t;
- } result;
- result.i8 = cvmx_read64_int8(__cvmx_fau_atomic_address(1, reg, value));
- return result.t;
-}
-
-/**
- * Builds I/O data for async operations
- *
- * @scraddr: Scratch pad byte address to write to. Must be 8 byte aligned
- * @value: Signed value to add.
- * Note: When performing 32 and 64 bit access, only the low
- * 22 bits are available.
- * @tagwait: Should the atomic add wait for the current tag switch
- * operation to complete.
- * - 0 = Don't wait
- * - 1 = Wait for tag switch to complete
- * @size: The size of the operation:
- * - CVMX_FAU_OP_SIZE_8 (0) = 8 bits
- * - CVMX_FAU_OP_SIZE_16 (1) = 16 bits
- * - CVMX_FAU_OP_SIZE_32 (2) = 32 bits
- * - CVMX_FAU_OP_SIZE_64 (3) = 64 bits
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
- * - Step by 4 for 32 bit access.
- * - Step by 8 for 64 bit access.
- * Returns Data to write using cvmx_send_single
- */
-static inline uint64_t __cvmx_fau_iobdma_data(uint64_t scraddr, int64_t value,
- uint64_t tagwait,
- cvmx_fau_op_size_t size,
- uint64_t reg)
-{
- return CVMX_FAU_LOAD_IO_ADDRESS |
- cvmx_build_bits(CVMX_FAU_BITS_SCRADDR, scraddr >> 3) |
- cvmx_build_bits(CVMX_FAU_BITS_LEN, 1) |
- cvmx_build_bits(CVMX_FAU_BITS_INEVAL, value) |
- cvmx_build_bits(CVMX_FAU_BITS_TAGWAIT, tagwait) |
- cvmx_build_bits(CVMX_FAU_BITS_SIZE, size) |
- cvmx_build_bits(CVMX_FAU_BITS_REGISTER, reg);
-}
-
-/**
- * Perform an async atomic 64 bit add. The old value is
- * placed in the scratch memory at byte address scraddr.
- *
- * @scraddr: Scratch memory byte address to put response in.
- * Must be 8 byte aligned.
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
- * @value: Signed value to add.
- * Note: Only the low 22 bits are available.
- * Returns Placed in the scratch pad register
- */
-static inline void cvmx_fau_async_fetch_and_add64(uint64_t scraddr,
- cvmx_fau_reg_64_t reg,
- int64_t value)
-{
- cvmx_send_single(__cvmx_fau_iobdma_data
- (scraddr, value, 0, CVMX_FAU_OP_SIZE_64, reg));
-}
-
-/**
- * Perform an async atomic 32 bit add. The old value is
- * placed in the scratch memory at byte address scraddr.
- *
- * @scraddr: Scratch memory byte address to put response in.
- * Must be 8 byte aligned.
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
- * @value: Signed value to add.
- * Note: Only the low 22 bits are available.
- * Returns Placed in the scratch pad register
- */
-static inline void cvmx_fau_async_fetch_and_add32(uint64_t scraddr,
- cvmx_fau_reg_32_t reg,
- int32_t value)
-{
- cvmx_send_single(__cvmx_fau_iobdma_data
- (scraddr, value, 0, CVMX_FAU_OP_SIZE_32, reg));
-}
-
-/**
- * Perform an async atomic 16 bit add. The old value is
- * placed in the scratch memory at byte address scraddr.
- *
- * @scraddr: Scratch memory byte address to put response in.
- * Must be 8 byte aligned.
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
- * @value: Signed value to add.
- * Returns Placed in the scratch pad register
- */
-static inline void cvmx_fau_async_fetch_and_add16(uint64_t scraddr,
- cvmx_fau_reg_16_t reg,
- int16_t value)
-{
- cvmx_send_single(__cvmx_fau_iobdma_data
- (scraddr, value, 0, CVMX_FAU_OP_SIZE_16, reg));
-}
-
-/**
- * Perform an async atomic 8 bit add. The old value is
- * placed in the scratch memory at byte address scraddr.
- *
- * @scraddr: Scratch memory byte address to put response in.
- * Must be 8 byte aligned.
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * @value: Signed value to add.
- * Returns Placed in the scratch pad register
- */
-static inline void cvmx_fau_async_fetch_and_add8(uint64_t scraddr,
- cvmx_fau_reg_8_t reg,
- int8_t value)
-{
- cvmx_send_single(__cvmx_fau_iobdma_data
- (scraddr, value, 0, CVMX_FAU_OP_SIZE_8, reg));
-}
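A sketch of the asynchronous pattern, reusing the hypothetical MY_FAU_PACKET_COUNT register from the earlier sketch. CVMX_SCR_SCRATCH comes from cvmx-config.h; cvmx_scratch_read64() and the CVMX_SYNCIOBDMA barrier are assumed to be available from the other cvmx headers, since neither is defined in this file:

static inline int64_t count_packet_async(void)
{
	/* Kick off the add; the old value lands in scratchpad CVMX_SCR_SCRATCH. */
	cvmx_fau_async_fetch_and_add64(CVMX_SCR_SCRATCH, MY_FAU_PACKET_COUNT, 1);
	/* ... other work can overlap with the in-flight IOBDMA here ... */
	CVMX_SYNCIOBDMA;	/* wait for the response to reach the scratchpad */
	return (int64_t)cvmx_scratch_read64(CVMX_SCR_SCRATCH);
}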
-
-/**
- * Perform an async atomic 64 bit add after the current tag
- * switch completes.
- *
- * @scraddr: Scratch memory byte address to put response in. Must be
- * 8 byte aligned. If a timeout occurs, the error bit (63)
- * will be set. Otherwise the value of the register before
- * the update will be returned
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
- * @value: Signed value to add.
- * Note: Only the low 22 bits are available.
- * Returns Placed in the scratch pad register
- */
-static inline void cvmx_fau_async_tagwait_fetch_and_add64(uint64_t scraddr,
- cvmx_fau_reg_64_t reg,
- int64_t value)
-{
- cvmx_send_single(__cvmx_fau_iobdma_data
- (scraddr, value, 1, CVMX_FAU_OP_SIZE_64, reg));
-}
-
-/**
- * Perform an async atomic 32 bit add after the current tag
- * switch completes.
- *
- * @scraddr: Scratch memory byte address to put response in. Must be
- * 8 byte aligned. If a timeout occurs, the error bit (63)
- * will be set. Otherwise the value of the register before
- * the update will be returned
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
- * @value: Signed value to add.
- * Note: Only the low 22 bits are available.
- * Returns Placed in the scratch pad register
- */
-static inline void cvmx_fau_async_tagwait_fetch_and_add32(uint64_t scraddr,
- cvmx_fau_reg_32_t reg,
- int32_t value)
-{
- cvmx_send_single(__cvmx_fau_iobdma_data
- (scraddr, value, 1, CVMX_FAU_OP_SIZE_32, reg));
-}
-
-/**
- * Perform an async atomic 16 bit add after the current tag
- * switch completes.
- *
- * @scraddr: Scratch memory byte address to put response in. Must be
- * 8 byte aligned. If a timeout occurs, the error bit (63)
- * will be set. Otherwise the value of the register before
- * the update will be returned
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
- * @value: Signed value to add.
- *
- * Returns Placed in the scratch pad register
- */
-static inline void cvmx_fau_async_tagwait_fetch_and_add16(uint64_t scraddr,
- cvmx_fau_reg_16_t reg,
- int16_t value)
-{
- cvmx_send_single(__cvmx_fau_iobdma_data
- (scraddr, value, 1, CVMX_FAU_OP_SIZE_16, reg));
-}
-
-/**
- * Perform an async atomic 8 bit add after the current tag
- * switch completes.
- *
- * @scraddr: Scratch memory byte address to put response in. Must be
- * 8 byte aligned. If a timeout occurs, the error bit (63)
- * will be set. Otherwise the value of the register before
- * the update will be returned
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * @value: Signed value to add.
- *
- * Returns Placed in the scratch pad register
- */
-static inline void cvmx_fau_async_tagwait_fetch_and_add8(uint64_t scraddr,
- cvmx_fau_reg_8_t reg,
- int8_t value)
-{
- cvmx_send_single(__cvmx_fau_iobdma_data
- (scraddr, value, 1, CVMX_FAU_OP_SIZE_8, reg));
-}
-
-/**
- * Perform an atomic 64 bit add
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
- * @value: Signed value to add.
- */
-static inline void cvmx_fau_atomic_add64(cvmx_fau_reg_64_t reg, int64_t value)
-{
- cvmx_write64_int64(__cvmx_fau_store_address(0, reg), value);
-}
-
-/**
- * Perform an atomic 32 bit add
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
- * @value: Signed value to add.
- */
-static inline void cvmx_fau_atomic_add32(cvmx_fau_reg_32_t reg, int32_t value)
-{
- cvmx_write64_int32(__cvmx_fau_store_address(0, reg), value);
-}
-
-/**
- * Perform an atomic 16 bit add
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
- * @value: Signed value to add.
- */
-static inline void cvmx_fau_atomic_add16(cvmx_fau_reg_16_t reg, int16_t value)
-{
- cvmx_write64_int16(__cvmx_fau_store_address(0, reg), value);
-}
-
-/**
- * Perform an atomic 8 bit add
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * @value: Signed value to add.
- */
-static inline void cvmx_fau_atomic_add8(cvmx_fau_reg_8_t reg, int8_t value)
-{
- cvmx_write64_int8(__cvmx_fau_store_address(0, reg), value);
-}
-
-/**
- * Perform an atomic 64 bit write
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 8 for 64 bit access.
- * @value: Signed value to write.
- */
-static inline void cvmx_fau_atomic_write64(cvmx_fau_reg_64_t reg, int64_t value)
-{
- cvmx_write64_int64(__cvmx_fau_store_address(1, reg), value);
-}
-
-/**
- * Perform an atomic 32 bit write
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 4 for 32 bit access.
- * @value: Signed value to write.
- */
-static inline void cvmx_fau_atomic_write32(cvmx_fau_reg_32_t reg, int32_t value)
-{
- cvmx_write64_int32(__cvmx_fau_store_address(1, reg), value);
-}
-
-/**
- * Perform an atomic 16 bit write
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * - Step by 2 for 16 bit access.
- * @value: Signed value to write.
- */
-static inline void cvmx_fau_atomic_write16(cvmx_fau_reg_16_t reg, int16_t value)
-{
- cvmx_write64_int16(__cvmx_fau_store_address(1, reg), value);
-}
-
-/**
- * Perform an atomic 8 bit write
- *
- * @reg: FAU atomic register to access. 0 <= reg < 2048.
- * @value: Signed value to write.
- */
-static inline void cvmx_fau_atomic_write8(cvmx_fau_reg_8_t reg, int8_t value)
-{
- cvmx_write64_int8(__cvmx_fau_store_address(1, reg), value);
-}
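Continuing the hypothetical counter sketch, resetting it is just an atomic write:

static inline void reset_packet_count(void)
{
	cvmx_fau_atomic_write64(MY_FAU_PACKET_COUNT, 0);
}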
-
-#endif /* __CVMX_FAU_H__ */
diff --git a/drivers/staging/octeon/cvmx-fpa-defs.h b/drivers/staging/octeon/cvmx-fpa-defs.h
deleted file mode 100644
index bf5546b..0000000
--- a/drivers/staging/octeon/cvmx-fpa-defs.h
+++ /dev/null
@@ -1,403 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_FPA_DEFS_H__
-#define __CVMX_FPA_DEFS_H__
-
-#define CVMX_FPA_BIST_STATUS \
- CVMX_ADD_IO_SEG(0x00011800280000E8ull)
-#define CVMX_FPA_CTL_STATUS \
- CVMX_ADD_IO_SEG(0x0001180028000050ull)
-#define CVMX_FPA_FPF0_MARKS \
- CVMX_ADD_IO_SEG(0x0001180028000000ull)
-#define CVMX_FPA_FPF0_SIZE \
- CVMX_ADD_IO_SEG(0x0001180028000058ull)
-#define CVMX_FPA_FPF1_MARKS \
- CVMX_ADD_IO_SEG(0x0001180028000008ull)
-#define CVMX_FPA_FPF2_MARKS \
- CVMX_ADD_IO_SEG(0x0001180028000010ull)
-#define CVMX_FPA_FPF3_MARKS \
- CVMX_ADD_IO_SEG(0x0001180028000018ull)
-#define CVMX_FPA_FPF4_MARKS \
- CVMX_ADD_IO_SEG(0x0001180028000020ull)
-#define CVMX_FPA_FPF5_MARKS \
- CVMX_ADD_IO_SEG(0x0001180028000028ull)
-#define CVMX_FPA_FPF6_MARKS \
- CVMX_ADD_IO_SEG(0x0001180028000030ull)
-#define CVMX_FPA_FPF7_MARKS \
- CVMX_ADD_IO_SEG(0x0001180028000038ull)
-#define CVMX_FPA_FPFX_MARKS(offset) \
- CVMX_ADD_IO_SEG(0x0001180028000008ull + (((offset) & 7) * 8) - 8 * 1)
-#define CVMX_FPA_FPFX_SIZE(offset) \
- CVMX_ADD_IO_SEG(0x0001180028000060ull + (((offset) & 7) * 8) - 8 * 1)
-#define CVMX_FPA_INT_ENB \
- CVMX_ADD_IO_SEG(0x0001180028000048ull)
-#define CVMX_FPA_INT_SUM \
- CVMX_ADD_IO_SEG(0x0001180028000040ull)
-#define CVMX_FPA_QUE0_PAGE_INDEX \
- CVMX_ADD_IO_SEG(0x00011800280000F0ull)
-#define CVMX_FPA_QUE1_PAGE_INDEX \
- CVMX_ADD_IO_SEG(0x00011800280000F8ull)
-#define CVMX_FPA_QUE2_PAGE_INDEX \
- CVMX_ADD_IO_SEG(0x0001180028000100ull)
-#define CVMX_FPA_QUE3_PAGE_INDEX \
- CVMX_ADD_IO_SEG(0x0001180028000108ull)
-#define CVMX_FPA_QUE4_PAGE_INDEX \
- CVMX_ADD_IO_SEG(0x0001180028000110ull)
-#define CVMX_FPA_QUE5_PAGE_INDEX \
- CVMX_ADD_IO_SEG(0x0001180028000118ull)
-#define CVMX_FPA_QUE6_PAGE_INDEX \
- CVMX_ADD_IO_SEG(0x0001180028000120ull)
-#define CVMX_FPA_QUE7_PAGE_INDEX \
- CVMX_ADD_IO_SEG(0x0001180028000128ull)
-#define CVMX_FPA_QUEX_AVAILABLE(offset) \
- CVMX_ADD_IO_SEG(0x0001180028000098ull + (((offset) & 7) * 8))
-#define CVMX_FPA_QUEX_PAGE_INDEX(offset) \
- CVMX_ADD_IO_SEG(0x00011800280000F0ull + (((offset) & 7) * 8))
-#define CVMX_FPA_QUE_ACT \
- CVMX_ADD_IO_SEG(0x0001180028000138ull)
-#define CVMX_FPA_QUE_EXP \
- CVMX_ADD_IO_SEG(0x0001180028000130ull)
-#define CVMX_FPA_WART_CTL \
- CVMX_ADD_IO_SEG(0x00011800280000D8ull)
-#define CVMX_FPA_WART_STATUS \
- CVMX_ADD_IO_SEG(0x00011800280000E0ull)
-
-union cvmx_fpa_bist_status {
- uint64_t u64;
- struct cvmx_fpa_bist_status_s {
- uint64_t reserved_5_63:59;
- uint64_t frd:1;
- uint64_t fpf0:1;
- uint64_t fpf1:1;
- uint64_t ffr:1;
- uint64_t fdr:1;
- } s;
- struct cvmx_fpa_bist_status_s cn30xx;
- struct cvmx_fpa_bist_status_s cn31xx;
- struct cvmx_fpa_bist_status_s cn38xx;
- struct cvmx_fpa_bist_status_s cn38xxp2;
- struct cvmx_fpa_bist_status_s cn50xx;
- struct cvmx_fpa_bist_status_s cn52xx;
- struct cvmx_fpa_bist_status_s cn52xxp1;
- struct cvmx_fpa_bist_status_s cn56xx;
- struct cvmx_fpa_bist_status_s cn56xxp1;
- struct cvmx_fpa_bist_status_s cn58xx;
- struct cvmx_fpa_bist_status_s cn58xxp1;
-};
-
-union cvmx_fpa_ctl_status {
- uint64_t u64;
- struct cvmx_fpa_ctl_status_s {
- uint64_t reserved_18_63:46;
- uint64_t reset:1;
- uint64_t use_ldt:1;
- uint64_t use_stt:1;
- uint64_t enb:1;
- uint64_t mem1_err:7;
- uint64_t mem0_err:7;
- } s;
- struct cvmx_fpa_ctl_status_s cn30xx;
- struct cvmx_fpa_ctl_status_s cn31xx;
- struct cvmx_fpa_ctl_status_s cn38xx;
- struct cvmx_fpa_ctl_status_s cn38xxp2;
- struct cvmx_fpa_ctl_status_s cn50xx;
- struct cvmx_fpa_ctl_status_s cn52xx;
- struct cvmx_fpa_ctl_status_s cn52xxp1;
- struct cvmx_fpa_ctl_status_s cn56xx;
- struct cvmx_fpa_ctl_status_s cn56xxp1;
- struct cvmx_fpa_ctl_status_s cn58xx;
- struct cvmx_fpa_ctl_status_s cn58xxp1;
-};
-
-union cvmx_fpa_fpfx_marks {
- uint64_t u64;
- struct cvmx_fpa_fpfx_marks_s {
- uint64_t reserved_22_63:42;
- uint64_t fpf_wr:11;
- uint64_t fpf_rd:11;
- } s;
- struct cvmx_fpa_fpfx_marks_s cn38xx;
- struct cvmx_fpa_fpfx_marks_s cn38xxp2;
- struct cvmx_fpa_fpfx_marks_s cn56xx;
- struct cvmx_fpa_fpfx_marks_s cn56xxp1;
- struct cvmx_fpa_fpfx_marks_s cn58xx;
- struct cvmx_fpa_fpfx_marks_s cn58xxp1;
-};
-
-union cvmx_fpa_fpfx_size {
- uint64_t u64;
- struct cvmx_fpa_fpfx_size_s {
- uint64_t reserved_11_63:53;
- uint64_t fpf_siz:11;
- } s;
- struct cvmx_fpa_fpfx_size_s cn38xx;
- struct cvmx_fpa_fpfx_size_s cn38xxp2;
- struct cvmx_fpa_fpfx_size_s cn56xx;
- struct cvmx_fpa_fpfx_size_s cn56xxp1;
- struct cvmx_fpa_fpfx_size_s cn58xx;
- struct cvmx_fpa_fpfx_size_s cn58xxp1;
-};
-
-union cvmx_fpa_fpf0_marks {
- uint64_t u64;
- struct cvmx_fpa_fpf0_marks_s {
- uint64_t reserved_24_63:40;
- uint64_t fpf_wr:12;
- uint64_t fpf_rd:12;
- } s;
- struct cvmx_fpa_fpf0_marks_s cn38xx;
- struct cvmx_fpa_fpf0_marks_s cn38xxp2;
- struct cvmx_fpa_fpf0_marks_s cn56xx;
- struct cvmx_fpa_fpf0_marks_s cn56xxp1;
- struct cvmx_fpa_fpf0_marks_s cn58xx;
- struct cvmx_fpa_fpf0_marks_s cn58xxp1;
-};
-
-union cvmx_fpa_fpf0_size {
- uint64_t u64;
- struct cvmx_fpa_fpf0_size_s {
- uint64_t reserved_12_63:52;
- uint64_t fpf_siz:12;
- } s;
- struct cvmx_fpa_fpf0_size_s cn38xx;
- struct cvmx_fpa_fpf0_size_s cn38xxp2;
- struct cvmx_fpa_fpf0_size_s cn56xx;
- struct cvmx_fpa_fpf0_size_s cn56xxp1;
- struct cvmx_fpa_fpf0_size_s cn58xx;
- struct cvmx_fpa_fpf0_size_s cn58xxp1;
-};
-
-union cvmx_fpa_int_enb {
- uint64_t u64;
- struct cvmx_fpa_int_enb_s {
- uint64_t reserved_28_63:36;
- uint64_t q7_perr:1;
- uint64_t q7_coff:1;
- uint64_t q7_und:1;
- uint64_t q6_perr:1;
- uint64_t q6_coff:1;
- uint64_t q6_und:1;
- uint64_t q5_perr:1;
- uint64_t q5_coff:1;
- uint64_t q5_und:1;
- uint64_t q4_perr:1;
- uint64_t q4_coff:1;
- uint64_t q4_und:1;
- uint64_t q3_perr:1;
- uint64_t q3_coff:1;
- uint64_t q3_und:1;
- uint64_t q2_perr:1;
- uint64_t q2_coff:1;
- uint64_t q2_und:1;
- uint64_t q1_perr:1;
- uint64_t q1_coff:1;
- uint64_t q1_und:1;
- uint64_t q0_perr:1;
- uint64_t q0_coff:1;
- uint64_t q0_und:1;
- uint64_t fed1_dbe:1;
- uint64_t fed1_sbe:1;
- uint64_t fed0_dbe:1;
- uint64_t fed0_sbe:1;
- } s;
- struct cvmx_fpa_int_enb_s cn30xx;
- struct cvmx_fpa_int_enb_s cn31xx;
- struct cvmx_fpa_int_enb_s cn38xx;
- struct cvmx_fpa_int_enb_s cn38xxp2;
- struct cvmx_fpa_int_enb_s cn50xx;
- struct cvmx_fpa_int_enb_s cn52xx;
- struct cvmx_fpa_int_enb_s cn52xxp1;
- struct cvmx_fpa_int_enb_s cn56xx;
- struct cvmx_fpa_int_enb_s cn56xxp1;
- struct cvmx_fpa_int_enb_s cn58xx;
- struct cvmx_fpa_int_enb_s cn58xxp1;
-};
-
-union cvmx_fpa_int_sum {
- uint64_t u64;
- struct cvmx_fpa_int_sum_s {
- uint64_t reserved_28_63:36;
- uint64_t q7_perr:1;
- uint64_t q7_coff:1;
- uint64_t q7_und:1;
- uint64_t q6_perr:1;
- uint64_t q6_coff:1;
- uint64_t q6_und:1;
- uint64_t q5_perr:1;
- uint64_t q5_coff:1;
- uint64_t q5_und:1;
- uint64_t q4_perr:1;
- uint64_t q4_coff:1;
- uint64_t q4_und:1;
- uint64_t q3_perr:1;
- uint64_t q3_coff:1;
- uint64_t q3_und:1;
- uint64_t q2_perr:1;
- uint64_t q2_coff:1;
- uint64_t q2_und:1;
- uint64_t q1_perr:1;
- uint64_t q1_coff:1;
- uint64_t q1_und:1;
- uint64_t q0_perr:1;
- uint64_t q0_coff:1;
- uint64_t q0_und:1;
- uint64_t fed1_dbe:1;
- uint64_t fed1_sbe:1;
- uint64_t fed0_dbe:1;
- uint64_t fed0_sbe:1;
- } s;
- struct cvmx_fpa_int_sum_s cn30xx;
- struct cvmx_fpa_int_sum_s cn31xx;
- struct cvmx_fpa_int_sum_s cn38xx;
- struct cvmx_fpa_int_sum_s cn38xxp2;
- struct cvmx_fpa_int_sum_s cn50xx;
- struct cvmx_fpa_int_sum_s cn52xx;
- struct cvmx_fpa_int_sum_s cn52xxp1;
- struct cvmx_fpa_int_sum_s cn56xx;
- struct cvmx_fpa_int_sum_s cn56xxp1;
- struct cvmx_fpa_int_sum_s cn58xx;
- struct cvmx_fpa_int_sum_s cn58xxp1;
-};
-
-union cvmx_fpa_quex_available {
- uint64_t u64;
- struct cvmx_fpa_quex_available_s {
- uint64_t reserved_29_63:35;
- uint64_t que_siz:29;
- } s;
- struct cvmx_fpa_quex_available_s cn30xx;
- struct cvmx_fpa_quex_available_s cn31xx;
- struct cvmx_fpa_quex_available_s cn38xx;
- struct cvmx_fpa_quex_available_s cn38xxp2;
- struct cvmx_fpa_quex_available_s cn50xx;
- struct cvmx_fpa_quex_available_s cn52xx;
- struct cvmx_fpa_quex_available_s cn52xxp1;
- struct cvmx_fpa_quex_available_s cn56xx;
- struct cvmx_fpa_quex_available_s cn56xxp1;
- struct cvmx_fpa_quex_available_s cn58xx;
- struct cvmx_fpa_quex_available_s cn58xxp1;
-};
-
-union cvmx_fpa_quex_page_index {
- uint64_t u64;
- struct cvmx_fpa_quex_page_index_s {
- uint64_t reserved_25_63:39;
- uint64_t pg_num:25;
- } s;
- struct cvmx_fpa_quex_page_index_s cn30xx;
- struct cvmx_fpa_quex_page_index_s cn31xx;
- struct cvmx_fpa_quex_page_index_s cn38xx;
- struct cvmx_fpa_quex_page_index_s cn38xxp2;
- struct cvmx_fpa_quex_page_index_s cn50xx;
- struct cvmx_fpa_quex_page_index_s cn52xx;
- struct cvmx_fpa_quex_page_index_s cn52xxp1;
- struct cvmx_fpa_quex_page_index_s cn56xx;
- struct cvmx_fpa_quex_page_index_s cn56xxp1;
- struct cvmx_fpa_quex_page_index_s cn58xx;
- struct cvmx_fpa_quex_page_index_s cn58xxp1;
-};
-
-union cvmx_fpa_que_act {
- uint64_t u64;
- struct cvmx_fpa_que_act_s {
- uint64_t reserved_29_63:35;
- uint64_t act_que:3;
- uint64_t act_indx:26;
- } s;
- struct cvmx_fpa_que_act_s cn30xx;
- struct cvmx_fpa_que_act_s cn31xx;
- struct cvmx_fpa_que_act_s cn38xx;
- struct cvmx_fpa_que_act_s cn38xxp2;
- struct cvmx_fpa_que_act_s cn50xx;
- struct cvmx_fpa_que_act_s cn52xx;
- struct cvmx_fpa_que_act_s cn52xxp1;
- struct cvmx_fpa_que_act_s cn56xx;
- struct cvmx_fpa_que_act_s cn56xxp1;
- struct cvmx_fpa_que_act_s cn58xx;
- struct cvmx_fpa_que_act_s cn58xxp1;
-};
-
-union cvmx_fpa_que_exp {
- uint64_t u64;
- struct cvmx_fpa_que_exp_s {
- uint64_t reserved_29_63:35;
- uint64_t exp_que:3;
- uint64_t exp_indx:26;
- } s;
- struct cvmx_fpa_que_exp_s cn30xx;
- struct cvmx_fpa_que_exp_s cn31xx;
- struct cvmx_fpa_que_exp_s cn38xx;
- struct cvmx_fpa_que_exp_s cn38xxp2;
- struct cvmx_fpa_que_exp_s cn50xx;
- struct cvmx_fpa_que_exp_s cn52xx;
- struct cvmx_fpa_que_exp_s cn52xxp1;
- struct cvmx_fpa_que_exp_s cn56xx;
- struct cvmx_fpa_que_exp_s cn56xxp1;
- struct cvmx_fpa_que_exp_s cn58xx;
- struct cvmx_fpa_que_exp_s cn58xxp1;
-};
-
-union cvmx_fpa_wart_ctl {
- uint64_t u64;
- struct cvmx_fpa_wart_ctl_s {
- uint64_t reserved_16_63:48;
- uint64_t ctl:16;
- } s;
- struct cvmx_fpa_wart_ctl_s cn30xx;
- struct cvmx_fpa_wart_ctl_s cn31xx;
- struct cvmx_fpa_wart_ctl_s cn38xx;
- struct cvmx_fpa_wart_ctl_s cn38xxp2;
- struct cvmx_fpa_wart_ctl_s cn50xx;
- struct cvmx_fpa_wart_ctl_s cn52xx;
- struct cvmx_fpa_wart_ctl_s cn52xxp1;
- struct cvmx_fpa_wart_ctl_s cn56xx;
- struct cvmx_fpa_wart_ctl_s cn56xxp1;
- struct cvmx_fpa_wart_ctl_s cn58xx;
- struct cvmx_fpa_wart_ctl_s cn58xxp1;
-};
-
-union cvmx_fpa_wart_status {
- uint64_t u64;
- struct cvmx_fpa_wart_status_s {
- uint64_t reserved_32_63:32;
- uint64_t status:32;
- } s;
- struct cvmx_fpa_wart_status_s cn30xx;
- struct cvmx_fpa_wart_status_s cn31xx;
- struct cvmx_fpa_wart_status_s cn38xx;
- struct cvmx_fpa_wart_status_s cn38xxp2;
- struct cvmx_fpa_wart_status_s cn50xx;
- struct cvmx_fpa_wart_status_s cn52xx;
- struct cvmx_fpa_wart_status_s cn52xxp1;
- struct cvmx_fpa_wart_status_s cn56xx;
- struct cvmx_fpa_wart_status_s cn56xxp1;
- struct cvmx_fpa_wart_status_s cn58xx;
- struct cvmx_fpa_wart_status_s cn58xxp1;
-};
-
-#endif
diff --git a/drivers/staging/octeon/cvmx-fpa.c b/drivers/staging/octeon/cvmx-fpa.c
deleted file mode 100644
index ad44b8b..0000000
--- a/drivers/staging/octeon/cvmx-fpa.c
+++ /dev/null
@@ -1,183 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- * @file
- *
- * Support library for the hardware Free Pool Allocator.
- *
- *
- */
-
-#include "cvmx-config.h"
-#include "cvmx.h"
-#include "cvmx-fpa.h"
-#include "cvmx-ipd.h"
-
-/**
- * Current state of all the pools. Use access functions
- * instead of using it directly.
- */
-CVMX_SHARED cvmx_fpa_pool_info_t cvmx_fpa_pool_info[CVMX_FPA_NUM_POOLS];
-
-/**
- * Set up an FPA pool to control a new block of memory. The
- * buffer pointer must be a physical address.
- *
- * @pool: Pool to initialize
- * 0 <= pool < 8
- * @name: Constant character string to name this pool.
- * String is not copied.
- * @buffer: Pointer to the block of memory to use. This must be
- * accessible by all processors and external hardware.
- * @block_size: Size for each block controlled by the FPA
- * @num_blocks: Number of blocks
- *
- * Returns 0 on Success,
- * -1 on failure
- */
-int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
- uint64_t block_size, uint64_t num_blocks)
-{
- char *ptr;
- if (!buffer) {
- cvmx_dprintf
- ("ERROR: cvmx_fpa_setup_pool: NULL buffer pointer!\n");
- return -1;
- }
- if (pool >= CVMX_FPA_NUM_POOLS) {
- cvmx_dprintf("ERROR: cvmx_fpa_setup_pool: Illegal pool!\n");
- return -1;
- }
-
- if (block_size < CVMX_FPA_MIN_BLOCK_SIZE) {
- cvmx_dprintf
- ("ERROR: cvmx_fpa_setup_pool: Block size too small.\n");
- return -1;
- }
-
- if (((unsigned long)buffer & (CVMX_FPA_ALIGNMENT - 1)) != 0) {
- cvmx_dprintf
- ("ERROR: cvmx_fpa_setup_pool: Buffer not aligned properly.\n");
- return -1;
- }
-
- cvmx_fpa_pool_info[pool].name = name;
- cvmx_fpa_pool_info[pool].size = block_size;
- cvmx_fpa_pool_info[pool].starting_element_count = num_blocks;
- cvmx_fpa_pool_info[pool].base = buffer;
-
- ptr = (char *)buffer;
- while (num_blocks--) {
- cvmx_fpa_free(ptr, pool, 0);
- ptr += block_size;
- }
- return 0;
-}
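A sketch of typical pool setup. The buffer is assumed to come from cvmx_bootmem_alloc() in cvmx-bootmem.h (not part of this file), the buffer count is arbitrary, and the pool and size macros are the ones from cvmx-config.h:

#define MY_NUM_PACKET_BUFFERS 1024	/* hypothetical */

static int my_setup_packet_pool(void)
{
	void *mem = cvmx_bootmem_alloc(MY_NUM_PACKET_BUFFERS *
				       CVMX_FPA_PACKET_POOL_SIZE,
				       CVMX_FPA_ALIGNMENT);
	if (!mem)
		return -1;
	return cvmx_fpa_setup_pool(CVMX_FPA_PACKET_POOL, "Packet buffers", mem,
				   CVMX_FPA_PACKET_POOL_SIZE,
				   MY_NUM_PACKET_BUFFERS);
}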
-
-/**
- * Shut down a memory pool and validate that it contained all of
- * the buffers originally placed in it.
- *
- * @pool: Pool to shutdown
- * Returns Zero on success
- * - Positive is count of missing buffers
- * - Negative is too many buffers or corrupted pointers
- */
-uint64_t cvmx_fpa_shutdown_pool(uint64_t pool)
-{
- uint64_t errors = 0;
- uint64_t count = 0;
- uint64_t base = cvmx_ptr_to_phys(cvmx_fpa_pool_info[pool].base);
- uint64_t finish =
- base +
- cvmx_fpa_pool_info[pool].size *
- cvmx_fpa_pool_info[pool].starting_element_count;
- void *ptr;
- uint64_t address;
-
- count = 0;
- do {
- ptr = cvmx_fpa_alloc(pool);
- if (ptr)
- address = cvmx_ptr_to_phys(ptr);
- else
- address = 0;
- if (address) {
- if ((address >= base) && (address < finish) &&
- (((address -
- base) % cvmx_fpa_pool_info[pool].size) == 0)) {
- count++;
- } else {
- cvmx_dprintf
- ("ERROR: cvmx_fpa_shutdown_pool: Illegal address 0x%llx in pool %s(%d)\n",
- (unsigned long long)address,
- cvmx_fpa_pool_info[pool].name, (int)pool);
- errors++;
- }
- }
- } while (address);
-
-#ifdef CVMX_ENABLE_PKO_FUNCTIONS
- if (pool == 0)
- cvmx_ipd_free_ptr();
-#endif
-
- if (errors) {
- cvmx_dprintf
- ("ERROR: cvmx_fpa_shutdown_pool: Pool %s(%d) started at 0x%llx, ended at 0x%llx, with a step of 0x%llx\n",
- cvmx_fpa_pool_info[pool].name, (int)pool,
- (unsigned long long)base, (unsigned long long)finish,
- (unsigned long long)cvmx_fpa_pool_info[pool].size);
- return -errors;
- } else
- return 0;
-}
-
-uint64_t cvmx_fpa_get_block_size(uint64_t pool)
-{
- switch (pool) {
- case 0:
- return CVMX_FPA_POOL_0_SIZE;
- case 1:
- return CVMX_FPA_POOL_1_SIZE;
- case 2:
- return CVMX_FPA_POOL_2_SIZE;
- case 3:
- return CVMX_FPA_POOL_3_SIZE;
- case 4:
- return CVMX_FPA_POOL_4_SIZE;
- case 5:
- return CVMX_FPA_POOL_5_SIZE;
- case 6:
- return CVMX_FPA_POOL_6_SIZE;
- case 7:
- return CVMX_FPA_POOL_7_SIZE;
- default:
- return 0;
- }
-}
diff --git a/drivers/staging/octeon/cvmx-fpa.h b/drivers/staging/octeon/cvmx-fpa.h
deleted file mode 100644
index 1f04f96..0000000
--- a/drivers/staging/octeon/cvmx-fpa.h
+++ /dev/null
@@ -1,299 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- * @file
- *
- * Interface to the hardware Free Pool Allocator.
- *
- *
- */
-
-#ifndef __CVMX_FPA_H__
-#define __CVMX_FPA_H__
-
-#include "cvmx-address.h"
-#include "cvmx-fpa-defs.h"
-
-#define CVMX_FPA_NUM_POOLS 8
-#define CVMX_FPA_MIN_BLOCK_SIZE 128
-#define CVMX_FPA_ALIGNMENT 128
-
-/**
- * Structure describing the data format used for stores to the FPA.
- */
-typedef union {
- uint64_t u64;
- struct {
- /*
- * the (64-bit word) location in scratchpad to write
- * to (if len != 0)
- */
- uint64_t scraddr:8;
- /* the number of words in the response (0 => no response) */
- uint64_t len:8;
- /* the ID of the device on the non-coherent bus */
- uint64_t did:8;
- /*
- * the address that will appear in the first tick on
- * the NCB bus.
- */
- uint64_t addr:40;
- } s;
-} cvmx_fpa_iobdma_data_t;
-
-/**
- * Structure describing the current state of an FPA pool.
- */
-typedef struct {
- /* Name it was created under */
- const char *name;
- /* Size of each block */
- uint64_t size;
- /* The base memory address of whole block */
- void *base;
- /* The number of elements in the pool at creation */
- uint64_t starting_element_count;
-} cvmx_fpa_pool_info_t;
-
-/**
- * Current state of all the pools. Use access functions
- * instead of using it directly.
- */
-extern cvmx_fpa_pool_info_t cvmx_fpa_pool_info[CVMX_FPA_NUM_POOLS];
-
-/* CSR typedefs have been moved to cvmx-csr-*.h */
-
-/**
- * Return the name of the pool
- *
- * @pool: Pool to get the name of
- * Returns The name
- */
-static inline const char *cvmx_fpa_get_name(uint64_t pool)
-{
- return cvmx_fpa_pool_info[pool].name;
-}
-
-/**
- * Return the base of the pool
- *
- * @pool: Pool to get the base of
- * Returns The base
- */
-static inline void *cvmx_fpa_get_base(uint64_t pool)
-{
- return cvmx_fpa_pool_info[pool].base;
-}
-
-/**
- * Check if a pointer belongs to an FPA pool. Return non-zero
- * if the supplied pointer is inside the memory controlled by
- * an FPA pool.
- *
- * @pool: Pool to check
- * @ptr: Pointer to check
- * Returns Non-zero if pointer is in the pool. Zero if not
- */
-static inline int cvmx_fpa_is_member(uint64_t pool, void *ptr)
-{
- return ((ptr >= cvmx_fpa_pool_info[pool].base) &&
- ((char *)ptr <
- ((char *)(cvmx_fpa_pool_info[pool].base)) +
- cvmx_fpa_pool_info[pool].size *
- cvmx_fpa_pool_info[pool].starting_element_count));
-}
-
-/**
- * Enable the FPA for use. Must be performed after any CSR
- * configuration but before any other FPA functions.
- */
-static inline void cvmx_fpa_enable(void)
-{
- union cvmx_fpa_ctl_status status;
-
- status.u64 = cvmx_read_csr(CVMX_FPA_CTL_STATUS);
- if (status.s.enb) {
- cvmx_dprintf
- ("Warning: Enabling FPA when FPA already enabled.\n");
- }
-
- /*
- * Do runtime check as we allow pass1 compiled code to run on
- * pass2 chips.
- */
- if (cvmx_octeon_is_pass1()) {
- union cvmx_fpa_fpfx_marks marks;
- int i;
- for (i = 1; i < 8; i++) {
- marks.u64 =
- cvmx_read_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull);
- marks.s.fpf_wr = 0xe0;
- cvmx_write_csr(CVMX_FPA_FPF1_MARKS + (i - 1) * 8ull,
- marks.u64);
- }
-
- /* Enforce a 10 cycle delay between config and enable */
- cvmx_wait(10);
- }
-
- /* FIXME: CVMX_FPA_CTL_STATUS read is unmodelled */
- status.u64 = 0;
- status.s.enb = 1;
- cvmx_write_csr(CVMX_FPA_CTL_STATUS, status.u64);
-}
-
-/**
- * Get a new block from the FPA
- *
- * @pool: Pool to get the block from
- * Returns Pointer to the block or NULL on failure
- */
-static inline void *cvmx_fpa_alloc(uint64_t pool)
-{
- uint64_t address =
- cvmx_read_csr(CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool)));
- if (address)
- return cvmx_phys_to_ptr(address);
- else
- return NULL;
-}
-
-/**
- * Asynchronously get a new block from the FPA
- *
- * @scr_addr: Local scratch address to put response in. This is a byte address,
- * but must be 8 byte aligned.
- * @pool: Pool to get the block from
- */
-static inline void cvmx_fpa_async_alloc(uint64_t scr_addr, uint64_t pool)
-{
- cvmx_fpa_iobdma_data_t data;
-
- /*
- * Hardware only uses 64 bit aligned locations, so convert
- * from byte address to 64-bit index
- */
- data.s.scraddr = scr_addr >> 3;
- data.s.len = 1;
- data.s.did = CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool);
- data.s.addr = 0;
- cvmx_send_single(data.u64);
-}
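
A sketch of the asynchronous pattern this helper is meant for. The names
CVMX_SCR_SCRATCH, CVMX_SYNCIOBDMA and cvmx_scratch_read64() are assumed to
come from the companion cvmx-scratch.h/cvmx-asm.h headers (they are not
defined in this file), and the pool number is arbitrary.

static void *example_fpa_alloc_async(uint64_t pool)
{
	uint64_t phys;

	/* Kick off the IOBDMA; the response lands in the core scratchpad. */
	cvmx_fpa_async_alloc(CVMX_SCR_SCRATCH, pool);

	/* ... unrelated work can overlap with the allocation here ... */

	CVMX_SYNCIOBDMA;	/* wait for the scratchpad response */
	phys = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
	return phys ? cvmx_phys_to_ptr(phys) : NULL;
}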
-
-/**
- * Free a block allocated with an FPA pool. Does NOT provide memory
- * ordering in cases where the memory block was modified by the core.
- *
- * @ptr: Block to free
- * @pool: Pool to put it in
- * @num_cache_lines:
- * Cache lines to invalidate
- */
-static inline void cvmx_fpa_free_nosync(void *ptr, uint64_t pool,
- uint64_t num_cache_lines)
-{
- cvmx_addr_t newptr;
- newptr.u64 = cvmx_ptr_to_phys(ptr);
- newptr.sfilldidspace.didspace =
- CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
- /* Prevent GCC from reordering around free */
- barrier();
- /* value written is number of cache lines not written back */
- cvmx_write_io(newptr.u64, num_cache_lines);
-}
-
-/**
- * Free a block allocated with an FPA pool. Provides required memory
- * ordering in cases where the memory block was modified by the core.
- *
- * @ptr: Block to free
- * @pool: Pool to put it in
- * @num_cache_lines:
- * Cache lines to invalidate
- */
-static inline void cvmx_fpa_free(void *ptr, uint64_t pool,
- uint64_t num_cache_lines)
-{
- cvmx_addr_t newptr;
- newptr.u64 = cvmx_ptr_to_phys(ptr);
- newptr.sfilldidspace.didspace =
- CVMX_ADDR_DIDSPACE(CVMX_FULL_DID(CVMX_OCT_DID_FPA, pool));
- /*
- * Make sure that any previous writes to memory go out before
- * we free this buffer. This also serves as a barrier to
- * prevent GCC from reordering operations to after the
- * free.
- */
- CVMX_SYNCWS;
- /* value written is number of cache lines not written back */
- cvmx_write_io(newptr.u64, num_cache_lines);
-}
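
A round-trip sketch pairing cvmx_fpa_alloc() with the ordered free above. The
pool number is arbitrary, and 0 is passed for num_cache_lines (no cache lines
dropped without write-back), the conservative value cvmx_fpa_setup_pool()
itself uses.

static void example_fpa_roundtrip(uint64_t pool)
{
	uint64_t *block = cvmx_fpa_alloc(pool);

	if (!block)
		return;			/* pool is empty */

	block[0] = 0;			/* the core dirties the block ... */
	cvmx_fpa_free(block, pool, 0);	/* ... so the ordered free is used */
}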
-
-/**
- * Set up an FPA pool to control a new block of memory.
- * This can only be called once per pool. Make sure proper
- * locking enforces this.
- *
- * @pool: Pool to initialize
- * 0 <= pool < 8
- * @name: Constant character string to name this pool.
- * String is not copied.
- * @buffer: Pointer to the block of memory to use. This must be
- * accessible by all processors and external hardware.
- * @block_size: Size for each block controlled by the FPA
- * @num_blocks: Number of blocks
- *
- * Returns 0 on Success,
- * -1 on failure
- */
-extern int cvmx_fpa_setup_pool(uint64_t pool, const char *name, void *buffer,
- uint64_t block_size, uint64_t num_blocks);
-
-/**
- * Shut down a memory pool and validate that it had all of
- * the buffers originally placed in it. This should only be
- * called by one processor after all hardware has finished
- * using the pool.
- *
- * @pool: Pool to shutdown
- * Returns Zero on success
- * - Positive is count of missing buffers
- * - Negative is too many buffers or corrupted pointers
- */
-extern uint64_t cvmx_fpa_shutdown_pool(uint64_t pool);
-
-/**
- * Get the size of blocks controlled by the pool.
- * The per-pool block sizes are compile-time constants.
- *
- * @pool: Pool to access
- * Returns Size of the block in bytes
- */
-uint64_t cvmx_fpa_get_block_size(uint64_t pool);
-
-#endif /* __CVMX_FPA_H__ */
diff --git a/drivers/staging/octeon/cvmx-gmxx-defs.h b/drivers/staging/octeon/cvmx-gmxx-defs.h
deleted file mode 100644
index 946a43a..0000000
--- a/drivers/staging/octeon/cvmx-gmxx-defs.h
+++ /dev/null
@@ -1,2529 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_GMXX_DEFS_H__
-#define __CVMX_GMXX_DEFS_H__
-
-#define CVMX_GMXX_BAD_REG(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000518ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_BIST(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000400ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_CLK_EN(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080007F0ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_HG2_CONTROL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000550ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_INF_MODE(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080007F8ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_NXA_ADR(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000510ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_PRTX_CBFC_CTL(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000580ull + (((offset) & 0) * 8) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_PRTX_CFG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000010ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_ADR_CAM0(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000180ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_ADR_CAM1(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000188ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_ADR_CAM2(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000190ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_ADR_CAM3(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000198ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_ADR_CAM4(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080001A0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_ADR_CAM5(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080001A8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_ADR_CAM_EN(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000108ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_ADR_CTL(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000100ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_DECISION(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000040ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_FRM_CHK(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000020ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_FRM_CTL(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000018ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_FRM_MAX(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000030ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_FRM_MIN(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000028ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_IFG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000058ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_INT_EN(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000008ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_INT_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000000ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_JABBER(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000038ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_PAUSE_DROP_TIME(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000068ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_RX_INBND(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000060ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_STATS_CTL(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000050ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_STATS_OCTS(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000088ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_STATS_OCTS_CTL(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000098ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_STATS_OCTS_DMAC(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080000A8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_STATS_OCTS_DRP(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080000B8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_STATS_PKTS(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000080ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_STATS_PKTS_BAD(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080000C0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_STATS_PKTS_CTL(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000090ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_STATS_PKTS_DMAC(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080000A0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_STATS_PKTS_DRP(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080000B0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RXX_UDD_SKP(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000048ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RX_BP_DROPX(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000420ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RX_BP_OFFX(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000460ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RX_BP_ONX(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000440ull + (((offset) & 3) * 8) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RX_HG2_STATUS(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000548ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RX_PASS_EN(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080005F8ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RX_PASS_MAPX(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000600ull + (((offset) & 15) * 8) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RX_PRTS(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000410ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RX_PRT_INFO(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080004E8ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RX_TX_STATUS(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080007E8ull + (((block_id) & 0) * 0x8000000ull))
-#define CVMX_GMXX_RX_XAUI_BAD_COL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000538ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_RX_XAUI_CTL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000530ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_SMACX(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000230ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_STAT_BP(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000520ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_APPEND(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000218ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_BURST(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000228ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_CBFC_XOFF(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080005A0ull + (((offset) & 0) * 8) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_CBFC_XON(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080005C0ull + (((offset) & 0) * 8) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_CLK(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000208ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_CTL(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000270ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_MIN_PKT(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000240ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000248ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_PAUSE_PKT_TIME(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000238ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_PAUSE_TOGO(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000258ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_PAUSE_ZERO(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000260ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_SGMII_CTL(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000300ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_SLOT(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000220ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_SOFT_PAUSE(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000250ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_STAT0(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000280ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_STAT1(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000288ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_STAT2(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000290ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_STAT3(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000298ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_STAT4(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080002A0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_STAT5(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080002A8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_STAT6(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080002B0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_STAT7(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080002B8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_STAT8(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080002C0ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_STAT9(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800080002C8ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_STATS_CTL(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000268ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TXX_THRESH(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000210ull + (((offset) & 3) * 2048) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_BP(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080004D0ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_CLK_MSKX(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000780ull + (((offset) & 1) * 8) + (((block_id) & 0) * 0x0ull))
-#define CVMX_GMXX_TX_COL_ATTEMPT(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000498ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_CORRUPT(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080004D8ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_HG2_REG1(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000558ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_HG2_REG2(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000560ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_IFG(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000488ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_INT_EN(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000508ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_INT_REG(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000500ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_JAM(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000490ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_LFSR(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080004F8ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_OVR_BP(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080004C8ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_PAUSE_PKT_DMAC(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080004A0ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_PAUSE_PKT_TYPE(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080004A8ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_PRTS(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000480ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_SPI_CTL(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080004C0ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_SPI_DRAIN(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080004E0ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_SPI_MAX(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080004B0ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_SPI_ROUNDX(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000680ull + (((offset) & 31) * 8) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_SPI_THRESH(block_id) \
- CVMX_ADD_IO_SEG(0x00011800080004B8ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_TX_XAUI_CTL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000528ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_GMXX_XAUI_EXT_LOOPBACK(block_id) \
- CVMX_ADD_IO_SEG(0x0001180008000540ull + (((block_id) & 1) * 0x8000000ull))
-
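The per-port RX/TX registers above use a 2048-byte stride per port (offset)
and a 0x8000000 stride per GMX interface (block_id) on top of a fixed CSR
base, which CVMX_ADD_IO_SEG then places in the I/O address segment. A worked
example with arbitrarily chosen indices:

/*
 * CVMX_GMXX_PRTX_CFG(2, 1)                      (port 2 on interface 1)
 *   = CVMX_ADD_IO_SEG(0x0001180008000010ull + 2 * 2048 + 1 * 0x8000000ull)
 *   = CVMX_ADD_IO_SEG(0x0001180010001010ull)
 * i.e. the base address plus 0x1000 for the port and 0x8000000 for the
 * second interface, mapped into the I/O segment by CVMX_ADD_IO_SEG().
 */
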
-union cvmx_gmxx_bad_reg {
- uint64_t u64;
- struct cvmx_gmxx_bad_reg_s {
- uint64_t reserved_31_63:33;
- uint64_t inb_nxa:4;
- uint64_t statovr:1;
- uint64_t loststat:4;
- uint64_t reserved_18_21:4;
- uint64_t out_ovr:16;
- uint64_t ncb_ovr:1;
- uint64_t out_col:1;
- } s;
- struct cvmx_gmxx_bad_reg_cn30xx {
- uint64_t reserved_31_63:33;
- uint64_t inb_nxa:4;
- uint64_t statovr:1;
- uint64_t reserved_25_25:1;
- uint64_t loststat:3;
- uint64_t reserved_5_21:17;
- uint64_t out_ovr:3;
- uint64_t reserved_0_1:2;
- } cn30xx;
- struct cvmx_gmxx_bad_reg_cn30xx cn31xx;
- struct cvmx_gmxx_bad_reg_s cn38xx;
- struct cvmx_gmxx_bad_reg_s cn38xxp2;
- struct cvmx_gmxx_bad_reg_cn30xx cn50xx;
- struct cvmx_gmxx_bad_reg_cn52xx {
- uint64_t reserved_31_63:33;
- uint64_t inb_nxa:4;
- uint64_t statovr:1;
- uint64_t loststat:4;
- uint64_t reserved_6_21:16;
- uint64_t out_ovr:4;
- uint64_t reserved_0_1:2;
- } cn52xx;
- struct cvmx_gmxx_bad_reg_cn52xx cn52xxp1;
- struct cvmx_gmxx_bad_reg_cn52xx cn56xx;
- struct cvmx_gmxx_bad_reg_cn52xx cn56xxp1;
- struct cvmx_gmxx_bad_reg_s cn58xx;
- struct cvmx_gmxx_bad_reg_s cn58xxp1;
-};
-
-union cvmx_gmxx_bist {
- uint64_t u64;
- struct cvmx_gmxx_bist_s {
- uint64_t reserved_17_63:47;
- uint64_t status:17;
- } s;
- struct cvmx_gmxx_bist_cn30xx {
- uint64_t reserved_10_63:54;
- uint64_t status:10;
- } cn30xx;
- struct cvmx_gmxx_bist_cn30xx cn31xx;
- struct cvmx_gmxx_bist_cn30xx cn38xx;
- struct cvmx_gmxx_bist_cn30xx cn38xxp2;
- struct cvmx_gmxx_bist_cn50xx {
- uint64_t reserved_12_63:52;
- uint64_t status:12;
- } cn50xx;
- struct cvmx_gmxx_bist_cn52xx {
- uint64_t reserved_16_63:48;
- uint64_t status:16;
- } cn52xx;
- struct cvmx_gmxx_bist_cn52xx cn52xxp1;
- struct cvmx_gmxx_bist_cn52xx cn56xx;
- struct cvmx_gmxx_bist_cn52xx cn56xxp1;
- struct cvmx_gmxx_bist_s cn58xx;
- struct cvmx_gmxx_bist_s cn58xxp1;
-};
-
-union cvmx_gmxx_clk_en {
- uint64_t u64;
- struct cvmx_gmxx_clk_en_s {
- uint64_t reserved_1_63:63;
- uint64_t clk_en:1;
- } s;
- struct cvmx_gmxx_clk_en_s cn52xx;
- struct cvmx_gmxx_clk_en_s cn52xxp1;
- struct cvmx_gmxx_clk_en_s cn56xx;
- struct cvmx_gmxx_clk_en_s cn56xxp1;
-};
-
-union cvmx_gmxx_hg2_control {
- uint64_t u64;
- struct cvmx_gmxx_hg2_control_s {
- uint64_t reserved_19_63:45;
- uint64_t hg2tx_en:1;
- uint64_t hg2rx_en:1;
- uint64_t phys_en:1;
- uint64_t logl_en:16;
- } s;
- struct cvmx_gmxx_hg2_control_s cn52xx;
- struct cvmx_gmxx_hg2_control_s cn52xxp1;
- struct cvmx_gmxx_hg2_control_s cn56xx;
-};
-
-union cvmx_gmxx_inf_mode {
- uint64_t u64;
- struct cvmx_gmxx_inf_mode_s {
- uint64_t reserved_10_63:54;
- uint64_t speed:2;
- uint64_t reserved_6_7:2;
- uint64_t mode:2;
- uint64_t reserved_3_3:1;
- uint64_t p0mii:1;
- uint64_t en:1;
- uint64_t type:1;
- } s;
- struct cvmx_gmxx_inf_mode_cn30xx {
- uint64_t reserved_3_63:61;
- uint64_t p0mii:1;
- uint64_t en:1;
- uint64_t type:1;
- } cn30xx;
- struct cvmx_gmxx_inf_mode_cn31xx {
- uint64_t reserved_2_63:62;
- uint64_t en:1;
- uint64_t type:1;
- } cn31xx;
- struct cvmx_gmxx_inf_mode_cn31xx cn38xx;
- struct cvmx_gmxx_inf_mode_cn31xx cn38xxp2;
- struct cvmx_gmxx_inf_mode_cn30xx cn50xx;
- struct cvmx_gmxx_inf_mode_cn52xx {
- uint64_t reserved_10_63:54;
- uint64_t speed:2;
- uint64_t reserved_6_7:2;
- uint64_t mode:2;
- uint64_t reserved_2_3:2;
- uint64_t en:1;
- uint64_t type:1;
- } cn52xx;
- struct cvmx_gmxx_inf_mode_cn52xx cn52xxp1;
- struct cvmx_gmxx_inf_mode_cn52xx cn56xx;
- struct cvmx_gmxx_inf_mode_cn52xx cn56xxp1;
- struct cvmx_gmxx_inf_mode_cn31xx cn58xx;
- struct cvmx_gmxx_inf_mode_cn31xx cn58xxp1;
-};
-
-union cvmx_gmxx_nxa_adr {
- uint64_t u64;
- struct cvmx_gmxx_nxa_adr_s {
- uint64_t reserved_6_63:58;
- uint64_t prt:6;
- } s;
- struct cvmx_gmxx_nxa_adr_s cn30xx;
- struct cvmx_gmxx_nxa_adr_s cn31xx;
- struct cvmx_gmxx_nxa_adr_s cn38xx;
- struct cvmx_gmxx_nxa_adr_s cn38xxp2;
- struct cvmx_gmxx_nxa_adr_s cn50xx;
- struct cvmx_gmxx_nxa_adr_s cn52xx;
- struct cvmx_gmxx_nxa_adr_s cn52xxp1;
- struct cvmx_gmxx_nxa_adr_s cn56xx;
- struct cvmx_gmxx_nxa_adr_s cn56xxp1;
- struct cvmx_gmxx_nxa_adr_s cn58xx;
- struct cvmx_gmxx_nxa_adr_s cn58xxp1;
-};
-
-union cvmx_gmxx_prtx_cbfc_ctl {
- uint64_t u64;
- struct cvmx_gmxx_prtx_cbfc_ctl_s {
- uint64_t phys_en:16;
- uint64_t logl_en:16;
- uint64_t phys_bp:16;
- uint64_t reserved_4_15:12;
- uint64_t bck_en:1;
- uint64_t drp_en:1;
- uint64_t tx_en:1;
- uint64_t rx_en:1;
- } s;
- struct cvmx_gmxx_prtx_cbfc_ctl_s cn52xx;
- struct cvmx_gmxx_prtx_cbfc_ctl_s cn56xx;
-};
-
-union cvmx_gmxx_prtx_cfg {
- uint64_t u64;
- struct cvmx_gmxx_prtx_cfg_s {
- uint64_t reserved_14_63:50;
- uint64_t tx_idle:1;
- uint64_t rx_idle:1;
- uint64_t reserved_9_11:3;
- uint64_t speed_msb:1;
- uint64_t reserved_4_7:4;
- uint64_t slottime:1;
- uint64_t duplex:1;
- uint64_t speed:1;
- uint64_t en:1;
- } s;
- struct cvmx_gmxx_prtx_cfg_cn30xx {
- uint64_t reserved_4_63:60;
- uint64_t slottime:1;
- uint64_t duplex:1;
- uint64_t speed:1;
- uint64_t en:1;
- } cn30xx;
- struct cvmx_gmxx_prtx_cfg_cn30xx cn31xx;
- struct cvmx_gmxx_prtx_cfg_cn30xx cn38xx;
- struct cvmx_gmxx_prtx_cfg_cn30xx cn38xxp2;
- struct cvmx_gmxx_prtx_cfg_cn30xx cn50xx;
- struct cvmx_gmxx_prtx_cfg_s cn52xx;
- struct cvmx_gmxx_prtx_cfg_s cn52xxp1;
- struct cvmx_gmxx_prtx_cfg_s cn56xx;
- struct cvmx_gmxx_prtx_cfg_s cn56xxp1;
- struct cvmx_gmxx_prtx_cfg_cn30xx cn58xx;
- struct cvmx_gmxx_prtx_cfg_cn30xx cn58xxp1;
-};
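
These layouts are normally used for read-modify-write access to the CSR
through the union, with cvmx_read_csr()/cvmx_write_csr() as already seen in
cvmx-fpa.h above. A minimal sketch; the port and interface indices are
arbitrary and no link management is shown:

static void example_gmx_port_enable(int interface, int index)
{
	union cvmx_gmxx_prtx_cfg prtx_cfg;

	prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	prtx_cfg.s.en = 1;	/* EN is the low bit of the common layout */
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), prtx_cfg.u64);
}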
-
-union cvmx_gmxx_rxx_adr_cam0 {
- uint64_t u64;
- struct cvmx_gmxx_rxx_adr_cam0_s {
- uint64_t adr:64;
- } s;
- struct cvmx_gmxx_rxx_adr_cam0_s cn30xx;
- struct cvmx_gmxx_rxx_adr_cam0_s cn31xx;
- struct cvmx_gmxx_rxx_adr_cam0_s cn38xx;
- struct cvmx_gmxx_rxx_adr_cam0_s cn38xxp2;
- struct cvmx_gmxx_rxx_adr_cam0_s cn50xx;
- struct cvmx_gmxx_rxx_adr_cam0_s cn52xx;
- struct cvmx_gmxx_rxx_adr_cam0_s cn52xxp1;
- struct cvmx_gmxx_rxx_adr_cam0_s cn56xx;
- struct cvmx_gmxx_rxx_adr_cam0_s cn56xxp1;
- struct cvmx_gmxx_rxx_adr_cam0_s cn58xx;
- struct cvmx_gmxx_rxx_adr_cam0_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_adr_cam1 {
- uint64_t u64;
- struct cvmx_gmxx_rxx_adr_cam1_s {
- uint64_t adr:64;
- } s;
- struct cvmx_gmxx_rxx_adr_cam1_s cn30xx;
- struct cvmx_gmxx_rxx_adr_cam1_s cn31xx;
- struct cvmx_gmxx_rxx_adr_cam1_s cn38xx;
- struct cvmx_gmxx_rxx_adr_cam1_s cn38xxp2;
- struct cvmx_gmxx_rxx_adr_cam1_s cn50xx;
- struct cvmx_gmxx_rxx_adr_cam1_s cn52xx;
- struct cvmx_gmxx_rxx_adr_cam1_s cn52xxp1;
- struct cvmx_gmxx_rxx_adr_cam1_s cn56xx;
- struct cvmx_gmxx_rxx_adr_cam1_s cn56xxp1;
- struct cvmx_gmxx_rxx_adr_cam1_s cn58xx;
- struct cvmx_gmxx_rxx_adr_cam1_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_adr_cam2 {
- uint64_t u64;
- struct cvmx_gmxx_rxx_adr_cam2_s {
- uint64_t adr:64;
- } s;
- struct cvmx_gmxx_rxx_adr_cam2_s cn30xx;
- struct cvmx_gmxx_rxx_adr_cam2_s cn31xx;
- struct cvmx_gmxx_rxx_adr_cam2_s cn38xx;
- struct cvmx_gmxx_rxx_adr_cam2_s cn38xxp2;
- struct cvmx_gmxx_rxx_adr_cam2_s cn50xx;
- struct cvmx_gmxx_rxx_adr_cam2_s cn52xx;
- struct cvmx_gmxx_rxx_adr_cam2_s cn52xxp1;
- struct cvmx_gmxx_rxx_adr_cam2_s cn56xx;
- struct cvmx_gmxx_rxx_adr_cam2_s cn56xxp1;
- struct cvmx_gmxx_rxx_adr_cam2_s cn58xx;
- struct cvmx_gmxx_rxx_adr_cam2_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_adr_cam3 {
- uint64_t u64;
- struct cvmx_gmxx_rxx_adr_cam3_s {
- uint64_t adr:64;
- } s;
- struct cvmx_gmxx_rxx_adr_cam3_s cn30xx;
- struct cvmx_gmxx_rxx_adr_cam3_s cn31xx;
- struct cvmx_gmxx_rxx_adr_cam3_s cn38xx;
- struct cvmx_gmxx_rxx_adr_cam3_s cn38xxp2;
- struct cvmx_gmxx_rxx_adr_cam3_s cn50xx;
- struct cvmx_gmxx_rxx_adr_cam3_s cn52xx;
- struct cvmx_gmxx_rxx_adr_cam3_s cn52xxp1;
- struct cvmx_gmxx_rxx_adr_cam3_s cn56xx;
- struct cvmx_gmxx_rxx_adr_cam3_s cn56xxp1;
- struct cvmx_gmxx_rxx_adr_cam3_s cn58xx;
- struct cvmx_gmxx_rxx_adr_cam3_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_adr_cam4 {
- uint64_t u64;
- struct cvmx_gmxx_rxx_adr_cam4_s {
- uint64_t adr:64;
- } s;
- struct cvmx_gmxx_rxx_adr_cam4_s cn30xx;
- struct cvmx_gmxx_rxx_adr_cam4_s cn31xx;
- struct cvmx_gmxx_rxx_adr_cam4_s cn38xx;
- struct cvmx_gmxx_rxx_adr_cam4_s cn38xxp2;
- struct cvmx_gmxx_rxx_adr_cam4_s cn50xx;
- struct cvmx_gmxx_rxx_adr_cam4_s cn52xx;
- struct cvmx_gmxx_rxx_adr_cam4_s cn52xxp1;
- struct cvmx_gmxx_rxx_adr_cam4_s cn56xx;
- struct cvmx_gmxx_rxx_adr_cam4_s cn56xxp1;
- struct cvmx_gmxx_rxx_adr_cam4_s cn58xx;
- struct cvmx_gmxx_rxx_adr_cam4_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_adr_cam5 {
- uint64_t u64;
- struct cvmx_gmxx_rxx_adr_cam5_s {
- uint64_t adr:64;
- } s;
- struct cvmx_gmxx_rxx_adr_cam5_s cn30xx;
- struct cvmx_gmxx_rxx_adr_cam5_s cn31xx;
- struct cvmx_gmxx_rxx_adr_cam5_s cn38xx;
- struct cvmx_gmxx_rxx_adr_cam5_s cn38xxp2;
- struct cvmx_gmxx_rxx_adr_cam5_s cn50xx;
- struct cvmx_gmxx_rxx_adr_cam5_s cn52xx;
- struct cvmx_gmxx_rxx_adr_cam5_s cn52xxp1;
- struct cvmx_gmxx_rxx_adr_cam5_s cn56xx;
- struct cvmx_gmxx_rxx_adr_cam5_s cn56xxp1;
- struct cvmx_gmxx_rxx_adr_cam5_s cn58xx;
- struct cvmx_gmxx_rxx_adr_cam5_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_adr_cam_en {
- uint64_t u64;
- struct cvmx_gmxx_rxx_adr_cam_en_s {
- uint64_t reserved_8_63:56;
- uint64_t en:8;
- } s;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn30xx;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn31xx;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn38xx;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn38xxp2;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn50xx;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn52xx;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn52xxp1;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn56xx;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn56xxp1;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn58xx;
- struct cvmx_gmxx_rxx_adr_cam_en_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_adr_ctl {
- uint64_t u64;
- struct cvmx_gmxx_rxx_adr_ctl_s {
- uint64_t reserved_4_63:60;
- uint64_t cam_mode:1;
- uint64_t mcst:2;
- uint64_t bcst:1;
- } s;
- struct cvmx_gmxx_rxx_adr_ctl_s cn30xx;
- struct cvmx_gmxx_rxx_adr_ctl_s cn31xx;
- struct cvmx_gmxx_rxx_adr_ctl_s cn38xx;
- struct cvmx_gmxx_rxx_adr_ctl_s cn38xxp2;
- struct cvmx_gmxx_rxx_adr_ctl_s cn50xx;
- struct cvmx_gmxx_rxx_adr_ctl_s cn52xx;
- struct cvmx_gmxx_rxx_adr_ctl_s cn52xxp1;
- struct cvmx_gmxx_rxx_adr_ctl_s cn56xx;
- struct cvmx_gmxx_rxx_adr_ctl_s cn56xxp1;
- struct cvmx_gmxx_rxx_adr_ctl_s cn58xx;
- struct cvmx_gmxx_rxx_adr_ctl_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_decision {
- uint64_t u64;
- struct cvmx_gmxx_rxx_decision_s {
- uint64_t reserved_5_63:59;
- uint64_t cnt:5;
- } s;
- struct cvmx_gmxx_rxx_decision_s cn30xx;
- struct cvmx_gmxx_rxx_decision_s cn31xx;
- struct cvmx_gmxx_rxx_decision_s cn38xx;
- struct cvmx_gmxx_rxx_decision_s cn38xxp2;
- struct cvmx_gmxx_rxx_decision_s cn50xx;
- struct cvmx_gmxx_rxx_decision_s cn52xx;
- struct cvmx_gmxx_rxx_decision_s cn52xxp1;
- struct cvmx_gmxx_rxx_decision_s cn56xx;
- struct cvmx_gmxx_rxx_decision_s cn56xxp1;
- struct cvmx_gmxx_rxx_decision_s cn58xx;
- struct cvmx_gmxx_rxx_decision_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_frm_chk {
- uint64_t u64;
- struct cvmx_gmxx_rxx_frm_chk_s {
- uint64_t reserved_10_63:54;
- uint64_t niberr:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t lenerr:1;
- uint64_t alnerr:1;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t maxerr:1;
- uint64_t carext:1;
- uint64_t minerr:1;
- } s;
- struct cvmx_gmxx_rxx_frm_chk_s cn30xx;
- struct cvmx_gmxx_rxx_frm_chk_s cn31xx;
- struct cvmx_gmxx_rxx_frm_chk_s cn38xx;
- struct cvmx_gmxx_rxx_frm_chk_s cn38xxp2;
- struct cvmx_gmxx_rxx_frm_chk_cn50xx {
- uint64_t reserved_10_63:54;
- uint64_t niberr:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t reserved_6_6:1;
- uint64_t alnerr:1;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t reserved_2_2:1;
- uint64_t carext:1;
- uint64_t reserved_0_0:1;
- } cn50xx;
- struct cvmx_gmxx_rxx_frm_chk_cn52xx {
- uint64_t reserved_9_63:55;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t reserved_5_6:2;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t reserved_2_2:1;
- uint64_t carext:1;
- uint64_t reserved_0_0:1;
- } cn52xx;
- struct cvmx_gmxx_rxx_frm_chk_cn52xx cn52xxp1;
- struct cvmx_gmxx_rxx_frm_chk_cn52xx cn56xx;
- struct cvmx_gmxx_rxx_frm_chk_cn52xx cn56xxp1;
- struct cvmx_gmxx_rxx_frm_chk_s cn58xx;
- struct cvmx_gmxx_rxx_frm_chk_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_frm_ctl {
- uint64_t u64;
- struct cvmx_gmxx_rxx_frm_ctl_s {
- uint64_t reserved_11_63:53;
- uint64_t null_dis:1;
- uint64_t pre_align:1;
- uint64_t pad_len:1;
- uint64_t vlan_len:1;
- uint64_t pre_free:1;
- uint64_t ctl_smac:1;
- uint64_t ctl_mcst:1;
- uint64_t ctl_bck:1;
- uint64_t ctl_drp:1;
- uint64_t pre_strp:1;
- uint64_t pre_chk:1;
- } s;
- struct cvmx_gmxx_rxx_frm_ctl_cn30xx {
- uint64_t reserved_9_63:55;
- uint64_t pad_len:1;
- uint64_t vlan_len:1;
- uint64_t pre_free:1;
- uint64_t ctl_smac:1;
- uint64_t ctl_mcst:1;
- uint64_t ctl_bck:1;
- uint64_t ctl_drp:1;
- uint64_t pre_strp:1;
- uint64_t pre_chk:1;
- } cn30xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn31xx {
- uint64_t reserved_8_63:56;
- uint64_t vlan_len:1;
- uint64_t pre_free:1;
- uint64_t ctl_smac:1;
- uint64_t ctl_mcst:1;
- uint64_t ctl_bck:1;
- uint64_t ctl_drp:1;
- uint64_t pre_strp:1;
- uint64_t pre_chk:1;
- } cn31xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn30xx cn38xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn31xx cn38xxp2;
- struct cvmx_gmxx_rxx_frm_ctl_cn50xx {
- uint64_t reserved_11_63:53;
- uint64_t null_dis:1;
- uint64_t pre_align:1;
- uint64_t reserved_7_8:2;
- uint64_t pre_free:1;
- uint64_t ctl_smac:1;
- uint64_t ctl_mcst:1;
- uint64_t ctl_bck:1;
- uint64_t ctl_drp:1;
- uint64_t pre_strp:1;
- uint64_t pre_chk:1;
- } cn50xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn52xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn52xxp1;
- struct cvmx_gmxx_rxx_frm_ctl_cn50xx cn56xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn56xxp1 {
- uint64_t reserved_10_63:54;
- uint64_t pre_align:1;
- uint64_t reserved_7_8:2;
- uint64_t pre_free:1;
- uint64_t ctl_smac:1;
- uint64_t ctl_mcst:1;
- uint64_t ctl_bck:1;
- uint64_t ctl_drp:1;
- uint64_t pre_strp:1;
- uint64_t pre_chk:1;
- } cn56xxp1;
- struct cvmx_gmxx_rxx_frm_ctl_s cn58xx;
- struct cvmx_gmxx_rxx_frm_ctl_cn30xx cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_frm_max {
- uint64_t u64;
- struct cvmx_gmxx_rxx_frm_max_s {
- uint64_t reserved_16_63:48;
- uint64_t len:16;
- } s;
- struct cvmx_gmxx_rxx_frm_max_s cn30xx;
- struct cvmx_gmxx_rxx_frm_max_s cn31xx;
- struct cvmx_gmxx_rxx_frm_max_s cn38xx;
- struct cvmx_gmxx_rxx_frm_max_s cn38xxp2;
- struct cvmx_gmxx_rxx_frm_max_s cn58xx;
- struct cvmx_gmxx_rxx_frm_max_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_frm_min {
- uint64_t u64;
- struct cvmx_gmxx_rxx_frm_min_s {
- uint64_t reserved_16_63:48;
- uint64_t len:16;
- } s;
- struct cvmx_gmxx_rxx_frm_min_s cn30xx;
- struct cvmx_gmxx_rxx_frm_min_s cn31xx;
- struct cvmx_gmxx_rxx_frm_min_s cn38xx;
- struct cvmx_gmxx_rxx_frm_min_s cn38xxp2;
- struct cvmx_gmxx_rxx_frm_min_s cn58xx;
- struct cvmx_gmxx_rxx_frm_min_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_ifg {
- uint64_t u64;
- struct cvmx_gmxx_rxx_ifg_s {
- uint64_t reserved_4_63:60;
- uint64_t ifg:4;
- } s;
- struct cvmx_gmxx_rxx_ifg_s cn30xx;
- struct cvmx_gmxx_rxx_ifg_s cn31xx;
- struct cvmx_gmxx_rxx_ifg_s cn38xx;
- struct cvmx_gmxx_rxx_ifg_s cn38xxp2;
- struct cvmx_gmxx_rxx_ifg_s cn50xx;
- struct cvmx_gmxx_rxx_ifg_s cn52xx;
- struct cvmx_gmxx_rxx_ifg_s cn52xxp1;
- struct cvmx_gmxx_rxx_ifg_s cn56xx;
- struct cvmx_gmxx_rxx_ifg_s cn56xxp1;
- struct cvmx_gmxx_rxx_ifg_s cn58xx;
- struct cvmx_gmxx_rxx_ifg_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_int_en {
- uint64_t u64;
- struct cvmx_gmxx_rxx_int_en_s {
- uint64_t reserved_29_63:35;
- uint64_t hg2cc:1;
- uint64_t hg2fld:1;
- uint64_t undat:1;
- uint64_t uneop:1;
- uint64_t unsop:1;
- uint64_t bad_term:1;
- uint64_t bad_seq:1;
- uint64_t rem_fault:1;
- uint64_t loc_fault:1;
- uint64_t pause_drp:1;
- uint64_t phy_dupx:1;
- uint64_t phy_spd:1;
- uint64_t phy_link:1;
- uint64_t ifgerr:1;
- uint64_t coldet:1;
- uint64_t falerr:1;
- uint64_t rsverr:1;
- uint64_t pcterr:1;
- uint64_t ovrerr:1;
- uint64_t niberr:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t lenerr:1;
- uint64_t alnerr:1;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t maxerr:1;
- uint64_t carext:1;
- uint64_t minerr:1;
- } s;
- struct cvmx_gmxx_rxx_int_en_cn30xx {
- uint64_t reserved_19_63:45;
- uint64_t phy_dupx:1;
- uint64_t phy_spd:1;
- uint64_t phy_link:1;
- uint64_t ifgerr:1;
- uint64_t coldet:1;
- uint64_t falerr:1;
- uint64_t rsverr:1;
- uint64_t pcterr:1;
- uint64_t ovrerr:1;
- uint64_t niberr:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t lenerr:1;
- uint64_t alnerr:1;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t maxerr:1;
- uint64_t carext:1;
- uint64_t minerr:1;
- } cn30xx;
- struct cvmx_gmxx_rxx_int_en_cn30xx cn31xx;
- struct cvmx_gmxx_rxx_int_en_cn30xx cn38xx;
- struct cvmx_gmxx_rxx_int_en_cn30xx cn38xxp2;
- struct cvmx_gmxx_rxx_int_en_cn50xx {
- uint64_t reserved_20_63:44;
- uint64_t pause_drp:1;
- uint64_t phy_dupx:1;
- uint64_t phy_spd:1;
- uint64_t phy_link:1;
- uint64_t ifgerr:1;
- uint64_t coldet:1;
- uint64_t falerr:1;
- uint64_t rsverr:1;
- uint64_t pcterr:1;
- uint64_t ovrerr:1;
- uint64_t niberr:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t reserved_6_6:1;
- uint64_t alnerr:1;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t reserved_2_2:1;
- uint64_t carext:1;
- uint64_t reserved_0_0:1;
- } cn50xx;
- struct cvmx_gmxx_rxx_int_en_cn52xx {
- uint64_t reserved_29_63:35;
- uint64_t hg2cc:1;
- uint64_t hg2fld:1;
- uint64_t undat:1;
- uint64_t uneop:1;
- uint64_t unsop:1;
- uint64_t bad_term:1;
- uint64_t bad_seq:1;
- uint64_t rem_fault:1;
- uint64_t loc_fault:1;
- uint64_t pause_drp:1;
- uint64_t reserved_16_18:3;
- uint64_t ifgerr:1;
- uint64_t coldet:1;
- uint64_t falerr:1;
- uint64_t rsverr:1;
- uint64_t pcterr:1;
- uint64_t ovrerr:1;
- uint64_t reserved_9_9:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t reserved_5_6:2;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t reserved_2_2:1;
- uint64_t carext:1;
- uint64_t reserved_0_0:1;
- } cn52xx;
- struct cvmx_gmxx_rxx_int_en_cn52xx cn52xxp1;
- struct cvmx_gmxx_rxx_int_en_cn52xx cn56xx;
- struct cvmx_gmxx_rxx_int_en_cn56xxp1 {
- uint64_t reserved_27_63:37;
- uint64_t undat:1;
- uint64_t uneop:1;
- uint64_t unsop:1;
- uint64_t bad_term:1;
- uint64_t bad_seq:1;
- uint64_t rem_fault:1;
- uint64_t loc_fault:1;
- uint64_t pause_drp:1;
- uint64_t reserved_16_18:3;
- uint64_t ifgerr:1;
- uint64_t coldet:1;
- uint64_t falerr:1;
- uint64_t rsverr:1;
- uint64_t pcterr:1;
- uint64_t ovrerr:1;
- uint64_t reserved_9_9:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t reserved_5_6:2;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t reserved_2_2:1;
- uint64_t carext:1;
- uint64_t reserved_0_0:1;
- } cn56xxp1;
- struct cvmx_gmxx_rxx_int_en_cn58xx {
- uint64_t reserved_20_63:44;
- uint64_t pause_drp:1;
- uint64_t phy_dupx:1;
- uint64_t phy_spd:1;
- uint64_t phy_link:1;
- uint64_t ifgerr:1;
- uint64_t coldet:1;
- uint64_t falerr:1;
- uint64_t rsverr:1;
- uint64_t pcterr:1;
- uint64_t ovrerr:1;
- uint64_t niberr:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t lenerr:1;
- uint64_t alnerr:1;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t maxerr:1;
- uint64_t carext:1;
- uint64_t minerr:1;
- } cn58xx;
- struct cvmx_gmxx_rxx_int_en_cn58xx cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_int_reg {
- uint64_t u64;
- struct cvmx_gmxx_rxx_int_reg_s {
- uint64_t reserved_29_63:35;
- uint64_t hg2cc:1;
- uint64_t hg2fld:1;
- uint64_t undat:1;
- uint64_t uneop:1;
- uint64_t unsop:1;
- uint64_t bad_term:1;
- uint64_t bad_seq:1;
- uint64_t rem_fault:1;
- uint64_t loc_fault:1;
- uint64_t pause_drp:1;
- uint64_t phy_dupx:1;
- uint64_t phy_spd:1;
- uint64_t phy_link:1;
- uint64_t ifgerr:1;
- uint64_t coldet:1;
- uint64_t falerr:1;
- uint64_t rsverr:1;
- uint64_t pcterr:1;
- uint64_t ovrerr:1;
- uint64_t niberr:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t lenerr:1;
- uint64_t alnerr:1;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t maxerr:1;
- uint64_t carext:1;
- uint64_t minerr:1;
- } s;
- struct cvmx_gmxx_rxx_int_reg_cn30xx {
- uint64_t reserved_19_63:45;
- uint64_t phy_dupx:1;
- uint64_t phy_spd:1;
- uint64_t phy_link:1;
- uint64_t ifgerr:1;
- uint64_t coldet:1;
- uint64_t falerr:1;
- uint64_t rsverr:1;
- uint64_t pcterr:1;
- uint64_t ovrerr:1;
- uint64_t niberr:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t lenerr:1;
- uint64_t alnerr:1;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t maxerr:1;
- uint64_t carext:1;
- uint64_t minerr:1;
- } cn30xx;
- struct cvmx_gmxx_rxx_int_reg_cn30xx cn31xx;
- struct cvmx_gmxx_rxx_int_reg_cn30xx cn38xx;
- struct cvmx_gmxx_rxx_int_reg_cn30xx cn38xxp2;
- struct cvmx_gmxx_rxx_int_reg_cn50xx {
- uint64_t reserved_20_63:44;
- uint64_t pause_drp:1;
- uint64_t phy_dupx:1;
- uint64_t phy_spd:1;
- uint64_t phy_link:1;
- uint64_t ifgerr:1;
- uint64_t coldet:1;
- uint64_t falerr:1;
- uint64_t rsverr:1;
- uint64_t pcterr:1;
- uint64_t ovrerr:1;
- uint64_t niberr:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t reserved_6_6:1;
- uint64_t alnerr:1;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t reserved_2_2:1;
- uint64_t carext:1;
- uint64_t reserved_0_0:1;
- } cn50xx;
- struct cvmx_gmxx_rxx_int_reg_cn52xx {
- uint64_t reserved_29_63:35;
- uint64_t hg2cc:1;
- uint64_t hg2fld:1;
- uint64_t undat:1;
- uint64_t uneop:1;
- uint64_t unsop:1;
- uint64_t bad_term:1;
- uint64_t bad_seq:1;
- uint64_t rem_fault:1;
- uint64_t loc_fault:1;
- uint64_t pause_drp:1;
- uint64_t reserved_16_18:3;
- uint64_t ifgerr:1;
- uint64_t coldet:1;
- uint64_t falerr:1;
- uint64_t rsverr:1;
- uint64_t pcterr:1;
- uint64_t ovrerr:1;
- uint64_t reserved_9_9:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t reserved_5_6:2;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t reserved_2_2:1;
- uint64_t carext:1;
- uint64_t reserved_0_0:1;
- } cn52xx;
- struct cvmx_gmxx_rxx_int_reg_cn52xx cn52xxp1;
- struct cvmx_gmxx_rxx_int_reg_cn52xx cn56xx;
- struct cvmx_gmxx_rxx_int_reg_cn56xxp1 {
- uint64_t reserved_27_63:37;
- uint64_t undat:1;
- uint64_t uneop:1;
- uint64_t unsop:1;
- uint64_t bad_term:1;
- uint64_t bad_seq:1;
- uint64_t rem_fault:1;
- uint64_t loc_fault:1;
- uint64_t pause_drp:1;
- uint64_t reserved_16_18:3;
- uint64_t ifgerr:1;
- uint64_t coldet:1;
- uint64_t falerr:1;
- uint64_t rsverr:1;
- uint64_t pcterr:1;
- uint64_t ovrerr:1;
- uint64_t reserved_9_9:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t reserved_5_6:2;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t reserved_2_2:1;
- uint64_t carext:1;
- uint64_t reserved_0_0:1;
- } cn56xxp1;
- struct cvmx_gmxx_rxx_int_reg_cn58xx {
- uint64_t reserved_20_63:44;
- uint64_t pause_drp:1;
- uint64_t phy_dupx:1;
- uint64_t phy_spd:1;
- uint64_t phy_link:1;
- uint64_t ifgerr:1;
- uint64_t coldet:1;
- uint64_t falerr:1;
- uint64_t rsverr:1;
- uint64_t pcterr:1;
- uint64_t ovrerr:1;
- uint64_t niberr:1;
- uint64_t skperr:1;
- uint64_t rcverr:1;
- uint64_t lenerr:1;
- uint64_t alnerr:1;
- uint64_t fcserr:1;
- uint64_t jabber:1;
- uint64_t maxerr:1;
- uint64_t carext:1;
- uint64_t minerr:1;
- } cn58xx;
- struct cvmx_gmxx_rxx_int_reg_cn58xx cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_jabber {
- uint64_t u64;
- struct cvmx_gmxx_rxx_jabber_s {
- uint64_t reserved_16_63:48;
- uint64_t cnt:16;
- } s;
- struct cvmx_gmxx_rxx_jabber_s cn30xx;
- struct cvmx_gmxx_rxx_jabber_s cn31xx;
- struct cvmx_gmxx_rxx_jabber_s cn38xx;
- struct cvmx_gmxx_rxx_jabber_s cn38xxp2;
- struct cvmx_gmxx_rxx_jabber_s cn50xx;
- struct cvmx_gmxx_rxx_jabber_s cn52xx;
- struct cvmx_gmxx_rxx_jabber_s cn52xxp1;
- struct cvmx_gmxx_rxx_jabber_s cn56xx;
- struct cvmx_gmxx_rxx_jabber_s cn56xxp1;
- struct cvmx_gmxx_rxx_jabber_s cn58xx;
- struct cvmx_gmxx_rxx_jabber_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_pause_drop_time {
- uint64_t u64;
- struct cvmx_gmxx_rxx_pause_drop_time_s {
- uint64_t reserved_16_63:48;
- uint64_t status:16;
- } s;
- struct cvmx_gmxx_rxx_pause_drop_time_s cn50xx;
- struct cvmx_gmxx_rxx_pause_drop_time_s cn52xx;
- struct cvmx_gmxx_rxx_pause_drop_time_s cn52xxp1;
- struct cvmx_gmxx_rxx_pause_drop_time_s cn56xx;
- struct cvmx_gmxx_rxx_pause_drop_time_s cn56xxp1;
- struct cvmx_gmxx_rxx_pause_drop_time_s cn58xx;
- struct cvmx_gmxx_rxx_pause_drop_time_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_rx_inbnd {
- uint64_t u64;
- struct cvmx_gmxx_rxx_rx_inbnd_s {
- uint64_t reserved_4_63:60;
- uint64_t duplex:1;
- uint64_t speed:2;
- uint64_t status:1;
- } s;
- struct cvmx_gmxx_rxx_rx_inbnd_s cn30xx;
- struct cvmx_gmxx_rxx_rx_inbnd_s cn31xx;
- struct cvmx_gmxx_rxx_rx_inbnd_s cn38xx;
- struct cvmx_gmxx_rxx_rx_inbnd_s cn38xxp2;
- struct cvmx_gmxx_rxx_rx_inbnd_s cn50xx;
- struct cvmx_gmxx_rxx_rx_inbnd_s cn58xx;
- struct cvmx_gmxx_rxx_rx_inbnd_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_stats_ctl {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_ctl_s {
- uint64_t reserved_1_63:63;
- uint64_t rd_clr:1;
- } s;
- struct cvmx_gmxx_rxx_stats_ctl_s cn30xx;
- struct cvmx_gmxx_rxx_stats_ctl_s cn31xx;
- struct cvmx_gmxx_rxx_stats_ctl_s cn38xx;
- struct cvmx_gmxx_rxx_stats_ctl_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_ctl_s cn50xx;
- struct cvmx_gmxx_rxx_stats_ctl_s cn52xx;
- struct cvmx_gmxx_rxx_stats_ctl_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_ctl_s cn56xx;
- struct cvmx_gmxx_rxx_stats_ctl_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_ctl_s cn58xx;
- struct cvmx_gmxx_rxx_stats_ctl_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_stats_octs {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_octs_s {
- uint64_t reserved_48_63:16;
- uint64_t cnt:48;
- } s;
- struct cvmx_gmxx_rxx_stats_octs_s cn30xx;
- struct cvmx_gmxx_rxx_stats_octs_s cn31xx;
- struct cvmx_gmxx_rxx_stats_octs_s cn38xx;
- struct cvmx_gmxx_rxx_stats_octs_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_octs_s cn50xx;
- struct cvmx_gmxx_rxx_stats_octs_s cn52xx;
- struct cvmx_gmxx_rxx_stats_octs_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_octs_s cn56xx;
- struct cvmx_gmxx_rxx_stats_octs_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_octs_s cn58xx;
- struct cvmx_gmxx_rxx_stats_octs_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_stats_octs_ctl {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s {
- uint64_t reserved_48_63:16;
- uint64_t cnt:48;
- } s;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn30xx;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn31xx;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn38xx;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn50xx;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn52xx;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn56xx;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn58xx;
- struct cvmx_gmxx_rxx_stats_octs_ctl_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_stats_octs_dmac {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s {
- uint64_t reserved_48_63:16;
- uint64_t cnt:48;
- } s;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn30xx;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn31xx;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn38xx;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn50xx;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn52xx;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn56xx;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn58xx;
- struct cvmx_gmxx_rxx_stats_octs_dmac_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_stats_octs_drp {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_octs_drp_s {
- uint64_t reserved_48_63:16;
- uint64_t cnt:48;
- } s;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn30xx;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn31xx;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn38xx;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn50xx;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn52xx;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn56xx;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn58xx;
- struct cvmx_gmxx_rxx_stats_octs_drp_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_stats_pkts {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_pkts_s {
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
- } s;
- struct cvmx_gmxx_rxx_stats_pkts_s cn30xx;
- struct cvmx_gmxx_rxx_stats_pkts_s cn31xx;
- struct cvmx_gmxx_rxx_stats_pkts_s cn38xx;
- struct cvmx_gmxx_rxx_stats_pkts_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_pkts_s cn50xx;
- struct cvmx_gmxx_rxx_stats_pkts_s cn52xx;
- struct cvmx_gmxx_rxx_stats_pkts_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_s cn56xx;
- struct cvmx_gmxx_rxx_stats_pkts_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_s cn58xx;
- struct cvmx_gmxx_rxx_stats_pkts_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_stats_pkts_bad {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s {
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
- } s;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn30xx;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn31xx;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn38xx;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn50xx;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn52xx;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn56xx;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn58xx;
- struct cvmx_gmxx_rxx_stats_pkts_bad_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_stats_pkts_ctl {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s {
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
- } s;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn30xx;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn31xx;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn38xx;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn50xx;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn52xx;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn56xx;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn58xx;
- struct cvmx_gmxx_rxx_stats_pkts_ctl_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_stats_pkts_dmac {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s {
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
- } s;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn30xx;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn31xx;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn38xx;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn50xx;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn52xx;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn56xx;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn58xx;
- struct cvmx_gmxx_rxx_stats_pkts_dmac_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_stats_pkts_drp {
- uint64_t u64;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s {
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
- } s;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn30xx;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn31xx;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn38xx;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn38xxp2;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn50xx;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn52xx;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn52xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn56xx;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn56xxp1;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn58xx;
- struct cvmx_gmxx_rxx_stats_pkts_drp_s cn58xxp1;
-};
-
-union cvmx_gmxx_rxx_udd_skp {
- uint64_t u64;
- struct cvmx_gmxx_rxx_udd_skp_s {
- uint64_t reserved_9_63:55;
- uint64_t fcssel:1;
- uint64_t reserved_7_7:1;
- uint64_t len:7;
- } s;
- struct cvmx_gmxx_rxx_udd_skp_s cn30xx;
- struct cvmx_gmxx_rxx_udd_skp_s cn31xx;
- struct cvmx_gmxx_rxx_udd_skp_s cn38xx;
- struct cvmx_gmxx_rxx_udd_skp_s cn38xxp2;
- struct cvmx_gmxx_rxx_udd_skp_s cn50xx;
- struct cvmx_gmxx_rxx_udd_skp_s cn52xx;
- struct cvmx_gmxx_rxx_udd_skp_s cn52xxp1;
- struct cvmx_gmxx_rxx_udd_skp_s cn56xx;
- struct cvmx_gmxx_rxx_udd_skp_s cn56xxp1;
- struct cvmx_gmxx_rxx_udd_skp_s cn58xx;
- struct cvmx_gmxx_rxx_udd_skp_s cn58xxp1;
-};
-
-union cvmx_gmxx_rx_bp_dropx {
- uint64_t u64;
- struct cvmx_gmxx_rx_bp_dropx_s {
- uint64_t reserved_6_63:58;
- uint64_t mark:6;
- } s;
- struct cvmx_gmxx_rx_bp_dropx_s cn30xx;
- struct cvmx_gmxx_rx_bp_dropx_s cn31xx;
- struct cvmx_gmxx_rx_bp_dropx_s cn38xx;
- struct cvmx_gmxx_rx_bp_dropx_s cn38xxp2;
- struct cvmx_gmxx_rx_bp_dropx_s cn50xx;
- struct cvmx_gmxx_rx_bp_dropx_s cn52xx;
- struct cvmx_gmxx_rx_bp_dropx_s cn52xxp1;
- struct cvmx_gmxx_rx_bp_dropx_s cn56xx;
- struct cvmx_gmxx_rx_bp_dropx_s cn56xxp1;
- struct cvmx_gmxx_rx_bp_dropx_s cn58xx;
- struct cvmx_gmxx_rx_bp_dropx_s cn58xxp1;
-};
-
-union cvmx_gmxx_rx_bp_offx {
- uint64_t u64;
- struct cvmx_gmxx_rx_bp_offx_s {
- uint64_t reserved_6_63:58;
- uint64_t mark:6;
- } s;
- struct cvmx_gmxx_rx_bp_offx_s cn30xx;
- struct cvmx_gmxx_rx_bp_offx_s cn31xx;
- struct cvmx_gmxx_rx_bp_offx_s cn38xx;
- struct cvmx_gmxx_rx_bp_offx_s cn38xxp2;
- struct cvmx_gmxx_rx_bp_offx_s cn50xx;
- struct cvmx_gmxx_rx_bp_offx_s cn52xx;
- struct cvmx_gmxx_rx_bp_offx_s cn52xxp1;
- struct cvmx_gmxx_rx_bp_offx_s cn56xx;
- struct cvmx_gmxx_rx_bp_offx_s cn56xxp1;
- struct cvmx_gmxx_rx_bp_offx_s cn58xx;
- struct cvmx_gmxx_rx_bp_offx_s cn58xxp1;
-};
-
-union cvmx_gmxx_rx_bp_onx {
- uint64_t u64;
- struct cvmx_gmxx_rx_bp_onx_s {
- uint64_t reserved_9_63:55;
- uint64_t mark:9;
- } s;
- struct cvmx_gmxx_rx_bp_onx_s cn30xx;
- struct cvmx_gmxx_rx_bp_onx_s cn31xx;
- struct cvmx_gmxx_rx_bp_onx_s cn38xx;
- struct cvmx_gmxx_rx_bp_onx_s cn38xxp2;
- struct cvmx_gmxx_rx_bp_onx_s cn50xx;
- struct cvmx_gmxx_rx_bp_onx_s cn52xx;
- struct cvmx_gmxx_rx_bp_onx_s cn52xxp1;
- struct cvmx_gmxx_rx_bp_onx_s cn56xx;
- struct cvmx_gmxx_rx_bp_onx_s cn56xxp1;
- struct cvmx_gmxx_rx_bp_onx_s cn58xx;
- struct cvmx_gmxx_rx_bp_onx_s cn58xxp1;
-};
-
-union cvmx_gmxx_rx_hg2_status {
- uint64_t u64;
- struct cvmx_gmxx_rx_hg2_status_s {
- uint64_t reserved_48_63:16;
- uint64_t phtim2go:16;
- uint64_t xof:16;
- uint64_t lgtim2go:16;
- } s;
- struct cvmx_gmxx_rx_hg2_status_s cn52xx;
- struct cvmx_gmxx_rx_hg2_status_s cn52xxp1;
- struct cvmx_gmxx_rx_hg2_status_s cn56xx;
-};
-
-union cvmx_gmxx_rx_pass_en {
- uint64_t u64;
- struct cvmx_gmxx_rx_pass_en_s {
- uint64_t reserved_16_63:48;
- uint64_t en:16;
- } s;
- struct cvmx_gmxx_rx_pass_en_s cn38xx;
- struct cvmx_gmxx_rx_pass_en_s cn38xxp2;
- struct cvmx_gmxx_rx_pass_en_s cn58xx;
- struct cvmx_gmxx_rx_pass_en_s cn58xxp1;
-};
-
-union cvmx_gmxx_rx_pass_mapx {
- uint64_t u64;
- struct cvmx_gmxx_rx_pass_mapx_s {
- uint64_t reserved_4_63:60;
- uint64_t dprt:4;
- } s;
- struct cvmx_gmxx_rx_pass_mapx_s cn38xx;
- struct cvmx_gmxx_rx_pass_mapx_s cn38xxp2;
- struct cvmx_gmxx_rx_pass_mapx_s cn58xx;
- struct cvmx_gmxx_rx_pass_mapx_s cn58xxp1;
-};
-
-union cvmx_gmxx_rx_prt_info {
- uint64_t u64;
- struct cvmx_gmxx_rx_prt_info_s {
- uint64_t reserved_32_63:32;
- uint64_t drop:16;
- uint64_t commit:16;
- } s;
- struct cvmx_gmxx_rx_prt_info_cn30xx {
- uint64_t reserved_19_63:45;
- uint64_t drop:3;
- uint64_t reserved_3_15:13;
- uint64_t commit:3;
- } cn30xx;
- struct cvmx_gmxx_rx_prt_info_cn30xx cn31xx;
- struct cvmx_gmxx_rx_prt_info_s cn38xx;
- struct cvmx_gmxx_rx_prt_info_cn30xx cn50xx;
- struct cvmx_gmxx_rx_prt_info_cn52xx {
- uint64_t reserved_20_63:44;
- uint64_t drop:4;
- uint64_t reserved_4_15:12;
- uint64_t commit:4;
- } cn52xx;
- struct cvmx_gmxx_rx_prt_info_cn52xx cn52xxp1;
- struct cvmx_gmxx_rx_prt_info_cn52xx cn56xx;
- struct cvmx_gmxx_rx_prt_info_cn52xx cn56xxp1;
- struct cvmx_gmxx_rx_prt_info_s cn58xx;
- struct cvmx_gmxx_rx_prt_info_s cn58xxp1;
-};
-
-union cvmx_gmxx_rx_prts {
- uint64_t u64;
- struct cvmx_gmxx_rx_prts_s {
- uint64_t reserved_3_63:61;
- uint64_t prts:3;
- } s;
- struct cvmx_gmxx_rx_prts_s cn30xx;
- struct cvmx_gmxx_rx_prts_s cn31xx;
- struct cvmx_gmxx_rx_prts_s cn38xx;
- struct cvmx_gmxx_rx_prts_s cn38xxp2;
- struct cvmx_gmxx_rx_prts_s cn50xx;
- struct cvmx_gmxx_rx_prts_s cn52xx;
- struct cvmx_gmxx_rx_prts_s cn52xxp1;
- struct cvmx_gmxx_rx_prts_s cn56xx;
- struct cvmx_gmxx_rx_prts_s cn56xxp1;
- struct cvmx_gmxx_rx_prts_s cn58xx;
- struct cvmx_gmxx_rx_prts_s cn58xxp1;
-};
-
-union cvmx_gmxx_rx_tx_status {
- uint64_t u64;
- struct cvmx_gmxx_rx_tx_status_s {
- uint64_t reserved_7_63:57;
- uint64_t tx:3;
- uint64_t reserved_3_3:1;
- uint64_t rx:3;
- } s;
- struct cvmx_gmxx_rx_tx_status_s cn30xx;
- struct cvmx_gmxx_rx_tx_status_s cn31xx;
- struct cvmx_gmxx_rx_tx_status_s cn50xx;
-};
-
-union cvmx_gmxx_rx_xaui_bad_col {
- uint64_t u64;
- struct cvmx_gmxx_rx_xaui_bad_col_s {
- uint64_t reserved_40_63:24;
- uint64_t val:1;
- uint64_t state:3;
- uint64_t lane_rxc:4;
- uint64_t lane_rxd:32;
- } s;
- struct cvmx_gmxx_rx_xaui_bad_col_s cn52xx;
- struct cvmx_gmxx_rx_xaui_bad_col_s cn52xxp1;
- struct cvmx_gmxx_rx_xaui_bad_col_s cn56xx;
- struct cvmx_gmxx_rx_xaui_bad_col_s cn56xxp1;
-};
-
-union cvmx_gmxx_rx_xaui_ctl {
- uint64_t u64;
- struct cvmx_gmxx_rx_xaui_ctl_s {
- uint64_t reserved_2_63:62;
- uint64_t status:2;
- } s;
- struct cvmx_gmxx_rx_xaui_ctl_s cn52xx;
- struct cvmx_gmxx_rx_xaui_ctl_s cn52xxp1;
- struct cvmx_gmxx_rx_xaui_ctl_s cn56xx;
- struct cvmx_gmxx_rx_xaui_ctl_s cn56xxp1;
-};
-
-union cvmx_gmxx_smacx {
- uint64_t u64;
- struct cvmx_gmxx_smacx_s {
- uint64_t reserved_48_63:16;
- uint64_t smac:48;
- } s;
- struct cvmx_gmxx_smacx_s cn30xx;
- struct cvmx_gmxx_smacx_s cn31xx;
- struct cvmx_gmxx_smacx_s cn38xx;
- struct cvmx_gmxx_smacx_s cn38xxp2;
- struct cvmx_gmxx_smacx_s cn50xx;
- struct cvmx_gmxx_smacx_s cn52xx;
- struct cvmx_gmxx_smacx_s cn52xxp1;
- struct cvmx_gmxx_smacx_s cn56xx;
- struct cvmx_gmxx_smacx_s cn56xxp1;
- struct cvmx_gmxx_smacx_s cn58xx;
- struct cvmx_gmxx_smacx_s cn58xxp1;
-};
-
-union cvmx_gmxx_stat_bp {
- uint64_t u64;
- struct cvmx_gmxx_stat_bp_s {
- uint64_t reserved_17_63:47;
- uint64_t bp:1;
- uint64_t cnt:16;
- } s;
- struct cvmx_gmxx_stat_bp_s cn30xx;
- struct cvmx_gmxx_stat_bp_s cn31xx;
- struct cvmx_gmxx_stat_bp_s cn38xx;
- struct cvmx_gmxx_stat_bp_s cn38xxp2;
- struct cvmx_gmxx_stat_bp_s cn50xx;
- struct cvmx_gmxx_stat_bp_s cn52xx;
- struct cvmx_gmxx_stat_bp_s cn52xxp1;
- struct cvmx_gmxx_stat_bp_s cn56xx;
- struct cvmx_gmxx_stat_bp_s cn56xxp1;
- struct cvmx_gmxx_stat_bp_s cn58xx;
- struct cvmx_gmxx_stat_bp_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_append {
- uint64_t u64;
- struct cvmx_gmxx_txx_append_s {
- uint64_t reserved_4_63:60;
- uint64_t force_fcs:1;
- uint64_t fcs:1;
- uint64_t pad:1;
- uint64_t preamble:1;
- } s;
- struct cvmx_gmxx_txx_append_s cn30xx;
- struct cvmx_gmxx_txx_append_s cn31xx;
- struct cvmx_gmxx_txx_append_s cn38xx;
- struct cvmx_gmxx_txx_append_s cn38xxp2;
- struct cvmx_gmxx_txx_append_s cn50xx;
- struct cvmx_gmxx_txx_append_s cn52xx;
- struct cvmx_gmxx_txx_append_s cn52xxp1;
- struct cvmx_gmxx_txx_append_s cn56xx;
- struct cvmx_gmxx_txx_append_s cn56xxp1;
- struct cvmx_gmxx_txx_append_s cn58xx;
- struct cvmx_gmxx_txx_append_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_burst {
- uint64_t u64;
- struct cvmx_gmxx_txx_burst_s {
- uint64_t reserved_16_63:48;
- uint64_t burst:16;
- } s;
- struct cvmx_gmxx_txx_burst_s cn30xx;
- struct cvmx_gmxx_txx_burst_s cn31xx;
- struct cvmx_gmxx_txx_burst_s cn38xx;
- struct cvmx_gmxx_txx_burst_s cn38xxp2;
- struct cvmx_gmxx_txx_burst_s cn50xx;
- struct cvmx_gmxx_txx_burst_s cn52xx;
- struct cvmx_gmxx_txx_burst_s cn52xxp1;
- struct cvmx_gmxx_txx_burst_s cn56xx;
- struct cvmx_gmxx_txx_burst_s cn56xxp1;
- struct cvmx_gmxx_txx_burst_s cn58xx;
- struct cvmx_gmxx_txx_burst_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_cbfc_xoff {
- uint64_t u64;
- struct cvmx_gmxx_txx_cbfc_xoff_s {
- uint64_t reserved_16_63:48;
- uint64_t xoff:16;
- } s;
- struct cvmx_gmxx_txx_cbfc_xoff_s cn52xx;
- struct cvmx_gmxx_txx_cbfc_xoff_s cn56xx;
-};
-
-union cvmx_gmxx_txx_cbfc_xon {
- uint64_t u64;
- struct cvmx_gmxx_txx_cbfc_xon_s {
- uint64_t reserved_16_63:48;
- uint64_t xon:16;
- } s;
- struct cvmx_gmxx_txx_cbfc_xon_s cn52xx;
- struct cvmx_gmxx_txx_cbfc_xon_s cn56xx;
-};
-
-union cvmx_gmxx_txx_clk {
- uint64_t u64;
- struct cvmx_gmxx_txx_clk_s {
- uint64_t reserved_6_63:58;
- uint64_t clk_cnt:6;
- } s;
- struct cvmx_gmxx_txx_clk_s cn30xx;
- struct cvmx_gmxx_txx_clk_s cn31xx;
- struct cvmx_gmxx_txx_clk_s cn38xx;
- struct cvmx_gmxx_txx_clk_s cn38xxp2;
- struct cvmx_gmxx_txx_clk_s cn50xx;
- struct cvmx_gmxx_txx_clk_s cn58xx;
- struct cvmx_gmxx_txx_clk_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_ctl {
- uint64_t u64;
- struct cvmx_gmxx_txx_ctl_s {
- uint64_t reserved_2_63:62;
- uint64_t xsdef_en:1;
- uint64_t xscol_en:1;
- } s;
- struct cvmx_gmxx_txx_ctl_s cn30xx;
- struct cvmx_gmxx_txx_ctl_s cn31xx;
- struct cvmx_gmxx_txx_ctl_s cn38xx;
- struct cvmx_gmxx_txx_ctl_s cn38xxp2;
- struct cvmx_gmxx_txx_ctl_s cn50xx;
- struct cvmx_gmxx_txx_ctl_s cn52xx;
- struct cvmx_gmxx_txx_ctl_s cn52xxp1;
- struct cvmx_gmxx_txx_ctl_s cn56xx;
- struct cvmx_gmxx_txx_ctl_s cn56xxp1;
- struct cvmx_gmxx_txx_ctl_s cn58xx;
- struct cvmx_gmxx_txx_ctl_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_min_pkt {
- uint64_t u64;
- struct cvmx_gmxx_txx_min_pkt_s {
- uint64_t reserved_8_63:56;
- uint64_t min_size:8;
- } s;
- struct cvmx_gmxx_txx_min_pkt_s cn30xx;
- struct cvmx_gmxx_txx_min_pkt_s cn31xx;
- struct cvmx_gmxx_txx_min_pkt_s cn38xx;
- struct cvmx_gmxx_txx_min_pkt_s cn38xxp2;
- struct cvmx_gmxx_txx_min_pkt_s cn50xx;
- struct cvmx_gmxx_txx_min_pkt_s cn52xx;
- struct cvmx_gmxx_txx_min_pkt_s cn52xxp1;
- struct cvmx_gmxx_txx_min_pkt_s cn56xx;
- struct cvmx_gmxx_txx_min_pkt_s cn56xxp1;
- struct cvmx_gmxx_txx_min_pkt_s cn58xx;
- struct cvmx_gmxx_txx_min_pkt_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_pause_pkt_interval {
- uint64_t u64;
- struct cvmx_gmxx_txx_pause_pkt_interval_s {
- uint64_t reserved_16_63:48;
- uint64_t interval:16;
- } s;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn30xx;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn31xx;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn38xx;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn38xxp2;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn50xx;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn52xx;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn52xxp1;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn56xx;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn56xxp1;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn58xx;
- struct cvmx_gmxx_txx_pause_pkt_interval_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_pause_pkt_time {
- uint64_t u64;
- struct cvmx_gmxx_txx_pause_pkt_time_s {
- uint64_t reserved_16_63:48;
- uint64_t time:16;
- } s;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn30xx;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn31xx;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn38xx;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn38xxp2;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn50xx;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn52xx;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn52xxp1;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn56xx;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn56xxp1;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn58xx;
- struct cvmx_gmxx_txx_pause_pkt_time_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_pause_togo {
- uint64_t u64;
- struct cvmx_gmxx_txx_pause_togo_s {
- uint64_t reserved_32_63:32;
- uint64_t msg_time:16;
- uint64_t time:16;
- } s;
- struct cvmx_gmxx_txx_pause_togo_cn30xx {
- uint64_t reserved_16_63:48;
- uint64_t time:16;
- } cn30xx;
- struct cvmx_gmxx_txx_pause_togo_cn30xx cn31xx;
- struct cvmx_gmxx_txx_pause_togo_cn30xx cn38xx;
- struct cvmx_gmxx_txx_pause_togo_cn30xx cn38xxp2;
- struct cvmx_gmxx_txx_pause_togo_cn30xx cn50xx;
- struct cvmx_gmxx_txx_pause_togo_s cn52xx;
- struct cvmx_gmxx_txx_pause_togo_s cn52xxp1;
- struct cvmx_gmxx_txx_pause_togo_s cn56xx;
- struct cvmx_gmxx_txx_pause_togo_cn30xx cn56xxp1;
- struct cvmx_gmxx_txx_pause_togo_cn30xx cn58xx;
- struct cvmx_gmxx_txx_pause_togo_cn30xx cn58xxp1;
-};
-
-union cvmx_gmxx_txx_pause_zero {
- uint64_t u64;
- struct cvmx_gmxx_txx_pause_zero_s {
- uint64_t reserved_1_63:63;
- uint64_t send:1;
- } s;
- struct cvmx_gmxx_txx_pause_zero_s cn30xx;
- struct cvmx_gmxx_txx_pause_zero_s cn31xx;
- struct cvmx_gmxx_txx_pause_zero_s cn38xx;
- struct cvmx_gmxx_txx_pause_zero_s cn38xxp2;
- struct cvmx_gmxx_txx_pause_zero_s cn50xx;
- struct cvmx_gmxx_txx_pause_zero_s cn52xx;
- struct cvmx_gmxx_txx_pause_zero_s cn52xxp1;
- struct cvmx_gmxx_txx_pause_zero_s cn56xx;
- struct cvmx_gmxx_txx_pause_zero_s cn56xxp1;
- struct cvmx_gmxx_txx_pause_zero_s cn58xx;
- struct cvmx_gmxx_txx_pause_zero_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_sgmii_ctl {
- uint64_t u64;
- struct cvmx_gmxx_txx_sgmii_ctl_s {
- uint64_t reserved_1_63:63;
- uint64_t align:1;
- } s;
- struct cvmx_gmxx_txx_sgmii_ctl_s cn52xx;
- struct cvmx_gmxx_txx_sgmii_ctl_s cn52xxp1;
- struct cvmx_gmxx_txx_sgmii_ctl_s cn56xx;
- struct cvmx_gmxx_txx_sgmii_ctl_s cn56xxp1;
-};
-
-union cvmx_gmxx_txx_slot {
- uint64_t u64;
- struct cvmx_gmxx_txx_slot_s {
- uint64_t reserved_10_63:54;
- uint64_t slot:10;
- } s;
- struct cvmx_gmxx_txx_slot_s cn30xx;
- struct cvmx_gmxx_txx_slot_s cn31xx;
- struct cvmx_gmxx_txx_slot_s cn38xx;
- struct cvmx_gmxx_txx_slot_s cn38xxp2;
- struct cvmx_gmxx_txx_slot_s cn50xx;
- struct cvmx_gmxx_txx_slot_s cn52xx;
- struct cvmx_gmxx_txx_slot_s cn52xxp1;
- struct cvmx_gmxx_txx_slot_s cn56xx;
- struct cvmx_gmxx_txx_slot_s cn56xxp1;
- struct cvmx_gmxx_txx_slot_s cn58xx;
- struct cvmx_gmxx_txx_slot_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_soft_pause {
- uint64_t u64;
- struct cvmx_gmxx_txx_soft_pause_s {
- uint64_t reserved_16_63:48;
- uint64_t time:16;
- } s;
- struct cvmx_gmxx_txx_soft_pause_s cn30xx;
- struct cvmx_gmxx_txx_soft_pause_s cn31xx;
- struct cvmx_gmxx_txx_soft_pause_s cn38xx;
- struct cvmx_gmxx_txx_soft_pause_s cn38xxp2;
- struct cvmx_gmxx_txx_soft_pause_s cn50xx;
- struct cvmx_gmxx_txx_soft_pause_s cn52xx;
- struct cvmx_gmxx_txx_soft_pause_s cn52xxp1;
- struct cvmx_gmxx_txx_soft_pause_s cn56xx;
- struct cvmx_gmxx_txx_soft_pause_s cn56xxp1;
- struct cvmx_gmxx_txx_soft_pause_s cn58xx;
- struct cvmx_gmxx_txx_soft_pause_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_stat0 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat0_s {
- uint64_t xsdef:32;
- uint64_t xscol:32;
- } s;
- struct cvmx_gmxx_txx_stat0_s cn30xx;
- struct cvmx_gmxx_txx_stat0_s cn31xx;
- struct cvmx_gmxx_txx_stat0_s cn38xx;
- struct cvmx_gmxx_txx_stat0_s cn38xxp2;
- struct cvmx_gmxx_txx_stat0_s cn50xx;
- struct cvmx_gmxx_txx_stat0_s cn52xx;
- struct cvmx_gmxx_txx_stat0_s cn52xxp1;
- struct cvmx_gmxx_txx_stat0_s cn56xx;
- struct cvmx_gmxx_txx_stat0_s cn56xxp1;
- struct cvmx_gmxx_txx_stat0_s cn58xx;
- struct cvmx_gmxx_txx_stat0_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_stat1 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat1_s {
- uint64_t scol:32;
- uint64_t mcol:32;
- } s;
- struct cvmx_gmxx_txx_stat1_s cn30xx;
- struct cvmx_gmxx_txx_stat1_s cn31xx;
- struct cvmx_gmxx_txx_stat1_s cn38xx;
- struct cvmx_gmxx_txx_stat1_s cn38xxp2;
- struct cvmx_gmxx_txx_stat1_s cn50xx;
- struct cvmx_gmxx_txx_stat1_s cn52xx;
- struct cvmx_gmxx_txx_stat1_s cn52xxp1;
- struct cvmx_gmxx_txx_stat1_s cn56xx;
- struct cvmx_gmxx_txx_stat1_s cn56xxp1;
- struct cvmx_gmxx_txx_stat1_s cn58xx;
- struct cvmx_gmxx_txx_stat1_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_stat2 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat2_s {
- uint64_t reserved_48_63:16;
- uint64_t octs:48;
- } s;
- struct cvmx_gmxx_txx_stat2_s cn30xx;
- struct cvmx_gmxx_txx_stat2_s cn31xx;
- struct cvmx_gmxx_txx_stat2_s cn38xx;
- struct cvmx_gmxx_txx_stat2_s cn38xxp2;
- struct cvmx_gmxx_txx_stat2_s cn50xx;
- struct cvmx_gmxx_txx_stat2_s cn52xx;
- struct cvmx_gmxx_txx_stat2_s cn52xxp1;
- struct cvmx_gmxx_txx_stat2_s cn56xx;
- struct cvmx_gmxx_txx_stat2_s cn56xxp1;
- struct cvmx_gmxx_txx_stat2_s cn58xx;
- struct cvmx_gmxx_txx_stat2_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_stat3 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat3_s {
- uint64_t reserved_32_63:32;
- uint64_t pkts:32;
- } s;
- struct cvmx_gmxx_txx_stat3_s cn30xx;
- struct cvmx_gmxx_txx_stat3_s cn31xx;
- struct cvmx_gmxx_txx_stat3_s cn38xx;
- struct cvmx_gmxx_txx_stat3_s cn38xxp2;
- struct cvmx_gmxx_txx_stat3_s cn50xx;
- struct cvmx_gmxx_txx_stat3_s cn52xx;
- struct cvmx_gmxx_txx_stat3_s cn52xxp1;
- struct cvmx_gmxx_txx_stat3_s cn56xx;
- struct cvmx_gmxx_txx_stat3_s cn56xxp1;
- struct cvmx_gmxx_txx_stat3_s cn58xx;
- struct cvmx_gmxx_txx_stat3_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_stat4 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat4_s {
- uint64_t hist1:32;
- uint64_t hist0:32;
- } s;
- struct cvmx_gmxx_txx_stat4_s cn30xx;
- struct cvmx_gmxx_txx_stat4_s cn31xx;
- struct cvmx_gmxx_txx_stat4_s cn38xx;
- struct cvmx_gmxx_txx_stat4_s cn38xxp2;
- struct cvmx_gmxx_txx_stat4_s cn50xx;
- struct cvmx_gmxx_txx_stat4_s cn52xx;
- struct cvmx_gmxx_txx_stat4_s cn52xxp1;
- struct cvmx_gmxx_txx_stat4_s cn56xx;
- struct cvmx_gmxx_txx_stat4_s cn56xxp1;
- struct cvmx_gmxx_txx_stat4_s cn58xx;
- struct cvmx_gmxx_txx_stat4_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_stat5 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat5_s {
- uint64_t hist3:32;
- uint64_t hist2:32;
- } s;
- struct cvmx_gmxx_txx_stat5_s cn30xx;
- struct cvmx_gmxx_txx_stat5_s cn31xx;
- struct cvmx_gmxx_txx_stat5_s cn38xx;
- struct cvmx_gmxx_txx_stat5_s cn38xxp2;
- struct cvmx_gmxx_txx_stat5_s cn50xx;
- struct cvmx_gmxx_txx_stat5_s cn52xx;
- struct cvmx_gmxx_txx_stat5_s cn52xxp1;
- struct cvmx_gmxx_txx_stat5_s cn56xx;
- struct cvmx_gmxx_txx_stat5_s cn56xxp1;
- struct cvmx_gmxx_txx_stat5_s cn58xx;
- struct cvmx_gmxx_txx_stat5_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_stat6 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat6_s {
- uint64_t hist5:32;
- uint64_t hist4:32;
- } s;
- struct cvmx_gmxx_txx_stat6_s cn30xx;
- struct cvmx_gmxx_txx_stat6_s cn31xx;
- struct cvmx_gmxx_txx_stat6_s cn38xx;
- struct cvmx_gmxx_txx_stat6_s cn38xxp2;
- struct cvmx_gmxx_txx_stat6_s cn50xx;
- struct cvmx_gmxx_txx_stat6_s cn52xx;
- struct cvmx_gmxx_txx_stat6_s cn52xxp1;
- struct cvmx_gmxx_txx_stat6_s cn56xx;
- struct cvmx_gmxx_txx_stat6_s cn56xxp1;
- struct cvmx_gmxx_txx_stat6_s cn58xx;
- struct cvmx_gmxx_txx_stat6_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_stat7 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat7_s {
- uint64_t hist7:32;
- uint64_t hist6:32;
- } s;
- struct cvmx_gmxx_txx_stat7_s cn30xx;
- struct cvmx_gmxx_txx_stat7_s cn31xx;
- struct cvmx_gmxx_txx_stat7_s cn38xx;
- struct cvmx_gmxx_txx_stat7_s cn38xxp2;
- struct cvmx_gmxx_txx_stat7_s cn50xx;
- struct cvmx_gmxx_txx_stat7_s cn52xx;
- struct cvmx_gmxx_txx_stat7_s cn52xxp1;
- struct cvmx_gmxx_txx_stat7_s cn56xx;
- struct cvmx_gmxx_txx_stat7_s cn56xxp1;
- struct cvmx_gmxx_txx_stat7_s cn58xx;
- struct cvmx_gmxx_txx_stat7_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_stat8 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat8_s {
- uint64_t mcst:32;
- uint64_t bcst:32;
- } s;
- struct cvmx_gmxx_txx_stat8_s cn30xx;
- struct cvmx_gmxx_txx_stat8_s cn31xx;
- struct cvmx_gmxx_txx_stat8_s cn38xx;
- struct cvmx_gmxx_txx_stat8_s cn38xxp2;
- struct cvmx_gmxx_txx_stat8_s cn50xx;
- struct cvmx_gmxx_txx_stat8_s cn52xx;
- struct cvmx_gmxx_txx_stat8_s cn52xxp1;
- struct cvmx_gmxx_txx_stat8_s cn56xx;
- struct cvmx_gmxx_txx_stat8_s cn56xxp1;
- struct cvmx_gmxx_txx_stat8_s cn58xx;
- struct cvmx_gmxx_txx_stat8_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_stat9 {
- uint64_t u64;
- struct cvmx_gmxx_txx_stat9_s {
- uint64_t undflw:32;
- uint64_t ctl:32;
- } s;
- struct cvmx_gmxx_txx_stat9_s cn30xx;
- struct cvmx_gmxx_txx_stat9_s cn31xx;
- struct cvmx_gmxx_txx_stat9_s cn38xx;
- struct cvmx_gmxx_txx_stat9_s cn38xxp2;
- struct cvmx_gmxx_txx_stat9_s cn50xx;
- struct cvmx_gmxx_txx_stat9_s cn52xx;
- struct cvmx_gmxx_txx_stat9_s cn52xxp1;
- struct cvmx_gmxx_txx_stat9_s cn56xx;
- struct cvmx_gmxx_txx_stat9_s cn56xxp1;
- struct cvmx_gmxx_txx_stat9_s cn58xx;
- struct cvmx_gmxx_txx_stat9_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_stats_ctl {
- uint64_t u64;
- struct cvmx_gmxx_txx_stats_ctl_s {
- uint64_t reserved_1_63:63;
- uint64_t rd_clr:1;
- } s;
- struct cvmx_gmxx_txx_stats_ctl_s cn30xx;
- struct cvmx_gmxx_txx_stats_ctl_s cn31xx;
- struct cvmx_gmxx_txx_stats_ctl_s cn38xx;
- struct cvmx_gmxx_txx_stats_ctl_s cn38xxp2;
- struct cvmx_gmxx_txx_stats_ctl_s cn50xx;
- struct cvmx_gmxx_txx_stats_ctl_s cn52xx;
- struct cvmx_gmxx_txx_stats_ctl_s cn52xxp1;
- struct cvmx_gmxx_txx_stats_ctl_s cn56xx;
- struct cvmx_gmxx_txx_stats_ctl_s cn56xxp1;
- struct cvmx_gmxx_txx_stats_ctl_s cn58xx;
- struct cvmx_gmxx_txx_stats_ctl_s cn58xxp1;
-};
-
-union cvmx_gmxx_txx_thresh {
- uint64_t u64;
- struct cvmx_gmxx_txx_thresh_s {
- uint64_t reserved_9_63:55;
- uint64_t cnt:9;
- } s;
- struct cvmx_gmxx_txx_thresh_cn30xx {
- uint64_t reserved_7_63:57;
- uint64_t cnt:7;
- } cn30xx;
- struct cvmx_gmxx_txx_thresh_cn30xx cn31xx;
- struct cvmx_gmxx_txx_thresh_s cn38xx;
- struct cvmx_gmxx_txx_thresh_s cn38xxp2;
- struct cvmx_gmxx_txx_thresh_cn30xx cn50xx;
- struct cvmx_gmxx_txx_thresh_s cn52xx;
- struct cvmx_gmxx_txx_thresh_s cn52xxp1;
- struct cvmx_gmxx_txx_thresh_s cn56xx;
- struct cvmx_gmxx_txx_thresh_s cn56xxp1;
- struct cvmx_gmxx_txx_thresh_s cn58xx;
- struct cvmx_gmxx_txx_thresh_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_bp {
- uint64_t u64;
- struct cvmx_gmxx_tx_bp_s {
- uint64_t reserved_4_63:60;
- uint64_t bp:4;
- } s;
- struct cvmx_gmxx_tx_bp_cn30xx {
- uint64_t reserved_3_63:61;
- uint64_t bp:3;
- } cn30xx;
- struct cvmx_gmxx_tx_bp_cn30xx cn31xx;
- struct cvmx_gmxx_tx_bp_s cn38xx;
- struct cvmx_gmxx_tx_bp_s cn38xxp2;
- struct cvmx_gmxx_tx_bp_cn30xx cn50xx;
- struct cvmx_gmxx_tx_bp_s cn52xx;
- struct cvmx_gmxx_tx_bp_s cn52xxp1;
- struct cvmx_gmxx_tx_bp_s cn56xx;
- struct cvmx_gmxx_tx_bp_s cn56xxp1;
- struct cvmx_gmxx_tx_bp_s cn58xx;
- struct cvmx_gmxx_tx_bp_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_clk_mskx {
- uint64_t u64;
- struct cvmx_gmxx_tx_clk_mskx_s {
- uint64_t reserved_1_63:63;
- uint64_t msk:1;
- } s;
- struct cvmx_gmxx_tx_clk_mskx_s cn30xx;
- struct cvmx_gmxx_tx_clk_mskx_s cn50xx;
-};
-
-union cvmx_gmxx_tx_col_attempt {
- uint64_t u64;
- struct cvmx_gmxx_tx_col_attempt_s {
- uint64_t reserved_5_63:59;
- uint64_t limit:5;
- } s;
- struct cvmx_gmxx_tx_col_attempt_s cn30xx;
- struct cvmx_gmxx_tx_col_attempt_s cn31xx;
- struct cvmx_gmxx_tx_col_attempt_s cn38xx;
- struct cvmx_gmxx_tx_col_attempt_s cn38xxp2;
- struct cvmx_gmxx_tx_col_attempt_s cn50xx;
- struct cvmx_gmxx_tx_col_attempt_s cn52xx;
- struct cvmx_gmxx_tx_col_attempt_s cn52xxp1;
- struct cvmx_gmxx_tx_col_attempt_s cn56xx;
- struct cvmx_gmxx_tx_col_attempt_s cn56xxp1;
- struct cvmx_gmxx_tx_col_attempt_s cn58xx;
- struct cvmx_gmxx_tx_col_attempt_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_corrupt {
- uint64_t u64;
- struct cvmx_gmxx_tx_corrupt_s {
- uint64_t reserved_4_63:60;
- uint64_t corrupt:4;
- } s;
- struct cvmx_gmxx_tx_corrupt_cn30xx {
- uint64_t reserved_3_63:61;
- uint64_t corrupt:3;
- } cn30xx;
- struct cvmx_gmxx_tx_corrupt_cn30xx cn31xx;
- struct cvmx_gmxx_tx_corrupt_s cn38xx;
- struct cvmx_gmxx_tx_corrupt_s cn38xxp2;
- struct cvmx_gmxx_tx_corrupt_cn30xx cn50xx;
- struct cvmx_gmxx_tx_corrupt_s cn52xx;
- struct cvmx_gmxx_tx_corrupt_s cn52xxp1;
- struct cvmx_gmxx_tx_corrupt_s cn56xx;
- struct cvmx_gmxx_tx_corrupt_s cn56xxp1;
- struct cvmx_gmxx_tx_corrupt_s cn58xx;
- struct cvmx_gmxx_tx_corrupt_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_hg2_reg1 {
- uint64_t u64;
- struct cvmx_gmxx_tx_hg2_reg1_s {
- uint64_t reserved_16_63:48;
- uint64_t tx_xof:16;
- } s;
- struct cvmx_gmxx_tx_hg2_reg1_s cn52xx;
- struct cvmx_gmxx_tx_hg2_reg1_s cn52xxp1;
- struct cvmx_gmxx_tx_hg2_reg1_s cn56xx;
-};
-
-union cvmx_gmxx_tx_hg2_reg2 {
- uint64_t u64;
- struct cvmx_gmxx_tx_hg2_reg2_s {
- uint64_t reserved_16_63:48;
- uint64_t tx_xon:16;
- } s;
- struct cvmx_gmxx_tx_hg2_reg2_s cn52xx;
- struct cvmx_gmxx_tx_hg2_reg2_s cn52xxp1;
- struct cvmx_gmxx_tx_hg2_reg2_s cn56xx;
-};
-
-union cvmx_gmxx_tx_ifg {
- uint64_t u64;
- struct cvmx_gmxx_tx_ifg_s {
- uint64_t reserved_8_63:56;
- uint64_t ifg2:4;
- uint64_t ifg1:4;
- } s;
- struct cvmx_gmxx_tx_ifg_s cn30xx;
- struct cvmx_gmxx_tx_ifg_s cn31xx;
- struct cvmx_gmxx_tx_ifg_s cn38xx;
- struct cvmx_gmxx_tx_ifg_s cn38xxp2;
- struct cvmx_gmxx_tx_ifg_s cn50xx;
- struct cvmx_gmxx_tx_ifg_s cn52xx;
- struct cvmx_gmxx_tx_ifg_s cn52xxp1;
- struct cvmx_gmxx_tx_ifg_s cn56xx;
- struct cvmx_gmxx_tx_ifg_s cn56xxp1;
- struct cvmx_gmxx_tx_ifg_s cn58xx;
- struct cvmx_gmxx_tx_ifg_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_int_en {
- uint64_t u64;
- struct cvmx_gmxx_tx_int_en_s {
- uint64_t reserved_20_63:44;
- uint64_t late_col:4;
- uint64_t xsdef:4;
- uint64_t xscol:4;
- uint64_t reserved_6_7:2;
- uint64_t undflw:4;
- uint64_t ncb_nxa:1;
- uint64_t pko_nxa:1;
- } s;
- struct cvmx_gmxx_tx_int_en_cn30xx {
- uint64_t reserved_19_63:45;
- uint64_t late_col:3;
- uint64_t reserved_15_15:1;
- uint64_t xsdef:3;
- uint64_t reserved_11_11:1;
- uint64_t xscol:3;
- uint64_t reserved_5_7:3;
- uint64_t undflw:3;
- uint64_t reserved_1_1:1;
- uint64_t pko_nxa:1;
- } cn30xx;
- struct cvmx_gmxx_tx_int_en_cn31xx {
- uint64_t reserved_15_63:49;
- uint64_t xsdef:3;
- uint64_t reserved_11_11:1;
- uint64_t xscol:3;
- uint64_t reserved_5_7:3;
- uint64_t undflw:3;
- uint64_t reserved_1_1:1;
- uint64_t pko_nxa:1;
- } cn31xx;
- struct cvmx_gmxx_tx_int_en_s cn38xx;
- struct cvmx_gmxx_tx_int_en_cn38xxp2 {
- uint64_t reserved_16_63:48;
- uint64_t xsdef:4;
- uint64_t xscol:4;
- uint64_t reserved_6_7:2;
- uint64_t undflw:4;
- uint64_t ncb_nxa:1;
- uint64_t pko_nxa:1;
- } cn38xxp2;
- struct cvmx_gmxx_tx_int_en_cn30xx cn50xx;
- struct cvmx_gmxx_tx_int_en_cn52xx {
- uint64_t reserved_20_63:44;
- uint64_t late_col:4;
- uint64_t xsdef:4;
- uint64_t xscol:4;
- uint64_t reserved_6_7:2;
- uint64_t undflw:4;
- uint64_t reserved_1_1:1;
- uint64_t pko_nxa:1;
- } cn52xx;
- struct cvmx_gmxx_tx_int_en_cn52xx cn52xxp1;
- struct cvmx_gmxx_tx_int_en_cn52xx cn56xx;
- struct cvmx_gmxx_tx_int_en_cn52xx cn56xxp1;
- struct cvmx_gmxx_tx_int_en_s cn58xx;
- struct cvmx_gmxx_tx_int_en_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_int_reg {
- uint64_t u64;
- struct cvmx_gmxx_tx_int_reg_s {
- uint64_t reserved_20_63:44;
- uint64_t late_col:4;
- uint64_t xsdef:4;
- uint64_t xscol:4;
- uint64_t reserved_6_7:2;
- uint64_t undflw:4;
- uint64_t ncb_nxa:1;
- uint64_t pko_nxa:1;
- } s;
- struct cvmx_gmxx_tx_int_reg_cn30xx {
- uint64_t reserved_19_63:45;
- uint64_t late_col:3;
- uint64_t reserved_15_15:1;
- uint64_t xsdef:3;
- uint64_t reserved_11_11:1;
- uint64_t xscol:3;
- uint64_t reserved_5_7:3;
- uint64_t undflw:3;
- uint64_t reserved_1_1:1;
- uint64_t pko_nxa:1;
- } cn30xx;
- struct cvmx_gmxx_tx_int_reg_cn31xx {
- uint64_t reserved_15_63:49;
- uint64_t xsdef:3;
- uint64_t reserved_11_11:1;
- uint64_t xscol:3;
- uint64_t reserved_5_7:3;
- uint64_t undflw:3;
- uint64_t reserved_1_1:1;
- uint64_t pko_nxa:1;
- } cn31xx;
- struct cvmx_gmxx_tx_int_reg_s cn38xx;
- struct cvmx_gmxx_tx_int_reg_cn38xxp2 {
- uint64_t reserved_16_63:48;
- uint64_t xsdef:4;
- uint64_t xscol:4;
- uint64_t reserved_6_7:2;
- uint64_t undflw:4;
- uint64_t ncb_nxa:1;
- uint64_t pko_nxa:1;
- } cn38xxp2;
- struct cvmx_gmxx_tx_int_reg_cn30xx cn50xx;
- struct cvmx_gmxx_tx_int_reg_cn52xx {
- uint64_t reserved_20_63:44;
- uint64_t late_col:4;
- uint64_t xsdef:4;
- uint64_t xscol:4;
- uint64_t reserved_6_7:2;
- uint64_t undflw:4;
- uint64_t reserved_1_1:1;
- uint64_t pko_nxa:1;
- } cn52xx;
- struct cvmx_gmxx_tx_int_reg_cn52xx cn52xxp1;
- struct cvmx_gmxx_tx_int_reg_cn52xx cn56xx;
- struct cvmx_gmxx_tx_int_reg_cn52xx cn56xxp1;
- struct cvmx_gmxx_tx_int_reg_s cn58xx;
- struct cvmx_gmxx_tx_int_reg_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_jam {
- uint64_t u64;
- struct cvmx_gmxx_tx_jam_s {
- uint64_t reserved_8_63:56;
- uint64_t jam:8;
- } s;
- struct cvmx_gmxx_tx_jam_s cn30xx;
- struct cvmx_gmxx_tx_jam_s cn31xx;
- struct cvmx_gmxx_tx_jam_s cn38xx;
- struct cvmx_gmxx_tx_jam_s cn38xxp2;
- struct cvmx_gmxx_tx_jam_s cn50xx;
- struct cvmx_gmxx_tx_jam_s cn52xx;
- struct cvmx_gmxx_tx_jam_s cn52xxp1;
- struct cvmx_gmxx_tx_jam_s cn56xx;
- struct cvmx_gmxx_tx_jam_s cn56xxp1;
- struct cvmx_gmxx_tx_jam_s cn58xx;
- struct cvmx_gmxx_tx_jam_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_lfsr {
- uint64_t u64;
- struct cvmx_gmxx_tx_lfsr_s {
- uint64_t reserved_16_63:48;
- uint64_t lfsr:16;
- } s;
- struct cvmx_gmxx_tx_lfsr_s cn30xx;
- struct cvmx_gmxx_tx_lfsr_s cn31xx;
- struct cvmx_gmxx_tx_lfsr_s cn38xx;
- struct cvmx_gmxx_tx_lfsr_s cn38xxp2;
- struct cvmx_gmxx_tx_lfsr_s cn50xx;
- struct cvmx_gmxx_tx_lfsr_s cn52xx;
- struct cvmx_gmxx_tx_lfsr_s cn52xxp1;
- struct cvmx_gmxx_tx_lfsr_s cn56xx;
- struct cvmx_gmxx_tx_lfsr_s cn56xxp1;
- struct cvmx_gmxx_tx_lfsr_s cn58xx;
- struct cvmx_gmxx_tx_lfsr_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_ovr_bp {
- uint64_t u64;
- struct cvmx_gmxx_tx_ovr_bp_s {
- uint64_t reserved_48_63:16;
- uint64_t tx_prt_bp:16;
- uint64_t reserved_12_31:20;
- uint64_t en:4;
- uint64_t bp:4;
- uint64_t ign_full:4;
- } s;
- struct cvmx_gmxx_tx_ovr_bp_cn30xx {
- uint64_t reserved_11_63:53;
- uint64_t en:3;
- uint64_t reserved_7_7:1;
- uint64_t bp:3;
- uint64_t reserved_3_3:1;
- uint64_t ign_full:3;
- } cn30xx;
- struct cvmx_gmxx_tx_ovr_bp_cn30xx cn31xx;
- struct cvmx_gmxx_tx_ovr_bp_cn38xx {
- uint64_t reserved_12_63:52;
- uint64_t en:4;
- uint64_t bp:4;
- uint64_t ign_full:4;
- } cn38xx;
- struct cvmx_gmxx_tx_ovr_bp_cn38xx cn38xxp2;
- struct cvmx_gmxx_tx_ovr_bp_cn30xx cn50xx;
- struct cvmx_gmxx_tx_ovr_bp_s cn52xx;
- struct cvmx_gmxx_tx_ovr_bp_s cn52xxp1;
- struct cvmx_gmxx_tx_ovr_bp_s cn56xx;
- struct cvmx_gmxx_tx_ovr_bp_s cn56xxp1;
- struct cvmx_gmxx_tx_ovr_bp_cn38xx cn58xx;
- struct cvmx_gmxx_tx_ovr_bp_cn38xx cn58xxp1;
-};
-
-union cvmx_gmxx_tx_pause_pkt_dmac {
- uint64_t u64;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s {
- uint64_t reserved_48_63:16;
- uint64_t dmac:48;
- } s;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn30xx;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn31xx;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn38xx;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn38xxp2;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn50xx;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn52xx;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn52xxp1;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn56xx;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn56xxp1;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn58xx;
- struct cvmx_gmxx_tx_pause_pkt_dmac_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_pause_pkt_type {
- uint64_t u64;
- struct cvmx_gmxx_tx_pause_pkt_type_s {
- uint64_t reserved_16_63:48;
- uint64_t type:16;
- } s;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn30xx;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn31xx;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn38xx;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn38xxp2;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn50xx;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn52xx;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn52xxp1;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn56xx;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn56xxp1;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn58xx;
- struct cvmx_gmxx_tx_pause_pkt_type_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_prts {
- uint64_t u64;
- struct cvmx_gmxx_tx_prts_s {
- uint64_t reserved_5_63:59;
- uint64_t prts:5;
- } s;
- struct cvmx_gmxx_tx_prts_s cn30xx;
- struct cvmx_gmxx_tx_prts_s cn31xx;
- struct cvmx_gmxx_tx_prts_s cn38xx;
- struct cvmx_gmxx_tx_prts_s cn38xxp2;
- struct cvmx_gmxx_tx_prts_s cn50xx;
- struct cvmx_gmxx_tx_prts_s cn52xx;
- struct cvmx_gmxx_tx_prts_s cn52xxp1;
- struct cvmx_gmxx_tx_prts_s cn56xx;
- struct cvmx_gmxx_tx_prts_s cn56xxp1;
- struct cvmx_gmxx_tx_prts_s cn58xx;
- struct cvmx_gmxx_tx_prts_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_spi_ctl {
- uint64_t u64;
- struct cvmx_gmxx_tx_spi_ctl_s {
- uint64_t reserved_2_63:62;
- uint64_t tpa_clr:1;
- uint64_t cont_pkt:1;
- } s;
- struct cvmx_gmxx_tx_spi_ctl_s cn38xx;
- struct cvmx_gmxx_tx_spi_ctl_s cn38xxp2;
- struct cvmx_gmxx_tx_spi_ctl_s cn58xx;
- struct cvmx_gmxx_tx_spi_ctl_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_spi_drain {
- uint64_t u64;
- struct cvmx_gmxx_tx_spi_drain_s {
- uint64_t reserved_16_63:48;
- uint64_t drain:16;
- } s;
- struct cvmx_gmxx_tx_spi_drain_s cn38xx;
- struct cvmx_gmxx_tx_spi_drain_s cn58xx;
- struct cvmx_gmxx_tx_spi_drain_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_spi_max {
- uint64_t u64;
- struct cvmx_gmxx_tx_spi_max_s {
- uint64_t reserved_23_63:41;
- uint64_t slice:7;
- uint64_t max2:8;
- uint64_t max1:8;
- } s;
- struct cvmx_gmxx_tx_spi_max_cn38xx {
- uint64_t reserved_16_63:48;
- uint64_t max2:8;
- uint64_t max1:8;
- } cn38xx;
- struct cvmx_gmxx_tx_spi_max_cn38xx cn38xxp2;
- struct cvmx_gmxx_tx_spi_max_s cn58xx;
- struct cvmx_gmxx_tx_spi_max_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_spi_roundx {
- uint64_t u64;
- struct cvmx_gmxx_tx_spi_roundx_s {
- uint64_t reserved_16_63:48;
- uint64_t round:16;
- } s;
- struct cvmx_gmxx_tx_spi_roundx_s cn58xx;
- struct cvmx_gmxx_tx_spi_roundx_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_spi_thresh {
- uint64_t u64;
- struct cvmx_gmxx_tx_spi_thresh_s {
- uint64_t reserved_6_63:58;
- uint64_t thresh:6;
- } s;
- struct cvmx_gmxx_tx_spi_thresh_s cn38xx;
- struct cvmx_gmxx_tx_spi_thresh_s cn38xxp2;
- struct cvmx_gmxx_tx_spi_thresh_s cn58xx;
- struct cvmx_gmxx_tx_spi_thresh_s cn58xxp1;
-};
-
-union cvmx_gmxx_tx_xaui_ctl {
- uint64_t u64;
- struct cvmx_gmxx_tx_xaui_ctl_s {
- uint64_t reserved_11_63:53;
- uint64_t hg_pause_hgi:2;
- uint64_t hg_en:1;
- uint64_t reserved_7_7:1;
- uint64_t ls_byp:1;
- uint64_t ls:2;
- uint64_t reserved_2_3:2;
- uint64_t uni_en:1;
- uint64_t dic_en:1;
- } s;
- struct cvmx_gmxx_tx_xaui_ctl_s cn52xx;
- struct cvmx_gmxx_tx_xaui_ctl_s cn52xxp1;
- struct cvmx_gmxx_tx_xaui_ctl_s cn56xx;
- struct cvmx_gmxx_tx_xaui_ctl_s cn56xxp1;
-};
-
-union cvmx_gmxx_xaui_ext_loopback {
- uint64_t u64;
- struct cvmx_gmxx_xaui_ext_loopback_s {
- uint64_t reserved_5_63:59;
- uint64_t en:1;
- uint64_t thresh:4;
- } s;
- struct cvmx_gmxx_xaui_ext_loopback_s cn52xx;
- struct cvmx_gmxx_xaui_ext_loopback_s cn52xxp1;
- struct cvmx_gmxx_xaui_ext_loopback_s cn56xx;
- struct cvmx_gmxx_xaui_ext_loopback_s cn56xxp1;
-};
-
-#endif
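Every union in the header deleted above follows the same layout: a raw .u64 view of the 64-bit CSR alongside one or more chip-specific bit-field views. A minimal sketch of that access pattern, assuming the CVMX_GMXX_RX_PRTS(block_id) address macro defined earlier in this header and the cvmx_read_csr() accessor used elsewhere in this driver ("interface" here stands for the GMX block number):

	union cvmx_gmxx_rx_prts rx_prts;

	/* Read the raw CSR for this GMX block, then pull out the 3-bit PRTS field */
	rx_prts.u64 = cvmx_read_csr(CVMX_GMXX_RX_PRTS(interface));
	int num_rx_ports = rx_prts.s.prts;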
diff --git a/drivers/staging/octeon/cvmx-helper-board.c b/drivers/staging/octeon/cvmx-helper-board.c
deleted file mode 100644
index 57d35dc..0000000
--- a/drivers/staging/octeon/cvmx-helper-board.c
+++ /dev/null
@@ -1,695 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- *
- * Helper functions to abstract board specific data about
- * network ports from the rest of the cvmx-helper files.
- */
-
-#include <asm/octeon/octeon.h>
-#include <asm/octeon/cvmx-bootinfo.h>
-
-#include "cvmx-config.h"
-
-#include "cvmx-mdio.h"
-
-#include "cvmx-helper.h"
-#include "cvmx-helper-util.h"
-#include "cvmx-helper-board.h"
-
-#include "cvmx-gmxx-defs.h"
-#include "cvmx-asxx-defs.h"
-
-/**
- * cvmx_override_board_link_get(int ipd_port) is a function
- * pointer. It is meant to allow customization of the process of
- * talking to a PHY to determine link speed. It is called every
- * time a PHY must be polled for link status. Users should set
- * this pointer to a function before calling any cvmx-helper
- * operations.
- */
-cvmx_helper_link_info_t(*cvmx_override_board_link_get) (int ipd_port) =
- NULL;
-
-/**
- * Return the MII PHY address associated with the given IPD
- * port. A result of -1 means there isn't a MII capable PHY
- * connected to this port. On chips supporting multiple MII
- * busses the bus number is encoded in bits <15:8>.
- *
- * This function must be modified for every new Octeon board.
- * Internally it uses switch statements based on the cvmx_sysinfo
- * data to determine board types and revisions. It relies on the
- * fact that every Octeon board receives a unique board type
- * enumeration from the bootloader.
- *
- * @ipd_port: Octeon IPD port to get the MII address for.
- *
- * Returns MII PHY address and bus number or -1.
- */
-int cvmx_helper_board_get_mii_address(int ipd_port)
-{
- switch (cvmx_sysinfo_get()->board_type) {
- case CVMX_BOARD_TYPE_SIM:
- /* Simulator doesn't have MII */
- return -1;
- case CVMX_BOARD_TYPE_EBT3000:
- case CVMX_BOARD_TYPE_EBT5800:
- case CVMX_BOARD_TYPE_THUNDER:
- case CVMX_BOARD_TYPE_NICPRO2:
- /* Interface 0 is SPI4, interface 1 is RGMII */
- if ((ipd_port >= 16) && (ipd_port < 20))
- return ipd_port - 16;
- else
- return -1;
- case CVMX_BOARD_TYPE_KODAMA:
- case CVMX_BOARD_TYPE_EBH3100:
- case CVMX_BOARD_TYPE_HIKARI:
- case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
- case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
- case CVMX_BOARD_TYPE_CN3020_EVB_HS5:
- /*
- * Port 0 is WAN connected to a PHY, Port 1 is GMII
- * connected to a switch
- */
- if (ipd_port == 0)
- return 4;
- else if (ipd_port == 1)
- return 9;
- else
- return -1;
- case CVMX_BOARD_TYPE_NAC38:
- /* Board has 8 RGMII ports; PHYs are 0-7 */
- if ((ipd_port >= 0) && (ipd_port < 4))
- return ipd_port;
- else if ((ipd_port >= 16) && (ipd_port < 20))
- return ipd_port - 16 + 4;
- else
- return -1;
- case CVMX_BOARD_TYPE_EBH3000:
- /* Board has dual SPI4 and no PHYs */
- return -1;
- case CVMX_BOARD_TYPE_EBH5200:
- case CVMX_BOARD_TYPE_EBH5201:
- case CVMX_BOARD_TYPE_EBT5200:
- /*
- * Board has 4 SGMII ports. The PHYs start right after the MII
- * ports: MII0 = 0, MII1 = 1, SGMII = 2-5.
- */
- if ((ipd_port >= 0) && (ipd_port < 4))
- return ipd_port + 2;
- else
- return -1;
- case CVMX_BOARD_TYPE_EBH5600:
- case CVMX_BOARD_TYPE_EBH5601:
- case CVMX_BOARD_TYPE_EBH5610:
- /*
- * Board has 8 SGMII ports: 4 connect out, 2 connect
- * to a switch, and 2 loop to each other
- */
- if ((ipd_port >= 0) && (ipd_port < 4))
- return ipd_port + 1;
- else
- return -1;
- case CVMX_BOARD_TYPE_CUST_NB5:
- if (ipd_port == 2)
- return 4;
- else
- return -1;
- case CVMX_BOARD_TYPE_NIC_XLE_4G:
- /* Board has 4 SGMII ports connected to QLM3 (interface 1) */
- if ((ipd_port >= 16) && (ipd_port < 20))
- return ipd_port - 16 + 1;
- else
- return -1;
- case CVMX_BOARD_TYPE_BBGW_REF:
- /*
- * No PHYs are connected to Octeon; everything goes
- * through the switch.
- */
- return -1;
-
- case CVMX_BOARD_TYPE_CUST_WSX16:
- if (ipd_port >= 0 && ipd_port <= 3)
- return ipd_port;
- else if (ipd_port >= 16 && ipd_port <= 19)
- return ipd_port - 16 + 4;
- else
- return -1;
- }
-
- /* Some unknown board. Somebody forgot to update this function... */
- cvmx_dprintf
- ("cvmx_helper_board_get_mii_address: Unknown board type %d\n",
- cvmx_sysinfo_get()->board_type);
- return -1;
-}
-
-/**
- * This function is the board specific method of determining an
- * ethernet ports link speed. Most Octeon boards have Marvell PHYs
- * and are handled by the fall through case. This function must be
- * updated for boards that don't have the normal Marvell PHYs.
- *
- * This function must be modified for every new Octeon board.
- * Internally it uses switch statements based on the cvmx_sysinfo
- * data to determine board types and revisions. It relies on the
- * fact that every Octeon board receives a unique board type
- * enumeration from the bootloader.
- *
- * @ipd_port: IPD input port associated with the port we want to get link
- * status for.
- *
- * Returns The port's link status. If the link isn't fully resolved, this must
- * return zero.
- */
-cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
-{
- cvmx_helper_link_info_t result;
- int phy_addr;
- int is_broadcom_phy = 0;
-
- /* Give the user a chance to override the processing of this function */
- if (cvmx_override_board_link_get)
- return cvmx_override_board_link_get(ipd_port);
-
- /* Unless we fix it later, all links are defaulted to down */
- result.u64 = 0;
-
- /*
- * This switch statement should handle all ports that either don't use
- * Marvell PHYS, or don't support in-band status.
- */
- switch (cvmx_sysinfo_get()->board_type) {
- case CVMX_BOARD_TYPE_SIM:
- /* The simulator gives you a simulated 1Gbps full duplex link */
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 1000;
- return result;
- case CVMX_BOARD_TYPE_EBH3100:
- case CVMX_BOARD_TYPE_CN3010_EVB_HS5:
- case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
- case CVMX_BOARD_TYPE_CN3020_EVB_HS5:
- /* Port 1 on these boards is always Gigabit */
- if (ipd_port == 1) {
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 1000;
- return result;
- }
- /* Fall through to the generic code below */
- break;
- case CVMX_BOARD_TYPE_CUST_NB5:
- /* Port 1 on these boards is always Gigabit */
- if (ipd_port == 1) {
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 1000;
- return result;
- } else /* The other port uses a Broadcom PHY */
- is_broadcom_phy = 1;
- break;
- case CVMX_BOARD_TYPE_BBGW_REF:
- /* Ports 0 and 1 connect to a switch; port 2 is not hooked up */
- if (ipd_port == 2) {
- /* Port 2 is not hooked up */
- result.u64 = 0;
- return result;
- } else {
- /* Ports 0 and 1 connect to the switch */
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 1000;
- return result;
- }
- break;
- }
-
- phy_addr = cvmx_helper_board_get_mii_address(ipd_port);
- if (phy_addr != -1) {
- if (is_broadcom_phy) {
- /*
- * Below we are going to read SMI/MDIO
- * register 0x19 which works on Broadcom
- * parts
- */
- int phy_status =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- 0x19);
- switch ((phy_status >> 8) & 0x7) {
- case 0:
- result.u64 = 0;
- break;
- case 1:
- result.s.link_up = 1;
- result.s.full_duplex = 0;
- result.s.speed = 10;
- break;
- case 2:
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 10;
- break;
- case 3:
- result.s.link_up = 1;
- result.s.full_duplex = 0;
- result.s.speed = 100;
- break;
- case 4:
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 100;
- break;
- case 5:
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 100;
- break;
- case 6:
- result.s.link_up = 1;
- result.s.full_duplex = 0;
- result.s.speed = 1000;
- break;
- case 7:
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 1000;
- break;
- }
- } else {
- /*
- * This code assumes we are using a Marvell
- * Gigabit PHY. All the speed information can
- * be read from register 17 in one
- * go. Somebody using a different PHY will
- * need to handle it above in the board
- * specific area.
- */
- int phy_status =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff, 17);
-
- /*
- * If the resolve bit 11 isn't set, see if
- * autoneg is turned off (bit 12, reg 0). The
- * resolve bit doesn't get set properly when
- * autoneg is off, so force it.
- */
- if ((phy_status & (1 << 11)) == 0) {
- int auto_status =
- cvmx_mdio_read(phy_addr >> 8,
- phy_addr & 0xff, 0);
- if ((auto_status & (1 << 12)) == 0)
- phy_status |= 1 << 11;
- }
-
- /*
- * Only return a link if the PHY has finished
- * auto negotiation and set the resolved bit
- * (bit 11)
- */
- if (phy_status & (1 << 11)) {
- result.s.link_up = 1;
- result.s.full_duplex = ((phy_status >> 13) & 1);
- switch ((phy_status >> 14) & 3) {
- case 0: /* 10 Mbps */
- result.s.speed = 10;
- break;
- case 1: /* 100 Mbps */
- result.s.speed = 100;
- break;
- case 2: /* 1 Gbps */
- result.s.speed = 1000;
- break;
- case 3: /* Illegal */
- result.u64 = 0;
- break;
- }
- }
- }
- } else if (OCTEON_IS_MODEL(OCTEON_CN3XXX)
- || OCTEON_IS_MODEL(OCTEON_CN58XX)
- || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
- /*
- * We don't have a PHY address, so attempt to use
- * in-band status. It is really important that boards
- * not supporting in-band status never get
- * here. Reading broken in-band status tends to do bad
- * things
- */
- union cvmx_gmxx_rxx_rx_inbnd inband_status;
- int interface = cvmx_helper_get_interface_num(ipd_port);
- int index = cvmx_helper_get_interface_index_num(ipd_port);
- inband_status.u64 =
- cvmx_read_csr(CVMX_GMXX_RXX_RX_INBND(index, interface));
-
- result.s.link_up = inband_status.s.status;
- result.s.full_duplex = inband_status.s.duplex;
- switch (inband_status.s.speed) {
- case 0: /* 10 Mbps */
- result.s.speed = 10;
- break;
- case 1: /* 100 Mbps */
- result.s.speed = 100;
- break;
- case 2: /* 1 Gbps */
- result.s.speed = 1000;
- break;
- case 3: /* Illegal */
- result.u64 = 0;
- break;
- }
- } else {
- /*
- * We don't have a PHY address and we don't have
- * in-band status. There is no way to determine the
- * link speed. Return down assuming this port isn't
- * wired
- */
- result.u64 = 0;
- }
-
- /* If link is down, return all fields as zero. */
- if (!result.s.link_up)
- result.u64 = 0;
-
- return result;
-}
-
-/**
- * This function is the board specific method of changing the PHY
- * speed, duplex, and auto-negotiation. This programs the PHY and
- * not Octeon. This can be used to force Octeon's links to
- * specific settings.
- *
- * @phy_addr: The address of the PHY to program
- * @link_flags:
- * Flags to control auto-negotiation. Bit 0 enables or disables auto-negotiation.
- * @link_info: Link speed to program. If the speed is zero and auto-negotiation
- * is enabled, all possible negotiation speeds are advertised.
- *
- * Returns Zero on success, negative on failure
- */
-int cvmx_helper_board_link_set_phy(int phy_addr,
- cvmx_helper_board_set_phy_link_flags_types_t
- link_flags,
- cvmx_helper_link_info_t link_info)
-{
-
- /* Set the flow control settings based on link_flags */
- if ((link_flags & set_phy_link_flags_flow_control_mask) !=
- set_phy_link_flags_flow_control_dont_touch) {
- cvmx_mdio_phy_reg_autoneg_adver_t reg_autoneg_adver;
- reg_autoneg_adver.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_AUTONEG_ADVER);
- reg_autoneg_adver.s.asymmetric_pause =
- (link_flags & set_phy_link_flags_flow_control_mask) ==
- set_phy_link_flags_flow_control_enable;
- reg_autoneg_adver.s.pause =
- (link_flags & set_phy_link_flags_flow_control_mask) ==
- set_phy_link_flags_flow_control_enable;
- cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_AUTONEG_ADVER,
- reg_autoneg_adver.u16);
- }
-
- /* If speed isn't set and autoneg is on, advertise all supported modes */
- if ((link_flags & set_phy_link_flags_autoneg)
- && (link_info.s.speed == 0)) {
- cvmx_mdio_phy_reg_control_t reg_control;
- cvmx_mdio_phy_reg_status_t reg_status;
- cvmx_mdio_phy_reg_autoneg_adver_t reg_autoneg_adver;
- cvmx_mdio_phy_reg_extended_status_t reg_extended_status;
- cvmx_mdio_phy_reg_control_1000_t reg_control_1000;
-
- reg_status.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_STATUS);
- reg_autoneg_adver.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_AUTONEG_ADVER);
- reg_autoneg_adver.s.advert_100base_t4 =
- reg_status.s.capable_100base_t4;
- reg_autoneg_adver.s.advert_10base_tx_full =
- reg_status.s.capable_10_full;
- reg_autoneg_adver.s.advert_10base_tx_half =
- reg_status.s.capable_10_half;
- reg_autoneg_adver.s.advert_100base_tx_full =
- reg_status.s.capable_100base_x_full;
- reg_autoneg_adver.s.advert_100base_tx_half =
- reg_status.s.capable_100base_x_half;
- cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_AUTONEG_ADVER,
- reg_autoneg_adver.u16);
- if (reg_status.s.capable_extended_status) {
- reg_extended_status.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_EXTENDED_STATUS);
- reg_control_1000.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL_1000);
- reg_control_1000.s.advert_1000base_t_full =
- reg_extended_status.s.capable_1000base_t_full;
- reg_control_1000.s.advert_1000base_t_half =
- reg_extended_status.s.capable_1000base_t_half;
- cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL_1000,
- reg_control_1000.u16);
- }
- reg_control.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL);
- reg_control.s.autoneg_enable = 1;
- reg_control.s.restart_autoneg = 1;
- cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL, reg_control.u16);
- } else if ((link_flags & set_phy_link_flags_autoneg)) {
- cvmx_mdio_phy_reg_control_t reg_control;
- cvmx_mdio_phy_reg_status_t reg_status;
- cvmx_mdio_phy_reg_autoneg_adver_t reg_autoneg_adver;
- cvmx_mdio_phy_reg_extended_status_t reg_extended_status;
- cvmx_mdio_phy_reg_control_1000_t reg_control_1000;
-
- reg_status.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_STATUS);
- reg_autoneg_adver.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_AUTONEG_ADVER);
- reg_autoneg_adver.s.advert_100base_t4 = 0;
- reg_autoneg_adver.s.advert_10base_tx_full = 0;
- reg_autoneg_adver.s.advert_10base_tx_half = 0;
- reg_autoneg_adver.s.advert_100base_tx_full = 0;
- reg_autoneg_adver.s.advert_100base_tx_half = 0;
- if (reg_status.s.capable_extended_status) {
- reg_extended_status.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_EXTENDED_STATUS);
- reg_control_1000.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL_1000);
- reg_control_1000.s.advert_1000base_t_full = 0;
- reg_control_1000.s.advert_1000base_t_half = 0;
- }
- switch (link_info.s.speed) {
- case 10:
- reg_autoneg_adver.s.advert_10base_tx_full =
- link_info.s.full_duplex;
- reg_autoneg_adver.s.advert_10base_tx_half =
- !link_info.s.full_duplex;
- break;
- case 100:
- reg_autoneg_adver.s.advert_100base_tx_full =
- link_info.s.full_duplex;
- reg_autoneg_adver.s.advert_100base_tx_half =
- !link_info.s.full_duplex;
- break;
- case 1000:
- reg_control_1000.s.advert_1000base_t_full =
- link_info.s.full_duplex;
- reg_control_1000.s.advert_1000base_t_half =
- !link_info.s.full_duplex;
- break;
- }
- cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_AUTONEG_ADVER,
- reg_autoneg_adver.u16);
- if (reg_status.s.capable_extended_status)
- cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL_1000,
- reg_control_1000.u16);
- reg_control.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL);
- reg_control.s.autoneg_enable = 1;
- reg_control.s.restart_autoneg = 1;
- cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL, reg_control.u16);
- } else {
- cvmx_mdio_phy_reg_control_t reg_control;
- reg_control.u16 =
- cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL);
- reg_control.s.autoneg_enable = 0;
- reg_control.s.restart_autoneg = 1;
- reg_control.s.duplex = link_info.s.full_duplex;
- if (link_info.s.speed == 1000) {
- reg_control.s.speed_msb = 1;
- reg_control.s.speed_lsb = 0;
- } else if (link_info.s.speed == 100) {
- reg_control.s.speed_msb = 0;
- reg_control.s.speed_lsb = 1;
- } else if (link_info.s.speed == 10) {
- reg_control.s.speed_msb = 0;
- reg_control.s.speed_lsb = 0;
- }
- cvmx_mdio_write(phy_addr >> 8, phy_addr & 0xff,
- CVMX_MDIO_PHY_REG_CONTROL, reg_control.u16);
- }
- return 0;
-}
-
-/**
- * This function is called by cvmx_helper_interface_probe() after it
- * determines the number of ports Octeon can support on a specific
- * interface. This function is the per board location to override
- * this value. It is called with the number of ports Octeon might
- * support and should return the number of actual ports on the
- * board.
- *
- * This function must be modified for every new Octeon board.
- * Internally it uses switch statements based on the cvmx_sysinfo
- * data to determine board types and revisions. It relies on the
- * fact that every Octeon board receives a unique board type
- * enumeration from the bootloader.
- *
- * @interface: Interface to probe
- * @supported_ports:
- * Number of ports Octeon supports.
- *
- * Returns Number of ports the actual board supports. Many times this will
- * simply be "supported_ports".
- */
-int __cvmx_helper_board_interface_probe(int interface, int supported_ports)
-{
- switch (cvmx_sysinfo_get()->board_type) {
- case CVMX_BOARD_TYPE_CN3005_EVB_HS5:
- if (interface == 0)
- return 2;
- break;
- case CVMX_BOARD_TYPE_BBGW_REF:
- if (interface == 0)
- return 2;
- break;
- case CVMX_BOARD_TYPE_NIC_XLE_4G:
- if (interface == 0)
- return 0;
- break;
- /* The 2nd interface on the EBH5600 is connected to the Marvell switch,
- which we don't support. Disable ports connected to it */
- case CVMX_BOARD_TYPE_EBH5600:
- if (interface == 1)
- return 0;
- break;
- }
- return supported_ports;
-}
-
-/**
- * Enable packet input/output from the hardware. This function is
- * called by cvmx_helper_packet_hardware_enable() to
- * perform board specific initialization. For most boards
- * nothing is needed.
- *
- * @interface: Interface to enable
- *
- * Returns Zero on success, negative on failure
- */
-int __cvmx_helper_board_hardware_enable(int interface)
-{
- if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CN3005_EVB_HS5) {
- if (interface == 0) {
- /* Different config for switch port */
- cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(1, interface), 0);
- cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(1, interface), 0);
- /*
- * Boards with gigabit WAN ports need a
- * different setting that is compatible with
- * 100 Mbit settings
- */
- cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(0, interface),
- 0xc);
- cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(0, interface),
- 0xc);
- }
- } else if (cvmx_sysinfo_get()->board_type ==
- CVMX_BOARD_TYPE_CN3010_EVB_HS5) {
- /*
- * Broadcom PHYs require different ASX
- * clocks. Unfortunately many boards don't define a
- * new board ID and simply mangle the
- * CN3010_EVB_HS5
- */
- if (interface == 0) {
- /*
- * Some boards use a hacked up bootloader that
- * identifies them as CN3010_EVB_HS5
- * evaluation boards. This leads to all kinds
- * of configuration problems. Detect one
- * case, and print a warning, while trying to do
- * the right thing.
- */
- int phy_addr = cvmx_helper_board_get_mii_address(0);
- if (phy_addr != -1) {
- int phy_identifier =
- cvmx_mdio_read(phy_addr >> 8,
- phy_addr & 0xff, 0x2);
- /* Is it a Broadcom PHY? */
- if (phy_identifier == 0x0143) {
- cvmx_dprintf("\n");
- cvmx_dprintf("ERROR:\n");
- cvmx_dprintf
- ("ERROR: Board type is CVMX_BOARD_TYPE_CN3010_EVB_HS5, but Broadcom PHY found.\n");
- cvmx_dprintf
- ("ERROR: The board type is mis-configured, and software malfunctions are likely.\n");
- cvmx_dprintf
- ("ERROR: All boards require a unique board type to identify them.\n");
- cvmx_dprintf("ERROR:\n");
- cvmx_dprintf("\n");
- cvmx_wait(1000000000);
- cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX
- (0, interface), 5);
- cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX
- (0, interface), 5);
- }
- }
- }
- }
- return 0;
-}
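The helpers deleted above feed the generic cvmx-helper link-polling code. A short usage sketch of the MII address encoding they document (bus number in bits <15:8>, PHY address in the low byte), built only from calls that appear in this file; ipd_port stands for whatever IPD port the caller is polling:

	int phy_addr = cvmx_helper_board_get_mii_address(ipd_port);

	if (phy_addr != -1) {
		/* Bus number in bits <15:8>, PHY address in bits <7:0> */
		int phy_status = cvmx_mdio_read(phy_addr >> 8, phy_addr & 0xff,
						CVMX_MDIO_PHY_REG_STATUS);
		/* ...check link/auto-negotiation bits in phy_status here... */
	}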
diff --git a/drivers/staging/octeon/cvmx-helper-board.h b/drivers/staging/octeon/cvmx-helper-board.h
deleted file mode 100644
index b465bec..0000000
--- a/drivers/staging/octeon/cvmx-helper-board.h
+++ /dev/null
@@ -1,151 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- *
- * Helper functions to abstract board specific data about
- * network ports from the rest of the cvmx-helper files.
- *
- */
-#ifndef __CVMX_HELPER_BOARD_H__
-#define __CVMX_HELPER_BOARD_H__
-
-#include "cvmx-helper.h"
-
-typedef enum {
- set_phy_link_flags_autoneg = 0x1,
- set_phy_link_flags_flow_control_dont_touch = 0x0 << 1,
- set_phy_link_flags_flow_control_enable = 0x1 << 1,
- set_phy_link_flags_flow_control_disable = 0x2 << 1,
- set_phy_link_flags_flow_control_mask = 0x3 << 1, /* Mask for 2 bit wide flow control field */
-} cvmx_helper_board_set_phy_link_flags_types_t;
-
-/**
- * cvmx_override_board_link_get(int ipd_port) is a function
- * pointer. It is meant to allow customization of the process of
- * talking to a PHY to determine link speed. It is called every
- * time a PHY must be polled for link status. Users should set
- * this pointer to a function before calling any cvmx-helper
- * operations.
- */
-extern cvmx_helper_link_info_t(*cvmx_override_board_link_get) (int ipd_port);
-
-/**
- * Return the MII PHY address associated with the given IPD
- * port. A result of -1 means there isn't a MII capable PHY
- * connected to this port. On chips supporting multiple MII
- * busses the bus number is encoded in bits <15:8>.
- *
- * This function must be modified for every new Octeon board.
- * Internally it uses switch statements based on the cvmx_sysinfo
- * data to determine board types and revisions. It relies on the
- * fact that every Octeon board receives a unique board type
- * enumeration from the bootloader.
- *
- * @ipd_port: Octeon IPD port to get the MII address for.
- *
- * Returns MII PHY address and bus number or -1.
- */
-extern int cvmx_helper_board_get_mii_address(int ipd_port);
-
-/**
- * This function is a board specific method of changing the PHY
- * speed, duplex, and autonegotiation. This programs the PHY and
- * not Octeon. This can be used to force Octeon's links to
- * specific settings.
- *
- * @phy_addr: The address of the PHY to program
- * @link_flags:
- * Flags to control autonegotiation. Bit 0 is autonegotiation
- * enable/disable to maintain backward compatibility.
- * @link_info: Link speed to program. If the speed is zero and autonegotiation
- * is enabled, all possible negotiation speeds are advertised.
- *
- * Returns Zero on success, negative on failure
- */
-int cvmx_helper_board_link_set_phy(int phy_addr,
- cvmx_helper_board_set_phy_link_flags_types_t
- link_flags,
- cvmx_helper_link_info_t link_info);
-
-/**
- * This function is the board specific method of determining an
- * ethernet port's link speed. Most Octeon boards have Marvell PHYs
- * and are handled by the fall through case. This function must be
- * updated for boards that don't have the normal Marvell PHYs.
- *
- * This function must be modified for every new Octeon board.
- * Internally it uses switch statements based on the cvmx_sysinfo
- * data to determine board types and revisions. It relies on the
- * fact that every Octeon board receives a unique board type
- * enumeration from the bootloader.
- *
- * @ipd_port: IPD input port associated with the port we want to get link
- * status for.
- *
- * Returns The port's link status. If the link isn't fully resolved, this must
- * return zero.
- */
-extern cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port);
-
-/**
- * This function is called by cvmx_helper_interface_probe() after it
- * determines the number of ports Octeon can support on a specific
- * interface. This function is the per board location to override
- * this value. It is called with the number of ports Octeon might
- * support and should return the number of actual ports on the
- * board.
- *
- * This function must be modified for every new Octeon board.
- * Internally it uses switch statements based on the cvmx_sysinfo
- * data to determine board types and revisions. It relies on the
- * fact that every Octeon board receives a unique board type
- * enumeration from the bootloader.
- *
- * @interface: Interface to probe
- * @supported_ports:
- * Number of ports Octeon supports.
- *
- * Returns Number of ports the actual board supports. Many times this will
- * simply be "supported_ports".
- */
-extern int __cvmx_helper_board_interface_probe(int interface,
- int supported_ports);
-
-/**
- * Enable packet input/output from the hardware. This function is
- * called by cvmx_helper_packet_hardware_enable() to
- * perform board specific initialization. For most boards
- * nothing is needed.
- *
- * @interface: Interface to enable
- *
- * Returns Zero on success, negative on failure
- */
-extern int __cvmx_helper_board_hardware_enable(int interface);
-
-#endif /* __CVMX_HELPER_BOARD_H__ */
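For illustration, a minimal sketch (ipd_port is a placeholder) of how the packed value returned by cvmx_helper_board_get_mii_address() is typically consumed; the bus number sits in bits <15:8> and the PHY address in bits <7:0>, the same decode used with cvmx_mdio_read() in the board code above:

	int phy_addr = cvmx_helper_board_get_mii_address(ipd_port);
	if (phy_addr != -1) {
		int bus = (phy_addr >> 8) & 0xff;	/* MII bus number */
		int addr = phy_addr & 0xff;		/* PHY address on that bus */
		int phy_id = cvmx_mdio_read(bus, addr, 0x2);	/* PHY ID register */
		/* ... compare phy_id against the expected PHY ... */
	}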
diff --git a/drivers/staging/octeon/cvmx-helper-fpa.c b/drivers/staging/octeon/cvmx-helper-fpa.c
deleted file mode 100644
index c239e5f..0000000
--- a/drivers/staging/octeon/cvmx-helper-fpa.c
+++ /dev/null
@@ -1,243 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- * @file
- *
- * Helper functions for FPA setup.
- *
- */
-#include "executive-config.h"
-#include "cvmx-config.h"
-#include "cvmx.h"
-#include "cvmx-bootmem.h"
-#include "cvmx-fpa.h"
-#include "cvmx-helper-fpa.h"
-
-/**
- * Allocate memory for and initialize a single FPA pool.
- *
- * @pool: Pool to initialize
- * @buffer_size: Size of buffers to allocate in bytes
- * @buffers: Number of buffers to put in the pool. Zero is allowed
- * @name: String name of the pool for debugging purposes
- * Returns Zero on success, non-zero on failure
- */
-static int __cvmx_helper_initialize_fpa_pool(int pool, uint64_t buffer_size,
- uint64_t buffers, const char *name)
-{
- uint64_t current_num;
- void *memory;
- uint64_t align = CVMX_CACHE_LINE_SIZE;
-
- /*
- * Align the allocation so that power of 2 size buffers are
- * naturally aligned.
- */
- while (align < buffer_size)
- align = align << 1;
-
- if (buffers == 0)
- return 0;
-
- current_num = cvmx_read_csr(CVMX_FPA_QUEX_AVAILABLE(pool));
- if (current_num) {
- cvmx_dprintf("Fpa pool %d(%s) already has %llu buffers. "
- "Skipping setup.\n",
- pool, name, (unsigned long long)current_num);
- return 0;
- }
-
- memory = cvmx_bootmem_alloc(buffer_size * buffers, align);
- if (memory == NULL) {
- cvmx_dprintf("Out of memory initializing fpa pool %d(%s).\n",
- pool, name);
- return -1;
- }
- cvmx_fpa_setup_pool(pool, name, memory, buffer_size, buffers);
- return 0;
-}
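A worked instance of the alignment loop above, assuming the usual 128-byte Octeon cache line: for a 2048-byte buffer the alignment doubles 128 -> 256 -> 512 -> 1024 -> 2048, so power-of-two sized buffers come back naturally aligned to their own size, while non-power-of-two sizes are aligned up to the next power of two.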
-
-/**
- * Allocate memory and initialize the FPA pools using memory
- * from cvmx-bootmem. Specifying zero for the number of
- * buffers will cause that FPA pool to not be setup. This is
- * useful if you aren't using some of the hardware and want
- * to save memory. Use cvmx_helper_initialize_fpa instead of
- * calling this function directly.
- *
- * @pip_pool: Should always be CVMX_FPA_PACKET_POOL
- * @pip_size: Should always be CVMX_FPA_PACKET_POOL_SIZE
- * @pip_buffers:
- * Number of packet buffers.
- * @wqe_pool: Should always be CVMX_FPA_WQE_POOL
- * @wqe_size: Should always be CVMX_FPA_WQE_POOL_SIZE
- * @wqe_entries:
- * Number of work queue entries
- * @pko_pool: Should always be CVMX_FPA_OUTPUT_BUFFER_POOL
- * @pko_size: Should always be CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
- * @pko_buffers:
- * PKO Command buffers. You should have at least two for
- * each PKO queue.
- * @tim_pool: Should always be CVMX_FPA_TIMER_POOL
- * @tim_size: Should always be CVMX_FPA_TIMER_POOL_SIZE
- * @tim_buffers:
- * TIM ring buffer command queues. At least two per timer bucket
- * are recommended.
- * @dfa_pool: Should always be CVMX_FPA_DFA_POOL
- * @dfa_size: Should always be CVMX_FPA_DFA_POOL_SIZE
- * @dfa_buffers:
- * DFA command buffer. A relatively small (32 for example)
- * number should work.
- * Returns Zero on success, non-zero if out of memory
- */
-static int __cvmx_helper_initialize_fpa(int pip_pool, int pip_size,
- int pip_buffers, int wqe_pool,
- int wqe_size, int wqe_entries,
- int pko_pool, int pko_size,
- int pko_buffers, int tim_pool,
- int tim_size, int tim_buffers,
- int dfa_pool, int dfa_size,
- int dfa_buffers)
-{
- int status;
-
- cvmx_fpa_enable();
-
- if ((pip_buffers > 0) && (pip_buffers <= 64))
- cvmx_dprintf
- ("Warning: %d packet buffers may not be enough for hardware"
- " prefetch. 65 or more is recommended.\n", pip_buffers);
-
- if (pip_pool >= 0) {
- status =
- __cvmx_helper_initialize_fpa_pool(pip_pool, pip_size,
- pip_buffers,
- "Packet Buffers");
- if (status)
- return status;
- }
-
- if (wqe_pool >= 0) {
- status =
- __cvmx_helper_initialize_fpa_pool(wqe_pool, wqe_size,
- wqe_entries,
- "Work Queue Entries");
- if (status)
- return status;
- }
-
- if (pko_pool >= 0) {
- status =
- __cvmx_helper_initialize_fpa_pool(pko_pool, pko_size,
- pko_buffers,
- "PKO Command Buffers");
- if (status)
- return status;
- }
-
- if (tim_pool >= 0) {
- status =
- __cvmx_helper_initialize_fpa_pool(tim_pool, tim_size,
- tim_buffers,
- "TIM Command Buffers");
- if (status)
- return status;
- }
-
- if (dfa_pool >= 0) {
- status =
- __cvmx_helper_initialize_fpa_pool(dfa_pool, dfa_size,
- dfa_buffers,
- "DFA Command Buffers");
- if (status)
- return status;
- }
-
- return 0;
-}
-
-/**
- * Allocate memory and initialize the FPA pools using memory
- * from cvmx-bootmem. Sizes of each element in the pools is
- * controlled by the cvmx-config.h header file. Specifying
- * zero for any parameter will cause that FPA pool to not be
- * setup. This is useful if you aren't using some of the
- * hardware and want to save memory.
- *
- * @packet_buffers:
- * Number of packet buffers to allocate
- * @work_queue_entries:
- * Number of work queue entries
- * @pko_buffers:
- * PKO Command buffers. You should have at least two for
- * each PKO queue.
- * @tim_buffers:
- * TIM ring buffer command queues. At least two per timer bucket
- * are recommended.
- * @dfa_buffers:
- * DFA command buffer. A relatively small (32 for example)
- * number should work.
- * Returns Zero on success, non-zero if out of memory
- */
-int cvmx_helper_initialize_fpa(int packet_buffers, int work_queue_entries,
- int pko_buffers, int tim_buffers,
- int dfa_buffers)
-{
-#ifndef CVMX_FPA_PACKET_POOL
-#define CVMX_FPA_PACKET_POOL -1
-#define CVMX_FPA_PACKET_POOL_SIZE 0
-#endif
-#ifndef CVMX_FPA_WQE_POOL
-#define CVMX_FPA_WQE_POOL -1
-#define CVMX_FPA_WQE_POOL_SIZE 0
-#endif
-#ifndef CVMX_FPA_OUTPUT_BUFFER_POOL
-#define CVMX_FPA_OUTPUT_BUFFER_POOL -1
-#define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE 0
-#endif
-#ifndef CVMX_FPA_TIMER_POOL
-#define CVMX_FPA_TIMER_POOL -1
-#define CVMX_FPA_TIMER_POOL_SIZE 0
-#endif
-#ifndef CVMX_FPA_DFA_POOL
-#define CVMX_FPA_DFA_POOL -1
-#define CVMX_FPA_DFA_POOL_SIZE 0
-#endif
- return __cvmx_helper_initialize_fpa(CVMX_FPA_PACKET_POOL,
- CVMX_FPA_PACKET_POOL_SIZE,
- packet_buffers, CVMX_FPA_WQE_POOL,
- CVMX_FPA_WQE_POOL_SIZE,
- work_queue_entries,
- CVMX_FPA_OUTPUT_BUFFER_POOL,
- CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE,
- pko_buffers, CVMX_FPA_TIMER_POOL,
- CVMX_FPA_TIMER_POOL_SIZE,
- tim_buffers, CVMX_FPA_DFA_POOL,
- CVMX_FPA_DFA_POOL_SIZE,
- dfa_buffers);
-}
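A hedged usage sketch of the wrapper above; the buffer counts are purely illustrative, and any pool whose CVMX_FPA_* macro is absent from cvmx-config.h (or whose count is zero) is simply skipped:

	if (cvmx_helper_initialize_fpa(1024,	/* packet buffers (65 or more recommended) */
				       1024,	/* work queue entries */
				       128,	/* PKO command buffers */
				       0,	/* TIM: unused */
				       0))	/* DFA: unused */
		cvmx_dprintf("FPA setup failed (likely out of bootmem)\n");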
diff --git a/drivers/staging/octeon/cvmx-helper-fpa.h b/drivers/staging/octeon/cvmx-helper-fpa.h
deleted file mode 100644
index 5ff8c93..0000000
--- a/drivers/staging/octeon/cvmx-helper-fpa.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- * @file
- *
- * Helper functions for FPA setup.
- *
- */
-#ifndef __CVMX_HELPER_H_FPA__
-#define __CVMX_HELPER_H_FPA__
-
-/**
- * Allocate memory and initialize the FPA pools using memory
- * from cvmx-bootmem. Sizes of each element in the pools is
- * controlled by the cvmx-config.h header file. Specifying
- * zero for any parameter will cause that FPA pool to not be
- * setup. This is useful if you aren't using some of the
- * hardware and want to save memory.
- *
- * @packet_buffers:
- * Number of packet buffers to allocate
- * @work_queue_entries:
- * Number of work queue entries
- * @pko_buffers:
- * PKO Command buffers. You should have at least two for
- * each PKO queue.
- * @tim_buffers:
- * TIM ring buffer command queues. At least two per timer bucket
- * are recommended.
- * @dfa_buffers:
- * DFA command buffer. A relatively small (32 for example)
- * number should work.
- * Returns Zero on success, non-zero if out of memory
- */
-extern int cvmx_helper_initialize_fpa(int packet_buffers,
- int work_queue_entries, int pko_buffers,
- int tim_buffers, int dfa_buffers);
-
-#endif /* __CVMX_HELPER_H_FPA__ */
diff --git a/drivers/staging/octeon/cvmx-helper-loop.c b/drivers/staging/octeon/cvmx-helper-loop.c
deleted file mode 100644
index 55a571a..0000000
--- a/drivers/staging/octeon/cvmx-helper-loop.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- * Functions for LOOP initialization, configuration,
- * and monitoring.
- */
-#include <asm/octeon/octeon.h>
-
-#include "cvmx-config.h"
-
-#include "cvmx-helper.h"
-#include "cvmx-pip-defs.h"
-
-/**
- * Probe a LOOP interface and determine the number of ports
- * connected to it. The LOOP interface should still be down
- * after this call.
- *
- * @interface: Interface to probe
- *
- * Returns Number of ports on the interface. Zero to disable.
- */
-int __cvmx_helper_loop_probe(int interface)
-{
- union cvmx_ipd_sub_port_fcs ipd_sub_port_fcs;
- int num_ports = 4;
- int port;
-
- /* We need to disable length checking so packet < 64 bytes and jumbo
- frames don't get errors */
- for (port = 0; port < num_ports; port++) {
- union cvmx_pip_prt_cfgx port_cfg;
- int ipd_port = cvmx_helper_get_ipd_port(interface, port);
- port_cfg.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
- port_cfg.s.maxerr_en = 0;
- port_cfg.s.minerr_en = 0;
- cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_cfg.u64);
- }
-
- /* Disable FCS stripping for loopback ports */
- ipd_sub_port_fcs.u64 = cvmx_read_csr(CVMX_IPD_SUB_PORT_FCS);
- ipd_sub_port_fcs.s.port_bit2 = 0;
- cvmx_write_csr(CVMX_IPD_SUB_PORT_FCS, ipd_sub_port_fcs.u64);
- return num_ports;
-}
-
-/**
- * Bringup and enable a LOOP interface. After this call packet
- * I/O should be fully functional. This is called with IPD
- * enabled but PKO disabled.
- *
- * @interface: Interface to bring up
- *
- * Returns Zero on success, negative on failure
- */
-int __cvmx_helper_loop_enable(int interface)
-{
- /* Do nothing. */
- return 0;
-}
diff --git a/drivers/staging/octeon/cvmx-helper-loop.h b/drivers/staging/octeon/cvmx-helper-loop.h
deleted file mode 100644
index e646a6c..0000000
--- a/drivers/staging/octeon/cvmx-helper-loop.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as published by
- * the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful,
- * but AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or NONINFRINGEMENT.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- * @file
- *
- * Functions for LOOP initialization, configuration,
- * and monitoring.
- *
- */
-#ifndef __CVMX_HELPER_LOOP_H__
-#define __CVMX_HELPER_LOOP_H__
-
-/**
- * Probe a LOOP interface and determine the number of ports
- * connected to it. The LOOP interface should still be down after
- * this call.
- *
- * @interface: Interface to probe
- *
- * Returns Number of ports on the interface. Zero to disable.
- */
-extern int __cvmx_helper_loop_probe(int interface);
-
-/**
- * Bringup and enable a LOOP interface. After this call packet
- * I/O should be fully functional. This is called with IPD
- * enabled but PKO disabled.
- *
- * @interface: Interface to bring up
- *
- * Returns Zero on success, negative on failure
- */
-extern int __cvmx_helper_loop_enable(int interface);
-
-#endif
diff --git a/drivers/staging/octeon/cvmx-helper-npi.c b/drivers/staging/octeon/cvmx-helper-npi.c
deleted file mode 100644
index 7388a1e..0000000
--- a/drivers/staging/octeon/cvmx-helper-npi.c
+++ /dev/null
@@ -1,113 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- * Functions for NPI initialization, configuration,
- * and monitoring.
- */
-#include <asm/octeon/octeon.h>
-
-#include "cvmx-config.h"
-
-#include "cvmx-helper.h"
-
-#include "cvmx-pip-defs.h"
-
-/**
- * Probe a NPI interface and determine the number of ports
- * connected to it. The NPI interface should still be down
- * after this call.
- *
- * @interface: Interface to probe
- *
- * Returns Number of ports on the interface. Zero to disable.
- */
-int __cvmx_helper_npi_probe(int interface)
-{
-#if CVMX_PKO_QUEUES_PER_PORT_PCI > 0
- if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
- return 4;
- else if (OCTEON_IS_MODEL(OCTEON_CN56XX)
- && !OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
- /* The packet engines didn't exist before pass 2 */
- return 4;
- else if (OCTEON_IS_MODEL(OCTEON_CN52XX)
- && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
- /* The packet engines didn't exist before pass 2 */
- return 4;
-#if 0
- /*
- * Technically CN30XX, CN31XX, and CN50XX contain packet
- * engines, but nobody ever uses them. Since this is the case,
- * we disable them here.
- */
- else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
- || OCTEON_IS_MODEL(OCTEON_CN50XX))
- return 2;
- else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
- return 1;
-#endif
-#endif
- return 0;
-}
-
-/**
- * Bringup and enable a NPI interface. After this call packet
- * I/O should be fully functional. This is called with IPD
- * enabled but PKO disabled.
- *
- * @interface: Interface to bring up
- *
- * Returns Zero on success, negative on failure
- */
-int __cvmx_helper_npi_enable(int interface)
-{
- /*
- * On CN50XX, CN52XX, and CN56XX we need to disable length
- * checking so packet < 64 bytes and jumbo frames don't get
- * errors.
- */
- if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) &&
- !OCTEON_IS_MODEL(OCTEON_CN58XX)) {
- int num_ports = cvmx_helper_ports_on_interface(interface);
- int port;
- for (port = 0; port < num_ports; port++) {
- union cvmx_pip_prt_cfgx port_cfg;
- int ipd_port =
- cvmx_helper_get_ipd_port(interface, port);
- port_cfg.u64 =
- cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
- port_cfg.s.maxerr_en = 0;
- port_cfg.s.minerr_en = 0;
- cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port),
- port_cfg.u64);
- }
- }
-
- /* Enables are controlled by the remote host, so nothing to do here */
- return 0;
-}
diff --git a/drivers/staging/octeon/cvmx-helper-npi.h b/drivers/staging/octeon/cvmx-helper-npi.h
deleted file mode 100644
index 908e7b0..0000000
--- a/drivers/staging/octeon/cvmx-helper-npi.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- * @file
- *
- * Functions for NPI initialization, configuration,
- * and monitoring.
- *
- */
-#ifndef __CVMX_HELPER_NPI_H__
-#define __CVMX_HELPER_NPI_H__
-
-/**
- * Probe a NPI interface and determine the number of ports
- * connected to it. The NPI interface should still be down after
- * this call.
- *
- * @interface: Interface to probe
- *
- * Returns Number of ports on the interface. Zero to disable.
- */
-extern int __cvmx_helper_npi_probe(int interface);
-
-/**
- * Bringup and enable a NPI interface. After this call packet
- * I/O should be fully functional. This is called with IPD
- * enabled but PKO disabled.
- *
- * @interface: Interface to bring up
- *
- * Returns Zero on success, negative on failure
- */
-extern int __cvmx_helper_npi_enable(int interface);
-
-#endif
diff --git a/drivers/staging/octeon/cvmx-helper-rgmii.c b/drivers/staging/octeon/cvmx-helper-rgmii.c
deleted file mode 100644
index aa2d5d7..0000000
--- a/drivers/staging/octeon/cvmx-helper-rgmii.c
+++ /dev/null
@@ -1,525 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- * Functions for RGMII/GMII/MII initialization, configuration,
- * and monitoring.
- */
-#include <asm/octeon/octeon.h>
-
-#include "cvmx-config.h"
-
-
-#include "cvmx-mdio.h"
-#include "cvmx-pko.h"
-#include "cvmx-helper.h"
-#include "cvmx-helper-board.h"
-
-#include <asm/octeon/cvmx-npi-defs.h>
-#include "cvmx-gmxx-defs.h"
-#include "cvmx-asxx-defs.h"
-#include "cvmx-dbg-defs.h"
-
-void __cvmx_interrupt_gmxx_enable(int interface);
-void __cvmx_interrupt_asxx_enable(int block);
-
-/**
- * Probe RGMII ports and determine the number present
- *
- * @interface: Interface to probe
- *
- * Returns Number of RGMII/GMII/MII ports (0-4).
- */
-int __cvmx_helper_rgmii_probe(int interface)
-{
- int num_ports = 0;
- union cvmx_gmxx_inf_mode mode;
- mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
-
- if (mode.s.type) {
- if (OCTEON_IS_MODEL(OCTEON_CN38XX)
- || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
- cvmx_dprintf("ERROR: RGMII initialize called in "
- "SPI interface\n");
- } else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
- || OCTEON_IS_MODEL(OCTEON_CN30XX)
- || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
- /*
- * On these chips "type" says we're in
- * GMII/MII mode. This limits us to 2 ports
- */
- num_ports = 2;
- } else {
- cvmx_dprintf("ERROR: Unsupported Octeon model in %s\n",
- __func__);
- }
- } else {
- if (OCTEON_IS_MODEL(OCTEON_CN38XX)
- || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
- num_ports = 4;
- } else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
- || OCTEON_IS_MODEL(OCTEON_CN30XX)
- || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
- num_ports = 3;
- } else {
- cvmx_dprintf("ERROR: Unsupported Octeon model in %s\n",
- __func__);
- }
- }
- return num_ports;
-}
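Summarizing the probe logic above, the port count returned by model and GMXX_INF_MODE[TYPE] is:

	/* CN38XX/CN58XX,        TYPE=0:  4 RGMII ports
	 * CN38XX/CN58XX,        TYPE=1:  SPI mode -- error printed, 0 ports returned
	 * CN30XX/CN31XX/CN50XX, TYPE=0:  3 RGMII ports
	 * CN30XX/CN31XX/CN50XX, TYPE=1:  2 GMII/MII ports
	 */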
-
-/**
- * Put an RGMII interface in loopback mode. Internal packets sent
- * out will be received back again on the same port. Externally
- * received packets will echo back out.
- *
- * @port: IPD port number to loop.
- */
-void cvmx_helper_rgmii_internal_loopback(int port)
-{
- int interface = (port >> 4) & 1;
- int index = port & 0xf;
- uint64_t tmp;
-
- union cvmx_gmxx_prtx_cfg gmx_cfg;
- gmx_cfg.u64 = 0;
- gmx_cfg.s.duplex = 1;
- gmx_cfg.s.slottime = 1;
- gmx_cfg.s.speed = 1;
- cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
- cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
- cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
- tmp = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
- cvmx_write_csr(CVMX_ASXX_PRT_LOOP(interface), (1 << index) | tmp);
- tmp = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface));
- cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), (1 << index) | tmp);
- tmp = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface));
- cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), (1 << index) | tmp);
- gmx_cfg.s.en = 1;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
-}
-
-/**
- * Workaround ASX setup errata with CN38XX pass1
- *
- * @interface: Interface to setup
- * @port: Port to setup (0..3)
- * @cpu_clock_hz:
- * Chip frequency in Hertz
- *
- * Returns Zero on success, negative on failure
- */
-static int __cvmx_helper_errata_asx_pass1(int interface, int port,
- int cpu_clock_hz)
-{
- /* Set hi water mark as per errata GMX-4 */
- if (cpu_clock_hz >= 325000000 && cpu_clock_hz < 375000000)
- cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 12);
- else if (cpu_clock_hz >= 375000000 && cpu_clock_hz < 437000000)
- cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 11);
- else if (cpu_clock_hz >= 437000000 && cpu_clock_hz < 550000000)
- cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 10);
- else if (cpu_clock_hz >= 550000000 && cpu_clock_hz < 687000000)
- cvmx_write_csr(CVMX_ASXX_TX_HI_WATERX(port, interface), 9);
- else
- cvmx_dprintf("Illegal clock frequency (%d). "
- "CVMX_ASXX_TX_HI_WATERX not set\n", cpu_clock_hz);
- return 0;
-}
-
-/**
- * Configure all of the ASX, GMX, and PKO registers required
- * to get RGMII to function on the supplied interface.
- *
- * @interface: PKO Interface to configure (0 or 1)
- *
- * Returns Zero on success
- */
-int __cvmx_helper_rgmii_enable(int interface)
-{
- int num_ports = cvmx_helper_ports_on_interface(interface);
- int port;
- struct cvmx_sysinfo *sys_info_ptr = cvmx_sysinfo_get();
- union cvmx_gmxx_inf_mode mode;
- union cvmx_asxx_tx_prt_en asx_tx;
- union cvmx_asxx_rx_prt_en asx_rx;
-
- mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
-
- if (mode.s.en == 0)
- return -1;
- if ((OCTEON_IS_MODEL(OCTEON_CN38XX) ||
- OCTEON_IS_MODEL(OCTEON_CN58XX)) && mode.s.type == 1)
- /* Ignore SPI interfaces */
- return -1;
-
- /* Configure the ASX registers needed to use the RGMII ports */
- asx_tx.u64 = 0;
- asx_tx.s.prt_en = cvmx_build_mask(num_ports);
- cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface), asx_tx.u64);
-
- asx_rx.u64 = 0;
- asx_rx.s.prt_en = cvmx_build_mask(num_ports);
- cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface), asx_rx.u64);
-
- /* Configure the GMX registers needed to use the RGMII ports */
- for (port = 0; port < num_ports; port++) {
- /* Setting of CVMX_GMXX_TXX_THRESH has been moved to
- __cvmx_helper_setup_gmx() */
-
- if (cvmx_octeon_is_pass1())
- __cvmx_helper_errata_asx_pass1(interface, port,
- sys_info_ptr->
- cpu_clock_hz);
- else {
- /*
- * Configure more flexible RGMII preamble
- * checking. Pass 1 doesn't support this
- * feature.
- */
- union cvmx_gmxx_rxx_frm_ctl frm_ctl;
- frm_ctl.u64 =
- cvmx_read_csr(CVMX_GMXX_RXX_FRM_CTL
- (port, interface));
- /* New field, so must be compile time */
- frm_ctl.s.pre_free = 1;
- cvmx_write_csr(CVMX_GMXX_RXX_FRM_CTL(port, interface),
- frm_ctl.u64);
- }
-
- /*
- * Each pause frame transmitted will ask for about 10M
- * bit times before resume. If buffer space comes
- * available before that time has expired, an XON
- * pause frame (0 time) will be transmitted to restart
- * the flow.
- */
- cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_TIME(port, interface),
- 20000);
- cvmx_write_csr(CVMX_GMXX_TXX_PAUSE_PKT_INTERVAL
- (port, interface), 19000);
-
- if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
- cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface),
- 16);
- cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface),
- 16);
- } else {
- cvmx_write_csr(CVMX_ASXX_TX_CLK_SETX(port, interface),
- 24);
- cvmx_write_csr(CVMX_ASXX_RX_CLK_SETX(port, interface),
- 24);
- }
- }
-
- __cvmx_helper_setup_gmx(interface, num_ports);
-
- /* enable the ports now */
- for (port = 0; port < num_ports; port++) {
- union cvmx_gmxx_prtx_cfg gmx_cfg;
- cvmx_helper_link_autoconf(cvmx_helper_get_ipd_port
- (interface, port));
- gmx_cfg.u64 =
- cvmx_read_csr(CVMX_GMXX_PRTX_CFG(port, interface));
- gmx_cfg.s.en = 1;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(port, interface),
- gmx_cfg.u64);
- }
- __cvmx_interrupt_asxx_enable(interface);
- __cvmx_interrupt_gmxx_enable(interface);
-
- return 0;
-}
-
-/**
- * Return the link state of an IPD/PKO port as returned by
- * auto negotiation. The result of this function may not match
- * Octeon's link config if auto negotiation has changed since
- * the last call to cvmx_helper_link_set().
- *
- * @ipd_port: IPD/PKO port to query
- *
- * Returns Link state
- */
-cvmx_helper_link_info_t __cvmx_helper_rgmii_link_get(int ipd_port)
-{
- int interface = cvmx_helper_get_interface_num(ipd_port);
- int index = cvmx_helper_get_interface_index_num(ipd_port);
- union cvmx_asxx_prt_loop asxx_prt_loop;
-
- asxx_prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
- if (asxx_prt_loop.s.int_loop & (1 << index)) {
- /* Force 1Gbps full duplex on internal loopback */
- cvmx_helper_link_info_t result;
- result.u64 = 0;
- result.s.full_duplex = 1;
- result.s.link_up = 1;
- result.s.speed = 1000;
- return result;
- } else
- return __cvmx_helper_board_link_get(ipd_port);
-}
-
-/**
- * Configure an IPD/PKO port for the specified link state. This
- * function does not influence auto negotiation at the PHY level.
- * The passed link state must always match the link state returned
- * by cvmx_helper_link_get(). It is normally best to use
- * cvmx_helper_link_autoconf() instead.
- *
- * @ipd_port: IPD/PKO port to configure
- * @link_info: The new link state
- *
- * Returns Zero on success, negative on failure
- */
-int __cvmx_helper_rgmii_link_set(int ipd_port,
- cvmx_helper_link_info_t link_info)
-{
- int result = 0;
- int interface = cvmx_helper_get_interface_num(ipd_port);
- int index = cvmx_helper_get_interface_index_num(ipd_port);
- union cvmx_gmxx_prtx_cfg original_gmx_cfg;
- union cvmx_gmxx_prtx_cfg new_gmx_cfg;
- union cvmx_pko_mem_queue_qos pko_mem_queue_qos;
- union cvmx_pko_mem_queue_qos pko_mem_queue_qos_save[16];
- union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp;
- union cvmx_gmxx_tx_ovr_bp gmx_tx_ovr_bp_save;
- int i;
-
- /* Ignore speed sets in the simulator */
- if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM)
- return 0;
-
- /* Read the current settings so we know the current enable state */
- original_gmx_cfg.u64 =
- cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- new_gmx_cfg = original_gmx_cfg;
-
- /* Disable the lowest level RX */
- cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
- cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) &
- ~(1 << index));
-
- /* Disable all queues so that TX should become idle */
- for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
- int queue = cvmx_pko_get_base_queue(ipd_port) + i;
- cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
- pko_mem_queue_qos.u64 = cvmx_read_csr(CVMX_PKO_MEM_QUEUE_QOS);
- pko_mem_queue_qos.s.pid = ipd_port;
- pko_mem_queue_qos.s.qid = queue;
- pko_mem_queue_qos_save[i] = pko_mem_queue_qos;
- pko_mem_queue_qos.s.qos_mask = 0;
- cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS, pko_mem_queue_qos.u64);
- }
-
- /* Disable backpressure */
- gmx_tx_ovr_bp.u64 = cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
- gmx_tx_ovr_bp_save = gmx_tx_ovr_bp;
- gmx_tx_ovr_bp.s.bp &= ~(1 << index);
- gmx_tx_ovr_bp.s.en |= 1 << index;
- cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp.u64);
- cvmx_read_csr(CVMX_GMXX_TX_OVR_BP(interface));
-
- /*
- * Poll the GMX state machine waiting for it to become
- * idle. Preferably we should only change speed when it is
- * idle. If it doesn't become idle we will still do the speed
- * change, but there is a slight chance that GMX will
- * lockup.
- */
- cvmx_write_csr(CVMX_NPI_DBG_SELECT,
- interface * 0x800 + index * 0x100 + 0x880);
- CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data, data & 7,
- ==, 0, 10000);
- CVMX_WAIT_FOR_FIELD64(CVMX_DBG_DATA, union cvmx_dbg_data, data & 0xf,
- ==, 0, 10000);
-
- /* Disable the port before we make any changes */
- new_gmx_cfg.s.en = 0;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
- cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
-
- /* Set full/half duplex */
- if (cvmx_octeon_is_pass1())
- /* Half duplex is broken for 38XX Pass 1 */
- new_gmx_cfg.s.duplex = 1;
- else if (!link_info.s.link_up)
- /* Force full duplex on down links */
- new_gmx_cfg.s.duplex = 1;
- else
- new_gmx_cfg.s.duplex = link_info.s.full_duplex;
-
- /* Set the link speed. Anything unknown is set to 1Gbps */
- if (link_info.s.speed == 10) {
- new_gmx_cfg.s.slottime = 0;
- new_gmx_cfg.s.speed = 0;
- } else if (link_info.s.speed == 100) {
- new_gmx_cfg.s.slottime = 0;
- new_gmx_cfg.s.speed = 0;
- } else {
- new_gmx_cfg.s.slottime = 1;
- new_gmx_cfg.s.speed = 1;
- }
-
- /* Adjust the clocks */
- if (link_info.s.speed == 10) {
- cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 50);
- cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
- cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
- } else if (link_info.s.speed == 100) {
- cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 5);
- cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x40);
- cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
- } else {
- cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
- cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
- cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
- }
-
- if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
- if ((link_info.s.speed == 10) || (link_info.s.speed == 100)) {
- union cvmx_gmxx_inf_mode mode;
- mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
-
- /*
- * Port .en .type .p0mii Configuration
- * ---- --- ----- ------ -----------------------------------------
- * X 0 X X All links are disabled.
- * 0 1 X 0 Port 0 is RGMII
- * 0 1 X 1 Port 0 is MII
- * 1 1 0 X Ports 1 and 2 are configured as RGMII ports.
- * 1 1 1 X Port 1: GMII/MII; Port 2: disabled. GMII or
- * MII port is selected by GMX_PRT1_CFG[SPEED].
- */
-
- /* In MII mode, CLK_CNT = 1. */
- if (((index == 0) && (mode.s.p0mii == 1))
- || ((index != 0) && (mode.s.type == 1))) {
- cvmx_write_csr(CVMX_GMXX_TXX_CLK
- (index, interface), 1);
- }
- }
- }
-
- /* Do a read to make sure all setup stuff is complete */
- cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
-
- /* Save the new GMX setting without enabling the port */
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
-
- /* Enable the lowest level RX */
- cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
- cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface)) | (1 <<
- index));
-
- /* Re-enable the TX path */
- for (i = 0; i < cvmx_pko_get_num_queues(ipd_port); i++) {
- int queue = cvmx_pko_get_base_queue(ipd_port) + i;
- cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue);
- cvmx_write_csr(CVMX_PKO_MEM_QUEUE_QOS,
- pko_mem_queue_qos_save[i].u64);
- }
-
- /* Restore backpressure */
- cvmx_write_csr(CVMX_GMXX_TX_OVR_BP(interface), gmx_tx_ovr_bp_save.u64);
-
- /* Restore the GMX enable state. Port config is complete */
- new_gmx_cfg.s.en = original_gmx_cfg.s.en;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), new_gmx_cfg.u64);
-
- return result;
-}
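For reference, the per-speed values programmed by the function above (a descriptive summary of the code, not a normative table); on CN30XX/CN50XX in MII mode the TX clock divider is additionally forced to 1 at 10/100 Mbps:

	/* speed            PRTX_CFG.speed/slottime   TXX_CLK   TXX_SLOT   TXX_BURST
	 *   10                     0 / 0               50        0x40        0
	 *  100                     0 / 0                5        0x40        0
	 * 1000 (default)           1 / 1                1        0x200       0x2000
	 */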
-
-/**
- * Configure a port for internal and/or external loopback. Internal loopback
- * causes packets sent by the port to be received by Octeon. External loopback
- * causes packets received from the wire to be sent out again.
- *
- * @ipd_port: IPD/PKO port to loopback.
- * @enable_internal:
- * Non zero if you want internal loopback
- * @enable_external:
- * Non zero if you want external loopback
- *
- * Returns Zero on success, negative on failure.
- */
-int __cvmx_helper_rgmii_configure_loopback(int ipd_port, int enable_internal,
- int enable_external)
-{
- int interface = cvmx_helper_get_interface_num(ipd_port);
- int index = cvmx_helper_get_interface_index_num(ipd_port);
- int original_enable;
- union cvmx_gmxx_prtx_cfg gmx_cfg;
- union cvmx_asxx_prt_loop asxx_prt_loop;
-
- /* Read the current enable state and save it */
- gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- original_enable = gmx_cfg.s.en;
- /* Force port to be disabled */
- gmx_cfg.s.en = 0;
- if (enable_internal) {
- /* Force speed if we're doing internal loopback */
- gmx_cfg.s.duplex = 1;
- gmx_cfg.s.slottime = 1;
- gmx_cfg.s.speed = 1;
- cvmx_write_csr(CVMX_GMXX_TXX_CLK(index, interface), 1);
- cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 0x200);
- cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0x2000);
- }
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
-
- /* Set the loopback bits */
- asxx_prt_loop.u64 = cvmx_read_csr(CVMX_ASXX_PRT_LOOP(interface));
- if (enable_internal)
- asxx_prt_loop.s.int_loop |= 1 << index;
- else
- asxx_prt_loop.s.int_loop &= ~(1 << index);
- if (enable_external)
- asxx_prt_loop.s.ext_loop |= 1 << index;
- else
- asxx_prt_loop.s.ext_loop &= ~(1 << index);
- cvmx_write_csr(CVMX_ASXX_PRT_LOOP(interface), asxx_prt_loop.u64);
-
- /* Force enables in internal loopback */
- if (enable_internal) {
- uint64_t tmp;
- tmp = cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(interface));
- cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(interface),
- (1 << index) | tmp);
- tmp = cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(interface));
- cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(interface),
- (1 << index) | tmp);
- original_enable = 1;
- }
-
- /* Restore the enable state */
- gmx_cfg.s.en = original_enable;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmx_cfg.u64);
- return 0;
-}
diff --git a/drivers/staging/octeon/cvmx-helper-rgmii.h b/drivers/staging/octeon/cvmx-helper-rgmii.h
deleted file mode 100644
index ea26526..0000000
--- a/drivers/staging/octeon/cvmx-helper-rgmii.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- * @file
- *
- * Functions for RGMII/GMII/MII initialization, configuration,
- * and monitoring.
- *
- */
-#ifndef __CVMX_HELPER_RGMII_H__
-#define __CVMX_HELPER_RGMII_H__
-
-/**
- * Probe RGMII ports and determine the number present
- *
- * @interface: Interface to probe
- *
- * Returns Number of RGMII/GMII/MII ports (0-4).
- */
-extern int __cvmx_helper_rgmii_probe(int interface);
-
-/**
- * Put an RGMII interface in loopback mode. Internal packets sent
- * out will be received back again on the same port. Externally
- * received packets will echo back out.
- *
- * @port: IPD port number to loop.
- */
-extern void cvmx_helper_rgmii_internal_loopback(int port);
-
-/**
- * Configure all of the ASX, GMX, and PKO registers required
- * to get RGMII to function on the supplied interface.
- *
- * @interface: PKO Interface to configure (0 or 1)
- *
- * Returns Zero on success
- */
-extern int __cvmx_helper_rgmii_enable(int interface);
-
-/**
- * Return the link state of an IPD/PKO port as returned by
- * auto negotiation. The result of this function may not match
- * Octeon's link config if auto negotiation has changed since
- * the last call to cvmx_helper_link_set().
- *
- * @ipd_port: IPD/PKO port to query
- *
- * Returns Link state
- */
-extern cvmx_helper_link_info_t __cvmx_helper_rgmii_link_get(int ipd_port);
-
-/**
- * Configure an IPD/PKO port for the specified link state. This
- * function does not influence auto negotiation at the PHY level.
- * The passed link state must always match the link state returned
- * by cvmx_helper_link_get(). It is normally best to use
- * cvmx_helper_link_autoconf() instead.
- *
- * @ipd_port: IPD/PKO port to configure
- * @link_info: The new link state
- *
- * Returns Zero on success, negative on failure
- */
-extern int __cvmx_helper_rgmii_link_set(int ipd_port,
- cvmx_helper_link_info_t link_info);
-
-/**
- * Configure a port for internal and/or external loopback. Internal loopback
- * causes packets sent by the port to be received by Octeon. External loopback
- * causes packets received from the wire to be sent out again.
- *
- * @ipd_port: IPD/PKO port to loopback.
- * @enable_internal:
- * Non zero if you want internal loopback
- * @enable_external:
- * Non zero if you want external loopback
- *
- * Returns Zero on success, negative on failure.
- */
-extern int __cvmx_helper_rgmii_configure_loopback(int ipd_port,
- int enable_internal,
- int enable_external);
-
-#endif
diff --git a/drivers/staging/octeon/cvmx-helper-sgmii.c b/drivers/staging/octeon/cvmx-helper-sgmii.c
deleted file mode 100644
index 6214e3b..0000000
--- a/drivers/staging/octeon/cvmx-helper-sgmii.c
+++ /dev/null
@@ -1,550 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- * Functions for SGMII initialization, configuration,
- * and monitoring.
- */
-
-#include <asm/octeon/octeon.h>
-
-#include "cvmx-config.h"
-
-#include "cvmx-mdio.h"
-#include "cvmx-helper.h"
-#include "cvmx-helper-board.h"
-
-#include "cvmx-gmxx-defs.h"
-#include "cvmx-pcsx-defs.h"
-
-void __cvmx_interrupt_gmxx_enable(int interface);
-void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block);
-void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index);
-
-/**
- * Perform initialization required only once for an SGMII port.
- *
- * @interface: Interface to init
- * @index: Index of port on the interface
- *
- * Returns Zero on success, negative on failure
- */
-static int __cvmx_helper_sgmii_hardware_init_one_time(int interface, int index)
-{
- const uint64_t clock_mhz = cvmx_sysinfo_get()->cpu_clock_hz / 1000000;
- union cvmx_pcsx_miscx_ctl_reg pcs_misc_ctl_reg;
- union cvmx_pcsx_linkx_timer_count_reg pcsx_linkx_timer_count_reg;
- union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
-
- /* Disable GMX */
- gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- gmxx_prtx_cfg.s.en = 0;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
-
- /*
- * Write PCS*_LINK*_TIMER_COUNT_REG[COUNT] with the
- * appropriate value. 1000BASE-X specifies a 10ms
- * interval. SGMII specifies a 1.6ms interval.
- */
- pcs_misc_ctl_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
- pcsx_linkx_timer_count_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface));
- if (pcs_misc_ctl_reg.s.mode) {
- /* 1000BASE-X */
- pcsx_linkx_timer_count_reg.s.count =
- (10000ull * clock_mhz) >> 10;
- } else {
- /* SGMII */
- pcsx_linkx_timer_count_reg.s.count =
- (1600ull * clock_mhz) >> 10;
- }
- cvmx_write_csr(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface),
- pcsx_linkx_timer_count_reg.u64);
-
- /*
- * Write the advertisement register to be used as the
- * tx_Config_Reg<D15:D0> of the autonegotiation. In
- * 1000BASE-X mode, tx_Config_Reg<D15:D0> is PCS*_AN*_ADV_REG.
- * In SGMII PHY mode, tx_Config_Reg<D15:D0> is
- * PCS*_SGM*_AN_ADV_REG. In SGMII MAC mode,
- * tx_Config_Reg<D15:D0> is the fixed value 0x4001, so this
- * step can be skipped.
- */
- if (pcs_misc_ctl_reg.s.mode) {
- /* 1000BASE-X */
- union cvmx_pcsx_anx_adv_reg pcsx_anx_adv_reg;
- pcsx_anx_adv_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_ANX_ADV_REG(index, interface));
- pcsx_anx_adv_reg.s.rem_flt = 0;
- pcsx_anx_adv_reg.s.pause = 3;
- pcsx_anx_adv_reg.s.hfd = 1;
- pcsx_anx_adv_reg.s.fd = 1;
- cvmx_write_csr(CVMX_PCSX_ANX_ADV_REG(index, interface),
- pcsx_anx_adv_reg.u64);
- } else {
- union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
- pcsx_miscx_ctl_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
- if (pcsx_miscx_ctl_reg.s.mac_phy) {
- /* PHY Mode */
- union cvmx_pcsx_sgmx_an_adv_reg pcsx_sgmx_an_adv_reg;
- pcsx_sgmx_an_adv_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_SGMX_AN_ADV_REG
- (index, interface));
- pcsx_sgmx_an_adv_reg.s.link = 1;
- pcsx_sgmx_an_adv_reg.s.dup = 1;
- pcsx_sgmx_an_adv_reg.s.speed = 2;
- cvmx_write_csr(CVMX_PCSX_SGMX_AN_ADV_REG
- (index, interface),
- pcsx_sgmx_an_adv_reg.u64);
- } else {
- /* MAC Mode - Nothing to do */
- }
- }
- return 0;
-}
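A worked instance of the link-timer arithmetic above (a 600 MHz core clock is assumed for illustration): the COUNT field is the desired interval expressed in units of 1024 core-clock cycles, i.e. interval_us * clock_mhz >> 10.

	/* SGMII, 1.6 ms:      (1600  * 600) >> 10 = 960000  / 1024 ~= 937
	 * 1000BASE-X, 10 ms:  (10000 * 600) >> 10 = 6000000 / 1024 ~= 5859
	 */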
-
-/**
- * Initialize the SERDES link for the first time or after a loss
- * of link.
- *
- * @interface: Interface to init
- * @index: Index of port on the interface
- *
- * Returns Zero on success, negative on failure
- */
-static int __cvmx_helper_sgmii_hardware_init_link(int interface, int index)
-{
- union cvmx_pcsx_mrx_control_reg control_reg;
-
- /*
- * Take PCS through a reset sequence.
- * PCS*_MR*_CONTROL_REG[PWR_DN] should be cleared to zero.
- * Write PCS*_MR*_CONTROL_REG[RESET]=1 (while not changing the
- * value of the other PCS*_MR*_CONTROL_REG bits). Read
- * PCS*_MR*_CONTROL_REG[RESET] until it changes value to
- * zero.
- */
- control_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
- if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) {
- control_reg.s.reset = 1;
- cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
- control_reg.u64);
- if (CVMX_WAIT_FOR_FIELD64
- (CVMX_PCSX_MRX_CONTROL_REG(index, interface),
- union cvmx_pcsx_mrx_control_reg, reset, ==, 0, 10000)) {
- cvmx_dprintf("SGMII%d: Timeout waiting for port %d "
- "to finish reset\n",
- interface, index);
- return -1;
- }
- }
-
- /*
- * Write PCS*_MR*_CONTROL_REG[RST_AN]=1 to ensure a fresh
- * sgmii negotiation starts.
- */
- control_reg.s.rst_an = 1;
- control_reg.s.an_en = 1;
- control_reg.s.pwr_dn = 0;
- cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
- control_reg.u64);
-
- /*
- * Wait for PCS*_MR*_STATUS_REG[AN_CPT] to be set, indicating
- * that sgmii autonegotiation is complete. In MAC mode this
- * isn't an ethernet link, but a link between Octeon and the
- * PHY.
- */
- if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
- CVMX_WAIT_FOR_FIELD64(CVMX_PCSX_MRX_STATUS_REG(index, interface),
- union cvmx_pcsx_mrx_status_reg, an_cpt, ==, 1,
- 10000)) {
- /* cvmx_dprintf("SGMII%d: Port %d link timeout\n", interface, index); */
- return -1;
- }
- return 0;
-}
-
-/**
- * Configure an SGMII link to the specified speed after the SERDES
- * link is up.
- *
- * @interface: Interface to init
- * @index: Index of port on the interface
- * @link_info: Link state to configure
- *
- * Returns Zero on success, negative on failure
- */
-static int __cvmx_helper_sgmii_hardware_init_link_speed(int interface,
- int index,
- cvmx_helper_link_info_t
- link_info)
-{
- int is_enabled;
- union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
- union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
-
- /* Disable GMX before we make any changes. Remember the enable state */
- gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- is_enabled = gmxx_prtx_cfg.s.en;
- gmxx_prtx_cfg.s.en = 0;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
-
- /* Wait for GMX to be idle */
- if (CVMX_WAIT_FOR_FIELD64
- (CVMX_GMXX_PRTX_CFG(index, interface), union cvmx_gmxx_prtx_cfg,
- rx_idle, ==, 1, 10000)
- || CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface),
- union cvmx_gmxx_prtx_cfg, tx_idle, ==, 1,
- 10000)) {
- cvmx_dprintf
- ("SGMII%d: Timeout waiting for port %d to be idle\n",
- interface, index);
- return -1;
- }
-
- /* Read GMX CFG again to make sure the disable completed */
- gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
-
- /*
- * Get the misc control for PCS. We will need to set the
- * duplication amount.
- */
- pcsx_miscx_ctl_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
-
- /*
- * Use GMXENO to force the link down if the status we get says
- * it should be down.
- */
- pcsx_miscx_ctl_reg.s.gmxeno = !link_info.s.link_up;
-
- /* Only change the duplex setting if the link is up */
- if (link_info.s.link_up)
- gmxx_prtx_cfg.s.duplex = link_info.s.full_duplex;
-
- /* Do speed based setting for GMX */
- switch (link_info.s.speed) {
- case 10:
- gmxx_prtx_cfg.s.speed = 0;
- gmxx_prtx_cfg.s.speed_msb = 1;
- gmxx_prtx_cfg.s.slottime = 0;
- /* Setting from GMX-603 */
- pcsx_miscx_ctl_reg.s.samp_pt = 25;
- cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 64);
- cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
- break;
- case 100:
- gmxx_prtx_cfg.s.speed = 0;
- gmxx_prtx_cfg.s.speed_msb = 0;
- gmxx_prtx_cfg.s.slottime = 0;
- pcsx_miscx_ctl_reg.s.samp_pt = 0x5;
- cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 64);
- cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
- break;
- case 1000:
- gmxx_prtx_cfg.s.speed = 1;
- gmxx_prtx_cfg.s.speed_msb = 0;
- gmxx_prtx_cfg.s.slottime = 1;
- pcsx_miscx_ctl_reg.s.samp_pt = 1;
- cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 512);
- cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 8192);
- break;
- default:
- break;
- }
-
- /* Write the new misc control for PCS */
- cvmx_write_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
- pcsx_miscx_ctl_reg.u64);
-
- /* Write the new GMX settings with the port still disabled */
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
-
- /* Read GMX CFG again to make sure the config completed */
- gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
-
- /* Restore the enabled / disabled state */
- gmxx_prtx_cfg.s.en = is_enabled;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);
-
- return 0;
-}
-
-/**
- * Bring up the SGMII interface to be ready for packet I/O but
- * leave I/O disabled using the GMX override. This function
- * follows the bringup documented in 10.6.3 of the manual.
- *
- * @interface: Interface to bringup
- * @num_ports: Number of ports on the interface
- *
- * Returns Zero on success, negative on failure
- */
-static int __cvmx_helper_sgmii_hardware_init(int interface, int num_ports)
-{
- int index;
-
- __cvmx_helper_setup_gmx(interface, num_ports);
-
- for (index = 0; index < num_ports; index++) {
- int ipd_port = cvmx_helper_get_ipd_port(interface, index);
- __cvmx_helper_sgmii_hardware_init_one_time(interface, index);
- __cvmx_helper_sgmii_link_set(ipd_port,
- __cvmx_helper_sgmii_link_get
- (ipd_port));
-
- }
-
- return 0;
-}
-
-/**
- * Probe a SGMII interface and determine the number of ports
- * connected to it. The SGMII interface should still be down after
- * this call.
- *
- * @interface: Interface to probe
- *
- * Returns Number of ports on the interface. Zero to disable.
- */
-int __cvmx_helper_sgmii_probe(int interface)
-{
- union cvmx_gmxx_inf_mode mode;
-
- /*
- * Due to errata GMX-700 on CN56XXp1.x and CN52XXp1.x, the
- * interface needs to be enabled before IPD otherwise per port
- * backpressure may not work properly
- */
- mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
- mode.s.en = 1;
- cvmx_write_csr(CVMX_GMXX_INF_MODE(interface), mode.u64);
- return 4;
-}
-
-/**
- * Bringup and enable an SGMII interface. After this call packet
- * I/O should be fully functional. This is called with IPD
- * enabled but PKO disabled.
- *
- * @interface: Interface to bring up
- *
- * Returns Zero on success, negative on failure
- */
-int __cvmx_helper_sgmii_enable(int interface)
-{
- int num_ports = cvmx_helper_ports_on_interface(interface);
- int index;
-
- __cvmx_helper_sgmii_hardware_init(interface, num_ports);
-
- for (index = 0; index < num_ports; index++) {
- union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
- gmxx_prtx_cfg.u64 =
- cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
- gmxx_prtx_cfg.s.en = 1;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
- gmxx_prtx_cfg.u64);
- __cvmx_interrupt_pcsx_intx_en_reg_enable(index, interface);
- }
- __cvmx_interrupt_pcsxx_int_en_reg_enable(interface);
- __cvmx_interrupt_gmxx_enable(interface);
- return 0;
-}
-
-/**
- * Return the link state of an IPD/PKO port as returned by
- * auto negotiation. The result of this function may not match
- * Octeon's link config if auto negotiation has changed since
- * the last call to cvmx_helper_link_set().
- *
- * @ipd_port: IPD/PKO port to query
- *
- * Returns Link state
- */
-cvmx_helper_link_info_t __cvmx_helper_sgmii_link_get(int ipd_port)
-{
- cvmx_helper_link_info_t result;
- union cvmx_pcsx_miscx_ctl_reg pcs_misc_ctl_reg;
- int interface = cvmx_helper_get_interface_num(ipd_port);
- int index = cvmx_helper_get_interface_index_num(ipd_port);
- union cvmx_pcsx_mrx_control_reg pcsx_mrx_control_reg;
-
- result.u64 = 0;
-
- if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM) {
- /* The simulator gives you a simulated 1Gbps full duplex link */
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 1000;
- return result;
- }
-
- pcsx_mrx_control_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
- if (pcsx_mrx_control_reg.s.loopbck1) {
- /* Force 1Gbps full duplex link for internal loopback */
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 1000;
- return result;
- }
-
- pcs_misc_ctl_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
- if (pcs_misc_ctl_reg.s.mode) {
- /* 1000BASE-X */
- /* FIXME */
- } else {
- union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
- pcsx_miscx_ctl_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
- if (pcsx_miscx_ctl_reg.s.mac_phy) {
- /* PHY Mode */
- union cvmx_pcsx_mrx_status_reg pcsx_mrx_status_reg;
- union cvmx_pcsx_anx_results_reg pcsx_anx_results_reg;
-
- /*
-			 * Don't bother continuing if the SERDES low-level
-			 * link is down
- */
- pcsx_mrx_status_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_MRX_STATUS_REG
- (index, interface));
- if (pcsx_mrx_status_reg.s.lnk_st == 0) {
- if (__cvmx_helper_sgmii_hardware_init_link
- (interface, index) != 0)
- return result;
- }
-
- /* Read the autoneg results */
- pcsx_anx_results_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_ANX_RESULTS_REG
- (index, interface));
- if (pcsx_anx_results_reg.s.an_cpt) {
- /*
- * Auto negotiation is complete. Set
- * status accordingly.
- */
- result.s.full_duplex =
- pcsx_anx_results_reg.s.dup;
- result.s.link_up =
- pcsx_anx_results_reg.s.link_ok;
- switch (pcsx_anx_results_reg.s.spd) {
- case 0:
- result.s.speed = 10;
- break;
- case 1:
- result.s.speed = 100;
- break;
- case 2:
- result.s.speed = 1000;
- break;
- default:
- result.s.speed = 0;
- result.s.link_up = 0;
- break;
- }
- } else {
- /*
- * Auto negotiation isn't
- * complete. Return link down.
- */
- result.s.speed = 0;
- result.s.link_up = 0;
- }
- } else { /* MAC Mode */
-
- result = __cvmx_helper_board_link_get(ipd_port);
- }
- }
- return result;
-}
-
-/**
- * Configure an IPD/PKO port for the specified link state. This
- * function does not influence auto negotiation at the PHY level.
- * The passed link state must always match the link state returned
- * by cvmx_helper_link_get(). It is normally best to use
- * cvmx_helper_link_autoconf() instead.
- *
- * @ipd_port: IPD/PKO port to configure
- * @link_info: The new link state
- *
- * Returns Zero on success, negative on failure
- */
-int __cvmx_helper_sgmii_link_set(int ipd_port,
- cvmx_helper_link_info_t link_info)
-{
- int interface = cvmx_helper_get_interface_num(ipd_port);
- int index = cvmx_helper_get_interface_index_num(ipd_port);
- __cvmx_helper_sgmii_hardware_init_link(interface, index);
- return __cvmx_helper_sgmii_hardware_init_link_speed(interface, index,
- link_info);
-}
-
-/**
- * Configure a port for internal and/or external loopback. Internal
- * loopback causes packets sent by the port to be received by
- * Octeon. External loopback causes packets received from the wire to
- * be sent out again.
- *
- * @ipd_port: IPD/PKO port to loopback.
- * @enable_internal:
- * Non zero if you want internal loopback
- * @enable_external:
- * Non zero if you want external loopback
- *
- * Returns Zero on success, negative on failure.
- */
-int __cvmx_helper_sgmii_configure_loopback(int ipd_port, int enable_internal,
- int enable_external)
-{
- int interface = cvmx_helper_get_interface_num(ipd_port);
- int index = cvmx_helper_get_interface_index_num(ipd_port);
- union cvmx_pcsx_mrx_control_reg pcsx_mrx_control_reg;
- union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
-
- pcsx_mrx_control_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
- pcsx_mrx_control_reg.s.loopbck1 = enable_internal;
- cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
- pcsx_mrx_control_reg.u64);
-
- pcsx_miscx_ctl_reg.u64 =
- cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
- pcsx_miscx_ctl_reg.s.loopbck2 = enable_external;
- cvmx_write_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
- pcsx_miscx_ctl_reg.u64);
-
- __cvmx_helper_sgmii_hardware_init_link(interface, index);
- return 0;
-}
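For reference, a minimal sketch of how the SGMII helpers above fit together. The wrapper function and the assumption that cvmx-helper.h exposes these declarations are illustrative; in the driver these calls are made from the generic cvmx-helper bring-up path with IPD already enabled and PKO still disabled.

/*
 * Illustrative only: the documented SGMII bring-up order is probe,
 * enable, then re-apply the autonegotiated link state on each port.
 */
static int example_sgmii_bringup(int interface)
{
	int num_ports = __cvmx_helper_sgmii_probe(interface);
	int index;

	if (num_ports == 0)
		return -1;	/* interface disabled */

	if (__cvmx_helper_sgmii_enable(interface) < 0)
		return -1;

	for (index = 0; index < num_ports; index++) {
		int ipd_port = cvmx_helper_get_ipd_port(interface, index);

		__cvmx_helper_sgmii_link_set(ipd_port,
					     __cvmx_helper_sgmii_link_get(ipd_port));
	}
	return 0;
}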
diff --git a/drivers/staging/octeon/cvmx-helper-sgmii.h b/drivers/staging/octeon/cvmx-helper-sgmii.h
deleted file mode 100644
index 19b48d6..0000000
--- a/drivers/staging/octeon/cvmx-helper-sgmii.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- * @file
- *
- * Functions for SGMII initialization, configuration,
- * and monitoring.
- *
- */
-#ifndef __CVMX_HELPER_SGMII_H__
-#define __CVMX_HELPER_SGMII_H__
-
-/**
- * Probe an SGMII interface and determine the number of ports
- * connected to it. The SGMII interface should still be down after
- * this call.
- *
- * @interface: Interface to probe
- *
- * Returns Number of ports on the interface. Zero to disable.
- */
-extern int __cvmx_helper_sgmii_probe(int interface);
-
-/**
- * Bringup and enable an SGMII interface. After this call packet
- * I/O should be fully functional. This is called with IPD
- * enabled but PKO disabled.
- *
- * @interface: Interface to bring up
- *
- * Returns Zero on success, negative on failure
- */
-extern int __cvmx_helper_sgmii_enable(int interface);
-
-/**
- * Return the link state of an IPD/PKO port as returned by
- * auto negotiation. The result of this function may not match
- * Octeon's link config if auto negotiation has changed since
- * the last call to cvmx_helper_link_set().
- *
- * @ipd_port: IPD/PKO port to query
- *
- * Returns Link state
- */
-extern cvmx_helper_link_info_t __cvmx_helper_sgmii_link_get(int ipd_port);
-
-/**
- * Configure an IPD/PKO port for the specified link state. This
- * function does not influence auto negotiation at the PHY level.
- * The passed link state must always match the link state returned
- * by cvmx_helper_link_get(). It is normally best to use
- * cvmx_helper_link_autoconf() instead.
- *
- * @ipd_port: IPD/PKO port to configure
- * @link_info: The new link state
- *
- * Returns Zero on success, negative on failure
- */
-extern int __cvmx_helper_sgmii_link_set(int ipd_port,
- cvmx_helper_link_info_t link_info);
-
-/**
- * Configure a port for internal and/or external loopback. Internal loopback
- * causes packets sent by the port to be received by Octeon. External loopback
- * causes packets received from the wire to be sent out again.
- *
- * @ipd_port: IPD/PKO port to loopback.
- * @enable_internal:
- * Non zero if you want internal loopback
- * @enable_external:
- * Non zero if you want external loopback
- *
- * Returns Zero on success, negative on failure.
- */
-extern int __cvmx_helper_sgmii_configure_loopback(int ipd_port,
- int enable_internal,
- int enable_external);
-
-#endif
diff --git a/drivers/staging/octeon/cvmx-helper-spi.c b/drivers/staging/octeon/cvmx-helper-spi.c
deleted file mode 100644
index 8ba6c83..0000000
--- a/drivers/staging/octeon/cvmx-helper-spi.c
+++ /dev/null
@@ -1,195 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-void __cvmx_interrupt_gmxx_enable(int interface);
-void __cvmx_interrupt_spxx_int_msk_enable(int index);
-void __cvmx_interrupt_stxx_int_msk_enable(int index);
-
-/*
- * Functions for SPI initialization, configuration,
- * and monitoring.
- */
-#include <asm/octeon/octeon.h>
-
-#include "cvmx-config.h"
-#include "cvmx-spi.h"
-#include "cvmx-helper.h"
-
-#include "cvmx-pip-defs.h"
-#include "cvmx-pko-defs.h"
-
-/*
- * CVMX_HELPER_SPI_TIMEOUT is used to determine how long the SPI
- * initialization routines wait for SPI training. You can override the
- * value using executive-config.h if necessary.
- */
-#ifndef CVMX_HELPER_SPI_TIMEOUT
-#define CVMX_HELPER_SPI_TIMEOUT 10
-#endif
-
-/**
- * Probe an SPI interface and determine the number of ports
- * connected to it. The SPI interface should still be down after
- * this call.
- *
- * @interface: Interface to probe
- *
- * Returns Number of ports on the interface. Zero to disable.
- */
-int __cvmx_helper_spi_probe(int interface)
-{
- int num_ports = 0;
-
- if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
- cvmx_spi4000_is_present(interface)) {
- num_ports = 10;
- } else {
- union cvmx_pko_reg_crc_enable enable;
- num_ports = 16;
- /*
- * Unlike the SPI4000, most SPI devices don't
-		 * automatically append the L2 CRC. For everything
-		 * except the SPI4000, have PKO append the L2 CRC
-		 * to the packet.
- */
- enable.u64 = cvmx_read_csr(CVMX_PKO_REG_CRC_ENABLE);
- enable.s.enable |= 0xffff << (interface * 16);
- cvmx_write_csr(CVMX_PKO_REG_CRC_ENABLE, enable.u64);
- }
- __cvmx_helper_setup_gmx(interface, num_ports);
- return num_ports;
-}
-
-/**
- * Bringup and enable an SPI interface. After this call packet I/O
- * should be fully functional. This is called with IPD enabled but
- * PKO disabled.
- *
- * @interface: Interface to bring up
- *
- * Returns Zero on success, negative on failure
- */
-int __cvmx_helper_spi_enable(int interface)
-{
- /*
-	 * Normally the Ethernet L2 CRC is checked and stripped in the
-	 * GMX block. When you are using SPI, this isn't the case and
- * IPD needs to check the L2 CRC.
- */
- int num_ports = cvmx_helper_ports_on_interface(interface);
- int ipd_port;
- for (ipd_port = interface * 16; ipd_port < interface * 16 + num_ports;
- ipd_port++) {
- union cvmx_pip_prt_cfgx port_config;
- port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
- port_config.s.crc_en = 1;
- cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port), port_config.u64);
- }
-
- if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) {
- cvmx_spi_start_interface(interface, CVMX_SPI_MODE_DUPLEX,
- CVMX_HELPER_SPI_TIMEOUT, num_ports);
- if (cvmx_spi4000_is_present(interface))
- cvmx_spi4000_initialize(interface);
- }
- __cvmx_interrupt_spxx_int_msk_enable(interface);
- __cvmx_interrupt_stxx_int_msk_enable(interface);
- __cvmx_interrupt_gmxx_enable(interface);
- return 0;
-}
-
-/**
- * Return the link state of an IPD/PKO port as returned by
- * auto negotiation. The result of this function may not match
- * Octeon's link config if auto negotiation has changed since
- * the last call to cvmx_helper_link_set().
- *
- * @ipd_port: IPD/PKO port to query
- *
- * Returns Link state
- */
-cvmx_helper_link_info_t __cvmx_helper_spi_link_get(int ipd_port)
-{
- cvmx_helper_link_info_t result;
- int interface = cvmx_helper_get_interface_num(ipd_port);
- int index = cvmx_helper_get_interface_index_num(ipd_port);
- result.u64 = 0;
-
- if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM) {
- /* The simulator gives you a simulated full duplex link */
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 10000;
- } else if (cvmx_spi4000_is_present(interface)) {
- union cvmx_gmxx_rxx_rx_inbnd inband =
- cvmx_spi4000_check_speed(interface, index);
- result.s.link_up = inband.s.status;
- result.s.full_duplex = inband.s.duplex;
- switch (inband.s.speed) {
- case 0: /* 10 Mbps */
- result.s.speed = 10;
- break;
- case 1: /* 100 Mbps */
- result.s.speed = 100;
- break;
- case 2: /* 1 Gbps */
- result.s.speed = 1000;
- break;
- case 3: /* Illegal */
- result.s.speed = 0;
- result.s.link_up = 0;
- break;
- }
- } else {
-		/* For generic SPI we can't determine the link, so just return
-		   some sane results */
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 10000;
- }
- return result;
-}
-
-/**
- * Configure an IPD/PKO port for the specified link state. This
- * function does not influence auto negotiation at the PHY level.
- * The passed link state must always match the link state returned
- * by cvmx_helper_link_get(). It is normally best to use
- * cvmx_helper_link_autoconf() instead.
- *
- * @ipd_port: IPD/PKO port to configure
- * @link_info: The new link state
- *
- * Returns Zero on success, negative on failure
- */
-int __cvmx_helper_spi_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
-{
- /* Nothing to do. If we have a SPI4000 then the setup was already performed
- by cvmx_spi4000_check_speed(). If not then there isn't any link
- info */
- return 0;
-}
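For reference, the per-interface CRC-enable mask that __cvmx_helper_spi_probe() above ORs into PKO_REG_CRC_ENABLE can be pictured with a short sketch. The helper name and the unsigned long long literal are the sketch's own choices, not part of the original code.

/*
 * Illustrative only: each SPI interface owns 16 consecutive PKO ports,
 * so interface 0 claims CRC-enable bits 0-15 and interface 1 bits 16-31.
 */
static uint64_t example_spi_crc_mask(int interface)
{
	/* the ull suffix keeps the shift in 64-bit arithmetic */
	return 0xffffull << (interface * 16);	/* 0xffff or 0xffff0000 */
}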
diff --git a/drivers/staging/octeon/cvmx-helper-spi.h b/drivers/staging/octeon/cvmx-helper-spi.h
deleted file mode 100644
index 69bac03..0000000
--- a/drivers/staging/octeon/cvmx-helper-spi.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- * Functions for SPI initialization, configuration,
- * and monitoring.
- */
-#ifndef __CVMX_HELPER_SPI_H__
-#define __CVMX_HELPER_SPI_H__
-
-/**
- * Probe an SPI interface and determine the number of ports
- * connected to it. The SPI interface should still be down after
- * this call.
- *
- * @interface: Interface to probe
- *
- * Returns Number of ports on the interface. Zero to disable.
- */
-extern int __cvmx_helper_spi_probe(int interface);
-
-/**
- * Bringup and enable an SPI interface. After this call packet I/O
- * should be fully functional. This is called with IPD enabled but
- * PKO disabled.
- *
- * @interface: Interface to bring up
- *
- * Returns Zero on success, negative on failure
- */
-extern int __cvmx_helper_spi_enable(int interface);
-
-/**
- * Return the link state of an IPD/PKO port as returned by
- * auto negotiation. The result of this function may not match
- * Octeon's link config if auto negotiation has changed since
- * the last call to cvmx_helper_link_set().
- *
- * @ipd_port: IPD/PKO port to query
- *
- * Returns Link state
- */
-extern cvmx_helper_link_info_t __cvmx_helper_spi_link_get(int ipd_port);
-
-/**
- * Configure an IPD/PKO port for the specified link state. This
- * function does not influence auto negotiation at the PHY level.
- * The passed link state must always match the link state returned
- * by cvmx_helper_link_get(). It is normally best to use
- * cvmx_helper_link_autoconf() instead.
- *
- * @ipd_port: IPD/PKO port to configure
- * @link_info: The new link state
- *
- * Returns Zero on success, negative on failure
- */
-extern int __cvmx_helper_spi_link_set(int ipd_port,
- cvmx_helper_link_info_t link_info);
-
-#endif
diff --git a/drivers/staging/octeon/cvmx-helper-util.c b/drivers/staging/octeon/cvmx-helper-util.c
deleted file mode 100644
index 131182b..0000000
--- a/drivers/staging/octeon/cvmx-helper-util.c
+++ /dev/null
@@ -1,433 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- * Small helper utilities.
- */
-#include <linux/kernel.h>
-
-#include <asm/octeon/octeon.h>
-
-#include "cvmx-config.h"
-
-#include "cvmx-fpa.h"
-#include "cvmx-pip.h"
-#include "cvmx-pko.h"
-#include "cvmx-ipd.h"
-#include "cvmx-spi.h"
-
-#include "cvmx-helper.h"
-#include "cvmx-helper-util.h"
-
-#include <asm/octeon/cvmx-ipd-defs.h>
-
-/**
- * Convert an interface mode into a human-readable string
- *
- * @mode: Mode to convert
- *
- * Returns String
- */
-const char *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t
- mode)
-{
- switch (mode) {
- case CVMX_HELPER_INTERFACE_MODE_DISABLED:
- return "DISABLED";
- case CVMX_HELPER_INTERFACE_MODE_RGMII:
- return "RGMII";
- case CVMX_HELPER_INTERFACE_MODE_GMII:
- return "GMII";
- case CVMX_HELPER_INTERFACE_MODE_SPI:
- return "SPI";
- case CVMX_HELPER_INTERFACE_MODE_PCIE:
- return "PCIE";
- case CVMX_HELPER_INTERFACE_MODE_XAUI:
- return "XAUI";
- case CVMX_HELPER_INTERFACE_MODE_SGMII:
- return "SGMII";
- case CVMX_HELPER_INTERFACE_MODE_PICMG:
- return "PICMG";
- case CVMX_HELPER_INTERFACE_MODE_NPI:
- return "NPI";
- case CVMX_HELPER_INTERFACE_MODE_LOOP:
- return "LOOP";
- }
- return "UNKNOWN";
-}
-
-/**
- * Debug routine to dump the packet structure to the console
- *
- * @work: Work queue entry containing the packet to dump
- * Returns Zero
- */
-int cvmx_helper_dump_packet(cvmx_wqe_t *work)
-{
- uint64_t count;
- uint64_t remaining_bytes;
- union cvmx_buf_ptr buffer_ptr;
- uint64_t start_of_buffer;
- uint8_t *data_address;
- uint8_t *end_of_data;
-
- cvmx_dprintf("Packet Length: %u\n", work->len);
- cvmx_dprintf(" Input Port: %u\n", work->ipprt);
- cvmx_dprintf(" QoS: %u\n", work->qos);
- cvmx_dprintf(" Buffers: %u\n", work->word2.s.bufs);
-
- if (work->word2.s.bufs == 0) {
- union cvmx_ipd_wqe_fpa_queue wqe_pool;
- wqe_pool.u64 = cvmx_read_csr(CVMX_IPD_WQE_FPA_QUEUE);
- buffer_ptr.u64 = 0;
- buffer_ptr.s.pool = wqe_pool.s.wqe_pool;
- buffer_ptr.s.size = 128;
- buffer_ptr.s.addr = cvmx_ptr_to_phys(work->packet_data);
- if (likely(!work->word2.s.not_IP)) {
- union cvmx_pip_ip_offset pip_ip_offset;
- pip_ip_offset.u64 = cvmx_read_csr(CVMX_PIP_IP_OFFSET);
- buffer_ptr.s.addr +=
- (pip_ip_offset.s.offset << 3) -
- work->word2.s.ip_offset;
- buffer_ptr.s.addr += (work->word2.s.is_v6 ^ 1) << 2;
- } else {
- /*
- * WARNING: This code assumes that the packet
- * is not RAW. If it was, we would use
- * PIP_GBL_CFG[RAW_SHF] instead of
- * PIP_GBL_CFG[NIP_SHF].
- */
- union cvmx_pip_gbl_cfg pip_gbl_cfg;
- pip_gbl_cfg.u64 = cvmx_read_csr(CVMX_PIP_GBL_CFG);
- buffer_ptr.s.addr += pip_gbl_cfg.s.nip_shf;
- }
- } else
- buffer_ptr = work->packet_ptr;
- remaining_bytes = work->len;
-
- while (remaining_bytes) {
- start_of_buffer =
- ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
- cvmx_dprintf(" Buffer Start:%llx\n",
- (unsigned long long)start_of_buffer);
- cvmx_dprintf(" Buffer I : %u\n", buffer_ptr.s.i);
- cvmx_dprintf(" Buffer Back: %u\n", buffer_ptr.s.back);
- cvmx_dprintf(" Buffer Pool: %u\n", buffer_ptr.s.pool);
- cvmx_dprintf(" Buffer Data: %llx\n",
- (unsigned long long)buffer_ptr.s.addr);
- cvmx_dprintf(" Buffer Size: %u\n", buffer_ptr.s.size);
-
- cvmx_dprintf("\t\t");
- data_address = (uint8_t *) cvmx_phys_to_ptr(buffer_ptr.s.addr);
- end_of_data = data_address + buffer_ptr.s.size;
- count = 0;
- while (data_address < end_of_data) {
- if (remaining_bytes == 0)
- break;
- else
- remaining_bytes--;
- cvmx_dprintf("%02x", (unsigned int)*data_address);
- data_address++;
- if (remaining_bytes && (count == 7)) {
- cvmx_dprintf("\n\t\t");
- count = 0;
- } else
- count++;
- }
- cvmx_dprintf("\n");
-
- if (remaining_bytes)
- buffer_ptr = *(union cvmx_buf_ptr *)
- cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
- }
- return 0;
-}
-
-/**
- * Setup Random Early Drop on a specific input queue
- *
- * @queue: Input queue to setup RED on (0-7)
- * @pass_thresh:
- * Packets will begin slowly dropping when there are less than
- * this many packet buffers free in FPA 0.
- * @drop_thresh:
- *     All incoming packets will be dropped when there are less
- * than this many free packet buffers in FPA 0.
- * Returns Zero on success. Negative on failure
- */
-int cvmx_helper_setup_red_queue(int queue, int pass_thresh, int drop_thresh)
-{
- union cvmx_ipd_qosx_red_marks red_marks;
- union cvmx_ipd_red_quex_param red_param;
-
- /* Set RED to begin dropping packets when there are pass_thresh buffers
- left. It will linearly drop more packets until reaching drop_thresh
- buffers */
- red_marks.u64 = 0;
- red_marks.s.drop = drop_thresh;
- red_marks.s.pass = pass_thresh;
- cvmx_write_csr(CVMX_IPD_QOSX_RED_MARKS(queue), red_marks.u64);
-
- /* Use the actual queue 0 counter, not the average */
- red_param.u64 = 0;
- red_param.s.prb_con =
- (255ul << 24) / (red_marks.s.pass - red_marks.s.drop);
- red_param.s.avg_con = 1;
- red_param.s.new_con = 255;
- red_param.s.use_pcnt = 1;
- cvmx_write_csr(CVMX_IPD_RED_QUEX_PARAM(queue), red_param.u64);
- return 0;
-}
-
-/**
- * Setup Random Early Drop to automatically begin dropping packets.
- *
- * @pass_thresh:
- * Packets will begin slowly dropping when there are less than
- * this many packet buffers free in FPA 0.
- * @drop_thresh:
- *     All incoming packets will be dropped when there are less
- * than this many free packet buffers in FPA 0.
- * Returns Zero on success. Negative on failure
- */
-int cvmx_helper_setup_red(int pass_thresh, int drop_thresh)
-{
- union cvmx_ipd_portx_bp_page_cnt page_cnt;
- union cvmx_ipd_bp_prt_red_end ipd_bp_prt_red_end;
- union cvmx_ipd_red_port_enable red_port_enable;
- int queue;
- int interface;
- int port;
-
- /* Disable backpressure based on queued buffers. It needs SW support */
- page_cnt.u64 = 0;
- page_cnt.s.bp_enb = 0;
- page_cnt.s.page_cnt = 100;
- for (interface = 0; interface < 2; interface++) {
- for (port = cvmx_helper_get_first_ipd_port(interface);
- port < cvmx_helper_get_last_ipd_port(interface); port++)
- cvmx_write_csr(CVMX_IPD_PORTX_BP_PAGE_CNT(port),
- page_cnt.u64);
- }
-
- for (queue = 0; queue < 8; queue++)
- cvmx_helper_setup_red_queue(queue, pass_thresh, drop_thresh);
-
-	/* Shut off the dropping based on the per-port page count. SW isn't
- decrementing it right now */
- ipd_bp_prt_red_end.u64 = 0;
- ipd_bp_prt_red_end.s.prt_enb = 0;
- cvmx_write_csr(CVMX_IPD_BP_PRT_RED_END, ipd_bp_prt_red_end.u64);
-
- red_port_enable.u64 = 0;
- red_port_enable.s.prt_enb = 0xfffffffffull;
- red_port_enable.s.avg_dly = 10000;
- red_port_enable.s.prb_dly = 10000;
- cvmx_write_csr(CVMX_IPD_RED_PORT_ENABLE, red_port_enable.u64);
-
- return 0;
-}
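For reference, a hedged usage sketch of the two RED helpers above; the thresholds are example values only, not recommendations.

/*
 * Illustrative only: with pass_thresh = 256 and drop_thresh = 128,
 * cvmx_helper_setup_red_queue() programs each of the 8 queues with
 * prb_con = (255 << 24) / (256 - 128) = 33423360.
 */
static void example_enable_red(void)
{
	cvmx_helper_setup_red(256, 128);	/* example thresholds */
}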
-
-/**
- * Setup the common GMX settings that determine the number of
- * ports. These settings apply to almost all configurations of all
- * chips.
- *
- * @interface: Interface to configure
- * @num_ports: Number of ports on the interface
- *
- * Returns Zero on success, negative on failure
- */
-int __cvmx_helper_setup_gmx(int interface, int num_ports)
-{
- union cvmx_gmxx_tx_prts gmx_tx_prts;
- union cvmx_gmxx_rx_prts gmx_rx_prts;
- union cvmx_pko_reg_gmx_port_mode pko_mode;
- union cvmx_gmxx_txx_thresh gmx_tx_thresh;
- int index;
-
- /* Tell GMX the number of TX ports on this interface */
- gmx_tx_prts.u64 = cvmx_read_csr(CVMX_GMXX_TX_PRTS(interface));
- gmx_tx_prts.s.prts = num_ports;
- cvmx_write_csr(CVMX_GMXX_TX_PRTS(interface), gmx_tx_prts.u64);
-
- /* Tell GMX the number of RX ports on this interface. This only
-	   applies to *GMII and XAUI ports */
- if (cvmx_helper_interface_get_mode(interface) ==
- CVMX_HELPER_INTERFACE_MODE_RGMII
- || cvmx_helper_interface_get_mode(interface) ==
- CVMX_HELPER_INTERFACE_MODE_SGMII
- || cvmx_helper_interface_get_mode(interface) ==
- CVMX_HELPER_INTERFACE_MODE_GMII
- || cvmx_helper_interface_get_mode(interface) ==
- CVMX_HELPER_INTERFACE_MODE_XAUI) {
- if (num_ports > 4) {
- cvmx_dprintf("__cvmx_helper_setup_gmx: Illegal "
- "num_ports\n");
- return -1;
- }
-
- gmx_rx_prts.u64 = cvmx_read_csr(CVMX_GMXX_RX_PRTS(interface));
- gmx_rx_prts.s.prts = num_ports;
- cvmx_write_csr(CVMX_GMXX_RX_PRTS(interface), gmx_rx_prts.u64);
- }
-
- /* Skip setting CVMX_PKO_REG_GMX_PORT_MODE on 30XX, 31XX, and 50XX */
- if (!OCTEON_IS_MODEL(OCTEON_CN30XX) && !OCTEON_IS_MODEL(OCTEON_CN31XX)
- && !OCTEON_IS_MODEL(OCTEON_CN50XX)) {
- /* Tell PKO the number of ports on this interface */
- pko_mode.u64 = cvmx_read_csr(CVMX_PKO_REG_GMX_PORT_MODE);
- if (interface == 0) {
- if (num_ports == 1)
- pko_mode.s.mode0 = 4;
- else if (num_ports == 2)
- pko_mode.s.mode0 = 3;
- else if (num_ports <= 4)
- pko_mode.s.mode0 = 2;
- else if (num_ports <= 8)
- pko_mode.s.mode0 = 1;
- else
- pko_mode.s.mode0 = 0;
- } else {
- if (num_ports == 1)
- pko_mode.s.mode1 = 4;
- else if (num_ports == 2)
- pko_mode.s.mode1 = 3;
- else if (num_ports <= 4)
- pko_mode.s.mode1 = 2;
- else if (num_ports <= 8)
- pko_mode.s.mode1 = 1;
- else
- pko_mode.s.mode1 = 0;
- }
- cvmx_write_csr(CVMX_PKO_REG_GMX_PORT_MODE, pko_mode.u64);
- }
-
- /*
- * Set GMX to buffer as much data as possible before starting
-	 * transmit. This reduces the chances that we have a TX
-	 * underrun due to memory contention. Any packet that fits entirely
-	 * in the GMX FIFO can never have an underrun regardless of
- * memory load.
- */
- gmx_tx_thresh.u64 = cvmx_read_csr(CVMX_GMXX_TXX_THRESH(0, interface));
- if (OCTEON_IS_MODEL(OCTEON_CN30XX) || OCTEON_IS_MODEL(OCTEON_CN31XX)
- || OCTEON_IS_MODEL(OCTEON_CN50XX)) {
- /* These chips have a fixed max threshold of 0x40 */
- gmx_tx_thresh.s.cnt = 0x40;
- } else {
- /* Choose the max value for the number of ports */
- if (num_ports <= 1)
- gmx_tx_thresh.s.cnt = 0x100 / 1;
- else if (num_ports == 2)
- gmx_tx_thresh.s.cnt = 0x100 / 2;
- else
- gmx_tx_thresh.s.cnt = 0x100 / 4;
- }
- /*
- * SPI and XAUI can have lots of ports but the GMX hardware
- * only ever has a max of 4.
- */
- if (num_ports > 4)
- num_ports = 4;
- for (index = 0; index < num_ports; index++)
- cvmx_write_csr(CVMX_GMXX_TXX_THRESH(index, interface),
- gmx_tx_thresh.u64);
-
- return 0;
-}
-
-/**
- * Returns the IPD/PKO port number for a port on the given
- * interface.
- *
- * @interface: Interface to use
- * @port: Port on the interface
- *
- * Returns IPD/PKO port number
- */
-int cvmx_helper_get_ipd_port(int interface, int port)
-{
- switch (interface) {
- case 0:
- return port;
- case 1:
- return port + 16;
- case 2:
- return port + 32;
- case 3:
- return port + 36;
- }
- return -1;
-}
-
-/**
- * Returns the interface number for an IPD/PKO port number.
- *
- * @ipd_port: IPD/PKO port number
- *
- * Returns Interface number
- */
-int cvmx_helper_get_interface_num(int ipd_port)
-{
- if (ipd_port < 16)
- return 0;
- else if (ipd_port < 32)
- return 1;
- else if (ipd_port < 36)
- return 2;
- else if (ipd_port < 40)
- return 3;
- else
- cvmx_dprintf("cvmx_helper_get_interface_num: Illegal IPD "
- "port number\n");
-
- return -1;
-}
-
-/**
- * Returns the interface index number for an IPD/PKO port
- * number.
- *
- * @ipd_port: IPD/PKO port number
- *
- * Returns Interface index number
- */
-int cvmx_helper_get_interface_index_num(int ipd_port)
-{
- if (ipd_port < 32)
- return ipd_port & 15;
- else if (ipd_port < 36)
- return ipd_port & 3;
- else if (ipd_port < 40)
- return ipd_port & 3;
- else
- cvmx_dprintf("cvmx_helper_get_interface_index_num: "
- "Illegal IPD port number\n");
-
- return -1;
-}
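For reference, a small sketch that round-trips the IPD/PKO port numbering implemented by the three helpers above; the values are just a worked example.

/* Illustrative only: interface 1, index 3 maps to IPD port 3 + 16 = 19. */
static void example_port_mapping(void)
{
	int ipd_port = cvmx_helper_get_ipd_port(1, 3);	/* 19 */

	cvmx_dprintf("interface %d index %d\n",
		     cvmx_helper_get_interface_num(ipd_port),	     /* 19 < 32 -> 1 */
		     cvmx_helper_get_interface_index_num(ipd_port)); /* 19 & 15 -> 3 */
}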
diff --git a/drivers/staging/octeon/cvmx-helper-util.h b/drivers/staging/octeon/cvmx-helper-util.h
deleted file mode 100644
index 6a6e52f..0000000
--- a/drivers/staging/octeon/cvmx-helper-util.h
+++ /dev/null
@@ -1,215 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- *
- * Small helper utilities.
- *
- */
-
-#ifndef __CVMX_HELPER_UTIL_H__
-#define __CVMX_HELPER_UTIL_H__
-
-/**
- * Convert an interface mode into a human-readable string
- *
- * @mode: Mode to convert
- *
- * Returns String
- */
-extern const char
- *cvmx_helper_interface_mode_to_string(cvmx_helper_interface_mode_t mode);
-
-/**
- * Debug routine to dump the packet structure to the console
- *
- * @work: Work queue entry containing the packet to dump
- * Returns Zero
- */
-extern int cvmx_helper_dump_packet(cvmx_wqe_t *work);
-
-/**
- * Setup Random Early Drop on a specific input queue
- *
- * @queue: Input queue to setup RED on (0-7)
- * @pass_thresh:
- * Packets will begin slowly dropping when there are less than
- * this many packet buffers free in FPA 0.
- * @drop_thresh:
- *     All incoming packets will be dropped when there are less
- * than this many free packet buffers in FPA 0.
- * Returns Zero on success. Negative on failure
- */
-extern int cvmx_helper_setup_red_queue(int queue, int pass_thresh,
- int drop_thresh);
-
-/**
- * Setup Random Early Drop to automatically begin dropping packets.
- *
- * @pass_thresh:
- * Packets will begin slowly dropping when there are less than
- * this many packet buffers free in FPA 0.
- * @drop_thresh:
- *     All incoming packets will be dropped when there are less
- * than this many free packet buffers in FPA 0.
- * Returns Zero on success. Negative on failure
- */
-extern int cvmx_helper_setup_red(int pass_thresh, int drop_thresh);
-
-/**
- * Get the version of the CVMX libraries.
- *
- * Returns Version string. Note this buffer is allocated statically
- * and will be shared by all callers.
- */
-extern const char *cvmx_helper_get_version(void);
-
-/**
- * Setup the common GMX settings that determine the number of
- * ports. These settings apply to almost all configurations of all
- * chips.
- *
- * @interface: Interface to configure
- * @num_ports: Number of ports on the interface
- *
- * Returns Zero on success, negative on failure
- */
-extern int __cvmx_helper_setup_gmx(int interface, int num_ports);
-
-/**
- * Returns the IPD/PKO port number for a port on the given
- * interface.
- *
- * @interface: Interface to use
- * @port: Port on the interface
- *
- * Returns IPD/PKO port number
- */
-extern int cvmx_helper_get_ipd_port(int interface, int port);
-
-/**
- * Returns the IPD/PKO port number for the first port on the given
- * interface.
- *
- * @interface: Interface to use
- *
- * Returns IPD/PKO port number
- */
-static inline int cvmx_helper_get_first_ipd_port(int interface)
-{
- return cvmx_helper_get_ipd_port(interface, 0);
-}
-
-/**
- * Returns the IPD/PKO port number for the last port on the given
- * interface.
- *
- * @interface: Interface to use
- *
- * Returns IPD/PKO port number
- */
-static inline int cvmx_helper_get_last_ipd_port(int interface)
-{
- extern int cvmx_helper_ports_on_interface(int interface);
-
- return cvmx_helper_get_first_ipd_port(interface) +
- cvmx_helper_ports_on_interface(interface) - 1;
-}
-
-/**
- * Free the packet buffers contained in a work queue entry.
- * The work queue entry is not freed.
- *
- * @work: Work queue entry with packet to free
- */
-static inline void cvmx_helper_free_packet_data(cvmx_wqe_t *work)
-{
- uint64_t number_buffers;
- union cvmx_buf_ptr buffer_ptr;
- union cvmx_buf_ptr next_buffer_ptr;
- uint64_t start_of_buffer;
-
- number_buffers = work->word2.s.bufs;
- if (number_buffers == 0)
- return;
- buffer_ptr = work->packet_ptr;
-
- /*
- * Since the number of buffers is not zero, we know this is
- * not a dynamic short packet. We need to check if it is a
- * packet received with IPD_CTL_STATUS[NO_WPTR]. If this is
- * true, we need to free all buffers except for the first
- * one. The caller doesn't expect their WQE pointer to be
- * freed
- */
- start_of_buffer = ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
- if (cvmx_ptr_to_phys(work) == start_of_buffer) {
- next_buffer_ptr =
- *(union cvmx_buf_ptr *) cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
- buffer_ptr = next_buffer_ptr;
- number_buffers--;
- }
-
- while (number_buffers--) {
- /*
- * Remember the back pointer is in cache lines, not
-		 * 64-bit words
- */
- start_of_buffer =
- ((buffer_ptr.s.addr >> 7) - buffer_ptr.s.back) << 7;
- /*
- * Read pointer to next buffer before we free the
- * current buffer.
- */
- next_buffer_ptr =
- *(union cvmx_buf_ptr *) cvmx_phys_to_ptr(buffer_ptr.s.addr - 8);
- cvmx_fpa_free(cvmx_phys_to_ptr(start_of_buffer),
- buffer_ptr.s.pool, 0);
- buffer_ptr = next_buffer_ptr;
- }
-}
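For reference, a sketch of how the dump and free helpers above are typically paired when a received work queue entry is discarded; the wrapper function itself is hypothetical.

/* Illustrative only: frees the packet buffers, never the WQE itself. */
static inline void example_drop_work(cvmx_wqe_t *work)
{
	cvmx_helper_dump_packet(work);		/* optional debug dump */
	cvmx_helper_free_packet_data(work);	/* releases the packet buffers */
}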
-
-/**
- * Returns the interface number for an IPD/PKO port number.
- *
- * @ipd_port: IPD/PKO port number
- *
- * Returns Interface number
- */
-extern int cvmx_helper_get_interface_num(int ipd_port);
-
-/**
- * Returns the interface index number for an IPD/PKO port
- * number.
- *
- * @ipd_port: IPD/PKO port number
- *
- * Returns Interface index number
- */
-extern int cvmx_helper_get_interface_index_num(int ipd_port);
-
-#endif /* __CVMX_HELPER_UTIL_H__ */
diff --git a/drivers/staging/octeon/cvmx-helper-xaui.c b/drivers/staging/octeon/cvmx-helper-xaui.c
deleted file mode 100644
index a11e676..0000000
--- a/drivers/staging/octeon/cvmx-helper-xaui.c
+++ /dev/null
@@ -1,348 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- * Functions for XAUI initialization, configuration,
- * and monitoring.
- *
- */
-
-#include <asm/octeon/octeon.h>
-
-#include "cvmx-config.h"
-
-#include "cvmx-helper.h"
-
-#include "cvmx-pko-defs.h"
-#include "cvmx-gmxx-defs.h"
-#include "cvmx-pcsxx-defs.h"
-
-void __cvmx_interrupt_gmxx_enable(int interface);
-void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block);
-void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index);
-/**
- * Probe a XAUI interface and determine the number of ports
- * connected to it. The XAUI interface should still be down
- * after this call.
- *
- * @interface: Interface to probe
- *
- * Returns Number of ports on the interface. Zero to disable.
- */
-int __cvmx_helper_xaui_probe(int interface)
-{
- int i;
- union cvmx_gmxx_hg2_control gmx_hg2_control;
- union cvmx_gmxx_inf_mode mode;
-
- /*
- * Due to errata GMX-700 on CN56XXp1.x and CN52XXp1.x, the
- * interface needs to be enabled before IPD otherwise per port
- * backpressure may not work properly.
- */
- mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
- mode.s.en = 1;
- cvmx_write_csr(CVMX_GMXX_INF_MODE(interface), mode.u64);
-
- __cvmx_helper_setup_gmx(interface, 1);
-
- /*
- * Setup PKO to support 16 ports for HiGig2 virtual
- * ports. We're pointing all of the PKO packet ports for this
- * interface to the XAUI. This allows us to use HiGig2
- * backpressure per port.
- */
- for (i = 0; i < 16; i++) {
- union cvmx_pko_mem_port_ptrs pko_mem_port_ptrs;
- pko_mem_port_ptrs.u64 = 0;
- /*
- * We set each PKO port to have equal priority in a
- * round robin fashion.
- */
- pko_mem_port_ptrs.s.static_p = 0;
- pko_mem_port_ptrs.s.qos_mask = 0xff;
- /* All PKO ports map to the same XAUI hardware port */
- pko_mem_port_ptrs.s.eid = interface * 4;
- pko_mem_port_ptrs.s.pid = interface * 16 + i;
- cvmx_write_csr(CVMX_PKO_MEM_PORT_PTRS, pko_mem_port_ptrs.u64);
- }
-
- /* If HiGig2 is enabled return 16 ports, otherwise return 1 port */
- gmx_hg2_control.u64 = cvmx_read_csr(CVMX_GMXX_HG2_CONTROL(interface));
- if (gmx_hg2_control.s.hg2tx_en)
- return 16;
- else
- return 1;
-}
-
-/**
- * Bringup and enable a XAUI interface. After this call packet
- * I/O should be fully functional. This is called with IPD
- * enabled but PKO disabled.
- *
- * @interface: Interface to bring up
- *
- * Returns Zero on success, negative on failure
- */
-int __cvmx_helper_xaui_enable(int interface)
-{
- union cvmx_gmxx_prtx_cfg gmx_cfg;
- union cvmx_pcsxx_control1_reg xauiCtl;
- union cvmx_pcsxx_misc_ctl_reg xauiMiscCtl;
- union cvmx_gmxx_tx_xaui_ctl gmxXauiTxCtl;
- union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
- union cvmx_gmxx_tx_int_en gmx_tx_int_en;
- union cvmx_pcsxx_int_en_reg pcsx_int_en_reg;
-
- /* (1) Interface has already been enabled. */
-
- /* (2) Disable GMX. */
- xauiMiscCtl.u64 = cvmx_read_csr(CVMX_PCSXX_MISC_CTL_REG(interface));
- xauiMiscCtl.s.gmxeno = 1;
- cvmx_write_csr(CVMX_PCSXX_MISC_CTL_REG(interface), xauiMiscCtl.u64);
-
- /* (3) Disable GMX and PCSX interrupts. */
- gmx_rx_int_en.u64 = cvmx_read_csr(CVMX_GMXX_RXX_INT_EN(0, interface));
- cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(0, interface), 0x0);
- gmx_tx_int_en.u64 = cvmx_read_csr(CVMX_GMXX_TX_INT_EN(interface));
- cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), 0x0);
- pcsx_int_en_reg.u64 = cvmx_read_csr(CVMX_PCSXX_INT_EN_REG(interface));
- cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), 0x0);
-
- /* (4) Bring up the PCSX and GMX reconciliation layer. */
- /* (4)a Set polarity and lane swapping. */
- /* (4)b */
- gmxXauiTxCtl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
-	/* Enable better IFG packing, which improves performance */
- gmxXauiTxCtl.s.dic_en = 1;
- gmxXauiTxCtl.s.uni_en = 0;
- cvmx_write_csr(CVMX_GMXX_TX_XAUI_CTL(interface), gmxXauiTxCtl.u64);
-
-	/* (4)c Apply reset sequence */
- xauiCtl.u64 = cvmx_read_csr(CVMX_PCSXX_CONTROL1_REG(interface));
- xauiCtl.s.lo_pwr = 0;
- xauiCtl.s.reset = 1;
- cvmx_write_csr(CVMX_PCSXX_CONTROL1_REG(interface), xauiCtl.u64);
-
- /* Wait for PCS to come out of reset */
- if (CVMX_WAIT_FOR_FIELD64
- (CVMX_PCSXX_CONTROL1_REG(interface), union cvmx_pcsxx_control1_reg,
- reset, ==, 0, 10000))
- return -1;
- /* Wait for PCS to be aligned */
- if (CVMX_WAIT_FOR_FIELD64
- (CVMX_PCSXX_10GBX_STATUS_REG(interface),
- union cvmx_pcsxx_10gbx_status_reg, alignd, ==, 1, 10000))
- return -1;
- /* Wait for RX to be ready */
- if (CVMX_WAIT_FOR_FIELD64
- (CVMX_GMXX_RX_XAUI_CTL(interface), union cvmx_gmxx_rx_xaui_ctl,
- status, ==, 0, 10000))
- return -1;
-
- /* (6) Configure GMX */
- gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
- gmx_cfg.s.en = 0;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
-
- /* Wait for GMX RX to be idle */
- if (CVMX_WAIT_FOR_FIELD64
- (CVMX_GMXX_PRTX_CFG(0, interface), union cvmx_gmxx_prtx_cfg,
- rx_idle, ==, 1, 10000))
- return -1;
- /* Wait for GMX TX to be idle */
- if (CVMX_WAIT_FOR_FIELD64
- (CVMX_GMXX_PRTX_CFG(0, interface), union cvmx_gmxx_prtx_cfg,
- tx_idle, ==, 1, 10000))
- return -1;
-
- /* GMX configure */
- gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
- gmx_cfg.s.speed = 1;
- gmx_cfg.s.speed_msb = 0;
- gmx_cfg.s.slottime = 1;
- cvmx_write_csr(CVMX_GMXX_TX_PRTS(interface), 1);
- cvmx_write_csr(CVMX_GMXX_TXX_SLOT(0, interface), 512);
- cvmx_write_csr(CVMX_GMXX_TXX_BURST(0, interface), 8192);
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
-
- /* (7) Clear out any error state */
- cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(0, interface),
- cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(0, interface)));
- cvmx_write_csr(CVMX_GMXX_TX_INT_REG(interface),
- cvmx_read_csr(CVMX_GMXX_TX_INT_REG(interface)));
- cvmx_write_csr(CVMX_PCSXX_INT_REG(interface),
- cvmx_read_csr(CVMX_PCSXX_INT_REG(interface)));
-
- /* Wait for receive link */
- if (CVMX_WAIT_FOR_FIELD64
- (CVMX_PCSXX_STATUS1_REG(interface), union cvmx_pcsxx_status1_reg,
- rcv_lnk, ==, 1, 10000))
- return -1;
- if (CVMX_WAIT_FOR_FIELD64
- (CVMX_PCSXX_STATUS2_REG(interface), union cvmx_pcsxx_status2_reg,
- xmtflt, ==, 0, 10000))
- return -1;
- if (CVMX_WAIT_FOR_FIELD64
- (CVMX_PCSXX_STATUS2_REG(interface), union cvmx_pcsxx_status2_reg,
- rcvflt, ==, 0, 10000))
- return -1;
-
- cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(0, interface), gmx_rx_int_en.u64);
- cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), gmx_tx_int_en.u64);
- cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), pcsx_int_en_reg.u64);
-
- cvmx_helper_link_autoconf(cvmx_helper_get_ipd_port(interface, 0));
-
- /* (8) Enable packet reception */
- xauiMiscCtl.s.gmxeno = 0;
- cvmx_write_csr(CVMX_PCSXX_MISC_CTL_REG(interface), xauiMiscCtl.u64);
-
- gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(0, interface));
- gmx_cfg.s.en = 1;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG(0, interface), gmx_cfg.u64);
-
- __cvmx_interrupt_pcsx_intx_en_reg_enable(0, interface);
- __cvmx_interrupt_pcsx_intx_en_reg_enable(1, interface);
- __cvmx_interrupt_pcsx_intx_en_reg_enable(2, interface);
- __cvmx_interrupt_pcsx_intx_en_reg_enable(3, interface);
- __cvmx_interrupt_pcsxx_int_en_reg_enable(interface);
- __cvmx_interrupt_gmxx_enable(interface);
-
- return 0;
-}
-
-/**
- * Return the link state of an IPD/PKO port as returned by
- * auto negotiation. The result of this function may not match
- * Octeon's link config if auto negotiation has changed since
- * the last call to cvmx_helper_link_set().
- *
- * @ipd_port: IPD/PKO port to query
- *
- * Returns Link state
- */
-cvmx_helper_link_info_t __cvmx_helper_xaui_link_get(int ipd_port)
-{
- int interface = cvmx_helper_get_interface_num(ipd_port);
- union cvmx_gmxx_tx_xaui_ctl gmxx_tx_xaui_ctl;
- union cvmx_gmxx_rx_xaui_ctl gmxx_rx_xaui_ctl;
- union cvmx_pcsxx_status1_reg pcsxx_status1_reg;
- cvmx_helper_link_info_t result;
-
- gmxx_tx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
- gmxx_rx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RX_XAUI_CTL(interface));
- pcsxx_status1_reg.u64 =
- cvmx_read_csr(CVMX_PCSXX_STATUS1_REG(interface));
- result.u64 = 0;
-
- /* Only return a link if both RX and TX are happy */
- if ((gmxx_tx_xaui_ctl.s.ls == 0) && (gmxx_rx_xaui_ctl.s.status == 0) &&
- (pcsxx_status1_reg.s.rcv_lnk == 1)) {
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 10000;
- } else {
- /* Disable GMX and PCSX interrupts. */
- cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(0, interface), 0x0);
- cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), 0x0);
- cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(interface), 0x0);
- }
- return result;
-}
-
-/**
- * Configure an IPD/PKO port for the specified link state. This
- * function does not influence auto negotiation at the PHY level.
- * The passed link state must always match the link state returned
- * by cvmx_helper_link_get(). It is normally best to use
- * cvmx_helper_link_autoconf() instead.
- *
- * @ipd_port: IPD/PKO port to configure
- * @link_info: The new link state
- *
- * Returns Zero on success, negative on failure
- */
-int __cvmx_helper_xaui_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
-{
- int interface = cvmx_helper_get_interface_num(ipd_port);
- union cvmx_gmxx_tx_xaui_ctl gmxx_tx_xaui_ctl;
- union cvmx_gmxx_rx_xaui_ctl gmxx_rx_xaui_ctl;
-
- gmxx_tx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_TX_XAUI_CTL(interface));
- gmxx_rx_xaui_ctl.u64 = cvmx_read_csr(CVMX_GMXX_RX_XAUI_CTL(interface));
-
- /* If the link shouldn't be up, then just return */
- if (!link_info.s.link_up)
- return 0;
-
- /* Do nothing if both RX and TX are happy */
- if ((gmxx_tx_xaui_ctl.s.ls == 0) && (gmxx_rx_xaui_ctl.s.status == 0))
- return 0;
-
- /* Bring the link up */
- return __cvmx_helper_xaui_enable(interface);
-}
-
-/**
- * Configure a port for internal and/or external loopback. Internal loopback
- * causes packets sent by the port to be received by Octeon. External loopback
- * causes packets received from the wire to be sent out again.
- *
- * @ipd_port: IPD/PKO port to loopback.
- * @enable_internal:
- * Non zero if you want internal loopback
- * @enable_external:
- * Non zero if you want external loopback
- *
- * Returns Zero on success, negative on failure.
- */
-extern int __cvmx_helper_xaui_configure_loopback(int ipd_port,
- int enable_internal,
- int enable_external)
-{
- int interface = cvmx_helper_get_interface_num(ipd_port);
- union cvmx_pcsxx_control1_reg pcsxx_control1_reg;
- union cvmx_gmxx_xaui_ext_loopback gmxx_xaui_ext_loopback;
-
- /* Set the internal loop */
- pcsxx_control1_reg.u64 =
- cvmx_read_csr(CVMX_PCSXX_CONTROL1_REG(interface));
- pcsxx_control1_reg.s.loopbck1 = enable_internal;
- cvmx_write_csr(CVMX_PCSXX_CONTROL1_REG(interface),
- pcsxx_control1_reg.u64);
-
- /* Set the external loop */
- gmxx_xaui_ext_loopback.u64 =
- cvmx_read_csr(CVMX_GMXX_XAUI_EXT_LOOPBACK(interface));
- gmxx_xaui_ext_loopback.s.en = enable_external;
- cvmx_write_csr(CVMX_GMXX_XAUI_EXT_LOOPBACK(interface),
- gmxx_xaui_ext_loopback.u64);
-
- /* Take the link through a reset */
- return __cvmx_helper_xaui_enable(interface);
-}
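For reference, a sketch of driving the loopback helper above for the XAUI port of an interface; the wrapper and the choice of internal-only loopback are assumptions made for the example.

static int example_xaui_internal_loopback(int interface)
{
	int ipd_port = cvmx_helper_get_ipd_port(interface, 0);

	/* internal loopback on, external loopback off */
	return __cvmx_helper_xaui_configure_loopback(ipd_port, 1, 0);
}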
diff --git a/drivers/staging/octeon/cvmx-helper-xaui.h b/drivers/staging/octeon/cvmx-helper-xaui.h
deleted file mode 100644
index 4b4db2f..0000000
--- a/drivers/staging/octeon/cvmx-helper-xaui.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- * @file
- *
- * Functions for XAUI initialization, configuration,
- * and monitoring.
- *
- */
-#ifndef __CVMX_HELPER_XAUI_H__
-#define __CVMX_HELPER_XAUI_H__
-
-/**
- * Probe a XAUI interface and determine the number of ports
- * connected to it. The XAUI interface should still be down
- * after this call.
- *
- * @interface: Interface to probe
- *
- * Returns Number of ports on the interface. Zero to disable.
- */
-extern int __cvmx_helper_xaui_probe(int interface);
-
-/**
- * Bringup and enable a XAUI interface. After this call packet
- * I/O should be fully functional. This is called with IPD
- * enabled but PKO disabled.
- *
- * @interface: Interface to bring up
- *
- * Returns Zero on success, negative on failure
- */
-extern int __cvmx_helper_xaui_enable(int interface);
-
-/**
- * Return the link state of an IPD/PKO port as returned by
- * auto negotiation. The result of this function may not match
- * Octeon's link config if auto negotiation has changed since
- * the last call to cvmx_helper_link_set().
- *
- * @ipd_port: IPD/PKO port to query
- *
- * Returns Link state
- */
-extern cvmx_helper_link_info_t __cvmx_helper_xaui_link_get(int ipd_port);
-
-/**
- * Configure an IPD/PKO port for the specified link state. This
- * function does not influence auto negotiation at the PHY level.
- * The passed link state must always match the link state returned
- * by cvmx_helper_link_get(). It is normally best to use
- * cvmx_helper_link_autoconf() instead.
- *
- * @ipd_port: IPD/PKO port to configure
- * @link_info: The new link state
- *
- * Returns Zero on success, negative on failure
- */
-extern int __cvmx_helper_xaui_link_set(int ipd_port,
- cvmx_helper_link_info_t link_info);
-
-/**
- * Configure a port for internal and/or external loopback. Internal loopback
- * causes packets sent by the port to be received by Octeon. External loopback
- * causes packets received from the wire to be sent out again.
- *
- * @ipd_port: IPD/PKO port to loopback.
- * @enable_internal:
- * Non zero if you want internal loopback
- * @enable_external:
- * Non zero if you want external loopback
- *
- * Returns Zero on success, negative on failure.
- */
-extern int __cvmx_helper_xaui_configure_loopback(int ipd_port,
- int enable_internal,
- int enable_external);
-#endif
diff --git a/drivers/staging/octeon/cvmx-helper.c b/drivers/staging/octeon/cvmx-helper.c
deleted file mode 100644
index e9c5c83..0000000
--- a/drivers/staging/octeon/cvmx-helper.c
+++ /dev/null
@@ -1,1058 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- *
- * Helper functions for common, but complicated tasks.
- *
- */
-#include <asm/octeon/octeon.h>
-
-#include "cvmx-config.h"
-
-#include "cvmx-fpa.h"
-#include "cvmx-pip.h"
-#include "cvmx-pko.h"
-#include "cvmx-ipd.h"
-#include "cvmx-spi.h"
-#include "cvmx-helper.h"
-#include "cvmx-helper-board.h"
-
-#include "cvmx-pip-defs.h"
-#include "cvmx-smix-defs.h"
-#include "cvmx-asxx-defs.h"
-
-/**
- * cvmx_override_pko_queue_priority(int pko_port, uint64_t
- * priorities[16]) is a function pointer. It is meant to allow
- * customization of the PKO queue priorities based on the port
- * number. Users should set this pointer to a function before
- * calling any cvmx-helper operations.
- */
-void (*cvmx_override_pko_queue_priority) (int pko_port,
- uint64_t priorities[16]);
-
-/**
- * cvmx_override_ipd_port_setup(int ipd_port) is a function
- * pointer. It is meant to allow customization of the IPD port
- * setup before packet input/output comes online. It is called
- * after cvmx-helper does the default IPD configuration, but
- * before IPD is enabled. Users should set this pointer to a
- * function before calling any cvmx-helper operations.
- */
-void (*cvmx_override_ipd_port_setup) (int ipd_port);
-
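The two override hooks above are meant to be installed by the application before the first cvmx-helper call. A minimal sketch of that wiring, under the assumption of a hypothetical application file (the my_* names are illustrative; only the two pointers and cvmx_helper_initialize_packet_io_global() come from this code):

/* Hypothetical application hook-up, not taken from the deleted file. */
#include "cvmx-helper.h"

static void my_pko_priorities(int pko_port, uint64_t priorities[16])
{
	/* Example policy: keep queue 0 of every port at the top priority. */
	priorities[0] = 8;
}

static void my_ipd_setup(int ipd_port)
{
	/* Adjust the default CVMX_PIP_PRT_CFGX(ipd_port) settings here. */
}

static int my_packet_io_init(void)
{
	cvmx_override_pko_queue_priority = my_pko_priorities;
	cvmx_override_ipd_port_setup = my_ipd_setup;
	return cvmx_helper_initialize_packet_io_global();
}
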
-/* Port count per interface */
-static int interface_port_count[4] = { 0, 0, 0, 0 };
-
-/* Port last configured link info index by IPD/PKO port */
-static cvmx_helper_link_info_t
- port_link_info[CVMX_PIP_NUM_INPUT_PORTS];
-
-/**
- * Return the number of interfaces the chip has. Each interface
- * may have multiple ports. Most chips support two interfaces,
- * but the CNX0XX and CNX1XX are exceptions. These only support
- * one interface.
- *
- * Returns Number of interfaces on chip
- */
-int cvmx_helper_get_number_of_interfaces(void)
-{
- if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
- return 4;
- else
- return 3;
-}
-
-/**
- * Return the number of ports on an interface. Depending on the
- * chip and configuration, this can be 1-16. A value of 0
- * specifies that the interface doesn't exist or isn't usable.
- *
- * @interface: Interface to get the port count for
- *
- * Returns Number of ports on interface. Can be Zero.
- */
-int cvmx_helper_ports_on_interface(int interface)
-{
- return interface_port_count[interface];
-}
-
-/**
- * Get the operating mode of an interface. Depending on the Octeon
- * chip and configuration, this function returns an enumeration
- * of the type of packet I/O supported by an interface.
- *
- * @interface: Interface to probe
- *
- * Returns Mode of the interface. Unknown or unsupported interfaces return
- * DISABLED.
- */
-cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface)
-{
- union cvmx_gmxx_inf_mode mode;
- if (interface == 2)
- return CVMX_HELPER_INTERFACE_MODE_NPI;
-
- if (interface == 3) {
- if (OCTEON_IS_MODEL(OCTEON_CN56XX)
- || OCTEON_IS_MODEL(OCTEON_CN52XX))
- return CVMX_HELPER_INTERFACE_MODE_LOOP;
- else
- return CVMX_HELPER_INTERFACE_MODE_DISABLED;
- }
-
- if (interface == 0
- && cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_CN3005_EVB_HS5
- && cvmx_sysinfo_get()->board_rev_major == 1) {
- /*
- * Lie about interface type of CN3005 board. This
- * board has a switch on port 1 like the other
- * evaluation boards, but it is connected over RGMII
- * instead of GMII. Report GMII mode so that the
- * speed is forced to 1 Gbit full duplex. Other than
- * some initial configuration (which does not use the
- * output of this function) there is no difference in
- * setup between GMII and RGMII modes.
- */
- return CVMX_HELPER_INTERFACE_MODE_GMII;
- }
-
- /* Interface 1 is always disabled on CN31XX and CN30XX */
- if ((interface == 1)
- && (OCTEON_IS_MODEL(OCTEON_CN31XX) || OCTEON_IS_MODEL(OCTEON_CN30XX)
- || OCTEON_IS_MODEL(OCTEON_CN50XX)
- || OCTEON_IS_MODEL(OCTEON_CN52XX)))
- return CVMX_HELPER_INTERFACE_MODE_DISABLED;
-
- mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
-
- if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
- switch (mode.cn56xx.mode) {
- case 0:
- return CVMX_HELPER_INTERFACE_MODE_DISABLED;
- case 1:
- return CVMX_HELPER_INTERFACE_MODE_XAUI;
- case 2:
- return CVMX_HELPER_INTERFACE_MODE_SGMII;
- case 3:
- return CVMX_HELPER_INTERFACE_MODE_PICMG;
- default:
- return CVMX_HELPER_INTERFACE_MODE_DISABLED;
- }
- } else {
- if (!mode.s.en)
- return CVMX_HELPER_INTERFACE_MODE_DISABLED;
-
- if (mode.s.type) {
- if (OCTEON_IS_MODEL(OCTEON_CN38XX)
- || OCTEON_IS_MODEL(OCTEON_CN58XX))
- return CVMX_HELPER_INTERFACE_MODE_SPI;
- else
- return CVMX_HELPER_INTERFACE_MODE_GMII;
- } else
- return CVMX_HELPER_INTERFACE_MODE_RGMII;
- }
-}
-
-/**
- * Configure the IPD/PIP tagging and QoS options for a specific
- * port. This function determines the POW work queue entry
- * contents for a port. The setup performed here is controlled by
- * the defines in executive-config.h.
- *
- * @ipd_port: Port to configure. This follows the IPD numbering, not the
- * per interface numbering
- *
- * Returns Zero on success, negative on failure
- */
-static int __cvmx_helper_port_setup_ipd(int ipd_port)
-{
- union cvmx_pip_prt_cfgx port_config;
- union cvmx_pip_prt_tagx tag_config;
-
- port_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
- tag_config.u64 = cvmx_read_csr(CVMX_PIP_PRT_TAGX(ipd_port));
-
- /* Have each port go to a different POW queue */
- port_config.s.qos = ipd_port & 0x7;
-
- /* Process the headers and place the IP header in the work queue */
- port_config.s.mode = CVMX_HELPER_INPUT_PORT_SKIP_MODE;
-
- tag_config.s.ip6_src_flag = CVMX_HELPER_INPUT_TAG_IPV6_SRC_IP;
- tag_config.s.ip6_dst_flag = CVMX_HELPER_INPUT_TAG_IPV6_DST_IP;
- tag_config.s.ip6_sprt_flag = CVMX_HELPER_INPUT_TAG_IPV6_SRC_PORT;
- tag_config.s.ip6_dprt_flag = CVMX_HELPER_INPUT_TAG_IPV6_DST_PORT;
- tag_config.s.ip6_nxth_flag = CVMX_HELPER_INPUT_TAG_IPV6_NEXT_HEADER;
- tag_config.s.ip4_src_flag = CVMX_HELPER_INPUT_TAG_IPV4_SRC_IP;
- tag_config.s.ip4_dst_flag = CVMX_HELPER_INPUT_TAG_IPV4_DST_IP;
- tag_config.s.ip4_sprt_flag = CVMX_HELPER_INPUT_TAG_IPV4_SRC_PORT;
- tag_config.s.ip4_dprt_flag = CVMX_HELPER_INPUT_TAG_IPV4_DST_PORT;
- tag_config.s.ip4_pctl_flag = CVMX_HELPER_INPUT_TAG_IPV4_PROTOCOL;
- tag_config.s.inc_prt_flag = CVMX_HELPER_INPUT_TAG_INPUT_PORT;
- tag_config.s.tcp6_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
- tag_config.s.tcp4_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
- tag_config.s.ip6_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
- tag_config.s.ip4_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
- tag_config.s.non_tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
- /* Put all packets in group 0. Other groups can be used by the app */
- tag_config.s.grp = 0;
-
- cvmx_pip_config_port(ipd_port, port_config, tag_config);
-
- /* Give the user a chance to override our setting for each port */
- if (cvmx_override_ipd_port_setup)
- cvmx_override_ipd_port_setup(ipd_port);
-
- return 0;
-}
-
-/**
- * This function probes an interface to determine the actual
- * number of hardware ports connected to it. It doesn't set up the
- * ports or enable them. The main goal here is to set the global
- * interface_port_count[interface] correctly. Hardware setup of the
- * ports will be performed later.
- *
- * @interface: Interface to probe
- *
- * Returns Zero on success, negative on failure
- */
-int cvmx_helper_interface_probe(int interface)
-{
- /* At this stage in the game we don't want packets to be moving yet.
- The following probe calls should perform hardware setup
- needed to determine port counts. Receive must still be disabled */
- switch (cvmx_helper_interface_get_mode(interface)) {
- /* These types don't support ports to IPD/PKO */
- case CVMX_HELPER_INTERFACE_MODE_DISABLED:
- case CVMX_HELPER_INTERFACE_MODE_PCIE:
- interface_port_count[interface] = 0;
- break;
- /* XAUI is a single high speed port */
- case CVMX_HELPER_INTERFACE_MODE_XAUI:
- interface_port_count[interface] =
- __cvmx_helper_xaui_probe(interface);
- break;
- /*
- * RGMII/GMII/MII are all treated about the same. Most
- * functions refer to these ports as RGMII.
- */
- case CVMX_HELPER_INTERFACE_MODE_RGMII:
- case CVMX_HELPER_INTERFACE_MODE_GMII:
- interface_port_count[interface] =
- __cvmx_helper_rgmii_probe(interface);
- break;
- /*
- * SPI4 can have 1-16 ports depending on the device at
- * the other end.
- */
- case CVMX_HELPER_INTERFACE_MODE_SPI:
- interface_port_count[interface] =
- __cvmx_helper_spi_probe(interface);
- break;
- /*
- * SGMII can have 1-4 ports depending on how many are
- * hooked up.
- */
- case CVMX_HELPER_INTERFACE_MODE_SGMII:
- case CVMX_HELPER_INTERFACE_MODE_PICMG:
- interface_port_count[interface] =
- __cvmx_helper_sgmii_probe(interface);
- break;
- /* PCI target Network Packet Interface */
- case CVMX_HELPER_INTERFACE_MODE_NPI:
- interface_port_count[interface] =
- __cvmx_helper_npi_probe(interface);
- break;
- /*
- * Special loopback only ports. These are not the same
- * as other ports in loopback mode.
- */
- case CVMX_HELPER_INTERFACE_MODE_LOOP:
- interface_port_count[interface] =
- __cvmx_helper_loop_probe(interface);
- break;
- }
-
- interface_port_count[interface] =
- __cvmx_helper_board_interface_probe(interface,
- interface_port_count
- [interface]);
-
- /* Make sure all global variables propagate to other cores */
- CVMX_SYNCWS;
-
- return 0;
-}
-
-/**
- * Setup the IPD/PIP for the ports on an interface. Packet
- * classification and tagging are set for every port on the
- * interface. The number of ports on the interface must already
- * have been probed.
- *
- * @interface: Interface to setup IPD/PIP for
- *
- * Returns Zero on success, negative on failure
- */
-static int __cvmx_helper_interface_setup_ipd(int interface)
-{
- int ipd_port = cvmx_helper_get_ipd_port(interface, 0);
- int num_ports = interface_port_count[interface];
-
- while (num_ports--) {
- __cvmx_helper_port_setup_ipd(ipd_port);
- ipd_port++;
- }
- return 0;
-}
-
-/**
- * Setup global setting for IPD/PIP not related to a specific
- * interface or port. This must be called before IPD is enabled.
- *
- * Returns Zero on success, negative on failure.
- */
-static int __cvmx_helper_global_setup_ipd(void)
-{
- /* Setup the global packet input options */
- cvmx_ipd_config(CVMX_FPA_PACKET_POOL_SIZE / 8,
- CVMX_HELPER_FIRST_MBUFF_SKIP / 8,
- CVMX_HELPER_NOT_FIRST_MBUFF_SKIP / 8,
- /* The +8 is to account for the next ptr */
- (CVMX_HELPER_FIRST_MBUFF_SKIP + 8) / 128,
- /* The +8 is to account for the next ptr */
- (CVMX_HELPER_NOT_FIRST_MBUFF_SKIP + 8) / 128,
- CVMX_FPA_WQE_POOL,
- CVMX_IPD_OPC_MODE_STT,
- CVMX_HELPER_ENABLE_BACK_PRESSURE);
- return 0;
-}
-
-/**
- * Setup the PKO for the ports on an interface. The number of
- * queues per port and the priority of each PKO output queue
- * is set here. PKO must be disabled when this function is called.
- *
- * @interface: Interface to setup PKO for
- *
- * Returns Zero on success, negative on failure
- */
-static int __cvmx_helper_interface_setup_pko(int interface)
-{
- /*
- * Each packet output queue has an associated priority. The
- * higher the priority, the more often it can send a packet. A
- * priority of 8 means it can send in all 8 rounds of
- * contention. We're going to make each queue one less than
- * the last. The vector of priorities has been extended to
- * support CN5xxx CPUs, where up to 16 queues can be
- * associated to a port. To keep backward compatibility we
- * don't change the initial 8 priorities and replicate them in
- * the second half. With per-core PKO queues (PKO lockless
- * operation) all queues have the same priority.
- */
- uint64_t priorities[16] =
- { 8, 7, 6, 5, 4, 3, 2, 1, 8, 7, 6, 5, 4, 3, 2, 1 };
-
- /*
- * Setup the IPD/PIP and PKO for the ports discovered
- * above. Here packet classification, tagging and output
- * priorities are set.
- */
- int ipd_port = cvmx_helper_get_ipd_port(interface, 0);
- int num_ports = interface_port_count[interface];
- while (num_ports--) {
- /*
- * Give the user a chance to override the per queue
- * priorities.
- */
- if (cvmx_override_pko_queue_priority)
- cvmx_override_pko_queue_priority(ipd_port, priorities);
-
- cvmx_pko_config_port(ipd_port,
- cvmx_pko_get_base_queue_per_core(ipd_port,
- 0),
- cvmx_pko_get_num_queues(ipd_port),
- priorities);
- ipd_port++;
- }
- return 0;
-}
-
-/**
- * Setup global setting for PKO not related to a specific
- * interface or port. This must be called before PKO is enabled.
- *
- * Returns Zero on success, negative on failure.
- */
-static int __cvmx_helper_global_setup_pko(void)
-{
- /*
- * Disable tagwait FAU timeout. This needs to be done before
- * anyone might start packet output using tags.
- */
- union cvmx_iob_fau_timeout fau_to;
- fau_to.u64 = 0;
- fau_to.s.tout_val = 0xfff;
- fau_to.s.tout_enb = 0;
- cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_to.u64);
- return 0;
-}
-
-/**
- * Setup global backpressure setting.
- *
- * Returns Zero on success, negative on failure
- */
-static int __cvmx_helper_global_setup_backpressure(void)
-{
-#if CVMX_HELPER_DISABLE_RGMII_BACKPRESSURE
- /* Disable backpressure if configured to do so */
- /* Disable backpressure (pause frame) generation */
- int num_interfaces = cvmx_helper_get_number_of_interfaces();
- int interface;
- for (interface = 0; interface < num_interfaces; interface++) {
- switch (cvmx_helper_interface_get_mode(interface)) {
- case CVMX_HELPER_INTERFACE_MODE_DISABLED:
- case CVMX_HELPER_INTERFACE_MODE_PCIE:
- case CVMX_HELPER_INTERFACE_MODE_NPI:
- case CVMX_HELPER_INTERFACE_MODE_LOOP:
- case CVMX_HELPER_INTERFACE_MODE_XAUI:
- break;
- case CVMX_HELPER_INTERFACE_MODE_RGMII:
- case CVMX_HELPER_INTERFACE_MODE_GMII:
- case CVMX_HELPER_INTERFACE_MODE_SPI:
- case CVMX_HELPER_INTERFACE_MODE_SGMII:
- case CVMX_HELPER_INTERFACE_MODE_PICMG:
- cvmx_gmx_set_backpressure_override(interface, 0xf);
- break;
- }
- }
-#endif
-
- return 0;
-}
-
-/**
- * Enable packet input/output from the hardware. This function is
- * called after all internal setup is complete and IPD is enabled.
- * After this function completes, packets will be accepted from the
- * hardware ports. PKO should still be disabled to make sure packets
- * aren't sent out of partially set up hardware.
- *
- * @interface: Interface to enable
- *
- * Returns Zero on success, negative on failure
- */
-static int __cvmx_helper_packet_hardware_enable(int interface)
-{
- int result = 0;
- switch (cvmx_helper_interface_get_mode(interface)) {
- /* These types don't support ports to IPD/PKO */
- case CVMX_HELPER_INTERFACE_MODE_DISABLED:
- case CVMX_HELPER_INTERFACE_MODE_PCIE:
- /* Nothing to do */
- break;
- /* XAUI is a single high speed port */
- case CVMX_HELPER_INTERFACE_MODE_XAUI:
- result = __cvmx_helper_xaui_enable(interface);
- break;
- /*
- * RGMII/GMII/MII are all treated about the same. Most
- * functions refer to these ports as RGMII
- */
- case CVMX_HELPER_INTERFACE_MODE_RGMII:
- case CVMX_HELPER_INTERFACE_MODE_GMII:
- result = __cvmx_helper_rgmii_enable(interface);
- break;
- /*
- * SPI4 can have 1-16 ports depending on the device at
- * the other end
- */
- case CVMX_HELPER_INTERFACE_MODE_SPI:
- result = __cvmx_helper_spi_enable(interface);
- break;
- /*
- * SGMII can have 1-4 ports depending on how many are
- * hooked up
- */
- case CVMX_HELPER_INTERFACE_MODE_SGMII:
- case CVMX_HELPER_INTERFACE_MODE_PICMG:
- result = __cvmx_helper_sgmii_enable(interface);
- break;
- /* PCI target Network Packet Interface */
- case CVMX_HELPER_INTERFACE_MODE_NPI:
- result = __cvmx_helper_npi_enable(interface);
- break;
- /*
- * Special loopback only ports. These are not the same
- * as other ports in loopback mode
- */
- case CVMX_HELPER_INTERFACE_MODE_LOOP:
- result = __cvmx_helper_loop_enable(interface);
- break;
- }
- result |= __cvmx_helper_board_hardware_enable(interface);
- return result;
-}
-
-/**
- * Function to adjust internal IPD pointer alignments
- *
- * Returns 0 on success
- * !0 on failure
- */
-int __cvmx_helper_errata_fix_ipd_ptr_alignment(void)
-{
-#define FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES \
- (CVMX_FPA_PACKET_POOL_SIZE-8-CVMX_HELPER_FIRST_MBUFF_SKIP)
-#define FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES \
- (CVMX_FPA_PACKET_POOL_SIZE-8-CVMX_HELPER_NOT_FIRST_MBUFF_SKIP)
-#define FIX_IPD_OUTPORT 0
- /* Ports 0-15 are interface 0, 16-31 are interface 1 */
-#define INTERFACE(port) (port >> 4)
-#define INDEX(port) (port & 0xf)
- uint64_t *p64;
- cvmx_pko_command_word0_t pko_command;
- union cvmx_buf_ptr g_buffer, pkt_buffer;
- cvmx_wqe_t *work;
- int size, num_segs = 0, wqe_pcnt, pkt_pcnt;
- union cvmx_gmxx_prtx_cfg gmx_cfg;
- int retry_cnt;
- int retry_loop_cnt;
- int mtu;
- int i;
- cvmx_helper_link_info_t link_info;
-
- /* Save values for restore at end */
- uint64_t prtx_cfg =
- cvmx_read_csr(CVMX_GMXX_PRTX_CFG
- (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
- uint64_t tx_ptr_en =
- cvmx_read_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)));
- uint64_t rx_ptr_en =
- cvmx_read_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)));
- uint64_t rxx_jabber =
- cvmx_read_csr(CVMX_GMXX_RXX_JABBER
- (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
- uint64_t frame_max =
- cvmx_read_csr(CVMX_GMXX_RXX_FRM_MAX
- (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)));
-
- /* Configure port to gig FDX as required for loopback mode */
- cvmx_helper_rgmii_internal_loopback(FIX_IPD_OUTPORT);
-
- /*
- * Disable reception on all ports so if traffic is present it
- * will not interfere.
- */
- cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)), 0);
-
- cvmx_wait(100000000ull);
-
- for (retry_loop_cnt = 0; retry_loop_cnt < 10; retry_loop_cnt++) {
- retry_cnt = 100000;
- wqe_pcnt = cvmx_read_csr(CVMX_IPD_PTR_COUNT);
- pkt_pcnt = (wqe_pcnt >> 7) & 0x7f;
- wqe_pcnt &= 0x7f;
-
- num_segs = (2 + pkt_pcnt - wqe_pcnt) & 3;
-
- if (num_segs == 0)
- goto fix_ipd_exit;
-
- num_segs += 1;
-
- size =
- FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES +
- ((num_segs - 1) * FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES) -
- (FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES / 2);
-
- cvmx_write_csr(CVMX_ASXX_PRT_LOOP(INTERFACE(FIX_IPD_OUTPORT)),
- 1 << INDEX(FIX_IPD_OUTPORT));
- CVMX_SYNC;
-
- g_buffer.u64 = 0;
- g_buffer.s.addr =
- cvmx_ptr_to_phys(cvmx_fpa_alloc(CVMX_FPA_WQE_POOL));
- if (g_buffer.s.addr == 0) {
- cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT "
- "buffer allocation failure.\n");
- goto fix_ipd_exit;
- }
-
- g_buffer.s.pool = CVMX_FPA_WQE_POOL;
- g_buffer.s.size = num_segs;
-
- pkt_buffer.u64 = 0;
- pkt_buffer.s.addr =
- cvmx_ptr_to_phys(cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL));
- if (pkt_buffer.s.addr == 0) {
- cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT "
- "buffer allocation failure.\n");
- goto fix_ipd_exit;
- }
- pkt_buffer.s.i = 1;
- pkt_buffer.s.pool = CVMX_FPA_PACKET_POOL;
- pkt_buffer.s.size = FIX_IPD_FIRST_BUFF_PAYLOAD_BYTES;
-
- p64 = (uint64_t *) cvmx_phys_to_ptr(pkt_buffer.s.addr);
- p64[0] = 0xffffffffffff0000ull;
- p64[1] = 0x08004510ull;
- p64[2] = ((uint64_t) (size - 14) << 48) | 0x5ae740004000ull;
- p64[3] = 0x3a5fc0a81073c0a8ull;
-
- for (i = 0; i < num_segs; i++) {
- if (i > 0)
- pkt_buffer.s.size =
- FIX_IPD_NON_FIRST_BUFF_PAYLOAD_BYTES;
-
- if (i == (num_segs - 1))
- pkt_buffer.s.i = 0;
-
- *(uint64_t *) cvmx_phys_to_ptr(g_buffer.s.addr +
- 8 * i) = pkt_buffer.u64;
- }
-
- /* Build the PKO command */
- pko_command.u64 = 0;
- pko_command.s.segs = num_segs;
- pko_command.s.total_bytes = size;
- pko_command.s.dontfree = 0;
- pko_command.s.gather = 1;
-
- gmx_cfg.u64 =
- cvmx_read_csr(CVMX_GMXX_PRTX_CFG
- (INDEX(FIX_IPD_OUTPORT),
- INTERFACE(FIX_IPD_OUTPORT)));
- gmx_cfg.s.en = 1;
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG
- (INDEX(FIX_IPD_OUTPORT),
- INTERFACE(FIX_IPD_OUTPORT)), gmx_cfg.u64);
- cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
- 1 << INDEX(FIX_IPD_OUTPORT));
- cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
- 1 << INDEX(FIX_IPD_OUTPORT));
-
- mtu =
- cvmx_read_csr(CVMX_GMXX_RXX_JABBER
- (INDEX(FIX_IPD_OUTPORT),
- INTERFACE(FIX_IPD_OUTPORT)));
- cvmx_write_csr(CVMX_GMXX_RXX_JABBER
- (INDEX(FIX_IPD_OUTPORT),
- INTERFACE(FIX_IPD_OUTPORT)), 65392 - 14 - 4);
- cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX
- (INDEX(FIX_IPD_OUTPORT),
- INTERFACE(FIX_IPD_OUTPORT)), 65392 - 14 - 4);
-
- cvmx_pko_send_packet_prepare(FIX_IPD_OUTPORT,
- cvmx_pko_get_base_queue
- (FIX_IPD_OUTPORT),
- CVMX_PKO_LOCK_CMD_QUEUE);
- cvmx_pko_send_packet_finish(FIX_IPD_OUTPORT,
- cvmx_pko_get_base_queue
- (FIX_IPD_OUTPORT), pko_command,
- g_buffer, CVMX_PKO_LOCK_CMD_QUEUE);
-
- CVMX_SYNC;
-
- do {
- work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
- retry_cnt--;
- } while ((work == NULL) && (retry_cnt > 0));
-
- if (!retry_cnt)
- cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT "
- "get_work() timeout occurred.\n");
-
- /* Free packet */
- if (work)
- cvmx_helper_free_packet_data(work);
- }
-
-fix_ipd_exit:
-
- /* Return CSR configs to saved values */
- cvmx_write_csr(CVMX_GMXX_PRTX_CFG
- (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)),
- prtx_cfg);
- cvmx_write_csr(CVMX_ASXX_TX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
- tx_ptr_en);
- cvmx_write_csr(CVMX_ASXX_RX_PRT_EN(INTERFACE(FIX_IPD_OUTPORT)),
- rx_ptr_en);
- cvmx_write_csr(CVMX_GMXX_RXX_JABBER
- (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)),
- rxx_jabber);
- cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX
- (INDEX(FIX_IPD_OUTPORT), INTERFACE(FIX_IPD_OUTPORT)),
- frame_max);
- cvmx_write_csr(CVMX_ASXX_PRT_LOOP(INTERFACE(FIX_IPD_OUTPORT)), 0);
- /* Set link to down so autonegotiation will set it up again */
- link_info.u64 = 0;
- cvmx_helper_link_set(FIX_IPD_OUTPORT, link_info);
-
- /*
- * Bring the link back up as autonegotiation is not done in
- * user applications.
- */
- cvmx_helper_link_autoconf(FIX_IPD_OUTPORT);
-
- CVMX_SYNC;
- if (num_segs)
- cvmx_dprintf("WARNING: FIX_IPD_PTR_ALIGNMENT failed.\n");
-
- return !!num_segs;
-
-}
-
-/**
- * Called after all internal packet IO paths are set up. This
- * function enables IPD/PIP and begins packet input and output.
- *
- * Returns Zero on success, negative on failure
- */
-int cvmx_helper_ipd_and_packet_input_enable(void)
-{
- int num_interfaces;
- int interface;
-
- /* Enable IPD */
- cvmx_ipd_enable();
-
- /*
- * Time to enable packet input and output on the hardware ports. Note
- * that at this point IPD/PIP must be fully functional and PKO
- * must be disabled
- */
- num_interfaces = cvmx_helper_get_number_of_interfaces();
- for (interface = 0; interface < num_interfaces; interface++) {
- if (cvmx_helper_ports_on_interface(interface) > 0)
- __cvmx_helper_packet_hardware_enable(interface);
- }
-
- /* Finally enable PKO now that the entire path is up and running */
- cvmx_pko_enable();
-
- if ((OCTEON_IS_MODEL(OCTEON_CN31XX_PASS1)
- || OCTEON_IS_MODEL(OCTEON_CN30XX_PASS1))
- && (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM))
- __cvmx_helper_errata_fix_ipd_ptr_alignment();
- return 0;
-}
-
-/**
- * Initialize the PIP, IPD, and PKO hardware to support
- * simple priority based queues for the ethernet ports. Each
- * port is configured with a number of priority queues based
- * on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
- * priority than the previous.
- *
- * Returns Zero on success, non-zero on failure
- */
-int cvmx_helper_initialize_packet_io_global(void)
-{
- int result = 0;
- int interface;
- union cvmx_l2c_cfg l2c_cfg;
- union cvmx_smix_en smix_en;
- const int num_interfaces = cvmx_helper_get_number_of_interfaces();
-
- /*
- * CN52XX pass 1: Due to a bug in 2nd order CDR, it needs to
- * be disabled.
- */
- if (OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_0))
- __cvmx_helper_errata_qlm_disable_2nd_order_cdr(1);
-
- /*
- * Tell L2 to give the IOB statically higher priority compared
- * to the cores. This avoids conditions where IO blocks might
- * be starved under very high L2 loads.
- */
- l2c_cfg.u64 = cvmx_read_csr(CVMX_L2C_CFG);
- l2c_cfg.s.lrf_arb_mode = 0;
- l2c_cfg.s.rfb_arb_mode = 0;
- cvmx_write_csr(CVMX_L2C_CFG, l2c_cfg.u64);
-
- /* Make sure SMI/MDIO is enabled so we can query PHYs */
- smix_en.u64 = cvmx_read_csr(CVMX_SMIX_EN(0));
- if (!smix_en.s.en) {
- smix_en.s.en = 1;
- cvmx_write_csr(CVMX_SMIX_EN(0), smix_en.u64);
- }
-
- /* Newer chips actually have two SMI/MDIO interfaces */
- if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) &&
- !OCTEON_IS_MODEL(OCTEON_CN58XX) &&
- !OCTEON_IS_MODEL(OCTEON_CN50XX)) {
- smix_en.u64 = cvmx_read_csr(CVMX_SMIX_EN(1));
- if (!smix_en.s.en) {
- smix_en.s.en = 1;
- cvmx_write_csr(CVMX_SMIX_EN(1), smix_en.u64);
- }
- }
-
- cvmx_pko_initialize_global();
- for (interface = 0; interface < num_interfaces; interface++) {
- result |= cvmx_helper_interface_probe(interface);
- if (cvmx_helper_ports_on_interface(interface) > 0)
- cvmx_dprintf("Interface %d has %d ports (%s)\n",
- interface,
- cvmx_helper_ports_on_interface(interface),
- cvmx_helper_interface_mode_to_string
- (cvmx_helper_interface_get_mode
- (interface)));
- result |= __cvmx_helper_interface_setup_ipd(interface);
- result |= __cvmx_helper_interface_setup_pko(interface);
- }
-
- result |= __cvmx_helper_global_setup_ipd();
- result |= __cvmx_helper_global_setup_pko();
-
- /* Enable any flow control and backpressure */
- result |= __cvmx_helper_global_setup_backpressure();
-
-#if CVMX_HELPER_ENABLE_IPD
- result |= cvmx_helper_ipd_and_packet_input_enable();
-#endif
- return result;
-}
-
-/**
- * Does core local initialization for packet io
- *
- * Returns Zero on success, non-zero on failure
- */
-int cvmx_helper_initialize_packet_io_local(void)
-{
- return cvmx_pko_initialize_local();
-}
-
-/**
- * Auto configure an IPD/PKO port link state and speed. This
- * function basically does the equivalent of:
- * cvmx_helper_link_set(ipd_port, cvmx_helper_link_get(ipd_port));
- *
- * @ipd_port: IPD/PKO port to auto configure
- *
- * Returns Link state after configure
- */
-cvmx_helper_link_info_t cvmx_helper_link_autoconf(int ipd_port)
-{
- cvmx_helper_link_info_t link_info;
- int interface = cvmx_helper_get_interface_num(ipd_port);
- int index = cvmx_helper_get_interface_index_num(ipd_port);
-
- if (index >= cvmx_helper_ports_on_interface(interface)) {
- link_info.u64 = 0;
- return link_info;
- }
-
- link_info = cvmx_helper_link_get(ipd_port);
- if (link_info.u64 == port_link_info[ipd_port].u64)
- return link_info;
-
- /* If we fail to set the link speed, port_link_info will not change */
- cvmx_helper_link_set(ipd_port, link_info);
-
- /*
- * port_link_info should be the current value, which will be
- * different than expected if cvmx_helper_link_set() failed.
- */
- return port_link_info[ipd_port];
-}
-
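As a usage sketch for the function above (the polling wrapper is hypothetical; the helper calls, cvmx_dprintf() and the link_info fields all appear elsewhere in this code):

/* Hypothetical link-poll loop, not taken from the deleted file. */
static void poll_interface_links(int interface)
{
	int index;
	int num_ports = cvmx_helper_ports_on_interface(interface);

	for (index = 0; index < num_ports; index++) {
		int ipd_port = cvmx_helper_get_ipd_port(interface, index);
		cvmx_helper_link_info_t link = cvmx_helper_link_autoconf(ipd_port);

		if (link.s.link_up)
			cvmx_dprintf("Port %d: %d Mbps, %s duplex\n",
				     ipd_port, (int)link.s.speed,
				     link.s.full_duplex ? "full" : "half");
	}
}
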
-/**
- * Return the link state of an IPD/PKO port as returned by
- * auto negotiation. The result of this function may not match
- * Octeon's link config if auto negotiation has changed since
- * the last call to cvmx_helper_link_set().
- *
- * @ipd_port: IPD/PKO port to query
- *
- * Returns Link state
- */
-cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port)
-{
- cvmx_helper_link_info_t result;
- int interface = cvmx_helper_get_interface_num(ipd_port);
- int index = cvmx_helper_get_interface_index_num(ipd_port);
-
- /* The default result will be a down link unless the code below
- changes it */
- result.u64 = 0;
-
- if (index >= cvmx_helper_ports_on_interface(interface))
- return result;
-
- switch (cvmx_helper_interface_get_mode(interface)) {
- case CVMX_HELPER_INTERFACE_MODE_DISABLED:
- case CVMX_HELPER_INTERFACE_MODE_PCIE:
- /* Network links are not supported */
- break;
- case CVMX_HELPER_INTERFACE_MODE_XAUI:
- result = __cvmx_helper_xaui_link_get(ipd_port);
- break;
- case CVMX_HELPER_INTERFACE_MODE_GMII:
- if (index == 0)
- result = __cvmx_helper_rgmii_link_get(ipd_port);
- else {
- result.s.full_duplex = 1;
- result.s.link_up = 1;
- result.s.speed = 1000;
- }
- break;
- case CVMX_HELPER_INTERFACE_MODE_RGMII:
- result = __cvmx_helper_rgmii_link_get(ipd_port);
- break;
- case CVMX_HELPER_INTERFACE_MODE_SPI:
- result = __cvmx_helper_spi_link_get(ipd_port);
- break;
- case CVMX_HELPER_INTERFACE_MODE_SGMII:
- case CVMX_HELPER_INTERFACE_MODE_PICMG:
- result = __cvmx_helper_sgmii_link_get(ipd_port);
- break;
- case CVMX_HELPER_INTERFACE_MODE_NPI:
- case CVMX_HELPER_INTERFACE_MODE_LOOP:
- /* Network links are not supported */
- break;
- }
- return result;
-}
-
-/**
- * Configure an IPD/PKO port for the specified link state. This
- * function does not influence auto negotiation at the PHY level.
- * The passed link state must always match the link state returned
- * by cvmx_helper_link_get(). It is normally best to use
- * cvmx_helper_link_autoconf() instead.
- *
- * @ipd_port: IPD/PKO port to configure
- * @link_info: The new link state
- *
- * Returns Zero on success, negative on failure
- */
-int cvmx_helper_link_set(int ipd_port, cvmx_helper_link_info_t link_info)
-{
- int result = -1;
- int interface = cvmx_helper_get_interface_num(ipd_port);
- int index = cvmx_helper_get_interface_index_num(ipd_port);
-
- if (index >= cvmx_helper_ports_on_interface(interface))
- return -1;
-
- switch (cvmx_helper_interface_get_mode(interface)) {
- case CVMX_HELPER_INTERFACE_MODE_DISABLED:
- case CVMX_HELPER_INTERFACE_MODE_PCIE:
- break;
- case CVMX_HELPER_INTERFACE_MODE_XAUI:
- result = __cvmx_helper_xaui_link_set(ipd_port, link_info);
- break;
- /*
- * RGMII/GMII/MII are all treated about the same. Most
- * functions refer to these ports as RGMII.
- */
- case CVMX_HELPER_INTERFACE_MODE_RGMII:
- case CVMX_HELPER_INTERFACE_MODE_GMII:
- result = __cvmx_helper_rgmii_link_set(ipd_port, link_info);
- break;
- case CVMX_HELPER_INTERFACE_MODE_SPI:
- result = __cvmx_helper_spi_link_set(ipd_port, link_info);
- break;
- case CVMX_HELPER_INTERFACE_MODE_SGMII:
- case CVMX_HELPER_INTERFACE_MODE_PICMG:
- result = __cvmx_helper_sgmii_link_set(ipd_port, link_info);
- break;
- case CVMX_HELPER_INTERFACE_MODE_NPI:
- case CVMX_HELPER_INTERFACE_MODE_LOOP:
- break;
- }
- /* Set the port_link_info here so that the link status is updated
- no matter how cvmx_helper_link_set is called. We don't change
- the value if link_set failed */
- if (result == 0)
- port_link_info[ipd_port].u64 = link_info.u64;
- return result;
-}
-
-/**
- * Configure a port for internal and/or external loopback. Internal loopback
- * causes packets sent by the port to be received by Octeon. External loopback
- * causes packets received from the wire to be sent out again.
- *
- * @ipd_port: IPD/PKO port to loopback.
- * @enable_internal:
- * Non zero if you want internal loopback
- * @enable_external:
- * Non zero if you want external loopback
- *
- * Returns Zero on success, negative on failure.
- */
-int cvmx_helper_configure_loopback(int ipd_port, int enable_internal,
- int enable_external)
-{
- int result = -1;
- int interface = cvmx_helper_get_interface_num(ipd_port);
- int index = cvmx_helper_get_interface_index_num(ipd_port);
-
- if (index >= cvmx_helper_ports_on_interface(interface))
- return -1;
-
- switch (cvmx_helper_interface_get_mode(interface)) {
- case CVMX_HELPER_INTERFACE_MODE_DISABLED:
- case CVMX_HELPER_INTERFACE_MODE_PCIE:
- case CVMX_HELPER_INTERFACE_MODE_SPI:
- case CVMX_HELPER_INTERFACE_MODE_NPI:
- case CVMX_HELPER_INTERFACE_MODE_LOOP:
- break;
- case CVMX_HELPER_INTERFACE_MODE_XAUI:
- result =
- __cvmx_helper_xaui_configure_loopback(ipd_port,
- enable_internal,
- enable_external);
- break;
- case CVMX_HELPER_INTERFACE_MODE_RGMII:
- case CVMX_HELPER_INTERFACE_MODE_GMII:
- result =
- __cvmx_helper_rgmii_configure_loopback(ipd_port,
- enable_internal,
- enable_external);
- break;
- case CVMX_HELPER_INTERFACE_MODE_SGMII:
- case CVMX_HELPER_INTERFACE_MODE_PICMG:
- result =
- __cvmx_helper_sgmii_configure_loopback(ipd_port,
- enable_internal,
- enable_external);
- break;
- }
- return result;
-}
diff --git a/drivers/staging/octeon/cvmx-helper.h b/drivers/staging/octeon/cvmx-helper.h
deleted file mode 100644
index 51916f3..0000000
--- a/drivers/staging/octeon/cvmx-helper.h
+++ /dev/null
@@ -1,227 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- *
- * Helper functions for common, but complicated tasks.
- *
- */
-
-#ifndef __CVMX_HELPER_H__
-#define __CVMX_HELPER_H__
-
-#include "cvmx-config.h"
-#include "cvmx-fpa.h"
-#include "cvmx-wqe.h"
-
-typedef enum {
- CVMX_HELPER_INTERFACE_MODE_DISABLED,
- CVMX_HELPER_INTERFACE_MODE_RGMII,
- CVMX_HELPER_INTERFACE_MODE_GMII,
- CVMX_HELPER_INTERFACE_MODE_SPI,
- CVMX_HELPER_INTERFACE_MODE_PCIE,
- CVMX_HELPER_INTERFACE_MODE_XAUI,
- CVMX_HELPER_INTERFACE_MODE_SGMII,
- CVMX_HELPER_INTERFACE_MODE_PICMG,
- CVMX_HELPER_INTERFACE_MODE_NPI,
- CVMX_HELPER_INTERFACE_MODE_LOOP,
-} cvmx_helper_interface_mode_t;
-
-typedef union {
- uint64_t u64;
- struct {
- uint64_t reserved_20_63:44;
- uint64_t link_up:1; /**< Is the physical link up? */
- uint64_t full_duplex:1; /**< 1 if the link is full duplex */
- uint64_t speed:18; /**< Speed of the link in Mbps */
- } s;
-} cvmx_helper_link_info_t;
-
-#include "cvmx-helper-fpa.h"
-
-#include <asm/octeon/cvmx-helper-errata.h>
-#include "cvmx-helper-loop.h"
-#include "cvmx-helper-npi.h"
-#include "cvmx-helper-rgmii.h"
-#include "cvmx-helper-sgmii.h"
-#include "cvmx-helper-spi.h"
-#include "cvmx-helper-util.h"
-#include "cvmx-helper-xaui.h"
-
-/**
- * cvmx_override_pko_queue_priority(int ipd_port, uint64_t
- * priorities[16]) is a function pointer. It is meant to allow
- * customization of the PKO queue priorities based on the port
- * number. Users should set this pointer to a function before
- * calling any cvmx-helper operations.
- */
-extern void (*cvmx_override_pko_queue_priority) (int pko_port,
- uint64_t priorities[16]);
-
-/**
- * cvmx_override_ipd_port_setup(int ipd_port) is a function
- * pointer. It is meant to allow customization of the IPD port
- * setup before packet input/output comes online. It is called
- * after cvmx-helper does the default IPD configuration, but
- * before IPD is enabled. Users should set this pointer to a
- * function before calling any cvmx-helper operations.
- */
-extern void (*cvmx_override_ipd_port_setup) (int ipd_port);
-
-/**
- * This function enables the IPD and also enables the packet interfaces.
- * The packet interfaces (RGMII and SPI) must be enabled after the
- * IPD. This should be called by the user program after any additional
- * IPD configuration changes are made if CVMX_HELPER_ENABLE_IPD
- * is not set in the executive-config.h file.
- *
- * Returns 0 on success
- * -1 on failure
- */
-extern int cvmx_helper_ipd_and_packet_input_enable(void);
-
-/**
- * Initialize the PIP, IPD, and PKO hardware to support
- * simple priority based queues for the ethernet ports. Each
- * port is configured with a number of priority queues based
- * on CVMX_PKO_QUEUES_PER_PORT_* where each queue is lower
- * priority than the previous.
- *
- * Returns Zero on success, non-zero on failure
- */
-extern int cvmx_helper_initialize_packet_io_global(void);
-
-/**
- * Does core local initialization for packet io
- *
- * Returns Zero on success, non-zero on failure
- */
-extern int cvmx_helper_initialize_packet_io_local(void);
-
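Read together, the declarations above imply a start-up order for a user program; a minimal sketch under that assumption (the wrapper name is hypothetical; only the three helper calls come from this header):

/* Hypothetical bring-up sequence, not taken from the deleted header. */
static void app_bringup_packet_io(void)
{
	/* Once, from a single core: probe interfaces and set up IPD/PIP/PKO. */
	cvmx_helper_initialize_packet_io_global();

	/* On every core that will do packet I/O. */
	cvmx_helper_initialize_packet_io_local();

	/* Only needed when CVMX_HELPER_ENABLE_IPD is not set in executive-config.h. */
	cvmx_helper_ipd_and_packet_input_enable();
}
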
-/**
- * Returns the number of ports on the given interface.
- * The interface must be initialized before the port count
- * can be returned.
- *
- * @interface: Which interface to return port count for.
- *
- * Returns Port count for interface
- * -1 for uninitialized interface
- */
-extern int cvmx_helper_ports_on_interface(int interface);
-
-/**
- * Return the number of interfaces the chip has. Each interface
- * may have multiple ports. Most chips support two interfaces,
- * but the CNX0XX and CNX1XX are exceptions. These only support
- * one interface.
- *
- * Returns Number of interfaces on chip
- */
-extern int cvmx_helper_get_number_of_interfaces(void);
-
-/**
- * Get the operating mode of an interface. Depending on the Octeon
- * chip and configuration, this function returns an enumeration
- * of the type of packet I/O supported by an interface.
- *
- * @interface: Interface to probe
- *
- * Returns Mode of the interface. Unknown or unsupported interfaces return
- * DISABLED.
- */
-extern cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int
- interface);
-
-/**
- * Auto configure an IPD/PKO port link state and speed. This
- * function basically does the equivalent of:
- * cvmx_helper_link_set(ipd_port, cvmx_helper_link_get(ipd_port));
- *
- * @ipd_port: IPD/PKO port to auto configure
- *
- * Returns Link state after configure
- */
-extern cvmx_helper_link_info_t cvmx_helper_link_autoconf(int ipd_port);
-
-/**
- * Return the link state of an IPD/PKO port as returned by
- * auto negotiation. The result of this function may not match
- * Octeon's link config if auto negotiation has changed since
- * the last call to cvmx_helper_link_set().
- *
- * @ipd_port: IPD/PKO port to query
- *
- * Returns Link state
- */
-extern cvmx_helper_link_info_t cvmx_helper_link_get(int ipd_port);
-
-/**
- * Configure an IPD/PKO port for the specified link state. This
- * function does not influence auto negotiation at the PHY level.
- * The passed link state must always match the link state returned
- * by cvmx_helper_link_get(). It is normally best to use
- * cvmx_helper_link_autoconf() instead.
- *
- * @ipd_port: IPD/PKO port to configure
- * @link_info: The new link state
- *
- * Returns Zero on success, negative on failure
- */
-extern int cvmx_helper_link_set(int ipd_port,
- cvmx_helper_link_info_t link_info);
-
-/**
- * This function probes an interface to determine the actual
- * number of hardware ports connected to it. It doesn't set up the
- * ports or enable them. The main goal here is to set the global
- * interface_port_count[interface] correctly. Hardware setup of the
- * ports will be performed later.
- *
- * @interface: Interface to probe
- *
- * Returns Zero on success, negative on failure
- */
-extern int cvmx_helper_interface_probe(int interface);
-
-/**
- * Configure a port for internal and/or external loopback. Internal loopback
- * causes packets sent by the port to be received by Octeon. External loopback
- * causes packets received from the wire to be sent out again.
- *
- * @ipd_port: IPD/PKO port to loopback.
- * @enable_internal:
- * Non zero if you want internal loopback
- * @enable_external:
- * Non zero if you want external loopback
- *
- * Returns Zero on success, negative on failure.
- */
-extern int cvmx_helper_configure_loopback(int ipd_port, int enable_internal,
- int enable_external);
-
-#endif /* __CVMX_HELPER_H__ */
diff --git a/drivers/staging/octeon/cvmx-interrupt-decodes.c b/drivers/staging/octeon/cvmx-interrupt-decodes.c
deleted file mode 100644
index a3337e3..0000000
--- a/drivers/staging/octeon/cvmx-interrupt-decodes.c
+++ /dev/null
@@ -1,371 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2009 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- *
- * Automatically generated functions useful for enabling
- * and decoding RSL_INT_BLOCKS interrupts.
- *
- */
-
-#include <asm/octeon/octeon.h>
-
-#include "cvmx-gmxx-defs.h"
-#include "cvmx-pcsx-defs.h"
-#include "cvmx-pcsxx-defs.h"
-#include "cvmx-spxx-defs.h"
-#include "cvmx-stxx-defs.h"
-
-#ifndef PRINT_ERROR
-#define PRINT_ERROR(format, ...)
-#endif
-
-
-/**
- * __cvmx_interrupt_gmxx_rxx_int_en_enable enables all interrupt bits in cvmx_gmxx_rxx_int_en_t
- */
-void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
-{
- union cvmx_gmxx_rxx_int_en gmx_rx_int_en;
- cvmx_write_csr(CVMX_GMXX_RXX_INT_REG(index, block),
- cvmx_read_csr(CVMX_GMXX_RXX_INT_REG(index, block)));
- gmx_rx_int_en.u64 = 0;
- if (OCTEON_IS_MODEL(OCTEON_CN56XX)) {
- /* Skipping gmx_rx_int_en.s.reserved_29_63 */
- gmx_rx_int_en.s.hg2cc = 1;
- gmx_rx_int_en.s.hg2fld = 1;
- gmx_rx_int_en.s.undat = 1;
- gmx_rx_int_en.s.uneop = 1;
- gmx_rx_int_en.s.unsop = 1;
- gmx_rx_int_en.s.bad_term = 1;
- gmx_rx_int_en.s.bad_seq = 1;
- gmx_rx_int_en.s.rem_fault = 1;
- gmx_rx_int_en.s.loc_fault = 1;
- gmx_rx_int_en.s.pause_drp = 1;
- /* Skipping gmx_rx_int_en.s.reserved_16_18 */
- /*gmx_rx_int_en.s.ifgerr = 1; */
- /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
- /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
- /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
- /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
- gmx_rx_int_en.s.ovrerr = 1;
- /* Skipping gmx_rx_int_en.s.reserved_9_9 */
- gmx_rx_int_en.s.skperr = 1;
- gmx_rx_int_en.s.rcverr = 1;
- /* Skipping gmx_rx_int_en.s.reserved_5_6 */
- /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
- gmx_rx_int_en.s.jabber = 1;
- /* Skipping gmx_rx_int_en.s.reserved_2_2 */
- gmx_rx_int_en.s.carext = 1;
- /* Skipping gmx_rx_int_en.s.reserved_0_0 */
- }
- if (OCTEON_IS_MODEL(OCTEON_CN30XX)) {
- /* Skipping gmx_rx_int_en.s.reserved_19_63 */
- /*gmx_rx_int_en.s.phy_dupx = 1; */
- /*gmx_rx_int_en.s.phy_spd = 1; */
- /*gmx_rx_int_en.s.phy_link = 1; */
- /*gmx_rx_int_en.s.ifgerr = 1; */
- /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
- /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
- /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
- /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
- gmx_rx_int_en.s.ovrerr = 1;
- gmx_rx_int_en.s.niberr = 1;
- gmx_rx_int_en.s.skperr = 1;
- gmx_rx_int_en.s.rcverr = 1;
- /*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
- gmx_rx_int_en.s.alnerr = 1;
- /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
- gmx_rx_int_en.s.jabber = 1;
- gmx_rx_int_en.s.maxerr = 1;
- gmx_rx_int_en.s.carext = 1;
- gmx_rx_int_en.s.minerr = 1;
- }
- if (OCTEON_IS_MODEL(OCTEON_CN50XX)) {
- /* Skipping gmx_rx_int_en.s.reserved_20_63 */
- gmx_rx_int_en.s.pause_drp = 1;
- /*gmx_rx_int_en.s.phy_dupx = 1; */
- /*gmx_rx_int_en.s.phy_spd = 1; */
- /*gmx_rx_int_en.s.phy_link = 1; */
- /*gmx_rx_int_en.s.ifgerr = 1; */
- /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
- /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
- /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
- /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
- gmx_rx_int_en.s.ovrerr = 1;
- gmx_rx_int_en.s.niberr = 1;
- gmx_rx_int_en.s.skperr = 1;
- gmx_rx_int_en.s.rcverr = 1;
- /* Skipping gmx_rx_int_en.s.reserved_6_6 */
- gmx_rx_int_en.s.alnerr = 1;
- /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
- gmx_rx_int_en.s.jabber = 1;
- /* Skipping gmx_rx_int_en.s.reserved_2_2 */
- gmx_rx_int_en.s.carext = 1;
- /* Skipping gmx_rx_int_en.s.reserved_0_0 */
- }
- if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
- /* Skipping gmx_rx_int_en.s.reserved_19_63 */
- /*gmx_rx_int_en.s.phy_dupx = 1; */
- /*gmx_rx_int_en.s.phy_spd = 1; */
- /*gmx_rx_int_en.s.phy_link = 1; */
- /*gmx_rx_int_en.s.ifgerr = 1; */
- /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
- /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
- /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
- /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
- gmx_rx_int_en.s.ovrerr = 1;
- gmx_rx_int_en.s.niberr = 1;
- gmx_rx_int_en.s.skperr = 1;
- gmx_rx_int_en.s.rcverr = 1;
- /*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
- gmx_rx_int_en.s.alnerr = 1;
- /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
- gmx_rx_int_en.s.jabber = 1;
- gmx_rx_int_en.s.maxerr = 1;
- gmx_rx_int_en.s.carext = 1;
- gmx_rx_int_en.s.minerr = 1;
- }
- if (OCTEON_IS_MODEL(OCTEON_CN31XX)) {
- /* Skipping gmx_rx_int_en.s.reserved_19_63 */
- /*gmx_rx_int_en.s.phy_dupx = 1; */
- /*gmx_rx_int_en.s.phy_spd = 1; */
- /*gmx_rx_int_en.s.phy_link = 1; */
- /*gmx_rx_int_en.s.ifgerr = 1; */
- /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
- /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
- /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
- /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
- gmx_rx_int_en.s.ovrerr = 1;
- gmx_rx_int_en.s.niberr = 1;
- gmx_rx_int_en.s.skperr = 1;
- gmx_rx_int_en.s.rcverr = 1;
- /*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
- gmx_rx_int_en.s.alnerr = 1;
- /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
- gmx_rx_int_en.s.jabber = 1;
- gmx_rx_int_en.s.maxerr = 1;
- gmx_rx_int_en.s.carext = 1;
- gmx_rx_int_en.s.minerr = 1;
- }
- if (OCTEON_IS_MODEL(OCTEON_CN58XX)) {
- /* Skipping gmx_rx_int_en.s.reserved_20_63 */
- gmx_rx_int_en.s.pause_drp = 1;
- /*gmx_rx_int_en.s.phy_dupx = 1; */
- /*gmx_rx_int_en.s.phy_spd = 1; */
- /*gmx_rx_int_en.s.phy_link = 1; */
- /*gmx_rx_int_en.s.ifgerr = 1; */
- /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
- /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
- /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
- /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
- gmx_rx_int_en.s.ovrerr = 1;
- gmx_rx_int_en.s.niberr = 1;
- gmx_rx_int_en.s.skperr = 1;
- gmx_rx_int_en.s.rcverr = 1;
- /*gmx_rx_int_en.s.lenerr = 1; // Length errors are handled when we get work */
- gmx_rx_int_en.s.alnerr = 1;
- /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
- gmx_rx_int_en.s.jabber = 1;
- gmx_rx_int_en.s.maxerr = 1;
- gmx_rx_int_en.s.carext = 1;
- gmx_rx_int_en.s.minerr = 1;
- }
- if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
- /* Skipping gmx_rx_int_en.s.reserved_29_63 */
- gmx_rx_int_en.s.hg2cc = 1;
- gmx_rx_int_en.s.hg2fld = 1;
- gmx_rx_int_en.s.undat = 1;
- gmx_rx_int_en.s.uneop = 1;
- gmx_rx_int_en.s.unsop = 1;
- gmx_rx_int_en.s.bad_term = 1;
- gmx_rx_int_en.s.bad_seq = 0;
- gmx_rx_int_en.s.rem_fault = 1;
- gmx_rx_int_en.s.loc_fault = 0;
- gmx_rx_int_en.s.pause_drp = 1;
- /* Skipping gmx_rx_int_en.s.reserved_16_18 */
- /*gmx_rx_int_en.s.ifgerr = 1; */
- /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
- /*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
- /*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
- /*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
- gmx_rx_int_en.s.ovrerr = 1;
- /* Skipping gmx_rx_int_en.s.reserved_9_9 */
- gmx_rx_int_en.s.skperr = 1;
- gmx_rx_int_en.s.rcverr = 1;
- /* Skipping gmx_rx_int_en.s.reserved_5_6 */
- /*gmx_rx_int_en.s.fcserr = 1; // FCS errors are handled when we get work */
- gmx_rx_int_en.s.jabber = 1;
- /* Skipping gmx_rx_int_en.s.reserved_2_2 */
- gmx_rx_int_en.s.carext = 1;
- /* Skipping gmx_rx_int_en.s.reserved_0_0 */
- }
- cvmx_write_csr(CVMX_GMXX_RXX_INT_EN(index, block), gmx_rx_int_en.u64);
-}
-/**
- * __cvmx_interrupt_pcsx_intx_en_reg_enable enables all interrupt bits in cvmx_pcsx_intx_en_reg_t
- */
-void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block)
-{
- union cvmx_pcsx_intx_en_reg pcs_int_en_reg;
- cvmx_write_csr(CVMX_PCSX_INTX_REG(index, block),
- cvmx_read_csr(CVMX_PCSX_INTX_REG(index, block)));
- pcs_int_en_reg.u64 = 0;
- if (OCTEON_IS_MODEL(OCTEON_CN56XX)) {
- /* Skipping pcs_int_en_reg.s.reserved_12_63 */
- /*pcs_int_en_reg.s.dup = 1; // This happens during normal operation */
- pcs_int_en_reg.s.sync_bad_en = 1;
- pcs_int_en_reg.s.an_bad_en = 1;
- pcs_int_en_reg.s.rxlock_en = 1;
- pcs_int_en_reg.s.rxbad_en = 1;
- /*pcs_int_en_reg.s.rxerr_en = 1; // This happens during normal operation */
- pcs_int_en_reg.s.txbad_en = 1;
- pcs_int_en_reg.s.txfifo_en = 1;
- pcs_int_en_reg.s.txfifu_en = 1;
- pcs_int_en_reg.s.an_err_en = 1;
- /*pcs_int_en_reg.s.xmit_en = 1; // This happens during normal operation */
- /*pcs_int_en_reg.s.lnkspd_en = 1; // This happens during normal operation */
- }
- if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
- /* Skipping pcs_int_en_reg.s.reserved_12_63 */
- /*pcs_int_en_reg.s.dup = 1; // This happens during normal operation */
- pcs_int_en_reg.s.sync_bad_en = 1;
- pcs_int_en_reg.s.an_bad_en = 1;
- pcs_int_en_reg.s.rxlock_en = 1;
- pcs_int_en_reg.s.rxbad_en = 1;
- /*pcs_int_en_reg.s.rxerr_en = 1; // This happens during normal operation */
- pcs_int_en_reg.s.txbad_en = 1;
- pcs_int_en_reg.s.txfifo_en = 1;
- pcs_int_en_reg.s.txfifu_en = 1;
- pcs_int_en_reg.s.an_err_en = 1;
- /*pcs_int_en_reg.s.xmit_en = 1; // This happens during normal operation */
- /*pcs_int_en_reg.s.lnkspd_en = 1; // This happens during normal operation */
- }
- cvmx_write_csr(CVMX_PCSX_INTX_EN_REG(index, block), pcs_int_en_reg.u64);
-}
-/**
- * __cvmx_interrupt_pcsxx_int_en_reg_enable enables all interrupt bits in cvmx_pcsxx_int_en_reg_t
- */
-void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index)
-{
- union cvmx_pcsxx_int_en_reg pcsx_int_en_reg;
- cvmx_write_csr(CVMX_PCSXX_INT_REG(index),
- cvmx_read_csr(CVMX_PCSXX_INT_REG(index)));
- pcsx_int_en_reg.u64 = 0;
- if (OCTEON_IS_MODEL(OCTEON_CN56XX)) {
- /* Skipping pcsx_int_en_reg.s.reserved_6_63 */
- pcsx_int_en_reg.s.algnlos_en = 1;
- pcsx_int_en_reg.s.synlos_en = 1;
- pcsx_int_en_reg.s.bitlckls_en = 1;
- pcsx_int_en_reg.s.rxsynbad_en = 1;
- pcsx_int_en_reg.s.rxbad_en = 1;
- pcsx_int_en_reg.s.txflt_en = 1;
- }
- if (OCTEON_IS_MODEL(OCTEON_CN52XX)) {
- /* Skipping pcsx_int_en_reg.s.reserved_6_63 */
- pcsx_int_en_reg.s.algnlos_en = 1;
- pcsx_int_en_reg.s.synlos_en = 1;
- pcsx_int_en_reg.s.bitlckls_en = 0; /* Happens if XAUI module is not installed */
- pcsx_int_en_reg.s.rxsynbad_en = 1;
- pcsx_int_en_reg.s.rxbad_en = 1;
- pcsx_int_en_reg.s.txflt_en = 1;
- }
- cvmx_write_csr(CVMX_PCSXX_INT_EN_REG(index), pcsx_int_en_reg.u64);
-}
-
-/**
- * __cvmx_interrupt_spxx_int_msk_enable enables all interrupt bits in cvmx_spxx_int_msk_t
- */
-void __cvmx_interrupt_spxx_int_msk_enable(int index)
-{
- union cvmx_spxx_int_msk spx_int_msk;
- cvmx_write_csr(CVMX_SPXX_INT_REG(index),
- cvmx_read_csr(CVMX_SPXX_INT_REG(index)));
- spx_int_msk.u64 = 0;
- if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
- /* Skipping spx_int_msk.s.reserved_12_63 */
- spx_int_msk.s.calerr = 1;
- spx_int_msk.s.syncerr = 1;
- spx_int_msk.s.diperr = 1;
- spx_int_msk.s.tpaovr = 1;
- spx_int_msk.s.rsverr = 1;
- spx_int_msk.s.drwnng = 1;
- spx_int_msk.s.clserr = 1;
- spx_int_msk.s.spiovr = 1;
- /* Skipping spx_int_msk.s.reserved_2_3 */
- spx_int_msk.s.abnorm = 1;
- spx_int_msk.s.prtnxa = 1;
- }
- if (OCTEON_IS_MODEL(OCTEON_CN58XX)) {
- /* Skipping spx_int_msk.s.reserved_12_63 */
- spx_int_msk.s.calerr = 1;
- spx_int_msk.s.syncerr = 1;
- spx_int_msk.s.diperr = 1;
- spx_int_msk.s.tpaovr = 1;
- spx_int_msk.s.rsverr = 1;
- spx_int_msk.s.drwnng = 1;
- spx_int_msk.s.clserr = 1;
- spx_int_msk.s.spiovr = 1;
- /* Skipping spx_int_msk.s.reserved_2_3 */
- spx_int_msk.s.abnorm = 1;
- spx_int_msk.s.prtnxa = 1;
- }
- cvmx_write_csr(CVMX_SPXX_INT_MSK(index), spx_int_msk.u64);
-}
-/**
- * __cvmx_interrupt_stxx_int_msk_enable enables all interrupt bits in cvmx_stxx_int_msk_t
- */
-void __cvmx_interrupt_stxx_int_msk_enable(int index)
-{
- union cvmx_stxx_int_msk stx_int_msk;
- cvmx_write_csr(CVMX_STXX_INT_REG(index),
- cvmx_read_csr(CVMX_STXX_INT_REG(index)));
- stx_int_msk.u64 = 0;
- if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
- /* Skipping stx_int_msk.s.reserved_8_63 */
- stx_int_msk.s.frmerr = 1;
- stx_int_msk.s.unxfrm = 1;
- stx_int_msk.s.nosync = 1;
- stx_int_msk.s.diperr = 1;
- stx_int_msk.s.datovr = 1;
- stx_int_msk.s.ovrbst = 1;
- stx_int_msk.s.calpar1 = 1;
- stx_int_msk.s.calpar0 = 1;
- }
- if (OCTEON_IS_MODEL(OCTEON_CN58XX)) {
- /* Skipping stx_int_msk.s.reserved_8_63 */
- stx_int_msk.s.frmerr = 1;
- stx_int_msk.s.unxfrm = 1;
- stx_int_msk.s.nosync = 1;
- stx_int_msk.s.diperr = 1;
- stx_int_msk.s.datovr = 1;
- stx_int_msk.s.ovrbst = 1;
- stx_int_msk.s.calpar1 = 1;
- stx_int_msk.s.calpar0 = 1;
- }
- cvmx_write_csr(CVMX_STXX_INT_MSK(index), stx_int_msk.u64);
-}
diff --git a/drivers/staging/octeon/cvmx-interrupt-rsl.c b/drivers/staging/octeon/cvmx-interrupt-rsl.c
deleted file mode 100644
index df50048..0000000
--- a/drivers/staging/octeon/cvmx-interrupt-rsl.c
+++ /dev/null
@@ -1,140 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- * Utility functions to decode Octeon's RSL_INT_BLOCKS
- * interrupts into error messages.
- */
-
-#include <asm/octeon/octeon.h>
-
-#include "cvmx-asxx-defs.h"
-#include "cvmx-gmxx-defs.h"
-
-#ifndef PRINT_ERROR
-#define PRINT_ERROR(format, ...)
-#endif
-
-void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block);
-
-/**
- * Enable ASX error interrupts that exist on CN3XXX, CN50XX, and
- * CN58XX.
- *
- * @block: Interface to enable 0-1
- */
-void __cvmx_interrupt_asxx_enable(int block)
-{
- int mask;
- union cvmx_asxx_int_en csr;
- /*
- * CN38XX and CN58XX have two interfaces with 4 ports per
- * interface. All other chips have a max of 3 ports on
- * interface 0
- */
- if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
- mask = 0xf; /* Set enables for 4 ports */
- else
- mask = 0x7; /* Set enables for 3 ports */
-
- /* Enable interface interrupts */
- csr.u64 = cvmx_read_csr(CVMX_ASXX_INT_EN(block));
- csr.s.txpsh = mask;
- csr.s.txpop = mask;
- csr.s.ovrflw = mask;
- cvmx_write_csr(CVMX_ASXX_INT_EN(block), csr.u64);
-}
-/**
- * Enable GMX error reporting for the supplied interface
- *
- * @interface: Interface to enable
- */
-void __cvmx_interrupt_gmxx_enable(int interface)
-{
- union cvmx_gmxx_inf_mode mode;
- union cvmx_gmxx_tx_int_en gmx_tx_int_en;
- int num_ports;
- int index;
-
- mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
-
- if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
- if (mode.s.en) {
- switch (mode.cn56xx.mode) {
- case 1: /* XAUI */
- num_ports = 1;
- break;
- case 2: /* SGMII */
- case 3: /* PICMG */
- num_ports = 4;
- break;
- default: /* Disabled */
- num_ports = 0;
- break;
- }
- } else
- num_ports = 0;
- } else {
- if (mode.s.en) {
- if (OCTEON_IS_MODEL(OCTEON_CN38XX)
- || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
- /*
- * SPI on CN38XX and CN58XX reports all
- * errors through port 0. RGMII needs
- * to check all 4 ports
- */
- if (mode.s.type)
- num_ports = 1;
- else
- num_ports = 4;
- } else {
- /*
- * CN30XX, CN31XX, and CN50XX have two
- * or three ports. GMII and MII have 2,
- * RGMII has three
- */
- if (mode.s.type)
- num_ports = 2;
- else
- num_ports = 3;
- }
- } else
- num_ports = 0;
- }
-
- gmx_tx_int_en.u64 = 0;
- if (num_ports) {
- if (OCTEON_IS_MODEL(OCTEON_CN38XX)
- || OCTEON_IS_MODEL(OCTEON_CN58XX))
- gmx_tx_int_en.s.ncb_nxa = 1;
- gmx_tx_int_en.s.pko_nxa = 1;
- }
- gmx_tx_int_en.s.undflw = (1 << num_ports) - 1;
- cvmx_write_csr(CVMX_GMXX_TX_INT_EN(interface), gmx_tx_int_en.u64);
- for (index = 0; index < num_ports; index++)
- __cvmx_interrupt_gmxx_rxx_int_en_enable(index, interface);
-}
diff --git a/drivers/staging/octeon/cvmx-ipd.h b/drivers/staging/octeon/cvmx-ipd.h
deleted file mode 100644
index 115a552..0000000
--- a/drivers/staging/octeon/cvmx-ipd.h
+++ /dev/null
@@ -1,338 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- *
- * Interface to the hardware Input Packet Data unit.
- */
-
-#ifndef __CVMX_IPD_H__
-#define __CVMX_IPD_H__
-
-#include <asm/octeon/octeon-feature.h>
-
-#include <asm/octeon/cvmx-ipd-defs.h>
-
-enum cvmx_ipd_mode {
- CVMX_IPD_OPC_MODE_STT = 0LL, /* All blocks DRAM, not cached in L2 */
- CVMX_IPD_OPC_MODE_STF = 1LL, /* All blocks into L2 */
- CVMX_IPD_OPC_MODE_STF1_STT = 2LL, /* 1st block L2, rest DRAM */
- CVMX_IPD_OPC_MODE_STF2_STT = 3LL /* 1st, 2nd blocks L2, rest DRAM */
-};
-
-#ifndef CVMX_ENABLE_LEN_M8_FIX
-#define CVMX_ENABLE_LEN_M8_FIX 0
-#endif
-
-/* CSR typedefs have been moved to cvmx-csr-*.h */
-typedef union cvmx_ipd_1st_mbuff_skip cvmx_ipd_mbuff_first_skip_t;
-typedef union cvmx_ipd_1st_next_ptr_back cvmx_ipd_first_next_ptr_back_t;
-
-typedef cvmx_ipd_mbuff_first_skip_t cvmx_ipd_mbuff_not_first_skip_t;
-typedef cvmx_ipd_first_next_ptr_back_t cvmx_ipd_second_next_ptr_back_t;
-
-/**
- * Configure IPD
- *
- * @mbuff_size: Packet buffer size in 8 byte words
- * @first_mbuff_skip:
- * Number of 8 byte words to skip in the first buffer
- * @not_first_mbuff_skip:
- * Number of 8 byte words to skip in each following buffer
- * @first_back: Must be the same as first_mbuff_skip / 128
- * @second_back:
- * Must be the same as not_first_mbuff_skip / 128
- * @wqe_fpa_pool:
- * FPA pool to get work entries from
- * @cache_mode:
- * @back_pres_enable_flag:
- * Enable or disable port back pressure
- */
-static inline void cvmx_ipd_config(uint64_t mbuff_size,
- uint64_t first_mbuff_skip,
- uint64_t not_first_mbuff_skip,
- uint64_t first_back,
- uint64_t second_back,
- uint64_t wqe_fpa_pool,
- enum cvmx_ipd_mode cache_mode,
- uint64_t back_pres_enable_flag)
-{
- cvmx_ipd_mbuff_first_skip_t first_skip;
- cvmx_ipd_mbuff_not_first_skip_t not_first_skip;
- union cvmx_ipd_packet_mbuff_size size;
- cvmx_ipd_first_next_ptr_back_t first_back_struct;
- cvmx_ipd_second_next_ptr_back_t second_back_struct;
- union cvmx_ipd_wqe_fpa_queue wqe_pool;
- union cvmx_ipd_ctl_status ipd_ctl_reg;
-
- first_skip.u64 = 0;
- first_skip.s.skip_sz = first_mbuff_skip;
- cvmx_write_csr(CVMX_IPD_1ST_MBUFF_SKIP, first_skip.u64);
-
- not_first_skip.u64 = 0;
- not_first_skip.s.skip_sz = not_first_mbuff_skip;
- cvmx_write_csr(CVMX_IPD_NOT_1ST_MBUFF_SKIP, not_first_skip.u64);
-
- size.u64 = 0;
- size.s.mb_size = mbuff_size;
- cvmx_write_csr(CVMX_IPD_PACKET_MBUFF_SIZE, size.u64);
-
- first_back_struct.u64 = 0;
- first_back_struct.s.back = first_back;
- cvmx_write_csr(CVMX_IPD_1st_NEXT_PTR_BACK, first_back_struct.u64);
-
- second_back_struct.u64 = 0;
- second_back_struct.s.back = second_back;
- cvmx_write_csr(CVMX_IPD_2nd_NEXT_PTR_BACK, second_back_struct.u64);
-
- wqe_pool.u64 = 0;
- wqe_pool.s.wqe_pool = wqe_fpa_pool;
- cvmx_write_csr(CVMX_IPD_WQE_FPA_QUEUE, wqe_pool.u64);
-
- ipd_ctl_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
- ipd_ctl_reg.s.opc_mode = cache_mode;
- ipd_ctl_reg.s.pbp_en = back_pres_enable_flag;
- cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_reg.u64);
-
- /* Note: the example RED code that used to be here has been moved to
- cvmx_helper_setup_red */
-}
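An editor's usage sketch, not part of the deleted header: the buffer size, skip counts, and the CVMX_FPA_WQE_POOL choice below are placeholders, picked only to show the documented rule that the *_back arguments equal the corresponding skip values divided by 128.

static inline void example_ipd_setup(void)
{
	uint64_t first_skip = 32;	/* 8-byte words skipped in the first buffer */
	uint64_t not_first_skip = 0;	/* no skip in later buffers */

	/* The *_back arguments are derived from the skips as documented above. */
	cvmx_ipd_config(256, first_skip, not_first_skip,
			first_skip / 128, not_first_skip / 128,
			CVMX_FPA_WQE_POOL, CVMX_IPD_OPC_MODE_STT,
			1 /* enable port back pressure */);
}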
-
-/**
- * Enable IPD
- */
-static inline void cvmx_ipd_enable(void)
-{
- union cvmx_ipd_ctl_status ipd_reg;
- ipd_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
- if (ipd_reg.s.ipd_en) {
- cvmx_dprintf
- ("Warning: Enabling IPD when IPD already enabled.\n");
- }
- ipd_reg.s.ipd_en = 1;
-#if CVMX_ENABLE_LEN_M8_FIX
- if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2))
- ipd_reg.s.len_m8 = TRUE;
-#endif
- cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);
-}
-
-/**
- * Disable IPD
- */
-static inline void cvmx_ipd_disable(void)
-{
- union cvmx_ipd_ctl_status ipd_reg;
- ipd_reg.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
- ipd_reg.s.ipd_en = 0;
- cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_reg.u64);
-}
-
-/**
- * Helper function for cvmx_fpa_shutdown_pool.
- */
-static inline void cvmx_ipd_free_ptr(void)
-{
- /* Only CN38XXp{1,2} cannot read pointers out of the IPD */
- if (!OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1)
- && !OCTEON_IS_MODEL(OCTEON_CN38XX_PASS2)) {
- int no_wptr = 0;
- union cvmx_ipd_ptr_count ipd_ptr_count;
- ipd_ptr_count.u64 = cvmx_read_csr(CVMX_IPD_PTR_COUNT);
-
- /* Handle Work Queue Entry in cn56xx and cn52xx */
- if (octeon_has_feature(OCTEON_FEATURE_NO_WPTR)) {
- union cvmx_ipd_ctl_status ipd_ctl_status;
- ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
- if (ipd_ctl_status.s.no_wptr)
- no_wptr = 1;
- }
-
- /* Free the prefetched WQE */
- if (ipd_ptr_count.s.wqev_cnt) {
- union cvmx_ipd_wqe_ptr_valid ipd_wqe_ptr_valid;
- ipd_wqe_ptr_valid.u64 =
- cvmx_read_csr(CVMX_IPD_WQE_PTR_VALID);
- if (no_wptr)
- cvmx_fpa_free(cvmx_phys_to_ptr
- ((uint64_t) ipd_wqe_ptr_valid.s.
- ptr << 7), CVMX_FPA_PACKET_POOL,
- 0);
- else
- cvmx_fpa_free(cvmx_phys_to_ptr
- ((uint64_t) ipd_wqe_ptr_valid.s.
- ptr << 7), CVMX_FPA_WQE_POOL, 0);
- }
-
- /* Free all WQE in the fifo */
- if (ipd_ptr_count.s.wqe_pcnt) {
- int i;
- union cvmx_ipd_pwp_ptr_fifo_ctl ipd_pwp_ptr_fifo_ctl;
- ipd_pwp_ptr_fifo_ctl.u64 =
- cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
- for (i = 0; i < ipd_ptr_count.s.wqe_pcnt; i++) {
- ipd_pwp_ptr_fifo_ctl.s.cena = 0;
- ipd_pwp_ptr_fifo_ctl.s.raddr =
- ipd_pwp_ptr_fifo_ctl.s.max_cnts +
- (ipd_pwp_ptr_fifo_ctl.s.wraddr +
- i) % ipd_pwp_ptr_fifo_ctl.s.max_cnts;
- cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
- ipd_pwp_ptr_fifo_ctl.u64);
- ipd_pwp_ptr_fifo_ctl.u64 =
- cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
- if (no_wptr)
- cvmx_fpa_free(cvmx_phys_to_ptr
- ((uint64_t)
- ipd_pwp_ptr_fifo_ctl.s.
- ptr << 7),
- CVMX_FPA_PACKET_POOL, 0);
- else
- cvmx_fpa_free(cvmx_phys_to_ptr
- ((uint64_t)
- ipd_pwp_ptr_fifo_ctl.s.
- ptr << 7),
- CVMX_FPA_WQE_POOL, 0);
- }
- ipd_pwp_ptr_fifo_ctl.s.cena = 1;
- cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
- ipd_pwp_ptr_fifo_ctl.u64);
- }
-
- /* Free the prefetched packet */
- if (ipd_ptr_count.s.pktv_cnt) {
- union cvmx_ipd_pkt_ptr_valid ipd_pkt_ptr_valid;
- ipd_pkt_ptr_valid.u64 =
- cvmx_read_csr(CVMX_IPD_PKT_PTR_VALID);
- cvmx_fpa_free(cvmx_phys_to_ptr
- (ipd_pkt_ptr_valid.s.ptr << 7),
- CVMX_FPA_PACKET_POOL, 0);
- }
-
- /* Free the per port prefetched packets */
- if (1) {
- int i;
- union cvmx_ipd_prc_port_ptr_fifo_ctl
- ipd_prc_port_ptr_fifo_ctl;
- ipd_prc_port_ptr_fifo_ctl.u64 =
- cvmx_read_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);
-
- for (i = 0; i < ipd_prc_port_ptr_fifo_ctl.s.max_pkt;
- i++) {
- ipd_prc_port_ptr_fifo_ctl.s.cena = 0;
- ipd_prc_port_ptr_fifo_ctl.s.raddr =
- i % ipd_prc_port_ptr_fifo_ctl.s.max_pkt;
- cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL,
- ipd_prc_port_ptr_fifo_ctl.u64);
- ipd_prc_port_ptr_fifo_ctl.u64 =
- cvmx_read_csr
- (CVMX_IPD_PRC_PORT_PTR_FIFO_CTL);
- cvmx_fpa_free(cvmx_phys_to_ptr
- ((uint64_t)
- ipd_prc_port_ptr_fifo_ctl.s.
- ptr << 7), CVMX_FPA_PACKET_POOL,
- 0);
- }
- ipd_prc_port_ptr_fifo_ctl.s.cena = 1;
- cvmx_write_csr(CVMX_IPD_PRC_PORT_PTR_FIFO_CTL,
- ipd_prc_port_ptr_fifo_ctl.u64);
- }
-
- /* Free all packets in the holding fifo */
- if (ipd_ptr_count.s.pfif_cnt) {
- int i;
- union cvmx_ipd_prc_hold_ptr_fifo_ctl
- ipd_prc_hold_ptr_fifo_ctl;
-
- ipd_prc_hold_ptr_fifo_ctl.u64 =
- cvmx_read_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);
-
- for (i = 0; i < ipd_ptr_count.s.pfif_cnt; i++) {
- ipd_prc_hold_ptr_fifo_ctl.s.cena = 0;
- ipd_prc_hold_ptr_fifo_ctl.s.raddr =
- (ipd_prc_hold_ptr_fifo_ctl.s.praddr +
- i) % ipd_prc_hold_ptr_fifo_ctl.s.max_pkt;
- cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL,
- ipd_prc_hold_ptr_fifo_ctl.u64);
- ipd_prc_hold_ptr_fifo_ctl.u64 =
- cvmx_read_csr
- (CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL);
- cvmx_fpa_free(cvmx_phys_to_ptr
- ((uint64_t)
- ipd_prc_hold_ptr_fifo_ctl.s.
- ptr << 7), CVMX_FPA_PACKET_POOL,
- 0);
- }
- ipd_prc_hold_ptr_fifo_ctl.s.cena = 1;
- cvmx_write_csr(CVMX_IPD_PRC_HOLD_PTR_FIFO_CTL,
- ipd_prc_hold_ptr_fifo_ctl.u64);
- }
-
- /* Free all packets in the fifo */
- if (ipd_ptr_count.s.pkt_pcnt) {
- int i;
- union cvmx_ipd_pwp_ptr_fifo_ctl ipd_pwp_ptr_fifo_ctl;
- ipd_pwp_ptr_fifo_ctl.u64 =
- cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
-
- for (i = 0; i < ipd_ptr_count.s.pkt_pcnt; i++) {
- ipd_pwp_ptr_fifo_ctl.s.cena = 0;
- ipd_pwp_ptr_fifo_ctl.s.raddr =
- (ipd_pwp_ptr_fifo_ctl.s.praddr +
- i) % ipd_pwp_ptr_fifo_ctl.s.max_cnts;
- cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
- ipd_pwp_ptr_fifo_ctl.u64);
- ipd_pwp_ptr_fifo_ctl.u64 =
- cvmx_read_csr(CVMX_IPD_PWP_PTR_FIFO_CTL);
- cvmx_fpa_free(cvmx_phys_to_ptr
- ((uint64_t) ipd_pwp_ptr_fifo_ctl.
- s.ptr << 7),
- CVMX_FPA_PACKET_POOL, 0);
- }
- ipd_pwp_ptr_fifo_ctl.s.cena = 1;
- cvmx_write_csr(CVMX_IPD_PWP_PTR_FIFO_CTL,
- ipd_pwp_ptr_fifo_ctl.u64);
- }
-
- /* Reset the IPD to get all buffers out of it */
- {
- union cvmx_ipd_ctl_status ipd_ctl_status;
- ipd_ctl_status.u64 = cvmx_read_csr(CVMX_IPD_CTL_STATUS);
- ipd_ctl_status.s.reset = 1;
- cvmx_write_csr(CVMX_IPD_CTL_STATUS, ipd_ctl_status.u64);
- }
-
- /* Reset the PIP */
- {
- union cvmx_pip_sft_rst pip_sft_rst;
- pip_sft_rst.u64 = cvmx_read_csr(CVMX_PIP_SFT_RST);
- pip_sft_rst.s.rst = 1;
- cvmx_write_csr(CVMX_PIP_SFT_RST, pip_sft_rst.u64);
- }
- }
-}
-
-#endif /* __CVMX_IPD_H__ */
diff --git a/drivers/staging/octeon/cvmx-mdio.h b/drivers/staging/octeon/cvmx-mdio.h
deleted file mode 100644
index d88ab8d..0000000
--- a/drivers/staging/octeon/cvmx-mdio.h
+++ /dev/null
@@ -1,506 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- *
- * Interface to the SMI/MDIO hardware, including support for both IEEE 802.3
- * clause 22 and clause 45 operations.
- *
- */
-
-#ifndef __CVMX_MIO_H__
-#define __CVMX_MIO_H__
-
-#include "cvmx-smix-defs.h"
-
-/**
- * PHY register 0 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_CONTROL 0
-typedef union {
- uint16_t u16;
- struct {
- uint16_t reset:1;
- uint16_t loopback:1;
- uint16_t speed_lsb:1;
- uint16_t autoneg_enable:1;
- uint16_t power_down:1;
- uint16_t isolate:1;
- uint16_t restart_autoneg:1;
- uint16_t duplex:1;
- uint16_t collision_test:1;
- uint16_t speed_msb:1;
- uint16_t unidirectional_enable:1;
- uint16_t reserved_0_4:5;
- } s;
-} cvmx_mdio_phy_reg_control_t;
-
-/**
- * PHY register 1 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_STATUS 1
-typedef union {
- uint16_t u16;
- struct {
- uint16_t capable_100base_t4:1;
- uint16_t capable_100base_x_full:1;
- uint16_t capable_100base_x_half:1;
- uint16_t capable_10_full:1;
- uint16_t capable_10_half:1;
- uint16_t capable_100base_t2_full:1;
- uint16_t capable_100base_t2_half:1;
- uint16_t capable_extended_status:1;
- uint16_t capable_unidirectional:1;
- uint16_t capable_mf_preamble_suppression:1;
- uint16_t autoneg_complete:1;
- uint16_t remote_fault:1;
- uint16_t capable_autoneg:1;
- uint16_t link_status:1;
- uint16_t jabber_detect:1;
- uint16_t capable_extended_registers:1;
-
- } s;
-} cvmx_mdio_phy_reg_status_t;
-
-/**
- * PHY register 2 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_ID1 2
-typedef union {
- uint16_t u16;
- struct {
- uint16_t oui_bits_3_18;
- } s;
-} cvmx_mdio_phy_reg_id1_t;
-
-/**
- * PHY register 3 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_ID2 3
-typedef union {
- uint16_t u16;
- struct {
- uint16_t oui_bits_19_24:6;
- uint16_t model:6;
- uint16_t revision:4;
- } s;
-} cvmx_mdio_phy_reg_id2_t;
-
-/**
- * PHY register 4 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_AUTONEG_ADVER 4
-typedef union {
- uint16_t u16;
- struct {
- uint16_t next_page:1;
- uint16_t reserved_14:1;
- uint16_t remote_fault:1;
- uint16_t reserved_12:1;
- uint16_t asymmetric_pause:1;
- uint16_t pause:1;
- uint16_t advert_100base_t4:1;
- uint16_t advert_100base_tx_full:1;
- uint16_t advert_100base_tx_half:1;
- uint16_t advert_10base_tx_full:1;
- uint16_t advert_10base_tx_half:1;
- uint16_t selector:5;
- } s;
-} cvmx_mdio_phy_reg_autoneg_adver_t;
-
-/**
- * PHY register 5 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_LINK_PARTNER_ABILITY 5
-typedef union {
- uint16_t u16;
- struct {
- uint16_t next_page:1;
- uint16_t ack:1;
- uint16_t remote_fault:1;
- uint16_t reserved_12:1;
- uint16_t asymmetric_pause:1;
- uint16_t pause:1;
- uint16_t advert_100base_t4:1;
- uint16_t advert_100base_tx_full:1;
- uint16_t advert_100base_tx_half:1;
- uint16_t advert_10base_tx_full:1;
- uint16_t advert_10base_tx_half:1;
- uint16_t selector:5;
- } s;
-} cvmx_mdio_phy_reg_link_partner_ability_t;
-
-/**
- * PHY register 6 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_AUTONEG_EXPANSION 6
-typedef union {
- uint16_t u16;
- struct {
- uint16_t reserved_5_15:11;
- uint16_t parallel_detection_fault:1;
- uint16_t link_partner_next_page_capable:1;
- uint16_t local_next_page_capable:1;
- uint16_t page_received:1;
- uint16_t link_partner_autoneg_capable:1;
-
- } s;
-} cvmx_mdio_phy_reg_autoneg_expansion_t;
-
-/**
- * PHY register 9 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_CONTROL_1000 9
-typedef union {
- uint16_t u16;
- struct {
- uint16_t test_mode:3;
- uint16_t manual_master_slave:1;
- uint16_t master:1;
- uint16_t port_type:1;
- uint16_t advert_1000base_t_full:1;
- uint16_t advert_1000base_t_half:1;
- uint16_t reserved_0_7:8;
- } s;
-} cvmx_mdio_phy_reg_control_1000_t;
-
-/**
- * PHY register 10 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_STATUS_1000 10
-typedef union {
- uint16_t u16;
- struct {
- uint16_t master_slave_fault:1;
- uint16_t is_master:1;
- uint16_t local_receiver_ok:1;
- uint16_t remote_receiver_ok:1;
- uint16_t remote_capable_1000base_t_full:1;
- uint16_t remote_capable_1000base_t_half:1;
- uint16_t reserved_8_9:2;
- uint16_t idle_error_count:8;
- } s;
-} cvmx_mdio_phy_reg_status_1000_t;
-
-/**
- * PHY register 15 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_EXTENDED_STATUS 15
-typedef union {
- uint16_t u16;
- struct {
- uint16_t capable_1000base_x_full:1;
- uint16_t capable_1000base_x_half:1;
- uint16_t capable_1000base_t_full:1;
- uint16_t capable_1000base_t_half:1;
- uint16_t reserved_0_11:12;
- } s;
-} cvmx_mdio_phy_reg_extended_status_t;
-
-/**
- * PHY register 13 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_MMD_CONTROL 13
-typedef union {
- uint16_t u16;
- struct {
- uint16_t function:2;
- uint16_t reserved_5_13:9;
- uint16_t devad:5;
- } s;
-} cvmx_mdio_phy_reg_mmd_control_t;
-
-/**
- * PHY register 14 from the 802.3 spec
- */
-#define CVMX_MDIO_PHY_REG_MMD_ADDRESS_DATA 14
-typedef union {
- uint16_t u16;
- struct {
- uint16_t address_data:16;
- } s;
-} cvmx_mdio_phy_reg_mmd_address_data_t;
-
-/* Operating request encodings. */
-#define MDIO_CLAUSE_22_WRITE 0
-#define MDIO_CLAUSE_22_READ 1
-
-#define MDIO_CLAUSE_45_ADDRESS 0
-#define MDIO_CLAUSE_45_WRITE 1
-#define MDIO_CLAUSE_45_READ_INC 2
-#define MDIO_CLAUSE_45_READ 3
-
-/* MMD identifiers, mostly for accessing devices within XENPAK modules. */
-#define CVMX_MMD_DEVICE_PMA_PMD 1
-#define CVMX_MMD_DEVICE_WIS 2
-#define CVMX_MMD_DEVICE_PCS 3
-#define CVMX_MMD_DEVICE_PHY_XS 4
-#define CVMX_MMD_DEVICE_DTS_XS 5
-#define CVMX_MMD_DEVICE_TC 6
-#define CVMX_MMD_DEVICE_CL22_EXT 29
-#define CVMX_MMD_DEVICE_VENDOR_1 30
-#define CVMX_MMD_DEVICE_VENDOR_2 31
-
-/* Helper function to put MDIO interface into clause 45 mode */
-static inline void __cvmx_mdio_set_clause45_mode(int bus_id)
-{
- union cvmx_smix_clk smi_clk;
- /* Put bus into clause 45 mode */
- smi_clk.u64 = cvmx_read_csr(CVMX_SMIX_CLK(bus_id));
- smi_clk.s.mode = 1;
- smi_clk.s.preamble = 1;
- cvmx_write_csr(CVMX_SMIX_CLK(bus_id), smi_clk.u64);
-}
-
-/* Helper function to put MDIO interface into clause 22 mode */
-static inline void __cvmx_mdio_set_clause22_mode(int bus_id)
-{
- union cvmx_smix_clk smi_clk;
- /* Put bus into clause 22 mode */
- smi_clk.u64 = cvmx_read_csr(CVMX_SMIX_CLK(bus_id));
- smi_clk.s.mode = 0;
- cvmx_write_csr(CVMX_SMIX_CLK(bus_id), smi_clk.u64);
-}
-
-/**
- * Perform an MII read. This function is used to read PHY
- * registers controlling auto negotiation.
- *
- * @bus_id: MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
- * support multiple buses.
- * @phy_id: The MII phy id
- * @location: Register location to read
- *
- * Returns Result from the read or -1 on failure
- */
-static inline int cvmx_mdio_read(int bus_id, int phy_id, int location)
-{
- union cvmx_smix_cmd smi_cmd;
- union cvmx_smix_rd_dat smi_rd;
- int timeout = 1000;
-
- if (octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
- __cvmx_mdio_set_clause22_mode(bus_id);
-
- smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = MDIO_CLAUSE_22_READ;
- smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = location;
- cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
-
- do {
- cvmx_wait(1000);
- smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(bus_id));
- } while (smi_rd.s.pending && timeout--);
-
- if (smi_rd.s.val)
- return smi_rd.s.dat;
- else
- return -1;
-}
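An illustrative caller added by the editor (not in the original file): it reads the 802.3 status register defined earlier in this header and returns whether the link bit is set; the bus and PHY addresses are whatever the board wiring dictates.

static inline int example_phy_link_up(int bus_id, int phy_id)
{
	cvmx_mdio_phy_reg_status_t status;
	int result = cvmx_mdio_read(bus_id, phy_id, CVMX_MDIO_PHY_REG_STATUS);

	if (result < 0)
		return 0;	/* read failed or timed out */
	status.u16 = (uint16_t)result;
	return status.s.link_status;
}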
-
-/**
- * Perform an MII write. This function is used to write PHY
- * registers controlling auto negotiation.
- *
- * @bus_id: MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
- * support multiple buses.
- * @phy_id: The MII phy id
- * @location: Register location to write
- * @val: Value to write
- *
- * Returns -1 on error
- * 0 on success
- */
-static inline int cvmx_mdio_write(int bus_id, int phy_id, int location, int val)
-{
- union cvmx_smix_cmd smi_cmd;
- union cvmx_smix_wr_dat smi_wr;
- int timeout = 1000;
-
- if (octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
- __cvmx_mdio_set_clause22_mode(bus_id);
-
- smi_wr.u64 = 0;
- smi_wr.s.dat = val;
- cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
-
- smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = MDIO_CLAUSE_22_WRITE;
- smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = location;
- cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
-
- do {
- cvmx_wait(1000);
- smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
- } while (smi_wr.s.pending && --timeout);
- if (timeout <= 0)
- return -1;
-
- return 0;
-}
-
-/**
- * Perform an IEEE 802.3 clause 45 MII read. This function is used to
- * read PHY registers controlling auto negotiation.
- *
- * @bus_id: MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
- * support multiple buses.
- * @phy_id: The MII phy id
- * @device: MDIO Manageable Device (MMD) id
- * @location: Register location to read
- *
- * Returns Result from the read or -1 on failure
- */
-
-static inline int cvmx_mdio_45_read(int bus_id, int phy_id, int device,
- int location)
-{
- union cvmx_smix_cmd smi_cmd;
- union cvmx_smix_rd_dat smi_rd;
- union cvmx_smix_wr_dat smi_wr;
- int timeout = 1000;
-
- if (!octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
- return -1;
-
- __cvmx_mdio_set_clause45_mode(bus_id);
-
- smi_wr.u64 = 0;
- smi_wr.s.dat = location;
- cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
-
- smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = MDIO_CLAUSE_45_ADDRESS;
- smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = device;
- cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
-
- do {
- cvmx_wait(1000);
- smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
- } while (smi_wr.s.pending && --timeout);
- if (timeout <= 0) {
- cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
- "device %2d register %2d TIME OUT(address)\n",
- bus_id, phy_id, device, location);
- return -1;
- }
-
- smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = MDIO_CLAUSE_45_READ;
- smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = device;
- cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
-
- do {
- cvmx_wait(1000);
- smi_rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(bus_id));
- } while (smi_rd.s.pending && --timeout);
-
- if (timeout <= 0) {
- cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
- "device %2d register %2d TIME OUT(data)\n",
- bus_id, phy_id, device, location);
- return -1;
- }
-
- if (smi_rd.s.val)
- return smi_rd.s.dat;
- else {
- cvmx_dprintf("cvmx_mdio_45_read: bus_id %d phy_id %2d "
- "device %2d register %2d INVALID READ\n",
- bus_id, phy_id, device, location);
- return -1;
- }
-}
-
-/**
- * Perform an IEEE 802.3 clause 45 MII write. This function is used to
- * write PHY registers controlling auto negotiation.
- *
- * @bus_id: MDIO bus number. Zero on most chips, but some chips (e.g. CN56XX)
- * support multiple buses.
- * @phy_id: The MII phy id
- * @device: MDIO Manageable Device (MMD) id
- * @location: Register location to write
- * @val: Value to write
- *
- * Returns -1 on error
- * 0 on success
- */
-static inline int cvmx_mdio_45_write(int bus_id, int phy_id, int device,
- int location, int val)
-{
- union cvmx_smix_cmd smi_cmd;
- union cvmx_smix_wr_dat smi_wr;
- int timeout = 1000;
-
- if (!octeon_has_feature(OCTEON_FEATURE_MDIO_CLAUSE_45))
- return -1;
-
- __cvmx_mdio_set_clause45_mode(bus_id);
-
- smi_wr.u64 = 0;
- smi_wr.s.dat = location;
- cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
-
- smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = MDIO_CLAUSE_45_ADDRESS;
- smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = device;
- cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
-
- do {
- cvmx_wait(1000);
- smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
- } while (smi_wr.s.pending && --timeout);
- if (timeout <= 0)
- return -1;
-
- smi_wr.u64 = 0;
- smi_wr.s.dat = val;
- cvmx_write_csr(CVMX_SMIX_WR_DAT(bus_id), smi_wr.u64);
-
- smi_cmd.u64 = 0;
- smi_cmd.s.phy_op = MDIO_CLAUSE_45_WRITE;
- smi_cmd.s.phy_adr = phy_id;
- smi_cmd.s.reg_adr = device;
- cvmx_write_csr(CVMX_SMIX_CMD(bus_id), smi_cmd.u64);
-
- do {
- cvmx_wait(1000);
- smi_wr.u64 = cvmx_read_csr(CVMX_SMIX_WR_DAT(bus_id));
- } while (smi_wr.s.pending && --timeout);
- if (timeout <= 0)
- return -1;
-
- return 0;
-}
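A minimal clause 45 sketch from the editor; the PMA/PMD device and register 0 are placeholder targets chosen only to exercise the address/read and address/write sequences implemented above.

static inline int example_mmd_touch(int bus_id, int phy_id)
{
	int reg = cvmx_mdio_45_read(bus_id, phy_id,
				    CVMX_MMD_DEVICE_PMA_PMD, 0);

	if (reg < 0)
		return -1;
	/* Write the value straight back; a real caller would modify bits. */
	return cvmx_mdio_45_write(bus_id, phy_id,
				  CVMX_MMD_DEVICE_PMA_PMD, 0, reg);
}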
-
-#endif
diff --git a/drivers/staging/octeon/cvmx-packet.h b/drivers/staging/octeon/cvmx-packet.h
deleted file mode 100644
index 62ffe78a..0000000
--- a/drivers/staging/octeon/cvmx-packet.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- *
- * Packet buffer defines.
- */
-
-#ifndef __CVMX_PACKET_H__
-#define __CVMX_PACKET_H__
-
-/**
- * This structure defines a buffer pointer on Octeon
- */
-union cvmx_buf_ptr {
- void *ptr;
- uint64_t u64;
- struct {
- /*
- * if set, invert the "free" pick of the overall
- * packet. HW always sets this bit to 0 on inbound
- * packet
- */
- uint64_t i:1;
- /*
- * Indicates the amount to back up to get to the
- * buffer start in cache lines. In most cases this is
- * less than one complete cache line, so the value is
- * zero.
- */
- uint64_t back:4;
- /* The pool that the buffer came from / goes to */
- uint64_t pool:3;
- /* The size of the segment pointed to by addr (in bytes) */
- uint64_t size:16;
- /* Pointer to the first byte of the data, NOT buffer */
- uint64_t addr:40;
- } s;
-};
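A short arithmetic sketch from the editor, assuming the usual 128-byte Octeon cache line: it shows how the addr and back fields described above combine to recover the physical start of the buffer.

static inline uint64_t example_buffer_start_phys(union cvmx_buf_ptr buf)
{
	/* addr points at the first data byte; back counts cache lines to
	 * back up from that cache line to reach the start of the buffer. */
	return ((buf.s.addr >> 7) - buf.s.back) << 7;
}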
-
-#endif /* __CVMX_PACKET_H__ */
diff --git a/drivers/staging/octeon/cvmx-pcsx-defs.h b/drivers/staging/octeon/cvmx-pcsx-defs.h
deleted file mode 100644
index d45952d..0000000
--- a/drivers/staging/octeon/cvmx-pcsx-defs.h
+++ /dev/null
@@ -1,370 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_PCSX_DEFS_H__
-#define __CVMX_PCSX_DEFS_H__
-
-#define CVMX_PCSX_ANX_ADV_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001010ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_ANX_EXT_ST_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001028ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_ANX_LP_ABIL_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001018ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_ANX_RESULTS_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001020ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_INTX_EN_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001088ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_INTX_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001080ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_LINKX_TIMER_COUNT_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001040ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_LOG_ANLX_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001090ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_MISCX_CTL_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001078ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_MRX_CONTROL_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001000ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_MRX_STATUS_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001008ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_RXX_STATES_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001058ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_RXX_SYNC_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001050ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_SGMX_AN_ADV_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001068ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_SGMX_LP_ADV_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001070ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_TXX_STATES_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001060ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSX_TX_RXX_POLARITY_REG(offset, block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0001048ull + (((offset) & 3) * 1024) + (((block_id) & 1) * 0x8000000ull))
-
-union cvmx_pcsx_anx_adv_reg {
- uint64_t u64;
- struct cvmx_pcsx_anx_adv_reg_s {
- uint64_t reserved_16_63:48;
- uint64_t np:1;
- uint64_t reserved_14_14:1;
- uint64_t rem_flt:2;
- uint64_t reserved_9_11:3;
- uint64_t pause:2;
- uint64_t hfd:1;
- uint64_t fd:1;
- uint64_t reserved_0_4:5;
- } s;
- struct cvmx_pcsx_anx_adv_reg_s cn52xx;
- struct cvmx_pcsx_anx_adv_reg_s cn52xxp1;
- struct cvmx_pcsx_anx_adv_reg_s cn56xx;
- struct cvmx_pcsx_anx_adv_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_anx_ext_st_reg {
- uint64_t u64;
- struct cvmx_pcsx_anx_ext_st_reg_s {
- uint64_t reserved_16_63:48;
- uint64_t thou_xfd:1;
- uint64_t thou_xhd:1;
- uint64_t thou_tfd:1;
- uint64_t thou_thd:1;
- uint64_t reserved_0_11:12;
- } s;
- struct cvmx_pcsx_anx_ext_st_reg_s cn52xx;
- struct cvmx_pcsx_anx_ext_st_reg_s cn52xxp1;
- struct cvmx_pcsx_anx_ext_st_reg_s cn56xx;
- struct cvmx_pcsx_anx_ext_st_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_anx_lp_abil_reg {
- uint64_t u64;
- struct cvmx_pcsx_anx_lp_abil_reg_s {
- uint64_t reserved_16_63:48;
- uint64_t np:1;
- uint64_t ack:1;
- uint64_t rem_flt:2;
- uint64_t reserved_9_11:3;
- uint64_t pause:2;
- uint64_t hfd:1;
- uint64_t fd:1;
- uint64_t reserved_0_4:5;
- } s;
- struct cvmx_pcsx_anx_lp_abil_reg_s cn52xx;
- struct cvmx_pcsx_anx_lp_abil_reg_s cn52xxp1;
- struct cvmx_pcsx_anx_lp_abil_reg_s cn56xx;
- struct cvmx_pcsx_anx_lp_abil_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_anx_results_reg {
- uint64_t u64;
- struct cvmx_pcsx_anx_results_reg_s {
- uint64_t reserved_7_63:57;
- uint64_t pause:2;
- uint64_t spd:2;
- uint64_t an_cpt:1;
- uint64_t dup:1;
- uint64_t link_ok:1;
- } s;
- struct cvmx_pcsx_anx_results_reg_s cn52xx;
- struct cvmx_pcsx_anx_results_reg_s cn52xxp1;
- struct cvmx_pcsx_anx_results_reg_s cn56xx;
- struct cvmx_pcsx_anx_results_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_intx_en_reg {
- uint64_t u64;
- struct cvmx_pcsx_intx_en_reg_s {
- uint64_t reserved_12_63:52;
- uint64_t dup:1;
- uint64_t sync_bad_en:1;
- uint64_t an_bad_en:1;
- uint64_t rxlock_en:1;
- uint64_t rxbad_en:1;
- uint64_t rxerr_en:1;
- uint64_t txbad_en:1;
- uint64_t txfifo_en:1;
- uint64_t txfifu_en:1;
- uint64_t an_err_en:1;
- uint64_t xmit_en:1;
- uint64_t lnkspd_en:1;
- } s;
- struct cvmx_pcsx_intx_en_reg_s cn52xx;
- struct cvmx_pcsx_intx_en_reg_s cn52xxp1;
- struct cvmx_pcsx_intx_en_reg_s cn56xx;
- struct cvmx_pcsx_intx_en_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_intx_reg {
- uint64_t u64;
- struct cvmx_pcsx_intx_reg_s {
- uint64_t reserved_12_63:52;
- uint64_t dup:1;
- uint64_t sync_bad:1;
- uint64_t an_bad:1;
- uint64_t rxlock:1;
- uint64_t rxbad:1;
- uint64_t rxerr:1;
- uint64_t txbad:1;
- uint64_t txfifo:1;
- uint64_t txfifu:1;
- uint64_t an_err:1;
- uint64_t xmit:1;
- uint64_t lnkspd:1;
- } s;
- struct cvmx_pcsx_intx_reg_s cn52xx;
- struct cvmx_pcsx_intx_reg_s cn52xxp1;
- struct cvmx_pcsx_intx_reg_s cn56xx;
- struct cvmx_pcsx_intx_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_linkx_timer_count_reg {
- uint64_t u64;
- struct cvmx_pcsx_linkx_timer_count_reg_s {
- uint64_t reserved_16_63:48;
- uint64_t count:16;
- } s;
- struct cvmx_pcsx_linkx_timer_count_reg_s cn52xx;
- struct cvmx_pcsx_linkx_timer_count_reg_s cn52xxp1;
- struct cvmx_pcsx_linkx_timer_count_reg_s cn56xx;
- struct cvmx_pcsx_linkx_timer_count_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_log_anlx_reg {
- uint64_t u64;
- struct cvmx_pcsx_log_anlx_reg_s {
- uint64_t reserved_4_63:60;
- uint64_t lafifovfl:1;
- uint64_t la_en:1;
- uint64_t pkt_sz:2;
- } s;
- struct cvmx_pcsx_log_anlx_reg_s cn52xx;
- struct cvmx_pcsx_log_anlx_reg_s cn52xxp1;
- struct cvmx_pcsx_log_anlx_reg_s cn56xx;
- struct cvmx_pcsx_log_anlx_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_miscx_ctl_reg {
- uint64_t u64;
- struct cvmx_pcsx_miscx_ctl_reg_s {
- uint64_t reserved_13_63:51;
- uint64_t sgmii:1;
- uint64_t gmxeno:1;
- uint64_t loopbck2:1;
- uint64_t mac_phy:1;
- uint64_t mode:1;
- uint64_t an_ovrd:1;
- uint64_t samp_pt:7;
- } s;
- struct cvmx_pcsx_miscx_ctl_reg_s cn52xx;
- struct cvmx_pcsx_miscx_ctl_reg_s cn52xxp1;
- struct cvmx_pcsx_miscx_ctl_reg_s cn56xx;
- struct cvmx_pcsx_miscx_ctl_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_mrx_control_reg {
- uint64_t u64;
- struct cvmx_pcsx_mrx_control_reg_s {
- uint64_t reserved_16_63:48;
- uint64_t reset:1;
- uint64_t loopbck1:1;
- uint64_t spdlsb:1;
- uint64_t an_en:1;
- uint64_t pwr_dn:1;
- uint64_t reserved_10_10:1;
- uint64_t rst_an:1;
- uint64_t dup:1;
- uint64_t coltst:1;
- uint64_t spdmsb:1;
- uint64_t uni:1;
- uint64_t reserved_0_4:5;
- } s;
- struct cvmx_pcsx_mrx_control_reg_s cn52xx;
- struct cvmx_pcsx_mrx_control_reg_s cn52xxp1;
- struct cvmx_pcsx_mrx_control_reg_s cn56xx;
- struct cvmx_pcsx_mrx_control_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_mrx_status_reg {
- uint64_t u64;
- struct cvmx_pcsx_mrx_status_reg_s {
- uint64_t reserved_16_63:48;
- uint64_t hun_t4:1;
- uint64_t hun_xfd:1;
- uint64_t hun_xhd:1;
- uint64_t ten_fd:1;
- uint64_t ten_hd:1;
- uint64_t hun_t2fd:1;
- uint64_t hun_t2hd:1;
- uint64_t ext_st:1;
- uint64_t reserved_7_7:1;
- uint64_t prb_sup:1;
- uint64_t an_cpt:1;
- uint64_t rm_flt:1;
- uint64_t an_abil:1;
- uint64_t lnk_st:1;
- uint64_t reserved_1_1:1;
- uint64_t extnd:1;
- } s;
- struct cvmx_pcsx_mrx_status_reg_s cn52xx;
- struct cvmx_pcsx_mrx_status_reg_s cn52xxp1;
- struct cvmx_pcsx_mrx_status_reg_s cn56xx;
- struct cvmx_pcsx_mrx_status_reg_s cn56xxp1;
-};
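An editor's sketch of how these per-lane definitions are typically consumed: cvmx_read_csr and the (index, interface) argument order match usage elsewhere in this driver, but the helper itself is illustrative and not part of the deleted header.

static inline int example_pcs_link_up(int interface, int index)
{
	union cvmx_pcsx_mrx_status_reg mr_status;

	/* index selects the lane/port (offset); interface selects the block. */
	mr_status.u64 = cvmx_read_csr(CVMX_PCSX_MRX_STATUS_REG(index, interface));
	return mr_status.s.lnk_st;	/* 1 when the SGMII/1000BASE-X link is up */
}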
-
-union cvmx_pcsx_rxx_states_reg {
- uint64_t u64;
- struct cvmx_pcsx_rxx_states_reg_s {
- uint64_t reserved_16_63:48;
- uint64_t rx_bad:1;
- uint64_t rx_st:5;
- uint64_t sync_bad:1;
- uint64_t sync:4;
- uint64_t an_bad:1;
- uint64_t an_st:4;
- } s;
- struct cvmx_pcsx_rxx_states_reg_s cn52xx;
- struct cvmx_pcsx_rxx_states_reg_s cn52xxp1;
- struct cvmx_pcsx_rxx_states_reg_s cn56xx;
- struct cvmx_pcsx_rxx_states_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_rxx_sync_reg {
- uint64_t u64;
- struct cvmx_pcsx_rxx_sync_reg_s {
- uint64_t reserved_2_63:62;
- uint64_t sync:1;
- uint64_t bit_lock:1;
- } s;
- struct cvmx_pcsx_rxx_sync_reg_s cn52xx;
- struct cvmx_pcsx_rxx_sync_reg_s cn52xxp1;
- struct cvmx_pcsx_rxx_sync_reg_s cn56xx;
- struct cvmx_pcsx_rxx_sync_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_sgmx_an_adv_reg {
- uint64_t u64;
- struct cvmx_pcsx_sgmx_an_adv_reg_s {
- uint64_t reserved_16_63:48;
- uint64_t link:1;
- uint64_t ack:1;
- uint64_t reserved_13_13:1;
- uint64_t dup:1;
- uint64_t speed:2;
- uint64_t reserved_1_9:9;
- uint64_t one:1;
- } s;
- struct cvmx_pcsx_sgmx_an_adv_reg_s cn52xx;
- struct cvmx_pcsx_sgmx_an_adv_reg_s cn52xxp1;
- struct cvmx_pcsx_sgmx_an_adv_reg_s cn56xx;
- struct cvmx_pcsx_sgmx_an_adv_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_sgmx_lp_adv_reg {
- uint64_t u64;
- struct cvmx_pcsx_sgmx_lp_adv_reg_s {
- uint64_t reserved_16_63:48;
- uint64_t link:1;
- uint64_t reserved_13_14:2;
- uint64_t dup:1;
- uint64_t speed:2;
- uint64_t reserved_1_9:9;
- uint64_t one:1;
- } s;
- struct cvmx_pcsx_sgmx_lp_adv_reg_s cn52xx;
- struct cvmx_pcsx_sgmx_lp_adv_reg_s cn52xxp1;
- struct cvmx_pcsx_sgmx_lp_adv_reg_s cn56xx;
- struct cvmx_pcsx_sgmx_lp_adv_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_txx_states_reg {
- uint64_t u64;
- struct cvmx_pcsx_txx_states_reg_s {
- uint64_t reserved_7_63:57;
- uint64_t xmit:2;
- uint64_t tx_bad:1;
- uint64_t ord_st:4;
- } s;
- struct cvmx_pcsx_txx_states_reg_s cn52xx;
- struct cvmx_pcsx_txx_states_reg_s cn52xxp1;
- struct cvmx_pcsx_txx_states_reg_s cn56xx;
- struct cvmx_pcsx_txx_states_reg_s cn56xxp1;
-};
-
-union cvmx_pcsx_tx_rxx_polarity_reg {
- uint64_t u64;
- struct cvmx_pcsx_tx_rxx_polarity_reg_s {
- uint64_t reserved_4_63:60;
- uint64_t rxovrd:1;
- uint64_t autorxpl:1;
- uint64_t rxplrt:1;
- uint64_t txplrt:1;
- } s;
- struct cvmx_pcsx_tx_rxx_polarity_reg_s cn52xx;
- struct cvmx_pcsx_tx_rxx_polarity_reg_s cn52xxp1;
- struct cvmx_pcsx_tx_rxx_polarity_reg_s cn56xx;
- struct cvmx_pcsx_tx_rxx_polarity_reg_s cn56xxp1;
-};
-
-#endif
diff --git a/drivers/staging/octeon/cvmx-pcsxx-defs.h b/drivers/staging/octeon/cvmx-pcsxx-defs.h
deleted file mode 100644
index 55d120f..0000000
--- a/drivers/staging/octeon/cvmx-pcsxx-defs.h
+++ /dev/null
@@ -1,316 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_PCSXX_DEFS_H__
-#define __CVMX_PCSXX_DEFS_H__
-
-#define CVMX_PCSXX_10GBX_STATUS_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000828ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSXX_BIST_STATUS_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000870ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSXX_BIT_LOCK_STATUS_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000850ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSXX_CONTROL1_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000800ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSXX_CONTROL2_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000818ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSXX_INT_EN_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000860ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSXX_INT_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000858ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSXX_LOG_ANL_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000868ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSXX_MISC_CTL_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000848ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSXX_RX_SYNC_STATES_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000838ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSXX_SPD_ABIL_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000810ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSXX_STATUS1_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000808ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSXX_STATUS2_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000820ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSXX_TX_RX_POLARITY_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000840ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_PCSXX_TX_RX_STATES_REG(block_id) \
- CVMX_ADD_IO_SEG(0x00011800B0000830ull + (((block_id) & 1) * 0x8000000ull))
-
-union cvmx_pcsxx_10gbx_status_reg {
- uint64_t u64;
- struct cvmx_pcsxx_10gbx_status_reg_s {
- uint64_t reserved_13_63:51;
- uint64_t alignd:1;
- uint64_t pattst:1;
- uint64_t reserved_4_10:7;
- uint64_t l3sync:1;
- uint64_t l2sync:1;
- uint64_t l1sync:1;
- uint64_t l0sync:1;
- } s;
- struct cvmx_pcsxx_10gbx_status_reg_s cn52xx;
- struct cvmx_pcsxx_10gbx_status_reg_s cn52xxp1;
- struct cvmx_pcsxx_10gbx_status_reg_s cn56xx;
- struct cvmx_pcsxx_10gbx_status_reg_s cn56xxp1;
-};
-
-union cvmx_pcsxx_bist_status_reg {
- uint64_t u64;
- struct cvmx_pcsxx_bist_status_reg_s {
- uint64_t reserved_1_63:63;
- uint64_t bist_status:1;
- } s;
- struct cvmx_pcsxx_bist_status_reg_s cn52xx;
- struct cvmx_pcsxx_bist_status_reg_s cn52xxp1;
- struct cvmx_pcsxx_bist_status_reg_s cn56xx;
- struct cvmx_pcsxx_bist_status_reg_s cn56xxp1;
-};
-
-union cvmx_pcsxx_bit_lock_status_reg {
- uint64_t u64;
- struct cvmx_pcsxx_bit_lock_status_reg_s {
- uint64_t reserved_4_63:60;
- uint64_t bitlck3:1;
- uint64_t bitlck2:1;
- uint64_t bitlck1:1;
- uint64_t bitlck0:1;
- } s;
- struct cvmx_pcsxx_bit_lock_status_reg_s cn52xx;
- struct cvmx_pcsxx_bit_lock_status_reg_s cn52xxp1;
- struct cvmx_pcsxx_bit_lock_status_reg_s cn56xx;
- struct cvmx_pcsxx_bit_lock_status_reg_s cn56xxp1;
-};
-
-union cvmx_pcsxx_control1_reg {
- uint64_t u64;
- struct cvmx_pcsxx_control1_reg_s {
- uint64_t reserved_16_63:48;
- uint64_t reset:1;
- uint64_t loopbck1:1;
- uint64_t spdsel1:1;
- uint64_t reserved_12_12:1;
- uint64_t lo_pwr:1;
- uint64_t reserved_7_10:4;
- uint64_t spdsel0:1;
- uint64_t spd:4;
- uint64_t reserved_0_1:2;
- } s;
- struct cvmx_pcsxx_control1_reg_s cn52xx;
- struct cvmx_pcsxx_control1_reg_s cn52xxp1;
- struct cvmx_pcsxx_control1_reg_s cn56xx;
- struct cvmx_pcsxx_control1_reg_s cn56xxp1;
-};
-
-union cvmx_pcsxx_control2_reg {
- uint64_t u64;
- struct cvmx_pcsxx_control2_reg_s {
- uint64_t reserved_2_63:62;
- uint64_t type:2;
- } s;
- struct cvmx_pcsxx_control2_reg_s cn52xx;
- struct cvmx_pcsxx_control2_reg_s cn52xxp1;
- struct cvmx_pcsxx_control2_reg_s cn56xx;
- struct cvmx_pcsxx_control2_reg_s cn56xxp1;
-};
-
-union cvmx_pcsxx_int_en_reg {
- uint64_t u64;
- struct cvmx_pcsxx_int_en_reg_s {
- uint64_t reserved_6_63:58;
- uint64_t algnlos_en:1;
- uint64_t synlos_en:1;
- uint64_t bitlckls_en:1;
- uint64_t rxsynbad_en:1;
- uint64_t rxbad_en:1;
- uint64_t txflt_en:1;
- } s;
- struct cvmx_pcsxx_int_en_reg_s cn52xx;
- struct cvmx_pcsxx_int_en_reg_s cn52xxp1;
- struct cvmx_pcsxx_int_en_reg_s cn56xx;
- struct cvmx_pcsxx_int_en_reg_s cn56xxp1;
-};
-
-union cvmx_pcsxx_int_reg {
- uint64_t u64;
- struct cvmx_pcsxx_int_reg_s {
- uint64_t reserved_6_63:58;
- uint64_t algnlos:1;
- uint64_t synlos:1;
- uint64_t bitlckls:1;
- uint64_t rxsynbad:1;
- uint64_t rxbad:1;
- uint64_t txflt:1;
- } s;
- struct cvmx_pcsxx_int_reg_s cn52xx;
- struct cvmx_pcsxx_int_reg_s cn52xxp1;
- struct cvmx_pcsxx_int_reg_s cn56xx;
- struct cvmx_pcsxx_int_reg_s cn56xxp1;
-};
-
-union cvmx_pcsxx_log_anl_reg {
- uint64_t u64;
- struct cvmx_pcsxx_log_anl_reg_s {
- uint64_t reserved_7_63:57;
- uint64_t enc_mode:1;
- uint64_t drop_ln:2;
- uint64_t lafifovfl:1;
- uint64_t la_en:1;
- uint64_t pkt_sz:2;
- } s;
- struct cvmx_pcsxx_log_anl_reg_s cn52xx;
- struct cvmx_pcsxx_log_anl_reg_s cn52xxp1;
- struct cvmx_pcsxx_log_anl_reg_s cn56xx;
- struct cvmx_pcsxx_log_anl_reg_s cn56xxp1;
-};
-
-union cvmx_pcsxx_misc_ctl_reg {
- uint64_t u64;
- struct cvmx_pcsxx_misc_ctl_reg_s {
- uint64_t reserved_4_63:60;
- uint64_t tx_swap:1;
- uint64_t rx_swap:1;
- uint64_t xaui:1;
- uint64_t gmxeno:1;
- } s;
- struct cvmx_pcsxx_misc_ctl_reg_s cn52xx;
- struct cvmx_pcsxx_misc_ctl_reg_s cn52xxp1;
- struct cvmx_pcsxx_misc_ctl_reg_s cn56xx;
- struct cvmx_pcsxx_misc_ctl_reg_s cn56xxp1;
-};
-
-union cvmx_pcsxx_rx_sync_states_reg {
- uint64_t u64;
- struct cvmx_pcsxx_rx_sync_states_reg_s {
- uint64_t reserved_16_63:48;
- uint64_t sync3st:4;
- uint64_t sync2st:4;
- uint64_t sync1st:4;
- uint64_t sync0st:4;
- } s;
- struct cvmx_pcsxx_rx_sync_states_reg_s cn52xx;
- struct cvmx_pcsxx_rx_sync_states_reg_s cn52xxp1;
- struct cvmx_pcsxx_rx_sync_states_reg_s cn56xx;
- struct cvmx_pcsxx_rx_sync_states_reg_s cn56xxp1;
-};
-
-union cvmx_pcsxx_spd_abil_reg {
- uint64_t u64;
- struct cvmx_pcsxx_spd_abil_reg_s {
- uint64_t reserved_2_63:62;
- uint64_t tenpasst:1;
- uint64_t tengb:1;
- } s;
- struct cvmx_pcsxx_spd_abil_reg_s cn52xx;
- struct cvmx_pcsxx_spd_abil_reg_s cn52xxp1;
- struct cvmx_pcsxx_spd_abil_reg_s cn56xx;
- struct cvmx_pcsxx_spd_abil_reg_s cn56xxp1;
-};
-
-union cvmx_pcsxx_status1_reg {
- uint64_t u64;
- struct cvmx_pcsxx_status1_reg_s {
- uint64_t reserved_8_63:56;
- uint64_t flt:1;
- uint64_t reserved_3_6:4;
- uint64_t rcv_lnk:1;
- uint64_t lpable:1;
- uint64_t reserved_0_0:1;
- } s;
- struct cvmx_pcsxx_status1_reg_s cn52xx;
- struct cvmx_pcsxx_status1_reg_s cn52xxp1;
- struct cvmx_pcsxx_status1_reg_s cn56xx;
- struct cvmx_pcsxx_status1_reg_s cn56xxp1;
-};
-
-union cvmx_pcsxx_status2_reg {
- uint64_t u64;
- struct cvmx_pcsxx_status2_reg_s {
- uint64_t reserved_16_63:48;
- uint64_t dev:2;
- uint64_t reserved_12_13:2;
- uint64_t xmtflt:1;
- uint64_t rcvflt:1;
- uint64_t reserved_3_9:7;
- uint64_t tengb_w:1;
- uint64_t tengb_x:1;
- uint64_t tengb_r:1;
- } s;
- struct cvmx_pcsxx_status2_reg_s cn52xx;
- struct cvmx_pcsxx_status2_reg_s cn52xxp1;
- struct cvmx_pcsxx_status2_reg_s cn56xx;
- struct cvmx_pcsxx_status2_reg_s cn56xxp1;
-};
-
-union cvmx_pcsxx_tx_rx_polarity_reg {
- uint64_t u64;
- struct cvmx_pcsxx_tx_rx_polarity_reg_s {
- uint64_t reserved_10_63:54;
- uint64_t xor_rxplrt:4;
- uint64_t xor_txplrt:4;
- uint64_t rxplrt:1;
- uint64_t txplrt:1;
- } s;
- struct cvmx_pcsxx_tx_rx_polarity_reg_s cn52xx;
- struct cvmx_pcsxx_tx_rx_polarity_reg_cn52xxp1 {
- uint64_t reserved_2_63:62;
- uint64_t rxplrt:1;
- uint64_t txplrt:1;
- } cn52xxp1;
- struct cvmx_pcsxx_tx_rx_polarity_reg_s cn56xx;
- struct cvmx_pcsxx_tx_rx_polarity_reg_cn52xxp1 cn56xxp1;
-};
-
-union cvmx_pcsxx_tx_rx_states_reg {
- uint64_t u64;
- struct cvmx_pcsxx_tx_rx_states_reg_s {
- uint64_t reserved_14_63:50;
- uint64_t term_err:1;
- uint64_t syn3bad:1;
- uint64_t syn2bad:1;
- uint64_t syn1bad:1;
- uint64_t syn0bad:1;
- uint64_t rxbad:1;
- uint64_t algn_st:3;
- uint64_t rx_st:2;
- uint64_t tx_st:3;
- } s;
- struct cvmx_pcsxx_tx_rx_states_reg_s cn52xx;
- struct cvmx_pcsxx_tx_rx_states_reg_cn52xxp1 {
- uint64_t reserved_13_63:51;
- uint64_t syn3bad:1;
- uint64_t syn2bad:1;
- uint64_t syn1bad:1;
- uint64_t syn0bad:1;
- uint64_t rxbad:1;
- uint64_t algn_st:3;
- uint64_t rx_st:2;
- uint64_t tx_st:3;
- } cn52xxp1;
- struct cvmx_pcsxx_tx_rx_states_reg_s cn56xx;
- struct cvmx_pcsxx_tx_rx_states_reg_cn52xxp1 cn56xxp1;
-};
-
-#endif
diff --git a/drivers/staging/octeon/cvmx-pip-defs.h b/drivers/staging/octeon/cvmx-pip-defs.h
deleted file mode 100644
index 5a36910..0000000
--- a/drivers/staging/octeon/cvmx-pip-defs.h
+++ /dev/null
@@ -1,1267 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_PIP_DEFS_H__
-#define __CVMX_PIP_DEFS_H__
-
-/*
- * Enumeration representing the amount of packet processing
- * and validation performed by the input hardware.
- */
-enum cvmx_pip_port_parse_mode {
- /*
- * Packet input doesn't perform any processing of the input
- * packet.
- */
- CVMX_PIP_PORT_CFG_MODE_NONE = 0ull,
- /*
- * Full packet processing is performed with pointer starting
- * at the L2 (ethernet MAC) header.
- */
- CVMX_PIP_PORT_CFG_MODE_SKIPL2 = 1ull,
- /*
- * Input packets are assumed to be IP. Results from non-IP
- * packets are undefined. Pointers reference the beginning of
- * the IP header.
- */
- CVMX_PIP_PORT_CFG_MODE_SKIPIP = 2ull
-};
-
-#define CVMX_PIP_BCK_PRS \
- CVMX_ADD_IO_SEG(0x00011800A0000038ull)
-#define CVMX_PIP_BIST_STATUS \
- CVMX_ADD_IO_SEG(0x00011800A0000000ull)
-#define CVMX_PIP_CRC_CTLX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000040ull + (((offset) & 1) * 8))
-#define CVMX_PIP_CRC_IVX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000050ull + (((offset) & 1) * 8))
-#define CVMX_PIP_DEC_IPSECX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000080ull + (((offset) & 3) * 8))
-#define CVMX_PIP_DSA_SRC_GRP \
- CVMX_ADD_IO_SEG(0x00011800A0000190ull)
-#define CVMX_PIP_DSA_VID_GRP \
- CVMX_ADD_IO_SEG(0x00011800A0000198ull)
-#define CVMX_PIP_FRM_LEN_CHKX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000180ull + (((offset) & 1) * 8))
-#define CVMX_PIP_GBL_CFG \
- CVMX_ADD_IO_SEG(0x00011800A0000028ull)
-#define CVMX_PIP_GBL_CTL \
- CVMX_ADD_IO_SEG(0x00011800A0000020ull)
-#define CVMX_PIP_HG_PRI_QOS \
- CVMX_ADD_IO_SEG(0x00011800A00001A0ull)
-#define CVMX_PIP_INT_EN \
- CVMX_ADD_IO_SEG(0x00011800A0000010ull)
-#define CVMX_PIP_INT_REG \
- CVMX_ADD_IO_SEG(0x00011800A0000008ull)
-#define CVMX_PIP_IP_OFFSET \
- CVMX_ADD_IO_SEG(0x00011800A0000060ull)
-#define CVMX_PIP_PRT_CFGX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000200ull + (((offset) & 63) * 8))
-#define CVMX_PIP_PRT_TAGX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000400ull + (((offset) & 63) * 8))
-#define CVMX_PIP_QOS_DIFFX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000600ull + (((offset) & 63) * 8))
-#define CVMX_PIP_QOS_VLANX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A00000C0ull + (((offset) & 7) * 8))
-#define CVMX_PIP_QOS_WATCHX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000100ull + (((offset) & 7) * 8))
-#define CVMX_PIP_RAW_WORD \
- CVMX_ADD_IO_SEG(0x00011800A00000B0ull)
-#define CVMX_PIP_SFT_RST \
- CVMX_ADD_IO_SEG(0x00011800A0000030ull)
-#define CVMX_PIP_STAT0_PRTX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000800ull + (((offset) & 63) * 80))
-#define CVMX_PIP_STAT1_PRTX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000808ull + (((offset) & 63) * 80))
-#define CVMX_PIP_STAT2_PRTX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000810ull + (((offset) & 63) * 80))
-#define CVMX_PIP_STAT3_PRTX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000818ull + (((offset) & 63) * 80))
-#define CVMX_PIP_STAT4_PRTX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000820ull + (((offset) & 63) * 80))
-#define CVMX_PIP_STAT5_PRTX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000828ull + (((offset) & 63) * 80))
-#define CVMX_PIP_STAT6_PRTX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000830ull + (((offset) & 63) * 80))
-#define CVMX_PIP_STAT7_PRTX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000838ull + (((offset) & 63) * 80))
-#define CVMX_PIP_STAT8_PRTX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000840ull + (((offset) & 63) * 80))
-#define CVMX_PIP_STAT9_PRTX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0000848ull + (((offset) & 63) * 80))
-#define CVMX_PIP_STAT_CTL \
- CVMX_ADD_IO_SEG(0x00011800A0000018ull)
-#define CVMX_PIP_STAT_INB_ERRSX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0001A10ull + (((offset) & 63) * 32))
-#define CVMX_PIP_STAT_INB_OCTSX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0001A08ull + (((offset) & 63) * 32))
-#define CVMX_PIP_STAT_INB_PKTSX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0001A00ull + (((offset) & 63) * 32))
-#define CVMX_PIP_TAG_INCX(offset) \
- CVMX_ADD_IO_SEG(0x00011800A0001800ull + (((offset) & 63) * 8))
-#define CVMX_PIP_TAG_MASK \
- CVMX_ADD_IO_SEG(0x00011800A0000070ull)
-#define CVMX_PIP_TAG_SECRET \
- CVMX_ADD_IO_SEG(0x00011800A0000068ull)
-#define CVMX_PIP_TODO_ENTRY \
- CVMX_ADD_IO_SEG(0x00011800A0000078ull)
-
-union cvmx_pip_bck_prs {
- uint64_t u64;
- struct cvmx_pip_bck_prs_s {
- uint64_t bckprs:1;
- uint64_t reserved_13_62:50;
- uint64_t hiwater:5;
- uint64_t reserved_5_7:3;
- uint64_t lowater:5;
- } s;
- struct cvmx_pip_bck_prs_s cn38xx;
- struct cvmx_pip_bck_prs_s cn38xxp2;
- struct cvmx_pip_bck_prs_s cn56xx;
- struct cvmx_pip_bck_prs_s cn56xxp1;
- struct cvmx_pip_bck_prs_s cn58xx;
- struct cvmx_pip_bck_prs_s cn58xxp1;
-};
-
-union cvmx_pip_bist_status {
- uint64_t u64;
- struct cvmx_pip_bist_status_s {
- uint64_t reserved_18_63:46;
- uint64_t bist:18;
- } s;
- struct cvmx_pip_bist_status_s cn30xx;
- struct cvmx_pip_bist_status_s cn31xx;
- struct cvmx_pip_bist_status_s cn38xx;
- struct cvmx_pip_bist_status_s cn38xxp2;
- struct cvmx_pip_bist_status_cn50xx {
- uint64_t reserved_17_63:47;
- uint64_t bist:17;
- } cn50xx;
- struct cvmx_pip_bist_status_s cn52xx;
- struct cvmx_pip_bist_status_s cn52xxp1;
- struct cvmx_pip_bist_status_s cn56xx;
- struct cvmx_pip_bist_status_s cn56xxp1;
- struct cvmx_pip_bist_status_s cn58xx;
- struct cvmx_pip_bist_status_s cn58xxp1;
-};
-
-union cvmx_pip_crc_ctlx {
- uint64_t u64;
- struct cvmx_pip_crc_ctlx_s {
- uint64_t reserved_2_63:62;
- uint64_t invres:1;
- uint64_t reflect:1;
- } s;
- struct cvmx_pip_crc_ctlx_s cn38xx;
- struct cvmx_pip_crc_ctlx_s cn38xxp2;
- struct cvmx_pip_crc_ctlx_s cn58xx;
- struct cvmx_pip_crc_ctlx_s cn58xxp1;
-};
-
-union cvmx_pip_crc_ivx {
- uint64_t u64;
- struct cvmx_pip_crc_ivx_s {
- uint64_t reserved_32_63:32;
- uint64_t iv:32;
- } s;
- struct cvmx_pip_crc_ivx_s cn38xx;
- struct cvmx_pip_crc_ivx_s cn38xxp2;
- struct cvmx_pip_crc_ivx_s cn58xx;
- struct cvmx_pip_crc_ivx_s cn58xxp1;
-};
-
-union cvmx_pip_dec_ipsecx {
- uint64_t u64;
- struct cvmx_pip_dec_ipsecx_s {
- uint64_t reserved_18_63:46;
- uint64_t tcp:1;
- uint64_t udp:1;
- uint64_t dprt:16;
- } s;
- struct cvmx_pip_dec_ipsecx_s cn30xx;
- struct cvmx_pip_dec_ipsecx_s cn31xx;
- struct cvmx_pip_dec_ipsecx_s cn38xx;
- struct cvmx_pip_dec_ipsecx_s cn38xxp2;
- struct cvmx_pip_dec_ipsecx_s cn50xx;
- struct cvmx_pip_dec_ipsecx_s cn52xx;
- struct cvmx_pip_dec_ipsecx_s cn52xxp1;
- struct cvmx_pip_dec_ipsecx_s cn56xx;
- struct cvmx_pip_dec_ipsecx_s cn56xxp1;
- struct cvmx_pip_dec_ipsecx_s cn58xx;
- struct cvmx_pip_dec_ipsecx_s cn58xxp1;
-};
-
-union cvmx_pip_dsa_src_grp {
- uint64_t u64;
- struct cvmx_pip_dsa_src_grp_s {
- uint64_t map15:4;
- uint64_t map14:4;
- uint64_t map13:4;
- uint64_t map12:4;
- uint64_t map11:4;
- uint64_t map10:4;
- uint64_t map9:4;
- uint64_t map8:4;
- uint64_t map7:4;
- uint64_t map6:4;
- uint64_t map5:4;
- uint64_t map4:4;
- uint64_t map3:4;
- uint64_t map2:4;
- uint64_t map1:4;
- uint64_t map0:4;
- } s;
- struct cvmx_pip_dsa_src_grp_s cn52xx;
- struct cvmx_pip_dsa_src_grp_s cn52xxp1;
- struct cvmx_pip_dsa_src_grp_s cn56xx;
-};
-
-union cvmx_pip_dsa_vid_grp {
- uint64_t u64;
- struct cvmx_pip_dsa_vid_grp_s {
- uint64_t map15:4;
- uint64_t map14:4;
- uint64_t map13:4;
- uint64_t map12:4;
- uint64_t map11:4;
- uint64_t map10:4;
- uint64_t map9:4;
- uint64_t map8:4;
- uint64_t map7:4;
- uint64_t map6:4;
- uint64_t map5:4;
- uint64_t map4:4;
- uint64_t map3:4;
- uint64_t map2:4;
- uint64_t map1:4;
- uint64_t map0:4;
- } s;
- struct cvmx_pip_dsa_vid_grp_s cn52xx;
- struct cvmx_pip_dsa_vid_grp_s cn52xxp1;
- struct cvmx_pip_dsa_vid_grp_s cn56xx;
-};
-
-union cvmx_pip_frm_len_chkx {
- uint64_t u64;
- struct cvmx_pip_frm_len_chkx_s {
- uint64_t reserved_32_63:32;
- uint64_t maxlen:16;
- uint64_t minlen:16;
- } s;
- struct cvmx_pip_frm_len_chkx_s cn50xx;
- struct cvmx_pip_frm_len_chkx_s cn52xx;
- struct cvmx_pip_frm_len_chkx_s cn52xxp1;
- struct cvmx_pip_frm_len_chkx_s cn56xx;
- struct cvmx_pip_frm_len_chkx_s cn56xxp1;
-};
-
-union cvmx_pip_gbl_cfg {
- uint64_t u64;
- struct cvmx_pip_gbl_cfg_s {
- uint64_t reserved_19_63:45;
- uint64_t tag_syn:1;
- uint64_t ip6_udp:1;
- uint64_t max_l2:1;
- uint64_t reserved_11_15:5;
- uint64_t raw_shf:3;
- uint64_t reserved_3_7:5;
- uint64_t nip_shf:3;
- } s;
- struct cvmx_pip_gbl_cfg_s cn30xx;
- struct cvmx_pip_gbl_cfg_s cn31xx;
- struct cvmx_pip_gbl_cfg_s cn38xx;
- struct cvmx_pip_gbl_cfg_s cn38xxp2;
- struct cvmx_pip_gbl_cfg_s cn50xx;
- struct cvmx_pip_gbl_cfg_s cn52xx;
- struct cvmx_pip_gbl_cfg_s cn52xxp1;
- struct cvmx_pip_gbl_cfg_s cn56xx;
- struct cvmx_pip_gbl_cfg_s cn56xxp1;
- struct cvmx_pip_gbl_cfg_s cn58xx;
- struct cvmx_pip_gbl_cfg_s cn58xxp1;
-};
-
-union cvmx_pip_gbl_ctl {
- uint64_t u64;
- struct cvmx_pip_gbl_ctl_s {
- uint64_t reserved_27_63:37;
- uint64_t dsa_grp_tvid:1;
- uint64_t dsa_grp_scmd:1;
- uint64_t dsa_grp_sid:1;
- uint64_t reserved_21_23:3;
- uint64_t ring_en:1;
- uint64_t reserved_17_19:3;
- uint64_t ignrs:1;
- uint64_t vs_wqe:1;
- uint64_t vs_qos:1;
- uint64_t l2_mal:1;
- uint64_t tcp_flag:1;
- uint64_t l4_len:1;
- uint64_t l4_chk:1;
- uint64_t l4_prt:1;
- uint64_t l4_mal:1;
- uint64_t reserved_6_7:2;
- uint64_t ip6_eext:2;
- uint64_t ip4_opts:1;
- uint64_t ip_hop:1;
- uint64_t ip_mal:1;
- uint64_t ip_chk:1;
- } s;
- struct cvmx_pip_gbl_ctl_cn30xx {
- uint64_t reserved_17_63:47;
- uint64_t ignrs:1;
- uint64_t vs_wqe:1;
- uint64_t vs_qos:1;
- uint64_t l2_mal:1;
- uint64_t tcp_flag:1;
- uint64_t l4_len:1;
- uint64_t l4_chk:1;
- uint64_t l4_prt:1;
- uint64_t l4_mal:1;
- uint64_t reserved_6_7:2;
- uint64_t ip6_eext:2;
- uint64_t ip4_opts:1;
- uint64_t ip_hop:1;
- uint64_t ip_mal:1;
- uint64_t ip_chk:1;
- } cn30xx;
- struct cvmx_pip_gbl_ctl_cn30xx cn31xx;
- struct cvmx_pip_gbl_ctl_cn30xx cn38xx;
- struct cvmx_pip_gbl_ctl_cn30xx cn38xxp2;
- struct cvmx_pip_gbl_ctl_cn30xx cn50xx;
- struct cvmx_pip_gbl_ctl_s cn52xx;
- struct cvmx_pip_gbl_ctl_s cn52xxp1;
- struct cvmx_pip_gbl_ctl_s cn56xx;
- struct cvmx_pip_gbl_ctl_cn56xxp1 {
- uint64_t reserved_21_63:43;
- uint64_t ring_en:1;
- uint64_t reserved_17_19:3;
- uint64_t ignrs:1;
- uint64_t vs_wqe:1;
- uint64_t vs_qos:1;
- uint64_t l2_mal:1;
- uint64_t tcp_flag:1;
- uint64_t l4_len:1;
- uint64_t l4_chk:1;
- uint64_t l4_prt:1;
- uint64_t l4_mal:1;
- uint64_t reserved_6_7:2;
- uint64_t ip6_eext:2;
- uint64_t ip4_opts:1;
- uint64_t ip_hop:1;
- uint64_t ip_mal:1;
- uint64_t ip_chk:1;
- } cn56xxp1;
- struct cvmx_pip_gbl_ctl_cn30xx cn58xx;
- struct cvmx_pip_gbl_ctl_cn30xx cn58xxp1;
-};
-
-union cvmx_pip_hg_pri_qos {
- uint64_t u64;
- struct cvmx_pip_hg_pri_qos_s {
- uint64_t reserved_11_63:53;
- uint64_t qos:3;
- uint64_t reserved_6_7:2;
- uint64_t pri:6;
- } s;
- struct cvmx_pip_hg_pri_qos_s cn52xx;
- struct cvmx_pip_hg_pri_qos_s cn52xxp1;
- struct cvmx_pip_hg_pri_qos_s cn56xx;
-};
-
-union cvmx_pip_int_en {
- uint64_t u64;
- struct cvmx_pip_int_en_s {
- uint64_t reserved_13_63:51;
- uint64_t punyerr:1;
- uint64_t lenerr:1;
- uint64_t maxerr:1;
- uint64_t minerr:1;
- uint64_t beperr:1;
- uint64_t feperr:1;
- uint64_t todoovr:1;
- uint64_t skprunt:1;
- uint64_t badtag:1;
- uint64_t prtnxa:1;
- uint64_t bckprs:1;
- uint64_t crcerr:1;
- uint64_t pktdrp:1;
- } s;
- struct cvmx_pip_int_en_cn30xx {
- uint64_t reserved_9_63:55;
- uint64_t beperr:1;
- uint64_t feperr:1;
- uint64_t todoovr:1;
- uint64_t skprunt:1;
- uint64_t badtag:1;
- uint64_t prtnxa:1;
- uint64_t bckprs:1;
- uint64_t crcerr:1;
- uint64_t pktdrp:1;
- } cn30xx;
- struct cvmx_pip_int_en_cn30xx cn31xx;
- struct cvmx_pip_int_en_cn30xx cn38xx;
- struct cvmx_pip_int_en_cn30xx cn38xxp2;
- struct cvmx_pip_int_en_cn50xx {
- uint64_t reserved_12_63:52;
- uint64_t lenerr:1;
- uint64_t maxerr:1;
- uint64_t minerr:1;
- uint64_t beperr:1;
- uint64_t feperr:1;
- uint64_t todoovr:1;
- uint64_t skprunt:1;
- uint64_t badtag:1;
- uint64_t prtnxa:1;
- uint64_t bckprs:1;
- uint64_t reserved_1_1:1;
- uint64_t pktdrp:1;
- } cn50xx;
- struct cvmx_pip_int_en_cn52xx {
- uint64_t reserved_13_63:51;
- uint64_t punyerr:1;
- uint64_t lenerr:1;
- uint64_t maxerr:1;
- uint64_t minerr:1;
- uint64_t beperr:1;
- uint64_t feperr:1;
- uint64_t todoovr:1;
- uint64_t skprunt:1;
- uint64_t badtag:1;
- uint64_t prtnxa:1;
- uint64_t bckprs:1;
- uint64_t reserved_1_1:1;
- uint64_t pktdrp:1;
- } cn52xx;
- struct cvmx_pip_int_en_cn52xx cn52xxp1;
- struct cvmx_pip_int_en_s cn56xx;
- struct cvmx_pip_int_en_cn56xxp1 {
- uint64_t reserved_12_63:52;
- uint64_t lenerr:1;
- uint64_t maxerr:1;
- uint64_t minerr:1;
- uint64_t beperr:1;
- uint64_t feperr:1;
- uint64_t todoovr:1;
- uint64_t skprunt:1;
- uint64_t badtag:1;
- uint64_t prtnxa:1;
- uint64_t bckprs:1;
- uint64_t crcerr:1;
- uint64_t pktdrp:1;
- } cn56xxp1;
- struct cvmx_pip_int_en_cn58xx {
- uint64_t reserved_13_63:51;
- uint64_t punyerr:1;
- uint64_t reserved_9_11:3;
- uint64_t beperr:1;
- uint64_t feperr:1;
- uint64_t todoovr:1;
- uint64_t skprunt:1;
- uint64_t badtag:1;
- uint64_t prtnxa:1;
- uint64_t bckprs:1;
- uint64_t crcerr:1;
- uint64_t pktdrp:1;
- } cn58xx;
- struct cvmx_pip_int_en_cn30xx cn58xxp1;
-};
-
-union cvmx_pip_int_reg {
- uint64_t u64;
- struct cvmx_pip_int_reg_s {
- uint64_t reserved_13_63:51;
- uint64_t punyerr:1;
- uint64_t lenerr:1;
- uint64_t maxerr:1;
- uint64_t minerr:1;
- uint64_t beperr:1;
- uint64_t feperr:1;
- uint64_t todoovr:1;
- uint64_t skprunt:1;
- uint64_t badtag:1;
- uint64_t prtnxa:1;
- uint64_t bckprs:1;
- uint64_t crcerr:1;
- uint64_t pktdrp:1;
- } s;
- struct cvmx_pip_int_reg_cn30xx {
- uint64_t reserved_9_63:55;
- uint64_t beperr:1;
- uint64_t feperr:1;
- uint64_t todoovr:1;
- uint64_t skprunt:1;
- uint64_t badtag:1;
- uint64_t prtnxa:1;
- uint64_t bckprs:1;
- uint64_t crcerr:1;
- uint64_t pktdrp:1;
- } cn30xx;
- struct cvmx_pip_int_reg_cn30xx cn31xx;
- struct cvmx_pip_int_reg_cn30xx cn38xx;
- struct cvmx_pip_int_reg_cn30xx cn38xxp2;
- struct cvmx_pip_int_reg_cn50xx {
- uint64_t reserved_12_63:52;
- uint64_t lenerr:1;
- uint64_t maxerr:1;
- uint64_t minerr:1;
- uint64_t beperr:1;
- uint64_t feperr:1;
- uint64_t todoovr:1;
- uint64_t skprunt:1;
- uint64_t badtag:1;
- uint64_t prtnxa:1;
- uint64_t bckprs:1;
- uint64_t reserved_1_1:1;
- uint64_t pktdrp:1;
- } cn50xx;
- struct cvmx_pip_int_reg_cn52xx {
- uint64_t reserved_13_63:51;
- uint64_t punyerr:1;
- uint64_t lenerr:1;
- uint64_t maxerr:1;
- uint64_t minerr:1;
- uint64_t beperr:1;
- uint64_t feperr:1;
- uint64_t todoovr:1;
- uint64_t skprunt:1;
- uint64_t badtag:1;
- uint64_t prtnxa:1;
- uint64_t bckprs:1;
- uint64_t reserved_1_1:1;
- uint64_t pktdrp:1;
- } cn52xx;
- struct cvmx_pip_int_reg_cn52xx cn52xxp1;
- struct cvmx_pip_int_reg_s cn56xx;
- struct cvmx_pip_int_reg_cn56xxp1 {
- uint64_t reserved_12_63:52;
- uint64_t lenerr:1;
- uint64_t maxerr:1;
- uint64_t minerr:1;
- uint64_t beperr:1;
- uint64_t feperr:1;
- uint64_t todoovr:1;
- uint64_t skprunt:1;
- uint64_t badtag:1;
- uint64_t prtnxa:1;
- uint64_t bckprs:1;
- uint64_t crcerr:1;
- uint64_t pktdrp:1;
- } cn56xxp1;
- struct cvmx_pip_int_reg_cn58xx {
- uint64_t reserved_13_63:51;
- uint64_t punyerr:1;
- uint64_t reserved_9_11:3;
- uint64_t beperr:1;
- uint64_t feperr:1;
- uint64_t todoovr:1;
- uint64_t skprunt:1;
- uint64_t badtag:1;
- uint64_t prtnxa:1;
- uint64_t bckprs:1;
- uint64_t crcerr:1;
- uint64_t pktdrp:1;
- } cn58xx;
- struct cvmx_pip_int_reg_cn30xx cn58xxp1;
-};
-
-union cvmx_pip_ip_offset {
- uint64_t u64;
- struct cvmx_pip_ip_offset_s {
- uint64_t reserved_3_63:61;
- uint64_t offset:3;
- } s;
- struct cvmx_pip_ip_offset_s cn30xx;
- struct cvmx_pip_ip_offset_s cn31xx;
- struct cvmx_pip_ip_offset_s cn38xx;
- struct cvmx_pip_ip_offset_s cn38xxp2;
- struct cvmx_pip_ip_offset_s cn50xx;
- struct cvmx_pip_ip_offset_s cn52xx;
- struct cvmx_pip_ip_offset_s cn52xxp1;
- struct cvmx_pip_ip_offset_s cn56xx;
- struct cvmx_pip_ip_offset_s cn56xxp1;
- struct cvmx_pip_ip_offset_s cn58xx;
- struct cvmx_pip_ip_offset_s cn58xxp1;
-};
-
-union cvmx_pip_prt_cfgx {
- uint64_t u64;
- struct cvmx_pip_prt_cfgx_s {
- uint64_t reserved_53_63:11;
- uint64_t pad_len:1;
- uint64_t vlan_len:1;
- uint64_t lenerr_en:1;
- uint64_t maxerr_en:1;
- uint64_t minerr_en:1;
- uint64_t grp_wat_47:4;
- uint64_t qos_wat_47:4;
- uint64_t reserved_37_39:3;
- uint64_t rawdrp:1;
- uint64_t tag_inc:2;
- uint64_t dyn_rs:1;
- uint64_t inst_hdr:1;
- uint64_t grp_wat:4;
- uint64_t hg_qos:1;
- uint64_t qos:3;
- uint64_t qos_wat:4;
- uint64_t qos_vsel:1;
- uint64_t qos_vod:1;
- uint64_t qos_diff:1;
- uint64_t qos_vlan:1;
- uint64_t reserved_13_15:3;
- uint64_t crc_en:1;
- uint64_t higig_en:1;
- uint64_t dsa_en:1;
- uint64_t mode:2;
- uint64_t reserved_7_7:1;
- uint64_t skip:7;
- } s;
- struct cvmx_pip_prt_cfgx_cn30xx {
- uint64_t reserved_37_63:27;
- uint64_t rawdrp:1;
- uint64_t tag_inc:2;
- uint64_t dyn_rs:1;
- uint64_t inst_hdr:1;
- uint64_t grp_wat:4;
- uint64_t reserved_27_27:1;
- uint64_t qos:3;
- uint64_t qos_wat:4;
- uint64_t reserved_18_19:2;
- uint64_t qos_diff:1;
- uint64_t qos_vlan:1;
- uint64_t reserved_10_15:6;
- uint64_t mode:2;
- uint64_t reserved_7_7:1;
- uint64_t skip:7;
- } cn30xx;
- struct cvmx_pip_prt_cfgx_cn30xx cn31xx;
- struct cvmx_pip_prt_cfgx_cn38xx {
- uint64_t reserved_37_63:27;
- uint64_t rawdrp:1;
- uint64_t tag_inc:2;
- uint64_t dyn_rs:1;
- uint64_t inst_hdr:1;
- uint64_t grp_wat:4;
- uint64_t reserved_27_27:1;
- uint64_t qos:3;
- uint64_t qos_wat:4;
- uint64_t reserved_18_19:2;
- uint64_t qos_diff:1;
- uint64_t qos_vlan:1;
- uint64_t reserved_13_15:3;
- uint64_t crc_en:1;
- uint64_t reserved_10_11:2;
- uint64_t mode:2;
- uint64_t reserved_7_7:1;
- uint64_t skip:7;
- } cn38xx;
- struct cvmx_pip_prt_cfgx_cn38xx cn38xxp2;
- struct cvmx_pip_prt_cfgx_cn50xx {
- uint64_t reserved_53_63:11;
- uint64_t pad_len:1;
- uint64_t vlan_len:1;
- uint64_t lenerr_en:1;
- uint64_t maxerr_en:1;
- uint64_t minerr_en:1;
- uint64_t grp_wat_47:4;
- uint64_t qos_wat_47:4;
- uint64_t reserved_37_39:3;
- uint64_t rawdrp:1;
- uint64_t tag_inc:2;
- uint64_t dyn_rs:1;
- uint64_t inst_hdr:1;
- uint64_t grp_wat:4;
- uint64_t reserved_27_27:1;
- uint64_t qos:3;
- uint64_t qos_wat:4;
- uint64_t reserved_19_19:1;
- uint64_t qos_vod:1;
- uint64_t qos_diff:1;
- uint64_t qos_vlan:1;
- uint64_t reserved_13_15:3;
- uint64_t crc_en:1;
- uint64_t reserved_10_11:2;
- uint64_t mode:2;
- uint64_t reserved_7_7:1;
- uint64_t skip:7;
- } cn50xx;
- struct cvmx_pip_prt_cfgx_s cn52xx;
- struct cvmx_pip_prt_cfgx_s cn52xxp1;
- struct cvmx_pip_prt_cfgx_s cn56xx;
- struct cvmx_pip_prt_cfgx_cn50xx cn56xxp1;
- struct cvmx_pip_prt_cfgx_cn58xx {
- uint64_t reserved_37_63:27;
- uint64_t rawdrp:1;
- uint64_t tag_inc:2;
- uint64_t dyn_rs:1;
- uint64_t inst_hdr:1;
- uint64_t grp_wat:4;
- uint64_t reserved_27_27:1;
- uint64_t qos:3;
- uint64_t qos_wat:4;
- uint64_t reserved_19_19:1;
- uint64_t qos_vod:1;
- uint64_t qos_diff:1;
- uint64_t qos_vlan:1;
- uint64_t reserved_13_15:3;
- uint64_t crc_en:1;
- uint64_t reserved_10_11:2;
- uint64_t mode:2;
- uint64_t reserved_7_7:1;
- uint64_t skip:7;
- } cn58xx;
- struct cvmx_pip_prt_cfgx_cn58xx cn58xxp1;
-};
-
-union cvmx_pip_prt_tagx {
- uint64_t u64;
- struct cvmx_pip_prt_tagx_s {
- uint64_t reserved_40_63:24;
- uint64_t grptagbase:4;
- uint64_t grptagmask:4;
- uint64_t grptag:1;
- uint64_t grptag_mskip:1;
- uint64_t tag_mode:2;
- uint64_t inc_vs:2;
- uint64_t inc_vlan:1;
- uint64_t inc_prt_flag:1;
- uint64_t ip6_dprt_flag:1;
- uint64_t ip4_dprt_flag:1;
- uint64_t ip6_sprt_flag:1;
- uint64_t ip4_sprt_flag:1;
- uint64_t ip6_nxth_flag:1;
- uint64_t ip4_pctl_flag:1;
- uint64_t ip6_dst_flag:1;
- uint64_t ip4_dst_flag:1;
- uint64_t ip6_src_flag:1;
- uint64_t ip4_src_flag:1;
- uint64_t tcp6_tag_type:2;
- uint64_t tcp4_tag_type:2;
- uint64_t ip6_tag_type:2;
- uint64_t ip4_tag_type:2;
- uint64_t non_tag_type:2;
- uint64_t grp:4;
- } s;
- struct cvmx_pip_prt_tagx_cn30xx {
- uint64_t reserved_40_63:24;
- uint64_t grptagbase:4;
- uint64_t grptagmask:4;
- uint64_t grptag:1;
- uint64_t reserved_30_30:1;
- uint64_t tag_mode:2;
- uint64_t inc_vs:2;
- uint64_t inc_vlan:1;
- uint64_t inc_prt_flag:1;
- uint64_t ip6_dprt_flag:1;
- uint64_t ip4_dprt_flag:1;
- uint64_t ip6_sprt_flag:1;
- uint64_t ip4_sprt_flag:1;
- uint64_t ip6_nxth_flag:1;
- uint64_t ip4_pctl_flag:1;
- uint64_t ip6_dst_flag:1;
- uint64_t ip4_dst_flag:1;
- uint64_t ip6_src_flag:1;
- uint64_t ip4_src_flag:1;
- uint64_t tcp6_tag_type:2;
- uint64_t tcp4_tag_type:2;
- uint64_t ip6_tag_type:2;
- uint64_t ip4_tag_type:2;
- uint64_t non_tag_type:2;
- uint64_t grp:4;
- } cn30xx;
- struct cvmx_pip_prt_tagx_cn30xx cn31xx;
- struct cvmx_pip_prt_tagx_cn30xx cn38xx;
- struct cvmx_pip_prt_tagx_cn30xx cn38xxp2;
- struct cvmx_pip_prt_tagx_s cn50xx;
- struct cvmx_pip_prt_tagx_s cn52xx;
- struct cvmx_pip_prt_tagx_s cn52xxp1;
- struct cvmx_pip_prt_tagx_s cn56xx;
- struct cvmx_pip_prt_tagx_s cn56xxp1;
- struct cvmx_pip_prt_tagx_cn30xx cn58xx;
- struct cvmx_pip_prt_tagx_cn30xx cn58xxp1;
-};
-
-union cvmx_pip_qos_diffx {
- uint64_t u64;
- struct cvmx_pip_qos_diffx_s {
- uint64_t reserved_3_63:61;
- uint64_t qos:3;
- } s;
- struct cvmx_pip_qos_diffx_s cn30xx;
- struct cvmx_pip_qos_diffx_s cn31xx;
- struct cvmx_pip_qos_diffx_s cn38xx;
- struct cvmx_pip_qos_diffx_s cn38xxp2;
- struct cvmx_pip_qos_diffx_s cn50xx;
- struct cvmx_pip_qos_diffx_s cn52xx;
- struct cvmx_pip_qos_diffx_s cn52xxp1;
- struct cvmx_pip_qos_diffx_s cn56xx;
- struct cvmx_pip_qos_diffx_s cn56xxp1;
- struct cvmx_pip_qos_diffx_s cn58xx;
- struct cvmx_pip_qos_diffx_s cn58xxp1;
-};
-
-union cvmx_pip_qos_vlanx {
- uint64_t u64;
- struct cvmx_pip_qos_vlanx_s {
- uint64_t reserved_7_63:57;
- uint64_t qos1:3;
- uint64_t reserved_3_3:1;
- uint64_t qos:3;
- } s;
- struct cvmx_pip_qos_vlanx_cn30xx {
- uint64_t reserved_3_63:61;
- uint64_t qos:3;
- } cn30xx;
- struct cvmx_pip_qos_vlanx_cn30xx cn31xx;
- struct cvmx_pip_qos_vlanx_cn30xx cn38xx;
- struct cvmx_pip_qos_vlanx_cn30xx cn38xxp2;
- struct cvmx_pip_qos_vlanx_cn30xx cn50xx;
- struct cvmx_pip_qos_vlanx_s cn52xx;
- struct cvmx_pip_qos_vlanx_s cn52xxp1;
- struct cvmx_pip_qos_vlanx_s cn56xx;
- struct cvmx_pip_qos_vlanx_cn30xx cn56xxp1;
- struct cvmx_pip_qos_vlanx_cn30xx cn58xx;
- struct cvmx_pip_qos_vlanx_cn30xx cn58xxp1;
-};
-
-union cvmx_pip_qos_watchx {
- uint64_t u64;
- struct cvmx_pip_qos_watchx_s {
- uint64_t reserved_48_63:16;
- uint64_t mask:16;
- uint64_t reserved_28_31:4;
- uint64_t grp:4;
- uint64_t reserved_23_23:1;
- uint64_t qos:3;
- uint64_t reserved_19_19:1;
- uint64_t match_type:3;
- uint64_t match_value:16;
- } s;
- struct cvmx_pip_qos_watchx_cn30xx {
- uint64_t reserved_48_63:16;
- uint64_t mask:16;
- uint64_t reserved_28_31:4;
- uint64_t grp:4;
- uint64_t reserved_23_23:1;
- uint64_t qos:3;
- uint64_t reserved_18_19:2;
- uint64_t match_type:2;
- uint64_t match_value:16;
- } cn30xx;
- struct cvmx_pip_qos_watchx_cn30xx cn31xx;
- struct cvmx_pip_qos_watchx_cn30xx cn38xx;
- struct cvmx_pip_qos_watchx_cn30xx cn38xxp2;
- struct cvmx_pip_qos_watchx_s cn50xx;
- struct cvmx_pip_qos_watchx_s cn52xx;
- struct cvmx_pip_qos_watchx_s cn52xxp1;
- struct cvmx_pip_qos_watchx_s cn56xx;
- struct cvmx_pip_qos_watchx_s cn56xxp1;
- struct cvmx_pip_qos_watchx_cn30xx cn58xx;
- struct cvmx_pip_qos_watchx_cn30xx cn58xxp1;
-};
-
-union cvmx_pip_raw_word {
- uint64_t u64;
- struct cvmx_pip_raw_word_s {
- uint64_t reserved_56_63:8;
- uint64_t word:56;
- } s;
- struct cvmx_pip_raw_word_s cn30xx;
- struct cvmx_pip_raw_word_s cn31xx;
- struct cvmx_pip_raw_word_s cn38xx;
- struct cvmx_pip_raw_word_s cn38xxp2;
- struct cvmx_pip_raw_word_s cn50xx;
- struct cvmx_pip_raw_word_s cn52xx;
- struct cvmx_pip_raw_word_s cn52xxp1;
- struct cvmx_pip_raw_word_s cn56xx;
- struct cvmx_pip_raw_word_s cn56xxp1;
- struct cvmx_pip_raw_word_s cn58xx;
- struct cvmx_pip_raw_word_s cn58xxp1;
-};
-
-union cvmx_pip_sft_rst {
- uint64_t u64;
- struct cvmx_pip_sft_rst_s {
- uint64_t reserved_1_63:63;
- uint64_t rst:1;
- } s;
- struct cvmx_pip_sft_rst_s cn30xx;
- struct cvmx_pip_sft_rst_s cn31xx;
- struct cvmx_pip_sft_rst_s cn38xx;
- struct cvmx_pip_sft_rst_s cn50xx;
- struct cvmx_pip_sft_rst_s cn52xx;
- struct cvmx_pip_sft_rst_s cn52xxp1;
- struct cvmx_pip_sft_rst_s cn56xx;
- struct cvmx_pip_sft_rst_s cn56xxp1;
- struct cvmx_pip_sft_rst_s cn58xx;
- struct cvmx_pip_sft_rst_s cn58xxp1;
-};
-
-union cvmx_pip_stat0_prtx {
- uint64_t u64;
- struct cvmx_pip_stat0_prtx_s {
- uint64_t drp_pkts:32;
- uint64_t drp_octs:32;
- } s;
- struct cvmx_pip_stat0_prtx_s cn30xx;
- struct cvmx_pip_stat0_prtx_s cn31xx;
- struct cvmx_pip_stat0_prtx_s cn38xx;
- struct cvmx_pip_stat0_prtx_s cn38xxp2;
- struct cvmx_pip_stat0_prtx_s cn50xx;
- struct cvmx_pip_stat0_prtx_s cn52xx;
- struct cvmx_pip_stat0_prtx_s cn52xxp1;
- struct cvmx_pip_stat0_prtx_s cn56xx;
- struct cvmx_pip_stat0_prtx_s cn56xxp1;
- struct cvmx_pip_stat0_prtx_s cn58xx;
- struct cvmx_pip_stat0_prtx_s cn58xxp1;
-};
-
-union cvmx_pip_stat1_prtx {
- uint64_t u64;
- struct cvmx_pip_stat1_prtx_s {
- uint64_t reserved_48_63:16;
- uint64_t octs:48;
- } s;
- struct cvmx_pip_stat1_prtx_s cn30xx;
- struct cvmx_pip_stat1_prtx_s cn31xx;
- struct cvmx_pip_stat1_prtx_s cn38xx;
- struct cvmx_pip_stat1_prtx_s cn38xxp2;
- struct cvmx_pip_stat1_prtx_s cn50xx;
- struct cvmx_pip_stat1_prtx_s cn52xx;
- struct cvmx_pip_stat1_prtx_s cn52xxp1;
- struct cvmx_pip_stat1_prtx_s cn56xx;
- struct cvmx_pip_stat1_prtx_s cn56xxp1;
- struct cvmx_pip_stat1_prtx_s cn58xx;
- struct cvmx_pip_stat1_prtx_s cn58xxp1;
-};
-
-union cvmx_pip_stat2_prtx {
- uint64_t u64;
- struct cvmx_pip_stat2_prtx_s {
- uint64_t pkts:32;
- uint64_t raw:32;
- } s;
- struct cvmx_pip_stat2_prtx_s cn30xx;
- struct cvmx_pip_stat2_prtx_s cn31xx;
- struct cvmx_pip_stat2_prtx_s cn38xx;
- struct cvmx_pip_stat2_prtx_s cn38xxp2;
- struct cvmx_pip_stat2_prtx_s cn50xx;
- struct cvmx_pip_stat2_prtx_s cn52xx;
- struct cvmx_pip_stat2_prtx_s cn52xxp1;
- struct cvmx_pip_stat2_prtx_s cn56xx;
- struct cvmx_pip_stat2_prtx_s cn56xxp1;
- struct cvmx_pip_stat2_prtx_s cn58xx;
- struct cvmx_pip_stat2_prtx_s cn58xxp1;
-};
-
-union cvmx_pip_stat3_prtx {
- uint64_t u64;
- struct cvmx_pip_stat3_prtx_s {
- uint64_t bcst:32;
- uint64_t mcst:32;
- } s;
- struct cvmx_pip_stat3_prtx_s cn30xx;
- struct cvmx_pip_stat3_prtx_s cn31xx;
- struct cvmx_pip_stat3_prtx_s cn38xx;
- struct cvmx_pip_stat3_prtx_s cn38xxp2;
- struct cvmx_pip_stat3_prtx_s cn50xx;
- struct cvmx_pip_stat3_prtx_s cn52xx;
- struct cvmx_pip_stat3_prtx_s cn52xxp1;
- struct cvmx_pip_stat3_prtx_s cn56xx;
- struct cvmx_pip_stat3_prtx_s cn56xxp1;
- struct cvmx_pip_stat3_prtx_s cn58xx;
- struct cvmx_pip_stat3_prtx_s cn58xxp1;
-};
-
-union cvmx_pip_stat4_prtx {
- uint64_t u64;
- struct cvmx_pip_stat4_prtx_s {
- uint64_t h65to127:32;
- uint64_t h64:32;
- } s;
- struct cvmx_pip_stat4_prtx_s cn30xx;
- struct cvmx_pip_stat4_prtx_s cn31xx;
- struct cvmx_pip_stat4_prtx_s cn38xx;
- struct cvmx_pip_stat4_prtx_s cn38xxp2;
- struct cvmx_pip_stat4_prtx_s cn50xx;
- struct cvmx_pip_stat4_prtx_s cn52xx;
- struct cvmx_pip_stat4_prtx_s cn52xxp1;
- struct cvmx_pip_stat4_prtx_s cn56xx;
- struct cvmx_pip_stat4_prtx_s cn56xxp1;
- struct cvmx_pip_stat4_prtx_s cn58xx;
- struct cvmx_pip_stat4_prtx_s cn58xxp1;
-};
-
-union cvmx_pip_stat5_prtx {
- uint64_t u64;
- struct cvmx_pip_stat5_prtx_s {
- uint64_t h256to511:32;
- uint64_t h128to255:32;
- } s;
- struct cvmx_pip_stat5_prtx_s cn30xx;
- struct cvmx_pip_stat5_prtx_s cn31xx;
- struct cvmx_pip_stat5_prtx_s cn38xx;
- struct cvmx_pip_stat5_prtx_s cn38xxp2;
- struct cvmx_pip_stat5_prtx_s cn50xx;
- struct cvmx_pip_stat5_prtx_s cn52xx;
- struct cvmx_pip_stat5_prtx_s cn52xxp1;
- struct cvmx_pip_stat5_prtx_s cn56xx;
- struct cvmx_pip_stat5_prtx_s cn56xxp1;
- struct cvmx_pip_stat5_prtx_s cn58xx;
- struct cvmx_pip_stat5_prtx_s cn58xxp1;
-};
-
-union cvmx_pip_stat6_prtx {
- uint64_t u64;
- struct cvmx_pip_stat6_prtx_s {
- uint64_t h1024to1518:32;
- uint64_t h512to1023:32;
- } s;
- struct cvmx_pip_stat6_prtx_s cn30xx;
- struct cvmx_pip_stat6_prtx_s cn31xx;
- struct cvmx_pip_stat6_prtx_s cn38xx;
- struct cvmx_pip_stat6_prtx_s cn38xxp2;
- struct cvmx_pip_stat6_prtx_s cn50xx;
- struct cvmx_pip_stat6_prtx_s cn52xx;
- struct cvmx_pip_stat6_prtx_s cn52xxp1;
- struct cvmx_pip_stat6_prtx_s cn56xx;
- struct cvmx_pip_stat6_prtx_s cn56xxp1;
- struct cvmx_pip_stat6_prtx_s cn58xx;
- struct cvmx_pip_stat6_prtx_s cn58xxp1;
-};
-
-union cvmx_pip_stat7_prtx {
- uint64_t u64;
- struct cvmx_pip_stat7_prtx_s {
- uint64_t fcs:32;
- uint64_t h1519:32;
- } s;
- struct cvmx_pip_stat7_prtx_s cn30xx;
- struct cvmx_pip_stat7_prtx_s cn31xx;
- struct cvmx_pip_stat7_prtx_s cn38xx;
- struct cvmx_pip_stat7_prtx_s cn38xxp2;
- struct cvmx_pip_stat7_prtx_s cn50xx;
- struct cvmx_pip_stat7_prtx_s cn52xx;
- struct cvmx_pip_stat7_prtx_s cn52xxp1;
- struct cvmx_pip_stat7_prtx_s cn56xx;
- struct cvmx_pip_stat7_prtx_s cn56xxp1;
- struct cvmx_pip_stat7_prtx_s cn58xx;
- struct cvmx_pip_stat7_prtx_s cn58xxp1;
-};
-
-union cvmx_pip_stat8_prtx {
- uint64_t u64;
- struct cvmx_pip_stat8_prtx_s {
- uint64_t frag:32;
- uint64_t undersz:32;
- } s;
- struct cvmx_pip_stat8_prtx_s cn30xx;
- struct cvmx_pip_stat8_prtx_s cn31xx;
- struct cvmx_pip_stat8_prtx_s cn38xx;
- struct cvmx_pip_stat8_prtx_s cn38xxp2;
- struct cvmx_pip_stat8_prtx_s cn50xx;
- struct cvmx_pip_stat8_prtx_s cn52xx;
- struct cvmx_pip_stat8_prtx_s cn52xxp1;
- struct cvmx_pip_stat8_prtx_s cn56xx;
- struct cvmx_pip_stat8_prtx_s cn56xxp1;
- struct cvmx_pip_stat8_prtx_s cn58xx;
- struct cvmx_pip_stat8_prtx_s cn58xxp1;
-};
-
-union cvmx_pip_stat9_prtx {
- uint64_t u64;
- struct cvmx_pip_stat9_prtx_s {
- uint64_t jabber:32;
- uint64_t oversz:32;
- } s;
- struct cvmx_pip_stat9_prtx_s cn30xx;
- struct cvmx_pip_stat9_prtx_s cn31xx;
- struct cvmx_pip_stat9_prtx_s cn38xx;
- struct cvmx_pip_stat9_prtx_s cn38xxp2;
- struct cvmx_pip_stat9_prtx_s cn50xx;
- struct cvmx_pip_stat9_prtx_s cn52xx;
- struct cvmx_pip_stat9_prtx_s cn52xxp1;
- struct cvmx_pip_stat9_prtx_s cn56xx;
- struct cvmx_pip_stat9_prtx_s cn56xxp1;
- struct cvmx_pip_stat9_prtx_s cn58xx;
- struct cvmx_pip_stat9_prtx_s cn58xxp1;
-};
-
-union cvmx_pip_stat_ctl {
- uint64_t u64;
- struct cvmx_pip_stat_ctl_s {
- uint64_t reserved_1_63:63;
- uint64_t rdclr:1;
- } s;
- struct cvmx_pip_stat_ctl_s cn30xx;
- struct cvmx_pip_stat_ctl_s cn31xx;
- struct cvmx_pip_stat_ctl_s cn38xx;
- struct cvmx_pip_stat_ctl_s cn38xxp2;
- struct cvmx_pip_stat_ctl_s cn50xx;
- struct cvmx_pip_stat_ctl_s cn52xx;
- struct cvmx_pip_stat_ctl_s cn52xxp1;
- struct cvmx_pip_stat_ctl_s cn56xx;
- struct cvmx_pip_stat_ctl_s cn56xxp1;
- struct cvmx_pip_stat_ctl_s cn58xx;
- struct cvmx_pip_stat_ctl_s cn58xxp1;
-};
-
-union cvmx_pip_stat_inb_errsx {
- uint64_t u64;
- struct cvmx_pip_stat_inb_errsx_s {
- uint64_t reserved_16_63:48;
- uint64_t errs:16;
- } s;
- struct cvmx_pip_stat_inb_errsx_s cn30xx;
- struct cvmx_pip_stat_inb_errsx_s cn31xx;
- struct cvmx_pip_stat_inb_errsx_s cn38xx;
- struct cvmx_pip_stat_inb_errsx_s cn38xxp2;
- struct cvmx_pip_stat_inb_errsx_s cn50xx;
- struct cvmx_pip_stat_inb_errsx_s cn52xx;
- struct cvmx_pip_stat_inb_errsx_s cn52xxp1;
- struct cvmx_pip_stat_inb_errsx_s cn56xx;
- struct cvmx_pip_stat_inb_errsx_s cn56xxp1;
- struct cvmx_pip_stat_inb_errsx_s cn58xx;
- struct cvmx_pip_stat_inb_errsx_s cn58xxp1;
-};
-
-union cvmx_pip_stat_inb_octsx {
- uint64_t u64;
- struct cvmx_pip_stat_inb_octsx_s {
- uint64_t reserved_48_63:16;
- uint64_t octs:48;
- } s;
- struct cvmx_pip_stat_inb_octsx_s cn30xx;
- struct cvmx_pip_stat_inb_octsx_s cn31xx;
- struct cvmx_pip_stat_inb_octsx_s cn38xx;
- struct cvmx_pip_stat_inb_octsx_s cn38xxp2;
- struct cvmx_pip_stat_inb_octsx_s cn50xx;
- struct cvmx_pip_stat_inb_octsx_s cn52xx;
- struct cvmx_pip_stat_inb_octsx_s cn52xxp1;
- struct cvmx_pip_stat_inb_octsx_s cn56xx;
- struct cvmx_pip_stat_inb_octsx_s cn56xxp1;
- struct cvmx_pip_stat_inb_octsx_s cn58xx;
- struct cvmx_pip_stat_inb_octsx_s cn58xxp1;
-};
-
-union cvmx_pip_stat_inb_pktsx {
- uint64_t u64;
- struct cvmx_pip_stat_inb_pktsx_s {
- uint64_t reserved_32_63:32;
- uint64_t pkts:32;
- } s;
- struct cvmx_pip_stat_inb_pktsx_s cn30xx;
- struct cvmx_pip_stat_inb_pktsx_s cn31xx;
- struct cvmx_pip_stat_inb_pktsx_s cn38xx;
- struct cvmx_pip_stat_inb_pktsx_s cn38xxp2;
- struct cvmx_pip_stat_inb_pktsx_s cn50xx;
- struct cvmx_pip_stat_inb_pktsx_s cn52xx;
- struct cvmx_pip_stat_inb_pktsx_s cn52xxp1;
- struct cvmx_pip_stat_inb_pktsx_s cn56xx;
- struct cvmx_pip_stat_inb_pktsx_s cn56xxp1;
- struct cvmx_pip_stat_inb_pktsx_s cn58xx;
- struct cvmx_pip_stat_inb_pktsx_s cn58xxp1;
-};
-
-union cvmx_pip_tag_incx {
- uint64_t u64;
- struct cvmx_pip_tag_incx_s {
- uint64_t reserved_8_63:56;
- uint64_t en:8;
- } s;
- struct cvmx_pip_tag_incx_s cn30xx;
- struct cvmx_pip_tag_incx_s cn31xx;
- struct cvmx_pip_tag_incx_s cn38xx;
- struct cvmx_pip_tag_incx_s cn38xxp2;
- struct cvmx_pip_tag_incx_s cn50xx;
- struct cvmx_pip_tag_incx_s cn52xx;
- struct cvmx_pip_tag_incx_s cn52xxp1;
- struct cvmx_pip_tag_incx_s cn56xx;
- struct cvmx_pip_tag_incx_s cn56xxp1;
- struct cvmx_pip_tag_incx_s cn58xx;
- struct cvmx_pip_tag_incx_s cn58xxp1;
-};
-
-union cvmx_pip_tag_mask {
- uint64_t u64;
- struct cvmx_pip_tag_mask_s {
- uint64_t reserved_16_63:48;
- uint64_t mask:16;
- } s;
- struct cvmx_pip_tag_mask_s cn30xx;
- struct cvmx_pip_tag_mask_s cn31xx;
- struct cvmx_pip_tag_mask_s cn38xx;
- struct cvmx_pip_tag_mask_s cn38xxp2;
- struct cvmx_pip_tag_mask_s cn50xx;
- struct cvmx_pip_tag_mask_s cn52xx;
- struct cvmx_pip_tag_mask_s cn52xxp1;
- struct cvmx_pip_tag_mask_s cn56xx;
- struct cvmx_pip_tag_mask_s cn56xxp1;
- struct cvmx_pip_tag_mask_s cn58xx;
- struct cvmx_pip_tag_mask_s cn58xxp1;
-};
-
-union cvmx_pip_tag_secret {
- uint64_t u64;
- struct cvmx_pip_tag_secret_s {
- uint64_t reserved_32_63:32;
- uint64_t dst:16;
- uint64_t src:16;
- } s;
- struct cvmx_pip_tag_secret_s cn30xx;
- struct cvmx_pip_tag_secret_s cn31xx;
- struct cvmx_pip_tag_secret_s cn38xx;
- struct cvmx_pip_tag_secret_s cn38xxp2;
- struct cvmx_pip_tag_secret_s cn50xx;
- struct cvmx_pip_tag_secret_s cn52xx;
- struct cvmx_pip_tag_secret_s cn52xxp1;
- struct cvmx_pip_tag_secret_s cn56xx;
- struct cvmx_pip_tag_secret_s cn56xxp1;
- struct cvmx_pip_tag_secret_s cn58xx;
- struct cvmx_pip_tag_secret_s cn58xxp1;
-};
-
-union cvmx_pip_todo_entry {
- uint64_t u64;
- struct cvmx_pip_todo_entry_s {
- uint64_t val:1;
- uint64_t reserved_62_62:1;
- uint64_t entry:62;
- } s;
- struct cvmx_pip_todo_entry_s cn30xx;
- struct cvmx_pip_todo_entry_s cn31xx;
- struct cvmx_pip_todo_entry_s cn38xx;
- struct cvmx_pip_todo_entry_s cn38xxp2;
- struct cvmx_pip_todo_entry_s cn50xx;
- struct cvmx_pip_todo_entry_s cn52xx;
- struct cvmx_pip_todo_entry_s cn52xxp1;
- struct cvmx_pip_todo_entry_s cn56xx;
- struct cvmx_pip_todo_entry_s cn56xxp1;
- struct cvmx_pip_todo_entry_s cn58xx;
- struct cvmx_pip_todo_entry_s cn58xxp1;
-};
-
-#endif
diff --git a/drivers/staging/octeon/cvmx-pip.h b/drivers/staging/octeon/cvmx-pip.h
deleted file mode 100644
index 78dbce8..0000000
--- a/drivers/staging/octeon/cvmx-pip.h
+++ /dev/null
@@ -1,524 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- * Interface to the hardware Packet Input Processing unit.
- *
- */
-
-#ifndef __CVMX_PIP_H__
-#define __CVMX_PIP_H__
-
-#include "cvmx-wqe.h"
-#include "cvmx-fpa.h"
-#include "cvmx-pip-defs.h"
-
-#define CVMX_PIP_NUM_INPUT_PORTS 40
-#define CVMX_PIP_NUM_WATCHERS 4
-
-/*
- * Encodes the different error and exception codes
- */
-typedef enum {
- CVMX_PIP_L4_NO_ERR = 0ull,
- /*
- * 1 = TCP (UDP) packet not long enough to cover TCP (UDP)
- * header
- */
- CVMX_PIP_L4_MAL_ERR = 1ull,
- /* 2 = TCP/UDP checksum failure */
- CVMX_PIP_CHK_ERR = 2ull,
- /*
- * 3 = TCP/UDP length check (TCP/UDP length does not match IP
- * length).
- */
- CVMX_PIP_L4_LENGTH_ERR = 3ull,
- /* 4 = illegal TCP/UDP port (either source or dest port is zero) */
- CVMX_PIP_BAD_PRT_ERR = 4ull,
- /* 8 = TCP flags = FIN only */
- CVMX_PIP_TCP_FLG8_ERR = 8ull,
- /* 9 = TCP flags = 0 */
- CVMX_PIP_TCP_FLG9_ERR = 9ull,
- /* 10 = TCP flags = FIN+RST+* */
- CVMX_PIP_TCP_FLG10_ERR = 10ull,
- /* 11 = TCP flags = SYN+URG+* */
- CVMX_PIP_TCP_FLG11_ERR = 11ull,
- /* 12 = TCP flags = SYN+RST+* */
- CVMX_PIP_TCP_FLG12_ERR = 12ull,
- /* 13 = TCP flags = SYN+FIN+* */
- CVMX_PIP_TCP_FLG13_ERR = 13ull
-} cvmx_pip_l4_err_t;
-
-typedef enum {
-
- CVMX_PIP_IP_NO_ERR = 0ull,
- /* 1 = not IPv4 or IPv6 */
- CVMX_PIP_NOT_IP = 1ull,
- /* 2 = IPv4 header checksum violation */
- CVMX_PIP_IPV4_HDR_CHK = 2ull,
- /* 3 = malformed (packet not long enough to cover IP hdr) */
- CVMX_PIP_IP_MAL_HDR = 3ull,
- /* 4 = malformed (packet not long enough to cover len in IP hdr) */
- CVMX_PIP_IP_MAL_PKT = 4ull,
- /* 5 = TTL / hop count equal zero */
- CVMX_PIP_TTL_HOP = 5ull,
- /* 6 = IPv4 options / IPv6 early extension headers */
- CVMX_PIP_OPTS = 6ull
-} cvmx_pip_ip_exc_t;
-
-/**
- * NOTES
- * late collision (data received before collision)
- * late collisions cannot be detected by the receiver
- * they would appear as JAM bits which would appear as bad FCS
- * or carrier extend error which is CVMX_PIP_EXTEND_ERR
- */
-typedef enum {
- /* No error */
- CVMX_PIP_RX_NO_ERR = 0ull,
- /* RGM+SPI 1 = partially received packet (buffering/bandwidth
- * not adequate) */
- CVMX_PIP_PARTIAL_ERR = 1ull,
- /* RGM+SPI 2 = receive packet too large and truncated */
- CVMX_PIP_JABBER_ERR = 2ull,
- /*
- * RGM 3 = max frame error (pkt len > max frame len) (with FCS
- * error)
- */
- CVMX_PIP_OVER_FCS_ERR = 3ull,
- /* RGM+SPI 4 = max frame error (pkt len > max frame len) */
- CVMX_PIP_OVER_ERR = 4ull,
- /*
- * RGM 5 = nibble error (data not byte multiple - 100M and 10M
- * only)
- */
- CVMX_PIP_ALIGN_ERR = 5ull,
- /*
- * RGM 6 = min frame error (pkt len < min frame len) (with FCS
- * error)
- */
- CVMX_PIP_UNDER_FCS_ERR = 6ull,
- /* RGM 7 = FCS error */
- CVMX_PIP_GMX_FCS_ERR = 7ull,
- /* RGM+SPI 8 = min frame error (pkt len < min frame len) */
- CVMX_PIP_UNDER_ERR = 8ull,
- /* RGM 9 = Frame carrier extend error */
- CVMX_PIP_EXTEND_ERR = 9ull,
- /*
- * RGM 10 = length mismatch (len did not match len in L2
- * length/type)
- */
- CVMX_PIP_LENGTH_ERR = 10ull,
- /* RGM 11 = Frame error (some or all data bits marked err) */
- CVMX_PIP_DAT_ERR = 11ull,
- /* SPI 11 = DIP4 error */
- CVMX_PIP_DIP_ERR = 11ull,
- /*
- * RGM 12 = packet was not large enough to pass the skipper -
- * no inspection could occur.
- */
- CVMX_PIP_SKIP_ERR = 12ull,
- /*
- * RGM 13 = studder error (data not repeated - 100M and 10M
- * only)
- */
- CVMX_PIP_NIBBLE_ERR = 13ull,
- /* RGM+SPI 16 = FCS error */
- CVMX_PIP_PIP_FCS = 16L,
- /*
- * RGM+SPI+PCI 17 = packet was not large enough to pass the
- * skipper - no inspection could occur.
- */
- CVMX_PIP_PIP_SKIP_ERR = 17L,
- /*
- * RGM+SPI+PCI 18 = malformed l2 (packet not long enough to
- * cover L2 hdr).
- */
- CVMX_PIP_PIP_L2_MAL_HDR = 18L
- /*
- * NOTES: xx = late collision (data received before collision)
- * late collisions cannot be detected by the receiver
- * they would appear as JAM bits which would appear as
- * bad FCS or carrier extend error which is
- * CVMX_PIP_EXTEND_ERR
- */
-} cvmx_pip_rcv_err_t;
-
-/**
- * This defines the err_code field errors in the work Q entry
- */
-typedef union {
- cvmx_pip_l4_err_t l4_err;
- cvmx_pip_ip_exc_t ip_exc;
- cvmx_pip_rcv_err_t rcv_err;
-} cvmx_pip_err_t;
-
-/**
- * Status statistics for a port
- */
-typedef struct {
- /* Inbound octets marked to be dropped by the IPD */
- uint32_t dropped_octets;
- /* Inbound packets marked to be dropped by the IPD */
- uint32_t dropped_packets;
- /* RAW PCI Packets received by PIP per port */
- uint32_t pci_raw_packets;
- /* Number of octets processed by PIP */
- uint32_t octets;
- /* Number of packets processed by PIP */
- uint32_t packets;
- /*
- * Number of identified L2 multicast packets. Does not
- * include broadcast packets. Only includes packets whose
- * parse mode is SKIP_TO_L2
- */
- uint32_t multicast_packets;
- /*
- * Number of identified L2 broadcast packets. Does not
- * include multicast packets. Only includes packets whose
- * parse mode is SKIP_TO_L2
- */
- uint32_t broadcast_packets;
- /* Number of 64B packets */
- uint32_t len_64_packets;
- /* Number of 65-127B packets */
- uint32_t len_65_127_packets;
- /* Number of 128-255B packets */
- uint32_t len_128_255_packets;
- /* Number of 256-511B packets */
- uint32_t len_256_511_packets;
- /* Number of 512-1023B packets */
- uint32_t len_512_1023_packets;
- /* Number of 1024-1518B packets */
- uint32_t len_1024_1518_packets;
- /* Number of 1519-max packets */
- uint32_t len_1519_max_packets;
- /* Number of packets with FCS or Align opcode errors */
- uint32_t fcs_align_err_packets;
- /* Number of packets with length < min */
- uint32_t runt_packets;
- /* Number of packets with length < min and FCS error */
- uint32_t runt_crc_packets;
- /* Number of packets with length > max */
- uint32_t oversize_packets;
- /* Number of packets with length > max and FCS error */
- uint32_t oversize_crc_packets;
- /* Number of packets without GMX/SPX/PCI errors received by PIP */
- uint32_t inb_packets;
- /*
- * Total number of octets from all packets received by PIP,
- * including CRC
- */
- uint64_t inb_octets;
- /* Number of packets with GMX/SPX/PCI errors received by PIP */
- uint16_t inb_errors;
-} cvmx_pip_port_status_t;
-
-/**
- * Definition of the PIP custom header that can be prepended
- * to a packet by external hardware.
- */
-typedef union {
- uint64_t u64;
- struct {
- /*
- * Documented as R - Set if the Packet is RAWFULL. If
- * set, this header must be the full 8 bytes.
- */
- uint64_t rawfull:1;
- /* Must be zero */
- uint64_t reserved0:5;
- /* PIP parse mode for this packet */
- uint64_t parse_mode:2;
- /* Must be zero */
- uint64_t reserved1:1;
- /*
- * Skip amount, including this header, to the
- * beginning of the packet
- */
- uint64_t skip_len:7;
- /* Must be zero */
- uint64_t reserved2:6;
- /* POW input queue for this packet */
- uint64_t qos:3;
- /* POW input group for this packet */
- uint64_t grp:4;
- /*
- * Flag to store this packet in the work queue entry,
- * if possible
- */
- uint64_t rs:1;
- /* POW input tag type */
- uint64_t tag_type:2;
- /* POW input tag */
- uint64_t tag:32;
- } s;
-} cvmx_pip_pkt_inst_hdr_t;
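
/*
 * Editor's sketch (not part of the original header): one hypothetical way
 * to fill in a packet instruction header as the field comments above
 * describe. All values below are illustrative assumptions, including the
 * parse_mode and tag_type encodings.
 */
static inline uint64_t example_pip_build_inst_hdr(void)
{
	cvmx_pip_pkt_inst_hdr_t hdr;

	hdr.u64 = 0;
	hdr.s.parse_mode = 1;	/* assumed encoding: parse starting at L2 */
	hdr.s.skip_len = 8;	/* skip over this 8-byte header itself */
	hdr.s.qos = 0;		/* POW input queue */
	hdr.s.grp = 0;		/* POW input group */
	hdr.s.tag_type = 0;	/* assumed encoding: ordered tag */
	hdr.s.tag = 0x12345;	/* arbitrary initial tag value */
	return hdr.u64;
}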
-
-/* CSR typedefs have been moved to cvmx-csr-*.h */
-
-/**
- * Configure an ethernet input port
- *
- * @port_num: Port number to configure
- * @port_cfg: Port hardware configuration
- * @port_tag_cfg:
- * Port POW tagging configuration
- */
-static inline void cvmx_pip_config_port(uint64_t port_num,
- union cvmx_pip_prt_cfgx port_cfg,
- union cvmx_pip_prt_tagx port_tag_cfg)
-{
- cvmx_write_csr(CVMX_PIP_PRT_CFGX(port_num), port_cfg.u64);
- cvmx_write_csr(CVMX_PIP_PRT_TAGX(port_num), port_tag_cfg.u64);
-}
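
/*
 * Editor's sketch (not part of the original header): a hypothetical call
 * sequence for cvmx_pip_config_port() using the CSR unions defined in
 * cvmx-pip-defs.h. Port 0 and every field value here are illustrative
 * assumptions only.
 */
static inline void example_pip_port_setup(void)
{
	union cvmx_pip_prt_cfgx port_cfg;
	union cvmx_pip_prt_tagx tag_cfg;

	port_cfg.u64 = 0;
	port_cfg.s.mode = 1;		/* assumed encoding: parse up to L2 */
	port_cfg.s.crc_en = 1;		/* enable hardware CRC checking */
	port_cfg.s.qos_vlan = 1;	/* let the VLAN priority pick the QoS queue */

	tag_cfg.u64 = 0;
	tag_cfg.s.grp = 0;		/* POW group for work from this port */
	tag_cfg.s.ip4_src_flag = 1;	/* hash the IPv4 source address into the tag */
	tag_cfg.s.ip4_dst_flag = 1;	/* hash the IPv4 destination address too */

	cvmx_pip_config_port(0, port_cfg, tag_cfg);
}
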
-#if 0
-/**
- * @deprecated This function is a thin wrapper around the Pass1 version
- * of the CVMX_PIP_QOS_WATCHX CSR; Pass2 has added a field for
- * setting the group that is incompatible with this function,
- * the preferred upgrade path is to use the CSR directly.
- *
- * Configure the global QoS packet watchers. Each watcher is
- * capable of matching a field in a packet to determine the
- * QoS queue for scheduling.
- *
- * @watcher: Watcher number to configure (0 - 3).
- * @match_type: Watcher match type
- * @match_value:
- * Value the watcher will match against
- * @qos: QoS queue for packets matching this watcher
- */
-static inline void cvmx_pip_config_watcher(uint64_t watcher,
- cvmx_pip_qos_watch_types match_type,
- uint64_t match_value, uint64_t qos)
-{
- cvmx_pip_port_watcher_cfg_t watcher_config;
-
- watcher_config.u64 = 0;
- watcher_config.s.match_type = match_type;
- watcher_config.s.match_value = match_value;
- watcher_config.s.qos = qos;
-
- cvmx_write_csr(CVMX_PIP_QOS_WATCHX(watcher), watcher_config.u64);
-}
-#endif
-/**
- * Configure the VLAN priority to QoS queue mapping.
- *
- * @vlan_priority:
- * VLAN priority (0-7)
- * @qos: QoS queue for packets matching this watcher
- */
-static inline void cvmx_pip_config_vlan_qos(uint64_t vlan_priority,
- uint64_t qos)
-{
- union cvmx_pip_qos_vlanx pip_qos_vlanx;
- pip_qos_vlanx.u64 = 0;
- pip_qos_vlanx.s.qos = qos;
- cvmx_write_csr(CVMX_PIP_QOS_VLANX(vlan_priority), pip_qos_vlanx.u64);
-}
-
-/**
- * Configure the Diffserv to QoS queue mapping.
- *
- * @diffserv: Diffserv field value (0-63)
- * @qos: QoS queue for packets matching this watcher
- */
-static inline void cvmx_pip_config_diffserv_qos(uint64_t diffserv, uint64_t qos)
-{
- union cvmx_pip_qos_diffx pip_qos_diffx;
- pip_qos_diffx.u64 = 0;
- pip_qos_diffx.s.qos = qos;
- cvmx_write_csr(CVMX_PIP_QOS_DIFFX(diffserv), pip_qos_diffx.u64);
-}
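
/*
 * Editor's sketch (not part of the original header): the two helpers above
 * were typically called in a loop to build complete priority-to-QoS maps.
 * The particular mappings below are illustrative assumptions.
 */
static inline void example_pip_qos_maps(void)
{
	uint64_t i;

	/* Map VLAN priorities 0-7 straight onto QoS queues 0-7. */
	for (i = 0; i < 8; i++)
		cvmx_pip_config_vlan_qos(i, i & 0x7);

	/* Map all 64 diffserv code points; the DSCP class selector picks the queue. */
	for (i = 0; i < 64; i++)
		cvmx_pip_config_diffserv_qos(i, (i >> 3) & 0x7);
}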
-
-/**
- * Get the status counters for a port.
- *
- * @port_num: Port number to get statistics for.
- * @clear: Set to 1 to clear the counters after they are read
- * @status: Where to put the results.
- */
-static inline void cvmx_pip_get_port_status(uint64_t port_num, uint64_t clear,
- cvmx_pip_port_status_t *status)
-{
- union cvmx_pip_stat_ctl pip_stat_ctl;
- union cvmx_pip_stat0_prtx stat0;
- union cvmx_pip_stat1_prtx stat1;
- union cvmx_pip_stat2_prtx stat2;
- union cvmx_pip_stat3_prtx stat3;
- union cvmx_pip_stat4_prtx stat4;
- union cvmx_pip_stat5_prtx stat5;
- union cvmx_pip_stat6_prtx stat6;
- union cvmx_pip_stat7_prtx stat7;
- union cvmx_pip_stat8_prtx stat8;
- union cvmx_pip_stat9_prtx stat9;
- union cvmx_pip_stat_inb_pktsx pip_stat_inb_pktsx;
- union cvmx_pip_stat_inb_octsx pip_stat_inb_octsx;
- union cvmx_pip_stat_inb_errsx pip_stat_inb_errsx;
-
- pip_stat_ctl.u64 = 0;
- pip_stat_ctl.s.rdclr = clear;
- cvmx_write_csr(CVMX_PIP_STAT_CTL, pip_stat_ctl.u64);
-
- stat0.u64 = cvmx_read_csr(CVMX_PIP_STAT0_PRTX(port_num));
- stat1.u64 = cvmx_read_csr(CVMX_PIP_STAT1_PRTX(port_num));
- stat2.u64 = cvmx_read_csr(CVMX_PIP_STAT2_PRTX(port_num));
- stat3.u64 = cvmx_read_csr(CVMX_PIP_STAT3_PRTX(port_num));
- stat4.u64 = cvmx_read_csr(CVMX_PIP_STAT4_PRTX(port_num));
- stat5.u64 = cvmx_read_csr(CVMX_PIP_STAT5_PRTX(port_num));
- stat6.u64 = cvmx_read_csr(CVMX_PIP_STAT6_PRTX(port_num));
- stat7.u64 = cvmx_read_csr(CVMX_PIP_STAT7_PRTX(port_num));
- stat8.u64 = cvmx_read_csr(CVMX_PIP_STAT8_PRTX(port_num));
- stat9.u64 = cvmx_read_csr(CVMX_PIP_STAT9_PRTX(port_num));
- pip_stat_inb_pktsx.u64 =
- cvmx_read_csr(CVMX_PIP_STAT_INB_PKTSX(port_num));
- pip_stat_inb_octsx.u64 =
- cvmx_read_csr(CVMX_PIP_STAT_INB_OCTSX(port_num));
- pip_stat_inb_errsx.u64 =
- cvmx_read_csr(CVMX_PIP_STAT_INB_ERRSX(port_num));
-
- status->dropped_octets = stat0.s.drp_octs;
- status->dropped_packets = stat0.s.drp_pkts;
- status->octets = stat1.s.octs;
- status->pci_raw_packets = stat2.s.raw;
- status->packets = stat2.s.pkts;
- status->multicast_packets = stat3.s.mcst;
- status->broadcast_packets = stat3.s.bcst;
- status->len_64_packets = stat4.s.h64;
- status->len_65_127_packets = stat4.s.h65to127;
- status->len_128_255_packets = stat5.s.h128to255;
- status->len_256_511_packets = stat5.s.h256to511;
- status->len_512_1023_packets = stat6.s.h512to1023;
- status->len_1024_1518_packets = stat6.s.h1024to1518;
- status->len_1519_max_packets = stat7.s.h1519;
- status->fcs_align_err_packets = stat7.s.fcs;
- status->runt_packets = stat8.s.undersz;
- status->runt_crc_packets = stat8.s.frag;
- status->oversize_packets = stat9.s.oversz;
- status->oversize_crc_packets = stat9.s.jabber;
- status->inb_packets = pip_stat_inb_pktsx.s.pkts;
- status->inb_octets = pip_stat_inb_octsx.s.octs;
- status->inb_errors = pip_stat_inb_errsx.s.errs;
-
- if (cvmx_octeon_is_pass1()) {
- /*
- * Kludge to fix Octeon Pass 1 errata - Drop counts
- * don't work.
- */
- if (status->inb_packets > status->packets)
- status->dropped_packets =
- status->inb_packets - status->packets;
- else
- status->dropped_packets = 0;
- if (status->inb_octets - status->inb_packets * 4 >
- status->octets)
- status->dropped_octets =
- status->inb_octets - status->inb_packets * 4 -
- status->octets;
- else
- status->dropped_octets = 0;
- }
-}
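
/*
 * Editor's sketch (not part of the original header): a hypothetical helper
 * showing how cvmx_pip_get_port_status() was polled. Passing clear=1 makes
 * each call report only the events since the previous read.
 */
static inline uint32_t example_pip_port_drops(uint64_t port_num)
{
	cvmx_pip_port_status_t status;

	cvmx_pip_get_port_status(port_num, 1, &status);
	return status.dropped_packets;
}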
-
-/**
- * Configure the hardware CRC engine
- *
- * @interface: Interface to configure (0 or 1)
- * @invert_result:
- * Invert the result of the CRC
- * @reflect: Reflect (bit-reverse) the bytes fed into the CRC
- * @initialization_vector:
- * CRC initialization vector
- */
-static inline void cvmx_pip_config_crc(uint64_t interface,
- uint64_t invert_result, uint64_t reflect,
- uint32_t initialization_vector)
-{
- if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)) {
- union cvmx_pip_crc_ctlx config;
- union cvmx_pip_crc_ivx pip_crc_ivx;
-
- config.u64 = 0;
- config.s.invres = invert_result;
- config.s.reflect = reflect;
- cvmx_write_csr(CVMX_PIP_CRC_CTLX(interface), config.u64);
-
- pip_crc_ivx.u64 = 0;
- pip_crc_ivx.s.iv = initialization_vector;
- cvmx_write_csr(CVMX_PIP_CRC_IVX(interface), pip_crc_ivx.u64);
- }
-}
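
/*
 * Editor's sketch (not part of the original header): an assumed typical CRC
 * setup for interface 0 - result not inverted, input not reflected, and an
 * all-ones initialization vector. The function is a no-op on models other
 * than CN38XX/CN58XX.
 */
static inline void example_pip_crc_setup(void)
{
	cvmx_pip_config_crc(0, 0, 0, 0xffffffff);
}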
-
-/**
- * Clear all bits in a tag mask. This should be called on
- * startup before any calls to cvmx_pip_tag_mask_set. Each bit
- * set in the final mask represent a byte used in the packet for
- * tag generation.
- *
- * @mask_index: Which tag mask to clear (0..3)
- */
-static inline void cvmx_pip_tag_mask_clear(uint64_t mask_index)
-{
- uint64_t index;
- union cvmx_pip_tag_incx pip_tag_incx;
- pip_tag_incx.u64 = 0;
- pip_tag_incx.s.en = 0;
- for (index = mask_index * 16; index < (mask_index + 1) * 16; index++)
- cvmx_write_csr(CVMX_PIP_TAG_INCX(index), pip_tag_incx.u64);
-}
-
-/**
- * Sets a range of bits in the tag mask. The tag mask is used
- * when the cvmx_pip_port_tag_cfg_t tag_mode is non zero.
- * There are four separate masks that can be configured.
- *
- * @mask_index: Which tag mask to modify (0..3)
- * @offset: Offset into the bitmask to set bits at. Use the GCC macro
- * offsetof() to determine the offsets into packet headers.
- * For example, offsetof(struct ethhdr, h_proto) returns the offset
- * of the Ethernet protocol field. The bitmask selects which
- * bytes to include in the tag, with bit offset X selecting
- * byte at offset X from the beginning of the packet data.
- * @len: Number of bytes to include. Usually this is the sizeof()
- * the field.
- */
-static inline void cvmx_pip_tag_mask_set(uint64_t mask_index, uint64_t offset,
- uint64_t len)
-{
- while (len--) {
- union cvmx_pip_tag_incx pip_tag_incx;
- uint64_t index = mask_index * 16 + offset / 8;
- pip_tag_incx.u64 = cvmx_read_csr(CVMX_PIP_TAG_INCX(index));
- pip_tag_incx.s.en |= 0x80 >> (offset & 0x7);
- cvmx_write_csr(CVMX_PIP_TAG_INCX(index), pip_tag_incx.u64);
- offset++;
- }
-}
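
/*
 * Editor's sketch (not part of the original header): the clear-then-set
 * pattern described in the comments above, using mask 0 and the Ethernet
 * type field as an assumed example. struct ethhdr and offsetof() come from
 * the kernel headers (<linux/if_ether.h>, <linux/stddef.h>) this file was
 * built against.
 */
static inline void example_pip_tag_on_ethertype(void)
{
	cvmx_pip_tag_mask_clear(0);
	/* Include the 2-byte EtherType in tag generation. */
	cvmx_pip_tag_mask_set(0, offsetof(struct ethhdr, h_proto), 2);
}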
-
-#endif /* __CVMX_PIP_H__ */
diff --git a/drivers/staging/octeon/cvmx-pko-defs.h b/drivers/staging/octeon/cvmx-pko-defs.h
deleted file mode 100644
index 50e779c..0000000
--- a/drivers/staging/octeon/cvmx-pko-defs.h
+++ /dev/null
@@ -1,1133 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_PKO_DEFS_H__
-#define __CVMX_PKO_DEFS_H__
-
-#define CVMX_PKO_MEM_COUNT0 \
- CVMX_ADD_IO_SEG(0x0001180050001080ull)
-#define CVMX_PKO_MEM_COUNT1 \
- CVMX_ADD_IO_SEG(0x0001180050001088ull)
-#define CVMX_PKO_MEM_DEBUG0 \
- CVMX_ADD_IO_SEG(0x0001180050001100ull)
-#define CVMX_PKO_MEM_DEBUG1 \
- CVMX_ADD_IO_SEG(0x0001180050001108ull)
-#define CVMX_PKO_MEM_DEBUG10 \
- CVMX_ADD_IO_SEG(0x0001180050001150ull)
-#define CVMX_PKO_MEM_DEBUG11 \
- CVMX_ADD_IO_SEG(0x0001180050001158ull)
-#define CVMX_PKO_MEM_DEBUG12 \
- CVMX_ADD_IO_SEG(0x0001180050001160ull)
-#define CVMX_PKO_MEM_DEBUG13 \
- CVMX_ADD_IO_SEG(0x0001180050001168ull)
-#define CVMX_PKO_MEM_DEBUG14 \
- CVMX_ADD_IO_SEG(0x0001180050001170ull)
-#define CVMX_PKO_MEM_DEBUG2 \
- CVMX_ADD_IO_SEG(0x0001180050001110ull)
-#define CVMX_PKO_MEM_DEBUG3 \
- CVMX_ADD_IO_SEG(0x0001180050001118ull)
-#define CVMX_PKO_MEM_DEBUG4 \
- CVMX_ADD_IO_SEG(0x0001180050001120ull)
-#define CVMX_PKO_MEM_DEBUG5 \
- CVMX_ADD_IO_SEG(0x0001180050001128ull)
-#define CVMX_PKO_MEM_DEBUG6 \
- CVMX_ADD_IO_SEG(0x0001180050001130ull)
-#define CVMX_PKO_MEM_DEBUG7 \
- CVMX_ADD_IO_SEG(0x0001180050001138ull)
-#define CVMX_PKO_MEM_DEBUG8 \
- CVMX_ADD_IO_SEG(0x0001180050001140ull)
-#define CVMX_PKO_MEM_DEBUG9 \
- CVMX_ADD_IO_SEG(0x0001180050001148ull)
-#define CVMX_PKO_MEM_PORT_PTRS \
- CVMX_ADD_IO_SEG(0x0001180050001010ull)
-#define CVMX_PKO_MEM_PORT_QOS \
- CVMX_ADD_IO_SEG(0x0001180050001018ull)
-#define CVMX_PKO_MEM_PORT_RATE0 \
- CVMX_ADD_IO_SEG(0x0001180050001020ull)
-#define CVMX_PKO_MEM_PORT_RATE1 \
- CVMX_ADD_IO_SEG(0x0001180050001028ull)
-#define CVMX_PKO_MEM_QUEUE_PTRS \
- CVMX_ADD_IO_SEG(0x0001180050001000ull)
-#define CVMX_PKO_MEM_QUEUE_QOS \
- CVMX_ADD_IO_SEG(0x0001180050001008ull)
-#define CVMX_PKO_REG_BIST_RESULT \
- CVMX_ADD_IO_SEG(0x0001180050000080ull)
-#define CVMX_PKO_REG_CMD_BUF \
- CVMX_ADD_IO_SEG(0x0001180050000010ull)
-#define CVMX_PKO_REG_CRC_CTLX(offset) \
- CVMX_ADD_IO_SEG(0x0001180050000028ull + (((offset) & 1) * 8))
-#define CVMX_PKO_REG_CRC_ENABLE \
- CVMX_ADD_IO_SEG(0x0001180050000020ull)
-#define CVMX_PKO_REG_CRC_IVX(offset) \
- CVMX_ADD_IO_SEG(0x0001180050000038ull + (((offset) & 1) * 8))
-#define CVMX_PKO_REG_DEBUG0 \
- CVMX_ADD_IO_SEG(0x0001180050000098ull)
-#define CVMX_PKO_REG_DEBUG1 \
- CVMX_ADD_IO_SEG(0x00011800500000A0ull)
-#define CVMX_PKO_REG_DEBUG2 \
- CVMX_ADD_IO_SEG(0x00011800500000A8ull)
-#define CVMX_PKO_REG_DEBUG3 \
- CVMX_ADD_IO_SEG(0x00011800500000B0ull)
-#define CVMX_PKO_REG_ENGINE_INFLIGHT \
- CVMX_ADD_IO_SEG(0x0001180050000050ull)
-#define CVMX_PKO_REG_ENGINE_THRESH \
- CVMX_ADD_IO_SEG(0x0001180050000058ull)
-#define CVMX_PKO_REG_ERROR \
- CVMX_ADD_IO_SEG(0x0001180050000088ull)
-#define CVMX_PKO_REG_FLAGS \
- CVMX_ADD_IO_SEG(0x0001180050000000ull)
-#define CVMX_PKO_REG_GMX_PORT_MODE \
- CVMX_ADD_IO_SEG(0x0001180050000018ull)
-#define CVMX_PKO_REG_INT_MASK \
- CVMX_ADD_IO_SEG(0x0001180050000090ull)
-#define CVMX_PKO_REG_QUEUE_MODE \
- CVMX_ADD_IO_SEG(0x0001180050000048ull)
-#define CVMX_PKO_REG_QUEUE_PTRS1 \
- CVMX_ADD_IO_SEG(0x0001180050000100ull)
-#define CVMX_PKO_REG_READ_IDX \
- CVMX_ADD_IO_SEG(0x0001180050000008ull)
-
-union cvmx_pko_mem_count0 {
- uint64_t u64;
- struct cvmx_pko_mem_count0_s {
- uint64_t reserved_32_63:32;
- uint64_t count:32;
- } s;
- struct cvmx_pko_mem_count0_s cn30xx;
- struct cvmx_pko_mem_count0_s cn31xx;
- struct cvmx_pko_mem_count0_s cn38xx;
- struct cvmx_pko_mem_count0_s cn38xxp2;
- struct cvmx_pko_mem_count0_s cn50xx;
- struct cvmx_pko_mem_count0_s cn52xx;
- struct cvmx_pko_mem_count0_s cn52xxp1;
- struct cvmx_pko_mem_count0_s cn56xx;
- struct cvmx_pko_mem_count0_s cn56xxp1;
- struct cvmx_pko_mem_count0_s cn58xx;
- struct cvmx_pko_mem_count0_s cn58xxp1;
-};
-
-union cvmx_pko_mem_count1 {
- uint64_t u64;
- struct cvmx_pko_mem_count1_s {
- uint64_t reserved_48_63:16;
- uint64_t count:48;
- } s;
- struct cvmx_pko_mem_count1_s cn30xx;
- struct cvmx_pko_mem_count1_s cn31xx;
- struct cvmx_pko_mem_count1_s cn38xx;
- struct cvmx_pko_mem_count1_s cn38xxp2;
- struct cvmx_pko_mem_count1_s cn50xx;
- struct cvmx_pko_mem_count1_s cn52xx;
- struct cvmx_pko_mem_count1_s cn52xxp1;
- struct cvmx_pko_mem_count1_s cn56xx;
- struct cvmx_pko_mem_count1_s cn56xxp1;
- struct cvmx_pko_mem_count1_s cn58xx;
- struct cvmx_pko_mem_count1_s cn58xxp1;
-};
-
-union cvmx_pko_mem_debug0 {
- uint64_t u64;
- struct cvmx_pko_mem_debug0_s {
- uint64_t fau:28;
- uint64_t cmd:14;
- uint64_t segs:6;
- uint64_t size:16;
- } s;
- struct cvmx_pko_mem_debug0_s cn30xx;
- struct cvmx_pko_mem_debug0_s cn31xx;
- struct cvmx_pko_mem_debug0_s cn38xx;
- struct cvmx_pko_mem_debug0_s cn38xxp2;
- struct cvmx_pko_mem_debug0_s cn50xx;
- struct cvmx_pko_mem_debug0_s cn52xx;
- struct cvmx_pko_mem_debug0_s cn52xxp1;
- struct cvmx_pko_mem_debug0_s cn56xx;
- struct cvmx_pko_mem_debug0_s cn56xxp1;
- struct cvmx_pko_mem_debug0_s cn58xx;
- struct cvmx_pko_mem_debug0_s cn58xxp1;
-};
-
-union cvmx_pko_mem_debug1 {
- uint64_t u64;
- struct cvmx_pko_mem_debug1_s {
- uint64_t i:1;
- uint64_t back:4;
- uint64_t pool:3;
- uint64_t size:16;
- uint64_t ptr:40;
- } s;
- struct cvmx_pko_mem_debug1_s cn30xx;
- struct cvmx_pko_mem_debug1_s cn31xx;
- struct cvmx_pko_mem_debug1_s cn38xx;
- struct cvmx_pko_mem_debug1_s cn38xxp2;
- struct cvmx_pko_mem_debug1_s cn50xx;
- struct cvmx_pko_mem_debug1_s cn52xx;
- struct cvmx_pko_mem_debug1_s cn52xxp1;
- struct cvmx_pko_mem_debug1_s cn56xx;
- struct cvmx_pko_mem_debug1_s cn56xxp1;
- struct cvmx_pko_mem_debug1_s cn58xx;
- struct cvmx_pko_mem_debug1_s cn58xxp1;
-};
-
-union cvmx_pko_mem_debug10 {
- uint64_t u64;
- struct cvmx_pko_mem_debug10_s {
- uint64_t reserved_0_63:64;
- } s;
- struct cvmx_pko_mem_debug10_cn30xx {
- uint64_t fau:28;
- uint64_t cmd:14;
- uint64_t segs:6;
- uint64_t size:16;
- } cn30xx;
- struct cvmx_pko_mem_debug10_cn30xx cn31xx;
- struct cvmx_pko_mem_debug10_cn30xx cn38xx;
- struct cvmx_pko_mem_debug10_cn30xx cn38xxp2;
- struct cvmx_pko_mem_debug10_cn50xx {
- uint64_t reserved_49_63:15;
- uint64_t ptrs1:17;
- uint64_t reserved_17_31:15;
- uint64_t ptrs2:17;
- } cn50xx;
- struct cvmx_pko_mem_debug10_cn50xx cn52xx;
- struct cvmx_pko_mem_debug10_cn50xx cn52xxp1;
- struct cvmx_pko_mem_debug10_cn50xx cn56xx;
- struct cvmx_pko_mem_debug10_cn50xx cn56xxp1;
- struct cvmx_pko_mem_debug10_cn50xx cn58xx;
- struct cvmx_pko_mem_debug10_cn50xx cn58xxp1;
-};
-
-union cvmx_pko_mem_debug11 {
- uint64_t u64;
- struct cvmx_pko_mem_debug11_s {
- uint64_t i:1;
- uint64_t back:4;
- uint64_t pool:3;
- uint64_t size:16;
- uint64_t reserved_0_39:40;
- } s;
- struct cvmx_pko_mem_debug11_cn30xx {
- uint64_t i:1;
- uint64_t back:4;
- uint64_t pool:3;
- uint64_t size:16;
- uint64_t ptr:40;
- } cn30xx;
- struct cvmx_pko_mem_debug11_cn30xx cn31xx;
- struct cvmx_pko_mem_debug11_cn30xx cn38xx;
- struct cvmx_pko_mem_debug11_cn30xx cn38xxp2;
- struct cvmx_pko_mem_debug11_cn50xx {
- uint64_t reserved_23_63:41;
- uint64_t maj:1;
- uint64_t uid:3;
- uint64_t sop:1;
- uint64_t len:1;
- uint64_t chk:1;
- uint64_t cnt:13;
- uint64_t mod:3;
- } cn50xx;
- struct cvmx_pko_mem_debug11_cn50xx cn52xx;
- struct cvmx_pko_mem_debug11_cn50xx cn52xxp1;
- struct cvmx_pko_mem_debug11_cn50xx cn56xx;
- struct cvmx_pko_mem_debug11_cn50xx cn56xxp1;
- struct cvmx_pko_mem_debug11_cn50xx cn58xx;
- struct cvmx_pko_mem_debug11_cn50xx cn58xxp1;
-};
-
-union cvmx_pko_mem_debug12 {
- uint64_t u64;
- struct cvmx_pko_mem_debug12_s {
- uint64_t reserved_0_63:64;
- } s;
- struct cvmx_pko_mem_debug12_cn30xx {
- uint64_t data:64;
- } cn30xx;
- struct cvmx_pko_mem_debug12_cn30xx cn31xx;
- struct cvmx_pko_mem_debug12_cn30xx cn38xx;
- struct cvmx_pko_mem_debug12_cn30xx cn38xxp2;
- struct cvmx_pko_mem_debug12_cn50xx {
- uint64_t fau:28;
- uint64_t cmd:14;
- uint64_t segs:6;
- uint64_t size:16;
- } cn50xx;
- struct cvmx_pko_mem_debug12_cn50xx cn52xx;
- struct cvmx_pko_mem_debug12_cn50xx cn52xxp1;
- struct cvmx_pko_mem_debug12_cn50xx cn56xx;
- struct cvmx_pko_mem_debug12_cn50xx cn56xxp1;
- struct cvmx_pko_mem_debug12_cn50xx cn58xx;
- struct cvmx_pko_mem_debug12_cn50xx cn58xxp1;
-};
-
-union cvmx_pko_mem_debug13 {
- uint64_t u64;
- struct cvmx_pko_mem_debug13_s {
- uint64_t i:1;
- uint64_t back:4;
- uint64_t pool:3;
- uint64_t reserved_0_55:56;
- } s;
- struct cvmx_pko_mem_debug13_cn30xx {
- uint64_t reserved_51_63:13;
- uint64_t widx:17;
- uint64_t ridx2:17;
- uint64_t widx2:17;
- } cn30xx;
- struct cvmx_pko_mem_debug13_cn30xx cn31xx;
- struct cvmx_pko_mem_debug13_cn30xx cn38xx;
- struct cvmx_pko_mem_debug13_cn30xx cn38xxp2;
- struct cvmx_pko_mem_debug13_cn50xx {
- uint64_t i:1;
- uint64_t back:4;
- uint64_t pool:3;
- uint64_t size:16;
- uint64_t ptr:40;
- } cn50xx;
- struct cvmx_pko_mem_debug13_cn50xx cn52xx;
- struct cvmx_pko_mem_debug13_cn50xx cn52xxp1;
- struct cvmx_pko_mem_debug13_cn50xx cn56xx;
- struct cvmx_pko_mem_debug13_cn50xx cn56xxp1;
- struct cvmx_pko_mem_debug13_cn50xx cn58xx;
- struct cvmx_pko_mem_debug13_cn50xx cn58xxp1;
-};
-
-union cvmx_pko_mem_debug14 {
- uint64_t u64;
- struct cvmx_pko_mem_debug14_s {
- uint64_t reserved_0_63:64;
- } s;
- struct cvmx_pko_mem_debug14_cn30xx {
- uint64_t reserved_17_63:47;
- uint64_t ridx:17;
- } cn30xx;
- struct cvmx_pko_mem_debug14_cn30xx cn31xx;
- struct cvmx_pko_mem_debug14_cn30xx cn38xx;
- struct cvmx_pko_mem_debug14_cn30xx cn38xxp2;
- struct cvmx_pko_mem_debug14_cn52xx {
- uint64_t data:64;
- } cn52xx;
- struct cvmx_pko_mem_debug14_cn52xx cn52xxp1;
- struct cvmx_pko_mem_debug14_cn52xx cn56xx;
- struct cvmx_pko_mem_debug14_cn52xx cn56xxp1;
-};
-
-union cvmx_pko_mem_debug2 {
- uint64_t u64;
- struct cvmx_pko_mem_debug2_s {
- uint64_t i:1;
- uint64_t back:4;
- uint64_t pool:3;
- uint64_t size:16;
- uint64_t ptr:40;
- } s;
- struct cvmx_pko_mem_debug2_s cn30xx;
- struct cvmx_pko_mem_debug2_s cn31xx;
- struct cvmx_pko_mem_debug2_s cn38xx;
- struct cvmx_pko_mem_debug2_s cn38xxp2;
- struct cvmx_pko_mem_debug2_s cn50xx;
- struct cvmx_pko_mem_debug2_s cn52xx;
- struct cvmx_pko_mem_debug2_s cn52xxp1;
- struct cvmx_pko_mem_debug2_s cn56xx;
- struct cvmx_pko_mem_debug2_s cn56xxp1;
- struct cvmx_pko_mem_debug2_s cn58xx;
- struct cvmx_pko_mem_debug2_s cn58xxp1;
-};
-
-union cvmx_pko_mem_debug3 {
- uint64_t u64;
- struct cvmx_pko_mem_debug3_s {
- uint64_t reserved_0_63:64;
- } s;
- struct cvmx_pko_mem_debug3_cn30xx {
- uint64_t i:1;
- uint64_t back:4;
- uint64_t pool:3;
- uint64_t size:16;
- uint64_t ptr:40;
- } cn30xx;
- struct cvmx_pko_mem_debug3_cn30xx cn31xx;
- struct cvmx_pko_mem_debug3_cn30xx cn38xx;
- struct cvmx_pko_mem_debug3_cn30xx cn38xxp2;
- struct cvmx_pko_mem_debug3_cn50xx {
- uint64_t data:64;
- } cn50xx;
- struct cvmx_pko_mem_debug3_cn50xx cn52xx;
- struct cvmx_pko_mem_debug3_cn50xx cn52xxp1;
- struct cvmx_pko_mem_debug3_cn50xx cn56xx;
- struct cvmx_pko_mem_debug3_cn50xx cn56xxp1;
- struct cvmx_pko_mem_debug3_cn50xx cn58xx;
- struct cvmx_pko_mem_debug3_cn50xx cn58xxp1;
-};
-
-union cvmx_pko_mem_debug4 {
- uint64_t u64;
- struct cvmx_pko_mem_debug4_s {
- uint64_t reserved_0_63:64;
- } s;
- struct cvmx_pko_mem_debug4_cn30xx {
- uint64_t data:64;
- } cn30xx;
- struct cvmx_pko_mem_debug4_cn30xx cn31xx;
- struct cvmx_pko_mem_debug4_cn30xx cn38xx;
- struct cvmx_pko_mem_debug4_cn30xx cn38xxp2;
- struct cvmx_pko_mem_debug4_cn50xx {
- uint64_t cmnd_segs:3;
- uint64_t cmnd_siz:16;
- uint64_t cmnd_off:6;
- uint64_t uid:3;
- uint64_t dread_sop:1;
- uint64_t init_dwrite:1;
- uint64_t chk_once:1;
- uint64_t chk_mode:1;
- uint64_t active:1;
- uint64_t static_p:1;
- uint64_t qos:3;
- uint64_t qcb_ridx:5;
- uint64_t qid_off_max:4;
- uint64_t qid_off:4;
- uint64_t qid_base:8;
- uint64_t wait:1;
- uint64_t minor:2;
- uint64_t major:3;
- } cn50xx;
- struct cvmx_pko_mem_debug4_cn52xx {
- uint64_t curr_siz:8;
- uint64_t curr_off:16;
- uint64_t cmnd_segs:6;
- uint64_t cmnd_siz:16;
- uint64_t cmnd_off:6;
- uint64_t uid:2;
- uint64_t dread_sop:1;
- uint64_t init_dwrite:1;
- uint64_t chk_once:1;
- uint64_t chk_mode:1;
- uint64_t wait:1;
- uint64_t minor:2;
- uint64_t major:3;
- } cn52xx;
- struct cvmx_pko_mem_debug4_cn52xx cn52xxp1;
- struct cvmx_pko_mem_debug4_cn52xx cn56xx;
- struct cvmx_pko_mem_debug4_cn52xx cn56xxp1;
- struct cvmx_pko_mem_debug4_cn50xx cn58xx;
- struct cvmx_pko_mem_debug4_cn50xx cn58xxp1;
-};
-
-union cvmx_pko_mem_debug5 {
- uint64_t u64;
- struct cvmx_pko_mem_debug5_s {
- uint64_t reserved_0_63:64;
- } s;
- struct cvmx_pko_mem_debug5_cn30xx {
- uint64_t dwri_mod:1;
- uint64_t dwri_sop:1;
- uint64_t dwri_len:1;
- uint64_t dwri_cnt:13;
- uint64_t cmnd_siz:16;
- uint64_t uid:1;
- uint64_t xfer_wor:1;
- uint64_t xfer_dwr:1;
- uint64_t cbuf_fre:1;
- uint64_t reserved_27_27:1;
- uint64_t chk_mode:1;
- uint64_t active:1;
- uint64_t qos:3;
- uint64_t qcb_ridx:5;
- uint64_t qid_off:3;
- uint64_t qid_base:7;
- uint64_t wait:1;
- uint64_t minor:2;
- uint64_t major:4;
- } cn30xx;
- struct cvmx_pko_mem_debug5_cn30xx cn31xx;
- struct cvmx_pko_mem_debug5_cn30xx cn38xx;
- struct cvmx_pko_mem_debug5_cn30xx cn38xxp2;
- struct cvmx_pko_mem_debug5_cn50xx {
- uint64_t curr_ptr:29;
- uint64_t curr_siz:16;
- uint64_t curr_off:16;
- uint64_t cmnd_segs:3;
- } cn50xx;
- struct cvmx_pko_mem_debug5_cn52xx {
- uint64_t reserved_54_63:10;
- uint64_t nxt_inflt:6;
- uint64_t curr_ptr:40;
- uint64_t curr_siz:8;
- } cn52xx;
- struct cvmx_pko_mem_debug5_cn52xx cn52xxp1;
- struct cvmx_pko_mem_debug5_cn52xx cn56xx;
- struct cvmx_pko_mem_debug5_cn52xx cn56xxp1;
- struct cvmx_pko_mem_debug5_cn50xx cn58xx;
- struct cvmx_pko_mem_debug5_cn50xx cn58xxp1;
-};
-
-union cvmx_pko_mem_debug6 {
- uint64_t u64;
- struct cvmx_pko_mem_debug6_s {
- uint64_t reserved_37_63:27;
- uint64_t qid_offres:4;
- uint64_t qid_offths:4;
- uint64_t preempter:1;
- uint64_t preemptee:1;
- uint64_t preempted:1;
- uint64_t active:1;
- uint64_t statc:1;
- uint64_t qos:3;
- uint64_t qcb_ridx:5;
- uint64_t qid_offmax:4;
- uint64_t reserved_0_11:12;
- } s;
- struct cvmx_pko_mem_debug6_cn30xx {
- uint64_t reserved_11_63:53;
- uint64_t qid_offm:3;
- uint64_t static_p:1;
- uint64_t work_min:3;
- uint64_t dwri_chk:1;
- uint64_t dwri_uid:1;
- uint64_t dwri_mod:2;
- } cn30xx;
- struct cvmx_pko_mem_debug6_cn30xx cn31xx;
- struct cvmx_pko_mem_debug6_cn30xx cn38xx;
- struct cvmx_pko_mem_debug6_cn30xx cn38xxp2;
- struct cvmx_pko_mem_debug6_cn50xx {
- uint64_t reserved_11_63:53;
- uint64_t curr_ptr:11;
- } cn50xx;
- struct cvmx_pko_mem_debug6_cn52xx {
- uint64_t reserved_37_63:27;
- uint64_t qid_offres:4;
- uint64_t qid_offths:4;
- uint64_t preempter:1;
- uint64_t preemptee:1;
- uint64_t preempted:1;
- uint64_t active:1;
- uint64_t statc:1;
- uint64_t qos:3;
- uint64_t qcb_ridx:5;
- uint64_t qid_offmax:4;
- uint64_t qid_off:4;
- uint64_t qid_base:8;
- } cn52xx;
- struct cvmx_pko_mem_debug6_cn52xx cn52xxp1;
- struct cvmx_pko_mem_debug6_cn52xx cn56xx;
- struct cvmx_pko_mem_debug6_cn52xx cn56xxp1;
- struct cvmx_pko_mem_debug6_cn50xx cn58xx;
- struct cvmx_pko_mem_debug6_cn50xx cn58xxp1;
-};
-
-union cvmx_pko_mem_debug7 {
- uint64_t u64;
- struct cvmx_pko_mem_debug7_s {
- uint64_t qos:5;
- uint64_t tail:1;
- uint64_t reserved_0_57:58;
- } s;
- struct cvmx_pko_mem_debug7_cn30xx {
- uint64_t reserved_58_63:6;
- uint64_t dwb:9;
- uint64_t start:33;
- uint64_t size:16;
- } cn30xx;
- struct cvmx_pko_mem_debug7_cn30xx cn31xx;
- struct cvmx_pko_mem_debug7_cn30xx cn38xx;
- struct cvmx_pko_mem_debug7_cn30xx cn38xxp2;
- struct cvmx_pko_mem_debug7_cn50xx {
- uint64_t qos:5;
- uint64_t tail:1;
- uint64_t buf_siz:13;
- uint64_t buf_ptr:33;
- uint64_t qcb_widx:6;
- uint64_t qcb_ridx:6;
- } cn50xx;
- struct cvmx_pko_mem_debug7_cn50xx cn52xx;
- struct cvmx_pko_mem_debug7_cn50xx cn52xxp1;
- struct cvmx_pko_mem_debug7_cn50xx cn56xx;
- struct cvmx_pko_mem_debug7_cn50xx cn56xxp1;
- struct cvmx_pko_mem_debug7_cn50xx cn58xx;
- struct cvmx_pko_mem_debug7_cn50xx cn58xxp1;
-};
-
-union cvmx_pko_mem_debug8 {
- uint64_t u64;
- struct cvmx_pko_mem_debug8_s {
- uint64_t reserved_59_63:5;
- uint64_t tail:1;
- uint64_t buf_siz:13;
- uint64_t reserved_0_44:45;
- } s;
- struct cvmx_pko_mem_debug8_cn30xx {
- uint64_t qos:5;
- uint64_t tail:1;
- uint64_t buf_siz:13;
- uint64_t buf_ptr:33;
- uint64_t qcb_widx:6;
- uint64_t qcb_ridx:6;
- } cn30xx;
- struct cvmx_pko_mem_debug8_cn30xx cn31xx;
- struct cvmx_pko_mem_debug8_cn30xx cn38xx;
- struct cvmx_pko_mem_debug8_cn30xx cn38xxp2;
- struct cvmx_pko_mem_debug8_cn50xx {
- uint64_t reserved_28_63:36;
- uint64_t doorbell:20;
- uint64_t reserved_6_7:2;
- uint64_t static_p:1;
- uint64_t s_tail:1;
- uint64_t static_q:1;
- uint64_t qos:3;
- } cn50xx;
- struct cvmx_pko_mem_debug8_cn52xx {
- uint64_t reserved_29_63:35;
- uint64_t preempter:1;
- uint64_t doorbell:20;
- uint64_t reserved_7_7:1;
- uint64_t preemptee:1;
- uint64_t static_p:1;
- uint64_t s_tail:1;
- uint64_t static_q:1;
- uint64_t qos:3;
- } cn52xx;
- struct cvmx_pko_mem_debug8_cn52xx cn52xxp1;
- struct cvmx_pko_mem_debug8_cn52xx cn56xx;
- struct cvmx_pko_mem_debug8_cn52xx cn56xxp1;
- struct cvmx_pko_mem_debug8_cn50xx cn58xx;
- struct cvmx_pko_mem_debug8_cn50xx cn58xxp1;
-};
-
-union cvmx_pko_mem_debug9 {
- uint64_t u64;
- struct cvmx_pko_mem_debug9_s {
- uint64_t reserved_49_63:15;
- uint64_t ptrs0:17;
- uint64_t reserved_0_31:32;
- } s;
- struct cvmx_pko_mem_debug9_cn30xx {
- uint64_t reserved_28_63:36;
- uint64_t doorbell:20;
- uint64_t reserved_5_7:3;
- uint64_t s_tail:1;
- uint64_t static_q:1;
- uint64_t qos:3;
- } cn30xx;
- struct cvmx_pko_mem_debug9_cn30xx cn31xx;
- struct cvmx_pko_mem_debug9_cn38xx {
- uint64_t reserved_28_63:36;
- uint64_t doorbell:20;
- uint64_t reserved_6_7:2;
- uint64_t static_p:1;
- uint64_t s_tail:1;
- uint64_t static_q:1;
- uint64_t qos:3;
- } cn38xx;
- struct cvmx_pko_mem_debug9_cn38xx cn38xxp2;
- struct cvmx_pko_mem_debug9_cn50xx {
- uint64_t reserved_49_63:15;
- uint64_t ptrs0:17;
- uint64_t reserved_17_31:15;
- uint64_t ptrs3:17;
- } cn50xx;
- struct cvmx_pko_mem_debug9_cn50xx cn52xx;
- struct cvmx_pko_mem_debug9_cn50xx cn52xxp1;
- struct cvmx_pko_mem_debug9_cn50xx cn56xx;
- struct cvmx_pko_mem_debug9_cn50xx cn56xxp1;
- struct cvmx_pko_mem_debug9_cn50xx cn58xx;
- struct cvmx_pko_mem_debug9_cn50xx cn58xxp1;
-};
-
-union cvmx_pko_mem_port_ptrs {
- uint64_t u64;
- struct cvmx_pko_mem_port_ptrs_s {
- uint64_t reserved_62_63:2;
- uint64_t static_p:1;
- uint64_t qos_mask:8;
- uint64_t reserved_16_52:37;
- uint64_t bp_port:6;
- uint64_t eid:4;
- uint64_t pid:6;
- } s;
- struct cvmx_pko_mem_port_ptrs_s cn52xx;
- struct cvmx_pko_mem_port_ptrs_s cn52xxp1;
- struct cvmx_pko_mem_port_ptrs_s cn56xx;
- struct cvmx_pko_mem_port_ptrs_s cn56xxp1;
-};
-
-union cvmx_pko_mem_port_qos {
- uint64_t u64;
- struct cvmx_pko_mem_port_qos_s {
- uint64_t reserved_61_63:3;
- uint64_t qos_mask:8;
- uint64_t reserved_10_52:43;
- uint64_t eid:4;
- uint64_t pid:6;
- } s;
- struct cvmx_pko_mem_port_qos_s cn52xx;
- struct cvmx_pko_mem_port_qos_s cn52xxp1;
- struct cvmx_pko_mem_port_qos_s cn56xx;
- struct cvmx_pko_mem_port_qos_s cn56xxp1;
-};
-
-union cvmx_pko_mem_port_rate0 {
- uint64_t u64;
- struct cvmx_pko_mem_port_rate0_s {
- uint64_t reserved_51_63:13;
- uint64_t rate_word:19;
- uint64_t rate_pkt:24;
- uint64_t reserved_6_7:2;
- uint64_t pid:6;
- } s;
- struct cvmx_pko_mem_port_rate0_s cn52xx;
- struct cvmx_pko_mem_port_rate0_s cn52xxp1;
- struct cvmx_pko_mem_port_rate0_s cn56xx;
- struct cvmx_pko_mem_port_rate0_s cn56xxp1;
-};
-
-union cvmx_pko_mem_port_rate1 {
- uint64_t u64;
- struct cvmx_pko_mem_port_rate1_s {
- uint64_t reserved_32_63:32;
- uint64_t rate_lim:24;
- uint64_t reserved_6_7:2;
- uint64_t pid:6;
- } s;
- struct cvmx_pko_mem_port_rate1_s cn52xx;
- struct cvmx_pko_mem_port_rate1_s cn52xxp1;
- struct cvmx_pko_mem_port_rate1_s cn56xx;
- struct cvmx_pko_mem_port_rate1_s cn56xxp1;
-};
-
-union cvmx_pko_mem_queue_ptrs {
- uint64_t u64;
- struct cvmx_pko_mem_queue_ptrs_s {
- uint64_t s_tail:1;
- uint64_t static_p:1;
- uint64_t static_q:1;
- uint64_t qos_mask:8;
- uint64_t buf_ptr:36;
- uint64_t tail:1;
- uint64_t index:3;
- uint64_t port:6;
- uint64_t queue:7;
- } s;
- struct cvmx_pko_mem_queue_ptrs_s cn30xx;
- struct cvmx_pko_mem_queue_ptrs_s cn31xx;
- struct cvmx_pko_mem_queue_ptrs_s cn38xx;
- struct cvmx_pko_mem_queue_ptrs_s cn38xxp2;
- struct cvmx_pko_mem_queue_ptrs_s cn50xx;
- struct cvmx_pko_mem_queue_ptrs_s cn52xx;
- struct cvmx_pko_mem_queue_ptrs_s cn52xxp1;
- struct cvmx_pko_mem_queue_ptrs_s cn56xx;
- struct cvmx_pko_mem_queue_ptrs_s cn56xxp1;
- struct cvmx_pko_mem_queue_ptrs_s cn58xx;
- struct cvmx_pko_mem_queue_ptrs_s cn58xxp1;
-};
-
-union cvmx_pko_mem_queue_qos {
- uint64_t u64;
- struct cvmx_pko_mem_queue_qos_s {
- uint64_t reserved_61_63:3;
- uint64_t qos_mask:8;
- uint64_t reserved_13_52:40;
- uint64_t pid:6;
- uint64_t qid:7;
- } s;
- struct cvmx_pko_mem_queue_qos_s cn30xx;
- struct cvmx_pko_mem_queue_qos_s cn31xx;
- struct cvmx_pko_mem_queue_qos_s cn38xx;
- struct cvmx_pko_mem_queue_qos_s cn38xxp2;
- struct cvmx_pko_mem_queue_qos_s cn50xx;
- struct cvmx_pko_mem_queue_qos_s cn52xx;
- struct cvmx_pko_mem_queue_qos_s cn52xxp1;
- struct cvmx_pko_mem_queue_qos_s cn56xx;
- struct cvmx_pko_mem_queue_qos_s cn56xxp1;
- struct cvmx_pko_mem_queue_qos_s cn58xx;
- struct cvmx_pko_mem_queue_qos_s cn58xxp1;
-};
-
-union cvmx_pko_reg_bist_result {
- uint64_t u64;
- struct cvmx_pko_reg_bist_result_s {
- uint64_t reserved_0_63:64;
- } s;
- struct cvmx_pko_reg_bist_result_cn30xx {
- uint64_t reserved_27_63:37;
- uint64_t psb2:5;
- uint64_t count:1;
- uint64_t rif:1;
- uint64_t wif:1;
- uint64_t ncb:1;
- uint64_t out:1;
- uint64_t crc:1;
- uint64_t chk:1;
- uint64_t qsb:2;
- uint64_t qcb:2;
- uint64_t pdb:4;
- uint64_t psb:7;
- } cn30xx;
- struct cvmx_pko_reg_bist_result_cn30xx cn31xx;
- struct cvmx_pko_reg_bist_result_cn30xx cn38xx;
- struct cvmx_pko_reg_bist_result_cn30xx cn38xxp2;
- struct cvmx_pko_reg_bist_result_cn50xx {
- uint64_t reserved_33_63:31;
- uint64_t csr:1;
- uint64_t iob:1;
- uint64_t out_crc:1;
- uint64_t out_ctl:3;
- uint64_t out_sta:1;
- uint64_t out_wif:1;
- uint64_t prt_chk:3;
- uint64_t prt_nxt:1;
- uint64_t prt_psb:6;
- uint64_t ncb_inb:2;
- uint64_t prt_qcb:2;
- uint64_t prt_qsb:3;
- uint64_t dat_dat:4;
- uint64_t dat_ptr:4;
- } cn50xx;
- struct cvmx_pko_reg_bist_result_cn52xx {
- uint64_t reserved_35_63:29;
- uint64_t csr:1;
- uint64_t iob:1;
- uint64_t out_dat:1;
- uint64_t out_ctl:3;
- uint64_t out_sta:1;
- uint64_t out_wif:1;
- uint64_t prt_chk:3;
- uint64_t prt_nxt:1;
- uint64_t prt_psb:8;
- uint64_t ncb_inb:2;
- uint64_t prt_qcb:2;
- uint64_t prt_qsb:3;
- uint64_t prt_ctl:2;
- uint64_t dat_dat:2;
- uint64_t dat_ptr:4;
- } cn52xx;
- struct cvmx_pko_reg_bist_result_cn52xx cn52xxp1;
- struct cvmx_pko_reg_bist_result_cn52xx cn56xx;
- struct cvmx_pko_reg_bist_result_cn52xx cn56xxp1;
- struct cvmx_pko_reg_bist_result_cn50xx cn58xx;
- struct cvmx_pko_reg_bist_result_cn50xx cn58xxp1;
-};
-
-union cvmx_pko_reg_cmd_buf {
- uint64_t u64;
- struct cvmx_pko_reg_cmd_buf_s {
- uint64_t reserved_23_63:41;
- uint64_t pool:3;
- uint64_t reserved_13_19:7;
- uint64_t size:13;
- } s;
- struct cvmx_pko_reg_cmd_buf_s cn30xx;
- struct cvmx_pko_reg_cmd_buf_s cn31xx;
- struct cvmx_pko_reg_cmd_buf_s cn38xx;
- struct cvmx_pko_reg_cmd_buf_s cn38xxp2;
- struct cvmx_pko_reg_cmd_buf_s cn50xx;
- struct cvmx_pko_reg_cmd_buf_s cn52xx;
- struct cvmx_pko_reg_cmd_buf_s cn52xxp1;
- struct cvmx_pko_reg_cmd_buf_s cn56xx;
- struct cvmx_pko_reg_cmd_buf_s cn56xxp1;
- struct cvmx_pko_reg_cmd_buf_s cn58xx;
- struct cvmx_pko_reg_cmd_buf_s cn58xxp1;
-};
-
-union cvmx_pko_reg_crc_ctlx {
- uint64_t u64;
- struct cvmx_pko_reg_crc_ctlx_s {
- uint64_t reserved_2_63:62;
- uint64_t invres:1;
- uint64_t refin:1;
- } s;
- struct cvmx_pko_reg_crc_ctlx_s cn38xx;
- struct cvmx_pko_reg_crc_ctlx_s cn38xxp2;
- struct cvmx_pko_reg_crc_ctlx_s cn58xx;
- struct cvmx_pko_reg_crc_ctlx_s cn58xxp1;
-};
-
-union cvmx_pko_reg_crc_enable {
- uint64_t u64;
- struct cvmx_pko_reg_crc_enable_s {
- uint64_t reserved_32_63:32;
- uint64_t enable:32;
- } s;
- struct cvmx_pko_reg_crc_enable_s cn38xx;
- struct cvmx_pko_reg_crc_enable_s cn38xxp2;
- struct cvmx_pko_reg_crc_enable_s cn58xx;
- struct cvmx_pko_reg_crc_enable_s cn58xxp1;
-};
-
-union cvmx_pko_reg_crc_ivx {
- uint64_t u64;
- struct cvmx_pko_reg_crc_ivx_s {
- uint64_t reserved_32_63:32;
- uint64_t iv:32;
- } s;
- struct cvmx_pko_reg_crc_ivx_s cn38xx;
- struct cvmx_pko_reg_crc_ivx_s cn38xxp2;
- struct cvmx_pko_reg_crc_ivx_s cn58xx;
- struct cvmx_pko_reg_crc_ivx_s cn58xxp1;
-};
-
-union cvmx_pko_reg_debug0 {
- uint64_t u64;
- struct cvmx_pko_reg_debug0_s {
- uint64_t asserts:64;
- } s;
- struct cvmx_pko_reg_debug0_cn30xx {
- uint64_t reserved_17_63:47;
- uint64_t asserts:17;
- } cn30xx;
- struct cvmx_pko_reg_debug0_cn30xx cn31xx;
- struct cvmx_pko_reg_debug0_cn30xx cn38xx;
- struct cvmx_pko_reg_debug0_cn30xx cn38xxp2;
- struct cvmx_pko_reg_debug0_s cn50xx;
- struct cvmx_pko_reg_debug0_s cn52xx;
- struct cvmx_pko_reg_debug0_s cn52xxp1;
- struct cvmx_pko_reg_debug0_s cn56xx;
- struct cvmx_pko_reg_debug0_s cn56xxp1;
- struct cvmx_pko_reg_debug0_s cn58xx;
- struct cvmx_pko_reg_debug0_s cn58xxp1;
-};
-
-union cvmx_pko_reg_debug1 {
- uint64_t u64;
- struct cvmx_pko_reg_debug1_s {
- uint64_t asserts:64;
- } s;
- struct cvmx_pko_reg_debug1_s cn50xx;
- struct cvmx_pko_reg_debug1_s cn52xx;
- struct cvmx_pko_reg_debug1_s cn52xxp1;
- struct cvmx_pko_reg_debug1_s cn56xx;
- struct cvmx_pko_reg_debug1_s cn56xxp1;
- struct cvmx_pko_reg_debug1_s cn58xx;
- struct cvmx_pko_reg_debug1_s cn58xxp1;
-};
-
-union cvmx_pko_reg_debug2 {
- uint64_t u64;
- struct cvmx_pko_reg_debug2_s {
- uint64_t asserts:64;
- } s;
- struct cvmx_pko_reg_debug2_s cn50xx;
- struct cvmx_pko_reg_debug2_s cn52xx;
- struct cvmx_pko_reg_debug2_s cn52xxp1;
- struct cvmx_pko_reg_debug2_s cn56xx;
- struct cvmx_pko_reg_debug2_s cn56xxp1;
- struct cvmx_pko_reg_debug2_s cn58xx;
- struct cvmx_pko_reg_debug2_s cn58xxp1;
-};
-
-union cvmx_pko_reg_debug3 {
- uint64_t u64;
- struct cvmx_pko_reg_debug3_s {
- uint64_t asserts:64;
- } s;
- struct cvmx_pko_reg_debug3_s cn50xx;
- struct cvmx_pko_reg_debug3_s cn52xx;
- struct cvmx_pko_reg_debug3_s cn52xxp1;
- struct cvmx_pko_reg_debug3_s cn56xx;
- struct cvmx_pko_reg_debug3_s cn56xxp1;
- struct cvmx_pko_reg_debug3_s cn58xx;
- struct cvmx_pko_reg_debug3_s cn58xxp1;
-};
-
-union cvmx_pko_reg_engine_inflight {
- uint64_t u64;
- struct cvmx_pko_reg_engine_inflight_s {
- uint64_t reserved_40_63:24;
- uint64_t engine9:4;
- uint64_t engine8:4;
- uint64_t engine7:4;
- uint64_t engine6:4;
- uint64_t engine5:4;
- uint64_t engine4:4;
- uint64_t engine3:4;
- uint64_t engine2:4;
- uint64_t engine1:4;
- uint64_t engine0:4;
- } s;
- struct cvmx_pko_reg_engine_inflight_s cn52xx;
- struct cvmx_pko_reg_engine_inflight_s cn52xxp1;
- struct cvmx_pko_reg_engine_inflight_s cn56xx;
- struct cvmx_pko_reg_engine_inflight_s cn56xxp1;
-};
-
-union cvmx_pko_reg_engine_thresh {
- uint64_t u64;
- struct cvmx_pko_reg_engine_thresh_s {
- uint64_t reserved_10_63:54;
- uint64_t mask:10;
- } s;
- struct cvmx_pko_reg_engine_thresh_s cn52xx;
- struct cvmx_pko_reg_engine_thresh_s cn52xxp1;
- struct cvmx_pko_reg_engine_thresh_s cn56xx;
- struct cvmx_pko_reg_engine_thresh_s cn56xxp1;
-};
-
-union cvmx_pko_reg_error {
- uint64_t u64;
- struct cvmx_pko_reg_error_s {
- uint64_t reserved_3_63:61;
- uint64_t currzero:1;
- uint64_t doorbell:1;
- uint64_t parity:1;
- } s;
- struct cvmx_pko_reg_error_cn30xx {
- uint64_t reserved_2_63:62;
- uint64_t doorbell:1;
- uint64_t parity:1;
- } cn30xx;
- struct cvmx_pko_reg_error_cn30xx cn31xx;
- struct cvmx_pko_reg_error_cn30xx cn38xx;
- struct cvmx_pko_reg_error_cn30xx cn38xxp2;
- struct cvmx_pko_reg_error_s cn50xx;
- struct cvmx_pko_reg_error_s cn52xx;
- struct cvmx_pko_reg_error_s cn52xxp1;
- struct cvmx_pko_reg_error_s cn56xx;
- struct cvmx_pko_reg_error_s cn56xxp1;
- struct cvmx_pko_reg_error_s cn58xx;
- struct cvmx_pko_reg_error_s cn58xxp1;
-};
-
-union cvmx_pko_reg_flags {
- uint64_t u64;
- struct cvmx_pko_reg_flags_s {
- uint64_t reserved_4_63:60;
- uint64_t reset:1;
- uint64_t store_be:1;
- uint64_t ena_dwb:1;
- uint64_t ena_pko:1;
- } s;
- struct cvmx_pko_reg_flags_s cn30xx;
- struct cvmx_pko_reg_flags_s cn31xx;
- struct cvmx_pko_reg_flags_s cn38xx;
- struct cvmx_pko_reg_flags_s cn38xxp2;
- struct cvmx_pko_reg_flags_s cn50xx;
- struct cvmx_pko_reg_flags_s cn52xx;
- struct cvmx_pko_reg_flags_s cn52xxp1;
- struct cvmx_pko_reg_flags_s cn56xx;
- struct cvmx_pko_reg_flags_s cn56xxp1;
- struct cvmx_pko_reg_flags_s cn58xx;
- struct cvmx_pko_reg_flags_s cn58xxp1;
-};
-
-union cvmx_pko_reg_gmx_port_mode {
- uint64_t u64;
- struct cvmx_pko_reg_gmx_port_mode_s {
- uint64_t reserved_6_63:58;
- uint64_t mode1:3;
- uint64_t mode0:3;
- } s;
- struct cvmx_pko_reg_gmx_port_mode_s cn30xx;
- struct cvmx_pko_reg_gmx_port_mode_s cn31xx;
- struct cvmx_pko_reg_gmx_port_mode_s cn38xx;
- struct cvmx_pko_reg_gmx_port_mode_s cn38xxp2;
- struct cvmx_pko_reg_gmx_port_mode_s cn50xx;
- struct cvmx_pko_reg_gmx_port_mode_s cn52xx;
- struct cvmx_pko_reg_gmx_port_mode_s cn52xxp1;
- struct cvmx_pko_reg_gmx_port_mode_s cn56xx;
- struct cvmx_pko_reg_gmx_port_mode_s cn56xxp1;
- struct cvmx_pko_reg_gmx_port_mode_s cn58xx;
- struct cvmx_pko_reg_gmx_port_mode_s cn58xxp1;
-};
-
-union cvmx_pko_reg_int_mask {
- uint64_t u64;
- struct cvmx_pko_reg_int_mask_s {
- uint64_t reserved_3_63:61;
- uint64_t currzero:1;
- uint64_t doorbell:1;
- uint64_t parity:1;
- } s;
- struct cvmx_pko_reg_int_mask_cn30xx {
- uint64_t reserved_2_63:62;
- uint64_t doorbell:1;
- uint64_t parity:1;
- } cn30xx;
- struct cvmx_pko_reg_int_mask_cn30xx cn31xx;
- struct cvmx_pko_reg_int_mask_cn30xx cn38xx;
- struct cvmx_pko_reg_int_mask_cn30xx cn38xxp2;
- struct cvmx_pko_reg_int_mask_s cn50xx;
- struct cvmx_pko_reg_int_mask_s cn52xx;
- struct cvmx_pko_reg_int_mask_s cn52xxp1;
- struct cvmx_pko_reg_int_mask_s cn56xx;
- struct cvmx_pko_reg_int_mask_s cn56xxp1;
- struct cvmx_pko_reg_int_mask_s cn58xx;
- struct cvmx_pko_reg_int_mask_s cn58xxp1;
-};
-
-union cvmx_pko_reg_queue_mode {
- uint64_t u64;
- struct cvmx_pko_reg_queue_mode_s {
- uint64_t reserved_2_63:62;
- uint64_t mode:2;
- } s;
- struct cvmx_pko_reg_queue_mode_s cn30xx;
- struct cvmx_pko_reg_queue_mode_s cn31xx;
- struct cvmx_pko_reg_queue_mode_s cn38xx;
- struct cvmx_pko_reg_queue_mode_s cn38xxp2;
- struct cvmx_pko_reg_queue_mode_s cn50xx;
- struct cvmx_pko_reg_queue_mode_s cn52xx;
- struct cvmx_pko_reg_queue_mode_s cn52xxp1;
- struct cvmx_pko_reg_queue_mode_s cn56xx;
- struct cvmx_pko_reg_queue_mode_s cn56xxp1;
- struct cvmx_pko_reg_queue_mode_s cn58xx;
- struct cvmx_pko_reg_queue_mode_s cn58xxp1;
-};
-
-union cvmx_pko_reg_queue_ptrs1 {
- uint64_t u64;
- struct cvmx_pko_reg_queue_ptrs1_s {
- uint64_t reserved_2_63:62;
- uint64_t idx3:1;
- uint64_t qid7:1;
- } s;
- struct cvmx_pko_reg_queue_ptrs1_s cn50xx;
- struct cvmx_pko_reg_queue_ptrs1_s cn52xx;
- struct cvmx_pko_reg_queue_ptrs1_s cn52xxp1;
- struct cvmx_pko_reg_queue_ptrs1_s cn56xx;
- struct cvmx_pko_reg_queue_ptrs1_s cn56xxp1;
- struct cvmx_pko_reg_queue_ptrs1_s cn58xx;
- struct cvmx_pko_reg_queue_ptrs1_s cn58xxp1;
-};
-
-union cvmx_pko_reg_read_idx {
- uint64_t u64;
- struct cvmx_pko_reg_read_idx_s {
- uint64_t reserved_16_63:48;
- uint64_t inc:8;
- uint64_t index:8;
- } s;
- struct cvmx_pko_reg_read_idx_s cn30xx;
- struct cvmx_pko_reg_read_idx_s cn31xx;
- struct cvmx_pko_reg_read_idx_s cn38xx;
- struct cvmx_pko_reg_read_idx_s cn38xxp2;
- struct cvmx_pko_reg_read_idx_s cn50xx;
- struct cvmx_pko_reg_read_idx_s cn52xx;
- struct cvmx_pko_reg_read_idx_s cn52xxp1;
- struct cvmx_pko_reg_read_idx_s cn56xx;
- struct cvmx_pko_reg_read_idx_s cn56xxp1;
- struct cvmx_pko_reg_read_idx_s cn58xx;
- struct cvmx_pko_reg_read_idx_s cn58xxp1;
-};
-
-#endif
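
The register unions above all follow the same pattern: a raw u64 overlay plus one bitfield view per chip model. A minimal sketch of how a driver reads one of these CSRs through its union, assuming the OCTEON SDK environment that provides cvmx_read_csr() and the CVMX_PKO_REG_FLAGS address constant (both used in cvmx-pko.c below); the helper name is hypothetical:

#include <asm/octeon/octeon.h>
#include "cvmx-pko.h"

/* Read PKO_REG_FLAGS through its union and report whether the packet
 * output engine is currently enabled. */
static int example_pko_is_enabled(void)
{
	union cvmx_pko_reg_flags flags;

	flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
	return flags.s.ena_pko;
}
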
diff --git a/drivers/staging/octeon/cvmx-pko.c b/drivers/staging/octeon/cvmx-pko.c
deleted file mode 100644
index 50a2c9b..0000000
--- a/drivers/staging/octeon/cvmx-pko.c
+++ /dev/null
@@ -1,506 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- * Support library for the hardware Packet Output unit.
- */
-
-#include <asm/octeon/octeon.h>
-
-#include "cvmx-config.h"
-#include "cvmx-pko.h"
-#include "cvmx-helper.h"
-
-/**
- * Internal state of packet output
- */
-
-/**
- * Call before any other calls to initialize the packet
- * output system. This does chip global config, and should only be
- * done by one core.
- */
-
-void cvmx_pko_initialize_global(void)
-{
- int i;
- uint64_t priority = 8;
- union cvmx_pko_reg_cmd_buf config;
-
- /*
- * Set the size of the PKO command buffers to an odd number of
- * 64bit words. This allows the normal two word send to stay
- * aligned and never span a command word buffer.
- */
- config.u64 = 0;
- config.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;
- config.s.size = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE / 8 - 1;
-
- cvmx_write_csr(CVMX_PKO_REG_CMD_BUF, config.u64);
-
- for (i = 0; i < CVMX_PKO_MAX_OUTPUT_QUEUES; i++)
- cvmx_pko_config_port(CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID, i, 1,
- &priority);
-
- /*
- * If we aren't using all of the queues optimize PKO's
- * internal memory.
- */
- if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)
- || OCTEON_IS_MODEL(OCTEON_CN56XX)
- || OCTEON_IS_MODEL(OCTEON_CN52XX)) {
- int num_interfaces = cvmx_helper_get_number_of_interfaces();
- int last_port =
- cvmx_helper_get_last_ipd_port(num_interfaces - 1);
- int max_queues =
- cvmx_pko_get_base_queue(last_port) +
- cvmx_pko_get_num_queues(last_port);
- if (OCTEON_IS_MODEL(OCTEON_CN38XX)) {
- if (max_queues <= 32)
- cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2);
- else if (max_queues <= 64)
- cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1);
- } else {
- if (max_queues <= 64)
- cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2);
- else if (max_queues <= 128)
- cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1);
- }
- }
-}
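
For context, a minimal sketch of the bring-up order these functions imply, assuming the usual SDK flow; the helper and its is_boot_core flag are hypothetical:

/* Global setup runs on exactly one core; the rest is per-core. */
static void example_pko_bringup(int is_boot_core)
{
	if (is_boot_core)
		cvmx_pko_initialize_global();

	/* Every core that will transmit does its local setup (currently a
	 * no-op, kept for API symmetry). */
	if (cvmx_pko_initialize_local() != 0)
		return;

	/* Real ports are normally configured with cvmx_pko_config_port()
	 * before the engine is switched on. */
	if (is_boot_core)
		cvmx_pko_enable();
}
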
-
-/**
- * This function does per-core initialization required by the PKO routines.
- * This must be called on all cores that will do packet output, and must
- * be called after the FPA has been initialized and filled with pages.
- *
- * Returns 0 on success
- * !0 on failure
- */
-int cvmx_pko_initialize_local(void)
-{
- /* Nothing to do */
- return 0;
-}
-
-/**
- * Enables the packet output hardware. It must already be
- * configured.
- */
-void cvmx_pko_enable(void)
-{
- union cvmx_pko_reg_flags flags;
-
- flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
- if (flags.s.ena_pko)
- cvmx_dprintf
- ("Warning: Enabling PKO when PKO already enabled.\n");
-
- flags.s.ena_dwb = 1;
- flags.s.ena_pko = 1;
- /*
- * Always enable big endian for 3-word commands. Does nothing
- * for 2-word commands.
- */
- flags.s.store_be = 1;
- cvmx_write_csr(CVMX_PKO_REG_FLAGS, flags.u64);
-}
-
-/**
- * Disables the packet output. Does not affect any configuration.
- */
-void cvmx_pko_disable(void)
-{
- union cvmx_pko_reg_flags pko_reg_flags;
- pko_reg_flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
- pko_reg_flags.s.ena_pko = 0;
- cvmx_write_csr(CVMX_PKO_REG_FLAGS, pko_reg_flags.u64);
-}
-
-
-/**
- * Reset the packet output.
- */
-static void __cvmx_pko_reset(void)
-{
- union cvmx_pko_reg_flags pko_reg_flags;
- pko_reg_flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS);
- pko_reg_flags.s.reset = 1;
- cvmx_write_csr(CVMX_PKO_REG_FLAGS, pko_reg_flags.u64);
-}
-
-/**
- * Shutdown and free resources required by packet output.
- */
-void cvmx_pko_shutdown(void)
-{
- union cvmx_pko_mem_queue_ptrs config;
- int queue;
-
- cvmx_pko_disable();
-
- for (queue = 0; queue < CVMX_PKO_MAX_OUTPUT_QUEUES; queue++) {
- config.u64 = 0;
- config.s.tail = 1;
- config.s.index = 0;
- config.s.port = CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID;
- config.s.queue = queue & 0x7f;
- config.s.qos_mask = 0;
- config.s.buf_ptr = 0;
- if (!OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
- union cvmx_pko_reg_queue_ptrs1 config1;
- config1.u64 = 0;
- config1.s.qid7 = queue >> 7;
- cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
- }
- cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
- cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_PKO(queue));
- }
- __cvmx_pko_reset();
-}
-
-/**
- * Configure an output port and the associated queues for use.
- *
- * @port: Port to configure.
- * @base_queue: First queue number to associate with this port.
- * @num_queues: Number of queues to associate with this port
- * @priority: Array of priority levels for each queue. Values are
- * allowed to be 0-8. A value of 8 gets 8 times the traffic
- * of a value of 1. A value of 0 indicates that the queue does
- * not participate in any rounds. These priorities can be changed
- * on the fly while the PKO is enabled. A priority of 9
- * indicates that static priority should be used. If static
- * priority is used all queues with static priority must be
- * contiguous starting at the base_queue, and lower numbered
- * queues have higher priority than higher numbered queues.
- * There must be num_queues elements in the array.
- */
-cvmx_pko_status_t cvmx_pko_config_port(uint64_t port, uint64_t base_queue,
- uint64_t num_queues,
- const uint64_t priority[])
-{
- cvmx_pko_status_t result_code;
- uint64_t queue;
- union cvmx_pko_mem_queue_ptrs config;
- union cvmx_pko_reg_queue_ptrs1 config1;
- int static_priority_base = -1;
- int static_priority_end = -1;
-
- if ((port >= CVMX_PKO_NUM_OUTPUT_PORTS)
- && (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID)) {
- cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid port %llu\n",
- (unsigned long long)port);
- return CVMX_PKO_INVALID_PORT;
- }
-
- if (base_queue + num_queues > CVMX_PKO_MAX_OUTPUT_QUEUES) {
- cvmx_dprintf
- ("ERROR: cvmx_pko_config_port: Invalid queue range %llu\n",
- (unsigned long long)(base_queue + num_queues));
- return CVMX_PKO_INVALID_QUEUE;
- }
-
- if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
- /*
- * Validate the static queue priority setup and set
- * static_priority_base and static_priority_end
- * accordingly.
- */
- for (queue = 0; queue < num_queues; queue++) {
- /* Find first queue of static priority */
- if (static_priority_base == -1
- && priority[queue] ==
- CVMX_PKO_QUEUE_STATIC_PRIORITY)
- static_priority_base = queue;
- /* Find last queue of static priority */
- if (static_priority_base != -1
- && static_priority_end == -1
- && priority[queue] != CVMX_PKO_QUEUE_STATIC_PRIORITY
- && queue)
- static_priority_end = queue - 1;
- else if (static_priority_base != -1
- && static_priority_end == -1
- && queue == num_queues - 1)
- /* all queues are static priority */
- static_priority_end = queue;
- /*
- * Check to make sure all static priority
- * queues are contiguous. Also catches some
- * cases of static priorities not starting at
- * queue 0.
- */
- if (static_priority_end != -1
- && (int)queue > static_priority_end
- && priority[queue] ==
- CVMX_PKO_QUEUE_STATIC_PRIORITY) {
- cvmx_dprintf("ERROR: cvmx_pko_config_port: "
- "Static priority queues aren't "
- "contiguous or don't start at "
- "base queue. q: %d, eq: %d\n",
- (int)queue, static_priority_end);
- return CVMX_PKO_INVALID_PRIORITY;
- }
- }
- if (static_priority_base > 0) {
- cvmx_dprintf("ERROR: cvmx_pko_config_port: Static "
- "priority queues don't start at base "
- "queue. sq: %d\n",
- static_priority_base);
- return CVMX_PKO_INVALID_PRIORITY;
- }
-#if 0
- cvmx_dprintf("Port %d: Static priority queue base: %d, "
- "end: %d\n", port,
- static_priority_base, static_priority_end);
-#endif
- }
- /*
- * At this point, static_priority_base and static_priority_end
- * are either both -1, or are valid start/end queue
- * numbers.
- */
-
- result_code = CVMX_PKO_SUCCESS;
-
-#ifdef PKO_DEBUG
- cvmx_dprintf("num queues: %d (%lld,%lld)\n", num_queues,
- CVMX_PKO_QUEUES_PER_PORT_INTERFACE0,
- CVMX_PKO_QUEUES_PER_PORT_INTERFACE1);
-#endif
-
- for (queue = 0; queue < num_queues; queue++) {
- uint64_t *buf_ptr = NULL;
-
- config1.u64 = 0;
- config1.s.idx3 = queue >> 3;
- config1.s.qid7 = (base_queue + queue) >> 7;
-
- config.u64 = 0;
- config.s.tail = queue == (num_queues - 1);
- config.s.index = queue;
- config.s.port = port;
- config.s.queue = base_queue + queue;
-
- if (!cvmx_octeon_is_pass1()) {
- config.s.static_p = static_priority_base >= 0;
- config.s.static_q = (int)queue <= static_priority_end;
- config.s.s_tail = (int)queue == static_priority_end;
- }
- /*
- * Convert the priority into an enable bit field. Try
- * to space the bits out evenly so the packets don't
- * get grouped up
- */
- switch ((int)priority[queue]) {
- case 0:
- config.s.qos_mask = 0x00;
- break;
- case 1:
- config.s.qos_mask = 0x01;
- break;
- case 2:
- config.s.qos_mask = 0x11;
- break;
- case 3:
- config.s.qos_mask = 0x49;
- break;
- case 4:
- config.s.qos_mask = 0x55;
- break;
- case 5:
- config.s.qos_mask = 0x57;
- break;
- case 6:
- config.s.qos_mask = 0x77;
- break;
- case 7:
- config.s.qos_mask = 0x7f;
- break;
- case 8:
- config.s.qos_mask = 0xff;
- break;
- case CVMX_PKO_QUEUE_STATIC_PRIORITY:
- /* Pass 1 will fall through to the error case */
- if (!cvmx_octeon_is_pass1()) {
- config.s.qos_mask = 0xff;
- break;
- }
- default:
- cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid "
- "priority %llu\n",
- (unsigned long long)priority[queue]);
- config.s.qos_mask = 0xff;
- result_code = CVMX_PKO_INVALID_PRIORITY;
- break;
- }
-
- if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) {
- cvmx_cmd_queue_result_t cmd_res =
- cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO
- (base_queue + queue),
- CVMX_PKO_MAX_QUEUE_DEPTH,
- CVMX_FPA_OUTPUT_BUFFER_POOL,
- CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE
- -
- CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST
- * 8);
- if (cmd_res != CVMX_CMD_QUEUE_SUCCESS) {
- switch (cmd_res) {
- case CVMX_CMD_QUEUE_NO_MEMORY:
- cvmx_dprintf("ERROR: "
- "cvmx_pko_config_port: "
- "Unable to allocate "
- "output buffer.\n");
- return CVMX_PKO_NO_MEMORY;
- case CVMX_CMD_QUEUE_ALREADY_SETUP:
- cvmx_dprintf
- ("ERROR: cvmx_pko_config_port: Port already setup.\n");
- return CVMX_PKO_PORT_ALREADY_SETUP;
- case CVMX_CMD_QUEUE_INVALID_PARAM:
- default:
- cvmx_dprintf
- ("ERROR: cvmx_pko_config_port: Command queue initialization failed.\n");
- return CVMX_PKO_CMD_QUEUE_INIT_ERROR;
- }
- }
-
- buf_ptr =
- (uint64_t *)
- cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_PKO
- (base_queue + queue));
- config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr);
- } else
- config.s.buf_ptr = 0;
-
- CVMX_SYNCWS;
-
- if (!OCTEON_IS_MODEL(OCTEON_CN3XXX))
- cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64);
- cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64);
- }
-
- return result_code;
-}
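
A hedged usage sketch of the function above, assuming port 0 with two queues and the weighted priorities described in the comment; the helper name is hypothetical:

/* Give port 0 two queues starting at queue 0, with the first queue
 * receiving roughly 8x the bandwidth of the second. */
static void example_configure_port0(void)
{
	const uint64_t priority[2] = { 8, 1 };
	cvmx_pko_status_t rc;

	rc = cvmx_pko_config_port(0 /* port */, 0 /* base_queue */,
				  2 /* num_queues */, priority);
	if (rc != CVMX_PKO_SUCCESS)
		cvmx_dprintf("PKO port config failed: %d\n", (int)rc);
}
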
-
-#ifdef PKO_DEBUG
-/**
- * Show map of ports -> queues for different cores.
- */
-void cvmx_pko_show_queue_map()
-{
- int core, port;
- int pko_output_ports = 36;
-
- cvmx_dprintf("port");
- for (port = 0; port < pko_output_ports; port++)
- cvmx_dprintf("%3d ", port);
- cvmx_dprintf("\n");
-
- for (core = 0; core < CVMX_MAX_CORES; core++) {
- cvmx_dprintf("\n%2d: ", core);
- for (port = 0; port < pko_output_ports; port++) {
- cvmx_dprintf("%3d ",
- cvmx_pko_get_base_queue_per_core(port,
- core));
- }
- }
- cvmx_dprintf("\n");
-}
-#endif
-
-/**
- * Rate limit a PKO port to a max packets/sec. This function is only
- * supported on CN51XX and higher, excluding CN58XX.
- *
- * @port: Port to rate limit
- * @packets_s: Maximum packet/sec
- * @burst: Maximum number of packets to burst in a row before rate
- * limiting cuts in.
- *
- * Returns Zero on success, negative on failure
- */
-int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst)
-{
- union cvmx_pko_mem_port_rate0 pko_mem_port_rate0;
- union cvmx_pko_mem_port_rate1 pko_mem_port_rate1;
-
- pko_mem_port_rate0.u64 = 0;
- pko_mem_port_rate0.s.pid = port;
- pko_mem_port_rate0.s.rate_pkt =
- cvmx_sysinfo_get()->cpu_clock_hz / packets_s / 16;
- /* No cost per word since we are limited by packets/sec, not bits/sec */
- pko_mem_port_rate0.s.rate_word = 0;
-
- pko_mem_port_rate1.u64 = 0;
- pko_mem_port_rate1.s.pid = port;
- pko_mem_port_rate1.s.rate_lim =
- ((uint64_t) pko_mem_port_rate0.s.rate_pkt * burst) >> 8;
-
- cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64);
- cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64);
- return 0;
-}
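
To make the arithmetic above concrete, a worked sketch with assumed numbers; the 600 MHz clock, port, rate and burst are illustrative, not taken from this file:

/* With an assumed 600 MHz core clock, limiting a port to 10,000
 * packets/sec with a burst of 32 packets gives:
 *   rate_pkt = 600,000,000 / 10,000 / 16 = 3750
 *   rate_lim = (3750 * 32) >> 8          = 468
 */
static void example_rate_limit_packets(void)
{
	cvmx_pko_rate_limit_packets(1 /* port */, 10000, 32);
}
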
-
-/**
- * Rate limit a PKO port to a max bits/sec. This function is only
- * supported on CN51XX and higher, excluding CN58XX.
- *
- * @port: Port to rate limit
- * @bits_s: PKO rate limit in bits/sec
- * @burst: Maximum number of bits to burst before rate
- * limiting cuts in.
- *
- * Returns Zero on success, negative on failure
- */
-int cvmx_pko_rate_limit_bits(int port, uint64_t bits_s, int burst)
-{
- union cvmx_pko_mem_port_rate0 pko_mem_port_rate0;
- union cvmx_pko_mem_port_rate1 pko_mem_port_rate1;
- uint64_t clock_rate = cvmx_sysinfo_get()->cpu_clock_hz;
- uint64_t tokens_per_bit = clock_rate * 16 / bits_s;
-
- pko_mem_port_rate0.u64 = 0;
- pko_mem_port_rate0.s.pid = port;
- /*
- * Each packet has 12 bytes of interframe gap, an 8 byte
- * preamble, and a 4 byte CRC. These are not included in the
- * per word count. Multiply by 8 to convert to bits and divide
- * by 256 for limit granularity.
- */
- pko_mem_port_rate0.s.rate_pkt = (12 + 8 + 4) * 8 * tokens_per_bit / 256;
- /* Each 8 byte word has 64bits */
- pko_mem_port_rate0.s.rate_word = 64 * tokens_per_bit;
-
- pko_mem_port_rate1.u64 = 0;
- pko_mem_port_rate1.s.pid = port;
- pko_mem_port_rate1.s.rate_lim = tokens_per_bit * burst / 256;
-
- cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64);
- cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64);
- return 0;
-}
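
As with the packet limiter, a worked sketch of the bit-rate arithmetic under assumed numbers; the 600 MHz clock, port, rate and burst values are illustrative only:

/* Assume a 600 MHz core clock, a 100 Mbit/s limit and a 256,000 bit
 * burst.  The code above then computes:
 *   tokens_per_bit = 600,000,000 * 16 / 100,000,000 = 96
 *   rate_pkt       = (12 + 8 + 4) * 8 * 96 / 256    = 72
 *   rate_word      = 64 * 96                        = 6144
 *   rate_lim       = 96 * 256,000 / 256             = 96,000
 */
static void example_rate_limit_bits(void)
{
	cvmx_pko_rate_limit_bits(1 /* port */, 100000000ull, 256000);
}
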
diff --git a/drivers/staging/octeon/cvmx-pko.h b/drivers/staging/octeon/cvmx-pko.h
deleted file mode 100644
index de3412a..0000000
--- a/drivers/staging/octeon/cvmx-pko.h
+++ /dev/null
@@ -1,610 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- *
- * Interface to the hardware Packet Output unit.
- *
- * Starting with SDK 1.7.0, the PKO output functions now support
- * two types of locking. CVMX_PKO_LOCK_ATOMIC_TAG continues to
- * function similarly to previous SDKs by using POW atomic tags
- * to preserve ordering and exclusivity. As a new option, you
- * can now pass CVMX_PKO_LOCK_CMD_QUEUE which uses a ll/sc
- * memory based locking instead. This locking has the advantage
- * of not affecting the tag state but doesn't preserve packet
- * ordering. CVMX_PKO_LOCK_CMD_QUEUE is appropriate in most
- * generic code while CVMX_PKO_LOCK_ATOMIC_TAG should be used
- * with hand tuned fast path code.
- *
- * Some other SDK differences visible to the command
- * queuing:
- * - PKO indexes are no longer stored in the FAU. A large
- * percentage of the FAU register block used to be tied up
- * maintaining PKO queue pointers. These are now stored in a
- * global named block.
- * - The PKO <b>use_locking</b> parameter can now have a global
- * effect. Since all applications use the same named block,
- * queue locking correctly applies across all operating
- * systems when using CVMX_PKO_LOCK_CMD_QUEUE.
- * - PKO 3 word commands are now supported. Use
- * cvmx_pko_send_packet_finish3().
- *
- */
-
-#ifndef __CVMX_PKO_H__
-#define __CVMX_PKO_H__
-
-#include "cvmx-fpa.h"
-#include "cvmx-pow.h"
-#include "cvmx-cmd-queue.h"
-#include "cvmx-pko-defs.h"
-
-/* Adjust the command buffer size by 1 word so that in the case of using only
- * two word PKO commands no command words straddle buffers. The useful values
- * for this are 0 and 1. */
-#define CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST (1)
-
-#define CVMX_PKO_MAX_OUTPUT_QUEUES_STATIC 256
-#define CVMX_PKO_MAX_OUTPUT_QUEUES ((OCTEON_IS_MODEL(OCTEON_CN31XX) || \
- OCTEON_IS_MODEL(OCTEON_CN3010) || OCTEON_IS_MODEL(OCTEON_CN3005) || \
- OCTEON_IS_MODEL(OCTEON_CN50XX)) ? 32 : \
- (OCTEON_IS_MODEL(OCTEON_CN58XX) || \
- OCTEON_IS_MODEL(OCTEON_CN56XX)) ? 256 : 128)
-#define CVMX_PKO_NUM_OUTPUT_PORTS 40
-/* use this for queues that are not used */
-#define CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID 63
-#define CVMX_PKO_QUEUE_STATIC_PRIORITY 9
-#define CVMX_PKO_ILLEGAL_QUEUE 0xFFFF
-#define CVMX_PKO_MAX_QUEUE_DEPTH 0
-
-typedef enum {
- CVMX_PKO_SUCCESS,
- CVMX_PKO_INVALID_PORT,
- CVMX_PKO_INVALID_QUEUE,
- CVMX_PKO_INVALID_PRIORITY,
- CVMX_PKO_NO_MEMORY,
- CVMX_PKO_PORT_ALREADY_SETUP,
- CVMX_PKO_CMD_QUEUE_INIT_ERROR
-} cvmx_pko_status_t;
-
-/**
- * This enumeration represents the different locking modes supported by PKO.
- */
-typedef enum {
- /*
- * PKO doesn't do any locking. It is the responsibility of the
- * application to make sure that no other core is accessing
- * the same queue at the same time
- */
- CVMX_PKO_LOCK_NONE = 0,
- /*
- * PKO performs an atomic tagswitch to ensure exclusive access
- * to the output queue. This will maintain packet ordering on
- * output.
- */
- CVMX_PKO_LOCK_ATOMIC_TAG = 1,
- /*
- * PKO uses the common command queue locks to ensure exclusive
- * access to the output queue. This is a memory based
- * ll/sc. This is the most portable locking mechanism.
- */
- CVMX_PKO_LOCK_CMD_QUEUE = 2,
-} cvmx_pko_lock_t;
-
-typedef struct {
- uint32_t packets;
- uint64_t octets;
- uint64_t doorbell;
-} cvmx_pko_port_status_t;
-
-/**
- * This structure defines the address to use on a packet enqueue
- */
-typedef union {
- uint64_t u64;
- struct {
- /* Must be CVMX_IO_SEG */
- uint64_t mem_space:2;
- /* Must be zero */
- uint64_t reserved:13;
- /* Must be one */
- uint64_t is_io:1;
- /* The ID of the device on the non-coherent bus */
- uint64_t did:8;
- /* Must be zero */
- uint64_t reserved2:4;
- /* Must be zero */
- uint64_t reserved3:18;
- /*
- * The hardware likes to have the output port in
- * addition to the output queue,
- */
- uint64_t port:6;
- /*
- * The output queue to send the packet to (0-127 are
- * legal)
- */
- uint64_t queue:9;
- /* Must be zero */
- uint64_t reserved4:3;
- } s;
-} cvmx_pko_doorbell_address_t;
-
-/**
- * Structure of the first packet output command word.
- */
-typedef union {
- uint64_t u64;
- struct {
- /*
- * The size of the reg1 operation - could be 8, 16,
- * 32, or 64 bits.
- */
- uint64_t size1:2;
- /*
- * The size of the reg0 operation - could be 8, 16,
- * 32, or 64 bits.
- */
- uint64_t size0:2;
- /*
- * If set, subtract 1, if clear, subtract packet
- * size.
- */
- uint64_t subone1:1;
- /*
- * The register, subtract will be done if reg1 is
- * non-zero.
- */
- uint64_t reg1:11;
- /* If set, subtract 1, if clear, subtract packet size */
- uint64_t subone0:1;
- /* The register, subtract will be done if reg0 is non-zero */
- uint64_t reg0:11;
- /*
- * When set, interpret segment pointer and segment
- * bytes in little endian order.
- */
- uint64_t le:1;
- /*
- * When set, packet data not allocated in L2 cache by
- * PKO.
- */
- uint64_t n2:1;
- /*
- * If set and rsp is set, word3 contains a pointer to
- * a work queue entry.
- */
- uint64_t wqp:1;
- /* If set, the hardware will send a response when done */
- uint64_t rsp:1;
- /*
- * If set, the supplied pkt_ptr is really a pointer to
- * a list of pkt_ptr's.
- */
- uint64_t gather:1;
- /*
- * If ipoffp1 is non zero, (ipoffp1-1) is the number
- * of bytes to IP header, and the hardware will
- * calculate and insert the UDP/TCP checksum.
- */
- uint64_t ipoffp1:7;
- /*
- * If set, ignore the I bit (force to zero) from all
- * pointer structures.
- */
- uint64_t ignore_i:1;
- /*
- * If clear, the hardware will attempt to free the
- * buffers containing the packet.
- */
- uint64_t dontfree:1;
- /*
- * The total number of segs in the packet, if gather
- * set, also gather list length.
- */
- uint64_t segs:6;
- /* Including L2, but no trailing CRC */
- uint64_t total_bytes:16;
- } s;
-} cvmx_pko_command_word0_t;
-
-/* CSR typedefs have been moved to cvmx-csr-*.h */
-
-/**
- * Definition of internal state for Packet output processing
- */
-typedef struct {
- /* ptr to start of buffer, offset kept in FAU reg */
- uint64_t *start_ptr;
-} cvmx_pko_state_elem_t;
-
-/**
- * Call before any other calls to initialize the packet
- * output system.
- */
-extern void cvmx_pko_initialize_global(void);
-extern int cvmx_pko_initialize_local(void);
-
-/**
- * Enables the packet output hardware. It must already be
- * configured.
- */
-extern void cvmx_pko_enable(void);
-
-/**
- * Disables the packet output. Does not affect any configuration.
- */
-extern void cvmx_pko_disable(void);
-
-/**
- * Shutdown and free resources required by packet output.
- */
-
-extern void cvmx_pko_shutdown(void);
-
-/**
- * Configure an output port and the associated queues for use.
- *
- * @port: Port to configure.
- * @base_queue: First queue number to associate with this port.
- * @num_queues: Number of queues to associate with this port
- * @priority: Array of priority levels for each queue. Values are
- * allowed to be 1-8. A value of 8 gets 8 times the traffic
- * of a value of 1. There must be num_queues elements in the
- * array.
- */
-extern cvmx_pko_status_t cvmx_pko_config_port(uint64_t port,
- uint64_t base_queue,
- uint64_t num_queues,
- const uint64_t priority[]);
-
-/**
- * Ring the packet output doorbell. This tells the packet
- * output hardware that "len" command words have been added
- * to its pending list. This command includes the required
- * CVMX_SYNCWS before the doorbell ring.
- *
- * @port: Port the packet is for
- * @queue: Queue the packet is for
- * @len: Length of the command in 64 bit words
- */
-static inline void cvmx_pko_doorbell(uint64_t port, uint64_t queue,
- uint64_t len)
-{
- cvmx_pko_doorbell_address_t ptr;
-
- ptr.u64 = 0;
- ptr.s.mem_space = CVMX_IO_SEG;
- ptr.s.did = CVMX_OCT_DID_PKT_SEND;
- ptr.s.is_io = 1;
- ptr.s.port = port;
- ptr.s.queue = queue;
- /*
- * Need to make sure output queue data is in DRAM before
- * doorbell write.
- */
- CVMX_SYNCWS;
- cvmx_write_io(ptr.u64, len);
-}
-
-/**
- * Prepare to send a packet. This may initiate a tag switch to
- * get exclusive access to the output queue structure, and
- * performs other prep work for the packet send operation.
- *
- * cvmx_pko_send_packet_finish() MUST be called after this function is called,
- * and must be called with the same port/queue/use_locking arguments.
- *
- * The use_locking parameter allows the caller to use three
- * possible locking modes.
- * - CVMX_PKO_LOCK_NONE
- * - PKO doesn't do any locking. It is the responsibility
- * of the application to make sure that no other core
- * is accessing the same queue at the same time.
- * - CVMX_PKO_LOCK_ATOMIC_TAG
- * - PKO performs an atomic tagswitch to ensure exclusive
- * access to the output queue. This will maintain
- * packet ordering on output.
- * - CVMX_PKO_LOCK_CMD_QUEUE
- * - PKO uses the common command queue locks to ensure
- * exclusive access to the output queue. This is a
- * memory based ll/sc. This is the most portable
- * locking mechanism.
- *
- * NOTE: If atomic locking is used, the POW entry CANNOT be
- * descheduled, as it does not contain a valid WQE pointer.
- *
- * @port: Port to send it on
- * @queue: Queue to use
- * @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
- * CVMX_PKO_LOCK_CMD_QUEUE
- */
-
-static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue,
- cvmx_pko_lock_t use_locking)
-{
- if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG) {
- /*
- * Must do a full switch here to handle all cases. We
- * use a fake WQE pointer, as the POW does not access
- * this memory. The WQE pointer and group are only
- * used if this work is descheduled, which is not
- * supported by the
- * cvmx_pko_send_packet_prepare/cvmx_pko_send_packet_finish
- * combination. Note that this is a special case in
- * which these fake values can be used - this is not a
- * general technique.
- */
- uint32_t tag =
- CVMX_TAG_SW_BITS_INTERNAL << CVMX_TAG_SW_SHIFT |
- CVMX_TAG_SUBGROUP_PKO << CVMX_TAG_SUBGROUP_SHIFT |
- (CVMX_TAG_SUBGROUP_MASK & queue);
- cvmx_pow_tag_sw_full((cvmx_wqe_t *) cvmx_phys_to_ptr(0x80), tag,
- CVMX_POW_TAG_TYPE_ATOMIC, 0);
- }
-}
-
-/**
- * Complete packet output. cvmx_pko_send_packet_prepare() must be
- * called exactly once before this, and the same parameters must be
- * passed to both cvmx_pko_send_packet_prepare() and
- * cvmx_pko_send_packet_finish().
- *
- * @port: Port to send it on
- * @queue: Queue to use
- * @pko_command:
- * PKO HW command word
- * @packet: Packet to send
- * @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
- * CVMX_PKO_LOCK_CMD_QUEUE
- *
- * Returns returns CVMX_PKO_SUCCESS on success, or error code on
- * failure of output
- */
-static inline cvmx_pko_status_t cvmx_pko_send_packet_finish(
- uint64_t port,
- uint64_t queue,
- cvmx_pko_command_word0_t pko_command,
- union cvmx_buf_ptr packet,
- cvmx_pko_lock_t use_locking)
-{
- cvmx_cmd_queue_result_t result;
- if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
- cvmx_pow_tag_sw_wait();
- result = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(queue),
- (use_locking == CVMX_PKO_LOCK_CMD_QUEUE),
- pko_command.u64, packet.u64);
- if (likely(result == CVMX_CMD_QUEUE_SUCCESS)) {
- cvmx_pko_doorbell(port, queue, 2);
- return CVMX_PKO_SUCCESS;
- } else if ((result == CVMX_CMD_QUEUE_NO_MEMORY)
- || (result == CVMX_CMD_QUEUE_FULL)) {
- return CVMX_PKO_NO_MEMORY;
- } else {
- return CVMX_PKO_INVALID_QUEUE;
- }
-}
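
A hedged sketch of the two-word send path built from the helpers above, assuming a single-segment packet; the buffer field names follow union cvmx_buf_ptr from cvmx-packet.h and the helper itself is hypothetical:

static void example_send(uint64_t port, uint64_t queue, void *data, int len)
{
	cvmx_pko_command_word0_t pko_command;
	union cvmx_buf_ptr packet;

	/* With CVMX_PKO_LOCK_CMD_QUEUE no tag switch is started, so the
	 * prepare step is effectively a no-op, but the API requires it. */
	cvmx_pko_send_packet_prepare(port, queue, CVMX_PKO_LOCK_CMD_QUEUE);

	pko_command.u64 = 0;
	pko_command.s.segs = 1;		/* one data segment */
	pko_command.s.total_bytes = len;
	pko_command.s.dontfree = 1;	/* software keeps the buffer */

	packet.u64 = 0;
	packet.s.addr = cvmx_ptr_to_phys(data);
	packet.s.size = len;

	if (cvmx_pko_send_packet_finish(port, queue, pko_command, packet,
					CVMX_PKO_LOCK_CMD_QUEUE) !=
	    CVMX_PKO_SUCCESS)
		cvmx_dprintf("PKO send failed\n");
}
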
-
-/**
- * Complete packet output. cvmx_pko_send_packet_prepare() must be
- * called exactly once before this, and the same parameters must be
- * passed to both cvmx_pko_send_packet_prepare() and
- * cvmx_pko_send_packet_finish().
- *
- * @port: Port to send it on
- * @queue: Queue to use
- * @pko_command:
- * PKO HW command word
- * @packet: Packet to send
- * @addr: Physical address of a work queue entry or physical address
- * to zero on complete.
- * @use_locking: CVMX_PKO_LOCK_NONE, CVMX_PKO_LOCK_ATOMIC_TAG, or
- * CVMX_PKO_LOCK_CMD_QUEUE
- *
- * Returns returns CVMX_PKO_SUCCESS on success, or error code on
- * failure of output
- */
-static inline cvmx_pko_status_t cvmx_pko_send_packet_finish3(
- uint64_t port,
- uint64_t queue,
- cvmx_pko_command_word0_t pko_command,
- union cvmx_buf_ptr packet,
- uint64_t addr,
- cvmx_pko_lock_t use_locking)
-{
- cvmx_cmd_queue_result_t result;
- if (use_locking == CVMX_PKO_LOCK_ATOMIC_TAG)
- cvmx_pow_tag_sw_wait();
- result = cvmx_cmd_queue_write3(CVMX_CMD_QUEUE_PKO(queue),
- (use_locking == CVMX_PKO_LOCK_CMD_QUEUE),
- pko_command.u64, packet.u64, addr);
- if (likely(result == CVMX_CMD_QUEUE_SUCCESS)) {
- cvmx_pko_doorbell(port, queue, 3);
- return CVMX_PKO_SUCCESS;
- } else if ((result == CVMX_CMD_QUEUE_NO_MEMORY)
- || (result == CVMX_CMD_QUEUE_FULL)) {
- return CVMX_PKO_NO_MEMORY;
- } else {
- return CVMX_PKO_INVALID_QUEUE;
- }
-}
-
-/**
- * Return the pko output queue associated with a port and a specific core.
- * In normal mode (PKO lockless operation is disabled), the value returned
- * is the base queue.
- *
- * @port: Port number
- * @core: Core to get queue for
- *
- * Returns Core-specific output queue
- */
-static inline int cvmx_pko_get_base_queue_per_core(int port, int core)
-{
-#ifndef CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0
-#define CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0 16
-#endif
-#ifndef CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1
-#define CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1 16
-#endif
-
- if (port < CVMX_PKO_MAX_PORTS_INTERFACE0)
- return port * CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 + core;
- else if (port >= 16 && port < 16 + CVMX_PKO_MAX_PORTS_INTERFACE1)
- return CVMX_PKO_MAX_PORTS_INTERFACE0 *
- CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 + (port -
- 16) *
- CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 + core;
- else if ((port >= 32) && (port < 36))
- return CVMX_PKO_MAX_PORTS_INTERFACE0 *
- CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 +
- CVMX_PKO_MAX_PORTS_INTERFACE1 *
- CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 + (port -
- 32) *
- CVMX_PKO_QUEUES_PER_PORT_PCI;
- else if ((port >= 36) && (port < 40))
- return CVMX_PKO_MAX_PORTS_INTERFACE0 *
- CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 +
- CVMX_PKO_MAX_PORTS_INTERFACE1 *
- CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 +
- 4 * CVMX_PKO_QUEUES_PER_PORT_PCI + (port -
- 36) *
- CVMX_PKO_QUEUES_PER_PORT_LOOP;
- else
- /* Given the limit on the number of ports we can map to
- * CVMX_PKO_MAX_OUTPUT_QUEUES_STATIC queues (currently 256,
- * divided among all cores), the remaining unmapped ports
- * are assigned an illegal queue number */
- return CVMX_PKO_ILLEGAL_QUEUE;
-}
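
To illustrate the mapping arithmetic above, a hedged worked example under one hypothetical configuration; the per-port queue counts are build-configuration macros, so the values chosen here are assumptions:

/* Hypothetical configuration:
 *   CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 = 1
 *   CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 = 1
 *   CVMX_PKO_QUEUES_PER_PORT_PCI        = 1
 * With core 0 the mapping above then gives:
 *   port  3 (interface 0) -> queue  3   (3 * 1 + 0)
 *   port 18 (interface 1) -> queue 18   (16 * 1 + (18 - 16) * 1 + 0)
 *   port 33 (PCI)         -> queue 33   (16 * 1 + 16 * 1 + (33 - 32) * 1)
 */
static void example_print_queue_map(void)
{
	cvmx_dprintf("port 3 -> base queue %d\n",
		     cvmx_pko_get_base_queue_per_core(3, 0));
}
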
-
-/**
- * For a given port number, return the base pko output queue
- * for the port.
- *
- * @port: Port number
- * Returns Base output queue
- */
-static inline int cvmx_pko_get_base_queue(int port)
-{
- return cvmx_pko_get_base_queue_per_core(port, 0);
-}
-
-/**
- * For a given port number, return the number of pko output queues.
- *
- * @port: Port number
- * Returns Number of output queues
- */
-static inline int cvmx_pko_get_num_queues(int port)
-{
- if (port < 16)
- return CVMX_PKO_QUEUES_PER_PORT_INTERFACE0;
- else if (port < 32)
- return CVMX_PKO_QUEUES_PER_PORT_INTERFACE1;
- else if (port < 36)
- return CVMX_PKO_QUEUES_PER_PORT_PCI;
- else if (port < 40)
- return CVMX_PKO_QUEUES_PER_PORT_LOOP;
- else
- return 0;
-}
-
-/**
- * Get the status counters for a port.
- *
- * @port_num: Port number to get statistics for.
- * @clear: Set to 1 to clear the counters after they are read
- * @status: Where to put the results.
- */
-static inline void cvmx_pko_get_port_status(uint64_t port_num, uint64_t clear,
- cvmx_pko_port_status_t *status)
-{
- union cvmx_pko_reg_read_idx pko_reg_read_idx;
- union cvmx_pko_mem_count0 pko_mem_count0;
- union cvmx_pko_mem_count1 pko_mem_count1;
-
- pko_reg_read_idx.u64 = 0;
- pko_reg_read_idx.s.index = port_num;
- cvmx_write_csr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64);
-
- pko_mem_count0.u64 = cvmx_read_csr(CVMX_PKO_MEM_COUNT0);
- status->packets = pko_mem_count0.s.count;
- if (clear) {
- pko_mem_count0.s.count = port_num;
- cvmx_write_csr(CVMX_PKO_MEM_COUNT0, pko_mem_count0.u64);
- }
-
- pko_mem_count1.u64 = cvmx_read_csr(CVMX_PKO_MEM_COUNT1);
- status->octets = pko_mem_count1.s.count;
- if (clear) {
- pko_mem_count1.s.count = port_num;
- cvmx_write_csr(CVMX_PKO_MEM_COUNT1, pko_mem_count1.u64);
- }
-
- if (OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
- union cvmx_pko_mem_debug9 debug9;
- pko_reg_read_idx.s.index = cvmx_pko_get_base_queue(port_num);
- cvmx_write_csr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64);
- debug9.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG9);
- status->doorbell = debug9.cn38xx.doorbell;
- } else {
- union cvmx_pko_mem_debug8 debug8;
- pko_reg_read_idx.s.index = cvmx_pko_get_base_queue(port_num);
- cvmx_write_csr(CVMX_PKO_REG_READ_IDX, pko_reg_read_idx.u64);
- debug8.u64 = cvmx_read_csr(CVMX_PKO_MEM_DEBUG8);
- status->doorbell = debug8.cn58xx.doorbell;
- }
-}
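
A small usage sketch of the status helper above; the wrapper name is hypothetical:

static void example_dump_port_stats(uint64_t port)
{
	cvmx_pko_port_status_t status;

	/* Read without clearing the hardware counters. */
	cvmx_pko_get_port_status(port, 0, &status);
	cvmx_dprintf("port %llu: %u packets, %llu octets, doorbell %llu\n",
		     (unsigned long long)port, status.packets,
		     (unsigned long long)status.octets,
		     (unsigned long long)status.doorbell);
}
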
-
-/**
- * Rate limit a PKO port to a max packets/sec. This function is only
- * supported on CN57XX, CN56XX, CN55XX, and CN54XX.
- *
- * @port: Port to rate limit
- * @packets_s: Maximum packet/sec
- * @burst: Maximum number of packets to burst in a row before rate
- * limiting cuts in.
- *
- * Returns Zero on success, negative on failure
- */
-extern int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst);
-
-/**
- * Rate limit a PKO port to a max bits/sec. This function is only
- * supported on CN57XX, CN56XX, CN55XX, and CN54XX.
- *
- * @port: Port to rate limit
- * @bits_s: PKO rate limit in bits/sec
- * @burst: Maximum number of bits to burst before rate
- * limiting cuts in.
- *
- * Returns Zero on success, negative on failure
- */
-extern int cvmx_pko_rate_limit_bits(int port, uint64_t bits_s, int burst);
-
-#endif /* __CVMX_PKO_H__ */
diff --git a/drivers/staging/octeon/cvmx-pow.h b/drivers/staging/octeon/cvmx-pow.h
deleted file mode 100644
index 999aefe..0000000
--- a/drivers/staging/octeon/cvmx-pow.h
+++ /dev/null
@@ -1,1982 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- * Interface to the hardware Packet Order / Work unit.
- *
- * New, starting with SDK 1.7.0, cvmx-pow supports a number of
- * extended consistency checks. The define
- * CVMX_ENABLE_POW_CHECKS controls the runtime insertion of POW
- * internal state checks to find common programming errors. If
- * CVMX_ENABLE_POW_CHECKS is not defined, checks are by default
- * enabled. For example, cvmx-pow will check for the following
- * program errors or POW state inconsistency.
- * - Requesting a POW operation with an active tag switch in
- * progress.
- * - Waiting for a tag switch to complete for an excessively
- * long period. This is normally a sign of an error in locking
- * causing deadlock.
- * - Illegal tag switches from NULL_NULL.
- * - Illegal tag switches from NULL.
- * - Illegal deschedule request.
- * - WQE pointer not matching the one attached to the core by
- * the POW.
- *
- */
-
-#ifndef __CVMX_POW_H__
-#define __CVMX_POW_H__
-
-#include <asm/octeon/cvmx-pow-defs.h>
-
-#include "cvmx-scratch.h"
-#include "cvmx-wqe.h"
-
-/* Default to having all POW consistency checks turned on */
-#ifndef CVMX_ENABLE_POW_CHECKS
-#define CVMX_ENABLE_POW_CHECKS 1
-#endif
-
-enum cvmx_pow_tag_type {
- /* Tag ordering is maintained */
- CVMX_POW_TAG_TYPE_ORDERED = 0L,
- /* Tag ordering is maintained, and at most one PP has the tag */
- CVMX_POW_TAG_TYPE_ATOMIC = 1L,
- /*
- * The work queue entry from the order - NEVER tag switch from
- * NULL to NULL
- */
- CVMX_POW_TAG_TYPE_NULL = 2L,
- /* A tag switch to NULL, and there is no space reserved in POW
- * - NEVER tag switch to NULL_NULL
- * - NEVER tag switch from NULL_NULL
- * - NULL_NULL is entered at the beginning of time and on a deschedule.
- * - NULL_NULL can be exited by a new work request. A NULL_SWITCH
- * load can also switch the state to NULL
- */
- CVMX_POW_TAG_TYPE_NULL_NULL = 3L
-};
-
-/**
- * Wait flag values for pow functions.
- */
-typedef enum {
- CVMX_POW_WAIT = 1,
- CVMX_POW_NO_WAIT = 0,
-} cvmx_pow_wait_t;
-
-/**
- * POW tag operations. These are used in the data stored to the POW.
- */
-typedef enum {
- /*
- * switch the tag (only) for this PP
- * - the previous tag should be non-NULL in this case
- * - tag switch response required
- * - fields used: op, type, tag
- */
- CVMX_POW_TAG_OP_SWTAG = 0L,
- /*
- * switch the tag for this PP, with full information
- * - this should be used when the previous tag is NULL
- * - tag switch response required
- * - fields used: address, op, grp, type, tag
- */
- CVMX_POW_TAG_OP_SWTAG_FULL = 1L,
- /*
- * switch the tag (and/or group) for this PP and de-schedule
- * - OK to keep the tag the same and only change the group
- * - fields used: op, no_sched, grp, type, tag
- */
- CVMX_POW_TAG_OP_SWTAG_DESCH = 2L,
- /*
- * just de-schedule
- * - fields used: op, no_sched
- */
- CVMX_POW_TAG_OP_DESCH = 3L,
- /*
- * create an entirely new work queue entry
- * - fields used: address, op, qos, grp, type, tag
- */
- CVMX_POW_TAG_OP_ADDWQ = 4L,
- /*
- * just update the work queue pointer and grp for this PP
- * - fields used: address, op, grp
- */
- CVMX_POW_TAG_OP_UPDATE_WQP_GRP = 5L,
- /*
- * set the no_sched bit on the de-schedule list
- *
- * - does nothing if the selected entry is not on the
- * de-schedule list
- *
- * - does nothing if the stored work queue pointer does not
- * match the address field
- *
- * - fields used: address, index, op
- *
- * Before issuing a *_NSCHED operation, SW must guarantee
- * that all prior deschedules and set/clr NSCHED operations
- * are complete and all prior switches are complete. The
- * hardware provides the opsdone bit and swdone bit for SW
- * polling. After issuing a *_NSCHED operation, SW must
- * guarantee that the set/clr NSCHED is complete before any
- * subsequent operations.
- */
- CVMX_POW_TAG_OP_SET_NSCHED = 6L,
- /*
- * clears the no_sched bit on the de-schedule list
- *
- * - does nothing if the selected entry is not on the
- * de-schedule list
- *
- * - does nothing if the stored work queue pointer does not
- * match the address field
- *
- * - fields used: address, index, op
- *
- * Before issuing a *_NSCHED operation, SW must guarantee that
- * all prior deschedules and set/clr NSCHED operations are
- * complete and all prior switches are complete. The hardware
- * provides the opsdone bit and swdone bit for SW
- * polling. After issuing a *_NSCHED operation, SW must
- * guarantee that the set/clr NSCHED is complete before any
- * subsequent operations.
- */
- CVMX_POW_TAG_OP_CLR_NSCHED = 7L,
- /* do nothing */
- CVMX_POW_TAG_OP_NOP = 15L
-} cvmx_pow_tag_op_t;
-
-/**
- * This structure defines the store data on a store to POW
- */
-typedef union {
- uint64_t u64;
- struct {
- /*
- * Don't reschedule this entry. no_sched is used for
- * CVMX_POW_TAG_OP_SWTAG_DESCH and
- * CVMX_POW_TAG_OP_DESCH
- */
- uint64_t no_sched:1;
- uint64_t unused:2;
-		/* Contains index of entry for a CVMX_POW_TAG_OP_*_NSCHED */
- uint64_t index:13;
- /* The operation to perform */
- cvmx_pow_tag_op_t op:4;
- uint64_t unused2:2;
- /*
- * The QOS level for the packet. qos is only used for
- * CVMX_POW_TAG_OP_ADDWQ
- */
- uint64_t qos:3;
- /*
- * The group that the work queue entry will be
- * scheduled to grp is used for CVMX_POW_TAG_OP_ADDWQ,
- * CVMX_POW_TAG_OP_SWTAG_FULL,
- * CVMX_POW_TAG_OP_SWTAG_DESCH, and
- * CVMX_POW_TAG_OP_UPDATE_WQP_GRP
- */
- uint64_t grp:4;
- /*
- * The type of the tag. type is used for everything
- * except CVMX_POW_TAG_OP_DESCH,
- * CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and
- * CVMX_POW_TAG_OP_*_NSCHED
- */
- uint64_t type:3;
- /*
- * The actual tag. tag is used for everything except
- * CVMX_POW_TAG_OP_DESCH,
- * CVMX_POW_TAG_OP_UPDATE_WQP_GRP, and
- * CVMX_POW_TAG_OP_*_NSCHED
- */
- uint64_t tag:32;
- } s;
-} cvmx_pow_tag_req_t;
-
-/**
- * This structure describes the address to load stuff from POW
- */
-typedef union {
- uint64_t u64;
-
- /**
- * Address for new work request loads (did<2:0> == 0)
- */
- struct {
- /* Mips64 address region. Should be CVMX_IO_SEG */
- uint64_t mem_region:2;
- /* Must be zero */
- uint64_t reserved_49_61:13;
- /* Must be one */
- uint64_t is_io:1;
- /* the ID of POW -- did<2:0> == 0 in this case */
- uint64_t did:8;
- /* Must be zero */
- uint64_t reserved_4_39:36;
- /*
- * If set, don't return load response until work is
- * available.
- */
- uint64_t wait:1;
- /* Must be zero */
- uint64_t reserved_0_2:3;
- } swork;
-
- /**
- * Address for loads to get POW internal status
- */
- struct {
- /* Mips64 address region. Should be CVMX_IO_SEG */
- uint64_t mem_region:2;
- /* Must be zero */
- uint64_t reserved_49_61:13;
- /* Must be one */
- uint64_t is_io:1;
- /* the ID of POW -- did<2:0> == 1 in this case */
- uint64_t did:8;
- /* Must be zero */
- uint64_t reserved_10_39:30;
- /* The core id to get status for */
- uint64_t coreid:4;
- /*
- * If set and get_cur is set, return reverse tag-list
- * pointer rather than forward tag-list pointer.
- */
- uint64_t get_rev:1;
- /*
- * If set, return current status rather than pending
- * status.
- */
- uint64_t get_cur:1;
- /*
- * If set, get the work-queue pointer rather than
- * tag/type.
- */
- uint64_t get_wqp:1;
- /* Must be zero */
- uint64_t reserved_0_2:3;
- } sstatus;
-
- /**
- * Address for memory loads to get POW internal state
- */
- struct {
- /* Mips64 address region. Should be CVMX_IO_SEG */
- uint64_t mem_region:2;
- /* Must be zero */
- uint64_t reserved_49_61:13;
- /* Must be one */
- uint64_t is_io:1;
- /* the ID of POW -- did<2:0> == 2 in this case */
- uint64_t did:8;
- /* Must be zero */
- uint64_t reserved_16_39:24;
- /* POW memory index */
- uint64_t index:11;
- /*
- * If set, return deschedule information rather than
- * the standard response for work-queue index (invalid
- * if the work-queue entry is not on the deschedule
- * list).
- */
- uint64_t get_des:1;
- /*
- * If set, get the work-queue pointer rather than
- * tag/type (no effect when get_des set).
- */
- uint64_t get_wqp:1;
- /* Must be zero */
- uint64_t reserved_0_2:3;
- } smemload;
-
- /**
- * Address for index/pointer loads
- */
- struct {
- /* Mips64 address region. Should be CVMX_IO_SEG */
- uint64_t mem_region:2;
- /* Must be zero */
- uint64_t reserved_49_61:13;
- /* Must be one */
- uint64_t is_io:1;
- /* the ID of POW -- did<2:0> == 3 in this case */
- uint64_t did:8;
- /* Must be zero */
- uint64_t reserved_9_39:31;
- /*
- * when {get_rmt ==0 AND get_des_get_tail == 0}, this
- * field selects one of eight POW internal-input
- * queues (0-7), one per QOS level; values 8-15 are
- * illegal in this case; when {get_rmt ==0 AND
- * get_des_get_tail == 1}, this field selects one of
- * 16 deschedule lists (per group); when get_rmt ==1,
- * this field selects one of 16 memory-input queue
- * lists. The two memory-input queue lists associated
- * with each QOS level are:
- *
- * - qosgrp = 0, qosgrp = 8: QOS0
- * - qosgrp = 1, qosgrp = 9: QOS1
- * - qosgrp = 2, qosgrp = 10: QOS2
- * - qosgrp = 3, qosgrp = 11: QOS3
- * - qosgrp = 4, qosgrp = 12: QOS4
- * - qosgrp = 5, qosgrp = 13: QOS5
- * - qosgrp = 6, qosgrp = 14: QOS6
- * - qosgrp = 7, qosgrp = 15: QOS7
- */
- uint64_t qosgrp:4;
- /*
- * If set and get_rmt is clear, return deschedule list
- * indexes rather than indexes for the specified qos
- * level; if set and get_rmt is set, return the tail
- * pointer rather than the head pointer for the
- * specified qos level.
- */
- uint64_t get_des_get_tail:1;
- /*
- * If set, return remote pointers rather than the
- * local indexes for the specified qos level.
- */
- uint64_t get_rmt:1;
- /* Must be zero */
- uint64_t reserved_0_2:3;
- } sindexload;
-
- /**
- * address for NULL_RD request (did<2:0> == 4) when this is read,
- * HW attempts to change the state to NULL if it is NULL_NULL (the
- * hardware cannot switch from NULL_NULL to NULL if a POW entry is
- * not available - software may need to recover by finishing
- * another piece of work before a POW entry can ever become
- * available.)
- */
- struct {
- /* Mips64 address region. Should be CVMX_IO_SEG */
- uint64_t mem_region:2;
- /* Must be zero */
- uint64_t reserved_49_61:13;
- /* Must be one */
- uint64_t is_io:1;
- /* the ID of POW -- did<2:0> == 4 in this case */
- uint64_t did:8;
- /* Must be zero */
- uint64_t reserved_0_39:40;
- } snull_rd;
-} cvmx_pow_load_addr_t;
-
-/**
- * This structure defines the response to a load/SENDSINGLE to POW
- * (except CSR reads)
- */
-typedef union {
- uint64_t u64;
-
- /**
- * Response to new work request loads
- */
- struct {
- /*
-		 * Set when no new work queue entry was returned. If
-		 * there was de-scheduled work, the HW will
-		 * definitely return it. When this bit is set, it
-		 * could mean either:
- *
- * - There was no work, or
- *
- * - There was no work that the HW could find. This
- * case can happen, regardless of the wait bit value
- * in the original request, when there is work in
- * the IQ's that is too deep down the list.
- */
- uint64_t no_work:1;
- /* Must be zero */
- uint64_t reserved_40_62:23;
- /* 36 in O1 -- the work queue pointer */
- uint64_t addr:40;
- } s_work;
-
- /**
- * Result for a POW Status Load (when get_cur==0 and get_wqp==0)
- */
- struct {
- uint64_t reserved_62_63:2;
- /* Set when there is a pending non-NULL SWTAG or
- * SWTAG_FULL, and the POW entry has not left the list
- * for the original tag. */
- uint64_t pend_switch:1;
- /* Set when SWTAG_FULL and pend_switch is set. */
- uint64_t pend_switch_full:1;
- /*
- * Set when there is a pending NULL SWTAG, or an
- * implicit switch to NULL.
- */
- uint64_t pend_switch_null:1;
- /* Set when there is a pending DESCHED or SWTAG_DESCHED. */
- uint64_t pend_desched:1;
- /*
- * Set when there is a pending SWTAG_DESCHED and
- * pend_desched is set.
- */
- uint64_t pend_desched_switch:1;
- /* Set when nosched is desired and pend_desched is set. */
- uint64_t pend_nosched:1;
- /* Set when there is a pending GET_WORK. */
- uint64_t pend_new_work:1;
- /*
- * When pend_new_work is set, this bit indicates that
- * the wait bit was set.
- */
- uint64_t pend_new_work_wait:1;
- /* Set when there is a pending NULL_RD. */
- uint64_t pend_null_rd:1;
- /* Set when there is a pending CLR_NSCHED. */
- uint64_t pend_nosched_clr:1;
- uint64_t reserved_51:1;
- /* This is the index when pend_nosched_clr is set. */
- uint64_t pend_index:11;
- /*
- * This is the new_grp when (pend_desched AND
- * pend_desched_switch) is set.
- */
- uint64_t pend_grp:4;
- uint64_t reserved_34_35:2;
- /*
- * This is the tag type when pend_switch or
- * (pend_desched AND pend_desched_switch) are set.
- */
- uint64_t pend_type:2;
- /*
- * - this is the tag when pend_switch or (pend_desched
- * AND pend_desched_switch) are set.
- */
- uint64_t pend_tag:32;
- } s_sstatus0;
-
- /**
- * Result for a POW Status Load (when get_cur==0 and get_wqp==1)
- */
- struct {
- uint64_t reserved_62_63:2;
- /*
- * Set when there is a pending non-NULL SWTAG or
- * SWTAG_FULL, and the POW entry has not left the list
- * for the original tag.
- */
- uint64_t pend_switch:1;
- /* Set when SWTAG_FULL and pend_switch is set. */
- uint64_t pend_switch_full:1;
- /*
- * Set when there is a pending NULL SWTAG, or an
- * implicit switch to NULL.
- */
- uint64_t pend_switch_null:1;
- /*
- * Set when there is a pending DESCHED or
- * SWTAG_DESCHED.
- */
- uint64_t pend_desched:1;
- /*
- * Set when there is a pending SWTAG_DESCHED and
- * pend_desched is set.
- */
- uint64_t pend_desched_switch:1;
- /* Set when nosched is desired and pend_desched is set. */
- uint64_t pend_nosched:1;
- /* Set when there is a pending GET_WORK. */
- uint64_t pend_new_work:1;
- /*
- * When pend_new_work is set, this bit indicates that
- * the wait bit was set.
- */
- uint64_t pend_new_work_wait:1;
- /* Set when there is a pending NULL_RD. */
- uint64_t pend_null_rd:1;
- /* Set when there is a pending CLR_NSCHED. */
- uint64_t pend_nosched_clr:1;
- uint64_t reserved_51:1;
- /* This is the index when pend_nosched_clr is set. */
- uint64_t pend_index:11;
- /*
- * This is the new_grp when (pend_desched AND
- * pend_desched_switch) is set.
- */
- uint64_t pend_grp:4;
- /* This is the wqp when pend_nosched_clr is set. */
- uint64_t pend_wqp:36;
- } s_sstatus1;
-
- /**
- * Result for a POW Status Load (when get_cur==1, get_wqp==0, and
- * get_rev==0)
- */
- struct {
- uint64_t reserved_62_63:2;
- /*
- * Points to the next POW entry in the tag list when
- * tail == 0 (and tag_type is not NULL or NULL_NULL).
- */
- uint64_t link_index:11;
- /* The POW entry attached to the core. */
- uint64_t index:11;
- /*
- * The group attached to the core (updated when new
- * tag list entered on SWTAG_FULL).
- */
- uint64_t grp:4;
- /*
- * Set when this POW entry is at the head of its tag
- * list (also set when in the NULL or NULL_NULL
- * state).
- */
- uint64_t head:1;
- /*
- * Set when this POW entry is at the tail of its tag
- * list (also set when in the NULL or NULL_NULL
- * state).
- */
- uint64_t tail:1;
- /*
- * The tag type attached to the core (updated when new
- * tag list entered on SWTAG, SWTAG_FULL, or
- * SWTAG_DESCHED).
- */
- uint64_t tag_type:2;
- /*
- * The tag attached to the core (updated when new tag
- * list entered on SWTAG, SWTAG_FULL, or
- * SWTAG_DESCHED).
- */
- uint64_t tag:32;
- } s_sstatus2;
-
- /**
- * Result for a POW Status Load (when get_cur==1, get_wqp==0, and get_rev==1)
- */
- struct {
- uint64_t reserved_62_63:2;
- /*
- * Points to the prior POW entry in the tag list when
- * head == 0 (and tag_type is not NULL or
- * NULL_NULL). This field is unpredictable when the
- * core's state is NULL or NULL_NULL.
- */
- uint64_t revlink_index:11;
- /* The POW entry attached to the core. */
- uint64_t index:11;
- /*
- * The group attached to the core (updated when new
- * tag list entered on SWTAG_FULL).
- */
- uint64_t grp:4;
- /* Set when this POW entry is at the head of its tag
- * list (also set when in the NULL or NULL_NULL
- * state).
- */
- uint64_t head:1;
- /*
- * Set when this POW entry is at the tail of its tag
- * list (also set when in the NULL or NULL_NULL
- * state).
- */
- uint64_t tail:1;
- /*
- * The tag type attached to the core (updated when new
- * tag list entered on SWTAG, SWTAG_FULL, or
- * SWTAG_DESCHED).
- */
- uint64_t tag_type:2;
- /*
- * The tag attached to the core (updated when new tag
- * list entered on SWTAG, SWTAG_FULL, or
- * SWTAG_DESCHED).
- */
- uint64_t tag:32;
- } s_sstatus3;
-
- /**
- * Result for a POW Status Load (when get_cur==1, get_wqp==1, and
- * get_rev==0)
- */
- struct {
- uint64_t reserved_62_63:2;
- /*
- * Points to the next POW entry in the tag list when
- * tail == 0 (and tag_type is not NULL or NULL_NULL).
- */
- uint64_t link_index:11;
- /* The POW entry attached to the core. */
- uint64_t index:11;
- /*
- * The group attached to the core (updated when new
- * tag list entered on SWTAG_FULL).
- */
- uint64_t grp:4;
- /*
- * The wqp attached to the core (updated when new tag
- * list entered on SWTAG_FULL).
- */
- uint64_t wqp:36;
- } s_sstatus4;
-
- /**
- * Result for a POW Status Load (when get_cur==1, get_wqp==1, and
- * get_rev==1)
- */
- struct {
- uint64_t reserved_62_63:2;
- /*
- * Points to the prior POW entry in the tag list when
- * head == 0 (and tag_type is not NULL or
- * NULL_NULL). This field is unpredictable when the
- * core's state is NULL or NULL_NULL.
- */
- uint64_t revlink_index:11;
- /* The POW entry attached to the core. */
- uint64_t index:11;
- /*
- * The group attached to the core (updated when new
- * tag list entered on SWTAG_FULL).
- */
- uint64_t grp:4;
- /*
- * The wqp attached to the core (updated when new tag
- * list entered on SWTAG_FULL).
- */
- uint64_t wqp:36;
- } s_sstatus5;
-
- /**
- * Result For POW Memory Load (get_des == 0 and get_wqp == 0)
- */
- struct {
- uint64_t reserved_51_63:13;
- /*
- * The next entry in the input, free, descheduled_head
- * list (unpredictable if entry is the tail of the
- * list).
- */
- uint64_t next_index:11;
- /* The group of the POW entry. */
- uint64_t grp:4;
- uint64_t reserved_35:1;
- /*
- * Set when this POW entry is at the tail of its tag
- * list (also set when in the NULL or NULL_NULL
- * state).
- */
- uint64_t tail:1;
- /* The tag type of the POW entry. */
- uint64_t tag_type:2;
- /* The tag of the POW entry. */
- uint64_t tag:32;
- } s_smemload0;
-
- /**
- * Result For POW Memory Load (get_des == 0 and get_wqp == 1)
- */
- struct {
- uint64_t reserved_51_63:13;
- /*
- * The next entry in the input, free, descheduled_head
- * list (unpredictable if entry is the tail of the
- * list).
- */
- uint64_t next_index:11;
- /* The group of the POW entry. */
- uint64_t grp:4;
- /* The WQP held in the POW entry. */
- uint64_t wqp:36;
- } s_smemload1;
-
- /**
- * Result For POW Memory Load (get_des == 1)
- */
- struct {
- uint64_t reserved_51_63:13;
- /*
- * The next entry in the tag list connected to the
- * descheduled head.
- */
- uint64_t fwd_index:11;
- /* The group of the POW entry. */
- uint64_t grp:4;
- /* The nosched bit for the POW entry. */
- uint64_t nosched:1;
- /* There is a pending tag switch */
- uint64_t pend_switch:1;
- /*
- * The next tag type for the new tag list when
- * pend_switch is set.
- */
- uint64_t pend_type:2;
- /*
- * The next tag for the new tag list when pend_switch
- * is set.
- */
- uint64_t pend_tag:32;
- } s_smemload2;
-
- /**
- * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 0)
- */
- struct {
- uint64_t reserved_52_63:12;
- /*
- * set when there is one or more POW entries on the
- * free list.
- */
- uint64_t free_val:1;
- /*
- * set when there is exactly one POW entry on the free
- * list.
- */
- uint64_t free_one:1;
- uint64_t reserved_49:1;
- /*
- * when free_val is set, indicates the first entry on
- * the free list.
- */
- uint64_t free_head:11;
- uint64_t reserved_37:1;
- /*
- * when free_val is set, indicates the last entry on
- * the free list.
- */
- uint64_t free_tail:11;
- /*
- * set when there is one or more POW entries on the
- * input Q list selected by qosgrp.
- */
- uint64_t loc_val:1;
- /*
- * set when there is exactly one POW entry on the
- * input Q list selected by qosgrp.
- */
- uint64_t loc_one:1;
- uint64_t reserved_23:1;
- /*
- * when loc_val is set, indicates the first entry on
- * the input Q list selected by qosgrp.
- */
- uint64_t loc_head:11;
- uint64_t reserved_11:1;
- /*
- * when loc_val is set, indicates the last entry on
- * the input Q list selected by qosgrp.
- */
- uint64_t loc_tail:11;
- } sindexload0;
-
- /**
- * Result For POW Index/Pointer Load (get_rmt == 0/get_des_get_tail == 1)
- */
- struct {
- uint64_t reserved_52_63:12;
- /*
- * set when there is one or more POW entries on the
- * nosched list.
- */
- uint64_t nosched_val:1;
- /*
- * set when there is exactly one POW entry on the
- * nosched list.
- */
- uint64_t nosched_one:1;
- uint64_t reserved_49:1;
- /*
- * when nosched_val is set, indicates the first entry
- * on the nosched list.
- */
- uint64_t nosched_head:11;
- uint64_t reserved_37:1;
- /*
- * when nosched_val is set, indicates the last entry
- * on the nosched list.
- */
- uint64_t nosched_tail:11;
- /*
- * set when there is one or more descheduled heads on
- * the descheduled list selected by qosgrp.
- */
- uint64_t des_val:1;
- /*
- * set when there is exactly one descheduled head on
- * the descheduled list selected by qosgrp.
- */
- uint64_t des_one:1;
- uint64_t reserved_23:1;
- /*
- * when des_val is set, indicates the first
- * descheduled head on the descheduled list selected
- * by qosgrp.
- */
- uint64_t des_head:11;
- uint64_t reserved_11:1;
- /*
- * when des_val is set, indicates the last descheduled
- * head on the descheduled list selected by qosgrp.
- */
- uint64_t des_tail:11;
- } sindexload1;
-
- /**
- * Result For POW Index/Pointer Load (get_rmt == 1/get_des_get_tail == 0)
- */
- struct {
- uint64_t reserved_39_63:25;
- /*
- * Set when this DRAM list is the current head
- * (i.e. is the next to be reloaded when the POW
- * hardware reloads a POW entry from DRAM). The POW
- * hardware alternates between the two DRAM lists
- * associated with a QOS level when it reloads work
- * from DRAM into the POW unit.
- */
- uint64_t rmt_is_head:1;
- /*
- * Set when the DRAM portion of the input Q list
- * selected by qosgrp contains one or more pieces of
- * work.
- */
- uint64_t rmt_val:1;
- /*
- * Set when the DRAM portion of the input Q list
- * selected by qosgrp contains exactly one piece of
- * work.
- */
- uint64_t rmt_one:1;
- /*
- * When rmt_val is set, indicates the first piece of
- * work on the DRAM input Q list selected by
- * qosgrp.
- */
- uint64_t rmt_head:36;
- } sindexload2;
-
- /**
- * Result For POW Index/Pointer Load (get_rmt ==
- * 1/get_des_get_tail == 1)
- */
- struct {
- uint64_t reserved_39_63:25;
- /*
- * set when this DRAM list is the current head
- * (i.e. is the next to be reloaded when the POW
- * hardware reloads a POW entry from DRAM). The POW
- * hardware alternates between the two DRAM lists
- * associated with a QOS level when it reloads work
- * from DRAM into the POW unit.
- */
- uint64_t rmt_is_head:1;
- /*
- * set when the DRAM portion of the input Q list
- * selected by qosgrp contains one or more pieces of
- * work.
- */
- uint64_t rmt_val:1;
- /*
- * set when the DRAM portion of the input Q list
- * selected by qosgrp contains exactly one piece of
- * work.
- */
- uint64_t rmt_one:1;
- /*
- * when rmt_val is set, indicates the last piece of
- * work on the DRAM input Q list selected by
- * qosgrp.
- */
- uint64_t rmt_tail:36;
- } sindexload3;
-
- /**
- * Response to NULL_RD request loads
- */
- struct {
- uint64_t unused:62;
-		/* of type enum cvmx_pow_tag_type. state is one of the
- * following:
- *
- * - CVMX_POW_TAG_TYPE_ORDERED
- * - CVMX_POW_TAG_TYPE_ATOMIC
- * - CVMX_POW_TAG_TYPE_NULL
- * - CVMX_POW_TAG_TYPE_NULL_NULL
- */
- uint64_t state:2;
- } s_null_rd;
-
-} cvmx_pow_tag_load_resp_t;
-
-/**
- * This structure describes the address used for stores to the POW.
- * The store address is meaningful on stores to the POW. The
- * hardware assumes that an aligned 64-bit store was used for all
- * these stores. Note the assumption that the work queue entry is
- * aligned on an 8-byte boundary (since the low-order 3 address bits
- * must be zero). Note that not all fields are used by all
- * operations.
- *
- * NOTE: The following is the behavior of the pending switch bit at the PP
- * for POW stores (i.e. when did<7:3> == 0xc)
- * - did<2:0> == 0 => pending switch bit is set
- * - did<2:0> == 1 => no effect on the pending switch bit
- * - did<2:0> == 3 => pending switch bit is cleared
- * - did<2:0> == 7 => no effect on the pending switch bit
- * - did<2:0> == others => must not be used
- * - No other loads/stores have an effect on the pending switch bit
- * - The switch bus from POW can clear the pending switch bit
- *
- * NOTE: did<2:0> == 2 is used by the HW for a special single-cycle
- * ADDWQ command that only contains the pointer. SW must never use
- * did<2:0> == 2.
- */
-typedef union {
- /**
- * Unsigned 64 bit integer representation of store address
- */
- uint64_t u64;
-
- struct {
- /* Memory region. Should be CVMX_IO_SEG in most cases */
- uint64_t mem_reg:2;
- uint64_t reserved_49_61:13; /* Must be zero */
- uint64_t is_io:1; /* Must be one */
- /* Device ID of POW. Note that different sub-dids are used. */
- uint64_t did:8;
- uint64_t reserved_36_39:4; /* Must be zero */
- /* Address field. addr<2:0> must be zero */
- uint64_t addr:36;
- } stag;
-} cvmx_pow_tag_store_addr_t;
-
-/**
- * decode of the store data when an IOBDMA SENDSINGLE is sent to POW
- */
-typedef union {
- uint64_t u64;
-
- struct {
- /*
- * the (64-bit word) location in scratchpad to write
- * to (if len != 0)
- */
- uint64_t scraddr:8;
- /* the number of words in the response (0 => no response) */
- uint64_t len:8;
- /* the ID of the device on the non-coherent bus */
- uint64_t did:8;
- uint64_t unused:36;
- /* if set, don't return load response until work is available */
- uint64_t wait:1;
- uint64_t unused2:3;
- } s;
-
-} cvmx_pow_iobdma_store_t;
-
-/* CSR typedefs have been moved to cvmx-csr-*.h */
-
-/**
- * Get the POW tag for this core. This returns the current
- * tag type, tag, group, and POW entry index associated with
- * this core. Index is only valid if the tag type isn't NULL_NULL.
- * If a tag switch is pending this routine returns the tag before
- * the tag switch, not after.
- *
- * Returns Current tag
- */
-static inline cvmx_pow_tag_req_t cvmx_pow_get_current_tag(void)
-{
- cvmx_pow_load_addr_t load_addr;
- cvmx_pow_tag_load_resp_t load_resp;
- cvmx_pow_tag_req_t result;
-
- load_addr.u64 = 0;
- load_addr.sstatus.mem_region = CVMX_IO_SEG;
- load_addr.sstatus.is_io = 1;
- load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
- load_addr.sstatus.coreid = cvmx_get_core_num();
- load_addr.sstatus.get_cur = 1;
- load_resp.u64 = cvmx_read_csr(load_addr.u64);
- result.u64 = 0;
- result.s.grp = load_resp.s_sstatus2.grp;
- result.s.index = load_resp.s_sstatus2.index;
- result.s.type = load_resp.s_sstatus2.tag_type;
- result.s.tag = load_resp.s_sstatus2.tag;
- return result;
-}
-
-/**
- * Get the POW WQE for this core. This returns the work queue
- * entry currently associated with this core.
- *
- * Returns WQE pointer
- */
-static inline cvmx_wqe_t *cvmx_pow_get_current_wqp(void)
-{
- cvmx_pow_load_addr_t load_addr;
- cvmx_pow_tag_load_resp_t load_resp;
-
- load_addr.u64 = 0;
- load_addr.sstatus.mem_region = CVMX_IO_SEG;
- load_addr.sstatus.is_io = 1;
- load_addr.sstatus.did = CVMX_OCT_DID_TAG_TAG1;
- load_addr.sstatus.coreid = cvmx_get_core_num();
- load_addr.sstatus.get_cur = 1;
- load_addr.sstatus.get_wqp = 1;
- load_resp.u64 = cvmx_read_csr(load_addr.u64);
- return (cvmx_wqe_t *) cvmx_phys_to_ptr(load_resp.s_sstatus4.wqp);
-}
-
-#ifndef CVMX_MF_CHORD
-#define CVMX_MF_CHORD(dest) CVMX_RDHWR(dest, 30)
-#endif
-
-/**
- * Print a warning if a tag switch is pending for this core
- *
- * @function: Function name checking for a pending tag switch
- */
-static inline void __cvmx_pow_warn_if_pending_switch(const char *function)
-{
- uint64_t switch_complete;
- CVMX_MF_CHORD(switch_complete);
- if (!switch_complete)
- pr_warning("%s called with tag switch in progress\n", function);
-}
-
-/**
- * Waits for a tag switch to complete by polling the completion bit.
- * Note that switches to NULL complete immediately and do not need
- * to be waited for.
- */
-static inline void cvmx_pow_tag_sw_wait(void)
-{
- const uint64_t MAX_CYCLES = 1ull << 31;
- uint64_t switch_complete;
- uint64_t start_cycle = cvmx_get_cycle();
- while (1) {
- CVMX_MF_CHORD(switch_complete);
- if (unlikely(switch_complete))
- break;
- if (unlikely(cvmx_get_cycle() > start_cycle + MAX_CYCLES)) {
- pr_warning("Tag switch is taking a long time, "
- "possible deadlock\n");
- start_cycle = -MAX_CYCLES - 1;
- }
- }
-}
-
-/**
- * Synchronous work request. Requests work from the POW.
- * This function does NOT wait for previous tag switches to complete,
- * so the caller must ensure that there is not a pending tag switch.
- *
- * @wait: When set, call stalls until work becomes available, or times out.
- * If not set, returns immediately.
- *
- * Returns the WQE pointer from POW, or NULL if no work
- * was available.
- */
-static inline cvmx_wqe_t *cvmx_pow_work_request_sync_nocheck(cvmx_pow_wait_t
- wait)
-{
- cvmx_pow_load_addr_t ptr;
- cvmx_pow_tag_load_resp_t result;
-
- if (CVMX_ENABLE_POW_CHECKS)
- __cvmx_pow_warn_if_pending_switch(__func__);
-
- ptr.u64 = 0;
- ptr.swork.mem_region = CVMX_IO_SEG;
- ptr.swork.is_io = 1;
- ptr.swork.did = CVMX_OCT_DID_TAG_SWTAG;
- ptr.swork.wait = wait;
-
- result.u64 = cvmx_read_csr(ptr.u64);
-
- if (result.s_work.no_work)
- return NULL;
- else
- return (cvmx_wqe_t *) cvmx_phys_to_ptr(result.s_work.addr);
-}
-
-/**
- * Synchronous work request. Requests work from the POW.
- * This function waits for any previous tag switch to complete before
- * requesting the new work.
- *
- * @wait: When set, call stalls until work becomes available, or times out.
- * If not set, returns immediately.
- *
- * Returns the WQE pointer from POW, or NULL if no work
- * was available.
- */
-static inline cvmx_wqe_t *cvmx_pow_work_request_sync(cvmx_pow_wait_t wait)
-{
- if (CVMX_ENABLE_POW_CHECKS)
- __cvmx_pow_warn_if_pending_switch(__func__);
-
- /* Must not have a switch pending when requesting work */
- cvmx_pow_tag_sw_wait();
- return cvmx_pow_work_request_sync_nocheck(wait);
-
-}
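As a usage sketch (not part of the original header; process_packet() is a hypothetical application handler), a per-core dispatch step might look like:

	static void example_dispatch_once(void)
	{
		cvmx_wqe_t *work;

		/* Stall (with HW timeout) until the POW schedules a WQE to this core */
		work = cvmx_pow_work_request_sync(CVMX_POW_WAIT);
		if (cvmx_pow_work_invalid(work))
			return;			/* no work was returned */

		process_packet(work);		/* hypothetical handler */
	}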
-
-/**
- * Synchronous null_rd request. Requests a switch out of NULL_NULL POW state.
- * This function waits for any previous tag switch to complete before
- * requesting the null_rd.
- *
- * Returns the POW state of type enum cvmx_pow_tag_type.
- */
-static inline enum cvmx_pow_tag_type cvmx_pow_work_request_null_rd(void)
-{
- cvmx_pow_load_addr_t ptr;
- cvmx_pow_tag_load_resp_t result;
-
- if (CVMX_ENABLE_POW_CHECKS)
- __cvmx_pow_warn_if_pending_switch(__func__);
-
- /* Must not have a switch pending when requesting work */
- cvmx_pow_tag_sw_wait();
-
- ptr.u64 = 0;
- ptr.snull_rd.mem_region = CVMX_IO_SEG;
- ptr.snull_rd.is_io = 1;
- ptr.snull_rd.did = CVMX_OCT_DID_TAG_NULL_RD;
-
- result.u64 = cvmx_read_csr(ptr.u64);
-
- return (enum cvmx_pow_tag_type) result.s_null_rd.state;
-}
-
-/**
- * Asynchronous work request. Work is requested from the POW unit,
- * and should later be checked with function
- * cvmx_pow_work_response_async. This function does NOT wait for
- * previous tag switches to complete, so the caller must ensure that
- * there is not a pending tag switch.
- *
- * @scr_addr: Scratch memory address that response will be returned
- * to, which is either a valid WQE, or a response with the
- * invalid bit set. Byte address, must be 8 byte aligned.
- *
- * @wait: 1 to cause response to wait for work to become available (or
- * timeout), 0 to cause response to return immediately
- */
-static inline void cvmx_pow_work_request_async_nocheck(int scr_addr,
- cvmx_pow_wait_t wait)
-{
- cvmx_pow_iobdma_store_t data;
-
- if (CVMX_ENABLE_POW_CHECKS)
- __cvmx_pow_warn_if_pending_switch(__func__);
-
- /* scr_addr must be 8 byte aligned */
- data.s.scraddr = scr_addr >> 3;
- data.s.len = 1;
- data.s.did = CVMX_OCT_DID_TAG_SWTAG;
- data.s.wait = wait;
- cvmx_send_single(data.u64);
-}
-
-/**
- * Asynchronous work request. Work is requested from the POW unit,
- * and should later be checked with function
- * cvmx_pow_work_response_async. This function waits for any previous
- * tag switch to complete before requesting the new work.
- *
- * @scr_addr: Scratch memory address that response will be returned
- * to, which is either a valid WQE, or a response with the
- * invalid bit set. Byte address, must be 8 byte aligned.
- *
- * @wait: 1 to cause response to wait for work to become available (or
- * timeout), 0 to cause response to return immediately
- */
-static inline void cvmx_pow_work_request_async(int scr_addr,
- cvmx_pow_wait_t wait)
-{
- if (CVMX_ENABLE_POW_CHECKS)
- __cvmx_pow_warn_if_pending_switch(__func__);
-
- /* Must not have a switch pending when requesting work */
- cvmx_pow_tag_sw_wait();
- cvmx_pow_work_request_async_nocheck(scr_addr, wait);
-}
-
-/**
- * Gets the result of an asynchronous work request. Performs an IOBDMA sync
- * to wait for the response.
- *
- * @scr_addr: Scratch memory address to get the result from. Byte address,
- *            must be 8 byte aligned.
- *
- * Returns the WQE from the scratch register, or NULL if no
- * work was available.
- */
-static inline cvmx_wqe_t *cvmx_pow_work_response_async(int scr_addr)
-{
- cvmx_pow_tag_load_resp_t result;
-
- CVMX_SYNCIOBDMA;
- result.u64 = cvmx_scratch_read64(scr_addr);
-
- if (result.s_work.no_work)
- return NULL;
- else
- return (cvmx_wqe_t *) cvmx_phys_to_ptr(result.s_work.addr);
-}
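A hedged sketch of the asynchronous pattern: prefetch the next WQE into scratch, overlap other processing, then collect the response. SCR_OFF_WQE is an assumed scratchpad offset, not something defined by this header:

	#define SCR_OFF_WQE 0	/* assumed 8-byte-aligned scratchpad offset */

	static cvmx_wqe_t *example_prefetch_work(void)
	{
		/* Issue the request; the response lands in local scratch memory */
		cvmx_pow_work_request_async(SCR_OFF_WQE, CVMX_POW_NO_WAIT);

		/* ... overlap other per-packet processing here ... */

		/* The SYNCIOBDMA inside this call waits for the response */
		return cvmx_pow_work_response_async(SCR_OFF_WQE);	/* NULL if none */
	}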
-
-/**
- * Checks if a work queue entry pointer returned by a work
- * request is valid. It may be invalid due to no work
- * being available or due to a timeout.
- *
- * @wqe_ptr: pointer to a work queue entry returned by the POW
- *
- * Returns 0 if pointer is valid
- * 1 if invalid (no work was returned)
- */
-static inline uint64_t cvmx_pow_work_invalid(cvmx_wqe_t *wqe_ptr)
-{
- return wqe_ptr == NULL;
-}
-
-/**
- * Starts a tag switch to the provided tag value and tag type.
- * Completion for the tag switch must be checked for separately. This
- * function does NOT update the work queue entry in dram to match tag
- * value and type, so the application must keep track of these if they
- * are important to the application. This tag switch command must not
- * be used for switches to NULL, as the tag switch pending bit will be
- * set by the switch request, but never cleared by the hardware.
- *
- * NOTE: This should not be used when switching from a NULL tag. Use
- * cvmx_pow_tag_sw_full() instead.
- *
- * This function does no checks, so the caller must ensure that any
- * previous tag switch has completed.
- *
- * @tag: new tag value
- * @tag_type: new tag type (ordered or atomic)
- */
-static inline void cvmx_pow_tag_sw_nocheck(uint32_t tag,
- enum cvmx_pow_tag_type tag_type)
-{
- cvmx_addr_t ptr;
- cvmx_pow_tag_req_t tag_req;
-
- if (CVMX_ENABLE_POW_CHECKS) {
- cvmx_pow_tag_req_t current_tag;
- __cvmx_pow_warn_if_pending_switch(__func__);
- current_tag = cvmx_pow_get_current_tag();
- if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
- pr_warning("%s called with NULL_NULL tag\n",
- __func__);
- if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
- pr_warning("%s called with NULL tag\n", __func__);
- if ((current_tag.s.type == tag_type)
- && (current_tag.s.tag == tag))
- pr_warning("%s called to perform a tag switch to the "
- "same tag\n",
- __func__);
- if (tag_type == CVMX_POW_TAG_TYPE_NULL)
- pr_warning("%s called to perform a tag switch to "
- "NULL. Use cvmx_pow_tag_sw_null() instead\n",
- __func__);
- }
-
- /*
- * Note that WQE in DRAM is not updated here, as the POW does
- * not read from DRAM once the WQE is in flight. See hardware
- * manual for complete details. It is the application's
- * responsibility to keep track of the current tag value if
- * that is important.
- */
-
- tag_req.u64 = 0;
- tag_req.s.op = CVMX_POW_TAG_OP_SWTAG;
- tag_req.s.tag = tag;
- tag_req.s.type = tag_type;
-
- ptr.u64 = 0;
- ptr.sio.mem_region = CVMX_IO_SEG;
- ptr.sio.is_io = 1;
- ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG;
-
-	/* once this store arrives at POW, it will attempt the switch;
-	   software must wait for the switch to complete separately */
- cvmx_write_io(ptr.u64, tag_req.u64);
-}
-
-/**
- * Starts a tag switch to the provided tag value and tag type.
- * Completion for the tag switch must be checked for separately. This
- * function does NOT update the work queue entry in dram to match tag
- * value and type, so the application must keep track of these if they
- * are important to the application. This tag switch command must not
- * be used for switches to NULL, as the tag switch pending bit will be
- * set by the switch request, but never cleared by the hardware.
- *
- * NOTE: This should not be used when switching from a NULL tag. Use
- * cvmx_pow_tag_sw_full() instead.
- *
- * This function waits for any previous tag switch to complete, and also
- * displays an error on tag switches to NULL.
- *
- * @tag: new tag value
- * @tag_type: new tag type (ordered or atomic)
- */
-static inline void cvmx_pow_tag_sw(uint32_t tag,
- enum cvmx_pow_tag_type tag_type)
-{
- if (CVMX_ENABLE_POW_CHECKS)
- __cvmx_pow_warn_if_pending_switch(__func__);
-
- /*
- * Note that WQE in DRAM is not updated here, as the POW does
- * not read from DRAM once the WQE is in flight. See hardware
- * manual for complete details. It is the application's
- * responsibility to keep track of the current tag value if
- * that is important.
- */
-
- /*
- * Ensure that there is not a pending tag switch, as a tag
- * switch cannot be started if a previous switch is still
- * pending.
- */
- cvmx_pow_tag_sw_wait();
- cvmx_pow_tag_sw_nocheck(tag, tag_type);
-}
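For illustration only (flow_tag is assumed to have been derived earlier, and update_flow_state() is a hypothetical critical section), a core can serialize access to per-flow data by switching to an ATOMIC tag and waiting for the switch to complete before touching shared state:

	static void example_atomic_update(uint32_t flow_tag)
	{
		/* Start the switch; at most one core may hold a given ATOMIC tag */
		cvmx_pow_tag_sw(flow_tag, CVMX_POW_TAG_TYPE_ATOMIC);

		/* The switch completes asynchronously; wait before the critical section */
		cvmx_pow_tag_sw_wait();

		update_flow_state(flow_tag);	/* hypothetical shared-state update */
	}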
-
-/**
- * Starts a tag switch to the provided tag value and tag type.
- * Completion for the tag switch must be checked for separately. This
- * function does NOT update the work queue entry in dram to match tag
- * value and type, so the application must keep track of these if they
- * are important to the application. This tag switch command must not
- * be used for switches to NULL, as the tag switch pending bit will be
- * set by the switch request, but never cleared by the hardware.
- *
- * This function must be used for tag switches from NULL.
- *
- * This function does no checks, so the caller must ensure that any
- * previous tag switch has completed.
- *
- * @wqp: pointer to work queue entry to submit. This entry is
- * updated to match the other parameters
- * @tag: tag value to be assigned to work queue entry
- * @tag_type: type of tag
- * @group: group value for the work queue entry.
- */
-static inline void cvmx_pow_tag_sw_full_nocheck(cvmx_wqe_t *wqp, uint32_t tag,
- enum cvmx_pow_tag_type tag_type,
- uint64_t group)
-{
- cvmx_addr_t ptr;
- cvmx_pow_tag_req_t tag_req;
-
- if (CVMX_ENABLE_POW_CHECKS) {
- cvmx_pow_tag_req_t current_tag;
- __cvmx_pow_warn_if_pending_switch(__func__);
- current_tag = cvmx_pow_get_current_tag();
- if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
- pr_warning("%s called with NULL_NULL tag\n",
- __func__);
- if ((current_tag.s.type == tag_type)
- && (current_tag.s.tag == tag))
- pr_warning("%s called to perform a tag switch to "
- "the same tag\n",
- __func__);
- if (tag_type == CVMX_POW_TAG_TYPE_NULL)
- pr_warning("%s called to perform a tag switch to "
- "NULL. Use cvmx_pow_tag_sw_null() instead\n",
- __func__);
- if (wqp != cvmx_phys_to_ptr(0x80))
- if (wqp != cvmx_pow_get_current_wqp())
- pr_warning("%s passed WQE(%p) doesn't match "
- "the address in the POW(%p)\n",
- __func__, wqp,
- cvmx_pow_get_current_wqp());
- }
-
- /*
- * Note that WQE in DRAM is not updated here, as the POW does
- * not read from DRAM once the WQE is in flight. See hardware
- * manual for complete details. It is the application's
- * responsibility to keep track of the current tag value if
- * that is important.
- */
-
- tag_req.u64 = 0;
- tag_req.s.op = CVMX_POW_TAG_OP_SWTAG_FULL;
- tag_req.s.tag = tag;
- tag_req.s.type = tag_type;
- tag_req.s.grp = group;
-
- ptr.u64 = 0;
- ptr.sio.mem_region = CVMX_IO_SEG;
- ptr.sio.is_io = 1;
- ptr.sio.did = CVMX_OCT_DID_TAG_SWTAG;
- ptr.sio.offset = CAST64(wqp);
-
- /*
-	 * once this store arrives at POW, it will attempt the switch;
-	 * software must wait for the switch to complete separately.
- */
- cvmx_write_io(ptr.u64, tag_req.u64);
-}
-
-/**
- * Starts a tag switch to the provided tag value and tag type.
- * Completion for the tag switch must be checked for separately. This
- * function does NOT update the work queue entry in dram to match tag
- * value and type, so the application must keep track of these if they
- * are important to the application. This tag switch command must not
- * be used for switches to NULL, as the tag switch pending bit will be
- * set by the switch request, but never cleared by the hardware.
- *
- * This function must be used for tag switches from NULL.
- *
- * This function waits for any pending tag switches to complete
- * before requesting the tag switch.
- *
- * @wqp: pointer to work queue entry to submit. This entry is updated
- * to match the other parameters
- * @tag: tag value to be assigned to work queue entry
- * @tag_type: type of tag
- * @group: group value for the work queue entry.
- */
-static inline void cvmx_pow_tag_sw_full(cvmx_wqe_t *wqp, uint32_t tag,
- enum cvmx_pow_tag_type tag_type,
- uint64_t group)
-{
- if (CVMX_ENABLE_POW_CHECKS)
- __cvmx_pow_warn_if_pending_switch(__func__);
-
- /*
- * Ensure that there is not a pending tag switch, as a tag
- * switch cannot be started if a previous switch is still
- * pending.
- */
- cvmx_pow_tag_sw_wait();
- cvmx_pow_tag_sw_full_nocheck(wqp, tag, tag_type, group);
-}
-
-/**
- * Switch to a NULL tag, which ends any ordering or
- * synchronization provided by the POW for the current
- * work queue entry. This operation completes immediately,
- * so completion should not be waited for.
- * This function does NOT wait for previous tag switches to complete,
- * so the caller must ensure that any previous tag switches have completed.
- */
-static inline void cvmx_pow_tag_sw_null_nocheck(void)
-{
- cvmx_addr_t ptr;
- cvmx_pow_tag_req_t tag_req;
-
- if (CVMX_ENABLE_POW_CHECKS) {
- cvmx_pow_tag_req_t current_tag;
- __cvmx_pow_warn_if_pending_switch(__func__);
- current_tag = cvmx_pow_get_current_tag();
- if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
- pr_warning("%s called with NULL_NULL tag\n",
- __func__);
- if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
- pr_warning("%s called when we already have a "
- "NULL tag\n",
- __func__);
- }
-
- tag_req.u64 = 0;
- tag_req.s.op = CVMX_POW_TAG_OP_SWTAG;
- tag_req.s.type = CVMX_POW_TAG_TYPE_NULL;
-
- ptr.u64 = 0;
- ptr.sio.mem_region = CVMX_IO_SEG;
- ptr.sio.is_io = 1;
- ptr.sio.did = CVMX_OCT_DID_TAG_TAG1;
-
- cvmx_write_io(ptr.u64, tag_req.u64);
-
- /* switch to NULL completes immediately */
-}
-
-/**
- * Switch to a NULL tag, which ends any ordering or
- * synchronization provided by the POW for the current
- * work queue entry. This operation completes immediately,
- * so completion should not be waited for.
- * This function waits for any pending tag switches to complete
- * before requesting the switch to NULL.
- */
-static inline void cvmx_pow_tag_sw_null(void)
-{
- if (CVMX_ENABLE_POW_CHECKS)
- __cvmx_pow_warn_if_pending_switch(__func__);
-
- /*
- * Ensure that there is not a pending tag switch, as a tag
- * switch cannot be started if a previous switch is still
- * pending.
- */
- cvmx_pow_tag_sw_wait();
- cvmx_pow_tag_sw_null_nocheck();
-
- /* switch to NULL completes immediately */
-}
-
-/**
- * Submits work to an input queue. This function updates the work
- * queue entry in DRAM to match the arguments given. Note that the
- * tag provided is for the work queue entry submitted, and is
- * unrelated to the tag that the core currently holds.
- *
- * @wqp: pointer to work queue entry to submit. This entry is
- * updated to match the other parameters
- * @tag: tag value to be assigned to work queue entry
- * @tag_type: type of tag
- * @qos: Input queue to add to.
- * @grp: group value for the work queue entry.
- */
-static inline void cvmx_pow_work_submit(cvmx_wqe_t *wqp, uint32_t tag,
- enum cvmx_pow_tag_type tag_type,
- uint64_t qos, uint64_t grp)
-{
- cvmx_addr_t ptr;
- cvmx_pow_tag_req_t tag_req;
-
- wqp->qos = qos;
- wqp->tag = tag;
- wqp->tag_type = tag_type;
- wqp->grp = grp;
-
- tag_req.u64 = 0;
- tag_req.s.op = CVMX_POW_TAG_OP_ADDWQ;
- tag_req.s.type = tag_type;
- tag_req.s.tag = tag;
- tag_req.s.qos = qos;
- tag_req.s.grp = grp;
-
- ptr.u64 = 0;
- ptr.sio.mem_region = CVMX_IO_SEG;
- ptr.sio.is_io = 1;
- ptr.sio.did = CVMX_OCT_DID_TAG_TAG1;
- ptr.sio.offset = cvmx_ptr_to_phys(wqp);
-
- /*
- * SYNC write to memory before the work submit. This is
- * necessary as POW may read values from DRAM at this time.
- */
- CVMX_SYNCWS;
- cvmx_write_io(ptr.u64, tag_req.u64);
-}
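A minimal submission sketch, assuming the caller already owns a WQE (for example, one allocated from an FPA pool elsewhere); the tag, QoS, and group values are arbitrary:

	static void example_submit(cvmx_wqe_t *wqp)
	{
		/* Add the entry to input queue 0, group 0, with an ORDERED tag */
		cvmx_pow_work_submit(wqp, 0x1234 /* tag */, CVMX_POW_TAG_TYPE_ORDERED,
				     0 /* qos */, 0 /* grp */);
	}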
-
-/**
- * This function sets the group mask for a core. The group mask
- * indicates which groups each core will accept work from. There are
- * 16 groups.
- *
- * @core_num: core to apply mask to
- * @mask: Group mask. There are 16 groups, so only bits 0-15 are valid,
- * representing groups 0-15.
- * Each 1 bit in the mask enables the core to accept work from
- * the corresponding group.
- */
-static inline void cvmx_pow_set_group_mask(uint64_t core_num, uint64_t mask)
-{
- union cvmx_pow_pp_grp_mskx grp_msk;
-
- grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num));
- grp_msk.s.grp_msk = mask;
- cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);
-}
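As a two-line sketch, restricting the current core to groups 0 and 1 (bit N of the mask enables group N):

	/* Accept work only from groups 0 and 1 on this core */
	cvmx_pow_set_group_mask(cvmx_get_core_num(), (1 << 0) | (1 << 1));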
-
-/**
- * This function sets POW static priorities for a core. Each input queue has
- * an associated priority value.
- *
- * @core_num: core to apply priorities to
- * @priority: Vector of 8 priorities, one per POW Input Queue (0-7).
- * Highest priority is 0 and lowest is 7. A priority value
- * of 0xF instructs POW to skip the Input Queue when
- * scheduling to this specific core.
- * NOTE: priorities should not have gaps in values, meaning
- * {0,1,1,1,1,1,1,1} is a valid configuration while
- * {0,2,2,2,2,2,2,2} is not.
- */
-static inline void cvmx_pow_set_priority(uint64_t core_num,
- const uint8_t priority[])
-{
- /* POW priorities are supported on CN5xxx and later */
- if (!OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
- union cvmx_pow_pp_grp_mskx grp_msk;
-
- grp_msk.u64 = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(core_num));
- grp_msk.s.qos0_pri = priority[0];
- grp_msk.s.qos1_pri = priority[1];
- grp_msk.s.qos2_pri = priority[2];
- grp_msk.s.qos3_pri = priority[3];
- grp_msk.s.qos4_pri = priority[4];
- grp_msk.s.qos5_pri = priority[5];
- grp_msk.s.qos6_pri = priority[6];
- grp_msk.s.qos7_pri = priority[7];
-
- /* Detect gaps between priorities and flag error */
- {
- int i;
- uint32_t prio_mask = 0;
-
- for (i = 0; i < 8; i++)
- if (priority[i] != 0xF)
- prio_mask |= 1 << priority[i];
-
- if (prio_mask ^ ((1 << cvmx_pop(prio_mask)) - 1)) {
- pr_err("POW static priorities should be "
- "contiguous (0x%llx)\n",
- (unsigned long long)prio_mask);
- return;
- }
- }
-
- cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(core_num), grp_msk.u64);
- }
-}
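An illustrative, gap-free priority vector: input queues 0-1 at the highest priority, queues 2-5 one level lower, and queues 6-7 skipped entirely for this core:

	static const uint8_t prio[8] = { 0, 0, 1, 1, 1, 1, 0xF, 0xF };

	cvmx_pow_set_priority(cvmx_get_core_num(), prio);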
-
-/**
- * Performs a tag switch and then an immediate deschedule. This completes
- * immediately, so completion must not be waited for. This function does NOT
- * update the wqe in DRAM to match arguments.
- *
- * This function does NOT wait for any prior tag switches to complete, so the
- * calling code must do this.
- *
- * Note the following CAVEAT of the Octeon HW behavior when
- * re-scheduling DE-SCHEDULEd items whose (next) state is
- * ORDERED:
- * - If there are no switches pending at the time that the
- * HW executes the de-schedule, the HW will only re-schedule
- * the head of the FIFO associated with the given tag. This
- * means that in many respects, the HW treats this ORDERED
- * tag as an ATOMIC tag. Note that in the SWTAG_DESCH
- * case (to an ORDERED tag), the HW will do the switch
- * before the deschedule whenever it is possible to do
- * the switch immediately, so it may often look like
- * this case.
- * - If there is a pending switch to ORDERED at the time
- * the HW executes the de-schedule, the HW will perform
- * the switch at the time it re-schedules, and will be
- * able to reschedule any/all of the entries with the
- * same tag.
- * Due to this behavior, the RECOMMENDATION to software is
- * that they have a (next) state of ATOMIC when they
- * DE-SCHEDULE. If an ORDERED tag is what was really desired,
- * SW can choose to immediately switch to an ORDERED tag
- * after the work (that has an ATOMIC tag) is re-scheduled.
- * Note that since there are never any tag switches pending
- * when the HW re-schedules, this switch can be IMMEDIATE upon
- * the reception of the pointer during the re-schedule.
- *
- * @tag: New tag value
- * @tag_type: New tag type
- * @group: New group value
- * @no_sched: Control whether this work queue entry will be rescheduled.
- * - 1 : don't schedule this work
- * - 0 : allow this work to be scheduled.
- */
-static inline void cvmx_pow_tag_sw_desched_nocheck(
- uint32_t tag,
- enum cvmx_pow_tag_type tag_type,
- uint64_t group,
- uint64_t no_sched)
-{
- cvmx_addr_t ptr;
- cvmx_pow_tag_req_t tag_req;
-
- if (CVMX_ENABLE_POW_CHECKS) {
- cvmx_pow_tag_req_t current_tag;
- __cvmx_pow_warn_if_pending_switch(__func__);
- current_tag = cvmx_pow_get_current_tag();
- if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
- pr_warning("%s called with NULL_NULL tag\n",
- __func__);
- if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
- pr_warning("%s called with NULL tag. Deschedule not "
- "allowed from NULL state\n",
- __func__);
- if ((current_tag.s.type != CVMX_POW_TAG_TYPE_ATOMIC)
- && (tag_type != CVMX_POW_TAG_TYPE_ATOMIC))
- pr_warning("%s called where neither the before or "
- "after tag is ATOMIC\n",
- __func__);
- }
-
- tag_req.u64 = 0;
- tag_req.s.op = CVMX_POW_TAG_OP_SWTAG_DESCH;
- tag_req.s.tag = tag;
- tag_req.s.type = tag_type;
- tag_req.s.grp = group;
- tag_req.s.no_sched = no_sched;
-
- ptr.u64 = 0;
- ptr.sio.mem_region = CVMX_IO_SEG;
- ptr.sio.is_io = 1;
- ptr.sio.did = CVMX_OCT_DID_TAG_TAG3;
- /*
- * since TAG3 is used, this store will clear the local pending
- * switch bit.
- */
- cvmx_write_io(ptr.u64, tag_req.u64);
-}
-
-/**
- * Performs a tag switch and then an immediate deschedule. This completes
- * immediately, so completion must not be waited for. This function does NOT
- * update the wqe in DRAM to match arguments.
- *
- * This function waits for any prior tag switches to complete, so the
- * calling code may call this function with a pending tag switch.
- *
- * Note the following CAVEAT of the Octeon HW behavior when
- * re-scheduling DE-SCHEDULEd items whose (next) state is
- * ORDERED:
- * - If there are no switches pending at the time that the
- * HW executes the de-schedule, the HW will only re-schedule
- * the head of the FIFO associated with the given tag. This
- * means that in many respects, the HW treats this ORDERED
- * tag as an ATOMIC tag. Note that in the SWTAG_DESCH
- * case (to an ORDERED tag), the HW will do the switch
- * before the deschedule whenever it is possible to do
- * the switch immediately, so it may often look like
- * this case.
- * - If there is a pending switch to ORDERED at the time
- * the HW executes the de-schedule, the HW will perform
- * the switch at the time it re-schedules, and will be
- * able to reschedule any/all of the entries with the
- * same tag.
- * Due to this behavior, the RECOMMENDATION to software is
- * that they have a (next) state of ATOMIC when they
- * DE-SCHEDULE. If an ORDERED tag is what was really desired,
- * SW can choose to immediately switch to an ORDERED tag
- * after the work (that has an ATOMIC tag) is re-scheduled.
- * Note that since there are never any tag switches pending
- * when the HW re-schedules, this switch can be IMMEDIATE upon
- * the reception of the pointer during the re-schedule.
- *
- * @tag: New tag value
- * @tag_type: New tag type
- * @group: New group value
- * @no_sched: Control whether this work queue entry will be rescheduled.
- * - 1 : don't schedule this work
- * - 0 : allow this work to be scheduled.
- */
-static inline void cvmx_pow_tag_sw_desched(uint32_t tag,
- enum cvmx_pow_tag_type tag_type,
- uint64_t group, uint64_t no_sched)
-{
- if (CVMX_ENABLE_POW_CHECKS)
- __cvmx_pow_warn_if_pending_switch(__func__);
-
- /* Need to make sure any writes to the work queue entry are complete */
- CVMX_SYNCWS;
- /*
- * Ensure that there is not a pending tag switch, as a tag
- * switch cannot be started if a previous switch is still
- * pending.
- */
- cvmx_pow_tag_sw_wait();
- cvmx_pow_tag_sw_desched_nocheck(tag, tag_type, group, no_sched);
-}
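Following the recommendation above, a sketch of parking work with ATOMIC as the next state (work_tag and work_grp are assumed to come from the current WQE):

	static void example_park_work(uint32_t work_tag, uint64_t work_grp)
	{
		/* Deschedule with an ATOMIC next state and allow rescheduling */
		cvmx_pow_tag_sw_desched(work_tag, CVMX_POW_TAG_TYPE_ATOMIC,
					work_grp, 0 /* no_sched */);
	}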
-
-/**
- * Deschedules the current work queue entry.
- *
- * @no_sched: no schedule flag value to be set on the work queue
- * entry. If this is set the entry will not be
- * rescheduled.
- */
-static inline void cvmx_pow_desched(uint64_t no_sched)
-{
- cvmx_addr_t ptr;
- cvmx_pow_tag_req_t tag_req;
-
- if (CVMX_ENABLE_POW_CHECKS) {
- cvmx_pow_tag_req_t current_tag;
- __cvmx_pow_warn_if_pending_switch(__func__);
- current_tag = cvmx_pow_get_current_tag();
- if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
- pr_warning("%s called with NULL_NULL tag\n",
- __func__);
- if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
- pr_warning("%s called with NULL tag. Deschedule not "
- "expected from NULL state\n",
- __func__);
- }
-
- /* Need to make sure any writes to the work queue entry are complete */
- CVMX_SYNCWS;
-
- tag_req.u64 = 0;
- tag_req.s.op = CVMX_POW_TAG_OP_DESCH;
- tag_req.s.no_sched = no_sched;
-
- ptr.u64 = 0;
- ptr.sio.mem_region = CVMX_IO_SEG;
- ptr.sio.is_io = 1;
- ptr.sio.did = CVMX_OCT_DID_TAG_TAG3;
- /*
- * since TAG3 is used, this store will clear the local pending
- * switch bit.
- */
- cvmx_write_io(ptr.u64, tag_req.u64);
-}
-
-/****************************************************
-* Define usage of bits within the 32 bit tag values.
-*****************************************************/
-
-/*
- * Number of bits of the tag used by software. The SW bits are always
- * a contiguous block of the high bits, starting at bit 31. The hardware
- * bits are always the low bits. By default, the top 8 bits of the
- * tag are reserved for software, and the low 24 are set by the IPD
- * unit.
- */
-#define CVMX_TAG_SW_BITS (8)
-#define CVMX_TAG_SW_SHIFT (32 - CVMX_TAG_SW_BITS)
-
-/* Below is the list of values for the top 8 bits of the tag. */
-/*
- * Tag values with top byte of this value are reserved for internal
- * executive uses.
- */
-#define CVMX_TAG_SW_BITS_INTERNAL 0x1
-/* The executive divides the remaining 24 bits as follows:
- * - the upper 8 bits (bits 23 - 16 of the tag) define a subgroup
- *
- * - the lower 16 bits (bits 15 - 0 of the tag) are the value
- *   within the subgroup
- *
- * Note that this section describes the format of tags generated by
- * software - refer to the hardware documentation for a description of
- * the tag values generated by the packet input hardware. Subgroups
- * are defined here.
- */
-/* Mask for the value portion of the tag */
-#define CVMX_TAG_SUBGROUP_MASK 0xFFFF
-#define CVMX_TAG_SUBGROUP_SHIFT 16
-#define CVMX_TAG_SUBGROUP_PKO 0x1
-
-/* End of executive tag subgroup definitions */
-
-/*
- * The remaining software bit values 0x2 - 0xff are available
- * for application use.
- */
-
-/**
- * This function creates a 32 bit tag value from the two values provided.
- *
- * @sw_bits: The upper bits (number depends on configuration) are set
- * to this value. The remainder of bits are set by the
- * hw_bits parameter.
- *
- * @hw_bits: The lower bits (number depends on configuration) are set
- * to this value. The remainder of bits are set by the
- * sw_bits parameter.
- *
- * Returns 32 bit value of the combined hw and sw bits.
- */
-static inline uint32_t cvmx_pow_tag_compose(uint64_t sw_bits, uint64_t hw_bits)
-{
- return ((sw_bits & cvmx_build_mask(CVMX_TAG_SW_BITS)) <<
- CVMX_TAG_SW_SHIFT) |
- (hw_bits & cvmx_build_mask(32 - CVMX_TAG_SW_BITS));
-}
-
-/**
- * Extracts the bits allocated for software use from the tag
- *
- * @tag: 32 bit tag value
- *
- * Returns N bit software tag value, where N is configurable with the
- * CVMX_TAG_SW_BITS define
- */
-static inline uint32_t cvmx_pow_tag_get_sw_bits(uint64_t tag)
-{
- return (tag >> (32 - CVMX_TAG_SW_BITS)) &
- cvmx_build_mask(CVMX_TAG_SW_BITS);
-}
-
-/**
- *
- * Extracts the bits allocated for hardware use from the tag
- *
- * @tag: 32 bit tag value
- *
- * Returns (32 - N) bit hardware tag value, where N is configurable
- * with the CVMX_TAG_SW_BITS define
- */
-static inline uint32_t cvmx_pow_tag_get_hw_bits(uint64_t tag)
-{
- return tag & cvmx_build_mask(32 - CVMX_TAG_SW_BITS);
-}
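A small round-trip sketch with the default 8/24 split: compose a tag from a software and a hardware part, then pull the two halves back out:

	uint32_t tag = cvmx_pow_tag_compose(CVMX_TAG_SW_BITS_INTERNAL, 0x00ABCD);

	/* With CVMX_TAG_SW_BITS == 8, sw == 0x01 and hw == 0x00ABCD */
	uint32_t sw = cvmx_pow_tag_get_sw_bits(tag);
	uint32_t hw = cvmx_pow_tag_get_hw_bits(tag);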
-
-/**
- * Store the current POW internal state into the supplied
- * buffer. It is recommended that you pass a buffer of at least
- * 128KB. The format of the capture may change based on SDK
- * version and Octeon chip.
- *
- * @buffer: Buffer to store capture into
- * @buffer_size:
- * The size of the supplied buffer
- *
- * Returns Zero on success, negative on failure
- */
-extern int cvmx_pow_capture(void *buffer, int buffer_size);
-
-/**
- * Dump a POW capture to the console in a human readable format.
- *
- * @buffer: POW capture from cvmx_pow_capture()
- * @buffer_size:
- * Size of the buffer
- */
-extern void cvmx_pow_display(void *buffer, int buffer_size);
-
-/**
- * Return the number of POW entries supported by this chip
- *
- * Returns Number of POW entries
- */
-extern int cvmx_pow_get_num_entries(void);
-
-#endif /* __CVMX_POW_H__ */
diff --git a/drivers/staging/octeon/cvmx-scratch.h b/drivers/staging/octeon/cvmx-scratch.h
deleted file mode 100644
index 96b70cf..0000000
--- a/drivers/staging/octeon/cvmx-scratch.h
+++ /dev/null
@@ -1,139 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- *
- * This file provides support for the processor local scratch memory.
- * Scratch memory is byte addressable - all addresses are byte addresses.
- *
- */
-
-#ifndef __CVMX_SCRATCH_H__
-#define __CVMX_SCRATCH_H__
-
-/*
- * Note: This define must be a long, not a long long in order to
- * compile without warnings for both 32bit and 64bit.
- */
-#define CVMX_SCRATCH_BASE (-32768l) /* 0xffffffffffff8000 */
-
-/**
- * Reads an 8 bit value from the processor local scratchpad memory.
- *
- * @address: byte address to read from
- *
- * Returns value read
- */
-static inline uint8_t cvmx_scratch_read8(uint64_t address)
-{
- return *CASTPTR(volatile uint8_t, CVMX_SCRATCH_BASE + address);
-}
-
-/**
- * Reads a 16 bit value from the processor local scratchpad memory.
- *
- * @address: byte address to read from
- *
- * Returns value read
- */
-static inline uint16_t cvmx_scratch_read16(uint64_t address)
-{
- return *CASTPTR(volatile uint16_t, CVMX_SCRATCH_BASE + address);
-}
-
-/**
- * Reads a 32 bit value from the processor local scratchpad memory.
- *
- * @address: byte address to read from
- *
- * Returns value read
- */
-static inline uint32_t cvmx_scratch_read32(uint64_t address)
-{
- return *CASTPTR(volatile uint32_t, CVMX_SCRATCH_BASE + address);
-}
-
-/**
- * Reads a 64 bit value from the processor local scratchpad memory.
- *
- * @address: byte address to read from
- *
- * Returns value read
- */
-static inline uint64_t cvmx_scratch_read64(uint64_t address)
-{
- return *CASTPTR(volatile uint64_t, CVMX_SCRATCH_BASE + address);
-}
-
-/**
- * Writes an 8 bit value to the processor local scratchpad memory.
- *
- * @address: byte address to write to
- * @value: value to write
- */
-static inline void cvmx_scratch_write8(uint64_t address, uint64_t value)
-{
- *CASTPTR(volatile uint8_t, CVMX_SCRATCH_BASE + address) =
- (uint8_t) value;
-}
-
-/**
- * Writes a 16 bit value to the processor local scratchpad memory.
- *
- * @address: byte address to write to
- * @value: value to write
- */
-static inline void cvmx_scratch_write16(uint64_t address, uint64_t value)
-{
- *CASTPTR(volatile uint16_t, CVMX_SCRATCH_BASE + address) =
- (uint16_t) value;
-}
-
-/**
- * Writes a 32 bit value to the processor local scratchpad memory.
- *
- * @address: byte address to write to
- * @value: value to write
- */
-static inline void cvmx_scratch_write32(uint64_t address, uint64_t value)
-{
- *CASTPTR(volatile uint32_t, CVMX_SCRATCH_BASE + address) =
- (uint32_t) value;
-}
-
-/**
- * Writes a 64 bit value to the processor local scratchpad memory.
- *
- * @address: byte address to write to
- * @value: value to write
- */
-static inline void cvmx_scratch_write64(uint64_t address, uint64_t value)
-{
- *CASTPTR(volatile uint64_t, CVMX_SCRATCH_BASE + address) = value;
-}
-
-#endif /* __CVMX_SCRATCH_H__ */
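For illustration, a minimal round trip through the scratchpad accessors defined above; example_scratch_roundtrip and the byte offset 8 are arbitrary, and the caller is assumed to own that region of core-local scratch:

static int example_scratch_roundtrip(void)
{
	uint64_t offset = 8;	/* arbitrary byte offset into scratch memory */

	/* Write a 64 bit value, then read it back through the same window */
	cvmx_scratch_write64(offset, 0x0123456789abcdefull);

	return cvmx_scratch_read64(offset) == 0x0123456789abcdefull;
}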
diff --git a/drivers/staging/octeon/cvmx-smix-defs.h b/drivers/staging/octeon/cvmx-smix-defs.h
deleted file mode 100644
index 9ae45fc..0000000
--- a/drivers/staging/octeon/cvmx-smix-defs.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_SMIX_DEFS_H__
-#define __CVMX_SMIX_DEFS_H__
-
-#define CVMX_SMIX_CLK(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001818ull + (((offset) & 1) * 256))
-#define CVMX_SMIX_CMD(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001800ull + (((offset) & 1) * 256))
-#define CVMX_SMIX_EN(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001820ull + (((offset) & 1) * 256))
-#define CVMX_SMIX_RD_DAT(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001810ull + (((offset) & 1) * 256))
-#define CVMX_SMIX_WR_DAT(offset) \
- CVMX_ADD_IO_SEG(0x0001180000001808ull + (((offset) & 1) * 256))
-
-union cvmx_smix_clk {
- uint64_t u64;
- struct cvmx_smix_clk_s {
- uint64_t reserved_25_63:39;
- uint64_t mode:1;
- uint64_t reserved_21_23:3;
- uint64_t sample_hi:5;
- uint64_t sample_mode:1;
- uint64_t reserved_14_14:1;
- uint64_t clk_idle:1;
- uint64_t preamble:1;
- uint64_t sample:4;
- uint64_t phase:8;
- } s;
- struct cvmx_smix_clk_cn30xx {
- uint64_t reserved_21_63:43;
- uint64_t sample_hi:5;
- uint64_t reserved_14_15:2;
- uint64_t clk_idle:1;
- uint64_t preamble:1;
- uint64_t sample:4;
- uint64_t phase:8;
- } cn30xx;
- struct cvmx_smix_clk_cn30xx cn31xx;
- struct cvmx_smix_clk_cn30xx cn38xx;
- struct cvmx_smix_clk_cn30xx cn38xxp2;
- struct cvmx_smix_clk_cn50xx {
- uint64_t reserved_25_63:39;
- uint64_t mode:1;
- uint64_t reserved_21_23:3;
- uint64_t sample_hi:5;
- uint64_t reserved_14_15:2;
- uint64_t clk_idle:1;
- uint64_t preamble:1;
- uint64_t sample:4;
- uint64_t phase:8;
- } cn50xx;
- struct cvmx_smix_clk_s cn52xx;
- struct cvmx_smix_clk_cn50xx cn52xxp1;
- struct cvmx_smix_clk_s cn56xx;
- struct cvmx_smix_clk_cn50xx cn56xxp1;
- struct cvmx_smix_clk_cn30xx cn58xx;
- struct cvmx_smix_clk_cn30xx cn58xxp1;
-};
-
-union cvmx_smix_cmd {
- uint64_t u64;
- struct cvmx_smix_cmd_s {
- uint64_t reserved_18_63:46;
- uint64_t phy_op:2;
- uint64_t reserved_13_15:3;
- uint64_t phy_adr:5;
- uint64_t reserved_5_7:3;
- uint64_t reg_adr:5;
- } s;
- struct cvmx_smix_cmd_cn30xx {
- uint64_t reserved_17_63:47;
- uint64_t phy_op:1;
- uint64_t reserved_13_15:3;
- uint64_t phy_adr:5;
- uint64_t reserved_5_7:3;
- uint64_t reg_adr:5;
- } cn30xx;
- struct cvmx_smix_cmd_cn30xx cn31xx;
- struct cvmx_smix_cmd_cn30xx cn38xx;
- struct cvmx_smix_cmd_cn30xx cn38xxp2;
- struct cvmx_smix_cmd_s cn50xx;
- struct cvmx_smix_cmd_s cn52xx;
- struct cvmx_smix_cmd_s cn52xxp1;
- struct cvmx_smix_cmd_s cn56xx;
- struct cvmx_smix_cmd_s cn56xxp1;
- struct cvmx_smix_cmd_cn30xx cn58xx;
- struct cvmx_smix_cmd_cn30xx cn58xxp1;
-};
-
-union cvmx_smix_en {
- uint64_t u64;
- struct cvmx_smix_en_s {
- uint64_t reserved_1_63:63;
- uint64_t en:1;
- } s;
- struct cvmx_smix_en_s cn30xx;
- struct cvmx_smix_en_s cn31xx;
- struct cvmx_smix_en_s cn38xx;
- struct cvmx_smix_en_s cn38xxp2;
- struct cvmx_smix_en_s cn50xx;
- struct cvmx_smix_en_s cn52xx;
- struct cvmx_smix_en_s cn52xxp1;
- struct cvmx_smix_en_s cn56xx;
- struct cvmx_smix_en_s cn56xxp1;
- struct cvmx_smix_en_s cn58xx;
- struct cvmx_smix_en_s cn58xxp1;
-};
-
-union cvmx_smix_rd_dat {
- uint64_t u64;
- struct cvmx_smix_rd_dat_s {
- uint64_t reserved_18_63:46;
- uint64_t pending:1;
- uint64_t val:1;
- uint64_t dat:16;
- } s;
- struct cvmx_smix_rd_dat_s cn30xx;
- struct cvmx_smix_rd_dat_s cn31xx;
- struct cvmx_smix_rd_dat_s cn38xx;
- struct cvmx_smix_rd_dat_s cn38xxp2;
- struct cvmx_smix_rd_dat_s cn50xx;
- struct cvmx_smix_rd_dat_s cn52xx;
- struct cvmx_smix_rd_dat_s cn52xxp1;
- struct cvmx_smix_rd_dat_s cn56xx;
- struct cvmx_smix_rd_dat_s cn56xxp1;
- struct cvmx_smix_rd_dat_s cn58xx;
- struct cvmx_smix_rd_dat_s cn58xxp1;
-};
-
-union cvmx_smix_wr_dat {
- uint64_t u64;
- struct cvmx_smix_wr_dat_s {
- uint64_t reserved_18_63:46;
- uint64_t pending:1;
- uint64_t val:1;
- uint64_t dat:16;
- } s;
- struct cvmx_smix_wr_dat_s cn30xx;
- struct cvmx_smix_wr_dat_s cn31xx;
- struct cvmx_smix_wr_dat_s cn38xx;
- struct cvmx_smix_wr_dat_s cn38xxp2;
- struct cvmx_smix_wr_dat_s cn50xx;
- struct cvmx_smix_wr_dat_s cn52xx;
- struct cvmx_smix_wr_dat_s cn52xxp1;
- struct cvmx_smix_wr_dat_s cn56xx;
- struct cvmx_smix_wr_dat_s cn56xxp1;
- struct cvmx_smix_wr_dat_s cn58xx;
- struct cvmx_smix_wr_dat_s cn58xxp1;
-};
-
-#endif
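To show how these SMI register layouts are typically driven, here is a hedged sketch of a Clause 22 MDIO read on bus 0. example_smi_read is hypothetical, and the phy_op encoding (1 selecting a read) plus the poll on pending/val follow the usual Octeon SMI pattern rather than anything stated in this header:

static int example_smi_read(int phy_addr, int reg_addr)
{
	union cvmx_smix_cmd cmd;
	union cvmx_smix_rd_dat rd;

	/* Issue the read command on SMI/MDIO bus 0 */
	cmd.u64 = 0;
	cmd.s.phy_op = 1;	/* assumed: 1 selects a Clause 22 read */
	cmd.s.phy_adr = phy_addr;
	cmd.s.reg_adr = reg_addr;
	cvmx_write_csr(CVMX_SMIX_CMD(0), cmd.u64);

	/* Poll until the transaction is no longer pending */
	do {
		rd.u64 = cvmx_read_csr(CVMX_SMIX_RD_DAT(0));
	} while (rd.s.pending);

	/* val indicates the read completed with valid data */
	return rd.s.val ? rd.s.dat : -1;
}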
diff --git a/drivers/staging/octeon/cvmx-spi.c b/drivers/staging/octeon/cvmx-spi.c
deleted file mode 100644
index 82794d9..0000000
--- a/drivers/staging/octeon/cvmx-spi.c
+++ /dev/null
@@ -1,667 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- *
- * Support library for the SPI
- */
-#include <asm/octeon/octeon.h>
-
-#include "cvmx-config.h"
-
-#include "cvmx-pko.h"
-#include "cvmx-spi.h"
-
-#include "cvmx-spxx-defs.h"
-#include "cvmx-stxx-defs.h"
-#include "cvmx-srxx-defs.h"
-
-#define INVOKE_CB(function_p, args...) \
- do { \
- if (function_p) { \
- res = function_p(args); \
- if (res) \
- return res; \
- } \
- } while (0)
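For clarity, this is roughly what one INVOKE_CB use expands to once the preprocessor is done; example_invoke_reset is a hypothetical wrapper, and res is the local status variable the callers below declare:

static int example_invoke_reset(int interface, cvmx_spi_mode_t mode)
{
	int res = -1;

	/* INVOKE_CB(cvmx_spi_callbacks.reset_cb, interface, mode) becomes: */
	do {
		if (cvmx_spi_callbacks.reset_cb) {
			res = cvmx_spi_callbacks.reset_cb(interface, mode);
			if (res)
				return res;
		}
	} while (0);

	return res;
}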
-
-#if CVMX_ENABLE_DEBUG_PRINTS
-static const char *modes[] =
- { "UNKNOWN", "TX Halfplex", "Rx Halfplex", "Duplex" };
-#endif
-
-/* Default callbacks, can be overridden
- * using cvmx_spi_get_callbacks/cvmx_spi_set_callbacks
- */
-static cvmx_spi_callbacks_t cvmx_spi_callbacks = {
- .reset_cb = cvmx_spi_reset_cb,
- .calendar_setup_cb = cvmx_spi_calendar_setup_cb,
- .clock_detect_cb = cvmx_spi_clock_detect_cb,
- .training_cb = cvmx_spi_training_cb,
- .calendar_sync_cb = cvmx_spi_calendar_sync_cb,
- .interface_up_cb = cvmx_spi_interface_up_cb
-};
-
-/**
- * Get current SPI4 initialization callbacks
- *
- * @callbacks: Pointer to the callbacks structure to fill
- *
- * The current callbacks are copied into the supplied structure.
- */
-void cvmx_spi_get_callbacks(cvmx_spi_callbacks_t *callbacks)
-{
- memcpy(callbacks, &cvmx_spi_callbacks, sizeof(cvmx_spi_callbacks));
-}
-
-/**
- * Set new SPI4 initialization callbacks
- *
- * @new_callbacks: Pointer to an updated callbacks structure.
- */
-void cvmx_spi_set_callbacks(cvmx_spi_callbacks_t *new_callbacks)
-{
- memcpy(&cvmx_spi_callbacks, new_callbacks, sizeof(cvmx_spi_callbacks));
-}
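A short sketch of the intended customization flow: fetch the current callbacks, replace one entry, and install the modified set. my_board_reset_cb and example_override_reset are hypothetical, board-specific names, not part of this file:

static int my_board_reset_cb(int interface, cvmx_spi_mode_t mode)
{
	/* Board-specific quirks would go here before the stock reset */
	return cvmx_spi_reset_cb(interface, mode);
}

static void example_override_reset(void)
{
	cvmx_spi_callbacks_t cb;

	cvmx_spi_get_callbacks(&cb);
	cb.reset_cb = my_board_reset_cb;
	cvmx_spi_set_callbacks(&cb);
}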
-
-/**
- * Initialize and start the SPI interface.
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- * @timeout: Timeout to wait for clock synchronization in seconds
- * @num_ports: Number of SPI ports to configure
- *
- * Returns Zero on success, negative on failure.
- */
-int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode, int timeout,
- int num_ports)
-{
- int res = -1;
-
- if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
- return res;
-
- /* Callback to perform SPI4 reset */
- INVOKE_CB(cvmx_spi_callbacks.reset_cb, interface, mode);
-
- /* Callback to perform calendar setup */
- INVOKE_CB(cvmx_spi_callbacks.calendar_setup_cb, interface, mode,
- num_ports);
-
- /* Callback to perform clock detection */
- INVOKE_CB(cvmx_spi_callbacks.clock_detect_cb, interface, mode, timeout);
-
- /* Callback to perform SPI4 link training */
- INVOKE_CB(cvmx_spi_callbacks.training_cb, interface, mode, timeout);
-
- /* Callback to perform calendar sync */
- INVOKE_CB(cvmx_spi_callbacks.calendar_sync_cb, interface, mode,
- timeout);
-
- /* Callback to handle interface coming up */
- INVOKE_CB(cvmx_spi_callbacks.interface_up_cb, interface, mode);
-
- return res;
-}
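And a hedged example of a caller bringing up interface 0 as a full-duplex SPI4 link; the ten-port calendar and one-second timeout are illustrative values only, and example_spi_bringup is a hypothetical name:

static int example_spi_bringup(void)
{
	/* Full duplex, 10 ports, wait up to 1 second for clocks and training */
	return cvmx_spi_start_interface(0, CVMX_SPI_MODE_DUPLEX, 1, 10);
}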
-
-/**
- * This routine restarts the SPI interface after it has lost synchronization
- * with its correspondent system.
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- * @timeout: Timeout to wait for clock synchronization in seconds
- *
- * Returns Zero on success, negative on failure.
- */
-int cvmx_spi_restart_interface(int interface, cvmx_spi_mode_t mode, int timeout)
-{
- int res = -1;
-
- if (!(OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX)))
- return res;
-
- cvmx_dprintf("SPI%d: Restart %s\n", interface, modes[mode]);
-
- /* Callback to perform SPI4 reset */
- INVOKE_CB(cvmx_spi_callbacks.reset_cb, interface, mode);
-
- /* NOTE: Calendar setup is not performed during restart */
- /* Refer to cvmx_spi_start_interface() for the full sequence */
-
- /* Callback to perform clock detection */
- INVOKE_CB(cvmx_spi_callbacks.clock_detect_cb, interface, mode, timeout);
-
- /* Callback to perform SPI4 link training */
- INVOKE_CB(cvmx_spi_callbacks.training_cb, interface, mode, timeout);
-
- /* Callback to perform calendar sync */
- INVOKE_CB(cvmx_spi_callbacks.calendar_sync_cb, interface, mode,
- timeout);
-
- /* Callback to handle interface coming up */
- INVOKE_CB(cvmx_spi_callbacks.interface_up_cb, interface, mode);
-
- return res;
-}
-
-/**
- * Callback to perform SPI4 reset
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- *
- * Returns Zero on success, non-zero error code on failure (will cause
- * SPI initialization to abort)
- */
-int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode)
-{
- union cvmx_spxx_dbg_deskew_ctl spxx_dbg_deskew_ctl;
- union cvmx_spxx_clk_ctl spxx_clk_ctl;
- union cvmx_spxx_bist_stat spxx_bist_stat;
- union cvmx_spxx_int_msk spxx_int_msk;
- union cvmx_stxx_int_msk stxx_int_msk;
- union cvmx_spxx_trn4_ctl spxx_trn4_ctl;
- int index;
- uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
-
- /* Disable SPI error events while we run BIST */
- spxx_int_msk.u64 = cvmx_read_csr(CVMX_SPXX_INT_MSK(interface));
- cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0);
- stxx_int_msk.u64 = cvmx_read_csr(CVMX_STXX_INT_MSK(interface));
- cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0);
-
- /* Run BIST in the SPI interface */
- cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), 0);
- cvmx_write_csr(CVMX_STXX_COM_CTL(interface), 0);
- spxx_clk_ctl.u64 = 0;
- spxx_clk_ctl.s.runbist = 1;
- cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
- cvmx_wait(10 * MS);
- spxx_bist_stat.u64 = cvmx_read_csr(CVMX_SPXX_BIST_STAT(interface));
- if (spxx_bist_stat.s.stat0)
- cvmx_dprintf
- ("ERROR SPI%d: BIST failed on receive datapath FIFO\n",
- interface);
- if (spxx_bist_stat.s.stat1)
- cvmx_dprintf("ERROR SPI%d: BIST failed on RX calendar table\n",
- interface);
- if (spxx_bist_stat.s.stat2)
- cvmx_dprintf("ERROR SPI%d: BIST failed on TX calendar table\n",
- interface);
-
- /* Clear the calendar table after BIST to fix parity errors */
- for (index = 0; index < 32; index++) {
- union cvmx_srxx_spi4_calx srxx_spi4_calx;
- union cvmx_stxx_spi4_calx stxx_spi4_calx;
-
- srxx_spi4_calx.u64 = 0;
- srxx_spi4_calx.s.oddpar = 1;
- cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface),
- srxx_spi4_calx.u64);
-
- stxx_spi4_calx.u64 = 0;
- stxx_spi4_calx.s.oddpar = 1;
- cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface),
- stxx_spi4_calx.u64);
- }
-
-	/* Re-enable reporting of error interrupts */
- cvmx_write_csr(CVMX_SPXX_INT_REG(interface),
- cvmx_read_csr(CVMX_SPXX_INT_REG(interface)));
- cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), spxx_int_msk.u64);
- cvmx_write_csr(CVMX_STXX_INT_REG(interface),
- cvmx_read_csr(CVMX_STXX_INT_REG(interface)));
- cvmx_write_csr(CVMX_STXX_INT_MSK(interface), stxx_int_msk.u64);
-
- /* Setup the CLKDLY right in the middle */
- spxx_clk_ctl.u64 = 0;
- spxx_clk_ctl.s.seetrn = 0;
- spxx_clk_ctl.s.clkdly = 0x10;
- spxx_clk_ctl.s.runbist = 0;
- spxx_clk_ctl.s.statdrv = 0;
- /* This should always be on the opposite edge as statdrv */
- spxx_clk_ctl.s.statrcv = 1;
- spxx_clk_ctl.s.sndtrn = 0;
- spxx_clk_ctl.s.drptrn = 0;
- spxx_clk_ctl.s.rcvtrn = 0;
- spxx_clk_ctl.s.srxdlck = 0;
- cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
- cvmx_wait(100 * MS);
-
- /* Reset SRX0 DLL */
- spxx_clk_ctl.s.srxdlck = 1;
- cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
-
- /* Waiting for Inf0 Spi4 RX DLL to lock */
- cvmx_wait(100 * MS);
-
- /* Enable dynamic alignment */
- spxx_trn4_ctl.s.trntest = 0;
- spxx_trn4_ctl.s.jitter = 1;
- spxx_trn4_ctl.s.clr_boot = 1;
- spxx_trn4_ctl.s.set_boot = 0;
- if (OCTEON_IS_MODEL(OCTEON_CN58XX))
- spxx_trn4_ctl.s.maxdist = 3;
- else
- spxx_trn4_ctl.s.maxdist = 8;
- spxx_trn4_ctl.s.macro_en = 1;
- spxx_trn4_ctl.s.mux_en = 1;
- cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);
-
- spxx_dbg_deskew_ctl.u64 = 0;
- cvmx_write_csr(CVMX_SPXX_DBG_DESKEW_CTL(interface),
- spxx_dbg_deskew_ctl.u64);
-
- return 0;
-}
-
-/**
- * Callback to setup calendar and miscellaneous settings before clock detection
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- * @num_ports: Number of ports to configure on SPI
- *
- * Returns Zero on success, non-zero error code on failure (will cause
- * SPI initialization to abort)
- */
-int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode,
- int num_ports)
-{
- int port;
- int index;
- if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
- union cvmx_srxx_com_ctl srxx_com_ctl;
- union cvmx_srxx_spi4_stat srxx_spi4_stat;
-
- /* SRX0 number of Ports */
- srxx_com_ctl.u64 = 0;
- srxx_com_ctl.s.prts = num_ports - 1;
- srxx_com_ctl.s.st_en = 0;
- srxx_com_ctl.s.inf_en = 0;
- cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
-
-		/* SRX0 Calendar Table. This round robins through all ports */
- port = 0;
- index = 0;
- while (port < num_ports) {
- union cvmx_srxx_spi4_calx srxx_spi4_calx;
- srxx_spi4_calx.u64 = 0;
- srxx_spi4_calx.s.prt0 = port++;
- srxx_spi4_calx.s.prt1 = port++;
- srxx_spi4_calx.s.prt2 = port++;
- srxx_spi4_calx.s.prt3 = port++;
- srxx_spi4_calx.s.oddpar =
- ~(cvmx_dpop(srxx_spi4_calx.u64) & 1);
- cvmx_write_csr(CVMX_SRXX_SPI4_CALX(index, interface),
- srxx_spi4_calx.u64);
- index++;
- }
- srxx_spi4_stat.u64 = 0;
- srxx_spi4_stat.s.len = num_ports;
- srxx_spi4_stat.s.m = 1;
- cvmx_write_csr(CVMX_SRXX_SPI4_STAT(interface),
- srxx_spi4_stat.u64);
- }
-
- if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
- union cvmx_stxx_arb_ctl stxx_arb_ctl;
- union cvmx_gmxx_tx_spi_max gmxx_tx_spi_max;
- union cvmx_gmxx_tx_spi_thresh gmxx_tx_spi_thresh;
- union cvmx_gmxx_tx_spi_ctl gmxx_tx_spi_ctl;
- union cvmx_stxx_spi4_stat stxx_spi4_stat;
- union cvmx_stxx_spi4_dat stxx_spi4_dat;
-
- /* STX0 Config */
- stxx_arb_ctl.u64 = 0;
- stxx_arb_ctl.s.igntpa = 0;
- stxx_arb_ctl.s.mintrn = 0;
- cvmx_write_csr(CVMX_STXX_ARB_CTL(interface), stxx_arb_ctl.u64);
-
- gmxx_tx_spi_max.u64 = 0;
- gmxx_tx_spi_max.s.max1 = 8;
- gmxx_tx_spi_max.s.max2 = 4;
- gmxx_tx_spi_max.s.slice = 0;
- cvmx_write_csr(CVMX_GMXX_TX_SPI_MAX(interface),
- gmxx_tx_spi_max.u64);
-
- gmxx_tx_spi_thresh.u64 = 0;
- gmxx_tx_spi_thresh.s.thresh = 4;
- cvmx_write_csr(CVMX_GMXX_TX_SPI_THRESH(interface),
- gmxx_tx_spi_thresh.u64);
-
- gmxx_tx_spi_ctl.u64 = 0;
- gmxx_tx_spi_ctl.s.tpa_clr = 0;
- gmxx_tx_spi_ctl.s.cont_pkt = 0;
- cvmx_write_csr(CVMX_GMXX_TX_SPI_CTL(interface),
- gmxx_tx_spi_ctl.u64);
-
- /* STX0 Training Control */
- stxx_spi4_dat.u64 = 0;
- /*Minimum needed by dynamic alignment */
- stxx_spi4_dat.s.alpha = 32;
- stxx_spi4_dat.s.max_t = 0xFFFF; /*Minimum interval is 0x20 */
- cvmx_write_csr(CVMX_STXX_SPI4_DAT(interface),
- stxx_spi4_dat.u64);
-
-		/* STX0 Calendar Table. This round robins through all ports */
- port = 0;
- index = 0;
- while (port < num_ports) {
- union cvmx_stxx_spi4_calx stxx_spi4_calx;
- stxx_spi4_calx.u64 = 0;
- stxx_spi4_calx.s.prt0 = port++;
- stxx_spi4_calx.s.prt1 = port++;
- stxx_spi4_calx.s.prt2 = port++;
- stxx_spi4_calx.s.prt3 = port++;
- stxx_spi4_calx.s.oddpar =
- ~(cvmx_dpop(stxx_spi4_calx.u64) & 1);
- cvmx_write_csr(CVMX_STXX_SPI4_CALX(index, interface),
- stxx_spi4_calx.u64);
- index++;
- }
- stxx_spi4_stat.u64 = 0;
- stxx_spi4_stat.s.len = num_ports;
- stxx_spi4_stat.s.m = 1;
- cvmx_write_csr(CVMX_STXX_SPI4_STAT(interface),
- stxx_spi4_stat.u64);
- }
-
- return 0;
-}
-
-/**
- * Callback to perform clock detection
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- * @timeout: Timeout to wait for clock synchronization in seconds
- *
- * Returns Zero on success, non-zero error code on failure (will cause
- * SPI initialization to abort)
- */
-int cvmx_spi_clock_detect_cb(int interface, cvmx_spi_mode_t mode, int timeout)
-{
- int clock_transitions;
- union cvmx_spxx_clk_stat stat;
- uint64_t timeout_time;
- uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
-
- /*
- * Regardless of operating mode, both Tx and Rx clocks must be
- * present for the SPI interface to operate.
- */
- cvmx_dprintf("SPI%d: Waiting to see TsClk...\n", interface);
- timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
- /*
- * Require 100 clock transitions in order to avoid any noise
- * in the beginning.
- */
- clock_transitions = 100;
- do {
- stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
- if (stat.s.s4clk0 && stat.s.s4clk1 && clock_transitions) {
- /*
- * We've seen a clock transition, so decrement
- * the number we still need.
- */
- clock_transitions--;
- cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
- stat.s.s4clk0 = 0;
- stat.s.s4clk1 = 0;
- }
- if (cvmx_get_cycle() > timeout_time) {
- cvmx_dprintf("SPI%d: Timeout\n", interface);
- return -1;
- }
- } while (stat.s.s4clk0 == 0 || stat.s.s4clk1 == 0);
-
- cvmx_dprintf("SPI%d: Waiting to see RsClk...\n", interface);
- timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
- /*
- * Require 100 clock transitions in order to avoid any noise in the
- * beginning.
- */
- clock_transitions = 100;
- do {
- stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
- if (stat.s.d4clk0 && stat.s.d4clk1 && clock_transitions) {
- /*
- * We've seen a clock transition, so decrement
- * the number we still need
- */
- clock_transitions--;
- cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
- stat.s.d4clk0 = 0;
- stat.s.d4clk1 = 0;
- }
- if (cvmx_get_cycle() > timeout_time) {
- cvmx_dprintf("SPI%d: Timeout\n", interface);
- return -1;
- }
- } while (stat.s.d4clk0 == 0 || stat.s.d4clk1 == 0);
-
- return 0;
-}
-
-/**
- * Callback to perform link training
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- * @timeout: Timeout to wait for link to be trained (in seconds)
- *
- * Returns Zero on success, non-zero error code on failure (will cause
- * SPI initialization to abort)
- */
-int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode, int timeout)
-{
- union cvmx_spxx_trn4_ctl spxx_trn4_ctl;
- union cvmx_spxx_clk_stat stat;
- uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
- uint64_t timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
- int rx_training_needed;
-
- /* SRX0 & STX0 Inf0 Links are configured - begin training */
- union cvmx_spxx_clk_ctl spxx_clk_ctl;
- spxx_clk_ctl.u64 = 0;
- spxx_clk_ctl.s.seetrn = 0;
- spxx_clk_ctl.s.clkdly = 0x10;
- spxx_clk_ctl.s.runbist = 0;
- spxx_clk_ctl.s.statdrv = 0;
- /* This should always be on the opposite edge as statdrv */
- spxx_clk_ctl.s.statrcv = 1;
- spxx_clk_ctl.s.sndtrn = 1;
- spxx_clk_ctl.s.drptrn = 1;
- spxx_clk_ctl.s.rcvtrn = 1;
- spxx_clk_ctl.s.srxdlck = 1;
- cvmx_write_csr(CVMX_SPXX_CLK_CTL(interface), spxx_clk_ctl.u64);
- cvmx_wait(1000 * MS);
-
- /* SRX0 clear the boot bit */
- spxx_trn4_ctl.u64 = cvmx_read_csr(CVMX_SPXX_TRN4_CTL(interface));
- spxx_trn4_ctl.s.clr_boot = 1;
- cvmx_write_csr(CVMX_SPXX_TRN4_CTL(interface), spxx_trn4_ctl.u64);
-
- /* Wait for the training sequence to complete */
- cvmx_dprintf("SPI%d: Waiting for training\n", interface);
- cvmx_wait(1000 * MS);
- /* Wait a really long time here */
- timeout_time = cvmx_get_cycle() + 1000ull * MS * 600;
- /*
- * The HRM says we must wait for 34 + 16 * MAXDIST training sequences.
- * We'll be pessimistic and wait for a lot more.
- */
- rx_training_needed = 500;
- do {
- stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
- if (stat.s.srxtrn && rx_training_needed) {
- rx_training_needed--;
- cvmx_write_csr(CVMX_SPXX_CLK_STAT(interface), stat.u64);
- stat.s.srxtrn = 0;
- }
- if (cvmx_get_cycle() > timeout_time) {
- cvmx_dprintf("SPI%d: Timeout\n", interface);
- return -1;
- }
- } while (stat.s.srxtrn == 0);
-
- return 0;
-}
-
-/**
- * Callback to perform calendar data synchronization
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- * @timeout: Timeout to wait for calendar data in seconds
- *
- * Returns Zero on success, non-zero error code on failure (will cause
- * SPI initialization to abort)
- */
-int cvmx_spi_calendar_sync_cb(int interface, cvmx_spi_mode_t mode, int timeout)
-{
- uint64_t MS = cvmx_sysinfo_get()->cpu_clock_hz / 1000;
- if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
- /* SRX0 interface should be good, send calendar data */
- union cvmx_srxx_com_ctl srxx_com_ctl;
- cvmx_dprintf
- ("SPI%d: Rx is synchronized, start sending calendar data\n",
- interface);
- srxx_com_ctl.u64 = cvmx_read_csr(CVMX_SRXX_COM_CTL(interface));
- srxx_com_ctl.s.inf_en = 1;
- srxx_com_ctl.s.st_en = 1;
- cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
- }
-
- if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
- /* STX0 has achieved sync */
-		/* The correspondent board should be sending calendar data */
- /* Enable the STX0 STAT receiver. */
- union cvmx_spxx_clk_stat stat;
- uint64_t timeout_time;
- union cvmx_stxx_com_ctl stxx_com_ctl;
- stxx_com_ctl.u64 = 0;
- stxx_com_ctl.s.st_en = 1;
- cvmx_write_csr(CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);
-
- /* Waiting for calendar sync on STX0 STAT */
- cvmx_dprintf("SPI%d: Waiting to sync on STX[%d] STAT\n",
- interface, interface);
- timeout_time = cvmx_get_cycle() + 1000ull * MS * timeout;
-		/* SPX0_CLK_STAT[STXCAL] should be 1 (bit 10) */
- do {
- stat.u64 = cvmx_read_csr(CVMX_SPXX_CLK_STAT(interface));
- if (cvmx_get_cycle() > timeout_time) {
- cvmx_dprintf("SPI%d: Timeout\n", interface);
- return -1;
- }
- } while (stat.s.stxcal == 0);
- }
-
- return 0;
-}
-
-/**
- * Callback to handle interface up
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- *
- * Returns Zero on success, non-zero error code on failure (will cause
- * SPI initialization to abort)
- */
-int cvmx_spi_interface_up_cb(int interface, cvmx_spi_mode_t mode)
-{
- union cvmx_gmxx_rxx_frm_min gmxx_rxx_frm_min;
- union cvmx_gmxx_rxx_frm_max gmxx_rxx_frm_max;
- union cvmx_gmxx_rxx_jabber gmxx_rxx_jabber;
-
- if (mode & CVMX_SPI_MODE_RX_HALFPLEX) {
- union cvmx_srxx_com_ctl srxx_com_ctl;
- srxx_com_ctl.u64 = cvmx_read_csr(CVMX_SRXX_COM_CTL(interface));
- srxx_com_ctl.s.inf_en = 1;
- cvmx_write_csr(CVMX_SRXX_COM_CTL(interface), srxx_com_ctl.u64);
- cvmx_dprintf("SPI%d: Rx is now up\n", interface);
- }
-
- if (mode & CVMX_SPI_MODE_TX_HALFPLEX) {
- union cvmx_stxx_com_ctl stxx_com_ctl;
- stxx_com_ctl.u64 = cvmx_read_csr(CVMX_STXX_COM_CTL(interface));
- stxx_com_ctl.s.inf_en = 1;
- cvmx_write_csr(CVMX_STXX_COM_CTL(interface), stxx_com_ctl.u64);
- cvmx_dprintf("SPI%d: Tx is now up\n", interface);
- }
-
- gmxx_rxx_frm_min.u64 = 0;
- gmxx_rxx_frm_min.s.len = 64;
- cvmx_write_csr(CVMX_GMXX_RXX_FRM_MIN(0, interface),
- gmxx_rxx_frm_min.u64);
- gmxx_rxx_frm_max.u64 = 0;
- gmxx_rxx_frm_max.s.len = 64 * 1024 - 4;
- cvmx_write_csr(CVMX_GMXX_RXX_FRM_MAX(0, interface),
- gmxx_rxx_frm_max.u64);
- gmxx_rxx_jabber.u64 = 0;
- gmxx_rxx_jabber.s.cnt = 64 * 1024 - 4;
- cvmx_write_csr(CVMX_GMXX_RXX_JABBER(0, interface), gmxx_rxx_jabber.u64);
-
- return 0;
-}
diff --git a/drivers/staging/octeon/cvmx-spi.h b/drivers/staging/octeon/cvmx-spi.h
deleted file mode 100644
index e814648..0000000
--- a/drivers/staging/octeon/cvmx-spi.h
+++ /dev/null
@@ -1,269 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/*
- *
- * This file contains defines for the SPI interface
- */
-#ifndef __CVMX_SPI_H__
-#define __CVMX_SPI_H__
-
-#include "cvmx-gmxx-defs.h"
-
-/* CSR typedefs have been moved to cvmx-csr-*.h */
-
-typedef enum {
- CVMX_SPI_MODE_UNKNOWN = 0,
- CVMX_SPI_MODE_TX_HALFPLEX = 1,
- CVMX_SPI_MODE_RX_HALFPLEX = 2,
- CVMX_SPI_MODE_DUPLEX = 3
-} cvmx_spi_mode_t;
-
-/** Callbacks structure to customize SPI4 initialization sequence */
-typedef struct {
- /** Called to reset SPI4 DLL */
- int (*reset_cb) (int interface, cvmx_spi_mode_t mode);
-
- /** Called to setup calendar */
- int (*calendar_setup_cb) (int interface, cvmx_spi_mode_t mode,
- int num_ports);
-
- /** Called for Tx and Rx clock detection */
- int (*clock_detect_cb) (int interface, cvmx_spi_mode_t mode,
- int timeout);
-
- /** Called to perform link training */
- int (*training_cb) (int interface, cvmx_spi_mode_t mode, int timeout);
-
- /** Called for calendar data synchronization */
- int (*calendar_sync_cb) (int interface, cvmx_spi_mode_t mode,
- int timeout);
-
- /** Called when interface is up */
- int (*interface_up_cb) (int interface, cvmx_spi_mode_t mode);
-
-} cvmx_spi_callbacks_t;
-
-/**
- * Return true if the supplied interface is configured for SPI
- *
- * @interface: Interface to check
- * Returns True if interface is SPI
- */
-static inline int cvmx_spi_is_spi_interface(int interface)
-{
- uint64_t gmxState = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
- return (gmxState & 0x2) && (gmxState & 0x1);
-}
-
-/**
- * Initialize and start the SPI interface.
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- * @timeout: Timeout to wait for clock synchronization in seconds
- * @num_ports: Number of SPI ports to configure
- *
- * Returns Zero on success, negative on failure.
- */
-extern int cvmx_spi_start_interface(int interface, cvmx_spi_mode_t mode,
- int timeout, int num_ports);
-
-/**
- * This routine restarts the SPI interface after it has lost synchronization
- * with its correspondent system.
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- * @timeout: Timeout to wait for clock synchronization in seconds
- * Returns Zero on success, negative on failure.
- */
-extern int cvmx_spi_restart_interface(int interface, cvmx_spi_mode_t mode,
- int timeout);
-
-/**
- * Return non-zero if the SPI interface has a SPI4000 attached
- *
- * @interface: SPI interface the SPI4000 is connected to
- *
- * Returns Non-zero if a SPI4000 is present, zero otherwise.
- */
-static inline int cvmx_spi4000_is_present(int interface)
-{
- return 0;
-}
-
-/**
- * Initialize the SPI4000 for use
- *
- * @interface: SPI interface the SPI4000 is connected to
- */
-static inline int cvmx_spi4000_initialize(int interface)
-{
- return 0;
-}
-
-/**
- * Poll the given SPI4000 port and check its speed
- *
- * @interface: Interface the SPI4000 is on
- * @port: Port to poll (0-9)
- * Returns Status of the port. 0=down. Any other value means the port is up.
- */
-static inline union cvmx_gmxx_rxx_rx_inbnd cvmx_spi4000_check_speed(
- int interface,
- int port)
-{
- union cvmx_gmxx_rxx_rx_inbnd r;
- r.u64 = 0;
- return r;
-}
-
-/**
- * Get current SPI4 initialization callbacks
- *
- * @callbacks: Pointer to the callbacks structure to fill
- *
- * The current callbacks are copied into the supplied structure.
- */
-extern void cvmx_spi_get_callbacks(cvmx_spi_callbacks_t *callbacks);
-
-/**
- * Set new SPI4 initialization callbacks
- *
- * @new_callbacks: Pointer to an updated callbacks structure.
- */
-extern void cvmx_spi_set_callbacks(cvmx_spi_callbacks_t *new_callbacks);
-
-/**
- * Callback to perform SPI4 reset
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- *
- * Returns Zero on success, non-zero error code on failure (will cause
- * SPI initialization to abort)
- */
-extern int cvmx_spi_reset_cb(int interface, cvmx_spi_mode_t mode);
-
-/**
- * Callback to setup calendar and miscellaneous settings before clock
- * detection
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- * @num_ports: Number of ports to configure on SPI
- *
- * Returns Zero on success, non-zero error code on failure (will cause
- * SPI initialization to abort)
- */
-extern int cvmx_spi_calendar_setup_cb(int interface, cvmx_spi_mode_t mode,
- int num_ports);
-
-/**
- * Callback to perform clock detection
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- * @timeout: Timeout to wait for clock synchronization in seconds
- *
- * Returns Zero on success, non-zero error code on failure (will cause
- * SPI initialization to abort)
- */
-extern int cvmx_spi_clock_detect_cb(int interface, cvmx_spi_mode_t mode,
- int timeout);
-
-/**
- * Callback to perform link training
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- * @timeout: Timeout to wait for link to be trained (in seconds)
- *
- * Returns Zero on success, non-zero error code on failure (will cause
- * SPI initialization to abort)
- */
-extern int cvmx_spi_training_cb(int interface, cvmx_spi_mode_t mode,
- int timeout);
-
-/**
- * Callback to perform calendar data synchronization
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- * @timeout: Timeout to wait for calendar data in seconds
- *
- * Returns Zero on success, non-zero error code on failure (will cause
- * SPI initialization to abort)
- */
-extern int cvmx_spi_calendar_sync_cb(int interface, cvmx_spi_mode_t mode,
- int timeout);
-
-/**
- * Callback to handle interface up
- *
- * @interface: The identifier of the packet interface to configure and
- * use as a SPI interface.
- * @mode: The operating mode for the SPI interface. The interface
- * can operate as a full duplex (both Tx and Rx data paths
- * active) or as a halfplex (either the Tx data path is
- * active or the Rx data path is active, but not both).
- *
- * Returns Zero on success, non-zero error code on failure (will cause
- * SPI initialization to abort)
- */
-extern int cvmx_spi_interface_up_cb(int interface, cvmx_spi_mode_t mode);
-
-#endif /* __CVMX_SPI_H__ */
diff --git a/drivers/staging/octeon/cvmx-spxx-defs.h b/drivers/staging/octeon/cvmx-spxx-defs.h
deleted file mode 100644
index b16940e..0000000
--- a/drivers/staging/octeon/cvmx-spxx-defs.h
+++ /dev/null
@@ -1,347 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_SPXX_DEFS_H__
-#define __CVMX_SPXX_DEFS_H__
-
-#define CVMX_SPXX_BCKPRS_CNT(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000340ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_BIST_STAT(block_id) \
- CVMX_ADD_IO_SEG(0x00011800900007F8ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_CLK_CTL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000348ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_CLK_STAT(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000350ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_DBG_DESKEW_CTL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000368ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_DBG_DESKEW_STATE(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000370ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_DRV_CTL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000358ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_ERR_CTL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000320ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_INT_DAT(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000318ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_INT_MSK(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000308ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_INT_REG(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000300ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_INT_SYNC(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000310ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_TPA_ACC(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000338ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_TPA_MAX(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000330ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_TPA_SEL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000328ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SPXX_TRN4_CTL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000360ull + (((block_id) & 1) * 0x8000000ull))
-
-union cvmx_spxx_bckprs_cnt {
- uint64_t u64;
- struct cvmx_spxx_bckprs_cnt_s {
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
- } s;
- struct cvmx_spxx_bckprs_cnt_s cn38xx;
- struct cvmx_spxx_bckprs_cnt_s cn38xxp2;
- struct cvmx_spxx_bckprs_cnt_s cn58xx;
- struct cvmx_spxx_bckprs_cnt_s cn58xxp1;
-};
-
-union cvmx_spxx_bist_stat {
- uint64_t u64;
- struct cvmx_spxx_bist_stat_s {
- uint64_t reserved_3_63:61;
- uint64_t stat2:1;
- uint64_t stat1:1;
- uint64_t stat0:1;
- } s;
- struct cvmx_spxx_bist_stat_s cn38xx;
- struct cvmx_spxx_bist_stat_s cn38xxp2;
- struct cvmx_spxx_bist_stat_s cn58xx;
- struct cvmx_spxx_bist_stat_s cn58xxp1;
-};
-
-union cvmx_spxx_clk_ctl {
- uint64_t u64;
- struct cvmx_spxx_clk_ctl_s {
- uint64_t reserved_17_63:47;
- uint64_t seetrn:1;
- uint64_t reserved_12_15:4;
- uint64_t clkdly:5;
- uint64_t runbist:1;
- uint64_t statdrv:1;
- uint64_t statrcv:1;
- uint64_t sndtrn:1;
- uint64_t drptrn:1;
- uint64_t rcvtrn:1;
- uint64_t srxdlck:1;
- } s;
- struct cvmx_spxx_clk_ctl_s cn38xx;
- struct cvmx_spxx_clk_ctl_s cn38xxp2;
- struct cvmx_spxx_clk_ctl_s cn58xx;
- struct cvmx_spxx_clk_ctl_s cn58xxp1;
-};
-
-union cvmx_spxx_clk_stat {
- uint64_t u64;
- struct cvmx_spxx_clk_stat_s {
- uint64_t reserved_11_63:53;
- uint64_t stxcal:1;
- uint64_t reserved_9_9:1;
- uint64_t srxtrn:1;
- uint64_t s4clk1:1;
- uint64_t s4clk0:1;
- uint64_t d4clk1:1;
- uint64_t d4clk0:1;
- uint64_t reserved_0_3:4;
- } s;
- struct cvmx_spxx_clk_stat_s cn38xx;
- struct cvmx_spxx_clk_stat_s cn38xxp2;
- struct cvmx_spxx_clk_stat_s cn58xx;
- struct cvmx_spxx_clk_stat_s cn58xxp1;
-};
-
-union cvmx_spxx_dbg_deskew_ctl {
- uint64_t u64;
- struct cvmx_spxx_dbg_deskew_ctl_s {
- uint64_t reserved_30_63:34;
- uint64_t fallnop:1;
- uint64_t fall8:1;
- uint64_t reserved_26_27:2;
- uint64_t sstep_go:1;
- uint64_t sstep:1;
- uint64_t reserved_22_23:2;
- uint64_t clrdly:1;
- uint64_t dec:1;
- uint64_t inc:1;
- uint64_t mux:1;
- uint64_t offset:5;
- uint64_t bitsel:5;
- uint64_t offdly:6;
- uint64_t dllfrc:1;
- uint64_t dlldis:1;
- } s;
- struct cvmx_spxx_dbg_deskew_ctl_s cn38xx;
- struct cvmx_spxx_dbg_deskew_ctl_s cn38xxp2;
- struct cvmx_spxx_dbg_deskew_ctl_s cn58xx;
- struct cvmx_spxx_dbg_deskew_ctl_s cn58xxp1;
-};
-
-union cvmx_spxx_dbg_deskew_state {
- uint64_t u64;
- struct cvmx_spxx_dbg_deskew_state_s {
- uint64_t reserved_9_63:55;
- uint64_t testres:1;
- uint64_t unxterm:1;
- uint64_t muxsel:2;
- uint64_t offset:5;
- } s;
- struct cvmx_spxx_dbg_deskew_state_s cn38xx;
- struct cvmx_spxx_dbg_deskew_state_s cn38xxp2;
- struct cvmx_spxx_dbg_deskew_state_s cn58xx;
- struct cvmx_spxx_dbg_deskew_state_s cn58xxp1;
-};
-
-union cvmx_spxx_drv_ctl {
- uint64_t u64;
- struct cvmx_spxx_drv_ctl_s {
- uint64_t reserved_0_63:64;
- } s;
- struct cvmx_spxx_drv_ctl_cn38xx {
- uint64_t reserved_16_63:48;
- uint64_t stx4ncmp:4;
- uint64_t stx4pcmp:4;
- uint64_t srx4cmp:8;
- } cn38xx;
- struct cvmx_spxx_drv_ctl_cn38xx cn38xxp2;
- struct cvmx_spxx_drv_ctl_cn58xx {
- uint64_t reserved_24_63:40;
- uint64_t stx4ncmp:4;
- uint64_t stx4pcmp:4;
- uint64_t reserved_10_15:6;
- uint64_t srx4cmp:10;
- } cn58xx;
- struct cvmx_spxx_drv_ctl_cn58xx cn58xxp1;
-};
-
-union cvmx_spxx_err_ctl {
- uint64_t u64;
- struct cvmx_spxx_err_ctl_s {
- uint64_t reserved_9_63:55;
- uint64_t prtnxa:1;
- uint64_t dipcls:1;
- uint64_t dippay:1;
- uint64_t reserved_4_5:2;
- uint64_t errcnt:4;
- } s;
- struct cvmx_spxx_err_ctl_s cn38xx;
- struct cvmx_spxx_err_ctl_s cn38xxp2;
- struct cvmx_spxx_err_ctl_s cn58xx;
- struct cvmx_spxx_err_ctl_s cn58xxp1;
-};
-
-union cvmx_spxx_int_dat {
- uint64_t u64;
- struct cvmx_spxx_int_dat_s {
- uint64_t reserved_32_63:32;
- uint64_t mul:1;
- uint64_t reserved_14_30:17;
- uint64_t calbnk:2;
- uint64_t rsvop:4;
- uint64_t prt:8;
- } s;
- struct cvmx_spxx_int_dat_s cn38xx;
- struct cvmx_spxx_int_dat_s cn38xxp2;
- struct cvmx_spxx_int_dat_s cn58xx;
- struct cvmx_spxx_int_dat_s cn58xxp1;
-};
-
-union cvmx_spxx_int_msk {
- uint64_t u64;
- struct cvmx_spxx_int_msk_s {
- uint64_t reserved_12_63:52;
- uint64_t calerr:1;
- uint64_t syncerr:1;
- uint64_t diperr:1;
- uint64_t tpaovr:1;
- uint64_t rsverr:1;
- uint64_t drwnng:1;
- uint64_t clserr:1;
- uint64_t spiovr:1;
- uint64_t reserved_2_3:2;
- uint64_t abnorm:1;
- uint64_t prtnxa:1;
- } s;
- struct cvmx_spxx_int_msk_s cn38xx;
- struct cvmx_spxx_int_msk_s cn38xxp2;
- struct cvmx_spxx_int_msk_s cn58xx;
- struct cvmx_spxx_int_msk_s cn58xxp1;
-};
-
-union cvmx_spxx_int_reg {
- uint64_t u64;
- struct cvmx_spxx_int_reg_s {
- uint64_t reserved_32_63:32;
- uint64_t spf:1;
- uint64_t reserved_12_30:19;
- uint64_t calerr:1;
- uint64_t syncerr:1;
- uint64_t diperr:1;
- uint64_t tpaovr:1;
- uint64_t rsverr:1;
- uint64_t drwnng:1;
- uint64_t clserr:1;
- uint64_t spiovr:1;
- uint64_t reserved_2_3:2;
- uint64_t abnorm:1;
- uint64_t prtnxa:1;
- } s;
- struct cvmx_spxx_int_reg_s cn38xx;
- struct cvmx_spxx_int_reg_s cn38xxp2;
- struct cvmx_spxx_int_reg_s cn58xx;
- struct cvmx_spxx_int_reg_s cn58xxp1;
-};
-
-union cvmx_spxx_int_sync {
- uint64_t u64;
- struct cvmx_spxx_int_sync_s {
- uint64_t reserved_12_63:52;
- uint64_t calerr:1;
- uint64_t syncerr:1;
- uint64_t diperr:1;
- uint64_t tpaovr:1;
- uint64_t rsverr:1;
- uint64_t drwnng:1;
- uint64_t clserr:1;
- uint64_t spiovr:1;
- uint64_t reserved_2_3:2;
- uint64_t abnorm:1;
- uint64_t prtnxa:1;
- } s;
- struct cvmx_spxx_int_sync_s cn38xx;
- struct cvmx_spxx_int_sync_s cn38xxp2;
- struct cvmx_spxx_int_sync_s cn58xx;
- struct cvmx_spxx_int_sync_s cn58xxp1;
-};
-
-union cvmx_spxx_tpa_acc {
- uint64_t u64;
- struct cvmx_spxx_tpa_acc_s {
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
- } s;
- struct cvmx_spxx_tpa_acc_s cn38xx;
- struct cvmx_spxx_tpa_acc_s cn38xxp2;
- struct cvmx_spxx_tpa_acc_s cn58xx;
- struct cvmx_spxx_tpa_acc_s cn58xxp1;
-};
-
-union cvmx_spxx_tpa_max {
- uint64_t u64;
- struct cvmx_spxx_tpa_max_s {
- uint64_t reserved_32_63:32;
- uint64_t max:32;
- } s;
- struct cvmx_spxx_tpa_max_s cn38xx;
- struct cvmx_spxx_tpa_max_s cn38xxp2;
- struct cvmx_spxx_tpa_max_s cn58xx;
- struct cvmx_spxx_tpa_max_s cn58xxp1;
-};
-
-union cvmx_spxx_tpa_sel {
- uint64_t u64;
- struct cvmx_spxx_tpa_sel_s {
- uint64_t reserved_4_63:60;
- uint64_t prtsel:4;
- } s;
- struct cvmx_spxx_tpa_sel_s cn38xx;
- struct cvmx_spxx_tpa_sel_s cn38xxp2;
- struct cvmx_spxx_tpa_sel_s cn58xx;
- struct cvmx_spxx_tpa_sel_s cn58xxp1;
-};
-
-union cvmx_spxx_trn4_ctl {
- uint64_t u64;
- struct cvmx_spxx_trn4_ctl_s {
- uint64_t reserved_13_63:51;
- uint64_t trntest:1;
- uint64_t jitter:3;
- uint64_t clr_boot:1;
- uint64_t set_boot:1;
- uint64_t maxdist:5;
- uint64_t macro_en:1;
- uint64_t mux_en:1;
- } s;
- struct cvmx_spxx_trn4_ctl_s cn38xx;
- struct cvmx_spxx_trn4_ctl_s cn38xxp2;
- struct cvmx_spxx_trn4_ctl_s cn58xx;
- struct cvmx_spxx_trn4_ctl_s cn58xxp1;
-};
-
-#endif
diff --git a/drivers/staging/octeon/cvmx-srxx-defs.h b/drivers/staging/octeon/cvmx-srxx-defs.h
deleted file mode 100644
index d82b366..0000000
--- a/drivers/staging/octeon/cvmx-srxx-defs.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_SRXX_DEFS_H__
-#define __CVMX_SRXX_DEFS_H__
-
-#define CVMX_SRXX_COM_CTL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000200ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SRXX_IGN_RX_FULL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000218ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SRXX_SPI4_CALX(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000000ull + (((offset) & 31) * 8) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SRXX_SPI4_STAT(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000208ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SRXX_SW_TICK_CTL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000220ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_SRXX_SW_TICK_DAT(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000228ull + (((block_id) & 1) * 0x8000000ull))
-
-union cvmx_srxx_com_ctl {
- uint64_t u64;
- struct cvmx_srxx_com_ctl_s {
- uint64_t reserved_8_63:56;
- uint64_t prts:4;
- uint64_t st_en:1;
- uint64_t reserved_1_2:2;
- uint64_t inf_en:1;
- } s;
- struct cvmx_srxx_com_ctl_s cn38xx;
- struct cvmx_srxx_com_ctl_s cn38xxp2;
- struct cvmx_srxx_com_ctl_s cn58xx;
- struct cvmx_srxx_com_ctl_s cn58xxp1;
-};
-
-union cvmx_srxx_ign_rx_full {
- uint64_t u64;
- struct cvmx_srxx_ign_rx_full_s {
- uint64_t reserved_16_63:48;
- uint64_t ignore:16;
- } s;
- struct cvmx_srxx_ign_rx_full_s cn38xx;
- struct cvmx_srxx_ign_rx_full_s cn38xxp2;
- struct cvmx_srxx_ign_rx_full_s cn58xx;
- struct cvmx_srxx_ign_rx_full_s cn58xxp1;
-};
-
-union cvmx_srxx_spi4_calx {
- uint64_t u64;
- struct cvmx_srxx_spi4_calx_s {
- uint64_t reserved_17_63:47;
- uint64_t oddpar:1;
- uint64_t prt3:4;
- uint64_t prt2:4;
- uint64_t prt1:4;
- uint64_t prt0:4;
- } s;
- struct cvmx_srxx_spi4_calx_s cn38xx;
- struct cvmx_srxx_spi4_calx_s cn38xxp2;
- struct cvmx_srxx_spi4_calx_s cn58xx;
- struct cvmx_srxx_spi4_calx_s cn58xxp1;
-};
-
-union cvmx_srxx_spi4_stat {
- uint64_t u64;
- struct cvmx_srxx_spi4_stat_s {
- uint64_t reserved_16_63:48;
- uint64_t m:8;
- uint64_t reserved_7_7:1;
- uint64_t len:7;
- } s;
- struct cvmx_srxx_spi4_stat_s cn38xx;
- struct cvmx_srxx_spi4_stat_s cn38xxp2;
- struct cvmx_srxx_spi4_stat_s cn58xx;
- struct cvmx_srxx_spi4_stat_s cn58xxp1;
-};
-
-union cvmx_srxx_sw_tick_ctl {
- uint64_t u64;
- struct cvmx_srxx_sw_tick_ctl_s {
- uint64_t reserved_14_63:50;
- uint64_t eop:1;
- uint64_t sop:1;
- uint64_t mod:4;
- uint64_t opc:4;
- uint64_t adr:4;
- } s;
- struct cvmx_srxx_sw_tick_ctl_s cn38xx;
- struct cvmx_srxx_sw_tick_ctl_s cn58xx;
- struct cvmx_srxx_sw_tick_ctl_s cn58xxp1;
-};
-
-union cvmx_srxx_sw_tick_dat {
- uint64_t u64;
- struct cvmx_srxx_sw_tick_dat_s {
- uint64_t dat:64;
- } s;
- struct cvmx_srxx_sw_tick_dat_s cn38xx;
- struct cvmx_srxx_sw_tick_dat_s cn58xx;
- struct cvmx_srxx_sw_tick_dat_s cn58xxp1;
-};
-
-#endif
diff --git a/drivers/staging/octeon/cvmx-stxx-defs.h b/drivers/staging/octeon/cvmx-stxx-defs.h
deleted file mode 100644
index 4f209b6..0000000
--- a/drivers/staging/octeon/cvmx-stxx-defs.h
+++ /dev/null
@@ -1,292 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_STXX_DEFS_H__
-#define __CVMX_STXX_DEFS_H__
-
-#define CVMX_STXX_ARB_CTL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000608ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_BCKPRS_CNT(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000688ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_COM_CTL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000600ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_DIP_CNT(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000690ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_IGN_CAL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000610ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_INT_MSK(block_id) \
- CVMX_ADD_IO_SEG(0x00011800900006A0ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_INT_REG(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000698ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_INT_SYNC(block_id) \
- CVMX_ADD_IO_SEG(0x00011800900006A8ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_MIN_BST(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000618ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_SPI4_CALX(offset, block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000400ull + (((offset) & 31) * 8) + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_SPI4_DAT(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000628ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_SPI4_STAT(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000630ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_STAT_BYTES_HI(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000648ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_STAT_BYTES_LO(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000680ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_STAT_CTL(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000638ull + (((block_id) & 1) * 0x8000000ull))
-#define CVMX_STXX_STAT_PKT_XMT(block_id) \
- CVMX_ADD_IO_SEG(0x0001180090000640ull + (((block_id) & 1) * 0x8000000ull))
-
-union cvmx_stxx_arb_ctl {
- uint64_t u64;
- struct cvmx_stxx_arb_ctl_s {
- uint64_t reserved_6_63:58;
- uint64_t mintrn:1;
- uint64_t reserved_4_4:1;
- uint64_t igntpa:1;
- uint64_t reserved_0_2:3;
- } s;
- struct cvmx_stxx_arb_ctl_s cn38xx;
- struct cvmx_stxx_arb_ctl_s cn38xxp2;
- struct cvmx_stxx_arb_ctl_s cn58xx;
- struct cvmx_stxx_arb_ctl_s cn58xxp1;
-};
-
-union cvmx_stxx_bckprs_cnt {
- uint64_t u64;
- struct cvmx_stxx_bckprs_cnt_s {
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
- } s;
- struct cvmx_stxx_bckprs_cnt_s cn38xx;
- struct cvmx_stxx_bckprs_cnt_s cn38xxp2;
- struct cvmx_stxx_bckprs_cnt_s cn58xx;
- struct cvmx_stxx_bckprs_cnt_s cn58xxp1;
-};
-
-union cvmx_stxx_com_ctl {
- uint64_t u64;
- struct cvmx_stxx_com_ctl_s {
- uint64_t reserved_4_63:60;
- uint64_t st_en:1;
- uint64_t reserved_1_2:2;
- uint64_t inf_en:1;
- } s;
- struct cvmx_stxx_com_ctl_s cn38xx;
- struct cvmx_stxx_com_ctl_s cn38xxp2;
- struct cvmx_stxx_com_ctl_s cn58xx;
- struct cvmx_stxx_com_ctl_s cn58xxp1;
-};
-
-union cvmx_stxx_dip_cnt {
- uint64_t u64;
- struct cvmx_stxx_dip_cnt_s {
- uint64_t reserved_8_63:56;
- uint64_t frmmax:4;
- uint64_t dipmax:4;
- } s;
- struct cvmx_stxx_dip_cnt_s cn38xx;
- struct cvmx_stxx_dip_cnt_s cn38xxp2;
- struct cvmx_stxx_dip_cnt_s cn58xx;
- struct cvmx_stxx_dip_cnt_s cn58xxp1;
-};
-
-union cvmx_stxx_ign_cal {
- uint64_t u64;
- struct cvmx_stxx_ign_cal_s {
- uint64_t reserved_16_63:48;
- uint64_t igntpa:16;
- } s;
- struct cvmx_stxx_ign_cal_s cn38xx;
- struct cvmx_stxx_ign_cal_s cn38xxp2;
- struct cvmx_stxx_ign_cal_s cn58xx;
- struct cvmx_stxx_ign_cal_s cn58xxp1;
-};
-
-union cvmx_stxx_int_msk {
- uint64_t u64;
- struct cvmx_stxx_int_msk_s {
- uint64_t reserved_8_63:56;
- uint64_t frmerr:1;
- uint64_t unxfrm:1;
- uint64_t nosync:1;
- uint64_t diperr:1;
- uint64_t datovr:1;
- uint64_t ovrbst:1;
- uint64_t calpar1:1;
- uint64_t calpar0:1;
- } s;
- struct cvmx_stxx_int_msk_s cn38xx;
- struct cvmx_stxx_int_msk_s cn38xxp2;
- struct cvmx_stxx_int_msk_s cn58xx;
- struct cvmx_stxx_int_msk_s cn58xxp1;
-};
-
-union cvmx_stxx_int_reg {
- uint64_t u64;
- struct cvmx_stxx_int_reg_s {
- uint64_t reserved_9_63:55;
- uint64_t syncerr:1;
- uint64_t frmerr:1;
- uint64_t unxfrm:1;
- uint64_t nosync:1;
- uint64_t diperr:1;
- uint64_t datovr:1;
- uint64_t ovrbst:1;
- uint64_t calpar1:1;
- uint64_t calpar0:1;
- } s;
- struct cvmx_stxx_int_reg_s cn38xx;
- struct cvmx_stxx_int_reg_s cn38xxp2;
- struct cvmx_stxx_int_reg_s cn58xx;
- struct cvmx_stxx_int_reg_s cn58xxp1;
-};
-
-union cvmx_stxx_int_sync {
- uint64_t u64;
- struct cvmx_stxx_int_sync_s {
- uint64_t reserved_8_63:56;
- uint64_t frmerr:1;
- uint64_t unxfrm:1;
- uint64_t nosync:1;
- uint64_t diperr:1;
- uint64_t datovr:1;
- uint64_t ovrbst:1;
- uint64_t calpar1:1;
- uint64_t calpar0:1;
- } s;
- struct cvmx_stxx_int_sync_s cn38xx;
- struct cvmx_stxx_int_sync_s cn38xxp2;
- struct cvmx_stxx_int_sync_s cn58xx;
- struct cvmx_stxx_int_sync_s cn58xxp1;
-};
-
-union cvmx_stxx_min_bst {
- uint64_t u64;
- struct cvmx_stxx_min_bst_s {
- uint64_t reserved_9_63:55;
- uint64_t minb:9;
- } s;
- struct cvmx_stxx_min_bst_s cn38xx;
- struct cvmx_stxx_min_bst_s cn38xxp2;
- struct cvmx_stxx_min_bst_s cn58xx;
- struct cvmx_stxx_min_bst_s cn58xxp1;
-};
-
-union cvmx_stxx_spi4_calx {
- uint64_t u64;
- struct cvmx_stxx_spi4_calx_s {
- uint64_t reserved_17_63:47;
- uint64_t oddpar:1;
- uint64_t prt3:4;
- uint64_t prt2:4;
- uint64_t prt1:4;
- uint64_t prt0:4;
- } s;
- struct cvmx_stxx_spi4_calx_s cn38xx;
- struct cvmx_stxx_spi4_calx_s cn38xxp2;
- struct cvmx_stxx_spi4_calx_s cn58xx;
- struct cvmx_stxx_spi4_calx_s cn58xxp1;
-};
-
-union cvmx_stxx_spi4_dat {
- uint64_t u64;
- struct cvmx_stxx_spi4_dat_s {
- uint64_t reserved_32_63:32;
- uint64_t alpha:16;
- uint64_t max_t:16;
- } s;
- struct cvmx_stxx_spi4_dat_s cn38xx;
- struct cvmx_stxx_spi4_dat_s cn38xxp2;
- struct cvmx_stxx_spi4_dat_s cn58xx;
- struct cvmx_stxx_spi4_dat_s cn58xxp1;
-};
-
-union cvmx_stxx_spi4_stat {
- uint64_t u64;
- struct cvmx_stxx_spi4_stat_s {
- uint64_t reserved_16_63:48;
- uint64_t m:8;
- uint64_t reserved_7_7:1;
- uint64_t len:7;
- } s;
- struct cvmx_stxx_spi4_stat_s cn38xx;
- struct cvmx_stxx_spi4_stat_s cn38xxp2;
- struct cvmx_stxx_spi4_stat_s cn58xx;
- struct cvmx_stxx_spi4_stat_s cn58xxp1;
-};
-
-union cvmx_stxx_stat_bytes_hi {
- uint64_t u64;
- struct cvmx_stxx_stat_bytes_hi_s {
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
- } s;
- struct cvmx_stxx_stat_bytes_hi_s cn38xx;
- struct cvmx_stxx_stat_bytes_hi_s cn38xxp2;
- struct cvmx_stxx_stat_bytes_hi_s cn58xx;
- struct cvmx_stxx_stat_bytes_hi_s cn58xxp1;
-};
-
-union cvmx_stxx_stat_bytes_lo {
- uint64_t u64;
- struct cvmx_stxx_stat_bytes_lo_s {
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
- } s;
- struct cvmx_stxx_stat_bytes_lo_s cn38xx;
- struct cvmx_stxx_stat_bytes_lo_s cn38xxp2;
- struct cvmx_stxx_stat_bytes_lo_s cn58xx;
- struct cvmx_stxx_stat_bytes_lo_s cn58xxp1;
-};
-
-union cvmx_stxx_stat_ctl {
- uint64_t u64;
- struct cvmx_stxx_stat_ctl_s {
- uint64_t reserved_5_63:59;
- uint64_t clr:1;
- uint64_t bckprs:4;
- } s;
- struct cvmx_stxx_stat_ctl_s cn38xx;
- struct cvmx_stxx_stat_ctl_s cn38xxp2;
- struct cvmx_stxx_stat_ctl_s cn58xx;
- struct cvmx_stxx_stat_ctl_s cn58xxp1;
-};
-
-union cvmx_stxx_stat_pkt_xmt {
- uint64_t u64;
- struct cvmx_stxx_stat_pkt_xmt_s {
- uint64_t reserved_32_63:32;
- uint64_t cnt:32;
- } s;
- struct cvmx_stxx_stat_pkt_xmt_s cn38xx;
- struct cvmx_stxx_stat_pkt_xmt_s cn38xxp2;
- struct cvmx_stxx_stat_pkt_xmt_s cn58xx;
- struct cvmx_stxx_stat_pkt_xmt_s cn58xxp1;
-};
-
-#endif
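For reference, the accessor macros removed above (now provided from the shared asm/octeon headers) fold the interface number into the MMIO offset: block_id is masked to a single bit and adds 0x8000000 to the base, selecting one of the two STX blocks. Plugging values into CVMX_STXX_COM_CTL as defined above (illustrative arithmetic only):

	CVMX_STXX_COM_CTL(0) == CVMX_ADD_IO_SEG(0x0001180090000600ull);
	CVMX_STXX_COM_CTL(1) == CVMX_ADD_IO_SEG(0x0001180090000600ull + 0x8000000ull);
	                     /* == CVMX_ADD_IO_SEG(0x0001180098000600ull) */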
diff --git a/drivers/staging/octeon/cvmx-wqe.h b/drivers/staging/octeon/cvmx-wqe.h
deleted file mode 100644
index 6536109..0000000
--- a/drivers/staging/octeon/cvmx-wqe.h
+++ /dev/null
@@ -1,397 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2008 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-/**
- *
- * This header file defines the work queue entry (wqe) data structure.
- * Since this is a commonly used structure that depends on structures
- * from several hardware blocks, those definitions have been placed
- * in this file to create a single point of definition of the wqe
- * format.
- * Data structures are still named according to the block that they
- * relate to.
- *
- */
-
-#ifndef __CVMX_WQE_H__
-#define __CVMX_WQE_H__
-
-#include "cvmx-packet.h"
-
-
-#define OCT_TAG_TYPE_STRING(x) \
- (((x) == CVMX_POW_TAG_TYPE_ORDERED) ? "ORDERED" : \
- (((x) == CVMX_POW_TAG_TYPE_ATOMIC) ? "ATOMIC" : \
- (((x) == CVMX_POW_TAG_TYPE_NULL) ? "NULL" : \
- "NULL_NULL")))
-
-/**
- * HW decode / err_code in work queue entry
- */
-typedef union {
- uint64_t u64;
-
- /* Use this struct if the hardware determines that the packet is IP */
- struct {
- /* HW sets this to the number of buffers used by this packet */
- uint64_t bufs:8;
- /* HW sets to the number of L2 bytes prior to the IP */
- uint64_t ip_offset:8;
- /* set to 1 if we found DSA/VLAN in the L2 */
- uint64_t vlan_valid:1;
- /* Set to 1 if the DSA/VLAN tag is stacked */
- uint64_t vlan_stacked:1;
- uint64_t unassigned:1;
- /* HW sets to the DSA/VLAN CFI flag (valid when vlan_valid) */
- uint64_t vlan_cfi:1;
- /* HW sets to the DSA/VLAN_ID field (valid when vlan_valid) */
- uint64_t vlan_id:12;
- /* Ring Identifier (if PCIe). Requires PIP_GBL_CTL[RING_EN]=1 */
- uint64_t pr:4;
- uint64_t unassigned2:8;
- /* the packet needs to be decompressed */
- uint64_t dec_ipcomp:1;
- /* the packet is either TCP or UDP */
- uint64_t tcp_or_udp:1;
- /* the packet needs to be decrypted (ESP or AH) */
- uint64_t dec_ipsec:1;
- /* the packet is IPv6 */
- uint64_t is_v6:1;
-
- /*
- * (rcv_error, not_IP, IP_exc, is_frag, L4_error,
- * software, etc.).
- */
-
- /*
- * reserved for software use, hardware will clear on
- * packet creation.
- */
- uint64_t software:1;
- /* exceptional conditions below */
- /* the receive interface hardware detected an L4 error
- * (only applies if !is_frag) (only applies if
- * !rcv_error && !not_IP && !IP_exc && !is_frag)
- * failure indicated in err_code below, decode:
- *
- * - 1 = Malformed L4
- * - 2 = L4 Checksum Error: the L4 checksum value is
- * - 3 = UDP Length Error: The UDP length field would
- * make the UDP data longer than what remains in
- * the IP packet (as defined by the IP header
- * length field).
- * - 4 = Bad L4 Port: either the source or destination
- * TCP/UDP port is 0.
- * - 8 = TCP FIN Only: the packet is TCP and only the
- * FIN flag set.
- * - 9 = TCP No Flags: the packet is TCP and no flags
- * are set.
- * - 10 = TCP FIN RST: the packet is TCP and both FIN
- * and RST are set.
- * - 11 = TCP SYN URG: the packet is TCP and both SYN
- * and URG are set.
- * - 12 = TCP SYN RST: the packet is TCP and both SYN
- * and RST are set.
- * - 13 = TCP SYN FIN: the packet is TCP and both SYN
- * and FIN are set.
- */
- uint64_t L4_error:1;
- /* set if the packet is a fragment */
- uint64_t is_frag:1;
- /* the receive interface hardware detected an IP error
- * / exception (only applies if !rcv_error && !not_IP)
- * failure indicated in err_code below, decode:
- *
- * - 1 = Not IP: the IP version field is neither 4 nor
- * 6.
- * - 2 = IPv4 Header Checksum Error: the IPv4 header
- * has a checksum violation.
- * - 3 = IP Malformed Header: the packet is not long
- * enough to contain the IP header.
- * - 4 = IP Malformed: the packet is not long enough
- * to contain the bytes indicated by the IP
- * header. Pad is allowed.
- * - 5 = IP TTL Hop: the IPv4 TTL field or the IPv6
- * Hop Count field are zero.
- * - 6 = IP Options
- */
- uint64_t IP_exc:1;
- /*
- * Set if the hardware determined that the packet is a
- * broadcast.
- */
- uint64_t is_bcast:1;
- /*
- * St if the hardware determined that the packet is a
- * multi-cast.
- */
- uint64_t is_mcast:1;
- /*
- * Set if the packet may not be IP (must be zero in
- * this case).
- */
- uint64_t not_IP:1;
- /*
- * The receive interface hardware detected a receive
- * error (must be zero in this case).
- */
- uint64_t rcv_error:1;
- /* lower err_code = first-level descriptor of the
- * work */
- /* zero for packet submitted by hardware that isn't on
- * the slow path */
- /* type is cvmx_pip_err_t */
- uint64_t err_code:8;
- } s;
-
- /* use this to get at the 16 vlan bits */
- struct {
- uint64_t unused1:16;
- uint64_t vlan:16;
- uint64_t unused2:32;
- } svlan;
-
- /*
- * use this struct if the hardware could not determine that
- * the packet is ip.
- */
- struct {
- /*
- * HW sets this to the number of buffers used by this
- * packet.
- */
- uint64_t bufs:8;
- uint64_t unused:8;
- /* set to 1 if we found DSA/VLAN in the L2 */
- uint64_t vlan_valid:1;
- /* Set to 1 if the DSA/VLAN tag is stacked */
- uint64_t vlan_stacked:1;
- uint64_t unassigned:1;
- /*
- * HW sets to the DSA/VLAN CFI flag (valid when
- * vlan_valid)
- */
- uint64_t vlan_cfi:1;
- /*
- * HW sets to the DSA/VLAN_ID field (valid when
- * vlan_valid).
- */
- uint64_t vlan_id:12;
- /*
- * Ring Identifier (if PCIe). Requires
- * PIP_GBL_CTL[RING_EN]=1
- */
- uint64_t pr:4;
- uint64_t unassigned2:12;
- /*
- * reserved for software use, hardware will clear on
- * packet creation.
- */
- uint64_t software:1;
- uint64_t unassigned3:1;
- /*
- * set if the hardware determined that the packet is
- * rarp.
- */
- uint64_t is_rarp:1;
- /*
- * set if the hardware determined that the packet is
- * arp
- */
- uint64_t is_arp:1;
- /*
- * set if the hardware determined that the packet is a
- * broadcast.
- */
- uint64_t is_bcast:1;
- /*
- * set if the hardware determined that the packet is a
- * multi-cast
- */
- uint64_t is_mcast:1;
- /*
- * set if the packet may not be IP (must be one in
- * this case)
- */
- uint64_t not_IP:1;
- /* The receive interface hardware detected a receive
- * error. Failure indicated in err_code below,
- * decode:
- *
- * - 1 = partial error: a packet was partially
- * received, but internal buffering / bandwidth
- * was not adequate to receive the entire
- * packet.
- * - 2 = jabber error: the RGMII packet was too large
- * and is truncated.
- * - 3 = overrun error: the RGMII packet is longer
- * than allowed and had an FCS error.
- * - 4 = oversize error: the RGMII packet is longer
- * than allowed.
- * - 5 = alignment error: the RGMII packet is not an
- * integer number of bytes
- * and had an FCS error (100M and 10M only).
- * - 6 = fragment error: the RGMII packet is shorter
- * than allowed and had an FCS error.
- * - 7 = GMX FCS error: the RGMII packet had an FCS
- * error.
- * - 8 = undersize error: the RGMII packet is shorter
- * than allowed.
- * - 9 = extend error: the RGMII packet had an extend
- * error.
- * - 10 = length mismatch error: the RGMII packet had
- * a length that did not match the length field
- * in the L2 HDR.
- * - 11 = RGMII RX error/SPI4 DIP4 Error: the RGMII
- * packet had one or more data reception errors
- * (RXERR) or the SPI4 packet had one or more
- * DIP4 errors.
- * - 12 = RGMII skip error/SPI4 Abort Error: the RGMII
- * packet was not large enough to cover the
- * skipped bytes or the SPI4 packet was
- * terminated with an About EOPS.
- * - 13 = RGMII nibble error/SPI4 Port NXA Error: the
- * RGMII packet had a studder error (data not
- * repeated - 10/100M only) or the SPI4 packet
- * was sent to an NXA.
- * - 16 = FCS error: a SPI4.2 packet had an FCS error.
- * - 17 = Skip error: a packet was not large enough to
- * cover the skipped bytes.
- * - 18 = L2 header malformed: the packet is not long
- * enough to contain the L2.
- */
-
- uint64_t rcv_error:1;
- /*
- * lower err_code = first-level descriptor of the
- * work
- */
- /*
- * zero for packet submitted by hardware that isn't on
- * the slow path
- */
- /* type is cvmx_pip_err_t (union, so can't use directly */
- uint64_t err_code:8;
- } snoip;
-
-} cvmx_pip_wqe_word2;
-
-/**
- * Work queue entry format
- *
- * must be 8-byte aligned
- */
-typedef struct {
-
- /*****************************************************************
- * WORD 0
- * HW WRITE: the following 64 bits are filled by HW when a packet arrives
- */
-
- /**
- * raw chksum result generated by the HW
- */
- uint16_t hw_chksum;
- /**
- * Field unused by hardware - available for software
- */
- uint8_t unused;
- /**
- * Next pointer used by hardware for list maintenance.
- * May be written/read by HW before the work queue
- * entry is scheduled to a PP
- * (Only 36 bits used in Octeon 1)
- */
- uint64_t next_ptr:40;
-
- /*****************************************************************
- * WORD 1
- * HW WRITE: the following 64 bits are filled by HW when a packet arrives
- */
-
- /**
- * HW sets to the total number of bytes in the packet
- */
- uint64_t len:16;
- /**
- * HW sets this to input physical port
- */
- uint64_t ipprt:6;
-
- /**
- * HW sets this to what it thought the priority of the input packet was
- */
- uint64_t qos:3;
-
- /**
- * the group that the work queue entry will be scheduled to
- */
- uint64_t grp:4;
- /**
- * the type of the tag (ORDERED, ATOMIC, NULL)
- */
- uint64_t tag_type:3;
- /**
- * the synchronization/ordering tag
- */
- uint64_t tag:32;
-
- /**
- * WORD 2 HW WRITE: the following 64-bits are filled in by
- * hardware when a packet arrives This indicates a variety of
- * status and error conditions.
- */
- cvmx_pip_wqe_word2 word2;
-
- /**
- * Pointer to the first segment of the packet.
- */
- union cvmx_buf_ptr packet_ptr;
-
- /**
- * HW WRITE: octeon will fill in a programmable amount from the
- * packet, up to (at most, but perhaps less) the amount
- * needed to fill the work queue entry to 128 bytes
- *
- * If the packet is recognized to be IP, the hardware starts
- * (except that the IPv4 header is padded for appropriate
- * alignment) writing here where the IP header starts. If the
- * packet is not recognized to be IP, the hardware starts
- * writing the beginning of the packet here.
- */
- uint8_t packet_data[96];
-
- /**
- * If desired, SW can make the work Q entry any length. For the
- * purposes of discussion here, Assume 128B always, as this is all that
- * the hardware deals with.
- *
- */
-
-} CVMX_CACHE_LINE_ALIGNED cvmx_wqe_t;
-
-#endif /* __CVMX_WQE_H__ */
diff --git a/drivers/staging/octeon/ethernet-defines.h b/drivers/staging/octeon/ethernet-defines.h
index 6a2cd50..bdaec8d 100644
--- a/drivers/staging/octeon/ethernet-defines.h
+++ b/drivers/staging/octeon/ethernet-defines.h
@@ -59,7 +59,7 @@
#ifndef __ETHERNET_DEFINES_H__
#define __ETHERNET_DEFINES_H__
-#include "cvmx-config.h"
+#include <asm/octeon/cvmx-config.h>
#define OCTEON_ETHERNET_VERSION "1.9"
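The pattern repeated through the hunks below is mechanical: the staging driver's private copies of the cvmx headers are dropped, and every local include is switched to the copy shared with the MIPS Octeon architecture code under arch/mips/include/asm/octeon/. For example:

	/* before: header bundled with the staging driver */
	#include "cvmx-helper.h"
	/* after: shared MIPS/Octeon header */
	#include <asm/octeon/cvmx-helper.h>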
diff --git a/drivers/staging/octeon/ethernet-mdio.c b/drivers/staging/octeon/ethernet-mdio.c
index f18e3e1..63800ba 100644
--- a/drivers/staging/octeon/ethernet-mdio.c
+++ b/drivers/staging/octeon/ethernet-mdio.c
@@ -38,9 +38,9 @@
#include "ethernet-mdio.h"
#include "ethernet-util.h"
-#include "cvmx-helper-board.h"
+#include <asm/octeon/cvmx-helper-board.h>
-#include "cvmx-smix-defs.h"
+#include <asm/octeon/cvmx-smix-defs.h>
static void cvm_oct_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
diff --git a/drivers/staging/octeon/ethernet-mem.c b/drivers/staging/octeon/ethernet-mem.c
index 635bb86..78b6cb7 100644
--- a/drivers/staging/octeon/ethernet-mem.c
+++ b/drivers/staging/octeon/ethernet-mem.c
@@ -32,7 +32,7 @@
#include "ethernet-defines.h"
-#include "cvmx-fpa.h"
+#include <asm/octeon/cvmx-fpa.h>
/**
* cvm_oct_fill_hw_skbuff - fill the supplied hardware pool with skbuffs
diff --git a/drivers/staging/octeon/ethernet-rgmii.c b/drivers/staging/octeon/ethernet-rgmii.c
index c3d73f8..d8f5f69 100644
--- a/drivers/staging/octeon/ethernet-rgmii.c
+++ b/drivers/staging/octeon/ethernet-rgmii.c
@@ -37,11 +37,11 @@
#include "octeon-ethernet.h"
#include "ethernet-util.h"
-#include "cvmx-helper.h"
+#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-ipd-defs.h>
#include <asm/octeon/cvmx-npi-defs.h>
-#include "cvmx-gmxx-defs.h"
+#include <asm/octeon/cvmx-gmxx-defs.h>
DEFINE_SPINLOCK(global_register_lock);
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index 8b307b4..400df8c 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -52,14 +52,14 @@
#include "octeon-ethernet.h"
#include "ethernet-util.h"
-#include "cvmx-helper.h"
-#include "cvmx-wqe.h"
-#include "cvmx-fau.h"
-#include "cvmx-pow.h"
-#include "cvmx-pip.h"
-#include "cvmx-scratch.h"
-
-#include "cvmx-gmxx-defs.h"
+#include <asm/octeon/cvmx-helper.h>
+#include <asm/octeon/cvmx-wqe.h>
+#include <asm/octeon/cvmx-fau.h>
+#include <asm/octeon/cvmx-pow.h>
+#include <asm/octeon/cvmx-pip.h>
+#include <asm/octeon/cvmx-scratch.h>
+
+#include <asm/octeon/cvmx-gmxx-defs.h>
struct cvm_napi_wrapper {
struct napi_struct napi;
diff --git a/drivers/staging/octeon/ethernet-rx.h b/drivers/staging/octeon/ethernet-rx.h
index a0743b8..9240c85 100644
--- a/drivers/staging/octeon/ethernet-rx.h
+++ b/drivers/staging/octeon/ethernet-rx.h
@@ -24,7 +24,7 @@
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
*********************************************************************/
-#include "cvmx-fau.h"
+#include <asm/octeon/cvmx-fau.h>
void cvm_oct_poll_controller(struct net_device *dev);
void cvm_oct_rx_initialize(void);
diff --git a/drivers/staging/octeon/ethernet-sgmii.c b/drivers/staging/octeon/ethernet-sgmii.c
index 5e148b5..d3e8243 100644
--- a/drivers/staging/octeon/ethernet-sgmii.c
+++ b/drivers/staging/octeon/ethernet-sgmii.c
@@ -35,9 +35,9 @@
#include "octeon-ethernet.h"
#include "ethernet-util.h"
-#include "cvmx-helper.h"
+#include <asm/octeon/cvmx-helper.h>
-#include "cvmx-gmxx-defs.h"
+#include <asm/octeon/cvmx-gmxx-defs.h>
int cvm_oct_sgmii_open(struct net_device *dev)
{
diff --git a/drivers/staging/octeon/ethernet-spi.c b/drivers/staging/octeon/ethernet-spi.c
index d0e2d51..af8d628 100644
--- a/drivers/staging/octeon/ethernet-spi.c
+++ b/drivers/staging/octeon/ethernet-spi.c
@@ -35,11 +35,11 @@
#include "octeon-ethernet.h"
#include "ethernet-util.h"
-#include "cvmx-spi.h"
+#include <asm/octeon/cvmx-spi.h>
#include <asm/octeon/cvmx-npi-defs.h>
-#include "cvmx-spxx-defs.h"
-#include "cvmx-stxx-defs.h"
+#include <asm/octeon/cvmx-spxx-defs.h>
+#include <asm/octeon/cvmx-stxx-defs.h>
static int number_spi_ports;
static int need_retrain[2] = { 0, 0 };
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 2542c37..56d74dc 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -47,13 +47,13 @@
#include "ethernet-tx.h"
#include "ethernet-util.h"
-#include "cvmx-wqe.h"
-#include "cvmx-fau.h"
-#include "cvmx-pip.h"
-#include "cvmx-pko.h"
-#include "cvmx-helper.h"
+#include <asm/octeon/cvmx-wqe.h>
+#include <asm/octeon/cvmx-fau.h>
+#include <asm/octeon/cvmx-pip.h>
+#include <asm/octeon/cvmx-pko.h>
+#include <asm/octeon/cvmx-helper.h>
-#include "cvmx-gmxx-defs.h"
+#include <asm/octeon/cvmx-gmxx-defs.h>
#define CVM_OCT_SKB_CB(skb) ((u64 *)((skb)->cb))
diff --git a/drivers/staging/octeon/ethernet-xaui.c b/drivers/staging/octeon/ethernet-xaui.c
index 861a4b3..419f8c3 100644
--- a/drivers/staging/octeon/ethernet-xaui.c
+++ b/drivers/staging/octeon/ethernet-xaui.c
@@ -35,9 +35,9 @@
#include "octeon-ethernet.h"
#include "ethernet-util.h"
-#include "cvmx-helper.h"
+#include <asm/octeon/cvmx-helper.h>
-#include "cvmx-gmxx-defs.h"
+#include <asm/octeon/cvmx-gmxx-defs.h>
int cvm_oct_xaui_open(struct net_device *dev)
{
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 076f866..9112cd8 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -44,14 +44,14 @@
#include "ethernet-mdio.h"
#include "ethernet-util.h"
-#include "cvmx-pip.h"
-#include "cvmx-pko.h"
-#include "cvmx-fau.h"
-#include "cvmx-ipd.h"
-#include "cvmx-helper.h"
-
-#include "cvmx-gmxx-defs.h"
-#include "cvmx-smix-defs.h"
+#include <asm/octeon/cvmx-pip.h>
+#include <asm/octeon/cvmx-pko.h>
+#include <asm/octeon/cvmx-fau.h>
+#include <asm/octeon/cvmx-ipd.h>
+#include <asm/octeon/cvmx-helper.h>
+
+#include <asm/octeon/cvmx-gmxx-defs.h>
+#include <asm/octeon/cvmx-smix-defs.h>
#if defined(CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS) \
&& CONFIG_CAVIUM_OCTEON_NUM_PACKET_BUFFERS
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.c b/drivers/staging/olpc_dcon/olpc_dcon.c
index af24ddf..3d91993 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon.c
@@ -34,8 +34,8 @@
/* Module definitions */
-static int resumeline = 898;
-module_param(resumeline, int, 0444);
+static ushort resumeline = 898;
+module_param(resumeline, ushort, 0444);
/* Default off since it doesn't work on DCON ASIC in B-test OLPC board */
static int useaa = 1;
@@ -456,7 +456,7 @@ static ssize_t dcon_mono_store(struct device *dev,
unsigned long enable_mono;
int rc;
- rc = strict_strtoul(buf, 10, &enable_mono);
+ rc = kstrtoul(buf, 10, &enable_mono);
if (rc)
return rc;
@@ -472,7 +472,7 @@ static ssize_t dcon_freeze_store(struct device *dev,
unsigned long output;
int ret;
- ret = strict_strtoul(buf, 10, &output);
+ ret = kstrtoul(buf, 10, &output);
if (ret)
return ret;
@@ -498,10 +498,10 @@ static ssize_t dcon_freeze_store(struct device *dev,
static ssize_t dcon_resumeline_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- unsigned long rl;
+ unsigned short rl;
int rc;
- rc = strict_strtoul(buf, 10, &rl);
+ rc = kstrtou16(buf, 10, &rl);
if (rc)
return rc;
@@ -517,7 +517,7 @@ static ssize_t dcon_sleep_store(struct device *dev,
unsigned long output;
int ret;
- ret = strict_strtoul(buf, 10, &output);
+ ret = kstrtoul(buf, 10, &output);
if (ret)
return ret;
@@ -755,9 +755,9 @@ static int dcon_resume(struct i2c_client *client)
irqreturn_t dcon_interrupt(int irq, void *id)
{
struct dcon_priv *dcon = id;
- int status = pdata->read_status();
+ u8 status;
- if (status == -1)
+ if (pdata->read_status(&status))
return IRQ_NONE;
switch (status & 3) {
diff --git a/drivers/staging/olpc_dcon/olpc_dcon.h b/drivers/staging/olpc_dcon/olpc_dcon.h
index 0264c94..167a417 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon.h
+++ b/drivers/staging/olpc_dcon/olpc_dcon.h
@@ -84,7 +84,7 @@ struct dcon_platform_data {
int (*init)(struct dcon_priv *);
void (*bus_stabilize_wiggle)(void);
void (*set_dconload)(int);
- u8 (*read_status)(void);
+ int (*read_status)(u8 *);
};
#include <linux/interrupt.h>
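With the reworked dcon_platform_data above, read_status() reports success or failure through its int return value and hands the two status bits back through a pointer; the old u8 return could never actually signal the -1 error the caller checked for. The calling pattern is the one used in dcon_interrupt() earlier in this patch:

	u8 status;

	if (pdata->read_status(&status))	/* non-zero: nothing valid to read */
		return IRQ_NONE;
	/* as in dcon_interrupt(), only the low two bits are used: status & 3 */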
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
index 2245213..cb6ce0c 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1.c
@@ -183,17 +183,15 @@ static void dcon_set_dconload_1(int val)
gpio_set_value(OLPC_GPIO_DCON_LOAD, val);
}
-static u8 dcon_read_status_xo_1(void)
+static int dcon_read_status_xo_1(u8 *status)
{
- u8 status;
-
- status = gpio_get_value(OLPC_GPIO_DCON_STAT0);
- status |= gpio_get_value(OLPC_GPIO_DCON_STAT1) << 1;
+ *status = gpio_get_value(OLPC_GPIO_DCON_STAT0);
+ *status |= gpio_get_value(OLPC_GPIO_DCON_STAT1) << 1;
/* Clear the negative edge status for GPIO7 */
cs5535_gpio_set(OLPC_GPIO_DCON_IRQ, GPIO_NEGATIVE_EDGE_STS);
- return status;
+ return 0;
}
struct dcon_platform_data dcon_pdata_xo_1 = {
diff --git a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
index a6a6cf2..69415ee 100644
--- a/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
+++ b/drivers/staging/olpc_dcon/olpc_dcon_xo_1_5.c
@@ -167,20 +167,18 @@ static void dcon_set_dconload_xo_1_5(int val)
gpio_set_value(VX855_GPIO(1), val);
}
-static u8 dcon_read_status_xo_1_5(void)
+static int dcon_read_status_xo_1_5(u8 *status)
{
- u8 status;
-
if (!dcon_was_irq())
return -1;
/* i believe this is the same as "inb(0x44b) & 3" */
- status = gpio_get_value(VX855_GPI(10));
- status |= gpio_get_value(VX855_GPI(11)) << 1;
+ *status = gpio_get_value(VX855_GPI(10));
+ *status |= gpio_get_value(VX855_GPI(11)) << 1;
dcon_clear_irq();
- return status;
+ return 0;
}
struct dcon_platform_data dcon_pdata_xo_1_5 = {
diff --git a/drivers/staging/omapdrm/Kconfig b/drivers/staging/omapdrm/Kconfig
new file mode 100644
index 0000000..81a7cba
--- /dev/null
+++ b/drivers/staging/omapdrm/Kconfig
@@ -0,0 +1,25 @@
+
+config DRM_OMAP
+ tristate "OMAP DRM"
+	depends on DRM && !FB_OMAP2
+ depends on ARCH_OMAP2PLUS
+ select DRM_KMS_HELPER
+ select OMAP2_DSS
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FB_SYS_FOPS
+ default n
+ help
+ DRM display driver for OMAP2/3/4 based boards.
+
+config DRM_OMAP_NUM_CRTCS
+ int "Number of CRTCs"
+ range 1 10
+ default 1 if ARCH_OMAP2 || ARCH_OMAP3
+ default 2 if ARCH_OMAP4
+ depends on DRM_OMAP
+ help
+ Select the number of video overlays which can be used as framebuffers.
+ The remaining overlays are reserved for video.
+
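For reference, a board wanting this driver built as a module with both OMAP4 overlays exposed as CRTCs would carry something like the following .config fragment (illustrative; the symbol names come from the Kconfig entries above):

	CONFIG_DRM=y
	CONFIG_DRM_OMAP=m
	CONFIG_DRM_OMAP_NUM_CRTCS=2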
diff --git a/drivers/staging/omapdrm/Makefile b/drivers/staging/omapdrm/Makefile
new file mode 100644
index 0000000..d9cdc12
--- /dev/null
+++ b/drivers/staging/omapdrm/Makefile
@@ -0,0 +1,22 @@
+#
+# Makefile for the drm device driver. This driver provides support for the
+# Direct Rendering Infrastructure (DRI)
+#
+
+ccflags-y := -Iinclude/drm -Werror
+omapdrm-y := omap_drv.o \
+ omap_debugfs.o \
+ omap_crtc.o \
+ omap_plane.o \
+ omap_encoder.o \
+ omap_connector.o \
+ omap_fb.o \
+ omap_fbdev.o \
+ omap_gem.o \
+ omap_dmm_tiler.o \
+ tcm-sita.o
+
+# temporary:
+omapdrm-y += omap_gem_helpers.o
+
+obj-$(CONFIG_DRM_OMAP) += omapdrm.o
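Since omapdrm.o is a composite kbuild object, the usual shortcuts apply during development; one way to rebuild just this directory, assuming an already-configured in-tree build with CONFIG_DRM_OMAP=m, is:

	make M=drivers/staging/omapdrm modules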
diff --git a/drivers/staging/omapdrm/TODO b/drivers/staging/omapdrm/TODO
new file mode 100644
index 0000000..55b1837
--- /dev/null
+++ b/drivers/staging/omapdrm/TODO
@@ -0,0 +1,38 @@
+TODO
+. check error handling/cleanup paths
+. add drm_plane / overlay support
+. add video decode/encode support (via syslink3 + codec-engine)
+. still some rough edges with flipping.. event back to userspace should
+ really come after VSYNC interrupt
+. where should we do eviction (detatch_pages())? We aren't necessarily
+ accessing the pages via a GART, so maybe we need some other threshold
+ to put a cap on the # of pages that can be pin'd. (It is mostly only
+ of interest in case you have a swap partition/file.. which a lot of
+ these devices do not.. but it doesn't hurt for the driver to do the
+ right thing anyways.)
+ . Use mm_shrinker to trigger unpinning pages. Need to figure out how
+ to handle next issue first (I think?)
+ . Note TTM already has some mm_shrinker stuff.. maybe an argument to
+ move to TTM? Or maybe something that could be factored out in common?
+. GEM/shmem backed pages can have existing mappings (kernel linear map,
+ etc..), which isn't really ideal.
+. Revisit GEM sync object infrastructure.. TTM has some framework for this
+ already. Possibly this could be refactored out and made more common?
+ There should be some way to do this with less wheel-reinvention.
+. Review DSS vs KMS mismatches. The omap_dss_device is sort of part encoder,
+ part connector. Which results in a bit of duct tape to fwd calls from
+ encoder to connector. Possibly this could be done a bit better.
+. Solve PM sequencing on resume. DMM/TILER must be reloaded before any
+ access is made from any component in the system. Which means on suspend
+ CRTC's should be disabled, and on resume the LUT should be reprogrammed
+ before CRTC's are re-enabled, to prevent DSS from trying to DMA from a
+ buffer mapped in DMM/TILER before LUT is reloaded.
+. Add debugfs information for DMM/TILER
+
+Userspace:
+. git://github.com/robclark/xf86-video-omap.git
+
+Currently tested on
+. OMAP3530 beagleboard
+. OMAP4430 pandaboard
+. OMAP4460 pandaboard
diff --git a/drivers/staging/omapdrm/omap_connector.c b/drivers/staging/omapdrm/omap_connector.c
new file mode 100644
index 0000000..5e2856c
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_connector.c
@@ -0,0 +1,371 @@
+/*
+ * drivers/staging/omapdrm/omap_connector.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+/*
+ * connector funcs
+ */
+
+#define to_omap_connector(x) container_of(x, struct omap_connector, base)
+
+struct omap_connector {
+ struct drm_connector base;
+ struct omap_dss_device *dssdev;
+};
+
+static inline void copy_timings_omap_to_drm(struct drm_display_mode *mode,
+ struct omap_video_timings *timings)
+{
+ mode->clock = timings->pixel_clock;
+
+ mode->hdisplay = timings->x_res;
+ mode->hsync_start = mode->hdisplay + timings->hfp;
+ mode->hsync_end = mode->hsync_start + timings->hsw;
+ mode->htotal = mode->hsync_end + timings->hbp;
+
+ mode->vdisplay = timings->y_res;
+ mode->vsync_start = mode->vdisplay + timings->vfp;
+ mode->vsync_end = mode->vsync_start + timings->vsw;
+ mode->vtotal = mode->vsync_end + timings->vbp;
+
+	/* note: whether it is interlaced, +/- h/vsync, etc., which
+	 * should be set in the mode flags, is not exposed in the
+	 * omap_video_timings struct.. but the hdmi driver tracks those
+	 * separately, so all we have to set here are the timing values
+	 * above; the omap_dss_driver does the rest.
+	 */
+}
+
+static inline void copy_timings_drm_to_omap(struct omap_video_timings *timings,
+ struct drm_display_mode *mode)
+{
+ timings->pixel_clock = mode->clock;
+
+ timings->x_res = mode->hdisplay;
+ timings->hfp = mode->hsync_start - mode->hdisplay;
+ timings->hsw = mode->hsync_end - mode->hsync_start;
+ timings->hbp = mode->htotal - mode->hsync_end;
+
+ timings->y_res = mode->vdisplay;
+ timings->vfp = mode->vsync_start - mode->vdisplay;
+ timings->vsw = mode->vsync_end - mode->vsync_start;
+ timings->vbp = mode->vtotal - mode->vsync_end;
+}
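The two helpers mirror each other; plugging in a hypothetical 1280x720@60 mode (standard CEA-style timings, used here purely as an example) shows how the DRM sync positions map onto the DSS front-porch/sync/back-porch values:

	/*
	 * mode->clock = 74250            ->  timings->pixel_clock = 74250 (both kHz)
	 * hdisplay=1280, hsync_start=1390, hsync_end=1430, htotal=1650
	 *   ->  x_res=1280, hfp=1390-1280=110, hsw=1430-1390=40, hbp=1650-1430=220
	 * vdisplay=720,  vsync_start=725,  vsync_end=730,  vtotal=750
	 *   ->  y_res=720,  vfp=725-720=5,      vsw=730-725=5,    vbp=750-730=20
	 */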
+
+static void omap_connector_dpms(struct drm_connector *connector, int mode)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+ int old_dpms;
+
+ DBG("%s: %d", dssdev->name, mode);
+
+ old_dpms = connector->dpms;
+
+ /* from off to on, do from crtc to connector */
+ if (mode < old_dpms)
+ drm_helper_connector_dpms(connector, mode);
+
+ if (mode == DRM_MODE_DPMS_ON) {
+ /* store resume info for suspended displays */
+ switch (dssdev->state) {
+ case OMAP_DSS_DISPLAY_SUSPENDED:
+ dssdev->activate_after_resume = true;
+ break;
+ case OMAP_DSS_DISPLAY_DISABLED: {
+ int ret = dssdev->driver->enable(dssdev);
+ if (ret) {
+ DBG("%s: failed to enable: %d",
+ dssdev->name, ret);
+ dssdev->driver->disable(dssdev);
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ } else {
+ /* TODO */
+ }
+
+ /* from on to off, do from connector to crtc */
+ if (mode > old_dpms)
+ drm_helper_connector_dpms(connector, mode);
+}
+
+enum drm_connector_status omap_connector_detect(
+ struct drm_connector *connector, bool force)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+ struct omap_dss_driver *dssdrv = dssdev->driver;
+ enum drm_connector_status ret;
+
+ if (dssdrv->detect) {
+ if (dssdrv->detect(dssdev)) {
+ ret = connector_status_connected;
+ } else {
+ ret = connector_status_disconnected;
+ }
+ } else {
+ ret = connector_status_unknown;
+ }
+
+ VERB("%s: %d (force=%d)", omap_connector->dssdev->name, ret, force);
+
+ return ret;
+}
+
+static void omap_connector_destroy(struct drm_connector *connector)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+
+ dssdev->driver->disable(dssdev);
+
+ DBG("%s", omap_connector->dssdev->name);
+ drm_sysfs_connector_remove(connector);
+ drm_connector_cleanup(connector);
+ kfree(omap_connector);
+
+ omap_dss_put_device(dssdev);
+}
+
+#define MAX_EDID 512
+
+static int omap_connector_get_modes(struct drm_connector *connector)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+ struct omap_dss_driver *dssdrv = dssdev->driver;
+ struct drm_device *dev = connector->dev;
+ int n = 0;
+
+ DBG("%s", omap_connector->dssdev->name);
+
+ /* if display exposes EDID, then we parse that in the normal way to
+ * build table of supported modes.. otherwise (ie. fixed resolution
+ * LCD panels) we just return a single mode corresponding to the
+ * currently configured timings:
+ */
+ if (dssdrv->read_edid) {
+ void *edid = kzalloc(MAX_EDID, GFP_KERNEL);
+
+ if ((dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) &&
+ drm_edid_is_valid(edid)) {
+ drm_mode_connector_update_edid_property(
+ connector, edid);
+ n = drm_add_edid_modes(connector, edid);
+ kfree(connector->display_info.raw_edid);
+ connector->display_info.raw_edid = edid;
+ } else {
+ drm_mode_connector_update_edid_property(
+ connector, NULL);
+ connector->display_info.raw_edid = NULL;
+ kfree(edid);
+ }
+ } else {
+ struct drm_display_mode *mode = drm_mode_create(dev);
+ struct omap_video_timings timings;
+
+ dssdrv->get_timings(dssdev, &timings);
+
+ copy_timings_omap_to_drm(mode, &timings);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ n = 1;
+ }
+
+ return n;
+}
+
+static int omap_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+ struct omap_dss_driver *dssdrv = dssdev->driver;
+ struct omap_video_timings timings = {0};
+ struct drm_device *dev = connector->dev;
+ struct drm_display_mode *new_mode;
+ int ret = MODE_BAD;
+
+ copy_timings_drm_to_omap(&timings, mode);
+ mode->vrefresh = drm_mode_vrefresh(mode);
+
+ if (!dssdrv->check_timings(dssdev, &timings)) {
+ /* check if vrefresh is still valid */
+ new_mode = drm_mode_duplicate(dev, mode);
+ new_mode->clock = timings.pixel_clock;
+ new_mode->vrefresh = 0;
+ if (mode->vrefresh == drm_mode_vrefresh(new_mode))
+ ret = MODE_OK;
+ drm_mode_destroy(dev, new_mode);
+ }
+
+ DBG("connector: mode %s: "
+ "%d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ (ret == MODE_OK) ? "valid" : "invalid",
+ mode->base.id, mode->name, mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+
+ return ret;
+}
+
+struct drm_encoder *omap_connector_attached_encoder(
+ struct drm_connector *connector)
+{
+ int i;
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
+ struct drm_mode_object *obj;
+
+ if (connector->encoder_ids[i] == 0)
+ break;
+
+ obj = drm_mode_object_find(connector->dev,
+ connector->encoder_ids[i],
+ DRM_MODE_OBJECT_ENCODER);
+
+ if (obj) {
+ struct drm_encoder *encoder = obj_to_encoder(obj);
+ struct omap_overlay_manager *mgr =
+ omap_encoder_get_manager(encoder);
+ DBG("%s: found %s", omap_connector->dssdev->name,
+ mgr->name);
+ return encoder;
+ }
+ }
+
+ DBG("%s: no encoder", omap_connector->dssdev->name);
+
+ return NULL;
+}
+
+static const struct drm_connector_funcs omap_connector_funcs = {
+ .dpms = omap_connector_dpms,
+ .detect = omap_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = omap_connector_destroy,
+};
+
+static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
+ .get_modes = omap_connector_get_modes,
+ .mode_valid = omap_connector_mode_valid,
+ .best_encoder = omap_connector_attached_encoder,
+};
+
+/* called from encoder when mode is set, to propagate settings to the dssdev */
+void omap_connector_mode_set(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ struct drm_device *dev = connector->dev;
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev = omap_connector->dssdev;
+ struct omap_dss_driver *dssdrv = dssdev->driver;
+ struct omap_video_timings timings;
+
+ copy_timings_drm_to_omap(&timings, mode);
+
+ DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
+ omap_connector->dssdev->name,
+ mode->base.id, mode->name, mode->vrefresh, mode->clock,
+ mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start,
+ mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+
+ if (dssdrv->check_timings(dssdev, &timings)) {
+ dev_err(dev->dev, "could not set timings\n");
+ return;
+ }
+
+ dssdrv->set_timings(dssdev, &timings);
+}
+
+/* flush an area of the framebuffer (in case of manual update display that
+ * is not automatically flushed)
+ */
+void omap_connector_flush(struct drm_connector *connector,
+ int x, int y, int w, int h)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+
+ /* TODO: enable when supported in dss */
+ VERB("%s: %d,%d, %dx%d", omap_connector->dssdev->name, x, y, w, h);
+}
+
+/* initialize connector */
+struct drm_connector *omap_connector_init(struct drm_device *dev,
+ int connector_type, struct omap_dss_device *dssdev)
+{
+ struct drm_connector *connector = NULL;
+ struct omap_connector *omap_connector;
+
+ DBG("%s", dssdev->name);
+
+ omap_dss_get_device(dssdev);
+
+ omap_connector = kzalloc(sizeof(struct omap_connector), GFP_KERNEL);
+ if (!omap_connector) {
+ dev_err(dev->dev, "could not allocate connector\n");
+ goto fail;
+ }
+
+ omap_connector->dssdev = dssdev;
+ connector = &omap_connector->base;
+
+ drm_connector_init(dev, connector, &omap_connector_funcs,
+ connector_type);
+ drm_connector_helper_add(connector, &omap_connector_helper_funcs);
+
+#if 0 /* enable when dss2 supports hotplug */
+ if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_HPD)
+ connector->polled = 0;
+ else
+#endif
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+
+ connector->interlace_allowed = 1;
+ connector->doublescan_allowed = 0;
+
+ drm_sysfs_connector_add(connector);
+
+ return connector;
+
+fail:
+ if (connector) {
+ omap_connector_destroy(connector);
+ }
+
+ return NULL;
+}
diff --git a/drivers/staging/omapdrm/omap_crtc.c b/drivers/staging/omapdrm/omap_crtc.c
new file mode 100644
index 0000000..17ca163
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_crtc.c
@@ -0,0 +1,225 @@
+/*
+ * drivers/staging/omapdrm/omap_crtc.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_mode.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+#define to_omap_crtc(x) container_of(x, struct omap_crtc, base)
+
+struct omap_crtc {
+ struct drm_crtc base;
+ struct drm_plane *plane;
+ const char *name;
+ int id;
+
+ /* if there is a pending flip, these will be non-null: */
+ struct drm_pending_vblank_event *event;
+ struct drm_framebuffer *old_fb;
+};
+
+static void omap_crtc_gamma_set(struct drm_crtc *crtc,
+ u16 *red, u16 *green, u16 *blue, uint32_t start, uint32_t size)
+{
+ /* not supported.. at least not yet */
+}
+
+static void omap_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ omap_crtc->plane->funcs->destroy(omap_crtc->plane);
+ drm_crtc_cleanup(crtc);
+ kfree(omap_crtc);
+}
+
+static void omap_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+ struct omap_drm_private *priv = crtc->dev->dev_private;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ int i;
+
+ WARN_ON(omap_plane_dpms(omap_crtc->plane, mode));
+
+ for (i = 0; i < priv->num_planes; i++) {
+ struct drm_plane *plane = priv->planes[i];
+ if (plane->crtc == crtc)
+ WARN_ON(omap_plane_dpms(plane, mode));
+ }
+}
+
+static bool omap_crtc_mode_fixup(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ return true;
+}
+
+static int omap_crtc_mode_set(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct drm_plane *plane = omap_crtc->plane;
+
+ return omap_plane_mode_set(plane, crtc, crtc->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x << 16, y << 16,
+ mode->hdisplay << 16, mode->vdisplay << 16);
+}
+
+static void omap_crtc_prepare(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ DBG("%s", omap_crtc->name);
+ omap_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+}
+
+static void omap_crtc_commit(struct drm_crtc *crtc)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ DBG("%s", omap_crtc->name);
+ omap_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+}
+
+static int omap_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct drm_plane *plane = omap_crtc->plane;
+ struct drm_display_mode *mode = &crtc->mode;
+
+ return plane->funcs->update_plane(plane, crtc, crtc->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x << 16, y << 16,
+ mode->hdisplay << 16, mode->vdisplay << 16);
+}
+
+static void omap_crtc_load_lut(struct drm_crtc *crtc)
+{
+}
+
+static void page_flip_cb(void *arg)
+{
+ struct drm_crtc *crtc = arg;
+ struct drm_device *dev = crtc->dev;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+ struct drm_pending_vblank_event *event = omap_crtc->event;
+ struct drm_framebuffer *old_fb = omap_crtc->old_fb;
+ struct timeval now;
+ unsigned long flags;
+
+ WARN_ON(!event);
+
+ omap_crtc->event = NULL;
+ omap_crtc->old_fb = NULL;
+
+ omap_crtc_mode_set_base(crtc, crtc->x, crtc->y, old_fb);
+
+ /* wakeup userspace */
+ /* TODO: this should happen *after* flip in vsync IRQ handler */
+ if (event) {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ event->event.sequence = drm_vblank_count_and_time(
+ dev, omap_crtc->id, &now);
+ event->event.tv_sec = now.tv_sec;
+ event->event.tv_usec = now.tv_usec;
+ list_add_tail(&event->base.link,
+ &event->base.file_priv->event_list);
+ wake_up_interruptible(&event->base.file_priv->event_wait);
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+}
+
+static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
+ struct drm_framebuffer *fb,
+ struct drm_pending_vblank_event *event)
+{
+ struct drm_device *dev = crtc->dev;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
+ DBG("%d -> %d", crtc->fb ? crtc->fb->base.id : -1, fb->base.id);
+
+ if (omap_crtc->event) {
+ dev_err(dev->dev, "already a pending flip\n");
+ return -EINVAL;
+ }
+
+ omap_crtc->old_fb = crtc->fb;
+ omap_crtc->event = event;
+ crtc->fb = fb;
+
+ omap_gem_op_async(omap_framebuffer_bo(fb, 0), OMAP_GEM_READ,
+ page_flip_cb, crtc);
+
+ return 0;
+}
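Note that the flip is deliberately deferred: omap_gem_op_async() schedules page_flip_cb() only once the new framebuffer's GEM object is safe to read (pending rendering has completed), and the scanout base and userspace event are updated there. From userspace this is the path exercised by an ordinary event-based flip, e.g. with the standard libdrm calls (illustrative, not part of this patch):

	drmModePageFlip(fd, crtc_id, fb_id, DRM_MODE_PAGE_FLIP_EVENT, user_data);
	/* the event filled in by page_flip_cb() is later read back from the
	 * DRM fd and dispatched through drmHandleEvent() */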
+
+static const struct drm_crtc_funcs omap_crtc_funcs = {
+ .gamma_set = omap_crtc_gamma_set,
+ .set_config = drm_crtc_helper_set_config,
+ .destroy = omap_crtc_destroy,
+ .page_flip = omap_crtc_page_flip_locked,
+};
+
+static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
+ .dpms = omap_crtc_dpms,
+ .mode_fixup = omap_crtc_mode_fixup,
+ .mode_set = omap_crtc_mode_set,
+ .prepare = omap_crtc_prepare,
+ .commit = omap_crtc_commit,
+ .mode_set_base = omap_crtc_mode_set_base,
+ .load_lut = omap_crtc_load_lut,
+};
+
+/* initialize crtc */
+struct drm_crtc *omap_crtc_init(struct drm_device *dev,
+ struct omap_overlay *ovl, int id)
+{
+ struct drm_crtc *crtc = NULL;
+ struct omap_crtc *omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL);
+
+ DBG("%s", ovl->name);
+
+ if (!omap_crtc) {
+ dev_err(dev->dev, "could not allocate CRTC\n");
+ goto fail;
+ }
+
+ crtc = &omap_crtc->base;
+
+ omap_crtc->plane = omap_plane_init(dev, ovl, (1 << id), true);
+ omap_crtc->plane->crtc = crtc;
+ omap_crtc->name = ovl->name;
+ omap_crtc->id = id;
+
+ drm_crtc_init(dev, crtc, &omap_crtc_funcs);
+ drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs);
+
+ return crtc;
+
+fail:
+ if (crtc) {
+ omap_crtc_destroy(crtc);
+ }
+ return NULL;
+}
diff --git a/drivers/staging/omapdrm/omap_debugfs.c b/drivers/staging/omapdrm/omap_debugfs.c
new file mode 100644
index 0000000..da920df
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_debugfs.c
@@ -0,0 +1,42 @@
+/*
+ * drivers/staging/omapdrm/omap_debugfs.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+#include "omap_dmm_tiler.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+static struct drm_info_list omap_debugfs_list[] = {
+ {"tiler_map", tiler_map_show, 0},
+};
+
+int omap_debugfs_init(struct drm_minor *minor)
+{
+ return drm_debugfs_create_files(omap_debugfs_list,
+ ARRAY_SIZE(omap_debugfs_list),
+ minor->debugfs_root, minor);
+}
+
+void omap_debugfs_cleanup(struct drm_minor *minor)
+{
+ drm_debugfs_remove_files(omap_debugfs_list,
+ ARRAY_SIZE(omap_debugfs_list), minor);
+}
+
+#endif
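With DRM debugfs support enabled, the file registered here lands under the minor's debugfs directory, so the tiler allocation map can be inspected with something like (path illustrative; the minor number may differ):

	cat /sys/kernel/debug/dri/0/tiler_map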
diff --git a/drivers/staging/omapdrm/omap_dmm_priv.h b/drivers/staging/omapdrm/omap_dmm_priv.h
new file mode 100644
index 0000000..2f529ab
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_dmm_priv.h
@@ -0,0 +1,187 @@
+/*
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Rob Clark <rob@ti.com>
+ * Andy Gross <andy.gross@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef OMAP_DMM_PRIV_H
+#define OMAP_DMM_PRIV_H
+
+#define DMM_REVISION 0x000
+#define DMM_HWINFO 0x004
+#define DMM_LISA_HWINFO 0x008
+#define DMM_DMM_SYSCONFIG 0x010
+#define DMM_LISA_LOCK 0x01C
+#define DMM_LISA_MAP__0 0x040
+#define DMM_LISA_MAP__1 0x044
+#define DMM_TILER_HWINFO 0x208
+#define DMM_TILER_OR__0 0x220
+#define DMM_TILER_OR__1 0x224
+#define DMM_PAT_HWINFO 0x408
+#define DMM_PAT_GEOMETRY 0x40C
+#define DMM_PAT_CONFIG 0x410
+#define DMM_PAT_VIEW__0 0x420
+#define DMM_PAT_VIEW__1 0x424
+#define DMM_PAT_VIEW_MAP__0 0x440
+#define DMM_PAT_VIEW_MAP_BASE 0x460
+#define DMM_PAT_IRQ_EOI 0x478
+#define DMM_PAT_IRQSTATUS_RAW 0x480
+#define DMM_PAT_IRQSTATUS 0x490
+#define DMM_PAT_IRQENABLE_SET 0x4A0
+#define DMM_PAT_IRQENABLE_CLR 0x4B0
+#define DMM_PAT_STATUS__0 0x4C0
+#define DMM_PAT_STATUS__1 0x4C4
+#define DMM_PAT_STATUS__2 0x4C8
+#define DMM_PAT_STATUS__3 0x4CC
+#define DMM_PAT_DESCR__0 0x500
+#define DMM_PAT_DESCR__1 0x510
+#define DMM_PAT_DESCR__2 0x520
+#define DMM_PAT_DESCR__3 0x530
+#define DMM_PEG_HWINFO 0x608
+#define DMM_PEG_PRIO 0x620
+#define DMM_PEG_PRIO_PAT 0x640
+
+#define DMM_IRQSTAT_DST (1<<0)
+#define DMM_IRQSTAT_LST (1<<1)
+#define DMM_IRQSTAT_ERR_INV_DSC (1<<2)
+#define DMM_IRQSTAT_ERR_INV_DATA (1<<3)
+#define DMM_IRQSTAT_ERR_UPD_AREA (1<<4)
+#define DMM_IRQSTAT_ERR_UPD_CTRL (1<<5)
+#define DMM_IRQSTAT_ERR_UPD_DATA (1<<6)
+#define DMM_IRQSTAT_ERR_LUT_MISS (1<<7)
+
+#define DMM_IRQSTAT_ERR_MASK (DMM_IRQSTAT_ERR_INV_DSC | \
+				DMM_IRQSTAT_ERR_INV_DATA | \
+				DMM_IRQSTAT_ERR_UPD_AREA | \
+				DMM_IRQSTAT_ERR_UPD_CTRL | \
+				DMM_IRQSTAT_ERR_UPD_DATA | \
+				DMM_IRQSTAT_ERR_LUT_MISS)
+
+#define DMM_PATSTATUS_READY (1<<0)
+#define DMM_PATSTATUS_VALID (1<<1)
+#define DMM_PATSTATUS_RUN (1<<2)
+#define DMM_PATSTATUS_DONE (1<<3)
+#define DMM_PATSTATUS_LINKED (1<<4)
+#define DMM_PATSTATUS_BYPASSED (1<<7)
+#define DMM_PATSTATUS_ERR_INV_DESCR (1<<10)
+#define DMM_PATSTATUS_ERR_INV_DATA (1<<11)
+#define DMM_PATSTATUS_ERR_UPD_AREA (1<<12)
+#define DMM_PATSTATUS_ERR_UPD_CTRL (1<<13)
+#define DMM_PATSTATUS_ERR_UPD_DATA (1<<14)
+#define DMM_PATSTATUS_ERR_ACCESS (1<<15)
+
+/* note: don't treat DMM_PATSTATUS_ERR_ACCESS as an error */
+#define DMM_PATSTATUS_ERR (DMM_PATSTATUS_ERR_INV_DESCR | \
+ DMM_PATSTATUS_ERR_INV_DATA | \
+ DMM_PATSTATUS_ERR_UPD_AREA | \
+ DMM_PATSTATUS_ERR_UPD_CTRL | \
+ DMM_PATSTATUS_ERR_UPD_DATA)
+
+
+
+enum {
+ PAT_STATUS,
+ PAT_DESCR
+};
+
+struct pat_ctrl {
+ u32 start:4;
+ u32 dir:4;
+ u32 lut_id:8;
+ u32 sync:12;
+ u32 ini:4;
+};
+
+struct pat {
+ uint32_t next_pa;
+ struct pat_area area;
+ struct pat_ctrl ctrl;
+ uint32_t data_pa;
+};
+
+#define DMM_FIXED_RETRY_COUNT 1000
+
+/* create refill buffer big enough to refill all slots, plus 3 descriptors..
+ * 3 descriptors is probably the worst-case for # of 2d-slices in a 1d area,
+ * but I guess you don't hit that worst case at the same time as full area
+ * refill
+ */
+#define DESCR_SIZE 128
+#define REFILL_BUFFER_SIZE ((4 * 128 * 256) + (3 * DESCR_SIZE))
+
+struct dmm;
+
+struct dmm_txn {
+ void *engine_handle;
+ struct tcm *tcm;
+
+ uint8_t *current_va;
+ dma_addr_t current_pa;
+
+ struct pat *last_pat;
+};
+
+struct refill_engine {
+ int id;
+ struct dmm *dmm;
+ struct tcm *tcm;
+
+ uint8_t *refill_va;
+ dma_addr_t refill_pa;
+
+ /* only one trans per engine for now */
+ struct dmm_txn txn;
+
+ /* offset to lut associated with container */
+ u32 *lut_offset;
+
+ wait_queue_head_t wait_for_refill;
+
+ struct list_head idle_node;
+};
+
+struct dmm {
+ struct device *dev;
+ void __iomem *base;
+ int irq;
+
+ struct page *dummy_page;
+ dma_addr_t dummy_pa;
+
+ void *refill_va;
+ dma_addr_t refill_pa;
+
+ /* refill engines */
+ struct semaphore engine_sem;
+ struct list_head idle_head;
+ struct refill_engine *engines;
+ int num_engines;
+
+ /* container information */
+ int container_width;
+ int container_height;
+ int lut_width;
+ int lut_height;
+ int num_lut;
+
+ /* array of LUT - TCM containers */
+ struct tcm **tcm;
+
+ /* LUT table storage */
+ u32 *lut;
+
+ /* allocation list and lock */
+ struct list_head alloc_head;
+ spinlock_t list_lock;
+};
+
+#endif
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.c b/drivers/staging/omapdrm/omap_dmm_tiler.c
new file mode 100644
index 0000000..852d944
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_dmm_tiler.c
@@ -0,0 +1,830 @@
+/*
+ * DMM IOMMU driver support functions for TI OMAP processors.
+ *
+ * Author: Rob Clark <rob@ti.com>
+ * Andy Gross <andy.gross@ti.com>
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/platform_device.h> /* platform_device() */
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/time.h>
+#include <linux/list.h>
+#include <linux/semaphore.h>
+
+#include "omap_dmm_tiler.h"
+#include "omap_dmm_priv.h"
+
+/* mappings for associating views to luts */
+static struct tcm *containers[TILFMT_NFORMATS];
+static struct dmm *omap_dmm;
+
+/* Geometry table */
+#define GEOM(xshift, yshift, bytes_per_pixel) { \
+ .x_shft = (xshift), \
+ .y_shft = (yshift), \
+ .cpp = (bytes_per_pixel), \
+ .slot_w = 1 << (SLOT_WIDTH_BITS - (xshift)), \
+ .slot_h = 1 << (SLOT_HEIGHT_BITS - (yshift)), \
+ }
+
+static const struct {
+ uint32_t x_shft; /* unused X-bits (as part of bpp) */
+ uint32_t y_shft; /* unused Y-bits (as part of bpp) */
+ uint32_t cpp; /* bytes/chars per pixel */
+ uint32_t slot_w; /* width of each slot (in pixels) */
+ uint32_t slot_h; /* height of each slot (in pixels) */
+} geom[TILFMT_NFORMATS] = {
+ [TILFMT_8BIT] = GEOM(0, 0, 1),
+ [TILFMT_16BIT] = GEOM(0, 1, 2),
+ [TILFMT_32BIT] = GEOM(1, 1, 4),
+ [TILFMT_PAGE] = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1),
+};
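The GEOM() macro derives the per-format slot dimensions from the unused x/y bits; with SLOT_WIDTH_BITS = SLOT_HEIGHT_BITS = 6 (from omap_dmm_tiler.h in this patch) that works out to 64x64 slots for 8-bit, 64x32 for 16-bit, 32x32 for 32-bit and 1x1 for page mode. A standalone sketch reproducing those numbers (userspace, constants copied from the headers):

#include <stdio.h>

#define SLOT_WIDTH_BITS		6
#define SLOT_HEIGHT_BITS	6

static void slot_geom(const char *name, int xshift, int yshift, int cpp)
{
	printf("%s: cpp=%d slot=%dx%d\n", name, cpp,
	       1 << (SLOT_WIDTH_BITS - xshift),
	       1 << (SLOT_HEIGHT_BITS - yshift));
}

int main(void)
{
	slot_geom("TILFMT_8BIT",  0, 0, 1);
	slot_geom("TILFMT_16BIT", 0, 1, 2);
	slot_geom("TILFMT_32BIT", 1, 1, 4);
	slot_geom("TILFMT_PAGE",  SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1);
	return 0;
}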
+
+
+/* lookup table for registers w/ per-engine instances */
+static const uint32_t reg[][4] = {
+ [PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
+ DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
+ [PAT_DESCR] = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
+ DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
+};
+
+/* simple allocator to grab next 16 byte aligned memory from txn */
+static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
+{
+ void *ptr;
+ struct refill_engine *engine = txn->engine_handle;
+
+ /* dmm programming requires 16 byte aligned addresses */
+ txn->current_pa = round_up(txn->current_pa, 16);
+ txn->current_va = (void *)round_up((long)txn->current_va, 16);
+
+ ptr = txn->current_va;
+ *pa = txn->current_pa;
+
+ txn->current_pa += sz;
+ txn->current_va += sz;
+
+ BUG_ON((txn->current_va - engine->refill_va) > REFILL_BUFFER_SIZE);
+
+ return ptr;
+}
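alloc_dma() is a simple bump allocator over the engine's refill buffer: round the current position up to 16 bytes, hand that address out, then advance by the requested size. A standalone sketch of the same pattern (userspace; plain memory stands in for the DMA-coherent buffer, and the helper names are hypothetical):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* round v up to the next multiple of a (a must be a power of two) */
#define ROUND_UP_POW2(v, a)	(((v) + (a) - 1) & ~((uintptr_t)(a) - 1))

struct bump {
	uint8_t *cur;
	uint8_t *end;
};

/* hand out the next 16-byte-aligned chunk, as alloc_dma() does */
static void *bump_alloc(struct bump *b, size_t sz)
{
	uint8_t *p = (uint8_t *)ROUND_UP_POW2((uintptr_t)b->cur, 16);

	if (p + sz > b->end)
		return NULL;
	b->cur = p + sz;
	return p;
}

int main(void)
{
	uint8_t buf[256];
	struct bump b = { buf, buf + sizeof(buf) };
	void *a = bump_alloc(&b, 10);
	void *c = bump_alloc(&b, 24);

	assert(a && c);
	assert(((uintptr_t)a & 15) == 0 && ((uintptr_t)c & 15) == 0);
	return 0;
}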
+
+/* check status and spin until wait_mask comes true */
+static int wait_status(struct refill_engine *engine, uint32_t wait_mask)
+{
+ struct dmm *dmm = engine->dmm;
+ uint32_t r = 0, err, i;
+
+ i = DMM_FIXED_RETRY_COUNT;
+ while (true) {
+ r = readl(dmm->base + reg[PAT_STATUS][engine->id]);
+ err = r & DMM_PATSTATUS_ERR;
+ if (err)
+ return -EFAULT;
+
+ if ((r & wait_mask) == wait_mask)
+ break;
+
+ if (--i == 0)
+ return -ETIMEDOUT;
+
+ udelay(1);
+ }
+
+ return 0;
+}
+
+irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
+{
+ struct dmm *dmm = arg;
+ uint32_t status = readl(dmm->base + DMM_PAT_IRQSTATUS);
+ int i;
+
+ /* ack IRQ */
+ writel(status, dmm->base + DMM_PAT_IRQSTATUS);
+
+ for (i = 0; i < dmm->num_engines; i++) {
+ if (status & DMM_IRQSTAT_LST)
+ wake_up_interruptible(&dmm->engines[i].wait_for_refill);
+
+ status >>= 8;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * Get a handle for a DMM transaction
+ */
+static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
+{
+ struct dmm_txn *txn = NULL;
+ struct refill_engine *engine = NULL;
+
+ down(&dmm->engine_sem);
+
+ /* grab an idle engine */
+ spin_lock(&dmm->list_lock);
+ if (!list_empty(&dmm->idle_head)) {
+ engine = list_entry(dmm->idle_head.next, struct refill_engine,
+ idle_node);
+ list_del(&engine->idle_node);
+ }
+ spin_unlock(&dmm->list_lock);
+
+ BUG_ON(!engine);
+
+ txn = &engine->txn;
+ engine->tcm = tcm;
+ txn->engine_handle = engine;
+ txn->last_pat = NULL;
+ txn->current_va = engine->refill_va;
+ txn->current_pa = engine->refill_pa;
+
+ return txn;
+}
+
+/**
+ * Add a region to the DMM transaction. If pages or pages[i] is NULL, the
+ * corresponding slot is cleared (i.e. dummy_pa is programmed).
+ */
+static int dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
+ struct page **pages, uint32_t npages, uint32_t roll)
+{
+ dma_addr_t pat_pa = 0;
+ uint32_t *data;
+ struct pat *pat;
+ struct refill_engine *engine = txn->engine_handle;
+ int columns = (1 + area->x1 - area->x0);
+ int rows = (1 + area->y1 - area->y0);
+ int i = columns*rows;
+ u32 *lut = omap_dmm->lut + (engine->tcm->lut_id * omap_dmm->lut_width *
+ omap_dmm->lut_height) +
+ (area->y0 * omap_dmm->lut_width) + area->x0;
+
+ pat = alloc_dma(txn, sizeof(struct pat), &pat_pa);
+
+ if (txn->last_pat)
+ txn->last_pat->next_pa = (uint32_t)pat_pa;
+
+ pat->area = *area;
+ pat->ctrl = (struct pat_ctrl){
+ .start = 1,
+ .lut_id = engine->tcm->lut_id,
+ };
+
+ data = alloc_dma(txn, 4*i, &pat->data_pa);
+
+ while (i--) {
+ int n = i + roll;
+ if (n >= npages)
+ n -= npages;
+ data[i] = (pages && pages[n]) ?
+ page_to_phys(pages[n]) : engine->dmm->dummy_pa;
+ }
+
+ /* fill in lut with new addresses */
+ for (i = 0; i < rows; i++, lut += omap_dmm->lut_width)
+ memcpy(lut, &data[i*columns], columns * sizeof(u32));
+
+ txn->last_pat = pat;
+
+ return 0;
+}
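The loop above fills one PAT entry per slot, taking page index (i + roll) modulo npages (the index is assumed to wrap at most once, as in the driver) and substituting the dummy page address when no page is supplied. A standalone sketch of that index mapping (userspace; the addresses are made up for illustration):

#include <stdio.h>

#define DUMMY_PA 0xdeadf000u	/* stand-in for dmm->dummy_pa */

static unsigned int slot_addr(const unsigned int *pages, unsigned int npages,
			      unsigned int i, unsigned int roll)
{
	unsigned int n = i + roll;

	if (npages && n >= npages)
		n -= npages;	/* wraps at most once, as in dmm_txn_append() */
	return (pages && npages && pages[n]) ? pages[n] : DUMMY_PA;
}

int main(void)
{
	unsigned int pages[4] = { 0x1000, 0x2000, 0x3000, 0x4000 };
	unsigned int i;

	for (i = 0; i < 4; i++)
		printf("slot %u -> %#x\n", i, slot_addr(pages, 4, i, 2));
	/* prints 0x3000, 0x4000, 0x1000, 0x2000: the view is rotated by 2 */
	return 0;
}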
+
+/**
+ * Commit the DMM transaction.
+ */
+static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
+{
+ int ret = 0;
+ struct refill_engine *engine = txn->engine_handle;
+ struct dmm *dmm = engine->dmm;
+
+ if (!txn->last_pat) {
+ dev_err(engine->dmm->dev, "need at least one txn\n");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ txn->last_pat->next_pa = 0;
+
+ /* write to PAT_DESCR to clear out any pending transaction */
+ writel(0x0, dmm->base + reg[PAT_DESCR][engine->id]);
+
+ /* wait for engine ready: */
+ ret = wait_status(engine, DMM_PATSTATUS_READY);
+ if (ret) {
+ ret = -EFAULT;
+ goto cleanup;
+ }
+
+ /* kick reload */
+ writel(engine->refill_pa,
+ dmm->base + reg[PAT_DESCR][engine->id]);
+
+ if (wait) {
+ if (wait_event_interruptible_timeout(engine->wait_for_refill,
+ wait_status(engine, DMM_PATSTATUS_READY) == 0,
+ msecs_to_jiffies(1)) <= 0) {
+ dev_err(dmm->dev, "timed out waiting for done\n");
+ ret = -ETIMEDOUT;
+ }
+ }
+
+cleanup:
+ spin_lock(&dmm->list_lock);
+ list_add(&engine->idle_node, &dmm->idle_head);
+ spin_unlock(&dmm->list_lock);
+
+ up(&omap_dmm->engine_sem);
+ return ret;
+}
+
+/*
+ * DMM programming
+ */
+static int fill(struct tcm_area *area, struct page **pages,
+ uint32_t npages, uint32_t roll, bool wait)
+{
+ int ret = 0;
+ struct tcm_area slice, area_s;
+ struct dmm_txn *txn;
+
+ txn = dmm_txn_init(omap_dmm, area->tcm);
+ if (IS_ERR_OR_NULL(txn))
+ return PTR_ERR(txn);
+
+ tcm_for_each_slice(slice, *area, area_s) {
+ struct pat_area p_area = {
+ .x0 = slice.p0.x, .y0 = slice.p0.y,
+ .x1 = slice.p1.x, .y1 = slice.p1.y,
+ };
+
+ ret = dmm_txn_append(txn, &p_area, pages, npages, roll);
+ if (ret)
+ goto fail;
+
+ roll += tcm_sizeof(slice);
+ }
+
+ ret = dmm_txn_commit(txn, wait);
+
+fail:
+ return ret;
+}
+
+/*
+ * Pin/unpin
+ */
+
+/* note: slots for which pages[i] == NULL are filled w/ dummy page
+ */
+int tiler_pin(struct tiler_block *block, struct page **pages,
+ uint32_t npages, uint32_t roll, bool wait)
+{
+ int ret;
+
+ ret = fill(&block->area, pages, npages, roll, wait);
+
+ if (ret)
+ tiler_unpin(block);
+
+ return ret;
+}
+
+int tiler_unpin(struct tiler_block *block)
+{
+ return fill(&block->area, NULL, 0, 0, false);
+}
+
+/*
+ * Reserve/release
+ */
+struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
+ uint16_t h, uint16_t align)
+{
+ struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
+ u32 min_align = 128;
+ int ret;
+
+ if (!block)
+ return NULL;
+
+ BUG_ON(!validfmt(fmt));
+
+ /* convert width/height to slots */
+ w = DIV_ROUND_UP(w, geom[fmt].slot_w);
+ h = DIV_ROUND_UP(h, geom[fmt].slot_h);
+
+ /* convert alignment to slots */
+ min_align = max(min_align, (geom[fmt].slot_w * geom[fmt].cpp));
+ align = ALIGN(align, min_align);
+ align /= geom[fmt].slot_w * geom[fmt].cpp;
+
+ block->fmt = fmt;
+
+ ret = tcm_reserve_2d(containers[fmt], w, h, align, &block->area);
+ if (ret) {
+ kfree(block);
+ return NULL;
+ }
+
+ /* add to allocation list */
+ spin_lock(&omap_dmm->list_lock);
+ list_add(&block->alloc_node, &omap_dmm->alloc_head);
+ spin_unlock(&omap_dmm->list_lock);
+
+ return block;
+}
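The reservation converts pixel dimensions and a byte alignment into slot units before asking the container manager for space. A standalone sketch of that conversion for the 32-bit format (userspace; the slot geometry and the 128-byte minimum come from the code above):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define ALIGN(x, a)		(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned slot_w = 32, slot_h = 32, cpp = 4;	/* TILFMT_32BIT */
	unsigned w = 1920, h = 1080, align = 4096;	/* pixels, bytes */
	unsigned min_align = 128;

	w = DIV_ROUND_UP(w, slot_w);			/* 60 slots */
	h = DIV_ROUND_UP(h, slot_h);			/* 34 slots */
	if (slot_w * cpp > min_align)
		min_align = slot_w * cpp;		/* still 128 here */
	align = ALIGN(align, min_align);		/* 4096 bytes */
	align /= slot_w * cpp;				/* 32 slots */

	printf("%ux%u slots, align %u slots\n", w, h, align);
	return 0;
}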
+
+struct tiler_block *tiler_reserve_1d(size_t size)
+{
+ struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
+ int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ if (!block)
+ return NULL;
+
+ block->fmt = TILFMT_PAGE;
+
+ if (tcm_reserve_1d(containers[TILFMT_PAGE], num_pages,
+ &block->area)) {
+ kfree(block);
+ return NULL;
+ }
+
+ spin_lock(&omap_dmm->list_lock);
+ list_add(&block->alloc_node, &omap_dmm->alloc_head);
+ spin_unlock(&omap_dmm->list_lock);
+
+ return block;
+}
+
+/* note: pinned pages must be unpinned before the block is released! */
+int tiler_release(struct tiler_block *block)
+{
+ int ret = tcm_free(&block->area);
+
+ if (block->area.tcm)
+ dev_err(omap_dmm->dev, "failed to release block\n");
+
+ spin_lock(&omap_dmm->list_lock);
+ list_del(&block->alloc_node);
+ spin_unlock(&omap_dmm->list_lock);
+
+ kfree(block);
+ return ret;
+}
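Putting the reserve/pin/release pieces together, a caller inside the driver would reserve a block, pin its backing pages, and then use the resulting tiler address for scanout or DMA. The sketch below is illustrative in-kernel usage of the API declared in omap_dmm_tiler.h, not code from this patch; it is not a standalone program, error handling is trimmed, and the helper name is hypothetical:

/* hypothetical in-driver helper -- illustrative only, not part of this patch */
static dma_addr_t pin_buffer_in_tiler(struct page **pages, uint32_t npages,
				      uint16_t width, uint16_t height)
{
	struct tiler_block *block;
	int ret;

	/* reserve a 2d region of the 32-bit container */
	block = tiler_reserve_2d(TILFMT_32BIT, width, height, 0);
	if (IS_ERR_OR_NULL(block))
		return 0;

	/* program the PAT so the block's slots point at our pages */
	ret = tiler_pin(block, pages, npages, 0, true);
	if (ret) {
		tiler_release(block);
		return 0;
	}

	/* fixed tiler "system space" address, usable for scanout/DMA */
	return tiler_ssptr(block);
}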
+
+/*
+ * Utils
+ */
+
+/* calculate the tiler space address of a pixel in a view orientation */
+static u32 tiler_get_address(u32 orient, enum tiler_fmt fmt, u32 x, u32 y)
+{
+ u32 x_bits, y_bits, tmp, x_mask, y_mask, alignment;
+
+ x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft;
+ y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft;
+ alignment = geom[fmt].x_shft + geom[fmt].y_shft;
+
+ /* validate coordinate */
+ x_mask = MASK(x_bits);
+ y_mask = MASK(y_bits);
+
+ if (x < 0 || x > x_mask || y < 0 || y > y_mask)
+ return 0;
+
+ /* account for mirroring */
+ if (orient & MASK_X_INVERT)
+ x ^= x_mask;
+ if (orient & MASK_Y_INVERT)
+ y ^= y_mask;
+
+ /* get coordinate address */
+ if (orient & MASK_XY_FLIP)
+ tmp = ((x << y_bits) + y);
+ else
+ tmp = ((y << x_bits) + x);
+
+ return TIL_ADDR((tmp << alignment), orient, fmt);
+}
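For the default orientation the container address is simply (y << x_bits) + x, shifted left by the per-format alignment, with the access mode folded in by TIL_ADDR(). A standalone sketch working the numbers for the 32-bit view at pixel (5, 3), using the constants from omap_dmm_tiler.h (CONT_WIDTH_BITS=14, CONT_HEIGHT_BITS=13, SHIFT_ACC_MODE=27; TILFMT_32BIT has x_shft = y_shft = 1 and enum value 2):

#include <stdio.h>

int main(void)
{
	unsigned int cont_w_bits = 14, cont_h_bits = 13;	/* CONT_*_BITS */
	unsigned int x_shft = 1, y_shft = 1, fmt = 2;		/* TILFMT_32BIT */
	unsigned int x = 5, y = 3;

	unsigned int x_bits = cont_w_bits - x_shft;		/* 13 */
	unsigned int y_bits = cont_h_bits - y_shft;		/* 12 */
	unsigned int alignment = x_shft + y_shft;		/* 2 */
	unsigned int addr;

	if (x >= (1u << x_bits) || y >= (1u << y_bits))
		return 1;					/* out of range */

	/* default orientation: y-major layout, then fold in the access mode */
	addr = (((y << x_bits) + x) << alignment) | (fmt << 27);

	printf("tiler address: %#x\n", addr);			/* 0x10018014 */
	return 0;
}

tiler_ssptr() then adds the TILVIEW_8BIT base (0x60000000) to place the result in the tiler's system address range.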
+
+dma_addr_t tiler_ssptr(struct tiler_block *block)
+{
+ BUG_ON(!validfmt(block->fmt));
+
+ return TILVIEW_8BIT + tiler_get_address(0, block->fmt,
+ block->area.p0.x * geom[block->fmt].slot_w,
+ block->area.p0.y * geom[block->fmt].slot_h);
+}
+
+void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h)
+{
+ BUG_ON(!validfmt(fmt));
+ *w = round_up(*w, geom[fmt].slot_w);
+ *h = round_up(*h, geom[fmt].slot_h);
+}
+
+uint32_t tiler_stride(enum tiler_fmt fmt)
+{
+ BUG_ON(!validfmt(fmt));
+
+ return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft);
+}
+
+size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h)
+{
+ tiler_align(fmt, &w, &h);
+ return geom[fmt].cpp * w * h;
+}
+
+size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h)
+{
+ BUG_ON(!validfmt(fmt));
+ return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
+}
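As a concrete reference, the helpers above yield the following for a 1920x1080 32-bit buffer (standalone userspace sketch; constants assumed from the headers, 4 KiB pages):

#include <stdio.h>

#define ROUND_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	unsigned cont_w_bits = 14, y_shft = 1, cpp = 4;
	unsigned slot_w = 32, slot_h = 32, page = 4096;
	unsigned w = 1920, h = 1080;

	unsigned stride = 1u << (cont_w_bits + y_shft);		/* tiler_stride */
	unsigned size = cpp * ROUND_UP(w, slot_w) * ROUND_UP(h, slot_h);
	unsigned vsize = ROUND_UP(cpp * w, page) * h;

	printf("stride=%u size=%u vsize=%u\n", stride, size, vsize);
	/* stride=32768 size=8355840 vsize=8847360 */
	return 0;
}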
+
+int omap_dmm_remove(void)
+{
+ struct tiler_block *block, *_block;
+ int i;
+
+ if (omap_dmm) {
+ /* free all area regions */
+ spin_lock(&omap_dmm->list_lock);
+ list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
+ alloc_node) {
+ list_del(&block->alloc_node);
+ kfree(block);
+ }
+ spin_unlock(&omap_dmm->list_lock);
+
+ for (i = 0; i < omap_dmm->num_lut; i++)
+ if (omap_dmm->tcm && omap_dmm->tcm[i])
+ omap_dmm->tcm[i]->deinit(omap_dmm->tcm[i]);
+ kfree(omap_dmm->tcm);
+
+ kfree(omap_dmm->engines);
+ if (omap_dmm->refill_va)
+ dma_free_coherent(omap_dmm->dev,
+ REFILL_BUFFER_SIZE * omap_dmm->num_engines,
+ omap_dmm->refill_va,
+ omap_dmm->refill_pa);
+ if (omap_dmm->dummy_page)
+ __free_page(omap_dmm->dummy_page);
+
+ vfree(omap_dmm->lut);
+
+ if (omap_dmm->irq != -1)
+ free_irq(omap_dmm->irq, omap_dmm);
+
+ kfree(omap_dmm);
+ }
+
+ return 0;
+}
+
+int omap_dmm_init(struct drm_device *dev)
+{
+ int ret = -EFAULT, i;
+ struct tcm_area area = {0};
+ u32 hwinfo, pat_geom, lut_table_size;
+ struct omap_drm_platform_data *pdata = dev->dev->platform_data;
+
+ if (!pdata || !pdata->dmm_pdata) {
+ dev_err(dev->dev, "dmm platform data not present, skipping\n");
+ return ret;
+ }
+
+ omap_dmm = kzalloc(sizeof(*omap_dmm), GFP_KERNEL);
+ if (!omap_dmm) {
+ dev_err(dev->dev, "failed to allocate driver data section\n");
+ goto fail;
+ }
+
+ /* lookup hwmod data - base address and irq */
+ omap_dmm->base = pdata->dmm_pdata->base;
+ omap_dmm->irq = pdata->dmm_pdata->irq;
+ omap_dmm->dev = dev->dev;
+
+ if (!omap_dmm->base) {
+ dev_err(dev->dev, "failed to get dmm base address\n");
+ goto fail;
+ }
+
+ hwinfo = readl(omap_dmm->base + DMM_PAT_HWINFO);
+ omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
+ omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
+ omap_dmm->container_width = 256;
+ omap_dmm->container_height = 128;
+
+ /* read out actual LUT width and height */
+ pat_geom = readl(omap_dmm->base + DMM_PAT_GEOMETRY);
+ omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
+ omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;
+
+ /* initialize DMM registers */
+ writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__0);
+ writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__1);
+ writel(0x80808080, omap_dmm->base + DMM_PAT_VIEW_MAP__0);
+ writel(0x80000000, omap_dmm->base + DMM_PAT_VIEW_MAP_BASE);
+ writel(0x88888888, omap_dmm->base + DMM_TILER_OR__0);
+ writel(0x88888888, omap_dmm->base + DMM_TILER_OR__1);
+
+ ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
+ "omap_dmm_irq_handler", omap_dmm);
+
+ if (ret) {
+ dev_err(dev->dev, "couldn't register IRQ %d, error %d\n",
+ omap_dmm->irq, ret);
+ omap_dmm->irq = -1;
+ goto fail;
+ }
+
+ /* Enable all interrupts for each refill engine except
+ * ERR_LUT_MISS<n>, which is only advisory (live scanout buffers are
+ * deliberately refilled for accelerated pan/scroll, so LUT misses are
+ * expected), and FILL_DSC<n>, which is of no interest here.
+ */
+ writel(0x7e7e7e7e, omap_dmm->base + DMM_PAT_IRQENABLE_SET);
+
+ lut_table_size = omap_dmm->lut_width * omap_dmm->lut_height *
+ omap_dmm->num_lut;
+
+ omap_dmm->lut = vmalloc(lut_table_size * sizeof(*omap_dmm->lut));
+ if (!omap_dmm->lut) {
+ dev_err(dev->dev, "could not allocate lut table\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
+ if (!omap_dmm->dummy_page) {
+ dev_err(dev->dev, "could not allocate dummy page\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+ omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
+
+ /* alloc refill memory */
+ omap_dmm->refill_va = dma_alloc_coherent(dev->dev,
+ REFILL_BUFFER_SIZE * omap_dmm->num_engines,
+ &omap_dmm->refill_pa, GFP_KERNEL);
+ if (!omap_dmm->refill_va) {
+ dev_err(dev->dev, "could not allocate refill memory\n");
+ goto fail;
+ }
+
+ /* alloc engines */
+ omap_dmm->engines = kzalloc(
+ omap_dmm->num_engines * sizeof(struct refill_engine),
+ GFP_KERNEL);
+ if (!omap_dmm->engines) {
+ dev_err(dev->dev, "could not allocate engines\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ sema_init(&omap_dmm->engine_sem, omap_dmm->num_engines);
+ INIT_LIST_HEAD(&omap_dmm->idle_head);
+ for (i = 0; i < omap_dmm->num_engines; i++) {
+ omap_dmm->engines[i].id = i;
+ omap_dmm->engines[i].dmm = omap_dmm;
+ omap_dmm->engines[i].refill_va = omap_dmm->refill_va +
+ (REFILL_BUFFER_SIZE * i);
+ omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa +
+ (REFILL_BUFFER_SIZE * i);
+ init_waitqueue_head(&omap_dmm->engines[i].wait_for_refill);
+
+ list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head);
+ }
+
+ omap_dmm->tcm = kzalloc(omap_dmm->num_lut * sizeof(*omap_dmm->tcm),
+ GFP_KERNEL);
+ if (!omap_dmm->tcm) {
+ dev_err(dev->dev, "failed to allocate lut ptrs\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ /* init containers */
+ for (i = 0; i < omap_dmm->num_lut; i++) {
+ omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
+ omap_dmm->container_height,
+ NULL);
+
+ if (!omap_dmm->tcm[i]) {
+ dev_err(dev->dev, "failed to allocate container\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ omap_dmm->tcm[i]->lut_id = i;
+ }
+
+ /* assign access mode containers to applicable tcm container */
+ /* OMAP 4 has 1 container for all 4 views */
+ containers[TILFMT_8BIT] = omap_dmm->tcm[0];
+ containers[TILFMT_16BIT] = omap_dmm->tcm[0];
+ containers[TILFMT_32BIT] = omap_dmm->tcm[0];
+ containers[TILFMT_PAGE] = omap_dmm->tcm[0];
+
+ INIT_LIST_HEAD(&omap_dmm->alloc_head);
+ spin_lock_init(&omap_dmm->list_lock);
+
+ area = (struct tcm_area) {
+ .is2d = true,
+ .tcm = NULL,
+ .p1.x = omap_dmm->container_width - 1,
+ .p1.y = omap_dmm->container_height - 1,
+ };
+
+ for (i = 0; i < lut_table_size; i++)
+ omap_dmm->lut[i] = omap_dmm->dummy_pa;
+
+ /* initialize all LUTs to dummy page entries */
+ for (i = 0; i < omap_dmm->num_lut; i++) {
+ area.tcm = omap_dmm->tcm[i];
+ if (fill(&area, NULL, 0, 0, true))
+ dev_err(omap_dmm->dev, "refill failed");
+ }
+
+ dev_info(omap_dmm->dev, "initialized all PAT entries\n");
+
+ return 0;
+
+fail:
+ omap_dmm_remove();
+ return ret;
+}
+
+/*
+ * debugfs support
+ */
+
+#ifdef CONFIG_DEBUG_FS
+
+static const char *alphabet = "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
+static const char *special = ".,:;'\"`~!^-+";
+
+static void fill_map(char **map, int xdiv, int ydiv, struct tcm_area *a,
+ char c, bool ovw)
+{
+ int x, y;
+ for (y = a->p0.y / ydiv; y <= a->p1.y / ydiv; y++)
+ for (x = a->p0.x / xdiv; x <= a->p1.x / xdiv; x++)
+ if (map[y][x] == ' ' || ovw)
+ map[y][x] = c;
+}
+
+static void fill_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p,
+ char c)
+{
+ map[p->y / ydiv][p->x / xdiv] = c;
+}
+
+static char read_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p)
+{
+ return map[p->y / ydiv][p->x / xdiv];
+}
+
+static int map_width(int xdiv, int x0, int x1)
+{
+ return (x1 / xdiv) - (x0 / xdiv) + 1;
+}
+
+static void text_map(char **map, int xdiv, char *nice, int yd, int x0, int x1)
+{
+ char *p = map[yd] + (x0 / xdiv);
+ int w = (map_width(xdiv, x0, x1) - strlen(nice)) / 2;
+ if (w >= 0) {
+ p += w;
+ while (*nice)
+ *p++ = *nice++;
+ }
+}
+
+static void map_1d_info(char **map, int xdiv, int ydiv, char *nice,
+ struct tcm_area *a)
+{
+ sprintf(nice, "%dK", tcm_sizeof(*a) * 4);
+ if (a->p0.y + 1 < a->p1.y) {
+ text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, 0,
+ 256 - 1);
+ } else if (a->p0.y < a->p1.y) {
+ if (strlen(nice) < map_width(xdiv, a->p0.x, 256 - 1))
+ text_map(map, xdiv, nice, a->p0.y / ydiv,
+ a->p0.x + xdiv, 256 - 1);
+ else if (strlen(nice) < map_width(xdiv, 0, a->p1.x))
+ text_map(map, xdiv, nice, a->p1.y / ydiv,
+ 0, a->p1.y - xdiv);
+ } else if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x)) {
+ text_map(map, xdiv, nice, a->p0.y / ydiv, a->p0.x, a->p1.x);
+ }
+}
+
+static void map_2d_info(char **map, int xdiv, int ydiv, char *nice,
+ struct tcm_area *a)
+{
+ sprintf(nice, "(%d*%d)", tcm_awidth(*a), tcm_aheight(*a));
+ if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x))
+ text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv,
+ a->p0.x, a->p1.x);
+}
+
+int tiler_map_show(struct seq_file *s, void *arg)
+{
+ int xdiv = 2, ydiv = 1;
+ char **map = NULL, *global_map;
+ struct tiler_block *block;
+ struct tcm_area a, p;
+ int i;
+ const char *m2d = alphabet;
+ const char *a2d = special;
+ const char *m2dp = m2d, *a2dp = a2d;
+ char nice[128];
+ int h_adj = omap_dmm->lut_height / ydiv;
+ int w_adj = omap_dmm->lut_width / xdiv;
+ unsigned long flags;
+
+ map = kzalloc(h_adj * sizeof(*map), GFP_KERNEL);
+ global_map = kzalloc((w_adj + 1) * h_adj, GFP_KERNEL);
+
+ if (!map || !global_map)
+ goto error;
+
+ memset(global_map, ' ', (w_adj + 1) * h_adj);
+ for (i = 0; i < omap_dmm->lut_height; i++) {
+ map[i] = global_map + i * (w_adj + 1);
+ map[i][w_adj] = 0;
+ }
+ spin_lock_irqsave(&omap_dmm->list_lock, flags);
+
+ list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) {
+ if (block->fmt != TILFMT_PAGE) {
+ fill_map(map, xdiv, ydiv, &block->area, *m2dp, true);
+ if (!*++a2dp)
+ a2dp = a2d;
+ if (!*++m2dp)
+ m2dp = m2d;
+ map_2d_info(map, xdiv, ydiv, nice, &block->area);
+ } else {
+ bool start = read_map_pt(map, xdiv, ydiv,
+ &block->area.p0)
+ == ' ';
+ bool end = read_map_pt(map, xdiv, ydiv, &block->area.p1)
+ == ' ';
+ tcm_for_each_slice(a, block->area, p)
+ fill_map(map, xdiv, ydiv, &a, '=', true);
+ fill_map_pt(map, xdiv, ydiv, &block->area.p0,
+ start ? '<' : 'X');
+ fill_map_pt(map, xdiv, ydiv, &block->area.p1,
+ end ? '>' : 'X');
+ map_1d_info(map, xdiv, ydiv, nice, &block->area);
+ }
+ }
+
+ spin_unlock_irqrestore(&omap_dmm->list_lock, flags);
+
+ if (s) {
+ seq_printf(s, "BEGIN DMM TILER MAP\n");
+ for (i = 0; i < 128; i++)
+ seq_printf(s, "%03d:%s\n", i, map[i]);
+ seq_printf(s, "END TILER MAP\n");
+ } else {
+ dev_dbg(omap_dmm->dev, "BEGIN DMM TILER MAP\n");
+ for (i = 0; i < 128; i++)
+ dev_dbg(omap_dmm->dev, "%03d:%s\n", i, map[i]);
+ dev_dbg(omap_dmm->dev, "END TILER MAP\n");
+ }
+
+error:
+ kfree(map);
+ kfree(global_map);
+
+ return 0;
+}
+#endif
diff --git a/drivers/staging/omapdrm/omap_dmm_tiler.h b/drivers/staging/omapdrm/omap_dmm_tiler.h
new file mode 100644
index 0000000..f87cb65
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_dmm_tiler.h
@@ -0,0 +1,135 @@
+/*
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Rob Clark <rob@ti.com>
+ * Andy Gross <andy.gross@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef OMAP_DMM_TILER_H
+#define OMAP_DMM_TILER_H
+
+#include "omap_drv.h"
+#include "tcm.h"
+
+enum tiler_fmt {
+ TILFMT_8BIT = 0,
+ TILFMT_16BIT,
+ TILFMT_32BIT,
+ TILFMT_PAGE,
+ TILFMT_NFORMATS
+};
+
+struct pat_area {
+ u32 x0:8;
+ u32 y0:8;
+ u32 x1:8;
+ u32 y1:8;
+};
+
+struct tiler_block {
+ struct list_head alloc_node; /* node for global block list */
+ struct tcm_area area; /* area */
+ enum tiler_fmt fmt; /* format */
+};
+
+/* bits representing the same slot in DMM-TILER hw-block */
+#define SLOT_WIDTH_BITS 6
+#define SLOT_HEIGHT_BITS 6
+
+/* bits reserved to describe coordinates in DMM-TILER hw-block */
+#define CONT_WIDTH_BITS 14
+#define CONT_HEIGHT_BITS 13
+
+/* calculated constants */
+#define TILER_PAGE (1 << (SLOT_WIDTH_BITS + SLOT_HEIGHT_BITS))
+#define TILER_WIDTH (1 << (CONT_WIDTH_BITS - SLOT_WIDTH_BITS))
+#define TILER_HEIGHT (1 << (CONT_HEIGHT_BITS - SLOT_HEIGHT_BITS))
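With the bit widths above, these derived constants come out to a 4 KiB tiler page and a 256x128-slot container, matching the container_width/container_height values hard-coded in omap_dmm_init(). A trivial standalone check (userspace):

#include <assert.h>

int main(void)
{
	assert((1 << (6 + 6)) == 4096);		/* TILER_PAGE */
	assert((1 << (14 - 6)) == 256);		/* TILER_WIDTH */
	assert((1 << (13 - 6)) == 128);		/* TILER_HEIGHT */
	return 0;
}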
+
+/* tiler space addressing bitfields */
+#define MASK_XY_FLIP (1 << 31)
+#define MASK_Y_INVERT (1 << 30)
+#define MASK_X_INVERT (1 << 29)
+#define SHIFT_ACC_MODE 27
+#define MASK_ACC_MODE 3
+
+#define MASK(bits) ((1 << (bits)) - 1)
+
+#define TILVIEW_8BIT 0x60000000u
+#define TILVIEW_16BIT (TILVIEW_8BIT + VIEW_SIZE)
+#define TILVIEW_32BIT (TILVIEW_16BIT + VIEW_SIZE)
+#define TILVIEW_PAGE (TILVIEW_32BIT + VIEW_SIZE)
+#define TILVIEW_END (TILVIEW_PAGE + VIEW_SIZE)
+
+/* create tsptr by adding view orientation and access mode */
+#define TIL_ADDR(x, orient, a)\
+ ((u32) (x) | (orient) | ((a) << SHIFT_ACC_MODE))
+
+/* externally accessible functions */
+int omap_dmm_init(struct drm_device *dev);
+int omap_dmm_remove(void);
+
+#ifdef CONFIG_DEBUG_FS
+int tiler_map_show(struct seq_file *s, void *arg);
+#endif
+
+/* pin/unpin */
+int tiler_pin(struct tiler_block *block, struct page **pages,
+ uint32_t npages, uint32_t roll, bool wait);
+int tiler_unpin(struct tiler_block *block);
+
+/* reserve/release */
+struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w, uint16_t h,
+ uint16_t align);
+struct tiler_block *tiler_reserve_1d(size_t size);
+int tiler_release(struct tiler_block *block);
+
+/* utilities */
+dma_addr_t tiler_ssptr(struct tiler_block *block);
+uint32_t tiler_stride(enum tiler_fmt fmt);
+size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h);
+size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h);
+void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h);
+
+
+/* GEM bo flags -> tiler fmt */
+static inline enum tiler_fmt gem2fmt(uint32_t flags)
+{
+ switch (flags & OMAP_BO_TILED) {
+ case OMAP_BO_TILED_8:
+ return TILFMT_8BIT;
+ case OMAP_BO_TILED_16:
+ return TILFMT_16BIT;
+ case OMAP_BO_TILED_32:
+ return TILFMT_32BIT;
+ default:
+ return TILFMT_PAGE;
+ }
+}
+
+static inline bool validfmt(enum tiler_fmt fmt)
+{
+ switch (fmt) {
+ case TILFMT_8BIT:
+ case TILFMT_16BIT:
+ case TILFMT_32BIT:
+ case TILFMT_PAGE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+struct omap_dmm_platform_data {
+ void __iomem *base;
+ int irq;
+};
+
+#endif
diff --git a/drivers/staging/omapdrm/omap_drm.h b/drivers/staging/omapdrm/omap_drm.h
new file mode 100644
index 0000000..f0ac34a
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_drm.h
@@ -0,0 +1,123 @@
+/*
+ * include/drm/omap_drm.h
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAP_DRM_H__
+#define __OMAP_DRM_H__
+
+#include <drm/drm.h>
+
+/* Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ */
+
+#define OMAP_PARAM_CHIPSET_ID 1 /* ie. 0x3430, 0x4430, etc */
+
+struct drm_omap_param {
+ uint64_t param; /* in */
+ uint64_t value; /* in (set_param), out (get_param) */
+};
+
+#define OMAP_BO_SCANOUT 0x00000001 /* scanout capable (phys contiguous) */
+#define OMAP_BO_CACHE_MASK 0x00000006 /* cache type mask, see cache modes */
+#define OMAP_BO_TILED_MASK 0x00000f00 /* tiled mapping mask, see tiled modes */
+
+/* cache modes */
+#define OMAP_BO_CACHED 0x00000000 /* default */
+#define OMAP_BO_WC 0x00000002 /* write-combine */
+#define OMAP_BO_UNCACHED 0x00000004 /* strongly-ordered (uncached) */
+
+/* tiled modes */
+#define OMAP_BO_TILED_8 0x00000100
+#define OMAP_BO_TILED_16 0x00000200
+#define OMAP_BO_TILED_32 0x00000300
+#define OMAP_BO_TILED (OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32)
+
+union omap_gem_size {
+ uint32_t bytes; /* (for non-tiled formats) */
+ struct {
+ uint16_t width;
+ uint16_t height;
+ } tiled; /* (for tiled formats) */
+};
+
+struct drm_omap_gem_new {
+ union omap_gem_size size; /* in */
+ uint32_t flags; /* in */
+ uint32_t handle; /* out */
+ uint32_t __pad;
+};
+
+/* mask of operations: */
+enum omap_gem_op {
+ OMAP_GEM_READ = 0x01,
+ OMAP_GEM_WRITE = 0x02,
+};
+
+struct drm_omap_gem_cpu_prep {
+ uint32_t handle; /* buffer handle (in) */
+ uint32_t op; /* mask of omap_gem_op (in) */
+};
+
+struct drm_omap_gem_cpu_fini {
+ uint32_t handle; /* buffer handle (in) */
+ uint32_t op; /* mask of omap_gem_op (in) */
+ /* TODO: this could carry information about which regions were touched
+ * by sw, allowing smarter cache operations. For now it is a placeholder:
+ * set it to zero and a full buffer flush is done.
+ */
+ uint32_t nregions;
+ uint32_t __pad;
+};
+
+struct drm_omap_gem_info {
+ uint32_t handle; /* buffer handle (in) */
+ uint32_t pad;
+ uint64_t offset; /* mmap offset (out) */
+ /* note: for tiled buffers, the user virtual size can differ from the
+ * physical size (i.e. the number of pages needed to back the object),
+ * which is what DRM_IOCTL_GEM_OPEN returns. The size reported here is
+ * the one to use when mmap()'ing the buffer:
+ */
+ uint32_t size; /* virtual size for mmap'ing (out) */
+ uint32_t __pad;
+};
+
+#define DRM_OMAP_GET_PARAM 0x00
+#define DRM_OMAP_SET_PARAM 0x01
+/* placeholder for plugin-api
+#define DRM_OMAP_GET_BASE 0x02
+*/
+#define DRM_OMAP_GEM_NEW 0x03
+#define DRM_OMAP_GEM_CPU_PREP 0x04
+#define DRM_OMAP_GEM_CPU_FINI 0x05
+#define DRM_OMAP_GEM_INFO 0x06
+#define DRM_OMAP_NUM_IOCTLS 0x07
+
+#define DRM_IOCTL_OMAP_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_PARAM, struct drm_omap_param)
+#define DRM_IOCTL_OMAP_SET_PARAM DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_SET_PARAM, struct drm_omap_param)
+/* placeholder for plugin-api
+#define DRM_IOCTL_OMAP_GET_BASE DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GET_BASE, struct drm_omap_get_base)
+*/
+#define DRM_IOCTL_OMAP_GEM_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_NEW, struct drm_omap_gem_new)
+#define DRM_IOCTL_OMAP_GEM_CPU_PREP DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_PREP, struct drm_omap_gem_cpu_prep)
+#define DRM_IOCTL_OMAP_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_OMAP_GEM_CPU_FINI, struct drm_omap_gem_cpu_fini)
+#define DRM_IOCTL_OMAP_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_OMAP_GEM_INFO, struct drm_omap_gem_info)
+
+#endif /* __OMAP_DRM_H__ */
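From userspace, the ioctls declared above are driven with plain ioctl() calls on the DRM device node. Below is a minimal sketch that allocates a 1920x1080 32-bit tiled buffer and queries its mmap size/offset; the device node path and the include paths (this header plus libdrm's <drm/drm.h>) are assumptions, and error handling is minimal:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#include "omap_drm.h"

int main(void)
{
	struct drm_omap_gem_new req;
	struct drm_omap_gem_info info;
	int fd = open("/dev/dri/card0", O_RDWR);	/* assumed node */

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.size.tiled.width = 1920;
	req.size.tiled.height = 1080;
	req.flags = OMAP_BO_TILED_32 | OMAP_BO_WC;
	if (ioctl(fd, DRM_IOCTL_OMAP_GEM_NEW, &req) < 0)
		return 1;

	memset(&info, 0, sizeof(info));
	info.handle = req.handle;
	if (ioctl(fd, DRM_IOCTL_OMAP_GEM_INFO, &info) == 0)
		printf("handle %u: mmap size %u, offset %#llx\n",
		       req.handle, info.size,
		       (unsigned long long)info.offset);

	close(fd);
	return 0;
}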
diff --git a/drivers/staging/omapdrm/omap_drv.c b/drivers/staging/omapdrm/omap_drv.c
new file mode 100644
index 0000000..3bbea9a
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_drv.c
@@ -0,0 +1,836 @@
+/*
+ * drivers/staging/omapdrm/omap_drv.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
+
+#define DRIVER_NAME MODULE_NAME
+#define DRIVER_DESC "OMAP DRM"
+#define DRIVER_DATE "20110917"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+#define DRIVER_PATCHLEVEL 0
+
+struct drm_device *drm_device;
+
+static int num_crtc = CONFIG_DRM_OMAP_NUM_CRTCS;
+
+MODULE_PARM_DESC(num_crtc, "Number of overlays to use as CRTCs");
+module_param(num_crtc, int, 0600);
+
+/*
+ * mode config funcs
+ */
+
+/* Notes about mapping DSS and DRM entities:
+ * CRTC: overlay
+ * encoder: manager, with some extension to allow one primary CRTC
+ * and zero or more video CRTCs to be mapped to one encoder
+ * (exact mapping still open)
+ * connector: dssdev; a manager can be attached to/detached from
+ * different devices
+ */
+
+static void omap_fb_output_poll_changed(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ DBG("dev=%p", dev);
+ if (priv->fbdev) {
+ drm_fb_helper_hotplug_event(priv->fbdev);
+ }
+}
+
+static struct drm_mode_config_funcs omap_mode_config_funcs = {
+ .fb_create = omap_framebuffer_create,
+ .output_poll_changed = omap_fb_output_poll_changed,
+};
+
+static int get_connector_type(struct omap_dss_device *dssdev)
+{
+ switch (dssdev->type) {
+ case OMAP_DISPLAY_TYPE_HDMI:
+ return DRM_MODE_CONNECTOR_HDMIA;
+ case OMAP_DISPLAY_TYPE_DPI:
+ if (!strcmp(dssdev->name, "dvi"))
+ return DRM_MODE_CONNECTOR_DVID;
+ /* fallthrough */
+ default:
+ return DRM_MODE_CONNECTOR_Unknown;
+ }
+}
+
+#if 0 /* enable when dss2 supports hotplug */
+static int omap_drm_notifier(struct notifier_block *nb,
+ unsigned long evt, void *arg)
+{
+ switch (evt) {
+ case OMAP_DSS_SIZE_CHANGE:
+ case OMAP_DSS_HOTPLUG_CONNECT:
+ case OMAP_DSS_HOTPLUG_DISCONNECT: {
+ struct drm_device *dev = drm_device;
+ DBG("hotplug event: evt=%d, dev=%p", evt, dev);
+ if (dev) {
+ drm_sysfs_hotplug_event(dev);
+ }
+ return NOTIFY_OK;
+ }
+ default: /* don't care about other events for now */
+ return NOTIFY_DONE;
+ }
+}
+#endif
+
+static void dump_video_chains(void)
+{
+ int i;
+
+ DBG("dumping video chains: ");
+ for (i = 0; i < omap_dss_get_num_overlays(); i++) {
+ struct omap_overlay *ovl = omap_dss_get_overlay(i);
+ struct omap_overlay_manager *mgr = ovl->manager;
+ struct omap_dss_device *dssdev = mgr ? mgr->device : NULL;
+ if (dssdev) {
+ DBG("%d: %s -> %s -> %s", i, ovl->name, mgr->name,
+ dssdev->name);
+ } else if (mgr) {
+ DBG("%d: %s -> %s", i, ovl->name, mgr->name);
+ } else {
+ DBG("%d: %s", i, ovl->name);
+ }
+ }
+}
+
+/* create encoders for each manager */
+static int create_encoder(struct drm_device *dev,
+ struct omap_overlay_manager *mgr)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct drm_encoder *encoder = omap_encoder_init(dev, mgr);
+
+ if (!encoder) {
+ dev_err(dev->dev, "could not create encoder: %s\n",
+ mgr->name);
+ return -ENOMEM;
+ }
+
+ BUG_ON(priv->num_encoders >= ARRAY_SIZE(priv->encoders));
+
+ priv->encoders[priv->num_encoders++] = encoder;
+
+ return 0;
+}
+
+/* create connectors for each display device */
+static int create_connector(struct drm_device *dev,
+ struct omap_dss_device *dssdev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ static struct notifier_block *notifier;
+ struct drm_connector *connector;
+ int j;
+
+ if (!dssdev->driver) {
+ dev_warn(dev->dev, "%s has no driver.. skipping it\n",
+ dssdev->name);
+ return 0;
+ }
+
+ if (!(dssdev->driver->get_timings ||
+ dssdev->driver->read_edid)) {
+ dev_warn(dev->dev, "%s driver does not support "
+ "get_timings or read_edid.. skipping it!\n",
+ dssdev->name);
+ return 0;
+ }
+
+ connector = omap_connector_init(dev,
+ get_connector_type(dssdev), dssdev);
+
+ if (!connector) {
+ dev_err(dev->dev, "could not create connector: %s\n",
+ dssdev->name);
+ return -ENOMEM;
+ }
+
+ BUG_ON(priv->num_connectors >= ARRAY_SIZE(priv->connectors));
+
+ priv->connectors[priv->num_connectors++] = connector;
+
+#if 0 /* enable when dss2 supports hotplug */
+ notifier = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
+ notifier->notifier_call = omap_drm_notifier;
+ omap_dss_add_notify(dssdev, notifier);
+#else
+ notifier = NULL;
+#endif
+
+ for (j = 0; j < priv->num_encoders; j++) {
+ struct omap_overlay_manager *mgr =
+ omap_encoder_get_manager(priv->encoders[j]);
+ if (mgr->device == dssdev) {
+ drm_mode_connector_attach_encoder(connector,
+ priv->encoders[j]);
+ }
+ }
+
+ return 0;
+}
+
+/* Create up to max_overlays CRTCs, each mapped to an overlay. By default,
+ * connect the overlays to different managers/encoders, giving priority
+ * to encoders whose connectors have a detected connection.
+ */
+static int create_crtc(struct drm_device *dev, struct omap_overlay *ovl,
+ int *j, unsigned int connected_connectors)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_overlay_manager *mgr = NULL;
+ struct drm_crtc *crtc;
+
+ /* find next best connector, ones with detected connection first
+ */
+ while (*j < priv->num_connectors && !mgr) {
+ if (connected_connectors & (1 << *j)) {
+ struct drm_encoder *encoder =
+ omap_connector_attached_encoder(
+ priv->connectors[*j]);
+ if (encoder) {
+ mgr = omap_encoder_get_manager(encoder);
+ }
+ }
+ (*j)++;
+ }
+
+ /* if we couldn't find another connected connector, lets start
+ * looking at the unconnected connectors:
+ *
+ * note: it might not be immediately apparent, but thanks to
+ * the !mgr check in both this loop and the one above, the only
+ * way to enter this loop is with *j == priv->num_connectors,
+ * so idx can never go negative.
+ */
+ while (*j < 2 * priv->num_connectors && !mgr) {
+ int idx = *j - priv->num_connectors;
+ if (!(connected_connectors & (1 << idx))) {
+ struct drm_encoder *encoder =
+ omap_connector_attached_encoder(
+ priv->connectors[idx]);
+ if (encoder) {
+ mgr = omap_encoder_get_manager(encoder);
+ }
+ }
+ (*j)++;
+ }
+
+ crtc = omap_crtc_init(dev, ovl, priv->num_crtcs);
+
+ if (!crtc) {
+ dev_err(dev->dev, "could not create CRTC: %s\n",
+ ovl->name);
+ return -ENOMEM;
+ }
+
+ BUG_ON(priv->num_crtcs >= ARRAY_SIZE(priv->crtcs));
+
+ priv->crtcs[priv->num_crtcs++] = crtc;
+
+ return 0;
+}
+
+static int create_plane(struct drm_device *dev, struct omap_overlay *ovl,
+ unsigned int possible_crtcs)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct drm_plane *plane =
+ omap_plane_init(dev, ovl, possible_crtcs, false);
+
+ if (!plane) {
+ dev_err(dev->dev, "could not create plane: %s\n",
+ ovl->name);
+ return -ENOMEM;
+ }
+
+ BUG_ON(priv->num_planes >= ARRAY_SIZE(priv->planes));
+
+ priv->planes[priv->num_planes++] = plane;
+
+ return 0;
+}
+
+static int match_dev_name(struct omap_dss_device *dssdev, void *data)
+{
+ return !strcmp(dssdev->name, data);
+}
+
+static unsigned int detect_connectors(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ unsigned int connected_connectors = 0;
+ int i;
+
+ for (i = 0; i < priv->num_connectors; i++) {
+ struct drm_connector *connector = priv->connectors[i];
+ if (omap_connector_detect(connector, true) ==
+ connector_status_connected) {
+ connected_connectors |= (1 << i);
+ }
+ }
+
+ return connected_connectors;
+}
+
+static int omap_modeset_init(struct drm_device *dev)
+{
+ const struct omap_drm_platform_data *pdata = dev->dev->platform_data;
+ struct omap_kms_platform_data *kms_pdata = NULL;
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_dss_device *dssdev = NULL;
+ int i, j;
+ unsigned int connected_connectors = 0;
+
+ drm_mode_config_init(dev);
+
+ if (pdata && pdata->kms_pdata) {
+ kms_pdata = pdata->kms_pdata;
+
+ /* if platform data is provided by the board file, use it to
+ * control which overlays, managers, and devices we own.
+ */
+ for (i = 0; i < kms_pdata->mgr_cnt; i++) {
+ struct omap_overlay_manager *mgr =
+ omap_dss_get_overlay_manager(
+ kms_pdata->mgr_ids[i]);
+ create_encoder(dev, mgr);
+ }
+
+ for (i = 0; i < kms_pdata->dev_cnt; i++) {
+ struct omap_dss_device *dssdev =
+ omap_dss_find_device(
+ (void *)kms_pdata->dev_names[i],
+ match_dev_name);
+ if (!dssdev) {
+ dev_warn(dev->dev, "no such dssdev: %s\n",
+ kms_pdata->dev_names[i]);
+ continue;
+ }
+ create_connector(dev, dssdev);
+ }
+
+ connected_connectors = detect_connectors(dev);
+
+ j = 0;
+ for (i = 0; i < kms_pdata->ovl_cnt; i++) {
+ struct omap_overlay *ovl =
+ omap_dss_get_overlay(kms_pdata->ovl_ids[i]);
+ create_crtc(dev, ovl, &j, connected_connectors);
+ }
+
+ for (i = 0; i < kms_pdata->pln_cnt; i++) {
+ struct omap_overlay *ovl =
+ omap_dss_get_overlay(kms_pdata->pln_ids[i]);
+ create_plane(dev, ovl, (1 << priv->num_crtcs) - 1);
+ }
+ } else {
+ /* otherwise just grab up to CONFIG_DRM_OMAP_NUM_CRTCS and try
+ * to make educated guesses about everything else
+ */
+ int max_overlays = min(omap_dss_get_num_overlays(), num_crtc);
+
+ for (i = 0; i < omap_dss_get_num_overlay_managers(); i++) {
+ create_encoder(dev, omap_dss_get_overlay_manager(i));
+ }
+
+ for_each_dss_dev(dssdev) {
+ create_connector(dev, dssdev);
+ }
+
+ connected_connectors = detect_connectors(dev);
+
+ j = 0;
+ for (i = 0; i < max_overlays; i++) {
+ create_crtc(dev, omap_dss_get_overlay(i),
+ &j, connected_connectors);
+ }
+
+ /* use any remaining overlays as drm planes */
+ for (; i < omap_dss_get_num_overlays(); i++) {
+ struct omap_overlay *ovl = omap_dss_get_overlay(i);
+ create_plane(dev, ovl, (1 << priv->num_crtcs) - 1);
+ }
+ }
+
+ /* for now keep the mapping of CRTCs and encoders static.. */
+ for (i = 0; i < priv->num_encoders; i++) {
+ struct drm_encoder *encoder = priv->encoders[i];
+ struct omap_overlay_manager *mgr =
+ omap_encoder_get_manager(encoder);
+
+ encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
+
+ DBG("%s: possible_crtcs=%08x", mgr->name,
+ encoder->possible_crtcs);
+ }
+
+ dump_video_chains();
+
+ dev->mode_config.min_width = 32;
+ dev->mode_config.min_height = 32;
+
+ /* note: eventually will need some cpu_is_omapXYZ() type stuff here
+ * to fill in these limits properly on different OMAP generations..
+ */
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+
+ dev->mode_config.funcs = &omap_mode_config_funcs;
+
+ return 0;
+}
+
+static void omap_modeset_free(struct drm_device *dev)
+{
+ drm_mode_config_cleanup(dev);
+}
+
+/*
+ * drm ioctl funcs
+ */
+
+
+static int ioctl_get_param(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_omap_param *args = data;
+
+ DBG("%p: param=%llu", dev, args->param);
+
+ switch (args->param) {
+ case OMAP_PARAM_CHIPSET_ID:
+ args->value = GET_OMAP_TYPE;
+ break;
+ default:
+ DBG("unknown parameter %lld", args->param);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ioctl_set_param(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_omap_param *args = data;
+
+ switch (args->param) {
+ default:
+ DBG("unknown parameter %lld", args->param);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ioctl_gem_new(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_omap_gem_new *args = data;
+ DBG("%p:%p: size=0x%08x, flags=%08x", dev, file_priv,
+ args->size.bytes, args->flags);
+ return omap_gem_new_handle(dev, file_priv, args->size,
+ args->flags, &args->handle);
+}
+
+static int ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_omap_gem_cpu_prep *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ VERB("%p:%p: handle=%d, op=%x", dev, file_priv, args->handle, args->op);
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (!obj) {
+ return -ENOENT;
+ }
+
+ ret = omap_gem_op_sync(obj, args->op);
+
+ if (!ret) {
+ ret = omap_gem_op_start(obj, args->op);
+ }
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+static int ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_omap_gem_cpu_fini *args = data;
+ struct drm_gem_object *obj;
+ int ret;
+
+ VERB("%p:%p: handle=%d", dev, file_priv, args->handle);
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (!obj) {
+ return -ENOENT;
+ }
+
+ /* XXX flushy, flushy */
+ ret = 0;
+
+ if (!ret) {
+ ret = omap_gem_op_finish(obj, args->op);
+ }
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+static int ioctl_gem_info(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_omap_gem_info *args = data;
+ struct drm_gem_object *obj;
+ int ret = 0;
+
+ DBG("%p:%p: handle=%d", dev, file_priv, args->handle);
+
+ obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ if (!obj) {
+ return -ENOENT;
+ }
+
+ args->size = omap_gem_mmap_size(obj);
+ args->offset = omap_gem_mmap_offset(obj);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+struct drm_ioctl_desc ioctls[DRM_COMMAND_END - DRM_COMMAND_BASE] = {
+ DRM_IOCTL_DEF_DRV(OMAP_GET_PARAM, ioctl_get_param, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(OMAP_SET_PARAM, ioctl_set_param, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF_DRV(OMAP_GEM_NEW, ioctl_gem_new, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_PREP, ioctl_gem_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(OMAP_GEM_CPU_FINI, ioctl_gem_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(OMAP_GEM_INFO, ioctl_gem_info, DRM_UNLOCKED|DRM_AUTH),
+};
+
+/*
+ * drm driver funcs
+ */
+
+/**
+ * load - setup chip and create an initial config
+ * @dev: DRM device
+ * @flags: startup flags
+ *
+ * The driver load routine has to do several things:
+ * - initialize the memory manager
+ * - allocate initial config memory
+ * - setup the DRM framebuffer with the allocated memory
+ */
+static int dev_load(struct drm_device *dev, unsigned long flags)
+{
+ struct omap_drm_private *priv;
+ int ret;
+
+ DBG("load: dev=%p", dev);
+
+ drm_device = dev;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(dev->dev, "could not allocate priv\n");
+ return -ENOMEM;
+ }
+
+ dev->dev_private = priv;
+
+ omap_gem_init(dev);
+
+ ret = omap_modeset_init(dev);
+ if (ret) {
+ dev_err(dev->dev, "omap_modeset_init failed: ret=%d\n", ret);
+ dev->dev_private = NULL;
+ kfree(priv);
+ return ret;
+ }
+
+ priv->fbdev = omap_fbdev_init(dev);
+ if (!priv->fbdev) {
+ dev_warn(dev->dev, "omap_fbdev_init failed\n");
+ /* well, limp along without an fbdev.. maybe X11 will work? */
+ }
+
+ drm_kms_helper_poll_init(dev);
+
+ ret = drm_vblank_init(dev, priv->num_crtcs);
+ if (ret) {
+ dev_warn(dev->dev, "could not init vblank\n");
+ }
+
+ return 0;
+}
+
+static int dev_unload(struct drm_device *dev)
+{
+ DBG("unload: dev=%p", dev);
+
+ drm_vblank_cleanup(dev);
+ drm_kms_helper_poll_fini(dev);
+
+ omap_fbdev_free(dev);
+ omap_modeset_free(dev);
+ omap_gem_deinit(dev);
+
+ kfree(dev->dev_private);
+ dev->dev_private = NULL;
+
+ return 0;
+}
+
+static int dev_open(struct drm_device *dev, struct drm_file *file)
+{
+ file->driver_priv = NULL;
+
+ DBG("open: dev=%p, file=%p", dev, file);
+
+ return 0;
+}
+
+static int dev_firstopen(struct drm_device *dev)
+{
+ DBG("firstopen: dev=%p", dev);
+ return 0;
+}
+
+/**
+ * lastclose - clean up after all DRM clients have exited
+ * @dev: DRM device
+ *
+ * Take care of cleaning up after all DRM clients have exited. In the
+ * mode setting case, we want to restore the kernel's initial mode (just
+ * in case the last client left us in a bad state).
+ */
+static void dev_lastclose(struct drm_device *dev)
+{
+ /* we don't support vga-switcheroo.. so just make sure the fbdev
+ * mode is active
+ */
+ struct omap_drm_private *priv = dev->dev_private;
+ int ret;
+
+ DBG("lastclose: dev=%p", dev);
+
+ ret = drm_fb_helper_restore_fbdev_mode(priv->fbdev);
+ if (ret)
+ DBG("failed to restore crtc mode");
+}
+
+static void dev_preclose(struct drm_device *dev, struct drm_file *file)
+{
+ DBG("preclose: dev=%p", dev);
+}
+
+static void dev_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ DBG("postclose: dev=%p, file=%p", dev, file);
+}
+
+/**
+ * enable_vblank - enable vblank interrupt events
+ * @dev: DRM device
+ * @crtc: which irq to enable
+ *
+ * Enable vblank interrupts for @crtc. If the device doesn't have
+ * a hardware vblank counter, this routine should be a no-op, since
+ * interrupts will have to stay on to keep the count accurate.
+ *
+ * RETURNS
+ * Zero on success, appropriate errno if the given @crtc's vblank
+ * interrupt cannot be enabled.
+ */
+static int dev_enable_vblank(struct drm_device *dev, int crtc)
+{
+ DBG("enable_vblank: dev=%p, crtc=%d", dev, crtc);
+ return 0;
+}
+
+/**
+ * disable_vblank - disable vblank interrupt events
+ * @dev: DRM device
+ * @crtc: which irq to disable
+ *
+ * Disable vblank interrupts for @crtc. If the device doesn't have
+ * a hardware vblank counter, this routine should be a no-op, since
+ * interrupts will have to stay on to keep the count accurate.
+ */
+static void dev_disable_vblank(struct drm_device *dev, int crtc)
+{
+ DBG("disable_vblank: dev=%p, crtc=%d", dev, crtc);
+}
+
+static irqreturn_t dev_irq_handler(DRM_IRQ_ARGS)
+{
+ return IRQ_HANDLED;
+}
+
+static void dev_irq_preinstall(struct drm_device *dev)
+{
+ DBG("irq_preinstall: dev=%p", dev);
+}
+
+static int dev_irq_postinstall(struct drm_device *dev)
+{
+ DBG("irq_postinstall: dev=%p", dev);
+ return 0;
+}
+
+static void dev_irq_uninstall(struct drm_device *dev)
+{
+ DBG("irq_uninstall: dev=%p", dev);
+}
+
+static struct vm_operations_struct omap_gem_vm_ops = {
+ .fault = omap_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct file_operations omapdriver_fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .unlocked_ioctl = drm_ioctl,
+ .release = drm_release,
+ .mmap = omap_gem_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ .read = drm_read,
+ .llseek = noop_llseek,
+};
+
+static struct drm_driver omap_drm_driver = {
+ .driver_features =
+ DRIVER_HAVE_IRQ | DRIVER_MODESET | DRIVER_GEM,
+ .load = dev_load,
+ .unload = dev_unload,
+ .open = dev_open,
+ .firstopen = dev_firstopen,
+ .lastclose = dev_lastclose,
+ .preclose = dev_preclose,
+ .postclose = dev_postclose,
+ .get_vblank_counter = drm_vblank_count,
+ .enable_vblank = dev_enable_vblank,
+ .disable_vblank = dev_disable_vblank,
+ .irq_preinstall = dev_irq_preinstall,
+ .irq_postinstall = dev_irq_postinstall,
+ .irq_uninstall = dev_irq_uninstall,
+ .irq_handler = dev_irq_handler,
+ .reclaim_buffers = drm_core_reclaim_buffers,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = omap_debugfs_init,
+ .debugfs_cleanup = omap_debugfs_cleanup,
+#endif
+ .gem_init_object = omap_gem_init_object,
+ .gem_free_object = omap_gem_free_object,
+ .gem_vm_ops = &omap_gem_vm_ops,
+ .dumb_create = omap_gem_dumb_create,
+ .dumb_map_offset = omap_gem_dumb_map_offset,
+ .dumb_destroy = omap_gem_dumb_destroy,
+ .ioctls = ioctls,
+ .num_ioctls = DRM_OMAP_NUM_IOCTLS,
+ .fops = &omapdriver_fops,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+};
+
+static int pdev_suspend(struct platform_device *pDevice, pm_message_t state)
+{
+ DBG("");
+ return 0;
+}
+
+static int pdev_resume(struct platform_device *device)
+{
+ DBG("");
+ return 0;
+}
+
+static void pdev_shutdown(struct platform_device *device)
+{
+ DBG("");
+}
+
+static int pdev_probe(struct platform_device *device)
+{
+ DBG("%s", device->name);
+ return drm_platform_init(&omap_drm_driver, device);
+}
+
+static int pdev_remove(struct platform_device *device)
+{
+ DBG("");
+ drm_platform_exit(&omap_drm_driver, device);
+ return 0;
+}
+
+struct platform_driver pdev = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = pdev_probe,
+ .remove = pdev_remove,
+ .suspend = pdev_suspend,
+ .resume = pdev_resume,
+ .shutdown = pdev_shutdown,
+};
+
+static int __init omap_drm_init(void)
+{
+ DBG("init");
+ return platform_driver_register(&pdev);
+}
+
+static void __exit omap_drm_fini(void)
+{
+ DBG("fini");
+ platform_driver_unregister(&pdev);
+}
+
+/* need late_initcall() so we load after the DSS drivers are loaded */
+late_initcall(omap_drm_init);
+module_exit(omap_drm_fini);
+
+MODULE_AUTHOR("Rob Clark <rob@ti.com>");
+MODULE_DESCRIPTION("OMAP DRM Display Driver");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/staging/omapdrm/omap_drv.h b/drivers/staging/omapdrm/omap_drv.h
new file mode 100644
index 0000000..61fe022
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_drv.h
@@ -0,0 +1,175 @@
+/*
+ * drivers/staging/omapdrm/omap_drv.h
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAP_DRV_H__
+#define __OMAP_DRV_H__
+
+#include <video/omapdss.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include "omap_drm.h"
+#include "omap_priv.h"
+
+#define DBG(fmt, ...) DRM_DEBUG(fmt"\n", ##__VA_ARGS__)
+#define VERB(fmt, ...) if (0) DRM_DEBUG(fmt, ##__VA_ARGS__) /* verbose debug */
+
+#define MODULE_NAME "omapdrm"
+
+/* max # of mapper-ids that can be assigned. TODO: come up with a better
+ * (but still inexpensive) way to store/access per-buffer mapper private
+ * data.
+ */
+#define MAX_MAPPERS 2
+
+struct omap_drm_private {
+ unsigned int num_crtcs;
+ struct drm_crtc *crtcs[8];
+ unsigned int num_planes;
+ struct drm_plane *planes[8];
+ unsigned int num_encoders;
+ struct drm_encoder *encoders[8];
+ unsigned int num_connectors;
+ struct drm_connector *connectors[8];
+
+ struct drm_fb_helper *fbdev;
+
+ bool has_dmm;
+};
+
+#ifdef CONFIG_DEBUG_FS
+int omap_debugfs_init(struct drm_minor *minor);
+void omap_debugfs_cleanup(struct drm_minor *minor);
+#endif
+
+struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev);
+void omap_fbdev_free(struct drm_device *dev);
+
+struct drm_crtc *omap_crtc_init(struct drm_device *dev,
+ struct omap_overlay *ovl, int id);
+
+struct drm_plane *omap_plane_init(struct drm_device *dev,
+ struct omap_overlay *ovl, unsigned int possible_crtcs,
+ bool priv);
+int omap_plane_dpms(struct drm_plane *plane, int mode);
+int omap_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h);
+
+struct drm_encoder *omap_encoder_init(struct drm_device *dev,
+ struct omap_overlay_manager *mgr);
+struct omap_overlay_manager *omap_encoder_get_manager(
+ struct drm_encoder *encoder);
+struct drm_encoder *omap_connector_attached_encoder(
+ struct drm_connector *connector);
+enum drm_connector_status omap_connector_detect(
+ struct drm_connector *connector, bool force);
+
+struct drm_connector *omap_connector_init(struct drm_device *dev,
+ int connector_type, struct omap_dss_device *dssdev);
+void omap_connector_mode_set(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+void omap_connector_flush(struct drm_connector *connector,
+ int x, int y, int w, int h);
+
+struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd);
+struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos);
+struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p);
+int omap_framebuffer_pin(struct drm_framebuffer *fb);
+void omap_framebuffer_unpin(struct drm_framebuffer *fb);
+void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, int x, int y,
+ struct omap_overlay_info *info);
+struct drm_connector *omap_framebuffer_get_next_connector(
+ struct drm_framebuffer *fb, struct drm_connector *from);
+void omap_framebuffer_flush(struct drm_framebuffer *fb,
+ int x, int y, int w, int h);
+
+void omap_gem_init(struct drm_device *dev);
+void omap_gem_deinit(struct drm_device *dev);
+
+struct drm_gem_object *omap_gem_new(struct drm_device *dev,
+ union omap_gem_size gsize, uint32_t flags);
+int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+ union omap_gem_size gsize, uint32_t flags, uint32_t *handle);
+void omap_gem_free_object(struct drm_gem_object *obj);
+int omap_gem_init_object(struct drm_gem_object *obj);
+void *omap_gem_vaddr(struct drm_gem_object *obj);
+int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle);
+int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args);
+int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op);
+int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op);
+int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op);
+int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
+ void (*fxn)(void *arg), void *arg);
+int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll);
+int omap_gem_get_paddr(struct drm_gem_object *obj,
+ dma_addr_t *paddr, bool remap);
+int omap_gem_put_paddr(struct drm_gem_object *obj);
+uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj);
+size_t omap_gem_mmap_size(struct drm_gem_object *obj);
+
+static inline int align_pitch(int pitch, int width, int bpp)
+{
+ int bytespp = (bpp + 7) / 8;
+ /* in case someone tries to feed us a completely bogus stride: */
+ pitch = max(pitch, width * bytespp);
+ /* PVR needs alignment to 8 pixels.. right now that is the most
+ * restrictive stride requirement..
+ */
+ return ALIGN(pitch, 8 * bytespp);
+}
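For example, a 1366-pixel-wide 16-bpp scanline has a minimum pitch of 2732 bytes, which the 8-pixel (16-byte) alignment rounds up to 2736. A standalone sketch of the same arithmetic (userspace):

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) / (a) * (a))

static int align_pitch_demo(int pitch, int width, int bpp)
{
	int bytespp = (bpp + 7) / 8;

	if (pitch < width * bytespp)
		pitch = width * bytespp;
	return ALIGN(pitch, 8 * bytespp);	/* 8-pixel stride alignment */
}

int main(void)
{
	printf("%d\n", align_pitch_demo(0, 1366, 16));	/* 2736 */
	printf("%d\n", align_pitch_demo(0, 1280, 32));	/* 5120 */
	return 0;
}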
+
+/* should these be made into common util helpers?
+ */
+
+static inline int objects_lookup(struct drm_device *dev,
+ struct drm_file *filp, uint32_t pixel_format,
+ struct drm_gem_object **bos, uint32_t *handles)
+{
+ int i, n = drm_format_num_planes(pixel_format);
+
+ for (i = 0; i < n; i++) {
+ bos[i] = drm_gem_object_lookup(dev, filp, handles[i]);
+ if (!bos[i]) {
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ while (--i >= 0) {
+ drm_gem_object_unreference_unlocked(bos[i]);
+ }
+ return -ENOENT;
+}
+
+#endif /* __OMAP_DRV_H__ */
diff --git a/drivers/staging/omapdrm/omap_encoder.c b/drivers/staging/omapdrm/omap_encoder.c
new file mode 100644
index 0000000..06c52cb
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_encoder.c
@@ -0,0 +1,171 @@
+/*
+ * drivers/staging/omapdrm/omap_encoder.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+/*
+ * encoder funcs
+ */
+
+#define to_omap_encoder(x) container_of(x, struct omap_encoder, base)
+
+struct omap_encoder {
+ struct drm_encoder base;
+ struct omap_overlay_manager *mgr;
+};
+
+static void omap_encoder_destroy(struct drm_encoder *encoder)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ DBG("%s", omap_encoder->mgr->name);
+ drm_encoder_cleanup(encoder);
+ kfree(omap_encoder);
+}
+
+static void omap_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ DBG("%s: %d", omap_encoder->mgr->name, mode);
+}
+
+static bool omap_encoder_mode_fixup(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ DBG("%s", omap_encoder->mgr->name);
+ return true;
+}
+
+static void omap_encoder_mode_set(struct drm_encoder *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct omap_drm_private *priv = dev->dev_private;
+ int i;
+
+ mode = adjusted_mode;
+
+ DBG("%s: set mode: %dx%d", omap_encoder->mgr->name,
+ mode->hdisplay, mode->vdisplay);
+
+ for (i = 0; i < priv->num_connectors; i++) {
+ struct drm_connector *connector = priv->connectors[i];
+ if (connector->encoder == encoder) {
+ omap_connector_mode_set(connector, mode);
+ }
+ }
+}
+
+static void omap_encoder_prepare(struct drm_encoder *encoder)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ struct drm_encoder_helper_funcs *encoder_funcs =
+ encoder->helper_private;
+ DBG("%s", omap_encoder->mgr->name);
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+}
+
+static void omap_encoder_commit(struct drm_encoder *encoder)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ struct drm_encoder_helper_funcs *encoder_funcs =
+ encoder->helper_private;
+ DBG("%s", omap_encoder->mgr->name);
+ omap_encoder->mgr->apply(omap_encoder->mgr);
+ encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
+}
+
+static const struct drm_encoder_funcs omap_encoder_funcs = {
+ .destroy = omap_encoder_destroy,
+};
+
+static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
+ .dpms = omap_encoder_dpms,
+ .mode_fixup = omap_encoder_mode_fixup,
+ .mode_set = omap_encoder_mode_set,
+ .prepare = omap_encoder_prepare,
+ .commit = omap_encoder_commit,
+};
+
+struct omap_overlay_manager *omap_encoder_get_manager(
+ struct drm_encoder *encoder)
+{
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ return omap_encoder->mgr;
+}
+
+/* initialize encoder */
+struct drm_encoder *omap_encoder_init(struct drm_device *dev,
+ struct omap_overlay_manager *mgr)
+{
+ struct drm_encoder *encoder = NULL;
+ struct omap_encoder *omap_encoder;
+ struct omap_overlay_manager_info info;
+ int ret;
+
+ DBG("%s", mgr->name);
+
+ omap_encoder = kzalloc(sizeof(*omap_encoder), GFP_KERNEL);
+ if (!omap_encoder) {
+ dev_err(dev->dev, "could not allocate encoder\n");
+ goto fail;
+ }
+
+ omap_encoder->mgr = mgr;
+ encoder = &omap_encoder->base;
+
+ drm_encoder_init(dev, encoder, &omap_encoder_funcs,
+ DRM_MODE_ENCODER_TMDS);
+ drm_encoder_helper_add(encoder, &omap_encoder_helper_funcs);
+
+ mgr->get_manager_info(mgr, &info);
+
+ /* TODO: fix hard-coded setup.. */
+ info.default_color = 0x00000000;
+ info.trans_key = 0x00000000;
+ info.trans_key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
+ info.trans_enabled = false;
+
+ ret = mgr->set_manager_info(mgr, &info);
+ if (ret) {
+ dev_err(dev->dev, "could not set manager info\n");
+ goto fail;
+ }
+
+ ret = mgr->apply(mgr);
+ if (ret) {
+ dev_err(dev->dev, "could not apply\n");
+ goto fail;
+ }
+
+ return encoder;
+
+fail:
+ if (encoder) {
+ omap_encoder_destroy(encoder);
+ }
+
+ return NULL;
+}
diff --git a/drivers/staging/omapdrm/omap_fb.c b/drivers/staging/omapdrm/omap_fb.c
new file mode 100644
index 0000000..d021a7e
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_fb.c
@@ -0,0 +1,353 @@
+/*
+ * drivers/staging/omapdrm/omap_fb.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+/*
+ * framebuffer funcs
+ */
+
+/* per-format info: */
+struct format {
+ enum omap_color_mode dss_format;
+ uint32_t pixel_format;
+ struct {
+ int stride_bpp; /* this times width is stride */
+ int sub_y; /* sub-sample in y dimension */
+ } planes[4];
+ bool yuv;
+};
+
+static const struct format formats[] = {
+ /* 16bpp [A]RGB: */
+ { OMAP_DSS_COLOR_RGB16, DRM_FORMAT_RGB565, {{2, 1}}, false }, /* RGB16-565 */
+ { OMAP_DSS_COLOR_RGB12U, DRM_FORMAT_RGBX4444, {{2, 1}}, false }, /* RGB12x-4444 */
+ { OMAP_DSS_COLOR_RGBX16, DRM_FORMAT_XRGB4444, {{2, 1}}, false }, /* xRGB12-4444 */
+ { OMAP_DSS_COLOR_RGBA16, DRM_FORMAT_RGBA4444, {{2, 1}}, false }, /* RGBA12-4444 */
+ { OMAP_DSS_COLOR_ARGB16, DRM_FORMAT_ARGB4444, {{2, 1}}, false }, /* ARGB16-4444 */
+ { OMAP_DSS_COLOR_XRGB16_1555, DRM_FORMAT_XRGB1555, {{2, 1}}, false }, /* xRGB15-1555 */
+ { OMAP_DSS_COLOR_ARGB16_1555, DRM_FORMAT_ARGB1555, {{2, 1}}, false }, /* ARGB16-1555 */
+ /* 24bpp RGB: */
+ { OMAP_DSS_COLOR_RGB24P, DRM_FORMAT_RGB888, {{3, 1}}, false }, /* RGB24-888 */
+ /* 32bpp [A]RGB: */
+ { OMAP_DSS_COLOR_RGBX32, DRM_FORMAT_RGBX8888, {{4, 1}}, false }, /* RGBx24-8888 */
+ { OMAP_DSS_COLOR_RGB24U, DRM_FORMAT_XRGB8888, {{4, 1}}, false }, /* xRGB24-8888 */
+ { OMAP_DSS_COLOR_RGBA32, DRM_FORMAT_RGBA8888, {{4, 1}}, false }, /* RGBA32-8888 */
+ { OMAP_DSS_COLOR_ARGB32, DRM_FORMAT_ARGB8888, {{4, 1}}, false }, /* ARGB32-8888 */
+ /* YUV: */
+ { OMAP_DSS_COLOR_NV12, DRM_FORMAT_NV12, {{1, 1}, {1, 2}}, true },
+ { OMAP_DSS_COLOR_YUV2, DRM_FORMAT_YUYV, {{2, 1}}, true },
+ { OMAP_DSS_COLOR_UYVY, DRM_FORMAT_UYVY, {{2, 1}}, true },
+};
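+
+/* Illustrative reading of the table above: DRM_FORMAT_XRGB8888 is a single
+ * plane with stride_bpp=4, so its byte stride is width * 4. DRM_FORMAT_NV12
+ * has a full-resolution luma plane (stride_bpp=1, sub_y=1) plus a chroma
+ * plane with the same byte stride but half the rows (stride_bpp=1, sub_y=2),
+ * which is what the per-plane size check in omap_framebuffer_init() relies on.
+ */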
+
+/* per-plane info for the fb: */
+struct plane {
+ struct drm_gem_object *bo;
+ uint32_t pitch;
+ uint32_t offset;
+ dma_addr_t paddr;
+};
+
+#define to_omap_framebuffer(x) container_of(x, struct omap_framebuffer, base)
+
+struct omap_framebuffer {
+ struct drm_framebuffer base;
+ const struct format *format;
+ struct plane planes[4];
+};
+
+static int omap_framebuffer_create_handle(struct drm_framebuffer *fb,
+ struct drm_file *file_priv,
+ unsigned int *handle)
+{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ return drm_gem_handle_create(file_priv,
+ omap_fb->planes[0].bo, handle);
+}
+
+static void omap_framebuffer_destroy(struct drm_framebuffer *fb)
+{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ int i, n = drm_format_num_planes(omap_fb->format->pixel_format);
+
+ DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
+
+ drm_framebuffer_cleanup(fb);
+
+ for (i = 0; i < n; i++) {
+ struct plane *plane = &omap_fb->planes[i];
+ if (plane->bo)
+ drm_gem_object_unreference_unlocked(plane->bo);
+ }
+
+ kfree(omap_fb);
+}
+
+static int omap_framebuffer_dirty(struct drm_framebuffer *fb,
+ struct drm_file *file_priv, unsigned flags, unsigned color,
+ struct drm_clip_rect *clips, unsigned num_clips)
+{
+ int i;
+
+ for (i = 0; i < num_clips; i++) {
+ omap_framebuffer_flush(fb, clips[i].x1, clips[i].y1,
+ clips[i].x2 - clips[i].x1,
+ clips[i].y2 - clips[i].y1);
+ }
+
+ return 0;
+}
+
+static const struct drm_framebuffer_funcs omap_framebuffer_funcs = {
+ .create_handle = omap_framebuffer_create_handle,
+ .destroy = omap_framebuffer_destroy,
+ .dirty = omap_framebuffer_dirty,
+};
+
+/* pins buffer in preparation for scanout */
+int omap_framebuffer_pin(struct drm_framebuffer *fb)
+{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ int ret, i, n = drm_format_num_planes(omap_fb->format->pixel_format);
+
+ for (i = 0; i < n; i++) {
+ struct plane *plane = &omap_fb->planes[i];
+ ret = omap_gem_get_paddr(plane->bo, &plane->paddr, true);
+ if (ret)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ while (--i >= 0) {
+ struct plane *plane = &omap_fb->planes[i];
+ omap_gem_put_paddr(plane->bo);
+ }
+ return ret;
+}
+
+/* releases buffer when done with scanout */
+void omap_framebuffer_unpin(struct drm_framebuffer *fb)
+{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ int i, n = drm_format_num_planes(omap_fb->format->pixel_format);
+
+ for (i = 0; i < n; i++) {
+ struct plane *plane = &omap_fb->planes[i];
+ omap_gem_put_paddr(plane->bo);
+ }
+}
+
+/* update ovl info for scanout, handles cases of multi-planar fb's, etc.
+ */
+void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, int x, int y,
+ struct omap_overlay_info *info)
+{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ const struct format *format = omap_fb->format;
+ struct plane *plane = &omap_fb->planes[0];
+ unsigned int offset;
+
+ offset = plane->offset +
+ (x * format->planes[0].stride_bpp) +
+ (y * plane->pitch / format->planes[0].sub_y);
+
+ info->color_mode = format->dss_format;
+ info->paddr = plane->paddr + offset;
+ info->screen_width = plane->pitch / format->planes[0].stride_bpp;
+
+ if (format->dss_format == OMAP_DSS_COLOR_NV12) {
+ plane = &omap_fb->planes[1];
+ offset = plane->offset +
+ (x * format->planes[1].stride_bpp) +
+ (y * plane->pitch / format->planes[1].sub_y);
+ info->p_uv_addr = plane->paddr + offset;
+ } else {
+ info->p_uv_addr = 0;
+ }
+}
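+
+/* Worked example (illustrative only): for a single-plane XRGB8888 fb with
+ * pitch=5120 (stride_bpp=4, sub_y=1) and plane->offset=0, scanning out from
+ * x=16, y=8 gives offset = 16 * 4 + 8 * 5120 = 41024 bytes past paddr, and
+ * screen_width = 5120 / 4 = 1280 pixels.
+ */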
+
+struct drm_gem_object *omap_framebuffer_bo(struct drm_framebuffer *fb, int p)
+{
+ struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb);
+ if (p >= drm_format_num_planes(omap_fb->format->pixel_format))
+ return NULL;
+ return omap_fb->planes[p].bo;
+}
+
+/* iterate thru all the connectors, returning ones that are attached
+ * to the same fb..
+ */
+struct drm_connector *omap_framebuffer_get_next_connector(
+ struct drm_framebuffer *fb, struct drm_connector *from)
+{
+ struct drm_device *dev = fb->dev;
+ struct list_head *connector_list = &dev->mode_config.connector_list;
+ struct drm_connector *connector = from;
+
+ if (!from) {
+ return list_first_entry(connector_list, typeof(*from), head);
+ }
+
+ list_for_each_entry_from(connector, connector_list, head) {
+ if (connector != from) {
+ struct drm_encoder *encoder = connector->encoder;
+ struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
+ if (crtc && crtc->fb == fb) {
+ return connector;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+/* flush an area of the framebuffer (in case of manual update display that
+ * is not automatically flushed)
+ */
+void omap_framebuffer_flush(struct drm_framebuffer *fb,
+ int x, int y, int w, int h)
+{
+ struct drm_connector *connector = NULL;
+
+ VERB("flush: %d,%d %dx%d, fb=%p", x, y, w, h, fb);
+
+ while ((connector = omap_framebuffer_get_next_connector(fb, connector))) {
+ /* only consider connectors that are part of a chain */
+ if (connector->encoder && connector->encoder->crtc) {
+ /* TODO: maybe this should propagate thru the crtc who
+ * could do the coordinate translation..
+ */
+ struct drm_crtc *crtc = connector->encoder->crtc;
+ int cx = max(0, x - crtc->x);
+ int cy = max(0, y - crtc->y);
+ int cw = w + (x - crtc->x) - cx;
+ int ch = h + (y - crtc->y) - cy;
+
+ omap_connector_flush(connector, cx, cy, cw, ch);
+ }
+ }
+}
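+
+/* Illustrative example of the clipping above: with a crtc scanning out at
+ * y=480 within the fb, a flush of the region (0, 470, 100x50) yields
+ * cy = max(0, 470 - 480) = 0 and ch = 50 + (470 - 480) - 0 = 40, so only
+ * the 40 rows that actually overlap the crtc are pushed to the connector.
+ */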
+
+struct drm_framebuffer *omap_framebuffer_create(struct drm_device *dev,
+ struct drm_file *file, struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_gem_object *bos[4];
+ struct drm_framebuffer *fb;
+ int ret;
+
+ ret = objects_lookup(dev, file, mode_cmd->pixel_format,
+ bos, mode_cmd->handles);
+ if (ret)
+ return ERR_PTR(ret);
+
+ fb = omap_framebuffer_init(dev, mode_cmd, bos);
+ if (IS_ERR(fb)) {
+ int i, n = drm_format_num_planes(mode_cmd->pixel_format);
+ for (i = 0; i < n; i++)
+ drm_gem_object_unreference_unlocked(bos[i]);
+ return fb;
+ }
+ return fb;
+}
+
+struct drm_framebuffer *omap_framebuffer_init(struct drm_device *dev,
+ struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **bos)
+{
+ struct omap_framebuffer *omap_fb;
+ struct drm_framebuffer *fb = NULL;
+ const struct format *format = NULL;
+ int ret, i, n = drm_format_num_planes(mode_cmd->pixel_format);
+
+ DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
+ dev, mode_cmd, mode_cmd->width, mode_cmd->height,
+ (char *)&mode_cmd->pixel_format);
+
+ for (i = 0; i < ARRAY_SIZE(formats); i++) {
+ if (formats[i].pixel_format == mode_cmd->pixel_format) {
+ format = &formats[i];
+ break;
+ }
+ }
+
+ if (!format) {
+ dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
+ (char *)&mode_cmd->pixel_format);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ omap_fb = kzalloc(sizeof(*omap_fb), GFP_KERNEL);
+ if (!omap_fb) {
+ dev_err(dev->dev, "could not allocate fb\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ fb = &omap_fb->base;
+ ret = drm_framebuffer_init(dev, fb, &omap_framebuffer_funcs);
+ if (ret) {
+ dev_err(dev->dev, "framebuffer init failed: %d\n", ret);
+ goto fail;
+ }
+
+ DBG("create: FB ID: %d (%p)", fb->base.id, fb);
+
+ omap_fb->format = format;
+
+ for (i = 0; i < n; i++) {
+ struct plane *plane = &omap_fb->planes[i];
+ int size, pitch = mode_cmd->pitches[i];
+
+ if (pitch < (mode_cmd->width * format->planes[i].stride_bpp)) {
+ dev_err(dev->dev, "provided buffer pitch is too small! %d < %d\n",
+ pitch, mode_cmd->width * format->planes[i].stride_bpp);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ size = pitch * mode_cmd->height / format->planes[i].sub_y;
+
+ if (size > (bos[i]->size - mode_cmd->offsets[i])) {
+ dev_err(dev->dev, "provided buffer object is too small! %d < %d\n",
+ bos[i]->size - mode_cmd->offsets[i], size);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ plane->bo = bos[i];
+ plane->offset = mode_cmd->offsets[i];
+ plane->pitch = mode_cmd->pitches[i];
+ plane->paddr = 0; /* filled in by omap_framebuffer_pin() */
+ }
+
+ drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+
+ return fb;
+
+fail:
+ if (fb) {
+ omap_framebuffer_destroy(fb);
+ }
+ return ERR_PTR(ret);
+}
diff --git a/drivers/staging/omapdrm/omap_fbdev.c b/drivers/staging/omapdrm/omap_fbdev.c
new file mode 100644
index 0000000..96940bb
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_fbdev.c
@@ -0,0 +1,387 @@
+/*
+ * drivers/staging/omapdrm/omap_fbdev.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+#include "drm_crtc.h"
+#include "drm_fb_helper.h"
+
+MODULE_PARM_DESC(ywrap, "Enable ywrap scrolling (omap44xx and later, default 'y')");
+static bool ywrap_enabled = true;
+module_param_named(ywrap, ywrap_enabled, bool, 0644);
+
+/*
+ * fbdev funcs, to implement legacy fbdev interface on top of drm driver
+ */
+
+#define to_omap_fbdev(x) container_of(x, struct omap_fbdev, base)
+
+struct omap_fbdev {
+ struct drm_fb_helper base;
+ struct drm_framebuffer *fb;
+ struct drm_gem_object *bo;
+ bool ywrap_enabled;
+};
+
+static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h);
+static struct drm_fb_helper *get_fb(struct fb_info *fbi);
+
+static ssize_t omap_fbdev_write(struct fb_info *fbi, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ ssize_t res;
+
+ res = fb_sys_write(fbi, buf, count, ppos);
+ omap_fbdev_flush(fbi, 0, 0, fbi->var.xres, fbi->var.yres);
+
+ return res;
+}
+
+static void omap_fbdev_fillrect(struct fb_info *fbi,
+ const struct fb_fillrect *rect)
+{
+ sys_fillrect(fbi, rect);
+ omap_fbdev_flush(fbi, rect->dx, rect->dy, rect->width, rect->height);
+}
+
+static void omap_fbdev_copyarea(struct fb_info *fbi,
+ const struct fb_copyarea *area)
+{
+ sys_copyarea(fbi, area);
+ omap_fbdev_flush(fbi, area->dx, area->dy, area->width, area->height);
+}
+
+static void omap_fbdev_imageblit(struct fb_info *fbi,
+ const struct fb_image *image)
+{
+ sys_imageblit(fbi, image);
+ omap_fbdev_flush(fbi, image->dx, image->dy,
+ image->width, image->height);
+}
+
+static int omap_fbdev_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *fbi)
+{
+ struct drm_fb_helper *helper = get_fb(fbi);
+ struct omap_fbdev *fbdev = to_omap_fbdev(helper);
+ int npages;
+
+ if (!helper)
+ goto fallback;
+
+ if (!fbdev->ywrap_enabled)
+ goto fallback;
+
+ /* DMM roll shifts in 4K pages: */
+ npages = fbi->fix.line_length >> PAGE_SHIFT;
+ omap_gem_roll(fbdev->bo, var->yoffset * npages);
+
+ return 0;
+
+fallback:
+ return drm_fb_helper_pan_display(var, fbi);
+}
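+
+/* Worked example (illustrative only): with a page-aligned line_length of
+ * 8192 bytes and 4k pages, npages = 8192 >> PAGE_SHIFT = 2 pages per row,
+ * so panning to yoffset=240 rolls the DMM mapping by 240 * 2 = 480 pages.
+ */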
+
+static struct fb_ops omap_fb_ops = {
+ .owner = THIS_MODULE,
+
+ /* Note: to properly handle manual update displays, we wrap the
+ * basic fbdev ops which write to the framebuffer
+ */
+ .fb_read = fb_sys_read,
+ .fb_write = omap_fbdev_write,
+ .fb_fillrect = omap_fbdev_fillrect,
+ .fb_copyarea = omap_fbdev_copyarea,
+ .fb_imageblit = omap_fbdev_imageblit,
+
+ .fb_check_var = drm_fb_helper_check_var,
+ .fb_set_par = drm_fb_helper_set_par,
+ .fb_pan_display = omap_fbdev_pan_display,
+ .fb_blank = drm_fb_helper_blank,
+ .fb_setcmap = drm_fb_helper_setcmap,
+
+ .fb_debug_enter = drm_fb_helper_debug_enter,
+ .fb_debug_leave = drm_fb_helper_debug_leave,
+};
+
+static int omap_fbdev_create(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ struct omap_fbdev *fbdev = to_omap_fbdev(helper);
+ struct drm_device *dev = helper->dev;
+ struct omap_drm_private *priv = dev->dev_private;
+ struct drm_framebuffer *fb = NULL;
+ union omap_gem_size gsize;
+ struct fb_info *fbi = NULL;
+ struct drm_mode_fb_cmd2 mode_cmd = {0};
+ dma_addr_t paddr;
+ int ret;
+
+ /* only doing ARGB32 since this is what is needed to alpha-blend
+ * with video overlays:
+ */
+ sizes->surface_bpp = 32;
+ sizes->surface_depth = 32;
+
+ DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
+ sizes->surface_height, sizes->surface_bpp,
+ sizes->fb_width, sizes->fb_height);
+
+ mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
+ sizes->surface_depth);
+
+ mode_cmd.width = sizes->surface_width;
+ mode_cmd.height = sizes->surface_height;
+
+ mode_cmd.pitches[0] = align_pitch(
+ mode_cmd.width * ((sizes->surface_bpp + 7) / 8),
+ mode_cmd.width, sizes->surface_bpp);
+
+ fbdev->ywrap_enabled = priv->has_dmm && ywrap_enabled;
+ if (fbdev->ywrap_enabled) {
+ /* need to align pitch to page size if using DMM scrolling */
+ mode_cmd.pitches[0] = ALIGN(mode_cmd.pitches[0], PAGE_SIZE);
+ }
+
+ /* allocate backing bo */
+ gsize = (union omap_gem_size){
+ .bytes = PAGE_ALIGN(mode_cmd.pitches[0] * mode_cmd.height),
+ };
+ DBG("allocating %d bytes for fb %d", gsize.bytes, dev->primary->index);
+ fbdev->bo = omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
+ if (!fbdev->bo) {
+ dev_err(dev->dev, "failed to allocate buffer object\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ fb = omap_framebuffer_init(dev, &mode_cmd, &fbdev->bo);
+ if (IS_ERR(fb)) {
+ dev_err(dev->dev, "failed to allocate fb\n");
+ /* note: if fb creation failed, we can't rely on fb destroy
+ * to unref the bo:
+ */
+ drm_gem_object_unreference(fbdev->bo);
+ ret = PTR_ERR(fb);
+ goto fail;
+ }
+
+ /* note: this keeps the bo pinned.. which is perhaps not ideal,
+ * but is needed as long as we use fb_mmap() to mmap to userspace
+ * (since this happens using fix.smem_start). Possibly we could
+ * implement our own mmap using GEM mmap support to avoid this
+ * (non-tiled buffer doesn't need to be pinned for fbcon to write
+ * to it). Then we just need to be sure that we are able to re-
+ * pin it in case of an oops.
+ */
+ ret = omap_gem_get_paddr(fbdev->bo, &paddr, true);
+ if (ret) {
+ dev_err(dev->dev, "could not map (paddr)!\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+
+ fbi = framebuffer_alloc(0, dev->dev);
+ if (!fbi) {
+ dev_err(dev->dev, "failed to allocate fb info\n");
+ ret = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ DBG("fbi=%p, dev=%p", fbi, dev);
+
+ fbdev->fb = fb;
+ helper->fb = fb;
+ helper->fbdev = fbi;
+
+ fbi->par = helper;
+ fbi->flags = FBINFO_DEFAULT;
+ fbi->fbops = &omap_fb_ops;
+
+ strcpy(fbi->fix.id, MODULE_NAME);
+
+ ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+ if (ret) {
+ ret = -ENOMEM;
+ goto fail_unlock;
+ }
+
+ drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
+ drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
+
+ dev->mode_config.fb_base = paddr;
+
+ fbi->screen_base = omap_gem_vaddr(fbdev->bo);
+ fbi->screen_size = fbdev->bo->size;
+ fbi->fix.smem_start = paddr;
+ fbi->fix.smem_len = fbdev->bo->size;
+
+ /* if we have DMM, then we can use it for scrolling by just
+ * shuffling pages around in DMM rather than doing sw blit.
+ */
+ if (fbdev->ywrap_enabled) {
+ DRM_INFO("Enabling DMM ywrap scrolling\n");
+ fbi->flags |= FBINFO_HWACCEL_YWRAP | FBINFO_READS_FAST;
+ fbi->fix.ywrapstep = 1;
+ }
+
+
+ DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
+ DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+
+fail_unlock:
+ mutex_unlock(&dev->struct_mutex);
+fail:
+
+ if (ret) {
+ if (fbi)
+ framebuffer_release(fbi);
+ if (fb)
+ fb->funcs->destroy(fb);
+ }
+
+ return ret;
+}
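+
+/* Worked example (illustrative only, assuming 4k pages): for a 1920x1080
+ * console at 32bpp, align_pitch() yields 1920 * 4 = 7680 bytes; with ywrap
+ * enabled this is further rounded up to ALIGN(7680, PAGE_SIZE) = 8192,
+ * giving a backing bo of PAGE_ALIGN(8192 * 1080) = 8847360 bytes.
+ */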
+
+static void omap_crtc_fb_gamma_set(struct drm_crtc *crtc,
+ u16 red, u16 green, u16 blue, int regno)
+{
+ DBG("fbdev: set gamma");
+}
+
+static void omap_crtc_fb_gamma_get(struct drm_crtc *crtc,
+ u16 *red, u16 *green, u16 *blue, int regno)
+{
+ DBG("fbdev: get gamma");
+}
+
+static int omap_fbdev_probe(struct drm_fb_helper *helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ int new_fb = 0;
+ int ret;
+
+ if (!helper->fb) {
+ ret = omap_fbdev_create(helper, sizes);
+ if (ret)
+ return ret;
+ new_fb = 1;
+ }
+ return new_fb;
+}
+
+static struct drm_fb_helper_funcs omap_fb_helper_funcs = {
+ .gamma_set = omap_crtc_fb_gamma_set,
+ .gamma_get = omap_crtc_fb_gamma_get,
+ .fb_probe = omap_fbdev_probe,
+};
+
+static struct drm_fb_helper *get_fb(struct fb_info *fbi)
+{
+ if (!fbi || strcmp(fbi->fix.id, MODULE_NAME)) {
+ /* these are not the fb's you're looking for */
+ return NULL;
+ }
+ return fbi->par;
+}
+
+/* flush an area of the framebuffer (in case of manual update display that
+ * is not automatically flushed)
+ */
+static void omap_fbdev_flush(struct fb_info *fbi, int x, int y, int w, int h)
+{
+ struct drm_fb_helper *helper = get_fb(fbi);
+
+ if (!helper)
+ return;
+
+ VERB("flush fbdev: %d,%d %dx%d, fbi=%p", x, y, w, h, fbi);
+
+ omap_framebuffer_flush(helper->fb, x, y, w, h);
+}
+
+/* initialize fbdev helper */
+struct drm_fb_helper *omap_fbdev_init(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_fbdev *fbdev = NULL;
+ struct drm_fb_helper *helper;
+ int ret = 0;
+
+ fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+ if (!fbdev) {
+ dev_err(dev->dev, "could not allocate fbdev\n");
+ goto fail;
+ }
+
+ helper = &fbdev->base;
+
+ helper->funcs = &omap_fb_helper_funcs;
+
+ ret = drm_fb_helper_init(dev, helper,
+ priv->num_crtcs, priv->num_connectors);
+ if (ret) {
+ dev_err(dev->dev, "could not init fbdev: ret=%d\n", ret);
+ goto fail;
+ }
+
+ drm_fb_helper_single_add_all_connectors(helper);
+ drm_fb_helper_initial_config(helper, 32);
+
+ priv->fbdev = helper;
+
+ return helper;
+
+fail:
+ kfree(fbdev);
+ return NULL;
+}
+
+void omap_fbdev_free(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct drm_fb_helper *helper = priv->fbdev;
+ struct omap_fbdev *fbdev;
+ struct fb_info *fbi;
+
+ DBG();
+
+ fbi = helper->fbdev;
+
+ unregister_framebuffer(fbi);
+ framebuffer_release(fbi);
+
+ drm_fb_helper_fini(helper);
+
+ fbdev = to_omap_fbdev(priv->fbdev);
+
+ /* this will free the backing object */
+ if (fbdev->fb)
+ fbdev->fb->funcs->destroy(fbdev->fb);
+
+ kfree(fbdev);
+
+ priv->fbdev = NULL;
+}
diff --git a/drivers/staging/omapdrm/omap_gem.c b/drivers/staging/omapdrm/omap_gem.c
new file mode 100644
index 0000000..b7d6f88
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_gem.c
@@ -0,0 +1,1246 @@
+/*
+ * drivers/staging/omapdrm/omap_gem.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#include <linux/spinlock.h>
+#include <linux/shmem_fs.h>
+
+#include "omap_drv.h"
+#include "omap_dmm_tiler.h"
+
+/* remove these once drm core helpers are merged */
+struct page ** _drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
+void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+ bool dirty, bool accessed);
+int _drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size);
+
+/*
+ * GEM buffer object implementation.
+ */
+
+#define to_omap_bo(x) container_of(x, struct omap_gem_object, base)
+
+/* note: we use upper 8 bits of flags for driver-internal flags: */
+#define OMAP_BO_DMA 0x01000000 /* actually is physically contiguous */
+#define OMAP_BO_EXT_SYNC 0x02000000 /* externally allocated sync object */
+#define OMAP_BO_EXT_MEM 0x04000000 /* externally allocated memory */
+
+
+struct omap_gem_object {
+ struct drm_gem_object base;
+
+ uint32_t flags;
+
+ /** width/height for tiled formats (rounded up to slot boundaries) */
+ uint16_t width, height;
+
+ /** roll applied when mapping to DMM */
+ uint32_t roll;
+
+ /**
+ * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
+ * is set and the paddr is valid. Also if the buffer is remapped in
+ * TILER and paddr_cnt > 0, then paddr is valid. But if you are using
+ * the physical address and OMAP_BO_DMA is not set, then you should
+ * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is
+ * not removed from under your feet.
+ *
+ * Note that OMAP_BO_SCANOUT is a hint from userspace that DMA capable
+ * buffer is requested, but doesn't mean that it is. Use the
+ * OMAP_BO_DMA flag to determine if the buffer has a DMA capable
+ * physical address.
+ */
+ dma_addr_t paddr;
+
+ /**
+ * # of users of paddr
+ */
+ uint32_t paddr_cnt;
+
+ /**
+ * tiler block used when buffer is remapped in DMM/TILER.
+ */
+ struct tiler_block *block;
+
+ /**
+ * Array of backing pages, if allocated. Note that pages are never
+ * allocated for buffers originally allocated from contiguous memory
+ */
+ struct page **pages;
+
+ /** addresses corresponding to pages in above array */
+ dma_addr_t *addrs;
+
+ /**
+ * Virtual address, if mapped.
+ */
+ void *vaddr;
+
+ /**
+ * sync-object allocated on demand (if needed)
+ *
+ * Per-buffer sync-object for tracking pending and completed hw/dma
+ * read and write operations. The layout in memory is dictated by
+ * the SGX firmware, which uses this information to stall the command
+ * stream if a surface is not ready yet.
+ *
+ * Note that when buffer is used by SGX, the sync-object needs to be
+ * allocated from a special heap of sync-objects. This way many sync
+ * objects can be packed in a page, and not waste GPU virtual address
+ * space. Because of this we have to have a omap_gem_set_sync_object()
+ * API to allow replacement of the syncobj after it has (potentially)
+ * already been allocated. A bit ugly but I haven't thought of a
+ * better alternative.
+ */
+ struct {
+ uint32_t write_pending;
+ uint32_t write_complete;
+ uint32_t read_pending;
+ uint32_t read_complete;
+ } *sync;
+};
+
+static int get_pages(struct drm_gem_object *obj, struct page ***pages);
+static uint64_t mmap_offset(struct drm_gem_object *obj);
+
+/* To deal with userspace mmap'ings of 2d tiled buffers, which (a) are
+ * not necessarily pinned in TILER all the time, and (b) when they are
+ * they are not necessarily page aligned, we reserve one or more small
+ * regions in each of the 2d containers to use as a user-GART where we
+ * can create a second page-aligned mapping of parts of the buffer
+ * being accessed from userspace.
+ *
+ * Note that we could optimize slightly when we know that multiple
+ * tiler containers are backed by the same PAT.. but I'll leave that
+ * for later..
+ */
+#define NUM_USERGART_ENTRIES 2
+struct usergart_entry {
+ struct tiler_block *block; /* the reserved tiler block */
+ dma_addr_t paddr;
+ struct drm_gem_object *obj; /* the current pinned obj */
+ pgoff_t obj_pgoff; /* page offset of obj currently
+ mapped in */
+};
+static struct {
+ struct usergart_entry entry[NUM_USERGART_ENTRIES];
+ int height; /* height in rows */
+ int height_shift; /* ilog2(height in rows) */
+ int slot_shift; /* ilog2(width per slot) */
+ int stride_pfn; /* stride in pages */
+ int last; /* index of last used entry */
+} *usergart;
+
+static void evict_entry(struct drm_gem_object *obj,
+ enum tiler_fmt fmt, struct usergart_entry *entry)
+{
+ if (obj->dev->dev_mapping) {
+ size_t size = PAGE_SIZE * usergart[fmt].height;
+ loff_t off = mmap_offset(obj) +
+ (entry->obj_pgoff << PAGE_SHIFT);
+ unmap_mapping_range(obj->dev->dev_mapping, off, size, 1);
+ }
+
+ entry->obj = NULL;
+}
+
+/* Evict a buffer from usergart, if it is mapped there */
+static void evict(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+ if (omap_obj->flags & OMAP_BO_TILED) {
+ enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
+ int i;
+
+ if (!usergart)
+ return;
+
+ for (i = 0; i < NUM_USERGART_ENTRIES; i++) {
+ struct usergart_entry *entry = &usergart[fmt].entry[i];
+ if (entry->obj == obj)
+ evict_entry(obj, fmt, entry);
+ }
+ }
+}
+
+/* GEM objects can either be allocated from contiguous memory (in which
+ * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL). But non
+ * contiguous buffers can be remapped in TILER/DMM if they need to be
+ * contiguous... but we don't do this all the time to reduce pressure
+ * on TILER/DMM space when we know at allocation time that the buffer
+ * will need to be scanned out.
+ */
+static inline bool is_shmem(struct drm_gem_object *obj)
+{
+ return obj->filp != NULL;
+}
+
+static DEFINE_SPINLOCK(sync_lock);
+
+/** ensure backing pages are allocated */
+static int omap_gem_attach_pages(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ struct page **pages;
+
+ WARN_ON(omap_obj->pages);
+
+ /* TODO: __GFP_DMA32 .. but somehow GFP_HIGHMEM is coming from the
+ * mapping_gfp_mask(mapping) which conflicts w/ GFP_DMA32.. probably
+ * we actually want CMA memory for it all anyways..
+ */
+ pages = _drm_gem_get_pages(obj, GFP_KERNEL);
+ if (IS_ERR(pages)) {
+ dev_err(obj->dev->dev, "could not get pages: %ld\n", PTR_ERR(pages));
+ return PTR_ERR(pages);
+ }
+
+ /* for non-cached buffers, ensure the new pages are clean because
+ * DSS, GPU, etc. are not cache coherent:
+ */
+ if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
+ int i, npages = obj->size >> PAGE_SHIFT;
+ dma_addr_t *addrs = kmalloc(npages * sizeof(*addrs), GFP_KERNEL);
+ for (i = 0; i < npages; i++) {
+ addrs[i] = dma_map_page(obj->dev->dev, pages[i],
+ 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+ omap_obj->addrs = addrs;
+ }
+
+ omap_obj->pages = pages;
+ return 0;
+}
+
+/** release backing pages */
+static void omap_gem_detach_pages(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+ /* for non-cached buffers, ensure the new pages are clean because
+ * DSS, GPU, etc. are not cache coherent:
+ */
+ if (omap_obj->flags & (OMAP_BO_WC|OMAP_BO_UNCACHED)) {
+ int i, npages = obj->size >> PAGE_SHIFT;
+ for (i = 0; i < npages; i++) {
+ dma_unmap_page(obj->dev->dev, omap_obj->addrs[i],
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
+ }
+ kfree(omap_obj->addrs);
+ omap_obj->addrs = NULL;
+ }
+
+ _drm_gem_put_pages(obj, omap_obj->pages, true, false);
+ omap_obj->pages = NULL;
+}
+
+/** get mmap offset */
+static uint64_t mmap_offset(struct drm_gem_object *obj)
+{
+ if (!obj->map_list.map) {
+ /* Make it mmapable */
+ size_t size = omap_gem_mmap_size(obj);
+ int ret = _drm_gem_create_mmap_offset_size(obj, size);
+
+ if (ret) {
+ dev_err(obj->dev->dev, "could not allocate mmap offset");
+ return 0;
+ }
+ }
+
+ return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
+}
+
+uint64_t omap_gem_mmap_offset(struct drm_gem_object *obj)
+{
+ uint64_t offset;
+ mutex_lock(&obj->dev->struct_mutex);
+ offset = mmap_offset(obj);
+ mutex_unlock(&obj->dev->struct_mutex);
+ return offset;
+}
+
+/** get mmap size */
+size_t omap_gem_mmap_size(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ size_t size = obj->size;
+
+ if (omap_obj->flags & OMAP_BO_TILED) {
+ /* for tiled buffers, the virtual size has stride rounded up
+ * to 4kb.. (to hide the fact that row n+1 might start 16kb or
+ * 32kb later!). But we don't back the entire buffer with
+ * pages, only the valid picture part.. so need to adjust for
+ * this in the size used to mmap and generate mmap offset
+ */
+ size = tiler_vsize(gem2fmt(omap_obj->flags),
+ omap_obj->width, omap_obj->height);
+ }
+
+ return size;
+}
+
+
+/* Normal handling for the case of faulting in non-tiled buffers */
+static int fault_1d(struct drm_gem_object *obj,
+ struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ unsigned long pfn;
+ pgoff_t pgoff;
+
+ /* We don't use vmf->pgoff since that has the fake offset: */
+ pgoff = ((unsigned long)vmf->virtual_address -
+ vma->vm_start) >> PAGE_SHIFT;
+
+ if (omap_obj->pages) {
+ pfn = page_to_pfn(omap_obj->pages[pgoff]);
+ } else {
+ BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
+ pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
+ }
+
+ VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+ pfn, pfn << PAGE_SHIFT);
+
+ return vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
+}
+
+/* Special handling for the case of faulting in 2d tiled buffers */
+static int fault_2d(struct drm_gem_object *obj,
+ struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ struct usergart_entry *entry;
+ enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
+ struct page *pages[64]; /* XXX is this too much to have on stack? */
+ unsigned long pfn;
+ pgoff_t pgoff, base_pgoff;
+ void __user *vaddr;
+ int i, ret, slots;
+
+ if (!usergart)
+ return -EFAULT;
+
+ /* TODO: this fxn might need a bit tweaking to deal w/ tiled buffers
+ * that are wider than 4kb
+ */
+
+ /* We don't use vmf->pgoff since that has the fake offset: */
+ pgoff = ((unsigned long)vmf->virtual_address -
+ vma->vm_start) >> PAGE_SHIFT;
+
+ /* actual address we start mapping at is rounded down to previous slot
+ * boundary in the y direction:
+ */
+ base_pgoff = round_down(pgoff, usergart[fmt].height);
+ vaddr = vmf->virtual_address - ((pgoff - base_pgoff) << PAGE_SHIFT);
+ entry = &usergart[fmt].entry[usergart[fmt].last];
+
+ slots = omap_obj->width >> usergart[fmt].slot_shift;
+
+ /* evict previous buffer using this usergart entry, if any: */
+ if (entry->obj)
+ evict_entry(entry->obj, fmt, entry);
+
+ entry->obj = obj;
+ entry->obj_pgoff = base_pgoff;
+
+ /* now convert base_pgoff to phys offset from virt offset:
+ */
+ base_pgoff = (base_pgoff >> usergart[fmt].height_shift) * slots;
+
+ /* map in pages. Note the height of the slot is also equal to the
+ * number of pages that need to be mapped in to fill 4kb wide CPU page.
+ * If the height is 64, then 64 pages fill a 4kb wide by 64 row region.
+ * Beyond the valid pixel part of the buffer, we set pages[i] to NULL to
+ * get a dummy page mapped in.. if someone reads/writes it they will get
+ * random/undefined content, but at least it won't be corrupting
+ * whatever other random page used to be mapped in, or other undefined
+ * behavior.
+ */
+ memcpy(pages, &omap_obj->pages[base_pgoff],
+ sizeof(struct page *) * slots);
+ memset(pages + slots, 0,
+ sizeof(struct page *) * (usergart[fmt].height - slots));
+
+ ret = tiler_pin(entry->block, pages, ARRAY_SIZE(pages), 0, true);
+ if (ret) {
+ dev_err(obj->dev->dev, "failed to pin: %d\n", ret);
+ return ret;
+ }
+
+ i = usergart[fmt].height;
+ pfn = entry->paddr >> PAGE_SHIFT;
+
+ VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+ pfn, pfn << PAGE_SHIFT);
+
+ while (i--) {
+ vm_insert_mixed(vma, (unsigned long)vaddr, pfn);
+ pfn += usergart[fmt].stride_pfn;
+ vaddr += PAGE_SIZE;
+ }
+
+ /* simple round-robin: */
+ usergart[fmt].last = (usergart[fmt].last + 1) % NUM_USERGART_ENTRIES;
+
+ return 0;
+}
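+
+/* Illustrative walk-through of fault_2d() with hypothetical geometry (the
+ * real numbers come from tiler_align()/tiler_stride()): if the usergart
+ * slot height is 64 pages (height_shift=6), a fault at pgoff=200 rounds
+ * down to base_pgoff=192, the slot's physical page offset becomes
+ * (192 >> 6) * slots, and 64 consecutive pfns spaced stride_pfn apart are
+ * then inserted starting at the slot's paddr.
+ */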
+
+/**
+ * omap_gem_fault - pagefault handler for GEM objects
+ * @vma: the VMA of the GEM object
+ * @vmf: fault detail
+ *
+ * Invoked when a fault occurs on an mmap of a GEM managed area. GEM
+ * does most of the work for us including the actual map/unmap calls
+ * but we need to do the actual page work.
+ *
+ * The VMA was set up by GEM. In doing so it also ensured that the
+ * vma->vm_private_data points to the GEM object that is backing this
+ * mapping.
+ */
+int omap_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ struct drm_device *dev = obj->dev;
+ struct page **pages;
+ int ret;
+
+ /* Make sure we don't parallel update on a fault, nor move or remove
+ * something from beneath our feet
+ */
+ mutex_lock(&dev->struct_mutex);
+
+ /* if a shmem backed object, make sure we have pages attached now */
+ ret = get_pages(obj, &pages);
+ if (ret) {
+ goto fail;
+ }
+
+ /* where should we do corresponding put_pages().. we are mapping
+ * the original page, rather than thru a GART, so we can't rely
+ * on eviction to trigger this. But munmap() of all mappings should
+ * probably trigger put_pages()?
+ */
+
+ if (omap_obj->flags & OMAP_BO_TILED)
+ ret = fault_2d(obj, vma, vmf);
+ else
+ ret = fault_1d(obj, vma, vmf);
+
+
+fail:
+ mutex_unlock(&dev->struct_mutex);
+ switch (ret) {
+ case 0:
+ case -ERESTARTSYS:
+ case -EINTR:
+ return VM_FAULT_NOPAGE;
+ case -ENOMEM:
+ return VM_FAULT_OOM;
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+}
+
+/** We override mainly to fix up some of the vm mapping flags.. */
+int omap_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct omap_gem_object *omap_obj;
+ int ret;
+
+ ret = drm_gem_mmap(filp, vma);
+ if (ret) {
+ DBG("mmap failed: %d", ret);
+ return ret;
+ }
+
+ /* after drm_gem_mmap(), it is safe to access the obj */
+ omap_obj = to_omap_bo(vma->vm_private_data);
+
+ vma->vm_flags &= ~VM_PFNMAP;
+ vma->vm_flags |= VM_MIXEDMAP;
+
+ if (omap_obj->flags & OMAP_BO_WC) {
+ vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+ } else if (omap_obj->flags & OMAP_BO_UNCACHED) {
+ vma->vm_page_prot = pgprot_noncached(vm_get_page_prot(vma->vm_flags));
+ } else {
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ }
+
+ return ret;
+}
+
+/**
+ * omap_gem_dumb_create - create a dumb buffer
+ * @drm_file: our client file
+ * @dev: our device
+ * @args: the requested arguments copied from userspace
+ *
+ * Allocate a buffer suitable for use for a frame buffer of the
+ * form described by user space. Give userspace a handle by which
+ * to reference it.
+ */
+int omap_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ union omap_gem_size gsize;
+
+ /* in case someone tries to feed us a completely bogus stride: */
+ args->pitch = align_pitch(args->pitch, args->width, args->bpp);
+ args->size = PAGE_ALIGN(args->pitch * args->height);
+
+ gsize = (union omap_gem_size){
+ .bytes = args->size,
+ };
+
+ return omap_gem_new_handle(dev, file, gsize,
+ OMAP_BO_SCANOUT | OMAP_BO_WC, &args->handle);
+}
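+
+/* Worked example (illustrative only): a 640x480, 16bpp dumb buffer request
+ * ends up with args->pitch = align_pitch(pitch, 640, 16) = 1280 and
+ * args->size = PAGE_ALIGN(1280 * 480) = 614400 bytes (exactly 150 4k pages).
+ */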
+
+/**
+ * omap_gem_dumb_destroy - destroy a dumb buffer
+ * @file: client file
+ * @dev: our DRM device
+ * @handle: the object handle
+ *
+ * Destroy a handle that was created via omap_gem_dumb_create.
+ */
+int omap_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle)
+{
+ /* No special work needed, drop the reference and see what falls out */
+ return drm_gem_handle_delete(file, handle);
+}
+
+/**
+ * omap_gem_dumb_map - buffer mapping for dumb interface
+ * @file: our drm client file
+ * @dev: drm device
+ * @handle: GEM handle to the object (from dumb_create)
+ *
+ * Do the necessary setup to allow the mapping of the frame buffer
+ * into user memory. We don't have to do much here at the moment.
+ */
+int omap_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset)
+{
+ struct drm_gem_object *obj;
+ int ret = 0;
+
+ /* GEM does all our handle to object mapping */
+ obj = drm_gem_object_lookup(dev, file, handle);
+ if (obj == NULL) {
+ ret = -ENOENT;
+ goto fail;
+ }
+
+ *offset = omap_gem_mmap_offset(obj);
+
+ drm_gem_object_unreference_unlocked(obj);
+
+fail:
+ return ret;
+}
+
+/* Set scrolling position. This allows us to implement fast scrolling
+ * for console.
+ */
+int omap_gem_roll(struct drm_gem_object *obj, uint32_t roll)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ uint32_t npages = obj->size >> PAGE_SHIFT;
+ int ret = 0;
+
+ if (roll > npages) {
+ dev_err(obj->dev->dev, "invalid roll: %d\n", roll);
+ return -EINVAL;
+ }
+
+ omap_obj->roll = roll;
+
+ if (in_atomic() || mutex_is_locked(&obj->dev->struct_mutex)) {
+ /* this can get called from fbcon in atomic context.. so
+ * just ignore it and wait for next time called from
+ * interruptible context to update the PAT.. the result
+ * may be that user sees wrap-around instead of scrolling
+ * momentarily on the screen. If we wanted to be fancier
+ * we could perhaps schedule some workqueue work at this
+ * point.
+ */
+ return 0;
+ }
+
+ mutex_lock(&obj->dev->struct_mutex);
+
+ /* if we aren't mapped yet, we don't need to do anything */
+ if (omap_obj->block) {
+ struct page **pages;
+ ret = get_pages(obj, &pages);
+ if (ret)
+ goto fail;
+ ret = tiler_pin(omap_obj->block, pages, npages, roll, true);
+ if (ret)
+ dev_err(obj->dev->dev, "could not repin: %d\n", ret);
+ }
+
+fail:
+ mutex_unlock(&obj->dev->struct_mutex);
+
+ return ret;
+}
+
+/* Get physical address for DMA.. if 'remap' is true, and the buffer is not
+ * already contiguous, remap it to pin in physically contiguous memory.. (ie.
+ * map in TILER)
+ */
+int omap_gem_get_paddr(struct drm_gem_object *obj,
+ dma_addr_t *paddr, bool remap)
+{
+ struct omap_drm_private *priv = obj->dev->dev_private;
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = 0;
+
+ mutex_lock(&obj->dev->struct_mutex);
+
+ if (remap && is_shmem(obj) && priv->has_dmm) {
+ if (omap_obj->paddr_cnt == 0) {
+ struct page **pages;
+ uint32_t npages = obj->size >> PAGE_SHIFT;
+ enum tiler_fmt fmt = gem2fmt(omap_obj->flags);
+ struct tiler_block *block;
+
+ BUG_ON(omap_obj->block);
+
+ ret = get_pages(obj, &pages);
+ if (ret)
+ goto fail;
+
+ if (omap_obj->flags & OMAP_BO_TILED) {
+ block = tiler_reserve_2d(fmt,
+ omap_obj->width,
+ omap_obj->height, 0);
+ } else {
+ block = tiler_reserve_1d(obj->size);
+ }
+
+ if (IS_ERR(block)) {
+ ret = PTR_ERR(block);
+ dev_err(obj->dev->dev,
+ "could not remap: %d (%d)\n", ret, fmt);
+ goto fail;
+ }
+
+ /* TODO: enable async refill.. */
+ ret = tiler_pin(block, pages, npages,
+ omap_obj->roll, true);
+ if (ret) {
+ tiler_release(block);
+ dev_err(obj->dev->dev,
+ "could not pin: %d\n", ret);
+ goto fail;
+ }
+
+ omap_obj->paddr = tiler_ssptr(block);
+ omap_obj->block = block;
+
+ DBG("got paddr: %08x", omap_obj->paddr);
+ }
+
+ omap_obj->paddr_cnt++;
+
+ *paddr = omap_obj->paddr;
+ } else if (omap_obj->flags & OMAP_BO_DMA) {
+ *paddr = omap_obj->paddr;
+ } else {
+ ret = -EINVAL;
+ }
+
+fail:
+ mutex_unlock(&obj->dev->struct_mutex);
+
+ return ret;
+}
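+
+/* Typical usage (sketch, mirroring omap_framebuffer_pin()/unpin()): a
+ * scanout path does
+ *
+ *	ret = omap_gem_get_paddr(obj, &paddr, true);
+ *	if (!ret) {
+ *		... program DSS with paddr ...
+ *		omap_gem_put_paddr(obj);
+ *	}
+ *
+ * so the TILER mapping stays pinned only while DMA can still reference it.
+ */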
+
+/* Release physical address, when DMA is no longer being performed.. this
+ * could potentially unpin and unmap buffers from TILER
+ */
+int omap_gem_put_paddr(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = 0;
+
+ mutex_lock(&obj->dev->struct_mutex);
+ if (omap_obj->paddr_cnt > 0) {
+ omap_obj->paddr_cnt--;
+ if (omap_obj->paddr_cnt == 0) {
+ ret = tiler_unpin(omap_obj->block);
+ if (ret) {
+ dev_err(obj->dev->dev,
+ "could not unpin pages: %d\n", ret);
+ goto fail;
+ }
+ ret = tiler_release(omap_obj->block);
+ if (ret) {
+ dev_err(obj->dev->dev,
+ "could not release unmap: %d\n", ret);
+ }
+ omap_obj->block = NULL;
+ }
+ }
+fail:
+ mutex_unlock(&obj->dev->struct_mutex);
+ return ret;
+}
+
+/* acquire pages when needed (for example, for DMA where physically
+ * contiguous buffer is not required)
+ */
+static int get_pages(struct drm_gem_object *obj, struct page ***pages)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = 0;
+
+ if (is_shmem(obj) && !omap_obj->pages) {
+ ret = omap_gem_attach_pages(obj);
+ if (ret) {
+ dev_err(obj->dev->dev, "could not attach pages\n");
+ return ret;
+ }
+ }
+
+ /* TODO: even phys-contig.. we should have a list of pages? */
+ *pages = omap_obj->pages;
+
+ return 0;
+}
+
+int omap_gem_get_pages(struct drm_gem_object *obj, struct page ***pages)
+{
+ int ret;
+ mutex_lock(&obj->dev->struct_mutex);
+ ret = get_pages(obj, pages);
+ mutex_unlock(&obj->dev->struct_mutex);
+ return ret;
+}
+
+/* release pages when DMA no longer being performed */
+int omap_gem_put_pages(struct drm_gem_object *obj)
+{
+ /* do something here if we dynamically attach/detach pages.. at
+ * least they would no longer need to be pinned if everyone has
+ * released the pages..
+ */
+ return 0;
+}
+
+/* Get kernel virtual address for CPU access.. this more or less only
+ * exists for omap_fbdev. This should be called with struct_mutex
+ * held.
+ */
+void *omap_gem_vaddr(struct drm_gem_object *obj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
+ if (!omap_obj->vaddr) {
+ struct page **pages;
+ int ret = get_pages(obj, &pages);
+ if (ret)
+ return ERR_PTR(ret);
+ omap_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
+ VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ }
+ return omap_obj->vaddr;
+}
+
+/* Buffer Synchronization:
+ */
+
+struct omap_gem_sync_waiter {
+ struct list_head list;
+ struct omap_gem_object *omap_obj;
+ enum omap_gem_op op;
+ uint32_t read_target, write_target;
+ /* notify called w/ sync_lock held */
+ void (*notify)(void *arg);
+ void *arg;
+};
+
+/* list of omap_gem_sync_waiter.. the notify fxn gets called back when
+ * the read and/or write target count is achieved which can call a user
+ * callback (ex. to kick 3d and/or 2d), wakeup blocked task (prep for
+ * cpu access), etc.
+ */
+static LIST_HEAD(waiters);
+
+static inline bool is_waiting(struct omap_gem_sync_waiter *waiter)
+{
+ struct omap_gem_object *omap_obj = waiter->omap_obj;
+ if ((waiter->op & OMAP_GEM_READ) &&
+ (omap_obj->sync->read_complete < waiter->read_target))
+ return true;
+ if ((waiter->op & OMAP_GEM_WRITE) &&
+ (omap_obj->sync->write_complete < waiter->write_target))
+ return true;
+ return false;
+}
+
+/* macro for sync debug.. */
+#define SYNCDBG 0
+#define SYNC(fmt, ...) do { if (SYNCDBG) \
+ printk(KERN_ERR "%s:%d: "fmt"\n", \
+ __func__, __LINE__, ##__VA_ARGS__); \
+ } while (0)
+
+
+static void sync_op_update(void)
+{
+ struct omap_gem_sync_waiter *waiter, *n;
+ list_for_each_entry_safe(waiter, n, &waiters, list) {
+ if (!is_waiting(waiter)) {
+ list_del(&waiter->list);
+ SYNC("notify: %p", waiter);
+ waiter->notify(waiter->arg);
+ kfree(waiter);
+ }
+ }
+}
+
+static inline int sync_op(struct drm_gem_object *obj,
+ enum omap_gem_op op, bool start)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = 0;
+
+ spin_lock(&sync_lock);
+
+ if (!omap_obj->sync) {
+ omap_obj->sync = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
+ if (!omap_obj->sync) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ }
+
+ if (start) {
+ if (op & OMAP_GEM_READ)
+ omap_obj->sync->read_pending++;
+ if (op & OMAP_GEM_WRITE)
+ omap_obj->sync->write_pending++;
+ } else {
+ if (op & OMAP_GEM_READ)
+ omap_obj->sync->read_complete++;
+ if (op & OMAP_GEM_WRITE)
+ omap_obj->sync->write_complete++;
+ sync_op_update();
+ }
+
+unlock:
+ spin_unlock(&sync_lock);
+
+ return ret;
+}
+
+/* it is a bit lame to handle updates in this sort of polling way, but
+ * in case of PVR, the GPU can directly update read/write complete
+ * values, and not really tell us which ones it updated.. this also
+ * means that sync_lock is not quite sufficient. So we'll need to
+ * do something a bit better when it comes time to add support for
+ * separate 2d hw..
+ */
+void omap_gem_op_update(void)
+{
+ spin_lock(&sync_lock);
+ sync_op_update();
+ spin_unlock(&sync_lock);
+}
+
+/* mark the start of read and/or write operation */
+int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
+{
+ return sync_op(obj, op, true);
+}
+
+int omap_gem_op_finish(struct drm_gem_object *obj, enum omap_gem_op op)
+{
+ return sync_op(obj, op, false);
+}
+
+static DECLARE_WAIT_QUEUE_HEAD(sync_event);
+
+static void sync_notify(void *arg)
+{
+ struct task_struct **waiter_task = arg;
+ *waiter_task = NULL;
+ wake_up_all(&sync_event);
+}
+
+int omap_gem_op_sync(struct drm_gem_object *obj, enum omap_gem_op op)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = 0;
+ if (omap_obj->sync) {
+ struct task_struct *waiter_task = current;
+ struct omap_gem_sync_waiter *waiter =
+ kzalloc(sizeof(*waiter), GFP_KERNEL);
+
+ if (!waiter) {
+ return -ENOMEM;
+ }
+
+ waiter->omap_obj = omap_obj;
+ waiter->op = op;
+ waiter->read_target = omap_obj->sync->read_pending;
+ waiter->write_target = omap_obj->sync->write_pending;
+ waiter->notify = sync_notify;
+ waiter->arg = &waiter_task;
+
+ spin_lock(&sync_lock);
+ if (is_waiting(waiter)) {
+ SYNC("waited: %p", waiter);
+ list_add_tail(&waiter->list, &waiters);
+ spin_unlock(&sync_lock);
+ ret = wait_event_interruptible(sync_event,
+ (waiter_task == NULL));
+ spin_lock(&sync_lock);
+ if (waiter_task) {
+ SYNC("interrupted: %p", waiter);
+ /* we were interrupted */
+ list_del(&waiter->list);
+ waiter_task = NULL;
+ } else {
+ /* freed in sync_op_update() */
+ waiter = NULL;
+ }
+ }
+ spin_unlock(&sync_lock);
+
+ if (waiter) {
+ kfree(waiter);
+ }
+ }
+ return ret;
+}
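+
+/* Usage sketch (illustrative only): code submitting a hw write against the
+ * buffer would typically bracket the operation with
+ *
+ *	omap_gem_op_start(obj, OMAP_GEM_WRITE);   (when submitting to hw)
+ *	omap_gem_op_finish(obj, OMAP_GEM_WRITE);  (when hw signals completion)
+ *
+ * and omap_gem_op_sync(obj, OMAP_GEM_WRITE) then blocks until all writes
+ * that were pending at the time of the call have completed.
+ */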
+
+/* call fxn(arg), either synchronously or asynchronously if the op
+ * is currently blocked.. fxn() can be called from any context
+ *
+ * (TODO for now fxn is called back from whichever context calls
+ * omap_gem_op_update().. but this could be better defined later
+ * if needed)
+ *
+ * TODO more code in common w/ _sync()..
+ */
+int omap_gem_op_async(struct drm_gem_object *obj, enum omap_gem_op op,
+ void (*fxn)(void *arg), void *arg)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ if (omap_obj->sync) {
+ struct omap_gem_sync_waiter *waiter =
+ kzalloc(sizeof(*waiter), GFP_ATOMIC);
+
+ if (!waiter) {
+ return -ENOMEM;
+ }
+
+ waiter->omap_obj = omap_obj;
+ waiter->op = op;
+ waiter->read_target = omap_obj->sync->read_pending;
+ waiter->write_target = omap_obj->sync->write_pending;
+ waiter->notify = fxn;
+ waiter->arg = arg;
+
+ spin_lock(&sync_lock);
+ if (is_waiting(waiter)) {
+ SYNC("waited: %p", waiter);
+ list_add_tail(&waiter->list, &waiters);
+ spin_unlock(&sync_lock);
+ return 0;
+ }
+
+ spin_unlock(&sync_lock);
+ }
+
+ /* no waiting.. */
+ fxn(arg);
+
+ return 0;
+}
+
+/* special API so PVR can update the buffer to use a sync-object allocated
+ * from its sync-obj heap. Only used for a newly allocated (from PVR's
+ * perspective) sync-object, so we overwrite the new syncobj w/ values
+ * from the already allocated syncobj (if there is one)
+ */
+int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
+{
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+ int ret = 0;
+
+ spin_lock(&sync_lock);
+
+ if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
+ /* clearing a previously set syncobj */
+ syncobj = kzalloc(sizeof(*omap_obj->sync), GFP_ATOMIC);
+ if (!syncobj) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+ memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
+ omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
+ omap_obj->sync = syncobj;
+ } else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
+ /* replacing an existing syncobj */
+ if (omap_obj->sync) {
+ memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
+ kfree(omap_obj->sync);
+ }
+ omap_obj->flags |= OMAP_BO_EXT_SYNC;
+ omap_obj->sync = syncobj;
+ }
+
+unlock:
+ spin_unlock(&sync_lock);
+ return ret;
+}
+
+int omap_gem_init_object(struct drm_gem_object *obj)
+{
+ return -EINVAL; /* unused */
+}
+
+/* don't call directly.. called from GEM core when it is time to actually
+ * free the object..
+ */
+void omap_gem_free_object(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct omap_gem_object *omap_obj = to_omap_bo(obj);
+
+ evict(obj);
+
+ if (obj->map_list.map) {
+ drm_gem_free_mmap_offset(obj);
+ }
+
+ /* this means the object is still pinned.. which really should
+ * not happen. I think..
+ */
+ WARN_ON(omap_obj->paddr_cnt > 0);
+
+ /* don't free externally allocated backing memory */
+ if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
+ if (omap_obj->pages) {
+ omap_gem_detach_pages(obj);
+ }
+ if (!is_shmem(obj)) {
+ dma_free_writecombine(dev->dev, obj->size,
+ omap_obj->vaddr, omap_obj->paddr);
+ } else if (omap_obj->vaddr) {
+ vunmap(omap_obj->vaddr);
+ }
+ }
+
+ /* don't free externally allocated syncobj */
+ if (!(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
+ kfree(omap_obj->sync);
+ }
+
+ drm_gem_object_release(obj);
+
+ kfree(obj);
+}
+
+/* convenience method to construct a GEM buffer object, and userspace handle */
+int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
+ union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = omap_gem_new(dev, gsize, flags);
+ if (!obj)
+ return -ENOMEM;
+
+ ret = drm_gem_handle_create(file, obj, handle);
+ if (ret) {
+ drm_gem_object_release(obj);
+ kfree(obj); /* TODO isn't there a dtor to call? just copying i915 */
+ return ret;
+ }
+
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference_unlocked(obj);
+
+ return 0;
+}
+
+/* GEM buffer object constructor */
+struct drm_gem_object *omap_gem_new(struct drm_device *dev,
+ union omap_gem_size gsize, uint32_t flags)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_gem_object *omap_obj;
+ struct drm_gem_object *obj = NULL;
+ size_t size;
+ int ret;
+
+ if (flags & OMAP_BO_TILED) {
+ if (!usergart) {
+ dev_err(dev->dev, "Tiled buffers require DMM\n");
+ goto fail;
+ }
+
+ /* tiled buffers are always shmem paged backed.. when they are
+ * scanned out, they are remapped into DMM/TILER
+ */
+ flags &= ~OMAP_BO_SCANOUT;
+
+ /* currently don't allow cached buffers.. there is some caching
+ * stuff that needs to be handled better
+ */
+ flags &= ~(OMAP_BO_CACHED|OMAP_BO_UNCACHED);
+ flags |= OMAP_BO_WC;
+
+ /* align dimensions to slot boundaries... */
+ tiler_align(gem2fmt(flags),
+ &gsize.tiled.width, &gsize.tiled.height);
+
+ /* ...and calculate size based on aligned dimensions */
+ size = tiler_size(gem2fmt(flags),
+ gsize.tiled.width, gsize.tiled.height);
+ } else {
+ size = PAGE_ALIGN(gsize.bytes);
+ }
+
+ omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
+ if (!omap_obj) {
+ dev_err(dev->dev, "could not allocate GEM object\n");
+ goto fail;
+ }
+
+ obj = &omap_obj->base;
+
+ if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
+ /* attempt to allocate contiguous memory if we don't
+ * have DMM for remapping discontiguous buffers
+ */
+ omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
+ &omap_obj->paddr, GFP_KERNEL);
+ if (omap_obj->vaddr) {
+ flags |= OMAP_BO_DMA;
+ }
+ }
+
+ omap_obj->flags = flags;
+
+ if (flags & OMAP_BO_TILED) {
+ omap_obj->width = gsize.tiled.width;
+ omap_obj->height = gsize.tiled.height;
+ }
+
+ if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
+ ret = drm_gem_private_object_init(dev, obj, size);
+ } else {
+ ret = drm_gem_object_init(dev, obj, size);
+ }
+
+ if (ret) {
+ goto fail;
+ }
+
+ return obj;
+
+fail:
+ if (obj) {
+ omap_gem_free_object(obj);
+ }
+ return NULL;
+}
+
+/* init/cleanup.. if DMM is used, we need to set some stuff up.. */
+void omap_gem_init(struct drm_device *dev)
+{
+ struct omap_drm_private *priv = dev->dev_private;
+ const enum tiler_fmt fmts[] = {
+ TILFMT_8BIT, TILFMT_16BIT, TILFMT_32BIT
+ };
+ int i, j, ret;
+
+ ret = omap_dmm_init(dev);
+ if (ret) {
+ /* DMM only supported on OMAP4 and later, so this isn't fatal */
+ dev_warn(dev->dev, "omap_dmm_init failed, disabling DMM\n");
+ return;
+ }
+
+ usergart = kzalloc(3 * sizeof(*usergart), GFP_KERNEL);
+ if (!usergart) {
+ dev_warn(dev->dev, "could not allocate usergart\n");
+ return;
+ }
+
+ /* reserve 4k aligned/wide regions for userspace mappings: */
+ for (i = 0; i < ARRAY_SIZE(fmts); i++) {
+ uint16_t h = 1, w = PAGE_SIZE >> i;
+ tiler_align(fmts[i], &w, &h);
+ /* note: since each region is one 4kb page wide and uses the minimum
+ * number of rows, the height ends up being the same as the
+ * number of pages in the region
+ */
+ usergart[i].height = h;
+ usergart[i].height_shift = ilog2(h);
+ usergart[i].stride_pfn = tiler_stride(fmts[i]) >> PAGE_SHIFT;
+ usergart[i].slot_shift = ilog2((PAGE_SIZE / h) >> i);
+ for (j = 0; j < NUM_USERGART_ENTRIES; j++) {
+ struct usergart_entry *entry = &usergart[i].entry[j];
+ struct tiler_block *block =
+ tiler_reserve_2d(fmts[i], w, h,
+ PAGE_SIZE);
+ if (IS_ERR(block)) {
+ dev_err(dev->dev,
+ "reserve failed: %d, %d, %ld\n",
+ i, j, PTR_ERR(block));
+ return;
+ }
+ entry->paddr = tiler_ssptr(block);
+ entry->block = block;
+
+ DBG("%d:%d: %dx%d: paddr=%08x stride=%d", i, j, w, h,
+ entry->paddr,
+ usergart[i].stride_pfn << PAGE_SHIFT);
+ }
+ }
+
+ priv->has_dmm = true;
+}
+
+void omap_gem_deinit(struct drm_device *dev)
+{
+ /* I believe we can rely on there being no more outstanding GEM
+ * objects which could depend on usergart/dmm at this point.
+ */
+ omap_dmm_remove();
+ kfree(usergart);
+}
diff --git a/drivers/staging/omapdrm/omap_gem_helpers.c b/drivers/staging/omapdrm/omap_gem_helpers.c
new file mode 100644
index 0000000..29275c7
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_gem_helpers.c
@@ -0,0 +1,169 @@
+/*
+ * drivers/staging/omapdrm/omap_gem_helpers.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* temporary copy of drm_gem_{get,put}_pages() until the
+ * "drm/gem: add functions to get/put pages" patch is merged..
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/shmem_fs.h>
+
+#include <drm/drmP.h>
+
+/**
+ * drm_gem_get_pages - helper to allocate backing pages for a GEM object
+ * @obj: obj in question
+ * @gfpmask: gfp mask of requested pages
+ */
+struct page ** _drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+{
+ struct inode *inode;
+ struct address_space *mapping;
+ struct page *p, **pages;
+ int i, npages;
+
+ /* This is the shared memory object that backs the GEM resource */
+ inode = obj->filp->f_path.dentry->d_inode;
+ mapping = inode->i_mapping;
+
+ npages = obj->size >> PAGE_SHIFT;
+
+ pages = drm_malloc_ab(npages, sizeof(struct page *));
+ if (pages == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ gfpmask |= mapping_gfp_mask(mapping);
+
+ for (i = 0; i < npages; i++) {
+ p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+ if (IS_ERR(p))
+ goto fail;
+ pages[i] = p;
+
+ /* There is a hypothetical issue w/ drivers that require
+ * buffer memory in the low 4GB.. if the pages are un-
+ * pinned, and swapped out, they can end up swapped back
+ * in above 4GB. If pages are already in memory, then
+ * shmem_read_mapping_page_gfp will ignore the gfpmask,
+ * even if the already in-memory page disobeys the mask.
+ *
+ * It is only a theoretical issue today, because none of
+ * the devices with this limitation can be populated with
+ * enough memory to trigger the issue. But this BUG_ON()
+ * is here as a reminder in case the problem with
+ * shmem_read_mapping_page_gfp() isn't solved by the time
+ * it does become a real issue.
+ *
+ * See this thread: http://lkml.org/lkml/2011/7/11/238
+ */
+ BUG_ON((gfpmask & __GFP_DMA32) &&
+ (page_to_pfn(p) >= 0x00100000UL));
+ }
+
+ return pages;
+
+fail:
+ while (i--) {
+ page_cache_release(pages[i]);
+ }
+ drm_free_large(pages);
+ return ERR_PTR(PTR_ERR(p));
+}
+
+/**
+ * drm_gem_put_pages - helper to free backing pages for a GEM object
+ * @obj: obj in question
+ * @pages: pages to free
+ */
+void _drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+ bool dirty, bool accessed)
+{
+ int i, npages;
+
+ npages = obj->size >> PAGE_SHIFT;
+
+ for (i = 0; i < npages; i++) {
+ if (dirty)
+ set_page_dirty(pages[i]);
+
+ if (accessed)
+ mark_page_accessed(pages[i]);
+
+ /* Undo the reference we took when populating the table */
+ page_cache_release(pages[i]);
+ }
+
+ drm_free_large(pages);
+}
+
+int
+_drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
+{
+ struct drm_device *dev = obj->dev;
+ struct drm_gem_mm *mm = dev->mm_private;
+ struct drm_map_list *list;
+ struct drm_local_map *map;
+ int ret = 0;
+
+ /* Set the object up for mmap'ing */
+ list = &obj->map_list;
+ list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
+ if (!list->map)
+ return -ENOMEM;
+
+ map = list->map;
+ map->type = _DRM_GEM;
+ map->size = size;
+ map->handle = obj;
+
+ /* Get a DRM GEM mmap offset allocated... */
+ list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
+ size / PAGE_SIZE, 0, 0);
+
+ if (!list->file_offset_node) {
+ DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
+ ret = -ENOSPC;
+ goto out_free_list;
+ }
+
+ list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+ size / PAGE_SIZE, 0);
+ if (!list->file_offset_node) {
+ ret = -ENOMEM;
+ goto out_free_list;
+ }
+
+ list->hash.key = list->file_offset_node->start;
+ ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
+ if (ret) {
+ DRM_ERROR("failed to add to map hash\n");
+ goto out_free_mm;
+ }
+
+ return 0;
+
+out_free_mm:
+ drm_mm_put_block(list->file_offset_node);
+out_free_list:
+ kfree(list->map);
+ list->map = NULL;
+
+ return ret;
+}
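For reference, the fake offset that userspace later passes to mmap() is derived from the hash key installed above: in DRM of this vintage it is the offset node start (hash.key) shifted into bytes. A sketch of how a driver might report it back to userspace (helper name is hypothetical):

	/* sketch: mmap offset for an object set up by
	 * _drm_gem_create_mmap_offset_size() above
	 */
	static uint64_t example_mmap_offset(struct drm_gem_object *obj)
	{
		return (uint64_t)obj->map_list.hash.key << PAGE_SHIFT;
	}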
diff --git a/drivers/staging/omapdrm/omap_plane.c b/drivers/staging/omapdrm/omap_plane.c
new file mode 100644
index 0000000..9790912
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_plane.c
@@ -0,0 +1,344 @@
+/*
+ * drivers/staging/omapdrm/omap_plane.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob.clark@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "omap_drv.h"
+
+/* some hackery because omapdss has an 'enum omap_plane' (which would be
+ * better named omap_plane_id).. and compiler seems unhappy about having
+ * both a 'struct omap_plane' and 'enum omap_plane'
+ */
+#define omap_plane _omap_plane
+
+/*
+ * plane funcs
+ */
+
+#define to_omap_plane(x) container_of(x, struct omap_plane, base)
+
+struct omap_plane {
+ struct drm_plane base;
+ struct omap_overlay *ovl;
+ struct omap_overlay_info info;
+
+ /* Source values, converted to integers because we don't support
+ * fractional positions:
+ */
+ unsigned int src_x, src_y;
+
+ /* last fb that we pinned: */
+ struct drm_framebuffer *pinned_fb;
+};
+
+
+/* push changes down to dss2 */
+static int commit(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+ struct omap_overlay *ovl = omap_plane->ovl;
+ struct omap_overlay_info *info = &omap_plane->info;
+ int ret;
+
+ DBG("%s", ovl->name);
+ DBG("%dx%d -> %dx%d (%d)", info->width, info->height, info->out_width,
+ info->out_height, info->screen_width);
+ DBG("%d,%d %08x %08x", info->pos_x, info->pos_y,
+ info->paddr, info->p_uv_addr);
+
+ /* NOTE: do we want to do this at all here, or just wait
+ * for dpms(ON) since other CRTCs may not have their mode
+ * set yet, so fb dimensions may still change..
+ */
+ ret = ovl->set_overlay_info(ovl, info);
+ if (ret) {
+ dev_err(dev->dev, "could not set overlay info\n");
+ return ret;
+ }
+
+ /* our encoder doesn't necessarily get a commit() after this, in
+ * particular in the dpms() and mode_set_base() cases, so force the
+ * manager to update:
+ *
+ * could this be in the encoder somehow?
+ */
+ if (ovl->manager) {
+ ret = ovl->manager->apply(ovl->manager);
+ if (ret) {
+ dev_err(dev->dev, "could not apply settings\n");
+ return ret;
+ }
+ }
+
+ if (ovl->is_enabled(ovl)) {
+ omap_framebuffer_flush(plane->fb, info->pos_x, info->pos_y,
+ info->out_width, info->out_height);
+ }
+
+ return 0;
+}
+
+/* when CRTC that we are attached to has potentially changed, this checks
+ * if we are attached to proper manager, and if necessary updates.
+ */
+static void update_manager(struct drm_plane *plane)
+{
+ struct omap_drm_private *priv = plane->dev->dev_private;
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+ struct omap_overlay *ovl = omap_plane->ovl;
+ struct omap_overlay_manager *mgr = NULL;
+ int i;
+
+ if (plane->crtc) {
+ for (i = 0; i < priv->num_encoders; i++) {
+ struct drm_encoder *encoder = priv->encoders[i];
+ if (encoder->crtc == plane->crtc) {
+ mgr = omap_encoder_get_manager(encoder);
+ break;
+ }
+ }
+ }
+
+ if (ovl->manager != mgr) {
+ bool enabled = ovl->is_enabled(ovl);
+
+ /* don't switch things around with enabled overlays: */
+ if (enabled)
+ omap_plane_dpms(plane, DRM_MODE_DPMS_OFF);
+
+ if (ovl->manager) {
+ DBG("disconnecting %s from %s", ovl->name,
+ ovl->manager->name);
+ ovl->unset_manager(ovl);
+ }
+
+ if (mgr) {
+ DBG("connecting %s to %s", ovl->name, mgr->name);
+ ovl->set_manager(ovl, mgr);
+ }
+
+ if (enabled && mgr)
+ omap_plane_dpms(plane, DRM_MODE_DPMS_ON);
+ }
+}
+
+/* update which fb (if any) is pinned for scanout */
+static int update_pin(struct drm_plane *plane, struct drm_framebuffer *fb)
+{
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+ int ret = 0;
+
+ if (omap_plane->pinned_fb != fb) {
+ if (omap_plane->pinned_fb)
+ omap_framebuffer_unpin(omap_plane->pinned_fb);
+ omap_plane->pinned_fb = fb;
+ if (fb)
+ ret = omap_framebuffer_pin(fb);
+ }
+
+ return ret;
+}
+
+/* update parameters that are dependent on the framebuffer dimensions and
+ * position within the fb that this plane scans out from. This is called
+ * when framebuffer or x,y base may have changed.
+ */
+static void update_scanout(struct drm_plane *plane)
+{
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+ struct omap_overlay_info *info = &omap_plane->info;
+ int ret;
+
+ ret = update_pin(plane, plane->fb);
+ if (ret) {
+ dev_err(plane->dev->dev,
+ "could not pin fb: %d\n", ret);
+ omap_plane_dpms(plane, DRM_MODE_DPMS_OFF);
+ return;
+ }
+
+ omap_framebuffer_update_scanout(plane->fb,
+ omap_plane->src_x, omap_plane->src_y, info);
+
+ DBG("%s: %d,%d: %08x %08x (%d)", omap_plane->ovl->name,
+ omap_plane->src_x, omap_plane->src_y,
+ (u32)info->paddr, (u32)info->p_uv_addr,
+ info->screen_width);
+}
+
+int omap_plane_mode_set(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+
+ /* src values are in Q16 fixed point, convert to integer: */
+ src_x = src_x >> 16;
+ src_y = src_y >> 16;
+ src_w = src_w >> 16;
+ src_h = src_h >> 16;
+
+ omap_plane->info.pos_x = crtc_x;
+ omap_plane->info.pos_y = crtc_y;
+ omap_plane->info.out_width = crtc_w;
+ omap_plane->info.out_height = crtc_h;
+ omap_plane->info.width = src_w;
+ omap_plane->info.height = src_h;
+ omap_plane->src_x = src_x;
+ omap_plane->src_y = src_y;
+
+ /* note: this is done after this fxn returns.. but if we need
+ * to do a commit/update_scanout, etc before this returns we
+ * need the current value.
+ */
+ plane->fb = fb;
+ plane->crtc = crtc;
+
+ update_scanout(plane);
+ update_manager(plane);
+
+ return 0;
+}
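The src_* arguments follow the usual DRM plane convention of 16.16 fixed point, which is why the function simply drops the fractional bits. As an illustration (values made up), a caller scanning out a full 1920x1080 source region would pass:

	uint32_t src_x = 0 << 16;	/* 0.0 */
	uint32_t src_y = 0 << 16;	/* 0.0 */
	uint32_t src_w = 1920 << 16;	/* 1920.0; becomes 1920 after >> 16 */
	uint32_t src_h = 1080 << 16;	/* 1080.0; becomes 1080 after >> 16 */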
+
+static int omap_plane_update(struct drm_plane *plane,
+ struct drm_crtc *crtc, struct drm_framebuffer *fb,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h,
+ uint32_t src_x, uint32_t src_y,
+ uint32_t src_w, uint32_t src_h)
+{
+ omap_plane_mode_set(plane, crtc, fb, crtc_x, crtc_y, crtc_w, crtc_h,
+ src_x, src_y, src_w, src_h);
+ return omap_plane_dpms(plane, DRM_MODE_DPMS_ON);
+}
+
+static int omap_plane_disable(struct drm_plane *plane)
+{
+ return omap_plane_dpms(plane, DRM_MODE_DPMS_OFF);
+}
+
+static void omap_plane_destroy(struct drm_plane *plane)
+{
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+ DBG("%s", omap_plane->ovl->name);
+ omap_plane_disable(plane);
+ drm_plane_cleanup(plane);
+ kfree(omap_plane);
+}
+
+int omap_plane_dpms(struct drm_plane *plane, int mode)
+{
+ struct omap_plane *omap_plane = to_omap_plane(plane);
+ struct omap_overlay *ovl = omap_plane->ovl;
+ int r;
+
+ DBG("%s: %d", omap_plane->ovl->name, mode);
+
+ if (mode == DRM_MODE_DPMS_ON) {
+ update_scanout(plane);
+ r = commit(plane);
+ if (!r)
+ r = ovl->enable(ovl);
+ } else {
+ r = ovl->disable(ovl);
+ update_pin(plane, NULL);
+ }
+
+ return r;
+}
+
+static const struct drm_plane_funcs omap_plane_funcs = {
+ .update_plane = omap_plane_update,
+ .disable_plane = omap_plane_disable,
+ .destroy = omap_plane_destroy,
+};
+
+static const uint32_t formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_RGBA4444,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_UYVY,
+};
+
+/* initialize plane */
+struct drm_plane *omap_plane_init(struct drm_device *dev,
+ struct omap_overlay *ovl, unsigned int possible_crtcs,
+ bool priv)
+{
+ struct drm_plane *plane = NULL;
+ struct omap_plane *omap_plane;
+
+ DBG("%s: possible_crtcs=%08x, priv=%d", ovl->name,
+ possible_crtcs, priv);
+
+ omap_plane = kzalloc(sizeof(*omap_plane), GFP_KERNEL);
+ if (!omap_plane) {
+ dev_err(dev->dev, "could not allocate plane\n");
+ goto fail;
+ }
+
+ omap_plane->ovl = ovl;
+ plane = &omap_plane->base;
+
+ drm_plane_init(dev, plane, possible_crtcs, &omap_plane_funcs,
+ formats, ARRAY_SIZE(formats), priv);
+
+ /* get our starting configuration, set defaults for parameters
+ * we don't currently use, etc:
+ */
+ ovl->get_overlay_info(ovl, &omap_plane->info);
+ omap_plane->info.rotation_type = OMAP_DSS_ROT_DMA;
+ omap_plane->info.rotation = OMAP_DSS_ROT_0;
+ omap_plane->info.global_alpha = 0xff;
+ omap_plane->info.mirror = 0;
+
+ /* Set defaults depending on whether we are a CRTC or overlay
+ * layer.
+ * TODO add ioctl to give userspace an API to change this.. this
+ * will come in a subsequent patch.
+ */
+ if (priv)
+ omap_plane->info.zorder = 0;
+ else
+ omap_plane->info.zorder = 1;
+
+ update_manager(plane);
+
+ return plane;
+
+fail:
+ if (plane) {
+ omap_plane_destroy(plane);
+ }
+ return NULL;
+}
diff --git a/drivers/staging/omapdrm/omap_priv.h b/drivers/staging/omapdrm/omap_priv.h
new file mode 100644
index 0000000..ef64414
--- /dev/null
+++ b/drivers/staging/omapdrm/omap_priv.h
@@ -0,0 +1,55 @@
+/*
+ * drivers/staging/omapdrm/omap_priv.h
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Rob Clark <rob@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __OMAP_PRIV_H__
+#define __OMAP_PRIV_H__
+
+/* Non-userspace facing APIs
+ */
+
+/* optional platform data to configure the default configuration of which
+ * pipes/overlays/CRTCs are used.. if this is not provided, then instead the
+ * first CONFIG_DRM_OMAP_NUM_CRTCS are used, and they are each connected to
+ * one manager, with priority given to managers that are connected to
+ * detected devices. Remaining overlays are used as video planes. This
+ * should be a good default behavior for most cases, but there may still
+ * be times when you wish to do something different.
+ */
+struct omap_kms_platform_data {
+ /* overlays to use as CRTCs: */
+ int ovl_cnt;
+ const int *ovl_ids;
+
+ /* overlays to use as video planes: */
+ int pln_cnt;
+ const int *pln_ids;
+
+ int mgr_cnt;
+ const int *mgr_ids;
+
+ int dev_cnt;
+ const char **dev_names;
+};
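A hypothetical board file might fill this in roughly as follows (the overlay IDs and device name are invented for illustration; only fields defined above are used):

	/* illustration: overlays 0 and 1 as CRTCs, overlay 2 as a video
	 * plane, prefer the manager driving "hdmi"
	 */
	static const int example_ovl_ids[] = { 0, 1 };
	static const int example_pln_ids[] = { 2 };
	static const char *example_dev_names[] = { "hdmi" };

	static struct omap_kms_platform_data example_kms_pdata = {
		.ovl_cnt   = ARRAY_SIZE(example_ovl_ids),
		.ovl_ids   = example_ovl_ids,
		.pln_cnt   = ARRAY_SIZE(example_pln_ids),
		.pln_ids   = example_pln_ids,
		.dev_cnt   = ARRAY_SIZE(example_dev_names),
		.dev_names = example_dev_names,
	};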
+
+struct omap_drm_platform_data {
+ struct omap_kms_platform_data *kms_pdata;
+ struct omap_dmm_platform_data *dmm_pdata;
+};
+
+#endif /* __OMAP_PRIV_H__ */
diff --git a/drivers/staging/omapdrm/tcm-sita.c b/drivers/staging/omapdrm/tcm-sita.c
new file mode 100644
index 0000000..10d5ac3
--- /dev/null
+++ b/drivers/staging/omapdrm/tcm-sita.c
@@ -0,0 +1,703 @@
+/*
+ * tcm-sita.c
+ *
+ * SImple Tiler Allocator (SiTA): 2D and 1D allocation (reservation) algorithm
+ *
+ * Authors: Ravi Ramachandra <r.ramachandra@ti.com>,
+ * Lajos Molnar <molnar@ti.com>
+ *
+ * Copyright (C) 2009-2010 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ */
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "tcm-sita.h"
+
+#define ALIGN_DOWN(value, align) ((value) & ~((align) - 1))
+
+/* Individual selection criteria for different scan areas */
+static s32 CR_L2R_T2B = CR_BIAS_HORIZONTAL;
+static s32 CR_R2L_T2B = CR_DIAGONAL_BALANCE;
+
+/*********************************************
+ * TCM API - Sita Implementation
+ *********************************************/
+static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align,
+ struct tcm_area *area);
+static s32 sita_reserve_1d(struct tcm *tcm, u32 slots, struct tcm_area *area);
+static s32 sita_free(struct tcm *tcm, struct tcm_area *area);
+static void sita_deinit(struct tcm *tcm);
+
+/*********************************************
+ * Main Scanner functions
+ *********************************************/
+static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *area);
+
+static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area);
+
+static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area);
+
+static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_slots,
+ struct tcm_area *field, struct tcm_area *area);
+
+/*********************************************
+ * Support Infrastructure Methods
+ *********************************************/
+static s32 is_area_free(struct tcm_area ***map, u16 x0, u16 y0, u16 w, u16 h);
+
+static s32 update_candidate(struct tcm *tcm, u16 x0, u16 y0, u16 w, u16 h,
+ struct tcm_area *field, s32 criteria,
+ struct score *best);
+
+static void get_nearness_factor(struct tcm_area *field,
+ struct tcm_area *candidate,
+ struct nearness_factor *nf);
+
+static void get_neighbor_stats(struct tcm *tcm, struct tcm_area *area,
+ struct neighbor_stats *stat);
+
+static void fill_area(struct tcm *tcm,
+ struct tcm_area *area, struct tcm_area *parent);
+
+
+/*********************************************/
+
+/*********************************************
+ * Utility Methods
+ *********************************************/
+struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr)
+{
+ struct tcm *tcm;
+ struct sita_pvt *pvt;
+ struct tcm_area area = {0};
+ s32 i;
+
+ if (width == 0 || height == 0)
+ return NULL;
+
+ tcm = kmalloc(sizeof(*tcm), GFP_KERNEL);
+ pvt = kmalloc(sizeof(*pvt), GFP_KERNEL);
+ if (!tcm || !pvt)
+ goto error;
+
+ memset(tcm, 0, sizeof(*tcm));
+ memset(pvt, 0, sizeof(*pvt));
+
+ /* Updating the pointers to SiTA implementation APIs */
+ tcm->height = height;
+ tcm->width = width;
+ tcm->reserve_2d = sita_reserve_2d;
+ tcm->reserve_1d = sita_reserve_1d;
+ tcm->free = sita_free;
+ tcm->deinit = sita_deinit;
+ tcm->pvt = (void *)pvt;
+
+ spin_lock_init(&(pvt->lock));
+
+ /* Creating the tcm map */
+ pvt->map = kmalloc(sizeof(*pvt->map) * tcm->width, GFP_KERNEL);
+ if (!pvt->map)
+ goto error;
+
+ for (i = 0; i < tcm->width; i++) {
+ pvt->map[i] =
+ kmalloc(sizeof(**pvt->map) * tcm->height,
+ GFP_KERNEL);
+ if (pvt->map[i] == NULL) {
+ while (i--)
+ kfree(pvt->map[i]);
+ kfree(pvt->map);
+ goto error;
+ }
+ }
+
+ if (attr && attr->x <= tcm->width && attr->y <= tcm->height) {
+ pvt->div_pt.x = attr->x;
+ pvt->div_pt.y = attr->y;
+
+ } else {
+ /* Defaulting to 3:1 ratio on width for 2D area split */
+ /* Defaulting to 3:1 ratio on height for 2D and 1D split */
+ pvt->div_pt.x = (tcm->width * 3) / 4;
+ pvt->div_pt.y = (tcm->height * 3) / 4;
+ }
+
+ spin_lock(&(pvt->lock));
+ assign(&area, 0, 0, width - 1, height - 1);
+ fill_area(tcm, &area, NULL);
+ spin_unlock(&(pvt->lock));
+ return tcm;
+
+error:
+ kfree(tcm);
+ kfree(pvt);
+ return NULL;
+}
+
+static void sita_deinit(struct tcm *tcm)
+{
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+ struct tcm_area area = {0};
+ s32 i;
+
+ area.p1.x = tcm->width - 1;
+ area.p1.y = tcm->height - 1;
+
+ spin_lock(&(pvt->lock));
+ fill_area(tcm, &area, NULL);
+ spin_unlock(&(pvt->lock));
+
+ for (i = 0; i < tcm->width; i++)
+ kfree(pvt->map[i]);
+ kfree(pvt->map);
+ kfree(pvt);
+}
+
+/**
+ * Reserve a 1D area in the container
+ *
+ * @param num_slots size of 1D area
+ * @param area pointer to the area that will be populated with the
+ * reserved area
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 sita_reserve_1d(struct tcm *tcm, u32 num_slots,
+ struct tcm_area *area)
+{
+ s32 ret;
+ struct tcm_area field = {0};
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+ spin_lock(&(pvt->lock));
+
+ /* Scanning entire container */
+ assign(&field, tcm->width - 1, tcm->height - 1, 0, 0);
+
+ ret = scan_r2l_b2t_one_dim(tcm, num_slots, &field, area);
+ if (!ret)
+ /* update map */
+ fill_area(tcm, area, area);
+
+ spin_unlock(&(pvt->lock));
+ return ret;
+}
+
+/**
+ * Reserve a 2D area in the container
+ *
+ * @param w width
+ * @param h height
+ * @param area pointer to the area that will be populated with the reserved
+ * area
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 sita_reserve_2d(struct tcm *tcm, u16 h, u16 w, u8 align,
+ struct tcm_area *area)
+{
+ s32 ret;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+ /* not supporting more than 64 as alignment */
+ if (align > 64)
+ return -EINVAL;
+
+ /* we prefer 1, 32 and 64 as alignment */
+ align = align <= 1 ? 1 : align <= 32 ? 32 : 64;
+
+ spin_lock(&(pvt->lock));
+ ret = scan_areas_and_find_fit(tcm, w, h, align, area);
+ if (!ret)
+ /* update map */
+ fill_area(tcm, area, area);
+
+ spin_unlock(&(pvt->lock));
+ return ret;
+}
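The snapping above means only 1, 32 and 64 are ever used as the effective alignment; for example:

	/* requested align -> effective align */
	/*   0 or 1  -> 1                     */
	/*   2..32   -> 32 (e.g. 16 -> 32)    */
	/*   33..64  -> 64                    */
	/*   > 64    -> -EINVAL, rejected before snapping */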
+
+/**
+ * Unreserve a previously allocated 2D or 1D area
+ * @param area area to be freed
+ * @return 0 - success
+ */
+static s32 sita_free(struct tcm *tcm, struct tcm_area *area)
+{
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+ spin_lock(&(pvt->lock));
+
+ /* check that this is in fact an existing area */
+ WARN_ON(pvt->map[area->p0.x][area->p0.y] != area ||
+ pvt->map[area->p1.x][area->p1.y] != area);
+
+ /* Clear the contents of the associated tiles in the map */
+ fill_area(tcm, area, NULL);
+
+ spin_unlock(&(pvt->lock));
+
+ return 0;
+}
+
+/**
+ * Note: In general the coordinates in the scan field are relative to the scan
+ * sweep direction. The scan origin (e.g. top-left corner) will always be
+ * the p0 member of the field. Therefore, for a scan from top-left p0.x <= p1.x
+ * and p0.y <= p1.y; whereas, for a scan from bottom-right p1.x <= p0.x and p1.y
+ * <= p0.y
+ */
+
+/**
+ * Raster scan horizontally right to left from top to bottom to find a place for
+ * a 2D area of given size inside a scan field.
+ *
+ * @param w width of desired area
+ * @param h height of desired area
+ * @param align desired area alignment
+ * @param area pointer to the area that will be set to the best position
+ * @param field area to scan (inclusive)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_r2l_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area)
+{
+ s32 x, y;
+ s16 start_x, end_x, start_y, end_y, found_x = -1;
+ struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map;
+ struct score best = {{0}, {0}, {0}, 0};
+
+ start_x = field->p0.x;
+ end_x = field->p1.x;
+ start_y = field->p0.y;
+ end_y = field->p1.y;
+
+ /* check scan area co-ordinates */
+ if (field->p0.x < field->p1.x ||
+ field->p1.y < field->p0.y)
+ return -EINVAL;
+
+ /* check if allocation would fit in scan area */
+ if (w > LEN(start_x, end_x) || h > LEN(end_y, start_y))
+ return -ENOSPC;
+
+ /* adjust start_x and end_y, as allocation would not fit beyond */
+ start_x = ALIGN_DOWN(start_x - w + 1, align); /* - 1 to be inclusive */
+ end_y = end_y - h + 1;
+
+ /* check if allocation would still fit in scan area */
+ if (start_x < end_x)
+ return -ENOSPC;
+
+ /* scan field top-to-bottom, right-to-left */
+ for (y = start_y; y <= end_y; y++) {
+ for (x = start_x; x >= end_x; x -= align) {
+ if (is_area_free(map, x, y, w, h)) {
+ found_x = x;
+
+ /* update best candidate */
+ if (update_candidate(tcm, x, y, w, h, field,
+ CR_R2L_T2B, &best))
+ goto done;
+
+ /* change upper x bound */
+ end_x = x + 1;
+ break;
+ } else if (map[x][y] && map[x][y]->is2d) {
+ /* step over 2D areas */
+ x = ALIGN(map[x][y]->p0.x - w + 1, align);
+ }
+ }
+
+ /* break if you find a free area shouldering the scan field */
+ if (found_x == start_x)
+ break;
+ }
+
+ if (!best.a.tcm)
+ return -ENOSPC;
+done:
+ assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);
+ return 0;
+}
+
+/**
+ * Raster scan horizontally left to right from top to bottom to find a place for
+ * a 2D area of given size inside a scan field.
+ *
+ * @param w width of desired area
+ * @param h height of desired area
+ * @param align desired area alignment
+ * @param area pointer to the area that will be set to the best position
+ * @param field area to scan (inclusive)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_l2r_t2b(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *field, struct tcm_area *area)
+{
+ s32 x, y;
+ s16 start_x, end_x, start_y, end_y, found_x = -1;
+ struct tcm_area ***map = ((struct sita_pvt *)tcm->pvt)->map;
+ struct score best = {{0}, {0}, {0}, 0};
+
+ start_x = field->p0.x;
+ end_x = field->p1.x;
+ start_y = field->p0.y;
+ end_y = field->p1.y;
+
+ /* check scan area co-ordinates */
+ if (field->p1.x < field->p0.x ||
+ field->p1.y < field->p0.y)
+ return -EINVAL;
+
+ /* check if allocation would fit in scan area */
+ if (w > LEN(end_x, start_x) || h > LEN(end_y, start_y))
+ return -ENOSPC;
+
+ start_x = ALIGN(start_x, align);
+
+ /* check if allocation would still fit in scan area */
+ if (w > LEN(end_x, start_x))
+ return -ENOSPC;
+
+ /* adjust end_x and end_y, as allocation would not fit beyond */
+ end_x = end_x - w + 1; /* + 1 to be inclusive */
+ end_y = end_y - h + 1;
+
+ /* scan field top-to-bottom, left-to-right */
+ for (y = start_y; y <= end_y; y++) {
+ for (x = start_x; x <= end_x; x += align) {
+ if (is_area_free(map, x, y, w, h)) {
+ found_x = x;
+
+ /* update best candidate */
+ if (update_candidate(tcm, x, y, w, h, field,
+ CR_L2R_T2B, &best))
+ goto done;
+ /* change upper x bound */
+ end_x = x - 1;
+
+ break;
+ } else if (map[x][y] && map[x][y]->is2d) {
+ /* step over 2D areas */
+ x = ALIGN_DOWN(map[x][y]->p1.x, align);
+ }
+ }
+
+ /* break if you find a free area shouldering the scan field */
+ if (found_x == start_x)
+ break;
+ }
+
+ if (!best.a.tcm)
+ return -ENOSPC;
+done:
+ assign(area, best.a.p0.x, best.a.p0.y, best.a.p1.x, best.a.p1.y);
+ return 0;
+}
+
+/**
+ * Raster scan horizontally right to left from bottom to top to find a place
+ * for a 1D area of given size inside a scan field.
+ *
+ * @param num_slots size of desired area
+ * @param area pointer to the area that will be set to the best
+ * position
+ * @param field area to scan (inclusive)
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_r2l_b2t_one_dim(struct tcm *tcm, u32 num_slots,
+ struct tcm_area *field, struct tcm_area *area)
+{
+ s32 found = 0;
+ s16 x, y;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+ struct tcm_area *p;
+
+ /* check scan area co-ordinates */
+ if (field->p0.y < field->p1.y)
+ return -EINVAL;
+
+ /**
+ * Currently we only support full width 1D scan field, which makes sense
+ * since 1D slot-ordering spans the full container width.
+ */
+ if (tcm->width != field->p0.x - field->p1.x + 1)
+ return -EINVAL;
+
+ /* check if allocation would fit in scan area */
+ if (num_slots > tcm->width * LEN(field->p0.y, field->p1.y))
+ return -ENOSPC;
+
+ x = field->p0.x;
+ y = field->p0.y;
+
+ /* find num_slots consecutive free slots to the left */
+ while (found < num_slots) {
+ if (y < 0)
+ return -ENOSPC;
+
+ /* remember bottom-right corner */
+ if (found == 0) {
+ area->p1.x = x;
+ area->p1.y = y;
+ }
+
+ /* skip busy regions */
+ p = pvt->map[x][y];
+ if (p) {
+ /* move to left of 2D areas, top left of 1D */
+ x = p->p0.x;
+ if (!p->is2d)
+ y = p->p0.y;
+
+ /* start over */
+ found = 0;
+ } else {
+ /* count consecutive free slots */
+ found++;
+ if (found == num_slots)
+ break;
+ }
+
+ /* move to the left */
+ if (x == 0)
+ y--;
+ x = (x ? : tcm->width) - 1;
+
+ }
+
+ /* set top-left corner */
+ area->p0.x = x;
+ area->p0.y = y;
+ return 0;
+}
+
+/**
+ * Find a place for a 2D area of given size inside a scan field based on its
+ * alignment needs.
+ *
+ * @param w width of desired area
+ * @param h height of desired area
+ * @param align desired area alignment
+ * @param area pointer to the area that will be set to the best position
+ *
+ * @return 0 on success, non-0 error value on failure.
+ */
+static s32 scan_areas_and_find_fit(struct tcm *tcm, u16 w, u16 h, u16 align,
+ struct tcm_area *area)
+{
+ s32 ret = 0;
+ struct tcm_area field = {0};
+ u16 boundary_x, boundary_y;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+ if (align > 1) {
+ /* prefer top-left corner */
+ boundary_x = pvt->div_pt.x - 1;
+ boundary_y = pvt->div_pt.y - 1;
+
+ /* expand width and height if needed */
+ if (w > pvt->div_pt.x)
+ boundary_x = tcm->width - 1;
+ if (h > pvt->div_pt.y)
+ boundary_y = tcm->height - 1;
+
+ assign(&field, 0, 0, boundary_x, boundary_y);
+ ret = scan_l2r_t2b(tcm, w, h, align, &field, area);
+
+ /* scan whole container if failed, but do not scan 2x */
+ if (ret != 0 && (boundary_x != tcm->width - 1 ||
+ boundary_y != tcm->height - 1)) {
+ /* scan the entire container if nothing found */
+ assign(&field, 0, 0, tcm->width - 1, tcm->height - 1);
+ ret = scan_l2r_t2b(tcm, w, h, align, &field, area);
+ }
+ } else if (align == 1) {
+ /* prefer top-right corner */
+ boundary_x = pvt->div_pt.x;
+ boundary_y = pvt->div_pt.y - 1;
+
+ /* expand width and height if needed */
+ if (w > (tcm->width - pvt->div_pt.x))
+ boundary_x = 0;
+ if (h > pvt->div_pt.y)
+ boundary_y = tcm->height - 1;
+
+ assign(&field, tcm->width - 1, 0, boundary_x, boundary_y);
+ ret = scan_r2l_t2b(tcm, w, h, align, &field, area);
+
+ /* scan whole container if failed, but do not scan 2x */
+ if (ret != 0 && (boundary_x != 0 ||
+ boundary_y != tcm->height - 1)) {
+ /* scan the entire container if nothing found */
+ assign(&field, tcm->width - 1, 0, 0, tcm->height - 1);
+ ret = scan_r2l_t2b(tcm, w, h, align, &field,
+ area);
+ }
+ }
+
+ return ret;
+}
+
+/* check if an entire area is free */
+static s32 is_area_free(struct tcm_area ***map, u16 x0, u16 y0, u16 w, u16 h)
+{
+ u16 x = 0, y = 0;
+ for (y = y0; y < y0 + h; y++) {
+ for (x = x0; x < x0 + w; x++) {
+ if (map[x][y])
+ return false;
+ }
+ }
+ return true;
+}
+
+/* fills an area with a parent tcm_area */
+static void fill_area(struct tcm *tcm, struct tcm_area *area,
+ struct tcm_area *parent)
+{
+ s32 x, y;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+ struct tcm_area a, a_;
+
+ /* set area's tcm; otherwise, enumerator considers it invalid */
+ area->tcm = tcm;
+
+ tcm_for_each_slice(a, *area, a_) {
+ for (x = a.p0.x; x <= a.p1.x; ++x)
+ for (y = a.p0.y; y <= a.p1.y; ++y)
+ pvt->map[x][y] = parent;
+
+ }
+}
+
+/**
+ * Compares a candidate area to the current best area, and if it is a better
+ * fit, it updates the best to this one.
+ *
+ * @param x0, y0, w, h top, left, width, height of candidate area
+ * @param field scan field
+ * @param criteria scan criteria
+ * @param best best candidate and its scores
+ *
+ * @return 1 (true) if the candidate area is known to be the final best, so no
+ * more searching should be performed
+ */
+static s32 update_candidate(struct tcm *tcm, u16 x0, u16 y0, u16 w, u16 h,
+ struct tcm_area *field, s32 criteria,
+ struct score *best)
+{
+ struct score me; /* score for area */
+
+ /*
+ * NOTE: For horizontal bias we always give the first found, because our
+ * scan is horizontal-raster-based and the first candidate will always
+ * have the horizontal bias.
+ */
+ bool first = criteria & CR_BIAS_HORIZONTAL;
+
+ assign(&me.a, x0, y0, x0 + w - 1, y0 + h - 1);
+
+ /* calculate score for current candidate */
+ if (!first) {
+ get_neighbor_stats(tcm, &me.a, &me.n);
+ me.neighs = me.n.edge + me.n.busy;
+ get_nearness_factor(field, &me.a, &me.f);
+ }
+
+ /* the 1st candidate is always the best */
+ if (!best->a.tcm)
+ goto better;
+
+ BUG_ON(first);
+
+ /* diagonal balance check */
+ if ((criteria & CR_DIAGONAL_BALANCE) &&
+ best->neighs <= me.neighs &&
+ (best->neighs < me.neighs ||
+ /* this implies that neighs and occupied match */
+ best->n.busy < me.n.busy ||
+ (best->n.busy == me.n.busy &&
+ /* check the nearness factor */
+ best->f.x + best->f.y > me.f.x + me.f.y)))
+ goto better;
+
+ /* not better, keep going */
+ return 0;
+
+better:
+ /* save current area as best */
+ memcpy(best, &me, sizeof(me));
+ best->a.tcm = tcm;
+ return first;
+}
+
+/**
+ * Calculate the nearness factor of an area in a search field. The nearness
+ * factor is smaller if the area is closer to the search origin.
+ */
+static void get_nearness_factor(struct tcm_area *field, struct tcm_area *area,
+ struct nearness_factor *nf)
+{
+ /**
+ * Using signed math as field coordinates may be reversed if
+ * search direction is right-to-left or bottom-to-top.
+ */
+ nf->x = (s32)(area->p0.x - field->p0.x) * 1000 /
+ (field->p1.x - field->p0.x);
+ nf->y = (s32)(area->p0.y - field->p0.y) * 1000 /
+ (field->p1.y - field->p0.y);
+}
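As a worked example of the scaling above: for a field spanning x = 0..100 (so field->p1.x - field->p0.x = 100), a candidate whose top-left corner sits at x0 = 25 scores nf->x = 25 * 1000 / 100 = 250, while one at x0 = 100 scores 1000; update_candidate() prefers smaller nf->x + nf->y totals when the other criteria tie.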
+
+/* get neighbor statistics */
+static void get_neighbor_stats(struct tcm *tcm, struct tcm_area *area,
+ struct neighbor_stats *stat)
+{
+ s16 x = 0, y = 0;
+ struct sita_pvt *pvt = (struct sita_pvt *)tcm->pvt;
+
+ /* Clearing any existing values */
+ memset(stat, 0, sizeof(*stat));
+
+ /* process top & bottom edges */
+ for (x = area->p0.x; x <= area->p1.x; x++) {
+ if (area->p0.y == 0)
+ stat->edge++;
+ else if (pvt->map[x][area->p0.y - 1])
+ stat->busy++;
+
+ if (area->p1.y == tcm->height - 1)
+ stat->edge++;
+ else if (pvt->map[x][area->p1.y + 1])
+ stat->busy++;
+ }
+
+ /* process left & right edges */
+ for (y = area->p0.y; y <= area->p1.y; ++y) {
+ if (area->p0.x == 0)
+ stat->edge++;
+ else if (pvt->map[area->p0.x - 1][y])
+ stat->busy++;
+
+ if (area->p1.x == tcm->width - 1)
+ stat->edge++;
+ else if (pvt->map[area->p1.x + 1][y])
+ stat->busy++;
+ }
+}
diff --git a/drivers/staging/omapdrm/tcm-sita.h b/drivers/staging/omapdrm/tcm-sita.h
new file mode 100644
index 0000000..0444f86
--- /dev/null
+++ b/drivers/staging/omapdrm/tcm-sita.h
@@ -0,0 +1,95 @@
+/*
+ * tcm_sita.h
+ *
+ * SImple Tiler Allocator (SiTA) private structures.
+ *
+ * Author: Ravi Ramachandra <r.ramachandra@ti.com>
+ *
+ * Copyright (C) 2009-2011 Texas Instruments, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _TCM_SITA_H
+#define _TCM_SITA_H
+
+#include "tcm.h"
+
+/* length between two coordinates */
+#define LEN(a, b) ((a) > (b) ? (a) - (b) + 1 : (b) - (a) + 1)
+
+enum criteria {
+ CR_MAX_NEIGHS = 0x01,
+ CR_FIRST_FOUND = 0x10,
+ CR_BIAS_HORIZONTAL = 0x20,
+ CR_BIAS_VERTICAL = 0x40,
+ CR_DIAGONAL_BALANCE = 0x80
+};
+
+/* nearness to the beginning of the search field from 0 to 1000 */
+struct nearness_factor {
+ s32 x;
+ s32 y;
+};
+
+/*
+ * Statistics on immediately neighboring slots. Edge is the number of
+ * border segments that are also border segments of the scan field. Busy
+ * refers to the number of neighbors that are occupied.
+ */
+struct neighbor_stats {
+ u16 edge;
+ u16 busy;
+};
+
+/* structure to keep the score of a potential allocation */
+struct score {
+ struct nearness_factor f;
+ struct neighbor_stats n;
+ struct tcm_area a;
+ u16 neighs; /* number of busy neighbors */
+};
+
+struct sita_pvt {
+ spinlock_t lock; /* spinlock to protect access */
+ struct tcm_pt div_pt; /* divider point splitting container */
+ struct tcm_area ***map; /* pointers to the parent area for each slot */
+};
+
+/* assign coordinates to area */
+static inline
+void assign(struct tcm_area *a, u16 x0, u16 y0, u16 x1, u16 y1)
+{
+ a->p0.x = x0;
+ a->p0.y = y0;
+ a->p1.x = x1;
+ a->p1.y = y1;
+}
+
+#endif
diff --git a/drivers/staging/omapdrm/tcm.h b/drivers/staging/omapdrm/tcm.h
new file mode 100644
index 0000000..d273e3e
--- /dev/null
+++ b/drivers/staging/omapdrm/tcm.h
@@ -0,0 +1,326 @@
+/*
+ * tcm.h
+ *
+ * TILER container manager specification and support functions for TI
+ * TILER driver.
+ *
+ * Author: Lajos Molnar <molnar@ti.com>
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Texas Instruments Incorporated nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+ * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+ * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+ * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TCM_H
+#define TCM_H
+
+struct tcm;
+
+/* point */
+struct tcm_pt {
+ u16 x;
+ u16 y;
+};
+
+/* 1d or 2d area */
+struct tcm_area {
+ bool is2d; /* whether area is 1d or 2d */
+ struct tcm *tcm; /* parent */
+ struct tcm_pt p0;
+ struct tcm_pt p1;
+};
+
+struct tcm {
+ u16 width, height; /* container dimensions */
+ int lut_id; /* Lookup table identifier */
+
+ /* 'pvt' structure shall contain any tcm details (attr) along with
+ linked list of allocated areas and mutex for mutually exclusive access
+ to the list. It may also contain copies of width and height to notice
+ any changes to the publicly available width and height fields. */
+ void *pvt;
+
+ /* function table */
+ s32 (*reserve_2d)(struct tcm *tcm, u16 height, u16 width, u8 align,
+ struct tcm_area *area);
+ s32 (*reserve_1d)(struct tcm *tcm, u32 slots, struct tcm_area *area);
+ s32 (*free) (struct tcm *tcm, struct tcm_area *area);
+ void (*deinit) (struct tcm *tcm);
+};
+
+/*=============================================================================
+ BASIC TILER CONTAINER MANAGER INTERFACE
+=============================================================================*/
+
+/*
+ * NOTE:
+ *
+ * Since some basic parameter checking is done outside the TCM algorithms,
+ * TCM implementations do NOT have to check the following:
+ *
+ * area pointer is NULL
+ * width and height fit within the container
+ * number of pages is more than the size of the container
+ *
+ */
+
+struct tcm *sita_init(u16 width, u16 height, struct tcm_pt *attr);
+
+
+/**
+ * Deinitialize tiler container manager.
+ *
+ * @param tcm Pointer to container manager.
+ *
+ * The call frees as much memory as possible and is safe to make
+ * even with a NULL manager or after a failed initialization.
+ */
+static inline void tcm_deinit(struct tcm *tcm)
+{
+ if (tcm)
+ tcm->deinit(tcm);
+}
+
+/**
+ * Reserves a 2D area in the container.
+ *
+ * @param tcm Pointer to container manager.
+ * @param height Height(in pages) of area to be reserved.
+ * @param width Width(in pages) of area to be reserved.
+ * @param align Alignment requirement for top-left corner of area. Not
+ * all values may be supported by the container manager,
+ * but it must support 0 (1), 32 and 64.
+ * 0 value is equivalent to 1.
+ * @param area Pointer to where the reserved area should be stored.
+ *
+ * @return 0 on success. Non-0 error code on failure. Also,
+ * the tcm field of the area will be set to NULL on
+ * failure. Some error codes: -ENODEV: invalid manager,
+ * -EINVAL: invalid area, -ENOMEM: not enough space for
+ * allocation.
+ */
+static inline s32 tcm_reserve_2d(struct tcm *tcm, u16 width, u16 height,
+ u16 align, struct tcm_area *area)
+{
+ /* perform rudimentary error checking */
+ s32 res = tcm == NULL ? -ENODEV :
+ (area == NULL || width == 0 || height == 0 ||
+ /* align must be a 2 power */
+ (align & (align - 1))) ? -EINVAL :
+ (height > tcm->height || width > tcm->width) ? -ENOMEM : 0;
+
+ if (!res) {
+ area->is2d = true;
+ res = tcm->reserve_2d(tcm, height, width, align, area);
+ area->tcm = res ? NULL : tcm;
+ }
+
+ return res;
+}
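A minimal usage sketch of this API (container dimensions, area size and alignment are arbitrary illustration values):

	static int example_tcm_usage(void)
	{
		struct tcm_area area;
		struct tcm *tcm = sita_init(256, 128, NULL);	/* 256x128 slots */
		s32 ret;

		if (!tcm)
			return -ENOMEM;

		/* reserve a 64x32 slot area aligned to 32 slots */
		ret = tcm_reserve_2d(tcm, 64, 32, 32, &area);
		if (!ret)
			tcm_free(&area);

		tcm_deinit(tcm);
		return ret;
	}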
+
+/**
+ * Reserves a 1D area in the container.
+ *
+ * @param tcm Pointer to container manager.
+ * @param slots Number of (contiguous) slots to reserve.
+ * @param area Pointer to where the reserved area should be stored.
+ *
+ * @return 0 on success. Non-0 error code on failure. Also,
+ * the tcm field of the area will be set to NULL on
+ * failure. Some error codes: -ENODEV: invalid manager,
+ * -EINVAL: invalid area, -ENOMEM: not enough space for
+ * allocation.
+ */
+static inline s32 tcm_reserve_1d(struct tcm *tcm, u32 slots,
+ struct tcm_area *area)
+{
+ /* perform rudimentary error checking */
+ s32 res = tcm == NULL ? -ENODEV :
+ (area == NULL || slots == 0) ? -EINVAL :
+ slots > (tcm->width * (u32) tcm->height) ? -ENOMEM : 0;
+
+ if (!res) {
+ area->is2d = false;
+ res = tcm->reserve_1d(tcm, slots, area);
+ area->tcm = res ? NULL : tcm;
+ }
+
+ return res;
+}
+
+/**
+ * Free a previously reserved area from the container.
+ *
+ * @param area Pointer to area reserved by a prior call to
+ * tcm_reserve_1d or tcm_reserve_2d call, whether
+ * it was successful or not. (Note: all fields of
+ * the structure must match.)
+ *
+ * @return 0 on success. Non-0 error code on failure. Also, the tcm
+ * field of the area is set to NULL on success to avoid subsequent
+ * freeing. This call will succeed even if supplying
+ * the area from a failed reserved call.
+ */
+static inline s32 tcm_free(struct tcm_area *area)
+{
+ s32 res = 0; /* free succeeds by default */
+
+ if (area && area->tcm) {
+ res = area->tcm->free(area->tcm, area);
+ if (res == 0)
+ area->tcm = NULL;
+ }
+
+ return res;
+}
+
+/*=============================================================================
+ HELPER FUNCTION FOR ANY TILER CONTAINER MANAGER
+=============================================================================*/
+
+/**
+ * This method slices off the topmost 2D slice from the parent area, and stores
+ * it in the 'slice' parameter. The 'parent' parameter will get modified to
+ * contain the remaining portion of the area. If the whole parent area can
+ * fit in a 2D slice, its tcm pointer is set to NULL to mark that it is no
+ * longer a valid area.
+ *
+ * @param parent Pointer to a VALID parent area that will get modified
+ * @param slice Pointer to the slice area that will get modified
+ */
+static inline void tcm_slice(struct tcm_area *parent, struct tcm_area *slice)
+{
+ *slice = *parent;
+
+ /* check if we need to slice */
+ if (slice->tcm && !slice->is2d &&
+ slice->p0.y != slice->p1.y &&
+ (slice->p0.x || (slice->p1.x != slice->tcm->width - 1))) {
+ /* set end point of slice (start always remains) */
+ slice->p1.x = slice->tcm->width - 1;
+ slice->p1.y = (slice->p0.x) ? slice->p0.y : slice->p1.y - 1;
+ /* adjust remaining area */
+ parent->p0.x = 0;
+ parent->p0.y = slice->p1.y + 1;
+ } else {
+ /* mark this as the last slice */
+ parent->tcm = NULL;
+ }
+}
+
+/* Verify if a tcm area is logically valid */
+static inline bool tcm_area_is_valid(struct tcm_area *area)
+{
+ return area && area->tcm &&
+ /* coordinate bounds */
+ area->p1.x < area->tcm->width &&
+ area->p1.y < area->tcm->height &&
+ area->p0.y <= area->p1.y &&
+ /* 1D coordinate relationship + p0.x check */
+ ((!area->is2d &&
+ area->p0.x < area->tcm->width &&
+ area->p0.x + area->p0.y * area->tcm->width <=
+ area->p1.x + area->p1.y * area->tcm->width) ||
+ /* 2D coordinate relationship */
+ (area->is2d &&
+ area->p0.x <= area->p1.x));
+}
+
+/* see if a coordinate is within an area */
+static inline bool __tcm_is_in(struct tcm_pt *p, struct tcm_area *a)
+{
+ u16 i;
+
+ if (a->is2d) {
+ return p->x >= a->p0.x && p->x <= a->p1.x &&
+ p->y >= a->p0.y && p->y <= a->p1.y;
+ } else {
+ i = p->x + p->y * a->tcm->width;
+ return i >= a->p0.x + a->p0.y * a->tcm->width &&
+ i <= a->p1.x + a->p1.y * a->tcm->width;
+ }
+}
+
+/* calculate area width */
+static inline u16 __tcm_area_width(struct tcm_area *area)
+{
+ return area->p1.x - area->p0.x + 1;
+}
+
+/* calculate area height */
+static inline u16 __tcm_area_height(struct tcm_area *area)
+{
+ return area->p1.y - area->p0.y + 1;
+}
+
+/* calculate number of slots in an area */
+static inline u16 __tcm_sizeof(struct tcm_area *area)
+{
+ return area->is2d ?
+ __tcm_area_width(area) * __tcm_area_height(area) :
+ (area->p1.x - area->p0.x + 1) + (area->p1.y - area->p0.y) *
+ area->tcm->width;
+}
+#define tcm_sizeof(area) __tcm_sizeof(&(area))
+#define tcm_awidth(area) __tcm_area_width(&(area))
+#define tcm_aheight(area) __tcm_area_height(&(area))
+#define tcm_is_in(pt, area) __tcm_is_in(&(pt), &(area))
+
+/* limit a 1D area to the first N pages */
+static inline s32 tcm_1d_limit(struct tcm_area *a, u32 num_pg)
+{
+ if (__tcm_sizeof(a) < num_pg)
+ return -ENOMEM;
+ if (!num_pg)
+ return -EINVAL;
+
+ a->p1.x = (a->p0.x + num_pg - 1) % a->tcm->width;
+ a->p1.y = a->p0.y + ((a->p0.x + num_pg - 1) / a->tcm->width);
+ return 0;
+}
+
+/**
+ * Iterate through 2D slices of a valid area. Behaves
+ * syntactically as a for(;;) statement.
+ *
+ * @param var Name of a local variable of type 'struct
+ * tcm_area' that will get modified to
+ * contain each slice.
+ * @param area The VALID parent area, passed by value as a
+ * 'struct tcm_area'. It will not get modified
+ * throughout the loop.
+ * @param safe Name of a second local 'struct tcm_area' used
+ * as scratch space by the iterator.
+ *
+ */
+#define tcm_for_each_slice(var, area, safe) \
+ for (safe = area, \
+ tcm_slice(&safe, &var); \
+ var.tcm; tcm_slice(&safe, &var))
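A usage sketch of the iterator, following the same pattern as fill_area() in tcm-sita.c; note that var and safe are plain 'struct tcm_area' locals and the parent area is passed by value:

	/* count the slots of an area slice by slice
	 * (same result as tcm_sizeof())
	 */
	static u32 example_count_slots(struct tcm_area *area)
	{
		struct tcm_area slice, safe;
		u32 n = 0;

		tcm_for_each_slice(slice, *area, safe)
			n += __tcm_area_width(&slice) * __tcm_area_height(&slice);

		return n;
	}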
+
+#endif
diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
index 683657c..d77b21f 100644
--- a/drivers/staging/phison/phison.c
+++ b/drivers/staging/phison/phison.c
@@ -70,7 +70,7 @@ static int phison_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
}
static DEFINE_PCI_DEVICE_TABLE(phison_pci_tbl) = {
- { PCI_VENDOR_ID_PHISON, PCI_DEVICE_ID_PS5000, PCI_ANY_ID, PCI_ANY_ID,
+ { PCI_DEVICE(PCI_VENDOR_ID_PHISON, PCI_DEVICE_ID_PS5000),
PCI_CLASS_STORAGE_IDE << 8, 0xffff00, 0 },
{ 0, },
};
diff --git a/drivers/staging/pohmelfs/Kconfig b/drivers/staging/pohmelfs/Kconfig
deleted file mode 100644
index 8d53b1a..0000000
--- a/drivers/staging/pohmelfs/Kconfig
+++ /dev/null
@@ -1,20 +0,0 @@
-config POHMELFS
- tristate "POHMELFS filesystem support"
- depends on NET
- select CONNECTOR
- select CRYPTO
- select CRYPTO_BLKCIPHER
- select CRYPTO_HMAC
- help
- POHMELFS stands for Parallel Optimized Host Message Exchange Layered
- File System. This is a network filesystem which supports coherent
- caching of data and metadata on clients.
-
-config POHMELFS_DEBUG
- bool "POHMELFS debugging"
- depends on POHMELFS
- default n
- help
- Turns on excessive POHMELFS debugging facilities.
- You usually do not want to slow things down noticeably and get really
- lots of kernel messages in syslog.
diff --git a/drivers/staging/pohmelfs/Makefile b/drivers/staging/pohmelfs/Makefile
deleted file mode 100644
index 196561c..0000000
--- a/drivers/staging/pohmelfs/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_POHMELFS) += pohmelfs.o
-
-pohmelfs-y := inode.o config.o dir.o net.o path_entry.o trans.o crypto.o lock.o mcache.o
diff --git a/drivers/staging/pohmelfs/config.c b/drivers/staging/pohmelfs/config.c
deleted file mode 100644
index b6c42cb..0000000
--- a/drivers/staging/pohmelfs/config.c
+++ /dev/null
@@ -1,611 +0,0 @@
-/*
- * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/connector.h>
-#include <linux/crypto.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/string.h>
-#include <linux/in.h>
-#include <linux/slab.h>
-
-#include "netfs.h"
-
-/*
- * Global configuration list.
- * Each client can be asked to get one of them.
- *
- * Allows to provide remote server address (ipv4/v6/whatever), port
- * and so on via kernel connector.
- */
-
-static struct cb_id pohmelfs_cn_id = {.idx = POHMELFS_CN_IDX, .val = POHMELFS_CN_VAL};
-static LIST_HEAD(pohmelfs_config_list);
-static DEFINE_MUTEX(pohmelfs_config_lock);
-
-static inline int pohmelfs_config_eql(struct pohmelfs_ctl *sc, struct pohmelfs_ctl *ctl)
-{
- if (sc->idx == ctl->idx && sc->type == ctl->type &&
- sc->proto == ctl->proto &&
- sc->addrlen == ctl->addrlen &&
- !memcmp(&sc->addr, &ctl->addr, ctl->addrlen))
- return 1;
-
- return 0;
-}
-
-static struct pohmelfs_config_group *pohmelfs_find_config_group(unsigned int idx)
-{
- struct pohmelfs_config_group *g, *group = NULL;
-
- list_for_each_entry(g, &pohmelfs_config_list, group_entry) {
- if (g->idx == idx) {
- group = g;
- break;
- }
- }
-
- return group;
-}
-
-static struct pohmelfs_config_group *pohmelfs_find_create_config_group(unsigned int idx)
-{
- struct pohmelfs_config_group *g;
-
- g = pohmelfs_find_config_group(idx);
- if (g)
- return g;
-
- g = kzalloc(sizeof(struct pohmelfs_config_group), GFP_KERNEL);
- if (!g)
- return NULL;
-
- INIT_LIST_HEAD(&g->config_list);
- g->idx = idx;
- g->num_entry = 0;
-
- list_add_tail(&g->group_entry, &pohmelfs_config_list);
-
- return g;
-}
-
-static inline void pohmelfs_insert_config_entry(struct pohmelfs_sb *psb, struct pohmelfs_config *dst)
-{
- struct pohmelfs_config *tmp;
-
- INIT_LIST_HEAD(&dst->config_entry);
-
- list_for_each_entry(tmp, &psb->state_list, config_entry) {
- if (dst->state.ctl.prio > tmp->state.ctl.prio)
- list_add_tail(&dst->config_entry, &tmp->config_entry);
- }
- if (list_empty(&dst->config_entry))
- list_add_tail(&dst->config_entry, &psb->state_list);
-}
-
-static int pohmelfs_move_config_entry(struct pohmelfs_sb *psb,
- struct pohmelfs_config *dst, struct pohmelfs_config *new)
-{
- if ((dst->state.ctl.prio == new->state.ctl.prio) &&
- (dst->state.ctl.perm == new->state.ctl.perm))
- return 0;
-
- dprintk("%s: dst: prio: %d, perm: %x, new: prio: %d, perm: %d.\n",
- __func__, dst->state.ctl.prio, dst->state.ctl.perm,
- new->state.ctl.prio, new->state.ctl.perm);
- dst->state.ctl.prio = new->state.ctl.prio;
- dst->state.ctl.perm = new->state.ctl.perm;
-
- list_del_init(&dst->config_entry);
- pohmelfs_insert_config_entry(psb, dst);
- return 0;
-}
-
-/*
- * pohmelfs_copy_config() is used to copy new state configs from the
- * config group (controlled by the netlink messages) into the superblock.
- * This happens either at startup time where no transactions can access
- * the list of the configs (and thus list of the network states), or at
- * run-time, where it is protected by the psb->state_lock.
- */
-int pohmelfs_copy_config(struct pohmelfs_sb *psb)
-{
- struct pohmelfs_config_group *g;
- struct pohmelfs_config *c, *dst;
- int err = -ENODEV;
-
- mutex_lock(&pohmelfs_config_lock);
-
- g = pohmelfs_find_config_group(psb->idx);
- if (!g)
- goto out_unlock;
-
-	/*
-	 * Run over all entries in the given config group and create and
-	 * initialize those that do not yet exist in the superblock list.
-	 * All existing entries are skipped.
-	 */
-
- list_for_each_entry(c, &g->config_list, config_entry) {
- err = 0;
- list_for_each_entry(dst, &psb->state_list, config_entry) {
- if (pohmelfs_config_eql(&dst->state.ctl, &c->state.ctl)) {
- err = pohmelfs_move_config_entry(psb, dst, c);
- if (!err)
- err = -EEXIST;
- break;
- }
- }
-
- if (err)
- continue;
-
- dst = kzalloc(sizeof(struct pohmelfs_config), GFP_KERNEL);
- if (!dst) {
- err = -ENOMEM;
- break;
- }
-
- memcpy(&dst->state.ctl, &c->state.ctl, sizeof(struct pohmelfs_ctl));
-
- pohmelfs_insert_config_entry(psb, dst);
-
- err = pohmelfs_state_init_one(psb, dst);
- if (err) {
- list_del(&dst->config_entry);
- kfree(dst);
- }
-
- err = 0;
- }
-
-out_unlock:
- mutex_unlock(&pohmelfs_config_lock);
-
- return err;
-}
-
-int pohmelfs_copy_crypto(struct pohmelfs_sb *psb)
-{
- struct pohmelfs_config_group *g;
- int err = -ENOENT;
-
- mutex_lock(&pohmelfs_config_lock);
- g = pohmelfs_find_config_group(psb->idx);
- if (!g)
- goto err_out_exit;
-
- if (g->hash_string) {
- err = -ENOMEM;
- psb->hash_string = kstrdup(g->hash_string, GFP_KERNEL);
- if (!psb->hash_string)
- goto err_out_exit;
- psb->hash_strlen = g->hash_strlen;
- }
-
- if (g->cipher_string) {
- psb->cipher_string = kstrdup(g->cipher_string, GFP_KERNEL);
- if (!psb->cipher_string)
- goto err_out_free_hash_string;
- psb->cipher_strlen = g->cipher_strlen;
- }
-
- if (g->hash_keysize) {
- psb->hash_key = kmemdup(g->hash_key, g->hash_keysize,
- GFP_KERNEL);
- if (!psb->hash_key)
- goto err_out_free_cipher_string;
- psb->hash_keysize = g->hash_keysize;
- }
-
- if (g->cipher_keysize) {
- psb->cipher_key = kmemdup(g->cipher_key, g->cipher_keysize,
- GFP_KERNEL);
- if (!psb->cipher_key)
- goto err_out_free_hash;
- psb->cipher_keysize = g->cipher_keysize;
- }
-
- mutex_unlock(&pohmelfs_config_lock);
-
- return 0;
-
-err_out_free_hash:
- kfree(psb->hash_key);
-err_out_free_cipher_string:
- kfree(psb->cipher_string);
-err_out_free_hash_string:
- kfree(psb->hash_string);
-err_out_exit:
- mutex_unlock(&pohmelfs_config_lock);
- return err;
-}
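
pohmelfs_copy_crypto() duplicates up to four optional buffers into the
superblock and, if a later duplication fails, unwinds only the ones already
allocated via cascading error labels. A stripped-down userspace illustration of
that goto-unwind idiom follows; the struct and function names are invented for
the example and plain strdup()/free() stand in for kstrdup()/kfree().

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct crypto_copy {
	char *hash_string;
	char *cipher_string;
};

/* Duplicate the optional strings; on failure free whatever was
 * already duplicated and return a negative errno. */
static int copy_crypto_strings(struct crypto_copy *dst,
			       const char *hash, const char *cipher)
{
	if (hash) {
		dst->hash_string = strdup(hash);
		if (!dst->hash_string)
			return -ENOMEM;
	}

	if (cipher) {
		dst->cipher_string = strdup(cipher);
		if (!dst->cipher_string)
			goto err_free_hash;
	}

	return 0;

err_free_hash:
	free(dst->hash_string);
	dst->hash_string = NULL;
	return -ENOMEM;
}

int main(void)
{
	struct crypto_copy c = { NULL, NULL };

	return copy_crypto_strings(&c, "sha256", "aes") ? 1 : 0;
}
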
-
-static int pohmelfs_send_reply(int err, int msg_num, int action, struct cn_msg *msg, struct pohmelfs_ctl *ctl)
-{
- struct pohmelfs_cn_ack *ack;
-
- ack = kzalloc(sizeof(struct pohmelfs_cn_ack), GFP_KERNEL);
- if (!ack)
- return -ENOMEM;
-
- memcpy(&ack->msg, msg, sizeof(struct cn_msg));
-
- if (action == POHMELFS_CTLINFO_ACK)
- memcpy(&ack->ctl, ctl, sizeof(struct pohmelfs_ctl));
-
- ack->msg.len = sizeof(struct pohmelfs_cn_ack) - sizeof(struct cn_msg);
- ack->msg.ack = msg->ack + 1;
- ack->error = err;
- ack->msg_num = msg_num;
-
- cn_netlink_send(&ack->msg, 0, GFP_KERNEL);
- kfree(ack);
- return 0;
-}
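
pohmelfs_send_reply() builds its acknowledgement by echoing the incoming cn_msg
header, bumping the ack field and setting msg.len to the number of payload
bytes that follow the header. The sketch below models that framing with
hypothetical userspace structs rather than the real connector types.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct msg_hdr {
	uint32_t seq;
	uint32_t ack;
	uint16_t len;		/* payload bytes after the header */
};

struct ack_msg {
	struct msg_hdr hdr;
	int32_t error;
	int32_t msg_num;
};

/* Build a reply: copy the request header, acknowledge it and report
 * the payload size, mirroring what pohmelfs_send_reply() does. */
static void build_ack(struct ack_msg *out, const struct msg_hdr *req,
		      int error, int msg_num)
{
	memset(out, 0, sizeof(*out));
	out->hdr = *req;
	out->hdr.ack = req->ack + 1;
	out->hdr.len = (uint16_t)(sizeof(*out) - sizeof(out->hdr));
	out->error = error;
	out->msg_num = msg_num;
}

int main(void)
{
	struct msg_hdr req = { .seq = 7, .ack = 41 };
	struct ack_msg reply;

	build_ack(&reply, &req, 0, 3);
	printf("ack %u, payload %u bytes\n",
	       (unsigned)reply.hdr.ack, (unsigned)reply.hdr.len);
	return 0;
}
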
-
-static int pohmelfs_cn_disp(struct cn_msg *msg)
-{
- struct pohmelfs_config_group *g;
- struct pohmelfs_ctl *ctl = (struct pohmelfs_ctl *)msg->data;
- struct pohmelfs_config *c, *tmp;
- int err = 0, i = 1;
-
- if (msg->len != sizeof(struct pohmelfs_ctl))
- return -EBADMSG;
-
- mutex_lock(&pohmelfs_config_lock);
-
- g = pohmelfs_find_config_group(ctl->idx);
- if (!g) {
- pohmelfs_send_reply(err, 0, POHMELFS_NOINFO_ACK, msg, NULL);
- goto out_unlock;
- }
-
- list_for_each_entry_safe(c, tmp, &g->config_list, config_entry) {
- struct pohmelfs_ctl *sc = &c->state.ctl;
- if (pohmelfs_send_reply(err, g->num_entry - i, POHMELFS_CTLINFO_ACK, msg, sc)) {
- err = -ENOMEM;
- goto out_unlock;
- }
- i += 1;
- }
-
- out_unlock:
- mutex_unlock(&pohmelfs_config_lock);
- return err;
-}
-
-static int pohmelfs_cn_dump(struct cn_msg *msg)
-{
- struct pohmelfs_config_group *g;
- struct pohmelfs_config *c, *tmp;
- int err = 0, i = 1;
- int total_msg = 0;
-
- if (msg->len != sizeof(struct pohmelfs_ctl))
- return -EBADMSG;
-
- mutex_lock(&pohmelfs_config_lock);
-
- list_for_each_entry(g, &pohmelfs_config_list, group_entry)
- total_msg += g->num_entry;
- if (total_msg == 0) {
- if (pohmelfs_send_reply(err, 0, POHMELFS_NOINFO_ACK, msg, NULL))
- err = -ENOMEM;
- goto out_unlock;
- }
-
- list_for_each_entry(g, &pohmelfs_config_list, group_entry) {
- list_for_each_entry_safe(c, tmp, &g->config_list,
- config_entry) {
- struct pohmelfs_ctl *sc = &c->state.ctl;
- if (pohmelfs_send_reply(err, total_msg - i,
- POHMELFS_CTLINFO_ACK, msg,
- sc)) {
- err = -ENOMEM;
- goto out_unlock;
- }
- i += 1;
- }
- }
-
-out_unlock:
- mutex_unlock(&pohmelfs_config_lock);
- return err;
-}
-
-static int pohmelfs_cn_flush(struct cn_msg *msg)
-{
- struct pohmelfs_config_group *g;
- struct pohmelfs_ctl *ctl = (struct pohmelfs_ctl *)msg->data;
- struct pohmelfs_config *c, *tmp;
- int err = 0;
-
- if (msg->len != sizeof(struct pohmelfs_ctl))
- return -EBADMSG;
-
- mutex_lock(&pohmelfs_config_lock);
-
- if (ctl->idx != POHMELFS_NULL_IDX) {
- g = pohmelfs_find_config_group(ctl->idx);
-
- if (!g)
- goto out_unlock;
-
- list_for_each_entry_safe(c, tmp, &g->config_list, config_entry) {
- list_del(&c->config_entry);
- g->num_entry--;
- kfree(c);
- }
- } else {
- list_for_each_entry(g, &pohmelfs_config_list, group_entry) {
- list_for_each_entry_safe(c, tmp, &g->config_list,
- config_entry) {
- list_del(&c->config_entry);
- g->num_entry--;
- kfree(c);
- }
- }
- }
-
-out_unlock:
- mutex_unlock(&pohmelfs_config_lock);
- pohmelfs_cn_dump(msg);
-
- return err;
-}
-
-static int pohmelfs_modify_config(struct pohmelfs_ctl *old, struct pohmelfs_ctl *new)
-{
- old->perm = new->perm;
- old->prio = new->prio;
- return 0;
-}
-
-static int pohmelfs_cn_ctl(struct cn_msg *msg, int action)
-{
- struct pohmelfs_config_group *g;
- struct pohmelfs_ctl *ctl = (struct pohmelfs_ctl *)msg->data;
- struct pohmelfs_config *c, *tmp;
- int err = 0;
-
- if (msg->len != sizeof(struct pohmelfs_ctl))
- return -EBADMSG;
-
- mutex_lock(&pohmelfs_config_lock);
-
- g = pohmelfs_find_create_config_group(ctl->idx);
- if (!g) {
- err = -ENOMEM;
- goto out_unlock;
- }
-
- list_for_each_entry_safe(c, tmp, &g->config_list, config_entry) {
- struct pohmelfs_ctl *sc = &c->state.ctl;
-
- if (pohmelfs_config_eql(sc, ctl)) {
- if (action == POHMELFS_FLAGS_ADD) {
- err = -EEXIST;
- goto out_unlock;
- } else if (action == POHMELFS_FLAGS_DEL) {
- list_del(&c->config_entry);
- g->num_entry--;
- kfree(c);
- goto out_unlock;
- } else if (action == POHMELFS_FLAGS_MODIFY) {
- err = pohmelfs_modify_config(sc, ctl);
- goto out_unlock;
- } else {
- err = -EEXIST;
- goto out_unlock;
- }
- }
- }
- if (action == POHMELFS_FLAGS_DEL) {
- err = -EBADMSG;
- goto out_unlock;
- }
-
- c = kzalloc(sizeof(struct pohmelfs_config), GFP_KERNEL);
- if (!c) {
- err = -ENOMEM;
- goto out_unlock;
- }
- memcpy(&c->state.ctl, ctl, sizeof(struct pohmelfs_ctl));
- g->num_entry++;
-
- list_add_tail(&c->config_entry, &g->config_list);
-
- out_unlock:
- mutex_unlock(&pohmelfs_config_lock);
- if (pohmelfs_send_reply(err, 0, POHMELFS_NOINFO_ACK, msg, NULL))
- err = -ENOMEM;
-
- return err;
-}
-
-static int pohmelfs_crypto_hash_init(struct pohmelfs_config_group *g, struct pohmelfs_crypto *c)
-{
- char *algo = (char *)c->data;
- u8 *key = (u8 *)(algo + c->strlen);
-
- if (g->hash_string)
- return -EEXIST;
-
- g->hash_string = kstrdup(algo, GFP_KERNEL);
- if (!g->hash_string)
- return -ENOMEM;
- g->hash_strlen = c->strlen;
- g->hash_keysize = c->keysize;
-
- g->hash_key = kmemdup(key, c->keysize, GFP_KERNEL);
- if (!g->hash_key) {
- kfree(g->hash_string);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int pohmelfs_crypto_cipher_init(struct pohmelfs_config_group *g, struct pohmelfs_crypto *c)
-{
- char *algo = (char *)c->data;
- u8 *key = (u8 *)(algo + c->strlen);
-
- if (g->cipher_string)
- return -EEXIST;
-
- g->cipher_string = kstrdup(algo, GFP_KERNEL);
- if (!g->cipher_string)
- return -ENOMEM;
- g->cipher_strlen = c->strlen;
- g->cipher_keysize = c->keysize;
-
- g->cipher_key = kmemdup(key, c->keysize, GFP_KERNEL);
- if (!g->cipher_key) {
- kfree(g->cipher_string);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-static int pohmelfs_cn_crypto(struct cn_msg *msg)
-{
- struct pohmelfs_crypto *crypto = (struct pohmelfs_crypto *)msg->data;
- struct pohmelfs_config_group *g;
- int err = 0;
-
- dprintk("%s: idx: %u, strlen: %u, type: %u, keysize: %u, algo: %s.\n",
- __func__, crypto->idx, crypto->strlen, crypto->type,
- crypto->keysize, (char *)crypto->data);
-
- mutex_lock(&pohmelfs_config_lock);
- g = pohmelfs_find_create_config_group(crypto->idx);
- if (!g) {
- err = -ENOMEM;
- goto out_unlock;
- }
-
- switch (crypto->type) {
- case POHMELFS_CRYPTO_HASH:
- err = pohmelfs_crypto_hash_init(g, crypto);
- break;
- case POHMELFS_CRYPTO_CIPHER:
- err = pohmelfs_crypto_cipher_init(g, crypto);
- break;
- default:
- err = -ENOTSUPP;
- break;
- }
-
-out_unlock:
- mutex_unlock(&pohmelfs_config_lock);
- if (pohmelfs_send_reply(err, 0, POHMELFS_NOINFO_ACK, msg, NULL))
- err = -ENOMEM;
-
- return err;
-}
-
-static void pohmelfs_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
-{
- int err;
-
- if (!cap_raised(current_cap(), CAP_SYS_ADMIN))
- return;
-
- switch (msg->flags) {
- case POHMELFS_FLAGS_ADD:
- case POHMELFS_FLAGS_DEL:
- case POHMELFS_FLAGS_MODIFY:
- err = pohmelfs_cn_ctl(msg, msg->flags);
- break;
- case POHMELFS_FLAGS_FLUSH:
- err = pohmelfs_cn_flush(msg);
- break;
- case POHMELFS_FLAGS_SHOW:
- err = pohmelfs_cn_disp(msg);
- break;
- case POHMELFS_FLAGS_DUMP:
- err = pohmelfs_cn_dump(msg);
- break;
- case POHMELFS_FLAGS_CRYPTO:
- err = pohmelfs_cn_crypto(msg);
- break;
- default:
- err = -ENOSYS;
- break;
- }
-}
-
-int pohmelfs_config_check(struct pohmelfs_config *config, int idx)
-{
- struct pohmelfs_ctl *ctl = &config->state.ctl;
- struct pohmelfs_config *tmp;
- int err = -ENOENT;
- struct pohmelfs_ctl *sc;
- struct pohmelfs_config_group *g;
-
- mutex_lock(&pohmelfs_config_lock);
-
- g = pohmelfs_find_config_group(ctl->idx);
- if (g) {
- list_for_each_entry(tmp, &g->config_list, config_entry) {
- sc = &tmp->state.ctl;
-
- if (pohmelfs_config_eql(sc, ctl)) {
- err = 0;
- break;
- }
- }
- }
-
- mutex_unlock(&pohmelfs_config_lock);
-
- return err;
-}
-
-int __init pohmelfs_config_init(void)
-{
- /* XXX remove (void *) cast when vanilla connector got synced */
- return cn_add_callback(&pohmelfs_cn_id, "pohmelfs", (void *)pohmelfs_cn_callback);
-}
-
-void pohmelfs_config_exit(void)
-{
- struct pohmelfs_config *c, *tmp;
- struct pohmelfs_config_group *g, *gtmp;
-
- cn_del_callback(&pohmelfs_cn_id);
-
- mutex_lock(&pohmelfs_config_lock);
- list_for_each_entry_safe(g, gtmp, &pohmelfs_config_list, group_entry) {
- list_for_each_entry_safe(c, tmp, &g->config_list, config_entry) {
- list_del(&c->config_entry);
- kfree(c);
- }
-
- list_del(&g->group_entry);
-
- kfree(g->hash_string);
-
- kfree(g->cipher_string);
-
- kfree(g);
- }
- mutex_unlock(&pohmelfs_config_lock);
-}
diff --git a/drivers/staging/pohmelfs/crypto.c b/drivers/staging/pohmelfs/crypto.c
deleted file mode 100644
index ad92771..0000000
--- a/drivers/staging/pohmelfs/crypto.c
+++ /dev/null
@@ -1,878 +0,0 @@
-/*
- * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/crypto.h>
-#include <linux/highmem.h>
-#include <linux/kthread.h>
-#include <linux/pagemap.h>
-#include <linux/scatterlist.h>
-#include <linux/slab.h>
-
-#include "netfs.h"
-
-static struct crypto_hash *pohmelfs_init_hash(struct pohmelfs_sb *psb)
-{
- int err;
- struct crypto_hash *hash;
-
- hash = crypto_alloc_hash(psb->hash_string, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(hash)) {
- err = PTR_ERR(hash);
- dprintk("%s: idx: %u: failed to allocate hash '%s', err: %d.\n",
- __func__, psb->idx, psb->hash_string, err);
- goto err_out_exit;
- }
-
- psb->crypto_attached_size = crypto_hash_digestsize(hash);
-
- if (!psb->hash_keysize)
- return hash;
-
- err = crypto_hash_setkey(hash, psb->hash_key, psb->hash_keysize);
- if (err) {
- dprintk("%s: idx: %u: failed to set key for hash '%s', err: %d.\n",
- __func__, psb->idx, psb->hash_string, err);
- goto err_out_free;
- }
-
- return hash;
-
-err_out_free:
- crypto_free_hash(hash);
-err_out_exit:
- return ERR_PTR(err);
-}
-
-static struct crypto_ablkcipher *pohmelfs_init_cipher(struct pohmelfs_sb *psb)
-{
- int err = -EINVAL;
- struct crypto_ablkcipher *cipher;
-
- if (!psb->cipher_keysize)
- goto err_out_exit;
-
- cipher = crypto_alloc_ablkcipher(psb->cipher_string, 0, 0);
- if (IS_ERR(cipher)) {
- err = PTR_ERR(cipher);
- dprintk("%s: idx: %u: failed to allocate cipher '%s', err: %d.\n",
- __func__, psb->idx, psb->cipher_string, err);
- goto err_out_exit;
- }
-
- crypto_ablkcipher_clear_flags(cipher, ~0);
-
- err = crypto_ablkcipher_setkey(cipher, psb->cipher_key, psb->cipher_keysize);
- if (err) {
- dprintk("%s: idx: %u: failed to set key for cipher '%s', err: %d.\n",
- __func__, psb->idx, psb->cipher_string, err);
- goto err_out_free;
- }
-
- return cipher;
-
-err_out_free:
- crypto_free_ablkcipher(cipher);
-err_out_exit:
- return ERR_PTR(err);
-}
-
-int pohmelfs_crypto_engine_init(struct pohmelfs_crypto_engine *e, struct pohmelfs_sb *psb)
-{
- int err;
-
- e->page_num = 0;
-
- e->size = PAGE_SIZE;
- e->data = kmalloc(e->size, GFP_KERNEL);
- if (!e->data) {
- err = -ENOMEM;
- goto err_out_exit;
- }
-
- if (psb->hash_string) {
- e->hash = pohmelfs_init_hash(psb);
- if (IS_ERR(e->hash)) {
- err = PTR_ERR(e->hash);
- e->hash = NULL;
- goto err_out_free;
- }
- }
-
- if (psb->cipher_string) {
- e->cipher = pohmelfs_init_cipher(psb);
- if (IS_ERR(e->cipher)) {
- err = PTR_ERR(e->cipher);
- e->cipher = NULL;
- goto err_out_free_hash;
- }
- }
-
- return 0;
-
-err_out_free_hash:
- crypto_free_hash(e->hash);
-err_out_free:
- kfree(e->data);
-err_out_exit:
- return err;
-}
-
-void pohmelfs_crypto_engine_exit(struct pohmelfs_crypto_engine *e)
-{
- crypto_free_hash(e->hash);
- crypto_free_ablkcipher(e->cipher);
- kfree(e->data);
-}
-
-static void pohmelfs_crypto_complete(struct crypto_async_request *req, int err)
-{
- struct pohmelfs_crypto_completion *c = req->data;
-
- if (err == -EINPROGRESS)
- return;
-
- dprintk("%s: req: %p, err: %d.\n", __func__, req, err);
- c->error = err;
- complete(&c->complete);
-}
-
-static int pohmelfs_crypto_process(struct ablkcipher_request *req,
- struct scatterlist *sg_dst, struct scatterlist *sg_src,
- void *iv, int enc, unsigned long timeout)
-{
- struct pohmelfs_crypto_completion complete;
- int err;
-
- init_completion(&complete.complete);
- complete.error = -EINPROGRESS;
-
- ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- pohmelfs_crypto_complete, &complete);
-
- ablkcipher_request_set_crypt(req, sg_src, sg_dst, sg_src->length, iv);
-
- if (enc)
- err = crypto_ablkcipher_encrypt(req);
- else
- err = crypto_ablkcipher_decrypt(req);
-
- switch (err) {
- case -EINPROGRESS:
- case -EBUSY:
- err = wait_for_completion_interruptible_timeout(&complete.complete,
- timeout);
- if (!err)
- err = -ETIMEDOUT;
- else if (err > 0)
- err = complete.error;
- break;
- default:
- break;
- }
-
- return err;
-}
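
pohmelfs_crypto_process() submits the cipher request and, when the API reports
-EINPROGRESS or -EBUSY, parks on a completion with a timeout: a zero wait
result becomes -ETIMEDOUT and a positive one is replaced by the error the
completion callback recorded. The pthread sketch below (compile with -pthread)
models only that wait-with-timeout handshake, not the crypto itself; all names
are hypothetical.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

struct op_completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
	int error;		/* filled in by the completion callback */
};

/* Called from the "async" side when the operation finishes. */
static void op_complete(struct op_completion *c, int error)
{
	pthread_mutex_lock(&c->lock);
	c->error = error;
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

/* Wait up to @timeout_ms; return -ETIMEDOUT or the callback's error. */
static int op_wait(struct op_completion *c, unsigned int timeout_ms)
{
	struct timespec ts;
	int ret = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_ms / 1000;
	ts.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&c->lock);
	while (!c->done && ret != ETIMEDOUT)
		ret = pthread_cond_timedwait(&c->cond, &c->lock, &ts);
	pthread_mutex_unlock(&c->lock);

	return c->done ? c->error : -ETIMEDOUT;
}

static struct op_completion comp = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
};

int main(void)
{
	op_complete(&comp, 0);		/* pretend the request already finished */
	printf("wait -> %d\n", op_wait(&comp, 100));
	return 0;
}
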
-
-int pohmelfs_crypto_process_input_data(struct pohmelfs_crypto_engine *e, u64 cmd_iv,
- void *data, struct page *page, unsigned int size)
-{
- int err;
- struct scatterlist sg;
-
- if (!e->cipher && !e->hash)
- return 0;
-
- dprintk("%s: eng: %p, iv: %llx, data: %p, page: %p/%lu, size: %u.\n",
- __func__, e, cmd_iv, data, page, (page) ? page->index : 0, size);
-
- if (data) {
- sg_init_one(&sg, data, size);
- } else {
- sg_init_table(&sg, 1);
- sg_set_page(&sg, page, size, 0);
- }
-
- if (e->cipher) {
- struct ablkcipher_request *req = e->data + crypto_hash_digestsize(e->hash);
- u8 iv[32];
-
- memset(iv, 0, sizeof(iv));
- memcpy(iv, &cmd_iv, sizeof(cmd_iv));
-
- ablkcipher_request_set_tfm(req, e->cipher);
-
- err = pohmelfs_crypto_process(req, &sg, &sg, iv, 0, e->timeout);
- if (err)
- goto err_out_exit;
- }
-
- if (e->hash) {
- struct hash_desc desc;
- void *dst = e->data + e->size/2;
-
- desc.tfm = e->hash;
- desc.flags = 0;
-
- err = crypto_hash_init(&desc);
- if (err)
- goto err_out_exit;
-
- err = crypto_hash_update(&desc, &sg, size);
- if (err)
- goto err_out_exit;
-
- err = crypto_hash_final(&desc, dst);
- if (err)
- goto err_out_exit;
-
- err = !!memcmp(dst, e->data, crypto_hash_digestsize(e->hash));
-
- if (err) {
-#ifdef CONFIG_POHMELFS_DEBUG
- unsigned int i;
- unsigned char *recv = e->data, *calc = dst;
-
- dprintk("%s: eng: %p, hash: %p, cipher: %p: iv : %llx, hash mismatch (recv/calc): ",
- __func__, e, e->hash, e->cipher, cmd_iv);
- for (i = 0; i < crypto_hash_digestsize(e->hash); ++i) {
-#if 0
- dprintka("%02x ", recv[i]);
- if (recv[i] != calc[i]) {
- dprintka("| calc byte: %02x.\n", calc[i]);
- break;
- }
-#else
- dprintka("%02x/%02x ", recv[i], calc[i]);
-#endif
- }
- dprintk("\n");
-#endif
- goto err_out_exit;
- } else {
- dprintk("%s: eng: %p, hash: %p, cipher: %p: hashes matched.\n",
- __func__, e, e->hash, e->cipher);
- }
- }
-
- dprintk("%s: eng: %p, size: %u, hash: %p, cipher: %p: completed.\n",
- __func__, e, e->size, e->hash, e->cipher);
-
- return 0;
-
-err_out_exit:
- dprintk("%s: eng: %p, hash: %p, cipher: %p: err: %d.\n",
- __func__, e, e->hash, e->cipher, err);
- return err;
-}
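
The input path above decrypts the payload in place, recomputes the hash into a
scratch area and compares it byte-for-byte with the digest that arrived at the
front of the engine buffer. The sketch below models that verify step over a
[digest | payload] frame, with a trivial XOR fold standing in for the real
hash; it is purely illustrative and the names are invented.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DIGEST_SIZE 4

/* Toy stand-in for the real digest: XOR-fold the payload. */
static void toy_digest(const uint8_t *data, size_t len, uint8_t *out)
{
	size_t i;

	memset(out, 0, DIGEST_SIZE);
	for (i = 0; i < len; i++)
		out[i % DIGEST_SIZE] ^= data[i];
}

/* Frame layout: [DIGEST_SIZE digest bytes][payload]. Returns 0 when the
 * recomputed digest matches the attached one, 1 otherwise. */
static int verify_frame(const uint8_t *frame, size_t frame_len)
{
	uint8_t calc[DIGEST_SIZE];

	if (frame_len < DIGEST_SIZE)
		return 1;

	toy_digest(frame + DIGEST_SIZE, frame_len - DIGEST_SIZE, calc);
	return !!memcmp(calc, frame, DIGEST_SIZE);
}

int main(void)
{
	uint8_t frame[DIGEST_SIZE + 5] = { 0 };

	memcpy(frame + DIGEST_SIZE, "hello", 5);
	toy_digest(frame + DIGEST_SIZE, 5, frame);	/* attach the digest */
	printf("verify: %d\n", verify_frame(frame, sizeof(frame)));
	return 0;
}
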
-
-static int pohmelfs_trans_iter(struct netfs_trans *t, struct pohmelfs_crypto_engine *e,
- int (*iterator) (struct pohmelfs_crypto_engine *e,
- struct scatterlist *dst,
- struct scatterlist *src))
-{
- void *data = t->iovec.iov_base + sizeof(struct netfs_cmd) + t->psb->crypto_attached_size;
- unsigned int size = t->iovec.iov_len - sizeof(struct netfs_cmd) - t->psb->crypto_attached_size;
- struct netfs_cmd *cmd = data;
- unsigned int sz, pages = t->attached_pages, i, csize, cmd_cmd, dpage_idx;
- struct scatterlist sg_src, sg_dst;
- int err;
-
- while (size) {
- cmd = data;
- cmd_cmd = __be16_to_cpu(cmd->cmd);
- csize = __be32_to_cpu(cmd->size);
- cmd->iv = __cpu_to_be64(e->iv);
-
- if (cmd_cmd == NETFS_READ_PAGES || cmd_cmd == NETFS_READ_PAGE)
- csize = __be16_to_cpu(cmd->ext);
-
- sz = csize + __be16_to_cpu(cmd->cpad) + sizeof(struct netfs_cmd);
-
- dprintk("%s: size: %u, sz: %u, cmd_size: %u, cmd_cpad: %u.\n",
- __func__, size, sz, __be32_to_cpu(cmd->size), __be16_to_cpu(cmd->cpad));
-
- data += sz;
- size -= sz;
-
- sg_init_one(&sg_src, cmd->data, sz - sizeof(struct netfs_cmd));
- sg_init_one(&sg_dst, cmd->data, sz - sizeof(struct netfs_cmd));
-
- err = iterator(e, &sg_dst, &sg_src);
- if (err)
- return err;
- }
-
- if (!pages)
- return 0;
-
- dpage_idx = 0;
- for (i = 0; i < t->page_num; ++i) {
- struct page *page = t->pages[i];
- struct page *dpage = e->pages[dpage_idx];
-
- if (!page)
- continue;
-
- sg_init_table(&sg_src, 1);
- sg_init_table(&sg_dst, 1);
- sg_set_page(&sg_src, page, page_private(page), 0);
- sg_set_page(&sg_dst, dpage, page_private(page), 0);
-
- err = iterator(e, &sg_dst, &sg_src);
- if (err)
- return err;
-
- pages--;
- if (!pages)
- break;
- dpage_idx++;
- }
-
- return 0;
-}
-
-static int pohmelfs_encrypt_iterator(struct pohmelfs_crypto_engine *e,
- struct scatterlist *sg_dst, struct scatterlist *sg_src)
-{
- struct ablkcipher_request *req = e->data;
- u8 iv[32];
-
- memset(iv, 0, sizeof(iv));
-
- memcpy(iv, &e->iv, sizeof(e->iv));
-
- return pohmelfs_crypto_process(req, sg_dst, sg_src, iv, 1, e->timeout);
-}
-
-static int pohmelfs_encrypt(struct pohmelfs_crypto_thread *tc)
-{
- struct netfs_trans *t = tc->trans;
- struct pohmelfs_crypto_engine *e = &tc->eng;
- struct ablkcipher_request *req = e->data;
-
- memset(req, 0, sizeof(struct ablkcipher_request));
- ablkcipher_request_set_tfm(req, e->cipher);
-
- e->iv = pohmelfs_gen_iv(t);
-
- return pohmelfs_trans_iter(t, e, pohmelfs_encrypt_iterator);
-}
-
-static int pohmelfs_hash_iterator(struct pohmelfs_crypto_engine *e,
- struct scatterlist *sg_dst, struct scatterlist *sg_src)
-{
- return crypto_hash_update(e->data, sg_src, sg_src->length);
-}
-
-static int pohmelfs_hash(struct pohmelfs_crypto_thread *tc)
-{
- struct pohmelfs_crypto_engine *e = &tc->eng;
- struct hash_desc *desc = e->data;
- unsigned char *dst = tc->trans->iovec.iov_base + sizeof(struct netfs_cmd);
- int err;
-
- desc->tfm = e->hash;
- desc->flags = 0;
-
- err = crypto_hash_init(desc);
- if (err)
- return err;
-
- err = pohmelfs_trans_iter(tc->trans, e, pohmelfs_hash_iterator);
- if (err)
- return err;
-
- err = crypto_hash_final(desc, dst);
- if (err)
- return err;
-
- {
- unsigned int i;
- dprintk("%s: ", __func__);
- for (i = 0; i < tc->psb->crypto_attached_size; ++i)
- dprintka("%02x ", dst[i]);
- dprintka("\n");
- }
-
- return 0;
-}
-
-static void pohmelfs_crypto_pages_free(struct pohmelfs_crypto_engine *e)
-{
- unsigned int i;
-
- for (i = 0; i < e->page_num; ++i)
- __free_page(e->pages[i]);
- kfree(e->pages);
-}
-
-static int pohmelfs_crypto_pages_alloc(struct pohmelfs_crypto_engine *e, struct pohmelfs_sb *psb)
-{
- unsigned int i;
-
- e->pages = kmalloc(psb->trans_max_pages * sizeof(struct page *), GFP_KERNEL);
- if (!e->pages)
- return -ENOMEM;
-
- for (i = 0; i < psb->trans_max_pages; ++i) {
- e->pages[i] = alloc_page(GFP_KERNEL);
- if (!e->pages[i])
- break;
- }
-
- e->page_num = i;
- if (!e->page_num)
- goto err_out_free;
-
- return 0;
-
-err_out_free:
- kfree(e->pages);
- return -ENOMEM;
-}
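
pohmelfs_crypto_pages_alloc() is best-effort: it allocates as many scratch
pages as it can, keeps the partial set if at least one allocation succeeded and
fails only when nothing at all could be allocated. The same policy in
standalone C, with malloc() standing in for alloc_page() and hypothetical
names:

#include <stdio.h>
#include <stdlib.h>

struct scratch_pool {
	void **bufs;
	unsigned int count;
};

/* Try to allocate up to @want buffers of @size bytes each; keep the
 * partial result, fail only if not a single buffer was obtained. */
static int scratch_pool_init(struct scratch_pool *p, unsigned int want,
			     size_t size)
{
	unsigned int i;

	p->bufs = malloc(want * sizeof(*p->bufs));
	if (!p->bufs)
		return -1;

	for (i = 0; i < want; i++) {
		p->bufs[i] = malloc(size);
		if (!p->bufs[i])
			break;
	}

	p->count = i;
	if (!p->count) {
		free(p->bufs);
		return -1;
	}

	return 0;
}

static void scratch_pool_free(struct scratch_pool *p)
{
	unsigned int i;

	for (i = 0; i < p->count; i++)
		free(p->bufs[i]);
	free(p->bufs);
}

int main(void)
{
	struct scratch_pool pool;

	if (!scratch_pool_init(&pool, 8, 4096)) {
		printf("got %u buffers\n", pool.count);
		scratch_pool_free(&pool);
	}
	return 0;
}
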
-
-static void pohmelfs_sys_crypto_exit_one(struct pohmelfs_crypto_thread *t)
-{
- struct pohmelfs_sb *psb = t->psb;
-
- if (t->thread)
- kthread_stop(t->thread);
-
- mutex_lock(&psb->crypto_thread_lock);
- list_del(&t->thread_entry);
- psb->crypto_thread_num--;
- mutex_unlock(&psb->crypto_thread_lock);
-
- pohmelfs_crypto_engine_exit(&t->eng);
- pohmelfs_crypto_pages_free(&t->eng);
- kfree(t);
-}
-
-static int pohmelfs_crypto_finish(struct netfs_trans *t, struct pohmelfs_sb *psb, int err)
-{
- struct netfs_cmd *cmd = t->iovec.iov_base;
- netfs_convert_cmd(cmd);
-
- if (likely(!err))
- err = netfs_trans_finish_send(t, psb);
-
- t->result = err;
- netfs_trans_put(t);
-
- return err;
-}
-
-void pohmelfs_crypto_thread_make_ready(struct pohmelfs_crypto_thread *th)
-{
- struct pohmelfs_sb *psb = th->psb;
-
- th->page = NULL;
- th->trans = NULL;
-
- mutex_lock(&psb->crypto_thread_lock);
- list_move_tail(&th->thread_entry, &psb->crypto_ready_list);
- mutex_unlock(&psb->crypto_thread_lock);
- wake_up(&psb->wait);
-}
-
-static int pohmelfs_crypto_thread_trans(struct pohmelfs_crypto_thread *t)
-{
- struct netfs_trans *trans;
- int err = 0;
-
- trans = t->trans;
- trans->eng = NULL;
-
- if (t->eng.hash) {
- err = pohmelfs_hash(t);
- if (err)
- goto out_complete;
- }
-
- if (t->eng.cipher) {
- err = pohmelfs_encrypt(t);
- if (err)
- goto out_complete;
- trans->eng = &t->eng;
- }
-
-out_complete:
- t->page = NULL;
- t->trans = NULL;
-
- if (!trans->eng)
- pohmelfs_crypto_thread_make_ready(t);
-
- pohmelfs_crypto_finish(trans, t->psb, err);
- return err;
-}
-
-static int pohmelfs_crypto_thread_page(struct pohmelfs_crypto_thread *t)
-{
- struct pohmelfs_crypto_engine *e = &t->eng;
- struct page *page = t->page;
- int err;
-
- WARN_ON(!PageChecked(page));
-
- err = pohmelfs_crypto_process_input_data(e, e->iv, NULL, page, t->size);
- if (!err)
- SetPageUptodate(page);
- else
- SetPageError(page);
- unlock_page(page);
- page_cache_release(page);
-
- pohmelfs_crypto_thread_make_ready(t);
-
- return err;
-}
-
-static int pohmelfs_crypto_thread_func(void *data)
-{
- struct pohmelfs_crypto_thread *t = data;
-
- while (!kthread_should_stop()) {
- wait_event_interruptible(t->wait, kthread_should_stop() ||
- t->trans || t->page);
-
- if (kthread_should_stop())
- break;
-
- if (!t->trans && !t->page)
- continue;
-
- dprintk("%s: thread: %p, trans: %p, page: %p.\n",
- __func__, t, t->trans, t->page);
-
- if (t->trans)
- pohmelfs_crypto_thread_trans(t);
- else if (t->page)
- pohmelfs_crypto_thread_page(t);
- }
-
- return 0;
-}
-
-static void pohmelfs_crypto_flush(struct pohmelfs_sb *psb, struct list_head *head)
-{
- while (!list_empty(head)) {
- struct pohmelfs_crypto_thread *t = NULL;
-
- mutex_lock(&psb->crypto_thread_lock);
- if (!list_empty(head)) {
- t = list_first_entry(head, struct pohmelfs_crypto_thread, thread_entry);
- list_del_init(&t->thread_entry);
- }
- mutex_unlock(&psb->crypto_thread_lock);
-
- if (t)
- pohmelfs_sys_crypto_exit_one(t);
- }
-}
-
-static void pohmelfs_sys_crypto_exit(struct pohmelfs_sb *psb)
-{
- while (!list_empty(&psb->crypto_active_list) || !list_empty(&psb->crypto_ready_list)) {
- dprintk("%s: crypto_thread_num: %u.\n", __func__, psb->crypto_thread_num);
- pohmelfs_crypto_flush(psb, &psb->crypto_active_list);
- pohmelfs_crypto_flush(psb, &psb->crypto_ready_list);
- }
-}
-
-static int pohmelfs_sys_crypto_init(struct pohmelfs_sb *psb)
-{
- unsigned int i;
- struct pohmelfs_crypto_thread *t;
- struct pohmelfs_config *c;
- struct netfs_state *st;
- int err;
-
- list_for_each_entry(c, &psb->state_list, config_entry) {
- st = &c->state;
-
- err = pohmelfs_crypto_engine_init(&st->eng, psb);
- if (err)
- goto err_out_exit;
-
- dprintk("%s: st: %p, eng: %p, hash: %p, cipher: %p.\n",
- __func__, st, &st->eng, &st->eng.hash, &st->eng.cipher);
- }
-
- for (i = 0; i < psb->crypto_thread_num; ++i) {
- err = -ENOMEM;
- t = kzalloc(sizeof(struct pohmelfs_crypto_thread), GFP_KERNEL);
- if (!t)
- goto err_out_free_state_engines;
-
- init_waitqueue_head(&t->wait);
-
- t->psb = psb;
- t->trans = NULL;
- t->eng.thread = t;
-
- err = pohmelfs_crypto_engine_init(&t->eng, psb);
- if (err)
- goto err_out_free_state_engines;
-
- err = pohmelfs_crypto_pages_alloc(&t->eng, psb);
- if (err)
- goto err_out_free;
-
- t->thread = kthread_run(pohmelfs_crypto_thread_func, t,
- "pohmelfs-crypto-%d-%d", psb->idx, i);
- if (IS_ERR(t->thread)) {
- err = PTR_ERR(t->thread);
- t->thread = NULL;
- goto err_out_free;
- }
-
- if (t->eng.cipher)
- psb->crypto_align_size = crypto_ablkcipher_blocksize(t->eng.cipher);
-
- mutex_lock(&psb->crypto_thread_lock);
- list_add_tail(&t->thread_entry, &psb->crypto_ready_list);
- mutex_unlock(&psb->crypto_thread_lock);
- }
-
- psb->crypto_thread_num = i;
- return 0;
-
-err_out_free:
- pohmelfs_sys_crypto_exit_one(t);
-err_out_free_state_engines:
- list_for_each_entry(c, &psb->state_list, config_entry) {
- st = &c->state;
- pohmelfs_crypto_engine_exit(&st->eng);
- }
-err_out_exit:
- pohmelfs_sys_crypto_exit(psb);
- return err;
-}
-
-void pohmelfs_crypto_exit(struct pohmelfs_sb *psb)
-{
- pohmelfs_sys_crypto_exit(psb);
-
- kfree(psb->hash_string);
- kfree(psb->cipher_string);
-}
-
-static int pohmelfs_crypt_init_complete(struct page **pages, unsigned int page_num,
- void *private, int err)
-{
- struct pohmelfs_sb *psb = private;
-
- psb->flags = -err;
- dprintk("%s: err: %d.\n", __func__, err);
-
- wake_up(&psb->wait);
-
- return err;
-}
-
-static int pohmelfs_crypto_init_handshake(struct pohmelfs_sb *psb)
-{
- struct netfs_trans *t;
- struct netfs_crypto_capabilities *cap;
- struct netfs_cmd *cmd;
- char *str;
- int err = -ENOMEM, size;
-
- size = sizeof(struct netfs_crypto_capabilities) +
- psb->cipher_strlen + psb->hash_strlen + 2; /* 0 bytes */
-
- t = netfs_trans_alloc(psb, size, 0, 0);
- if (!t)
- goto err_out_exit;
-
- t->complete = pohmelfs_crypt_init_complete;
- t->private = psb;
-
- cmd = netfs_trans_current(t);
- cap = (struct netfs_crypto_capabilities *)(cmd + 1);
- str = (char *)(cap + 1);
-
- cmd->cmd = NETFS_CAPABILITIES;
- cmd->id = POHMELFS_CRYPTO_CAPABILITIES;
- cmd->size = size;
- cmd->start = 0;
- cmd->ext = 0;
- cmd->csize = 0;
-
- netfs_convert_cmd(cmd);
- netfs_trans_update(cmd, t, size);
-
- cap->hash_strlen = psb->hash_strlen;
- if (cap->hash_strlen) {
- sprintf(str, "%s", psb->hash_string);
- str += cap->hash_strlen;
- }
-
- cap->cipher_strlen = psb->cipher_strlen;
- cap->cipher_keysize = psb->cipher_keysize;
- if (cap->cipher_strlen)
- sprintf(str, "%s", psb->cipher_string);
-
- netfs_convert_crypto_capabilities(cap);
-
- psb->flags = ~0;
- err = netfs_trans_finish(t, psb);
- if (err)
- goto err_out_exit;
-
- err = wait_event_interruptible_timeout(psb->wait, (psb->flags != ~0),
- psb->wait_on_page_timeout);
- if (!err)
- err = -ETIMEDOUT;
- else if (err > 0)
- err = -psb->flags;
-
- if (!err)
- psb->perform_crypto = 1;
- psb->flags = 0;
-
-	/*
-	 * At this point the NETFS_CAPABILITIES response command should set up
-	 * the superblock in a way that is acceptable to both client and server,
-	 * so if the server refuses the connection, it will return an error in
-	 * the transaction response.
-	 */
-
- if (err)
- goto err_out_exit;
-
- return 0;
-
-err_out_exit:
- return err;
-}
-
-int pohmelfs_crypto_init(struct pohmelfs_sb *psb)
-{
- int err;
-
- if (!psb->cipher_string && !psb->hash_string)
- return 0;
-
- err = pohmelfs_crypto_init_handshake(psb);
- if (err)
- return err;
-
- err = pohmelfs_sys_crypto_init(psb);
- if (err)
- return err;
-
- return 0;
-}
-
-static int pohmelfs_crypto_thread_get(struct pohmelfs_sb *psb,
- int (*action)(struct pohmelfs_crypto_thread *t, void *data), void *data)
-{
- struct pohmelfs_crypto_thread *t = NULL;
- int err;
-
- while (!t) {
- err = wait_event_interruptible_timeout(psb->wait,
- !list_empty(&psb->crypto_ready_list),
- psb->wait_on_page_timeout);
-
- t = NULL;
- err = 0;
- mutex_lock(&psb->crypto_thread_lock);
- if (!list_empty(&psb->crypto_ready_list)) {
- t = list_entry(psb->crypto_ready_list.prev,
- struct pohmelfs_crypto_thread,
- thread_entry);
-
- list_move_tail(&t->thread_entry,
- &psb->crypto_active_list);
-
- action(t, data);
- wake_up(&t->wait);
-
- }
- mutex_unlock(&psb->crypto_thread_lock);
- }
-
- return err;
-}
-
-static int pohmelfs_trans_crypt_action(struct pohmelfs_crypto_thread *t, void *data)
-{
- struct netfs_trans *trans = data;
-
- netfs_trans_get(trans);
- t->trans = trans;
-
- dprintk("%s: t: %p, gen: %u, thread: %p.\n", __func__, trans, trans->gen, t);
- return 0;
-}
-
-int pohmelfs_trans_crypt(struct netfs_trans *trans, struct pohmelfs_sb *psb)
-{
- if ((!psb->hash_string && !psb->cipher_string) || !psb->perform_crypto) {
- netfs_trans_get(trans);
- return pohmelfs_crypto_finish(trans, psb, 0);
- }
-
- return pohmelfs_crypto_thread_get(psb, pohmelfs_trans_crypt_action, trans);
-}
-
-struct pohmelfs_crypto_input_action_data {
- struct page *page;
- struct pohmelfs_crypto_engine *e;
- u64 iv;
- unsigned int size;
-};
-
-static int pohmelfs_crypt_input_page_action(struct pohmelfs_crypto_thread *t, void *data)
-{
- struct pohmelfs_crypto_input_action_data *act = data;
-
- memcpy(t->eng.data, act->e->data, t->psb->crypto_attached_size);
-
- t->size = act->size;
- t->eng.iv = act->iv;
-
- t->page = act->page;
- return 0;
-}
-
-int pohmelfs_crypto_process_input_page(struct pohmelfs_crypto_engine *e,
- struct page *page, unsigned int size, u64 iv)
-{
- struct inode *inode = page->mapping->host;
- struct pohmelfs_crypto_input_action_data act;
- int err = -ENOENT;
-
- act.page = page;
- act.e = e;
- act.size = size;
- act.iv = iv;
-
- err = pohmelfs_crypto_thread_get(POHMELFS_SB(inode->i_sb),
- pohmelfs_crypt_input_page_action, &act);
- if (err)
- goto err_out_exit;
-
- return 0;
-
-err_out_exit:
- SetPageUptodate(page);
- page_cache_release(page);
-
- return err;
-}
diff --git a/drivers/staging/pohmelfs/dir.c b/drivers/staging/pohmelfs/dir.c
deleted file mode 100644
index 7598e77..0000000
--- a/drivers/staging/pohmelfs/dir.c
+++ /dev/null
@@ -1,1101 +0,0 @@
-/*
- * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/jhash.h>
-#include <linux/namei.h>
-#include <linux/slab.h>
-#include <linux/pagemap.h>
-
-#include "netfs.h"
-
-static int pohmelfs_cmp_hash(struct pohmelfs_name *n, u32 hash)
-{
- if (n->hash > hash)
- return -1;
- if (n->hash < hash)
- return 1;
-
- return 0;
-}
-
-static struct pohmelfs_name *pohmelfs_search_hash_unprecise(struct pohmelfs_inode *pi, u32 hash)
-{
- struct rb_node *n = pi->hash_root.rb_node;
- struct pohmelfs_name *tmp = NULL;
- int cmp;
-
- while (n) {
- tmp = rb_entry(n, struct pohmelfs_name, hash_node);
-
- cmp = pohmelfs_cmp_hash(tmp, hash);
- if (cmp < 0)
- n = n->rb_left;
- else if (cmp > 0)
- n = n->rb_right;
- else
- break;
-
- }
-
- return tmp;
-}
-
-struct pohmelfs_name *pohmelfs_search_hash(struct pohmelfs_inode *pi, u32 hash)
-{
- struct pohmelfs_name *tmp;
-
- tmp = pohmelfs_search_hash_unprecise(pi, hash);
- if (tmp && (tmp->hash == hash))
- return tmp;
-
- return NULL;
-}
-
-static void __pohmelfs_name_del(struct pohmelfs_inode *parent, struct pohmelfs_name *node)
-{
- rb_erase(&node->hash_node, &parent->hash_root);
-}
-
-/*
- * Remove name cache entry from its caches and free it.
- */
-static void pohmelfs_name_free(struct pohmelfs_inode *parent, struct pohmelfs_name *node)
-{
- __pohmelfs_name_del(parent, node);
- list_del(&node->sync_create_entry);
- kfree(node);
-}
-
-static struct pohmelfs_name *pohmelfs_insert_hash(struct pohmelfs_inode *pi,
- struct pohmelfs_name *new)
-{
- struct rb_node **n = &pi->hash_root.rb_node, *parent = NULL;
- struct pohmelfs_name *ret = NULL, *tmp;
- int cmp;
-
- while (*n) {
- parent = *n;
-
- tmp = rb_entry(parent, struct pohmelfs_name, hash_node);
-
- cmp = pohmelfs_cmp_hash(tmp, new->hash);
- if (cmp < 0)
- n = &parent->rb_left;
- else if (cmp > 0)
- n = &parent->rb_right;
- else {
- ret = tmp;
- break;
- }
- }
-
- if (ret) {
- printk("%s: exist: parent: %llu, ino: %llu, hash: %x, len: %u, data: '%s', "
- "new: ino: %llu, hash: %x, len: %u, data: '%s'.\n",
- __func__, pi->ino,
- ret->ino, ret->hash, ret->len, ret->data,
- new->ino, new->hash, new->len, new->data);
- ret->ino = new->ino;
- return ret;
- }
-
- rb_link_node(&new->hash_node, parent, n);
- rb_insert_color(&new->hash_node, &pi->hash_root);
-
- return NULL;
-}
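
The directory name cache keys its rbtree on the 32-bit jhash of the entry name
and descends the tree with the three-way comparison shown above, reporting a
duplicate when an equal hash is found. A simplified userspace sketch of that
lookup/insert discipline follows, using an unbalanced binary search tree
instead of the kernel rbtree; the names are hypothetical.

#include <stdint.h>
#include <stdio.h>

struct name_node {
	uint32_t hash;
	struct name_node *left, *right;
};

/* Same convention as pohmelfs_cmp_hash(): -1 if the node's hash is
 * larger, 1 if smaller, 0 on a match. */
static int cmp_hash(const struct name_node *n, uint32_t hash)
{
	if (n->hash > hash)
		return -1;
	if (n->hash < hash)
		return 1;
	return 0;
}

/* Insert @entry unless a node with the same hash exists; return the
 * existing node in that case, NULL otherwise. */
static struct name_node *name_insert(struct name_node **root,
				     struct name_node *entry)
{
	struct name_node **pp = root;

	while (*pp) {
		int cmp = cmp_hash(*pp, entry->hash);

		if (cmp < 0)
			pp = &(*pp)->left;
		else if (cmp > 0)
			pp = &(*pp)->right;
		else
			return *pp;	/* duplicate hash */
	}

	*pp = entry;
	return NULL;
}

int main(void)
{
	struct name_node a = { .hash = 10 }, b = { .hash = 3 };
	struct name_node dup = { .hash = 10 };
	struct name_node *root = NULL;

	name_insert(&root, &a);
	name_insert(&root, &b);
	printf("duplicate: %s\n", name_insert(&root, &dup) ? "yes" : "no");
	return 0;
}
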
-
-/*
- * Free name cache for given inode.
- */
-void pohmelfs_free_names(struct pohmelfs_inode *parent)
-{
- struct rb_node *rb_node;
- struct pohmelfs_name *n;
-
- for (rb_node = rb_first(&parent->hash_root); rb_node;) {
- n = rb_entry(rb_node, struct pohmelfs_name, hash_node);
- rb_node = rb_next(rb_node);
-
- pohmelfs_name_free(parent, n);
- }
-}
-
-static void pohmelfs_fix_offset(struct pohmelfs_inode *parent, struct pohmelfs_name *node)
-{
- parent->total_len -= node->len;
-}
-
-/*
- * Free name cache entry helper.
- */
-void pohmelfs_name_del(struct pohmelfs_inode *parent, struct pohmelfs_name *node)
-{
- pohmelfs_fix_offset(parent, node);
- pohmelfs_name_free(parent, node);
-}
-
-/*
- * Insert new name cache entry into all hash cache.
- */
-static int pohmelfs_insert_name(struct pohmelfs_inode *parent, struct pohmelfs_name *n)
-{
- struct pohmelfs_name *name;
-
- name = pohmelfs_insert_hash(parent, n);
- if (name)
- return -EEXIST;
-
- parent->total_len += n->len;
- list_add_tail(&n->sync_create_entry, &parent->sync_create_list);
-
- return 0;
-}
-
-/*
- * Allocate new name cache entry.
- */
-static struct pohmelfs_name *pohmelfs_name_alloc(unsigned int len)
-{
- struct pohmelfs_name *n;
-
- n = kzalloc(sizeof(struct pohmelfs_name) + len, GFP_KERNEL);
- if (!n)
- return NULL;
-
- INIT_LIST_HEAD(&n->sync_create_entry);
-
- n->data = (char *)(n+1);
-
- return n;
-}
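
pohmelfs_name_alloc() grabs the struct and the name bytes in a single
allocation and points data just past the struct, so one kfree() releases both.
The same trick in standalone C, with invented names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct name_entry {
	unsigned int len;
	char *data;		/* points into the same allocation */
};

static struct name_entry *name_entry_alloc(const char *name)
{
	size_t len = strlen(name);
	struct name_entry *n;

	/* One allocation: the struct followed by the string and its NUL. */
	n = calloc(1, sizeof(*n) + len + 1);
	if (!n)
		return NULL;

	n->len = (unsigned int)len;
	n->data = (char *)(n + 1);
	memcpy(n->data, name, len + 1);
	return n;
}

int main(void)
{
	struct name_entry *n = name_entry_alloc("example");

	if (n) {
		printf("%u: %s\n", n->len, n->data);
		free(n);	/* frees the struct and the string together */
	}
	return 0;
}
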
-
-/*
- * Add new name entry into directory's cache.
- */
-static int pohmelfs_add_dir(struct pohmelfs_sb *psb, struct pohmelfs_inode *parent,
- struct pohmelfs_inode *npi, struct qstr *str, unsigned int mode, int link)
-{
- int err = -ENOMEM;
- struct pohmelfs_name *n;
-
- n = pohmelfs_name_alloc(str->len + 1);
- if (!n)
- goto err_out_exit;
-
- n->ino = npi->ino;
- n->mode = mode;
- n->len = str->len;
- n->hash = str->hash;
- sprintf(n->data, "%s", str->name);
-
- mutex_lock(&parent->offset_lock);
- err = pohmelfs_insert_name(parent, n);
- mutex_unlock(&parent->offset_lock);
-
- if (err) {
- if (err != -EEXIST)
- goto err_out_free;
- kfree(n);
- }
-
- return 0;
-
-err_out_free:
- kfree(n);
-err_out_exit:
- return err;
-}
-
-/*
- * Create a new inode for the given parameters (name, inode info, parent).
- * This does not create the object on the server; it will be synced there
- * during writeback.
- */
-struct pohmelfs_inode *pohmelfs_new_inode(struct pohmelfs_sb *psb,
- struct pohmelfs_inode *parent, struct qstr *str,
- struct netfs_inode_info *info, int link)
-{
- struct inode *new = NULL;
- struct pohmelfs_inode *npi;
- int err = -EEXIST;
-
- dprintk("%s: creating inode: parent: %llu, ino: %llu, str: %p.\n",
- __func__, (parent) ? parent->ino : 0, info->ino, str);
-
- err = -ENOMEM;
- new = iget_locked(psb->sb, info->ino);
- if (!new)
- goto err_out_exit;
-
- npi = POHMELFS_I(new);
- npi->ino = info->ino;
- err = 0;
-
- if (new->i_state & I_NEW) {
- dprintk("%s: filling VFS inode: %lu/%llu.\n",
- __func__, new->i_ino, info->ino);
- pohmelfs_fill_inode(new, info);
-
- if (S_ISDIR(info->mode)) {
- struct qstr s;
-
- s.name = ".";
- s.len = 1;
- s.hash = jhash(s.name, s.len, 0);
-
- err = pohmelfs_add_dir(psb, npi, npi, &s, info->mode, 0);
- if (err)
- goto err_out_put;
-
- s.name = "..";
- s.len = 2;
- s.hash = jhash(s.name, s.len, 0);
-
- err = pohmelfs_add_dir(psb, npi, (parent) ? parent : npi, &s,
- (parent) ? parent->vfs_inode.i_mode : npi->vfs_inode.i_mode, 0);
- if (err)
- goto err_out_put;
- }
- }
-
- if (str) {
- if (parent) {
- err = pohmelfs_add_dir(psb, parent, npi, str, info->mode, link);
-
- dprintk("%s: %s inserted name: '%s', new_offset: %llu, ino: %llu, parent: %llu.\n",
- __func__, (err) ? "unsuccessfully" : "successfully",
- str->name, parent->total_len, info->ino, parent->ino);
-
- if (err && err != -EEXIST)
- goto err_out_put;
- }
- }
-
- if (new->i_state & I_NEW) {
- if (parent)
- mark_inode_dirty(&parent->vfs_inode);
- mark_inode_dirty(new);
- }
-
- set_bit(NETFS_INODE_OWNED, &npi->state);
- npi->lock_type = POHMELFS_WRITE_LOCK;
- unlock_new_inode(new);
-
- return npi;
-
-err_out_put:
- printk("%s: putting inode: %p, npi: %p, error: %d.\n", __func__, new, npi, err);
- iput(new);
-err_out_exit:
- return ERR_PTR(err);
-}
-
-static int pohmelfs_remote_sync_complete(struct page **pages, unsigned int page_num,
- void *private, int err)
-{
- struct pohmelfs_inode *pi = private;
- struct pohmelfs_sb *psb = POHMELFS_SB(pi->vfs_inode.i_sb);
-
- dprintk("%s: ino: %llu, err: %d.\n", __func__, pi->ino, err);
-
- if (err)
- pi->error = err;
- wake_up(&psb->wait);
- pohmelfs_put_inode(pi);
-
- return err;
-}
-
-/*
- * Receive directory content from the server.
- * This should only be done for objects that were not created locally
- * and have not been synced previously.
- */
-static int pohmelfs_sync_remote_dir(struct pohmelfs_inode *pi)
-{
- struct inode *inode = &pi->vfs_inode;
- struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
- long ret = psb->wait_on_page_timeout;
- int err;
-
- dprintk("%s: dir: %llu, state: %lx: remote_synced: %d.\n",
- __func__, pi->ino, pi->state, test_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state));
-
- if (test_bit(NETFS_INODE_REMOTE_DIR_SYNCED, &pi->state))
- return 0;
-
- if (!igrab(inode)) {
- err = -ENOENT;
- goto err_out_exit;
- }
-
- err = pohmelfs_meta_command(pi, NETFS_READDIR, NETFS_TRANS_SINGLE_DST,
- pohmelfs_remote_sync_complete, pi, 0);
- if (err)
- goto err_out_exit;
-
- pi->error = 0;
- ret = wait_event_interruptible_timeout(psb->wait,
- test_bit(NETFS_INODE_REMOTE_DIR_SYNCED, &pi->state) || pi->error, ret);
- dprintk("%s: awake dir: %llu, ret: %ld, err: %d.\n", __func__, pi->ino, ret, pi->error);
- if (ret <= 0) {
- err = ret;
- if (!err)
- err = -ETIMEDOUT;
- goto err_out_exit;
- }
-
- if (pi->error)
- return pi->error;
-
- return 0;
-
-err_out_exit:
- clear_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state);
-
- return err;
-}
-
-static int pohmelfs_dir_open(struct inode *inode, struct file *file)
-{
- file->private_data = NULL;
- return 0;
-}
-
-/*
- * VFS readdir callback. Syncs directory content from server if needed,
- * and provides direntry info to the userspace.
- */
-static int pohmelfs_readdir(struct file *file, void *dirent, filldir_t filldir)
-{
- struct inode *inode = file->f_path.dentry->d_inode;
- struct pohmelfs_inode *pi = POHMELFS_I(inode);
- struct pohmelfs_name *n;
- struct rb_node *rb_node;
- int err = 0, mode;
- u64 len;
-
- dprintk("%s: parent: %llu, fpos: %llu, hash: %08lx.\n",
- __func__, pi->ino, (u64)file->f_pos,
- (unsigned long)file->private_data);
-#if 0
- err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_READ_LOCK);
- if (err)
- return err;
-#endif
- err = pohmelfs_sync_remote_dir(pi);
- if (err)
- return err;
-
- if (file->private_data && (file->private_data == (void *)(unsigned long)file->f_pos))
- return 0;
-
- mutex_lock(&pi->offset_lock);
- n = pohmelfs_search_hash_unprecise(pi, (unsigned long)file->private_data);
-
- while (n) {
- mode = (n->mode >> 12) & 15;
-
- dprintk("%s: offset: %llu, parent ino: %llu, name: '%s', len: %u, ino: %llu, "
- "mode: %o/%o, fpos: %llu, hash: %08x.\n",
- __func__, file->f_pos, pi->ino, n->data, n->len,
- n->ino, n->mode, mode, file->f_pos, n->hash);
-
- file->private_data = (void *)(unsigned long)n->hash;
-
- len = n->len;
- err = filldir(dirent, n->data, n->len, file->f_pos, n->ino, mode);
-
- if (err < 0) {
- dprintk("%s: err: %d.\n", __func__, err);
- err = 0;
- break;
- }
-
- file->f_pos += len;
-
- rb_node = rb_next(&n->hash_node);
-
- if (!rb_node || (rb_node == &n->hash_node)) {
- file->private_data = (void *)(unsigned long)file->f_pos;
- break;
- }
-
- n = rb_entry(rb_node, struct pohmelfs_name, hash_node);
- }
- mutex_unlock(&pi->offset_lock);
-
- return err;
-}
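
pohmelfs_readdir() stores the hash of the last entry it emitted in
file->private_data and resumes by searching for the first cached name at or
after that hash, so the directory position survives cache changes between
calls. A userspace sketch of that resume-by-key idea over a sorted array of
keys (types and names hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Emit up to @batch entries whose key is >= @start, then return the
 * cursor to pass into the next call, as pohmelfs does with the hash. */
static uint32_t emit_from(const uint32_t *keys, size_t count,
			  uint32_t start, size_t batch)
{
	uint32_t next = start;
	size_t i, emitted = 0;

	for (i = 0; i < count && emitted < batch; i++) {
		if (keys[i] < start)
			continue;
		printf("entry %u\n", (unsigned)keys[i]);
		next = keys[i] + 1;
		emitted++;
	}

	return next;
}

int main(void)
{
	const uint32_t keys[] = { 3, 7, 9, 12, 19, 25 };
	uint32_t cursor = 0;

	cursor = emit_from(keys, 6, cursor, 3);	/* emits 3, 7, 9 */
	cursor = emit_from(keys, 6, cursor, 3);	/* resumes at 12 */
	return 0;
}
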
-
-static loff_t pohmelfs_dir_lseek(struct file *file, loff_t offset, int origin)
-{
- file->f_pos = offset;
- file->private_data = NULL;
- return offset;
-}
-
-const struct file_operations pohmelfs_dir_fops = {
- .open = pohmelfs_dir_open,
- .read = generic_read_dir,
- .llseek = pohmelfs_dir_lseek,
- .readdir = pohmelfs_readdir,
-};
-
-/*
- * Lookup single object on server.
- */
-static int pohmelfs_lookup_single(struct pohmelfs_inode *parent,
- struct qstr *str, u64 ino)
-{
- struct pohmelfs_sb *psb = POHMELFS_SB(parent->vfs_inode.i_sb);
- long ret = msecs_to_jiffies(5000);
- int err;
-
- set_bit(NETFS_COMMAND_PENDING, &parent->state);
- err = pohmelfs_meta_command_data(parent, parent->ino, NETFS_LOOKUP,
- (char *)str->name, NETFS_TRANS_SINGLE_DST, NULL, NULL, ino);
- if (err)
- goto err_out_exit;
-
- err = 0;
- ret = wait_event_interruptible_timeout(psb->wait,
- !test_bit(NETFS_COMMAND_PENDING, &parent->state), ret);
- if (ret <= 0) {
- err = ret;
- if (!err)
- err = -ETIMEDOUT;
- }
-
- if (err)
- goto err_out_exit;
-
- return 0;
-
-err_out_exit:
- clear_bit(NETFS_COMMAND_PENDING, &parent->state);
-
- printk("%s: failed: parent: %llu, ino: %llu, name: '%s', err: %d.\n",
- __func__, parent->ino, ino, str->name, err);
-
- return err;
-}
-
-/*
- * VFS lookup callback.
- * First try to get the inode number from the local name cache; if it is
- * there, the inode can be found in the inode cache. If there is no inode
- * or no object in the local cache, look the name up on the server. This
- * should only be done for directories that were not created locally, since
- * the remote server does not know about such a directory at all, so there
- * is no point in asking it.
- */
-struct dentry *pohmelfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
-{
- struct pohmelfs_inode *parent = POHMELFS_I(dir);
- struct pohmelfs_name *n;
- struct inode *inode = NULL;
- unsigned long ino = 0;
- int err, lock_type = POHMELFS_READ_LOCK, need_lock = 1;
- struct qstr str = dentry->d_name;
-
- if ((nd->intent.open.flags & O_ACCMODE) != O_RDONLY)
- lock_type = POHMELFS_WRITE_LOCK;
-
- if (test_bit(NETFS_INODE_OWNED, &parent->state)) {
- if (lock_type == parent->lock_type)
- need_lock = 0;
- if ((lock_type == POHMELFS_READ_LOCK) && (parent->lock_type == POHMELFS_WRITE_LOCK))
- need_lock = 0;
- }
-
- if ((lock_type == POHMELFS_READ_LOCK) && !test_bit(NETFS_INODE_REMOTE_DIR_SYNCED, &parent->state))
- need_lock = 1;
-
- str.hash = jhash(dentry->d_name.name, dentry->d_name.len, 0);
-
- mutex_lock(&parent->offset_lock);
- n = pohmelfs_search_hash(parent, str.hash);
- if (n)
- ino = n->ino;
- mutex_unlock(&parent->offset_lock);
-
- dprintk("%s: start ino: %lu, inode: %p, name: '%s', hash: %x, parent_state: %lx, need_lock: %d.\n",
- __func__, ino, inode, str.name, str.hash, parent->state, need_lock);
-
- if (ino) {
- inode = ilookup(dir->i_sb, ino);
- if (inode)
- goto out;
- }
-
- dprintk("%s: no inode dir: %p, dir_ino: %llu, name: '%s', len: %u, dir_state: %lx, ino: %lu.\n",
- __func__, dir, parent->ino,
- str.name, str.len, parent->state, ino);
-
- if (!ino) {
- if (!need_lock)
- goto out;
- }
-
- err = pohmelfs_data_lock(parent, 0, ~0, lock_type);
- if (err)
- goto out;
-
- err = pohmelfs_lookup_single(parent, &str, ino);
- if (err)
- goto out;
-
- if (!ino) {
- mutex_lock(&parent->offset_lock);
- n = pohmelfs_search_hash(parent, str.hash);
- if (n)
- ino = n->ino;
- mutex_unlock(&parent->offset_lock);
- }
-
- if (ino) {
- inode = ilookup(dir->i_sb, ino);
- dprintk("%s: second lookup ino: %lu, inode: %p, name: '%s', hash: %x.\n",
- __func__, ino, inode, str.name, str.hash);
- if (!inode) {
- dprintk("%s: No inode for ino: %lu, name: '%s', hash: %x.\n",
- __func__, ino, str.name, str.hash);
- /* return NULL; */
- return ERR_PTR(-EACCES);
- }
- } else {
- printk("%s: No inode number : name: '%s', hash: %x.\n",
- __func__, str.name, str.hash);
- }
-out:
- return d_splice_alias(inode, dentry);
-}
-
-/*
- * Create new object in local cache. Object will be synced to server
- * during writeback for given inode.
- */
-struct pohmelfs_inode *pohmelfs_create_entry_local(struct pohmelfs_sb *psb,
- struct pohmelfs_inode *parent, struct qstr *str, u64 start, int mode)
-{
- struct pohmelfs_inode *npi;
- int err = -ENOMEM;
- struct netfs_inode_info info;
-
- dprintk("%s: name: '%s', mode: %o, start: %llu.\n",
- __func__, str->name, mode, start);
-
- info.mode = mode;
- info.ino = start;
-
- if (!start)
- info.ino = pohmelfs_new_ino(psb);
-
- info.nlink = S_ISDIR(mode) ? 2 : 1;
- info.uid = current_fsuid();
- info.gid = current_fsgid();
- info.size = 0;
- info.blocksize = 512;
- info.blocks = 0;
- info.rdev = 0;
- info.version = 0;
-
- npi = pohmelfs_new_inode(psb, parent, str, &info, !!start);
- if (IS_ERR(npi)) {
- err = PTR_ERR(npi);
- goto err_out_unlock;
- }
-
- return npi;
-
-err_out_unlock:
- dprintk("%s: err: %d.\n", __func__, err);
- return ERR_PTR(err);
-}
-
-/*
- * Create local object and bind it to dentry.
- */
-static int pohmelfs_create_entry(struct inode *dir, struct dentry *dentry, u64 start, int mode)
-{
- struct pohmelfs_sb *psb = POHMELFS_SB(dir->i_sb);
- struct pohmelfs_inode *npi, *parent;
- struct qstr str = dentry->d_name;
- int err;
-
- parent = POHMELFS_I(dir);
-
- err = pohmelfs_data_lock(parent, 0, ~0, POHMELFS_WRITE_LOCK);
- if (err)
- return err;
-
- str.hash = jhash(dentry->d_name.name, dentry->d_name.len, 0);
-
- npi = pohmelfs_create_entry_local(psb, parent, &str, start, mode);
- if (IS_ERR(npi))
- return PTR_ERR(npi);
-
- d_instantiate(dentry, &npi->vfs_inode);
-
- dprintk("%s: parent: %llu, inode: %llu, name: '%s', parent_nlink: %d, nlink: %d.\n",
- __func__, parent->ino, npi->ino, dentry->d_name.name,
- (signed)dir->i_nlink, (signed)npi->vfs_inode.i_nlink);
-
- return 0;
-}
-
-/*
- * VFS create and mkdir callbacks.
- */
-static int pohmelfs_create(struct inode *dir, struct dentry *dentry, int mode,
- struct nameidata *nd)
-{
- return pohmelfs_create_entry(dir, dentry, 0, mode);
-}
-
-static int pohmelfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
-{
- int err;
-
- inode_inc_link_count(dir);
- err = pohmelfs_create_entry(dir, dentry, 0, mode | S_IFDIR);
- if (err)
- inode_dec_link_count(dir);
-
- return err;
-}
-
-static int pohmelfs_remove_entry(struct inode *dir, struct dentry *dentry)
-{
- struct pohmelfs_sb *psb = POHMELFS_SB(dir->i_sb);
- struct inode *inode = dentry->d_inode;
- struct pohmelfs_inode *parent = POHMELFS_I(dir), *pi = POHMELFS_I(inode);
- struct pohmelfs_name *n;
- int err = -ENOENT;
- struct qstr str = dentry->d_name;
-
- err = pohmelfs_data_lock(parent, 0, ~0, POHMELFS_WRITE_LOCK);
- if (err)
- return err;
-
- str.hash = jhash(dentry->d_name.name, dentry->d_name.len, 0);
-
- dprintk("%s: dir_ino: %llu, inode: %llu, name: '%s', nlink: %d.\n",
- __func__, parent->ino, pi->ino,
- str.name, (signed)inode->i_nlink);
-
- BUG_ON(!inode);
-
- mutex_lock(&parent->offset_lock);
- n = pohmelfs_search_hash(parent, str.hash);
- if (n) {
- pohmelfs_fix_offset(parent, n);
- if (test_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state))
- pohmelfs_remove_child(pi, n);
-
- pohmelfs_name_free(parent, n);
- err = 0;
- }
- mutex_unlock(&parent->offset_lock);
-
- if (!err) {
- psb->avail_size += inode->i_size;
-
- pohmelfs_inode_del_inode(psb, pi);
-
- mark_inode_dirty(dir);
-
- inode->i_ctime = dir->i_ctime;
- if (inode->i_nlink)
- inode_dec_link_count(inode);
- }
-
- return err;
-}
-
-/*
- * Unlink and rmdir VFS callbacks.
- */
-static int pohmelfs_unlink(struct inode *dir, struct dentry *dentry)
-{
- return pohmelfs_remove_entry(dir, dentry);
-}
-
-static int pohmelfs_rmdir(struct inode *dir, struct dentry *dentry)
-{
- int err;
- struct inode *inode = dentry->d_inode;
-
- dprintk("%s: parent: %llu, inode: %llu, name: '%s', parent_nlink: %d, nlink: %d.\n",
- __func__, POHMELFS_I(dir)->ino, POHMELFS_I(inode)->ino,
- dentry->d_name.name, (signed)dir->i_nlink, (signed)inode->i_nlink);
-
- err = pohmelfs_remove_entry(dir, dentry);
- if (!err) {
- inode_dec_link_count(dir);
- inode_dec_link_count(inode);
- }
-
- return err;
-}
-
-/*
- * Link creation is synchronous.
- * I'm lazy.
- * Earth is somewhat round.
- */
-static int pohmelfs_create_link(struct pohmelfs_inode *parent, struct qstr *obj,
- struct pohmelfs_inode *target, struct qstr *tstr)
-{
- struct super_block *sb = parent->vfs_inode.i_sb;
- struct pohmelfs_sb *psb = POHMELFS_SB(sb);
- struct netfs_cmd *cmd;
- struct netfs_trans *t;
- void *data;
- int err, parent_len, target_len = 0, cur_len, path_size = 0;
-
- err = pohmelfs_data_lock(parent, 0, ~0, POHMELFS_WRITE_LOCK);
- if (err)
- return err;
-
- err = sb->s_op->write_inode(&parent->vfs_inode, 0);
- if (err)
- goto err_out_exit;
-
- if (tstr)
- target_len = tstr->len;
-
- parent_len = pohmelfs_path_length(parent);
- if (target)
- target_len += pohmelfs_path_length(target);
-
- if (parent_len < 0) {
- err = parent_len;
- goto err_out_exit;
- }
-
- if (target_len < 0) {
- err = target_len;
- goto err_out_exit;
- }
-
- t = netfs_trans_alloc(psb, parent_len + target_len + obj->len + 2, 0, 0);
- if (!t) {
- err = -ENOMEM;
- goto err_out_exit;
- }
- cur_len = netfs_trans_cur_len(t);
-
- cmd = netfs_trans_current(t);
- if (IS_ERR(cmd)) {
- err = PTR_ERR(cmd);
- goto err_out_free;
- }
-
- data = (void *)(cmd + 1);
- cur_len -= sizeof(struct netfs_cmd);
-
- err = pohmelfs_construct_path_string(parent, data, parent_len);
- if (err > 0) {
- /* Do not place null-byte before the slash */
- path_size = err - 1;
- cur_len -= path_size;
-
- err = snprintf(data + path_size, cur_len, "/%s|", obj->name);
-
- path_size += err;
- cur_len -= err;
-
- cmd->ext = path_size - 1; /* No | symbol */
-
- if (target) {
- err = pohmelfs_construct_path_string(target, data + path_size, target_len);
- if (err > 0) {
- path_size += err;
- cur_len -= err;
- }
- }
- }
-
- if (err < 0)
- goto err_out_free;
-
- cmd->start = 0;
-
- if (!target && tstr) {
- if (tstr->len > cur_len - 1) {
- err = -ENAMETOOLONG;
- goto err_out_free;
- }
-
- err = snprintf(data + path_size, cur_len, "%s", tstr->name) + 1; /* 0-byte */
- path_size += err;
- cur_len -= err;
- cmd->start = 1;
- }
-
- dprintk("%s: parent: %llu, obj: '%s', target_inode: %llu, target_str: '%s', full: '%s'.\n",
- __func__, parent->ino, obj->name, (target) ? target->ino : 0, (tstr) ? tstr->name : NULL,
- (char *)data);
-
- cmd->cmd = NETFS_LINK;
- cmd->size = path_size;
- cmd->id = parent->ino;
-
- netfs_convert_cmd(cmd);
-
- netfs_trans_update(cmd, t, path_size);
-
- err = netfs_trans_finish(t, psb);
- if (err)
- goto err_out_exit;
-
- return 0;
-
-err_out_free:
- t->result = err;
- netfs_trans_put(t);
-err_out_exit:
- return err;
-}
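
pohmelfs_create_link() assembles its wire payload by appending the parent path,
a "/name|" component and an optional target path into one buffer while
tracking how much space remains. A compact userspace version of that
incremental snprintf pattern, with invented buffer sizes and names:

#include <stdio.h>

/* Append "parent/name|target" style components into @buf, tracking the
 * bytes used so far; returns the total length or -1 on overflow. */
static int build_link_path(char *buf, size_t size, const char *parent,
			   const char *name, const char *target)
{
	size_t used;
	int n;

	n = snprintf(buf, size, "%s/%s|", parent, name);
	if (n < 0 || (size_t)n >= size)
		return -1;
	used = (size_t)n;

	if (target) {
		n = snprintf(buf + used, size - used, "%s", target);
		if (n < 0 || (size_t)n >= size - used)
			return -1;
		used += (size_t)n;
	}

	return (int)used;
}

int main(void)
{
	char buf[128];
	int len = build_link_path(buf, sizeof(buf), "/mnt/pohmelfs/dir",
				  "newname", "/mnt/pohmelfs/target");

	if (len > 0)
		printf("%d: %s\n", len, buf);
	return 0;
}
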
-
-/*
- * VFS hard and soft link callbacks.
- */
-static int pohmelfs_link(struct dentry *old_dentry, struct inode *dir,
- struct dentry *dentry)
-{
- struct inode *inode = old_dentry->d_inode;
- struct pohmelfs_inode *pi = POHMELFS_I(inode);
- int err;
- struct qstr str = dentry->d_name;
-
- str.hash = jhash(dentry->d_name.name, dentry->d_name.len, 0);
-
- err = inode->i_sb->s_op->write_inode(inode, 0);
- if (err)
- return err;
-
- err = pohmelfs_create_link(POHMELFS_I(dir), &str, pi, NULL);
- if (err)
- return err;
-
- return pohmelfs_create_entry(dir, dentry, pi->ino, inode->i_mode);
-}
-
-static int pohmelfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
-{
- struct qstr sym_str;
- struct qstr str = dentry->d_name;
- struct inode *inode;
- int err;
-
- str.hash = jhash(dentry->d_name.name, dentry->d_name.len, 0);
-
- sym_str.name = symname;
- sym_str.len = strlen(symname);
-
- err = pohmelfs_create_link(POHMELFS_I(dir), &str, NULL, &sym_str);
- if (err)
- goto err_out_exit;
-
- err = pohmelfs_create_entry(dir, dentry, 0, S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO);
- if (err)
- goto err_out_exit;
-
- inode = dentry->d_inode;
-
- err = page_symlink(inode, symname, sym_str.len + 1);
- if (err)
- goto err_out_put;
-
- return 0;
-
-err_out_put:
- iput(inode);
-err_out_exit:
- return err;
-}
-
-static int pohmelfs_send_rename(struct pohmelfs_inode *pi, struct pohmelfs_inode *parent,
- struct qstr *str)
-{
- int path_len, err, total_len = 0, inode_len, parent_len;
- char *path;
- struct netfs_trans *t;
- struct netfs_cmd *cmd;
- struct pohmelfs_sb *psb = POHMELFS_SB(pi->vfs_inode.i_sb);
-
- parent_len = pohmelfs_path_length(parent);
- inode_len = pohmelfs_path_length(pi);
-
- if (parent_len < 0 || inode_len < 0)
- return -EINVAL;
-
- path_len = parent_len + inode_len + str->len + 3;
-
- t = netfs_trans_alloc(psb, path_len, 0, 0);
- if (!t)
- return -ENOMEM;
-
- cmd = netfs_trans_current(t);
- path = (char *)(cmd + 1);
-
- err = pohmelfs_construct_path_string(pi, path, inode_len);
- if (err < 0)
- goto err_out_unlock;
-
- cmd->ext = err;
-
- path += err;
- total_len += err;
- path_len -= err;
-
- *path = '|';
- path++;
- total_len++;
- path_len--;
-
- err = pohmelfs_construct_path_string(parent, path, parent_len);
- if (err < 0)
- goto err_out_unlock;
-
- /*
- * Do not place a null-byte before the final slash and the name.
- */
- err--;
- path += err;
- total_len += err;
- path_len -= err;
-
- err = snprintf(path, path_len - 1, "/%s", str->name);
-
- total_len += err + 1; /* 0 symbol */
- path_len -= err + 1;
-
- cmd->cmd = NETFS_RENAME;
- cmd->id = pi->ino;
- cmd->start = parent->ino;
- cmd->size = total_len;
-
- netfs_convert_cmd(cmd);
-
- netfs_trans_update(cmd, t, total_len);
-
- return netfs_trans_finish(t, psb);
-
-err_out_unlock:
- netfs_trans_free(t);
- return err;
-}
-
-static int pohmelfs_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry)
-{
- struct inode *inode = old_dentry->d_inode;
- struct pohmelfs_inode *old_parent, *pi, *new_parent;
- struct qstr str = new_dentry->d_name;
- struct pohmelfs_name *n;
- unsigned int old_hash;
- int err = -ENOENT;
-
- pi = POHMELFS_I(inode);
- old_parent = POHMELFS_I(old_dir);
-
- if (new_dir)
- new_dir->i_sb->s_op->write_inode(new_dir, 0);
-
- old_hash = jhash(old_dentry->d_name.name, old_dentry->d_name.len, 0);
- str.hash = jhash(new_dentry->d_name.name, new_dentry->d_name.len, 0);
-
- str.len = new_dentry->d_name.len;
- str.name = new_dentry->d_name.name;
- str.hash = jhash(new_dentry->d_name.name, new_dentry->d_name.len, 0);
-
- if (new_dir) {
- new_parent = POHMELFS_I(new_dir);
- err = -ENOTEMPTY;
-
- if (S_ISDIR(inode->i_mode) &&
- new_parent->total_len <= 3)
- goto err_out_exit;
- } else {
- new_parent = old_parent;
- }
-
- dprintk("%s: ino: %llu, parent: %llu, name: '%s' -> parent: %llu, name: '%s', i_size: %llu.\n",
- __func__, pi->ino, old_parent->ino, old_dentry->d_name.name,
- new_parent->ino, new_dentry->d_name.name, inode->i_size);
-
- if (test_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state) &&
- test_bit(NETFS_INODE_OWNED, &pi->state)) {
- err = pohmelfs_send_rename(pi, new_parent, &str);
- if (err)
- goto err_out_exit;
- }
-
- n = pohmelfs_name_alloc(str.len + 1);
- if (!n)
- goto err_out_exit;
-
- mutex_lock(&new_parent->offset_lock);
- n->ino = pi->ino;
- n->mode = inode->i_mode;
- n->len = str.len;
- n->hash = str.hash;
- sprintf(n->data, "%s", str.name);
-
- err = pohmelfs_insert_name(new_parent, n);
- mutex_unlock(&new_parent->offset_lock);
-
- if (err)
- goto err_out_exit;
-
- mutex_lock(&old_parent->offset_lock);
- n = pohmelfs_search_hash(old_parent, old_hash);
- if (n)
- pohmelfs_name_del(old_parent, n);
- mutex_unlock(&old_parent->offset_lock);
-
- mark_inode_dirty(inode);
- mark_inode_dirty(&new_parent->vfs_inode);
-
- WARN_ON_ONCE(list_empty(&inode->i_dentry));
-
- return 0;
-
-err_out_exit:
-
- clear_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state);
-
- return err;
-}
-
-/*
- * POHMELFS directory inode operations.
- */
-const struct inode_operations pohmelfs_dir_inode_ops = {
- .link = pohmelfs_link,
- .symlink = pohmelfs_symlink,
- .unlink = pohmelfs_unlink,
- .mkdir = pohmelfs_mkdir,
- .rmdir = pohmelfs_rmdir,
- .create = pohmelfs_create,
- .lookup = pohmelfs_lookup,
- .setattr = pohmelfs_setattr,
- .rename = pohmelfs_rename,
-};
diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
deleted file mode 100644
index 7a19555..0000000
--- a/drivers/staging/pohmelfs/inode.c
+++ /dev/null
@@ -1,2056 +0,0 @@
-/*
- * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/backing-dev.h>
-#include <linux/crypto.h>
-#include <linux/fs.h>
-#include <linux/jhash.h>
-#include <linux/hash.h>
-#include <linux/ktime.h>
-#include <linux/mm.h>
-#include <linux/mount.h>
-#include <linux/pagemap.h>
-#include <linux/pagevec.h>
-#include <linux/parser.h>
-#include <linux/swap.h>
-#include <linux/slab.h>
-#include <linux/statfs.h>
-#include <linux/writeback.h>
-#include <linux/prefetch.h>
-
-#include "netfs.h"
-
-#define POHMELFS_MAGIC_NUM 0x504f482e
-
-static struct kmem_cache *pohmelfs_inode_cache;
-static atomic_t psb_bdi_num = ATOMIC_INIT(0);
-
-/*
- * Removes inode from all trees, drops local name cache and removes all queued
- * requests for object removal.
- */
-void pohmelfs_inode_del_inode(struct pohmelfs_sb *psb, struct pohmelfs_inode *pi)
-{
- mutex_lock(&pi->offset_lock);
- pohmelfs_free_names(pi);
- mutex_unlock(&pi->offset_lock);
-
- dprintk("%s: deleted stuff in ino: %llu.\n", __func__, pi->ino);
-}
-
-/*
- * Sync inode to server.
- * Returns zero on success and a negative error value otherwise.
- * It gathers the path to the root directory into structures containing
- * creation mode, permissions and names, so that the whole path
- * to the given inode can be created using only a single network command.
- */
-int pohmelfs_write_inode_create(struct inode *inode, struct netfs_trans *trans)
-{
- struct pohmelfs_inode *pi = POHMELFS_I(inode);
- int err = -ENOMEM, size;
- struct netfs_cmd *cmd;
- void *data;
- int cur_len = netfs_trans_cur_len(trans);
-
- if (unlikely(cur_len < 0))
- return -ETOOSMALL;
-
- cmd = netfs_trans_current(trans);
- cur_len -= sizeof(struct netfs_cmd);
-
- data = (void *)(cmd + 1);
-
- err = pohmelfs_construct_path_string(pi, data, cur_len);
- if (err < 0)
- goto err_out_exit;
-
- size = err;
-
- cmd->start = i_size_read(inode);
- cmd->cmd = NETFS_CREATE;
- cmd->size = size;
- cmd->id = pi->ino;
- cmd->ext = inode->i_mode;
-
- netfs_convert_cmd(cmd);
-
- netfs_trans_update(cmd, trans, size);
-
- return 0;
-
-err_out_exit:
- printk("%s: completed ino: %llu, err: %d.\n", __func__, pi->ino, err);
- return err;
-}
-
-static int pohmelfs_write_trans_complete(struct page **pages, unsigned int page_num,
- void *private, int err)
-{
- unsigned i;
-
- dprintk("%s: pages: %lu-%lu, page_num: %u, err: %d.\n",
- __func__, pages[0]->index, pages[page_num-1]->index,
- page_num, err);
-
- for (i = 0; i < page_num; i++) {
- struct page *page = pages[i];
-
- if (!page)
- continue;
-
- end_page_writeback(page);
-
- if (err < 0) {
- SetPageError(page);
- set_page_dirty(page);
- }
-
- unlock_page(page);
- page_cache_release(page);
-
- /* dprintk("%s: %3u/%u: page: %p.\n", __func__, i, page_num, page); */
- }
- return err;
-}
-
-static int pohmelfs_inode_has_dirty_pages(struct address_space *mapping, pgoff_t index)
-{
- int ret;
- struct page *page;
-
- rcu_read_lock();
- ret = radix_tree_gang_lookup_tag(&mapping->page_tree,
- (void **)&page, index, 1, PAGECACHE_TAG_DIRTY);
- rcu_read_unlock();
- return ret;
-}
-
-static int pohmelfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
-{
- struct inode *inode = mapping->host;
- struct pohmelfs_inode *pi = POHMELFS_I(inode);
- struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
- int err = 0;
- int done = 0;
- int nr_pages;
- pgoff_t index;
- pgoff_t end; /* Inclusive */
- int scanned = 0;
- int range_whole = 0;
-
- if (wbc->range_cyclic) {
- index = mapping->writeback_index; /* Start from prev offset */
- end = -1;
- } else {
- index = wbc->range_start >> PAGE_CACHE_SHIFT;
- end = wbc->range_end >> PAGE_CACHE_SHIFT;
- if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
- range_whole = 1;
- scanned = 1;
- }
-retry:
- while (!done && (index <= end)) {
- unsigned int i = min(end - index, (pgoff_t)psb->trans_max_pages);
- int path_len;
- struct netfs_trans *trans;
-
- err = pohmelfs_inode_has_dirty_pages(mapping, index);
- if (!err)
- break;
-
- err = pohmelfs_path_length(pi);
- if (err < 0)
- break;
-
- path_len = err;
-
- if (path_len <= 2) {
- err = -ENOENT;
- break;
- }
-
- trans = netfs_trans_alloc(psb, path_len, 0, i);
- if (!trans) {
- err = -ENOMEM;
- break;
- }
- trans->complete = &pohmelfs_write_trans_complete;
-
- trans->page_num = nr_pages = find_get_pages_tag(mapping, &index,
- PAGECACHE_TAG_DIRTY, trans->page_num,
- trans->pages);
-
- dprintk("%s: t: %p, nr_pages: %u, end: %lu, index: %lu, max: %u.\n",
- __func__, trans, nr_pages, end, index, trans->page_num);
-
- if (!nr_pages)
- goto err_out_reset;
-
- err = pohmelfs_write_inode_create(inode, trans);
- if (err)
- goto err_out_reset;
-
- err = 0;
- scanned = 1;
-
- for (i = 0; i < trans->page_num; i++) {
- struct page *page = trans->pages[i];
-
- lock_page(page);
-
- if (unlikely(page->mapping != mapping))
- goto out_continue;
-
- if (!wbc->range_cyclic && page->index > end) {
- done = 1;
- goto out_continue;
- }
-
- if (wbc->sync_mode != WB_SYNC_NONE)
- wait_on_page_writeback(page);
-
- if (PageWriteback(page) ||
- !clear_page_dirty_for_io(page)) {
- dprintk("%s: not clear for io page: %p, writeback: %d.\n",
- __func__, page, PageWriteback(page));
- goto out_continue;
- }
-
- set_page_writeback(page);
-
- trans->attached_size += page_private(page);
- trans->attached_pages++;
-#if 0
- dprintk("%s: %u/%u added trans: %p, gen: %u, page: %p, [High: %d], size: %lu, idx: %lu.\n",
- __func__, i, trans->page_num, trans, trans->gen, page,
- !!PageHighMem(page), page_private(page), page->index);
-#endif
- wbc->nr_to_write--;
-
- if (wbc->nr_to_write <= 0)
- done = 1;
-
- continue;
-out_continue:
- unlock_page(page);
- trans->pages[i] = NULL;
- }
-
- err = netfs_trans_finish(trans, psb);
- if (err)
- break;
-
- continue;
-
-err_out_reset:
- trans->result = err;
- netfs_trans_reset(trans);
- netfs_trans_put(trans);
- break;
- }
-
- if (!scanned && !done) {
- /*
- * We hit the last page and there is more work to be done: wrap
- * back to the start of the file
- */
- scanned = 1;
- index = 0;
- goto retry;
- }
-
- if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
- mapping->writeback_index = index;
-
- return err;
-}
-
-/*
- * Inode writeback creation completion callback.
- * Only invoked for just-created inodes which have no pages attached,
- * such as directories and empty files.
- */
-static int pohmelfs_write_inode_complete(struct page **pages, unsigned int page_num,
- void *private, int err)
-{
- struct inode *inode = private;
- struct pohmelfs_inode *pi = POHMELFS_I(inode);
-
- if (inode) {
- if (err) {
- mark_inode_dirty(inode);
- clear_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state);
- } else {
- set_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state);
- }
-
- pohmelfs_put_inode(pi);
- }
-
- return err;
-}
-
-int pohmelfs_write_create_inode(struct pohmelfs_inode *pi)
-{
- struct netfs_trans *t;
- struct inode *inode = &pi->vfs_inode;
- struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
- int err;
-
- if (test_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state))
- return 0;
-
- dprintk("%s: started ino: %llu.\n", __func__, pi->ino);
-
- err = pohmelfs_path_length(pi);
- if (err < 0)
- goto err_out_exit;
-
- t = netfs_trans_alloc(psb, err + 1, 0, 0);
- if (!t) {
- err = -ENOMEM;
- goto err_out_exit;
- }
- t->complete = pohmelfs_write_inode_complete;
- t->private = igrab(inode);
- if (!t->private) {
- err = -ENOENT;
- goto err_out_put;
- }
-
- err = pohmelfs_write_inode_create(inode, t);
- if (err)
- goto err_out_put;
-
- netfs_trans_finish(t, POHMELFS_SB(inode->i_sb));
-
- return 0;
-
-err_out_put:
- t->result = err;
- netfs_trans_put(t);
-err_out_exit:
- return err;
-}
-
-/*
- * Sync all not-yet-created children in the given directory to the server.
- */
-static int pohmelfs_write_inode_create_children(struct inode *inode)
-{
- struct pohmelfs_inode *parent = POHMELFS_I(inode);
- struct super_block *sb = inode->i_sb;
- struct pohmelfs_name *n;
-
- while (!list_empty(&parent->sync_create_list)) {
- n = NULL;
- mutex_lock(&parent->offset_lock);
- if (!list_empty(&parent->sync_create_list)) {
- n = list_first_entry(&parent->sync_create_list,
- struct pohmelfs_name, sync_create_entry);
- list_del_init(&n->sync_create_entry);
- }
- mutex_unlock(&parent->offset_lock);
-
- if (!n)
- break;
-
- inode = ilookup(sb, n->ino);
-
- dprintk("%s: parent: %llu, ino: %llu, inode: %p.\n",
- __func__, parent->ino, n->ino, inode);
-
- if (inode && (inode->i_state & I_DIRTY)) {
- struct pohmelfs_inode *pi = POHMELFS_I(inode);
- pohmelfs_write_create_inode(pi);
- /* pohmelfs_meta_command(pi, NETFS_INODE_INFO, 0, NULL, NULL, 0); */
- iput(inode);
- }
- }
-
- return 0;
-}
-
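The loop above follows a common shape: hold the mutex only long enough to detach one entry from the shared list, then do the heavy work (ilookup(), writeback) with the lock dropped so other writers are not stalled. A simplified userspace sketch of that pop-under-lock pattern, with invented types and pthreads standing in for the kernel mutex:

#include <pthread.h>
#include <stdio.h>

struct item {
	struct item *next;
	int ino;
};

static struct item *pending;
static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;

/* Detach one entry under the lock; the caller processes it unlocked. */
static struct item *pop_pending(void)
{
	struct item *it;

	pthread_mutex_lock(&pending_lock);
	it = pending;
	if (it)
		pending = it->next;
	pthread_mutex_unlock(&pending_lock);

	return it;
}

int main(void)
{
	struct item a = { NULL, 2 }, b = { &a, 3 };
	struct item *it;

	pending = &b;
	while ((it = pop_pending()) != NULL)
		printf("processing ino %d outside the lock\n", it->ino);
	return 0;
}
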
-/*
- * Removes the given child from the given inode on the server.
- */
-int pohmelfs_remove_child(struct pohmelfs_inode *pi, struct pohmelfs_name *n)
-{
- return pohmelfs_meta_command_data(pi, pi->ino, NETFS_REMOVE, NULL, 0, NULL, NULL, 0);
-}
-
-/*
- * Writeback for given inode.
- */
-static int pohmelfs_write_inode(struct inode *inode,
- struct writeback_control *wbc)
-{
- struct pohmelfs_inode *pi = POHMELFS_I(inode);
-
- pohmelfs_write_create_inode(pi);
- pohmelfs_write_inode_create_children(inode);
-
- return 0;
-}
-
-/*
- * It is not exported, sorry...
- */
-static inline wait_queue_head_t *page_waitqueue(struct page *page)
-{
- const struct zone *zone = page_zone(page);
-
- return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
-}
-
-static int pohmelfs_wait_on_page_locked(struct page *page)
-{
- struct pohmelfs_sb *psb = POHMELFS_SB(page->mapping->host->i_sb);
- long ret = psb->wait_on_page_timeout;
- DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
- int err = 0;
-
- if (!PageLocked(page))
- return 0;
-
- for (;;) {
- prepare_to_wait(page_waitqueue(page),
- &wait.wait, TASK_INTERRUPTIBLE);
-
- dprintk("%s: page: %p, locked: %d, uptodate: %d, error: %d, flags: %lx.\n",
- __func__, page, PageLocked(page), PageUptodate(page),
- PageError(page), page->flags);
-
- if (!PageLocked(page))
- break;
-
- if (!signal_pending(current)) {
- ret = schedule_timeout(ret);
- if (!ret)
- break;
- continue;
- }
- ret = -ERESTARTSYS;
- break;
- }
- finish_wait(page_waitqueue(page), &wait.wait);
-
- if (!ret)
- err = -ETIMEDOUT;
-
-
- if (!err)
- SetPageUptodate(page);
-
- if (err)
- printk("%s: page: %p, uptodate: %d, locked: %d, err: %d.\n",
- __func__, page, PageUptodate(page), PageLocked(page), err);
-
- return err;
-}
-
-static int pohmelfs_read_page_complete(struct page **pages, unsigned int page_num,
- void *private, int err)
-{
- struct page *page = private;
-
- if (PageChecked(page))
- return err;
-
- if (err < 0) {
- dprintk("%s: page: %p, err: %d.\n", __func__, page, err);
- SetPageError(page);
- }
-
- unlock_page(page);
-
- return err;
-}
-
-/*
- * Read a page from the remote server.
- * The function waits until the page is unlocked.
- */
-static int pohmelfs_readpage(struct file *file, struct page *page)
-{
- struct inode *inode = page->mapping->host;
- struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
- struct pohmelfs_inode *pi = POHMELFS_I(inode);
- struct netfs_trans *t;
- struct netfs_cmd *cmd;
- int err, path_len;
- void *data;
- u64 isize;
-
- err = pohmelfs_data_lock(pi, page->index << PAGE_CACHE_SHIFT,
- PAGE_SIZE, POHMELFS_READ_LOCK);
- if (err)
- goto err_out_exit;
-
- isize = i_size_read(inode);
- if (isize <= page->index << PAGE_CACHE_SHIFT) {
- SetPageUptodate(page);
- unlock_page(page);
- return 0;
- }
-
- path_len = pohmelfs_path_length(pi);
- if (path_len < 0) {
- err = path_len;
- goto err_out_exit;
- }
-
- t = netfs_trans_alloc(psb, path_len, NETFS_TRANS_SINGLE_DST, 0);
- if (!t) {
- err = -ENOMEM;
- goto err_out_exit;
- }
-
- t->complete = pohmelfs_read_page_complete;
- t->private = page;
-
- cmd = netfs_trans_current(t);
- data = (void *)(cmd + 1);
-
- err = pohmelfs_construct_path_string(pi, data, path_len);
- if (err < 0)
- goto err_out_free;
-
- path_len = err;
-
- cmd->id = pi->ino;
- cmd->start = page->index;
- cmd->start <<= PAGE_CACHE_SHIFT;
- cmd->size = PAGE_CACHE_SIZE + path_len;
- cmd->cmd = NETFS_READ_PAGE;
- cmd->ext = path_len;
-
- dprintk("%s: path: '%s', page: %p, ino: %llu, start: %llu, size: %lu.\n",
- __func__, (char *)data, page, pi->ino, cmd->start, PAGE_CACHE_SIZE);
-
- netfs_convert_cmd(cmd);
- netfs_trans_update(cmd, t, path_len);
-
- err = netfs_trans_finish(t, psb);
- if (err)
- goto err_out_return;
-
- return pohmelfs_wait_on_page_locked(page);
-
-err_out_free:
- t->result = err;
- netfs_trans_put(t);
-err_out_exit:
- SetPageError(page);
- if (PageLocked(page))
- unlock_page(page);
-err_out_return:
- printk("%s: page: %p, start: %lu, size: %lu, err: %d.\n",
- __func__, page, page->index << PAGE_CACHE_SHIFT, PAGE_CACHE_SIZE, err);
-
- return err;
-}
-
-/*
- * Write begin/end magic.
- * Allocates a page and writes the inode out if it has not been synced to the server before.
- */
-static int pohmelfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
- struct page **pagep, void **fsdata)
-{
- struct inode *inode = mapping->host;
- struct page *page;
- pgoff_t index;
- unsigned start, end;
- int err;
-
- *pagep = NULL;
-
- index = pos >> PAGE_CACHE_SHIFT;
- start = pos & (PAGE_CACHE_SIZE - 1);
- end = start + len;
-
- page = grab_cache_page(mapping, index);
-#if 0
- dprintk("%s: page: %p pos: %llu, len: %u, index: %lu, start: %u, end: %u, uptodate: %d.\n",
- __func__, page, pos, len, index, start, end, PageUptodate(page));
-#endif
- if (!page) {
- err = -ENOMEM;
- goto err_out_exit;
- }
-
- while (!PageUptodate(page)) {
- if (start && test_bit(NETFS_INODE_REMOTE_SYNCED, &POHMELFS_I(inode)->state)) {
- err = pohmelfs_readpage(file, page);
- if (err)
- goto err_out_exit;
-
- lock_page(page);
- continue;
- }
-
- if (len != PAGE_CACHE_SIZE) {
- void *kaddr = kmap_atomic(page, KM_USER0);
-
- memset(kaddr + start, 0, PAGE_CACHE_SIZE - start);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
- }
- SetPageUptodate(page);
- }
-
- set_page_private(page, end);
-
- *pagep = page;
-
- return 0;
-
-err_out_exit:
- page_cache_release(page);
- *pagep = NULL;
-
- return err;
-}
-
-static int pohmelfs_write_end(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
-{
- struct inode *inode = mapping->host;
-
- if (copied != len) {
- unsigned from = pos & (PAGE_CACHE_SIZE - 1);
- void *kaddr = kmap_atomic(page, KM_USER0);
-
- memset(kaddr + from + copied, 0, len - copied);
- flush_dcache_page(page);
- kunmap_atomic(kaddr, KM_USER0);
- }
-
- SetPageUptodate(page);
- set_page_dirty(page);
-#if 0
- dprintk("%s: page: %p [U: %d, D: %d, L: %d], pos: %llu, len: %u, copied: %u.\n",
- __func__, page,
- PageUptodate(page), PageDirty(page), PageLocked(page),
- pos, len, copied);
-#endif
- flush_dcache_page(page);
-
- unlock_page(page);
- page_cache_release(page);
-
- if (pos + copied > inode->i_size) {
- struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
-
- psb->avail_size -= pos + copied - inode->i_size;
-
- i_size_write(inode, pos + copied);
- }
-
- return copied;
-}
-
-static int pohmelfs_readpages_trans_complete(struct page **__pages, unsigned int page_num,
- void *private, int err)
-{
- struct pohmelfs_inode *pi = private;
- unsigned int i, num;
- struct page **pages, *page = (struct page *)__pages;
- loff_t index = page->index;
-
- pages = kzalloc(sizeof(void *) * page_num, GFP_NOIO);
- if (!pages)
- return -ENOMEM;
-
- num = find_get_pages_contig(pi->vfs_inode.i_mapping, index, page_num, pages);
- if (num <= 0) {
- err = num;
- goto err_out_free;
- }
-
- for (i = 0; i < num; ++i) {
- page = pages[i];
-
- if (err)
- printk("%s: %u/%u: page: %p, index: %lu, uptodate: %d, locked: %d, err: %d.\n",
- __func__, i, num, page, page->index,
- PageUptodate(page), PageLocked(page), err);
-
- if (!PageChecked(page)) {
- if (err < 0)
- SetPageError(page);
- unlock_page(page);
- }
- page_cache_release(page);
- page_cache_release(page);
- }
-
-err_out_free:
- kfree(pages);
- return err;
-}
-
-static int pohmelfs_send_readpages(struct pohmelfs_inode *pi, struct page *first, unsigned int num)
-{
- struct netfs_trans *t;
- struct netfs_cmd *cmd;
- struct pohmelfs_sb *psb = POHMELFS_SB(pi->vfs_inode.i_sb);
- int err, path_len;
- void *data;
-
- err = pohmelfs_data_lock(pi, first->index << PAGE_CACHE_SHIFT,
- num * PAGE_SIZE, POHMELFS_READ_LOCK);
- if (err)
- goto err_out_exit;
-
- path_len = pohmelfs_path_length(pi);
- if (path_len < 0) {
- err = path_len;
- goto err_out_exit;
- }
-
- t = netfs_trans_alloc(psb, path_len, NETFS_TRANS_SINGLE_DST, 0);
- if (!t) {
- err = -ENOMEM;
- goto err_out_exit;
- }
-
- cmd = netfs_trans_current(t);
- data = (void *)(cmd + 1);
-
- t->complete = pohmelfs_readpages_trans_complete;
- t->private = pi;
- t->page_num = num;
- t->pages = (struct page **)first;
-
- err = pohmelfs_construct_path_string(pi, data, path_len);
- if (err < 0)
- goto err_out_put;
-
- path_len = err;
-
- cmd->cmd = NETFS_READ_PAGES;
- cmd->start = first->index;
- cmd->start <<= PAGE_CACHE_SHIFT;
- cmd->size = (num << 8 | PAGE_CACHE_SHIFT);
- cmd->id = pi->ino;
- cmd->ext = path_len;
-
- dprintk("%s: t: %p, gen: %u, path: '%s', path_len: %u, "
- "start: %lu, num: %u.\n",
- __func__, t, t->gen, (char *)data, path_len,
- first->index, num);
-
- netfs_convert_cmd(cmd);
- netfs_trans_update(cmd, t, path_len);
-
- return netfs_trans_finish(t, psb);
-
-err_out_put:
- netfs_trans_free(t);
-err_out_exit:
- pohmelfs_readpages_trans_complete((struct page **)first, num, pi, err);
- return err;
-}
-
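The cmd->size assignment above packs two values into a single field: the low 8 bits carry the page shift and the remaining bits carry the number of pages requested. A small sketch of that encoding (function names are illustrative, this is not the wire definition itself):

#include <stdint.h>
#include <stdio.h>

static uint32_t pack_pages(uint32_t num, uint32_t page_shift)
{
	return (num << 8) | page_shift;
}

static void unpack_pages(uint32_t size, uint32_t *num, uint32_t *page_shift)
{
	*page_shift = size & 0xff;
	*num = size >> 8;
}

int main(void)
{
	uint32_t num, shift;

	unpack_pages(pack_pages(32, 12), &num, &shift);
	printf("num=%u shift=%u\n", num, shift);	/* num=32 shift=12 */
	return 0;
}
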
-#define list_to_page(head) (list_entry((head)->prev, struct page, lru))
-
-static int pohmelfs_readpages(struct file *file, struct address_space *mapping,
- struct list_head *pages, unsigned nr_pages)
-{
- unsigned int page_idx, num = 0;
- struct page *page = NULL, *first = NULL;
-
- for (page_idx = 0; page_idx < nr_pages; page_idx++) {
- page = list_to_page(pages);
-
- prefetchw(&page->flags);
- list_del(&page->lru);
-
- if (!add_to_page_cache_lru(page, mapping,
- page->index, GFP_KERNEL)) {
-
- if (!num) {
- num = 1;
- first = page;
- continue;
- }
-
- dprintk("%s: added to lru page: %p, page_index: %lu, first_index: %lu.\n",
- __func__, page, page->index, first->index);
-
- if (unlikely(first->index + num != page->index) || (num > 500)) {
- pohmelfs_send_readpages(POHMELFS_I(mapping->host),
- first, num);
- first = page;
- num = 0;
- }
-
- num++;
- }
- }
- pohmelfs_send_readpages(POHMELFS_I(mapping->host), first, num);
-
- /*
-	 * This will be a sync read, so when the last page is processed,
-	 * all previous ones are already unlocked and ready to be used.
- */
- return 0;
-}
-
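The readahead loop above tries to coalesce page-cache insertions into as few NETFS_READ_PAGES requests as possible: a request is flushed whenever the next page index breaks the contiguous run or the run grows past roughly 500 pages. A simplified userspace sketch of that batching policy (not the exact bookkeeping of the loop above):

#include <stddef.h>
#include <stdio.h>

#define BATCH_CAP 500

static void send_batch(size_t first, size_t num)
{
	printf("read %zu pages starting at index %zu\n", num, first);
}

static void batch_indices(const size_t *idx, size_t count)
{
	size_t first = 0, num = 0;

	for (size_t i = 0; i < count; i++) {
		if (num && idx[i] == first + num && num < BATCH_CAP) {
			num++;		/* still contiguous, extend the run */
			continue;
		}
		if (num)
			send_batch(first, num);
		first = idx[i];		/* start a new run */
		num = 1;
	}
	if (num)
		send_batch(first, num);
}

int main(void)
{
	size_t idx[] = { 3, 4, 5, 9, 10, 20 };

	batch_indices(idx, sizeof(idx) / sizeof(idx[0]));
	return 0;
}
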
-/*
- * Small address space operations for POHMELFS.
- */
-const struct address_space_operations pohmelfs_aops = {
- .readpage = pohmelfs_readpage,
- .readpages = pohmelfs_readpages,
- .writepages = pohmelfs_writepages,
- .write_begin = pohmelfs_write_begin,
- .write_end = pohmelfs_write_end,
- .set_page_dirty = __set_page_dirty_nobuffers,
-};
-
-static void pohmelfs_i_callback(struct rcu_head *head)
-{
- struct inode *inode = container_of(head, struct inode, i_rcu);
- INIT_LIST_HEAD(&inode->i_dentry);
- kmem_cache_free(pohmelfs_inode_cache, POHMELFS_I(inode));
-}
-
-/*
- * ->destroy_inode() callback. Deletes inode from the caches
- * and frees private data.
- */
-static void pohmelfs_destroy_inode(struct inode *inode)
-{
- struct super_block *sb = inode->i_sb;
- struct pohmelfs_sb *psb = POHMELFS_SB(sb);
- struct pohmelfs_inode *pi = POHMELFS_I(inode);
-
- /* pohmelfs_data_unlock(pi, 0, inode->i_size, POHMELFS_READ_LOCK); */
-
- pohmelfs_inode_del_inode(psb, pi);
-
- dprintk("%s: pi: %p, inode: %p, ino: %llu.\n",
- __func__, pi, &pi->vfs_inode, pi->ino);
- atomic_long_dec(&psb->total_inodes);
- call_rcu(&inode->i_rcu, pohmelfs_i_callback);
-}
-
-/*
- * ->alloc_inode() callback. Allocates inode and initializes private data.
- */
-static struct inode *pohmelfs_alloc_inode(struct super_block *sb)
-{
- struct pohmelfs_inode *pi;
-
- pi = kmem_cache_alloc(pohmelfs_inode_cache, GFP_NOIO);
- if (!pi)
- return NULL;
-
- pi->hash_root = RB_ROOT;
- mutex_init(&pi->offset_lock);
-
- INIT_LIST_HEAD(&pi->sync_create_list);
-
- INIT_LIST_HEAD(&pi->inode_entry);
-
- pi->lock_type = 0;
- pi->state = 0;
- pi->total_len = 0;
- pi->drop_count = 0;
-
- dprintk("%s: pi: %p, inode: %p.\n", __func__, pi, &pi->vfs_inode);
-
- atomic_long_inc(&POHMELFS_SB(sb)->total_inodes);
-
- return &pi->vfs_inode;
-}
-
-/*
- * We want fsync() to work on POHMELFS.
- */
-static int pohmelfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
-{
- struct inode *inode = file->f_mapping->host;
- int err = filemap_write_and_wait_range(inode->i_mapping, start, end);
- if (!err) {
- mutex_lock(&inode->i_mutex);
- err = sync_inode_metadata(inode, 1);
- mutex_unlock(&inode->i_mutex);
- }
- return err;
-}
-
-ssize_t pohmelfs_write(struct file *file, const char __user *buf,
- size_t len, loff_t *ppos)
-{
- struct address_space *mapping = file->f_mapping;
- struct inode *inode = mapping->host;
- struct pohmelfs_inode *pi = POHMELFS_I(inode);
- struct iovec iov = { .iov_base = (void __user *)buf, .iov_len = len };
- struct kiocb kiocb;
- ssize_t ret;
- loff_t pos = *ppos;
-
- init_sync_kiocb(&kiocb, file);
- kiocb.ki_pos = pos;
- kiocb.ki_left = len;
-
- dprintk("%s: len: %zu, pos: %llu.\n", __func__, len, pos);
-
- mutex_lock(&inode->i_mutex);
- ret = pohmelfs_data_lock(pi, pos, len, POHMELFS_WRITE_LOCK);
- if (ret)
- goto err_out_unlock;
-
- ret = __generic_file_aio_write(&kiocb, &iov, 1, &kiocb.ki_pos);
- *ppos = kiocb.ki_pos;
-
- mutex_unlock(&inode->i_mutex);
- WARN_ON(ret < 0);
-
- if (ret > 0) {
- ssize_t err;
-
- err = generic_write_sync(file, pos, ret);
- if (err < 0)
- ret = err;
- WARN_ON(ret < 0);
- }
-
- return ret;
-
-err_out_unlock:
- mutex_unlock(&inode->i_mutex);
- return ret;
-}
-
-static const struct file_operations pohmelfs_file_ops = {
- .open = generic_file_open,
- .fsync = pohmelfs_fsync,
-
- .llseek = generic_file_llseek,
-
- .read = do_sync_read,
- .aio_read = generic_file_aio_read,
-
- .mmap = generic_file_mmap,
-
- .splice_read = generic_file_splice_read,
- .splice_write = generic_file_splice_write,
-
- .write = pohmelfs_write,
- .aio_write = generic_file_aio_write,
-};
-
-const struct inode_operations pohmelfs_symlink_inode_operations = {
- .readlink = generic_readlink,
- .follow_link = page_follow_link_light,
- .put_link = page_put_link,
-};
-
-int pohmelfs_setattr_raw(struct inode *inode, struct iattr *attr)
-{
- int err;
-
- err = inode_change_ok(inode, attr);
- if (err) {
- dprintk("%s: ino: %llu, inode changes are not allowed.\n", __func__, POHMELFS_I(inode)->ino);
- goto err_out_exit;
- }
-
- if ((attr->ia_valid & ATTR_SIZE) &&
- attr->ia_size != i_size_read(inode)) {
- err = vmtruncate(inode, attr->ia_size);
- if (err) {
- dprintk("%s: ino: %llu, failed to set the attributes.\n", __func__, POHMELFS_I(inode)->ino);
- goto err_out_exit;
- }
- }
-
- setattr_copy(inode, attr);
- mark_inode_dirty(inode);
-
- dprintk("%s: ino: %llu, mode: %o -> %o, uid: %u -> %u, gid: %u -> %u, size: %llu -> %llu.\n",
- __func__, POHMELFS_I(inode)->ino, inode->i_mode, attr->ia_mode,
- inode->i_uid, attr->ia_uid, inode->i_gid, attr->ia_gid, inode->i_size, attr->ia_size);
-
- return 0;
-
-err_out_exit:
- return err;
-}
-
-int pohmelfs_setattr(struct dentry *dentry, struct iattr *attr)
-{
- struct inode *inode = dentry->d_inode;
- struct pohmelfs_inode *pi = POHMELFS_I(inode);
- int err;
-
- err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_WRITE_LOCK);
- if (err)
- goto err_out_exit;
-
- err = security_inode_setattr(dentry, attr);
- if (err)
- goto err_out_exit;
-
- err = pohmelfs_setattr_raw(inode, attr);
- if (err)
- goto err_out_exit;
-
- return 0;
-
-err_out_exit:
- return err;
-}
-
-static int pohmelfs_send_xattr_req(struct pohmelfs_inode *pi, u64 id, u64 start,
- const char *name, const void *value, size_t attrsize, int command)
-{
- struct pohmelfs_sb *psb = POHMELFS_SB(pi->vfs_inode.i_sb);
- int err, path_len, namelen = strlen(name) + 1; /* 0-byte */
- struct netfs_trans *t;
- struct netfs_cmd *cmd;
- void *data;
-
- dprintk("%s: id: %llu, start: %llu, name: '%s', attrsize: %zu, cmd: %d.\n",
- __func__, id, start, name, attrsize, command);
-
- path_len = pohmelfs_path_length(pi);
- if (path_len < 0) {
- err = path_len;
- goto err_out_exit;
- }
-
- t = netfs_trans_alloc(psb, namelen + path_len + attrsize, 0, 0);
- if (!t) {
- err = -ENOMEM;
- goto err_out_exit;
- }
-
- cmd = netfs_trans_current(t);
- data = cmd + 1;
-
- path_len = pohmelfs_construct_path_string(pi, data, path_len);
- if (path_len < 0) {
- err = path_len;
- goto err_out_put;
- }
- data += path_len;
-
- /*
- * 'name' is a NUL-terminated string already and
- * 'namelen' includes 0-byte.
- */
- memcpy(data, name, namelen);
- data += namelen;
-
- memcpy(data, value, attrsize);
-
- cmd->cmd = command;
- cmd->id = id;
- cmd->start = start;
- cmd->size = attrsize + namelen + path_len;
- cmd->ext = path_len;
- cmd->csize = 0;
- cmd->cpad = 0;
-
- netfs_convert_cmd(cmd);
- netfs_trans_update(cmd, t, namelen + path_len + attrsize);
-
- return netfs_trans_finish(t, psb);
-
-err_out_put:
- t->result = err;
- netfs_trans_put(t);
-err_out_exit:
- return err;
-}
-
-static int pohmelfs_setxattr(struct dentry *dentry, const char *name,
- const void *value, size_t attrsize, int flags)
-{
- struct inode *inode = dentry->d_inode;
- struct pohmelfs_inode *pi = POHMELFS_I(inode);
- struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
-
- if (!(psb->state_flags & POHMELFS_FLAGS_XATTR))
- return -EOPNOTSUPP;
-
- return pohmelfs_send_xattr_req(pi, flags, attrsize, name,
- value, attrsize, NETFS_XATTR_SET);
-}
-
-static ssize_t pohmelfs_getxattr(struct dentry *dentry, const char *name,
- void *value, size_t attrsize)
-{
- struct inode *inode = dentry->d_inode;
- struct pohmelfs_inode *pi = POHMELFS_I(inode);
- struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
- struct pohmelfs_mcache *m;
- int err;
- long timeout = psb->mcache_timeout;
-
- if (!(psb->state_flags & POHMELFS_FLAGS_XATTR))
- return -EOPNOTSUPP;
-
- m = pohmelfs_mcache_alloc(psb, 0, attrsize, value);
- if (IS_ERR(m))
- return PTR_ERR(m);
-
- dprintk("%s: ino: %llu, name: '%s', size: %zu.\n",
- __func__, pi->ino, name, attrsize);
-
- err = pohmelfs_send_xattr_req(pi, m->gen, attrsize, name, value, 0, NETFS_XATTR_GET);
- if (err)
- goto err_out_put;
-
- do {
- err = wait_for_completion_timeout(&m->complete, timeout);
- if (err) {
- err = m->err;
- break;
- }
-
- /*
-		 * This loop is a bit ugly: it waits until the reference counter
-		 * hits 1 and only then puts the object. The goal is to avoid a
-		 * race with the network thread, which may have started processing
-		 * the request (and so taken a reference) without completing it,
-		 * while we bail out of ->getxattr() with a timeout. The request
-		 * itself would not be freed (the network thread holds a reference),
-		 * but the data pointer supplied by the caller may be released, so
-		 * the network thread would end up writing into already freed memory.
-		 *
-		 * So, after a timeout we remove the request from the cache, so the
-		 * network thread can no longer find it, and wait for its reference
-		 * counter to drop back to 1: if the network thread has already
-		 * started processing the request, we wait for it to finish and then
-		 * free the object locally. If the counter is already 1, nobody else
-		 * is using the request and it can be freed right away.
- */
- err = -ETIMEDOUT;
- timeout = HZ;
-
- pohmelfs_mcache_remove_locked(psb, m);
- } while (atomic_read(&m->refcnt) != 1);
-
- pohmelfs_mcache_put(psb, m);
-
- dprintk("%s: ino: %llu, err: %d.\n", __func__, pi->ino, err);
-
- return err;
-
-err_out_put:
- pohmelfs_mcache_put(psb, m);
- return err;
-}
-
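The comment above describes a drain-before-free discipline: on timeout the request is first unpublished from the cache so the network thread can no longer look it up, and it is only released once its reference count falls back to 1, because the request still points at the caller's buffer. A hypothetical, much simplified userspace sketch of the idea (names and the yield loop are illustrative only):

#include <sched.h>
#include <stdatomic.h>
#include <stdlib.h>

struct xattr_req {
	atomic_int refcnt;	/* 1 == only the submitter holds it */
	void *user_buf;		/* must not be touched after we return */
};

static void drain_and_free(struct xattr_req *r)
{
	/* The caller already removed 'r' from the lookup structure, so no
	 * new reference can be taken; wait out any in-flight user. */
	while (atomic_load(&r->refcnt) != 1)
		sched_yield();

	free(r);		/* we were the last user */
}

int main(void)
{
	struct xattr_req *r = calloc(1, sizeof(*r));

	if (!r)
		return 1;
	atomic_init(&r->refcnt, 1);
	drain_and_free(r);
	return 0;
}
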
-static int pohmelfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
-{
- struct inode *inode = dentry->d_inode;
-#if 0
- struct pohmelfs_inode *pi = POHMELFS_I(inode);
- int err;
-
- err = pohmelfs_data_lock(pi, 0, ~0, POHMELFS_READ_LOCK);
- if (err)
- return err;
- dprintk("%s: ino: %llu, mode: %o, uid: %u, gid: %u, size: %llu.\n",
- __func__, pi->ino, inode->i_mode, inode->i_uid,
- inode->i_gid, inode->i_size);
-#endif
-
- generic_fillattr(inode, stat);
- return 0;
-}
-
-const struct inode_operations pohmelfs_file_inode_operations = {
- .setattr = pohmelfs_setattr,
- .getattr = pohmelfs_getattr,
- .setxattr = pohmelfs_setxattr,
- .getxattr = pohmelfs_getxattr,
-};
-
-/*
- * Fill inode data: mode, size, operation callbacks and so on...
- */
-void pohmelfs_fill_inode(struct inode *inode, struct netfs_inode_info *info)
-{
- inode->i_mode = info->mode;
- set_nlink(inode, info->nlink);
- inode->i_uid = info->uid;
- inode->i_gid = info->gid;
- inode->i_blocks = info->blocks;
- inode->i_rdev = info->rdev;
- inode->i_size = info->size;
- inode->i_version = info->version;
- inode->i_blkbits = ffs(info->blocksize);
-
- dprintk("%s: inode: %p, num: %lu/%llu inode is regular: %d, dir: %d, link: %d, mode: %o, size: %llu.\n",
- __func__, inode, inode->i_ino, info->ino,
- S_ISREG(inode->i_mode), S_ISDIR(inode->i_mode),
- S_ISLNK(inode->i_mode), inode->i_mode, inode->i_size);
-
- inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
-
- /*
- * i_mapping is a pointer to i_data during inode initialization.
- */
- inode->i_data.a_ops = &pohmelfs_aops;
-
- if (S_ISREG(inode->i_mode)) {
- inode->i_fop = &pohmelfs_file_ops;
- inode->i_op = &pohmelfs_file_inode_operations;
- } else if (S_ISDIR(inode->i_mode)) {
- inode->i_fop = &pohmelfs_dir_fops;
- inode->i_op = &pohmelfs_dir_inode_ops;
- } else if (S_ISLNK(inode->i_mode)) {
- inode->i_op = &pohmelfs_symlink_inode_operations;
- inode->i_fop = &pohmelfs_file_ops;
- } else {
- inode->i_fop = &generic_ro_fops;
- }
-}
-
-static int pohmelfs_drop_inode(struct inode *inode)
-{
- struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
- struct pohmelfs_inode *pi = POHMELFS_I(inode);
-
- spin_lock(&psb->ino_lock);
- list_del_init(&pi->inode_entry);
- spin_unlock(&psb->ino_lock);
-
- return generic_drop_inode(inode);
-}
-
-static struct pohmelfs_inode *pohmelfs_get_inode_from_list(struct pohmelfs_sb *psb,
- struct list_head *head, unsigned int *count)
-{
- struct pohmelfs_inode *pi = NULL;
-
- spin_lock(&psb->ino_lock);
- if (!list_empty(head)) {
- pi = list_entry(head->next, struct pohmelfs_inode,
- inode_entry);
- list_del_init(&pi->inode_entry);
- *count = pi->drop_count;
- pi->drop_count = 0;
- }
- spin_unlock(&psb->ino_lock);
-
- return pi;
-}
-
-static void pohmelfs_flush_transactions(struct pohmelfs_sb *psb)
-{
- struct pohmelfs_config *c;
-
- mutex_lock(&psb->state_lock);
- list_for_each_entry(c, &psb->state_list, config_entry) {
- pohmelfs_state_flush_transactions(&c->state);
- }
- mutex_unlock(&psb->state_lock);
-}
-
-/*
- * ->put_super() callback. Invoked before superblock is destroyed,
- * so it has to clean all private data.
- */
-static void pohmelfs_put_super(struct super_block *sb)
-{
- struct pohmelfs_sb *psb = POHMELFS_SB(sb);
- struct pohmelfs_inode *pi;
- unsigned int count = 0;
- unsigned int in_drop_list = 0;
- struct inode *inode, *tmp;
-
- dprintk("%s.\n", __func__);
-
- /*
- * Kill pending transactions, which could affect inodes in-flight.
- */
- pohmelfs_flush_transactions(psb);
-
- while ((pi = pohmelfs_get_inode_from_list(psb, &psb->drop_list, &count))) {
- inode = &pi->vfs_inode;
-
- dprintk("%s: ino: %llu, pi: %p, inode: %p, count: %u.\n",
- __func__, pi->ino, pi, inode, count);
-
- if (atomic_read(&inode->i_count) != count) {
- printk("%s: ino: %llu, pi: %p, inode: %p, count: %u, i_count: %d.\n",
- __func__, pi->ino, pi, inode, count,
- atomic_read(&inode->i_count));
- count = atomic_read(&inode->i_count);
- in_drop_list++;
- }
-
- while (count--)
- iput(&pi->vfs_inode);
- }
-
- list_for_each_entry_safe(inode, tmp, &sb->s_inodes, i_sb_list) {
- pi = POHMELFS_I(inode);
-
- dprintk("%s: ino: %llu, pi: %p, inode: %p, i_count: %u.\n",
- __func__, pi->ino, pi, inode, atomic_read(&inode->i_count));
-
- /*
-		 * These are special inodes: they were created during
-		 * directory reading or lookup and were never bound to a dentry,
-		 * so they sit here with a reference count of 1 and prevent
-		 * umount from succeeding since it believes they are busy.
- */
- count = atomic_read(&inode->i_count);
- if (count) {
- list_del_init(&inode->i_sb_list);
- while (count--)
- iput(&pi->vfs_inode);
- }
- }
-
- psb->trans_scan_timeout = psb->drop_scan_timeout = 0;
- cancel_delayed_work_sync(&psb->dwork);
- cancel_delayed_work_sync(&psb->drop_dwork);
- flush_scheduled_work();
-
- dprintk("%s: stopped workqueues.\n", __func__);
-
- pohmelfs_crypto_exit(psb);
- pohmelfs_state_exit(psb);
-
- bdi_destroy(&psb->bdi);
-
- kfree(psb);
- sb->s_fs_info = NULL;
-}
-
-static int pohmelfs_statfs(struct dentry *dentry, struct kstatfs *buf)
-{
- struct super_block *sb = dentry->d_sb;
- struct pohmelfs_sb *psb = POHMELFS_SB(sb);
-
- /*
- * There are no filesystem size limits yet.
- */
- memset(buf, 0, sizeof(struct kstatfs));
-
- buf->f_type = POHMELFS_MAGIC_NUM; /* 'POH.' */
- buf->f_bsize = sb->s_blocksize;
- buf->f_files = psb->ino;
- buf->f_namelen = 255;
- buf->f_files = atomic_long_read(&psb->total_inodes);
- buf->f_bfree = buf->f_bavail = psb->avail_size >> PAGE_SHIFT;
- buf->f_blocks = psb->total_size >> PAGE_SHIFT;
-
- dprintk("%s: total: %llu, avail: %llu, inodes: %llu, bsize: %lu.\n",
- __func__, psb->total_size, psb->avail_size, buf->f_files, sb->s_blocksize);
-
- return 0;
-}
-
-static int pohmelfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
-{
- struct pohmelfs_sb *psb = POHMELFS_SB(vfs->mnt_sb);
-
- seq_printf(seq, ",idx=%u", psb->idx);
- seq_printf(seq, ",trans_scan_timeout=%u", jiffies_to_msecs(psb->trans_scan_timeout));
- seq_printf(seq, ",drop_scan_timeout=%u", jiffies_to_msecs(psb->drop_scan_timeout));
- seq_printf(seq, ",wait_on_page_timeout=%u", jiffies_to_msecs(psb->wait_on_page_timeout));
- seq_printf(seq, ",trans_retries=%u", psb->trans_retries);
- seq_printf(seq, ",crypto_thread_num=%u", psb->crypto_thread_num);
- seq_printf(seq, ",trans_max_pages=%u", psb->trans_max_pages);
- seq_printf(seq, ",mcache_timeout=%u", jiffies_to_msecs(psb->mcache_timeout));
- if (psb->crypto_fail_unsupported)
- seq_printf(seq, ",crypto_fail_unsupported");
-
- return 0;
-}
-
-enum {
- pohmelfs_opt_idx,
- pohmelfs_opt_crypto_thread_num,
- pohmelfs_opt_trans_max_pages,
- pohmelfs_opt_crypto_fail_unsupported,
-
- /* Remountable options */
- pohmelfs_opt_trans_scan_timeout,
- pohmelfs_opt_drop_scan_timeout,
- pohmelfs_opt_wait_on_page_timeout,
- pohmelfs_opt_trans_retries,
- pohmelfs_opt_mcache_timeout,
-};
-
-static struct match_token pohmelfs_tokens[] = {
- {pohmelfs_opt_idx, "idx=%u"},
- {pohmelfs_opt_crypto_thread_num, "crypto_thread_num=%u"},
- {pohmelfs_opt_trans_max_pages, "trans_max_pages=%u"},
- {pohmelfs_opt_crypto_fail_unsupported, "crypto_fail_unsupported"},
- {pohmelfs_opt_trans_scan_timeout, "trans_scan_timeout=%u"},
- {pohmelfs_opt_drop_scan_timeout, "drop_scan_timeout=%u"},
- {pohmelfs_opt_wait_on_page_timeout, "wait_on_page_timeout=%u"},
- {pohmelfs_opt_trans_retries, "trans_retries=%u"},
- {pohmelfs_opt_mcache_timeout, "mcache_timeout=%u"},
-};
-
-static int pohmelfs_parse_options(char *options, struct pohmelfs_sb *psb, int remount)
-{
- char *p;
- substring_t args[MAX_OPT_ARGS];
- int option, err;
-
- if (!options)
- return 0;
-
- while ((p = strsep(&options, ",")) != NULL) {
- int token;
- if (!*p)
- continue;
-
- token = match_token(p, pohmelfs_tokens, args);
-
- err = match_int(&args[0], &option);
- if (err)
- return err;
-
- if (remount && token <= pohmelfs_opt_crypto_fail_unsupported)
- continue;
-
- switch (token) {
- case pohmelfs_opt_idx:
- psb->idx = option;
- break;
- case pohmelfs_opt_trans_scan_timeout:
- psb->trans_scan_timeout = msecs_to_jiffies(option);
- break;
- case pohmelfs_opt_drop_scan_timeout:
- psb->drop_scan_timeout = msecs_to_jiffies(option);
- break;
- case pohmelfs_opt_wait_on_page_timeout:
- psb->wait_on_page_timeout = msecs_to_jiffies(option);
- break;
- case pohmelfs_opt_mcache_timeout:
- psb->mcache_timeout = msecs_to_jiffies(option);
- break;
- case pohmelfs_opt_trans_retries:
- psb->trans_retries = option;
- break;
- case pohmelfs_opt_crypto_thread_num:
- psb->crypto_thread_num = option;
- break;
- case pohmelfs_opt_trans_max_pages:
- psb->trans_max_pages = option;
- break;
- case pohmelfs_opt_crypto_fail_unsupported:
- psb->crypto_fail_unsupported = 1;
- break;
- default:
- return -EINVAL;
- }
- }
-
- return 0;
-}
-
-static int pohmelfs_remount(struct super_block *sb, int *flags, char *data)
-{
- int err;
- struct pohmelfs_sb *psb = POHMELFS_SB(sb);
- unsigned long old_sb_flags = sb->s_flags;
-
- err = pohmelfs_parse_options(data, psb, 1);
- if (err)
- goto err_out_restore;
-
- if (!(*flags & MS_RDONLY))
- sb->s_flags &= ~MS_RDONLY;
- return 0;
-
-err_out_restore:
- sb->s_flags = old_sb_flags;
- return err;
-}
-
-static void pohmelfs_flush_inode(struct pohmelfs_inode *pi, unsigned int count)
-{
- struct inode *inode = &pi->vfs_inode;
-
- dprintk("%s: %p: ino: %llu, owned: %d.\n",
- __func__, inode, pi->ino, test_bit(NETFS_INODE_OWNED, &pi->state));
-
- mutex_lock(&inode->i_mutex);
- if (test_and_clear_bit(NETFS_INODE_OWNED, &pi->state)) {
- filemap_fdatawrite(inode->i_mapping);
- inode->i_sb->s_op->write_inode(inode, 0);
- }
-
-#ifdef POHMELFS_TRUNCATE_ON_INODE_FLUSH
- truncate_inode_pages(inode->i_mapping, 0);
-#endif
-
- pohmelfs_data_unlock(pi, 0, ~0, POHMELFS_WRITE_LOCK);
- mutex_unlock(&inode->i_mutex);
-}
-
-static void pohmelfs_put_inode_count(struct pohmelfs_inode *pi, unsigned int count)
-{
- dprintk("%s: ino: %llu, pi: %p, inode: %p, count: %u.\n",
- __func__, pi->ino, pi, &pi->vfs_inode, count);
-
- if (test_and_clear_bit(NETFS_INODE_NEED_FLUSH, &pi->state))
- pohmelfs_flush_inode(pi, count);
-
- while (count--)
- iput(&pi->vfs_inode);
-}
-
-static void pohmelfs_drop_scan(struct work_struct *work)
-{
- struct pohmelfs_sb *psb =
- container_of(work, struct pohmelfs_sb, drop_dwork.work);
- struct pohmelfs_inode *pi;
- unsigned int count = 0;
-
- while ((pi = pohmelfs_get_inode_from_list(psb, &psb->drop_list, &count)))
- pohmelfs_put_inode_count(pi, count);
-
- pohmelfs_check_states(psb);
-
- if (psb->drop_scan_timeout)
- schedule_delayed_work(&psb->drop_dwork, psb->drop_scan_timeout);
-}
-
-/*
- * Run through all transactions starting from the oldest, drop each
- * transaction from the current state and try to resend it to all
- * currently installed remote nodes.
- */
-static void pohmelfs_trans_scan_state(struct netfs_state *st)
-{
- struct rb_node *rb_node;
- struct netfs_trans_dst *dst;
- struct pohmelfs_sb *psb = st->psb;
- unsigned int timeout = psb->trans_scan_timeout;
- struct netfs_trans *t;
- int err;
-
- mutex_lock(&st->trans_lock);
- for (rb_node = rb_first(&st->trans_root); rb_node; ) {
- dst = rb_entry(rb_node, struct netfs_trans_dst, state_entry);
- t = dst->trans;
-
- if (timeout && time_after(dst->send_time + timeout, jiffies)
- && dst->retries == 0)
- break;
-
- dprintk("%s: t: %p, gen: %u, st: %p, retries: %u, max: %u.\n",
- __func__, t, t->gen, st, dst->retries, psb->trans_retries);
- netfs_trans_get(t);
-
- rb_node = rb_next(rb_node);
-
- err = -ETIMEDOUT;
- if (timeout && (++dst->retries < psb->trans_retries))
- err = netfs_trans_resend(t, psb);
-
- if (err || (t->flags & NETFS_TRANS_SINGLE_DST)) {
- if (netfs_trans_remove_nolock(dst, st))
- netfs_trans_drop_dst_nostate(dst);
- }
-
- t->result = err;
- netfs_trans_put(t);
- }
- mutex_unlock(&st->trans_lock);
-}
-
-/*
- * Walk through all installed network states and resend all
- * transactions that are old enough.
- */
-static void pohmelfs_trans_scan(struct work_struct *work)
-{
- struct pohmelfs_sb *psb =
- container_of(work, struct pohmelfs_sb, dwork.work);
- struct netfs_state *st;
- struct pohmelfs_config *c;
-
- mutex_lock(&psb->state_lock);
- list_for_each_entry(c, &psb->state_list, config_entry) {
- st = &c->state;
-
- pohmelfs_trans_scan_state(st);
- }
- mutex_unlock(&psb->state_lock);
-
- /*
-	 * If no timeout is specified, the system is in the middle of the umount
-	 * process, so there is no need to reschedule the scan.
- */
- if (psb->trans_scan_timeout)
- schedule_delayed_work(&psb->dwork, psb->trans_scan_timeout);
-}
-
-int pohmelfs_meta_command_data(struct pohmelfs_inode *pi, u64 id, unsigned int cmd_op, char *addon,
- unsigned int flags, netfs_trans_complete_t complete, void *priv, u64 start)
-{
- struct inode *inode = &pi->vfs_inode;
- struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
- int err = 0, sz;
- struct netfs_trans *t;
- int path_len, addon_len = 0;
- void *data;
- struct netfs_inode_info *info;
- struct netfs_cmd *cmd;
-
- dprintk("%s: ino: %llu, cmd: %u, addon: %p.\n", __func__, pi->ino, cmd_op, addon);
-
- path_len = pohmelfs_path_length(pi);
- if (path_len < 0) {
- err = path_len;
- goto err_out_exit;
- }
-
- if (addon)
- addon_len = strlen(addon) + 1; /* 0-byte */
- sz = addon_len;
-
- if (cmd_op == NETFS_INODE_INFO)
- sz += sizeof(struct netfs_inode_info);
-
- t = netfs_trans_alloc(psb, sz + path_len, flags, 0);
- if (!t) {
- err = -ENOMEM;
- goto err_out_exit;
- }
- t->complete = complete;
- t->private = priv;
-
- cmd = netfs_trans_current(t);
- data = (void *)(cmd + 1);
-
- if (cmd_op == NETFS_INODE_INFO) {
- info = (struct netfs_inode_info *)(cmd + 1);
- data = (void *)(info + 1);
-
- /*
- * We are under i_mutex, can read and change whatever we want...
- */
- info->mode = inode->i_mode;
- info->nlink = inode->i_nlink;
- info->uid = inode->i_uid;
- info->gid = inode->i_gid;
- info->blocks = inode->i_blocks;
- info->rdev = inode->i_rdev;
- info->size = inode->i_size;
- info->version = inode->i_version;
-
- netfs_convert_inode_info(info);
- }
-
- path_len = pohmelfs_construct_path_string(pi, data, path_len);
- if (path_len < 0)
- goto err_out_free;
-
- dprintk("%s: path_len: %d.\n", __func__, path_len);
-
- if (addon) {
- path_len--; /* Do not place null-byte before the addon */
- path_len += sprintf(data + path_len, "/%s", addon) + 1; /* 0 - byte */
- }
-
- sz += path_len;
-
- cmd->cmd = cmd_op;
- cmd->ext = path_len;
- cmd->size = sz;
- cmd->id = id;
- cmd->start = start;
-
- netfs_convert_cmd(cmd);
- netfs_trans_update(cmd, t, sz);
-
- /*
-	 * Note that it is possible to leak an error here: the transaction
-	 * callback will not be invoked for an allocation-path failure.
- */
- return netfs_trans_finish(t, psb);
-
-err_out_free:
- netfs_trans_free(t);
-err_out_exit:
- if (complete)
- complete(NULL, 0, priv, err);
- return err;
-}
-
-int pohmelfs_meta_command(struct pohmelfs_inode *pi, unsigned int cmd_op, unsigned int flags,
- netfs_trans_complete_t complete, void *priv, u64 start)
-{
- return pohmelfs_meta_command_data(pi, pi->ino, cmd_op, NULL, flags, complete, priv, start);
-}
-
-/*
- * Send request and wait for POHMELFS root capabilities response,
- * which updates our information about the export: its size, permissions,
- * number of objects, available size and so on.
- */
-static int pohmelfs_root_handshake(struct pohmelfs_sb *psb)
-{
- struct netfs_trans *t;
- struct netfs_cmd *cmd;
- int err = -ENOMEM;
-
- t = netfs_trans_alloc(psb, 0, 0, 0);
- if (!t)
- goto err_out_exit;
-
- cmd = netfs_trans_current(t);
-
- cmd->cmd = NETFS_CAPABILITIES;
- cmd->id = POHMELFS_ROOT_CAPABILITIES;
- cmd->size = 0;
- cmd->start = 0;
- cmd->ext = 0;
- cmd->csize = 0;
-
- netfs_convert_cmd(cmd);
- netfs_trans_update(cmd, t, 0);
-
- err = netfs_trans_finish(t, psb);
- if (err)
- goto err_out_exit;
-
- psb->flags = ~0;
- err = wait_event_interruptible_timeout(psb->wait,
- (psb->flags != ~0),
- psb->wait_on_page_timeout);
- if (!err)
- err = -ETIMEDOUT;
- else if (err > 0)
- err = -psb->flags;
-
- if (err)
- goto err_out_exit;
-
- return 0;
-
-err_out_exit:
- return err;
-}
-
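The handshake above presets psb->flags to ~0 as a "no reply yet" sentinel and then interprets the three outcomes of the interruptible, timed wait: zero means the timer expired, a negative value means a signal arrived, and a positive value means the reply handler stored a status (0 or a positive errno) in flags. A small helper sketch of that interpretation (the function name is invented):

#include <errno.h>
#include <stdio.h>

/* wait_ret mimics wait_event_interruptible_timeout(): 0 on timeout,
 * negative if interrupted, positive if the condition became true. */
static int interpret_handshake_wait(long wait_ret, unsigned int flags)
{
	if (wait_ret == 0)
		return -ETIMEDOUT;	/* sentinel never changed */
	if (wait_ret < 0)
		return (int)wait_ret;	/* e.g. interrupted by a signal */
	return -(int)flags;		/* 0 on success, -errno from the server */
}

int main(void)
{
	printf("%d %d %d\n",
	       interpret_handshake_wait(0, ~0u),
	       interpret_handshake_wait(10, 0),
	       interpret_handshake_wait(10, EIO));
	return 0;
}
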
-static int pohmelfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
-{
- struct netfs_state *st;
- struct pohmelfs_ctl *ctl;
- struct pohmelfs_sb *psb = POHMELFS_SB(mnt->mnt_sb);
- struct pohmelfs_config *c;
-
- mutex_lock(&psb->state_lock);
-
- seq_printf(m, "\nidx addr(:port) socket_type protocol active priority permissions\n");
-
- list_for_each_entry(c, &psb->state_list, config_entry) {
- st = &c->state;
- ctl = &st->ctl;
-
- seq_printf(m, "%u ", ctl->idx);
- if (ctl->addr.sa_family == AF_INET) {
- struct sockaddr_in *sin = (struct sockaddr_in *)&st->ctl.addr;
- seq_printf(m, "%pI4:%u", &sin->sin_addr.s_addr, ntohs(sin->sin_port));
- } else if (ctl->addr.sa_family == AF_INET6) {
- struct sockaddr_in6 *sin = (struct sockaddr_in6 *)&st->ctl.addr;
- seq_printf(m, "%pi6:%u", &sin->sin6_addr, ntohs(sin->sin6_port));
- } else {
- unsigned int i;
- for (i = 0; i < ctl->addrlen; ++i)
- seq_printf(m, "%02x.", ctl->addr.addr[i]);
- }
-
- seq_printf(m, " %u %u %d %u %x\n",
- ctl->type, ctl->proto,
- st->socket != NULL,
- ctl->prio, ctl->perm);
- }
- mutex_unlock(&psb->state_lock);
-
- return 0;
-}
-
-static const struct super_operations pohmelfs_sb_ops = {
- .alloc_inode = pohmelfs_alloc_inode,
- .destroy_inode = pohmelfs_destroy_inode,
- .drop_inode = pohmelfs_drop_inode,
- .write_inode = pohmelfs_write_inode,
- .put_super = pohmelfs_put_super,
- .remount_fs = pohmelfs_remount,
- .statfs = pohmelfs_statfs,
- .show_options = pohmelfs_show_options,
- .show_stats = pohmelfs_show_stats,
-};
-
-/*
- * Allocate private superblock and create root dir.
- */
-static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
-{
- struct pohmelfs_sb *psb;
- int err = -ENOMEM;
- struct inode *root;
- struct pohmelfs_inode *npi;
- struct qstr str;
-
- psb = kzalloc(sizeof(struct pohmelfs_sb), GFP_KERNEL);
- if (!psb)
- goto err_out_exit;
-
- err = bdi_init(&psb->bdi);
- if (err)
- goto err_out_free_sb;
-
- err = bdi_register(&psb->bdi, NULL, "pfs-%d", atomic_inc_return(&psb_bdi_num));
- if (err) {
- bdi_destroy(&psb->bdi);
- goto err_out_free_sb;
- }
-
- sb->s_fs_info = psb;
- sb->s_op = &pohmelfs_sb_ops;
- sb->s_magic = POHMELFS_MAGIC_NUM;
- sb->s_maxbytes = MAX_LFS_FILESIZE;
- sb->s_blocksize = PAGE_SIZE;
- sb->s_bdi = &psb->bdi;
-
- psb->sb = sb;
-
- psb->ino = 2;
- psb->idx = 0;
- psb->active_state = NULL;
- psb->trans_retries = 5;
- psb->trans_data_size = PAGE_SIZE;
- psb->drop_scan_timeout = msecs_to_jiffies(1000);
- psb->trans_scan_timeout = msecs_to_jiffies(5000);
- psb->wait_on_page_timeout = msecs_to_jiffies(5000);
- init_waitqueue_head(&psb->wait);
-
- spin_lock_init(&psb->ino_lock);
-
- INIT_LIST_HEAD(&psb->drop_list);
-
- mutex_init(&psb->mcache_lock);
- psb->mcache_root = RB_ROOT;
- psb->mcache_timeout = msecs_to_jiffies(5000);
- atomic_long_set(&psb->mcache_gen, 0);
-
- psb->trans_max_pages = 100;
-
- psb->crypto_align_size = 16;
- psb->crypto_attached_size = 0;
- psb->hash_strlen = 0;
- psb->cipher_strlen = 0;
- psb->perform_crypto = 0;
- psb->crypto_thread_num = 2;
- psb->crypto_fail_unsupported = 0;
- mutex_init(&psb->crypto_thread_lock);
- INIT_LIST_HEAD(&psb->crypto_ready_list);
- INIT_LIST_HEAD(&psb->crypto_active_list);
-
- atomic_set(&psb->trans_gen, 1);
- atomic_long_set(&psb->total_inodes, 0);
-
- mutex_init(&psb->state_lock);
- INIT_LIST_HEAD(&psb->state_list);
-
- err = pohmelfs_parse_options((char *) data, psb, 0);
- if (err)
- goto err_out_free_bdi;
-
- err = pohmelfs_copy_crypto(psb);
- if (err)
- goto err_out_free_bdi;
-
- err = pohmelfs_state_init(psb);
- if (err)
- goto err_out_free_strings;
-
- err = pohmelfs_crypto_init(psb);
- if (err)
- goto err_out_state_exit;
-
- err = pohmelfs_root_handshake(psb);
- if (err)
- goto err_out_crypto_exit;
-
- str.name = "/";
- str.hash = jhash("/", 1, 0);
- str.len = 1;
-
- npi = pohmelfs_create_entry_local(psb, NULL, &str, 0, 0755|S_IFDIR);
- if (IS_ERR(npi)) {
- err = PTR_ERR(npi);
- goto err_out_crypto_exit;
- }
- set_bit(NETFS_INODE_REMOTE_SYNCED, &npi->state);
- clear_bit(NETFS_INODE_OWNED, &npi->state);
-
- root = &npi->vfs_inode;
-
- sb->s_root = d_alloc_root(root);
- if (!sb->s_root)
- goto err_out_put_root;
-
- INIT_DELAYED_WORK(&psb->drop_dwork, pohmelfs_drop_scan);
- schedule_delayed_work(&psb->drop_dwork, psb->drop_scan_timeout);
-
- INIT_DELAYED_WORK(&psb->dwork, pohmelfs_trans_scan);
- schedule_delayed_work(&psb->dwork, psb->trans_scan_timeout);
-
- return 0;
-
-err_out_put_root:
- iput(root);
-err_out_crypto_exit:
- pohmelfs_crypto_exit(psb);
-err_out_state_exit:
- pohmelfs_state_exit(psb);
-err_out_free_strings:
- kfree(psb->cipher_string);
- kfree(psb->hash_string);
-err_out_free_bdi:
- bdi_destroy(&psb->bdi);
-err_out_free_sb:
- kfree(psb);
-err_out_exit:
-
- dprintk("%s: err: %d.\n", __func__, err);
- return err;
-}
-
-/*
- * Some VFS magic here...
- */
-static struct dentry *pohmelfs_mount(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data)
-{
- return mount_nodev(fs_type, flags, data, pohmelfs_fill_super);
-}
-
-/*
- * We need this to sync all inodes earlier: when writeback is invoked
- * from the umount/mntput path the dcache has already been shrunk (see
- * generic_shutdown_super()), so inodes can no longer construct their paths.
- */
-static void pohmelfs_kill_super(struct super_block *sb)
-{
- sync_inodes_sb(sb);
- kill_anon_super(sb);
-}
-
-static struct file_system_type pohmel_fs_type = {
- .owner = THIS_MODULE,
- .name = "pohmel",
- .mount = pohmelfs_mount,
- .kill_sb = pohmelfs_kill_super,
-};
-
-/*
- * Cache and module initialization and cleanup routines.
- */
-static void pohmelfs_init_once(void *data)
-{
- struct pohmelfs_inode *pi = data;
-
- inode_init_once(&pi->vfs_inode);
-}
-
-static int __init pohmelfs_init_inodecache(void)
-{
- pohmelfs_inode_cache = kmem_cache_create("pohmelfs_inode_cache",
- sizeof(struct pohmelfs_inode),
- 0, (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD),
- pohmelfs_init_once);
- if (!pohmelfs_inode_cache)
- return -ENOMEM;
-
- return 0;
-}
-
-static void pohmelfs_destroy_inodecache(void)
-{
- kmem_cache_destroy(pohmelfs_inode_cache);
-}
-
-static int __init init_pohmel_fs(void)
-{
- int err;
-
- err = pohmelfs_config_init();
- if (err)
- goto err_out_exit;
-
- err = pohmelfs_init_inodecache();
- if (err)
- goto err_out_config_exit;
-
- err = pohmelfs_mcache_init();
- if (err)
- goto err_out_destroy;
-
- err = netfs_trans_init();
- if (err)
- goto err_out_mcache_exit;
-
- err = register_filesystem(&pohmel_fs_type);
- if (err)
- goto err_out_trans;
-
- return 0;
-
-err_out_trans:
- netfs_trans_exit();
-err_out_mcache_exit:
- pohmelfs_mcache_exit();
-err_out_destroy:
- pohmelfs_destroy_inodecache();
-err_out_config_exit:
- pohmelfs_config_exit();
-err_out_exit:
- return err;
-}
-
-static void __exit exit_pohmel_fs(void)
-{
- unregister_filesystem(&pohmel_fs_type);
- pohmelfs_destroy_inodecache();
- pohmelfs_mcache_exit();
- pohmelfs_config_exit();
- netfs_trans_exit();
-}
-
-module_init(init_pohmel_fs);
-module_exit(exit_pohmel_fs);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
-MODULE_DESCRIPTION("Pohmel filesystem");
diff --git a/drivers/staging/pohmelfs/lock.c b/drivers/staging/pohmelfs/lock.c
deleted file mode 100644
index 6710114cd..0000000
--- a/drivers/staging/pohmelfs/lock.c
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/backing-dev.h>
-#include <linux/fs.h>
-#include <linux/fsnotify.h>
-#include <linux/mempool.h>
-
-#include "netfs.h"
-
-static int pohmelfs_send_lock_trans(struct pohmelfs_inode *pi,
- u64 id, u64 start, u32 size, int type)
-{
- struct inode *inode = &pi->vfs_inode;
- struct pohmelfs_sb *psb = POHMELFS_SB(inode->i_sb);
- struct netfs_trans *t;
- struct netfs_cmd *cmd;
- int path_len, err;
- void *data;
- struct netfs_lock *l;
- int isize = (type & POHMELFS_LOCK_GRAB) ? 0 : sizeof(struct netfs_inode_info);
-
- err = pohmelfs_path_length(pi);
- if (err < 0)
- goto err_out_exit;
-
- path_len = err;
-
- err = -ENOMEM;
- t = netfs_trans_alloc(psb, path_len + sizeof(struct netfs_lock) + isize,
- NETFS_TRANS_SINGLE_DST, 0);
- if (!t)
- goto err_out_exit;
-
- cmd = netfs_trans_current(t);
- data = cmd + 1;
-
- err = pohmelfs_construct_path_string(pi, data, path_len);
- if (err < 0)
- goto err_out_free;
- path_len = err;
-
- l = data + path_len;
-
- l->start = start;
- l->size = size;
- l->type = type;
- l->ino = pi->ino;
-
- cmd->cmd = NETFS_LOCK;
- cmd->start = 0;
- cmd->id = id;
- cmd->size = sizeof(struct netfs_lock) + path_len + isize;
- cmd->ext = path_len;
- cmd->csize = 0;
-
- netfs_convert_cmd(cmd);
- netfs_convert_lock(l);
-
- if (isize) {
- struct netfs_inode_info *info = (struct netfs_inode_info *)(l + 1);
-
- info->mode = inode->i_mode;
- info->nlink = inode->i_nlink;
- info->uid = inode->i_uid;
- info->gid = inode->i_gid;
- info->blocks = inode->i_blocks;
- info->rdev = inode->i_rdev;
- info->size = inode->i_size;
- info->version = inode->i_version;
-
- netfs_convert_inode_info(info);
- }
-
- netfs_trans_update(cmd, t, path_len + sizeof(struct netfs_lock) + isize);
-
- return netfs_trans_finish(t, psb);
-
-err_out_free:
- netfs_trans_free(t);
-err_out_exit:
- printk("%s: err: %d.\n", __func__, err);
- return err;
-}
-
-int pohmelfs_data_lock(struct pohmelfs_inode *pi, u64 start, u32 size, int type)
-{
- struct pohmelfs_sb *psb = POHMELFS_SB(pi->vfs_inode.i_sb);
- struct pohmelfs_mcache *m;
- int err = -ENOMEM;
- struct iattr iattr;
- struct inode *inode = &pi->vfs_inode;
-
- dprintk("%s: %p: ino: %llu, start: %llu, size: %u, "
- "type: %d, locked as: %d, owned: %d.\n",
- __func__, &pi->vfs_inode, pi->ino,
- start, size, type, pi->lock_type,
- !!test_bit(NETFS_INODE_OWNED, &pi->state));
-
- if (!pohmelfs_need_lock(pi, type))
- return 0;
-
- m = pohmelfs_mcache_alloc(psb, start, size, NULL);
- if (IS_ERR(m))
- return PTR_ERR(m);
-
- err = pohmelfs_send_lock_trans(pi, m->gen, start, size,
- type | POHMELFS_LOCK_GRAB);
- if (err)
- goto err_out_put;
-
- err = wait_for_completion_timeout(&m->complete, psb->mcache_timeout);
- if (err)
- err = m->err;
- else
- err = -ETIMEDOUT;
-
- if (err) {
- printk("%s: %p: ino: %llu, mgen: %llu, start: %llu, size: %u, err: %d.\n",
- __func__, &pi->vfs_inode, pi->ino, m->gen, start, size, err);
- }
-
- if (err && (err != -ENOENT))
- goto err_out_put;
-
- if (!err) {
- netfs_convert_inode_info(&m->info);
-
- iattr.ia_valid = ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_SIZE | ATTR_ATIME;
- iattr.ia_mode = m->info.mode;
- iattr.ia_uid = m->info.uid;
- iattr.ia_gid = m->info.gid;
- iattr.ia_size = m->info.size;
- iattr.ia_atime = CURRENT_TIME;
-
- dprintk("%s: %p: ino: %llu, mgen: %llu, start: %llu, isize: %llu -> %llu.\n",
- __func__, &pi->vfs_inode, pi->ino, m->gen, start, inode->i_size, m->info.size);
-
- err = pohmelfs_setattr_raw(inode, &iattr);
- if (!err) {
- struct dentry *dentry = d_find_alias(inode);
- if (dentry) {
- fsnotify_change(dentry, iattr.ia_valid);
- dput(dentry);
- }
- }
- }
-
- pi->lock_type = type;
- set_bit(NETFS_INODE_OWNED, &pi->state);
-
- pohmelfs_mcache_put(psb, m);
-
- return 0;
-
-err_out_put:
- pohmelfs_mcache_put(psb, m);
- return err;
-}
-
-int pohmelfs_data_unlock(struct pohmelfs_inode *pi, u64 start, u32 size, int type)
-{
- dprintk("%s: %p: ino: %llu, start: %llu, size: %u, type: %d.\n",
- __func__, &pi->vfs_inode, pi->ino, start, size, type);
- pi->lock_type = 0;
- clear_bit(NETFS_INODE_REMOTE_DIR_SYNCED, &pi->state);
- clear_bit(NETFS_INODE_OWNED, &pi->state);
- return pohmelfs_send_lock_trans(pi, pi->ino, start, size, type);
-}
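
The lock path above follows a request/response pattern: pohmelfs_data_lock() allocates an mcache entry whose generation number identifies the request, sends the lock transaction, and then blocks on the entry's completion with the mcache_timeout; a zero return from wait_for_completion_timeout() is translated into -ETIMEDOUT, otherwise the error filled in by the reply handler is used. The following is a minimal, hypothetical userspace sketch of that wait-and-translate step using pthreads; the lock_request type and function names are illustrative and are not part of pohmelfs.

#include <errno.h>
#include <pthread.h>
#include <time.h>

/* Hypothetical stand-in for the mcache entry plus its completion. */
struct lock_request {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;	/* set by the reply handler */
	int err;	/* error carried by the reply */
};

/* Reply-handler side: record the error and wake the waiter. */
static void lock_request_complete(struct lock_request *r, int err)
{
	pthread_mutex_lock(&r->lock);
	r->err = err;
	r->done = 1;
	pthread_cond_signal(&r->cond);
	pthread_mutex_unlock(&r->lock);
}

/* Waiter side: mirrors the wait_for_completion_timeout() translation in
 * pohmelfs_data_lock(): a completed reply wins, otherwise -ETIMEDOUT. */
static int lock_request_wait(struct lock_request *r, unsigned int timeout_ms)
{
	struct timespec ts;
	int ret = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += timeout_ms / 1000;
	ts.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}

	pthread_mutex_lock(&r->lock);
	while (!r->done && ret != ETIMEDOUT)
		ret = pthread_cond_timedwait(&r->cond, &r->lock, &ts);
	ret = r->done ? r->err : -ETIMEDOUT;
	pthread_mutex_unlock(&r->lock);

	return ret;
}

Compile with -pthread; the receiving thread would call lock_request_complete(), just as pohmelfs_data_lock_response() completes the mcache entry.
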
diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
deleted file mode 100644
index e22665c..0000000
--- a/drivers/staging/pohmelfs/mcache.c
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/mempool.h>
-
-#include "netfs.h"
-
-static struct kmem_cache *pohmelfs_mcache_cache;
-static mempool_t *pohmelfs_mcache_pool;
-
-static inline int pohmelfs_mcache_cmp(u64 gen, u64 new)
-{
- if (gen < new)
- return 1;
- if (gen > new)
- return -1;
- return 0;
-}
-
-struct pohmelfs_mcache *pohmelfs_mcache_search(struct pohmelfs_sb *psb, u64 gen)
-{
- struct rb_root *root = &psb->mcache_root;
- struct rb_node *n = root->rb_node;
- struct pohmelfs_mcache *tmp, *ret = NULL;
- int cmp;
-
- while (n) {
- tmp = rb_entry(n, struct pohmelfs_mcache, mcache_entry);
-
- cmp = pohmelfs_mcache_cmp(tmp->gen, gen);
- if (cmp < 0)
- n = n->rb_left;
- else if (cmp > 0)
- n = n->rb_right;
- else {
- ret = tmp;
- pohmelfs_mcache_get(ret);
- break;
- }
- }
-
- return ret;
-}
-
-static int pohmelfs_mcache_insert(struct pohmelfs_sb *psb, struct pohmelfs_mcache *m)
-{
- struct rb_root *root = &psb->mcache_root;
- struct rb_node **n = &root->rb_node, *parent = NULL;
- struct pohmelfs_mcache *ret = NULL, *tmp;
- int cmp;
-
- while (*n) {
- parent = *n;
-
- tmp = rb_entry(parent, struct pohmelfs_mcache, mcache_entry);
-
- cmp = pohmelfs_mcache_cmp(tmp->gen, m->gen);
- if (cmp < 0)
- n = &parent->rb_left;
- else if (cmp > 0)
- n = &parent->rb_right;
- else {
- ret = tmp;
- break;
- }
- }
-
- if (ret)
- return -EEXIST;
-
- rb_link_node(&m->mcache_entry, parent, n);
- rb_insert_color(&m->mcache_entry, root);
-
- return 0;
-}
-
-static int pohmelfs_mcache_remove(struct pohmelfs_sb *psb, struct pohmelfs_mcache *m)
-{
- if (m && m->mcache_entry.rb_parent_color) {
- rb_erase(&m->mcache_entry, &psb->mcache_root);
- m->mcache_entry.rb_parent_color = 0;
- return 1;
- }
- return 0;
-}
-
-void pohmelfs_mcache_remove_locked(struct pohmelfs_sb *psb, struct pohmelfs_mcache *m)
-{
- mutex_lock(&psb->mcache_lock);
- pohmelfs_mcache_remove(psb, m);
- mutex_unlock(&psb->mcache_lock);
-}
-
-struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start,
- unsigned int size, void *data)
-{
- struct pohmelfs_mcache *m;
- int err = -ENOMEM;
-
- m = mempool_alloc(pohmelfs_mcache_pool, GFP_KERNEL);
- if (!m)
- goto err_out_exit;
-
- init_completion(&m->complete);
- m->err = 0;
- atomic_set(&m->refcnt, 1);
- m->data = data;
- m->start = start;
- m->size = size;
- m->gen = atomic_long_inc_return(&psb->mcache_gen);
-
- mutex_lock(&psb->mcache_lock);
- err = pohmelfs_mcache_insert(psb, m);
- mutex_unlock(&psb->mcache_lock);
- if (err)
- goto err_out_free;
-
- return m;
-
-err_out_free:
- mempool_free(m, pohmelfs_mcache_pool);
-err_out_exit:
- return ERR_PTR(err);
-}
-
-void pohmelfs_mcache_free(struct pohmelfs_sb *psb, struct pohmelfs_mcache *m)
-{
- pohmelfs_mcache_remove_locked(psb, m);
-
- mempool_free(m, pohmelfs_mcache_pool);
-}
-
-int __init pohmelfs_mcache_init(void)
-{
- pohmelfs_mcache_cache = kmem_cache_create("pohmelfs_mcache_cache",
- sizeof(struct pohmelfs_mcache),
- 0, (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD), NULL);
- if (!pohmelfs_mcache_cache)
- goto err_out_exit;
-
- pohmelfs_mcache_pool = mempool_create_slab_pool(256, pohmelfs_mcache_cache);
- if (!pohmelfs_mcache_pool)
- goto err_out_free;
-
- return 0;
-
-err_out_free:
- kmem_cache_destroy(pohmelfs_mcache_cache);
-err_out_exit:
- return -ENOMEM;
-}
-
-void pohmelfs_mcache_exit(void)
-{
- mempool_destroy(pohmelfs_mcache_pool);
- kmem_cache_destroy(pohmelfs_mcache_cache);
-}
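
The cache removed above is a table of in-flight requests keyed by a monotonically increasing generation number: pohmelfs_mcache_alloc() assigns the generation and inserts the entry, the reply handlers find it again with pohmelfs_mcache_search(), and pohmelfs_mcache_free() unlinks and releases it. A hypothetical userspace model of that lifecycle, using a mutex-protected list instead of the kernel rbtree and mempool (all names below are illustrative), might look like this:

#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>

/* Illustrative stand-in for struct pohmelfs_mcache. */
struct pending {
	struct pending *next;
	uint64_t gen;	/* request identity, like m->gen */
	int err;	/* result filled in by the reply handler */
};

static struct pending *pending_list;
static uint64_t pending_gen;
static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;

/* Allocate an entry, assign a fresh generation and publish it. */
static struct pending *pending_alloc(void)
{
	struct pending *p = calloc(1, sizeof(*p));

	if (!p)
		return NULL;

	pthread_mutex_lock(&pending_lock);
	p->gen = ++pending_gen;
	p->next = pending_list;
	pending_list = p;
	pthread_mutex_unlock(&pending_lock);
	return p;
}

/* Look up an entry by generation, as pohmelfs_mcache_search() does. */
static struct pending *pending_search(uint64_t gen)
{
	struct pending *p;

	pthread_mutex_lock(&pending_lock);
	for (p = pending_list; p && p->gen != gen; p = p->next)
		;
	pthread_mutex_unlock(&pending_lock);
	return p;
}

/* Unlink and free an entry once its user is done with it. */
static void pending_free(struct pending *p)
{
	struct pending **pp;

	pthread_mutex_lock(&pending_lock);
	for (pp = &pending_list; *pp; pp = &(*pp)->next)
		if (*pp == p) {
			*pp = p->next;
			break;
		}
	pthread_mutex_unlock(&pending_lock);
	free(p);
}

The kernel version additionally reference-counts each entry (pohmelfs_mcache_get()/put()), because the waiter and the receiving thread can race on releasing it, and it uses an rbtree, which keeps lookups logarithmic when many requests are outstanding.
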
diff --git a/drivers/staging/pohmelfs/net.c b/drivers/staging/pohmelfs/net.c
deleted file mode 100644
index b2e9186..0000000
--- a/drivers/staging/pohmelfs/net.c
+++ /dev/null
@@ -1,1209 +0,0 @@
-/*
- * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/fsnotify.h>
-#include <linux/jhash.h>
-#include <linux/in.h>
-#include <linux/in6.h>
-#include <linux/kthread.h>
-#include <linux/pagemap.h>
-#include <linux/poll.h>
-#include <linux/slab.h>
-#include <linux/swap.h>
-#include <linux/syscalls.h>
-#include <linux/vmalloc.h>
-
-#include "netfs.h"
-
-/*
- * Async machinery lives here.
- * All commands sent to the server do _not_ require a synchronous reply;
- * instead, when one is really needed (readdir or readpage, for example),
- * the caller sleeps waiting for the data, which is placed into the
- * provided buffer before the caller is woken up.
- *
- * A command response can also arrive without any listener. For example,
- * a readdir response will add new objects into the cache without a
- * corresponding request from userspace. This is used for cache coherency.
- *
- * If no object is found for the given data, the data is discarded.
- *
- * All incoming messages are received by a dedicated kernel thread.
- */
-
-/*
- * Basic network sending/receiving functions.
- * Blocked mode is used.
- */
-static int netfs_data_recv(struct netfs_state *st, void *buf, u64 size)
-{
- struct msghdr msg;
- struct kvec iov;
- int err;
-
- BUG_ON(!size);
-
- iov.iov_base = buf;
- iov.iov_len = size;
-
- msg.msg_iov = (struct iovec *)&iov;
- msg.msg_iovlen = 1;
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = MSG_DONTWAIT;
-
- err = kernel_recvmsg(st->socket, &msg, &iov, 1, iov.iov_len,
- msg.msg_flags);
- if (err <= 0) {
- printk("%s: failed to recv data: size: %llu, err: %d.\n", __func__, size, err);
- if (err == 0)
- err = -ECONNRESET;
- }
-
- return err;
-}
-
-static int pohmelfs_data_recv(struct netfs_state *st, void *data, unsigned int size)
-{
- unsigned int revents = 0;
- unsigned int err_mask = POLLERR | POLLHUP | POLLRDHUP;
- unsigned int mask = err_mask | POLLIN;
- int err = 0;
-
- while (size && !err) {
- revents = netfs_state_poll(st);
-
- if (!(revents & mask)) {
- DEFINE_WAIT(wait);
-
- for (;;) {
- prepare_to_wait(&st->thread_wait, &wait, TASK_INTERRUPTIBLE);
- if (kthread_should_stop())
- break;
-
- revents = netfs_state_poll(st);
-
- if (revents & mask)
- break;
-
- if (signal_pending(current))
- break;
-
- schedule();
- continue;
- }
- finish_wait(&st->thread_wait, &wait);
- }
-
- err = 0;
- netfs_state_lock(st);
- if (st->socket && (st->read_socket == st->socket) && (revents & POLLIN)) {
- err = netfs_data_recv(st, data, size);
- if (err > 0) {
- data += err;
- size -= err;
- err = 0;
- } else if (err == 0)
- err = -ECONNRESET;
- }
-
- if (revents & err_mask) {
- printk("%s: revents: %x, socket: %p, size: %u, err: %d.\n",
- __func__, revents, st->socket, size, err);
- err = -ECONNRESET;
- }
- netfs_state_unlock(st);
-
- if (err < 0) {
- if (netfs_state_trylock_send(st)) {
- netfs_state_exit(st);
- err = netfs_state_init(st);
- if (!err)
- err = -EAGAIN;
- netfs_state_unlock_send(st);
- } else {
- st->need_reset = 1;
- }
- }
-
- if (kthread_should_stop())
- err = -ENODEV;
-
- if (err)
- printk("%s: socket: %p, read_socket: %p, revents: %x, rev_error: %d, "
- "should_stop: %d, size: %u, err: %d.\n",
- __func__, st->socket, st->read_socket,
- revents, revents & err_mask, kthread_should_stop(), size, err);
- }
-
- return err;
-}
-
-int pohmelfs_data_recv_and_check(struct netfs_state *st, void *data, unsigned int size)
-{
- struct netfs_cmd *cmd = &st->cmd;
- int err;
-
- err = pohmelfs_data_recv(st, data, size);
- if (err)
- return err;
-
- return pohmelfs_crypto_process_input_data(&st->eng, cmd->iv, data, NULL, size);
-}
-
-/*
- * Polling machinery.
- */
-
-struct netfs_poll_helper {
- poll_table pt;
- struct netfs_state *st;
-};
-
-static int netfs_queue_wake(wait_queue_t *wait, unsigned mode, int sync, void *key)
-{
- struct netfs_state *st = container_of(wait, struct netfs_state, wait);
-
- wake_up(&st->thread_wait);
- return 1;
-}
-
-static void netfs_queue_func(struct file *file, wait_queue_head_t *whead,
- poll_table *pt)
-{
- struct netfs_state *st = container_of(pt, struct netfs_poll_helper, pt)->st;
-
- st->whead = whead;
- init_waitqueue_func_entry(&st->wait, netfs_queue_wake);
- add_wait_queue(whead, &st->wait);
-}
-
-static void netfs_poll_exit(struct netfs_state *st)
-{
- if (st->whead) {
- remove_wait_queue(st->whead, &st->wait);
- st->whead = NULL;
- }
-}
-
-static int netfs_poll_init(struct netfs_state *st)
-{
- struct netfs_poll_helper ph;
-
- ph.st = st;
- init_poll_funcptr(&ph.pt, &netfs_queue_func);
-
- st->socket->ops->poll(NULL, st->socket, &ph.pt);
- return 0;
-}
-
-/*
- * Get the response for a readpage command. We look up the inode and the page
- * in its mapping and copy the data into it. If it was an async request, the
- * page is queued into shared data and the listener is woken up to copy it to
- * userspace.
- *
- * Work is in progress to allow calling copy_to_user() directly from the
- * async receiving kernel thread.
- */
-static int pohmelfs_read_page_response(struct netfs_state *st)
-{
- struct pohmelfs_sb *psb = st->psb;
- struct netfs_cmd *cmd = &st->cmd;
- struct inode *inode;
- struct page *page;
- int err = 0;
-
- if (cmd->size > PAGE_CACHE_SIZE) {
- err = -EINVAL;
- goto err_out_exit;
- }
-
- inode = ilookup(st->psb->sb, cmd->id);
- if (!inode) {
- printk("%s: failed to find inode: id: %llu.\n", __func__, cmd->id);
- err = -ENOENT;
- goto err_out_exit;
- }
-
- page = find_get_page(inode->i_mapping, cmd->start >> PAGE_CACHE_SHIFT);
- if (!page || !PageLocked(page)) {
- printk("%s: failed to find/lock page: page: %p, id: %llu, start: %llu, index: %llu.\n",
- __func__, page, cmd->id, cmd->start, cmd->start >> PAGE_CACHE_SHIFT);
-
- while (cmd->size) {
- unsigned int sz = min(cmd->size, st->size);
-
- err = pohmelfs_data_recv(st, st->data, sz);
- if (err)
- break;
-
- cmd->size -= sz;
- }
-
- err = -ENODEV;
- if (page)
- goto err_out_page_put;
- goto err_out_put;
- }
-
- if (cmd->size) {
- void *addr;
-
- addr = kmap(page);
- err = pohmelfs_data_recv(st, addr, cmd->size);
- kunmap(page);
-
- if (err)
- goto err_out_page_unlock;
- }
-
- dprintk("%s: page: %p, start: %llu, size: %u, locked: %d.\n",
- __func__, page, cmd->start, cmd->size, PageLocked(page));
-
- SetPageChecked(page);
- if ((psb->hash_string || psb->cipher_string) && psb->perform_crypto && cmd->size) {
- err = pohmelfs_crypto_process_input_page(&st->eng, page, cmd->size, cmd->iv);
- if (err < 0)
- goto err_out_page_unlock;
- } else {
- SetPageUptodate(page);
- unlock_page(page);
- page_cache_release(page);
- }
-
- pohmelfs_put_inode(POHMELFS_I(inode));
- wake_up(&st->psb->wait);
-
- return 0;
-
-err_out_page_unlock:
- SetPageError(page);
- unlock_page(page);
-err_out_page_put:
- page_cache_release(page);
-err_out_put:
- pohmelfs_put_inode(POHMELFS_I(inode));
-err_out_exit:
- wake_up(&st->psb->wait);
- return err;
-}
-
-static int pohmelfs_check_name(struct pohmelfs_inode *parent, struct qstr *str,
- struct netfs_inode_info *info)
-{
- struct inode *inode;
- struct pohmelfs_name *n;
- int err = 0;
- u64 ino = 0;
-
- mutex_lock(&parent->offset_lock);
- n = pohmelfs_search_hash(parent, str->hash);
- if (n)
- ino = n->ino;
- mutex_unlock(&parent->offset_lock);
-
- if (!ino)
- goto out;
-
- inode = ilookup(parent->vfs_inode.i_sb, ino);
- if (!inode)
- goto out;
-
- dprintk("%s: parent: %llu, inode: %llu.\n", __func__, parent->ino, ino);
-
- pohmelfs_fill_inode(inode, info);
- pohmelfs_put_inode(POHMELFS_I(inode));
- err = -EEXIST;
-out:
- return err;
-}
-
-/*
- * Readdir response from the server. If the special field is set, we wake up
- * the listener (the readdir() call), which will copy the data to userspace.
- */
-static int pohmelfs_readdir_response(struct netfs_state *st)
-{
- struct inode *inode;
- struct netfs_cmd *cmd = &st->cmd;
- struct netfs_inode_info *info;
- struct pohmelfs_inode *parent = NULL, *npi;
- int err = 0, last = cmd->ext;
- struct qstr str;
-
- if (cmd->size > st->size)
- return -EINVAL;
-
- inode = ilookup(st->psb->sb, cmd->id);
- if (!inode) {
- printk("%s: failed to find inode: id: %llu.\n", __func__, cmd->id);
- return -ENOENT;
- }
- parent = POHMELFS_I(inode);
-
- if (!cmd->size && cmd->start) {
- err = -cmd->start;
- goto out;
- }
-
- if (cmd->size) {
- char *name;
-
- err = pohmelfs_data_recv_and_check(st, st->data, cmd->size);
- if (err)
- goto err_out_put;
-
- info = (struct netfs_inode_info *)(st->data);
-
- name = (char *)(info + 1);
- str.len = cmd->size - sizeof(struct netfs_inode_info) - 1 - cmd->cpad;
- name[str.len] = 0;
- str.name = name;
- str.hash = jhash(str.name, str.len, 0);
-
- netfs_convert_inode_info(info);
-
- if (parent) {
- err = pohmelfs_check_name(parent, &str, info);
- if (err) {
- if (err == -EEXIST)
- err = 0;
- goto out;
- }
- }
-
- info->ino = cmd->start;
- if (!info->ino)
- info->ino = pohmelfs_new_ino(st->psb);
-
- dprintk("%s: parent: %llu, ino: %llu, name: '%s', hash: %x, len: %u, mode: %o.\n",
- __func__, parent->ino, info->ino, str.name, str.hash, str.len,
- info->mode);
-
- npi = pohmelfs_new_inode(st->psb, parent, &str, info, 0);
- if (IS_ERR(npi)) {
- err = PTR_ERR(npi);
-
- if (err != -EEXIST)
- goto err_out_put;
- } else {
- struct dentry *dentry, *alias, *pd;
-
- set_bit(NETFS_INODE_REMOTE_SYNCED, &npi->state);
- clear_bit(NETFS_INODE_OWNED, &npi->state);
-
- pd = d_find_alias(&parent->vfs_inode);
- if (pd) {
- str.hash = full_name_hash(str.name, str.len);
- dentry = d_alloc(pd, &str);
- if (dentry) {
- alias = d_materialise_unique(dentry, &npi->vfs_inode);
- if (alias)
- dput(alias);
- }
-
- dput(dentry);
- dput(pd);
- }
- }
- }
-out:
- if (last) {
- set_bit(NETFS_INODE_REMOTE_DIR_SYNCED, &parent->state);
- set_bit(NETFS_INODE_REMOTE_SYNCED, &parent->state);
- wake_up(&st->psb->wait);
- }
- pohmelfs_put_inode(parent);
-
- return err;
-
-err_out_put:
- clear_bit(NETFS_INODE_REMOTE_DIR_SYNCED, &parent->state);
- printk("%s: parent: %llu, ino: %llu, cmd_id: %llu.\n", __func__, parent->ino, cmd->start, cmd->id);
- pohmelfs_put_inode(parent);
- wake_up(&st->psb->wait);
- return err;
-}
-
-/*
- * Lookup command response.
- * It searches for the inode being looked up (if it exists) and substitutes
- * its inode information (size, permissions, mode and so on); if the inode
- * does not exist, a new one will be created and inserted into the caches.
- */
-static int pohmelfs_lookup_response(struct netfs_state *st)
-{
- struct inode *inode = NULL;
- struct netfs_cmd *cmd = &st->cmd;
- struct netfs_inode_info *info;
- struct pohmelfs_inode *parent = NULL, *npi;
- int err = -EINVAL;
- char *name;
-
- inode = ilookup(st->psb->sb, cmd->id);
- if (!inode) {
- printk("%s: lookup response: id: %llu, start: %llu, size: %u.\n",
- __func__, cmd->id, cmd->start, cmd->size);
- err = -ENOENT;
- goto err_out_exit;
- }
- parent = POHMELFS_I(inode);
-
- if (!cmd->size) {
- err = -cmd->start;
- goto err_out_put;
- }
-
- if (cmd->size < sizeof(struct netfs_inode_info)) {
- printk("%s: broken lookup response: id: %llu, start: %llu, size: %u.\n",
- __func__, cmd->id, cmd->start, cmd->size);
- err = -EINVAL;
- goto err_out_put;
- }
-
- err = pohmelfs_data_recv_and_check(st, st->data, cmd->size);
- if (err)
- goto err_out_put;
-
- info = (struct netfs_inode_info *)(st->data);
- name = (char *)(info + 1);
-
- netfs_convert_inode_info(info);
-
- info->ino = cmd->start;
- if (!info->ino)
- info->ino = pohmelfs_new_ino(st->psb);
-
- dprintk("%s: parent: %llu, ino: %llu, name: '%s', start: %llu.\n",
- __func__, parent->ino, info->ino, name, cmd->start);
-
- if (cmd->start)
- npi = pohmelfs_new_inode(st->psb, parent, NULL, info, 0);
- else {
- struct qstr str;
-
- str.name = name;
- str.len = cmd->size - sizeof(struct netfs_inode_info) - 1 - cmd->cpad;
- str.hash = jhash(name, str.len, 0);
-
- npi = pohmelfs_new_inode(st->psb, parent, &str, info, 0);
- }
- if (IS_ERR(npi)) {
- err = PTR_ERR(npi);
-
- if (err != -EEXIST)
- goto err_out_put;
- } else {
- set_bit(NETFS_INODE_REMOTE_SYNCED, &npi->state);
- clear_bit(NETFS_INODE_OWNED, &npi->state);
- }
-
- clear_bit(NETFS_COMMAND_PENDING, &parent->state);
- pohmelfs_put_inode(parent);
-
- wake_up(&st->psb->wait);
-
- return 0;
-
-err_out_put:
- pohmelfs_put_inode(parent);
-err_out_exit:
- clear_bit(NETFS_COMMAND_PENDING, &parent->state);
- wake_up(&st->psb->wait);
- printk("%s: inode: %p, id: %llu, start: %llu, size: %u, err: %d.\n",
- __func__, inode, cmd->id, cmd->start, cmd->size, err);
- return err;
-}
-
-/*
- * Create response: it just marks the local inode as 'created', so that
- * writeback for any of its children (or for the inode itself) will not try
- * to sync it again.
- */
-static int pohmelfs_create_response(struct netfs_state *st)
-{
- struct inode *inode;
- struct netfs_cmd *cmd = &st->cmd;
- struct pohmelfs_inode *pi;
-
- inode = ilookup(st->psb->sb, cmd->id);
- if (!inode) {
- printk("%s: failed to find inode: id: %llu, start: %llu.\n",
- __func__, cmd->id, cmd->start);
- goto err_out_exit;
- }
-
- pi = POHMELFS_I(inode);
-
- /*
- * To lock or not to lock?
- * We actually do not care if it races...
- */
- if (cmd->start)
- make_bad_inode(inode);
- set_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state);
-
- pohmelfs_put_inode(pi);
-
- wake_up(&st->psb->wait);
- return 0;
-
-err_out_exit:
- wake_up(&st->psb->wait);
- return -ENOENT;
-}
-
-/*
- * Object remove response. Just says that remove request has been received.
- * Used in cache coherency protocol.
- */
-static int pohmelfs_remove_response(struct netfs_state *st)
-{
- struct netfs_cmd *cmd = &st->cmd;
- int err;
-
- err = pohmelfs_data_recv_and_check(st, st->data, cmd->size);
- if (err)
- return err;
-
- dprintk("%s: parent: %llu, path: '%s'.\n", __func__, cmd->id, (char *)st->data);
-
- return 0;
-}
-
-/*
- * Transaction reply processing.
- *
- * Find the transaction based on its generation number, bump its reference
- * counter so that nobody can free it under us, drop it from the trees and
- * lists, and drop the reference counter again. When it hits zero (all
- * destinations have replied and all timeouts have been handled by the async
- * scanning code), the completion callback is invoked and the transaction is
- * freed.
- */
-static int pohmelfs_transaction_response(struct netfs_state *st)
-{
- struct netfs_trans_dst *dst;
- struct netfs_trans *t = NULL;
- struct netfs_cmd *cmd = &st->cmd;
- short err = (signed)cmd->ext;
-
- mutex_lock(&st->trans_lock);
- dst = netfs_trans_search(st, cmd->start);
- if (dst) {
- netfs_trans_remove_nolock(dst, st);
- t = dst->trans;
- }
- mutex_unlock(&st->trans_lock);
-
- if (!t) {
- printk("%s: failed to find transaction: start: %llu: id: %llu, size: %u, ext: %u.\n",
- __func__, cmd->start, cmd->id, cmd->size, cmd->ext);
- err = -EINVAL;
- goto out;
- }
-
- t->result = err;
- netfs_trans_drop_dst_nostate(dst);
-
-out:
- wake_up(&st->psb->wait);
- return err;
-}
-
-/*
- * Inode metadata cache coherency message.
- */
-static int pohmelfs_page_cache_response(struct netfs_state *st)
-{
- struct netfs_cmd *cmd = &st->cmd;
- struct inode *inode;
-
- dprintk("%s: st: %p, id: %llu, start: %llu, size: %u.\n", __func__, st, cmd->id, cmd->start, cmd->size);
-
- inode = ilookup(st->psb->sb, cmd->id);
- if (!inode) {
- printk("%s: failed to find inode: id: %llu.\n", __func__, cmd->id);
- return -ENOENT;
- }
-
- set_bit(NETFS_INODE_NEED_FLUSH, &POHMELFS_I(inode)->state);
- pohmelfs_put_inode(POHMELFS_I(inode));
-
- return 0;
-}
-
-/*
- * Root capabilities response: exported statistics
- * such as used and available size, number of files and directories,
- * and permissions.
- */
-static int pohmelfs_root_cap_response(struct netfs_state *st)
-{
- struct netfs_cmd *cmd = &st->cmd;
- struct netfs_root_capabilities *cap;
- struct pohmelfs_sb *psb = st->psb;
-
- if (cmd->size != sizeof(struct netfs_root_capabilities)) {
- psb->flags = EPROTO;
- wake_up(&psb->wait);
- return -EPROTO;
- }
-
- cap = st->data;
-
- netfs_convert_root_capabilities(cap);
-
- if (psb->total_size < cap->used + cap->avail)
- psb->total_size = cap->used + cap->avail;
- if (cap->avail)
- psb->avail_size = cap->avail;
- psb->state_flags = cap->flags;
-
- if (psb->state_flags & POHMELFS_FLAGS_RO) {
- psb->sb->s_flags |= MS_RDONLY;
- printk(KERN_INFO "Mounting POHMELFS (%d) read-only.\n", psb->idx);
- }
-
- if (psb->state_flags & POHMELFS_FLAGS_XATTR)
- printk(KERN_INFO "Mounting POHMELFS (%d) "
- "with extended attributes support.\n", psb->idx);
-
- if (atomic_long_read(&psb->total_inodes) <= 1)
- atomic_long_set(&psb->total_inodes, cap->nr_files);
-
- dprintk("%s: total: %llu, avail: %llu, flags: %llx, inodes: %llu.\n",
- __func__, psb->total_size, psb->avail_size, psb->state_flags, cap->nr_files);
-
- psb->flags = 0;
- wake_up(&psb->wait);
- return 0;
-}
-
-/*
- * Crypto capabilities of the server, where it says whether or not it
- * supports the requested hash/cipher algorithms.
- */
-static int pohmelfs_crypto_cap_response(struct netfs_state *st)
-{
- struct netfs_cmd *cmd = &st->cmd;
- struct netfs_crypto_capabilities *cap;
- struct pohmelfs_sb *psb = st->psb;
- int err = 0;
-
- if (cmd->size != sizeof(struct netfs_crypto_capabilities)) {
- psb->flags = EPROTO;
- wake_up(&psb->wait);
- return -EPROTO;
- }
-
- cap = st->data;
-
- dprintk("%s: cipher '%s': %s, hash: '%s': %s.\n",
- __func__,
- psb->cipher_string, (cap->cipher_strlen) ? "SUPPORTED" : "NOT SUPPORTED",
- psb->hash_string, (cap->hash_strlen) ? "SUPPORTED" : "NOT SUPPORTED");
-
- if (!cap->hash_strlen) {
- if (psb->hash_strlen && psb->crypto_fail_unsupported)
- err = -ENOTSUPP;
- psb->hash_strlen = 0;
- kfree(psb->hash_string);
- psb->hash_string = NULL;
- }
-
- if (!cap->cipher_strlen) {
- if (psb->cipher_strlen && psb->crypto_fail_unsupported)
- err = -ENOTSUPP;
- psb->cipher_strlen = 0;
- kfree(psb->cipher_string);
- psb->cipher_string = NULL;
- }
-
- return err;
-}
-
-/*
- * Capabilities handshake response.
- */
-static int pohmelfs_capabilities_response(struct netfs_state *st)
-{
- struct netfs_cmd *cmd = &st->cmd;
- int err = 0;
-
- err = pohmelfs_data_recv(st, st->data, cmd->size);
- if (err)
- return err;
-
- switch (cmd->id) {
- case POHMELFS_CRYPTO_CAPABILITIES:
- return pohmelfs_crypto_cap_response(st);
- case POHMELFS_ROOT_CAPABILITIES:
- return pohmelfs_root_cap_response(st);
- default:
- break;
- }
- return -EINVAL;
-}
-
-/*
- * Receiving an extended attribute.
- * This does not work properly if the received size is larger than the
- * requested one, although that should not happen with the current
- * request/reply model.
- */
-static int pohmelfs_getxattr_response(struct netfs_state *st)
-{
- struct pohmelfs_sb *psb = st->psb;
- struct netfs_cmd *cmd = &st->cmd;
- struct pohmelfs_mcache *m;
- short error = (signed short)cmd->ext, err;
- unsigned int sz, total_size;
-
- m = pohmelfs_mcache_search(psb, cmd->id);
-
- dprintk("%s: id: %llu, gen: %llu, err: %d.\n",
- __func__, cmd->id, (m) ? m->gen : 0, error);
-
- if (!m) {
- printk("%s: failed to find getxattr cache entry: id: %llu.\n", __func__, cmd->id);
- return -ENOENT;
- }
-
- if (cmd->size) {
- sz = min_t(unsigned int, cmd->size, m->size);
- err = pohmelfs_data_recv_and_check(st, m->data, sz);
- if (err) {
- error = err;
- goto out;
- }
-
- m->size = sz;
- total_size = cmd->size - sz;
-
- while (total_size) {
- sz = min(total_size, st->size);
-
- err = pohmelfs_data_recv_and_check(st, st->data, sz);
- if (err) {
- error = err;
- break;
- }
-
- total_size -= sz;
- }
- }
-
-out:
- m->err = error;
- complete(&m->complete);
- pohmelfs_mcache_put(psb, m);
-
- return error;
-}
-
-int pohmelfs_data_lock_response(struct netfs_state *st)
-{
- struct pohmelfs_sb *psb = st->psb;
- struct netfs_cmd *cmd = &st->cmd;
- struct pohmelfs_mcache *m;
- short err = (signed short)cmd->ext;
- u64 id = cmd->id;
-
- m = pohmelfs_mcache_search(psb, id);
-
- dprintk("%s: id: %llu, gen: %llu, err: %d.\n",
- __func__, cmd->id, (m) ? m->gen : 0, err);
-
- if (!m) {
- pohmelfs_data_recv(st, st->data, cmd->size);
- printk("%s: failed to find data lock response: id: %llu.\n", __func__, cmd->id);
- return -ENOENT;
- }
-
- if (cmd->size)
- err = pohmelfs_data_recv_and_check(st, &m->info, cmd->size);
-
- m->err = err;
- complete(&m->complete);
- pohmelfs_mcache_put(psb, m);
-
- return err;
-}
-
-static void __inline__ netfs_state_reset(struct netfs_state *st)
-{
- netfs_state_lock_send(st);
- netfs_state_exit(st);
- netfs_state_init(st);
- netfs_state_unlock_send(st);
-}
-
-/*
- * Main receiving function, called from dedicated kernel thread.
- */
-static int pohmelfs_recv(void *data)
-{
- int err = -EINTR;
- struct netfs_state *st = data;
- struct netfs_cmd *cmd = &st->cmd;
-
- while (!kthread_should_stop()) {
- /*
- * If the socket is reset after this statement, then
- * pohmelfs_data_recv() will just fail and the loop will
- * start again, so this can be done without any locks.
- *
- * st->read_socket is needed to prevent the state machine from
- * breaking between this data read and a subsequent one in the
- * protocol-specific functions during a connection reset.
- * In case of a reset we have to read the next command and must
- * not expect data for the old command to magically appear on the
- * new connection.
- */
- st->read_socket = st->socket;
- err = pohmelfs_data_recv(st, cmd, sizeof(struct netfs_cmd));
- if (err) {
- msleep(1000);
- continue;
- }
-
- netfs_convert_cmd(cmd);
-
- dprintk("%s: cmd: %u, id: %llu, start: %llu, size: %u, "
- "ext: %u, csize: %u, cpad: %u.\n",
- __func__, cmd->cmd, cmd->id, cmd->start,
- cmd->size, cmd->ext, cmd->csize, cmd->cpad);
-
- if (cmd->csize) {
- struct pohmelfs_crypto_engine *e = &st->eng;
-
- if (unlikely(cmd->csize > e->size/2)) {
- netfs_state_reset(st);
- continue;
- }
-
- if (e->hash && unlikely(cmd->csize != st->psb->crypto_attached_size)) {
- dprintk("%s: cmd: cmd: %u, id: %llu, start: %llu, size: %u, "
- "csize: %u != digest size %u.\n",
- __func__, cmd->cmd, cmd->id, cmd->start, cmd->size,
- cmd->csize, st->psb->crypto_attached_size);
- netfs_state_reset(st);
- continue;
- }
-
- err = pohmelfs_data_recv(st, e->data, cmd->csize);
- if (err) {
- netfs_state_reset(st);
- continue;
- }
-
-#ifdef CONFIG_POHMELFS_DEBUG
- {
- unsigned int i;
- unsigned char *hash = e->data;
-
- dprintk("%s: received hash: ", __func__);
- for (i = 0; i < cmd->csize; ++i)
- printk("%02x ", hash[i]);
-
- printk("\n");
- }
-#endif
- cmd->size -= cmd->csize;
- }
-
- /*
- * This should catch protocol breakage and random garbage instead of commands.
- */
- if (unlikely((cmd->size > st->size) && (cmd->cmd != NETFS_XATTR_GET))) {
- netfs_state_reset(st);
- continue;
- }
-
- switch (cmd->cmd) {
- case NETFS_READ_PAGE:
- err = pohmelfs_read_page_response(st);
- break;
- case NETFS_READDIR:
- err = pohmelfs_readdir_response(st);
- break;
- case NETFS_LOOKUP:
- err = pohmelfs_lookup_response(st);
- break;
- case NETFS_CREATE:
- err = pohmelfs_create_response(st);
- break;
- case NETFS_REMOVE:
- err = pohmelfs_remove_response(st);
- break;
- case NETFS_TRANS:
- err = pohmelfs_transaction_response(st);
- break;
- case NETFS_PAGE_CACHE:
- err = pohmelfs_page_cache_response(st);
- break;
- case NETFS_CAPABILITIES:
- err = pohmelfs_capabilities_response(st);
- break;
- case NETFS_LOCK:
- err = pohmelfs_data_lock_response(st);
- break;
- case NETFS_XATTR_GET:
- err = pohmelfs_getxattr_response(st);
- break;
- default:
- printk("%s: wrong cmd: %u, id: %llu, start: %llu, size: %u, ext: %u.\n",
- __func__, cmd->cmd, cmd->id, cmd->start, cmd->size, cmd->ext);
- netfs_state_reset(st);
- break;
- }
- }
-
- while (!kthread_should_stop())
- schedule_timeout_uninterruptible(msecs_to_jiffies(10));
-
- return err;
-}
-
-int netfs_state_init(struct netfs_state *st)
-{
- int err;
- struct pohmelfs_ctl *ctl = &st->ctl;
-
- err = sock_create(ctl->addr.sa_family, ctl->type, ctl->proto, &st->socket);
- if (err) {
- printk("%s: failed to create a socket: family: %d, type: %d, proto: %d, err: %d.\n",
- __func__, ctl->addr.sa_family, ctl->type, ctl->proto, err);
- goto err_out_exit;
- }
-
- st->socket->sk->sk_allocation = GFP_NOIO;
- st->socket->sk->sk_sndtimeo = st->socket->sk->sk_rcvtimeo = msecs_to_jiffies(60000);
-
- err = kernel_connect(st->socket, (struct sockaddr *)&ctl->addr, ctl->addrlen, 0);
- if (err) {
- printk("%s: failed to connect to server: idx: %u, err: %d.\n",
- __func__, st->psb->idx, err);
- goto err_out_release;
- }
- st->socket->sk->sk_sndtimeo = st->socket->sk->sk_rcvtimeo = msecs_to_jiffies(60000);
-
- err = netfs_poll_init(st);
- if (err)
- goto err_out_release;
-
- if (st->socket->ops->family == AF_INET) {
- struct sockaddr_in *sin = (struct sockaddr_in *)&ctl->addr;
- printk(KERN_INFO "%s: (re)connected to peer %pi4:%d.\n", __func__,
- &sin->sin_addr.s_addr, ntohs(sin->sin_port));
- } else if (st->socket->ops->family == AF_INET6) {
- struct sockaddr_in6 *sin = (struct sockaddr_in6 *)&ctl->addr;
- printk(KERN_INFO "%s: (re)connected to peer %pi6:%d", __func__,
- &sin->sin6_addr, ntohs(sin->sin6_port));
- }
-
- return 0;
-
-err_out_release:
- sock_release(st->socket);
-err_out_exit:
- st->socket = NULL;
- return err;
-}
-
-void netfs_state_exit(struct netfs_state *st)
-{
- if (st->socket) {
- netfs_poll_exit(st);
- st->socket->ops->shutdown(st->socket, 2);
-
- if (st->socket->ops->family == AF_INET) {
- struct sockaddr_in *sin = (struct sockaddr_in *)&st->ctl.addr;
- printk(KERN_INFO "%s: disconnected from peer %pi4:%d.\n", __func__,
- &sin->sin_addr.s_addr, ntohs(sin->sin_port));
- } else if (st->socket->ops->family == AF_INET6) {
- struct sockaddr_in6 *sin = (struct sockaddr_in6 *)&st->ctl.addr;
- printk(KERN_INFO "%s: disconnected from peer %pi6:%d", __func__,
- &sin->sin6_addr, ntohs(sin->sin6_port));
- }
-
- sock_release(st->socket);
- st->socket = NULL;
- st->read_socket = NULL;
- st->need_reset = 0;
- }
-}
-
-int pohmelfs_state_init_one(struct pohmelfs_sb *psb, struct pohmelfs_config *conf)
-{
- struct netfs_state *st = &conf->state;
- int err = -ENOMEM;
-
- mutex_init(&st->__state_lock);
- mutex_init(&st->__state_send_lock);
- init_waitqueue_head(&st->thread_wait);
-
- st->psb = psb;
- st->trans_root = RB_ROOT;
- mutex_init(&st->trans_lock);
-
- st->size = psb->trans_data_size;
- st->data = kmalloc(st->size, GFP_KERNEL);
- if (!st->data)
- goto err_out_exit;
-
- if (psb->perform_crypto) {
- err = pohmelfs_crypto_engine_init(&st->eng, psb);
- if (err)
- goto err_out_free_data;
- }
-
- err = netfs_state_init(st);
- if (err)
- goto err_out_free_engine;
-
- st->thread = kthread_run(pohmelfs_recv, st, "pohmelfs/%u", psb->idx);
- if (IS_ERR(st->thread)) {
- err = PTR_ERR(st->thread);
- goto err_out_netfs_exit;
- }
-
- if (!psb->active_state)
- psb->active_state = conf;
-
- dprintk("%s: conf: %p, st: %p, socket: %p.\n",
- __func__, conf, st, st->socket);
- return 0;
-
-err_out_netfs_exit:
- netfs_state_exit(st);
-err_out_free_engine:
- pohmelfs_crypto_engine_exit(&st->eng);
-err_out_free_data:
- kfree(st->data);
-err_out_exit:
- return err;
-
-}
-
-void pohmelfs_state_flush_transactions(struct netfs_state *st)
-{
- struct rb_node *rb_node;
- struct netfs_trans_dst *dst;
-
- mutex_lock(&st->trans_lock);
- for (rb_node = rb_first(&st->trans_root); rb_node; ) {
- dst = rb_entry(rb_node, struct netfs_trans_dst, state_entry);
- rb_node = rb_next(rb_node);
-
- dst->trans->result = -EINVAL;
- netfs_trans_remove_nolock(dst, st);
- netfs_trans_drop_dst_nostate(dst);
- }
- mutex_unlock(&st->trans_lock);
-}
-
-static void pohmelfs_state_exit_one(struct pohmelfs_config *c)
-{
- struct netfs_state *st = &c->state;
-
- dprintk("%s: exiting, st: %p.\n", __func__, st);
- if (st->thread) {
- kthread_stop(st->thread);
- st->thread = NULL;
- }
-
- netfs_state_lock_send(st);
- netfs_state_exit(st);
- netfs_state_unlock_send(st);
-
- pohmelfs_state_flush_transactions(st);
-
- pohmelfs_crypto_engine_exit(&st->eng);
- kfree(st->data);
-
- kfree(c);
-}
-
-/*
- * Initialize the network stack. It searches for the given ID in the global
- * configuration table, which contains information about the remote server
- * (address (anything supported by the socket interface), port, protocol and
- * so on).
- */
-int pohmelfs_state_init(struct pohmelfs_sb *psb)
-{
- int err = -ENOMEM;
-
- err = pohmelfs_copy_config(psb);
- if (err) {
- pohmelfs_state_exit(psb);
- return err;
- }
-
- return 0;
-}
-
-void pohmelfs_state_exit(struct pohmelfs_sb *psb)
-{
- struct pohmelfs_config *c, *tmp;
-
- list_for_each_entry_safe(c, tmp, &psb->state_list, config_entry) {
- list_del(&c->config_entry);
- pohmelfs_state_exit_one(c);
- }
-}
-
-void pohmelfs_switch_active(struct pohmelfs_sb *psb)
-{
- struct pohmelfs_config *c = psb->active_state;
-
- if (!list_empty(&psb->state_list)) {
- if (c->config_entry.next != &psb->state_list) {
- psb->active_state = list_entry(c->config_entry.next,
- struct pohmelfs_config, config_entry);
- } else {
- psb->active_state = list_entry(psb->state_list.next,
- struct pohmelfs_config, config_entry);
- }
-
- dprintk("%s: empty: %d, active %p -> %p.\n",
- __func__, list_empty(&psb->state_list), c,
- psb->active_state);
- } else
- psb->active_state = NULL;
-}
-
-void pohmelfs_check_states(struct pohmelfs_sb *psb)
-{
- struct pohmelfs_config *c, *tmp;
- LIST_HEAD(delete_list);
-
- mutex_lock(&psb->state_lock);
- list_for_each_entry_safe(c, tmp, &psb->state_list, config_entry) {
- if (pohmelfs_config_check(c, psb->idx)) {
-
- if (psb->active_state == c)
- pohmelfs_switch_active(psb);
- list_move(&c->config_entry, &delete_list);
- }
- }
- pohmelfs_copy_config(psb);
- mutex_unlock(&psb->state_lock);
-
- list_for_each_entry_safe(c, tmp, &delete_list, config_entry) {
- list_del(&c->config_entry);
- pohmelfs_state_exit_one(c);
- }
-}
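
The receiving thread removed above follows a classic fixed-header-then-payload protocol: read sizeof(struct netfs_cmd) bytes, byte-swap the fields with netfs_convert_cmd(), sanity-check the crypto and payload sizes against the preallocated scratch buffer, and finally dispatch on cmd->cmd. Below is a compact, hypothetical userspace illustration of the decode-and-dispatch step; the wire_cmd layout is trimmed and is not the real struct netfs_cmd, while the command numbers follow the enum in netfs.h further down.

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Trimmed, illustrative wire header; the real struct netfs_cmd carries more
 * fields (csize, cpad, trans, iv). All values travel in big-endian order. */
struct wire_cmd {
	uint16_t cmd;
	uint16_t ext;
	uint32_t size;
	uint64_t id;
	uint64_t start;
} __attribute__((packed));

/* Equivalent of netfs_convert_cmd() for the trimmed header. */
static void wire_cmd_to_cpu(struct wire_cmd *c)
{
	c->cmd = be16toh(c->cmd);
	c->ext = be16toh(c->ext);
	c->size = be32toh(c->size);
	c->id = be64toh(c->id);
	c->start = be64toh(c->start);
}

/* Numbering matches NETFS_READ_PAGE and NETFS_LOOKUP in netfs.h. */
enum { CMD_READ_PAGE = 2, CMD_LOOKUP = 6 };

static int dispatch(const struct wire_cmd *c)
{
	switch (c->cmd) {
	case CMD_READ_PAGE:
		printf("read page reply: id %llu, start %llu, %u bytes\n",
		       (unsigned long long)c->id,
		       (unsigned long long)c->start, (unsigned)c->size);
		return 0;
	case CMD_LOOKUP:
		printf("lookup reply: id %llu\n", (unsigned long long)c->id);
		return 0;
	default:
		fprintf(stderr, "unknown command %u\n", (unsigned)c->cmd);
		return -1;
	}
}

int main(void)
{
	struct wire_cmd c = {
		.cmd = htobe16(CMD_READ_PAGE),
		.size = htobe32(4096),
		.id = htobe64(42),
		.start = htobe64(0),
	};

	wire_cmd_to_cpu(&c);
	return dispatch(&c);
}
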
diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
deleted file mode 100644
index 985b6b7..0000000
--- a/drivers/staging/pohmelfs/netfs.h
+++ /dev/null
@@ -1,919 +0,0 @@
-/*
- * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __NETFS_H
-#define __NETFS_H
-
-#include <linux/types.h>
-#include <linux/connector.h>
-#include <linux/backing-dev.h>
-
-#define POHMELFS_CN_IDX 5
-#define POHMELFS_CN_VAL 0
-
-#define POHMELFS_CTLINFO_ACK 1
-#define POHMELFS_NOINFO_ACK 2
-
-#define POHMELFS_NULL_IDX 65535
-
-/*
- * Network command structure.
- * Will be extended.
- */
-struct netfs_cmd {
- __u16 cmd; /* Command number */
- __u16 csize; /* Attached crypto information size */
- __u16 cpad; /* Attached padding size */
- __u16 ext; /* External flags */
- __u32 size; /* Size of the attached data */
- __u32 trans; /* Transaction id */
- __u64 id; /* Object ID to operate on. Used for feedback.*/
- __u64 start; /* Start of the object. */
- __u64 iv; /* IV sequence */
- __u8 data[0];
-};
-
-static inline void netfs_convert_cmd(struct netfs_cmd *cmd)
-{
- cmd->id = __be64_to_cpu(cmd->id);
- cmd->start = __be64_to_cpu(cmd->start);
- cmd->iv = __be64_to_cpu(cmd->iv);
- cmd->cmd = __be16_to_cpu(cmd->cmd);
- cmd->ext = __be16_to_cpu(cmd->ext);
- cmd->csize = __be16_to_cpu(cmd->csize);
- cmd->cpad = __be16_to_cpu(cmd->cpad);
- cmd->size = __be32_to_cpu(cmd->size);
-}
-
-#define NETFS_TRANS_SINGLE_DST (1<<0)
-
-enum {
- NETFS_READDIR = 1, /* Read directory for given inode number */
- NETFS_READ_PAGE, /* Read data page from the server */
- NETFS_WRITE_PAGE, /* Write data page to the server */
- NETFS_CREATE, /* Create directory entry */
- NETFS_REMOVE, /* Remove directory entry */
-
- NETFS_LOOKUP, /* Lookup single object */
- NETFS_LINK, /* Create a link */
- NETFS_TRANS, /* Transaction */
- NETFS_OPEN, /* Open intent */
- NETFS_INODE_INFO, /* Metadata cache coherency synchronization message */
-
- NETFS_PAGE_CACHE, /* Page cache invalidation message */
- NETFS_READ_PAGES, /* Read multiple contiguous pages in one go */
- NETFS_RENAME, /* Rename object */
- NETFS_CAPABILITIES, /* Capabilities of the client, for example supported crypto */
- NETFS_LOCK, /* Distributed lock message */
-
- NETFS_XATTR_SET, /* Set extended attribute */
- NETFS_XATTR_GET, /* Get extended attribute */
- NETFS_CMD_MAX
-};
-
-enum {
- POHMELFS_FLAGS_ADD = 0, /* Network state control message for ADD */
- POHMELFS_FLAGS_DEL, /* Network state control message for DEL */
- POHMELFS_FLAGS_SHOW, /* Network state control message for SHOW */
- POHMELFS_FLAGS_CRYPTO, /* Crypto data control message */
- POHMELFS_FLAGS_MODIFY, /* Network state modification message */
- POHMELFS_FLAGS_DUMP, /* Network state control message for SHOW ALL */
- POHMELFS_FLAGS_FLUSH, /* Network state control message for FLUSH */
-};
-
-/*
- * Always wanted to copy it from socket headers into public one,
- * since they are __KERNEL__ protected there.
- */
-#define _K_SS_MAXSIZE 128
-
-struct saddr {
- unsigned short sa_family;
- char addr[_K_SS_MAXSIZE];
-};
-
-enum {
- POHMELFS_CRYPTO_HASH = 0,
- POHMELFS_CRYPTO_CIPHER,
-};
-
-struct pohmelfs_crypto {
- unsigned int idx; /* Config index */
- unsigned short strlen; /* Size of the attached crypto string including the
- * terminating 0-byte, e.g. "cbc(aes)" */
- unsigned short type; /* HMAC, cipher, both */
- unsigned int keysize; /* Key size */
- unsigned char data[0]; /* Algorithm string, key and IV */
-};
-
-#define POHMELFS_IO_PERM_READ (1<<0)
-#define POHMELFS_IO_PERM_WRITE (1<<1)
-
-/*
- * Configuration command used to create table of different remote servers.
- */
-struct pohmelfs_ctl {
- __u32 idx; /* Config index */
- __u32 type; /* Socket type */
- __u32 proto; /* Socket protocol */
- __u16 addrlen; /* Size of the address */
- __u16 perm; /* IO permission */
- __u16 prio; /* IO priority */
- struct saddr addr; /* Remote server address */
-};
-
-/*
- * Ack for userspace about requested command.
- */
-struct pohmelfs_cn_ack {
- struct cn_msg msg;
- int error;
- int msg_num;
- int unused[3];
- struct pohmelfs_ctl ctl;
-};
-
-/*
- * Inode info structure used to sync with server.
- * Check what stat() returns.
- */
-struct netfs_inode_info {
- unsigned int mode;
- unsigned int nlink;
- unsigned int uid;
- unsigned int gid;
- unsigned int blocksize;
- unsigned int padding;
- __u64 ino;
- __u64 blocks;
- __u64 rdev;
- __u64 size;
- __u64 version;
-};
-
-static inline void netfs_convert_inode_info(struct netfs_inode_info *info)
-{
- info->mode = __cpu_to_be32(info->mode);
- info->nlink = __cpu_to_be32(info->nlink);
- info->uid = __cpu_to_be32(info->uid);
- info->gid = __cpu_to_be32(info->gid);
- info->blocksize = __cpu_to_be32(info->blocksize);
- info->blocks = __cpu_to_be64(info->blocks);
- info->rdev = __cpu_to_be64(info->rdev);
- info->size = __cpu_to_be64(info->size);
- info->version = __cpu_to_be64(info->version);
- info->ino = __cpu_to_be64(info->ino);
-}
-
-/*
- * Cache state machine.
- */
-enum {
- NETFS_COMMAND_PENDING = 0, /* Command is being executed */
- NETFS_INODE_REMOTE_SYNCED, /* Inode was synced to server */
- NETFS_INODE_REMOTE_DIR_SYNCED, /* Inode (directory) was synced from the server */
- NETFS_INODE_OWNED, /* Inode is owned by given host */
- NETFS_INODE_NEED_FLUSH, /* Inode has to be flushed to the server */
-};
-
-/*
- * POHMELFS capabilities: information about supported
- * crypto operations (hash/cipher, modes, key sizes and so on),
- * root information (used/available size, number of objects, permissions)
- */
-enum pohmelfs_capabilities {
- POHMELFS_CRYPTO_CAPABILITIES = 0,
- POHMELFS_ROOT_CAPABILITIES,
-};
-
-/* Read-only mount */
-#define POHMELFS_FLAGS_RO (1<<0)
-/* Extended attributes support on/off */
-#define POHMELFS_FLAGS_XATTR (1<<1)
-
-struct netfs_root_capabilities {
- __u64 nr_files;
- __u64 used, avail;
- __u64 flags;
-};
-
-static inline void netfs_convert_root_capabilities(struct netfs_root_capabilities *cap)
-{
- cap->nr_files = __cpu_to_be64(cap->nr_files);
- cap->used = __cpu_to_be64(cap->used);
- cap->avail = __cpu_to_be64(cap->avail);
- cap->flags = __cpu_to_be64(cap->flags);
-}
-
-struct netfs_crypto_capabilities {
- unsigned short hash_strlen; /* Hash string length, e.g. "hmac(sha1)", including the 0 byte */
- unsigned short cipher_strlen; /* Cipher string length with the same format */
- unsigned int cipher_keysize; /* Cipher key size */
-};
-
-static inline void netfs_convert_crypto_capabilities(struct netfs_crypto_capabilities *cap)
-{
- cap->hash_strlen = __cpu_to_be16(cap->hash_strlen);
- cap->cipher_strlen = __cpu_to_be16(cap->cipher_strlen);
- cap->cipher_keysize = __cpu_to_be32(cap->cipher_keysize);
-}
-
-enum pohmelfs_lock_type {
- POHMELFS_LOCK_GRAB = (1<<15),
-
- POHMELFS_READ_LOCK = 0,
- POHMELFS_WRITE_LOCK,
-};
-
-struct netfs_lock {
- __u64 start;
- __u64 ino;
- __u32 size;
- __u32 type;
-};
-
-static inline void netfs_convert_lock(struct netfs_lock *lock)
-{
- lock->start = __cpu_to_be64(lock->start);
- lock->ino = __cpu_to_be64(lock->ino);
- lock->size = __cpu_to_be32(lock->size);
- lock->type = __cpu_to_be32(lock->type);
-}
-
-#ifdef __KERNEL__
-
-#include <linux/kernel.h>
-#include <linux/completion.h>
-#include <linux/rbtree.h>
-#include <linux/net.h>
-#include <linux/poll.h>
-
-/*
- * Private POHMELFS cache of objects in directory.
- */
-struct pohmelfs_name {
- struct rb_node hash_node;
-
- struct list_head sync_create_entry;
-
- u64 ino;
-
- u32 hash;
- u32 mode;
- u32 len;
-
- char *data;
-};
-
-/*
- * POHMELFS inode. Main object.
- */
-struct pohmelfs_inode {
- struct list_head inode_entry; /* Entry in superblock list.
- * Objects which are not bound to a dentry need to be dropped
- * in ->put_super()
- */
- struct rb_root hash_root; /* The same, but indexed by name hash and len */
- struct mutex offset_lock; /* Protect both above trees */
-
- struct list_head sync_create_list; /* List of created but not yet synced to the server children */
-
- unsigned int drop_count;
-
- int lock_type; /* How this inode is locked: read or write */
-
- int error; /* Transaction error for given inode */
-
- long state; /* State machine above */
-
- u64 ino; /* Inode number */
- u64 total_len; /* Total length of all children names, used to create offsets */
-
- struct inode vfs_inode;
-};
-
-struct netfs_trans;
-typedef int (*netfs_trans_complete_t)(struct page **pages, unsigned int page_num,
- void *private, int err);
-
-struct netfs_state;
-struct pohmelfs_sb;
-
-struct netfs_trans {
- /*
- * Transaction header and attached contiguous data live here.
- */
- struct iovec iovec;
-
- /*
- * Pages attached to transaction.
- */
- struct page **pages;
-
- /*
- * List and protecting lock for transaction destination
- * network states.
- */
- spinlock_t dst_lock;
- struct list_head dst_list;
-
- /*
- * Number of users for given transaction.
- * For example each network state attached to transaction
- * via dst_list increases it.
- */
- atomic_t refcnt;
-
- /*
- * Number of pages attached to the given transaction.
- * Some slots in the above page array can be NULL, since a page
- * may already be under writeback, in which case it is skipped
- * in this transaction.
- */
- unsigned int page_num;
-
- /*
- * Transaction flags: single dst or broadcast and so on.
- */
- unsigned int flags;
-
- /*
- * Size of the data, which can be placed into
- * iovec.iov_base area.
- */
- unsigned int total_size;
-
- /*
- * Number of pages to be sent to the remote server.
- * Usually equal to page_num above, but in case of partial
- * writeback it may accumulate only those pages that have
- * already completed the previous writeback.
- */
- unsigned int attached_pages;
-
- /*
- * Attached number of bytes in all above pages.
- */
- unsigned int attached_size;
-
- /*
- * Unique transaction generation number.
- * Used as the identity in the network state's tree of transactions.
- */
- unsigned int gen;
-
- /*
- * Transaction completion status.
- */
- int result;
-
- /*
- * Superblock this transaction belongs to
- */
- struct pohmelfs_sb *psb;
-
- /*
- * Crypto engine which processed this transaction.
- * Non-NULL only if the crypto engine holds encrypted pages.
- */
- struct pohmelfs_crypto_engine *eng;
-
- /* Private data */
- void *private;
-
- /* Completion callback, invoked just before transaction is destroyed */
- netfs_trans_complete_t complete;
-};
-
-static inline int netfs_trans_cur_len(struct netfs_trans *t)
-{
- return (signed)(t->total_size - t->iovec.iov_len);
-}
-
-static inline void *netfs_trans_current(struct netfs_trans *t)
-{
- return t->iovec.iov_base + t->iovec.iov_len;
-}
-
-struct netfs_trans *netfs_trans_alloc(struct pohmelfs_sb *psb, unsigned int size,
- unsigned int flags, unsigned int nr);
-void netfs_trans_free(struct netfs_trans *t);
-int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb);
-int netfs_trans_finish_send(struct netfs_trans *t, struct pohmelfs_sb *psb);
-
-static inline void netfs_trans_reset(struct netfs_trans *t)
-{
- t->complete = NULL;
-}
-
-struct netfs_trans_dst {
- struct list_head trans_entry;
- struct rb_node state_entry;
-
- unsigned long send_time;
-
- /*
- * Number of times this transaction was resent to its old or new
- * destinations, depending on the flags. When it reaches the maximum
- * allowed number, specified in superblock->trans_retries, the
- * transaction will be freed with an ETIMEDOUT error.
- */
- unsigned int retries;
-
- struct netfs_trans *trans;
- struct netfs_state *state;
-};
-
-struct netfs_trans_dst *netfs_trans_search(struct netfs_state *st, unsigned int gen);
-void netfs_trans_drop_dst(struct netfs_trans_dst *dst);
-void netfs_trans_drop_dst_nostate(struct netfs_trans_dst *dst);
-void netfs_trans_drop_trans(struct netfs_trans *t, struct netfs_state *st);
-void netfs_trans_drop_last(struct netfs_trans *t, struct netfs_state *st);
-int netfs_trans_resend(struct netfs_trans *t, struct pohmelfs_sb *psb);
-int netfs_trans_remove_nolock(struct netfs_trans_dst *dst, struct netfs_state *st);
-
-int netfs_trans_init(void);
-void netfs_trans_exit(void);
-
-struct pohmelfs_crypto_engine {
- u64 iv; /* Crypto IV for current operation */
- unsigned long timeout; /* Crypto waiting timeout */
- unsigned int size; /* Size of crypto scratchpad */
- void *data; /* Temporary crypto scratchpad */
- /*
- * Crypto operations performed on objects.
- */
- struct crypto_hash *hash;
- struct crypto_ablkcipher *cipher;
-
- struct pohmelfs_crypto_thread *thread; /* Crypto thread which hosts this engine */
-
- struct page **pages;
- unsigned int page_num;
-};
-
-struct pohmelfs_crypto_thread {
- struct list_head thread_entry;
-
- struct task_struct *thread;
- struct pohmelfs_sb *psb;
-
- struct pohmelfs_crypto_engine eng;
-
- struct netfs_trans *trans;
-
- wait_queue_head_t wait;
- int error;
-
- unsigned int size;
- struct page *page;
-};
-
-void pohmelfs_crypto_thread_make_ready(struct pohmelfs_crypto_thread *th);
-
-/*
- * Network state, attached to one server.
- */
-struct netfs_state {
- struct mutex __state_lock; /* Can not allow to use the same socket simultaneously */
- struct mutex __state_send_lock;
- struct netfs_cmd cmd; /* Cached command */
- struct netfs_inode_info info; /* Cached inode info */
-
- void *data; /* Cached data buffer */
- unsigned int size; /* Size of that data */
-
- struct pohmelfs_sb *psb; /* Superblock */
-
- struct task_struct *thread; /* Async receiving thread */
-
- /* Waiting/polling machinery */
- wait_queue_t wait;
- wait_queue_head_t *whead;
- wait_queue_head_t thread_wait;
-
- struct mutex trans_lock;
- struct rb_root trans_root;
-
- struct pohmelfs_ctl ctl; /* Remote peer */
-
- struct socket *socket; /* Socket object */
- struct socket *read_socket; /* Cached pointer to socket object.
- * Used to determine whether the socket was changed between lock drops.
- * Never used to read data or for any other kind of access.
- */
- /*
- * Crypto engine to process incoming data.
- */
- struct pohmelfs_crypto_engine eng;
-
- int need_reset;
-};
-
-int netfs_state_init(struct netfs_state *st);
-void netfs_state_exit(struct netfs_state *st);
-
-static inline void netfs_state_lock_send(struct netfs_state *st)
-{
- mutex_lock(&st->__state_send_lock);
-}
-
-static inline int netfs_state_trylock_send(struct netfs_state *st)
-{
- return mutex_trylock(&st->__state_send_lock);
-}
-
-static inline void netfs_state_unlock_send(struct netfs_state *st)
-{
- BUG_ON(!mutex_is_locked(&st->__state_send_lock));
-
- mutex_unlock(&st->__state_send_lock);
-}
-
-static inline void netfs_state_lock(struct netfs_state *st)
-{
- mutex_lock(&st->__state_lock);
-}
-
-static inline void netfs_state_unlock(struct netfs_state *st)
-{
- BUG_ON(!mutex_is_locked(&st->__state_lock));
-
- mutex_unlock(&st->__state_lock);
-}
-
-static inline unsigned int netfs_state_poll(struct netfs_state *st)
-{
- unsigned int revents = POLLHUP | POLLERR;
-
- netfs_state_lock(st);
- if (st->socket)
- revents = st->socket->ops->poll(NULL, st->socket, NULL);
- netfs_state_unlock(st);
-
- return revents;
-}
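
/*
 * Note: netfs_state_poll() above asks the socket for its current event mask
 * without sleeping (passing a NULL file and a NULL poll_table means "do not
 * register a waiter"). The closest userspace analogue, shown here purely as
 * an illustrative sketch and not as pohmelfs code, is poll(2) with a zero
 * timeout:
 */
#include <poll.h>

/* Return the current readiness mask of fd without blocking, falling back to
 * POLLHUP | POLLERR when there is no usable descriptor, mirroring the
 * default in netfs_state_poll(). */
static short socket_poll_now(int fd)
{
	struct pollfd pfd = {
		.fd = fd,
		.events = POLLIN | POLLOUT,
	};

	if (fd < 0 || poll(&pfd, 1, 0) < 0)
		return POLLHUP | POLLERR;

	return pfd.revents;
}
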
-
-struct pohmelfs_config;
-
-struct pohmelfs_sb {
- struct rb_root mcache_root;
- struct mutex mcache_lock;
- atomic_long_t mcache_gen;
- unsigned long mcache_timeout;
-
- unsigned int idx;
-
- unsigned int trans_retries;
-
- atomic_t trans_gen;
-
- unsigned int crypto_attached_size;
- unsigned int crypto_align_size;
-
- unsigned int crypto_fail_unsupported;
-
- unsigned int crypto_thread_num;
- struct list_head crypto_active_list, crypto_ready_list;
- struct mutex crypto_thread_lock;
-
- unsigned int trans_max_pages;
- unsigned long trans_data_size;
- unsigned long trans_timeout;
-
- unsigned long drop_scan_timeout;
- unsigned long trans_scan_timeout;
-
- unsigned long wait_on_page_timeout;
-
- struct list_head flush_list;
- struct list_head drop_list;
- spinlock_t ino_lock;
- u64 ino;
-
- /*
- * Remote nodes POHMELFS connected to.
- */
- struct list_head state_list;
- struct mutex state_lock;
-
- /*
- * Currently active state to request data from.
- */
- struct pohmelfs_config *active_state;
-
-
- wait_queue_head_t wait;
-
- /*
- * Timed checks: stale transactions, inodes to be freed and so on.
- */
- struct delayed_work dwork;
- struct delayed_work drop_dwork;
-
- struct super_block *sb;
-
- struct backing_dev_info bdi;
-
- /*
- * Algorithm strings.
- */
- char *hash_string;
- char *cipher_string;
-
- u8 *hash_key;
- u8 *cipher_key;
-
- /*
- * Algorithm string lengths.
- */
- unsigned int hash_strlen;
- unsigned int cipher_strlen;
- unsigned int hash_keysize;
- unsigned int cipher_keysize;
-
- /*
- * Controls whether to perform crypto processing or not.
- */
- int perform_crypto;
-
- /*
- * POHMELFS statistics.
- */
- u64 total_size;
- u64 avail_size;
- atomic_long_t total_inodes;
-
- /*
- * Xattr support, read-only and so on.
- */
- u64 state_flags;
-
- /*
- * Temporary storage to detect changes in the wait queue.
- */
- long flags;
-};
-
-static inline void netfs_trans_update(struct netfs_cmd *cmd,
- struct netfs_trans *t, unsigned int size)
-{
- unsigned int sz = ALIGN(size, t->psb->crypto_align_size);
-
- t->iovec.iov_len += sizeof(struct netfs_cmd) + sz;
- cmd->cpad = __cpu_to_be16(sz - size);
-}
-
-static inline struct pohmelfs_sb *POHMELFS_SB(struct super_block *sb)
-{
- return sb->s_fs_info;
-}
-
-static inline struct pohmelfs_inode *POHMELFS_I(struct inode *inode)
-{
- return container_of(inode, struct pohmelfs_inode, vfs_inode);
-}
-
-static inline u64 pohmelfs_new_ino(struct pohmelfs_sb *psb)
-{
- u64 ino;
-
- spin_lock(&psb->ino_lock);
- ino = psb->ino++;
- spin_unlock(&psb->ino_lock);
-
- return ino;
-}
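
/*
 * Note: pohmelfs_new_ino() above is simply a spinlock-protected monotonic
 * counter. A userspace sketch of the same idea with a C11 atomic, purely
 * illustrative and not pohmelfs code:
 */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t next_ino = 1;	/* starting value is arbitrary here */

/* Hand out inode numbers one at a time, like pohmelfs_new_ino(). */
static uint64_t new_ino(void)
{
	return atomic_fetch_add(&next_ino, 1);
}
/*
 * The kernel code keeps the spinlock because the same ino_lock also protects
 * the superblock drop_list (see pohmelfs_put_inode() below), so a separate
 * atomic counter would not remove the lock.
 */
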
-
-static inline void pohmelfs_put_inode(struct pohmelfs_inode *pi)
-{
- struct pohmelfs_sb *psb = POHMELFS_SB(pi->vfs_inode.i_sb);
-
- spin_lock(&psb->ino_lock);
- list_move_tail(&pi->inode_entry, &psb->drop_list);
- pi->drop_count++;
- spin_unlock(&psb->ino_lock);
-}
-
-struct pohmelfs_config {
- struct list_head config_entry;
-
- struct netfs_state state;
-};
-
-struct pohmelfs_config_group {
- /*
- * Entry in the global config group list.
- */
- struct list_head group_entry;
-
- /*
- * Index of the current group.
- */
- unsigned int idx;
- /*
- * Number of config_list entries in this group entry.
- */
- unsigned int num_entry;
- /*
- * Algorithm strings.
- */
- char *hash_string;
- char *cipher_string;
-
- /*
- * Algorithm string lengths.
- */
- unsigned int hash_strlen;
- unsigned int cipher_strlen;
-
- /*
- * Key and its size.
- */
- unsigned int hash_keysize;
- unsigned int cipher_keysize;
- u8 *hash_key;
- u8 *cipher_key;
-
- /*
- * List of config entries (network state info) for given idx.
- */
- struct list_head config_list;
-};
-
-int __init pohmelfs_config_init(void);
-void pohmelfs_config_exit(void);
-int pohmelfs_copy_config(struct pohmelfs_sb *psb);
-int pohmelfs_copy_crypto(struct pohmelfs_sb *psb);
-int pohmelfs_config_check(struct pohmelfs_config *config, int idx);
-int pohmelfs_state_init_one(struct pohmelfs_sb *psb, struct pohmelfs_config *conf);
-
-extern const struct file_operations pohmelfs_dir_fops;
-extern const struct inode_operations pohmelfs_dir_inode_ops;
-
-int pohmelfs_state_init(struct pohmelfs_sb *psb);
-void pohmelfs_state_exit(struct pohmelfs_sb *psb);
-void pohmelfs_state_flush_transactions(struct netfs_state *st);
-
-void pohmelfs_fill_inode(struct inode *inode, struct netfs_inode_info *info);
-
-void pohmelfs_name_del(struct pohmelfs_inode *parent, struct pohmelfs_name *n);
-void pohmelfs_free_names(struct pohmelfs_inode *parent);
-struct pohmelfs_name *pohmelfs_search_hash(struct pohmelfs_inode *pi, u32 hash);
-
-void pohmelfs_inode_del_inode(struct pohmelfs_sb *psb, struct pohmelfs_inode *pi);
-
-struct pohmelfs_inode *pohmelfs_create_entry_local(struct pohmelfs_sb *psb,
- struct pohmelfs_inode *parent, struct qstr *str, u64 start, int mode);
-
-int pohmelfs_write_create_inode(struct pohmelfs_inode *pi);
-
-int pohmelfs_write_inode_create(struct inode *inode, struct netfs_trans *trans);
-int pohmelfs_remove_child(struct pohmelfs_inode *parent, struct pohmelfs_name *n);
-
-struct pohmelfs_inode *pohmelfs_new_inode(struct pohmelfs_sb *psb,
- struct pohmelfs_inode *parent, struct qstr *str,
- struct netfs_inode_info *info, int link);
-
-int pohmelfs_setattr(struct dentry *dentry, struct iattr *attr);
-int pohmelfs_setattr_raw(struct inode *inode, struct iattr *attr);
-
-int pohmelfs_meta_command(struct pohmelfs_inode *pi, unsigned int cmd_op, unsigned int flags,
- netfs_trans_complete_t complete, void *priv, u64 start);
-int pohmelfs_meta_command_data(struct pohmelfs_inode *pi, u64 id, unsigned int cmd_op, char *addon,
- unsigned int flags, netfs_trans_complete_t complete, void *priv, u64 start);
-
-void pohmelfs_check_states(struct pohmelfs_sb *psb);
-void pohmelfs_switch_active(struct pohmelfs_sb *psb);
-
-int pohmelfs_construct_path_string(struct pohmelfs_inode *pi, void *data, int len);
-int pohmelfs_path_length(struct pohmelfs_inode *pi);
-
-struct pohmelfs_crypto_completion {
- struct completion complete;
- int error;
-};
-
-int pohmelfs_trans_crypt(struct netfs_trans *t, struct pohmelfs_sb *psb);
-void pohmelfs_crypto_exit(struct pohmelfs_sb *psb);
-int pohmelfs_crypto_init(struct pohmelfs_sb *psb);
-
-int pohmelfs_crypto_engine_init(struct pohmelfs_crypto_engine *e, struct pohmelfs_sb *psb);
-void pohmelfs_crypto_engine_exit(struct pohmelfs_crypto_engine *e);
-
-int pohmelfs_crypto_process_input_data(struct pohmelfs_crypto_engine *e, u64 iv,
- void *data, struct page *page, unsigned int size);
-int pohmelfs_crypto_process_input_page(struct pohmelfs_crypto_engine *e,
- struct page *page, unsigned int size, u64 iv);
-
-static inline u64 pohmelfs_gen_iv(struct netfs_trans *t)
-{
- u64 iv = t->gen;
-
- iv <<= 32;
- iv |= ((unsigned long)t) & 0xffffffff;
-
- return iv;
-}
-
-int pohmelfs_data_lock(struct pohmelfs_inode *pi, u64 start, u32 size, int type);
-int pohmelfs_data_unlock(struct pohmelfs_inode *pi, u64 start, u32 size, int type);
-int pohmelfs_data_lock_response(struct netfs_state *st);
-
-static inline int pohmelfs_need_lock(struct pohmelfs_inode *pi, int type)
-{
- if (test_bit(NETFS_INODE_OWNED, &pi->state)) {
- if (type == pi->lock_type)
- return 0;
- if ((type == POHMELFS_READ_LOCK) && (pi->lock_type == POHMELFS_WRITE_LOCK))
- return 0;
- }
-
- if (!test_bit(NETFS_INODE_REMOTE_SYNCED, &pi->state))
- return 0;
-
- return 1;
-}
-
-int __init pohmelfs_mcache_init(void);
-void pohmelfs_mcache_exit(void);
-
-/* #define CONFIG_POHMELFS_DEBUG */
-
-#ifdef CONFIG_POHMELFS_DEBUG
-#define dprintka(f, a...) printk(f, ##a)
-#define dprintk(f, a...) printk("%d: " f, task_pid_vnr(current), ##a)
-#else
-#define dprintka(f, a...) do {} while (0)
-#define dprintk(f, a...) do {} while (0)
-#endif
-
-static inline void netfs_trans_get(struct netfs_trans *t)
-{
- atomic_inc(&t->refcnt);
-}
-
-static inline void netfs_trans_put(struct netfs_trans *t)
-{
- if (atomic_dec_and_test(&t->refcnt)) {
- dprintk("%s: t: %p, gen: %u, err: %d.\n",
- __func__, t, t->gen, t->result);
- if (t->complete)
- t->complete(t->pages, t->page_num,
- t->private, t->result);
- netfs_trans_free(t);
- }
-}
-
-struct pohmelfs_mcache {
- struct rb_node mcache_entry;
- struct completion complete;
-
- atomic_t refcnt;
-
- u64 gen;
-
- void *data;
- u64 start;
- u32 size;
- int err;
-
- struct netfs_inode_info info;
-};
-
-struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start,
- unsigned int size, void *data);
-void pohmelfs_mcache_free(struct pohmelfs_sb *psb, struct pohmelfs_mcache *m);
-struct pohmelfs_mcache *pohmelfs_mcache_search(struct pohmelfs_sb *psb, u64 gen);
-void pohmelfs_mcache_remove_locked(struct pohmelfs_sb *psb, struct pohmelfs_mcache *m);
-
-static inline void pohmelfs_mcache_get(struct pohmelfs_mcache *m)
-{
- atomic_inc(&m->refcnt);
-}
-
-static inline void pohmelfs_mcache_put(struct pohmelfs_sb *psb,
- struct pohmelfs_mcache *m)
-{
- if (atomic_dec_and_test(&m->refcnt))
- pohmelfs_mcache_free(psb, m);
-}
-
-/*#define POHMELFS_TRUNCATE_ON_INODE_FLUSH
- */
-
-#endif /* __KERNEL__*/
-
-#endif /* __NETFS_H */
diff --git a/drivers/staging/pohmelfs/path_entry.c b/drivers/staging/pohmelfs/path_entry.c
deleted file mode 100644
index 400a9fc..0000000
--- a/drivers/staging/pohmelfs/path_entry.c
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/fs.h>
-#include <linux/ktime.h>
-#include <linux/fs_struct.h>
-#include <linux/pagemap.h>
-#include <linux/writeback.h>
-#include <linux/mount.h>
-#include <linux/mm.h>
-
-#include "netfs.h"
-
-#define UNHASHED_OBSCURE_STRING_SIZE sizeof(" (deleted)")
-
-/*
- * Create path from root for given inode.
- * Path is formed as a set of structures, containing the name of the object
- * and its inode data (mode, permissions and so on).
- */
-int pohmelfs_construct_path_string(struct pohmelfs_inode *pi, void *data, int len)
-{
- struct path path;
- struct dentry *d;
- char *ptr;
- int err = 0, strlen, reduce = 0;
-
- d = d_find_alias(&pi->vfs_inode);
- if (!d) {
- printk("%s: no alias, list_empty: %d.\n", __func__, list_empty(&pi->vfs_inode.i_dentry));
- return -ENOENT;
- }
-
- spin_lock(&current->fs->lock);
- path.mnt = mntget(current->fs->root.mnt);
- spin_unlock(&current->fs->lock);
-
- path.dentry = d;
-
- if (!IS_ROOT(d) && d_unhashed(d))
- reduce = 1;
-
- ptr = d_path(&path, data, len);
- if (IS_ERR(ptr)) {
- err = PTR_ERR(ptr);
- goto out;
- }
-
- if (reduce && len >= UNHASHED_OBSCURE_STRING_SIZE) {
- char *end = data + len - UNHASHED_OBSCURE_STRING_SIZE;
- *end = '\0';
- }
-
- strlen = len - (ptr - (char *)data);
- memmove(data, ptr, strlen);
- ptr = data;
-
- err = strlen;
-
- dprintk("%s: dname: '%s', len: %u, maxlen: %u, name: '%s', strlen: %d.\n",
- __func__, d->d_name.name, d->d_name.len, len, ptr, strlen);
-
-out:
- dput(d);
- mntput(path.mnt);
-
- return err;
-}
-
-int pohmelfs_path_length(struct pohmelfs_inode *pi)
-{
- struct dentry *d, *root, *first;
- int len;
- unsigned seq;
-
- first = d_find_alias(&pi->vfs_inode);
- if (!first) {
- dprintk("%s: ino: %llu, mode: %o.\n", __func__, pi->ino, pi->vfs_inode.i_mode);
- return -ENOENT;
- }
-
- spin_lock(&current->fs->lock);
- root = dget(current->fs->root.dentry);
- spin_unlock(&current->fs->lock);
-
-rename_retry:
- len = 1; /* Root slash */
- d = first;
- seq = read_seqbegin(&rename_lock);
- rcu_read_lock();
-
- if (!IS_ROOT(d) && d_unhashed(d))
- len += UNHASHED_OBSCURE_STRING_SIZE; /* Obscure " (deleted)" string */
-
- while (d && d != root && !IS_ROOT(d)) {
- len += d->d_name.len + 1; /* Plus slash */
- d = d->d_parent;
- }
- rcu_read_unlock();
- if (read_seqretry(&rename_lock, seq))
- goto rename_retry;
-
- dput(root);
- dput(first);
-
- return len + 1; /* Including zero-byte */
-}
diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
deleted file mode 100644
index 06c1a74..0000000
--- a/drivers/staging/pohmelfs/trans.c
+++ /dev/null
@@ -1,706 +0,0 @@
-/*
- * 2007+ Copyright (c) Evgeniy Polyakov <zbr@ioremap.net>
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/crypto.h>
-#include <linux/fs.h>
-#include <linux/jhash.h>
-#include <linux/hash.h>
-#include <linux/ktime.h>
-#include <linux/mempool.h>
-#include <linux/mm.h>
-#include <linux/mount.h>
-#include <linux/pagemap.h>
-#include <linux/parser.h>
-#include <linux/poll.h>
-#include <linux/swap.h>
-#include <linux/slab.h>
-#include <linux/statfs.h>
-#include <linux/writeback.h>
-
-#include "netfs.h"
-
-static struct kmem_cache *netfs_trans_dst;
-static mempool_t *netfs_trans_dst_pool;
-
-static void netfs_trans_init_static(struct netfs_trans *t, int num, int size)
-{
- t->page_num = num;
- t->total_size = size;
- atomic_set(&t->refcnt, 1);
-
- spin_lock_init(&t->dst_lock);
- INIT_LIST_HEAD(&t->dst_list);
-}
-
-static int netfs_trans_send_pages(struct netfs_trans *t, struct netfs_state *st)
-{
- int err = 0;
- unsigned int i, attached_pages = t->attached_pages, ci;
- struct msghdr msg;
- struct page **pages = (t->eng) ? t->eng->pages : t->pages;
- struct page *p;
- unsigned int size;
-
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = MSG_WAITALL | MSG_MORE;
-
- ci = 0;
- for (i = 0; i < t->page_num; ++i) {
- struct page *page = pages[ci];
- struct netfs_cmd cmd;
- struct iovec io;
-
- p = t->pages[i];
-
- if (!p)
- continue;
-
- size = page_private(p);
-
- io.iov_base = &cmd;
- io.iov_len = sizeof(struct netfs_cmd);
-
- cmd.cmd = NETFS_WRITE_PAGE;
- cmd.ext = 0;
- cmd.id = 0;
- cmd.size = size;
- cmd.start = p->index;
- cmd.start <<= PAGE_CACHE_SHIFT;
- cmd.csize = 0;
- cmd.cpad = 0;
- cmd.iv = pohmelfs_gen_iv(t);
-
- netfs_convert_cmd(&cmd);
-
- msg.msg_iov = &io;
- msg.msg_iovlen = 1;
- msg.msg_flags = MSG_WAITALL | MSG_MORE;
-
- err = kernel_sendmsg(st->socket, &msg, (struct kvec *)msg.msg_iov, 1, sizeof(struct netfs_cmd));
- if (err <= 0) {
- printk("%s: %d/%d failed to send transaction header: t: %p, gen: %u, err: %d.\n",
- __func__, i, t->page_num, t, t->gen, err);
- if (err == 0)
- err = -ECONNRESET;
- goto err_out;
- }
-
- msg.msg_flags = MSG_WAITALL | (attached_pages == 1 ? 0 :
- MSG_MORE);
-
- err = kernel_sendpage(st->socket, page, 0, size, msg.msg_flags);
- if (err <= 0) {
- printk("%s: %d/%d failed to send transaction page: t: %p, gen: %u, size: %u, err: %d.\n",
- __func__, i, t->page_num, t, t->gen, size, err);
- if (err == 0)
- err = -ECONNRESET;
- goto err_out;
- }
-
- dprintk("%s: %d/%d sent t: %p, gen: %u, page: %p/%p, size: %u.\n",
- __func__, i, t->page_num, t, t->gen, page, p, size);
-
- err = 0;
- attached_pages--;
- if (!attached_pages)
- break;
- ci++;
-
- continue;
-
-err_out:
- printk("%s: t: %p, gen: %u, err: %d.\n", __func__, t, t->gen, err);
- netfs_state_exit(st);
- break;
- }
-
- return err;
-}
-
-int netfs_trans_send(struct netfs_trans *t, struct netfs_state *st)
-{
- int err;
- struct msghdr msg;
-
- BUG_ON(!t->iovec.iov_len);
- BUG_ON(t->iovec.iov_len > 1024*1024*1024);
-
- netfs_state_lock_send(st);
- if (!st->socket) {
- err = netfs_state_init(st);
- if (err)
- goto err_out_unlock_return;
- }
-
- msg.msg_iov = &t->iovec;
- msg.msg_iovlen = 1;
- msg.msg_name = NULL;
- msg.msg_namelen = 0;
- msg.msg_control = NULL;
- msg.msg_controllen = 0;
- msg.msg_flags = MSG_WAITALL;
-
- if (t->attached_pages)
- msg.msg_flags |= MSG_MORE;
-
- err = kernel_sendmsg(st->socket, &msg, (struct kvec *)msg.msg_iov, 1, t->iovec.iov_len);
- if (err <= 0) {
- printk("%s: failed to send contig transaction: t: %p, gen: %u, size: %zu, err: %d.\n",
- __func__, t, t->gen, t->iovec.iov_len, err);
- if (err == 0)
- err = -ECONNRESET;
- goto err_out_unlock_return;
- }
-
- dprintk("%s: sent %s transaction: t: %p, gen: %u, size: %zu, page_num: %u.\n",
- __func__, (t->page_num) ? "partial" : "full",
- t, t->gen, t->iovec.iov_len, t->page_num);
-
- err = 0;
- if (t->attached_pages)
- err = netfs_trans_send_pages(t, st);
-
-err_out_unlock_return:
-
- if (st->need_reset)
- netfs_state_exit(st);
-
- netfs_state_unlock_send(st);
-
- dprintk("%s: t: %p, gen: %u, err: %d.\n",
- __func__, t, t->gen, err);
-
- t->result = err;
- return err;
-}
-
-static inline int netfs_trans_cmp(unsigned int gen, unsigned int new)
-{
- if (gen < new)
- return 1;
- if (gen > new)
- return -1;
- return 0;
-}
-
-struct netfs_trans_dst *netfs_trans_search(struct netfs_state *st, unsigned int gen)
-{
- struct rb_root *root = &st->trans_root;
- struct rb_node *n = root->rb_node;
- struct netfs_trans_dst *tmp, *ret = NULL;
- struct netfs_trans *t;
- int cmp;
-
- while (n) {
- tmp = rb_entry(n, struct netfs_trans_dst, state_entry);
- t = tmp->trans;
-
- cmp = netfs_trans_cmp(t->gen, gen);
- if (cmp < 0)
- n = n->rb_left;
- else if (cmp > 0)
- n = n->rb_right;
- else {
- ret = tmp;
- break;
- }
- }
-
- return ret;
-}
-
-static int netfs_trans_insert(struct netfs_trans_dst *ndst, struct netfs_state *st)
-{
- struct rb_root *root = &st->trans_root;
- struct rb_node **n = &root->rb_node, *parent = NULL;
- struct netfs_trans_dst *ret = NULL, *tmp;
- struct netfs_trans *t = NULL, *new = ndst->trans;
- int cmp;
-
- while (*n) {
- parent = *n;
-
- tmp = rb_entry(parent, struct netfs_trans_dst, state_entry);
- t = tmp->trans;
-
- cmp = netfs_trans_cmp(t->gen, new->gen);
- if (cmp < 0)
- n = &parent->rb_left;
- else if (cmp > 0)
- n = &parent->rb_right;
- else {
- ret = tmp;
- break;
- }
- }
-
- if (ret) {
- printk("%s: exist: old: gen: %u, flags: %x, send_time: %lu, "
- "new: gen: %u, flags: %x, send_time: %lu.\n",
- __func__, t->gen, t->flags, ret->send_time,
- new->gen, new->flags, ndst->send_time);
- return -EEXIST;
- }
-
- rb_link_node(&ndst->state_entry, parent, n);
- rb_insert_color(&ndst->state_entry, root);
- ndst->send_time = jiffies;
-
- return 0;
-}
-
-int netfs_trans_remove_nolock(struct netfs_trans_dst *dst, struct netfs_state *st)
-{
- if (dst && dst->state_entry.rb_parent_color) {
- rb_erase(&dst->state_entry, &st->trans_root);
- dst->state_entry.rb_parent_color = 0;
- return 1;
- }
- return 0;
-}
-
-static int netfs_trans_remove_state(struct netfs_trans_dst *dst)
-{
- int ret;
- struct netfs_state *st = dst->state;
-
- mutex_lock(&st->trans_lock);
- ret = netfs_trans_remove_nolock(dst, st);
- mutex_unlock(&st->trans_lock);
-
- return ret;
-}
-
-/*
- * Create new destination for given transaction associated with given network state.
- * Transaction's reference counter is bumped and will be dropped when either
- * a reply is received or the async timeout detection task fails to resend
- * and drops the transaction.
- */
-static int netfs_trans_push_dst(struct netfs_trans *t, struct netfs_state *st)
-{
- struct netfs_trans_dst *dst;
- int err;
-
- dst = mempool_alloc(netfs_trans_dst_pool, GFP_KERNEL);
- if (!dst)
- return -ENOMEM;
-
- dst->retries = 0;
- dst->send_time = 0;
- dst->state = st;
- dst->trans = t;
- netfs_trans_get(t);
-
- mutex_lock(&st->trans_lock);
- err = netfs_trans_insert(dst, st);
- mutex_unlock(&st->trans_lock);
-
- if (err)
- goto err_out_free;
-
- spin_lock(&t->dst_lock);
- list_add_tail(&dst->trans_entry, &t->dst_list);
- spin_unlock(&t->dst_lock);
-
- return 0;
-
-err_out_free:
- t->result = err;
- netfs_trans_put(t);
- mempool_free(dst, netfs_trans_dst_pool);
- return err;
-}
-
-static void netfs_trans_free_dst(struct netfs_trans_dst *dst)
-{
- netfs_trans_put(dst->trans);
- mempool_free(dst, netfs_trans_dst_pool);
-}
-
-static void netfs_trans_remove_dst(struct netfs_trans_dst *dst)
-{
- if (netfs_trans_remove_state(dst))
- netfs_trans_free_dst(dst);
-}
-
-/*
- * Drop destination transaction entry when we know it.
- */
-void netfs_trans_drop_dst(struct netfs_trans_dst *dst)
-{
- struct netfs_trans *t = dst->trans;
-
- spin_lock(&t->dst_lock);
- list_del_init(&dst->trans_entry);
- spin_unlock(&t->dst_lock);
-
- netfs_trans_remove_dst(dst);
-}
-
-/*
- * Drop destination transaction entry when we know it and when we
- * already removed dst from state tree.
- */
-void netfs_trans_drop_dst_nostate(struct netfs_trans_dst *dst)
-{
- struct netfs_trans *t = dst->trans;
-
- spin_lock(&t->dst_lock);
- list_del_init(&dst->trans_entry);
- spin_unlock(&t->dst_lock);
-
- netfs_trans_free_dst(dst);
-}
-
-/*
- * This drops destination transaction entry from appropriate network state
- * tree and drops related reference counter. It is possible that transaction
- * will be freed here if its reference counter hits zero.
- * Destination transaction entry will be freed.
- */
-void netfs_trans_drop_trans(struct netfs_trans *t, struct netfs_state *st)
-{
- struct netfs_trans_dst *dst, *tmp, *ret = NULL;
-
- spin_lock(&t->dst_lock);
- list_for_each_entry_safe(dst, tmp, &t->dst_list, trans_entry) {
- if (dst->state == st) {
- ret = dst;
- list_del(&dst->trans_entry);
- break;
- }
- }
- spin_unlock(&t->dst_lock);
-
- if (ret)
- netfs_trans_remove_dst(ret);
-}
-
-/*
- * This drops destination transaction entry from appropriate network state
- * tree and drops related reference counter. It is possible that transaction
- * will be freed here if its reference counter hits zero.
- * Destination transaction entry will be freed.
- */
-void netfs_trans_drop_last(struct netfs_trans *t, struct netfs_state *st)
-{
- struct netfs_trans_dst *dst, *tmp, *ret;
-
- spin_lock(&t->dst_lock);
- ret = list_entry(t->dst_list.prev, struct netfs_trans_dst, trans_entry);
- if (ret->state != st) {
- ret = NULL;
- list_for_each_entry_safe(dst, tmp, &t->dst_list, trans_entry) {
- if (dst->state == st) {
- ret = dst;
- list_del_init(&dst->trans_entry);
- break;
- }
- }
- } else {
- list_del(&ret->trans_entry);
- }
- spin_unlock(&t->dst_lock);
-
- if (ret)
- netfs_trans_remove_dst(ret);
-}
-
-static int netfs_trans_push(struct netfs_trans *t, struct netfs_state *st)
-{
- int err;
-
- err = netfs_trans_push_dst(t, st);
- if (err)
- return err;
-
- err = netfs_trans_send(t, st);
- if (err)
- goto err_out_free;
-
- if (t->flags & NETFS_TRANS_SINGLE_DST)
- pohmelfs_switch_active(st->psb);
-
- return 0;
-
-err_out_free:
- t->result = err;
- netfs_trans_drop_last(t, st);
-
- return err;
-}
-
-int netfs_trans_finish_send(struct netfs_trans *t, struct pohmelfs_sb *psb)
-{
- struct pohmelfs_config *c;
- int err = -ENODEV;
- struct netfs_state *st;
-#if 0
- dprintk("%s: t: %p, gen: %u, size: %u, page_num: %u, active: %p.\n",
- __func__, t, t->gen, t->iovec.iov_len, t->page_num, psb->active_state);
-#endif
- mutex_lock(&psb->state_lock);
- list_for_each_entry(c, &psb->state_list, config_entry) {
- st = &c->state;
-
- if (t->flags & NETFS_TRANS_SINGLE_DST) {
- if (!(st->ctl.perm & POHMELFS_IO_PERM_READ))
- continue;
- } else {
- if (!(st->ctl.perm & POHMELFS_IO_PERM_WRITE))
- continue;
- }
-
- if (psb->active_state && (psb->active_state->state.ctl.prio >= st->ctl.prio) &&
- (t->flags & NETFS_TRANS_SINGLE_DST))
- st = &psb->active_state->state;
-
- err = netfs_trans_push(t, st);
- if (!err && (t->flags & NETFS_TRANS_SINGLE_DST))
- break;
- }
-
- mutex_unlock(&psb->state_lock);
-#if 0
- dprintk("%s: fully sent t: %p, gen: %u, size: %u, page_num: %u, err: %d.\n",
- __func__, t, t->gen, t->iovec.iov_len, t->page_num, err);
-#endif
- if (err)
- t->result = err;
- return err;
-}
-
-int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
-{
- int err;
- struct netfs_cmd *cmd = t->iovec.iov_base;
-
- t->gen = atomic_inc_return(&psb->trans_gen);
-
- cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
- t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
- cmd->cmd = NETFS_TRANS;
- cmd->start = t->gen;
- cmd->id = 0;
-
- if (psb->perform_crypto) {
- cmd->ext = psb->crypto_attached_size;
- cmd->csize = psb->crypto_attached_size;
- }
-
- dprintk("%s: t: %u, size: %u, iov_len: %zu, attached_size: %u, attached_pages: %u.\n",
- __func__, t->gen, cmd->size, t->iovec.iov_len, t->attached_size, t->attached_pages);
- err = pohmelfs_trans_crypt(t, psb);
- if (err) {
- t->result = err;
- netfs_convert_cmd(cmd);
- dprintk("%s: trans: %llu, crypto_attached_size: %u, attached_size: %u, attached_pages: %d, trans_size: %u, err: %d.\n",
- __func__, cmd->start, psb->crypto_attached_size, t->attached_size, t->attached_pages, cmd->size, err);
- }
- netfs_trans_put(t);
- return err;
-}
-
-/*
- * Resend transaction to remote server(s).
- * If new servers were added into superblock, we can try to send data
- * to them too.
- *
- * It is called under superblock's state_lock, so we can safely
- * dereference psb->state_list. Also, transaction's reference counter is
- * bumped, so it can not go away under us, thus we can safely access all
- * its members. State is locked.
- *
- * This function returns 0 if transaction was successfully sent to at
- * least one destination target.
- */
-int netfs_trans_resend(struct netfs_trans *t, struct pohmelfs_sb *psb)
-{
- struct netfs_trans_dst *dst;
- struct netfs_state *st;
- struct pohmelfs_config *c;
- int err, exist, error = -ENODEV;
-
- list_for_each_entry(c, &psb->state_list, config_entry) {
- st = &c->state;
-
- exist = 0;
- spin_lock(&t->dst_lock);
- list_for_each_entry(dst, &t->dst_list, trans_entry) {
- if (st == dst->state) {
- exist = 1;
- break;
- }
- }
- spin_unlock(&t->dst_lock);
-
- if (exist) {
- if (!(t->flags & NETFS_TRANS_SINGLE_DST) ||
- (c->config_entry.next == &psb->state_list)) {
- dprintk("%s: resending st: %p, t: %p, gen: %u.\n",
- __func__, st, t, t->gen);
- err = netfs_trans_send(t, st);
- if (!err)
- error = 0;
- }
- continue;
- }
-
- dprintk("%s: pushing/resending st: %p, t: %p, gen: %u.\n",
- __func__, st, t, t->gen);
- err = netfs_trans_push(t, st);
- if (err)
- continue;
- error = 0;
- if (t->flags & NETFS_TRANS_SINGLE_DST)
- break;
- }
-
- t->result = error;
- return error;
-}
-
-void *netfs_trans_add(struct netfs_trans *t, unsigned int size)
-{
- struct iovec *io = &t->iovec;
- void *ptr;
-
- if (size > t->total_size) {
- ptr = ERR_PTR(-EINVAL);
- goto out;
- }
-
- if (io->iov_len + size > t->total_size) {
- dprintk("%s: too big size t: %p, gen: %u, iov_len: %zu, size: %u, total: %u.\n",
- __func__, t, t->gen, io->iov_len, size, t->total_size);
- ptr = ERR_PTR(-E2BIG);
- goto out;
- }
-
- ptr = io->iov_base + io->iov_len;
- io->iov_len += size;
-
-out:
- dprintk("%s: t: %p, gen: %u, size: %u, total: %zu.\n",
- __func__, t, t->gen, size, io->iov_len);
- return ptr;
-}
-
-void netfs_trans_free(struct netfs_trans *t)
-{
- if (t->eng)
- pohmelfs_crypto_thread_make_ready(t->eng->thread);
- kfree(t);
-}
-
-struct netfs_trans *netfs_trans_alloc(struct pohmelfs_sb *psb, unsigned int size,
- unsigned int flags, unsigned int nr)
-{
- struct netfs_trans *t;
- unsigned int num, cont, pad, size_no_trans;
- unsigned int crypto_added = 0;
- struct netfs_cmd *cmd;
-
- if (psb->perform_crypto)
- crypto_added = psb->crypto_attached_size;
-
- /*
- * |sizeof(struct netfs_trans)|
- * |sizeof(struct netfs_cmd)| - transaction header
- * |size| - buffer with requested size
- * |padding| - crypto padding, zero bytes
- * |nr * sizeof(struct page *)| - array of page pointers
- *
- * Overall size should be less than PAGE_SIZE for guaranteed allocation.
- */
-
- cont = size;
- size = ALIGN(size, psb->crypto_align_size);
- pad = size - cont;
-
- size_no_trans = size + sizeof(struct netfs_cmd) * 2 + crypto_added;
-
- cont = sizeof(struct netfs_trans) + size_no_trans;
-
- num = (PAGE_SIZE - cont)/sizeof(struct page *);
-
- if (nr > num)
- nr = num;
-
- t = kzalloc(cont + nr*sizeof(struct page *), GFP_NOIO);
- if (!t)
- goto err_out_exit;
-
- t->iovec.iov_base = (void *)(t + 1);
- t->pages = (struct page **)(t->iovec.iov_base + size_no_trans);
-
- /*
- * Reserving space for transaction header.
- */
- t->iovec.iov_len = sizeof(struct netfs_cmd) + crypto_added;
-
- netfs_trans_init_static(t, nr, size_no_trans);
-
- t->flags = flags;
- t->psb = psb;
-
- cmd = (struct netfs_cmd *)t->iovec.iov_base;
-
- cmd->size = size;
- cmd->cpad = pad;
- cmd->csize = crypto_added;
-
- dprintk("%s: t: %p, gen: %u, size: %u, padding: %u, align_size: %u, flags: %x, "
- "page_num: %u, base: %p, pages: %p.\n",
- __func__, t, t->gen, size, pad, psb->crypto_align_size, flags, nr,
- t->iovec.iov_base, t->pages);
-
- return t;
-
-err_out_exit:
- return NULL;
-}
-
-int netfs_trans_init(void)
-{
- int err = -ENOMEM;
-
- netfs_trans_dst = kmem_cache_create("netfs_trans_dst", sizeof(struct netfs_trans_dst),
- 0, 0, NULL);
- if (!netfs_trans_dst)
- goto err_out_exit;
-
- netfs_trans_dst_pool = mempool_create_slab_pool(256, netfs_trans_dst);
- if (!netfs_trans_dst_pool)
- goto err_out_free;
-
- return 0;
-
-err_out_free:
- kmem_cache_destroy(netfs_trans_dst);
-err_out_exit:
- return err;
-}
-
-void netfs_trans_exit(void)
-{
- mempool_destroy(netfs_trans_dst_pool);
- kmem_cache_destroy(netfs_trans_dst);
-}
diff --git a/drivers/staging/quatech_usb2/quatech_usb2.c b/drivers/staging/quatech_usb2/quatech_usb2.c
index 02fafec..897a3a9 100644
--- a/drivers/staging/quatech_usb2/quatech_usb2.c
+++ b/drivers/staging/quatech_usb2/quatech_usb2.c
@@ -16,7 +16,7 @@
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
-static int debug;
+static bool debug;
/* Version Information */
#define DRIVER_VERSION "v2.00"
diff --git a/drivers/staging/rtl8192e/Kconfig b/drivers/staging/rtl8192e/Kconfig
index 750c347..f87e211 100644
--- a/drivers/staging/rtl8192e/Kconfig
+++ b/drivers/staging/rtl8192e/Kconfig
@@ -1,9 +1,43 @@
-config RTL8192E
- tristate "RealTek RTL8192E Wireless LAN NIC driver"
- depends on PCI && WLAN
- depends on m
- select WIRELESS_EXT
- select WEXT_PRIV
- select CRYPTO
- default N
+config RTLLIB
+ tristate "Support for rtllib wireless devices"
+ depends on WLAN && m
+ default n
+ select LIB80211
---help---
+ If you have a wireless card that uses rtllib, say
+ Y. Currently the only card is the rtl8192e.
+
+ If unsure, say N.
+
+if RTLLIB
+
+config RTLLIB_CRYPTO_CCMP
+ tristate "Support for rtllib CCMP crypto"
+ depends on RTLLIB
+ default y
+ ---help---
+ CCMP crypto driver for rtllib.
+
+ If you enabled RTLLIB, you want this.
+
+config RTLLIB_CRYPTO_TKIP
+ tristate "Support for rtllib TKIP crypto"
+ depends on RTLLIB
+ default y
+ ---help---
+ TKIP crypto driver for rtllib.
+
+ If you enabled RTLLIB, you want this.
+
+config RTLLIB_CRYPTO_WEP
+ tristate "Support for rtllib WEP crypto"
+ depends on RTLLIB
+ default y
+ ---help---
+	  WEP crypto driver for rtllib.
+
+ If you enabled RTLLIB, you want this.
+
+source "drivers/staging/rtl8192e/rtl8192e/Kconfig"
+
+endif
diff --git a/drivers/staging/rtl8192e/Makefile b/drivers/staging/rtl8192e/Makefile
index a66a9ad..cb18db7 100644
--- a/drivers/staging/rtl8192e/Makefile
+++ b/drivers/staging/rtl8192e/Makefile
@@ -1,41 +1,21 @@
-ccflags-y += -DUSE_FW_SOURCE_IMG_FILE
-ccflags-y += -DCONFIG_PM_RTL
-ccflags-y += -DCONFIG_PM
-ccflags-y += -DHAVE_NET_DEVICE_OPS
-ccflags-y += -DENABLE_DOT11D
-
-r8192e_pci-objs := \
- rtl_core.o \
- rtl_eeprom.o \
- rtl_ps.o \
- rtl_wx.o \
- rtl_cam.o \
- rtl_dm.o \
- rtl_pm.o \
- rtl_pci.o \
- rtl_debug.o \
- rtl_ethtool.o \
- r8192E_dev.o \
- r8192E_phy.o \
- r8192E_firmware.o \
- r8192E_cmdpkt.o \
- r8192E_hwimg.o \
- r8190P_rtl8256.o \
+rtllib-objs := \
+ dot11d.o \
+ rtllib_module.o \
rtllib_rx.o \
- rtllib_softmac.o \
rtllib_tx.o \
rtllib_wx.o \
- rtllib_module.o \
+ rtllib_softmac.o \
rtllib_softmac_wx.o \
- rtl819x_HTProc.o \
- rtl819x_TSProc.o \
rtl819x_BAProc.o \
- dot11d.o \
- rtllib_crypt.o \
- rtllib_crypt_tkip.o \
- rtllib_crypt_ccmp.o \
- rtllib_crypt_wep.o
+ rtl819x_HTProc.o \
+ rtl819x_TSProc.o
+
+obj-$(CONFIG_RTLLIB) += rtllib.o
+
+obj-$(CONFIG_RTLLIB_CRYPTO_CCMP) += rtllib_crypt_ccmp.o
+obj-$(CONFIG_RTLLIB_CRYPTO_TKIP) += rtllib_crypt_tkip.o
+obj-$(CONFIG_RTLLIB_CRYPTO_WEP) += rtllib_crypt_wep.o
-obj-$(CONFIG_RTL8192E) += r8192e_pci.o
+obj-$(CONFIG_RTL8192E) += rtl8192e/
ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/staging/rtl8192e/dot11d.c b/drivers/staging/rtl8192e/dot11d.c
index ee0381e..f7b14f8 100644
--- a/drivers/staging/rtl8192e/dot11d.c
+++ b/drivers/staging/rtl8192e/dot11d.c
@@ -46,7 +46,7 @@ static struct channel_list ChannelPlan[] = {
56, 60, 64}, 21}
};
-void Dot11d_Init(struct rtllib_device *ieee)
+void dot11d_init(struct rtllib_device *ieee)
{
struct rt_dot11d_info *pDot11dInfo = GET_DOT11D_INFO(ieee);
pDot11dInfo->bEnabled = false;
@@ -58,6 +58,7 @@ void Dot11d_Init(struct rtllib_device *ieee)
RESET_CIE_WATCHDOG(ieee);
}
+EXPORT_SYMBOL(dot11d_init);
void Dot11d_Channelmap(u8 channel_plan, struct rtllib_device *ieee)
{
@@ -99,6 +100,7 @@ void Dot11d_Channelmap(u8 channel_plan, struct rtllib_device *ieee)
break;
}
}
+EXPORT_SYMBOL(Dot11d_Channelmap);
void Dot11d_Reset(struct rtllib_device *ieee)
diff --git a/drivers/staging/rtl8192e/dot11d.h b/drivers/staging/rtl8192e/dot11d.h
index 032f700..71f4549 100644
--- a/drivers/staging/rtl8192e/dot11d.h
+++ b/drivers/staging/rtl8192e/dot11d.h
@@ -93,7 +93,7 @@ static inline void cpMacAddr(unsigned char *des, unsigned char *src)
#define IS_DOT11D_STATE_DONE(__pIeeeDev) \
(GET_DOT11D_INFO(__pIeeeDev)->State == DOT11D_STATE_DONE)
-void Dot11d_Init(struct rtllib_device *dev);
+void dot11d_init(struct rtllib_device *dev);
void Dot11d_Channelmap(u8 channel_plan, struct rtllib_device *ieee);
void Dot11d_Reset(struct rtllib_device *dev);
void Dot11d_UpdateCountryIe(struct rtllib_device *dev, u8 *pTaddr,
diff --git a/drivers/staging/rtl8192e/rtl8192e/Kconfig b/drivers/staging/rtl8192e/rtl8192e/Kconfig
new file mode 100644
index 0000000..50e0d91
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl8192e/Kconfig
@@ -0,0 +1,9 @@
+config RTL8192E
+ tristate "RealTek RTL8192E Wireless LAN NIC driver"
+ depends on PCI && WLAN && RTLLIB
+ depends on m
+ select WIRELESS_EXT
+ select WEXT_PRIV
+ select CRYPTO
+	default n
+ ---help---
diff --git a/drivers/staging/rtl8192e/rtl8192e/Makefile b/drivers/staging/rtl8192e/rtl8192e/Makefile
new file mode 100644
index 0000000..313a92e
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtl8192e/Makefile
@@ -0,0 +1,21 @@
+r8192e_pci-objs := \
+ r8192E_dev.o \
+ r8192E_phy.o \
+ r8192E_firmware.o \
+ r8192E_cmdpkt.o \
+ r8192E_hwimg.o \
+ r8190P_rtl8256.o \
+ rtl_cam.o \
+ rtl_core.o \
+ rtl_debug.o \
+ rtl_dm.o \
+ rtl_eeprom.o \
+ rtl_ethtool.o \
+ rtl_pci.o \
+ rtl_pm.o \
+ rtl_ps.o \
+		rtl_wx.o
+
+obj-$(CONFIG_RTL8192E) += r8192e_pci.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/staging/rtl8192e/r8190P_def.h b/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
index b7bb71f..b7bb71f 100644
--- a/drivers/staging/rtl8192e/r8190P_def.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_def.h
diff --git a/drivers/staging/rtl8192e/r8190P_rtl8256.c b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
index 0da56c8..0da56c8 100644
--- a/drivers/staging/rtl8192e/r8190P_rtl8256.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.c
diff --git a/drivers/staging/rtl8192e/r8190P_rtl8256.h b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h
index 64e831d..64e831d 100644
--- a/drivers/staging/rtl8192e/r8190P_rtl8256.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8190P_rtl8256.h
diff --git a/drivers/staging/rtl8192e/r8192E_cmdpkt.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
index 58d044e..58d044e 100644
--- a/drivers/staging/rtl8192e/r8192E_cmdpkt.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.c
diff --git a/drivers/staging/rtl8192e/r8192E_cmdpkt.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h
index 23219e1..23219e1 100644
--- a/drivers/staging/rtl8192e/r8192E_cmdpkt.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_cmdpkt.h
diff --git a/drivers/staging/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index 808aab6..808aab6 100644
--- a/drivers/staging/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
diff --git a/drivers/staging/rtl8192e/r8192E_dev.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
index b9b3b52..b9b3b52 100644
--- a/drivers/staging/rtl8192e/r8192E_dev.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.h
diff --git a/drivers/staging/rtl8192e/r8192E_firmware.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
index 3771985..3771985 100644
--- a/drivers/staging/rtl8192e/r8192E_firmware.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.c
diff --git a/drivers/staging/rtl8192e/r8192E_firmware.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h
index caa8788..caa8788 100644
--- a/drivers/staging/rtl8192e/r8192E_firmware.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_firmware.h
diff --git a/drivers/staging/rtl8192e/r8192E_hw.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
index 43c3fb8..43c3fb8 100644
--- a/drivers/staging/rtl8192e/r8192E_hw.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_hw.h
diff --git a/drivers/staging/rtl8192e/r8192E_hwimg.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.c
index 08e7dbb..08e7dbb 100644
--- a/drivers/staging/rtl8192e/r8192E_hwimg.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.c
diff --git a/drivers/staging/rtl8192e/r8192E_hwimg.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.h
index 019836b..019836b 100644
--- a/drivers/staging/rtl8192e/r8192E_hwimg.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_hwimg.h
diff --git a/drivers/staging/rtl8192e/r8192E_phy.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
index 7fe69a3..3e705ef 100644
--- a/drivers/staging/rtl8192e/r8192E_phy.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.c
@@ -23,7 +23,6 @@
#include "r8190P_rtl8256.h"
#include "r8192E_phy.h"
#include "rtl_dm.h"
-#include "dot11d.h"
#include "r8192E_hwimg.h"
@@ -859,7 +858,7 @@ static u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel,
RT_TRACE(COMP_TRACE, "====>%s()====stage:%d, step:%d, channel:%d\n",
__func__, *stage, *step, channel);
- if (!IsLegalChannel(priv->rtllib, channel)) {
+ if (!rtllib_legal_channel(priv->rtllib, channel)) {
RT_TRACE(COMP_ERR, "=============>set to illegal channel:%d\n",
channel);
return true;
diff --git a/drivers/staging/rtl8192e/r8192E_phy.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
index 7318f88..7318f88 100644
--- a/drivers/staging/rtl8192e/r8192E_phy.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phy.h
diff --git a/drivers/staging/rtl8192e/r8192E_phyreg.h b/drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h
index 7899dd5..7899dd5 100644
--- a/drivers/staging/rtl8192e/r8192E_phyreg.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_phyreg.h
diff --git a/drivers/staging/rtl8192e/r819xE_phyreg.h b/drivers/staging/rtl8192e/rtl8192e/r819xE_phyreg.h
index d5de279..d5de279 100644
--- a/drivers/staging/rtl8192e/r819xE_phyreg.h
+++ b/drivers/staging/rtl8192e/rtl8192e/r819xE_phyreg.h
diff --git a/drivers/staging/rtl8192e/rtl_cam.c b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
index baf3b63..baf3b63 100644
--- a/drivers/staging/rtl8192e/rtl_cam.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.c
diff --git a/drivers/staging/rtl8192e/rtl_cam.h b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
index fa607f9..fa607f9 100644
--- a/drivers/staging/rtl8192e/rtl_cam.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_cam.h
diff --git a/drivers/staging/rtl8192e/rtl_core.c b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
index 5ad9664..71adb6b 100644
--- a/drivers/staging/rtl8192e/rtl_core.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.c
@@ -53,9 +53,7 @@
#include "rtl_wx.h"
#include "rtl_dm.h"
-#ifdef CONFIG_PM_RTL
#include "rtl_pm.h"
-#endif
int hwwep = 1;
static int channels = 0x3fff;
@@ -581,7 +579,7 @@ static void rtl8192_update_beacon(void *data)
struct rtllib_network *net = &ieee->current_network;
if (ieee->pHTInfo->bCurrentHTSupport)
- HTUpdateSelfAndPeerSetting(ieee, net);
+ HT_update_self_and_peer_setting(ieee, net);
ieee->pHTInfo->bCurrentRT2RTLongSlotTime =
net->bssht.bdRT2RTLongSlotTime;
ieee->pHTInfo->RT2RT_HT_Mode = net->bssht.RT2RT_HT_Mode;
@@ -1289,7 +1287,7 @@ static short rtl8192_get_channel_map(struct net_device *dev)
priv->ChannelPlan = COUNTRY_CODE_FCC;
}
RT_TRACE(COMP_INIT, "Channel plan is %d\n", priv->ChannelPlan);
- Dot11d_Init(priv->rtllib);
+ dot11d_init(priv->rtllib);
Dot11d_Channelmap(priv->ChannelPlan, priv->rtllib);
for (i = 1; i <= 11; i++)
(priv->rtllib->active_channel_map)[i] = 1;
@@ -1305,7 +1303,6 @@ static short rtl8192_init(struct net_device *dev)
memset(&(priv->stats), 0, sizeof(struct rt_stats));
- rtl8192_dbgp_flag_init(dev);
rtl8192_init_priv_handler(dev);
rtl8192_init_priv_constant(dev);
rtl8192_init_priv_variable(dev);
@@ -2839,7 +2836,6 @@ done:
/****************************************************************************
---------------------------- PCI_STUFF---------------------------
*****************************************************************************/
-#ifdef HAVE_NET_DEVICE_OPS
static const struct net_device_ops rtl8192_netdev_ops = {
.ndo_open = rtl8192_open,
.ndo_stop = rtl8192_close,
@@ -2851,7 +2847,6 @@ static const struct net_device_ops rtl8192_netdev_ops = {
.ndo_change_mtu = eth_change_mtu,
.ndo_start_xmit = rtllib_xmit,
};
-#endif
static int __devinit rtl8192_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
@@ -2938,17 +2933,7 @@ static int __devinit rtl8192_pci_probe(struct pci_dev *pdev,
dev->irq = pdev->irq;
priv->irq = 0;
-#ifdef HAVE_NET_DEVICE_OPS
dev->netdev_ops = &rtl8192_netdev_ops;
-#else
- dev->open = rtl8192_open;
- dev->stop = rtl8192_close;
- dev->tx_timeout = rtl8192_tx_timeout;
- dev->do_ioctl = rtl8192_ioctl;
- dev->set_multicast_list = r8192_set_multicast;
- dev->set_mac_address = r8192_set_mac_adr;
- dev->hard_start_xmit = rtllib_xmit;
-#endif
dev->wireless_handlers = (struct iw_handler_def *)
&r8192_wx_handlers_def;
@@ -2974,10 +2959,7 @@ static int __devinit rtl8192_pci_probe(struct pci_dev *pdev,
register_netdev(dev);
RT_TRACE(COMP_INIT, "dev name: %s\n", dev->name);
- err = rtl_debug_module_init(priv, dev->name);
- if (err)
- RT_TRACE(COMP_DBG, "failed to create debugfs files. Ignoring "
- "error: %d\n", err);
+
rtl8192_proc_init_one(dev);
if (priv->polling_timer_on == 0)
@@ -3015,7 +2997,6 @@ static void __devexit rtl8192_pci_disconnect(struct pci_dev *pdev)
del_timer_sync(&priv->gpio_polling_timer);
cancel_delayed_work(&priv->gpio_change_rf_wq);
priv->polling_timer_on = 0;
- rtl_debug_module_remove(priv);
rtl8192_proc_remove_one(dev);
rtl8192_down(dev, true);
deinit_hal_dm(dev);
@@ -3103,43 +3084,9 @@ bool NicIFDisableNIC(struct net_device *dev)
static int __init rtl8192_pci_module_init(void)
{
- int ret;
- int error;
-
- ret = rtllib_init();
- if (ret) {
- printk(KERN_ERR "rtllib_init() failed %d\n", ret);
- return ret;
- }
- ret = rtllib_crypto_init();
- if (ret) {
- printk(KERN_ERR "rtllib_crypto_init() failed %d\n", ret);
- return ret;
- }
- ret = rtllib_crypto_tkip_init();
- if (ret) {
- printk(KERN_ERR "rtllib_crypto_tkip_init() failed %d\n", ret);
- return ret;
- }
- ret = rtllib_crypto_ccmp_init();
- if (ret) {
- printk(KERN_ERR "rtllib_crypto_ccmp_init() failed %d\n", ret);
- return ret;
- }
- ret = rtllib_crypto_wep_init();
- if (ret) {
- printk(KERN_ERR "rtllib_crypto_wep_init() failed %d\n", ret);
- return ret;
- }
printk(KERN_INFO "\nLinux kernel driver for RTL8192E WLAN cards\n");
printk(KERN_INFO "Copyright (c) 2007-2008, Realsil Wlan Driver\n");
- error = rtl_create_debugfs_root();
- if (error) {
- RT_TRACE(COMP_DBG, "Create debugfs root fail: %d\n", error);
- goto err_out;
- }
-
rtl8192_proc_module_init();
if (0 != pci_register_driver(&rtl8192_pci_driver)) {
DMESG("No device found");
@@ -3147,9 +3094,6 @@ static int __init rtl8192_pci_module_init(void)
return -ENODEV;
}
return 0;
-err_out:
- return error;
-
}
static void __exit rtl8192_pci_module_exit(void)
@@ -3158,12 +3102,6 @@ static void __exit rtl8192_pci_module_exit(void)
RT_TRACE(COMP_DOWN, "Exiting");
rtl8192_proc_module_remove();
- rtl_remove_debugfs_root();
- rtllib_crypto_tkip_exit();
- rtllib_crypto_ccmp_exit();
- rtllib_crypto_wep_exit();
- rtllib_crypto_deinit();
- rtllib_exit();
}
void check_rfctrl_gpio_timer(unsigned long data)
diff --git a/drivers/staging/rtl8192e/rtl_core.h b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
index f9af515..2a2519c 100644
--- a/drivers/staging/rtl8192e/rtl_core.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_core.h
@@ -44,11 +44,14 @@
#include <linux/proc_fs.h>
#include <linux/if_arp.h>
#include <linux/random.h>
-#include <linux/version.h>
#include <linux/io.h>
-#include "rtllib.h"
-#include "dot11d.h"
+/* Need this defined before including local include files */
+#define DRV_NAME "rtl819xE"
+
+#include "../rtllib.h"
+
+#include "../dot11d.h"
#include "r8192E_firmware.h"
#include "r8192E_hw.h"
@@ -56,7 +59,6 @@
#include "r8190P_def.h"
#include "r8192E_dev.h"
-#include "rtl_debug.h"
#include "rtl_eeprom.h"
#include "rtl_ps.h"
#include "rtl_pci.h"
@@ -67,8 +69,6 @@
#define DRV_AUTHOR "<wlanfae@realtek.com>"
#define DRV_VERSION "0014.0401.2010"
-#define DRV_NAME "rtl819xE"
-
#define IS_HARDWARE_TYPE_819xP(_priv) \
((((struct r8192_priv *)rtllib_priv(dev))->card_8192 == NIC_8190P) || \
(((struct r8192_priv *)rtllib_priv(dev))->card_8192 == NIC_8192E))
@@ -215,41 +215,6 @@ enum RTL819x_PHY_PARAM {
RTL819X_EFUSE_MAP = 19,
};
-enum RTL_DEBUG {
- COMP_TRACE = BIT0,
- COMP_DBG = BIT1,
- COMP_INIT = BIT2,
- COMP_RECV = BIT3,
- COMP_SEND = BIT4,
- COMP_CMD = BIT5,
- COMP_POWER = BIT6,
- COMP_EPROM = BIT7,
- COMP_SWBW = BIT8,
- COMP_SEC = BIT9,
- COMP_LPS = BIT10,
- COMP_QOS = BIT11,
- COMP_RATE = BIT12,
- COMP_RXDESC = BIT13,
- COMP_PHY = BIT14,
- COMP_DIG = BIT15,
- COMP_TXAGC = BIT16,
- COMP_HALDM = BIT17,
- COMP_POWER_TRACKING = BIT18,
- COMP_CH = BIT19,
- COMP_RF = BIT20,
- COMP_FIRMWARE = BIT21,
- COMP_HT = BIT22,
- COMP_RESET = BIT23,
- COMP_CMDPKT = BIT24,
- COMP_SCAN = BIT25,
- COMP_PS = BIT26,
- COMP_DOWN = BIT27,
- COMP_INTR = BIT28,
- COMP_LED = BIT29,
- COMP_MLME = BIT30,
- COMP_ERR = BIT31
-};
-
enum nic_t {
NIC_UNKNOWN = 0,
NIC_8192E = 1,
@@ -1121,4 +1086,10 @@ void ActUpdateChannelAccessSetting(struct net_device *dev,
enum wireless_mode WirelessMode,
struct channel_access_setting *ChnlAccessSetting);
+/* proc stuff from rtl_debug.c */
+void rtl8192_proc_init_one(struct net_device *dev);
+void rtl8192_proc_remove_one(struct net_device *dev);
+void rtl8192_proc_module_init(void);
+void rtl8192_proc_module_remove(void);
+
#endif
diff --git a/drivers/staging/rtl8192e/rtl_crypto.h b/drivers/staging/rtl8192e/rtl8192e/rtl_crypto.h
index ee57c0f..ee57c0f 100644
--- a/drivers/staging/rtl8192e/rtl_crypto.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_crypto.h
diff --git a/drivers/staging/rtl8192e/rtl_debug.c b/drivers/staging/rtl8192e/rtl8192e/rtl_debug.c
index 22bc2dd..c19b14c 100644
--- a/drivers/staging/rtl8192e/rtl_debug.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_debug.c
@@ -22,91 +22,12 @@
* Contact Information:
* wlanfae <wlanfae@realtek.com>
******************************************************************************/
-#include "rtl_debug.h"
#include "rtl_core.h"
#include "r8192E_phy.h"
#include "r8192E_phyreg.h"
#include "r8190P_rtl8256.h" /* RTL8225 Radio frontend */
#include "r8192E_cmdpkt.h"
-u32 rt_global_debug_component = \
- COMP_ERR ;
-
-/*------------------Declare variable-----------------------*/
-u32 DBGP_Type[DBGP_TYPE_MAX];
-
-/*-----------------------------------------------------------------------------
- * Function: DBGP_Flag_Init
- *
- * Overview: Refresh all debug print control flag content to zero.
- *
- * Input: NONE
- *
- * Output: NONE
- *
- * Return: NONE
- *
- * Revised History:
- * When Who Remark
- * 10/20/2006 MHC Create Version 0.
- *
- *---------------------------------------------------------------------------*/
-void rtl8192_dbgp_flag_init(struct net_device *dev)
-{
- u8 i;
-
- for (i = 0; i < DBGP_TYPE_MAX; i++)
- DBGP_Type[i] = 0;
-
-
-} /* DBGP_Flag_Init */
-
-/* this is only for debugging */
-void print_buffer(u32 *buffer, int len)
-{
- int i;
- u8 *buf = (u8 *)buffer;
-
- printk(KERN_INFO "ASCII BUFFER DUMP (len: %x):\n", len);
-
- for (i = 0; i < len; i++)
- printk(KERN_INFO "%c", buf[i]);
-
- printk(KERN_INFO "\nBINARY BUFFER DUMP (len: %x):\n", len);
-
- for (i = 0; i < len; i++)
- printk(KERN_INFO "%x", buf[i]);
-
- printk(KERN_INFO "\n");
-}
-
-/* this is only for debug */
-void dump_eprom(struct net_device *dev)
-{
- int i;
-
- for (i = 0; i < 0xff; i++)
- RT_TRACE(COMP_INIT, "EEPROM addr %x : %x", i,
- eprom_read(dev, i));
-}
-
-/* this is only for debug */
-void rtl8192_dump_reg(struct net_device *dev)
-{
- int i;
- int n;
- int max = 0x5ff;
-
- RT_TRACE(COMP_INIT, "Dumping NIC register map");
-
- for (n = 0; n <= max; ) {
- printk(KERN_INFO "\nD: %2x> ", n);
- for (i = 0; i < 16 && n <= max; i++, n++)
- printk(KERN_INFO "%2x ", read_nic_byte(dev, n));
- }
- printk(KERN_INFO "\n");
-}
-
/****************************************************************************
-----------------------------PROCFS STUFF-------------------------
*****************************************************************************/
diff --git a/drivers/staging/rtl8192e/rtl_dm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
index a7fa9aa..a7fa9aa 100644
--- a/drivers/staging/rtl8192e/rtl_dm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.c
diff --git a/drivers/staging/rtl8192e/rtl_dm.h b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
index ab44a9a..ab44a9a 100644
--- a/drivers/staging/rtl8192e/rtl_dm.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_dm.h
diff --git a/drivers/staging/rtl8192e/rtl_eeprom.c b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c
index c1ccff4..c1ccff4 100644
--- a/drivers/staging/rtl8192e/rtl_eeprom.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.c
diff --git a/drivers/staging/rtl8192e/rtl_eeprom.h b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h
index 9452e16..9452e16 100644
--- a/drivers/staging/rtl8192e/rtl_eeprom.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_eeprom.h
diff --git a/drivers/staging/rtl8192e/rtl_ethtool.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c
index 36452fb..36452fb 100644
--- a/drivers/staging/rtl8192e/rtl_ethtool.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ethtool.c
diff --git a/drivers/staging/rtl8192e/rtl_pci.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
index ddadcc3..ddadcc3 100644
--- a/drivers/staging/rtl8192e/rtl_pci.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.c
diff --git a/drivers/staging/rtl8192e/rtl_pci.h b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h
index 7ea5a47..28c7da6 100644
--- a/drivers/staging/rtl8192e/rtl_pci.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pci.h
@@ -27,7 +27,6 @@
#include <linux/types.h>
#include <linux/pci.h>
-#include "rtllib.h"
static inline void NdisRawWritePortUlong(u32 port, u32 val)
{
diff --git a/drivers/staging/rtl8192e/rtl_pm.c b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
index 92e2fde..8e1a5d5 100644
--- a/drivers/staging/rtl8192e/rtl_pm.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.c
@@ -17,7 +17,6 @@
* wlanfae <wlanfae@realtek.com>
******************************************************************************/
-#ifdef CONFIG_PM_RTL
#include "rtl_core.h"
#include "r8192E_hw.h"
#include "r8190P_rtl8256.h"
@@ -133,4 +132,3 @@ int rtl8192E_enable_wake(struct pci_dev *dev, pm_message_t state, int enable)
return -EAGAIN;
}
-#endif
diff --git a/drivers/staging/rtl8192e/rtl_pm.h b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.h
index 4d7f406..e5299fc 100644
--- a/drivers/staging/rtl8192e/rtl_pm.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_pm.h
@@ -17,8 +17,6 @@
* wlanfae <wlanfae@realtek.com>
******************************************************************************/
-#ifdef CONFIG_PM_RTL
-
#ifndef R8192E_PM_H
#define R8192E_PM_H
@@ -31,5 +29,3 @@ int rtl8192E_resume(struct pci_dev *dev);
int rtl8192E_enable_wake(struct pci_dev *dev, pm_message_t state, int enable);
#endif
-
-#endif
diff --git a/drivers/staging/rtl8192e/rtl_ps.c b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
index c9a7c56..c9a7c56 100644
--- a/drivers/staging/rtl8192e/rtl_ps.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.c
diff --git a/drivers/staging/rtl8192e/rtl_ps.h b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h
index a9c2d79..df79d6c 100644
--- a/drivers/staging/rtl8192e/rtl_ps.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_ps.h
@@ -26,7 +26,7 @@
#define _RTL_PS_H
#include <linux/types.h>
-#include "rtllib.h"
+
struct net_device;
#define RT_CHECK_FOR_HANG_PERIOD 2
diff --git a/drivers/staging/rtl8192e/rtl_wx.c b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
index 93b1edb..4e93669 100644
--- a/drivers/staging/rtl8192e/rtl_wx.c
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.c
@@ -19,7 +19,6 @@
#include <linux/string.h>
#include "rtl_core.h"
-#include "dot11d.h"
#define RATE_COUNT 12
static u32 rtl8192_rates[] = {
@@ -803,7 +802,7 @@ static int r8192_wx_set_enc(struct net_device *dev,
switch (wrqu->encoding.flags & IW_ENCODE_INDEX) {
case 0:
- key_idx = ieee->tx_keyidx;
+ key_idx = ieee->crypt_info.tx_keyidx;
break;
case 1:
key_idx = 0;
diff --git a/drivers/staging/rtl8192e/rtl_wx.h b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.h
index 6a51a25..6a51a25 100644
--- a/drivers/staging/rtl8192e/rtl_wx.h
+++ b/drivers/staging/rtl8192e/rtl8192e/rtl_wx.h
diff --git a/drivers/staging/rtl8192e/rtl819x_BAProc.c b/drivers/staging/rtl8192e/rtl819x_BAProc.c
index 8b9d85c..32fbbc9 100644
--- a/drivers/staging/rtl8192e/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_BAProc.c
@@ -18,7 +18,6 @@
******************************************************************************/
#include "rtllib.h"
#include "rtl819x_BA.h"
-#include "rtl_core.h"
static void ActivateBAEntry(struct rtllib_device *ieee, struct ba_record *pBA,
u16 Time)
diff --git a/drivers/staging/rtl8192e/rtl819x_HTProc.c b/drivers/staging/rtl8192e/rtl819x_HTProc.c
index b1c0c56..8b74129 100644
--- a/drivers/staging/rtl8192e/rtl819x_HTProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_HTProc.c
@@ -943,8 +943,8 @@ void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee,
}
}
-void HTUpdateSelfAndPeerSetting(struct rtllib_device *ieee,
- struct rtllib_network *pNetwork)
+void HT_update_self_and_peer_setting(struct rtllib_device *ieee,
+ struct rtllib_network *pNetwork)
{
struct rt_hi_throughput *pHTInfo = ieee->pHTInfo;
struct ht_info_ele *pPeerHTInfo =
@@ -955,6 +955,7 @@ void HTUpdateSelfAndPeerSetting(struct rtllib_device *ieee,
pHTInfo->CurrentOpMode = pPeerHTInfo->OptMode;
}
}
+EXPORT_SYMBOL(HT_update_self_and_peer_setting);
void HTUseDefaultSetting(struct rtllib_device *ieee)
{
diff --git a/drivers/staging/rtl8192e/rtl819x_TSProc.c b/drivers/staging/rtl8192e/rtl819x_TSProc.c
index 09a602f..711a096 100644
--- a/drivers/staging/rtl8192e/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192e/rtl819x_TSProc.c
@@ -497,6 +497,7 @@ void RemovePeerTS(struct rtllib_device *ieee, u8 *Addr)
}
}
}
+EXPORT_SYMBOL(RemovePeerTS);
void RemoveAllTS(struct rtllib_device *ieee)
{
diff --git a/drivers/staging/rtl8192e/rtl_debug.h b/drivers/staging/rtl8192e/rtl_debug.h
deleted file mode 100644
index 50fb9a9..0000000
--- a/drivers/staging/rtl8192e/rtl_debug.h
+++ /dev/null
@@ -1,299 +0,0 @@
-/******************************************************************************
- * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
- *
- * Based on the r8180 driver, which is:
- * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * wlanfae <wlanfae@realtek.com>
-******************************************************************************/
-#ifndef _RTL_DEBUG_H
-#define _RTL_DEBUG_H
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/version.h>
-#include <linux/debugfs.h>
-
-struct r8192_priv;
-struct _tx_desc_8192se;
-struct _TX_DESC_8192CE;
-struct net_device;
-
-#define DBG_LOUD 4
-
-#define RT_ASSERT(_Exp, Fmt) \
- if (!(_Exp)) { \
- printk("Rtl819x: "); \
- printk Fmt; \
- }
-
-enum dbgp_flag {
- FQoS = 0,
- FTX = 1,
- FRX = 2,
- FSEC = 3,
- FMGNT = 4,
- FMLME = 5,
- FRESOURCE = 6,
- FBEACON = 7,
- FISR = 8,
- FPHY = 9,
- FMP = 10,
- FEEPROM = 11,
- FPWR = 12,
- FDM = 13,
- FDBGCtrl = 14,
- FC2H = 15,
- FBT = 16,
- FINIT = 17,
- FIOCTL = 18,
- DBGP_TYPE_MAX
-};
-
-#define QoS_INIT BIT0
-#define QoS_VISTA BIT1
-
-#define TX_DESC BIT0
-#define TX_DESC_TID BIT1
-
-#define RX_DATA BIT0
-#define RX_PHY_STS BIT1
-#define RX_PHY_SS BIT2
-#define RX_PHY_SQ BIT3
-#define RX_PHY_ASTS BIT4
-#define RX_ERR_LEN BIT5
-#define RX_DEFRAG BIT6
-#define RX_ERR_RATE BIT7
-
-
-
-#define MEDIA_STS BIT0
-#define LINK_STS BIT1
-
-#define OS_CHK BIT0
-
-#define BCN_SHOW BIT0
-#define BCN_PEER BIT1
-
-#define ISR_CHK BIT0
-
-#define PHY_BBR BIT0
-#define PHY_BBW BIT1
-#define PHY_RFR BIT2
-#define PHY_RFW BIT3
-#define PHY_MACR BIT4
-#define PHY_MACW BIT5
-#define PHY_ALLR BIT6
-#define PHY_ALLW BIT7
-#define PHY_TXPWR BIT8
-#define PHY_PWRDIFF BIT9
-
-#define MP_RX BIT0
-#define MP_SWICH_CH BIT1
-
-#define EEPROM_W BIT0
-#define EFUSE_PG BIT1
-#define EFUSE_READ_ALL BIT2
-
-#define LPS BIT0
-#define IPS BIT1
-#define PWRSW BIT2
-#define PWRHW BIT3
-#define PWRHAL BIT4
-
-#define WA_IOT BIT0
-#define DM_PWDB BIT1
-#define DM_Monitor BIT2
-#define DM_DIG BIT3
-#define DM_EDCA_Turbo BIT4
-
-#define DbgCtrl_Trace BIT0
-#define DbgCtrl_InbandNoise BIT1
-
-#define BT_TRACE BIT0
-#define BT_RFPoll BIT1
-
-#define C2H_Summary BIT0
-#define C2H_PacketData BIT1
-#define C2H_ContentData BIT2
-#define BT_TRACE BIT0
-#define BT_RFPoll BIT1
-
-#define INIT_EEPROM BIT0
-#define INIT_TxPower BIT1
-#define INIT_IQK BIT2
-#define INIT_RF BIT3
-
-#define IOCTL_TRACE BIT0
-#define IOCTL_BT_EVENT BIT1
-#define IOCTL_BT_EVENT_DETAIL BIT2
-#define IOCTL_BT_TX_ACLDATA BIT3
-#define IOCTL_BT_TX_ACLDATA_DETAIL BIT4
-#define IOCTL_BT_RX_ACLDATA BIT5
-#define IOCTL_BT_RX_ACLDATA_DETAIL BIT6
-#define IOCTL_BT_HCICMD BIT7
-#define IOCTL_BT_HCICMD_DETAIL BIT8
-#define IOCTL_IRP BIT9
-#define IOCTL_IRP_DETAIL BIT10
-#define IOCTL_CALLBACK_FUN BIT11
-#define IOCTL_STATE BIT12
-#define IOCTL_BT_TP BIT13
-#define IOCTL_BT_LOGO BIT14
-
-/* 2007/07/13 MH ------For DeBuG Print modeue------*/
-/*------------------------------Define structure----------------------------*/
-
-
-/*------------------------Export Marco Definition---------------------------*/
-#define DEBUG_PRINT 1
-
-#if (DEBUG_PRINT == 1)
-#define RTPRINT(dbgtype, dbgflag, printstr) \
-{ \
- if (DBGP_Type[dbgtype] & dbgflag) { \
- printk printstr; \
- } \
-}
-
-#define RTPRINT_ADDR(dbgtype, dbgflag, printstr, _Ptr) \
-{ \
- if (DBGP_Type[dbgtype] & dbgflag) { \
- int __i; \
- u8 *ptr = (u8 *)_Ptr; \
- printk printstr; \
- printk(" "); \
- for (__i = 0; __i < 6; __i++) \
- printk("%02X%s", ptr[__i], \
- (__i == 5) ? "" : "-"); \
- printk("\n"); \
- } \
-}
-
-#define RTPRINT_DATA(dbgtype, dbgflag, _TitleString, _HexData, _HexDataLen)\
-{ \
- if (DBGP_Type[dbgtype] & dbgflag) { \
- int __i; \
- u8 *ptr = (u8 *)_HexData; \
- printk(_TitleString); \
- for (__i = 0; __i < (int)_HexDataLen; __i++) { \
- printk("%02X%s", ptr[__i], (((__i + 1) \
- % 4) == 0) ? " " : " "); \
- if (((__i + 1) % 16) == 0) \
- printk("\n"); \
- } \
- printk("\n"); \
- } \
-}
-#else
-#define RTPRINT(dbgtype, dbgflag, printstr)
-#define RTPRINT_ADDR(dbgtype, dbgflag, printstr, _Ptr)
-#define RTPRINT_DATA(dbgtype, dbgflag, _TitleString, _HexData, _HexDataLen)
-#endif
-
-extern u32 DBGP_Type[DBGP_TYPE_MAX];
-
-#define RT_PRINT_DATA(_Comp, _Level, _TitleString, _HexData, _HexDataLen) \
-do {\
- if (((_Comp) & rt_global_debug_component) && \
- (_Level <= rt_global_debug_component)) { \
- int __i; \
- u8* ptr = (u8 *)_HexData; \
- printk(KERN_INFO "Rtl819x: "); \
- printk(_TitleString); \
- for (__i = 0; __i < (int)_HexDataLen; __i++) { \
- printk("%02X%s", ptr[__i], (((__i + 1) % \
- 4) == 0) ? " " : " "); \
- if (((__i + 1) % 16) == 0) \
- printk("\n"); \
- } \
- printk("\n"); \
- } \
-} while (0);
-
-#define DMESG(x, a...)
-#define DMESGW(x, a...)
-#define DMESGE(x, a...)
-extern u32 rt_global_debug_component;
-#define RT_TRACE(component, x, args...) \
-do { \
- if (rt_global_debug_component & component) \
- printk(KERN_DEBUG DRV_NAME ":" x "\n" , \
- ##args);\
-} while (0);
-
-#define assert(expr) \
- if (!(expr)) { \
- printk(KERN_INFO "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr, __FILE__, __func__, __LINE__); \
- }
-#define RT_DEBUG_DATA(level, data, datalen) \
- do { \
- if ((rt_global_debug_component & (level)) == (level)) {\
- int _i; \
- u8 *_pdata = (u8 *)data; \
- printk(KERN_DEBUG DRV_NAME ": %s()\n", __func__); \
- for (_i = 0; _i < (int)(datalen); _i++) { \
- printk(KERN_INFO "%2x ", _pdata[_i]); \
- if ((_i+1) % 16 == 0) \
- printk("\n"); \
- } \
- printk(KERN_INFO "\n"); \
- } \
- } while (0)
-
-struct rtl_fs_debug {
- const char *name;
- struct dentry *dir_drv;
- struct dentry *debug_register;
- u32 hw_type;
- u32 hw_offset;
- bool hw_holding;
-};
-
-void print_buffer(u32 *buffer, int len);
-void dump_eprom(struct net_device *dev);
-void rtl8192_dump_reg(struct net_device *dev);
-
-/* debugfs stuff */
-static inline int rtl_debug_module_init(struct r8192_priv *priv,
- const char *name)
-{
- return 0;
-}
-
-static inline void rtl_debug_module_remove(struct r8192_priv *priv)
-{
-}
-
-static inline int rtl_create_debugfs_root(void)
-{
- return 0;
-}
-
-static inline void rtl_remove_debugfs_root(void)
-{
-}
-
-/* proc stuff */
-void rtl8192_proc_init_one(struct net_device *dev);
-void rtl8192_proc_remove_one(struct net_device *dev);
-void rtl8192_proc_module_init(void);
-void rtl8192_proc_module_remove(void);
-void rtl8192_dbgp_flag_init(struct net_device *dev);
-
-#endif
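
The macros removed above hand-roll hex-dump loops around printk(). A minimal sketch of the same behaviour using the kernel's print_hex_dump() helper, assuming only rt_global_debug_component from the code above; the helper name rt_dump_hex() is illustrative, not part of the patch.

#include <linux/kernel.h>
#include <linux/printk.h>

extern u32 rt_global_debug_component;

/* Dump "len" bytes when the component bit is enabled; 16 bytes per row,
 * 1-byte groups, no ASCII column. */
static void rt_dump_hex(u32 comp, const char *title,
			const void *buf, size_t len)
{
	if (!(rt_global_debug_component & comp))
		return;
	printk(KERN_DEBUG "Rtl819x: %s\n", title);
	print_hex_dump(KERN_DEBUG, "Rtl819x: ", DUMP_PREFIX_OFFSET,
		       16, 1, buf, len, false);
}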
diff --git a/drivers/staging/rtl8192e/rtllib.h b/drivers/staging/rtl8192e/rtllib.h
index de25975..e26aec8 100644
--- a/drivers/staging/rtl8192e/rtllib.h
+++ b/drivers/staging/rtl8192e/rtllib.h
@@ -25,7 +25,6 @@
#define RTLLIB_H
#include <linux/if_ether.h> /* ETH_ALEN */
#include <linux/kernel.h> /* ARRAY_SIZE */
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
@@ -36,12 +35,14 @@
#include <linux/delay.h>
#include <linux/wireless.h>
+#include "rtllib_debug.h"
#include "rtl819x_HT.h"
#include "rtl819x_BA.h"
#include "rtl819x_TS.h"
#include <linux/netdevice.h>
#include <linux/if_arp.h> /* ARPHRD_ETHER */
+#include <net/lib80211.h>
#define MAX_PRECMD_CNT 16
#define MAX_RFDEPENDCMD_CNT 16
@@ -870,69 +871,6 @@ enum _REG_PREAMBLE_MODE {
#define WLAN_ERP_USE_PROTECTION (1<<1)
#define WLAN_ERP_BARKER_PREAMBLE (1<<2)
-/* Status codes */
-enum rtllib_statuscode {
- WLAN_STATUS_SUCCESS = 0,
- WLAN_STATUS_UNSPECIFIED_FAILURE = 1,
- WLAN_STATUS_CAPS_UNSUPPORTED = 10,
- WLAN_STATUS_REASSOC_NO_ASSOC = 11,
- WLAN_STATUS_ASSOC_DENIED_UNSPEC = 12,
- WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG = 13,
- WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION = 14,
- WLAN_STATUS_CHALLENGE_FAIL = 15,
- WLAN_STATUS_AUTH_TIMEOUT = 16,
- WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA = 17,
- WLAN_STATUS_ASSOC_DENIED_RATES = 18,
- /* 802.11b */
- WLAN_STATUS_ASSOC_DENIED_NOSHORTPREAMBLE = 19,
- WLAN_STATUS_ASSOC_DENIED_NOPBCC = 20,
- WLAN_STATUS_ASSOC_DENIED_NOAGILITY = 21,
- /* 802.11h */
- WLAN_STATUS_ASSOC_DENIED_NOSPECTRUM = 22,
- WLAN_STATUS_ASSOC_REJECTED_BAD_POWER = 23,
- WLAN_STATUS_ASSOC_REJECTED_BAD_SUPP_CHAN = 24,
- /* 802.11g */
- WLAN_STATUS_ASSOC_DENIED_NOSHORTTIME = 25,
- WLAN_STATUS_ASSOC_DENIED_NODSSSOFDM = 26,
- /* 802.11i */
- WLAN_STATUS_INVALID_IE = 40,
- WLAN_STATUS_INVALID_GROUP_CIPHER = 41,
- WLAN_STATUS_INVALID_PAIRWISE_CIPHER = 42,
- WLAN_STATUS_INVALID_AKMP = 43,
- WLAN_STATUS_UNSUPP_RSN_VERSION = 44,
- WLAN_STATUS_INVALID_RSN_IE_CAP = 45,
- WLAN_STATUS_CIPHER_SUITE_REJECTED = 46,
-};
-
-/* Reason codes */
-enum rtllib_reasoncode {
- WLAN_REASON_UNSPECIFIED = 1,
- WLAN_REASON_PREV_AUTH_NOT_VALID = 2,
- WLAN_REASON_DEAUTH_LEAVING = 3,
- WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY = 4,
- WLAN_REASON_DISASSOC_AP_BUSY = 5,
- WLAN_REASON_CLASS2_FRAME_FROM_NONAUTH_STA = 6,
- WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA = 7,
- WLAN_REASON_DISASSOC_STA_HAS_LEFT = 8,
- WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH = 9,
- /* 802.11h */
- WLAN_REASON_DISASSOC_BAD_POWER = 10,
- WLAN_REASON_DISASSOC_BAD_SUPP_CHAN = 11,
- /* 802.11i */
- WLAN_REASON_INVALID_IE = 13,
- WLAN_REASON_MIC_FAILURE = 14,
- WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT = 15,
- WLAN_REASON_GROUP_KEY_HANDSHAKE_TIMEOUT = 16,
- WLAN_REASON_IE_DIFFERENT = 17,
- WLAN_REASON_INVALID_GROUP_CIPHER = 18,
- WLAN_REASON_INVALID_PAIRWISE_CIPHER = 19,
- WLAN_REASON_INVALID_AKMP = 20,
- WLAN_REASON_UNSUPP_RSN_VERSION = 21,
- WLAN_REASON_INVALID_RSN_IE_CAP = 22,
- WLAN_REASON_IEEE8021X_FAILED = 23,
- WLAN_REASON_CIPHER_SUITE_REJECTED = 24,
-};
-
#define RTLLIB_STATMASK_SIGNAL (1<<0)
#define RTLLIB_STATMASK_RSSI (1<<1)
#define RTLLIB_STATMASK_NOISE (1<<2)
@@ -1122,8 +1060,6 @@ struct rtllib_stats {
struct rtllib_device;
-#include "rtllib_crypt.h"
-
#define SEC_KEY_1 (1<<0)
#define SEC_KEY_2 (1<<1)
#define SEC_KEY_3 (1<<2)
@@ -1146,7 +1082,6 @@ struct rtllib_device;
#define SEC_ALG_TKIP 2
#define SEC_ALG_CCMP 4
-#define WEP_KEYS 4
#define WEP_KEY_LEN 13
#define SCM_KEY_LEN 32
#define SCM_TEMPORAL_KEY_LENGTH 16
@@ -1158,8 +1093,8 @@ struct rtllib_security {
auth_algo:4,
unicast_uses_group:1,
encrypt:1;
- u8 key_sizes[WEP_KEYS];
- u8 keys[WEP_KEYS][SCM_KEY_LEN];
+ u8 key_sizes[NUM_WEP_KEYS];
+ u8 keys[NUM_WEP_KEYS][SCM_KEY_LEN];
u8 level;
u16 flags;
} __packed;
@@ -2251,14 +2186,10 @@ struct rtllib_device {
u8 ap_mac_addr[6];
u16 pairwise_key_type;
u16 group_key_type;
- struct list_head crypt_deinit_list;
- struct rtllib_crypt_data *crypt[WEP_KEYS];
- int tx_keyidx; /* default TX key index (crypt[tx_keyidx]) */
- struct sw_cam_table swcamtable[TOTAL_CAM_ENTRY];
- struct timer_list crypt_deinit_timer;
- int crypt_quiesced;
+ struct lib80211_crypt_info crypt_info;
+ struct sw_cam_table swcamtable[TOTAL_CAM_ENTRY];
int bcrx_sta_key; /* use individual keys to override default keys even
* with RX of broad/multicast frames */
@@ -2774,7 +2705,7 @@ extern void rtllib_rx_mgt(struct rtllib_device *ieee,
struct rtllib_rx_stats *stats);
extern void rtllib_rx_probe_rq(struct rtllib_device *ieee,
struct sk_buff *skb);
-extern int IsLegalChannel(struct rtllib_device *rtllib, u8 channel);
+extern int rtllib_legal_channel(struct rtllib_device *rtllib, u8 channel);
/* rtllib_wx.c */
extern int rtllib_wx_get_scan(struct rtllib_device *ieee,
@@ -2804,7 +2735,7 @@ extern int rtllib_wx_set_gen_ie(struct rtllib_device *ieee, u8 *ie, size_t len);
/* rtllib_softmac.c */
extern short rtllib_is_54g(struct rtllib_network *net);
-extern short rtllib_is_shortslot(struct rtllib_network net);
+extern short rtllib_is_shortslot(const struct rtllib_network *net);
extern int rtllib_rx_frame_softmac(struct rtllib_device *ieee,
struct sk_buff *skb,
struct rtllib_rx_stats *rx_stats, u16 type,
@@ -2971,8 +2902,8 @@ extern void HTInitializeHTInfo(struct rtllib_device *ieee);
extern void HTInitializeBssDesc(struct bss_ht *pBssHT);
extern void HTResetSelfAndSavePeerSetting(struct rtllib_device *ieee,
struct rtllib_network *pNetwork);
-extern void HTUpdateSelfAndPeerSetting(struct rtllib_device *ieee,
- struct rtllib_network *pNetwork);
+extern void HT_update_self_and_peer_setting(struct rtllib_device *ieee,
+ struct rtllib_network *pNetwork);
extern u8 HTGetHighestMCSRate(struct rtllib_device *ieee, u8 *pMCSRateSet,
u8 *pMCSFilter);
extern u8 MCS_FILTER_ALL[];
@@ -3052,21 +2983,6 @@ static inline const char *escape_essid(const char *essid, u8 essid_len)
(HTMcsToDataRate(_ieee, (u8)_MGN_RATE)))
/* fun with the built-in rtllib stack... */
-int rtllib_init(void);
-void rtllib_exit(void);
-int rtllib_crypto_init(void);
-void rtllib_crypto_deinit(void);
-int rtllib_crypto_tkip_init(void);
-void rtllib_crypto_tkip_exit(void);
-int rtllib_crypto_ccmp_init(void);
-void rtllib_crypto_ccmp_exit(void);
-int rtllib_crypto_wep_init(void);
-void rtllib_crypto_wep_exit(void);
-
-void rtllib_MgntDisconnectIBSS(struct rtllib_device *rtllib);
-void rtllib_MlmeDisassociateRequest(struct rtllib_device *rtllib, u8 *asSta,
- u8 asRsn);
-void rtllib_MgntDisconnectAP(struct rtllib_device *rtllib, u8 asRsn);
bool rtllib_MgntDisconnect(struct rtllib_device *rtllib, u8 asRsn);
@@ -3133,12 +3049,5 @@ extern void rtllib_TURBO_Info(struct rtllib_device *ieee, u8 **tag_p);
#define MUTEX_LOCK_PRIV(pmutex) mutex_lock(pmutex)
#define MUTEX_UNLOCK_PRIV(pmutex) mutex_unlock(pmutex)
#endif
-static inline void dump_buf(u8 *buf, u32 len)
-{
- u32 i;
- printk(KERN_INFO "-----------------Len %d----------------\n", len);
- for (i = 0; i < len; i++)
- printk("%2.2x-", *(buf+i));
- printk("\n");
-}
+
#endif /* RTLLIB_H */
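
The hunks above replace the driver-private crypt[]/tx_keyidx/crypt_deinit_* members with one embedded struct lib80211_crypt_info and switch WEP_KEYS to lib80211's NUM_WEP_KEYS. A small sketch of how the per-packet lookups map onto the new layout; rtllib_tx_crypt() is an illustrative helper, not part of the patch.

#include <net/lib80211.h>
#include "rtllib.h"

/* The active TX key now lives behind crypt_info rather than in open-coded
 * members of struct rtllib_device. */
static inline struct lib80211_crypt_data *
rtllib_tx_crypt(struct rtllib_device *ieee)
{
	return ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
}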
diff --git a/drivers/staging/rtl8192e/rtllib_crypt.c b/drivers/staging/rtl8192e/rtllib_crypt.c
index acda37b..86152d0 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt.c
@@ -11,7 +11,6 @@
*
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -22,7 +21,7 @@
struct rtllib_crypto_alg {
struct list_head list;
- struct rtllib_crypto_ops *ops;
+ struct lib80211_crypto_ops *ops;
};
@@ -33,15 +32,15 @@ struct rtllib_crypto {
static struct rtllib_crypto *hcrypt;
-void rtllib_crypt_deinit_entries(struct rtllib_device *ieee,
+void rtllib_crypt_deinit_entries(struct lib80211_crypt_info *info,
int force)
{
struct list_head *ptr, *n;
- struct rtllib_crypt_data *entry;
+ struct lib80211_crypt_data *entry;
- for (ptr = ieee->crypt_deinit_list.next, n = ptr->next;
- ptr != &ieee->crypt_deinit_list; ptr = n, n = ptr->next) {
- entry = list_entry(ptr, struct rtllib_crypt_data, list);
+ for (ptr = info->crypt_deinit_list.next, n = ptr->next;
+ ptr != &info->crypt_deinit_list; ptr = n, n = ptr->next) {
+ entry = list_entry(ptr, struct lib80211_crypt_data, list);
if (atomic_read(&entry->refcnt) != 0 && !force)
continue;
@@ -53,28 +52,30 @@ void rtllib_crypt_deinit_entries(struct rtllib_device *ieee,
kfree(entry);
}
}
+EXPORT_SYMBOL(rtllib_crypt_deinit_entries);
void rtllib_crypt_deinit_handler(unsigned long data)
{
- struct rtllib_device *ieee = (struct rtllib_device *)data;
+ struct lib80211_crypt_info *info = (struct lib80211_crypt_info *)data;
unsigned long flags;
- spin_lock_irqsave(&ieee->lock, flags);
- rtllib_crypt_deinit_entries(ieee, 0);
- if (!list_empty(&ieee->crypt_deinit_list)) {
+ spin_lock_irqsave(info->lock, flags);
+ rtllib_crypt_deinit_entries(info, 0);
+ if (!list_empty(&info->crypt_deinit_list)) {
printk(KERN_DEBUG "%s: entries remaining in delayed crypt "
- "deletion list\n", ieee->dev->name);
- ieee->crypt_deinit_timer.expires = jiffies + HZ;
- add_timer(&ieee->crypt_deinit_timer);
+ "deletion list\n", info->name);
+ info->crypt_deinit_timer.expires = jiffies + HZ;
+ add_timer(&info->crypt_deinit_timer);
}
- spin_unlock_irqrestore(&ieee->lock, flags);
+ spin_unlock_irqrestore(info->lock, flags);
}
+EXPORT_SYMBOL(rtllib_crypt_deinit_handler);
-void rtllib_crypt_delayed_deinit(struct rtllib_device *ieee,
- struct rtllib_crypt_data **crypt)
+void rtllib_crypt_delayed_deinit(struct lib80211_crypt_info *info,
+ struct lib80211_crypt_data **crypt)
{
- struct rtllib_crypt_data *tmp;
+ struct lib80211_crypt_data *tmp;
unsigned long flags;
if (*crypt == NULL)
@@ -87,16 +88,17 @@ void rtllib_crypt_delayed_deinit(struct rtllib_device *ieee,
* decrypt operations. Use a list of delayed deinits to avoid needing
* locking. */
- spin_lock_irqsave(&ieee->lock, flags);
- list_add(&tmp->list, &ieee->crypt_deinit_list);
- if (!timer_pending(&ieee->crypt_deinit_timer)) {
- ieee->crypt_deinit_timer.expires = jiffies + HZ;
- add_timer(&ieee->crypt_deinit_timer);
+ spin_lock_irqsave(info->lock, flags);
+ list_add(&tmp->list, &info->crypt_deinit_list);
+ if (!timer_pending(&info->crypt_deinit_timer)) {
+ info->crypt_deinit_timer.expires = jiffies + HZ;
+ add_timer(&info->crypt_deinit_timer);
}
- spin_unlock_irqrestore(&ieee->lock, flags);
+ spin_unlock_irqrestore(info->lock, flags);
}
+EXPORT_SYMBOL(rtllib_crypt_delayed_deinit);
-int rtllib_register_crypto_ops(struct rtllib_crypto_ops *ops)
+int rtllib_register_crypto_ops(struct lib80211_crypto_ops *ops)
{
unsigned long flags;
struct rtllib_crypto_alg *alg;
@@ -104,11 +106,10 @@ int rtllib_register_crypto_ops(struct rtllib_crypto_ops *ops)
if (hcrypt == NULL)
return -1;
- alg = kmalloc(sizeof(*alg), GFP_KERNEL);
+ alg = kzalloc(sizeof(*alg), GFP_KERNEL);
if (alg == NULL)
return -ENOMEM;
- memset(alg, 0, sizeof(*alg));
alg->ops = ops;
spin_lock_irqsave(&hcrypt->lock, flags);
@@ -120,8 +121,9 @@ int rtllib_register_crypto_ops(struct rtllib_crypto_ops *ops)
return 0;
}
+EXPORT_SYMBOL(rtllib_register_crypto_ops);
-int rtllib_unregister_crypto_ops(struct rtllib_crypto_ops *ops)
+int rtllib_unregister_crypto_ops(struct lib80211_crypto_ops *ops)
{
unsigned long flags;
struct list_head *ptr;
@@ -150,9 +152,10 @@ int rtllib_unregister_crypto_ops(struct rtllib_crypto_ops *ops)
return del_alg ? 0 : -1;
}
+EXPORT_SYMBOL(rtllib_unregister_crypto_ops);
-struct rtllib_crypto_ops *rtllib_get_crypto_ops(const char *name)
+struct lib80211_crypto_ops *rtllib_get_crypto_ops(const char *name)
{
unsigned long flags;
struct list_head *ptr;
@@ -177,12 +180,13 @@ struct rtllib_crypto_ops *rtllib_get_crypto_ops(const char *name)
else
return NULL;
}
+EXPORT_SYMBOL(rtllib_get_crypto_ops);
static void * rtllib_crypt_null_init(int keyidx) { return (void *) 1; }
static void rtllib_crypt_null_deinit(void *priv) {}
-static struct rtllib_crypto_ops rtllib_crypt_null = {
+static struct lib80211_crypto_ops rtllib_crypt_null = {
.name = "NULL",
.init = rtllib_crypt_null_init,
.deinit = rtllib_crypt_null_deinit,
@@ -192,8 +196,10 @@ static struct rtllib_crypto_ops rtllib_crypt_null = {
.decrypt_msdu = NULL,
.set_key = NULL,
.get_key = NULL,
- .extra_prefix_len = 0,
- .extra_postfix_len = 0,
+ .extra_mpdu_prefix_len = 0,
+ .extra_mpdu_postfix_len = 0,
+ .extra_msdu_prefix_len = 0,
+ .extra_msdu_postfix_len = 0,
.owner = THIS_MODULE,
};
@@ -202,15 +208,14 @@ int __init rtllib_crypto_init(void)
{
int ret = -ENOMEM;
- hcrypt = kmalloc(sizeof(*hcrypt), GFP_KERNEL);
+ hcrypt = kzalloc(sizeof(*hcrypt), GFP_KERNEL);
if (!hcrypt)
goto out;
- memset(hcrypt, 0, sizeof(*hcrypt));
INIT_LIST_HEAD(&hcrypt->algs);
spin_lock_init(&hcrypt->lock);
- ret = rtllib_register_crypto_ops(&rtllib_crypt_null);
+ ret = lib80211_register_crypto_ops(&rtllib_crypt_null);
if (ret < 0) {
kfree(hcrypt);
hcrypt = NULL;
@@ -239,3 +244,8 @@ void __exit rtllib_crypto_deinit(void)
kfree(hcrypt);
}
+
+module_init(rtllib_crypto_init);
+module_exit(rtllib_crypto_deinit);
+
+MODULE_LICENSE("GPL");
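
With the conversion above, the delayed-deinit list and timer belong to the shared lib80211_crypt_info, so a key can be retired while a decrypt may still hold a reference. A minimal caller-side sketch, assuming the mainline lib80211 API; ex_drop_key() is illustrative.

#include <net/lib80211.h>

/* Queue the key in slot "idx" for deferred teardown; lib80211's timer reaps
 * it once its refcount drops to zero. */
static void ex_drop_key(struct lib80211_crypt_info *info, int idx)
{
	lib80211_crypt_delayed_deinit(info, &info->crypt[idx]);
}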
diff --git a/drivers/staging/rtl8192e/rtllib_crypt.h b/drivers/staging/rtl8192e/rtllib_crypt.h
index 49b90b7..e177c92 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt.h
+++ b/drivers/staging/rtl8192e/rtllib_crypt.h
@@ -25,61 +25,11 @@
#include <linux/skbuff.h>
-struct rtllib_crypto_ops {
- const char *name;
-
- /* init new crypto context (e.g., allocate private data space,
- * select IV, etc.); returns NULL on failure or pointer to allocated
- * private data on success */
- void * (*init)(int keyidx);
-
- /* deinitialize crypto context and free allocated private data */
- void (*deinit)(void *priv);
-
- /* encrypt/decrypt return < 0 on error or >= 0 on success. The return
- * value from decrypt_mpdu is passed as the keyidx value for
- * decrypt_msdu. skb must have enough head and tail room for the
- * encryption; if not, error will be returned; these functions are
- * called for all MPDUs (i.e., fragments).
- */
- int (*encrypt_mpdu)(struct sk_buff *skb, int hdr_len, void *priv);
- int (*decrypt_mpdu)(struct sk_buff *skb, int hdr_len, void *priv);
-
- /* These functions are called for full MSDUs, i.e. full frames.
- * These can be NULL if full MSDU operations are not needed. */
- int (*encrypt_msdu)(struct sk_buff *skb, int hdr_len, void *priv);
- int (*decrypt_msdu)(struct sk_buff *skb, int keyidx, int hdr_len,
- void *priv, struct rtllib_device* ieee);
-
- int (*set_key)(void *key, int len, u8 *seq, void *priv);
- int (*get_key)(void *key, int len, u8 *seq, void *priv);
-
- /* procfs handler for printing out key information and possible
- * statistics */
- char * (*print_stats)(char *p, void *priv);
-
- /* maximum number of bytes added by encryption; encrypt buf is
- * allocated with extra_prefix_len bytes, copy of in_buf, and
- * extra_postfix_len; encrypt need not use all this space, but
- * the result must start at the beginning of the struct buffer and
- * correct length must be returned */
- int extra_prefix_len, extra_postfix_len;
-
- struct module *owner;
-};
-
-struct rtllib_crypt_data {
- struct list_head list; /* delayed deletion list */
- struct rtllib_crypto_ops *ops;
- void *priv;
- atomic_t refcnt;
-};
-
-int rtllib_register_crypto_ops(struct rtllib_crypto_ops *ops);
-int rtllib_unregister_crypto_ops(struct rtllib_crypto_ops *ops);
-struct rtllib_crypto_ops *rtllib_get_crypto_ops(const char *name);
-void rtllib_crypt_deinit_entries(struct rtllib_device *, int);
-void rtllib_crypt_deinit_handler(unsigned long);
-void rtllib_crypt_delayed_deinit(struct rtllib_device *ieee,
- struct rtllib_crypt_data **crypt);
+int rtllib_register_crypto_ops(struct lib80211_crypto_ops *ops);
+int rtllib_unregister_crypto_ops(struct lib80211_crypto_ops *ops);
+struct lib80211_crypto_ops *rtllib_get_crypto_ops(const char *name);
+void rtllib_crypt_deinit_entries(struct lib80211_crypt_info *info, int force);
+void rtllib_crypt_deinit_handler(unsigned long data);
+void rtllib_crypt_delayed_deinit(struct lib80211_crypt_info *info,
+ struct lib80211_crypt_data **crypt);
#endif
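
The header now only declares thin wrappers over lib80211 types instead of its own rtllib_crypto_ops. A sketch of what a cipher module registered through the shared registry looks like under that model; the "R-NULL" name and the ex_* identifiers are placeholders.

#include <linux/module.h>
#include <net/lib80211.h>

static void *ex_null_init(int keyidx) { return (void *)1; }
static void ex_null_deinit(void *priv) { }

static struct lib80211_crypto_ops ex_crypt_null = {
	.name	= "R-NULL",
	.init	= ex_null_init,
	.deinit	= ex_null_deinit,
	.owner	= THIS_MODULE,
};

static int __init ex_crypt_init(void)
{
	return lib80211_register_crypto_ops(&ex_crypt_null);
}

static void __exit ex_crypt_exit(void)
{
	lib80211_unregister_crypto_ops(&ex_crypt_null);
}

module_init(ex_crypt_init);
module_exit(ex_crypt_exit);
MODULE_LICENSE("GPL");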
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
index 6196b9a..4217b88 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_ccmp.c
@@ -9,7 +9,6 @@
* more details.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -63,10 +62,9 @@ static void *rtllib_ccmp_init(int key_idx)
{
struct rtllib_ccmp_data *priv;
- priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+ priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
if (priv == NULL)
goto fail;
- memset(priv, 0, sizeof(*priv));
priv->key_idx = key_idx;
priv->tfm = (void *)crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC);
@@ -429,13 +427,8 @@ static char *rtllib_ccmp_print_stats(char *p, void *priv)
return p;
}
-void rtllib_ccmp_null(void)
-{
- return;
-}
-
-static struct rtllib_crypto_ops rtllib_crypt_ccmp = {
- .name = "CCMP",
+static struct lib80211_crypto_ops rtllib_crypt_ccmp = {
+ .name = "R-CCMP",
.init = rtllib_ccmp_init,
.deinit = rtllib_ccmp_deinit,
.encrypt_mpdu = rtllib_ccmp_encrypt,
@@ -445,19 +438,24 @@ static struct rtllib_crypto_ops rtllib_crypt_ccmp = {
.set_key = rtllib_ccmp_set_key,
.get_key = rtllib_ccmp_get_key,
.print_stats = rtllib_ccmp_print_stats,
- .extra_prefix_len = CCMP_HDR_LEN,
- .extra_postfix_len = CCMP_MIC_LEN,
+ .extra_mpdu_prefix_len = CCMP_HDR_LEN,
+ .extra_mpdu_postfix_len = CCMP_MIC_LEN,
.owner = THIS_MODULE,
};
int __init rtllib_crypto_ccmp_init(void)
{
- return rtllib_register_crypto_ops(&rtllib_crypt_ccmp);
+ return lib80211_register_crypto_ops(&rtllib_crypt_ccmp);
}
void __exit rtllib_crypto_ccmp_exit(void)
{
- rtllib_unregister_crypto_ops(&rtllib_crypt_ccmp);
+ lib80211_unregister_crypto_ops(&rtllib_crypt_ccmp);
}
+
+module_init(rtllib_crypto_ccmp_init);
+module_exit(rtllib_crypto_ccmp_exit);
+
+MODULE_LICENSE("GPL");
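
The CCMP ops above advertise an 8-byte header in front of each MPDU and an 8-byte MIC at the tail through the split extra_mpdu_* fields. A sketch of the check a caller could make before invoking encrypt_mpdu(); ex_room_ok() is illustrative.

#include <linux/skbuff.h>
#include <net/lib80211.h>

/* True if the skb already has enough head/tail room for this cipher's
 * per-MPDU expansion. */
static bool ex_room_ok(const struct sk_buff *skb,
		       const struct lib80211_crypto_ops *ops)
{
	return skb_headroom(skb) >= ops->extra_mpdu_prefix_len &&
	       skb_tailroom(skb) >= ops->extra_mpdu_postfix_len;
}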
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
index 6a0c878..8009250 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_tkip.c
@@ -9,7 +9,6 @@
* more details.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -60,10 +59,9 @@ static void *rtllib_tkip_init(int key_idx)
{
struct rtllib_tkip_data *priv;
- priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+ priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
if (priv == NULL)
goto fail;
- memset(priv, 0, sizeof(*priv));
priv->key_idx = key_idx;
priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0,
CRYPTO_ALG_ASYNC);
@@ -598,8 +596,7 @@ static void rtllib_michael_mic_failure(struct net_device *dev,
}
static int rtllib_michael_mic_verify(struct sk_buff *skb, int keyidx,
- int hdr_len, void *priv,
- struct rtllib_device *ieee)
+ int hdr_len, void *priv)
{
struct rtllib_tkip_data *tkey = priv;
u8 mic[8];
@@ -618,23 +615,20 @@ static int rtllib_michael_mic_verify(struct sk_buff *skb, int keyidx,
skb->data + hdr_len, skb->len - 8 - hdr_len, mic))
return -1;
- if ((memcmp(mic, skb->data + skb->len - 8, 8) != 0) ||
- (ieee->force_mic_error)) {
+ if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) {
struct rtllib_hdr_4addr *hdr;
hdr = (struct rtllib_hdr_4addr *) skb->data;
printk(KERN_DEBUG "%s: Michael MIC verification failed for "
"MSDU from %pM keyidx=%d\n",
skb->dev ? skb->dev->name : "N/A", hdr->addr2,
keyidx);
- printk(KERN_DEBUG "%d, force_mic_error = %d\n",
- (memcmp(mic, skb->data + skb->len - 8, 8) != 0),\
- ieee->force_mic_error);
+ printk(KERN_DEBUG "%d\n",
+ memcmp(mic, skb->data + skb->len - 8, 8) != 0);
if (skb->dev) {
printk(KERN_INFO "skb->dev != NULL\n");
rtllib_michael_mic_failure(skb->dev, hdr, keyidx);
}
tkey->dot11RSNAStatsTKIPLocalMICFailures++;
- ieee->force_mic_error = false;
return -1;
}
@@ -740,9 +734,8 @@ static char *rtllib_tkip_print_stats(char *p, void *priv)
return p;
}
-
-static struct rtllib_crypto_ops rtllib_crypt_tkip = {
- .name = "TKIP",
+static struct lib80211_crypto_ops rtllib_crypt_tkip = {
+ .name = "R-TKIP",
.init = rtllib_tkip_init,
.deinit = rtllib_tkip_deinit,
.encrypt_mpdu = rtllib_tkip_encrypt,
@@ -752,24 +745,25 @@ static struct rtllib_crypto_ops rtllib_crypt_tkip = {
.set_key = rtllib_tkip_set_key,
.get_key = rtllib_tkip_get_key,
.print_stats = rtllib_tkip_print_stats,
- .extra_prefix_len = 4 + 4, /* IV + ExtIV */
- .extra_postfix_len = 8 + 4, /* MIC + ICV */
+ .extra_mpdu_prefix_len = 4 + 4, /* IV + ExtIV */
+ .extra_mpdu_postfix_len = 4, /* ICV */
+ .extra_msdu_postfix_len = 8, /* MIC */
.owner = THIS_MODULE,
};
int __init rtllib_crypto_tkip_init(void)
{
- return rtllib_register_crypto_ops(&rtllib_crypt_tkip);
+ return lib80211_register_crypto_ops(&rtllib_crypt_tkip);
}
void __exit rtllib_crypto_tkip_exit(void)
{
- rtllib_unregister_crypto_ops(&rtllib_crypt_tkip);
+ lib80211_unregister_crypto_ops(&rtllib_crypt_tkip);
}
-void rtllib_tkip_null(void)
-{
- return;
-}
+module_init(rtllib_crypto_tkip_init);
+module_exit(rtllib_crypto_tkip_exit);
+
+MODULE_LICENSE("GPL");
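
The old combined extra_prefix_len/extra_postfix_len hid that TKIP's IV, ExtIV and ICV are per fragment while the Michael MIC is per MSDU; the new fields make that split explicit. A sketch of the resulting worst-case expansion for an MSDU sent as n_frags fragments; ex_tkip_overhead() is illustrative.

#include <net/lib80211.h>

static unsigned int ex_tkip_overhead(const struct lib80211_crypto_ops *ops,
				     unsigned int n_frags)
{
	return n_frags * (ops->extra_mpdu_prefix_len +	/* IV + ExtIV */
			  ops->extra_mpdu_postfix_len) +/* ICV */
	       ops->extra_msdu_prefix_len +		/* 0 for TKIP */
	       ops->extra_msdu_postfix_len;		/* Michael MIC */
}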
diff --git a/drivers/staging/rtl8192e/rtllib_crypt_wep.c b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
index c59bf10..8cdf389 100644
--- a/drivers/staging/rtl8192e/rtllib_crypt_wep.c
+++ b/drivers/staging/rtl8192e/rtllib_crypt_wep.c
@@ -9,7 +9,6 @@
* more details.
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -38,10 +37,9 @@ static void *prism2_wep_init(int keyidx)
{
struct prism2_wep_data *priv;
- priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
+ priv = kzalloc(sizeof(*priv), GFP_ATOMIC);
if (priv == NULL)
goto fail;
- memset(priv, 0, sizeof(*priv));
priv->key_idx = keyidx;
priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
@@ -257,9 +255,8 @@ static char *prism2_wep_print_stats(char *p, void *priv)
return p;
}
-
-static struct rtllib_crypto_ops rtllib_crypt_wep = {
- .name = "WEP",
+static struct lib80211_crypto_ops rtllib_crypt_wep = {
+ .name = "R-WEP",
.init = prism2_wep_init,
.deinit = prism2_wep_deinit,
.encrypt_mpdu = prism2_wep_encrypt,
@@ -269,24 +266,24 @@ static struct rtllib_crypto_ops rtllib_crypt_wep = {
.set_key = prism2_wep_set_key,
.get_key = prism2_wep_get_key,
.print_stats = prism2_wep_print_stats,
- .extra_prefix_len = 4, /* IV */
- .extra_postfix_len = 4, /* ICV */
+ .extra_mpdu_prefix_len = 4, /* IV */
+ .extra_mpdu_postfix_len = 4, /* ICV */
.owner = THIS_MODULE,
};
int __init rtllib_crypto_wep_init(void)
{
- return rtllib_register_crypto_ops(&rtllib_crypt_wep);
+ return lib80211_register_crypto_ops(&rtllib_crypt_wep);
}
void __exit rtllib_crypto_wep_exit(void)
{
- rtllib_unregister_crypto_ops(&rtllib_crypt_wep);
+ lib80211_unregister_crypto_ops(&rtllib_crypt_wep);
}
-void rtllib_wep_null(void)
-{
- return;
-}
+module_init(rtllib_crypto_wep_init);
+module_exit(rtllib_crypto_wep_exit);
+
+MODULE_LICENSE("GPL");
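
Renaming the algorithms to "R-WEP"/"R-TKIP"/"R-CCMP" keeps them from clashing with the ciphers lib80211 already registers under the plain names in the shared registry. A sketch of the lookup-with-fallback pattern the callers switch to; ex_get_wep_ops() is illustrative.

#include <linux/kmod.h>
#include <net/lib80211.h>

static struct lib80211_crypto_ops *ex_get_wep_ops(void)
{
	struct lib80211_crypto_ops *ops = lib80211_get_crypto_ops("R-WEP");

	if (!ops) {
		/* not registered yet: load the rtllib cipher module and retry */
		request_module("rtllib_crypt_wep");
		ops = lib80211_get_crypto_ops("R-WEP");
	}
	return ops;
}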
diff --git a/drivers/staging/rtl8192e/rtllib_debug.h b/drivers/staging/rtl8192e/rtllib_debug.h
new file mode 100644
index 0000000..2bfc115
--- /dev/null
+++ b/drivers/staging/rtl8192e/rtllib_debug.h
@@ -0,0 +1,86 @@
+/******************************************************************************
+ * Copyright(c) 2008 - 2010 Realtek Corporation. All rights reserved.
+ *
+ * Based on the r8180 driver, which is:
+ * Copyright 2004-2005 Andrea Merello <andreamrl@tiscali.it>, et al.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called LICENSE.
+ *
+ * Contact Information:
+ * wlanfae <wlanfae@realtek.com>
+******************************************************************************/
+#ifndef _RTL_DEBUG_H
+#define _RTL_DEBUG_H
+
+/* Allow files to override DRV_NAME */
+#ifndef DRV_NAME
+#define DRV_NAME "rtllib_92e"
+#endif
+
+#define DMESG(x, a...)
+
+extern u32 rt_global_debug_component;
+
+/* These are the defines for rt_global_debug_component */
+enum RTL_DEBUG {
+ COMP_TRACE = (1 << 0),
+ COMP_DBG = (1 << 1),
+ COMP_INIT = (1 << 2),
+ COMP_RECV = (1 << 3),
+ COMP_SEND = (1 << 4),
+ COMP_CMD = (1 << 5),
+ COMP_POWER = (1 << 6),
+ COMP_EPROM = (1 << 7),
+ COMP_SWBW = (1 << 8),
+ COMP_SEC = (1 << 9),
+ COMP_LPS = (1 << 10),
+ COMP_QOS = (1 << 11),
+ COMP_RATE = (1 << 12),
+ COMP_RXDESC = (1 << 13),
+ COMP_PHY = (1 << 14),
+ COMP_DIG = (1 << 15),
+ COMP_TXAGC = (1 << 16),
+ COMP_HALDM = (1 << 17),
+ COMP_POWER_TRACKING = (1 << 18),
+ COMP_CH = (1 << 19),
+ COMP_RF = (1 << 20),
+ COMP_FIRMWARE = (1 << 21),
+ COMP_HT = (1 << 22),
+ COMP_RESET = (1 << 23),
+ COMP_CMDPKT = (1 << 24),
+ COMP_SCAN = (1 << 25),
+ COMP_PS = (1 << 26),
+ COMP_DOWN = (1 << 27),
+ COMP_INTR = (1 << 28),
+ COMP_LED = (1 << 29),
+ COMP_MLME = (1 << 30),
+ COMP_ERR = (1 << 31)
+};
+
+#define RT_TRACE(component, x, args...) \
+do { \
+ if (rt_global_debug_component & component) \
+ printk(KERN_DEBUG DRV_NAME ":" x "\n" , \
+ ##args);\
+} while (0);
+
+#define assert(expr) \
+ if (!(expr)) { \
+ printk(KERN_INFO "Assertion failed! %s,%s,%s,line=%d\n", \
+ #expr, __FILE__, __func__, __LINE__); \
+ }
+
+#endif
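
A short usage sketch for the new debug header: EX_RT_TRACE and ex_report_link are illustrative, and the macro variant here differs from the one above only in dropping the semicolon after while (0), so a call also composes with an unbraced if/else. It assumes rtllib_debug.h has been included.

#define EX_RT_TRACE(component, fmt, args...)				\
do {									\
	if (rt_global_debug_component & (component))			\
		printk(KERN_DEBUG DRV_NAME ": " fmt "\n", ##args);	\
} while (0)

static void ex_report_link(int linked)
{
	if (linked)
		EX_RT_TRACE(COMP_MLME, "associated");
	else
		EX_RT_TRACE(COMP_ERR, "link lost, state=%d", linked);
}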
diff --git a/drivers/staging/rtl8192e/rtllib_module.c b/drivers/staging/rtl8192e/rtllib_module.c
index c36a140..f9dae95 100644
--- a/drivers/staging/rtl8192e/rtllib_module.c
+++ b/drivers/staging/rtl8192e/rtllib_module.c
@@ -45,7 +45,6 @@
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
-#include <linux/version.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
@@ -54,7 +53,9 @@
#include "rtllib.h"
-#define DRV_NAME "rtllib_92e"
+u32 rt_global_debug_component = COMP_ERR;
+EXPORT_SYMBOL(rt_global_debug_component);
+
void _setup_timer(struct timer_list *ptimer, void *fun, unsigned long data)
{
@@ -135,10 +136,6 @@ struct net_device *alloc_rtllib(int sizeof_priv)
ieee->host_decrypt = 1;
ieee->ieee802_1x = 1; /* Default to supporting 802.1x */
- INIT_LIST_HEAD(&ieee->crypt_deinit_list);
- _setup_timer(&ieee->crypt_deinit_timer,
- rtllib_crypt_deinit_handler,
- (unsigned long) ieee);
ieee->rtllib_ap_sec_type = rtllib_ap_sec_type;
spin_lock_init(&ieee->lock);
@@ -148,6 +145,9 @@ struct net_device *alloc_rtllib(int sizeof_priv)
atomic_set(&(ieee->atm_chnlop), 0);
atomic_set(&(ieee->atm_swbw), 0);
+ /* SAM FIXME */
+ lib80211_crypt_info_init(&ieee->crypt_info, "RTLLIB", &ieee->lock);
+
ieee->bHalfNMode = false;
ieee->wpa_enabled = 0;
ieee->tkip_countermeasures = 0;
@@ -177,10 +177,6 @@ struct net_device *alloc_rtllib(int sizeof_priv)
ieee->last_packet_time[i] = 0;
}
- rtllib_tkip_null();
- rtllib_wep_null();
- rtllib_ccmp_null();
-
return dev;
failed:
@@ -188,32 +184,23 @@ struct net_device *alloc_rtllib(int sizeof_priv)
free_netdev(dev);
return NULL;
}
+EXPORT_SYMBOL(alloc_rtllib);
void free_rtllib(struct net_device *dev)
{
struct rtllib_device *ieee = (struct rtllib_device *)
netdev_priv_rsl(dev);
- int i;
kfree(ieee->pHTInfo);
ieee->pHTInfo = NULL;
rtllib_softmac_free(ieee);
- del_timer_sync(&ieee->crypt_deinit_timer);
- rtllib_crypt_deinit_entries(ieee, 1);
-
- for (i = 0; i < WEP_KEYS; i++) {
- struct rtllib_crypt_data *crypt = ieee->crypt[i];
- if (crypt) {
- if (crypt->ops)
- crypt->ops->deinit(crypt->priv);
- kfree(crypt);
- ieee->crypt[i] = NULL;
- }
- }
+
+ lib80211_crypt_info_free(&ieee->crypt_info);
rtllib_networks_free(ieee);
free_netdev(dev);
}
+EXPORT_SYMBOL(free_rtllib);
u32 rtllib_debug_level;
static int debug = \
@@ -287,3 +274,8 @@ void __exit rtllib_exit(void)
rtllib_proc = NULL;
}
}
+
+module_init(rtllib_init);
+module_exit(rtllib_exit);
+
+MODULE_LICENSE("GPL");
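
alloc_rtllib() now seeds ieee->crypt_info against the device lock and free_rtllib() hands key teardown to lib80211_crypt_info_free(), replacing the open-coded WEP_KEYS loop. A rough sketch of the attach/detach pairing a driver sees; ex_attach()/ex_detach() are illustrative.

#include <linux/netdevice.h>
#include "rtllib.h"

static struct net_device *ex_attach(int priv_size)
{
	/* crypt_info is initialised inside alloc_rtllib() via
	 * lib80211_crypt_info_init(&ieee->crypt_info, "RTLLIB", &ieee->lock) */
	return alloc_rtllib(priv_size);
}

static void ex_detach(struct net_device *dev)
{
	/* releases keys and the delayed-deinit timer before free_netdev() */
	free_rtllib(dev);
}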
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index 8d0af5e..6c5061f 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -36,7 +36,6 @@
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
-#include <linux/version.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
@@ -281,7 +280,7 @@ static int rtllib_is_eapol_frame(struct rtllib_device *ieee,
/* Called only as a tasklet (software IRQ), by rtllib_rx */
static inline int
rtllib_rx_frame_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
- struct rtllib_crypt_data *crypt)
+ struct lib80211_crypt_data *crypt)
{
struct rtllib_hdr_4addr *hdr;
int res, hdrlen;
@@ -322,7 +321,7 @@ rtllib_rx_frame_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
/* Called only as a tasklet (software IRQ), by rtllib_rx */
static inline int
rtllib_rx_frame_decrypt_msdu(struct rtllib_device *ieee, struct sk_buff *skb,
- int keyidx, struct rtllib_crypt_data *crypt)
+ int keyidx, struct lib80211_crypt_data *crypt)
{
struct rtllib_hdr_4addr *hdr;
int res, hdrlen;
@@ -341,7 +340,7 @@ rtllib_rx_frame_decrypt_msdu(struct rtllib_device *ieee, struct sk_buff *skb,
hdrlen = rtllib_get_hdrlen(le16_to_cpu(hdr->frame_ctl));
atomic_inc(&crypt->refcnt);
- res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv, ieee);
+ res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv);
atomic_dec(&crypt->refcnt);
if (res < 0) {
printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed"
@@ -1010,7 +1009,7 @@ static int rtllib_rx_data_filter(struct rtllib_device *ieee, u16 fc,
}
static int rtllib_rx_get_crypt(struct rtllib_device *ieee, struct sk_buff *skb,
- struct rtllib_crypt_data **crypt, size_t hdrlen)
+ struct lib80211_crypt_data **crypt, size_t hdrlen)
{
struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
u16 fc = le16_to_cpu(hdr->frame_ctl);
@@ -1020,7 +1019,7 @@ static int rtllib_rx_get_crypt(struct rtllib_device *ieee, struct sk_buff *skb,
if (skb->len >= hdrlen + 3)
idx = skb->data[hdrlen + 3] >> 6;
- *crypt = ieee->crypt[idx];
+ *crypt = ieee->crypt_info.crypt[idx];
/* allow NULL decrypt to indicate an station specific override
* for default encryption */
if (*crypt && ((*crypt)->ops == NULL ||
@@ -1045,7 +1044,7 @@ static int rtllib_rx_get_crypt(struct rtllib_device *ieee, struct sk_buff *skb,
static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
struct rtllib_rx_stats *rx_stats,
- struct rtllib_crypt_data *crypt, size_t hdrlen)
+ struct lib80211_crypt_data *crypt, size_t hdrlen)
{
struct rtllib_hdr_4addr *hdr;
int keyidx = 0;
@@ -1253,7 +1252,7 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
{
struct net_device *dev = ieee->dev;
struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
- struct rtllib_crypt_data *crypt = NULL;
+ struct lib80211_crypt_data *crypt = NULL;
struct rtllib_rxb *rxb = NULL;
struct rx_ts_record *pTS = NULL;
u16 fc, sc, SeqNum = 0;
@@ -1497,6 +1496,7 @@ int rtllib_rx(struct rtllib_device *ieee, struct sk_buff *skb,
ieee->stats.rx_dropped++;
return 0;
}
+EXPORT_SYMBOL(rtllib_rx);
static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };
@@ -2492,7 +2492,7 @@ static int IsPassiveChannel(struct rtllib_device *rtllib, u8 channel)
return 0;
}
-int IsLegalChannel(struct rtllib_device *rtllib, u8 channel)
+int rtllib_legal_channel(struct rtllib_device *rtllib, u8 channel)
{
if (MAX_CHANNEL_NUMBER < channel) {
printk(KERN_INFO "%s(): Invalid Channel\n", __func__);
@@ -2503,6 +2503,7 @@ int IsLegalChannel(struct rtllib_device *rtllib, u8 channel)
return 0;
}
+EXPORT_SYMBOL(rtllib_legal_channel);
static inline void rtllib_process_probe_response(
struct rtllib_device *ieee,
@@ -2553,7 +2554,7 @@ static inline void rtllib_process_probe_response(
}
- if (!IsLegalChannel(ieee, network->channel))
+ if (!rtllib_legal_channel(ieee, network->channel))
goto free_network;
if (WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
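
For decryption, the RX path above reads the key slot out of the IV that follows the 802.11 header: the key id sits in the top two bits of the fourth IV octet. A sketch of that extraction with an explicit length check; ex_rx_keyidx() is an illustrative helper.

#include <linux/skbuff.h>

static int ex_rx_keyidx(const struct sk_buff *skb, size_t hdrlen)
{
	/* need the full 4-byte IV before dereferencing its last octet */
	if (skb->len < hdrlen + 4)
		return 0;
	return skb->data[hdrlen + 3] >> 6;	/* 0..3, indexes crypt_info.crypt[] */
}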
diff --git a/drivers/staging/rtl8192e/rtllib_softmac.c b/drivers/staging/rtl8192e/rtllib_softmac.c
index b508685..1637f11 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac.c
@@ -15,11 +15,9 @@
#include "rtllib.h"
-#include "rtl_core.h"
#include <linux/random.h>
#include <linux/delay.h>
-#include <linux/version.h>
#include <linux/uaccess.h>
#include "dot11d.h"
@@ -28,9 +26,9 @@ short rtllib_is_54g(struct rtllib_network *net)
return (net->rates_ex_len > 0) || (net->rates_len > 4);
}
-short rtllib_is_shortslot(struct rtllib_network net)
+short rtllib_is_shortslot(const struct rtllib_network *net)
{
- return net.capability & WLAN_CAPABILITY_SHORT_SLOT_TIME;
+ return net->capability & WLAN_CAPABILITY_SHORT_SLOT_TIME;
}
/* returns the total length needed for pleacing the RATE MFIE
@@ -468,6 +466,7 @@ void rtllib_EnableIntelPromiscuousMode(struct net_device *dev,
ieee->bNetPromiscuousMode = true;
}
+EXPORT_SYMBOL(rtllib_EnableIntelPromiscuousMode);
/*
@@ -490,6 +489,7 @@ void rtllib_DisableIntelPromiscuousMode(struct net_device *dev,
ieee->bNetPromiscuousMode = false;
}
+EXPORT_SYMBOL(rtllib_DisableIntelPromiscuousMode);
static void rtllib_send_probe(struct rtllib_device *ieee, u8 is_mesh)
{
@@ -685,6 +685,7 @@ void rtllib_stop_send_beacons(struct rtllib_device *ieee)
if (ieee->softmac_features & IEEE_SOFTMAC_BEACONS)
rtllib_beacons_stop(ieee);
}
+EXPORT_SYMBOL(rtllib_stop_send_beacons);
void rtllib_start_send_beacons(struct rtllib_device *ieee)
@@ -694,6 +695,7 @@ void rtllib_start_send_beacons(struct rtllib_device *ieee)
if (ieee->softmac_features & IEEE_SOFTMAC_BEACONS)
rtllib_beacons_start(ieee);
}
+EXPORT_SYMBOL(rtllib_start_send_beacons);
static void rtllib_softmac_stop_scan(struct rtllib_device *ieee)
@@ -719,6 +721,7 @@ void rtllib_stop_scan(struct rtllib_device *ieee)
ieee->rtllib_stop_hw_scan(ieee->dev);
}
}
+EXPORT_SYMBOL(rtllib_stop_scan);
void rtllib_stop_scan_syncro(struct rtllib_device *ieee)
{
@@ -729,6 +732,7 @@ void rtllib_stop_scan_syncro(struct rtllib_device *ieee)
ieee->rtllib_stop_hw_scan(ieee->dev);
}
}
+EXPORT_SYMBOL(rtllib_stop_scan_syncro);
bool rtllib_act_scanning(struct rtllib_device *ieee, bool sync_scan)
{
@@ -741,6 +745,7 @@ bool rtllib_act_scanning(struct rtllib_device *ieee, bool sync_scan)
return test_bit(STATUS_SCANNING, &ieee->status);
}
}
+EXPORT_SYMBOL(rtllib_act_scanning);
/* called with ieee->lock held */
static void rtllib_start_scan(struct rtllib_device *ieee)
@@ -781,6 +786,7 @@ void rtllib_start_scan_syncro(struct rtllib_device *ieee, u8 is_mesh)
ieee->rtllib_start_hw_scan(ieee->dev);
}
}
+EXPORT_SYMBOL(rtllib_start_scan_syncro);
inline struct sk_buff *rtllib_authentication_req(struct rtllib_network *beacon,
struct rtllib_device *ieee, int challengelen, u8 *daddr)
@@ -830,7 +836,7 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee, u8 *dest)
struct sk_buff *skb = NULL;
int encrypt;
int atim_len, erp_len;
- struct rtllib_crypt_data *crypt;
+ struct lib80211_crypt_data *crypt;
char *ssid = ieee->current_network.ssid;
int ssid_len = ieee->current_network.ssid_len;
@@ -865,9 +871,9 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee, u8 *dest)
} else
erp_len = 0;
- crypt = ieee->crypt[ieee->tx_keyidx];
+ crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
encrypt = ieee->host_encrypt && crypt && crypt->ops &&
- ((0 == strcmp(crypt->ops->name, "WEP") || wpa_ie_len));
+ ((0 == strcmp(crypt->ops->name, "R-WEP") || wpa_ie_len));
if (ieee->pHTInfo->bCurrentHTSupport) {
tmp_ht_cap_buf = (u8 *) &(ieee->pHTInfo->SelfHTCap);
tmp_ht_cap_len = sizeof(ieee->pHTInfo->SelfHTCap);
@@ -917,7 +923,7 @@ static struct sk_buff *rtllib_probe_resp(struct rtllib_device *ieee, u8 *dest)
cpu_to_le16((beacon_buf->capability |=
WLAN_CAPABILITY_SHORT_SLOT_TIME));
- crypt = ieee->crypt[ieee->tx_keyidx];
+ crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
if (encrypt)
beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
@@ -976,7 +982,7 @@ static struct sk_buff *rtllib_assoc_resp(struct rtllib_device *ieee, u8 *dest)
struct sk_buff *skb;
u8 *tag;
- struct rtllib_crypt_data *crypt;
+ struct lib80211_crypt_data *crypt;
struct rtllib_assoc_response_frame *assoc;
short encrypt;
@@ -1007,7 +1013,7 @@ static struct sk_buff *rtllib_assoc_resp(struct rtllib_device *ieee, u8 *dest)
cpu_to_le16(WLAN_CAPABILITY_SHORT_SLOT_TIME);
if (ieee->host_encrypt)
- crypt = ieee->crypt[ieee->tx_keyidx];
+ crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
else
crypt = NULL;
@@ -1172,7 +1178,7 @@ inline struct sk_buff *rtllib_association_req(struct rtllib_network *beacon,
unsigned int ckip_ie_len = 0;
unsigned int ccxrm_ie_len = 0;
unsigned int cxvernum_ie_len = 0;
- struct rtllib_crypt_data *crypt;
+ struct lib80211_crypt_data *crypt;
int encrypt;
int PMKCacheIdx;
@@ -1185,10 +1191,10 @@ inline struct sk_buff *rtllib_association_req(struct rtllib_network *beacon,
unsigned int turbo_info_len = beacon->Turbo_Enable ? 9 : 0;
int len = 0;
- crypt = ieee->crypt[ieee->tx_keyidx];
+ crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
if (crypt != NULL)
encrypt = ieee->host_encrypt && crypt && crypt->ops &&
- ((0 == strcmp(crypt->ops->name, "WEP") ||
+ ((0 == strcmp(crypt->ops->name, "R-WEP") ||
wpa_ie_len));
else
encrypt = 0;
@@ -1956,6 +1962,7 @@ void rtllib_sta_ps_send_null_frame(struct rtllib_device *ieee, short pwr)
if (buf)
softmac_ps_mgmt_xmit(buf, ieee);
}
+EXPORT_SYMBOL(rtllib_sta_ps_send_null_frame);
void rtllib_sta_ps_send_pspoll_frame(struct rtllib_device *ieee)
{
@@ -2168,6 +2175,7 @@ void rtllib_ps_tx_ack(struct rtllib_device *ieee, short success)
}
spin_unlock_irqrestore(&ieee->lock, flags);
}
+EXPORT_SYMBOL(rtllib_ps_tx_ack);
static void rtllib_process_action(struct rtllib_device *ieee, struct sk_buff *skb)
{
@@ -2540,6 +2548,7 @@ void rtllib_reset_queue(struct rtllib_device *ieee)
spin_unlock_irqrestore(&ieee->lock, flags);
}
+EXPORT_SYMBOL(rtllib_reset_queue);
void rtllib_wake_queue(struct rtllib_device *ieee)
{
@@ -2928,6 +2937,7 @@ struct sk_buff *rtllib_get_beacon(struct rtllib_device *ieee)
return skb;
}
+EXPORT_SYMBOL(rtllib_get_beacon);
void rtllib_softmac_stop_protocol(struct rtllib_device *ieee, u8 mesh_flag,
u8 shutdown)
@@ -2937,6 +2947,7 @@ void rtllib_softmac_stop_protocol(struct rtllib_device *ieee, u8 mesh_flag,
rtllib_stop_protocol(ieee, shutdown);
up(&ieee->wx_sem);
}
+EXPORT_SYMBOL(rtllib_softmac_stop_protocol);
void rtllib_stop_protocol(struct rtllib_device *ieee, u8 shutdown)
@@ -2985,6 +2996,7 @@ void rtllib_softmac_start_protocol(struct rtllib_device *ieee, u8 mesh_flag)
rtllib_start_protocol(ieee);
up(&ieee->wx_sem);
}
+EXPORT_SYMBOL(rtllib_softmac_start_protocol);
void rtllib_start_protocol(struct rtllib_device *ieee)
{
@@ -3048,10 +3060,9 @@ void rtllib_softmac_init(struct rtllib_device *ieee)
ieee->state = RTLLIB_NOLINK;
for (i = 0; i < 5; i++)
ieee->seq_ctrl[i] = 0;
- ieee->pDot11dInfo = kmalloc(sizeof(struct rt_dot11d_info), GFP_ATOMIC);
+ ieee->pDot11dInfo = kzalloc(sizeof(struct rt_dot11d_info), GFP_ATOMIC);
if (!ieee->pDot11dInfo)
RTLLIB_DEBUG(RTLLIB_DL_ERR, "can't alloc memory for DOT11D\n");
- memset(ieee->pDot11dInfo, 0, sizeof(struct rt_dot11d_info));
ieee->LinkDetectInfo.SlotIndex = 0;
ieee->LinkDetectInfo.SlotNum = 2;
ieee->LinkDetectInfo.NumRecvBcnInPeriod = 0;
@@ -3207,11 +3218,11 @@ static int rtllib_wpa_set_wpa_ie(struct rtllib_device *ieee,
return -EINVAL;
if (param->u.wpa_ie.len) {
- buf = kmalloc(param->u.wpa_ie.len, GFP_KERNEL);
+ buf = kmemdup(param->u.wpa_ie.data, param->u.wpa_ie.len,
+ GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- memcpy(buf, param->u.wpa_ie.data, param->u.wpa_ie.len);
kfree(ieee->wpa_ie);
ieee->wpa_ie = buf;
ieee->wpa_ie_len = param->u.wpa_ie.len;
@@ -3334,8 +3345,8 @@ static int rtllib_wpa_set_encryption(struct rtllib_device *ieee,
u8 is_mesh)
{
int ret = 0;
- struct rtllib_crypto_ops *ops;
- struct rtllib_crypt_data **crypt;
+ struct lib80211_crypto_ops *ops;
+ struct lib80211_crypt_data **crypt;
struct rtllib_security sec = {
.flags = 0,
@@ -3354,9 +3365,9 @@ static int rtllib_wpa_set_encryption(struct rtllib_device *ieee,
if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
- if (param->u.crypt.idx >= WEP_KEYS)
+ if (param->u.crypt.idx >= NUM_WEP_KEYS)
return -EINVAL;
- crypt = &ieee->crypt[param->u.crypt.idx];
+ crypt = &ieee->crypt_info.crypt[param->u.crypt.idx];
} else {
return -EINVAL;
}
@@ -3366,7 +3377,7 @@ static int rtllib_wpa_set_encryption(struct rtllib_device *ieee,
sec.enabled = 0;
sec.level = SEC_LEVEL_0;
sec.flags |= SEC_ENABLED | SEC_LEVEL;
- rtllib_crypt_delayed_deinit(ieee, crypt);
+ lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
}
goto done;
}
@@ -3375,19 +3386,19 @@ static int rtllib_wpa_set_encryption(struct rtllib_device *ieee,
/* IPW HW cannot build TKIP MIC, host decryption still needed. */
if (!(ieee->host_encrypt || ieee->host_decrypt) &&
- strcmp(param->u.crypt.alg, "TKIP"))
+ strcmp(param->u.crypt.alg, "R-TKIP"))
goto skip_host_crypt;
- ops = rtllib_get_crypto_ops(param->u.crypt.alg);
- if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) {
+ ops = lib80211_get_crypto_ops(param->u.crypt.alg);
+ if (ops == NULL && strcmp(param->u.crypt.alg, "R-WEP") == 0) {
request_module("rtllib_crypt_wep");
- ops = rtllib_get_crypto_ops(param->u.crypt.alg);
- } else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) {
+ ops = lib80211_get_crypto_ops(param->u.crypt.alg);
+ } else if (ops == NULL && strcmp(param->u.crypt.alg, "R-TKIP") == 0) {
request_module("rtllib_crypt_tkip");
- ops = rtllib_get_crypto_ops(param->u.crypt.alg);
- } else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) {
+ ops = lib80211_get_crypto_ops(param->u.crypt.alg);
+ } else if (ops == NULL && strcmp(param->u.crypt.alg, "R-CCMP") == 0) {
request_module("rtllib_crypt_ccmp");
- ops = rtllib_get_crypto_ops(param->u.crypt.alg);
+ ops = lib80211_get_crypto_ops(param->u.crypt.alg);
}
if (ops == NULL) {
printk(KERN_INFO "unknown crypto alg '%s'\n",
@@ -3397,17 +3408,17 @@ static int rtllib_wpa_set_encryption(struct rtllib_device *ieee,
goto done;
}
if (*crypt == NULL || (*crypt)->ops != ops) {
- struct rtllib_crypt_data *new_crypt;
+ struct lib80211_crypt_data *new_crypt;
- rtllib_crypt_delayed_deinit(ieee, crypt);
+ lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
- new_crypt = (struct rtllib_crypt_data *)
+ new_crypt = (struct lib80211_crypt_data *)
kmalloc(sizeof(*new_crypt), GFP_KERNEL);
if (new_crypt == NULL) {
ret = -ENOMEM;
goto done;
}
- memset(new_crypt, 0, sizeof(struct rtllib_crypt_data));
+ memset(new_crypt, 0, sizeof(struct lib80211_crypt_data));
new_crypt->ops = ops;
if (new_crypt->ops)
new_crypt->priv =
@@ -3435,7 +3446,7 @@ static int rtllib_wpa_set_encryption(struct rtllib_device *ieee,
skip_host_crypt:
if (param->u.crypt.set_tx) {
- ieee->tx_keyidx = param->u.crypt.idx;
+ ieee->crypt_info.tx_keyidx = param->u.crypt.idx;
sec.active_key = param->u.crypt.idx;
sec.flags |= SEC_ACTIVE_KEY;
} else
@@ -3448,13 +3459,13 @@ static int rtllib_wpa_set_encryption(struct rtllib_device *ieee,
sec.key_sizes[param->u.crypt.idx] = param->u.crypt.key_len;
sec.flags |= (1 << param->u.crypt.idx);
- if (strcmp(param->u.crypt.alg, "WEP") == 0) {
+ if (strcmp(param->u.crypt.alg, "R-WEP") == 0) {
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_1;
- } else if (strcmp(param->u.crypt.alg, "TKIP") == 0) {
+ } else if (strcmp(param->u.crypt.alg, "R-TKIP") == 0) {
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_2;
- } else if (strcmp(param->u.crypt.alg, "CCMP") == 0) {
+ } else if (strcmp(param->u.crypt.alg, "R-CCMP") == 0) {
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_3;
}
@@ -3551,13 +3562,13 @@ u8 rtllib_ap_sec_type(struct rtllib_device *ieee)
static u8 ccmp_ie[4] = {0x00, 0x50, 0xf2, 0x04};
static u8 ccmp_rsn_ie[4] = {0x00, 0x0f, 0xac, 0x04};
int wpa_ie_len = ieee->wpa_ie_len;
- struct rtllib_crypt_data *crypt;
+ struct lib80211_crypt_data *crypt;
int encrypt;
- crypt = ieee->crypt[ieee->tx_keyidx];
+ crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
encrypt = (ieee->current_network.capability & WLAN_CAPABILITY_PRIVACY)
|| (ieee->host_encrypt && crypt && crypt->ops &&
- (0 == strcmp(crypt->ops->name, "WEP")));
+ (0 == strcmp(crypt->ops->name, "R-WEP")));
/* simply judge */
if (encrypt && (wpa_ie_len == 0)) {
@@ -3634,6 +3645,7 @@ out:
return ret;
}
+EXPORT_SYMBOL(rtllib_wpa_supplicant_ioctl);
void rtllib_MgntDisconnectIBSS(struct rtllib_device *rtllib)
{
@@ -3719,6 +3731,7 @@ bool rtllib_MgntDisconnect(struct rtllib_device *rtllib, u8 asRsn)
return true;
}
+EXPORT_SYMBOL(rtllib_MgntDisconnect);
void notify_wx_assoc_event(struct rtllib_device *ieee)
{
@@ -3739,3 +3752,4 @@ void notify_wx_assoc_event(struct rtllib_device *ieee)
}
wireless_send_event(ieee->dev, SIOCGIWAP, &wrqu, NULL);
}
+EXPORT_SYMBOL(notify_wx_assoc_event);
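
The beacon, probe-response and association code above decides whether to set WLAN_CAPABILITY_PRIVACY from the active TX key; after the conversion that means checking crypt_info and the renamed "R-WEP" ops. A condensed sketch of that decision, assuming the rtllib_device fields shown in the hunks; ex_should_encrypt() is illustrative.

#include <linux/string.h>
#include <net/lib80211.h>
#include "rtllib.h"

static int ex_should_encrypt(struct rtllib_device *ieee, size_t wpa_ie_len)
{
	struct lib80211_crypt_data *crypt =
		ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];

	return ieee->host_encrypt && crypt && crypt->ops &&
	       (wpa_ie_len || strcmp(crypt->ops->name, "R-WEP") == 0);
}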
diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
index 22988fb..1523bc7 100644
--- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c
@@ -15,7 +15,6 @@
#include "rtllib.h"
-#include "rtl_core.h"
#include "dot11d.h"
/* FIXME: add A freqs */
@@ -25,6 +24,7 @@ const long rtllib_wlan_frequencies[] = {
2452, 2457, 2462, 2467,
2472, 2484
};
+EXPORT_SYMBOL(rtllib_wlan_frequencies);
int rtllib_wx_set_freq(struct rtllib_device *ieee, struct iw_request_info *a,
@@ -82,6 +82,7 @@ out:
up(&ieee->wx_sem);
return ret;
}
+EXPORT_SYMBOL(rtllib_wx_set_freq);
int rtllib_wx_get_freq(struct rtllib_device *ieee,
@@ -97,6 +98,7 @@ int rtllib_wx_get_freq(struct rtllib_device *ieee,
fwrq->e = 1;
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_get_freq);
int rtllib_wx_get_wap(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -125,6 +127,7 @@ int rtllib_wx_get_wap(struct rtllib_device *ieee,
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_get_wap);
int rtllib_wx_set_wap(struct rtllib_device *ieee,
@@ -184,6 +187,7 @@ out:
up(&ieee->wx_sem);
return ret;
}
+EXPORT_SYMBOL(rtllib_wx_set_wap);
int rtllib_wx_get_essid(struct rtllib_device *ieee, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
@@ -220,6 +224,7 @@ out:
return ret;
}
+EXPORT_SYMBOL(rtllib_wx_get_essid);
int rtllib_wx_set_rate(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -231,6 +236,7 @@ int rtllib_wx_set_rate(struct rtllib_device *ieee,
ieee->rate = target_rate/100000;
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_set_rate);
int rtllib_wx_get_rate(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -243,6 +249,7 @@ int rtllib_wx_get_rate(struct rtllib_device *ieee,
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_get_rate);
int rtllib_wx_set_rts(struct rtllib_device *ieee,
@@ -259,6 +266,7 @@ int rtllib_wx_set_rts(struct rtllib_device *ieee,
}
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_set_rts);
int rtllib_wx_get_rts(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -269,6 +277,7 @@ int rtllib_wx_get_rts(struct rtllib_device *ieee,
wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_get_rts);
int rtllib_wx_set_mode(struct rtllib_device *ieee, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
@@ -314,6 +323,7 @@ out:
up(&ieee->wx_sem);
return set_mode_status;
}
+EXPORT_SYMBOL(rtllib_wx_set_mode);
void rtllib_wx_sync_scan_wq(void *data)
{
@@ -428,6 +438,7 @@ out:
up(&ieee->wx_sem);
return ret;
}
+EXPORT_SYMBOL(rtllib_wx_set_scan);
int rtllib_wx_set_essid(struct rtllib_device *ieee,
struct iw_request_info *a,
@@ -490,6 +501,7 @@ out:
up(&ieee->wx_sem);
return ret;
}
+EXPORT_SYMBOL(rtllib_wx_set_essid);
int rtllib_wx_get_mode(struct rtllib_device *ieee, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
@@ -497,6 +509,7 @@ int rtllib_wx_get_mode(struct rtllib_device *ieee, struct iw_request_info *a,
wrqu->mode = ieee->iw_mode;
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_get_mode);
int rtllib_wx_set_rawtx(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -533,6 +546,7 @@ int rtllib_wx_set_rawtx(struct rtllib_device *ieee,
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_set_rawtx);
int rtllib_wx_get_name(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -548,6 +562,7 @@ int rtllib_wx_get_name(struct rtllib_device *ieee,
strcat(wrqu->name, "n");
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_get_name);
/* this is mostly stolen from hostap */
@@ -605,6 +620,7 @@ exit:
return ret;
}
+EXPORT_SYMBOL(rtllib_wx_set_power);
/* this is stolen from hostap */
int rtllib_wx_get_power(struct rtllib_device *ieee,
@@ -643,3 +659,4 @@ exit:
return ret;
}
+EXPORT_SYMBOL(rtllib_wx_get_power);
diff --git a/drivers/staging/rtl8192e/rtllib_tx.c b/drivers/staging/rtl8192e/rtllib_tx.c
index 44e8006..f451bfc 100644
--- a/drivers/staging/rtl8192e/rtllib_tx.c
+++ b/drivers/staging/rtl8192e/rtllib_tx.c
@@ -46,7 +46,6 @@
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
-#include <linux/version.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
@@ -180,10 +179,10 @@ inline int rtllib_put_snap(u8 *data, u16 h_proto)
int rtllib_encrypt_fragment(struct rtllib_device *ieee, struct sk_buff *frag,
int hdr_len)
{
- struct rtllib_crypt_data *crypt = NULL;
+ struct lib80211_crypt_data *crypt = NULL;
int res;
- crypt = ieee->crypt[ieee->tx_keyidx];
+ crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
if (!(crypt && crypt->ops)) {
printk(KERN_INFO "=========>%s(), crypt is null\n", __func__);
@@ -569,7 +568,7 @@ int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
};
u8 dest[ETH_ALEN], src[ETH_ALEN];
int qos_actived = ieee->current_network.qos_data.active;
- struct rtllib_crypt_data *crypt = NULL;
+ struct lib80211_crypt_data *crypt = NULL;
struct cb_desc *tcb_desc;
u8 bIsMulticast = false;
u8 IsAmsdu = false;
@@ -646,7 +645,7 @@ int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
}
skb->priority = rtllib_classify(skb, IsAmsdu);
- crypt = ieee->crypt[ieee->tx_keyidx];
+ crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
ieee->host_encrypt && crypt && crypt->ops;
if (!encrypt && ieee->ieee802_1x &&
@@ -742,8 +741,10 @@ int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
/* Each fragment may need to have room for encryptiong
* pre/postfix */
if (encrypt) {
- bytes_per_frag -= crypt->ops->extra_prefix_len +
- crypt->ops->extra_postfix_len;
+ bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
+ crypt->ops->extra_mpdu_postfix_len +
+ crypt->ops->extra_msdu_prefix_len +
+ crypt->ops->extra_msdu_postfix_len;
}
/* Number of fragments is the total bytes_per_frag /
* payload_per_fragment */
@@ -791,7 +792,8 @@ int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
else
tcb_desc->bHwSec = 0;
skb_reserve(skb_frag,
- crypt->ops->extra_prefix_len);
+ crypt->ops->extra_mpdu_prefix_len +
+ crypt->ops->extra_msdu_prefix_len);
} else {
tcb_desc->bHwSec = 0;
}
@@ -965,3 +967,4 @@ int rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
memset(skb->cb, 0, sizeof(skb->cb));
return rtllib_xmit_inter(skb, dev);
}
+EXPORT_SYMBOL(rtllib_xmit);
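
The TX path above sizes each fragment by charging the full per-MPDU and per-MSDU crypto overhead against the fragment budget. A simplified sketch of that computation, ignoring the driver's separate FCS and QoS accounting; ex_bytes_per_frag() is illustrative.

#include <net/lib80211.h>

static int ex_bytes_per_frag(int frag_size, int hdr_len,
			     const struct lib80211_crypto_ops *ops)
{
	int bytes = frag_size - hdr_len;

	if (ops)	/* only when host encryption is active */
		bytes -= ops->extra_mpdu_prefix_len +
			 ops->extra_mpdu_postfix_len +
			 ops->extra_msdu_prefix_len +
			 ops->extra_msdu_postfix_len;
	return bytes;
}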
diff --git a/drivers/staging/rtl8192e/rtllib_wx.c b/drivers/staging/rtl8192e/rtllib_wx.c
index 8cea4a6..c27ff7e 100644
--- a/drivers/staging/rtl8192e/rtllib_wx.c
+++ b/drivers/staging/rtl8192e/rtllib_wx.c
@@ -30,7 +30,6 @@
******************************************************************************/
#include <linux/wireless.h>
-#include <linux/version.h>
#include <linux/kmod.h>
#include <linux/module.h>
@@ -295,6 +294,7 @@ int rtllib_wx_get_scan(struct rtllib_device *ieee,
return err;
}
+EXPORT_SYMBOL(rtllib_wx_get_scan);
int rtllib_wx_set_encode(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -306,44 +306,44 @@ int rtllib_wx_set_encode(struct rtllib_device *ieee,
.flags = 0
};
int i, key, key_provided, len;
- struct rtllib_crypt_data **crypt;
+ struct lib80211_crypt_data **crypt;
RTLLIB_DEBUG_WX("SET_ENCODE\n");
key = erq->flags & IW_ENCODE_INDEX;
if (key) {
- if (key > WEP_KEYS)
+ if (key > NUM_WEP_KEYS)
return -EINVAL;
key--;
key_provided = 1;
} else {
key_provided = 0;
- key = ieee->tx_keyidx;
+ key = ieee->crypt_info.tx_keyidx;
}
RTLLIB_DEBUG_WX("Key: %d [%s]\n", key, key_provided ?
"provided" : "default");
- crypt = &ieee->crypt[key];
+ crypt = &ieee->crypt_info.crypt[key];
if (erq->flags & IW_ENCODE_DISABLED) {
if (key_provided && *crypt) {
RTLLIB_DEBUG_WX("Disabling encryption on key %d.\n",
key);
- rtllib_crypt_delayed_deinit(ieee, crypt);
+ lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
} else
RTLLIB_DEBUG_WX("Disabling encryption.\n");
/* Check all the keys to see if any are still configured,
* and if no key index was provided, de-init them all */
- for (i = 0; i < WEP_KEYS; i++) {
- if (ieee->crypt[i] != NULL) {
+ for (i = 0; i < NUM_WEP_KEYS; i++) {
+ if (ieee->crypt_info.crypt[i] != NULL) {
if (key_provided)
break;
- rtllib_crypt_delayed_deinit(ieee,
- &ieee->crypt[i]);
+ lib80211_crypt_delayed_deinit(&ieee->crypt_info,
+ &ieee->crypt_info.crypt[i]);
}
}
- if (i == WEP_KEYS) {
+ if (i == NUM_WEP_KEYS) {
sec.enabled = 0;
sec.level = SEC_LEVEL_0;
sec.flags |= SEC_ENABLED | SEC_LEVEL;
@@ -358,25 +358,24 @@ int rtllib_wx_set_encode(struct rtllib_device *ieee,
sec.flags |= SEC_ENABLED;
if (*crypt != NULL && (*crypt)->ops != NULL &&
- strcmp((*crypt)->ops->name, "WEP") != 0) {
+ strcmp((*crypt)->ops->name, "R-WEP") != 0) {
/* changing to use WEP; deinit previously used algorithm
* on this key */
- rtllib_crypt_delayed_deinit(ieee, crypt);
+ lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
}
if (*crypt == NULL) {
- struct rtllib_crypt_data *new_crypt;
+ struct lib80211_crypt_data *new_crypt;
/* take WEP into use */
- new_crypt = kmalloc(sizeof(struct rtllib_crypt_data),
+ new_crypt = kzalloc(sizeof(struct lib80211_crypt_data),
GFP_KERNEL);
if (new_crypt == NULL)
return -ENOMEM;
- memset(new_crypt, 0, sizeof(struct rtllib_crypt_data));
- new_crypt->ops = rtllib_get_crypto_ops("WEP");
+ new_crypt->ops = lib80211_get_crypto_ops("R-WEP");
if (!new_crypt->ops) {
request_module("rtllib_crypt_wep");
- new_crypt->ops = rtllib_get_crypto_ops("WEP");
+ new_crypt->ops = lib80211_get_crypto_ops("R-WEP");
}
if (new_crypt->ops)
@@ -412,7 +411,7 @@ int rtllib_wx_set_encode(struct rtllib_device *ieee,
* explicitely set */
if (key == sec.active_key)
sec.flags |= SEC_ACTIVE_KEY;
- ieee->tx_keyidx = key;
+ ieee->crypt_info.tx_keyidx = key;
} else {
len = (*crypt)->ops->get_key(sec.keys[key], WEP_KEY_LEN,
@@ -435,7 +434,7 @@ int rtllib_wx_set_encode(struct rtllib_device *ieee,
if (key_provided) {
RTLLIB_DEBUG_WX(
"Setting key %d to default Tx key.\n", key);
- ieee->tx_keyidx = key;
+ ieee->crypt_info.tx_keyidx = key;
sec.active_key = key;
sec.flags |= SEC_ACTIVE_KEY;
}
@@ -470,6 +469,7 @@ int rtllib_wx_set_encode(struct rtllib_device *ieee,
}
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_set_encode);
int rtllib_wx_get_encode(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -477,7 +477,7 @@ int rtllib_wx_get_encode(struct rtllib_device *ieee,
{
struct iw_point *erq = &(wrqu->encoding);
int len, key;
- struct rtllib_crypt_data *crypt;
+ struct lib80211_crypt_data *crypt;
RTLLIB_DEBUG_WX("GET_ENCODE\n");
@@ -486,13 +486,13 @@ int rtllib_wx_get_encode(struct rtllib_device *ieee,
key = erq->flags & IW_ENCODE_INDEX;
if (key) {
- if (key > WEP_KEYS)
+ if (key > NUM_WEP_KEYS)
return -EINVAL;
key--;
} else {
- key = ieee->tx_keyidx;
+ key = ieee->crypt_info.tx_keyidx;
}
- crypt = ieee->crypt[key];
+ crypt = ieee->crypt_info.crypt[key];
erq->flags = key + 1;
@@ -513,6 +513,7 @@ int rtllib_wx_get_encode(struct rtllib_device *ieee,
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_get_encode);
int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -525,29 +526,29 @@ int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
int i, idx;
int group_key = 0;
const char *alg, *module;
- struct rtllib_crypto_ops *ops;
- struct rtllib_crypt_data **crypt;
+ struct lib80211_crypto_ops *ops;
+ struct lib80211_crypt_data **crypt;
struct rtllib_security sec = {
.flags = 0,
};
idx = encoding->flags & IW_ENCODE_INDEX;
if (idx) {
- if (idx < 1 || idx > WEP_KEYS)
+ if (idx < 1 || idx > NUM_WEP_KEYS)
return -EINVAL;
idx--;
} else{
- idx = ieee->tx_keyidx;
+ idx = ieee->crypt_info.tx_keyidx;
}
if (ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
- crypt = &ieee->crypt[idx];
+ crypt = &ieee->crypt_info.crypt[idx];
group_key = 1;
} else {
/* some Cisco APs use idx>0 for unicast in dynamic WEP */
if (idx != 0 && ext->alg != IW_ENCODE_ALG_WEP)
return -EINVAL;
if (ieee->iw_mode == IW_MODE_INFRA)
- crypt = &ieee->crypt[idx];
+ crypt = &ieee->crypt_info.crypt[idx];
else
return -EINVAL;
}
@@ -556,13 +557,13 @@ int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
if ((encoding->flags & IW_ENCODE_DISABLED) ||
ext->alg == IW_ENCODE_ALG_NONE) {
if (*crypt)
- rtllib_crypt_delayed_deinit(ieee, crypt);
+ lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
- for (i = 0; i < WEP_KEYS; i++) {
- if (ieee->crypt[i] != NULL)
+ for (i = 0; i < NUM_WEP_KEYS; i++) {
+ if (ieee->crypt_info.crypt[i] != NULL)
break;
}
- if (i == WEP_KEYS) {
+ if (i == NUM_WEP_KEYS) {
sec.enabled = 0;
sec.level = SEC_LEVEL_0;
sec.flags |= SEC_LEVEL;
@@ -573,15 +574,15 @@ int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
sec.enabled = 1;
switch (ext->alg) {
case IW_ENCODE_ALG_WEP:
- alg = "WEP";
+ alg = "R-WEP";
module = "rtllib_crypt_wep";
break;
case IW_ENCODE_ALG_TKIP:
- alg = "TKIP";
+ alg = "R-TKIP";
module = "rtllib_crypt_tkip";
break;
case IW_ENCODE_ALG_CCMP:
- alg = "CCMP";
+ alg = "R-CCMP";
module = "rtllib_crypt_ccmp";
break;
default:
@@ -592,14 +593,14 @@ int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
}
printk(KERN_INFO "alg name:%s\n", alg);
- ops = rtllib_get_crypto_ops(alg);
+ ops = lib80211_get_crypto_ops(alg);
if (ops == NULL) {
char tempbuf[100];
memset(tempbuf, 0x00, 100);
sprintf(tempbuf, "%s", module);
request_module("%s", tempbuf);
- ops = rtllib_get_crypto_ops(alg);
+ ops = lib80211_get_crypto_ops(alg);
}
if (ops == NULL) {
RTLLIB_DEBUG_WX("%s: unknown crypto alg %d\n",
@@ -610,9 +611,9 @@ int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
}
if (*crypt == NULL || (*crypt)->ops != ops) {
- struct rtllib_crypt_data *new_crypt;
+ struct lib80211_crypt_data *new_crypt;
- rtllib_crypt_delayed_deinit(ieee, crypt);
+ lib80211_crypt_delayed_deinit(&ieee->crypt_info, crypt);
new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL);
if (new_crypt == NULL) {
@@ -641,7 +642,7 @@ int rtllib_wx_set_encode_ext(struct rtllib_device *ieee,
goto done;
}
if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
- ieee->tx_keyidx = idx;
+ ieee->crypt_info.tx_keyidx = idx;
sec.active_key = idx;
sec.flags |= SEC_ACTIVE_KEY;
}
@@ -674,6 +675,7 @@ done:
}
return ret;
}
+EXPORT_SYMBOL(rtllib_wx_set_encode_ext);
int rtllib_wx_get_encode_ext(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -681,7 +683,7 @@ int rtllib_wx_get_encode_ext(struct rtllib_device *ieee,
{
struct iw_point *encoding = &wrqu->encoding;
struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
- struct rtllib_crypt_data *crypt;
+ struct lib80211_crypt_data *crypt;
int idx, max_key_len;
max_key_len = encoding->length - sizeof(*ext);
@@ -690,18 +692,18 @@ int rtllib_wx_get_encode_ext(struct rtllib_device *ieee,
idx = encoding->flags & IW_ENCODE_INDEX;
if (idx) {
- if (idx < 1 || idx > WEP_KEYS)
+ if (idx < 1 || idx > NUM_WEP_KEYS)
return -EINVAL;
idx--;
} else {
- idx = ieee->tx_keyidx;
+ idx = ieee->crypt_info.tx_keyidx;
}
if (!(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) &&
(ext->alg != IW_ENCODE_ALG_WEP))
if (idx != 0 || (ieee->iw_mode != IW_MODE_INFRA))
return -EINVAL;
- crypt = ieee->crypt[idx];
+ crypt = ieee->crypt_info.crypt[idx];
encoding->flags = idx + 1;
memset(ext, 0, sizeof(*ext));
@@ -711,11 +713,11 @@ int rtllib_wx_get_encode_ext(struct rtllib_device *ieee,
ext->key_len = 0;
encoding->flags |= IW_ENCODE_DISABLED;
} else {
- if (strcmp(crypt->ops->name, "WEP") == 0)
+ if (strcmp(crypt->ops->name, "R-WEP") == 0)
ext->alg = IW_ENCODE_ALG_WEP;
- else if (strcmp(crypt->ops->name, "TKIP"))
+ else if (strcmp(crypt->ops->name, "R-TKIP"))
ext->alg = IW_ENCODE_ALG_TKIP;
- else if (strcmp(crypt->ops->name, "CCMP"))
+ else if (strcmp(crypt->ops->name, "R-CCMP"))
ext->alg = IW_ENCODE_ALG_CCMP;
else
return -EINVAL;
@@ -778,6 +780,7 @@ int rtllib_wx_set_mlme(struct rtllib_device *ieee,
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_set_mlme);
int rtllib_wx_set_auth(struct rtllib_device *ieee,
struct iw_request_info *info,
@@ -830,6 +833,7 @@ int rtllib_wx_set_auth(struct rtllib_device *ieee,
}
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_set_auth);
int rtllib_wx_set_gen_ie(struct rtllib_device *ieee, u8 *ie, size_t len)
{
@@ -846,10 +850,9 @@ int rtllib_wx_set_gen_ie(struct rtllib_device *ieee, u8 *ie, size_t len)
ieee->wps_ie_len = (len < MAX_WZC_IE_LEN) ? (len) :
(MAX_WZC_IE_LEN);
- buf = kmalloc(ieee->wps_ie_len, GFP_KERNEL);
+ buf = kmemdup(ie, ieee->wps_ie_len, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- memcpy(buf, ie, ieee->wps_ie_len);
ieee->wps_ie = buf;
return 0;
}
@@ -860,10 +863,9 @@ int rtllib_wx_set_gen_ie(struct rtllib_device *ieee, u8 *ie, size_t len)
if (len) {
if (len != ie[1]+2)
return -EINVAL;
- buf = kmalloc(len, GFP_KERNEL);
+ buf = kmemdup(ie, len, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
- memcpy(buf, ie, len);
kfree(ieee->wpa_ie);
ieee->wpa_ie = buf;
ieee->wpa_ie_len = len;
@@ -874,3 +876,4 @@ int rtllib_wx_set_gen_ie(struct rtllib_device *ieee, u8 *ie, size_t len)
}
return 0;
}
+EXPORT_SYMBOL(rtllib_wx_set_gen_ie);
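The hunks above move rtllib_wx.c from the driver's private crypt layer onto the shared lib80211 one (lib80211_get_crypto_ops(), lib80211_crypt_delayed_deinit(), the crypt_info container). As a rough, hedged sketch of that pattern: the function name example_enable_wep is invented, the "R-WEP" string and the crypt_info members are taken from the diff itself, and real callers additionally manage ops->owner module references and per-key priv state via ops->init()/set_key().

#include <linux/errno.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <net/lib80211.h>

static int example_enable_wep(struct lib80211_crypt_info *info, int key)
{
	struct lib80211_crypto_ops *ops;
	struct lib80211_crypt_data *new_crypt;

	ops = lib80211_get_crypto_ops("R-WEP");
	if (!ops) {
		/* the cipher is modular: load it, then look it up again */
		request_module("rtllib_crypt_wep");
		ops = lib80211_get_crypto_ops("R-WEP");
	}
	if (!ops)
		return -ENOENT;

	new_crypt = kzalloc(sizeof(*new_crypt), GFP_KERNEL);
	if (!new_crypt)
		return -ENOMEM;
	new_crypt->ops = ops;

	/* tear down whatever was previously bound to this key index */
	lib80211_crypt_delayed_deinit(info, &info->crypt[key]);
	info->crypt[key] = new_crypt;
	info->tx_keyidx = key;
	return 0;
}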
diff --git a/drivers/staging/rtl8192u/ieee80211/api.c b/drivers/staging/rtl8192u/ieee80211/api.c
deleted file mode 100644
index 5f46e50..0000000
--- a/drivers/staging/rtl8192u/ieee80211/api.c
+++ /dev/null
@@ -1,244 +0,0 @@
-/*
- * Scatterlist Cryptographic API.
- *
- * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
- * Copyright (c) 2002 David S. Miller (davem@redhat.com)
- *
- * Portions derived from Cryptoapi, by Alexander Kjeldaas <astor@fast.no>
- * and Nettle, by Niels Möller.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
-#include "kmap_types.h"
-
-#include <linux/init.h>
-#include <linux/module.h>
-//#include <linux/crypto.h>
-#include "rtl_crypto.h"
-#include <linux/errno.h>
-#include <linux/rwsem.h>
-#include <linux/slab.h>
-#include "internal.h"
-
-LIST_HEAD(crypto_alg_list);
-DECLARE_RWSEM(crypto_alg_sem);
-
-static inline int crypto_alg_get(struct crypto_alg *alg)
-{
- return try_inc_mod_count(alg->cra_module);
-}
-
-static inline void crypto_alg_put(struct crypto_alg *alg)
-{
- if (alg->cra_module)
- __MOD_DEC_USE_COUNT(alg->cra_module);
-}
-
-struct crypto_alg *crypto_alg_lookup(const char *name)
-{
- struct crypto_alg *q, *alg = NULL;
-
- if (!name)
- return NULL;
-
- down_read(&crypto_alg_sem);
-
- list_for_each_entry(q, &crypto_alg_list, cra_list) {
- if (!(strcmp(q->cra_name, name))) {
- if (crypto_alg_get(q))
- alg = q;
- break;
- }
- }
-
- up_read(&crypto_alg_sem);
- return alg;
-}
-
-static int crypto_init_flags(struct crypto_tfm *tfm, u32 flags)
-{
- tfm->crt_flags = 0;
-
- switch (crypto_tfm_alg_type(tfm)) {
- case CRYPTO_ALG_TYPE_CIPHER:
- return crypto_init_cipher_flags(tfm, flags);
-
- case CRYPTO_ALG_TYPE_DIGEST:
- return crypto_init_digest_flags(tfm, flags);
-
- case CRYPTO_ALG_TYPE_COMPRESS:
- return crypto_init_compress_flags(tfm, flags);
-
- default:
- break;
- }
-
- BUG();
- return -EINVAL;
-}
-
-static int crypto_init_ops(struct crypto_tfm *tfm)
-{
- switch (crypto_tfm_alg_type(tfm)) {
- case CRYPTO_ALG_TYPE_CIPHER:
- return crypto_init_cipher_ops(tfm);
-
- case CRYPTO_ALG_TYPE_DIGEST:
- return crypto_init_digest_ops(tfm);
-
- case CRYPTO_ALG_TYPE_COMPRESS:
- return crypto_init_compress_ops(tfm);
-
- default:
- break;
- }
-
- BUG();
- return -EINVAL;
-}
-
-static void crypto_exit_ops(struct crypto_tfm *tfm)
-{
- switch (crypto_tfm_alg_type(tfm)) {
- case CRYPTO_ALG_TYPE_CIPHER:
- crypto_exit_cipher_ops(tfm);
- break;
-
- case CRYPTO_ALG_TYPE_DIGEST:
- crypto_exit_digest_ops(tfm);
- break;
-
- case CRYPTO_ALG_TYPE_COMPRESS:
- crypto_exit_compress_ops(tfm);
- break;
-
- default:
- BUG();
-
- }
-}
-
-struct crypto_tfm *crypto_alloc_tfm(const char *name, u32 flags)
-{
- struct crypto_tfm *tfm = NULL;
- struct crypto_alg *alg;
-
- alg = crypto_alg_mod_lookup(name);
- if (alg == NULL)
- goto out;
-
- tfm = kzalloc(sizeof(*tfm) + alg->cra_ctxsize, GFP_KERNEL);
- if (tfm == NULL)
- goto out_put;
-
- tfm->__crt_alg = alg;
-
- if (crypto_init_flags(tfm, flags))
- goto out_free_tfm;
-
- if (crypto_init_ops(tfm)) {
- crypto_exit_ops(tfm);
- goto out_free_tfm;
- }
-
- goto out;
-
-out_free_tfm:
- kfree(tfm);
- tfm = NULL;
-out_put:
- crypto_alg_put(alg);
-out:
- return tfm;
-}
-
-void crypto_free_tfm(struct crypto_tfm *tfm)
-{
- struct crypto_alg *alg = tfm->__crt_alg;
- int size = sizeof(*tfm) + alg->cra_ctxsize;
-
- crypto_exit_ops(tfm);
- crypto_alg_put(alg);
- memset(tfm, 0, size);
- kfree(tfm);
-}
-
-int crypto_register_alg(struct crypto_alg *alg)
-{
- int ret = 0;
- struct crypto_alg *q;
-
- down_write(&crypto_alg_sem);
-
- list_for_each_entry(q, &crypto_alg_list, cra_list) {
- if (!(strcmp(q->cra_name, alg->cra_name))) {
- ret = -EEXIST;
- goto out;
- }
- }
-
- list_add_tail(&alg->cra_list, &crypto_alg_list);
-out:
- up_write(&crypto_alg_sem);
- return ret;
-}
-
-int crypto_unregister_alg(struct crypto_alg *alg)
-{
- int ret = -ENOENT;
- struct crypto_alg *q;
-
- BUG_ON(!alg->cra_module);
-
- down_write(&crypto_alg_sem);
- list_for_each_entry(q, &crypto_alg_list, cra_list) {
- if (alg == q) {
- list_del(&alg->cra_list);
- ret = 0;
- goto out;
- }
- }
-out:
- up_write(&crypto_alg_sem);
- return ret;
-}
-
-int crypto_alg_available(const char *name, u32 flags)
-{
- int ret = 0;
- struct crypto_alg *alg = crypto_alg_mod_lookup(name);
-
- if (alg) {
- crypto_alg_put(alg);
- ret = 1;
- }
-
- return ret;
-}
-
-static int __init init_crypto(void)
-{
- printk(KERN_INFO "Initializing Cryptographic API\n");
- crypto_init_proc();
- return 0;
-}
-
-__initcall(init_crypto);
-
-/*
-EXPORT_SYMBOL_GPL(crypto_register_alg);
-EXPORT_SYMBOL_GPL(crypto_unregister_alg);
-EXPORT_SYMBOL_GPL(crypto_alloc_tfm);
-EXPORT_SYMBOL_GPL(crypto_free_tfm);
-EXPORT_SYMBOL_GPL(crypto_alg_available);
-*/
-
-EXPORT_SYMBOL_NOVERS(crypto_register_alg);
-EXPORT_SYMBOL_NOVERS(crypto_unregister_alg);
-EXPORT_SYMBOL_NOVERS(crypto_alloc_tfm);
-EXPORT_SYMBOL_NOVERS(crypto_free_tfm);
-EXPORT_SYMBOL_NOVERS(crypto_alg_available);
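The deleted api.c was a private fork of the 2.5-era scatterlist crypto API (its own crypto_alg_lookup(), crypto_alloc_tfm() and so on). In-tree code queries the shared crypto layer instead; a minimal sketch of the equivalent availability check, assuming "aes" is the transform the caller cares about:

#include <linux/crypto.h>
#include <linux/kernel.h>

static bool example_have_aes(void)
{
	/* crypto_has_alg() returns nonzero when a matching transform
	 * (built-in or loadable) is available */
	return crypto_has_alg("aes", 0, 0);
}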
diff --git a/drivers/staging/rtl8712/drv_types.h b/drivers/staging/rtl8712/drv_types.h
index 9b5d771..ed85b44 100644
--- a/drivers/staging/rtl8712/drv_types.h
+++ b/drivers/staging/rtl8712/drv_types.h
@@ -37,6 +37,8 @@ struct _adapter;
#include "wlan_bssdef.h"
#include "rtl8712_spec.h"
#include "rtl8712_hal.h"
+#include <linux/mutex.h>
+#include <linux/completion.h>
enum _NIC_VERSION {
RTL8711_NIC,
@@ -168,6 +170,7 @@ struct _adapter {
s32 bSurpriseRemoved;
u32 IsrContent;
u32 ImrContent;
+ bool fw_found;
u8 EepromAddressSize;
u8 hw_init_completed;
struct task_struct *cmdThread;
@@ -184,6 +187,10 @@ struct _adapter {
_workitem wkFilterRxFF0;
u8 blnEnableRxFF0Filter;
spinlock_t lockRxFF0Filter;
+ const struct firmware *fw;
+ struct usb_interface *pusb_intf;
+ struct mutex mutex_start;
+ struct completion rtl8712_fw_ready;
};
static inline u8 *myid(struct eeprom_priv *peepriv)
diff --git a/drivers/staging/rtl8712/hal_init.c b/drivers/staging/rtl8712/hal_init.c
index d0029aa..cc893c0 100644
--- a/drivers/staging/rtl8712/hal_init.c
+++ b/drivers/staging/rtl8712/hal_init.c
@@ -42,29 +42,56 @@
#define FWBUFF_ALIGN_SZ 512
#define MAX_DUMP_FWSZ 49152 /*default = 49152 (48k)*/
-static u32 rtl871x_open_fw(struct _adapter *padapter, void **pphfwfile_hdl,
- const u8 **ppmappedfw)
+static void rtl871x_load_fw_cb(const struct firmware *firmware, void *context)
{
+ struct _adapter *padapter = context;
+
+ complete(&padapter->rtl8712_fw_ready);
+ if (!firmware) {
+ struct usb_device *udev = padapter->dvobjpriv.pusbdev;
+ struct usb_interface *pusb_intf = padapter->pusb_intf;
+ printk(KERN_ERR "r8712u: Firmware request failed\n");
+ padapter->fw_found = false;
+ usb_put_dev(udev);
+ usb_set_intfdata(pusb_intf, NULL);
+ return;
+ }
+ padapter->fw = firmware;
+ padapter->fw_found = true;
+ /* firmware available - start netdev */
+ register_netdev(padapter->pnetdev);
+}
+
+static const char firmware_file[] = "rtlwifi/rtl8712u.bin";
+
+int rtl871x_load_fw(struct _adapter *padapter)
+{
+ struct device *dev = &padapter->dvobjpriv.pusbdev->dev;
int rc;
- const char firmware_file[] = "rtlwifi/rtl8712u.bin";
- const struct firmware **praw = (const struct firmware **)
- (pphfwfile_hdl);
- struct dvobj_priv *pdvobjpriv = (struct dvobj_priv *)
- (&padapter->dvobjpriv);
- struct usb_device *pusbdev = pdvobjpriv->pusbdev;
+ init_completion(&padapter->rtl8712_fw_ready);
printk(KERN_INFO "r8712u: Loading firmware from \"%s\"\n",
firmware_file);
- rc = request_firmware(praw, firmware_file, &pusbdev->dev);
- if (rc < 0) {
- printk(KERN_ERR "r8712u: Unable to load firmware\n");
- printk(KERN_ERR "r8712u: Install latest linux-firmware\n");
+ rc = request_firmware_nowait(THIS_MODULE, 1, firmware_file, dev,
+ GFP_KERNEL, padapter, rtl871x_load_fw_cb);
+ if (rc)
+ printk(KERN_ERR "r8712u: Firmware request error %d\n", rc);
+ return rc;
+}
+MODULE_FIRMWARE("rtlwifi/rtl8712u.bin");
+
+static u32 rtl871x_open_fw(struct _adapter *padapter, const u8 **ppmappedfw)
+{
+ const struct firmware **praw = &padapter->fw;
+
+ if (padapter->fw->size > 200000) {
+ printk(KERN_ERR "r8712u: Bad fw->size of %d\n",
+ (int)padapter->fw->size);
return 0;
}
*ppmappedfw = (u8 *)((*praw)->data);
return (*praw)->size;
}
-MODULE_FIRMWARE("rtlwifi/rtl8712u.bin");
static void fill_fwpriv(struct _adapter *padapter, struct fw_priv *pfwpriv)
{
@@ -142,18 +169,17 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
uint dump_imem_sz, imem_sz, dump_emem_sz, emem_sz; /* max = 49152; */
struct fw_hdr fwhdr;
u32 ulfilelength; /* FW file size */
- void *phfwfile_hdl = NULL;
const u8 *pmappedfw = NULL;
u8 *ptmpchar = NULL, *ppayload, *ptr;
struct tx_desc *ptx_desc;
u32 txdscp_sz = sizeof(struct tx_desc);
u8 ret = _FAIL;
- ulfilelength = rtl871x_open_fw(padapter, &phfwfile_hdl, &pmappedfw);
+ ulfilelength = rtl871x_open_fw(padapter, &pmappedfw);
if (pmappedfw && (ulfilelength > 0)) {
update_fwhdr(&fwhdr, pmappedfw);
if (chk_fwhdr(&fwhdr, ulfilelength) == _FAIL)
- goto firmware_rel;
+ return ret;
fill_fwpriv(padapter, &fwhdr.fwpriv);
/* firmware check ok */
maxlen = (fwhdr.img_IMEM_size > fwhdr.img_SRAM_size) ?
@@ -161,7 +187,7 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
maxlen += txdscp_sz;
ptmpchar = _malloc(maxlen + FWBUFF_ALIGN_SZ);
if (ptmpchar == NULL)
- goto firmware_rel;
+ return ret;
ptx_desc = (struct tx_desc *)(ptmpchar + FWBUFF_ALIGN_SZ -
((addr_t)(ptmpchar) & (FWBUFF_ALIGN_SZ - 1)));
@@ -297,8 +323,6 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
exit_fail:
kfree(ptmpchar);
-firmware_rel:
- release_firmware((struct firmware *)phfwfile_hdl);
return ret;
}
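The hal_init.c change swaps the blocking request_firmware() for request_firmware_nowait() plus a completion, so probe can return before the firmware image is available. A stripped-down sketch of that pattern; struct example_dev and its member names are illustrative, not the driver's own:

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/gfp.h>
#include <linux/module.h>

struct example_dev {
	struct completion fw_ready;
	const struct firmware *fw;
	bool fw_found;
};

static void example_fw_cb(const struct firmware *fw, void *context)
{
	struct example_dev *priv = context;

	priv->fw = fw;			/* NULL when the request failed */
	priv->fw_found = fw != NULL;
	complete(&priv->fw_ready);	/* lets teardown paths wait safely */
}

static int example_load_fw(struct example_dev *priv, struct device *dev)
{
	init_completion(&priv->fw_ready);
	return request_firmware_nowait(THIS_MODULE, 1, "rtlwifi/rtl8712u.bin",
				       dev, GFP_KERNEL, priv, example_fw_cb);
}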
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index 9a75c6d..98a3d68 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -31,6 +31,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kthread.h>
+#include <linux/firmware.h>
#include "osdep_service.h"
#include "drv_types.h"
#include "xmit_osdep.h"
@@ -264,12 +265,12 @@ static void start_drv_timers(struct _adapter *padapter)
void r8712_stop_drv_timers(struct _adapter *padapter)
{
_cancel_timer_ex(&padapter->mlmepriv.assoc_timer);
- _cancel_timer_ex(&padapter->mlmepriv.sitesurveyctrl.
- sitesurvey_ctrl_timer);
_cancel_timer_ex(&padapter->securitypriv.tkip_timer);
_cancel_timer_ex(&padapter->mlmepriv.scan_to_timer);
_cancel_timer_ex(&padapter->mlmepriv.dhcp_timer);
_cancel_timer_ex(&padapter->mlmepriv.wdg_timer);
+ _cancel_timer_ex(&padapter->mlmepriv.sitesurveyctrl.
+ sitesurvey_ctrl_timer);
}
static u8 init_default_value(struct _adapter *padapter)
@@ -347,7 +348,8 @@ u8 r8712_free_drv_sw(struct _adapter *padapter)
r8712_free_mlme_priv(&padapter->mlmepriv);
r8712_free_io_queue(padapter);
_free_xmit_priv(&padapter->xmitpriv);
- _r8712_free_sta_priv(&padapter->stapriv);
+ if (padapter->fw_found)
+ _r8712_free_sta_priv(&padapter->stapriv);
_r8712_free_recv_priv(&padapter->recvpriv);
mp871xdeinit(padapter);
if (pnetdev)
@@ -388,6 +390,7 @@ static int netdev_open(struct net_device *pnetdev)
{
struct _adapter *padapter = (struct _adapter *)netdev_priv(pnetdev);
+ mutex_lock(&padapter->mutex_start);
if (padapter->bup == false) {
padapter->bDriverStopped = false;
padapter->bSurpriseRemoved = false;
@@ -435,11 +438,13 @@ static int netdev_open(struct net_device *pnetdev)
/* start driver mlme relation timer */
start_drv_timers(padapter);
padapter->ledpriv.LedControlHandler(padapter, LED_CTL_NO_LINK);
+ mutex_unlock(&padapter->mutex_start);
return 0;
netdev_open_error:
padapter->bup = false;
netif_carrier_off(pnetdev);
netif_stop_queue(pnetdev);
+ mutex_unlock(&padapter->mutex_start);
return -1;
}
@@ -473,6 +478,9 @@ static int netdev_close(struct net_device *pnetdev)
r8712_free_network_queue(padapter);
/* The interface is no longer Up: */
padapter->bup = false;
+ release_firmware(padapter->fw);
+ /* never exit with a firmware callback pending */
+ wait_for_completion(&padapter->rtl8712_fw_ready);
return 0;
}
diff --git a/drivers/staging/rtl8712/rtl8712_hal.h b/drivers/staging/rtl8712/rtl8712_hal.h
index 665e718..d19865a 100644
--- a/drivers/staging/rtl8712/rtl8712_hal.h
+++ b/drivers/staging/rtl8712/rtl8712_hal.h
@@ -145,5 +145,6 @@ struct hal_priv {
};
uint rtl8712_hal_init(struct _adapter *padapter);
+int rtl871x_load_fw(struct _adapter *padapter);
#endif
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index ef8eb6c..4277d03 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -551,7 +551,7 @@ void r8712_survey_event_callback(struct _adapter *adapter, u8 *pbuf)
ibss_wlan = r8712_find_network(
&pmlmepriv->scanned_queue,
pnetwork->MacAddress);
- if (!ibss_wlan) {
+ if (ibss_wlan) {
memcpy(ibss_wlan->network.IEs,
pnetwork->IEs, 8);
goto exit;
diff --git a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
index 64f5696..81bde80 100644
--- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c
+++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
@@ -43,6 +43,7 @@ static void _init_stainfo(struct sta_info *psta)
_r8712_init_sta_xmit_priv(&psta->sta_xmitpriv);
_r8712_init_sta_recv_priv(&psta->sta_recvpriv);
#ifdef CONFIG_R8712_AP
+ _init_listhead(&psta->asoc_list);
_init_listhead(&psta->auth_list);
#endif
}
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index 5385da2..9bade18 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -89,6 +89,7 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
{USB_DEVICE(0x0DF6, 0x0045)},
{USB_DEVICE(0x0DF6, 0x0059)}, /* 11n mode disable */
{USB_DEVICE(0x0DF6, 0x004B)},
+ {USB_DEVICE(0x0DF6, 0x005B)},
{USB_DEVICE(0x0DF6, 0x005D)},
{USB_DEVICE(0x0DF6, 0x0063)},
/* Sweex */
@@ -389,6 +390,7 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
pdvobjpriv = &padapter->dvobjpriv;
pdvobjpriv->padapter = padapter;
padapter->dvobjpriv.pusbdev = udev;
+ padapter->pusb_intf = pusb_intf;
usb_set_intfdata(pusb_intf, pnetdev);
SET_NETDEV_DEV(pnetdev, &pusb_intf->dev);
/* step 2. */
@@ -595,10 +597,11 @@ static int r871xu_drv_init(struct usb_interface *pusb_intf,
"%pM\n", mac);
memcpy(pnetdev->dev_addr, mac, ETH_ALEN);
}
- /* step 6. Tell the network stack we exist */
- if (register_netdev(pnetdev) != 0)
+ /* step 6. Load the firmware asynchronously */
+ if (rtl871x_load_fw(padapter))
goto error;
spin_lock_init(&padapter->lockRxFF0Filter);
+ mutex_init(&padapter->mutex_start);
return 0;
error:
usb_put_dev(udev);
@@ -629,7 +632,8 @@ static void r871xu_dev_remove(struct usb_interface *pusb_intf)
flush_scheduled_work();
udelay(1);
/*Stop driver mlme relation timer */
- r8712_stop_drv_timers(padapter);
+ if (padapter->fw_found)
+ r8712_stop_drv_timers(padapter);
r871x_dev_unload(padapter);
r8712_free_drv_sw(padapter);
}
diff --git a/drivers/staging/rts5139/rts51x.c b/drivers/staging/rts5139/rts51x.c
index d9cee6d..2b9f785 100644
--- a/drivers/staging/rts5139/rts51x.c
+++ b/drivers/staging/rts5139/rts51x.c
@@ -934,34 +934,4 @@ struct usb_driver rts51x_driver = {
.soft_unbind = 1,
};
-static int __init rts51x_init(void)
-{
- int retval;
-
- printk(KERN_INFO "Initializing %s USB card reader driver...\n",
- RTS51X_NAME);
-
- /* register the driver, return usb_register return code if error */
- retval = usb_register(&rts51x_driver);
- if (retval == 0) {
- printk(KERN_INFO
- "Realtek %s USB card reader support registered.\n",
- RTS51X_NAME);
- }
- return retval;
-}
-
-static void __exit rts51x_exit(void)
-{
- RTS51X_DEBUGP("rts51x_exit() called\n");
-
- /* Deregister the driver
- * This will cause disconnect() to be called for each
- * attached unit
- */
- RTS51X_DEBUGP("-- calling usb_deregister()\n");
- usb_deregister(&rts51x_driver);
-}
-
-module_init(rts51x_init);
-module_exit(rts51x_exit);
+module_usb_driver(rts51x_driver);
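module_usb_driver() replaces the hand-rolled init/exit pair deleted above. Roughly, and with the logging dropped, the macro generates the equivalent of:

#include <linux/module.h>
#include <linux/usb.h>

/* rts51x_driver is the struct usb_driver defined earlier in this file */
static int __init rts51x_driver_init(void)
{
	return usb_register(&rts51x_driver);
}
module_init(rts51x_driver_init);

static void __exit rts51x_driver_exit(void)
{
	usb_deregister(&rts51x_driver);
}
module_exit(rts51x_driver_exit);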
diff --git a/drivers/staging/rts5139/rts51x.h b/drivers/staging/rts5139/rts51x.h
index 9415d5c..b2c5839 100644
--- a/drivers/staging/rts5139/rts51x.h
+++ b/drivers/staging/rts5139/rts51x.h
@@ -34,7 +34,6 @@
#include <linux/mutex.h>
#include <linux/cdrom.h>
#include <linux/kernel.h>
-#include <linux/version.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
diff --git a/drivers/staging/rts5139/rts51x_transport.h b/drivers/staging/rts5139/rts51x_transport.h
index f7aa87f..8464c48 100644
--- a/drivers/staging/rts5139/rts51x_transport.h
+++ b/drivers/staging/rts5139/rts51x_transport.h
@@ -28,7 +28,6 @@
#define __RTS51X_TRANSPORT_H
#include <linux/kernel.h>
-#include <linux/version.h>
#include "rts51x.h"
#include "rts51x_chip.h"
diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c
index 115635f..a7feb3e 100644
--- a/drivers/staging/rts_pstor/rtsx.c
+++ b/drivers/staging/rts_pstor/rtsx.c
@@ -466,8 +466,6 @@ static int rtsx_control_thread(void *__dev)
struct rtsx_chip *chip = dev->chip;
struct Scsi_Host *host = rtsx_to_host(dev);
- current->flags |= PF_NOFREEZE;
-
for (;;) {
if (wait_for_completion_interruptible(&dev->cmnd_ready))
break;
diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
index 8ac3fae..6b3d156 100644
--- a/drivers/staging/sep/sep_driver.c
+++ b/drivers/staging/sep/sep_driver.c
@@ -1235,7 +1235,7 @@ static void sep_build_lli_table(struct sep_device *sep,
/* Counter of lli array entry */
u32 array_counter;
- /* Init currrent table data size and lli array entry counter */
+ /* Init current table data size and lli array entry counter */
curr_table_data_size = 0;
array_counter = 0;
*num_table_entries_ptr = 1;
@@ -2120,6 +2120,8 @@ static int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
}
}
if (tail_size) {
+ if (tail_size > sizeof(dcb_table_ptr->tail_data))
+ return -EINVAL;
if (is_kva == true) {
memcpy(dcb_table_ptr->tail_data,
(void *)(app_in_address + data_in_size -
diff --git a/drivers/staging/serial/68360serial.c b/drivers/staging/serial/68360serial.c
index 0a3e878..daf0b1d 100644
--- a/drivers/staging/serial/68360serial.c
+++ b/drivers/staging/serial/68360serial.c
@@ -2771,8 +2771,8 @@ static int __init rs_360_init(void)
*/
/* cpm_install_handler(IRQ_MACHSPEC | state->irq, rs_360_interrupt, info); */
/*request_irq(IRQ_MACHSPEC | state->irq, rs_360_interrupt, */
- request_irq(state->irq, rs_360_interrupt,
- IRQ_FLG_LOCK, "ttyS", (void *)info);
+ request_irq(state->irq, rs_360_interrupt, 0, "ttyS",
+ (void *)info);
/* Set up the baud rate generator.
*/
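IRQ_FLG_LOCK was an old m68k-era flag with no meaning to the generic request_irq() path, so the call now passes 0 for the flags argument. For reference, a self-contained use of the generic prototype; the handler and cookie names below are placeholders:

#include <linux/interrupt.h>

static irqreturn_t example_handler(int irq, void *dev_id)
{
	/* acknowledge the device, then report the interrupt as handled */
	return IRQ_HANDLED;
}

static int example_request(unsigned int irq, void *cookie)
{
	/* flags == 0: no IRQF_SHARED, no trigger overrides */
	return request_irq(irq, example_handler, 0, "ttyS", cookie);
}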
diff --git a/drivers/staging/serqt_usb2/serqt_usb2.c b/drivers/staging/serqt_usb2/serqt_usb2.c
index c44e41a..1c5780f 100644
--- a/drivers/staging/serqt_usb2/serqt_usb2.c
+++ b/drivers/staging/serqt_usb2/serqt_usb2.c
@@ -16,7 +16,7 @@
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
-static int debug;
+static bool debug;
/* Version Information */
#define DRIVER_VERSION "v2.14"
diff --git a/drivers/staging/sm7xx/smtcfb.c b/drivers/staging/sm7xx/smtcfb.c
index 39dbf33..ae0035f 100644
--- a/drivers/staging/sm7xx/smtcfb.c
+++ b/drivers/staging/sm7xx/smtcfb.c
@@ -1024,9 +1024,9 @@ failed_free:
/* Jason (08/11/2009) PCI_DRV wrapper essential structs */
static DEFINE_PCI_DEVICE_TABLE(smtcfb_pci_table) = {
- {0x126f, 0x710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x126f, 0x712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0x126f, 0x720, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ { PCI_DEVICE(0x126f, 0x710), },
+ { PCI_DEVICE(0x126f, 0x712), },
+ { PCI_DEVICE(0x126f, 0x720), },
{0,}
};
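PCI_DEVICE(vendor, device) expands to designated initializers that also wildcard the subvendor/subdevice fields, which is exactly what the open-coded entries above expressed. A self-contained sketch; the table name is illustrative:

#include <linux/module.h>
#include <linux/pci.h>

static DEFINE_PCI_DEVICE_TABLE(example_pci_table) = {
	{ PCI_DEVICE(0x126f, 0x710) },	/* Silicon Motion SM7xx part */
	{ PCI_DEVICE(0x126f, 0x712) },
	{ 0, }				/* terminator */
};
MODULE_DEVICE_TABLE(pci, example_pci_table);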
diff --git a/drivers/staging/speakup/kobjects.c b/drivers/staging/speakup/kobjects.c
index 07a7f54..2093896 100644
--- a/drivers/staging/speakup/kobjects.c
+++ b/drivers/staging/speakup/kobjects.c
@@ -265,12 +265,11 @@ static ssize_t keymap_store(struct kobject *kobj, struct kobj_attribute *attr,
unsigned long flags;
spk_lock(flags);
- in_buff = kmalloc(count + 1, GFP_ATOMIC);
+ in_buff = kmemdup(buf, count + 1, GFP_ATOMIC);
if (!in_buff) {
spk_unlock(flags);
return -ENOMEM;
}
- memcpy(in_buff, buf, count + 1);
if (strchr("dDrR", *in_buff)) {
set_key_info(key_defaults, key_buf);
pr_info("keymap set to default values\n");
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index 8be5604..c7b03f0 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -2268,8 +2268,6 @@ static int __init speakup_init(void)
set_mask_bits(0, i, 2);
set_key_info(key_defaults, key_buf);
- if (quiet_boot)
- spk_shut_up |= 0x01;
/* From here on out, initializations can fail. */
err = speakup_add_virtual_keyboard();
@@ -2292,6 +2290,9 @@ static int __init speakup_init(void)
goto error_kobjects;
}
+ if (quiet_boot)
+ spk_shut_up |= 0x01;
+
err = speakup_kobj_init();
if (err)
goto error_kobjects;
diff --git a/drivers/staging/speakup/speakup.h b/drivers/staging/speakup/speakup.h
index 412b879..e66579e 100644
--- a/drivers/staging/speakup/speakup.h
+++ b/drivers/staging/speakup/speakup.h
@@ -116,7 +116,7 @@ extern int bleep_time, bell_pos;
extern int spell_delay, key_echo;
extern short punc_mask;
extern short pitch_shift, synth_flags;
-extern int quiet_boot;
+extern bool quiet_boot;
extern char *synth_name;
extern struct bleep unprocessed_sound;
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index c241074..2222d69 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -22,7 +22,7 @@ static struct spk_synth *synths[MAXSYNTHS];
struct spk_synth *synth;
char pitch_buff[32] = "";
static int module_status;
-int quiet_boot;
+bool quiet_boot;
struct speakup_info_t speakup_info = {
.spinlock = __SPIN_LOCK_UNLOCKED(speakup_info.spinlock),
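The int-to-bool switch for quiet_boot (and for serqt_usb2's debug above) follows module_param()'s stricter type checking: a parameter declared as bool is expected to be backed by a bool variable. A short example with a made-up parameter name:

#include <linux/module.h>
#include <linux/moduleparam.h>

static bool example_quiet_boot;
module_param(example_quiet_boot, bool, 0444);
MODULE_PARM_DESC(example_quiet_boot, "start up silently (example parameter)");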
diff --git a/drivers/staging/spectra/Kconfig b/drivers/staging/spectra/Kconfig
deleted file mode 100644
index 4fc2064..0000000
--- a/drivers/staging/spectra/Kconfig
+++ /dev/null
@@ -1,41 +0,0 @@
-
-menuconfig SPECTRA
- tristate "Denali Spectra Flash Translation Layer"
- depends on BLOCK
- depends on X86_MRST
- default n
- ---help---
- Enable the FTL pseudo-filesystem used with the NAND Flash
- controller on Intel Moorestown Platform to pretend to be a disk.
-
-choice
- prompt "Compile for"
- depends on SPECTRA
- default SPECTRA_MRST_HW
-
-config SPECTRA_MRST_HW
- bool "Moorestown hardware mode"
- help
- Driver communicates with the Moorestown hardware's register interface.
- in DMA mode.
-
-config SPECTRA_MTD
- bool "Linux MTD mode"
- depends on MTD
- help
- Driver communicates with the kernel MTD subsystem instead of its own
- built-in hardware driver.
-
-config SPECTRA_EMU
- bool "RAM emulator testing"
- help
- Driver emulates Flash on a RAM buffer and / or disk file. Useful to test the behavior of FTL layer.
-
-endchoice
-
-config SPECTRA_MRST_HW_DMA
- bool
- default n
- depends on SPECTRA_MRST_HW
- help
- Use DMA for native hardware interface.
diff --git a/drivers/staging/spectra/Makefile b/drivers/staging/spectra/Makefile
deleted file mode 100644
index f777dfb..0000000
--- a/drivers/staging/spectra/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-#
-# Makefile of Intel Moorestown NAND controller driver
-#
-
-obj-$(CONFIG_SPECTRA) += spectra.o
-spectra-y := ffsport.o flash.o lld.o
-spectra-$(CONFIG_SPECTRA_MRST_HW) += lld_nand.o
-spectra-$(CONFIG_SPECTRA_MRST_HW_DMA) += lld_cdma.o
-spectra-$(CONFIG_SPECTRA_EMU) += lld_emu.o
-spectra-$(CONFIG_SPECTRA_MTD) += lld_mtd.o
-
diff --git a/drivers/staging/spectra/README b/drivers/staging/spectra/README
deleted file mode 100644
index ecba559..0000000
--- a/drivers/staging/spectra/README
+++ /dev/null
@@ -1,29 +0,0 @@
-This is a driver for NAND controller of Intel Moorestown platform.
-
-This driver is a standalone linux block device driver, it acts as if it's a normal hard disk.
-It includes three layer:
- block layer interface - file ffsport.c
- Flash Translation Layer (FTL) - file flash.c (implement the NAND flash Translation Layer, includs address mapping, garbage collection, wear-leveling and so on)
- Low level layer - file lld_nand.c/lld_cdma.c/lld_emu.c (which implements actual controller hardware registers access)
-
-This driver can be build as modules or build-in.
-
-Dependency:
-This driver has dependency on IA Firmware of Intel Moorestown platform.
-It need the IA Firmware to create the block table for the first time.
-And to validate this driver code without IA Firmware, you can change the
-macro AUTO_FORMAT_FLASH from 0 to 1 in file spectraswconfig.h. Thus the
-driver will erase the whole nand flash and create a new block table.
-
-TODO:
- - Enable Command DMA feature support
- - lower the memory footprint
- - Remove most of the unnecessary global variables
- - Change all the upcase variable / functions name to lowercase
- - Some other misc bugs
-
-Please send patches to:
- Greg Kroah-Hartman <gregkh@suse.de>
-
-And Cc to: Gao Yunpeng <yunpeng.gao@intel.com>
-
diff --git a/drivers/staging/spectra/ffsdefs.h b/drivers/staging/spectra/ffsdefs.h
deleted file mode 100644
index a9e9cd2..0000000
--- a/drivers/staging/spectra/ffsdefs.h
+++ /dev/null
@@ -1,58 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#ifndef _FFSDEFS_
-#define _FFSDEFS_
-
-#define CLEAR 0 /*use this to clear a field instead of "fail"*/
-#define SET 1 /*use this to set a field instead of "pass"*/
-#define FAIL 1 /*failed flag*/
-#define PASS 0 /*success flag*/
-#define ERR -1 /*error flag*/
-
-#define ERASE_CMD 10
-#define WRITE_MAIN_CMD 11
-#define READ_MAIN_CMD 12
-#define WRITE_SPARE_CMD 13
-#define READ_SPARE_CMD 14
-#define WRITE_MAIN_SPARE_CMD 15
-#define READ_MAIN_SPARE_CMD 16
-#define MEMCOPY_CMD 17
-#define DUMMY_CMD 99
-
-#define EVENT_PASS 0x00
-#define EVENT_CORRECTABLE_DATA_ERROR_FIXED 0x01
-#define EVENT_UNCORRECTABLE_DATA_ERROR 0x02
-#define EVENT_TIME_OUT 0x03
-#define EVENT_PROGRAM_FAILURE 0x04
-#define EVENT_ERASE_FAILURE 0x05
-#define EVENT_MEMCOPY_FAILURE 0x06
-#define EVENT_FAIL 0x07
-
-#define EVENT_NONE 0x22
-#define EVENT_DMA_CMD_COMP 0x77
-#define EVENT_ECC_TRANSACTION_DONE 0x88
-#define EVENT_DMA_CMD_FAIL 0x99
-
-#define CMD_PASS 0
-#define CMD_FAIL 1
-#define CMD_ABORT 2
-#define CMD_NOT_DONE 3
-
-#endif /* _FFSDEFS_ */
diff --git a/drivers/staging/spectra/ffsport.c b/drivers/staging/spectra/ffsport.c
deleted file mode 100644
index 86d556d..0000000
--- a/drivers/staging/spectra/ffsport.c
+++ /dev/null
@@ -1,834 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#include "ffsport.h"
-#include "flash.h"
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/blkdev.h>
-#include <linux/wait.h>
-#include <linux/mutex.h>
-#include <linux/kthread.h>
-#include <linux/log2.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/async.h>
-
-/**** Helper functions used for Div, Remainder operation on u64 ****/
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_Calc_Used_Bits
-* Inputs: Power of 2 number
-* Outputs: Number of Used Bits
-* 0, if the argument is 0
-* Description: Calculate the number of bits used by a given power of 2 number
-* Number can be up to 32 bit
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_Calc_Used_Bits(u32 n)
-{
- int tot_bits = 0;
-
- if (n >= 1 << 16) {
- n >>= 16;
- tot_bits += 16;
- }
-
- if (n >= 1 << 8) {
- n >>= 8;
- tot_bits += 8;
- }
-
- if (n >= 1 << 4) {
- n >>= 4;
- tot_bits += 4;
- }
-
- if (n >= 1 << 2) {
- n >>= 2;
- tot_bits += 2;
- }
-
- if (n >= 1 << 1)
- tot_bits += 1;
-
- return ((n == 0) ? (0) : tot_bits);
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_u64_Div
-* Inputs: Number of u64
-* A power of 2 number as Division
-* Outputs: Quotient of the Divisor operation
-* Description: It divides the address by divisor by using bit shift operation
-* (essentially without explicitely using "/").
-* Divisor is a power of 2 number and Divided is of u64
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u64 GLOB_u64_Div(u64 addr, u32 divisor)
-{
- return (u64)(addr >> GLOB_Calc_Used_Bits(divisor));
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_u64_Remainder
-* Inputs: Number of u64
-* Divisor Type (1 -PageAddress, 2- BlockAddress)
-* Outputs: Remainder of the Division operation
-* Description: It calculates the remainder of a number (of u64) by
-* divisor(power of 2 number ) by using bit shifting and multiply
-* operation(essentially without explicitely using "/").
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type)
-{
- u64 result = 0;
-
- if (divisor_type == 1) { /* Remainder -- Page */
- result = (addr >> DeviceInfo.nBitsInPageDataSize);
- result = result * DeviceInfo.wPageDataSize;
- } else if (divisor_type == 2) { /* Remainder -- Block */
- result = (addr >> DeviceInfo.nBitsInBlockDataSize);
- result = result * DeviceInfo.wBlockDataSize;
- }
-
- result = addr - result;
-
- return result;
-}
-
-#define NUM_DEVICES 1
-#define PARTITIONS 8
-
-#define GLOB_SBD_NAME "nd"
-#define GLOB_SBD_IRQ_NUM (29)
-
-#define GLOB_SBD_IOCTL_GC (0x7701)
-#define GLOB_SBD_IOCTL_WL (0x7702)
-#define GLOB_SBD_IOCTL_FORMAT (0x7703)
-#define GLOB_SBD_IOCTL_ERASE_FLASH (0x7704)
-#define GLOB_SBD_IOCTL_FLUSH_CACHE (0x7705)
-#define GLOB_SBD_IOCTL_COPY_BLK_TABLE (0x7706)
-#define GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE (0x7707)
-#define GLOB_SBD_IOCTL_GET_NAND_INFO (0x7708)
-#define GLOB_SBD_IOCTL_WRITE_DATA (0x7709)
-#define GLOB_SBD_IOCTL_READ_DATA (0x770A)
-
-static int reserved_mb = 0;
-module_param(reserved_mb, int, 0);
-MODULE_PARM_DESC(reserved_mb, "Reserved space for OS image, in MiB (default 25 MiB)");
-
-int nand_debug_level;
-module_param(nand_debug_level, int, 0644);
-MODULE_PARM_DESC(nand_debug_level, "debug level value: 1-3");
-
-MODULE_LICENSE("GPL");
-
-struct spectra_nand_dev {
- struct pci_dev *dev;
- u64 size;
- u16 users;
- spinlock_t qlock;
- void __iomem *ioaddr; /* Mapped address */
- struct request_queue *queue;
- struct task_struct *thread;
- struct gendisk *gd;
- u8 *tmp_buf;
-};
-
-
-static int GLOB_SBD_majornum;
-
-static char *GLOB_version = GLOB_VERSION;
-
-static struct spectra_nand_dev nand_device[NUM_DEVICES];
-
-static struct mutex spectra_lock;
-
-static int res_blks_os = 1;
-
-struct spectra_indentfy_dev_tag IdentifyDeviceData;
-
-static int force_flush_cache(void)
-{
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (ERR == GLOB_FTL_Flush_Cache()) {
- printk(KERN_ERR "Fail to Flush FTL Cache!\n");
- return -EFAULT;
- }
-#if CMD_DMA
- if (glob_ftl_execute_cmds())
- return -EIO;
- else
- return 0;
-#endif
- return 0;
-}
-
-struct ioctl_rw_page_info {
- u8 *data;
- unsigned int page;
-};
-
-static int ioctl_read_page_data(unsigned long arg)
-{
- u8 *buf;
- struct ioctl_rw_page_info info;
- int result = PASS;
-
- if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
- return -EFAULT;
-
- buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
- if (!buf) {
- printk(KERN_ERR "ioctl_read_page_data: "
- "failed to allocate memory\n");
- return -ENOMEM;
- }
-
- mutex_lock(&spectra_lock);
- result = GLOB_FTL_Page_Read(buf,
- (u64)info.page * IdentifyDeviceData.PageDataSize);
- mutex_unlock(&spectra_lock);
-
- if (copy_to_user((void __user *)info.data, buf,
- IdentifyDeviceData.PageDataSize)) {
- printk(KERN_ERR "ioctl_read_page_data: "
- "failed to copy user data\n");
- kfree(buf);
- return -EFAULT;
- }
-
- kfree(buf);
- return result;
-}
-
-static int ioctl_write_page_data(unsigned long arg)
-{
- u8 *buf;
- struct ioctl_rw_page_info info;
- int result = PASS;
-
- if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
- return -EFAULT;
-
- buf = memdup_user((void __user *)info.data,
- IdentifyDeviceData.PageDataSize);
- if (IS_ERR(buf)) {
- printk(KERN_ERR "ioctl_write_page_data: "
- "failed to copy user data\n");
- return PTR_ERR(buf);
- }
-
- mutex_lock(&spectra_lock);
- result = GLOB_FTL_Page_Write(buf,
- (u64)info.page * IdentifyDeviceData.PageDataSize);
- mutex_unlock(&spectra_lock);
-
- kfree(buf);
- return result;
-}
-
-/* Return how many blocks should be reserved for bad block replacement */
-static int get_res_blk_num_bad_blk(void)
-{
- return IdentifyDeviceData.wDataBlockNum / 10;
-}
-
-/* Return how many blocks should be reserved for OS image */
-static int get_res_blk_num_os(void)
-{
- u32 res_blks, blk_size;
-
- blk_size = IdentifyDeviceData.PageDataSize *
- IdentifyDeviceData.PagesPerBlock;
-
- res_blks = (reserved_mb * 1024 * 1024) / blk_size;
-
- if ((res_blks < 1) || (res_blks >= IdentifyDeviceData.wDataBlockNum))
- res_blks = 1; /* Reserved 1 block for block table */
-
- return res_blks;
-}
-
-/* Transfer a full request. */
-static int do_transfer(struct spectra_nand_dev *tr, struct request *req)
-{
- u64 start_addr, addr;
- u32 logical_start_sect, hd_start_sect;
- u32 nsect, hd_sects;
- u32 rsect, tsect = 0;
- char *buf;
- u32 ratio = IdentifyDeviceData.PageDataSize >> 9;
-
- start_addr = (u64)(blk_rq_pos(req)) << 9;
- /* Add a big enough offset to prevent the OS Image from
- * being accessed or damaged by file system */
- start_addr += IdentifyDeviceData.PageDataSize *
- IdentifyDeviceData.PagesPerBlock *
- res_blks_os;
-
- if (req->cmd_type & REQ_FLUSH) {
- if (force_flush_cache()) /* Fail to flush cache */
- return -EIO;
- else
- return 0;
- }
-
- if (req->cmd_type != REQ_TYPE_FS)
- return -EIO;
-
- if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > get_capacity(tr->gd)) {
- printk(KERN_ERR "Spectra error: request over the NAND "
- "capacity!sector %d, current_nr_sectors %d, "
- "while capacity is %d\n",
- (int)blk_rq_pos(req),
- blk_rq_cur_sectors(req),
- (int)get_capacity(tr->gd));
- return -EIO;
- }
-
- logical_start_sect = start_addr >> 9;
- hd_start_sect = logical_start_sect / ratio;
- rsect = logical_start_sect - hd_start_sect * ratio;
-
- addr = (u64)hd_start_sect * ratio * 512;
- buf = req->buffer;
- nsect = blk_rq_cur_sectors(req);
-
- if (rsect)
- tsect = (ratio - rsect) < nsect ? (ratio - rsect) : nsect;
-
- switch (rq_data_dir(req)) {
- case READ:
- /* Read the first NAND page */
- if (rsect) {
- if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
- printk(KERN_ERR "Error in %s, Line %d\n",
- __FILE__, __LINE__);
- return -EIO;
- }
- memcpy(buf, tr->tmp_buf + (rsect << 9), tsect << 9);
- addr += IdentifyDeviceData.PageDataSize;
- buf += tsect << 9;
- nsect -= tsect;
- }
-
- /* Read the other NAND pages */
- for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
- if (GLOB_FTL_Page_Read(buf, addr)) {
- printk(KERN_ERR "Error in %s, Line %d\n",
- __FILE__, __LINE__);
- return -EIO;
- }
- addr += IdentifyDeviceData.PageDataSize;
- buf += IdentifyDeviceData.PageDataSize;
- }
-
- /* Read the last NAND pages */
- if (nsect % ratio) {
- if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
- printk(KERN_ERR "Error in %s, Line %d\n",
- __FILE__, __LINE__);
- return -EIO;
- }
- memcpy(buf, tr->tmp_buf, (nsect % ratio) << 9);
- }
-#if CMD_DMA
- if (glob_ftl_execute_cmds())
- return -EIO;
- else
- return 0;
-#endif
- return 0;
-
- case WRITE:
- /* Write the first NAND page */
- if (rsect) {
- if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
- printk(KERN_ERR "Error in %s, Line %d\n",
- __FILE__, __LINE__);
- return -EIO;
- }
- memcpy(tr->tmp_buf + (rsect << 9), buf, tsect << 9);
- if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
- printk(KERN_ERR "Error in %s, Line %d\n",
- __FILE__, __LINE__);
- return -EIO;
- }
- addr += IdentifyDeviceData.PageDataSize;
- buf += tsect << 9;
- nsect -= tsect;
- }
-
- /* Write the other NAND pages */
- for (hd_sects = nsect / ratio; hd_sects > 0; hd_sects--) {
- if (GLOB_FTL_Page_Write(buf, addr)) {
- printk(KERN_ERR "Error in %s, Line %d\n",
- __FILE__, __LINE__);
- return -EIO;
- }
- addr += IdentifyDeviceData.PageDataSize;
- buf += IdentifyDeviceData.PageDataSize;
- }
-
- /* Write the last NAND pages */
- if (nsect % ratio) {
- if (GLOB_FTL_Page_Read(tr->tmp_buf, addr)) {
- printk(KERN_ERR "Error in %s, Line %d\n",
- __FILE__, __LINE__);
- return -EIO;
- }
- memcpy(tr->tmp_buf, buf, (nsect % ratio) << 9);
- if (GLOB_FTL_Page_Write(tr->tmp_buf, addr)) {
- printk(KERN_ERR "Error in %s, Line %d\n",
- __FILE__, __LINE__);
- return -EIO;
- }
- }
-#if CMD_DMA
- if (glob_ftl_execute_cmds())
- return -EIO;
- else
- return 0;
-#endif
- return 0;
-
- default:
- printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
- return -EIO;
- }
-}
-
-/* This function is copied from drivers/mtd/mtd_blkdevs.c */
-static int spectra_trans_thread(void *arg)
-{
- struct spectra_nand_dev *tr = arg;
- struct request_queue *rq = tr->queue;
- struct request *req = NULL;
-
- /* we might get involved when memory gets low, so use PF_MEMALLOC */
- current->flags |= PF_MEMALLOC;
-
- spin_lock_irq(rq->queue_lock);
- while (!kthread_should_stop()) {
- int res;
-
- if (!req) {
- req = blk_fetch_request(rq);
- if (!req) {
- set_current_state(TASK_INTERRUPTIBLE);
- spin_unlock_irq(rq->queue_lock);
- schedule();
- spin_lock_irq(rq->queue_lock);
- continue;
- }
- }
-
- spin_unlock_irq(rq->queue_lock);
-
- mutex_lock(&spectra_lock);
- res = do_transfer(tr, req);
- mutex_unlock(&spectra_lock);
-
- spin_lock_irq(rq->queue_lock);
-
- if (!__blk_end_request_cur(req, res))
- req = NULL;
- }
-
- if (req)
- __blk_end_request_all(req, -EIO);
-
- spin_unlock_irq(rq->queue_lock);
-
- return 0;
-}
-
-
-/* Request function that "handles clustering". */
-static void GLOB_SBD_request(struct request_queue *rq)
-{
- struct spectra_nand_dev *pdev = rq->queuedata;
- wake_up_process(pdev->thread);
-}
-
-static int GLOB_SBD_open(struct block_device *bdev, fmode_t mode)
-
-{
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
- return 0;
-}
-
-static int GLOB_SBD_release(struct gendisk *disk, fmode_t mode)
-{
- int ret;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- mutex_lock(&spectra_lock);
- ret = force_flush_cache();
- mutex_unlock(&spectra_lock);
-
- return 0;
-}
-
-static int GLOB_SBD_getgeo(struct block_device *bdev, struct hd_geometry *geo)
-{
- geo->heads = 4;
- geo->sectors = 16;
- geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "heads: %d, sectors: %d, cylinders: %d\n",
- geo->heads, geo->sectors, geo->cylinders);
-
- return 0;
-}
-
-int GLOB_SBD_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- int ret;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- switch (cmd) {
- case GLOB_SBD_IOCTL_GC:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Spectra IOCTL: Garbage Collection "
- "being performed\n");
- if (PASS != GLOB_FTL_Garbage_Collection())
- return -EFAULT;
- return 0;
-
- case GLOB_SBD_IOCTL_WL:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Spectra IOCTL: Static Wear Leveling "
- "being performed\n");
- if (PASS != GLOB_FTL_Wear_Leveling())
- return -EFAULT;
- return 0;
-
- case GLOB_SBD_IOCTL_FORMAT:
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Flash format "
- "being performed\n");
- if (PASS != GLOB_FTL_Flash_Format())
- return -EFAULT;
- return 0;
-
- case GLOB_SBD_IOCTL_FLUSH_CACHE:
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: Cache flush "
- "being performed\n");
- mutex_lock(&spectra_lock);
- ret = force_flush_cache();
- mutex_unlock(&spectra_lock);
- return ret;
-
- case GLOB_SBD_IOCTL_COPY_BLK_TABLE:
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
- "Copy block table\n");
- if (copy_to_user((void __user *)arg,
- get_blk_table_start_addr(),
- get_blk_table_len()))
- return -EFAULT;
- return 0;
-
- case GLOB_SBD_IOCTL_COPY_WEAR_LEVELING_TABLE:
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
- "Copy wear leveling table\n");
- if (copy_to_user((void __user *)arg,
- get_wear_leveling_table_start_addr(),
- get_wear_leveling_table_len()))
- return -EFAULT;
- return 0;
-
- case GLOB_SBD_IOCTL_GET_NAND_INFO:
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
- "Get NAND info\n");
- if (copy_to_user((void __user *)arg, &IdentifyDeviceData,
- sizeof(IdentifyDeviceData)))
- return -EFAULT;
- return 0;
-
- case GLOB_SBD_IOCTL_WRITE_DATA:
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
- "Write one page data\n");
- return ioctl_write_page_data(arg);
-
- case GLOB_SBD_IOCTL_READ_DATA:
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra IOCTL: "
- "Read one page data\n");
- return ioctl_read_page_data(arg);
- }
-
- return -ENOTTY;
-}
-
-static DEFINE_MUTEX(ffsport_mutex);
-
-int GLOB_SBD_unlocked_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg)
-{
- int ret;
-
- mutex_lock(&ffsport_mutex);
- ret = GLOB_SBD_ioctl(bdev, mode, cmd, arg);
- mutex_unlock(&ffsport_mutex);
-
- return ret;
-}
-
-static struct block_device_operations GLOB_SBD_ops = {
- .owner = THIS_MODULE,
- .open = GLOB_SBD_open,
- .release = GLOB_SBD_release,
- .ioctl = GLOB_SBD_unlocked_ioctl,
- .getgeo = GLOB_SBD_getgeo,
-};
-
-static int SBD_setup_device(struct spectra_nand_dev *dev, int which)
-{
- int res_blks;
- u32 sects;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- memset(dev, 0, sizeof(struct spectra_nand_dev));
-
- nand_dbg_print(NAND_DBG_WARN, "Reserved %d blocks "
- "for OS image, %d blocks for bad block replacement.\n",
- get_res_blk_num_os(),
- get_res_blk_num_bad_blk());
-
- res_blks = get_res_blk_num_bad_blk() + get_res_blk_num_os();
-
- dev->size = (u64)IdentifyDeviceData.PageDataSize *
- IdentifyDeviceData.PagesPerBlock *
- (IdentifyDeviceData.wDataBlockNum - res_blks);
-
- res_blks_os = get_res_blk_num_os();
-
- spin_lock_init(&dev->qlock);
-
- dev->tmp_buf = kmalloc(IdentifyDeviceData.PageDataSize, GFP_ATOMIC);
- if (!dev->tmp_buf) {
- printk(KERN_ERR "Failed to kmalloc memory in %s Line %d, exit.\n",
- __FILE__, __LINE__);
- goto out_vfree;
- }
-
- dev->queue = blk_init_queue(GLOB_SBD_request, &dev->qlock);
- if (dev->queue == NULL) {
- printk(KERN_ERR
- "Spectra: Request queue could not be initialized."
- " Aborting\n ");
- goto out_vfree;
- }
- dev->queue->queuedata = dev;
-
- /* As Linux block layer doesn't support >4KB hardware sector, */
- /* Here we force report 512 byte hardware sector size to Kernel */
- blk_queue_logical_block_size(dev->queue, 512);
-
- blk_queue_flush(dev->queue, REQ_FLUSH);
-
- dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd");
- if (IS_ERR(dev->thread)) {
- blk_cleanup_queue(dev->queue);
- unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
- return PTR_ERR(dev->thread);
- }
-
- dev->gd = alloc_disk(PARTITIONS);
- if (!dev->gd) {
- printk(KERN_ERR
- "Spectra: Could not allocate disk. Aborting \n ");
- goto out_vfree;
- }
- dev->gd->major = GLOB_SBD_majornum;
- dev->gd->first_minor = which * PARTITIONS;
- dev->gd->fops = &GLOB_SBD_ops;
- dev->gd->queue = dev->queue;
- dev->gd->private_data = dev;
- snprintf(dev->gd->disk_name, 32, "%s%c", GLOB_SBD_NAME, which + 'a');
-
- sects = dev->size >> 9;
- nand_dbg_print(NAND_DBG_WARN, "Capacity sects: %d\n", sects);
- set_capacity(dev->gd, sects);
-
- add_disk(dev->gd);
-
- return 0;
-out_vfree:
- return -ENOMEM;
-}
-
-/*
-static ssize_t show_nand_block_num(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n",
- (int)IdentifyDeviceData.wDataBlockNum);
-}
-
-static ssize_t show_nand_pages_per_block(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n",
- (int)IdentifyDeviceData.PagesPerBlock);
-}
-
-static ssize_t show_nand_page_size(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- return snprintf(buf, PAGE_SIZE, "%d\n",
- (int)IdentifyDeviceData.PageDataSize);
-}
-
-static DEVICE_ATTR(nand_block_num, 0444, show_nand_block_num, NULL);
-static DEVICE_ATTR(nand_pages_per_block, 0444, show_nand_pages_per_block, NULL);
-static DEVICE_ATTR(nand_page_size, 0444, show_nand_page_size, NULL);
-
-static void create_sysfs_entry(struct device *dev)
-{
- if (device_create_file(dev, &dev_attr_nand_block_num))
- printk(KERN_ERR "Spectra: "
- "failed to create sysfs entry nand_block_num.\n");
- if (device_create_file(dev, &dev_attr_nand_pages_per_block))
- printk(KERN_ERR "Spectra: "
- "failed to create sysfs entry nand_pages_per_block.\n");
- if (device_create_file(dev, &dev_attr_nand_page_size))
- printk(KERN_ERR "Spectra: "
- "failed to create sysfs entry nand_page_size.\n");
-}
-*/
-
-static void register_spectra_ftl_async(void *unused, async_cookie_t cookie)
-{
- int i;
-
- /* create_sysfs_entry(&dev->dev); */
-
- if (PASS != GLOB_FTL_IdentifyDevice(&IdentifyDeviceData)) {
- printk(KERN_ERR "Spectra: Unable to Read Flash Device. "
- "Aborting\n");
- return;
- } else {
- nand_dbg_print(NAND_DBG_WARN, "In GLOB_SBD_init: "
- "Num blocks=%d, pagesperblock=%d, "
- "pagedatasize=%d, ECCBytesPerSector=%d\n",
- (int)IdentifyDeviceData.NumBlocks,
- (int)IdentifyDeviceData.PagesPerBlock,
- (int)IdentifyDeviceData.PageDataSize,
- (int)IdentifyDeviceData.wECCBytesPerSector);
- }
-
- printk(KERN_ALERT "Spectra: searching block table, please wait ...\n");
- if (GLOB_FTL_Init() != PASS) {
- printk(KERN_ERR "Spectra: Unable to Initialize FTL Layer. "
- "Aborting\n");
- goto out_ftl_flash_register;
- }
- printk(KERN_ALERT "Spectra: block table has been found.\n");
-
- GLOB_SBD_majornum = register_blkdev(0, GLOB_SBD_NAME);
- if (GLOB_SBD_majornum <= 0) {
- printk(KERN_ERR "Unable to get the major %d for Spectra",
- GLOB_SBD_majornum);
- goto out_ftl_flash_register;
- }
-
- for (i = 0; i < NUM_DEVICES; i++)
- if (SBD_setup_device(&nand_device[i], i) == -ENOMEM)
- goto out_blk_register;
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Spectra: module loaded with major number %d\n",
- GLOB_SBD_majornum);
-
- return;
-
-out_blk_register:
- unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
-out_ftl_flash_register:
- GLOB_FTL_Cache_Release();
- printk(KERN_ERR "Spectra: Module load failed.\n");
-}
-
-int register_spectra_ftl()
-{
- async_schedule(register_spectra_ftl_async, NULL);
- return 0;
-}
-EXPORT_SYMBOL_GPL(register_spectra_ftl);
-
-static int GLOB_SBD_init(void)
-{
- /* Set debug output level (0~3) here. 3 is most verbose */
- printk(KERN_ALERT "Spectra: %s\n", GLOB_version);
-
- mutex_init(&spectra_lock);
-
- if (PASS != GLOB_FTL_Flash_Init()) {
- printk(KERN_ERR "Spectra: Unable to Initialize Flash Device. "
- "Aborting\n");
- return -ENODEV;
- }
- return 0;
-}
-
-static void __exit GLOB_SBD_exit(void)
-{
- int i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < NUM_DEVICES; i++) {
- struct spectra_nand_dev *dev = &nand_device[i];
- if (dev->gd) {
- del_gendisk(dev->gd);
- put_disk(dev->gd);
- }
- if (dev->queue)
- blk_cleanup_queue(dev->queue);
- kfree(dev->tmp_buf);
- }
-
- unregister_blkdev(GLOB_SBD_majornum, GLOB_SBD_NAME);
-
- mutex_lock(&spectra_lock);
- force_flush_cache();
- mutex_unlock(&spectra_lock);
-
- GLOB_FTL_Cache_Release();
-
- GLOB_FTL_Flash_Release();
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Spectra FTL module (major number %d) unloaded.\n",
- GLOB_SBD_majornum);
-}
-
-module_init(GLOB_SBD_init);
-module_exit(GLOB_SBD_exit);
diff --git a/drivers/staging/spectra/ffsport.h b/drivers/staging/spectra/ffsport.h
deleted file mode 100644
index 85c0750..0000000
--- a/drivers/staging/spectra/ffsport.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#ifndef _FFSPORT_
-#define _FFSPORT_
-
-#include "ffsdefs.h"
-
-#if defined __GNUC__
-#define PACKED
-#define PACKED_GNU __attribute__ ((packed))
-#define UNALIGNED
-#endif
-
-#include <linux/semaphore.h>
-#include <linux/string.h> /* for strcpy(), stricmp(), etc */
-#include <linux/mm.h> /* for kmalloc(), kfree() */
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/init.h>
-
-#include <linux/kernel.h> /* printk() */
-#include <linux/fs.h> /* everything... */
-#include <linux/errno.h> /* error codes */
-#include <linux/types.h> /* size_t */
-#include <linux/genhd.h>
-#include <linux/blkdev.h>
-#include <linux/hdreg.h>
-#include <linux/pci.h>
-#include "flash.h"
-
-#define VERBOSE 1
-
-#define NAND_DBG_WARN 1
-#define NAND_DBG_DEBUG 2
-#define NAND_DBG_TRACE 3
-
-extern int nand_debug_level;
-
-#ifdef VERBOSE
-#define nand_dbg_print(level, args...) \
- do { \
- if (level <= nand_debug_level) \
- printk(KERN_ALERT args); \
- } while (0)
-#else
-#define nand_dbg_print(level, args...)
-#endif
-
-#ifdef SUPPORT_BIG_ENDIAN
-#define INVERTUINT16(w) ((u16)(((u16)(w)) << 8) | \
- (u16)((u16)(w) >> 8))
-
-#define INVERTUINT32(dw) (((u32)(dw) << 24) | \
- (((u32)(dw) << 8) & 0x00ff0000) | \
- (((u32)(dw) >> 8) & 0x0000ff00) | \
- ((u32)(dw) >> 24))
-#else
-#define INVERTUINT16(w) w
-#define INVERTUINT32(dw) dw
-#endif
-
-extern int GLOB_Calc_Used_Bits(u32 n);
-extern u64 GLOB_u64_Div(u64 addr, u32 divisor);
-extern u64 GLOB_u64_Remainder(u64 addr, u32 divisor_type);
-extern int register_spectra_ftl(void);
-
-#endif /* _FFSPORT_ */
diff --git a/drivers/staging/spectra/flash.c b/drivers/staging/spectra/flash.c
deleted file mode 100644
index aead358..0000000
--- a/drivers/staging/spectra/flash.c
+++ /dev/null
@@ -1,4305 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#include <linux/fs.h>
-#include <linux/slab.h>
-
-#include "flash.h"
-#include "ffsdefs.h"
-#include "lld.h"
-#include "lld_nand.h"
-#if CMD_DMA
-#include "lld_cdma.h"
-#endif
-
-#define BLK_FROM_ADDR(addr) ((u32)(addr >> DeviceInfo.nBitsInBlockDataSize))
-#define PAGE_FROM_ADDR(addr, Block) ((u16)((addr - (u64)Block * \
- DeviceInfo.wBlockDataSize) >> DeviceInfo.nBitsInPageDataSize))
-
-#define IS_SPARE_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
- BAD_BLOCK) && SPARE_BLOCK == (pbt[blk] & SPARE_BLOCK))
-
-#define IS_DATA_BLOCK(blk) (0 == (pbt[blk] & BAD_BLOCK))
-
-#define IS_DISCARDED_BLOCK(blk) (BAD_BLOCK != (pbt[blk] &\
- BAD_BLOCK) && DISCARD_BLOCK == (pbt[blk] & DISCARD_BLOCK))
-
-#define IS_BAD_BLOCK(blk) (BAD_BLOCK == (pbt[blk] & BAD_BLOCK))
-
-#if DEBUG_BNDRY
-void debug_boundary_lineno_error(int chnl, int limit, int no,
- int lineno, char *filename)
-{
- if (chnl >= limit)
- printk(KERN_ERR "Boundary Check Fail value %d >= limit %d, "
- "at %s:%d. Other info:%d. Aborting...\n",
- chnl, limit, filename, lineno, no);
-}
-/* static int globalmemsize; */
-#endif
-
-static u16 FTL_Cache_If_Hit(u64 dwPageAddr);
-static int FTL_Cache_Read(u64 dwPageAddr);
-static void FTL_Cache_Read_Page(u8 *pData, u64 dwPageAddr,
- u16 cache_blk);
-static void FTL_Cache_Write_Page(u8 *pData, u64 dwPageAddr,
- u8 cache_blk, u16 flag);
-static int FTL_Cache_Write(void);
-static void FTL_Calculate_LRU(void);
-static u32 FTL_Get_Block_Index(u32 wBlockNum);
-
-static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
- u8 BT_Tag, u16 *Page);
-static int FTL_Read_Block_Table(void);
-static int FTL_Write_Block_Table(int wForce);
-static int FTL_Write_Block_Table_Data(void);
-static int FTL_Check_Block_Table(int wOldTable);
-static int FTL_Static_Wear_Leveling(void);
-static u32 FTL_Replace_Block_Table(void);
-static int FTL_Write_IN_Progress_Block_Table_Page(void);
-
-static u32 FTL_Get_Page_Num(u64 length);
-static u64 FTL_Get_Physical_Block_Addr(u64 blk_addr);
-
-static u32 FTL_Replace_OneBlock(u32 wBlockNum,
- u32 wReplaceNum);
-static u32 FTL_Replace_LWBlock(u32 wBlockNum,
- int *pGarbageCollect);
-static u32 FTL_Replace_MWBlock(void);
-static int FTL_Replace_Block(u64 blk_addr);
-static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX);
-
-struct device_info_tag DeviceInfo;
-struct flash_cache_tag Cache;
-static struct spectra_l2_cache_info cache_l2;
-
-static u8 *cache_l2_page_buf;
-static u8 *cache_l2_blk_buf;
-
-u8 *g_pBlockTable;
-u8 *g_pWearCounter;
-u16 *g_pReadCounter;
-u32 *g_pBTBlocks;
-static u16 g_wBlockTableOffset;
-static u32 g_wBlockTableIndex;
-static u8 g_cBlockTableStatus;
-
-static u8 *g_pTempBuf;
-static u8 *flag_check_blk_table;
-static u8 *tmp_buf_search_bt_in_block;
-static u8 *spare_buf_search_bt_in_block;
-static u8 *spare_buf_bt_search_bt_in_block;
-static u8 *tmp_buf1_read_blk_table;
-static u8 *tmp_buf2_read_blk_table;
-static u8 *flags_static_wear_leveling;
-static u8 *tmp_buf_write_blk_table_data;
-static u8 *tmp_buf_read_disturbance;
-
-u8 *buf_read_page_main_spare;
-u8 *buf_write_page_main_spare;
-u8 *buf_read_page_spare;
-u8 *buf_get_bad_block;
-
-#if (RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE && CMD_DMA)
-struct flash_cache_delta_list_tag int_cache[MAX_CHANS + MAX_DESCS];
-struct flash_cache_tag cache_start_copy;
-#endif
-
-int g_wNumFreeBlocks;
-u8 g_SBDCmdIndex;
-
-static u8 *g_pIPF;
-static u8 bt_flag = FIRST_BT_ID;
-static u8 bt_block_changed;
-
-static u16 cache_block_to_write;
-static u8 last_erased = FIRST_BT_ID;
-
-static u8 GC_Called;
-static u8 BT_GC_Called;
-
-#if CMD_DMA
-#define COPY_BACK_BUF_NUM 10
-
-static u8 ftl_cmd_cnt; /* Init value is 0 */
-u8 *g_pBTDelta;
-u8 *g_pBTDelta_Free;
-u8 *g_pBTStartingCopy;
-u8 *g_pWearCounterCopy;
-u16 *g_pReadCounterCopy;
-u8 *g_pBlockTableCopies;
-u8 *g_pNextBlockTable;
-static u8 *cp_back_buf_copies[COPY_BACK_BUF_NUM];
-static int cp_back_buf_idx;
-
-static u8 *g_temp_buf;
-
-#pragma pack(push, 1)
-#pragma pack(1)
-struct BTableChangesDelta {
- u8 ftl_cmd_cnt;
- u8 ValidFields;
- u16 g_wBlockTableOffset;
- u32 g_wBlockTableIndex;
- u32 BT_Index;
- u32 BT_Entry_Value;
- u32 WC_Index;
- u8 WC_Entry_Value;
- u32 RC_Index;
- u16 RC_Entry_Value;
-};
-
-#pragma pack(pop)
-
-struct BTableChangesDelta *p_BTableChangesDelta;
-#endif
-
-
-#define MARK_BLOCK_AS_BAD(blocknode) (blocknode |= BAD_BLOCK)
-#define MARK_BLK_AS_DISCARD(blk) (blk = (blk & ~SPARE_BLOCK) | DISCARD_BLOCK)
-
-#define FTL_Get_LBAPBA_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
- sizeof(u32))
-#define FTL_Get_WearCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
- sizeof(u8))
-#define FTL_Get_ReadCounter_Table_Mem_Size_Bytes() (DeviceInfo.wDataBlockNum *\
- sizeof(u16))
-#if SUPPORT_LARGE_BLOCKNUM
-#define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
- sizeof(u8) * 3)
-#else
-#define FTL_Get_LBAPBA_Table_Flash_Size_Bytes() (DeviceInfo.wDataBlockNum *\
- sizeof(u16))
-#endif
-#define FTL_Get_WearCounter_Table_Flash_Size_Bytes \
- FTL_Get_WearCounter_Table_Mem_Size_Bytes
-#define FTL_Get_ReadCounter_Table_Flash_Size_Bytes \
- FTL_Get_ReadCounter_Table_Mem_Size_Bytes
-
-static u32 FTL_Get_Block_Table_Flash_Size_Bytes(void)
-{
- u32 byte_num;
-
- if (DeviceInfo.MLCDevice) {
- byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
- DeviceInfo.wDataBlockNum * sizeof(u8) +
- DeviceInfo.wDataBlockNum * sizeof(u16);
- } else {
- byte_num = FTL_Get_LBAPBA_Table_Flash_Size_Bytes() +
- DeviceInfo.wDataBlockNum * sizeof(u8);
- }
-
- byte_num += 4 * sizeof(u8);
-
- return byte_num;
-}
-
-static u16 FTL_Get_Block_Table_Flash_Size_Pages(void)
-{
- return (u16)FTL_Get_Page_Num(FTL_Get_Block_Table_Flash_Size_Bytes());
-}
-
-static int FTL_Copy_Block_Table_To_Flash(u8 *flashBuf, u32 sizeToTx,
- u32 sizeTxed)
-{
- u32 wBytesCopied, blk_tbl_size, wBytes;
- u32 *pbt = (u32 *)g_pBlockTable;
-
- blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
- for (wBytes = 0;
- (wBytes < sizeToTx) && ((wBytes + sizeTxed) < blk_tbl_size);
- wBytes++) {
-#if SUPPORT_LARGE_BLOCKNUM
- flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 3]
- >> (((wBytes + sizeTxed) % 3) ?
- ((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16)) & 0xFF;
-#else
- flashBuf[wBytes] = (u8)(pbt[(wBytes + sizeTxed) / 2]
- >> (((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
-#endif
- }
-
- sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
- blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
- wBytesCopied = wBytes;
- wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
- (sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
- memcpy(flashBuf + wBytesCopied, g_pWearCounter + sizeTxed, wBytes);
-
- sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
-
- if (DeviceInfo.MLCDevice) {
- blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
- wBytesCopied += wBytes;
- for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
- ((wBytes + sizeTxed) < blk_tbl_size); wBytes++)
- flashBuf[wBytes + wBytesCopied] =
- (g_pReadCounter[(wBytes + sizeTxed) / 2] >>
- (((wBytes + sizeTxed) % 2) ? 0 : 8)) & 0xFF;
- }
-
- return wBytesCopied + wBytes;
-}
-
-static int FTL_Copy_Block_Table_From_Flash(u8 *flashBuf,
- u32 sizeToTx, u32 sizeTxed)
-{
- u32 wBytesCopied, blk_tbl_size, wBytes;
- u32 *pbt = (u32 *)g_pBlockTable;
-
- blk_tbl_size = FTL_Get_LBAPBA_Table_Flash_Size_Bytes();
- for (wBytes = 0; (wBytes < sizeToTx) &&
- ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
-#if SUPPORT_LARGE_BLOCKNUM
- if (!((wBytes + sizeTxed) % 3))
- pbt[(wBytes + sizeTxed) / 3] = 0;
- pbt[(wBytes + sizeTxed) / 3] |=
- (flashBuf[wBytes] << (((wBytes + sizeTxed) % 3) ?
- ((((wBytes + sizeTxed) % 3) == 2) ? 0 : 8) : 16));
-#else
- if (!((wBytes + sizeTxed) % 2))
- pbt[(wBytes + sizeTxed) / 2] = 0;
- pbt[(wBytes + sizeTxed) / 2] |=
- (flashBuf[wBytes] << (((wBytes + sizeTxed) % 2) ?
- 0 : 8));
-#endif
- }
-
- sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
- blk_tbl_size = FTL_Get_WearCounter_Table_Flash_Size_Bytes();
- wBytesCopied = wBytes;
- wBytes = ((blk_tbl_size - sizeTxed) > (sizeToTx - wBytesCopied)) ?
- (sizeToTx - wBytesCopied) : (blk_tbl_size - sizeTxed);
- memcpy(g_pWearCounter + sizeTxed, flashBuf + wBytesCopied, wBytes);
- sizeTxed = (sizeTxed > blk_tbl_size) ? (sizeTxed - blk_tbl_size) : 0;
-
- if (DeviceInfo.MLCDevice) {
- wBytesCopied += wBytes;
- blk_tbl_size = FTL_Get_ReadCounter_Table_Flash_Size_Bytes();
- for (wBytes = 0; ((wBytes + wBytesCopied) < sizeToTx) &&
- ((wBytes + sizeTxed) < blk_tbl_size); wBytes++) {
- if (((wBytes + sizeTxed) % 2))
- g_pReadCounter[(wBytes + sizeTxed) / 2] = 0;
- g_pReadCounter[(wBytes + sizeTxed) / 2] |=
- (flashBuf[wBytes] <<
- (((wBytes + sizeTxed) % 2) ? 0 : 8));
- }
- }
-
- return wBytesCopied+wBytes;
-}
-
-static int FTL_Insert_Block_Table_Signature(u8 *buf, u8 tag)
-{
- int i;
-
- for (i = 0; i < BTSIG_BYTES; i++)
- buf[BTSIG_OFFSET + i] =
- ((tag + (i * BTSIG_DELTA) - FIRST_BT_ID) %
- (1 + LAST_BT_ID-FIRST_BT_ID)) + FIRST_BT_ID;
-
- return PASS;
-}
-
-static int FTL_Extract_Block_Table_Tag(u8 *buf, u8 **tagarray)
-{
- static u8 tag[BTSIG_BYTES >> 1];
- int i, j, k, tagi, tagtemp, status;
-
- *tagarray = (u8 *)tag;
- tagi = 0;
-
- for (i = 0; i < (BTSIG_BYTES - 1); i++) {
- for (j = i + 1; (j < BTSIG_BYTES) &&
- (tagi < (BTSIG_BYTES >> 1)); j++) {
- tagtemp = buf[BTSIG_OFFSET + j] -
- buf[BTSIG_OFFSET + i];
- if (tagtemp && !(tagtemp % BTSIG_DELTA)) {
- tagtemp = (buf[BTSIG_OFFSET + i] +
- (1 + LAST_BT_ID - FIRST_BT_ID) -
- (i * BTSIG_DELTA)) %
- (1 + LAST_BT_ID - FIRST_BT_ID);
- status = FAIL;
- for (k = 0; k < tagi; k++) {
- if (tagtemp == tag[k])
- status = PASS;
- }
-
- if (status == FAIL) {
- tag[tagi++] = tagtemp;
- i = (j == (i + 1)) ? i + 1 : i;
- j = (j == (i + 1)) ? i + 1 : i;
- }
- }
- }
- }
-
- return tagi;
-}
-
-
-static int FTL_Execute_SPL_Recovery(void)
-{
- u32 j, block, blks;
- u32 *pbt = (u32 *)g_pBlockTable;
- int ret;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- blks = DeviceInfo.wSpectraEndBlock - DeviceInfo.wSpectraStartBlock;
- for (j = 0; j <= blks; j++) {
- block = (pbt[j]);
- if (((block & BAD_BLOCK) != BAD_BLOCK) &&
- ((block & SPARE_BLOCK) == SPARE_BLOCK)) {
- ret = GLOB_LLD_Erase_Block(block & ~BAD_BLOCK);
- if (FAIL == ret) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, new Bad Block %d "
- "generated!\n",
- __FILE__, __LINE__, __func__,
- (int)(block & ~BAD_BLOCK));
- MARK_BLOCK_AS_BAD(pbt[j]);
- }
- }
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_IdentifyDevice
-* Inputs: pointer to identify data structure
-* Outputs: PASS / FAIL
-* Description: the identify data structure is filled in with
-* information for the block driver.
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_IdentifyDevice(struct spectra_indentfy_dev_tag *dev_data)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- dev_data->NumBlocks = DeviceInfo.wTotalBlocks;
- dev_data->PagesPerBlock = DeviceInfo.wPagesPerBlock;
- dev_data->PageDataSize = DeviceInfo.wPageDataSize;
- dev_data->wECCBytesPerSector = DeviceInfo.wECCBytesPerSector;
- dev_data->wDataBlockNum = DeviceInfo.wDataBlockNum;
-
- return PASS;
-}
-
-/* ..... */
-static int allocate_memory(void)
-{
- u32 block_table_size, page_size, block_size, mem_size;
- u32 total_bytes = 0;
- int i;
-#if CMD_DMA
- int j;
-#endif
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- page_size = DeviceInfo.wPageSize;
- block_size = DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize;
-
- block_table_size = DeviceInfo.wDataBlockNum *
- (sizeof(u32) + sizeof(u8) + sizeof(u16));
- block_table_size += (DeviceInfo.wPageDataSize -
- (block_table_size % DeviceInfo.wPageDataSize)) %
- DeviceInfo.wPageDataSize;
-
- /* Malloc memory for block tables */
- g_pBlockTable = kzalloc(block_table_size, GFP_ATOMIC);
- if (!g_pBlockTable)
- goto block_table_fail;
- total_bytes += block_table_size;
-
- g_pWearCounter = (u8 *)(g_pBlockTable +
- DeviceInfo.wDataBlockNum * sizeof(u32));
-
- if (DeviceInfo.MLCDevice)
- g_pReadCounter = (u16 *)(g_pBlockTable +
- DeviceInfo.wDataBlockNum *
- (sizeof(u32) + sizeof(u8)));
-
- /* Malloc memory and init for cache items */
- for (i = 0; i < CACHE_ITEM_NUM; i++) {
- Cache.array[i].address = NAND_CACHE_INIT_ADDR;
- Cache.array[i].use_cnt = 0;
- Cache.array[i].changed = CLEAR;
- Cache.array[i].buf = kzalloc(Cache.cache_item_size,
- GFP_ATOMIC);
- if (!Cache.array[i].buf)
- goto cache_item_fail;
- total_bytes += Cache.cache_item_size;
- }
-
- /* Malloc memory for IPF */
- g_pIPF = kzalloc(page_size, GFP_ATOMIC);
- if (!g_pIPF)
- goto ipf_fail;
- total_bytes += page_size;
-
- /* Malloc memory for data merging during Level2 Cache flush */
- cache_l2_page_buf = kmalloc(page_size, GFP_ATOMIC);
- if (!cache_l2_page_buf)
- goto cache_l2_page_buf_fail;
- memset(cache_l2_page_buf, 0xff, page_size);
- total_bytes += page_size;
-
- cache_l2_blk_buf = kmalloc(block_size, GFP_ATOMIC);
- if (!cache_l2_blk_buf)
- goto cache_l2_blk_buf_fail;
- memset(cache_l2_blk_buf, 0xff, block_size);
- total_bytes += block_size;
-
- /* Malloc memory for temp buffer */
- g_pTempBuf = kzalloc(Cache.cache_item_size, GFP_ATOMIC);
- if (!g_pTempBuf)
- goto Temp_buf_fail;
- total_bytes += Cache.cache_item_size;
-
- /* Malloc memory for block table blocks */
- mem_size = (1 + LAST_BT_ID - FIRST_BT_ID) * sizeof(u32);
- g_pBTBlocks = kmalloc(mem_size, GFP_ATOMIC);
- if (!g_pBTBlocks)
- goto bt_blocks_fail;
- memset(g_pBTBlocks, 0xff, mem_size);
- total_bytes += mem_size;
-
- /* Malloc memory for function FTL_Check_Block_Table */
- flag_check_blk_table = kmalloc(DeviceInfo.wDataBlockNum, GFP_ATOMIC);
- if (!flag_check_blk_table)
- goto flag_check_blk_table_fail;
- total_bytes += DeviceInfo.wDataBlockNum;
-
- /* Malloc memory for function FTL_Search_Block_Table_IN_Block */
- tmp_buf_search_bt_in_block = kmalloc(page_size, GFP_ATOMIC);
- if (!tmp_buf_search_bt_in_block)
- goto tmp_buf_search_bt_in_block_fail;
- memset(tmp_buf_search_bt_in_block, 0xff, page_size);
- total_bytes += page_size;
-
- mem_size = DeviceInfo.wPageSize - DeviceInfo.wPageDataSize;
- spare_buf_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
- if (!spare_buf_search_bt_in_block)
- goto spare_buf_search_bt_in_block_fail;
- memset(spare_buf_search_bt_in_block, 0xff, mem_size);
- total_bytes += mem_size;
-
- spare_buf_bt_search_bt_in_block = kmalloc(mem_size, GFP_ATOMIC);
- if (!spare_buf_bt_search_bt_in_block)
- goto spare_buf_bt_search_bt_in_block_fail;
- memset(spare_buf_bt_search_bt_in_block, 0xff, mem_size);
- total_bytes += mem_size;
-
- /* Malloc memory for function FTL_Read_Block_Table */
- tmp_buf1_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
- if (!tmp_buf1_read_blk_table)
- goto tmp_buf1_read_blk_table_fail;
- memset(tmp_buf1_read_blk_table, 0xff, page_size);
- total_bytes += page_size;
-
- tmp_buf2_read_blk_table = kmalloc(page_size, GFP_ATOMIC);
- if (!tmp_buf2_read_blk_table)
- goto tmp_buf2_read_blk_table_fail;
- memset(tmp_buf2_read_blk_table, 0xff, page_size);
- total_bytes += page_size;
-
- /* Malloc memory for function FTL_Static_Wear_Leveling */
- flags_static_wear_leveling = kmalloc(DeviceInfo.wDataBlockNum,
- GFP_ATOMIC);
- if (!flags_static_wear_leveling)
- goto flags_static_wear_leveling_fail;
- total_bytes += DeviceInfo.wDataBlockNum;
-
- /* Malloc memory for function FTL_Write_Block_Table_Data */
- if (FTL_Get_Block_Table_Flash_Size_Pages() > 3)
- mem_size = FTL_Get_Block_Table_Flash_Size_Bytes() -
- 2 * DeviceInfo.wPageSize;
- else
- mem_size = DeviceInfo.wPageSize;
- tmp_buf_write_blk_table_data = kmalloc(mem_size, GFP_ATOMIC);
- if (!tmp_buf_write_blk_table_data)
- goto tmp_buf_write_blk_table_data_fail;
- memset(tmp_buf_write_blk_table_data, 0xff, mem_size);
- total_bytes += mem_size;
-
- /* Malloc memory for function FTL_Read_Disturbance */
- tmp_buf_read_disturbance = kmalloc(block_size, GFP_ATOMIC);
- if (!tmp_buf_read_disturbance)
- goto tmp_buf_read_disturbance_fail;
- memset(tmp_buf_read_disturbance, 0xff, block_size);
- total_bytes += block_size;
-
- /* Alloc mem for function NAND_Read_Page_Main_Spare of lld_nand.c */
- buf_read_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
- if (!buf_read_page_main_spare)
- goto buf_read_page_main_spare_fail;
- total_bytes += DeviceInfo.wPageSize;
-
- /* Alloc mem for function NAND_Write_Page_Main_Spare of lld_nand.c */
- buf_write_page_main_spare = kmalloc(DeviceInfo.wPageSize, GFP_ATOMIC);
- if (!buf_write_page_main_spare)
- goto buf_write_page_main_spare_fail;
- total_bytes += DeviceInfo.wPageSize;
-
- /* Alloc mem for function NAND_Read_Page_Spare of lld_nand.c */
- buf_read_page_spare = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
- if (!buf_read_page_spare)
- goto buf_read_page_spare_fail;
- memset(buf_read_page_spare, 0xff, DeviceInfo.wPageSpareSize);
- total_bytes += DeviceInfo.wPageSpareSize;
-
- /* Alloc mem for function NAND_Get_Bad_Block of lld_nand.c */
- buf_get_bad_block = kmalloc(DeviceInfo.wPageSpareSize, GFP_ATOMIC);
- if (!buf_get_bad_block)
- goto buf_get_bad_block_fail;
- memset(buf_get_bad_block, 0xff, DeviceInfo.wPageSpareSize);
- total_bytes += DeviceInfo.wPageSpareSize;
-
-#if CMD_DMA
- g_temp_buf = kmalloc(block_size, GFP_ATOMIC);
- if (!g_temp_buf)
- goto temp_buf_fail;
- memset(g_temp_buf, 0xff, block_size);
- total_bytes += block_size;
-
- /* Malloc memory for copy of block table used in CDMA mode */
- g_pBTStartingCopy = kzalloc(block_table_size, GFP_ATOMIC);
- if (!g_pBTStartingCopy)
- goto bt_starting_copy;
- total_bytes += block_table_size;
-
- g_pWearCounterCopy = (u8 *)(g_pBTStartingCopy +
- DeviceInfo.wDataBlockNum * sizeof(u32));
-
- if (DeviceInfo.MLCDevice)
- g_pReadCounterCopy = (u16 *)(g_pBTStartingCopy +
- DeviceInfo.wDataBlockNum *
- (sizeof(u32) + sizeof(u8)));
-
- /* Malloc memory for block table copies */
- mem_size = 5 * DeviceInfo.wDataBlockNum * sizeof(u32) +
- 5 * DeviceInfo.wDataBlockNum * sizeof(u8);
- if (DeviceInfo.MLCDevice)
- mem_size += 5 * DeviceInfo.wDataBlockNum * sizeof(u16);
- g_pBlockTableCopies = kzalloc(mem_size, GFP_ATOMIC);
- if (!g_pBlockTableCopies)
- goto blk_table_copies_fail;
- total_bytes += mem_size;
- g_pNextBlockTable = g_pBlockTableCopies;
-
- /* Malloc memory for Block Table Delta */
- mem_size = MAX_DESCS * sizeof(struct BTableChangesDelta);
- g_pBTDelta = kzalloc(mem_size, GFP_ATOMIC);
- if (!g_pBTDelta)
- goto bt_delta_fail;
- total_bytes += mem_size;
- g_pBTDelta_Free = g_pBTDelta;
-
- /* Malloc memory for Copy Back Buffers */
- for (j = 0; j < COPY_BACK_BUF_NUM; j++) {
- cp_back_buf_copies[j] = kzalloc(block_size, GFP_ATOMIC);
- if (!cp_back_buf_copies[j])
- goto cp_back_buf_copies_fail;
- total_bytes += block_size;
- }
- cp_back_buf_idx = 0;
-
- /* Malloc memory for pending commands list */
- mem_size = sizeof(struct pending_cmd) * MAX_DESCS;
- info.pcmds = kzalloc(mem_size, GFP_KERNEL);
- if (!info.pcmds)
- goto pending_cmds_buf_fail;
- total_bytes += mem_size;
-
- /* Malloc memory for CDMA descriptor table */
- mem_size = sizeof(struct cdma_descriptor) * MAX_DESCS;
- info.cdma_desc_buf = kzalloc(mem_size, GFP_KERNEL);
- if (!info.cdma_desc_buf)
- goto cdma_desc_buf_fail;
- total_bytes += mem_size;
-
- /* Malloc memory for Memcpy descriptor table */
- mem_size = sizeof(struct memcpy_descriptor) * MAX_DESCS;
- info.memcp_desc_buf = kzalloc(mem_size, GFP_KERNEL);
- if (!info.memcp_desc_buf)
- goto memcp_desc_buf_fail;
- total_bytes += mem_size;
-#endif
-
- nand_dbg_print(NAND_DBG_WARN,
- "Total memory allocated in FTL layer: %d\n", total_bytes);
-
- return PASS;
-
-#if CMD_DMA
-memcp_desc_buf_fail:
- kfree(info.cdma_desc_buf);
-cdma_desc_buf_fail:
- kfree(info.pcmds);
-pending_cmds_buf_fail:
-cp_back_buf_copies_fail:
- j--;
- for (; j >= 0; j--)
- kfree(cp_back_buf_copies[j]);
- kfree(g_pBTDelta);
-bt_delta_fail:
- kfree(g_pBlockTableCopies);
-blk_table_copies_fail:
- kfree(g_pBTStartingCopy);
-bt_starting_copy:
- kfree(g_temp_buf);
-temp_buf_fail:
- kfree(buf_get_bad_block);
-#endif
-
-buf_get_bad_block_fail:
- kfree(buf_read_page_spare);
-buf_read_page_spare_fail:
- kfree(buf_write_page_main_spare);
-buf_write_page_main_spare_fail:
- kfree(buf_read_page_main_spare);
-buf_read_page_main_spare_fail:
- kfree(tmp_buf_read_disturbance);
-tmp_buf_read_disturbance_fail:
- kfree(tmp_buf_write_blk_table_data);
-tmp_buf_write_blk_table_data_fail:
- kfree(flags_static_wear_leveling);
-flags_static_wear_leveling_fail:
- kfree(tmp_buf2_read_blk_table);
-tmp_buf2_read_blk_table_fail:
- kfree(tmp_buf1_read_blk_table);
-tmp_buf1_read_blk_table_fail:
- kfree(spare_buf_bt_search_bt_in_block);
-spare_buf_bt_search_bt_in_block_fail:
- kfree(spare_buf_search_bt_in_block);
-spare_buf_search_bt_in_block_fail:
- kfree(tmp_buf_search_bt_in_block);
-tmp_buf_search_bt_in_block_fail:
- kfree(flag_check_blk_table);
-flag_check_blk_table_fail:
- kfree(g_pBTBlocks);
-bt_blocks_fail:
- kfree(g_pTempBuf);
-Temp_buf_fail:
- kfree(cache_l2_blk_buf);
-cache_l2_blk_buf_fail:
- kfree(cache_l2_page_buf);
-cache_l2_page_buf_fail:
- kfree(g_pIPF);
-ipf_fail:
-cache_item_fail:
- i--;
- for (; i >= 0; i--)
- kfree(Cache.array[i].buf);
- kfree(g_pBlockTable);
-block_table_fail:
- printk(KERN_ERR "Failed to kmalloc memory in %s Line %d.\n",
- __FILE__, __LINE__);
-
- return -ENOMEM;
-}
-
-/* .... */
-static int free_memory(void)
-{
- int i;
-
-#if CMD_DMA
- kfree(info.memcp_desc_buf);
- kfree(info.cdma_desc_buf);
- kfree(info.pcmds);
- for (i = COPY_BACK_BUF_NUM - 1; i >= 0; i--)
- kfree(cp_back_buf_copies[i]);
- kfree(g_pBTDelta);
- kfree(g_pBlockTableCopies);
- kfree(g_pBTStartingCopy);
- kfree(g_temp_buf);
- kfree(buf_get_bad_block);
-#endif
- kfree(buf_read_page_spare);
- kfree(buf_write_page_main_spare);
- kfree(buf_read_page_main_spare);
- kfree(tmp_buf_read_disturbance);
- kfree(tmp_buf_write_blk_table_data);
- kfree(flags_static_wear_leveling);
- kfree(tmp_buf2_read_blk_table);
- kfree(tmp_buf1_read_blk_table);
- kfree(spare_buf_bt_search_bt_in_block);
- kfree(spare_buf_search_bt_in_block);
- kfree(tmp_buf_search_bt_in_block);
- kfree(flag_check_blk_table);
- kfree(g_pBTBlocks);
- kfree(g_pTempBuf);
- kfree(g_pIPF);
- for (i = CACHE_ITEM_NUM - 1; i >= 0; i--)
- kfree(Cache.array[i].buf);
- kfree(g_pBlockTable);
-
- return 0;
-}
-
-static void dump_cache_l2_table(void)
-{
- struct list_head *p;
- struct spectra_l2_cache_list *pnd;
- int n;
-
- n = 0;
- list_for_each(p, &cache_l2.table.list) {
- pnd = list_entry(p, struct spectra_l2_cache_list, list);
- nand_dbg_print(NAND_DBG_WARN, "dump_cache_l2_table node: %d, logical_blk_num: %d\n", n, pnd->logical_blk_num);
-/*
- for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
- if (pnd->pages_array[i] != MAX_U32_VALUE)
- nand_dbg_print(NAND_DBG_WARN, " pages_array[%d]: 0x%x\n", i, pnd->pages_array[i]);
- }
-*/
- n++;
- }
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Init
-* Inputs: none
-* Outputs: PASS=0 / FAIL=1
-* Description: allocates the memory for cache array,
-* important data structures
-* clears the cache array
-* reads the block table from flash into array
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Init(void)
-{
- int i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- Cache.pages_per_item = 1;
- Cache.cache_item_size = 1 * DeviceInfo.wPageDataSize;
-
- if (allocate_memory() != PASS)
- return FAIL;
-
-#if CMD_DMA
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- memcpy((void *)&cache_start_copy, (void *)&Cache,
- sizeof(struct flash_cache_tag));
- memset((void *)&int_cache, -1,
- sizeof(struct flash_cache_delta_list_tag) *
- (MAX_CHANS + MAX_DESCS));
-#endif
- ftl_cmd_cnt = 0;
-#endif
-
- if (FTL_Read_Block_Table() != PASS)
- return FAIL;
-
- /* Init the Level2 Cache data structure */
- for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
- cache_l2.blk_array[i] = MAX_U32_VALUE;
- cache_l2.cur_blk_idx = 0;
- cache_l2.cur_page_num = 0;
- INIT_LIST_HEAD(&cache_l2.table.list);
- cache_l2.table.logical_blk_num = MAX_U32_VALUE;
-
- dump_cache_l2_table();
-
- return 0;
-}
-
-
-#if CMD_DMA
-#if 0
-static void save_blk_table_changes(u16 idx)
-{
- u8 ftl_cmd;
- u32 *pbt = (u32 *)g_pBTStartingCopy;
-
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- u16 id;
- u8 cache_blks;
-
- id = idx - MAX_CHANS;
- if (int_cache[id].item != -1) {
- cache_blks = int_cache[id].item;
- cache_start_copy.array[cache_blks].address =
- int_cache[id].cache.address;
- cache_start_copy.array[cache_blks].changed =
- int_cache[id].cache.changed;
- }
-#endif
-
- ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
-
- while (ftl_cmd <= PendingCMD[idx].Tag) {
- if (p_BTableChangesDelta->ValidFields == 0x01) {
- g_wBlockTableOffset =
- p_BTableChangesDelta->g_wBlockTableOffset;
- } else if (p_BTableChangesDelta->ValidFields == 0x0C) {
- pbt[p_BTableChangesDelta->BT_Index] =
- p_BTableChangesDelta->BT_Entry_Value;
- debug_boundary_error(((
- p_BTableChangesDelta->BT_Index)),
- DeviceInfo.wDataBlockNum, 0);
- } else if (p_BTableChangesDelta->ValidFields == 0x03) {
- g_wBlockTableOffset =
- p_BTableChangesDelta->g_wBlockTableOffset;
- g_wBlockTableIndex =
- p_BTableChangesDelta->g_wBlockTableIndex;
- } else if (p_BTableChangesDelta->ValidFields == 0x30) {
- g_pWearCounterCopy[p_BTableChangesDelta->WC_Index] =
- p_BTableChangesDelta->WC_Entry_Value;
- } else if ((DeviceInfo.MLCDevice) &&
- (p_BTableChangesDelta->ValidFields == 0xC0)) {
- g_pReadCounterCopy[p_BTableChangesDelta->RC_Index] =
- p_BTableChangesDelta->RC_Entry_Value;
- nand_dbg_print(NAND_DBG_DEBUG,
- "In event status setting read counter "
- "GLOB_ftl_cmd_cnt %u Count %u Index %u\n",
- ftl_cmd,
- p_BTableChangesDelta->RC_Entry_Value,
- (unsigned int)p_BTableChangesDelta->RC_Index);
- } else {
- nand_dbg_print(NAND_DBG_DEBUG,
- "This should never occur \n");
- }
- p_BTableChangesDelta += 1;
- ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
- }
-}
-
-static void discard_cmds(u16 n)
-{
- u32 *pbt = (u32 *)g_pBTStartingCopy;
- u8 ftl_cmd;
- unsigned long k;
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- u8 cache_blks;
- u16 id;
-#endif
-
- if ((PendingCMD[n].CMD == WRITE_MAIN_CMD) ||
- (PendingCMD[n].CMD == WRITE_MAIN_SPARE_CMD)) {
- for (k = 0; k < DeviceInfo.wDataBlockNum; k++) {
- if (PendingCMD[n].Block == (pbt[k] & (~BAD_BLOCK)))
- MARK_BLK_AS_DISCARD(pbt[k]);
- }
- }
-
- ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
- while (ftl_cmd <= PendingCMD[n].Tag) {
- p_BTableChangesDelta += 1;
- ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
- }
-
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- id = n - MAX_CHANS;
-
- if (int_cache[id].item != -1) {
- cache_blks = int_cache[id].item;
- if (PendingCMD[n].CMD == MEMCOPY_CMD) {
- if ((cache_start_copy.array[cache_blks].buf <=
- PendingCMD[n].DataDestAddr) &&
- ((cache_start_copy.array[cache_blks].buf +
- Cache.cache_item_size) >
- PendingCMD[n].DataDestAddr)) {
- cache_start_copy.array[cache_blks].address =
- NAND_CACHE_INIT_ADDR;
- cache_start_copy.array[cache_blks].use_cnt =
- 0;
- cache_start_copy.array[cache_blks].changed =
- CLEAR;
- }
- } else {
- cache_start_copy.array[cache_blks].address =
- int_cache[id].cache.address;
- cache_start_copy.array[cache_blks].changed =
- int_cache[id].cache.changed;
- }
- }
-#endif
-}
-
-static void process_cmd_pass(int *first_failed_cmd, u16 idx)
-{
- if (0 == *first_failed_cmd)
- save_blk_table_changes(idx);
- else
- discard_cmds(idx);
-}
-
-static void process_cmd_fail_abort(int *first_failed_cmd,
- u16 idx, int event)
-{
- u32 *pbt = (u32 *)g_pBTStartingCopy;
- u8 ftl_cmd;
- unsigned long i;
- int erase_fail, program_fail;
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- u8 cache_blks;
- u16 id;
-#endif
-
- if (0 == *first_failed_cmd)
- *first_failed_cmd = PendingCMD[idx].SBDCmdIndex;
-
- nand_dbg_print(NAND_DBG_DEBUG, "Uncorrectable error has occurred "
- "while executing %u Command %u accesing Block %u\n",
- (unsigned int)p_BTableChangesDelta->ftl_cmd_cnt,
- PendingCMD[idx].CMD,
- (unsigned int)PendingCMD[idx].Block);
-
- ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
- while (ftl_cmd <= PendingCMD[idx].Tag) {
- p_BTableChangesDelta += 1;
- ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
- }
-
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- id = idx - MAX_CHANS;
-
- if (int_cache[id].item != -1) {
- cache_blks = int_cache[id].item;
- if ((PendingCMD[idx].CMD == WRITE_MAIN_CMD)) {
- cache_start_copy.array[cache_blks].address =
- int_cache[id].cache.address;
- cache_start_copy.array[cache_blks].changed = SET;
- } else if ((PendingCMD[idx].CMD == READ_MAIN_CMD)) {
- cache_start_copy.array[cache_blks].address =
- NAND_CACHE_INIT_ADDR;
- cache_start_copy.array[cache_blks].use_cnt = 0;
- cache_start_copy.array[cache_blks].changed =
- CLEAR;
- } else if (PendingCMD[idx].CMD == ERASE_CMD) {
- /* ? */
- } else if (PendingCMD[idx].CMD == MEMCOPY_CMD) {
- /* ? */
- }
- }
-#endif
-
- erase_fail = (event == EVENT_ERASE_FAILURE) &&
- (PendingCMD[idx].CMD == ERASE_CMD);
-
- program_fail = (event == EVENT_PROGRAM_FAILURE) &&
- ((PendingCMD[idx].CMD == WRITE_MAIN_CMD) ||
- (PendingCMD[idx].CMD == WRITE_MAIN_SPARE_CMD));
-
- if (erase_fail || program_fail) {
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- if (PendingCMD[idx].Block ==
- (pbt[i] & (~BAD_BLOCK)))
- MARK_BLOCK_AS_BAD(pbt[i]);
- }
- }
-}
-
-static void process_cmd(int *first_failed_cmd, u16 idx, int event)
-{
- u8 ftl_cmd;
- int cmd_match = 0;
-
- if (p_BTableChangesDelta->ftl_cmd_cnt == PendingCMD[idx].Tag)
- cmd_match = 1;
-
- if (PendingCMD[idx].Status == CMD_PASS) {
- process_cmd_pass(first_failed_cmd, idx);
- } else if ((PendingCMD[idx].Status == CMD_FAIL) ||
- (PendingCMD[idx].Status == CMD_ABORT)) {
- process_cmd_fail_abort(first_failed_cmd, idx, event);
- } else if ((PendingCMD[idx].Status == CMD_NOT_DONE) &&
- PendingCMD[idx].Tag) {
- nand_dbg_print(NAND_DBG_DEBUG,
- " Command no. %hu is not executed\n",
- (unsigned int)PendingCMD[idx].Tag);
- ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
- while (ftl_cmd <= PendingCMD[idx].Tag) {
- p_BTableChangesDelta += 1;
- ftl_cmd = p_BTableChangesDelta->ftl_cmd_cnt;
- }
- }
-}
-#endif
-
-static void process_cmd(int *first_failed_cmd, u16 idx, int event)
-{
- printk(KERN_ERR "temporary workaround function. "
- "Should not be called! \n");
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Event_Status
-* Inputs: none
-* Outputs: Event Code
- * Description: It is called by SBD after a hardware interrupt signals
- * completion of a command chain.
- * It does the following things:
-* get event status from LLD
-* analyze command chain status
-* determine last command executed
-* analyze results
-* rebuild the block table in case of uncorrectable error
-* return event code
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Event_Status(int *first_failed_cmd)
-{
- int event_code = PASS;
- u16 i_P;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- *first_failed_cmd = 0;
-
- event_code = GLOB_LLD_Event_Status();
-
- switch (event_code) {
- case EVENT_PASS:
- nand_dbg_print(NAND_DBG_DEBUG, "Handling EVENT_PASS\n");
- break;
- case EVENT_UNCORRECTABLE_DATA_ERROR:
- nand_dbg_print(NAND_DBG_DEBUG, "Handling Uncorrectable ECC!\n");
- break;
- case EVENT_PROGRAM_FAILURE:
- case EVENT_ERASE_FAILURE:
- nand_dbg_print(NAND_DBG_WARN, "Handling Ugly case. "
- "Event code: 0x%x\n", event_code);
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta;
- for (i_P = MAX_CHANS; i_P < (ftl_cmd_cnt + MAX_CHANS);
- i_P++)
- process_cmd(first_failed_cmd, i_P, event_code);
- memcpy(g_pBlockTable, g_pBTStartingCopy,
- DeviceInfo.wDataBlockNum * sizeof(u32));
- memcpy(g_pWearCounter, g_pWearCounterCopy,
- DeviceInfo.wDataBlockNum * sizeof(u8));
- if (DeviceInfo.MLCDevice)
- memcpy(g_pReadCounter, g_pReadCounterCopy,
- DeviceInfo.wDataBlockNum * sizeof(u16));
-
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- memcpy((void *)&Cache, (void *)&cache_start_copy,
- sizeof(struct flash_cache_tag));
- memset((void *)&int_cache, -1,
- sizeof(struct flash_cache_delta_list_tag) *
- (MAX_DESCS + MAX_CHANS));
-#endif
- break;
- default:
- nand_dbg_print(NAND_DBG_WARN,
- "Handling unexpected event code - 0x%x\n",
- event_code);
- event_code = ERR;
- break;
- }
-
- memcpy(g_pBTStartingCopy, g_pBlockTable,
- DeviceInfo.wDataBlockNum * sizeof(u32));
- memcpy(g_pWearCounterCopy, g_pWearCounter,
- DeviceInfo.wDataBlockNum * sizeof(u8));
- if (DeviceInfo.MLCDevice)
- memcpy(g_pReadCounterCopy, g_pReadCounter,
- DeviceInfo.wDataBlockNum * sizeof(u16));
-
- g_pBTDelta_Free = g_pBTDelta;
- ftl_cmd_cnt = 0;
- g_pNextBlockTable = g_pBlockTableCopies;
- cp_back_buf_idx = 0;
-
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- memcpy((void *)&cache_start_copy, (void *)&Cache,
- sizeof(struct flash_cache_tag));
- memset((void *)&int_cache, -1,
- sizeof(struct flash_cache_delta_list_tag) *
- (MAX_DESCS + MAX_CHANS));
-#endif
-
- return event_code;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: glob_ftl_execute_cmds
-* Inputs: none
-* Outputs: none
-* Description: pass thru to LLD
-***************************************************************/
-u16 glob_ftl_execute_cmds(void)
-{
- nand_dbg_print(NAND_DBG_TRACE,
- "glob_ftl_execute_cmds: ftl_cmd_cnt %u\n",
- (unsigned int)ftl_cmd_cnt);
- g_SBDCmdIndex = 0;
- return glob_lld_execute_cmds();
-}
-
-#endif
-
-#if !CMD_DMA
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
- * Function: GLOB_FTL_Read_Immediate
-* Inputs: pointer to data
-* address of data
-* Outputs: PASS / FAIL
-* Description: Reads one page of data into RAM directly from flash without
-* using or disturbing cache.It is assumed this function is called
-* with CMD-DMA disabled.
-*****************************************************************/
-int GLOB_FTL_Read_Immediate(u8 *read_data, u64 addr)
-{
- int wResult = FAIL;
- u32 Block;
- u16 Page;
- u32 phy_blk;
- u32 *pbt = (u32 *)g_pBlockTable;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- Block = BLK_FROM_ADDR(addr);
- Page = PAGE_FROM_ADDR(addr, Block);
-
- if (!IS_SPARE_BLOCK(Block))
- return FAIL;
-
- phy_blk = pbt[Block];
- wResult = GLOB_LLD_Read_Page_Main(read_data, phy_blk, Page, 1);
-
- if (DeviceInfo.MLCDevice) {
- g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]++;
- if (g_pReadCounter[phy_blk - DeviceInfo.wSpectraStartBlock]
- >= MAX_READ_COUNTER)
- FTL_Read_Disturbance(phy_blk);
- if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
- }
-
- return wResult;
-}
-#endif
-
-#ifdef SUPPORT_BIG_ENDIAN
-/*********************************************************************
-* Function: FTL_Invert_Block_Table
-* Inputs: none
-* Outputs: none
-* Description: Re-format the block table in ram based on BIG_ENDIAN and
-* LARGE_BLOCKNUM if necessary
-**********************************************************************/
-static void FTL_Invert_Block_Table(void)
-{
- u32 i;
- u32 *pbt = (u32 *)g_pBlockTable;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
-#ifdef SUPPORT_LARGE_BLOCKNUM
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- pbt[i] = INVERTUINT32(pbt[i]);
- g_pWearCounter[i] = INVERTUINT32(g_pWearCounter[i]);
- }
-#else
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- pbt[i] = INVERTUINT16(pbt[i]);
- g_pWearCounter[i] = INVERTUINT16(g_pWearCounter[i]);
- }
-#endif
-}
-#endif
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Flash_Init
-* Inputs: none
-* Outputs: PASS=0 / FAIL=0x01 (based on read ID)
-* Description: The flash controller is initialized
-* The flash device is reset
-* Perform a flash READ ID command to confirm that a
-* valid device is attached and active.
-* The DeviceInfo structure gets filled in
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Flash_Init(void)
-{
- int status = FAIL;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- g_SBDCmdIndex = 0;
-
- status = GLOB_LLD_Flash_Init();
-
- return status;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Inputs: none
-* Outputs: PASS=0 / FAIL=0x01 (based on read ID)
-* Description: The flash controller is released
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Flash_Release(void)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- return GLOB_LLD_Flash_Release();
-}
-
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Cache_Release
-* Inputs: none
-* Outputs: none
- * Description: release all memory allocated in GLOB_FTL_Init
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-void GLOB_FTL_Cache_Release(void)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- free_memory();
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Cache_If_Hit
-* Inputs: Page Address
-* Outputs: Block number/UNHIT BLOCK
-* Description: Determines if the addressed page is in cache
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static u16 FTL_Cache_If_Hit(u64 page_addr)
-{
- u16 item;
- u64 addr;
- int i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- item = UNHIT_CACHE_ITEM;
- for (i = 0; i < CACHE_ITEM_NUM; i++) {
- addr = Cache.array[i].address;
- if ((page_addr >= addr) &&
- (page_addr < (addr + Cache.cache_item_size))) {
- item = i;
- break;
- }
- }
-
- return item;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Calculate_LRU
-* Inputs: None
-* Outputs: None
- * Description: Calculate the least recently used block in the cache and record its
-* index in LRU field.
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static void FTL_Calculate_LRU(void)
-{
- u16 i, bCurrentLRU, bTempCount;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- bCurrentLRU = 0;
- bTempCount = MAX_WORD_VALUE;
-
- for (i = 0; i < CACHE_ITEM_NUM; i++) {
- if (Cache.array[i].use_cnt < bTempCount) {
- bCurrentLRU = i;
- bTempCount = Cache.array[i].use_cnt;
- }
- }
-
- Cache.LRU = bCurrentLRU;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Cache_Read_Page
-* Inputs: pointer to read buffer, logical address and cache item number
-* Outputs: None
-* Description: Read the page from the cached block addressed by blocknumber
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static void FTL_Cache_Read_Page(u8 *data_buf, u64 logic_addr, u16 cache_item)
-{
- u8 *start_addr;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- start_addr = Cache.array[cache_item].buf;
- start_addr += (u32)(((logic_addr - Cache.array[cache_item].address) >>
- DeviceInfo.nBitsInPageDataSize) * DeviceInfo.wPageDataSize);
-
-#if CMD_DMA
- GLOB_LLD_MemCopy_CMD(data_buf, start_addr,
- DeviceInfo.wPageDataSize, 0);
- ftl_cmd_cnt++;
-#else
- memcpy(data_buf, start_addr, DeviceInfo.wPageDataSize);
-#endif
-
- if (Cache.array[cache_item].use_cnt < MAX_WORD_VALUE)
- Cache.array[cache_item].use_cnt++;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Cache_Read_All
-* Inputs: pointer to read buffer,block address
-* Outputs: PASS=0 / FAIL =1
-* Description: It reads pages in cache
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Cache_Read_All(u8 *pData, u64 phy_addr)
-{
- int wResult = PASS;
- u32 Block;
- u32 lba;
- u16 Page;
- u16 PageCount;
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 i;
-
- Block = BLK_FROM_ADDR(phy_addr);
- Page = PAGE_FROM_ADDR(phy_addr, Block);
- PageCount = Cache.pages_per_item;
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "%s, Line %d, Function: %s, Block: 0x%x\n",
- __FILE__, __LINE__, __func__, Block);
-
- lba = 0xffffffff;
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- if ((pbt[i] & (~BAD_BLOCK)) == Block) {
- lba = i;
- if (IS_SPARE_BLOCK(i) || IS_BAD_BLOCK(i) ||
- IS_DISCARDED_BLOCK(i)) {
- /* Add by yunpeng -2008.12.3 */
-#if CMD_DMA
- GLOB_LLD_MemCopy_CMD(pData, g_temp_buf,
- PageCount * DeviceInfo.wPageDataSize, 0);
- ftl_cmd_cnt++;
-#else
- memset(pData, 0xFF,
- PageCount * DeviceInfo.wPageDataSize);
-#endif
- return wResult;
- } else {
- continue; /* break ?? */
- }
- }
- }
-
- if (0xffffffff == lba)
- printk(KERN_ERR "FTL_Cache_Read_All: Block is not found in BT\n");
-
-#if CMD_DMA
- wResult = GLOB_LLD_Read_Page_Main_cdma(pData, Block, Page,
- PageCount, LLD_CMD_FLAG_MODE_CDMA);
- if (DeviceInfo.MLCDevice) {
- g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
- nand_dbg_print(NAND_DBG_DEBUG,
- "Read Counter modified in ftl_cmd_cnt %u"
- " Block %u Counter%u\n",
- ftl_cmd_cnt, (unsigned int)Block,
- g_pReadCounter[Block -
- DeviceInfo.wSpectraStartBlock]);
-
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
- p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
- p_BTableChangesDelta->RC_Index =
- Block - DeviceInfo.wSpectraStartBlock;
- p_BTableChangesDelta->RC_Entry_Value =
- g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock];
- p_BTableChangesDelta->ValidFields = 0xC0;
-
- ftl_cmd_cnt++;
-
- if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
- MAX_READ_COUNTER)
- FTL_Read_Disturbance(Block);
- if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
- } else {
- ftl_cmd_cnt++;
- }
-#else
- wResult = GLOB_LLD_Read_Page_Main(pData, Block, Page, PageCount);
- if (wResult == FAIL)
- return wResult;
-
- if (DeviceInfo.MLCDevice) {
- g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock]++;
- if (g_pReadCounter[Block - DeviceInfo.wSpectraStartBlock] >=
- MAX_READ_COUNTER)
- FTL_Read_Disturbance(Block);
- if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
- }
-#endif
- return wResult;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Cache_Write_All
-* Inputs: pointer to cache in sys memory
-* address of free block in flash
-* Outputs: PASS=0 / FAIL=1
-* Description: writes all the pages of the block in cache to flash
-*
-* NOTE:need to make sure this works ok when cache is limited
-* to a partial block. This is where copy-back would be
-* activated. This would require knowing which pages in the
- * cached block are clean/dirty. Right now we only know if
-* the whole block is clean/dirty.
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Cache_Write_All(u8 *pData, u64 blk_addr)
-{
- u16 wResult = PASS;
- u32 Block;
- u16 Page;
- u16 PageCount;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- nand_dbg_print(NAND_DBG_DEBUG, "This block %d going to be written "
- "on %d\n", cache_block_to_write,
- (u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize));
-
- Block = BLK_FROM_ADDR(blk_addr);
- Page = PAGE_FROM_ADDR(blk_addr, Block);
- PageCount = Cache.pages_per_item;
-
-#if CMD_DMA
- if (FAIL == GLOB_LLD_Write_Page_Main_cdma(pData,
- Block, Page, PageCount)) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, new Bad Block %d generated! "
- "Need Bad Block replacing.\n",
- __FILE__, __LINE__, __func__, Block);
- wResult = FAIL;
- }
- ftl_cmd_cnt++;
-#else
- if (FAIL == GLOB_LLD_Write_Page_Main(pData, Block, Page, PageCount)) {
- nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in %s,"
- " Line %d, Function %s, new Bad Block %d generated!"
- "Need Bad Block replacing.\n",
- __FILE__, __LINE__, __func__, Block);
- wResult = FAIL;
- }
-#endif
- return wResult;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Copy_Block
-* Inputs: source block address
-* Destination block address
-* Outputs: PASS=0 / FAIL=1
-* Description: used only for static wear leveling to move the block
- * containing static data to new blocks (more worn)
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int FTL_Copy_Block(u64 old_blk_addr, u64 blk_addr)
-{
- int i, r1, r2, wResult = PASS;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < DeviceInfo.wPagesPerBlock; i += Cache.pages_per_item) {
- r1 = FTL_Cache_Read_All(g_pTempBuf, old_blk_addr +
- i * DeviceInfo.wPageDataSize);
- r2 = FTL_Cache_Write_All(g_pTempBuf, blk_addr +
- i * DeviceInfo.wPageDataSize);
- if ((ERR == r1) || (FAIL == r2)) {
- wResult = FAIL;
- break;
- }
- }
-
- return wResult;
-}
-
-/* Search the block table to find out the least wear block and then return it */
-static u32 find_least_worn_blk_for_l2_cache(void)
-{
- int i;
- u32 *pbt = (u32 *)g_pBlockTable;
- u8 least_wear_cnt = MAX_BYTE_VALUE;
- u32 least_wear_blk_idx = MAX_U32_VALUE;
- u32 phy_idx;
-
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- if (IS_SPARE_BLOCK(i)) {
- phy_idx = (u32)((~BAD_BLOCK) & pbt[i]);
- if (phy_idx > DeviceInfo.wSpectraEndBlock)
- printk(KERN_ERR "find_least_worn_blk_for_l2_cache: "
- "Too big phy block num (%d)\n", phy_idx);
- if (g_pWearCounter[phy_idx - DeviceInfo.wSpectraStartBlock] < least_wear_cnt) {
- least_wear_cnt = g_pWearCounter[phy_idx - DeviceInfo.wSpectraStartBlock];
- least_wear_blk_idx = i;
- }
- }
- }
-
- nand_dbg_print(NAND_DBG_WARN,
- "find_least_worn_blk_for_l2_cache: "
- "find block %d with least worn counter (%d)\n",
- least_wear_blk_idx, least_wear_cnt);
-
- return least_wear_blk_idx;
-}
-
-
-
-/* Get blocks for Level2 Cache */
-static int get_l2_cache_blks(void)
-{
- int n;
- u32 blk;
- u32 *pbt = (u32 *)g_pBlockTable;
-
- for (n = 0; n < BLK_NUM_FOR_L2_CACHE; n++) {
- blk = find_least_worn_blk_for_l2_cache();
- if (blk >= DeviceInfo.wDataBlockNum) {
- nand_dbg_print(NAND_DBG_WARN,
- "find_least_worn_blk_for_l2_cache: "
- "No enough free NAND blocks (n: %d) for L2 Cache!\n", n);
- return FAIL;
- }
- /* Tag the free block as discard in block table */
- pbt[blk] = (pbt[blk] & (~BAD_BLOCK)) | DISCARD_BLOCK;
- /* Add the free block to the L2 Cache block array */
- cache_l2.blk_array[n] = pbt[blk] & (~BAD_BLOCK);
- }
-
- return PASS;
-}
-
-static int erase_l2_cache_blocks(void)
-{
- int i, ret = PASS;
- u32 pblk, lblk = BAD_BLOCK;
- u64 addr;
- u32 *pbt = (u32 *)g_pBlockTable;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++) {
- pblk = cache_l2.blk_array[i];
-
- /* If the L2 cache block is invalid, then just skip it */
- if (MAX_U32_VALUE == pblk)
- continue;
-
- BUG_ON(pblk > DeviceInfo.wSpectraEndBlock);
-
- addr = (u64)pblk << DeviceInfo.nBitsInBlockDataSize;
- if (PASS == GLOB_FTL_Block_Erase(addr)) {
- /* Get logical block number of the erased block */
- lblk = FTL_Get_Block_Index(pblk);
- BUG_ON(BAD_BLOCK == lblk);
- /* Tag it as free in the block table */
- pbt[lblk] &= (u32)(~DISCARD_BLOCK);
- pbt[lblk] |= (u32)(SPARE_BLOCK);
- } else {
- MARK_BLOCK_AS_BAD(pbt[lblk]);
- ret = ERR;
- }
- }
-
- return ret;
-}
-
-/*
- * Merge the valid data pages in the L2 cache blocks into NAND.
-*/
-static int flush_l2_cache(void)
-{
- struct list_head *p;
- struct spectra_l2_cache_list *pnd, *tmp_pnd;
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 phy_blk, l2_blk;
- u64 addr;
- u16 l2_page;
- int i, ret = PASS;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (list_empty(&cache_l2.table.list)) /* No data to flush */
- return ret;
-
- //dump_cache_l2_table();
-
- if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
-
- list_for_each(p, &cache_l2.table.list) {
- pnd = list_entry(p, struct spectra_l2_cache_list, list);
- if (IS_SPARE_BLOCK(pnd->logical_blk_num) ||
- IS_BAD_BLOCK(pnd->logical_blk_num) ||
- IS_DISCARDED_BLOCK(pnd->logical_blk_num)) {
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
- memset(cache_l2_blk_buf, 0xff, DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize);
- } else {
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d\n", __FILE__, __LINE__);
- phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
- ret = GLOB_LLD_Read_Page_Main(cache_l2_blk_buf,
- phy_blk, 0, DeviceInfo.wPagesPerBlock);
- if (ret == FAIL) {
- printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
- }
- }
-
- for (i = 0; i < DeviceInfo.wPagesPerBlock; i++) {
- if (pnd->pages_array[i] != MAX_U32_VALUE) {
- l2_blk = cache_l2.blk_array[(pnd->pages_array[i] >> 16) & 0xffff];
- l2_page = pnd->pages_array[i] & 0xffff;
- ret = GLOB_LLD_Read_Page_Main(cache_l2_page_buf, l2_blk, l2_page, 1);
- if (ret == FAIL) {
- printk(KERN_ERR "Read NAND page fail in %s, Line %d\n", __FILE__, __LINE__);
- }
- memcpy(cache_l2_blk_buf + i * DeviceInfo.wPageDataSize, cache_l2_page_buf, DeviceInfo.wPageDataSize);
- }
- }
-
- /* Find a free block and tag the original block as discarded */
- addr = (u64)pnd->logical_blk_num << DeviceInfo.nBitsInBlockDataSize;
- ret = FTL_Replace_Block(addr);
- if (ret == FAIL) {
- printk(KERN_ERR "FTL_Replace_Block fail in %s, Line %d\n", __FILE__, __LINE__);
- }
-
- /* Write back the updated data into NAND */
- phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
- if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
- nand_dbg_print(NAND_DBG_WARN,
- "Program NAND block %d fail in %s, Line %d\n",
- phy_blk, __FILE__, __LINE__);
- /* This may not be really a bad block. So just tag it as discarded. */
- /* Then it has a chance to be erased during garbage collection. */
- /* If it is really bad, then the erase will fail and it will be marked */
- /* as bad then. Otherwise it will be marked as free and can be used again */
- MARK_BLK_AS_DISCARD(pbt[pnd->logical_blk_num]);
- /* Find another free block and write it again */
- FTL_Replace_Block(addr);
- phy_blk = pbt[pnd->logical_blk_num] & (~BAD_BLOCK);
- if (FAIL == GLOB_LLD_Write_Page_Main(cache_l2_blk_buf, phy_blk, 0, DeviceInfo.wPagesPerBlock)) {
- printk(KERN_ERR "Failed to write back block %d when flush L2 cache."
- "Some data will be lost!\n", phy_blk);
- MARK_BLOCK_AS_BAD(pbt[pnd->logical_blk_num]);
- }
- } else {
- /* tag the new free block as used block */
- pbt[pnd->logical_blk_num] &= (~SPARE_BLOCK);
- }
- }
-
- /* Destroy the L2 Cache table and free the memory of all nodes */
- list_for_each_entry_safe(pnd, tmp_pnd, &cache_l2.table.list, list) {
- list_del(&pnd->list);
- kfree(pnd);
- }
-
- /* Erase discard L2 cache blocks */
- if (erase_l2_cache_blocks() != PASS)
- nand_dbg_print(NAND_DBG_WARN,
- " Erase L2 cache blocks error in %s, Line %d\n",
- __FILE__, __LINE__);
-
- /* Init the Level2 Cache data structure */
- for (i = 0; i < BLK_NUM_FOR_L2_CACHE; i++)
- cache_l2.blk_array[i] = MAX_U32_VALUE;
- cache_l2.cur_blk_idx = 0;
- cache_l2.cur_page_num = 0;
- INIT_LIST_HEAD(&cache_l2.table.list);
- cache_l2.table.logical_blk_num = MAX_U32_VALUE;
-
- return ret;
-}
-
-/*
- * Write back a changed victim cache item to the Level2 Cache
- * and update the L2 Cache table to map the change.
- * If the L2 Cache is full, then flush it.
-*/
-static int write_back_to_l2_cache(u8 *buf, u64 logical_addr)
-{
- u32 logical_blk_num;
- u16 logical_page_num;
- struct list_head *p;
- struct spectra_l2_cache_list *pnd, *pnd_new;
- u32 node_size;
- int i, found;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- /*
- * If Level2 Cache table is empty, then it means either:
- * 1. This is the first time that the function is called after FTL_Init,
- * or
- * 2. The Level2 Cache has just been flushed
- *
- * So, 'steal' some free blocks from NAND for L2 Cache use
- * by simply marking them as discarded in the block table.
- */
- if (list_empty(&cache_l2.table.list)) {
- BUG_ON(cache_l2.cur_blk_idx != 0);
- BUG_ON(cache_l2.cur_page_num != 0);
- BUG_ON(cache_l2.table.logical_blk_num != MAX_U32_VALUE);
- if (FAIL == get_l2_cache_blks()) {
- GLOB_FTL_Garbage_Collection();
- if (FAIL == get_l2_cache_blks()) {
- printk(KERN_ALERT "Fail to get L2 cache blks!\n");
- return FAIL;
- }
- }
- }
-
- logical_blk_num = BLK_FROM_ADDR(logical_addr);
- logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
- BUG_ON(logical_blk_num == MAX_U32_VALUE);
-
- /* Write the cache item data into the current position of L2 Cache */
-#if CMD_DMA
- /*
- * TODO
- */
-#else
- if (FAIL == GLOB_LLD_Write_Page_Main(buf,
- cache_l2.blk_array[cache_l2.cur_blk_idx],
- cache_l2.cur_page_num, 1)) {
- nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
- "%s, Line %d, new Bad Block %d generated!\n",
- __FILE__, __LINE__,
- cache_l2.blk_array[cache_l2.cur_blk_idx]);
-
- /* TODO: tag the current block as bad and try again */
-
- return FAIL;
- }
-#endif
-
- /*
- * Update the L2 Cache table.
- *
- * First, search the table to see whether the logical block
- * has been mapped. If not, then kmalloc a new node for the
- * logical block, fill data, and then insert it to the list.
- * Otherwise, just update the mapped node directly.
- */
- found = 0;
- list_for_each(p, &cache_l2.table.list) {
- pnd = list_entry(p, struct spectra_l2_cache_list, list);
- if (pnd->logical_blk_num == logical_blk_num) {
- pnd->pages_array[logical_page_num] =
- (cache_l2.cur_blk_idx << 16) |
- cache_l2.cur_page_num;
- found = 1;
- break;
- }
- }
- if (!found) { /* Create new node for the logical block here */
-
- /* The logical pages to physical pages map array is
- * located at the end of struct spectra_l2_cache_list.
- */
- node_size = sizeof(struct spectra_l2_cache_list) +
- sizeof(u32) * DeviceInfo.wPagesPerBlock;
- pnd_new = kmalloc(node_size, GFP_ATOMIC);
- if (!pnd_new) {
- printk(KERN_ERR "Failed to kmalloc in %s Line %d\n",
- __FILE__, __LINE__);
- /*
- * TODO: Need to flush all the L2 cache into NAND ASAP
- * since no memory available here
- */
- }
- pnd_new->logical_blk_num = logical_blk_num;
- for (i = 0; i < DeviceInfo.wPagesPerBlock; i++)
- pnd_new->pages_array[i] = MAX_U32_VALUE;
- pnd_new->pages_array[logical_page_num] =
- (cache_l2.cur_blk_idx << 16) | cache_l2.cur_page_num;
- list_add(&pnd_new->list, &cache_l2.table.list);
- }
-
- /* Advance the current position pointer of the L2 Cache */
- cache_l2.cur_page_num++;
- if (cache_l2.cur_page_num >= DeviceInfo.wPagesPerBlock) {
- cache_l2.cur_blk_idx++;
- if (cache_l2.cur_blk_idx >= BLK_NUM_FOR_L2_CACHE) {
- /* The L2 Cache is full. Need to flush it now */
- nand_dbg_print(NAND_DBG_WARN,
- "L2 Cache is full, will start to flush it\n");
- flush_l2_cache();
- } else {
- cache_l2.cur_page_num = 0;
- }
- }
-
- return PASS;
-}
-
-/*
- * Search in the Level2 Cache table to find the cache item.
- * If found, read the data from the corresponding NAND page of the L2 Cache;
- * otherwise, return FAIL.
- */
-static int search_l2_cache(u8 *buf, u64 logical_addr)
-{
- u32 logical_blk_num;
- u16 logical_page_num;
- struct list_head *p;
- struct spectra_l2_cache_list *pnd;
- u32 tmp = MAX_U32_VALUE;
- u32 phy_blk;
- u16 phy_page;
- int ret = FAIL;
-
- logical_blk_num = BLK_FROM_ADDR(logical_addr);
- logical_page_num = PAGE_FROM_ADDR(logical_addr, logical_blk_num);
-
- list_for_each(p, &cache_l2.table.list) {
- pnd = list_entry(p, struct spectra_l2_cache_list, list);
- if (pnd->logical_blk_num == logical_blk_num) {
- tmp = pnd->pages_array[logical_page_num];
- break;
- }
- }
-
- if (tmp != MAX_U32_VALUE) { /* Found valid map */
- phy_blk = cache_l2.blk_array[(tmp >> 16) & 0xFFFF];
- phy_page = tmp & 0xFFFF;
-#if CMD_DMA
- /* TODO */
-#else
- ret = GLOB_LLD_Read_Page_Main(buf, phy_blk, phy_page, 1);
-#endif
- }
-
- return ret;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Cache_Write_Page
-* Inputs: Pointer to buffer, page address, cache block number
-* Outputs: PASS=0 / FAIL=1
-* Description: It writes the data into the Cache Block
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static void FTL_Cache_Write_Page(u8 *pData, u64 page_addr,
- u8 cache_blk, u16 flag)
-{
- u8 *pDest;
- u64 addr;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- addr = Cache.array[cache_blk].address;
- pDest = Cache.array[cache_blk].buf;
-
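- /*
- * The L1 cache item buffers a contiguous logical range starting at
- * 'addr'; the page being written lands at byte offset
- * (page_addr - addr) within that buffer.
- */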
- pDest += (unsigned long)(page_addr - addr);
- Cache.array[cache_blk].changed = SET;
-#if CMD_DMA
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- int_cache[ftl_cmd_cnt].item = cache_blk;
- int_cache[ftl_cmd_cnt].cache.address =
- Cache.array[cache_blk].address;
- int_cache[ftl_cmd_cnt].cache.changed =
- Cache.array[cache_blk].changed;
-#endif
- GLOB_LLD_MemCopy_CMD(pDest, pData, DeviceInfo.wPageDataSize, flag);
- ftl_cmd_cnt++;
-#else
- memcpy(pDest, pData, DeviceInfo.wPageDataSize);
-#endif
- if (Cache.array[cache_blk].use_cnt < MAX_WORD_VALUE)
- Cache.array[cache_blk].use_cnt++;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Cache_Write
-* Inputs: none
-* Outputs: PASS=0 / FAIL=1
-* Description: It writes the least frequently used Cache block to flash if it
-* has been changed
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Cache_Write(void)
-{
- int i, bResult = PASS;
- u16 bNO, least_count = 0xFFFF;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- FTL_Calculate_LRU();
-
- bNO = Cache.LRU;
- nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: "
- "Least used cache block is %d\n", bNO);
-
- if (Cache.array[bNO].changed != SET)
- return bResult;
-
- nand_dbg_print(NAND_DBG_DEBUG, "FTL_Cache_Write: Cache"
- " Block %d containing logical block %d is dirty\n",
- bNO,
- (u32)(Cache.array[bNO].address >>
- DeviceInfo.nBitsInBlockDataSize));
-#if CMD_DMA
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- int_cache[ftl_cmd_cnt].item = bNO;
- int_cache[ftl_cmd_cnt].cache.address =
- Cache.array[bNO].address;
- int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
-#endif
-#endif
- bResult = write_back_to_l2_cache(Cache.array[bNO].buf,
- Cache.array[bNO].address);
- if (bResult != ERR)
- Cache.array[bNO].changed = CLEAR;
-
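- /*
- * Age the other cache items: subtracting the evicted item's use count
- * from every other item preserves the relative ordering used by
- * FTL_Calculate_LRU() while keeping the counters from growing unbounded.
- */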
- least_count = Cache.array[bNO].use_cnt;
-
- for (i = 0; i < CACHE_ITEM_NUM; i++) {
- if (i == bNO)
- continue;
- if (Cache.array[i].use_cnt > 0)
- Cache.array[i].use_cnt -= least_count;
- }
-
- return bResult;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Cache_Read
-* Inputs: Page address
-* Outputs: PASS=0 / FAIL=1
-* Description: It reads the block from the device into the Cache Block
-* Set the LRU count to 1
-* Mark the Cache Block as clean
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Cache_Read(u64 logical_addr)
-{
- u64 item_addr, phy_addr;
- u16 num;
- int ret;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- num = Cache.LRU; /* The LRU cache item will be overwritten */
-
- item_addr = (u64)GLOB_u64_Div(logical_addr, Cache.cache_item_size) *
- Cache.cache_item_size;
- Cache.array[num].address = item_addr;
- Cache.array[num].use_cnt = 1;
- Cache.array[num].changed = CLEAR;
-
-#if CMD_DMA
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- int_cache[ftl_cmd_cnt].item = num;
- int_cache[ftl_cmd_cnt].cache.address =
- Cache.array[num].address;
- int_cache[ftl_cmd_cnt].cache.changed =
- Cache.array[num].changed;
-#endif
-#endif
- /*
- * Search in L2 Cache. If hit, fill data into L1 Cache item buffer,
- * Otherwise, read it from NAND
- */
- ret = search_l2_cache(Cache.array[num].buf, logical_addr);
- if (PASS == ret) /* Hit in L2 Cache */
- return ret;
-
- /* Compute the physical start address of NAND device according to */
- /* the logical start address of the cache item (LRU cache item) */
- phy_addr = FTL_Get_Physical_Block_Addr(item_addr) +
- GLOB_u64_Remainder(item_addr, 2);
-
- return FTL_Cache_Read_All(Cache.array[num].buf, phy_addr);
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Check_Block_Table
-* Inputs: ?
-* Outputs: PASS=0 / FAIL=1
-* Description: It checks the correctness of each block table entry
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Check_Block_Table(int wOldTable)
-{
- u32 i;
- int wResult = PASS;
- u32 blk_idx;
- u32 *pbt = (u32 *)g_pBlockTable;
- u8 *pFlag = flag_check_blk_table;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (NULL != pFlag) {
- memset(pFlag, FAIL, DeviceInfo.wDataBlockNum);
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- blk_idx = (u32)(pbt[i] & (~BAD_BLOCK));
-
- /*
- * 20081006/KBV - Changed to pFlag[i] reference
- * to avoid buffer overflow
- */
-
- /*
- * 2008-10-20 Yunpeng Note: This change avoids the
- * buffer overflow, but it also changed the behavior of
- * the code, so it should be rewritten later
- */
- if ((blk_idx > DeviceInfo.wSpectraEndBlock) ||
- PASS == pFlag[i]) {
- wResult = FAIL;
- break;
- } else {
- pFlag[i] = PASS;
- }
- }
- }
-
- return wResult;
-}
-
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Write_Block_Table
-* Inputs: force flag
-* Outputs: 0=Block Table was updated. No write done. 1=Block write needs to
-* happen. -1 Error
-* Description: It writes the block table
-* The block table is always mapped to LBA 0, which in turn is
-* mapped to any physical block
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Write_Block_Table(int wForce)
-{
- u32 *pbt = (u32 *)g_pBlockTable;
- int wSuccess = PASS;
- u32 wTempBlockTableIndex;
- u16 bt_pages, new_bt_offset;
- u8 blockchangeoccured = 0;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
-
- if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus)
- return 0;
-
- if (PASS == wForce) {
- g_wBlockTableOffset =
- (u16)(DeviceInfo.wPagesPerBlock - bt_pages);
-#if CMD_DMA
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
- p_BTableChangesDelta->g_wBlockTableOffset =
- g_wBlockTableOffset;
- p_BTableChangesDelta->ValidFields = 0x01;
-#endif
- }
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Inside FTL_Write_Block_Table: block %d Page:%d\n",
- g_wBlockTableIndex, g_wBlockTableOffset);
-
- do {
- new_bt_offset = g_wBlockTableOffset + bt_pages + 1;
- if ((0 == (new_bt_offset % DeviceInfo.wPagesPerBlock)) ||
- (new_bt_offset > DeviceInfo.wPagesPerBlock) ||
- (FAIL == wSuccess)) {
- wTempBlockTableIndex = FTL_Replace_Block_Table();
- if (BAD_BLOCK == wTempBlockTableIndex)
- return ERR;
- if (!blockchangeoccured) {
- bt_block_changed = 1;
- blockchangeoccured = 1;
- }
-
- g_wBlockTableIndex = wTempBlockTableIndex;
- g_wBlockTableOffset = 0;
- pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
-#if CMD_DMA
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->g_wBlockTableOffset =
- g_wBlockTableOffset;
- p_BTableChangesDelta->g_wBlockTableIndex =
- g_wBlockTableIndex;
- p_BTableChangesDelta->ValidFields = 0x03;
-
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free +=
- sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index =
- BLOCK_TABLE_INDEX;
- p_BTableChangesDelta->BT_Entry_Value =
- pbt[BLOCK_TABLE_INDEX];
- p_BTableChangesDelta->ValidFields = 0x0C;
-#endif
- }
-
- wSuccess = FTL_Write_Block_Table_Data();
- if (FAIL == wSuccess)
- MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
- } while (FAIL == wSuccess);
-
- g_cBlockTableStatus = CURRENT_BLOCK_TABLE;
-
- return 1;
-}
-
-static int force_format_nand(void)
-{
- u32 i;
-
- /* Force erase the whole unprotected physical partition of NAND */
- printk(KERN_ALERT "Start to force erase whole NAND device ...\n");
- printk(KERN_ALERT "From physical block %d to %d\n",
- DeviceInfo.wSpectraStartBlock, DeviceInfo.wSpectraEndBlock);
- for (i = DeviceInfo.wSpectraStartBlock; i <= DeviceInfo.wSpectraEndBlock; i++) {
- if (GLOB_LLD_Erase_Block(i))
- printk(KERN_ERR "Failed to force erase NAND block %d\n", i);
- }
- printk(KERN_ALERT "Force Erase ends. Please reboot the system ...\n");
- while (1);
-
- return PASS;
-}
-
-int GLOB_FTL_Flash_Format(void)
-{
- /* return FTL_Format_Flash(1); */
- return force_format_nand();
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Search_Block_Table_IN_Block
-* Inputs: Block Number
-* Pointer to page
-* Outputs: PASS / FAIL
-* Page containing the block table
-* Description: It searches the block table in the block
-* passed as an argument.
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Search_Block_Table_IN_Block(u32 BT_Block,
- u8 BT_Tag, u16 *Page)
-{
- u16 i, j, k;
- u16 Result = PASS;
- u16 Last_IPF = 0;
- u8 BT_Found = 0;
- u8 *tagarray;
- u8 *tempbuf = tmp_buf_search_bt_in_block;
- u8 *pSpareBuf = spare_buf_search_bt_in_block;
- u8 *pSpareBufBTLastPage = spare_buf_bt_search_bt_in_block;
- u8 bt_flag_last_page = 0xFF;
- u8 search_in_previous_pages = 0;
- u16 bt_pages;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Searching for the block table in block %u\n",
- (unsigned int)BT_Block);
-
- bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
-
- for (i = bt_pages; i < DeviceInfo.wPagesPerBlock;
- i += (bt_pages + 1)) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "Searching last IPF: %d\n", i);
- Result = GLOB_LLD_Read_Page_Main_Polling(tempbuf,
- BT_Block, i, 1);
-
- if (0 == memcmp(tempbuf, g_pIPF, DeviceInfo.wPageDataSize)) {
- if ((i + bt_pages + 1) < DeviceInfo.wPagesPerBlock) {
- continue;
- } else {
- search_in_previous_pages = 1;
- Last_IPF = i;
- }
- }
-
- if (!search_in_previous_pages) {
- if (i != bt_pages) {
- i -= (bt_pages + 1);
- Last_IPF = i;
- }
- }
-
- if (0 == Last_IPF)
- break;
-
- if (!search_in_previous_pages) {
- i = i + 1;
- nand_dbg_print(NAND_DBG_DEBUG,
- "Reading the spare area of Block %u Page %u",
- (unsigned int)BT_Block, i);
- Result = GLOB_LLD_Read_Page_Spare(pSpareBuf,
- BT_Block, i, 1);
- nand_dbg_print(NAND_DBG_DEBUG,
- "Reading the spare area of Block %u Page %u",
- (unsigned int)BT_Block, i + bt_pages - 1);
- Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
- BT_Block, i + bt_pages - 1, 1);
-
- k = 0;
- j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
- if (j) {
- for (; k < j; k++) {
- if (tagarray[k] == BT_Tag)
- break;
- }
- }
-
- if (k < j)
- bt_flag = tagarray[k];
- else
- Result = FAIL;
-
- if (Result == PASS) {
- k = 0;
- j = FTL_Extract_Block_Table_Tag(
- pSpareBufBTLastPage, &tagarray);
- if (j) {
- for (; k < j; k++) {
- if (tagarray[k] == BT_Tag)
- break;
- }
- }
-
- if (k < j)
- bt_flag_last_page = tagarray[k];
- else
- Result = FAIL;
-
- if (Result == PASS) {
- if (bt_flag == bt_flag_last_page) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "Block table is found"
- " in page after IPF "
- "at block %d "
- "page %d\n",
- (int)BT_Block, i);
- BT_Found = 1;
- *Page = i;
- g_cBlockTableStatus =
- CURRENT_BLOCK_TABLE;
- break;
- } else {
- Result = FAIL;
- }
- }
- }
- }
-
- if (search_in_previous_pages)
- i = i - bt_pages;
- else
- i = i - (bt_pages + 1);
-
- Result = PASS;
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Reading the spare area of Block %d Page %d",
- (int)BT_Block, i);
-
- Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
- nand_dbg_print(NAND_DBG_DEBUG,
- "Reading the spare area of Block %u Page %u",
- (unsigned int)BT_Block, i + bt_pages - 1);
-
- Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
- BT_Block, i + bt_pages - 1, 1);
-
- k = 0;
- j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
- if (j) {
- for (; k < j; k++) {
- if (tagarray[k] == BT_Tag)
- break;
- }
- }
-
- if (k < j)
- bt_flag = tagarray[k];
- else
- Result = FAIL;
-
- if (Result == PASS) {
- k = 0;
- j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
- &tagarray);
- if (j) {
- for (; k < j; k++) {
- if (tagarray[k] == BT_Tag)
- break;
- }
- }
-
- if (k < j) {
- bt_flag_last_page = tagarray[k];
- } else {
- Result = FAIL;
- break;
- }
-
- if (Result == PASS) {
- if (bt_flag == bt_flag_last_page) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "Block table is found "
- "in page prior to IPF "
- "at block %u page %d\n",
- (unsigned int)BT_Block, i);
- BT_Found = 1;
- *Page = i;
- g_cBlockTableStatus =
- IN_PROGRESS_BLOCK_TABLE;
- break;
- } else {
- Result = FAIL;
- break;
- }
- }
- }
- }
-
- if (Result == FAIL) {
- if ((Last_IPF > bt_pages) && (i < Last_IPF) && (!BT_Found)) {
- BT_Found = 1;
- *Page = i - (bt_pages + 1);
- }
- if ((Last_IPF == bt_pages) && (i < Last_IPF) && (!BT_Found))
- goto func_return;
- }
-
- if (Last_IPF == 0) {
- i = 0;
- Result = PASS;
- nand_dbg_print(NAND_DBG_DEBUG, "Reading the spare area of "
- "Block %u Page %u", (unsigned int)BT_Block, i);
-
- Result = GLOB_LLD_Read_Page_Spare(pSpareBuf, BT_Block, i, 1);
- nand_dbg_print(NAND_DBG_DEBUG,
- "Reading the spare area of Block %u Page %u",
- (unsigned int)BT_Block, i + bt_pages - 1);
- Result = GLOB_LLD_Read_Page_Spare(pSpareBufBTLastPage,
- BT_Block, i + bt_pages - 1, 1);
-
- k = 0;
- j = FTL_Extract_Block_Table_Tag(pSpareBuf, &tagarray);
- if (j) {
- for (; k < j; k++) {
- if (tagarray[k] == BT_Tag)
- break;
- }
- }
-
- if (k < j)
- bt_flag = tagarray[k];
- else
- Result = FAIL;
-
- if (Result == PASS) {
- k = 0;
- j = FTL_Extract_Block_Table_Tag(pSpareBufBTLastPage,
- &tagarray);
- if (j) {
- for (; k < j; k++) {
- if (tagarray[k] == BT_Tag)
- break;
- }
- }
-
- if (k < j)
- bt_flag_last_page = tagarray[k];
- else
- Result = FAIL;
-
- if (Result == PASS) {
- if (bt_flag == bt_flag_last_page) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "Block table is found "
- "in page after IPF at "
- "block %u page %u\n",
- (unsigned int)BT_Block,
- (unsigned int)i);
- BT_Found = 1;
- *Page = i;
- g_cBlockTableStatus =
- CURRENT_BLOCK_TABLE;
- goto func_return;
- } else {
- Result = FAIL;
- }
- }
- }
-
- if (Result == FAIL)
- goto func_return;
- }
-func_return:
- return Result;
-}
-
-u8 *get_blk_table_start_addr(void)
-{
- return g_pBlockTable;
-}
-
-unsigned long get_blk_table_len(void)
-{
- return DeviceInfo.wDataBlockNum * sizeof(u32);
-}
-
-u8 *get_wear_leveling_table_start_addr(void)
-{
- return g_pWearCounter;
-}
-
-unsigned long get_wear_leveling_table_len(void)
-{
- return DeviceInfo.wDataBlockNum * sizeof(u8);
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Read_Block_Table
-* Inputs: none
-* Outputs: PASS / FAIL
-* Description: read the flash spare area and find a block containing the
-* most recent block table (having the largest block_table_counter).
-* Find the last written Block table in this block.
-* Check the correctness of Block Table
-* If CDMA is enabled, this function is called in
-* polling mode.
-* We don't need to store changes in Block table in this
-* function as it is called only at initialization
-*
-* Note: Currently this function is called at initialization
-* before any read/erase/write command issued to flash so,
-* there is no need to wait for CDMA list to complete as of now
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Read_Block_Table(void)
-{
- u16 i = 0;
- int k, j;
- u8 *tempBuf, *tagarray;
- int wResult = FAIL;
- int status = FAIL;
- u8 block_table_found = 0;
- int search_result;
- u32 Block;
- u16 Page = 0;
- u16 PageCount;
- u16 bt_pages;
- int wBytesCopied = 0, tempvar;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- tempBuf = tmp_buf1_read_blk_table;
- bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
-
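- /*
- * Scan page 0 of every Spectra block: a block-table block carries a
- * tag in its spare area that is repeated in byte 3 of the page data.
- * Record which block holds each tag ID in g_pBTBlocks[].
- */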
- for (j = DeviceInfo.wSpectraStartBlock;
- j <= (int)DeviceInfo.wSpectraEndBlock;
- j++) {
- status = GLOB_LLD_Read_Page_Spare(tempBuf, j, 0, 1);
- k = 0;
- i = FTL_Extract_Block_Table_Tag(tempBuf, &tagarray);
- if (i) {
- status = GLOB_LLD_Read_Page_Main_Polling(tempBuf,
- j, 0, 1);
- for (; k < i; k++) {
- if (tagarray[k] == tempBuf[3])
- break;
- }
- }
-
- if (k < i)
- k = tagarray[k];
- else
- continue;
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Block table is contained in Block %d %d\n",
- (unsigned int)j, (unsigned int)k);
-
- if (g_pBTBlocks[k-FIRST_BT_ID] == BTBLOCK_INVAL) {
- g_pBTBlocks[k-FIRST_BT_ID] = j;
- block_table_found = 1;
- } else {
- printk(KERN_ERR "FTL_Read_Block_Table - "
- "This should never happen. "
- "Two block tables have the same counter %u!\n", k);
- }
- }
-
- if (block_table_found) {
- if (g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL &&
- g_pBTBlocks[LAST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) {
- j = LAST_BT_ID;
- while ((j > FIRST_BT_ID) &&
- (g_pBTBlocks[j - FIRST_BT_ID] != BTBLOCK_INVAL))
- j--;
- if (j == FIRST_BT_ID) {
- j = LAST_BT_ID;
- last_erased = LAST_BT_ID;
- } else {
- last_erased = (u8)j + 1;
- while ((j > FIRST_BT_ID) && (BTBLOCK_INVAL ==
- g_pBTBlocks[j - FIRST_BT_ID]))
- j--;
- }
- } else {
- j = FIRST_BT_ID;
- while (g_pBTBlocks[j - FIRST_BT_ID] == BTBLOCK_INVAL)
- j++;
- last_erased = (u8)j;
- while ((j < LAST_BT_ID) && (BTBLOCK_INVAL !=
- g_pBTBlocks[j - FIRST_BT_ID]))
- j++;
- if (g_pBTBlocks[j-FIRST_BT_ID] == BTBLOCK_INVAL)
- j--;
- }
-
- if (last_erased > j)
- j += (1 + LAST_BT_ID - FIRST_BT_ID);
-
- for (; (j >= last_erased) && (FAIL == wResult); j--) {
- i = (j - FIRST_BT_ID) %
- (1 + LAST_BT_ID - FIRST_BT_ID);
- search_result =
- FTL_Search_Block_Table_IN_Block(g_pBTBlocks[i],
- i + FIRST_BT_ID, &Page);
- if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
- block_table_found = 0;
-
- while ((search_result == PASS) && (FAIL == wResult)) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "FTL_Read_Block_Table:"
- "Block: %u Page: %u "
- "contains block table\n",
- (unsigned int)g_pBTBlocks[i],
- (unsigned int)Page);
-
- tempBuf = tmp_buf2_read_blk_table;
-
- for (k = 0; k < bt_pages; k++) {
- Block = g_pBTBlocks[i];
- PageCount = 1;
-
- status =
- GLOB_LLD_Read_Page_Main_Polling(
- tempBuf, Block, Page, PageCount);
-
- tempvar = k ? 0 : 4;
-
- wBytesCopied +=
- FTL_Copy_Block_Table_From_Flash(
- tempBuf + tempvar,
- DeviceInfo.wPageDataSize - tempvar,
- wBytesCopied);
-
- Page++;
- }
-
- wResult = FTL_Check_Block_Table(FAIL);
- if (FAIL == wResult) {
- block_table_found = 0;
- if (Page > bt_pages)
- Page -= ((bt_pages<<1) + 1);
- else
- search_result = FAIL;
- }
- }
- }
- }
-
- if (PASS == wResult) {
- if (!block_table_found)
- FTL_Execute_SPL_Recovery();
-
- if (g_cBlockTableStatus == IN_PROGRESS_BLOCK_TABLE)
- g_wBlockTableOffset = (u16)Page + 1;
- else
- g_wBlockTableOffset = (u16)Page - bt_pages;
-
- g_wBlockTableIndex = (u32)g_pBTBlocks[i];
-
-#if CMD_DMA
- if (DeviceInfo.MLCDevice)
- memcpy(g_pBTStartingCopy, g_pBlockTable,
- DeviceInfo.wDataBlockNum * sizeof(u32)
- + DeviceInfo.wDataBlockNum * sizeof(u8)
- + DeviceInfo.wDataBlockNum * sizeof(u16));
- else
- memcpy(g_pBTStartingCopy, g_pBlockTable,
- DeviceInfo.wDataBlockNum * sizeof(u32)
- + DeviceInfo.wDataBlockNum * sizeof(u8));
-#endif
- }
-
- if (FAIL == wResult)
- printk(KERN_ERR "Yunpeng - "
- "Cannot find a valid spectra block table!\n");
-
-#if AUTO_FORMAT_FLASH
- if (FAIL == wResult) {
- nand_dbg_print(NAND_DBG_DEBUG, "doing auto-format\n");
- wResult = FTL_Format_Flash(0);
- }
-#endif
-
- return wResult;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Get_Page_Num
-* Inputs: Size in bytes
-* Outputs: Size in pages
-* Description: It calculates the pages required for the length passed
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static u32 FTL_Get_Page_Num(u64 length)
-{
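- /*
- * Round the byte length up to whole pages; e.g. assuming a 2KB data
- * page (nBitsInPageDataSize == 11), a 5000-byte length needs 3 pages.
- */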
- return (u32)((length >> DeviceInfo.nBitsInPageDataSize) +
- (GLOB_u64_Remainder(length, 1) > 0 ? 1 : 0));
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Get_Physical_Block_Addr
-* Inputs: Block Address (byte format)
-* Outputs: Physical address of the block.
-* Description: It translates LBA to PBA by returning address stored
-* at the LBA location in the block table
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static u64 FTL_Get_Physical_Block_Addr(u64 logical_addr)
-{
- u32 *pbt;
- u64 physical_addr;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- pbt = (u32 *)g_pBlockTable;
- physical_addr = (u64) DeviceInfo.wBlockDataSize *
- (pbt[BLK_FROM_ADDR(logical_addr)] & (~BAD_BLOCK));
-
- return physical_addr;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Get_Block_Index
-* Inputs: Physical Block no.
-* Outputs: Logical block no. /BAD_BLOCK
-* Description: It returns the logical block no. for the PBA passed
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static u32 FTL_Get_Block_Index(u32 wBlockNum)
-{
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
- if (wBlockNum == (pbt[i] & (~BAD_BLOCK)))
- return i;
-
- return BAD_BLOCK;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Wear_Leveling
-* Inputs: none
-* Outputs: PASS=0
-* Description: This is static wear leveling (done by explicit call)
-* do complete static wear leveling
-* do complete garbage collection
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Wear_Leveling(void)
-{
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- FTL_Static_Wear_Leveling();
- GLOB_FTL_Garbage_Collection();
-
- return PASS;
-}
-
-static void find_least_most_worn(u8 *chg,
- u32 *least_idx, u8 *least_cnt,
- u32 *most_idx, u8 *most_cnt)
-{
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 idx;
- u8 cnt;
- int i;
-
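- /*
- * Pick two swap candidates from the block table: the least worn block
- * that currently holds data and the most worn block that is still
- * spare. The caller swaps them if the wear gap exceeds the gate.
- */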
- for (i = BLOCK_TABLE_INDEX + 1; i < DeviceInfo.wDataBlockNum; i++) {
- if (IS_BAD_BLOCK(i) || PASS == chg[i])
- continue;
-
- idx = (u32) ((~BAD_BLOCK) & pbt[i]);
- cnt = g_pWearCounter[idx - DeviceInfo.wSpectraStartBlock];
-
- if (IS_SPARE_BLOCK(i)) {
- if (cnt > *most_cnt) {
- *most_cnt = cnt;
- *most_idx = idx;
- }
- }
-
- if (IS_DATA_BLOCK(i)) {
- if (cnt < *least_cnt) {
- *least_cnt = cnt;
- *least_idx = idx;
- }
- }
-
- if (PASS == chg[*most_idx] || PASS == chg[*least_idx]) {
- debug_boundary_error(*most_idx,
- DeviceInfo.wDataBlockNum, 0);
- debug_boundary_error(*least_idx,
- DeviceInfo.wDataBlockNum, 0);
- continue;
- }
- }
-}
-
-static int move_blks_for_wear_leveling(u8 *chg,
- u32 *least_idx, u32 *rep_blk_num, int *result)
-{
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 rep_blk;
- int j, ret_cp_blk, ret_erase;
- int ret = PASS;
-
- chg[*least_idx] = PASS;
- debug_boundary_error(*least_idx, DeviceInfo.wDataBlockNum, 0);
-
- rep_blk = FTL_Replace_MWBlock();
- if (rep_blk != BAD_BLOCK) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "More than two spare blocks exist, so do the replacement\n");
- nand_dbg_print(NAND_DBG_DEBUG, "Block Replaced is %d\n",
- rep_blk);
-
- chg[rep_blk] = PASS;
-
- if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
-
- for (j = 0; j < RETRY_TIMES; j++) {
- ret_cp_blk = FTL_Copy_Block((u64)(*least_idx) *
- DeviceInfo.wBlockDataSize,
- (u64)rep_blk * DeviceInfo.wBlockDataSize);
- if (FAIL == ret_cp_blk) {
- ret_erase = GLOB_FTL_Block_Erase((u64)rep_blk
- * DeviceInfo.wBlockDataSize);
- if (FAIL == ret_erase)
- MARK_BLOCK_AS_BAD(pbt[rep_blk]);
- } else {
- nand_dbg_print(NAND_DBG_DEBUG,
- "FTL_Copy_Block == OK\n");
- break;
- }
- }
-
- if (j < RETRY_TIMES) {
- u32 tmp;
- u32 old_idx = FTL_Get_Block_Index(*least_idx);
- u32 rep_idx = FTL_Get_Block_Index(rep_blk);
- tmp = (u32)(DISCARD_BLOCK | pbt[old_idx]);
- pbt[old_idx] = (u32)((~SPARE_BLOCK) &
- pbt[rep_idx]);
- pbt[rep_idx] = tmp;
-#if CMD_DMA
- p_BTableChangesDelta = (struct BTableChangesDelta *)
- g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index = old_idx;
- p_BTableChangesDelta->BT_Entry_Value = pbt[old_idx];
- p_BTableChangesDelta->ValidFields = 0x0C;
-
- p_BTableChangesDelta = (struct BTableChangesDelta *)
- g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index = rep_idx;
- p_BTableChangesDelta->BT_Entry_Value = pbt[rep_idx];
- p_BTableChangesDelta->ValidFields = 0x0C;
-#endif
- } else {
- pbt[FTL_Get_Block_Index(rep_blk)] |= BAD_BLOCK;
-#if CMD_DMA
- p_BTableChangesDelta = (struct BTableChangesDelta *)
- g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index =
- FTL_Get_Block_Index(rep_blk);
- p_BTableChangesDelta->BT_Entry_Value =
- pbt[FTL_Get_Block_Index(rep_blk)];
- p_BTableChangesDelta->ValidFields = 0x0C;
-#endif
- *result = FAIL;
- ret = FAIL;
- }
-
- if (((*rep_blk_num)++) > WEAR_LEVELING_BLOCK_NUM)
- ret = FAIL;
- } else {
- printk(KERN_ERR "Fewer than 3 spare blocks exist, so quit\n");
- ret = FAIL;
- }
-
- return ret;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Static_Wear_Leveling
-* Inputs: none
-* Outputs: PASS=0 / FAIL=1
-* Description: This is static wear leveling (done by explicit call)
-* search for the most and least worn blocks
-* if the difference > WEAR_LEVELING_GATE:
-* update the block table with the exchange
-* mark the block table in flash as IN_PROGRESS
-* copy the flash block
-* the caller should handle GC clean up after calling this function
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int FTL_Static_Wear_Leveling(void)
-{
- u8 most_worn_cnt;
- u8 least_worn_cnt;
- u32 most_worn_idx;
- u32 least_worn_idx;
- int result = PASS;
- int go_on = PASS;
- u32 replaced_blks = 0;
- u8 *chang_flag = flags_static_wear_leveling;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (!chang_flag)
- return FAIL;
-
- memset(chang_flag, FAIL, DeviceInfo.wDataBlockNum);
- while (go_on == PASS) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "starting static wear leveling\n");
- most_worn_cnt = 0;
- least_worn_cnt = 0xFF;
- least_worn_idx = BLOCK_TABLE_INDEX;
- most_worn_idx = BLOCK_TABLE_INDEX;
-
- find_least_most_worn(chang_flag, &least_worn_idx,
- &least_worn_cnt, &most_worn_idx, &most_worn_cnt);
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Used and least worn is block %u, whose count is %u\n",
- (unsigned int)least_worn_idx,
- (unsigned int)least_worn_cnt);
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Free and most worn is block %u, whose count is %u\n",
- (unsigned int)most_worn_idx,
- (unsigned int)most_worn_cnt);
-
- if ((most_worn_cnt > least_worn_cnt) &&
- (most_worn_cnt - least_worn_cnt > WEAR_LEVELING_GATE))
- go_on = move_blks_for_wear_leveling(chang_flag,
- &least_worn_idx, &replaced_blks, &result);
- else
- go_on = FAIL;
- }
-
- return result;
-}
-
-#if CMD_DMA
-static int do_garbage_collection(u32 discard_cnt)
-{
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 pba;
- u8 bt_block_erased = 0;
- int i, cnt, ret = FAIL;
- u64 addr;
-
- i = 0;
- while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0) &&
- ((ftl_cmd_cnt + 28) < 256)) {
- if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
- (pbt[i] & DISCARD_BLOCK)) {
- if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
-
- addr = FTL_Get_Physical_Block_Addr((u64)i *
- DeviceInfo.wBlockDataSize);
- pba = BLK_FROM_ADDR(addr);
-
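- /*
- * A discarded block that currently holds a block-table copy is not
- * erased here; it is left for the BT garbage collection path.
- */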
- for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
- if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "GC will erase BT block %u\n",
- (unsigned int)pba);
- discard_cnt--;
- i++;
- bt_block_erased = 1;
- break;
- }
- }
-
- if (bt_block_erased) {
- bt_block_erased = 0;
- continue;
- }
-
- addr = FTL_Get_Physical_Block_Addr((u64)i *
- DeviceInfo.wBlockDataSize);
-
- if (PASS == GLOB_FTL_Block_Erase(addr)) {
- pbt[i] &= (u32)(~DISCARD_BLOCK);
- pbt[i] |= (u32)(SPARE_BLOCK);
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)
- g_pBTDelta_Free;
- g_pBTDelta_Free +=
- sizeof(struct BTableChangesDelta);
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt - 1;
- p_BTableChangesDelta->BT_Index = i;
- p_BTableChangesDelta->BT_Entry_Value = pbt[i];
- p_BTableChangesDelta->ValidFields = 0x0C;
- discard_cnt--;
- ret = PASS;
- } else {
- MARK_BLOCK_AS_BAD(pbt[i]);
- }
- }
-
- i++;
- }
-
- return ret;
-}
-
-#else
-static int do_garbage_collection(u32 discard_cnt)
-{
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 pba;
- u8 bt_block_erased = 0;
- int i, cnt, ret = FAIL;
- u64 addr;
-
- i = 0;
- while ((i < DeviceInfo.wDataBlockNum) && (discard_cnt > 0)) {
- if (((pbt[i] & BAD_BLOCK) != BAD_BLOCK) &&
- (pbt[i] & DISCARD_BLOCK)) {
- if (IN_PROGRESS_BLOCK_TABLE != g_cBlockTableStatus) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
-
- addr = FTL_Get_Physical_Block_Addr((u64)i *
- DeviceInfo.wBlockDataSize);
- pba = BLK_FROM_ADDR(addr);
-
- for (cnt = FIRST_BT_ID; cnt <= LAST_BT_ID; cnt++) {
- if (pba == g_pBTBlocks[cnt - FIRST_BT_ID]) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "GC will erase BT block %d\n",
- pba);
- discard_cnt--;
- i++;
- bt_block_erased = 1;
- break;
- }
- }
-
- if (bt_block_erased) {
- bt_block_erased = 0;
- continue;
- }
-
- /* If the discard block is L2 cache block, then just skip it */
- for (cnt = 0; cnt < BLK_NUM_FOR_L2_CACHE; cnt++) {
- if (cache_l2.blk_array[cnt] == pba) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "GC will erase L2 cache blk %d\n",
- pba);
- break;
- }
- }
- if (cnt < BLK_NUM_FOR_L2_CACHE) { /* Skip it */
- discard_cnt--;
- i++;
- continue;
- }
-
- addr = FTL_Get_Physical_Block_Addr((u64)i *
- DeviceInfo.wBlockDataSize);
-
- if (PASS == GLOB_FTL_Block_Erase(addr)) {
- pbt[i] &= (u32)(~DISCARD_BLOCK);
- pbt[i] |= (u32)(SPARE_BLOCK);
- discard_cnt--;
- ret = PASS;
- } else {
- MARK_BLOCK_AS_BAD(pbt[i]);
- }
- }
-
- i++;
- }
-
- return ret;
-}
-#endif
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Garbage_Collection
-* Inputs: none
-* Outputs: PASS / FAIL (returns the number of un-erased blocks)
-* Description: search the block table for all discarded blocks to erase
-* for each discarded block:
-* set the flash block to IN_PROGRESS
-* erase the block
-* update the block table
-* write the block table to flash
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Garbage_Collection(void)
-{
- u32 i;
- u32 wDiscard = 0;
- int wResult = FAIL;
- u32 *pbt = (u32 *)g_pBlockTable;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (GC_Called) {
- printk(KERN_ALERT "GLOB_FTL_Garbage_Collection() "
- "has been re-entered! Exit.\n");
- return PASS;
- }
-
- GC_Called = 1;
-
- GLOB_FTL_BT_Garbage_Collection();
-
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- if (IS_DISCARDED_BLOCK(i))
- wDiscard++;
- }
-
- if (wDiscard <= 0) {
- GC_Called = 0;
- return wResult;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Found %d discarded blocks\n", wDiscard);
-
- FTL_Write_Block_Table(FAIL);
-
- wResult = do_garbage_collection(wDiscard);
-
- FTL_Write_Block_Table(FAIL);
-
- GC_Called = 0;
-
- return wResult;
-}
-
-
-#if CMD_DMA
-static int do_bt_garbage_collection(void)
-{
- u32 pba, lba;
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
- u64 addr;
- int i, ret = FAIL;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (BT_GC_Called)
- return PASS;
-
- BT_GC_Called = 1;
-
- for (i = last_erased; (i <= LAST_BT_ID) &&
- (g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
- FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL) &&
- ((ftl_cmd_cnt + 28)) < 256; i++) {
- pba = pBTBlocksNode[i - FIRST_BT_ID];
- lba = FTL_Get_Block_Index(pba);
- nand_dbg_print(NAND_DBG_DEBUG,
- "do_bt_garbage_collection: pba %d, lba %d\n",
- pba, lba);
- nand_dbg_print(NAND_DBG_DEBUG,
- "Block Table Entry: %d", pbt[lba]);
-
- if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
- (pbt[lba] & DISCARD_BLOCK)) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "do_bt_garbage_collection_cdma: "
- "Erasing Block tables present in block %d\n",
- pba);
- addr = FTL_Get_Physical_Block_Addr((u64)lba *
- DeviceInfo.wBlockDataSize);
- if (PASS == GLOB_FTL_Block_Erase(addr)) {
- pbt[lba] &= (u32)(~DISCARD_BLOCK);
- pbt[lba] |= (u32)(SPARE_BLOCK);
-
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)
- g_pBTDelta_Free;
- g_pBTDelta_Free +=
- sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt - 1;
- p_BTableChangesDelta->BT_Index = lba;
- p_BTableChangesDelta->BT_Entry_Value =
- pbt[lba];
-
- p_BTableChangesDelta->ValidFields = 0x0C;
-
- ret = PASS;
- pBTBlocksNode[last_erased - FIRST_BT_ID] =
- BTBLOCK_INVAL;
- nand_dbg_print(NAND_DBG_DEBUG,
- "resetting bt entry at index %d "
- "value %d\n", i,
- pBTBlocksNode[i - FIRST_BT_ID]);
- if (last_erased == LAST_BT_ID)
- last_erased = FIRST_BT_ID;
- else
- last_erased++;
- } else {
- MARK_BLOCK_AS_BAD(pbt[lba]);
- }
- }
- }
-
- BT_GC_Called = 0;
-
- return ret;
-}
-
-#else
-static int do_bt_garbage_collection(void)
-{
- u32 pba, lba;
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 *pBTBlocksNode = (u32 *)g_pBTBlocks;
- u64 addr;
- int i, ret = FAIL;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (BT_GC_Called)
- return PASS;
-
- BT_GC_Called = 1;
-
- for (i = last_erased; (i <= LAST_BT_ID) &&
- (g_pBTBlocks[((i + 2) % (1 + LAST_BT_ID - FIRST_BT_ID)) +
- FIRST_BT_ID - FIRST_BT_ID] != BTBLOCK_INVAL); i++) {
- pba = pBTBlocksNode[i - FIRST_BT_ID];
- lba = FTL_Get_Block_Index(pba);
- nand_dbg_print(NAND_DBG_DEBUG,
- "do_bt_garbage_collection: pba %d, lba %d\n",
- pba, lba);
- nand_dbg_print(NAND_DBG_DEBUG,
- "Block Table Entry: %d", pbt[lba]);
-
- if (((pbt[lba] & BAD_BLOCK) != BAD_BLOCK) &&
- (pbt[lba] & DISCARD_BLOCK)) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "do_bt_garbage_collection: "
- "Erasing Block tables present in block %d\n",
- pba);
- addr = FTL_Get_Physical_Block_Addr((u64)lba *
- DeviceInfo.wBlockDataSize);
- if (PASS == GLOB_FTL_Block_Erase(addr)) {
- pbt[lba] &= (u32)(~DISCARD_BLOCK);
- pbt[lba] |= (u32)(SPARE_BLOCK);
- ret = PASS;
- pBTBlocksNode[last_erased - FIRST_BT_ID] =
- BTBLOCK_INVAL;
- nand_dbg_print(NAND_DBG_DEBUG,
- "resetting bt entry at index %d "
- "value %d\n", i,
- pBTBlocksNode[i - FIRST_BT_ID]);
- if (last_erased == LAST_BT_ID)
- last_erased = FIRST_BT_ID;
- else
- last_erased++;
- } else {
- MARK_BLOCK_AS_BAD(pbt[lba]);
- }
- }
- }
-
- BT_GC_Called = 0;
-
- return ret;
-}
-
-#endif
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_BT_Garbage_Collection
-* Inputs: none
-* Outputs: PASS / FAIL (returns the number of un-erased blocks)
-* Description: Erases discarded blocks containing Block table
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_BT_Garbage_Collection(void)
-{
- return do_bt_garbage_collection();
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Replace_OneBlock
-* Inputs: Block number 1
-* Block number 2
-* Outputs: Replaced Block Number
-* Description: Interchange block table entries at wBlockNum and wReplaceNum
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static u32 FTL_Replace_OneBlock(u32 blk, u32 rep_blk)
-{
- u32 tmp_blk;
- u32 replace_node = BAD_BLOCK;
- u32 *pbt = (u32 *)g_pBlockTable;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (rep_blk != BAD_BLOCK) {
- if (IS_BAD_BLOCK(blk))
- tmp_blk = pbt[blk];
- else
- tmp_blk = DISCARD_BLOCK | (~SPARE_BLOCK & pbt[blk]);
-
- replace_node = (u32) ((~SPARE_BLOCK) & pbt[rep_blk]);
- pbt[blk] = replace_node;
- pbt[rep_blk] = tmp_blk;
-
-#if CMD_DMA
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index = blk;
- p_BTableChangesDelta->BT_Entry_Value = pbt[blk];
-
- p_BTableChangesDelta->ValidFields = 0x0C;
-
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index = rep_blk;
- p_BTableChangesDelta->BT_Entry_Value = pbt[rep_blk];
- p_BTableChangesDelta->ValidFields = 0x0C;
-#endif
- }
-
- return replace_node;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Write_Block_Table_Data
-* Inputs: Block table size in pages
-* Outputs: PASS=0 / FAIL=1
-* Description: Write block table data in flash
-* If first page and last page
-* Write data+BT flag
-* else
-* Write data
-* The BT flag is a counter. Its value is incremented each time the
-* block table is written to a new Block
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Write_Block_Table_Data(void)
-{
- u64 dwBlockTableAddr, pTempAddr;
- u32 Block;
- u16 Page, PageCount;
- u8 *tempBuf = tmp_buf_write_blk_table_data;
- int wBytesCopied;
- u16 bt_pages;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- dwBlockTableAddr =
- (u64)((u64)g_wBlockTableIndex * DeviceInfo.wBlockDataSize +
- (u64)g_wBlockTableOffset * DeviceInfo.wPageDataSize);
- pTempAddr = dwBlockTableAddr;
-
- bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
-
- nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: "
- "page= %d BlockTableIndex= %d "
- "BlockTableOffset=%d\n", bt_pages,
- g_wBlockTableIndex, g_wBlockTableOffset);
-
- Block = BLK_FROM_ADDR(pTempAddr);
- Page = PAGE_FROM_ADDR(pTempAddr, Block);
- PageCount = 1;
-
- if (bt_block_changed) {
- if (bt_flag == LAST_BT_ID) {
- bt_flag = FIRST_BT_ID;
- g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
- } else if (bt_flag < LAST_BT_ID) {
- bt_flag++;
- g_pBTBlocks[bt_flag - FIRST_BT_ID] = Block;
- }
-
- if ((bt_flag > (LAST_BT_ID-4)) &&
- g_pBTBlocks[FIRST_BT_ID - FIRST_BT_ID] !=
- BTBLOCK_INVAL) {
- bt_block_changed = 0;
- GLOB_FTL_BT_Garbage_Collection();
- }
-
- bt_block_changed = 0;
- nand_dbg_print(NAND_DBG_DEBUG,
- "Block Table Counter is %u Block %u\n",
- bt_flag, (unsigned int)Block);
- }
-
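- /*
- * First block-table page layout: bytes 0-2 are zero, byte 3 holds the
- * BT flag/counter, and the table data starts at offset 4. This matches
- * what FTL_Read_Block_Table() expects when it checks tempBuf[3] and
- * skips the first 4 bytes of the first page.
- */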
- memset(tempBuf, 0, 3);
- tempBuf[3] = bt_flag;
- wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf + 4,
- DeviceInfo.wPageDataSize - 4, 0);
- memset(&tempBuf[wBytesCopied + 4], 0xff,
- DeviceInfo.wPageSize - (wBytesCopied + 4));
- FTL_Insert_Block_Table_Signature(&tempBuf[DeviceInfo.wPageDataSize],
- bt_flag);
-
-#if CMD_DMA
- memcpy(g_pNextBlockTable, tempBuf,
- DeviceInfo.wPageSize * sizeof(u8));
- nand_dbg_print(NAND_DBG_DEBUG, "Writing First Page of Block Table "
- "Block %u Page %u\n", (unsigned int)Block, Page);
- if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(g_pNextBlockTable,
- Block, Page, 1,
- LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
- nand_dbg_print(NAND_DBG_WARN, "NAND Program fail in "
- "%s, Line %d, Function: %s, "
- "new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__, Block);
- goto func_return;
- }
-
- ftl_cmd_cnt++;
- g_pNextBlockTable += ((DeviceInfo.wPageSize * sizeof(u8)));
-#else
- if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf, Block, Page, 1)) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, Function: %s, "
- "new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__, Block);
- goto func_return;
- }
-#endif
-
- if (bt_pages > 1) {
- PageCount = bt_pages - 1;
- if (PageCount > 1) {
- wBytesCopied += FTL_Copy_Block_Table_To_Flash(tempBuf,
- DeviceInfo.wPageDataSize * (PageCount - 1),
- wBytesCopied);
-
-#if CMD_DMA
- memcpy(g_pNextBlockTable, tempBuf,
- (PageCount - 1) * DeviceInfo.wPageDataSize);
- if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
- g_pNextBlockTable, Block, Page + 1,
- PageCount - 1)) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, "
- "new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__,
- (int)Block);
- goto func_return;
- }
-
- ftl_cmd_cnt++;
- g_pNextBlockTable += (PageCount - 1) *
- DeviceInfo.wPageDataSize * sizeof(u8);
-#else
- if (FAIL == GLOB_LLD_Write_Page_Main(tempBuf,
- Block, Page + 1, PageCount - 1)) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, "
- "new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__,
- (int)Block);
- goto func_return;
- }
-#endif
- }
-
- wBytesCopied = FTL_Copy_Block_Table_To_Flash(tempBuf,
- DeviceInfo.wPageDataSize, wBytesCopied);
- memset(&tempBuf[wBytesCopied], 0xff,
- DeviceInfo.wPageSize-wBytesCopied);
- FTL_Insert_Block_Table_Signature(
- &tempBuf[DeviceInfo.wPageDataSize], bt_flag);
-#if CMD_DMA
- memcpy(g_pNextBlockTable, tempBuf,
- DeviceInfo.wPageSize * sizeof(u8));
- nand_dbg_print(NAND_DBG_DEBUG,
- "Writing the last Page of Block Table "
- "Block %u Page %u\n",
- (unsigned int)Block, Page + bt_pages - 1);
- if (FAIL == GLOB_LLD_Write_Page_Main_Spare_cdma(
- g_pNextBlockTable, Block, Page + bt_pages - 1, 1,
- LLD_CMD_FLAG_MODE_CDMA |
- LLD_CMD_FLAG_ORDER_BEFORE_REST)) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__, Block);
- goto func_return;
- }
- ftl_cmd_cnt++;
-#else
- if (FAIL == GLOB_LLD_Write_Page_Main_Spare(tempBuf,
- Block, Page+bt_pages - 1, 1)) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, "
- "new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__, Block);
- goto func_return;
- }
-#endif
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "FTL_Write_Block_Table_Data: done\n");
-
-func_return:
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Replace_Block_Table
-* Inputs: None
-* Outputs: PASS=0 / FAIL=1
-* Description: Get a new block to write the block table
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static u32 FTL_Replace_Block_Table(void)
-{
- u32 blk;
- int gc;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
-
- if ((BAD_BLOCK == blk) && (PASS == gc)) {
- GLOB_FTL_Garbage_Collection();
- blk = FTL_Replace_LWBlock(BLOCK_TABLE_INDEX, &gc);
- }
- if (BAD_BLOCK == blk)
- printk(KERN_ERR "%s, %s: There is no spare block. "
- "It should never happen\n",
- __FILE__, __func__);
-
- nand_dbg_print(NAND_DBG_DEBUG, "New Block table Block is %d\n", blk);
-
- return blk;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Replace_LWBlock
-* Inputs: Block number
-* Pointer to Garbage Collect flag
-* Outputs:
-* Description: Determine the least worn block by traversing the
-* block table
-* Set Garbage Collection to be called if the number of spare
-* blocks is less than the Free Block Gate count
-* Change the block table entry to map the least worn block for the
-* current operation
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static u32 FTL_Replace_LWBlock(u32 wBlockNum, int *pGarbageCollect)
-{
- u32 i;
- u32 *pbt = (u32 *)g_pBlockTable;
- u8 wLeastWornCounter = 0xFF;
- u32 wLeastWornIndex = BAD_BLOCK;
- u32 wSpareBlockNum = 0;
- u32 wDiscardBlockNum = 0;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (IS_SPARE_BLOCK(wBlockNum)) {
- *pGarbageCollect = FAIL;
- pbt[wBlockNum] = (u32)(pbt[wBlockNum] & (~SPARE_BLOCK));
-#if CMD_DMA
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index = (u32)(wBlockNum);
- p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
- p_BTableChangesDelta->ValidFields = 0x0C;
-#endif
- return pbt[wBlockNum];
- }
-
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- if (IS_DISCARDED_BLOCK(i))
- wDiscardBlockNum++;
-
- if (IS_SPARE_BLOCK(i)) {
- u32 wPhysicalIndex = (u32)((~BAD_BLOCK) & pbt[i]);
- if (wPhysicalIndex > DeviceInfo.wSpectraEndBlock)
- printk(KERN_ERR "FTL_Replace_LWBlock: "
- "This should never occur!\n");
- if (g_pWearCounter[wPhysicalIndex -
- DeviceInfo.wSpectraStartBlock] <
- wLeastWornCounter) {
- wLeastWornCounter =
- g_pWearCounter[wPhysicalIndex -
- DeviceInfo.wSpectraStartBlock];
- wLeastWornIndex = i;
- }
- wSpareBlockNum++;
- }
- }
-
- nand_dbg_print(NAND_DBG_WARN,
- "FTL_Replace_LWBlock: Least Worn Counter %d\n",
- (int)wLeastWornCounter);
-
- if ((wDiscardBlockNum >= NUM_FREE_BLOCKS_GATE) ||
- (wSpareBlockNum <= NUM_FREE_BLOCKS_GATE))
- *pGarbageCollect = PASS;
- else
- *pGarbageCollect = FAIL;
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "FTL_Replace_LWBlock: Discarded Blocks %u Spare"
- " Blocks %u\n",
- (unsigned int)wDiscardBlockNum,
- (unsigned int)wSpareBlockNum);
-
- return FTL_Replace_OneBlock(wBlockNum, wLeastWornIndex);
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Replace_MWBlock
-* Inputs: None
-* Outputs: most worn spare block no./BAD_BLOCK
-* Description: It finds the most worn spare block.
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static u32 FTL_Replace_MWBlock(void)
-{
- u32 i;
- u32 *pbt = (u32 *)g_pBlockTable;
- u8 wMostWornCounter = 0;
- u32 wMostWornIndex = BAD_BLOCK;
- u32 wSpareBlockNum = 0;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- if (IS_SPARE_BLOCK(i)) {
- u32 wPhysicalIndex = (u32)((~SPARE_BLOCK) & pbt[i]);
- if (g_pWearCounter[wPhysicalIndex -
- DeviceInfo.wSpectraStartBlock] >
- wMostWornCounter) {
- wMostWornCounter =
- g_pWearCounter[wPhysicalIndex -
- DeviceInfo.wSpectraStartBlock];
- wMostWornIndex = wPhysicalIndex;
- }
- wSpareBlockNum++;
- }
- }
-
- if (wSpareBlockNum <= 2)
- return BAD_BLOCK;
-
- return wMostWornIndex;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Replace_Block
-* Inputs: Block Address
-* Outputs: PASS=0 / FAIL=1
-* Description: If block specified by blk_addr parameter is not free,
-* replace it with the least worn block.
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Replace_Block(u64 blk_addr)
-{
- u32 current_blk = BLK_FROM_ADDR(blk_addr);
- u32 *pbt = (u32 *)g_pBlockTable;
- int wResult = PASS;
- int GarbageCollect = FAIL;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (IS_SPARE_BLOCK(current_blk)) {
- pbt[current_blk] = (~SPARE_BLOCK) & pbt[current_blk];
-#if CMD_DMA
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index = current_blk;
- p_BTableChangesDelta->BT_Entry_Value = pbt[current_blk];
- p_BTableChangesDelta->ValidFields = 0x0C;
-#endif
- return wResult;
- }
-
- FTL_Replace_LWBlock(current_blk, &GarbageCollect);
-
- if (PASS == GarbageCollect)
- wResult = GLOB_FTL_Garbage_Collection();
-
- return wResult;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Is_BadBlock
-* Inputs: block number to test
-* Outputs: PASS (block is BAD) / FAIL (block is not bad)
-* Description: test if this block number is flagged as bad
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Is_BadBlock(u32 wBlockNum)
-{
- u32 *pbt = (u32 *)g_pBlockTable;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (wBlockNum >= DeviceInfo.wSpectraStartBlock
- && BAD_BLOCK == (pbt[wBlockNum] & BAD_BLOCK))
- return PASS;
- else
- return FAIL;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Flush_Cache
-* Inputs: none
-* Outputs: PASS=0 / FAIL=1
-* Description: flush all the cache blocks to flash
-* if a cache block is not dirty, don't do anything with it
-* else, write the block and update the block table
-* Note: This function should be called at shutdown/power down
-* to write important data into the device
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Flush_Cache(void)
-{
- int i, ret;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < CACHE_ITEM_NUM; i++) {
- if (SET == Cache.array[i].changed) {
-#if CMD_DMA
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
- int_cache[ftl_cmd_cnt].item = i;
- int_cache[ftl_cmd_cnt].cache.address =
- Cache.array[i].address;
- int_cache[ftl_cmd_cnt].cache.changed = CLEAR;
-#endif
-#endif
- ret = write_back_to_l2_cache(Cache.array[i].buf, Cache.array[i].address);
- if (PASS == ret) {
- Cache.array[i].changed = CLEAR;
- } else {
- printk(KERN_ALERT "Failed when write back to L2 cache!\n");
- /* TODO - How to handle this? */
- }
- }
- }
-
- flush_l2_cache();
-
- return FTL_Write_Block_Table(FAIL);
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Page_Read
-* Inputs: pointer to data
-* logical address of data (u64 is LBA * Bytes/Page)
-* Outputs: PASS=0 / FAIL=1
-* Description: reads a page of data into RAM from the cache
-* if the data is not already in cache, read from flash to cache
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Page_Read(u8 *data, u64 logical_addr)
-{
- u16 cache_item;
- int res = PASS;
-
- nand_dbg_print(NAND_DBG_DEBUG, "GLOB_FTL_Page_Read - "
- "page_addr: %llu\n", logical_addr);
-
- cache_item = FTL_Cache_If_Hit(logical_addr);
-
- if (UNHIT_CACHE_ITEM == cache_item) {
- nand_dbg_print(NAND_DBG_DEBUG,
- "GLOB_FTL_Page_Read: Cache not hit\n");
- res = FTL_Cache_Write();
- if (ERR == FTL_Cache_Read(logical_addr))
- res = ERR;
- cache_item = Cache.LRU;
- }
-
- FTL_Cache_Read_Page(data, logical_addr, cache_item);
-
- return res;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Page_Write
-* Inputs: pointer to data
-* address of data (ADDRESSTYPE is LBA * Bytes/Page)
-* Outputs: PASS=0 / FAIL=1
-* Description: writes a page of data from RAM to the cache
-* if the data is not already in cache, write back the
-* least recently used block and read the addressed block
-* from flash to cache
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Page_Write(u8 *pData, u64 dwPageAddr)
-{
- u16 cache_blk;
- u32 *pbt = (u32 *)g_pBlockTable;
- int wResult = PASS;
-
- nand_dbg_print(NAND_DBG_TRACE, "GLOB_FTL_Page_Write - "
- "dwPageAddr: %llu\n", dwPageAddr);
-
- cache_blk = FTL_Cache_If_Hit(dwPageAddr);
-
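- /*
- * Cache miss: flush the LRU item, remap the target block if it has
- * gone bad, load the addressed item into the LRU slot, then write
- * the page into it.
- */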
- if (UNHIT_CACHE_ITEM == cache_blk) {
- wResult = FTL_Cache_Write();
- if (IS_BAD_BLOCK(BLK_FROM_ADDR(dwPageAddr))) {
- wResult = FTL_Replace_Block(dwPageAddr);
- pbt[BLK_FROM_ADDR(dwPageAddr)] |= SPARE_BLOCK;
- if (wResult == FAIL)
- return FAIL;
- }
- if (ERR == FTL_Cache_Read(dwPageAddr))
- wResult = ERR;
- cache_blk = Cache.LRU;
- FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
- } else {
-#if CMD_DMA
- FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk,
- LLD_CMD_FLAG_ORDER_BEFORE_REST);
-#else
- FTL_Cache_Write_Page(pData, dwPageAddr, cache_blk, 0);
-#endif
- }
-
- return wResult;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: GLOB_FTL_Block_Erase
-* Inputs: address of block to erase (now in byte format, should change to
-* block format)
-* Outputs: PASS=0 / FAIL=1
-* Description: erases the specified block
-* increments the erase count
-* If erase count reaches its upper limit, call function to
-* do the adjustment as per the relative erase count values
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int GLOB_FTL_Block_Erase(u64 blk_addr)
-{
- int status;
- u32 BlkIdx;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- BlkIdx = (u32)(blk_addr >> DeviceInfo.nBitsInBlockDataSize);
-
- if (BlkIdx < DeviceInfo.wSpectraStartBlock) {
- printk(KERN_ERR "GLOB_FTL_Block_Erase: "
- "This should never occur\n");
- return FAIL;
- }
-
-#if CMD_DMA
- status = GLOB_LLD_Erase_Block_cdma(BlkIdx, LLD_CMD_FLAG_MODE_CDMA);
- if (status == FAIL)
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__, BlkIdx);
-#else
- status = GLOB_LLD_Erase_Block(BlkIdx);
- if (status == FAIL) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__, BlkIdx);
- return status;
- }
-#endif
-
- if (DeviceInfo.MLCDevice) {
- g_pReadCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] = 0;
- if (g_cBlockTableStatus != IN_PROGRESS_BLOCK_TABLE) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
- }
-
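- /*
- * Count this erase against the block's wear counter; once it reaches
- * 0xFE the counters are re-normalized by
- * FTL_Adjust_Relative_Erase_Count() below.
- */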
- g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock]++;
-
-#if CMD_DMA
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
- p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
- p_BTableChangesDelta->WC_Index =
- BlkIdx - DeviceInfo.wSpectraStartBlock;
- p_BTableChangesDelta->WC_Entry_Value =
- g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock];
- p_BTableChangesDelta->ValidFields = 0x30;
-
- if (DeviceInfo.MLCDevice) {
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->RC_Index =
- BlkIdx - DeviceInfo.wSpectraStartBlock;
- p_BTableChangesDelta->RC_Entry_Value =
- g_pReadCounter[BlkIdx -
- DeviceInfo.wSpectraStartBlock];
- p_BTableChangesDelta->ValidFields = 0xC0;
- }
-
- ftl_cmd_cnt++;
-#endif
-
- if (g_pWearCounter[BlkIdx - DeviceInfo.wSpectraStartBlock] == 0xFE)
- FTL_Adjust_Relative_Erase_Count(BlkIdx);
-
- return status;
-}
-
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Adjust_Relative_Erase_Count
-* Inputs: index to block that was just incremented and is at the max
-* Outputs: PASS=0 / FAIL=1
-* Description: If any erase count reaches MAX, adjusts the erase count of
-*              every block by subtracting the least-worn counter value
-*              from every entry in the wear table
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Adjust_Relative_Erase_Count(u32 Index_of_MAX)
-{
- u8 wLeastWornCounter = MAX_BYTE_VALUE;
- u8 wWearCounter;
- u32 i, wWearIndex;
- u32 *pbt = (u32 *)g_pBlockTable;
- int wResult = PASS;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++) {
- if (IS_BAD_BLOCK(i))
- continue;
- wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
-
-		if (wWearIndex < DeviceInfo.wSpectraStartBlock)
-			printk(KERN_ERR "FTL_Adjust_Relative_Erase_Count: "
-				"This should never occur\n");
- wWearCounter = g_pWearCounter[wWearIndex -
- DeviceInfo.wSpectraStartBlock];
- if (wWearCounter < wLeastWornCounter)
- wLeastWornCounter = wWearCounter;
- }
-
- if (wLeastWornCounter == 0) {
- nand_dbg_print(NAND_DBG_WARN,
- "Adjusting Wear Levelling Counters: Special Case\n");
- g_pWearCounter[Index_of_MAX -
- DeviceInfo.wSpectraStartBlock]--;
-#if CMD_DMA
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
- p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
- p_BTableChangesDelta->WC_Index =
- Index_of_MAX - DeviceInfo.wSpectraStartBlock;
- p_BTableChangesDelta->WC_Entry_Value =
- g_pWearCounter[Index_of_MAX -
- DeviceInfo.wSpectraStartBlock];
- p_BTableChangesDelta->ValidFields = 0x30;
-#endif
- FTL_Static_Wear_Leveling();
- } else {
- for (i = 0; i < DeviceInfo.wDataBlockNum; i++)
- if (!IS_BAD_BLOCK(i)) {
- wWearIndex = (u32)(pbt[i] & (~BAD_BLOCK));
- g_pWearCounter[wWearIndex -
- DeviceInfo.wSpectraStartBlock] =
- (u8)(g_pWearCounter
- [wWearIndex -
- DeviceInfo.wSpectraStartBlock] -
- wLeastWornCounter);
-#if CMD_DMA
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free +=
- sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->WC_Index = wWearIndex -
- DeviceInfo.wSpectraStartBlock;
- p_BTableChangesDelta->WC_Entry_Value =
- g_pWearCounter[wWearIndex -
- DeviceInfo.wSpectraStartBlock];
- p_BTableChangesDelta->ValidFields = 0x30;
-#endif
- }
- }
-
- return wResult;
-}
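
The normalization step in the else branch above can be summarized by a short standalone sketch; the array and its length are hypothetical stand-ins for g_pWearCounter and the data-block count:

#include <stdint.h>
#include <stddef.h>

/* Minimal sketch of the wear-counter normalization: subtract the
 * least-worn value from every counter so relative wear is preserved
 * while headroom is restored. */
static void normalize_wear_counters(uint8_t *wear, size_t n)
{
	uint8_t least = 0xFF;
	size_t i;

	for (i = 0; i < n; i++)
		if (wear[i] < least)
			least = wear[i];

	if (least == 0)
		return;	/* special case: the driver runs static wear
			 * leveling instead of subtracting zero */

	for (i = 0; i < n; i++)
		wear[i] = (uint8_t)(wear[i] - least);
}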
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Write_IN_Progress_Block_Table_Page
-* Inputs: None
-* Outputs: PASS=0 / FAIL=1
-* Description: Writes the in-progress flag page to the page that
-* immediately follows the block table
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-static int FTL_Write_IN_Progress_Block_Table_Page(void)
-{
- int wResult = PASS;
- u16 bt_pages;
- u16 dwIPFPageAddr;
-#if CMD_DMA
-#else
- u32 *pbt = (u32 *)g_pBlockTable;
- u32 wTempBlockTableIndex;
-#endif
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- bt_pages = FTL_Get_Block_Table_Flash_Size_Pages();
-
- dwIPFPageAddr = g_wBlockTableOffset + bt_pages;
-
- nand_dbg_print(NAND_DBG_DEBUG, "Writing IPF at "
- "Block %d Page %d\n",
- g_wBlockTableIndex, dwIPFPageAddr);
-
-#if CMD_DMA
- wResult = GLOB_LLD_Write_Page_Main_Spare_cdma(g_pIPF,
- g_wBlockTableIndex, dwIPFPageAddr, 1,
- LLD_CMD_FLAG_MODE_CDMA | LLD_CMD_FLAG_ORDER_BEFORE_REST);
- if (wResult == FAIL) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__,
- g_wBlockTableIndex);
- }
- g_wBlockTableOffset = dwIPFPageAddr + 1;
- p_BTableChangesDelta = (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
- p_BTableChangesDelta->ftl_cmd_cnt = ftl_cmd_cnt;
- p_BTableChangesDelta->g_wBlockTableOffset = g_wBlockTableOffset;
- p_BTableChangesDelta->ValidFields = 0x01;
- ftl_cmd_cnt++;
-#else
- wResult = GLOB_LLD_Write_Page_Main_Spare(g_pIPF,
- g_wBlockTableIndex, dwIPFPageAddr, 1);
- if (wResult == FAIL) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in %s, Line %d, "
- "Function: %s, new Bad Block %d generated!\n",
- __FILE__, __LINE__, __func__,
- (int)g_wBlockTableIndex);
- MARK_BLOCK_AS_BAD(pbt[BLOCK_TABLE_INDEX]);
- wTempBlockTableIndex = FTL_Replace_Block_Table();
- bt_block_changed = 1;
- if (BAD_BLOCK == wTempBlockTableIndex)
- return ERR;
- g_wBlockTableIndex = wTempBlockTableIndex;
- g_wBlockTableOffset = 0;
-		/* Block table tag is '00', which means it is the one in use */
- pbt[BLOCK_TABLE_INDEX] = g_wBlockTableIndex;
- return FAIL;
- }
- g_wBlockTableOffset = dwIPFPageAddr + 1;
-#endif
- return wResult;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: FTL_Read_Disturbance
-* Inputs: block address
-* Outputs: PASS=0 / FAIL=1
-* Description: used to handle read disturbance. Data in a block that
-* reaches its read limit is moved to a new block
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int FTL_Read_Disturbance(u32 blk_addr)
-{
- int wResult = FAIL;
- u32 *pbt = (u32 *) g_pBlockTable;
- u32 dwOldBlockAddr = blk_addr;
- u32 wBlockNum;
- u32 i;
- u32 wLeastReadCounter = 0xFFFF;
- u32 wLeastReadIndex = BAD_BLOCK;
- u32 wSpareBlockNum = 0;
- u32 wTempNode;
- u32 wReplacedNode;
- u8 *g_pTempBuf;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
-#if CMD_DMA
- g_pTempBuf = cp_back_buf_copies[cp_back_buf_idx];
- cp_back_buf_idx++;
- if (cp_back_buf_idx > COPY_BACK_BUF_NUM) {
-		printk(KERN_ERR "cp_back_buf_copies overflow! Exit. "
-			"Maybe too many pending commands in your CDMA chain.\n");
- return FAIL;
- }
-#else
- g_pTempBuf = tmp_buf_read_disturbance;
-#endif
-
- wBlockNum = FTL_Get_Block_Index(blk_addr);
-
- do {
-		/* This is a bug. Here 'i' should be the logical block number
-		 * and start from 1 (0 is reserved for the block table).
-		 * Have fixed it. - Yunpeng 2008.12.19
-		 */
- for (i = 1; i < DeviceInfo.wDataBlockNum; i++) {
- if (IS_SPARE_BLOCK(i)) {
- u32 wPhysicalIndex =
- (u32)((~SPARE_BLOCK) & pbt[i]);
- if (g_pReadCounter[wPhysicalIndex -
- DeviceInfo.wSpectraStartBlock] <
- wLeastReadCounter) {
- wLeastReadCounter =
- g_pReadCounter[wPhysicalIndex -
- DeviceInfo.wSpectraStartBlock];
- wLeastReadIndex = i;
- }
- wSpareBlockNum++;
- }
- }
-
- if (wSpareBlockNum <= NUM_FREE_BLOCKS_GATE) {
- wResult = GLOB_FTL_Garbage_Collection();
- if (PASS == wResult)
- continue;
- else
- break;
- } else {
- wTempNode = (u32)(DISCARD_BLOCK | pbt[wBlockNum]);
- wReplacedNode = (u32)((~SPARE_BLOCK) &
- pbt[wLeastReadIndex]);
-#if CMD_DMA
- pbt[wBlockNum] = wReplacedNode;
- pbt[wLeastReadIndex] = wTempNode;
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index = wBlockNum;
- p_BTableChangesDelta->BT_Entry_Value = pbt[wBlockNum];
- p_BTableChangesDelta->ValidFields = 0x0C;
-
- p_BTableChangesDelta =
- (struct BTableChangesDelta *)g_pBTDelta_Free;
- g_pBTDelta_Free += sizeof(struct BTableChangesDelta);
-
- p_BTableChangesDelta->ftl_cmd_cnt =
- ftl_cmd_cnt;
- p_BTableChangesDelta->BT_Index = wLeastReadIndex;
- p_BTableChangesDelta->BT_Entry_Value =
- pbt[wLeastReadIndex];
- p_BTableChangesDelta->ValidFields = 0x0C;
-
- wResult = GLOB_LLD_Read_Page_Main_cdma(g_pTempBuf,
- dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock,
- LLD_CMD_FLAG_MODE_CDMA);
- if (wResult == FAIL)
- return wResult;
-
- ftl_cmd_cnt++;
-
- if (wResult != FAIL) {
- if (FAIL == GLOB_LLD_Write_Page_Main_cdma(
- g_pTempBuf, pbt[wBlockNum], 0,
- DeviceInfo.wPagesPerBlock)) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in "
- "%s, Line %d, Function: %s, "
- "new Bad Block %d "
- "generated!\n",
- __FILE__, __LINE__, __func__,
- (int)pbt[wBlockNum]);
- wResult = FAIL;
- MARK_BLOCK_AS_BAD(pbt[wBlockNum]);
- }
- ftl_cmd_cnt++;
- }
-#else
- wResult = GLOB_LLD_Read_Page_Main(g_pTempBuf,
- dwOldBlockAddr, 0, DeviceInfo.wPagesPerBlock);
- if (wResult == FAIL)
- return wResult;
-
- if (wResult != FAIL) {
-			/* This is a bug. At this time, pbt[wBlockNum] is
-			 * still the physical address of the discarded block,
-			 * and should not be written to.
-			 * Have fixed it as below.
-			 * -- Yunpeng 2008.12.19
-			 */
- wResult = GLOB_LLD_Write_Page_Main(g_pTempBuf,
- wReplacedNode, 0,
- DeviceInfo.wPagesPerBlock);
- if (wResult == FAIL) {
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Program fail in "
- "%s, Line %d, Function: %s, "
- "new Bad Block %d "
- "generated!\n",
- __FILE__, __LINE__, __func__,
- (int)wReplacedNode);
- MARK_BLOCK_AS_BAD(wReplacedNode);
- } else {
- pbt[wBlockNum] = wReplacedNode;
- pbt[wLeastReadIndex] = wTempNode;
- }
- }
-
- if ((wResult == PASS) && (g_cBlockTableStatus !=
- IN_PROGRESS_BLOCK_TABLE)) {
- g_cBlockTableStatus = IN_PROGRESS_BLOCK_TABLE;
- FTL_Write_IN_Progress_Block_Table_Page();
- }
-#endif
- }
-	} while (wResult != PASS);
-
-#if CMD_DMA
- /* ... */
-#endif
-
- return wResult;
-}
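
The spare-block scan at the top of FTL_Read_Disturbance mixes logical indices (into pbt) with physical block numbers (into g_pReadCounter); a hedged standalone sketch of that selection follows, with local stand-in names and constants rather than the driver's globals:

#include <stdint.h>

#define SPARE_FLAG	0x400000u	/* mirrors SPARE_BLOCK in flash.h */
#define START_BLK	32u		/* stand-in for wSpectraStartBlock */

/* Return the logical index of the spare block with the smallest read
 * counter, or 0xFFFFFFFF if none is found. Index 0 is skipped because
 * it is reserved for the block table. */
static uint32_t least_read_spare(const uint32_t *pbt,
				 const uint32_t *read_cnt, uint32_t nblocks)
{
	uint32_t best = 0xFFFFFFFFu, least = 0xFFFFFFFFu, i, phys;

	for (i = 1; i < nblocks; i++) {
		if (!(pbt[i] & SPARE_FLAG))
			continue;
		/* pbt[] stores the physical block number with flag bits
		 * OR'ed in; counters are indexed by physical - start. */
		phys = pbt[i] & ~SPARE_FLAG;
		if (read_cnt[phys - START_BLK] < least) {
			least = read_cnt[phys - START_BLK];
			best = i;
		}
	}
	return best;
}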
-
diff --git a/drivers/staging/spectra/flash.h b/drivers/staging/spectra/flash.h
deleted file mode 100644
index e59cf4e..0000000
--- a/drivers/staging/spectra/flash.h
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#ifndef _FLASH_INTERFACE_
-#define _FLASH_INTERFACE_
-
-#include "ffsport.h"
-#include "spectraswconfig.h"
-
-#define MAX_BYTE_VALUE 0xFF
-#define MAX_WORD_VALUE 0xFFFF
-#define MAX_U32_VALUE 0xFFFFFFFF
-
-#define MAX_BLOCKNODE_VALUE 0xFFFFFF
-#define DISCARD_BLOCK 0x800000
-#define SPARE_BLOCK 0x400000
-#define BAD_BLOCK 0xC00000
-
-#define UNHIT_CACHE_ITEM 0xFFFF
-
-#define NAND_CACHE_INIT_ADDR 0xffffffffffffffffULL
-
-#define IN_PROGRESS_BLOCK_TABLE 0x00
-#define CURRENT_BLOCK_TABLE 0x01
-
-#define BTSIG_OFFSET (0)
-#define BTSIG_BYTES (5)
-#define BTSIG_DELTA (3)
-
-#define MAX_READ_COUNTER 0x2710
-
-#define FIRST_BT_ID (1)
-#define LAST_BT_ID (254)
-#define BTBLOCK_INVAL (u32)(0xFFFFFFFF)
-
-struct device_info_tag {
- u16 wDeviceMaker;
- u16 wDeviceID;
- u32 wDeviceType;
- u32 wSpectraStartBlock;
- u32 wSpectraEndBlock;
- u32 wTotalBlocks;
- u16 wPagesPerBlock;
- u16 wPageSize;
- u16 wPageDataSize;
- u16 wPageSpareSize;
- u16 wNumPageSpareFlag;
- u16 wECCBytesPerSector;
- u32 wBlockSize;
- u32 wBlockDataSize;
- u32 wDataBlockNum;
- u8 bPlaneNum;
- u16 wDeviceMainAreaSize;
- u16 wDeviceSpareAreaSize;
- u16 wDevicesConnected;
- u16 wDeviceWidth;
- u16 wHWRevision;
- u16 wHWFeatures;
-
- u16 wONFIDevFeatures;
- u16 wONFIOptCommands;
- u16 wONFITimingMode;
- u16 wONFIPgmCacheTimingMode;
-
- u16 MLCDevice;
- u16 wSpareSkipBytes;
-
- u8 nBitsInPageNumber;
- u8 nBitsInPageDataSize;
- u8 nBitsInBlockDataSize;
-};
-
-extern struct device_info_tag DeviceInfo;
-
-/* Cache item format */
-struct flash_cache_item_tag {
- u64 address;
- u16 use_cnt;
- u16 changed;
- u8 *buf;
-};
-
-struct flash_cache_tag {
- u32 cache_item_size; /* Size in bytes of each cache item */
- u16 pages_per_item; /* How many NAND pages in each cache item */
- u16 LRU; /* No. of the least recently used cache item */
- struct flash_cache_item_tag array[CACHE_ITEM_NUM];
-};
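
A minimal sketch of the address-to-cache-item lookup that FTL_Cache_If_Hit (used in GLOB_FTL_Page_Write above) appears to perform over this array; the struct and constant names below are local stand-ins, and the assumption that each item covers a contiguous range starting at its address field is mine, not stated in this header:

#include <stdint.h>

#define SKETCH_CACHE_ITEMS	4	/* stand-in for CACHE_ITEM_NUM */
#define SKETCH_UNHIT		0xFFFF	/* stand-in for UNHIT_CACHE_ITEM */

struct sketch_cache_item {
	uint64_t address;	/* start address covered by this item */
	uint8_t *buf;
};

struct sketch_cache {
	uint32_t item_size;	/* bytes covered per item */
	struct sketch_cache_item array[SKETCH_CACHE_ITEMS];
};

/* Return the index of the item whose cached range covers addr,
 * or SKETCH_UNHIT when no item does. */
static uint16_t sketch_cache_if_hit(const struct sketch_cache *c, uint64_t addr)
{
	uint16_t i;

	for (i = 0; i < SKETCH_CACHE_ITEMS; i++) {
		if (addr >= c->array[i].address &&
		    addr < c->array[i].address + c->item_size)
			return i;
	}
	return SKETCH_UNHIT;
}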
-
-/*
- *Data structure for each list node of the management table
- * used for the Level 2 Cache. Each node maps one logical NAND block.
- */
-struct spectra_l2_cache_list {
- struct list_head list;
- u32 logical_blk_num; /* Logical block number */
- u32 pages_array[]; /* Page map array of this logical block.
- * Array index is the logical block number,
-				 * and for every item of this array:
-				 * the high 16 bits are the index of the
-				 * L2 cache block, and the low 16 bits are
-				 * the physical page number within that
-				 * L2 cache block.
-				 * This array is kmalloc'ed at run time.
- */
-};
-
-struct spectra_l2_cache_info {
- u32 blk_array[BLK_NUM_FOR_L2_CACHE];
-	u16 cur_blk_idx;	/* Index of the physical block currently in use */
-	u16 cur_page_num;	/* Current page number within that block */
- struct spectra_l2_cache_list table; /* First node of the table */
-};
-
-#define RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE 1
-
-#if RESTORE_CACHE_ON_CDMA_CHAIN_FAILURE
-struct flash_cache_mod_item_tag {
- u64 address;
- u8 changed;
-};
-
-struct flash_cache_delta_list_tag {
- u8 item; /* used cache item */
- struct flash_cache_mod_item_tag cache;
-};
-#endif
-
-extern struct flash_cache_tag Cache;
-
-extern u8 *buf_read_page_main_spare;
-extern u8 *buf_write_page_main_spare;
-extern u8 *buf_read_page_spare;
-extern u8 *buf_get_bad_block;
-extern u8 *cdma_desc_buf;
-extern u8 *memcp_desc_buf;
-
-/* structure used for the IdentifyDevice function */
-struct spectra_indentfy_dev_tag {
- u32 NumBlocks;
- u16 PagesPerBlock;
- u16 PageDataSize;
- u16 wECCBytesPerSector;
- u32 wDataBlockNum;
-};
-
-int GLOB_FTL_Flash_Init(void);
-int GLOB_FTL_Flash_Release(void);
-/*void GLOB_FTL_Erase_Flash(void);*/
-int GLOB_FTL_Block_Erase(u64 block_addr);
-int GLOB_FTL_Is_BadBlock(u32 block_num);
-int GLOB_FTL_IdentifyDevice(struct spectra_indentfy_dev_tag *dev_data);
-int GLOB_FTL_Event_Status(int *);
-u16 glob_ftl_execute_cmds(void);
-
-/*int FTL_Read_Disturbance(ADDRESSTYPE dwBlockAddr);*/
-int FTL_Read_Disturbance(u32 dwBlockAddr);
-
-/*Flash r/w based on cache*/
-int GLOB_FTL_Page_Read(u8 *read_data, u64 page_addr);
-int GLOB_FTL_Page_Write(u8 *write_data, u64 page_addr);
-int GLOB_FTL_Wear_Leveling(void);
-int GLOB_FTL_Flash_Format(void);
-int GLOB_FTL_Init(void);
-int GLOB_FTL_Flush_Cache(void);
-int GLOB_FTL_Garbage_Collection(void);
-int GLOB_FTL_BT_Garbage_Collection(void);
-void GLOB_FTL_Cache_Release(void);
-u8 *get_blk_table_start_addr(void);
-u8 *get_wear_leveling_table_start_addr(void);
-unsigned long get_blk_table_len(void);
-unsigned long get_wear_leveling_table_len(void);
-
-#if DEBUG_BNDRY
-void debug_boundary_lineno_error(int chnl, int limit, int no, int lineno,
- char *filename);
-#define debug_boundary_error(chnl, limit, no) debug_boundary_lineno_error(chnl,\
- limit, no, __LINE__, __FILE__)
-#else
-#define debug_boundary_error(chnl, limit, no) ;
-#endif
-
-#endif /*_FLASH_INTERFACE_*/
diff --git a/drivers/staging/spectra/lld.c b/drivers/staging/spectra/lld.c
deleted file mode 100644
index 5c3b976..0000000
--- a/drivers/staging/spectra/lld.c
+++ /dev/null
@@ -1,339 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#include "spectraswconfig.h"
-#include "ffsport.h"
-#include "ffsdefs.h"
-#include "lld.h"
-#include "lld_nand.h"
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-#if FLASH_EMU /* vector all the LLD calls to the LLD_EMU code */
-#include "lld_emu.h"
-#include "lld_cdma.h"
-
-/* common functions: */
-u16 GLOB_LLD_Flash_Reset(void)
-{
- return emu_Flash_Reset();
-}
-
-u16 GLOB_LLD_Read_Device_ID(void)
-{
- return emu_Read_Device_ID();
-}
-
-int GLOB_LLD_Flash_Release(void)
-{
- return emu_Flash_Release();
-}
-
-u16 GLOB_LLD_Flash_Init(void)
-{
- return emu_Flash_Init();
-}
-
-u16 GLOB_LLD_Erase_Block(u32 block_add)
-{
- return emu_Erase_Block(block_add);
-}
-
-u16 GLOB_LLD_Write_Page_Main(u8 *write_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return emu_Write_Page_Main(write_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Main(u8 *read_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return emu_Read_Page_Main(read_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
- u32 block, u16 page, u16 page_count)
-{
- return emu_Read_Page_Main(read_data, block, page, page_count);
-}
-
-u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data, u32 block,
- u16 Page, u16 PageCount)
-{
- return emu_Write_Page_Main_Spare(write_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data, u32 block,
- u16 Page, u16 PageCount)
-{
- return emu_Read_Page_Main_Spare(read_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Write_Page_Spare(u8 *write_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return emu_Write_Page_Spare(write_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Spare(u8 *read_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return emu_Read_Page_Spare(read_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Get_Bad_Block(u32 block)
-{
- return emu_Get_Bad_Block(block);
-}
-
-#endif /* FLASH_EMU */
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-#if FLASH_MTD /* vector all the LLD calls to the LLD_MTD code */
-#include "lld_mtd.h"
-#include "lld_cdma.h"
-
-/* common functions: */
-u16 GLOB_LLD_Flash_Reset(void)
-{
- return mtd_Flash_Reset();
-}
-
-u16 GLOB_LLD_Read_Device_ID(void)
-{
- return mtd_Read_Device_ID();
-}
-
-int GLOB_LLD_Flash_Release(void)
-{
- return mtd_Flash_Release();
-}
-
-u16 GLOB_LLD_Flash_Init(void)
-{
- return mtd_Flash_Init();
-}
-
-u16 GLOB_LLD_Erase_Block(u32 block_add)
-{
- return mtd_Erase_Block(block_add);
-}
-
-u16 GLOB_LLD_Write_Page_Main(u8 *write_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return mtd_Write_Page_Main(write_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Main(u8 *read_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return mtd_Read_Page_Main(read_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
- u32 block, u16 page, u16 page_count)
-{
- return mtd_Read_Page_Main(read_data, block, page, page_count);
-}
-
-u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data, u32 block,
- u16 Page, u16 PageCount)
-{
- return mtd_Write_Page_Main_Spare(write_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data, u32 block,
- u16 Page, u16 PageCount)
-{
- return mtd_Read_Page_Main_Spare(read_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Write_Page_Spare(u8 *write_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return mtd_Write_Page_Spare(write_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Spare(u8 *read_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return mtd_Read_Page_Spare(read_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Get_Bad_Block(u32 block)
-{
- return mtd_Get_Bad_Block(block);
-}
-
-#endif /* FLASH_MTD */
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-#if FLASH_NAND /* vector all the LLD calls to the NAND controller code */
-#include "lld_nand.h"
-#include "lld_cdma.h"
-#include "flash.h"
-
-/* common functions for LLD_NAND */
-void GLOB_LLD_ECC_Control(int enable)
-{
- NAND_ECC_Ctrl(enable);
-}
-
-/* common functions for LLD_NAND */
-u16 GLOB_LLD_Flash_Reset(void)
-{
- return NAND_Flash_Reset();
-}
-
-u16 GLOB_LLD_Read_Device_ID(void)
-{
- return NAND_Read_Device_ID();
-}
-
-u16 GLOB_LLD_UnlockArrayAll(void)
-{
- return NAND_UnlockArrayAll();
-}
-
-u16 GLOB_LLD_Flash_Init(void)
-{
- return NAND_Flash_Init();
-}
-
-int GLOB_LLD_Flash_Release(void)
-{
- return nand_release_spectra();
-}
-
-u16 GLOB_LLD_Erase_Block(u32 block_add)
-{
- return NAND_Erase_Block(block_add);
-}
-
-
-u16 GLOB_LLD_Write_Page_Main(u8 *write_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return NAND_Write_Page_Main(write_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Main(u8 *read_data, u32 block, u16 page,
- u16 page_count)
-{
- if (page_count == 1) /* Using polling to improve read speed */
- return NAND_Read_Page_Main_Polling(read_data, block, page, 1);
- else
- return NAND_Read_Page_Main(read_data, block, page, page_count);
-}
-
-u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
- u32 block, u16 page, u16 page_count)
-{
- return NAND_Read_Page_Main_Polling(read_data,
- block, page, page_count);
-}
-
-u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data, u32 block,
- u16 Page, u16 PageCount)
-{
- return NAND_Write_Page_Main_Spare(write_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Write_Page_Spare(u8 *write_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return NAND_Write_Page_Spare(write_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data, u32 block,
- u16 page, u16 page_count)
-{
- return NAND_Read_Page_Main_Spare(read_data, block, page, page_count);
-}
-
-u16 GLOB_LLD_Read_Page_Spare(u8 *read_data, u32 block, u16 Page,
- u16 PageCount)
-{
- return NAND_Read_Page_Spare(read_data, block, Page, PageCount);
-}
-
-u16 GLOB_LLD_Get_Bad_Block(u32 block)
-{
- return NAND_Get_Bad_Block(block);
-}
-
-#if CMD_DMA
-u16 GLOB_LLD_Event_Status(void)
-{
- return CDMA_Event_Status();
-}
-
-u16 glob_lld_execute_cmds(void)
-{
- return CDMA_Execute_CMDs();
-}
-
-u16 GLOB_LLD_MemCopy_CMD(u8 *dest, u8 *src,
- u32 ByteCount, u16 flag)
-{
- /* Replace the hardware memcopy with software memcpy function */
- if (CDMA_Execute_CMDs())
- return FAIL;
- memcpy(dest, src, ByteCount);
- return PASS;
-
- /* return CDMA_MemCopy_CMD(dest, src, ByteCount, flag); */
-}
-
-u16 GLOB_LLD_Erase_Block_cdma(u32 block, u16 flags)
-{
- return CDMA_Data_CMD(ERASE_CMD, 0, block, 0, 0, flags);
-}
-
-u16 GLOB_LLD_Write_Page_Main_cdma(u8 *data, u32 block, u16 page, u16 count)
-{
- return CDMA_Data_CMD(WRITE_MAIN_CMD, data, block, page, count, 0);
-}
-
-u16 GLOB_LLD_Read_Page_Main_cdma(u8 *data, u32 block, u16 page,
- u16 count, u16 flags)
-{
- return CDMA_Data_CMD(READ_MAIN_CMD, data, block, page, count, flags);
-}
-
-u16 GLOB_LLD_Write_Page_Main_Spare_cdma(u8 *data, u32 block, u16 page,
- u16 count, u16 flags)
-{
- return CDMA_Data_CMD(WRITE_MAIN_SPARE_CMD,
- data, block, page, count, flags);
-}
-
-u16 GLOB_LLD_Read_Page_Main_Spare_cdma(u8 *data,
- u32 block, u16 page, u16 count)
-{
- return CDMA_Data_CMD(READ_MAIN_SPARE_CMD, data, block, page, count,
- LLD_CMD_FLAG_MODE_CDMA);
-}
-
-#endif /* CMD_DMA */
-#endif /* FLASH_NAND */
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-
-/* end of LLD.c */
diff --git a/drivers/staging/spectra/lld.h b/drivers/staging/spectra/lld.h
deleted file mode 100644
index d3738e0..0000000
--- a/drivers/staging/spectra/lld.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-
-
-#ifndef _LLD_
-#define _LLD_
-
-#include "ffsport.h"
-#include "spectraswconfig.h"
-#include "flash.h"
-
-#define GOOD_BLOCK 0
-#define DEFECTIVE_BLOCK 1
-#define READ_ERROR 2
-
-#define CLK_X 5
-#define CLK_MULTI 4
-
-/* Typedefs */
-
-/* prototypes: API for LLD */
-/* Currently, Write_Page_Main
- * MemCopy
- * Read_Page_Main_Spare
- * do not take a flags argument because they were not implemented prior
- * to this change. The flags are not being added yet, to keep changes to
- * a minimum; currently they are only required for Write_Page_Main_Spare.
- * Later on, these NEED to be changed.
- */
-
-extern void GLOB_LLD_ECC_Control(int enable);
-
-extern u16 GLOB_LLD_Flash_Reset(void);
-
-extern u16 GLOB_LLD_Read_Device_ID(void);
-
-extern u16 GLOB_LLD_UnlockArrayAll(void);
-
-extern u16 GLOB_LLD_Flash_Init(void);
-
-extern int GLOB_LLD_Flash_Release(void);
-
-extern u16 GLOB_LLD_Erase_Block(u32 block_add);
-
-extern u16 GLOB_LLD_Write_Page_Main(u8 *write_data,
- u32 block, u16 Page, u16 PageCount);
-
-extern u16 GLOB_LLD_Read_Page_Main(u8 *read_data,
- u32 block, u16 page, u16 page_count);
-
-extern u16 GLOB_LLD_Read_Page_Main_Polling(u8 *read_data,
- u32 block, u16 page, u16 page_count);
-
-extern u16 GLOB_LLD_Write_Page_Main_Spare(u8 *write_data,
- u32 block, u16 Page, u16 PageCount);
-
-extern u16 GLOB_LLD_Write_Page_Spare(u8 *write_data,
- u32 block, u16 Page, u16 PageCount);
-
-extern u16 GLOB_LLD_Read_Page_Main_Spare(u8 *read_data,
- u32 block, u16 page, u16 page_count);
-
-extern u16 GLOB_LLD_Read_Page_Spare(u8 *read_data,
- u32 block, u16 Page, u16 PageCount);
-
-extern u16 GLOB_LLD_Get_Bad_Block(u32 block);
-
-extern u16 GLOB_LLD_Event_Status(void);
-
-extern u16 GLOB_LLD_MemCopy_CMD(u8 *dest, u8 *src, u32 ByteCount, u16 flag);
-
-extern u16 glob_lld_execute_cmds(void);
-
-extern u16 GLOB_LLD_Erase_Block_cdma(u32 block, u16 flags);
-
-extern u16 GLOB_LLD_Write_Page_Main_cdma(u8 *data,
- u32 block, u16 page, u16 count);
-
-extern u16 GLOB_LLD_Read_Page_Main_cdma(u8 *data,
- u32 block, u16 page, u16 count, u16 flags);
-
-extern u16 GLOB_LLD_Write_Page_Main_Spare_cdma(u8 *data,
- u32 block, u16 page, u16 count, u16 flags);
-
-extern u16 GLOB_LLD_Read_Page_Main_Spare_cdma(u8 *data,
- u32 block, u16 page, u16 count);
-
-#define LLD_CMD_FLAG_ORDER_BEFORE_REST (0x1)
-#define LLD_CMD_FLAG_MODE_CDMA (0x8)
-
-
-#endif /*_LLD_ */
-
-
diff --git a/drivers/staging/spectra/lld_cdma.c b/drivers/staging/spectra/lld_cdma.c
deleted file mode 100644
index c6e7610..0000000
--- a/drivers/staging/spectra/lld_cdma.c
+++ /dev/null
@@ -1,910 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#include <linux/fs.h>
-#include <linux/slab.h>
-
-#include "spectraswconfig.h"
-#include "lld.h"
-#include "lld_nand.h"
-#include "lld_cdma.h"
-#include "lld_emu.h"
-#include "flash.h"
-#include "nand_regs.h"
-
-#define MAX_PENDING_CMDS 4
-#define MODE_02 (0x2 << 26)
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: CDMA_Data_CMD
-* Inputs: cmd code (aligned for hw)
-* data: pointer to source or destination
-* block: block address
-* page: page address
-* num: num pages to transfer
-* Outputs: PASS
-* Description: This function takes the parameters and puts them
-* into the "pending commands" array.
-* It does not parse or validate the parameters.
-* The array index is the same as the tag.
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 CDMA_Data_CMD(u8 cmd, u8 *data, u32 block, u16 page, u16 num, u16 flags)
-{
- u8 bank;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (0 == cmd)
- nand_dbg_print(NAND_DBG_DEBUG,
- "%s, Line %d, Illegal cmd (0)\n", __FILE__, __LINE__);
-
- /* If a command of another bank comes, then first execute */
- /* pending commands of the current bank, then set the new */
- /* bank as current bank */
- bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
- if (bank != info.flash_bank) {
- nand_dbg_print(NAND_DBG_WARN,
- "Will access new bank. old bank: %d, new bank: %d\n",
- info.flash_bank, bank);
- if (CDMA_Execute_CMDs()) {
- printk(KERN_ERR "CDMA_Execute_CMDs fail!\n");
- return FAIL;
- }
- info.flash_bank = bank;
- }
-
- info.pcmds[info.pcmds_num].CMD = cmd;
- info.pcmds[info.pcmds_num].DataAddr = data;
- info.pcmds[info.pcmds_num].Block = block;
- info.pcmds[info.pcmds_num].Page = page;
- info.pcmds[info.pcmds_num].PageCount = num;
- info.pcmds[info.pcmds_num].DataDestAddr = 0;
- info.pcmds[info.pcmds_num].DataSrcAddr = 0;
- info.pcmds[info.pcmds_num].MemCopyByteCnt = 0;
- info.pcmds[info.pcmds_num].Flags = flags;
- info.pcmds[info.pcmds_num].Status = 0xB0B;
-
- switch (cmd) {
- case WRITE_MAIN_SPARE_CMD:
- Conv_Main_Spare_Data_Log2Phy_Format(data, num);
- break;
- case WRITE_SPARE_CMD:
- Conv_Spare_Data_Log2Phy_Format(data);
- break;
- default:
- break;
- }
-
- info.pcmds_num++;
-
- if (info.pcmds_num >= MAX_PENDING_CMDS) {
- if (CDMA_Execute_CMDs()) {
- printk(KERN_ERR "CDMA_Execute_CMDs fail!\n");
- return FAIL;
- }
- }
-
- return PASS;
-}
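
A usage sketch (not from the original sources) of how this pending-commands API is driven by callers such as GLOB_LLD_Erase_Block_cdma later in this patch: queue the command, then flush the array so descriptors are actually built and started. The wrapper name below is hypothetical:

static u16 erase_block_via_cdma(u32 block)
{
	/* The data pointer and page fields are unused for an erase. */
	if (CDMA_Data_CMD(ERASE_CMD, 0, block, 0, 0,
			  LLD_CMD_FLAG_MODE_CDMA) != PASS)
		return FAIL;

	/* Nothing touches the flash until the pending array is flushed. */
	return CDMA_Execute_CMDs();
}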
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: CDMA_MemCopy_CMD
-* Inputs: dest: pointer to destination
-* src: pointer to source
-* count: num bytes to transfer
-* Outputs: PASS
-* Description: This function takes the parameters and puts them
-* into the "pending commands" array.
-* It does not parse or validate the parameters.
-* The array index is the same as the tag.
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 CDMA_MemCopy_CMD(u8 *dest, u8 *src, u32 byte_cnt, u16 flags)
-{
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- info.pcmds[info.pcmds_num].CMD = MEMCOPY_CMD;
- info.pcmds[info.pcmds_num].DataAddr = 0;
- info.pcmds[info.pcmds_num].Block = 0;
- info.pcmds[info.pcmds_num].Page = 0;
- info.pcmds[info.pcmds_num].PageCount = 0;
- info.pcmds[info.pcmds_num].DataDestAddr = dest;
- info.pcmds[info.pcmds_num].DataSrcAddr = src;
- info.pcmds[info.pcmds_num].MemCopyByteCnt = byte_cnt;
- info.pcmds[info.pcmds_num].Flags = flags;
- info.pcmds[info.pcmds_num].Status = 0xB0B;
-
- info.pcmds_num++;
-
- if (info.pcmds_num >= MAX_PENDING_CMDS) {
- if (CDMA_Execute_CMDs()) {
- printk(KERN_ERR "CDMA_Execute_CMDs fail!\n");
- return FAIL;
- }
- }
-
- return PASS;
-}
-
-#if 0
-/* Prints the PendingCMDs array */
-void print_pending_cmds(void)
-{
- u16 i;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < info.pcmds_num; i++) {
- nand_dbg_print(NAND_DBG_DEBUG, "\ni: %d\n", i);
- switch (info.pcmds[i].CMD) {
- case ERASE_CMD:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Erase Command (0x%x)\n",
- info.pcmds[i].CMD);
- break;
- case WRITE_MAIN_CMD:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Write Main Command (0x%x)\n",
- info.pcmds[i].CMD);
- break;
- case WRITE_MAIN_SPARE_CMD:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Write Main Spare Command (0x%x)\n",
- info.pcmds[i].CMD);
- break;
- case READ_MAIN_SPARE_CMD:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Read Main Spare Command (0x%x)\n",
- info.pcmds[i].CMD);
- break;
- case READ_MAIN_CMD:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Read Main Command (0x%x)\n",
- info.pcmds[i].CMD);
- break;
- case MEMCOPY_CMD:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Memcopy Command (0x%x)\n",
- info.pcmds[i].CMD);
- break;
- case DUMMY_CMD:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Dummy Command (0x%x)\n",
- info.pcmds[i].CMD);
- break;
- default:
- nand_dbg_print(NAND_DBG_DEBUG,
- "Illegal Command (0x%x)\n",
- info.pcmds[i].CMD);
- break;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "DataAddr: 0x%x\n",
- (u32)info.pcmds[i].DataAddr);
- nand_dbg_print(NAND_DBG_DEBUG, "Block: %d\n",
- info.pcmds[i].Block);
- nand_dbg_print(NAND_DBG_DEBUG, "Page: %d\n",
- info.pcmds[i].Page);
- nand_dbg_print(NAND_DBG_DEBUG, "PageCount: %d\n",
- info.pcmds[i].PageCount);
- nand_dbg_print(NAND_DBG_DEBUG, "DataDestAddr: 0x%x\n",
- (u32)info.pcmds[i].DataDestAddr);
- nand_dbg_print(NAND_DBG_DEBUG, "DataSrcAddr: 0x%x\n",
- (u32)info.pcmds[i].DataSrcAddr);
- nand_dbg_print(NAND_DBG_DEBUG, "MemCopyByteCnt: %d\n",
- info.pcmds[i].MemCopyByteCnt);
- nand_dbg_print(NAND_DBG_DEBUG, "Flags: 0x%x\n",
- info.pcmds[i].Flags);
- nand_dbg_print(NAND_DBG_DEBUG, "Status: 0x%x\n",
- info.pcmds[i].Status);
- }
-}
-
-/* Print the CDMA descriptors */
-void print_cdma_descriptors(void)
-{
- struct cdma_descriptor *pc;
- int i;
-
- pc = (struct cdma_descriptor *)info.cdma_desc_buf;
-
- nand_dbg_print(NAND_DBG_DEBUG, "\nWill dump cdma descriptors:\n");
-
- for (i = 0; i < info.cdma_num; i++) {
- nand_dbg_print(NAND_DBG_DEBUG, "\ni: %d\n", i);
- nand_dbg_print(NAND_DBG_DEBUG,
- "NxtPointerHi: 0x%x, NxtPointerLo: 0x%x\n",
- pc[i].NxtPointerHi, pc[i].NxtPointerLo);
- nand_dbg_print(NAND_DBG_DEBUG,
- "FlashPointerHi: 0x%x, FlashPointerLo: 0x%x\n",
- pc[i].FlashPointerHi, pc[i].FlashPointerLo);
- nand_dbg_print(NAND_DBG_DEBUG, "CommandType: 0x%x\n",
- pc[i].CommandType);
- nand_dbg_print(NAND_DBG_DEBUG,
- "MemAddrHi: 0x%x, MemAddrLo: 0x%x\n",
- pc[i].MemAddrHi, pc[i].MemAddrLo);
- nand_dbg_print(NAND_DBG_DEBUG, "CommandFlags: 0x%x\n",
- pc[i].CommandFlags);
- nand_dbg_print(NAND_DBG_DEBUG, "Channel: %d, Status: 0x%x\n",
- pc[i].Channel, pc[i].Status);
- nand_dbg_print(NAND_DBG_DEBUG,
- "MemCopyPointerHi: 0x%x, MemCopyPointerLo: 0x%x\n",
- pc[i].MemCopyPointerHi, pc[i].MemCopyPointerLo);
- nand_dbg_print(NAND_DBG_DEBUG,
- "Reserved12: 0x%x, Reserved13: 0x%x, "
- "Reserved14: 0x%x, pcmd: %d\n",
- pc[i].Reserved12, pc[i].Reserved13,
- pc[i].Reserved14, pc[i].pcmd);
- }
-}
-
-/* Print the Memory copy descriptors */
-static void print_memcp_descriptors(void)
-{
- struct memcpy_descriptor *pm;
- int i;
-
- pm = (struct memcpy_descriptor *)info.memcp_desc_buf;
-
- nand_dbg_print(NAND_DBG_DEBUG, "\nWill dump mem_cpy descriptors:\n");
-
- for (i = 0; i < info.cdma_num; i++) {
- nand_dbg_print(NAND_DBG_DEBUG, "\ni: %d\n", i);
- nand_dbg_print(NAND_DBG_DEBUG,
- "NxtPointerHi: 0x%x, NxtPointerLo: 0x%x\n",
- pm[i].NxtPointerHi, pm[i].NxtPointerLo);
- nand_dbg_print(NAND_DBG_DEBUG,
- "SrcAddrHi: 0x%x, SrcAddrLo: 0x%x\n",
- pm[i].SrcAddrHi, pm[i].SrcAddrLo);
- nand_dbg_print(NAND_DBG_DEBUG,
- "DestAddrHi: 0x%x, DestAddrLo: 0x%x\n",
- pm[i].DestAddrHi, pm[i].DestAddrLo);
- nand_dbg_print(NAND_DBG_DEBUG, "XferSize: %d\n",
- pm[i].XferSize);
- nand_dbg_print(NAND_DBG_DEBUG, "MemCopyFlags: 0x%x\n",
- pm[i].MemCopyFlags);
- nand_dbg_print(NAND_DBG_DEBUG, "MemCopyStatus: %d\n",
- pm[i].MemCopyStatus);
- nand_dbg_print(NAND_DBG_DEBUG, "reserved9: 0x%x\n",
- pm[i].reserved9);
- nand_dbg_print(NAND_DBG_DEBUG, "reserved10: 0x%x\n",
- pm[i].reserved10);
- nand_dbg_print(NAND_DBG_DEBUG, "reserved11: 0x%x\n",
- pm[i].reserved11);
- nand_dbg_print(NAND_DBG_DEBUG, "reserved12: 0x%x\n",
- pm[i].reserved12);
- nand_dbg_print(NAND_DBG_DEBUG, "reserved13: 0x%x\n",
- pm[i].reserved13);
- nand_dbg_print(NAND_DBG_DEBUG, "reserved14: 0x%x\n",
- pm[i].reserved14);
- nand_dbg_print(NAND_DBG_DEBUG, "reserved15: 0x%x\n",
- pm[i].reserved15);
- }
-}
-#endif
-
-/* Reset cdma_descriptor chain to 0 */
-static void reset_cdma_desc(int i)
-{
- struct cdma_descriptor *ptr;
-
- BUG_ON(i >= MAX_DESCS);
-
- ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
-
- ptr[i].NxtPointerHi = 0;
- ptr[i].NxtPointerLo = 0;
- ptr[i].FlashPointerHi = 0;
- ptr[i].FlashPointerLo = 0;
- ptr[i].CommandType = 0;
- ptr[i].MemAddrHi = 0;
- ptr[i].MemAddrLo = 0;
- ptr[i].CommandFlags = 0;
- ptr[i].Channel = 0;
- ptr[i].Status = 0;
- ptr[i].MemCopyPointerHi = 0;
- ptr[i].MemCopyPointerLo = 0;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: CDMA_UpdateEventStatus
-* Inputs: none
-* Outputs: none
-* Description: This function updates the event status of all the channels
-* when an error condition is reported.
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-void CDMA_UpdateEventStatus(void)
-{
- int i, j, active_chan;
- struct cdma_descriptor *ptr;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
-
- for (j = 0; j < info.cdma_num; j++) {
- /* Check for the descriptor with failure */
- if ((ptr[j].Status & CMD_DMA_DESC_FAIL))
- break;
-
- }
-
- /* All the previous cmd's status for this channel must be good */
- for (i = 0; i < j; i++) {
- if (ptr[i].pcmd != 0xff)
- info.pcmds[ptr[i].pcmd].Status = CMD_PASS;
- }
-
- /* Abort the channel with type 0 reset command. It resets the */
- /* selected channel after the descriptor completes the flash */
- /* operation and status has been updated for the descriptor. */
- /* Memory Copy and Sync associated with this descriptor will */
- /* not be executed */
- active_chan = ioread32(FlashReg + CHNL_ACTIVE);
- if ((active_chan & (1 << info.flash_bank)) == (1 << info.flash_bank)) {
- iowrite32(MODE_02 | (0 << 4), FlashMem); /* Type 0 reset */
- iowrite32((0xF << 4) | info.flash_bank, FlashMem + 0x10);
-	} else { /* Should not reach here */
- printk(KERN_ERR "Error! Used bank is not set in"
- " reg CHNL_ACTIVE\n");
- }
-}
-
-static void cdma_trans(u16 chan)
-{
- u32 addr;
-
- addr = info.cdma_desc;
-
- iowrite32(MODE_10 | (chan << 24), FlashMem);
- iowrite32((1 << 7) | chan, FlashMem + 0x10);
-
- iowrite32(MODE_10 | (chan << 24) | ((0x0FFFF & (addr >> 16)) << 8),
- FlashMem);
- iowrite32((1 << 7) | (1 << 4) | 0, FlashMem + 0x10);
-
- iowrite32(MODE_10 | (chan << 24) | ((0x0FFFF & addr) << 8), FlashMem);
- iowrite32((1 << 7) | (1 << 5) | 0, FlashMem + 0x10);
-
- iowrite32(MODE_10 | (chan << 24), FlashMem);
- iowrite32((1 << 7) | (1 << 5) | (1 << 4) | 0, FlashMem + 0x10);
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: CDMA_Execute_CMDs (for use with CMD_DMA)
-* Inputs: none (operates on the pending commands array)
-* Outputs: PASS/FAIL
-* Description: Build the SDMA chain(s) by making one CMD-DMA descriptor
-* for each pending command, start the CDMA engine, and return.
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 CDMA_Execute_CMDs(void)
-{
- int i, ret;
- u64 flash_add;
- u32 ptr;
- dma_addr_t map_addr, next_ptr;
- u16 status = PASS;
- u16 tmp_c;
- struct cdma_descriptor *pc;
- struct memcpy_descriptor *pm;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- /* No pending cmds to execute, just exit */
- if (0 == info.pcmds_num) {
- nand_dbg_print(NAND_DBG_TRACE,
- "No pending cmds to execute. Just exit.\n");
- return PASS;
- }
-
- for (i = 0; i < MAX_DESCS; i++)
- reset_cdma_desc(i);
-
- pc = (struct cdma_descriptor *)info.cdma_desc_buf;
- pm = (struct memcpy_descriptor *)info.memcp_desc_buf;
-
- info.cdma_desc = virt_to_bus(info.cdma_desc_buf);
- info.memcp_desc = virt_to_bus(info.memcp_desc_buf);
- next_ptr = info.cdma_desc;
- info.cdma_num = 0;
-
- for (i = 0; i < info.pcmds_num; i++) {
- if (info.pcmds[i].Block >= DeviceInfo.wTotalBlocks) {
- info.pcmds[i].Status = CMD_NOT_DONE;
- continue;
- }
-
- next_ptr += sizeof(struct cdma_descriptor);
- pc[info.cdma_num].NxtPointerHi = next_ptr >> 16;
- pc[info.cdma_num].NxtPointerLo = next_ptr & 0xffff;
-
- /* Use the Block offset within a bank */
- tmp_c = info.pcmds[i].Block /
- (DeviceInfo.wTotalBlocks / totalUsedBanks);
- flash_add = (u64)(info.pcmds[i].Block - tmp_c *
- (DeviceInfo.wTotalBlocks / totalUsedBanks)) *
- DeviceInfo.wBlockDataSize +
- (u64)(info.pcmds[i].Page) *
- DeviceInfo.wPageDataSize;
-
- ptr = MODE_10 | (info.flash_bank << 24) |
- (u32)GLOB_u64_Div(flash_add,
- DeviceInfo.wPageDataSize);
- pc[info.cdma_num].FlashPointerHi = ptr >> 16;
- pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
-
- if ((info.pcmds[i].CMD == WRITE_MAIN_SPARE_CMD) ||
- (info.pcmds[i].CMD == READ_MAIN_SPARE_CMD)) {
- /* Descriptor to set Main+Spare Access Mode */
- pc[info.cdma_num].CommandType = 0x43;
- pc[info.cdma_num].CommandFlags =
- (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
- pc[info.cdma_num].MemAddrHi = 0;
- pc[info.cdma_num].MemAddrLo = 0;
- pc[info.cdma_num].Channel = 0;
- pc[info.cdma_num].Status = 0;
- pc[info.cdma_num].pcmd = i;
-
- info.cdma_num++;
- BUG_ON(info.cdma_num >= MAX_DESCS);
-
- reset_cdma_desc(info.cdma_num);
- next_ptr += sizeof(struct cdma_descriptor);
- pc[info.cdma_num].NxtPointerHi = next_ptr >> 16;
- pc[info.cdma_num].NxtPointerLo = next_ptr & 0xffff;
- pc[info.cdma_num].FlashPointerHi = ptr >> 16;
- pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
- }
-
- switch (info.pcmds[i].CMD) {
- case ERASE_CMD:
- pc[info.cdma_num].CommandType = 1;
- pc[info.cdma_num].CommandFlags =
- (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
- pc[info.cdma_num].MemAddrHi = 0;
- pc[info.cdma_num].MemAddrLo = 0;
- break;
-
- case WRITE_MAIN_CMD:
- pc[info.cdma_num].CommandType =
- 0x2100 | info.pcmds[i].PageCount;
- pc[info.cdma_num].CommandFlags =
- (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
- map_addr = virt_to_bus(info.pcmds[i].DataAddr);
- pc[info.cdma_num].MemAddrHi = map_addr >> 16;
- pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
- break;
-
- case READ_MAIN_CMD:
- pc[info.cdma_num].CommandType =
- 0x2000 | info.pcmds[i].PageCount;
- pc[info.cdma_num].CommandFlags =
- (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
- map_addr = virt_to_bus(info.pcmds[i].DataAddr);
- pc[info.cdma_num].MemAddrHi = map_addr >> 16;
- pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
- break;
-
- case WRITE_MAIN_SPARE_CMD:
- pc[info.cdma_num].CommandType =
- 0x2100 | info.pcmds[i].PageCount;
- pc[info.cdma_num].CommandFlags =
- (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
- map_addr = virt_to_bus(info.pcmds[i].DataAddr);
- pc[info.cdma_num].MemAddrHi = map_addr >> 16;
- pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
- break;
-
- case READ_MAIN_SPARE_CMD:
- pc[info.cdma_num].CommandType =
- 0x2000 | info.pcmds[i].PageCount;
- pc[info.cdma_num].CommandFlags =
- (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
- map_addr = virt_to_bus(info.pcmds[i].DataAddr);
- pc[info.cdma_num].MemAddrHi = map_addr >> 16;
- pc[info.cdma_num].MemAddrLo = map_addr & 0xffff;
- break;
-
- case MEMCOPY_CMD:
- pc[info.cdma_num].CommandType = 0xFFFF; /* NOP cmd */
- /* Set bit 11 to let the CDMA engine continue to */
- /* execute only after it has finished processing */
- /* the memcopy descriptor. */
- /* Also set bit 10 and bit 9 to 1 */
- pc[info.cdma_num].CommandFlags = 0x0E40;
- map_addr = info.memcp_desc + info.cdma_num *
- sizeof(struct memcpy_descriptor);
- pc[info.cdma_num].MemCopyPointerHi = map_addr >> 16;
- pc[info.cdma_num].MemCopyPointerLo = map_addr & 0xffff;
-
- pm[info.cdma_num].NxtPointerHi = 0;
- pm[info.cdma_num].NxtPointerLo = 0;
-
- map_addr = virt_to_bus(info.pcmds[i].DataSrcAddr);
- pm[info.cdma_num].SrcAddrHi = map_addr >> 16;
- pm[info.cdma_num].SrcAddrLo = map_addr & 0xffff;
- map_addr = virt_to_bus(info.pcmds[i].DataDestAddr);
- pm[info.cdma_num].DestAddrHi = map_addr >> 16;
- pm[info.cdma_num].DestAddrLo = map_addr & 0xffff;
-
- pm[info.cdma_num].XferSize =
- info.pcmds[i].MemCopyByteCnt;
- pm[info.cdma_num].MemCopyFlags =
- (0 << 15 | 0 << 14 | 27 << 8 | 0x40);
- pm[info.cdma_num].MemCopyStatus = 0;
- break;
-
- case DUMMY_CMD:
- default:
- pc[info.cdma_num].CommandType = 0XFFFF;
- pc[info.cdma_num].CommandFlags =
- (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
- pc[info.cdma_num].MemAddrHi = 0;
- pc[info.cdma_num].MemAddrLo = 0;
- break;
- }
-
- pc[info.cdma_num].Channel = 0;
- pc[info.cdma_num].Status = 0;
- pc[info.cdma_num].pcmd = i;
-
- info.cdma_num++;
- BUG_ON(info.cdma_num >= MAX_DESCS);
-
- if ((info.pcmds[i].CMD == WRITE_MAIN_SPARE_CMD) ||
- (info.pcmds[i].CMD == READ_MAIN_SPARE_CMD)) {
- /* Descriptor to set back Main Area Access Mode */
- reset_cdma_desc(info.cdma_num);
- next_ptr += sizeof(struct cdma_descriptor);
- pc[info.cdma_num].NxtPointerHi = next_ptr >> 16;
- pc[info.cdma_num].NxtPointerLo = next_ptr & 0xffff;
-
- pc[info.cdma_num].FlashPointerHi = ptr >> 16;
- pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
-
- pc[info.cdma_num].CommandType = 0x42;
- pc[info.cdma_num].CommandFlags =
- (0 << 10) | (1 << 9) | (0 << 8) | 0x40;
- pc[info.cdma_num].MemAddrHi = 0;
- pc[info.cdma_num].MemAddrLo = 0;
-
- pc[info.cdma_num].Channel = 0;
- pc[info.cdma_num].Status = 0;
- pc[info.cdma_num].pcmd = i;
-
- info.cdma_num++;
- BUG_ON(info.cdma_num >= MAX_DESCS);
- }
- }
-
- /* Add a dummy descriptor at end of the CDMA chain */
- reset_cdma_desc(info.cdma_num);
- ptr = MODE_10 | (info.flash_bank << 24);
- pc[info.cdma_num].FlashPointerHi = ptr >> 16;
- pc[info.cdma_num].FlashPointerLo = ptr & 0xffff;
- pc[info.cdma_num].CommandType = 0xFFFF; /* NOP command */
- /* Set Command Flags for the last CDMA descriptor: */
- /* set Continue bit (bit 9) to 0 and Interrupt bit (bit 8) to 1 */
- pc[info.cdma_num].CommandFlags =
- (0 << 10) | (0 << 9) | (1 << 8) | 0x40;
- pc[info.cdma_num].pcmd = 0xff; /* Set it to an illegal value */
- info.cdma_num++;
- BUG_ON(info.cdma_num >= MAX_DESCS);
-
- iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */
-
- iowrite32(1, FlashReg + DMA_ENABLE);
- /* Wait for DMA to be enabled before issuing the next command */
- while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
- cdma_trans(info.flash_bank);
-
- ret = wait_for_completion_timeout(&info.complete, 50 * HZ);
- if (!ret)
- printk(KERN_ERR "Wait for completion timeout "
- "in %s, Line %d\n", __FILE__, __LINE__);
- status = info.ret;
-
- info.pcmds_num = 0; /* Clear the pending cmds number to 0 */
-
- return status;
-}
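
The descriptor fields above carry 32-bit bus addresses split into 16-bit halves (NxtPointerHi/Lo, MemAddrHi/Lo and so on). A small standalone helper pair showing the same split and recombination, purely for illustration:

#include <stdint.h>

static void split_addr16(uint32_t addr, uint32_t *hi, uint32_t *lo)
{
	*hi = addr >> 16;	/* upper 16 bits, as in NxtPointerHi */
	*lo = addr & 0xffff;	/* lower 16 bits, as in NxtPointerLo */
}

static uint32_t join_addr16(uint32_t hi, uint32_t lo)
{
	return (hi << 16) | (lo & 0xffff);
}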
-
-int is_cdma_interrupt(void)
-{
- u32 ints_b0, ints_b1, ints_b2, ints_b3, ints_cdma;
- u32 int_en_mask;
- u32 cdma_int_en_mask;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- /* Set the global Enable masks for only those interrupts
- * that are supported */
- cdma_int_en_mask = (DMA_INTR__DESC_COMP_CHANNEL0 |
- DMA_INTR__DESC_COMP_CHANNEL1 |
- DMA_INTR__DESC_COMP_CHANNEL2 |
- DMA_INTR__DESC_COMP_CHANNEL3 |
- DMA_INTR__MEMCOPY_DESC_COMP);
-
- int_en_mask = (INTR_STATUS0__ECC_ERR |
- INTR_STATUS0__PROGRAM_FAIL |
- INTR_STATUS0__ERASE_FAIL);
-
- ints_b0 = ioread32(FlashReg + INTR_STATUS0) & int_en_mask;
- ints_b1 = ioread32(FlashReg + INTR_STATUS1) & int_en_mask;
- ints_b2 = ioread32(FlashReg + INTR_STATUS2) & int_en_mask;
- ints_b3 = ioread32(FlashReg + INTR_STATUS3) & int_en_mask;
- ints_cdma = ioread32(FlashReg + DMA_INTR) & cdma_int_en_mask;
-
- nand_dbg_print(NAND_DBG_WARN, "ints_bank0 to ints_bank3: "
- "0x%x, 0x%x, 0x%x, 0x%x, ints_cdma: 0x%x\n",
- ints_b0, ints_b1, ints_b2, ints_b3, ints_cdma);
-
- if (ints_b0 || ints_b1 || ints_b2 || ints_b3 || ints_cdma) {
- return 1;
- } else {
- iowrite32(ints_b0, FlashReg + INTR_STATUS0);
- iowrite32(ints_b1, FlashReg + INTR_STATUS1);
- iowrite32(ints_b2, FlashReg + INTR_STATUS2);
- iowrite32(ints_b3, FlashReg + INTR_STATUS3);
- nand_dbg_print(NAND_DBG_DEBUG,
- "Not a NAND controller interrupt! Ignore it.\n");
- return 0;
- }
-}
-
-static void update_event_status(void)
-{
- int i;
- struct cdma_descriptor *ptr;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
-
- for (i = 0; i < info.cdma_num; i++) {
- if (ptr[i].pcmd != 0xff)
- info.pcmds[ptr[i].pcmd].Status = CMD_PASS;
- if ((ptr[i].CommandType == 0x41) ||
- (ptr[i].CommandType == 0x42) ||
- (ptr[i].CommandType == 0x43))
- continue;
-
- switch (info.pcmds[ptr[i].pcmd].CMD) {
- case READ_MAIN_SPARE_CMD:
- Conv_Main_Spare_Data_Phy2Log_Format(
- info.pcmds[ptr[i].pcmd].DataAddr,
- info.pcmds[ptr[i].pcmd].PageCount);
- break;
- case READ_SPARE_CMD:
- Conv_Spare_Data_Phy2Log_Format(
- info.pcmds[ptr[i].pcmd].DataAddr);
- break;
- }
- }
-}
-
-static u16 do_ecc_for_desc(u32 ch, u8 *buf, u16 page)
-{
- u16 event = EVENT_NONE;
- u16 err_byte;
- u16 err_page = 0;
- u8 err_sector;
- u8 err_device;
- u16 ecc_correction_info;
- u16 err_address;
- u32 eccSectorSize;
- u8 *err_pos;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
-
- do {
- if (0 == ch)
- err_page = ioread32(FlashReg + ERR_PAGE_ADDR0);
- else if (1 == ch)
- err_page = ioread32(FlashReg + ERR_PAGE_ADDR1);
- else if (2 == ch)
- err_page = ioread32(FlashReg + ERR_PAGE_ADDR2);
- else if (3 == ch)
- err_page = ioread32(FlashReg + ERR_PAGE_ADDR3);
-
- err_address = ioread32(FlashReg + ECC_ERROR_ADDRESS);
- err_byte = err_address & ECC_ERROR_ADDRESS__OFFSET;
- err_sector = ((err_address &
- ECC_ERROR_ADDRESS__SECTOR_NR) >> 12);
-
- ecc_correction_info = ioread32(FlashReg + ERR_CORRECTION_INFO);
- err_device = ((ecc_correction_info &
- ERR_CORRECTION_INFO__DEVICE_NR) >> 8);
-
- if (ecc_correction_info & ERR_CORRECTION_INFO__ERROR_TYPE) {
- event = EVENT_UNCORRECTABLE_DATA_ERROR;
- } else {
- event = EVENT_CORRECTABLE_DATA_ERROR_FIXED;
- if (err_byte < ECC_SECTOR_SIZE) {
- err_pos = buf +
- (err_page - page) *
- DeviceInfo.wPageDataSize +
- err_sector * eccSectorSize +
- err_byte *
- DeviceInfo.wDevicesConnected +
- err_device;
- *err_pos ^= ecc_correction_info &
- ERR_CORRECTION_INFO__BYTEMASK;
- }
- }
- } while (!(ecc_correction_info & ERR_CORRECTION_INFO__LAST_ERR_INFO));
-
- return event;
-}
-
-static u16 process_ecc_int(u32 c, u16 *p_desc_num)
-{
- struct cdma_descriptor *ptr;
- u16 j;
- int event = EVENT_PASS;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (c != info.flash_bank)
-		printk(KERN_ERR "Error! info.flash_bank is %d, while c is %d\n",
- info.flash_bank, c);
-
- ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
-
- for (j = 0; j < info.cdma_num; j++)
- if ((ptr[j].Status & CMD_DMA_DESC_COMP) != CMD_DMA_DESC_COMP)
- break;
-
-	*p_desc_num = j; /* Pass the descriptor number found here */
-
- if (j >= info.cdma_num) {
-		printk(KERN_ERR "Cannot find the correct descriptor number "
-			"when the ECC interrupt was triggered! "
-			"info.cdma_num: %d, j: %d\n", info.cdma_num, j);
- return EVENT_UNCORRECTABLE_DATA_ERROR;
- }
-
- event = do_ecc_for_desc(c, info.pcmds[ptr[j].pcmd].DataAddr,
- info.pcmds[ptr[j].pcmd].Page);
-
- if (EVENT_UNCORRECTABLE_DATA_ERROR == event) {
-		printk(KERN_ERR "Uncorrectable ECC error! "
- "info.cdma_num: %d, j: %d, "
- "pending cmd CMD: 0x%x, "
- "Block: 0x%x, Page: 0x%x, PageCount: 0x%x\n",
- info.cdma_num, j,
- info.pcmds[ptr[j].pcmd].CMD,
- info.pcmds[ptr[j].pcmd].Block,
- info.pcmds[ptr[j].pcmd].Page,
- info.pcmds[ptr[j].pcmd].PageCount);
-
- if (ptr[j].pcmd != 0xff)
- info.pcmds[ptr[j].pcmd].Status = CMD_FAIL;
- CDMA_UpdateEventStatus();
- }
-
- return event;
-}
-
-static void process_prog_erase_fail_int(u16 desc_num)
-{
- struct cdma_descriptor *ptr;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- ptr = (struct cdma_descriptor *)info.cdma_desc_buf;
-
- if (ptr[desc_num].pcmd != 0xFF)
- info.pcmds[ptr[desc_num].pcmd].Status = CMD_FAIL;
-
- CDMA_UpdateEventStatus();
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: CDMA_Event_Status (for use with CMD_DMA)
-* Inputs: none
-* Outputs: Event_Status code
-* Description: This function is called after an interrupt has happened.
-* It reads the HW status register and ...tbd
-* It returns the appropriate event status.
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 CDMA_Event_Status(void)
-{
- u32 ints_addr[4] = {INTR_STATUS0, INTR_STATUS1,
- INTR_STATUS2, INTR_STATUS3};
- u32 dma_intr_bit[4] = {DMA_INTR__DESC_COMP_CHANNEL0,
- DMA_INTR__DESC_COMP_CHANNEL1,
- DMA_INTR__DESC_COMP_CHANNEL2,
- DMA_INTR__DESC_COMP_CHANNEL3};
- u32 cdma_int_status, int_status;
- u32 ecc_enable = 0;
- u16 event = EVENT_PASS;
- u16 cur_desc = 0;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- ecc_enable = ioread32(FlashReg + ECC_ENABLE);
-
- while (1) {
- int_status = ioread32(FlashReg + ints_addr[info.flash_bank]);
- if (ecc_enable && (int_status & INTR_STATUS0__ECC_ERR)) {
- event = process_ecc_int(info.flash_bank, &cur_desc);
- iowrite32(INTR_STATUS0__ECC_ERR,
- FlashReg + ints_addr[info.flash_bank]);
- if (EVENT_UNCORRECTABLE_DATA_ERROR == event) {
- nand_dbg_print(NAND_DBG_WARN,
- "ints_bank0 to ints_bank3: "
- "0x%x, 0x%x, 0x%x, 0x%x, "
- "ints_cdma: 0x%x\n",
- ioread32(FlashReg + INTR_STATUS0),
- ioread32(FlashReg + INTR_STATUS1),
- ioread32(FlashReg + INTR_STATUS2),
- ioread32(FlashReg + INTR_STATUS3),
- ioread32(FlashReg + DMA_INTR));
- break;
- }
- } else if (int_status & INTR_STATUS0__PROGRAM_FAIL) {
- printk(KERN_ERR "NAND program fail interrupt!\n");
- process_prog_erase_fail_int(cur_desc);
- event = EVENT_PROGRAM_FAILURE;
- break;
- } else if (int_status & INTR_STATUS0__ERASE_FAIL) {
- printk(KERN_ERR "NAND erase fail interrupt!\n");
- process_prog_erase_fail_int(cur_desc);
- event = EVENT_ERASE_FAILURE;
- break;
- } else {
- cdma_int_status = ioread32(FlashReg + DMA_INTR);
- if (cdma_int_status & dma_intr_bit[info.flash_bank]) {
- iowrite32(dma_intr_bit[info.flash_bank],
- FlashReg + DMA_INTR);
- update_event_status();
- event = EVENT_PASS;
- break;
- }
- }
- }
-
- int_status = ioread32(FlashReg + ints_addr[info.flash_bank]);
- iowrite32(int_status, FlashReg + ints_addr[info.flash_bank]);
- cdma_int_status = ioread32(FlashReg + DMA_INTR);
- iowrite32(cdma_int_status, FlashReg + DMA_INTR);
-
- iowrite32(0, FlashReg + DMA_ENABLE);
- while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- return event;
-}
-
-
-
diff --git a/drivers/staging/spectra/lld_cdma.h b/drivers/staging/spectra/lld_cdma.h
deleted file mode 100644
index 854ea06..0000000
--- a/drivers/staging/spectra/lld_cdma.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-/* header for LLD_CDMA.c module */
-
-#ifndef _LLD_CDMA_
-#define _LLD_CDMA_
-
-#include "flash.h"
-
-#define DEBUG_SYNC 1
-
-/*/////////// CDMA specific MACRO definition */
-#define MAX_DESCS (255)
-#define MAX_CHANS (4)
-#define MAX_SYNC_POINTS (16)
-#define MAX_DESC_PER_CHAN (MAX_DESCS * 3 + MAX_SYNC_POINTS + 2)
-
-#define CHANNEL_SYNC_MASK (0x000F)
-#define CHANNEL_DMA_MASK (0x00F0)
-#define CHANNEL_ID_MASK (0x0300)
-#define CHANNEL_CONT_MASK (0x4000)
-#define CHANNEL_INTR_MASK (0x8000)
-
-#define CHANNEL_SYNC_OFFSET (0)
-#define CHANNEL_DMA_OFFSET (4)
-#define CHANNEL_ID_OFFSET (8)
-#define CHANNEL_CONT_OFFSET (14)
-#define CHANNEL_INTR_OFFSET (15)
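
These mask/offset pairs suggest a packed per-channel control word; a hedged example of extracting and updating one field with them (the layout of the packed word beyond what the masks imply is an assumption, and the helpers below are not part of the original header):

static inline u16 channel_get_dma(u16 packed)
{
	return (packed & CHANNEL_DMA_MASK) >> CHANNEL_DMA_OFFSET;
}

static inline u16 channel_set_dma(u16 packed, u16 dma)
{
	return (packed & ~CHANNEL_DMA_MASK) |
	       ((dma << CHANNEL_DMA_OFFSET) & CHANNEL_DMA_MASK);
}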
-
-u16 CDMA_Data_CMD(u8 cmd, u8 *data, u32 block, u16 page, u16 num, u16 flags);
-u16 CDMA_MemCopy_CMD(u8 *dest, u8 *src, u32 byte_cnt, u16 flags);
-u16 CDMA_Execute_CMDs(void);
-void print_pending_cmds(void);
-void print_cdma_descriptors(void);
-
-extern u8 g_SBDCmdIndex;
-extern struct mrst_nand_info info;
-
-
-/*/////////// prototypes: APIs for LLD_CDMA */
-int is_cdma_interrupt(void);
-u16 CDMA_Event_Status(void);
-
-/* CMD-DMA Descriptor Struct. These are defined by the CMD_DMA HW */
-struct cdma_descriptor {
- u32 NxtPointerHi;
- u32 NxtPointerLo;
- u32 FlashPointerHi;
- u32 FlashPointerLo;
- u32 CommandType;
- u32 MemAddrHi;
- u32 MemAddrLo;
- u32 CommandFlags;
- u32 Channel;
- u32 Status;
- u32 MemCopyPointerHi;
- u32 MemCopyPointerLo;
- u32 Reserved12;
- u32 Reserved13;
- u32 Reserved14;
- u32 pcmd; /* pending cmd num related to this descriptor */
-};
-
-/* This struct holds one MemCopy descriptor as defined by the HW */
-struct memcpy_descriptor {
- u32 NxtPointerHi;
- u32 NxtPointerLo;
- u32 SrcAddrHi;
- u32 SrcAddrLo;
- u32 DestAddrHi;
- u32 DestAddrLo;
- u32 XferSize;
- u32 MemCopyFlags;
- u32 MemCopyStatus;
- u32 reserved9;
- u32 reserved10;
- u32 reserved11;
- u32 reserved12;
- u32 reserved13;
- u32 reserved14;
- u32 reserved15;
-};
-
-/* Pending CMD table entries (includes MemCopy parameters) */
-struct pending_cmd {
- u8 CMD;
- u8 *DataAddr;
- u32 Block;
- u16 Page;
- u16 PageCount;
- u8 *DataDestAddr;
- u8 *DataSrcAddr;
- u32 MemCopyByteCnt;
- u16 Flags;
- u16 Status;
-};
-
-#if DEBUG_SYNC
-extern u32 debug_sync_cnt;
-#endif
-
-/* Definitions for CMD DMA descriptor chain fields */
-#define CMD_DMA_DESC_COMP 0x8000
-#define CMD_DMA_DESC_FAIL 0x4000
-
-#endif /*_LLD_CDMA_*/
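The cdma_descriptor layout above carries addresses as Hi/Lo 32-bit register pairs. A hedged sketch of how a 64-bit physical address would be split into those fields (cdma_fill_ptrs is a hypothetical helper, not part of the removed driver):

static void cdma_fill_ptrs(struct cdma_descriptor *desc,
			   u64 next_phys, u64 mem_phys)
{
	/* Split each 64-bit physical address into the Hi/Lo pair the
	 * CMD-DMA hardware expects. */
	desc->NxtPointerHi = (u32)(next_phys >> 32);
	desc->NxtPointerLo = (u32)(next_phys & 0xFFFFFFFFu);
	desc->MemAddrHi    = (u32)(mem_phys >> 32);
	desc->MemAddrLo    = (u32)(mem_phys & 0xFFFFFFFFu);
}
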
diff --git a/drivers/staging/spectra/lld_emu.c b/drivers/staging/spectra/lld_emu.c
deleted file mode 100644
index 095f2f0..0000000
--- a/drivers/staging/spectra/lld_emu.c
+++ /dev/null
@@ -1,776 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include "flash.h"
-#include "ffsdefs.h"
-#include "lld_emu.h"
-#include "lld.h"
-#if CMD_DMA
-#include "lld_cdma.h"
-#if FLASH_EMU
-u32 totalUsedBanks;
-u32 valid_banks[MAX_CHANS];
-#endif
-#endif
-
-#define GLOB_LLD_PAGES 64
-#define GLOB_LLD_PAGE_SIZE (512+16)
-#define GLOB_LLD_PAGE_DATA_SIZE 512
-#define GLOB_LLD_BLOCKS 2048
-
-#if FLASH_EMU /* This is for entire module */
-
-static u8 *flash_memory[GLOB_LLD_BLOCKS * GLOB_LLD_PAGES];
-
-/* Read the nand emu file and fill its contents into flash_memory */
-int emu_load_file_to_mem(void)
-{
- mm_segment_t fs;
- struct file *nef_filp = NULL;
- struct inode *inode = NULL;
- loff_t nef_size = 0;
- loff_t tmp_file_offset, file_offset;
- ssize_t nread;
- int i, rc = -EINVAL;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- fs = get_fs();
- set_fs(get_ds());
-
- nef_filp = filp_open("/root/nand_emu_file", O_RDWR | O_LARGEFILE, 0);
- if (IS_ERR(nef_filp)) {
- printk(KERN_ERR "filp_open error: "
- "Unable to open nand emu file!\n");
- return PTR_ERR(nef_filp);
- }
-
- if (nef_filp->f_path.dentry) {
- inode = nef_filp->f_path.dentry->d_inode;
- } else {
- printk(KERN_ERR "Can not get valid inode!\n");
- goto out;
- }
-
- nef_size = i_size_read(inode->i_mapping->host);
- if (nef_size <= 0) {
- printk(KERN_ERR "Invalid nand emu file size: "
- "0x%llx\n", nef_size);
- goto out;
- } else {
- nand_dbg_print(NAND_DBG_DEBUG, "nand emu file size: %lld\n",
- nef_size);
- }
-
- file_offset = 0;
- for (i = 0; i < GLOB_LLD_BLOCKS * GLOB_LLD_PAGES; i++) {
- tmp_file_offset = file_offset;
- nread = vfs_read(nef_filp,
- (char __user *)flash_memory[i],
- GLOB_LLD_PAGE_SIZE, &tmp_file_offset);
- if (nread < GLOB_LLD_PAGE_SIZE) {
- printk(KERN_ERR "%s, Line %d - "
- "nand emu file partial read: "
- "%d bytes\n", __FILE__, __LINE__, (int)nread);
- goto out;
- }
- file_offset += GLOB_LLD_PAGE_SIZE;
- }
- rc = 0;
-
-out:
- filp_close(nef_filp, current->files);
- set_fs(fs);
- return rc;
-}
-
-/* Write contents of flash_memory to nand emu file */
-int emu_write_mem_to_file(void)
-{
- mm_segment_t fs;
- struct file *nef_filp = NULL;
- struct inode *inode = NULL;
- loff_t nef_size = 0;
- loff_t tmp_file_offset, file_offset;
- ssize_t nwritten;
- int i, rc = -EINVAL;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- fs = get_fs();
- set_fs(get_ds());
-
- nef_filp = filp_open("/root/nand_emu_file", O_RDWR | O_LARGEFILE, 0);
- if (IS_ERR(nef_filp)) {
- printk(KERN_ERR "filp_open error: "
- "Unable to open nand emu file!\n");
- return PTR_ERR(nef_filp);
- }
-
- if (nef_filp->f_path.dentry) {
- inode = nef_filp->f_path.dentry->d_inode;
- } else {
- printk(KERN_ERR "Invalid " "nef_filp->f_path.dentry value!\n");
- goto out;
- }
-
- nef_size = i_size_read(inode->i_mapping->host);
- if (nef_size <= 0) {
- printk(KERN_ERR "Invalid "
- "nand emu file size: 0x%llx\n", nef_size);
- goto out;
- } else {
- nand_dbg_print(NAND_DBG_DEBUG, "nand emu file size: "
- "%lld\n", nef_size);
- }
-
- file_offset = 0;
- for (i = 0; i < GLOB_LLD_BLOCKS * GLOB_LLD_PAGES; i++) {
- tmp_file_offset = file_offset;
- nwritten = vfs_write(nef_filp,
- (char __user *)flash_memory[i],
- GLOB_LLD_PAGE_SIZE, &tmp_file_offset);
- if (nwritten < GLOB_LLD_PAGE_SIZE) {
- printk(KERN_ERR "%s, Line %d - "
- "nand emu file partial write: "
- "%d bytes\n", __FILE__, __LINE__, (int)nwritten);
- goto out;
- }
- file_offset += GLOB_LLD_PAGE_SIZE;
- }
- rc = 0;
-
-out:
- filp_close(nef_filp, current->files);
- set_fs(fs);
- return rc;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Flash_Init
-* Inputs: none
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Creates & initializes the flash RAM array.
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_Flash_Init(void)
-{
- int i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- flash_memory[0] = vmalloc(GLOB_LLD_PAGE_SIZE * GLOB_LLD_BLOCKS *
- GLOB_LLD_PAGES * sizeof(u8));
- if (!flash_memory[0]) {
- printk(KERN_ERR "Fail to allocate memory "
- "for nand emulator!\n");
- return ERR;
- }
-
- memset((char *)(flash_memory[0]), 0xFF,
- GLOB_LLD_PAGE_SIZE * GLOB_LLD_BLOCKS * GLOB_LLD_PAGES *
- sizeof(u8));
-
- for (i = 1; i < GLOB_LLD_BLOCKS * GLOB_LLD_PAGES; i++)
- flash_memory[i] = flash_memory[i - 1] + GLOB_LLD_PAGE_SIZE;
-
- emu_load_file_to_mem(); /* Load nand emu file to mem */
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Flash_Release
-* Inputs: none
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Releases the flash.
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int emu_Flash_Release(void)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- emu_write_mem_to_file(); /* Write back mem to nand emu file */
-
- vfree(flash_memory[0]);
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Read_Device_ID
-* Inputs: none
-* Outputs: PASS=0 FAIL=1
-* Description: Reads the info from the controller registers.
-* Sets up DeviceInfo structure with device parameters
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-
-u16 emu_Read_Device_ID(void)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- DeviceInfo.wDeviceMaker = 0;
- DeviceInfo.wDeviceType = 8;
- DeviceInfo.wSpectraStartBlock = 36;
- DeviceInfo.wSpectraEndBlock = GLOB_LLD_BLOCKS - 1;
- DeviceInfo.wTotalBlocks = GLOB_LLD_BLOCKS;
- DeviceInfo.wPagesPerBlock = GLOB_LLD_PAGES;
- DeviceInfo.wPageSize = GLOB_LLD_PAGE_SIZE;
- DeviceInfo.wPageDataSize = GLOB_LLD_PAGE_DATA_SIZE;
- DeviceInfo.wPageSpareSize = GLOB_LLD_PAGE_SIZE -
- GLOB_LLD_PAGE_DATA_SIZE;
- DeviceInfo.wBlockSize = DeviceInfo.wPageSize * GLOB_LLD_PAGES;
- DeviceInfo.wBlockDataSize = DeviceInfo.wPageDataSize * GLOB_LLD_PAGES;
- DeviceInfo.wDataBlockNum = (u32) (DeviceInfo.wSpectraEndBlock -
- DeviceInfo.wSpectraStartBlock
- + 1);
- DeviceInfo.MLCDevice = 1; /* Emulate MLC device */
- DeviceInfo.nBitsInPageNumber =
- (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
- DeviceInfo.nBitsInPageDataSize =
- (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
- DeviceInfo.nBitsInBlockDataSize =
- (u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);
-
-#if CMD_DMA
- totalUsedBanks = 4;
- valid_banks[0] = 1;
- valid_banks[1] = 1;
- valid_banks[2] = 1;
- valid_banks[3] = 1;
-#endif
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Flash_Reset
-* Inputs: none
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Reset the flash
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_Flash_Reset(void)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Erase_Block
-* Inputs: Address
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Erase a block
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_Erase_Block(u32 block_add)
-{
- int i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (block_add >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "emu_Erase_Block error! "
- "Too big block address: %d\n", block_add);
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Erasing block %d\n",
- (int)block_add);
-
- for (i = block_add * GLOB_LLD_PAGES;
- i < ((block_add + 1) * GLOB_LLD_PAGES); i++) {
- if (flash_memory[i]) {
- memset((u8 *)(flash_memory[i]), 0xFF,
- DeviceInfo.wPageSize * sizeof(u8));
- }
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Write_Page_Main
-* Inputs: Write buffer address pointer
-* Block number
-* Page number
-* Number of pages to process
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Write the data in the buffer to main area of flash
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_Write_Page_Main(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- int i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks)
- return FAIL;
-
- if (Page + PageCount > DeviceInfo.wPagesPerBlock)
- return FAIL;
-
- nand_dbg_print(NAND_DBG_DEBUG, "emu_Write_Page_Main: "
- "lba %u Page %u PageCount %u\n",
- (unsigned int)Block,
- (unsigned int)Page, (unsigned int)PageCount);
-
- for (i = 0; i < PageCount; i++) {
- if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
- printk(KERN_ERR "Run out of memory\n");
- return FAIL;
- }
- memcpy((u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page]),
- write_data, DeviceInfo.wPageDataSize);
- write_data += DeviceInfo.wPageDataSize;
- Page++;
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Read_Page_Main
-* Inputs: Read buffer address pointer
-* Block number
-* Page number
-* Number of pages to process
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Read the data from the flash main area to the buffer
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_Read_Page_Main(u8 *read_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- int i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks)
- return FAIL;
-
- if (Page + PageCount > DeviceInfo.wPagesPerBlock)
- return FAIL;
-
- nand_dbg_print(NAND_DBG_DEBUG, "emu_Read_Page_Main: "
- "lba %u Page %u PageCount %u\n",
- (unsigned int)Block,
- (unsigned int)Page, (unsigned int)PageCount);
-
- for (i = 0; i < PageCount; i++) {
- if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
- memset(read_data, 0xFF, DeviceInfo.wPageDataSize);
- } else {
- memcpy(read_data,
- (u8 *) (flash_memory[Block * GLOB_LLD_PAGES
- + Page]),
- DeviceInfo.wPageDataSize);
- }
- read_data += DeviceInfo.wPageDataSize;
- Page++;
- }
-
- return PASS;
-}
-
-#ifndef ELDORA
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Read_Page_Main_Spare
-* Inputs: Write Buffer
-* Address
-* Buffer size
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Read from flash main+spare area
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_Read_Page_Main_Spare(u8 *read_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- int i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "Read Page Main+Spare "
- "Error: Block Address too big\n");
- return FAIL;
- }
-
- if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
- printk(KERN_ERR "Read Page Main+Spare "
- "Error: Page number too big\n");
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Read Page Main + Spare - "
- "No. of pages %u block %u start page %u\n",
- (unsigned int)PageCount,
- (unsigned int)Block, (unsigned int)Page);
-
- for (i = 0; i < PageCount; i++) {
- if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
- memset(read_data, 0xFF, DeviceInfo.wPageSize);
- } else {
- memcpy(read_data, (u8 *) (flash_memory[Block *
- GLOB_LLD_PAGES
- + Page]),
- DeviceInfo.wPageSize);
- }
-
- read_data += DeviceInfo.wPageSize;
- Page++;
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Write_Page_Main_Spare
-* Inputs: Write buffer
-* address
-* buffer length
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Write the buffer to main+spare area of flash
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_Write_Page_Main_Spare(u8 *write_data, u32 Block,
- u16 Page, u16 page_count)
-{
- u16 i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "Write Page Main + Spare "
- "Error: Block Address too big\n");
- return FAIL;
- }
-
- if (Page + page_count > DeviceInfo.wPagesPerBlock) {
- printk(KERN_ERR "Write Page Main + Spare "
- "Error: Page number too big\n");
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Write Page Main+Spare - "
- "No. of pages %u block %u start page %u\n",
- (unsigned int)page_count,
- (unsigned int)Block, (unsigned int)Page);
-
- for (i = 0; i < page_count; i++) {
- if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
- printk(KERN_ERR "Run out of memory!\n");
- return FAIL;
- }
- memcpy((u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page]),
- write_data, DeviceInfo.wPageSize);
- write_data += DeviceInfo.wPageSize;
- Page++;
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Write_Page_Spare
-* Inputs: Write buffer
-* Address
-* buffer size
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Write the buffer in the spare area
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_Write_Page_Spare(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "Read Page Spare Error: "
- "Block Address too big\n");
- return FAIL;
- }
-
- if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
- printk(KERN_ERR "Read Page Spare Error: "
- "Page number too big\n");
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Write Page Spare- "
- "block %u page %u\n",
- (unsigned int)Block, (unsigned int)Page);
-
- if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
- printk(KERN_ERR "Run out of memory!\n");
- return FAIL;
- }
-
- memcpy((u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page] +
- DeviceInfo.wPageDataSize), write_data,
- (DeviceInfo.wPageSize - DeviceInfo.wPageDataSize));
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Read_Page_Spare
-* Inputs: Write Buffer
-* Address
-* Buffer size
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Read data from the spare area
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_Read_Page_Spare(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "Read Page Spare "
- "Error: Block Address too big\n");
- return FAIL;
- }
-
- if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
- printk(KERN_ERR "Read Page Spare "
- "Error: Page number too big\n");
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Read Page Spare- "
- "block %u page %u\n",
- (unsigned int)Block, (unsigned int)Page);
-
- if (NULL == flash_memory[Block * GLOB_LLD_PAGES + Page]) {
- memset(write_data, 0xFF,
- (DeviceInfo.wPageSize - DeviceInfo.wPageDataSize));
- } else {
- memcpy(write_data,
- (u8 *) (flash_memory[Block * GLOB_LLD_PAGES + Page]
- + DeviceInfo.wPageDataSize),
- (DeviceInfo.wPageSize - DeviceInfo.wPageDataSize));
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Enable_Disable_Interrupts
-* Inputs: enable or disable
-* Outputs: none
-* Description: NOP
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-void emu_Enable_Disable_Interrupts(u16 INT_ENABLE)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-}
-
-u16 emu_Get_Bad_Block(u32 block)
-{
- return 0;
-}
-
-#if CMD_DMA
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Support for CDMA functions
-************************************
-* emu_CDMA_Flash_Init
-* CDMA_process_data command (use LLD_CDMA)
-* CDMA_MemCopy_CMD (use LLD_CDMA)
-* emu_CDMA_execute all commands
-* emu_CDMA_Event_Status
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_CDMA_Flash_Init(void)
-{
- u16 i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < MAX_DESCS + MAX_CHANS; i++) {
- PendingCMD[i].CMD = 0;
- PendingCMD[i].Tag = 0;
- PendingCMD[i].DataAddr = 0;
- PendingCMD[i].Block = 0;
- PendingCMD[i].Page = 0;
- PendingCMD[i].PageCount = 0;
- PendingCMD[i].DataDestAddr = 0;
- PendingCMD[i].DataSrcAddr = 0;
- PendingCMD[i].MemCopyByteCnt = 0;
- PendingCMD[i].ChanSync[0] = 0;
- PendingCMD[i].ChanSync[1] = 0;
- PendingCMD[i].ChanSync[2] = 0;
- PendingCMD[i].ChanSync[3] = 0;
- PendingCMD[i].ChanSync[4] = 0;
- PendingCMD[i].Status = 3;
- }
-
- return PASS;
-}
-
-static void emu_isr(int irq, void *dev_id)
-{
- /* TODO: ... */
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: CDMA_Execute_CMDs
-* Inputs: tag_count: the number of pending cmds to do
-* Outputs: PASS/FAIL
-* Description: execute each command in the pending CMD array
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_CDMA_Execute_CMDs(u16 tag_count)
-{
- u16 i, j;
- u8 CMD; /* cmd parameter */
- u8 *data;
- u32 block;
- u16 page;
- u16 count;
- u16 status = PASS;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- nand_dbg_print(NAND_DBG_TRACE, "At start of Execute CMDs: "
- "Tag Count %u\n", tag_count);
-
- for (i = 0; i < totalUsedBanks; i++) {
- PendingCMD[i].CMD = DUMMY_CMD;
- PendingCMD[i].Tag = 0xFF;
- PendingCMD[i].Block =
- (DeviceInfo.wTotalBlocks / totalUsedBanks) * i;
-
- for (j = 0; j <= MAX_CHANS; j++)
- PendingCMD[i].ChanSync[j] = 0;
- }
-
- CDMA_Execute_CMDs(tag_count);
-
- print_pending_cmds(tag_count);
-
-#if DEBUG_SYNC
- }
- debug_sync_cnt++;
-#endif
-
- for (i = MAX_CHANS;
- i < tag_count + MAX_CHANS; i++) {
- CMD = PendingCMD[i].CMD;
- data = PendingCMD[i].DataAddr;
- block = PendingCMD[i].Block;
- page = PendingCMD[i].Page;
- count = PendingCMD[i].PageCount;
-
- switch (CMD) {
- case ERASE_CMD:
- emu_Erase_Block(block);
- PendingCMD[i].Status = PASS;
- break;
- case WRITE_MAIN_CMD:
- emu_Write_Page_Main(data, block, page, count);
- PendingCMD[i].Status = PASS;
- break;
- case WRITE_MAIN_SPARE_CMD:
- emu_Write_Page_Main_Spare(data, block, page, count);
- PendingCMD[i].Status = PASS;
- break;
- case READ_MAIN_CMD:
- emu_Read_Page_Main(data, block, page, count);
- PendingCMD[i].Status = PASS;
- break;
- case MEMCOPY_CMD:
- memcpy(PendingCMD[i].DataDestAddr,
- PendingCMD[i].DataSrcAddr,
- PendingCMD[i].MemCopyByteCnt);
- case DUMMY_CMD:
- PendingCMD[i].Status = PASS;
- break;
- default:
- PendingCMD[i].Status = FAIL;
- break;
- }
- }
-
- /*
- * Temporary code to reset the PendingCMD array for basic testing.
- * It should be done at the end of the event status function.
- */
- for (i = tag_count + MAX_CHANS; i < MAX_DESCS; i++) {
- PendingCMD[i].CMD = 0;
- PendingCMD[i].Tag = 0;
- PendingCMD[i].DataAddr = 0;
- PendingCMD[i].Block = 0;
- PendingCMD[i].Page = 0;
- PendingCMD[i].PageCount = 0;
- PendingCMD[i].DataDestAddr = 0;
- PendingCMD[i].DataSrcAddr = 0;
- PendingCMD[i].MemCopyByteCnt = 0;
- PendingCMD[i].ChanSync[0] = 0;
- PendingCMD[i].ChanSync[1] = 0;
- PendingCMD[i].ChanSync[2] = 0;
- PendingCMD[i].ChanSync[3] = 0;
- PendingCMD[i].ChanSync[4] = 0;
- PendingCMD[i].Status = CMD_NOT_DONE;
- }
-
- nand_dbg_print(NAND_DBG_TRACE, "At end of Execute CMDs.\n");
-
- emu_isr(0, 0); /* This is a null ISR for now. It needs to be filled in later. */
-
- return status;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: emu_Event_Status
-* Inputs: none
-* Outputs: Event_Status code
-* Description: This function can also be used to force errors
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 emu_CDMA_Event_Status(void)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- return EVENT_PASS;
-}
-
-#endif /* CMD_DMA */
-#endif /* !ELDORA */
-#endif /* FLASH_EMU */
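Throughout the removed emulator, a page is addressed by the index Block * GLOB_LLD_PAGES + Page into flash_memory[], with a NULL entry reading back as erased (0xFF). A minimal sketch of that lookup rule, assuming the file-scope flash_memory array and the GLOB_LLD_* constants defined above (the helper itself is hypothetical):

static u8 *emu_page_ptr_sketch(u32 block, u16 page)
{
	/* One pointer per page; NULL means the page was never written
	 * and reads back as all 0xFF. */
	if (block >= GLOB_LLD_BLOCKS || page >= GLOB_LLD_PAGES)
		return NULL;
	return flash_memory[block * GLOB_LLD_PAGES + page];
}
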
diff --git a/drivers/staging/spectra/lld_emu.h b/drivers/staging/spectra/lld_emu.h
deleted file mode 100644
index 63f84c3..0000000
--- a/drivers/staging/spectra/lld_emu.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#ifndef _LLD_EMU_
-#define _LLD_EMU_
-
-#include "ffsport.h"
-#include "ffsdefs.h"
-
-/* prototypes: emulator API functions */
-extern u16 emu_Flash_Reset(void);
-extern u16 emu_Flash_Init(void);
-extern int emu_Flash_Release(void);
-extern u16 emu_Read_Device_ID(void);
-extern u16 emu_Erase_Block(u32 block_addr);
-extern u16 emu_Write_Page_Main(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount);
-extern u16 emu_Read_Page_Main(u8 *read_data, u32 Block, u16 Page,
- u16 PageCount);
-extern u16 emu_Event_Status(void);
-extern void emu_Enable_Disable_Interrupts(u16 INT_ENABLE);
-extern u16 emu_Write_Page_Main_Spare(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount);
-extern u16 emu_Write_Page_Spare(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount);
-extern u16 emu_Read_Page_Main_Spare(u8 *read_data, u32 Block,
- u16 Page, u16 PageCount);
-extern u16 emu_Read_Page_Spare(u8 *read_data, u32 Block, u16 Page,
- u16 PageCount);
-extern u16 emu_Get_Bad_Block(u32 block);
-
-u16 emu_CDMA_Flash_Init(void);
-u16 emu_CDMA_Execute_CMDs(u16 tag_count);
-u16 emu_CDMA_Event_Status(void);
-#endif /*_LLD_EMU_*/
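The prototypes above imply a simple call order for the emulator backend: init, probe, erase/program/read, release. The following is an illustrative sketch only, with made-up block and page numbers, assuming PASS and the page/block sizes from lld_emu.c:

static void emu_smoke_test_sketch(void)
{
	u8 buf[512] = { 0 };	/* one page of main-area data (data size assumed 512) */

	if (emu_Flash_Init() != PASS)	/* allocate the RAM array, load the emu file */
		return;
	emu_Read_Device_ID();		/* fill in DeviceInfo */
	emu_Erase_Block(40);		/* made-up block number */
	emu_Write_Page_Main(buf, 40, 0, 1);
	emu_Read_Page_Main(buf, 40, 0, 1);
	emu_Flash_Release();		/* write the RAM array back to the emu file */
}
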
diff --git a/drivers/staging/spectra/lld_mtd.c b/drivers/staging/spectra/lld_mtd.c
deleted file mode 100644
index a9c309a..0000000
--- a/drivers/staging/spectra/lld_mtd.c
+++ /dev/null
@@ -1,683 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#include <linux/fs.h>
-#include <linux/slab.h>
-#include <linux/mtd/mtd.h>
-#include "flash.h"
-#include "ffsdefs.h"
-#include "lld_emu.h"
-#include "lld.h"
-#if CMD_DMA
-#include "lld_cdma.h"
-u32 totalUsedBanks;
-u32 valid_banks[MAX_CHANS];
-#endif
-
-#define GLOB_LLD_PAGES 64
-#define GLOB_LLD_PAGE_SIZE (512+16)
-#define GLOB_LLD_PAGE_DATA_SIZE 512
-#define GLOB_LLD_BLOCKS 2048
-
-static struct mtd_info *spectra_mtd;
-static int mtddev = -1;
-module_param(mtddev, int, 0);
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Flash_Init
-* Inputs: none
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Creates & initializes the flash RAM array.
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_Flash_Init(void)
-{
- if (mtddev == -1) {
- printk(KERN_ERR "No MTD device specified. Give mtddev parameter\n");
- return FAIL;
- }
-
- spectra_mtd = get_mtd_device(NULL, mtddev);
- if (!spectra_mtd) {
- printk(KERN_ERR "Failed to obtain MTD device #%d\n", mtddev);
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Flash_Release
-* Inputs: none
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Releases the flash.
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-int mtd_Flash_Release(void)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
- if (!spectra_mtd)
- return PASS;
-
- put_mtd_device(spectra_mtd);
- spectra_mtd = NULL;
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Read_Device_ID
-* Inputs: none
-* Outputs: PASS=0 FAIL=1
-* Description: Reads the info from the controller registers.
-* Sets up DeviceInfo structure with device parameters
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-
-u16 mtd_Read_Device_ID(void)
-{
- uint64_t tmp;
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (!spectra_mtd)
- return FAIL;
-
- DeviceInfo.wDeviceMaker = 0;
- DeviceInfo.wDeviceType = 8;
- DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
- tmp = spectra_mtd->size;
- do_div(tmp, spectra_mtd->erasesize);
- DeviceInfo.wTotalBlocks = tmp;
- DeviceInfo.wSpectraEndBlock = DeviceInfo.wTotalBlocks - 1;
- DeviceInfo.wPagesPerBlock = spectra_mtd->erasesize / spectra_mtd->writesize;
- DeviceInfo.wPageSize = spectra_mtd->writesize + spectra_mtd->oobsize;
- DeviceInfo.wPageDataSize = spectra_mtd->writesize;
- DeviceInfo.wPageSpareSize = spectra_mtd->oobsize;
- DeviceInfo.wBlockSize = DeviceInfo.wPageSize * DeviceInfo.wPagesPerBlock;
- DeviceInfo.wBlockDataSize = DeviceInfo.wPageDataSize * DeviceInfo.wPagesPerBlock;
- DeviceInfo.wDataBlockNum = (u32) (DeviceInfo.wSpectraEndBlock -
- DeviceInfo.wSpectraStartBlock
- + 1);
- DeviceInfo.MLCDevice = 0; /* spectra_mtd->celltype & NAND_CI_CELLTYPE_MSK */
- DeviceInfo.nBitsInPageNumber =
- (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
- DeviceInfo.nBitsInPageDataSize =
- (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
- DeviceInfo.nBitsInBlockDataSize =
- (u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);
-
-#if CMD_DMA
- totalUsedBanks = 4;
- valid_banks[0] = 1;
- valid_banks[1] = 1;
- valid_banks[2] = 1;
- valid_banks[3] = 1;
-#endif
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Flash_Reset
-* Inputs: none
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Reset the flash
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_Flash_Reset(void)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- return PASS;
-}
-
-void erase_callback(struct erase_info *e)
-{
- complete((void *)e->priv);
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Erase_Block
-* Inputs: Address
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Erase a block
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_Erase_Block(u32 block_add)
-{
- struct erase_info erase;
- DECLARE_COMPLETION_ONSTACK(comp);
- int ret;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (block_add >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "mtd_Erase_Block error! "
- "Too big block address: %d\n", block_add);
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Erasing block %d\n",
- (int)block_add);
-
- erase.mtd = spectra_mtd;
- erase.callback = erase_callback;
- erase.addr = block_add * spectra_mtd->erasesize;
- erase.len = spectra_mtd->erasesize;
- erase.priv = (unsigned long)&comp;
-
- ret = spectra_mtd->erase(spectra_mtd, &erase);
- if (!ret) {
- wait_for_completion(&comp);
- if (erase.state != MTD_ERASE_DONE)
- ret = -EIO;
- }
- if (ret) {
- printk(KERN_WARNING "mtd_Erase_Block error! "
- "erase of region [0x%llx, 0x%llx] failed\n",
- erase.addr, erase.len);
- return FAIL;
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Write_Page_Main
-* Inputs: Write buffer address pointer
-* Block number
-* Page number
-* Number of pages to process
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Write the data in the buffer to main area of flash
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_Write_Page_Main(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- size_t retlen;
- int ret = 0;
-
- if (Block >= DeviceInfo.wTotalBlocks)
- return FAIL;
-
- if (Page + PageCount > DeviceInfo.wPagesPerBlock)
- return FAIL;
-
- nand_dbg_print(NAND_DBG_DEBUG, "mtd_Write_Page_Main: "
- "lba %u Page %u PageCount %u\n",
- (unsigned int)Block,
- (unsigned int)Page, (unsigned int)PageCount);
-
-
- while (PageCount) {
- ret = spectra_mtd->write(spectra_mtd,
- (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
- DeviceInfo.wPageDataSize, &retlen, write_data);
- if (ret) {
- printk(KERN_ERR "%s failed %d\n", __func__, ret);
- return FAIL;
- }
- write_data += DeviceInfo.wPageDataSize;
- Page++;
- PageCount--;
- }
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Read_Page_Main
-* Inputs: Read buffer address pointer
-* Block number
-* Page number
-* Number of pages to process
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Read the data from the flash main area to the buffer
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_Read_Page_Main(u8 *read_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- size_t retlen;
- int ret = 0;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks)
- return FAIL;
-
- if (Page + PageCount > DeviceInfo.wPagesPerBlock)
- return FAIL;
-
- nand_dbg_print(NAND_DBG_DEBUG, "mtd_Read_Page_Main: "
- "lba %u Page %u PageCount %u\n",
- (unsigned int)Block,
- (unsigned int)Page, (unsigned int)PageCount);
-
-
- while (PageCount) {
- ret = spectra_mtd->read(spectra_mtd,
- (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
- DeviceInfo.wPageDataSize, &retlen, read_data);
- if (ret) {
- printk(KERN_ERR "%s failed %d\n", __func__, ret);
- return FAIL;
- }
- read_data += DeviceInfo.wPageDataSize;
- Page++;
- PageCount--;
- }
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- return PASS;
-}
-
-#ifndef ELDORA
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Read_Page_Main_Spare
-* Inputs: Write Buffer
-* Address
-* Buffer size
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Read from flash main+spare area
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_Read_Page_Main_Spare(u8 *read_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "Read Page Main+Spare "
- "Error: Block Address too big\n");
- return FAIL;
- }
-
- if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
- printk(KERN_ERR "Read Page Main+Spare "
- "Error: Page number %d+%d too big in block %d\n",
- Page, PageCount, Block);
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Read Page Main + Spare - "
- "No. of pages %u block %u start page %u\n",
- (unsigned int)PageCount,
- (unsigned int)Block, (unsigned int)Page);
-
-
- while (PageCount) {
- struct mtd_oob_ops ops;
- int ret;
-
- ops.mode = MTD_OPS_AUTO_OOB;
- ops.datbuf = read_data;
- ops.len = DeviceInfo.wPageDataSize;
- ops.oobbuf = read_data + DeviceInfo.wPageDataSize + BTSIG_OFFSET;
- ops.ooblen = BTSIG_BYTES;
- ops.ooboffs = 0;
-
- ret = spectra_mtd->read_oob(spectra_mtd,
- (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
- &ops);
- if (ret) {
- printk(KERN_ERR "%s failed %d\n", __func__, ret);
- return FAIL;
- }
- read_data += DeviceInfo.wPageSize;
- Page++;
- PageCount--;
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Write_Page_Main_Spare
-* Inputs: Write buffer
-* address
-* buffer length
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Write the buffer to main+spare area of flash
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_Write_Page_Main_Spare(u8 *write_data, u32 Block,
- u16 Page, u16 page_count)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "Write Page Main + Spare "
- "Error: Block Address too big\n");
- return FAIL;
- }
-
- if (Page + page_count > DeviceInfo.wPagesPerBlock) {
- printk(KERN_ERR "Write Page Main + Spare "
- "Error: Page number %d+%d too big in block %d\n",
- Page, page_count, Block);
- WARN_ON(1);
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Write Page Main+Spare - "
- "No. of pages %u block %u start page %u\n",
- (unsigned int)page_count,
- (unsigned int)Block, (unsigned int)Page);
-
- while (page_count) {
- struct mtd_oob_ops ops;
- int ret;
-
- ops.mode = MTD_OPS_AUTO_OOB;
- ops.datbuf = write_data;
- ops.len = DeviceInfo.wPageDataSize;
- ops.oobbuf = write_data + DeviceInfo.wPageDataSize + BTSIG_OFFSET;
- ops.ooblen = BTSIG_BYTES;
- ops.ooboffs = 0;
-
- ret = spectra_mtd->write_oob(spectra_mtd,
- (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
- &ops);
- if (ret) {
- printk(KERN_ERR "%s failed %d\n", __func__, ret);
- return FAIL;
- }
- write_data += DeviceInfo.wPageSize;
- Page++;
- page_count--;
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Write_Page_Spare
-* Inputs: Write buffer
-* Address
-* buffer size
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Write the buffer in the spare area
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_Write_Page_Spare(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- WARN_ON(1);
- return FAIL;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Read_Page_Spare
-* Inputs: Write Buffer
-* Address
-* Buffer size
-* Outputs: PASS=0 (notice 0=ok here)
-* Description: Read data from the spare area
-*
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_Read_Page_Spare(u8 *read_data, u32 Block,
- u16 Page, u16 PageCount)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (Block >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "Read Page Spare "
- "Error: Block Address too big\n");
- return FAIL;
- }
-
- if (Page + PageCount > DeviceInfo.wPagesPerBlock) {
- printk(KERN_ERR "Read Page Spare "
- "Error: Page number too big\n");
- return FAIL;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Read Page Spare- "
- "block %u page %u (%u pages)\n",
- (unsigned int)Block, (unsigned int)Page, PageCount);
-
- while (PageCount) {
- struct mtd_oob_ops ops;
- int ret;
-
- ops.mode = MTD_OPS_AUTO_OOB;
- ops.datbuf = NULL;
- ops.len = 0;
- ops.oobbuf = read_data;
- ops.ooblen = BTSIG_BYTES;
- ops.ooboffs = 0;
-
- ret = spectra_mtd->read_oob(spectra_mtd,
- (Block * spectra_mtd->erasesize) + (Page * spectra_mtd->writesize),
- &ops);
- if (ret) {
- printk(KERN_ERR "%s failed %d\n", __func__, ret);
- return FAIL;
- }
-
- read_data += DeviceInfo.wPageSize;
- Page++;
- PageCount--;
- }
-
- return PASS;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Enable_Disable_Interrupts
-* Inputs: enable or disable
-* Outputs: none
-* Description: NOP
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-void mtd_Enable_Disable_Interrupts(u16 INT_ENABLE)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-}
-
-u16 mtd_Get_Bad_Block(u32 block)
-{
- return 0;
-}
-
-#if CMD_DMA
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Support for CDMA functions
-************************************
-* mtd_CDMA_Flash_Init
-* CDMA_process_data command (use LLD_CDMA)
-* CDMA_MemCopy_CMD (use LLD_CDMA)
-* mtd_CDMA_execute all commands
-* mtd_CDMA_Event_Status
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_CDMA_Flash_Init(void)
-{
- u16 i;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0; i < MAX_DESCS + MAX_CHANS; i++) {
- PendingCMD[i].CMD = 0;
- PendingCMD[i].Tag = 0;
- PendingCMD[i].DataAddr = 0;
- PendingCMD[i].Block = 0;
- PendingCMD[i].Page = 0;
- PendingCMD[i].PageCount = 0;
- PendingCMD[i].DataDestAddr = 0;
- PendingCMD[i].DataSrcAddr = 0;
- PendingCMD[i].MemCopyByteCnt = 0;
- PendingCMD[i].ChanSync[0] = 0;
- PendingCMD[i].ChanSync[1] = 0;
- PendingCMD[i].ChanSync[2] = 0;
- PendingCMD[i].ChanSync[3] = 0;
- PendingCMD[i].ChanSync[4] = 0;
- PendingCMD[i].Status = 3;
- }
-
- return PASS;
-}
-
-static void mtd_isr(int irq, void *dev_id)
-{
- /* TODO: ... */
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: CDMA_Execute_CMDs
-* Inputs: tag_count: the number of pending cmds to do
-* Outputs: PASS/FAIL
-* Description: execute each command in the pending CMD array
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_CDMA_Execute_CMDs(u16 tag_count)
-{
- u16 i, j;
- u8 CMD; /* cmd parameter */
- u8 *data;
- u32 block;
- u16 page;
- u16 count;
- u16 status = PASS;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- nand_dbg_print(NAND_DBG_TRACE, "At start of Execute CMDs: "
- "Tag Count %u\n", tag_count);
-
- for (i = 0; i < totalUsedBanks; i++) {
- PendingCMD[i].CMD = DUMMY_CMD;
- PendingCMD[i].Tag = 0xFF;
- PendingCMD[i].Block =
- (DeviceInfo.wTotalBlocks / totalUsedBanks) * i;
-
- for (j = 0; j <= MAX_CHANS; j++)
- PendingCMD[i].ChanSync[j] = 0;
- }
-
- CDMA_Execute_CMDs(tag_count);
-
-#ifdef VERBOSE
- print_pending_cmds(tag_count);
-#endif
-#if DEBUG_SYNC
- }
- debug_sync_cnt++;
-#endif
-
- for (i = MAX_CHANS;
- i < tag_count + MAX_CHANS; i++) {
- CMD = PendingCMD[i].CMD;
- data = PendingCMD[i].DataAddr;
- block = PendingCMD[i].Block;
- page = PendingCMD[i].Page;
- count = PendingCMD[i].PageCount;
-
- switch (CMD) {
- case ERASE_CMD:
- mtd_Erase_Block(block);
- PendingCMD[i].Status = PASS;
- break;
- case WRITE_MAIN_CMD:
- mtd_Write_Page_Main(data, block, page, count);
- PendingCMD[i].Status = PASS;
- break;
- case WRITE_MAIN_SPARE_CMD:
- mtd_Write_Page_Main_Spare(data, block, page, count);
- PendingCMD[i].Status = PASS;
- break;
- case READ_MAIN_CMD:
- mtd_Read_Page_Main(data, block, page, count);
- PendingCMD[i].Status = PASS;
- break;
- case MEMCOPY_CMD:
- memcpy(PendingCMD[i].DataDestAddr,
- PendingCMD[i].DataSrcAddr,
- PendingCMD[i].MemCopyByteCnt);
- case DUMMY_CMD:
- PendingCMD[i].Status = PASS;
- break;
- default:
- PendingCMD[i].Status = FAIL;
- break;
- }
- }
-
- /*
- * Temporary code to reset the PendingCMD array for basic testing.
- * It should be done at the end of the event status function.
- */
- for (i = tag_count + MAX_CHANS; i < MAX_DESCS; i++) {
- PendingCMD[i].CMD = 0;
- PendingCMD[i].Tag = 0;
- PendingCMD[i].DataAddr = 0;
- PendingCMD[i].Block = 0;
- PendingCMD[i].Page = 0;
- PendingCMD[i].PageCount = 0;
- PendingCMD[i].DataDestAddr = 0;
- PendingCMD[i].DataSrcAddr = 0;
- PendingCMD[i].MemCopyByteCnt = 0;
- PendingCMD[i].ChanSync[0] = 0;
- PendingCMD[i].ChanSync[1] = 0;
- PendingCMD[i].ChanSync[2] = 0;
- PendingCMD[i].ChanSync[3] = 0;
- PendingCMD[i].ChanSync[4] = 0;
- PendingCMD[i].Status = CMD_NOT_DONE;
- }
-
- nand_dbg_print(NAND_DBG_TRACE, "At end of Execute CMDs.\n");
-
- mtd_isr(0, 0); /* This is a null ISR for now. It needs to be filled in later. */
-
- return status;
-}
-
-/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
-* Function: mtd_Event_Status
-* Inputs: none
-* Outputs: Event_Status code
-* Description: This function can also be used to force errors
-*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
-u16 mtd_CDMA_Event_Status(void)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- return EVENT_PASS;
-}
-
-#endif /* CMD_DMA */
-#endif /* !ELDORA */
diff --git a/drivers/staging/spectra/lld_mtd.h b/drivers/staging/spectra/lld_mtd.h
deleted file mode 100644
index 4e81ee8..0000000
--- a/drivers/staging/spectra/lld_mtd.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#ifndef _LLD_MTD_
-#define _LLD_MTD_
-
-#include "ffsport.h"
-#include "ffsdefs.h"
-
-/* prototypes: MTD API functions */
-extern u16 mtd_Flash_Reset(void);
-extern u16 mtd_Flash_Init(void);
-extern int mtd_Flash_Release(void);
-extern u16 mtd_Read_Device_ID(void);
-extern u16 mtd_Erase_Block(u32 block_addr);
-extern u16 mtd_Write_Page_Main(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount);
-extern u16 mtd_Read_Page_Main(u8 *read_data, u32 Block, u16 Page,
- u16 PageCount);
-extern u16 mtd_Event_Status(void);
-extern void mtd_Enable_Disable_Interrupts(u16 INT_ENABLE);
-extern u16 mtd_Write_Page_Main_Spare(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount);
-extern u16 mtd_Write_Page_Spare(u8 *write_data, u32 Block,
- u16 Page, u16 PageCount);
-extern u16 mtd_Read_Page_Main_Spare(u8 *read_data, u32 Block,
- u16 Page, u16 PageCount);
-extern u16 mtd_Read_Page_Spare(u8 *read_data, u32 Block, u16 Page,
- u16 PageCount);
-extern u16 mtd_Get_Bad_Block(u32 block);
-
-u16 mtd_CDMA_Flash_Init(void);
-u16 mtd_CDMA_Execute_CMDs(u16 tag_count);
-u16 mtd_CDMA_Event_Status(void);
-#endif /*_LLD_MTD_*/
diff --git a/drivers/staging/spectra/lld_nand.c b/drivers/staging/spectra/lld_nand.c
deleted file mode 100644
index 60a14ff..0000000
--- a/drivers/staging/spectra/lld_nand.c
+++ /dev/null
@@ -1,2619 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#include "lld.h"
-#include "lld_nand.h"
-#include "lld_cdma.h"
-
-#include "spectraswconfig.h"
-#include "flash.h"
-#include "ffsdefs.h"
-
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/wait.h>
-#include <linux/mutex.h>
-
-#include "nand_regs.h"
-
-#define SPECTRA_NAND_NAME "nd"
-
-#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))
-#define MAX_PAGES_PER_RW 128
-
-#define INT_IDLE_STATE 0
-#define INT_READ_PAGE_MAIN 0x01
-#define INT_WRITE_PAGE_MAIN 0x02
-#define INT_PIPELINE_READ_AHEAD 0x04
-#define INT_PIPELINE_WRITE_AHEAD 0x08
-#define INT_MULTI_PLANE_READ 0x10
-#define INT_MULTI_PLANE_WRITE 0x11
-
-static u32 enable_ecc;
-
-struct mrst_nand_info info;
-
-int totalUsedBanks;
-u32 GLOB_valid_banks[LLD_MAX_FLASH_BANKS];
-
-void __iomem *FlashReg;
-void __iomem *FlashMem;
-
-u16 conf_parameters[] = {
- 0x0000,
- 0x0000,
- 0x01F4,
- 0x01F4,
- 0x01F4,
- 0x01F4,
- 0x0000,
- 0x0000,
- 0x0001,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0000,
- 0x0040,
- 0x0001,
- 0x000A,
- 0x000A,
- 0x000A,
- 0x0000,
- 0x0000,
- 0x0005,
- 0x0012,
- 0x000C
-};
-
-u16 NAND_Get_Bad_Block(u32 block)
-{
- u32 status = PASS;
- u32 flag_bytes = 0;
- u32 skip_bytes = DeviceInfo.wSpareSkipBytes;
- u32 page, i;
- u8 *pReadSpareBuf = buf_get_bad_block;
-
- if (enable_ecc)
- flag_bytes = DeviceInfo.wNumPageSpareFlag;
-
- for (page = 0; page < 2; page++) {
- status = NAND_Read_Page_Spare(pReadSpareBuf, block, page, 1);
- if (status != PASS)
- return READ_ERROR;
- for (i = flag_bytes; i < (flag_bytes + skip_bytes); i++)
- if (pReadSpareBuf[i] != 0xff)
- return DEFECTIVE_BLOCK;
- }
-
- for (page = 1; page < 3; page++) {
- status = NAND_Read_Page_Spare(pReadSpareBuf, block,
- DeviceInfo.wPagesPerBlock - page , 1);
- if (status != PASS)
- return READ_ERROR;
- for (i = flag_bytes; i < (flag_bytes + skip_bytes); i++)
- if (pReadSpareBuf[i] != 0xff)
- return DEFECTIVE_BLOCK;
- }
-
- return GOOD_BLOCK;
-}
-
-
-u16 NAND_Flash_Reset(void)
-{
- u32 i;
- u32 intr_status_rst_comp[4] = {INTR_STATUS0__RST_COMP,
- INTR_STATUS1__RST_COMP,
- INTR_STATUS2__RST_COMP,
- INTR_STATUS3__RST_COMP};
- u32 intr_status_time_out[4] = {INTR_STATUS0__TIME_OUT,
- INTR_STATUS1__TIME_OUT,
- INTR_STATUS2__TIME_OUT,
- INTR_STATUS3__TIME_OUT};
- u32 intr_status[4] = {INTR_STATUS0, INTR_STATUS1,
- INTR_STATUS2, INTR_STATUS3};
- u32 device_reset_banks[4] = {DEVICE_RESET__BANK0,
- DEVICE_RESET__BANK1,
- DEVICE_RESET__BANK2,
- DEVICE_RESET__BANK3};
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++)
- iowrite32(intr_status_rst_comp[i] | intr_status_time_out[i],
- FlashReg + intr_status[i]);
-
- for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) {
- iowrite32(device_reset_banks[i], FlashReg + DEVICE_RESET);
- while (!(ioread32(FlashReg + intr_status[i]) &
- (intr_status_rst_comp[i] | intr_status_time_out[i])))
- ;
- if (ioread32(FlashReg + intr_status[i]) &
- intr_status_time_out[i])
- nand_dbg_print(NAND_DBG_WARN,
- "NAND Reset operation timed out on bank %d\n", i);
- }
-
- for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
- iowrite32(intr_status_rst_comp[i] | intr_status_time_out[i],
- FlashReg + intr_status[i]);
-
- return PASS;
-}
-
-static void NAND_ONFi_Timing_Mode(u16 mode)
-{
- u16 Trea[6] = {40, 30, 25, 20, 20, 16};
- u16 Trp[6] = {50, 25, 17, 15, 12, 10};
- u16 Treh[6] = {30, 15, 15, 10, 10, 7};
- u16 Trc[6] = {100, 50, 35, 30, 25, 20};
- u16 Trhoh[6] = {0, 15, 15, 15, 15, 15};
- u16 Trloh[6] = {0, 0, 0, 0, 5, 5};
- u16 Tcea[6] = {100, 45, 30, 25, 25, 25};
- u16 Tadl[6] = {200, 100, 100, 100, 70, 70};
- u16 Trhw[6] = {200, 100, 100, 100, 100, 100};
- u16 Trhz[6] = {200, 100, 100, 100, 100, 100};
- u16 Twhr[6] = {120, 80, 80, 60, 60, 60};
- u16 Tcs[6] = {70, 35, 25, 25, 20, 15};
-
- u16 TclsRising = 1;
- u16 data_invalid_rhoh, data_invalid_rloh, data_invalid;
- u16 dv_window = 0;
- u16 en_lo, en_hi;
- u16 acc_clks;
- u16 addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- en_lo = CEIL_DIV(Trp[mode], CLK_X);
- en_hi = CEIL_DIV(Treh[mode], CLK_X);
-
-#if ONFI_BLOOM_TIME
- if ((en_hi * CLK_X) < (Treh[mode] + 2))
- en_hi++;
-#endif
-
- if ((en_lo + en_hi) * CLK_X < Trc[mode])
- en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);
-
- if ((en_lo + en_hi) < CLK_MULTI)
- en_lo += CLK_MULTI - en_lo - en_hi;
-
- while (dv_window < 8) {
- data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];
-
- data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];
-
- data_invalid =
- data_invalid_rhoh <
- data_invalid_rloh ? data_invalid_rhoh : data_invalid_rloh;
-
- dv_window = data_invalid - Trea[mode];
-
- if (dv_window < 8)
- en_lo++;
- }
-
- acc_clks = CEIL_DIV(Trea[mode], CLK_X);
-
- while (((acc_clks * CLK_X) - Trea[mode]) < 3)
- acc_clks++;
-
- if ((data_invalid - acc_clks * CLK_X) < 2)
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: Warning!\n",
- __FILE__, __LINE__);
-
- addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
- re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
- re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
- we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
- cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
- if (!TclsRising)
- cs_cnt = CEIL_DIV(Tcs[mode], CLK_X);
- if (cs_cnt == 0)
- cs_cnt = 1;
-
- if (Tcea[mode]) {
- while (((cs_cnt * CLK_X) + Trea[mode]) < Tcea[mode])
- cs_cnt++;
- }
-
-#if MODE5_WORKAROUND
- if (mode == 5)
- acc_clks = 5;
-#endif
-
- /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
- if ((ioread32(FlashReg + MANUFACTURER_ID) == 0) &&
- (ioread32(FlashReg + DEVICE_ID) == 0x88))
- acc_clks = 6;
-
- iowrite32(acc_clks, FlashReg + ACC_CLKS);
- iowrite32(re_2_we, FlashReg + RE_2_WE);
- iowrite32(re_2_re, FlashReg + RE_2_RE);
- iowrite32(we_2_re, FlashReg + WE_2_RE);
- iowrite32(addr_2_data, FlashReg + ADDR_2_DATA);
- iowrite32(en_lo, FlashReg + RDWR_EN_LO_CNT);
- iowrite32(en_hi, FlashReg + RDWR_EN_HI_CNT);
- iowrite32(cs_cnt, FlashReg + CS_SETUP_CNT);
-}
-
-static void index_addr(u32 address, u32 data)
-{
- iowrite32(address, FlashMem);
- iowrite32(data, FlashMem + 0x10);
-}
-
-static void index_addr_read_data(u32 address, u32 *pdata)
-{
- iowrite32(address, FlashMem);
- *pdata = ioread32(FlashMem + 0x10);
-}
-
-static void set_ecc_config(void)
-{
-#if SUPPORT_8BITECC
- if ((ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE) < 4096) ||
- (ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE) <= 128))
- iowrite32(8, FlashReg + ECC_CORRECTION);
-#endif
-
- if ((ioread32(FlashReg + ECC_CORRECTION) & ECC_CORRECTION__VALUE)
- == 1) {
- DeviceInfo.wECCBytesPerSector = 4;
- DeviceInfo.wECCBytesPerSector *= DeviceInfo.wDevicesConnected;
- DeviceInfo.wNumPageSpareFlag =
- DeviceInfo.wPageSpareSize -
- DeviceInfo.wPageDataSize /
- (ECC_SECTOR_SIZE * DeviceInfo.wDevicesConnected) *
- DeviceInfo.wECCBytesPerSector
- - DeviceInfo.wSpareSkipBytes;
- } else {
- DeviceInfo.wECCBytesPerSector =
- (ioread32(FlashReg + ECC_CORRECTION) &
- ECC_CORRECTION__VALUE) * 13 / 8;
- if ((DeviceInfo.wECCBytesPerSector) % 2 == 0)
- DeviceInfo.wECCBytesPerSector += 2;
- else
- DeviceInfo.wECCBytesPerSector += 1;
-
- DeviceInfo.wECCBytesPerSector *= DeviceInfo.wDevicesConnected;
- DeviceInfo.wNumPageSpareFlag = DeviceInfo.wPageSpareSize -
- DeviceInfo.wPageDataSize /
- (ECC_SECTOR_SIZE * DeviceInfo.wDevicesConnected) *
- DeviceInfo.wECCBytesPerSector
- - DeviceInfo.wSpareSkipBytes;
- }
-}
-
-static u16 get_onfi_nand_para(void)
-{
- int i;
- u16 blks_lun_l, blks_lun_h, n_of_luns;
- u32 blockperlun, id;
-
- iowrite32(DEVICE_RESET__BANK0, FlashReg + DEVICE_RESET);
-
- while (!((ioread32(FlashReg + INTR_STATUS0) &
- INTR_STATUS0__RST_COMP) |
- (ioread32(FlashReg + INTR_STATUS0) &
- INTR_STATUS0__TIME_OUT)))
- ;
-
- if (ioread32(FlashReg + INTR_STATUS0) & INTR_STATUS0__RST_COMP) {
- iowrite32(DEVICE_RESET__BANK1, FlashReg + DEVICE_RESET);
- while (!((ioread32(FlashReg + INTR_STATUS1) &
- INTR_STATUS1__RST_COMP) |
- (ioread32(FlashReg + INTR_STATUS1) &
- INTR_STATUS1__TIME_OUT)))
- ;
-
- if (ioread32(FlashReg + INTR_STATUS1) &
- INTR_STATUS1__RST_COMP) {
- iowrite32(DEVICE_RESET__BANK2,
- FlashReg + DEVICE_RESET);
- while (!((ioread32(FlashReg + INTR_STATUS2) &
- INTR_STATUS2__RST_COMP) |
- (ioread32(FlashReg + INTR_STATUS2) &
- INTR_STATUS2__TIME_OUT)))
- ;
-
- if (ioread32(FlashReg + INTR_STATUS2) &
- INTR_STATUS2__RST_COMP) {
- iowrite32(DEVICE_RESET__BANK3,
- FlashReg + DEVICE_RESET);
- while (!((ioread32(FlashReg + INTR_STATUS3) &
- INTR_STATUS3__RST_COMP) |
- (ioread32(FlashReg + INTR_STATUS3) &
- INTR_STATUS3__TIME_OUT)))
- ;
- } else {
- printk(KERN_ERR "Getting a time out for bank 2!\n");
- }
- } else {
- printk(KERN_ERR "Getting a time out for bank 1!\n");
- }
- }
-
- iowrite32(INTR_STATUS0__TIME_OUT, FlashReg + INTR_STATUS0);
- iowrite32(INTR_STATUS1__TIME_OUT, FlashReg + INTR_STATUS1);
- iowrite32(INTR_STATUS2__TIME_OUT, FlashReg + INTR_STATUS2);
- iowrite32(INTR_STATUS3__TIME_OUT, FlashReg + INTR_STATUS3);
-
- DeviceInfo.wONFIDevFeatures =
- ioread32(FlashReg + ONFI_DEVICE_FEATURES);
- DeviceInfo.wONFIOptCommands =
- ioread32(FlashReg + ONFI_OPTIONAL_COMMANDS);
- DeviceInfo.wONFITimingMode =
- ioread32(FlashReg + ONFI_TIMING_MODE);
- DeviceInfo.wONFIPgmCacheTimingMode =
- ioread32(FlashReg + ONFI_PGM_CACHE_TIMING_MODE);
-
- n_of_luns = ioread32(FlashReg + ONFI_DEVICE_NO_OF_LUNS) &
- ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS;
- blks_lun_l = ioread32(FlashReg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L);
- blks_lun_h = ioread32(FlashReg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U);
-
- blockperlun = (blks_lun_h << 16) | blks_lun_l;
-
- DeviceInfo.wTotalBlocks = n_of_luns * blockperlun;
-
- if (!(ioread32(FlashReg + ONFI_TIMING_MODE) &
- ONFI_TIMING_MODE__VALUE))
- return FAIL;
-
- for (i = 5; i > 0; i--) {
- if (ioread32(FlashReg + ONFI_TIMING_MODE) & (0x01 << i))
- break;
- }
-
- NAND_ONFi_Timing_Mode(i);
-
- index_addr(MODE_11 | 0, 0x90);
- index_addr(MODE_11 | 1, 0);
-
- for (i = 0; i < 3; i++)
- index_addr_read_data(MODE_11 | 2, &id);
-
- nand_dbg_print(NAND_DBG_DEBUG, "3rd ID: 0x%x\n", id);
-
- DeviceInfo.MLCDevice = id & 0x0C;
-
- /* By now, all the ONFI devices we know support the page cache */
- /* rw feature. So here we enable the pipeline_rw_ahead feature */
- /* iowrite32(1, FlashReg + CACHE_WRITE_ENABLE); */
- /* iowrite32(1, FlashReg + CACHE_READ_ENABLE); */
-
- return PASS;
-}
-
-static void get_samsung_nand_para(void)
-{
- u8 no_of_planes;
- u32 blk_size;
- u64 plane_size, capacity;
- u32 id_bytes[5];
- int i;
-
- index_addr((u32)(MODE_11 | 0), 0x90);
- index_addr((u32)(MODE_11 | 1), 0);
- for (i = 0; i < 5; i++)
- index_addr_read_data((u32)(MODE_11 | 2), &id_bytes[i]);
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "ID bytes: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
- id_bytes[0], id_bytes[1], id_bytes[2],
- id_bytes[3], id_bytes[4]);
-
- if ((id_bytes[1] & 0xff) == 0xd3) { /* Samsung K9WAG08U1A */
- /* Set timing register values according to datasheet */
- iowrite32(5, FlashReg + ACC_CLKS);
- iowrite32(20, FlashReg + RE_2_WE);
- iowrite32(12, FlashReg + WE_2_RE);
- iowrite32(14, FlashReg + ADDR_2_DATA);
- iowrite32(3, FlashReg + RDWR_EN_LO_CNT);
- iowrite32(2, FlashReg + RDWR_EN_HI_CNT);
- iowrite32(2, FlashReg + CS_SETUP_CNT);
- }
-
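- /* Decode the plane count and per-plane size from the 5th ID byte
- * and the block size from DEVICE_PARAM_1, then derive the total
- * block count as capacity / block size */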
- no_of_planes = 1 << ((id_bytes[4] & 0x0c) >> 2);
- plane_size = (u64)64 << ((id_bytes[4] & 0x70) >> 4);
- blk_size = 64 << ((ioread32(FlashReg + DEVICE_PARAM_1) & 0x30) >> 4);
- capacity = (u64)128 * plane_size * no_of_planes;
-
- DeviceInfo.wTotalBlocks = (u32)GLOB_u64_Div(capacity, blk_size);
-}
-
-static void get_toshiba_nand_para(void)
-{
- void __iomem *scratch_reg;
- u32 tmp;
-
- /* Work around a controller bug that reports the wrong */
- /* spare area size for some kinds of Toshiba NAND devices */
- if ((ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
- (ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE) == 64)) {
- iowrite32(216, FlashReg + DEVICE_SPARE_AREA_SIZE);
- tmp = ioread32(FlashReg + DEVICES_CONNECTED) *
- ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE);
- iowrite32(tmp, FlashReg + LOGICAL_PAGE_SPARE_SIZE);
-#if SUPPORT_15BITECC
- iowrite32(15, FlashReg + ECC_CORRECTION);
-#elif SUPPORT_8BITECC
- iowrite32(8, FlashReg + ECC_CORRECTION);
-#endif
- }
-
- /* Since Toshiba NAND cannot report its total block count, the */
- /* user must provide the correct block count in a scratch */
- /* register before the Linux NAND driver is loaded. If no valid */
- /* value is found in the scratch register, the default block */
- /* count is used. */
- scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
- if (!scratch_reg) {
- printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
- __FILE__, __LINE__);
- DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
- } else {
- nand_dbg_print(NAND_DBG_WARN,
- "Spectra: ioremap reg address: 0x%p\n", scratch_reg);
- DeviceInfo.wTotalBlocks = 1 << ioread8(scratch_reg);
- if (DeviceInfo.wTotalBlocks < 512)
- DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
- iounmap(scratch_reg);
- }
-}
-
-static void get_hynix_nand_para(void)
-{
- void __iomem *scratch_reg;
- u32 main_size, spare_size;
-
- switch (DeviceInfo.wDeviceID) {
- case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
- case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
- iowrite32(128, FlashReg + PAGES_PER_BLOCK);
- iowrite32(4096, FlashReg + DEVICE_MAIN_AREA_SIZE);
- iowrite32(224, FlashReg + DEVICE_SPARE_AREA_SIZE);
- main_size = 4096 * ioread32(FlashReg + DEVICES_CONNECTED);
- spare_size = 224 * ioread32(FlashReg + DEVICES_CONNECTED);
- iowrite32(main_size, FlashReg + LOGICAL_PAGE_DATA_SIZE);
- iowrite32(spare_size, FlashReg + LOGICAL_PAGE_SPARE_SIZE);
- iowrite32(0, FlashReg + DEVICE_WIDTH);
-#if SUPPORT_15BITECC
- iowrite32(15, FlashReg + ECC_CORRECTION);
-#elif SUPPORT_8BITECC
- iowrite32(8, FlashReg + ECC_CORRECTION);
-#endif
- DeviceInfo.MLCDevice = 1;
- break;
- default:
- nand_dbg_print(NAND_DBG_WARN,
- "Spectra: Unknown Hynix NAND (Device ID: 0x%x)."
- "Will use default parameter values instead.\n",
- DeviceInfo.wDeviceID);
- }
-
- scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
- if (!scratch_reg) {
- printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
- __FILE__, __LINE__);
- DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
- } else {
- nand_dbg_print(NAND_DBG_WARN,
- "Spectra: ioremap reg address: 0x%p\n", scratch_reg);
- DeviceInfo.wTotalBlocks = 1 << ioread8(scratch_reg);
- if (DeviceInfo.wTotalBlocks < 512)
- DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
- iounmap(scratch_reg);
- }
-}
-
-static void find_valid_banks(void)
-{
- u32 id[LLD_MAX_FLASH_BANKS];
- int i;
-
- totalUsedBanks = 0;
- for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) {
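- /* Issue Read ID (command 0x90, address 0x00) to bank i via
- * MODE_11 indexed access and read back the first ID byte */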
- index_addr((u32)(MODE_11 | (i << 24) | 0), 0x90);
- index_addr((u32)(MODE_11 | (i << 24) | 1), 0);
- index_addr_read_data((u32)(MODE_11 | (i << 24) | 2), &id[i]);
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "Return 1st ID for bank[%d]: %x\n", i, id[i]);
-
- if (i == 0) {
- if (id[i] & 0x0ff)
- GLOB_valid_banks[i] = 1;
- } else {
- if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
- GLOB_valid_banks[i] = 1;
- }
-
- totalUsedBanks += GLOB_valid_banks[i];
- }
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "totalUsedBanks: %d\n", totalUsedBanks);
-}
-
-static void detect_partition_feature(void)
-{
- if (ioread32(FlashReg + FEATURES) & FEATURES__PARTITION) {
- if ((ioread32(FlashReg + PERM_SRC_ID_1) &
- PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) {
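- /* Partition 1 is owned by Spectra: compute its first and last
- * block from the min/max bank numbers and block addresses */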
- DeviceInfo.wSpectraStartBlock =
- ((ioread32(FlashReg + MIN_MAX_BANK_1) &
- MIN_MAX_BANK_1__MIN_VALUE) *
- DeviceInfo.wTotalBlocks)
- +
- (ioread32(FlashReg + MIN_BLK_ADDR_1) &
- MIN_BLK_ADDR_1__VALUE);
-
- DeviceInfo.wSpectraEndBlock =
- (((ioread32(FlashReg + MIN_MAX_BANK_1) &
- MIN_MAX_BANK_1__MAX_VALUE) >> 2) *
- DeviceInfo.wTotalBlocks)
- +
- (ioread32(FlashReg + MAX_BLK_ADDR_1) &
- MAX_BLK_ADDR_1__VALUE);
-
- DeviceInfo.wTotalBlocks *= totalUsedBanks;
-
- if (DeviceInfo.wSpectraEndBlock >=
- DeviceInfo.wTotalBlocks) {
- DeviceInfo.wSpectraEndBlock =
- DeviceInfo.wTotalBlocks - 1;
- }
-
- DeviceInfo.wDataBlockNum =
- DeviceInfo.wSpectraEndBlock -
- DeviceInfo.wSpectraStartBlock + 1;
- } else {
- DeviceInfo.wTotalBlocks *= totalUsedBanks;
- DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
- DeviceInfo.wSpectraEndBlock =
- DeviceInfo.wTotalBlocks - 1;
- DeviceInfo.wDataBlockNum =
- DeviceInfo.wSpectraEndBlock -
- DeviceInfo.wSpectraStartBlock + 1;
- }
- } else {
- DeviceInfo.wTotalBlocks *= totalUsedBanks;
- DeviceInfo.wSpectraStartBlock = SPECTRA_START_BLOCK;
- DeviceInfo.wSpectraEndBlock = DeviceInfo.wTotalBlocks - 1;
- DeviceInfo.wDataBlockNum =
- DeviceInfo.wSpectraEndBlock -
- DeviceInfo.wSpectraStartBlock + 1;
- }
-}
-
-static void dump_device_info(void)
-{
- nand_dbg_print(NAND_DBG_DEBUG, "DeviceInfo:\n");
- nand_dbg_print(NAND_DBG_DEBUG, "DeviceMaker: 0x%x\n",
- DeviceInfo.wDeviceMaker);
- nand_dbg_print(NAND_DBG_DEBUG, "DeviceID: 0x%x\n",
- DeviceInfo.wDeviceID);
- nand_dbg_print(NAND_DBG_DEBUG, "DeviceType: 0x%x\n",
- DeviceInfo.wDeviceType);
- nand_dbg_print(NAND_DBG_DEBUG, "SpectraStartBlock: %d\n",
- DeviceInfo.wSpectraStartBlock);
- nand_dbg_print(NAND_DBG_DEBUG, "SpectraEndBlock: %d\n",
- DeviceInfo.wSpectraEndBlock);
- nand_dbg_print(NAND_DBG_DEBUG, "TotalBlocks: %d\n",
- DeviceInfo.wTotalBlocks);
- nand_dbg_print(NAND_DBG_DEBUG, "PagesPerBlock: %d\n",
- DeviceInfo.wPagesPerBlock);
- nand_dbg_print(NAND_DBG_DEBUG, "PageSize: %d\n",
- DeviceInfo.wPageSize);
- nand_dbg_print(NAND_DBG_DEBUG, "PageDataSize: %d\n",
- DeviceInfo.wPageDataSize);
- nand_dbg_print(NAND_DBG_DEBUG, "PageSpareSize: %d\n",
- DeviceInfo.wPageSpareSize);
- nand_dbg_print(NAND_DBG_DEBUG, "NumPageSpareFlag: %d\n",
- DeviceInfo.wNumPageSpareFlag);
- nand_dbg_print(NAND_DBG_DEBUG, "ECCBytesPerSector: %d\n",
- DeviceInfo.wECCBytesPerSector);
- nand_dbg_print(NAND_DBG_DEBUG, "BlockSize: %d\n",
- DeviceInfo.wBlockSize);
- nand_dbg_print(NAND_DBG_DEBUG, "BlockDataSize: %d\n",
- DeviceInfo.wBlockDataSize);
- nand_dbg_print(NAND_DBG_DEBUG, "DataBlockNum: %d\n",
- DeviceInfo.wDataBlockNum);
- nand_dbg_print(NAND_DBG_DEBUG, "PlaneNum: %d\n",
- DeviceInfo.bPlaneNum);
- nand_dbg_print(NAND_DBG_DEBUG, "DeviceMainAreaSize: %d\n",
- DeviceInfo.wDeviceMainAreaSize);
- nand_dbg_print(NAND_DBG_DEBUG, "DeviceSpareAreaSize: %d\n",
- DeviceInfo.wDeviceSpareAreaSize);
- nand_dbg_print(NAND_DBG_DEBUG, "DevicesConnected: %d\n",
- DeviceInfo.wDevicesConnected);
- nand_dbg_print(NAND_DBG_DEBUG, "DeviceWidth: %d\n",
- DeviceInfo.wDeviceWidth);
- nand_dbg_print(NAND_DBG_DEBUG, "HWRevision: 0x%x\n",
- DeviceInfo.wHWRevision);
- nand_dbg_print(NAND_DBG_DEBUG, "HWFeatures: 0x%x\n",
- DeviceInfo.wHWFeatures);
- nand_dbg_print(NAND_DBG_DEBUG, "ONFIDevFeatures: 0x%x\n",
- DeviceInfo.wONFIDevFeatures);
- nand_dbg_print(NAND_DBG_DEBUG, "ONFIOptCommands: 0x%x\n",
- DeviceInfo.wONFIOptCommands);
- nand_dbg_print(NAND_DBG_DEBUG, "ONFITimingMode: 0x%x\n",
- DeviceInfo.wONFITimingMode);
- nand_dbg_print(NAND_DBG_DEBUG, "ONFIPgmCacheTimingMode: 0x%x\n",
- DeviceInfo.wONFIPgmCacheTimingMode);
- nand_dbg_print(NAND_DBG_DEBUG, "MLCDevice: %s\n",
- DeviceInfo.MLCDevice ? "Yes" : "No");
- nand_dbg_print(NAND_DBG_DEBUG, "SpareSkipBytes: %d\n",
- DeviceInfo.wSpareSkipBytes);
- nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageNumber: %d\n",
- DeviceInfo.nBitsInPageNumber);
- nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageDataSize: %d\n",
- DeviceInfo.nBitsInPageDataSize);
- nand_dbg_print(NAND_DBG_DEBUG, "BitsInBlockDataSize: %d\n",
- DeviceInfo.nBitsInBlockDataSize);
-}
-
-u16 NAND_Read_Device_ID(void)
-{
- u16 status = PASS;
- u8 no_of_planes;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- iowrite32(0x02, FlashReg + SPARE_AREA_SKIP_BYTES);
- iowrite32(0xffff, FlashReg + SPARE_AREA_MARKER);
- DeviceInfo.wDeviceMaker = ioread32(FlashReg + MANUFACTURER_ID);
- DeviceInfo.wDeviceID = ioread32(FlashReg + DEVICE_ID);
- DeviceInfo.MLCDevice = ioread32(FlashReg + DEVICE_PARAM_0) & 0x0c;
-
- if (ioread32(FlashReg + ONFI_DEVICE_NO_OF_LUNS) &
- ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
- if (FAIL == get_onfi_nand_para())
- return FAIL;
- } else if (DeviceInfo.wDeviceMaker == 0xEC) { /* Samsung NAND */
- get_samsung_nand_para();
- } else if (DeviceInfo.wDeviceMaker == 0x98) { /* Toshiba NAND */
- get_toshiba_nand_para();
- } else if (DeviceInfo.wDeviceMaker == 0xAD) { /* Hynix NAND */
- get_hynix_nand_para();
- } else {
- DeviceInfo.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
- }
-
- nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
- "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
- "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
- "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
- ioread32(FlashReg + ACC_CLKS),
- ioread32(FlashReg + RE_2_WE),
- ioread32(FlashReg + WE_2_RE),
- ioread32(FlashReg + ADDR_2_DATA),
- ioread32(FlashReg + RDWR_EN_LO_CNT),
- ioread32(FlashReg + RDWR_EN_HI_CNT),
- ioread32(FlashReg + CS_SETUP_CNT));
-
- DeviceInfo.wHWRevision = ioread32(FlashReg + REVISION);
- DeviceInfo.wHWFeatures = ioread32(FlashReg + FEATURES);
-
- DeviceInfo.wDeviceMainAreaSize =
- ioread32(FlashReg + DEVICE_MAIN_AREA_SIZE);
- DeviceInfo.wDeviceSpareAreaSize =
- ioread32(FlashReg + DEVICE_SPARE_AREA_SIZE);
-
- DeviceInfo.wPageDataSize =
- ioread32(FlashReg + LOGICAL_PAGE_DATA_SIZE);
-
- /* Note: With the Micron 4K NAND device, the controller reports the
- * page spare size as 216 bytes, while Micron's spec says 218 bytes.
- * Forcing it to 218 bytes keeps the controller from working
- * correctly, so leave the reported value alone. Keep in mind that
- * this discrepancy may cause other problems in the future.
- * - Yunpeng 2008-10-10
- */
- DeviceInfo.wPageSpareSize =
- ioread32(FlashReg + LOGICAL_PAGE_SPARE_SIZE);
-
- DeviceInfo.wPagesPerBlock = ioread32(FlashReg + PAGES_PER_BLOCK);
-
- DeviceInfo.wPageSize =
- DeviceInfo.wPageDataSize + DeviceInfo.wPageSpareSize;
- DeviceInfo.wBlockSize =
- DeviceInfo.wPageSize * DeviceInfo.wPagesPerBlock;
- DeviceInfo.wBlockDataSize =
- DeviceInfo.wPagesPerBlock * DeviceInfo.wPageDataSize;
-
- DeviceInfo.wDeviceWidth = ioread32(FlashReg + DEVICE_WIDTH);
- DeviceInfo.wDeviceType =
- ((ioread32(FlashReg + DEVICE_WIDTH) > 0) ? 16 : 8);
-
- DeviceInfo.wDevicesConnected = ioread32(FlashReg + DEVICES_CONNECTED);
-
- DeviceInfo.wSpareSkipBytes =
- ioread32(FlashReg + SPARE_AREA_SKIP_BYTES) *
- DeviceInfo.wDevicesConnected;
-
- DeviceInfo.nBitsInPageNumber =
- (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPagesPerBlock);
- DeviceInfo.nBitsInPageDataSize =
- (u8)GLOB_Calc_Used_Bits(DeviceInfo.wPageDataSize);
- DeviceInfo.nBitsInBlockDataSize =
- (u8)GLOB_Calc_Used_Bits(DeviceInfo.wBlockDataSize);
-
- set_ecc_config();
-
- no_of_planes = ioread32(FlashReg + NUMBER_OF_PLANES) &
- NUMBER_OF_PLANES__VALUE;
-
- switch (no_of_planes) {
- case 0:
- case 1:
- case 3:
- case 7:
- DeviceInfo.bPlaneNum = no_of_planes + 1;
- break;
- default:
- status = FAIL;
- break;
- }
-
- find_valid_banks();
-
- detect_partition_feature();
-
- dump_device_info();
-
- return status;
-}
-
-u16 NAND_UnlockArrayAll(void)
-{
- u64 start_addr, end_addr;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- start_addr = 0;
- end_addr = ((u64)DeviceInfo.wBlockSize *
- (DeviceInfo.wTotalBlocks - 1)) >>
- DeviceInfo.nBitsInPageDataSize;
-
- index_addr((u32)(MODE_10 | (u32)start_addr), 0x10);
- index_addr((u32)(MODE_10 | (u32)end_addr), 0x11);
-
- return PASS;
-}
-
-void NAND_LLD_Enable_Disable_Interrupts(u16 INT_ENABLE)
-{
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (INT_ENABLE)
- iowrite32(1, FlashReg + GLOBAL_INT_ENABLE);
- else
- iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
-}
-
-u16 NAND_Erase_Block(u32 block)
-{
- u16 status = PASS;
- u64 flash_add;
- u16 flash_bank;
- u32 intr_status = 0;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- * DeviceInfo.wBlockDataSize;
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- if (block >= DeviceInfo.wTotalBlocks)
- status = FAIL;
-
- if (status == PASS) {
- intr_status = intr_status_addresses[flash_bank];
-
- iowrite32(INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL,
- FlashReg + intr_status);
-
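- /* MODE_10 sub-command 0x01: erase the block at this address */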
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)), 1);
-
- while (!(ioread32(FlashReg + intr_status) &
- (INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL)))
- ;
-
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ERASE_FAIL)
- status = FAIL;
-
- iowrite32(INTR_STATUS0__ERASE_COMP | INTR_STATUS0__ERASE_FAIL,
- FlashReg + intr_status);
- }
-
- return status;
-}
-
-static u32 Boundary_Check_Block_Page(u32 block, u16 page,
- u16 page_count)
-{
- u32 status = PASS;
-
- if (block >= DeviceInfo.wTotalBlocks)
- status = FAIL;
-
- if (page + page_count > DeviceInfo.wPagesPerBlock)
- status = FAIL;
-
- return status;
-}
-
-u16 NAND_Read_Page_Spare(u8 *read_data, u32 block, u16 page,
- u16 page_count)
-{
- u32 status = PASS;
- u32 i;
- u64 flash_add;
- u32 PageSpareSize = DeviceInfo.wPageSpareSize;
- u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
- u32 flash_bank;
- u32 intr_status = 0;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- u8 *page_spare = buf_read_page_spare;
-
- if (block >= DeviceInfo.wTotalBlocks) {
- printk(KERN_ERR "block too big: %d\n", (int)block);
- status = FAIL;
- }
-
- if (page >= DeviceInfo.wPagesPerBlock) {
- printk(KERN_ERR "page too big: %d\n", page);
- status = FAIL;
- }
-
- if (page_count > 1) {
- printk(KERN_ERR "page count too big: %d\n", page_count);
- status = FAIL;
- }
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- * DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- if (status == PASS) {
- intr_status = intr_status_addresses[flash_bank];
- iowrite32(ioread32(FlashReg + intr_status),
- FlashReg + intr_status);
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)),
- 0x41);
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)),
- 0x2000 | page_count);
- while (!(ioread32(FlashReg + intr_status) &
- INTR_STATUS0__LOAD_COMP))
- ;
-
- iowrite32((u32)(MODE_01 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)),
- FlashMem);
-
- for (i = 0; i < (PageSpareSize / 4); i++)
- *((u32 *)page_spare + i) =
- ioread32(FlashMem + 0x10);
-
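- /* With ECC enabled the controller keeps the spare flag bytes at
- * the end of the physical spare area; move them to the front of
- * read_data so callers see the logical layout */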
- if (enable_ecc) {
- for (i = 0; i < spareFlagBytes; i++)
- read_data[i] =
- page_spare[PageSpareSize -
- spareFlagBytes + i];
- for (i = 0; i < (PageSpareSize - spareFlagBytes); i++)
- read_data[spareFlagBytes + i] =
- page_spare[i];
- } else {
- for (i = 0; i < PageSpareSize; i++)
- read_data[i] = page_spare[i];
- }
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
- }
-
- return status;
-}
-
- /* Unused function. Should be removed later. */
-u16 NAND_Write_Page_Spare(u8 *write_data, u32 block, u16 page,
- u16 page_count)
-{
- printk(KERN_ERR
- "Error! This function (NAND_Write_Page_Spare) should never"
- " be called!\n");
- return ERR;
-}
-
-/* op value: 0 - DDMA read; 1 - DDMA write */
-static void ddma_trans(u8 *data, u64 flash_add,
- u32 flash_bank, int op, u32 numPages)
-{
- u32 data_addr;
-
- /* Map virtual address to bus address for DDMA */
- data_addr = virt_to_bus(data);
-
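- /* The DDMA transfer is described with four MODE_10 indexed writes:
- * the first sets the direction (op) and page count, the next two
- * load the high and low 16 bits of the buffer bus address, and the
- * last one commits the descriptor and starts the burst */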
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)),
- (u16)(2 << 12) | (op << 8) | numPages);
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- ((u16)(0x0FFFF & (data_addr >> 16)) << 8)),
- (u16)(2 << 12) | (2 << 8) | 0);
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- ((u16)(0x0FFFF & data_addr) << 8)),
- (u16)(2 << 12) | (3 << 8) | 0);
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (1 << 16) | (0x40 << 8)),
- (u16)(2 << 12) | (4 << 8) | 0);
-}
-
- /* Return 1 if all bytes in buf are 0xff; otherwise return 0 */
-static int check_all_1(u8 *buf)
-{
- int i, j, cnt;
-
- for (i = 0; i < DeviceInfo.wPageDataSize; i++) {
- if (buf[i] != 0xff) {
- cnt = 0;
- nand_dbg_print(NAND_DBG_WARN,
- "the first non-0xff data byte is: %d\n", i);
- for (j = i; j < DeviceInfo.wPageDataSize; j++) {
- nand_dbg_print(NAND_DBG_WARN, "0x%x ", buf[j]);
- cnt++;
- if (cnt > 8)
- break;
- }
- nand_dbg_print(NAND_DBG_WARN, "\n");
- return 0;
- }
- }
-
- return 1;
-}
-
-static int do_ecc_new(unsigned long bank, u8 *buf,
- u32 block, u16 page)
-{
- int status = PASS;
- u16 err_page = 0;
- u16 err_byte;
- u8 err_sect;
- u8 err_dev;
- u16 err_fix_info;
- u16 err_addr;
- u32 ecc_sect_size;
- u8 *err_pos;
- u32 err_page_addr[4] = {ERR_PAGE_ADDR0,
- ERR_PAGE_ADDR1, ERR_PAGE_ADDR2, ERR_PAGE_ADDR3};
-
- ecc_sect_size = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
-
- do {
- err_page = ioread32(FlashReg + err_page_addr[bank]);
- err_addr = ioread32(FlashReg + ECC_ERROR_ADDRESS);
- err_byte = err_addr & ECC_ERROR_ADDRESS__OFFSET;
- err_sect = ((err_addr & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12);
- err_fix_info = ioread32(FlashReg + ERR_CORRECTION_INFO);
- err_dev = ((err_fix_info & ERR_CORRECTION_INFO__DEVICE_NR)
- >> 8);
- if (err_fix_info & ERR_CORRECTION_INFO__ERROR_TYPE) {
- nand_dbg_print(NAND_DBG_WARN,
- "%s, Line %d Uncorrectable ECC error "
- "when read block %d page %d."
- "PTN_INTR register: 0x%x "
- "err_page: %d, err_sect: %d, err_byte: %d, "
- "err_dev: %d, ecc_sect_size: %d, "
- "err_fix_info: 0x%x\n",
- __FILE__, __LINE__, block, page,
- ioread32(FlashReg + PTN_INTR),
- err_page, err_sect, err_byte, err_dev,
- ecc_sect_size, (u32)err_fix_info);
-
- if (check_all_1(buf))
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d"
- "All 0xff!\n",
- __FILE__, __LINE__);
- else
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d"
- "Not all 0xff!\n",
- __FILE__, __LINE__);
- status = FAIL;
- } else {
- nand_dbg_print(NAND_DBG_WARN,
- "%s, Line %d Found ECC error "
- "when read block %d page %d."
- "err_page: %d, err_sect: %d, err_byte: %d, "
- "err_dev: %d, ecc_sect_size: %d, "
- "err_fix_info: 0x%x\n",
- __FILE__, __LINE__, block, page,
- err_page, err_sect, err_byte, err_dev,
- ecc_sect_size, (u32)err_fix_info);
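- /* Correctable error: compute the offset of the failing byte in
- * the read buffer (page, ECC sector and interleaved device) and
- * flip the bits reported in the BYTEMASK field */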
- if (err_byte < ECC_SECTOR_SIZE) {
- err_pos = buf +
- (err_page - page) *
- DeviceInfo.wPageDataSize +
- err_sect * ecc_sect_size +
- err_byte *
- DeviceInfo.wDevicesConnected +
- err_dev;
-
- *err_pos ^= err_fix_info &
- ERR_CORRECTION_INFO__BYTEMASK;
- }
- }
- } while (!(err_fix_info & ERR_CORRECTION_INFO__LAST_ERR_INFO));
-
- return status;
-}
-
-u16 NAND_Read_Page_Main_Polling(u8 *read_data,
- u32 block, u16 page, u16 page_count)
-{
- u32 status = PASS;
- u64 flash_add;
- u32 intr_status = 0;
- u32 flash_bank;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- u8 *read_data_l;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
- if (status != PASS)
- return status;
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- * DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-
- intr_status = intr_status_addresses[flash_bank];
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- if (page_count > 1) {
- read_data_l = read_data;
- while (page_count > MAX_PAGES_PER_RW) {
- if (ioread32(FlashReg + MULTIPLANE_OPERATION))
- status = NAND_Multiplane_Read(read_data_l,
- block, page, MAX_PAGES_PER_RW);
- else
- status = NAND_Pipeline_Read_Ahead_Polling(
- read_data_l, block, page,
- MAX_PAGES_PER_RW);
-
- if (status == FAIL)
- return status;
-
- read_data_l += DeviceInfo.wPageDataSize *
- MAX_PAGES_PER_RW;
- page_count -= MAX_PAGES_PER_RW;
- page += MAX_PAGES_PER_RW;
- }
- if (ioread32(FlashReg + MULTIPLANE_OPERATION))
- status = NAND_Multiplane_Read(read_data_l,
- block, page, page_count);
- else
- status = NAND_Pipeline_Read_Ahead_Polling(
- read_data_l, block, page, page_count);
-
- return status;
- }
-
- iowrite32(1, FlashReg + DMA_ENABLE);
- while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- ddma_trans(read_data, flash_add, flash_bank, 0, 1);
-
- if (enable_ecc) {
- while (!(ioread32(FlashReg + intr_status) &
- (INTR_STATUS0__ECC_TRANSACTION_DONE |
- INTR_STATUS0__ECC_ERR)))
- ;
-
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_ERR) {
- iowrite32(INTR_STATUS0__ECC_ERR,
- FlashReg + intr_status);
- status = do_ecc_new(flash_bank, read_data,
- block, page);
- }
-
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_TRANSACTION_DONE &
- INTR_STATUS0__ECC_ERR)
- iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE |
- INTR_STATUS0__ECC_ERR,
- FlashReg + intr_status);
- else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_TRANSACTION_DONE)
- iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE,
- FlashReg + intr_status);
- else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_ERR)
- iowrite32(INTR_STATUS0__ECC_ERR,
- FlashReg + intr_status);
- } else {
- while (!(ioread32(FlashReg + intr_status) &
- INTR_STATUS0__DMA_CMD_COMP))
- ;
- iowrite32(INTR_STATUS0__DMA_CMD_COMP, FlashReg + intr_status);
- }
-
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- iowrite32(0, FlashReg + DMA_ENABLE);
- while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- return status;
-}
-
-u16 NAND_Pipeline_Read_Ahead_Polling(u8 *read_data,
- u32 block, u16 page, u16 page_count)
-{
- u32 status = PASS;
- u32 NumPages = page_count;
- u64 flash_add;
- u32 flash_bank;
- u32 intr_status = 0;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- u32 ecc_done_OR_dma_comp;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
-
- if (page_count < 2)
- status = FAIL;
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- *DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- if (status == PASS) {
- intr_status = intr_status_addresses[flash_bank];
- iowrite32(ioread32(FlashReg + intr_status),
- FlashReg + intr_status);
-
- iowrite32(1, FlashReg + DMA_ENABLE);
- while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
- ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);
-
- ecc_done_OR_dma_comp = 0;
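- /* The read is only finished when both ECC_TRANSACTION_DONE and
- * DMA_CMD_COMP have been seen; the first of the two just sets
- * this flag, the second one breaks out of the loop */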
- while (1) {
- if (enable_ecc) {
- while (!ioread32(FlashReg + intr_status))
- ;
-
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_ERR) {
- iowrite32(INTR_STATUS0__ECC_ERR,
- FlashReg + intr_status);
- status = do_ecc_new(flash_bank,
- read_data, block, page);
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__DMA_CMD_COMP) {
- iowrite32(INTR_STATUS0__DMA_CMD_COMP,
- FlashReg + intr_status);
-
- if (1 == ecc_done_OR_dma_comp)
- break;
-
- ecc_done_OR_dma_comp = 1;
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_TRANSACTION_DONE) {
- iowrite32(
- INTR_STATUS0__ECC_TRANSACTION_DONE,
- FlashReg + intr_status);
-
- if (1 == ecc_done_OR_dma_comp)
- break;
-
- ecc_done_OR_dma_comp = 1;
- }
- } else {
- while (!(ioread32(FlashReg + intr_status) &
- INTR_STATUS0__DMA_CMD_COMP))
- ;
-
- iowrite32(INTR_STATUS0__DMA_CMD_COMP,
- FlashReg + intr_status);
- break;
- }
-
- iowrite32((~INTR_STATUS0__ECC_ERR) &
- (~INTR_STATUS0__ECC_TRANSACTION_DONE) &
- (~INTR_STATUS0__DMA_CMD_COMP),
- FlashReg + intr_status);
-
- }
-
- iowrite32(ioread32(FlashReg + intr_status),
- FlashReg + intr_status);
-
- iowrite32(0, FlashReg + DMA_ENABLE);
-
- while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
- }
- return status;
-}
-
-u16 NAND_Read_Page_Main(u8 *read_data, u32 block, u16 page,
- u16 page_count)
-{
- u32 status = PASS;
- u64 flash_add;
- u32 intr_status = 0;
- u32 flash_bank;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- int ret;
- u8 *read_data_l;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
- if (status != PASS)
- return status;
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- * DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-
- intr_status = intr_status_addresses[flash_bank];
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- if (page_count > 1) {
- read_data_l = read_data;
- while (page_count > MAX_PAGES_PER_RW) {
- if (ioread32(FlashReg + MULTIPLANE_OPERATION))
- status = NAND_Multiplane_Read(read_data_l,
- block, page, MAX_PAGES_PER_RW);
- else
- status = NAND_Pipeline_Read_Ahead(
- read_data_l, block, page,
- MAX_PAGES_PER_RW);
-
- if (status == FAIL)
- return status;
-
- read_data_l += DeviceInfo.wPageDataSize *
- MAX_PAGES_PER_RW;
- page_count -= MAX_PAGES_PER_RW;
- page += MAX_PAGES_PER_RW;
- }
- if (ioread32(FlashReg + MULTIPLANE_OPERATION))
- status = NAND_Multiplane_Read(read_data_l,
- block, page, page_count);
- else
- status = NAND_Pipeline_Read_Ahead(
- read_data_l, block, page, page_count);
-
- return status;
- }
-
- iowrite32(1, FlashReg + DMA_ENABLE);
- while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- /* Fill the mrst_nand_info structure */
- info.state = INT_READ_PAGE_MAIN;
- info.read_data = read_data;
- info.flash_bank = flash_bank;
- info.block = block;
- info.page = page;
- info.ret = PASS;
-
- ddma_trans(read_data, flash_add, flash_bank, 0, 1);
-
- iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */
-
- ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
- if (!ret) {
- printk(KERN_ERR "Wait for completion timeout "
- "in %s, Line %d\n", __FILE__, __LINE__);
- status = ERR;
- } else {
- status = info.ret;
- }
-
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- iowrite32(0, FlashReg + DMA_ENABLE);
- while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- return status;
-}
-
-void Conv_Spare_Data_Log2Phy_Format(u8 *data)
-{
- int i;
- const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
- const u32 PageSpareSize = DeviceInfo.wPageSpareSize;
-
- if (enable_ecc) {
- for (i = spareFlagBytes - 1; i >= 0; i--)
- data[PageSpareSize - spareFlagBytes + i] = data[i];
- }
-}
-
-void Conv_Spare_Data_Phy2Log_Format(u8 *data)
-{
- int i;
- const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
- const u32 PageSpareSize = DeviceInfo.wPageSpareSize;
-
- if (enable_ecc) {
- for (i = 0; i < spareFlagBytes; i++)
- data[i] = data[PageSpareSize - spareFlagBytes + i];
- }
-}
-
-
-void Conv_Main_Spare_Data_Log2Phy_Format(u8 *data, u16 page_count)
-{
- const u32 PageSize = DeviceInfo.wPageSize;
- const u32 PageDataSize = DeviceInfo.wPageDataSize;
- const u32 eccBytes = DeviceInfo.wECCBytesPerSector;
- const u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
- const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
- u32 eccSectorSize;
- u32 page_offset;
- int i, j;
-
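- /* Convert from the logical layout (main data followed by the spare
- * flag bytes) to the controller's physical page layout (each ECC
- * sector spread apart to leave room for its ECC bytes, plus the
- * skipped spare bytes). Work from the end of the buffer so the
- * in-place expansion does not overwrite data not yet moved. */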
- eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
- if (enable_ecc) {
- while (page_count > 0) {
- page_offset = (page_count - 1) * PageSize;
- j = (DeviceInfo.wPageDataSize / eccSectorSize);
- for (i = spareFlagBytes - 1; i >= 0; i--)
- data[page_offset +
- (eccSectorSize + eccBytes) * j + i] =
- data[page_offset + PageDataSize + i];
- for (j--; j >= 1; j--) {
- for (i = eccSectorSize - 1; i >= 0; i--)
- data[page_offset +
- (eccSectorSize + eccBytes) * j + i] =
- data[page_offset +
- eccSectorSize * j + i];
- }
- for (i = (PageSize - spareSkipBytes) - 1;
- i >= PageDataSize; i--)
- data[page_offset + i + spareSkipBytes] =
- data[page_offset + i];
- page_count--;
- }
- }
-}
-
-void Conv_Main_Spare_Data_Phy2Log_Format(u8 *data, u16 page_count)
-{
- const u32 PageSize = DeviceInfo.wPageSize;
- const u32 PageDataSize = DeviceInfo.wPageDataSize;
- const u32 eccBytes = DeviceInfo.wECCBytesPerSector;
- const u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
- const u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
- u32 eccSectorSize;
- u32 page_offset;
- int i, j;
-
- eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
- if (enable_ecc) {
- while (page_count > 0) {
- page_offset = (page_count - 1) * PageSize;
- for (i = PageDataSize;
- i < PageSize - spareSkipBytes;
- i++)
- data[page_offset + i] =
- data[page_offset + i +
- spareSkipBytes];
- for (j = 1;
- j < DeviceInfo.wPageDataSize / eccSectorSize;
- j++) {
- for (i = 0; i < eccSectorSize; i++)
- data[page_offset +
- eccSectorSize * j + i] =
- data[page_offset +
- (eccSectorSize + eccBytes) * j
- + i];
- }
- for (i = 0; i < spareFlagBytes; i++)
- data[page_offset + PageDataSize + i] =
- data[page_offset +
- (eccSectorSize + eccBytes) * j + i];
- page_count--;
- }
- }
-}
-
- /* Untested function */
-u16 NAND_Multiplane_Read(u8 *read_data, u32 block, u16 page,
- u16 page_count)
-{
- u32 status = PASS;
- u32 NumPages = page_count;
- u64 flash_add;
- u32 flash_bank;
- u32 intr_status = 0;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- u32 ecc_done_OR_dma_comp;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- * DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- if (status == PASS) {
- intr_status = intr_status_addresses[flash_bank];
- iowrite32(ioread32(FlashReg + intr_status),
- FlashReg + intr_status);
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
- iowrite32(0x01, FlashReg + MULTIPLANE_OPERATION);
-
- iowrite32(1, FlashReg + DMA_ENABLE);
- while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
- ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);
-
- ecc_done_OR_dma_comp = 0;
- while (1) {
- if (enable_ecc) {
- while (!ioread32(FlashReg + intr_status))
- ;
-
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_ERR) {
- iowrite32(INTR_STATUS0__ECC_ERR,
- FlashReg + intr_status);
- status = do_ecc_new(flash_bank,
- read_data, block, page);
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__DMA_CMD_COMP) {
- iowrite32(INTR_STATUS0__DMA_CMD_COMP,
- FlashReg + intr_status);
-
- if (1 == ecc_done_OR_dma_comp)
- break;
-
- ecc_done_OR_dma_comp = 1;
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_TRANSACTION_DONE) {
- iowrite32(
- INTR_STATUS0__ECC_TRANSACTION_DONE,
- FlashReg + intr_status);
-
- if (1 == ecc_done_OR_dma_comp)
- break;
-
- ecc_done_OR_dma_comp = 1;
- }
- } else {
- while (!(ioread32(FlashReg + intr_status) &
- INTR_STATUS0__DMA_CMD_COMP))
- ;
- iowrite32(INTR_STATUS0__DMA_CMD_COMP,
- FlashReg + intr_status);
- break;
- }
-
- iowrite32((~INTR_STATUS0__ECC_ERR) &
- (~INTR_STATUS0__ECC_TRANSACTION_DONE) &
- (~INTR_STATUS0__DMA_CMD_COMP),
- FlashReg + intr_status);
-
- }
-
- iowrite32(ioread32(FlashReg + intr_status),
- FlashReg + intr_status);
-
- iowrite32(0, FlashReg + DMA_ENABLE);
-
- while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- iowrite32(0, FlashReg + MULTIPLANE_OPERATION);
- }
-
- return status;
-}
-
-u16 NAND_Pipeline_Read_Ahead(u8 *read_data, u32 block,
- u16 page, u16 page_count)
-{
- u32 status = PASS;
- u32 NumPages = page_count;
- u64 flash_add;
- u32 flash_bank;
- u32 intr_status = 0;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- int ret;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
-
- if (page_count < 2)
- status = FAIL;
-
- if (status != PASS)
- return status;
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- *DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- intr_status = intr_status_addresses[flash_bank];
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- iowrite32(1, FlashReg + DMA_ENABLE);
- while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-
- /* Fill the mrst_nand_info structure */
- info.state = INT_PIPELINE_READ_AHEAD;
- info.read_data = read_data;
- info.flash_bank = flash_bank;
- info.block = block;
- info.page = page;
- info.ret = PASS;
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
-
- ddma_trans(read_data, flash_add, flash_bank, 0, NumPages);
-
- iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable Interrupt */
-
- ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
- if (!ret) {
- printk(KERN_ERR "Wait for completion timeout "
- "in %s, Line %d\n", __FILE__, __LINE__);
- status = ERR;
- } else {
- status = info.ret;
- }
-
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- iowrite32(0, FlashReg + DMA_ENABLE);
-
- while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- return status;
-}
-
-
-u16 NAND_Write_Page_Main(u8 *write_data, u32 block, u16 page,
- u16 page_count)
-{
- u32 status = PASS;
- u64 flash_add;
- u32 intr_status = 0;
- u32 flash_bank;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- int ret;
- u8 *write_data_l;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
- if (status != PASS)
- return status;
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- * DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- intr_status = intr_status_addresses[flash_bank];
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-
- iowrite32(INTR_STATUS0__PROGRAM_COMP |
- INTR_STATUS0__PROGRAM_FAIL, FlashReg + intr_status);
-
- if (page_count > 1) {
- write_data_l = write_data;
- while (page_count > MAX_PAGES_PER_RW) {
- if (ioread32(FlashReg + MULTIPLANE_OPERATION))
- status = NAND_Multiplane_Write(write_data_l,
- block, page, MAX_PAGES_PER_RW);
- else
- status = NAND_Pipeline_Write_Ahead(
- write_data_l, block, page,
- MAX_PAGES_PER_RW);
- if (status == FAIL)
- return status;
-
- write_data_l += DeviceInfo.wPageDataSize *
- MAX_PAGES_PER_RW;
- page_count -= MAX_PAGES_PER_RW;
- page += MAX_PAGES_PER_RW;
- }
- if (ioread32(FlashReg + MULTIPLANE_OPERATION))
- status = NAND_Multiplane_Write(write_data_l,
- block, page, page_count);
- else
- status = NAND_Pipeline_Write_Ahead(write_data_l,
- block, page, page_count);
-
- return status;
- }
-
- iowrite32(1, FlashReg + DMA_ENABLE);
- while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- /* Fill the mrst_nand_info structure */
- info.state = INT_WRITE_PAGE_MAIN;
- info.write_data = write_data;
- info.flash_bank = flash_bank;
- info.block = block;
- info.page = page;
- info.ret = PASS;
-
- ddma_trans(write_data, flash_add, flash_bank, 1, 1);
-
- iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable interrupt */
-
- ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
- if (!ret) {
- printk(KERN_ERR "Wait for completion timeout "
- "in %s, Line %d\n", __FILE__, __LINE__);
- status = ERR;
- } else {
- status = info.ret;
- }
-
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- iowrite32(0, FlashReg + DMA_ENABLE);
- while (ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG)
- ;
-
- return status;
-}
-
-void NAND_ECC_Ctrl(int enable)
-{
- if (enable) {
- nand_dbg_print(NAND_DBG_WARN,
- "Will enable ECC in %s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
- iowrite32(1, FlashReg + ECC_ENABLE);
- enable_ecc = 1;
- } else {
- nand_dbg_print(NAND_DBG_WARN,
- "Will disable ECC in %s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
- iowrite32(0, FlashReg + ECC_ENABLE);
- enable_ecc = 0;
- }
-}
-
-u16 NAND_Write_Page_Main_Spare(u8 *write_data, u32 block,
- u16 page, u16 page_count)
-{
- u32 status = PASS;
- u32 i, j, page_num = 0;
- u32 PageSize = DeviceInfo.wPageSize;
- u32 PageDataSize = DeviceInfo.wPageDataSize;
- u32 eccBytes = DeviceInfo.wECCBytesPerSector;
- u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
- u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
- u64 flash_add;
- u32 eccSectorSize;
- u32 flash_bank;
- u32 intr_status = 0;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- u8 *page_main_spare = buf_write_page_main_spare;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- if (status == PASS) {
- intr_status = intr_status_addresses[flash_bank];
-
- iowrite32(1, FlashReg + TRANSFER_SPARE_REG);
-
- while ((status != FAIL) && (page_count > 0)) {
- flash_add = (u64)(block %
- (DeviceInfo.wTotalBlocks / totalUsedBanks)) *
- DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
-
- iowrite32(ioread32(FlashReg + intr_status),
- FlashReg + intr_status);
-
- iowrite32((u32)(MODE_01 | (flash_bank << 24) |
- (flash_add >>
- DeviceInfo.nBitsInPageDataSize)),
- FlashMem);
-
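- /* Build the physical page image: interleave each data sector
- * with its ECC bytes, append the spare flag bytes, and fill the
- * first spareSkipBytes of the spare area with 0xff */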
- if (enable_ecc) {
- for (j = 0;
- j <
- DeviceInfo.wPageDataSize / eccSectorSize;
- j++) {
- for (i = 0; i < eccSectorSize; i++)
- page_main_spare[(eccSectorSize +
- eccBytes) * j +
- i] =
- write_data[eccSectorSize *
- j + i];
-
- for (i = 0; i < eccBytes; i++)
- page_main_spare[(eccSectorSize +
- eccBytes) * j +
- eccSectorSize +
- i] =
- write_data[PageDataSize +
- spareFlagBytes +
- eccBytes * j +
- i];
- }
-
- for (i = 0; i < spareFlagBytes; i++)
- page_main_spare[(eccSectorSize +
- eccBytes) * j + i] =
- write_data[PageDataSize + i];
-
- for (i = PageSize - 1; i >= PageDataSize +
- spareSkipBytes; i--)
- page_main_spare[i] = page_main_spare[i -
- spareSkipBytes];
-
- for (i = PageDataSize; i < PageDataSize +
- spareSkipBytes; i++)
- page_main_spare[i] = 0xff;
-
- for (i = 0; i < PageSize / 4; i++)
- iowrite32(
- *((u32 *)page_main_spare + i),
- FlashMem + 0x10);
- } else {
-
- for (i = 0; i < PageSize / 4; i++)
- iowrite32(*((u32 *)write_data + i),
- FlashMem + 0x10);
- }
-
- while (!(ioread32(FlashReg + intr_status) &
- (INTR_STATUS0__PROGRAM_COMP |
- INTR_STATUS0__PROGRAM_FAIL)))
- ;
-
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__PROGRAM_FAIL)
- status = FAIL;
-
- iowrite32(ioread32(FlashReg + intr_status),
- FlashReg + intr_status);
-
- page_num++;
- page_count--;
- write_data += PageSize;
- }
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
- }
-
- return status;
-}
-
-u16 NAND_Read_Page_Main_Spare(u8 *read_data, u32 block, u16 page,
- u16 page_count)
-{
- u32 status = PASS;
- u32 i, j;
- u64 flash_add = 0;
- u32 PageSize = DeviceInfo.wPageSize;
- u32 PageDataSize = DeviceInfo.wPageDataSize;
- u32 PageSpareSize = DeviceInfo.wPageSpareSize;
- u32 eccBytes = DeviceInfo.wECCBytesPerSector;
- u32 spareFlagBytes = DeviceInfo.wNumPageSpareFlag;
- u32 spareSkipBytes = DeviceInfo.wSpareSkipBytes;
- u32 eccSectorSize;
- u32 flash_bank;
- u32 intr_status = 0;
- u8 *read_data_l = read_data;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- u8 *page_main_spare = buf_read_page_main_spare;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- eccSectorSize = ECC_SECTOR_SIZE * (DeviceInfo.wDevicesConnected);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- if (status == PASS) {
- intr_status = intr_status_addresses[flash_bank];
-
- iowrite32(1, FlashReg + TRANSFER_SPARE_REG);
-
- iowrite32(ioread32(FlashReg + intr_status),
- FlashReg + intr_status);
-
- while ((status != FAIL) && (page_count > 0)) {
- flash_add = (u64)(block %
- (DeviceInfo.wTotalBlocks / totalUsedBanks))
- * DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)),
- 0x43);
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)),
- 0x2000 | page_count);
-
- while (!(ioread32(FlashReg + intr_status) &
- INTR_STATUS0__LOAD_COMP))
- ;
-
- iowrite32((u32)(MODE_01 | (flash_bank << 24) |
- (flash_add >>
- DeviceInfo.nBitsInPageDataSize)),
- FlashMem);
-
- for (i = 0; i < PageSize / 4; i++)
- *(((u32 *)page_main_spare) + i) =
- ioread32(FlashMem + 0x10);
-
- if (enable_ecc) {
- for (i = PageDataSize; i < PageSize -
- spareSkipBytes; i++)
- page_main_spare[i] = page_main_spare[i +
- spareSkipBytes];
-
- for (j = 0;
- j < DeviceInfo.wPageDataSize / eccSectorSize;
- j++) {
-
- for (i = 0; i < eccSectorSize; i++)
- read_data_l[eccSectorSize * j +
- i] =
- page_main_spare[
- (eccSectorSize +
- eccBytes) * j + i];
-
- for (i = 0; i < eccBytes; i++)
- read_data_l[PageDataSize +
- spareFlagBytes +
- eccBytes * j + i] =
- page_main_spare[
- (eccSectorSize +
- eccBytes) * j +
- eccSectorSize + i];
- }
-
- for (i = 0; i < spareFlagBytes; i++)
- read_data_l[PageDataSize + i] =
- page_main_spare[(eccSectorSize +
- eccBytes) * j + i];
- } else {
- for (i = 0; i < (PageDataSize + PageSpareSize);
- i++)
- read_data_l[i] = page_main_spare[i];
-
- }
-
- if (enable_ecc) {
- while (!(ioread32(FlashReg + intr_status) &
- (INTR_STATUS0__ECC_TRANSACTION_DONE |
- INTR_STATUS0__ECC_ERR)))
- ;
-
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_ERR) {
- iowrite32(INTR_STATUS0__ECC_ERR,
- FlashReg + intr_status);
- status = do_ecc_new(flash_bank,
- read_data, block, page);
- }
-
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_TRANSACTION_DONE &
- INTR_STATUS0__ECC_ERR) {
- iowrite32(INTR_STATUS0__ECC_ERR |
- INTR_STATUS0__ECC_TRANSACTION_DONE,
- FlashReg + intr_status);
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_TRANSACTION_DONE) {
- iowrite32(
- INTR_STATUS0__ECC_TRANSACTION_DONE,
- FlashReg + intr_status);
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_ERR) {
- iowrite32(INTR_STATUS0__ECC_ERR,
- FlashReg + intr_status);
- }
- }
-
- page++;
- page_count--;
- read_data_l += PageSize;
- }
- }
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
-
- return status;
-}
-
-u16 NAND_Pipeline_Write_Ahead(u8 *write_data, u32 block,
- u16 page, u16 page_count)
-{
- u16 status = PASS;
- u32 NumPages = page_count;
- u64 flash_add;
- u32 flash_bank;
- u32 intr_status = 0;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- int ret;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
-
- if (page_count < 2)
- status = FAIL;
-
- if (status != PASS)
- return status;
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- * DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- intr_status = intr_status_addresses[flash_bank];
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- iowrite32(1, FlashReg + DMA_ENABLE);
- while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-
- /* Fill the mrst_nand_info structure */
- info.state = INT_PIPELINE_WRITE_AHEAD;
- info.write_data = write_data;
- info.flash_bank = flash_bank;
- info.block = block;
- info.page = page;
- info.ret = PASS;
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
-
- ddma_trans(write_data, flash_add, flash_bank, 1, NumPages);
-
- iowrite32(1, FlashReg + GLOBAL_INT_ENABLE); /* Enable interrupt */
-
- ret = wait_for_completion_timeout(&info.complete, 10 * HZ);
- if (!ret) {
- printk(KERN_ERR "Wait for completion timeout "
- "in %s, Line %d\n", __FILE__, __LINE__);
- status = ERR;
- } else {
- status = info.ret;
- }
-
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- iowrite32(0, FlashReg + DMA_ENABLE);
- while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- return status;
-}
-
- /* Untested function */
-u16 NAND_Multiplane_Write(u8 *write_data, u32 block, u16 page,
- u16 page_count)
-{
- u16 status = PASS;
- u32 NumPages = page_count;
- u64 flash_add;
- u32 flash_bank;
- u32 intr_status = 0;
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- u16 status2 = PASS;
- u32 t;
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- status = Boundary_Check_Block_Page(block, page, page_count);
- if (status != PASS)
- return status;
-
- flash_add = (u64)(block % (DeviceInfo.wTotalBlocks / totalUsedBanks))
- * DeviceInfo.wBlockDataSize +
- (u64)page * DeviceInfo.wPageDataSize;
-
- flash_bank = block / (DeviceInfo.wTotalBlocks / totalUsedBanks);
-
- intr_status = intr_status_addresses[flash_bank];
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
- iowrite32(0x01, FlashReg + MULTIPLANE_OPERATION);
-
- iowrite32(1, FlashReg + DMA_ENABLE);
- while (!(ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- iowrite32(0, FlashReg + TRANSFER_SPARE_REG);
-
- index_addr((u32)(MODE_10 | (flash_bank << 24) |
- (flash_add >> DeviceInfo.nBitsInPageDataSize)), 0x42);
-
- ddma_trans(write_data, flash_add, flash_bank, 1, NumPages);
-
- while (1) {
- while (!ioread32(FlashReg + intr_status))
- ;
-
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__DMA_CMD_COMP) {
- iowrite32(INTR_STATUS0__DMA_CMD_COMP,
- FlashReg + intr_status);
- status = PASS;
- if (status2 == FAIL)
- status = FAIL;
- break;
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__PROGRAM_FAIL) {
- status2 = FAIL;
- status = FAIL;
- t = ioread32(FlashReg + intr_status) &
- INTR_STATUS0__PROGRAM_FAIL;
- iowrite32(t, FlashReg + intr_status);
- } else {
- iowrite32((~INTR_STATUS0__PROGRAM_FAIL) &
- (~INTR_STATUS0__DMA_CMD_COMP),
- FlashReg + intr_status);
- }
- }
-
- iowrite32(ioread32(FlashReg + intr_status), FlashReg + intr_status);
-
- iowrite32(0, FlashReg + DMA_ENABLE);
-
- while ((ioread32(FlashReg + DMA_ENABLE) & DMA_ENABLE__FLAG))
- ;
-
- iowrite32(0, FlashReg + MULTIPLANE_OPERATION);
-
- return status;
-}
-
-
-#if CMD_DMA
-static irqreturn_t cdma_isr(int irq, void *dev_id)
-{
- struct mrst_nand_info *dev = dev_id;
- int first_failed_cmd;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- if (!is_cdma_interrupt())
- return IRQ_NONE;
-
- /* Disable controller interrupts */
- iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
- GLOB_FTL_Event_Status(&first_failed_cmd);
- complete(&dev->complete);
-
- return IRQ_HANDLED;
-}
-#else
-static void handle_nand_int_read(struct mrst_nand_info *dev)
-{
- u32 intr_status_addresses[4] = {INTR_STATUS0,
- INTR_STATUS1, INTR_STATUS2, INTR_STATUS3};
- u32 intr_status;
- u32 ecc_done_OR_dma_comp = 0;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- dev->ret = PASS;
- intr_status = intr_status_addresses[dev->flash_bank];
-
- while (1) {
- if (enable_ecc) {
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_ERR) {
- iowrite32(INTR_STATUS0__ECC_ERR,
- FlashReg + intr_status);
- dev->ret = do_ecc_new(dev->flash_bank,
- dev->read_data,
- dev->block, dev->page);
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__DMA_CMD_COMP) {
- iowrite32(INTR_STATUS0__DMA_CMD_COMP,
- FlashReg + intr_status);
- if (1 == ecc_done_OR_dma_comp)
- break;
- ecc_done_OR_dma_comp = 1;
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__ECC_TRANSACTION_DONE) {
- iowrite32(INTR_STATUS0__ECC_TRANSACTION_DONE,
- FlashReg + intr_status);
- if (1 == ecc_done_OR_dma_comp)
- break;
- ecc_done_OR_dma_comp = 1;
- }
- } else {
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__DMA_CMD_COMP) {
- iowrite32(INTR_STATUS0__DMA_CMD_COMP,
- FlashReg + intr_status);
- break;
- } else {
- printk(KERN_ERR "Illegal INTS "
- "(offset addr 0x%x) value: 0x%x\n",
- intr_status,
- ioread32(FlashReg + intr_status));
- }
- }
-
- iowrite32((~INTR_STATUS0__ECC_ERR) &
- (~INTR_STATUS0__ECC_TRANSACTION_DONE) &
- (~INTR_STATUS0__DMA_CMD_COMP),
- FlashReg + intr_status);
- }
-}
-
-static void handle_nand_int_write(struct mrst_nand_info *dev)
-{
- u32 intr_status;
- u32 intr[4] = {INTR_STATUS0, INTR_STATUS1,
- INTR_STATUS2, INTR_STATUS3};
- int status = PASS;
-
- nand_dbg_print(NAND_DBG_DEBUG, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- dev->ret = PASS;
- intr_status = intr[dev->flash_bank];
-
- while (1) {
- while (!ioread32(FlashReg + intr_status))
- ;
-
- if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__DMA_CMD_COMP) {
- iowrite32(INTR_STATUS0__DMA_CMD_COMP,
- FlashReg + intr_status);
- if (FAIL == status)
- dev->ret = FAIL;
- break;
- } else if (ioread32(FlashReg + intr_status) &
- INTR_STATUS0__PROGRAM_FAIL) {
- status = FAIL;
- iowrite32(INTR_STATUS0__PROGRAM_FAIL,
- FlashReg + intr_status);
- } else {
- iowrite32((~INTR_STATUS0__PROGRAM_FAIL) &
- (~INTR_STATUS0__DMA_CMD_COMP),
- FlashReg + intr_status);
- }
- }
-}
-
-static irqreturn_t ddma_isr(int irq, void *dev_id)
-{
- struct mrst_nand_info *dev = dev_id;
- u32 int_mask, ints0, ints1, ints2, ints3, ints_offset;
- u32 intr[4] = {INTR_STATUS0, INTR_STATUS1,
- INTR_STATUS2, INTR_STATUS3};
-
- int_mask = INTR_STATUS0__DMA_CMD_COMP |
- INTR_STATUS0__ECC_TRANSACTION_DONE |
- INTR_STATUS0__ECC_ERR |
- INTR_STATUS0__PROGRAM_FAIL |
- INTR_STATUS0__ERASE_FAIL;
-
- ints0 = ioread32(FlashReg + INTR_STATUS0);
- ints1 = ioread32(FlashReg + INTR_STATUS1);
- ints2 = ioread32(FlashReg + INTR_STATUS2);
- ints3 = ioread32(FlashReg + INTR_STATUS3);
-
- ints_offset = intr[dev->flash_bank];
-
- nand_dbg_print(NAND_DBG_DEBUG,
- "INTR0: 0x%x, INTR1: 0x%x, INTR2: 0x%x, INTR3: 0x%x, "
- "DMA_INTR: 0x%x, "
- "dev->state: 0x%x, dev->flash_bank: %d\n",
- ints0, ints1, ints2, ints3,
- ioread32(FlashReg + DMA_INTR),
- dev->state, dev->flash_bank);
-
- if (!(ioread32(FlashReg + ints_offset) & int_mask)) {
- iowrite32(ints0, FlashReg + INTR_STATUS0);
- iowrite32(ints1, FlashReg + INTR_STATUS1);
- iowrite32(ints2, FlashReg + INTR_STATUS2);
- iowrite32(ints3, FlashReg + INTR_STATUS3);
- nand_dbg_print(NAND_DBG_WARN,
- "ddma_isr: Invalid interrupt for NAND controller. "
- "Ignore it\n");
- return IRQ_NONE;
- }
-
- switch (dev->state) {
- case INT_READ_PAGE_MAIN:
- case INT_PIPELINE_READ_AHEAD:
- /* Disable controller interrupts */
- iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
- handle_nand_int_read(dev);
- break;
- case INT_WRITE_PAGE_MAIN:
- case INT_PIPELINE_WRITE_AHEAD:
- iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
- handle_nand_int_write(dev);
- break;
- default:
- printk(KERN_ERR "ddma_isr - Illegal state: 0x%x\n",
- dev->state);
- return IRQ_NONE;
- }
-
- dev->state = INT_IDLE_STATE;
- complete(&dev->complete);
- return IRQ_HANDLED;
-}
-#endif
-
-static const struct pci_device_id nand_pci_ids[] = {
- {
- .vendor = 0x8086,
- .device = 0x0809,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- },
- { /* end: all zeroes */ }
-};
-
-static int nand_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
-{
- int ret = -ENODEV;
- unsigned long csr_base;
- unsigned long csr_len;
- struct mrst_nand_info *pndev = &info;
- u32 int_mask;
-
- ret = pci_enable_device(dev);
- if (ret) {
- printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
- return ret;
- }
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- FlashReg = ioremap_nocache(GLOB_HWCTL_REG_BASE,
- GLOB_HWCTL_REG_SIZE);
- if (!FlashReg) {
- printk(KERN_ERR "Spectra: ioremap_nocache failed!");
- goto failed_disable;
- }
- nand_dbg_print(NAND_DBG_WARN,
- "Spectra: Remapped reg base address: "
- "0x%p, len: %d\n",
- FlashReg, GLOB_HWCTL_REG_SIZE);
-
- FlashMem = ioremap_nocache(GLOB_HWCTL_MEM_BASE,
- GLOB_HWCTL_MEM_SIZE);
- if (!FlashMem) {
- printk(KERN_ERR "Spectra: ioremap_nocache failed!");
- iounmap(FlashReg);
- goto failed_disable;
- }
- nand_dbg_print(NAND_DBG_WARN,
- "Spectra: Remapped flash base address: "
- "0x%p, len: %d\n",
- (void *)FlashMem, GLOB_HWCTL_MEM_SIZE);
-
- nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
- "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
- "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
- "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
- ioread32(FlashReg + ACC_CLKS),
- ioread32(FlashReg + RE_2_WE),
- ioread32(FlashReg + WE_2_RE),
- ioread32(FlashReg + ADDR_2_DATA),
- ioread32(FlashReg + RDWR_EN_LO_CNT),
- ioread32(FlashReg + RDWR_EN_HI_CNT),
- ioread32(FlashReg + CS_SETUP_CNT));
-
- NAND_Flash_Reset();
-
- iowrite32(0, FlashReg + GLOBAL_INT_ENABLE);
-
-#if CMD_DMA
- info.pcmds_num = 0;
- info.flash_bank = 0;
- info.cdma_num = 0;
- int_mask = (DMA_INTR__DESC_COMP_CHANNEL0 |
- DMA_INTR__DESC_COMP_CHANNEL1 |
- DMA_INTR__DESC_COMP_CHANNEL2 |
- DMA_INTR__DESC_COMP_CHANNEL3 |
- DMA_INTR__MEMCOPY_DESC_COMP);
- iowrite32(int_mask, FlashReg + DMA_INTR_EN);
- iowrite32(0xFFFF, FlashReg + DMA_INTR);
-
- int_mask = (INTR_STATUS0__ECC_ERR |
- INTR_STATUS0__PROGRAM_FAIL |
- INTR_STATUS0__ERASE_FAIL);
-#else
- int_mask = INTR_STATUS0__DMA_CMD_COMP |
- INTR_STATUS0__ECC_TRANSACTION_DONE |
- INTR_STATUS0__ECC_ERR |
- INTR_STATUS0__PROGRAM_FAIL |
- INTR_STATUS0__ERASE_FAIL;
-#endif
- iowrite32(int_mask, FlashReg + INTR_EN0);
- iowrite32(int_mask, FlashReg + INTR_EN1);
- iowrite32(int_mask, FlashReg + INTR_EN2);
- iowrite32(int_mask, FlashReg + INTR_EN3);
-
- /* Clear all status bits */
- iowrite32(0xFFFF, FlashReg + INTR_STATUS0);
- iowrite32(0xFFFF, FlashReg + INTR_STATUS1);
- iowrite32(0xFFFF, FlashReg + INTR_STATUS2);
- iowrite32(0xFFFF, FlashReg + INTR_STATUS3);
-
- iowrite32(0x0F, FlashReg + RB_PIN_ENABLED);
- iowrite32(CHIP_EN_DONT_CARE__FLAG, FlashReg + CHIP_ENABLE_DONT_CARE);
-
- /* Should set value for these registers when init */
- iowrite32(0, FlashReg + TWO_ROW_ADDR_CYCLES);
- iowrite32(1, FlashReg + ECC_ENABLE);
- enable_ecc = 1;
-
- pci_set_master(dev);
- pndev->dev = dev;
-
- csr_base = pci_resource_start(dev, 0);
- if (!csr_base) {
- printk(KERN_ERR "Spectra: pci_resource_start failed!\n");
- ret = -ENODEV;
- goto failed_req_csr;
- }
-
- csr_len = pci_resource_len(dev, 0);
- if (!csr_len) {
- printk(KERN_ERR "Spectra: pci_resource_len failed!\n");
- ret = -ENODEV;
- goto failed_req_csr;
- }
-
- ret = pci_request_regions(dev, SPECTRA_NAND_NAME);
- if (ret) {
- printk(KERN_ERR "Spectra: Unable to request "
- "memory region\n");
- goto failed_req_csr;
- }
-
- pndev->ioaddr = ioremap_nocache(csr_base, csr_len);
- if (!pndev->ioaddr) {
- printk(KERN_ERR "Spectra: Unable to remap memory region\n");
- ret = -ENOMEM;
- goto failed_remap_csr;
- }
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra: CSR 0x%08lx -> 0x%p (0x%lx)\n",
- csr_base, pndev->ioaddr, csr_len);
-
- init_completion(&pndev->complete);
- nand_dbg_print(NAND_DBG_DEBUG, "Spectra: IRQ %d\n", dev->irq);
-
-#if CMD_DMA
- if (request_irq(dev->irq, cdma_isr, IRQF_SHARED,
- SPECTRA_NAND_NAME, &info)) {
- printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
- ret = -ENODEV;
- iounmap(pndev->ioaddr);
- goto failed_remap_csr;
- }
-#else
- if (request_irq(dev->irq, ddma_isr, IRQF_SHARED,
- SPECTRA_NAND_NAME, &info)) {
- printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
- ret = -ENODEV;
- iounmap(pndev->ioaddr);
- goto failed_remap_csr;
- }
-#endif
-
- pci_set_drvdata(dev, pndev);
-
- ret = GLOB_LLD_Read_Device_ID();
- if (ret) {
- iounmap(pndev->ioaddr);
- goto failed_remap_csr;
- }
-
- ret = register_spectra_ftl();
- if (ret) {
- iounmap(pndev->ioaddr);
- goto failed_remap_csr;
- }
-
- return 0;
-
-failed_remap_csr:
- pci_release_regions(dev);
-failed_req_csr:
- iounmap(FlashMem);
- iounmap(FlashReg);
-failed_disable:
- pci_disable_device(dev);
-
- return ret;
-}
-
-static void nand_pci_remove(struct pci_dev *dev)
-{
- struct mrst_nand_info *pndev = pci_get_drvdata(dev);
-
- nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
-#if CMD_DMA
- free_irq(dev->irq, pndev);
-#endif
- iounmap(pndev->ioaddr);
- pci_release_regions(dev);
- pci_disable_device(dev);
-}
-
-MODULE_DEVICE_TABLE(pci, nand_pci_ids);
-
-static struct pci_driver nand_pci_driver = {
- .name = SPECTRA_NAND_NAME,
- .id_table = nand_pci_ids,
- .probe = nand_pci_probe,
- .remove = nand_pci_remove,
-};
-
-int NAND_Flash_Init(void)
-{
- int retval;
-
- nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
- __FILE__, __LINE__, __func__);
-
- retval = pci_register_driver(&nand_pci_driver);
- if (retval)
- return -ENOMEM;
-
- return PASS;
-}
-
-/* Free memory */
-int nand_release_spectra(void)
-{
- pci_unregister_driver(&nand_pci_driver);
- iounmap(FlashMem);
- iounmap(FlashReg);
-
- return 0;
-}
-
-
-
diff --git a/drivers/staging/spectra/lld_nand.h b/drivers/staging/spectra/lld_nand.h
deleted file mode 100644
index d083882..0000000
--- a/drivers/staging/spectra/lld_nand.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#ifndef _LLD_NAND_
-#define _LLD_NAND_
-
-#ifdef ELDORA
-#include "defs.h"
-#else
-#include "flash.h"
-#include "ffsport.h"
-#endif
-
-#define MODE_00 0x00000000
-#define MODE_01 0x04000000
-#define MODE_10 0x08000000
-#define MODE_11 0x0C000000
-
-
-#define DATA_TRANSFER_MODE 0
-#define PROTECTION_PER_BLOCK 1
-#define LOAD_WAIT_COUNT 2
-#define PROGRAM_WAIT_COUNT 3
-#define ERASE_WAIT_COUNT 4
-#define INT_MONITOR_CYCLE_COUNT 5
-#define READ_BUSY_PIN_ENABLED 6
-#define MULTIPLANE_OPERATION_SUPPORT 7
-#define PRE_FETCH_MODE 8
-#define CE_DONT_CARE_SUPPORT 9
-#define COPYBACK_SUPPORT 10
-#define CACHE_WRITE_SUPPORT 11
-#define CACHE_READ_SUPPORT 12
-#define NUM_PAGES_IN_BLOCK 13
-#define ECC_ENABLE_SELECT 14
-#define WRITE_ENABLE_2_READ_ENABLE 15
-#define ADDRESS_2_DATA 16
-#define READ_ENABLE_2_WRITE_ENABLE 17
-#define TWO_ROW_ADDRESS_CYCLES 18
-#define MULTIPLANE_ADDRESS_RESTRICT 19
-#define ACC_CLOCKS 20
-#define READ_WRITE_ENABLE_LOW_COUNT 21
-#define READ_WRITE_ENABLE_HIGH_COUNT 22
-
-#define ECC_SECTOR_SIZE 512
-#define LLD_MAX_FLASH_BANKS 4
-
-struct mrst_nand_info {
- struct pci_dev *dev;
- u32 state;
- u32 flash_bank;
- u8 *read_data;
- u8 *write_data;
- u32 block;
- u16 page;
- u32 use_dma;
- void __iomem *ioaddr; /* Mapped io reg base address */
- int ret;
- u32 pcmds_num;
- struct pending_cmd *pcmds;
- int cdma_num; /* CDMA descriptor number in this chan */
- u8 *cdma_desc_buf; /* CDMA descriptor table */
- u8 *memcp_desc_buf; /* Memory copy descriptor table */
- dma_addr_t cdma_desc; /* Mapped CDMA descriptor table */
- dma_addr_t memcp_desc; /* Mapped memory copy descriptor table */
- struct completion complete;
-};
-
-int NAND_Flash_Init(void);
-int nand_release_spectra(void);
-u16 NAND_Flash_Reset(void);
-u16 NAND_Read_Device_ID(void);
-u16 NAND_Erase_Block(u32 flash_add);
-u16 NAND_Write_Page_Main(u8 *write_data, u32 block, u16 page,
- u16 page_count);
-u16 NAND_Read_Page_Main(u8 *read_data, u32 block, u16 page,
- u16 page_count);
-u16 NAND_UnlockArrayAll(void);
-u16 NAND_Write_Page_Main_Spare(u8 *write_data, u32 block,
- u16 page, u16 page_count);
-u16 NAND_Write_Page_Spare(u8 *read_data, u32 block, u16 page,
- u16 page_count);
-u16 NAND_Read_Page_Main_Spare(u8 *read_data, u32 block, u16 page,
- u16 page_count);
-u16 NAND_Read_Page_Spare(u8 *read_data, u32 block, u16 page,
- u16 page_count);
-void NAND_LLD_Enable_Disable_Interrupts(u16 INT_ENABLE);
-u16 NAND_Get_Bad_Block(u32 block);
-u16 NAND_Pipeline_Read_Ahead(u8 *read_data, u32 block, u16 page,
- u16 page_count);
-u16 NAND_Pipeline_Write_Ahead(u8 *write_data, u32 block,
- u16 page, u16 page_count);
-u16 NAND_Multiplane_Read(u8 *read_data, u32 block, u16 page,
- u16 page_count);
-u16 NAND_Multiplane_Write(u8 *write_data, u32 block, u16 page,
- u16 page_count);
-void NAND_ECC_Ctrl(int enable);
-u16 NAND_Read_Page_Main_Polling(u8 *read_data,
- u32 block, u16 page, u16 page_count);
-u16 NAND_Pipeline_Read_Ahead_Polling(u8 *read_data,
- u32 block, u16 page, u16 page_count);
-void Conv_Spare_Data_Log2Phy_Format(u8 *data);
-void Conv_Spare_Data_Phy2Log_Format(u8 *data);
-void Conv_Main_Spare_Data_Log2Phy_Format(u8 *data, u16 page_count);
-void Conv_Main_Spare_Data_Phy2Log_Format(u8 *data, u16 page_count);
-
-extern void __iomem *FlashReg;
-extern void __iomem *FlashMem;
-
-extern int totalUsedBanks;
-extern u32 GLOB_valid_banks[LLD_MAX_FLASH_BANKS];
-
-#endif /*_LLD_NAND_*/
-
-
-
diff --git a/drivers/staging/spectra/nand_regs.h b/drivers/staging/spectra/nand_regs.h
deleted file mode 100644
index e192e4a..0000000
--- a/drivers/staging/spectra/nand_regs.h
+++ /dev/null
@@ -1,619 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#define DEVICE_RESET 0x0
-#define DEVICE_RESET__BANK0 0x0001
-#define DEVICE_RESET__BANK1 0x0002
-#define DEVICE_RESET__BANK2 0x0004
-#define DEVICE_RESET__BANK3 0x0008
-
-#define TRANSFER_SPARE_REG 0x10
-#define TRANSFER_SPARE_REG__FLAG 0x0001
-
-#define LOAD_WAIT_CNT 0x20
-#define LOAD_WAIT_CNT__VALUE 0xffff
-
-#define PROGRAM_WAIT_CNT 0x30
-#define PROGRAM_WAIT_CNT__VALUE 0xffff
-
-#define ERASE_WAIT_CNT 0x40
-#define ERASE_WAIT_CNT__VALUE 0xffff
-
-#define INT_MON_CYCCNT 0x50
-#define INT_MON_CYCCNT__VALUE 0xffff
-
-#define RB_PIN_ENABLED 0x60
-#define RB_PIN_ENABLED__BANK0 0x0001
-#define RB_PIN_ENABLED__BANK1 0x0002
-#define RB_PIN_ENABLED__BANK2 0x0004
-#define RB_PIN_ENABLED__BANK3 0x0008
-
-#define MULTIPLANE_OPERATION 0x70
-#define MULTIPLANE_OPERATION__FLAG 0x0001
-
-#define MULTIPLANE_READ_ENABLE 0x80
-#define MULTIPLANE_READ_ENABLE__FLAG 0x0001
-
-#define COPYBACK_DISABLE 0x90
-#define COPYBACK_DISABLE__FLAG 0x0001
-
-#define CACHE_WRITE_ENABLE 0xa0
-#define CACHE_WRITE_ENABLE__FLAG 0x0001
-
-#define CACHE_READ_ENABLE 0xb0
-#define CACHE_READ_ENABLE__FLAG 0x0001
-
-#define PREFETCH_MODE 0xc0
-#define PREFETCH_MODE__PREFETCH_EN 0x0001
-#define PREFETCH_MODE__PREFETCH_BURST_LENGTH 0xfff0
-
-#define CHIP_ENABLE_DONT_CARE 0xd0
-#define CHIP_EN_DONT_CARE__FLAG 0x01
-
-#define ECC_ENABLE 0xe0
-#define ECC_ENABLE__FLAG 0x0001
-
-#define GLOBAL_INT_ENABLE 0xf0
-#define GLOBAL_INT_EN_FLAG 0x01
-
-#define WE_2_RE 0x100
-#define WE_2_RE__VALUE 0x003f
-
-#define ADDR_2_DATA 0x110
-#define ADDR_2_DATA__VALUE 0x003f
-
-#define RE_2_WE 0x120
-#define RE_2_WE__VALUE 0x003f
-
-#define ACC_CLKS 0x130
-#define ACC_CLKS__VALUE 0x000f
-
-#define NUMBER_OF_PLANES 0x140
-#define NUMBER_OF_PLANES__VALUE 0x0007
-
-#define PAGES_PER_BLOCK 0x150
-#define PAGES_PER_BLOCK__VALUE 0xffff
-
-#define DEVICE_WIDTH 0x160
-#define DEVICE_WIDTH__VALUE 0x0003
-
-#define DEVICE_MAIN_AREA_SIZE 0x170
-#define DEVICE_MAIN_AREA_SIZE__VALUE 0xffff
-
-#define DEVICE_SPARE_AREA_SIZE 0x180
-#define DEVICE_SPARE_AREA_SIZE__VALUE 0xffff
-
-#define TWO_ROW_ADDR_CYCLES 0x190
-#define TWO_ROW_ADDR_CYCLES__FLAG 0x0001
-
-#define MULTIPLANE_ADDR_RESTRICT 0x1a0
-#define MULTIPLANE_ADDR_RESTRICT__FLAG 0x0001
-
-#define ECC_CORRECTION 0x1b0
-#define ECC_CORRECTION__VALUE 0x001f
-
-#define READ_MODE 0x1c0
-#define READ_MODE__VALUE 0x000f
-
-#define WRITE_MODE 0x1d0
-#define WRITE_MODE__VALUE 0x000f
-
-#define COPYBACK_MODE 0x1e0
-#define COPYBACK_MODE__VALUE 0x000f
-
-#define RDWR_EN_LO_CNT 0x1f0
-#define RDWR_EN_LO_CNT__VALUE 0x001f
-
-#define RDWR_EN_HI_CNT 0x200
-#define RDWR_EN_HI_CNT__VALUE 0x001f
-
-#define MAX_RD_DELAY 0x210
-#define MAX_RD_DELAY__VALUE 0x000f
-
-#define CS_SETUP_CNT 0x220
-#define CS_SETUP_CNT__VALUE 0x001f
-
-#define SPARE_AREA_SKIP_BYTES 0x230
-#define SPARE_AREA_SKIP_BYTES__VALUE 0x003f
-
-#define SPARE_AREA_MARKER 0x240
-#define SPARE_AREA_MARKER__VALUE 0xffff
-
-#define DEVICES_CONNECTED 0x250
-#define DEVICES_CONNECTED__VALUE 0x0007
-
-#define DIE_MASK 0x260
-#define DIE_MASK__VALUE 0x00ff
-
-#define FIRST_BLOCK_OF_NEXT_PLANE 0x270
-#define FIRST_BLOCK_OF_NEXT_PLANE__VALUE 0xffff
-
-#define WRITE_PROTECT 0x280
-#define WRITE_PROTECT__FLAG 0x0001
-
-#define RE_2_RE 0x290
-#define RE_2_RE__VALUE 0x003f
-
-#define MANUFACTURER_ID 0x300
-#define MANUFACTURER_ID__VALUE 0x00ff
-
-#define DEVICE_ID 0x310
-#define DEVICE_ID__VALUE 0x00ff
-
-#define DEVICE_PARAM_0 0x320
-#define DEVICE_PARAM_0__VALUE 0x00ff
-
-#define DEVICE_PARAM_1 0x330
-#define DEVICE_PARAM_1__VALUE 0x00ff
-
-#define DEVICE_PARAM_2 0x340
-#define DEVICE_PARAM_2__VALUE 0x00ff
-
-#define LOGICAL_PAGE_DATA_SIZE 0x350
-#define LOGICAL_PAGE_DATA_SIZE__VALUE 0xffff
-
-#define LOGICAL_PAGE_SPARE_SIZE 0x360
-#define LOGICAL_PAGE_SPARE_SIZE__VALUE 0xffff
-
-#define REVISION 0x370
-#define REVISION__VALUE 0xffff
-
-#define ONFI_DEVICE_FEATURES 0x380
-#define ONFI_DEVICE_FEATURES__VALUE 0x003f
-
-#define ONFI_OPTIONAL_COMMANDS 0x390
-#define ONFI_OPTIONAL_COMMANDS__VALUE 0x003f
-
-#define ONFI_TIMING_MODE 0x3a0
-#define ONFI_TIMING_MODE__VALUE 0x003f
-
-#define ONFI_PGM_CACHE_TIMING_MODE 0x3b0
-#define ONFI_PGM_CACHE_TIMING_MODE__VALUE 0x003f
-
-#define ONFI_DEVICE_NO_OF_LUNS 0x3c0
-#define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS 0x00ff
-#define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE 0x0100
-
-#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L 0x3d0
-#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE 0xffff
-
-#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U 0x3e0
-#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE 0xffff
-
-#define FEATURES 0x3f0
-#define FEATURES__N_BANKS 0x0003
-#define FEATURES__ECC_MAX_ERR 0x003c
-#define FEATURES__DMA 0x0040
-#define FEATURES__CMD_DMA 0x0080
-#define FEATURES__PARTITION 0x0100
-#define FEATURES__XDMA_SIDEBAND 0x0200
-#define FEATURES__GPREG 0x0400
-#define FEATURES__INDEX_ADDR 0x0800
-
-#define TRANSFER_MODE 0x400
-#define TRANSFER_MODE__VALUE 0x0003
-
-#define INTR_STATUS0 0x410
-#define INTR_STATUS0__ECC_TRANSACTION_DONE 0x0001
-#define INTR_STATUS0__ECC_ERR 0x0002
-#define INTR_STATUS0__DMA_CMD_COMP 0x0004
-#define INTR_STATUS0__TIME_OUT 0x0008
-#define INTR_STATUS0__PROGRAM_FAIL 0x0010
-#define INTR_STATUS0__ERASE_FAIL 0x0020
-#define INTR_STATUS0__LOAD_COMP 0x0040
-#define INTR_STATUS0__PROGRAM_COMP 0x0080
-#define INTR_STATUS0__ERASE_COMP 0x0100
-#define INTR_STATUS0__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_STATUS0__LOCKED_BLK 0x0400
-#define INTR_STATUS0__UNSUP_CMD 0x0800
-#define INTR_STATUS0__INT_ACT 0x1000
-#define INTR_STATUS0__RST_COMP 0x2000
-#define INTR_STATUS0__PIPE_CMD_ERR 0x4000
-#define INTR_STATUS0__PAGE_XFER_INC 0x8000
-
-#define INTR_EN0 0x420
-#define INTR_EN0__ECC_TRANSACTION_DONE 0x0001
-#define INTR_EN0__ECC_ERR 0x0002
-#define INTR_EN0__DMA_CMD_COMP 0x0004
-#define INTR_EN0__TIME_OUT 0x0008
-#define INTR_EN0__PROGRAM_FAIL 0x0010
-#define INTR_EN0__ERASE_FAIL 0x0020
-#define INTR_EN0__LOAD_COMP 0x0040
-#define INTR_EN0__PROGRAM_COMP 0x0080
-#define INTR_EN0__ERASE_COMP 0x0100
-#define INTR_EN0__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_EN0__LOCKED_BLK 0x0400
-#define INTR_EN0__UNSUP_CMD 0x0800
-#define INTR_EN0__INT_ACT 0x1000
-#define INTR_EN0__RST_COMP 0x2000
-#define INTR_EN0__PIPE_CMD_ERR 0x4000
-#define INTR_EN0__PAGE_XFER_INC 0x8000
-
-#define PAGE_CNT0 0x430
-#define PAGE_CNT0__VALUE 0x00ff
-
-#define ERR_PAGE_ADDR0 0x440
-#define ERR_PAGE_ADDR0__VALUE 0xffff
-
-#define ERR_BLOCK_ADDR0 0x450
-#define ERR_BLOCK_ADDR0__VALUE 0xffff
-
-#define INTR_STATUS1 0x460
-#define INTR_STATUS1__ECC_TRANSACTION_DONE 0x0001
-#define INTR_STATUS1__ECC_ERR 0x0002
-#define INTR_STATUS1__DMA_CMD_COMP 0x0004
-#define INTR_STATUS1__TIME_OUT 0x0008
-#define INTR_STATUS1__PROGRAM_FAIL 0x0010
-#define INTR_STATUS1__ERASE_FAIL 0x0020
-#define INTR_STATUS1__LOAD_COMP 0x0040
-#define INTR_STATUS1__PROGRAM_COMP 0x0080
-#define INTR_STATUS1__ERASE_COMP 0x0100
-#define INTR_STATUS1__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_STATUS1__LOCKED_BLK 0x0400
-#define INTR_STATUS1__UNSUP_CMD 0x0800
-#define INTR_STATUS1__INT_ACT 0x1000
-#define INTR_STATUS1__RST_COMP 0x2000
-#define INTR_STATUS1__PIPE_CMD_ERR 0x4000
-#define INTR_STATUS1__PAGE_XFER_INC 0x8000
-
-#define INTR_EN1 0x470
-#define INTR_EN1__ECC_TRANSACTION_DONE 0x0001
-#define INTR_EN1__ECC_ERR 0x0002
-#define INTR_EN1__DMA_CMD_COMP 0x0004
-#define INTR_EN1__TIME_OUT 0x0008
-#define INTR_EN1__PROGRAM_FAIL 0x0010
-#define INTR_EN1__ERASE_FAIL 0x0020
-#define INTR_EN1__LOAD_COMP 0x0040
-#define INTR_EN1__PROGRAM_COMP 0x0080
-#define INTR_EN1__ERASE_COMP 0x0100
-#define INTR_EN1__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_EN1__LOCKED_BLK 0x0400
-#define INTR_EN1__UNSUP_CMD 0x0800
-#define INTR_EN1__INT_ACT 0x1000
-#define INTR_EN1__RST_COMP 0x2000
-#define INTR_EN1__PIPE_CMD_ERR 0x4000
-#define INTR_EN1__PAGE_XFER_INC 0x8000
-
-#define PAGE_CNT1 0x480
-#define PAGE_CNT1__VALUE 0x00ff
-
-#define ERR_PAGE_ADDR1 0x490
-#define ERR_PAGE_ADDR1__VALUE 0xffff
-
-#define ERR_BLOCK_ADDR1 0x4a0
-#define ERR_BLOCK_ADDR1__VALUE 0xffff
-
-#define INTR_STATUS2 0x4b0
-#define INTR_STATUS2__ECC_TRANSACTION_DONE 0x0001
-#define INTR_STATUS2__ECC_ERR 0x0002
-#define INTR_STATUS2__DMA_CMD_COMP 0x0004
-#define INTR_STATUS2__TIME_OUT 0x0008
-#define INTR_STATUS2__PROGRAM_FAIL 0x0010
-#define INTR_STATUS2__ERASE_FAIL 0x0020
-#define INTR_STATUS2__LOAD_COMP 0x0040
-#define INTR_STATUS2__PROGRAM_COMP 0x0080
-#define INTR_STATUS2__ERASE_COMP 0x0100
-#define INTR_STATUS2__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_STATUS2__LOCKED_BLK 0x0400
-#define INTR_STATUS2__UNSUP_CMD 0x0800
-#define INTR_STATUS2__INT_ACT 0x1000
-#define INTR_STATUS2__RST_COMP 0x2000
-#define INTR_STATUS2__PIPE_CMD_ERR 0x4000
-#define INTR_STATUS2__PAGE_XFER_INC 0x8000
-
-#define INTR_EN2 0x4c0
-#define INTR_EN2__ECC_TRANSACTION_DONE 0x0001
-#define INTR_EN2__ECC_ERR 0x0002
-#define INTR_EN2__DMA_CMD_COMP 0x0004
-#define INTR_EN2__TIME_OUT 0x0008
-#define INTR_EN2__PROGRAM_FAIL 0x0010
-#define INTR_EN2__ERASE_FAIL 0x0020
-#define INTR_EN2__LOAD_COMP 0x0040
-#define INTR_EN2__PROGRAM_COMP 0x0080
-#define INTR_EN2__ERASE_COMP 0x0100
-#define INTR_EN2__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_EN2__LOCKED_BLK 0x0400
-#define INTR_EN2__UNSUP_CMD 0x0800
-#define INTR_EN2__INT_ACT 0x1000
-#define INTR_EN2__RST_COMP 0x2000
-#define INTR_EN2__PIPE_CMD_ERR 0x4000
-#define INTR_EN2__PAGE_XFER_INC 0x8000
-
-#define PAGE_CNT2 0x4d0
-#define PAGE_CNT2__VALUE 0x00ff
-
-#define ERR_PAGE_ADDR2 0x4e0
-#define ERR_PAGE_ADDR2__VALUE 0xffff
-
-#define ERR_BLOCK_ADDR2 0x4f0
-#define ERR_BLOCK_ADDR2__VALUE 0xffff
-
-#define INTR_STATUS3 0x500
-#define INTR_STATUS3__ECC_TRANSACTION_DONE 0x0001
-#define INTR_STATUS3__ECC_ERR 0x0002
-#define INTR_STATUS3__DMA_CMD_COMP 0x0004
-#define INTR_STATUS3__TIME_OUT 0x0008
-#define INTR_STATUS3__PROGRAM_FAIL 0x0010
-#define INTR_STATUS3__ERASE_FAIL 0x0020
-#define INTR_STATUS3__LOAD_COMP 0x0040
-#define INTR_STATUS3__PROGRAM_COMP 0x0080
-#define INTR_STATUS3__ERASE_COMP 0x0100
-#define INTR_STATUS3__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_STATUS3__LOCKED_BLK 0x0400
-#define INTR_STATUS3__UNSUP_CMD 0x0800
-#define INTR_STATUS3__INT_ACT 0x1000
-#define INTR_STATUS3__RST_COMP 0x2000
-#define INTR_STATUS3__PIPE_CMD_ERR 0x4000
-#define INTR_STATUS3__PAGE_XFER_INC 0x8000
-
-#define INTR_EN3 0x510
-#define INTR_EN3__ECC_TRANSACTION_DONE 0x0001
-#define INTR_EN3__ECC_ERR 0x0002
-#define INTR_EN3__DMA_CMD_COMP 0x0004
-#define INTR_EN3__TIME_OUT 0x0008
-#define INTR_EN3__PROGRAM_FAIL 0x0010
-#define INTR_EN3__ERASE_FAIL 0x0020
-#define INTR_EN3__LOAD_COMP 0x0040
-#define INTR_EN3__PROGRAM_COMP 0x0080
-#define INTR_EN3__ERASE_COMP 0x0100
-#define INTR_EN3__PIPE_CPYBCK_CMD_COMP 0x0200
-#define INTR_EN3__LOCKED_BLK 0x0400
-#define INTR_EN3__UNSUP_CMD 0x0800
-#define INTR_EN3__INT_ACT 0x1000
-#define INTR_EN3__RST_COMP 0x2000
-#define INTR_EN3__PIPE_CMD_ERR 0x4000
-#define INTR_EN3__PAGE_XFER_INC 0x8000
-
-#define PAGE_CNT3 0x520
-#define PAGE_CNT3__VALUE 0x00ff
-
-#define ERR_PAGE_ADDR3 0x530
-#define ERR_PAGE_ADDR3__VALUE 0xffff
-
-#define ERR_BLOCK_ADDR3 0x540
-#define ERR_BLOCK_ADDR3__VALUE 0xffff
-
-#define DATA_INTR 0x550
-#define DATA_INTR__WRITE_SPACE_AV 0x0001
-#define DATA_INTR__READ_DATA_AV 0x0002
-
-#define DATA_INTR_EN 0x560
-#define DATA_INTR_EN__WRITE_SPACE_AV 0x0001
-#define DATA_INTR_EN__READ_DATA_AV 0x0002
-
-#define GPREG_0 0x570
-#define GPREG_0__VALUE 0xffff
-
-#define GPREG_1 0x580
-#define GPREG_1__VALUE 0xffff
-
-#define GPREG_2 0x590
-#define GPREG_2__VALUE 0xffff
-
-#define GPREG_3 0x5a0
-#define GPREG_3__VALUE 0xffff
-
-#define ECC_THRESHOLD 0x600
-#define ECC_THRESHOLD__VALUE 0x03ff
-
-#define ECC_ERROR_BLOCK_ADDRESS 0x610
-#define ECC_ERROR_BLOCK_ADDRESS__VALUE 0xffff
-
-#define ECC_ERROR_PAGE_ADDRESS 0x620
-#define ECC_ERROR_PAGE_ADDRESS__VALUE 0x0fff
-#define ECC_ERROR_PAGE_ADDRESS__BANK 0xf000
-
-#define ECC_ERROR_ADDRESS 0x630
-#define ECC_ERROR_ADDRESS__OFFSET 0x0fff
-#define ECC_ERROR_ADDRESS__SECTOR_NR 0xf000
-
-#define ERR_CORRECTION_INFO 0x640
-#define ERR_CORRECTION_INFO__BYTEMASK 0x00ff
-#define ERR_CORRECTION_INFO__DEVICE_NR 0x0f00
-#define ERR_CORRECTION_INFO__ERROR_TYPE 0x4000
-#define ERR_CORRECTION_INFO__LAST_ERR_INFO 0x8000
-
-#define DMA_ENABLE 0x700
-#define DMA_ENABLE__FLAG 0x0001
-
-#define IGNORE_ECC_DONE 0x710
-#define IGNORE_ECC_DONE__FLAG 0x0001
-
-#define DMA_INTR 0x720
-#define DMA_INTR__TARGET_ERROR 0x0001
-#define DMA_INTR__DESC_COMP_CHANNEL0 0x0002
-#define DMA_INTR__DESC_COMP_CHANNEL1 0x0004
-#define DMA_INTR__DESC_COMP_CHANNEL2 0x0008
-#define DMA_INTR__DESC_COMP_CHANNEL3 0x0010
-#define DMA_INTR__MEMCOPY_DESC_COMP 0x0020
-
-#define DMA_INTR_EN 0x730
-#define DMA_INTR_EN__TARGET_ERROR 0x0001
-#define DMA_INTR_EN__DESC_COMP_CHANNEL0 0x0002
-#define DMA_INTR_EN__DESC_COMP_CHANNEL1 0x0004
-#define DMA_INTR_EN__DESC_COMP_CHANNEL2 0x0008
-#define DMA_INTR_EN__DESC_COMP_CHANNEL3 0x0010
-#define DMA_INTR_EN__MEMCOPY_DESC_COMP 0x0020
-
-#define TARGET_ERR_ADDR_LO 0x740
-#define TARGET_ERR_ADDR_LO__VALUE 0xffff
-
-#define TARGET_ERR_ADDR_HI 0x750
-#define TARGET_ERR_ADDR_HI__VALUE 0xffff
-
-#define CHNL_ACTIVE 0x760
-#define CHNL_ACTIVE__CHANNEL0 0x0001
-#define CHNL_ACTIVE__CHANNEL1 0x0002
-#define CHNL_ACTIVE__CHANNEL2 0x0004
-#define CHNL_ACTIVE__CHANNEL3 0x0008
-
-#define ACTIVE_SRC_ID 0x800
-#define ACTIVE_SRC_ID__VALUE 0x00ff
-
-#define PTN_INTR 0x810
-#define PTN_INTR__CONFIG_ERROR 0x0001
-#define PTN_INTR__ACCESS_ERROR_BANK0 0x0002
-#define PTN_INTR__ACCESS_ERROR_BANK1 0x0004
-#define PTN_INTR__ACCESS_ERROR_BANK2 0x0008
-#define PTN_INTR__ACCESS_ERROR_BANK3 0x0010
-#define PTN_INTR__REG_ACCESS_ERROR 0x0020
-
-#define PTN_INTR_EN 0x820
-#define PTN_INTR_EN__CONFIG_ERROR 0x0001
-#define PTN_INTR_EN__ACCESS_ERROR_BANK0 0x0002
-#define PTN_INTR_EN__ACCESS_ERROR_BANK1 0x0004
-#define PTN_INTR_EN__ACCESS_ERROR_BANK2 0x0008
-#define PTN_INTR_EN__ACCESS_ERROR_BANK3 0x0010
-#define PTN_INTR_EN__REG_ACCESS_ERROR 0x0020
-
-#define PERM_SRC_ID_0 0x830
-#define PERM_SRC_ID_0__SRCID 0x00ff
-#define PERM_SRC_ID_0__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_0__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_0__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_0__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_0 0x840
-#define MIN_BLK_ADDR_0__VALUE 0xffff
-
-#define MAX_BLK_ADDR_0 0x850
-#define MAX_BLK_ADDR_0__VALUE 0xffff
-
-#define MIN_MAX_BANK_0 0x860
-#define MIN_MAX_BANK_0__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_0__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_1 0x870
-#define PERM_SRC_ID_1__SRCID 0x00ff
-#define PERM_SRC_ID_1__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_1__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_1__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_1__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_1 0x880
-#define MIN_BLK_ADDR_1__VALUE 0xffff
-
-#define MAX_BLK_ADDR_1 0x890
-#define MAX_BLK_ADDR_1__VALUE 0xffff
-
-#define MIN_MAX_BANK_1 0x8a0
-#define MIN_MAX_BANK_1__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_1__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_2 0x8b0
-#define PERM_SRC_ID_2__SRCID 0x00ff
-#define PERM_SRC_ID_2__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_2__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_2__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_2__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_2 0x8c0
-#define MIN_BLK_ADDR_2__VALUE 0xffff
-
-#define MAX_BLK_ADDR_2 0x8d0
-#define MAX_BLK_ADDR_2__VALUE 0xffff
-
-#define MIN_MAX_BANK_2 0x8e0
-#define MIN_MAX_BANK_2__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_2__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_3 0x8f0
-#define PERM_SRC_ID_3__SRCID 0x00ff
-#define PERM_SRC_ID_3__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_3__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_3__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_3__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_3 0x900
-#define MIN_BLK_ADDR_3__VALUE 0xffff
-
-#define MAX_BLK_ADDR_3 0x910
-#define MAX_BLK_ADDR_3__VALUE 0xffff
-
-#define MIN_MAX_BANK_3 0x920
-#define MIN_MAX_BANK_3__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_3__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_4 0x930
-#define PERM_SRC_ID_4__SRCID 0x00ff
-#define PERM_SRC_ID_4__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_4__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_4__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_4__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_4 0x940
-#define MIN_BLK_ADDR_4__VALUE 0xffff
-
-#define MAX_BLK_ADDR_4 0x950
-#define MAX_BLK_ADDR_4__VALUE 0xffff
-
-#define MIN_MAX_BANK_4 0x960
-#define MIN_MAX_BANK_4__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_4__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_5 0x970
-#define PERM_SRC_ID_5__SRCID 0x00ff
-#define PERM_SRC_ID_5__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_5__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_5__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_5__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_5 0x980
-#define MIN_BLK_ADDR_5__VALUE 0xffff
-
-#define MAX_BLK_ADDR_5 0x990
-#define MAX_BLK_ADDR_5__VALUE 0xffff
-
-#define MIN_MAX_BANK_5 0x9a0
-#define MIN_MAX_BANK_5__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_5__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_6 0x9b0
-#define PERM_SRC_ID_6__SRCID 0x00ff
-#define PERM_SRC_ID_6__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_6__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_6__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_6__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_6 0x9c0
-#define MIN_BLK_ADDR_6__VALUE 0xffff
-
-#define MAX_BLK_ADDR_6 0x9d0
-#define MAX_BLK_ADDR_6__VALUE 0xffff
-
-#define MIN_MAX_BANK_6 0x9e0
-#define MIN_MAX_BANK_6__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_6__MAX_VALUE 0x000c
-
-#define PERM_SRC_ID_7 0x9f0
-#define PERM_SRC_ID_7__SRCID 0x00ff
-#define PERM_SRC_ID_7__DIRECT_ACCESS_ACTIVE 0x0800
-#define PERM_SRC_ID_7__WRITE_ACTIVE 0x2000
-#define PERM_SRC_ID_7__READ_ACTIVE 0x4000
-#define PERM_SRC_ID_7__PARTITION_VALID 0x8000
-
-#define MIN_BLK_ADDR_7 0xa00
-#define MIN_BLK_ADDR_7__VALUE 0xffff
-
-#define MAX_BLK_ADDR_7 0xa10
-#define MAX_BLK_ADDR_7__VALUE 0xffff
-
-#define MIN_MAX_BANK_7 0xa20
-#define MIN_MAX_BANK_7__MIN_VALUE 0x0003
-#define MIN_MAX_BANK_7__MAX_VALUE 0x000c
diff --git a/drivers/staging/spectra/spectraswconfig.h b/drivers/staging/spectra/spectraswconfig.h
deleted file mode 100644
index 1725946..0000000
--- a/drivers/staging/spectra/spectraswconfig.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * NAND Flash Controller Device Driver
- * Copyright (c) 2009, Intel Corporation and its suppliers.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#ifndef _SPECTRASWCONFIG_
-#define _SPECTRASWCONFIG_
-
-/* NAND driver version */
-#define GLOB_VERSION "driver version 20100311"
-
-
-/***** Common Parameters *****/
-#define RETRY_TIMES 3
-
-#define READ_BADBLOCK_INFO 1
-#define READBACK_VERIFY 0
-#define AUTO_FORMAT_FLASH 0
-
-/***** Cache Parameters *****/
-#define CACHE_ITEM_NUM 128
-#define BLK_NUM_FOR_L2_CACHE 16
-
-/***** Block Table Parameters *****/
-#define BLOCK_TABLE_INDEX 0
-
-/***** Wear Leveling Parameters *****/
-#define WEAR_LEVELING_GATE 0x10
-#define WEAR_LEVELING_BLOCK_NUM 10
-
-#define DEBUG_BNDRY 0
-
-/***** Product Feature Support *****/
-#define FLASH_EMU defined(CONFIG_SPECTRA_EMU)
-#define FLASH_NAND defined(CONFIG_SPECTRA_MRST_HW)
-#define FLASH_MTD defined(CONFIG_SPECTRA_MTD)
-#define CMD_DMA defined(CONFIG_SPECTRA_MRST_HW_DMA)
-
-#define SPECTRA_PARTITION_ID 0
-
-/* Enable this macro if the number of flash blocks is larger than 16K. */
-#define SUPPORT_LARGE_BLOCKNUM 1
-
-/**** Block Table and Reserved Block Parameters *****/
-#define SPECTRA_START_BLOCK 3
-//#define NUM_FREE_BLOCKS_GATE 30
-#define NUM_FREE_BLOCKS_GATE 60
-
-/**** Hardware Parameters ****/
-#define GLOB_HWCTL_REG_BASE 0xFFA40000
-#define GLOB_HWCTL_REG_SIZE 4096
-
-#define GLOB_HWCTL_MEM_BASE 0xFFA48000
-#define GLOB_HWCTL_MEM_SIZE 4096
-
-/* KBV - Updated to LNW scratch register address */
-#define SCRATCH_REG_ADDR 0xFF108018
-#define SCRATCH_REG_SIZE 64
-
-#define GLOB_HWCTL_DEFAULT_BLKS 2048
-
-#define SUPPORT_15BITECC 1
-#define SUPPORT_8BITECC 1
-
-#define ONFI_BLOOM_TIME 0
-#define MODE5_WORKAROUND 1
-
-#endif /*_SPECTRASWCONFIG_*/
diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
index 93de4f2..21a559e 100644
--- a/drivers/staging/tidspbridge/Kconfig
+++ b/drivers/staging/tidspbridge/Kconfig
@@ -78,7 +78,7 @@ config TIDSPBRIDGE_NTFY_PWRERR
bool "Notify power errors"
depends on TIDSPBRIDGE
help
- Enable notifications to registered clients on the event of power errror
+ Enable notifications to registered clients on the event of power error
trying to suspend bridge driver. Say Y, to signal this event as a fatal
error, this will require a bridge restart to recover.
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index e1c4492..dde559d 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -1046,8 +1046,6 @@ static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
/* Free the driver's device context: */
kfree(drv_datap->base_img);
- kfree(drv_datap);
- dev_set_drvdata(bridge, NULL);
kfree((void *)dev_ctxt);
return status;
}
diff --git a/drivers/staging/tidspbridge/rmgr/dbdcd.c b/drivers/staging/tidspbridge/rmgr/dbdcd.c
index a7e407e..fda2402 100644
--- a/drivers/staging/tidspbridge/rmgr/dbdcd.c
+++ b/drivers/staging/tidspbridge/rmgr/dbdcd.c
@@ -285,7 +285,7 @@ int dcd_enumerate_object(s32 index, enum dsp_dcdobjtype obj_type,
enum_refs = 0;
/*
- * TODO: Revisit, this is not an errror case but code
+ * TODO: Revisit, this is not an error case but code
* expects non-zero value.
*/
status = ENODATA;
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index 76cfc6e..385740b 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -410,6 +410,9 @@ static int __devexit omap34_xx_bridge_remove(struct platform_device *pdev)
DBC_ASSERT(ret == true);
}
+ kfree(drv_datap);
+ dev_set_drvdata(bridge, NULL);
+
func_cont:
mem_ext_phys_pool_release();
@@ -500,35 +503,42 @@ static int bridge_open(struct inode *ip, struct file *filp)
}
#endif
pr_ctxt = kzalloc(sizeof(struct process_context), GFP_KERNEL);
- if (pr_ctxt) {
- pr_ctxt->res_state = PROC_RES_ALLOCATED;
- spin_lock_init(&pr_ctxt->dmm_map_lock);
- INIT_LIST_HEAD(&pr_ctxt->dmm_map_list);
- spin_lock_init(&pr_ctxt->dmm_rsv_lock);
- INIT_LIST_HEAD(&pr_ctxt->dmm_rsv_list);
-
- pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
- if (pr_ctxt->node_id) {
- idr_init(pr_ctxt->node_id);
- } else {
- status = -ENOMEM;
- goto err;
- }
+ if (!pr_ctxt)
+ return -ENOMEM;
+
+ pr_ctxt->res_state = PROC_RES_ALLOCATED;
+ spin_lock_init(&pr_ctxt->dmm_map_lock);
+ INIT_LIST_HEAD(&pr_ctxt->dmm_map_list);
+ spin_lock_init(&pr_ctxt->dmm_rsv_lock);
+ INIT_LIST_HEAD(&pr_ctxt->dmm_rsv_list);
- pr_ctxt->stream_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
- if (pr_ctxt->stream_id)
- idr_init(pr_ctxt->stream_id);
- else
- status = -ENOMEM;
- } else {
+ pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
+ if (!pr_ctxt->node_id) {
status = -ENOMEM;
+ goto err1;
}
-err:
+
+ idr_init(pr_ctxt->node_id);
+
+ pr_ctxt->stream_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
+ if (!pr_ctxt->stream_id) {
+ status = -ENOMEM;
+ goto err2;
+ }
+
+ idr_init(pr_ctxt->stream_id);
+
filp->private_data = pr_ctxt;
+
#ifdef CONFIG_TIDSPBRIDGE_RECOVERY
- if (!status)
- atomic_inc(&bridge_cref);
+ atomic_inc(&bridge_cref);
#endif
+ return 0;
+
+err2:
+ kfree(pr_ctxt->node_id);
+err1:
+ kfree(pr_ctxt);
return status;
}
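
The bridge_open() hunk above replaces nested allocation handling with the usual forward-goto unwind, freeing only what has already been set up. A condensed sketch of that pattern under the same assumptions (two kzalloc'd idr tables hung off a context structure; ctx and example_open are illustrative names, not the driver's own):

    struct ctx {
            struct idr *node_id;
            struct idr *stream_id;
    };

    static int example_open(struct ctx **out)
    {
            struct ctx *c;

            c = kzalloc(sizeof(*c), GFP_KERNEL);
            if (!c)
                    return -ENOMEM;

            c->node_id = kzalloc(sizeof(*c->node_id), GFP_KERNEL);
            if (!c->node_id)
                    goto err_ctx;
            idr_init(c->node_id);

            c->stream_id = kzalloc(sizeof(*c->stream_id), GFP_KERNEL);
            if (!c->stream_id)
                    goto err_node;
            idr_init(c->stream_id);

            *out = c;
            return 0;

    err_node:
            kfree(c->node_id);      /* unwind in reverse order of setup */
    err_ctx:
            kfree(c);
            return -ENOMEM;
    }

The matching bridge_release() hunk below frees node_id and stream_id as well, so a successful open is now balanced on release instead of leaking the idr tables.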
@@ -550,6 +560,8 @@ static int bridge_release(struct inode *ip, struct file *filp)
flush_signals(current);
drv_remove_all_resources(pr_ctxt);
proc_detach(pr_ctxt);
+ kfree(pr_ctxt->node_id);
+ kfree(pr_ctxt->stream_id);
kfree(pr_ctxt);
filp->private_data = NULL;
diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c
index 55c0b51..03420e2 100644
--- a/drivers/staging/usbip/stub_dev.c
+++ b/drivers/staging/usbip/stub_dev.c
@@ -109,11 +109,6 @@ static ssize_t store_sockfd(struct device *dev, struct device_attribute *attr,
spin_unlock(&sdev->ud.lock);
return -EINVAL;
}
-#if 0
- setnodelay(socket);
- setkeepalive(socket);
- setreuse(socket);
-#endif
sdev->ud.tcp_socket = socket;
spin_unlock(&sdev->ud.lock);
diff --git a/drivers/staging/usbip/stub_main.c b/drivers/staging/usbip/stub_main.c
index 2d63178..705a9e5 100644
--- a/drivers/staging/usbip/stub_main.c
+++ b/drivers/staging/usbip/stub_main.c
@@ -246,8 +246,9 @@ static int __init usbip_host_init(void)
{
int ret;
- stub_priv_cache = KMEM_CACHE(stub_priv, SLAB_HWCACHE_ALIGN);
+ init_busid_table();
+ stub_priv_cache = KMEM_CACHE(stub_priv, SLAB_HWCACHE_ALIGN);
if (!stub_priv_cache) {
pr_err("kmem_cache_create failed\n");
return -ENOMEM;
@@ -266,7 +267,6 @@ static int __init usbip_host_init(void)
goto err_create_file;
}
- init_busid_table();
pr_info(DRIVER_DESC " v" USBIP_VERSION "\n");
return ret;
diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c
index 6b4e3e1..27ac363 100644
--- a/drivers/staging/usbip/stub_rx.c
+++ b/drivers/staging/usbip/stub_rx.c
@@ -564,7 +564,7 @@ static void stub_rx_pdu(struct usbip_device *ud)
memset(&pdu, 0, sizeof(pdu));
/* 1. receive a pdu header */
- ret = usbip_xmit(0, ud->tcp_socket, (char *) &pdu, sizeof(pdu), 0);
+ ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
if (ret != sizeof(pdu)) {
dev_err(dev, "recv a header, %d\n", ret);
usbip_event_add(ud, SDEV_EVENT_ERROR_TCP);
diff --git a/drivers/staging/usbip/usbip_common.c b/drivers/staging/usbip/usbip_common.c
index 3b7a847..d93e7f1 100644
--- a/drivers/staging/usbip/usbip_common.c
+++ b/drivers/staging/usbip/usbip_common.c
@@ -334,9 +334,8 @@ void usbip_dump_header(struct usbip_header *pdu)
}
EXPORT_SYMBOL_GPL(usbip_dump_header);
-/* Send/receive messages over TCP/IP. I refer drivers/block/nbd.c */
-int usbip_xmit(int send, struct socket *sock, char *buf, int size,
- int msg_flags)
+/* Receive data over TCP/IP. */
+int usbip_recv(struct socket *sock, void *buf, int size)
{
int result;
struct msghdr msg;
@@ -355,19 +354,6 @@ int usbip_xmit(int send, struct socket *sock, char *buf, int size,
return -EINVAL;
}
- if (usbip_dbg_flag_xmit) {
- if (send) {
- if (!in_interrupt())
- pr_debug("%-10s:", current->comm);
- else
- pr_debug("interrupt :");
-
- pr_debug("sending... , sock %p, buf %p, size %d, "
- "msg_flags %d\n", sock, buf, size, msg_flags);
- usbip_dump_buffer(buf, size);
- }
- }
-
do {
sock->sk->sk_allocation = GFP_NOIO;
iov.iov_base = buf;
@@ -377,42 +363,30 @@ int usbip_xmit(int send, struct socket *sock, char *buf, int size,
msg.msg_control = NULL;
msg.msg_controllen = 0;
msg.msg_namelen = 0;
- msg.msg_flags = msg_flags | MSG_NOSIGNAL;
-
- if (send)
- result = kernel_sendmsg(sock, &msg, &iov, 1, size);
- else
- result = kernel_recvmsg(sock, &msg, &iov, 1, size,
- MSG_WAITALL);
+ msg.msg_flags = MSG_NOSIGNAL;
+ result = kernel_recvmsg(sock, &msg, &iov, 1, size, MSG_WAITALL);
if (result <= 0) {
- pr_debug("%s sock %p buf %p size %u ret %d total %d\n",
- send ? "send" : "receive", sock, buf, size,
- result, total);
+ pr_debug("receive sock %p buf %p size %u ret %d total %d\n",
+ sock, buf, size, result, total);
goto err;
}
size -= result;
buf += result;
total += result;
-
} while (size > 0);
if (usbip_dbg_flag_xmit) {
- if (!send) {
- if (!in_interrupt())
- pr_debug("%-10s:", current->comm);
- else
- pr_debug("interrupt :");
-
- pr_debug("receiving....\n");
- usbip_dump_buffer(bp, osize);
- pr_debug("received, osize %d ret %d size %d total %d\n",
- osize, result, size, total);
- }
+ if (!in_interrupt())
+ pr_debug("%-10s:", current->comm);
+ else
+ pr_debug("interrupt :");
- if (send)
- pr_debug("send, total %d\n", total);
+ pr_debug("receiving....\n");
+ usbip_dump_buffer(bp, osize);
+ pr_debug("received, osize %d ret %d size %d total %d\n",
+ osize, result, size, total);
}
return total;
@@ -420,7 +394,7 @@ int usbip_xmit(int send, struct socket *sock, char *buf, int size,
err:
return result;
}
-EXPORT_SYMBOL_GPL(usbip_xmit);
+EXPORT_SYMBOL_GPL(usbip_recv);
struct socket *sockfd_to_socket(unsigned int sockfd)
{
@@ -712,7 +686,7 @@ int usbip_recv_iso(struct usbip_device *ud, struct urb *urb)
if (!buff)
return -ENOMEM;
- ret = usbip_xmit(0, ud->tcp_socket, buff, size, 0);
+ ret = usbip_recv(ud->tcp_socket, buff, size);
if (ret != size) {
dev_err(&urb->dev->dev, "recv iso_frame_descriptor, %d\n",
ret);
@@ -823,8 +797,7 @@ int usbip_recv_xbuff(struct usbip_device *ud, struct urb *urb)
if (!(size > 0))
return 0;
- ret = usbip_xmit(0, ud->tcp_socket, (char *)urb->transfer_buffer,
- size, 0);
+ ret = usbip_recv(ud->tcp_socket, urb->transfer_buffer, size);
if (ret != size) {
dev_err(&urb->dev->dev, "recv xbuf, %d\n", ret);
if (ud->side == USBIP_STUB) {
diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
index be21617..b8f8c48 100644
--- a/drivers/staging/usbip/usbip_common.h
+++ b/drivers/staging/usbip/usbip_common.h
@@ -292,21 +292,11 @@ struct usbip_device {
} eh_ops;
};
-#if 0
-int usbip_sendmsg(struct socket *, struct msghdr *, int);
-int set_sockaddr(struct socket *socket, struct sockaddr_storage *ss);
-int setnodelay(struct socket *);
-int setquickack(struct socket *);
-int setkeepalive(struct socket *socket);
-void setreuse(struct socket *);
-#endif
-
/* usbip_common.c */
void usbip_dump_urb(struct urb *purb);
void usbip_dump_header(struct usbip_header *pdu);
-int usbip_xmit(int send, struct socket *sock, char *buf, int size,
- int msg_flags);
+int usbip_recv(struct socket *sock, void *buf, int size);
struct socket *sockfd_to_socket(unsigned int sockfd);
void usbip_pack_pdu(struct usbip_header *pdu, struct urb *urb, int cmd,
@@ -337,9 +327,4 @@ static inline int interface_to_devnum(struct usb_interface *interface)
return udev->devnum;
}
-static inline int interface_to_infnum(struct usb_interface *interface)
-{
- return interface->cur_altsetting->desc.bInterfaceNumber;
-}
-
#endif /* __USBIP_COMMON_H */
diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
index 3872b8c..3f511b4 100644
--- a/drivers/staging/usbip/vhci_rx.c
+++ b/drivers/staging/usbip/vhci_rx.c
@@ -206,7 +206,7 @@ static void vhci_rx_pdu(struct usbip_device *ud)
memset(&pdu, 0, sizeof(pdu));
/* 1. receive a pdu header */
- ret = usbip_xmit(0, ud->tcp_socket, (char *) &pdu, sizeof(pdu), 0);
+ ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
if (ret < 0) {
if (ret == -ECONNRESET)
pr_info("connection reset by peer\n");
diff --git a/drivers/staging/vme/TODO b/drivers/staging/vme/TODO
index 82c222b..79f0033 100644
--- a/drivers/staging/vme/TODO
+++ b/drivers/staging/vme/TODO
@@ -1,70 +1,5 @@
TODO
====
-API
-===
-
-Master window broadcast select mask
------------------------------------
-
-API currently provides no method to set or get Broadcast Select mask. Suggest
-somthing like:
-
- int vme_master_bmsk_set (struct vme_resource *res, int mask);
- int vme_master_bmsk_get (struct vme_resource *res, int *mask);
-
-
-Interrupt Generation
---------------------
-
-Add optional timeout when waiting for an IACK.
-
-
-CR/CSR Buffer
--------------
-
-The VME API provides no functions to access the buffer mapped into the CR/CSR
-space.
-
-
-Mailboxes
----------
-
-Whilst not part of the VME specification, they are provided by a number of
-chips. They are currently not supported at all by the API.
-
-
-Core
-====
-
-- Improve generic sanity checks (Such as does an offset and size fit within a
- window and parameter checking).
-
-Bridge Support
-==============
-
-Tempe (tsi148)
---------------
-
-- 2eSST Broadcast mode.
-- Mailboxes unsupported.
-- Improve error detection.
-- Control of prefetch size, threshold.
-- Arbiter control
-- Requestor control
-
-Universe II (ca91c142)
-----------------------
-
-- Mailboxes unsupported.
-- Error Detection.
-- Control of prefetch size, threshold.
-- Arbiter control
-- Requestor control
-- Slot detection
-
-Universe I (ca91x042)
----------------------
-
-Currently completely unsupported.
+- Add one or more device drivers which use the VME framework.
diff --git a/drivers/staging/vme/bridges/vme_ca91cx42.c b/drivers/staging/vme/bridges/vme_ca91cx42.c
index 0e4feac..515b8b8 100644
--- a/drivers/staging/vme/bridges/vme_ca91cx42.c
+++ b/drivers/staging/vme/bridges/vme_ca91cx42.c
@@ -338,7 +338,7 @@ static int ca91cx42_irq_generate(struct vme_bridge *ca91cx42_bridge, int level,
static int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
unsigned long long vme_base, unsigned long long size,
- dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
+ dma_addr_t pci_base, u32 aspace, u32 cycle)
{
unsigned int i, addr = 0, granularity;
unsigned int temp_ctl = 0;
@@ -444,7 +444,7 @@ static int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
unsigned long long *vme_base, unsigned long long *size,
- dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
+ dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
{
unsigned int i, granularity = 0, ctl = 0;
unsigned long long vme_bound, pci_offset;
@@ -595,8 +595,8 @@ static void ca91cx42_free_resource(struct vme_master_resource *image)
static int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
- unsigned long long vme_base, unsigned long long size,
- vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
+ unsigned long long vme_base, unsigned long long size, u32 aspace,
+ u32 cycle, u32 dwidth)
{
int retval = 0;
unsigned int i, granularity = 0;
@@ -753,7 +753,7 @@ err_window:
static int __ca91cx42_master_get(struct vme_master_resource *image,
int *enabled, unsigned long long *vme_base, unsigned long long *size,
- vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
+ u32 *aspace, u32 *cycle, u32 *dwidth)
{
unsigned int i, ctl;
unsigned long long pci_base, pci_bound, vme_offset;
@@ -839,8 +839,8 @@ static int __ca91cx42_master_get(struct vme_master_resource *image,
}
static int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
- unsigned long long *vme_base, unsigned long long *size,
- vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
+ unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
+ u32 *cycle, u32 *dwidth)
{
int retval;
@@ -876,13 +876,13 @@ static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
* maximal configured data cycle is used and splits it
* automatically for non-aligned addresses.
*/
- if ((int)addr & 0x1) {
+ if ((uintptr_t)addr & 0x1) {
*(u8 *)buf = ioread8(addr);
done += 1;
if (done == count)
goto out;
}
- if ((int)addr & 0x2) {
+ if ((uintptr_t)addr & 0x2) {
if ((count - done) < 2) {
*(u8 *)(buf + done) = ioread8(addr + done);
done += 1;
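
The comments in this hunk ("maximal configured data cycle is used and splits it automatically for non-aligned addresses") describe the access-splitting strategy: peel off a leading byte and half-word until the window pointer is 4-byte aligned, move the bulk of the transfer at the widest cycle, then mop up the tail. A rough sketch of the idea, with a hypothetical helper name and no claim to match the driver's exact loop:

    /* Assumes count > 0; addr points into the ioremap'd master window. */
    static size_t window_read_split(void *buf, void __iomem *addr, size_t count)
    {
            size_t done = 0;

            if ((uintptr_t)addr & 0x1) {                    /* odd start: one D8 cycle */
                    *(u8 *)buf = ioread8(addr);
                    done = 1;
            }
            if (((uintptr_t)(addr + done) & 0x2) && count - done >= 2) {
                    *(u16 *)(buf + done) = ioread16(addr + done);   /* D16 */
                    done += 2;
            }
            while (count - done >= 4) {                     /* aligned bulk as D32 */
                    *(u32 *)(buf + done) = ioread32(addr + done);
                    done += 4;
            }
            while (done < count) {                          /* trailing bytes */
                    *(u8 *)(buf + done) = ioread8(addr + done);
                    done += 1;
            }
            return done;
    }

The (int) to (uintptr_t) casts in the hunk keep these address checks well-defined on 64-bit builds, which is the actual point of the change.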
@@ -930,13 +930,13 @@ static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
/* Here we apply for the same strategy we do in master_read
* function in order to assure D16 cycle when required.
*/
- if ((int)addr & 0x1) {
+ if ((uintptr_t)addr & 0x1) {
iowrite8(*(u8 *)buf, addr);
done += 1;
if (done == count)
goto out;
}
- if ((int)addr & 0x2) {
+ if ((uintptr_t)addr & 0x2) {
if ((count - done) < 2) {
iowrite8(*(u8 *)(buf + done), addr + done);
done += 1;
@@ -973,7 +973,8 @@ static unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
unsigned int mask, unsigned int compare, unsigned int swap,
loff_t offset)
{
- u32 pci_addr, result;
+ u32 result;
+ uintptr_t pci_addr;
int i;
struct ca91cx42_driver *bridge;
struct device *dev;
@@ -990,7 +991,7 @@ static unsigned int ca91cx42_master_rmw(struct vme_master_resource *image,
/* Lock image */
spin_lock(&image->lock);
- pci_addr = (u32)image->kern_base + offset;
+ pci_addr = (uintptr_t)image->kern_base + offset;
/* Address must be 4-byte aligned */
if (pci_addr & 0x3) {
@@ -1291,7 +1292,7 @@ static int ca91cx42_dma_list_empty(struct vme_dma_list *list)
* callback is attached and disabled when the last callback is removed.
*/
static int ca91cx42_lm_set(struct vme_lm_resource *lm,
- unsigned long long lm_base, vme_address_t aspace, vme_cycle_t cycle)
+ unsigned long long lm_base, u32 aspace, u32 cycle)
{
u32 temp_base, lm_ctl = 0;
int i;
@@ -1359,7 +1360,7 @@ static int ca91cx42_lm_set(struct vme_lm_resource *lm,
* or disabled.
*/
static int ca91cx42_lm_get(struct vme_lm_resource *lm,
- unsigned long long *lm_base, vme_address_t *aspace, vme_cycle_t *cycle)
+ unsigned long long *lm_base, u32 *aspace, u32 *cycle)
{
u32 lm_ctl, enabled = 0;
struct ca91cx42_driver *bridge;
diff --git a/drivers/staging/vme/bridges/vme_tsi148.c b/drivers/staging/vme/bridges/vme_tsi148.c
index 6c1167c..f505821 100644
--- a/drivers/staging/vme/bridges/vme_tsi148.c
+++ b/drivers/staging/vme/bridges/vme_tsi148.c
@@ -41,7 +41,7 @@ static void __exit tsi148_exit(void);
/* Module parameter */
-static int err_chk;
+static bool err_chk;
static int geoid;
static const char driver_name[] = "vme_tsi148";
@@ -483,7 +483,7 @@ static int tsi148_irq_generate(struct vme_bridge *tsi148_bridge, int level,
* Find the first error in this address range
*/
static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
- vme_address_t aspace, unsigned long long address, size_t count)
+ u32 aspace, unsigned long long address, size_t count)
{
struct list_head *err_pos;
struct vme_bus_error *vme_err, *valid = NULL;
@@ -517,7 +517,7 @@ static struct vme_bus_error *tsi148_find_error(struct vme_bridge *tsi148_bridge,
* Clear errors in the provided address range.
*/
static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
- vme_address_t aspace, unsigned long long address, size_t count)
+ u32 aspace, unsigned long long address, size_t count)
{
struct list_head *err_pos, *temp;
struct vme_bus_error *vme_err;
@@ -551,7 +551,7 @@ static void tsi148_clear_errors(struct vme_bridge *tsi148_bridge,
*/
static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
unsigned long long vme_base, unsigned long long size,
- dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
+ dma_addr_t pci_base, u32 aspace, u32 cycle)
{
unsigned int i, addr = 0, granularity = 0;
unsigned int temp_ctl = 0;
@@ -701,7 +701,7 @@ static int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
*/
static int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
unsigned long long *vme_base, unsigned long long *size,
- dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
+ dma_addr_t *pci_base, u32 *aspace, u32 *cycle)
{
unsigned int i, granularity = 0, ctl = 0;
unsigned int vme_base_low, vme_base_high;
@@ -893,8 +893,8 @@ static void tsi148_free_resource(struct vme_master_resource *image)
* Set the attributes of an outbound window.
*/
static int tsi148_master_set(struct vme_master_resource *image, int enabled,
- unsigned long long vme_base, unsigned long long size,
- vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
+ unsigned long long vme_base, unsigned long long size, u32 aspace,
+ u32 cycle, u32 dwidth)
{
int retval = 0;
unsigned int i;
@@ -1129,8 +1129,8 @@ err_window:
* XXX Not parsing prefetch information.
*/
static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
- unsigned long long *vme_base, unsigned long long *size,
- vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
+ unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
+ u32 *cycle, u32 *dwidth)
{
unsigned int i, ctl;
unsigned int pci_base_low, pci_base_high;
@@ -1239,8 +1239,8 @@ static int __tsi148_master_get(struct vme_master_resource *image, int *enabled,
static int tsi148_master_get(struct vme_master_resource *image, int *enabled,
- unsigned long long *vme_base, unsigned long long *size,
- vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
+ unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
+ u32 *cycle, u32 *dwidth)
{
int retval;
@@ -1259,9 +1259,7 @@ static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
{
int retval, enabled;
unsigned long long vme_base, size;
- vme_address_t aspace;
- vme_cycle_t cycle;
- vme_width_t dwidth;
+ u32 aspace, cycle, dwidth;
struct vme_bus_error *vme_err = NULL;
struct vme_bridge *tsi148_bridge;
@@ -1301,9 +1299,7 @@ static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
{
int retval = 0, enabled;
unsigned long long vme_base, size;
- vme_address_t aspace;
- vme_cycle_t cycle;
- vme_width_t dwidth;
+ u32 aspace, cycle, dwidth;
struct vme_bus_error *vme_err = NULL;
struct vme_bridge *tsi148_bridge;
@@ -1420,7 +1416,7 @@ static unsigned int tsi148_master_rmw(struct vme_master_resource *image,
}
static int tsi148_dma_set_vme_src_attributes(struct device *dev, u32 *attr,
- vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
+ u32 aspace, u32 cycle, u32 dwidth)
{
/* Setup 2eSST speeds */
switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
@@ -1514,7 +1510,7 @@ static int tsi148_dma_set_vme_src_attributes(struct device *dev, u32 *attr,
}
static int tsi148_dma_set_vme_dest_attributes(struct device *dev, u32 *attr,
- vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
+ u32 aspace, u32 cycle, u32 dwidth)
{
/* Setup 2eSST speeds */
switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
@@ -1886,7 +1882,7 @@ static int tsi148_dma_list_empty(struct vme_dma_list *list)
* callback is attached and disabled when the last callback is removed.
*/
static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
- vme_address_t aspace, vme_cycle_t cycle)
+ u32 aspace, u32 cycle)
{
u32 lm_base_high, lm_base_low, lm_ctl = 0;
int i;
@@ -1953,7 +1949,7 @@ static int tsi148_lm_set(struct vme_lm_resource *lm, unsigned long long lm_base,
* or disabled.
*/
static int tsi148_lm_get(struct vme_lm_resource *lm,
- unsigned long long *lm_base, vme_address_t *aspace, vme_cycle_t *cycle)
+ unsigned long long *lm_base, u32 *aspace, u32 *cycle)
{
u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
struct tsi148_driver *bridge;
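
Across both bridge drivers this patch drops the vme_address_t, vme_cycle_t and vme_width_t typedefs in favour of plain u32; the VME_* attribute constants themselves are unchanged, so callers simply treat them as bit masks. Assuming the core vme_master_set() wrapper takes the same u32 parameters (that part of the change is not in the hunks shown here), a caller now looks roughly like:

    u32 aspace = VME_A32;                           /* address space */
    u32 cycle  = VME_SCT | VME_USER | VME_DATA;     /* cycle attributes */
    u32 dwidth = VME_D32;                           /* data width */

    retval = vme_master_set(resource, 1, 0x0, 0x10000, aspace, cycle, dwidth);

Here resource and retval are assumed to come from the surrounding code; the point is only that no framework-private enum types are involved any more.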
diff --git a/drivers/staging/vme/devices/Kconfig b/drivers/staging/vme/devices/Kconfig
index ca5ba89..55ec30c 100644
--- a/drivers/staging/vme/devices/Kconfig
+++ b/drivers/staging/vme/devices/Kconfig
@@ -6,3 +6,16 @@ config VME_USER
If you say Y here you want to be able to access a limited number of
VME windows in a manner at least semi-compatible with the interface
provided with the original driver at http://vmelinux.org/.
+
+config VME_PIO2
+ tristate "GE PIO2 VME"
+ depends on GPIOLIB
+ help
+ Say Y here to include support for the GE PIO2. The PIO2 is a 6U VME
+ slave card, implementing 32 solid-state relay switched IO lines, in
+ 4 groups of 8. Each bank of IO lines is built to function as input,
+ output or both depending on the variant of the card.
+
+ To compile this driver as a module, choose M here. The module will
+ be called vme_pio2. If unsure, say N.
+
diff --git a/drivers/staging/vme/devices/Makefile b/drivers/staging/vme/devices/Makefile
index 459742a..172512c 100644
--- a/drivers/staging/vme/devices/Makefile
+++ b/drivers/staging/vme/devices/Makefile
@@ -3,3 +3,6 @@
#
obj-$(CONFIG_VME_USER) += vme_user.o
+
+vme_pio2-objs := vme_pio2_cntr.o vme_pio2_gpio.o vme_pio2_core.o
+obj-$(CONFIG_VME_PIO2) += vme_pio2.o
diff --git a/drivers/staging/vme/devices/vme_pio2.h b/drivers/staging/vme/devices/vme_pio2.h
new file mode 100644
index 0000000..3c59313
--- /dev/null
+++ b/drivers/staging/vme/devices/vme_pio2.h
@@ -0,0 +1,249 @@
+#ifndef _VME_PIO2_H_
+#define _VME_PIO2_H_
+
+#define PIO2_CARDS_MAX 32
+
+#define PIO2_VARIANT_LENGTH 5
+
+#define PIO2_NUM_CHANNELS 32
+#define PIO2_NUM_IRQS 11
+#define PIO2_NUM_CNTRS 6
+
+#define PIO2_REGS_SIZE 0x40
+
+#define PIO2_REGS_DATA0 0x0
+#define PIO2_REGS_DATA1 0x1
+#define PIO2_REGS_DATA2 0x2
+#define PIO2_REGS_DATA3 0x3
+
+static const int PIO2_REGS_DATA[4] = { PIO2_REGS_DATA0, PIO2_REGS_DATA1,
+ PIO2_REGS_DATA2, PIO2_REGS_DATA3 };
+
+#define PIO2_REGS_INT_STAT0 0x8
+#define PIO2_REGS_INT_STAT1 0x9
+#define PIO2_REGS_INT_STAT2 0xa
+#define PIO2_REGS_INT_STAT3 0xb
+
+static const int PIO2_REGS_INT_STAT[4] = { PIO2_REGS_INT_STAT0,
+ PIO2_REGS_INT_STAT1,
+ PIO2_REGS_INT_STAT2,
+ PIO2_REGS_INT_STAT3 };
+
+#define PIO2_REGS_INT_STAT_CNTR 0xc
+#define PIO2_REGS_INT_MASK0 0x10
+#define PIO2_REGS_INT_MASK1 0x11
+#define PIO2_REGS_INT_MASK2 0x12
+#define PIO2_REGS_INT_MASK3 0x13
+#define PIO2_REGS_INT_MASK4 0x14
+#define PIO2_REGS_INT_MASK5 0x15
+#define PIO2_REGS_INT_MASK6 0x16
+#define PIO2_REGS_INT_MASK7 0x17
+
+static const int PIO2_REGS_INT_MASK[8] = { PIO2_REGS_INT_MASK0,
+ PIO2_REGS_INT_MASK1,
+ PIO2_REGS_INT_MASK2,
+ PIO2_REGS_INT_MASK3,
+ PIO2_REGS_INT_MASK4,
+ PIO2_REGS_INT_MASK5,
+ PIO2_REGS_INT_MASK6,
+ PIO2_REGS_INT_MASK7 };
+
+
+
+#define PIO2_REGS_CTRL 0x18
+#define PIO2_REGS_VME_VECTOR 0x19
+#define PIO2_REGS_CNTR0 0x20
+#define PIO2_REGS_CNTR1 0x22
+#define PIO2_REGS_CNTR2 0x24
+#define PIO2_REGS_CTRL_WRD0 0x26
+#define PIO2_REGS_CNTR3 0x28
+#define PIO2_REGS_CNTR4 0x2a
+#define PIO2_REGS_CNTR5 0x2c
+#define PIO2_REGS_CTRL_WRD1 0x2e
+
+#define PIO2_REGS_ID 0x30
+
+
+/* PIO2_REGS_DATAx (0x0 - 0x3) */
+
+static const int PIO2_CHANNEL_BANK[32] = { 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 1,
+ 2, 2, 2, 2, 2, 2, 2, 2,
+ 3, 3, 3, 3, 3, 3, 3, 3 };
+
+#define PIO2_CHANNEL0_BIT (1 << 0)
+#define PIO2_CHANNEL1_BIT (1 << 1)
+#define PIO2_CHANNEL2_BIT (1 << 2)
+#define PIO2_CHANNEL3_BIT (1 << 3)
+#define PIO2_CHANNEL4_BIT (1 << 4)
+#define PIO2_CHANNEL5_BIT (1 << 5)
+#define PIO2_CHANNEL6_BIT (1 << 6)
+#define PIO2_CHANNEL7_BIT (1 << 7)
+#define PIO2_CHANNEL8_BIT (1 << 0)
+#define PIO2_CHANNEL9_BIT (1 << 1)
+#define PIO2_CHANNEL10_BIT (1 << 2)
+#define PIO2_CHANNEL11_BIT (1 << 3)
+#define PIO2_CHANNEL12_BIT (1 << 4)
+#define PIO2_CHANNEL13_BIT (1 << 5)
+#define PIO2_CHANNEL14_BIT (1 << 6)
+#define PIO2_CHANNEL15_BIT (1 << 7)
+#define PIO2_CHANNEL16_BIT (1 << 0)
+#define PIO2_CHANNEL17_BIT (1 << 1)
+#define PIO2_CHANNEL18_BIT (1 << 2)
+#define PIO2_CHANNEL19_BIT (1 << 3)
+#define PIO2_CHANNEL20_BIT (1 << 4)
+#define PIO2_CHANNEL21_BIT (1 << 5)
+#define PIO2_CHANNEL22_BIT (1 << 6)
+#define PIO2_CHANNEL23_BIT (1 << 7)
+#define PIO2_CHANNEL24_BIT (1 << 0)
+#define PIO2_CHANNEL25_BIT (1 << 1)
+#define PIO2_CHANNEL26_BIT (1 << 2)
+#define PIO2_CHANNEL27_BIT (1 << 3)
+#define PIO2_CHANNEL28_BIT (1 << 4)
+#define PIO2_CHANNEL29_BIT (1 << 5)
+#define PIO2_CHANNEL30_BIT (1 << 6)
+#define PIO2_CHANNEL31_BIT (1 << 7)
+
+static const int PIO2_CHANNEL_BIT[32] = { PIO2_CHANNEL0_BIT, PIO2_CHANNEL1_BIT,
+ PIO2_CHANNEL2_BIT, PIO2_CHANNEL3_BIT,
+ PIO2_CHANNEL4_BIT, PIO2_CHANNEL5_BIT,
+ PIO2_CHANNEL6_BIT, PIO2_CHANNEL7_BIT,
+ PIO2_CHANNEL8_BIT, PIO2_CHANNEL9_BIT,
+ PIO2_CHANNEL10_BIT, PIO2_CHANNEL11_BIT,
+ PIO2_CHANNEL12_BIT, PIO2_CHANNEL13_BIT,
+ PIO2_CHANNEL14_BIT, PIO2_CHANNEL15_BIT,
+ PIO2_CHANNEL16_BIT, PIO2_CHANNEL17_BIT,
+ PIO2_CHANNEL18_BIT, PIO2_CHANNEL19_BIT,
+ PIO2_CHANNEL20_BIT, PIO2_CHANNEL21_BIT,
+ PIO2_CHANNEL22_BIT, PIO2_CHANNEL23_BIT,
+ PIO2_CHANNEL24_BIT, PIO2_CHANNEL25_BIT,
+ PIO2_CHANNEL26_BIT, PIO2_CHANNEL27_BIT,
+ PIO2_CHANNEL28_BIT, PIO2_CHANNEL29_BIT,
+ PIO2_CHANNEL30_BIT, PIO2_CHANNEL31_BIT
+ };
+
+/* PIO2_REGS_INT_STAT_CNTR (0xc) */
+#define PIO2_COUNTER0 (1 << 0)
+#define PIO2_COUNTER1 (1 << 1)
+#define PIO2_COUNTER2 (1 << 2)
+#define PIO2_COUNTER3 (1 << 3)
+#define PIO2_COUNTER4 (1 << 4)
+#define PIO2_COUNTER5 (1 << 5)
+
+static const int PIO2_COUNTER[6] = { PIO2_COUNTER0, PIO2_COUNTER1,
+ PIO2_COUNTER2, PIO2_COUNTER3,
+ PIO2_COUNTER4, PIO2_COUNTER5 };
+
+/* PIO2_REGS_CTRL (0x18) */
+#define PIO2_VME_INT_MASK 0x7
+#define PIO2_LED (1 << 6)
+#define PIO2_LOOP (1 << 7)
+
+/* PIO2_REGS_VME_VECTOR (0x19) */
+#define PIO2_VME_VECTOR_SPUR 0x0
+#define PIO2_VME_VECTOR_BANK0 0x1
+#define PIO2_VME_VECTOR_BANK1 0x2
+#define PIO2_VME_VECTOR_BANK2 0x3
+#define PIO2_VME_VECTOR_BANK3 0x4
+#define PIO2_VME_VECTOR_CNTR0 0x5
+#define PIO2_VME_VECTOR_CNTR1 0x6
+#define PIO2_VME_VECTOR_CNTR2 0x7
+#define PIO2_VME_VECTOR_CNTR3 0x8
+#define PIO2_VME_VECTOR_CNTR4 0x9
+#define PIO2_VME_VECTOR_CNTR5 0xa
+
+#define PIO2_VME_VECTOR_MASK 0xf0
+
+static const int PIO2_VECTOR_BANK[4] = { PIO2_VME_VECTOR_BANK0,
+ PIO2_VME_VECTOR_BANK1,
+ PIO2_VME_VECTOR_BANK2,
+ PIO2_VME_VECTOR_BANK3 };
+
+static const int PIO2_VECTOR_CNTR[6] = { PIO2_VME_VECTOR_CNTR0,
+ PIO2_VME_VECTOR_CNTR1,
+ PIO2_VME_VECTOR_CNTR2,
+ PIO2_VME_VECTOR_CNTR3,
+ PIO2_VME_VECTOR_CNTR4,
+ PIO2_VME_VECTOR_CNTR5 };
+
+/* PIO2_REGS_CNTRx (0x20 - 0x24 & 0x28 - 0x2c) */
+
+static const int PIO2_CNTR_DATA[6] = { PIO2_REGS_CNTR0, PIO2_REGS_CNTR1,
+ PIO2_REGS_CNTR2, PIO2_REGS_CNTR3,
+ PIO2_REGS_CNTR4, PIO2_REGS_CNTR5 };
+
+/* PIO2_REGS_CTRL_WRDx (0x26 & 0x2e) */
+
+static const int PIO2_CNTR_CTRL[6] = { PIO2_REGS_CTRL_WRD0,
+ PIO2_REGS_CTRL_WRD0,
+ PIO2_REGS_CTRL_WRD0,
+ PIO2_REGS_CTRL_WRD1,
+ PIO2_REGS_CTRL_WRD1,
+ PIO2_REGS_CTRL_WRD1 };
+
+#define PIO2_CNTR_SC_DEV0 0
+#define PIO2_CNTR_SC_DEV1 (1 << 6)
+#define PIO2_CNTR_SC_DEV2 (2 << 6)
+#define PIO2_CNTR_SC_RDBACK (3 << 6)
+
+static const int PIO2_CNTR_SC_DEV[6] = { PIO2_CNTR_SC_DEV0, PIO2_CNTR_SC_DEV1,
+ PIO2_CNTR_SC_DEV2, PIO2_CNTR_SC_DEV0,
+ PIO2_CNTR_SC_DEV1, PIO2_CNTR_SC_DEV2 };
+
+#define PIO2_CNTR_RW_LATCH 0
+#define PIO2_CNTR_RW_LSB (1 << 4)
+#define PIO2_CNTR_RW_MSB (2 << 4)
+#define PIO2_CNTR_RW_BOTH (3 << 4)
+
+#define PIO2_CNTR_MODE0 0
+#define PIO2_CNTR_MODE1 (1 << 1)
+#define PIO2_CNTR_MODE2 (2 << 1)
+#define PIO2_CNTR_MODE3 (3 << 1)
+#define PIO2_CNTR_MODE4 (4 << 1)
+#define PIO2_CNTR_MODE5 (5 << 1)
+
+#define PIO2_CNTR_BCD 1
+
+enum pio2_bank_config { NOFIT, INPUT, OUTPUT, BOTH };
+enum pio2_int_config { NONE = 0, LOW2HIGH = 1, HIGH2LOW = 2, EITHER = 4 };
+
+/* Bank configuration structure */
+struct pio2_io_bank {
+ enum pio2_bank_config config;
+ u8 value;
+ enum pio2_int_config irq[8];
+};
+
+/* Counter configuration structure */
+struct pio2_cntr {
+ int mode;
+ int count;
+};
+
+struct pio2_card {
+ int id;
+ int bus;
+ long base;
+ int irq_vector;
+ int irq_level;
+ char variant[6];
+ int led;
+
+ struct vme_dev *vdev;
+ struct vme_resource *window;
+
+ struct gpio_chip gc;
+ struct pio2_io_bank bank[4];
+
+ struct pio2_cntr cntr[6];
+};
+
+int pio2_cntr_reset(struct pio2_card *);
+
+int pio2_gpio_reset(struct pio2_card *);
+int __init pio2_gpio_init(struct pio2_card *);
+void __exit pio2_gpio_exit(struct pio2_card *);
+
+#endif /* _VME_PIO2_H_ */
diff --git a/drivers/staging/vme/devices/vme_pio2_cntr.c b/drivers/staging/vme/devices/vme_pio2_cntr.c
new file mode 100644
index 0000000..08e0d59
--- /dev/null
+++ b/drivers/staging/vme/devices/vme_pio2_cntr.c
@@ -0,0 +1,71 @@
+/*
+ * GE PIO2 Counter Driver
+ *
+ * Author: Martyn Welch <martyn.welch@ge.com>
+ * Copyright 2009 GE Intelligent Platforms Embedded Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * The PIO-2 has 6 counters; currently this code just disables the interrupts
+ * and leaves them alone.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/gpio.h>
+
+#include "../vme.h"
+#include "vme_pio2.h"
+
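+/*
+ * Write the control word for counter 'id' followed by the 16-bit count,
+ * LSB then MSB. As an illustrative sketch, programming counter 0 in mode 0
+ * with a hypothetical count of 1000 would amount to:
+ *
+ *	card->cntr[0].mode = PIO2_CNTR_MODE0;
+ *	card->cntr[0].count = 1000;
+ *	retval = pio2_cntr_irq_set(card, 0);
+ */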
+static int pio2_cntr_irq_set(struct pio2_card *card, int id)
+{
+ int retval;
+ u8 data;
+
+ data = PIO2_CNTR_SC_DEV[id] | PIO2_CNTR_RW_BOTH | card->cntr[id].mode;
+ retval = vme_master_write(card->window, &data, 1, PIO2_CNTR_CTRL[id]);
+ if (retval < 0)
+ return retval;
+
+ data = card->cntr[id].count & 0xFF;
+ retval = vme_master_write(card->window, &data, 1, PIO2_CNTR_DATA[id]);
+ if (retval < 0)
+ return retval;
+
+ data = (card->cntr[id].count >> 8) & 0xFF;
+ retval = vme_master_write(card->window, &data, 1, PIO2_CNTR_DATA[id]);
+ if (retval < 0)
+ return retval;
+
+ return 0;
+}
+
+int pio2_cntr_reset(struct pio2_card *card)
+{
+ int i, retval = 0;
+ u8 reg;
+
+ /* Clear down all timers */
+ for (i = 0; i < 6; i++) {
+ card->cntr[i].mode = PIO2_CNTR_MODE5;
+ card->cntr[i].count = 0;
+ retval = pio2_cntr_irq_set(card, i);
+ if (retval < 0)
+ return retval;
+ }
+
+ /* Ensure all counter interrupts are cleared */
+ do {
+ retval = vme_master_read(card->window, &reg, 1,
+ PIO2_REGS_INT_STAT_CNTR);
+ if (retval < 0)
+ return retval;
+ } while (reg != 0);
+
+ return retval;
+}
+
diff --git a/drivers/staging/vme/devices/vme_pio2_core.c b/drivers/staging/vme/devices/vme_pio2_core.c
new file mode 100644
index 0000000..9fedc44
--- /dev/null
+++ b/drivers/staging/vme/devices/vme_pio2_core.c
@@ -0,0 +1,524 @@
+/*
+ * GE PIO2 6U VME I/O Driver
+ *
+ * Author: Martyn Welch <martyn.welch@ge.com>
+ * Copyright 2009 GE Intelligent Platforms Embedded Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/ctype.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+
+#include "../vme.h"
+#include "vme_pio2.h"
+
+static const char driver_name[] = "pio2";
+
+static int bus[PIO2_CARDS_MAX];
+static int bus_num;
+static long base[PIO2_CARDS_MAX];
+static int base_num;
+static int vector[PIO2_CARDS_MAX];
+static int vector_num;
+static int level[PIO2_CARDS_MAX];
+static int level_num;
+static const char *variant[PIO2_CARDS_MAX];
+static int variant_num;
+
+static bool loopback;
+
+static int pio2_match(struct vme_dev *);
+static int __devinit pio2_probe(struct vme_dev *);
+static int __devexit pio2_remove(struct vme_dev *);
+
+static int pio2_get_led(struct pio2_card *card)
+{
+ /* Can't read hardware, state saved in structure */
+ return card->led;
+}
+
+static int pio2_set_led(struct pio2_card *card, int state)
+{
+ u8 reg;
+ int retval;
+
+ reg = card->irq_level;
+
+ /* Register state inverse of led state */
+ if (!state)
+ reg |= PIO2_LED;
+
+ if (loopback)
+ reg |= PIO2_LOOP;
+
+ retval = vme_master_write(card->window, &reg, 1, PIO2_REGS_CTRL);
+ if (retval < 0)
+ return retval;
+
+ card->led = state ? 1 : 0;
+
+ return 0;
+}
+
+static void pio2_int(int level, int vector, void *ptr)
+{
+ int vec, i, channel, retval;
+ u8 reg;
+ struct pio2_card *card = ptr;
+
+ vec = vector & ~PIO2_VME_VECTOR_MASK;
+
+ switch (vec) {
+ case 0:
+ dev_warn(&card->vdev->dev, "Spurious Interrupt\n");
+ break;
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ /* I/O banks 0 to 3 (channels 0 to 31) */
+ retval = vme_master_read(card->window, &reg, 1,
+ PIO2_REGS_INT_STAT[vec - 1]);
+ if (retval < 0) {
+ dev_err(&card->vdev->dev,
+ "Unable to read IRQ status register\n");
+ return;
+ }
+ for (i = 0; i < 8; i++) {
+ channel = ((vec - 1) * 8) + i;
+ if (reg & PIO2_CHANNEL_BIT[channel])
+ dev_info(&card->vdev->dev,
+ "Interrupt on I/O channel %d\n",
+ channel);
+ }
+ break;
+ case 5:
+ case 6:
+ case 7:
+ case 8:
+ case 9:
+ case 10:
+ /* Counters are dealt with by their own handler */
+ dev_err(&card->vdev->dev,
+ "Counter interrupt\n");
+ break;
+ }
+}
+
+/*
+ * We return whether this has been successful - this is used in the probe to
+ * ensure we have a valid card.
+ */
+static int pio2_reset_card(struct pio2_card *card)
+{
+ int retval = 0;
+ u8 data = 0;
+
+ /* Clear main register*/
+ retval = vme_master_write(card->window, &data, 1, PIO2_REGS_CTRL);
+ if (retval < 0)
+ return retval;
+
+ /* Clear VME vector */
+ retval = vme_master_write(card->window, &data, 1, PIO2_REGS_VME_VECTOR);
+ if (retval < 0)
+ return retval;
+
+ /* Reset GPIO */
+ retval = pio2_gpio_reset(card);
+ if (retval < 0)
+ return retval;
+
+ /* Reset counters */
+ retval = pio2_cntr_reset(card);
+ if (retval < 0)
+ return retval;
+
+ return 0;
+}
+
+static struct vme_driver pio2_driver = {
+ .name = driver_name,
+ .match = pio2_match,
+ .probe = pio2_probe,
+ .remove = __devexit_p(pio2_remove),
+};
+
+static int __init pio2_init(void)
+{
+ int retval = 0;
+
+ if (bus_num == 0) {
+ printk(KERN_ERR "%s: No cards, skipping registration\n",
+ driver_name);
+ goto err_nocard;
+ }
+
+ if (bus_num > PIO2_CARDS_MAX) {
+ printk(KERN_ERR
+ "%s: Driver only able to handle %d PIO2 Cards\n",
+ driver_name, PIO2_CARDS_MAX);
+ bus_num = PIO2_CARDS_MAX;
+ }
+
+ /* Register the PIO2 driver */
+ retval = vme_register_driver(&pio2_driver, bus_num);
+ if (retval != 0)
+ goto err_reg;
+
+ return retval;
+
+err_reg:
+err_nocard:
+ return retval;
+}
+
+static int pio2_match(struct vme_dev *vdev)
+{
+
+ if (vdev->num >= bus_num) {
+ dev_err(&vdev->dev,
+ "The enumeration of the VMEbus to which the board is connected must be specified");
+ return 0;
+ }
+
+ if (vdev->num >= base_num) {
+ dev_err(&vdev->dev,
+ "The VME address for the cards registers must be specified");
+ return 0;
+ }
+
+ if (vdev->num >= vector_num) {
+ dev_err(&vdev->dev,
+ "The IRQ vector used by the card must be specified");
+ return 0;
+ }
+
+ if (vdev->num >= level_num) {
+ dev_err(&vdev->dev,
+ "The IRQ level used by the card must be specified");
+ return 0;
+ }
+
+ if (vdev->num >= variant_num) {
+ dev_err(&vdev->dev, "The variant of the card must be specified");
+ return 0;
+ }
+
+ return 1;
+}
+
+static int __devinit pio2_probe(struct vme_dev *vdev)
+{
+ struct pio2_card *card;
+ int retval;
+ int i;
+ u8 reg;
+ int vec;
+
+ card = kzalloc(sizeof(struct pio2_card), GFP_KERNEL);
+ if (card == NULL) {
+ dev_err(&vdev->dev, "Unable to allocate card structure\n");
+ retval = -ENOMEM;
+ goto err_struct;
+ }
+
+ card->id = vdev->num;
+ card->bus = bus[card->id];
+ card->base = base[card->id];
+ card->irq_vector = vector[card->id];
+ card->irq_level = level[card->id] & PIO2_VME_INT_MASK;
+ strncpy(card->variant, variant[card->id], PIO2_VARIANT_LENGTH);
+ card->vdev = vdev;
+
+ for (i = 0; i < PIO2_VARIANT_LENGTH; i++) {
+
+ if (isdigit(card->variant[i]) == 0) {
+ dev_err(&card->vdev->dev, "Variant invalid\n");
+ retval = -EINVAL;
+ goto err_variant;
+ }
+ }
+
+ /*
+ * Bottom 4 bits of VME interrupt vector used to determine source,
+ * provided vector should only use upper 4 bits.
+ */
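+ /*
+ * For example (hypothetical values): a vector of 0xf0 would be accepted,
+ * while 0xf5 would be rejected because it sets bits in the lower nibble
+ * used for the interrupt source encoding.
+ */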
+ if (card->irq_vector & ~PIO2_VME_VECTOR_MASK) {
+ dev_err(&card->vdev->dev,
+ "Invalid VME IRQ Vector, vector must not use lower 4 bits\n");
+ retval = -EINVAL;
+ goto err_vector;
+ }
+
+ /*
+ * There is no way to determine the build variant or whether each bank
+ * is input, output or both at run time. The inputs are also inverted
+ * if configured as both.
+ *
+ * We pass in the board variant and use that to determine the
+ * configuration of the banks.
+ */
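+ /*
+ * For example, a hypothetical variant whose second, third and fourth
+ * characters were "150" would configure bank 0 as INPUT ('1'), bank 1
+ * as OUTPUT ('5') and bank 2 as NOFIT ('0').
+ */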
+ for (i = 1; i < PIO2_VARIANT_LENGTH; i++) {
+ switch (card->variant[i]) {
+ case '0':
+ card->bank[i-1].config = NOFIT;
+ break;
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ card->bank[i-1].config = INPUT;
+ break;
+ case '5':
+ card->bank[i-1].config = OUTPUT;
+ break;
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ card->bank[i-1].config = BOTH;
+ break;
+ }
+ }
+
+ /* Get a master window and position over regs */
+ card->window = vme_master_request(vdev, VME_A24, VME_SCT, VME_D16);
+ if (card->window == NULL) {
+ dev_err(&card->vdev->dev,
+ "Unable to assign VME master resource\n");
+ retval = -EIO;
+ goto err_window;
+ }
+
+ retval = vme_master_set(card->window, 1, card->base, 0x10000, VME_A24,
+ (VME_SCT | VME_USER | VME_DATA), VME_D16);
+ if (retval) {
+ dev_err(&card->vdev->dev,
+ "Unable to configure VME master resource\n");
+ goto err_set;
+ }
+
+ /*
+ * There is also no obvious register which we can probe to determine
+ * whether the provided base is valid. If we can read the "ID Register"
+ * offset and the reset function doesn't error, assume we have a valid
+ * location.
+ */
+ retval = vme_master_read(card->window, &reg, 1, PIO2_REGS_ID);
+ if (retval < 0) {
+ dev_err(&card->vdev->dev, "Unable to read from device\n");
+ goto err_read;
+ }
+
+ dev_dbg(&card->vdev->dev, "ID Register:%x\n", reg);
+
+ /*
+ * Ensure all the I/O is cleared. We can't read back the states, so
+ * this is the only method we have to ensure that the I/O is in a known
+ * state.
+ */
+ retval = pio2_reset_card(card);
+ if (retval) {
+ dev_err(&card->vdev->dev,
+ "Failed to reset card, is location valid?");
+ retval = -ENODEV;
+ goto err_reset;
+ }
+
+ /* Configure VME Interrupts */
+ reg = card->irq_level;
+ if (pio2_get_led(card))
+ reg |= PIO2_LED;
+ if (loopback)
+ reg |= PIO2_LOOP;
+ retval = vme_master_write(card->window, &reg, 1, PIO2_REGS_CTRL);
+ if (retval < 0)
+ return retval;
+
+ /* Set VME vector */
+ retval = vme_master_write(card->window, &card->irq_vector, 1,
+ PIO2_REGS_VME_VECTOR);
+ if (retval < 0)
+ return retval;
+
+ /* Attach spurious interrupt handler. */
+ vec = card->irq_vector | PIO2_VME_VECTOR_SPUR;
+
+ retval = vme_irq_request(vdev, card->irq_level, vec,
+ &pio2_int, (void *)card);
+ if (retval < 0) {
+ dev_err(&card->vdev->dev,
+ "Unable to attach VME interrupt vector0x%x, level 0x%x\n",
+ vec, card->irq_level);
+ goto err_irq;
+ }
+
+ /* Attach GPIO interrupt handlers. */
+ for (i = 0; i < 4; i++) {
+ vec = card->irq_vector | PIO2_VECTOR_BANK[i];
+
+ retval = vme_irq_request(vdev, card->irq_level, vec,
+ &pio2_int, (void *)card);
+ if (retval < 0) {
+ dev_err(&card->vdev->dev,
+ "Unable to attach VME interrupt vector0x%x, level 0x%x\n",
+ vec, card->irq_level);
+ goto err_gpio_irq;
+ }
+ }
+
+ /* Attach counter interrupt handlers. */
+ for (i = 0; i < 6; i++) {
+ vec = card->irq_vector | PIO2_VECTOR_CNTR[i];
+
+ retval = vme_irq_request(vdev, card->irq_level, vec,
+ &pio2_int, (void *)card);
+ if (retval < 0) {
+ dev_err(&card->vdev->dev,
+ "Unable to attach VME interrupt vector0x%x, level 0x%x\n",
+ vec, card->irq_level);
+ goto err_cntr_irq;
+ }
+ }
+
+ /* Register IO */
+ retval = pio2_gpio_init(card);
+ if (retval < 0) {
+ dev_err(&card->vdev->dev,
+ "Unable to register with GPIO framework\n");
+ goto err_gpio;
+ }
+
+ /* Set LED - This also sets interrupt level */
+ retval = pio2_set_led(card, 0);
+ if (retval < 0) {
+ dev_err(&card->vdev->dev, "Unable to set LED\n");
+ goto err_led;
+ }
+
+ dev_set_drvdata(&card->vdev->dev, card);
+
+ dev_info(&card->vdev->dev,
+ "PIO2 (variant %s) configured at 0x%lx\n", card->variant,
+ card->base);
+
+ return 0;
+
+err_led:
+ pio2_gpio_exit(card);
+err_gpio:
+ i = 6;
+err_cntr_irq:
+ while (i > 0) {
+ i--;
+ vec = card->irq_vector | PIO2_VECTOR_CNTR[i];
+ vme_irq_free(vdev, card->irq_level, vec);
+ }
+
+ i = 4;
+err_gpio_irq:
+ while (i > 0) {
+ i--;
+ vec = card->irq_vector | PIO2_VECTOR_BANK[i];
+ vme_irq_free(vdev, card->irq_level, vec);
+ }
+
+ vec = (card->irq_vector & PIO2_VME_VECTOR_MASK) | PIO2_VME_VECTOR_SPUR;
+ vme_irq_free(vdev, card->irq_level, vec);
+err_irq:
+ pio2_reset_card(card);
+err_reset:
+err_read:
+ vme_master_set(card->window, 0, 0, 0, VME_A16, 0, VME_D16);
+err_set:
+ vme_master_free(card->window);
+err_window:
+err_vector:
+err_variant:
+ kfree(card);
+err_struct:
+ return retval;
+}
+
+static int __devexit pio2_remove(struct vme_dev *vdev)
+{
+ int vec;
+ int i;
+
+ struct pio2_card *card = dev_get_drvdata(&vdev->dev);
+
+ pio2_gpio_exit(card);
+
+ for (i = 0; i < 6; i++) {
+ vec = card->irq_vector | PIO2_VECTOR_CNTR[i];
+ vme_irq_free(vdev, card->irq_level, vec);
+ }
+
+ for (i = 0; i < 4; i++) {
+ vec = card->irq_vector | PIO2_VECTOR_BANK[i];
+ vme_irq_free(vdev, card->irq_level, vec);
+ }
+
+ vec = (card->irq_vector & PIO2_VME_VECTOR_MASK) | PIO2_VME_VECTOR_SPUR;
+ vme_irq_free(vdev, card->irq_level, vec);
+
+ pio2_reset_card(card);
+
+ vme_master_set(card->window, 0, 0, 0, VME_A16, 0, VME_D16);
+
+ vme_master_free(card->window);
+
+ kfree(card);
+
+ return 0;
+}
+
+static void __exit pio2_exit(void)
+{
+ vme_unregister_driver(&pio2_driver);
+}
+
+/* These are required for each board */
+MODULE_PARM_DESC(bus, "Enumeration of VMEbus to which the board is connected");
+module_param_array(bus, int, &bus_num, S_IRUGO);
+
+MODULE_PARM_DESC(base, "Base VME address for PIO2 Registers");
+module_param_array(base, long, &base_num, S_IRUGO);
+
+MODULE_PARM_DESC(vector, "VME IRQ Vector (Lower 4 bits masked)");
+module_param_array(vector, int, &vector_num, S_IRUGO);
+
+MODULE_PARM_DESC(level, "VME IRQ Level");
+module_param_array(level, int, &level_num, S_IRUGO);
+
+MODULE_PARM_DESC(variant, "Last 4 characters of PIO2 board variant");
+module_param_array(variant, charp, &variant_num, S_IRUGO);
+
+/* This is for debugging */
+MODULE_PARM_DESC(loopback, "Enable loopback mode on all cards");
+module_param(loopback, bool, S_IRUGO);
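+
+/*
+ * Illustrative module load (hypothetical values, assuming the module is
+ * built as vme_pio2):
+ *
+ *	modprobe vme_pio2 bus=0 base=0xc00000 vector=0xf0 level=4 variant=2150
+ *
+ * One entry per card is expected in each of the required arrays.
+ */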
+
+MODULE_DESCRIPTION("GE PIO2 6U VME I/O Driver");
+MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
+MODULE_LICENSE("GPL");
+
+module_init(pio2_init);
+module_exit(pio2_exit);
+
diff --git a/drivers/staging/vme/devices/vme_pio2_gpio.c b/drivers/staging/vme/devices/vme_pio2_gpio.c
new file mode 100644
index 0000000..dc837de
--- /dev/null
+++ b/drivers/staging/vme/devices/vme_pio2_gpio.c
@@ -0,0 +1,232 @@
+/*
+ * GE PIO2 GPIO Driver
+ *
+ * Author: Martyn Welch <martyn.welch@ge.com>
+ * Copyright 2009 GE Intelligent Platforms Embedded Systems, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/ctype.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+
+#include "../vme.h"
+#include "vme_pio2.h"
+
+static const char driver_name[] = "pio2_gpio";
+
+static struct pio2_card *gpio_to_pio2_card(struct gpio_chip *chip)
+{
+ return container_of(chip, struct pio2_card, gc);
+}
+
+static int pio2_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ u8 reg;
+ int retval;
+ struct pio2_card *card = gpio_to_pio2_card(chip);
+
+ if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == OUTPUT) ||
+ (card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
+
+ dev_err(&card->vdev->dev, "Channel not available as input\n");
+ return 0;
+ }
+
+ retval = vme_master_read(card->window, &reg, 1,
+ PIO2_REGS_DATA[PIO2_CHANNEL_BANK[offset]]);
+ if (retval < 0) {
+ dev_err(&card->vdev->dev, "Unable to read from GPIO\n");
+ return 0;
+ }
+
+ /*
+ * Remember, input on channels configured as both input and output
+ * are inverted!
+ */
+ if (reg & PIO2_CHANNEL_BIT[offset]) {
+ if (card->bank[PIO2_CHANNEL_BANK[offset]].config != BOTH)
+ return 0;
+ else
+ return 1;
+ } else {
+ if (card->bank[PIO2_CHANNEL_BANK[offset]].config != BOTH)
+ return 1;
+ else
+ return 0;
+ }
+}
+
+static void pio2_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ u8 reg;
+ int retval;
+ struct pio2_card *card = gpio_to_pio2_card(chip);
+
+ if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == INPUT) ||
+ (card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
+
+ dev_err(&card->vdev->dev, "Channel not availabe as output\n");
+ return;
+ }
+
+ if (value)
+ reg = card->bank[PIO2_CHANNEL_BANK[offset]].value |
+ PIO2_CHANNEL_BIT[offset];
+ else
+ reg = card->bank[PIO2_CHANNEL_BANK[offset]].value &
+ ~PIO2_CHANNEL_BIT[offset];
+
+ retval = vme_master_write(card->window, &reg, 1,
+ PIO2_REGS_DATA[PIO2_CHANNEL_BANK[offset]]);
+ if (retval < 0) {
+ dev_err(&card->vdev->dev, "Unable to write to GPIO\n");
+ return;
+ }
+
+ card->bank[PIO2_CHANNEL_BANK[offset]].value = reg;
+}
+
+/* Directionality configured at board build - send appropriate response */
+static int pio2_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
+{
+ int data;
+ struct pio2_card *card = gpio_to_pio2_card(chip);
+
+ if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == OUTPUT) ||
+ (card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
+ dev_err(&card->vdev->dev,
+ "Channel directionality not configurable at runtine\n");
+
+ data = -EINVAL;
+ } else {
+ data = 0;
+ }
+
+ return data;
+}
+
+/* Directionality configured at board build - send appropriate response */
+static int pio2_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int value)
+{
+ int data;
+ struct pio2_card *card = gpio_to_pio2_card(chip);
+
+ if ((card->bank[PIO2_CHANNEL_BANK[offset]].config == INPUT) ||
+ (card->bank[PIO2_CHANNEL_BANK[offset]].config == NOFIT)) {
+ dev_err(&card->vdev->dev,
+ "Channel directionality not configurable at runtine\n");
+
+ data = -EINVAL;
+ } else {
+ data = 0;
+ }
+
+ return data;
+}
+
+/*
+ * We return whether this has been successful - this is used in the probe to
+ * ensure we have a valid card.
+ */
+int pio2_gpio_reset(struct pio2_card *card)
+{
+ int retval = 0;
+ int i, j;
+
+ u8 data = 0;
+
+ /* Zero output registers */
+ for (i = 0; i < 4; i++) {
+ retval = vme_master_write(card->window, &data, 1,
+ PIO2_REGS_DATA[i]);
+ if (retval < 0)
+ return retval;
+ card->bank[i].value = 0;
+ }
+
+ /* Set input interrupt masks */
+ for (i = 0; i < 4; i++) {
+ retval = vme_master_write(card->window, &data, 1,
+ PIO2_REGS_INT_MASK[i * 2]);
+ if (retval < 0)
+ return retval;
+
+ retval = vme_master_write(card->window, &data, 1,
+ PIO2_REGS_INT_MASK[(i * 2) + 1]);
+ if (retval < 0)
+ return retval;
+
+ for (j = 0; j < 8; j++)
+ card->bank[i].irq[j] = NONE;
+ }
+
+ /* Ensure all I/O interrupts are cleared */
+ for (i = 0; i < 4; i++) {
+ do {
+ retval = vme_master_read(card->window, &data, 1,
+ PIO2_REGS_INT_STAT[i]);
+ if (retval < 0)
+ return retval;
+ } while (data != 0);
+ }
+
+ return 0;
+}
+
+int __init pio2_gpio_init(struct pio2_card *card)
+{
+ int retval = 0;
+ char *label;
+
+ /* kasprintf() sizes the allocation to fit the formatted label */
+ label = kasprintf(GFP_KERNEL, "%s@%s", driver_name,
+ dev_name(&card->vdev->dev));
+ if (label == NULL) {
+ dev_err(&card->vdev->dev, "Unable to allocate GPIO label\n");
+ return -ENOMEM;
+ }
+
+ card->gc.label = label;
+
+ card->gc.ngpio = PIO2_NUM_CHANNELS;
+ /* Dynamic allocation of base */
+ card->gc.base = -1;
+ /* Setup pointers to chip functions */
+ card->gc.direction_input = pio2_gpio_dir_in;
+ card->gc.direction_output = pio2_gpio_dir_out;
+ card->gc.get = pio2_gpio_get;
+ card->gc.set = pio2_gpio_set;
+
+ /* This function adds a memory mapped GPIO chip */
+ retval = gpiochip_add(&(card->gc));
+ if (retval) {
+ dev_err(&card->vdev->dev, "Unable to register GPIO\n");
+ kfree(card->gc.label);
+ }
+
+ return retval;
+}
+
+void __exit pio2_gpio_exit(struct pio2_card *card)
+{
+ const char *label = card->gc.label;
+
+ if (gpiochip_remove(&(card->gc)))
+ dev_err(&card->vdev->dev, "Failed to remove GPIO");
+
+ kfree(label);
+}
+
diff --git a/drivers/staging/vme/devices/vme_user.h b/drivers/staging/vme/devices/vme_user.h
index d85a1e9..7d24cd6 100644
--- a/drivers/staging/vme/devices/vme_user.h
+++ b/drivers/staging/vme/devices/vme_user.h
@@ -10,9 +10,9 @@ struct vme_master {
int enable; /* State of Window */
unsigned long long vme_addr; /* Starting Address on the VMEbus */
unsigned long long size; /* Window Size */
- vme_address_t aspace; /* Address Space */
- vme_cycle_t cycle; /* Cycle properties */
- vme_width_t dwidth; /* Maximum Data Width */
+ u32 aspace; /* Address Space */
+ u32 cycle; /* Cycle properties */
+ u32 dwidth; /* Maximum Data Width */
#if 0
char prefetchEnable; /* Prefetch Read Enable State */
int prefetchSize; /* Prefetch Read Size (Cache Lines) */
@@ -34,8 +34,8 @@ struct vme_slave {
int enable; /* State of Window */
unsigned long long vme_addr; /* Starting Address on the VMEbus */
unsigned long long size; /* Window Size */
- vme_address_t aspace; /* Address Space */
- vme_cycle_t cycle; /* Cycle properties */
+ u32 aspace; /* Address Space */
+ u32 cycle; /* Cycle properties */
#if 0
char wrPostEnable; /* Write Post State */
char rmwLock; /* Lock PCI during RMW Cycles */
diff --git a/drivers/staging/vme/vme.c b/drivers/staging/vme/vme.c
index b04b468..70722ae 100644
--- a/drivers/staging/vme/vme.c
+++ b/drivers/staging/vme/vme.c
@@ -153,9 +153,7 @@ size_t vme_get_size(struct vme_resource *resource)
int enabled, retval;
unsigned long long base, size;
dma_addr_t buf_base;
- vme_address_t aspace;
- vme_cycle_t cycle;
- vme_width_t dwidth;
+ u32 aspace, cycle, dwidth;
switch (resource->type) {
case VME_MASTER:
@@ -181,7 +179,7 @@ size_t vme_get_size(struct vme_resource *resource)
}
EXPORT_SYMBOL(vme_get_size);
-static int vme_check_window(vme_address_t aspace, unsigned long long vme_base,
+static int vme_check_window(u32 aspace, unsigned long long vme_base,
unsigned long long size)
{
int retval = 0;
@@ -232,8 +230,8 @@ static int vme_check_window(vme_address_t aspace, unsigned long long vme_base,
* Request a slave image with specific attributes, return some unique
* identifier.
*/
-struct vme_resource *vme_slave_request(struct vme_dev *vdev,
- vme_address_t address, vme_cycle_t cycle)
+struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
+ u32 cycle)
{
struct vme_bridge *bridge;
struct list_head *slave_pos = NULL;
@@ -298,7 +296,7 @@ EXPORT_SYMBOL(vme_slave_request);
int vme_slave_set(struct vme_resource *resource, int enabled,
unsigned long long vme_base, unsigned long long size,
- dma_addr_t buf_base, vme_address_t aspace, vme_cycle_t cycle)
+ dma_addr_t buf_base, u32 aspace, u32 cycle)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_slave_resource *image;
@@ -333,7 +331,7 @@ EXPORT_SYMBOL(vme_slave_set);
int vme_slave_get(struct vme_resource *resource, int *enabled,
unsigned long long *vme_base, unsigned long long *size,
- dma_addr_t *buf_base, vme_address_t *aspace, vme_cycle_t *cycle)
+ dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_slave_resource *image;
@@ -388,8 +386,8 @@ EXPORT_SYMBOL(vme_slave_free);
* Request a master image with specific attributes, return some unique
* identifier.
*/
-struct vme_resource *vme_master_request(struct vme_dev *vdev,
- vme_address_t address, vme_cycle_t cycle, vme_width_t dwidth)
+struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
+ u32 cycle, u32 dwidth)
{
struct vme_bridge *bridge;
struct list_head *master_pos = NULL;
@@ -456,8 +454,8 @@ err_bus:
EXPORT_SYMBOL(vme_master_request);
int vme_master_set(struct vme_resource *resource, int enabled,
- unsigned long long vme_base, unsigned long long size,
- vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
+ unsigned long long vme_base, unsigned long long size, u32 aspace,
+ u32 cycle, u32 dwidth)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_master_resource *image;
@@ -492,8 +490,8 @@ int vme_master_set(struct vme_resource *resource, int enabled,
EXPORT_SYMBOL(vme_master_set);
int vme_master_get(struct vme_resource *resource, int *enabled,
- unsigned long long *vme_base, unsigned long long *size,
- vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
+ unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
+ u32 *cycle, u32 *dwidth)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_master_resource *image;
@@ -646,8 +644,7 @@ EXPORT_SYMBOL(vme_master_free);
* Request a DMA controller with specific attributes, return some unique
* identifier.
*/
-struct vme_resource *vme_dma_request(struct vme_dev *vdev,
- vme_dma_route_t route)
+struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
{
struct vme_bridge *bridge;
struct list_head *dma_pos = NULL;
@@ -743,8 +740,7 @@ EXPORT_SYMBOL(vme_new_dma_list);
/*
* Create "Pattern" type attributes
*/
-struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern,
- vme_pattern_t type)
+struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
{
struct vme_dma_attr *attributes;
struct vme_dma_pattern *pattern_attr;
@@ -822,7 +818,7 @@ EXPORT_SYMBOL(vme_dma_pci_attribute);
* Create "VME" type attributes
*/
struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
- vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
+ u32 aspace, u32 cycle, u32 dwidth)
{
struct vme_dma_attr *attributes;
struct vme_dma_vme *vme_attr;
@@ -1173,7 +1169,7 @@ int vme_lm_count(struct vme_resource *resource)
EXPORT_SYMBOL(vme_lm_count);
int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
- vme_address_t aspace, vme_cycle_t cycle)
+ u32 aspace, u32 cycle)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_lm_resource *lm;
@@ -1195,7 +1191,7 @@ int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
EXPORT_SYMBOL(vme_lm_set);
int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
- vme_address_t *aspace, vme_cycle_t *cycle)
+ u32 *aspace, u32 *cycle)
{
struct vme_bridge *bridge = find_bridge(resource);
struct vme_lm_resource *lm;
@@ -1307,7 +1303,12 @@ EXPORT_SYMBOL(vme_slot_get);
/* - Bridge Registration --------------------------------------------------- */
-static int vme_add_bus(struct vme_bridge *bridge)
+static void vme_dev_release(struct device *dev)
+{
+ kfree(dev_to_vme_dev(dev));
+}
+
+int vme_register_bridge(struct vme_bridge *bridge)
{
int i;
int ret = -1;
@@ -1327,8 +1328,9 @@ static int vme_add_bus(struct vme_bridge *bridge)
return ret;
}
+EXPORT_SYMBOL(vme_register_bridge);
-static void vme_remove_bus(struct vme_bridge *bridge)
+void vme_unregister_bridge(struct vme_bridge *bridge)
{
struct vme_dev *vdev;
struct vme_dev *tmp;
@@ -1343,22 +1345,6 @@ static void vme_remove_bus(struct vme_bridge *bridge)
list_del(&bridge->bus_list);
mutex_unlock(&vme_buses_lock);
}
-
-static void vme_dev_release(struct device *dev)
-{
- kfree(dev_to_vme_dev(dev));
-}
-
-int vme_register_bridge(struct vme_bridge *bridge)
-{
- return vme_add_bus(bridge);
-}
-EXPORT_SYMBOL(vme_register_bridge);
-
-void vme_unregister_bridge(struct vme_bridge *bridge)
-{
- vme_remove_bus(bridge);
-}
EXPORT_SYMBOL(vme_unregister_bridge);
/* - Driver Registration --------------------------------------------------- */
@@ -1421,10 +1407,7 @@ static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
* and if the bridge is removed, it will have to go through
* vme_unregister_bridge() to do it (which calls remove() on
* the bridge which in turn tries to acquire vme_buses_lock and
- * will have to wait). The probe() called after device
- * registration in __vme_register_driver below will also fail
- * as the bridge is being removed (since the probe() calls
- * vme_bridge_get()).
+ * will have to wait).
*/
err = __vme_register_driver_bus(drv, bridge, ndevs);
if (err)
diff --git a/drivers/staging/vme/vme.h b/drivers/staging/vme/vme.h
index e3828ba..9d38cee 100644
--- a/drivers/staging/vme/vme.h
+++ b/drivers/staging/vme/vme.h
@@ -10,7 +10,6 @@ enum vme_resource_type {
};
/* VME Address Spaces */
-typedef u32 vme_address_t;
#define VME_A16 0x1
#define VME_A24 0x2
#define VME_A32 0x4
@@ -29,7 +28,6 @@ typedef u32 vme_address_t;
/* VME Cycle Types */
-typedef u32 vme_cycle_t;
#define VME_SCT 0x1
#define VME_BLT 0x2
#define VME_MBLT 0x4
@@ -47,28 +45,23 @@ typedef u32 vme_cycle_t;
#define VME_DATA 0x8000
/* VME Data Widths */
-typedef u32 vme_width_t;
#define VME_D8 0x1
#define VME_D16 0x2
#define VME_D32 0x4
#define VME_D64 0x8
/* Arbitration Scheduling Modes */
-typedef u32 vme_arbitration_t;
#define VME_R_ROBIN_MODE 0x1
#define VME_PRIORITY_MODE 0x2
-typedef u32 vme_dma_t;
#define VME_DMA_PATTERN (1<<0)
#define VME_DMA_PCI (1<<1)
#define VME_DMA_VME (1<<2)
-typedef u32 vme_pattern_t;
#define VME_DMA_PATTERN_BYTE (1<<0)
#define VME_DMA_PATTERN_WORD (1<<1)
#define VME_DMA_PATTERN_INCREMENT (1<<2)
-typedef u32 vme_dma_route_t;
#define VME_DMA_VME_TO_MEM (1<<0)
#define VME_DMA_MEM_TO_VME (1<<1)
#define VME_DMA_VME_TO_VME (1<<2)
@@ -77,7 +70,7 @@ typedef u32 vme_dma_route_t;
#define VME_DMA_PATTERN_TO_MEM (1<<5)
struct vme_dma_attr {
- vme_dma_t type;
+ u32 type;
void *private;
};
@@ -97,7 +90,7 @@ extern struct bus_type vme_bus_type;
/**
* Structure representing a VME device
- * @id: The ID of the device (currently the bus and slot number)
+ * @num: The device number
* @bridge: Pointer to the bridge device this device is on
* @dev: Internal device structure
* @drv_list: List of devices (per driver)
@@ -128,32 +121,29 @@ void vme_free_consistent(struct vme_resource *, size_t, void *,
size_t vme_get_size(struct vme_resource *);
-struct vme_resource *vme_slave_request(struct vme_dev *, vme_address_t,
- vme_cycle_t);
+struct vme_resource *vme_slave_request(struct vme_dev *, u32, u32);
int vme_slave_set(struct vme_resource *, int, unsigned long long,
- unsigned long long, dma_addr_t, vme_address_t, vme_cycle_t);
+ unsigned long long, dma_addr_t, u32, u32);
int vme_slave_get(struct vme_resource *, int *, unsigned long long *,
- unsigned long long *, dma_addr_t *, vme_address_t *, vme_cycle_t *);
+ unsigned long long *, dma_addr_t *, u32 *, u32 *);
void vme_slave_free(struct vme_resource *);
-struct vme_resource *vme_master_request(struct vme_dev *, vme_address_t,
- vme_cycle_t, vme_width_t);
+struct vme_resource *vme_master_request(struct vme_dev *, u32, u32, u32);
int vme_master_set(struct vme_resource *, int, unsigned long long,
- unsigned long long, vme_address_t, vme_cycle_t, vme_width_t);
+ unsigned long long, u32, u32, u32);
int vme_master_get(struct vme_resource *, int *, unsigned long long *,
- unsigned long long *, vme_address_t *, vme_cycle_t *, vme_width_t *);
+ unsigned long long *, u32 *, u32 *, u32 *);
ssize_t vme_master_read(struct vme_resource *, void *, size_t, loff_t);
ssize_t vme_master_write(struct vme_resource *, void *, size_t, loff_t);
unsigned int vme_master_rmw(struct vme_resource *, unsigned int, unsigned int,
unsigned int, loff_t);
void vme_master_free(struct vme_resource *);
-struct vme_resource *vme_dma_request(struct vme_dev *, vme_dma_route_t);
+struct vme_resource *vme_dma_request(struct vme_dev *, u32);
struct vme_dma_list *vme_new_dma_list(struct vme_resource *);
-struct vme_dma_attr *vme_dma_pattern_attribute(u32, vme_pattern_t);
+struct vme_dma_attr *vme_dma_pattern_attribute(u32, u32);
struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t);
-struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long, vme_address_t,
- vme_cycle_t, vme_width_t);
+struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long, u32, u32, u32);
void vme_dma_free_attribute(struct vme_dma_attr *);
int vme_dma_list_add(struct vme_dma_list *, struct vme_dma_attr *,
struct vme_dma_attr *, size_t);
@@ -168,10 +158,8 @@ int vme_irq_generate(struct vme_dev *, int, int);
struct vme_resource * vme_lm_request(struct vme_dev *);
int vme_lm_count(struct vme_resource *);
-int vme_lm_set(struct vme_resource *, unsigned long long, vme_address_t,
- vme_cycle_t);
-int vme_lm_get(struct vme_resource *, unsigned long long *, vme_address_t *,
- vme_cycle_t *);
+int vme_lm_set(struct vme_resource *, unsigned long long, u32, u32);
+int vme_lm_get(struct vme_resource *, unsigned long long *, u32 *, u32 *);
int vme_lm_attach(struct vme_resource *, int, void (*callback)(int));
int vme_lm_detach(struct vme_resource *, int);
void vme_lm_free(struct vme_resource *);
diff --git a/drivers/staging/vme/vme_api.txt b/drivers/staging/vme/vme_api.txt
index e8ff215..856efa3 100644
--- a/drivers/staging/vme/vme_api.txt
+++ b/drivers/staging/vme/vme_api.txt
@@ -86,26 +86,26 @@ driver allows a resource to be assigned based on the required attributes of the
driver in question:
struct vme_resource * vme_master_request(struct vme_dev *dev,
- vme_address_t aspace, vme_cycle_t cycle, vme_width_t width);
-
- struct vme_resource * vme_slave_request(struct vme_dev *dev,
- vme_address_t aspace, vme_cycle_t cycle);
-
- struct vme_resource *vme_dma_request(struct vme_dev *dev,
- vme_dma_route_t route);
-
-For slave windows these attributes are split into those of type 'vme_address_t'
-and 'vme_cycle_t'. Master windows add a further set of attributes
-'vme_cycle_t'. These attributes are defined as bitmasks and as such any
-combination of the attributes can be requested for a single window, the core
-will assign a window that meets the requirements, returning a pointer of type
-vme_resource that should be used to identify the allocated resource when it is
-used. For DMA controllers, the request function requires the potential
-direction of any transfers to be provided in the route attributes. This is
-typically VME-to-MEM and/or MEM-to-VME, though some hardware can support
-VME-to-VME and MEM-to-MEM transfers as well as test pattern generation. If an
-unallocated window fitting the requirements can not be found a NULL pointer
-will be returned.
+ u32 aspace, u32 cycle, u32 width);
+
+ struct vme_resource * vme_slave_request(struct vme_dev *dev, u32 aspace,
+ u32 cycle);
+
+ struct vme_resource *vme_dma_request(struct vme_dev *dev, u32 route);
+
+For slave windows these attributes are split into the VME address spaces that
+need to be accessed in 'aspace' and VME bus cycle types required in 'cycle'.
+Master windows add a further set of attributes in 'width' specifying the
+required data transfer widths. These attributes are defined as bitmasks and as
+such any combination of the attributes can be requested for a single window;
+the core will assign a window that meets the requirements, returning a pointer
+of type vme_resource that should be used to identify the allocated resource
+when it is used. For DMA controllers, the request function requires the
+potential direction of any transfers to be provided in the route attributes.
+This is typically VME-to-MEM and/or MEM-to-VME, though some hardware can
+support VME-to-VME and MEM-to-MEM transfers as well as test pattern generation.
+If an unallocated window fitting the requirements cannot be found, a NULL
+pointer will be returned.
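+
+As an illustrative sketch (not taken from any particular driver), requesting a
+single cycle, D16 data width master window in the A24 address space could look
+like:
+
+ struct vme_resource *res;
+
+ res = vme_master_request(dev, VME_A24, VME_SCT, VME_D16);
+ if (res == NULL)
+ return -EIO;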
Functions are also provided to free window allocations once they are no longer
required. These functions should be passed the pointer to the resource provided
@@ -133,12 +133,12 @@ Once a master window has been assigned the following functions can be used to
configure it and retrieve the current settings:
int vme_master_set (struct vme_resource *res, int enabled,
- unsigned long long base, unsigned long long size,
- vme_address_t aspace, vme_cycle_t cycle, vme_width_t width);
+ unsigned long long base, unsigned long long size, u32 aspace,
+ u32 cycle, u32 width);
int vme_master_get (struct vme_resource *res, int *enabled,
- unsigned long long *base, unsigned long long *size,
- vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *width);
+ unsigned long long *base, unsigned long long *size, u32 *aspace,
+ u32 *cycle, u32 *width);
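+
+As an illustrative sketch, enabling a 64kB window over a device's registers at
+VME address 'base', using the resource requested above, might look like:
+
+ retval = vme_master_set(res, 1, base, 0x10000, VME_A24,
+ VME_SCT, VME_D16);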
The address spaces, transfer widths and cycle types are the same as described
under resource management, however some of the options are mutually exclusive.
@@ -189,11 +189,11 @@ configure it and retrieve the current settings:
int vme_slave_set (struct vme_resource *res, int enabled,
unsigned long long base, unsigned long long size,
- dma_addr_t mem, vme_address_t aspace, vme_cycle_t cycle);
+ dma_addr_t mem, u32 aspace, u32 cycle);
int vme_slave_get (struct vme_resource *res, int *enabled,
unsigned long long *base, unsigned long long *size,
- dma_addr_t *mem, vme_address_t *aspace, vme_cycle_t *cycle);
+ dma_addr_t *mem, u32 *aspace, u32 *cycle);
The address spaces, transfer widths and cycle types are the same as described
under resource management, however some of the options are mutually exclusive.
@@ -273,8 +273,7 @@ and pattern sources and destinations (where appropriate):
Pattern source:
- struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern,
- vme_pattern_t type);
+ struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type);
PCI source or destination:
@@ -283,7 +282,7 @@ PCI source or destination:
VME source or destination:
struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long base,
- vme_address_t aspace, vme_cycle_t cycle, vme_width_t width);
+ u32 aspace, u32 cycle, u32 width);
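+
+As an illustrative sketch, an attribute describing D32 block transfers from an
+A32 VME address 'vme_base' might be created with:
+
+ struct vme_dma_attr *attr;
+
+ attr = vme_dma_vme_attribute(vme_base, VME_A32, VME_BLT, VME_D32);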
The following function should be used to free an attribute:
@@ -366,10 +365,10 @@ Once a bank of location monitors has been allocated, the following functions
are provided to configure the location and mode of the location monitor:
int vme_lm_set(struct vme_resource *res, unsigned long long base,
- vme_address_t aspace, vme_cycle_t cycle);
+ u32 aspace, u32 cycle);
int vme_lm_get(struct vme_resource *res, unsigned long long *base,
- vme_address_t *aspace, vme_cycle_t *cycle);
+ u32 *aspace, u32 *cycle);
Location Monitor Use
diff --git a/drivers/staging/vme/vme_bridge.h b/drivers/staging/vme/vme_bridge.h
index c2deda2..934949a 100644
--- a/drivers/staging/vme/vme_bridge.h
+++ b/drivers/staging/vme/vme_bridge.h
@@ -15,9 +15,9 @@ struct vme_master_resource {
spinlock_t lock;
int locked;
int number;
- vme_address_t address_attr;
- vme_cycle_t cycle_attr;
- vme_width_t width_attr;
+ u32 address_attr;
+ u32 cycle_attr;
+ u32 width_attr;
struct resource bus_resource;
void __iomem *kern_base;
};
@@ -28,13 +28,13 @@ struct vme_slave_resource {
struct mutex mtx;
int locked;
int number;
- vme_address_t address_attr;
- vme_cycle_t cycle_attr;
+ u32 address_attr;
+ u32 cycle_attr;
};
struct vme_dma_pattern {
u32 pattern;
- vme_pattern_t type;
+ u32 type;
};
struct vme_dma_pci {
@@ -43,9 +43,9 @@ struct vme_dma_pci {
struct vme_dma_vme {
unsigned long long address;
- vme_address_t aspace;
- vme_cycle_t cycle;
- vme_width_t dwidth;
+ u32 aspace;
+ u32 cycle;
+ u32 dwidth;
};
struct vme_dma_list {
@@ -63,7 +63,7 @@ struct vme_dma_resource {
int number;
struct list_head pending;
struct list_head running;
- vme_dma_route_t route_attr;
+ u32 route_attr;
};
struct vme_lm_resource {
@@ -122,17 +122,16 @@ struct vme_bridge {
/* Slave Functions */
int (*slave_get) (struct vme_slave_resource *, int *,
unsigned long long *, unsigned long long *, dma_addr_t *,
- vme_address_t *, vme_cycle_t *);
+ u32 *, u32 *);
int (*slave_set) (struct vme_slave_resource *, int, unsigned long long,
- unsigned long long, dma_addr_t, vme_address_t, vme_cycle_t);
+ unsigned long long, dma_addr_t, u32, u32);
/* Master Functions */
int (*master_get) (struct vme_master_resource *, int *,
- unsigned long long *, unsigned long long *, vme_address_t *,
- vme_cycle_t *, vme_width_t *);
+ unsigned long long *, unsigned long long *, u32 *, u32 *,
+ u32 *);
int (*master_set) (struct vme_master_resource *, int,
- unsigned long long, unsigned long long, vme_address_t,
- vme_cycle_t, vme_width_t);
+ unsigned long long, unsigned long long, u32, u32, u32);
ssize_t (*master_read) (struct vme_master_resource *, void *, size_t,
loff_t);
ssize_t (*master_write) (struct vme_master_resource *, void *, size_t,
@@ -151,10 +150,9 @@ struct vme_bridge {
int (*irq_generate) (struct vme_bridge *, int, int);
/* Location monitor functions */
- int (*lm_set) (struct vme_lm_resource *, unsigned long long,
- vme_address_t, vme_cycle_t);
- int (*lm_get) (struct vme_lm_resource *, unsigned long long *,
- vme_address_t *, vme_cycle_t *);
+ int (*lm_set) (struct vme_lm_resource *, unsigned long long, u32, u32);
+ int (*lm_get) (struct vme_lm_resource *, unsigned long long *, u32 *,
+ u32 *);
int (*lm_attach) (struct vme_lm_resource *, int, void (*callback)(int));
int (*lm_detach) (struct vme_lm_resource *, int);
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index d8dd784..3e8283c 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -3153,11 +3153,7 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
break;
case SIOCGIWNWID: //0x8b03 support
- #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- rc = iwctl_giwnwid(dev, NULL, &(wrq->u.nwid), NULL);
- #else
- rc = -EOPNOTSUPP;
- #endif
+ rc = -EOPNOTSUPP;
break;
// Set frequency/channel
diff --git a/drivers/staging/vt6655/ioctl.c b/drivers/staging/vt6655/ioctl.c
index 432a209..7fd5cc5 100644
--- a/drivers/staging/vt6655/ioctl.c
+++ b/drivers/staging/vt6655/ioctl.c
@@ -300,6 +300,10 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
result = -EFAULT;
break;
}
+ if (sList.uItem > (ULONG_MAX - sizeof(SBSSIDList)) / sizeof(SBSSIDItem)) {
+ result = -EINVAL;
+ break;
+ }
pList = (PSBSSIDList)kmalloc(sizeof(SBSSIDList) + (sList.uItem * sizeof(SBSSIDItem)), (int)GFP_ATOMIC);
if (pList == NULL) {
result = -ENOMEM;
@@ -571,6 +575,10 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
result = -EFAULT;
break;
}
+ if (sNodeList.uItem > (ULONG_MAX - sizeof(SNodeList)) / sizeof(SNodeItem)) {
+ result = -EINVAL;
+ break;
+ }
pNodeList = (PSNodeList)kmalloc(sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)), (int)GFP_ATOMIC);
if (pNodeList == NULL) {
result = -ENOMEM;
diff --git a/drivers/staging/vt6655/iwctl.c b/drivers/staging/vt6655/iwctl.c
index 5e425d1..87288db 100644
--- a/drivers/staging/vt6655/iwctl.c
+++ b/drivers/staging/vt6655/iwctl.c
@@ -151,18 +151,6 @@ int iwctl_giwname(struct net_device *dev,
return 0;
}
-int iwctl_giwnwid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- //wrq->value = 0x100;
- //wrq->disabled = 0;
- //wrq->fixed = 1;
- //return 0;
- return -EOPNOTSUPP;
-}
-
/*
* Wireless Handler : set scan
*/
diff --git a/drivers/staging/vt6655/iwctl.h b/drivers/staging/vt6655/iwctl.h
index 3096de0..d224f91 100644
--- a/drivers/staging/vt6655/iwctl.h
+++ b/drivers/staging/vt6655/iwctl.h
@@ -79,11 +79,6 @@ int iwctl_giwname(struct net_device *dev,
char *wrq,
char *extra);
-int iwctl_giwnwid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra) ;
-
int iwctl_giwsens(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *wrq,
diff --git a/drivers/staging/vt6656/80211mgr.c b/drivers/staging/vt6656/80211mgr.c
index fceec49..39f9842 100644
--- a/drivers/staging/vt6656/80211mgr.c
+++ b/drivers/staging/vt6656/80211mgr.c
@@ -224,8 +224,6 @@ vMgrDecodeBeacon(
}
pItem = (PWLAN_IE)(((PBYTE)pItem) + 2 + pItem->len);
}
-
- return;
}
@@ -248,8 +246,6 @@ vMgrEncodeIBSSATIM(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
pFrame->len = WLAN_HDR_ADDR3_LEN;
-
- return;
}
@@ -270,8 +266,6 @@ vMgrDecodeIBSSATIM(
)
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
-
- return;
}
@@ -298,8 +292,6 @@ vMgrEncodeDisassociation(
pFrame->pwReason = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_DISASSOC_OFF_REASON);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_DISASSOC_OFF_REASON + sizeof(*(pFrame->pwReason));
-
- return;
}
@@ -324,8 +316,6 @@ vMgrDecodeDisassociation(
/* Fixed Fields */
pFrame->pwReason = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_DISASSOC_OFF_REASON);
-
- return;
}
/*+
@@ -352,7 +342,6 @@ vMgrEncodeAssocRequest(
pFrame->pwListenInterval = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_ASSOCREQ_OFF_LISTEN_INT);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_ASSOCREQ_OFF_LISTEN_INT + sizeof(*(pFrame->pwListenInterval));
- return;
}
@@ -418,7 +407,6 @@ vMgrDecodeAssocRequest(
}
pItem = (PWLAN_IE)(((PBYTE)pItem) + 2 + pItem->len);
}
- return;
}
/*+
@@ -448,8 +436,6 @@ vMgrEncodeAssocResponse(
+ WLAN_ASSOCRESP_OFF_AID);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_ASSOCRESP_OFF_AID
+ sizeof(*(pFrame->pwAid));
-
- return;
}
@@ -491,10 +477,8 @@ vMgrDecodeAssocResponse(
if ((((PBYTE)pItem) < (pFrame->pBuf + pFrame->len)) && (pItem->byElementID == WLAN_EID_EXTSUPP_RATES)) {
pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pFrame->pExtSuppRates=[%p].\n", pItem);
- } else {
+ } else
pFrame->pExtSuppRates = NULL;
- }
- return;
}
@@ -524,8 +508,6 @@ vMgrEncodeReassocRequest(
pFrame->pAddrCurrAP = (PIEEE_ADDR)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_REASSOCREQ_OFF_CURR_AP);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_REASSOCREQ_OFF_CURR_AP + sizeof(*(pFrame->pAddrCurrAP));
-
- return;
}
@@ -578,10 +560,9 @@ vMgrDecodeReassocRequest(
pFrame->pRSN = (PWLAN_IE_RSN)pItem;
break;
case WLAN_EID_RSN_WPA:
- if (pFrame->pRSNWPA == NULL) {
+ if (pFrame->pRSNWPA == NULL)
if (WPAb_Is_RSN((PWLAN_IE_RSN_EXT)pItem) == TRUE)
pFrame->pRSNWPA = (PWLAN_IE_RSN_EXT)pItem;
- }
break;
case WLAN_EID_EXTSUPP_RATES:
@@ -595,7 +576,6 @@ vMgrDecodeReassocRequest(
}
pItem = (PWLAN_IE)(((PBYTE)pItem) + 2 + pItem->len);
}
- return;
}
@@ -619,7 +599,6 @@ vMgrEncodeProbeRequest(
{
pFrame->pHdr = (PUWLAN_80211HDR)pFrame->pBuf;
pFrame->len = WLAN_HDR_ADDR3_LEN;
- return;
}
/*+
@@ -670,7 +649,6 @@ vMgrDecodeProbeRequest(
pItem = (PWLAN_IE)(((PBYTE)pItem) + 2 + pItem->len);
}
- return;
}
@@ -703,8 +681,6 @@ vMgrEncodeProbeResponse(
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_PROBERESP_OFF_CAP_INFO +
sizeof(*(pFrame->pwCapInfo));
-
- return;
}
@@ -818,7 +794,6 @@ vMgrDecodeProbeResponse(
pItem = (PWLAN_IE)(((PBYTE)pItem) + 2 + pItem->len);
}
- return;
}
@@ -848,7 +823,6 @@ vMgrEncodeAuthen(
pFrame->pwStatus = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_AUTHEN_OFF_STATUS);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_AUTHEN_OFF_STATUS + sizeof(*(pFrame->pwStatus));
- return;
}
@@ -886,7 +860,6 @@ vMgrDecodeAuthen(
if ((((PBYTE)pItem) < (pFrame->pBuf + pFrame->len)) && (pItem->byElementID == WLAN_EID_CHALLENGE))
pFrame->pChallenge = (PWLAN_IE_CHALLENGE)pItem;
- return;
}
@@ -912,7 +885,6 @@ vMgrEncodeDeauthen(
pFrame->pwReason = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_DEAUTHEN_OFF_REASON);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_DEAUTHEN_OFF_REASON + sizeof(*(pFrame->pwReason));
- return;
}
@@ -937,7 +909,6 @@ vMgrDecodeDeauthen(
/* Fixed Fields */
pFrame->pwReason = (PWORD)(WLAN_HDR_A3_DATA_PTR(&(pFrame->pHdr->sA3))
+ WLAN_DEAUTHEN_OFF_REASON);
- return;
}
@@ -968,7 +939,6 @@ vMgrEncodeReassocResponse(
+ WLAN_REASSOCRESP_OFF_AID);
pFrame->len = WLAN_HDR_ADDR3_LEN + WLAN_REASSOCRESP_OFF_AID + sizeof(*(pFrame->pwAid));
- return;
}
@@ -1010,5 +980,4 @@ vMgrDecodeReassocResponse(
if ((((PBYTE)pItem) < (pFrame->pBuf + pFrame->len)) && (pItem->byElementID == WLAN_EID_EXTSUPP_RATES))
pFrame->pExtSuppRates = (PWLAN_IE_SUPP_RATES)pItem;
- return;
}
diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
index 0d11147..06f27f6 100644
--- a/drivers/staging/vt6656/baseband.c
+++ b/drivers/staging/vt6656/baseband.c
@@ -932,30 +932,8 @@ BBvCaculateParameter (
void
BBvSetAntennaMode (PSDevice pDevice, BYTE byAntennaMode)
{
- //{{ RobertYu: 20041124, ABG Mode, VC1/VC2 define, make the ANT_A, ANT_B inverted
- /*if ( (pDevice->byRFType == RF_MAXIM2829) ||
- (pDevice->byRFType == RF_UW2452) ||
- (pDevice->byRFType == RF_AIROHA7230) ) { // RobertYu: 20041210, 20050104
-
- switch (byAntennaMode) {
- case ANT_TXA:
- byAntennaMode = ANT_TXB;
- break;
- case ANT_TXB:
- byAntennaMode = ANT_TXA;
- break;
- case ANT_RXA:
- byAntennaMode = ANT_RXB;
- break;
- case ANT_RXB:
- byAntennaMode = ANT_RXA;
- break;
- }
- }*/
-
switch (byAntennaMode) {
case ANT_TXA:
- break;
case ANT_TXB:
break;
case ANT_RXA:
@@ -1249,8 +1227,7 @@ void BBvLoopbackOff (PSDevice pDevice)
// Set the CR33 Bit2 to disable internal Loopback.
ControlvReadByte (pDevice, MESSAGE_REQUEST_BBREG, 0x21, &byData);//CR33
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x21, (BYTE)(byData & 0xFE));//CR33
- }
- else { // OFDM
+ } else { /* OFDM */
ControlvReadByte (pDevice, MESSAGE_REQUEST_BBREG, 0x9A, &byData);//CR154
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x9A, (BYTE)(byData & 0xFE));//CR154
}
@@ -1277,19 +1254,16 @@ BBvSetShortSlotTime (PSDevice pDevice)
{
BYTE byBBVGA=0;
- if (pDevice->bShortSlotTime) {
+ if (pDevice->bShortSlotTime)
pDevice->byBBRxConf &= 0xDF;//1101 1111
- } else {
+ else
pDevice->byBBRxConf |= 0x20;//0010 0000
- }
ControlvReadByte (pDevice, MESSAGE_REQUEST_BBREG, 0xE7, &byBBVGA);
- if (byBBVGA == pDevice->abyBBVGA[0]) {
+ if (byBBVGA == pDevice->abyBBVGA[0])
pDevice->byBBRxConf |= 0x20;//0010 0000
- }
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x0A, pDevice->byBBRxConf);
-
}
@@ -1299,13 +1273,11 @@ void BBvSetVGAGainOffset(PSDevice pDevice, BYTE byData)
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0xE7, byData);
// patch for 3253B0 Baseband with Cardbus module
- if (byData == pDevice->abyBBVGA[0]) {
- pDevice->byBBRxConf |= 0x20;//0010 0000
- } else if (pDevice->bShortSlotTime) {
- pDevice->byBBRxConf &= 0xDF;//1101 1111
- } else {
- pDevice->byBBRxConf |= 0x20;//0010 0000
- }
+ if (pDevice->bShortSlotTime)
+ pDevice->byBBRxConf &= 0xDF; /* 1101 1111 */
+ else
+ pDevice->byBBRxConf |= 0x20; /* 0010 0000 */
+
ControlvWriteByte(pDevice, MESSAGE_REQUEST_BBREG, 0x0A, pDevice->byBBRxConf);//CR10
}
@@ -1365,15 +1337,14 @@ static unsigned long s_ulGetLowSQ3(PSDevice pDevice)
unsigned long ulMaxPacket;
ulMaxPacket = pDevice->aulPktNum[RATE_54M];
- if ( pDevice->aulPktNum[RATE_54M] != 0 ) {
+ if (pDevice->aulPktNum[RATE_54M] != 0)
ulSQ3 = pDevice->aulSQ3Val[RATE_54M] / pDevice->aulPktNum[RATE_54M];
- }
- for ( ii=RATE_48M;ii>=RATE_6M;ii-- ) {
- if ( pDevice->aulPktNum[ii] > ulMaxPacket ) {
+
+ for (ii = RATE_48M; ii >= RATE_6M; ii--)
+ if (pDevice->aulPktNum[ii] > ulMaxPacket) {
ulMaxPacket = pDevice->aulPktNum[ii];
ulSQ3 = pDevice->aulSQ3Val[ii] / pDevice->aulPktNum[ii];
}
- }
return ulSQ3;
}
@@ -1392,7 +1363,7 @@ static unsigned long s_ulGetRatio(PSDevice pDevice)
ulRatio = (ulPacketNum * 1000 / pDevice->uDiversityCnt);
ulRatio += TOP_RATE_54M;
}
- for ( ii=RATE_48M;ii>=RATE_1M;ii-- ) {
+ for (ii = RATE_48M; ii >= RATE_1M; ii--)
if ( pDevice->aulPktNum[ii] > ulMaxPacket ) {
ulPacketNum = 0;
for ( jj=RATE_54M;jj>=ii;jj--)
@@ -1402,8 +1373,6 @@ static unsigned long s_ulGetRatio(PSDevice pDevice)
ulMaxPacket = pDevice->aulPktNum[ii];
}
- }
-
return ulRatio;
}
@@ -1589,7 +1558,6 @@ void TimerSQ3CallBack(void *hDeviceContext)
spin_unlock_irq(&pDevice->lock);
- return;
}
@@ -1637,7 +1605,6 @@ void TimerSQ3Tmax3CallBack(void *hDeviceContext)
add_timer(&pDevice->TimerSQ3Tmax1);
spin_unlock_irq(&pDevice->lock);
- return;
}
void
diff --git a/drivers/staging/vt6656/bssdb.c b/drivers/staging/vt6656/bssdb.c
index af006df..32c67ed 100644
--- a/drivers/staging/vt6656/bssdb.c
+++ b/drivers/staging/vt6656/bssdb.c
@@ -216,26 +216,6 @@ PKnownBSS BSSpSearchBSSList(void *hDeviceContext,
continue;
}
}
-/*
- if (pMgmt->eAuthenMode < WMAC_AUTH_WPA) {
- if (pCurrBSS->bWPAValid == TRUE) {
- // WPA AP will reject connection of station without WPA enable.
- continue;
- }
- } else if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPAPSK)) {
- if (pCurrBSS->bWPAValid == FALSE) {
- // station with WPA enable can't join NonWPA AP.
- continue;
- }
- } else if ((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
- (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) {
- if (pCurrBSS->bWPA2Valid == FALSE) {
- // station with WPA2 enable can't join NonWPA2 AP.
- continue;
- }
- }
-*/
pMgmt->pSameBSS[jj].uChannel = pCurrBSS->uChannel;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BSSpSearchBSSList pSelect1[%02X %02X %02X-%02X %02X %02X]\n",*pCurrBSS->abyBSSID,*(pCurrBSS->abyBSSID+1),*(pCurrBSS->abyBSSID+2),*(pCurrBSS->abyBSSID+3),*(pCurrBSS->abyBSSID+4),*(pCurrBSS->abyBSSID+5));
@@ -300,18 +280,11 @@ void BSSvClearBSSList(void *hDeviceContext, BOOL bKeepCurrBSSID)
continue;
}
}
-/*
- if ((pMgmt->sBSSList[ii].bActive) && (pMgmt->sBSSList[ii].uClearCount < BSS_CLEAR_COUNT)) {
- pMgmt->sBSSList[ii].uClearCount ++;
- continue;
- }
-*/
- pMgmt->sBSSList[ii].bActive = FALSE;
+
+ pMgmt->sBSSList[ii].bActive = FALSE;
memset(&pMgmt->sBSSList[ii], 0, sizeof(KnownBSS));
}
BSSvClearAnyBSSJoinRecord(pDevice);
-
- return;
}
@@ -524,46 +497,6 @@ BOOL BSSbInsertToBSSList(void *hDeviceContext,
pBSSList->ldBmAverage[ii] = 0;
}
-/*
- if ((pIE_Country != NULL) &&
- (pMgmt->b11hEnable == TRUE)) {
- CARDvSetCountryInfo(pMgmt->pAdapter,
- pBSSList->eNetworkTypeInUse,
- pIE_Country);
- }
-
- if ((bParsingQuiet == TRUE) && (pIE_Quiet != NULL)) {
- if ((((PWLAN_IE_QUIET)pIE_Quiet)->len == 8) &&
- (((PWLAN_IE_QUIET)pIE_Quiet)->byQuietCount != 0)) {
- // valid EID
- if (pQuiet == NULL) {
- pQuiet = (PWLAN_IE_QUIET)pIE_Quiet;
- CARDbSetQuiet( pMgmt->pAdapter,
- TRUE,
- pQuiet->byQuietCount,
- pQuiet->byQuietPeriod,
- *((PWORD)pQuiet->abyQuietDuration),
- *((PWORD)pQuiet->abyQuietOffset)
- );
- } else {
- pQuiet = (PWLAN_IE_QUIET)pIE_Quiet;
- CARDbSetQuiet( pMgmt->pAdapter,
- FALSE,
- pQuiet->byQuietCount,
- pQuiet->byQuietPeriod,
- *((PWORD)pQuiet->abyQuietDuration),
- *((PWORD)pQuiet->abyQuietOffset)
- );
- }
- }
- }
-
- if ((bParsingQuiet == TRUE) &&
- (pQuiet != NULL)) {
- CARDbStartQuiet(pMgmt->pAdapter);
- }
-*/
-
pBSSList->uIELength = uIELength;
if (pBSSList->uIELength > WLAN_BEACON_FR_MAXLEN)
pBSSList->uIELength = WLAN_BEACON_FR_MAXLEN;
@@ -609,8 +542,6 @@ BOOL BSSbUpdateToBSSList(void *hDeviceContext,
PSRxMgmtPacket pRxPacket = (PSRxMgmtPacket)pRxPacketContext;
signed long ldBm, ldBmSum;
BOOL bParsingQuiet = FALSE;
- // BYTE abyTmpSSID[WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1];
-
if (pBSSList == NULL)
return FALSE;
@@ -622,7 +553,6 @@ BOOL BSSbUpdateToBSSList(void *hDeviceContext,
pBSSList->wCapInfo = cpu_to_le16(wCapInfo);
pBSSList->uClearCount = 0;
pBSSList->uChannel = byCurrChannel;
-// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BSSbUpdateToBSSList: pBSSList->uChannel: %d\n", pBSSList->uChannel);
if (pSSID->len > WLAN_SSID_MAXLEN)
pSSID->len = WLAN_SSID_MAXLEN;
@@ -809,7 +739,6 @@ void BSSvCreateOneNode(void *hDeviceContext, unsigned int *puNodeIndex)
pMgmt->sNodeDBTable[*puNodeIndex].byAuthSequence = 0;
pMgmt->sNodeDBTable[*puNodeIndex].wEnQueueCnt = 0;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Create node index = %d\n", ii);
- return;
};
@@ -840,8 +769,6 @@ void BSSvRemoveOneNode(void *hDeviceContext, unsigned int uNodeIndex)
memset(&pMgmt->sNodeDBTable[uNodeIndex], 0, sizeof(KnownNodeDB));
// clear tx bit map
pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[uNodeIndex].wAID >> 3] &= ~byMask[pMgmt->sNodeDBTable[uNodeIndex].wAID & 7];
-
- return;
};
/*+
*
@@ -1054,10 +981,6 @@ if((pMgmt->eCurrState!=WMAC_STATE_ASSOC) &&
// Rate fallback check
if (!pDevice->bFixRate) {
-/*
- if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (ii == 0))
- RATEvTxRateFallBack(pDevice, &(pMgmt->sNodeDBTable[ii]));
-*/
if (ii > 0) {
// ii = 0 for multicast node (AP & Adhoc)
RATEvTxRateFallBack((void *)pDevice,
@@ -1152,7 +1075,6 @@ if((pMgmt->eCurrState!=WMAC_STATE_ASSOC) &&
(pMgmt->eCurrMode == WMAC_MODE_ESS_STA)) {
if (pMgmt->sNodeDBTable[0].bActive) { // Assoc with BSS
- // DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "Callback inactive Count = [%d]\n", pMgmt->sNodeDBTable[0].uInActiveCount);
if (pDevice->bUpdateBBVGA) {
/* s_vCheckSensitivity((void *) pDevice); */
@@ -1194,7 +1116,6 @@ if((pMgmt->eCurrState!=WMAC_STATE_ASSOC) &&
pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
}
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- // if(pDevice->bWPASuppWextEnabled == TRUE)
{
union iwreq_data wrqu;
memset(&wrqu, 0, sizeof (wrqu));
@@ -1223,7 +1144,6 @@ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "bIsRoaming %d, !\n", pDevice->bIsRoaming );
pDevice->uIsroamingTime = 0;
pDevice->bRoaming = FALSE;
-// if ((pDevice->bWPADEVUp) && (pDevice->skb != NULL)) {
wpahdr = (viawget_wpa_header *)pDevice->skb->data;
wpahdr->type = VIAWGET_CCKM_ROAM_MSG;
wpahdr->resp_ie_len = 0;
@@ -1237,7 +1157,6 @@ DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "bIsRoaming %d, !\n", pDevice->bIsRoaming );
netif_rx(pDevice->skb);
pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
-// }
}
else if ((pDevice->bRoaming == FALSE)&&(pDevice->bIsRoaming == TRUE)) {
pDevice->uIsroamingTime++;
@@ -1315,7 +1234,6 @@ else {
pMgmt->sTimerSecondCallback.expires = RUN_AT(HZ);
add_timer(&pMgmt->sTimerSecondCallback);
- return;
}
/*+
@@ -1364,7 +1282,6 @@ void BSSvUpdateNodeTxCounter(void *hDeviceContext,
// Only Unicast using support rates
if (wFIFOCtl & FIFOCTL_NEEDACK) {
- //DBG_PRN_GRP21(("Device %08X, wRate %04X, byTSR %02X\n", hDeviceContext, wRate, byTSR));
if (pMgmt->eCurrMode == WMAC_MODE_ESS_STA) {
pMgmt->sNodeDBTable[0].uTxAttempts += 1;
if ( !(byTSR & (TSR_TMO | TSR_RETRYTMO))) {
@@ -1475,10 +1392,6 @@ void BSSvUpdateNodeTxCounter(void *hDeviceContext,
}
}
}
-
- return;
-
-
}
/*+
@@ -1519,8 +1432,6 @@ void BSSvClearNodeDBTable(void *hDeviceContext,
memset(&pMgmt->sNodeDBTable[ii], 0, sizeof(KnownNodeDB));
}
}
-
- return;
};
void s_vCheckSensitivity(void *hDeviceContext)
@@ -1584,7 +1495,6 @@ RxOkRatio = (RxCnt < 6) ? 2000:((pDevice->scStatistic.RxOkCnt * 2000) / RxCnt);
//decide link quality
if(pDevice->bLinkPass !=TRUE)
{
- // printk("s_uCalculateLinkQual-->Link disconnect and Poor quality**\n");
pDevice->scStatistic.LinkQuality = 0;
pDevice->scStatistic.SignalStren = 0;
}
@@ -1608,7 +1518,6 @@ else
pDevice->scStatistic.TxFailCount = 0;
pDevice->scStatistic.TxNoRetryOkCount = 0;
pDevice->scStatistic.TxRetryOkCount = 0;
- return;
}
void BSSvClearAnyBSSJoinRecord(void *hDeviceContext)
@@ -1617,10 +1526,8 @@ void BSSvClearAnyBSSJoinRecord(void *hDeviceContext)
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
unsigned int ii;
- for (ii = 0; ii < MAX_BSS_NUM; ii++) {
+ for (ii = 0; ii < MAX_BSS_NUM; ii++)
pMgmt->sBSSList[ii].bSelected = FALSE;
- }
- return;
}
void s_vCheckPreEDThreshold(void *hDeviceContext)
@@ -1637,6 +1544,5 @@ void s_vCheckPreEDThreshold(void *hDeviceContext)
BBvUpdatePreEDThreshold(pDevice, FALSE);
}
}
- return;
}
diff --git a/drivers/staging/vt6656/card.c b/drivers/staging/vt6656/card.c
index a49053b..9d09e9f 100644
--- a/drivers/staging/vt6656/card.c
+++ b/drivers/staging/vt6656/card.c
@@ -91,15 +91,10 @@ const WORD cwRXBCNTSFOff[MAX_RATE] =
* uConnectionChannel - Channel to be set
* Out:
* none
- *
- * Return Value: TRUE if succeeded; FALSE if failed.
- *
*/
-BOOL CARDbSetMediaChannel(void *pDeviceHandler, unsigned int uConnectionChannel)
+void CARDbSetMediaChannel(void *pDeviceHandler, unsigned int uConnectionChannel)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
-BOOL bResult = TRUE;
-
if (pDevice->byBBType == BB_TYPE_11A) { // 15 ~ 38
if ((uConnectionChannel < (CB_MAX_CHANNEL_24G+1)) || (uConnectionChannel > CB_MAX_CHANNEL))
@@ -140,7 +135,6 @@ BOOL bResult = TRUE;
RFbRawSetPower(pDevice, pDevice->abyCCKPwrTbl[uConnectionChannel-1], RATE_1M);
}
ControlvWriteByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_CHANNEL,(BYTE)(uConnectionChannel|0x80));
- return(bResult);
}
/*
@@ -607,7 +601,7 @@ BYTE ii;
* Return Value: TRUE if succeeded; FALSE if failed.
*
*/
-BOOL CARDbAddBasicRate(void *pDeviceHandler, WORD wRateIdx)
+void CARDbAddBasicRate(void *pDeviceHandler, WORD wRateIdx)
{
PSDevice pDevice = (PSDevice) pDeviceHandler;
WORD wRate = (WORD)(1<<wRateIdx);
@@ -616,8 +610,6 @@ WORD wRate = (WORD)(1<<wRateIdx);
//Determines the highest basic rate.
CARDvUpdateBasicTopRate(pDevice);
-
- return(TRUE);
}
BOOL CARDbIsOFDMinBasicRate(void *pDeviceHandler)
@@ -1090,7 +1082,7 @@ CARDbChannelSwitch (
if (byCount == 0) {
pDevice->sMgmtObj.uCurrChannel = byNewChannel;
- bResult = CARDbSetMediaChannel(pDevice, byNewChannel);
+ CARDbSetMediaChannel(pDevice, byNewChannel);
return bResult;
}
diff --git a/drivers/staging/vt6656/card.h b/drivers/staging/vt6656/card.h
index 6c91343..9cf71a3 100644
--- a/drivers/staging/vt6656/card.h
+++ b/drivers/staging/vt6656/card.h
@@ -60,12 +60,12 @@ typedef enum _CARD_OP_MODE {
/*--------------------- Export Functions --------------------------*/
-BOOL CARDbSetMediaChannel(void *pDeviceHandler,
+void CARDbSetMediaChannel(void *pDeviceHandler,
unsigned int uConnectionChannel);
void CARDvSetRSPINF(void *pDeviceHandler, BYTE byBBType);
void vUpdateIFS(void *pDeviceHandler);
void CARDvUpdateBasicTopRate(void *pDeviceHandler);
-BOOL CARDbAddBasicRate(void *pDeviceHandler, WORD wRateIdx);
+void CARDbAddBasicRate(void *pDeviceHandler, WORD wRateIdx);
BOOL CARDbIsOFDMinBasicRate(void *pDeviceHandler);
void CARDvAdjustTSF(void *pDeviceHandler, BYTE byRxRate,
QWORD qwBSSTimestamp, QWORD qwLocalTSF);
diff --git a/drivers/staging/vt6656/int.c b/drivers/staging/vt6656/int.c
index c95833a..0a11423 100644
--- a/drivers/staging/vt6656/int.c
+++ b/drivers/staging/vt6656/int.c
@@ -92,9 +92,8 @@ void INTvWorkItem(void *Context)
spin_unlock_irq(&pDevice->lock);
}
-int INTnsProcessData(PSDevice pDevice)
+void INTnsProcessData(PSDevice pDevice)
{
- int status = STATUS_SUCCESS;
PSINTData pINTData;
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
struct net_device_stats *pStats = &pDevice->stats;
@@ -218,6 +217,4 @@ int INTnsProcessData(PSDevice pDevice)
pDevice->scStatistic.ullTxBroadcastBytes;
pStats->tx_errors = pDevice->scStatistic.dwTsrErr;
pStats->tx_dropped = pDevice->scStatistic.dwTsrErr;
-
- return status;
}
diff --git a/drivers/staging/vt6656/int.h b/drivers/staging/vt6656/int.h
index 3176c8d..a5d96b9 100644
--- a/drivers/staging/vt6656/int.h
+++ b/drivers/staging/vt6656/int.h
@@ -68,6 +68,6 @@ SINTData, *PSINTData;
/*--------------------- Export Functions --------------------------*/
void INTvWorkItem(void *Context);
-int INTnsProcessData(PSDevice pDevice);
+void INTnsProcessData(PSDevice pDevice);
#endif /* __INT_H__ */
diff --git a/drivers/staging/vt6656/ioctl.c b/drivers/staging/vt6656/ioctl.c
index 4939002..1463d76 100644
--- a/drivers/staging/vt6656/ioctl.c
+++ b/drivers/staging/vt6656/ioctl.c
@@ -295,6 +295,10 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
result = -EFAULT;
break;
}
+ if (sList.uItem > (ULONG_MAX - sizeof(SBSSIDList)) / sizeof(SBSSIDItem)) {
+ result = -EINVAL;
+ break;
+ }
pList = (PSBSSIDList)kmalloc(sizeof(SBSSIDList) + (sList.uItem * sizeof(SBSSIDItem)), (int)GFP_ATOMIC);
if (pList == NULL) {
result = -ENOMEM;
@@ -557,6 +561,10 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
result = -EFAULT;
break;
}
+ if (sNodeList.uItem > (ULONG_MAX - sizeof(SNodeList)) / sizeof(SNodeItem)) {
+ result = -EINVAL;
+ break;
+ }
pNodeList = (PSNodeList)kmalloc(sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)), (int)GFP_ATOMIC);
if (pNodeList == NULL) {
result = -ENOMEM;
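
Both hunks above guard the kmalloc() size computation against integer overflow before the user-supplied item count is multiplied by the element size. A minimal sketch of the same pattern, assuming hypothetical list_hdr/list_item structures and an alloc_user_list() helper that are not part of the driver (the driver itself allocates with GFP_ATOMIC):

#include <linux/err.h>
#include <linux/kernel.h>	/* ULONG_MAX */
#include <linux/slab.h>		/* kmalloc(), GFP_KERNEL */
#include <linux/types.h>

struct list_item { u32 value; };			/* illustrative */
struct list_hdr { unsigned long count; struct list_item items[]; };

static struct list_hdr *alloc_user_list(unsigned long count)
{
	struct list_hdr *list;

	/* Reject counts whose total byte size would wrap past ULONG_MAX. */
	if (count > (ULONG_MAX - sizeof(*list)) / sizeof(struct list_item))
		return ERR_PTR(-EINVAL);

	list = kmalloc(sizeof(*list) + count * sizeof(struct list_item),
		       GFP_KERNEL);
	if (!list)
		return ERR_PTR(-ENOMEM);

	list->count = count;
	return list;
}

The check divides instead of multiplying so that the comparison itself cannot overflow.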
diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c
index 2121205..ecfda52 100644
--- a/drivers/staging/vt6656/iwctl.c
+++ b/drivers/staging/vt6656/iwctl.c
@@ -128,17 +128,6 @@ int iwctl_giwname(struct net_device *dev,
return 0;
}
-int iwctl_giwnwid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra)
-{
- //wrq->value = 0x100;
- //wrq->disabled = 0;
- //wrq->fixed = 1;
- //return 0;
- return -EOPNOTSUPP;
-}
/*
* Wireless Handler : set scan
*/
@@ -1939,7 +1928,6 @@ static const iw_handler iwctl_handler[] =
(iw_handler) iwctl_commit, // SIOCSIWCOMMIT
(iw_handler) iwctl_giwname, // SIOCGIWNAME
(iw_handler) NULL, // SIOCSIWNWID
- (iw_handler) NULL, // SIOCGIWNWID
(iw_handler) iwctl_siwfreq, // SIOCSIWFREQ
(iw_handler) iwctl_giwfreq, // SIOCGIWFREQ
(iw_handler) iwctl_siwmode, // SIOCSIWMODE
diff --git a/drivers/staging/vt6656/iwctl.h b/drivers/staging/vt6656/iwctl.h
index cc48954..10a240e 100644
--- a/drivers/staging/vt6656/iwctl.h
+++ b/drivers/staging/vt6656/iwctl.h
@@ -77,11 +77,6 @@ int iwctl_giwname(struct net_device *dev,
char *wrq,
char *extra);
-int iwctl_giwnwid(struct net_device *dev,
- struct iw_request_info *info,
- struct iw_param *wrq,
- char *extra) ;
-
int iwctl_giwsens(struct net_device *dev,
struct iw_request_info *info,
struct iw_param *wrq,
diff --git a/drivers/staging/vt6656/mac.c b/drivers/staging/vt6656/mac.c
index 26c19d1..af4a29d 100644
--- a/drivers/staging/vt6656/mac.c
+++ b/drivers/staging/vt6656/mac.c
@@ -133,10 +133,9 @@ void MACvWriteMultiAddr(PSDevice pDevice, unsigned int uByteIdx, BYTE byData)
* Out:
* none
*
- * Return Value: TRUE if success; otherwise FALSE
*
*/
-BOOL MACbShutdown (PSDevice pDevice)
+void MACbShutdown(PSDevice pDevice)
{
CONTROLnsRequestOutAsyn(pDevice,
MESSAGE_TYPE_MACSHUTDOWN,
@@ -145,7 +144,6 @@ BOOL MACbShutdown (PSDevice pDevice)
0,
NULL
);
- return TRUE;
}
void MACvSetBBType(PSDevice pDevice,BYTE byType)
diff --git a/drivers/staging/vt6656/mac.h b/drivers/staging/vt6656/mac.h
index 491ff5e..147ac50 100644
--- a/drivers/staging/vt6656/mac.h
+++ b/drivers/staging/vt6656/mac.h
@@ -422,7 +422,7 @@
void MACvSetMultiAddrByHash(PSDevice pDevice, BYTE byHashIdx);
void MACvWriteMultiAddr(PSDevice pDevice, unsigned int uByteIdx, BYTE byData);
-BOOL MACbShutdown(PSDevice pDevice);
+void MACbShutdown(PSDevice pDevice);
void MACvSetBBType(PSDevice pDevice, BYTE byType);
void MACvSetMISCFifo(PSDevice pDevice, WORD wOffset, DWORD dwData);
void MACvDisableKeyEntry(PSDevice pDevice, unsigned int uEntryIdx);
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index 27521b6..6a708f4 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -900,7 +900,7 @@ static BOOL device_alloc_bufs(PSDevice pDevice) {
}
// allocate rcb mem
- pDevice->pRCBMem = kmalloc((sizeof(RCB) * pDevice->cbRD), GFP_KERNEL);
+ pDevice->pRCBMem = kzalloc((sizeof(RCB) * pDevice->cbRD), GFP_KERNEL);
if (pDevice->pRCBMem == NULL) {
DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s : alloc rx usb context failed\n", pDevice->dev->name);
goto free_tx;
@@ -912,7 +912,6 @@ static BOOL device_alloc_bufs(PSDevice pDevice) {
pDevice->FirstRecvMngList = NULL;
pDevice->LastRecvMngList = NULL;
pDevice->NumRecvFreeList = 0;
- memset(pDevice->pRCBMem, 0, (sizeof(RCB) * pDevice->cbRD));
pRCB = (PRCB) pDevice->pRCBMem;
for (ii = 0; ii < pDevice->cbRD; ii++) {
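
The kzalloc() call in the hunk above replaces a kmalloc() followed by the memset() that is removed a few lines later; kzalloc() hands back memory that is already zero-filled. A minimal sketch of the equivalence, with an illustrative buffer and length:

	/* before: allocate, then clear by hand */
	buf = kmalloc(len, GFP_KERNEL);
	if (buf)
		memset(buf, 0, len);

	/* after: one call, the memory arrives zeroed */
	buf = kzalloc(len, GFP_KERNEL);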
@@ -1618,15 +1617,8 @@ static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
break;
case SIOCSIWNWID:
- rc = -EOPNOTSUPP;
- break;
-
case SIOCGIWNWID: //0x8b03 support
- #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
- rc = iwctl_giwnwid(dev, NULL, &(wrq->u.nwid), NULL);
- #else
- rc = -EOPNOTSUPP;
- #endif
+ rc = -EOPNOTSUPP;
break;
// Set frequency/channel
@@ -2103,16 +2095,4 @@ static struct usb_driver vt6656_driver = {
#endif /* CONFIG_PM */
};
-static int __init vt6656_init_module(void)
-{
- printk(KERN_NOTICE DEVICE_FULL_DRV_NAM " " DEVICE_VERSION);
- return usb_register(&vt6656_driver);
-}
-
-static void __exit vt6656_cleanup_module(void)
-{
- usb_deregister(&vt6656_driver);
-}
-
-module_init(vt6656_init_module);
-module_exit(vt6656_cleanup_module);
+module_usb_driver(vt6656_driver);
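
module_usb_driver() generates the registration boilerplate that the hunk above deletes. For a driver with nothing extra to do at load or unload time it is roughly equivalent to the following sketch (simplified; the real macro lives in <linux/usb.h> and is built on module_driver()):

static int __init vt6656_driver_init(void)
{
	return usb_register(&vt6656_driver);
}
module_init(vt6656_driver_init);

static void __exit vt6656_driver_exit(void)
{
	usb_deregister(&vt6656_driver);
}
module_exit(vt6656_driver_exit);

The one behavioral difference from the removed vt6656 code is that the printk() of the driver banner at load time is dropped.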
diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
index f958eb4..c3751a7 100644
--- a/drivers/staging/winbond/wbusb.c
+++ b/drivers/staging/winbond/wbusb.c
@@ -865,15 +865,4 @@ static struct usb_driver wb35_driver = {
.disconnect = wb35_disconnect,
};
-static int __init wb35_init(void)
-{
- return usb_register(&wb35_driver);
-}
-
-static void __exit wb35_exit(void)
-{
- usb_deregister(&wb35_driver);
-}
-
-module_init(wb35_init);
-module_exit(wb35_exit);
+module_usb_driver(wb35_driver);
diff --git a/drivers/staging/wlags49_h2/debug.h b/drivers/staging/wlags49_h2/debug.h
index 8d5dddf..811698f 100644
--- a/drivers/staging/wlags49_h2/debug.h
+++ b/drivers/staging/wlags49_h2/debug.h
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/dhfcfg.h b/drivers/staging/wlags49_h2/dhfcfg.h
index 75c279f..147f4c8 100644
--- a/drivers/staging/wlags49_h2/dhfcfg.h
+++ b/drivers/staging/wlags49_h2/dhfcfg.h
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/hcf.c b/drivers/staging/wlags49_h2/hcf.c
index 7dc176a..b0087733 100644
--- a/drivers/staging/wlags49_h2/hcf.c
+++ b/drivers/staging/wlags49_h2/hcf.c
@@ -32,9 +32,9 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * COPYRIGHT 1994 - 1995 by AT&T. All Rights Reserved
- * COPYRIGHT 1996 - 2000 by Lucent Technologies. All Rights Reserved
- * COPYRIGHT 2001 - 2004 by Agere Systems Inc. All Rights Reserved
+ * COPYRIGHT © 1994 - 1995 by AT&T. All Rights Reserved
+ * COPYRIGHT © 1996 - 2000 by Lucent Technologies. All Rights Reserved
+ * COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
diff --git a/drivers/staging/wlags49_h2/hcf.h b/drivers/staging/wlags49_h2/hcf.h
index 0009947..95527b5 100644
--- a/drivers/staging/wlags49_h2/hcf.h
+++ b/drivers/staging/wlags49_h2/hcf.h
@@ -40,9 +40,9 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
-* COPYRIGHT 1994 - 1995 by AT&T. All Rights Reserved
-* COPYRIGHT 1996 - 2000 by Lucent Technologies. All Rights Reserved
-* COPYRIGHT 2001 - 2004 by Agere Systems Inc. All Rights Reserved
+* COPYRIGHT © 1994 - 1995 by AT&T. All Rights Reserved
+* COPYRIGHT © 1996 - 2000 by Lucent Technologies. All Rights Reserved
+* COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
diff --git a/drivers/staging/wlags49_h2/hcfcfg.h b/drivers/staging/wlags49_h2/hcfcfg.h
index 7545bc5..ef60da8 100644
--- a/drivers/staging/wlags49_h2/hcfcfg.h
+++ b/drivers/staging/wlags49_h2/hcfcfg.h
@@ -64,9 +64,9 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
-* COPYRIGHT 1994 - 1995 by AT&T. All Rights Reserved
-* COPYRIGHT 1996 - 2000 by Lucent Technologies. All Rights Reserved
-* COPYRIGHT 2001 - 2004 by Agere Systems Inc. All Rights Reserved
+* COPYRIGHT © 1994 - 1995 by AT&T. All Rights Reserved
+* COPYRIGHT © 1996 - 2000 by Lucent Technologies. All Rights Reserved
+* COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
diff --git a/drivers/staging/wlags49_h2/hcfdef.h b/drivers/staging/wlags49_h2/hcfdef.h
index a62b53a..30744e1 100644
--- a/drivers/staging/wlags49_h2/hcfdef.h
+++ b/drivers/staging/wlags49_h2/hcfdef.h
@@ -33,9 +33,9 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * COPYRIGHT 1994 - 1995 by AT&T. All Rights Reserved
- * COPYRIGHT 1996 - 2000 by Lucent Technologies. All Rights Reserved
- * COPYRIGHT 2001 - 2004 by Agere Systems Inc. All Rights Reserved
+ * COPYRIGHT © 1994 - 1995 by AT&T. All Rights Reserved
+ * COPYRIGHT © 1996 - 2000 by Lucent Technologies. All Rights Reserved
+ * COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
diff --git a/drivers/staging/wlags49_h2/mdd.h b/drivers/staging/wlags49_h2/mdd.h
index b02e3ea..5f951ef 100644
--- a/drivers/staging/wlags49_h2/mdd.h
+++ b/drivers/staging/wlags49_h2/mdd.h
@@ -33,9 +33,9 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
-* COPYRIGHT 1994 - 1995 by AT&T. All Rights Reserved
-* COPYRIGHT 1996 - 2000 by Lucent Technologies. All Rights Reserved
-* COPYRIGHT 2001 - 2004 by Agere Systems Inc. All Rights Reserved
+* COPYRIGHT © 1994 - 1995 by AT&T. All Rights Reserved
+* COPYRIGHT © 1996 - 2000 by Lucent Technologies. All Rights Reserved
+* COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
diff --git a/drivers/staging/wlags49_h2/mmd.c b/drivers/staging/wlags49_h2/mmd.c
index de138c4..c8f5210 100644
--- a/drivers/staging/wlags49_h2/mmd.c
+++ b/drivers/staging/wlags49_h2/mmd.c
@@ -35,7 +35,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
-* COPYRIGHT 2001 - 2004 by Agere Systems Inc. All Rights Reserved
+* COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
diff --git a/drivers/staging/wlags49_h2/mmd.h b/drivers/staging/wlags49_h2/mmd.h
index 06890c1..9149525 100644
--- a/drivers/staging/wlags49_h2/mmd.h
+++ b/drivers/staging/wlags49_h2/mmd.h
@@ -33,7 +33,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
-* COPYRIGHT 2001 - 2004 by Agere Systems Inc. All Rights Reserved
+* COPYRIGHT © 2001 - 2004 by Agere Systems Inc. All Rights Reserved
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
diff --git a/drivers/staging/wlags49_h2/wl_cs.h b/drivers/staging/wlags49_h2/wl_cs.h
index 21f17be..a7ab579 100644
--- a/drivers/staging/wlags49_h2/wl_cs.h
+++ b/drivers/staging/wlags49_h2/wl_cs.h
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_enc.c b/drivers/staging/wlags49_h2/wl_enc.c
index 26cf548..4c6f776 100644
--- a/drivers/staging/wlags49_h2/wl_enc.c
+++ b/drivers/staging/wlags49_h2/wl_enc.c
@@ -23,7 +23,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_enc.h b/drivers/staging/wlags49_h2/wl_enc.h
index b4f54d8..46629f3 100644
--- a/drivers/staging/wlags49_h2/wl_enc.h
+++ b/drivers/staging/wlags49_h2/wl_enc.h
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_if.h b/drivers/staging/wlags49_h2/wl_if.h
index ed2b413..6a26130 100644
--- a/drivers/staging/wlags49_h2/wl_if.h
+++ b/drivers/staging/wlags49_h2/wl_if.h
@@ -23,7 +23,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_internal.h b/drivers/staging/wlags49_h2/wl_internal.h
index 5753408..553601f 100644
--- a/drivers/staging/wlags49_h2/wl_internal.h
+++ b/drivers/staging/wlags49_h2/wl_internal.h
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_main.c b/drivers/staging/wlags49_h2/wl_main.c
index 483eee1..dab603e 100644
--- a/drivers/staging/wlags49_h2/wl_main.c
+++ b/drivers/staging/wlags49_h2/wl_main.c
@@ -23,7 +23,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_main.h b/drivers/staging/wlags49_h2/wl_main.h
index d593ae5..3b5acdf 100644
--- a/drivers/staging/wlags49_h2/wl_main.h
+++ b/drivers/staging/wlags49_h2/wl_main.h
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_netdev.c b/drivers/staging/wlags49_h2/wl_netdev.c
index 5a2b334..9c16f54 100644
--- a/drivers/staging/wlags49_h2/wl_netdev.c
+++ b/drivers/staging/wlags49_h2/wl_netdev.c
@@ -23,7 +23,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_netdev.h b/drivers/staging/wlags49_h2/wl_netdev.h
index 632ab2e..61f040f 100644
--- a/drivers/staging/wlags49_h2/wl_netdev.h
+++ b/drivers/staging/wlags49_h2/wl_netdev.h
@@ -23,7 +23,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_pci.c b/drivers/staging/wlags49_h2/wl_pci.c
index 28ae9dd..2bd9b84 100644
--- a/drivers/staging/wlags49_h2/wl_pci.c
+++ b/drivers/staging/wlags49_h2/wl_pci.c
@@ -23,7 +23,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
@@ -112,17 +112,10 @@ extern dbg_info_t *DbgInfo;
#endif // DBG
/* define the PCI device Table Cardname and id tables */
-enum hermes_pci_versions {
- CH_Agere_Systems_Mini_PCI_V1 = 0,
-};
-
static struct pci_device_id wl_pci_tbl[] __devinitdata = {
- { PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_0,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Agere_Systems_Mini_PCI_V1 },
- { PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_1,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Agere_Systems_Mini_PCI_V1 },
- { PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_2,
- PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Agere_Systems_Mini_PCI_V1 },
+ { PCI_DEVICE(PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_0), },
+ { PCI_DEVICE(PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_1), },
+ { PCI_DEVICE(PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_2), },
{ } /* Terminating entry */
};
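
PCI_DEVICE() fills in the vendor and device IDs and wildcards the subsystem IDs, so each shortened entry matches the same hardware as the long-form initializer it replaces. Assuming the standard definition from <linux/pci.h>, the sketch

	{ PCI_DEVICE(PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_0) }

expands to roughly

	{ .vendor = PCI_VENDOR_ID_WL_LKM, .device = PCI_DEVICE_ID_WL_LKM_0,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID }

The driver_data field is left at 0, which matches the removed CH_Agere_Systems_Mini_PCI_V1 value, so probing behavior is unchanged.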
diff --git a/drivers/staging/wlags49_h2/wl_pci.h b/drivers/staging/wlags49_h2/wl_pci.h
index cea04c4..86831f1 100644
--- a/drivers/staging/wlags49_h2/wl_pci.h
+++ b/drivers/staging/wlags49_h2/wl_pci.h
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_priv.c b/drivers/staging/wlags49_h2/wl_priv.c
index 260d4f0..f30e5ee 100644
--- a/drivers/staging/wlags49_h2/wl_priv.c
+++ b/drivers/staging/wlags49_h2/wl_priv.c
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_priv.h b/drivers/staging/wlags49_h2/wl_priv.h
index 9b02544..b647bfd 100644
--- a/drivers/staging/wlags49_h2/wl_priv.h
+++ b/drivers/staging/wlags49_h2/wl_priv.h
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_profile.c b/drivers/staging/wlags49_h2/wl_profile.c
index a459e48..b8c96cf 100644
--- a/drivers/staging/wlags49_h2/wl_profile.c
+++ b/drivers/staging/wlags49_h2/wl_profile.c
@@ -23,7 +23,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_profile.h b/drivers/staging/wlags49_h2/wl_profile.h
index 81db8e8..f81df51 100644
--- a/drivers/staging/wlags49_h2/wl_profile.h
+++ b/drivers/staging/wlags49_h2/wl_profile.h
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_util.c b/drivers/staging/wlags49_h2/wl_util.c
index 3b6f5a5..b748a3f 100644
--- a/drivers/staging/wlags49_h2/wl_util.c
+++ b/drivers/staging/wlags49_h2/wl_util.c
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_util.h b/drivers/staging/wlags49_h2/wl_util.h
index 2661bcd..946b1b6 100644
--- a/drivers/staging/wlags49_h2/wl_util.h
+++ b/drivers/staging/wlags49_h2/wl_util.h
@@ -23,7 +23,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_version.h b/drivers/staging/wlags49_h2/wl_version.h
index fd37040..3deacfa 100644
--- a/drivers/staging/wlags49_h2/wl_version.h
+++ b/drivers/staging/wlags49_h2/wl_version.h
@@ -23,7 +23,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -44,7 +44,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_wext.c b/drivers/staging/wlags49_h2/wl_wext.c
index 8ac5e10..7ff0a10 100644
--- a/drivers/staging/wlags49_h2/wl_wext.c
+++ b/drivers/staging/wlags49_h2/wl_wext.c
@@ -18,7 +18,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -39,7 +39,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlags49_h2/wl_wext.h b/drivers/staging/wlags49_h2/wl_wext.h
index a713058..029da52 100644
--- a/drivers/staging/wlags49_h2/wl_wext.h
+++ b/drivers/staging/wlags49_h2/wl_wext.h
@@ -22,7 +22,7 @@
* software indicates your acceptance of these terms and conditions. If you do
* not agree with these terms and conditions, do not use the software.
*
- * Copyright 2003 Agere Systems Inc.
+ * Copyright © 2003 Agere Systems Inc.
* All rights reserved.
*
* Redistribution and use in source or binary forms, with or without
@@ -43,7 +43,7 @@
*
* Disclaimer
*
- * THIS SOFTWARE IS PROVIDED AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES,
+ * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY
* USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN
diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
index 4efa027..b1aed1f 100644
--- a/drivers/staging/wlan-ng/prism2usb.c
+++ b/drivers/staging/wlan-ng/prism2usb.c
@@ -358,16 +358,4 @@ static struct usb_driver prism2_usb_driver = {
/* fops, minor? */
};
-static int __init prism2usb_init(void)
-{
- /* This call will result in calls to prism2sta_probe_usb. */
- return usb_register(&prism2_usb_driver);
-};
-
-static void __exit prism2usb_cleanup(void)
-{
- usb_deregister(&prism2_usb_driver);
-};
-
-module_init(prism2usb_init);
-module_exit(prism2usb_cleanup);
+module_usb_driver(prism2_usb_driver);
diff --git a/drivers/staging/xgifb/Makefile b/drivers/staging/xgifb/Makefile
index 3c8c7de..55e5199 100644
--- a/drivers/staging/xgifb/Makefile
+++ b/drivers/staging/xgifb/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_FB_XGI) += xgifb.o
-xgifb-y := XGI_main_26.o vb_init.o vb_setmode.o vb_util.o vb_ext.o
+xgifb-y := XGI_main_26.o vb_init.o vb_setmode.o vb_util.o
diff --git a/drivers/staging/xgifb/XGI_main.h b/drivers/staging/xgifb/XGI_main.h
index 71aebe3..35f7b2a 100644
--- a/drivers/staging/xgifb/XGI_main.h
+++ b/drivers/staging/xgifb/XGI_main.h
@@ -32,14 +32,10 @@
#endif
static DEFINE_PCI_DEVICE_TABLE(xgifb_pci_table) = {
- {PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_20, PCI_ANY_ID, PCI_ANY_ID,
- 0, 0, 0},
- {PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_27, PCI_ANY_ID, PCI_ANY_ID,
- 0, 0, 1},
- {PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_40, PCI_ANY_ID, PCI_ANY_ID,
- 0, 0, 2},
- {PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_42, PCI_ANY_ID, PCI_ANY_ID,
- 0, 0, 3},
+ {PCI_DEVICE(PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_20)},
+ {PCI_DEVICE(PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_27)},
+ {PCI_DEVICE(PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_40)},
+ {PCI_DEVICE(PCI_VENDOR_ID_XG, PCI_DEVICE_ID_XG_42)},
{0}
};
@@ -128,7 +124,6 @@ MODULE_DEVICE_TABLE(pci, xgifb_pci_table);
/* display status */
static int XGIfb_crt1off;
static int XGIfb_forcecrt1 = -1;
-static int XGIfb_userom ;
/* global flags */
static int XGIfb_tvmode;
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 277e408..2502c49 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -21,7 +21,6 @@
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/pci.h>
-#include <linux/vmalloc.h>
#include <linux/vt_kern.h>
#include <linux/capability.h>
#include <linux/fs.h>
@@ -46,8 +45,7 @@
#define GPIOG_EN (1<<6)
#define GPIOG_READ (1<<1)
-#define XGIFB_ROM_SIZE 65536
-
+static char *forcecrt2type;
static char *mode;
static int vesa = -1;
static unsigned int refresh_rate;
@@ -159,7 +157,6 @@ static int XGIfb_mode_rate_to_dclock(struct vb_device_info *XGI_Pr,
/* unsigned long temp = 0; */
int Clock;
- XGI_Pr->ROMAddr = HwDeviceExtension->pjVirtualRomBase;
InitTo330Pointer(HwDeviceExtension->jChipType, XGI_Pr);
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
@@ -199,7 +196,6 @@ static int XGIfb_mode_rate_to_ddata(struct vb_device_info *XGI_Pr,
unsigned char sr_data, cr_data, cr_data2;
unsigned long cr_data3;
int A, B, C, D, E, F, temp, j;
- XGI_Pr->ROMAddr = HwDeviceExtension->pjVirtualRomBase;
InitTo330Pointer(HwDeviceExtension->jChipType, XGI_Pr);
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
ModeIdIndex, XGI_Pr);
@@ -387,7 +383,7 @@ static void XGIRegInit(struct vb_device_info *XGI_Pr, unsigned long BaseAddr)
/* ------------------ Internal helper routines ----------------- */
-static int XGIfb_GetXG21DefaultLVDSModeIdx(void)
+static int XGIfb_GetXG21DefaultLVDSModeIdx(struct xgifb_video_info *xgifb_info)
{
int found_mode = 0;
@@ -396,11 +392,11 @@ static int XGIfb_GetXG21DefaultLVDSModeIdx(void)
found_mode = 0;
while ((XGIbios_mode[XGIfb_mode_idx].mode_no != 0)
&& (XGIbios_mode[XGIfb_mode_idx].xres
- <= XGI21_LCDCapList[0].LVDSHDE)) {
+ <= xgifb_info->lvds_data.LVDSHDE)) {
if ((XGIbios_mode[XGIfb_mode_idx].xres
- == XGI21_LCDCapList[0].LVDSHDE)
+ == xgifb_info->lvds_data.LVDSHDE)
&& (XGIbios_mode[XGIfb_mode_idx].yres
- == XGI21_LCDCapList[0].LVDSVDE)
+ == xgifb_info->lvds_data.LVDSVDE)
&& (XGIbios_mode[XGIfb_mode_idx].bpp == 8)) {
found_mode = 1;
break;
@@ -456,51 +452,6 @@ invalid:
printk(KERN_INFO "XGIfb: Invalid VESA mode 0x%x'\n", vesamode);
}
-static int XGIfb_GetXG21LVDSData(struct xgifb_video_info *xgifb_info)
-{
- u8 tmp;
- void __iomem *data = xgifb_info->mmio_vbase + 0x20000;
- int i, j, k;
-
- tmp = xgifb_reg_get(XGISR, 0x1e);
- xgifb_reg_set(XGISR, 0x1e, tmp | 4);
-
- if ((readb(data) == 0x55) &&
- (readb(data + 1) == 0xAA) &&
- (readb(data + 0x65) & 0x1)) {
- i = readw(data + 0x316);
- j = readb(data + i - 1);
- if (j == 0xff)
- j = 1;
-
- k = 0;
- do {
- XGI21_LCDCapList[k].LVDS_Capability = readw(data + i);
- XGI21_LCDCapList[k].LVDSHT = readw(data + i + 2);
- XGI21_LCDCapList[k].LVDSVT = readw(data + i + 4);
- XGI21_LCDCapList[k].LVDSHDE = readw(data + i + 6);
- XGI21_LCDCapList[k].LVDSVDE = readw(data + i + 8);
- XGI21_LCDCapList[k].LVDSHFP = readw(data + i + 10);
- XGI21_LCDCapList[k].LVDSVFP = readw(data + i + 12);
- XGI21_LCDCapList[k].LVDSHSYNC = readw(data + i + 14);
- XGI21_LCDCapList[k].LVDSVSYNC = readw(data + i + 16);
- XGI21_LCDCapList[k].VCLKData1 = readb(data + i + 18);
- XGI21_LCDCapList[k].VCLKData2 = readb(data + i + 19);
- XGI21_LCDCapList[k].PSC_S1 = readb(data + i + 20);
- XGI21_LCDCapList[k].PSC_S2 = readb(data + i + 21);
- XGI21_LCDCapList[k].PSC_S3 = readb(data + i + 22);
- XGI21_LCDCapList[k].PSC_S4 = readb(data + i + 23);
- XGI21_LCDCapList[k].PSC_S5 = readb(data + i + 24);
- i += 25;
- j--;
- k++;
- } while ((j > 0) && (k < (sizeof(XGI21_LCDCapList)
- / sizeof(struct XGI21_LVDSCapStruct))));
- return 1;
- }
- return 0;
-}
-
static int XGIfb_validate_mode(struct xgifb_video_info *xgifb_info, int myindex)
{
u16 xres, yres;
@@ -508,8 +459,8 @@ static int XGIfb_validate_mode(struct xgifb_video_info *xgifb_info, int myindex)
if (xgifb_info->chip == XG21) {
if (xgifb_info->display2 == XGIFB_DISP_LCD) {
- xres = XGI21_LCDCapList[0].LVDSHDE;
- yres = XGI21_LCDCapList[0].LVDSVDE;
+ xres = xgifb_info->lvds_data.LVDSHDE;
+ yres = xgifb_info->lvds_data.LVDSVDE;
if (XGIbios_mode[myindex].xres > xres)
return -1;
if (XGIbios_mode[myindex].yres > yres)
@@ -1223,7 +1174,7 @@ static int XGIfb_do_set_var(struct fb_var_screeninfo *var, int isactive,
if (isactive) {
XGIfb_pre_setmode(xgifb_info);
- if (XGISetModeNew(hw_info,
+ if (XGISetModeNew(xgifb_info, hw_info,
XGIbios_mode[xgifb_info->mode_idx].mode_no)
== 0) {
printk(KERN_ERR "XGIfb: Setting mode[0x%x] failed\n",
@@ -1794,17 +1745,16 @@ static void XGIfb_detect_VB(struct xgifb_video_info *xgifb_info)
XGIfb_crt1off = 0;
}
- if (XGIfb_crt2type != -1)
- /* TW: Override with option */
- xgifb_info->display2 = XGIfb_crt2type;
- else if (cr32 & XGI_VB_TV)
- xgifb_info->display2 = XGIFB_DISP_TV;
- else if (cr32 & XGI_VB_LCD)
- xgifb_info->display2 = XGIFB_DISP_LCD;
- else if (cr32 & XGI_VB_CRT2)
- xgifb_info->display2 = XGIFB_DISP_CRT;
- else
- xgifb_info->display2 = XGIFB_DISP_NONE;
+ if (!xgifb_info->display2_force) {
+ if (cr32 & XGI_VB_TV)
+ xgifb_info->display2 = XGIFB_DISP_TV;
+ else if (cr32 & XGI_VB_LCD)
+ xgifb_info->display2 = XGIFB_DISP_LCD;
+ else if (cr32 & XGI_VB_CRT2)
+ xgifb_info->display2 = XGIFB_DISP_CRT;
+ else
+ xgifb_info->display2 = XGIFB_DISP_NONE;
+ }
if (XGIfb_tvplug != -1)
/* PR/TW: Override with option */
@@ -1925,8 +1875,6 @@ static int __init XGIfb_setup(char *options)
XGIfb_crt2type = XGIFB_DISP_LCD;
} else if (!strncmp(this_opt, "noypan", 6)) {
XGIfb_ypan = 0;
- } else if (!strncmp(this_opt, "userom:", 7)) {
- XGIfb_userom = xgifb_optval(this_opt, 7);
} else {
mode = this_opt;
}
@@ -1934,35 +1882,12 @@ static int __init XGIfb_setup(char *options)
return 0;
}
-static unsigned char *xgifb_copy_rom(struct pci_dev *dev)
-{
- void __iomem *rom_address;
- unsigned char *rom_copy;
- size_t rom_size;
-
- rom_address = pci_map_rom(dev, &rom_size);
- if (rom_address == NULL)
- return NULL;
-
- rom_copy = vzalloc(XGIFB_ROM_SIZE);
- if (rom_copy == NULL)
- goto done;
-
- rom_size = min_t(size_t, rom_size, XGIFB_ROM_SIZE);
- memcpy_fromio(rom_copy, rom_address, rom_size);
-
-done:
- pci_unmap_rom(dev, rom_address);
- return rom_copy;
-}
-
static int __devinit xgifb_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
u8 reg, reg1;
u8 CR48, CR38;
int ret;
- bool xgi21_drvlcdcaplist = false;
struct fb_info *fb_info;
struct xgifb_video_info *xgifb_info;
struct xgi_hw_device_info *hw_info;
@@ -2001,6 +1926,11 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
goto error;
}
+ if (XGIfb_crt2type != -1) {
+ xgifb_info->display2 = XGIfb_crt2type;
+ xgifb_info->display2_force = true;
+ }
+
XGIRegInit(&xgifb_info->dev_info, (unsigned long)hw_info->pjIOAddress);
xgifb_reg_set(XGISR, IND_XGI_PASSWORD, XGI_PASSWORD);
@@ -2041,18 +1971,6 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
printk("XGIfb:chipid = %x\n", xgifb_info->chip);
hw_info->jChipType = xgifb_info->chip;
- if ((xgifb_info->chip == XG21) || (XGIfb_userom)) {
- hw_info->pjVirtualRomBase = xgifb_copy_rom(pdev);
- if (hw_info->pjVirtualRomBase)
- printk(KERN_INFO "XGIfb: Video ROM found and mapped to %p\n",
- hw_info->pjVirtualRomBase);
- else
- printk(KERN_INFO "XGIfb: Video ROM not found\n");
- } else {
- hw_info->pjVirtualRomBase = NULL;
- printk(KERN_INFO "XGIfb: Video ROM usage disabled\n");
- }
-
if (XGIfb_get_dram_size(xgifb_info)) {
printk(KERN_INFO "XGIfb: Fatal error: Unable to determine RAM size.\n");
ret = -ENODEV;
@@ -2117,8 +2035,6 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
CR38 = xgifb_reg_get(XGICR, 0x38);
if ((CR38&0xE0) == 0xC0) {
xgifb_info->display2 = XGIFB_DISP_LCD;
- if (!XGIfb_GetXG21LVDSData(xgifb_info))
- xgi21_drvlcdcaplist = true;
} else if ((CR38&0xE0) == 0x60) {
xgifb_info->hasVB = HASVB_CHRONTEL;
} else {
@@ -2193,6 +2109,8 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
if (xgifb_info->hasVB != HASVB_NONE)
XGIfb_detect_VB(xgifb_info);
+ else if (xgifb_info->chip != XG21)
+ xgifb_info->display2 = XGIFB_DISP_NONE;
if (xgifb_info->display2 == XGIFB_DISP_LCD) {
if (!enable_dstn) {
@@ -2254,7 +2172,7 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
if (xgifb_info->display2 == XGIFB_DISP_LCD &&
xgifb_info->chip == XG21)
xgifb_info->mode_idx =
- XGIfb_GetXG21DefaultLVDSModeIdx();
+ XGIfb_GetXG21DefaultLVDSModeIdx(xgifb_info);
else
xgifb_info->mode_idx = DEFAULT_MODE;
}
@@ -2264,21 +2182,6 @@ static int __devinit xgifb_probe(struct pci_dev *pdev,
goto error_1;
}
- if (xgi21_drvlcdcaplist) {
- int m;
-
- for (m = 0; m < ARRAY_SIZE(XGI21_LCDCapList); m++)
- if ((XGI21_LCDCapList[m].LVDSHDE ==
- XGIbios_mode[xgifb_info->mode_idx].xres) &&
- (XGI21_LCDCapList[m].LVDSVDE ==
- XGIbios_mode[xgifb_info->mode_idx].yres)) {
- xgifb_reg_set(xgifb_info->dev_info.P3d4,
- 0x36,
- m);
- break;
- }
- }
-
/* yilin set default refresh rate */
xgifb_info->refresh_rate = refresh_rate;
if (xgifb_info->refresh_rate == 0)
@@ -2418,7 +2321,6 @@ error_1:
error_0:
release_mem_region(xgifb_info->video_base, xgifb_info->video_size);
error:
- vfree(hw_info->pjVirtualRomBase);
framebuffer_release(fb_info);
return ret;
}
@@ -2442,7 +2344,6 @@ static void __devexit xgifb_remove(struct pci_dev *pdev)
iounmap(xgifb_info->video_vbase);
release_mem_region(xgifb_info->mmio_base, xgifb_info->mmio_size);
release_mem_region(xgifb_info->video_base, xgifb_info->video_size);
- vfree(xgifb_info->hw_info.pjVirtualRomBase);
framebuffer_release(fb_info);
pci_set_drvdata(pdev, NULL);
}
@@ -2458,6 +2359,8 @@ static int __init xgifb_init(void)
{
char *option = NULL;
+ if (forcecrt2type != NULL)
+ XGIfb_search_crt2type(forcecrt2type);
if (fb_get_options("xgifb", &option))
return -ENODEV;
XGIfb_setup(option);
@@ -2480,6 +2383,11 @@ MODULE_AUTHOR("XGITECH , Others");
module_param(mode, charp, 0);
module_param(vesa, int, 0);
module_param(filter, int, 0);
+module_param(forcecrt2type, charp, 0);
+
+MODULE_PARM_DESC(forcecrt2type,
+ "\nForce the second display output type. Possible values are NONE,\n"
+ "LCD, TV, VGA, SVIDEO or COMPOSITE.\n");
MODULE_PARM_DESC(mode,
"\nSelects the desired default display mode in the format XxYxDepth,\n"
diff --git a/drivers/staging/xgifb/XGIfb.h b/drivers/staging/xgifb/XGIfb.h
index 7611846..2c866bb 100644
--- a/drivers/staging/xgifb/XGIfb.h
+++ b/drivers/staging/xgifb/XGIfb.h
@@ -86,10 +86,13 @@ struct xgifb_video_info {
unsigned int refresh_rate;
enum xgifb_display_type display2; /* the second display output type */
+ bool display2_force;
unsigned char hasVB;
unsigned char TV_type;
unsigned char TV_plug;
+ struct XGI21_LVDSCapStruct lvds_data;
+
enum XGI_CHIP_TYPE chip;
unsigned char revision_id;
diff --git a/drivers/staging/xgifb/vb_ext.c b/drivers/staging/xgifb/vb_ext.c
deleted file mode 100644
index b1a2573..0000000
--- a/drivers/staging/xgifb/vb_ext.c
+++ /dev/null
@@ -1,444 +0,0 @@
-#include <linux/io.h>
-#include <linux/types.h>
-#include "XGIfb.h"
-
-#include "vb_def.h"
-#include "vgatypes.h"
-#include "vb_struct.h"
-#include "vb_util.h"
-#include "vb_setmode.h"
-#include "vb_ext.h"
-
-/**************************************************************
- *********************** Dynamic Sense ************************
- *************************************************************/
-
-static unsigned char XGINew_Is301B(struct vb_device_info *pVBInfo)
-{
- unsigned short flag;
-
- flag = xgifb_reg_get(pVBInfo->Part4Port, 0x01);
-
- if (flag > 0x0B0)
- return 0; /* 301b */
- else
- return 1;
-}
-
-static unsigned char XGINew_Sense(unsigned short tempbx,
- unsigned short tempcx,
- struct vb_device_info *pVBInfo)
-{
- unsigned short temp, i, tempch;
-
- temp = tempbx & 0xFF;
- xgifb_reg_set(pVBInfo->Part4Port, 0x11, temp);
- temp = (tempbx & 0xFF00) >> 8;
- temp |= (tempcx & 0x00FF);
- xgifb_reg_and_or(pVBInfo->Part4Port, 0x10, ~0x1F, temp);
-
- for (i = 0; i < 10; i++)
- XGI_LongWait(pVBInfo);
-
- tempch = (tempcx & 0x7F00) >> 8;
- temp = xgifb_reg_get(pVBInfo->Part4Port, 0x03);
- temp = temp ^ (0x0E);
- temp &= tempch;
-
- if (temp > 0)
- return 1;
- else
- return 0;
-}
-
-static unsigned char
-XGINew_GetLCDDDCInfo(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- unsigned short temp;
-
- /* add lcd sense */
- if (HwDeviceExtension->ulCRT2LCDType == LCD_UNKNOWN) {
- return 0;
- } else {
- temp = (unsigned short) HwDeviceExtension->ulCRT2LCDType;
- switch (HwDeviceExtension->ulCRT2LCDType) {
- case LCD_INVALID:
- case LCD_800x600:
- case LCD_1024x768:
- case LCD_1280x1024:
- break;
-
- case LCD_640x480:
- case LCD_1024x600:
- case LCD_1152x864:
- case LCD_1280x960:
- case LCD_1152x768:
- temp = 0;
- break;
-
- case LCD_1400x1050:
- case LCD_1280x768:
- case LCD_1600x1200:
- break;
-
- case LCD_1920x1440:
- case LCD_2048x1536:
- temp = 0;
- break;
-
- default:
- break;
- }
- xgifb_reg_and_or(pVBInfo->P3d4, 0x36, 0xF0, temp);
- return 1;
- }
-}
-
-static unsigned char XGINew_GetPanelID(struct vb_device_info *pVBInfo)
-{
- unsigned short PanelTypeTable[16] = { SyncNN | PanelRGB18Bit
- | Panel800x600 | _PanelType00, SyncNN | PanelRGB18Bit
- | Panel1024x768 | _PanelType01, SyncNN | PanelRGB18Bit
- | Panel800x600 | _PanelType02, SyncNN | PanelRGB18Bit
- | Panel640x480 | _PanelType03, SyncNN | PanelRGB18Bit
- | Panel1024x768 | _PanelType04, SyncNN | PanelRGB18Bit
- | Panel1024x768 | _PanelType05, SyncNN | PanelRGB18Bit
- | Panel1024x768 | _PanelType06, SyncNN | PanelRGB24Bit
- | Panel1024x768 | _PanelType07, SyncNN | PanelRGB18Bit
- | Panel800x600 | _PanelType08, SyncNN | PanelRGB18Bit
- | Panel1024x768 | _PanelType09, SyncNN | PanelRGB18Bit
- | Panel800x600 | _PanelType0A, SyncNN | PanelRGB18Bit
- | Panel1024x768 | _PanelType0B, SyncNN | PanelRGB18Bit
- | Panel1024x768 | _PanelType0C, SyncNN | PanelRGB24Bit
- | Panel1024x768 | _PanelType0D, SyncNN | PanelRGB18Bit
- | Panel1024x768 | _PanelType0E, SyncNN | PanelRGB18Bit
- | Panel1024x768 | _PanelType0F };
- unsigned short tempax, tempbx, temp;
- /* unsigned short return_flag; */
-
- tempax = xgifb_reg_get(pVBInfo->P3c4, 0x1A);
- tempbx = tempax & 0x1E;
-
- if (tempax == 0)
- return 0;
- else {
- /*
- if (!(tempax & 0x10)) {
- if (pVBInfo->IF_DEF_LVDS == 1) {
- tempbx = 0;
- temp = xgifb_reg_get(pVBInfo->P3c4, 0x38);
- if (temp & 0x40)
- tempbx |= 0x08;
- if (temp & 0x20)
- tempbx |= 0x02;
- if (temp & 0x01)
- tempbx |= 0x01;
-
- temp = xgifb_reg_get(pVBInfo->P3c4, 0x39);
- if (temp & 0x80)
- tempbx |= 0x04;
- } else {
- return(0);
- }
- }
- */
-
- tempbx = tempbx >> 1;
- temp = tempbx & 0x00F;
- xgifb_reg_set(pVBInfo->P3d4, 0x36, temp);
- tempbx--;
- tempbx = PanelTypeTable[tempbx];
-
- temp = (tempbx & 0xFF00) >> 8;
- xgifb_reg_and_or(pVBInfo->P3d4, 0x37, ~(LCDSyncBit
- | LCDRGB18Bit), temp);
- return 1;
- }
-}
-
-static unsigned char
-XGINew_BridgeIsEnable(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- unsigned short flag;
-
- if (XGI_BridgeIsOn(pVBInfo) == 0) {
- flag = xgifb_reg_get(pVBInfo->Part1Port, 0x0);
-
- if (flag & 0x050)
- return 1;
- else
- return 0;
-
- }
- return 0;
-}
-
-static unsigned char
-XGINew_SenseHiTV(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- unsigned short tempbx, tempcx, temp, i, tempch;
-
- tempbx = *pVBInfo->pYCSenseData2;
-
- tempcx = 0x0604;
-
- temp = tempbx & 0xFF;
- xgifb_reg_set(pVBInfo->Part4Port, 0x11, temp);
- temp = (tempbx & 0xFF00) >> 8;
- temp |= (tempcx & 0x00FF);
- xgifb_reg_and_or(pVBInfo->Part4Port, 0x10, ~0x1F, temp);
-
- for (i = 0; i < 10; i++)
- XGI_LongWait(pVBInfo);
-
- tempch = (tempcx & 0xFF00) >> 8;
- temp = xgifb_reg_get(pVBInfo->Part4Port, 0x03);
- temp = temp ^ (0x0E);
- temp &= tempch;
-
- if (temp != tempch)
- return 0;
-
- tempbx = *pVBInfo->pVideoSenseData2;
-
- tempcx = 0x0804;
- temp = tempbx & 0xFF;
- xgifb_reg_set(pVBInfo->Part4Port, 0x11, temp);
- temp = (tempbx & 0xFF00) >> 8;
- temp |= (tempcx & 0x00FF);
- xgifb_reg_and_or(pVBInfo->Part4Port, 0x10, ~0x1F, temp);
-
- for (i = 0; i < 10; i++)
- XGI_LongWait(pVBInfo);
-
- tempch = (tempcx & 0xFF00) >> 8;
- temp = xgifb_reg_get(pVBInfo->Part4Port, 0x03);
- temp = temp ^ (0x0E);
- temp &= tempch;
-
- if (temp != tempch) {
- return 0;
- } else {
- tempbx = 0x3FF;
- tempcx = 0x0804;
- temp = tempbx & 0xFF;
- xgifb_reg_set(pVBInfo->Part4Port, 0x11, temp);
- temp = (tempbx & 0xFF00) >> 8;
- temp |= (tempcx & 0x00FF);
- xgifb_reg_and_or(pVBInfo->Part4Port, 0x10, ~0x1F, temp);
-
- for (i = 0; i < 10; i++)
- XGI_LongWait(pVBInfo);
-
- tempch = (tempcx & 0xFF00) >> 8;
- temp = xgifb_reg_get(pVBInfo->Part4Port, 0x03);
- temp = temp ^ (0x0E);
- temp &= tempch;
-
- if (temp != tempch)
- return 1;
- else
- return 0;
- }
-}
-
-void XGI_GetSenseStatus(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- unsigned short tempax = 0, tempbx, tempcx, temp,
- P2reg0 = 0, SenseModeNo = 0,
- OutputSelect = *pVBInfo->pOutputSelect,
- ModeIdIndex, i;
- pVBInfo->BaseAddr = (unsigned long) HwDeviceExtension->pjIOAddress;
-
- if (pVBInfo->IF_DEF_LVDS == 1) {
- /* ynlai 02/27/2002 */
- tempax = xgifb_reg_get(pVBInfo->P3c4, 0x1A);
- tempbx = xgifb_reg_get(pVBInfo->P3c4, 0x1B);
- tempax = ((tempax & 0xFE) >> 1) | (tempbx << 8);
- if (tempax == 0x00) { /* Get Panel id from DDC */
- temp = XGINew_GetLCDDDCInfo(HwDeviceExtension, pVBInfo);
- if (temp == 1) { /* LCD connect */
- /* set CR39 bit0="1" */
- xgifb_reg_and_or(pVBInfo->P3d4,
- 0x39, 0xFF, 0x01);
- /* clean CR37 bit4="0" */
- xgifb_reg_and_or(pVBInfo->P3d4,
- 0x37, 0xEF, 0x00);
- temp = LCDSense;
- } else { /* LCD don't connect */
- temp = 0;
- }
- } else {
- XGINew_GetPanelID(pVBInfo);
- temp = LCDSense;
- }
-
- tempbx = ~(LCDSense | AVIDEOSense | SVIDEOSense);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x32, tempbx, temp);
- } else { /* for 301 */
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) { /* for HiVision */
- tempax = xgifb_reg_get(pVBInfo->P3c4, 0x38);
- temp = tempax & 0x01;
- tempax = xgifb_reg_get(pVBInfo->P3c4, 0x3A);
- temp = temp | (tempax & 0x02);
- xgifb_reg_and_or(pVBInfo->P3d4, 0x32, 0xA0, temp);
- } else {
- if (XGI_BridgeIsOn(pVBInfo)) {
- P2reg0 = xgifb_reg_get(pVBInfo->Part2Port,
- 0x00);
- if (!XGINew_BridgeIsEnable(HwDeviceExtension,
- pVBInfo)) {
- SenseModeNo = 0x2e;
- /* xgifb_reg_set(pVBInfo->P3d4, 0x30, 0x41);
- * XGISetModeNew(HwDeviceExtension, 0x2e);
- * // ynlai InitMode */
-
- temp = XGI_SearchModeID(SenseModeNo,
- &ModeIdIndex,
- pVBInfo);
- XGI_GetVGAType(HwDeviceExtension,
- pVBInfo);
- XGI_GetVBType(pVBInfo);
- pVBInfo->SetFlag = 0x00;
- pVBInfo->ModeType = ModeVGA;
- pVBInfo->VBInfo = SetCRT2ToRAMDAC |
- LoadDACFlag |
- SetInSlaveMode;
- XGI_GetLCDInfo(0x2e,
- ModeIdIndex,
- pVBInfo);
- XGI_GetTVInfo(0x2e,
- ModeIdIndex,
- pVBInfo);
- XGI_EnableBridge(HwDeviceExtension,
- pVBInfo);
- XGI_SetCRT2Group301(SenseModeNo,
- HwDeviceExtension,
- pVBInfo);
- XGI_SetCRT2ModeRegs(0x2e,
- HwDeviceExtension,
- pVBInfo);
- /* XGI_DisableBridge(HwDeviceExtension,
- * pVBInfo ) ; */
- /* Display Off 0212 */
- xgifb_reg_and_or(pVBInfo->P3c4,
- 0x01,
- 0xDF,
- 0x20);
- for (i = 0; i < 20; i++)
- XGI_LongWait(pVBInfo);
- }
- xgifb_reg_set(pVBInfo->Part2Port, 0x00, 0x1c);
- tempax = 0;
- tempbx = *pVBInfo->pRGBSenseData;
-
- if (!(XGINew_Is301B(pVBInfo)))
- tempbx = *pVBInfo->pRGBSenseData2;
-
- tempcx = 0x0E08;
- if (XGINew_Sense(tempbx, tempcx, pVBInfo)) {
- if (XGINew_Sense(tempbx,
- tempcx,
- pVBInfo))
- tempax |= Monitor2Sense;
- }
-
- if (pVBInfo->VBType & VB_XGI301C)
- xgifb_reg_or(pVBInfo->Part4Port,
- 0x0d,
- 0x04);
-
- /* add by kuku for Multi-adapter sense HiTV */
- if (XGINew_SenseHiTV(HwDeviceExtension,
- pVBInfo)) {
- tempax |= HiTVSense;
- if ((pVBInfo->VBType & VB_XGI301C))
- tempax ^= (HiTVSense |
- YPbPrSense);
- }
-
- /* start */
- if (!(tempax & (HiTVSense | YPbPrSense))) {
- tempbx = *pVBInfo->pYCSenseData;
- if (!(XGINew_Is301B(pVBInfo)))
- tempbx = *pVBInfo->pYCSenseData2;
- tempcx = 0x0604;
- if (XGINew_Sense(tempbx,
- tempcx,
- pVBInfo)) {
- if (XGINew_Sense(tempbx,
- tempcx,
- pVBInfo))
- tempax |= SVIDEOSense;
- }
-
- if (OutputSelect & BoardTVType) {
- tempbx = *pVBInfo->pVideoSenseData;
-
- if (!(XGINew_Is301B(pVBInfo)))
- tempbx = *pVBInfo->pVideoSenseData2;
-
- tempcx = 0x0804;
- if (XGINew_Sense(tempbx,
- tempcx,
- pVBInfo)) {
- if (XGINew_Sense(tempbx,
- tempcx,
- pVBInfo))
- tempax |= AVIDEOSense;
- }
- } else {
- if (!(tempax & SVIDEOSense)) {
- tempbx = *pVBInfo->pVideoSenseData;
-
- if (!(XGINew_Is301B(pVBInfo)))
- tempbx = *pVBInfo->pVideoSenseData2;
-
- tempcx = 0x0804;
- if (XGINew_Sense(tempbx,
- tempcx,
- pVBInfo)) {
- if (XGINew_Sense(tempbx, tempcx, pVBInfo))
- tempax |= AVIDEOSense;
- }
- }
- }
- }
- } /* end */
- if (!(tempax & Monitor2Sense)) {
- if (XGINew_SenseLCD(HwDeviceExtension, pVBInfo))
- tempax |= LCDSense;
- }
- tempbx = 0;
- tempcx = 0;
- XGINew_Sense(tempbx, tempcx, pVBInfo);
-
- xgifb_reg_and_or(pVBInfo->P3d4, 0x32, ~0xDF, tempax);
- xgifb_reg_set(pVBInfo->Part2Port, 0x00, P2reg0);
-
- if (!(P2reg0 & 0x20)) {
- pVBInfo->VBInfo = DisableCRT2Display;
- /* XGI_SetCRT2Group301(SenseModeNo,
- * HwDeviceExtension,
- * pVBInfo); */
- }
- }
- }
- XGI_DisableBridge(HwDeviceExtension, pVBInfo); /* shampoo 0226 */
-
-}
-
-unsigned short XGINew_SenseLCD(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- /* unsigned short SoftSetting ; */
- unsigned short temp;
-
- temp = XGINew_GetLCDDDCInfo(HwDeviceExtension, pVBInfo);
-
- return temp;
-}
diff --git a/drivers/staging/xgifb/vb_ext.h b/drivers/staging/xgifb/vb_ext.h
deleted file mode 100644
index 0b1f55b..0000000
--- a/drivers/staging/xgifb/vb_ext.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _VBEXT_
-#define _VBEXT_
-
-extern void XGI_GetSenseStatus(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo);
-extern unsigned short XGINew_SenseLCD(struct xgi_hw_device_info *,
- struct vb_device_info *pVBInfo);
-
-#endif
diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c
index 9e890a1..4ccd988 100644
--- a/drivers/staging/xgifb/vb_init.c
+++ b/drivers/staging/xgifb/vb_init.c
@@ -1,6 +1,7 @@
#include <linux/types.h>
#include <linux/delay.h> /* udelay */
#include <linux/pci.h>
+#include <linux/vmalloc.h>
#include "vgatypes.h"
#include "XGIfb.h"
@@ -10,7 +11,6 @@
#include "vb_util.h"
#include "vb_setmode.h"
#include "vb_init.h"
-#include "vb_ext.h"
#include <linux/io.h>
@@ -35,6 +35,8 @@ static const unsigned short XGINew_DDRDRAM_TYPE20[12][5] = {
{ 2, 12, 9, 8, 0x35},
{ 2, 12, 8, 4, 0x31} };
+#define XGIFB_ROM_SIZE 65536
+
static unsigned char
XGINew_GetXG20DRAMType(struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
@@ -1068,20 +1070,20 @@ static int XGINew_DDRSizing340(struct xgi_hw_device_info *HwDeviceExtension,
return 0;
}
-static void XGINew_SetDRAMSize_340(struct xgi_hw_device_info *HwDeviceExtension,
+static void XGINew_SetDRAMSize_340(struct xgifb_video_info *xgifb_info,
+ struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned short data;
- pVBInfo->ROMAddr = HwDeviceExtension->pjVirtualRomBase;
pVBInfo->FBAddr = HwDeviceExtension->pjVideoMemoryAddress;
- XGISetModeNew(HwDeviceExtension, 0x2e);
+ XGISetModeNew(xgifb_info, HwDeviceExtension, 0x2e);
data = xgifb_reg_get(pVBInfo->P3c4, 0x21);
/* disable read cache */
xgifb_reg_set(pVBInfo->P3c4, 0x21, (unsigned short) (data & 0xDF));
- XGI_DisplayOff(HwDeviceExtension, pVBInfo);
+ XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
/* data = xgifb_reg_get(pVBInfo->P3c4, 0x1); */
/* data |= 0x20 ; */
@@ -1092,118 +1094,100 @@ static void XGINew_SetDRAMSize_340(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_set(pVBInfo->P3c4, 0x21, (unsigned short) (data | 0x20));
}
-static void ReadVBIOSTablData(unsigned char ChipType,
+static u8 *xgifb_copy_rom(struct pci_dev *dev, size_t *rom_size)
+{
+ void __iomem *rom_address;
+ u8 *rom_copy;
+
+ rom_address = pci_map_rom(dev, rom_size);
+ if (rom_address == NULL)
+ return NULL;
+
+ rom_copy = vzalloc(XGIFB_ROM_SIZE);
+ if (rom_copy == NULL)
+ goto done;
+
+ *rom_size = min_t(size_t, *rom_size, XGIFB_ROM_SIZE);
+ memcpy_fromio(rom_copy, rom_address, *rom_size);
+
+done:
+ pci_unmap_rom(dev, rom_address);
+ return rom_copy;
+}
+
+static void xgifb_read_vbios(struct pci_dev *pdev,
struct vb_device_info *pVBInfo)
{
- volatile unsigned char *pVideoMemory =
- (unsigned char *) pVBInfo->ROMAddr;
+ struct xgifb_video_info *xgifb_info = pci_get_drvdata(pdev);
+ u8 *vbios;
unsigned long i;
- unsigned char j, k;
- /* Volari customize data area end */
-
- if (ChipType == XG21) {
- pVBInfo->IF_DEF_LVDS = 0;
- if (pVideoMemory[0x65] & 0x1) {
- pVBInfo->IF_DEF_LVDS = 1;
- i = pVideoMemory[0x316] | (pVideoMemory[0x317] << 8);
- j = pVideoMemory[i - 1];
- if (j != 0xff) {
- k = 0;
- do {
- pVBInfo->XG21_LVDSCapList[k].
- LVDS_Capability
- = pVideoMemory[i] |
- (pVideoMemory[i + 1] << 8);
- pVBInfo->XG21_LVDSCapList[k].LVDSHT
- = pVideoMemory[i + 2] |
- (pVideoMemory[i + 3] << 8);
- pVBInfo->XG21_LVDSCapList[k].LVDSVT
- = pVideoMemory[i + 4] |
- (pVideoMemory[i + 5] << 8);
- pVBInfo->XG21_LVDSCapList[k].LVDSHDE
- = pVideoMemory[i + 6] |
- (pVideoMemory[i + 7] << 8);
- pVBInfo->XG21_LVDSCapList[k].LVDSVDE
- = pVideoMemory[i + 8] |
- (pVideoMemory[i + 9] << 8);
- pVBInfo->XG21_LVDSCapList[k].LVDSHFP
- = pVideoMemory[i + 10] |
- (pVideoMemory[i + 11] << 8);
- pVBInfo->XG21_LVDSCapList[k].LVDSVFP
- = pVideoMemory[i + 12] |
- (pVideoMemory[i + 13] << 8);
- pVBInfo->XG21_LVDSCapList[k].LVDSHSYNC
- = pVideoMemory[i + 14] |
- (pVideoMemory[i + 15] << 8);
- pVBInfo->XG21_LVDSCapList[k].LVDSVSYNC
- = pVideoMemory[i + 16] |
- (pVideoMemory[i + 17] << 8);
- pVBInfo->XG21_LVDSCapList[k].VCLKData1
- = pVideoMemory[i + 18];
- pVBInfo->XG21_LVDSCapList[k].VCLKData2
- = pVideoMemory[i + 19];
- pVBInfo->XG21_LVDSCapList[k].PSC_S1
- = pVideoMemory[i + 20];
- pVBInfo->XG21_LVDSCapList[k].PSC_S2
- = pVideoMemory[i + 21];
- pVBInfo->XG21_LVDSCapList[k].PSC_S3
- = pVideoMemory[i + 22];
- pVBInfo->XG21_LVDSCapList[k].PSC_S4
- = pVideoMemory[i + 23];
- pVBInfo->XG21_LVDSCapList[k].PSC_S5
- = pVideoMemory[i + 24];
- i += 25;
- j--;
- k++;
- } while ((j > 0) &&
- (k < (sizeof(XGI21_LCDCapList) /
- sizeof(struct
- XGI21_LVDSCapStruct))));
- } else {
- pVBInfo->XG21_LVDSCapList[0].LVDS_Capability
- = pVideoMemory[i] |
- (pVideoMemory[i + 1] << 8);
- pVBInfo->XG21_LVDSCapList[0].LVDSHT
- = pVideoMemory[i + 2] |
- (pVideoMemory[i + 3] << 8);
- pVBInfo->XG21_LVDSCapList[0].LVDSVT
- = pVideoMemory[i + 4] |
- (pVideoMemory[i + 5] << 8);
- pVBInfo->XG21_LVDSCapList[0].LVDSHDE
- = pVideoMemory[i + 6] |
- (pVideoMemory[i + 7] << 8);
- pVBInfo->XG21_LVDSCapList[0].LVDSVDE
- = pVideoMemory[i + 8] |
- (pVideoMemory[i + 9] << 8);
- pVBInfo->XG21_LVDSCapList[0].LVDSHFP
- = pVideoMemory[i + 10] |
- (pVideoMemory[i + 11] << 8);
- pVBInfo->XG21_LVDSCapList[0].LVDSVFP
- = pVideoMemory[i + 12] |
- (pVideoMemory[i + 13] << 8);
- pVBInfo->XG21_LVDSCapList[0].LVDSHSYNC
- = pVideoMemory[i + 14] |
- (pVideoMemory[i + 15] << 8);
- pVBInfo->XG21_LVDSCapList[0].LVDSVSYNC
- = pVideoMemory[i + 16] |
- (pVideoMemory[i + 17] << 8);
- pVBInfo->XG21_LVDSCapList[0].VCLKData1
- = pVideoMemory[i + 18];
- pVBInfo->XG21_LVDSCapList[0].VCLKData2
- = pVideoMemory[i + 19];
- pVBInfo->XG21_LVDSCapList[0].PSC_S1
- = pVideoMemory[i + 20];
- pVBInfo->XG21_LVDSCapList[0].PSC_S2
- = pVideoMemory[i + 21];
- pVBInfo->XG21_LVDSCapList[0].PSC_S3
- = pVideoMemory[i + 22];
- pVBInfo->XG21_LVDSCapList[0].PSC_S4
- = pVideoMemory[i + 23];
- pVBInfo->XG21_LVDSCapList[0].PSC_S5
- = pVideoMemory[i + 24];
- }
- }
+ unsigned char j;
+ struct XGI21_LVDSCapStruct *lvds;
+ size_t vbios_size;
+ int entry;
+
+ if (xgifb_info->chip != XG21)
+ return;
+ pVBInfo->IF_DEF_LVDS = 0;
+ vbios = xgifb_copy_rom(pdev, &vbios_size);
+ if (vbios == NULL) {
+ dev_err(&pdev->dev, "video BIOS not available\n");
+ return;
+ }
+ if (vbios_size <= 0x65)
+ goto error;
+ /*
+ * The user can ignore the LVDS bit in the BIOS and force the display
+ * type.
+ */
+ if (!(vbios[0x65] & 0x1) &&
+ (!xgifb_info->display2_force ||
+ xgifb_info->display2 != XGIFB_DISP_LCD)) {
+ vfree(vbios);
+ return;
}
+ if (vbios_size <= 0x317)
+ goto error;
+ i = vbios[0x316] | (vbios[0x317] << 8);
+ if (vbios_size <= i - 1)
+ goto error;
+ j = vbios[i - 1];
+ if (j == 0)
+ goto error;
+ if (j == 0xff)
+ j = 1;
+ /*
+ * Read the LVDS table index scratch register set by the BIOS.
+ */
+ entry = xgifb_reg_get(xgifb_info->dev_info.P3d4, 0x36);
+ if (entry >= j)
+ entry = 0;
+ i += entry * 25;
+ lvds = &xgifb_info->lvds_data;
+ if (vbios_size <= i + 24)
+ goto error;
+ lvds->LVDS_Capability = vbios[i] | (vbios[i + 1] << 8);
+ lvds->LVDSHT = vbios[i + 2] | (vbios[i + 3] << 8);
+ lvds->LVDSVT = vbios[i + 4] | (vbios[i + 5] << 8);
+ lvds->LVDSHDE = vbios[i + 6] | (vbios[i + 7] << 8);
+ lvds->LVDSVDE = vbios[i + 8] | (vbios[i + 9] << 8);
+ lvds->LVDSHFP = vbios[i + 10] | (vbios[i + 11] << 8);
+ lvds->LVDSVFP = vbios[i + 12] | (vbios[i + 13] << 8);
+ lvds->LVDSHSYNC = vbios[i + 14] | (vbios[i + 15] << 8);
+ lvds->LVDSVSYNC = vbios[i + 16] | (vbios[i + 17] << 8);
+ lvds->VCLKData1 = vbios[i + 18];
+ lvds->VCLKData2 = vbios[i + 19];
+ lvds->PSC_S1 = vbios[i + 20];
+ lvds->PSC_S2 = vbios[i + 21];
+ lvds->PSC_S3 = vbios[i + 22];
+ lvds->PSC_S4 = vbios[i + 23];
+ lvds->PSC_S5 = vbios[i + 24];
+ vfree(vbios);
+ pVBInfo->IF_DEF_LVDS = 1;
+ return;
+error:
+ dev_err(&pdev->dev, "video BIOS corrupted\n");
+ vfree(vbios);
}
static void XGINew_ChkSenseStatus(struct xgi_hw_device_info *HwDeviceExtension,
@@ -1336,18 +1320,57 @@ static void XGINew_SetModeScratch(struct xgi_hw_device_info *HwDeviceExtension,
}
+static unsigned short XGINew_SenseLCD(struct xgi_hw_device_info
+ *HwDeviceExtension,
+ struct vb_device_info *pVBInfo)
+{
+ unsigned short temp;
+
+ /* add lcd sense */
+ if (HwDeviceExtension->ulCRT2LCDType == LCD_UNKNOWN) {
+ return 0;
+ } else {
+ temp = (unsigned short) HwDeviceExtension->ulCRT2LCDType;
+ switch (HwDeviceExtension->ulCRT2LCDType) {
+ case LCD_INVALID:
+ case LCD_800x600:
+ case LCD_1024x768:
+ case LCD_1280x1024:
+ break;
+
+ case LCD_640x480:
+ case LCD_1024x600:
+ case LCD_1152x864:
+ case LCD_1280x960:
+ case LCD_1152x768:
+ temp = 0;
+ break;
+
+ case LCD_1400x1050:
+ case LCD_1280x768:
+ case LCD_1600x1200:
+ break;
+
+ case LCD_1920x1440:
+ case LCD_2048x1536:
+ temp = 0;
+ break;
+
+ default:
+ break;
+ }
+ xgifb_reg_and_or(pVBInfo->P3d4, 0x36, 0xF0, temp);
+ return 1;
+ }
+}
+
static void XGINew_GetXG21Sense(struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned char Temp;
- volatile unsigned char *pVideoMemory =
- (unsigned char *) pVBInfo->ROMAddr;
-
- pVBInfo->IF_DEF_LVDS = 0;
#if 1
- if ((pVideoMemory[0x65] & 0x01)) { /* For XG21 LVDS */
- pVBInfo->IF_DEF_LVDS = 1;
+ if (pVBInfo->IF_DEF_LVDS) { /* For XG21 LVDS */
xgifb_reg_or(pVBInfo->P3d4, 0x32, LCDSense);
/* LVDS on chip */
xgifb_reg_and_or(pVBInfo->P3d4, 0x38, ~0xE0, 0xC0);
@@ -1393,7 +1416,6 @@ static void XGINew_GetXG27Sense(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_set(pVBInfo->P3d4, 0x4A, bCR4A);
if (Temp <= 0x02) {
- pVBInfo->IF_DEF_LVDS = 1;
/* LVDS setting */
xgifb_reg_and_or(pVBInfo->P3d4, 0x38, ~0xE0, 0xC0);
xgifb_reg_set(pVBInfo->P3d4, 0x30, 0x21);
@@ -1451,24 +1473,15 @@ unsigned char XGIInitNew(struct pci_dev *pdev)
struct vb_device_info *pVBInfo = &VBINF;
unsigned char i, temp = 0, temp1;
/* VBIOSVersion[5]; */
- volatile unsigned char *pVideoMemory;
/* unsigned long j, k; */
- pVBInfo->ROMAddr = HwDeviceExtension->pjVirtualRomBase;
-
pVBInfo->FBAddr = HwDeviceExtension->pjVideoMemoryAddress;
pVBInfo->BaseAddr = (unsigned long) HwDeviceExtension->pjIOAddress;
- pVideoMemory = (unsigned char *) pVBInfo->ROMAddr;
-
/* Newdebugcode(0x99); */
-
- /* if (pVBInfo->ROMAddr == 0) */
- /* return(0); */
-
if (pVBInfo->FBAddr == NULL) {
printk("\n pVBInfo->FBAddr == 0 ");
return 0;
@@ -1516,8 +1529,7 @@ unsigned char XGIInitNew(struct pci_dev *pdev)
InitTo330Pointer(HwDeviceExtension->jChipType, pVBInfo);
- /* ReadVBIOSData */
- ReadVBIOSTablData(HwDeviceExtension->jChipType, pVBInfo);
+ xgifb_read_vbios(pdev, pVBInfo);
/* 1.Openkey */
xgifb_reg_set(pVBInfo->P3c4, 0x05, 0x86);
@@ -1774,7 +1786,7 @@ unsigned char XGIInitNew(struct pci_dev *pdev)
pVBInfo);
printk("20");
- XGINew_SetDRAMSize_340(HwDeviceExtension, pVBInfo);
+ XGINew_SetDRAMSize_340(xgifb_info, HwDeviceExtension, pVBInfo);
printk("21");
printk("22");
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index 81c0cc4..67a316c 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -67,11 +67,6 @@ void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
pVBInfo->XGINEWUB_CRT1Table
= (struct XGI_CRT1TableStruct *) XGI_CRT1Table;
- /* add for new UNIVGABIOS */
- /* XGINew_UBLCDDataTable =
- * (struct XGI_LCDDataTablStruct *) XGI_LCDDataTable; */
- /* XGINew_UBTVDataTable = (XGI_TVDataTablStruct *) XGI_TVDataTable; */
-
pVBInfo->MCLKData = (struct XGI_MCLKDataStruct *) XGI340New_MCLKData;
pVBInfo->ECLKData = (struct XGI_ECLKDataStruct *) XGI340_ECLKData;
pVBInfo->VCLKData = (struct XGI_VCLKDataStruct *) XGI_VCLKData;
@@ -148,9 +143,6 @@ void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
else
pVBInfo->LCDCapList = XGI_LCDCapList;
- if ((ChipType == XG21) || (ChipType == XG27))
- pVBInfo->XG21_LVDSCapList = XGI21_LCDCapList;
-
pVBInfo->XGI_TVDelayList = XGI301TVDelayList;
pVBInfo->XGI_TVDelayList2 = XGI301TVDelayList2;
@@ -236,28 +228,6 @@ static void XGI_SetSeqRegs(unsigned short ModeNo,
}
}
-static void XGI_SetMiscRegs(unsigned short StandTableIndex,
- struct vb_device_info *pVBInfo)
-{
- unsigned char Miscdata;
-
- /* Get Misc from file */
- Miscdata = pVBInfo->StandTable[StandTableIndex].MISC;
- /*
- if (pVBInfo->VBType & (VB_XGI301B |
- VB_XGI302B |
- VB_XGI301LV |
- VB_XGI302LV |
- VB_XGI301C)) {
- if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
- Miscdata |= 0x0C;
- }
- }
- */
-
- outb(Miscdata, pVBInfo->P3c2); /* Set Misc(3c2) */
-}
-
static void XGI_SetCRTCRegs(struct xgi_hw_device_info *HwDeviceExtension,
unsigned short StandTableIndex,
struct vb_device_info *pVBInfo)
@@ -274,16 +244,6 @@ static void XGI_SetCRTCRegs(struct xgi_hw_device_info *HwDeviceExtension,
CRTCdata = pVBInfo->StandTable[StandTableIndex].CRTC[i];
xgifb_reg_set(pVBInfo->P3d4, i, CRTCdata); /* Set CRTC(3d4) */
}
- /*
- if ((HwDeviceExtension->jChipType == XGI_630) &&
- (HwDeviceExtension->jChipRevision == 0x30)) {
- if (pVBInfo->VBInfo & SetInSlaveMode) {
- if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToTV)) {
- xgifb_reg_set(pVBInfo->P3d4, 0x18, 0xFE);
- }
- }
- }
- */
}
static void XGI_SetATTRegs(unsigned short ModeNo,
@@ -530,10 +490,6 @@ static void XGI_SetCRT1Timing_H(struct vb_device_info *pVBInfo,
unsigned char data, data1, pushax;
unsigned short i, j;
- /* xgifb_reg_set(pVBInfo->P3d4, 0x51, 0); */
- /* xgifb_reg_set(pVBInfo->P3d4, 0x56, 0); */
- /* xgifb_reg_and_or(pVBInfo->P3d4, 0x11, 0x7f, 0x00); */
-
/* unlock cr0-7 */
data = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x11);
data &= 0x7F;
@@ -595,10 +551,6 @@ static void XGI_SetCRT1Timing_V(unsigned short ModeIdIndex,
unsigned char data;
unsigned short i, j;
- /* xgifb_reg_set(pVBInfo->P3d4, 0x51, 0); */
- /* xgifb_reg_set(pVBInfo->P3d4, 0x56, 0); */
- /* xgifb_reg_and_or(pVBInfo->P3d4, 0x11, 0x7f, 0x00); */
-
for (i = 0x00; i <= 0x01; i++) {
data = pVBInfo->TimingV[0].data[i];
xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 6), data);
@@ -976,6 +928,20 @@ static void XGI_SetXG27CRTC(unsigned short ModeNo,
}
}
+static void XGI_SetXG27FPBits(struct vb_device_info *pVBInfo)
+{
+ unsigned char temp;
+
+ /* D[1:0] 01: 18bit, 00: dual 12, 10: single 24 */
+ temp = xgifb_reg_get(pVBInfo->P3d4, 0x37);
+ temp = (temp & 3) << 6;
+ /* SR06[7]0: dual 12/1: single 24 [6] 18bit Dither <= 0 h/w recommend */
+ xgifb_reg_and_or(pVBInfo->P3c4, 0x06, ~0xc0, temp & 0x80);
+ /* SR09[7] enable FP output, SR09[6] 1: single 18bits, 0: 24bits */
+ xgifb_reg_and_or(pVBInfo->P3c4, 0x09, ~0xc0, temp | 0x80);
+
+}
+
static void xgifb_set_lcd(int chip_id,
struct vb_device_info *pVBInfo,
unsigned short RefreshRateTableIndex,
@@ -1088,6 +1054,20 @@ static void XGI_UpdateXG21CRTC(unsigned short ModeNo,
}
}
+static unsigned short XGI_GetResInfo(unsigned short ModeNo,
+ unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
+{
+ unsigned short resindex;
+
+ if (ModeNo <= 0x13)
+ /* si+St_ResInfo */
+ resindex = pVBInfo->SModeIDTable[ModeIdIndex].St_ResInfo;
+ else
+ /* si+Ext_ResInfo */
+ resindex = pVBInfo->EModeIDTable[ModeIdIndex].Ext_RESINFO;
+ return resindex;
+}
+
static void XGI_SetCRT1DE(struct xgi_hw_device_info *HwDeviceExtension,
unsigned short ModeNo, unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
@@ -1127,9 +1107,6 @@ static void XGI_SetCRT1DE(struct xgi_hw_device_info *HwDeviceExtension,
tempcx = 8;
- /* if (!(modeflag & Charx8Dot)) */
- /* tempcx = 9; */
-
tempax /= tempcx;
tempax -= 1;
tempbx -= 1;
@@ -1163,20 +1140,6 @@ static void XGI_SetCRT1DE(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_set(pVBInfo->P3d4, 0x11, temp);
}
-unsigned short XGI_GetResInfo(unsigned short ModeNo,
- unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
-{
- unsigned short resindex;
-
- if (ModeNo <= 0x13)
- /* si+St_ResInfo */
- resindex = pVBInfo->SModeIDTable[ModeIdIndex].St_ResInfo;
- else
- /* si+Ext_ResInfo */
- resindex = pVBInfo->EModeIDTable[ModeIdIndex].Ext_RESINFO;
- return resindex;
-}
-
static void XGI_SetCRT1Offset(unsigned short ModeNo,
unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
@@ -1308,77 +1271,55 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
VCLKIndex = LCDXlat2VCLK[CRT2Index];
else
VCLKIndex = LCDXlat1VCLK[CRT2Index];
- } else { /* for TV */
- if (pVBInfo->VBInfo & SetCRT2ToTV) {
- if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
- if (pVBInfo->SetFlag & RPLLDIV2XO) {
- VCLKIndex = HiTVVCLKDIV2;
- VCLKIndex += 25;
- } else {
- VCLKIndex = HiTVVCLK;
- VCLKIndex += 25;
- }
-
- if (pVBInfo->SetFlag & TVSimuMode) {
- if (modeflag & Charx8Dot) {
- VCLKIndex =
- HiTVSimuVCLK;
- VCLKIndex += 25;
- } else {
- VCLKIndex =
- HiTVTextVCLK;
- VCLKIndex += 25;
- }
- }
+ } else if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
+ if (pVBInfo->SetFlag & RPLLDIV2XO) {
+ VCLKIndex = HiTVVCLKDIV2;
+ VCLKIndex += 25;
+ } else {
+ VCLKIndex = HiTVVCLK;
+ VCLKIndex += 25;
+ }
- /* 301lv */
- if (pVBInfo->VBType & VB_XGI301LV) {
- if (!(pVBInfo->VBExtInfo ==
- VB_YPbPr1080i)) {
- VCLKIndex =
- YPbPr750pVCLK;
- if (!(pVBInfo->VBExtInfo
- ==
- VB_YPbPr750p)) {
- VCLKIndex =
- YPbPr525pVCLK;
- if (!(pVBInfo->VBExtInfo
- == VB_YPbPr525p)) {
- VCLKIndex
- = YPbPr525iVCLK_2;
- if (!(pVBInfo->SetFlag
- & RPLLDIV2XO))
- VCLKIndex
- = YPbPr525iVCLK;
- }
- }
- }
- }
+ if (pVBInfo->SetFlag & TVSimuMode) {
+ if (modeflag & Charx8Dot) {
+ VCLKIndex = HiTVSimuVCLK;
+ VCLKIndex += 25;
} else {
- if (pVBInfo->VBInfo & SetCRT2ToTV) {
- if (pVBInfo->SetFlag &
- RPLLDIV2XO) {
- VCLKIndex = TVVCLKDIV2;
- VCLKIndex += 25;
- } else {
- VCLKIndex = TVVCLK;
- VCLKIndex += 25;
- }
- }
+ VCLKIndex = HiTVTextVCLK;
+ VCLKIndex += 25;
}
- } else { /* for CRT2 */
- /* Port 3cch */
- VCLKIndex = (unsigned char) inb(
- (pVBInfo->P3ca + 0x02));
- VCLKIndex = ((VCLKIndex >> 2) & 0x03);
- if (ModeNo > 0x13) {
- /* di+Ext_CRTVCLK */
- VCLKIndex =
- pVBInfo->RefIndex[
+ }
+
+ /* 301lv */
+ if ((pVBInfo->VBType & VB_XGI301LV) &&
+ !(pVBInfo->VBExtInfo == VB_YPbPr1080i)) {
+ if (pVBInfo->VBExtInfo == VB_YPbPr750p)
+ VCLKIndex = YPbPr750pVCLK;
+ else if (pVBInfo->VBExtInfo == VB_YPbPr525p)
+ VCLKIndex = YPbPr525pVCLK;
+ else if (pVBInfo->SetFlag & RPLLDIV2XO)
+ VCLKIndex = YPbPr525iVCLK_2;
+ else
+ VCLKIndex = YPbPr525iVCLK;
+ }
+ } else if (pVBInfo->VBInfo & SetCRT2ToTV) {
+ if (pVBInfo->SetFlag & RPLLDIV2XO) {
+ VCLKIndex = TVVCLKDIV2;
+ VCLKIndex += 25;
+ } else {
+ VCLKIndex = TVVCLK;
+ VCLKIndex += 25;
+ }
+ } else { /* for CRT2 */
+ /* Port 3cch */
+ VCLKIndex = (unsigned char) inb((pVBInfo->P3ca + 0x02));
+ VCLKIndex = ((VCLKIndex >> 2) & 0x03);
+ if (ModeNo > 0x13) {
+ /* di+Ext_CRTVCLK */
+ VCLKIndex = pVBInfo->RefIndex[
RefreshRateTableIndex].
Ext_CRTVCLK;
- VCLKIndex &= IndexMask;
- }
+ VCLKIndex &= IndexMask;
}
}
} else { /* LVDS */
@@ -1397,7 +1338,6 @@ static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
else
VCLKIndex = LVDSXlat3VCLK[VCLKIndex];
}
- /* VCLKIndex = VCLKIndex&IndexMask; */
return VCLKIndex;
}
@@ -1461,6 +1401,19 @@ static void XGI_SetCRT1VCLK(unsigned short ModeNo,
}
}
+static void XGI_SetXG21FPBits(struct vb_device_info *pVBInfo)
+{
+ unsigned char temp;
+
+ temp = xgifb_reg_get(pVBInfo->P3d4, 0x37); /* D[0] 1: 18bit */
+ temp = (temp & 1) << 6;
+ /* SR06[6] 18bit Dither */
+ xgifb_reg_and_or(pVBInfo->P3c4, 0x06, ~0x40, temp);
+ /* SR09[7] enable FP output, SR09[6] 1: single 18bits, 0: dual 12bits */
+ xgifb_reg_and_or(pVBInfo->P3c4, 0x09, ~0xc0, temp | 0x80);
+
+}
+
static void XGI_SetCRT1FIFO(unsigned short ModeNo,
struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
@@ -1532,16 +1485,6 @@ static void XGI_SetVCLKState(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_set(pVBInfo->P3c4, 0x1F, data);
}
- /* Jong for Adavantech LCD ripple issue
- if ((VCLK >= 0) && (VCLK < 135))
- data2 = 0x03;
- else if ((VCLK >= 135) && (VCLK < 160))
- data2 = 0x02;
- else if ((VCLK >= 160) && (VCLK < 260))
- data2 = 0x01;
- else if (VCLK > 260)
- data2 = 0x00;
- */
data2 = 0x00;
xgifb_reg_and_or(pVBInfo->P3c4, 0x07, 0xFC, data2);
@@ -1591,7 +1534,6 @@ static void XGI_SetCRT1ModeRegs(struct xgi_hw_device_info *HwDeviceExtension,
data2 |= 0x20;
xgifb_reg_and_or(pVBInfo->P3c4, 0x06, ~0x3F, data2);
- /* xgifb_reg_set(pVBInfo->P3c4,0x06,data2); */
resindex = XGI_GetResInfo(ModeNo, ModeIdIndex, pVBInfo);
if (ModeNo <= 0x13)
xres = pVBInfo->StResInfo[resindex].HTotal;
@@ -1636,11 +1578,6 @@ static void XGI_SetCRT1ModeRegs(struct xgi_hw_device_info *HwDeviceExtension,
XGI_SetVCLKState(HwDeviceExtension, ModeNo, RefreshRateTableIndex,
pVBInfo);
- /* if (modeflag&HalfDCLK) //030305 fix lowresolution bug */
- /* if (XGINew_IF_DEF_NEW_LOWRES) */
- /* XGI_VesaLowResolution(ModeNo, ModeIdIndex);
- * //030305 fix lowresolution bug */
-
data = xgifb_reg_get(pVBInfo->P3d4, 0x31);
if (HwDeviceExtension->jChipType == XG27) {
@@ -1803,11 +1740,6 @@ static void XGI_GetLVDSResInfo(unsigned short ModeNo,
/* si+Ext_ResInfo */
modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_RESINFO;
- /* if (ModeNo > 0x13) */
- /* modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag; */
- /* else */
- /* modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag; */
-
if (ModeNo <= 0x13)
/* si+St_ResInfo */
resindex = pVBInfo->SModeIDTable[ModeIdIndex].St_ResInfo;
@@ -1815,8 +1747,6 @@ static void XGI_GetLVDSResInfo(unsigned short ModeNo,
/* si+Ext_ResInfo */
resindex = pVBInfo->EModeIDTable[ModeIdIndex].Ext_RESINFO;
- /* resindex = XGI_GetResInfo(ModeNo, ModeIdIndex, pVBInfo); */
-
if (ModeNo <= 0x13) {
xres = pVBInfo->StResInfo[resindex].HTotal;
yres = pVBInfo->StResInfo[resindex].VTotal;
@@ -1831,13 +1761,10 @@ static void XGI_GetLVDSResInfo(unsigned short ModeNo,
if (modeflag & DoubleScanMode)
yres = yres << 1;
}
- /* if (modeflag & Charx8Dot) */
- /* { */
if (xres == 720)
xres = 640;
- /* } */
pVBInfo->VGAHDE = xres;
pVBInfo->HDE = xres;
pVBInfo->VGAVDE = yres;
@@ -1890,7 +1817,7 @@ static void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo,
tempal = (tempal & 0x0f);
}
- tempcx = LCDLenList[tempbx]; /* mov cl,byte ptr cs:LCDLenList[bx] */
+ tempcx = LCDLenList[tempbx];
if (pVBInfo->LCDInfo & EnableScalingLCD) { /* ScaleLCD */
if ((tempbx == 5) || (tempbx) == 7)
@@ -1898,9 +1825,6 @@ static void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo,
else if ((tempbx == 3) || (tempbx == 8))
tempcx = LVDSDesDataLen2;
}
- /* mov di, word ptr cs:LCDDataList[bx] */
- /* tempdi = pVideoMemory[LCDDataList + tempbx * 2] |
- (pVideoMemory[LCDDataList + tempbx * 2 + 1] << 8); */
switch (tempbx) {
case 0:
@@ -2321,10 +2245,10 @@ static void *XGI_GetTVPtr(unsigned short BX, unsigned short ModeNo,
switch (tempbx) {
case 0:
- tempdi = NULL; /*EPLCHTVCRT1Ptr_H;*/
+ tempdi = NULL;
break;
case 1:
- tempdi = NULL; /*EPLCHTVCRT1Ptr_V;*/
+ tempdi = NULL;
break;
case 2:
case 6:
@@ -2363,9 +2287,7 @@ static void *XGI_GetTVPtr(unsigned short BX, unsigned short ModeNo,
}
/* 07/05/22 */
- if (table == 0x00) {
- } else if (table == 0x01) {
- } else if (table == 0x04) {
+ if (table == 0x04) {
switch (tempdi[i].DATAPTR) {
case 0:
return &XGI_ExtPALData[tempal];
@@ -2429,7 +2351,6 @@ static void *XGI_GetTVPtr(unsigned short BX, unsigned short ModeNo,
default:
break;
}
- } else if (table == 0x06) {
}
return NULL;
}
@@ -2741,7 +2662,6 @@ static void XGI_SetLVDSRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
else
tempbx = LCDPtr->LCDVRS;
- /* tempbx = tempbx >> 4; */
tempcx = push1;
if (pVBInfo->LCDInfo & EnableScalingLCD)
@@ -2881,7 +2801,6 @@ static void XGI_GetLCDVCLKPtr(unsigned char *di_0, unsigned char *di_1,
unsigned short index;
if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
- /* index = XGI_GetLCDCapPtr(pVBInfo); */
index = XGI_GetLCDCapPtr1(pVBInfo);
if (pVBInfo->VBInfo & SetCRT2ToLCD) { /* LCDB */
@@ -3105,19 +3024,6 @@ static void XGI_UpdateModeInfo(struct xgi_hw_device_info *HwDeviceExtension,
}
}
-void XGI_GetVGAType(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *pVBInfo)
-{
- /*
- if ( HwDeviceExtension->jChipType >= XG20 ) {
- pVBInfo->Set_VGAType = XG20;
- } else {
- pVBInfo->Set_VGAType = VGA_XGI340;
- }
- */
- pVBInfo->Set_VGAType = HwDeviceExtension->jChipType;
-}
-
void XGI_GetVBType(struct vb_device_info *pVBInfo)
{
unsigned short flag, tempbx, tempah;
@@ -3160,7 +3066,7 @@ void XGI_GetVBType(struct vb_device_info *pVBInfo)
}
}
-void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
+static void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
@@ -3193,14 +3099,9 @@ void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
if (pVBInfo->IF_DEF_LCDA == 1) {
- if ((pVBInfo->Set_VGAType >= XG20)
- || (pVBInfo->Set_VGAType >= XG40)) {
+ if ((HwDeviceExtension->jChipType >= XG20) ||
+ (HwDeviceExtension->jChipType >= XG40)) {
if (pVBInfo->IF_DEF_LVDS == 0) {
- /* if ((pVBInfo->VBType & VB_XGI302B)
- || (pVBInfo->VBType & VB_XGI301LV)
- || (pVBInfo->VBType & VB_XGI302LV)
- || (pVBInfo->VBType & VB_XGI301C))
- */
if (pVBInfo->VBType &
(VB_XGI302B |
VB_XGI301LV |
@@ -3225,7 +3126,7 @@ void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
((pVBInfo->VBType & VB_XGI301LV) ||
(pVBInfo->VBType & VB_XGI302LV) ||
(pVBInfo->VBType & VB_XGI301C)))) {
- if (temp & SetYPbPr) { /* temp = CR38 */
+ if (temp & SetYPbPr) {
if (pVBInfo->IF_DEF_HiVision == 1) {
/* shampoo add for new
* scratch */
@@ -3242,8 +3143,6 @@ void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
SetCRT2ToYPbPr;
}
}
-
- /* tempbx |= SetCRT2ToYPbPr; */
}
}
}
@@ -3368,7 +3267,7 @@ void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->VBInfo = tempbx;
}
-void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
+static void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
unsigned short temp, tempbx = 0, resinfo = 0, modeflag, index1;
@@ -3404,17 +3303,6 @@ void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx &= (SetCHTVOverScan |
SetNTSCJ |
SetPALTV);
- /*
- if (pVBInfo->IF_DEF_LVDS == 0) {
- //PAL-M/PAL-N Info
- index1 = xgifb_reg_get(pVBInfo->P3d4, 0x38);
- //00:PAL, 01:PAL-M, 10:PAL-N
- temp2 = (index1 & 0xC0) >> 5;
- tempbx |= temp2;
- if (temp2 & 0x02) //PAL-M
- tempbx &= (~SetPALTV);
- }
- */
}
if (pVBInfo->IF_DEF_LVDS == 0) {
@@ -3476,8 +3364,8 @@ void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
pVBInfo->TVInfo = tempbx;
}
-unsigned char XGI_GetLCDInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo)
+static unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
+ unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
{
unsigned short temp, tempax, tempbx, modeflag, resinfo = 0, LCDIdIndex;
@@ -3553,15 +3441,8 @@ unsigned char XGI_GetLCDInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx |= SetLCDtoNonExpanding;
}
- /*
- if (tempax & LCDBToA) {
- tempbx |= SetLCDBToA;
- }
- */
-
if (pVBInfo->IF_DEF_ExpLink == 1) {
if (modeflag & HalfDCLK) {
- /* if (!(pVBInfo->LCDInfo&LCDNonExpanding)) */
if (!(tempbx & SetLCDtoNonExpanding)) {
tempbx |= EnableLVDSDDA;
} else {
@@ -3604,25 +3485,6 @@ unsigned char XGI_GetLCDInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
}
}
- /*
- if (pVBInfo->IF_DEF_LVDS == 0) {
- if (tempax & (LockLCDBToA | StLCDBToA)) {
- if (pVBInfo->VBInfo & SetInSlaveMode) {
- if (!((!(tempax & LockLCDBToA)) &&
- (ModeNo > 0x13))) {
- pVBInfo->VBInfo &=
- ~(SetSimuScanMode |
- SetInSlaveMode |
- SetCRT2ToLCD);
- pVBInfo->VBInfo |=
- SetCRT2ToLCDA |
- SetCRT2ToDualEdge;
- }
- }
- }
- }
- */
-
return 1;
}
@@ -3632,10 +3494,6 @@ unsigned char XGI_SearchModeID(unsigned short ModeNo,
if (ModeNo <= 5)
ModeNo |= 1;
if (ModeNo <= 0x13) {
- /* for (*ModeIdIndex=0;
- *ModeIdIndex < sizeof(pVBInfo->SModeIDTable)
- / sizeof(struct XGI_StStruct);
- (*ModeIdIndex)++) */
for (*ModeIdIndex = 0;; (*ModeIdIndex)++) {
if (pVBInfo->SModeIDTable[*ModeIdIndex].St_ModeID ==
ModeNo)
@@ -3651,10 +3509,6 @@ unsigned char XGI_SearchModeID(unsigned short ModeNo,
(*ModeIdIndex) += 2; /* 400 lines */
/* else 350 lines */
} else {
- /* for (*ModeIdIndex=0;
- *ModeIdIndex < sizeof(pVBInfo->EModeIDTable)
- / sizeof(struct XGI_ExtStruct);
- (*ModeIdIndex)++) */
for (*ModeIdIndex = 0;; (*ModeIdIndex)++) {
if (pVBInfo->EModeIDTable[*ModeIdIndex].Ext_ModeID ==
ModeNo)
@@ -3675,7 +3529,6 @@ static unsigned char XG21GPIODataTransfer(unsigned char ujDate)
for (i = 0; i < 8; i++) {
ujRet = ujRet << 1;
- /* ujRet |= GETBITS(ujDate >> i, 0:0); */
ujRet |= (ujDate >> i) & 1;
}
@@ -3726,7 +3579,101 @@ static unsigned char XGI_XG27GetPSCValue(struct vb_device_info *pVBInfo)
return temp;
}
-void XGI_DisplayOn(struct xgi_hw_device_info *pXGIHWDE,
+/*----------------------------------------------------------------------------*/
+/* input */
+/* bl[5] : 1;LVDS signal on */
+/* bl[1] : 1;LVDS backlight on */
+/* bl[0] : 1:LVDS VDD on */
+/* bh: 100000b : clear bit 5, to set bit5 */
+/* 000010b : clear bit 1, to set bit1 */
+/* 000001b : clear bit 0, to set bit0 */
+/*----------------------------------------------------------------------------*/
+static void XGI_XG21BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
+ struct vb_device_info *pVBInfo)
+{
+ unsigned char CR4A, temp;
+
+ CR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
+ tempbh &= 0x23;
+ tempbl &= 0x23;
+ xgifb_reg_and(pVBInfo->P3d4, 0x4A, ~tempbh); /* enable GPIO write */
+
+ if (tempbh & 0x20) {
+ temp = (tempbl >> 4) & 0x02;
+
+ /* CR B4[1] */
+ xgifb_reg_and_or(pVBInfo->P3d4, 0xB4, ~0x02, temp);
+
+ }
+
+ temp = xgifb_reg_get(pVBInfo->P3d4, 0x48);
+
+ temp = XG21GPIODataTransfer(temp);
+ temp &= ~tempbh;
+ temp |= tempbl;
+ xgifb_reg_set(pVBInfo->P3d4, 0x48, temp);
+}
+
+static void XGI_XG27BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
+ struct vb_device_info *pVBInfo)
+{
+ unsigned char CR4A, temp;
+ unsigned short tempbh0, tempbl0;
+
+ tempbh0 = tempbh;
+ tempbl0 = tempbl;
+ tempbh0 &= 0x20;
+ tempbl0 &= 0x20;
+ tempbh0 >>= 3;
+ tempbl0 >>= 3;
+
+ if (tempbh & 0x20) {
+ temp = (tempbl >> 4) & 0x02;
+
+ /* CR B4[1] */
+ xgifb_reg_and_or(pVBInfo->P3d4, 0xB4, ~0x02, temp);
+
+ }
+ xgifb_reg_and_or(pVBInfo->P3d4, 0xB4, ~tempbh0, tempbl0);
+
+ CR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
+ tempbh &= 0x03;
+ tempbl &= 0x03;
+ tempbh <<= 2;
+ tempbl <<= 2; /* GPIOC,GPIOD */
+ xgifb_reg_and(pVBInfo->P3d4, 0x4A, ~tempbh); /* enable GPIO write */
+ xgifb_reg_and_or(pVBInfo->P3d4, 0x48, ~tempbh, tempbl);
+}
+
+/* --------------------------------------------------------------------- */
+/* Function : XGI_XG21SetPanelDelay */
+/* Input : */
+/* Output : */
+/* Description : */
+/* I/P : bl : 1 ; T1 : the duration between CPL on and signal on */
+/* : bl : 2 ; T2 : the duration signal on and Vdd on */
+/* : bl : 3 ; T3 : the duration between CPL off and signal off */
+/* : bl : 4 ; T4 : the duration signal off and Vdd off */
+/* --------------------------------------------------------------------- */
+static void XGI_XG21SetPanelDelay(struct xgifb_video_info *xgifb_info,
+ unsigned short tempbl,
+ struct vb_device_info *pVBInfo)
+{
+ if (tempbl == 1)
+ mdelay(xgifb_info->lvds_data.PSC_S1);
+
+ if (tempbl == 2)
+ mdelay(xgifb_info->lvds_data.PSC_S2);
+
+ if (tempbl == 3)
+ mdelay(xgifb_info->lvds_data.PSC_S3);
+
+ if (tempbl == 4)
+ mdelay(xgifb_info->lvds_data.PSC_S4);
+}
+
+static void XGI_DisplayOn(struct xgifb_video_info *xgifb_info,
+ struct xgi_hw_device_info *pXGIHWDE,
struct vb_device_info *pVBInfo)
{
@@ -3736,12 +3683,12 @@ void XGI_DisplayOn(struct xgi_hw_device_info *pXGIHWDE,
if (!(XGI_XG21GetPSCValue(pVBInfo) & 0x1)) {
/* LVDS VDD on */
XGI_XG21BLSignalVDD(0x01, 0x01, pVBInfo);
- XGI_XG21SetPanelDelay(2, pVBInfo);
+ XGI_XG21SetPanelDelay(xgifb_info, 2, pVBInfo);
}
if (!(XGI_XG21GetPSCValue(pVBInfo) & 0x20))
/* LVDS signal on */
XGI_XG21BLSignalVDD(0x20, 0x20, pVBInfo);
- XGI_XG21SetPanelDelay(3, pVBInfo);
+ XGI_XG21SetPanelDelay(xgifb_info, 3, pVBInfo);
/* LVDS backlight on */
XGI_XG21BLSignalVDD(0x02, 0x02, pVBInfo);
} else {
@@ -3756,12 +3703,12 @@ void XGI_DisplayOn(struct xgi_hw_device_info *pXGIHWDE,
if (!(XGI_XG27GetPSCValue(pVBInfo) & 0x1)) {
/* LVDS VDD on */
XGI_XG27BLSignalVDD(0x01, 0x01, pVBInfo);
- XGI_XG21SetPanelDelay(2, pVBInfo);
+ XGI_XG21SetPanelDelay(xgifb_info, 2, pVBInfo);
}
if (!(XGI_XG27GetPSCValue(pVBInfo) & 0x20))
/* LVDS signal on */
XGI_XG27BLSignalVDD(0x20, 0x20, pVBInfo);
- XGI_XG21SetPanelDelay(3, pVBInfo);
+ XGI_XG21SetPanelDelay(xgifb_info, 3, pVBInfo);
/* LVDS backlight on */
XGI_XG27BLSignalVDD(0x02, 0x02, pVBInfo);
} else {
@@ -3772,7 +3719,8 @@ void XGI_DisplayOn(struct xgi_hw_device_info *pXGIHWDE,
}
}
-void XGI_DisplayOff(struct xgi_hw_device_info *pXGIHWDE,
+void XGI_DisplayOff(struct xgifb_video_info *xgifb_info,
+ struct xgi_hw_device_info *pXGIHWDE,
struct vb_device_info *pVBInfo)
{
@@ -3780,7 +3728,7 @@ void XGI_DisplayOff(struct xgi_hw_device_info *pXGIHWDE,
if (pVBInfo->IF_DEF_LVDS == 1) {
/* LVDS backlight off */
XGI_XG21BLSignalVDD(0x02, 0x00, pVBInfo);
- XGI_XG21SetPanelDelay(3, pVBInfo);
+ XGI_XG21SetPanelDelay(xgifb_info, 3, pVBInfo);
} else {
/* DVO/DVI signal off */
XGI_XG21BLSignalVDD(0x20, 0x00, pVBInfo);
@@ -3791,7 +3739,7 @@ void XGI_DisplayOff(struct xgi_hw_device_info *pXGIHWDE,
if ((XGI_XG27GetPSCValue(pVBInfo) & 0x2)) {
/* LVDS backlight off */
XGI_XG27BLSignalVDD(0x02, 0x00, pVBInfo);
- XGI_XG21SetPanelDelay(3, pVBInfo);
+ XGI_XG21SetPanelDelay(xgifb_info, 3, pVBInfo);
}
if (pVBInfo->IF_DEF_LVDS == 0)
@@ -3838,26 +3786,17 @@ static void XGI_GetCRT2ResInfo(unsigned short ModeNo,
if (ModeNo <= 0x13) {
xres = pVBInfo->StResInfo[resindex].HTotal;
yres = pVBInfo->StResInfo[resindex].VTotal;
- /* si+St_ResInfo */
- /* modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;*/
} else {
xres = pVBInfo->ModeResInfo[resindex].HTotal; /* xres->ax */
yres = pVBInfo->ModeResInfo[resindex].VTotal; /* yres->bx */
/* si+St_ModeFlag */
modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
- /*
- if (pVBInfo->IF_DEF_FSTN) {
- xres *= 2;
- yres *= 2;
- } else {
- */
if (modeflag & HalfDCLK)
xres *= 2;
if (modeflag & DoubleScanMode)
yres *= 2;
- /* } */
}
if (pVBInfo->VBInfo & SetCRT2ToLCD) {
@@ -4028,8 +3967,6 @@ static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
tempbx = 775;
else if (pVBInfo->VGAVDE == 600)
tempbx = 775;
- /* else if (pVBInfo->VGAVDE==350) tempbx=560; */
- /* else if (pVBInfo->VGAVDE==400) tempbx=640; */
else
tempbx = 768;
} else
@@ -4294,7 +4231,6 @@ static void XGI_PreSetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex,
XGI_SetCRT2Offset(ModeNo, ModeIdIndex, RefreshRateTableIndex,
HwDeviceExtension, pVBInfo);
XGI_SetCRT2FIFO(pVBInfo);
- /* XGI_SetCRT2Sync(ModeNo,RefreshRateTableIndex); */
for (tempcx = 4; tempcx < 7; tempcx++)
xgifb_reg_set(pVBInfo->Part1Port, tempcx, 0x0);
@@ -4497,9 +4433,6 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = 0xFF; /* set MAX HT */
xgifb_reg_set(pVBInfo->Part1Port, 0x03, temp);
- /* if (modeflag & Charx8Dot) */
- /* tempcx = 0x08; */
- /* else */
tempcx = 0x08;
if (pVBInfo->VBType & (VB_XGI301LV | VB_XGI302LV | VB_XGI301C))
@@ -4565,7 +4498,6 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
}
}
} else {
- /* tempcx = tempbx & 0x00FF ; */
tempbx = (tempbx & 0xFF00) >> 8;
tempcx = (tempcx + tempbx) >> 1;
temp = (tempcx & 0x00FF) + 2;
@@ -4579,36 +4511,23 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
temp -= 6;
}
}
- } else {
- if (!(modeflag & HalfDCLK)) {
- temp -= 4;
- if (pVBInfo->LCDResInfo != Panel1280x960) {
- if (pVBInfo->VGAHDE >= 800) {
- temp -= 7;
- if (pVBInfo->ModeType ==
- ModeEGA) {
- if (pVBInfo->VGAVDE ==
- 1024) {
- temp += 15;
- if (pVBInfo->LCDResInfo != Panel1280x1024) {
- temp +=
- 7;
- }
- }
- }
-
- if (pVBInfo->VGAHDE >= 1280) {
- if (pVBInfo->LCDResInfo
- != Panel1280x960) {
- if (pVBInfo->LCDInfo
- & LCDNonExpanding) {
- temp
- += 28;
- }
- }
- }
- }
+ } else if (!(modeflag & HalfDCLK)) {
+ temp -= 4;
+ if (pVBInfo->LCDResInfo != Panel1280x960 &&
+ pVBInfo->VGAHDE >= 800) {
+ temp -= 7;
+ if (pVBInfo->ModeType == ModeEGA &&
+ pVBInfo->VGAVDE == 1024) {
+ temp += 15;
+ if (pVBInfo->LCDResInfo !=
+ Panel1280x1024)
+ temp += 7;
}
+
+ if (pVBInfo->VGAHDE >= 1280 &&
+ pVBInfo->LCDResInfo != Panel1280x960 &&
+ (pVBInfo->LCDInfo & LCDNonExpanding))
+ temp += 28;
}
}
}
@@ -5297,7 +5216,6 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
tempax--;
xgifb_reg_and(pVBInfo->Part2Port, 0x01, tempax);
- /* if ( !( pVBInfo->VBType & VB_XGI301C ) ) */
xgifb_reg_and(pVBInfo->Part2Port, 0x00, 0xEF);
}
@@ -5436,7 +5354,6 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
tempax = pVBInfo->VT;
tempbx = pVBInfo->LCDVRS;
- /* if (SetLCD_Info & EnableScalingLCD) */
tempcx += tempbx;
if (tempcx >= tempax)
tempcx -= tempax;
@@ -5478,12 +5395,10 @@ static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
temp = (tempcx & 0xFF00) >> 8;
xgifb_reg_set(pVBInfo->Part2Port, 0x25, temp);
- /* getlcdsync() */
XGI_GetLCDSync(&tempax, &tempbx, pVBInfo);
tempcx = tempax;
tempax = pVBInfo->HT;
tempbx = pVBInfo->LCDHRS;
- /* if ( SetLCD_Info & EnableScalingLCD) */
if (XGI_IsLCDDualLink(pVBInfo)) {
tempax = tempax >> 1;
tempbx = tempbx >> 1;
@@ -5801,9 +5716,6 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
if (XGI_IsLCDDualLink(pVBInfo))
tempax = tempax >> 1;
- /* if((pVBInfo->VBInfo&(SetCRT2ToLCD)) ||
- ((pVBInfo->TVInfo&SetYPbPrMode525p) ||
- (pVBInfo->TVInfo&SetYPbPrMode750p))) { */
if (pVBInfo->VBInfo & SetCRT2ToLCD) {
if (tempax > 800)
tempax -= 800;
@@ -5817,33 +5729,6 @@ static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
}
tempax -= 1;
- /*
- if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToHiVisionTV)) {
- if (pVBInfo->VBType & VB_XGI301LV) {
- if (!(pVBInfo->TVInfo &
- (SetYPbPrMode525p |
- SetYPbPrMode750p |
- SetYPbPrMode1080i))) {
- if (pVBInfo->VGAHDE > 800) {
- if (pVBInfo->VGAHDE == 1024)
- tempax =(tempax * 25 /
- 32) - 1;
- else
- tempax = (tempax * 20 /
- 32) - 1;
- }
- }
- } else {
- if (pVBInfo->VGAHDE > 800) {
- if (pVBInfo->VGAHDE == 1024)
- tempax = (tempax * 25 / 32) - 1;
- else
- tempax = (tempax * 20 / 32) - 1;
- }
- }
- }
- */
-
temp = (tempax & 0xFF00) >> 8;
temp = ((temp & 0x0003) << 4);
xgifb_reg_set(pVBInfo->Part4Port, 0x1E, temp);
@@ -5902,7 +5787,6 @@ static void XGI_SetGroup5(unsigned short ModeNo, unsigned short ModeIdIndex,
if (!(pVBInfo->VBInfo & (SetInSlaveMode | LoadDACFlag
| CRT2DisplayFlag))) {
XGINew_EnableCRT2(pVBInfo);
- /* LoadDAC2(pVBInfo->Part5Port, ModeNo, ModeIdIndex); */
}
}
return;
@@ -5921,118 +5805,11 @@ static void XGI_DisableGatingCRT(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_and_or(pVBInfo->P3d4, 0x63, 0xBF, 0x00);
}
-/*----------------------------------------------------------------------------*/
-/* input */
-/* bl[5] : 1;LVDS signal on */
-/* bl[1] : 1;LVDS backlight on */
-/* bl[0] : 1:LVDS VDD on */
-/* bh: 100000b : clear bit 5, to set bit5 */
-/* 000010b : clear bit 1, to set bit1 */
-/* 000001b : clear bit 0, to set bit0 */
-/*----------------------------------------------------------------------------*/
-void XGI_XG21BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
- struct vb_device_info *pVBInfo)
-{
- unsigned char CR4A, temp;
-
- CR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
- tempbh &= 0x23;
- tempbl &= 0x23;
- xgifb_reg_and(pVBInfo->P3d4, 0x4A, ~tempbh); /* enable GPIO write */
-
- if (tempbh & 0x20) {
- temp = (tempbl >> 4) & 0x02;
-
- /* CR B4[1] */
- xgifb_reg_and_or(pVBInfo->P3d4, 0xB4, ~0x02, temp);
-
- }
-
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x48);
-
- temp = XG21GPIODataTransfer(temp);
- temp &= ~tempbh;
- temp |= tempbl;
- xgifb_reg_set(pVBInfo->P3d4, 0x48, temp);
-}
-
-void XGI_XG27BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
- struct vb_device_info *pVBInfo)
-{
- unsigned char CR4A, temp;
- unsigned short tempbh0, tempbl0;
-
- tempbh0 = tempbh;
- tempbl0 = tempbl;
- tempbh0 &= 0x20;
- tempbl0 &= 0x20;
- tempbh0 >>= 3;
- tempbl0 >>= 3;
-
- if (tempbh & 0x20) {
- temp = (tempbl >> 4) & 0x02;
-
- /* CR B4[1] */
- xgifb_reg_and_or(pVBInfo->P3d4, 0xB4, ~0x02, temp);
-
- }
- xgifb_reg_and_or(pVBInfo->P3d4, 0xB4, ~tempbh0, tempbl0);
-
- CR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
- tempbh &= 0x03;
- tempbl &= 0x03;
- tempbh <<= 2;
- tempbl <<= 2; /* GPIOC,GPIOD */
- xgifb_reg_and(pVBInfo->P3d4, 0x4A, ~tempbh); /* enable GPIO write */
- xgifb_reg_and_or(pVBInfo->P3d4, 0x48, ~tempbh, tempbl);
-}
-
-/* --------------------------------------------------------------------- */
-unsigned short XGI_GetLVDSOEMTableIndex(struct vb_device_info *pVBInfo)
-{
- unsigned short index;
-
- index = xgifb_reg_get(pVBInfo->P3d4, 0x36);
- if (index < sizeof(XGI21_LCDCapList)
- / sizeof(struct XGI21_LVDSCapStruct))
- return index;
- return 0;
-}
-
-/* --------------------------------------------------------------------- */
-/* Function : XGI_XG21SetPanelDelay */
-/* Input : */
-/* Output : */
-/* Description : */
-/* I/P : bl : 1 ; T1 : the duration between CPL on and signal on */
-/* : bl : 2 ; T2 : the duration signal on and Vdd on */
-/* : bl : 3 ; T3 : the duration between CPL off and signal off */
-/* : bl : 4 ; T4 : the duration signal off and Vdd off */
-/* --------------------------------------------------------------------- */
-void XGI_XG21SetPanelDelay(unsigned short tempbl,
+static unsigned char XGI_XG21CheckLVDSMode(struct xgifb_video_info *xgifb_info,
+ unsigned short ModeNo, unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
- unsigned short index;
-
- index = XGI_GetLVDSOEMTableIndex(pVBInfo);
- if (tempbl == 1)
- mdelay(pVBInfo->XG21_LVDSCapList[index].PSC_S1);
-
- if (tempbl == 2)
- mdelay(pVBInfo->XG21_LVDSCapList[index].PSC_S2);
-
- if (tempbl == 3)
- mdelay(pVBInfo->XG21_LVDSCapList[index].PSC_S3);
-
- if (tempbl == 4)
- mdelay(pVBInfo->XG21_LVDSCapList[index].PSC_S4);
-}
-
-unsigned char XGI_XG21CheckLVDSMode(unsigned short ModeNo,
- unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
-{
- unsigned short xres, yres, colordepth, modeflag, resindex,
- lvdstableindex;
+ unsigned short xres, yres, colordepth, modeflag, resindex;
resindex = XGI_GetResInfo(ModeNo, ModeIdIndex, pVBInfo);
if (ModeNo <= 0x13) {
@@ -6061,18 +5838,15 @@ unsigned char XGI_XG21CheckLVDSMode(unsigned short ModeNo,
}
- lvdstableindex = XGI_GetLVDSOEMTableIndex(pVBInfo);
- if (xres > (pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHDE))
+ if (xres > xgifb_info->lvds_data.LVDSHDE)
return 0;
- if (yres > (pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVDE))
+ if (yres > xgifb_info->lvds_data.LVDSVDE)
return 0;
if (ModeNo > 0x13) {
- if ((xres != (pVBInfo->XG21_LVDSCapList[lvdstableindex].
- LVDSHDE)) ||
- (yres != (pVBInfo->XG21_LVDSCapList[lvdstableindex].
- LVDSVDE))) {
+ if (xres != xgifb_info->lvds_data.LVDSHDE ||
+ yres != xgifb_info->lvds_data.LVDSVDE) {
colordepth = XGI_GetColorDepth(ModeNo,
ModeIdIndex,
pVBInfo);
@@ -6084,55 +5858,26 @@ unsigned char XGI_XG21CheckLVDSMode(unsigned short ModeNo,
return 1;
}
-void XGI_SetXG21FPBits(struct vb_device_info *pVBInfo)
-{
- unsigned char temp;
-
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x37); /* D[0] 1: 18bit */
- temp = (temp & 1) << 6;
- /* SR06[6] 18bit Dither */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x06, ~0x40, temp);
- /* SR09[7] enable FP output, SR09[6] 1: sigle 18bits, 0: dual 12bits */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x09, ~0xc0, temp | 0x80);
-
-}
-
-void XGI_SetXG27FPBits(struct vb_device_info *pVBInfo)
-{
- unsigned char temp;
-
- /* D[1:0] 01: 18bit, 00: dual 12, 10: single 24 */
- temp = xgifb_reg_get(pVBInfo->P3d4, 0x37);
- temp = (temp & 3) << 6;
- /* SR06[7]0: dual 12/1: single 24 [6] 18bit Dither <= 0 h/w recommend */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x06, ~0xc0, temp & 0x80);
- /* SR09[7] enable FP output, SR09[6] 1: sigle 18bits, 0: 24bits */
- xgifb_reg_and_or(pVBInfo->P3c4, 0x09, ~0xc0, temp | 0x80);
-
-}
-
-static void xgifb_set_lvds(int chip_id,
+static void xgifb_set_lvds(struct xgifb_video_info *xgifb_info,
+ int chip_id,
unsigned short ModeNo,
unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
unsigned char temp, Miscdata;
- unsigned short xres, yres, modeflag, resindex, lvdstableindex;
+ unsigned short xres, yres, modeflag, resindex;
unsigned short LVDSHT, LVDSHBS, LVDSHRS, LVDSHRE, LVDSHBE;
unsigned short LVDSVT, LVDSVBS, LVDSVRS, LVDSVRE, LVDSVBE;
unsigned short value;
- lvdstableindex = XGI_GetLVDSOEMTableIndex(pVBInfo);
- temp = (unsigned char) ((pVBInfo->XG21_LVDSCapList[lvdstableindex].
- LVDS_Capability &
+ temp = (unsigned char) ((xgifb_info->lvds_data.LVDS_Capability &
(LCDPolarity << 8)) >> 8);
temp &= LCDPolarity;
Miscdata = (unsigned char) inb(pVBInfo->P3cc);
outb((Miscdata & 0x3F) | temp, pVBInfo->P3c2);
- temp = (unsigned char) (pVBInfo->XG21_LVDSCapList[lvdstableindex].
- LVDS_Capability & LCDPolarity);
+ temp = xgifb_info->lvds_data.LVDS_Capability & LCDPolarity;
/* SR35[7] FP VSync polarity */
xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x80, temp & 0x80);
/* SR30[5] FP HSync polarity */
@@ -6159,48 +5904,43 @@ static void xgifb_set_lvds(int chip_id,
if (!(modeflag & Charx8Dot))
xres = xres * 8 / 9;
- LVDSHT = pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHT;
+ LVDSHT = xgifb_info->lvds_data.LVDSHT;
- LVDSHBS = xres + (pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHDE
- - xres) / 2;
+ LVDSHBS = xres + (xgifb_info->lvds_data.LVDSHDE - xres) / 2;
if ((ModeNo <= 0x13) && (modeflag & HalfDCLK))
LVDSHBS -= xres / 4;
if (LVDSHBS > LVDSHT)
LVDSHBS -= LVDSHT;
- LVDSHRS = LVDSHBS + pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHFP;
+ LVDSHRS = LVDSHBS + xgifb_info->lvds_data.LVDSHFP;
if (LVDSHRS > LVDSHT)
LVDSHRS -= LVDSHT;
- LVDSHRE = LVDSHRS + pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHSYNC;
+ LVDSHRE = LVDSHRS + xgifb_info->lvds_data.LVDSHSYNC;
if (LVDSHRE > LVDSHT)
LVDSHRE -= LVDSHT;
- LVDSHBE = LVDSHBS + LVDSHT
- - pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHDE;
+ LVDSHBE = LVDSHBS + LVDSHT - xgifb_info->lvds_data.LVDSHDE;
- LVDSVT = pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVT;
+ LVDSVT = xgifb_info->lvds_data.LVDSVT;
- LVDSVBS = yres + (pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVDE
- - yres) / 2;
+ LVDSVBS = yres + (xgifb_info->lvds_data.LVDSVDE - yres) / 2;
if ((ModeNo > 0x13) && (modeflag & DoubleScanMode))
LVDSVBS += yres / 2;
if (LVDSVBS > LVDSVT)
LVDSVBS -= LVDSVT;
- LVDSVRS = LVDSVBS + pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVFP;
+ LVDSVRS = LVDSVBS + xgifb_info->lvds_data.LVDSVFP;
if (LVDSVRS > LVDSVT)
LVDSVRS -= LVDSVT;
- LVDSVRE = LVDSVRS + pVBInfo->XG21_LVDSCapList[lvdstableindex].
- LVDSVSYNC;
+ LVDSVRE = LVDSVRS + xgifb_info->lvds_data.LVDSVSYNC;
if (LVDSVRE > LVDSVT)
LVDSVRE -= LVDSVT;
- LVDSVBE = LVDSVBS + LVDSVT
- - pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVDE;
+ LVDSVBE = LVDSVBS + LVDSVT - xgifb_info->lvds_data.LVDSVDE;
temp = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x11);
xgifb_reg_set(pVBInfo->P3d4, 0x11, temp & 0x7f); /* Unlock CRTC */
@@ -6300,13 +6040,9 @@ static void xgifb_set_lvds(int chip_id,
xgifb_reg_and_or(pVBInfo->P3c4, 0x31, ~0x30, value);
xgifb_reg_set(pVBInfo->P3c4,
- 0x2B,
- pVBInfo->XG21_LVDSCapList[lvdstableindex].
- VCLKData1);
+ 0x2B, xgifb_info->lvds_data.VCLKData1);
xgifb_reg_set(pVBInfo->P3c4,
- 0x2C,
- pVBInfo->XG21_LVDSCapList[lvdstableindex].
- VCLKData2);
+ 0x2C, xgifb_info->lvds_data.VCLKData2);
value += 0x10;
}
@@ -6398,7 +6134,8 @@ static unsigned char XGI_EnableChISLCD(struct vb_device_info *pVBInfo)
return 0;
}
-void XGI_DisableBridge(struct xgi_hw_device_info *HwDeviceExtension,
+static void XGI_DisableBridge(struct xgifb_video_info *xgifb_info,
+ struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned short tempah = 0;
@@ -6442,7 +6179,7 @@ void XGI_DisableBridge(struct xgi_hw_device_info *HwDeviceExtension,
| SetSimuScanMode))) {
if (pVBInfo->SetFlag & GatingCRT)
XGI_EnableGatingCRT(HwDeviceExtension, pVBInfo);
- XGI_DisplayOff(HwDeviceExtension, pVBInfo);
+ XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
}
if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
@@ -6464,7 +6201,6 @@ void XGI_DisableBridge(struct xgi_hw_device_info *HwDeviceExtension,
((!(pVBInfo->VBInfo & SetCRT2ToLCDA)) &&
(pVBInfo->VBInfo &
(SetCRT2ToRAMDAC | SetCRT2ToLCD | SetCRT2ToTV))))
- /* BScreenOff=1 */
xgifb_reg_or(pVBInfo->Part1Port, 0x00, 0x80);
if ((pVBInfo->SetFlag & DisableChB) ||
@@ -6484,7 +6220,6 @@ void XGI_DisableBridge(struct xgi_hw_device_info *HwDeviceExtension,
}
} else { /* {301} */
if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToTV)) {
- /* BScreenOff=1 */
xgifb_reg_or(pVBInfo->Part1Port, 0x00, 0x80);
/* Disable CRT2 */
xgifb_reg_and(pVBInfo->Part1Port, 0x1E, 0xDF);
@@ -6494,7 +6229,7 @@ void XGI_DisableBridge(struct xgi_hw_device_info *HwDeviceExtension,
if (pVBInfo->VBInfo & (DisableCRT2Display | SetCRT2ToLCDA
| SetSimuScanMode))
- XGI_DisplayOff(HwDeviceExtension, pVBInfo);
+ XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
}
}
@@ -6610,17 +6345,6 @@ static void XGI_SetDelayComp(struct vb_device_info *pVBInfo)
if (pVBInfo->VBInfo & SetCRT2ToDualEdge)
tempbl = tempbl >> 4;
- /*
- if (pVBInfo->VBInfo & SetCRT2ToRAMDAC)
- tempbl = CRT2Delay1; // Get CRT2 Delay
- if (pVBInfo->VBType &
- (VB_XGI301B |
- VB_XGI302B |
- VB_XGI301LV |
- VB_XGI302LV |
- VB_XGI301C))
- tempbl = CRT2Delay2;
- */
if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
/* Get LCD Delay */
index = XGI_GetLCDCapPtr(pVBInfo);
@@ -6680,23 +6404,6 @@ static void XGI_SetLCDCap_A(unsigned short tempcx,
(unsigned short) (0x30 | (tempcx & 0x00C0)));
xgifb_reg_and_or(pVBInfo->Part1Port, 0x1A, 0x7F, 0x00);
}
-
- /*
- if (tempcx & EnableLCD24bpp) { // 24bits
- xgifb_reg_and_or(pVBInfo->Part1Port,
- 0x19,
- 0x0F,
- (unsigned short)(0x30 | (tempcx&0x00C0)));
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x1A, 0x7F, 0x00);
- } else {
- xgifb_reg_and_or(pVBInfo->Part1Port,
- 0x19,
- 0x0F,
- // Enable Dither
- (unsigned short)(0x20 | (tempcx&0x00C0)));
- xgifb_reg_and_or(pVBInfo->Part1Port, 0x1A, 0x7F, 0x80);
- }
- */
}
/* --------------------------------------------------------------------- */
@@ -6718,6 +6425,25 @@ static void XGI_SetLCDCap_B(unsigned short tempcx,
| 0x18)); /* Enable Dither */
}
+static void XGI_LongWait(struct vb_device_info *pVBInfo)
+{
+ unsigned short i;
+
+ i = xgifb_reg_get(pVBInfo->P3c4, 0x1F);
+
+ if (!(i & 0xC0)) {
+ for (i = 0; i < 0xFFFF; i++) {
+ if (!(inb(pVBInfo->P3da) & 0x08))
+ break;
+ }
+
+ for (i = 0; i < 0xFFFF; i++) {
+ if ((inb(pVBInfo->P3da) & 0x08))
+ break;
+ }
+ }
+}
+
static void SetSpectrum(struct vb_device_info *pVBInfo)
{
unsigned short index;
@@ -6940,14 +6666,12 @@ static void XGI_OEM310Setting(unsigned short ModeNo,
unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
- /* GetPart1IO(); */
XGI_SetDelayComp(pVBInfo);
if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA))
XGI_SetLCDCap(pVBInfo);
if (pVBInfo->VBInfo & SetCRT2ToTV) {
- /* GetPart2IO() */
XGI_SetPhaseIncr(pVBInfo);
XGI_SetYFilter(ModeNo, ModeIdIndex, pVBInfo);
XGI_SetAntiFlicker(ModeNo, ModeIdIndex, pVBInfo);
@@ -6963,7 +6687,7 @@ static void XGI_OEM310Setting(unsigned short ModeNo,
/* Output : */
/* Description : Origin code for crt2group */
/* --------------------------------------------------------------------- */
-void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
+static void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
@@ -6972,8 +6696,6 @@ void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
unsigned char tempah;
- /* // fix write part1 index 0 BTDRAM bit Bug
- * xgifb_reg_set(pVBInfo->Part1Port, 0x03, 0x00); */
tempah = 0;
if (!(pVBInfo->VBInfo & DisableCRT2Display)) {
tempah = xgifb_reg_get(pVBInfo->Part1Port, 0x00);
@@ -6999,32 +6721,6 @@ void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
}
}
- /* 0210 shampoo
- if (pVBInfo->VBInfo & DisableCRT2Display) {
- tempah = 0;
- }
-
- xgifb_reg_set(pVBInfo->Part1Port, 0x00, tempah);
- if (pVBInfo->VBInfo & (SetCRT2ToRAMDAC | SetCRT2ToTV | SetCRT2ToLCD)) {
- tempcl = pVBInfo->ModeType;
- if (ModeNo > 0x13) {
- tempcl -= ModeVGA;
- if ((tempcl > 0) || (tempcl == 0)) {
- tempah=(0x008>>tempcl) ;
- if (tempah == 0)
- tempah = 1;
- tempah |= 0x040;
- }
- } else {
- tempah = 0x040;
- }
-
- if (pVBInfo->VBInfo & SetInSlaveMode) {
- tempah = (tempah ^ 0x050);
- }
- }
- */
-
xgifb_reg_set(pVBInfo->Part1Port, 0x00, tempah);
tempah = 0x08;
tempbl = 0xf0;
@@ -7093,14 +6789,11 @@ void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
tempah |= 0x080;
if (pVBInfo->VBInfo & SetCRT2ToTV) {
- /* if (!(pVBInfo->TVInfo &
- (SetYPbPrMode525p | SetYPbPrMode750p))) { */
tempah |= 0x020;
if (ModeNo > 0x13) {
if (pVBInfo->VBInfo & DriverMode)
tempah = tempah ^ 0x20;
}
- /* } */
}
xgifb_reg_and_or(pVBInfo->Part4Port, 0x0D, ~0x0BF, tempah);
@@ -7110,12 +6803,8 @@ void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
tempah |= 0x40;
if (pVBInfo->VBInfo & SetCRT2ToTV) {
- /* if ((!(pVBInfo->VBInfo & SetCRT2ToHiVisionTV)) &&
- (!(pVBInfo->TVInfo &
- (SetYPbPrMode525p | SetYPbPrMode750p)))) { */
if (pVBInfo->TVInfo & RPLLDIV2XO)
tempah |= 0x40;
- /* } */
}
if ((pVBInfo->LCDResInfo == Panel1280x1024)
@@ -7219,57 +6908,6 @@ unsigned char XGI_BridgeIsOn(struct vb_device_info *pVBInfo)
}
}
-void XGI_LongWait(struct vb_device_info *pVBInfo)
-{
- unsigned short i;
-
- i = xgifb_reg_get(pVBInfo->P3c4, 0x1F);
-
- if (!(i & 0xC0)) {
- for (i = 0; i < 0xFFFF; i++) {
- if (!(inb(pVBInfo->P3da) & 0x08))
- break;
- }
-
- for (i = 0; i < 0xFFFF; i++) {
- if ((inb(pVBInfo->P3da) & 0x08))
- break;
- }
- }
-}
-
-static void XGI_VBLongWait(struct vb_device_info *pVBInfo)
-{
- unsigned short tempal, temp, i, j;
- return;
- if (!(pVBInfo->VBInfo & SetCRT2ToTV)) {
- temp = 0;
- for (i = 0; i < 3; i++) {
- for (j = 0; j < 100; j++) {
- tempal = inb(pVBInfo->P3da);
- if (temp & 0x01) { /* VBWaitMode2 */
- if ((tempal & 0x08))
- continue;
-
- if (!(tempal & 0x08))
- break;
-
- } else { /* VBWaitMode1 */
- if (!(tempal & 0x08))
- continue;
-
- if ((tempal & 0x08))
- break;
- }
- }
- temp = temp ^ 0x01;
- }
- } else {
- XGI_LongWait(pVBInfo);
- }
- return;
-}
-
unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
unsigned short ModeNo, unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
@@ -7322,12 +6960,6 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
RefreshRateTableIndex = pVBInfo->EModeIDTable[ModeIdIndex].REFindex;
ModeNo = pVBInfo->RefIndex[RefreshRateTableIndex].ModeID;
if (pXGIHWDE->jChipType >= XG20) { /* for XG20, XG21, XG27 */
- /*
- if (pVBInfo->RefIndex[RefreshRateTableIndex].Ext_InfoFlag &
- XG2xNotSupport) {
- index++;
- }
- */
if ((pVBInfo->RefIndex[RefreshRateTableIndex].XRes == 800) &&
(pVBInfo->RefIndex[RefreshRateTableIndex].YRes == 600)) {
index++;
@@ -7371,7 +7003,7 @@ unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
temp = XGI_AjustCRT2Rate(ModeNo, ModeIdIndex,
RefreshRateTableIndex, &i, pVBInfo);
}
- return RefreshRateTableIndex + i; /* return (0x01 | (temp1<<1)); */
+ return RefreshRateTableIndex + i;
}
static void XGI_SetLCDAGroup(unsigned short ModeNo, unsigned short ModeIdIndex,
@@ -7379,9 +7011,6 @@ static void XGI_SetLCDAGroup(unsigned short ModeNo, unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
unsigned short RefreshRateTableIndex;
- /* unsigned short temp ; */
-
- /* pVBInfo->SelectCRT2Rate = 0; */
pVBInfo->SetFlag |= ProgrammingCRT2;
RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
@@ -7394,7 +7023,7 @@ static void XGI_SetLCDAGroup(unsigned short ModeNo, unsigned short ModeIdIndex,
XGI_SetCRT2ECLK(ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
}
-unsigned char XGI_SetCRT2Group301(unsigned short ModeNo,
+static unsigned char XGI_SetCRT2Group301(unsigned short ModeNo,
struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
@@ -7499,10 +7128,6 @@ void XGI_SenseCRT1(struct vb_device_info *pVBInfo)
outb((unsigned char) DAC_TEST_PARMS[2], (pVBInfo->P3c8 + 1));
}
- XGI_VBLongWait(pVBInfo);
- XGI_VBLongWait(pVBInfo);
- XGI_VBLongWait(pVBInfo);
-
mdelay(1);
XGI_WaitDisply(pVBInfo);
@@ -7532,7 +7157,8 @@ void XGI_SenseCRT1(struct vb_device_info *pVBInfo)
xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char) SR1F);
}
-void XGI_EnableBridge(struct xgi_hw_device_info *HwDeviceExtension,
+static void XGI_EnableBridge(struct xgifb_video_info *xgifb_info,
+ struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned short tempah;
@@ -7544,7 +7170,6 @@ void XGI_EnableBridge(struct xgi_hw_device_info *HwDeviceExtension,
/* Power on */
xgifb_reg_set(pVBInfo->Part1Port, 0x1E, 0x20);
} else {
- /* SetCRT2ToLCDA ) */
if (pVBInfo->VBInfo & SetCRT2ToDualEdge) {
/* Power on */
xgifb_reg_set(pVBInfo->Part1Port,
@@ -7572,10 +7197,8 @@ void XGI_EnableBridge(struct xgi_hw_device_info *HwDeviceExtension,
pVBInfo->Part1Port, 0x2E);
if (!(tempah & 0x80))
- /* BVBDOENABLE = 1 */
xgifb_reg_or(pVBInfo->Part1Port,
0x2E, 0x80);
- /* BScreenOFF = 0 */
xgifb_reg_and(pVBInfo->Part1Port, 0x00, 0x7F);
}
}
@@ -7638,12 +7261,11 @@ void XGI_EnableBridge(struct xgi_hw_device_info *HwDeviceExtension,
xgifb_reg_or(pVBInfo->Part4Port, 0x1F, tempah);
if (!(pVBInfo->SetFlag & DisableChA)) {
- XGI_VBLongWait(pVBInfo);
if (!(pVBInfo->SetFlag & GatingCRT)) {
XGI_DisableGatingCRT(HwDeviceExtension,
pVBInfo);
- XGI_DisplayOn(HwDeviceExtension, pVBInfo);
- XGI_VBLongWait(pVBInfo);
+ XGI_DisplayOn(xgifb_info, HwDeviceExtension,
+ pVBInfo);
}
}
} /* 301 */
@@ -7656,15 +7278,15 @@ void XGI_EnableBridge(struct xgi_hw_device_info *HwDeviceExtension,
tempah = (unsigned char) xgifb_reg_get(pVBInfo->Part1Port,
0x2E);
if (!(tempah & 0x80))
- /* BVBDOENABLE = 1 */
xgifb_reg_or(pVBInfo->Part1Port, 0x2E, 0x80);
xgifb_reg_and(pVBInfo->Part1Port, 0x00, 0x7F);
- XGI_DisplayOn(HwDeviceExtension, pVBInfo);
+ XGI_DisplayOn(xgifb_info, HwDeviceExtension, pVBInfo);
} /* End of VB */
}
-static void XGI_SetCRT1Group(struct xgi_hw_device_info *HwDeviceExtension,
+static void XGI_SetCRT1Group(struct xgifb_video_info *xgifb_info,
+ struct xgi_hw_device_info *HwDeviceExtension,
unsigned short ModeNo, unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
@@ -7672,18 +7294,14 @@ static void XGI_SetCRT1Group(struct xgi_hw_device_info *HwDeviceExtension,
unsigned short XGINew_P3cc = pVBInfo->P3cc;
- /* XGINew_CRT1Mode = ModeNo; // SaveModeID */
StandTableIndex = XGI_GetModePtr(ModeNo, ModeIdIndex, pVBInfo);
- /* XGI_SetBIOSData(ModeNo, ModeIdIndex); */
- /* XGI_ClearBankRegs(ModeNo, ModeIdIndex); */
XGI_SetSeqRegs(ModeNo, StandTableIndex, ModeIdIndex, pVBInfo);
- XGI_SetMiscRegs(StandTableIndex, pVBInfo);
+ outb(pVBInfo->StandTable[StandTableIndex].MISC, pVBInfo->P3c2);
XGI_SetCRTCRegs(HwDeviceExtension, StandTableIndex, pVBInfo);
XGI_SetATTRegs(ModeNo, StandTableIndex, ModeIdIndex, pVBInfo);
XGI_SetGRCRegs(StandTableIndex, pVBInfo);
XGI_ClearExt1Regs(pVBInfo);
- /* if (pVBInfo->IF_DEF_ExpLink) */
if (HwDeviceExtension->jChipType == XG27) {
if (pVBInfo->IF_DEF_LVDS == 0)
XGI_SetDefaultVCLK(pVBInfo);
@@ -7735,11 +7353,6 @@ static void XGI_SetCRT1Group(struct xgi_hw_device_info *HwDeviceExtension,
temp = xgifb_reg_get(pVBInfo->P3d4, 0x38);
if (temp & 0xA0) {
- /* Enable write GPIOF */
- /* xgifb_reg_and(pVBInfo->P3d4, 0x4A, ~0x20); */
- /* P. DWN */
- /* xgifb_reg_and(pVBInfo->P3d4, 0x48, ~0x20); */
- /* XG21 CRT1 Timing */
if (HwDeviceExtension->jChipType == XG27)
XGI_SetXG27CRTC(ModeNo, ModeIdIndex,
RefreshRateTableIndex, pVBInfo);
@@ -7754,10 +7367,9 @@ static void XGI_SetCRT1Group(struct xgi_hw_device_info *HwDeviceExtension,
pVBInfo, RefreshRateTableIndex, ModeNo);
if (pVBInfo->IF_DEF_LVDS == 1)
- xgifb_set_lvds(HwDeviceExtension->jChipType,
+ xgifb_set_lvds(xgifb_info,
+ HwDeviceExtension->jChipType,
ModeNo, ModeIdIndex, pVBInfo);
- /* P. ON */
- /* xgifb_reg_or(pVBInfo->P3d4, 0x48, 0x20); */
}
}
@@ -7765,22 +7377,16 @@ static void XGI_SetCRT1Group(struct xgi_hw_device_info *HwDeviceExtension,
XGI_SetCRT1FIFO(ModeNo, HwDeviceExtension, pVBInfo);
XGI_SetCRT1ModeRegs(HwDeviceExtension, ModeNo, ModeIdIndex,
RefreshRateTableIndex, pVBInfo);
-
- /* XGI_LoadCharacter(); //dif ifdef TVFont */
-
XGI_LoadDAC(ModeNo, ModeIdIndex, pVBInfo);
- /* XGI_ClearBuffer(HwDeviceExtension, ModeNo, pVBInfo); */
}
-unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
+unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
+ struct xgi_hw_device_info *HwDeviceExtension,
unsigned short ModeNo)
{
unsigned short ModeIdIndex;
- /* unsigned char *pVBInfo->FBAddr =
- HwDeviceExtension->pjVideoMemoryAddress; */
struct vb_device_info VBINF;
struct vb_device_info *pVBInfo = &VBINF;
- pVBInfo->ROMAddr = HwDeviceExtension->pjVirtualRomBase;
pVBInfo->BaseAddr = (unsigned long) HwDeviceExtension->pjIOAddress;
pVBInfo->IF_DEF_LVDS = 0;
pVBInfo->IF_DEF_LCDA = 1;
@@ -7831,14 +7437,8 @@ unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
XGI_GetVBType(pVBInfo);
InitTo330Pointer(HwDeviceExtension->jChipType, pVBInfo);
- if (ModeNo & 0x80) {
+ if (ModeNo & 0x80)
ModeNo = ModeNo & 0x7F;
- /* XGINew_flag_clearbuffer = 0; */
- }
- /* else {
- XGINew_flag_clearbuffer = 1;
- }
- */
xgifb_reg_set(pVBInfo->P3c4, 0x05, 0x86);
if (HwDeviceExtension->jChipType < XG20) /* kuku 2004/06/25 1.Openkey */
@@ -7846,16 +7446,14 @@ unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
XGI_SearchModeID(ModeNo, &ModeIdIndex, pVBInfo);
- XGI_GetVGAType(HwDeviceExtension, pVBInfo);
-
if (HwDeviceExtension->jChipType < XG20) { /* kuku 2004/06/25 */
XGI_GetVBInfo(ModeNo, ModeIdIndex, HwDeviceExtension, pVBInfo);
XGI_GetTVInfo(ModeNo, ModeIdIndex, pVBInfo);
XGI_GetLCDInfo(ModeNo, ModeIdIndex, pVBInfo);
- XGI_DisableBridge(HwDeviceExtension, pVBInfo);
+ XGI_DisableBridge(xgifb_info, HwDeviceExtension, pVBInfo);
if (pVBInfo->VBInfo & (SetSimuScanMode | SetCRT2ToLCDA)) {
- XGI_SetCRT1Group(HwDeviceExtension, ModeNo,
+ XGI_SetCRT1Group(xgifb_info, HwDeviceExtension, ModeNo,
ModeIdIndex, pVBInfo);
if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
@@ -7864,7 +7462,8 @@ unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
}
} else {
if (!(pVBInfo->VBInfo & SwitchToCRT2)) {
- XGI_SetCRT1Group(HwDeviceExtension, ModeNo,
+ XGI_SetCRT1Group(xgifb_info,
+ HwDeviceExtension, ModeNo,
ModeIdIndex, pVBInfo);
if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
XGI_SetLCDAGroup(ModeNo, ModeIdIndex,
@@ -7894,11 +7493,11 @@ unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
XGI_SetCRT2ModeRegs(ModeNo, HwDeviceExtension, pVBInfo);
XGI_OEM310Setting(ModeNo, ModeIdIndex, pVBInfo); /*0212*/
XGI_CloseCRTC(HwDeviceExtension, pVBInfo);
- XGI_EnableBridge(HwDeviceExtension, pVBInfo);
+ XGI_EnableBridge(xgifb_info, HwDeviceExtension, pVBInfo);
} /* !XG20 */
else {
if (pVBInfo->IF_DEF_LVDS == 1)
- if (!XGI_XG21CheckLVDSMode(ModeNo,
+ if (!XGI_XG21CheckLVDSMode(xgifb_info, ModeNo,
ModeIdIndex,
pVBInfo))
return 0;
@@ -7914,39 +7513,13 @@ unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
pVBInfo->SetFlag = 0;
pVBInfo->VBInfo = DisableCRT2Display;
- XGI_DisplayOff(HwDeviceExtension, pVBInfo);
+ XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo);
- XGI_SetCRT1Group(HwDeviceExtension, ModeNo, ModeIdIndex,
- pVBInfo);
+ XGI_SetCRT1Group(xgifb_info, HwDeviceExtension, ModeNo,
+ ModeIdIndex, pVBInfo);
- XGI_DisplayOn(HwDeviceExtension, pVBInfo);
- /*
- if (HwDeviceExtension->jChipType == XG21)
- xgifb_reg_and_or(pVBInfo->P3c4, 0x09, ~0x80, 0x80);
- */
- }
-
- /*
- if (ModeNo <= 0x13) {
- modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
- } else {
- modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
+ XGI_DisplayOn(xgifb_info, HwDeviceExtension, pVBInfo);
}
- pVBInfo->ModeType = modeflag&ModeInfoFlag;
- pVBInfo->SetFlag = 0x00;
- pVBInfo->VBInfo = DisableCRT2Display;
- temp = XGINew_CheckMemorySize(HwDeviceExtension,
- ModeNo,
- ModeIdIndex,
- pVBInfo);
-
- if (temp == 0)
- return (0);
-
- XGI_DisplayOff(HwDeviceExtension, pVBInfo) ;
- XGI_SetCRT1Group(HwDeviceExtension, ModeNo, ModeIdIndex, pVBInfo);
- XGI_DisplayOn(HwDeviceExtension, pVBInfo);
- */
XGI_UpdateModeInfo(HwDeviceExtension, pVBInfo);
diff --git a/drivers/staging/xgifb/vb_setmode.h b/drivers/staging/xgifb/vb_setmode.h
index 1bd8667..5524828 100644
--- a/drivers/staging/xgifb/vb_setmode.h
+++ b/drivers/staging/xgifb/vb_setmode.h
@@ -6,66 +6,22 @@ extern void XGI_UnLockCRT2(struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *);
extern void XGI_LockCRT2(struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *);
-extern void XGI_LongWait(struct vb_device_info *);
-extern void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
- struct xgi_hw_device_info *,
- struct vb_device_info *);
-extern void XGI_DisableBridge(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *);
-extern void XGI_EnableBridge(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *);
-extern void XGI_DisplayOff(struct xgi_hw_device_info *,
+extern void XGI_DisplayOff(struct xgifb_video_info *,
+ struct xgi_hw_device_info *,
struct vb_device_info *);
-extern void XGI_DisplayOn(struct xgi_hw_device_info *,
- struct vb_device_info *);
extern void XGI_GetVBType(struct vb_device_info *);
extern void XGI_SenseCRT1(struct vb_device_info *);
-extern void XGI_GetVGAType(struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *);
-extern void XGI_GetVBInfo(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *);
-extern void XGI_GetTVInfo(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct vb_device_info *);
-extern unsigned short XGI_GetResInfo(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo);
-
-extern unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
+extern unsigned char XGISetModeNew(struct xgifb_video_info *xgifb_info,
+ struct xgi_hw_device_info *HwDeviceExtension,
unsigned short ModeNo) ;
extern unsigned char XGI_SearchModeID(unsigned short ModeNo,
unsigned short *ModeIdIndex,
struct vb_device_info *);
-extern unsigned char XGI_GetLCDInfo(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct vb_device_info *);
extern unsigned char XGI_BridgeIsOn(struct vb_device_info *);
-
-extern unsigned char
-XGI_SetCRT2Group301(unsigned short ModeNo,
- struct xgi_hw_device_info *HwDeviceExtension,
- struct vb_device_info *);
extern unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
unsigned short ModeNo,
unsigned short ModeIdIndex,
struct vb_device_info *);
-extern void XGI_SetXG21FPBits(struct vb_device_info *pVBInfo);
-extern void XGI_SetXG27FPBits(struct vb_device_info *pVBInfo);
-extern void XGI_XG21BLSignalVDD(unsigned short tempbh,
- unsigned short tempbl,
- struct vb_device_info *pVBInfo);
-extern void XGI_XG27BLSignalVDD(unsigned short tempbh,
- unsigned short tempbl,
- struct vb_device_info *pVBInfo);
-extern void XGI_XG21SetPanelDelay(unsigned short tempbl,
- struct vb_device_info *pVBInfo);
-extern unsigned char XGI_XG21CheckLVDSMode(unsigned short ModeNo,
- unsigned short ModeIdIndex,
- struct vb_device_info *pVBInfo);
-extern unsigned short XGI_GetLVDSOEMTableIndex(struct vb_device_info *pVBInfo);
-
#endif
diff --git a/drivers/staging/xgifb/vb_struct.h b/drivers/staging/xgifb/vb_struct.h
index f9ade6f..6556a0d 100644
--- a/drivers/staging/xgifb/vb_struct.h
+++ b/drivers/staging/xgifb/vb_struct.h
@@ -293,13 +293,12 @@ struct vb_device_info {
unsigned short IF_DEF_ExpLink;
unsigned short IF_DEF_HiVision;
unsigned short LCDResInfo, LCDTypeInfo, VBType;/*301b*/
- unsigned short VBInfo, TVInfo, LCDInfo, Set_VGAType;
+ unsigned short VBInfo, TVInfo, LCDInfo;
unsigned short VBExtInfo;/*301lv*/
unsigned short SetFlag;
unsigned short NewFlickerMode;
unsigned short SelectCRT2Rate;
- unsigned char *ROMAddr;
void __iomem *FBAddr;
unsigned long BaseAddr;
unsigned long RelIO;
@@ -376,7 +375,6 @@ struct vb_device_info {
unsigned char *pXGINew_CR97 ;
struct XGI330_LCDCapStruct *LCDCapList;
- struct XGI21_LVDSCapStruct *XG21_LVDSCapList;
struct XGI_TimingHStruct *TimingH;
struct XGI_TimingVStruct *TimingV;
diff --git a/drivers/staging/xgifb/vb_table.h b/drivers/staging/xgifb/vb_table.h
index b81ac77..e7946f1 100644
--- a/drivers/staging/xgifb/vb_table.h
+++ b/drivers/staging/xgifb/vb_table.h
@@ -2569,33 +2569,6 @@ static struct XGI330_LCDCapStruct XGI_LCDCapList[] = {
0x30, 0x10, 0x5A, 0x10, 0x10, 0x0A, 0xC0, 0x28, 0x10}
};
-struct XGI21_LVDSCapStruct XGI21_LCDCapList[] = {
- {DisableLCD24bpp + LCDPolarity,
- 2160, 1250, 1600, 1200, 64, 1, 192, 3,
- 0x70, 0x24, 0x20, 0x04, 0x0A, 0x02, 0xC8
- },
- {DisableLCD24bpp + LCDPolarity,
- 1688, 1066, 1280, 1024, 48, 1, 112, 3,
- 0x70, 0x44, 0x20, 0x04, 0x0A, 0x02, 0xC8
- },
- {DisableLCD24bpp + LCDPolarity + (LCDPolarity << 8),
- 1344, 806, 1024, 768, 24, 3, 136, 6,
- 0x6C, 0x65, 0x20, 0x04, 0x0A, 0x02, 0xC8
- },
- {DisableLCD24bpp + LCDPolarity,
- 1056, 628, 800, 600, 40, 1, 128, 4,
- 0x42, 0xE2, 0x20, 0x14, 0x0A, 0x02, 0x00
- },
- {DisableLCD24bpp + LCDPolarity,
- 928, 525, 800, 480, 40, 13, 48, 3,
- 0x52, 0xC5, 0x20, 0x14, 0x0A, 0x02, 0x00
- },
- {DisableLCD24bpp + LCDPolarity + (LCDPolarity << 8),
- 800, 525, 640, 480, 16, 10, 96, 2,
- 0x1B, 0xE1, 0x20, 0x04, 0x0A, 0x02, 0xC8
- }
-};
-
static struct XGI_Ext2Struct XGI330_RefIndex[] = {
{Support32Bpp + SupportAllCRT2 + SyncPN, RES320x200, VCLK25_175,
0x00, 0x10, 0x59, 320, 200},/* 00 */
diff --git a/drivers/staging/xgifb/vgatypes.h b/drivers/staging/xgifb/vgatypes.h
index 9b939b7..9e166bb 100644
--- a/drivers/staging/xgifb/vgatypes.h
+++ b/drivers/staging/xgifb/vgatypes.h
@@ -51,8 +51,6 @@ struct xgi_hw_device_info {
unsigned long ulExternalChip; /* NO VB or other video bridge*/
/* if ujVBChipID = VB_CHIP_UNKNOWN, */
- unsigned char *pjVirtualRomBase; /* ROM image */
-
void __iomem *pjVideoMemoryAddress;/* base virtual memory address */
/* of Linear VGA memory */
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index 56c1f9c..ef7c52b 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -358,8 +358,8 @@ static struct zbud_hdr *zbud_create(uint16_t client_id, uint16_t pool_id,
if (unlikely(zbpg == NULL))
goto out;
/* ok, have a page, now compress the data before taking locks */
- spin_lock(&zbpg->lock);
spin_lock(&zbud_budlists_spinlock);
+ spin_lock(&zbpg->lock);
list_add_tail(&zbpg->bud_list, &zbud_unbuddied[nchunks].list);
zbud_unbuddied[nchunks].count++;
zh = &zbpg->buddy[0];
@@ -389,12 +389,11 @@ init_zh:
zh->oid = *oid;
zh->pool_id = pool_id;
zh->client_id = client_id;
- /* can wait to copy the data until the list locks are dropped */
- spin_unlock(&zbud_budlists_spinlock);
-
to = zbud_data(zh, size);
memcpy(to, cdata, size);
spin_unlock(&zbpg->lock);
+ spin_unlock(&zbud_budlists_spinlock);
+
zbud_cumul_chunk_counts[nchunks]++;
atomic_inc(&zcache_zbud_curr_zpages);
zcache_zbud_cumul_zpages++;
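The reordering above makes zbud_create() take zbud_budlists_spinlock before the per-page lock and keeps both held until the data copy finishes, releasing them in reverse order. A minimal sketch of that general pattern, with hypothetical types and locks rather than the zcache structures, assuming a fixed outer-then-inner acquisition order everywhere:

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/string.h>

/* Hypothetical example types; the point is the consistent lock order. */
static DEFINE_SPINLOCK(list_lock);		/* outer: protects the list */
static LIST_HEAD(item_list);

struct item {
	spinlock_t lock;			/* inner: protects the payload */
	struct list_head entry;
	char payload[64];
};

static void item_publish(struct item *it, const void *data, size_t len)
{
	/* Always the outer lock first, then the per-item lock ... */
	spin_lock(&list_lock);
	spin_lock(&it->lock);

	list_add_tail(&it->entry, &item_list);
	memcpy(it->payload, data, min(len, sizeof(it->payload)));

	/* ... and release in the reverse order. */
	spin_unlock(&it->lock);
	spin_unlock(&list_lock);
}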
@@ -655,8 +654,8 @@ static unsigned int zv_max_zsize = (PAGE_SIZE / 8) * 7;
*/
static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5;
-static unsigned long zv_curr_dist_counts[NCHUNKS];
-static unsigned long zv_cumul_dist_counts[NCHUNKS];
+static atomic_t zv_curr_dist_counts[NCHUNKS];
+static atomic_t zv_cumul_dist_counts[NCHUNKS];
static struct zv_hdr *zv_create(struct xv_pool *xvpool, uint32_t pool_id,
struct tmem_oid *oid, uint32_t index,
@@ -675,8 +674,8 @@ static struct zv_hdr *zv_create(struct xv_pool *xvpool, uint32_t pool_id,
&page, &offset, ZCACHE_GFP_MASK);
if (unlikely(ret))
goto out;
- zv_curr_dist_counts[chunks]++;
- zv_cumul_dist_counts[chunks]++;
+ atomic_inc(&zv_curr_dist_counts[chunks]);
+ atomic_inc(&zv_cumul_dist_counts[chunks]);
zv = kmap_atomic(page, KM_USER0) + offset;
zv->index = index;
zv->oid = *oid;
@@ -698,7 +697,7 @@ static void zv_free(struct xv_pool *xvpool, struct zv_hdr *zv)
ASSERT_SENTINEL(zv, ZVH);
BUG_ON(chunks >= NCHUNKS);
- zv_curr_dist_counts[chunks]--;
+ atomic_dec(&zv_curr_dist_counts[chunks]);
size -= sizeof(*zv);
BUG_ON(size == 0);
INVERT_SENTINEL(zv, ZVH);
@@ -738,7 +737,7 @@ static int zv_curr_dist_counts_show(char *buf)
char *p = buf;
for (i = 0; i < NCHUNKS; i++) {
- n = zv_curr_dist_counts[i];
+ n = atomic_read(&zv_curr_dist_counts[i]);
p += sprintf(p, "%lu ", n);
chunks += n;
sum_total_chunks += i * n;
@@ -754,7 +753,7 @@ static int zv_cumul_dist_counts_show(char *buf)
char *p = buf;
for (i = 0; i < NCHUNKS; i++) {
- n = zv_cumul_dist_counts[i];
+ n = atomic_read(&zv_cumul_dist_counts[i]);
p += sprintf(p, "%lu ", n);
chunks += n;
sum_total_chunks += i * n;
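The distribution counters above change from plain unsigned long arrays to atomic_t so concurrent zv_create()/zv_free() callers cannot lose increments; the show() side then reads them through atomic_read(). The same pattern in isolation, with a hypothetical counter name:

#include <linux/atomic.h>

#define NBUCKETS 16

/* Hypothetical per-bucket counters updated from concurrent contexts. */
static atomic_t bucket_counts[NBUCKETS];

static void bucket_add(unsigned int bucket)
{
	atomic_inc(&bucket_counts[bucket]);	/* lock-free increment */
}

static unsigned long bucket_total(void)
{
	unsigned long sum = 0;
	unsigned int i;

	for (i = 0; i < NBUCKETS; i++)
		sum += atomic_read(&bucket_counts[i]);

	return sum;
}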
@@ -787,7 +786,7 @@ static ssize_t zv_max_zsize_store(struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
return -EINVAL;
zv_max_zsize = val;
@@ -819,7 +818,7 @@ static ssize_t zv_max_mean_zsize_store(struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err || (val == 0) || (val > (PAGE_SIZE / 8) * 7))
return -EINVAL;
zv_max_mean_zsize = val;
@@ -853,7 +852,7 @@ static ssize_t zv_page_count_policy_percent_store(struct kobject *kobj,
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- err = strict_strtoul(buf, 10, &val);
+ err = kstrtoul(buf, 10, &val);
if (err || (val == 0) || (val > 150))
return -EINVAL;
zv_page_count_policy_percent = val;
@@ -1782,9 +1781,9 @@ static int zcache_frontswap_poolid = -1;
* Swizzling increases objects per swaptype, increasing tmem concurrency
* for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS
* Setting SWIZ_BITS to 27 basically reconstructs the swap entry from
- * frontswap_get_page()
+ * frontswap_get_page(), but has side-effects. Hence using 8.
*/
-#define SWIZ_BITS 27
+#define SWIZ_BITS 8
#define SWIZ_MASK ((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind) ((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
#define iswiz(_ind) (_ind >> SWIZ_BITS)
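To make the swizzling macros above concrete: with SWIZ_BITS set to 8, a swap offset is split into a low 8-bit chunk that stays with the type in the object-id selector and the remaining bits that become the per-object index. A small worked example of the arithmetic only (userspace, same macro definitions as the patch):

#include <stdio.h>
#include <stdint.h>

#define SWIZ_BITS	8
#define SWIZ_MASK	((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind)	((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
#define iswiz(_ind)		(_ind >> SWIZ_BITS)

int main(void)
{
	uint32_t type = 1;		/* swap type */
	uint32_t ind = 0x12345;		/* page offset within that swap device */

	/* The oid selector keeps the type plus the low 8 bits of the offset ... */
	printf("oswiz = 0x%x\n", _oswiz(type, ind));	/* 0x145 */
	/* ... and the per-object index keeps the remaining high bits. */
	printf("iswiz = 0x%x\n", iswiz(ind));		/* 0x123 */
	return 0;
}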
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 09de99f..2a2a92d 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -653,7 +653,8 @@ int zram_init_device(struct zram *zram)
goto fail_no_table;
}
- zram->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1);
+ zram->compress_buffer =
+ (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
if (!zram->compress_buffer) {
pr_err("Error allocating compressor buffer space\n");
ret = -ENOMEM;
diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c
index 0ea8ed2..d521122 100644
--- a/drivers/staging/zram/zram_sysfs.c
+++ b/drivers/staging/zram/zram_sysfs.c
@@ -58,7 +58,7 @@ static ssize_t disksize_store(struct device *dev,
u64 disksize;
struct zram *zram = dev_to_zram(dev);
- ret = strict_strtoull(buf, 10, &disksize);
+ ret = kstrtoull(buf, 10, &disksize);
if (ret)
return ret;
@@ -88,7 +88,7 @@ static ssize_t reset_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t len)
{
int ret;
- unsigned long do_reset;
+ unsigned short do_reset;
struct zram *zram;
struct block_device *bdev;
@@ -99,7 +99,7 @@ static ssize_t reset_store(struct device *dev,
if (bdev->bd_holders)
return -EBUSY;
- ret = strict_strtoul(buf, 10, &do_reset);
+ ret = kstrtou16(buf, 10, &do_reset);
if (ret)
return ret;
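The strict_strtoul()/strict_strtoull() call sites in the zcache and zram hunks above move to the kstrto*() family, which lets the destination type bound the accepted range (for example, kstrtou16() rejects anything that does not fit in 16 bits instead of silently truncating). A minimal sysfs-store sketch of the same pattern, with a hypothetical attribute:

#include <linux/kernel.h>
#include <linux/device.h>

/* Hypothetical attribute backing store, for illustration only. */
static u16 example_limit;

static ssize_t example_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u16 val;
	int ret;

	/* kstrtou16() returns -EINVAL/-ERANGE on bad or out-of-range input. */
	ret = kstrtou16(buf, 10, &val);
	if (ret)
		return ret;

	example_limit = val;
	return len;
}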
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 8599545..501b27c 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -27,8 +27,7 @@
#include <scsi/scsi_device.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
-#include <target/target_core_tmr.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_fabric.h>
#include "iscsi_target_core.h"
#include "iscsi_target_parameters.h"
@@ -284,8 +283,8 @@ static struct iscsi_np *iscsit_get_np(
sock_in6 = (struct sockaddr_in6 *)sockaddr;
sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
- if (!memcmp((void *)&sock_in6->sin6_addr.in6_u,
- (void *)&sock_in6_e->sin6_addr.in6_u,
+ if (!memcmp(&sock_in6->sin6_addr.in6_u,
+ &sock_in6_e->sin6_addr.in6_u,
sizeof(struct in6_addr)))
ip_match = 1;
@@ -1029,7 +1028,7 @@ done:
return iscsit_add_reject_from_cmd(
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
1, 1, buf, cmd);
- } else if (transport_ret == -EINVAL) {
+ } else if (transport_ret < 0) {
/*
* Unsupported SAM Opcode. CHECK_CONDITION will be sent
* in iscsit_execute_cmd() during the CmdSN OOO Execution
@@ -1062,7 +1061,7 @@ attach_cmd:
if (ret < 0)
return iscsit_add_reject_from_cmd(
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, 1, buf, cmd);
+ 1, 0, buf, cmd);
/*
* Check the CmdSN against ExpCmdSN/MaxCmdSN here if
* the Immediate Bit is not set, and no Immediate
@@ -1225,7 +1224,7 @@ static void iscsit_do_crypto_hash_buf(
crypto_hash_init(hash);
- sg_init_one(&sg, (u8 *)buf, payload_length);
+ sg_init_one(&sg, buf, payload_length);
crypto_hash_update(hash, &sg, payload_length);
if (padding) {
@@ -1603,7 +1602,7 @@ static int iscsit_handle_nop_out(
/*
* Attach ping data to struct iscsi_cmd->buf_ptr.
*/
- cmd->buf_ptr = (void *)ping_data;
+ cmd->buf_ptr = ping_data;
cmd->buf_ptr_size = payload_length;
pr_debug("Got %u bytes of NOPOUT ping"
@@ -3165,6 +3164,30 @@ static int iscsit_send_task_mgt_rsp(
return 0;
}
+static bool iscsit_check_inaddr_any(struct iscsi_np *np)
+{
+ bool ret = false;
+
+ if (np->np_sockaddr.ss_family == AF_INET6) {
+ const struct sockaddr_in6 sin6 = {
+ .sin6_addr = IN6ADDR_ANY_INIT };
+ struct sockaddr_in6 *sock_in6 =
+ (struct sockaddr_in6 *)&np->np_sockaddr;
+
+ if (!memcmp(sock_in6->sin6_addr.s6_addr,
+ sin6.sin6_addr.s6_addr, 16))
+ ret = true;
+ } else {
+ struct sockaddr_in *sock_in =
+ (struct sockaddr_in *)&np->np_sockaddr;
+
+ if (sock_in->sin_addr.s_addr == INADDR_ANY)
+ ret = true;
+ }
+
+ return ret;
+}
+
static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
{
char *payload = NULL;
@@ -3197,7 +3220,7 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
end_of_buf = 1;
goto eob;
}
- memcpy((void *)payload + payload_len, buf, len);
+ memcpy(payload + payload_len, buf, len);
payload_len += len;
spin_lock(&tiqn->tiqn_tpg_lock);
@@ -3214,12 +3237,17 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
spin_lock(&tpg->tpg_np_lock);
list_for_each_entry(tpg_np, &tpg->tpg_gnp_list,
tpg_np_list) {
+ struct iscsi_np *np = tpg_np->tpg_np;
+ bool inaddr_any = iscsit_check_inaddr_any(np);
+
len = sprintf(buf, "TargetAddress="
"%s%s%s:%hu,%hu",
- (tpg_np->tpg_np->np_sockaddr.ss_family == AF_INET6) ?
- "[" : "", tpg_np->tpg_np->np_ip,
- (tpg_np->tpg_np->np_sockaddr.ss_family == AF_INET6) ?
- "]" : "", tpg_np->tpg_np->np_port,
+ (np->np_sockaddr.ss_family == AF_INET6) ?
+ "[" : "", (inaddr_any == false) ?
+ np->np_ip : conn->local_ip,
+ (np->np_sockaddr.ss_family == AF_INET6) ?
+ "]" : "", (inaddr_any == false) ?
+ np->np_port : conn->local_port,
tpg->tpgt);
len += 1;
@@ -3229,7 +3257,7 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
end_of_buf = 1;
goto eob;
}
- memcpy((void *)payload + payload_len, buf, len);
+ memcpy(payload + payload_len, buf, len);
payload_len += len;
}
spin_unlock(&tpg->tpg_np_lock);
@@ -3486,7 +3514,7 @@ int iscsi_target_tx_thread(void *arg)
struct iscsi_conn *conn;
struct iscsi_queue_req *qr = NULL;
struct se_cmd *se_cmd;
- struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg;
+ struct iscsi_thread_set *ts = arg;
/*
* Allow ourselves to be interrupted by SIGINT so that a
* connection recovery / failure event can be triggered externally.
@@ -3775,7 +3803,7 @@ int iscsi_target_rx_thread(void *arg)
u8 buffer[ISCSI_HDR_LEN], opcode;
u32 checksum = 0, digest = 0;
struct iscsi_conn *conn = NULL;
- struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg;
+ struct iscsi_thread_set *ts = arg;
struct kvec iov;
/*
* Allow ourselves to be interrupted by SIGINT so that a
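The new iscsit_check_inaddr_any() above decides whether a network portal was bound to a wildcard address; in that case the SendTargets response advertises the connection's local address and port instead of the stored 0.0.0.0/::. The same wildcard test can be exercised in plain userspace C, which may help when reasoning about the memcmp() against IN6ADDR_ANY_INIT (names here are illustrative, not the kernel structures):

#include <stdio.h>
#include <string.h>
#include <netinet/in.h>

/* Userspace mirror of the wildcard test; sockaddr_storage stands in for np->np_sockaddr. */
static int addr_is_any(const struct sockaddr_storage *ss)
{
	if (ss->ss_family == AF_INET6) {
		static const struct in6_addr any = IN6ADDR_ANY_INIT;
		const struct sockaddr_in6 *s6 = (const struct sockaddr_in6 *)ss;

		return memcmp(&s6->sin6_addr, &any, sizeof(any)) == 0;
	} else {
		const struct sockaddr_in *s4 = (const struct sockaddr_in *)ss;

		return s4->sin_addr.s_addr == htonl(INADDR_ANY);
	}
}

int main(void)
{
	struct sockaddr_storage ss;
	struct sockaddr_in *s4 = (struct sockaddr_in *)&ss;

	memset(&ss, 0, sizeof(ss));
	s4->sin_family = AF_INET;
	s4->sin_addr.s_addr = htonl(INADDR_ANY);

	printf("wildcard: %d\n", addr_is_any(&ss));	/* prints 1 */
	return 0;
}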
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 1cd6ce3..db0cf7c 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -82,7 +82,7 @@ static void chap_gen_challenge(
unsigned int *c_len)
{
unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1];
- struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+ struct iscsi_chap *chap = conn->auth_protocol;
memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
@@ -120,7 +120,7 @@ static struct iscsi_chap *chap_server_open(
if (!conn->auth_protocol)
return NULL;
- chap = (struct iscsi_chap *) conn->auth_protocol;
+ chap = conn->auth_protocol;
/*
* We only support MD5 MDA presently.
*/
@@ -165,14 +165,15 @@ static int chap_server_compute_md5(
unsigned int *nr_out_len)
{
char *endptr;
- unsigned char id, digest[MD5_SIGNATURE_SIZE];
+ unsigned long id;
+ unsigned char digest[MD5_SIGNATURE_SIZE];
unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
unsigned char identifier[10], *challenge = NULL;
unsigned char *challenge_binhex = NULL;
unsigned char client_digest[MD5_SIGNATURE_SIZE];
unsigned char server_digest[MD5_SIGNATURE_SIZE];
unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
- struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+ struct iscsi_chap *chap = conn->auth_protocol;
struct crypto_hash *tfm;
struct hash_desc desc;
struct scatterlist sg;
@@ -246,7 +247,7 @@ static int chap_server_compute_md5(
goto out;
}
- sg_init_one(&sg, (void *)&chap->id, 1);
+ sg_init_one(&sg, &chap->id, 1);
ret = crypto_hash_update(&desc, &sg, 1);
if (ret < 0) {
pr_err("crypto_hash_update() failed for id\n");
@@ -254,7 +255,7 @@ static int chap_server_compute_md5(
goto out;
}
- sg_init_one(&sg, (void *)&auth->password, strlen(auth->password));
+ sg_init_one(&sg, &auth->password, strlen(auth->password));
ret = crypto_hash_update(&desc, &sg, strlen(auth->password));
if (ret < 0) {
pr_err("crypto_hash_update() failed for password\n");
@@ -262,7 +263,7 @@ static int chap_server_compute_md5(
goto out;
}
- sg_init_one(&sg, (void *)chap->challenge, CHAP_CHALLENGE_LENGTH);
+ sg_init_one(&sg, chap->challenge, CHAP_CHALLENGE_LENGTH);
ret = crypto_hash_update(&desc, &sg, CHAP_CHALLENGE_LENGTH);
if (ret < 0) {
pr_err("crypto_hash_update() failed for challenge\n");
@@ -305,14 +306,17 @@ static int chap_server_compute_md5(
}
if (type == HEX)
- id = (unsigned char)simple_strtoul((char *)&identifier[2],
- &endptr, 0);
+ id = simple_strtoul(&identifier[2], &endptr, 0);
else
- id = (unsigned char)simple_strtoul(identifier, &endptr, 0);
+ id = simple_strtoul(identifier, &endptr, 0);
+ if (id > 255) {
+ pr_err("chap identifier: %lu greater than 255\n", id);
+ goto out;
+ }
/*
* RFC 1994 says Identifier is no more than octet (8 bits).
*/
- pr_debug("[server] Got CHAP_I=%d\n", id);
+ pr_debug("[server] Got CHAP_I=%lu\n", id);
/*
* Get CHAP_C.
*/
@@ -351,7 +355,7 @@ static int chap_server_compute_md5(
goto out;
}
- sg_init_one(&sg, (void *)&id, 1);
+ sg_init_one(&sg, &id, 1);
ret = crypto_hash_update(&desc, &sg, 1);
if (ret < 0) {
pr_err("crypto_hash_update() failed for id\n");
@@ -359,7 +363,7 @@ static int chap_server_compute_md5(
goto out;
}
- sg_init_one(&sg, (void *)auth->password_mutual,
+ sg_init_one(&sg, auth->password_mutual,
strlen(auth->password_mutual));
ret = crypto_hash_update(&desc, &sg, strlen(auth->password_mutual));
if (ret < 0) {
@@ -371,7 +375,7 @@ static int chap_server_compute_md5(
/*
* Convert received challenge to binary hex.
*/
- sg_init_one(&sg, (void *)challenge_binhex, challenge_len);
+ sg_init_one(&sg, challenge_binhex, challenge_len);
ret = crypto_hash_update(&desc, &sg, challenge_len);
if (ret < 0) {
pr_err("crypto_hash_update() failed for ma challenge\n");
@@ -414,7 +418,7 @@ static int chap_got_response(
char *nr_out_ptr,
unsigned int *nr_out_len)
{
- struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+ struct iscsi_chap *chap = conn->auth_protocol;
switch (chap->digest_type) {
case CHAP_DIGEST_MD5:
@@ -437,7 +441,7 @@ u32 chap_main_loop(
int *in_len,
int *out_len)
{
- struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+ struct iscsi_chap *chap = conn->auth_protocol;
if (!chap) {
chap = chap_server_open(conn, auth, in_text, out_text, out_len);
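The CHAP_I handling above parses the identifier into an unsigned long and explicitly rejects values above 255, since RFC 1994 limits the identifier to a single octet. An alternative sketch of the same bound, using kstrtou8() so the destination type enforces the range (a hypothetical refactor for decimal identifiers, not what this patch does):

#include <linux/kernel.h>

/* Parse a decimal CHAP identifier; the u8 destination caps it at 255. */
static int parse_chap_id(const char *s, u8 *id)
{
	int ret;

	ret = kstrtou8(s, 10, id);	/* -ERANGE if the value does not fit in 8 bits */
	if (ret)
		pr_err("invalid CHAP identifier: %s\n", s);

	return ret;
}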
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index db32784..6b35b37 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -21,13 +21,10 @@
#include <linux/configfs.h>
#include <linux/export.h>
+#include <linux/inet.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
-#include <target/target_core_fabric_lib.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
@@ -56,8 +53,7 @@ struct iscsi_portal_group *lio_get_tpg_from_tpg_item(
{
struct se_portal_group *se_tpg = container_of(to_config_group(item),
struct se_portal_group, tpg_group);
- struct iscsi_portal_group *tpg =
- (struct iscsi_portal_group *)se_tpg->se_tpg_fabric_ptr;
+ struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
int ret;
if (!tpg) {
@@ -1225,7 +1221,7 @@ struct se_portal_group *lio_target_tiqn_addtpg(
ret = core_tpg_register(
&lio_target_fabric_configfs->tf_ops,
- wwn, &tpg->tpg_se_tpg, (void *)tpg,
+ wwn, &tpg->tpg_se_tpg, tpg,
TRANSPORT_TPG_TYPE_NORMAL);
if (ret < 0)
return NULL;
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index f1a02da..0ec3b77 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -508,6 +508,7 @@ struct iscsi_conn {
u16 cid;
/* Remote TCP Port */
u16 login_port;
+ u16 local_port;
int net_size;
u32 auth_id;
#define CONNFLAG_SCTP_STRUCT_FILE 0x01
@@ -527,6 +528,7 @@ struct iscsi_conn {
unsigned char bad_hdr[ISCSI_HDR_LEN];
#define IPV6_ADDRESS_SPACE 48
unsigned char login_ip[IPV6_ADDRESS_SPACE];
+ unsigned char local_ip[IPV6_ADDRESS_SPACE];
int conn_usage_count;
int conn_waiting_on_uc;
atomic_t check_immediate_queue;
@@ -561,8 +563,8 @@ struct iscsi_conn {
struct hash_desc conn_tx_hash;
/* Used for scheduling TX and RX connection kthreads */
cpumask_var_t conn_cpumask;
- int conn_rx_reset_cpumask:1;
- int conn_tx_reset_cpumask:1;
+ unsigned int conn_rx_reset_cpumask:1;
+ unsigned int conn_tx_reset_cpumask:1;
/* list_head of struct iscsi_cmd for this connection */
struct list_head conn_cmd_list;
struct list_head immed_queue_list;
diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c
index a19fa5e..f63ea35 100644
--- a/drivers/target/iscsi/iscsi_target_device.c
+++ b/drivers/target/iscsi/iscsi_target_device.c
@@ -21,8 +21,7 @@
#include <scsi/scsi_device.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_fabric.h>
#include "iscsi_target_core.h"
#include "iscsi_target_device.h"
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index b7ffc3c..4784511 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -21,7 +21,7 @@
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_fabric.h>
#include "iscsi_target_core.h"
#include "iscsi_target_seq_pdu_list.h"
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 101b1be..27901e3 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -21,7 +21,7 @@
#include <linux/list.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_fabric.h>
#include "iscsi_target_core.h"
#include "iscsi_target_seq_pdu_list.h"
@@ -1238,7 +1238,7 @@ void iscsit_mod_dataout_timer(struct iscsi_cmd *cmd)
{
struct iscsi_conn *conn = cmd->conn;
struct iscsi_session *sess = conn->sess;
- struct iscsi_node_attrib *na = na = iscsit_tpg_get_node_attrib(sess);
+ struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
spin_lock_bh(&cmd->dataout_timeout_lock);
if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) {
@@ -1261,7 +1261,7 @@ void iscsit_start_dataout_timer(
struct iscsi_conn *conn)
{
struct iscsi_session *sess = conn->sess;
- struct iscsi_node_attrib *na = na = iscsit_tpg_get_node_attrib(sess);
+ struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
if (cmd->dataout_timer_flags & ISCSI_TF_RUNNING)
return;
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 0b8404c..1af1f21 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -21,7 +21,7 @@
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_fabric.h>
#include "iscsi_target_core.h"
#include "iscsi_target_datain_values.h"
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index d734bde..38cb7ce 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -23,7 +23,7 @@
#include <linux/crypto.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_fabric.h>
#include "iscsi_target_core.h"
#include "iscsi_target_tq.h"
@@ -143,7 +143,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
sess_list) {
- sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ sess_p = se_sess->fabric_sess_ptr;
spin_lock(&sess_p->conn_lock);
if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
atomic_read(&sess_p->session_logout) ||
@@ -151,9 +151,9 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
spin_unlock(&sess_p->conn_lock);
continue;
}
- if (!memcmp((void *)sess_p->isid, (void *)conn->sess->isid, 6) &&
- (!strcmp((void *)sess_p->sess_ops->InitiatorName,
- (void *)initiatorname_param->value) &&
+ if (!memcmp(sess_p->isid, conn->sess->isid, 6) &&
+ (!strcmp(sess_p->sess_ops->InitiatorName,
+ initiatorname_param->value) &&
(sess_p->sess_ops->SessionType == sessiontype))) {
atomic_set(&sess_p->session_reinstatement, 1);
spin_unlock(&sess_p->conn_lock);
@@ -229,7 +229,7 @@ static int iscsi_login_zero_tsih_s1(
iscsi_login_set_conn_values(sess, conn, pdu->cid);
sess->init_task_tag = pdu->itt;
- memcpy((void *)&sess->isid, (void *)pdu->isid, 6);
+ memcpy(&sess->isid, pdu->isid, 6);
sess->exp_cmd_sn = pdu->cmdsn;
INIT_LIST_HEAD(&sess->sess_conn_list);
INIT_LIST_HEAD(&sess->sess_ooo_cmdsn_list);
@@ -440,8 +440,7 @@ static int iscsi_login_non_zero_tsih_s2(
atomic_read(&sess_p->session_logout) ||
(sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED))
continue;
- if (!memcmp((const void *)sess_p->isid,
- (const void *)pdu->isid, 6) &&
+ if (!memcmp(sess_p->isid, pdu->isid, 6) &&
(sess_p->tsih == pdu->tsih)) {
iscsit_inc_session_usage_count(sess_p);
iscsit_stop_time2retain_timer(sess_p);
@@ -616,8 +615,8 @@ static int iscsi_post_login_handler(
}
pr_debug("iSCSI Login successful on CID: %hu from %s to"
- " %s:%hu,%hu\n", conn->cid, conn->login_ip, np->np_ip,
- np->np_port, tpg->tpgt);
+ " %s:%hu,%hu\n", conn->cid, conn->login_ip,
+ conn->local_ip, conn->local_port, tpg->tpgt);
list_add_tail(&conn->conn_list, &sess->sess_conn_list);
atomic_inc(&sess->nconn);
@@ -654,12 +653,13 @@ static int iscsi_post_login_handler(
spin_lock_bh(&se_tpg->session_lock);
__transport_register_session(&sess->tpg->tpg_se_tpg,
- se_sess->se_node_acl, se_sess, (void *)sess);
+ se_sess->se_node_acl, se_sess, sess);
pr_debug("Moving to TARG_SESS_STATE_LOGGED_IN.\n");
sess->session_state = TARG_SESS_STATE_LOGGED_IN;
pr_debug("iSCSI Login successful on CID: %hu from %s to %s:%hu,%hu\n",
- conn->cid, conn->login_ip, np->np_ip, np->np_port, tpg->tpgt);
+ conn->cid, conn->login_ip, conn->local_ip, conn->local_port,
+ tpg->tpgt);
spin_lock_bh(&sess->conn_lock);
list_add_tail(&conn->conn_list, &sess->sess_conn_list);
@@ -811,7 +811,7 @@ int iscsi_target_setup_login_socket(
* Setup the np->np_sockaddr from the passed sockaddr setup
* in iscsi_target_configfs.c code..
*/
- memcpy((void *)&np->np_sockaddr, (void *)sockaddr,
+ memcpy(&np->np_sockaddr, sockaddr,
sizeof(struct __kernel_sockaddr_storage));
if (sockaddr->ss_family == AF_INET6)
@@ -821,6 +821,7 @@ int iscsi_target_setup_login_socket(
/*
* Set SO_REUSEADDR, and disable Nagle Algorithm with TCP_NODELAY.
*/
+ /* FIXME: Someone please explain why this is endian-safe */
opt = 1;
if (np->np_network_transport == ISCSI_TCP) {
ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_NODELAY,
@@ -832,6 +833,7 @@ int iscsi_target_setup_login_socket(
}
}
+ /* FIXME: Someone please explain why this is endian-safe */
ret = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
(char *)&opt, sizeof(opt));
if (ret < 0) {
@@ -840,6 +842,14 @@ int iscsi_target_setup_login_socket(
goto fail;
}
+ ret = kernel_setsockopt(sock, IPPROTO_IP, IP_FREEBIND,
+ (char *)&opt, sizeof(opt));
+ if (ret < 0) {
+ pr_err("kernel_setsockopt() for IP_FREEBIND"
+ " failed\n");
+ goto fail;
+ }
+
ret = kernel_bind(sock, (struct sockaddr *)&np->np_sockaddr, len);
if (ret < 0) {
pr_err("kernel_bind() failed: %d\n", ret);
@@ -1019,6 +1029,18 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
&sock_in6.sin6_addr.in6_u);
conn->login_port = ntohs(sock_in6.sin6_port);
+
+ if (conn->sock->ops->getname(conn->sock,
+ (struct sockaddr *)&sock_in6, &err, 0) < 0) {
+ pr_err("sock_ops->getname() failed.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_TARGET_ERROR);
+ goto new_sess_out;
+ }
+ snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
+ &sock_in6.sin6_addr.in6_u);
+ conn->local_port = ntohs(sock_in6.sin6_port);
+
} else {
memset(&sock_in, 0, sizeof(struct sockaddr_in));
@@ -1031,6 +1053,16 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
}
sprintf(conn->login_ip, "%pI4", &sock_in.sin_addr.s_addr);
conn->login_port = ntohs(sock_in.sin_port);
+
+ if (conn->sock->ops->getname(conn->sock,
+ (struct sockaddr *)&sock_in, &err, 0) < 0) {
+ pr_err("sock_ops->getname() failed.\n");
+ iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+ ISCSI_LOGIN_STATUS_TARGET_ERROR);
+ goto new_sess_out;
+ }
+ sprintf(conn->local_ip, "%pI4", &sock_in.sin_addr.s_addr);
+ conn->local_port = ntohs(sock_in.sin_port);
}
conn->network_transport = np->np_network_transport;
@@ -1038,7 +1070,7 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
pr_debug("Received iSCSI login request from %s on %s Network"
" Portal %s:%hu\n", conn->login_ip,
(conn->network_transport == ISCSI_TCP) ? "TCP" : "SCTP",
- np->np_ip, np->np_port);
+ conn->local_ip, conn->local_port);
pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n");
conn->conn_state = TARG_CONN_STATE_IN_LOGIN;
@@ -1206,7 +1238,7 @@ out:
int iscsi_target_login_thread(void *arg)
{
- struct iscsi_np *np = (struct iscsi_np *)arg;
+ struct iscsi_np *np = arg;
int ret;
allow_signal(SIGINT);
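The login-thread hunks above record the local side of the accepted connection with sock->ops->getname(..., peer = 0), so later log messages and the SendTargets response can report the address the initiator actually reached rather than np->np_ip. A condensed IPv4-only sketch of that step, as a hypothetical helper with the same call shape as the patch:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/in.h>
#include <net/sock.h>

/* Fill ip/port with the local endpoint of an accepted socket (IPv4 only here). */
static int record_local_endpoint(struct socket *sock, char *ip, size_t ip_len,
				 u16 *port)
{
	struct sockaddr_in sin;
	int len;

	memset(&sin, 0, sizeof(sin));
	/* The final argument 0 selects the local name; 1 would select the peer. */
	if (sock->ops->getname(sock, (struct sockaddr *)&sin, &len, 0) < 0)
		return -EINVAL;

	snprintf(ip, ip_len, "%pI4", &sin.sin_addr.s_addr);
	*port = ntohs(sin.sin_port);
	return 0;
}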
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 98936cb..e89fa74 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -21,7 +21,7 @@
#include <linux/ctype.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
-#include <target/target_core_tpg.h>
+#include <target/target_core_fabric.h>
#include "iscsi_target_core.h"
#include "iscsi_target_parameters.h"
@@ -732,7 +732,7 @@ static void iscsi_initiatorname_tolower(
u32 iqn_size = strlen(param_buf), i;
for (i = 0; i < iqn_size; i++) {
- c = (char *)&param_buf[i];
+ c = &param_buf[i];
if (!isupper(*c))
continue;
diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c
index aeafbe0..b3c699c 100644
--- a/drivers/target/iscsi/iscsi_target_nodeattrib.c
+++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c
@@ -19,7 +19,6 @@
******************************************************************************/
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
#include "iscsi_target_core.h"
#include "iscsi_target_device.h"
@@ -135,7 +134,7 @@ extern int iscsit_na_nopin_timeout(
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
- sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ sess = se_sess->fabric_sess_ptr;
spin_lock(&sess->conn_lock);
list_for_each_entry(conn, &sess->sess_conn_list,
diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c
index f1db830..421d694 100644
--- a/drivers/target/iscsi/iscsi_target_stat.c
+++ b/drivers/target/iscsi/iscsi_target_stat.c
@@ -23,7 +23,6 @@
#include <linux/export.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
#include <target/configfs_macros.h>
#include "iscsi_target_core.h"
@@ -746,7 +745,7 @@ static ssize_t iscsi_stat_sess_show_attr_node(
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
- sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%u\n",
sess->sess_ops->SessionType ? 0 : ISCSI_NODE_INDEX);
@@ -770,7 +769,7 @@ static ssize_t iscsi_stat_sess_show_attr_indx(
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
- sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%u\n",
sess->session_index);
@@ -794,7 +793,7 @@ static ssize_t iscsi_stat_sess_show_attr_cmd_pdus(
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
- sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus);
}
@@ -817,7 +816,7 @@ static ssize_t iscsi_stat_sess_show_attr_rsp_pdus(
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
- sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus);
}
@@ -840,7 +839,7 @@ static ssize_t iscsi_stat_sess_show_attr_txdata_octs(
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
- sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%llu\n",
(unsigned long long)sess->tx_data_octets);
@@ -864,7 +863,7 @@ static ssize_t iscsi_stat_sess_show_attr_rxdata_octs(
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
- sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%llu\n",
(unsigned long long)sess->rx_data_octets);
@@ -888,7 +887,7 @@ static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors(
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
- sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%u\n",
sess->conn_digest_errors);
@@ -912,7 +911,7 @@ static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors(
spin_lock_bh(&se_nacl->nacl_sess_lock);
se_sess = se_nacl->nacl_sess;
if (se_sess) {
- sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+ sess = se_sess->fabric_sess_ptr;
if (sess)
ret = snprintf(page, PAGE_SIZE, "%u\n",
sess->conn_timeout_errors);
diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c
index 490207e..255ed35 100644
--- a/drivers/target/iscsi/iscsi_target_tmr.c
+++ b/drivers/target/iscsi/iscsi_target_tmr.c
@@ -21,7 +21,7 @@
#include <asm/unaligned.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_fabric.h>
#include "iscsi_target_core.h"
#include "iscsi_target_seq_pdu_list.h"
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index d4cf2cd..879d8d0 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -19,10 +19,8 @@
******************************************************************************/
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
-#include <target/target_core_tpg.h>
#include "iscsi_target_core.h"
#include "iscsi_target_erl0.h"
@@ -72,7 +70,7 @@ int iscsit_load_discovery_tpg(void)
ret = core_tpg_register(
&lio_target_fabric_configfs->tf_ops,
- NULL, &tpg->tpg_se_tpg, (void *)tpg,
+ NULL, &tpg->tpg_se_tpg, tpg,
TRANSPORT_TPG_TYPE_DISCOVERY);
if (ret < 0) {
kfree(tpg);
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 02348f7..11287e1 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -22,9 +22,7 @@
#include <scsi/scsi_tcq.h>
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_tmr.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include "iscsi_target_core.h"
@@ -289,7 +287,7 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
}
se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd,
- (void *)cmd->tmr_req, tcm_function,
+ cmd->tmr_req, tcm_function,
GFP_KERNEL);
if (!se_cmd->se_tmr_req)
goto out;
@@ -851,6 +849,17 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd)
case ISCSI_OP_SCSI_TMFUNC:
transport_generic_free_cmd(&cmd->se_cmd, 1);
break;
+ case ISCSI_OP_REJECT:
+ /*
+ * Handle special case for REJECT when iscsi_add_reject*() has
+ * overwritten the original iscsi_opcode assignment, and the
+ * associated cmd->se_cmd needs to be released.
+ */
+ if (cmd->se_cmd.se_tfo != NULL) {
+ transport_generic_free_cmd(&cmd->se_cmd, 1);
+ break;
+ }
+ /* Fall-through */
default:
iscsit_release_cmd(cmd);
break;
@@ -1066,7 +1075,7 @@ static void iscsit_handle_nopin_response_timeout(unsigned long data)
if (tiqn) {
spin_lock_bh(&tiqn->sess_err_stats.lock);
strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
- (void *)conn->sess->sess_ops->InitiatorName);
+ conn->sess->sess_ops->InitiatorName);
tiqn->sess_err_stats.last_sess_failure_type =
ISCSI_SESS_ERR_CXN_TIMEOUT;
tiqn->sess_err_stats.cxn_timeout_errors++;
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 81d5832..c47ff7f 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -33,14 +33,9 @@
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
-#include <target/target_core_fabric_lib.h>
#include <target/target_core_configfs.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_tmr.h>
#include "tcm_loop.h"
@@ -421,11 +416,11 @@ static struct scsi_host_template tcm_loop_driver_template = {
.queuecommand = tcm_loop_queuecommand,
.change_queue_depth = tcm_loop_change_queue_depth,
.eh_device_reset_handler = tcm_loop_device_reset,
- .can_queue = TL_SCSI_CAN_QUEUE,
+ .can_queue = 1024,
.this_id = -1,
- .sg_tablesize = TL_SCSI_SG_TABLESIZE,
- .cmd_per_lun = TL_SCSI_CMD_PER_LUN,
- .max_sectors = TL_SCSI_MAX_SECTORS,
+ .sg_tablesize = 256,
+ .cmd_per_lun = 1024,
+ .max_sectors = 0xFFFF,
.use_clustering = DISABLE_CLUSTERING,
.slave_alloc = tcm_loop_slave_alloc,
.slave_configure = tcm_loop_slave_configure,
@@ -564,8 +559,7 @@ static char *tcm_loop_get_fabric_name(void)
static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
- struct tcm_loop_tpg *tl_tpg =
- (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+ struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
/*
* tl_proto_id is set at tcm_loop_configfs.c:tcm_loop_make_scsi_hba()
@@ -592,8 +586,7 @@ static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
- struct tcm_loop_tpg *tl_tpg =
- (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+ struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
/*
* Return the passed NAA identifier for the SAS Target Port
*/
@@ -602,8 +595,7 @@ static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
{
- struct tcm_loop_tpg *tl_tpg =
- (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+ struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
/*
* This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
* to represent the SCSI Target Port.
@@ -623,8 +615,7 @@ static u32 tcm_loop_get_pr_transport_id(
int *format_code,
unsigned char *buf)
{
- struct tcm_loop_tpg *tl_tpg =
- (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+ struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
switch (tl_hba->tl_proto_id) {
@@ -653,8 +644,7 @@ static u32 tcm_loop_get_pr_transport_id_len(
struct t10_pr_registration *pr_reg,
int *format_code)
{
- struct tcm_loop_tpg *tl_tpg =
- (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+ struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
switch (tl_hba->tl_proto_id) {
@@ -687,8 +677,7 @@ static char *tcm_loop_parse_pr_out_transport_id(
u32 *out_tid_len,
char **port_nexus_ptr)
{
- struct tcm_loop_tpg *tl_tpg =
- (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+ struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
switch (tl_hba->tl_proto_id) {
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index 6b76c7a..15a0364 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -1,16 +1,7 @@
#define TCM_LOOP_VERSION "v2.1-rc1"
#define TL_WWN_ADDR_LEN 256
#define TL_TPGS_PER_HBA 32
-/*
- * Defaults for struct scsi_host_template tcm_loop_driver_template
- *
- * We use large can_queue and cmd_per_lun here and let TCM enforce
- * the underlying se_device_t->queue_depth.
- */
-#define TL_SCSI_CAN_QUEUE 1024
-#define TL_SCSI_CMD_PER_LUN 1024
-#define TL_SCSI_MAX_SECTORS 1024
-#define TL_SCSI_SG_TABLESIZE 256
+
/*
* Used in tcm_loop_driver_probe() for struct Scsi_Host->max_cmd_len
*/
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 1dcbef4..01a2691 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -32,13 +32,12 @@
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
+#include "target_core_internal.h"
#include "target_core_alua.h"
-#include "target_core_hba.h"
#include "target_core_ua.h"
static int core_alua_check_transition(int state, int *primary);
@@ -79,7 +78,7 @@ int target_emulate_report_target_port_groups(struct se_task *task)
return -EINVAL;
}
- buf = transport_kmap_first_data_page(cmd);
+ buf = transport_kmap_data_sg(cmd);
spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
@@ -164,7 +163,7 @@ int target_emulate_report_target_port_groups(struct se_task *task)
buf[2] = ((rd_len >> 8) & 0xff);
buf[3] = (rd_len & 0xff);
- transport_kunmap_first_data_page(cmd);
+ transport_kunmap_data_sg(cmd);
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
@@ -195,7 +194,7 @@ int target_emulate_set_target_port_groups(struct se_task *task)
cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -EINVAL;
}
- buf = transport_kmap_first_data_page(cmd);
+ buf = transport_kmap_data_sg(cmd);
/*
* Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed
@@ -352,7 +351,7 @@ int target_emulate_set_target_port_groups(struct se_task *task)
}
out:
- transport_kunmap_first_data_page(cmd);
+ transport_kunmap_data_sg(cmd);
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
return 0;
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 831468b..f3d71fa 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -29,10 +29,11 @@
#include <scsi/scsi.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
#include "target_core_ua.h"
-#include "target_core_cdb.h"
static void
target_fill_alua_data(struct se_port *port, unsigned char *buf)
@@ -82,7 +83,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
return -EINVAL;
}
- buf = transport_kmap_first_data_page(cmd);
+ buf = transport_kmap_data_sg(cmd);
if (dev == tpg->tpg_virt_lun0.lun_se_dev) {
buf[0] = 0x3f; /* Not connected */
@@ -94,6 +95,18 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
buf[2] = dev->transport->get_device_rev(dev);
/*
+ * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
+ *
+ * SPC4 says:
+ * A RESPONSE DATA FORMAT field set to 2h indicates that the
+ * standard INQUIRY data is in the format defined in this
+ * standard. Response data format values less than 2h are
+ * obsolete. Response data format values greater than 2h are
+ * reserved.
+ */
+ buf[3] = 2;
+
+ /*
* Enable SCCS and TPGS fields for Emulated ALUA
*/
if (dev->se_sub_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED)
@@ -115,15 +128,13 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
goto out;
}
- snprintf((unsigned char *)&buf[8], 8, "LIO-ORG");
- snprintf((unsigned char *)&buf[16], 16, "%s",
- &dev->se_sub_dev->t10_wwn.model[0]);
- snprintf((unsigned char *)&buf[32], 4, "%s",
- &dev->se_sub_dev->t10_wwn.revision[0]);
+ snprintf(&buf[8], 8, "LIO-ORG");
+ snprintf(&buf[16], 16, "%s", dev->se_sub_dev->t10_wwn.model);
+ snprintf(&buf[32], 4, "%s", dev->se_sub_dev->t10_wwn.revision);
buf[4] = 31; /* Set additional length to 31 */
out:
- transport_kunmap_first_data_page(cmd);
+ transport_kunmap_data_sg(cmd);
return 0;
}
@@ -138,8 +149,7 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
SDF_EMULATED_VPD_UNIT_SERIAL) {
u32 unit_serial_len;
- unit_serial_len =
- strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
+ unit_serial_len = strlen(dev->se_sub_dev->t10_wwn.unit_serial);
unit_serial_len++; /* For NULL Terminator */
if (((len + 4) + unit_serial_len) > cmd->data_length) {
@@ -148,8 +158,8 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
buf[3] = (len & 0xff);
return 0;
}
- len += sprintf((unsigned char *)&buf[4], "%s",
- &dev->se_sub_dev->t10_wwn.unit_serial[0]);
+ len += sprintf(&buf[4], "%s",
+ dev->se_sub_dev->t10_wwn.unit_serial);
len++; /* Extra Byte for NULL Terminator */
buf[3] = len;
}
@@ -279,14 +289,13 @@ check_t10_vend_desc:
len += (prod_len + unit_serial_len);
goto check_port;
}
- id_len += sprintf((unsigned char *)&buf[off+12],
- "%s:%s", prod,
+ id_len += sprintf(&buf[off+12], "%s:%s", prod,
&dev->se_sub_dev->t10_wwn.unit_serial[0]);
}
buf[off] = 0x2; /* ASCII */
buf[off+1] = 0x1; /* T10 Vendor ID */
buf[off+2] = 0x0;
- memcpy((unsigned char *)&buf[off+4], "LIO-ORG", 8);
+ memcpy(&buf[off+4], "LIO-ORG", 8);
/* Extra Byte for NULL Terminator */
id_len++;
/* Identifier Length */
@@ -689,6 +698,13 @@ int target_emulate_inquiry(struct se_task *task)
int p, ret;
if (!(cdb[1] & 0x1)) {
+ if (cdb[2]) {
+ pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
+ cdb[2]);
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
+ }
+
ret = target_emulate_inquiry_std(cmd);
goto out;
}
@@ -707,7 +723,7 @@ int target_emulate_inquiry(struct se_task *task)
return -EINVAL;
}
- buf = transport_kmap_first_data_page(cmd);
+ buf = transport_kmap_data_sg(cmd);
buf[0] = dev->transport->get_device_type(dev);
@@ -720,11 +736,11 @@ int target_emulate_inquiry(struct se_task *task)
}
pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
ret = -EINVAL;
out_unmap:
- transport_kunmap_first_data_page(cmd);
+ transport_kunmap_data_sg(cmd);
out:
if (!ret) {
task->task_scsi_status = GOOD;
@@ -746,7 +762,7 @@ int target_emulate_readcapacity(struct se_task *task)
else
blocks = (u32)blocks_long;
- buf = transport_kmap_first_data_page(cmd);
+ buf = transport_kmap_data_sg(cmd);
buf[0] = (blocks >> 24) & 0xff;
buf[1] = (blocks >> 16) & 0xff;
@@ -762,7 +778,7 @@ int target_emulate_readcapacity(struct se_task *task)
if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
put_unaligned_be32(0xFFFFFFFF, &buf[0]);
- transport_kunmap_first_data_page(cmd);
+ transport_kunmap_data_sg(cmd);
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
@@ -776,7 +792,7 @@ int target_emulate_readcapacity_16(struct se_task *task)
unsigned char *buf;
unsigned long long blocks = dev->transport->get_blocks(dev);
- buf = transport_kmap_first_data_page(cmd);
+ buf = transport_kmap_data_sg(cmd);
buf[0] = (blocks >> 56) & 0xff;
buf[1] = (blocks >> 48) & 0xff;
@@ -797,7 +813,7 @@ int target_emulate_readcapacity_16(struct se_task *task)
if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
buf[14] = 0x80;
- transport_kunmap_first_data_page(cmd);
+ transport_kunmap_data_sg(cmd);
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
@@ -1010,9 +1026,9 @@ int target_emulate_modesense(struct se_task *task)
offset = cmd->data_length;
}
- rbuf = transport_kmap_first_data_page(cmd);
+ rbuf = transport_kmap_data_sg(cmd);
memcpy(rbuf, buf, offset);
- transport_kunmap_first_data_page(cmd);
+ transport_kunmap_data_sg(cmd);
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
@@ -1034,7 +1050,7 @@ int target_emulate_request_sense(struct se_task *task)
return -ENOSYS;
}
- buf = transport_kmap_first_data_page(cmd);
+ buf = transport_kmap_data_sg(cmd);
if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
/*
@@ -1042,11 +1058,8 @@ int target_emulate_request_sense(struct se_task *task)
*/
buf[0] = 0x70;
buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
- /*
- * Make sure request data length is enough for additional
- * sense data.
- */
- if (cmd->data_length <= 18) {
+
+ if (cmd->data_length < 18) {
buf[7] = 0x00;
err = -EINVAL;
goto end;
@@ -1063,11 +1076,8 @@ int target_emulate_request_sense(struct se_task *task)
*/
buf[0] = 0x70;
buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;
- /*
- * Make sure request data length is enough for additional
- * sense data.
- */
- if (cmd->data_length <= 18) {
+
+ if (cmd->data_length < 18) {
buf[7] = 0x00;
err = -EINVAL;
goto end;
@@ -1080,7 +1090,7 @@ int target_emulate_request_sense(struct se_task *task)
}
end:
- transport_kunmap_first_data_page(cmd);
+ transport_kunmap_data_sg(cmd);
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
return 0;
@@ -1114,7 +1124,7 @@ int target_emulate_unmap(struct se_task *task)
dl = get_unaligned_be16(&cdb[0]);
bd_dl = get_unaligned_be16(&cdb[2]);
- buf = transport_kmap_first_data_page(cmd);
+ buf = transport_kmap_data_sg(cmd);
ptr = &buf[offset];
pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
@@ -1138,7 +1148,7 @@ int target_emulate_unmap(struct se_task *task)
}
err:
- transport_kunmap_first_data_page(cmd);
+ transport_kunmap_data_sg(cmd);
if (!ret) {
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
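
The hunks above convert every emulation handler in target_core_cdb.c from transport_kmap_first_data_page()/transport_kunmap_first_data_page() to transport_kmap_data_sg()/transport_kunmap_data_sg(). Purely as an illustration (not part of the patch), a minimal handler following the same map, fill, unmap, complete pattern is sketched below; the handler name example_emulate_handler and the exact header that declares the kmap helpers are assumptions, the remaining identifiers come from the hunks above.

#include <scsi/scsi.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>	/* assumed home of transport_kmap_data_sg() */

static int example_emulate_handler(struct se_task *task)
{
	struct se_cmd *cmd = task->task_se_cmd;
	unsigned char *buf;

	/* Map the command's data scatterlist into one contiguous buffer. */
	buf = transport_kmap_data_sg(cmd);

	buf[0] = 0x00;			/* fill the response payload here */

	/* Every map is paired with an unmap before completing the task. */
	transport_kunmap_data_sg(cmd);

	task->task_scsi_status = GOOD;
	transport_complete_task(task, 1);
	return 0;
}
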
diff --git a/drivers/target/target_core_cdb.h b/drivers/target/target_core_cdb.h
deleted file mode 100644
index ad6b1e3..0000000
--- a/drivers/target/target_core_cdb.h
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef TARGET_CORE_CDB_H
-#define TARGET_CORE_CDB_H
-
-int target_emulate_inquiry(struct se_task *task);
-int target_emulate_readcapacity(struct se_task *task);
-int target_emulate_readcapacity_16(struct se_task *task);
-int target_emulate_modesense(struct se_task *task);
-int target_emulate_request_sense(struct se_task *task);
-int target_emulate_unmap(struct se_task *task);
-int target_emulate_write_same(struct se_task *task);
-int target_emulate_synchronize_cache(struct se_task *task);
-int target_emulate_noop(struct se_task *task);
-
-#endif /* TARGET_CORE_CDB_H */
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 93d4f6a..6e043ee 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -39,18 +39,16 @@
#include <linux/spinlock.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
+#include "target_core_internal.h"
#include "target_core_alua.h"
-#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_rd.h"
-#include "target_core_stat.h"
extern struct t10_alua_lu_gp *default_lu_gp;
@@ -1452,7 +1450,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
return -ENOMEM;
orig = opts;
- while ((ptr = strsep(&opts, ",")) != NULL) {
+ while ((ptr = strsep(&opts, ",\n")) != NULL) {
if (!*ptr)
continue;
@@ -1631,7 +1629,7 @@ static struct config_item_type target_core_dev_pr_cit = {
static ssize_t target_core_show_dev_info(void *p, char *page)
{
- struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+ struct se_subsystem_dev *se_dev = p;
struct se_hba *hba = se_dev->se_dev_hba;
struct se_subsystem_api *t = hba->transport;
int bl = 0;
@@ -1659,7 +1657,7 @@ static ssize_t target_core_store_dev_control(
const char *page,
size_t count)
{
- struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+ struct se_subsystem_dev *se_dev = p;
struct se_hba *hba = se_dev->se_dev_hba;
struct se_subsystem_api *t = hba->transport;
@@ -1682,7 +1680,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_control = {
static ssize_t target_core_show_dev_alias(void *p, char *page)
{
- struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+ struct se_subsystem_dev *se_dev = p;
if (!(se_dev->su_dev_flags & SDF_USING_ALIAS))
return 0;
@@ -1695,7 +1693,7 @@ static ssize_t target_core_store_dev_alias(
const char *page,
size_t count)
{
- struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+ struct se_subsystem_dev *se_dev = p;
struct se_hba *hba = se_dev->se_dev_hba;
ssize_t read_bytes;
@@ -1706,9 +1704,14 @@ static ssize_t target_core_store_dev_alias(
return -EINVAL;
}
- se_dev->su_dev_flags |= SDF_USING_ALIAS;
read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
"%s", page);
+ if (!read_bytes)
+ return -EINVAL;
+ if (se_dev->se_dev_alias[read_bytes - 1] == '\n')
+ se_dev->se_dev_alias[read_bytes - 1] = '\0';
+
+ se_dev->su_dev_flags |= SDF_USING_ALIAS;
pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
config_item_name(&hba->hba_group.cg_item),
@@ -1728,7 +1731,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_alias = {
static ssize_t target_core_show_dev_udev_path(void *p, char *page)
{
- struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+ struct se_subsystem_dev *se_dev = p;
if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH))
return 0;
@@ -1741,7 +1744,7 @@ static ssize_t target_core_store_dev_udev_path(
const char *page,
size_t count)
{
- struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+ struct se_subsystem_dev *se_dev = p;
struct se_hba *hba = se_dev->se_dev_hba;
ssize_t read_bytes;
@@ -1752,9 +1755,14 @@ static ssize_t target_core_store_dev_udev_path(
return -EINVAL;
}
- se_dev->su_dev_flags |= SDF_USING_UDEV_PATH;
read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
"%s", page);
+ if (!read_bytes)
+ return -EINVAL;
+ if (se_dev->se_dev_udev_path[read_bytes - 1] == '\n')
+ se_dev->se_dev_udev_path[read_bytes - 1] = '\0';
+
+ se_dev->su_dev_flags |= SDF_USING_UDEV_PATH;
pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
config_item_name(&hba->hba_group.cg_item),
@@ -1777,7 +1785,7 @@ static ssize_t target_core_store_dev_enable(
const char *page,
size_t count)
{
- struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+ struct se_subsystem_dev *se_dev = p;
struct se_device *dev;
struct se_hba *hba = se_dev->se_dev_hba;
struct se_subsystem_api *t = hba->transport;
@@ -1822,7 +1830,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_enable = {
static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
{
struct se_device *dev;
- struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
+ struct se_subsystem_dev *su_dev = p;
struct config_item *lu_ci;
struct t10_alua_lu_gp *lu_gp;
struct t10_alua_lu_gp_member *lu_gp_mem;
@@ -1860,7 +1868,7 @@ static ssize_t target_core_store_alua_lu_gp(
size_t count)
{
struct se_device *dev;
- struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
+ struct se_subsystem_dev *su_dev = p;
struct se_hba *hba = su_dev->se_dev_hba;
struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
struct t10_alua_lu_gp_member *lu_gp_mem;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 9b86394..edbcabb 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -42,13 +42,11 @@
#include <scsi/scsi_device.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+#include "target_core_internal.h"
#include "target_core_alua.h"
-#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
@@ -322,11 +320,12 @@ int core_free_device_list_for_node(
void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
{
struct se_dev_entry *deve;
+ unsigned long flags;
- spin_lock_irq(&se_nacl->device_list_lock);
+ spin_lock_irqsave(&se_nacl->device_list_lock, flags);
deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
deve->deve_cmds--;
- spin_unlock_irq(&se_nacl->device_list_lock);
+ spin_unlock_irqrestore(&se_nacl->device_list_lock, flags);
}
void core_update_device_list_access(
@@ -658,7 +657,7 @@ int target_report_luns(struct se_task *se_task)
unsigned char *buf;
u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
- buf = transport_kmap_first_data_page(se_cmd);
+ buf = (unsigned char *) transport_kmap_data_sg(se_cmd);
/*
* If no struct se_session pointer is present, this struct se_cmd is
@@ -696,7 +695,7 @@ int target_report_luns(struct se_task *se_task)
* See SPC3 r07, page 159.
*/
done:
- transport_kunmap_first_data_page(se_cmd);
+ transport_kunmap_data_sg(se_cmd);
lun_count *= 8;
buf[0] = ((lun_count >> 24) & 0xff);
buf[1] = ((lun_count >> 16) & 0xff);
@@ -1134,8 +1133,6 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
*/
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
- u32 orig_queue_depth = dev->queue_depth;
-
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
pr_err("dev[%p]: Unable to change SE Device TCQ while"
" dev_export_obj: %d count exists\n", dev,
@@ -1169,11 +1166,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
}
dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
- if (queue_depth > orig_queue_depth)
- atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
- else if (queue_depth < orig_queue_depth)
- atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);
-
pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
dev, queue_depth);
return 0;
@@ -1303,24 +1295,26 @@ struct se_lun *core_dev_add_lun(
{
struct se_lun *lun_p;
u32 lun_access = 0;
+ int rc;
if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
atomic_read(&dev->dev_access_obj.obj_access_count));
- return NULL;
+ return ERR_PTR(-EACCES);
}
lun_p = core_tpg_pre_addlun(tpg, lun);
- if ((IS_ERR(lun_p)) || !lun_p)
- return NULL;
+ if (IS_ERR(lun_p))
+ return lun_p;
if (dev->dev_flags & DF_READ_ONLY)
lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
else
lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
- if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
- return NULL;
+ rc = core_tpg_post_addlun(tpg, lun_p, lun_access, dev);
+ if (rc < 0)
+ return ERR_PTR(rc);
pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
@@ -1357,11 +1351,10 @@ int core_dev_del_lun(
u32 unpacked_lun)
{
struct se_lun *lun;
- int ret = 0;
- lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
- if (!lun)
- return ret;
+ lun = core_tpg_pre_dellun(tpg, unpacked_lun);
+ if (IS_ERR(lun))
+ return PTR_ERR(lun);
core_tpg_post_dellun(tpg, lun);
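
The two hunks above convert core_dev_add_lun() and core_tpg_pre_dellun() from returning NULL on failure to returning ERR_PTR() codes, so callers can propagate the real errno instead of a generic -EINVAL. A minimal sketch of the caller idiom this enables (the wrapper function is hypothetical; the called function and its signature come from the hunks above and from target_core_internal.h below) is:

#include <linux/err.h>
#include <target/target_core_base.h>

static int example_add_lun(struct se_portal_group *tpg, struct se_hba *hba,
			   struct se_device *dev, u32 unpacked_lun)
{
	struct se_lun *lun_p;

	lun_p = core_dev_add_lun(tpg, hba, dev, unpacked_lun);
	if (IS_ERR(lun_p))
		return PTR_ERR(lun_p);	/* e.g. -EACCES from the export check */

	return 0;
}
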
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 09b6f87..9a2ce11 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -36,18 +36,14 @@
#include <linux/configfs.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
+#include "target_core_internal.h"
#include "target_core_alua.h"
-#include "target_core_hba.h"
#include "target_core_pr.h"
-#include "target_core_stat.h"
#define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \
static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
@@ -770,9 +766,9 @@ static int target_fabric_port_link(
lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev,
lun->unpacked_lun);
- if (IS_ERR(lun_p) || !lun_p) {
+ if (IS_ERR(lun_p)) {
pr_err("core_dev_add_lun() failed\n");
- ret = -EINVAL;
+ ret = PTR_ERR(lun_p);
goto out;
}
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index ec4249b..283a36e 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -34,13 +34,10 @@
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_lib.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
-#include "target_core_hba.h"
+#include "target_core_internal.h"
#include "target_core_pr.h"
/*
@@ -402,7 +399,7 @@ char *iscsi_parse_pr_out_transport_id(
add_len = ((buf[2] >> 8) & 0xff);
add_len |= (buf[3] & 0xff);
- tid_len = strlen((char *)&buf[4]);
+ tid_len = strlen(&buf[4]);
tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
tid_len += 1; /* Add one byte for NULL terminator */
padding = ((-tid_len) & 3);
@@ -423,11 +420,11 @@ char *iscsi_parse_pr_out_transport_id(
* format.
*/
if (format_code == 0x40) {
- p = strstr((char *)&buf[4], ",i,0x");
+ p = strstr(&buf[4], ",i,0x");
if (!p) {
pr_err("Unable to locate \",i,0x\" seperator"
" for Initiator port identifier: %s\n",
- (char *)&buf[4]);
+ &buf[4]);
return NULL;
}
*p = '\0'; /* Terminate iSCSI Name */
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index b4864fb..7ed58e2 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -37,8 +37,7 @@
#include <scsi/scsi_host.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_backend.h>
#include "target_core_file.h"
@@ -86,7 +85,7 @@ static void fd_detach_hba(struct se_hba *hba)
static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
{
struct fd_dev *fd_dev;
- struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
+ struct fd_host *fd_host = hba->hba_ptr;
fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
if (!fd_dev) {
@@ -114,8 +113,8 @@ static struct se_device *fd_create_virtdevice(
struct se_device *dev;
struct se_dev_limits dev_limits;
struct queue_limits *limits;
- struct fd_dev *fd_dev = (struct fd_dev *) p;
- struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
+ struct fd_dev *fd_dev = p;
+ struct fd_host *fd_host = hba->hba_ptr;
mm_segment_t old_fs;
struct file *file;
struct inode *inode = NULL;
@@ -240,7 +239,7 @@ fail:
*/
static void fd_free_device(void *p)
{
- struct fd_dev *fd_dev = (struct fd_dev *) p;
+ struct fd_dev *fd_dev = p;
if (fd_dev->fd_file) {
filp_close(fd_dev->fd_file, NULL);
@@ -498,7 +497,7 @@ static ssize_t fd_set_configfs_dev_params(
orig = opts;
- while ((ptr = strsep(&opts, ",")) != NULL) {
+ while ((ptr = strsep(&opts, ",\n")) != NULL) {
if (!*ptr)
continue;
@@ -559,7 +558,7 @@ out:
static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
{
- struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;
+ struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
pr_err("Missing fd_dev_name=\n");
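
The strsep() delimiter change just above, from "," to ",\n" (the same change is made in target_core_configfs.c, target_core_iblock.c, target_core_pscsi.c and target_core_rd.c), lets the option parser cope with the trailing newline that shell-style writes into configfs attributes deliver. A small userspace illustration of the effect, independent of the kernel code and using a hypothetical fd_dev_name value, is:

#define _DEFAULT_SOURCE		/* for strsep() on glibc */
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* What `echo fd_dev_name=/tmp/file > control` hands to the parser. */
	char buf[] = "fd_dev_name=/tmp/file\n";
	char *opts = buf, *ptr;

	/*
	 * With ",\n" the newline acts as a separator, so the token comes out
	 * clean; the empty token left behind is skipped, mirroring the
	 * "if (!*ptr) continue;" in the kernel loops above.
	 */
	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;
		printf("token: '%s'\n", ptr);
	}
	return 0;
}
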
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index c68019d..3dd1bd4 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -37,11 +37,10 @@
#include <net/tcp.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
-#include "target_core_hba.h"
+#include "target_core_internal.h"
static LIST_HEAD(subsystem_list);
static DEFINE_MUTEX(subsystem_mutex);
diff --git a/drivers/target/target_core_hba.h b/drivers/target/target_core_hba.h
deleted file mode 100644
index bb0fea5..0000000
--- a/drivers/target/target_core_hba.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef TARGET_CORE_HBA_H
-#define TARGET_CORE_HBA_H
-
-extern struct se_hba *core_alloc_hba(const char *, u32, u32);
-extern int core_delete_hba(struct se_hba *);
-
-#endif /* TARGET_CORE_HBA_H */
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 4aa9922..8572eae 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -42,8 +42,7 @@
#include <scsi/scsi_host.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_backend.h>
#include "target_core_iblock.h"
@@ -130,7 +129,7 @@ static struct se_device *iblock_create_virtdevice(
/*
* These settings need to be made tunable..
*/
- ib_dev->ibd_bio_set = bioset_create(32, 64);
+ ib_dev->ibd_bio_set = bioset_create(32, 0);
if (!ib_dev->ibd_bio_set) {
pr_err("IBLOCK: Unable to create bioset()\n");
return ERR_PTR(-ENOMEM);
@@ -182,7 +181,7 @@ static struct se_device *iblock_create_virtdevice(
*/
dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
dev->se_sub_dev->se_dev_attrib.unmap_granularity =
- q->limits.discard_granularity;
+ q->limits.discard_granularity >> 9;
dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
q->limits.discard_alignment;
@@ -391,7 +390,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
orig = opts;
- while ((ptr = strsep(&opts, ",")) != NULL) {
+ while ((ptr = strsep(&opts, ",\n")) != NULL) {
if (!*ptr)
continue;
@@ -465,7 +464,7 @@ static ssize_t iblock_show_configfs_dev_params(
if (bd) {
bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
- "" : (bd->bd_holder == (struct iblock_dev *)ibd) ?
+ "" : (bd->bd_holder == ibd) ?
"CLAIMED: IBLOCK" : "CLAIMED: OS");
} else {
bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
@@ -489,6 +488,13 @@ iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)
struct iblock_req *ib_req = IBLOCK_REQ(task);
struct bio *bio;
+ /*
+ * Only allocate as many vector entries as the bio code allows us to,
+ * we'll loop later on until we have handled the whole request.
+ */
+ if (sg_num > BIO_MAX_PAGES)
+ sg_num = BIO_MAX_PAGES;
+
bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
if (!bio) {
pr_err("Unable to allocate memory for bio\n");
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
new file mode 100644
index 0000000..4500136
--- /dev/null
+++ b/drivers/target/target_core_internal.h
@@ -0,0 +1,123 @@
+#ifndef TARGET_CORE_INTERNAL_H
+#define TARGET_CORE_INTERNAL_H
+
+/* target_core_alua.c */
+extern struct t10_alua_lu_gp *default_lu_gp;
+
+/* target_core_cdb.c */
+int target_emulate_inquiry(struct se_task *task);
+int target_emulate_readcapacity(struct se_task *task);
+int target_emulate_readcapacity_16(struct se_task *task);
+int target_emulate_modesense(struct se_task *task);
+int target_emulate_request_sense(struct se_task *task);
+int target_emulate_unmap(struct se_task *task);
+int target_emulate_write_same(struct se_task *task);
+int target_emulate_synchronize_cache(struct se_task *task);
+int target_emulate_noop(struct se_task *task);
+
+/* target_core_device.c */
+struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
+int core_free_device_list_for_node(struct se_node_acl *,
+ struct se_portal_group *);
+void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *);
+void core_update_device_list_access(u32, u32, struct se_node_acl *);
+int core_update_device_list_for_node(struct se_lun *, struct se_lun_acl *,
+ u32, u32, struct se_node_acl *, struct se_portal_group *, int);
+void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
+int core_dev_export(struct se_device *, struct se_portal_group *,
+ struct se_lun *);
+void core_dev_unexport(struct se_device *, struct se_portal_group *,
+ struct se_lun *);
+int target_report_luns(struct se_task *);
+void se_release_device_for_hba(struct se_device *);
+void se_release_vpd_for_dev(struct se_device *);
+int se_free_virtual_device(struct se_device *, struct se_hba *);
+int se_dev_check_online(struct se_device *);
+int se_dev_check_shutdown(struct se_device *);
+void se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *);
+int se_dev_set_task_timeout(struct se_device *, u32);
+int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
+int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
+int se_dev_set_unmap_granularity(struct se_device *, u32);
+int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
+int se_dev_set_emulate_dpo(struct se_device *, int);
+int se_dev_set_emulate_fua_write(struct se_device *, int);
+int se_dev_set_emulate_fua_read(struct se_device *, int);
+int se_dev_set_emulate_write_cache(struct se_device *, int);
+int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
+int se_dev_set_emulate_tas(struct se_device *, int);
+int se_dev_set_emulate_tpu(struct se_device *, int);
+int se_dev_set_emulate_tpws(struct se_device *, int);
+int se_dev_set_enforce_pr_isids(struct se_device *, int);
+int se_dev_set_is_nonrot(struct se_device *, int);
+int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
+int se_dev_set_queue_depth(struct se_device *, u32);
+int se_dev_set_max_sectors(struct se_device *, u32);
+int se_dev_set_optimal_sectors(struct se_device *, u32);
+int se_dev_set_block_size(struct se_device *, u32);
+struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *,
+ struct se_device *, u32);
+int core_dev_del_lun(struct se_portal_group *, u32);
+struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
+struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
+ u32, char *, int *);
+int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
+ struct se_lun_acl *, u32, u32);
+int core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
+ struct se_lun *, struct se_lun_acl *);
+void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
+ struct se_lun_acl *lacl);
+int core_dev_setup_virtual_lun0(void);
+void core_dev_release_virtual_lun0(void);
+
+/* target_core_hba.c */
+struct se_hba *core_alloc_hba(const char *, u32, u32);
+int core_delete_hba(struct se_hba *);
+
+/* target_core_tmr.c */
+int core_tmr_lun_reset(struct se_device *, struct se_tmr_req *,
+ struct list_head *, struct se_cmd *);
+
+/* target_core_tpg.c */
+extern struct se_device *g_lun0_dev;
+
+struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
+ const char *);
+struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
+ unsigned char *);
+void core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *);
+void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
+struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
+int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *,
+ u32, void *);
+struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32 unpacked_lun);
+int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);
+
+/* target_core_transport.c */
+extern struct kmem_cache *se_tmr_req_cache;
+
+int init_se_kmem_caches(void);
+void release_se_kmem_caches(void);
+u32 scsi_get_new_index(scsi_index_t);
+void transport_subsystem_check_init(void);
+void transport_cmd_finish_abort(struct se_cmd *, int);
+void __transport_remove_task_from_execute_queue(struct se_task *,
+ struct se_device *);
+unsigned char *transport_dump_cmd_direction(struct se_cmd *);
+void transport_dump_dev_state(struct se_device *, char *, int *);
+void transport_dump_dev_info(struct se_device *, struct se_lun *,
+ unsigned long long, char *, int *);
+void transport_dump_vpd_proto_id(struct t10_vpd *, unsigned char *, int);
+int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
+int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
+int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
+bool target_stop_task(struct se_task *task, unsigned long *flags);
+int transport_clear_lun_from_sessions(struct se_lun *);
+void transport_send_task_abort(struct se_cmd *);
+
+/* target_core_stat.c */
+void target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
+void target_stat_setup_port_default_groups(struct se_lun *);
+void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
+
+#endif /* TARGET_CORE_INTERNAL_H */
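
With target_core_internal.h in place, core-only prototypes no longer leak into the public include/target headers; backend and fabric modules stick to target_core_base.h plus target_core_backend.h and/or target_core_fabric.h, as the include hunks throughout this series show. Sketched for a hypothetical out-of-core backend module, the resulting include discipline looks like:

#include <target/target_core_base.h>	/* core data structures */
#include <target/target_core_backend.h>	/* backend (subsystem) API */
#include <target/target_core_fabric.h>	/* fabric-facing helpers, when needed */

/*
 * Deliberately no "target_core_internal.h": that header stays private to
 * drivers/target/ core code (configfs, device, tpg, tmr, transport, stat).
 */
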
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 95dee70..63e703b 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -33,14 +33,11 @@
#include <asm/unaligned.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tmr.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
-#include "target_core_hba.h"
+#include "target_core_internal.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
@@ -120,7 +117,7 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg(struct se_device *,
struct se_node_acl *, struct se_session *);
static void core_scsi3_put_pr_reg(struct t10_pr_registration *);
-static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
+static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd)
{
struct se_session *se_sess = cmd->se_sess;
struct se_subsystem_dev *su_dev = cmd->se_dev->se_sub_dev;
@@ -130,7 +127,7 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
int conflict = 0;
if (!crh)
- return false;
+ return -EINVAL;
pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
se_sess);
@@ -158,16 +155,14 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
*/
if (pr_reg->pr_res_holder) {
core_scsi3_put_pr_reg(pr_reg);
- *ret = 0;
- return false;
+ return 1;
}
if ((pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY) ||
(pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY) ||
(pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
(pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
core_scsi3_put_pr_reg(pr_reg);
- *ret = 0;
- return true;
+ return 1;
}
core_scsi3_put_pr_reg(pr_reg);
conflict = 1;
@@ -192,10 +187,10 @@ static int target_check_scsi2_reservation_conflict(struct se_cmd *cmd, int *ret)
" while active SPC-3 registrations exist,"
" returning RESERVATION_CONFLICT\n");
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
- return true;
+ return -EBUSY;
}
- return false;
+ return 0;
}
int target_scsi2_reservation_release(struct se_task *task)
@@ -204,12 +199,18 @@ int target_scsi2_reservation_release(struct se_task *task)
struct se_device *dev = cmd->se_dev;
struct se_session *sess = cmd->se_sess;
struct se_portal_group *tpg = sess->se_tpg;
- int ret = 0;
+ int ret = 0, rc;
if (!sess || !tpg)
goto out;
- if (target_check_scsi2_reservation_conflict(cmd, &ret))
+ rc = target_check_scsi2_reservation_conflict(cmd);
+ if (rc == 1)
goto out;
+ else if (rc < 0) {
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
+ goto out;
+ }
ret = 0;
spin_lock(&dev->dev_reservation_lock);
@@ -246,7 +247,7 @@ int target_scsi2_reservation_reserve(struct se_task *task)
struct se_device *dev = cmd->se_dev;
struct se_session *sess = cmd->se_sess;
struct se_portal_group *tpg = sess->se_tpg;
- int ret = 0;
+ int ret = 0, rc;
if ((cmd->t_task_cdb[1] & 0x01) &&
(cmd->t_task_cdb[1] & 0x02)) {
@@ -262,8 +263,14 @@ int target_scsi2_reservation_reserve(struct se_task *task)
*/
if (!sess || !tpg)
goto out;
- if (target_check_scsi2_reservation_conflict(cmd, &ret))
+ rc = target_check_scsi2_reservation_conflict(cmd);
+ if (rc == 1)
+ goto out;
+ else if (rc < 0) {
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
goto out;
+ }
ret = 0;
spin_lock(&dev->dev_reservation_lock);
@@ -481,6 +488,7 @@ static int core_scsi3_pr_seq_non_holder(
case READ_MEDIA_SERIAL_NUMBER:
case REPORT_LUNS:
case REQUEST_SENSE:
+ case PERSISTENT_RESERVE_IN:
ret = 0; /*/ Allowed CDBs */
break;
default:
@@ -1537,7 +1545,7 @@ static int core_scsi3_decode_spec_i_port(
tidh_new->dest_local_nexus = 1;
list_add_tail(&tidh_new->dest_list, &tid_dest_list);
- buf = transport_kmap_first_data_page(cmd);
+ buf = transport_kmap_data_sg(cmd);
/*
* For a PERSISTENT RESERVE OUT specify initiator ports payload,
* first extract TransportID Parameter Data Length, and make sure
@@ -1788,7 +1796,7 @@ static int core_scsi3_decode_spec_i_port(
}
- transport_kunmap_first_data_page(cmd);
+ transport_kunmap_data_sg(cmd);
/*
* Go ahead and create a registrations from tid_dest_list for the
@@ -1836,7 +1844,7 @@ static int core_scsi3_decode_spec_i_port(
return 0;
out:
- transport_kunmap_first_data_page(cmd);
+ transport_kunmap_data_sg(cmd);
/*
* For the failure case, release everything from tid_dest_list
* including *dest_pr_reg and the configfs dependances..
@@ -2984,21 +2992,6 @@ static void core_scsi3_release_preempt_and_abort(
}
}
-int core_scsi3_check_cdb_abort_and_preempt(
- struct list_head *preempt_and_abort_list,
- struct se_cmd *cmd)
-{
- struct t10_pr_registration *pr_reg, *pr_reg_tmp;
-
- list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list,
- pr_reg_abort_list) {
- if (pr_reg->pr_res_key == cmd->pr_res_key)
- return 0;
- }
-
- return 1;
-}
-
static int core_scsi3_pro_preempt(
struct se_cmd *cmd,
int type,
@@ -3138,7 +3131,7 @@ static int core_scsi3_pro_preempt(
if (!calling_it_nexus)
core_scsi3_ua_allocate(pr_reg_nacl,
pr_res_mapped_lun, 0x2A,
- ASCQ_2AH_RESERVATIONS_PREEMPTED);
+ ASCQ_2AH_REGISTRATIONS_PREEMPTED);
}
spin_unlock(&pr_tmpl->registration_lock);
/*
@@ -3251,7 +3244,7 @@ static int core_scsi3_pro_preempt(
* additional sense code set to REGISTRATIONS PREEMPTED;
*/
core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A,
- ASCQ_2AH_RESERVATIONS_PREEMPTED);
+ ASCQ_2AH_REGISTRATIONS_PREEMPTED);
}
spin_unlock(&pr_tmpl->registration_lock);
/*
@@ -3428,14 +3421,14 @@ static int core_scsi3_emulate_pro_register_and_move(
* will be moved to for the TransportID containing SCSI initiator WWN
* information.
*/
- buf = transport_kmap_first_data_page(cmd);
+ buf = transport_kmap_data_sg(cmd);
rtpi = (buf[18] & 0xff) << 8;
rtpi |= buf[19] & 0xff;
tid_len = (buf[20] & 0xff) << 24;
tid_len |= (buf[21] & 0xff) << 16;
tid_len |= (buf[22] & 0xff) << 8;
tid_len |= buf[23] & 0xff;
- transport_kunmap_first_data_page(cmd);
+ transport_kunmap_data_sg(cmd);
buf = NULL;
if ((tid_len + 24) != cmd->data_length) {
@@ -3487,7 +3480,7 @@ static int core_scsi3_emulate_pro_register_and_move(
return -EINVAL;
}
- buf = transport_kmap_first_data_page(cmd);
+ buf = transport_kmap_data_sg(cmd);
proto_ident = (buf[24] & 0x0f);
#if 0
pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
@@ -3521,7 +3514,7 @@ static int core_scsi3_emulate_pro_register_and_move(
goto out;
}
- transport_kunmap_first_data_page(cmd);
+ transport_kunmap_data_sg(cmd);
buf = NULL;
pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s"
@@ -3786,13 +3779,13 @@ after_iport_check:
" REGISTER_AND_MOVE\n");
}
- transport_kunmap_first_data_page(cmd);
+ transport_kunmap_data_sg(cmd);
core_scsi3_put_pr_reg(dest_pr_reg);
return 0;
out:
if (buf)
- transport_kunmap_first_data_page(cmd);
+ transport_kunmap_data_sg(cmd);
if (dest_se_deve)
core_scsi3_lunacl_undepend_item(dest_se_deve);
if (dest_node_acl)
@@ -3866,7 +3859,7 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
scope = (cdb[2] & 0xf0);
type = (cdb[2] & 0x0f);
- buf = transport_kmap_first_data_page(cmd);
+ buf = transport_kmap_data_sg(cmd);
/*
* From PERSISTENT_RESERVE_OUT parameter list (payload)
*/
@@ -3884,7 +3877,7 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
aptpl = (buf[17] & 0x01);
unreg = (buf[17] & 0x02);
}
- transport_kunmap_first_data_page(cmd);
+ transport_kunmap_data_sg(cmd);
buf = NULL;
/*
@@ -3984,7 +3977,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
return -EINVAL;
}
- buf = transport_kmap_first_data_page(cmd);
+ buf = transport_kmap_data_sg(cmd);
buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
@@ -4018,7 +4011,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
buf[6] = ((add_len >> 8) & 0xff);
buf[7] = (add_len & 0xff);
- transport_kunmap_first_data_page(cmd);
+ transport_kunmap_data_sg(cmd);
return 0;
}
@@ -4044,7 +4037,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
return -EINVAL;
}
- buf = transport_kmap_first_data_page(cmd);
+ buf = transport_kmap_data_sg(cmd);
buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
@@ -4103,7 +4096,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
err:
spin_unlock(&se_dev->dev_reservation_lock);
- transport_kunmap_first_data_page(cmd);
+ transport_kunmap_data_sg(cmd);
return 0;
}
@@ -4127,7 +4120,7 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
return -EINVAL;
}
- buf = transport_kmap_first_data_page(cmd);
+ buf = transport_kmap_data_sg(cmd);
buf[0] = ((add_len << 8) & 0xff);
buf[1] = (add_len & 0xff);
@@ -4159,7 +4152,7 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
- transport_kunmap_first_data_page(cmd);
+ transport_kunmap_data_sg(cmd);
return 0;
}
@@ -4189,7 +4182,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
return -EINVAL;
}
- buf = transport_kmap_first_data_page(cmd);
+ buf = transport_kmap_data_sg(cmd);
buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
@@ -4310,7 +4303,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
buf[6] = ((add_len >> 8) & 0xff);
buf[7] = (add_len & 0xff);
- transport_kunmap_first_data_page(cmd);
+ transport_kunmap_data_sg(cmd);
return 0;
}
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index b97f694..7a233fe 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -60,8 +60,6 @@ extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
struct se_node_acl *);
extern void core_scsi3_free_all_registrations(struct se_device *);
extern unsigned char *core_scsi3_pr_dump_type(int);
-extern int core_scsi3_check_cdb_abort_and_preempt(struct list_head *,
- struct se_cmd *);
extern int target_scsi3_emulate_pr_in(struct se_task *task);
extern int target_scsi3_emulate_pr_out(struct se_task *task);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 8b15e56..8d4def3 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -44,8 +44,7 @@
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_backend.h>
#include "target_core_pscsi.h"
@@ -105,7 +104,7 @@ static void pscsi_detach_hba(struct se_hba *hba)
static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
{
- struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
+ struct pscsi_hba_virt *phv = hba->hba_ptr;
struct Scsi_Host *sh = phv->phv_lld_host;
/*
* Release the struct Scsi_Host
@@ -351,7 +350,6 @@ static struct se_device *pscsi_add_device_to_list(
* scsi_device_put() and the pdv->pdv_sd cleared.
*/
pdv->pdv_sd = sd;
-
dev = transport_add_device_to_core_hba(hba, &pscsi_template,
se_dev, dev_flags, pdv,
&dev_limits, NULL, NULL);
@@ -406,7 +404,7 @@ static struct se_device *pscsi_create_type_disk(
__releases(sh->host_lock)
{
struct se_device *dev;
- struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
+ struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
struct Scsi_Host *sh = sd->host;
struct block_device *bd;
u32 dev_flags = 0;
@@ -454,7 +452,7 @@ static struct se_device *pscsi_create_type_rom(
__releases(sh->host_lock)
{
struct se_device *dev;
- struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
+ struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
struct Scsi_Host *sh = sd->host;
u32 dev_flags = 0;
@@ -489,7 +487,7 @@ static struct se_device *pscsi_create_type_other(
__releases(sh->host_lock)
{
struct se_device *dev;
- struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
+ struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
struct Scsi_Host *sh = sd->host;
u32 dev_flags = 0;
@@ -510,10 +508,10 @@ static struct se_device *pscsi_create_virtdevice(
struct se_subsystem_dev *se_dev,
void *p)
{
- struct pscsi_dev_virt *pdv = (struct pscsi_dev_virt *)p;
+ struct pscsi_dev_virt *pdv = p;
struct se_device *dev;
struct scsi_device *sd;
- struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
+ struct pscsi_hba_virt *phv = hba->hba_ptr;
struct Scsi_Host *sh = phv->phv_lld_host;
int legacy_mode_enable = 0;
@@ -695,7 +693,7 @@ static int pscsi_transport_complete(struct se_task *task)
if (task->task_se_cmd->se_deve->lun_flags &
TRANSPORT_LUNFLAGS_READ_ONLY) {
- unsigned char *buf = transport_kmap_first_data_page(task->task_se_cmd);
+ unsigned char *buf = transport_kmap_data_sg(task->task_se_cmd);
if (cdb[0] == MODE_SENSE_10) {
if (!(buf[3] & 0x80))
@@ -705,7 +703,7 @@ static int pscsi_transport_complete(struct se_task *task)
buf[2] |= 0x80;
}
- transport_kunmap_first_data_page(task->task_se_cmd);
+ transport_kunmap_data_sg(task->task_se_cmd);
}
}
after_mode_sense:
@@ -818,7 +816,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
orig = opts;
- while ((ptr = strsep(&opts, ",")) != NULL) {
+ while ((ptr = strsep(&opts, ",\n")) != NULL) {
if (!*ptr)
continue;
@@ -1144,7 +1142,7 @@ static unsigned char *pscsi_get_sense_buffer(struct se_task *task)
{
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
- return (unsigned char *)&pt->pscsi_sense[0];
+ return pt->pscsi_sense;
}
/* pscsi_get_device_rev():
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 02e51fa..8b68f7b 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -37,9 +37,7 @@
#include <scsi/scsi_host.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
#include "target_core_rd.h"
@@ -474,7 +472,7 @@ static ssize_t rd_set_configfs_dev_params(
orig = opts;
- while ((ptr = strsep(&opts, ",")) != NULL) {
+ while ((ptr = strsep(&opts, ",\n")) != NULL) {
if (!*ptr)
continue;
diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c
index 874152a..f8c2d2c 100644
--- a/drivers/target/target_core_stat.c
+++ b/drivers/target/target_core_stat.c
@@ -43,12 +43,12 @@
#include <scsi/scsi_host.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
-#include "target_core_hba.h"
+#include "target_core_internal.h"
#ifndef INITIAL_JIFFIES
#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
@@ -1755,8 +1755,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident(
/* scsiAttIntrPortName+scsiAttIntrPortIdentifier */
memset(buf, 0, 64);
if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL)
- tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
- (unsigned char *)&buf[0], 64);
+ tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, buf, 64);
ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf);
spin_unlock_irq(&nacl->nacl_sess_lock);
diff --git a/drivers/target/target_core_stat.h b/drivers/target/target_core_stat.h
deleted file mode 100644
index 86c252f..0000000
--- a/drivers/target/target_core_stat.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef TARGET_CORE_STAT_H
-#define TARGET_CORE_STAT_H
-
-extern void target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
-extern void target_stat_setup_port_default_groups(struct se_lun *);
-extern void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
-
-#endif /*** TARGET_CORE_STAT_H ***/
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 6845228..dcb0618 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -32,12 +32,11 @@
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tmr.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
+#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
@@ -101,6 +100,21 @@ static void core_tmr_handle_tas_abort(
transport_cmd_finish_abort(cmd, 0);
}
+static int target_check_cdb_and_preempt(struct list_head *list,
+ struct se_cmd *cmd)
+{
+ struct t10_pr_registration *reg;
+
+ if (!list)
+ return 0;
+ list_for_each_entry(reg, list, pr_reg_abort_list) {
+ if (reg->pr_res_key == cmd->pr_res_key)
+ return 0;
+ }
+
+ return 1;
+}
+
static void core_tmr_drain_tmr_list(
struct se_device *dev,
struct se_tmr_req *tmr,
@@ -132,9 +146,7 @@ static void core_tmr_drain_tmr_list(
* parameter (eg: for PROUT PREEMPT_AND_ABORT service action
* skip non regisration key matching TMRs.
*/
- if (preempt_and_abort_list &&
- (core_scsi3_check_cdb_abort_and_preempt(
- preempt_and_abort_list, cmd) != 0))
+ if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
continue;
spin_lock(&cmd->t_state_lock);
@@ -211,9 +223,7 @@ static void core_tmr_drain_task_list(
* For PREEMPT_AND_ABORT usage, only process commands
* with a matching reservation key.
*/
- if (preempt_and_abort_list &&
- (core_scsi3_check_cdb_abort_and_preempt(
- preempt_and_abort_list, cmd) != 0))
+ if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
continue;
/*
* Not aborting PROUT PREEMPT_AND_ABORT CDB..
@@ -222,7 +232,7 @@ static void core_tmr_drain_task_list(
continue;
list_move_tail(&task->t_state_list, &drain_task_list);
- atomic_set(&task->task_state_active, 0);
+ task->t_state_active = false;
/*
* Remove from task execute list before processing drain_task_list
*/
@@ -321,9 +331,7 @@ static void core_tmr_drain_cmd_list(
* For PREEMPT_AND_ABORT usage, only process commands
* with a matching reservation key.
*/
- if (preempt_and_abort_list &&
- (core_scsi3_check_cdb_abort_and_preempt(
- preempt_and_abort_list, cmd) != 0))
+ if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
continue;
/*
* Not aborting PROUT PREEMPT_AND_ABORT CDB..
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 8ddd133..06336ec 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -39,13 +39,10 @@
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
-#include "target_core_hba.h"
-#include "target_core_stat.h"
+#include "target_core_internal.h"
extern struct se_device *g_lun0_dev;
@@ -810,8 +807,7 @@ static void core_tpg_shutdown_lun(
struct se_lun *core_tpg_pre_dellun(
struct se_portal_group *tpg,
- u32 unpacked_lun,
- int *ret)
+ u32 unpacked_lun)
{
struct se_lun *lun;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 0257658..cd5cd95 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -45,16 +45,12 @@
#include <scsi/scsi_tcq.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tmr.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
+#include "target_core_internal.h"
#include "target_core_alua.h"
-#include "target_core_cdb.h"
-#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
@@ -72,7 +68,7 @@ struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
static int transport_generic_write_pending(struct se_cmd *);
static int transport_processing_thread(void *param);
-static int __transport_execute_tasks(struct se_device *dev);
+static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
static void transport_complete_task_attr(struct se_cmd *cmd);
static void transport_handle_queue_full(struct se_cmd *cmd,
struct se_device *dev);
@@ -212,14 +208,13 @@ u32 scsi_get_new_index(scsi_index_t type)
return new_index;
}
-void transport_init_queue_obj(struct se_queue_obj *qobj)
+static void transport_init_queue_obj(struct se_queue_obj *qobj)
{
atomic_set(&qobj->queue_cnt, 0);
INIT_LIST_HEAD(&qobj->qobj_list);
init_waitqueue_head(&qobj->thread_wq);
spin_lock_init(&qobj->cmd_queue_lock);
}
-EXPORT_SYMBOL(transport_init_queue_obj);
void transport_subsystem_check_init(void)
{
@@ -426,18 +421,18 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
if (task->task_flags & TF_ACTIVE)
continue;
- if (!atomic_read(&task->task_state_active))
- continue;
-
spin_lock_irqsave(&dev->execute_task_lock, flags);
- list_del(&task->t_state_list);
- pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
- cmd->se_tfo->get_task_tag(cmd), dev, task);
- spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+ if (task->t_state_active) {
+ pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
+ cmd->se_tfo->get_task_tag(cmd), dev, task);
- atomic_set(&task->task_state_active, 0);
- atomic_dec(&cmd->t_task_cdbs_ex_left);
+ list_del(&task->t_state_list);
+ atomic_dec(&cmd->t_task_cdbs_ex_left);
+ task->t_state_active = false;
+ }
+ spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
+
}
/* transport_cmd_check_stop():
@@ -696,12 +691,6 @@ void transport_complete_task(struct se_task *task, int success)
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
unsigned long flags;
-#if 0
- pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
- cmd->t_task_cdb[0], dev);
-#endif
- if (dev)
- atomic_inc(&dev->depth_left);
spin_lock_irqsave(&cmd->t_state_lock, flags);
task->task_flags &= ~TF_ACTIVE;
@@ -714,7 +703,7 @@ void transport_complete_task(struct se_task *task, int success)
if (dev && dev->transport->transport_complete) {
if (dev->transport->transport_complete(task) != 0) {
cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
- task->task_sense = 1;
+ task->task_flags |= TF_HAS_SENSE;
success = 1;
}
}
@@ -743,13 +732,7 @@ void transport_complete_task(struct se_task *task, int success)
}
if (cmd->t_tasks_failed) {
- if (!task->task_error_status) {
- task->task_error_status =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- cmd->scsi_sense_reason =
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- }
-
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
INIT_WORK(&cmd->work, target_complete_failure_work);
} else {
atomic_set(&cmd->t_transport_complete, 1);
@@ -824,7 +807,7 @@ static void __transport_add_task_to_execute_queue(
head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
atomic_inc(&dev->execute_tasks);
- if (atomic_read(&task->task_state_active))
+ if (task->t_state_active)
return;
/*
* Determine if this task needs to go to HEAD_OF_QUEUE for the
@@ -838,7 +821,7 @@ static void __transport_add_task_to_execute_queue(
else
list_add_tail(&task->t_state_list, &dev->state_task_list);
- atomic_set(&task->task_state_active, 1);
+ task->t_state_active = true;
pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
@@ -853,29 +836,26 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
spin_lock_irqsave(&cmd->t_state_lock, flags);
list_for_each_entry(task, &cmd->t_task_list, t_list) {
- if (atomic_read(&task->task_state_active))
- continue;
-
spin_lock(&dev->execute_task_lock);
- list_add_tail(&task->t_state_list, &dev->state_task_list);
- atomic_set(&task->task_state_active, 1);
-
- pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
- task->task_se_cmd->se_tfo->get_task_tag(
- task->task_se_cmd), task, dev);
-
+ if (!task->t_state_active) {
+ list_add_tail(&task->t_state_list,
+ &dev->state_task_list);
+ task->t_state_active = true;
+
+ pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
+ task->task_se_cmd->se_tfo->get_task_tag(
+ task->task_se_cmd), task, dev);
+ }
spin_unlock(&dev->execute_task_lock);
}
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
-static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
+static void __transport_add_tasks_from_cmd(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct se_task *task, *task_prev = NULL;
- unsigned long flags;
- spin_lock_irqsave(&dev->execute_task_lock, flags);
list_for_each_entry(task, &cmd->t_task_list, t_list) {
if (!list_empty(&task->t_execute_list))
continue;
@@ -886,6 +866,15 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
__transport_add_task_to_execute_queue(task, task_prev, dev);
task_prev = task;
}
+}
+
+static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
+{
+ unsigned long flags;
+ struct se_device *dev = cmd->se_dev;
+
+ spin_lock_irqsave(&dev->execute_task_lock, flags);
+ __transport_add_tasks_from_cmd(cmd);
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}
@@ -896,7 +885,7 @@ void __transport_remove_task_from_execute_queue(struct se_task *task,
atomic_dec(&dev->execute_tasks);
}
-void transport_remove_task_from_execute_queue(
+static void transport_remove_task_from_execute_queue(
struct se_task *task,
struct se_device *dev)
{
@@ -983,9 +972,8 @@ void transport_dump_dev_state(
break;
}
- *bl += sprintf(b + *bl, " Execute/Left/Max Queue Depth: %d/%d/%d",
- atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
- dev->queue_depth);
+ *bl += sprintf(b + *bl, " Execute/Max Queue Depth: %d/%d",
+ atomic_read(&dev->execute_tasks), dev->queue_depth);
*bl += sprintf(b + *bl, " SectorSize: %u MaxSectors: %u\n",
dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
*bl += sprintf(b + *bl, " ");
@@ -1267,32 +1255,34 @@ static void core_setup_task_attr_emulation(struct se_device *dev)
static void scsi_dump_inquiry(struct se_device *dev)
{
struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
+ char buf[17];
int i, device_type;
/*
* Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
*/
- pr_debug(" Vendor: ");
for (i = 0; i < 8; i++)
if (wwn->vendor[i] >= 0x20)
- pr_debug("%c", wwn->vendor[i]);
+ buf[i] = wwn->vendor[i];
else
- pr_debug(" ");
+ buf[i] = ' ';
+ buf[i] = '\0';
+ pr_debug(" Vendor: %s\n", buf);
- pr_debug(" Model: ");
for (i = 0; i < 16; i++)
if (wwn->model[i] >= 0x20)
- pr_debug("%c", wwn->model[i]);
+ buf[i] = wwn->model[i];
else
- pr_debug(" ");
+ buf[i] = ' ';
+ buf[i] = '\0';
+ pr_debug(" Model: %s\n", buf);
- pr_debug(" Revision: ");
for (i = 0; i < 4; i++)
if (wwn->revision[i] >= 0x20)
- pr_debug("%c", wwn->revision[i]);
+ buf[i] = wwn->revision[i];
else
- pr_debug(" ");
-
- pr_debug("\n");
+ buf[i] = ' ';
+ buf[i] = '\0';
+ pr_debug(" Revision: %s\n", buf);
device_type = dev->transport->get_device_type(dev);
pr_debug(" Type: %s ", scsi_device_type(device_type));
@@ -1340,9 +1330,6 @@ struct se_device *transport_add_device_to_core_hba(
spin_lock_init(&dev->se_port_lock);
spin_lock_init(&dev->se_tmr_lock);
spin_lock_init(&dev->qf_cmd_lock);
-
- dev->queue_depth = dev_limits->queue_depth;
- atomic_set(&dev->depth_left, dev->queue_depth);
atomic_set(&dev->dev_ordered_id, 0);
se_dev_set_default_attribs(dev, dev_limits);
@@ -1654,6 +1641,81 @@ int transport_handle_cdb_direct(
}
EXPORT_SYMBOL(transport_handle_cdb_direct);
+/**
+ * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
+ *
+ * @se_cmd: command descriptor to submit
+ * @se_sess: associated se_sess for endpoint
+ * @cdb: pointer to SCSI CDB
+ * @sense: pointer to SCSI sense buffer
+ * @unpacked_lun: unpacked LUN to reference for struct se_lun
+ * @data_length: fabric expected data transfer length
+ * @task_attr: SAM task attribute
+ * @data_dir: DMA data direction
+ * @flags: flags for command submission from target_sc_flags_table
+ *
+ * This may only be called from process context, and also currently
+ * assumes internal allocation of fabric payload buffer by target-core.
+ **/
+void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+ unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
+ u32 data_length, int task_attr, int data_dir, int flags)
+{
+ struct se_portal_group *se_tpg;
+ int rc;
+
+ se_tpg = se_sess->se_tpg;
+ BUG_ON(!se_tpg);
+ BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
+ BUG_ON(in_interrupt());
+ /*
+ * Initialize se_cmd for target operation. From this point
+ * exceptions are handled by sending exception status via
+ * target_core_fabric_ops->queue_status() callback
+ */
+ transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
+ data_length, data_dir, task_attr, sense);
+ /*
+ * Obtain struct se_cmd->cmd_kref reference and add new cmd to
+ * se_sess->sess_cmd_list. A second kref_get here is necessary
+ * for fabrics using TARGET_SCF_ACK_KREF that expect a second
+ * kref_put() to happen during fabric packet acknowledgement.
+ */
+ target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+ /*
+ * Signal bidirectional data payloads to target-core
+ */
+ if (flags & TARGET_SCF_BIDI_OP)
+ se_cmd->se_cmd_flags |= SCF_BIDI;
+ /*
+ * Locate se_lun pointer and attach it to struct se_cmd
+ */
+ if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
+ transport_send_check_condition_and_sense(se_cmd,
+ se_cmd->scsi_sense_reason, 0);
+ target_put_sess_cmd(se_sess, se_cmd);
+ return;
+ }
+ /*
+ * Sanitize CDBs via transport_generic_cmd_sequencer() and
+ * allocate the necessary tasks to complete the received CDB+data
+ */
+ rc = transport_generic_allocate_tasks(se_cmd, cdb);
+ if (rc != 0) {
+ transport_generic_request_failure(se_cmd);
+ return;
+ }
+ /*
+ * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
+ * for immediate execution of READs, otherwise wait for
+ * transport_generic_handle_data() to be called for WRITEs
+ * when fabric has filled the incoming buffer.
+ */
+ transport_handle_cdb_direct(se_cmd);
+ return;
+}
+EXPORT_SYMBOL(target_submit_cmd);
+
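For reference, a minimal sketch of how a fabric module might drive the new target_submit_cmd() entry point; the fabric command structure, its field names and the helper below are hypothetical, only the target_submit_cmd() signature comes from this patch:

	/* Hypothetical fabric glue, sketched against the signature above. */
	static void my_fabric_queue_cmd(struct my_fabric_cmd *fcmd, u32 lun,
					unsigned char *cdb, u32 data_len,
					int task_attr, int data_dir)
	{
		/*
		 * target_submit_cmd() initializes the se_cmd, takes the
		 * session command reference, resolves the LUN and dispatches
		 * the CDB; on failure, status is sent back through the
		 * fabric's queue_status() callback as noted above.
		 */
		target_submit_cmd(&fcmd->se_cmd, fcmd->sess->se_sess, cdb,
				  &fcmd->sense_buffer[0], lun, data_len,
				  task_attr, data_dir, TARGET_SCF_ACK_KREF);
	}

With TARGET_SCF_ACK_KREF the fabric is then expected to perform a second target_put_sess_cmd() when it acknowledges the response, per the kref comments above.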
/*
* Used by fabric module frontends defining a TFO->new_cmd_map() caller
* to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
@@ -1920,18 +1982,6 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
}
-static inline int transport_tcq_window_closed(struct se_device *dev)
-{
- if (dev->dev_tcq_window_closed++ <
- PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
- msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
- } else
- msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
-
- wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
- return 0;
-}
-
/*
* Called from Fabric Module context from transport_execute_tasks()
*
@@ -2014,13 +2064,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
static int transport_execute_tasks(struct se_cmd *cmd)
{
int add_tasks;
-
- if (se_dev_check_online(cmd->se_dev) != 0) {
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- transport_generic_request_failure(cmd);
- return 0;
- }
-
+ struct se_device *se_dev = cmd->se_dev;
/*
* Call transport_cmd_check_stop() to see if a fabric exception
* has occurred that prevents execution.
@@ -2034,19 +2078,16 @@ static int transport_execute_tasks(struct se_cmd *cmd)
if (!add_tasks)
goto execute_tasks;
/*
- * This calls transport_add_tasks_from_cmd() to handle
- * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
- * (if enabled) in __transport_add_task_to_execute_queue() and
- * transport_add_task_check_sam_attr().
+ * __transport_execute_tasks() -> __transport_add_tasks_from_cmd()
+ * adds associated se_tasks while holding dev->execute_task_lock
+ * before I/O dispatch to avoid acquiring the spinlock twice.
*/
- transport_add_tasks_from_cmd(cmd);
+ __transport_execute_tasks(se_dev, cmd);
+ return 0;
}
- /*
- * Kick the execution queue for the cmd associated struct se_device
- * storage object.
- */
+
execute_tasks:
- __transport_execute_tasks(cmd->se_dev);
+ __transport_execute_tasks(se_dev, NULL);
return 0;
}
@@ -2056,24 +2097,18 @@ execute_tasks:
*
* Called from transport_processing_thread()
*/
-static int __transport_execute_tasks(struct se_device *dev)
+static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
{
int error;
struct se_cmd *cmd = NULL;
struct se_task *task = NULL;
unsigned long flags;
- /*
- * Check if there is enough room in the device and HBA queue to send
- * struct se_tasks to the selected transport.
- */
check_depth:
- if (!atomic_read(&dev->depth_left))
- return transport_tcq_window_closed(dev);
-
- dev->dev_tcq_window_closed = 0;
-
spin_lock_irq(&dev->execute_task_lock);
+ if (new_cmd != NULL)
+ __transport_add_tasks_from_cmd(new_cmd);
+
if (list_empty(&dev->execute_task_list)) {
spin_unlock_irq(&dev->execute_task_lock);
return 0;
@@ -2083,10 +2118,7 @@ check_depth:
__transport_remove_task_from_execute_queue(task, dev);
spin_unlock_irq(&dev->execute_task_lock);
- atomic_dec(&dev->depth_left);
-
cmd = task->task_se_cmd;
-
spin_lock_irqsave(&cmd->t_state_lock, flags);
task->task_flags |= (TF_ACTIVE | TF_SENT);
atomic_inc(&cmd->t_task_cdbs_sent);
@@ -2107,10 +2139,10 @@ check_depth:
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
atomic_set(&cmd->t_transport_sent, 0);
transport_stop_tasks_for_cmd(cmd);
- atomic_inc(&dev->depth_left);
transport_generic_request_failure(cmd);
}
+ new_cmd = NULL;
goto check_depth;
return 0;
@@ -2351,7 +2383,7 @@ static int transport_get_sense_data(struct se_cmd *cmd)
list_for_each_entry_safe(task, task_tmp,
&cmd->t_task_list, t_list) {
- if (!task->task_sense)
+ if (!(task->task_flags & TF_HAS_SENSE))
continue;
if (!dev->transport->get_sense_buffer) {
@@ -2507,6 +2539,7 @@ static int transport_generic_cmd_sequencer(
cmd, cdb, pr_reg_type) != 0) {
cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
+ cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return -EBUSY;
}
@@ -2665,7 +2698,7 @@ static int transport_generic_cmd_sequencer(
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
if (target_check_write_same_discard(&cdb[10], dev) < 0)
- goto out_invalid_cdb_field;
+ goto out_unsupported_cdb;
if (!passthrough)
cmd->execute_task = target_emulate_write_same;
break;
@@ -2948,7 +2981,7 @@ static int transport_generic_cmd_sequencer(
cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
if (target_check_write_same_discard(&cdb[1], dev) < 0)
- goto out_invalid_cdb_field;
+ goto out_unsupported_cdb;
if (!passthrough)
cmd->execute_task = target_emulate_write_same;
break;
@@ -2971,7 +3004,7 @@ static int transport_generic_cmd_sequencer(
* of byte 1 bit 3 UNMAP instead of original reserved field
*/
if (target_check_write_same_discard(&cdb[1], dev) < 0)
- goto out_invalid_cdb_field;
+ goto out_unsupported_cdb;
if (!passthrough)
cmd->execute_task = target_emulate_write_same;
break;
@@ -3053,11 +3086,6 @@ static int transport_generic_cmd_sequencer(
(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
goto out_unsupported_cdb;
- /* Let's limit control cdbs to a page, for simplicity's sake. */
- if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
- size > PAGE_SIZE)
- goto out_invalid_cdb_field;
-
transport_set_supported_SAM_opcode(cmd);
return ret;
@@ -3346,6 +3374,32 @@ static inline void transport_free_pages(struct se_cmd *cmd)
}
/**
+ * transport_release_cmd - free a command
+ * @cmd: command to free
+ *
+ * This routine unconditionally frees a command, and reference counting
+ * or list removal must be done in the caller.
+ */
+static void transport_release_cmd(struct se_cmd *cmd)
+{
+ BUG_ON(!cmd->se_tfo);
+
+ if (cmd->se_tmr_req)
+ core_tmr_release_req(cmd->se_tmr_req);
+ if (cmd->t_task_cdb != cmd->__t_task_cdb)
+ kfree(cmd->t_task_cdb);
+ /*
+ * If this cmd has been set up with target_get_sess_cmd(), drop
+ * the kref and call ->release_cmd() in kref callback.
+ */
+ if (cmd->check_release != 0) {
+ target_put_sess_cmd(cmd->se_sess, cmd);
+ return;
+ }
+ cmd->se_tfo->release_cmd(cmd);
+}
+
+/**
* transport_put_cmd - release a reference to a command
* @cmd: command to release
*
@@ -3435,9 +3489,11 @@ int transport_generic_map_mem_to_cmd(
}
EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
-void *transport_kmap_first_data_page(struct se_cmd *cmd)
+void *transport_kmap_data_sg(struct se_cmd *cmd)
{
struct scatterlist *sg = cmd->t_data_sg;
+ struct page **pages;
+ int i;
BUG_ON(!sg);
/*
@@ -3445,15 +3501,41 @@ void *transport_kmap_first_data_page(struct se_cmd *cmd)
* tcm_loop who may be using a contig buffer from the SCSI midlayer for
* control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
*/
- return kmap(sg_page(sg)) + sg->offset;
+ if (!cmd->t_data_nents)
+ return NULL;
+ else if (cmd->t_data_nents == 1)
+ return kmap(sg_page(sg)) + sg->offset;
+
+ /* >1 page. use vmap */
+ pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
+ if (!pages)
+ return NULL;
+
+ /* convert sg[] to pages[] */
+ for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
+ pages[i] = sg_page(sg);
+ }
+
+ cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
+ kfree(pages);
+ if (!cmd->t_data_vmap)
+ return NULL;
+
+ return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
}
-EXPORT_SYMBOL(transport_kmap_first_data_page);
+EXPORT_SYMBOL(transport_kmap_data_sg);
-void transport_kunmap_first_data_page(struct se_cmd *cmd)
+void transport_kunmap_data_sg(struct se_cmd *cmd)
{
- kunmap(sg_page(cmd->t_data_sg));
+ if (!cmd->t_data_nents)
+ return;
+ else if (cmd->t_data_nents == 1)
+ kunmap(sg_page(cmd->t_data_sg));
+
+ vunmap(cmd->t_data_vmap);
+ cmd->t_data_vmap = NULL;
}
-EXPORT_SYMBOL(transport_kunmap_first_data_page);
+EXPORT_SYMBOL(transport_kunmap_data_sg);
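A minimal sketch of the caller pattern for the renamed kmap helpers; the emulation function name is illustrative. The whole data scatterlist is now linearly addressable (kmap for a single entry, vmap otherwise), and every successful map should be paired with an unmap:

	static int my_emulate_op(struct se_cmd *cmd)	/* hypothetical caller */
	{
		unsigned char *buf;

		buf = transport_kmap_data_sg(cmd);
		if (!buf)
			return -ENOMEM;

		/* ... read or fill up to cmd->data_length bytes at buf ... */

		transport_kunmap_data_sg(cmd);
		return 0;
	}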
static int
transport_generic_get_mem(struct se_cmd *cmd)
@@ -3461,6 +3543,7 @@ transport_generic_get_mem(struct se_cmd *cmd)
u32 length = cmd->data_length;
unsigned int nents;
struct page *page;
+ gfp_t zero_flag;
int i = 0;
nents = DIV_ROUND_UP(length, PAGE_SIZE);
@@ -3471,9 +3554,11 @@ transport_generic_get_mem(struct se_cmd *cmd)
cmd->t_data_nents = nents;
sg_init_table(cmd->t_data_sg, nents);
+ zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB ? 0 : __GFP_ZERO;
+
while (length) {
u32 page_len = min_t(u32, length, PAGE_SIZE);
- page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ page = alloc_page(GFP_KERNEL | zero_flag);
if (!page)
goto out;
@@ -3701,6 +3786,11 @@ transport_allocate_control_task(struct se_cmd *cmd)
struct se_task *task;
unsigned long flags;
+ /* Workaround for handling zero-length control CDBs */
+ if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
+ !cmd->data_length)
+ return 0;
+
task = transport_generic_get_task(cmd, cmd->data_direction);
if (!task)
return -ENOMEM;
@@ -3772,6 +3862,14 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
cmd->t_state = TRANSPORT_COMPLETE;
atomic_set(&cmd->t_transport_active, 1);
+
+ if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
+ u8 ua_asc = 0, ua_ascq = 0;
+
+ core_scsi3_ua_clear_for_request_sense(cmd,
+ &ua_asc, &ua_ascq);
+ }
+
INIT_WORK(&cmd->work, target_complete_ok_work);
queue_work(target_completion_wq, &cmd->work);
return 0;
@@ -3870,33 +3968,6 @@ queue_full:
return 0;
}
-/**
- * transport_release_cmd - free a command
- * @cmd: command to free
- *
- * This routine unconditionally frees a command, and reference counting
- * or list removal must be done in the caller.
- */
-void transport_release_cmd(struct se_cmd *cmd)
-{
- BUG_ON(!cmd->se_tfo);
-
- if (cmd->se_tmr_req)
- core_tmr_release_req(cmd->se_tmr_req);
- if (cmd->t_task_cdb != cmd->__t_task_cdb)
- kfree(cmd->t_task_cdb);
- /*
- * Check if target_wait_for_sess_cmds() is expecting to
- * release se_cmd directly here..
- */
- if (cmd->check_release != 0 && cmd->se_tfo->check_release_cmd)
- if (cmd->se_tfo->check_release_cmd(cmd) != 0)
- return;
-
- cmd->se_tfo->release_cmd(cmd);
-}
-EXPORT_SYMBOL(transport_release_cmd);
-
void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
@@ -3923,11 +3994,22 @@ EXPORT_SYMBOL(transport_generic_free_cmd);
/* target_get_sess_cmd - Add command to active ->sess_cmd_list
* @se_sess: session to reference
* @se_cmd: command descriptor to add
+ * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd()
*/
-void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
+void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
+ bool ack_kref)
{
unsigned long flags;
+ kref_init(&se_cmd->cmd_kref);
+ /*
+ * Add a second kref if the fabric caller is expecting to handle
+ * fabric acknowledgement that requires two target_put_sess_cmd()
+ * invocations before se_cmd descriptor release.
+ */
+ if (ack_kref == true)
+ kref_get(&se_cmd->cmd_kref);
+
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
se_cmd->check_release = 1;
@@ -3935,30 +4017,36 @@ void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
}
EXPORT_SYMBOL(target_get_sess_cmd);
-/* target_put_sess_cmd - Check for active I/O shutdown or list delete
- * @se_sess: session to reference
- * @se_cmd: command descriptor to drop
- */
-int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
+static void target_release_cmd_kref(struct kref *kref)
{
+ struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
+ struct se_session *se_sess = se_cmd->se_sess;
unsigned long flags;
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
if (list_empty(&se_cmd->se_cmd_list)) {
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
WARN_ON(1);
- return 0;
+ return;
}
-
if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
complete(&se_cmd->cmd_wait_comp);
- return 1;
+ return;
}
list_del(&se_cmd->se_cmd_list);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
- return 0;
+ se_cmd->se_tfo->release_cmd(se_cmd);
+}
+
+/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
+ * @se_sess: session to reference
+ * @se_cmd: command descriptor to drop
+ */
+int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
+{
+ return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
}
EXPORT_SYMBOL(target_put_sess_cmd);
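Summarizing the se_cmd reference lifetime implemented above (a descriptive sketch of the code just added, not new API):

	/*
	 * target_get_sess_cmd(sess, cmd, false):
	 *	kref_init()			-> refcount 1
	 *	target_put_sess_cmd()		-> 0, ->release_cmd() invoked
	 *
	 * target_get_sess_cmd(sess, cmd, true), i.e. TARGET_SCF_ACK_KREF:
	 *	kref_init() + kref_get()	-> refcount 2
	 *	target_put_sess_cmd()		-> 1  (completion path)
	 *	target_put_sess_cmd()		-> 0, ->release_cmd() invoked
	 *					      (fabric ack path)
	 */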
@@ -4174,7 +4262,7 @@ check_cond:
static int transport_clear_lun_thread(void *p)
{
- struct se_lun *lun = (struct se_lun *)p;
+ struct se_lun *lun = p;
__transport_clear_lun_from_sessions(lun);
complete(&lun->lun_shutdown_comp);
@@ -4353,6 +4441,7 @@ int transport_send_check_condition_and_sense(
case TCM_NON_EXISTENT_LUN:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ILLEGAL REQUEST */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
/* LOGICAL UNIT NOT SUPPORTED */
@@ -4362,6 +4451,7 @@ int transport_send_check_condition_and_sense(
case TCM_SECTOR_COUNT_TOO_MANY:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ILLEGAL REQUEST */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
/* INVALID COMMAND OPERATION CODE */
@@ -4370,6 +4460,7 @@ int transport_send_check_condition_and_sense(
case TCM_UNKNOWN_MODE_PAGE:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ILLEGAL REQUEST */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
/* INVALID FIELD IN CDB */
@@ -4378,6 +4469,7 @@ int transport_send_check_condition_and_sense(
case TCM_CHECK_CONDITION_ABORT_CMD:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* BUS DEVICE RESET FUNCTION OCCURRED */
@@ -4387,6 +4479,7 @@ int transport_send_check_condition_and_sense(
case TCM_INCORRECT_AMOUNT_OF_DATA:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* WRITE ERROR */
@@ -4397,22 +4490,25 @@ int transport_send_check_condition_and_sense(
case TCM_INVALID_CDB_FIELD:
/* CURRENT ERROR */
buffer[offset] = 0x70;
- /* ABORTED COMMAND */
- buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ILLEGAL REQUEST */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
/* INVALID FIELD IN CDB */
buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
break;
case TCM_INVALID_PARAMETER_LIST:
/* CURRENT ERROR */
buffer[offset] = 0x70;
- /* ABORTED COMMAND */
- buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+ /* ILLEGAL REQUEST */
+ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
/* INVALID FIELD IN PARAMETER LIST */
buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
break;
case TCM_UNEXPECTED_UNSOLICITED_DATA:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* WRITE ERROR */
@@ -4423,6 +4519,7 @@ int transport_send_check_condition_and_sense(
case TCM_SERVICE_CRC_ERROR:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* PROTOCOL SERVICE CRC ERROR */
@@ -4433,6 +4530,7 @@ int transport_send_check_condition_and_sense(
case TCM_SNACK_REJECTED:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ABORTED COMMAND */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
/* READ ERROR */
@@ -4443,6 +4541,7 @@ int transport_send_check_condition_and_sense(
case TCM_WRITE_PROTECTED:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* DATA PROTECT */
buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
/* WRITE PROTECTED */
@@ -4451,6 +4550,7 @@ int transport_send_check_condition_and_sense(
case TCM_CHECK_CONDITION_UNIT_ATTENTION:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* UNIT ATTENTION */
buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
@@ -4460,6 +4560,7 @@ int transport_send_check_condition_and_sense(
case TCM_CHECK_CONDITION_NOT_READY:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* Not Ready */
buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
transport_get_sense_codes(cmd, &asc, &ascq);
@@ -4470,6 +4571,7 @@ int transport_send_check_condition_and_sense(
default:
/* CURRENT ERROR */
buffer[offset] = 0x70;
+ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
/* ILLEGAL REQUEST */
buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
/* LOGICAL UNIT COMMUNICATION FAILURE */
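The added SPC_ADD_SENSE_LEN_OFFSET stores fill in the additional-sense-length byte of the fixed-format sense data, so initiators that honour that field actually see the ASC/ASCQ bytes. A rough layout sketch; the byte offsets are assumptions based on the SPC fixed format, not values taken from this diff:

	/*
	 * Fixed-format sense data, 18 bytes total:
	 *
	 *   byte  0   0x70   current error, fixed format
	 *   byte  2   sense key			(SPC_SENSE_KEY_OFFSET)
	 *   byte  7   10     additional sense length	(SPC_ADD_SENSE_LEN_OFFSET)
	 *   byte 12   ASC				(SPC_ASC_KEY_OFFSET)
	 *   byte 13   ASCQ
	 *
	 * 8 header bytes + 10 additional bytes = 18, which is what the
	 * "= 10" stores in each case above describe.
	 */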
@@ -4545,11 +4647,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
cmd->se_tfo->queue_status(cmd);
}
-/* transport_generic_do_tmr():
- *
- *
- */
-int transport_generic_do_tmr(struct se_cmd *cmd)
+static int transport_generic_do_tmr(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
struct se_tmr_req *tmr = cmd->se_tmr_req;
@@ -4597,7 +4695,7 @@ static int transport_processing_thread(void *param)
{
int ret;
struct se_cmd *cmd;
- struct se_device *dev = (struct se_device *) param;
+ struct se_device *dev = param;
while (!kthread_should_stop()) {
ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
@@ -4607,8 +4705,6 @@ static int transport_processing_thread(void *param)
goto out;
get_cmd:
- __transport_execute_tasks(dev);
-
cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
if (!cmd)
continue;
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index 50a480d..3e12f6b 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -30,13 +30,11 @@
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
+#include "target_core_internal.h"
#include "target_core_alua.h"
-#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 71fc9ce..9e7e26c 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -39,12 +39,8 @@
#include <scsi/fc_encode.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
-#include <target/target_core_tmr.h>
#include <target/configfs_macros.h>
#include "tcm_fc.h"
@@ -367,6 +363,11 @@ static void ft_send_tm(struct ft_cmd *cmd)
struct ft_sess *sess;
u8 tm_func;
+ transport_init_se_cmd(&cmd->se_cmd, &ft_configfs->tf_ops,
+ cmd->sess->se_sess, 0, DMA_NONE, 0,
+ &cmd->ft_sense_buffer[0]);
+ target_get_sess_cmd(cmd->sess->se_sess, &cmd->se_cmd, false);
+
fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
switch (fcp->fc_tm_flags) {
@@ -420,7 +421,6 @@ static void ft_send_tm(struct ft_cmd *cmd)
sess = cmd->sess;
transport_send_check_condition_and_sense(&cmd->se_cmd,
cmd->se_cmd.scsi_sense_reason, 0);
- transport_generic_free_cmd(&cmd->se_cmd, 0);
ft_sess_put(sess);
return;
}
@@ -536,12 +536,10 @@ static void ft_send_work(struct work_struct *work)
{
struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
- struct se_cmd *se_cmd;
struct fcp_cmnd *fcp;
int data_dir = 0;
u32 data_len;
int task_attr;
- int ret;
fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
if (!fcp)
@@ -591,15 +589,6 @@ static void ft_send_work(struct work_struct *work)
data_len = ntohl(fcp->fc_dl);
cmd->cdb = fcp->fc_cdb;
}
-
- se_cmd = &cmd->se_cmd;
- /*
- * Initialize struct se_cmd descriptor from target_core_mod
- * infrastructure
- */
- transport_init_se_cmd(se_cmd, &ft_configfs->tf_ops, cmd->sess->se_sess,
- data_len, data_dir, task_attr,
- &cmd->ft_sense_buffer[0]);
/*
* Check for FCP task management flags
*/
@@ -607,39 +596,16 @@ static void ft_send_work(struct work_struct *work)
ft_send_tm(cmd);
return;
}
-
fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
-
cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
- ret = transport_lookup_cmd_lun(&cmd->se_cmd, cmd->lun);
- if (ret < 0) {
- ft_dump_cmd(cmd, __func__);
- transport_send_check_condition_and_sense(&cmd->se_cmd,
- cmd->se_cmd.scsi_sense_reason, 0);
- return;
- }
-
- ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb);
-
- pr_debug("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
- ft_dump_cmd(cmd, __func__);
-
- if (ret == -ENOMEM) {
- transport_send_check_condition_and_sense(se_cmd,
- TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
- transport_generic_free_cmd(se_cmd, 0);
- return;
- }
- if (ret == -EINVAL) {
- if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
- ft_queue_status(se_cmd);
- else
- transport_send_check_condition_and_sense(se_cmd,
- se_cmd->scsi_sense_reason, 0);
- transport_generic_free_cmd(se_cmd, 0);
- return;
- }
- transport_handle_cdb_direct(se_cmd);
+ /*
+ * Use a single se_cmd->cmd_kref as we expect to release se_cmd
+ * directly from ft_check_stop_free callback in response path.
+ */
+ target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, cmd->cdb,
+ &cmd->ft_sense_buffer[0], cmd->lun, data_len,
+ task_attr, data_dir, 0);
+ pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
return;
err:
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index 9402b73..73852fb 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -41,12 +41,8 @@
#include <scsi/libfc.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
-#include <target/target_core_fabric_lib.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 1369b1c..d8cabc2 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -48,10 +48,7 @@
#include <scsi/fc_encode.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 3269213..4c0507c 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -40,10 +40,7 @@
#include <scsi/libfc.h>
#include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
+#include <target/target_core_fabric.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index dd9a574..220ce7e 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -1304,7 +1304,7 @@ static struct genl_multicast_group thermal_event_mcgrp = {
.name = THERMAL_GENL_MCAST_GROUP_NAME,
};
-int generate_netlink_event(u32 orig, enum events event)
+int thermal_generate_netlink_event(u32 orig, enum events event)
{
struct sk_buff *skb;
struct nlattr *attr;
@@ -1363,7 +1363,7 @@ int generate_netlink_event(u32 orig, enum events event)
return result;
}
-EXPORT_SYMBOL(generate_netlink_event);
+EXPORT_SYMBOL(thermal_generate_netlink_event);
static int genetlink_init(void)
{
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index b3d1741..830cd62 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -365,7 +365,7 @@ config PPC_EPAPR_HV_BYTECHAN
config PPC_EARLY_DEBUG_EHV_BC
bool "Early console (udbg) support for ePAPR hypervisors"
- depends on PPC_EPAPR_HV_BYTECHAN
+ depends on PPC_EPAPR_HV_BYTECHAN=y
help
Select this option to enable early console (a.k.a. "udbg") support
via an ePAPR byte channel. You also need to choose the byte channel
diff --git a/drivers/tty/n_hdlc.c b/drivers/tty/n_hdlc.c
index cea5603..a09ce3e 100644
--- a/drivers/tty/n_hdlc.c
+++ b/drivers/tty/n_hdlc.c
@@ -417,7 +417,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
__FILE__,__LINE__,tbuf,tbuf->count);
/* Send the next block of data to device */
- tty->flags |= (1 << TTY_DO_WRITE_WAKEUP);
+ set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
actual = tty->ops->write(tty, tbuf->buf, tbuf->count);
/* rollback was possible and has been done */
@@ -459,7 +459,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty)
}
if (!tbuf)
- tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
/* Clear the re-entry flag */
spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock, flags);
@@ -491,7 +491,7 @@ static void n_hdlc_tty_wakeup(struct tty_struct *tty)
return;
if (tty != n_hdlc->tty) {
- tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP);
+ clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
return;
}
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 39d6ab6..d2256d0 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -61,7 +61,7 @@
* controlling the space in the read buffer.
*/
#define TTY_THRESHOLD_THROTTLE 128 /* now based on remaining room */
-#define TTY_THRESHOLD_UNTHROTTLE 128
+#define TTY_THRESHOLD_UNTHROTTLE 128
/*
* Special byte codes used in the echo buffer to represent operations
@@ -405,7 +405,7 @@ static ssize_t process_output_block(struct tty_struct *tty,
const unsigned char *buf, unsigned int nr)
{
int space;
- int i;
+ int i;
const unsigned char *cp;
mutex_lock(&tty->output_lock);
@@ -1607,7 +1607,7 @@ static inline int input_available_p(struct tty_struct *tty, int amt)
}
/**
- * copy_from_read_buf - copy read data directly
+ * copy_from_read_buf - copy read data directly
* @tty: terminal device
* @b: user data
* @nr: size of data
@@ -1909,7 +1909,7 @@ do_it_again:
if (nr)
clear_bit(TTY_PUSH, &tty->flags);
} else if (test_and_clear_bit(TTY_PUSH, &tty->flags))
- goto do_it_again;
+ goto do_it_again;
n_tty_set_room(tty);
return retval;
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index e18604b..d8653ab 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -446,19 +446,8 @@ static inline void legacy_pty_init(void) { }
int pty_limit = NR_UNIX98_PTY_DEFAULT;
static int pty_limit_min;
static int pty_limit_max = NR_UNIX98_PTY_MAX;
-static int tty_count;
static int pty_count;
-static inline void pty_inc_count(void)
-{
- pty_count = (++tty_count) / 2;
-}
-
-static inline void pty_dec_count(void)
-{
- pty_count = (--tty_count) / 2;
-}
-
static struct cdev ptmx_cdev;
static struct ctl_table pty_table[] = {
@@ -600,8 +589,7 @@ static int pty_unix98_install(struct tty_driver *driver, struct tty_struct *tty)
*/
tty_driver_kref_get(driver);
tty->count++;
- pty_inc_count(); /* tty */
- pty_inc_count(); /* tty->link */
+ pty_count++;
return 0;
err_free_mem:
deinitialize_tty_struct(o_tty);
@@ -613,15 +601,19 @@ err_free_tty:
return -ENOMEM;
}
-static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
+static void ptm_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
+{
+ pty_count--;
+}
+
+static void pts_unix98_remove(struct tty_driver *driver, struct tty_struct *tty)
{
- pty_dec_count();
}
static const struct tty_operations ptm_unix98_ops = {
.lookup = ptm_unix98_lookup,
.install = pty_unix98_install,
- .remove = pty_unix98_remove,
+ .remove = ptm_unix98_remove,
.open = pty_open,
.close = pty_close,
.write = pty_write,
@@ -638,7 +630,7 @@ static const struct tty_operations ptm_unix98_ops = {
static const struct tty_operations pty_unix98_ops = {
.lookup = pts_unix98_lookup,
.install = pty_unix98_install,
- .remove = pty_unix98_remove,
+ .remove = pts_unix98_remove,
.open = pty_open,
.close = pty_close,
.write = pty_write,
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index 6a1241c..de88aa5 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -118,7 +118,7 @@ static unsigned long board2;
static unsigned long board3;
static unsigned long board4;
static unsigned long controller;
-static int support_low_speed;
+static bool support_low_speed;
static unsigned long modem1;
static unsigned long modem2;
static unsigned long modem3;
diff --git a/drivers/tty/serial/8250.c b/drivers/tty/serial/8250/8250.c
index eeadf1b..9b7336f 100644
--- a/drivers/tty/serial/8250.c
+++ b/drivers/tty/serial/8250/8250.c
@@ -45,7 +45,7 @@
#include "8250.h"
#ifdef CONFIG_SPARC
-#include "suncore.h"
+#include "../suncore.h"
#endif
/*
@@ -129,32 +129,6 @@ static unsigned long probe_rsa[PORT_RSA_MAX];
static unsigned int probe_rsa_count;
#endif /* CONFIG_SERIAL_8250_RSA */
-struct uart_8250_port {
- struct uart_port port;
- struct timer_list timer; /* "no irq" timer */
- struct list_head list; /* ports on this IRQ */
- unsigned short capabilities; /* port capabilities */
- unsigned short bugs; /* port bugs */
- unsigned int tx_loadsz; /* transmit fifo load size */
- unsigned char acr;
- unsigned char ier;
- unsigned char lcr;
- unsigned char mcr;
- unsigned char mcr_mask; /* mask of user bits */
- unsigned char mcr_force; /* mask of forced bits */
- unsigned char cur_iotype; /* Running I/O type */
-
- /*
- * Some bits in registers are cleared on a read, so they must
- * be saved whenever the register is read but the bits will not
- * be immediately processed.
- */
-#define LSR_SAVE_FLAGS UART_LSR_BRK_ERROR_BITS
- unsigned char lsr_saved_flags;
-#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
- unsigned char msr_saved_flags;
-};
-
struct irq_info {
struct hlist_node node;
int irq;
@@ -1326,8 +1300,6 @@ static void serial8250_stop_tx(struct uart_port *port)
}
}
-static void transmit_chars(struct uart_8250_port *up);
-
static void serial8250_start_tx(struct uart_port *port)
{
struct uart_8250_port *up =
@@ -1344,7 +1316,7 @@ static void serial8250_start_tx(struct uart_port *port)
if ((up->port.type == PORT_RM9000) ?
(lsr & UART_LSR_THRE) :
(lsr & UART_LSR_TEMT))
- transmit_chars(up);
+ serial8250_tx_chars(up);
}
}
@@ -1401,11 +1373,16 @@ static void clear_rx_fifo(struct uart_8250_port *up)
} while (1);
}
-static void
-receive_chars(struct uart_8250_port *up, unsigned int *status)
+/*
+ * serial8250_rx_chars: processes according to the passed in LSR
+ * value, and returns the remaining LSR bits not handled
+ * by this Rx routine.
+ */
+unsigned char
+serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr)
{
struct tty_struct *tty = up->port.state->port.tty;
- unsigned char ch, lsr = *status;
+ unsigned char ch;
int max_count = 256;
char flag;
@@ -1481,10 +1458,11 @@ ignore_char:
spin_unlock(&up->port.lock);
tty_flip_buffer_push(tty);
spin_lock(&up->port.lock);
- *status = lsr;
+ return lsr;
}
+EXPORT_SYMBOL_GPL(serial8250_rx_chars);
-static void transmit_chars(struct uart_8250_port *up)
+void serial8250_tx_chars(struct uart_8250_port *up)
{
struct circ_buf *xmit = &up->port.state->xmit;
int count;
@@ -1521,8 +1499,9 @@ static void transmit_chars(struct uart_8250_port *up)
if (uart_circ_empty(xmit))
__stop_tx(up);
}
+EXPORT_SYMBOL_GPL(serial8250_tx_chars);
-static unsigned int check_modem_status(struct uart_8250_port *up)
+unsigned int serial8250_modem_status(struct uart_8250_port *up)
{
unsigned int status = serial_in(up, UART_MSR);
@@ -1544,14 +1523,20 @@ static unsigned int check_modem_status(struct uart_8250_port *up)
return status;
}
+EXPORT_SYMBOL_GPL(serial8250_modem_status);
/*
* This handles the interrupt from one port.
*/
-static void serial8250_handle_port(struct uart_8250_port *up)
+int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
{
- unsigned int status;
+ unsigned char status;
unsigned long flags;
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+
+ if (iir & UART_IIR_NO_INT)
+ return 0;
spin_lock_irqsave(&up->port.lock, flags);
@@ -1560,25 +1545,13 @@ static void serial8250_handle_port(struct uart_8250_port *up)
DEBUG_INTR("status = %x...", status);
if (status & (UART_LSR_DR | UART_LSR_BI))
- receive_chars(up, &status);
- check_modem_status(up);
+ status = serial8250_rx_chars(up, status);
+ serial8250_modem_status(up);
if (status & UART_LSR_THRE)
- transmit_chars(up);
+ serial8250_tx_chars(up);
spin_unlock_irqrestore(&up->port.lock, flags);
-}
-
-int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
-{
- struct uart_8250_port *up =
- container_of(port, struct uart_8250_port, port);
-
- if (!(iir & UART_IIR_NO_INT)) {
- serial8250_handle_port(up);
- return 1;
- }
-
- return 0;
+ return 1;
}
EXPORT_SYMBOL_GPL(serial8250_handle_irq);
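With the IIR test folded into serial8250_handle_irq(), a board-specific ->handle_irq hook only needs to read IIR itself before delegating. A minimal sketch; the function name is illustrative:

	static int my_board_handle_irq(struct uart_port *port)	/* hypothetical */
	{
		unsigned int iir = port->serial_in(port, UART_IIR);

		/* board-specific quirk handling could go here */
		return serial8250_handle_irq(port, iir);
	}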
@@ -1619,11 +1592,13 @@ static irqreturn_t serial8250_interrupt(int irq, void *dev_id)
do {
struct uart_8250_port *up;
struct uart_port *port;
+ bool skip;
up = list_entry(l, struct uart_8250_port, list);
port = &up->port;
+ skip = pass_counter && up->port.flags & UPF_IIR_ONCE;
- if (port->handle_irq(port)) {
+ if (!skip && port->handle_irq(port)) {
handled = 1;
end = NULL;
} else if (end == NULL)
@@ -1758,11 +1733,8 @@ static void serial_unlink_irq_chain(struct uart_8250_port *up)
static void serial8250_timeout(unsigned long data)
{
struct uart_8250_port *up = (struct uart_8250_port *)data;
- unsigned int iir;
- iir = serial_in(up, UART_IIR);
- if (!(iir & UART_IIR_NO_INT))
- serial8250_handle_port(up);
+ up->port.handle_irq(&up->port);
mod_timer(&up->timer, jiffies + uart_poll_timeout(&up->port));
}
@@ -1801,7 +1773,7 @@ static void serial8250_backup_timeout(unsigned long data)
}
if (!(iir & UART_IIR_NO_INT))
- transmit_chars(up);
+ serial8250_tx_chars(up);
if (is_real_interrupt(up->port.irq))
serial_out(up, UART_IER, ier);
@@ -1835,7 +1807,7 @@ static unsigned int serial8250_get_mctrl(struct uart_port *port)
unsigned int status;
unsigned int ret;
- status = check_modem_status(up);
+ status = serial8250_modem_status(up);
ret = 0;
if (status & UART_MSR_DCD)
@@ -2000,7 +1972,7 @@ static int serial8250_startup(struct uart_port *port)
serial_outp(up, UART_IER, 0);
serial_outp(up, UART_LCR, 0);
serial_icr_write(up, UART_CSR, 0); /* Reset the UART */
- serial_outp(up, UART_LCR, 0xBF);
+ serial_outp(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_outp(up, UART_EFR, UART_EFR_ECB);
serial_outp(up, UART_LCR, 0);
}
@@ -2848,7 +2820,7 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
local_irq_save(flags);
if (up->port.sysrq) {
- /* serial8250_handle_port() already took the lock */
+ /* serial8250_handle_irq() already took the lock */
locked = 0;
} else if (oops_in_progress) {
locked = spin_trylock(&up->port.lock);
@@ -2882,7 +2854,7 @@ serial8250_console_write(struct console *co, const char *s, unsigned int count)
* while processing with interrupts off.
*/
if (up->msr_saved_flags)
- check_modem_status(up);
+ serial8250_modem_status(up);
if (locked)
spin_unlock(&up->port.lock);
diff --git a/drivers/tty/serial/8250.h b/drivers/tty/serial/8250/8250.h
index 6edf4a6..ae027be 100644
--- a/drivers/tty/serial/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -13,6 +13,32 @@
#include <linux/serial_8250.h>
+struct uart_8250_port {
+ struct uart_port port;
+ struct timer_list timer; /* "no irq" timer */
+ struct list_head list; /* ports on this IRQ */
+ unsigned short capabilities; /* port capabilities */
+ unsigned short bugs; /* port bugs */
+ unsigned int tx_loadsz; /* transmit fifo load size */
+ unsigned char acr;
+ unsigned char ier;
+ unsigned char lcr;
+ unsigned char mcr;
+ unsigned char mcr_mask; /* mask of user bits */
+ unsigned char mcr_force; /* mask of forced bits */
+ unsigned char cur_iotype; /* Running I/O type */
+
+ /*
+ * Some bits in registers are cleared on a read, so they must
+ * be saved whenever the register is read but the bits will not
+ * be immediately processed.
+ */
+#define LSR_SAVE_FLAGS UART_LSR_BRK_ERROR_BITS
+ unsigned char lsr_saved_flags;
+#define MSR_SAVE_FLAGS UART_MSR_ANY_DELTA
+ unsigned char msr_saved_flags;
+};
+
struct old_serial_port {
unsigned int uart;
unsigned int baud_base;
diff --git a/drivers/tty/serial/8250_accent.c b/drivers/tty/serial/8250/8250_accent.c
index 34b51c6..34b51c6 100644
--- a/drivers/tty/serial/8250_accent.c
+++ b/drivers/tty/serial/8250/8250_accent.c
diff --git a/drivers/tty/serial/8250_acorn.c b/drivers/tty/serial/8250/8250_acorn.c
index b0ce8c5..b0ce8c5 100644
--- a/drivers/tty/serial/8250_acorn.c
+++ b/drivers/tty/serial/8250/8250_acorn.c
diff --git a/drivers/tty/serial/8250_boca.c b/drivers/tty/serial/8250/8250_boca.c
index d125dc1..d125dc1 100644
--- a/drivers/tty/serial/8250_boca.c
+++ b/drivers/tty/serial/8250/8250_boca.c
diff --git a/drivers/tty/serial/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index bf1fba6..f574eef 100644
--- a/drivers/tty/serial/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -177,17 +177,7 @@ static struct platform_driver dw8250_platform_driver = {
.remove = __devexit_p(dw8250_remove),
};
-static int __init dw8250_init(void)
-{
- return platform_driver_register(&dw8250_platform_driver);
-}
-module_init(dw8250_init);
-
-static void __exit dw8250_exit(void)
-{
- platform_driver_unregister(&dw8250_platform_driver);
-}
-module_exit(dw8250_exit);
+module_platform_driver(dw8250_platform_driver);
MODULE_AUTHOR("Jamie Iles");
MODULE_LICENSE("GPL");
diff --git a/drivers/tty/serial/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index eaafb98..eaafb98 100644
--- a/drivers/tty/serial/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
diff --git a/drivers/tty/serial/8250_exar_st16c554.c b/drivers/tty/serial/8250/8250_exar_st16c554.c
index bf53aab..bf53aab 100644
--- a/drivers/tty/serial/8250_exar_st16c554.c
+++ b/drivers/tty/serial/8250/8250_exar_st16c554.c
diff --git a/drivers/tty/serial/8250_fourport.c b/drivers/tty/serial/8250/8250_fourport.c
index be15826..be15826 100644
--- a/drivers/tty/serial/8250_fourport.c
+++ b/drivers/tty/serial/8250/8250_fourport.c
diff --git a/drivers/tty/serial/8250/8250_fsl.c b/drivers/tty/serial/8250/8250_fsl.c
new file mode 100644
index 0000000..f4d3c47
--- /dev/null
+++ b/drivers/tty/serial/8250/8250_fsl.c
@@ -0,0 +1,63 @@
+#include <linux/serial_reg.h>
+#include <linux/serial_8250.h>
+
+#include "8250.h"
+
+/*
+ * Freescale 16550 UART "driver", Copyright (C) 2011 Paul Gortmaker.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This isn't a full driver; it just provides an alternate IRQ
+ * handler to deal with an erratum. Everything else is just
+ * using the bog standard 8250 support.
+ *
+ * We follow code flow of serial8250_default_handle_irq() but add
+ * a check for a break and insert a dummy read on the Rx for the
+ * immediately following IRQ event.
+ *
+ * We re-use the already existing "bug handling" lsr_saved_flags
+ * field to carry the "what we just did" information from one
+ * IRQ event to the next.
+ */
+
+int fsl8250_handle_irq(struct uart_port *port)
+{
+ unsigned char lsr, orig_lsr;
+ unsigned long flags;
+ unsigned int iir;
+ struct uart_8250_port *up =
+ container_of(port, struct uart_8250_port, port);
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ iir = port->serial_in(port, UART_IIR);
+ if (iir & UART_IIR_NO_INT) {
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ return 0;
+ }
+
+ /* This is the WAR; if last event was BRK, then read and return */
+ if (unlikely(up->lsr_saved_flags & UART_LSR_BI)) {
+ up->lsr_saved_flags &= ~UART_LSR_BI;
+ port->serial_in(port, UART_RX);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ return 1;
+ }
+
+ lsr = orig_lsr = up->port.serial_in(&up->port, UART_LSR);
+
+ if (lsr & (UART_LSR_DR | UART_LSR_BI))
+ lsr = serial8250_rx_chars(up, lsr);
+
+ serial8250_modem_status(up);
+
+ if (lsr & UART_LSR_THRE)
+ serial8250_tx_chars(up);
+
+ up->lsr_saved_flags = orig_lsr;
+ spin_unlock_irqrestore(&up->port.lock, flags);
+ return 1;
+}
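Presumably the platform/OF setup code then points the port at this handler before registering it; a hedged sketch, where the probe helper below is illustrative and not part of this patch:

	static int my_fsl_serial_setup(struct uart_port *port)	/* hypothetical */
	{
		int line;

		/* use the errata-aware IRQ handler instead of the default */
		port->handle_irq = fsl8250_handle_irq;

		line = serial8250_register_port(port);
		return line < 0 ? line : 0;
	}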
diff --git a/drivers/tty/serial/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c
index d8c0ffb..d8c0ffb 100644
--- a/drivers/tty/serial/8250_gsc.c
+++ b/drivers/tty/serial/8250/8250_gsc.c
diff --git a/drivers/tty/serial/8250_hp300.c b/drivers/tty/serial/8250/8250_hp300.c
index c13438c..c13438c 100644
--- a/drivers/tty/serial/8250_hp300.c
+++ b/drivers/tty/serial/8250/8250_hp300.c
diff --git a/drivers/tty/serial/8250_hub6.c b/drivers/tty/serial/8250/8250_hub6.c
index a5c778e..a5c778e 100644
--- a/drivers/tty/serial/8250_hub6.c
+++ b/drivers/tty/serial/8250/8250_hub6.c
diff --git a/drivers/tty/serial/8250_mca.c b/drivers/tty/serial/8250/8250_mca.c
index d20abf0..d20abf0 100644
--- a/drivers/tty/serial/8250_mca.c
+++ b/drivers/tty/serial/8250/8250_mca.c
diff --git a/drivers/tty/serial/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 825937a..da2b0b0 100644
--- a/drivers/tty/serial/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1092,6 +1092,14 @@ static int skip_tx_en_setup(struct serial_private *priv,
return pci_default_setup(priv, board, port, idx);
}
+static int kt_serial_setup(struct serial_private *priv,
+ const struct pciserial_board *board,
+ struct uart_port *port, int idx)
+{
+ port->flags |= UPF_IIR_ONCE;
+ return skip_tx_en_setup(priv, board, port, idx);
+}
+
static int pci_eg20t_init(struct pci_dev *dev)
{
#if defined(CONFIG_SERIAL_PCH_UART) || defined(CONFIG_SERIAL_PCH_UART_MODULE)
@@ -1110,7 +1118,18 @@ pci_xr17c154_setup(struct serial_private *priv,
return pci_default_setup(priv, board, port, idx);
}
-/* This should be in linux/pci_ids.h */
+static int try_enable_msi(struct pci_dev *dev)
+{
+ /* use MSI if available, but fall back to legacy interrupts otherwise */
+ pci_enable_msi(dev);
+ return 0;
+}
+
+static void disable_msi(struct pci_dev *dev)
+{
+ pci_disable_msi(dev);
+}
+
#define PCI_VENDOR_ID_SBSMODULARIO 0x124B
#define PCI_SUBVENDOR_ID_SBSMODULARIO 0x124B
#define PCI_DEVICE_ID_OCTPRO 0x0001
@@ -1133,9 +1152,14 @@ pci_xr17c154_setup(struct serial_private *priv,
#define PCI_DEVICE_ID_TITAN_800E 0xA014
#define PCI_DEVICE_ID_TITAN_200EI 0xA016
#define PCI_DEVICE_ID_TITAN_200EISI 0xA017
+#define PCI_DEVICE_ID_TITAN_400V3 0xA310
+#define PCI_DEVICE_ID_TITAN_410V3 0xA312
+#define PCI_DEVICE_ID_TITAN_800V3 0xA314
+#define PCI_DEVICE_ID_TITAN_800V3B 0xA315
#define PCI_DEVICE_ID_OXSEMI_16PCI958 0x9538
#define PCIE_DEVICE_ID_NEO_2_OX_IBM 0x00F6
#define PCI_DEVICE_ID_PLX_CRONYX_OMEGA 0xc001
+#define PCI_DEVICE_ID_INTEL_PATSBURG_KT 0x1d3d
/* Unknown vendors/cards - this should not be in linux/pci_ids.h */
#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
@@ -1220,6 +1244,15 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.subdevice = PCI_ANY_ID,
.setup = ce4100_serial_setup,
},
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_PATSBURG_KT,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .init = try_enable_msi,
+ .setup = kt_serial_setup,
+ .exit = disable_msi,
+ },
/*
* ITE
*/
@@ -3414,6 +3447,18 @@ static struct pci_device_id serial_pci_tbl[] = {
{ PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200EISI,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
pbn_oxsemi_2_4000000 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400V3,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_4_921600 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_410V3,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_4_921600 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800V3,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_4_921600 },
+ { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800V3B,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+ pbn_b0_4_921600 },
{ PCI_VENDOR_ID_SIIG, PCI_DEVICE_ID_SIIG_1S_10x_550,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,
diff --git a/drivers/tty/serial/8250_pnp.c b/drivers/tty/serial/8250/8250_pnp.c
index a2f2365..a2f2365 100644
--- a/drivers/tty/serial/8250_pnp.c
+++ b/drivers/tty/serial/8250/8250_pnp.c
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
new file mode 100644
index 0000000..591f801
--- /dev/null
+++ b/drivers/tty/serial/8250/Kconfig
@@ -0,0 +1,280 @@
+#
+# The 8250/16550 serial drivers. You shouldn't be in this list unless
+# you somehow have an implicit or explicit dependency on SERIAL_8250.
+#
+
+config SERIAL_8250
+ tristate "8250/16550 and compatible serial support"
+ select SERIAL_CORE
+ ---help---
+ This selects whether you want to include the driver for the standard
+ serial ports. The standard answer is Y. People who might say N
+ here are those that are setting up dedicated Ethernet WWW/FTP
+ servers, or users that have one of the various bus mice instead of a
+ serial mouse and don't intend to use their machine's standard serial
+ port for anything. (Note that the Cyclades and Stallion multi
+ serial port drivers do not need this driver built in for them to
+ work.)
+
+ To compile this driver as a module, choose M here: the
+ module will be called 8250.
+ [WARNING: Do not compile this driver as a module if you are using
+ non-standard serial ports, since the configuration information will
+ be lost when the driver is unloaded. This limitation may be lifted
+ in the future.]
+
+ BTW1: If you have a mouseman serial mouse which is not recognized by
+ the X window system, try running gpm first.
+
+ BTW2: If you intend to use a software modem (also called Winmodem)
+ under Linux, forget it. These modems are crippled and require
+ proprietary drivers which are only available under Windows.
+
+ Most people will say Y or M here, so that they can use serial mice,
+ modems and similar devices connecting to the standard serial ports.
+
+config SERIAL_8250_CONSOLE
+ bool "Console on 8250/16550 and compatible serial port"
+ depends on SERIAL_8250=y
+ select SERIAL_CORE_CONSOLE
+ ---help---
+ If you say Y here, it will be possible to use a serial port as the
+ system console (the system console is the device which receives all
+ kernel messages and warnings and which allows logins in single user
+ mode). This could be useful if some terminal or printer is connected
+ to that serial port.
+
+ Even if you say Y here, the currently visible virtual console
+ (/dev/tty0) will still be used as the system console by default, but
+ you can alter that using a kernel command line option such as
+ "console=ttyS1". (Try "man bootparam" or see the documentation of
+ your boot loader (grub or lilo or loadlin) about how to pass options
+ to the kernel at boot time.)
+
+ If you don't have a VGA card installed and you say Y here, the
+ kernel will automatically use the first serial line, /dev/ttyS0, as
+ system console.
+
+ You can set that using a kernel command line option such as
+ "console=uart8250,io,0x3f8,9600n8" or
+ "console=uart8250,mmio,0xff5e0000,115200n8",
+ and the kernel will switch to the normal serial console once the
+ corresponding port is ready.
+ With "earlycon=uart8250,io,0x3f8,9600n8" or
+ "earlycon=uart8250,mmio,0xff5e0000,115200n8"
+ only the early console is set up.
+
+ If unsure, say N.
+
+config FIX_EARLYCON_MEM
+ bool
+ depends on X86
+ default y
+
+config SERIAL_8250_GSC
+ tristate
+ depends on SERIAL_8250 && GSC
+ default SERIAL_8250
+
+config SERIAL_8250_PCI
+ tristate "8250/16550 PCI device support" if EXPERT
+ depends on SERIAL_8250 && PCI
+ default SERIAL_8250
+ help
+ This builds standard PCI serial support. You may be able to
+ disable this feature if you only need legacy serial support.
+ Saves about 9K.
+
+config SERIAL_8250_PNP
+ tristate "8250/16550 PNP device support" if EXPERT
+ depends on SERIAL_8250 && PNP
+ default SERIAL_8250
+ help
+ This builds standard PNP serial support. You may be able to
+ disable this feature if you only need legacy serial support.
+
+config SERIAL_8250_HP300
+ tristate
+ depends on SERIAL_8250 && HP300
+ default SERIAL_8250
+
+config SERIAL_8250_CS
+ tristate "8250/16550 PCMCIA device support"
+ depends on PCMCIA && SERIAL_8250
+ ---help---
+ Say Y here to enable support for 16-bit PCMCIA serial devices,
+ including serial port cards, modems, and the modem functions of
+ multi-function Ethernet/modem cards. (PCMCIA- or PC-cards are
+ credit-card size devices often used with laptops.)
+
+ To compile this driver as a module, choose M here: the
+ module will be called serial_cs.
+
+ If unsure, say N.
+
+config SERIAL_8250_NR_UARTS
+ int "Maximum number of 8250/16550 serial ports"
+ depends on SERIAL_8250
+ default "4"
+ help
+ Set this to the number of serial ports you want the driver
+ to support. This includes any ports discovered via ACPI or
+ PCI enumeration and any ports that may be added at run-time
+ via hot-plug, or any ISA multi-port serial cards.
+
+config SERIAL_8250_RUNTIME_UARTS
+ int "Number of 8250/16550 serial ports to register at runtime"
+ depends on SERIAL_8250
+ range 0 SERIAL_8250_NR_UARTS
+ default "4"
+ help
+ Set this to the maximum number of serial ports you want
+ the kernel to register at boot time. This can be overridden
+ with the module parameter "nr_uarts", or the boot-time
+ parameter "8250.nr_uarts".
+
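[Editorial note: as a hedged illustration of the override mentioned above (a sketch, not the actual 8250 source; the default simply mirrors the Kconfig default), the driver side of such a knob is typically a plain module_param(), which is what makes both "modprobe 8250 nr_uarts=N" and, for a built-in driver, the boot parameter "8250.nr_uarts=N" work.]

#include <linux/module.h>

/* sketch only: runtime-adjustable port count exported as a module parameter */
static unsigned int nr_uarts = 4;	/* mirrors the Kconfig default above */
module_param(nr_uarts, uint, 0644);
MODULE_PARM_DESC(nr_uarts, "Maximum number of UARTs registered at boot time");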
+config SERIAL_8250_EXTENDED
+ bool "Extended 8250/16550 serial driver options"
+ depends on SERIAL_8250
+ help
+ If you wish to use any non-standard features of the standard "dumb"
+ driver, say Y here. This includes HUB6 support, shared serial
+ interrupts, special multiport support, support for more than the
+ four COM 1/2/3/4 boards, etc.
+
+ Note that the answer to this question won't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about serial driver options. If unsure, say N.
+
+config SERIAL_8250_MANY_PORTS
+ bool "Support more than 4 legacy serial ports"
+ depends on SERIAL_8250_EXTENDED && !IA64
+ help
+ Say Y here if you have dumb serial boards other than the four
+ standard COM 1/2/3/4 ports. This may happen if you have an AST
+ FourPort, Accent Async, Boca (read the Boca mini-HOWTO, available
+ from <http://www.tldp.org/docs.html#howto>), or other custom
+ serial port hardware which acts similar to standard serial port
+ hardware. If you only use the standard COM 1/2/3/4 ports, you can
+ say N here to save some memory. You can also say Y if you have an
+ "intelligent" multiport card such as Cyclades, Digiboards, etc.
+
+#
+# Multi-port serial cards
+#
+
+config SERIAL_8250_FOURPORT
+ tristate "Support Fourport cards"
+ depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
+ help
+ Say Y here if you have an AST FourPort serial board.
+
+ To compile this driver as a module, choose M here: the module
+ will be called 8250_fourport.
+
+config SERIAL_8250_ACCENT
+ tristate "Support Accent cards"
+ depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
+ help
+ Say Y here if you have an Accent Async serial board.
+
+ To compile this driver as a module, choose M here: the module
+ will be called 8250_accent.
+
+config SERIAL_8250_BOCA
+ tristate "Support Boca cards"
+ depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
+ help
+ Say Y here if you have a Boca serial board. Please read the Boca
+ mini-HOWTO, available from <http://www.tldp.org/docs.html#howto>
+
+ To compile this driver as a module, choose M here: the module
+ will be called 8250_boca.
+
+config SERIAL_8250_EXAR_ST16C554
+ tristate "Support Exar ST16C554/554D Quad UART"
+ depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
+ help
+ The Uplogix Envoy TU301 uses this Exar Quad UART. If you are
+ tinkering with your Envoy TU301, or have a machine with this UART,
+ say Y here.
+
+ To compile this driver as a module, choose M here: the module
+ will be called 8250_exar_st16c554.
+
+config SERIAL_8250_HUB6
+ tristate "Support Hub6 cards"
+ depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
+ help
+ Say Y here if you have a HUB6 serial board.
+
+ To compile this driver as a module, choose M here: the module
+ will be called 8250_hub6.
+
+#
+# Misc. options/drivers.
+#
+
+config SERIAL_8250_SHARE_IRQ
+ bool "Support for sharing serial interrupts"
+ depends on SERIAL_8250_EXTENDED
+ help
+ Some serial boards have hardware support which allows multiple dumb
+ serial ports on the same board to share a single IRQ. To enable
+ support for this in the serial driver, say Y here.
+
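[Editorial note: a hedged sketch of what that sharing looks like at the request_irq() level; the my_* names and the status register offset are hypothetical, not the actual 8250 code. Each port on the shared line passes IRQF_SHARED and its handler returns IRQ_NONE when its own hardware did not raise the interrupt, letting the kernel try the next sharer.]

#include <linux/interrupt.h>
#include <linux/io.h>

struct my_port {
	void __iomem *membase;		/* illustrative per-port state */
};

static irqreturn_t my_port_irq(int irq, void *dev_id)
{
	struct my_port *port = dev_id;

	/* hypothetical status register at offset 0x8; bit 0 = "mine" */
	if (!(readl(port->membase + 0x8) & 0x1))
		return IRQ_NONE;	/* not ours, offer it to the next handler */

	/* ... service RX/TX for this port here ... */
	return IRQ_HANDLED;
}

static int my_port_setup_irq(struct my_port *port, int irq)
{
	/* IRQF_SHARED is what allows several ports to live on one line */
	return request_irq(irq, my_port_irq, IRQF_SHARED, "my_serial", port);
}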
+config SERIAL_8250_DETECT_IRQ
+ bool "Autodetect IRQ on standard ports (unsafe)"
+ depends on SERIAL_8250_EXTENDED
+ help
+ Say Y here if you want the kernel to try to guess which IRQ
+ to use for your serial port.
+
+ This is considered unsafe; it is far better to configure the IRQ in
+ a boot script using the setserial command.
+
+ If unsure, say N.
+
+config SERIAL_8250_RSA
+ bool "Support RSA serial ports"
+ depends on SERIAL_8250_EXTENDED
+ help
+ ::: To be written :::
+
+config SERIAL_8250_MCA
+ tristate "Support 8250-type ports on MCA buses"
+ depends on SERIAL_8250 != n && MCA
+ help
+ Say Y here if you have MCA serial ports.
+
+ To compile this driver as a module, choose M here: the module
+ will be called 8250_mca.
+
+config SERIAL_8250_ACORN
+ tristate "Acorn expansion card serial port support"
+ depends on ARCH_ACORN && SERIAL_8250
+ help
+ If you have an Atomwide Serial card or Serial Port card for an Acorn
+ system, say Y to this option. The driver can handle 1, 2, or 3 port
+ cards. If unsure, say N.
+
+config SERIAL_8250_RM9K
+ bool "Support for MIPS RM9xxx integrated serial port"
+ depends on SERIAL_8250 != n && SERIAL_RM9000
+ select SERIAL_8250_SHARE_IRQ
+ help
+ Selecting this option will add support for the integrated serial
+ port hardware found on MIPS RM9122 and similar processors.
+ If unsure, say N.
+
+config SERIAL_8250_FSL
+ bool
+ depends on SERIAL_8250_CONSOLE && PPC_UDBG_16550
+ default PPC
+
+config SERIAL_8250_DW
+ tristate "Support for Synopsys DesignWare 8250 quirks"
+ depends on SERIAL_8250 && OF
+ help
+ Selecting this option will enable handling of the extra features
+ present in the Synopsys DesignWare APB UART.
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
new file mode 100644
index 0000000..867bba7
--- /dev/null
+++ b/drivers/tty/serial/8250/Makefile
@@ -0,0 +1,20 @@
+#
+# Makefile for the 8250 serial device drivers.
+#
+
+obj-$(CONFIG_SERIAL_8250) += 8250.o
+obj-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o
+obj-$(CONFIG_SERIAL_8250_GSC) += 8250_gsc.o
+obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o
+obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o
+obj-$(CONFIG_SERIAL_8250_CS) += serial_cs.o
+obj-$(CONFIG_SERIAL_8250_ACORN) += 8250_acorn.o
+obj-$(CONFIG_SERIAL_8250_CONSOLE) += 8250_early.o
+obj-$(CONFIG_SERIAL_8250_FOURPORT) += 8250_fourport.o
+obj-$(CONFIG_SERIAL_8250_ACCENT) += 8250_accent.o
+obj-$(CONFIG_SERIAL_8250_BOCA) += 8250_boca.o
+obj-$(CONFIG_SERIAL_8250_EXAR_ST16C554) += 8250_exar_st16c554.o
+obj-$(CONFIG_SERIAL_8250_HUB6) += 8250_hub6.o
+obj-$(CONFIG_SERIAL_8250_MCA) += 8250_mca.o
+obj-$(CONFIG_SERIAL_8250_FSL) += 8250_fsl.o
+obj-$(CONFIG_SERIAL_8250_DW) += 8250_dw.o
diff --git a/drivers/tty/serial/serial_cs.c b/drivers/tty/serial/8250/serial_cs.c
index eef736f..8609060 100644
--- a/drivers/tty/serial/serial_cs.c
+++ b/drivers/tty/serial/8250/serial_cs.c
@@ -317,7 +317,7 @@ static int serial_probe(struct pcmcia_device *link)
info->p_dev = link;
link->priv = info;
- link->config_flags |= CONF_ENABLE_IRQ;
+ link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
if (do_sound)
link->config_flags |= CONF_ENABLE_SPKR;
@@ -445,7 +445,7 @@ static int simple_config(struct pcmcia_device *link)
/* First pass: look for a config entry that looks normal.
* Two tries: without IO aliases, then with aliases */
- link->config_flags |= CONF_AUTO_SET_VPP | CONF_AUTO_SET_IO;
+ link->config_flags |= CONF_AUTO_SET_VPP;
for (try = 0; try < 4; try++)
if (!pcmcia_loop_config(link, simple_config_check, &try))
goto found_port;
@@ -501,7 +501,8 @@ static int multi_config_check_notpicky(struct pcmcia_device *p_dev,
{
int *base2 = priv_data;
- if (!p_dev->resource[0]->end || !p_dev->resource[1]->end)
+ if (!p_dev->resource[0]->end || !p_dev->resource[1]->end ||
+ p_dev->resource[0]->start + 8 != p_dev->resource[1]->start)
return -ENODEV;
p_dev->resource[0]->end = p_dev->resource[1]->end = 8;
@@ -520,7 +521,6 @@ static int multi_config(struct pcmcia_device *link)
struct serial_info *info = link->priv;
int i, base2 = 0;
- link->config_flags |= CONF_AUTO_SET_IO;
/* First, look for a generic full-sized window */
if (!pcmcia_loop_config(link, multi_config_check, &info->multi))
base2 = link->resource[0]->start + 8;
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 925a1e5..2de9924 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -5,274 +5,7 @@
menu "Serial drivers"
depends on HAS_IOMEM
-#
-# The new 8250/16550 serial drivers
-config SERIAL_8250
- tristate "8250/16550 and compatible serial support"
- select SERIAL_CORE
- ---help---
- This selects whether you want to include the driver for the standard
- serial ports. The standard answer is Y. People who might say N
- here are those that are setting up dedicated Ethernet WWW/FTP
- servers, or users that have one of the various bus mice instead of a
- serial mouse and don't intend to use their machine's standard serial
- port for anything. (Note that the Cyclades and Stallion multi
- serial port drivers do not need this driver built in for them to
- work.)
-
- To compile this driver as a module, choose M here: the
- module will be called 8250.
- [WARNING: Do not compile this driver as a module if you are using
- non-standard serial ports, since the configuration information will
- be lost when the driver is unloaded. This limitation may be lifted
- in the future.]
-
- BTW1: If you have a mouseman serial mouse which is not recognized by
- the X window system, try running gpm first.
-
- BTW2: If you intend to use a software modem (also called Winmodem)
- under Linux, forget it. These modems are crippled and require
- proprietary drivers which are only available under Windows.
-
- Most people will say Y or M here, so that they can use serial mice,
- modems and similar devices connecting to the standard serial ports.
-
-config SERIAL_8250_CONSOLE
- bool "Console on 8250/16550 and compatible serial port"
- depends on SERIAL_8250=y
- select SERIAL_CORE_CONSOLE
- ---help---
- If you say Y here, it will be possible to use a serial port as the
- system console (the system console is the device which receives all
- kernel messages and warnings and which allows logins in single user
- mode). This could be useful if some terminal or printer is connected
- to that serial port.
-
- Even if you say Y here, the currently visible virtual console
- (/dev/tty0) will still be used as the system console by default, but
- you can alter that using a kernel command line option such as
- "console=ttyS1". (Try "man bootparam" or see the documentation of
- your boot loader (grub or lilo or loadlin) about how to pass options
- to the kernel at boot time.)
-
- If you don't have a VGA card installed and you say Y here, the
- kernel will automatically use the first serial line, /dev/ttyS0, as
- system console.
-
- You can set that using a kernel command line option such as
- "console=uart8250,io,0x3f8,9600n8"
- "console=uart8250,mmio,0xff5e0000,115200n8".
- and it will switch to normal serial console when the corresponding
- port is ready.
- "earlycon=uart8250,io,0x3f8,9600n8"
- "earlycon=uart8250,mmio,0xff5e0000,115200n8".
- it will not only setup early console.
-
- If unsure, say N.
-
-config FIX_EARLYCON_MEM
- bool
- depends on X86
- default y
-
-config SERIAL_8250_GSC
- tristate
- depends on SERIAL_8250 && GSC
- default SERIAL_8250
-
-config SERIAL_8250_PCI
- tristate "8250/16550 PCI device support" if EXPERT
- depends on SERIAL_8250 && PCI
- default SERIAL_8250
- help
- This builds standard PCI serial support. You may be able to
- disable this feature if you only need legacy serial support.
- Saves about 9K.
-
-config SERIAL_8250_PNP
- tristate "8250/16550 PNP device support" if EXPERT
- depends on SERIAL_8250 && PNP
- default SERIAL_8250
- help
- This builds standard PNP serial support. You may be able to
- disable this feature if you only need legacy serial support.
-
-config SERIAL_8250_HP300
- tristate
- depends on SERIAL_8250 && HP300
- default SERIAL_8250
-
-config SERIAL_8250_CS
- tristate "8250/16550 PCMCIA device support"
- depends on PCMCIA && SERIAL_8250
- ---help---
- Say Y here to enable support for 16-bit PCMCIA serial devices,
- including serial port cards, modems, and the modem functions of
- multi-function Ethernet/modem cards. (PCMCIA- or PC-cards are
- credit-card size devices often used with laptops.)
-
- To compile this driver as a module, choose M here: the
- module will be called serial_cs.
-
- If unsure, say N.
-
-config SERIAL_8250_NR_UARTS
- int "Maximum number of 8250/16550 serial ports"
- depends on SERIAL_8250
- default "4"
- help
- Set this to the number of serial ports you want the driver
- to support. This includes any ports discovered via ACPI or
- PCI enumeration and any ports that may be added at run-time
- via hot-plug, or any ISA multi-port serial cards.
-
-config SERIAL_8250_RUNTIME_UARTS
- int "Number of 8250/16550 serial ports to register at runtime"
- depends on SERIAL_8250
- range 0 SERIAL_8250_NR_UARTS
- default "4"
- help
- Set this to the maximum number of serial ports you want
- the kernel to register at boot time. This can be overridden
- with the module parameter "nr_uarts", or boot-time parameter
- 8250.nr_uarts
-
-config SERIAL_8250_EXTENDED
- bool "Extended 8250/16550 serial driver options"
- depends on SERIAL_8250
- help
- If you wish to use any non-standard features of the standard "dumb"
- driver, say Y here. This includes HUB6 support, shared serial
- interrupts, special multiport support, support for more than the
- four COM 1/2/3/4 boards, etc.
-
- Note that the answer to this question won't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the questions about serial driver options. If unsure, say N.
-
-config SERIAL_8250_MANY_PORTS
- bool "Support more than 4 legacy serial ports"
- depends on SERIAL_8250_EXTENDED && !IA64
- help
- Say Y here if you have dumb serial boards other than the four
- standard COM 1/2/3/4 ports. This may happen if you have an AST
- FourPort, Accent Async, Boca (read the Boca mini-HOWTO, available
- from <http://www.tldp.org/docs.html#howto>), or other custom
- serial port hardware which acts similar to standard serial port
- hardware. If you only use the standard COM 1/2/3/4 ports, you can
- say N here to save some memory. You can also say Y if you have an
- "intelligent" multiport card such as Cyclades, Digiboards, etc.
-
-#
-# Multi-port serial cards
-#
-
-config SERIAL_8250_FOURPORT
- tristate "Support Fourport cards"
- depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
- help
- Say Y here if you have an AST FourPort serial board.
-
- To compile this driver as a module, choose M here: the module
- will be called 8250_fourport.
-
-config SERIAL_8250_ACCENT
- tristate "Support Accent cards"
- depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
- help
- Say Y here if you have an Accent Async serial board.
-
- To compile this driver as a module, choose M here: the module
- will be called 8250_accent.
-
-config SERIAL_8250_BOCA
- tristate "Support Boca cards"
- depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
- help
- Say Y here if you have a Boca serial board. Please read the Boca
- mini-HOWTO, available from <http://www.tldp.org/docs.html#howto>
-
- To compile this driver as a module, choose M here: the module
- will be called 8250_boca.
-
-config SERIAL_8250_EXAR_ST16C554
- tristate "Support Exar ST16C554/554D Quad UART"
- depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
- help
- The Uplogix Envoy TU301 uses this Exar Quad UART. If you are
- tinkering with your Envoy TU301, or have a machine with this UART,
- say Y here.
-
- To compile this driver as a module, choose M here: the module
- will be called 8250_exar_st16c554.
-
-config SERIAL_8250_HUB6
- tristate "Support Hub6 cards"
- depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
- help
- Say Y here if you have a HUB6 serial board.
-
- To compile this driver as a module, choose M here: the module
- will be called 8250_hub6.
-
-config SERIAL_8250_SHARE_IRQ
- bool "Support for sharing serial interrupts"
- depends on SERIAL_8250_EXTENDED
- help
- Some serial boards have hardware support which allows multiple dumb
- serial ports on the same board to share a single IRQ. To enable
- support for this in the serial driver, say Y here.
-
-config SERIAL_8250_DETECT_IRQ
- bool "Autodetect IRQ on standard ports (unsafe)"
- depends on SERIAL_8250_EXTENDED
- help
- Say Y here if you want the kernel to try to guess which IRQ
- to use for your serial port.
-
- This is considered unsafe; it is far better to configure the IRQ in
- a boot script using the setserial command.
-
- If unsure, say N.
-
-config SERIAL_8250_RSA
- bool "Support RSA serial ports"
- depends on SERIAL_8250_EXTENDED
- help
- ::: To be written :::
-
-config SERIAL_8250_MCA
- tristate "Support 8250-type ports on MCA buses"
- depends on SERIAL_8250 != n && MCA
- help
- Say Y here if you have a MCA serial ports.
-
- To compile this driver as a module, choose M here: the module
- will be called 8250_mca.
-
-config SERIAL_8250_ACORN
- tristate "Acorn expansion card serial port support"
- depends on ARCH_ACORN && SERIAL_8250
- help
- If you have an Atomwide Serial card or Serial Port card for an Acorn
- system, say Y to this option. The driver can handle 1, 2, or 3 port
- cards. If unsure, say N.
-
-config SERIAL_8250_RM9K
- bool "Support for MIPS RM9xxx integrated serial port"
- depends on SERIAL_8250 != n && SERIAL_RM9000
- select SERIAL_8250_SHARE_IRQ
- help
- Selecting this option will add support for the integrated serial
- port hardware found on MIPS RM9122 and similar processors.
- If unsure, say N.
-
-config SERIAL_8250_DW
- tristate "Support for Synopsys DesignWare 8250 quirks"
- depends on SERIAL_8250 && OF
- help
- Selecting this option will enable handling of the extra features
- present in the Synopsys DesignWare APB UART.
+source "drivers/tty/serial/8250/Kconfig"
comment "Non-8250 serial port support"
@@ -457,7 +190,7 @@ config SERIAL_SAMSUNG
config SERIAL_SAMSUNG_UARTS_4
bool
depends on ARM && PLAT_SAMSUNG
- default y if CPU_S3C2443
+ default y if !(CPU_S3C2410 || SERIAL_S3C2412 || CPU_S3C2440 || CPU_S3C2442)
help
Internal node for the common case of 4 Samsung compatible UARTs
@@ -465,7 +198,7 @@ config SERIAL_SAMSUNG_UARTS
int
depends on ARM && PLAT_SAMSUNG
default 6 if ARCH_S5P6450
- default 4 if SERIAL_SAMSUNG_UARTS_4
+ default 4 if SERIAL_SAMSUNG_UARTS_4 || CPU_S3C2416
default 3
help
Select the number of available UART ports for the Samsung S3C
@@ -495,46 +228,27 @@ config SERIAL_SAMSUNG_CONSOLE
your boot loader about how to pass options to the kernel at
boot time.)
-config SERIAL_S3C2410
- tristate "Samsung S3C2410 Serial port support"
- depends on SERIAL_SAMSUNG && CPU_S3C2410
- default y if CPU_S3C2410
- help
- Serial port support for the Samsung S3C2410 SoC
-
-config SERIAL_S3C2412
- tristate "Samsung S3C2412/S3C2413 Serial port support"
- depends on SERIAL_SAMSUNG && CPU_S3C2412
- default y if CPU_S3C2412
- help
- Serial port support for the Samsung S3C2412 and S3C2413 SoC
-
-config SERIAL_S3C2440
- tristate "Samsung S3C2440/S3C2442/S3C2416 Serial port support"
- depends on SERIAL_SAMSUNG && (CPU_S3C2440 || CPU_S3C2442 || CPU_S3C2416)
- default y if CPU_S3C2440
- default y if CPU_S3C2442
- select SERIAL_SAMSUNG_UARTS_4 if CPU_S3C2416
- help
- Serial port support for the Samsung S3C2440, S3C2416 and S3C2442 SoC
-
-config SERIAL_S3C6400
- tristate "Samsung S3C6400/S3C6410/S5P6440/S5P6450/S5PC100 Serial port support"
- depends on SERIAL_SAMSUNG && (CPU_S3C6400 || CPU_S3C6410 || CPU_S5P6440 || CPU_S5P6450 || CPU_S5PC100)
- select SERIAL_SAMSUNG_UARTS_4
- default y
- help
- Serial port support for the Samsung S3C6400, S3C6410, S5P6440, S5P6450
- and S5PC100 SoCs
-
-config SERIAL_S5PV210
- tristate "Samsung S5PV210 Serial port support"
- depends on SERIAL_SAMSUNG && (CPU_S5PV210 || CPU_EXYNOS4210 || SOC_EXYNOS4212)
- select SERIAL_SAMSUNG_UARTS_4 if (CPU_S5PV210 || CPU_EXYNOS4210 || SOC_EXYNOS4212)
- default y
- help
- Serial port support for Samsung's S5P Family of SoC's
-
+config SERIAL_SIRFSOC
+ tristate "SiRF SoC Platform Serial port support"
+ depends on ARM && ARCH_PRIMA2
+ select SERIAL_CORE
+ help
+ Support for the on-chip UART on the CSR SiRFprimaII series,
+ providing /dev/ttySiRF0, 1 and 2 (note, some machines may not
+ provide all of these ports, depending on how the serial port
+ pins are configured).
+
+config SERIAL_SIRFSOC_CONSOLE
+ bool "Support for console on SiRF SoC serial port"
+ depends on SERIAL_SIRFSOC=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Even if you say Y here, the currently visible virtual console
+ (/dev/tty0) will still be used as the system console by default, but
+ you can alter that using a kernel command line option such as
+ "console=ttySiRFx". (Try "man bootparam" or see the documentation of
+ your boot loader about how to pass options to the kernel at
+ boot time.)
config SERIAL_MAX3100
tristate "MAX3100 support"
@@ -550,15 +264,6 @@ config SERIAL_MAX3107
help
MAX3107 chip support
-config SERIAL_MAX3107_AAVA
- tristate "MAX3107 AAVA platform support"
- depends on X86_MRST && SERIAL_MAX3107 && GPIOLIB
- select SERIAL_CORE
- help
- Support for the MAX3107 chip configuration found on the AAVA
- platform. Includes the extra initialisation and GPIO support
- neded for this device.
-
config SERIAL_DZ
bool "DECstation DZ serial driver"
depends on MACH_DECSTATION && 32BIT
@@ -808,7 +513,7 @@ config BFIN_UART3_CTSRTS
config SERIAL_IMX
bool "IMX serial port support"
- depends on ARM && (ARCH_IMX || ARCH_MXC)
+ depends on ARCH_MXC
select SERIAL_CORE
select RATIONAL
help
@@ -1324,7 +1029,7 @@ config SERIAL_OF_PLATFORM
config SERIAL_OMAP
tristate "OMAP serial port support"
- depends on ARCH_OMAP2 || ARCH_OMAP3 || ARCH_OMAP4
+ depends on ARCH_OMAP2PLUS
select SERIAL_CORE
help
If you have a machine based on a Texas Instruments OMAP CPU you
@@ -1575,6 +1280,15 @@ config SERIAL_PCH_UART
ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
+config SERIAL_PCH_UART_CONSOLE
+ bool "Support for console on Intel EG20T PCH UART/OKI SEMICONDUCTOR ML7213 IOH"
+ depends on SERIAL_PCH_UART=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Say Y here if you wish to use the PCH UART as the system console
+ (the system console is the device which receives all kernel messages and
+ warnings and which allows logins in single user mode).
+
config SERIAL_MSM_SMD
bool "Enable tty device interface for some SMD ports"
default n
@@ -1610,4 +1324,27 @@ config SERIAL_XILINX_PS_UART_CONSOLE
help
Enable a Xilinx PS UART port to be the system console.
+config SERIAL_AR933X
+ bool "AR933X serial port support"
+ depends on SOC_AR933X
+ select SERIAL_CORE
+ help
+ If you have an Atheros AR933X SOC based board and want to use the
+ built-in UART of the SoC, say Y to this option.
+
+config SERIAL_AR933X_CONSOLE
+ bool "Console on AR933X serial port"
+ depends on SERIAL_AR933X=y
+ select SERIAL_CORE_CONSOLE
+ help
+ Enable a built-in UART port of the AR933X to be the system console.
+
+config SERIAL_AR933X_NR_UARTS
+ int "Maximum number of AR933X serial ports"
+ depends on SERIAL_AR933X
+ default "2"
+ help
+ Set this to the number of serial ports you want the driver
+ to support.
+
endmenu
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index e10cf5b..fef32e1 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -14,21 +14,9 @@ obj-$(CONFIG_SERIAL_SUNZILOG) += sunzilog.o
obj-$(CONFIG_SERIAL_SUNSU) += sunsu.o
obj-$(CONFIG_SERIAL_SUNSAB) += sunsab.o
-obj-$(CONFIG_SERIAL_8250) += 8250.o
-obj-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o
-obj-$(CONFIG_SERIAL_8250_GSC) += 8250_gsc.o
-obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o
-obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o
-obj-$(CONFIG_SERIAL_8250_CS) += serial_cs.o
-obj-$(CONFIG_SERIAL_8250_ACORN) += 8250_acorn.o
-obj-$(CONFIG_SERIAL_8250_CONSOLE) += 8250_early.o
-obj-$(CONFIG_SERIAL_8250_FOURPORT) += 8250_fourport.o
-obj-$(CONFIG_SERIAL_8250_ACCENT) += 8250_accent.o
-obj-$(CONFIG_SERIAL_8250_BOCA) += 8250_boca.o
-obj-$(CONFIG_SERIAL_8250_EXAR_ST16C554) += 8250_exar_st16c554.o
-obj-$(CONFIG_SERIAL_8250_HUB6) += 8250_hub6.o
-obj-$(CONFIG_SERIAL_8250_MCA) += 8250_mca.o
-obj-$(CONFIG_SERIAL_8250_DW) += 8250_dw.o
+# Now bring in any enabled 8250/16450/16550 type drivers.
+obj-$(CONFIG_SERIAL_8250) += 8250/
+
obj-$(CONFIG_SERIAL_AMBA_PL010) += amba-pl010.o
obj-$(CONFIG_SERIAL_AMBA_PL011) += amba-pl011.o
obj-$(CONFIG_SERIAL_CLPS711X) += clps711x.o
@@ -39,14 +27,8 @@ obj-$(CONFIG_SERIAL_BCM63XX) += bcm63xx_uart.o
obj-$(CONFIG_SERIAL_BFIN) += bfin_uart.o
obj-$(CONFIG_SERIAL_BFIN_SPORT) += bfin_sport_uart.o
obj-$(CONFIG_SERIAL_SAMSUNG) += samsung.o
-obj-$(CONFIG_SERIAL_S3C2410) += s3c2410.o
-obj-$(CONFIG_SERIAL_S3C2412) += s3c2412.o
-obj-$(CONFIG_SERIAL_S3C2440) += s3c2440.o
-obj-$(CONFIG_SERIAL_S3C6400) += s3c6400.o
-obj-$(CONFIG_SERIAL_S5PV210) += s5pv210.o
obj-$(CONFIG_SERIAL_MAX3100) += max3100.o
obj-$(CONFIG_SERIAL_MAX3107) += max3107.o
-obj-$(CONFIG_SERIAL_MAX3107_AAVA) += max3107-aava.o
obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o
obj-$(CONFIG_SERIAL_MUX) += mux.o
obj-$(CONFIG_SERIAL_68328) += 68328serial.o
@@ -94,3 +76,5 @@ obj-$(CONFIG_SERIAL_MSM_SMD) += msm_smd_tty.o
obj-$(CONFIG_SERIAL_MXS_AUART) += mxs-auart.o
obj-$(CONFIG_SERIAL_LANTIQ) += lantiq.o
obj-$(CONFIG_SERIAL_XILINX_PS_UART) += xilinx_uartps.o
+obj-$(CONFIG_SERIAL_SIRFSOC) += sirfsoc_uart.o
+obj-$(CONFIG_SERIAL_AR933X) += ar933x_uart.o
diff --git a/drivers/tty/serial/amba-pl010.c b/drivers/tty/serial/amba-pl010.c
index efdf92c..0d91a54 100644
--- a/drivers/tty/serial/amba-pl010.c
+++ b/drivers/tty/serial/amba-pl010.c
@@ -795,6 +795,8 @@ static struct amba_id pl010_ids[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, pl010_ids);
+
static struct amba_driver pl010_driver = {
.drv = {
.name = "uart-pl010",
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 00233af..6800f5f 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -159,6 +159,7 @@ struct uart_amba_port {
unsigned int fifosize; /* vendor-specific */
unsigned int lcrh_tx; /* vendor-specific */
unsigned int lcrh_rx; /* vendor-specific */
+ unsigned int old_cr; /* state during shutdown */
bool autorts;
char type[12];
bool interrupt_may_hang; /* vendor-specific */
@@ -268,7 +269,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
struct dma_slave_config tx_conf = {
.dst_addr = uap->port.mapbase + UART01x_DR,
.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
- .direction = DMA_TO_DEVICE,
+ .direction = DMA_MEM_TO_DEV,
.dst_maxburst = uap->fifosize >> 1,
};
struct dma_chan *chan;
@@ -301,7 +302,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
struct dma_slave_config rx_conf = {
.src_addr = uap->port.mapbase + UART01x_DR,
.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
- .direction = DMA_FROM_DEVICE,
+ .direction = DMA_DEV_TO_MEM,
.src_maxburst = uap->fifosize >> 1,
};
@@ -480,7 +481,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
return -EBUSY;
}
- desc = dma_dev->device_prep_slave_sg(chan, &dmatx->sg, 1, DMA_TO_DEVICE,
+ desc = dma_dev->device_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
@@ -676,7 +677,7 @@ static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
dma_dev = rxchan->device;
desc = rxchan->device->device_prep_slave_sg(rxchan, &sgbuf->sg, 1,
- DMA_FROM_DEVICE,
+ DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
/*
* If the DMA engine is busy and cannot prepare a
@@ -1411,7 +1412,9 @@ static int pl011_startup(struct uart_port *port)
while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
barrier();
- cr = UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
+ /* restore RTS and DTR */
+ cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
+ cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
writew(cr, uap->port.membase + UART011_CR);
/* Clear pending error interrupts */
@@ -1469,6 +1472,7 @@ static void pl011_shutdown_channel(struct uart_amba_port *uap,
static void pl011_shutdown(struct uart_port *port)
{
struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ unsigned int cr;
/*
* disable all interrupts
@@ -1488,9 +1492,16 @@ static void pl011_shutdown(struct uart_port *port)
/*
- * disable the port
+ * disable the port. It should not disable RTS and DTR.
+ * Also RTS and DTR state should be preserved to restore
+ * it during startup().
*/
uap->autorts = false;
- writew(UART01x_CR_UARTEN | UART011_CR_TXE, uap->port.membase + UART011_CR);
+ cr = readw(uap->port.membase + UART011_CR);
+ uap->old_cr = cr;
+ cr &= UART011_CR_RTS | UART011_CR_DTR;
+ cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
+ writew(cr, uap->port.membase + UART011_CR);
/*
* disable break condition and fifos
@@ -1740,9 +1751,19 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
{
struct uart_amba_port *uap = amba_ports[co->index];
unsigned int status, old_cr, new_cr;
+ unsigned long flags;
+ int locked = 1;
clk_enable(uap->clk);
+ local_irq_save(flags);
+ if (uap->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+ locked = spin_trylock(&uap->port.lock);
+ else
+ spin_lock(&uap->port.lock);
+
/*
* First save the CR then disable the interrupts
*/
@@ -1762,6 +1783,10 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
} while (status & UART01x_FR_BUSY);
writew(old_cr, uap->port.membase + UART011_CR);
+ if (locked)
+ spin_unlock(&uap->port.lock);
+ local_irq_restore(flags);
+
clk_disable(uap->clk);
}
@@ -1905,6 +1930,7 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
uap->vendor = vendor;
uap->lcrh_rx = vendor->lcrh_rx;
uap->lcrh_tx = vendor->lcrh_tx;
+ uap->old_cr = 0;
uap->fifosize = vendor->fifosize;
uap->interrupt_may_hang = vendor->interrupt_may_hang;
uap->port.dev = &dev->dev;
@@ -1994,6 +2020,8 @@ static struct amba_id pl011_ids[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, pl011_ids);
+
static struct amba_driver pl011_driver = {
.drv = {
.name = "uart-pl011",
diff --git a/drivers/tty/serial/apbuart.c b/drivers/tty/serial/apbuart.c
index 77554fd..7162f70 100644
--- a/drivers/tty/serial/apbuart.c
+++ b/drivers/tty/serial/apbuart.c
@@ -577,7 +577,7 @@ static int __devinit apbuart_probe(struct platform_device *op)
return 0;
}
-static struct of_device_id __initdata apbuart_match[] = {
+static struct of_device_id apbuart_match[] = {
{
.name = "GAISLER_APBUART",
},
@@ -597,7 +597,7 @@ static struct platform_driver grlib_apbuart_of_driver = {
};
-static int grlib_apbuart_configure(void)
+static int __init grlib_apbuart_configure(void)
{
struct device_node *np;
int line = 0;
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
new file mode 100644
index 0000000..e4f60e2
--- /dev/null
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -0,0 +1,688 @@
+/*
+ * Atheros AR933X SoC built-in UART driver
+ *
+ * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/console.h>
+#include <linux/sysrq.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+
+#include <asm/mach-ath79/ar933x_uart.h>
+#include <asm/mach-ath79/ar933x_uart_platform.h>
+
+#define DRIVER_NAME "ar933x-uart"
+
+#define AR933X_DUMMY_STATUS_RD 0x01
+
+static struct uart_driver ar933x_uart_driver;
+
+struct ar933x_uart_port {
+ struct uart_port port;
+ unsigned int ier; /* shadow Interrupt Enable Register */
+};
+
+static inline unsigned int ar933x_uart_read(struct ar933x_uart_port *up,
+ int offset)
+{
+ return readl(up->port.membase + offset);
+}
+
+static inline void ar933x_uart_write(struct ar933x_uart_port *up,
+ int offset, unsigned int value)
+{
+ writel(value, up->port.membase + offset);
+}
+
+static inline void ar933x_uart_rmw(struct ar933x_uart_port *up,
+ unsigned int offset,
+ unsigned int mask,
+ unsigned int val)
+{
+ unsigned int t;
+
+ t = ar933x_uart_read(up, offset);
+ t &= ~mask;
+ t |= val;
+ ar933x_uart_write(up, offset, t);
+}
+
+static inline void ar933x_uart_rmw_set(struct ar933x_uart_port *up,
+ unsigned int offset,
+ unsigned int val)
+{
+ ar933x_uart_rmw(up, offset, 0, val);
+}
+
+static inline void ar933x_uart_rmw_clear(struct ar933x_uart_port *up,
+ unsigned int offset,
+ unsigned int val)
+{
+ ar933x_uart_rmw(up, offset, val, 0);
+}
+
+static inline void ar933x_uart_start_tx_interrupt(struct ar933x_uart_port *up)
+{
+ up->ier |= AR933X_UART_INT_TX_EMPTY;
+ ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
+}
+
+static inline void ar933x_uart_stop_tx_interrupt(struct ar933x_uart_port *up)
+{
+ up->ier &= ~AR933X_UART_INT_TX_EMPTY;
+ ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
+}
+
+static inline void ar933x_uart_putc(struct ar933x_uart_port *up, int ch)
+{
+ unsigned int rdata;
+
+ rdata = ch & AR933X_UART_DATA_TX_RX_MASK;
+ rdata |= AR933X_UART_DATA_TX_CSR;
+ ar933x_uart_write(up, AR933X_UART_DATA_REG, rdata);
+}
+
+static unsigned int ar933x_uart_tx_empty(struct uart_port *port)
+{
+ struct ar933x_uart_port *up = (struct ar933x_uart_port *) port;
+ unsigned long flags;
+ unsigned int rdata;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ rdata = ar933x_uart_read(up, AR933X_UART_DATA_REG);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ return (rdata & AR933X_UART_DATA_TX_CSR) ? 0 : TIOCSER_TEMT;
+}
+
+static unsigned int ar933x_uart_get_mctrl(struct uart_port *port)
+{
+ return TIOCM_CAR;
+}
+
+static void ar933x_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+}
+
+static void ar933x_uart_start_tx(struct uart_port *port)
+{
+ struct ar933x_uart_port *up = (struct ar933x_uart_port *) port;
+
+ ar933x_uart_start_tx_interrupt(up);
+}
+
+static void ar933x_uart_stop_tx(struct uart_port *port)
+{
+ struct ar933x_uart_port *up = (struct ar933x_uart_port *) port;
+
+ ar933x_uart_stop_tx_interrupt(up);
+}
+
+static void ar933x_uart_stop_rx(struct uart_port *port)
+{
+ struct ar933x_uart_port *up = (struct ar933x_uart_port *) port;
+
+ up->ier &= ~AR933X_UART_INT_RX_VALID;
+ ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
+}
+
+static void ar933x_uart_break_ctl(struct uart_port *port, int break_state)
+{
+ struct ar933x_uart_port *up = (struct ar933x_uart_port *) port;
+ unsigned long flags;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+ if (break_state == -1)
+ ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
+ AR933X_UART_CS_TX_BREAK);
+ else
+ ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG,
+ AR933X_UART_CS_TX_BREAK);
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static void ar933x_uart_enable_ms(struct uart_port *port)
+{
+}
+
+static void ar933x_uart_set_termios(struct uart_port *port,
+ struct ktermios *new,
+ struct ktermios *old)
+{
+ struct ar933x_uart_port *up = (struct ar933x_uart_port *) port;
+ unsigned int cs;
+ unsigned long flags;
+ unsigned int baud, scale;
+
+ /* Only CS8 is supported */
+ new->c_cflag &= ~CSIZE;
+ new->c_cflag |= CS8;
+
+ /* Only one stop bit is supported */
+ new->c_cflag &= ~CSTOPB;
+
+ cs = 0;
+ if (new->c_cflag & PARENB) {
+ if (!(new->c_cflag & PARODD))
+ cs |= AR933X_UART_CS_PARITY_EVEN;
+ else
+ cs |= AR933X_UART_CS_PARITY_ODD;
+ } else {
+ cs |= AR933X_UART_CS_PARITY_NONE;
+ }
+
+ /* Mark/space parity is not supported */
+ new->c_cflag &= ~CMSPAR;
+
+ baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16);
+ scale = (port->uartclk / (16 * baud)) - 1;
+
+ /*
+ * Ok, we're now changing the port state. Do it with
+ * interrupts disabled.
+ */
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ /* Update the per-port timeout. */
+ uart_update_timeout(port, new->c_cflag, baud);
+
+ up->port.ignore_status_mask = 0;
+
+ /* ignore all characters if CREAD is not set */
+ if ((new->c_cflag & CREAD) == 0)
+ up->port.ignore_status_mask |= AR933X_DUMMY_STATUS_RD;
+
+ ar933x_uart_write(up, AR933X_UART_CLOCK_REG,
+ scale << AR933X_UART_CLOCK_SCALE_S | 8192);
+
+ /* setup configuration register */
+ ar933x_uart_rmw(up, AR933X_UART_CS_REG, AR933X_UART_CS_PARITY_M, cs);
+
+ /* enable host interrupt */
+ ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
+ AR933X_UART_CS_HOST_INT_EN);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ if (tty_termios_baud_rate(new))
+ tty_termios_encode_baud_rate(new, baud, baud);
+}
+
+static void ar933x_uart_rx_chars(struct ar933x_uart_port *up)
+{
+ struct tty_struct *tty;
+ int max_count = 256;
+
+ tty = tty_port_tty_get(&up->port.state->port);
+ do {
+ unsigned int rdata;
+ unsigned char ch;
+
+ rdata = ar933x_uart_read(up, AR933X_UART_DATA_REG);
+ if ((rdata & AR933X_UART_DATA_RX_CSR) == 0)
+ break;
+
+ /* remove the character from the FIFO */
+ ar933x_uart_write(up, AR933X_UART_DATA_REG,
+ AR933X_UART_DATA_RX_CSR);
+
+ if (!tty) {
+ /* discard the data if no tty available */
+ continue;
+ }
+
+ up->port.icount.rx++;
+ ch = rdata & AR933X_UART_DATA_TX_RX_MASK;
+
+ if (uart_handle_sysrq_char(&up->port, ch))
+ continue;
+
+ if ((up->port.ignore_status_mask & AR933X_DUMMY_STATUS_RD) == 0)
+ tty_insert_flip_char(tty, ch, TTY_NORMAL);
+ } while (max_count-- > 0);
+
+ if (tty) {
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
+ }
+}
+
+static void ar933x_uart_tx_chars(struct ar933x_uart_port *up)
+{
+ struct circ_buf *xmit = &up->port.state->xmit;
+ int count;
+
+ if (uart_tx_stopped(&up->port))
+ return;
+
+ count = up->port.fifosize;
+ do {
+ unsigned int rdata;
+
+ rdata = ar933x_uart_read(up, AR933X_UART_DATA_REG);
+ if ((rdata & AR933X_UART_DATA_TX_CSR) == 0)
+ break;
+
+ if (up->port.x_char) {
+ ar933x_uart_putc(up, up->port.x_char);
+ up->port.icount.tx++;
+ up->port.x_char = 0;
+ continue;
+ }
+
+ if (uart_circ_empty(xmit))
+ break;
+
+ ar933x_uart_putc(up, xmit->buf[xmit->tail]);
+
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ up->port.icount.tx++;
+ } while (--count > 0);
+
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&up->port);
+
+ if (!uart_circ_empty(xmit))
+ ar933x_uart_start_tx_interrupt(up);
+}
+
+static irqreturn_t ar933x_uart_interrupt(int irq, void *dev_id)
+{
+ struct ar933x_uart_port *up = dev_id;
+ unsigned int status;
+
+ status = ar933x_uart_read(up, AR933X_UART_CS_REG);
+ if ((status & AR933X_UART_CS_HOST_INT) == 0)
+ return IRQ_NONE;
+
+ spin_lock(&up->port.lock);
+
+ status = ar933x_uart_read(up, AR933X_UART_INT_REG);
+ status &= ar933x_uart_read(up, AR933X_UART_INT_EN_REG);
+
+ if (status & AR933X_UART_INT_RX_VALID) {
+ ar933x_uart_write(up, AR933X_UART_INT_REG,
+ AR933X_UART_INT_RX_VALID);
+ ar933x_uart_rx_chars(up);
+ }
+
+ if (status & AR933X_UART_INT_TX_EMPTY) {
+ ar933x_uart_write(up, AR933X_UART_INT_REG,
+ AR933X_UART_INT_TX_EMPTY);
+ ar933x_uart_stop_tx_interrupt(up);
+ ar933x_uart_tx_chars(up);
+ }
+
+ spin_unlock(&up->port.lock);
+
+ return IRQ_HANDLED;
+}
+
+static int ar933x_uart_startup(struct uart_port *port)
+{
+ struct ar933x_uart_port *up = (struct ar933x_uart_port *) port;
+ unsigned long flags;
+ int ret;
+
+ ret = request_irq(up->port.irq, ar933x_uart_interrupt,
+ up->port.irqflags, dev_name(up->port.dev), up);
+ if (ret)
+ return ret;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ /* Enable HOST interrupts */
+ ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
+ AR933X_UART_CS_HOST_INT_EN);
+
+ /* Enable RX interrupts */
+ up->ier = AR933X_UART_INT_RX_VALID;
+ ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+
+ return 0;
+}
+
+static void ar933x_uart_shutdown(struct uart_port *port)
+{
+ struct ar933x_uart_port *up = (struct ar933x_uart_port *) port;
+
+ /* Disable all interrupts */
+ up->ier = 0;
+ ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
+
+ /* Disable break condition */
+ ar933x_uart_rmw_clear(up, AR933X_UART_CS_REG,
+ AR933X_UART_CS_TX_BREAK);
+
+ free_irq(up->port.irq, up);
+}
+
+static const char *ar933x_uart_type(struct uart_port *port)
+{
+ return (port->type == PORT_AR933X) ? "AR933X UART" : NULL;
+}
+
+static void ar933x_uart_release_port(struct uart_port *port)
+{
+ /* Nothing to release ... */
+}
+
+static int ar933x_uart_request_port(struct uart_port *port)
+{
+ /* UARTs always present */
+ return 0;
+}
+
+static void ar933x_uart_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE)
+ port->type = PORT_AR933X;
+}
+
+static int ar933x_uart_verify_port(struct uart_port *port,
+ struct serial_struct *ser)
+{
+ if (ser->type != PORT_UNKNOWN &&
+ ser->type != PORT_AR933X)
+ return -EINVAL;
+
+ if (ser->irq < 0 || ser->irq >= NR_IRQS)
+ return -EINVAL;
+
+ if (ser->baud_base < 28800)
+ return -EINVAL;
+
+ return 0;
+}
+
+static struct uart_ops ar933x_uart_ops = {
+ .tx_empty = ar933x_uart_tx_empty,
+ .set_mctrl = ar933x_uart_set_mctrl,
+ .get_mctrl = ar933x_uart_get_mctrl,
+ .stop_tx = ar933x_uart_stop_tx,
+ .start_tx = ar933x_uart_start_tx,
+ .stop_rx = ar933x_uart_stop_rx,
+ .enable_ms = ar933x_uart_enable_ms,
+ .break_ctl = ar933x_uart_break_ctl,
+ .startup = ar933x_uart_startup,
+ .shutdown = ar933x_uart_shutdown,
+ .set_termios = ar933x_uart_set_termios,
+ .type = ar933x_uart_type,
+ .release_port = ar933x_uart_release_port,
+ .request_port = ar933x_uart_request_port,
+ .config_port = ar933x_uart_config_port,
+ .verify_port = ar933x_uart_verify_port,
+};
+
+#ifdef CONFIG_SERIAL_AR933X_CONSOLE
+
+static struct ar933x_uart_port *
+ar933x_console_ports[CONFIG_SERIAL_AR933X_NR_UARTS];
+
+static void ar933x_uart_wait_xmitr(struct ar933x_uart_port *up)
+{
+ unsigned int status;
+ unsigned int timeout = 60000;
+
+ /* Wait up to 60ms for the character(s) to be sent. */
+ do {
+ status = ar933x_uart_read(up, AR933X_UART_DATA_REG);
+ if (--timeout == 0)
+ break;
+ udelay(1);
+ } while ((status & AR933X_UART_DATA_TX_CSR) == 0);
+}
+
+static void ar933x_uart_console_putchar(struct uart_port *port, int ch)
+{
+ struct ar933x_uart_port *up = (struct ar933x_uart_port *) port;
+
+ ar933x_uart_wait_xmitr(up);
+ ar933x_uart_putc(up, ch);
+}
+
+static void ar933x_uart_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct ar933x_uart_port *up = ar933x_console_ports[co->index];
+ unsigned long flags;
+ unsigned int int_en;
+ int locked = 1;
+
+ local_irq_save(flags);
+
+ if (up->port.sysrq)
+ locked = 0;
+ else if (oops_in_progress)
+ locked = spin_trylock(&up->port.lock);
+ else
+ spin_lock(&up->port.lock);
+
+ /*
+ * First save the IER then disable the interrupts
+ */
+ int_en = ar933x_uart_read(up, AR933X_UART_INT_EN_REG);
+ ar933x_uart_write(up, AR933X_UART_INT_EN_REG, 0);
+
+ uart_console_write(&up->port, s, count, ar933x_uart_console_putchar);
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore the IER
+ */
+ ar933x_uart_wait_xmitr(up);
+ ar933x_uart_write(up, AR933X_UART_INT_EN_REG, int_en);
+
+ ar933x_uart_write(up, AR933X_UART_INT_REG, AR933X_UART_INT_ALLINTS);
+
+ if (locked)
+ spin_unlock(&up->port.lock);
+
+ local_irq_restore(flags);
+}
+
+static int ar933x_uart_console_setup(struct console *co, char *options)
+{
+ struct ar933x_uart_port *up;
+ int baud = 115200;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ if (co->index < 0 || co->index >= CONFIG_SERIAL_AR933X_NR_UARTS)
+ return -EINVAL;
+
+ up = ar933x_console_ports[co->index];
+ if (!up)
+ return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(&up->port, co, baud, parity, bits, flow);
+}
+
+static struct console ar933x_uart_console = {
+ .name = "ttyATH",
+ .write = ar933x_uart_console_write,
+ .device = uart_console_device,
+ .setup = ar933x_uart_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &ar933x_uart_driver,
+};
+
+static void ar933x_uart_add_console_port(struct ar933x_uart_port *up)
+{
+ ar933x_console_ports[up->port.line] = up;
+}
+
+#define AR933X_SERIAL_CONSOLE (&ar933x_uart_console)
+
+#else
+
+static inline void ar933x_uart_add_console_port(struct ar933x_uart_port *up) {}
+
+#define AR933X_SERIAL_CONSOLE NULL
+
+#endif /* CONFIG_SERIAL_AR933X_CONSOLE */
+
+static struct uart_driver ar933x_uart_driver = {
+ .owner = THIS_MODULE,
+ .driver_name = DRIVER_NAME,
+ .dev_name = "ttyATH",
+ .nr = CONFIG_SERIAL_AR933X_NR_UARTS,
+ .cons = AR933X_SERIAL_CONSOLE,
+};
+
+static int __devinit ar933x_uart_probe(struct platform_device *pdev)
+{
+ struct ar933x_uart_platform_data *pdata;
+ struct ar933x_uart_port *up;
+ struct uart_port *port;
+ struct resource *mem_res;
+ struct resource *irq_res;
+ int id;
+ int ret;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata)
+ return -EINVAL;
+
+ id = pdev->id;
+ if (id == -1)
+ id = 0;
+
+ if (id > CONFIG_SERIAL_AR933X_NR_UARTS)
+ return -EINVAL;
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem_res) {
+ dev_err(&pdev->dev, "no MEM resource\n");
+ return -EINVAL;
+ }
+
+ irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq_res) {
+ dev_err(&pdev->dev, "no IRQ resource\n");
+ return -EINVAL;
+ }
+
+ up = kzalloc(sizeof(struct ar933x_uart_port), GFP_KERNEL);
+ if (!up)
+ return -ENOMEM;
+
+ port = &up->port;
+ port->mapbase = mem_res->start;
+
+ port->membase = ioremap(mem_res->start, AR933X_UART_REGS_SIZE);
+ if (!port->membase) {
+ ret = -ENOMEM;
+ goto err_free_up;
+ }
+
+ port->line = id;
+ port->irq = irq_res->start;
+ port->dev = &pdev->dev;
+ port->type = PORT_AR933X;
+ port->iotype = UPIO_MEM32;
+ port->uartclk = pdata->uartclk;
+
+ port->regshift = 2;
+ port->fifosize = AR933X_UART_FIFO_SIZE;
+ port->ops = &ar933x_uart_ops;
+
+ ar933x_uart_add_console_port(up);
+
+ ret = uart_add_one_port(&ar933x_uart_driver, &up->port);
+ if (ret)
+ goto err_unmap;
+
+ platform_set_drvdata(pdev, up);
+ return 0;
+
+err_unmap:
+ iounmap(up->port.membase);
+err_free_up:
+ kfree(up);
+ return ret;
+}
+
+static int __devexit ar933x_uart_remove(struct platform_device *pdev)
+{
+ struct ar933x_uart_port *up;
+
+ up = platform_get_drvdata(pdev);
+ platform_set_drvdata(pdev, NULL);
+
+ if (up) {
+ uart_remove_one_port(&ar933x_uart_driver, &up->port);
+ iounmap(up->port.membase);
+ kfree(up);
+ }
+
+ return 0;
+}
+
+static struct platform_driver ar933x_uart_platform_driver = {
+ .probe = ar933x_uart_probe,
+ .remove = __devexit_p(ar933x_uart_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init ar933x_uart_init(void)
+{
+ int ret;
+
+ ar933x_uart_driver.nr = CONFIG_SERIAL_AR933X_NR_UARTS;
+ ret = uart_register_driver(&ar933x_uart_driver);
+ if (ret)
+ goto err_out;
+
+ ret = platform_driver_register(&ar933x_uart_platform_driver);
+ if (ret)
+ goto err_unregister_uart_driver;
+
+ return 0;
+
+err_unregister_uart_driver:
+ uart_unregister_driver(&ar933x_uart_driver);
+err_out:
+ return ret;
+}
+
+static void __exit ar933x_uart_exit(void)
+{
+ platform_driver_unregister(&ar933x_uart_platform_driver);
+ uart_unregister_driver(&ar933x_uart_driver);
+}
+
+module_init(ar933x_uart_init);
+module_exit(ar933x_uart_exit);
+
+MODULE_DESCRIPTION("Atheros AR933X UART driver");
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 4c823f3..10605ec 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -212,8 +212,9 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
{
struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
unsigned int mode;
+ unsigned long flags;
- spin_lock(&port->lock);
+ spin_lock_irqsave(&port->lock, flags);
/* Disable interrupts */
UART_PUT_IDR(port, atmel_port->tx_done_mask);
@@ -244,7 +245,7 @@ void atmel_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
/* Enable interrupts */
UART_PUT_IER(port, atmel_port->tx_done_mask);
- spin_unlock(&port->lock);
+ spin_unlock_irqrestore(&port->lock, flags);
}
@@ -1256,12 +1257,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
static void atmel_set_ldisc(struct uart_port *port, int new)
{
- int line = port->line;
-
- if (line >= port->state->port.tty->driver->num)
- return;
-
- if (port->state->port.tty->ldisc->ops->num == N_PPS) {
+ if (new == N_PPS) {
port->flags |= UPF_HARDPPS_CD;
atmel_enable_ms(port);
} else {
diff --git a/drivers/tty/serial/bfin_sport_uart.c b/drivers/tty/serial/bfin_sport_uart.c
index ee101c0..7fbc3a0 100644
--- a/drivers/tty/serial/bfin_sport_uart.c
+++ b/drivers/tty/serial/bfin_sport_uart.c
@@ -299,8 +299,13 @@ static int sport_startup(struct uart_port *port)
dev_info(port->dev, "Unable to attach BlackFin UART over SPORT CTS interrupt. So, disable it.\n");
}
}
- if (up->rts_pin >= 0)
- gpio_direction_output(up->rts_pin, 0);
+ if (up->rts_pin >= 0) {
+ if (gpio_request(up->rts_pin, DRV_NAME)) {
+ dev_info(port->dev, "fail to request RTS PIN at GPIO_%d\n", up->rts_pin);
+ up->rts_pin = -1;
+ } else
+ gpio_direction_output(up->rts_pin, 0);
+ }
#endif
return 0;
@@ -445,6 +450,8 @@ static void sport_shutdown(struct uart_port *port)
#ifdef CONFIG_SERIAL_BFIN_SPORT_CTSRTS
if (up->cts_pin >= 0)
free_irq(gpio_to_irq(up->cts_pin), up);
+ if (up->rts_pin >= 0)
+ gpio_free(up->rts_pin);
#endif
}
@@ -803,17 +810,16 @@ static int __devinit sport_uart_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (res == NULL)
sport->cts_pin = -1;
- else
+ else {
sport->cts_pin = res->start;
+ sport->port.flags |= ASYNC_CTS_FLOW;
+ }
res = platform_get_resource(pdev, IORESOURCE_IO, 1);
if (res == NULL)
sport->rts_pin = -1;
else
sport->rts_pin = res->start;
-
- if (sport->rts_pin >= 0)
- gpio_request(sport->rts_pin, DRV_NAME);
#endif
}
@@ -853,10 +859,6 @@ static int __devexit sport_uart_remove(struct platform_device *pdev)
if (sport) {
uart_remove_one_port(&sport_uart_reg, &sport->port);
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- if (sport->rts_pin >= 0)
- gpio_free(sport->rts_pin);
-#endif
iounmap(sport->port.membase);
peripheral_free_list(
(unsigned short *)pdev->dev.platform_data);
diff --git a/drivers/tty/serial/bfin_sport_uart.h b/drivers/tty/serial/bfin_sport_uart.h
index 6d06ce1..e4510ea 100644
--- a/drivers/tty/serial/bfin_sport_uart.h
+++ b/drivers/tty/serial/bfin_sport_uart.h
@@ -45,11 +45,12 @@
#define SPORT_GET_RX32(sport) \
({ \
unsigned int __ret; \
+ unsigned long flags; \
if (ANOMALY_05000473) \
- local_irq_disable(); \
+ local_irq_save(flags); \
__ret = bfin_read32((sport)->port.membase + OFFSET_RX); \
if (ANOMALY_05000473) \
- local_irq_enable(); \
+ local_irq_restore(flags); \
__ret; \
})
#define SPORT_GET_RCR1(sport) bfin_read16(((sport)->port.membase + OFFSET_RCR1))
diff --git a/drivers/tty/serial/bfin_uart.c b/drivers/tty/serial/bfin_uart.c
index 66afb98..26953bf 100644
--- a/drivers/tty/serial/bfin_uart.c
+++ b/drivers/tty/serial/bfin_uart.c
@@ -116,15 +116,22 @@ static void bfin_serial_set_mctrl(struct uart_port *port, unsigned int mctrl)
static irqreturn_t bfin_serial_mctrl_cts_int(int irq, void *dev_id)
{
struct bfin_serial_port *uart = dev_id;
- unsigned int status;
-
- status = bfin_serial_get_mctrl(&uart->port);
- uart_handle_cts_change(&uart->port, status & TIOCM_CTS);
+ unsigned int status = bfin_serial_get_mctrl(&uart->port);
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- uart->scts = 1;
+ struct tty_struct *tty = uart->port.state->port.tty;
+
UART_CLEAR_SCTS(uart);
- UART_CLEAR_IER(uart, EDSSI);
+ if (tty->hw_stopped) {
+ if (status) {
+ tty->hw_stopped = 0;
+ uart_write_wakeup(&uart->port);
+ }
+ } else {
+ if (!status)
+ tty->hw_stopped = 1;
+ }
#endif
+ uart_handle_cts_change(&uart->port, status & TIOCM_CTS);
return IRQ_HANDLED;
}
@@ -175,13 +182,6 @@ static void bfin_serial_start_tx(struct uart_port *port)
struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
struct tty_struct *tty = uart->port.state->port.tty;
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- if (uart->scts && !(bfin_serial_get_mctrl(&uart->port) & TIOCM_CTS)) {
- uart->scts = 0;
- uart_handle_cts_change(&uart->port, uart->scts);
- }
-#endif
-
/*
* To avoid losing the RX interrupt, we reset the IR function
* before sending data.
@@ -380,12 +380,6 @@ static irqreturn_t bfin_serial_tx_int(int irq, void *dev_id)
{
struct bfin_serial_port *uart = dev_id;
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- if (uart->scts && !(bfin_serial_get_mctrl(&uart->port) & TIOCM_CTS)) {
- uart->scts = 0;
- uart_handle_cts_change(&uart->port, uart->scts);
- }
-#endif
spin_lock(&uart->port.lock);
if (UART_GET_LSR(uart) & THRE)
bfin_serial_tx_chars(uart);
@@ -531,13 +525,6 @@ static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id)
struct bfin_serial_port *uart = dev_id;
struct circ_buf *xmit = &uart->port.state->xmit;
-#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- if (uart->scts && !(bfin_serial_get_mctrl(&uart->port)&TIOCM_CTS)) {
- uart->scts = 0;
- uart_handle_cts_change(&uart->port, uart->scts);
- }
-#endif
-
spin_lock(&uart->port.lock);
if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) {
disable_dma(uart->tx_dma_channel);
@@ -739,20 +726,26 @@ static int bfin_serial_startup(struct uart_port *port)
pr_info("Unable to attach BlackFin UART CTS interrupt. So, disable it.\n");
}
}
- if (uart->rts_pin >= 0)
- gpio_direction_output(uart->rts_pin, 0);
+ if (uart->rts_pin >= 0) {
+ if (gpio_request(uart->rts_pin, DRIVER_NAME)) {
+ pr_info("fail to request RTS PIN at GPIO_%d\n", uart->rts_pin);
+ uart->rts_pin = -1;
+ } else
+ gpio_direction_output(uart->rts_pin, 0);
+ }
#endif
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- if (uart->cts_pin >= 0 && request_irq(uart->status_irq,
- bfin_serial_mctrl_cts_int,
- 0, "BFIN_UART_MODEM_STATUS", uart)) {
- uart->cts_pin = -1;
- pr_info("Unable to attach BlackFin UART Modem Status interrupt.\n");
- }
+ if (uart->cts_pin >= 0) {
+ if (request_irq(uart->status_irq, bfin_serial_mctrl_cts_int,
+ IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) {
+ uart->cts_pin = -1;
+ dev_info(port->dev, "Unable to attach BlackFin UART Modem Status interrupt.\n");
+ }
- /* CTS RTS PINs are negative assertive. */
- UART_PUT_MCR(uart, ACTS);
- UART_SET_IER(uart, EDSSI);
+ /* CTS RTS PINs are negative assertive. */
+ UART_PUT_MCR(uart, ACTS);
+ UART_SET_IER(uart, EDSSI);
+ }
#endif
UART_SET_IER(uart, ERBFI);
@@ -792,6 +785,8 @@ static void bfin_serial_shutdown(struct uart_port *port)
#ifdef CONFIG_SERIAL_BFIN_CTSRTS
if (uart->cts_pin >= 0)
free_irq(gpio_to_irq(uart->cts_pin), uart);
+ if (uart->rts_pin >= 0)
+ gpio_free(uart->rts_pin);
#endif
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
if (uart->cts_pin >= 0)
@@ -1370,18 +1365,18 @@ static int bfin_serial_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_IO, 0);
if (res == NULL)
uart->cts_pin = -1;
- else
+ else {
uart->cts_pin = res->start;
+#ifdef CONFIG_SERIAL_BFIN_CTSRTS
+ uart->port.flags |= ASYNC_CTS_FLOW;
+#endif
+ }
res = platform_get_resource(pdev, IORESOURCE_IO, 1);
if (res == NULL)
uart->rts_pin = -1;
else
uart->rts_pin = res->start;
-# if defined(CONFIG_SERIAL_BFIN_CTSRTS)
- if (uart->rts_pin >= 0)
- gpio_request(uart->rts_pin, DRIVER_NAME);
-# endif
#endif
}
@@ -1421,10 +1416,6 @@ static int __devexit bfin_serial_remove(struct platform_device *pdev)
if (uart) {
uart_remove_one_port(&bfin_serial_reg, &uart->port);
-#ifdef CONFIG_SERIAL_BFIN_CTSRTS
- if (uart->rts_pin >= 0)
- gpio_free(uart->rts_pin);
-#endif
iounmap(uart->port.membase);
peripheral_free_list(
(unsigned short *)pdev->dev.platform_data);
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index 426434e..7e925e2 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -1334,7 +1334,6 @@ MODULE_DEVICE_TABLE(spi, ifx_id_table);
static const struct spi_driver ifx_spi_driver = {
.driver = {
.name = DRVNAME,
- .bus = &spi_bus_type,
.pm = &ifx_spi_pm,
.owner = THIS_MODULE},
.probe = ifx_spi_spi_probe,
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 163fc90..0b7fed7 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -102,6 +102,7 @@
#define UCR2_STPB (1<<6) /* Stop */
#define UCR2_WS (1<<5) /* Word size */
#define UCR2_RTSEN (1<<4) /* Request to send interrupt enable */
+#define UCR2_ATEN (1<<3) /* Aging Timer Enable */
#define UCR2_TXEN (1<<2) /* Transmitter enabled */
#define UCR2_RXEN (1<<1) /* Receiver enabled */
#define UCR2_SRST (1<<0) /* SW reset */
@@ -207,6 +208,12 @@ struct imx_port {
struct imx_uart_data *devdata;
};
+struct imx_port_ucrs {
+ unsigned int ucr1;
+ unsigned int ucr2;
+ unsigned int ucr3;
+};
+
#ifdef CONFIG_IRDA
#define USE_IRDA(sport) ((sport)->use_irda)
#else
@@ -260,6 +267,27 @@ static inline int is_imx21_uart(struct imx_port *sport)
}
/*
+ * Save and restore functions for UCR1, UCR2 and UCR3 registers
+ */
+static void imx_port_ucrs_save(struct uart_port *port,
+ struct imx_port_ucrs *ucr)
+{
+ /* save control registers */
+ ucr->ucr1 = readl(port->membase + UCR1);
+ ucr->ucr2 = readl(port->membase + UCR2);
+ ucr->ucr3 = readl(port->membase + UCR3);
+}
+
+static void imx_port_ucrs_restore(struct uart_port *port,
+ struct imx_port_ucrs *ucr)
+{
+ /* restore control registers */
+ writel(ucr->ucr1, port->membase + UCR1);
+ writel(ucr->ucr2, port->membase + UCR2);
+ writel(ucr->ucr3, port->membase + UCR3);
+}
+
+/*
* Handle any change of modem status signal since we were last called.
*/
static void imx_mctrl_check(struct imx_port *sport)
@@ -566,6 +594,9 @@ static irqreturn_t imx_int(int irq, void *dev_id)
if (sts & USR1_RTSD)
imx_rtsint(irq, dev_id);
+ if (sts & USR1_AWAKE)
+ writel(USR1_AWAKE, sport->port.membase + USR1);
+
return IRQ_HANDLED;
}
@@ -901,6 +932,8 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
ucr2 |= UCR2_PROE;
}
+ del_timer_sync(&sport->timer);
+
/*
* Ask the core to calculate the divisor for us.
*/
@@ -931,8 +964,6 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
sport->port.ignore_status_mask |= URXD_OVRRUN;
}
- del_timer_sync(&sport->timer);
-
/*
* Update the per-port timeout.
*/
@@ -1079,6 +1110,70 @@ imx_verify_port(struct uart_port *port, struct serial_struct *ser)
return ret;
}
+#if defined(CONFIG_CONSOLE_POLL)
+static int imx_poll_get_char(struct uart_port *port)
+{
+ struct imx_port_ucrs old_ucr;
+ unsigned int status;
+ unsigned char c;
+
+ /* save control registers */
+ imx_port_ucrs_save(port, &old_ucr);
+
+ /* disable interrupts */
+ writel(UCR1_UARTEN, port->membase + UCR1);
+ writel(old_ucr.ucr2 & ~(UCR2_ATEN | UCR2_RTSEN | UCR2_ESCI),
+ port->membase + UCR2);
+ writel(old_ucr.ucr3 & ~(UCR3_DCD | UCR3_RI | UCR3_DTREN),
+ port->membase + UCR3);
+
+ /* poll */
+ do {
+ status = readl(port->membase + USR2);
+ } while (~status & USR2_RDR);
+
+ /* read */
+ c = readl(port->membase + URXD0);
+
+ /* restore control registers */
+ imx_port_ucrs_restore(port, &old_ucr);
+
+ return c;
+}
+
+static void imx_poll_put_char(struct uart_port *port, unsigned char c)
+{
+ struct imx_port_ucrs old_ucr;
+ unsigned int status;
+
+ /* save control registers */
+ imx_port_ucrs_save(port, &old_ucr);
+
+ /* disable interrupts */
+ writel(UCR1_UARTEN, port->membase + UCR1);
+ writel(old_ucr.ucr2 & ~(UCR2_ATEN | UCR2_RTSEN | UCR2_ESCI),
+ port->membase + UCR2);
+ writel(old_ucr.ucr3 & ~(UCR3_DCD | UCR3_RI | UCR3_DTREN),
+ port->membase + UCR3);
+
+ /* drain */
+ do {
+ status = readl(port->membase + USR1);
+ } while (~status & USR1_TRDY);
+
+ /* write */
+ writel(c, port->membase + URTX0);
+
+ /* flush */
+ do {
+ status = readl(port->membase + USR2);
+ } while (~status & USR2_TXDC);
+
+ /* restore control registers */
+ imx_port_ucrs_restore(port, &old_ucr);
+}
+#endif
+
static struct uart_ops imx_pops = {
.tx_empty = imx_tx_empty,
.set_mctrl = imx_set_mctrl,
@@ -1096,6 +1191,10 @@ static struct uart_ops imx_pops = {
.request_port = imx_request_port,
.config_port = imx_config_port,
.verify_port = imx_verify_port,
+#if defined(CONFIG_CONSOLE_POLL)
+ .poll_get_char = imx_poll_get_char,
+ .poll_put_char = imx_poll_put_char,
+#endif
};
static struct imx_port *imx_ports[UART_NR];
@@ -1118,13 +1217,14 @@ static void
imx_console_write(struct console *co, const char *s, unsigned int count)
{
struct imx_port *sport = imx_ports[co->index];
- unsigned int old_ucr1, old_ucr2, ucr1;
+ struct imx_port_ucrs old_ucr;
+ unsigned int ucr1;
/*
- * First, save UCR1/2 and then disable interrupts
+ * First, save UCR1/2/3 and then disable interrupts
*/
- ucr1 = old_ucr1 = readl(sport->port.membase + UCR1);
- old_ucr2 = readl(sport->port.membase + UCR2);
+ imx_port_ucrs_save(&sport->port, &old_ucr);
+ ucr1 = old_ucr.ucr1;
if (is_imx1_uart(sport))
ucr1 |= IMX1_UCR1_UARTCLKEN;
@@ -1133,18 +1233,17 @@ imx_console_write(struct console *co, const char *s, unsigned int count)
writel(ucr1, sport->port.membase + UCR1);
- writel(old_ucr2 | UCR2_TXEN, sport->port.membase + UCR2);
+ writel(old_ucr.ucr2 | UCR2_TXEN, sport->port.membase + UCR2);
uart_console_write(&sport->port, s, count, imx_console_putchar);
/*
* Finally, wait for transmitter to become empty
- * and restore UCR1/2
+ * and restore UCR1/2/3
*/
while (!(readl(sport->port.membase + USR2) & USR2_TXDC));
- writel(old_ucr1, sport->port.membase + UCR1);
- writel(old_ucr2, sport->port.membase + UCR2);
+ imx_port_ucrs_restore(&sport->port, &old_ucr);
}
/*
@@ -1269,6 +1368,12 @@ static struct uart_driver imx_reg = {
static int serial_imx_suspend(struct platform_device *dev, pm_message_t state)
{
struct imx_port *sport = platform_get_drvdata(dev);
+ unsigned int val;
+
+ /* enable wakeup from i.MX UART */
+ val = readl(sport->port.membase + UCR3);
+ val |= UCR3_AWAKEN;
+ writel(val, sport->port.membase + UCR3);
if (sport)
uart_suspend_port(&imx_reg, &sport->port);
@@ -1279,6 +1384,12 @@ static int serial_imx_suspend(struct platform_device *dev, pm_message_t state)
static int serial_imx_resume(struct platform_device *dev)
{
struct imx_port *sport = platform_get_drvdata(dev);
+ unsigned int val;
+
+ /* disable wakeup from i.MX UART */
+ val = readl(sport->port.membase + UCR3);
+ val &= ~UCR3_AWAKEN;
+ writel(val, sport->port.membase + UCR3);
if (sport)
uart_resume_port(&imx_reg, &sport->port);
@@ -1287,6 +1398,10 @@ static int serial_imx_resume(struct platform_device *dev)
}
#ifdef CONFIG_OF
+/*
+ * This function returns 1 if pdev isn't a device instantiated from the device
+ * tree, 0 if it could successfully get all information from the device tree,
+ * or a negative errno on failure.
+ */
static int serial_imx_probe_dt(struct imx_port *sport,
struct platform_device *pdev)
{
@@ -1296,12 +1411,13 @@ static int serial_imx_probe_dt(struct imx_port *sport,
int ret;
if (!np)
- return -ENODEV;
+ /* no device tree device */
+ return 1;
ret = of_alias_get_id(np, "serial");
if (ret < 0) {
dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
- return -ENODEV;
+ return ret;
}
sport->port.line = ret;
@@ -1319,7 +1435,7 @@ static int serial_imx_probe_dt(struct imx_port *sport,
static inline int serial_imx_probe_dt(struct imx_port *sport,
struct platform_device *pdev)
{
- return -ENODEV;
+ return 1;
}
#endif
@@ -1354,8 +1470,10 @@ static int serial_imx_probe(struct platform_device *pdev)
return -ENOMEM;
ret = serial_imx_probe_dt(sport, pdev);
- if (ret == -ENODEV)
+ if (ret > 0)
serial_imx_probe_pdata(sport, pdev);
+ else if (ret < 0)
+ goto free;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -1476,7 +1594,7 @@ static int __init imx_serial_init(void)
if (ret != 0)
uart_unregister_driver(&imx_reg);
- return 0;
+ return ret;
}
static void __exit imx_serial_exit(void)
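The imx.c changes give serial_imx_probe_dt() a three-way result: 1 when the device was not instantiated from the device tree (fall back to platform data), 0 when DT supplied everything, and a negative errno on a real parse failure. A minimal sketch of that calling convention, with hypothetical demo_* names:

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct demo_port {
	int line;
};

/* 1: not a DT device; 0: fully configured from DT; <0: DT parse error */
static int demo_probe_dt(struct demo_port *sport, struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	int ret;

	if (!np)
		return 1;		/* caller should use platform data */

	ret = of_alias_get_id(np, "serial");
	if (ret < 0)
		return ret;		/* propagate the real error */
	sport->line = ret;
	return 0;
}

static int demo_probe(struct platform_device *pdev)
{
	struct demo_port *sport;
	int ret;

	sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL);
	if (!sport)
		return -ENOMEM;

	ret = demo_probe_dt(sport, pdev);
	if (ret > 0)
		sport->line = pdev->id;	/* legacy platform-data path */
	else if (ret < 0)
		return ret;		/* hard failure, abort probe */

	return 0;
}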
diff --git a/drivers/tty/serial/jsm/jsm_driver.c b/drivers/tty/serial/jsm/jsm_driver.c
index 7c867a0..7545fe1 100644
--- a/drivers/tty/serial/jsm/jsm_driver.c
+++ b/drivers/tty/serial/jsm/jsm_driver.c
@@ -251,6 +251,7 @@ static void jsm_io_resume(struct pci_dev *pdev)
struct jsm_board *brd = pci_get_drvdata(pdev);
pci_restore_state(pdev);
+ pci_save_state(pdev);
jsm_uart_port_init(brd);
}
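The jsm hunk re-saves PCI config space immediately after restoring it in the I/O-resume path; pci_restore_state() consumes the saved copy, so saving again presumably keeps a valid snapshot around for any later error-recovery cycle. A sketch of the pairing, assuming a hypothetical resume handler:

#include <linux/pci.h>

/* Hypothetical AER io_resume handler: restore config space after error
 * recovery, then save it again so a future recovery still has a valid
 * snapshot to restore. */
static void demo_io_resume(struct pci_dev *pdev)
{
	pci_restore_state(pdev);
	pci_save_state(pdev);
}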
diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
index 0801893..94a6792 100644
--- a/drivers/tty/serial/m32r_sio.c
+++ b/drivers/tty/serial/m32r_sio.c
@@ -1000,11 +1000,8 @@ static void __init m32r_sio_register_ports(struct uart_driver *drv)
init_timer(&up->timer);
up->timer.function = m32r_sio_timeout;
- /*
- * ALPHA_KLUDGE_MCR needs to be killed.
- */
- up->mcr_mask = ~ALPHA_KLUDGE_MCR;
- up->mcr_force = ALPHA_KLUDGE_MCR;
+ up->mcr_mask = ~0;
+ up->mcr_force = 0;
uart_add_one_port(drv, &up->port);
}
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index 8a6cc8c..b4902b9 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -901,7 +901,6 @@ static int max3100_resume(struct spi_device *spi)
static struct spi_driver max3100_driver = {
.driver = {
.name = "max3100",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
diff --git a/drivers/tty/serial/max3107-aava.c b/drivers/tty/serial/max3107-aava.c
deleted file mode 100644
index 90c40f2..0000000
--- a/drivers/tty/serial/max3107-aava.c
+++ /dev/null
@@ -1,345 +0,0 @@
-/*
- * max3107.c - spi uart protocol driver for Maxim 3107
- * Based on max3100.c
- * by Christian Pellegrin <chripell@evolware.org>
- * and max3110.c
- * by Feng Tang <feng.tang@intel.com>
- *
- * Copyright (C) Aavamobile 2009
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- */
-
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
-#include <linux/spi/spi.h>
-#include <linux/freezer.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/sfi.h>
-#include <linux/module.h>
-#include <asm/mrst.h>
-#include "max3107.h"
-
-/* GPIO direction to input function */
-static int max3107_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
-{
- struct max3107_port *s = container_of(chip, struct max3107_port, chip);
- u16 buf[1]; /* Buffer for SPI transfer */
-
- if (offset >= MAX3107_GPIO_COUNT) {
- dev_err(&s->spi->dev, "Invalid GPIO\n");
- return -EINVAL;
- }
-
- /* Read current GPIO configuration register */
- buf[0] = MAX3107_GPIOCFG_REG;
- /* Perform SPI transfer */
- if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 2)) {
- dev_err(&s->spi->dev, "SPI transfer GPIO read failed\n");
- return -EIO;
- }
- buf[0] &= MAX3107_SPI_RX_DATA_MASK;
-
- /* Set GPIO to input */
- buf[0] &= ~(0x0001 << offset);
-
- /* Write new GPIO configuration register value */
- buf[0] |= (MAX3107_WRITE_BIT | MAX3107_GPIOCFG_REG);
- /* Perform SPI transfer */
- if (max3107_rw(s, (u8 *)buf, NULL, 2)) {
- dev_err(&s->spi->dev, "SPI transfer GPIO write failed\n");
- return -EIO;
- }
- return 0;
-}
-
-/* GPIO direction to output function */
-static int max3107_gpio_direction_out(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- struct max3107_port *s = container_of(chip, struct max3107_port, chip);
- u16 buf[2]; /* Buffer for SPI transfers */
-
- if (offset >= MAX3107_GPIO_COUNT) {
- dev_err(&s->spi->dev, "Invalid GPIO\n");
- return -EINVAL;
- }
-
- /* Read current GPIO configuration and data registers */
- buf[0] = MAX3107_GPIOCFG_REG;
- buf[1] = MAX3107_GPIODATA_REG;
- /* Perform SPI transfer */
- if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 4)) {
- dev_err(&s->spi->dev, "SPI transfer gpio failed\n");
- return -EIO;
- }
- buf[0] &= MAX3107_SPI_RX_DATA_MASK;
- buf[1] &= MAX3107_SPI_RX_DATA_MASK;
-
- /* Set GPIO to output */
- buf[0] |= (0x0001 << offset);
- /* Set value */
- if (value)
- buf[1] |= (0x0001 << offset);
- else
- buf[1] &= ~(0x0001 << offset);
-
- /* Write new GPIO configuration and data register values */
- buf[0] |= (MAX3107_WRITE_BIT | MAX3107_GPIOCFG_REG);
- buf[1] |= (MAX3107_WRITE_BIT | MAX3107_GPIODATA_REG);
- /* Perform SPI transfer */
- if (max3107_rw(s, (u8 *)buf, NULL, 4)) {
- dev_err(&s->spi->dev,
- "SPI transfer for GPIO conf data w failed\n");
- return -EIO;
- }
- return 0;
-}
-
-/* GPIO value query function */
-static int max3107_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
- struct max3107_port *s = container_of(chip, struct max3107_port, chip);
- u16 buf[1]; /* Buffer for SPI transfer */
-
- if (offset >= MAX3107_GPIO_COUNT) {
- dev_err(&s->spi->dev, "Invalid GPIO\n");
- return -EINVAL;
- }
-
- /* Read current GPIO data register */
- buf[0] = MAX3107_GPIODATA_REG;
- /* Perform SPI transfer */
- if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 2)) {
- dev_err(&s->spi->dev, "SPI transfer GPIO data r failed\n");
- return -EIO;
- }
- buf[0] &= MAX3107_SPI_RX_DATA_MASK;
-
- /* Return value */
- return buf[0] & (0x0001 << offset);
-}
-
-/* GPIO value set function */
-static void max3107_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- struct max3107_port *s = container_of(chip, struct max3107_port, chip);
- u16 buf[2]; /* Buffer for SPI transfers */
-
- if (offset >= MAX3107_GPIO_COUNT) {
- dev_err(&s->spi->dev, "Invalid GPIO\n");
- return;
- }
-
- /* Read current GPIO configuration registers*/
- buf[0] = MAX3107_GPIODATA_REG;
- buf[1] = MAX3107_GPIOCFG_REG;
- /* Perform SPI transfer */
- if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 4)) {
- dev_err(&s->spi->dev,
- "SPI transfer for GPIO data and config read failed\n");
- return;
- }
- buf[0] &= MAX3107_SPI_RX_DATA_MASK;
- buf[1] &= MAX3107_SPI_RX_DATA_MASK;
-
- if (!(buf[1] & (0x0001 << offset))) {
- /* Configured as input, can't set value */
- dev_warn(&s->spi->dev,
- "Trying to set value for input GPIO\n");
- return;
- }
-
- /* Set value */
- if (value)
- buf[0] |= (0x0001 << offset);
- else
- buf[0] &= ~(0x0001 << offset);
-
- /* Write new GPIO data register value */
- buf[0] |= (MAX3107_WRITE_BIT | MAX3107_GPIODATA_REG);
- /* Perform SPI transfer */
- if (max3107_rw(s, (u8 *)buf, NULL, 2))
- dev_err(&s->spi->dev, "SPI transfer GPIO data w failed\n");
-}
-
-/* GPIO chip data */
-static struct gpio_chip max3107_gpio_chip = {
- .owner = THIS_MODULE,
- .direction_input = max3107_gpio_direction_in,
- .direction_output = max3107_gpio_direction_out,
- .get = max3107_gpio_get,
- .set = max3107_gpio_set,
- .can_sleep = 1,
- .base = MAX3107_GPIO_BASE,
- .ngpio = MAX3107_GPIO_COUNT,
-};
-
-/**
- * max3107_aava_reset - reset on AAVA systems
- * @spi: The SPI device we are probing
- *
- * Reset the device ready for probing.
- */
-
-static int max3107_aava_reset(struct spi_device *spi)
-{
- /* Reset the chip */
- if (gpio_request(MAX3107_RESET_GPIO, "max3107")) {
- pr_err("Requesting RESET GPIO failed\n");
- return -EIO;
- }
- if (gpio_direction_output(MAX3107_RESET_GPIO, 0)) {
- pr_err("Setting RESET GPIO to 0 failed\n");
- gpio_free(MAX3107_RESET_GPIO);
- return -EIO;
- }
- msleep(MAX3107_RESET_DELAY);
- if (gpio_direction_output(MAX3107_RESET_GPIO, 1)) {
- pr_err("Setting RESET GPIO to 1 failed\n");
- gpio_free(MAX3107_RESET_GPIO);
- return -EIO;
- }
- gpio_free(MAX3107_RESET_GPIO);
- msleep(MAX3107_WAKEUP_DELAY);
- return 0;
-}
-
-static int max3107_aava_configure(struct max3107_port *s)
-{
- int retval;
-
- /* Initialize GPIO chip data */
- s->chip = max3107_gpio_chip;
- s->chip.label = s->spi->modalias;
- s->chip.dev = &s->spi->dev;
-
- /* Add GPIO chip */
- retval = gpiochip_add(&s->chip);
- if (retval) {
- dev_err(&s->spi->dev, "Adding GPIO chip failed\n");
- return retval;
- }
-
- /* Temporary fix for EV2 boot problems, set modem reset to 0 */
- max3107_gpio_direction_out(&s->chip, 3, 0);
- return 0;
-}
-
-#if 0
-/* This will get enabled once we have the board stuff merged for this
- specific case */
-
-static const struct baud_table brg13_ext[] = {
- { 300, MAX3107_BRG13_B300 },
- { 600, MAX3107_BRG13_B600 },
- { 1200, MAX3107_BRG13_B1200 },
- { 2400, MAX3107_BRG13_B2400 },
- { 4800, MAX3107_BRG13_B4800 },
- { 9600, MAX3107_BRG13_B9600 },
- { 19200, MAX3107_BRG13_B19200 },
- { 57600, MAX3107_BRG13_B57600 },
- { 115200, MAX3107_BRG13_B115200 },
- { 230400, MAX3107_BRG13_B230400 },
- { 460800, MAX3107_BRG13_B460800 },
- { 921600, MAX3107_BRG13_B921600 },
- { 0, 0 }
-};
-
-static void max3107_aava_init(struct max3107_port *s)
-{
- /*override for AAVA SC specific*/
- if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC) {
- if (get_koski_build_id() <= KOSKI_EV2)
- if (s->ext_clk) {
- s->brg_cfg = MAX3107_BRG13_B9600;
- s->baud_tbl = (struct baud_table *)brg13_ext;
- }
- }
-}
-#endif
-
-static int __devexit max3107_aava_remove(struct spi_device *spi)
-{
- struct max3107_port *s = dev_get_drvdata(&spi->dev);
-
- /* Remove GPIO chip */
- if (gpiochip_remove(&s->chip))
- dev_warn(&spi->dev, "Removing GPIO chip failed\n");
-
- /* Then do the default remove */
- return max3107_remove(spi);
-}
-
-/* Platform data */
-static struct max3107_plat aava_plat_data = {
- .loopback = 0,
- .ext_clk = 1,
-/* .init = max3107_aava_init, */
- .configure = max3107_aava_configure,
- .hw_suspend = max3107_hw_susp,
- .polled_mode = 0,
- .poll_time = 0,
-};
-
-
-static int __devinit max3107_probe_aava(struct spi_device *spi)
-{
- int err = max3107_aava_reset(spi);
- if (err < 0)
- return err;
- return max3107_probe(spi, &aava_plat_data);
-}
-
-/* Spi driver data */
-static struct spi_driver max3107_driver = {
- .driver = {
- .name = "aava-max3107",
- .bus = &spi_bus_type,
- .owner = THIS_MODULE,
- },
- .probe = max3107_probe_aava,
- .remove = __devexit_p(max3107_aava_remove),
- .suspend = max3107_suspend,
- .resume = max3107_resume,
-};
-
-/* Driver init function */
-static int __init max3107_init(void)
-{
- return spi_register_driver(&max3107_driver);
-}
-
-/* Driver exit function */
-static void __exit max3107_exit(void)
-{
- spi_unregister_driver(&max3107_driver);
-}
-
-module_init(max3107_init);
-module_exit(max3107_exit);
-
-MODULE_DESCRIPTION("MAX3107 driver");
-MODULE_AUTHOR("Aavamobile");
-MODULE_ALIAS("spi:aava-max3107");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/max3107.c b/drivers/tty/serial/max3107.c
index 7827000..17c7ba8 100644
--- a/drivers/tty/serial/max3107.c
+++ b/drivers/tty/serial/max3107.c
@@ -1181,7 +1181,6 @@ static int max3107_probe_generic(struct spi_device *spi)
static struct spi_driver max3107_driver = {
.driver = {
.name = "max3107",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = max3107_probe_generic,
diff --git a/drivers/tty/serial/mfd.c b/drivers/tty/serial/mfd.c
index e272d39..a9234ba 100644
--- a/drivers/tty/serial/mfd.c
+++ b/drivers/tty/serial/mfd.c
@@ -1154,7 +1154,6 @@ serial_hsu_console_setup(struct console *co, char *options)
int bits = 8;
int parity = 'n';
int flow = 'n';
- int ret;
if (co->index == -1 || co->index >= serial_hsu_reg.nr)
co->index = 0;
@@ -1165,9 +1164,7 @@ serial_hsu_console_setup(struct console *co, char *options)
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
- ret = uart_set_options(&up->port, co, baud, parity, bits, flow);
-
- return ret;
+ return uart_set_options(&up->port, co, baud, parity, bits, flow);
}
static struct console serial_hsu_console = {
@@ -1176,9 +1173,13 @@ static struct console serial_hsu_console = {
.device = uart_console_device,
.setup = serial_hsu_console_setup,
.flags = CON_PRINTBUFFER,
- .index = 2,
+ .index = -1,
.data = &serial_hsu_reg,
};
+
+#define SERIAL_HSU_CONSOLE (&serial_hsu_console)
+#else
+#define SERIAL_HSU_CONSOLE NULL
#endif
struct uart_ops serial_hsu_pops = {
@@ -1208,6 +1209,7 @@ static struct uart_driver serial_hsu_reg = {
.major = TTY_MAJOR,
.minor = 128,
.nr = 3,
+ .cons = SERIAL_HSU_CONSOLE,
};
#ifdef CONFIG_PM
@@ -1342,12 +1344,6 @@ static int serial_hsu_probe(struct pci_dev *pdev,
}
uart_add_one_port(&serial_hsu_reg, &uport->port);
-#ifdef CONFIG_SERIAL_MFD_HSU_CONSOLE
- if (index == 2) {
- register_console(&serial_hsu_console);
- uport->port.cons = &serial_hsu_console;
- }
-#endif
pci_set_drvdata(pdev, uport);
}
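In mfd.c the console is no longer registered by hand from the PCI probe routine; instead a SERIAL_HSU_CONSOLE macro resolves to the console (or NULL when console support is compiled out) and is plugged into the uart_driver's .cons field, so the serial core registers it when the matching port is added. A sketch of that wiring, with hypothetical demo names and a stub write routine:

#include <linux/console.h>
#include <linux/module.h>
#include <linux/serial_core.h>

static struct uart_driver demo_uart_driver;

#ifdef CONFIG_DEMO_UART_CONSOLE
static void demo_console_write(struct console *co, const char *s,
			       unsigned int count)
{
	/* real drivers push characters out with uart_console_write() */
}

static struct console demo_console = {
	.name	= "ttyDM",
	.write	= demo_console_write,
	.device	= uart_console_device,
	.flags	= CON_PRINTBUFFER,
	.index	= -1,			/* let the core match the port */
	.data	= &demo_uart_driver,
};
#define DEMO_CONSOLE	(&demo_console)
#else
#define DEMO_CONSOLE	NULL
#endif

static struct uart_driver demo_uart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= "demo_uart",
	.dev_name	= "ttyDM",
	.nr		= 3,
	.cons		= DEMO_CONSOLE,	/* registered via uart_add_one_port() */
};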
diff --git a/drivers/tty/serial/mrst_max3110.c b/drivers/tty/serial/mrst_max3110.c
index 4c309e8..df2a224 100644
--- a/drivers/tty/serial/mrst_max3110.c
+++ b/drivers/tty/serial/mrst_max3110.c
@@ -876,7 +876,6 @@ static int __devexit serial_m3110_remove(struct spi_device *dev)
static struct spi_driver uart_max3110_driver = {
.driver = {
.name = "spi_max3111",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = serial_m3110_probe,
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 60c6eb8..5e85e1e 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -422,9 +422,9 @@ static int __devexit msm_hs_remove(struct platform_device *pdev)
msm_uport->rx.rbuffer);
dma_pool_destroy(msm_uport->rx.pool);
- dma_unmap_single(dev, msm_uport->rx.cmdptr_dmaaddr, sizeof(u32 *),
+ dma_unmap_single(dev, msm_uport->rx.cmdptr_dmaaddr, sizeof(u32),
DMA_TO_DEVICE);
- dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr_ptr, sizeof(u32 *),
+ dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr_ptr, sizeof(u32),
DMA_TO_DEVICE);
dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr, sizeof(dmov_box),
DMA_TO_DEVICE);
@@ -812,7 +812,7 @@ static void msm_hs_submit_tx_locked(struct uart_port *uport)
*tx->command_ptr_ptr = CMD_PTR_LP | DMOV_CMD_ADDR(tx->mapped_cmd_ptr);
dma_sync_single_for_device(uport->dev, tx->mapped_cmd_ptr_ptr,
- sizeof(u32 *), DMA_TO_DEVICE);
+ sizeof(u32), DMA_TO_DEVICE);
/* Save tx_count to use in Callback */
tx->tx_count = tx_count;
@@ -1087,12 +1087,10 @@ static void msm_hs_config_port(struct uart_port *uport, int cfg_flags)
}
/* Handle CTS changes (Called from interrupt handler) */
-static void msm_hs_handle_delta_cts(struct uart_port *uport)
+static void msm_hs_handle_delta_cts_locked(struct uart_port *uport)
{
- unsigned long flags;
struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
- spin_lock_irqsave(&uport->lock, flags);
clk_enable(msm_uport->clk);
/* clear interrupt */
@@ -1100,7 +1098,6 @@ static void msm_hs_handle_delta_cts(struct uart_port *uport)
uport->icount.cts++;
clk_disable(msm_uport->clk);
- spin_unlock_irqrestore(&uport->lock, flags);
/* clear the IOCTL TIOCMIWAIT if called */
wake_up_interruptible(&uport->state->port.delta_msr_wait);
@@ -1248,7 +1245,7 @@ static irqreturn_t msm_hs_isr(int irq, void *dev)
/* Change in CTS interrupt */
if (isr_status & UARTDM_ISR_DELTA_CTS_BMSK)
- msm_hs_handle_delta_cts(uport);
+ msm_hs_handle_delta_cts_locked(uport);
spin_unlock_irqrestore(&uport->lock, flags);
@@ -1537,7 +1534,7 @@ static int __devinit uartdm_init_port(struct uart_port *uport)
if (!tx->command_ptr)
return -ENOMEM;
- tx->command_ptr_ptr = kmalloc(sizeof(u32 *), GFP_KERNEL | __GFP_DMA);
+ tx->command_ptr_ptr = kmalloc(sizeof(u32), GFP_KERNEL | __GFP_DMA);
if (!tx->command_ptr_ptr) {
ret = -ENOMEM;
goto err_tx_command_ptr_ptr;
@@ -1547,7 +1544,7 @@ static int __devinit uartdm_init_port(struct uart_port *uport)
sizeof(dmov_box), DMA_TO_DEVICE);
tx->mapped_cmd_ptr_ptr = dma_map_single(uport->dev,
tx->command_ptr_ptr,
- sizeof(u32 *), DMA_TO_DEVICE);
+ sizeof(u32), DMA_TO_DEVICE);
tx->xfer.cmdptr = DMOV_CMD_ADDR(tx->mapped_cmd_ptr_ptr);
init_waitqueue_head(&rx->wait);
@@ -1575,7 +1572,7 @@ static int __devinit uartdm_init_port(struct uart_port *uport)
goto err_rx_command_ptr;
}
- rx->command_ptr_ptr = kmalloc(sizeof(u32 *), GFP_KERNEL | __GFP_DMA);
+ rx->command_ptr_ptr = kmalloc(sizeof(u32), GFP_KERNEL | __GFP_DMA);
if (!rx->command_ptr_ptr) {
pr_err("%s(): cannot allocate rx->command_ptr_ptr", __func__);
ret = -ENOMEM;
@@ -1593,7 +1590,7 @@ static int __devinit uartdm_init_port(struct uart_port *uport)
*rx->command_ptr_ptr = CMD_PTR_LP | DMOV_CMD_ADDR(rx->mapped_cmd_ptr);
rx->cmdptr_dmaaddr = dma_map_single(uport->dev, rx->command_ptr_ptr,
- sizeof(u32 *), DMA_TO_DEVICE);
+ sizeof(u32), DMA_TO_DEVICE);
rx->xfer.cmdptr = DMOV_CMD_ADDR(rx->cmdptr_dmaaddr);
INIT_WORK(&rx->tty_work, msm_hs_tty_flip_buffer_work);
@@ -1609,7 +1606,7 @@ err_dma_pool_alloc:
dma_pool_destroy(msm_uport->rx.pool);
err_dma_pool_create:
dma_unmap_single(uport->dev, msm_uport->tx.mapped_cmd_ptr_ptr,
- sizeof(u32 *), DMA_TO_DEVICE);
+ sizeof(u32), DMA_TO_DEVICE);
dma_unmap_single(uport->dev, msm_uport->tx.mapped_cmd_ptr,
sizeof(dmov_box), DMA_TO_DEVICE);
kfree(msm_uport->tx.command_ptr_ptr);
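The msm_serial_hs changes replace sizeof(u32 *) with sizeof(u32) everywhere the single 32-bit command-pointer word is allocated, mapped and synced: the hardware consumes one u32, so the size should come from the pointee, not from the pointer type (which is 8 bytes on 64-bit builds). A small sketch of the corrected allocation and mapping, with a hypothetical helper name:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Allocate and map a single 32-bit DMA command-pointer word. */
static u32 *demo_map_cmdptr(struct device *dev, dma_addr_t *dma)
{
	u32 *cmdptr = kmalloc(sizeof(u32), GFP_KERNEL | __GFP_DMA);

	if (!cmdptr)
		return NULL;

	/* size the mapping by the pointee, not by sizeof(u32 *) */
	*dma = dma_map_single(dev, cmdptr, sizeof(u32), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		kfree(cmdptr);
		return NULL;
	}
	return cmdptr;
}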
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 7e02c9c..55fd362 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -145,11 +145,12 @@ static inline void mxs_auart_tx_chars(struct mxs_auart_port *s)
writel(xmit->buf[xmit->tail],
s->port.membase + AUART_DATA);
xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
- if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
- uart_write_wakeup(&s->port);
} else
break;
}
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(&s->port);
+
if (uart_circ_empty(&(s->port.state->xmit)))
writel(AUART_INTR_TXIEN,
s->port.membase + AUART_INTR_CLR);
@@ -424,7 +425,7 @@ static int mxs_auart_startup(struct uart_port *u)
{
struct mxs_auart_port *s = to_auart_port(u);
- clk_enable(s->clk);
+ clk_prepare_enable(s->clk);
writel(AUART_CTRL0_CLKGATE, u->membase + AUART_CTRL0_CLR);
@@ -453,7 +454,7 @@ static void mxs_auart_shutdown(struct uart_port *u)
writel(AUART_INTR_RXIEN | AUART_INTR_RTIEN | AUART_INTR_CTSMIEN,
u->membase + AUART_INTR_CLR);
- clk_disable(s->clk);
+ clk_disable_unprepare(s->clk);
}
static unsigned int mxs_auart_tx_empty(struct uart_port *u)
@@ -634,7 +635,7 @@ auart_console_setup(struct console *co, char *options)
if (!s)
return -ENODEV;
- clk_enable(s->clk);
+ clk_prepare_enable(s->clk);
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -643,7 +644,7 @@ auart_console_setup(struct console *co, char *options)
ret = uart_set_options(&s->port, co, baud, parity, bits, flow);
- clk_disable(s->clk);
+ clk_disable_unprepare(s->clk);
return ret;
}
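mxs-auart switches from clk_enable()/clk_disable() to clk_prepare_enable()/clk_disable_unprepare(): under the common clock framework a clock must be prepared (which may sleep) before it can be enabled, and the combined helpers keep the two steps balanced. A sketch of the pairing, assuming a hypothetical port structure that already holds a struct clk pointer:

#include <linux/clk.h>

struct demo_auart_port {
	struct clk *clk;
};

static int demo_auart_startup(struct demo_auart_port *s)
{
	/* prepare (may sleep) + enable in one call */
	return clk_prepare_enable(s->clk);
}

static void demo_auart_shutdown(struct demo_auart_port *s)
{
	/* disable + unprepare, mirroring the startup path */
	clk_disable_unprepare(s->clk);
}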
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 5e713d3..f809041 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -37,17 +37,31 @@
#include <linux/clk.h>
#include <linux/serial_core.h>
#include <linux/irq.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
#include <plat/dma.h>
#include <plat/dmtimer.h>
#include <plat/omap-serial.h>
+#define DEFAULT_CLK_SPEED 48000000 /* 48Mhz*/
+
+/* SCR register bitmasks */
+#define OMAP_UART_SCR_RX_TRIG_GRANU1_MASK (1 << 7)
+
+/* FCR register bitmasks */
+#define OMAP_UART_FCR_RX_FIFO_TRIG_SHIFT 6
+#define OMAP_UART_FCR_RX_FIFO_TRIG_MASK (0x3 << 6)
+
static struct uart_omap_port *ui[OMAP_MAX_HSUART_PORTS];
/* Forward declaration of functions */
static void uart_tx_dma_callback(int lch, u16 ch_status, void *data);
-static void serial_omap_rx_timeout(unsigned long uart_no);
+static void serial_omap_rxdma_poll(unsigned long uart_no);
static int serial_omap_start_rxdma(struct uart_omap_port *up);
+static void serial_omap_mdr1_errataset(struct uart_omap_port *up, u8 mdr1);
+
+static struct workqueue_struct *serial_omap_uart_wq;
static inline unsigned int serial_in(struct uart_omap_port *up, int offset)
{
@@ -102,6 +116,8 @@ static void serial_omap_stop_rxdma(struct uart_omap_port *up)
omap_free_dma(up->uart_dma.rx_dma_channel);
up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE;
up->uart_dma.rx_dma_used = false;
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
}
}
@@ -109,14 +125,18 @@ static void serial_omap_enable_ms(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
- dev_dbg(up->port.dev, "serial_omap_enable_ms+%d\n", up->pdev->id);
+ dev_dbg(up->port.dev, "serial_omap_enable_ms+%d\n", up->port.line);
+
+ pm_runtime_get_sync(&up->pdev->dev);
up->ier |= UART_IER_MSI;
serial_out(up, UART_IER, up->ier);
+ pm_runtime_put(&up->pdev->dev);
}
static void serial_omap_stop_tx(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
+ struct omap_uart_port_info *pdata = up->pdev->dev.platform_data;
if (up->use_dma &&
up->uart_dma.tx_dma_channel != OMAP_UART_DMA_CH_FREE) {
@@ -129,30 +149,43 @@ static void serial_omap_stop_tx(struct uart_port *port)
omap_stop_dma(up->uart_dma.tx_dma_channel);
omap_free_dma(up->uart_dma.tx_dma_channel);
up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE;
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
}
+ pm_runtime_get_sync(&up->pdev->dev);
if (up->ier & UART_IER_THRI) {
up->ier &= ~UART_IER_THRI;
serial_out(up, UART_IER, up->ier);
}
+
+ if (!up->use_dma && pdata->set_forceidle)
+ pdata->set_forceidle(up->pdev);
+
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
}
static void serial_omap_stop_rx(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
+ pm_runtime_get_sync(&up->pdev->dev);
if (up->use_dma)
serial_omap_stop_rxdma(up);
up->ier &= ~UART_IER_RLSI;
up->port.read_status_mask &= ~UART_LSR_DR;
serial_out(up, UART_IER, up->ier);
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
}
-static inline void receive_chars(struct uart_omap_port *up, int *status)
+static inline void receive_chars(struct uart_omap_port *up,
+ unsigned int *status)
{
struct tty_struct *tty = up->port.state->port.tty;
- unsigned int flag;
- unsigned char ch, lsr = *status;
+ unsigned int flag, lsr = *status;
+ unsigned char ch = 0;
int max_count = 256;
do {
@@ -257,12 +290,18 @@ static inline void serial_omap_enable_ier_thri(struct uart_omap_port *up)
static void serial_omap_start_tx(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
+ struct omap_uart_port_info *pdata = up->pdev->dev.platform_data;
struct circ_buf *xmit;
unsigned int start;
int ret = 0;
if (!up->use_dma) {
+ pm_runtime_get_sync(&up->pdev->dev);
serial_omap_enable_ier_thri(up);
+ if (pdata->set_noidle)
+ pdata->set_noidle(up->pdev);
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
return;
}
@@ -272,6 +311,7 @@ static void serial_omap_start_tx(struct uart_port *port)
xmit = &up->port.state->xmit;
if (up->uart_dma.tx_dma_channel == OMAP_UART_DMA_CH_FREE) {
+ pm_runtime_get_sync(&up->pdev->dev);
ret = omap_request_dma(up->uart_dma.uart_dma_tx,
"UART Tx DMA",
(void *)uart_tx_dma_callback, up,
@@ -354,9 +394,13 @@ static inline irqreturn_t serial_omap_irq(int irq, void *dev_id)
unsigned int iir, lsr;
unsigned long flags;
+ pm_runtime_get_sync(&up->pdev->dev);
iir = serial_in(up, UART_IIR);
- if (iir & UART_IIR_NO_INT)
+ if (iir & UART_IIR_NO_INT) {
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
return IRQ_NONE;
+ }
spin_lock_irqsave(&up->port.lock, flags);
lsr = serial_in(up, UART_LSR);
@@ -378,6 +422,9 @@ static inline irqreturn_t serial_omap_irq(int irq, void *dev_id)
transmit_chars(up);
spin_unlock_irqrestore(&up->port.lock, flags);
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
+
up->port_activity = jiffies;
return IRQ_HANDLED;
}
@@ -388,22 +435,26 @@ static unsigned int serial_omap_tx_empty(struct uart_port *port)
unsigned long flags = 0;
unsigned int ret = 0;
- dev_dbg(up->port.dev, "serial_omap_tx_empty+%d\n", up->pdev->id);
+ pm_runtime_get_sync(&up->pdev->dev);
+ dev_dbg(up->port.dev, "serial_omap_tx_empty+%d\n", up->port.line);
spin_lock_irqsave(&up->port.lock, flags);
ret = serial_in(up, UART_LSR) & UART_LSR_TEMT ? TIOCSER_TEMT : 0;
spin_unlock_irqrestore(&up->port.lock, flags);
-
+ pm_runtime_put(&up->pdev->dev);
return ret;
}
static unsigned int serial_omap_get_mctrl(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
- unsigned char status;
+ unsigned int status;
unsigned int ret = 0;
+ pm_runtime_get_sync(&up->pdev->dev);
status = check_modem_status(up);
- dev_dbg(up->port.dev, "serial_omap_get_mctrl+%d\n", up->pdev->id);
+ pm_runtime_put(&up->pdev->dev);
+
+ dev_dbg(up->port.dev, "serial_omap_get_mctrl+%d\n", up->port.line);
if (status & UART_MSR_DCD)
ret |= TIOCM_CAR;
@@ -421,7 +472,7 @@ static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl)
struct uart_omap_port *up = (struct uart_omap_port *)port;
unsigned char mcr = 0;
- dev_dbg(up->port.dev, "serial_omap_set_mctrl+%d\n", up->pdev->id);
+ dev_dbg(up->port.dev, "serial_omap_set_mctrl+%d\n", up->port.line);
if (mctrl & TIOCM_RTS)
mcr |= UART_MCR_RTS;
if (mctrl & TIOCM_DTR)
@@ -433,8 +484,11 @@ static void serial_omap_set_mctrl(struct uart_port *port, unsigned int mctrl)
if (mctrl & TIOCM_LOOP)
mcr |= UART_MCR_LOOP;
- mcr |= up->mcr;
- serial_out(up, UART_MCR, mcr);
+ pm_runtime_get_sync(&up->pdev->dev);
+ up->mcr = serial_in(up, UART_MCR);
+ up->mcr |= mcr;
+ serial_out(up, UART_MCR, up->mcr);
+ pm_runtime_put(&up->pdev->dev);
}
static void serial_omap_break_ctl(struct uart_port *port, int break_state)
@@ -442,7 +496,8 @@ static void serial_omap_break_ctl(struct uart_port *port, int break_state)
struct uart_omap_port *up = (struct uart_omap_port *)port;
unsigned long flags = 0;
- dev_dbg(up->port.dev, "serial_omap_break_ctl+%d\n", up->pdev->id);
+ dev_dbg(up->port.dev, "serial_omap_break_ctl+%d\n", up->port.line);
+ pm_runtime_get_sync(&up->pdev->dev);
spin_lock_irqsave(&up->port.lock, flags);
if (break_state == -1)
up->lcr |= UART_LCR_SBC;
@@ -450,6 +505,7 @@ static void serial_omap_break_ctl(struct uart_port *port, int break_state)
up->lcr &= ~UART_LCR_SBC;
serial_out(up, UART_LCR, up->lcr);
spin_unlock_irqrestore(&up->port.lock, flags);
+ pm_runtime_put(&up->pdev->dev);
}
static int serial_omap_startup(struct uart_port *port)
@@ -466,8 +522,9 @@ static int serial_omap_startup(struct uart_port *port)
if (retval)
return retval;
- dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->pdev->id);
+ dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line);
+ pm_runtime_get_sync(&up->pdev->dev);
/*
* Clear the FIFO buffers and disable them.
* (they will be reenabled in set_termios())
@@ -505,8 +562,8 @@ static int serial_omap_startup(struct uart_port *port)
(dma_addr_t *)&(up->uart_dma.tx_buf_dma_phys),
0);
init_timer(&(up->uart_dma.rx_timer));
- up->uart_dma.rx_timer.function = serial_omap_rx_timeout;
- up->uart_dma.rx_timer.data = up->pdev->id;
+ up->uart_dma.rx_timer.function = serial_omap_rxdma_poll;
+ up->uart_dma.rx_timer.data = up->port.line;
/* Currently the buffer size is 4KB. Can increase it */
up->uart_dma.rx_buf = dma_alloc_coherent(NULL,
up->uart_dma.rx_buf_size,
@@ -523,6 +580,8 @@ static int serial_omap_startup(struct uart_port *port)
/* Enable module level wake up */
serial_out(up, UART_OMAP_WER, OMAP_UART_WER_MOD_WKUP);
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
up->port_activity = jiffies;
return 0;
}
@@ -532,7 +591,9 @@ static void serial_omap_shutdown(struct uart_port *port)
struct uart_omap_port *up = (struct uart_omap_port *)port;
unsigned long flags = 0;
- dev_dbg(up->port.dev, "serial_omap_shutdown+%d\n", up->pdev->id);
+ dev_dbg(up->port.dev, "serial_omap_shutdown+%d\n", up->port.line);
+
+ pm_runtime_get_sync(&up->pdev->dev);
/*
* Disable interrupts from this port
*/
@@ -566,6 +627,8 @@ static void serial_omap_shutdown(struct uart_port *port)
up->uart_dma.rx_buf_dma_phys);
up->uart_dma.rx_buf = NULL;
}
+
+ pm_runtime_put(&up->pdev->dev);
free_irq(up->port.irq, up);
}
@@ -573,8 +636,6 @@ static inline void
serial_omap_configure_xonxoff
(struct uart_omap_port *up, struct ktermios *termios)
{
- unsigned char efr = 0;
-
up->lcr = serial_in(up, UART_LCR);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
up->efr = serial_in(up, UART_EFR);
@@ -584,8 +645,7 @@ serial_omap_configure_xonxoff
serial_out(up, UART_XOFF1, termios->c_cc[VSTOP]);
/* clear SW control mode bits */
- efr = up->efr;
- efr &= OMAP_UART_SW_CLR;
+ up->efr &= OMAP_UART_SW_CLR;
/*
* IXON Flag:
@@ -593,7 +653,7 @@ serial_omap_configure_xonxoff
* Transmit XON1, XOFF1
*/
if (termios->c_iflag & IXON)
- efr |= OMAP_UART_SW_TX;
+ up->efr |= OMAP_UART_SW_TX;
/*
* IXOFF Flag:
@@ -601,7 +661,7 @@ serial_omap_configure_xonxoff
* Receiver compares XON1, XOFF1.
*/
if (termios->c_iflag & IXOFF)
- efr |= OMAP_UART_SW_RX;
+ up->efr |= OMAP_UART_SW_RX;
serial_out(up, UART_EFR, up->efr | UART_EFR_ECB);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
@@ -624,13 +684,21 @@ serial_omap_configure_xonxoff
* load the new software flow control mode IXON or IXOFF
* and restore the UARTi.EFR_REG[4] ENHANCED_EN value.
*/
- serial_out(up, UART_EFR, efr | UART_EFR_SCD);
+ serial_out(up, UART_EFR, up->efr | UART_EFR_SCD);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_out(up, UART_MCR, up->mcr & ~UART_MCR_TCRTLR);
serial_out(up, UART_LCR, up->lcr);
}
+static void serial_omap_uart_qos_work(struct work_struct *work)
+{
+ struct uart_omap_port *up = container_of(work, struct uart_omap_port,
+ qos_work);
+
+ pm_qos_update_request(&up->pm_qos_request, up->latency);
+}
+
static void
serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
@@ -671,6 +739,15 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/13);
quot = serial_omap_get_divisor(port, baud);
+ /* calculate wakeup latency constraint */
+ up->calc_latency = (USEC_PER_SEC * up->port.fifosize) / (baud / 8);
+ up->latency = up->calc_latency;
+ schedule_work(&up->qos_work);
+
+ up->dll = quot & 0xff;
+ up->dlh = quot >> 8;
+ up->mdr1 = UART_OMAP_MDR1_DISABLE;
+
up->fcr = UART_FCR_R_TRIG_01 | UART_FCR_T_TRIG_01 |
UART_FCR_ENABLE_FIFO;
if (up->use_dma)
@@ -680,6 +757,7 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
* Ok, we're now changing the port state. Do it with
* interrupts disabled.
*/
+ pm_runtime_get_sync(&up->pdev->dev);
spin_lock_irqsave(&up->port.lock, flags);
/*
@@ -723,6 +801,8 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
up->ier |= UART_IER_MSI;
serial_out(up, UART_IER, up->ier);
serial_out(up, UART_LCR, cval); /* reset DLAB */
+ up->lcr = cval;
+ up->scr = OMAP_UART_SCR_TX_EMPTY;
/* FIFOs and DMA Settings */
@@ -744,22 +824,34 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
up->mcr = serial_in(up, UART_MCR);
serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
/* FIFO ENABLE, DMA MODE */
- serial_out(up, UART_FCR, up->fcr);
- serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+
+ up->scr |= OMAP_UART_SCR_RX_TRIG_GRANU1_MASK;
if (up->use_dma) {
serial_out(up, UART_TI752_TLR, 0);
- serial_out(up, UART_OMAP_SCR,
- (UART_FCR_TRIGGER_4 | UART_FCR_TRIGGER_8));
+ up->scr |= UART_FCR_TRIGGER_4;
+ } else {
+ /* Set receive FIFO threshold to 1 byte */
+ up->fcr &= ~OMAP_UART_FCR_RX_FIFO_TRIG_MASK;
+ up->fcr |= (0x1 << OMAP_UART_FCR_RX_FIFO_TRIG_SHIFT);
}
+ serial_out(up, UART_FCR, up->fcr);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
+
+ serial_out(up, UART_OMAP_SCR, up->scr);
+
serial_out(up, UART_EFR, up->efr);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
serial_out(up, UART_MCR, up->mcr);
/* Protocol, Baud Rate, and Interrupt Settings */
- serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE);
+ if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
+ serial_omap_mdr1_errataset(up, up->mdr1);
+ else
+ serial_out(up, UART_OMAP_MDR1, up->mdr1);
+
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
up->efr = serial_in(up, UART_EFR);
@@ -769,8 +861,8 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
serial_out(up, UART_IER, 0);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
- serial_out(up, UART_DLL, quot & 0xff); /* LS of divisor */
- serial_out(up, UART_DLM, quot >> 8); /* MS of divisor */
+ serial_out(up, UART_DLL, up->dll); /* LS of divisor */
+ serial_out(up, UART_DLM, up->dlh); /* MS of divisor */
serial_out(up, UART_LCR, 0);
serial_out(up, UART_IER, up->ier);
@@ -780,9 +872,14 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
serial_out(up, UART_LCR, cval);
if (baud > 230400 && baud != 3000000)
- serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_13X_MODE);
+ up->mdr1 = UART_OMAP_MDR1_13X_MODE;
+ else
+ up->mdr1 = UART_OMAP_MDR1_16X_MODE;
+
+ if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
+ serial_omap_mdr1_errataset(up, up->mdr1);
else
- serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_16X_MODE);
+ serial_out(up, UART_OMAP_MDR1, up->mdr1);
/* Hardware Flow Control Configuration */
@@ -809,7 +906,8 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
serial_omap_configure_xonxoff(up, termios);
spin_unlock_irqrestore(&up->port.lock, flags);
- dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->pdev->id);
+ pm_runtime_put(&up->pdev->dev);
+ dev_dbg(up->port.dev, "serial_omap_set_termios+%d\n", up->port.line);
}
static void
@@ -819,7 +917,9 @@ serial_omap_pm(struct uart_port *port, unsigned int state,
struct uart_omap_port *up = (struct uart_omap_port *)port;
unsigned char efr;
- dev_dbg(up->port.dev, "serial_omap_pm+%d\n", up->pdev->id);
+ dev_dbg(up->port.dev, "serial_omap_pm+%d\n", up->port.line);
+
+ pm_runtime_get_sync(&up->pdev->dev);
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
efr = serial_in(up, UART_EFR);
serial_out(up, UART_EFR, efr | UART_EFR_ECB);
@@ -829,6 +929,15 @@ serial_omap_pm(struct uart_port *port, unsigned int state,
serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(up, UART_EFR, efr);
serial_out(up, UART_LCR, 0);
+
+ if (!device_may_wakeup(&up->pdev->dev)) {
+ if (!state)
+ pm_runtime_forbid(&up->pdev->dev);
+ else
+ pm_runtime_allow(&up->pdev->dev);
+ }
+
+ pm_runtime_put(&up->pdev->dev);
}
static void serial_omap_release_port(struct uart_port *port)
@@ -847,7 +956,7 @@ static void serial_omap_config_port(struct uart_port *port, int flags)
struct uart_omap_port *up = (struct uart_omap_port *)port;
dev_dbg(up->port.dev, "serial_omap_config_port+%d\n",
- up->pdev->id);
+ up->port.line);
up->port.type = PORT_OMAP;
}
@@ -864,7 +973,7 @@ serial_omap_type(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
- dev_dbg(up->port.dev, "serial_omap_type+%d\n", up->pdev->id);
+ dev_dbg(up->port.dev, "serial_omap_type+%d\n", up->port.line);
return up->name;
}
@@ -906,19 +1015,26 @@ static inline void wait_for_xmitr(struct uart_omap_port *up)
static void serial_omap_poll_put_char(struct uart_port *port, unsigned char ch)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
+
+ pm_runtime_get_sync(&up->pdev->dev);
wait_for_xmitr(up);
serial_out(up, UART_TX, ch);
+ pm_runtime_put(&up->pdev->dev);
}
static int serial_omap_poll_get_char(struct uart_port *port)
{
struct uart_omap_port *up = (struct uart_omap_port *)port;
- unsigned int status = serial_in(up, UART_LSR);
+ unsigned int status;
+ pm_runtime_get_sync(&up->pdev->dev);
+ status = serial_in(up, UART_LSR);
if (!(status & UART_LSR_DR))
return NO_POLL_CHAR;
- return serial_in(up, UART_RX);
+ status = serial_in(up, UART_RX);
+ pm_runtime_put(&up->pdev->dev);
+ return status;
}
#endif /* CONFIG_CONSOLE_POLL */
@@ -946,6 +1062,8 @@ serial_omap_console_write(struct console *co, const char *s,
unsigned int ier;
int locked = 1;
+ pm_runtime_get_sync(&up->pdev->dev);
+
local_irq_save(flags);
if (up->port.sysrq)
locked = 0;
@@ -978,6 +1096,8 @@ serial_omap_console_write(struct console *co, const char *s,
if (up->msr_saved_flags)
check_modem_status(up);
+ pm_runtime_mark_last_busy(&up->pdev->dev);
+ pm_runtime_put_autosuspend(&up->pdev->dev);
if (locked)
spin_unlock(&up->port.lock);
local_irq_restore(flags);
@@ -1014,7 +1134,7 @@ static struct console serial_omap_console = {
static void serial_omap_add_console_port(struct uart_omap_port *up)
{
- serial_omap_console_ports[up->pdev->id] = up;
+ serial_omap_console_ports[up->port.line] = up;
}
#define OMAP_CONSOLE (&serial_omap_console)
@@ -1060,26 +1180,30 @@ static struct uart_driver serial_omap_reg = {
.cons = OMAP_CONSOLE,
};
-static int
-serial_omap_suspend(struct platform_device *pdev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int serial_omap_suspend(struct device *dev)
{
- struct uart_omap_port *up = platform_get_drvdata(pdev);
+ struct uart_omap_port *up = dev_get_drvdata(dev);
- if (up)
+ if (up) {
uart_suspend_port(&serial_omap_reg, &up->port);
+ flush_work_sync(&up->qos_work);
+ }
+
return 0;
}
-static int serial_omap_resume(struct platform_device *dev)
+static int serial_omap_resume(struct device *dev)
{
- struct uart_omap_port *up = platform_get_drvdata(dev);
+ struct uart_omap_port *up = dev_get_drvdata(dev);
if (up)
uart_resume_port(&serial_omap_reg, &up->port);
return 0;
}
+#endif
-static void serial_omap_rx_timeout(unsigned long uart_no)
+static void serial_omap_rxdma_poll(unsigned long uart_no)
{
struct uart_omap_port *up = ui[uart_no];
unsigned int curr_dma_pos, curr_transmitted_size;
@@ -1089,9 +1213,9 @@ static void serial_omap_rx_timeout(unsigned long uart_no)
if ((curr_dma_pos == up->uart_dma.prev_rx_dma_pos) ||
(curr_dma_pos == 0)) {
if (jiffies_to_msecs(jiffies - up->port_activity) <
- RX_TIMEOUT) {
+ up->uart_dma.rx_timeout) {
mod_timer(&up->uart_dma.rx_timer, jiffies +
- usecs_to_jiffies(up->uart_dma.rx_timeout));
+ usecs_to_jiffies(up->uart_dma.rx_poll_rate));
} else {
serial_omap_stop_rxdma(up);
up->ier |= (UART_IER_RDI | UART_IER_RLSI);
@@ -1120,7 +1244,7 @@ static void serial_omap_rx_timeout(unsigned long uart_no)
}
} else {
mod_timer(&up->uart_dma.rx_timer, jiffies +
- usecs_to_jiffies(up->uart_dma.rx_timeout));
+ usecs_to_jiffies(up->uart_dma.rx_poll_rate));
}
up->port_activity = jiffies;
}
@@ -1135,6 +1259,7 @@ static int serial_omap_start_rxdma(struct uart_omap_port *up)
int ret = 0;
if (up->uart_dma.rx_dma_channel == -1) {
+ pm_runtime_get_sync(&up->pdev->dev);
ret = omap_request_dma(up->uart_dma.uart_dma_rx,
"UART Rx DMA",
(void *)uart_rx_dma_callback, up,
@@ -1158,7 +1283,7 @@ static int serial_omap_start_rxdma(struct uart_omap_port *up)
/* FIXME: Cache maintenance needed here? */
omap_start_dma(up->uart_dma.rx_dma_channel);
mod_timer(&up->uart_dma.rx_timer, jiffies +
- usecs_to_jiffies(up->uart_dma.rx_timeout));
+ usecs_to_jiffies(up->uart_dma.rx_poll_rate));
up->uart_dma.rx_dma_used = true;
return ret;
}
@@ -1221,6 +1346,19 @@ static void uart_tx_dma_callback(int lch, u16 ch_status, void *data)
return;
}
+static struct omap_uart_port_info *of_get_uart_port_info(struct device *dev)
+{
+ struct omap_uart_port_info *omap_up_info;
+
+ omap_up_info = devm_kzalloc(dev, sizeof(*omap_up_info), GFP_KERNEL);
+ if (!omap_up_info)
+ return NULL; /* out of memory */
+
+ of_property_read_u32(dev->of_node, "clock-frequency",
+ &omap_up_info->uartclk);
+ return omap_up_info;
+}
+
static int serial_omap_probe(struct platform_device *pdev)
{
struct uart_omap_port *up;
@@ -1228,6 +1366,9 @@ static int serial_omap_probe(struct platform_device *pdev)
struct omap_uart_port_info *omap_up_info = pdev->dev.platform_data;
int ret = -ENOSPC;
+ if (pdev->dev.of_node)
+ omap_up_info = of_get_uart_port_info(&pdev->dev);
+
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
dev_err(&pdev->dev, "no mem resource?\n");
@@ -1263,7 +1404,6 @@ static int serial_omap_probe(struct platform_device *pdev)
ret = -ENOMEM;
goto do_release_region;
}
- sprintf(up->name, "OMAP UART%d", pdev->id);
up->pdev = pdev;
up->port.dev = &pdev->dev;
up->port.type = PORT_OMAP;
@@ -1273,34 +1413,74 @@ static int serial_omap_probe(struct platform_device *pdev)
up->port.regshift = 2;
up->port.fifosize = 64;
up->port.ops = &serial_omap_pops;
- up->port.line = pdev->id;
- up->port.membase = omap_up_info->membase;
- up->port.mapbase = omap_up_info->mapbase;
+ if (pdev->dev.of_node)
+ up->port.line = of_alias_get_id(pdev->dev.of_node, "serial");
+ else
+ up->port.line = pdev->id;
+
+ if (up->port.line < 0) {
+ dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n",
+ up->port.line);
+ ret = -ENODEV;
+ goto err;
+ }
+
+ sprintf(up->name, "OMAP UART%d", up->port.line);
+ up->port.mapbase = mem->start;
+ up->port.membase = ioremap(mem->start, resource_size(mem));
+ if (!up->port.membase) {
+ dev_err(&pdev->dev, "can't ioremap UART\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+
up->port.flags = omap_up_info->flags;
- up->port.irqflags = omap_up_info->irqflags;
up->port.uartclk = omap_up_info->uartclk;
+ if (!up->port.uartclk) {
+ up->port.uartclk = DEFAULT_CLK_SPEED;
+ dev_warn(&pdev->dev, "No clock speed specified: using default:"
+ "%d\n", DEFAULT_CLK_SPEED);
+ }
up->uart_dma.uart_base = mem->start;
+ up->errata = omap_up_info->errata;
if (omap_up_info->dma_enabled) {
up->uart_dma.uart_dma_tx = dma_tx->start;
up->uart_dma.uart_dma_rx = dma_rx->start;
up->use_dma = 1;
- up->uart_dma.rx_buf_size = 4096;
- up->uart_dma.rx_timeout = 2;
+ up->uart_dma.rx_buf_size = omap_up_info->dma_rx_buf_size;
+ up->uart_dma.rx_timeout = omap_up_info->dma_rx_timeout;
+ up->uart_dma.rx_poll_rate = omap_up_info->dma_rx_poll_rate;
spin_lock_init(&(up->uart_dma.tx_lock));
spin_lock_init(&(up->uart_dma.rx_lock));
up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE;
up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE;
}
- ui[pdev->id] = up;
+ up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
+ up->calc_latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
+ pm_qos_add_request(&up->pm_qos_request,
+ PM_QOS_CPU_DMA_LATENCY, up->latency);
+ serial_omap_uart_wq = create_singlethread_workqueue(up->name);
+ INIT_WORK(&up->qos_work, serial_omap_uart_qos_work);
+
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev,
+ omap_up_info->autosuspend_timeout);
+
+ pm_runtime_irq_safe(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
+
+ ui[up->port.line] = up;
serial_omap_add_console_port(up);
ret = uart_add_one_port(&serial_omap_reg, &up->port);
if (ret != 0)
goto do_release_region;
+ pm_runtime_put(&pdev->dev);
platform_set_drvdata(pdev, up);
return 0;
err:
@@ -1315,22 +1495,168 @@ static int serial_omap_remove(struct platform_device *dev)
{
struct uart_omap_port *up = platform_get_drvdata(dev);
- platform_set_drvdata(dev, NULL);
if (up) {
+ pm_runtime_disable(&up->pdev->dev);
uart_remove_one_port(&serial_omap_reg, &up->port);
+ pm_qos_remove_request(&up->pm_qos_request);
+
kfree(up);
}
+
+ platform_set_drvdata(dev, NULL);
+ return 0;
+}
+
+/*
+ * Work Around for Errata i202 (2430, 3430, 3630, 4430 and 4460)
+ * The access to uart register after MDR1 Access
+ * causes UART to corrupt data.
+ *
+ * Need a delay =
+ * 5 L4 clock cycles + 5 UART functional clock cycle (@48MHz = ~0.2uS)
+ * give 10 times as much
+ */
+static void serial_omap_mdr1_errataset(struct uart_omap_port *up, u8 mdr1)
+{
+ u8 timeout = 255;
+
+ serial_out(up, UART_OMAP_MDR1, mdr1);
+ udelay(2);
+ serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_XMIT |
+ UART_FCR_CLEAR_RCVR);
+ /*
+ * Wait for FIFO to empty: when empty, RX_FIFO_E bit is 0 and
+ * TX_FIFO_E bit is 1.
+ */
+ while (UART_LSR_THRE != (serial_in(up, UART_LSR) &
+ (UART_LSR_THRE | UART_LSR_DR))) {
+ timeout--;
+ if (!timeout) {
+ /* Should *never* happen. we warn and carry on */
+ dev_crit(&up->pdev->dev, "Errata i202: timedout %x\n",
+ serial_in(up, UART_LSR));
+ break;
+ }
+ udelay(1);
+ }
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static void serial_omap_restore_context(struct uart_omap_port *up)
+{
+ if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
+ serial_omap_mdr1_errataset(up, UART_OMAP_MDR1_DISABLE);
+ else
+ serial_out(up, UART_OMAP_MDR1, UART_OMAP_MDR1_DISABLE);
+
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */
+ serial_out(up, UART_EFR, UART_EFR_ECB);
+ serial_out(up, UART_LCR, 0x0); /* Operational mode */
+ serial_out(up, UART_IER, 0x0);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */
+ serial_out(up, UART_DLL, up->dll);
+ serial_out(up, UART_DLM, up->dlh);
+ serial_out(up, UART_LCR, 0x0); /* Operational mode */
+ serial_out(up, UART_IER, up->ier);
+ serial_out(up, UART_FCR, up->fcr);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A);
+ serial_out(up, UART_MCR, up->mcr);
+ serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); /* Config B mode */
+ serial_out(up, UART_OMAP_SCR, up->scr);
+ serial_out(up, UART_EFR, up->efr);
+ serial_out(up, UART_LCR, up->lcr);
+ if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
+ serial_omap_mdr1_errataset(up, up->mdr1);
+ else
+ serial_out(up, UART_OMAP_MDR1, up->mdr1);
+}
+
+static int serial_omap_runtime_suspend(struct device *dev)
+{
+ struct uart_omap_port *up = dev_get_drvdata(dev);
+ struct omap_uart_port_info *pdata = dev->platform_data;
+
+ if (!up)
+ return -EINVAL;
+
+ if (!pdata || !pdata->enable_wakeup)
+ return 0;
+
+ if (pdata->get_context_loss_count)
+ up->context_loss_cnt = pdata->get_context_loss_count(dev);
+
+ if (device_may_wakeup(dev)) {
+ if (!up->wakeups_enabled) {
+ pdata->enable_wakeup(up->pdev, true);
+ up->wakeups_enabled = true;
+ }
+ } else {
+ if (up->wakeups_enabled) {
+ pdata->enable_wakeup(up->pdev, false);
+ up->wakeups_enabled = false;
+ }
+ }
+
+ /* Errata i291 */
+ if (up->use_dma && pdata->set_forceidle &&
+ (up->errata & UART_ERRATA_i291_DMA_FORCEIDLE))
+ pdata->set_forceidle(up->pdev);
+
+ up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
+ schedule_work(&up->qos_work);
+
return 0;
}
+static int serial_omap_runtime_resume(struct device *dev)
+{
+ struct uart_omap_port *up = dev_get_drvdata(dev);
+ struct omap_uart_port_info *pdata = dev->platform_data;
+
+ if (up) {
+ if (pdata->get_context_loss_count) {
+ u32 loss_cnt = pdata->get_context_loss_count(dev);
+
+ if (up->context_loss_cnt != loss_cnt)
+ serial_omap_restore_context(up);
+ }
+
+ /* Errata i291 */
+ if (up->use_dma && pdata->set_noidle &&
+ (up->errata & UART_ERRATA_i291_DMA_FORCEIDLE))
+ pdata->set_noidle(up->pdev);
+
+ up->latency = up->calc_latency;
+ schedule_work(&up->qos_work);
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops serial_omap_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(serial_omap_suspend, serial_omap_resume)
+ SET_RUNTIME_PM_OPS(serial_omap_runtime_suspend,
+ serial_omap_runtime_resume, NULL)
+};
+
+#if defined(CONFIG_OF)
+static const struct of_device_id omap_serial_of_match[] = {
+ { .compatible = "ti,omap2-uart" },
+ { .compatible = "ti,omap3-uart" },
+ { .compatible = "ti,omap4-uart" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, omap_serial_of_match);
+#endif
+
static struct platform_driver serial_omap_driver = {
.probe = serial_omap_probe,
.remove = serial_omap_remove,
-
- .suspend = serial_omap_suspend,
- .resume = serial_omap_resume,
.driver = {
.name = DRIVER_NAME,
+ .pm = &serial_omap_dev_pm_ops,
+ .of_match_table = of_match_ptr(omap_serial_of_match),
},
};
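The omap-serial conversion drops the legacy platform_driver .suspend/.resume pointers in favour of a dev_pm_ops table that bundles the system-sleep and runtime-PM callbacks, plus an optional of_match_table for device-tree probing. A condensed sketch of that registration shape, with hypothetical demo callbacks that do nothing:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int demo_suspend(struct device *dev)		{ return 0; }
static int demo_resume(struct device *dev)		{ return 0; }
static int demo_runtime_suspend(struct device *dev)	{ return 0; }
static int demo_runtime_resume(struct device *dev)	{ return 0; }

static const struct dev_pm_ops demo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(demo_suspend, demo_resume)
	SET_RUNTIME_PM_OPS(demo_runtime_suspend, demo_runtime_resume, NULL)
};

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "demo,uart" },	/* hypothetical binding */
	{ },
};
MODULE_DEVICE_TABLE(of, demo_of_match);

static int demo_probe(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.driver	= {
		.name		= "demo-uart",
		.pm		= &demo_pm_ops,
		.of_match_table	= of_match_ptr(demo_of_match),
	},
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");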
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index d6aba8c..17ae657 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -25,6 +25,9 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/dmi.h>
+#include <linux/console.h>
+#include <linux/nmi.h>
+#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/pch_dma.h>
@@ -198,6 +201,10 @@ enum {
#define PCI_VENDOR_ID_ROHM 0x10DB
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
+#define DEFAULT_BAUD_RATE 1843200 /* 1.8432MHz */
+
struct pch_uart_buffer {
unsigned char *buf;
int size;
@@ -276,6 +283,9 @@ static struct pch_uart_driver_data drv_dat[] = {
[pch_ml7831_uart1] = {PCH_UART_2LINE, 1},
};
+#ifdef CONFIG_SERIAL_PCH_UART_CONSOLE
+static struct eg20t_port *pch_uart_ports[PCH_UART_NR];
+#endif
static unsigned int default_baud = 9600;
static const int trigger_level_256[4] = { 1, 64, 128, 224 };
static const int trigger_level_64[4] = { 1, 16, 32, 56 };
@@ -754,7 +764,7 @@ static int dma_handle_rx(struct eg20t_port *priv)
sg_dma_address(sg) = priv->rx_buf_dma;
desc = priv->chan_rx->device->device_prep_slave_sg(priv->chan_rx,
- sg, 1, DMA_FROM_DEVICE,
+ sg, 1, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
@@ -913,7 +923,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
}
desc = priv->chan_tx->device->device_prep_slave_sg(priv->chan_tx,
- priv->sg_tx_p, nent, DMA_TO_DEVICE,
+ priv->sg_tx_p, nent, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
dev_err(priv->port.dev, "%s:device_prep_slave_sg Failed\n",
@@ -1385,6 +1395,143 @@ static struct uart_ops pch_uart_ops = {
.verify_port = pch_uart_verify_port
};
+#ifdef CONFIG_SERIAL_PCH_UART_CONSOLE
+
+/*
+ * Wait for transmitter & holding register to empty
+ */
+static void wait_for_xmitr(struct eg20t_port *up, int bits)
+{
+ unsigned int status, tmout = 10000;
+
+ /* Wait up to 10ms for the character(s) to be sent. */
+ for (;;) {
+ status = ioread8(up->membase + UART_LSR);
+
+ if ((status & bits) == bits)
+ break;
+ if (--tmout == 0)
+ break;
+ udelay(1);
+ }
+
+ /* Wait up to 1s for flow control if necessary */
+ if (up->port.flags & UPF_CONS_FLOW) {
+ unsigned int tmout;
+ for (tmout = 1000000; tmout; tmout--) {
+ unsigned int msr = ioread8(up->membase + UART_MSR);
+ if (msr & UART_MSR_CTS)
+ break;
+ udelay(1);
+ touch_nmi_watchdog();
+ }
+ }
+}
+
+static void pch_console_putchar(struct uart_port *port, int ch)
+{
+ struct eg20t_port *priv =
+ container_of(port, struct eg20t_port, port);
+
+ wait_for_xmitr(priv, UART_LSR_THRE);
+ iowrite8(ch, priv->membase + PCH_UART_THR);
+}
+
+/*
+ * Print a string to the serial port trying not to disturb
+ * any possible real use of the port...
+ *
+ * The console_lock must be held when we get here.
+ */
+static void
+pch_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct eg20t_port *priv;
+
+ unsigned long flags;
+ u8 ier;
+ int locked = 1;
+
+ priv = pch_uart_ports[co->index];
+
+ touch_nmi_watchdog();
+
+ local_irq_save(flags);
+ if (priv->port.sysrq) {
+ /* serial8250_handle_port() already took the lock */
+ locked = 0;
+ } else if (oops_in_progress) {
+ locked = spin_trylock(&priv->port.lock);
+ } else
+ spin_lock(&priv->port.lock);
+
+ /*
+ * First save the IER then disable the interrupts
+ */
+ ier = ioread8(priv->membase + UART_IER);
+
+ pch_uart_hal_disable_interrupt(priv, PCH_UART_HAL_ALL_INT);
+
+ uart_console_write(&priv->port, s, count, pch_console_putchar);
+
+ /*
+ * Finally, wait for transmitter to become empty
+ * and restore the IER
+ */
+ wait_for_xmitr(priv, BOTH_EMPTY);
+ iowrite8(ier, priv->membase + UART_IER);
+
+ if (locked)
+ spin_unlock(&priv->port.lock);
+ local_irq_restore(flags);
+}
+
+static int __init pch_console_setup(struct console *co, char *options)
+{
+ struct uart_port *port;
+ int baud = 9600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ /*
+ * Check whether an invalid uart number has been specified, and
+ * if so, search for the first available port that does have
+ * console support.
+ */
+ if (co->index >= PCH_UART_NR)
+ co->index = 0;
+ port = &pch_uart_ports[co->index]->port;
+
+ if (!port || (!port->iobase && !port->membase))
+ return -ENODEV;
+
+ /* setup uartclock */
+ port->uartclk = DEFAULT_BAUD_RATE;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static struct uart_driver pch_uart_driver;
+
+static struct console pch_console = {
+ .name = PCH_UART_DRIVER_DEVICE,
+ .write = pch_console_write,
+ .device = uart_console_device,
+ .setup = pch_console_setup,
+ .flags = CON_PRINTBUFFER | CON_ANYTIME,
+ .index = -1,
+ .data = &pch_uart_driver,
+};
+
+#define PCH_CONSOLE (&pch_console)
+#else
+#define PCH_CONSOLE NULL
+#endif
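The console code above is the standard polled-output pattern: spin on LSR until the transmit holding register is empty, write one byte, and once the whole string is out wait for BOTH_EMPTY before restoring IER. A stripped-down sketch of that loop, assuming an 8250-compatible register layout and a plain MMIO base rather than the driver's exact accessors:

/* Minimal polled-TX sketch for an 8250-style UART; illustrative only. */
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/serial_reg.h>

static void polled_puts(void __iomem *base, const char *s)
{
	while (*s) {
		unsigned int timeout = 10000;	/* ~10 ms at 1 us per iteration */

		while (!(readb(base + UART_LSR) & UART_LSR_THRE) && --timeout)
			udelay(1);
		writeb(*s++, base + UART_TX);	/* THR shares offset 0 with RBR */
	}
}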
+
static struct uart_driver pch_uart_driver = {
.owner = THIS_MODULE,
.driver_name = KBUILD_MODNAME,
@@ -1392,6 +1539,7 @@ static struct uart_driver pch_uart_driver = {
.major = 0,
.minor = 0,
.nr = PCH_UART_NR,
+ .cons = PCH_CONSOLE,
};
static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
@@ -1418,7 +1566,7 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
if (!rxbuf)
goto init_port_free_txbuf;
- base_baud = 1843200; /* 1.8432MHz */
+ base_baud = DEFAULT_BAUD_RATE;
/* quirk for CM-iTC board */
board_name = dmi_get_system_info(DMI_BOARD_NAME);
@@ -1468,6 +1616,9 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
pci_set_drvdata(pdev, priv);
pch_uart_hal_request(pdev, fifosize, base_baud);
+#ifdef CONFIG_SERIAL_PCH_UART_CONSOLE
+ pch_uart_ports[board->line_no] = priv;
+#endif
ret = uart_add_one_port(&pch_uart_driver, &priv->port);
if (ret < 0)
goto init_port_hal_free;
@@ -1475,6 +1626,9 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
return priv;
init_port_hal_free:
+#ifdef CONFIG_SERIAL_PCH_UART_CONSOLE
+ pch_uart_ports[board->line_no] = NULL;
+#endif
free_page((unsigned long)rxbuf);
init_port_free_txbuf:
kfree(priv);
@@ -1497,6 +1651,10 @@ static void pch_uart_pci_remove(struct pci_dev *pdev)
priv = (struct eg20t_port *)pci_get_drvdata(pdev);
pci_disable_msi(pdev);
+
+#ifdef CONFIG_SERIAL_PCH_UART_CONSOLE
+ pch_uart_ports[priv->port.line] = NULL;
+#endif
pch_uart_exit_port(priv);
pci_disable_device(pdev);
kfree(priv);
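The DMA_FROM_DEVICE/DMA_TO_DEVICE to DMA_DEV_TO_MEM/DMA_MEM_TO_DEV changes in this file track the dmaengine move from enum dma_data_direction to enum dma_transfer_direction for slave transfers; the buffer mapping directions themselves are unchanged. A hedged sketch of a TX prep with the new enum, using the dmaengine_prep_slave_sg() wrapper rather than the raw device_prep_slave_sg hook shown in the diff:

/* Sketch: prepare a memory-to-device slave transfer with the newer enum. */
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static struct dma_async_tx_descriptor *
prep_uart_tx(struct dma_chan *chan, struct scatterlist *sgl, unsigned int nents)
{
	return dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
}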
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 5acd24a..e9c2dfe4 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -99,6 +99,9 @@ MODULE_LICENSE("GPL");
#define PMACZILOG_NAME "ttyPZ"
#endif
+#define pmz_debug(fmt, arg...) pr_debug("ttyPZ%d: " fmt, uap->port.line, ## arg)
+#define pmz_error(fmt, arg...) pr_err("ttyPZ%d: " fmt, uap->port.line, ## arg)
+#define pmz_info(fmt, arg...) pr_info("ttyPZ%d: " fmt, uap->port.line, ## arg)
/*
* For the sake of early serial console, we can do a pre-probe
@@ -106,7 +109,6 @@ MODULE_LICENSE("GPL");
*/
static struct uart_pmac_port pmz_ports[MAX_ZS_PORTS];
static int pmz_ports_count;
-static DEFINE_MUTEX(pmz_irq_mutex);
static struct uart_driver pmz_uart_reg = {
.owner = THIS_MODULE,
@@ -126,9 +128,6 @@ static void pmz_load_zsregs(struct uart_pmac_port *uap, u8 *regs)
{
int i;
- if (ZS_IS_ASLEEP(uap))
- return;
-
/* Let pending transmits finish. */
for (i = 0; i < 1000; i++) {
unsigned char stat = read_zsreg(uap, R1);
@@ -216,32 +215,24 @@ static void pmz_maybe_update_regs(struct uart_pmac_port *uap)
}
}
+static void pmz_interrupt_control(struct uart_pmac_port *uap, int enable)
+{
+ if (enable) {
+ uap->curregs[1] |= INT_ALL_Rx | TxINT_ENAB;
+ if (!ZS_IS_EXTCLK(uap))
+ uap->curregs[1] |= EXT_INT_ENAB;
+ } else {
+ uap->curregs[1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK);
+ }
+ write_zsreg(uap, R1, uap->curregs[1]);
+}
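pmz_interrupt_control() folds the repeated R1 bit twiddling into one helper; callers are expected to hold the port lock. A sketch of the intended call pattern, mirroring the pmz_set_termios() hunk later in this patch (pmz_reconfigure is a hypothetical wrapper and assumes the driver's own declarations are in scope):

/* Sketch: quiesce channel interrupts around a reconfiguration.
 * pmz_reconfigure() is hypothetical; the locking pattern matches the patch. */
static void pmz_reconfigure(struct uart_pmac_port *uap, struct ktermios *new)
{
	unsigned long flags;

	spin_lock_irqsave(&uap->port.lock, flags);
	pmz_interrupt_control(uap, 0);		/* mask Rx/Tx/ext interrupts in R1 */
	__pmz_set_termios(&uap->port, new, NULL);
	if (ZS_IS_OPEN(uap))
		pmz_interrupt_control(uap, 1);	/* re-enable only for an open port */
	spin_unlock_irqrestore(&uap->port.lock, flags);
}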
+
static struct tty_struct *pmz_receive_chars(struct uart_pmac_port *uap)
{
struct tty_struct *tty = NULL;
unsigned char ch, r1, drop, error, flag;
int loops = 0;
- /* The interrupt can be enabled when the port isn't open, typically
- * that happens when using one port is open and the other closed (stale
- * interrupt) or when one port is used as a console.
- */
- if (!ZS_IS_OPEN(uap)) {
- pmz_debug("pmz: draining input\n");
- /* Port is closed, drain input data */
- for (;;) {
- if ((++loops) > 1000)
- goto flood;
- (void)read_zsreg(uap, R1);
- write_zsreg(uap, R0, ERR_RES);
- (void)read_zsdata(uap);
- ch = read_zsreg(uap, R0);
- if (!(ch & Rx_CH_AV))
- break;
- }
- return NULL;
- }
-
/* Sanity check, make sure the old bug is no longer happening */
if (uap->port.state == NULL || uap->port.state->port.tty == NULL) {
WARN_ON(1);
@@ -339,9 +330,7 @@ static struct tty_struct *pmz_receive_chars(struct uart_pmac_port *uap)
return tty;
flood:
- uap->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK);
- write_zsreg(uap, R1, uap->curregs[R1]);
- zssync(uap);
+ pmz_interrupt_control(uap, 0);
pmz_error("pmz: rx irq flood !\n");
return tty;
}
@@ -383,8 +372,6 @@ static void pmz_transmit_chars(struct uart_pmac_port *uap)
{
struct circ_buf *xmit;
- if (ZS_IS_ASLEEP(uap))
- return;
if (ZS_IS_CONS(uap)) {
unsigned char status = read_zsreg(uap, R0);
@@ -481,6 +468,10 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
/* Channel A */
tty = NULL;
if (r3 & (CHAEXT | CHATxIP | CHARxIP)) {
+ if (!ZS_IS_OPEN(uap_a)) {
+ pmz_debug("ChanA interrupt while open !\n");
+ goto skip_a;
+ }
write_zsreg(uap_a, R0, RES_H_IUS);
zssync(uap_a);
if (r3 & CHAEXT)
@@ -491,16 +482,21 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
pmz_transmit_chars(uap_a);
rc = IRQ_HANDLED;
}
+ skip_a:
spin_unlock(&uap_a->port.lock);
if (tty != NULL)
tty_flip_buffer_push(tty);
- if (uap_b->node == NULL)
+ if (!uap_b)
goto out;
spin_lock(&uap_b->port.lock);
tty = NULL;
if (r3 & (CHBEXT | CHBTxIP | CHBRxIP)) {
+ if (!ZS_IS_OPEN(uap_b)) {
+ pmz_debug("ChanB interrupt while not open !\n");
+ goto skip_b;
+ }
write_zsreg(uap_b, R0, RES_H_IUS);
zssync(uap_b);
if (r3 & CHBEXT)
@@ -511,14 +507,12 @@ static irqreturn_t pmz_interrupt(int irq, void *dev_id)
pmz_transmit_chars(uap_b);
rc = IRQ_HANDLED;
}
+ skip_b:
spin_unlock(&uap_b->port.lock);
if (tty != NULL)
tty_flip_buffer_push(tty);
out:
-#ifdef DEBUG_HARD
- pmz_debug("irq done.\n");
-#endif
return rc;
}
@@ -543,12 +537,8 @@ static inline u8 pmz_peek_status(struct uart_pmac_port *uap)
*/
static unsigned int pmz_tx_empty(struct uart_port *port)
{
- struct uart_pmac_port *uap = to_pmz(port);
unsigned char status;
- if (ZS_IS_ASLEEP(uap) || uap->node == NULL)
- return TIOCSER_TEMT;
-
status = pmz_peek_status(to_pmz(port));
if (status & Tx_BUF_EMP)
return TIOCSER_TEMT;
@@ -570,8 +560,7 @@ static void pmz_set_mctrl(struct uart_port *port, unsigned int mctrl)
if (ZS_IS_IRDA(uap))
return;
/* We get called during boot with a port not up yet */
- if (ZS_IS_ASLEEP(uap) ||
- !(ZS_IS_OPEN(uap) || ZS_IS_CONS(uap)))
+ if (!(ZS_IS_OPEN(uap) || ZS_IS_CONS(uap)))
return;
set_bits = clear_bits = 0;
@@ -590,8 +579,7 @@ static void pmz_set_mctrl(struct uart_port *port, unsigned int mctrl)
/* NOTE: Not subject to 'transmitter active' rule. */
uap->curregs[R5] |= set_bits;
uap->curregs[R5] &= ~clear_bits;
- if (ZS_IS_ASLEEP(uap))
- return;
+
write_zsreg(uap, R5, uap->curregs[R5]);
pmz_debug("pmz_set_mctrl: set bits: %x, clear bits: %x -> %x\n",
set_bits, clear_bits, uap->curregs[R5]);
@@ -609,9 +597,6 @@ static unsigned int pmz_get_mctrl(struct uart_port *port)
unsigned char status;
unsigned int ret;
- if (ZS_IS_ASLEEP(uap) || uap->node == NULL)
- return 0;
-
status = read_zsreg(uap, R0);
ret = 0;
@@ -649,9 +634,6 @@ static void pmz_start_tx(struct uart_port *port)
uap->flags |= PMACZILOG_FLAG_TX_ACTIVE;
uap->flags &= ~PMACZILOG_FLAG_TX_STOPPED;
- if (ZS_IS_ASLEEP(uap) || uap->node == NULL)
- return;
-
status = read_zsreg(uap, R0);
/* TX busy? Just wait for the TX done interrupt. */
@@ -690,9 +672,6 @@ static void pmz_stop_rx(struct uart_port *port)
{
struct uart_pmac_port *uap = to_pmz(port);
- if (ZS_IS_ASLEEP(uap) || uap->node == NULL)
- return;
-
pmz_debug("pmz: stop_rx()()\n");
/* Disable all RX interrupts. */
@@ -711,14 +690,12 @@ static void pmz_enable_ms(struct uart_port *port)
struct uart_pmac_port *uap = to_pmz(port);
unsigned char new_reg;
- if (ZS_IS_IRDA(uap) || uap->node == NULL)
+ if (ZS_IS_IRDA(uap))
return;
new_reg = uap->curregs[R15] | (DCDIE | SYNCIE | CTSIE);
if (new_reg != uap->curregs[R15]) {
uap->curregs[R15] = new_reg;
- if (ZS_IS_ASLEEP(uap))
- return;
/* NOTE: Not subject to 'transmitter active' rule. */
write_zsreg(uap, R15, uap->curregs[R15]);
}
@@ -734,8 +711,6 @@ static void pmz_break_ctl(struct uart_port *port, int break_state)
unsigned char set_bits, clear_bits, new_reg;
unsigned long flags;
- if (uap->node == NULL)
- return;
set_bits = clear_bits = 0;
if (break_state)
@@ -748,12 +723,6 @@ static void pmz_break_ctl(struct uart_port *port, int break_state)
new_reg = (uap->curregs[R5] | set_bits) & ~clear_bits;
if (new_reg != uap->curregs[R5]) {
uap->curregs[R5] = new_reg;
-
- /* NOTE: Not subject to 'transmitter active' rule. */
- if (ZS_IS_ASLEEP(uap)) {
- spin_unlock_irqrestore(&port->lock, flags);
- return;
- }
write_zsreg(uap, R5, uap->curregs[R5]);
}
@@ -927,14 +896,21 @@ static int __pmz_startup(struct uart_pmac_port *uap)
static void pmz_irda_reset(struct uart_pmac_port *uap)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&uap->port.lock, flags);
uap->curregs[R5] |= DTR;
write_zsreg(uap, R5, uap->curregs[R5]);
zssync(uap);
- mdelay(110);
+ spin_unlock_irqrestore(&uap->port.lock, flags);
+ msleep(110);
+
+ spin_lock_irqsave(&uap->port.lock, flags);
uap->curregs[R5] &= ~DTR;
write_zsreg(uap, R5, uap->curregs[R5]);
zssync(uap);
- mdelay(10);
+ spin_unlock_irqrestore(&uap->port.lock, flags);
+ msleep(10);
}
/*
@@ -949,13 +925,6 @@ static int pmz_startup(struct uart_port *port)
pmz_debug("pmz: startup()\n");
- if (ZS_IS_ASLEEP(uap))
- return -EAGAIN;
- if (uap->node == NULL)
- return -ENODEV;
-
- mutex_lock(&pmz_irq_mutex);
-
uap->flags |= PMACZILOG_FLAG_IS_OPEN;
/* A console is never powered down. Else, power up and
@@ -966,18 +935,14 @@ static int pmz_startup(struct uart_port *port)
pwr_delay = __pmz_startup(uap);
spin_unlock_irqrestore(&port->lock, flags);
}
-
- pmz_get_port_A(uap)->flags |= PMACZILOG_FLAG_IS_IRQ_ON;
+ sprintf(uap->irq_name, PMACZILOG_NAME"%d", uap->port.line);
if (request_irq(uap->port.irq, pmz_interrupt, IRQF_SHARED,
- "SCC", uap)) {
+ uap->irq_name, uap)) {
pmz_error("Unable to register zs interrupt handler.\n");
pmz_set_scc_power(uap, 0);
- mutex_unlock(&pmz_irq_mutex);
return -ENXIO;
}
- mutex_unlock(&pmz_irq_mutex);
-
/* Right now, we deal with delay by blocking here, I'll be
* smarter later on
*/
@@ -990,12 +955,9 @@ static int pmz_startup(struct uart_port *port)
if (ZS_IS_IRDA(uap))
pmz_irda_reset(uap);
- /* Enable interrupts emission from the chip */
+ /* Enable interrupt requests for the channel */
spin_lock_irqsave(&port->lock, flags);
- uap->curregs[R1] |= INT_ALL_Rx | TxINT_ENAB;
- if (!ZS_IS_EXTCLK(uap))
- uap->curregs[R1] |= EXT_INT_ENAB;
- write_zsreg(uap, R1, uap->curregs[R1]);
+ pmz_interrupt_control(uap, 1);
spin_unlock_irqrestore(&port->lock, flags);
pmz_debug("pmz: startup() done.\n");
@@ -1010,49 +972,35 @@ static void pmz_shutdown(struct uart_port *port)
pmz_debug("pmz: shutdown()\n");
- if (uap->node == NULL)
- return;
-
- mutex_lock(&pmz_irq_mutex);
-
- /* Release interrupt handler */
- free_irq(uap->port.irq, uap);
-
spin_lock_irqsave(&port->lock, flags);
- uap->flags &= ~PMACZILOG_FLAG_IS_OPEN;
+ /* Disable interrupt requests for the channel */
+ pmz_interrupt_control(uap, 0);
- if (!ZS_IS_OPEN(uap->mate))
- pmz_get_port_A(uap)->flags &= ~PMACZILOG_FLAG_IS_IRQ_ON;
+ if (!ZS_IS_CONS(uap)) {
+ /* Disable receiver and transmitter */
+ uap->curregs[R3] &= ~RxENABLE;
+ uap->curregs[R5] &= ~TxENABLE;
- /* Disable interrupts */
- if (!ZS_IS_ASLEEP(uap)) {
- uap->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK);
- write_zsreg(uap, R1, uap->curregs[R1]);
- zssync(uap);
+ /* Disable break assertion */
+ uap->curregs[R5] &= ~SND_BRK;
+ pmz_maybe_update_regs(uap);
}
- if (ZS_IS_CONS(uap) || ZS_IS_ASLEEP(uap)) {
- spin_unlock_irqrestore(&port->lock, flags);
- mutex_unlock(&pmz_irq_mutex);
- return;
- }
+ spin_unlock_irqrestore(&port->lock, flags);
+
+ /* Release interrupt handler */
+ free_irq(uap->port.irq, uap);
- /* Disable receiver and transmitter. */
- uap->curregs[R3] &= ~RxENABLE;
- uap->curregs[R5] &= ~TxENABLE;
+ spin_lock_irqsave(&port->lock, flags);
- /* Disable all interrupts and BRK assertion. */
- uap->curregs[R5] &= ~SND_BRK;
- pmz_maybe_update_regs(uap);
+ uap->flags &= ~PMACZILOG_FLAG_IS_OPEN;
- /* Shut the chip down */
- pmz_set_scc_power(uap, 0);
+ if (!ZS_IS_CONS(uap))
+ pmz_set_scc_power(uap, 0); /* Shut the chip down */
spin_unlock_irqrestore(&port->lock, flags);
- mutex_unlock(&pmz_irq_mutex);
-
pmz_debug("pmz: shutdown() done.\n");
}
@@ -1300,9 +1248,6 @@ static void __pmz_set_termios(struct uart_port *port, struct ktermios *termios,
pmz_debug("pmz: set_termios()\n");
- if (ZS_IS_ASLEEP(uap))
- return;
-
memcpy(&uap->termios_cache, termios, sizeof(struct ktermios));
/* XXX Check which revs of machines actually allow 1 and 4Mb speeds
@@ -1352,19 +1297,15 @@ static void pmz_set_termios(struct uart_port *port, struct ktermios *termios,
spin_lock_irqsave(&port->lock, flags);
/* Disable IRQs on the port */
- uap->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK);
- write_zsreg(uap, R1, uap->curregs[R1]);
+ pmz_interrupt_control(uap, 0);
/* Setup new port configuration */
__pmz_set_termios(port, termios, old);
/* Re-enable IRQs on the port */
- if (ZS_IS_OPEN(uap)) {
- uap->curregs[R1] |= INT_ALL_Rx | TxINT_ENAB;
- if (!ZS_IS_EXTCLK(uap))
- uap->curregs[R1] |= EXT_INT_ENAB;
- write_zsreg(uap, R1, uap->curregs[R1]);
- }
+ if (ZS_IS_OPEN(uap))
+ pmz_interrupt_control(uap, 1);
+
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -1604,25 +1545,34 @@ static void pmz_dispose_port(struct uart_pmac_port *uap)
*/
static int pmz_attach(struct macio_dev *mdev, const struct of_device_id *match)
{
+ struct uart_pmac_port *uap;
int i;
/* Iterate the pmz_ports array to find a matching entry
*/
for (i = 0; i < MAX_ZS_PORTS; i++)
- if (pmz_ports[i].node == mdev->ofdev.dev.of_node) {
- struct uart_pmac_port *uap = &pmz_ports[i];
-
- uap->dev = mdev;
- dev_set_drvdata(&mdev->ofdev.dev, uap);
- if (macio_request_resources(uap->dev, "pmac_zilog"))
- printk(KERN_WARNING "%s: Failed to request resource"
- ", port still active\n",
- uap->node->name);
- else
- uap->flags |= PMACZILOG_FLAG_RSRC_REQUESTED;
- return 0;
- }
- return -ENODEV;
+ if (pmz_ports[i].node == mdev->ofdev.dev.of_node)
+ break;
+ if (i >= MAX_ZS_PORTS)
+ return -ENODEV;
+
+
+ uap = &pmz_ports[i];
+ uap->dev = mdev;
+ uap->port.dev = &mdev->ofdev.dev;
+ dev_set_drvdata(&mdev->ofdev.dev, uap);
+
+ /* We still activate the port even when failing to request resources
+ * to work around bugs in ancient Apple device-trees
+ */
+ if (macio_request_resources(uap->dev, "pmac_zilog"))
+ printk(KERN_WARNING "%s: Failed to request resource"
+ ", port still active\n",
+ uap->node->name);
+ else
+ uap->flags |= PMACZILOG_FLAG_RSRC_REQUESTED;
+
+ return uart_add_one_port(&pmz_uart_reg, &uap->port);
}
/*
@@ -1636,12 +1586,15 @@ static int pmz_detach(struct macio_dev *mdev)
if (!uap)
return -ENODEV;
+ uart_remove_one_port(&pmz_uart_reg, &uap->port);
+
if (uap->flags & PMACZILOG_FLAG_RSRC_REQUESTED) {
macio_release_resources(uap->dev);
uap->flags &= ~PMACZILOG_FLAG_RSRC_REQUESTED;
}
dev_set_drvdata(&mdev->ofdev.dev, NULL);
uap->dev = NULL;
+ uap->port.dev = NULL;
return 0;
}
@@ -1650,59 +1603,13 @@ static int pmz_detach(struct macio_dev *mdev)
static int pmz_suspend(struct macio_dev *mdev, pm_message_t pm_state)
{
struct uart_pmac_port *uap = dev_get_drvdata(&mdev->ofdev.dev);
- struct uart_state *state;
- unsigned long flags;
if (uap == NULL) {
printk("HRM... pmz_suspend with NULL uap\n");
return 0;
}
- if (pm_state.event == mdev->ofdev.dev.power.power_state.event)
- return 0;
-
- pmz_debug("suspend, switching to state %d\n", pm_state.event);
-
- state = pmz_uart_reg.state + uap->port.line;
-
- mutex_lock(&pmz_irq_mutex);
- mutex_lock(&state->port.mutex);
-
- spin_lock_irqsave(&uap->port.lock, flags);
-
- if (ZS_IS_OPEN(uap) || ZS_IS_CONS(uap)) {
- /* Disable receiver and transmitter. */
- uap->curregs[R3] &= ~RxENABLE;
- uap->curregs[R5] &= ~TxENABLE;
-
- /* Disable all interrupts and BRK assertion. */
- uap->curregs[R1] &= ~(EXT_INT_ENAB | TxINT_ENAB | RxINT_MASK);
- uap->curregs[R5] &= ~SND_BRK;
- pmz_load_zsregs(uap, uap->curregs);
- uap->flags |= PMACZILOG_FLAG_IS_ASLEEP;
- mb();
- }
-
- spin_unlock_irqrestore(&uap->port.lock, flags);
-
- if (ZS_IS_OPEN(uap) || ZS_IS_OPEN(uap->mate))
- if (ZS_IS_ASLEEP(uap->mate) && ZS_IS_IRQ_ON(pmz_get_port_A(uap))) {
- pmz_get_port_A(uap)->flags &= ~PMACZILOG_FLAG_IS_IRQ_ON;
- disable_irq(uap->port.irq);
- }
-
- if (ZS_IS_CONS(uap))
- uap->port.cons->flags &= ~CON_ENABLED;
-
- /* Shut the chip down */
- pmz_set_scc_power(uap, 0);
-
- mutex_unlock(&state->port.mutex);
- mutex_unlock(&pmz_irq_mutex);
-
- pmz_debug("suspend, switching complete\n");
-
- mdev->ofdev.dev.power.power_state = pm_state;
+ uart_suspend_port(&pmz_uart_reg, &uap->port);
return 0;
}
@@ -1711,76 +1618,20 @@ static int pmz_suspend(struct macio_dev *mdev, pm_message_t pm_state)
static int pmz_resume(struct macio_dev *mdev)
{
struct uart_pmac_port *uap = dev_get_drvdata(&mdev->ofdev.dev);
- struct uart_state *state;
- unsigned long flags;
- int pwr_delay = 0;
if (uap == NULL)
return 0;
- if (mdev->ofdev.dev.power.power_state.event == PM_EVENT_ON)
- return 0;
-
- pmz_debug("resume, switching to state 0\n");
-
- state = pmz_uart_reg.state + uap->port.line;
-
- mutex_lock(&pmz_irq_mutex);
- mutex_lock(&state->port.mutex);
-
- spin_lock_irqsave(&uap->port.lock, flags);
- if (!ZS_IS_OPEN(uap) && !ZS_IS_CONS(uap)) {
- spin_unlock_irqrestore(&uap->port.lock, flags);
- goto bail;
- }
- pwr_delay = __pmz_startup(uap);
-
- /* Take care of config that may have changed while asleep */
- __pmz_set_termios(&uap->port, &uap->termios_cache, NULL);
-
- if (ZS_IS_OPEN(uap)) {
- /* Enable interrupts */
- uap->curregs[R1] |= INT_ALL_Rx | TxINT_ENAB;
- if (!ZS_IS_EXTCLK(uap))
- uap->curregs[R1] |= EXT_INT_ENAB;
- write_zsreg(uap, R1, uap->curregs[R1]);
- }
-
- spin_unlock_irqrestore(&uap->port.lock, flags);
-
- if (ZS_IS_CONS(uap))
- uap->port.cons->flags |= CON_ENABLED;
-
- /* Re-enable IRQ on the controller */
- if (ZS_IS_OPEN(uap) && !ZS_IS_IRQ_ON(pmz_get_port_A(uap))) {
- pmz_get_port_A(uap)->flags |= PMACZILOG_FLAG_IS_IRQ_ON;
- enable_irq(uap->port.irq);
- }
-
- bail:
- mutex_unlock(&state->port.mutex);
- mutex_unlock(&pmz_irq_mutex);
-
- /* Right now, we deal with delay by blocking here, I'll be
- * smarter later on
- */
- if (pwr_delay != 0) {
- pmz_debug("pmz: delaying %d ms\n", pwr_delay);
- msleep(pwr_delay);
- }
-
- pmz_debug("resume, switching complete\n");
-
- mdev->ofdev.dev.power.power_state.event = PM_EVENT_ON;
+ uart_resume_port(&pmz_uart_reg, &uap->port);
return 0;
}
/*
* Probe all ports in the system and build the ports array, we register
- * with the serial layer at this point, the macio-type probing is only
- * used later to "attach" to the sysfs tree so we get power management
- * events
+ * with the serial layer later, so we get a proper struct device which
+ * allows the tty to attach properly. This is later than it used to be
+ * but the tty layer really wants it that way.
*/
static int __init pmz_probe(void)
{
@@ -1816,8 +1667,10 @@ static int __init pmz_probe(void)
/*
* Fill basic fields in the port structures
*/
- pmz_ports[count].mate = &pmz_ports[count+1];
- pmz_ports[count+1].mate = &pmz_ports[count];
+ if (node_b != NULL) {
+ pmz_ports[count].mate = &pmz_ports[count+1];
+ pmz_ports[count+1].mate = &pmz_ports[count];
+ }
pmz_ports[count].flags = PMACZILOG_FLAG_IS_CHANNEL_A;
pmz_ports[count].node = node_a;
pmz_ports[count+1].node = node_b;
@@ -1855,8 +1708,8 @@ static int __init pmz_init_port(struct uart_pmac_port *uap)
struct resource *r_ports;
int irq;
- r_ports = platform_get_resource(uap->node, IORESOURCE_MEM, 0);
- irq = platform_get_irq(uap->node, 0);
+ r_ports = platform_get_resource(uap->pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(uap->pdev, 0);
if (!r_ports || !irq)
return -ENODEV;
@@ -1885,19 +1738,19 @@ static int __init pmz_probe(void)
pmz_ports_count = 0;
- pmz_ports[0].mate = &pmz_ports[1];
pmz_ports[0].port.line = 0;
pmz_ports[0].flags = PMACZILOG_FLAG_IS_CHANNEL_A;
- pmz_ports[0].node = &scc_a_pdev;
+ pmz_ports[0].pdev = &scc_a_pdev;
err = pmz_init_port(&pmz_ports[0]);
if (err)
return err;
pmz_ports_count++;
+ pmz_ports[0].mate = &pmz_ports[1];
pmz_ports[1].mate = &pmz_ports[0];
pmz_ports[1].port.line = 1;
pmz_ports[1].flags = 0;
- pmz_ports[1].node = &scc_b_pdev;
+ pmz_ports[1].pdev = &scc_b_pdev;
err = pmz_init_port(&pmz_ports[1]);
if (err)
return err;
@@ -1913,16 +1766,35 @@ static void pmz_dispose_port(struct uart_pmac_port *uap)
static int __init pmz_attach(struct platform_device *pdev)
{
+ struct uart_pmac_port *uap;
int i;
+ /* Iterate the pmz_ports array to find a matching entry */
for (i = 0; i < pmz_ports_count; i++)
- if (pmz_ports[i].node == pdev)
- return 0;
- return -ENODEV;
+ if (pmz_ports[i].pdev == pdev)
+ break;
+ if (i >= pmz_ports_count)
+ return -ENODEV;
+
+ uap = &pmz_ports[i];
+ uap->port.dev = &pdev->dev;
+ platform_set_drvdata(pdev, uap);
+
+ return uart_add_one_port(&pmz_uart_reg, &uap->port);
}
static int __exit pmz_detach(struct platform_device *pdev)
{
+ struct uart_pmac_port *uap = platform_get_drvdata(pdev);
+
+ if (!uap)
+ return -ENODEV;
+
+ uart_remove_one_port(&pmz_uart_reg, &uap->port);
+
+ platform_set_drvdata(pdev, NULL);
+ uap->port.dev = NULL;
+
return 0;
}
@@ -1954,38 +1826,13 @@ static struct console pmz_console = {
*/
static int __init pmz_register(void)
{
- int i, rc;
-
pmz_uart_reg.nr = pmz_ports_count;
pmz_uart_reg.cons = PMACZILOG_CONSOLE;
/*
* Register this driver with the serial core
*/
- rc = uart_register_driver(&pmz_uart_reg);
- if (rc)
- return rc;
-
- /*
- * Register each port with the serial core
- */
- for (i = 0; i < pmz_ports_count; i++) {
- struct uart_pmac_port *uport = &pmz_ports[i];
- /* NULL node may happen on wallstreet */
- if (uport->node != NULL)
- rc = uart_add_one_port(&pmz_uart_reg, &uport->port);
- if (rc)
- goto err_out;
- }
-
- return 0;
-err_out:
- while (i-- > 0) {
- struct uart_pmac_port *uport = &pmz_ports[i];
- uart_remove_one_port(&pmz_uart_reg, &uport->port);
- }
- uart_unregister_driver(&pmz_uart_reg);
- return rc;
+ return uart_register_driver(&pmz_uart_reg);
}
#ifdef CONFIG_PPC_PMAC
@@ -2084,10 +1931,13 @@ static void __exit exit_pmz(void)
for (i = 0; i < pmz_ports_count; i++) {
struct uart_pmac_port *uport = &pmz_ports[i];
- if (uport->node != NULL) {
- uart_remove_one_port(&pmz_uart_reg, &uport->port);
+#ifdef CONFIG_PPC_PMAC
+ if (uport->node != NULL)
pmz_dispose_port(uport);
- }
+#else
+ if (uport->pdev != NULL)
+ pmz_dispose_port(uport);
+#endif
}
/* Unregister UART driver */
uart_unregister_driver(&pmz_uart_reg);
@@ -2114,8 +1964,6 @@ static void pmz_console_write(struct console *con, const char *s, unsigned int c
struct uart_pmac_port *uap = &pmz_ports[con->index];
unsigned long flags;
- if (ZS_IS_ASLEEP(uap))
- return;
spin_lock_irqsave(&uap->port.lock, flags);
/* Turn of interrupts and enable the transmitter. */
@@ -2160,8 +2008,13 @@ static int __init pmz_console_setup(struct console *co, char *options)
if (co->index >= pmz_ports_count)
co->index = 0;
uap = &pmz_ports[co->index];
+#ifdef CONFIG_PPC_PMAC
if (uap->node == NULL)
return -ENODEV;
+#else
+ if (uap->pdev == NULL)
+ return -ENODEV;
+#endif
port = &uap->port;
/*
diff --git a/drivers/tty/serial/pmac_zilog.h b/drivers/tty/serial/pmac_zilog.h
index cbc34fb..3483242 100644
--- a/drivers/tty/serial/pmac_zilog.h
+++ b/drivers/tty/serial/pmac_zilog.h
@@ -1,16 +1,6 @@
#ifndef __PMAC_ZILOG_H__
#define __PMAC_ZILOG_H__
-#ifdef CONFIG_PPC_PMAC
-#define pmz_debug(fmt, arg...) dev_dbg(&uap->dev->ofdev.dev, fmt, ## arg)
-#define pmz_error(fmt, arg...) dev_err(&uap->dev->ofdev.dev, fmt, ## arg)
-#define pmz_info(fmt, arg...) dev_info(&uap->dev->ofdev.dev, fmt, ## arg)
-#else
-#define pmz_debug(fmt, arg...) dev_dbg(&uap->node->dev, fmt, ## arg)
-#define pmz_error(fmt, arg...) dev_err(&uap->node->dev, fmt, ## arg)
-#define pmz_info(fmt, arg...) dev_info(&uap->node->dev, fmt, ## arg)
-#endif
-
/*
* At most 2 ESCCs with 2 ports each
*/
@@ -35,7 +25,7 @@ struct uart_pmac_port {
*/
struct device_node *node;
#else
- struct platform_device *node;
+ struct platform_device *pdev;
#endif
/* Port type as obtained from device tree (IRDA, modem, ...) */
@@ -50,14 +40,11 @@ struct uart_pmac_port {
#define PMACZILOG_FLAG_REGS_HELD 0x00000010
#define PMACZILOG_FLAG_TX_STOPPED 0x00000020
#define PMACZILOG_FLAG_TX_ACTIVE 0x00000040
-#define PMACZILOG_FLAG_ENABLED 0x00000080
#define PMACZILOG_FLAG_IS_IRDA 0x00000100
#define PMACZILOG_FLAG_IS_INTMODEM 0x00000200
#define PMACZILOG_FLAG_HAS_DMA 0x00000400
#define PMACZILOG_FLAG_RSRC_REQUESTED 0x00000800
-#define PMACZILOG_FLAG_IS_ASLEEP 0x00001000
#define PMACZILOG_FLAG_IS_OPEN 0x00002000
-#define PMACZILOG_FLAG_IS_IRQ_ON 0x00004000
#define PMACZILOG_FLAG_IS_EXTCLK 0x00008000
#define PMACZILOG_FLAG_BREAK 0x00010000
@@ -74,6 +61,8 @@ struct uart_pmac_port {
volatile struct dbdma_regs __iomem *rx_dma_regs;
#endif
+ unsigned char irq_name[8];
+
struct ktermios termios_cache;
};
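The new irq_name[8] member feeds request_irq() via sprintf(uap->irq_name, PMACZILOG_NAME"%d", uap->port.line); with MAX_ZS_PORTS capped at 4 the longest result is "ttyPZ3" plus the terminator, so 8 bytes suffice. Purely as a sketch, a bounds-checked variant of that formatting (the patch itself uses sprintf):

/* Sketch only: same formatting, with an explicit bound on the 8-byte buffer. */
static void pmz_format_irq_name(struct uart_pmac_port *uap)
{
	snprintf(uap->irq_name, sizeof(uap->irq_name),
		 PMACZILOG_NAME "%d", uap->port.line);
}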
@@ -388,9 +377,7 @@ static inline void zssync(struct uart_pmac_port *port)
#define ZS_IS_IRDA(UP) ((UP)->flags & PMACZILOG_FLAG_IS_IRDA)
#define ZS_IS_INTMODEM(UP) ((UP)->flags & PMACZILOG_FLAG_IS_INTMODEM)
#define ZS_HAS_DMA(UP) ((UP)->flags & PMACZILOG_FLAG_HAS_DMA)
-#define ZS_IS_ASLEEP(UP) ((UP)->flags & PMACZILOG_FLAG_IS_ASLEEP)
#define ZS_IS_OPEN(UP) ((UP)->flags & PMACZILOG_FLAG_IS_OPEN)
-#define ZS_IS_IRQ_ON(UP) ((UP)->flags & PMACZILOG_FLAG_IS_IRQ_ON)
#define ZS_IS_EXTCLK(UP) ((UP)->flags & PMACZILOG_FLAG_IS_EXTCLK)
#endif /* __PMAC_ZILOG_H__ */
diff --git a/drivers/tty/serial/s3c2410.c b/drivers/tty/serial/s3c2410.c
deleted file mode 100644
index b1d7e7c..0000000
--- a/drivers/tty/serial/s3c2410.c
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Driver for Samsung S3C2410 SoC onboard UARTs.
- *
- * Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
-
-#include <asm/irq.h>
-#include <mach/hardware.h>
-
-#include <plat/regs-serial.h>
-#include <mach/regs-gpio.h>
-
-#include "samsung.h"
-
-static int s3c2410_serial_setsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- if (strcmp(clk->name, "uclk") == 0)
- ucon |= S3C2410_UCON_UCLK;
- else
- ucon &= ~S3C2410_UCON_UCLK;
-
- wr_regl(port, S3C2410_UCON, ucon);
- return 0;
-}
-
-static int s3c2410_serial_getsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- clk->divisor = 1;
- clk->name = (ucon & S3C2410_UCON_UCLK) ? "uclk" : "pclk";
-
- return 0;
-}
-
-static int s3c2410_serial_resetport(struct uart_port *port,
- struct s3c2410_uartcfg *cfg)
-{
- dbg("s3c2410_serial_resetport: port=%p (%08lx), cfg=%p\n",
- port, port->mapbase, cfg);
-
- wr_regl(port, S3C2410_UCON, cfg->ucon);
- wr_regl(port, S3C2410_ULCON, cfg->ulcon);
-
- /* reset both fifos */
-
- wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
- wr_regl(port, S3C2410_UFCON, cfg->ufcon);
-
- return 0;
-}
-
-static struct s3c24xx_uart_info s3c2410_uart_inf = {
- .name = "Samsung S3C2410 UART",
- .type = PORT_S3C2410,
- .fifosize = 16,
- .rx_fifomask = S3C2410_UFSTAT_RXMASK,
- .rx_fifoshift = S3C2410_UFSTAT_RXSHIFT,
- .rx_fifofull = S3C2410_UFSTAT_RXFULL,
- .tx_fifofull = S3C2410_UFSTAT_TXFULL,
- .tx_fifomask = S3C2410_UFSTAT_TXMASK,
- .tx_fifoshift = S3C2410_UFSTAT_TXSHIFT,
- .get_clksrc = s3c2410_serial_getsource,
- .set_clksrc = s3c2410_serial_setsource,
- .reset_port = s3c2410_serial_resetport,
-};
-
-static int s3c2410_serial_probe(struct platform_device *dev)
-{
- return s3c24xx_serial_probe(dev, &s3c2410_uart_inf);
-}
-
-static struct platform_driver s3c2410_serial_driver = {
- .probe = s3c2410_serial_probe,
- .remove = __devexit_p(s3c24xx_serial_remove),
- .driver = {
- .name = "s3c2410-uart",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init s3c2410_serial_init(void)
-{
- return s3c24xx_serial_init(&s3c2410_serial_driver, &s3c2410_uart_inf);
-}
-
-static void __exit s3c2410_serial_exit(void)
-{
- platform_driver_unregister(&s3c2410_serial_driver);
-}
-
-module_init(s3c2410_serial_init);
-module_exit(s3c2410_serial_exit);
-
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_DESCRIPTION("Samsung S3C2410 SoC Serial port driver");
-MODULE_ALIAS("platform:s3c2410-uart");
diff --git a/drivers/tty/serial/s3c2412.c b/drivers/tty/serial/s3c2412.c
deleted file mode 100644
index 2234bf9..0000000
--- a/drivers/tty/serial/s3c2412.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Driver for Samsung S3C2412 and S3C2413 SoC onboard UARTs.
- *
- * Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
-
-#include <asm/irq.h>
-#include <mach/hardware.h>
-
-#include <plat/regs-serial.h>
-#include <mach/regs-gpio.h>
-
-#include "samsung.h"
-
-static int s3c2412_serial_setsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- ucon &= ~S3C2412_UCON_CLKMASK;
-
- if (strcmp(clk->name, "uclk") == 0)
- ucon |= S3C2440_UCON_UCLK;
- else if (strcmp(clk->name, "pclk") == 0)
- ucon |= S3C2440_UCON_PCLK;
- else if (strcmp(clk->name, "usysclk") == 0)
- ucon |= S3C2412_UCON_USYSCLK;
- else {
- printk(KERN_ERR "unknown clock source %s\n", clk->name);
- return -EINVAL;
- }
-
- wr_regl(port, S3C2410_UCON, ucon);
- return 0;
-}
-
-
-static int s3c2412_serial_getsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- switch (ucon & S3C2412_UCON_CLKMASK) {
- case S3C2412_UCON_UCLK:
- clk->divisor = 1;
- clk->name = "uclk";
- break;
-
- case S3C2412_UCON_PCLK:
- case S3C2412_UCON_PCLK2:
- clk->divisor = 1;
- clk->name = "pclk";
- break;
-
- case S3C2412_UCON_USYSCLK:
- clk->divisor = 1;
- clk->name = "usysclk";
- break;
- }
-
- return 0;
-}
-
-static int s3c2412_serial_resetport(struct uart_port *port,
- struct s3c2410_uartcfg *cfg)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- dbg("%s: port=%p (%08lx), cfg=%p\n",
- __func__, port, port->mapbase, cfg);
-
- /* ensure we don't change the clock settings... */
-
- ucon &= S3C2412_UCON_CLKMASK;
-
- wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
- wr_regl(port, S3C2410_ULCON, cfg->ulcon);
-
- /* reset both fifos */
-
- wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
- wr_regl(port, S3C2410_UFCON, cfg->ufcon);
-
- return 0;
-}
-
-static struct s3c24xx_uart_info s3c2412_uart_inf = {
- .name = "Samsung S3C2412 UART",
- .type = PORT_S3C2412,
- .fifosize = 64,
- .has_divslot = 1,
- .rx_fifomask = S3C2440_UFSTAT_RXMASK,
- .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
- .rx_fifofull = S3C2440_UFSTAT_RXFULL,
- .tx_fifofull = S3C2440_UFSTAT_TXFULL,
- .tx_fifomask = S3C2440_UFSTAT_TXMASK,
- .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
- .get_clksrc = s3c2412_serial_getsource,
- .set_clksrc = s3c2412_serial_setsource,
- .reset_port = s3c2412_serial_resetport,
-};
-
-/* device management */
-
-static int s3c2412_serial_probe(struct platform_device *dev)
-{
- dbg("s3c2440_serial_probe: dev=%p\n", dev);
- return s3c24xx_serial_probe(dev, &s3c2412_uart_inf);
-}
-
-static struct platform_driver s3c2412_serial_driver = {
- .probe = s3c2412_serial_probe,
- .remove = __devexit_p(s3c24xx_serial_remove),
- .driver = {
- .name = "s3c2412-uart",
- .owner = THIS_MODULE,
- },
-};
-
-static inline int s3c2412_serial_init(void)
-{
- return s3c24xx_serial_init(&s3c2412_serial_driver, &s3c2412_uart_inf);
-}
-
-static inline void s3c2412_serial_exit(void)
-{
- platform_driver_unregister(&s3c2412_serial_driver);
-}
-
-module_init(s3c2412_serial_init);
-module_exit(s3c2412_serial_exit);
-
-MODULE_DESCRIPTION("Samsung S3C2412,S3C2413 SoC Serial port driver");
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:s3c2412-uart");
diff --git a/drivers/tty/serial/s3c2440.c b/drivers/tty/serial/s3c2440.c
deleted file mode 100644
index 1d0c324..0000000
--- a/drivers/tty/serial/s3c2440.c
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Driver for Samsung S3C2440 and S3C2442 SoC onboard UARTs.
- *
- * Ben Dooks, Copyright (c) 2003-2008 Simtec Electronics
- * http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
-
-#include <asm/irq.h>
-#include <mach/hardware.h>
-
-#include <plat/regs-serial.h>
-#include <mach/regs-gpio.h>
-
-#include "samsung.h"
-
-
-static int s3c2440_serial_setsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- /* todo - proper fclk<>nonfclk switch. */
-
- ucon &= ~S3C2440_UCON_CLKMASK;
-
- if (strcmp(clk->name, "uclk") == 0)
- ucon |= S3C2440_UCON_UCLK;
- else if (strcmp(clk->name, "pclk") == 0)
- ucon |= S3C2440_UCON_PCLK;
- else if (strcmp(clk->name, "fclk") == 0)
- ucon |= S3C2440_UCON_FCLK;
- else {
- printk(KERN_ERR "unknown clock source %s\n", clk->name);
- return -EINVAL;
- }
-
- wr_regl(port, S3C2410_UCON, ucon);
- return 0;
-}
-
-
-static int s3c2440_serial_getsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
- unsigned long ucon0, ucon1, ucon2;
-
- switch (ucon & S3C2440_UCON_CLKMASK) {
- case S3C2440_UCON_UCLK:
- clk->divisor = 1;
- clk->name = "uclk";
- break;
-
- case S3C2440_UCON_PCLK:
- case S3C2440_UCON_PCLK2:
- clk->divisor = 1;
- clk->name = "pclk";
- break;
-
- case S3C2440_UCON_FCLK:
- /* the fun of calculating the uart divisors on
- * the s3c2440 */
-
- ucon0 = __raw_readl(S3C24XX_VA_UART0 + S3C2410_UCON);
- ucon1 = __raw_readl(S3C24XX_VA_UART1 + S3C2410_UCON);
- ucon2 = __raw_readl(S3C24XX_VA_UART2 + S3C2410_UCON);
-
- printk("ucons: %08lx, %08lx, %08lx\n", ucon0, ucon1, ucon2);
-
- ucon0 &= S3C2440_UCON0_DIVMASK;
- ucon1 &= S3C2440_UCON1_DIVMASK;
- ucon2 &= S3C2440_UCON2_DIVMASK;
-
- if (ucon0 != 0) {
- clk->divisor = ucon0 >> S3C2440_UCON_DIVSHIFT;
- clk->divisor += 6;
- } else if (ucon1 != 0) {
- clk->divisor = ucon1 >> S3C2440_UCON_DIVSHIFT;
- clk->divisor += 21;
- } else if (ucon2 != 0) {
- clk->divisor = ucon2 >> S3C2440_UCON_DIVSHIFT;
- clk->divisor += 36;
- } else {
- /* manual calims 44, seems to be 9 */
- clk->divisor = 9;
- }
-
- clk->name = "fclk";
- break;
- }
-
- return 0;
-}
-
-static int s3c2440_serial_resetport(struct uart_port *port,
- struct s3c2410_uartcfg *cfg)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- dbg("s3c2440_serial_resetport: port=%p (%08lx), cfg=%p\n",
- port, port->mapbase, cfg);
-
- /* ensure we don't change the clock settings... */
-
- ucon &= (S3C2440_UCON0_DIVMASK | (3<<10));
-
- wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
- wr_regl(port, S3C2410_ULCON, cfg->ulcon);
-
- /* reset both fifos */
-
- wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
- wr_regl(port, S3C2410_UFCON, cfg->ufcon);
-
- return 0;
-}
-
-static struct s3c24xx_uart_info s3c2440_uart_inf = {
- .name = "Samsung S3C2440 UART",
- .type = PORT_S3C2440,
- .fifosize = 64,
- .rx_fifomask = S3C2440_UFSTAT_RXMASK,
- .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
- .rx_fifofull = S3C2440_UFSTAT_RXFULL,
- .tx_fifofull = S3C2440_UFSTAT_TXFULL,
- .tx_fifomask = S3C2440_UFSTAT_TXMASK,
- .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
- .get_clksrc = s3c2440_serial_getsource,
- .set_clksrc = s3c2440_serial_setsource,
- .reset_port = s3c2440_serial_resetport,
-};
-
-/* device management */
-
-static int s3c2440_serial_probe(struct platform_device *dev)
-{
- dbg("s3c2440_serial_probe: dev=%p\n", dev);
- return s3c24xx_serial_probe(dev, &s3c2440_uart_inf);
-}
-
-static struct platform_driver s3c2440_serial_driver = {
- .probe = s3c2440_serial_probe,
- .remove = __devexit_p(s3c24xx_serial_remove),
- .driver = {
- .name = "s3c2440-uart",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init s3c2440_serial_init(void)
-{
- return s3c24xx_serial_init(&s3c2440_serial_driver, &s3c2440_uart_inf);
-}
-
-static void __exit s3c2440_serial_exit(void)
-{
- platform_driver_unregister(&s3c2440_serial_driver);
-}
-
-module_init(s3c2440_serial_init);
-module_exit(s3c2440_serial_exit);
-
-MODULE_DESCRIPTION("Samsung S3C2440,S3C2442 SoC Serial port driver");
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:s3c2440-uart");
diff --git a/drivers/tty/serial/s3c6400.c b/drivers/tty/serial/s3c6400.c
deleted file mode 100644
index e2f6913..0000000
--- a/drivers/tty/serial/s3c6400.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Driver for Samsung S3C6400 and S3C6410 SoC onboard UARTs.
- *
- * Copyright 2008 Openmoko, Inc.
- * Copyright 2008 Simtec Electronics
- * Ben Dooks <ben@simtec.co.uk>
- * http://armlinux.simtec.co.uk/
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
-
-#include <asm/irq.h>
-#include <mach/hardware.h>
-
-#include <plat/regs-serial.h>
-
-#include "samsung.h"
-
-static int s3c6400_serial_setsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- if (strcmp(clk->name, "uclk0") == 0) {
- ucon &= ~S3C6400_UCON_CLKMASK;
- ucon |= S3C6400_UCON_UCLK0;
- } else if (strcmp(clk->name, "uclk1") == 0)
- ucon |= S3C6400_UCON_UCLK1;
- else if (strcmp(clk->name, "pclk") == 0) {
- /* See notes about transitioning from UCLK to PCLK */
- ucon &= ~S3C6400_UCON_UCLK0;
- } else {
- printk(KERN_ERR "unknown clock source %s\n", clk->name);
- return -EINVAL;
- }
-
- wr_regl(port, S3C2410_UCON, ucon);
- return 0;
-}
-
-
-static int s3c6400_serial_getsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- u32 ucon = rd_regl(port, S3C2410_UCON);
-
- clk->divisor = 1;
-
- switch (ucon & S3C6400_UCON_CLKMASK) {
- case S3C6400_UCON_UCLK0:
- clk->name = "uclk0";
- break;
-
- case S3C6400_UCON_UCLK1:
- clk->name = "uclk1";
- break;
-
- case S3C6400_UCON_PCLK:
- case S3C6400_UCON_PCLK2:
- clk->name = "pclk";
- break;
- }
-
- return 0;
-}
-
-static int s3c6400_serial_resetport(struct uart_port *port,
- struct s3c2410_uartcfg *cfg)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- dbg("s3c6400_serial_resetport: port=%p (%08lx), cfg=%p\n",
- port, port->mapbase, cfg);
-
- /* ensure we don't change the clock settings... */
-
- ucon &= S3C6400_UCON_CLKMASK;
-
- wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
- wr_regl(port, S3C2410_ULCON, cfg->ulcon);
-
- /* reset both fifos */
-
- wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
- wr_regl(port, S3C2410_UFCON, cfg->ufcon);
-
- return 0;
-}
-
-static struct s3c24xx_uart_info s3c6400_uart_inf = {
- .name = "Samsung S3C6400 UART",
- .type = PORT_S3C6400,
- .fifosize = 64,
- .has_divslot = 1,
- .rx_fifomask = S3C2440_UFSTAT_RXMASK,
- .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
- .rx_fifofull = S3C2440_UFSTAT_RXFULL,
- .tx_fifofull = S3C2440_UFSTAT_TXFULL,
- .tx_fifomask = S3C2440_UFSTAT_TXMASK,
- .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
- .get_clksrc = s3c6400_serial_getsource,
- .set_clksrc = s3c6400_serial_setsource,
- .reset_port = s3c6400_serial_resetport,
-};
-
-/* device management */
-
-static int s3c6400_serial_probe(struct platform_device *dev)
-{
- dbg("s3c6400_serial_probe: dev=%p\n", dev);
- return s3c24xx_serial_probe(dev, &s3c6400_uart_inf);
-}
-
-static struct platform_driver s3c6400_serial_driver = {
- .probe = s3c6400_serial_probe,
- .remove = __devexit_p(s3c24xx_serial_remove),
- .driver = {
- .name = "s3c6400-uart",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init s3c6400_serial_init(void)
-{
- return s3c24xx_serial_init(&s3c6400_serial_driver, &s3c6400_uart_inf);
-}
-
-static void __exit s3c6400_serial_exit(void)
-{
- platform_driver_unregister(&s3c6400_serial_driver);
-}
-
-module_init(s3c6400_serial_init);
-module_exit(s3c6400_serial_exit);
-
-MODULE_DESCRIPTION("Samsung S3C6400,S3C6410 SoC Serial port driver");
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:s3c6400-uart");
diff --git a/drivers/tty/serial/s5pv210.c b/drivers/tty/serial/s5pv210.c
deleted file mode 100644
index 8b0b888..0000000
--- a/drivers/tty/serial/s5pv210.c
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
- * http://www.samsung.com/
- *
- * Based on drivers/serial/s3c6400.c
- *
- * Driver for Samsung S5PV210 SoC UARTs.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
-
-#include <linux/module.h>
-#include <linux/ioport.h>
-#include <linux/io.h>
-#include <linux/platform_device.h>
-#include <linux/init.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
-#include <linux/delay.h>
-
-#include <asm/irq.h>
-#include <mach/hardware.h>
-#include <plat/regs-serial.h>
-#include "samsung.h"
-
-static int s5pv210_serial_setsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- struct s3c2410_uartcfg *cfg = port->dev->platform_data;
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- if (cfg->flags & NO_NEED_CHECK_CLKSRC)
- return 0;
-
- if (strcmp(clk->name, "pclk") == 0)
- ucon &= ~S5PV210_UCON_CLKMASK;
- else if (strcmp(clk->name, "uclk1") == 0)
- ucon |= S5PV210_UCON_CLKMASK;
- else {
- printk(KERN_ERR "unknown clock source %s\n", clk->name);
- return -EINVAL;
- }
-
- wr_regl(port, S3C2410_UCON, ucon);
- return 0;
-}
-
-
-static int s5pv210_serial_getsource(struct uart_port *port,
- struct s3c24xx_uart_clksrc *clk)
-{
- struct s3c2410_uartcfg *cfg = port->dev->platform_data;
- u32 ucon = rd_regl(port, S3C2410_UCON);
-
- clk->divisor = 1;
-
- if (cfg->flags & NO_NEED_CHECK_CLKSRC)
- return 0;
-
- switch (ucon & S5PV210_UCON_CLKMASK) {
- case S5PV210_UCON_PCLK:
- clk->name = "pclk";
- break;
- case S5PV210_UCON_UCLK:
- clk->name = "uclk1";
- break;
- }
-
- return 0;
-}
-
-static int s5pv210_serial_resetport(struct uart_port *port,
- struct s3c2410_uartcfg *cfg)
-{
- unsigned long ucon = rd_regl(port, S3C2410_UCON);
-
- ucon &= S5PV210_UCON_CLKMASK;
- wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
- wr_regl(port, S3C2410_ULCON, cfg->ulcon);
-
- /* reset both fifos */
- wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
- wr_regl(port, S3C2410_UFCON, cfg->ufcon);
-
- /* It is need to delay When reset FIFO register */
- udelay(1);
-
- return 0;
-}
-
-#define S5PV210_UART_DEFAULT_INFO(fifo_size) \
- .name = "Samsung S5PV210 UART0", \
- .type = PORT_S3C6400, \
- .fifosize = fifo_size, \
- .has_divslot = 1, \
- .rx_fifomask = S5PV210_UFSTAT_RXMASK, \
- .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT, \
- .rx_fifofull = S5PV210_UFSTAT_RXFULL, \
- .tx_fifofull = S5PV210_UFSTAT_TXFULL, \
- .tx_fifomask = S5PV210_UFSTAT_TXMASK, \
- .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT, \
- .get_clksrc = s5pv210_serial_getsource, \
- .set_clksrc = s5pv210_serial_setsource, \
- .reset_port = s5pv210_serial_resetport
-
-static struct s3c24xx_uart_info s5p_port_fifo256 = {
- S5PV210_UART_DEFAULT_INFO(256),
-};
-
-static struct s3c24xx_uart_info s5p_port_fifo64 = {
- S5PV210_UART_DEFAULT_INFO(64),
-};
-
-static struct s3c24xx_uart_info s5p_port_fifo16 = {
- S5PV210_UART_DEFAULT_INFO(16),
-};
-
-static struct s3c24xx_uart_info *s5p_uart_inf[] = {
- [0] = &s5p_port_fifo256,
- [1] = &s5p_port_fifo64,
- [2] = &s5p_port_fifo16,
- [3] = &s5p_port_fifo16,
-};
-
-/* device management */
-static int s5p_serial_probe(struct platform_device *pdev)
-{
- return s3c24xx_serial_probe(pdev, s5p_uart_inf[pdev->id]);
-}
-
-static struct platform_driver s5p_serial_driver = {
- .probe = s5p_serial_probe,
- .remove = __devexit_p(s3c24xx_serial_remove),
- .driver = {
- .name = "s5pv210-uart",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init s5p_serial_init(void)
-{
- return s3c24xx_serial_init(&s5p_serial_driver, *s5p_uart_inf);
-}
-
-static void __exit s5p_serial_exit(void)
-{
- platform_driver_unregister(&s5p_serial_driver);
-}
-
-module_init(s5p_serial_init);
-module_exit(s5p_serial_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:s5pv210-uart");
-MODULE_DESCRIPTION("Samsung S5PV210 UART Driver support");
-MODULE_AUTHOR("Thomas Abraham <thomas.ab@samsung.com>");
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index b31f1c3..c55e5fb 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -42,6 +42,7 @@
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/cpufreq.h>
+#include <linux/of.h>
#include <asm/irq.h>
@@ -49,6 +50,7 @@
#include <mach/map.h>
#include <plat/regs-serial.h>
+#include <plat/clock.h>
#include "samsung.h"
@@ -190,10 +192,13 @@ static inline struct s3c24xx_uart_info *s3c24xx_port_to_info(struct uart_port *p
static inline struct s3c2410_uartcfg *s3c24xx_port_to_cfg(struct uart_port *port)
{
+ struct s3c24xx_uart_port *ourport;
+
if (port->dev == NULL)
return NULL;
- return (struct s3c2410_uartcfg *)port->dev->platform_data;
+ ourport = container_of(port, struct s3c24xx_uart_port, port);
+ return ourport->cfg;
}
static int s3c24xx_serial_rx_fifocnt(struct s3c24xx_uart_port *ourport,
@@ -202,7 +207,7 @@ static int s3c24xx_serial_rx_fifocnt(struct s3c24xx_uart_port *ourport,
struct s3c24xx_uart_info *info = ourport->info;
if (ufstat & info->rx_fifofull)
- return info->fifosize;
+ return ourport->port.fifosize;
return (ufstat & info->rx_fifomask) >> info->rx_fifoshift;
}
@@ -555,154 +560,98 @@ static void s3c24xx_serial_pm(struct uart_port *port, unsigned int level,
*
*/
+#define MAX_CLK_NAME_LENGTH 15
-#define MAX_CLKS (8)
-
-static struct s3c24xx_uart_clksrc tmp_clksrc = {
- .name = "pclk",
- .min_baud = 0,
- .max_baud = 0,
- .divisor = 1,
-};
-
-static inline int
-s3c24xx_serial_getsource(struct uart_port *port, struct s3c24xx_uart_clksrc *c)
+static inline int s3c24xx_serial_getsource(struct uart_port *port)
{
struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
+ unsigned int ucon;
- return (info->get_clksrc)(port, c);
-}
-
-static inline int
-s3c24xx_serial_setsource(struct uart_port *port, struct s3c24xx_uart_clksrc *c)
-{
- struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
+ if (info->num_clks == 1)
+ return 0;
- return (info->set_clksrc)(port, c);
+ ucon = rd_regl(port, S3C2410_UCON);
+ ucon &= info->clksel_mask;
+ return ucon >> info->clksel_shift;
}
-struct baud_calc {
- struct s3c24xx_uart_clksrc *clksrc;
- unsigned int calc;
- unsigned int divslot;
- unsigned int quot;
- struct clk *src;
-};
-
-static int s3c24xx_serial_calcbaud(struct baud_calc *calc,
- struct uart_port *port,
- struct s3c24xx_uart_clksrc *clksrc,
- unsigned int baud)
+static void s3c24xx_serial_setsource(struct uart_port *port,
+ unsigned int clk_sel)
{
- struct s3c24xx_uart_port *ourport = to_ourport(port);
- unsigned long rate;
-
- calc->src = clk_get(port->dev, clksrc->name);
- if (calc->src == NULL || IS_ERR(calc->src))
- return 0;
-
- rate = clk_get_rate(calc->src);
- rate /= clksrc->divisor;
+ struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
+ unsigned int ucon;
- calc->clksrc = clksrc;
+ if (info->num_clks == 1)
+ return;
- if (ourport->info->has_divslot) {
- unsigned long div = rate / baud;
-
- /* The UDIVSLOT register on the newer UARTs allows us to
- * get a divisor adjustment of 1/16th on the baud clock.
- *
- * We don't keep the UDIVSLOT value (the 16ths we calculated
- * by not multiplying the baud by 16) as it is easy enough
- * to recalculate.
- */
-
- calc->quot = div / 16;
- calc->calc = rate / div;
- } else {
- calc->quot = (rate + (8 * baud)) / (16 * baud);
- calc->calc = (rate / (calc->quot * 16));
- }
+ ucon = rd_regl(port, S3C2410_UCON);
+ if ((ucon & info->clksel_mask) >> info->clksel_shift == clk_sel)
+ return;
- calc->quot--;
- return 1;
+ ucon &= ~info->clksel_mask;
+ ucon |= clk_sel << info->clksel_shift;
+ wr_regl(port, S3C2410_UCON, ucon);
}
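The rewritten getsource/setsource pair treats the UCON clock-select field as a generic mask/shift pair carried in s3c24xx_uart_info instead of per-SoC callbacks. A standalone worked example of the same read-modify-write, with mask and shift values assumed purely for illustration:

/* Worked example of the clock-select math above; the mask/shift values
 * are illustrative assumptions, not taken from any particular SoC. */
#include <stdio.h>

int main(void)
{
	unsigned int clksel_mask = 0x3u << 10, clksel_shift = 10;
	unsigned int ucon = 0x005, clk_sel = 2;

	printf("current source: %u\n", (ucon & clksel_mask) >> clksel_shift);
	ucon = (ucon & ~clksel_mask) | (clk_sel << clksel_shift);
	printf("UCON after selecting source %u: 0x%03x\n", clk_sel, ucon);
	return 0;
}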
-static unsigned int s3c24xx_serial_getclk(struct uart_port *port,
- struct s3c24xx_uart_clksrc **clksrc,
- struct clk **clk,
- unsigned int baud)
+static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport,
+ unsigned int req_baud, struct clk **best_clk,
+ unsigned int *clk_num)
{
- struct s3c2410_uartcfg *cfg = s3c24xx_port_to_cfg(port);
- struct s3c24xx_uart_clksrc *clkp;
- struct baud_calc res[MAX_CLKS];
- struct baud_calc *resptr, *best, *sptr;
- int i;
-
- clkp = cfg->clocks;
- best = NULL;
-
- if (cfg->clocks_size < 2) {
- if (cfg->clocks_size == 0)
- clkp = &tmp_clksrc;
-
- /* check to see if we're sourcing fclk, and if so we're
- * going to have to update the clock source
- */
-
- if (strcmp(clkp->name, "fclk") == 0) {
- struct s3c24xx_uart_clksrc src;
-
- s3c24xx_serial_getsource(port, &src);
-
- /* check that the port already using fclk, and if
- * not, then re-select fclk
+ struct s3c24xx_uart_info *info = ourport->info;
+ struct clk *clk;
+ unsigned long rate;
+ unsigned int cnt, baud, quot, clk_sel, best_quot = 0;
+ char clkname[MAX_CLK_NAME_LENGTH];
+ int calc_deviation, deviation = (1 << 30) - 1;
+
+ *best_clk = NULL;
+ clk_sel = (ourport->cfg->clk_sel) ? ourport->cfg->clk_sel :
+ ourport->info->def_clk_sel;
+ for (cnt = 0; cnt < info->num_clks; cnt++) {
+ if (!(clk_sel & (1 << cnt)))
+ continue;
+
+ sprintf(clkname, "clk_uart_baud%d", cnt);
+ clk = clk_get(ourport->port.dev, clkname);
+ if (IS_ERR_OR_NULL(clk))
+ continue;
+
+ rate = clk_get_rate(clk);
+ if (!rate)
+ continue;
+
+ if (ourport->info->has_divslot) {
+ unsigned long div = rate / req_baud;
+
+ /* The UDIVSLOT register on the newer UARTs allows us to
+ * get a divisor adjustment of 1/16th on the baud clock.
+ *
+ * We don't keep the UDIVSLOT value (the 16ths we
+ * calculated by not multiplying the baud by 16) as it
+ * is easy enough to recalculate.
*/
- if (strcmp(src.name, clkp->name) == 0) {
- s3c24xx_serial_setsource(port, clkp);
- s3c24xx_serial_getsource(port, &src);
- }
-
- clkp->divisor = src.divisor;
- }
-
- s3c24xx_serial_calcbaud(res, port, clkp, baud);
- best = res;
- resptr = best + 1;
- } else {
- resptr = res;
-
- for (i = 0; i < cfg->clocks_size; i++, clkp++) {
- if (s3c24xx_serial_calcbaud(resptr, port, clkp, baud))
- resptr++;
+ quot = div / 16;
+ baud = rate / div;
+ } else {
+ quot = (rate + (8 * req_baud)) / (16 * req_baud);
+ baud = rate / (quot * 16);
}
- }
-
- /* ok, we now need to select the best clock we found */
-
- if (!best) {
- unsigned int deviation = (1<<30)|((1<<30)-1);
- int calc_deviation;
+ quot--;
- for (sptr = res; sptr < resptr; sptr++) {
- calc_deviation = baud - sptr->calc;
- if (calc_deviation < 0)
- calc_deviation = -calc_deviation;
+ calc_deviation = req_baud - baud;
+ if (calc_deviation < 0)
+ calc_deviation = -calc_deviation;
- if (calc_deviation < deviation) {
- best = sptr;
- deviation = calc_deviation;
- }
+ if (calc_deviation < deviation) {
+ *best_clk = clk;
+ best_quot = quot;
+ *clk_num = cnt;
+ deviation = calc_deviation;
}
}
- /* store results to pass back */
-
- *clksrc = best->clksrc;
- *clk = best->src;
-
- return best->quot;
+ return best_quot;
}
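On parts without UDIVSLOT the loop above reduces to quot = (rate + 8 * baud) / (16 * baud), i.e. a rounded 16x oversampling divisor, with quot - 1 programmed into the hardware. A standalone example of that arithmetic (the 66 MHz source clock is an arbitrary illustrative value):

/* Worked example of the non-divslot quotient selection above. */
#include <stdio.h>

int main(void)
{
	unsigned long rate = 66000000;		/* illustrative clock rate */
	unsigned int req_baud = 115200;
	unsigned int quot = (rate + (8 * req_baud)) / (16 * req_baud);
	unsigned int baud = rate / (quot * 16);

	/* quot = 36, actual baud = 114583, deviation ~0.5% */
	printf("quot=%u (programmed as %u), actual baud=%u\n",
	       quot, quot - 1, baud);
	return 0;
}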
/* udivslot_table[]
@@ -735,10 +684,9 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
{
struct s3c2410_uartcfg *cfg = s3c24xx_port_to_cfg(port);
struct s3c24xx_uart_port *ourport = to_ourport(port);
- struct s3c24xx_uart_clksrc *clksrc = NULL;
struct clk *clk = NULL;
unsigned long flags;
- unsigned int baud, quot;
+ unsigned int baud, quot, clk_sel = 0;
unsigned int ulcon;
unsigned int umcon;
unsigned int udivslot = 0;
@@ -754,17 +702,16 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
*/
baud = uart_get_baud_rate(port, termios, old, 0, 115200*8);
-
+ quot = s3c24xx_serial_getclk(ourport, baud, &clk, &clk_sel);
if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST)
quot = port->custom_divisor;
- else
- quot = s3c24xx_serial_getclk(port, &clksrc, &clk, baud);
+ if (!clk)
+ return;
/* check to see if we need to change clock source */
- if (ourport->clksrc != clksrc || ourport->baudclk != clk) {
- dbg("selecting clock %p\n", clk);
- s3c24xx_serial_setsource(port, clksrc);
+ if (ourport->baudclk != clk) {
+ s3c24xx_serial_setsource(port, clk_sel);
if (ourport->baudclk != NULL && !IS_ERR(ourport->baudclk)) {
clk_disable(ourport->baudclk);
@@ -773,7 +720,6 @@ static void s3c24xx_serial_set_termios(struct uart_port *port,
clk_enable(clk);
- ourport->clksrc = clksrc;
ourport->baudclk = clk;
ourport->baudclk_rate = clk ? clk_get_rate(clk) : 0;
}
@@ -1020,16 +966,29 @@ static struct s3c24xx_uart_port s3c24xx_serial_ports[CONFIG_SERIAL_SAMSUNG_UARTS
/* s3c24xx_serial_resetport
*
- * wrapper to call the specific reset for this port (reset the fifos
- * and the settings)
+ * reset the fifos and other settings.
*/
-static inline int s3c24xx_serial_resetport(struct uart_port *port,
- struct s3c2410_uartcfg *cfg)
+static void s3c24xx_serial_resetport(struct uart_port *port,
+ struct s3c2410_uartcfg *cfg)
{
struct s3c24xx_uart_info *info = s3c24xx_port_to_info(port);
+ unsigned long ucon = rd_regl(port, S3C2410_UCON);
+ unsigned int ucon_mask;
+
+ ucon_mask = info->clksel_mask;
+ if (info->type == PORT_S3C2440)
+ ucon_mask |= S3C2440_UCON0_DIVMASK;
- return (info->reset_port)(port, cfg);
+ ucon &= ucon_mask;
+ wr_regl(port, S3C2410_UCON, ucon | cfg->ucon);
+
+ /* reset both fifos */
+ wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
+ wr_regl(port, S3C2410_UFCON, cfg->ufcon);
+
+ /* some delay is required after fifo reset */
+ udelay(1);
}
@@ -1121,11 +1080,10 @@ static inline void s3c24xx_serial_cpufreq_deregister(struct s3c24xx_uart_port *p
*/
static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
- struct s3c24xx_uart_info *info,
struct platform_device *platdev)
{
struct uart_port *port = &ourport->port;
- struct s3c2410_uartcfg *cfg;
+ struct s3c2410_uartcfg *cfg = ourport->cfg;
struct resource *res;
int ret;
@@ -1134,30 +1092,16 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
if (platdev == NULL)
return -ENODEV;
- cfg = s3c24xx_dev_to_cfg(&platdev->dev);
-
if (port->mapbase != 0)
return 0;
- if (cfg->hwport > CONFIG_SERIAL_SAMSUNG_UARTS) {
- printk(KERN_ERR "%s: port %d bigger than %d\n", __func__,
- cfg->hwport, CONFIG_SERIAL_SAMSUNG_UARTS);
- return -ERANGE;
- }
-
/* setup info for port */
port->dev = &platdev->dev;
- ourport->info = info;
/* Startup sequence is different for s3c64xx and higher SoC's */
if (s3c24xx_serial_has_interrupt_mask(port))
s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
- /* copy the info in from provided structure */
- ourport->port.fifosize = info->fifosize;
-
- dbg("s3c24xx_serial_init_port: %p (hw %d)...\n", port, cfg->hwport);
-
port->uartclk = 1;
if (cfg->uart_flags & UPF_CONS_FLOW) {
@@ -1215,43 +1159,74 @@ static ssize_t s3c24xx_serial_show_clksrc(struct device *dev,
struct uart_port *port = s3c24xx_dev_to_port(dev);
struct s3c24xx_uart_port *ourport = to_ourport(port);
- return snprintf(buf, PAGE_SIZE, "* %s\n", ourport->clksrc->name);
+ return snprintf(buf, PAGE_SIZE, "* %s\n", ourport->baudclk->name);
}
static DEVICE_ATTR(clock_source, S_IRUGO, s3c24xx_serial_show_clksrc, NULL);
+
/* Device driver serial port probe */
+static const struct of_device_id s3c24xx_uart_dt_match[];
static int probe_index;
-int s3c24xx_serial_probe(struct platform_device *dev,
- struct s3c24xx_uart_info *info)
+static inline struct s3c24xx_serial_drv_data *s3c24xx_get_driver_data(
+ struct platform_device *pdev)
+{
+#ifdef CONFIG_OF
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(s3c24xx_uart_dt_match, pdev->dev.of_node);
+ return (struct s3c24xx_serial_drv_data *)match->data;
+ }
+#endif
+ return (struct s3c24xx_serial_drv_data *)
+ platform_get_device_id(pdev)->driver_data;
+}
+
+static int s3c24xx_serial_probe(struct platform_device *pdev)
{
struct s3c24xx_uart_port *ourport;
int ret;
- dbg("s3c24xx_serial_probe(%p, %p) %d\n", dev, info, probe_index);
+ dbg("s3c24xx_serial_probe(%p) %d\n", pdev, probe_index);
ourport = &s3c24xx_serial_ports[probe_index];
+
+ ourport->drv_data = s3c24xx_get_driver_data(pdev);
+ if (!ourport->drv_data) {
+ dev_err(&pdev->dev, "could not find driver data\n");
+ return -ENODEV;
+ }
+
+ ourport->info = ourport->drv_data->info;
+ ourport->cfg = (pdev->dev.platform_data) ?
+ (struct s3c2410_uartcfg *)pdev->dev.platform_data :
+ ourport->drv_data->def_cfg;
+
+ ourport->port.fifosize = (ourport->info->fifosize) ?
+ ourport->info->fifosize :
+ ourport->drv_data->fifosize[probe_index];
+
probe_index++;
dbg("%s: initialising port %p...\n", __func__, ourport);
- ret = s3c24xx_serial_init_port(ourport, info, dev);
+ ret = s3c24xx_serial_init_port(ourport, pdev);
if (ret < 0)
goto probe_err;
dbg("%s: adding port\n", __func__);
uart_add_one_port(&s3c24xx_uart_drv, &ourport->port);
- platform_set_drvdata(dev, &ourport->port);
+ platform_set_drvdata(pdev, &ourport->port);
- ret = device_create_file(&dev->dev, &dev_attr_clock_source);
+ ret = device_create_file(&pdev->dev, &dev_attr_clock_source);
if (ret < 0)
- printk(KERN_ERR "%s: failed to add clksrc attr.\n", __func__);
+ dev_err(&pdev->dev, "failed to add clock source attr.\n");
ret = s3c24xx_serial_cpufreq_register(ourport);
if (ret < 0)
- dev_err(&dev->dev, "failed to add cpufreq notifier\n");
+ dev_err(&pdev->dev, "failed to add cpufreq notifier\n");
return 0;
@@ -1259,9 +1234,7 @@ int s3c24xx_serial_probe(struct platform_device *dev,
return ret;
}
-EXPORT_SYMBOL_GPL(s3c24xx_serial_probe);
-
-int __devexit s3c24xx_serial_remove(struct platform_device *dev)
+static int __devexit s3c24xx_serial_remove(struct platform_device *dev)
{
struct uart_port *port = s3c24xx_dev_to_port(&dev->dev);
@@ -1274,8 +1247,6 @@ int __devexit s3c24xx_serial_remove(struct platform_device *dev)
return 0;
}
-EXPORT_SYMBOL_GPL(s3c24xx_serial_remove);
-
/* UART power management code */
#ifdef CONFIG_PM_SLEEP
static int s3c24xx_serial_suspend(struct device *dev)
@@ -1315,41 +1286,6 @@ static const struct dev_pm_ops s3c24xx_serial_pm_ops = {
#define SERIAL_SAMSUNG_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
-int s3c24xx_serial_init(struct platform_driver *drv,
- struct s3c24xx_uart_info *info)
-{
- dbg("s3c24xx_serial_init(%p,%p)\n", drv, info);
-
- drv->driver.pm = SERIAL_SAMSUNG_PM_OPS;
-
- return platform_driver_register(drv);
-}
-
-EXPORT_SYMBOL_GPL(s3c24xx_serial_init);
-
-/* module initialisation code */
-
-static int __init s3c24xx_serial_modinit(void)
-{
- int ret;
-
- ret = uart_register_driver(&s3c24xx_uart_drv);
- if (ret < 0) {
- printk(KERN_ERR "failed to register UART driver\n");
- return -1;
- }
-
- return 0;
-}
-
-static void __exit s3c24xx_serial_modexit(void)
-{
- uart_unregister_driver(&s3c24xx_uart_drv);
-}
-
-module_init(s3c24xx_serial_modinit);
-module_exit(s3c24xx_serial_modexit);
-
/* Console code */
#ifdef CONFIG_SERIAL_SAMSUNG_CONSOLE
@@ -1395,12 +1331,13 @@ static void __init
s3c24xx_serial_get_options(struct uart_port *port, int *baud,
int *parity, int *bits)
{
- struct s3c24xx_uart_clksrc clksrc;
struct clk *clk;
unsigned int ulcon;
unsigned int ucon;
unsigned int ubrdiv;
unsigned long rate;
+ unsigned int clk_sel;
+ char clk_name[MAX_CLK_NAME_LENGTH];
ulcon = rd_regl(port, S3C2410_ULCON);
ucon = rd_regl(port, S3C2410_UCON);
@@ -1445,44 +1382,21 @@ s3c24xx_serial_get_options(struct uart_port *port, int *baud,
/* now calculate the baud rate */
- s3c24xx_serial_getsource(port, &clksrc);
+ clk_sel = s3c24xx_serial_getsource(port);
+ sprintf(clk_name, "clk_uart_baud%d", clk_sel);
- clk = clk_get(port->dev, clksrc.name);
+ clk = clk_get(port->dev, clk_name);
if (!IS_ERR(clk) && clk != NULL)
- rate = clk_get_rate(clk) / clksrc.divisor;
+ rate = clk_get_rate(clk);
else
rate = 1;
-
*baud = rate / (16 * (ubrdiv + 1));
dbg("calculated baud %d\n", *baud);
}
}
-/* s3c24xx_serial_init_ports
- *
- * initialise the serial ports from the machine provided initialisation
- * data.
-*/
-
-static int s3c24xx_serial_init_ports(struct s3c24xx_uart_info **info)
-{
- struct s3c24xx_uart_port *ptr = s3c24xx_serial_ports;
- struct platform_device **platdev_ptr;
- int i;
-
- dbg("s3c24xx_serial_init_ports: initialising ports...\n");
-
- platdev_ptr = s3c24xx_uart_devs;
-
- for (i = 0; i < CONFIG_SERIAL_SAMSUNG_UARTS; i++, ptr++, platdev_ptr++) {
- s3c24xx_serial_init_port(ptr, info[i], *platdev_ptr);
- }
-
- return 0;
-}
-
static int __init
s3c24xx_serial_console_setup(struct console *co, char *options)
{
@@ -1526,11 +1440,6 @@ s3c24xx_serial_console_setup(struct console *co, char *options)
return uart_set_options(port, co, baud, parity, bits, flow);
}
-/* s3c24xx_serial_initconsole
- *
- * initialise the console from one of the uart drivers
-*/
-
static struct console s3c24xx_serial_console = {
.name = S3C24XX_SERIAL_NAME,
.device = uart_console_device,
@@ -1540,34 +1449,251 @@ static struct console s3c24xx_serial_console = {
.setup = s3c24xx_serial_console_setup,
.data = &s3c24xx_uart_drv,
};
+#endif /* CONFIG_SERIAL_SAMSUNG_CONSOLE */
-int s3c24xx_serial_initconsole(struct platform_driver *drv,
- struct s3c24xx_uart_info **info)
+#ifdef CONFIG_CPU_S3C2410
+static struct s3c24xx_serial_drv_data s3c2410_serial_drv_data = {
+ .info = &(struct s3c24xx_uart_info) {
+ .name = "Samsung S3C2410 UART",
+ .type = PORT_S3C2410,
+ .fifosize = 16,
+ .rx_fifomask = S3C2410_UFSTAT_RXMASK,
+ .rx_fifoshift = S3C2410_UFSTAT_RXSHIFT,
+ .rx_fifofull = S3C2410_UFSTAT_RXFULL,
+ .tx_fifofull = S3C2410_UFSTAT_TXFULL,
+ .tx_fifomask = S3C2410_UFSTAT_TXMASK,
+ .tx_fifoshift = S3C2410_UFSTAT_TXSHIFT,
+ .def_clk_sel = S3C2410_UCON_CLKSEL0,
+ .num_clks = 2,
+ .clksel_mask = S3C2410_UCON_CLKMASK,
+ .clksel_shift = S3C2410_UCON_CLKSHIFT,
+ },
+ .def_cfg = &(struct s3c2410_uartcfg) {
+ .ucon = S3C2410_UCON_DEFAULT,
+ .ufcon = S3C2410_UFCON_DEFAULT,
+ },
+};
+#define S3C2410_SERIAL_DRV_DATA ((kernel_ulong_t)&s3c2410_serial_drv_data)
+#else
+#define S3C2410_SERIAL_DRV_DATA (kernel_ulong_t)NULL
+#endif
-{
- struct platform_device *dev = s3c24xx_uart_devs[0];
+#ifdef CONFIG_CPU_S3C2412
+static struct s3c24xx_serial_drv_data s3c2412_serial_drv_data = {
+ .info = &(struct s3c24xx_uart_info) {
+ .name = "Samsung S3C2412 UART",
+ .type = PORT_S3C2412,
+ .fifosize = 64,
+ .has_divslot = 1,
+ .rx_fifomask = S3C2440_UFSTAT_RXMASK,
+ .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
+ .rx_fifofull = S3C2440_UFSTAT_RXFULL,
+ .tx_fifofull = S3C2440_UFSTAT_TXFULL,
+ .tx_fifomask = S3C2440_UFSTAT_TXMASK,
+ .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
+ .def_clk_sel = S3C2410_UCON_CLKSEL2,
+ .num_clks = 4,
+ .clksel_mask = S3C2412_UCON_CLKMASK,
+ .clksel_shift = S3C2412_UCON_CLKSHIFT,
+ },
+ .def_cfg = &(struct s3c2410_uartcfg) {
+ .ucon = S3C2410_UCON_DEFAULT,
+ .ufcon = S3C2410_UFCON_DEFAULT,
+ },
+};
+#define S3C2412_SERIAL_DRV_DATA ((kernel_ulong_t)&s3c2412_serial_drv_data)
+#else
+#define S3C2412_SERIAL_DRV_DATA (kernel_ulong_t)NULL
+#endif
- dbg("s3c24xx_serial_initconsole\n");
+#if defined(CONFIG_CPU_S3C2440) || defined(CONFIG_CPU_S3C2416) || \
+ defined(CONFIG_CPU_S3C2443)
+static struct s3c24xx_serial_drv_data s3c2440_serial_drv_data = {
+ .info = &(struct s3c24xx_uart_info) {
+ .name = "Samsung S3C2440 UART",
+ .type = PORT_S3C2440,
+ .fifosize = 64,
+ .has_divslot = 1,
+ .rx_fifomask = S3C2440_UFSTAT_RXMASK,
+ .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
+ .rx_fifofull = S3C2440_UFSTAT_RXFULL,
+ .tx_fifofull = S3C2440_UFSTAT_TXFULL,
+ .tx_fifomask = S3C2440_UFSTAT_TXMASK,
+ .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
+ .def_clk_sel = S3C2410_UCON_CLKSEL2,
+ .num_clks = 4,
+ .clksel_mask = S3C2412_UCON_CLKMASK,
+ .clksel_shift = S3C2412_UCON_CLKSHIFT,
+ },
+ .def_cfg = &(struct s3c2410_uartcfg) {
+ .ucon = S3C2410_UCON_DEFAULT,
+ .ufcon = S3C2410_UFCON_DEFAULT,
+ },
+};
+#define S3C2440_SERIAL_DRV_DATA ((kernel_ulong_t)&s3c2440_serial_drv_data)
+#else
+#define S3C2440_SERIAL_DRV_DATA (kernel_ulong_t)NULL
+#endif
- /* select driver based on the cpu */
+#if defined(CONFIG_CPU_S3C6400) || defined(CONFIG_CPU_S3C6410) || \
+ defined(CONFIG_CPU_S5P6440) || defined(CONFIG_CPU_S5P6450) || \
+ defined(CONFIG_CPU_S5PC100)
+static struct s3c24xx_serial_drv_data s3c6400_serial_drv_data = {
+ .info = &(struct s3c24xx_uart_info) {
+ .name = "Samsung S3C6400 UART",
+ .type = PORT_S3C6400,
+ .fifosize = 64,
+ .has_divslot = 1,
+ .rx_fifomask = S3C2440_UFSTAT_RXMASK,
+ .rx_fifoshift = S3C2440_UFSTAT_RXSHIFT,
+ .rx_fifofull = S3C2440_UFSTAT_RXFULL,
+ .tx_fifofull = S3C2440_UFSTAT_TXFULL,
+ .tx_fifomask = S3C2440_UFSTAT_TXMASK,
+ .tx_fifoshift = S3C2440_UFSTAT_TXSHIFT,
+ .def_clk_sel = S3C2410_UCON_CLKSEL2,
+ .num_clks = 4,
+ .clksel_mask = S3C6400_UCON_CLKMASK,
+ .clksel_shift = S3C6400_UCON_CLKSHIFT,
+ },
+ .def_cfg = &(struct s3c2410_uartcfg) {
+ .ucon = S3C2410_UCON_DEFAULT,
+ .ufcon = S3C2410_UFCON_DEFAULT,
+ },
+};
+#define S3C6400_SERIAL_DRV_DATA ((kernel_ulong_t)&s3c6400_serial_drv_data)
+#else
+#define S3C6400_SERIAL_DRV_DATA (kernel_ulong_t)NULL
+#endif
- if (dev == NULL) {
- printk(KERN_ERR "s3c24xx: no devices for console init\n");
- return 0;
- }
+#ifdef CONFIG_CPU_S5PV210
+static struct s3c24xx_serial_drv_data s5pv210_serial_drv_data = {
+ .info = &(struct s3c24xx_uart_info) {
+ .name = "Samsung S5PV210 UART",
+ .type = PORT_S3C6400,
+ .has_divslot = 1,
+ .rx_fifomask = S5PV210_UFSTAT_RXMASK,
+ .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT,
+ .rx_fifofull = S5PV210_UFSTAT_RXFULL,
+ .tx_fifofull = S5PV210_UFSTAT_TXFULL,
+ .tx_fifomask = S5PV210_UFSTAT_TXMASK,
+ .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT,
+ .def_clk_sel = S3C2410_UCON_CLKSEL0,
+ .num_clks = 2,
+ .clksel_mask = S5PV210_UCON_CLKMASK,
+ .clksel_shift = S5PV210_UCON_CLKSHIFT,
+ },
+ .def_cfg = &(struct s3c2410_uartcfg) {
+ .ucon = S5PV210_UCON_DEFAULT,
+ .ufcon = S5PV210_UFCON_DEFAULT,
+ },
+ .fifosize = { 256, 64, 16, 16 },
+};
+#define S5PV210_SERIAL_DRV_DATA ((kernel_ulong_t)&s5pv210_serial_drv_data)
+#else
+#define S5PV210_SERIAL_DRV_DATA (kernel_ulong_t)NULL
+#endif
- if (strcmp(dev->name, drv->driver.name) != 0)
- return 0;
+#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS4212) || \
+ defined(CONFIG_SOC_EXYNOS4412) || defined(CONFIG_SOC_EXYNOS5250)
+static struct s3c24xx_serial_drv_data exynos4210_serial_drv_data = {
+ .info = &(struct s3c24xx_uart_info) {
+ .name = "Samsung Exynos4 UART",
+ .type = PORT_S3C6400,
+ .has_divslot = 1,
+ .rx_fifomask = S5PV210_UFSTAT_RXMASK,
+ .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT,
+ .rx_fifofull = S5PV210_UFSTAT_RXFULL,
+ .tx_fifofull = S5PV210_UFSTAT_TXFULL,
+ .tx_fifomask = S5PV210_UFSTAT_TXMASK,
+ .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT,
+ .def_clk_sel = S3C2410_UCON_CLKSEL0,
+ .num_clks = 1,
+ .clksel_mask = 0,
+ .clksel_shift = 0,
+ },
+ .def_cfg = &(struct s3c2410_uartcfg) {
+ .ucon = S5PV210_UCON_DEFAULT,
+ .ufcon = S5PV210_UFCON_DEFAULT,
+ .has_fracval = 1,
+ },
+ .fifosize = { 256, 64, 16, 16 },
+};
+#define EXYNOS4210_SERIAL_DRV_DATA ((kernel_ulong_t)&exynos4210_serial_drv_data)
+#else
+#define EXYNOS4210_SERIAL_DRV_DATA (kernel_ulong_t)NULL
+#endif
- s3c24xx_serial_console.data = &s3c24xx_uart_drv;
- s3c24xx_serial_init_ports(info);
+static struct platform_device_id s3c24xx_serial_driver_ids[] = {
+ {
+ .name = "s3c2410-uart",
+ .driver_data = S3C2410_SERIAL_DRV_DATA,
+ }, {
+ .name = "s3c2412-uart",
+ .driver_data = S3C2412_SERIAL_DRV_DATA,
+ }, {
+ .name = "s3c2440-uart",
+ .driver_data = S3C2440_SERIAL_DRV_DATA,
+ }, {
+ .name = "s3c6400-uart",
+ .driver_data = S3C6400_SERIAL_DRV_DATA,
+ }, {
+ .name = "s5pv210-uart",
+ .driver_data = S5PV210_SERIAL_DRV_DATA,
+ }, {
+ .name = "exynos4210-uart",
+ .driver_data = EXYNOS4210_SERIAL_DRV_DATA,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(platform, s3c24xx_serial_driver_ids);
- register_console(&s3c24xx_serial_console);
- return 0;
+#ifdef CONFIG_OF
+static const struct of_device_id s3c24xx_uart_dt_match[] = {
+ { .compatible = "samsung,exynos4210-uart",
+ .data = (void *)EXYNOS4210_SERIAL_DRV_DATA },
+ {},
+};
+MODULE_DEVICE_TABLE(of, s3c24xx_uart_dt_match);
+#else
+#define s3c24xx_uart_dt_match NULL
+#endif
+
+static struct platform_driver samsung_serial_driver = {
+ .probe = s3c24xx_serial_probe,
+ .remove = __devexit_p(s3c24xx_serial_remove),
+ .id_table = s3c24xx_serial_driver_ids,
+ .driver = {
+ .name = "samsung-uart",
+ .owner = THIS_MODULE,
+ .pm = SERIAL_SAMSUNG_PM_OPS,
+ .of_match_table = s3c24xx_uart_dt_match,
+ },
+};
+
+/* module initialisation code */
+
+static int __init s3c24xx_serial_modinit(void)
+{
+ int ret;
+
+ ret = uart_register_driver(&s3c24xx_uart_drv);
+ if (ret < 0) {
+ printk(KERN_ERR "failed to register UART driver\n");
+ return -1;
+ }
+
+ return platform_driver_register(&samsung_serial_driver);
}
-#endif /* CONFIG_SERIAL_SAMSUNG_CONSOLE */
+static void __exit s3c24xx_serial_modexit(void)
+{
+ uart_unregister_driver(&s3c24xx_uart_drv);
+}
+
+module_init(s3c24xx_serial_modinit);
+module_exit(s3c24xx_serial_modexit);
+MODULE_ALIAS("platform:samsung-uart");
MODULE_DESCRIPTION("Samsung SoC Serial port driver");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/samsung.h b/drivers/tty/serial/samsung.h
index 8e87b78..1a4bca3 100644
--- a/drivers/tty/serial/samsung.h
+++ b/drivers/tty/serial/samsung.h
@@ -19,20 +19,25 @@ struct s3c24xx_uart_info {
unsigned long tx_fifomask;
unsigned long tx_fifoshift;
unsigned long tx_fifofull;
+ unsigned int def_clk_sel;
+ unsigned long num_clks;
+ unsigned long clksel_mask;
+ unsigned long clksel_shift;
/* uart port features */
unsigned int has_divslot:1;
- /* clock source control */
-
- int (*get_clksrc)(struct uart_port *, struct s3c24xx_uart_clksrc *clk);
- int (*set_clksrc)(struct uart_port *, struct s3c24xx_uart_clksrc *clk);
-
/* uart controls */
int (*reset_port)(struct uart_port *, struct s3c2410_uartcfg *);
};
+struct s3c24xx_serial_drv_data {
+ struct s3c24xx_uart_info *info;
+ struct s3c2410_uartcfg *def_cfg;
+ unsigned int fifosize[CONFIG_SERIAL_SAMSUNG_UARTS];
+};
+
struct s3c24xx_uart_port {
unsigned char rx_claimed;
unsigned char tx_claimed;
@@ -43,10 +48,13 @@ struct s3c24xx_uart_port {
unsigned int tx_irq;
struct s3c24xx_uart_info *info;
- struct s3c24xx_uart_clksrc *clksrc;
struct clk *clk;
struct clk *baudclk;
struct uart_port port;
+ struct s3c24xx_serial_drv_data *drv_data;
+
+ /* reference to platform data */
+ struct s3c2410_uartcfg *cfg;
#ifdef CONFIG_CPU_FREQ
struct notifier_block freq_transition;
@@ -56,7 +64,6 @@ struct s3c24xx_uart_port {
/* conversion functions */
#define s3c24xx_dev_to_port(__dev) (struct uart_port *)dev_get_drvdata(__dev)
-#define s3c24xx_dev_to_cfg(__dev) (struct s3c2410_uartcfg *)((__dev)->platform_data)
/* register access controls */
@@ -69,17 +76,6 @@ struct s3c24xx_uart_port {
#define wr_regb(port, reg, val) __raw_writeb(val, portaddr(port, reg))
#define wr_regl(port, reg, val) __raw_writel(val, portaddr(port, reg))
-extern int s3c24xx_serial_probe(struct platform_device *dev,
- struct s3c24xx_uart_info *uart);
-
-extern int __devexit s3c24xx_serial_remove(struct platform_device *dev);
-
-extern int s3c24xx_serial_initconsole(struct platform_driver *drv,
- struct s3c24xx_uart_info **uart);
-
-extern int s3c24xx_serial_init(struct platform_driver *drv,
- struct s3c24xx_uart_info *info);
-
#ifdef CONFIG_SERIAL_SAMSUNG_DEBUG
extern void printascii(const char *);
diff --git a/drivers/tty/serial/sc26xx.c b/drivers/tty/serial/sc26xx.c
index 75038ad..e0b4b0a 100644
--- a/drivers/tty/serial/sc26xx.c
+++ b/drivers/tty/serial/sc26xx.c
@@ -736,19 +736,7 @@ static struct platform_driver sc26xx_driver = {
},
};
-static int __init sc26xx_init(void)
-{
- return platform_driver_register(&sc26xx_driver);
-}
-
-static void __exit sc26xx_exit(void)
-{
- platform_driver_unregister(&sc26xx_driver);
-}
-
-module_init(sc26xx_init);
-module_exit(sc26xx_exit);
-
+module_platform_driver(sc26xx_driver);
MODULE_AUTHOR("Thomas Bogendörfer");
MODULE_DESCRIPTION("SC681/SC2692 serial driver");
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 0406d7f..1305618 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -22,6 +22,7 @@
*/
#include <linux/module.h>
#include <linux/tty.h>
+#include <linux/tty_flip.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/console.h>
@@ -60,6 +61,8 @@ static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
static void uart_wait_until_sent(struct tty_struct *tty, int timeout);
static void uart_change_pm(struct uart_state *state, int pm_state);
+static void uart_port_shutdown(struct tty_port *port);
+
/*
* This routine is used by the interrupt handler to schedule processing in
* the software interrupt portion of the driver.
@@ -128,25 +131,16 @@ uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear)
* Startup the port. This will be called once per open. All calls
* will be serialised by the per-port mutex.
*/
-static int uart_startup(struct tty_struct *tty, struct uart_state *state, int init_hw)
+static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
+ int init_hw)
{
struct uart_port *uport = state->uart_port;
struct tty_port *port = &state->port;
unsigned long page;
int retval = 0;
- if (port->flags & ASYNC_INITIALIZED)
- return 0;
-
- /*
- * Set the TTY IO error marker - we will only clear this
- * once we have successfully opened the port. Also set
- * up the tty->alt_speed kludge
- */
- set_bit(TTY_IO_ERROR, &tty->flags);
-
if (uport->type == PORT_UNKNOWN)
- return 0;
+ return 1;
/*
* Initialise and allocate the transmit and temporary
@@ -188,10 +182,6 @@ static int uart_startup(struct tty_struct *tty, struct uart_state *state, int in
tty->hw_stopped = 1;
spin_unlock_irq(&uport->lock);
}
-
- set_bit(ASYNCB_INITIALIZED, &port->flags);
-
- clear_bit(TTY_IO_ERROR, &tty->flags);
}
/*
@@ -200,6 +190,31 @@ static int uart_startup(struct tty_struct *tty, struct uart_state *state, int in
* now.
*/
if (retval && capable(CAP_SYS_ADMIN))
+ return 1;
+
+ return retval;
+}
+
+static int uart_startup(struct tty_struct *tty, struct uart_state *state,
+ int init_hw)
+{
+ struct tty_port *port = &state->port;
+ int retval;
+
+ if (port->flags & ASYNC_INITIALIZED)
+ return 0;
+
+ /*
+ * Set the TTY IO error marker - we will only clear this
+ * once we have successfully opened the port.
+ */
+ set_bit(TTY_IO_ERROR, &tty->flags);
+
+ retval = uart_port_startup(tty, state, init_hw);
+ if (!retval) {
+ set_bit(ASYNCB_INITIALIZED, &port->flags);
+ clear_bit(TTY_IO_ERROR, &tty->flags);
+ } else if (retval > 0)
retval = 0;
return retval;
@@ -228,24 +243,7 @@ static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
if (!tty || (tty->termios->c_cflag & HUPCL))
uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
- /*
- * clear delta_msr_wait queue to avoid mem leaks: we may free
- * the irq here so the queue might never be woken up. Note
- * that we won't end up waiting on delta_msr_wait again since
- * any outstanding file descriptors should be pointing at
- * hung_up_tty_fops now.
- */
- wake_up_interruptible(&port->delta_msr_wait);
-
- /*
- * Free the IRQ and disable the port.
- */
- uport->ops->shutdown(uport);
-
- /*
- * Ensure that the IRQ handler isn't running on another CPU.
- */
- synchronize_irq(uport->irq);
+ uart_port_shutdown(port);
}
/*
@@ -423,7 +421,7 @@ uart_get_divisor(struct uart_port *port, unsigned int baud)
if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST)
quot = port->custom_divisor;
else
- quot = (port->uartclk + (8 * baud)) / (16 * baud);
+ quot = DIV_ROUND_CLOSEST(port->uartclk, 16 * baud);
return quot;
}
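
For positive operands DIV_ROUND_CLOSEST(clk, 16 * baud) expands to (clk + (16 * baud) / 2) / (16 * baud), i.e. exactly the (clk + 8 * baud) / (16 * baud) expression it replaces, so the computed divisor is unchanged; for example, with uartclk = 48000000 and baud = 115200 both forms give (48000000 + 921600) / 1843200 = 26.
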
@@ -658,10 +656,10 @@ static int uart_get_info(struct uart_state *state,
tmp.flags = uport->flags;
tmp.xmit_fifo_size = uport->fifosize;
tmp.baud_base = uport->uartclk / 16;
- tmp.close_delay = port->close_delay / 10;
+ tmp.close_delay = jiffies_to_msecs(port->close_delay) / 10;
tmp.closing_wait = port->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
ASYNC_CLOSING_WAIT_NONE :
- port->closing_wait / 10;
+ jiffies_to_msecs(port->closing_wait) / 10;
tmp.custom_divisor = uport->custom_divisor;
tmp.hub6 = uport->hub6;
tmp.io_type = uport->iotype;
@@ -695,9 +693,10 @@ static int uart_set_info(struct tty_struct *tty, struct uart_state *state,
new_port += (unsigned long) new_serial.port_high << HIGH_BITS_OFFSET;
new_serial.irq = irq_canonicalize(new_serial.irq);
- close_delay = new_serial.close_delay * 10;
+ close_delay = msecs_to_jiffies(new_serial.close_delay * 10);
closing_wait = new_serial.closing_wait == ASYNC_CLOSING_WAIT_NONE ?
- ASYNC_CLOSING_WAIT_NONE : new_serial.closing_wait * 10;
+ ASYNC_CLOSING_WAIT_NONE :
+ msecs_to_jiffies(new_serial.closing_wait * 10);
/*
* This semaphore protects port->count. It is also
@@ -1265,47 +1264,8 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
pr_debug("uart_close(%d) called\n", uport->line);
- spin_lock_irqsave(&port->lock, flags);
-
- if (tty_hung_up_p(filp)) {
- spin_unlock_irqrestore(&port->lock, flags);
+ if (tty_port_close_start(port, tty, filp) == 0)
return;
- }
-
- if ((tty->count == 1) && (port->count != 1)) {
- /*
- * Uh, oh. tty->count is 1, which means that the tty
- * structure will be freed. port->count should always
- * be one in these conditions. If it's greater than
- * one, we've got real problems, since it means the
- * serial port won't be shutdown.
- */
- printk(KERN_ERR "uart_close: bad serial port count; tty->count is 1, "
- "port->count is %d\n", port->count);
- port->count = 1;
- }
- if (--port->count < 0) {
- printk(KERN_ERR "uart_close: bad serial port count for %s: %d\n",
- tty->name, port->count);
- port->count = 0;
- }
- if (port->count) {
- spin_unlock_irqrestore(&port->lock, flags);
- return;
- }
-
- /*
- * Now we wait for the transmit buffer to clear; and we notify
- * the line discipline to only process XON/XOFF characters by
- * setting tty->closing.
- */
- set_bit(ASYNCB_CLOSING, &port->flags);
- tty->closing = 1;
- spin_unlock_irqrestore(&port->lock, flags);
-
- if (port->closing_wait != ASYNC_CLOSING_WAIT_NONE)
- tty_wait_until_sent_from_close(tty,
- msecs_to_jiffies(port->closing_wait));
/*
* At this point, we stop accepting input. To do this, we
@@ -1337,7 +1297,8 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
if (port->blocked_open) {
spin_unlock_irqrestore(&port->lock, flags);
if (port->close_delay)
- msleep_interruptible(port->close_delay);
+ msleep_interruptible(
+ jiffies_to_msecs(port->close_delay));
spin_lock_irqsave(&port->lock, flags);
} else if (!uart_console(uport)) {
spin_unlock_irqrestore(&port->lock, flags);
@@ -1441,6 +1402,36 @@ static void uart_hangup(struct tty_struct *tty)
mutex_unlock(&port->mutex);
}
+static int uart_port_activate(struct tty_port *port, struct tty_struct *tty)
+{
+ return 0;
+}
+
+static void uart_port_shutdown(struct tty_port *port)
+{
+ struct uart_state *state = container_of(port, struct uart_state, port);
+ struct uart_port *uport = state->uart_port;
+
+ /*
+ * clear delta_msr_wait queue to avoid mem leaks: we may free
+ * the irq here so the queue might never be woken up. Note
+ * that we won't end up waiting on delta_msr_wait again since
+ * any outstanding file descriptors should be pointing at
+ * hung_up_tty_fops now.
+ */
+ wake_up_interruptible(&port->delta_msr_wait);
+
+ /*
+ * Free the IRQ and disable the port.
+ */
+ uport->ops->shutdown(uport);
+
+ /*
+ * Ensure that the IRQ handler isn't running on another CPU.
+ */
+ synchronize_irq(uport->irq);
+}
+
static int uart_carrier_raised(struct tty_port *port)
{
struct uart_state *state = container_of(port, struct uart_state, port);
@@ -1466,33 +1457,6 @@ static void uart_dtr_rts(struct tty_port *port, int onoff)
uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);
}
-static struct uart_state *uart_get(struct uart_driver *drv, int line)
-{
- struct uart_state *state;
- struct tty_port *port;
- int ret = 0;
-
- state = drv->state + line;
- port = &state->port;
- if (mutex_lock_interruptible(&port->mutex)) {
- ret = -ERESTARTSYS;
- goto err;
- }
-
- port->count++;
- if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
- ret = -ENXIO;
- goto err_unlock;
- }
- return state;
-
- err_unlock:
- port->count--;
- mutex_unlock(&port->mutex);
- err:
- return ERR_PTR(ret);
-}
-
/*
* calls to uart_open are serialised by the BKL in
* fs/char_dev.c:chrdev_open()
@@ -1506,26 +1470,29 @@ static struct uart_state *uart_get(struct uart_driver *drv, int line)
static int uart_open(struct tty_struct *tty, struct file *filp)
{
struct uart_driver *drv = (struct uart_driver *)tty->driver->driver_state;
- struct uart_state *state;
- struct tty_port *port;
int retval, line = tty->index;
+ struct uart_state *state = drv->state + line;
+ struct tty_port *port = &state->port;
pr_debug("uart_open(%d) called\n", line);
/*
- * We take the semaphore inside uart_get to guarantee that we won't
- * be re-entered while allocating the state structure, or while we
- * request any IRQs that the driver may need. This also has the nice
- * side-effect that it delays the action of uart_hangup, so we can
- * guarantee that state->port.tty will always contain something
- * reasonable.
+ * We take the semaphore here to guarantee that we won't be re-entered
+ * while allocating the state structure, or while we request any IRQs
+ * that the driver may need. This also has the nice side-effect that
+ * it delays the action of uart_hangup, so we can guarantee that
+ * state->port.tty will always contain something reasonable.
*/
- state = uart_get(drv, line);
- if (IS_ERR(state)) {
- retval = PTR_ERR(state);
- goto fail;
+ if (mutex_lock_interruptible(&port->mutex)) {
+ retval = -ERESTARTSYS;
+ goto end;
+ }
+
+ port->count++;
+ if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
+ retval = -ENXIO;
+ goto err_dec_count;
}
- port = &state->port;
/*
* Once we set tty->driver_data here, we are guaranteed that
@@ -1535,7 +1502,6 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
tty->driver_data = state;
state->uart_port->state = state;
tty->low_latency = (state->uart_port->flags & UPF_LOW_LATENCY) ? 1 : 0;
- tty->alt_speed = 0;
tty_port_tty_set(port, tty);
/*
@@ -1543,9 +1509,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
*/
if (tty_hung_up_p(filp)) {
retval = -EAGAIN;
- port->count--;
- mutex_unlock(&port->mutex);
- goto fail;
+ goto err_dec_count;
}
/*
@@ -1566,8 +1530,12 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
if (retval == 0)
retval = tty_port_block_til_ready(port, tty, filp);
-fail:
+end:
return retval;
+err_dec_count:
+ port->count--;
+ mutex_unlock(&port->mutex);
+ goto end;
}
static const char *uart_type(struct uart_port *port)
@@ -1858,6 +1826,14 @@ uart_set_options(struct uart_port *port, struct console *co,
EXPORT_SYMBOL_GPL(uart_set_options);
#endif /* CONFIG_SERIAL_CORE_CONSOLE */
+/**
+ * uart_change_pm - set power state of the port
+ *
+ * @state: port descriptor
+ * @pm_state: new state
+ *
+ * Locking: port->mutex has to be held
+ */
static void uart_change_pm(struct uart_state *state, int pm_state)
{
struct uart_port *port = state->uart_port;
@@ -2214,6 +2190,8 @@ static const struct tty_operations uart_ops = {
};
static const struct tty_port_operations uart_port_ops = {
+ .activate = uart_port_activate,
+ .shutdown = uart_port_shutdown,
.carrier_raised = uart_carrier_raised,
.dtr_rts = uart_dtr_rts,
};
@@ -2275,8 +2253,8 @@ int uart_register_driver(struct uart_driver *drv)
tty_port_init(port);
port->ops = &uart_port_ops;
- port->close_delay = 500; /* .5 seconds */
- port->closing_wait = 30000; /* 30 seconds */
+ port->close_delay = HZ / 2; /* .5 seconds */
+ port->closing_wait = 30 * HZ;/* 30 seconds */
}
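
With close_delay and closing_wait now held in jiffies, the serial_struct ioctl paths above convert to and from the traditional units of 10 ms: for example, at HZ = 100 the default close_delay of HZ / 2 is 50 jiffies, jiffies_to_msecs() turns that into 500 ms, and uart_get_info() reports 500 / 10 = 50 to userspace, the same 0.5 s as before.
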
retval = tty_register_driver(normal);
@@ -2370,11 +2348,11 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
*/
tty_dev = tty_register_device(drv->tty_driver, uport->line, uport->dev);
if (likely(!IS_ERR(tty_dev))) {
- device_init_wakeup(tty_dev, 1);
- device_set_wakeup_enable(tty_dev, 0);
- } else
+ device_set_wakeup_capable(tty_dev, 1);
+ } else {
printk(KERN_ERR "Cannot register tty device on line %d\n",
uport->line);
+ }
/*
* Ensure UPF_DEAD is not set.
@@ -2467,6 +2445,99 @@ int uart_match_port(struct uart_port *port1, struct uart_port *port2)
}
EXPORT_SYMBOL(uart_match_port);
+/**
+ * uart_handle_dcd_change - handle a change of carrier detect state
+ * @uport: uart_port structure for the open port
+ * @status: new carrier detect status, nonzero if active
+ */
+void uart_handle_dcd_change(struct uart_port *uport, unsigned int status)
+{
+ struct uart_state *state = uport->state;
+ struct tty_port *port = &state->port;
+ struct tty_ldisc *ld = tty_ldisc_ref(port->tty);
+ struct pps_event_time ts;
+
+ if (ld && ld->ops->dcd_change)
+ pps_get_ts(&ts);
+
+ uport->icount.dcd++;
+#ifdef CONFIG_HARD_PPS
+ if ((uport->flags & UPF_HARDPPS_CD) && status)
+ hardpps();
+#endif
+
+ if (port->flags & ASYNC_CHECK_CD) {
+ if (status)
+ wake_up_interruptible(&port->open_wait);
+ else if (port->tty)
+ tty_hangup(port->tty);
+ }
+
+ if (ld && ld->ops->dcd_change)
+ ld->ops->dcd_change(port->tty, status, &ts);
+ if (ld)
+ tty_ldisc_deref(ld);
+}
+EXPORT_SYMBOL_GPL(uart_handle_dcd_change);
+
+/**
+ * uart_handle_cts_change - handle a change of clear-to-send state
+ * @uport: uart_port structure for the open port
+ * @status: new clear to send status, nonzero if active
+ */
+void uart_handle_cts_change(struct uart_port *uport, unsigned int status)
+{
+ struct tty_port *port = &uport->state->port;
+ struct tty_struct *tty = port->tty;
+
+ uport->icount.cts++;
+
+ if (port->flags & ASYNC_CTS_FLOW) {
+ if (tty->hw_stopped) {
+ if (status) {
+ tty->hw_stopped = 0;
+ uport->ops->start_tx(uport);
+ uart_write_wakeup(uport);
+ }
+ } else {
+ if (!status) {
+ tty->hw_stopped = 1;
+ uport->ops->stop_tx(uport);
+ }
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(uart_handle_cts_change);
+
+/**
+ * uart_insert_char - push a char to the uart layer
+ *
+ * The caller is responsible for calling tty_flip_buffer_push() when done
+ * with insertion.
+ *
+ * @port: corresponding port
+ * @status: state of the serial port RX buffer (LSR for 8250)
+ * @overrun: mask of overrun bits in @status
+ * @ch: character to push
+ * @flag: flag for the character (see TTY_NORMAL and friends)
+ */
+void uart_insert_char(struct uart_port *port, unsigned int status,
+ unsigned int overrun, unsigned int ch, unsigned int flag)
+{
+ struct tty_struct *tty = port->state->port.tty;
+
+ if ((status & port->ignore_status_mask & ~overrun) == 0)
+ tty_insert_flip_char(tty, ch, flag);
+
+ /*
+ * Overrun is special. Since it's reported immediately,
+ * it doesn't affect the current character.
+ */
+ if (status & ~port->ignore_status_mask & overrun)
+ tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+}
+EXPORT_SYMBOL_GPL(uart_insert_char);
+
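
A minimal sketch of how a driver's receive path is expected to use this helper, with hypothetical example_* accessors standing in for real hardware register reads (not part of this patch):

	/* example_read_status()/example_read_char() and the EXAMPLE_* bits are
	 * placeholders for a driver's own register accessors. */
	static void example_rx_chars(struct uart_port *port)
	{
		struct tty_struct *tty = port->state->port.tty;
		unsigned int status, ch;

		while ((status = example_read_status(port)) & EXAMPLE_RX_READY) {
			ch = example_read_char(port);
			port->icount.rx++;
			uart_insert_char(port, status, EXAMPLE_OVERRUN, ch, TTY_NORMAL);
		}

		/* flush everything queued above to the line discipline */
		tty_flip_buffer_push(tty);
	}
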
EXPORT_SYMBOL(uart_write_wakeup);
EXPORT_SYMBOL(uart_register_driver);
EXPORT_SYMBOL(uart_unregister_driver);
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index aff9d61..61b7fd2 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -50,6 +50,7 @@
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
+#include <linux/gpio.h>
#ifdef CONFIG_SUPERH
#include <asm/sh_bios.h>
@@ -73,6 +74,7 @@ struct sci_port {
struct clk *fclk;
char *irqstr[SCIx_NR_IRQS];
+ char *gpiostr[SCIx_NR_FNS];
struct dma_chan *chan_tx;
struct dma_chan *chan_rx;
@@ -474,8 +476,15 @@ static void sci_init_pins(struct uart_port *port, unsigned int cflag)
if (!reg->size)
return;
- if (!(cflag & CRTSCTS))
- sci_out(port, SCSPTR, 0x0080); /* Set RTS = 1 */
+ if ((s->cfg->capabilities & SCIx_HAVE_RTSCTS) &&
+ ((!(cflag & CRTSCTS)))) {
+ unsigned short status;
+
+ status = sci_in(port, SCSPTR);
+ status &= ~SCSPTR_CTSIO;
+ status |= SCSPTR_RTSIO;
+ sci_out(port, SCSPTR, status); /* Set RTS = 1 */
+ }
}
static int sci_txfill(struct uart_port *port)
@@ -621,6 +630,7 @@ static void sci_receive_chars(struct uart_port *port)
} else {
for (i = 0; i < count; i++) {
char c = sci_in(port, SCxRDR);
+
status = sci_in(port, SCxSR);
#if defined(CONFIG_CPU_SH3)
/* Skip "chars" during break */
@@ -649,9 +659,11 @@ static void sci_receive_chars(struct uart_port *port)
/* Store data and status */
if (status & SCxSR_FER(port)) {
flag = TTY_FRAME;
+ port->icount.frame++;
dev_notice(port->dev, "frame error\n");
} else if (status & SCxSR_PER(port)) {
flag = TTY_PARITY;
+ port->icount.parity++;
dev_notice(port->dev, "parity error\n");
} else
flag = TTY_NORMAL;
@@ -723,6 +735,8 @@ static int sci_handle_errors(struct uart_port *port)
*/
if (s->cfg->overrun_bit != SCIx_NOT_SUPPORTED) {
if (status & (1 << s->cfg->overrun_bit)) {
+ port->icount.overrun++;
+
/* overrun error */
if (tty_insert_flip_char(tty, 0, TTY_OVERRUN))
copied++;
@@ -737,6 +751,8 @@ static int sci_handle_errors(struct uart_port *port)
struct sci_port *sci_port = to_sci_port(port);
if (!sci_port->break_flag) {
+ port->icount.brk++;
+
sci_port->break_flag = 1;
sci_schedule_break_timer(sci_port);
@@ -752,6 +768,8 @@ static int sci_handle_errors(struct uart_port *port)
} else {
/* frame error */
+ port->icount.frame++;
+
if (tty_insert_flip_char(tty, 0, TTY_FRAME))
copied++;
@@ -761,6 +779,8 @@ static int sci_handle_errors(struct uart_port *port)
if (status & SCxSR_PER(port)) {
/* parity error */
+ port->icount.parity++;
+
if (tty_insert_flip_char(tty, 0, TTY_PARITY))
copied++;
@@ -787,6 +807,8 @@ static int sci_handle_fifo_overrun(struct uart_port *port)
if ((sci_in(port, SCLSR) & (1 << s->cfg->overrun_bit))) {
sci_out(port, SCLSR, 0);
+ port->icount.overrun++;
+
tty_insert_flip_char(tty, 0, TTY_OVERRUN);
tty_flip_buffer_push(tty);
@@ -812,6 +834,9 @@ static int sci_handle_breaks(struct uart_port *port)
/* Debounce break */
s->break_flag = 1;
#endif
+
+ port->icount.brk++;
+
/* Notify of BREAK */
if (tty_insert_flip_char(tty, 0, TTY_BREAK))
copied++;
@@ -1082,6 +1107,67 @@ static void sci_free_irq(struct sci_port *port)
}
}
+static const char *sci_gpio_names[SCIx_NR_FNS] = {
+ "sck", "rxd", "txd", "cts", "rts",
+};
+
+static const char *sci_gpio_str(unsigned int index)
+{
+ return sci_gpio_names[index];
+}
+
+static void __devinit sci_init_gpios(struct sci_port *port)
+{
+ struct uart_port *up = &port->port;
+ int i;
+
+ if (!port->cfg)
+ return;
+
+ for (i = 0; i < SCIx_NR_FNS; i++) {
+ const char *desc;
+ int ret;
+
+ if (!port->cfg->gpios[i])
+ continue;
+
+ desc = sci_gpio_str(i);
+
+ port->gpiostr[i] = kasprintf(GFP_KERNEL, "%s:%s",
+ dev_name(up->dev), desc);
+
+ /*
+ * If we've failed the allocation, we can still continue
+ * on with a NULL string.
+ */
+ if (!port->gpiostr[i])
+ dev_notice(up->dev, "%s string allocation failure\n",
+ desc);
+
+ ret = gpio_request(port->cfg->gpios[i], port->gpiostr[i]);
+ if (unlikely(ret != 0)) {
+ dev_notice(up->dev, "failed %s gpio request\n", desc);
+
+ /*
+ * If we can't get the GPIO for whatever reason,
+ * no point in keeping the verbose string around.
+ */
+ kfree(port->gpiostr[i]);
+ }
+ }
+}
+
+static void sci_free_gpios(struct sci_port *port)
+{
+ int i;
+
+ for (i = 0; i < SCIx_NR_FNS; i++)
+ if (port->cfg->gpios[i]) {
+ gpio_free(port->cfg->gpios[i]);
+ kfree(port->gpiostr[i]);
+ }
+}
+
static unsigned int sci_tx_empty(struct uart_port *port)
{
unsigned short status = sci_in(port, SCxSR);
@@ -1090,19 +1176,39 @@ static unsigned int sci_tx_empty(struct uart_port *port)
return (status & SCxSR_TEND(port)) && !in_tx_fifo ? TIOCSER_TEMT : 0;
}
+/*
+ * Modem control is a bit of a mixed bag for SCI(F) ports. Generally
+ * CTS/RTS is supported in hardware by at least one port and controlled
+ * via SCSPTR (SCxPCR for SCIFA/B parts), or external pins (presently
+ * handled via the ->init_pins() op, which is a bit of a one-way street,
+ * lacking any ability to defer pin control -- this will later be
+ * converted over to the GPIO framework).
+ *
+ * Other modes (such as loopback) are supported generically on certain
+ * port types, but not others. For these it's sufficient to test for the
+ * existence of the support register and simply ignore the port type.
+ */
static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
- /* This routine is used for seting signals of: DTR, DCD, CTS/RTS */
- /* We use SCIF's hardware for CTS/RTS, so don't need any for that. */
- /* If you have signals for DTR and DCD, please implement here. */
+ if (mctrl & TIOCM_LOOP) {
+ struct plat_sci_reg *reg;
+
+ /*
+ * Standard loopback mode for SCFCR ports.
+ */
+ reg = sci_getreg(port, SCFCR);
+ if (reg->size)
+ sci_out(port, SCFCR, sci_in(port, SCFCR) | 1);
+ }
}
static unsigned int sci_get_mctrl(struct uart_port *port)
{
- /* This routine is used for getting signals of: DTR, DCD, DSR, RI,
- and CTS/RTS */
-
- return TIOCM_DTR | TIOCM_RTS | TIOCM_CTS | TIOCM_DSR;
+ /*
+ * CTS/RTS is handled in hardware when supported, while nothing
+ * else is wired up. Keep it simple and simply assert DSR/CAR.
+ */
+ return TIOCM_DSR | TIOCM_CAR;
}
#ifdef CONFIG_SERIAL_SH_SCI_DMA
@@ -1233,7 +1339,7 @@ static void sci_submit_rx(struct sci_port *s)
struct dma_async_tx_descriptor *desc;
desc = chan->device->device_prep_slave_sg(chan,
- sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT);
+ sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
if (desc) {
s->desc_rx[i] = desc;
@@ -1348,7 +1454,7 @@ static void work_fn_tx(struct work_struct *work)
BUG_ON(!sg_dma_len(sg));
desc = chan->device->device_prep_slave_sg(chan,
- sg, s->sg_len_tx, DMA_TO_DEVICE,
+ sg, s->sg_len_tx, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
/* switch to PIO */
@@ -1449,12 +1555,17 @@ static void sci_stop_rx(struct uart_port *port)
static void sci_enable_ms(struct uart_port *port)
{
- /* Nothing here yet .. */
+ /*
+ * Not supported by hardware, always a nop.
+ */
}
static void sci_break_ctl(struct uart_port *port, int break_state)
{
- /* Nothing here yet .. */
+ /*
+ * Not supported by hardware. Most parts couple break and rx
+ * interrupts together, with break detection always enabled.
+ */
}
#ifdef CONFIG_SERIAL_SH_SCI_DMA
@@ -1599,6 +1710,8 @@ static int sci_startup(struct uart_port *port)
dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
+ pm_runtime_put_noidle(port->dev);
+
sci_port_enable(s);
ret = sci_request_irq(s);
@@ -1626,6 +1739,8 @@ static void sci_shutdown(struct uart_port *port)
sci_free_irq(s);
sci_port_disable(s);
+
+ pm_runtime_get_noresume(port->dev);
}
static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
@@ -1652,6 +1767,7 @@ static unsigned int sci_scbrr_calc(unsigned int algo_id, unsigned int bps,
static void sci_reset(struct uart_port *port)
{
+ struct plat_sci_reg *reg;
unsigned int status;
do {
@@ -1660,7 +1776,8 @@ static void sci_reset(struct uart_port *port)
sci_out(port, SCSCR, 0x00); /* TE=0, RE=0, CKE1=0 */
- if (port->type != PORT_SCI)
+ reg = sci_getreg(port, SCFCR);
+ if (reg->size)
sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
}
@@ -1668,9 +1785,9 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
struct ktermios *old)
{
struct sci_port *s = to_sci_port(port);
+ struct plat_sci_reg *reg;
unsigned int baud, smr_val, max_baud;
int t = -1;
- u16 scfcr = 0;
/*
* earlyprintk comes here early on with port->uartclk set to zero.
@@ -1720,7 +1837,27 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
}
sci_init_pins(port, termios->c_cflag);
- sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0));
+
+ reg = sci_getreg(port, SCFCR);
+ if (reg->size) {
+ unsigned short ctrl = sci_in(port, SCFCR);
+
+ if (s->cfg->capabilities & SCIx_HAVE_RTSCTS) {
+ if (termios->c_cflag & CRTSCTS)
+ ctrl |= SCFCR_MCE;
+ else
+ ctrl &= ~SCFCR_MCE;
+ }
+
+ /*
+ * As we've done a sci_reset() above, ensure we don't
+ * interfere with the FIFOs while toggling MCE. As the
+ * reset values could still be set, simply mask them out.
+ */
+ ctrl &= ~(SCFCR_RFRST | SCFCR_TFRST);
+
+ sci_out(port, SCFCR, ctrl);
+ }
sci_out(port, SCSCR, s->cfg->scscr);
@@ -1892,6 +2029,8 @@ static int __devinit sci_init_single(struct platform_device *dev,
struct uart_port *port = &sci_port->port;
int ret;
+ sci_port->cfg = p;
+
port->ops = &sci_uart_ops;
port->iotype = UPIO_MEM;
port->line = index;
@@ -1937,7 +2076,10 @@ static int __devinit sci_init_single(struct platform_device *dev,
port->dev = &dev->dev;
+ sci_init_gpios(sci_port);
+
pm_runtime_irq_safe(&dev->dev);
+ pm_runtime_get_noresume(&dev->dev);
pm_runtime_enable(&dev->dev);
}
@@ -1971,8 +2113,6 @@ static int __devinit sci_init_single(struct platform_device *dev,
p->error_mask |= (1 << p->overrun_bit);
}
- sci_port->cfg = p;
-
port->mapbase = p->mapbase;
port->type = p->type;
port->flags = p->flags;
@@ -2113,9 +2253,16 @@ static int sci_runtime_suspend(struct device *dev)
struct uart_port *port = &sci_port->port;
if (uart_console(port)) {
+ struct plat_sci_reg *reg;
+
sci_port->saved_smr = sci_in(port, SCSMR);
sci_port->saved_brr = sci_in(port, SCBRR);
- sci_port->saved_fcr = sci_in(port, SCFCR);
+
+ reg = sci_getreg(port, SCFCR);
+ if (reg->size)
+ sci_port->saved_fcr = sci_in(port, SCFCR);
+ else
+ sci_port->saved_fcr = 0;
}
return 0;
}
@@ -2129,7 +2276,10 @@ static int sci_runtime_resume(struct device *dev)
sci_reset(port);
sci_out(port, SCSMR, sci_port->saved_smr);
sci_out(port, SCBRR, sci_port->saved_brr);
- sci_out(port, SCFCR, sci_port->saved_fcr);
+
+ if (sci_port->saved_fcr)
+ sci_out(port, SCFCR, sci_port->saved_fcr);
+
sci_out(port, SCSCR, sci_port->cfg->scscr);
}
return 0;
@@ -2169,6 +2319,8 @@ static int sci_remove(struct platform_device *dev)
cpufreq_unregister_notifier(&port->freq_transition,
CPUFREQ_TRANSITION_NOTIFIER);
+ sci_free_gpios(port);
+
uart_remove_one_port(&sci_uart_driver, &port->port);
clk_put(port->iclk);
diff --git a/drivers/tty/serial/sh-sci.h b/drivers/tty/serial/sh-sci.h
index e9bed03..a1a2d36 100644
--- a/drivers/tty/serial/sh-sci.h
+++ b/drivers/tty/serial/sh-sci.h
@@ -17,7 +17,9 @@
defined(CONFIG_ARCH_SH73A0) || \
defined(CONFIG_ARCH_SH7367) || \
defined(CONFIG_ARCH_SH7377) || \
- defined(CONFIG_ARCH_SH7372)
+ defined(CONFIG_ARCH_SH7372) || \
+ defined(CONFIG_ARCH_R8A7740)
+
# define SCxSR_RDxF_CLEAR(port) (sci_in(port, SCxSR) & 0xfffc)
# define SCxSR_ERROR_CLEAR(port) (sci_in(port, SCxSR) & 0xfd73)
# define SCxSR_TDxE_CLEAR(port) (sci_in(port, SCxSR) & 0xffdf)
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
new file mode 100644
index 0000000..a60523f
--- /dev/null
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -0,0 +1,783 @@
+/*
+ * Driver for CSR SiRFprimaII onboard UARTs.
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/sysrq.h>
+#include <linux/console.h>
+#include <linux/tty.h>
+#include <linux/tty_flip.h>
+#include <linux/serial_core.h>
+#include <linux/serial.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <asm/irq.h>
+#include <asm/mach/irq.h>
+#include <linux/pinctrl/pinmux.h>
+
+#include "sirfsoc_uart.h"
+
+static unsigned int
+sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count);
+static unsigned int
+sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count);
+static struct uart_driver sirfsoc_uart_drv;
+
+static const struct sirfsoc_baudrate_to_regv baudrate_to_regv[] = {
+ {4000000, 2359296},
+ {3500000, 1310721},
+ {3000000, 1572865},
+ {2500000, 1245186},
+ {2000000, 1572866},
+ {1500000, 1245188},
+ {1152000, 1638404},
+ {1000000, 1572869},
+ {921600, 1114120},
+ {576000, 1245196},
+ {500000, 1245198},
+ {460800, 1572876},
+ {230400, 1310750},
+ {115200, 1310781},
+ {57600, 1310843},
+ {38400, 1114328},
+ {19200, 1114545},
+ {9600, 1114979},
+};
+
+static struct sirfsoc_uart_port sirfsoc_uart_ports[SIRFSOC_UART_NR] = {
+ [0] = {
+ .port = {
+ .iotype = UPIO_MEM,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 0,
+ },
+ },
+ [1] = {
+ .port = {
+ .iotype = UPIO_MEM,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 1,
+ },
+ },
+ [2] = {
+ .port = {
+ .iotype = UPIO_MEM,
+ .flags = UPF_BOOT_AUTOCONF,
+ .line = 2,
+ },
+ },
+};
+
+static inline struct sirfsoc_uart_port *to_sirfport(struct uart_port *port)
+{
+ return container_of(port, struct sirfsoc_uart_port, port);
+}
+
+static inline unsigned int sirfsoc_uart_tx_empty(struct uart_port *port)
+{
+ unsigned long reg;
+ reg = rd_regl(port, SIRFUART_TX_FIFO_STATUS);
+ if (reg & SIRFUART_FIFOEMPTY_MASK(port))
+ return TIOCSER_TEMT;
+ else
+ return 0;
+}
+
+static unsigned int sirfsoc_uart_get_mctrl(struct uart_port *port)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ if (!(sirfport->ms_enabled)) {
+ goto cts_asserted;
+ } else if (sirfport->hw_flow_ctrl) {
+ if (!(rd_regl(port, SIRFUART_AFC_CTRL) &
+ SIRFUART_CTS_IN_STATUS))
+ goto cts_asserted;
+ else
+ goto cts_deasserted;
+ }
+cts_deasserted:
+ return TIOCM_CAR | TIOCM_DSR;
+cts_asserted:
+ return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
+}
+
+static void sirfsoc_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ unsigned int assert = mctrl & TIOCM_RTS;
+ unsigned int val = assert ? SIRFUART_AFC_CTRL_RX_THD : 0x0;
+ unsigned int current_val;
+ if (sirfport->hw_flow_ctrl) {
+ current_val = rd_regl(port, SIRFUART_AFC_CTRL) & ~0xFF;
+ val |= current_val;
+ wr_regl(port, SIRFUART_AFC_CTRL, val);
+ }
+}
+
+static void sirfsoc_uart_stop_tx(struct uart_port *port)
+{
+ unsigned int regv;
+ regv = rd_regl(port, SIRFUART_INT_EN);
+ wr_regl(port, SIRFUART_INT_EN, regv & ~SIRFUART_TX_INT_EN);
+}
+
+void sirfsoc_uart_start_tx(struct uart_port *port)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ unsigned long regv;
+ sirfsoc_uart_pio_tx_chars(sirfport, 1);
+ wr_regl(port, SIRFUART_TX_FIFO_OP, SIRFUART_TX_FIFO_START);
+ regv = rd_regl(port, SIRFUART_INT_EN);
+ wr_regl(port, SIRFUART_INT_EN, regv | SIRFUART_TX_INT_EN);
+}
+
+static void sirfsoc_uart_stop_rx(struct uart_port *port)
+{
+ unsigned long regv;
+ wr_regl(port, SIRFUART_RX_FIFO_OP, 0);
+ regv = rd_regl(port, SIRFUART_INT_EN);
+ wr_regl(port, SIRFUART_INT_EN, regv & ~SIRFUART_RX_IO_INT_EN);
+}
+
+static void sirfsoc_uart_disable_ms(struct uart_port *port)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ unsigned long reg;
+ sirfport->ms_enabled = 0;
+ if (!sirfport->hw_flow_ctrl)
+ return;
+ reg = rd_regl(port, SIRFUART_AFC_CTRL);
+ wr_regl(port, SIRFUART_AFC_CTRL, reg & ~0x3FF);
+ reg = rd_regl(port, SIRFUART_INT_EN);
+ wr_regl(port, SIRFUART_INT_EN, reg & ~SIRFUART_CTS_INT_EN);
+}
+
+static void sirfsoc_uart_enable_ms(struct uart_port *port)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ unsigned long reg;
+ unsigned long flg;
+ if (!sirfport->hw_flow_ctrl)
+ return;
+ flg = SIRFUART_AFC_RX_EN | SIRFUART_AFC_TX_EN;
+ reg = rd_regl(port, SIRFUART_AFC_CTRL);
+ wr_regl(port, SIRFUART_AFC_CTRL, reg | flg);
+ reg = rd_regl(port, SIRFUART_INT_EN);
+ wr_regl(port, SIRFUART_INT_EN, reg | SIRFUART_CTS_INT_EN);
+ uart_handle_cts_change(port,
+ !(rd_regl(port, SIRFUART_AFC_CTRL) & SIRFUART_CTS_IN_STATUS));
+ sirfport->ms_enabled = 1;
+}
+
+static void sirfsoc_uart_break_ctl(struct uart_port *port, int break_state)
+{
+ unsigned long ulcon = rd_regl(port, SIRFUART_LINE_CTRL);
+ if (break_state)
+ ulcon |= SIRFUART_SET_BREAK;
+ else
+ ulcon &= ~SIRFUART_SET_BREAK;
+ wr_regl(port, SIRFUART_LINE_CTRL, ulcon);
+}
+
+static unsigned int
+sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count)
+{
+ unsigned int ch, rx_count = 0;
+ struct tty_struct *tty;
+
+ tty = tty_port_tty_get(&port->state->port);
+ if (!tty)
+ return -ENODEV;
+
+ while (!(rd_regl(port, SIRFUART_RX_FIFO_STATUS) &
+ SIRFUART_FIFOEMPTY_MASK(port))) {
+ ch = rd_regl(port, SIRFUART_RX_FIFO_DATA) | SIRFUART_DUMMY_READ;
+ if (unlikely(uart_handle_sysrq_char(port, ch)))
+ continue;
+ uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
+ rx_count++;
+ if (rx_count >= max_rx_count)
+ break;
+ }
+
+ port->icount.rx += rx_count;
+ tty_flip_buffer_push(tty);
+ tty_kref_put(tty);
+
+ return rx_count;
+}
+
+static unsigned int
+sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count)
+{
+ struct uart_port *port = &sirfport->port;
+ struct circ_buf *xmit = &port->state->xmit;
+ unsigned int num_tx = 0;
+ while (!uart_circ_empty(xmit) &&
+ !(rd_regl(port, SIRFUART_TX_FIFO_STATUS) &
+ SIRFUART_FIFOFULL_MASK(port)) &&
+ count--) {
+ wr_regl(port, SIRFUART_TX_FIFO_DATA, xmit->buf[xmit->tail]);
+ xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ port->icount.tx++;
+ num_tx++;
+ }
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+ return num_tx;
+}
+
+static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
+{
+ unsigned long intr_status;
+ unsigned long cts_status;
+ unsigned long flag = TTY_NORMAL;
+ struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
+ struct uart_port *port = &sirfport->port;
+ struct uart_state *state = port->state;
+ struct circ_buf *xmit = &port->state->xmit;
+ intr_status = rd_regl(port, SIRFUART_INT_STATUS);
+ wr_regl(port, SIRFUART_INT_STATUS, intr_status);
+ intr_status &= rd_regl(port, SIRFUART_INT_EN);
+ if (unlikely(intr_status & (SIRFUART_ERR_INT_STAT))) {
+ if (intr_status & SIRFUART_RXD_BREAK) {
+ if (uart_handle_break(port))
+ goto recv_char;
+ uart_insert_char(port, intr_status,
+ SIRFUART_RX_OFLOW, 0, TTY_BREAK);
+ return IRQ_HANDLED;
+ }
+ if (intr_status & SIRFUART_RX_OFLOW)
+ port->icount.overrun++;
+ if (intr_status & SIRFUART_FRM_ERR) {
+ port->icount.frame++;
+ flag = TTY_FRAME;
+ }
+ if (intr_status & SIRFUART_PARITY_ERR)
+ flag = TTY_PARITY;
+ wr_regl(port, SIRFUART_RX_FIFO_OP, SIRFUART_RX_FIFO_RESET);
+ wr_regl(port, SIRFUART_RX_FIFO_OP, 0);
+ wr_regl(port, SIRFUART_RX_FIFO_OP, SIRFUART_RX_FIFO_START);
+ intr_status &= port->read_status_mask;
+ uart_insert_char(port, intr_status,
+ SIRFUART_RX_OFLOW_INT, 0, flag);
+ }
+recv_char:
+ if (intr_status & SIRFUART_CTS_INT_EN) {
+ cts_status = !(rd_regl(port, SIRFUART_AFC_CTRL) &
+ SIRFUART_CTS_IN_STATUS);
+ if (cts_status != 0) {
+ uart_handle_cts_change(port, 1);
+ } else {
+ uart_handle_cts_change(port, 0);
+ wake_up_interruptible(&state->port.delta_msr_wait);
+ }
+ }
+ if (intr_status & SIRFUART_RX_IO_INT_EN)
+ sirfsoc_uart_pio_rx_chars(port, SIRFSOC_UART_IO_RX_MAX_CNT);
+ if (intr_status & SIRFUART_TX_INT_EN) {
+ if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
+ return IRQ_HANDLED;
+ } else {
+ sirfsoc_uart_pio_tx_chars(sirfport,
+ SIRFSOC_UART_IO_TX_REASONABLE_CNT);
+ if ((uart_circ_empty(xmit)) &&
+ (rd_regl(port, SIRFUART_TX_FIFO_STATUS) &
+ SIRFUART_FIFOEMPTY_MASK(port)))
+ sirfsoc_uart_stop_tx(port);
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+static void sirfsoc_uart_start_rx(struct uart_port *port)
+{
+ unsigned long regv;
+ regv = rd_regl(port, SIRFUART_INT_EN);
+ wr_regl(port, SIRFUART_INT_EN, regv | SIRFUART_RX_IO_INT_EN);
+ wr_regl(port, SIRFUART_RX_FIFO_OP, SIRFUART_RX_FIFO_RESET);
+ wr_regl(port, SIRFUART_RX_FIFO_OP, 0);
+ wr_regl(port, SIRFUART_RX_FIFO_OP, SIRFUART_RX_FIFO_START);
+}
+
+static unsigned int
+sirfsoc_calc_sample_div(unsigned long baud_rate,
+ unsigned long ioclk_rate, unsigned long *setted_baud)
+{
+ unsigned long min_delta = ~0UL;
+ unsigned short sample_div;
+ unsigned int regv = 0;
+ unsigned long ioclk_div;
+ unsigned long baud_tmp;
+ int temp_delta;
+
+ for (sample_div = SIRF_MIN_SAMPLE_DIV;
+ sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) {
+ ioclk_div = (ioclk_rate / (baud_rate * (sample_div + 1))) - 1;
+ if (ioclk_div > SIRF_IOCLK_DIV_MAX)
+ continue;
+ baud_tmp = ioclk_rate / ((ioclk_div + 1) * (sample_div + 1));
+ temp_delta = baud_tmp - baud_rate;
+ temp_delta = (temp_delta > 0) ? temp_delta : -temp_delta;
+ if (temp_delta < min_delta) {
+ regv = regv & (~SIRF_IOCLK_DIV_MASK);
+ regv = regv | ioclk_div;
+ regv = regv & (~SIRF_SAMPLE_DIV_MASK);
+ regv = regv | (sample_div << SIRF_SAMPLE_DIV_SHIFT);
+ min_delta = temp_delta;
+ *setted_baud = baud_tmp;
+ }
+ }
+ return regv;
+}
+
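
To put concrete numbers on the search above (illustrative only; the actual SIRF_MIN_SAMPLE_DIV/SIRF_MAX_SAMPLE_DIV bounds come from sirfsoc_uart.h): with ioclk_rate = 150000000 and baud_rate = 115200, a candidate sample_div of 7 gives ioclk_div = 150000000 / (115200 * 8) - 1 = 161, baud_tmp = 150000000 / (162 * 8) = 115740 and a delta of 540; the loop keeps whichever (ioclk_div, sample_div) pair minimises that delta and packs both fields into the returned divisor register value.
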
+static void sirfsoc_uart_set_termios(struct uart_port *port,
+ struct ktermios *termios,
+ struct ktermios *old)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ unsigned long ioclk_rate;
+ unsigned long config_reg = 0;
+ unsigned long baud_rate;
+ unsigned long setted_baud;
+ unsigned long flags;
+ unsigned long ic;
+ unsigned int clk_div_reg = 0;
+ unsigned long temp_reg_val;
+ unsigned long rx_time_out;
+ int threshold_div;
+ int temp;
+
+ ioclk_rate = 150000000;
+ switch (termios->c_cflag & CSIZE) {
+ default:
+ case CS8:
+ config_reg |= SIRFUART_DATA_BIT_LEN_8;
+ break;
+ case CS7:
+ config_reg |= SIRFUART_DATA_BIT_LEN_7;
+ break;
+ case CS6:
+ config_reg |= SIRFUART_DATA_BIT_LEN_6;
+ break;
+ case CS5:
+ config_reg |= SIRFUART_DATA_BIT_LEN_5;
+ break;
+ }
+ if (termios->c_cflag & CSTOPB)
+ config_reg |= SIRFUART_STOP_BIT_LEN_2;
+ baud_rate = uart_get_baud_rate(port, termios, old, 0, 4000000);
+ spin_lock_irqsave(&port->lock, flags);
+ port->read_status_mask = SIRFUART_RX_OFLOW_INT;
+ port->ignore_status_mask = 0;
+ /* read flags */
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |=
+ SIRFUART_FRM_ERR_INT | SIRFUART_PARITY_ERR_INT;
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ port->read_status_mask |= SIRFUART_RXD_BREAK_INT;
+ /* ignore flags */
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |=
+ SIRFUART_FRM_ERR_INT | SIRFUART_PARITY_ERR_INT;
+ if ((termios->c_cflag & CREAD) == 0)
+ port->ignore_status_mask |= SIRFUART_DUMMY_READ;
+ /* enable parity if PARENB is set*/
+ if (termios->c_cflag & PARENB) {
+ if (termios->c_cflag & CMSPAR) {
+ if (termios->c_cflag & PARODD)
+ config_reg |= SIRFUART_STICK_BIT_MARK;
+ else
+ config_reg |= SIRFUART_STICK_BIT_SPACE;
+ } else if (termios->c_cflag & PARODD) {
+ config_reg |= SIRFUART_STICK_BIT_ODD;
+ } else {
+ config_reg |= SIRFUART_STICK_BIT_EVEN;
+ }
+ }
+ /* Hardware Flow Control Settings */
+ if (UART_ENABLE_MS(port, termios->c_cflag)) {
+ if (!sirfport->ms_enabled)
+ sirfsoc_uart_enable_ms(port);
+ } else {
+ if (sirfport->ms_enabled)
+ sirfsoc_uart_disable_ms(port);
+ }
+
+ /* common rate: fast calculation */
+ for (ic = 0; ic < SIRF_BAUD_RATE_SUPPORT_NR; ic++)
+ if (baud_rate == baudrate_to_regv[ic].baud_rate)
+ clk_div_reg = baudrate_to_regv[ic].reg_val;
+ setted_baud = baud_rate;
+ /* arbitary rate setting */
+ if (unlikely(clk_div_reg == 0))
+ clk_div_reg = sirfsoc_calc_sample_div(baud_rate, ioclk_rate,
+ &setted_baud);
+ wr_regl(port, SIRFUART_DIVISOR, clk_div_reg);
+
+ if (tty_termios_baud_rate(termios))
+ tty_termios_encode_baud_rate(termios, setted_baud, setted_baud);
+
+ /* set receive timeout */
+ rx_time_out = SIRFSOC_UART_RX_TIMEOUT(baud_rate, 20000);
+ rx_time_out = (rx_time_out > 0xFFFF) ? 0xFFFF : rx_time_out;
+ config_reg |= SIRFUART_RECV_TIMEOUT(rx_time_out);
+ temp_reg_val = rd_regl(port, SIRFUART_TX_FIFO_OP);
+ wr_regl(port, SIRFUART_RX_FIFO_OP, 0);
+ wr_regl(port, SIRFUART_TX_FIFO_OP,
+ temp_reg_val & ~SIRFUART_TX_FIFO_START);
+ wr_regl(port, SIRFUART_TX_DMA_IO_CTRL, SIRFUART_TX_MODE_IO);
+ wr_regl(port, SIRFUART_RX_DMA_IO_CTRL, SIRFUART_RX_MODE_IO);
+ wr_regl(port, SIRFUART_LINE_CTRL, config_reg);
+
+ /* Reset Rx/Tx FIFO Threshold level for proper baudrate */
+ if (baud_rate < 1000000)
+ threshold_div = 1;
+ else
+ threshold_div = 2;
+ temp = port->line == 1 ? 16 : 64;
+ wr_regl(port, SIRFUART_TX_FIFO_CTRL, temp / threshold_div);
+ wr_regl(port, SIRFUART_RX_FIFO_CTRL, temp / threshold_div);
+ temp_reg_val |= SIRFUART_TX_FIFO_START;
+ wr_regl(port, SIRFUART_TX_FIFO_OP, temp_reg_val);
+ uart_update_timeout(port, termios->c_cflag, baud_rate);
+ sirfsoc_uart_start_rx(port);
+ wr_regl(port, SIRFUART_TX_RX_EN, SIRFUART_TX_EN | SIRFUART_RX_EN);
+ spin_unlock_irqrestore(&port->lock, flags);
+}
+
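+/*
+ * Bring the controller into a known PIO state: route TX/RX through the
+ * I/O (non-DMA) path, enable both directions, reset the FIFOs and program
+ * thresholds matching the port's FIFO size.
+ */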
+static void startup_uart_controller(struct uart_port *port)
+{
+ unsigned long temp_regv;
+ int temp;
+ temp_regv = rd_regl(port, SIRFUART_TX_DMA_IO_CTRL);
+ wr_regl(port, SIRFUART_TX_DMA_IO_CTRL, temp_regv | SIRFUART_TX_MODE_IO);
+ temp_regv = rd_regl(port, SIRFUART_RX_DMA_IO_CTRL);
+ wr_regl(port, SIRFUART_RX_DMA_IO_CTRL, temp_regv | SIRFUART_RX_MODE_IO);
+ wr_regl(port, SIRFUART_TX_DMA_IO_LEN, 0);
+ wr_regl(port, SIRFUART_RX_DMA_IO_LEN, 0);
+ wr_regl(port, SIRFUART_TX_RX_EN, SIRFUART_RX_EN | SIRFUART_TX_EN);
+ wr_regl(port, SIRFUART_TX_FIFO_OP, SIRFUART_TX_FIFO_RESET);
+ wr_regl(port, SIRFUART_TX_FIFO_OP, 0);
+ wr_regl(port, SIRFUART_RX_FIFO_OP, SIRFUART_RX_FIFO_RESET);
+ wr_regl(port, SIRFUART_RX_FIFO_OP, 0);
+ temp = port->line == 1 ? 16 : 64;
+ wr_regl(port, SIRFUART_TX_FIFO_CTRL, temp);
+ wr_regl(port, SIRFUART_RX_FIFO_CTRL, temp);
+}
+
+static int sirfsoc_uart_startup(struct uart_port *port)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ unsigned int index = port->line;
+ int ret;
+ set_irq_flags(port->irq, IRQF_VALID | IRQF_NOAUTOEN);
+ ret = request_irq(port->irq,
+ sirfsoc_uart_isr,
+ 0,
+ SIRFUART_PORT_NAME,
+ sirfport);
+ if (ret != 0) {
+ dev_err(port->dev, "UART%d request IRQ line (%d) failed.\n",
+ index, port->irq);
+ goto irq_err;
+ }
+ startup_uart_controller(port);
+ enable_irq(port->irq);
+irq_err:
+ return ret;
+}
+
+static void sirfsoc_uart_shutdown(struct uart_port *port)
+{
+ struct sirfsoc_uart_port *sirfport = to_sirfport(port);
+ wr_regl(port, SIRFUART_INT_EN, 0);
+ free_irq(port->irq, sirfport);
+ if (sirfport->ms_enabled) {
+ sirfsoc_uart_disable_ms(port);
+ sirfport->ms_enabled = 0;
+ }
+}
+
+static const char *sirfsoc_uart_type(struct uart_port *port)
+{
+ return port->type == SIRFSOC_PORT_TYPE ? SIRFUART_PORT_NAME : NULL;
+}
+
+static int sirfsoc_uart_request_port(struct uart_port *port)
+{
+ void *ret;
+ ret = request_mem_region(port->mapbase,
+ SIRFUART_MAP_SIZE, SIRFUART_PORT_NAME);
+ return ret ? 0 : -EBUSY;
+}
+
+static void sirfsoc_uart_release_port(struct uart_port *port)
+{
+ release_mem_region(port->mapbase, SIRFUART_MAP_SIZE);
+}
+
+static void sirfsoc_uart_config_port(struct uart_port *port, int flags)
+{
+ if (flags & UART_CONFIG_TYPE) {
+ port->type = SIRFSOC_PORT_TYPE;
+ sirfsoc_uart_request_port(port);
+ }
+}
+
+static struct uart_ops sirfsoc_uart_ops = {
+ .tx_empty = sirfsoc_uart_tx_empty,
+ .get_mctrl = sirfsoc_uart_get_mctrl,
+ .set_mctrl = sirfsoc_uart_set_mctrl,
+ .stop_tx = sirfsoc_uart_stop_tx,
+ .start_tx = sirfsoc_uart_start_tx,
+ .stop_rx = sirfsoc_uart_stop_rx,
+ .enable_ms = sirfsoc_uart_enable_ms,
+ .break_ctl = sirfsoc_uart_break_ctl,
+ .startup = sirfsoc_uart_startup,
+ .shutdown = sirfsoc_uart_shutdown,
+ .set_termios = sirfsoc_uart_set_termios,
+ .type = sirfsoc_uart_type,
+ .release_port = sirfsoc_uart_release_port,
+ .request_port = sirfsoc_uart_request_port,
+ .config_port = sirfsoc_uart_config_port,
+};
+
+#ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
+static int __init sirfsoc_uart_console_setup(struct console *co, char *options)
+{
+ unsigned int baud = 115200;
+ unsigned int bits = 8;
+ unsigned int parity = 'n';
+ unsigned int flow = 'n';
+	struct uart_port *port;
+
+	if (co->index < 0 || co->index >= SIRFSOC_UART_NR)
+		return -EINVAL;
+
+	port = &sirfsoc_uart_ports[co->index].port;
+	if (!port->mapbase)
+		return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+ port->cons = co;
+ return uart_set_options(port, co, baud, parity, bits, flow);
+}
+
+static void sirfsoc_uart_console_putchar(struct uart_port *port, int ch)
+{
+ while (rd_regl(port,
+ SIRFUART_TX_FIFO_STATUS) & SIRFUART_FIFOFULL_MASK(port))
+ cpu_relax();
+ wr_regb(port, SIRFUART_TX_FIFO_DATA, ch);
+}
+
+static void sirfsoc_uart_console_write(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct uart_port *port = &sirfsoc_uart_ports[co->index].port;
+ uart_console_write(port, s, count, sirfsoc_uart_console_putchar);
+}
+
+static struct console sirfsoc_uart_console = {
+ .name = SIRFSOC_UART_NAME,
+ .device = uart_console_device,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .write = sirfsoc_uart_console_write,
+ .setup = sirfsoc_uart_console_setup,
+ .data = &sirfsoc_uart_drv,
+};
+
+static int __init sirfsoc_uart_console_init(void)
+{
+ register_console(&sirfsoc_uart_console);
+ return 0;
+}
+console_initcall(sirfsoc_uart_console_init);
+#endif
+
+static struct uart_driver sirfsoc_uart_drv = {
+ .owner = THIS_MODULE,
+ .driver_name = SIRFUART_PORT_NAME,
+ .nr = SIRFSOC_UART_NR,
+ .dev_name = SIRFSOC_UART_NAME,
+ .major = SIRFSOC_UART_MAJOR,
+ .minor = SIRFSOC_UART_MINOR,
+#ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
+ .cons = &sirfsoc_uart_console,
+#else
+ .cons = NULL,
+#endif
+};
+
+int sirfsoc_uart_probe(struct platform_device *pdev)
+{
+ struct sirfsoc_uart_port *sirfport;
+ struct uart_port *port;
+ struct resource *res;
+ int ret;
+
+ if (of_property_read_u32(pdev->dev.of_node, "cell-index", &pdev->id)) {
+ dev_err(&pdev->dev,
+ "Unable to find cell-index in uart node.\n");
+ ret = -EFAULT;
+ goto err;
+ }
+
+ sirfport = &sirfsoc_uart_ports[pdev->id];
+ port = &sirfport->port;
+ port->dev = &pdev->dev;
+ port->private_data = sirfport;
+
+ if (of_find_property(pdev->dev.of_node, "hw_flow_ctrl", NULL))
+ sirfport->hw_flow_ctrl = 1;
+
+ if (of_property_read_u32(pdev->dev.of_node,
+ "fifosize",
+ &port->fifosize)) {
+ dev_err(&pdev->dev,
+ "Unable to find fifosize in uart node.\n");
+ ret = -EFAULT;
+ goto err;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "Insufficient resources.\n");
+ ret = -EFAULT;
+ goto err;
+ }
+ port->mapbase = res->start;
+ port->membase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!port->membase) {
+ dev_err(&pdev->dev, "Cannot remap resource.\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "Insufficient resources.\n");
+ ret = -EFAULT;
+ goto irq_err;
+ }
+ port->irq = res->start;
+
+ if (sirfport->hw_flow_ctrl) {
+ sirfport->pmx = pinmux_get(&pdev->dev, NULL);
+		if (IS_ERR(sirfport->pmx)) {
+			ret = PTR_ERR(sirfport->pmx);
+			goto pmx_err;
+		}
+
+ pinmux_enable(sirfport->pmx);
+ }
+
+ port->ops = &sirfsoc_uart_ops;
+ spin_lock_init(&port->lock);
+
+ platform_set_drvdata(pdev, sirfport);
+ ret = uart_add_one_port(&sirfsoc_uart_drv, port);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Cannot add UART port(%d).\n", pdev->id);
+ goto port_err;
+ }
+
+ return 0;
+
+port_err:
+ platform_set_drvdata(pdev, NULL);
+ if (sirfport->hw_flow_ctrl) {
+ pinmux_disable(sirfport->pmx);
+ pinmux_put(sirfport->pmx);
+ }
+pmx_err:
+irq_err:
+ devm_iounmap(&pdev->dev, port->membase);
+err:
+ return ret;
+}
+
+static int sirfsoc_uart_remove(struct platform_device *pdev)
+{
+ struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
+ struct uart_port *port = &sirfport->port;
+ platform_set_drvdata(pdev, NULL);
+ if (sirfport->hw_flow_ctrl) {
+ pinmux_disable(sirfport->pmx);
+ pinmux_put(sirfport->pmx);
+ }
+ devm_iounmap(&pdev->dev, port->membase);
+ uart_remove_one_port(&sirfsoc_uart_drv, port);
+ return 0;
+}
+
+static int
+sirfsoc_uart_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
+ struct uart_port *port = &sirfport->port;
+ uart_suspend_port(&sirfsoc_uart_drv, port);
+ return 0;
+}
+
+static int sirfsoc_uart_resume(struct platform_device *pdev)
+{
+ struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
+ struct uart_port *port = &sirfport->port;
+ uart_resume_port(&sirfsoc_uart_drv, port);
+ return 0;
+}
+
+static struct of_device_id sirfsoc_uart_ids[] __devinitdata = {
+ { .compatible = "sirf,prima2-uart", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sirfsoc_uart_ids);
+
+static struct platform_driver sirfsoc_uart_driver = {
+ .probe = sirfsoc_uart_probe,
+ .remove = __devexit_p(sirfsoc_uart_remove),
+ .suspend = sirfsoc_uart_suspend,
+ .resume = sirfsoc_uart_resume,
+ .driver = {
+ .name = SIRFUART_PORT_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = sirfsoc_uart_ids,
+ },
+};
+
+static int __init sirfsoc_uart_init(void)
+{
+ int ret = 0;
+
+ ret = uart_register_driver(&sirfsoc_uart_drv);
+ if (ret)
+ goto out;
+
+ ret = platform_driver_register(&sirfsoc_uart_driver);
+ if (ret)
+ uart_unregister_driver(&sirfsoc_uart_drv);
+out:
+ return ret;
+}
+module_init(sirfsoc_uart_init);
+
+static void __exit sirfsoc_uart_exit(void)
+{
+ platform_driver_unregister(&sirfsoc_uart_driver);
+ uart_unregister_driver(&sirfsoc_uart_drv);
+}
+module_exit(sirfsoc_uart_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Bin Shi <Bin.Shi@csr.com>, Rong Wang <Rong.Wang@csr.com>");
+MODULE_DESCRIPTION("CSR SiRFprimaII UART Driver");
diff --git a/drivers/tty/serial/sirfsoc_uart.h b/drivers/tty/serial/sirfsoc_uart.h
new file mode 100644
index 0000000..fc64260
--- /dev/null
+++ b/drivers/tty/serial/sirfsoc_uart.h
@@ -0,0 +1,185 @@
+/*
+ * Drivers for CSR SiRFprimaII onboard UARTs.
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+#include <linux/bitops.h>
+
+/* UART Register Offset Define */
+#define SIRFUART_LINE_CTRL 0x0040
+#define SIRFUART_TX_RX_EN 0x004c
+#define SIRFUART_DIVISOR 0x0050
+#define SIRFUART_INT_EN 0x0054
+#define SIRFUART_INT_STATUS 0x0058
+#define SIRFUART_TX_DMA_IO_CTRL 0x0100
+#define SIRFUART_TX_DMA_IO_LEN 0x0104
+#define SIRFUART_TX_FIFO_CTRL 0x0108
+#define SIRFUART_TX_FIFO_LEVEL_CHK 0x010C
+#define SIRFUART_TX_FIFO_OP 0x0110
+#define SIRFUART_TX_FIFO_STATUS 0x0114
+#define SIRFUART_TX_FIFO_DATA 0x0118
+#define SIRFUART_RX_DMA_IO_CTRL 0x0120
+#define SIRFUART_RX_DMA_IO_LEN 0x0124
+#define SIRFUART_RX_FIFO_CTRL 0x0128
+#define SIRFUART_RX_FIFO_LEVEL_CHK 0x012C
+#define SIRFUART_RX_FIFO_OP 0x0130
+#define SIRFUART_RX_FIFO_STATUS 0x0134
+#define SIRFUART_RX_FIFO_DATA 0x0138
+#define SIRFUART_AFC_CTRL 0x0140
+#define SIRFUART_SWH_DMA_IO 0x0148
+
+/* UART Line Control Register */
+#define SIRFUART_DATA_BIT_LEN_MASK 0x3
+#define SIRFUART_DATA_BIT_LEN_5			0
+#define SIRFUART_DATA_BIT_LEN_6 1
+#define SIRFUART_DATA_BIT_LEN_7 2
+#define SIRFUART_DATA_BIT_LEN_8 3
+#define SIRFUART_STOP_BIT_LEN_1 0
+#define SIRFUART_STOP_BIT_LEN_2 BIT(2)
+#define SIRFUART_PARITY_EN BIT(3)
+#define SIRFUART_EVEN_BIT BIT(4)
+#define SIRFUART_STICK_BIT_MASK (7 << 3)
+#define SIRFUART_STICK_BIT_NONE (0 << 3)
+#define SIRFUART_STICK_BIT_EVEN BIT(3)
+#define SIRFUART_STICK_BIT_ODD (3 << 3)
+#define SIRFUART_STICK_BIT_MARK (5 << 3)
+#define SIRFUART_STICK_BIT_SPACE (7 << 3)
+#define SIRFUART_SET_BREAK BIT(6)
+#define SIRFUART_LOOP_BACK BIT(7)
+#define SIRFUART_PARITY_MASK (7 << 3)
+#define SIRFUART_DUMMY_READ BIT(16)
+
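+/*
+ * Convert a receive timeout given in microseconds into bit periods at the
+ * given baud rate (the timeout is first rounded up to whole milliseconds).
+ */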
+#define SIRFSOC_UART_RX_TIMEOUT(br, to) (((br) * (((to) + 999) / 1000)) / 1000)
+#define SIRFUART_RECV_TIMEOUT_MASK (0xFFFF << 16)
+#define SIRFUART_RECV_TIMEOUT(x) (((x) & 0xFFFF) << 16)
+
+/* UART Auto Flow Control */
+#define SIRFUART_AFC_RX_THD_MASK 0x000000FF
+#define SIRFUART_AFC_RX_EN BIT(8)
+#define SIRFUART_AFC_TX_EN BIT(9)
+#define SIRFUART_CTS_CTRL BIT(10)
+#define SIRFUART_RTS_CTRL BIT(11)
+#define SIRFUART_CTS_IN_STATUS BIT(12)
+#define SIRFUART_RTS_OUT_STATUS BIT(13)
+
+/* UART Interrupt Enable Register */
+#define SIRFUART_RX_DONE_INT BIT(0)
+#define SIRFUART_TX_DONE_INT BIT(1)
+#define SIRFUART_RX_OFLOW_INT BIT(2)
+#define SIRFUART_TX_ALLOUT_INT BIT(3)
+#define SIRFUART_RX_IO_DMA_INT BIT(4)
+#define SIRFUART_TX_IO_DMA_INT BIT(5)
+#define SIRFUART_RXFIFO_FULL_INT BIT(6)
+#define SIRFUART_TXFIFO_EMPTY_INT BIT(7)
+#define SIRFUART_RXFIFO_THD_INT BIT(8)
+#define SIRFUART_TXFIFO_THD_INT BIT(9)
+#define SIRFUART_FRM_ERR_INT BIT(10)
+#define SIRFUART_RXD_BREAK_INT BIT(11)
+#define SIRFUART_RX_TIMEOUT_INT BIT(12)
+#define SIRFUART_PARITY_ERR_INT BIT(13)
+#define SIRFUART_CTS_INT_EN BIT(14)
+#define SIRFUART_RTS_INT_EN BIT(15)
+
+/* UART Interrupt Status Register */
+#define SIRFUART_RX_DONE BIT(0)
+#define SIRFUART_TX_DONE BIT(1)
+#define SIRFUART_RX_OFLOW BIT(2)
+#define SIRFUART_TX_ALL_EMPTY BIT(3)
+#define SIRFUART_DMA_IO_RX_DONE BIT(4)
+#define SIRFUART_DMA_IO_TX_DONE BIT(5)
+#define SIRFUART_RXFIFO_FULL BIT(6)
+#define SIRFUART_TXFIFO_EMPTY BIT(7)
+#define SIRFUART_RXFIFO_THD_REACH BIT(8)
+#define SIRFUART_TXFIFO_THD_REACH BIT(9)
+#define SIRFUART_FRM_ERR BIT(10)
+#define SIRFUART_RXD_BREAK BIT(11)
+#define SIRFUART_RX_TIMEOUT BIT(12)
+#define SIRFUART_PARITY_ERR BIT(13)
+#define SIRFUART_CTS_CHANGE BIT(14)
+#define SIRFUART_RTS_CHANGE BIT(15)
+#define SIRFUART_PLUG_IN BIT(16)
+
+#define SIRFUART_ERR_INT_STAT \
+ (SIRFUART_RX_OFLOW | \
+ SIRFUART_FRM_ERR | \
+ SIRFUART_RXD_BREAK | \
+ SIRFUART_PARITY_ERR)
+#define SIRFUART_ERR_INT_EN \
+ (SIRFUART_RX_OFLOW_INT | \
+ SIRFUART_FRM_ERR_INT | \
+ SIRFUART_RXD_BREAK_INT | \
+ SIRFUART_PARITY_ERR_INT)
+#define SIRFUART_TX_INT_EN SIRFUART_TXFIFO_EMPTY_INT
+#define SIRFUART_RX_IO_INT_EN \
+ (SIRFUART_RX_TIMEOUT_INT | \
+ SIRFUART_RXFIFO_THD_INT | \
+ SIRFUART_RXFIFO_FULL_INT | \
+ SIRFUART_ERR_INT_EN)
+
+/* UART FIFO Register */
+#define SIRFUART_TX_FIFO_STOP 0x0
+#define SIRFUART_TX_FIFO_RESET 0x1
+#define SIRFUART_TX_FIFO_START 0x2
+#define SIRFUART_RX_FIFO_STOP 0x0
+#define SIRFUART_RX_FIFO_RESET 0x1
+#define SIRFUART_RX_FIFO_START 0x2
+#define SIRFUART_TX_MODE_DMA 0
+#define SIRFUART_TX_MODE_IO 1
+#define SIRFUART_RX_MODE_DMA 0
+#define SIRFUART_RX_MODE_IO 1
+
+#define SIRFUART_RX_EN 0x1
+#define SIRFUART_TX_EN 0x2
+
+/* Generic Definitions */
+#define SIRFSOC_UART_NAME "ttySiRF"
+#define SIRFSOC_UART_MAJOR 0
+#define SIRFSOC_UART_MINOR 0
+#define SIRFUART_PORT_NAME "sirfsoc-uart"
+#define SIRFUART_MAP_SIZE 0x200
+#define SIRFSOC_UART_NR 3
+#define SIRFSOC_PORT_TYPE 0xa5
+
+/* Baud Rate Calculation */
+#define SIRF_MIN_SAMPLE_DIV 0xf
+#define SIRF_MAX_SAMPLE_DIV 0x3f
+#define SIRF_IOCLK_DIV_MAX 0xffff
+#define SIRF_SAMPLE_DIV_SHIFT 16
+#define SIRF_IOCLK_DIV_MASK 0xffff
+#define SIRF_SAMPLE_DIV_MASK 0x3f0000
+#define SIRF_BAUD_RATE_SUPPORT_NR 18
+
+/* For Fast Baud Rate Calculation */
+struct sirfsoc_baudrate_to_regv {
+ unsigned int baud_rate;
+ unsigned int reg_val;
+};
+
+struct sirfsoc_uart_port {
+ unsigned char hw_flow_ctrl;
+ unsigned char ms_enabled;
+
+ struct uart_port port;
+ struct pinmux *pmx;
+};
+
+/* Hardware Flow Control */
+#define SIRFUART_AFC_CTRL_RX_THD 0x70
+
+/* Register Access Control */
+#define portaddr(port, reg) ((port)->membase + (reg))
+#define rd_regb(port, reg) (__raw_readb(portaddr(port, reg)))
+#define rd_regl(port, reg) (__raw_readl(portaddr(port, reg)))
+#define wr_regb(port, reg, val) __raw_writeb(val, portaddr(port, reg))
+#define wr_regl(port, reg, val) __raw_writel(val, portaddr(port, reg))
+
+/* UART Port Mask */
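+/* UART1 has a smaller RX/TX FIFO than the other two ports */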
+#define SIRFUART_FIFOLEVEL_MASK(port) ((port->line == 1) ? (0x1f) : (0x7f))
+#define SIRFUART_FIFOFULL_MASK(port) ((port->line == 1) ? (0x20) : (0x80))
+#define SIRFUART_FIFOEMPTY_MASK(port) ((port->line == 1) ? (0x40) : (0x100))
+
+/* I/O Mode */
+#define SIRFSOC_UART_IO_RX_MAX_CNT 256
+#define SIRFSOC_UART_IO_TX_REASONABLE_CNT 6
diff --git a/drivers/tty/serial/timbuart.c b/drivers/tty/serial/timbuart.c
index e76c8b7..70f9749 100644
--- a/drivers/tty/serial/timbuart.c
+++ b/drivers/tty/serial/timbuart.c
@@ -513,20 +513,7 @@ static struct platform_driver timbuart_platform_driver = {
.remove = __devexit_p(timbuart_remove),
};
-/*--------------------------------------------------------------------------*/
-
-static int __init timbuart_init(void)
-{
- return platform_driver_register(&timbuart_platform_driver);
-}
-
-static void __exit timbuart_exit(void)
-{
- platform_driver_unregister(&timbuart_platform_driver);
-}
-
-module_init(timbuart_init);
-module_exit(timbuart_exit);
+module_platform_driver(timbuart_platform_driver);
MODULE_DESCRIPTION("Timberdale UART driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/tty/serial/ucc_uart.c b/drivers/tty/serial/ucc_uart.c
index cea8918..2ebe606 100644
--- a/drivers/tty/serial/ucc_uart.c
+++ b/drivers/tty/serial/ucc_uart.c
@@ -963,6 +963,9 @@ static void qe_uart_set_termios(struct uart_port *port,
/* Do we really need a spinlock here? */
spin_lock_irqsave(&port->lock, flags);
+ /* Update the per-port timeout. */
+ uart_update_timeout(port, termios->c_cflag, baud);
+
out_be16(&uccp->upsmr, upsmr);
if (soft_uart) {
out_be16(&uccup->supsmr, supsmr);
diff --git a/drivers/tty/serial/vr41xx_siu.c b/drivers/tty/serial/vr41xx_siu.c
index 3beb6ab..83148e7 100644
--- a/drivers/tty/serial/vr41xx_siu.c
+++ b/drivers/tty/serial/vr41xx_siu.c
@@ -961,18 +961,7 @@ static struct platform_driver siu_device_driver = {
},
};
-static int __init vr41xx_siu_init(void)
-{
- return platform_driver_register(&siu_device_driver);
-}
-
-static void __exit vr41xx_siu_exit(void)
-{
- platform_driver_unregister(&siu_device_driver);
-}
-
-module_init(vr41xx_siu_init);
-module_exit(vr41xx_siu_exit);
+module_platform_driver(siu_device_driver);
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:SIU");
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index e67fb20..ff8017f 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -850,7 +850,7 @@ static int mgsl_device_count;
* .text section address and breakpoint on module load.
* This is useful for use with gdb and add-symbol-file command.
*/
-static int break_on_load;
+static bool break_on_load;
/*
* Driver major number, defaults to zero to get auto
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index 0f6b796..a7efe53 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -456,7 +456,7 @@ static int synclinkmp_device_count = 0;
* .text section address and breakpoint on module load.
* This is useful for use with gdb and add-symbol-file command.
*/
-static int break_on_load = 0;
+static bool break_on_load = false;
/*
* Driver major number, defaults to zero to get auto
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 43db715..7867b7c 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -32,7 +32,6 @@
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/writeback.h>
-#include <linux/buffer_head.h> /* for fsync_bdev() */
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/vt_kern.h>
@@ -41,6 +40,7 @@
#include <linux/oom.h>
#include <linux/slab.h>
#include <linux/input.h>
+#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/irq_regs.h>
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 05085be..e41b9bb 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -790,19 +790,24 @@ static void session_clear_tty(struct pid *session)
void disassociate_ctty(int on_exit)
{
struct tty_struct *tty;
- struct pid *tty_pgrp = NULL;
if (!current->signal->leader)
return;
tty = get_current_tty();
if (tty) {
- tty_pgrp = get_pid(tty->pgrp);
+ struct pid *tty_pgrp = get_pid(tty->pgrp);
if (on_exit) {
if (tty->driver->type != TTY_DRIVER_TYPE_PTY)
tty_vhangup(tty);
}
tty_kref_put(tty);
+ if (tty_pgrp) {
+ kill_pgrp(tty_pgrp, SIGHUP, on_exit);
+ if (!on_exit)
+ kill_pgrp(tty_pgrp, SIGCONT, on_exit);
+ put_pid(tty_pgrp);
+ }
} else if (on_exit) {
struct pid *old_pgrp;
spin_lock_irq(&current->sighand->siglock);
@@ -816,12 +821,6 @@ void disassociate_ctty(int on_exit)
}
return;
}
- if (tty_pgrp) {
- kill_pgrp(tty_pgrp, SIGHUP, on_exit);
- if (!on_exit)
- kill_pgrp(tty_pgrp, SIGCONT, on_exit);
- put_pid(tty_pgrp);
- }
spin_lock_irq(&current->sighand->siglock);
put_pid(current->signal->tty_old_pgrp);
@@ -1558,6 +1557,59 @@ static void release_tty(struct tty_struct *tty, int idx)
}
/**
+ * tty_release_checks - check a tty before real release
+ * @tty: tty to check
+ * @o_tty: link of @tty (if any)
+ * @idx: index of the tty
+ *
+ * Performs some paranoid checking before true release of the @tty.
+ * This is a no-op unless TTY_PARANOIA_CHECK is defined.
+ */
+static int tty_release_checks(struct tty_struct *tty, struct tty_struct *o_tty,
+ int idx)
+{
+#ifdef TTY_PARANOIA_CHECK
+ if (idx < 0 || idx >= tty->driver->num) {
+ printk(KERN_DEBUG "%s: bad idx when trying to free (%s)\n",
+ __func__, tty->name);
+ return -1;
+ }
+
+ /* not much to check for devpts */
+ if (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM)
+ return 0;
+
+ if (tty != tty->driver->ttys[idx]) {
+ printk(KERN_DEBUG "%s: driver.table[%d] not tty for (%s)\n",
+ __func__, idx, tty->name);
+ return -1;
+ }
+ if (tty->termios != tty->driver->termios[idx]) {
+ printk(KERN_DEBUG "%s: driver.termios[%d] not termios for (%s)\n",
+ __func__, idx, tty->name);
+ return -1;
+ }
+ if (tty->driver->other) {
+ if (o_tty != tty->driver->other->ttys[idx]) {
+ printk(KERN_DEBUG "%s: other->table[%d] not o_tty for (%s)\n",
+ __func__, idx, tty->name);
+ return -1;
+ }
+ if (o_tty->termios != tty->driver->other->termios[idx]) {
+ printk(KERN_DEBUG "%s: other->termios[%d] not o_termios for (%s)\n",
+ __func__, idx, tty->name);
+ return -1;
+ }
+ if (o_tty->link != tty) {
+ printk(KERN_DEBUG "%s: bad pty pointers\n", __func__);
+ return -1;
+ }
+ }
+#endif
+ return 0;
+}
+
+/**
* tty_release - vfs callback for close
* @inode: inode of tty
* @filp: file pointer for handle to tty
@@ -1585,11 +1637,11 @@ int tty_release(struct inode *inode, struct file *filp)
int idx;
char buf[64];
- if (tty_paranoia_check(tty, inode, "tty_release_dev"))
+ if (tty_paranoia_check(tty, inode, __func__))
return 0;
tty_lock();
- check_tty_count(tty, "tty_release_dev");
+ check_tty_count(tty, __func__);
__tty_fasync(-1, filp, 0);
@@ -1599,59 +1651,16 @@ int tty_release(struct inode *inode, struct file *filp)
devpts = (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM) != 0;
o_tty = tty->link;
-#ifdef TTY_PARANOIA_CHECK
- if (idx < 0 || idx >= tty->driver->num) {
- printk(KERN_DEBUG "tty_release_dev: bad idx when trying to "
- "free (%s)\n", tty->name);
+ if (tty_release_checks(tty, o_tty, idx)) {
tty_unlock();
return 0;
}
- if (!devpts) {
- if (tty != tty->driver->ttys[idx]) {
- tty_unlock();
- printk(KERN_DEBUG "tty_release_dev: driver.table[%d] not tty "
- "for (%s)\n", idx, tty->name);
- return 0;
- }
- if (tty->termios != tty->driver->termios[idx]) {
- tty_unlock();
- printk(KERN_DEBUG "tty_release_dev: driver.termios[%d] not termios "
- "for (%s)\n",
- idx, tty->name);
- return 0;
- }
- }
-#endif
#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "tty_release_dev of %s (tty count=%d)...",
- tty_name(tty, buf), tty->count);
+ printk(KERN_DEBUG "%s: %s (tty count=%d)...\n", __func__,
+ tty_name(tty, buf), tty->count);
#endif
-#ifdef TTY_PARANOIA_CHECK
- if (tty->driver->other &&
- !(tty->driver->flags & TTY_DRIVER_DEVPTS_MEM)) {
- if (o_tty != tty->driver->other->ttys[idx]) {
- tty_unlock();
- printk(KERN_DEBUG "tty_release_dev: other->table[%d] "
- "not o_tty for (%s)\n",
- idx, tty->name);
- return 0 ;
- }
- if (o_tty->termios != tty->driver->other->termios[idx]) {
- tty_unlock();
- printk(KERN_DEBUG "tty_release_dev: other->termios[%d] "
- "not o_termios for (%s)\n",
- idx, tty->name);
- return 0;
- }
- if (o_tty->link != tty) {
- tty_unlock();
- printk(KERN_DEBUG "tty_release_dev: bad pty pointers\n");
- return 0;
- }
- }
-#endif
if (tty->ops->close)
tty->ops->close(tty, filp);
@@ -1707,8 +1716,8 @@ int tty_release(struct inode *inode, struct file *filp)
if (!do_sleep)
break;
- printk(KERN_WARNING "tty_release_dev: %s: read/write wait queue "
- "active!\n", tty_name(tty, buf));
+ printk(KERN_WARNING "%s: %s: read/write wait queue active!\n",
+ __func__, tty_name(tty, buf));
tty_unlock();
mutex_unlock(&tty_mutex);
schedule();
@@ -1721,15 +1730,14 @@ int tty_release(struct inode *inode, struct file *filp)
*/
if (pty_master) {
if (--o_tty->count < 0) {
- printk(KERN_WARNING "tty_release_dev: bad pty slave count "
- "(%d) for %s\n",
- o_tty->count, tty_name(o_tty, buf));
+ printk(KERN_WARNING "%s: bad pty slave count (%d) for %s\n",
+ __func__, o_tty->count, tty_name(o_tty, buf));
o_tty->count = 0;
}
}
if (--tty->count < 0) {
- printk(KERN_WARNING "tty_release_dev: bad tty->count (%d) for %s\n",
- tty->count, tty_name(tty, buf));
+ printk(KERN_WARNING "%s: bad tty->count (%d) for %s\n",
+ __func__, tty->count, tty_name(tty, buf));
tty->count = 0;
}
@@ -1778,7 +1786,7 @@ int tty_release(struct inode *inode, struct file *filp)
}
#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "freeing tty structure...");
+ printk(KERN_DEBUG "%s: freeing tty structure...\n", __func__);
#endif
/*
* Ask the line discipline code to release its structures
@@ -1798,6 +1806,83 @@ int tty_release(struct inode *inode, struct file *filp)
}
/**
+ * tty_open_current_tty - get tty of current task for open
+ * @device: device number
+ * @filp: file pointer to tty
+ * @return: tty of the current task iff @device is /dev/tty
+ *
+ * We cannot return driver and index like for the other nodes because
+ * devpts will not work then. It expects inodes to be from devpts FS.
+ */
+static struct tty_struct *tty_open_current_tty(dev_t device, struct file *filp)
+{
+ struct tty_struct *tty;
+
+ if (device != MKDEV(TTYAUX_MAJOR, 0))
+ return NULL;
+
+ tty = get_current_tty();
+ if (!tty)
+ return ERR_PTR(-ENXIO);
+
+ filp->f_flags |= O_NONBLOCK; /* Don't let /dev/tty block */
+ /* noctty = 1; */
+ tty_kref_put(tty);
+ /* FIXME: we put a reference and return a TTY! */
+ return tty;
+}
+
+/**
+ * tty_lookup_driver - lookup a tty driver for a given device file
+ * @device: device number
+ * @filp: file pointer to tty
+ * @noctty: set if the device should not become a controlling tty
+ * @index: index for the device in the @return driver
+ * @return: driver for this inode (with increased refcount)
+ *
+ * If @return is not erroneous, the caller is responsible for decrementing
+ * the refcount with tty_driver_kref_put.
+ *
+ * Locking: tty_mutex protects get_tty_driver
+ */
+static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
+ int *noctty, int *index)
+{
+ struct tty_driver *driver;
+
+ switch (device) {
+#ifdef CONFIG_VT
+ case MKDEV(TTY_MAJOR, 0): {
+ extern struct tty_driver *console_driver;
+ driver = tty_driver_kref_get(console_driver);
+ *index = fg_console;
+ *noctty = 1;
+ break;
+ }
+#endif
+ case MKDEV(TTYAUX_MAJOR, 1): {
+ struct tty_driver *console_driver = console_device(index);
+ if (console_driver) {
+ driver = tty_driver_kref_get(console_driver);
+ if (driver) {
+ /* Don't let /dev/console block */
+ filp->f_flags |= O_NONBLOCK;
+ *noctty = 1;
+ break;
+ }
+ }
+ return ERR_PTR(-ENODEV);
+ }
+ default:
+ driver = get_tty_driver(device, index);
+ if (!driver)
+ return ERR_PTR(-ENODEV);
+ break;
+ }
+ return driver;
+}
+
+/**
* tty_open - open a tty device
* @inode: inode of device file
* @filp: file pointer to tty
@@ -1813,16 +1898,16 @@ int tty_release(struct inode *inode, struct file *filp)
* The termios state of a pty is reset on first open so that
* settings don't persist across reuse.
*
- * Locking: tty_mutex protects tty, get_tty_driver and tty_init_dev work.
+ * Locking: tty_mutex protects tty, tty_lookup_driver and tty_init_dev.
* tty->count should protect the rest.
* ->siglock protects ->signal/->sighand
*/
static int tty_open(struct inode *inode, struct file *filp)
{
- struct tty_struct *tty = NULL;
+ struct tty_struct *tty;
int noctty, retval;
- struct tty_driver *driver;
+ struct tty_driver *driver = NULL;
int index;
dev_t device = inode->i_rdev;
unsigned saved_flags = filp->f_flags;
@@ -1841,66 +1926,22 @@ retry_open:
mutex_lock(&tty_mutex);
tty_lock();
- if (device == MKDEV(TTYAUX_MAJOR, 0)) {
- tty = get_current_tty();
- if (!tty) {
- tty_unlock();
- mutex_unlock(&tty_mutex);
- tty_free_file(filp);
- return -ENXIO;
- }
- driver = tty_driver_kref_get(tty->driver);
- index = tty->index;
- filp->f_flags |= O_NONBLOCK; /* Don't let /dev/tty block */
- /* noctty = 1; */
- /* FIXME: Should we take a driver reference ? */
- tty_kref_put(tty);
- goto got_driver;
- }
-#ifdef CONFIG_VT
- if (device == MKDEV(TTY_MAJOR, 0)) {
- extern struct tty_driver *console_driver;
- driver = tty_driver_kref_get(console_driver);
- index = fg_console;
- noctty = 1;
- goto got_driver;
- }
-#endif
- if (device == MKDEV(TTYAUX_MAJOR, 1)) {
- struct tty_driver *console_driver = console_device(&index);
- if (console_driver) {
- driver = tty_driver_kref_get(console_driver);
- if (driver) {
- /* Don't let /dev/console block */
- filp->f_flags |= O_NONBLOCK;
- noctty = 1;
- goto got_driver;
- }
+ tty = tty_open_current_tty(device, filp);
+ if (IS_ERR(tty)) {
+ retval = PTR_ERR(tty);
+ goto err_unlock;
+ } else if (!tty) {
+ driver = tty_lookup_driver(device, filp, &noctty, &index);
+ if (IS_ERR(driver)) {
+ retval = PTR_ERR(driver);
+ goto err_unlock;
}
- tty_unlock();
- mutex_unlock(&tty_mutex);
- tty_free_file(filp);
- return -ENODEV;
- }
- driver = get_tty_driver(device, &index);
- if (!driver) {
- tty_unlock();
- mutex_unlock(&tty_mutex);
- tty_free_file(filp);
- return -ENODEV;
- }
-got_driver:
- if (!tty) {
/* check whether we're reopening an existing tty */
tty = tty_driver_lookup_tty(driver, inode, index);
-
if (IS_ERR(tty)) {
- tty_unlock();
- mutex_unlock(&tty_mutex);
- tty_driver_kref_put(driver);
- tty_free_file(filp);
- return PTR_ERR(tty);
+ retval = PTR_ERR(tty);
+ goto err_unlock;
}
}
@@ -1912,21 +1953,22 @@ got_driver:
tty = tty_init_dev(driver, index, 0);
mutex_unlock(&tty_mutex);
- tty_driver_kref_put(driver);
+ if (driver)
+ tty_driver_kref_put(driver);
if (IS_ERR(tty)) {
tty_unlock();
- tty_free_file(filp);
- return PTR_ERR(tty);
+ retval = PTR_ERR(tty);
+ goto err_file;
}
tty_add_file(tty, filp);
- check_tty_count(tty, "tty_open");
+ check_tty_count(tty, __func__);
if (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
tty->driver->subtype == PTY_TYPE_MASTER)
noctty = 1;
#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "opening %s...", tty->name);
+ printk(KERN_DEBUG "%s: opening %s...\n", __func__, tty->name);
#endif
if (tty->ops->open)
retval = tty->ops->open(tty, filp);
@@ -1940,8 +1982,8 @@ got_driver:
if (retval) {
#ifdef TTY_DEBUG_HANGUP
- printk(KERN_DEBUG "error %d in opening %s...", retval,
- tty->name);
+ printk(KERN_DEBUG "%s: error %d in opening %s...\n", __func__,
+ retval, tty->name);
#endif
tty_unlock(); /* need to call tty_release without BTM */
tty_release(inode, filp);
@@ -1976,6 +2018,15 @@ got_driver:
tty_unlock();
mutex_unlock(&tty_mutex);
return 0;
+err_unlock:
+ tty_unlock();
+ mutex_unlock(&tty_mutex);
+ /* after locks to avoid deadlock */
+ if (!IS_ERR_OR_NULL(driver))
+ tty_driver_kref_put(driver);
+err_file:
+ tty_free_file(filp);
+ return retval;
}
@@ -3267,7 +3318,7 @@ void __init console_init(void)
}
}
-static char *tty_devnode(struct device *dev, mode_t *mode)
+static char *tty_devnode(struct device *dev, umode_t *mode)
{
if (!mode)
return NULL;
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 8e0924f..24b95db 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -1,19 +1,11 @@
#include <linux/types.h>
-#include <linux/major.h>
#include <linux/errno.h>
-#include <linux/signal.h>
-#include <linux/fcntl.h>
+#include <linux/kmod.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
-#include <linux/tty_flip.h>
-#include <linux/devpts_fs.h>
#include <linux/file.h>
-#include <linux/console.h>
-#include <linux/timer.h>
-#include <linux/ctype.h>
-#include <linux/kd.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/slab.h>
@@ -24,18 +16,8 @@
#include <linux/device.h>
#include <linux/wait.h>
#include <linux/bitops.h>
-#include <linux/delay.h>
#include <linux/seq_file.h>
-
#include <linux/uaccess.h>
-#include <asm/system.h>
-
-#include <linux/kbd_kern.h>
-#include <linux/vt_kern.h>
-#include <linux/selection.h>
-
-#include <linux/kmod.h>
-#include <linux/nsproxy.h>
#include <linux/ratelimit.h>
/*
@@ -558,8 +540,6 @@ static int tty_ldisc_wait_idle(struct tty_struct *tty, long timeout)
long ret;
ret = wait_event_timeout(tty_ldisc_idle,
atomic_read(&tty->ldisc->users) == 1, timeout);
- if (ret < 0)
- return ret;
return ret > 0 ? 0 : -EBUSY;
}
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index ef9dd62..bf6e238 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -227,7 +227,6 @@ int tty_port_block_til_ready(struct tty_port *port,
int do_clocal = 0, retval;
unsigned long flags;
DEFINE_WAIT(wait);
- int cd;
/* block if port is in the process of being closed */
if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) {
@@ -284,11 +283,14 @@ int tty_port_block_til_ready(struct tty_port *port,
retval = -ERESTARTSYS;
break;
}
- /* Probe the carrier. For devices with no carrier detect this
- will always return true */
- cd = tty_port_carrier_raised(port);
+ /*
+ * Probe the carrier. For devices with no carrier detect
+ * tty_port_carrier_raised will always return true.
+ * Never ask drivers if CLOCAL is set, this causes troubles
+ * on some hardware.
+ */
if (!(port->flags & ASYNC_CLOSING) &&
- (do_clocal || cd))
+ (do_clocal || tty_port_carrier_raised(port)))
break;
if (signal_pending(current)) {
retval = -ERESTARTSYS;
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
index 45d3e80..a0f3d6c 100644
--- a/drivers/tty/vt/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
@@ -584,7 +584,7 @@ int con_set_default_unimap(struct vc_data *vc)
return 0;
dflt->refcount++;
*vc->vc_uni_pagedir_loc = (unsigned long)dflt;
- if (p && --p->refcount) {
+ if (p && !--p->refcount) {
con_release_unimap(p);
kfree(p);
}
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 5e096f4..65447c5 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -1463,7 +1463,6 @@ compat_kdfontop_ioctl(struct compat_console_font_op __user *fontop,
if (!perm && op->op != KD_FONT_OP_GET)
return -EPERM;
op->data = compat_ptr(((struct compat_console_font_op *)op)->data);
- op->flags |= KD_FONT_FLAG_OLD;
i = con_font_op(vc, op);
if (i)
return i;
diff --git a/drivers/uio/uio_pci_generic.c b/drivers/uio/uio_pci_generic.c
index 02bd47b..0bd08ef 100644
--- a/drivers/uio/uio_pci_generic.c
+++ b/drivers/uio/uio_pci_generic.c
@@ -45,77 +45,12 @@ to_uio_pci_generic_dev(struct uio_info *info)
static irqreturn_t irqhandler(int irq, struct uio_info *info)
{
struct uio_pci_generic_dev *gdev = to_uio_pci_generic_dev(info);
- struct pci_dev *pdev = gdev->pdev;
- irqreturn_t ret = IRQ_NONE;
- u32 cmd_status_dword;
- u16 origcmd, newcmd, status;
-
- /* We do a single dword read to retrieve both command and status.
- * Document assumptions that make this possible. */
- BUILD_BUG_ON(PCI_COMMAND % 4);
- BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
-
- pci_block_user_cfg_access(pdev);
-
- /* Read both command and status registers in a single 32-bit operation.
- * Note: we could cache the value for command and move the status read
- * out of the lock if there was a way to get notified of user changes
- * to command register through sysfs. Should be good for shared irqs. */
- pci_read_config_dword(pdev, PCI_COMMAND, &cmd_status_dword);
- origcmd = cmd_status_dword;
- status = cmd_status_dword >> 16;
-
- /* Check interrupt status register to see whether our device
- * triggered the interrupt. */
- if (!(status & PCI_STATUS_INTERRUPT))
- goto done;
-
- /* We triggered the interrupt, disable it. */
- newcmd = origcmd | PCI_COMMAND_INTX_DISABLE;
- if (newcmd != origcmd)
- pci_write_config_word(pdev, PCI_COMMAND, newcmd);
- /* UIO core will signal the user process. */
- ret = IRQ_HANDLED;
-done:
-
- pci_unblock_user_cfg_access(pdev);
- return ret;
-}
+ if (!pci_check_and_mask_intx(gdev->pdev))
+ return IRQ_NONE;
-/* Verify that the device supports Interrupt Disable bit in command register,
- * per PCI 2.3, by flipping this bit and reading it back: this bit was readonly
- * in PCI 2.2. */
-static int __devinit verify_pci_2_3(struct pci_dev *pdev)
-{
- u16 orig, new;
- int err = 0;
-
- pci_block_user_cfg_access(pdev);
- pci_read_config_word(pdev, PCI_COMMAND, &orig);
- pci_write_config_word(pdev, PCI_COMMAND,
- orig ^ PCI_COMMAND_INTX_DISABLE);
- pci_read_config_word(pdev, PCI_COMMAND, &new);
- /* There's no way to protect against
- * hardware bugs or detect them reliably, but as long as we know
- * what the value should be, let's go ahead and check it. */
- if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
- err = -EBUSY;
- dev_err(&pdev->dev, "Command changed from 0x%x to 0x%x: "
- "driver or HW bug?\n", orig, new);
- goto err;
- }
- if (!((new ^ orig) & PCI_COMMAND_INTX_DISABLE)) {
- dev_warn(&pdev->dev, "Device does not support "
- "disabling interrupts: unable to bind.\n");
- err = -ENODEV;
- goto err;
- }
- /* Now restore the original value. */
- pci_write_config_word(pdev, PCI_COMMAND, orig);
-err:
- pci_unblock_user_cfg_access(pdev);
- return err;
+ /* UIO core will signal the user process. */
+ return IRQ_HANDLED;
}
static int __devinit probe(struct pci_dev *pdev,
@@ -138,9 +73,10 @@ static int __devinit probe(struct pci_dev *pdev,
return -ENODEV;
}
- err = verify_pci_2_3(pdev);
- if (err)
+ if (!pci_intx_mask_supported(pdev)) {
+ err = -ENODEV;
goto err_verify;
+ }
gdev = kzalloc(sizeof(struct uio_pci_generic_dev), GFP_KERNEL);
if (!gdev) {
diff --git a/drivers/uio/uio_pdrv.c b/drivers/uio/uio_pdrv.c
index ff50595..72d3646 100644
--- a/drivers/uio/uio_pdrv.c
+++ b/drivers/uio/uio_pdrv.c
@@ -104,17 +104,7 @@ static struct platform_driver uio_pdrv = {
},
};
-static int __init uio_pdrv_init(void)
-{
- return platform_driver_register(&uio_pdrv);
-}
-
-static void __exit uio_pdrv_exit(void)
-{
- platform_driver_unregister(&uio_pdrv);
-}
-module_init(uio_pdrv_init);
-module_exit(uio_pdrv_exit);
+module_platform_driver(uio_pdrv);
MODULE_AUTHOR("Uwe Kleine-Koenig");
MODULE_DESCRIPTION("Userspace I/O platform driver");
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index 25de302..b98371d 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -273,18 +273,7 @@ static struct platform_driver uio_pdrv_genirq = {
},
};
-static int __init uio_pdrv_genirq_init(void)
-{
- return platform_driver_register(&uio_pdrv_genirq);
-}
-
-static void __exit uio_pdrv_genirq_exit(void)
-{
- platform_driver_unregister(&uio_pdrv_genirq);
-}
-
-module_init(uio_pdrv_genirq_init);
-module_exit(uio_pdrv_genirq_exit);
+module_platform_driver(uio_pdrv_genirq);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Userspace I/O platform driver with generic IRQ handling");
diff --git a/drivers/uio/uio_pruss.c b/drivers/uio/uio_pruss.c
index e67b566..33a7a27 100644
--- a/drivers/uio/uio_pruss.c
+++ b/drivers/uio/uio_pruss.c
@@ -227,19 +227,7 @@ static struct platform_driver pruss_driver = {
},
};
-static int __init pruss_init_module(void)
-{
- return platform_driver_register(&pruss_driver);
-}
-
-module_init(pruss_init_module);
-
-static void __exit pruss_exit_module(void)
-{
- platform_driver_unregister(&pruss_driver);
-}
-
-module_exit(pruss_exit_module);
+module_platform_driver(pruss_driver);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 791f11b..75823a1 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -48,6 +48,7 @@ config USB_ARCH_HAS_OHCI
default y if ARCH_DAVINCI_DA8XX
default y if ARCH_CNS3XXX
default y if PLAT_SPEAR
+ default y if ARCH_EXYNOS
# PPC:
default y if STB03xxx
default y if PPC_MPC52xx
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index 75eca76..53a7bc0 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -6,6 +6,8 @@
obj-$(CONFIG_USB) += core/
+obj-$(CONFIG_USB_OTG_UTILS) += otg/
+
obj-$(CONFIG_USB_DWC3) += dwc3/
obj-$(CONFIG_USB_MON) += mon/
@@ -51,7 +53,6 @@ obj-$(CONFIG_USB_SPEEDTOUCH) += atm/
obj-$(CONFIG_USB_MUSB_HDRC) += musb/
obj-$(CONFIG_USB_RENESAS_USBHS) += renesas_usbhs/
-obj-$(CONFIG_USB_OTG_UTILS) += otg/
obj-$(CONFIG_USB_GADGET) += gadget/
obj-$(CONFIG_USB_COMMON) += usb-common.o
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index a845f8b..98b89fe 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -1372,18 +1372,7 @@ static struct usb_driver cxacru_usb_driver = {
.id_table = cxacru_usb_ids
};
-static int __init cxacru_init(void)
-{
- return usb_register(&cxacru_usb_driver);
-}
-
-static void __exit cxacru_cleanup(void)
-{
- usb_deregister(&cxacru_usb_driver);
-}
-
-module_init(cxacru_init);
-module_exit(cxacru_cleanup);
+module_usb_driver(cxacru_usb_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/atm/speedtch.c b/drivers/usb/atm/speedtch.c
index 0842cfb..98dd9e4 100644
--- a/drivers/usb/atm/speedtch.c
+++ b/drivers/usb/atm/speedtch.c
@@ -73,9 +73,9 @@ static const char speedtch_driver_name[] = "speedtch";
#define DEFAULT_SW_BUFFERING 0
static unsigned int altsetting = 0; /* zero means: use the default */
-static int dl_512_first = DEFAULT_DL_512_FIRST;
-static int enable_isoc = DEFAULT_ENABLE_ISOC;
-static int sw_buffering = DEFAULT_SW_BUFFERING;
+static bool dl_512_first = DEFAULT_DL_512_FIRST;
+static bool enable_isoc = DEFAULT_ENABLE_ISOC;
+static bool sw_buffering = DEFAULT_SW_BUFFERING;
#define DEFAULT_B_MAX_DSL 8128
#define DEFAULT_MODEM_MODE 11
@@ -953,22 +953,7 @@ static int speedtch_usb_probe(struct usb_interface *intf, const struct usb_devic
return usbatm_usb_probe(intf, id, &speedtch_usbatm_driver);
}
-static int __init speedtch_usb_init(void)
-{
- dbg("%s: driver version %s", __func__, DRIVER_VERSION);
-
- return usb_register(&speedtch_usb_driver);
-}
-
-static void __exit speedtch_usb_cleanup(void)
-{
- dbg("%s", __func__);
-
- usb_deregister(&speedtch_usb_driver);
-}
-
-module_init(speedtch_usb_init);
-module_exit(speedtch_usb_cleanup);
+module_usb_driver(speedtch_usb_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index 428f368..01ea5d7 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -542,7 +542,7 @@ static int modem_index;
static unsigned int debug;
static unsigned int altsetting[NB_MODEM] = {
[0 ... (NB_MODEM - 1)] = FASTEST_ISO_INTF};
-static int sync_wait[NB_MODEM];
+static bool sync_wait[NB_MODEM];
static char *cmv_file[NB_MODEM];
static int annex[NB_MODEM];
@@ -2753,36 +2753,7 @@ static struct usb_driver uea_driver = {
MODULE_DEVICE_TABLE(usb, uea_ids);
-/**
- * uea_init - Initialize the module.
- * Register to USB subsystem
- */
-static int __init uea_init(void)
-{
- printk(KERN_INFO "[ueagle-atm] driver " EAGLEUSBVERSION " loaded\n");
-
- usb_register(&uea_driver);
-
- return 0;
-}
-
-module_init(uea_init);
-
-/**
- * uea_exit - Destroy module
- * Deregister with USB subsystem
- */
-static void __exit uea_exit(void)
-{
- /*
- * This calls automatically the uea_disconnect method if necessary:
- */
- usb_deregister(&uea_driver);
-
- printk(KERN_INFO "[ueagle-atm] driver unloaded\n");
-}
-
-module_exit(uea_exit);
+module_usb_driver(uea_driver);
MODULE_AUTHOR("Damien Bergamini/Matthieu Castet/Stanislaw W. Gruszka");
MODULE_DESCRIPTION("ADI 930/Eagle USB ADSL Modem driver");
diff --git a/drivers/usb/c67x00/c67x00-drv.c b/drivers/usb/c67x00/c67x00-drv.c
index 57ae44c..6f3b6e2 100644
--- a/drivers/usb/c67x00/c67x00-drv.c
+++ b/drivers/usb/c67x00/c67x00-drv.c
@@ -225,21 +225,10 @@ static struct platform_driver c67x00_driver = {
.name = "c67x00",
},
};
-MODULE_ALIAS("platform:c67x00");
-
-static int __init c67x00_init(void)
-{
- return platform_driver_register(&c67x00_driver);
-}
-static void __exit c67x00_exit(void)
-{
- platform_driver_unregister(&c67x00_driver);
-}
-
-module_init(c67x00_init);
-module_exit(c67x00_exit);
+module_platform_driver(c67x00_driver);
MODULE_AUTHOR("Peter Korsgaard, Jan Veldeman, Grant Likely");
MODULE_DESCRIPTION("Cypress C67X00 USB Controller Driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:c67x00");
diff --git a/drivers/usb/c67x00/c67x00-hcd.c b/drivers/usb/c67x00/c67x00-hcd.c
index d3e1356..75e47b8 100644
--- a/drivers/usb/c67x00/c67x00-hcd.c
+++ b/drivers/usb/c67x00/c67x00-hcd.c
@@ -271,7 +271,6 @@ static void c67x00_hcd_irq(struct c67x00_sie *sie, u16 int_status, u16 msg)
if (int_status & SOFEOP_FLG(sie->sie_num)) {
c67x00_ll_usb_clear_status(sie, SOF_EOP_IRQ_FLG);
c67x00_sched_kick(c67x00);
- set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
}
}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index a8078d0..9543b19 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -58,12 +58,62 @@ static struct usb_driver acm_driver;
static struct tty_driver *acm_tty_driver;
static struct acm *acm_table[ACM_TTY_MINORS];
-static DEFINE_MUTEX(open_mutex);
+static DEFINE_MUTEX(acm_table_lock);
-#define ACM_READY(acm) (acm && acm->dev && acm->port.count)
+/*
+ * acm_table accessors
+ */
-static const struct tty_port_operations acm_port_ops = {
-};
+/*
+ * Look up an ACM structure by index. If found and not disconnected, increment
+ * its refcount and return it with its mutex held.
+ */
+static struct acm *acm_get_by_index(unsigned index)
+{
+ struct acm *acm;
+
+ mutex_lock(&acm_table_lock);
+ acm = acm_table[index];
+ if (acm) {
+ mutex_lock(&acm->mutex);
+ if (acm->disconnected) {
+ mutex_unlock(&acm->mutex);
+ acm = NULL;
+ } else {
+ tty_port_get(&acm->port);
+ mutex_unlock(&acm->mutex);
+ }
+ }
+ mutex_unlock(&acm_table_lock);
+ return acm;
+}
+
+/*
+ * Try to find an available minor number and if found, associate it with 'acm'.
+ */
+static int acm_alloc_minor(struct acm *acm)
+{
+ int minor;
+
+ mutex_lock(&acm_table_lock);
+ for (minor = 0; minor < ACM_TTY_MINORS; minor++) {
+ if (!acm_table[minor]) {
+ acm_table[minor] = acm;
+ break;
+ }
+ }
+ mutex_unlock(&acm_table_lock);
+
+ return minor;
+}
+
+/* Release the minor number associated with 'acm'. */
+static void acm_release_minor(struct acm *acm)
+{
+ mutex_lock(&acm_table_lock);
+ acm_table[acm->minor] = NULL;
+ mutex_unlock(&acm_table_lock);
+}
/*
* Functions for ACM control messages.
@@ -267,9 +317,6 @@ static void acm_ctrl_irq(struct urb *urb)
goto exit;
}
- if (!ACM_READY(acm))
- goto exit;
-
usb_mark_last_busy(acm->dev);
data = (unsigned char *)(dr + 1);
@@ -429,8 +476,7 @@ static void acm_write_bulk(struct urb *urb)
spin_lock_irqsave(&acm->write_lock, flags);
acm_write_done(acm, wb);
spin_unlock_irqrestore(&acm->write_lock, flags);
- if (ACM_READY(acm))
- schedule_work(&acm->work);
+ schedule_work(&acm->work);
}
static void acm_softint(struct work_struct *work)
@@ -440,8 +486,6 @@ static void acm_softint(struct work_struct *work)
dev_vdbg(&acm->data->dev, "%s\n", __func__);
- if (!ACM_READY(acm))
- return;
tty = tty_port_tty_get(&acm->port);
if (!tty)
return;
@@ -453,93 +497,122 @@ static void acm_softint(struct work_struct *work)
* TTY handlers
*/
-static int acm_tty_open(struct tty_struct *tty, struct file *filp)
+static int acm_tty_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct acm *acm;
- int rv = -ENODEV;
-
- mutex_lock(&open_mutex);
+ int retval;
- acm = acm_table[tty->index];
- if (!acm || !acm->dev)
- goto out;
- else
- rv = 0;
+ dev_dbg(tty->dev, "%s\n", __func__);
- dev_dbg(&acm->control->dev, "%s\n", __func__);
+ acm = acm_get_by_index(tty->index);
+ if (!acm)
+ return -ENODEV;
- set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
+ retval = tty_init_termios(tty);
+ if (retval)
+ goto error_init_termios;
tty->driver_data = acm;
- tty_port_tty_set(&acm->port, tty);
- if (usb_autopm_get_interface(acm->control) < 0)
- goto early_bail;
- else
- acm->control->needs_remote_wakeup = 1;
+ /* Final install (we use the default method) */
+ tty_driver_kref_get(driver);
+ tty->count++;
+ driver->ttys[tty->index] = tty;
+
+ return 0;
+
+error_init_termios:
+ tty_port_put(&acm->port);
+ return retval;
+}
+
+static int acm_tty_open(struct tty_struct *tty, struct file *filp)
+{
+ struct acm *acm = tty->driver_data;
+
+ dev_dbg(tty->dev, "%s\n", __func__);
+
+ return tty_port_open(&acm->port, tty, filp);
+}
+
+static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
+{
+ struct acm *acm = container_of(port, struct acm, port);
+ int retval = -ENODEV;
+
+ dev_dbg(&acm->control->dev, "%s\n", __func__);
mutex_lock(&acm->mutex);
- if (acm->port.count++) {
- mutex_unlock(&acm->mutex);
- usb_autopm_put_interface(acm->control);
- goto out;
- }
+ if (acm->disconnected)
+ goto disconnected;
+
+ retval = usb_autopm_get_interface(acm->control);
+ if (retval)
+ goto error_get_interface;
+
+ /*
+ * FIXME: Why do we need this? Allocating 64K of physically contiguous
+ * memory is really nasty...
+ */
+ set_bit(TTY_NO_WRITE_SPLIT, &tty->flags);
+ acm->control->needs_remote_wakeup = 1;
acm->ctrlurb->dev = acm->dev;
if (usb_submit_urb(acm->ctrlurb, GFP_KERNEL)) {
dev_err(&acm->control->dev,
"%s - usb_submit_urb(ctrl irq) failed\n", __func__);
- goto bail_out;
+ goto error_submit_urb;
}
- if (0 > acm_set_control(acm, acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS) &&
+ acm->ctrlout = ACM_CTRL_DTR | ACM_CTRL_RTS;
+ if (acm_set_control(acm, acm->ctrlout) < 0 &&
(acm->ctrl_caps & USB_CDC_CAP_LINE))
- goto bail_out;
+ goto error_set_control;
usb_autopm_put_interface(acm->control);
if (acm_submit_read_urbs(acm, GFP_KERNEL))
- goto bail_out;
-
- set_bit(ASYNCB_INITIALIZED, &acm->port.flags);
- rv = tty_port_block_til_ready(&acm->port, tty, filp);
+ goto error_submit_read_urbs;
mutex_unlock(&acm->mutex);
-out:
- mutex_unlock(&open_mutex);
- return rv;
-bail_out:
- acm->port.count--;
- mutex_unlock(&acm->mutex);
+ return 0;
+
+error_submit_read_urbs:
+ acm->ctrlout = 0;
+ acm_set_control(acm, acm->ctrlout);
+error_set_control:
+ usb_kill_urb(acm->ctrlurb);
+error_submit_urb:
usb_autopm_put_interface(acm->control);
-early_bail:
- mutex_unlock(&open_mutex);
- tty_port_tty_set(&acm->port, NULL);
- return -EIO;
+error_get_interface:
+disconnected:
+ mutex_unlock(&acm->mutex);
+ return retval;
}
-static void acm_tty_unregister(struct acm *acm)
+static void acm_port_destruct(struct tty_port *port)
{
- int i;
+ struct acm *acm = container_of(port, struct acm, port);
+
+ dev_dbg(&acm->control->dev, "%s\n", __func__);
tty_unregister_device(acm_tty_driver, acm->minor);
+ acm_release_minor(acm);
usb_put_intf(acm->control);
- acm_table[acm->minor] = NULL;
- usb_free_urb(acm->ctrlurb);
- for (i = 0; i < ACM_NW; i++)
- usb_free_urb(acm->wb[i].urb);
- for (i = 0; i < acm->rx_buflimit; i++)
- usb_free_urb(acm->read_urbs[i]);
kfree(acm->country_codes);
kfree(acm);
}
-static void acm_port_down(struct acm *acm)
+static void acm_port_shutdown(struct tty_port *port)
{
+ struct acm *acm = container_of(port, struct acm, port);
int i;
- if (acm->dev) {
+ dev_dbg(&acm->control->dev, "%s\n", __func__);
+
+ mutex_lock(&acm->mutex);
+ if (!acm->disconnected) {
usb_autopm_get_interface(acm->control);
acm_set_control(acm, acm->ctrlout = 0);
usb_kill_urb(acm->ctrlurb);
@@ -550,40 +623,28 @@ static void acm_port_down(struct acm *acm)
acm->control->needs_remote_wakeup = 0;
usb_autopm_put_interface(acm->control);
}
+ mutex_unlock(&acm->mutex);
+}
+
+static void acm_tty_cleanup(struct tty_struct *tty)
+{
+ struct acm *acm = tty->driver_data;
+ dev_dbg(&acm->control->dev, "%s\n", __func__);
+ tty_port_put(&acm->port);
}
static void acm_tty_hangup(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
+ dev_dbg(&acm->control->dev, "%s\n", __func__);
tty_port_hangup(&acm->port);
- mutex_lock(&open_mutex);
- acm_port_down(acm);
- mutex_unlock(&open_mutex);
}
static void acm_tty_close(struct tty_struct *tty, struct file *filp)
{
struct acm *acm = tty->driver_data;
-
- /* Perform the closing process and see if we need to do the hardware
- shutdown */
- if (!acm)
- return;
-
- mutex_lock(&open_mutex);
- if (tty_port_close_start(&acm->port, tty, filp) == 0) {
- if (!acm->dev) {
- tty_port_tty_set(&acm->port, NULL);
- acm_tty_unregister(acm);
- tty->driver_data = NULL;
- }
- mutex_unlock(&open_mutex);
- return;
- }
- acm_port_down(acm);
- tty_port_close_end(&acm->port, tty);
- tty_port_tty_set(&acm->port, NULL);
- mutex_unlock(&open_mutex);
+ dev_dbg(&acm->control->dev, "%s\n", __func__);
+ tty_port_close(&acm->port, tty, filp);
}
static int acm_tty_write(struct tty_struct *tty,
@@ -595,8 +656,6 @@ static int acm_tty_write(struct tty_struct *tty,
int wbn;
struct acm_wb *wb;
- if (!ACM_READY(acm))
- return -EINVAL;
if (!count)
return 0;
@@ -625,8 +684,6 @@ static int acm_tty_write(struct tty_struct *tty,
static int acm_tty_write_room(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
- if (!ACM_READY(acm))
- return -EINVAL;
/*
* Do not let the line discipline to know that we have a reserve,
* or it might get too enthusiastic.
@@ -637,7 +694,11 @@ static int acm_tty_write_room(struct tty_struct *tty)
static int acm_tty_chars_in_buffer(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
- if (!ACM_READY(acm))
+ /*
+ * if the device was unplugged then any remaining characters fell out
+ * of the connector ;)
+ */
+ if (acm->disconnected)
return 0;
/*
* This is inaccurate (overcounts), but it works.
@@ -649,9 +710,6 @@ static void acm_tty_throttle(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
- if (!ACM_READY(acm))
- return;
-
spin_lock_irq(&acm->read_lock);
acm->throttle_req = 1;
spin_unlock_irq(&acm->read_lock);
@@ -662,9 +720,6 @@ static void acm_tty_unthrottle(struct tty_struct *tty)
struct acm *acm = tty->driver_data;
unsigned int was_throttled;
- if (!ACM_READY(acm))
- return;
-
spin_lock_irq(&acm->read_lock);
was_throttled = acm->throttled;
acm->throttled = 0;
@@ -679,8 +734,7 @@ static int acm_tty_break_ctl(struct tty_struct *tty, int state)
{
struct acm *acm = tty->driver_data;
int retval;
- if (!ACM_READY(acm))
- return -EINVAL;
+
retval = acm_send_break(acm, state ? 0xffff : 0);
if (retval < 0)
dev_dbg(&acm->control->dev, "%s - send break failed\n",
@@ -692,9 +746,6 @@ static int acm_tty_tiocmget(struct tty_struct *tty)
{
struct acm *acm = tty->driver_data;
- if (!ACM_READY(acm))
- return -EINVAL;
-
return (acm->ctrlout & ACM_CTRL_DTR ? TIOCM_DTR : 0) |
(acm->ctrlout & ACM_CTRL_RTS ? TIOCM_RTS : 0) |
(acm->ctrlin & ACM_CTRL_DSR ? TIOCM_DSR : 0) |
@@ -709,9 +760,6 @@ static int acm_tty_tiocmset(struct tty_struct *tty,
struct acm *acm = tty->driver_data;
unsigned int newctrl;
- if (!ACM_READY(acm))
- return -EINVAL;
-
newctrl = acm->ctrlout;
set = (set & TIOCM_DTR ? ACM_CTRL_DTR : 0) |
(set & TIOCM_RTS ? ACM_CTRL_RTS : 0);
@@ -728,11 +776,6 @@ static int acm_tty_tiocmset(struct tty_struct *tty,
static int acm_tty_ioctl(struct tty_struct *tty,
unsigned int cmd, unsigned long arg)
{
- struct acm *acm = tty->driver_data;
-
- if (!ACM_READY(acm))
- return -EINVAL;
-
return -ENOIOCTLCMD;
}
@@ -756,9 +799,6 @@ static void acm_tty_set_termios(struct tty_struct *tty,
struct usb_cdc_line_coding newline;
int newctrl = acm->ctrlout;
- if (!ACM_READY(acm))
- return;
-
newline.dwDTERate = cpu_to_le32(tty_get_baud_rate(tty));
newline.bCharFormat = termios->c_cflag & CSTOPB ? 2 : 0;
newline.bParityType = termios->c_cflag & PARENB ?
@@ -788,6 +828,12 @@ static void acm_tty_set_termios(struct tty_struct *tty,
}
}
+static const struct tty_port_operations acm_port_ops = {
+ .shutdown = acm_port_shutdown,
+ .activate = acm_port_activate,
+ .destruct = acm_port_destruct,
+};
+
/*
* USB probe and disconnect routines.
*/
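The close/hangup paths above now funnel through the tty_port helpers, which drive the new acm_port_ops callbacks. A minimal sketch of how such a port-centric driver is typically wired (illustrative only; the demo_* names are not from cdc-acm): the helper layer calls .activate on the first open, .shutdown on the final close or hangup, and .destruct once tty_port_put() drops the last reference.

struct demo_port {
	struct tty_port port;		/* refcounted; freed from .destruct */
};

static int demo_activate(struct tty_port *port, struct tty_struct *tty)
{
	/* first open: start I/O, submit URBs, raise DTR/RTS, ... */
	return 0;
}

static void demo_shutdown(struct tty_port *port)
{
	/* last close or hangup: stop I/O */
}

static void demo_destruct(struct tty_port *port)
{
	/* last reference gone (tty_port_put()): free the structure */
	kfree(container_of(port, struct demo_port, port));
}

static const struct tty_port_operations demo_port_ops = {
	.activate = demo_activate,
	.shutdown = demo_shutdown,
	.destruct = demo_destruct,
};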
@@ -1047,12 +1093,6 @@ skip_normal_probe:
}
made_compressed_probe:
dev_dbg(&intf->dev, "interfaces are valid\n");
- for (minor = 0; minor < ACM_TTY_MINORS && acm_table[minor]; minor++);
-
- if (minor == ACM_TTY_MINORS) {
- dev_err(&intf->dev, "no more free acm devices\n");
- return -ENODEV;
- }
acm = kzalloc(sizeof(struct acm), GFP_KERNEL);
if (acm == NULL) {
@@ -1060,6 +1100,13 @@ made_compressed_probe:
goto alloc_fail;
}
+ minor = acm_alloc_minor(acm);
+ if (minor == ACM_TTY_MINORS) {
+ dev_err(&intf->dev, "no more free acm devices\n");
+ kfree(acm);
+ return -ENODEV;
+ }
+
ctrlsize = usb_endpoint_maxp(epctrl);
readsize = usb_endpoint_maxp(epread) *
(quirks == SINGLE_RX_URB ? 1 : 2);
@@ -1183,6 +1230,8 @@ made_compressed_probe:
i = device_create_file(&intf->dev, &dev_attr_wCountryCodes);
if (i < 0) {
kfree(acm->country_codes);
+ acm->country_codes = NULL;
+ acm->country_code_size = 0;
goto skip_countries;
}
@@ -1191,6 +1240,8 @@ made_compressed_probe:
if (i < 0) {
device_remove_file(&intf->dev, &dev_attr_wCountryCodes);
kfree(acm->country_codes);
+ acm->country_codes = NULL;
+ acm->country_code_size = 0;
goto skip_countries;
}
}
@@ -1218,8 +1269,6 @@ skip_countries:
usb_get_intf(control_interface);
tty_register_device(acm_tty_driver, minor, &control_interface->dev);
- acm_table[minor] = acm;
-
return 0;
alloc_fail7:
for (i = 0; i < ACM_NW; i++)
@@ -1234,6 +1283,7 @@ alloc_fail5:
alloc_fail4:
usb_free_coherent(usb_dev, ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
alloc_fail2:
+ acm_release_minor(acm);
kfree(acm);
alloc_fail:
return -ENOMEM;
@@ -1259,12 +1309,16 @@ static void acm_disconnect(struct usb_interface *intf)
struct acm *acm = usb_get_intfdata(intf);
struct usb_device *usb_dev = interface_to_usbdev(intf);
struct tty_struct *tty;
+ int i;
+
+ dev_dbg(&intf->dev, "%s\n", __func__);
/* sibling interface is already cleaning up */
if (!acm)
return;
- mutex_lock(&open_mutex);
+ mutex_lock(&acm->mutex);
+ acm->disconnected = true;
if (acm->country_codes) {
device_remove_file(&acm->control->dev,
&dev_attr_wCountryCodes);
@@ -1272,33 +1326,32 @@ static void acm_disconnect(struct usb_interface *intf)
&dev_attr_iCountryCodeRelDate);
}
device_remove_file(&acm->control->dev, &dev_attr_bmCapabilities);
- acm->dev = NULL;
usb_set_intfdata(acm->control, NULL);
usb_set_intfdata(acm->data, NULL);
+ mutex_unlock(&acm->mutex);
+
+ tty = tty_port_tty_get(&acm->port);
+ if (tty) {
+ tty_vhangup(tty);
+ tty_kref_put(tty);
+ }
stop_data_traffic(acm);
+ usb_free_urb(acm->ctrlurb);
+ for (i = 0; i < ACM_NW; i++)
+ usb_free_urb(acm->wb[i].urb);
+ for (i = 0; i < acm->rx_buflimit; i++)
+ usb_free_urb(acm->read_urbs[i]);
acm_write_buffers_free(acm);
- usb_free_coherent(usb_dev, acm->ctrlsize, acm->ctrl_buffer,
- acm->ctrl_dma);
+ usb_free_coherent(usb_dev, acm->ctrlsize, acm->ctrl_buffer, acm->ctrl_dma);
acm_read_buffers_free(acm);
if (!acm->combined_interfaces)
usb_driver_release_interface(&acm_driver, intf == acm->control ?
acm->data : acm->control);
- if (acm->port.count == 0) {
- acm_tty_unregister(acm);
- mutex_unlock(&open_mutex);
- return;
- }
-
- mutex_unlock(&open_mutex);
- tty = tty_port_tty_get(&acm->port);
- if (tty) {
- tty_hangup(tty);
- tty_kref_put(tty);
- }
+ tty_port_put(&acm->port);
}
#ifdef CONFIG_PM
@@ -1325,16 +1378,10 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
if (cnt)
return 0;
- /*
- we treat opened interfaces differently,
- we must guard against open
- */
- mutex_lock(&acm->mutex);
- if (acm->port.count)
+ if (test_bit(ASYNCB_INITIALIZED, &acm->port.flags))
stop_data_traffic(acm);
- mutex_unlock(&acm->mutex);
return 0;
}
@@ -1353,8 +1400,7 @@ static int acm_resume(struct usb_interface *intf)
if (cnt)
return 0;
- mutex_lock(&acm->mutex);
- if (acm->port.count) {
+ if (test_bit(ASYNCB_INITIALIZED, &acm->port.flags)) {
rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
spin_lock_irq(&acm->write_lock);
@@ -1378,7 +1424,6 @@ static int acm_resume(struct usb_interface *intf)
}
err_out:
- mutex_unlock(&acm->mutex);
return rv;
}
@@ -1387,15 +1432,14 @@ static int acm_reset_resume(struct usb_interface *intf)
struct acm *acm = usb_get_intfdata(intf);
struct tty_struct *tty;
- mutex_lock(&acm->mutex);
- if (acm->port.count) {
+ if (test_bit(ASYNCB_INITIALIZED, &acm->port.flags)) {
tty = tty_port_tty_get(&acm->port);
if (tty) {
tty_hangup(tty);
tty_kref_put(tty);
}
}
- mutex_unlock(&acm->mutex);
+
return acm_resume(intf);
}
@@ -1604,8 +1648,10 @@ static struct usb_driver acm_driver = {
*/
static const struct tty_operations acm_ops = {
+ .install = acm_tty_install,
.open = acm_tty_open,
.close = acm_tty_close,
+ .cleanup = acm_tty_cleanup,
.hangup = acm_tty_hangup,
.write = acm_tty_write,
.write_room = acm_tty_write_room,
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index ca7937f..35ef887 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -101,6 +101,7 @@ struct acm {
int transmitting;
spinlock_t write_lock;
struct mutex mutex;
+ bool disconnected;
struct usb_cdc_line_coding line; /* bits, stop, parity */
struct work_struct work; /* work queue entry for line discipline waking up */
unsigned int ctrlin; /* input control lines (DCD, DSR, RI, break, overruns) */
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index efe6849..d2b3cff 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -57,6 +57,8 @@ MODULE_DEVICE_TABLE (usb, wdm_ids);
#define WDM_MAX 16
+/* CDC-WMC r1.1 requires wMaxCommand to be "at least 256 decimal (0x100)" */
+#define WDM_DEFAULT_BUFSIZE 256
static DEFINE_MUTEX(wdm_mutex);
@@ -88,7 +90,8 @@ struct wdm_device {
int count;
dma_addr_t shandle;
dma_addr_t ihandle;
- struct mutex lock;
+ struct mutex wlock;
+ struct mutex rlock;
wait_queue_head_t wait;
struct work_struct rxwork;
int werr;
@@ -323,7 +326,7 @@ static ssize_t wdm_write
}
/* concurrent writes and disconnect */
- r = mutex_lock_interruptible(&desc->lock);
+ r = mutex_lock_interruptible(&desc->wlock);
rv = -ERESTARTSYS;
if (r) {
kfree(buf);
@@ -386,7 +389,7 @@ static ssize_t wdm_write
out:
usb_autopm_put_interface(desc->intf);
outnp:
- mutex_unlock(&desc->lock);
+ mutex_unlock(&desc->wlock);
outnl:
return rv < 0 ? rv : count;
}
@@ -399,7 +402,7 @@ static ssize_t wdm_read
struct wdm_device *desc = file->private_data;
- rv = mutex_lock_interruptible(&desc->lock); /*concurrent reads */
+ rv = mutex_lock_interruptible(&desc->rlock); /*concurrent reads */
if (rv < 0)
return -ERESTARTSYS;
@@ -467,14 +470,16 @@ retry:
for (i = 0; i < desc->length - cntr; i++)
desc->ubuf[i] = desc->ubuf[i + cntr];
+ spin_lock_irq(&desc->iuspin);
desc->length -= cntr;
+ spin_unlock_irq(&desc->iuspin);
/* in case we had outstanding data */
if (!desc->length)
clear_bit(WDM_READ, &desc->flags);
rv = cntr;
err:
- mutex_unlock(&desc->lock);
+ mutex_unlock(&desc->rlock);
return rv;
}
@@ -540,7 +545,8 @@ static int wdm_open(struct inode *inode, struct file *file)
}
intf->needs_remote_wakeup = 1;
- mutex_lock(&desc->lock);
+ /* using write lock to protect desc->count */
+ mutex_lock(&desc->wlock);
if (!desc->count++) {
desc->werr = 0;
desc->rerr = 0;
@@ -553,7 +559,7 @@ static int wdm_open(struct inode *inode, struct file *file)
} else {
rv = 0;
}
- mutex_unlock(&desc->lock);
+ mutex_unlock(&desc->wlock);
usb_autopm_put_interface(desc->intf);
out:
mutex_unlock(&wdm_mutex);
@@ -565,9 +571,11 @@ static int wdm_release(struct inode *inode, struct file *file)
struct wdm_device *desc = file->private_data;
mutex_lock(&wdm_mutex);
- mutex_lock(&desc->lock);
+
+ /* using write lock to protect desc->count */
+ mutex_lock(&desc->wlock);
desc->count--;
- mutex_unlock(&desc->lock);
+ mutex_unlock(&desc->wlock);
if (!desc->count) {
dev_dbg(&desc->intf->dev, "wdm_release: cleanup");
@@ -630,7 +638,7 @@ static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
struct usb_cdc_dmm_desc *dmhd;
u8 *buffer = intf->altsetting->extra;
int buflen = intf->altsetting->extralen;
- u16 maxcom = 0;
+ u16 maxcom = WDM_DEFAULT_BUFSIZE;
if (!buffer)
goto out;
@@ -665,7 +673,8 @@ next_desc:
desc = kzalloc(sizeof(struct wdm_device), GFP_KERNEL);
if (!desc)
goto out;
- mutex_init(&desc->lock);
+ mutex_init(&desc->rlock);
+ mutex_init(&desc->wlock);
spin_lock_init(&desc->iuspin);
init_waitqueue_head(&desc->wait);
desc->wMaxCommand = maxcom;
@@ -716,7 +725,7 @@ next_desc:
goto err;
desc->inbuf = usb_alloc_coherent(interface_to_usbdev(intf),
- desc->bMaxPacketSize0,
+ desc->wMaxCommand,
GFP_KERNEL,
&desc->response->transfer_dma);
if (!desc->inbuf)
@@ -779,11 +788,13 @@ static void wdm_disconnect(struct usb_interface *intf)
/* to terminate pending flushes */
clear_bit(WDM_IN_USE, &desc->flags);
spin_unlock_irqrestore(&desc->iuspin, flags);
- mutex_lock(&desc->lock);
+ wake_up_all(&desc->wait);
+ mutex_lock(&desc->rlock);
+ mutex_lock(&desc->wlock);
kill_urbs(desc);
cancel_work_sync(&desc->rxwork);
- mutex_unlock(&desc->lock);
- wake_up_all(&desc->wait);
+ mutex_unlock(&desc->wlock);
+ mutex_unlock(&desc->rlock);
if (!desc->count)
cleanup(desc);
mutex_unlock(&wdm_mutex);
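With desc->lock split into rlock and wlock, every path that needs both (disconnect above, and suspend/pre_reset further down) takes rlock before wlock, so two such paths can never deadlock against each other. A minimal userspace sketch of that ordering rule, with pthread mutexes standing in for the driver's locks:

#include <pthread.h>

static pthread_mutex_t rlock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t wlock = PTHREAD_MUTEX_INITIALIZER;

static void quiesce(void)
{
	pthread_mutex_lock(&rlock);	/* always first: blocks readers */
	pthread_mutex_lock(&wlock);	/* always second: blocks writers */
	/* kill URBs, cancel work, ... */
	pthread_mutex_unlock(&wlock);
	pthread_mutex_unlock(&rlock);
}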
@@ -798,8 +809,10 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
dev_dbg(&desc->intf->dev, "wdm%d_suspend\n", intf->minor);
/* if this is an autosuspend the caller does the locking */
- if (!PMSG_IS_AUTO(message))
- mutex_lock(&desc->lock);
+ if (!PMSG_IS_AUTO(message)) {
+ mutex_lock(&desc->rlock);
+ mutex_lock(&desc->wlock);
+ }
spin_lock_irq(&desc->iuspin);
if (PMSG_IS_AUTO(message) &&
@@ -815,8 +828,10 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
kill_urbs(desc);
cancel_work_sync(&desc->rxwork);
}
- if (!PMSG_IS_AUTO(message))
- mutex_unlock(&desc->lock);
+ if (!PMSG_IS_AUTO(message)) {
+ mutex_unlock(&desc->wlock);
+ mutex_unlock(&desc->rlock);
+ }
return rv;
}
@@ -854,7 +869,8 @@ static int wdm_pre_reset(struct usb_interface *intf)
{
struct wdm_device *desc = usb_get_intfdata(intf);
- mutex_lock(&desc->lock);
+ mutex_lock(&desc->rlock);
+ mutex_lock(&desc->wlock);
kill_urbs(desc);
/*
@@ -876,7 +892,8 @@ static int wdm_post_reset(struct usb_interface *intf)
int rv;
rv = recover_from_urb_loss(desc);
- mutex_unlock(&desc->lock);
+ mutex_unlock(&desc->wlock);
+ mutex_unlock(&desc->rlock);
return 0;
}
@@ -895,24 +912,7 @@ static struct usb_driver wdm_driver = {
.supports_autosuspend = 1,
};
-/* --- low level module stuff --- */
-
-static int __init wdm_init(void)
-{
- int rv;
-
- rv = usb_register(&wdm_driver);
-
- return rv;
-}
-
-static void __exit wdm_exit(void)
-{
- usb_deregister(&wdm_driver);
-}
-
-module_init(wdm_init);
-module_exit(wdm_exit);
+module_usb_driver(wdm_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
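module_usb_driver() removes the hand-rolled init/exit boilerplate; to the best of my understanding it expands, via the generic module_driver() helper, into roughly the following (see include/linux/usb.h for the authoritative definition):

static int __init wdm_driver_init(void)
{
	return usb_register(&wdm_driver);
}
module_init(wdm_driver_init);

static void __exit wdm_driver_exit(void)
{
	usb_deregister(&wdm_driver);
}
module_exit(wdm_driver_exit);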
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index cb3a932..a68c1a6 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -1045,7 +1045,7 @@ static const struct file_operations usblp_fops = {
.llseek = noop_llseek,
};
-static char *usblp_devnode(struct device *dev, mode_t *mode)
+static char *usblp_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
}
@@ -1412,18 +1412,7 @@ static struct usb_driver usblp_driver = {
.supports_autosuspend = 1,
};
-static int __init usblp_init(void)
-{
- return usb_register(&usblp_driver);
-}
-
-static void __exit usblp_exit(void)
-{
- usb_deregister(&usblp_driver);
-}
-
-module_init(usblp_init);
-module_exit(usblp_exit);
+module_usb_driver(usblp_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 12cf5e7..70d69d0 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1116,21 +1116,6 @@ static struct usb_driver usbtmc_driver = {
.resume = usbtmc_resume,
};
-static int __init usbtmc_init(void)
-{
- int retcode;
-
- retcode = usb_register(&usbtmc_driver);
- if (retcode)
- printk(KERN_ERR KBUILD_MODNAME": Unable to register driver\n");
- return retcode;
-}
-module_init(usbtmc_init);
-
-static void __exit usbtmc_exit(void)
-{
- usb_deregister(&usbtmc_driver);
-}
-module_exit(usbtmc_exit);
+module_usb_driver(usbtmc_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index e3beaf2..8df4b76 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -86,13 +86,14 @@ struct async {
void __user *userbuffer;
void __user *userurb;
struct urb *urb;
+ unsigned int mem_usage;
int status;
u32 secid;
u8 bulk_addr;
u8 bulk_status;
};
-static int usbfs_snoop;
+static bool usbfs_snoop;
module_param(usbfs_snoop, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(usbfs_snoop, "true to log all usbfs traffic");
@@ -108,8 +109,44 @@ enum snoop_when {
#define USB_DEVICE_DEV MKDEV(USB_DEVICE_MAJOR, 0)
-#define MAX_USBFS_BUFFER_SIZE 16384
+/* Limit on the total amount of memory we can allocate for transfers */
+static unsigned usbfs_memory_mb = 16;
+module_param(usbfs_memory_mb, uint, 0644);
+MODULE_PARM_DESC(usbfs_memory_mb,
+ "maximum MB allowed for usbfs buffers (0 = no limit)");
+/* Hard limit, necessary to avoid arithmetic overflow */
+#define USBFS_XFER_MAX (UINT_MAX / 2 - 1000000)
+
+static atomic_t usbfs_memory_usage; /* Total memory currently allocated */
+
+/* Check whether it's okay to allocate more memory for a transfer */
+static int usbfs_increase_memory_usage(unsigned amount)
+{
+ unsigned lim;
+
+ /*
+ * Convert usbfs_memory_mb to bytes, avoiding overflows.
+ * 0 means use the hard limit (effectively unlimited).
+ */
+ lim = ACCESS_ONCE(usbfs_memory_mb);
+ if (lim == 0 || lim > (USBFS_XFER_MAX >> 20))
+ lim = USBFS_XFER_MAX;
+ else
+ lim <<= 20;
+
+ atomic_add(amount, &usbfs_memory_usage);
+ if (atomic_read(&usbfs_memory_usage) <= lim)
+ return 0;
+ atomic_sub(amount, &usbfs_memory_usage);
+ return -ENOMEM;
+}
+
+/* Memory for a transfer is being deallocated */
+static void usbfs_decrease_memory_usage(unsigned amount)
+{
+ atomic_sub(amount, &usbfs_memory_usage);
+}
static int connected(struct dev_state *ps)
{
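The usbfs_memory_mb parameter is converted to a byte limit on every check; 0 or an over-large value falls back to the hard cap. A small self-contained userspace sketch of that conversion (values are illustrative):

#include <limits.h>
#include <stdio.h>

#define USBFS_XFER_MAX	(UINT_MAX / 2 - 1000000)

static unsigned int limit_bytes(unsigned int usbfs_memory_mb)
{
	if (usbfs_memory_mb == 0 || usbfs_memory_mb > (USBFS_XFER_MAX >> 20))
		return USBFS_XFER_MAX;		/* "no limit" means the hard cap */
	return usbfs_memory_mb << 20;		/* MB -> bytes */
}

int main(void)
{
	printf("default 16 MB -> %u bytes\n", limit_bytes(16));
	printf("0 (unlimited)  -> %u bytes\n", limit_bytes(0));
	return 0;
}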
@@ -249,10 +286,12 @@ static struct async *alloc_async(unsigned int numisoframes)
static void free_async(struct async *as)
{
put_pid(as->pid);
- put_cred(as->cred);
+ if (as->cred)
+ put_cred(as->cred);
kfree(as->urb->transfer_buffer);
kfree(as->urb->setup_packet);
usb_free_urb(as->urb);
+ usbfs_decrease_memory_usage(as->mem_usage);
kfree(as);
}
@@ -792,9 +831,15 @@ static int proc_control(struct dev_state *ps, void __user *arg)
wLength = ctrl.wLength; /* To suppress 64k PAGE_SIZE warning */
if (wLength > PAGE_SIZE)
return -EINVAL;
+ ret = usbfs_increase_memory_usage(PAGE_SIZE + sizeof(struct urb) +
+ sizeof(struct usb_ctrlrequest));
+ if (ret)
+ return ret;
tbuf = (unsigned char *)__get_free_page(GFP_KERNEL);
- if (!tbuf)
- return -ENOMEM;
+ if (!tbuf) {
+ ret = -ENOMEM;
+ goto done;
+ }
tmo = ctrl.timeout;
snoop(&dev->dev, "control urb: bRequestType=%02x "
"bRequest=%02x wValue=%04x "
@@ -806,8 +851,8 @@ static int proc_control(struct dev_state *ps, void __user *arg)
if (ctrl.bRequestType & 0x80) {
if (ctrl.wLength && !access_ok(VERIFY_WRITE, ctrl.data,
ctrl.wLength)) {
- free_page((unsigned long)tbuf);
- return -EINVAL;
+ ret = -EINVAL;
+ goto done;
}
pipe = usb_rcvctrlpipe(dev, 0);
snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT, NULL, 0);
@@ -821,15 +866,15 @@ static int proc_control(struct dev_state *ps, void __user *arg)
tbuf, max(i, 0));
if ((i > 0) && ctrl.wLength) {
if (copy_to_user(ctrl.data, tbuf, i)) {
- free_page((unsigned long)tbuf);
- return -EFAULT;
+ ret = -EFAULT;
+ goto done;
}
}
} else {
if (ctrl.wLength) {
if (copy_from_user(tbuf, ctrl.data, ctrl.wLength)) {
- free_page((unsigned long)tbuf);
- return -EFAULT;
+ ret = -EFAULT;
+ goto done;
}
}
pipe = usb_sndctrlpipe(dev, 0);
@@ -843,14 +888,18 @@ static int proc_control(struct dev_state *ps, void __user *arg)
usb_lock_device(dev);
snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE, NULL, 0);
}
- free_page((unsigned long)tbuf);
if (i < 0 && i != -EPIPE) {
dev_printk(KERN_DEBUG, &dev->dev, "usbfs: USBDEVFS_CONTROL "
"failed cmd %s rqt %u rq %u len %u ret %d\n",
current->comm, ctrl.bRequestType, ctrl.bRequest,
ctrl.wLength, i);
}
- return i;
+ ret = i;
+ done:
+ free_page((unsigned long) tbuf);
+ usbfs_decrease_memory_usage(PAGE_SIZE + sizeof(struct urb) +
+ sizeof(struct usb_ctrlrequest));
+ return ret;
}
static int proc_bulk(struct dev_state *ps, void __user *arg)
@@ -877,15 +926,20 @@ static int proc_bulk(struct dev_state *ps, void __user *arg)
if (!usb_maxpacket(dev, pipe, !(bulk.ep & USB_DIR_IN)))
return -EINVAL;
len1 = bulk.len;
- if (len1 > MAX_USBFS_BUFFER_SIZE)
+ if (len1 >= USBFS_XFER_MAX)
return -EINVAL;
- if (!(tbuf = kmalloc(len1, GFP_KERNEL)))
- return -ENOMEM;
+ ret = usbfs_increase_memory_usage(len1 + sizeof(struct urb));
+ if (ret)
+ return ret;
+ if (!(tbuf = kmalloc(len1, GFP_KERNEL))) {
+ ret = -ENOMEM;
+ goto done;
+ }
tmo = bulk.timeout;
if (bulk.ep & 0x80) {
if (len1 && !access_ok(VERIFY_WRITE, bulk.data, len1)) {
- kfree(tbuf);
- return -EINVAL;
+ ret = -EINVAL;
+ goto done;
}
snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, NULL, 0);
@@ -896,15 +950,15 @@ static int proc_bulk(struct dev_state *ps, void __user *arg)
if (!i && len2) {
if (copy_to_user(bulk.data, tbuf, len2)) {
- kfree(tbuf);
- return -EFAULT;
+ ret = -EFAULT;
+ goto done;
}
}
} else {
if (len1) {
if (copy_from_user(tbuf, bulk.data, len1)) {
- kfree(tbuf);
- return -EFAULT;
+ ret = -EFAULT;
+ goto done;
}
}
snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, tbuf, len1);
@@ -914,10 +968,11 @@ static int proc_bulk(struct dev_state *ps, void __user *arg)
usb_lock_device(dev);
snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, NULL, 0);
}
+ ret = (i < 0 ? i : len2);
+ done:
kfree(tbuf);
- if (i < 0)
- return i;
- return len2;
+ usbfs_decrease_memory_usage(len1 + sizeof(struct urb));
+ return ret;
}
static int proc_resetep(struct dev_state *ps, void __user *arg)
@@ -1062,7 +1117,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
{
struct usbdevfs_iso_packet_desc *isopkt = NULL;
struct usb_host_endpoint *ep;
- struct async *as;
+ struct async *as = NULL;
struct usb_ctrlrequest *dr = NULL;
unsigned int u, totlen, isofrmlen;
int ret, ifnum = -1;
@@ -1095,32 +1150,30 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
}
if (!ep)
return -ENOENT;
+
+ u = 0;
switch(uurb->type) {
case USBDEVFS_URB_TYPE_CONTROL:
if (!usb_endpoint_xfer_control(&ep->desc))
return -EINVAL;
- /* min 8 byte setup packet,
- * max 8 byte setup plus an arbitrary data stage */
- if (uurb->buffer_length < 8 ||
- uurb->buffer_length > (8 + MAX_USBFS_BUFFER_SIZE))
+ /* min 8 byte setup packet */
+ if (uurb->buffer_length < 8)
return -EINVAL;
dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL);
if (!dr)
return -ENOMEM;
if (copy_from_user(dr, uurb->buffer, 8)) {
- kfree(dr);
- return -EFAULT;
+ ret = -EFAULT;
+ goto error;
}
if (uurb->buffer_length < (le16_to_cpup(&dr->wLength) + 8)) {
- kfree(dr);
- return -EINVAL;
+ ret = -EINVAL;
+ goto error;
}
ret = check_ctrlrecip(ps, dr->bRequestType, dr->bRequest,
le16_to_cpup(&dr->wIndex));
- if (ret) {
- kfree(dr);
- return ret;
- }
+ if (ret)
+ goto error;
uurb->number_of_packets = 0;
uurb->buffer_length = le16_to_cpup(&dr->wLength);
uurb->buffer += 8;
@@ -1138,6 +1191,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
__le16_to_cpup(&dr->wValue),
__le16_to_cpup(&dr->wIndex),
__le16_to_cpup(&dr->wLength));
+ u = sizeof(struct usb_ctrlrequest);
break;
case USBDEVFS_URB_TYPE_BULK:
@@ -1151,8 +1205,6 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
goto interrupt_urb;
}
uurb->number_of_packets = 0;
- if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
- return -EINVAL;
break;
case USBDEVFS_URB_TYPE_INTERRUPT:
@@ -1160,8 +1212,6 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
return -EINVAL;
interrupt_urb:
uurb->number_of_packets = 0;
- if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
- return -EINVAL;
break;
case USBDEVFS_URB_TYPE_ISO:
@@ -1176,50 +1226,53 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
if (!(isopkt = kmalloc(isofrmlen, GFP_KERNEL)))
return -ENOMEM;
if (copy_from_user(isopkt, iso_frame_desc, isofrmlen)) {
- kfree(isopkt);
- return -EFAULT;
+ ret = -EFAULT;
+ goto error;
}
for (totlen = u = 0; u < uurb->number_of_packets; u++) {
/* arbitrary limit,
* sufficient for USB 2.0 high-bandwidth iso */
if (isopkt[u].length > 8192) {
- kfree(isopkt);
- return -EINVAL;
+ ret = -EINVAL;
+ goto error;
}
totlen += isopkt[u].length;
}
- /* 3072 * 64 microframes */
- if (totlen > 196608) {
- kfree(isopkt);
- return -EINVAL;
- }
+ u *= sizeof(struct usb_iso_packet_descriptor);
uurb->buffer_length = totlen;
break;
default:
return -EINVAL;
}
+
+ if (uurb->buffer_length >= USBFS_XFER_MAX) {
+ ret = -EINVAL;
+ goto error;
+ }
if (uurb->buffer_length > 0 &&
!access_ok(is_in ? VERIFY_WRITE : VERIFY_READ,
uurb->buffer, uurb->buffer_length)) {
- kfree(isopkt);
- kfree(dr);
- return -EFAULT;
+ ret = -EFAULT;
+ goto error;
}
as = alloc_async(uurb->number_of_packets);
if (!as) {
- kfree(isopkt);
- kfree(dr);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto error;
}
+ u += sizeof(struct async) + sizeof(struct urb) + uurb->buffer_length;
+ ret = usbfs_increase_memory_usage(u);
+ if (ret)
+ goto error;
+ as->mem_usage = u;
+
if (uurb->buffer_length > 0) {
as->urb->transfer_buffer = kmalloc(uurb->buffer_length,
GFP_KERNEL);
if (!as->urb->transfer_buffer) {
- kfree(isopkt);
- kfree(dr);
- free_async(as);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto error;
}
/* Isochronous input data may end up being discontiguous
* if some of the packets are short. Clear the buffer so
@@ -1253,6 +1306,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
as->urb->transfer_buffer_length = uurb->buffer_length;
as->urb->setup_packet = (unsigned char *)dr;
+ dr = NULL;
as->urb->start_frame = uurb->start_frame;
as->urb->number_of_packets = uurb->number_of_packets;
if (uurb->type == USBDEVFS_URB_TYPE_ISO ||
@@ -1268,6 +1322,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
totlen += isopkt[u].length;
}
kfree(isopkt);
+ isopkt = NULL;
as->ps = ps;
as->userurb = arg;
if (is_in && uurb->buffer_length > 0)
@@ -1282,8 +1337,8 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
if (!is_in && uurb->buffer_length > 0) {
if (copy_from_user(as->urb->transfer_buffer, uurb->buffer,
uurb->buffer_length)) {
- free_async(as);
- return -EFAULT;
+ ret = -EFAULT;
+ goto error;
}
}
snoop_urb(ps->dev, as->userurb, as->urb->pipe,
@@ -1329,10 +1384,16 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
snoop_urb(ps->dev, as->userurb, as->urb->pipe,
0, ret, COMPLETE, NULL, 0);
async_removepending(as);
- free_async(as);
- return ret;
+ goto error;
}
return 0;
+
+ error:
+ kfree(isopkt);
+ kfree(dr);
+ if (as)
+ free_async(as);
+ return ret;
}
static int proc_submiturb(struct dev_state *ps, void __user *arg)
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 45887a0..d40ff95 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -45,10 +45,12 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids,
struct usb_dynid *dynid;
u32 idVendor = 0;
u32 idProduct = 0;
+ unsigned int bInterfaceClass = 0;
int fields = 0;
int retval = 0;
- fields = sscanf(buf, "%x %x", &idVendor, &idProduct);
+ fields = sscanf(buf, "%x %x %x", &idVendor, &idProduct,
+ &bInterfaceClass);
if (fields < 2)
return -EINVAL;
@@ -60,6 +62,10 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids,
dynid->id.idVendor = idVendor;
dynid->id.idProduct = idProduct;
dynid->id.match_flags = USB_DEVICE_ID_MATCH_DEVICE;
+ if (fields == 3) {
+ dynid->id.bInterfaceClass = (u8)bInterfaceClass;
+ dynid->id.match_flags |= USB_DEVICE_ID_MATCH_INT_CLASS;
+ }
spin_lock(&dynids->lock);
list_add_tail(&dynid->node, &dynids->list);
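With the optional third field, a dynamic ID can be restricted to interfaces of one class. A userspace sketch of feeding such an ID to a driver's new_id attribute (the driver name and IDs are placeholders):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/bus/usb/drivers/cdc_acm/new_id", "w");

	if (!f)
		return 1;
	/* idVendor idProduct bInterfaceClass (all hex) */
	fprintf(f, "1234 5678 0a\n");
	fclose(f);
	return 0;
}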
@@ -1073,17 +1079,10 @@ static int usb_suspend_interface(struct usb_device *udev,
goto done;
driver = to_usb_driver(intf->dev.driver);
- if (driver->suspend) {
- status = driver->suspend(intf, msg);
- if (status && !PMSG_IS_AUTO(msg))
- dev_err(&intf->dev, "%s error %d\n",
- "suspend", status);
- } else {
- /* Later we will unbind the driver and reprobe */
- intf->needs_binding = 1;
- dev_warn(&intf->dev, "no %s for driver %s?\n",
- "suspend", driver->name);
- }
+ /* at this time we know the driver supports suspend */
+ status = driver->suspend(intf, msg);
+ if (status && !PMSG_IS_AUTO(msg))
+ dev_err(&intf->dev, "suspend error %d\n", status);
done:
dev_vdbg(&intf->dev, "%s: status %d\n", __func__, status);
@@ -1132,16 +1131,9 @@ static int usb_resume_interface(struct usb_device *udev,
"reset_resume", driver->name);
}
} else {
- if (driver->resume) {
- status = driver->resume(intf);
- if (status)
- dev_err(&intf->dev, "%s error %d\n",
- "resume", status);
- } else {
- intf->needs_binding = 1;
- dev_warn(&intf->dev, "no %s for driver %s?\n",
- "resume", driver->name);
- }
+ status = driver->resume(intf);
+ if (status)
+ dev_err(&intf->dev, "resume error %d\n", status);
}
done:
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index 99458c8..d95760d 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -66,7 +66,7 @@ static struct usb_class {
struct class *class;
} *usb_class;
-static char *usb_devnode(struct device *dev, mode_t *mode)
+static char *usb_devnode(struct device *dev, umode_t *mode)
{
struct usb_class_driver *drv;
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index a004db3..81e2c0d9 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -187,7 +187,10 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
return -ENODEV;
dev->current_state = PCI_D0;
- if (!dev->irq) {
+ /* The xHCI driver supports MSI and MSI-X,
+ * so don't fail if the BIOS doesn't provide a legacy IRQ.
+ */
+ if (!dev->irq && (driver->flags & HCD_MASK) != HCD_USB3) {
dev_err(&dev->dev,
"Found HC with no IRQ. Check BIOS/PCI %s setup!\n",
pci_name(dev));
@@ -453,10 +456,6 @@ static int resume_common(struct device *dev, int event)
pci_set_master(pci_dev);
- clear_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
- if (hcd->shared_hcd)
- clear_bit(HCD_FLAG_SAW_IRQ, &hcd->shared_hcd->flags);
-
if (hcd->driver->pci_resume && !HCD_DEAD(hcd)) {
if (event != PM_EVENT_AUTO_RESUME)
wait_for_companions(pci_dev, hcd);
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 13222d3..e128232 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -658,7 +658,7 @@ error:
len > offsetof(struct usb_device_descriptor,
bDeviceProtocol))
((struct usb_device_descriptor *) ubuf)->
- bDeviceProtocol = 1;
+ bDeviceProtocol = USB_HUB_PR_HS_SINGLE_TT;
}
/* any errors get returned through the urb completion */
@@ -1168,20 +1168,6 @@ int usb_hcd_check_unlink_urb(struct usb_hcd *hcd, struct urb *urb,
if (urb->unlinked)
return -EBUSY;
urb->unlinked = status;
-
- /* IRQ setup can easily be broken so that USB controllers
- * never get completion IRQs ... maybe even the ones we need to
- * finish unlinking the initial failed usb_set_address()
- * or device descriptor fetch.
- */
- if (!HCD_SAW_IRQ(hcd) && !is_root_hub(urb->dev)) {
- dev_warn(hcd->self.controller, "Unlink after no-IRQ? "
- "Controller is probably using the wrong IRQ.\n");
- set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
- if (hcd->shared_hcd)
- set_bit(HCD_FLAG_SAW_IRQ, &hcd->shared_hcd->flags);
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(usb_hcd_check_unlink_urb);
@@ -1412,11 +1398,10 @@ int usb_hcd_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
ret = -EAGAIN;
else
urb->transfer_flags |= URB_DMA_MAP_SG;
- if (n != urb->num_sgs) {
- urb->num_sgs = n;
+ urb->num_mapped_sgs = n;
+ if (n != urb->num_sgs)
urb->transfer_flags |=
URB_DMA_SG_COMBINED;
- }
} else if (urb->sg) {
struct scatterlist *sg = urb->sg;
urb->transfer_dma = dma_map_page(
@@ -2148,16 +2133,12 @@ irqreturn_t usb_hcd_irq (int irq, void *__hcd)
*/
local_irq_save(flags);
- if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd))) {
+ if (unlikely(HCD_DEAD(hcd) || !HCD_HW_ACCESSIBLE(hcd)))
rc = IRQ_NONE;
- } else if (hcd->driver->irq(hcd) == IRQ_NONE) {
+ else if (hcd->driver->irq(hcd) == IRQ_NONE)
rc = IRQ_NONE;
- } else {
- set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
- if (hcd->shared_hcd)
- set_bit(HCD_FLAG_SAW_IRQ, &hcd->shared_hcd->flags);
+ else
rc = IRQ_HANDLED;
- }
local_irq_restore(flags);
return rc;
@@ -2466,8 +2447,10 @@ int usb_add_hcd(struct usb_hcd *hcd,
&& device_can_wakeup(&hcd->self.root_hub->dev))
dev_dbg(hcd->self.controller, "supports USB remote wakeup\n");
- /* enable irqs just before we start the controller */
- if (usb_hcd_is_primary_hcd(hcd)) {
+ /* enable irqs just before we start the controller,
+ * if the BIOS provides legacy PCI irqs.
+ */
+ if (usb_hcd_is_primary_hcd(hcd) && irqnum) {
retval = usb_hcd_request_irqs(hcd, irqnum, irqflags);
if (retval)
goto err_request_irq;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 7978146..265c2f6 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -84,7 +84,7 @@ struct usb_hub {
static inline int hub_is_superspeed(struct usb_device *hdev)
{
- return (hdev->descriptor.bDeviceProtocol == 3);
+ return (hdev->descriptor.bDeviceProtocol == USB_HUB_PR_SS);
}
/* Protect struct usb_device->state and ->children members
@@ -102,7 +102,7 @@ static DECLARE_WAIT_QUEUE_HEAD(khubd_wait);
static struct task_struct *khubd_task;
/* cycle leds on hubs that aren't blinking for attention */
-static int blinkenlights = 0;
+static bool blinkenlights = 0;
module_param (blinkenlights, bool, S_IRUGO);
MODULE_PARM_DESC (blinkenlights, "true to cycle leds on hubs");
@@ -131,12 +131,12 @@ MODULE_PARM_DESC(initial_descriptor_timeout,
* otherwise the new scheme is used. If that fails and "use_both_schemes"
* is set, then the driver will make another attempt, using the other scheme.
*/
-static int old_scheme_first = 0;
+static bool old_scheme_first = 0;
module_param(old_scheme_first, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(old_scheme_first,
"start with the old device initialization scheme");
-static int use_both_schemes = 1;
+static bool use_both_schemes = 1;
module_param(use_both_schemes, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_both_schemes,
"try the other device initialization scheme if the "
@@ -705,10 +705,26 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
if (type == HUB_INIT3)
goto init3;
- /* After a resume, port power should still be on.
+ /* A superspeed hub, other than the root hub, has to use its Hub
+ * Depth value as an offset into the route string to locate the
+ * bits it uses to determine the downstream port number. So the
+ * hub driver should send a Set Hub Depth request to a superspeed
+ * hub after the hub has been configured, both during
+ * initialization and after a reset.
+ *
+ * After a resume, port power should still be on.
* For any other type of activation, turn it on.
*/
if (type != HUB_RESUME) {
+ if (hdev->parent && hub_is_superspeed(hdev)) {
+ ret = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
+ HUB_SET_DEPTH, USB_RT_HUB,
+ hdev->level - 1, 0, NULL, 0,
+ USB_CTRL_SET_TIMEOUT);
+ if (ret < 0)
+ dev_err(hub->intfdev,
+ "set hub depth failed\n");
+ }
/* Speed up system boot by using a delayed_work for the
* hub's initial power-up delays. This is pretty awkward
@@ -987,18 +1003,6 @@ static int hub_configure(struct usb_hub *hub,
goto fail;
}
- if (hub_is_superspeed(hdev) && (hdev->parent != NULL)) {
- ret = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0),
- HUB_SET_DEPTH, USB_RT_HUB,
- hdev->level - 1, 0, NULL, 0,
- USB_CTRL_SET_TIMEOUT);
-
- if (ret < 0) {
- message = "can't set hub depth";
- goto fail;
- }
- }
-
/* Request the entire hub descriptor.
* hub->descriptor can handle USB_MAXCHILDREN ports,
* but the hub can/will return fewer bytes here.
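For reference, the Set Hub Depth request moved into hub_activate() exists because, per the USB 3.0 route-string layout (4 bits per hub tier), a superspeed hub needs to know which nibble of the route string is its own. A sketch of that selection, under that assumption (not driver code):

static unsigned int route_to_port(unsigned int route_string, unsigned int hub_depth)
{
	/* each tier owns 4 bits; depth 0 is the hub just below the root */
	return (route_string >> (4 * hub_depth)) & 0xf;
}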
@@ -1041,58 +1045,58 @@ static int hub_configure(struct usb_hub *hub,
dev_dbg(hub_dev, "standalone hub\n");
switch (wHubCharacteristics & HUB_CHAR_LPSM) {
- case 0x00:
- dev_dbg(hub_dev, "ganged power switching\n");
- break;
- case 0x01:
- dev_dbg(hub_dev, "individual port power switching\n");
- break;
- case 0x02:
- case 0x03:
- dev_dbg(hub_dev, "no power switching (usb 1.0)\n");
- break;
+ case HUB_CHAR_COMMON_LPSM:
+ dev_dbg(hub_dev, "ganged power switching\n");
+ break;
+ case HUB_CHAR_INDV_PORT_LPSM:
+ dev_dbg(hub_dev, "individual port power switching\n");
+ break;
+ case HUB_CHAR_NO_LPSM:
+ case HUB_CHAR_LPSM:
+ dev_dbg(hub_dev, "no power switching (usb 1.0)\n");
+ break;
}
switch (wHubCharacteristics & HUB_CHAR_OCPM) {
- case 0x00:
- dev_dbg(hub_dev, "global over-current protection\n");
- break;
- case 0x08:
- dev_dbg(hub_dev, "individual port over-current protection\n");
- break;
- case 0x10:
- case 0x18:
- dev_dbg(hub_dev, "no over-current protection\n");
- break;
+ case HUB_CHAR_COMMON_OCPM:
+ dev_dbg(hub_dev, "global over-current protection\n");
+ break;
+ case HUB_CHAR_INDV_PORT_OCPM:
+ dev_dbg(hub_dev, "individual port over-current protection\n");
+ break;
+ case HUB_CHAR_NO_OCPM:
+ case HUB_CHAR_OCPM:
+ dev_dbg(hub_dev, "no over-current protection\n");
+ break;
}
spin_lock_init (&hub->tt.lock);
INIT_LIST_HEAD (&hub->tt.clear_list);
INIT_WORK(&hub->tt.clear_work, hub_tt_work);
switch (hdev->descriptor.bDeviceProtocol) {
- case 0:
- break;
- case 1:
- dev_dbg(hub_dev, "Single TT\n");
- hub->tt.hub = hdev;
- break;
- case 2:
- ret = usb_set_interface(hdev, 0, 1);
- if (ret == 0) {
- dev_dbg(hub_dev, "TT per port\n");
- hub->tt.multi = 1;
- } else
- dev_err(hub_dev, "Using single TT (err %d)\n",
- ret);
- hub->tt.hub = hdev;
- break;
- case 3:
- /* USB 3.0 hubs don't have a TT */
- break;
- default:
- dev_dbg(hub_dev, "Unrecognized hub protocol %d\n",
- hdev->descriptor.bDeviceProtocol);
- break;
+ case USB_HUB_PR_FS:
+ break;
+ case USB_HUB_PR_HS_SINGLE_TT:
+ dev_dbg(hub_dev, "Single TT\n");
+ hub->tt.hub = hdev;
+ break;
+ case USB_HUB_PR_HS_MULTI_TT:
+ ret = usb_set_interface(hdev, 0, 1);
+ if (ret == 0) {
+ dev_dbg(hub_dev, "TT per port\n");
+ hub->tt.multi = 1;
+ } else
+ dev_err(hub_dev, "Using single TT (err %d)\n",
+ ret);
+ hub->tt.hub = hdev;
+ break;
+ case USB_HUB_PR_SS:
+ /* USB 3.0 hubs don't have a TT */
+ break;
+ default:
+ dev_dbg(hub_dev, "Unrecognized hub protocol %d\n",
+ hdev->descriptor.bDeviceProtocol);
+ break;
}
/* Note 8 FS bit times == (8 bits / 12000000 bps) ~= 666ns */
@@ -1360,7 +1364,6 @@ descriptor_error:
return -ENODEV;
}
-/* No BKL needed */
static int
hub_ioctl(struct usb_interface *intf, unsigned int code, void *user_data)
{
@@ -2027,7 +2030,7 @@ static unsigned hub_is_wusb(struct usb_hub *hub)
#define SET_ADDRESS_TRIES 2
#define GET_DESCRIPTOR_TRIES 2
#define SET_CONFIG_TRIES (2 * (use_both_schemes + 1))
-#define USE_NEW_SCHEME(i) ((i) / 2 == old_scheme_first)
+#define USE_NEW_SCHEME(i) ((i) / 2 == (int)old_scheme_first)
#define HUB_ROOT_RESET_TIME 50 /* times are in msec */
#define HUB_SHORT_RESET_TIME 10
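The (int) cast keeps USE_NEW_SCHEME() well-defined now that old_scheme_first is a bool; the selection logic itself is unchanged. A worked example of how the four SET_CONFIG_TRIES attempts alternate when use_both_schemes=1:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	bool old_scheme_first = false;
	int i;

	for (i = 0; i < 4; i++)
		printf("attempt %d: %s scheme\n", i,
		       (i / 2 == (int)old_scheme_first) ? "new" : "old");
	/* prints new, new, old, old; with old_scheme_first=true it is
	 * old, old, new, new */
	return 0;
}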
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index 2278dad..9e186f3 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -65,7 +65,7 @@ static umode_t devmode = USBFS_DEFAULT_DEVMODE;
static umode_t busmode = USBFS_DEFAULT_BUSMODE;
static umode_t listmode = USBFS_DEFAULT_LISTMODE;
-static int usbfs_show_options(struct seq_file *seq, struct vfsmount *mnt)
+static int usbfs_show_options(struct seq_file *seq, struct dentry *root)
{
if (devuid != 0)
seq_printf(seq, ",devuid=%u", devuid);
@@ -264,21 +264,19 @@ static int remount(struct super_block *sb, int *flags, char *data)
return -EINVAL;
}
- if (usbfs_mount && usbfs_mount->mnt_sb)
+ if (usbfs_mount)
update_sb(usbfs_mount->mnt_sb);
return 0;
}
-static struct inode *usbfs_get_inode (struct super_block *sb, int mode, dev_t dev)
+static struct inode *usbfs_get_inode (struct super_block *sb, umode_t mode, dev_t dev)
{
struct inode *inode = new_inode(sb);
if (inode) {
inode->i_ino = get_next_ino();
- inode->i_mode = mode;
- inode->i_uid = current_fsuid();
- inode->i_gid = current_fsgid();
+ inode_init_owner(inode, NULL, mode);
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
switch (mode & S_IFMT) {
default:
@@ -300,7 +298,7 @@ static struct inode *usbfs_get_inode (struct super_block *sb, int mode, dev_t de
}
/* SMP-safe */
-static int usbfs_mknod (struct inode *dir, struct dentry *dentry, int mode,
+static int usbfs_mknod (struct inode *dir, struct dentry *dentry, umode_t mode,
dev_t dev)
{
struct inode *inode = usbfs_get_inode(dir->i_sb, mode, dev);
@@ -317,7 +315,7 @@ static int usbfs_mknod (struct inode *dir, struct dentry *dentry, int mode,
return error;
}
-static int usbfs_mkdir (struct inode *dir, struct dentry *dentry, int mode)
+static int usbfs_mkdir (struct inode *dir, struct dentry *dentry, umode_t mode)
{
int res;
@@ -328,7 +326,7 @@ static int usbfs_mkdir (struct inode *dir, struct dentry *dentry, int mode)
return res;
}
-static int usbfs_create (struct inode *dir, struct dentry *dentry, int mode)
+static int usbfs_create (struct inode *dir, struct dentry *dentry, umode_t mode)
{
mode = (mode & S_IALLUGO) | S_IFREG;
return usbfs_mknod (dir, dentry, mode, 0);
@@ -489,7 +487,7 @@ static int usbfs_fill_super(struct super_block *sb, void *data, int silent)
*
* This function handles both regular files and directories.
*/
-static int fs_create_by_name (const char *name, mode_t mode,
+static int fs_create_by_name (const char *name, umode_t mode,
struct dentry *parent, struct dentry **dentry)
{
int error = 0;
@@ -500,9 +498,8 @@ static int fs_create_by_name (const char *name, mode_t mode,
* have around.
*/
if (!parent ) {
- if (usbfs_mount && usbfs_mount->mnt_sb) {
- parent = usbfs_mount->mnt_sb->s_root;
- }
+ if (usbfs_mount)
+ parent = usbfs_mount->mnt_root;
}
if (!parent) {
@@ -514,7 +511,7 @@ static int fs_create_by_name (const char *name, mode_t mode,
mutex_lock(&parent->d_inode->i_mutex);
*dentry = lookup_one_len(name, parent, strlen(name));
if (!IS_ERR(*dentry)) {
- if ((mode & S_IFMT) == S_IFDIR)
+ if (S_ISDIR(mode))
error = usbfs_mkdir (parent->d_inode, *dentry, mode);
else
error = usbfs_create (parent->d_inode, *dentry, mode);
@@ -525,7 +522,7 @@ static int fs_create_by_name (const char *name, mode_t mode,
return error;
}
-static struct dentry *fs_create_file (const char *name, mode_t mode,
+static struct dentry *fs_create_file (const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops,
uid_t uid, gid_t gid)
@@ -608,7 +605,7 @@ static int create_special_files (void)
ignore_mount = 0;
- parent = usbfs_mount->mnt_sb->s_root;
+ parent = usbfs_mount->mnt_root;
devices_usbfs_dentry = fs_create_file ("devices",
listmode | S_IFREG, parent,
NULL, &usbfs_devices_fops,
@@ -662,7 +659,7 @@ static void usbfs_add_bus(struct usb_bus *bus)
sprintf (name, "%03d", bus->busnum);
- parent = usbfs_mount->mnt_sb->s_root;
+ parent = usbfs_mount->mnt_root;
bus->usbfs_dentry = fs_create_file (name, busmode | S_IFDIR, parent,
bus, NULL, busuid, busgid);
if (bus->usbfs_dentry == NULL) {
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index ecf12e1..4c65eb6 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -117,9 +117,12 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x06a3, 0x0006), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
- /* Guillemot Webcam Hercules Dualpix Exchange*/
+ /* Guillemot Webcam Hercules Dualpix Exchange (2nd ID) */
{ USB_DEVICE(0x06f8, 0x0804), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Guillemot Webcam Hercules Dualpix Exchange*/
+ { USB_DEVICE(0x06f8, 0x3005), .driver_info = USB_QUIRK_RESET_RESUME },
+
/* M-Systems Flash Disk Pioneers */
{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 662c0cf..9e491ca 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -642,7 +642,7 @@ static struct attribute *dev_string_attrs[] = {
NULL
};
-static mode_t dev_string_attrs_are_visible(struct kobject *kobj,
+static umode_t dev_string_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -877,7 +877,7 @@ static struct attribute *intf_assoc_attrs[] = {
NULL,
};
-static mode_t intf_assoc_attrs_are_visible(struct kobject *kobj,
+static umode_t intf_assoc_attrs_are_visible(struct kobject *kobj,
struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 73cd900..8ca9f99 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -47,7 +47,7 @@
const char *usbcore_name = "usbcore";
-static int nousb; /* Disable USB when built into kernel image */
+static bool nousb; /* Disable USB when built into kernel image */
#ifdef CONFIG_USB_SUSPEND
static int usb_autosuspend_delay = 2; /* Default delay value,
@@ -326,7 +326,7 @@ static const struct dev_pm_ops usb_device_pm_ops = {
#endif /* CONFIG_PM */
-static char *usb_devnode(struct device *dev, mode_t *mode)
+static char *usb_devnode(struct device *dev, umode_t *mode)
{
struct usb_device *usb_dev;
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 3888778..45e8479 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -132,20 +132,6 @@ static inline int is_usb_device_driver(struct device_driver *drv)
for_devices;
}
-/* translate USB error codes to codes user space understands */
-static inline int usb_translate_errors(int error_code)
-{
- switch (error_code) {
- case 0:
- case -ENOMEM:
- case -ENODEV:
- return error_code;
- default:
- return -EIO;
- }
-}
-
-
/* for labeling diagnostics */
extern const char *usbcore_name;
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 3c1d67d..d8f741f 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -1,7 +1,10 @@
config USB_DWC3
tristate "DesignWare USB3 DRD Core Support"
- depends on (USB || USB_GADGET)
+ depends on (USB && USB_GADGET)
select USB_OTG_UTILS
+ select USB_GADGET_DUALSPEED
+ select USB_GADGET_SUPERSPEED
+ select USB_XHCI_PLATFORM
help
Say Y or M here if your system has a Dual Role SuperSpeed
USB controller based on the DesignWare USB3 IP Core.
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index 593d1db..900ae74 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -4,10 +4,8 @@ ccflags-$(CONFIG_USB_DWC3_VERBOSE) += -DVERBOSE_DEBUG
obj-$(CONFIG_USB_DWC3) += dwc3.o
dwc3-y := core.o
-
-ifneq ($(CONFIG_USB_GADGET_DWC3),)
- dwc3-y += gadget.o ep0.o
-endif
+dwc3-y += host.o
+dwc3-y += gadget.o ep0.o
ifneq ($(CONFIG_DEBUG_FS),)
dwc3-y += debugfs.o
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 600d823..7c9df63 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -59,6 +59,60 @@
#include "debug.h"
+static char *maximum_speed = "super";
+module_param(maximum_speed, charp, 0);
+MODULE_PARM_DESC(maximum_speed, "Maximum supported speed.");
+
+/* -------------------------------------------------------------------------- */
+
+#define DWC3_DEVS_POSSIBLE 32
+
+static DECLARE_BITMAP(dwc3_devs, DWC3_DEVS_POSSIBLE);
+
+int dwc3_get_device_id(void)
+{
+ int id;
+
+again:
+ id = find_first_zero_bit(dwc3_devs, DWC3_DEVS_POSSIBLE);
+ if (id < DWC3_DEVS_POSSIBLE) {
+ int old;
+
+ old = test_and_set_bit(id, dwc3_devs);
+ if (old)
+ goto again;
+ } else {
+ pr_err("dwc3: no space for new device\n");
+ id = -ENOMEM;
+ }
+
+ return id;
+}
+EXPORT_SYMBOL_GPL(dwc3_get_device_id);
+
+void dwc3_put_device_id(int id)
+{
+ int ret;
+
+ if (id < 0)
+ return;
+
+ ret = test_bit(id, dwc3_devs);
+ WARN(!ret, "dwc3: ID %d not in use\n", id);
+ clear_bit(id, dwc3_devs);
+}
+EXPORT_SYMBOL_GPL(dwc3_put_device_id);
+
+void dwc3_set_mode(struct dwc3 *dwc, u32 mode)
+{
+ u32 reg;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ reg &= ~(DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG));
+ reg |= DWC3_GCTL_PRTCAPDIR(mode);
+ dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+}
+
/**
* dwc3_core_soft_reset - Issues core soft reset and PHY reset
* @dwc: pointer to our context structure
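dwc3_get_device_id()/dwc3_put_device_id() hand out per-instance ids from a small bitmap so that glue layers can create uniquely named "dwc3" child devices. A sketch of plausible glue-layer usage (the platform-device plumbing here is illustrative, not part of this patch):

	int devid;
	struct platform_device *dwc3_pdev;

	devid = dwc3_get_device_id();
	if (devid < 0)
		return -ENODEV;

	dwc3_pdev = platform_device_alloc("dwc3", devid);
	if (!dwc3_pdev) {
		dwc3_put_device_id(devid);
		return -ENOMEM;
	}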
@@ -150,7 +204,7 @@ static void dwc3_free_event_buffers(struct dwc3 *dwc)
struct dwc3_event_buffer *evt;
int i;
- for (i = 0; i < DWC3_EVENT_BUFFERS_NUM; i++) {
+ for (i = 0; i < dwc->num_event_buffers; i++) {
evt = dwc->ev_buffs[i];
if (evt) {
dwc3_free_one_event_buffer(dwc, evt);
@@ -162,17 +216,25 @@ static void dwc3_free_event_buffers(struct dwc3 *dwc)
/**
* dwc3_alloc_event_buffers - Allocates @num event buffers of size @length
* @dwc: Pointer to our controller context structure
- * @num: number of event buffers to allocate
* @length: size of event buffer
*
* Returns 0 on success, otherwise a negative errno. In the error case, dwc
* may contain some, but not all, of the requested buffers.
*/
-static int __devinit dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned num,
- unsigned length)
+static int __devinit dwc3_alloc_event_buffers(struct dwc3 *dwc, unsigned length)
{
+ int num;
int i;
+ num = DWC3_NUM_INT(dwc->hwparams.hwparams1);
+ dwc->num_event_buffers = num;
+
+ dwc->ev_buffs = kzalloc(sizeof(*dwc->ev_buffs) * num, GFP_KERNEL);
+ if (!dwc->ev_buffs) {
+ dev_err(dwc->dev, "can't allocate event buffers array\n");
+ return -ENOMEM;
+ }
+
for (i = 0; i < num; i++) {
struct dwc3_event_buffer *evt;
@@ -198,7 +260,7 @@ static int __devinit dwc3_event_buffers_setup(struct dwc3 *dwc)
struct dwc3_event_buffer *evt;
int n;
- for (n = 0; n < DWC3_EVENT_BUFFERS_NUM; n++) {
+ for (n = 0; n < dwc->num_event_buffers; n++) {
evt = dwc->ev_buffs[n];
dev_dbg(dwc->dev, "Event buf %p dma %08llx length %d\n",
evt->buf, (unsigned long long) evt->dma,
@@ -221,7 +283,7 @@ static void dwc3_event_buffers_cleanup(struct dwc3 *dwc)
struct dwc3_event_buffer *evt;
int n;
- for (n = 0; n < DWC3_EVENT_BUFFERS_NUM; n++) {
+ for (n = 0; n < dwc->num_event_buffers; n++) {
evt = dwc->ev_buffs[n];
dwc3_writel(dwc->regs, DWC3_GEVNTADRLO(n), 0);
dwc3_writel(dwc->regs, DWC3_GEVNTADRHI(n), 0);
@@ -285,8 +347,32 @@ static int __devinit dwc3_core_init(struct dwc3 *dwc)
cpu_relax();
} while (true);
- ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_NUM,
- DWC3_EVENT_BUFFERS_SIZE);
+ dwc3_cache_hwparams(dwc);
+
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ reg &= ~DWC3_GCTL_SCALEDOWN(3);
+ reg &= ~DWC3_GCTL_DISSCRAMBLE;
+
+ switch (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1)) {
+ case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
+ reg &= ~DWC3_GCTL_DSBLCLKGTNG;
+ break;
+ default:
+ dev_dbg(dwc->dev, "No power optimization available\n");
+ }
+
+ /*
+ * WORKAROUND: DWC3 revisions <1.90a have a bug where the
+ * device fails to connect at SuperSpeed and falls back to
+ * high-speed mode, which causes it to enter a
+ * Connect/Disconnect loop.
+ */
+ if (dwc->revision < DWC3_REVISION_190A)
+ reg |= DWC3_GCTL_U2RSTECN;
+
+ dwc3_writel(dwc->regs, DWC3_GCTL, reg);
+
+ ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
if (ret) {
dev_err(dwc->dev, "failed to allocate event buffers\n");
ret = -ENOMEM;
@@ -299,8 +385,6 @@ static int __devinit dwc3_core_init(struct dwc3 *dwc)
goto err1;
}
- dwc3_cache_hwparams(dwc);
-
return 0;
err1:
@@ -320,15 +404,17 @@ static void dwc3_core_exit(struct dwc3 *dwc)
static int __devinit dwc3_probe(struct platform_device *pdev)
{
- const struct platform_device_id *id = platform_get_device_id(pdev);
struct resource *res;
struct dwc3 *dwc;
- void __iomem *regs;
- unsigned int features = id->driver_data;
+
int ret = -ENOMEM;
int irq;
+
+ void __iomem *regs;
void *mem;
+ u8 mode;
+
mem = kzalloc(sizeof(*dwc) + DWC3_ALIGN_MASK, GFP_KERNEL);
if (!mem) {
dev_err(&pdev->dev, "not enough memory\n");
@@ -343,6 +429,8 @@ static int __devinit dwc3_probe(struct platform_device *pdev)
goto err1;
}
+ dwc->res = res;
+
res = request_mem_region(res->start, resource_size(res),
dev_name(&pdev->dev));
if (!res) {
@@ -370,6 +458,17 @@ static int __devinit dwc3_probe(struct platform_device *pdev)
dwc->dev = &pdev->dev;
dwc->irq = irq;
+ if (!strncmp("super", maximum_speed, 5))
+ dwc->maximum_speed = DWC3_DCFG_SUPERSPEED;
+ else if (!strncmp("high", maximum_speed, 4))
+ dwc->maximum_speed = DWC3_DCFG_HIGHSPEED;
+ else if (!strncmp("full", maximum_speed, 4))
+ dwc->maximum_speed = DWC3_DCFG_FULLSPEED1;
+ else if (!strncmp("low", maximum_speed, 3))
+ dwc->maximum_speed = DWC3_DCFG_LOWSPEED;
+ else
+ dwc->maximum_speed = DWC3_DCFG_SUPERSPEED;
+
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
pm_runtime_forbid(&pdev->dev);
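The maximum_speed parameter exists mainly for testing (e.g. "modprobe dwc3 maximum_speed=high"); the strncmp chain above maps the string onto a DWC3_DCFG_* value and defaults to SuperSpeed. A table-driven sketch of the same mapping, for comparison only:

static const struct {
	const char	*name;
	u32		dcfg;
} dwc3_speed_map[] = {
	{ "super",	DWC3_DCFG_SUPERSPEED },
	{ "high",	DWC3_DCFG_HIGHSPEED },
	{ "full",	DWC3_DCFG_FULLSPEED1 },
	{ "low",	DWC3_DCFG_LOWSPEED },
};

static u32 dwc3_speed_from_param(const char *s)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dwc3_speed_map); i++)
		if (!strncmp(s, dwc3_speed_map[i].name,
			     strlen(dwc3_speed_map[i].name)))
			return dwc3_speed_map[i].dcfg;
	return DWC3_DCFG_SUPERSPEED;	/* default, as above */
}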
@@ -380,13 +479,44 @@ static int __devinit dwc3_probe(struct platform_device *pdev)
goto err3;
}
- if (features & DWC3_HAS_PERIPHERAL) {
+ mode = DWC3_MODE(dwc->hwparams.hwparams0);
+
+ switch (mode) {
+ case DWC3_MODE_DEVICE:
+ dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_DEVICE);
ret = dwc3_gadget_init(dwc);
if (ret) {
- dev_err(&pdev->dev, "failed to initialized gadget\n");
+ dev_err(&pdev->dev, "failed to initialize gadget\n");
goto err4;
}
+ break;
+ case DWC3_MODE_HOST:
+ dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_HOST);
+ ret = dwc3_host_init(dwc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize host\n");
+ goto err4;
+ }
+ break;
+ case DWC3_MODE_DRD:
+ dwc3_set_mode(dwc, DWC3_GCTL_PRTCAP_OTG);
+ ret = dwc3_host_init(dwc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize host\n");
+ goto err4;
+ }
+
+ ret = dwc3_gadget_init(dwc);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to initialize gadget\n");
+ goto err4;
+ }
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported mode of operation %d\n", mode);
+ goto err4;
}
+ dwc->mode = mode;
ret = dwc3_debugfs_init(dwc);
if (ret) {
@@ -399,8 +529,21 @@ static int __devinit dwc3_probe(struct platform_device *pdev)
return 0;
err5:
- if (features & DWC3_HAS_PERIPHERAL)
+ switch (mode) {
+ case DWC3_MODE_DEVICE:
+ dwc3_gadget_exit(dwc);
+ break;
+ case DWC3_MODE_HOST:
+ dwc3_host_exit(dwc);
+ break;
+ case DWC3_MODE_DRD:
+ dwc3_host_exit(dwc);
dwc3_gadget_exit(dwc);
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
err4:
dwc3_core_exit(dwc);
@@ -420,10 +563,8 @@ err0:
static int __devexit dwc3_remove(struct platform_device *pdev)
{
- const struct platform_device_id *id = platform_get_device_id(pdev);
struct dwc3 *dwc = platform_get_drvdata(pdev);
struct resource *res;
- unsigned int features = id->driver_data;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -432,8 +573,21 @@ static int __devexit dwc3_remove(struct platform_device *pdev)
dwc3_debugfs_exit(dwc);
- if (features & DWC3_HAS_PERIPHERAL)
+ switch (dwc->mode) {
+ case DWC3_MODE_DEVICE:
+ dwc3_gadget_exit(dwc);
+ break;
+ case DWC3_MODE_HOST:
+ dwc3_host_exit(dwc);
+ break;
+ case DWC3_MODE_DRD:
+ dwc3_host_exit(dwc);
dwc3_gadget_exit(dwc);
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
dwc3_core_exit(dwc);
release_mem_region(res->start, resource_size(res));
@@ -443,30 +597,15 @@ static int __devexit dwc3_remove(struct platform_device *pdev)
return 0;
}
-static const struct platform_device_id dwc3_id_table[] __devinitconst = {
- {
- .name = "dwc3-omap",
- .driver_data = (DWC3_HAS_PERIPHERAL
- | DWC3_HAS_XHCI
- | DWC3_HAS_OTG),
- },
- {
- .name = "dwc3-pci",
- .driver_data = DWC3_HAS_PERIPHERAL,
- },
- { }, /* Terminating Entry */
-};
-MODULE_DEVICE_TABLE(platform, dwc3_id_table);
-
static struct platform_driver dwc3_driver = {
.probe = dwc3_probe,
.remove = __devexit_p(dwc3_remove),
.driver = {
.name = "dwc3",
},
- .id_table = dwc3_id_table,
};
+MODULE_ALIAS("platform:dwc3");
MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("DesignWare USB3 DRD Controller Driver");
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 29a8e16..9e57f8e 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -41,6 +41,7 @@
#include <linux/device.h>
#include <linux/spinlock.h>
+#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
@@ -52,7 +53,6 @@
/* Global constants */
#define DWC3_ENDPOINTS_NUM 32
-#define DWC3_EVENT_BUFFERS_NUM 2
#define DWC3_EVENT_BUFFERS_SIZE PAGE_SIZE
#define DWC3_EVENT_TYPE_MASK 0xfe
@@ -153,6 +153,7 @@
#define DWC3_GCTL_CLK_PIPEHALF (2)
#define DWC3_GCTL_CLK_MASK (3)
+#define DWC3_GCTL_PRTCAP(n) (((n) & (3 << 12)) >> 12)
#define DWC3_GCTL_PRTCAPDIR(n) (n << 12)
#define DWC3_GCTL_PRTCAP_HOST 1
#define DWC3_GCTL_PRTCAP_DEVICE 2
@@ -347,6 +348,7 @@ struct dwc3_ep {
u32 free_slot;
u32 busy_slot;
const struct usb_endpoint_descriptor *desc;
+ const struct usb_ss_ep_comp_descriptor *comp_desc;
struct dwc3 *dwc;
unsigned flags;
@@ -536,6 +538,31 @@ struct dwc3_hwparams {
u32 hwparams8;
};
+/* HWPARAMS0 */
+#define DWC3_MODE(n) ((n) & 0x7)
+
+#define DWC3_MODE_DEVICE 0
+#define DWC3_MODE_HOST 1
+#define DWC3_MODE_DRD 2
+#define DWC3_MODE_HUB 3
+
+/* HWPARAMS1 */
+#define DWC3_NUM_INT(n) (((n) & (0x3f << 15)) >> 15)
+
+struct dwc3_request {
+ struct usb_request request;
+ struct list_head list;
+ struct dwc3_ep *dep;
+
+ u8 epnum;
+ struct dwc3_trb_hw *trb;
+ dma_addr_t trb_dma;
+
+ unsigned direction:1;
+ unsigned mapped:1;
+ unsigned queued:1;
+};
+
/**
* struct dwc3 - representation of our controller
* @ctrl_req: usb control request which is used for ep0
@@ -549,19 +576,24 @@ struct dwc3_hwparams {
* @ep0_bounce_addr: dma address of ep0_bounce
* @lock: for synchronizing
* @dev: pointer to our struct device
+ * @xhci: pointer to our xHCI child
* @event_buffer_list: a list of event buffers
* @gadget: device side representation of the peripheral controller
* @gadget_driver: pointer to the gadget driver
* @regs: base address for our registers
* @regs_size: address space size
* @irq: IRQ number
+ * @num_event_buffers: calculated number of event buffers
+ * @u1u2: only used on revisions <1.83a as a workaround
+ * @maximum_speed: maximum speed requested (mainly for testing purposes)
* @revision: revision register contents
+ * @mode: mode of operation
* @is_selfpowered: true when we are selfpowered
* @three_stage_setup: set if we perform a three phase setup
- * @ep0_status_pending: ep0 status response without a req is pending
* @ep0_bounced: true when we used bounce buffer
* @ep0_expect_in: true when we expect a DATA IN transfer
* @start_config_issued: true when StartConfig command has been issued
+ * @setup_packet_pending: true when there's a Setup Packet in FIFO. Workaround
* @ep0_next_event: hold the next expected event
* @ep0state: state of endpoint zero
* @link_state: link state
@@ -579,12 +611,15 @@ struct dwc3 {
dma_addr_t ep0_trb_addr;
dma_addr_t setup_buf_addr;
dma_addr_t ep0_bounce_addr;
- struct usb_request ep0_usb_req;
+ struct dwc3_request ep0_usb_req;
/* device lock */
spinlock_t lock;
struct device *dev;
- struct dwc3_event_buffer *ev_buffs[DWC3_EVENT_BUFFERS_NUM];
+ struct platform_device *xhci;
+ struct resource *res;
+
+ struct dwc3_event_buffer **ev_buffs;
struct dwc3_ep *eps[DWC3_ENDPOINTS_NUM];
struct usb_gadget gadget;
@@ -595,7 +630,11 @@ struct dwc3 {
int irq;
+ u32 num_event_buffers;
+ u32 u1u2;
+ u32 maximum_speed;
u32 revision;
+ u32 mode;
#define DWC3_REVISION_173A 0x5533173a
#define DWC3_REVISION_175A 0x5533175a
@@ -607,10 +646,11 @@ struct dwc3 {
unsigned is_selfpowered:1;
unsigned three_stage_setup:1;
- unsigned ep0_status_pending:1;
unsigned ep0_bounced:1;
unsigned ep0_expect_in:1;
unsigned start_config_issued:1;
+ unsigned setup_packet_pending:1;
+ unsigned delayed_status:1;
enum dwc3_ep0_next ep0_next_event;
enum dwc3_ep0_state ep0state;
@@ -765,4 +805,16 @@ union dwc3_event {
#define DWC3_HAS_XHCI BIT(1)
#define DWC3_HAS_OTG BIT(3)
+/* prototypes */
+void dwc3_set_mode(struct dwc3 *dwc, u32 mode);
+
+int dwc3_host_init(struct dwc3 *dwc);
+void dwc3_host_exit(struct dwc3 *dwc);
+
+int dwc3_gadget_init(struct dwc3 *dwc);
+void dwc3_gadget_exit(struct dwc3 *dwc);
+
+extern int dwc3_get_device_id(void);
+extern void dwc3_put_device_id(int id);
+
#endif /* __DRIVERS_USB_DWC3_CORE_H */
diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c
index da1ad77..433c97c 100644
--- a/drivers/usb/dwc3/debugfs.c
+++ b/drivers/usb/dwc3/debugfs.c
@@ -44,17 +44,12 @@
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
-
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
#include "core.h"
#include "gadget.h"
#include "io.h"
-
-struct dwc3_register {
- const char *name;
- u32 offset;
-};
+#include "debug.h"
#define dump_register(nm) \
{ \
@@ -62,7 +57,7 @@ struct dwc3_register {
.offset = DWC3_ ##nm, \
}
-static const struct dwc3_register dwc3_regs[] = {
+static const struct debugfs_reg32 dwc3_regs[] = {
dump_register(GSBUSCFG0),
dump_register(GSBUSCFG1),
dump_register(GTXTHRCFG),
@@ -382,15 +377,10 @@ static const struct dwc3_register dwc3_regs[] = {
static int dwc3_regdump_show(struct seq_file *s, void *unused)
{
struct dwc3 *dwc = s->private;
- int i;
seq_printf(s, "DesignWare USB3 Core Register Dump\n");
-
- for (i = 0; i < ARRAY_SIZE(dwc3_regs); i++) {
- seq_printf(s, "%-20s : %08x\n", dwc3_regs[i].name,
- dwc3_readl(dwc->regs, dwc3_regs[i].offset));
- }
-
+ debugfs_print_regs32(s, dwc3_regs, ARRAY_SIZE(dwc3_regs),
+ dwc->regs, "");
return 0;
}
@@ -405,6 +395,75 @@ static const struct file_operations dwc3_regdump_fops = {
.release = single_release,
};
+static int dwc3_mode_show(struct seq_file *s, void *unused)
+{
+ struct dwc3 *dwc = s->private;
+ unsigned long flags;
+ u32 reg;
+
+ spin_lock_irqsave(&dwc->lock, flags);
+ reg = dwc3_readl(dwc->regs, DWC3_GCTL);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+
+ switch (DWC3_GCTL_PRTCAP(reg)) {
+ case DWC3_GCTL_PRTCAP_HOST:
+ seq_printf(s, "host\n");
+ break;
+ case DWC3_GCTL_PRTCAP_DEVICE:
+ seq_printf(s, "device\n");
+ break;
+ case DWC3_GCTL_PRTCAP_OTG:
+ seq_printf(s, "OTG\n");
+ break;
+ default:
+ seq_printf(s, "UNKNOWN %08x\n", DWC3_GCTL_PRTCAP(reg));
+ }
+
+ return 0;
+}
+
+static int dwc3_mode_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dwc3_mode_show, inode->i_private);
+}
+
+static ssize_t dwc3_mode_write(struct file *file,
+ const char __user *ubuf, size_t count, loff_t *ppos)
+{
+ struct seq_file *s = file->private_data;
+ struct dwc3 *dwc = s->private;
+ unsigned long flags;
+ u32 mode = 0;
+ char buf[32];
+
+ if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+ return -EFAULT;
+
+ if (!strncmp(buf, "host", 4))
+ mode |= DWC3_GCTL_PRTCAP_HOST;
+
+ if (!strncmp(buf, "device", 6))
+ mode |= DWC3_GCTL_PRTCAP_DEVICE;
+
+ if (!strncmp(buf, "otg", 3))
+ mode |= DWC3_GCTL_PRTCAP_OTG;
+
+ if (mode) {
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc3_set_mode(dwc, mode);
+ spin_unlock_irqrestore(&dwc->lock, flags);
+ }
+ return count;
+}
+
+static const struct file_operations dwc3_mode_fops = {
+ .open = dwc3_mode_open,
+ .write = dwc3_mode_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
int __devinit dwc3_debugfs_init(struct dwc3 *dwc)
{
struct dentry *root;
@@ -412,7 +471,7 @@ int __devinit dwc3_debugfs_init(struct dwc3 *dwc)
int ret;
root = debugfs_create_dir(dev_name(dwc->dev), NULL);
- if (IS_ERR(root)){
+ if (IS_ERR(root)) {
ret = PTR_ERR(root);
goto err0;
}
@@ -425,6 +484,14 @@ int __devinit dwc3_debugfs_init(struct dwc3 *dwc)
ret = PTR_ERR(file);
goto err1;
}
+
+ file = debugfs_create_file("mode", S_IRUGO | S_IWUSR, root,
+ dwc, &dwc3_mode_fops);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto err1;
+ }
+
return 0;
err1:
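
The new "mode" file accepts the strings "host", "device" or "otg" and forwards
the matching PRTCAP value to dwc3_set_mode(). A minimal userspace sketch that
exercises it; the path is an assumption based on dev_name() of the "dwc3"
platform device (e.g. dwc3.0) and debugfs being mounted at /sys/kernel/debug:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical path; depends on device name and debugfs mount point */
	int fd = open("/sys/kernel/debug/dwc3.0/mode", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* switch the controller's port capability over to host */
	if (write(fd, "host", 4) != 4)
		perror("write");

	close(fd);
	return 0;
}
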
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 062552b..3274ac8 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -48,6 +48,7 @@
#include <linux/io.h>
#include <linux/module.h>
+#include "core.h"
#include "io.h"
/*
@@ -200,6 +201,7 @@ static int __devinit dwc3_omap_probe(struct platform_device *pdev)
struct dwc3_omap *omap;
struct resource *res;
+ int devid;
int ret = -ENOMEM;
int irq;
@@ -236,16 +238,20 @@ static int __devinit dwc3_omap_probe(struct platform_device *pdev)
goto err1;
}
- dwc3 = platform_device_alloc("dwc3-omap", -1);
+ devid = dwc3_get_device_id();
+ if (devid < 0)
+ goto err2;
+
+ dwc3 = platform_device_alloc("dwc3", devid);
if (!dwc3) {
dev_err(&pdev->dev, "couldn't allocate dwc3 device\n");
- goto err2;
+ goto err3;
}
context = kzalloc(resource_size(res), GFP_KERNEL);
if (!context) {
dev_err(&pdev->dev, "couldn't allocate dwc3 context memory\n");
- goto err3;
+ goto err4;
}
spin_lock_init(&omap->lock);
@@ -299,7 +305,7 @@ static int __devinit dwc3_omap_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev, "failed to request IRQ #%d --> %d\n",
omap->irq, ret);
- goto err4;
+ goto err5;
}
/* enable all IRQs */
@@ -322,26 +328,29 @@ static int __devinit dwc3_omap_probe(struct platform_device *pdev)
pdev->num_resources);
if (ret) {
dev_err(&pdev->dev, "couldn't add resources to dwc3 device\n");
- goto err5;
+ goto err6;
}
ret = platform_device_add(dwc3);
if (ret) {
dev_err(&pdev->dev, "failed to register dwc3 device\n");
- goto err5;
+ goto err6;
}
return 0;
-err5:
+err6:
free_irq(omap->irq, omap);
-err4:
+err5:
kfree(omap->context);
-err3:
+err4:
platform_device_put(dwc3);
+err3:
+ dwc3_put_device_id(devid);
+
err2:
iounmap(base);
@@ -358,6 +367,7 @@ static int __devexit dwc3_omap_remove(struct platform_device *pdev)
platform_device_unregister(omap->dwc3);
+ dwc3_put_device_id(omap->dwc3->id);
free_irq(omap->irq, omap);
iounmap(omap->base);
@@ -384,18 +394,9 @@ static struct platform_driver dwc3_omap_driver = {
},
};
+module_platform_driver(dwc3_omap_driver);
+
+MODULE_ALIAS("platform:omap-dwc3");
MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("DesignWare USB3 OMAP Glue Layer");
-
-static int __devinit dwc3_omap_init(void)
-{
- return platform_driver_register(&dwc3_omap_driver);
-}
-module_init(dwc3_omap_init);
-
-static void __exit dwc3_omap_exit(void)
-{
- platform_driver_unregister(&dwc3_omap_driver);
-}
-module_exit(dwc3_omap_exit);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index f77c000..64e1f7c 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -42,52 +42,17 @@
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include "core.h"
+
/* FIXME define these in <linux/pci_ids.h> */
#define PCI_VENDOR_ID_SYNOPSYS 0x16c3
#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd
-#define DWC3_PCI_DEVS_POSSIBLE 32
-
struct dwc3_pci {
struct device *dev;
struct platform_device *dwc3;
};
-static DECLARE_BITMAP(dwc3_pci_devs, DWC3_PCI_DEVS_POSSIBLE);
-
-static int dwc3_pci_get_device_id(struct dwc3_pci *glue)
-{
- int id;
-
-again:
- id = find_first_zero_bit(dwc3_pci_devs, DWC3_PCI_DEVS_POSSIBLE);
- if (id < DWC3_PCI_DEVS_POSSIBLE) {
- int old;
-
- old = test_and_set_bit(id, dwc3_pci_devs);
- if (old)
- goto again;
- } else {
- dev_err(glue->dev, "no space for new device\n");
- id = -ENOMEM;
- }
-
- return 0;
-}
-
-static void dwc3_pci_put_device_id(struct dwc3_pci *glue, int id)
-{
- int ret;
-
- if (id < 0)
- return;
-
- ret = test_bit(id, dwc3_pci_devs);
- WARN(!ret, "Device: %s\nID %d not in use\n",
- dev_driver_string(glue->dev), id);
- clear_bit(id, dwc3_pci_devs);
-}
-
static int __devinit dwc3_pci_probe(struct pci_dev *pci,
const struct pci_device_id *id)
{
@@ -114,11 +79,11 @@ static int __devinit dwc3_pci_probe(struct pci_dev *pci,
pci_set_power_state(pci, PCI_D0);
pci_set_master(pci);
- devid = dwc3_pci_get_device_id(glue);
+ devid = dwc3_get_device_id();
if (devid < 0)
goto err2;
- dwc3 = platform_device_alloc("dwc3-pci", devid);
+ dwc3 = platform_device_alloc("dwc3", devid);
if (!dwc3) {
dev_err(&pci->dev, "couldn't allocate dwc3 device\n");
goto err3;
@@ -163,13 +128,13 @@ err4:
platform_device_put(dwc3);
err3:
- dwc3_pci_put_device_id(glue, devid);
+ dwc3_put_device_id(devid);
err2:
pci_disable_device(pci);
err1:
- kfree(pci);
+ kfree(glue);
err0:
return ret;
@@ -179,7 +144,7 @@ static void __devexit dwc3_pci_remove(struct pci_dev *pci)
{
struct dwc3_pci *glue = pci_get_drvdata(pci);
- dwc3_pci_put_device_id(glue, glue->dwc3->id);
+ dwc3_put_device_id(glue->dwc3->id);
platform_device_unregister(glue->dwc3);
pci_set_drvdata(pci, NULL);
pci_disable_device(pci);
@@ -196,7 +161,7 @@ static DEFINE_PCI_DEVICE_TABLE(dwc3_pci_id_table) = {
MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
static struct pci_driver dwc3_pci_driver = {
- .name = "pci-dwc3",
+ .name = "dwc3-pci",
.id_table = dwc3_pci_id_table,
.probe = dwc3_pci_probe,
.remove = __devexit_p(dwc3_pci_remove),
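
With the per-glue bitmap allocator removed above, both the PCI and OMAP glue
layers now share the dwc3_get_device_id()/dwc3_put_device_id() helpers declared
in core.h; their implementation (presumably in core.c) is not visible in this
hunk. A minimal sketch along the lines of the removed code, also returning the
allocated id rather than the old code's unconditional 0:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/kernel.h>

#define DWC3_DEVS_POSSIBLE	32

static DECLARE_BITMAP(dwc3_devs, DWC3_DEVS_POSSIBLE);

int dwc3_get_device_id(void)
{
	int id;

again:
	id = find_first_zero_bit(dwc3_devs, DWC3_DEVS_POSSIBLE);
	if (id < DWC3_DEVS_POSSIBLE) {
		/* lost a race against another caller? try again */
		if (test_and_set_bit(id, dwc3_devs))
			goto again;
	} else {
		id = -ENOMEM;
	}

	return id;
}

void dwc3_put_device_id(int id)
{
	if (id < 0)
		return;

	WARN(!test_bit(id, dwc3_devs), "ID %d not in use\n", id);
	clear_bit(id, dwc3_devs);
}
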
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 69a4e43..c8df1dd 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -48,13 +48,13 @@
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
+#include <linux/usb/composite.h>
#include "core.h"
#include "gadget.h"
#include "io.h"
-static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
- const struct dwc3_event_depevt *event);
+static void dwc3_ep0_do_control_status(struct dwc3 *dwc, u32 epnum);
static const char *dwc3_ep0_state_string(enum dwc3_ep0_state state)
{
@@ -125,6 +125,7 @@ static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
struct dwc3_request *req)
{
+ struct dwc3 *dwc = dep->dwc;
int ret = 0;
req->request.actual = 0;
@@ -143,28 +144,27 @@ static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
* IRQ we were waiting for is long gone.
*/
if (dep->flags & DWC3_EP_PENDING_REQUEST) {
- struct dwc3 *dwc = dep->dwc;
unsigned direction;
- u32 type;
direction = !!(dep->flags & DWC3_EP0_DIR_IN);
- if (dwc->ep0state == EP0_STATUS_PHASE) {
- type = dwc->three_stage_setup
- ? DWC3_TRBCTL_CONTROL_STATUS3
- : DWC3_TRBCTL_CONTROL_STATUS2;
- } else if (dwc->ep0state == EP0_DATA_PHASE) {
- type = DWC3_TRBCTL_CONTROL_DATA;
- } else {
- /* should never happen */
- WARN_ON(1);
+ if (dwc->ep0state != EP0_DATA_PHASE) {
+ dev_WARN(dwc->dev, "Unexpected pending request\n");
return 0;
}
ret = dwc3_ep0_start_trans(dwc, direction,
- req->request.dma, req->request.length, type);
+ req->request.dma, req->request.length,
+ DWC3_TRBCTL_CONTROL_DATA);
dep->flags &= ~(DWC3_EP_PENDING_REQUEST |
DWC3_EP0_DIR_IN);
+ } else if (dwc->delayed_status) {
+ dwc->delayed_status = false;
+
+ if (dwc->ep0state == EP0_STATUS_PHASE)
+ dwc3_ep0_do_control_status(dwc, 1);
+ else
+ dev_dbg(dwc->dev, "too early for delayed status\n");
}
return ret;
@@ -190,9 +190,7 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
}
/* we share one TRB for ep0/1 */
- if (!list_empty(&dwc->eps[0]->request_list) ||
- !list_empty(&dwc->eps[1]->request_list) ||
- dwc->ep0_status_pending) {
+ if (!list_empty(&dep->request_list)) {
ret = -EBUSY;
goto out;
}
@@ -214,8 +212,9 @@ static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
struct dwc3_ep *dep = dwc->eps[0];
/* stall is always issued on EP0 */
- __dwc3_gadget_ep_set_halt(dwc->eps[0], 1);
- dwc->eps[0]->flags = DWC3_EP_ENABLED;
+ __dwc3_gadget_ep_set_halt(dep, 1);
+ dep->flags = DWC3_EP_ENABLED;
+ dwc->delayed_status = false;
if (!list_empty(&dep->request_list)) {
struct dwc3_request *req;
@@ -254,17 +253,14 @@ static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le)
return NULL;
}
-static void dwc3_ep0_send_status_response(struct dwc3 *dwc)
+static void dwc3_ep0_status_cmpl(struct usb_ep *ep, struct usb_request *req)
{
- dwc3_ep0_start_trans(dwc, 1, dwc->setup_buf_addr,
- dwc->ep0_usb_req.length,
- DWC3_TRBCTL_CONTROL_DATA);
}
-
/*
* ch 9.4.5
*/
-static int dwc3_ep0_handle_status(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
+static int dwc3_ep0_handle_status(struct dwc3 *dwc,
+ struct usb_ctrlrequest *ctrl)
{
struct dwc3_ep *dep;
u32 recip;
@@ -291,7 +287,7 @@ static int dwc3_ep0_handle_status(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl
case USB_RECIP_ENDPOINT:
dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
if (!dep)
- return -EINVAL;
+ return -EINVAL;
if (dep->flags & DWC3_EP_STALL)
usb_status = 1 << USB_ENDPOINT_HALT;
@@ -302,10 +298,14 @@ static int dwc3_ep0_handle_status(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl
response_pkt = (__le16 *) dwc->setup_buf;
*response_pkt = cpu_to_le16(usb_status);
- dwc->ep0_usb_req.length = sizeof(*response_pkt);
- dwc->ep0_status_pending = 1;
- return 0;
+ dep = dwc->eps[0];
+ dwc->ep0_usb_req.dep = dep;
+ dwc->ep0_usb_req.request.length = sizeof(*response_pkt);
+ dwc->ep0_usb_req.request.dma = dwc->setup_buf_addr;
+ dwc->ep0_usb_req.request.complete = dwc3_ep0_status_cmpl;
+
+ return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}
static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
@@ -396,8 +396,7 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
case USB_RECIP_ENDPOINT:
switch (wValue) {
case USB_ENDPOINT_HALT:
-
- dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
+ dep = dwc3_wIndex_to_dep(dwc, wIndex);
if (!dep)
return -EINVAL;
ret = __dwc3_gadget_ep_set_halt(dep, set);
@@ -422,8 +421,15 @@ static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
u32 reg;
addr = le16_to_cpu(ctrl->wValue);
- if (addr > 127)
+ if (addr > 127) {
+ dev_dbg(dwc->dev, "invalid device address %d\n", addr);
return -EINVAL;
+ }
+
+ if (dwc->dev_state == DWC3_CONFIGURED_STATE) {
+ dev_dbg(dwc->dev, "trying to set address when configured\n");
+ return -EINVAL;
+ }
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
reg &= ~(DWC3_DCFG_DEVADDR_MASK);
@@ -473,8 +479,10 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
if (!cfg)
dwc->dev_state = DWC3_ADDRESS_STATE;
break;
+ default:
+ ret = -EINVAL;
}
- return 0;
+ return ret;
}
static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
@@ -537,6 +545,9 @@ static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
else
ret = dwc3_ep0_delegate_req(dwc, ctrl);
+ if (ret == USB_GADGET_DELAYED_STATUS)
+ dwc->delayed_status = true;
+
if (ret >= 0)
return;
@@ -550,27 +561,21 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
struct dwc3_request *r = NULL;
struct usb_request *ur;
struct dwc3_trb trb;
- struct dwc3_ep *dep;
+ struct dwc3_ep *ep0;
u32 transferred;
u8 epnum;
epnum = event->endpoint_number;
- dep = dwc->eps[epnum];
+ ep0 = dwc->eps[0];
dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
- if (!dwc->ep0_status_pending) {
- r = next_request(&dwc->eps[0]->request_list);
- ur = &r->request;
- } else {
- ur = &dwc->ep0_usb_req;
- dwc->ep0_status_pending = 0;
- }
+ r = next_request(&ep0->request_list);
+ ur = &r->request;
dwc3_trb_to_nat(dwc->ep0_trb, &trb);
if (dwc->ep0_bounced) {
- struct dwc3_ep *ep0 = dwc->eps[0];
transferred = min_t(u32, ur->length,
ep0->endpoint.maxpacket - trb.length);
@@ -591,7 +596,7 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
 	 * seems to be the case when req.length > maxpacket. Could it be?
*/
if (r)
- dwc3_gadget_giveback(dep, r, 0);
+ dwc3_gadget_giveback(ep0, r, 0);
}
}
@@ -619,6 +624,7 @@ static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
struct dwc3_ep *dep = dwc->eps[event->endpoint_number];
dep->flags &= ~DWC3_EP_BUSY;
+ dwc->setup_packet_pending = false;
switch (dwc->ep0state) {
case EP0_SETUP_PHASE:
@@ -643,7 +649,6 @@ static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
static void dwc3_ep0_do_control_setup(struct dwc3 *dwc,
const struct dwc3_event_depevt *event)
{
- dwc->ep0state = EP0_SETUP_PHASE;
dwc3_ep0_out_start(dwc);
}
@@ -655,12 +660,6 @@ static void dwc3_ep0_do_control_data(struct dwc3 *dwc,
int ret;
dep = dwc->eps[0];
- dwc->ep0state = EP0_DATA_PHASE;
-
- if (dwc->ep0_status_pending) {
- dwc3_ep0_send_status_response(dwc);
- return;
- }
if (list_empty(&dep->request_list)) {
dev_vdbg(dwc->dev, "pending request for EP0 Data phase\n");
@@ -674,7 +673,6 @@ static void dwc3_ep0_do_control_data(struct dwc3 *dwc,
req = next_request(&dep->request_list);
req->direction = !!event->endpoint_number;
- dwc->ep0state = EP0_DATA_PHASE;
if (req->request.length == 0) {
ret = dwc3_ep0_start_trans(dwc, event->endpoint_number,
dwc->ctrl_req_addr, 0,
@@ -706,35 +704,79 @@ static void dwc3_ep0_do_control_data(struct dwc3 *dwc,
WARN_ON(ret < 0);
}
-static void dwc3_ep0_do_control_status(struct dwc3 *dwc,
- const struct dwc3_event_depevt *event)
+static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
{
+ struct dwc3 *dwc = dep->dwc;
u32 type;
- int ret;
-
- dwc->ep0state = EP0_STATUS_PHASE;
type = dwc->three_stage_setup ? DWC3_TRBCTL_CONTROL_STATUS3
: DWC3_TRBCTL_CONTROL_STATUS2;
- ret = dwc3_ep0_start_trans(dwc, event->endpoint_number,
+ return dwc3_ep0_start_trans(dwc, dep->number,
dwc->ctrl_req_addr, 0, type);
+}
- WARN_ON(ret < 0);
+static void dwc3_ep0_do_control_status(struct dwc3 *dwc, u32 epnum)
+{
+ struct dwc3_ep *dep = dwc->eps[epnum];
+
+ WARN_ON(dwc3_ep0_start_control_status(dep));
}
static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
const struct dwc3_event_depevt *event)
{
+ dwc->setup_packet_pending = true;
+
+ /*
+	 * This part is very tricky: If we have just handled
+ * XferNotReady(Setup) and we're now expecting a
+ * XferComplete but, instead, we receive another
+ * XferNotReady(Setup), we should STALL and restart
+ * the state machine.
+ *
+ * In all other cases, we just continue waiting
+ * for the XferComplete event.
+ *
+ * We are a little bit unsafe here because we're
+	 * not trying to ensure that the last event was, indeed,
+ * XferNotReady(Setup).
+ *
+ * Still, we don't expect any condition where that
+ * should happen and, even if it does, it would be
+ * another error condition.
+ */
+ if (dwc->ep0_next_event == DWC3_EP0_COMPLETE) {
+ switch (event->status) {
+ case DEPEVT_STATUS_CONTROL_SETUP:
+ dev_vdbg(dwc->dev, "Unexpected XferNotReady(Setup)\n");
+ dwc3_ep0_stall_and_restart(dwc);
+ break;
+ case DEPEVT_STATUS_CONTROL_DATA:
+ /* FALLTHROUGH */
+ case DEPEVT_STATUS_CONTROL_STATUS:
+ /* FALLTHROUGH */
+ default:
+ dev_vdbg(dwc->dev, "waiting for XferComplete\n");
+ }
+
+ return;
+ }
+
switch (event->status) {
case DEPEVT_STATUS_CONTROL_SETUP:
dev_vdbg(dwc->dev, "Control Setup\n");
+
+ dwc->ep0state = EP0_SETUP_PHASE;
+
dwc3_ep0_do_control_setup(dwc, event);
break;
case DEPEVT_STATUS_CONTROL_DATA:
dev_vdbg(dwc->dev, "Control Data\n");
+ dwc->ep0state = EP0_DATA_PHASE;
+
if (dwc->ep0_next_event != DWC3_EP0_NRDY_DATA) {
dev_vdbg(dwc->dev, "Expected %d got %d\n",
dwc->ep0_next_event,
@@ -764,6 +806,8 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
case DEPEVT_STATUS_CONTROL_STATUS:
dev_vdbg(dwc->dev, "Control Status\n");
+ dwc->ep0state = EP0_STATUS_PHASE;
+
if (dwc->ep0_next_event != DWC3_EP0_NRDY_STATUS) {
dev_vdbg(dwc->dev, "Expected %d got %d\n",
dwc->ep0_next_event,
@@ -772,12 +816,19 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
dwc3_ep0_stall_and_restart(dwc);
return;
}
- dwc3_ep0_do_control_status(dwc, event);
+
+ if (dwc->delayed_status) {
+ WARN_ON_ONCE(event->endpoint_number != 1);
+ dev_vdbg(dwc->dev, "Mass Storage delayed status\n");
+ return;
+ }
+
+ dwc3_ep0_do_control_status(dwc, event->endpoint_number);
}
}
void dwc3_ep0_interrupt(struct dwc3 *dwc,
- const const struct dwc3_event_depevt *event)
+ const struct dwc3_event_depevt *event)
{
u8 epnum = event->endpoint_number;
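
The delayed_status plumbing added in this file pairs with
USB_GADGET_DELAYED_STATUS from <linux/usb/composite.h>: a function driver may
return that value from its setup() callback and finish the control transfer
later, at which point queueing on ep0 ends up in __dwc3_gadget_ep0_queue() and
fires the deferred dwc3_ep0_do_control_status(). A hedged function-driver
sketch, assuming the composite layer's usb_composite_setup_continue() helper
and purely illustrative my_* names:

#include <linux/usb/composite.h>

static struct usb_composite_dev *my_cdev;	/* saved at bind time (assumption) */

static int my_func_setup(struct usb_function *f,
		const struct usb_ctrlrequest *ctrl)
{
	/* kick off some long-running work, then defer the status stage */
	return USB_GADGET_DELAYED_STATUS;
}

/* called later (workqueue, completion, ...) once that work has finished */
static void my_func_work_done(void)
{
	/* queues a zero-length request on ep0, releasing the status phase */
	usb_composite_setup_continue(my_cdev);
}
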
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 25dbd86..064b6e2 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -65,6 +65,22 @@ void dwc3_map_buffer_to_dma(struct dwc3_request *req)
return;
}
+ if (req->request.num_sgs) {
+ int mapped;
+
+ mapped = dma_map_sg(dwc->dev, req->request.sg,
+ req->request.num_sgs,
+ req->direction ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ if (mapped < 0) {
+ dev_err(dwc->dev, "failed to map SGs\n");
+ return;
+ }
+
+ req->request.num_mapped_sgs = mapped;
+ return;
+ }
+
if (req->request.dma == DMA_ADDR_INVALID) {
req->request.dma = dma_map_single(dwc->dev, req->request.buf,
req->request.length, req->direction
@@ -82,6 +98,17 @@ void dwc3_unmap_buffer_from_dma(struct dwc3_request *req)
return;
}
+ if (req->request.num_mapped_sgs) {
+ req->request.dma = DMA_ADDR_INVALID;
+ dma_unmap_sg(dwc->dev, req->request.sg,
+ req->request.num_mapped_sgs,
+ req->direction ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+
+ req->request.num_mapped_sgs = 0;
+ return;
+ }
+
if (req->mapped) {
dma_unmap_single(dwc->dev, req->request.dma,
req->request.length, req->direction
@@ -97,7 +124,11 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
struct dwc3 *dwc = dep->dwc;
if (req->queued) {
- dep->busy_slot++;
+ if (req->request.num_mapped_sgs)
+ dep->busy_slot += req->request.num_mapped_sgs;
+ else
+ dep->busy_slot++;
+
/*
* Skip LINK TRB. We can't use req->trb and check for
* DWC3_TRBCTL_LINK_TRB because it points the TRB we just
@@ -108,6 +139,7 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
dep->busy_slot++;
}
list_del(&req->list);
+ req->trb = NULL;
if (req->request.status == -EINPROGRESS)
req->request.status = status;
@@ -251,7 +283,8 @@ static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
}
static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
- const struct usb_endpoint_descriptor *desc)
+ const struct usb_endpoint_descriptor *desc,
+ const struct usb_ss_ep_comp_descriptor *comp_desc)
{
struct dwc3_gadget_ep_cmd_params params;
@@ -264,7 +297,7 @@ static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
| DWC3_DEPCFG_XFER_NOT_READY_EN;
- if (usb_endpoint_xfer_bulk(desc) && dep->endpoint.max_streams) {
+ if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
| DWC3_DEPCFG_STREAM_EVENT_EN;
dep->stream_capable = true;
@@ -317,7 +350,8 @@ static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
* Caller should take care of locking
*/
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
- const struct usb_endpoint_descriptor *desc)
+ const struct usb_endpoint_descriptor *desc,
+ const struct usb_ss_ep_comp_descriptor *comp_desc)
{
struct dwc3 *dwc = dep->dwc;
u32 reg;
@@ -329,7 +363,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
return ret;
}
- ret = dwc3_gadget_set_ep_config(dwc, dep, desc);
+ ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc);
if (ret)
return ret;
@@ -343,6 +377,7 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
return ret;
dep->desc = desc;
+ dep->comp_desc = comp_desc;
dep->type = usb_endpoint_type(desc);
dep->flags |= DWC3_EP_ENABLED;
@@ -405,6 +440,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
dep->stream_capable = false;
dep->desc = NULL;
+ dep->comp_desc = NULL;
dep->type = 0;
dep->flags = 0;
@@ -473,7 +509,7 @@ static int dwc3_gadget_ep_enable(struct usb_ep *ep,
dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);
spin_lock_irqsave(&dwc->lock, flags);
- ret = __dwc3_gadget_ep_enable(dep, desc);
+ ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc);
spin_unlock_irqrestore(&dwc->lock, flags);
return ret;
@@ -539,6 +575,85 @@ static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
kfree(req);
}
+/**
+ * dwc3_prepare_one_trb - setup one TRB from one request
+ * @dep: endpoint for which this request is prepared
+ * @req: dwc3_request pointer
+ */
+static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
+ struct dwc3_request *req, dma_addr_t dma,
+ unsigned length, unsigned last, unsigned chain)
+{
+ struct dwc3 *dwc = dep->dwc;
+ struct dwc3_trb_hw *trb_hw;
+ struct dwc3_trb trb;
+
+ unsigned int cur_slot;
+
+ dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
+ dep->name, req, (unsigned long long) dma,
+ length, last ? " last" : "",
+ chain ? " chain" : "");
+
+ trb_hw = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
+ cur_slot = dep->free_slot;
+ dep->free_slot++;
+
+ /* Skip the LINK-TRB on ISOC */
+ if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
+ usb_endpoint_xfer_isoc(dep->desc))
+ return;
+
+ memset(&trb, 0, sizeof(trb));
+ if (!req->trb) {
+ dwc3_gadget_move_request_queued(req);
+ req->trb = trb_hw;
+ req->trb_dma = dwc3_trb_dma_offset(dep, trb_hw);
+ }
+
+ if (usb_endpoint_xfer_isoc(dep->desc)) {
+ trb.isp_imi = true;
+ trb.csp = true;
+ } else {
+ trb.chn = chain;
+ trb.lst = last;
+ }
+
+ if (usb_endpoint_xfer_bulk(dep->desc) && dep->stream_capable)
+ trb.sid_sofn = req->request.stream_id;
+
+ switch (usb_endpoint_type(dep->desc)) {
+ case USB_ENDPOINT_XFER_CONTROL:
+ trb.trbctl = DWC3_TRBCTL_CONTROL_SETUP;
+ break;
+
+ case USB_ENDPOINT_XFER_ISOC:
+ trb.trbctl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
+
+ /* IOC every DWC3_TRB_NUM / 4 so we can refill */
+ if (!(cur_slot % (DWC3_TRB_NUM / 4)))
+ trb.ioc = last;
+ break;
+
+ case USB_ENDPOINT_XFER_BULK:
+ case USB_ENDPOINT_XFER_INT:
+ trb.trbctl = DWC3_TRBCTL_NORMAL;
+ break;
+ default:
+ /*
+ * This is only possible with faulty memory because we
+ * checked it already :)
+ */
+ BUG();
+ }
+
+ trb.length = length;
+ trb.bplh = dma;
+ trb.hwo = true;
+
+ dwc3_trb_to_hw(&trb, trb_hw);
+}
+
/*
* dwc3_prepare_trbs - setup TRBs from requests
* @dep: endpoint for which requests are being prepared
@@ -548,18 +663,17 @@ static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
 	 * transfers. The function returns once there are no more TRBs available or
 	 * it runs out of requests.
*/
-static struct dwc3_request *dwc3_prepare_trbs(struct dwc3_ep *dep,
- bool starting)
+static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
{
- struct dwc3_request *req, *n, *ret = NULL;
- struct dwc3_trb_hw *trb_hw;
- struct dwc3_trb trb;
+ struct dwc3_request *req, *n;
u32 trbs_left;
+ unsigned int last_one = 0;
BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);
/* the first request must not be queued */
trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;
+
/*
 	 * if busy & free slot are equal then it is either full or empty. If we
 	 * are starting to process requests then we are empty, otherwise we are full.
@@ -567,7 +681,7 @@ static struct dwc3_request *dwc3_prepare_trbs(struct dwc3_ep *dep,
*/
if (!trbs_left) {
if (!starting)
- return NULL;
+ return;
trbs_left = DWC3_TRB_NUM;
/*
* In case we start from scratch, we queue the ISOC requests
@@ -591,94 +705,62 @@ static struct dwc3_request *dwc3_prepare_trbs(struct dwc3_ep *dep,
/* The last TRB is a link TRB, not used for xfer */
if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->desc))
- return NULL;
+ return;
list_for_each_entry_safe(req, n, &dep->request_list, list) {
- unsigned int last_one = 0;
- unsigned int cur_slot;
+ unsigned length;
+ dma_addr_t dma;
- trb_hw = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
- cur_slot = dep->free_slot;
- dep->free_slot++;
+ if (req->request.num_mapped_sgs > 0) {
+ struct usb_request *request = &req->request;
+ struct scatterlist *sg = request->sg;
+ struct scatterlist *s;
+ int i;
- /* Skip the LINK-TRB on ISOC */
- if (((cur_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
- usb_endpoint_xfer_isoc(dep->desc))
- continue;
+ for_each_sg(sg, s, request->num_mapped_sgs, i) {
+ unsigned chain = true;
- dwc3_gadget_move_request_queued(req);
- memset(&trb, 0, sizeof(trb));
- trbs_left--;
+ length = sg_dma_len(s);
+ dma = sg_dma_address(s);
- /* Is our TRB pool empty? */
- if (!trbs_left)
- last_one = 1;
- /* Is this the last request? */
- if (list_empty(&dep->request_list))
- last_one = 1;
+ if (i == (request->num_mapped_sgs - 1)
+ || sg_is_last(s)) {
+ last_one = true;
+ chain = false;
+ }
- /*
- * FIXME we shouldn't need to set LST bit always but we are
- * facing some weird problem with the Hardware where it doesn't
- * complete even though it has been previously started.
- *
- * While we're debugging the problem, as a workaround to
- * multiple TRBs handling, use only one TRB at a time.
- */
- last_one = 1;
+ trbs_left--;
+ if (!trbs_left)
+ last_one = true;
- req->trb = trb_hw;
- if (!ret)
- ret = req;
+ if (last_one)
+ chain = false;
- trb.bplh = req->request.dma;
+ dwc3_prepare_one_trb(dep, req, dma, length,
+ last_one, chain);
- if (usb_endpoint_xfer_isoc(dep->desc)) {
- trb.isp_imi = true;
- trb.csp = true;
+ if (last_one)
+ break;
+ }
} else {
- trb.lst = last_one;
- }
+ dma = req->request.dma;
+ length = req->request.length;
+ trbs_left--;
- if (usb_endpoint_xfer_bulk(dep->desc) && dep->stream_capable)
- trb.sid_sofn = req->request.stream_id;
-
- switch (usb_endpoint_type(dep->desc)) {
- case USB_ENDPOINT_XFER_CONTROL:
- trb.trbctl = DWC3_TRBCTL_CONTROL_SETUP;
- break;
+ if (!trbs_left)
+ last_one = 1;
- case USB_ENDPOINT_XFER_ISOC:
- trb.trbctl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
+ /* Is this the last request? */
+ if (list_is_last(&req->list, &dep->request_list))
+ last_one = 1;
- /* IOC every DWC3_TRB_NUM / 4 so we can refill */
- if (!(cur_slot % (DWC3_TRB_NUM / 4)))
- trb.ioc = last_one;
- break;
+ dwc3_prepare_one_trb(dep, req, dma, length,
+ last_one, false);
- case USB_ENDPOINT_XFER_BULK:
- case USB_ENDPOINT_XFER_INT:
- trb.trbctl = DWC3_TRBCTL_NORMAL;
- break;
- default:
- /*
- * This is only possible with faulty memory because we
- * checked it already :)
- */
- BUG();
+ if (last_one)
+ break;
}
-
- trb.length = req->request.length;
- trb.hwo = true;
-
- dwc3_trb_to_hw(&trb, trb_hw);
- req->trb_dma = dwc3_trb_dma_offset(dep, trb_hw);
-
- if (last_one)
- break;
}
-
- return ret;
}
static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
@@ -707,11 +789,13 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
/* req points to the first request which will be sent */
req = next_request(&dep->req_queued);
} else {
+ dwc3_prepare_trbs(dep, start_new);
+
/*
* req points to the first request where HWO changed
* from 0 to 1
*/
- req = dwc3_prepare_trbs(dep, start_new);
+ req = next_request(&dep->req_queued);
}
if (!req) {
dep->flags |= DWC3_EP_PENDING_REQUEST;
@@ -745,8 +829,9 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
dep->flags |= DWC3_EP_BUSY;
dep->res_trans_idx = dwc3_gadget_ep_get_transfer_index(dwc,
dep->number);
- if (!dep->res_trans_idx)
- printk_once(KERN_ERR "%s() res_trans_idx is invalid\n", __func__);
+
+ WARN_ON_ONCE(!dep->res_trans_idx);
+
return 0;
}
@@ -1155,35 +1240,9 @@ static int dwc3_gadget_start(struct usb_gadget *g,
dwc->gadget_driver = driver;
dwc->gadget.dev.driver = &driver->driver;
- reg = dwc3_readl(dwc->regs, DWC3_GCTL);
-
- reg &= ~DWC3_GCTL_SCALEDOWN(3);
- reg &= ~DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_OTG);
- reg &= ~DWC3_GCTL_DISSCRAMBLE;
- reg |= DWC3_GCTL_PRTCAPDIR(DWC3_GCTL_PRTCAP_DEVICE);
-
- switch (DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams0)) {
- case DWC3_GHWPARAMS1_EN_PWROPT_CLK:
- reg &= ~DWC3_GCTL_DSBLCLKGTNG;
- break;
- default:
- dev_dbg(dwc->dev, "No power optimization available\n");
- }
-
- /*
- * WORKAROUND: DWC3 revisions <1.90a have a bug
- * when The device fails to connect at SuperSpeed
- * and falls back to high-speed mode which causes
- * the device to enter in a Connect/Disconnect loop
- */
- if (dwc->revision < DWC3_REVISION_190A)
- reg |= DWC3_GCTL_U2RSTECN;
-
- dwc3_writel(dwc->regs, DWC3_GCTL, reg);
-
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
reg &= ~(DWC3_DCFG_SPEED_MASK);
- reg |= DWC3_DCFG_SUPERSPEED;
+ reg |= dwc->maximum_speed;
dwc3_writel(dwc->regs, DWC3_DCFG, reg);
dwc->start_config_issued = false;
@@ -1192,14 +1251,14 @@ static int dwc3_gadget_start(struct usb_gadget *g,
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
dep = dwc->eps[0];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
goto err0;
}
dep = dwc->eps[1];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
goto err1;
@@ -1290,11 +1349,10 @@ static int __devinit dwc3_gadget_init_endpoints(struct dwc3 *dwc)
&dwc->gadget.ep_list);
ret = dwc3_alloc_trb_pool(dep);
- if (ret) {
- dev_err(dwc->dev, "%s: failed to allocate TRB pool\n", dep->name);
+ if (ret)
return ret;
- }
}
+
INIT_LIST_HEAD(&dep->request_list);
INIT_LIST_HEAD(&dep->req_queued);
}
@@ -1334,8 +1392,10 @@ static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
do {
req = next_request(&dep->req_queued);
- if (!req)
- break;
+ if (!req) {
+ WARN_ON_ONCE(1);
+ return 1;
+ }
dwc3_trb_to_nat(req->trb, &trb);
@@ -1400,6 +1460,31 @@ static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
dep->flags &= ~DWC3_EP_BUSY;
dep->res_trans_idx = 0;
}
+
+ /*
+ * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
+ * See dwc3_gadget_linksts_change_interrupt() for 1st half.
+ */
+ if (dwc->revision < DWC3_REVISION_183A) {
+ u32 reg;
+ int i;
+
+ for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
+ struct dwc3_ep *dep = dwc->eps[i];
+
+ if (!(dep->flags & DWC3_EP_ENABLED))
+ continue;
+
+ if (!list_empty(&dep->req_queued))
+ return;
+ }
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ reg |= dwc->u1u2;
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+
+ dwc->u1u2 = 0;
+ }
}
static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
@@ -1639,6 +1724,7 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
dwc->start_config_issued = false;
dwc->gadget.speed = USB_SPEED_UNKNOWN;
+ dwc->setup_packet_pending = false;
}
static void dwc3_gadget_usb3_phy_power(struct dwc3 *dwc, int on)
@@ -1675,6 +1761,40 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
dev_vdbg(dwc->dev, "%s\n", __func__);
+ /*
+ * WORKAROUND: DWC3 revisions <1.88a have an issue which
+ * would cause a missing Disconnect Event if there's a
+ * pending Setup Packet in the FIFO.
+ *
+ * There's no suggested workaround on the official Bug
+ * report, which states that "unless the driver/application
+ * is doing any special handling of a disconnect event,
+ * there is no functional issue".
+ *
+ * Unfortunately, it turns out that we _do_ some special
+ * handling of a disconnect event, namely complete all
+ * pending transfers, notify gadget driver of the
+ * disconnection, and so on.
+ *
+ * Our suggested workaround is to follow the Disconnect
+ * Event steps here, instead, based on a setup_packet_pending
+ * flag. Such flag gets set whenever we have a XferNotReady
+ * event on EP0 and gets cleared on XferComplete for the
+ * same endpoint.
+ *
+ * Refers to:
+ *
+ * STAR#9000466709: RTL: Device : Disconnect event not
+ * generated if setup packet pending in FIFO
+ */
+ if (dwc->revision < DWC3_REVISION_188A) {
+ if (dwc->setup_packet_pending)
+ dwc3_gadget_disconnect_interrupt(dwc);
+ }
+
+ /* after reset -> Default State */
+ dwc->dev_state = DWC3_DEFAULT_STATE;
+
/* Enable PHYs */
dwc3_gadget_usb2_phy_power(dwc, true);
dwc3_gadget_usb3_phy_power(dwc, true);
@@ -1755,6 +1875,22 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
switch (speed) {
case DWC3_DCFG_SUPERSPEED:
+ /*
+ * WORKAROUND: DWC3 revisions <1.90a have an issue which
+ * would cause a missing USB3 Reset event.
+ *
+ * In such situations, we should force a USB3 Reset
+ * event by calling our dwc3_gadget_reset_interrupt()
+ * routine.
+ *
+ * Refers to:
+ *
+ * STAR#9000483510: RTL: SS : USB3 reset event may
+ * not be generated always when the link enters poll
+ */
+ if (dwc->revision < DWC3_REVISION_190A)
+ dwc3_gadget_reset_interrupt(dwc);
+
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
dwc->gadget.ep0->maxpacket = 512;
dwc->gadget.speed = USB_SPEED_SUPER;
@@ -1781,14 +1917,14 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
dwc3_gadget_disable_phy(dwc, dwc->gadget.speed);
dep = dwc->eps[0];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
return;
}
dep = dwc->eps[1];
- ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc);
+ ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL);
if (ret) {
dev_err(dwc->dev, "failed to enable %s\n", dep->name);
return;
@@ -1818,8 +1954,55 @@ static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
unsigned int evtinfo)
{
- /* The fith bit says SuperSpeed yes or no. */
- dwc->link_state = evtinfo & DWC3_LINK_STATE_MASK;
+ enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
+
+ /*
+	 * WORKAROUND: DWC3 Revisions <1.83a have an issue where, depending
+	 * on the link partner, the USB session might do multiple entries/exits
+	 * of low power states before a transfer takes place.
+ *
+ * Due to this problem, we might experience lower throughput. The
+ * suggested workaround is to disable DCTL[12:9] bits if we're
+ * transitioning from U1/U2 to U0 and enable those bits again
+ * after a transfer completes and there are no pending transfers
+ * on any of the enabled endpoints.
+ *
+ * This is the first half of that workaround.
+ *
+ * Refers to:
+ *
+ * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
+ * core send LGO_Ux entering U0
+ */
+ if (dwc->revision < DWC3_REVISION_183A) {
+ if (next == DWC3_LINK_STATE_U0) {
+ u32 u1u2;
+ u32 reg;
+
+ switch (dwc->link_state) {
+ case DWC3_LINK_STATE_U1:
+ case DWC3_LINK_STATE_U2:
+ reg = dwc3_readl(dwc->regs, DWC3_DCTL);
+ u1u2 = reg & (DWC3_DCTL_INITU2ENA
+ | DWC3_DCTL_ACCEPTU2ENA
+ | DWC3_DCTL_INITU1ENA
+ | DWC3_DCTL_ACCEPTU1ENA);
+
+ if (!dwc->u1u2)
+ dwc->u1u2 = reg & u1u2;
+
+ reg &= ~u1u2;
+
+ dwc3_writel(dwc->regs, DWC3_DCTL, reg);
+ break;
+ default:
+ /* do nothing */
+ break;
+ }
+ }
+ }
+
+ dwc->link_state = next;
dev_vdbg(dwc->dev, "%s link %d\n", __func__, dwc->link_state);
}
@@ -1925,7 +2108,7 @@ static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
spin_lock(&dwc->lock);
- for (i = 0; i < DWC3_EVENT_BUFFERS_NUM; i++) {
+ for (i = 0; i < dwc->num_event_buffers; i++) {
irqreturn_t status;
status = dwc3_process_event_buf(dwc, i);
@@ -1986,9 +2169,10 @@ int __devinit dwc3_gadget_init(struct dwc3 *dwc)
dev_set_name(&dwc->gadget.dev, "gadget");
dwc->gadget.ops = &dwc3_gadget_ops;
- dwc->gadget.is_dualspeed = true;
+ dwc->gadget.max_speed = USB_SPEED_SUPER;
dwc->gadget.speed = USB_SPEED_UNKNOWN;
dwc->gadget.dev.parent = dwc->dev;
+ dwc->gadget.sg_supported = true;
dma_set_coherent_mask(&dwc->gadget.dev, dwc->dev->coherent_dma_mask);
@@ -2076,7 +2260,6 @@ err0:
void dwc3_gadget_exit(struct dwc3 *dwc)
{
int irq;
- int i;
usb_del_gadget_udc(&dwc->gadget);
irq = platform_get_irq(to_platform_device(dwc->dev), 0);
@@ -2084,9 +2267,6 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
free_irq(irq, dwc);
- for (i = 0; i < ARRAY_SIZE(dwc->eps); i++)
- __dwc3_gadget_ep_disable(dwc->eps[i]);
-
dwc3_gadget_free_endpoints(dwc);
dma_free_coherent(dwc->dev, 512, dwc->ep0_bounce,
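
Because the gadget now advertises sg_supported and dwc3_map_buffer_to_dma()
maps req->sg with dma_map_sg(), a function driver can hand dwc3 a scatterlist
instead of a single flat buffer, and dwc3_prepare_trbs() will emit one TRB per
mapped segment. A small illustrative fragment (the my_* names are
hypothetical):

#include <linux/scatterlist.h>
#include <linux/usb/gadget.h>

static struct scatterlist my_sgl[2];

static int my_queue_two_pages(struct usb_ep *ep, struct usb_request *req,
		struct page *p0, struct page *p1, unsigned len)
{
	sg_init_table(my_sgl, 2);
	sg_set_page(&my_sgl[0], p0, len, 0);
	sg_set_page(&my_sgl[1], p1, len, 0);

	req->sg = my_sgl;
	req->num_sgs = 2;
	req->length = 2 * len;

	/* dwc3 maps the list and chains the resulting TRBs for us */
	return usb_ep_queue(ep, req, GFP_ATOMIC);
}
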
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 71145a44..d97f467 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -79,19 +79,6 @@ struct dwc3_gadget_ep_cmd_params {
/* -------------------------------------------------------------------------- */
-struct dwc3_request {
- struct usb_request request;
- struct list_head list;
- struct dwc3_ep *dep;
-
- u8 epnum;
- struct dwc3_trb_hw *trb;
- dma_addr_t trb_dma;
-
- unsigned direction:1;
- unsigned mapped:1;
- unsigned queued:1;
-};
#define to_dwc3_request(r) (container_of(r, struct dwc3_request, request))
static inline struct dwc3_request *next_request(struct list_head *list)
@@ -110,23 +97,11 @@ static inline void dwc3_gadget_move_request_queued(struct dwc3_request *req)
list_move_tail(&req->list, &dep->req_queued);
}
-#if defined(CONFIG_USB_GADGET_DWC3) || defined(CONFIG_USB_GADGET_DWC3_MODULE)
-int dwc3_gadget_init(struct dwc3 *dwc);
-void dwc3_gadget_exit(struct dwc3 *dwc);
-#else
-static inline int dwc3_gadget_init(struct dwc3 *dwc) { return 0; }
-static inline void dwc3_gadget_exit(struct dwc3 *dwc) { }
-static inline int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
- unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
-{
- return 0;
-}
-#endif
-
void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
int status);
-void dwc3_ep0_interrupt(struct dwc3 *dwc, const struct dwc3_event_depevt *event);
+void dwc3_ep0_interrupt(struct dwc3 *dwc,
+ const struct dwc3_event_depevt *event);
void dwc3_ep0_out_start(struct dwc3 *dwc);
int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
gfp_t gfp_flags);
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
new file mode 100644
index 0000000..7cfe211
--- /dev/null
+++ b/drivers/usb/dwc3/host.c
@@ -0,0 +1,102 @@
+/**
+ * host.c - DesignWare USB3 DRD Controller Host Glue
+ *
+ * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com
+ *
+ * Authors: Felipe Balbi <balbi@ti.com>,
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The names of the above-listed copyright holders may not be used
+ * to endorse or promote products derived from this software without
+ * specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2, as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/platform_device.h>
+
+#include "core.h"
+
+static struct resource generic_resources[] = {
+ {
+ .flags = IORESOURCE_IRQ,
+ },
+ {
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+int dwc3_host_init(struct dwc3 *dwc)
+{
+ struct platform_device *xhci;
+ int ret;
+
+ xhci = platform_device_alloc("xhci", -1);
+ if (!xhci) {
+ dev_err(dwc->dev, "couldn't allocate xHCI device\n");
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ dma_set_coherent_mask(&xhci->dev, dwc->dev->coherent_dma_mask);
+
+ xhci->dev.parent = dwc->dev;
+ xhci->dev.dma_mask = dwc->dev->dma_mask;
+ xhci->dev.dma_parms = dwc->dev->dma_parms;
+
+ dwc->xhci = xhci;
+
+ /* setup resources */
+ generic_resources[0].start = dwc->irq;
+
+ generic_resources[1].start = dwc->res->start;
+ generic_resources[1].end = dwc->res->start + 0x7fff;
+
+ ret = platform_device_add_resources(xhci, generic_resources,
+ ARRAY_SIZE(generic_resources));
+ if (ret) {
+ dev_err(dwc->dev, "couldn't add resources to xHCI device\n");
+ goto err1;
+ }
+
+ ret = platform_device_add(xhci);
+ if (ret) {
+ dev_err(dwc->dev, "failed to register xHCI device\n");
+ goto err1;
+ }
+
+ return 0;
+
+err1:
+ platform_device_put(xhci);
+
+err0:
+ return ret;
+}
+
+void dwc3_host_exit(struct dwc3 *dwc)
+{
+ platform_device_unregister(dwc->xhci);
+}
diff --git a/drivers/usb/dwc3/io.h b/drivers/usb/dwc3/io.h
index bc957db..071d561 100644
--- a/drivers/usb/dwc3/io.h
+++ b/drivers/usb/dwc3/io.h
@@ -39,7 +39,7 @@
#ifndef __DRIVERS_USB_DWC3_IO_H
#define __DRIVERS_USB_DWC3_IO_H
-#include <asm/io.h>
+#include <linux/io.h>
static inline u32 dwc3_readl(void __iomem *base, u32 offset)
{
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 23a4473..7ecb68a 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -15,6 +15,7 @@
menuconfig USB_GADGET
tristate "USB Gadget Support"
+ select NLS
help
USB is a master/slave protocol, organized with one master
host (such as a PC) controlling up to 127 peripheral devices.
@@ -124,7 +125,6 @@ config USB_GADGET_STORAGE_NUM_BUFFERS
#
choice
prompt "USB Peripheral Controller"
- depends on USB_GADGET
help
A USB device uses a controller to talk to its host.
Systems should have only one such upstream link.
@@ -234,7 +234,6 @@ config USB_R8A66597
config USB_RENESAS_USBHS_UDC
tristate 'Renesas USBHS controller'
- depends on SUPERH || ARCH_SHMOBILE
depends on USB_RENESAS_USBHS
select USB_GADGET_DUALSPEED
help
@@ -264,7 +263,6 @@ config USB_PXA27X
config USB_S3C_HSOTG
tristate "S3C HS/OtG USB Device controller"
depends on S3C_DEV_USB_HSOTG
- select USB_GADGET_S3C_HSOTG_PIO
select USB_GADGET_DUALSPEED
help
The Samsung S3C64XX USB2.0 high-speed gadget controller
@@ -310,25 +308,13 @@ config USB_S3C_HSUDC
This driver has been tested on S3C2416 and S3C2450 processors.
-config USB_PXA_U2O
- tristate "PXA9xx Processor USB2.0 controller"
- depends on ARCH_MMP
+config USB_MV_UDC
+ tristate "Marvell USB2.0 Device Controller"
select USB_GADGET_DUALSPEED
help
- PXA9xx Processor series include a high speed USB2.0 device
- controller, which support high speed and full speed USB peripheral.
-
-config USB_GADGET_DWC3
- tristate "DesignWare USB3.0 (DRD) Controller"
- depends on USB_DWC3
- select USB_GADGET_DUALSPEED
- select USB_GADGET_SUPERSPEED
- help
- DesignWare USB3.0 controller is a SuperSpeed USB3.0 Controller
- which can be configured for peripheral-only, host-only, hub-only
- and Dual-Role operation. This Controller was first integrated into
- the OMAP5 series of processors. More information about the OMAP5
- version of this controller, refer to http://www.ti.com/omap5.
+	  Marvell SoCs (including PXA and MMP series) include a high speed
+ USB2.0 OTG controller, which can be configured as high speed or
+ full speed USB peripheral.
#
# Controllers available in both integrated and discrete versions
@@ -544,12 +530,10 @@ endchoice
# Selected by UDC drivers that support high-speed operation.
config USB_GADGET_DUALSPEED
bool
- depends on USB_GADGET
 # Selected by UDC drivers that support super-speed operation
config USB_GADGET_SUPERSPEED
bool
- depends on USB_GADGET
depends on USB_GADGET_DUALSPEED
#
@@ -557,7 +541,6 @@ config USB_GADGET_SUPERSPEED
#
choice
tristate "USB Gadget Drivers"
- depends on USB_GADGET
default USB_ETH
help
A Linux "Gadget Driver" talks to the USB Peripheral Controller
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index b54ac61..b7f6eef 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -27,7 +27,7 @@ obj-$(CONFIG_USB_S3C_HSOTG) += s3c-hsotg.o
obj-$(CONFIG_USB_S3C_HSUDC) += s3c-hsudc.o
obj-$(CONFIG_USB_LANGWELL) += langwell_udc.o
obj-$(CONFIG_USB_EG20T) += pch_udc.o
-obj-$(CONFIG_USB_PXA_U2O) += mv_udc.o
+obj-$(CONFIG_USB_MV_UDC) += mv_udc.o
mv_udc-y := mv_udc_core.o
obj-$(CONFIG_USB_CI13XXX_MSM) += ci13xxx_msm.o
obj-$(CONFIG_USB_FUSB300) += fusb300_udc.o
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
index 45f422a..c16ff55 100644
--- a/drivers/usb/gadget/amd5536udc.c
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -152,15 +152,15 @@ static const char *ep_string[] = {
};
/* DMA usage flag */
-static int use_dma = 1;
+static bool use_dma = 1;
/* packet per buffer dma */
-static int use_dma_ppb = 1;
+static bool use_dma_ppb = 1;
/* with per descr. update */
-static int use_dma_ppb_du;
+static bool use_dma_ppb_du;
/* buffer fill mode */
static int use_dma_bufferfill_mode;
/* full speed only mode */
-static int use_fullspeed;
+static bool use_fullspeed;
/* tx buffer size for high speed */
static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;
@@ -1959,7 +1959,7 @@ static int amd5536_start(struct usb_gadget_driver *driver,
u32 tmp;
if (!driver || !bind || !driver->setup
- || driver->speed < USB_SPEED_HIGH)
+ || driver->max_speed < USB_SPEED_HIGH)
return -EINVAL;
if (!dev)
return -ENODEV;
@@ -3349,7 +3349,7 @@ static int udc_probe(struct udc *dev)
dev_set_name(&dev->gadget.dev, "gadget");
dev->gadget.dev.release = gadget_release;
dev->gadget.name = name;
- dev->gadget.is_dualspeed = 1;
+ dev->gadget.max_speed = USB_SPEED_HIGH;
/* init registers, interrupts, ... */
startup_registers(dev);
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 8efe0fa..143a725 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -1633,7 +1633,7 @@ static int at91_start(struct usb_gadget_driver *driver,
unsigned long flags;
if (!driver
- || driver->speed < USB_SPEED_FULL
+ || driver->max_speed < USB_SPEED_FULL
|| !bind
|| !driver->setup) {
DBG("bad parameter.\n");
@@ -1748,7 +1748,7 @@ static int __init at91udc_probe(struct platform_device *pdev)
/* rm9200 needs manual D+ pullup; off by default */
if (cpu_is_at91rm9200()) {
- if (udc->board.pullup_pin <= 0) {
+ if (gpio_is_valid(udc->board.pullup_pin)) {
DBG("no D+ pullup?\n");
retval = -ENODEV;
goto fail0;
@@ -1815,7 +1815,7 @@ static int __init at91udc_probe(struct platform_device *pdev)
DBG("request irq %d failed\n", udc->udp_irq);
goto fail1;
}
- if (udc->board.vbus_pin > 0) {
+ if (gpio_is_valid(udc->board.vbus_pin)) {
retval = gpio_request(udc->board.vbus_pin, "udc_vbus");
if (retval < 0) {
DBG("request vbus pin failed\n");
@@ -1859,10 +1859,10 @@ static int __init at91udc_probe(struct platform_device *pdev)
INFO("%s version %s\n", driver_name, DRIVER_VERSION);
return 0;
fail4:
- if (udc->board.vbus_pin > 0 && !udc->board.vbus_polled)
+ if (gpio_is_valid(udc->board.vbus_pin) && !udc->board.vbus_polled)
free_irq(udc->board.vbus_pin, udc);
fail3:
- if (udc->board.vbus_pin > 0)
+ if (gpio_is_valid(udc->board.vbus_pin))
gpio_free(udc->board.vbus_pin);
fail2:
free_irq(udc->udp_irq, udc);
@@ -1897,7 +1897,7 @@ static int __exit at91udc_remove(struct platform_device *pdev)
device_init_wakeup(&pdev->dev, 0);
remove_debug_file(udc);
- if (udc->board.vbus_pin > 0) {
+ if (gpio_is_valid(udc->board.vbus_pin)) {
free_irq(udc->board.vbus_pin, udc);
gpio_free(udc->board.vbus_pin);
}
@@ -1941,7 +1941,7 @@ static int at91udc_suspend(struct platform_device *pdev, pm_message_t mesg)
enable_irq_wake(udc->udp_irq);
udc->active_suspend = wake;
- if (udc->board.vbus_pin > 0 && !udc->board.vbus_polled && wake)
+ if (gpio_is_valid(udc->board.vbus_pin) && !udc->board.vbus_polled && wake)
enable_irq_wake(udc->board.vbus_pin);
return 0;
}
@@ -1951,7 +1951,7 @@ static int at91udc_resume(struct platform_device *pdev)
struct at91_udc *udc = platform_get_drvdata(pdev);
unsigned long flags;
- if (udc->board.vbus_pin > 0 && !udc->board.vbus_polled &&
+ if (gpio_is_valid(udc->board.vbus_pin) && !udc->board.vbus_polled &&
udc->active_suspend)
disable_irq_wake(udc->board.vbus_pin);
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index 271a9d8..e2fb6d5 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -1038,7 +1038,7 @@ static struct usba_udc the_udc = {
.gadget = {
.ops = &usba_udc_ops,
.ep_list = LIST_HEAD_INIT(the_udc.gadget.ep_list),
- .is_dualspeed = 1,
+ .max_speed = USB_SPEED_HIGH,
.name = "atmel_usba_udc",
.dev = {
.init_name = "gadget",
diff --git a/drivers/usb/gadget/ci13xxx_udc.c b/drivers/usb/gadget/ci13xxx_udc.c
index 9a0c397..27e3137 100644
--- a/drivers/usb/gadget/ci13xxx_udc.c
+++ b/drivers/usb/gadget/ci13xxx_udc.c
@@ -182,6 +182,16 @@ static inline int hw_ep_bit(int num, int dir)
return num + (dir ? 16 : 0);
}
+static int ep_to_bit(int n)
+{
+ int fill = 16 - hw_ep_max / 2;
+
+ if (n >= hw_ep_max / 2)
+ n += fill;
+
+ return n;
+}
+
/**
* hw_aread: reads from register bitfield
* @addr: address relative to bus map
@@ -440,12 +450,13 @@ static int hw_ep_get_halt(int num, int dir)
/**
* hw_test_and_clear_setup_status: test & clear setup status (execute without
* interruption)
- * @n: bit number (endpoint)
+ * @n: endpoint number
*
* This function returns setup status
*/
static int hw_test_and_clear_setup_status(int n)
{
+ n = ep_to_bit(n);
return hw_ctest_and_clear(CAP_ENDPTSETUPSTAT, BIT(n));
}
@@ -641,12 +652,13 @@ static int hw_register_write(u16 addr, u32 data)
/**
* hw_test_and_clear_complete: test & clear complete status (execute without
* interruption)
- * @n: bit number (endpoint)
+ * @n: endpoint number
*
* This function returns complete status
*/
static int hw_test_and_clear_complete(int n)
{
+ n = ep_to_bit(n);
return hw_ctest_and_clear(CAP_ENDPTCOMPLETE, BIT(n));
}
@@ -754,8 +766,11 @@ static ssize_t show_device(struct device *dev, struct device_attribute *attr,
n += scnprintf(buf + n, PAGE_SIZE - n, "speed = %d\n",
gadget->speed);
+ n += scnprintf(buf + n, PAGE_SIZE - n, "max_speed = %d\n",
+ gadget->max_speed);
+ /* TODO: Scheduled for removal in 3.8. */
n += scnprintf(buf + n, PAGE_SIZE - n, "is_dualspeed = %d\n",
- gadget->is_dualspeed);
+ gadget_is_dualspeed(gadget));
n += scnprintf(buf + n, PAGE_SIZE - n, "is_otg = %d\n",
gadget->is_otg);
n += scnprintf(buf + n, PAGE_SIZE - n, "is_a_peripheral = %d\n",
@@ -798,7 +813,7 @@ static ssize_t show_driver(struct device *dev, struct device_attribute *attr,
n += scnprintf(buf + n, PAGE_SIZE - n, "function = %s\n",
(driver->function ? driver->function : ""));
n += scnprintf(buf + n, PAGE_SIZE - n, "max speed = %d\n",
- driver->speed);
+ driver->max_speed);
return n;
}
@@ -2563,9 +2578,7 @@ static int ci13xxx_start(struct usb_gadget_driver *driver,
if (driver == NULL ||
bind == NULL ||
driver->setup == NULL ||
- driver->disconnect == NULL ||
- driver->suspend == NULL ||
- driver->resume == NULL)
+ driver->disconnect == NULL)
return -EINVAL;
else if (udc == NULL)
return -ENODEV;
@@ -2693,8 +2706,6 @@ static int ci13xxx_stop(struct usb_gadget_driver *driver)
driver->unbind == NULL ||
driver->setup == NULL ||
driver->disconnect == NULL ||
- driver->suspend == NULL ||
- driver->resume == NULL ||
driver != udc->driver)
return -EINVAL;
@@ -2793,7 +2804,7 @@ static irqreturn_t udc_irq(void)
isr_statistics.pci++;
udc->gadget.speed = hw_port_is_high_speed() ?
USB_SPEED_HIGH : USB_SPEED_FULL;
- if (udc->suspended) {
+ if (udc->suspended && udc->driver->resume) {
spin_unlock(udc->lock);
udc->driver->resume(&udc->gadget);
spin_lock(udc->lock);
@@ -2807,7 +2818,8 @@ static irqreturn_t udc_irq(void)
isr_tr_complete_handler(udc);
}
if (USBi_SLI & intr) {
- if (udc->gadget.speed != USB_SPEED_UNKNOWN) {
+ if (udc->gadget.speed != USB_SPEED_UNKNOWN &&
+ udc->driver->suspend) {
udc->suspended = 1;
spin_unlock(udc->lock);
udc->driver->suspend(&udc->gadget);
@@ -2871,7 +2883,7 @@ static int udc_probe(struct ci13xxx_udc_driver *driver, struct device *dev,
udc->gadget.ops = &usb_gadget_ops;
udc->gadget.speed = USB_SPEED_UNKNOWN;
- udc->gadget.is_dualspeed = 1;
+ udc->gadget.max_speed = USB_SPEED_HIGH;
udc->gadget.is_otg = 0;
udc->gadget.name = driver->name;
diff --git a/drivers/usb/gadget/ci13xxx_udc.h b/drivers/usb/gadget/ci13xxx_udc.h
index 2370777..f4871e1 100644
--- a/drivers/usb/gadget/ci13xxx_udc.h
+++ b/drivers/usb/gadget/ci13xxx_udc.h
@@ -127,7 +127,7 @@ struct ci13xxx {
struct ci13xxx_ep ci13xxx_ep[ENDPT_MAX]; /* extended endpts */
u32 ep0_dir; /* ep0 direction */
#define ep0out ci13xxx_ep[0]
-#define ep0in ci13xxx_ep[16]
+#define ep0in ci13xxx_ep[hw_ep_max / 2]
u8 remote_wakeup; /* Is remote wakeup feature
enabled by the host? */
u8 suspended; /* suspended by the host */
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index f71b078..baaebf2 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -175,13 +175,12 @@ ep_found:
_ep->comp_desc = comp_desc;
if (g->speed == USB_SPEED_SUPER) {
switch (usb_endpoint_type(_ep->desc)) {
- case USB_ENDPOINT_XFER_BULK:
- case USB_ENDPOINT_XFER_INT:
- _ep->maxburst = comp_desc->bMaxBurst;
- break;
case USB_ENDPOINT_XFER_ISOC:
/* mult: bits 1:0 of bmAttributes */
_ep->mult = comp_desc->bmAttributes & 0x3;
+ case USB_ENDPOINT_XFER_BULK:
+ case USB_ENDPOINT_XFER_INT:
+ _ep->maxburst = comp_desc->bMaxBurst;
break;
default:
/* Do nothing for control endpoints */
@@ -1535,9 +1534,9 @@ composite_resume(struct usb_gadget *gadget)
static struct usb_gadget_driver composite_driver = {
#ifdef CONFIG_USB_GADGET_SUPERSPEED
- .speed = USB_SPEED_SUPER,
+ .max_speed = USB_SPEED_SUPER,
#else
- .speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_HIGH,
#endif
.unbind = composite_unbind,
@@ -1584,8 +1583,8 @@ int usb_composite_probe(struct usb_composite_driver *driver,
driver->iProduct = driver->name;
composite_driver.function = (char *) driver->name;
composite_driver.driver.name = driver->name;
- composite_driver.speed = min((u8)composite_driver.speed,
- (u8)driver->max_speed);
+ composite_driver.max_speed =
+ min_t(u8, composite_driver.max_speed, driver->max_speed);
composite = driver;
composite_gadget_bind = bind;
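
Two things happen in the composite.c hunk above: the framework's own limit is renamed from .speed to .max_speed and usb_composite_probe() clamps it to the function driver's requested maximum with min_t(), and the SuperSpeed endpoint switch is reordered so isochronous endpoints record mult from bmAttributes and then fall through to pick up bMaxBurst like bulk and interrupt endpoints. A small standalone sketch of the clamp (the enum ordering below is an assumption that only mirrors "larger value means faster", as in usb_device_speed):

#include <stdio.h>

enum speed { SPEED_UNKNOWN, SPEED_LOW, SPEED_FULL, SPEED_HIGH, SPEED_SUPER };

/* illustrative stand-in for the min_t(u8, ...) clamp in usb_composite_probe() */
static enum speed clamp_speed(enum speed framework_max, enum speed driver_max)
{
        return framework_max < driver_max ? framework_max : driver_max;
}

int main(void)
{
        /* a high-speed-only function driver registered on a SuperSpeed framework */
        enum speed s = clamp_speed(SPEED_SUPER, SPEED_HIGH);

        printf("negotiated max_speed = %d (high speed)\n", s);
        return 0;
}
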
diff --git a/drivers/usb/gadget/dbgp.c b/drivers/usb/gadget/dbgp.c
index 6256420..19d7bb0 100644
--- a/drivers/usb/gadget/dbgp.c
+++ b/drivers/usb/gadget/dbgp.c
@@ -404,7 +404,7 @@ fail:
static struct usb_gadget_driver dbgp_driver = {
.function = "dbgp",
- .speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_HIGH,
.unbind = dbgp_unbind,
.setup = dbgp_setup,
.disconnect = dbgp_disconnect,
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index ab8f1b4..db815c2 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -823,19 +823,18 @@ static int dummy_pullup (struct usb_gadget *_gadget, int value)
if (value && dum->driver) {
if (mod_data.is_super_speed)
- dum->gadget.speed = dum->driver->speed;
+ dum->gadget.speed = dum->driver->max_speed;
else if (mod_data.is_high_speed)
dum->gadget.speed = min_t(u8, USB_SPEED_HIGH,
- dum->driver->speed);
+ dum->driver->max_speed);
else
dum->gadget.speed = USB_SPEED_FULL;
dummy_udc_udpate_ep0(dum);
- if (dum->gadget.speed < dum->driver->speed)
+ if (dum->gadget.speed < dum->driver->max_speed)
dev_dbg(udc_dev(dum), "This device can perform faster"
- " if you connect it to a %s port...\n",
- (dum->driver->speed == USB_SPEED_SUPER ?
- "SuperSpeed" : "HighSpeed"));
+ " if you connect it to a %s port...\n",
+ usb_speed_string(dum->driver->max_speed));
}
dum_hcd = gadget_to_dummy_hcd(_gadget);
@@ -898,7 +897,7 @@ static int dummy_udc_start(struct usb_gadget *g,
struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
struct dummy *dum = dum_hcd->dum;
- if (driver->speed == USB_SPEED_UNKNOWN)
+ if (driver->max_speed == USB_SPEED_UNKNOWN)
return -EINVAL;
/*
@@ -977,7 +976,7 @@ static int dummy_udc_probe (struct platform_device *pdev)
dum->gadget.name = gadget_name;
dum->gadget.ops = &dummy_ops;
- dum->gadget.is_dualspeed = 1;
+ dum->gadget.max_speed = USB_SPEED_SUPER;
dev_set_name(&dum->gadget.dev, "gadget");
dum->gadget.dev.parent = &pdev->dev;
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 4dff83d..e0e6375 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -126,7 +126,7 @@ ep_matches (
* descriptor and see if the EP matches it
*/
if (usb_endpoint_xfer_bulk(desc)) {
- if (ep_comp) {
+ if (ep_comp && gadget->max_speed >= USB_SPEED_SUPER) {
num_req_streams = ep_comp->bmAttributes & 0x1f;
if (num_req_streams > ep->max_streams)
return 0;
@@ -149,7 +149,7 @@ ep_matches (
switch (type) {
case USB_ENDPOINT_XFER_INT:
/* INT: limit 64 bytes full speed, 1024 high/super speed */
- if (!gadget->is_dualspeed && max > 64)
+ if (!gadget_is_dualspeed(gadget) && max > 64)
return 0;
/* FALLTHROUGH */
@@ -157,12 +157,12 @@ ep_matches (
/* ISO: limit 1023 bytes full speed, 1024 high/super speed */
if (ep->maxpacket < max)
return 0;
- if (!gadget->is_dualspeed && max > 1023)
+ if (!gadget_is_dualspeed(gadget) && max > 1023)
return 0;
/* BOTH: "high bandwidth" works only at high speed */
if ((desc->wMaxPacketSize & cpu_to_le16(3<<11))) {
- if (!gadget->is_dualspeed)
+ if (!gadget_is_dualspeed(gadget))
return 0;
/* configure your hardware with enough buffering!! */
}
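
epautoconf.c now asks gadget_is_dualspeed() instead of reading a dedicated is_dualspeed flag, and only honours the SuperSpeed companion descriptor when the controller's max_speed reaches SuperSpeed. Roughly, such predicates can be derived from max_speed alone; the sketch below illustrates that idea and is not the kernel's actual inline helpers:

#include <stdbool.h>
#include <stdio.h>

enum speed { SPEED_UNKNOWN, SPEED_LOW, SPEED_FULL, SPEED_HIGH, SPEED_SUPER };

struct gadget { enum speed max_speed; };

/* "dual speed" here means the controller can run at high speed or better */
static bool is_dualspeed(const struct gadget *g)
{
        return g->max_speed >= SPEED_HIGH;
}

static bool is_superspeed(const struct gadget *g)
{
        return g->max_speed >= SPEED_SUPER;
}

int main(void)
{
        struct gadget fs = { SPEED_FULL }, hs = { SPEED_HIGH };

        /* a full-speed-only controller must reject >64-byte INT endpoints */
        printf("full-speed dual? %d  high-speed dual? %d\n",
               is_dualspeed(&fs), is_dualspeed(&hs));
        printf("high-speed super? %d\n", is_superspeed(&hs));
        return 0;
}
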
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index 0cd764d..a28f6ff 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -250,9 +250,9 @@ static struct usb_configuration rndis_config_driver = {
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_USB_ETH_EEM
-static int use_eem = 1;
+static bool use_eem = 1;
#else
-static int use_eem;
+static bool use_eem;
#endif
module_param(use_eem, bool, 0);
MODULE_PARM_DESC(use_eem, "use CDC EEM mode");
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index acb3800..f63dc6c 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -1037,7 +1037,6 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
{
struct ffs_sb_fill_data *data = _data;
struct inode *inode;
- struct dentry *d;
struct ffs_data *ffs;
ENTER();
@@ -1045,7 +1044,7 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
/* Initialise data */
ffs = ffs_data_new();
if (unlikely(!ffs))
- goto enomem0;
+ goto Enomem;
ffs->sb = sb;
ffs->dev_name = data->dev_name;
@@ -1065,26 +1064,21 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
&simple_dir_inode_operations,
&data->perms);
if (unlikely(!inode))
- goto enomem1;
- d = d_alloc_root(inode);
- if (unlikely(!d))
- goto enomem2;
- sb->s_root = d;
+ goto Enomem;
+ sb->s_root = d_alloc_root(inode);
+ if (unlikely(!sb->s_root)) {
+ iput(inode);
+ goto Enomem;
+ }
/* EP0 file */
if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
&ffs_ep0_operations, NULL)))
- goto enomem3;
+ goto Enomem;
return 0;
-enomem3:
- dput(d);
-enomem2:
- iput(inode);
-enomem1:
- ffs_data_put(ffs);
-enomem0:
+Enomem:
return -ENOMEM;
}
@@ -1196,14 +1190,11 @@ ffs_fs_mount(struct file_system_type *t, int flags,
static void
ffs_fs_kill_sb(struct super_block *sb)
{
- void *ptr;
-
ENTER();
kill_litter_super(sb);
- ptr = xchg(&sb->s_fs_info, NULL);
- if (ptr)
- ffs_data_put(ptr);
+ if (sb->s_fs_info)
+ ffs_data_put(sb->s_fs_info);
}
static struct file_system_type ffs_fs_type = {
@@ -1408,7 +1399,7 @@ static int ffs_epfiles_create(struct ffs_data *ffs)
ENTER();
count = ffs->eps_count;
- epfiles = kzalloc(count * sizeof *epfiles, GFP_KERNEL);
+ epfiles = kcalloc(count, sizeof(*epfiles), GFP_KERNEL);
if (!epfiles)
return -ENOMEM;
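
The allocation change above (and the matching one in f_mass_storage.c further down) swaps kzalloc(count * sizeof(*p)) for kcalloc(count, sizeof(*p)); the point of the helper is that the multiplication is overflow-checked before any memory is allocated. A userspace sketch of the same guard (most C libraries' calloc() performs an equivalent check; it is spelled out here for clarity):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* illustrative equivalent of the overflow check kcalloc() adds over kzalloc() */
static void *checked_calloc(size_t n, size_t size)
{
        if (size && n > SIZE_MAX / size)
                return NULL;            /* n * size would overflow */
        return calloc(n, size);         /* zeroed, like kzalloc/kcalloc */
}

int main(void)
{
        struct { int fd; } *epfiles;
        size_t count = 16;

        epfiles = checked_calloc(count, sizeof(*epfiles));
        if (!epfiles)
                return 1;               /* the -ENOMEM path in the hunk above */
        printf("allocated %zu entries\n", count);
        free(epfiles);
        return 0;
}
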
diff --git a/drivers/usb/gadget/f_loopback.c b/drivers/usb/gadget/f_loopback.c
index 6d87f28..2c0cd82 100644
--- a/drivers/usb/gadget/f_loopback.c
+++ b/drivers/usb/gadget/f_loopback.c
@@ -418,7 +418,7 @@ int __init loopback_add(struct usb_composite_dev *cdev, bool autoresume)
/* support autoresume for remote wakeup testing */
if (autoresume)
- sourcesink_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
+ loopback_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
/* support OTG systems */
if (gadget_is_otg(cdev->gadget)) {
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index 1a6f415..ee8ceec 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -1873,17 +1873,14 @@ static int check_command(struct fsg_common *common, int cmnd_size,
common->lun, lun);
/* Check the LUN */
- if (common->lun < common->nluns) {
- curlun = &common->luns[common->lun];
- common->curlun = curlun;
+ curlun = common->curlun;
+ if (curlun) {
if (common->cmnd[0] != REQUEST_SENSE) {
curlun->sense_data = SS_NO_SENSE;
curlun->sense_data_info = 0;
curlun->info_valid = 0;
}
} else {
- common->curlun = NULL;
- curlun = NULL;
common->bad_lun_okay = 0;
/*
@@ -1929,6 +1926,17 @@ static int check_command(struct fsg_common *common, int cmnd_size,
return 0;
}
+/* wrapper around check_command() for commands whose data size is given in blocks */

+static int check_command_size_in_blocks(struct fsg_common *common,
+ int cmnd_size, enum data_direction data_dir,
+ unsigned int mask, int needs_medium, const char *name)
+{
+ if (common->curlun)
+ common->data_size_from_cmnd <<= common->curlun->blkbits;
+ return check_command(common, cmnd_size, data_dir,
+ mask, needs_medium, name);
+}
+
static int do_scsi_command(struct fsg_common *common)
{
struct fsg_buffhd *bh;
@@ -2011,9 +2019,9 @@ static int do_scsi_command(struct fsg_common *common)
case READ_6:
i = common->cmnd[4];
- common->data_size_from_cmnd = (i == 0 ? 256 : i) <<
- common->curlun->blkbits;
- reply = check_command(common, 6, DATA_DIR_TO_HOST,
+ common->data_size_from_cmnd = (i == 0) ? 256 : i;
+ reply = check_command_size_in_blocks(common, 6,
+ DATA_DIR_TO_HOST,
(7<<1) | (1<<4), 1,
"READ(6)");
if (reply == 0)
@@ -2022,9 +2030,9 @@ static int do_scsi_command(struct fsg_common *common)
case READ_10:
common->data_size_from_cmnd =
- get_unaligned_be16(&common->cmnd[7]) <<
- common->curlun->blkbits;
- reply = check_command(common, 10, DATA_DIR_TO_HOST,
+ get_unaligned_be16(&common->cmnd[7]);
+ reply = check_command_size_in_blocks(common, 10,
+ DATA_DIR_TO_HOST,
(1<<1) | (0xf<<2) | (3<<7), 1,
"READ(10)");
if (reply == 0)
@@ -2033,9 +2041,9 @@ static int do_scsi_command(struct fsg_common *common)
case READ_12:
common->data_size_from_cmnd =
- get_unaligned_be32(&common->cmnd[6]) <<
- common->curlun->blkbits;
- reply = check_command(common, 12, DATA_DIR_TO_HOST,
+ get_unaligned_be32(&common->cmnd[6]);
+ reply = check_command_size_in_blocks(common, 12,
+ DATA_DIR_TO_HOST,
(1<<1) | (0xf<<2) | (0xf<<6), 1,
"READ(12)");
if (reply == 0)
@@ -2134,9 +2142,9 @@ static int do_scsi_command(struct fsg_common *common)
case WRITE_6:
i = common->cmnd[4];
- common->data_size_from_cmnd = (i == 0 ? 256 : i) <<
- common->curlun->blkbits;
- reply = check_command(common, 6, DATA_DIR_FROM_HOST,
+ common->data_size_from_cmnd = (i == 0) ? 256 : i;
+ reply = check_command_size_in_blocks(common, 6,
+ DATA_DIR_FROM_HOST,
(7<<1) | (1<<4), 1,
"WRITE(6)");
if (reply == 0)
@@ -2145,9 +2153,9 @@ static int do_scsi_command(struct fsg_common *common)
case WRITE_10:
common->data_size_from_cmnd =
- get_unaligned_be16(&common->cmnd[7]) <<
- common->curlun->blkbits;
- reply = check_command(common, 10, DATA_DIR_FROM_HOST,
+ get_unaligned_be16(&common->cmnd[7]);
+ reply = check_command_size_in_blocks(common, 10,
+ DATA_DIR_FROM_HOST,
(1<<1) | (0xf<<2) | (3<<7), 1,
"WRITE(10)");
if (reply == 0)
@@ -2156,9 +2164,9 @@ static int do_scsi_command(struct fsg_common *common)
case WRITE_12:
common->data_size_from_cmnd =
- get_unaligned_be32(&common->cmnd[6]) <<
- common->curlun->blkbits;
- reply = check_command(common, 12, DATA_DIR_FROM_HOST,
+ get_unaligned_be32(&common->cmnd[6]);
+ reply = check_command_size_in_blocks(common, 12,
+ DATA_DIR_FROM_HOST,
(1<<1) | (0xf<<2) | (0xf<<6), 1,
"WRITE(12)");
if (reply == 0)
@@ -2273,6 +2281,10 @@ static int received_cbw(struct fsg_dev *fsg, struct fsg_buffhd *bh)
if (common->data_size == 0)
common->data_dir = DATA_DIR_NONE;
common->lun = cbw->Lun;
+ if (common->lun >= 0 && common->lun < common->nluns)
+ common->curlun = &common->luns[common->lun];
+ else
+ common->curlun = NULL;
common->tag = cbw->Tag;
return 0;
}
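
After these hunks, f_mass_storage.c resolves the current LUN exactly once, when the CBW arrives in received_cbw(), and caches it in common->curlun; check_command() only consults that cache, and the new check_command_size_in_blocks() wrapper converts the block count from the CDB into bytes only when the cached LUN is valid, so a bogus LUN can no longer dereference curlun->blkbits. A compact sketch of the flow (types and field names below are simplified stand-ins):

#include <stdio.h>

#define MAX_LUNS 2

struct lun { unsigned blkbits; };               /* e.g. 9 for 512-byte blocks */

struct common {
        struct lun luns[MAX_LUNS];
        struct lun *curlun;                     /* cached when the CBW arrives */
        unsigned long data_size_from_cmnd;
};

/* done once per command, mirroring the received_cbw() change above */
static void select_lun(struct common *c, int lun)
{
        c->curlun = (lun >= 0 && lun < MAX_LUNS) ? &c->luns[lun] : NULL;
}

/* mirrors check_command_size_in_blocks(): scale only for a valid LUN */
static void size_in_blocks(struct common *c, unsigned long blocks)
{
        c->data_size_from_cmnd = blocks;
        if (c->curlun)
                c->data_size_from_cmnd <<= c->curlun->blkbits;
}

int main(void)
{
        struct common c = { .luns = { { 9 }, { 11 } } };

        select_lun(&c, 1);
        size_in_blocks(&c, 8);                  /* READ(10) for 8 blocks */
        printf("transfer = %lu bytes\n", c.data_size_from_cmnd);

        select_lun(&c, 7);                      /* bogus LUN from the host */
        size_in_blocks(&c, 8);                  /* no blkbits dereference */
        printf("bad LUN, raw count = %lu\n", c.data_size_from_cmnd);
        return 0;
}
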
@@ -2763,7 +2775,7 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
* Create the LUNs, open their backing files, and register the
* LUN devices in sysfs.
*/
- curlun = kzalloc(nluns * sizeof *curlun, GFP_KERNEL);
+ curlun = kcalloc(nluns, sizeof(*curlun), GFP_KERNEL);
if (unlikely(!curlun)) {
rc = -ENOMEM;
goto error_release;
@@ -3111,15 +3123,15 @@ fsg_add(struct usb_composite_dev *cdev, struct usb_configuration *c,
struct fsg_module_parameters {
char *file[FSG_MAX_LUNS];
- int ro[FSG_MAX_LUNS];
- int removable[FSG_MAX_LUNS];
- int cdrom[FSG_MAX_LUNS];
- int nofua[FSG_MAX_LUNS];
+ bool ro[FSG_MAX_LUNS];
+ bool removable[FSG_MAX_LUNS];
+ bool cdrom[FSG_MAX_LUNS];
+ bool nofua[FSG_MAX_LUNS];
unsigned int file_count, ro_count, removable_count, cdrom_count;
unsigned int nofua_count;
unsigned int luns; /* nluns */
- int stall; /* can_stall */
+ bool stall; /* can_stall */
};
#define _FSG_MODULE_PARAM_ARRAY(prefix, params, name, type, desc) \
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 16a509a..7cdcb63 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -298,11 +298,10 @@ static void pn_net_setup(struct net_device *dev)
static int
pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags)
{
- struct net_device *dev = fp->dev;
struct page *page;
int err;
- page = __netdev_alloc_page(dev, gfp_flags);
+ page = alloc_page(gfp_flags);
if (!page)
return -ENOMEM;
@@ -312,7 +311,7 @@ pn_rx_submit(struct f_phonet *fp, struct usb_request *req, gfp_t gfp_flags)
err = usb_ep_queue(fp->out_ep, req, gfp_flags);
if (unlikely(err))
- netdev_free_page(dev, page);
+ put_page(page);
return err;
}
@@ -374,9 +373,9 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
}
if (page)
- netdev_free_page(dev, page);
+ put_page(page);
if (req)
- pn_rx_submit(fp, req, GFP_ATOMIC);
+ pn_rx_submit(fp, req, GFP_ATOMIC | __GFP_COLD);
}
/*-------------------------------------------------------------------------*/
@@ -436,7 +435,7 @@ static int pn_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
netif_carrier_on(dev);
for (i = 0; i < phonet_rxq_size; i++)
- pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC);
+ pn_rx_submit(fp, fp->out_reqv[i], GFP_ATOMIC | __GFP_COLD);
}
spin_unlock(&port->lock);
return 0;
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 11b5196..47766f0 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -303,16 +303,16 @@ MODULE_LICENSE("Dual BSD/GPL");
static struct {
char *file[FSG_MAX_LUNS];
char *serial;
- int ro[FSG_MAX_LUNS];
- int nofua[FSG_MAX_LUNS];
+ bool ro[FSG_MAX_LUNS];
+ bool nofua[FSG_MAX_LUNS];
unsigned int num_filenames;
unsigned int num_ros;
unsigned int num_nofuas;
unsigned int nluns;
- int removable;
- int can_stall;
- int cdrom;
+ bool removable;
+ bool can_stall;
+ bool cdrom;
char *transport_parm;
char *protocol_parm;
@@ -2297,19 +2297,17 @@ static int check_command(struct fsg_dev *fsg, int cmnd_size,
DBG(fsg, "using LUN %d from CBW, "
"not LUN %d from CDB\n",
fsg->lun, lun);
- } else
- fsg->lun = lun; // Use LUN from the command
+ }
/* Check the LUN */
- if (fsg->lun < fsg->nluns) {
- fsg->curlun = curlun = &fsg->luns[fsg->lun];
+ curlun = fsg->curlun;
+ if (curlun) {
if (fsg->cmnd[0] != REQUEST_SENSE) {
curlun->sense_data = SS_NO_SENSE;
curlun->sense_data_info = 0;
curlun->info_valid = 0;
}
} else {
- fsg->curlun = curlun = NULL;
fsg->bad_lun_okay = 0;
/* INQUIRY and REQUEST SENSE commands are explicitly allowed
@@ -2351,6 +2349,16 @@ static int check_command(struct fsg_dev *fsg, int cmnd_size,
return 0;
}
+/* wrapper around check_command() for commands whose data size is given in blocks */
+static int check_command_size_in_blocks(struct fsg_dev *fsg, int cmnd_size,
+ enum data_direction data_dir, unsigned int mask,
+ int needs_medium, const char *name)
+{
+ if (fsg->curlun)
+ fsg->data_size_from_cmnd <<= fsg->curlun->blkbits;
+ return check_command(fsg, cmnd_size, data_dir,
+ mask, needs_medium, name);
+}
static int do_scsi_command(struct fsg_dev *fsg)
{
@@ -2425,26 +2433,27 @@ static int do_scsi_command(struct fsg_dev *fsg)
case READ_6:
i = fsg->cmnd[4];
- fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << fsg->curlun->blkbits;
- if ((reply = check_command(fsg, 6, DATA_DIR_TO_HOST,
+ fsg->data_size_from_cmnd = (i == 0) ? 256 : i;
+ if ((reply = check_command_size_in_blocks(fsg, 6,
+ DATA_DIR_TO_HOST,
(7<<1) | (1<<4), 1,
"READ(6)")) == 0)
reply = do_read(fsg);
break;
case READ_10:
- fsg->data_size_from_cmnd =
- get_unaligned_be16(&fsg->cmnd[7]) << fsg->curlun->blkbits;
- if ((reply = check_command(fsg, 10, DATA_DIR_TO_HOST,
+ fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
+ if ((reply = check_command_size_in_blocks(fsg, 10,
+ DATA_DIR_TO_HOST,
(1<<1) | (0xf<<2) | (3<<7), 1,
"READ(10)")) == 0)
reply = do_read(fsg);
break;
case READ_12:
- fsg->data_size_from_cmnd =
- get_unaligned_be32(&fsg->cmnd[6]) << fsg->curlun->blkbits;
- if ((reply = check_command(fsg, 12, DATA_DIR_TO_HOST,
+ fsg->data_size_from_cmnd = get_unaligned_be32(&fsg->cmnd[6]);
+ if ((reply = check_command_size_in_blocks(fsg, 12,
+ DATA_DIR_TO_HOST,
(1<<1) | (0xf<<2) | (0xf<<6), 1,
"READ(12)")) == 0)
reply = do_read(fsg);
@@ -2529,26 +2538,27 @@ static int do_scsi_command(struct fsg_dev *fsg)
case WRITE_6:
i = fsg->cmnd[4];
- fsg->data_size_from_cmnd = (i == 0 ? 256 : i) << fsg->curlun->blkbits;
- if ((reply = check_command(fsg, 6, DATA_DIR_FROM_HOST,
+ fsg->data_size_from_cmnd = (i == 0) ? 256 : i;
+ if ((reply = check_command_size_in_blocks(fsg, 6,
+ DATA_DIR_FROM_HOST,
(7<<1) | (1<<4), 1,
"WRITE(6)")) == 0)
reply = do_write(fsg);
break;
case WRITE_10:
- fsg->data_size_from_cmnd =
- get_unaligned_be16(&fsg->cmnd[7]) << fsg->curlun->blkbits;
- if ((reply = check_command(fsg, 10, DATA_DIR_FROM_HOST,
+ fsg->data_size_from_cmnd = get_unaligned_be16(&fsg->cmnd[7]);
+ if ((reply = check_command_size_in_blocks(fsg, 10,
+ DATA_DIR_FROM_HOST,
(1<<1) | (0xf<<2) | (3<<7), 1,
"WRITE(10)")) == 0)
reply = do_write(fsg);
break;
case WRITE_12:
- fsg->data_size_from_cmnd =
- get_unaligned_be32(&fsg->cmnd[6]) << fsg->curlun->blkbits;
- if ((reply = check_command(fsg, 12, DATA_DIR_FROM_HOST,
+ fsg->data_size_from_cmnd = get_unaligned_be32(&fsg->cmnd[6]);
+ if ((reply = check_command_size_in_blocks(fsg, 12,
+ DATA_DIR_FROM_HOST,
(1<<1) | (0xf<<2) | (0xf<<6), 1,
"WRITE(12)")) == 0)
reply = do_write(fsg);
@@ -2715,7 +2725,17 @@ static int get_next_command(struct fsg_dev *fsg)
memcpy(fsg->cmnd, fsg->cbbuf_cmnd, fsg->cmnd_size);
fsg->cbbuf_cmnd_size = 0;
spin_unlock_irq(&fsg->lock);
+
+ /* Use LUN from the command */
+ fsg->lun = fsg->cmnd[1] >> 5;
}
+
+ /* Update current lun */
+ if (fsg->lun >= 0 && fsg->lun < fsg->nluns)
+ fsg->curlun = &fsg->luns[fsg->lun];
+ else
+ fsg->curlun = NULL;
+
return rc;
}
@@ -3584,7 +3604,7 @@ static void fsg_resume(struct usb_gadget *gadget)
/*-------------------------------------------------------------------------*/
static struct usb_gadget_driver fsg_driver = {
- .speed = USB_SPEED_SUPER,
+ .max_speed = USB_SPEED_SUPER,
.function = (char *) fsg_string_product,
.unbind = fsg_unbind,
.disconnect = fsg_disconnect,
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index e00cf92..b95697c 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -2336,7 +2336,7 @@ static int fsl_qe_start(struct usb_gadget_driver *driver,
if (!udc_controller)
return -ENODEV;
- if (!driver || driver->speed < USB_SPEED_FULL
+ if (!driver || driver->max_speed < USB_SPEED_FULL
|| !bind || !driver->disconnect || !driver->setup)
return -EINVAL;
@@ -2350,7 +2350,7 @@ static int fsl_qe_start(struct usb_gadget_driver *driver,
/* hook up the driver */
udc_controller->driver = driver;
udc_controller->gadget.dev.driver = &driver->driver;
- udc_controller->gadget.speed = (enum usb_device_speed)(driver->speed);
+ udc_controller->gadget.speed = driver->max_speed;
spin_unlock_irqrestore(&udc_controller->lock, flags);
retval = bind(&udc_controller->gadget);
@@ -2814,20 +2814,7 @@ static struct platform_driver udc_driver = {
#endif
};
-static int __init qe_udc_init(void)
-{
- printk(KERN_INFO "%s: %s, %s\n", driver_name, driver_desc,
- DRIVER_VERSION);
- return platform_driver_register(&udc_driver);
-}
-
-static void __exit qe_udc_exit(void)
-{
- platform_driver_unregister(&udc_driver);
-}
-
-module_init(qe_udc_init);
-module_exit(qe_udc_exit);
+module_platform_driver(udc_driver);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
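
fsl_qe_udc.c (and, later in this patch, mv_udc_core.c and s3c-hsotg.c) drops its hand-written module init/exit functions in favour of module_platform_driver(), which generates the register/unregister pair. The userspace analogue below only illustrates that boilerplate-folding idea; the real kernel macro expands to module_init()/module_exit() wrappers instead:

#include <stdio.h>

struct platform_driver { const char *name; };

static int platform_driver_register(struct platform_driver *d)
{
        printf("register %s\n", d->name);
        return 0;
}

static void platform_driver_unregister(struct platform_driver *d)
{
        printf("unregister %s\n", d->name);
}

/* illustrative analogue: generate the init/exit pair from the driver name */
#define module_platform_driver(drv) \
        static int drv##_init(void) \
        { return platform_driver_register(&drv); } \
        static void drv##_exit(void) \
        { platform_driver_unregister(&drv); }

static struct platform_driver udc_driver = { .name = "fsl-qe-udc" };
module_platform_driver(udc_driver)

int main(void)
{
        if (udc_driver_init())
                return 1;
        udc_driver_exit();
        return 0;
}
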
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index dd28ef3..b04712f 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -1430,7 +1430,7 @@ static void setup_received_irq(struct fsl_udc *udc,
int pipe = get_pipe_by_windex(wIndex);
struct fsl_ep *ep;
- if (wValue != 0 || wLength != 0 || pipe > udc->max_ep)
+ if (wValue != 0 || wLength != 0 || pipe >= udc->max_ep)
break;
ep = get_ep_by_pipe(udc, pipe);
@@ -1673,7 +1673,7 @@ static void dtd_complete_irq(struct fsl_udc *udc)
if (!bit_pos)
return;
- for (i = 0; i < udc->max_ep * 2; i++) {
+ for (i = 0; i < udc->max_ep; i++) {
ep_num = i >> 1;
direction = i % 2;
@@ -1934,7 +1934,7 @@ static int fsl_start(struct usb_gadget_driver *driver,
if (!udc_controller)
return -ENODEV;
- if (!driver || driver->speed < USB_SPEED_FULL
+ if (!driver || driver->max_speed < USB_SPEED_FULL
|| !bind || !driver->disconnect || !driver->setup)
return -EINVAL;
@@ -2525,7 +2525,7 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
/* Setup gadget structure */
udc_controller->gadget.ops = &fsl_gadget_ops;
- udc_controller->gadget.is_dualspeed = 1;
+ udc_controller->gadget.max_speed = USB_SPEED_HIGH;
udc_controller->gadget.ep0 = &udc_controller->eps[0].ep;
INIT_LIST_HEAD(&udc_controller->gadget.ep_list);
udc_controller->gadget.speed = USB_SPEED_UNKNOWN;
diff --git a/drivers/usb/gadget/fusb300_udc.c b/drivers/usb/gadget/fusb300_udc.c
index 74da206..5831cb4 100644
--- a/drivers/usb/gadget/fusb300_udc.c
+++ b/drivers/usb/gadget/fusb300_udc.c
@@ -1317,7 +1317,7 @@ static int fusb300_udc_start(struct usb_gadget_driver *driver,
int retval;
if (!driver
- || driver->speed < USB_SPEED_FULL
+ || driver->max_speed < USB_SPEED_FULL
|| !bind
|| !driver->setup)
return -EINVAL;
@@ -1463,7 +1463,7 @@ static int __init fusb300_probe(struct platform_device *pdev)
dev_set_name(&fusb300->gadget.dev, "gadget");
- fusb300->gadget.is_dualspeed = 1;
+ fusb300->gadget.max_speed = USB_SPEED_HIGH;
fusb300->gadget.dev.parent = &pdev->dev;
fusb300->gadget.dev.dma_mask = pdev->dev.dma_mask;
fusb300->gadget.dev.release = pdev->dev.release;
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index 7f87805..5af70fc 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -1357,7 +1357,7 @@ static int goku_start(struct usb_gadget_driver *driver,
int retval;
if (!driver
- || driver->speed < USB_SPEED_FULL
+ || driver->max_speed < USB_SPEED_FULL
|| !bind
|| !driver->disconnect
|| !driver->setup)
@@ -1796,6 +1796,7 @@ static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&dev->lock);
dev->pdev = pdev;
dev->gadget.ops = &goku_ops;
+ dev->gadget.max_speed = USB_SPEED_FULL;
/* the "gadget" abstracts/virtualizes the controller */
dev_set_name(&dev->gadget.dev, "gadget");
diff --git a/drivers/usb/gadget/imx_udc.c b/drivers/usb/gadget/imx_udc.c
index 2d978c0..8d1c75a 100644
--- a/drivers/usb/gadget/imx_udc.c
+++ b/drivers/usb/gadget/imx_udc.c
@@ -1336,7 +1336,7 @@ static int imx_udc_start(struct usb_gadget_driver *driver,
int retval;
if (!driver
- || driver->speed < USB_SPEED_FULL
+ || driver->max_speed < USB_SPEED_FULL
|| !bind
|| !driver->disconnect
|| !driver->setup)
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 6ccae27..ae04266 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -1766,9 +1766,9 @@ gadgetfs_suspend (struct usb_gadget *gadget)
static struct usb_gadget_driver gadgetfs_driver = {
#ifdef CONFIG_USB_GADGET_DUALSPEED
- .speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_HIGH,
#else
- .speed = USB_SPEED_FULL,
+ .max_speed = USB_SPEED_FULL,
#endif
.function = (char *) driver_desc,
.unbind = gadgetfs_unbind,
@@ -1792,7 +1792,7 @@ static int gadgetfs_probe (struct usb_gadget *gadget)
}
static struct usb_gadget_driver probe_driver = {
- .speed = USB_SPEED_HIGH,
+ .max_speed = USB_SPEED_HIGH,
.unbind = gadgetfs_nop,
.setup = (void *)gadgetfs_nop,
.disconnect = gadgetfs_nop,
@@ -2035,7 +2035,6 @@ static int
gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
{
struct inode *inode;
- struct dentry *d;
struct dev_data *dev;
if (the_device)
@@ -2058,24 +2057,27 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
NULL, &simple_dir_operations,
S_IFDIR | S_IRUGO | S_IXUGO);
if (!inode)
- goto enomem0;
+ goto Enomem;
inode->i_op = &simple_dir_inode_operations;
- if (!(d = d_alloc_root (inode)))
- goto enomem1;
- sb->s_root = d;
+ if (!(sb->s_root = d_alloc_root (inode))) {
+ iput(inode);
+ goto Enomem;
+ }
/* the ep0 file is named after the controller we expect;
* user mode code can use it for sanity checks, like we do.
*/
dev = dev_new ();
if (!dev)
- goto enomem2;
+ goto Enomem;
dev->sb = sb;
if (!gadgetfs_create_file (sb, CHIP,
dev, &dev_init_operations,
- &dev->dentry))
- goto enomem3;
+ &dev->dentry)) {
+ put_dev(dev);
+ goto Enomem;
+ }
/* other endpoint files are available after hardware setup,
* from binding to a controller.
@@ -2083,13 +2085,7 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
the_device = dev;
return 0;
-enomem3:
- put_dev (dev);
-enomem2:
- dput (d);
-enomem1:
- iput (inode);
-enomem0:
+Enomem:
return -ENOMEM;
}
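
The f_fs.c and gadgetfs (inode.c) hunks above collapse their chains of enomem labels into one: once d_alloc_root() succeeds, the superblock owns the root inode, so the only failure that still needs an explicit iput() is d_alloc_root() itself failing, and everything else can fall through to a single label. A generic userspace sketch of that ownership-transfer error style (malloc/free stand in for the VFS calls):

#include <stdio.h>
#include <stdlib.h>

struct inode  { int n; };
struct dentry { struct inode *inode; };         /* owns the inode once created */

static struct dentry *make_root(struct inode *inode)
{
        struct dentry *d = malloc(sizeof(*d));

        if (d)
                d->inode = inode;               /* ownership transferred */
        return d;
}

static int fill_super(void)
{
        struct inode *inode;
        struct dentry *root;

        inode = malloc(sizeof(*inode));
        if (!inode)
                goto enomem;

        root = make_root(inode);
        if (!root) {
                free(inode);                    /* the only caller-owned failure */
                goto enomem;
        }

        /* later failures would release 'root', which releases the inode */
        printf("mounted\n");
        free(root->inode);
        free(root);
        return 0;

enomem:
        return -1;                              /* -ENOMEM in the hunks above */
}

int main(void) { return fill_super() ? 1 : 0; }
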
diff --git a/drivers/usb/gadget/langwell_udc.c b/drivers/usb/gadget/langwell_udc.c
index c9fa3bf..e2293c1 100644
--- a/drivers/usb/gadget/langwell_udc.c
+++ b/drivers/usb/gadget/langwell_udc.c
@@ -11,11 +11,6 @@
/* #undef DEBUG */
/* #undef VERBOSE_DEBUG */
-#if defined(CONFIG_USB_LANGWELL_OTG)
-#define OTG_TRANSCEIVER
-#endif
-
-
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
@@ -1522,8 +1517,7 @@ static void langwell_udc_stop(struct langwell_udc *dev)
/* stop all USB activities */
-static void stop_activity(struct langwell_udc *dev,
- struct usb_gadget_driver *driver)
+static void stop_activity(struct langwell_udc *dev)
{
struct langwell_ep *ep;
dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
@@ -1535,9 +1529,9 @@ static void stop_activity(struct langwell_udc *dev,
}
/* report disconnect; the driver is already quiesced */
- if (driver) {
+ if (dev->driver) {
spin_unlock(&dev->lock);
- driver->disconnect(&dev->gadget);
+ dev->driver->disconnect(&dev->gadget);
spin_lock(&dev->lock);
}
@@ -1925,11 +1919,10 @@ static int langwell_stop(struct usb_gadget *g,
/* stop all usb activities */
dev->gadget.speed = USB_SPEED_UNKNOWN;
- stop_activity(dev, driver);
- spin_unlock_irqrestore(&dev->lock, flags);
-
dev->gadget.dev.driver = NULL;
dev->driver = NULL;
+ stop_activity(dev);
+ spin_unlock_irqrestore(&dev->lock, flags);
device_remove_file(&dev->pdev->dev, &dev_attr_function);
@@ -2315,13 +2308,9 @@ static void handle_setup_packet(struct langwell_udc *dev,
if (!gadget_is_otg(&dev->gadget))
break;
- else if (setup->bRequest == USB_DEVICE_B_HNP_ENABLE) {
+ else if (setup->bRequest == USB_DEVICE_B_HNP_ENABLE)
dev->gadget.b_hnp_enable = 1;
-#ifdef OTG_TRANSCEIVER
- if (!dev->lotg->otg.default_a)
- dev->lotg->hsm.b_hnp_enable = 1;
-#endif
- } else if (setup->bRequest == USB_DEVICE_A_HNP_SUPPORT)
+ else if (setup->bRequest == USB_DEVICE_A_HNP_SUPPORT)
dev->gadget.a_hnp_support = 1;
else if (setup->bRequest ==
USB_DEVICE_A_ALT_HNP_SUPPORT)
@@ -2733,7 +2722,7 @@ static void handle_usb_reset(struct langwell_udc *dev)
dev->bus_reset = 1;
/* reset all the queues, stop all USB activities */
- stop_activity(dev, dev->driver);
+ stop_activity(dev);
dev->usb_state = USB_STATE_DEFAULT;
} else {
dev_vdbg(&dev->pdev->dev, "device controller reset\n");
@@ -2741,7 +2730,7 @@ static void handle_usb_reset(struct langwell_udc *dev)
langwell_udc_reset(dev);
/* reset all the queues, stop all USB activities */
- stop_activity(dev, dev->driver);
+ stop_activity(dev);
/* reset ep0 dQH and endptctrl */
ep0_reset(dev);
@@ -2752,12 +2741,6 @@ static void handle_usb_reset(struct langwell_udc *dev)
dev->usb_state = USB_STATE_ATTACHED;
}
-#ifdef OTG_TRANSCEIVER
- /* refer to USB OTG 6.6.2.3 b_hnp_en is cleared */
- if (!dev->lotg->otg.default_a)
- dev->lotg->hsm.b_hnp_enable = 0;
-#endif
-
dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
}
@@ -2770,29 +2753,6 @@ static void handle_bus_suspend(struct langwell_udc *dev)
dev->resume_state = dev->usb_state;
dev->usb_state = USB_STATE_SUSPENDED;
-#ifdef OTG_TRANSCEIVER
- if (dev->lotg->otg.default_a) {
- if (dev->lotg->hsm.b_bus_suspend_vld == 1) {
- dev->lotg->hsm.b_bus_suspend = 1;
- /* notify transceiver the state changes */
- if (spin_trylock(&dev->lotg->wq_lock)) {
- langwell_update_transceiver();
- spin_unlock(&dev->lotg->wq_lock);
- }
- }
- dev->lotg->hsm.b_bus_suspend_vld++;
- } else {
- if (!dev->lotg->hsm.a_bus_suspend) {
- dev->lotg->hsm.a_bus_suspend = 1;
- /* notify transceiver the state changes */
- if (spin_trylock(&dev->lotg->wq_lock)) {
- langwell_update_transceiver();
- spin_unlock(&dev->lotg->wq_lock);
- }
- }
- }
-#endif
-
/* report suspend to the driver */
if (dev->driver) {
if (dev->driver->suspend) {
@@ -2823,11 +2783,6 @@ static void handle_bus_resume(struct langwell_udc *dev)
if (dev->pdev->device != 0x0829)
langwell_phy_low_power(dev, 0);
-#ifdef OTG_TRANSCEIVER
- if (dev->lotg->otg.default_a == 0)
- dev->lotg->hsm.a_bus_suspend = 0;
-#endif
-
/* report resume to the driver */
if (dev->driver) {
if (dev->driver->resume) {
@@ -3020,7 +2975,6 @@ static void langwell_udc_remove(struct pci_dev *pdev)
dev->done = &done;
-#ifndef OTG_TRANSCEIVER
/* free dTD dma_pool and dQH */
if (dev->dtd_pool)
dma_pool_destroy(dev->dtd_pool);
@@ -3032,7 +2986,6 @@ static void langwell_udc_remove(struct pci_dev *pdev)
/* release SRAM caching */
if (dev->has_sram && dev->got_sram)
sram_deinit(dev);
-#endif
if (dev->status_req) {
kfree(dev->status_req->req.buf);
@@ -3045,7 +2998,6 @@ static void langwell_udc_remove(struct pci_dev *pdev)
if (dev->got_irq)
free_irq(pdev->irq, dev);
-#ifndef OTG_TRANSCEIVER
if (dev->cap_regs)
iounmap(dev->cap_regs);
@@ -3055,13 +3007,6 @@ static void langwell_udc_remove(struct pci_dev *pdev)
if (dev->enabled)
pci_disable_device(pdev);
-#else
- if (dev->transceiver) {
- otg_put_transceiver(dev->transceiver);
- dev->transceiver = NULL;
- dev->lotg = NULL;
- }
-#endif
dev->cap_regs = NULL;
@@ -3072,9 +3017,7 @@ static void langwell_udc_remove(struct pci_dev *pdev)
device_remove_file(&pdev->dev, &dev_attr_langwell_udc);
device_remove_file(&pdev->dev, &dev_attr_remote_wakeup);
-#ifndef OTG_TRANSCEIVER
pci_set_drvdata(pdev, NULL);
-#endif
/* free dev, wait for the release() finished */
wait_for_completion(&done);
@@ -3089,9 +3032,7 @@ static int langwell_udc_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct langwell_udc *dev;
-#ifndef OTG_TRANSCEIVER
unsigned long resource, len;
-#endif
void __iomem *base = NULL;
size_t size;
int retval;
@@ -3109,16 +3050,6 @@ static int langwell_udc_probe(struct pci_dev *pdev,
dev->pdev = pdev;
dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
-#ifdef OTG_TRANSCEIVER
- /* PCI device is already enabled by otg_transceiver driver */
- dev->enabled = 1;
-
- /* mem region and register base */
- dev->region = 1;
- dev->transceiver = otg_get_transceiver();
- dev->lotg = otg_to_langwell(dev->transceiver);
- base = dev->lotg->regs;
-#else
pci_set_drvdata(pdev, dev);
/* now all the pci goodies ... */
@@ -3139,7 +3070,6 @@ static int langwell_udc_probe(struct pci_dev *pdev,
dev->region = 1;
base = ioremap_nocache(resource, len);
-#endif
if (base == NULL) {
dev_err(&dev->pdev->dev, "can't map memory\n");
retval = -EFAULT;
@@ -3163,7 +3093,6 @@ static int langwell_udc_probe(struct pci_dev *pdev,
dev->got_sram = 0;
dev_vdbg(&dev->pdev->dev, "dev->has_sram: %d\n", dev->has_sram);
-#ifndef OTG_TRANSCEIVER
/* enable SRAM caching if detected */
if (dev->has_sram && !dev->got_sram)
sram_init(dev);
@@ -3182,7 +3111,6 @@ static int langwell_udc_probe(struct pci_dev *pdev,
goto error;
}
dev->got_irq = 1;
-#endif
/* set stopped bit */
dev->stopped = 1;
@@ -3257,20 +3185,15 @@ static int langwell_udc_probe(struct pci_dev *pdev,
dev->remote_wakeup = 0;
dev->dev_status = 1 << USB_DEVICE_SELF_POWERED;
-#ifndef OTG_TRANSCEIVER
/* reset device controller */
langwell_udc_reset(dev);
-#endif
/* initialize gadget structure */
dev->gadget.ops = &langwell_ops; /* usb_gadget_ops */
dev->gadget.ep0 = &dev->ep[0].ep; /* gadget ep0 */
INIT_LIST_HEAD(&dev->gadget.ep_list); /* ep_list */
dev->gadget.speed = USB_SPEED_UNKNOWN; /* speed */
- dev->gadget.is_dualspeed = 1; /* support dual speed */
-#ifdef OTG_TRANSCEIVER
- dev->gadget.is_otg = 1; /* support otg mode */
-#endif
+ dev->gadget.max_speed = USB_SPEED_HIGH; /* support dual speed */
/* the "gadget" abstracts/virtualizes the controller */
dev_set_name(&dev->gadget.dev, "gadget");
@@ -3282,10 +3205,8 @@ static int langwell_udc_probe(struct pci_dev *pdev,
/* controller endpoints reinit */
eps_reinit(dev);
-#ifndef OTG_TRANSCEIVER
/* reset ep0 dQH and endptctrl */
ep0_reset(dev);
-#endif
/* create dTD dma_pool resource */
dev->dtd_pool = dma_pool_create("langwell_dtd",
@@ -3367,7 +3288,7 @@ static int langwell_udc_suspend(struct pci_dev *pdev, pm_message_t state)
spin_lock_irq(&dev->lock);
/* stop all usb activities */
- stop_activity(dev, dev->driver);
+ stop_activity(dev);
spin_unlock_irq(&dev->lock);
/* free dTD dma_pool and dQH */
@@ -3525,22 +3446,14 @@ static struct pci_driver langwell_pci_driver = {
static int __init init(void)
{
-#ifdef OTG_TRANSCEIVER
- return langwell_register_peripheral(&langwell_pci_driver);
-#else
return pci_register_driver(&langwell_pci_driver);
-#endif
}
module_init(init);
static void __exit cleanup(void)
{
-#ifdef OTG_TRANSCEIVER
- return langwell_unregister_peripheral(&langwell_pci_driver);
-#else
pci_unregister_driver(&langwell_pci_driver);
-#endif
}
module_exit(cleanup);
diff --git a/drivers/usb/gadget/langwell_udc.h b/drivers/usb/gadget/langwell_udc.h
index ef79e24..d6e78ac 100644
--- a/drivers/usb/gadget/langwell_udc.h
+++ b/drivers/usb/gadget/langwell_udc.h
@@ -8,7 +8,6 @@
*/
#include <linux/usb/langwell_udc.h>
-#include <linux/usb/langwell_otg.h>
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index 9aa1cbb..3608b3b 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -1472,7 +1472,7 @@ static int m66592_start(struct usb_gadget_driver *driver,
int retval;
if (!driver
- || driver->speed < USB_SPEED_HIGH
+ || driver->max_speed < USB_SPEED_HIGH
|| !bind
|| !driver->setup)
return -EINVAL;
@@ -1653,7 +1653,7 @@ static int __init m66592_probe(struct platform_device *pdev)
m66592->gadget.ops = &m66592_gadget_ops;
device_initialize(&m66592->gadget.dev);
dev_set_name(&m66592->gadget.dev, "gadget");
- m66592->gadget.is_dualspeed = 1;
+ m66592->gadget.max_speed = USB_SPEED_HIGH;
m66592->gadget.dev.parent = &pdev->dev;
m66592->gadget.dev.dma_mask = pdev->dev.dma_mask;
m66592->gadget.dev.release = pdev->dev.release;
diff --git a/drivers/usb/gadget/mv_udc.h b/drivers/usb/gadget/mv_udc.h
index daa75c1..34aadfa 100644
--- a/drivers/usb/gadget/mv_udc.h
+++ b/drivers/usb/gadget/mv_udc.h
@@ -180,7 +180,7 @@ struct mv_udc {
struct mv_cap_regs __iomem *cap_regs;
struct mv_op_regs __iomem *op_regs;
- unsigned int phy_regs;
+ void __iomem *phy_regs;
unsigned int max_eps;
struct mv_dqh *ep_dqh;
size_t ep_dqh_size;
@@ -211,11 +211,14 @@ struct mv_udc {
softconnected:1,
force_fs:1,
clock_gating:1,
- active:1;
+ active:1,
+ stopped:1; /* stop bit is set */
struct work_struct vbus_work;
struct workqueue_struct *qwork;
+ struct otg_transceiver *transceiver;
+
struct mv_usb_platform_data *pdata;
/* some SoCs have multiple clock sources for USB */
diff --git a/drivers/usb/gadget/mv_udc_core.c b/drivers/usb/gadget/mv_udc_core.c
index 8924121..f97e737 100644
--- a/drivers/usb/gadget/mv_udc_core.c
+++ b/drivers/usb/gadget/mv_udc_core.c
@@ -276,11 +276,12 @@ static void done(struct mv_ep *ep, struct mv_req *req, int status)
static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
{
- u32 tmp, epstatus, bit_pos, direction;
struct mv_udc *udc;
struct mv_dqh *dqh;
+ u32 bit_pos, direction;
+ u32 usbcmd, epstatus;
unsigned int loops;
- int readsafe, retval = 0;
+ int retval = 0;
udc = ep->udc;
direction = ep_dir(ep);
@@ -293,30 +294,18 @@ static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
lastreq = list_entry(ep->queue.prev, struct mv_req, queue);
lastreq->tail->dtd_next =
req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
- if (readl(&udc->op_regs->epprime) & bit_pos) {
- loops = LOOPS(PRIME_TIMEOUT);
- while (readl(&udc->op_regs->epprime) & bit_pos) {
- if (loops == 0) {
- retval = -ETIME;
- goto done;
- }
- udelay(LOOPS_USEC);
- loops--;
- }
- if (readl(&udc->op_regs->epstatus) & bit_pos)
- goto done;
- }
- readsafe = 0;
+
+ wmb();
+
+ if (readl(&udc->op_regs->epprime) & bit_pos)
+ goto done;
+
loops = LOOPS(READSAFE_TIMEOUT);
- while (readsafe == 0) {
- if (loops == 0) {
- retval = -ETIME;
- goto done;
- }
+ while (1) {
/* start with setting the semaphores */
- tmp = readl(&udc->op_regs->usbcmd);
- tmp |= USBCMD_ATDTW_TRIPWIRE_SET;
- writel(tmp, &udc->op_regs->usbcmd);
+ usbcmd = readl(&udc->op_regs->usbcmd);
+ usbcmd |= USBCMD_ATDTW_TRIPWIRE_SET;
+ writel(usbcmd, &udc->op_regs->usbcmd);
/* read the endpoint status */
epstatus = readl(&udc->op_regs->epstatus) & bit_pos;
@@ -329,98 +318,46 @@ static int queue_dtd(struct mv_ep *ep, struct mv_req *req)
* primed.
*/
if (readl(&udc->op_regs->usbcmd)
- & USBCMD_ATDTW_TRIPWIRE_SET) {
- readsafe = 1;
- }
+ & USBCMD_ATDTW_TRIPWIRE_SET)
+ break;
+
loops--;
+ if (loops == 0) {
+ dev_err(&udc->dev->dev,
+ "Timeout for ATDTW_TRIPWIRE...\n");
+ retval = -ETIME;
+ goto done;
+ }
udelay(LOOPS_USEC);
}
/* Clear the semaphore */
- tmp = readl(&udc->op_regs->usbcmd);
- tmp &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
- writel(tmp, &udc->op_regs->usbcmd);
-
- /* If endpoint is not active, we activate it now. */
- if (!epstatus) {
- if (direction == EP_DIR_IN) {
- struct mv_dtd *curr_dtd = dma_to_virt(
- &udc->dev->dev, dqh->curr_dtd_ptr);
-
- loops = LOOPS(DTD_TIMEOUT);
- while (curr_dtd->size_ioc_sts
- & DTD_STATUS_ACTIVE) {
- if (loops == 0) {
- retval = -ETIME;
- goto done;
- }
- loops--;
- udelay(LOOPS_USEC);
- }
- }
- /* No other transfers on the queue */
+ usbcmd = readl(&udc->op_regs->usbcmd);
+ usbcmd &= USBCMD_ATDTW_TRIPWIRE_CLEAR;
+ writel(usbcmd, &udc->op_regs->usbcmd);
- /* Write dQH next pointer and terminate bit to 0 */
- dqh->next_dtd_ptr = req->head->td_dma
- & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
- dqh->size_ioc_int_sts = 0;
+ if (epstatus)
+ goto done;
+ }
- /*
- * Ensure that updates to the QH will
- * occur before priming.
- */
- wmb();
+ /* Write dQH next pointer and terminate bit to 0 */
+ dqh->next_dtd_ptr = req->head->td_dma
+ & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
- /* Prime the Endpoint */
- writel(bit_pos, &udc->op_regs->epprime);
- }
- } else {
- /* Write dQH next pointer and terminate bit to 0 */
- dqh->next_dtd_ptr = req->head->td_dma
- & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
- dqh->size_ioc_int_sts = 0;
+ /* clear active and halt bit, in case set from a previous error */
+ dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
- /* Ensure that updates to the QH will occur before priming. */
- wmb();
+ /* Ensure that updates to the QH will occur before priming. */
+ wmb();
- /* Prime the Endpoint */
- writel(bit_pos, &udc->op_regs->epprime);
+ /* Prime the Endpoint */
+ writel(bit_pos, &udc->op_regs->epprime);
- if (direction == EP_DIR_IN) {
- /* FIXME add status check after prime the IN ep */
- int prime_again;
- u32 curr_dtd_ptr = dqh->curr_dtd_ptr;
-
- loops = LOOPS(DTD_TIMEOUT);
- prime_again = 0;
- while ((curr_dtd_ptr != req->head->td_dma)) {
- curr_dtd_ptr = dqh->curr_dtd_ptr;
- if (loops == 0) {
- dev_err(&udc->dev->dev,
- "failed to prime %s\n",
- ep->name);
- retval = -ETIME;
- goto done;
- }
- loops--;
- udelay(LOOPS_USEC);
-
- if (loops == (LOOPS(DTD_TIMEOUT) >> 2)) {
- if (prime_again)
- goto done;
- dev_info(&udc->dev->dev,
- "prime again\n");
- writel(bit_pos,
- &udc->op_regs->epprime);
- prime_again = 1;
- }
- }
- }
- }
done:
return retval;
}
+
static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length,
dma_addr_t *dma, int *is_last)
{
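
The reworked queue_dtd() above relies on the controller's add-dTD tripwire: set the ATDTW semaphore bit in usbcmd, sample the endpoint prime/status bits, and trust the sample only if the tripwire bit is still set afterwards, retrying until a timeout otherwise. The standalone simulation below illustrates that read-consistency loop; the register behaviour is faked and the bit value is an arbitrary placeholder:

#include <stdio.h>

#define ATDTW           (1u << 14)      /* illustrative tripwire bit */
#define MAX_LOOPS       100

static unsigned usbcmd, epstatus_reg;

/* fake "hardware": clears the tripwire a few times to force retries */
static unsigned read_usbcmd(int i)  { return i < 3 ? usbcmd & ~ATDTW : usbcmd; }
static unsigned read_epstatus(void) { return epstatus_reg; }

int main(void)
{
        unsigned epstatus = 0;
        int i;

        for (i = 0; i < MAX_LOOPS; i++) {
                usbcmd |= ATDTW;                /* set the semaphore */
                epstatus = read_epstatus();     /* sample endpoint status */

                /* the sample is only valid if the tripwire survived */
                if (read_usbcmd(i) & ATDTW)
                        break;
        }
        usbcmd &= ~ATDTW;                       /* clear the semaphore */

        if (i == MAX_LOOPS) {
                fprintf(stderr, "tripwire timeout\n");
                return 1;
        }
        printf("consistent epstatus sample: %#x after %d tries\n",
               epstatus, i + 1);
        return 0;
}
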
@@ -841,6 +778,27 @@ mv_ep_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
return 0;
}
+static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req)
+{
+ struct mv_dqh *dqh = ep->dqh;
+ u32 bit_pos;
+
+ /* Write dQH next pointer and terminate bit to 0 */
+ dqh->next_dtd_ptr = req->head->td_dma
+ & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
+
+ /* clear active and halt bit, in case set from a previous error */
+ dqh->size_ioc_int_sts &= ~(DTD_STATUS_ACTIVE | DTD_STATUS_HALTED);
+
+ /* Ensure that updates to the QH will occur before priming. */
+ wmb();
+
+ bit_pos = 1 << (((ep_dir(ep) == EP_DIR_OUT) ? 0 : 16) + ep->ep_num);
+
+ /* Prime the Endpoint */
+ writel(bit_pos, &ep->udc->op_regs->epprime);
+}
+
/* dequeues (cancels, unlinks) an I/O request from an endpoint */
static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
@@ -883,15 +841,13 @@ static int mv_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
/* The request isn't the last request in this ep queue */
if (req->queue.next != &ep->queue) {
- struct mv_dqh *qh;
struct mv_req *next_req;
- qh = ep->dqh;
- next_req = list_entry(req->queue.next, struct mv_req,
- queue);
+ next_req = list_entry(req->queue.next,
+ struct mv_req, queue);
/* Point the QH to the first TD of next request */
- writel((u32) next_req->head, &qh->curr_dtd_ptr);
+ mv_prime_ep(ep, next_req);
} else {
struct mv_dqh *qh;
@@ -1056,6 +1012,8 @@ static void udc_stop(struct mv_udc *udc)
USBINTR_PORT_CHANGE_DETECT_EN | USBINTR_RESET_EN);
writel(tmp, &udc->op_regs->usbintr);
+ udc->stopped = 1;
+
/* Reset the Run bit in the command register to stop VUSB */
tmp = readl(&udc->op_regs->usbcmd);
tmp &= ~USBCMD_RUN_STOP;
@@ -1072,6 +1030,8 @@ static void udc_start(struct mv_udc *udc)
/* Enable interrupts */
writel(usbintr, &udc->op_regs->usbintr);
+ udc->stopped = 0;
+
/* Set the Run bit in the command register */
writel(USBCMD_RUN_STOP, &udc->op_regs->usbcmd);
}
@@ -1134,11 +1094,11 @@ static int udc_reset(struct mv_udc *udc)
return 0;
}
-static int mv_udc_enable(struct mv_udc *udc)
+static int mv_udc_enable_internal(struct mv_udc *udc)
{
int retval;
- if (udc->clock_gating == 0 || udc->active)
+ if (udc->active)
return 0;
dev_dbg(&udc->dev->dev, "enable udc\n");
@@ -1157,9 +1117,17 @@ static int mv_udc_enable(struct mv_udc *udc)
return 0;
}
-static void mv_udc_disable(struct mv_udc *udc)
+static int mv_udc_enable(struct mv_udc *udc)
{
- if (udc->clock_gating && udc->active) {
+ if (udc->clock_gating)
+ return mv_udc_enable_internal(udc);
+
+ return 0;
+}
+
+static void mv_udc_disable_internal(struct mv_udc *udc)
+{
+ if (udc->active) {
dev_dbg(&udc->dev->dev, "disable udc\n");
if (udc->pdata->phy_deinit)
udc->pdata->phy_deinit(udc->phy_regs);
@@ -1168,6 +1136,12 @@ static void mv_udc_disable(struct mv_udc *udc)
}
}
+static void mv_udc_disable(struct mv_udc *udc)
+{
+ if (udc->clock_gating)
+ mv_udc_disable_internal(udc);
+}
+
static int mv_udc_get_frame(struct usb_gadget *gadget)
{
struct mv_udc *udc;
@@ -1178,7 +1152,7 @@ static int mv_udc_get_frame(struct usb_gadget *gadget)
udc = container_of(gadget, struct mv_udc, gadget);
- retval = readl(udc->op_regs->frindex) & USB_FRINDEX_MASKS;
+ retval = readl(&udc->op_regs->frindex) & USB_FRINDEX_MASKS;
return retval;
}
@@ -1212,10 +1186,11 @@ static int mv_udc_vbus_session(struct usb_gadget *gadget, int is_active)
udc = container_of(gadget, struct mv_udc, gadget);
spin_lock_irqsave(&udc->lock, flags);
+ udc->vbus_active = (is_active != 0);
+
dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
__func__, udc->softconnect, udc->vbus_active);
- udc->vbus_active = (is_active != 0);
if (udc->driver && udc->softconnect && udc->vbus_active) {
retval = mv_udc_enable(udc);
if (retval == 0) {
@@ -1244,10 +1219,11 @@ static int mv_udc_pullup(struct usb_gadget *gadget, int is_on)
udc = container_of(gadget, struct mv_udc, gadget);
spin_lock_irqsave(&udc->lock, flags);
+ udc->softconnect = (is_on != 0);
+
dev_dbg(&udc->dev->dev, "%s: softconnect %d, vbus_active %d\n",
__func__, udc->softconnect, udc->vbus_active);
- udc->softconnect = (is_on != 0);
if (udc->driver && udc->softconnect && udc->vbus_active) {
retval = mv_udc_enable(udc);
if (retval == 0) {
@@ -1407,6 +1383,20 @@ static int mv_udc_start(struct usb_gadget_driver *driver,
return retval;
}
+ if (udc->transceiver) {
+ retval = otg_set_peripheral(udc->transceiver, &udc->gadget);
+ if (retval) {
+ dev_err(&udc->dev->dev,
+ "unable to register peripheral to otg\n");
+ if (driver->unbind) {
+ driver->unbind(&udc->gadget);
+ udc->gadget.dev.driver = NULL;
+ udc->driver = NULL;
+ }
+ return retval;
+ }
+ }
+
/* pullup is always on */
mv_udc_pullup(&udc->gadget, 1);
@@ -2026,6 +2016,10 @@ static irqreturn_t mv_udc_irq(int irq, void *dev)
struct mv_udc *udc = (struct mv_udc *)dev;
u32 status, intr;
+ /* Ignore interrupts while the stopped bit is set */
+ if (udc->stopped)
+ return IRQ_NONE;
+
spin_lock(&udc->lock);
status = readl(&udc->op_regs->usbsts);
@@ -2109,7 +2103,12 @@ static int __devexit mv_udc_remove(struct platform_device *dev)
destroy_workqueue(udc->qwork);
}
- if (udc->pdata && udc->pdata->vbus && udc->clock_gating)
+ /*
+ * If the transceiver has been initialized,
+ * the vbus irq will not be requested in the udc driver.
+ */
+ if (udc->pdata && udc->pdata->vbus
+ && udc->clock_gating && udc->transceiver == NULL)
free_irq(udc->pdata->vbus->irq, &dev->dev);
/* free memory allocated in probe */
@@ -2129,11 +2128,9 @@ static int __devexit mv_udc_remove(struct platform_device *dev)
if (udc->cap_regs)
iounmap(udc->cap_regs);
- udc->cap_regs = NULL;
if (udc->phy_regs)
- iounmap((void *)udc->phy_regs);
- udc->phy_regs = 0;
+ iounmap(udc->phy_regs);
if (udc->status_req) {
kfree(udc->status_req->req.buf);
@@ -2182,6 +2179,11 @@ static int __devinit mv_udc_probe(struct platform_device *dev)
udc->dev = dev;
+#ifdef CONFIG_USB_OTG_UTILS
+ if (pdata->mode == MV_USB_MODE_OTG)
+ udc->transceiver = otg_get_transceiver();
+#endif
+
udc->clknum = pdata->clknum;
for (clk_i = 0; clk_i < udc->clknum; clk_i++) {
udc->clk[clk_i] = clk_get(&dev->dev, pdata->clkname[clk_i]);
@@ -2213,24 +2215,20 @@ static int __devinit mv_udc_probe(struct platform_device *dev)
goto err_iounmap_capreg;
}
- udc->phy_regs = (unsigned int)ioremap(r->start, resource_size(r));
- if (udc->phy_regs == 0) {
+ udc->phy_regs = ioremap(r->start, resource_size(r));
+ if (udc->phy_regs == NULL) {
dev_err(&dev->dev, "failed to map phy I/O memory\n");
retval = -EBUSY;
goto err_iounmap_capreg;
}
/* we will access the controller registers, so enable the clk */
- udc_clock_enable(udc);
- if (pdata->phy_init) {
- retval = pdata->phy_init(udc->phy_regs);
- if (retval) {
- dev_err(&dev->dev, "phy init error %d\n", retval);
- goto err_iounmap_phyreg;
- }
- }
+ retval = mv_udc_enable_internal(udc);
+ if (retval)
+ goto err_iounmap_phyreg;
- udc->op_regs = (struct mv_op_regs __iomem *)((u32)udc->cap_regs
+ udc->op_regs =
+ (struct mv_op_regs __iomem *)((unsigned long)udc->cap_regs
+ (readl(&udc->cap_regs->caplength_hciversion)
& CAPLENGTH_MASK));
udc->max_eps = readl(&udc->cap_regs->dccparams) & DCCPARAMS_DEN_MASK;
@@ -2312,7 +2310,7 @@ static int __devinit mv_udc_probe(struct platform_device *dev)
udc->gadget.ep0 = &udc->eps[0].ep; /* gadget ep0 */
INIT_LIST_HEAD(&udc->gadget.ep_list); /* ep_list */
udc->gadget.speed = USB_SPEED_UNKNOWN; /* speed */
- udc->gadget.is_dualspeed = 1; /* support dual speed */
+ udc->gadget.max_speed = USB_SPEED_HIGH; /* support dual speed */
/* the "gadget" abstracts/virtualizes the controller */
dev_set_name(&udc->gadget.dev, "gadget");
@@ -2328,7 +2326,9 @@ static int __devinit mv_udc_probe(struct platform_device *dev)
eps_init(udc);
/* VBUS detect: we can disable/enable clock on demand.*/
- if (pdata->vbus) {
+ if (udc->transceiver)
+ udc->clock_gating = 1;
+ else if (pdata->vbus) {
udc->clock_gating = 1;
retval = request_threaded_irq(pdata->vbus->irq, NULL,
mv_udc_vbus_irq, IRQF_ONESHOT, "vbus", udc);
@@ -2354,11 +2354,9 @@ static int __devinit mv_udc_probe(struct platform_device *dev)
* If not, it means that VBUS detection is not supported, we
* have to enable vbus active all the time to let controller work.
*/
- if (udc->clock_gating) {
- if (udc->pdata->phy_deinit)
- udc->pdata->phy_deinit(udc->phy_regs);
- udc_clock_disable(udc);
- } else
+ if (udc->clock_gating)
+ mv_udc_disable_internal(udc);
+ else
udc->vbus_active = 1;
retval = usb_add_gadget_udc(&dev->dev, &udc->gadget);
@@ -2371,7 +2369,8 @@ static int __devinit mv_udc_probe(struct platform_device *dev)
return 0;
err_unregister:
- if (udc->pdata && udc->pdata->vbus && udc->clock_gating)
+ if (udc->pdata && udc->pdata->vbus
+ && udc->clock_gating && udc->transceiver == NULL)
free_irq(pdata->vbus->irq, &dev->dev);
device_unregister(&udc->gadget.dev);
err_free_irq:
@@ -2387,11 +2386,9 @@ err_free_dma:
dma_free_coherent(&dev->dev, udc->ep_dqh_size,
udc->ep_dqh, udc->ep_dqh_dma);
err_disable_clock:
- if (udc->pdata->phy_deinit)
- udc->pdata->phy_deinit(udc->phy_regs);
- udc_clock_disable(udc);
+ mv_udc_disable_internal(udc);
err_iounmap_phyreg:
- iounmap((void *)udc->phy_regs);
+ iounmap(udc->phy_regs);
err_iounmap_capreg:
iounmap(udc->cap_regs);
err_put_clk:
@@ -2407,7 +2404,30 @@ static int mv_udc_suspend(struct device *_dev)
{
struct mv_udc *udc = the_controller;
- udc_stop(udc);
+ /* if OTG is enabled, the following will be done in the OTG driver */
+ if (udc->transceiver)
+ return 0;
+
+ if (udc->pdata->vbus && udc->pdata->vbus->poll)
+ if (udc->pdata->vbus->poll() == VBUS_HIGH) {
+ dev_info(&udc->dev->dev, "USB cable is connected!\n");
+ return -EAGAIN;
+ }
+
+ /*
+ * The udc can suspend only when the cable is unplugged,
+ * so there is no need to handle the clock_gating == 1 case here.
+ */
+ if (!udc->clock_gating) {
+ udc_stop(udc);
+
+ spin_lock_irq(&udc->lock);
+ /* stop all usb activities */
+ stop_activity(udc, udc->driver);
+ spin_unlock_irq(&udc->lock);
+
+ mv_udc_disable_internal(udc);
+ }
return 0;
}
@@ -2417,20 +2437,22 @@ static int mv_udc_resume(struct device *_dev)
struct mv_udc *udc = the_controller;
int retval;
- if (udc->pdata->phy_init) {
- retval = udc->pdata->phy_init(udc->phy_regs);
- if (retval) {
- dev_err(&udc->dev->dev,
- "init phy error %d when resume back\n",
- retval);
+ /* if OTG is enabled, the following will be done in the OTG driver */
+ if (udc->transceiver)
+ return 0;
+
+ if (!udc->clock_gating) {
+ retval = mv_udc_enable_internal(udc);
+ if (retval)
return retval;
+
+ if (udc->driver && udc->softconnect) {
+ udc_reset(udc);
+ ep0_reset(udc);
+ udc_start(udc);
}
}
- udc_reset(udc);
- ep0_reset(udc);
- udc_start(udc);
-
return 0;
}
@@ -2457,30 +2479,16 @@ static struct platform_driver udc_driver = {
.shutdown = mv_udc_shutdown,
.driver = {
.owner = THIS_MODULE,
- .name = "pxa-u2o",
+ .name = "mv-udc",
#ifdef CONFIG_PM
.pm = &mv_udc_pm_ops,
#endif
},
};
-MODULE_ALIAS("platform:pxa-u2o");
+module_platform_driver(udc_driver);
+MODULE_ALIAS("platform:mv-udc");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("Chao Xie <chao.xie@marvell.com>");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
-
-
-static int __init init(void)
-{
- return platform_driver_register(&udc_driver);
-}
-module_init(init);
-
-
-static void __exit cleanup(void)
-{
- platform_driver_unregister(&udc_driver);
-}
-module_exit(cleanup);
-
diff --git a/drivers/usb/gadget/net2272.c b/drivers/usb/gadget/net2272.c
index d1b7636..7322d29 100644
--- a/drivers/usb/gadget/net2272.c
+++ b/drivers/usb/gadget/net2272.c
@@ -69,7 +69,7 @@ static const char * const ep_name[] = {
*
* If use_dma is disabled, pio will be used instead.
*/
-static int use_dma = 0;
+static bool use_dma = 0;
module_param(use_dma, bool, 0644);
/*
@@ -1459,7 +1459,7 @@ static int net2272_start(struct usb_gadget *_gadget,
unsigned i;
if (!driver || !driver->unbind || !driver->setup ||
- driver->speed != USB_SPEED_HIGH)
+ driver->max_speed != USB_SPEED_HIGH)
return -EINVAL;
dev = container_of(_gadget, struct net2272, gadget);
@@ -2235,7 +2235,7 @@ net2272_probe_init(struct device *dev, unsigned int irq)
ret->irq = irq;
ret->dev = dev;
ret->gadget.ops = &net2272_ops;
- ret->gadget.is_dualspeed = 1;
+ ret->gadget.max_speed = USB_SPEED_HIGH;
/* the "gadget" abstracts/virtualizes the controller */
dev_set_name(&ret->gadget.dev, "gadget");
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index da2b9d0..cdedd13 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -90,8 +90,8 @@ static const char *const ep_name [] = {
* Some gadget drivers work better with the dma support here than others.
* These two parameters let you use PIO or more aggressive DMA.
*/
-static int use_dma = 1;
-static int use_dma_chaining = 0;
+static bool use_dma = 1;
+static bool use_dma_chaining = 0;
/* "modprobe net2280 use_dma=n" etc */
module_param (use_dma, bool, S_IRUGO);
@@ -112,7 +112,7 @@ module_param (fifo_mode, ushort, 0644);
* USB suspend requests will be ignored. This is acceptable for
* self-powered devices
*/
-static int enable_suspend = 0;
+static bool enable_suspend = 0;
/* "modprobe net2280 enable_suspend=1" etc */
module_param (enable_suspend, bool, S_IRUGO);
@@ -1881,7 +1881,7 @@ static int net2280_start(struct usb_gadget *_gadget,
* (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
* "must not be used in normal operation"
*/
- if (!driver || driver->speed < USB_SPEED_HIGH
+ if (!driver || driver->max_speed < USB_SPEED_HIGH
|| !driver->setup)
return -EINVAL;
@@ -2698,7 +2698,7 @@ static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init (&dev->lock);
dev->pdev = pdev;
dev->gadget.ops = &net2280_ops;
- dev->gadget.is_dualspeed = 1;
+ dev->gadget.max_speed = USB_SPEED_HIGH;
/* the "gadget" abstracts/virtualizes the controller */
dev_set_name(&dev->gadget.dev, "gadget");
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 788989a..576cd85 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -98,7 +98,7 @@ module_param (fifo_mode, uint, 0);
MODULE_PARM_DESC (fifo_mode, "endpoint configuration");
#ifdef USE_DMA
-static unsigned use_dma = 1;
+static bool use_dma = 1;
/* "modprobe omap_udc use_dma=y", or else as a kernel
* boot parameter "omap_udc:use_dma=y"
@@ -2110,7 +2110,7 @@ static int omap_udc_start(struct usb_gadget_driver *driver,
return -ENODEV;
if (!driver
// FIXME if otg, check: driver->is_otg
- || driver->speed < USB_SPEED_FULL
+ || driver->max_speed < USB_SPEED_FULL
|| !bind || !driver->setup)
return -EINVAL;
@@ -2676,6 +2676,7 @@ omap_udc_setup(struct platform_device *odev, struct otg_transceiver *xceiv)
INIT_LIST_HEAD(&udc->gadget.ep_list);
INIT_LIST_HEAD(&udc->iso);
udc->gadget.speed = USB_SPEED_UNKNOWN;
+ udc->gadget.max_speed = USB_SPEED_FULL;
udc->gadget.name = driver_name;
device_initialize(&udc->gadget.dev);
diff --git a/drivers/usb/gadget/pch_udc.c b/drivers/usb/gadget/pch_udc.c
index 5048a0c..a3fcaae 100644
--- a/drivers/usb/gadget/pch_udc.c
+++ b/drivers/usb/gadget/pch_udc.c
@@ -359,7 +359,7 @@ struct pch_udc_dev {
static const char ep0_string[] = "ep0in";
static DEFINE_SPINLOCK(udc_stall_spinlock); /* stall spin lock */
struct pch_udc_dev *pch_udc; /* pointer to device object */
-static int speed_fs;
+static bool speed_fs;
module_param_named(speed_fs, speed_fs, bool, S_IRUGO);
MODULE_PARM_DESC(speed_fs, "true for Full speed operation");
@@ -2693,7 +2693,7 @@ static int pch_udc_start(struct usb_gadget_driver *driver,
struct pch_udc_dev *dev = pch_udc;
int retval;
- if (!driver || (driver->speed == USB_SPEED_UNKNOWN) || !bind ||
+ if (!driver || (driver->max_speed == USB_SPEED_UNKNOWN) || !bind ||
!driver->setup || !driver->unbind || !driver->disconnect) {
dev_err(&dev->pdev->dev,
"%s: invalid driver parameter\n", __func__);
@@ -2941,7 +2941,7 @@ static int pch_udc_probe(struct pci_dev *pdev,
dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
dev->gadget.dev.release = gadget_release;
dev->gadget.name = KBUILD_MODNAME;
- dev->gadget.is_dualspeed = 1;
+ dev->gadget.max_speed = USB_SPEED_HIGH;
retval = device_register(&dev->gadget.dev);
if (retval)
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 65a8834..d83134b 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -1141,7 +1141,7 @@ printer_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
break;
#ifdef CONFIG_USB_GADGET_DUALSPEED
case USB_DT_DEVICE_QUALIFIER:
- if (!gadget->is_dualspeed)
+ if (!gadget_is_dualspeed(gadget))
break;
/*
* assumes ep0 uses the same value for both
@@ -1155,7 +1155,7 @@ printer_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
break;
case USB_DT_OTHER_SPEED_CONFIG:
- if (!gadget->is_dualspeed)
+ if (!gadget_is_dualspeed(gadget))
break;
/* FALLTHROUGH */
#endif /* CONFIG_USB_GADGET_DUALSPEED */
@@ -1535,7 +1535,7 @@ fail:
/*-------------------------------------------------------------------------*/
static struct usb_gadget_driver printer_driver = {
- .speed = DEVSPEED,
+ .max_speed = DEVSPEED,
.function = (char *) driver_desc,
.unbind = printer_unbind,
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index c090a7e..dd47063 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -1264,7 +1264,7 @@ static int pxa25x_start(struct usb_gadget_driver *driver,
int retval;
if (!driver
- || driver->speed < USB_SPEED_FULL
+ || driver->max_speed < USB_SPEED_FULL
|| !bind
|| !driver->disconnect
|| !driver->setup)
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index 18b6b09..f4c44eb 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -1807,7 +1807,7 @@ static int pxa27x_udc_start(struct usb_gadget_driver *driver,
struct pxa_udc *udc = the_controller;
int retval;
- if (!driver || driver->speed < USB_SPEED_FULL || !bind
+ if (!driver || driver->max_speed < USB_SPEED_FULL || !bind
|| !driver->disconnect || !driver->setup)
return -EINVAL;
if (!udc)
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index fc719a3..f5b8d21 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -1746,7 +1746,7 @@ static int r8a66597_start(struct usb_gadget *gadget,
struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
if (!driver
- || driver->speed < USB_SPEED_HIGH
+ || driver->max_speed < USB_SPEED_HIGH
|| !driver->setup)
return -EINVAL;
if (!r8a66597)
@@ -1911,7 +1911,7 @@ static int __init r8a66597_probe(struct platform_device *pdev)
r8a66597->gadget.ops = &r8a66597_gadget_ops;
dev_set_name(&r8a66597->gadget.dev, "gadget");
- r8a66597->gadget.is_dualspeed = 1;
+ r8a66597->gadget.max_speed = USB_SPEED_HIGH;
r8a66597->gadget.dev.parent = &pdev->dev;
r8a66597->gadget.dev.dma_mask = pdev->dev.dma_mask;
r8a66597->gadget.dev.release = pdev->dev.release;
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index b314482..69295ba 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -2586,7 +2586,7 @@ static int s3c_hsotg_start(struct usb_gadget_driver *driver,
return -EINVAL;
}
- if (driver->speed < USB_SPEED_FULL)
+ if (driver->max_speed < USB_SPEED_FULL)
dev_err(hsotg->dev, "%s: bad speed\n", __func__);
if (!bind || !driver->setup) {
@@ -3362,7 +3362,7 @@ static int __devinit s3c_hsotg_probe(struct platform_device *pdev)
dev_set_name(&hsotg->gadget.dev, "gadget");
- hsotg->gadget.is_dualspeed = 1;
+ hsotg->gadget.max_speed = USB_SPEED_HIGH;
hsotg->gadget.ops = &s3c_hsotg_gadget_ops;
hsotg->gadget.name = dev_name(dev);
@@ -3467,18 +3467,7 @@ static struct platform_driver s3c_hsotg_driver = {
.resume = s3c_hsotg_resume,
};
-static int __init s3c_hsotg_modinit(void)
-{
- return platform_driver_register(&s3c_hsotg_driver);
-}
-
-static void __exit s3c_hsotg_modexit(void)
-{
- platform_driver_unregister(&s3c_hsotg_driver);
-}
-
-module_init(s3c_hsotg_modinit);
-module_exit(s3c_hsotg_modexit);
+module_platform_driver(s3c_hsotg_driver);
MODULE_DESCRIPTION("Samsung S3C USB High-speed/OtG device");
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c
index 20a553b..df8661d 100644
--- a/drivers/usb/gadget/s3c-hsudc.c
+++ b/drivers/usb/gadget/s3c-hsudc.c
@@ -28,9 +28,10 @@
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
#include <linux/prefetch.h>
+#include <linux/platform_data/s3c-hsudc.h>
+#include <linux/regulator/consumer.h>
#include <mach/regs-s3c2443-clock.h>
-#include <plat/udc.h>
#define S3C_HSUDC_REG(x) (x)
@@ -87,6 +88,12 @@
#define DATA_STATE_XMIT (1)
#define DATA_STATE_RECV (2)
+static const char * const s3c_hsudc_supply_names[] = {
+ "vdda", /* analog phy supply, 3.3V */
+ "vddi", /* digital phy supply, 1.2V */
+ "vddosc", /* oscillator supply, 1.8V - 3.3V */
+};
+
/**
* struct s3c_hsudc_ep - Endpoint representation used by driver.
* @ep: USB gadget layer representation of device endpoint.
@@ -139,6 +146,7 @@ struct s3c_hsudc {
struct device *dev;
struct s3c24xx_hsudc_platdata *pd;
struct otg_transceiver *transceiver;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(s3c_hsudc_supply_names)];
spinlock_t lock;
void __iomem *regs;
struct resource *mem_rsrc;
@@ -153,7 +161,6 @@ struct s3c_hsudc {
#define ep_index(_ep) ((_ep)->bEndpointAddress & \
USB_ENDPOINT_NUMBER_MASK)
-static struct s3c_hsudc *the_controller;
static const char driver_name[] = "s3c-udc";
static const char ep0name[] = "ep0-control";
@@ -282,8 +289,7 @@ static void s3c_hsudc_nuke_ep(struct s3c_hsudc_ep *hsep, int status)
* All the endpoints are stopped and any pending transfer requests if any on
* the endpoint are terminated.
*/
-static void s3c_hsudc_stop_activity(struct s3c_hsudc *hsudc,
- struct usb_gadget_driver *driver)
+static void s3c_hsudc_stop_activity(struct s3c_hsudc *hsudc)
{
struct s3c_hsudc_ep *hsep;
int epnum;
@@ -295,10 +301,6 @@ static void s3c_hsudc_stop_activity(struct s3c_hsudc *hsudc,
hsep->stopped = 1;
s3c_hsudc_nuke_ep(hsep, -ESHUTDOWN);
}
-
- spin_unlock(&hsudc->lock);
- driver->disconnect(&hsudc->gadget);
- spin_lock(&hsudc->lock);
}
/**
@@ -1135,16 +1137,15 @@ static irqreturn_t s3c_hsudc_irq(int irq, void *_dev)
return IRQ_HANDLED;
}
-static int s3c_hsudc_start(struct usb_gadget_driver *driver,
- int (*bind)(struct usb_gadget *))
+static int s3c_hsudc_start(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
{
- struct s3c_hsudc *hsudc = the_controller;
+ struct s3c_hsudc *hsudc = to_hsudc(gadget);
int ret;
if (!driver
- || driver->speed < USB_SPEED_FULL
- || !bind
- || !driver->unbind || !driver->disconnect || !driver->setup)
+ || driver->max_speed < USB_SPEED_FULL
+ || !driver->setup)
return -EINVAL;
if (!hsudc)
@@ -1155,21 +1156,12 @@ static int s3c_hsudc_start(struct usb_gadget_driver *driver,
hsudc->driver = driver;
hsudc->gadget.dev.driver = &driver->driver;
- hsudc->gadget.speed = USB_SPEED_UNKNOWN;
- ret = device_add(&hsudc->gadget.dev);
- if (ret) {
- dev_err(hsudc->dev, "failed to probe gadget device");
- return ret;
- }
- ret = bind(&hsudc->gadget);
- if (ret) {
- dev_err(hsudc->dev, "%s: bind failed\n", hsudc->gadget.name);
- device_del(&hsudc->gadget.dev);
-
- hsudc->driver = NULL;
- hsudc->gadget.dev.driver = NULL;
- return ret;
+ ret = regulator_bulk_enable(ARRAY_SIZE(hsudc->supplies),
+ hsudc->supplies);
+ if (ret != 0) {
+ dev_err(hsudc->dev, "failed to enable supplies: %d\n", ret);
+ goto err_supplies;
}
/* connect to bus through transceiver */
@@ -1178,13 +1170,7 @@ static int s3c_hsudc_start(struct usb_gadget_driver *driver,
if (ret) {
dev_err(hsudc->dev, "%s: can't bind to transceiver\n",
hsudc->gadget.name);
- driver->unbind(&hsudc->gadget);
-
- device_del(&hsudc->gadget.dev);
-
- hsudc->driver = NULL;
- hsudc->gadget.dev.driver = NULL;
- return ret;
+ goto err_otg;
}
}
@@ -1197,34 +1183,43 @@ static int s3c_hsudc_start(struct usb_gadget_driver *driver,
hsudc->pd->gpio_init();
return 0;
+err_otg:
+ regulator_bulk_disable(ARRAY_SIZE(hsudc->supplies), hsudc->supplies);
+err_supplies:
+ hsudc->driver = NULL;
+ hsudc->gadget.dev.driver = NULL;
+ return ret;
}
-static int s3c_hsudc_stop(struct usb_gadget_driver *driver)
+static int s3c_hsudc_stop(struct usb_gadget *gadget,
+ struct usb_gadget_driver *driver)
{
- struct s3c_hsudc *hsudc = the_controller;
+ struct s3c_hsudc *hsudc = to_hsudc(gadget);
unsigned long flags;
if (!hsudc)
return -ENODEV;
- if (!driver || driver != hsudc->driver || !driver->unbind)
+ if (!driver || driver != hsudc->driver)
return -EINVAL;
spin_lock_irqsave(&hsudc->lock, flags);
- hsudc->driver = 0;
+ hsudc->driver = NULL;
+ hsudc->gadget.dev.driver = NULL;
+ hsudc->gadget.speed = USB_SPEED_UNKNOWN;
s3c_hsudc_uninit_phy();
if (hsudc->pd->gpio_uninit)
hsudc->pd->gpio_uninit();
- s3c_hsudc_stop_activity(hsudc, driver);
+ s3c_hsudc_stop_activity(hsudc);
spin_unlock_irqrestore(&hsudc->lock, flags);
if (hsudc->transceiver)
(void) otg_set_peripheral(hsudc->transceiver, NULL);
- driver->unbind(&hsudc->gadget);
- device_del(&hsudc->gadget.dev);
disable_irq(hsudc->irq);
+ regulator_bulk_disable(ARRAY_SIZE(hsudc->supplies), hsudc->supplies);
+
dev_info(hsudc->dev, "unregistered gadget driver '%s'\n",
driver->driver.name);
return 0;
@@ -1242,7 +1237,7 @@ static int s3c_hsudc_gadget_getframe(struct usb_gadget *gadget)
static int s3c_hsudc_vbus_draw(struct usb_gadget *gadget, unsigned mA)
{
- struct s3c_hsudc *hsudc = the_controller;
+ struct s3c_hsudc *hsudc = to_hsudc(gadget);
if (!hsudc)
return -ENODEV;
@@ -1255,18 +1250,18 @@ static int s3c_hsudc_vbus_draw(struct usb_gadget *gadget, unsigned mA)
static struct usb_gadget_ops s3c_hsudc_gadget_ops = {
.get_frame = s3c_hsudc_gadget_getframe,
- .start = s3c_hsudc_start,
- .stop = s3c_hsudc_stop,
+ .udc_start = s3c_hsudc_start,
+ .udc_stop = s3c_hsudc_stop,
.vbus_draw = s3c_hsudc_vbus_draw,
};
-static int s3c_hsudc_probe(struct platform_device *pdev)
+static int __devinit s3c_hsudc_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct s3c_hsudc *hsudc;
struct s3c24xx_hsudc_platdata *pd = pdev->dev.platform_data;
- int ret;
+ int ret, i;
hsudc = kzalloc(sizeof(struct s3c_hsudc) +
sizeof(struct s3c_hsudc_ep) * pd->epnum,
@@ -1276,13 +1271,22 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
return -ENOMEM;
}
- the_controller = hsudc;
platform_set_drvdata(pdev, dev);
hsudc->dev = dev;
hsudc->pd = pdev->dev.platform_data;
hsudc->transceiver = otg_get_transceiver();
+ for (i = 0; i < ARRAY_SIZE(hsudc->supplies); i++)
+ hsudc->supplies[i].supply = s3c_hsudc_supply_names[i];
+
+ ret = regulator_bulk_get(dev, ARRAY_SIZE(hsudc->supplies),
+ hsudc->supplies);
+ if (ret != 0) {
+ dev_err(dev, "failed to request supplies: %d\n", ret);
+ goto err_supplies;
+ }
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(dev, "unable to obtain driver resource data\n");
@@ -1307,10 +1311,9 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
spin_lock_init(&hsudc->lock);
- device_initialize(&hsudc->gadget.dev);
dev_set_name(&hsudc->gadget.dev, "gadget");
- hsudc->gadget.is_dualspeed = 1;
+ hsudc->gadget.max_speed = USB_SPEED_HIGH;
hsudc->gadget.ops = &s3c_hsudc_gadget_ops;
hsudc->gadget.name = dev_name(dev);
hsudc->gadget.dev.parent = dev;
@@ -1319,6 +1322,7 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
hsudc->gadget.is_otg = 0;
hsudc->gadget.is_a_peripheral = 0;
+ hsudc->gadget.speed = USB_SPEED_UNKNOWN;
s3c_hsudc_setup_ep(hsudc);
@@ -1348,12 +1352,20 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
disable_irq(hsudc->irq);
local_irq_enable();
+ ret = device_register(&hsudc->gadget.dev);
+ if (ret) {
+ put_device(&hsudc->gadget.dev);
+ goto err_add_device;
+ }
+
ret = usb_add_gadget_udc(&pdev->dev, &hsudc->gadget);
if (ret)
goto err_add_udc;
return 0;
err_add_udc:
+ device_unregister(&hsudc->gadget.dev);
+err_add_device:
clk_disable(hsudc->uclk);
clk_put(hsudc->uclk);
err_clk:
@@ -1362,10 +1374,13 @@ err_irq:
iounmap(hsudc->regs);
err_remap:
- release_resource(hsudc->mem_rsrc);
- kfree(hsudc->mem_rsrc);
-
+ release_mem_region(res->start, resource_size(res));
err_res:
+ if (hsudc->transceiver)
+ otg_put_transceiver(hsudc->transceiver);
+
+ regulator_bulk_free(ARRAY_SIZE(hsudc->supplies), hsudc->supplies);
+err_supplies:
kfree(hsudc);
return ret;
}
@@ -1377,21 +1392,10 @@ static struct platform_driver s3c_hsudc_driver = {
},
.probe = s3c_hsudc_probe,
};
-MODULE_ALIAS("platform:s3c-hsudc");
-
-static int __init s3c_hsudc_modinit(void)
-{
- return platform_driver_register(&s3c_hsudc_driver);
-}
-static void __exit s3c_hsudc_modexit(void)
-{
- platform_driver_unregister(&s3c_hsudc_driver);
-}
-
-module_init(s3c_hsudc_modinit);
-module_exit(s3c_hsudc_modexit);
+module_platform_driver(s3c_hsudc_driver);
MODULE_DESCRIPTION("Samsung S3C24XX USB high-speed controller driver");
MODULE_AUTHOR("Thomas Abraham <thomas.ab@samsung.com>");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:s3c-hsudc");
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index b864377..3f87cb9 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -3,7 +3,7 @@
*
* Samsung S3C24xx series on-chip full speed USB device controllers
*
- * Copyright (C) 2004-2007 Herbert Ptzl - Arnaud Patard
+ * Copyright (C) 2004-2007 Herbert Pötzl - Arnaud Patard
* Additional cleanups by Ben Dooks <ben-linux@fluff.org>
*
* This program is free software; you can redistribute it and/or modify
@@ -51,7 +51,7 @@
#define DRIVER_DESC "S3C2410 USB Device Controller Gadget"
#define DRIVER_VERSION "29 Apr 2007"
-#define DRIVER_AUTHOR "Herbert Ptzl <herbert@13thfloor.at>, " \
+#define DRIVER_AUTHOR "Herbert Pötzl <herbert@13thfloor.at>, " \
"Arnaud Patard <arnaud.patard@rtp-net.org>"
static const char gadget_name[] = "s3c2410_udc";
@@ -1683,9 +1683,9 @@ static int s3c2410_udc_start(struct usb_gadget_driver *driver,
if (udc->driver)
return -EBUSY;
- if (!bind || !driver->setup || driver->speed < USB_SPEED_FULL) {
+ if (!bind || !driver->setup || driver->max_speed < USB_SPEED_FULL) {
printk(KERN_ERR "Invalid driver: bind %p setup %p speed %d\n",
- bind, driver->setup, driver->speed);
+ bind, driver->setup, driver->max_speed);
return -EINVAL;
}
#if defined(MODULE)
diff --git a/drivers/usb/gadget/s3c2410_udc.h b/drivers/usb/gadget/s3c2410_udc.h
index a48f619..1653bae 100644
--- a/drivers/usb/gadget/s3c2410_udc.h
+++ b/drivers/usb/gadget/s3c2410_udc.h
@@ -2,7 +2,7 @@
* linux/drivers/usb/gadget/s3c2410_udc.h
* Samsung on-chip full speed USB device controllers
*
- * Copyright (C) 2004-2007 Herbert Ptzl - Arnaud Patard
+ * Copyright (C) 2004-2007 Herbert Pötzl - Arnaud Patard
* Additional cleanups by Ben Dooks <ben-linux@fluff.org>
*
* This program is free software; you can redistribute it and/or modify
diff --git a/drivers/usb/gadget/serial.c b/drivers/usb/gadget/serial.c
index ed1b816..ad9e5b2 100644
--- a/drivers/usb/gadget/serial.c
+++ b/drivers/usb/gadget/serial.c
@@ -123,11 +123,11 @@ MODULE_AUTHOR("Al Borchers");
MODULE_AUTHOR("David Brownell");
MODULE_LICENSE("GPL");
-static int use_acm = true;
+static bool use_acm = true;
module_param(use_acm, bool, 0);
MODULE_PARM_DESC(use_acm, "Use CDC ACM, default=yes");
-static int use_obex = false;
+static bool use_obex = false;
module_param(use_obex, bool, 0);
MODULE_PARM_DESC(use_obex, "Use CDC OBEX, default=no");
diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c
index c7f291a..85ea14e 100644
--- a/drivers/usb/gadget/storage_common.c
+++ b/drivers/usb/gadget/storage_common.c
@@ -598,16 +598,16 @@ static __maybe_unused struct usb_ss_cap_descriptor fsg_ss_cap_desc = {
| USB_5GBPS_OPERATION),
.bFunctionalitySupport = USB_LOW_SPEED_OPERATION,
.bU1devExitLat = USB_DEFAULT_U1_DEV_EXIT_LAT,
- .bU2DevExitLat = USB_DEFAULT_U2_DEV_EXIT_LAT,
+ .bU2DevExitLat = cpu_to_le16(USB_DEFAULT_U2_DEV_EXIT_LAT),
};
static __maybe_unused struct usb_bos_descriptor fsg_bos_desc = {
.bLength = USB_DT_BOS_SIZE,
.bDescriptorType = USB_DT_BOS,
- .wTotalLength = USB_DT_BOS_SIZE
+ .wTotalLength = cpu_to_le16(USB_DT_BOS_SIZE
+ USB_DT_USB_EXT_CAP_SIZE
- + USB_DT_USB_SS_CAP_SIZE,
+ + USB_DT_USB_SS_CAP_SIZE),
.bNumDeviceCaps = 2,
};
diff --git a/drivers/usb/gadget/udc-core.c b/drivers/usb/gadget/udc-core.c
index 6939e17..0b0d12c 100644
--- a/drivers/usb/gadget/udc-core.c
+++ b/drivers/usb/gadget/udc-core.c
@@ -371,14 +371,28 @@ static ssize_t usb_udc_softconn_store(struct device *dev,
}
static DEVICE_ATTR(soft_connect, S_IWUSR, NULL, usb_udc_softconn_store);
-static ssize_t usb_udc_speed_show(struct device *dev,
+#define USB_UDC_SPEED_ATTR(name, param) \
+ssize_t usb_udc_##param##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct usb_udc *udc = container_of(dev, struct usb_udc, dev); \
+ return snprintf(buf, PAGE_SIZE, "%s\n", \
+ usb_speed_string(udc->gadget->param)); \
+} \
+static DEVICE_ATTR(name, S_IRUSR, usb_udc_##param##_show, NULL)
+
+static USB_UDC_SPEED_ATTR(current_speed, speed);
+static USB_UDC_SPEED_ATTR(maximum_speed, max_speed);
+
+/* TODO: Scheduled for removal in 3.8. */
+static ssize_t usb_udc_is_dualspeed_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
- return snprintf(buf, PAGE_SIZE, "%s\n",
- usb_speed_string(udc->gadget->speed));
+ return snprintf(buf, PAGE_SIZE, "%d\n",
+ gadget_is_dualspeed(udc->gadget));
}
-static DEVICE_ATTR(speed, S_IRUGO, usb_udc_speed_show, NULL);
+static DEVICE_ATTR(is_dualspeed, S_IRUSR, usb_udc_is_dualspeed_show, NULL);
#define USB_UDC_ATTR(name) \
ssize_t usb_udc_##name##_show(struct device *dev, \
@@ -391,7 +405,6 @@ ssize_t usb_udc_##name##_show(struct device *dev, \
} \
static DEVICE_ATTR(name, S_IRUGO, usb_udc_##name##_show, NULL)
-static USB_UDC_ATTR(is_dualspeed);
static USB_UDC_ATTR(is_otg);
static USB_UDC_ATTR(is_a_peripheral);
static USB_UDC_ATTR(b_hnp_enable);
@@ -401,7 +414,8 @@ static USB_UDC_ATTR(a_alt_hnp_support);
static struct attribute *usb_udc_attrs[] = {
&dev_attr_srp.attr,
&dev_attr_soft_connect.attr,
- &dev_attr_speed.attr,
+ &dev_attr_current_speed.attr,
+ &dev_attr_maximum_speed.attr,
&dev_attr_is_dualspeed.attr,
&dev_attr_is_otg.attr,
diff --git a/drivers/usb/gadget/usbstring.c b/drivers/usb/gadget/usbstring.c
index 58c4d37..4d25b90 100644
--- a/drivers/usb/gadget/usbstring.c
+++ b/drivers/usb/gadget/usbstring.c
@@ -13,82 +13,17 @@
#include <linux/string.h>
#include <linux/device.h>
#include <linux/init.h>
+#include <linux/nls.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
-#include <asm/unaligned.h>
-
-
-static int utf8_to_utf16le(const char *s, __le16 *cp, unsigned len)
-{
- int count = 0;
- u8 c;
- u16 uchar;
-
- /* this insists on correct encodings, though not minimal ones.
- * BUT it currently rejects legit 4-byte UTF-8 code points,
- * which need surrogate pairs. (Unicode 3.1 can use them.)
- */
- while (len != 0 && (c = (u8) *s++) != 0) {
- if (unlikely(c & 0x80)) {
- // 2-byte sequence:
- // 00000yyyyyxxxxxx = 110yyyyy 10xxxxxx
- if ((c & 0xe0) == 0xc0) {
- uchar = (c & 0x1f) << 6;
-
- c = (u8) *s++;
- if ((c & 0xc0) != 0x80)
- goto fail;
- c &= 0x3f;
- uchar |= c;
-
- // 3-byte sequence (most CJKV characters):
- // zzzzyyyyyyxxxxxx = 1110zzzz 10yyyyyy 10xxxxxx
- } else if ((c & 0xf0) == 0xe0) {
- uchar = (c & 0x0f) << 12;
-
- c = (u8) *s++;
- if ((c & 0xc0) != 0x80)
- goto fail;
- c &= 0x3f;
- uchar |= c << 6;
-
- c = (u8) *s++;
- if ((c & 0xc0) != 0x80)
- goto fail;
- c &= 0x3f;
- uchar |= c;
-
- /* no bogus surrogates */
- if (0xd800 <= uchar && uchar <= 0xdfff)
- goto fail;
-
- // 4-byte sequence (surrogate pairs, currently rare):
- // 11101110wwwwzzzzyy + 110111yyyyxxxxxx
- // = 11110uuu 10uuzzzz 10yyyyyy 10xxxxxx
- // (uuuuu = wwww + 1)
- // FIXME accept the surrogate code points (only)
-
- } else
- goto fail;
- } else
- uchar = c;
- put_unaligned_le16(uchar, cp++);
- count++;
- len--;
- }
- return count;
-fail:
- return -1;
-}
-
/**
* usb_gadget_get_string - fill out a string descriptor
* @table: of c strings encoded using UTF-8
* @id: string id, from low byte of wValue in get string descriptor
- * @buf: at least 256 bytes
+ * @buf: at least 256 bytes, must be 16-bit aligned
*
* Finds the UTF-8 string matching the ID, and converts it into a
* string descriptor in utf16-le.
@@ -125,8 +60,8 @@ usb_gadget_get_string (struct usb_gadget_strings *table, int id, u8 *buf)
/* string descriptors have length, tag, then UTF16-LE text */
len = min ((size_t) 126, strlen (s->s));
- memset (buf + 2, 0, 2 * len); /* zero all the bytes */
- len = utf8_to_utf16le(s->s, (__le16 *)&buf[2], len);
+ len = utf8s_to_utf16s(s->s, len, UTF16_LITTLE_ENDIAN,
+ (wchar_t *) &buf[2], 126);
if (len < 0)
return -EINVAL;
buf [0] = (len + 1) * 2;
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index 20697cc..31d3483 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -81,7 +81,7 @@ module_param(buflen, uint, 0);
* work better with hosts where config changes are problematic or
* controllers (like original superh) that only support one config.
*/
-static int loopdefault = 0;
+static bool loopdefault = 0;
module_param(loopdefault, bool, S_IRUGO|S_IWUSR);
/*-------------------------------------------------------------------------*/
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 060e0e2..353cdd4 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -130,7 +130,7 @@ config USB_FSL_MPH_DR_OF
tristate
config USB_EHCI_FSL
- bool "Support for Freescale on-chip EHCI USB controller"
+ bool "Support for Freescale PPC on-chip EHCI USB controller"
depends on USB_EHCI_HCD && FSL_SOC
select USB_EHCI_ROOT_HUB_TT
select USB_FSL_MPH_DR_OF if OF
@@ -138,7 +138,7 @@ config USB_EHCI_FSL
Variation of ARC USB block used in some Freescale chips.
config USB_EHCI_MXC
- bool "Support for Freescale on-chip EHCI USB controller"
+ bool "Support for Freescale i.MX on-chip EHCI USB controller"
depends on USB_EHCI_HCD && ARCH_MXC
select USB_EHCI_ROOT_HUB_TT
---help---
@@ -194,6 +194,15 @@ config USB_EHCI_S5P
help
Enable support for the S5P SOC's on-chip EHCI controller.
+config USB_EHCI_MV
+ bool "EHCI support for Marvell on-chip controller"
+ depends on USB_EHCI_HCD
+ select USB_EHCI_ROOT_HUB_TT
+ ---help---
+ Enables support for Marvell (including PXA and MMP series) on-chip
+ USB SPH and OTG controller. SPH is a single port host, and it can
+ only be an EHCI host. OTG is a controller that can switch to host mode.
+
config USB_W90X900_EHCI
bool "W90X900(W90P910) EHCI support"
depends on USB_EHCI_HCD && ARCH_W90X900
@@ -210,7 +219,7 @@ config USB_CNS3XXX_EHCI
config USB_EHCI_ATH79
bool "EHCI support for AR7XXX/AR9XXX SoCs"
- depends on USB_EHCI_HCD && (SOC_AR71XX || SOC_AR724X || SOC_AR913X)
+ depends on USB_EHCI_HCD && (SOC_AR71XX || SOC_AR724X || SOC_AR913X || SOC_AR933X)
select USB_EHCI_ROOT_HUB_TT
default y
---help---
@@ -371,6 +380,12 @@ config USB_OHCI_SH
Enables support for the on-chip OHCI controller on the SuperH.
If you use the PCI OHCI controller, this option is not necessary.
+config USB_OHCI_EXYNOS
+ boolean "OHCI support for Samsung EXYNOS SoC Series"
+ depends on USB_OHCI_HCD && ARCH_EXYNOS
+ help
+ Enable support for the Samsung Exynos SOC's on-chip OHCI controller.
+
config USB_CNS3XXX_OHCI
bool "Cavium CNS3XXX OHCI Module"
depends on USB_OHCI_HCD && ARCH_CNS3XXX
@@ -531,7 +546,7 @@ config USB_RENESAS_USBHS_HCD
config USB_WHCI_HCD
tristate "Wireless USB Host Controller Interface (WHCI) driver (EXPERIMENTAL)"
depends on EXPERIMENTAL
- depends on PCI && USB
+ depends on PCI && USB && UWB
select USB_WUSB
select UWB_WHCI
help
@@ -544,7 +559,7 @@ config USB_WHCI_HCD
config USB_HWA_HCD
tristate "Host Wire Adapter (HWA) driver (EXPERIMENTAL)"
depends on EXPERIMENTAL
- depends on USB
+ depends on USB && UWB
select USB_WUSB
select UWB_HWA
help
diff --git a/drivers/usb/host/alchemy-common.c b/drivers/usb/host/alchemy-common.c
index b4192c9..936af83 100644
--- a/drivers/usb/host/alchemy-common.c
+++ b/drivers/usb/host/alchemy-common.c
@@ -52,9 +52,263 @@
USBCFG_EBE | USBCFG_EME | USBCFG_OBE | \
USBCFG_OME)
+/* Au1300 USB config registers */
+#define USB_DWC_CTRL1 0x00
+#define USB_DWC_CTRL2 0x04
+#define USB_VBUS_TIMER 0x10
+#define USB_SBUS_CTRL 0x14
+#define USB_MSR_ERR 0x18
+#define USB_DWC_CTRL3 0x1C
+#define USB_DWC_CTRL4 0x20
+#define USB_OTG_STATUS 0x28
+#define USB_DWC_CTRL5 0x2C
+#define USB_DWC_CTRL6 0x30
+#define USB_DWC_CTRL7 0x34
+#define USB_PHY_STATUS 0xC0
+#define USB_INT_STATUS 0xC4
+#define USB_INT_ENABLE 0xC8
+
+#define USB_DWC_CTRL1_OTGD 0x04 /* set to DISable OTG */
+#define USB_DWC_CTRL1_HSTRS 0x02 /* set to ENable EHCI */
+#define USB_DWC_CTRL1_DCRS 0x01 /* set to ENable UDC */
+
+#define USB_DWC_CTRL2_PHY1RS 0x04 /* set to enable PHY1 */
+#define USB_DWC_CTRL2_PHY0RS 0x02 /* set to enable PHY0 */
+#define USB_DWC_CTRL2_PHYRS 0x01 /* set to enable PHY */
+
+#define USB_DWC_CTRL3_OHCI1_CKEN (1 << 19)
+#define USB_DWC_CTRL3_OHCI0_CKEN (1 << 18)
+#define USB_DWC_CTRL3_EHCI0_CKEN (1 << 17)
+#define USB_DWC_CTRL3_OTG0_CKEN (1 << 16)
+
+#define USB_SBUS_CTRL_SBCA 0x04 /* coherent access */
+
+#define USB_INTEN_FORCE 0x20
+#define USB_INTEN_PHY 0x10
+#define USB_INTEN_UDC 0x08
+#define USB_INTEN_EHCI 0x04
+#define USB_INTEN_OHCI1 0x02
+#define USB_INTEN_OHCI0 0x01
static DEFINE_SPINLOCK(alchemy_usb_lock);
+static inline void __au1300_usb_phyctl(void __iomem *base, int enable)
+{
+ unsigned long r, s;
+
+ r = __raw_readl(base + USB_DWC_CTRL2);
+ s = __raw_readl(base + USB_DWC_CTRL3);
+
+ s &= USB_DWC_CTRL3_OHCI1_CKEN | USB_DWC_CTRL3_OHCI0_CKEN |
+ USB_DWC_CTRL3_EHCI0_CKEN | USB_DWC_CTRL3_OTG0_CKEN;
+
+ if (enable) {
+ /* simply enable all PHYs */
+ r |= USB_DWC_CTRL2_PHY1RS | USB_DWC_CTRL2_PHY0RS |
+ USB_DWC_CTRL2_PHYRS;
+ __raw_writel(r, base + USB_DWC_CTRL2);
+ wmb();
+ } else if (!s) {
+ /* no USB block active, do disable all PHYs */
+ r &= ~(USB_DWC_CTRL2_PHY1RS | USB_DWC_CTRL2_PHY0RS |
+ USB_DWC_CTRL2_PHYRS);
+ __raw_writel(r, base + USB_DWC_CTRL2);
+ wmb();
+ }
+}
+
+static inline void __au1300_ohci_control(void __iomem *base, int enable, int id)
+{
+ unsigned long r;
+
+ if (enable) {
+ __raw_writel(1, base + USB_DWC_CTRL7); /* start OHCI clock */
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL3); /* enable OHCI block */
+ r |= (id == 0) ? USB_DWC_CTRL3_OHCI0_CKEN
+ : USB_DWC_CTRL3_OHCI1_CKEN;
+ __raw_writel(r, base + USB_DWC_CTRL3);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable); /* power up the PHYs */
+
+ r = __raw_readl(base + USB_INT_ENABLE);
+ r |= (id == 0) ? USB_INTEN_OHCI0 : USB_INTEN_OHCI1;
+ __raw_writel(r, base + USB_INT_ENABLE);
+ wmb();
+
+ /* reset the OHCI start clock bit */
+ __raw_writel(0, base + USB_DWC_CTRL7);
+ wmb();
+ } else {
+ r = __raw_readl(base + USB_INT_ENABLE);
+ r &= ~((id == 0) ? USB_INTEN_OHCI0 : USB_INTEN_OHCI1);
+ __raw_writel(r, base + USB_INT_ENABLE);
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL3);
+ r &= ~((id == 0) ? USB_DWC_CTRL3_OHCI0_CKEN
+ : USB_DWC_CTRL3_OHCI1_CKEN);
+ __raw_writel(r, base + USB_DWC_CTRL3);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable);
+ }
+}
+
+static inline void __au1300_ehci_control(void __iomem *base, int enable)
+{
+ unsigned long r;
+
+ if (enable) {
+ r = __raw_readl(base + USB_DWC_CTRL3);
+ r |= USB_DWC_CTRL3_EHCI0_CKEN;
+ __raw_writel(r, base + USB_DWC_CTRL3);
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL1);
+ r |= USB_DWC_CTRL1_HSTRS;
+ __raw_writel(r, base + USB_DWC_CTRL1);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable);
+
+ r = __raw_readl(base + USB_INT_ENABLE);
+ r |= USB_INTEN_EHCI;
+ __raw_writel(r, base + USB_INT_ENABLE);
+ wmb();
+ } else {
+ r = __raw_readl(base + USB_INT_ENABLE);
+ r &= ~USB_INTEN_EHCI;
+ __raw_writel(r, base + USB_INT_ENABLE);
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL1);
+ r &= ~USB_DWC_CTRL1_HSTRS;
+ __raw_writel(r, base + USB_DWC_CTRL1);
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL3);
+ r &= ~USB_DWC_CTRL3_EHCI0_CKEN;
+ __raw_writel(r, base + USB_DWC_CTRL3);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable);
+ }
+}
+
+static inline void __au1300_udc_control(void __iomem *base, int enable)
+{
+ unsigned long r;
+
+ if (enable) {
+ r = __raw_readl(base + USB_DWC_CTRL1);
+ r |= USB_DWC_CTRL1_DCRS;
+ __raw_writel(r, base + USB_DWC_CTRL1);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable);
+
+ r = __raw_readl(base + USB_INT_ENABLE);
+ r |= USB_INTEN_UDC;
+ __raw_writel(r, base + USB_INT_ENABLE);
+ wmb();
+ } else {
+ r = __raw_readl(base + USB_INT_ENABLE);
+ r &= ~USB_INTEN_UDC;
+ __raw_writel(r, base + USB_INT_ENABLE);
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL1);
+ r &= ~USB_DWC_CTRL1_DCRS;
+ __raw_writel(r, base + USB_DWC_CTRL1);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable);
+ }
+}
+
+static inline void __au1300_otg_control(void __iomem *base, int enable)
+{
+ unsigned long r;
+ if (enable) {
+ r = __raw_readl(base + USB_DWC_CTRL3);
+ r |= USB_DWC_CTRL3_OTG0_CKEN;
+ __raw_writel(r, base + USB_DWC_CTRL3);
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL1);
+ r &= ~USB_DWC_CTRL1_OTGD;
+ __raw_writel(r, base + USB_DWC_CTRL1);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable);
+ } else {
+ r = __raw_readl(base + USB_DWC_CTRL1);
+ r |= USB_DWC_CTRL1_OTGD;
+ __raw_writel(r, base + USB_DWC_CTRL1);
+ wmb();
+
+ r = __raw_readl(base + USB_DWC_CTRL3);
+ r &= ~USB_DWC_CTRL3_OTG0_CKEN;
+ __raw_writel(r, base + USB_DWC_CTRL3);
+ wmb();
+
+ __au1300_usb_phyctl(base, enable);
+ }
+}
+
+static inline int au1300_usb_control(int block, int enable)
+{
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1300_USB_CTL_PHYS_ADDR);
+ int ret = 0;
+
+ switch (block) {
+ case ALCHEMY_USB_OHCI0:
+ __au1300_ohci_control(base, enable, 0);
+ break;
+ case ALCHEMY_USB_OHCI1:
+ __au1300_ohci_control(base, enable, 1);
+ break;
+ case ALCHEMY_USB_EHCI0:
+ __au1300_ehci_control(base, enable);
+ break;
+ case ALCHEMY_USB_UDC0:
+ __au1300_udc_control(base, enable);
+ break;
+ case ALCHEMY_USB_OTG0:
+ __au1300_otg_control(base, enable);
+ break;
+ default:
+ ret = -ENODEV;
+ }
+ return ret;
+}
+
+static inline void au1300_usb_init(void)
+{
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1300_USB_CTL_PHYS_ADDR);
+
+ /* set some sane defaults. Note: we don't fiddle with DWC_CTRL4
+ * here at all: Port 2 routing (EHCI or UDC) must be set either
+ * by boot firmware or platform init code; I can't autodetect
+ * a sane setting.
+ */
+ __raw_writel(0, base + USB_INT_ENABLE); /* disable all USB irqs */
+ wmb();
+ __raw_writel(0, base + USB_DWC_CTRL3); /* disable all clocks */
+ wmb();
+ __raw_writel(~0, base + USB_MSR_ERR); /* clear all errors */
+ wmb();
+ __raw_writel(~0, base + USB_INT_STATUS); /* clear int status */
+ wmb();
+ /* set coherent access bit */
+ __raw_writel(USB_SBUS_CTRL_SBCA, base + USB_SBUS_CTRL);
+ wmb();
+}
static inline void __au1200_ohci_control(void __iomem *base, int enable)
{
@@ -233,6 +487,9 @@ int alchemy_usb_control(int block, int enable)
case ALCHEMY_CPU_AU1200:
ret = au1200_usb_control(block, enable);
break;
+ case ALCHEMY_CPU_AU1300:
+ ret = au1300_usb_control(block, enable);
+ break;
default:
ret = -ENODEV;
}
@@ -281,6 +538,20 @@ static void au1200_usb_pm(int susp)
}
}
+static void au1300_usb_pm(int susp)
+{
+ void __iomem *base =
+ (void __iomem *)KSEG1ADDR(AU1300_USB_CTL_PHYS_ADDR);
+ /* remember Port2 routing */
+ if (susp) {
+ alchemy_usb_pmdata[0] = __raw_readl(base + USB_DWC_CTRL4);
+ } else {
+ au1300_usb_init();
+ __raw_writel(alchemy_usb_pmdata[0], base + USB_DWC_CTRL4);
+ wmb();
+ }
+}
+
static void alchemy_usb_pm(int susp)
{
switch (alchemy_get_cputype()) {
@@ -295,6 +566,9 @@ static void alchemy_usb_pm(int susp)
case ALCHEMY_CPU_AU1200:
au1200_usb_pm(susp);
break;
+ case ALCHEMY_CPU_AU1300:
+ au1300_usb_pm(susp);
+ break;
}
}
@@ -328,6 +602,9 @@ static int __init alchemy_usb_init(void)
case ALCHEMY_CPU_AU1200:
au1200_usb_init();
break;
+ case ALCHEMY_CPU_AU1300:
+ au1300_usb_init();
+ break;
}
register_syscore_ops(&alchemy_usb_pm_ops);
diff --git a/drivers/usb/host/ehci-ath79.c b/drivers/usb/host/ehci-ath79.c
index afb6743..f1424f9 100644
--- a/drivers/usb/host/ehci-ath79.c
+++ b/drivers/usb/host/ehci-ath79.c
@@ -33,6 +33,10 @@ static const struct platform_device_id ehci_ath79_id_table[] = {
.driver_data = EHCI_ATH79_IP_V2,
},
{
+ .name = "ar933x-ehci",
+ .driver_data = EHCI_ATH79_IP_V2,
+ },
+ {
/* terminating entry */
},
};
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index 18bafa9..bf7441a 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -23,6 +23,7 @@ static int au1xxx_ehci_setup(struct usb_hcd *hcd)
int ret = ehci_init(hcd);
ehci->need_io_watchdog = 0;
+ ehci_reset(ehci);
return ret;
}
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index e90344a..b556a72 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -125,7 +125,7 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
*/
if (pdata->init && pdata->init(pdev)) {
retval = -ENODEV;
- goto err3;
+ goto err4;
}
/* Enable USB controller, 83xx or 8536 */
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 3ff9f82..a007a9f 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -48,6 +48,10 @@
#include <asm/system.h>
#include <asm/unaligned.h>
+#if defined(CONFIG_PPC_PS3)
+#include <asm/firmware.h>
+#endif
+
/*-------------------------------------------------------------------------*/
/*
@@ -108,7 +112,7 @@ module_param (park, uint, S_IRUGO);
MODULE_PARM_DESC (park, "park setting; 1-3 back-to-back async packets");
/* for flakey hardware, ignore overcurrent indicators */
-static int ignore_oc = 0;
+static bool ignore_oc = 0;
module_param (ignore_oc, bool, S_IRUGO);
MODULE_PARM_DESC (ignore_oc, "ignore bogus hardware overcurrent indications");
@@ -230,12 +234,58 @@ static int ehci_halt (struct ehci_hcd *ehci)
STS_HALT, STS_HALT, 16 * 125);
}
+#if defined(CONFIG_USB_SUSPEND) && defined(CONFIG_PPC_PS3)
+
+/*
+ * The EHCI controller of the Cell Super Companion Chip used in the
+ * PS3 will stop the root hub after all root hub ports are suspended.
+ * When in this condition handshake will return -ETIMEDOUT. The
+ * STS_HLT bit will not be set, so inspection of the frame index is
+ * used here to test for the condition. If the condition is found
+ * return success to allow the USB suspend to complete.
+ */
+
+static int handshake_for_broken_root_hub(struct ehci_hcd *ehci,
+ void __iomem *ptr, u32 mask, u32 done,
+ int usec)
+{
+ unsigned int old_index;
+ int error;
+
+ if (!firmware_has_feature(FW_FEATURE_PS3_LV1))
+ return -ETIMEDOUT;
+
+ old_index = ehci_read_frame_index(ehci);
+
+ error = handshake(ehci, ptr, mask, done, usec);
+
+ if (error == -ETIMEDOUT && ehci_read_frame_index(ehci) == old_index)
+ return 0;
+
+ return error;
+}
+
+#else
+
+static int handshake_for_broken_root_hub(struct ehci_hcd *ehci,
+ void __iomem *ptr, u32 mask, u32 done,
+ int usec)
+{
+ return -ETIMEDOUT;
+}
+
+#endif
+
static int handshake_on_error_set_halt(struct ehci_hcd *ehci, void __iomem *ptr,
u32 mask, u32 done, int usec)
{
int error;
error = handshake(ehci, ptr, mask, done, usec);
+ if (error == -ETIMEDOUT)
+ error = handshake_for_broken_root_hub(ehci, ptr, mask, done,
+ usec);
+
if (error) {
ehci_halt(ehci);
ehci->rh_state = EHCI_RH_HALTED;
@@ -620,6 +670,7 @@ static int ehci_init(struct usb_hcd *hcd)
hw = ehci->async->hw;
hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
+ hw->hw_info1 |= cpu_to_hc32(ehci, (1 << 7)); /* I = 1 */
hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
hw->hw_qtd_next = EHCI_LIST_END(ehci);
ehci->async->qh_state = QH_STATE_LINKED;
@@ -677,22 +728,13 @@ static int ehci_init(struct usb_hcd *hcd)
static int ehci_run (struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
- int retval;
u32 temp;
u32 hcc_params;
hcd->uses_new_polling = 1;
/* EHCI spec section 4.1 */
- /*
- * TDI driver does the ehci_reset in their reset callback.
- * Don't reset here, because configuration settings will
- * vanish.
- */
- if (!ehci_is_TDI(ehci) && (retval = ehci_reset(ehci)) != 0) {
- ehci_mem_cleanup(ehci);
- return retval;
- }
+
ehci_writel(ehci, ehci->periodic_dma, &ehci->regs->frame_list);
ehci_writel(ehci, (u32)ehci->async->qh_dma, &ehci->regs->async_next);
@@ -1324,11 +1366,16 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ehci_pxa168_driver
#endif
-#ifdef CONFIG_NLM_XLR
+#ifdef CONFIG_CPU_XLR
#include "ehci-xls.c"
#define PLATFORM_DRIVER ehci_xls_driver
#endif
+#ifdef CONFIG_USB_EHCI_MV
+#include "ehci-mv.c"
+#define PLATFORM_DRIVER ehci_mv_driver
+#endif
+
#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \
!defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER) && \
!defined(XILINX_OF_PLATFORM_DRIVER)
diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
new file mode 100644
index 0000000..52a604f
--- /dev/null
+++ b/drivers/usb/host/ehci-mv.c
@@ -0,0 +1,391 @@
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ * Author: Chao Xie <chao.xie@marvell.com>
+ * Neil Zhang <zhangwm@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/usb/otg.h>
+#include <linux/platform_data/mv_usb.h>
+
+#define CAPLENGTH_MASK (0xff)
+
+struct ehci_hcd_mv {
+ struct usb_hcd *hcd;
+
+ /* Which mode is this EHCI running in: OTG or Host? */
+ int mode;
+
+ void __iomem *phy_regs;
+ void __iomem *cap_regs;
+ void __iomem *op_regs;
+
+ struct otg_transceiver *otg;
+
+ struct mv_usb_platform_data *pdata;
+
+ /* clock source and total clock number */
+ unsigned int clknum;
+ struct clk *clk[0];
+};
+
+static void ehci_clock_enable(struct ehci_hcd_mv *ehci_mv)
+{
+ unsigned int i;
+
+ for (i = 0; i < ehci_mv->clknum; i++)
+ clk_enable(ehci_mv->clk[i]);
+}
+
+static void ehci_clock_disable(struct ehci_hcd_mv *ehci_mv)
+{
+ unsigned int i;
+
+ for (i = 0; i < ehci_mv->clknum; i++)
+ clk_disable(ehci_mv->clk[i]);
+}
+
+static int mv_ehci_enable(struct ehci_hcd_mv *ehci_mv)
+{
+ int retval;
+
+ ehci_clock_enable(ehci_mv);
+ if (ehci_mv->pdata->phy_init) {
+ retval = ehci_mv->pdata->phy_init(ehci_mv->phy_regs);
+ if (retval)
+ return retval;
+ }
+
+ return 0;
+}
+
+static void mv_ehci_disable(struct ehci_hcd_mv *ehci_mv)
+{
+ if (ehci_mv->pdata->phy_deinit)
+ ehci_mv->pdata->phy_deinit(ehci_mv->phy_regs);
+ ehci_clock_disable(ehci_mv);
+}
+
+static int mv_ehci_reset(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ struct device *dev = hcd->self.controller;
+ struct ehci_hcd_mv *ehci_mv = dev_get_drvdata(dev);
+ int retval;
+
+ if (ehci_mv == NULL) {
+ dev_err(dev, "Can not find private ehci data\n");
+ return -ENODEV;
+ }
+
+ /*
+ * data structure init
+ */
+ retval = ehci_init(hcd);
+ if (retval) {
+ dev_err(dev, "ehci_init failed %d\n", retval);
+ return retval;
+ }
+
+ hcd->has_tt = 1;
+ ehci->sbrn = 0x20;
+
+ retval = ehci_reset(ehci);
+ if (retval) {
+ dev_err(dev, "ehci_reset failed %d\n", retval);
+ return retval;
+ }
+
+ return 0;
+}
+
+static const struct hc_driver mv_ehci_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "Marvell EHCI",
+ .hcd_priv_size = sizeof(struct ehci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ehci_irq,
+ .flags = HCD_MEMORY | HCD_USB2,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = mv_ehci_reset,
+ .start = ehci_run,
+ .stop = ehci_stop,
+ .shutdown = ehci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ehci_urb_enqueue,
+ .urb_dequeue = ehci_urb_dequeue,
+ .endpoint_disable = ehci_endpoint_disable,
+ .endpoint_reset = ehci_endpoint_reset,
+ .clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ehci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ehci_hub_status_data,
+ .hub_control = ehci_hub_control,
+ .bus_suspend = ehci_bus_suspend,
+ .bus_resume = ehci_bus_resume,
+};
+
+static int mv_ehci_probe(struct platform_device *pdev)
+{
+ struct mv_usb_platform_data *pdata = pdev->dev.platform_data;
+ struct usb_hcd *hcd;
+ struct ehci_hcd *ehci;
+ struct ehci_hcd_mv *ehci_mv;
+ struct resource *r;
+ int clk_i, retval = -ENODEV;
+ u32 offset;
+ size_t size;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "missing platform_data\n");
+ return -ENODEV;
+ }
+
+ if (usb_disabled())
+ return -ENODEV;
+
+ hcd = usb_create_hcd(&mv_ehci_hc_driver, &pdev->dev, "mv ehci");
+ if (!hcd)
+ return -ENOMEM;
+
+ size = sizeof(*ehci_mv) + sizeof(struct clk *) * pdata->clknum;
+ ehci_mv = kzalloc(size, GFP_KERNEL);
+ if (ehci_mv == NULL) {
+ dev_err(&pdev->dev, "cannot allocate ehci_hcd_mv\n");
+ retval = -ENOMEM;
+ goto err_put_hcd;
+ }
+
+ platform_set_drvdata(pdev, ehci_mv);
+ ehci_mv->pdata = pdata;
+ ehci_mv->hcd = hcd;
+
+ ehci_mv->clknum = pdata->clknum;
+ for (clk_i = 0; clk_i < ehci_mv->clknum; clk_i++) {
+ ehci_mv->clk[clk_i] =
+ clk_get(&pdev->dev, pdata->clkname[clk_i]);
+ if (IS_ERR(ehci_mv->clk[clk_i])) {
+ dev_err(&pdev->dev, "error get clck \"%s\"\n",
+ pdata->clkname[clk_i]);
+ retval = PTR_ERR(ehci_mv->clk[clk_i]);
+ goto err_put_clk;
+ }
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phyregs");
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no phy I/O memory resource defined\n");
+ retval = -ENODEV;
+ goto err_put_clk;
+ }
+
+ ehci_mv->phy_regs = ioremap(r->start, resource_size(r));
+ if (ehci_mv->phy_regs == 0) {
+ dev_err(&pdev->dev, "failed to map phy I/O memory\n");
+ retval = -EFAULT;
+ goto err_put_clk;
+ }
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "capregs");
+ if (!r) {
+ dev_err(&pdev->dev, "no I/O memory resource defined\n");
+ retval = -ENODEV;
+ goto err_iounmap_phyreg;
+ }
+
+ ehci_mv->cap_regs = ioremap(r->start, resource_size(r));
+ if (ehci_mv->cap_regs == NULL) {
+ dev_err(&pdev->dev, "failed to map I/O memory\n");
+ retval = -EFAULT;
+ goto err_iounmap_phyreg;
+ }
+
+ retval = mv_ehci_enable(ehci_mv);
+ if (retval) {
+ dev_err(&pdev->dev, "init phy error %d\n", retval);
+ goto err_iounmap_capreg;
+ }
+
+ offset = readl(ehci_mv->cap_regs) & CAPLENGTH_MASK;
+ ehci_mv->op_regs =
+ (void __iomem *) ((unsigned long) ehci_mv->cap_regs + offset);
+
+ hcd->rsrc_start = r->start;
+ hcd->rsrc_len = r->end - r->start + 1;
+ hcd->regs = ehci_mv->op_regs;
+
+ hcd->irq = platform_get_irq(pdev, 0);
+ if (!hcd->irq) {
+ dev_err(&pdev->dev, "Cannot get irq.");
+ retval = -ENODEV;
+ goto err_disable_clk;
+ }
+
+ ehci = hcd_to_ehci(hcd);
+ ehci->caps = (struct ehci_caps *) ehci_mv->cap_regs;
+ ehci->regs = (struct ehci_regs *) ehci_mv->op_regs;
+ ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+
+ ehci_mv->mode = pdata->mode;
+ if (ehci_mv->mode == MV_USB_MODE_OTG) {
+#ifdef CONFIG_USB_OTG_UTILS
+ ehci_mv->otg = otg_get_transceiver();
+ if (!ehci_mv->otg) {
+ dev_err(&pdev->dev,
+ "unable to find transceiver\n");
+ retval = -ENODEV;
+ goto err_disable_clk;
+ }
+
+ retval = otg_set_host(ehci_mv->otg, &hcd->self);
+ if (retval < 0) {
+ dev_err(&pdev->dev,
+ "unable to register with transceiver\n");
+ retval = -ENODEV;
+ goto err_put_transceiver;
+ }
+ /* otg will enable clock before use as host */
+ mv_ehci_disable(ehci_mv);
+#else
+ dev_info(&pdev->dev, "MV_USB_MODE_OTG "
+ "must have CONFIG_USB_OTG_UTILS enabled\n");
+ goto err_disable_clk;
+#endif
+ } else {
+ if (pdata->set_vbus)
+ pdata->set_vbus(1);
+
+ retval = usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
+ if (retval) {
+ dev_err(&pdev->dev,
+ "failed to add hcd with err %d\n", retval);
+ goto err_set_vbus;
+ }
+ }
+
+ if (pdata->private_init)
+ pdata->private_init(ehci_mv->op_regs, ehci_mv->phy_regs);
+
+ dev_info(&pdev->dev,
+ "successful find EHCI device with regs 0x%p irq %d"
+ " working in %s mode\n", hcd->regs, hcd->irq,
+ ehci_mv->mode == MV_USB_MODE_OTG ? "OTG" : "Host");
+
+ return 0;
+
+err_set_vbus:
+ if (pdata->set_vbus)
+ pdata->set_vbus(0);
+#ifdef CONFIG_USB_OTG_UTILS
+err_put_transceiver:
+ if (ehci_mv->otg)
+ otg_put_transceiver(ehci_mv->otg);
+#endif
+err_disable_clk:
+ mv_ehci_disable(ehci_mv);
+err_iounmap_capreg:
+ iounmap(ehci_mv->cap_regs);
+err_iounmap_phyreg:
+ iounmap(ehci_mv->phy_regs);
+err_put_clk:
+ for (clk_i--; clk_i >= 0; clk_i--)
+ clk_put(ehci_mv->clk[clk_i]);
+ platform_set_drvdata(pdev, NULL);
+ kfree(ehci_mv);
+err_put_hcd:
+ usb_put_hcd(hcd);
+
+ return retval;
+}
+
+static int mv_ehci_remove(struct platform_device *pdev)
+{
+ struct ehci_hcd_mv *ehci_mv = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = ehci_mv->hcd;
+ int clk_i;
+
+ if (hcd->rh_registered)
+ usb_remove_hcd(hcd);
+
+ if (ehci_mv->otg) {
+ otg_set_host(ehci_mv->otg, NULL);
+ otg_put_transceiver(ehci_mv->otg);
+ }
+
+ if (ehci_mv->mode == MV_USB_MODE_HOST) {
+ if (ehci_mv->pdata->set_vbus)
+ ehci_mv->pdata->set_vbus(0);
+
+ mv_ehci_disable(ehci_mv);
+ }
+
+ iounmap(ehci_mv->cap_regs);
+ iounmap(ehci_mv->phy_regs);
+
+ for (clk_i = 0; clk_i < ehci_mv->clknum; clk_i++)
+ clk_put(ehci_mv->clk[clk_i]);
+
+ platform_set_drvdata(pdev, NULL);
+
+ kfree(ehci_mv);
+ usb_put_hcd(hcd);
+
+ return 0;
+}
+
+MODULE_ALIAS("mv-ehci");
+
+static const struct platform_device_id ehci_id_table[] = {
+ {"pxa-u2oehci", PXA_U2OEHCI},
+ {"pxa-sph", PXA_SPH},
+ {"mmp3-hsic", MMP3_HSIC},
+ {"mmp3-fsic", MMP3_FSIC},
+ {},
+};
+
+static void mv_ehci_shutdown(struct platform_device *pdev)
+{
+ struct ehci_hcd_mv *ehci_mv = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = ehci_mv->hcd;
+
+ if (!hcd->rh_registered)
+ return;
+
+ if (hcd->driver->shutdown)
+ hcd->driver->shutdown(hcd);
+}
+
+static struct platform_driver ehci_mv_driver = {
+ .probe = mv_ehci_probe,
+ .remove = mv_ehci_remove,
+ .shutdown = mv_ehci_shutdown,
+ .driver = {
+ .name = "mv-ehci",
+ .bus = &platform_bus_type,
+ },
+ .id_table = ehci_id_table,
+};
diff --git a/drivers/usb/host/ehci-octeon.c b/drivers/usb/host/ehci-octeon.c
index ba1f513..c010488 100644
--- a/drivers/usb/host/ehci-octeon.c
+++ b/drivers/usb/host/ehci-octeon.c
@@ -155,6 +155,8 @@ static int ehci_octeon_drv_probe(struct platform_device *pdev)
/* cache this readonly data; minimize chip reads */
ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
+ ehci_reset(ehci);
+
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret) {
dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index e39b029..bba9850 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -41,6 +41,7 @@
#include <linux/usb/ulpi.h>
#include <plat/usb.h>
#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
/* EHCI Register Set */
#define EHCI_INSNREG04 (0xA0)
@@ -190,11 +191,8 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
}
}
- ret = omap_usbhs_enable(dev);
- if (ret) {
- dev_err(dev, "failed to start usbhs with err %d\n", ret);
- goto err_enable;
- }
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
/*
* An undocumented "feature" in the OMAP3 EHCI controller,
@@ -228,6 +226,8 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
/* cache this readonly data; minimize chip reads */
omap_ehci->hcs_params = readl(&omap_ehci->caps->hcs_params);
+ ehci_reset(omap_ehci);
+
ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (ret) {
dev_err(dev, "failed to add hcd with err %d\n", ret);
@@ -240,11 +240,8 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
return 0;
err_add_hcd:
- omap_usbhs_disable(dev);
-
-err_enable:
disable_put_regulator(pdata);
- usb_put_hcd(hcd);
+ pm_runtime_put_sync(dev);
err_io:
iounmap(regs);
@@ -266,10 +263,12 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev)
struct usb_hcd *hcd = dev_get_drvdata(dev);
usb_remove_hcd(hcd);
- omap_usbhs_disable(dev);
disable_put_regulator(dev->platform_data);
iounmap(hcd->regs);
usb_put_hcd(hcd);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+
return 0;
}
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index a68a2a5..6c6a5a3 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -172,7 +172,7 @@ static const struct hc_driver ehci_orion_hc_driver = {
static void __init
ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
- struct mbus_dram_target_info *dram)
+ const struct mbus_dram_target_info *dram)
{
int i;
@@ -182,7 +182,7 @@ ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
}
for (i = 0; i < dram->num_cs; i++) {
- struct mbus_dram_window *cs = dram->cs + i;
+ const struct mbus_dram_window *cs = dram->cs + i;
wrl(USB_WINDOW_CTRL(i), ((cs->size - 1) & 0xffff0000) |
(cs->mbus_attr << 8) |
@@ -194,6 +194,7 @@ ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
{
struct orion_ehci_data *pd = pdev->dev.platform_data;
+ const struct mbus_dram_target_info *dram;
struct resource *res;
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
@@ -259,8 +260,9 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
- if (pd != NULL && pd->dram != NULL)
- ehci_orion_conf_mbus_windows(hcd, pd->dram);
+ dram = mv_mbus_dram_info();
+ if (dram)
+ ehci_orion_conf_mbus_windows(hcd, dram);
/*
* setup Orion USB controller.
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index f4b627d..01bb7241d 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -276,6 +276,9 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
/* Serial Bus Release Number is at PCI 0x60 offset */
pci_read_config_byte(pdev, 0x60, &ehci->sbrn);
+ if (pdev->vendor == PCI_VENDOR_ID_STMICRO
+ && pdev->device == PCI_DEVICE_ID_STMICRO_USB_HOST)
+ ehci->sbrn = 0x20; /* ConneXT has no sbrn register */
/* Keep this around for a while just in case some EHCI
* implementation uses legacy PCI PM support. This test
@@ -526,6 +529,9 @@ static const struct pci_device_id pci_ids [] = { {
/* handle any USB 2.0 EHCI controller */
PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_EHCI, ~0),
.driver_data = (unsigned long) &ehci_pci_hc_driver,
+ }, {
+ PCI_VDEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_USB_HOST),
+ .driver_data = (unsigned long) &ehci_pci_hc_driver,
},
{ /* end: all zeroes */ }
};
diff --git a/drivers/usb/host/ehci-ps3.c b/drivers/usb/host/ehci-ps3.c
index 2dc32da..a20e496 100644
--- a/drivers/usb/host/ehci-ps3.c
+++ b/drivers/usb/host/ehci-ps3.c
@@ -21,6 +21,34 @@
#include <asm/firmware.h>
#include <asm/ps3.h>
+static void ps3_ehci_setup_insnreg(struct ehci_hcd *ehci)
+{
+ /* PS3 HC internal setup register offsets. */
+
+ enum ps3_ehci_hc_insnreg {
+ ps3_ehci_hc_insnreg01 = 0x084,
+ ps3_ehci_hc_insnreg02 = 0x088,
+ ps3_ehci_hc_insnreg03 = 0x08c,
+ };
+
+ /* PS3 EHCI HC errata fix 316 - The PS3 EHCI HC will reset its
+ * internal INSNREGXX setup regs back to the chip default values
+ * on Host Controller Reset (CMD_RESET) or Light Host Controller
+ * Reset (CMD_LRESET). The work-around for this is for the HC
+ * driver to re-initialise these regs whenever the HC is reset.
+ */
+
+ /* Set burst transfer counts to 256 out, 32 in. */
+
+ writel_be(0x01000020, (void __iomem *)ehci->regs +
+ ps3_ehci_hc_insnreg01);
+
+ /* Enable burst transfer counts. */
+
+ writel_be(0x00000001, (void __iomem *)ehci->regs +
+ ps3_ehci_hc_insnreg03);
+}
+
static int ps3_ehci_hc_reset(struct usb_hcd *hcd)
{
int result;
@@ -49,6 +77,8 @@ static int ps3_ehci_hc_reset(struct usb_hcd *hcd)
ehci_reset(ehci);
+ ps3_ehci_setup_insnreg(ehci);
+
return result;
}
diff --git a/drivers/usb/host/ehci-pxa168.c b/drivers/usb/host/ehci-pxa168.c
index ac0c16e..8d0e7a2 100644
--- a/drivers/usb/host/ehci-pxa168.c
+++ b/drivers/usb/host/ehci-pxa168.c
@@ -299,7 +299,7 @@ static int __devinit ehci_pxa168_drv_probe(struct platform_device *pdev)
ehci = hcd_to_ehci(hcd);
ehci->caps = hcd->regs + 0x100;
ehci->regs = hcd->regs + 0x100 +
- HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
+ HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
hcd->has_tt = 1;
ehci->sbrn = 0x20;
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index 4e4066c..36ca507 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -373,6 +373,17 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
retry_xacterr:
if ((token & QTD_STS_ACTIVE) == 0) {
+ /* Report Data Buffer Error: non-fatal but useful */
+ if (token & QTD_STS_DBE)
+ ehci_dbg(ehci,
+ "detected DataBufferErr for urb %p ep%d%s len %d, qtd %p [qh %p]\n",
+ urb,
+ usb_endpoint_num(&urb->ep->desc),
+ usb_endpoint_dir_in(&urb->ep->desc) ? "in" : "out",
+ urb->transfer_buffer_length,
+ qtd,
+ qh);
+
/* on STALL, error, and short reads this urb must
* complete and all its qtds must be recycled.
*/
@@ -647,7 +658,7 @@ qh_urb_transaction (
/*
* data transfer stage: buffer setup
*/
- i = urb->num_sgs;
+ i = urb->num_mapped_sgs;
if (len > 0 && i > 0) {
sg = urb->sg;
buf = sg_dma_address(sg);
diff --git a/drivers/usb/host/ehci-s5p.c b/drivers/usb/host/ehci-s5p.c
index 024b65c..293f741 100644
--- a/drivers/usb/host/ehci-s5p.c
+++ b/drivers/usb/host/ehci-s5p.c
@@ -14,8 +14,6 @@
#include <linux/clk.h>
#include <linux/platform_device.h>
-#include <mach/regs-pmu.h>
-#include <plat/cpu.h>
#include <plat/ehci.h>
#include <plat/usb-phy.h>
@@ -136,6 +134,8 @@ static int __devinit s5p_ehci_probe(struct platform_device *pdev)
/* cache this readonly data; minimize chip reads */
ehci->hcs_params = readl(&ehci->caps->hcs_params);
+ ehci_reset(ehci);
+
err = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (err) {
dev_err(&pdev->dev, "Failed to add USB HCD\n");
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index db9d1b4..dbc7fe8 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -21,7 +21,12 @@
#include <linux/platform_data/tegra_usb.h>
#include <linux/irq.h>
#include <linux/usb/otg.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+
#include <mach/usb_phy.h>
+#include <mach/iomap.h>
#define TEGRA_USB_DMA_ALIGN 32
@@ -574,6 +579,35 @@ static const struct hc_driver tegra_ehci_hc_driver = {
.port_handed_over = ehci_port_handed_over,
};
+static int setup_vbus_gpio(struct platform_device *pdev)
+{
+ int err = 0;
+ int gpio;
+
+ if (!pdev->dev.of_node)
+ return 0;
+
+ gpio = of_get_named_gpio(pdev->dev.of_node, "nvidia,vbus-gpio", 0);
+ if (!gpio_is_valid(gpio))
+ return 0;
+
+ err = gpio_request(gpio, "vbus_gpio");
+ if (err) {
+ dev_err(&pdev->dev, "can't request vbus gpio %d", gpio);
+ return err;
+ }
+ err = gpio_direction_output(gpio, 1);
+ if (err) {
+ dev_err(&pdev->dev, "can't enable vbus\n");
+ return err;
+ }
+ gpio_set_value(gpio, 1);
+
+ return err;
+}
+
+static u64 tegra_ehci_dma_mask = DMA_BIT_MASK(32);
+
static int tegra_ehci_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -590,6 +624,15 @@ static int tegra_ehci_probe(struct platform_device *pdev)
return -EINVAL;
}
+ /* Right now device-tree probed devices don't get dma_mask set.
+ * Since shared usb code relies on it, set it here for now.
+ * Once we have dma capability bindings this can go away.
+ */
+ if (!pdev->dev.dma_mask)
+ pdev->dev.dma_mask = &tegra_ehci_dma_mask;
+
+ setup_vbus_gpio(pdev);
+
tegra = kzalloc(sizeof(struct tegra_ehci_hcd), GFP_KERNEL);
if (!tegra)
return -ENOMEM;
@@ -640,6 +683,28 @@ static int tegra_ehci_probe(struct platform_device *pdev)
goto fail_io;
}
+ /* This is pretty ugly and needs to be fixed when we do only
+ * device-tree probing. Old code relies on the platform_device
+ * numbering that we lack for device-tree-instantiated devices.
+ */
+ if (instance < 0) {
+ switch (res->start) {
+ case TEGRA_USB_BASE:
+ instance = 0;
+ break;
+ case TEGRA_USB2_BASE:
+ instance = 1;
+ break;
+ case TEGRA_USB3_BASE:
+ instance = 2;
+ break;
+ default:
+ err = -ENODEV;
+ dev_err(&pdev->dev, "unknown usb instance\n");
+ goto fail_phy;
+ }
+ }
+
tegra->phy = tegra_usb_phy_open(instance, hcd->regs, pdata->phy_config,
TEGRA_USB_PHY_MODE_HOST);
if (IS_ERR(tegra->phy)) {
@@ -773,6 +838,11 @@ static void tegra_ehci_hcd_shutdown(struct platform_device *pdev)
hcd->driver->shutdown(hcd);
}
+static struct of_device_id tegra_ehci_of_match[] __devinitdata = {
+ { .compatible = "nvidia,tegra20-ehci", },
+ { },
+};
+
static struct platform_driver tegra_ehci_driver = {
.probe = tegra_ehci_probe,
.remove = tegra_ehci_remove,
@@ -783,5 +853,6 @@ static struct platform_driver tegra_ehci_driver = {
.shutdown = tegra_ehci_hcd_shutdown,
.driver = {
.name = "tegra-ehci",
+ .of_match_table = tegra_ehci_of_match,
}
};
diff --git a/drivers/usb/host/ehci-vt8500.c b/drivers/usb/host/ehci-vt8500.c
index 54d1ab8..c1eda73 100644
--- a/drivers/usb/host/ehci-vt8500.c
+++ b/drivers/usb/host/ehci-vt8500.c
@@ -132,6 +132,8 @@ static int vt8500_ehci_drv_probe(struct platform_device *pdev)
ehci_port_power(ehci, 1);
+ ehci_reset(ehci);
+
ret = usb_add_hcd(hcd, pdev->resource[1].start,
IRQF_SHARED);
if (ret == 0) {
diff --git a/drivers/usb/host/ehci-w90x900.c b/drivers/usb/host/ehci-w90x900.c
index d661cf7..3d2e26c 100644
--- a/drivers/usb/host/ehci-w90x900.c
+++ b/drivers/usb/host/ehci-w90x900.c
@@ -78,6 +78,8 @@ static int __devinit usb_w90x900_probe(const struct hc_driver *driver,
if (irq < 0)
goto err4;
+ ehci_reset(ehci);
+
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval != 0)
goto err4;
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index 32793ce..9c2cc46 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -183,7 +183,7 @@ static int __devinit ehci_hcd_xilinx_of_probe(struct platform_device *op)
}
irq = irq_of_parse_and_map(dn, 0);
- if (irq == NO_IRQ) {
+ if (!irq) {
printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
rv = -EBUSY;
goto err_irq;
diff --git a/drivers/usb/host/ehci-xls.c b/drivers/usb/host/ehci-xls.c
index b4fb511..72f0819 100644
--- a/drivers/usb/host/ehci-xls.c
+++ b/drivers/usb/host/ehci-xls.c
@@ -69,7 +69,7 @@ int ehci_xls_probe_internal(const struct hc_driver *driver,
}
hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
+ hcd->rsrc_len = resource_size(res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
driver->description)) {
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 4ed6d19..d262374 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -824,17 +824,7 @@ static struct platform_driver of_fhci_driver = {
.remove = __devexit_p(of_fhci_remove),
};
-static int __init fhci_module_init(void)
-{
- return platform_driver_register(&of_fhci_driver);
-}
-module_init(fhci_module_init);
-
-static void __exit fhci_module_exit(void)
-{
- platform_driver_unregister(&of_fhci_driver);
-}
-module_exit(fhci_module_exit);
+module_platform_driver(of_fhci_driver);
MODULE_DESCRIPTION("USB Freescale Host Controller Interface Driver");
MODULE_AUTHOR("Shlomi Gridish <gridish@freescale.com>, "
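/*
 * Editor's sketch, not part of the patch: module_platform_driver() (and
 * module_usb_driver(), used further down for hwa-hc) simply generates
 * the init/exit boilerplate that these conversions delete, roughly
 * equivalent to:
 */
static int __init of_fhci_driver_init(void)
{
	return platform_driver_register(&of_fhci_driver);
}
module_init(of_fhci_driver_init);

static void __exit of_fhci_driver_exit(void)
{
	platform_driver_unregister(&of_fhci_driver);
}
module_exit(of_fhci_driver_exit);
/*
 * The _init/_exit names are derived from the driver structure name by
 * the macro; the expansion shown is an approximation, not the literal
 * macro body.
 */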
diff --git a/drivers/usb/host/fsl-mph-dr-of.c b/drivers/usb/host/fsl-mph-dr-of.c
index 9037035..7916e56 100644
--- a/drivers/usb/host/fsl-mph-dr-of.c
+++ b/drivers/usb/host/fsl-mph-dr-of.c
@@ -297,17 +297,7 @@ static struct platform_driver fsl_usb2_mph_dr_driver = {
.remove = __devexit_p(fsl_usb2_mph_dr_of_remove),
};
-static int __init fsl_usb2_mph_dr_init(void)
-{
- return platform_driver_register(&fsl_usb2_mph_dr_driver);
-}
-module_init(fsl_usb2_mph_dr_init);
-
-static void __exit fsl_usb2_mph_dr_exit(void)
-{
- platform_driver_unregister(&fsl_usb2_mph_dr_driver);
-}
-module_exit(fsl_usb2_mph_dr_exit);
+module_platform_driver(fsl_usb2_mph_dr_driver);
MODULE_DESCRIPTION("FSL MPH DR OF devices driver");
MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index 9bfac65..104730d 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -481,7 +481,7 @@ static int __hwahc_op_set_ptk(struct wusbhc *wusbhc, u8 port_idx, u32 tkid,
encryption_value = 0;
}
- /* Set the encryption type for commmunicating with the device */
+ /* Set the encryption type for communicating with the device */
result = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
USB_REQ_SET_ENCRYPTION,
USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
@@ -776,7 +776,6 @@ static int hwahc_probe(struct usb_interface *usb_iface,
goto error_alloc;
}
usb_hcd->wireless = 1;
- set_bit(HCD_FLAG_SAW_IRQ, &usb_hcd->flags);
wusbhc = usb_hcd_to_wusbhc(usb_hcd);
hwahc = container_of(wusbhc, struct hwahc, wusbhc);
hwahc_init(hwahc);
@@ -837,18 +836,7 @@ static struct usb_driver hwahc_driver = {
.id_table = hwahc_id_table,
};
-static int __init hwahc_driver_init(void)
-{
- return usb_register(&hwahc_driver);
-}
-module_init(hwahc_driver_init);
-
-static void __exit hwahc_driver_exit(void)
-{
- usb_deregister(&hwahc_driver);
-}
-module_exit(hwahc_driver_exit);
-
+module_usb_driver(hwahc_driver);
MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
MODULE_DESCRIPTION("Host Wired Adapter USB Host Control Driver");
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
index 2ee18cf..ff471c1 100644
--- a/drivers/usb/host/imx21-hcd.c
+++ b/drivers/usb/host/imx21-hcd.c
@@ -473,7 +473,7 @@ static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
/* End handling */
/* =========================================== */
-/* Endpoint now idle - release it's ETD(s) or asssign to queued request */
+/* Endpoint now idle - release its ETD(s) or assign to queued request */
static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
{
int i;
@@ -1924,18 +1924,7 @@ static struct platform_driver imx21_hcd_driver = {
.resume = NULL,
};
-static int __init imx21_hcd_init(void)
-{
- return platform_driver_register(&imx21_hcd_driver);
-}
-
-static void __exit imx21_hcd_cleanup(void)
-{
- platform_driver_unregister(&imx21_hcd_driver);
-}
-
-module_init(imx21_hcd_init);
-module_exit(imx21_hcd_cleanup);
+module_platform_driver(imx21_hcd_driver);
MODULE_DESCRIPTION("i.MX21 USB Host controller");
MODULE_AUTHOR("Martin Fuzzey");
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index 27dfab8..fc72d44 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -32,6 +32,13 @@ static struct kmem_cache *qtd_cachep;
static struct kmem_cache *qh_cachep;
static struct kmem_cache *urb_listitem_cachep;
+enum queue_head_types {
+ QH_CONTROL,
+ QH_BULK,
+ QH_INTERRUPT,
+ QH_END
+};
+
struct isp1760_hcd {
u32 hcs_params;
spinlock_t lock;
@@ -40,7 +47,7 @@ struct isp1760_hcd {
struct slotinfo int_slots[32];
int int_done_map;
struct memory_chunk memory_pool[BLOCKS];
- struct list_head controlqhs, bulkqhs, interruptqhs;
+ struct list_head qh_list[QH_END];
/* periodic schedule support */
#define DEFAULT_I_TDPS 1024
@@ -406,12 +413,12 @@ static int priv_init(struct usb_hcd *hcd)
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
u32 hcc_params;
+ int i;
spin_lock_init(&priv->lock);
- INIT_LIST_HEAD(&priv->interruptqhs);
- INIT_LIST_HEAD(&priv->controlqhs);
- INIT_LIST_HEAD(&priv->bulkqhs);
+ for (i = 0; i < QH_END; i++)
+ INIT_LIST_HEAD(&priv->qh_list[i]);
/*
* hw default: 1K periodic list heads, one per frame.
@@ -930,9 +937,9 @@ void schedule_ptds(struct usb_hcd *hcd)
struct isp1760_hcd *priv;
struct isp1760_qh *qh, *qh_next;
struct list_head *ep_queue;
- struct usb_host_endpoint *ep;
LIST_HEAD(urb_list);
struct urb_listitem *urb_listitem, *urb_listitem_next;
+ int i;
if (!hcd) {
WARN_ON(1);
@@ -944,28 +951,13 @@ void schedule_ptds(struct usb_hcd *hcd)
/*
* check finished/retired xfers, transfer payloads, call urb_done()
*/
- ep_queue = &priv->interruptqhs;
- while (ep_queue) {
+ for (i = 0; i < QH_END; i++) {
+ ep_queue = &priv->qh_list[i];
list_for_each_entry_safe(qh, qh_next, ep_queue, qh_list) {
- ep = list_entry(qh->qtd_list.next, struct isp1760_qtd,
- qtd_list)->urb->ep;
collect_qtds(hcd, qh, &urb_list);
- if (list_empty(&qh->qtd_list)) {
+ if (list_empty(&qh->qtd_list))
list_del(&qh->qh_list);
- if (ep->hcpriv == NULL) {
- /* Endpoint has been disabled, so we
- can free the associated queue head. */
- qh_free(qh);
- }
- }
}
-
- if (ep_queue == &priv->interruptqhs)
- ep_queue = &priv->controlqhs;
- else if (ep_queue == &priv->controlqhs)
- ep_queue = &priv->bulkqhs;
- else
- ep_queue = NULL;
}
list_for_each_entry_safe(urb_listitem, urb_listitem_next, &urb_list,
@@ -998,17 +990,10 @@ void schedule_ptds(struct usb_hcd *hcd)
*
* I'm sure this scheme could be improved upon!
*/
- ep_queue = &priv->controlqhs;
- while (ep_queue) {
+ for (i = 0; i < QH_END; i++) {
+ ep_queue = &priv->qh_list[i];
list_for_each_entry_safe(qh, qh_next, ep_queue, qh_list)
enqueue_qtds(hcd, qh);
-
- if (ep_queue == &priv->controlqhs)
- ep_queue = &priv->interruptqhs;
- else if (ep_queue == &priv->interruptqhs)
- ep_queue = &priv->bulkqhs;
- else
- ep_queue = NULL;
}
}
@@ -1543,16 +1528,16 @@ static int isp1760_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
switch (usb_pipetype(urb->pipe)) {
case PIPE_CONTROL:
- ep_queue = &priv->controlqhs;
+ ep_queue = &priv->qh_list[QH_CONTROL];
break;
case PIPE_BULK:
- ep_queue = &priv->bulkqhs;
+ ep_queue = &priv->qh_list[QH_BULK];
break;
case PIPE_INTERRUPT:
if (urb->interval < 0)
return -EINVAL;
/* FIXME: Check bandwidth */
- ep_queue = &priv->interruptqhs;
+ ep_queue = &priv->qh_list[QH_INTERRUPT];
break;
case PIPE_ISOCHRONOUS:
dev_err(hcd->self.controller, "%s: isochronous USB packets "
@@ -1714,8 +1699,8 @@ static void isp1760_endpoint_disable(struct usb_hcd *hcd,
{
struct isp1760_hcd *priv = hcd_to_priv(hcd);
unsigned long spinflags;
- struct isp1760_qh *qh;
- struct isp1760_qtd *qtd;
+ struct isp1760_qh *qh, *qh_iter;
+ int i;
spin_lock_irqsave(&priv->lock, spinflags);
@@ -1723,14 +1708,17 @@ static void isp1760_endpoint_disable(struct usb_hcd *hcd,
if (!qh)
goto out;
- list_for_each_entry(qtd, &qh->qtd_list, qtd_list)
- if (qtd->status != QTD_RETIRE) {
- dequeue_urb_from_qtd(hcd, qh, qtd);
- qtd->urb->status = -ECONNRESET;
- }
+ WARN_ON(!list_empty(&qh->qtd_list));
+ for (i = 0; i < QH_END; i++)
+ list_for_each_entry(qh_iter, &priv->qh_list[i], qh_list)
+ if (qh_iter == qh) {
+ list_del(&qh_iter->qh_list);
+ i = QH_END;
+ break;
+ }
+ qh_free(qh);
ep->hcpriv = NULL;
- /* Cannot free qh here since it will be parsed by schedule_ptds() */
schedule_ptds(hcd);
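/*
 * Editor's sketch, not part of the patch: the qh_list[] array also lets
 * the pipe-type switch in isp1760_urb_enqueue() be expressed as a small
 * lookup helper (hypothetical, shown only to illustrate the mapping):
 */
static struct list_head *qh_list_for_pipe(struct isp1760_hcd *priv,
					  unsigned int pipe)
{
	switch (usb_pipetype(pipe)) {
	case PIPE_CONTROL:
		return &priv->qh_list[QH_CONTROL];
	case PIPE_BULK:
		return &priv->qh_list[QH_BULK];
	case PIPE_INTERRUPT:
		return &priv->qh_list[QH_INTERRUPT];
	default:
		return NULL;	/* isochronous is rejected by this driver */
	}
}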
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index 2ac4ac2..4592dc1 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -47,23 +47,27 @@ static int of_isp1760_probe(struct platform_device *dev)
int virq;
resource_size_t res_len;
int ret;
- const unsigned int *prop;
unsigned int devflags = 0;
enum of_gpio_flags gpio_flags;
+ u32 bus_width = 0;
drvdata = kzalloc(sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
ret = of_address_to_resource(dp, 0, &memory);
- if (ret)
- return -ENXIO;
+ if (ret) {
+ ret = -ENXIO;
+ goto free_data;
+ }
res_len = resource_size(&memory);
res = request_mem_region(memory.start, res_len, dev_name(&dev->dev));
- if (!res)
- return -EBUSY;
+ if (!res) {
+ ret = -EBUSY;
+ goto free_data;
+ }
if (of_irq_map_one(dp, 0, &oirq)) {
ret = -ENODEV;
@@ -77,8 +81,8 @@ static int of_isp1760_probe(struct platform_device *dev)
devflags |= ISP1760_FLAG_ISP1761;
/* Some systems wire up only 16 of the 32 data lines */
- prop = of_get_property(dp, "bus-width", NULL);
- if (prop && *prop == 16)
+ of_property_read_u32(dp, "bus-width", &bus_width);
+ if (bus_width == 16)
devflags |= ISP1760_FLAG_BUS_WIDTH_16;
if (of_get_property(dp, "port1-otg", NULL) != NULL)
@@ -125,6 +129,7 @@ free_gpio:
gpio_free(drvdata->rst_gpio);
release_reg:
release_mem_region(memory.start, res_len);
+free_data:
kfree(drvdata);
return ret;
}
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 95a9fec..77afabc 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -139,8 +139,23 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
}
iclk = clk_get(&pdev->dev, "ohci_clk");
+ if (IS_ERR(iclk)) {
+ dev_err(&pdev->dev, "failed to get ohci_clk\n");
+ retval = PTR_ERR(iclk);
+ goto err3;
+ }
fclk = clk_get(&pdev->dev, "uhpck");
+ if (IS_ERR(fclk)) {
+ dev_err(&pdev->dev, "failed to get uhpck\n");
+ retval = PTR_ERR(fclk);
+ goto err4;
+ }
hclk = clk_get(&pdev->dev, "hclk");
+ if (IS_ERR(hclk)) {
+ dev_err(&pdev->dev, "failed to get hclk\n");
+ retval = PTR_ERR(hclk);
+ goto err5;
+ }
at91_start_hc(pdev);
ohci_hcd_init(hcd_to_ohci(hcd));
@@ -153,9 +168,12 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
at91_stop_hc(pdev);
clk_put(hclk);
+ err5:
clk_put(fclk);
+ err4:
clk_put(iclk);
+ err3:
iounmap(hcd->regs);
err2:
@@ -223,10 +241,11 @@ static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int
if (port < 0 || port >= 2)
return;
- if (pdata->vbus_pin[port] <= 0)
+ if (!gpio_is_valid(pdata->vbus_pin[port]))
return;
- gpio_set_value(pdata->vbus_pin[port], !pdata->vbus_pin_inverted ^ enable);
+ gpio_set_value(pdata->vbus_pin[port],
+ !pdata->vbus_pin_active_low[port] ^ enable);
}
static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
@@ -234,10 +253,11 @@ static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
if (port < 0 || port >= 2)
return -EINVAL;
- if (pdata->vbus_pin[port] <= 0)
+ if (!gpio_is_valid(pdata->vbus_pin[port]))
return -EINVAL;
- return gpio_get_value(pdata->vbus_pin[port]) ^ !pdata->vbus_pin_inverted;
+ return gpio_get_value(pdata->vbus_pin[port]) ^
+ !pdata->vbus_pin_active_low[port];
}
/*
@@ -465,7 +485,7 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
if (pdata) {
for (i = 0; i < ARRAY_SIZE(pdata->vbus_pin); i++) {
- if (pdata->vbus_pin[i] <= 0)
+ if (!gpio_is_valid(pdata->vbus_pin[i]))
continue;
gpio_request(pdata->vbus_pin[i], "ohci_vbus");
ohci_at91_usb_set_power(pdata, i, 1);
@@ -474,7 +494,7 @@ static int ohci_hcd_at91_drv_probe(struct platform_device *pdev)
for (i = 0; i < ARRAY_SIZE(pdata->overcurrent_pin); i++) {
int ret;
- if (pdata->overcurrent_pin[i] <= 0)
+ if (!gpio_is_valid(pdata->overcurrent_pin[i]))
continue;
gpio_request(pdata->overcurrent_pin[i], "ohci_overcurrent");
@@ -499,14 +519,14 @@ static int ohci_hcd_at91_drv_remove(struct platform_device *pdev)
if (pdata) {
for (i = 0; i < ARRAY_SIZE(pdata->vbus_pin); i++) {
- if (pdata->vbus_pin[i] <= 0)
+ if (!gpio_is_valid(pdata->vbus_pin[i]))
continue;
ohci_at91_usb_set_power(pdata, i, 0);
gpio_free(pdata->vbus_pin[i]);
}
for (i = 0; i < ARRAY_SIZE(pdata->overcurrent_pin); i++) {
- if (pdata->overcurrent_pin[i] <= 0)
+ if (!gpio_is_valid(pdata->overcurrent_pin[i]))
continue;
free_irq(gpio_to_irq(pdata->overcurrent_pin[i]), pdev);
gpio_free(pdata->overcurrent_pin[i]);
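/*
 * Editor's note, not part of the patch: the reason "<= 0" was wrong is
 * that GPIO 0 is a perfectly usable pin. gpio_is_valid() (from
 * <asm-generic/gpio.h>, shown approximately) accepts it and treats any
 * negative number as "no pin wired up":
 */
static inline bool gpio_is_valid(int number)
{
	return number >= 0 && number < ARCH_NR_GPIOS;
}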
diff --git a/drivers/usb/host/ohci-au1xxx.c b/drivers/usb/host/ohci-au1xxx.c
index 9b66df8..4ea63b2 100644
--- a/drivers/usb/host/ohci-au1xxx.c
+++ b/drivers/usb/host/ohci-au1xxx.c
@@ -89,7 +89,7 @@ static const struct hc_driver ohci_au1xxx_hc_driver = {
static int ohci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
{
- int ret;
+ int ret, unit;
struct usb_hcd *hcd;
if (usb_disabled())
@@ -120,7 +120,9 @@ static int ohci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
goto err2;
}
- if (alchemy_usb_control(ALCHEMY_USB_OHCI0, 1)) {
+ unit = (hcd->rsrc_start == AU1300_USB_OHCI1_PHYS_ADDR) ?
+ ALCHEMY_USB_OHCI1 : ALCHEMY_USB_OHCI0;
+ if (alchemy_usb_control(unit, 1)) {
printk(KERN_INFO "%s: controller init failed!\n", pdev->name);
ret = -ENODEV;
goto err3;
@@ -135,7 +137,7 @@ static int ohci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
return ret;
}
- alchemy_usb_control(ALCHEMY_USB_OHCI0, 0);
+ alchemy_usb_control(unit, 0);
err3:
iounmap(hcd->regs);
err2:
@@ -148,9 +150,12 @@ err1:
static int ohci_hcd_au1xxx_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ int unit;
+ unit = (hcd->rsrc_start == AU1300_USB_OHCI1_PHYS_ADDR) ?
+ ALCHEMY_USB_OHCI1 : ALCHEMY_USB_OHCI0;
usb_remove_hcd(hcd);
- alchemy_usb_control(ALCHEMY_USB_OHCI0, 0);
+ alchemy_usb_control(unit, 0);
iounmap(hcd->regs);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
@@ -173,12 +178,9 @@ static int ohci_hcd_au1xxx_drv_suspend(struct device *dev)
* mark HW unaccessible, bail out if RH has been resumed. Use
* the spinlock to properly synchronize with possible pending
* RH suspend or resume activity.
- *
- * This is still racy as hcd->state is manipulated outside of
- * any locks =P But that will be a different fix.
*/
spin_lock_irqsave(&ohci->lock, flags);
- if (hcd->state != HC_STATE_SUSPENDED) {
+ if (ohci->rh_state != OHCI_RH_SUSPENDED) {
rc = -EINVAL;
goto bail;
}
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index d7d3449..e4bcb62 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -82,6 +82,14 @@ urb_print(struct urb * urb, char * str, int small, int status)
ohci_dbg(ohci,format, ## arg ); \
} while (0);
+/* Version for use where "next" is the address of a local variable */
+#define ohci_dbg_nosw(ohci, next, size, format, arg...) \
+ do { \
+ unsigned s_len; \
+ s_len = scnprintf(*next, *size, format, ## arg); \
+ *size -= s_len; *next += s_len; \
+ } while (0);
+
static void ohci_dump_intr_mask (
struct ohci_hcd *ohci,
@@ -127,6 +135,19 @@ static char *hcfs2string (int state)
return "?";
}
+static const char *rh_state_string(struct ohci_hcd *ohci)
+{
+ switch (ohci->rh_state) {
+ case OHCI_RH_HALTED:
+ return "halted";
+ case OHCI_RH_SUSPENDED:
+ return "suspended";
+ case OHCI_RH_RUNNING:
+ return "running";
+ }
+ return "?";
+}
+
// dump control and status registers
static void
ohci_dump_status (struct ohci_hcd *controller, char **next, unsigned *size)
@@ -136,9 +157,10 @@ ohci_dump_status (struct ohci_hcd *controller, char **next, unsigned *size)
temp = ohci_readl (controller, &regs->revision) & 0xff;
ohci_dbg_sw (controller, next, size,
- "OHCI %d.%d, %s legacy support registers\n",
+ "OHCI %d.%d, %s legacy support registers, rh state %s\n",
0x03 & (temp >> 4), (temp & 0x0f),
- (temp & 0x0100) ? "with" : "NO");
+ (temp & 0x0100) ? "with" : "NO",
+ rh_state_string(controller));
temp = ohci_readl (controller, &regs->control);
ohci_dbg_sw (controller, next, size,
@@ -639,7 +661,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
/* dump driver info, then registers in spec order */
- ohci_dbg_sw (ohci, &next, &size,
+ ohci_dbg_nosw(ohci, &next, &size,
"bus %s, device %s\n"
"%s\n"
"%s\n",
@@ -658,7 +680,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
/* hcca */
if (ohci->hcca)
- ohci_dbg_sw (ohci, &next, &size,
+ ohci_dbg_nosw(ohci, &next, &size,
"hcca frame 0x%04x\n", ohci_frame_no(ohci));
/* other registers mostly affect frame timings */
diff --git a/drivers/usb/host/ohci-ep93xx.c b/drivers/usb/host/ohci-ep93xx.c
index dc45d48..3d63574 100644
--- a/drivers/usb/host/ohci-ep93xx.c
+++ b/drivers/usb/host/ohci-ep93xx.c
@@ -179,8 +179,6 @@ static int ohci_hcd_ep93xx_drv_suspend(struct platform_device *pdev, pm_message_
ohci->next_statechange = jiffies;
ep93xx_stop_hc(&pdev->dev);
- hcd->state = HC_STATE_SUSPENDED;
-
return 0;
}
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
new file mode 100644
index 0000000..55aa35a
--- /dev/null
+++ b/drivers/usb/host/ohci-exynos.c
@@ -0,0 +1,274 @@
+/*
+ * SAMSUNG EXYNOS USB HOST OHCI Controller
+ *
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Author: Jingoo Han <jg1.han@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <mach/ohci.h>
+#include <plat/usb-phy.h>
+
+struct exynos_ohci_hcd {
+ struct device *dev;
+ struct usb_hcd *hcd;
+ struct clk *clk;
+};
+
+static int ohci_exynos_start(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int ret;
+
+ ohci_dbg(ohci, "ohci_exynos_start, ohci:%p", ohci);
+
+ ret = ohci_init(ohci);
+ if (ret < 0)
+ return ret;
+
+ ret = ohci_run(ohci);
+ if (ret < 0) {
+ err("can't start %s", hcd->self.bus_name);
+ ohci_stop(hcd);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct hc_driver exynos_ohci_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "EXYNOS OHCI Host Controller",
+ .hcd_priv_size = sizeof(struct ohci_hcd),
+
+ .irq = ohci_irq,
+ .flags = HCD_MEMORY|HCD_USB11,
+
+ .start = ohci_exynos_start,
+ .stop = ohci_stop,
+ .shutdown = ohci_shutdown,
+
+ .get_frame_number = ohci_get_frame,
+
+ .urb_enqueue = ohci_urb_enqueue,
+ .urb_dequeue = ohci_urb_dequeue,
+ .endpoint_disable = ohci_endpoint_disable,
+
+ .hub_status_data = ohci_hub_status_data,
+ .hub_control = ohci_hub_control,
+#ifdef CONFIG_PM
+ .bus_suspend = ohci_bus_suspend,
+ .bus_resume = ohci_bus_resume,
+#endif
+ .start_port_reset = ohci_start_port_reset,
+};
+
+static int __devinit exynos_ohci_probe(struct platform_device *pdev)
+{
+ struct exynos4_ohci_platdata *pdata;
+ struct exynos_ohci_hcd *exynos_ohci;
+ struct usb_hcd *hcd;
+ struct ohci_hcd *ohci;
+ struct resource *res;
+ int irq;
+ int err;
+
+ pdata = pdev->dev.platform_data;
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data defined\n");
+ return -EINVAL;
+ }
+
+ exynos_ohci = kzalloc(sizeof(struct exynos_ohci_hcd), GFP_KERNEL);
+ if (!exynos_ohci)
+ return -ENOMEM;
+
+ exynos_ohci->dev = &pdev->dev;
+
+ hcd = usb_create_hcd(&exynos_ohci_hc_driver, &pdev->dev,
+ dev_name(&pdev->dev));
+ if (!hcd) {
+ dev_err(&pdev->dev, "Unable to create HCD\n");
+ err = -ENOMEM;
+ goto fail_hcd;
+ }
+
+ exynos_ohci->hcd = hcd;
+ exynos_ohci->clk = clk_get(&pdev->dev, "usbhost");
+
+ if (IS_ERR(exynos_ohci->clk)) {
+ dev_err(&pdev->dev, "Failed to get usbhost clock\n");
+ err = PTR_ERR(exynos_ohci->clk);
+ goto fail_clk;
+ }
+
+ err = clk_enable(exynos_ohci->clk);
+ if (err)
+ goto fail_clken;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "Failed to get I/O memory\n");
+ err = -ENXIO;
+ goto fail_io;
+ }
+
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
+ hcd->regs = ioremap(res->start, resource_size(res));
+ if (!hcd->regs) {
+ dev_err(&pdev->dev, "Failed to remap I/O memory\n");
+ err = -ENOMEM;
+ goto fail_io;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "Failed to get IRQ\n");
+ err = -ENODEV;
+ goto fail;
+ }
+
+ if (pdata->phy_init)
+ pdata->phy_init(pdev, S5P_USB_PHY_HOST);
+
+ ohci = hcd_to_ohci(hcd);
+ ohci_hcd_init(ohci);
+
+ err = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to add USB HCD\n");
+ goto fail;
+ }
+
+ platform_set_drvdata(pdev, exynos_ohci);
+
+ return 0;
+
+fail:
+ iounmap(hcd->regs);
+fail_io:
+ clk_disable(exynos_ohci->clk);
+fail_clken:
+ clk_put(exynos_ohci->clk);
+fail_clk:
+ usb_put_hcd(hcd);
+fail_hcd:
+ kfree(exynos_ohci);
+ return err;
+}
+
+static int __devexit exynos_ohci_remove(struct platform_device *pdev)
+{
+ struct exynos4_ohci_platdata *pdata = pdev->dev.platform_data;
+ struct exynos_ohci_hcd *exynos_ohci = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = exynos_ohci->hcd;
+
+ usb_remove_hcd(hcd);
+
+ if (pdata && pdata->phy_exit)
+ pdata->phy_exit(pdev, S5P_USB_PHY_HOST);
+
+ iounmap(hcd->regs);
+
+ clk_disable(exynos_ohci->clk);
+ clk_put(exynos_ohci->clk);
+
+ usb_put_hcd(hcd);
+ kfree(exynos_ohci);
+
+ return 0;
+}
+
+static void exynos_ohci_shutdown(struct platform_device *pdev)
+{
+ struct exynos_ohci_hcd *exynos_ohci = platform_get_drvdata(pdev);
+ struct usb_hcd *hcd = exynos_ohci->hcd;
+
+ if (hcd->driver->shutdown)
+ hcd->driver->shutdown(hcd);
+}
+
+#ifdef CONFIG_PM
+static int exynos_ohci_suspend(struct device *dev)
+{
+ struct exynos_ohci_hcd *exynos_ohci = dev_get_drvdata(dev);
+ struct usb_hcd *hcd = exynos_ohci->hcd;
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct exynos4_ohci_platdata *pdata = pdev->dev.platform_data;
+ unsigned long flags;
+ int rc = 0;
+
+ /*
+ * Root hub was already suspended. Disable irq emission and
+ * mark HW unaccessible, bail out if RH has been resumed. Use
+ * the spinlock to properly synchronize with possible pending
+ * RH suspend or resume activity.
+ *
+ * This is still racy as hcd->state is manipulated outside of
+ * any locks =P But that will be a different fix.
+ */
+ spin_lock_irqsave(&ohci->lock, flags);
+ if (hcd->state != HC_STATE_SUSPENDED && hcd->state != HC_STATE_HALT) {
+ rc = -EINVAL;
+ goto fail;
+ }
+
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+ if (pdata && pdata->phy_exit)
+ pdata->phy_exit(pdev, S5P_USB_PHY_HOST);
+fail:
+ spin_unlock_irqrestore(&ohci->lock, flags);
+
+ return rc;
+}
+
+static int exynos_ohci_resume(struct device *dev)
+{
+ struct exynos_ohci_hcd *exynos_ohci = dev_get_drvdata(dev);
+ struct usb_hcd *hcd = exynos_ohci->hcd;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct exynos4_ohci_platdata *pdata = pdev->dev.platform_data;
+
+ if (pdata && pdata->phy_init)
+ pdata->phy_init(pdev, S5P_USB_PHY_HOST);
+
+ /* Mark hardware accessible again as we are out of D3 state by now */
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+ ohci_finish_controller_resume(hcd);
+
+ return 0;
+}
+#else
+#define exynos_ohci_suspend NULL
+#define exynos_ohci_resume NULL
+#endif
+
+static const struct dev_pm_ops exynos_ohci_pm_ops = {
+ .suspend = exynos_ohci_suspend,
+ .resume = exynos_ohci_resume,
+};
+
+static struct platform_driver exynos_ohci_driver = {
+ .probe = exynos_ohci_probe,
+ .remove = __devexit_p(exynos_ohci_remove),
+ .shutdown = exynos_ohci_shutdown,
+ .driver = {
+ .name = "exynos-ohci",
+ .owner = THIS_MODULE,
+ .pm = &exynos_ohci_pm_ops,
+ }
+};
+
+MODULE_ALIAS("platform:exynos-ohci");
+MODULE_AUTHOR("Jingoo Han <jg1.han@samsung.com>");
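/*
 * Editor's sketch, not part of the patch: the new driver expects board
 * code to register an "exynos-ohci" platform device whose platform data
 * supplies the PHY callbacks used in probe/suspend/resume above.
 * Assuming the usual Samsung PHY helpers (those function names and the
 * variable name are assumptions, not taken from this hunk), that looks
 * roughly like:
 */
static struct exynos4_ohci_platdata smdk_ohci_pdata = {
	.phy_init	= s5p_usb_phy_init,
	.phy_exit	= s5p_usb_phy_exit,
};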
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index b263919..34b9edd 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -115,13 +115,13 @@ static inline void sb800_prefetch(struct ohci_hcd *ohci, int on)
/* Some boards misreport power switching/overcurrent */
-static int distrust_firmware = 1;
+static bool distrust_firmware = 1;
module_param (distrust_firmware, bool, 0);
MODULE_PARM_DESC (distrust_firmware,
"true to distrust firmware power/overcurrent setup");
/* Some boards leave IR set wrongly, since they fail BIOS/SMM handshakes */
-static int no_handshake = 0;
+static bool no_handshake = 0;
module_param (no_handshake, bool, 0);
MODULE_PARM_DESC (no_handshake, "true (not default) disables BIOS handshake");
@@ -209,7 +209,7 @@ static int ohci_urb_enqueue (
retval = -ENODEV;
goto fail;
}
- if (!HC_IS_RUNNING(hcd->state)) {
+ if (ohci->rh_state != OHCI_RH_RUNNING) {
retval = -ENODEV;
goto fail;
}
@@ -274,7 +274,7 @@ static int ohci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
rc = usb_hcd_check_unlink_urb(hcd, urb, status);
if (rc) {
; /* Do nothing */
- } else if (HC_IS_RUNNING(hcd->state)) {
+ } else if (ohci->rh_state == OHCI_RH_RUNNING) {
urb_priv_t *urb_priv;
/* Unless an IRQ completed the unlink while it was being
@@ -321,7 +321,7 @@ ohci_endpoint_disable (struct usb_hcd *hcd, struct usb_host_endpoint *ep)
rescan:
spin_lock_irqsave (&ohci->lock, flags);
- if (!HC_IS_RUNNING (hcd->state)) {
+ if (ohci->rh_state != OHCI_RH_RUNNING) {
sanitize:
ed->state = ED_IDLE;
if (quirk_zfmicro(ohci) && ed->type == PIPE_INTERRUPT)
@@ -377,6 +377,7 @@ static void ohci_usb_reset (struct ohci_hcd *ohci)
ohci->hc_control = ohci_readl (ohci, &ohci->regs->control);
ohci->hc_control &= OHCI_CTRL_RWC;
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
+ ohci->rh_state = OHCI_RH_HALTED;
}
/* ohci_shutdown forcibly disables IRQs and DMA, helping kexec and
@@ -500,7 +501,7 @@ static int ohci_init (struct ohci_hcd *ohci)
if (distrust_firmware)
ohci->flags |= OHCI_QUIRK_HUB_POWER;
- disable (ohci);
+ ohci->rh_state = OHCI_RH_HALTED;
ohci->regs = hcd->regs;
/* REVISIT this BIOS handshake is now moved into PCI "quirks", and
@@ -575,7 +576,7 @@ static int ohci_run (struct ohci_hcd *ohci)
int first = ohci->fminterval == 0;
struct usb_hcd *hcd = ohci_to_hcd(ohci);
- disable (ohci);
+ ohci->rh_state = OHCI_RH_HALTED;
/* boot firmware should have set this up (5.1.1.3.1) */
if (first) {
@@ -688,7 +689,7 @@ retry:
ohci->hc_control &= OHCI_CTRL_RWC;
ohci->hc_control |= OHCI_CONTROL_INIT | OHCI_USB_OPER;
ohci_writel (ohci, ohci->hc_control, &ohci->regs->control);
- hcd->state = HC_STATE_RUNNING;
+ ohci->rh_state = OHCI_RH_RUNNING;
/* wake on ConnectStatusChange, matching external hubs */
ohci_writel (ohci, RH_HS_DRWE, &ohci->regs->roothub.status);
@@ -725,7 +726,6 @@ retry:
// POTPGT delay is bits 24-31, in 2 ms units.
mdelay ((val >> 23) & 0x1fe);
- hcd->state = HC_STATE_RUNNING;
if (quirk_zfmicro(ohci)) {
/* Create timer to watch for bad queue state on ZF Micro */
@@ -761,7 +761,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
* of dead, unclocked, or unplugged (CardBus...) devices
*/
if (ints == ~(u32)0) {
- disable (ohci);
+ ohci->rh_state = OHCI_RH_HALTED;
ohci_dbg (ohci, "device removed!\n");
usb_hc_died(hcd);
return IRQ_HANDLED;
@@ -771,7 +771,7 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
ints &= ohci_readl(ohci, &regs->intrenable);
/* interrupt for some other device? */
- if (ints == 0 || unlikely(hcd->state == HC_STATE_HALT))
+ if (ints == 0 || unlikely(ohci->rh_state == OHCI_RH_HALTED))
return IRQ_NOTMINE;
if (ints & OHCI_INTR_UE) {
@@ -786,8 +786,8 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
schedule_work (&ohci->nec_work);
} else {
- disable (ohci);
ohci_err (ohci, "OHCI Unrecoverable Error, disabled\n");
+ ohci->rh_state = OHCI_RH_HALTED;
usb_hc_died(hcd);
}
@@ -871,11 +871,11 @@ static irqreturn_t ohci_irq (struct usb_hcd *hcd)
if ((ints & OHCI_INTR_SF) != 0
&& !ohci->ed_rm_list
&& !ohci->ed_to_check
- && HC_IS_RUNNING(hcd->state))
+ && ohci->rh_state == OHCI_RH_RUNNING)
ohci_writel (ohci, OHCI_INTR_SF, &regs->intrdisable);
spin_unlock (&ohci->lock);
- if (HC_IS_RUNNING(hcd->state)) {
+ if (ohci->rh_state == OHCI_RH_RUNNING) {
ohci_writel (ohci, ints, &regs->intrstatus);
ohci_writel (ohci, OHCI_INTR_MIE, &regs->intrenable);
// flush those writes
@@ -929,7 +929,7 @@ static int ohci_restart (struct ohci_hcd *ohci)
struct urb_priv *priv;
spin_lock_irq(&ohci->lock);
- disable (ohci);
+ ohci->rh_state = OHCI_RH_HALTED;
/* Recycle any "live" eds/tds (and urbs). */
if (!list_empty (&ohci->pending))
@@ -1005,6 +1005,11 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ohci_hcd_s3c2410_driver
#endif
+#ifdef CONFIG_USB_OHCI_EXYNOS
+#include "ohci-exynos.c"
+#define PLATFORM_DRIVER exynos_ohci_driver
+#endif
+
#ifdef CONFIG_USB_OHCI_HCD_OMAP1
#include "ohci-omap.c"
#define OMAP1_PLATFORM_DRIVER ohci_hcd_omap_driver
@@ -1111,7 +1116,7 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ohci_hcd_ath79_driver
#endif
-#ifdef CONFIG_NLM_XLR
+#ifdef CONFIG_CPU_XLR
#include "ohci-xls.c"
#define PLATFORM_DRIVER ohci_xls_driver
#endif
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index 2f00040..836772d 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -111,6 +111,7 @@ __acquires(ohci->lock)
if (!autostop) {
ohci->next_statechange = jiffies + msecs_to_jiffies (5);
ohci->autostop = 0;
+ ohci->rh_state = OHCI_RH_SUSPENDED;
}
done:
@@ -140,7 +141,7 @@ __acquires(ohci->lock)
if (ohci->hc_control & (OHCI_CTRL_IR | OHCI_SCHED_ENABLES)) {
/* this can happen after resuming a swsusp snapshot */
- if (hcd->state == HC_STATE_RESUMING) {
+ if (ohci->rh_state != OHCI_RH_RUNNING) {
ohci_dbg (ohci, "BIOS/SMM active, control %03x\n",
ohci->hc_control);
status = -EBUSY;
@@ -274,6 +275,7 @@ skip_resume:
(void) ohci_readl (ohci, &ohci->regs->control);
}
+ ohci->rh_state = OHCI_RH_RUNNING;
return 0;
}
@@ -336,11 +338,8 @@ static void ohci_finish_controller_resume(struct usb_hcd *hcd)
/* If needed, reinitialize and suspend the root hub */
if (need_reinit) {
spin_lock_irq(&ohci->lock);
- hcd->state = HC_STATE_RESUMING;
ohci_rh_resume(ohci);
- hcd->state = HC_STATE_QUIESCING;
ohci_rh_suspend(ohci, 0);
- hcd->state = HC_STATE_SUSPENDED;
spin_unlock_irq(&ohci->lock);
}
diff --git a/drivers/usb/host/ohci-omap.c b/drivers/usb/host/ohci-omap.c
index e4b8782..db39686 100644
--- a/drivers/usb/host/ohci-omap.c
+++ b/drivers/usb/host/ohci-omap.c
@@ -516,7 +516,6 @@ static int ohci_omap_suspend(struct platform_device *dev, pm_message_t message)
ohci->next_statechange = jiffies;
omap_ohci_clock_power(0);
- ohci_to_hcd(ohci)->state = HC_STATE_SUSPENDED;
return 0;
}
diff --git a/drivers/usb/host/ohci-omap3.c b/drivers/usb/host/ohci-omap3.c
index 516ebc4..1b8133b 100644
--- a/drivers/usb/host/ohci-omap3.c
+++ b/drivers/usb/host/ohci-omap3.c
@@ -31,6 +31,7 @@
#include <linux/platform_device.h>
#include <plat/usb.h>
+#include <linux/pm_runtime.h>
/*-------------------------------------------------------------------------*/
@@ -134,7 +135,7 @@ static int __devinit ohci_hcd_omap3_probe(struct platform_device *pdev)
int irq;
if (usb_disabled())
- goto err_end;
+ return -ENODEV;
if (!dev->parent) {
dev_err(dev, "Missing parent device\n");
@@ -172,11 +173,8 @@ static int __devinit ohci_hcd_omap3_probe(struct platform_device *pdev)
hcd->rsrc_len = resource_size(res);
hcd->regs = regs;
- ret = omap_usbhs_enable(dev);
- if (ret) {
- dev_dbg(dev, "failed to start ohci\n");
- goto err_end;
- }
+ pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
ohci_hcd_init(hcd_to_ohci(hcd));
@@ -189,9 +187,7 @@ static int __devinit ohci_hcd_omap3_probe(struct platform_device *pdev)
return 0;
err_add_hcd:
- omap_usbhs_disable(dev);
-
-err_end:
+ pm_runtime_put_sync(dev);
usb_put_hcd(hcd);
err_io:
@@ -220,9 +216,9 @@ static int __devexit ohci_hcd_omap3_remove(struct platform_device *pdev)
iounmap(hcd->regs);
usb_remove_hcd(hcd);
- omap_usbhs_disable(dev);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
usb_put_hcd(hcd);
-
return 0;
}
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index bc01b06..1843bb6 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -308,12 +308,9 @@ static int ohci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
* mark HW unaccessible, bail out if RH has been resumed. Use
* the spinlock to properly synchronize with possible pending
* RH suspend or resume activity.
- *
- * This is still racy as hcd->state is manipulated outside of
- * any locks =P But that will be a different fix.
*/
spin_lock_irqsave (&ohci->lock, flags);
- if (hcd->state != HC_STATE_SUSPENDED) {
+ if (ohci->rh_state != OHCI_RH_SUSPENDED) {
rc = -EINVAL;
goto bail;
}
@@ -400,6 +397,10 @@ static const struct pci_device_id pci_ids [] = { {
/* handle any USB OHCI controller */
PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_OHCI, ~0),
.driver_data = (unsigned long) &ohci_pci_hc_driver,
+ }, {
+ /* The device in the ConneXT I/O hub has no class reg */
+ PCI_VDEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_USB_OHCI),
+ .driver_data = (unsigned long) &ohci_pci_hc_driver,
}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE (pci, pci_ids);
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index 29dfefe..6313e44 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -502,8 +502,6 @@ static int ohci_hcd_pxa27x_drv_suspend(struct device *dev)
ohci->ohci.next_statechange = jiffies;
pxa27x_stop_hc(ohci, dev);
- hcd->state = HC_STATE_SUSPENDED;
-
return 0;
}
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index 15dc51d..c5a1ea9 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -912,7 +912,7 @@ rescan_all:
/* only take off EDs that the HC isn't using, accounting for
* frame counter wraps and EDs with partially retired TDs
*/
- if (likely (HC_IS_RUNNING(ohci_to_hcd(ohci)->state))) {
+ if (likely(ohci->rh_state == OHCI_RH_RUNNING)) {
if (tick_before (tick, ed->tick)) {
skip_ed:
last = &ed->ed_next;
@@ -1012,7 +1012,7 @@ rescan_this:
/* but if there's work queued, reschedule */
if (!list_empty (&ed->td_list)) {
- if (HC_IS_RUNNING(ohci_to_hcd(ohci)->state))
+ if (ohci->rh_state == OHCI_RH_RUNNING)
ed_schedule (ohci, ed);
}
@@ -1021,9 +1021,7 @@ rescan_this:
}
/* maybe reenable control and bulk lists */
- if (HC_IS_RUNNING(ohci_to_hcd(ohci)->state)
- && ohci_to_hcd(ohci)->state != HC_STATE_QUIESCING
- && !ohci->ed_rm_list) {
+ if (ohci->rh_state == OHCI_RH_RUNNING && !ohci->ed_rm_list) {
u32 command = 0, control = 0;
if (ohci->ed_controltail) {
diff --git a/drivers/usb/host/ohci-s3c2410.c b/drivers/usb/host/ohci-s3c2410.c
index a1877c4..56dcf06 100644
--- a/drivers/usb/host/ohci-s3c2410.c
+++ b/drivers/usb/host/ohci-s3c2410.c
@@ -486,15 +486,66 @@ static int __devexit ohci_hcd_s3c2410_drv_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM
+static int ohci_hcd_s3c2410_drv_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ struct platform_device *pdev = to_platform_device(dev);
+ unsigned long flags;
+ int rc = 0;
+
+ /*
+ * Root hub was already suspended. Disable irq emission and
+ * mark HW unaccessible, bail out if RH has been resumed. Use
+ * the spinlock to properly synchronize with possible pending
+ * RH suspend or resume activity.
+ */
+ spin_lock_irqsave(&ohci->lock, flags);
+ if (ohci->rh_state != OHCI_RH_SUSPENDED) {
+ rc = -EINVAL;
+ goto bail;
+ }
+
+ clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+
+ s3c2410_stop_hc(pdev);
+bail:
+ spin_unlock_irqrestore(&ohci->lock, flags);
+
+ return rc;
+}
+
+static int ohci_hcd_s3c2410_drv_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct platform_device *pdev = to_platform_device(dev);
+
+ s3c2410_start_hc(pdev, hcd);
+
+ set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
+ ohci_finish_controller_resume(hcd);
+
+ return 0;
+}
+#else
+#define ohci_hcd_s3c2410_drv_suspend NULL
+#define ohci_hcd_s3c2410_drv_resume NULL
+#endif
+
+static const struct dev_pm_ops ohci_hcd_s3c2410_pm_ops = {
+ .suspend = ohci_hcd_s3c2410_drv_suspend,
+ .resume = ohci_hcd_s3c2410_drv_resume,
+};
+
static struct platform_driver ohci_hcd_s3c2410_driver = {
.probe = ohci_hcd_s3c2410_drv_probe,
.remove = __devexit_p(ohci_hcd_s3c2410_drv_remove),
.shutdown = usb_hcd_platform_shutdown,
- /*.suspend = ohci_hcd_s3c2410_drv_suspend, */
- /*.resume = ohci_hcd_s3c2410_drv_resume, */
.driver = {
.owner = THIS_MODULE,
.name = "s3c2410-ohci",
+ .pm = &ohci_hcd_s3c2410_pm_ops,
},
};
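/*
 * Editor's note, not part of the patch: since only .suspend/.resume are
 * populated here, a near-equivalent table (it additionally reuses the
 * same callbacks for hibernation) could be generated with the
 * SIMPLE_DEV_PM_OPS() helper from <linux/pm.h>; this is an alternative,
 * not what the patch does:
 */
static SIMPLE_DEV_PM_OPS(ohci_hcd_s3c2410_pm_ops,
			 ohci_hcd_s3c2410_drv_suspend,
			 ohci_hcd_s3c2410_drv_resume);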
diff --git a/drivers/usb/host/ohci-sh.c b/drivers/usb/host/ohci-sh.c
index afc4eb6..84686d9 100644
--- a/drivers/usb/host/ohci-sh.c
+++ b/drivers/usb/host/ohci-sh.c
@@ -29,7 +29,6 @@ static int ohci_sh_start(struct usb_hcd *hcd)
ohci_hcd_init(ohci);
ohci_init(ohci);
ohci_run(ohci);
- hcd->state = HC_STATE_RUNNING;
return 0;
}
diff --git a/drivers/usb/host/ohci-sm501.c b/drivers/usb/host/ohci-sm501.c
index 968cea2..5596ac2 100644
--- a/drivers/usb/host/ohci-sm501.c
+++ b/drivers/usb/host/ohci-sm501.c
@@ -224,7 +224,6 @@ static int ohci_sm501_suspend(struct platform_device *pdev, pm_message_t msg)
ohci->next_statechange = jiffies;
sm501_unit_power(dev->parent, SM501_GATE_USB_HOST, 0);
- ohci_to_hcd(ohci)->state = HC_STATE_SUSPENDED;
return 0;
}
diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
index 6987465..95c1648 100644
--- a/drivers/usb/host/ohci-spear.c
+++ b/drivers/usb/host/ohci-spear.c
@@ -203,7 +203,6 @@ static int spear_ohci_hcd_drv_suspend(struct platform_device *dev,
ohci->next_statechange = jiffies;
spear_stop_ohci(ohci_p);
- ohci_to_hcd(ohci)->state = HC_STATE_SUSPENDED;
return 0;
}
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index 06331d9..120bfe6 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -318,9 +318,6 @@ static int ohci_hcd_tmio_drv_suspend(struct platform_device *dev, pm_message_t s
if (ret)
return ret;
}
-
- hcd->state = HC_STATE_SUSPENDED;
-
return 0;
}
diff --git a/drivers/usb/host/ohci-xls.c b/drivers/usb/host/ohci-xls.c
index a3a9c6f..a224786 100644
--- a/drivers/usb/host/ohci-xls.c
+++ b/drivers/usb/host/ohci-xls.c
@@ -40,7 +40,7 @@ static int ohci_xls_probe_internal(const struct hc_driver *driver,
goto err1;
}
hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
+ hcd->rsrc_len = resource_size(res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
driver->description)) {
diff --git a/drivers/usb/host/ohci.h b/drivers/usb/host/ohci.h
index 0795b93..8ff6f7e 100644
--- a/drivers/usb/host/ohci.h
+++ b/drivers/usb/host/ohci.h
@@ -344,6 +344,12 @@ typedef struct urb_priv {
* a subset of what the full implementation needs. (Linus)
*/
+enum ohci_rh_state {
+ OHCI_RH_HALTED,
+ OHCI_RH_SUSPENDED,
+ OHCI_RH_RUNNING
+};
+
struct ohci_hcd {
spinlock_t lock;
@@ -384,6 +390,7 @@ struct ohci_hcd {
/*
* driver state
*/
+ enum ohci_rh_state rh_state;
int num_ports;
int load [NUM_INTS];
u32 hc_control; /* copy of hc control reg */
@@ -679,11 +686,6 @@ static inline u16 ohci_hwPSW(const struct ohci_hcd *ohci,
/*-------------------------------------------------------------------------*/
-static inline void disable (struct ohci_hcd *ohci)
-{
- ohci_to_hcd(ohci)->state = HC_STATE_HALT;
-}
-
#define FI 0x2edf /* 12000 bits per frame (-1) */
#define FSMP(fi) (0x7fff & ((6 * ((fi) - 210)) / 7))
#define FIT (1 << 31)
@@ -707,7 +709,7 @@ static inline void periodic_reinit (struct ohci_hcd *ohci)
#define read_roothub(hc, register, mask) ({ \
u32 temp = ohci_readl (hc, &hc->regs->roothub.register); \
if (temp == -1) \
- disable (hc); \
+ hc->rh_state = OHCI_RH_HALTED; \
else if (hc->flags & OHCI_QUIRK_AMD756) \
while (temp & mask) \
temp = ohci_readl (hc, &hc->regs->roothub.register); \
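/*
 * Editor's sketch, not part of the patch: rh_state replaces the old
 * habit of (ab)using hcd->state, so the checks scattered through
 * ohci-hcd.c and ohci-q.c reduce to plain comparisons. Hypothetical
 * helpers, shown only to make the tracked state machine explicit:
 */
static inline bool ohci_rh_is_running(const struct ohci_hcd *ohci)
{
	return ohci->rh_state == OHCI_RH_RUNNING; /* was HC_IS_RUNNING(hcd->state) */
}

static inline void ohci_rh_halt(struct ohci_hcd *ohci)
{
	ohci->rh_state = OHCI_RH_HALTED;	/* replaces the removed disable() */
}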
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index dcd8898..015c7c6 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -233,7 +233,7 @@ module_param(park, uint, S_IRUGO);
MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets");
/* For flakey hardware, ignore overcurrent indicators */
-static int ignore_oc;
+static bool ignore_oc;
module_param(ignore_oc, bool, S_IRUGO);
MODULE_PARM_DESC(ignore_oc, "ignore bogus hardware overcurrent indications");
@@ -3951,24 +3951,7 @@ static struct platform_driver oxu_driver = {
}
};
-static int __init oxu_module_init(void)
-{
- int retval = 0;
-
- retval = platform_driver_register(&oxu_driver);
- if (retval < 0)
- return retval;
-
- return retval;
-}
-
-static void __exit oxu_module_cleanup(void)
-{
- platform_driver_unregister(&oxu_driver);
-}
-
-module_init(oxu_module_init);
-module_exit(oxu_module_cleanup);
+module_platform_driver(oxu_driver);
MODULE_DESCRIPTION("Oxford OXU210HP HCD driver - ver. " DRIVER_VERSION);
MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index caf8742..7732d69 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -867,6 +867,22 @@ hc_init:
static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
{
+ /* Skip Netlogic mips SoC's internal PCI USB controller.
+ * This device does not need/support EHCI/OHCI handoff
+ */
+ if (pdev->vendor == 0x184e) /* vendor Netlogic */
+ return;
+ if (pdev->class != PCI_CLASS_SERIAL_USB_UHCI &&
+ pdev->class != PCI_CLASS_SERIAL_USB_OHCI &&
+ pdev->class != PCI_CLASS_SERIAL_USB_EHCI &&
+ pdev->class != PCI_CLASS_SERIAL_USB_XHCI)
+ return;
+
+ if (pci_enable_device(pdev) < 0) {
+ dev_warn(&pdev->dev, "Can't enable PCI device, "
+ "BIOS handoff failed.\n");
+ return;
+ }
if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI)
quirk_usb_handoff_uhci(pdev);
else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI)
@@ -875,5 +891,6 @@ static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
quirk_usb_disable_ehci(pdev);
else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
quirk_usb_handoff_xhci(pdev);
+ pci_disable_device(pdev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
diff --git a/drivers/usb/host/u132-hcd.c b/drivers/usb/host/u132-hcd.c
index 533d12c..16dd6a6 100644
--- a/drivers/usb/host/u132-hcd.c
+++ b/drivers/usb/host/u132-hcd.c
@@ -74,7 +74,7 @@ MODULE_LICENSE("GPL");
#define INT_MODULE_PARM(n, v) static int n = v;module_param(n, int, 0444)
INT_MODULE_PARM(testing, 0);
/* Some boards misreport power switching/overcurrent*/
-static int distrust_firmware = 1;
+static bool distrust_firmware = 1;
module_param(distrust_firmware, bool, 0);
MODULE_PARM_DESC(distrust_firmware, "true to distrust firmware power/overcurren"
"t setup");
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index c8ae199..6b5eb10 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -59,7 +59,7 @@
#define DRIVER_DESC "USB Universal Host Controller Interface driver"
/* for flakey hardware, ignore overcurrent indicators */
-static int ignore_oc;
+static bool ignore_oc;
module_param(ignore_oc, bool, S_IRUGO);
MODULE_PARM_DESC(ignore_oc, "ignore hardware overcurrent indications");
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index f6ca80e..d2c6f5a 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -943,7 +943,7 @@ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb,
if (usb_pipein(urb->pipe))
status |= TD_CTRL_SPD;
- i = urb->num_sgs;
+ i = urb->num_mapped_sgs;
if (len > 0 && i > 0) {
sg = urb->sg;
data = sg_dma_address(sg);
diff --git a/drivers/usb/host/whci/qset.c b/drivers/usb/host/whci/qset.c
index a403b53..76083ae 100644
--- a/drivers/usb/host/whci/qset.c
+++ b/drivers/usb/host/whci/qset.c
@@ -443,7 +443,7 @@ static int qset_add_urb_sg(struct whc *whc, struct whc_qset *qset, struct urb *u
remaining = urb->transfer_buffer_length;
- for_each_sg(urb->sg, sg, urb->num_sgs, i) {
+ for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
dma_addr_t dma_addr;
size_t dma_remaining;
dma_addr_t sp, ep;
@@ -561,7 +561,7 @@ static int qset_add_urb_sg_linearize(struct whc *whc, struct whc_qset *qset,
remaining = urb->transfer_buffer_length;
- for_each_sg(urb->sg, sg, urb->num_sgs, i) {
+ for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
size_t len;
size_t sg_remaining;
void *orig;
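/*
 * Editor's note, not part of the patch: urb->num_sgs is the number of
 * scatterlist entries the submitter filled in, while urb->num_mapped_sgs
 * is what usb_hcd_map_urb_for_dma() got back from dma_map_sg(), which
 * can be smaller when the DMA/IOMMU layer coalesces adjacent entries.
 * Walking the mapped view therefore looks like this (hypothetical
 * helper, for illustration only):
 */
static void queue_mapped_sg_example(struct urb *urb)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		/* queue TRBs/TDs/qTDs covering [addr, addr + len) */
		(void)addr;
		(void)len;
	}
}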
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 430e88f..557b6f3 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -57,17 +57,15 @@ static void xhci_common_hub_descriptor(struct xhci_hcd *xhci,
desc->bHubContrCurrent = 0;
desc->bNbrPorts = ports;
- /* Ugh, these should be #defines, FIXME */
- /* Using table 11-13 in USB 2.0 spec. */
temp = 0;
- /* Bits 1:0 - support port power switching, or power always on */
+ /* Bits 1:0 - support per-port power switching, or power always on */
if (HCC_PPC(xhci->hcc_params))
- temp |= 0x0001;
+ temp |= HUB_CHAR_INDV_PORT_LPSM;
else
- temp |= 0x0002;
+ temp |= HUB_CHAR_NO_LPSM;
/* Bit 2 - root hubs are not part of a compound device */
/* Bits 4:3 - individual port over current protection */
- temp |= 0x0008;
+ temp |= HUB_CHAR_INDV_PORT_OCPM;
/* Bits 6:5 - no TTs in root ports */
/* Bit 7 - no port indicators */
desc->wHubCharacteristics = cpu_to_le16(temp);
@@ -86,16 +84,16 @@ static void xhci_usb2_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
ports = xhci->num_usb2_ports;
xhci_common_hub_descriptor(xhci, desc, ports);
- desc->bDescriptorType = 0x29;
+ desc->bDescriptorType = USB_DT_HUB;
temp = 1 + (ports / 8);
- desc->bDescLength = 7 + 2 * temp;
+ desc->bDescLength = USB_DT_HUB_NONVAR_SIZE + 2 * temp;
/* The Device Removable bits are reported on a byte granularity.
* If the port doesn't exist within that byte, the bit is set to 0.
*/
memset(port_removable, 0, sizeof(port_removable));
for (i = 0; i < ports; i++) {
- portsc = xhci_readl(xhci, xhci->usb3_ports[i]);
+ portsc = xhci_readl(xhci, xhci->usb2_ports[i]);
/* If a device is removable, PORTSC reports a 0, same as in the
* hub descriptor DeviceRemovable bits.
*/
@@ -137,8 +135,8 @@ static void xhci_usb3_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci,
ports = xhci->num_usb3_ports;
xhci_common_hub_descriptor(xhci, desc, ports);
- desc->bDescriptorType = 0x2a;
- desc->bDescLength = 12;
+ desc->bDescriptorType = USB_DT_SS_HUB;
+ desc->bDescLength = USB_DT_SS_HUB_SIZE;
/* header decode latency should be zero for roothubs,
* see section 4.23.5.2.
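/*
 * Editor's note, not part of the patch: the symbolic names map directly
 * onto the magic numbers they replace (approximately as defined in
 * <linux/usb/ch11.h>, per USB 2.0 table 11-13); the usb2_ports[] change
 * above additionally stops the USB 2.0 root hub descriptor from reading
 * PORTSC out of the USB 3.0 port array.
 */
#define HUB_CHAR_INDV_PORT_LPSM	0x0001	/* per-port power switching */
#define HUB_CHAR_NO_LPSM	0x0002	/* no power switching */
#define HUB_CHAR_INDV_PORT_OCPM	0x0008	/* per-port overcurrent protection */
#define USB_DT_HUB		0x29	/* was the literal 0x29 */
#define USB_DT_SS_HUB		0x2a	/* was the literal 0x2a */
#define USB_DT_HUB_NONVAR_SIZE	7	/* was the literal 7 */
#define USB_DT_SS_HUB_SIZE	12	/* was the literal 12 */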
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 0e4b25f..383fc85 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -42,15 +42,12 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flag
seg = kzalloc(sizeof *seg, flags);
if (!seg)
return NULL;
- xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg);
seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma);
if (!seg->trbs) {
kfree(seg);
return NULL;
}
- xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n",
- seg->trbs, (unsigned long long)dma);
memset(seg->trbs, 0, SEGMENT_SIZE);
seg->dma = dma;
@@ -62,12 +59,9 @@ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flag
static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg)
{
if (seg->trbs) {
- xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n",
- seg->trbs, (unsigned long long)seg->dma);
dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma);
seg->trbs = NULL;
}
- xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg);
kfree(seg);
}
@@ -101,9 +95,6 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
val |= TRB_CHAIN;
prev->trbs[TRBS_PER_SEGMENT-1].link.control = cpu_to_le32(val);
}
- xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
- (unsigned long long)prev->dma,
- (unsigned long long)next->dma);
}
/* XXX: Do we need the hcd structure in all these functions? */
@@ -117,7 +108,6 @@ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
if (ring->first_seg) {
first_seg = ring->first_seg;
seg = first_seg->next;
- xhci_dbg(xhci, "Freeing ring at %p\n", ring);
while (seg != first_seg) {
struct xhci_segment *next = seg->next;
xhci_segment_free(xhci, seg);
@@ -160,7 +150,6 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
struct xhci_segment *prev;
ring = kzalloc(sizeof *(ring), flags);
- xhci_dbg(xhci, "Allocating ring at %p\n", ring);
if (!ring)
return NULL;
@@ -191,9 +180,6 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
/* See section 4.9.2.1 and 6.4.4.1 */
prev->trbs[TRBS_PER_SEGMENT-1].link.control |=
cpu_to_le32(LINK_TOGGLE);
- xhci_dbg(xhci, "Wrote link toggle flag to"
- " segment %p (virtual), 0x%llx (DMA)\n",
- prev, (unsigned long long)prev->dma);
}
xhci_initialize_ring_info(ring);
return ring;
@@ -1140,26 +1126,42 @@ static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
}
/*
- * Convert bInterval expressed in frames (in 1-255 range) to exponent of
+ * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
* microframes, rounded down to nearest power of 2.
*/
-static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
- struct usb_host_endpoint *ep)
+static unsigned int xhci_microframes_to_exponent(struct usb_device *udev,
+ struct usb_host_endpoint *ep, unsigned int desc_interval,
+ unsigned int min_exponent, unsigned int max_exponent)
{
unsigned int interval;
- interval = fls(8 * ep->desc.bInterval) - 1;
- interval = clamp_val(interval, 3, 10);
- if ((1 << interval) != 8 * ep->desc.bInterval)
+ interval = fls(desc_interval) - 1;
+ interval = clamp_val(interval, min_exponent, max_exponent);
+ if ((1 << interval) != desc_interval)
dev_warn(&udev->dev,
"ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n",
ep->desc.bEndpointAddress,
1 << interval,
- 8 * ep->desc.bInterval);
+ desc_interval);
return interval;
}
+static unsigned int xhci_parse_microframe_interval(struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ return xhci_microframes_to_exponent(udev, ep,
+ ep->desc.bInterval, 0, 15);
+}
+
+
+static unsigned int xhci_parse_frame_interval(struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ return xhci_microframes_to_exponent(udev, ep,
+ ep->desc.bInterval * 8, 3, 10);
+}
+
/* Return the polling or NAK interval.
*
* The polling interval is expressed in "microframes". If xHCI's Interval field
@@ -1178,7 +1180,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev,
/* Max NAK rate */
if (usb_endpoint_xfer_control(&ep->desc) ||
usb_endpoint_xfer_bulk(&ep->desc)) {
- interval = ep->desc.bInterval;
+ interval = xhci_parse_microframe_interval(udev, ep);
break;
}
/* Fall through - SS and HS isoc/int have same decoding */
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 9f1d4b1..b62037b 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -155,10 +155,6 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
while (last_trb(xhci, ring, ring->deq_seg, next)) {
if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
ring->cycle_state = (ring->cycle_state ? 0 : 1);
- if (!in_interrupt())
- xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
- ring,
- (unsigned int) ring->cycle_state);
}
ring->deq_seg = ring->deq_seg->next;
ring->dequeue = ring->deq_seg->trbs;
@@ -231,10 +227,6 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
/* Toggle the cycle bit after the last ring segment. */
if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
ring->cycle_state = (ring->cycle_state ? 0 : 1);
- if (!in_interrupt())
- xhci_dbg(xhci, "Toggle cycle state for ring %p = %i\n",
- ring,
- (unsigned int) ring->cycle_state);
}
}
ring->enq_seg = ring->enq_seg->next;
@@ -560,12 +552,9 @@ static void td_to_noop(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
cpu_to_le32(TRB_CYCLE);
cur_trb->generic.field[3] |= cpu_to_le32(
TRB_TYPE(TRB_TR_NOOP));
- xhci_dbg(xhci, "Cancel TRB %p (0x%llx dma) "
- "in seg %p (0x%llx dma)\n",
- cur_trb,
- (unsigned long long)xhci_trb_virt_to_dma(cur_seg, cur_trb),
- cur_seg,
- (unsigned long long)cur_seg->dma);
+ xhci_dbg(xhci, "TRB to noop at offset 0x%llx\n",
+ (unsigned long long)
+ xhci_trb_virt_to_dma(cur_seg, cur_trb));
}
if (cur_trb == cur_td->last_trb)
break;
@@ -705,9 +694,9 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
*/
list_for_each(entry, &ep->cancelled_td_list) {
cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
- xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
- cur_td->first_trb,
- (unsigned long long)xhci_trb_virt_to_dma(cur_td->start_seg, cur_td->first_trb));
+ xhci_dbg(xhci, "Removing canceled TD starting at 0x%llx (dma).\n",
+ (unsigned long long)xhci_trb_virt_to_dma(
+ cur_td->start_seg, cur_td->first_trb));
ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
if (!ep_ring) {
/* This shouldn't happen unless a driver is mucking
@@ -1215,6 +1204,7 @@ static void handle_vendor_event(struct xhci_hcd *xhci,
*
* Returns a zero-based port number, which is suitable for indexing into each of
* the split roothubs' port arrays and bus state arrays.
+ * Add one to it in order to call xhci_find_slot_id_by_port.
*/
static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
struct xhci_hcd *xhci, u32 port_id)
@@ -1335,7 +1325,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
xhci_set_link_state(xhci, port_array, faked_port_index,
XDEV_U0);
slot_id = xhci_find_slot_id_by_port(hcd, xhci,
- faked_port_index);
+ faked_port_index + 1);
if (!slot_id) {
xhci_dbg(xhci, "slot_id is zero\n");
goto cleanup;
@@ -1627,7 +1617,6 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
- xhci_debug_trb(xhci, xhci->event_ring->dequeue);
switch (trb_comp_code) {
case COMP_SUCCESS:
if (event_trb == ep_ring->dequeue) {
@@ -1643,7 +1632,6 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
}
break;
case COMP_SHORT_TX:
- xhci_warn(xhci, "WARN: short transfer on control ep\n");
if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
*status = -EREMOTEIO;
else
@@ -1946,6 +1934,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
xdev = xhci->devs[slot_id];
if (!xdev) {
xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
+ xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
+ (unsigned long long) xhci_trb_virt_to_dma(
+ xhci->event_ring->deq_seg,
+ xhci->event_ring->dequeue),
+ lower_32_bits(le64_to_cpu(event->buffer)),
+ upper_32_bits(le64_to_cpu(event->buffer)),
+ le32_to_cpu(event->transfer_len),
+ le32_to_cpu(event->flags));
+ xhci_dbg(xhci, "Event ring:\n");
+ xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
return -ENODEV;
}
@@ -1959,6 +1957,16 @@ static int handle_tx_event(struct xhci_hcd *xhci,
EP_STATE_DISABLED) {
xhci_err(xhci, "ERROR Transfer event for disabled endpoint "
"or incorrect stream ring\n");
+ xhci_err(xhci, "@%016llx %08x %08x %08x %08x\n",
+ (unsigned long long) xhci_trb_virt_to_dma(
+ xhci->event_ring->deq_seg,
+ xhci->event_ring->dequeue),
+ lower_32_bits(le64_to_cpu(event->buffer)),
+ upper_32_bits(le64_to_cpu(event->buffer)),
+ le32_to_cpu(event->transfer_len),
+ le32_to_cpu(event->flags));
+ xhci_dbg(xhci, "Event ring:\n");
+ xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
return -ENODEV;
}
@@ -1985,7 +1993,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
xhci_dbg(xhci, "Stopped on No-op or Link TRB\n");
break;
case COMP_STALL:
- xhci_warn(xhci, "WARN: Stalled endpoint\n");
+ xhci_dbg(xhci, "Stalled endpoint\n");
ep->ep_state |= EP_HALTED;
status = -EPIPE;
break;
@@ -1995,11 +2003,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
break;
case COMP_SPLIT_ERR:
case COMP_TX_ERR:
- xhci_warn(xhci, "WARN: transfer error on endpoint\n");
+ xhci_dbg(xhci, "Transfer error on endpoint\n");
status = -EPROTO;
break;
case COMP_BABBLE:
- xhci_warn(xhci, "WARN: babble error on endpoint\n");
+ xhci_dbg(xhci, "Babble error on endpoint\n");
status = -EOVERFLOW;
break;
case COMP_DB_ERR:
@@ -2390,17 +2398,7 @@ hw_died:
irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd)
{
- irqreturn_t ret;
- struct xhci_hcd *xhci;
-
- xhci = hcd_to_xhci(hcd);
- set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
- if (xhci->shared_hcd)
- set_bit(HCD_FLAG_SAW_IRQ, &xhci->shared_hcd->flags);
-
- ret = xhci_irq(hcd);
-
- return ret;
+ return xhci_irq(hcd);
}
/**** Endpoint Ring Operations ****/
@@ -2488,11 +2486,6 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
/* Toggle the cycle bit after the last ring segment. */
if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
ring->cycle_state = (ring->cycle_state ? 0 : 1);
- if (!in_interrupt()) {
- xhci_dbg(xhci, "queue_trb: Toggle cycle "
- "state for ring %p = %i\n",
- ring, (unsigned int)ring->cycle_state);
- }
}
ring->enq_seg = ring->enq_seg->next;
ring->enqueue = ring->enq_seg->trbs;
@@ -2561,13 +2554,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
struct scatterlist *sg;
sg = NULL;
- num_sgs = urb->num_sgs;
+ num_sgs = urb->num_mapped_sgs;
temp = urb->transfer_buffer_length;
- xhci_dbg(xhci, "count sg list trbs: \n");
num_trbs = 0;
for_each_sg(urb->sg, sg, num_sgs, i) {
- unsigned int previous_total_trbs = num_trbs;
unsigned int len = sg_dma_len(sg);
/* Scatter gather list entries may cross 64KB boundaries */
@@ -2582,22 +2573,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
num_trbs++;
running_total += TRB_MAX_BUFF_SIZE;
}
- xhci_dbg(xhci, " sg #%d: dma = %#llx, len = %#x (%d), num_trbs = %d\n",
- i, (unsigned long long)sg_dma_address(sg),
- len, len, num_trbs - previous_total_trbs);
-
len = min_t(int, len, temp);
temp -= len;
if (temp == 0)
break;
}
- xhci_dbg(xhci, "\n");
- if (!in_interrupt())
- xhci_dbg(xhci, "ep %#x - urb len = %d, sglist used, "
- "num_trbs = %d\n",
- urb->ep->desc.bEndpointAddress,
- urb->transfer_buffer_length,
- num_trbs);
return num_trbs;
}
@@ -2745,7 +2725,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
return -EINVAL;
num_trbs = count_sg_trbs_needed(xhci, urb);
- num_sgs = urb->num_sgs;
+ num_sgs = urb->num_mapped_sgs;
total_packet_count = roundup(urb->transfer_buffer_length,
usb_endpoint_maxp(&urb->ep->desc));
@@ -2783,8 +2763,6 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
if (trb_buff_len > urb->transfer_buffer_length)
trb_buff_len = urb->transfer_buffer_length;
- xhci_dbg(xhci, "First length to xfer from 1st sglist entry = %u\n",
- trb_buff_len);
first_trb = true;
/* Queue the first TRB, even if it's zero-length */
@@ -2816,11 +2794,6 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
if (usb_urb_dir_in(urb))
field |= TRB_ISP;
- xhci_dbg(xhci, " sg entry: dma = %#x, len = %#x (%d), "
- "64KB boundary at %#x, end dma = %#x\n",
- (unsigned int) addr, trb_buff_len, trb_buff_len,
- (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
- (unsigned int) addr + trb_buff_len);
if (TRB_MAX_BUFF_SIZE -
(addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
@@ -2926,15 +2899,6 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
/* FIXME: this doesn't deal with URB_ZERO_PACKET - need one more */
- if (!in_interrupt())
- xhci_dbg(xhci, "ep %#x - urb len = %#x (%d), "
- "addr = %#llx, num_trbs = %d\n",
- urb->ep->desc.bEndpointAddress,
- urb->transfer_buffer_length,
- urb->transfer_buffer_length,
- (unsigned long long)urb->transfer_dma,
- num_trbs);
-
ret = prepare_transfer(xhci, xhci->devs[slot_id],
ep_index, urb->stream_id,
num_trbs, urb, 0, false, mem_flags);
@@ -3055,9 +3019,6 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
if (!urb->setup_packet)
return -EINVAL;
- if (!in_interrupt())
- xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
- slot_id, ep_index);
/* 1 TRB for setup, 1 for status */
num_trbs = 2;
/*
@@ -3249,15 +3210,6 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
return -EINVAL;
}
- if (!in_interrupt())
- xhci_dbg(xhci, "ep %#x - urb len = %#x (%d),"
- " addr = %#llx, num_tds = %d\n",
- urb->ep->desc.bEndpointAddress,
- urb->transfer_buffer_length,
- urb->transfer_buffer_length,
- (unsigned long long)urb->transfer_dma,
- num_tds);
-
start_addr = (u64) urb->transfer_dma;
start_trb = &ep_ring->enqueue->generic;
start_cycle = ep_ring->cycle_state;
@@ -3372,7 +3324,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* Check TD length */
if (running_total != td_len) {
xhci_err(xhci, "ISOC TD length unmatch\n");
- return -EINVAL;
+ ret = -EINVAL;
+ goto cleanup;
}
}
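
The count_sg_trbs_needed() and queue_bulk_sg_tx() hunks above switch from urb->num_sgs to urb->num_mapped_sgs because dma_map_sg() may coalesce scatterlist entries, so the TRB math has to follow the mapped view of the list. A minimal sketch of that counting rule follows; TRB_MAX_BUFF_SIZE (64 KB) is taken from the xhci driver, while the function name and the simplified arithmetic are illustrative only and assume every mapped entry has a non-zero length.

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/usb.h>

#define TRB_MAX_BUFF_SIZE	(1U << 16)	/* one TRB may not cross a 64 KB boundary */

/* Illustrative only: count the TRBs an already DMA-mapped bulk URB needs. */
static unsigned int sketch_count_sg_trbs(struct urb *urb)
{
	struct scatterlist *sg;
	unsigned int num_trbs = 0;
	int i;

	/* Walk the *mapped* entries; dma_map_sg() may have merged some. */
	for_each_sg(urb->sg, sg, urb->num_mapped_sgs, i) {
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		/*
		 * Cover the offset into the first 64 KB block plus the
		 * entry length, one TRB per 64 KB block touched.
		 */
		num_trbs += DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
					 TRB_MAX_BUFF_SIZE);
	}
	return num_trbs;
}
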
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index a1afb7c..c939f5f 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -200,14 +200,14 @@ static int xhci_setup_msi(struct xhci_hcd *xhci)
ret = pci_enable_msi(pdev);
if (ret) {
- xhci_err(xhci, "failed to allocate MSI entry\n");
+ xhci_dbg(xhci, "failed to allocate MSI entry\n");
return ret;
}
ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
0, "xhci_hcd", xhci_to_hcd(xhci));
if (ret) {
- xhci_err(xhci, "disable MSI interrupt\n");
+ xhci_dbg(xhci, "disable MSI interrupt\n");
pci_disable_msi(pdev);
}
@@ -270,7 +270,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
if (ret) {
- xhci_err(xhci, "Failed to enable MSI-X\n");
+ xhci_dbg(xhci, "Failed to enable MSI-X\n");
goto free_entries;
}
@@ -286,7 +286,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci)
return ret;
disable_msix:
- xhci_err(xhci, "disable MSI-X interrupt\n");
+ xhci_dbg(xhci, "disable MSI-X interrupt\n");
xhci_free_irq(xhci);
pci_disable_msix(pdev);
free_entries:
@@ -352,6 +352,11 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd)
/* hcd->irq is -1, we have MSI */
return 0;
+ if (!pdev->irq) {
+ xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n");
+ return -EINVAL;
+ }
+
/* fall back to legacy interrupt*/
ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
hcd->irq_descr, hcd);
@@ -1333,9 +1338,6 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
goto done;
}
- xhci_dbg(xhci, "Cancel URB %p\n", urb);
- xhci_dbg(xhci, "Event ring:\n");
- xhci_debug_ring(xhci, xhci->event_ring);
ep_index = xhci_get_endpoint_index(&urb->ep->desc);
ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
@@ -1344,12 +1346,18 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
goto done;
}
- xhci_dbg(xhci, "Endpoint ring:\n");
- xhci_debug_ring(xhci, ep_ring);
-
urb_priv = urb->hcpriv;
-
- for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
+ i = urb_priv->td_cnt;
+ if (i < urb_priv->length)
+ xhci_dbg(xhci, "Cancel URB %p, dev %s, ep 0x%x, "
+ "starting at offset 0x%llx\n",
+ urb, urb->dev->devpath,
+ urb->ep->desc.bEndpointAddress,
+ (unsigned long long) xhci_trb_virt_to_dma(
+ urb_priv->td[i]->start_seg,
+ urb_priv->td[i]->first_trb));
+
+ for (; i < urb_priv->length; i++) {
td = urb_priv->td[i];
list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
}
@@ -1620,6 +1628,7 @@ static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
/* FIXME: can we allocate more resources for the HC? */
break;
case COMP_BW_ERR:
+ case COMP_2ND_BW_ERR:
dev_warn(&udev->dev, "Not enough bandwidth "
"for new device state.\n");
ret = -ENOSPC;
@@ -2796,8 +2805,7 @@ static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
if (ret < 0)
return ret;
- max_streams = USB_SS_MAX_STREAMS(
- eps[i]->ss_ep_comp.bmAttributes);
+ max_streams = usb_ss_max_streams(&eps[i]->ss_ep_comp);
if (max_streams < (*num_streams - 1)) {
xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
eps[i]->desc.bEndpointAddress,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 3c8fbd2..fb99c83 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1033,7 +1033,6 @@ struct xhci_transfer_event {
/* Invalid Stream ID Error */
#define COMP_STRID_ERR 34
/* Secondary Bandwidth Error - may be returned by a Configure Endpoint cmd */
-/* FIXME - check for this */
#define COMP_2ND_BW_ERR 35
/* Split Transaction Error */
#define COMP_SPLIT_ERR 36
@@ -1356,7 +1355,7 @@ static inline unsigned int hcd_index(struct usb_hcd *hcd)
return 1;
}
-/* There is one ehci_hci structure per controller */
+/* There is one xhci_hcd structure per controller */
struct xhci_hcd {
struct usb_hcd *main_hcd;
struct usb_hcd *shared_hcd;
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 27e209a..9c0f8ca 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -809,19 +809,7 @@ static void mts_usb_disconnect (struct usb_interface *intf)
kfree(desc);
}
-
-static int __init microtek_drv_init(void)
-{
- return usb_register(&mts_usb_driver);
-}
-
-static void __exit microtek_drv_exit(void)
-{
- usb_deregister(&mts_usb_driver);
-}
-
-module_init(microtek_drv_init);
-module_exit(microtek_drv_exit);
+module_usb_driver(mts_usb_driver);
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
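
The microtek conversion above, like most of the drivers/usb/misc changes that follow, replaces a hand-rolled init/exit pair with module_usb_driver(). The macro boils down to roughly the sketch below (not the exact include/linux/usb.h text); it assumes the struct usb_driver mts_usb_driver defined earlier in the file, and the generated function names shown here are illustrative.

#include <linux/module.h>
#include <linux/usb.h>

/*
 * Roughly what module_usb_driver(mts_usb_driver) expands to: a trivial
 * module_init()/module_exit() pair that only registers and deregisters
 * the usb_driver.  Drivers that do extra work in init or exit cannot
 * use the helper and keep their open-coded versions.
 */
static int __init mts_usb_driver_init(void)
{
	return usb_register(&mts_usb_driver);
}
module_init(mts_usb_driver_init);

static void __exit mts_usb_driver_exit(void)
{
	usb_deregister(&mts_usb_driver);
}
module_exit(mts_usb_driver_exit);
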
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index fe85871..284b854 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -885,40 +885,7 @@ static struct usb_driver adu_driver = {
.id_table = device_table,
};
-static int __init adu_init(void)
-{
- int result;
-
- dbg(2," %s : enter", __func__);
-
- /* register this driver with the USB subsystem */
- result = usb_register(&adu_driver);
- if (result < 0) {
- printk(KERN_ERR "usb_register failed for the "__FILE__
- " driver. Error number %d\n", result);
- goto exit;
- }
-
- printk(KERN_INFO "adutux " DRIVER_DESC " " DRIVER_VERSION "\n");
- printk(KERN_INFO "adutux is an experimental driver. "
- "Use at your own risk\n");
-
-exit:
- dbg(2," %s : leave, return value %d", __func__, result);
-
- return result;
-}
-
-static void __exit adu_exit(void)
-{
- dbg(2," %s : enter", __func__);
- /* deregister this driver with the USB subsystem */
- usb_deregister(&adu_driver);
- dbg(2," %s : leave", __func__);
-}
-
-module_init(adu_init);
-module_exit(adu_exit);
+module_usb_driver(adu_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c
index 9251773..3f7c1a9 100644
--- a/drivers/usb/misc/cypress_cy7c63.c
+++ b/drivers/usb/misc/cypress_cy7c63.c
@@ -271,27 +271,7 @@ static struct usb_driver cypress_driver = {
.id_table = cypress_table,
};
-static int __init cypress_init(void)
-{
- int result;
-
- /* register this driver with the USB subsystem */
- result = usb_register(&cypress_driver);
- if (result)
- printk(KERN_ERR KBUILD_MODNAME ": usb_register failed! "
- "Error number: %d\n", result);
-
- return result;
-}
-
-static void __exit cypress_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&cypress_driver);
-}
-
-module_init(cypress_init);
-module_exit(cypress_exit);
+module_usb_driver(cypress_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/misc/cytherm.c b/drivers/usb/misc/cytherm.c
index 1d7251b..5b9831b 100644
--- a/drivers/usb/misc/cytherm.c
+++ b/drivers/usb/misc/cytherm.c
@@ -417,31 +417,7 @@ static void cytherm_disconnect(struct usb_interface *interface)
dev_info(&interface->dev, "Cypress thermometer now disconnected\n");
}
-
-static int __init usb_cytherm_init(void)
-{
- int result;
-
- result = usb_register(&cytherm_driver);
- if (result) {
- printk(KERN_ERR KBUILD_MODNAME ": usb_register failed! "
- "Error number: %d\n", result);
- return result;
- }
-
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
- return 0;
-}
-
-static void __exit usb_cytherm_exit(void)
-{
- usb_deregister(&cytherm_driver);
-}
-
-
-module_init (usb_cytherm_init);
-module_exit (usb_cytherm_exit);
+module_usb_driver(cytherm_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/misc/emi26.c b/drivers/usb/misc/emi26.c
index a6521c9..da97dce 100644
--- a/drivers/usb/misc/emi26.c
+++ b/drivers/usb/misc/emi26.c
@@ -37,9 +37,6 @@ static int emi26_set_reset(struct usb_device *dev, unsigned char reset_bit);
static int emi26_load_firmware (struct usb_device *dev);
static int emi26_probe(struct usb_interface *intf, const struct usb_device_id *id);
static void emi26_disconnect(struct usb_interface *intf);
-static int __init emi26_init (void);
-static void __exit emi26_exit (void);
-
/* thanks to drivers/usb/serial/keyspan_pda.c code */
static int emi26_writememory (struct usb_device *dev, int address,
@@ -276,18 +273,7 @@ static struct usb_driver emi26_driver = {
.id_table = id_table,
};
-static int __init emi26_init (void)
-{
- return usb_register(&emi26_driver);
-}
-
-static void __exit emi26_exit (void)
-{
- usb_deregister (&emi26_driver);
-}
-
-module_init(emi26_init);
-module_exit(emi26_exit);
+module_usb_driver(emi26_driver);
MODULE_AUTHOR("Tapio Laxström");
MODULE_DESCRIPTION("Emagic EMI 2|6 firmware loader.");
diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c
index fc15ad4..4e0f167 100644
--- a/drivers/usb/misc/emi62.c
+++ b/drivers/usb/misc/emi62.c
@@ -46,9 +46,6 @@ static int emi62_set_reset(struct usb_device *dev, unsigned char reset_bit);
static int emi62_load_firmware (struct usb_device *dev);
static int emi62_probe(struct usb_interface *intf, const struct usb_device_id *id);
static void emi62_disconnect(struct usb_interface *intf);
-static int __init emi62_init (void);
-static void __exit emi62_exit (void);
-
/* thanks to drivers/usb/serial/keyspan_pda.c code */
static int emi62_writememory(struct usb_device *dev, int address,
@@ -290,22 +287,7 @@ static struct usb_driver emi62_driver = {
.id_table = id_table,
};
-static int __init emi62_init (void)
-{
- int retval;
- retval = usb_register (&emi62_driver);
- if (retval)
- printk(KERN_ERR "adi-emi: registration failed\n");
- return retval;
-}
-
-static void __exit emi62_exit (void)
-{
- usb_deregister (&emi62_driver);
-}
-
-module_init(emi62_init);
-module_exit(emi62_exit);
+module_usb_driver(emi62_driver);
MODULE_AUTHOR("Tapio Laxström");
MODULE_DESCRIPTION("Emagic EMI 6|2m firmware loader.");
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index 2dbe600..a4a3c7c 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -53,7 +53,7 @@ MODULE_AUTHOR("Tony Olech");
MODULE_DESCRIPTION("FTDI ELAN driver");
MODULE_LICENSE("GPL");
#define INT_MODULE_PARM(n, v) static int n = v;module_param(n, int, 0444)
-static int distrust_firmware = 1;
+static bool distrust_firmware = 1;
module_param(distrust_firmware, bool, 0);
MODULE_PARM_DESC(distrust_firmware, "true to distrust firmware power/overcurren"
"t setup");
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 515b67f..0dee246 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -428,29 +428,7 @@ static void idmouse_disconnect(struct usb_interface *interface)
dev_info(&interface->dev, "disconnected\n");
}
-static int __init usb_idmouse_init(void)
-{
- int result;
-
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
-
- /* register this driver with the USB subsystem */
- result = usb_register(&idmouse_driver);
- if (result)
- err("Unable to register device (error %d).", result);
-
- return result;
-}
-
-static void __exit usb_idmouse_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&idmouse_driver);
-}
-
-module_init(usb_idmouse_init);
-module_exit(usb_idmouse_exit);
+module_usb_driver(idmouse_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 8145790..4fd0dc8 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -62,7 +62,7 @@ MODULE_LICENSE("GPL");
/* Module parameters */
static DEFINE_MUTEX(iowarrior_mutex);
-static int debug = 0;
+static bool debug = 0;
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "debug=1 enables debugging messages");
@@ -734,7 +734,7 @@ static const struct file_operations iowarrior_fops = {
.llseek = noop_llseek,
};
-static char *iowarrior_devnode(struct device *dev, mode_t *mode)
+static char *iowarrior_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
}
@@ -927,15 +927,4 @@ static struct usb_driver iowarrior_driver = {
.id_table = iowarrior_ids,
};
-static int __init iowarrior_init(void)
-{
- return usb_register(&iowarrior_driver);
-}
-
-static void __exit iowarrior_exit(void)
-{
- usb_deregister(&iowarrior_driver);
-}
-
-module_init(iowarrior_init);
-module_exit(iowarrior_exit);
+module_usb_driver(iowarrior_driver);
diff --git a/drivers/usb/misc/isight_firmware.c b/drivers/usb/misc/isight_firmware.c
index fe1d443..1c61830 100644
--- a/drivers/usb/misc/isight_firmware.c
+++ b/drivers/usb/misc/isight_firmware.c
@@ -55,8 +55,9 @@ static int isight_firmware_load(struct usb_interface *intf,
ptr = firmware->data;
+ buf[0] = 0x01;
if (usb_control_msg
- (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, "\1", 1,
+ (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, buf, 1,
300) != 1) {
printk(KERN_ERR
"Failed to initialise isight firmware loader\n");
@@ -100,8 +101,9 @@ static int isight_firmware_load(struct usb_interface *intf,
}
}
+ buf[0] = 0x00;
if (usb_control_msg
- (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, "\0", 1,
+ (dev, usb_sndctrlpipe(dev, 0), 0xa0, 0x40, 0xe600, 0, buf, 1,
300) != 1) {
printk(KERN_ERR "isight firmware loading completion failed\n");
ret = -ENODEV;
@@ -126,18 +128,7 @@ static struct usb_driver isight_firmware_driver = {
.id_table = id_table,
};
-static int __init isight_firmware_init(void)
-{
- return usb_register(&isight_firmware_driver);
-}
-
-static void __exit isight_firmware_exit(void)
-{
- usb_deregister(&isight_firmware_driver);
-}
-
-module_init(isight_firmware_init);
-module_exit(isight_firmware_exit);
+module_usb_driver(isight_firmware_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 48c166f..5db4ab5 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -821,30 +821,5 @@ static struct usb_driver ld_usb_driver = {
.id_table = ld_usb_table,
};
-/**
- * ld_usb_init
- */
-static int __init ld_usb_init(void)
-{
- int retval;
-
- /* register this driver with the USB subsystem */
- retval = usb_register(&ld_usb_driver);
- if (retval)
- err("usb_register failed for the %s driver. Error number %d\n", __FILE__, retval);
-
- return retval;
-}
-
-/**
- * ld_usb_exit
- */
-static void __exit ld_usb_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&ld_usb_driver);
-}
-
-module_init(ld_usb_init);
-module_exit(ld_usb_exit);
+module_usb_driver(ld_usb_driver);
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index a989356..5752220 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -269,7 +269,7 @@ static const struct file_operations tower_fops = {
.llseek = tower_llseek,
};
-static char *legousbtower_devnode(struct device *dev, mode_t *mode)
+static char *legousbtower_devnode(struct device *dev, umode_t *mode)
{
return kasprintf(GFP_KERNEL, "usb/%s", dev_name(dev));
}
@@ -1043,51 +1043,7 @@ static void tower_disconnect (struct usb_interface *interface)
dbg(2, "%s: leave", __func__);
}
-
-
-/**
- * lego_usb_tower_init
- */
-static int __init lego_usb_tower_init(void)
-{
- int result;
- int retval = 0;
-
- dbg(2, "%s: enter", __func__);
-
- /* register this driver with the USB subsystem */
- result = usb_register(&tower_driver);
- if (result < 0) {
- err("usb_register failed for the %s driver. Error number %d", __FILE__, result);
- retval = -1;
- goto exit;
- }
-
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
-
-exit:
- dbg(2, "%s: leave, return value %d", __func__, retval);
-
- return retval;
-}
-
-
-/**
- * lego_usb_tower_exit
- */
-static void __exit lego_usb_tower_exit(void)
-{
- dbg(2, "%s: enter", __func__);
-
- /* deregister this driver with the USB subsystem */
- usb_deregister (&tower_driver);
-
- dbg(2, "%s: leave", __func__);
-}
-
-module_init (lego_usb_tower_init);
-module_exit (lego_usb_tower_exit);
+module_usb_driver(tower_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
index 4e23d38..487a8ce 100644
--- a/drivers/usb/misc/rio500.c
+++ b/drivers/usb/misc/rio500.c
@@ -531,33 +531,7 @@ static struct usb_driver rio_driver = {
.id_table = rio_table,
};
-static int __init usb_rio_init(void)
-{
- int retval;
- retval = usb_register(&rio_driver);
- if (retval)
- goto out;
-
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
-
-out:
- return retval;
-}
-
-
-static void __exit usb_rio_cleanup(void)
-{
- struct rio_usb_data *rio = &rio_instance;
-
- rio->present = 0;
- usb_deregister(&rio_driver);
-
-
-}
-
-module_init(usb_rio_init);
-module_exit(usb_rio_cleanup);
+module_usb_driver(rio_driver);
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c
index f63776a..741efed 100644
--- a/drivers/usb/misc/trancevibrator.c
+++ b/drivers/usb/misc/trancevibrator.c
@@ -137,26 +137,7 @@ static struct usb_driver tv_driver = {
.id_table = id_table,
};
-static int __init tv_init(void)
-{
- int retval = usb_register(&tv_driver);
- if (retval) {
- err("usb_register failed. Error number %d", retval);
- return retval;
- }
-
- printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
- DRIVER_DESC "\n");
- return 0;
-}
-
-static void __exit tv_exit(void)
-{
- usb_deregister(&tv_driver);
-}
-
-module_init (tv_init);
-module_exit (tv_exit);
+module_usb_driver(tv_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index 1871cdf..e2b4bd3 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -450,25 +450,7 @@ static struct usb_driver lcd_driver = {
.supports_autosuspend = 1,
};
-static int __init usb_lcd_init(void)
-{
- int result;
-
- result = usb_register(&lcd_driver);
- if (result)
- err("usb_register failed. Error number %d", result);
-
- return result;
-}
-
-
-static void __exit usb_lcd_exit(void)
-{
- usb_deregister(&lcd_driver);
-}
-
-module_init(usb_lcd_init);
-module_exit(usb_lcd_exit);
+module_usb_driver(lcd_driver);
MODULE_AUTHOR("Georges Toth <g.toth@e-biz.lu>");
MODULE_DESCRIPTION(DRIVER_VERSION);
diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c
index 43f84e5..12d03e7 100644
--- a/drivers/usb/misc/usbled.c
+++ b/drivers/usb/misc/usbled.c
@@ -31,6 +31,8 @@ static const struct usb_device_id id_table[] = {
.driver_info = DELCOM_VISUAL_SIGNAL_INDICATOR },
{ USB_DEVICE(0x1d34, 0x0004),
.driver_info = DREAM_CHEEKY_WEBMAIL_NOTIFIER },
+ { USB_DEVICE(0x1d34, 0x000a),
+ .driver_info = DREAM_CHEEKY_WEBMAIL_NOTIFIER },
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
@@ -231,23 +233,7 @@ static struct usb_driver led_driver = {
.id_table = id_table,
};
-static int __init usb_led_init(void)
-{
- int retval = 0;
-
- retval = usb_register(&led_driver);
- if (retval)
- err("usb_register failed. Error number %d", retval);
- return retval;
-}
-
-static void __exit usb_led_exit(void)
-{
- usb_deregister(&led_driver);
-}
-
-module_init(usb_led_init);
-module_exit(usb_led_exit);
+module_usb_driver(led_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c
index 417b8f2..b2d82b9 100644
--- a/drivers/usb/misc/usbsevseg.c
+++ b/drivers/usb/misc/usbsevseg.c
@@ -24,7 +24,7 @@
#define VENDOR_ID 0x0fc5
#define PRODUCT_ID 0x1227
-#define MAXLEN 6
+#define MAXLEN 8
/* table of devices that work with this driver */
static const struct usb_device_id id_table[] = {
@@ -437,23 +437,7 @@ static struct usb_driver sevseg_driver = {
.supports_autosuspend = 1,
};
-static int __init usb_sevseg_init(void)
-{
- int rc = 0;
-
- rc = usb_register(&sevseg_driver);
- if (rc)
- err("usb_register failed. Error number %d", rc);
- return rc;
-}
-
-static void __exit usb_sevseg_exit(void)
-{
- usb_deregister(&sevseg_driver);
-}
-
-module_init(usb_sevseg_init);
-module_exit(usb_sevseg_exit);
+module_usb_driver(sevseg_driver);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index bd6d008..959145b 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -1765,7 +1765,6 @@ static int test_unaligned_bulk(
* off just killing the userspace task and waiting for it to exit.
*/
-/* No BKL needed */
static int
usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
{
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index ac5bfd6..897edda 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -539,26 +539,6 @@ static const struct file_operations yurex_fops = {
.llseek = default_llseek,
};
-
-static int __init usb_yurex_init(void)
-{
- int result;
-
- /* register this driver with the USB subsystem */
- result = usb_register(&yurex_driver);
- if (result)
- err("usb_register failed. Error number %d", result);
-
- return result;
-}
-
-static void __exit usb_yurex_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&yurex_driver);
-}
-
-module_init(usb_yurex_init);
-module_exit(usb_yurex_exit);
+module_usb_driver(yurex_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 07a0346..f70cab3 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -5,14 +5,13 @@
# (M)HDRC = (Multipoint) Highspeed Dual-Role Controller
config USB_MUSB_HDRC
+ tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
depends on USB && USB_GADGET
- depends on (ARM || (BF54x && !BF544) || (BF52x && !BF522 && !BF523))
select NOP_USB_XCEIV if (ARCH_DAVINCI || MACH_OMAP3EVM || BLACKFIN)
select TWL4030_USB if MACH_OMAP_3430SDP
select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA
select USB_OTG_UTILS
select USB_GADGET_DUALSPEED
- tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)'
help
Say Y here if your system has a dual role high speed USB
controller based on the Mentor Graphics silicon IP. Then
@@ -31,9 +30,10 @@ config USB_MUSB_HDRC
To compile this driver as a module, choose M here; the
module will be called "musb-hdrc".
+if USB_MUSB_HDRC
+
choice
prompt "Platform Glue Layer"
- depends on USB_MUSB_HDRC
config USB_MUSB_DAVINCI
tristate "DaVinci"
@@ -45,7 +45,6 @@ config USB_MUSB_DA8XX
config USB_MUSB_TUSB6010
tristate "TUSB6010"
- depends on ARCH_OMAP
config USB_MUSB_OMAP2PLUS
tristate "OMAP2430 and onwards"
@@ -65,46 +64,54 @@ config USB_MUSB_UX500
endchoice
-config MUSB_PIO_ONLY
- bool 'Disable DMA (always use PIO)'
- depends on USB_MUSB_HDRC
- default USB_MUSB_TUSB6010 || USB_MUSB_DA8XX || USB_MUSB_AM35X
+choice
+ prompt 'MUSB DMA mode'
+ default USB_UX500_DMA if USB_MUSB_UX500
+ default USB_INVENTRA_DMA if USB_MUSB_OMAP2PLUS || USB_MUSB_BLACKFIN
+ default USB_TI_CPPI_DMA if USB_MUSB_DAVINCI
+ default USB_TUSB_OMAP_DMA if USB_MUSB_TUSB6010
+ default MUSB_PIO_ONLY if USB_MUSB_TUSB6010 || USB_MUSB_DA8XX || USB_MUSB_AM35X
help
- All data is copied between memory and FIFO by the CPU.
- DMA controllers are ignored.
-
- Do not select 'n' here unless DMA support for your SOC or board
- is unavailable (or unstable). When DMA is enabled at compile time,
- you can still disable it at run time using the "use_dma=n" module
- parameter.
+ Unfortunately, only one option can be enabled here. Ideally one
+ should be able to build all these drivers into one kernel to
+ allow using DMA on multiplatform kernels.
config USB_UX500_DMA
- bool
- depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
- default USB_MUSB_UX500
+ bool 'ST Ericsson U8500 and U5500'
+ depends on USB_MUSB_UX500
help
Enable DMA transfers on UX500 platforms.
config USB_INVENTRA_DMA
- bool
- depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
- default USB_MUSB_OMAP2PLUS || USB_MUSB_BLACKFIN
+ bool 'Inventra'
+ depends on USB_MUSB_OMAP2PLUS || USB_MUSB_BLACKFIN
help
Enable DMA transfers using Mentor's engine.
config USB_TI_CPPI_DMA
- bool
- depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
- default USB_MUSB_DAVINCI
+ bool 'TI CPPI (Davinci)'
+ depends on USB_MUSB_DAVINCI
help
Enable DMA transfers when TI CPPI DMA is available.
config USB_TUSB_OMAP_DMA
- bool
- depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
+ bool 'TUSB 6010'
depends on USB_MUSB_TUSB6010
depends on ARCH_OMAP
- default y
help
Enable DMA transfers on TUSB 6010 when OMAP DMA is available.
+config MUSB_PIO_ONLY
+ bool 'Disable DMA (always use PIO)'
+ help
+ All data is copied between memory and FIFO by the CPU.
+ DMA controllers are ignored.
+
+ Do not choose this unless DMA support for your SOC or board
+ is unavailable (or unstable). When DMA is enabled at compile time,
+ you can still disable it at run time using the "use_dma=n" module
+ parameter.
+
+endchoice
+
+endif # USB_MUSB_HDRC
diff --git a/drivers/usb/musb/Makefile b/drivers/usb/musb/Makefile
index d8fd9d0..88bfb9d 100644
--- a/drivers/usb/musb/Makefile
+++ b/drivers/usb/musb/Makefile
@@ -24,25 +24,7 @@ obj-$(CONFIG_USB_MUSB_UX500) += ux500.o
# PIO only, or DMA (several potential schemes).
# though PIO is always there to back up DMA, and for ep0
-ifneq ($(CONFIG_MUSB_PIO_ONLY),y)
-
- ifeq ($(CONFIG_USB_INVENTRA_DMA),y)
- musb_hdrc-y += musbhsdma.o
-
- else
- ifeq ($(CONFIG_USB_TI_CPPI_DMA),y)
- musb_hdrc-y += cppi_dma.o
-
- else
- ifeq ($(CONFIG_USB_TUSB_OMAP_DMA),y)
- musb_hdrc-y += tusb6010_omap.o
-
- else
- ifeq ($(CONFIG_USB_UX500_DMA),y)
- musb_hdrc-y += ux500_dma.o
-
- endif
- endif
- endif
- endif
-endif
+musb_hdrc-$(CONFIG_USB_INVENTRA_DMA) += musbhsdma.o
+musb_hdrc-$(CONFIG_USB_TI_CPPI_DMA) += cppi_dma.o
+musb_hdrc-$(CONFIG_USB_TUSB_OMAP_DMA) += tusb6010_omap.o
+musb_hdrc-$(CONFIG_USB_UX500_DMA) += ux500_dma.o
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index 318fb4e..66bc376 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -513,7 +513,7 @@ static inline int cppi_autoreq_update(struct cppi_channel *rx,
if (!(val & MUSB_RXCSR_H_REQPKT)) {
val |= MUSB_RXCSR_H_REQPKT | MUSB_RXCSR_H_WZC_BITS;
musb_writew(regs, MUSB_RXCSR, val);
- /* flush writebufer */
+ /* flush writebuffer */
val = musb_readw(regs, MUSB_RXCSR);
}
}
@@ -750,7 +750,7 @@ cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx)
* So this module parameter lets the heuristic be disabled. When using
* gadgetfs, the heuristic will probably need to be disabled.
*/
-static int cppi_rx_rndis = 1;
+static bool cppi_rx_rndis = 1;
module_param(cppi_rx_rndis, bool, 0);
MODULE_PARM_DESC(cppi_rx_rndis, "enable/disable RX RNDIS heuristic");
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index f9a3f62..7c569f5 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -33,9 +33,6 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
-#include <mach/hardware.h>
-#include <mach/memory.h>
-#include <asm/gpio.h>
#include <mach/cputype.h>
#include <asm/mach-types.h>
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index b63ab15..3d11cf64 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -661,7 +661,6 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
handled = IRQ_HANDLED;
musb->is_active = 1;
- set_bit(HCD_FLAG_SAW_IRQ, &hcd->flags);
musb->ep0_stage = MUSB_EP0_START;
@@ -982,6 +981,9 @@ static void musb_shutdown(struct platform_device *pdev)
unsigned long flags;
pm_runtime_get_sync(musb->controller);
+
+ musb_gadget_cleanup(musb);
+
spin_lock_irqsave(&musb->lock, flags);
musb_platform_disable(musb);
musb_generic_disable(musb);
@@ -1432,7 +1434,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
struct musb_hw_ep *hw_ep = musb->endpoints + i;
hw_ep->fifo = MUSB_FIFO_OFFSET(i) + mbase;
-#ifdef CONFIG_USB_MUSB_TUSB6010
+#if defined(CONFIG_USB_MUSB_TUSB6010) || defined (CONFIG_USB_MUSB_TUSB6010_MODULE)
hw_ep->fifo_async = musb->async + 0x400 + MUSB_FIFO_OFFSET(i);
hw_ep->fifo_sync = musb->sync + 0x400 + MUSB_FIFO_OFFSET(i);
hw_ep->fifo_sync_va =
@@ -1587,7 +1589,7 @@ irqreturn_t musb_interrupt(struct musb *musb)
EXPORT_SYMBOL_GPL(musb_interrupt);
#ifndef CONFIG_MUSB_PIO_ONLY
-static int __initdata use_dma = 1;
+static bool __initdata use_dma = 1;
/* "modprobe ... use_dma=0" etc */
module_param(use_dma, bool, 0);
@@ -1631,6 +1633,7 @@ void musb_dma_completion(struct musb *musb, u8 epnum, u8 transmit)
}
}
}
+EXPORT_SYMBOL_GPL(musb_dma_completion);
#else
#define use_dma 0
@@ -1827,8 +1830,6 @@ static void musb_free(struct musb *musb)
sysfs_remove_group(&musb->controller->kobj, &musb_attr_group);
#endif
- musb_gadget_cleanup(musb);
-
if (musb->nIrq >= 0) {
if (musb->irq_wake)
disable_irq_wake(musb->nIrq);
@@ -2012,8 +2013,6 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
if (status < 0)
goto fail3;
- pm_runtime_put(musb->controller);
-
status = musb_init_debugfs(musb);
if (status < 0)
goto fail4;
@@ -2158,6 +2157,7 @@ static void musb_save_context(struct musb *musb)
if (!epio)
continue;
+ musb_writeb(musb_base, MUSB_INDEX, i);
musb->context.index_regs[i].txmaxp =
musb_readw(epio, MUSB_TXMAXP);
musb->context.index_regs[i].txcsr =
@@ -2233,6 +2233,7 @@ static void musb_restore_context(struct musb *musb)
if (!epio)
continue;
+ musb_writeb(musb_base, MUSB_INDEX, i);
musb_writew(epio, MUSB_TXMAXP,
musb->context.index_regs[i].txmaxp);
musb_writew(epio, MUSB_TXCSR,
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index b3c065a..3d28fb8 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -40,7 +40,6 @@
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/timer.h>
-#include <linux/clk.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
@@ -311,6 +310,7 @@ struct musb_context_registers {
u8 index, testmode;
u8 devctl, busctl, misc;
+ u32 otg_interfsel;
struct musb_csr_regs index_regs[MUSB_C_NUM_EPS];
};
@@ -327,6 +327,7 @@ struct musb {
irqreturn_t (*isr)(int, void *);
struct work_struct irq_work;
+ struct work_struct otg_notifier_work;
u16 hwvers;
/* this hub status bit is reserved by USB 2.0 and not seen by usbcore */
@@ -372,6 +373,7 @@ struct musb {
u16 int_tx;
struct otg_transceiver *xceiv;
+ u8 xceiv_event;
int nIrq;
unsigned irq_wake:1;
diff --git a/drivers/usb/musb/musb_debug.h b/drivers/usb/musb/musb_debug.h
index 742eada..27ba8f7 100644
--- a/drivers/usb/musb/musb_debug.h
+++ b/drivers/usb/musb/musb_debug.h
@@ -43,8 +43,8 @@
#define ERR(fmt, args...) yprintk(KERN_ERR, fmt, ## args)
#ifdef CONFIG_DEBUG_FS
-extern int musb_init_debugfs(struct musb *musb);
-extern void musb_exit_debugfs(struct musb *musb);
+int musb_init_debugfs(struct musb *musb);
+void musb_exit_debugfs(struct musb *musb);
#else
static inline int musb_init_debugfs(struct musb *musb)
{
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index 61f4ee4..13d9af9 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -33,11 +33,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/sched.h>
#include <linux/init.h>
-#include <linux/list.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
@@ -46,10 +42,6 @@
#include "musb_core.h"
#include "musb_debug.h"
-#ifdef CONFIG_ARCH_DAVINCI
-#include "davinci.h"
-#endif
-
struct musb_register_map {
char *name;
unsigned offset;
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 922148f..ac3d2ee 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -40,8 +40,6 @@
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
-#include <linux/moduleparam.h>
-#include <linux/stat.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
@@ -1844,7 +1842,7 @@ int __init musb_gadget_setup(struct musb *musb)
*/
musb->g.ops = &musb_gadget_operations;
- musb->g.is_dualspeed = 1;
+ musb->g.max_speed = USB_SPEED_HIGH;
musb->g.speed = USB_SPEED_UNKNOWN;
/* this "gadget" abstracts/virtualizes the controller */
@@ -1903,7 +1901,7 @@ static int musb_gadget_start(struct usb_gadget *g,
unsigned long flags;
int retval = -EINVAL;
- if (driver->speed < USB_SPEED_HIGH)
+ if (driver->max_speed < USB_SPEED_HIGH)
goto err0;
pm_runtime_get_sync(musb->controller);
diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
index 6a0d046..e40d764 100644
--- a/drivers/usb/musb/musb_gadget_ep0.c
+++ b/drivers/usb/musb/musb_gadget_ep0.c
@@ -37,7 +37,6 @@
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
-#include <linux/init.h>
#include <linux/device.h>
#include <linux/interrupt.h>
diff --git a/drivers/usb/musb/musb_io.h b/drivers/usb/musb/musb_io.h
index 03c6ccd..1d5eda2 100644
--- a/drivers/usb/musb/musb_io.h
+++ b/drivers/usb/musb/musb_io.h
@@ -39,7 +39,8 @@
#if !defined(CONFIG_ARM) && !defined(CONFIG_SUPERH) \
&& !defined(CONFIG_AVR32) && !defined(CONFIG_PPC32) \
- && !defined(CONFIG_PPC64) && !defined(CONFIG_BLACKFIN)
+ && !defined(CONFIG_PPC64) && !defined(CONFIG_BLACKFIN) \
+ && !defined(CONFIG_MIPS)
static inline void readsl(const void __iomem *addr, void *buf, int len)
{ insl((unsigned long)addr, buf, len); }
static inline void readsw(const void __iomem *addr, void *buf, int len)
@@ -74,7 +75,7 @@ static inline void musb_writel(void __iomem *addr, unsigned offset, u32 data)
{ __raw_writel(data, addr + offset); }
-#ifdef CONFIG_USB_MUSB_TUSB6010
+#if defined(CONFIG_USB_MUSB_TUSB6010) || defined (CONFIG_USB_MUSB_TUSB6010_MODULE)
/*
* TUSB6010 doesn't allow 8-bit access; 16-bit access is the minimum.
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index ba85f27..df719ea 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -29,7 +29,6 @@
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
-#include <linux/clk.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
@@ -223,26 +222,29 @@ static inline void omap2430_low_level_init(struct musb *musb)
musb_writel(musb->mregs, OTG_FORCESTDBY, l);
}
-/* blocking notifier support */
static int musb_otg_notifications(struct notifier_block *nb,
unsigned long event, void *unused)
{
struct musb *musb = container_of(nb, struct musb, nb);
+
+ musb->xceiv_event = event;
+ schedule_work(&musb->otg_notifier_work);
+
+ return NOTIFY_OK;
+}
+
+static void musb_otg_notifier_work(struct work_struct *data_notifier_work)
+{
+ struct musb *musb = container_of(data_notifier_work, struct musb, otg_notifier_work);
struct device *dev = musb->controller;
struct musb_hdrc_platform_data *pdata = dev->platform_data;
struct omap_musb_board_data *data = pdata->board_data;
- switch (event) {
+ switch (musb->xceiv_event) {
case USB_EVENT_ID:
dev_dbg(musb->controller, "ID GND\n");
- if (is_otg_enabled(musb)) {
- if (musb->gadget_driver) {
- pm_runtime_get_sync(musb->controller);
- otg_init(musb->xceiv);
- omap2430_musb_set_vbus(musb, 1);
- }
- } else {
+ if (!is_otg_enabled(musb) || musb->gadget_driver) {
pm_runtime_get_sync(musb->controller);
otg_init(musb->xceiv);
omap2430_musb_set_vbus(musb, 1);
@@ -274,10 +276,7 @@ static int musb_otg_notifications(struct notifier_block *nb,
break;
default:
dev_dbg(musb->controller, "ID float\n");
- return NOTIFY_DONE;
}
-
- return NOTIFY_OK;
}
static int omap2430_musb_init(struct musb *musb)
@@ -297,6 +296,8 @@ static int omap2430_musb_init(struct musb *musb)
return -ENODEV;
}
+ INIT_WORK(&musb->otg_notifier_work, musb_otg_notifier_work);
+
status = pm_runtime_get_sync(dev);
if (status < 0) {
dev_err(dev, "pm_runtime_get_sync FAILED");
@@ -334,7 +335,6 @@ static int omap2430_musb_init(struct musb *musb)
return 0;
err1:
- pm_runtime_disable(dev);
return status;
}
@@ -350,20 +350,19 @@ static void omap2430_musb_enable(struct musb *musb)
case USB_EVENT_ID:
otg_init(musb->xceiv);
- if (data->interface_type == MUSB_INTERFACE_UTMI) {
- devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
- /* start the session */
- devctl |= MUSB_DEVCTL_SESSION;
- musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
- while (musb_readb(musb->mregs, MUSB_DEVCTL) &
- MUSB_DEVCTL_BDEVICE) {
- cpu_relax();
-
- if (time_after(jiffies, timeout)) {
- dev_err(musb->controller,
- "configured as A device timeout");
- break;
- }
+ if (data->interface_type != MUSB_INTERFACE_UTMI)
+ break;
+ devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
+ /* start the session */
+ devctl |= MUSB_DEVCTL_SESSION;
+ musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);
+ while (musb_readb(musb->mregs, MUSB_DEVCTL) &
+ MUSB_DEVCTL_BDEVICE) {
+ cpu_relax();
+
+ if (time_after(jiffies, timeout)) {
+ dev_err(dev, "configured as A device timeout");
+ break;
}
}
break;
@@ -386,6 +385,7 @@ static void omap2430_musb_disable(struct musb *musb)
static int omap2430_musb_exit(struct musb *musb)
{
del_timer_sync(&musb_idle_timer);
+ cancel_work_sync(&musb->otg_notifier_work);
omap2430_low_level_exit(musb);
otg_put_transceiver(musb->xceiv);
@@ -478,7 +478,6 @@ static int __exit omap2430_remove(struct platform_device *pdev)
platform_device_del(glue->musb);
platform_device_put(glue->musb);
pm_runtime_put(&pdev->dev);
- pm_runtime_disable(&pdev->dev);
kfree(glue);
return 0;
@@ -491,6 +490,9 @@ static int omap2430_runtime_suspend(struct device *dev)
struct omap2430_glue *glue = dev_get_drvdata(dev);
struct musb *musb = glue_to_musb(glue);
+ musb->context.otg_interfsel = musb_readl(musb->mregs,
+ OTG_INTERFSEL);
+
omap2430_low_level_exit(musb);
otg_set_suspend(musb->xceiv, 1);
@@ -503,6 +505,9 @@ static int omap2430_runtime_resume(struct device *dev)
struct musb *musb = glue_to_musb(glue);
omap2430_low_level_init(musb);
+ musb_writel(musb->mregs, OTG_INTERFSEL,
+ musb->context.otg_interfsel);
+
otg_set_suspend(musb->xceiv, 0);
return 0;
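
The omap2430 hunks above split the transceiver notifier in two: musb_otg_notifications() now only records the event and schedules otg_notifier_work, and the real handling, which may sleep (for example pm_runtime_get_sync()), runs from the workqueue. The shape of that pattern, reduced to a stand-alone sketch with invented names:

#include <linux/notifier.h>
#include <linux/workqueue.h>

/* Hypothetical device structure, standing in for struct musb. */
struct sketch_dev {
	struct notifier_block	nb;
	struct work_struct	event_work;
	unsigned long		last_event;
};

/* Notifier callbacks may run in atomic context: just stash the event. */
static int sketch_notifier(struct notifier_block *nb,
			   unsigned long event, void *unused)
{
	struct sketch_dev *sdev = container_of(nb, struct sketch_dev, nb);

	sdev->last_event = event;
	schedule_work(&sdev->event_work);
	return NOTIFY_OK;
}

/* The work function may sleep, so PM and PHY calls are safe here. */
static void sketch_event_work(struct work_struct *work)
{
	struct sketch_dev *sdev = container_of(work, struct sketch_dev,
					       event_work);

	switch (sdev->last_event) {
	/* ... handle USB_EVENT_ID, USB_EVENT_VBUS, and so on ... */
	default:
		break;
	}
}

/*
 * At init: INIT_WORK(&sdev->event_work, sketch_event_work);
 * At exit: cancel_work_sync(&sdev->event_work); as the patch adds in
 * omap2430_musb_exit().
 */
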
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index ec14801..1f40561 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -56,6 +56,7 @@ u8 tusb_get_revision(struct musb *musb)
return rev;
}
+EXPORT_SYMBOL_GPL(tusb_get_revision);
static int tusb_print_revision(struct musb *musb)
{
diff --git a/drivers/usb/musb/ux500_dma.c b/drivers/usb/musb/ux500_dma.c
index ef4333f..97cb459 100644
--- a/drivers/usb/musb/ux500_dma.c
+++ b/drivers/usb/musb/ux500_dma.c
@@ -37,7 +37,6 @@ struct ux500_dma_channel {
struct dma_channel channel;
struct ux500_dma_controller *controller;
struct musb_hw_ep *hw_ep;
- struct work_struct channel_work;
struct dma_chan *dma_chan;
unsigned int cur_len;
dma_cookie_t cookie;
@@ -56,31 +55,11 @@ struct ux500_dma_controller {
dma_addr_t phy_base;
};
-/* Work function invoked from DMA callback to handle tx transfers. */
-static void ux500_tx_work(struct work_struct *data)
-{
- struct ux500_dma_channel *ux500_channel = container_of(data,
- struct ux500_dma_channel, channel_work);
- struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
- struct musb *musb = hw_ep->musb;
- unsigned long flags;
-
- dev_dbg(musb->controller, "DMA tx transfer done on hw_ep=%d\n",
- hw_ep->epnum);
-
- spin_lock_irqsave(&musb->lock, flags);
- ux500_channel->channel.actual_len = ux500_channel->cur_len;
- ux500_channel->channel.status = MUSB_DMA_STATUS_FREE;
- musb_dma_completion(musb, hw_ep->epnum,
- ux500_channel->is_tx);
- spin_unlock_irqrestore(&musb->lock, flags);
-}
-
/* Work function invoked from DMA callback to handle rx transfers. */
-static void ux500_rx_work(struct work_struct *data)
+void ux500_dma_callback(void *private_data)
{
- struct ux500_dma_channel *ux500_channel = container_of(data,
- struct ux500_dma_channel, channel_work);
+ struct dma_channel *channel = private_data;
+ struct ux500_dma_channel *ux500_channel = channel->private_data;
struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
struct musb *musb = hw_ep->musb;
unsigned long flags;
@@ -94,14 +73,7 @@ static void ux500_rx_work(struct work_struct *data)
musb_dma_completion(musb, hw_ep->epnum,
ux500_channel->is_tx);
spin_unlock_irqrestore(&musb->lock, flags);
-}
-
-void ux500_dma_callback(void *private_data)
-{
- struct dma_channel *channel = (struct dma_channel *)private_data;
- struct ux500_dma_channel *ux500_channel = channel->private_data;
- schedule_work(&ux500_channel->channel_work);
}
static bool ux500_configure_channel(struct dma_channel *channel,
@@ -112,7 +84,7 @@ static bool ux500_configure_channel(struct dma_channel *channel,
struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
struct dma_chan *dma_chan = ux500_channel->dma_chan;
struct dma_async_tx_descriptor *dma_desc;
- enum dma_data_direction direction;
+ enum dma_transfer_direction direction;
struct scatterlist sg;
struct dma_slave_config slave_conf;
enum dma_slave_buswidth addr_width;
@@ -132,7 +104,7 @@ static bool ux500_configure_channel(struct dma_channel *channel,
sg_dma_address(&sg) = dma_addr;
sg_dma_len(&sg) = len;
- direction = ux500_channel->is_tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+ direction = ux500_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
addr_width = (len & 0x3) ? DMA_SLAVE_BUSWIDTH_1_BYTE :
DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -330,7 +302,6 @@ static int ux500_dma_controller_start(struct dma_controller *c)
void **param_array;
struct ux500_dma_channel *channel_array;
u32 ch_count;
- void (*musb_channel_work)(struct work_struct *);
dma_cap_mask_t mask;
if ((data->num_rx_channels > UX500_MUSB_DMA_NUM_RX_CHANNELS) ||
@@ -347,7 +318,6 @@ static int ux500_dma_controller_start(struct dma_controller *c)
channel_array = controller->rx_channel;
ch_count = data->num_rx_channels;
param_array = data->dma_rx_param_array;
- musb_channel_work = ux500_rx_work;
for (dir = 0; dir < 2; dir++) {
for (ch_num = 0; ch_num < ch_count; ch_num++) {
@@ -374,15 +344,12 @@ static int ux500_dma_controller_start(struct dma_controller *c)
return -EBUSY;
}
- INIT_WORK(&ux500_channel->channel_work,
- musb_channel_work);
}
/* Prepare the loop for TX channels */
channel_array = controller->tx_channel;
ch_count = data->num_tx_channels;
param_array = data->dma_tx_param_array;
- musb_channel_work = ux500_tx_work;
is_tx = 1;
}
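
The ux500_dma rework above drops the per-channel work items entirely: ux500_dma_callback() now takes musb->lock and calls musb_dma_completion() directly, which is why musb_core.c gains an EXPORT_SYMBOL_GPL for it. Roughly, the resulting callback looks like the sketch below; it assumes the declarations from musb_core.h, and every name other than musb_dma_completion() is a placeholder.

#include <linux/spinlock.h>

#include "musb_core.h"

/* "struct sketch_channel" stands in for struct ux500_dma_channel. */
struct sketch_channel {
	struct musb_hw_ep	*hw_ep;
	size_t			cur_len;
	bool			is_tx;
};

/*
 * Complete the transfer straight from the dmaengine callback instead of
 * bouncing through a workqueue, holding musb->lock as the core expects.
 */
static void sketch_dma_callback(void *private_data)
{
	struct sketch_channel *ch = private_data;
	struct musb *musb = ch->hw_ep->musb;
	unsigned long flags;

	spin_lock_irqsave(&musb->lock, flags);
	/* ... mark the channel free and record ch->cur_len ... */
	musb_dma_completion(musb, ch->hw_ep->epnum, ch->is_tx);
	spin_unlock_irqrestore(&musb->lock, flags);
}
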
diff --git a/drivers/usb/otg/Kconfig b/drivers/usb/otg/Kconfig
index c66481a..735ef4c 100644
--- a/drivers/usb/otg/Kconfig
+++ b/drivers/usb/otg/Kconfig
@@ -82,23 +82,9 @@ config NOP_USB_XCEIV
tristate "NOP USB Transceiver Driver"
select USB_OTG_UTILS
help
- this driver is to be used by all the usb transceiver which are either
- built-in with usb ip or which are autonomous and doesn't require any
- phy programming such as ISP1x04 etc.
-
-config USB_LANGWELL_OTG
- tristate "Intel Langwell USB OTG dual-role support"
- depends on USB && PCI && INTEL_SCU_IPC
- select USB_OTG
- select USB_OTG_UTILS
- help
- Say Y here if you want to build Intel Langwell USB OTG
- transciever driver in kernel. This driver implements role
- switch between EHCI host driver and Langwell USB OTG
- client driver.
-
- To compile this driver as a module, choose M here: the
- module will be called langwell_otg.
+	  This driver is to be used by all the USB transceivers which are
+	  either built-in with the USB IP or are autonomous and don't
+	  require any PHY programming, such as ISP1x04 etc.
config USB_MSM_OTG
tristate "OTG support for Qualcomm on-chip USB controller"
@@ -114,20 +100,32 @@ config USB_MSM_OTG
has an external PHY.
config AB8500_USB
- tristate "AB8500 USB Transceiver Driver"
- depends on AB8500_CORE
- select USB_OTG_UTILS
- help
- Enable this to support the USB OTG transceiver in AB8500 chip.
- This transceiver supports high and full speed devices plus,
- in host mode, low speed.
+ tristate "AB8500 USB Transceiver Driver"
+ depends on AB8500_CORE
+ select USB_OTG_UTILS
+ help
+ Enable this to support the USB OTG transceiver in AB8500 chip.
+ This transceiver supports high and full speed devices plus,
+ in host mode, low speed.
config FSL_USB2_OTG
bool "Freescale USB OTG Transceiver Driver"
- depends on USB_EHCI_FSL && USB_GADGET_FSL_USB2
+ depends on USB_EHCI_FSL && USB_GADGET_FSL_USB2 && USB_SUSPEND
select USB_OTG
select USB_OTG_UTILS
help
Enable this to support Freescale USB OTG transceiver.
+config USB_MV_OTG
+ tristate "Marvell USB OTG support"
+ depends on USB_EHCI_MV && USB_MV_UDC && USB_SUSPEND
+ select USB_OTG
+ select USB_OTG_UTILS
+ help
+	  Say Y here if you want to build Marvell USB OTG transceiver
+ driver in kernel (including PXA and MMP series). This driver
+ implements role switch between EHCI host driver and gadget driver.
+
+ To compile this driver as a module, choose M here.
+
endif # USB || OTG
diff --git a/drivers/usb/otg/Makefile b/drivers/usb/otg/Makefile
index 566655c..41aa509 100644
--- a/drivers/usb/otg/Makefile
+++ b/drivers/usb/otg/Makefile
@@ -13,7 +13,6 @@ obj-$(CONFIG_USB_GPIO_VBUS) += gpio_vbus.o
obj-$(CONFIG_ISP1301_OMAP) += isp1301_omap.o
obj-$(CONFIG_TWL4030_USB) += twl4030-usb.o
obj-$(CONFIG_TWL6030_USB) += twl6030-usb.o
-obj-$(CONFIG_USB_LANGWELL_OTG) += langwell_otg.o
obj-$(CONFIG_NOP_USB_XCEIV) += nop-usb-xceiv.o
obj-$(CONFIG_USB_ULPI) += ulpi.o
obj-$(CONFIG_USB_ULPI_VIEWPORT) += ulpi_viewport.o
@@ -21,3 +20,4 @@ obj-$(CONFIG_USB_MSM_OTG) += msm_otg.o
obj-$(CONFIG_AB8500_USB) += ab8500-usb.o
fsl_usb2_otg-objs := fsl_otg.o otg_fsm.o
obj-$(CONFIG_FSL_USB2_OTG) += fsl_usb2_otg.o
+obj-$(CONFIG_USB_MV_OTG) += mv_otg.o
diff --git a/drivers/usb/otg/ab8500-usb.c b/drivers/usb/otg/ab8500-usb.c
index 07ccea9..74fe6e6 100644
--- a/drivers/usb/otg/ab8500-usb.c
+++ b/drivers/usb/otg/ab8500-usb.c
@@ -30,7 +30,7 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mfd/abx500.h>
-#include <linux/mfd/ab8500.h>
+#include <linux/mfd/abx500/ab8500.h>
#define AB8500_MAIN_WD_CTRL_REG 0x01
#define AB8500_USB_LINE_STAT_REG 0x80
diff --git a/drivers/usb/otg/fsl_otg.c b/drivers/usb/otg/fsl_otg.c
index 0f420b2..a190850 100644
--- a/drivers/usb/otg/fsl_otg.c
+++ b/drivers/usb/otg/fsl_otg.c
@@ -639,7 +639,7 @@ static int fsl_otg_set_power(struct otg_transceiver *otg_p, unsigned mA)
* Delayed pin detect interrupt processing.
*
* When the Mini-A cable is disconnected from the board,
- * the pin-detect interrupt happens before the disconnnect
+ * the pin-detect interrupt happens before the disconnect
* interrupts for the connected device(s). In order to
* process the disconnect interrupt(s) prior to switching
* roles, the pin-detect interrupts are delayed, and handled
@@ -1151,18 +1151,7 @@ struct platform_driver fsl_otg_driver = {
},
};
-static int __init fsl_usb_otg_init(void)
-{
- pr_info(DRIVER_INFO "\n");
- return platform_driver_register(&fsl_otg_driver);
-}
-module_init(fsl_usb_otg_init);
-
-static void __exit fsl_usb_otg_exit(void)
-{
- platform_driver_unregister(&fsl_otg_driver);
-}
-module_exit(fsl_usb_otg_exit);
+module_platform_driver(fsl_otg_driver);
MODULE_DESCRIPTION(DRIVER_INFO);
MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/usb/otg/langwell_otg.c b/drivers/usb/otg/langwell_otg.c
deleted file mode 100644
index f08f784..0000000
--- a/drivers/usb/otg/langwell_otg.c
+++ /dev/null
@@ -1,2347 +0,0 @@
-/*
- * Intel Langwell USB OTG transceiver driver
- * Copyright (C) 2008 - 2010, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-/* This driver helps to switch Langwell OTG controller function between host
- * and peripheral. It works with EHCI driver and Langwell client controller
- * driver together.
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/moduleparam.h>
-#include <linux/usb/ch9.h>
-#include <linux/usb/gadget.h>
-#include <linux/usb.h>
-#include <linux/usb/otg.h>
-#include <linux/usb/hcd.h>
-#include <linux/notifier.h>
-#include <linux/delay.h>
-#include <asm/intel_scu_ipc.h>
-
-#include <linux/usb/langwell_otg.h>
-
-#define DRIVER_DESC "Intel Langwell USB OTG transceiver driver"
-#define DRIVER_VERSION "July 10, 2010"
-
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_AUTHOR("Henry Yuan <hang.yuan@intel.com>, Hao Wu <hao.wu@intel.com>");
-MODULE_VERSION(DRIVER_VERSION);
-MODULE_LICENSE("GPL");
-
-static const char driver_name[] = "langwell_otg";
-
-static int langwell_otg_probe(struct pci_dev *pdev,
- const struct pci_device_id *id);
-static void langwell_otg_remove(struct pci_dev *pdev);
-static int langwell_otg_suspend(struct pci_dev *pdev, pm_message_t message);
-static int langwell_otg_resume(struct pci_dev *pdev);
-
-static int langwell_otg_set_host(struct otg_transceiver *otg,
- struct usb_bus *host);
-static int langwell_otg_set_peripheral(struct otg_transceiver *otg,
- struct usb_gadget *gadget);
-static int langwell_otg_start_srp(struct otg_transceiver *otg);
-
-static const struct pci_device_id pci_ids[] = {{
- .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
- .class_mask = ~0,
- .vendor = 0x8086,
- .device = 0x0811,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
-}, { /* end: all zeroes */ }
-};
-
-static struct pci_driver otg_pci_driver = {
- .name = (char *) driver_name,
- .id_table = pci_ids,
-
- .probe = langwell_otg_probe,
- .remove = langwell_otg_remove,
-
- .suspend = langwell_otg_suspend,
- .resume = langwell_otg_resume,
-};
-
-/* HSM timers */
-static inline struct langwell_otg_timer *otg_timer_initializer
-(void (*function)(unsigned long), unsigned long expires, unsigned long data)
-{
- struct langwell_otg_timer *timer;
- timer = kmalloc(sizeof(struct langwell_otg_timer), GFP_KERNEL);
- if (timer == NULL)
- return timer;
-
- timer->function = function;
- timer->expires = expires;
- timer->data = data;
- return timer;
-}
-
-static struct langwell_otg_timer *a_wait_vrise_tmr, *a_aidl_bdis_tmr,
- *b_se0_srp_tmr, *b_srp_init_tmr;
-
-static struct list_head active_timers;
-
-static struct langwell_otg *the_transceiver;
-
-/* host/client notify transceiver when event affects HNP state */
-void langwell_update_transceiver(void)
-{
- struct langwell_otg *lnw = the_transceiver;
-
- dev_dbg(lnw->dev, "transceiver is updated\n");
-
- if (!lnw->qwork)
- return ;
-
- queue_work(lnw->qwork, &lnw->work);
-}
-EXPORT_SYMBOL(langwell_update_transceiver);
-
-static int langwell_otg_set_host(struct otg_transceiver *otg,
- struct usb_bus *host)
-{
- otg->host = host;
-
- return 0;
-}
-
-static int langwell_otg_set_peripheral(struct otg_transceiver *otg,
- struct usb_gadget *gadget)
-{
- otg->gadget = gadget;
-
- return 0;
-}
-
-static int langwell_otg_set_power(struct otg_transceiver *otg,
- unsigned mA)
-{
- return 0;
-}
-
-/* A-device drives vbus, controlled through IPC commands */
-static int langwell_otg_set_vbus(struct otg_transceiver *otg, bool enabled)
-{
- struct langwell_otg *lnw = the_transceiver;
- u8 sub_id;
-
- dev_dbg(lnw->dev, "%s <--- %s\n", __func__, enabled ? "on" : "off");
-
- if (enabled)
- sub_id = 0x8; /* Turn on the VBus */
- else
- sub_id = 0x9; /* Turn off the VBus */
-
- if (intel_scu_ipc_simple_command(0xef, sub_id)) {
- dev_dbg(lnw->dev, "Failed to set Vbus via IPC commands\n");
- return -EBUSY;
- }
-
- dev_dbg(lnw->dev, "%s --->\n", __func__);
-
- return 0;
-}
-
-/* charge vbus or discharge vbus through a resistor to ground */
-static void langwell_otg_chrg_vbus(int on)
-{
- struct langwell_otg *lnw = the_transceiver;
- u32 val;
-
- val = readl(lnw->iotg.base + CI_OTGSC);
-
- if (on)
- writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_VC,
- lnw->iotg.base + CI_OTGSC);
- else
- writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_VD,
- lnw->iotg.base + CI_OTGSC);
-}
-
-/* Start SRP */
-static int langwell_otg_start_srp(struct otg_transceiver *otg)
-{
- struct langwell_otg *lnw = the_transceiver;
- struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
- u32 val;
-
- dev_dbg(lnw->dev, "%s --->\n", __func__);
-
- val = readl(iotg->base + CI_OTGSC);
-
- writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HADP,
- iotg->base + CI_OTGSC);
-
- /* Check if the data plus is finished or not */
- msleep(8);
- val = readl(iotg->base + CI_OTGSC);
- if (val & (OTGSC_HADP | OTGSC_DP))
- dev_dbg(lnw->dev, "DataLine SRP Error\n");
-
- /* Disable interrupt - b_sess_vld */
- val = readl(iotg->base + CI_OTGSC);
- val &= (~(OTGSC_BSVIE | OTGSC_BSEIE));
- writel(val, iotg->base + CI_OTGSC);
-
- /* Start VBus SRP, drive vbus to generate VBus pulse */
- iotg->otg.set_vbus(&iotg->otg, true);
- msleep(15);
- iotg->otg.set_vbus(&iotg->otg, false);
-
- /* Enable interrupt - b_sess_vld*/
- val = readl(iotg->base + CI_OTGSC);
- dev_dbg(lnw->dev, "after VBUS pulse otgsc = %x\n", val);
-
- val |= (OTGSC_BSVIE | OTGSC_BSEIE);
- writel(val, iotg->base + CI_OTGSC);
-
- /* If Vbus is valid, then update the hsm */
- if (val & OTGSC_BSV) {
- dev_dbg(lnw->dev, "no b_sess_vld interrupt\n");
-
- lnw->iotg.hsm.b_sess_vld = 1;
- langwell_update_transceiver();
- }
-
- dev_dbg(lnw->dev, "%s <---\n", __func__);
- return 0;
-}
-
-/* stop or restart SOFs via bus_suspend/bus_resume */
-static void langwell_otg_loc_sof(int on)
-{
- struct langwell_otg *lnw = the_transceiver;
- struct usb_hcd *hcd;
- int err;
-
- dev_dbg(lnw->dev, "%s ---> %s\n", __func__, on ? "suspend" : "resume");
-
- hcd = bus_to_hcd(lnw->iotg.otg.host);
- if (on)
- err = hcd->driver->bus_resume(hcd);
- else
- err = hcd->driver->bus_suspend(hcd);
-
- if (err)
- dev_dbg(lnw->dev, "Fail to resume/suspend USB bus - %d\n", err);
-
- dev_dbg(lnw->dev, "%s <---\n", __func__);
-}
-
-static int langwell_otg_check_otgsc(void)
-{
- struct langwell_otg *lnw = the_transceiver;
- u32 otgsc, usbcfg;
-
- dev_dbg(lnw->dev, "check sync OTGSC and USBCFG registers\n");
-
- otgsc = readl(lnw->iotg.base + CI_OTGSC);
- usbcfg = readl(lnw->usbcfg);
-
- dev_dbg(lnw->dev, "OTGSC = %08x, USBCFG = %08x\n",
- otgsc, usbcfg);
- dev_dbg(lnw->dev, "OTGSC_AVV = %d\n", !!(otgsc & OTGSC_AVV));
- dev_dbg(lnw->dev, "USBCFG.VBUSVAL = %d\n",
- !!(usbcfg & USBCFG_VBUSVAL));
- dev_dbg(lnw->dev, "OTGSC_ASV = %d\n", !!(otgsc & OTGSC_ASV));
- dev_dbg(lnw->dev, "USBCFG.AVALID = %d\n",
- !!(usbcfg & USBCFG_AVALID));
- dev_dbg(lnw->dev, "OTGSC_BSV = %d\n", !!(otgsc & OTGSC_BSV));
- dev_dbg(lnw->dev, "USBCFG.BVALID = %d\n",
- !!(usbcfg & USBCFG_BVALID));
- dev_dbg(lnw->dev, "OTGSC_BSE = %d\n", !!(otgsc & OTGSC_BSE));
- dev_dbg(lnw->dev, "USBCFG.SESEND = %d\n",
- !!(usbcfg & USBCFG_SESEND));
-
- /* Check USBCFG VBusValid/AValid/BValid/SessEnd */
- if (!!(otgsc & OTGSC_AVV) ^ !!(usbcfg & USBCFG_VBUSVAL)) {
- dev_dbg(lnw->dev, "OTGSC.AVV != USBCFG.VBUSVAL\n");
- goto err;
- }
- if (!!(otgsc & OTGSC_ASV) ^ !!(usbcfg & USBCFG_AVALID)) {
- dev_dbg(lnw->dev, "OTGSC.ASV != USBCFG.AVALID\n");
- goto err;
- }
- if (!!(otgsc & OTGSC_BSV) ^ !!(usbcfg & USBCFG_BVALID)) {
- dev_dbg(lnw->dev, "OTGSC.BSV != USBCFG.BVALID\n");
- goto err;
- }
- if (!!(otgsc & OTGSC_BSE) ^ !!(usbcfg & USBCFG_SESEND)) {
- dev_dbg(lnw->dev, "OTGSC.BSE != USBCFG.SESSEN\n");
- goto err;
- }
-
- dev_dbg(lnw->dev, "OTGSC and USBCFG are synced\n");
-
- return 0;
-
-err:
- dev_warn(lnw->dev, "OTGSC isn't equal to USBCFG\n");
- return -EPIPE;
-}
-
-
-static void langwell_otg_phy_low_power(int on)
-{
- struct langwell_otg *lnw = the_transceiver;
- struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
- u8 val, phcd;
- int retval;
-
- dev_dbg(lnw->dev, "%s ---> %s mode\n",
- __func__, on ? "Low power" : "Normal");
-
- phcd = 0x40;
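-	/* PHCD bit mask within the byte read from HOSTPC1 + 2 */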
-
- val = readb(iotg->base + CI_HOSTPC1 + 2);
-
- if (on) {
-		/* Due to a hardware issue, the sync between USBCFG and
-		 * OTGSC fails after PHCD is set. So before setting PHCD,
-		 * check whether a sync is in progress now; if it is, do
-		 * not touch the PHCD bit */
- retval = langwell_otg_check_otgsc();
- if (retval) {
- dev_dbg(lnw->dev, "Skip PHCD programming..\n");
-			return;
- }
-
- writeb(val | phcd, iotg->base + CI_HOSTPC1 + 2);
- } else
- writeb(val & ~phcd, iotg->base + CI_HOSTPC1 + 2);
-
- dev_dbg(lnw->dev, "%s <--- done\n", __func__);
-}
-
-/* After driving VBus, add a 5 ms delay before setting PHCD */
-static void langwell_otg_phy_low_power_wait(int on)
-{
- struct langwell_otg *lnw = the_transceiver;
-
- dev_dbg(lnw->dev, "add 5ms delay before programing PHCD\n");
-
- mdelay(5);
- langwell_otg_phy_low_power(on);
-}
-
-/* Enable/Disable OTG interrupt */
-static void langwell_otg_intr(int on)
-{
- struct langwell_otg *lnw = the_transceiver;
- struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
- u32 val;
-
- dev_dbg(lnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
-
- val = readl(iotg->base + CI_OTGSC);
-
-	/* OTGSC_INT_MASK doesn't contain the 1ms interrupt */
- if (on) {
- val = val | (OTGSC_INT_MASK);
- writel(val, iotg->base + CI_OTGSC);
- } else {
- val = val & ~(OTGSC_INT_MASK);
- writel(val, iotg->base + CI_OTGSC);
- }
-
- dev_dbg(lnw->dev, "%s <---\n", __func__);
-}
-
-/* set HAAR: Hardware Assist Auto-Reset */
-static void langwell_otg_HAAR(int on)
-{
- struct langwell_otg *lnw = the_transceiver;
- struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
- u32 val;
-
- dev_dbg(lnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
-
- val = readl(iotg->base + CI_OTGSC);
- if (on)
- writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HAAR,
- iotg->base + CI_OTGSC);
- else
- writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HAAR,
- iotg->base + CI_OTGSC);
-
- dev_dbg(lnw->dev, "%s <---\n", __func__);
-}
-
-/* set HABA: Hardware Assist B-Disconnect to A-Connect */
-static void langwell_otg_HABA(int on)
-{
- struct langwell_otg *lnw = the_transceiver;
- struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
- u32 val;
-
- dev_dbg(lnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
-
- val = readl(iotg->base + CI_OTGSC);
- if (on)
- writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HABA,
- iotg->base + CI_OTGSC);
- else
- writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HABA,
- iotg->base + CI_OTGSC);
-
- dev_dbg(lnw->dev, "%s <---\n", __func__);
-}
-
-static int langwell_otg_check_se0_srp(int on)
-{
- struct langwell_otg *lnw = the_transceiver;
- int delay_time = TB_SE0_SRP * 10;
- u32 val;
-
- dev_dbg(lnw->dev, "%s --->\n", __func__);
-
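-	/* poll the line state every 100 us, up to TB_SE0_SRP * 10
-	 * times, until it is no longer SE0 */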
- do {
- udelay(100);
- if (!delay_time--)
- break;
- val = readl(lnw->iotg.base + CI_PORTSC1);
- val &= PORTSC_LS;
- } while (!val);
-
- dev_dbg(lnw->dev, "%s <---\n", __func__);
- return val;
-}
-
-/* The timeout callback function to set time out bit */
-static void set_tmout(unsigned long indicator)
-{
- *(int *)indicator = 1;
-}
-
-void langwell_otg_nsf_msg(unsigned long indicator)
-{
- struct langwell_otg *lnw = the_transceiver;
-
- switch (indicator) {
- case 2:
- case 4:
- case 6:
- case 7:
- dev_warn(lnw->dev,
- "OTG:NSF-%lu - deivce not responding\n", indicator);
- break;
- case 3:
- dev_warn(lnw->dev,
- "OTG:NSF-%lu - deivce not supported\n", indicator);
- break;
- default:
- dev_warn(lnw->dev, "Do not have this kind of NSF\n");
- break;
- }
-}
-
-/* Initialize timers */
-static int langwell_otg_init_timers(struct otg_hsm *hsm)
-{
- /* HSM used timers */
- a_wait_vrise_tmr = otg_timer_initializer(&set_tmout, TA_WAIT_VRISE,
- (unsigned long)&hsm->a_wait_vrise_tmout);
- if (a_wait_vrise_tmr == NULL)
- return -ENOMEM;
- a_aidl_bdis_tmr = otg_timer_initializer(&set_tmout, TA_AIDL_BDIS,
- (unsigned long)&hsm->a_aidl_bdis_tmout);
- if (a_aidl_bdis_tmr == NULL)
- return -ENOMEM;
- b_se0_srp_tmr = otg_timer_initializer(&set_tmout, TB_SE0_SRP,
- (unsigned long)&hsm->b_se0_srp);
- if (b_se0_srp_tmr == NULL)
- return -ENOMEM;
- b_srp_init_tmr = otg_timer_initializer(&set_tmout, TB_SRP_INIT,
- (unsigned long)&hsm->b_srp_init_tmout);
- if (b_srp_init_tmr == NULL)
- return -ENOMEM;
-
- return 0;
-}
-
-/* Free timers */
-static void langwell_otg_free_timers(void)
-{
- kfree(a_wait_vrise_tmr);
- kfree(a_aidl_bdis_tmr);
- kfree(b_se0_srp_tmr);
- kfree(b_srp_init_tmr);
-}
-
-/* The timeout callback function to set time out bit */
-static void langwell_otg_timer_fn(unsigned long indicator)
-{
- struct langwell_otg *lnw = the_transceiver;
-
- *(int *)indicator = 1;
-
- dev_dbg(lnw->dev, "kernel timer - timeout\n");
-
- langwell_update_transceiver();
-}
-
-/* kernel timer used instead of HW based interrupt */
-static void langwell_otg_add_ktimer(enum langwell_otg_timer_type timers)
-{
- struct langwell_otg *lnw = the_transceiver;
- struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
- unsigned long j = jiffies;
- unsigned long data, time;
-
- switch (timers) {
- case TA_WAIT_VRISE_TMR:
- iotg->hsm.a_wait_vrise_tmout = 0;
- data = (unsigned long)&iotg->hsm.a_wait_vrise_tmout;
- time = TA_WAIT_VRISE;
- break;
- case TA_WAIT_BCON_TMR:
- iotg->hsm.a_wait_bcon_tmout = 0;
- data = (unsigned long)&iotg->hsm.a_wait_bcon_tmout;
- time = TA_WAIT_BCON;
- break;
- case TA_AIDL_BDIS_TMR:
- iotg->hsm.a_aidl_bdis_tmout = 0;
- data = (unsigned long)&iotg->hsm.a_aidl_bdis_tmout;
- time = TA_AIDL_BDIS;
- break;
- case TB_ASE0_BRST_TMR:
- iotg->hsm.b_ase0_brst_tmout = 0;
- data = (unsigned long)&iotg->hsm.b_ase0_brst_tmout;
- time = TB_ASE0_BRST;
- break;
- case TB_SRP_INIT_TMR:
- iotg->hsm.b_srp_init_tmout = 0;
- data = (unsigned long)&iotg->hsm.b_srp_init_tmout;
- time = TB_SRP_INIT;
- break;
- case TB_SRP_FAIL_TMR:
- iotg->hsm.b_srp_fail_tmout = 0;
- data = (unsigned long)&iotg->hsm.b_srp_fail_tmout;
- time = TB_SRP_FAIL;
- break;
- case TB_BUS_SUSPEND_TMR:
- iotg->hsm.b_bus_suspend_tmout = 0;
- data = (unsigned long)&iotg->hsm.b_bus_suspend_tmout;
- time = TB_BUS_SUSPEND;
- break;
- default:
- dev_dbg(lnw->dev, "unknown timer, cannot enable it\n");
- return;
- }
-
- lnw->hsm_timer.data = data;
- lnw->hsm_timer.function = langwell_otg_timer_fn;
- lnw->hsm_timer.expires = j + time * HZ / 1000; /* milliseconds */
-
- add_timer(&lnw->hsm_timer);
-
- dev_dbg(lnw->dev, "add timer successfully\n");
-}
-
-/* Add timer to timer list */
-static void langwell_otg_add_timer(void *gtimer)
-{
- struct langwell_otg_timer *timer = (struct langwell_otg_timer *)gtimer;
- struct langwell_otg_timer *tmp_timer;
- struct intel_mid_otg_xceiv *iotg = &the_transceiver->iotg;
- u32 val32;
-
- /* Check if the timer is already in the active list,
- * if so update timer count
- */
- list_for_each_entry(tmp_timer, &active_timers, list)
- if (tmp_timer == timer) {
- timer->count = timer->expires;
- return;
- }
- timer->count = timer->expires;
-
- if (list_empty(&active_timers)) {
- val32 = readl(iotg->base + CI_OTGSC);
- writel(val32 | OTGSC_1MSE, iotg->base + CI_OTGSC);
- }
-
- list_add_tail(&timer->list, &active_timers);
-}
-
-/* Remove timer from the timer list; clear timeout status */
-static void langwell_otg_del_timer(void *gtimer)
-{
- struct langwell_otg *lnw = the_transceiver;
- struct langwell_otg_timer *timer = (struct langwell_otg_timer *)gtimer;
- struct langwell_otg_timer *tmp_timer, *del_tmp;
- u32 val32;
-
- list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list)
- if (tmp_timer == timer)
- list_del(&timer->list);
-
- if (list_empty(&active_timers)) {
- val32 = readl(lnw->iotg.base + CI_OTGSC);
- writel(val32 & ~OTGSC_1MSE, lnw->iotg.base + CI_OTGSC);
- }
-}
-
-/* Reduce timer count by 1, and find timeout conditions. */
-static int langwell_otg_tick_timer(u32 *int_sts)
-{
- struct langwell_otg *lnw = the_transceiver;
- struct langwell_otg_timer *tmp_timer, *del_tmp;
- int expired = 0;
-
- list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list) {
- tmp_timer->count--;
- /* check if timer expires */
- if (!tmp_timer->count) {
- list_del(&tmp_timer->list);
- tmp_timer->function(tmp_timer->data);
- expired = 1;
- }
- }
-
- if (list_empty(&active_timers)) {
- dev_dbg(lnw->dev, "tick timer: disable 1ms int\n");
- *int_sts = *int_sts & ~OTGSC_1MSE;
- }
- return expired;
-}
-
-static void reset_otg(void)
-{
- struct langwell_otg *lnw = the_transceiver;
- int delay_time = 1000;
- u32 val;
-
- dev_dbg(lnw->dev, "reseting OTG controller ...\n");
- val = readl(lnw->iotg.base + CI_USBCMD);
- writel(val | USBCMD_RST, lnw->iotg.base + CI_USBCMD);
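-	/* poll every 100 us, up to ~100 ms, for USBCMD_RST to self-clear */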
- do {
- udelay(100);
- if (!delay_time--)
- dev_dbg(lnw->dev, "reset timeout\n");
- val = readl(lnw->iotg.base + CI_USBCMD);
- val &= USBCMD_RST;
- } while (val != 0);
- dev_dbg(lnw->dev, "reset done.\n");
-}
-
-static void set_host_mode(void)
-{
- struct langwell_otg *lnw = the_transceiver;
- u32 val;
-
- reset_otg();
- val = readl(lnw->iotg.base + CI_USBMODE);
- val = (val & (~USBMODE_CM)) | USBMODE_HOST;
- writel(val, lnw->iotg.base + CI_USBMODE);
-}
-
-static void set_client_mode(void)
-{
- struct langwell_otg *lnw = the_transceiver;
- u32 val;
-
- reset_otg();
- val = readl(lnw->iotg.base + CI_USBMODE);
- val = (val & (~USBMODE_CM)) | USBMODE_DEVICE;
- writel(val, lnw->iotg.base + CI_USBMODE);
-}
-
-static void init_hsm(void)
-{
- struct langwell_otg *lnw = the_transceiver;
- struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
- u32 val32;
-
- /* read OTGSC after reset */
- val32 = readl(lnw->iotg.base + CI_OTGSC);
- dev_dbg(lnw->dev, "%s: OTGSC init value = 0x%x\n", __func__, val32);
-
- /* set init state */
- if (val32 & OTGSC_ID) {
- iotg->hsm.id = 1;
- iotg->otg.default_a = 0;
- set_client_mode();
- iotg->otg.state = OTG_STATE_B_IDLE;
- } else {
- iotg->hsm.id = 0;
- iotg->otg.default_a = 1;
- set_host_mode();
- iotg->otg.state = OTG_STATE_A_IDLE;
- }
-
- /* set session indicator */
- if (val32 & OTGSC_BSE)
- iotg->hsm.b_sess_end = 1;
- if (val32 & OTGSC_BSV)
- iotg->hsm.b_sess_vld = 1;
- if (val32 & OTGSC_ASV)
- iotg->hsm.a_sess_vld = 1;
- if (val32 & OTGSC_AVV)
- iotg->hsm.a_vbus_vld = 1;
-
-	/* power the bus by default */
- iotg->hsm.a_bus_req = 1;
- iotg->hsm.a_bus_drop = 0;
-	/* by default, don't request the bus as a B-device */
- iotg->hsm.b_bus_req = 0;
- /* no system error */
- iotg->hsm.a_clr_err = 0;
-
- langwell_otg_phy_low_power_wait(1);
-}
-
-static void update_hsm(void)
-{
- struct langwell_otg *lnw = the_transceiver;
- struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
- u32 val32;
-
- /* read OTGSC */
- val32 = readl(lnw->iotg.base + CI_OTGSC);
- dev_dbg(lnw->dev, "%s: OTGSC value = 0x%x\n", __func__, val32);
-
- iotg->hsm.id = !!(val32 & OTGSC_ID);
- iotg->hsm.b_sess_end = !!(val32 & OTGSC_BSE);
- iotg->hsm.b_sess_vld = !!(val32 & OTGSC_BSV);
- iotg->hsm.a_sess_vld = !!(val32 & OTGSC_ASV);
- iotg->hsm.a_vbus_vld = !!(val32 & OTGSC_AVV);
-}
-
-static irqreturn_t otg_dummy_irq(int irq, void *_dev)
-{
- struct langwell_otg *lnw = the_transceiver;
- void __iomem *reg_base = _dev;
- u32 val;
- u32 int_mask = 0;
-
- val = readl(reg_base + CI_USBMODE);
- if ((val & USBMODE_CM) != USBMODE_DEVICE)
- return IRQ_NONE;
-
- val = readl(reg_base + CI_USBSTS);
- int_mask = val & INTR_DUMMY_MASK;
-
- if (int_mask == 0)
- return IRQ_NONE;
-
-	/* Clear hsm.b_conn here since the host driver can't detect it;
-	 * otg_dummy_irq being called means a B-disconnect happened.
-	 */
- if (lnw->iotg.hsm.b_conn) {
- lnw->iotg.hsm.b_conn = 0;
- if (spin_trylock(&lnw->wq_lock)) {
- langwell_update_transceiver();
- spin_unlock(&lnw->wq_lock);
- }
- }
-
- /* Clear interrupts */
- writel(int_mask, reg_base + CI_USBSTS);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t otg_irq(int irq, void *_dev)
-{
- struct langwell_otg *lnw = _dev;
- struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
- u32 int_sts, int_en;
- u32 int_mask = 0;
- int flag = 0;
-
- int_sts = readl(lnw->iotg.base + CI_OTGSC);
- int_en = (int_sts & OTGSC_INTEN_MASK) >> 8;
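-	/* enable bits sit 8 bits above their status bits in OTGSC */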
- int_mask = int_sts & int_en;
- if (int_mask == 0)
- return IRQ_NONE;
-
- if (int_mask & OTGSC_IDIS) {
- dev_dbg(lnw->dev, "%s: id change int\n", __func__);
- iotg->hsm.id = (int_sts & OTGSC_ID) ? 1 : 0;
- dev_dbg(lnw->dev, "id = %d\n", iotg->hsm.id);
- flag = 1;
- }
- if (int_mask & OTGSC_DPIS) {
- dev_dbg(lnw->dev, "%s: data pulse int\n", __func__);
- iotg->hsm.a_srp_det = (int_sts & OTGSC_DPS) ? 1 : 0;
- dev_dbg(lnw->dev, "data pulse = %d\n", iotg->hsm.a_srp_det);
- flag = 1;
- }
- if (int_mask & OTGSC_BSEIS) {
- dev_dbg(lnw->dev, "%s: b session end int\n", __func__);
- iotg->hsm.b_sess_end = (int_sts & OTGSC_BSE) ? 1 : 0;
- dev_dbg(lnw->dev, "b_sess_end = %d\n", iotg->hsm.b_sess_end);
- flag = 1;
- }
- if (int_mask & OTGSC_BSVIS) {
- dev_dbg(lnw->dev, "%s: b session valid int\n", __func__);
- iotg->hsm.b_sess_vld = (int_sts & OTGSC_BSV) ? 1 : 0;
- dev_dbg(lnw->dev, "b_sess_vld = %d\n", iotg->hsm.b_sess_end);
- flag = 1;
- }
- if (int_mask & OTGSC_ASVIS) {
- dev_dbg(lnw->dev, "%s: a session valid int\n", __func__);
- iotg->hsm.a_sess_vld = (int_sts & OTGSC_ASV) ? 1 : 0;
- dev_dbg(lnw->dev, "a_sess_vld = %d\n", iotg->hsm.a_sess_vld);
- flag = 1;
- }
- if (int_mask & OTGSC_AVVIS) {
- dev_dbg(lnw->dev, "%s: a vbus valid int\n", __func__);
- iotg->hsm.a_vbus_vld = (int_sts & OTGSC_AVV) ? 1 : 0;
- dev_dbg(lnw->dev, "a_vbus_vld = %d\n", iotg->hsm.a_vbus_vld);
- flag = 1;
- }
-
- if (int_mask & OTGSC_1MSS) {
-		/* need to schedule otg_work if any timer has expired */
- if (langwell_otg_tick_timer(&int_sts))
- flag = 1;
- }
-
- writel((int_sts & ~OTGSC_INTSTS_MASK) | int_mask,
- lnw->iotg.base + CI_OTGSC);
- if (flag)
- langwell_update_transceiver();
-
- return IRQ_HANDLED;
-}
-
-static int langwell_otg_iotg_notify(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct langwell_otg *lnw = the_transceiver;
- struct intel_mid_otg_xceiv *iotg = data;
- int flag = 0;
-
- if (iotg == NULL)
- return NOTIFY_BAD;
-
- if (lnw == NULL)
- return NOTIFY_BAD;
-
- switch (action) {
- case MID_OTG_NOTIFY_CONNECT:
- dev_dbg(lnw->dev, "Lnw OTG Notify Connect Event\n");
- if (iotg->otg.default_a == 1)
- iotg->hsm.b_conn = 1;
- else
- iotg->hsm.a_conn = 1;
- flag = 1;
- break;
- case MID_OTG_NOTIFY_DISCONN:
- dev_dbg(lnw->dev, "Lnw OTG Notify Disconnect Event\n");
- if (iotg->otg.default_a == 1)
- iotg->hsm.b_conn = 0;
- else
- iotg->hsm.a_conn = 0;
- flag = 1;
- break;
- case MID_OTG_NOTIFY_HSUSPEND:
- dev_dbg(lnw->dev, "Lnw OTG Notify Host Bus suspend Event\n");
- if (iotg->otg.default_a == 1)
- iotg->hsm.a_suspend_req = 1;
- else
- iotg->hsm.b_bus_req = 0;
- flag = 1;
- break;
- case MID_OTG_NOTIFY_HRESUME:
- dev_dbg(lnw->dev, "Lnw OTG Notify Host Bus resume Event\n");
- if (iotg->otg.default_a == 1)
- iotg->hsm.b_bus_resume = 1;
- flag = 1;
- break;
- case MID_OTG_NOTIFY_CSUSPEND:
- dev_dbg(lnw->dev, "Lnw OTG Notify Client Bus suspend Event\n");
- if (iotg->otg.default_a == 1) {
- if (iotg->hsm.b_bus_suspend_vld == 2) {
- iotg->hsm.b_bus_suspend = 1;
- iotg->hsm.b_bus_suspend_vld = 0;
- flag = 1;
- } else {
- iotg->hsm.b_bus_suspend_vld++;
- flag = 0;
- }
- } else {
- if (iotg->hsm.a_bus_suspend == 0) {
- iotg->hsm.a_bus_suspend = 1;
- flag = 1;
- }
- }
- break;
- case MID_OTG_NOTIFY_CRESUME:
- dev_dbg(lnw->dev, "Lnw OTG Notify Client Bus resume Event\n");
- if (iotg->otg.default_a == 0)
- iotg->hsm.a_bus_suspend = 0;
- flag = 0;
- break;
- case MID_OTG_NOTIFY_HOSTADD:
- dev_dbg(lnw->dev, "Lnw OTG Nofity Host Driver Add\n");
- flag = 1;
- break;
- case MID_OTG_NOTIFY_HOSTREMOVE:
- dev_dbg(lnw->dev, "Lnw OTG Nofity Host Driver remove\n");
- flag = 1;
- break;
- case MID_OTG_NOTIFY_CLIENTADD:
- dev_dbg(lnw->dev, "Lnw OTG Nofity Client Driver Add\n");
- flag = 1;
- break;
- case MID_OTG_NOTIFY_CLIENTREMOVE:
- dev_dbg(lnw->dev, "Lnw OTG Nofity Client Driver remove\n");
- flag = 1;
- break;
- default:
- dev_dbg(lnw->dev, "Lnw OTG Nofity unknown notify message\n");
- return NOTIFY_DONE;
- }
-
- if (flag)
- langwell_update_transceiver();
-
- return NOTIFY_OK;
-}
-
-static void langwell_otg_work(struct work_struct *work)
-{
- struct langwell_otg *lnw;
- struct intel_mid_otg_xceiv *iotg;
- int retval;
- struct pci_dev *pdev;
-
- lnw = container_of(work, struct langwell_otg, work);
- iotg = &lnw->iotg;
- pdev = to_pci_dev(lnw->dev);
-
- dev_dbg(lnw->dev, "%s: old state = %s\n", __func__,
- otg_state_string(iotg->otg.state));
-
- switch (iotg->otg.state) {
- case OTG_STATE_UNDEFINED:
- case OTG_STATE_B_IDLE:
- if (!iotg->hsm.id) {
- langwell_otg_del_timer(b_srp_init_tmr);
- del_timer_sync(&lnw->hsm_timer);
-
- iotg->otg.default_a = 1;
- iotg->hsm.a_srp_det = 0;
-
- langwell_otg_chrg_vbus(0);
- set_host_mode();
- langwell_otg_phy_low_power(1);
-
- iotg->otg.state = OTG_STATE_A_IDLE;
- langwell_update_transceiver();
- } else if (iotg->hsm.b_sess_vld) {
- langwell_otg_del_timer(b_srp_init_tmr);
- del_timer_sync(&lnw->hsm_timer);
- iotg->hsm.b_sess_end = 0;
- iotg->hsm.a_bus_suspend = 0;
- langwell_otg_chrg_vbus(0);
-
- if (lnw->iotg.start_peripheral) {
- lnw->iotg.start_peripheral(&lnw->iotg);
- iotg->otg.state = OTG_STATE_B_PERIPHERAL;
- } else
- dev_dbg(lnw->dev, "client driver not loaded\n");
-
- } else if (iotg->hsm.b_srp_init_tmout) {
- iotg->hsm.b_srp_init_tmout = 0;
- dev_warn(lnw->dev, "SRP init timeout\n");
- } else if (iotg->hsm.b_srp_fail_tmout) {
- iotg->hsm.b_srp_fail_tmout = 0;
- iotg->hsm.b_bus_req = 0;
-
-			/* No silent failure */
- langwell_otg_nsf_msg(6);
- } else if (iotg->hsm.b_bus_req && iotg->hsm.b_sess_end) {
- del_timer_sync(&lnw->hsm_timer);
- /* workaround for b_se0_srp detection */
- retval = langwell_otg_check_se0_srp(0);
- if (retval) {
- iotg->hsm.b_bus_req = 0;
- dev_dbg(lnw->dev, "LS isn't SE0, try later\n");
- } else {
- /* clear the PHCD before start srp */
- langwell_otg_phy_low_power(0);
-
- /* Start SRP */
- langwell_otg_add_timer(b_srp_init_tmr);
- iotg->otg.start_srp(&iotg->otg);
- langwell_otg_del_timer(b_srp_init_tmr);
- langwell_otg_add_ktimer(TB_SRP_FAIL_TMR);
-
- /* reset PHY low power mode here */
- langwell_otg_phy_low_power_wait(1);
- }
- }
- break;
- case OTG_STATE_B_SRP_INIT:
- if (!iotg->hsm.id) {
- iotg->otg.default_a = 1;
- iotg->hsm.a_srp_det = 0;
-
- /* Turn off VBus */
- iotg->otg.set_vbus(&iotg->otg, false);
- langwell_otg_chrg_vbus(0);
- set_host_mode();
- langwell_otg_phy_low_power(1);
- iotg->otg.state = OTG_STATE_A_IDLE;
- langwell_update_transceiver();
- } else if (iotg->hsm.b_sess_vld) {
- langwell_otg_chrg_vbus(0);
- if (lnw->iotg.start_peripheral) {
- lnw->iotg.start_peripheral(&lnw->iotg);
- iotg->otg.state = OTG_STATE_B_PERIPHERAL;
- } else
- dev_dbg(lnw->dev, "client driver not loaded\n");
- }
- break;
- case OTG_STATE_B_PERIPHERAL:
- if (!iotg->hsm.id) {
- iotg->otg.default_a = 1;
- iotg->hsm.a_srp_det = 0;
-
- langwell_otg_chrg_vbus(0);
-
- if (lnw->iotg.stop_peripheral)
- lnw->iotg.stop_peripheral(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "client driver has been removed.\n");
-
- set_host_mode();
- langwell_otg_phy_low_power(1);
- iotg->otg.state = OTG_STATE_A_IDLE;
- langwell_update_transceiver();
- } else if (!iotg->hsm.b_sess_vld) {
- iotg->hsm.b_hnp_enable = 0;
-
- if (lnw->iotg.stop_peripheral)
- lnw->iotg.stop_peripheral(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "client driver has been removed.\n");
-
- iotg->otg.state = OTG_STATE_B_IDLE;
- } else if (iotg->hsm.b_bus_req && iotg->otg.gadget &&
- iotg->otg.gadget->b_hnp_enable &&
- iotg->hsm.a_bus_suspend) {
-
- if (lnw->iotg.stop_peripheral)
- lnw->iotg.stop_peripheral(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "client driver has been removed.\n");
-
- langwell_otg_HAAR(1);
- iotg->hsm.a_conn = 0;
-
- if (lnw->iotg.start_host) {
- lnw->iotg.start_host(&lnw->iotg);
- iotg->otg.state = OTG_STATE_B_WAIT_ACON;
- } else
- dev_dbg(lnw->dev,
- "host driver not loaded.\n");
-
- iotg->hsm.a_bus_resume = 0;
- langwell_otg_add_ktimer(TB_ASE0_BRST_TMR);
- }
- break;
-
- case OTG_STATE_B_WAIT_ACON:
- if (!iotg->hsm.id) {
- /* delete hsm timer for b_ase0_brst_tmr */
- del_timer_sync(&lnw->hsm_timer);
-
- iotg->otg.default_a = 1;
- iotg->hsm.a_srp_det = 0;
-
- langwell_otg_chrg_vbus(0);
-
- langwell_otg_HAAR(0);
- if (lnw->iotg.stop_host)
- lnw->iotg.stop_host(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "host driver has been removed.\n");
-
- set_host_mode();
- langwell_otg_phy_low_power(1);
- iotg->otg.state = OTG_STATE_A_IDLE;
- langwell_update_transceiver();
- } else if (!iotg->hsm.b_sess_vld) {
- /* delete hsm timer for b_ase0_brst_tmr */
- del_timer_sync(&lnw->hsm_timer);
-
- iotg->hsm.b_hnp_enable = 0;
- iotg->hsm.b_bus_req = 0;
-
- langwell_otg_chrg_vbus(0);
- langwell_otg_HAAR(0);
-
- if (lnw->iotg.stop_host)
- lnw->iotg.stop_host(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "host driver has been removed.\n");
-
- set_client_mode();
- langwell_otg_phy_low_power(1);
- iotg->otg.state = OTG_STATE_B_IDLE;
- } else if (iotg->hsm.a_conn) {
- /* delete hsm timer for b_ase0_brst_tmr */
- del_timer_sync(&lnw->hsm_timer);
-
- langwell_otg_HAAR(0);
- iotg->otg.state = OTG_STATE_B_HOST;
- langwell_update_transceiver();
- } else if (iotg->hsm.a_bus_resume ||
- iotg->hsm.b_ase0_brst_tmout) {
- /* delete hsm timer for b_ase0_brst_tmr */
- del_timer_sync(&lnw->hsm_timer);
-
- langwell_otg_HAAR(0);
- langwell_otg_nsf_msg(7);
-
- if (lnw->iotg.stop_host)
- lnw->iotg.stop_host(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "host driver has been removed.\n");
-
- iotg->hsm.a_bus_suspend = 0;
- iotg->hsm.b_bus_req = 0;
-
- if (lnw->iotg.start_peripheral)
- lnw->iotg.start_peripheral(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "client driver not loaded.\n");
-
- iotg->otg.state = OTG_STATE_B_PERIPHERAL;
- }
- break;
-
- case OTG_STATE_B_HOST:
- if (!iotg->hsm.id) {
- iotg->otg.default_a = 1;
- iotg->hsm.a_srp_det = 0;
-
- langwell_otg_chrg_vbus(0);
-
- if (lnw->iotg.stop_host)
- lnw->iotg.stop_host(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "host driver has been removed.\n");
-
- set_host_mode();
- langwell_otg_phy_low_power(1);
- iotg->otg.state = OTG_STATE_A_IDLE;
- langwell_update_transceiver();
- } else if (!iotg->hsm.b_sess_vld) {
- iotg->hsm.b_hnp_enable = 0;
- iotg->hsm.b_bus_req = 0;
-
- langwell_otg_chrg_vbus(0);
- if (lnw->iotg.stop_host)
- lnw->iotg.stop_host(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "host driver has been removed.\n");
-
- set_client_mode();
- langwell_otg_phy_low_power(1);
- iotg->otg.state = OTG_STATE_B_IDLE;
- } else if ((!iotg->hsm.b_bus_req) ||
- (!iotg->hsm.a_conn)) {
- iotg->hsm.b_bus_req = 0;
- langwell_otg_loc_sof(0);
-
- if (lnw->iotg.stop_host)
- lnw->iotg.stop_host(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "host driver has been removed.\n");
-
- iotg->hsm.a_bus_suspend = 0;
-
- if (lnw->iotg.start_peripheral)
- lnw->iotg.start_peripheral(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "client driver not loaded.\n");
-
- iotg->otg.state = OTG_STATE_B_PERIPHERAL;
- }
- break;
-
- case OTG_STATE_A_IDLE:
- iotg->otg.default_a = 1;
- if (iotg->hsm.id) {
- iotg->otg.default_a = 0;
- iotg->hsm.b_bus_req = 0;
- iotg->hsm.vbus_srp_up = 0;
-
- langwell_otg_chrg_vbus(0);
- set_client_mode();
- langwell_otg_phy_low_power(1);
- iotg->otg.state = OTG_STATE_B_IDLE;
- langwell_update_transceiver();
- } else if (!iotg->hsm.a_bus_drop &&
- (iotg->hsm.a_srp_det || iotg->hsm.a_bus_req)) {
- langwell_otg_phy_low_power(0);
-
- /* Turn on VBus */
- iotg->otg.set_vbus(&iotg->otg, true);
-
- iotg->hsm.vbus_srp_up = 0;
- iotg->hsm.a_wait_vrise_tmout = 0;
- langwell_otg_add_timer(a_wait_vrise_tmr);
- iotg->otg.state = OTG_STATE_A_WAIT_VRISE;
- langwell_update_transceiver();
- } else if (!iotg->hsm.a_bus_drop && iotg->hsm.a_sess_vld) {
- iotg->hsm.vbus_srp_up = 1;
- } else if (!iotg->hsm.a_sess_vld && iotg->hsm.vbus_srp_up) {
- msleep(10);
- langwell_otg_phy_low_power(0);
-
- /* Turn on VBus */
- iotg->otg.set_vbus(&iotg->otg, true);
- iotg->hsm.a_srp_det = 1;
- iotg->hsm.vbus_srp_up = 0;
- iotg->hsm.a_wait_vrise_tmout = 0;
- langwell_otg_add_timer(a_wait_vrise_tmr);
- iotg->otg.state = OTG_STATE_A_WAIT_VRISE;
- langwell_update_transceiver();
- } else if (!iotg->hsm.a_sess_vld &&
- !iotg->hsm.vbus_srp_up) {
- langwell_otg_phy_low_power(1);
- }
- break;
- case OTG_STATE_A_WAIT_VRISE:
- if (iotg->hsm.id) {
- langwell_otg_del_timer(a_wait_vrise_tmr);
- iotg->hsm.b_bus_req = 0;
- iotg->otg.default_a = 0;
-
- /* Turn off VBus */
- iotg->otg.set_vbus(&iotg->otg, false);
- set_client_mode();
- langwell_otg_phy_low_power_wait(1);
- iotg->otg.state = OTG_STATE_B_IDLE;
- } else if (iotg->hsm.a_vbus_vld) {
- langwell_otg_del_timer(a_wait_vrise_tmr);
- iotg->hsm.b_conn = 0;
- if (lnw->iotg.start_host)
- lnw->iotg.start_host(&lnw->iotg);
- else {
- dev_dbg(lnw->dev, "host driver not loaded.\n");
- break;
- }
-
- langwell_otg_add_ktimer(TA_WAIT_BCON_TMR);
- iotg->otg.state = OTG_STATE_A_WAIT_BCON;
- } else if (iotg->hsm.a_wait_vrise_tmout) {
- iotg->hsm.b_conn = 0;
- if (iotg->hsm.a_vbus_vld) {
- if (lnw->iotg.start_host)
- lnw->iotg.start_host(&lnw->iotg);
- else {
- dev_dbg(lnw->dev,
- "host driver not loaded.\n");
- break;
- }
- langwell_otg_add_ktimer(TA_WAIT_BCON_TMR);
- iotg->otg.state = OTG_STATE_A_WAIT_BCON;
- } else {
-
- /* Turn off VBus */
- iotg->otg.set_vbus(&iotg->otg, false);
- langwell_otg_phy_low_power_wait(1);
- iotg->otg.state = OTG_STATE_A_VBUS_ERR;
- }
- }
- break;
- case OTG_STATE_A_WAIT_BCON:
- if (iotg->hsm.id) {
- /* delete hsm timer for a_wait_bcon_tmr */
- del_timer_sync(&lnw->hsm_timer);
-
- iotg->otg.default_a = 0;
- iotg->hsm.b_bus_req = 0;
-
- if (lnw->iotg.stop_host)
- lnw->iotg.stop_host(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "host driver has been removed.\n");
-
- /* Turn off VBus */
- iotg->otg.set_vbus(&iotg->otg, false);
- set_client_mode();
- langwell_otg_phy_low_power_wait(1);
- iotg->otg.state = OTG_STATE_B_IDLE;
- langwell_update_transceiver();
- } else if (!iotg->hsm.a_vbus_vld) {
- /* delete hsm timer for a_wait_bcon_tmr */
- del_timer_sync(&lnw->hsm_timer);
-
- if (lnw->iotg.stop_host)
- lnw->iotg.stop_host(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "host driver has been removed.\n");
-
- /* Turn off VBus */
- iotg->otg.set_vbus(&iotg->otg, false);
- langwell_otg_phy_low_power_wait(1);
- iotg->otg.state = OTG_STATE_A_VBUS_ERR;
- } else if (iotg->hsm.a_bus_drop ||
- (iotg->hsm.a_wait_bcon_tmout &&
- !iotg->hsm.a_bus_req)) {
- /* delete hsm timer for a_wait_bcon_tmr */
- del_timer_sync(&lnw->hsm_timer);
-
- if (lnw->iotg.stop_host)
- lnw->iotg.stop_host(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "host driver has been removed.\n");
-
- /* Turn off VBus */
- iotg->otg.set_vbus(&iotg->otg, false);
- iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
- } else if (iotg->hsm.b_conn) {
- /* delete hsm timer for a_wait_bcon_tmr */
- del_timer_sync(&lnw->hsm_timer);
-
- iotg->hsm.a_suspend_req = 0;
- iotg->otg.state = OTG_STATE_A_HOST;
- if (iotg->hsm.a_srp_det && iotg->otg.host &&
- !iotg->otg.host->b_hnp_enable) {
- /* SRP capable peripheral-only device */
- iotg->hsm.a_bus_req = 1;
- iotg->hsm.a_srp_det = 0;
- } else if (!iotg->hsm.a_bus_req && iotg->otg.host &&
- iotg->otg.host->b_hnp_enable) {
- /* It is not safe enough to do a fast
- * transition from A_WAIT_BCON to
- * A_SUSPEND */
- msleep(10000);
- if (iotg->hsm.a_bus_req)
- break;
-
- if (request_irq(pdev->irq,
- otg_dummy_irq, IRQF_SHARED,
- driver_name, iotg->base) != 0) {
- dev_dbg(lnw->dev,
- "request interrupt %d fail\n",
- pdev->irq);
- }
-
- langwell_otg_HABA(1);
- iotg->hsm.b_bus_resume = 0;
- iotg->hsm.a_aidl_bdis_tmout = 0;
-
- langwell_otg_loc_sof(0);
- /* clear PHCD to enable HW timer */
- langwell_otg_phy_low_power(0);
- langwell_otg_add_timer(a_aidl_bdis_tmr);
- iotg->otg.state = OTG_STATE_A_SUSPEND;
- } else if (!iotg->hsm.a_bus_req && iotg->otg.host &&
- !iotg->otg.host->b_hnp_enable) {
- if (lnw->iotg.stop_host)
- lnw->iotg.stop_host(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "host driver removed.\n");
-
- /* Turn off VBus */
- iotg->otg.set_vbus(&iotg->otg, false);
- iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
- }
- }
- break;
- case OTG_STATE_A_HOST:
- if (iotg->hsm.id) {
- iotg->otg.default_a = 0;
- iotg->hsm.b_bus_req = 0;
-
- if (lnw->iotg.stop_host)
- lnw->iotg.stop_host(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "host driver has been removed.\n");
-
- /* Turn off VBus */
- iotg->otg.set_vbus(&iotg->otg, false);
- set_client_mode();
- langwell_otg_phy_low_power_wait(1);
- iotg->otg.state = OTG_STATE_B_IDLE;
- langwell_update_transceiver();
- } else if (iotg->hsm.a_bus_drop ||
- (iotg->otg.host &&
- !iotg->otg.host->b_hnp_enable &&
- !iotg->hsm.a_bus_req)) {
- if (lnw->iotg.stop_host)
- lnw->iotg.stop_host(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "host driver has been removed.\n");
-
- /* Turn off VBus */
- iotg->otg.set_vbus(&iotg->otg, false);
- iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
- } else if (!iotg->hsm.a_vbus_vld) {
- if (lnw->iotg.stop_host)
- lnw->iotg.stop_host(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "host driver has been removed.\n");
-
- /* Turn off VBus */
- iotg->otg.set_vbus(&iotg->otg, false);
- langwell_otg_phy_low_power_wait(1);
- iotg->otg.state = OTG_STATE_A_VBUS_ERR;
- } else if (iotg->otg.host &&
- iotg->otg.host->b_hnp_enable &&
- !iotg->hsm.a_bus_req) {
-			/* Set HABA to enable hardware assistance to signal
-			 * A-connect after receiving B-disconnect. Hardware
-			 * will then set client mode and enable URE, SLE and
-			 * PCE after the assistance. otg_dummy_irq is used to
-			 * clear these interrupts when the client driver is
-			 * not resumed.
-			 */
- if (request_irq(pdev->irq, otg_dummy_irq, IRQF_SHARED,
- driver_name, iotg->base) != 0) {
- dev_dbg(lnw->dev,
- "request interrupt %d failed\n",
- pdev->irq);
- }
-
- /* set HABA */
- langwell_otg_HABA(1);
- iotg->hsm.b_bus_resume = 0;
- iotg->hsm.a_aidl_bdis_tmout = 0;
- langwell_otg_loc_sof(0);
- /* clear PHCD to enable HW timer */
- langwell_otg_phy_low_power(0);
- langwell_otg_add_timer(a_aidl_bdis_tmr);
- iotg->otg.state = OTG_STATE_A_SUSPEND;
- } else if (!iotg->hsm.b_conn || !iotg->hsm.a_bus_req) {
- langwell_otg_add_ktimer(TA_WAIT_BCON_TMR);
- iotg->otg.state = OTG_STATE_A_WAIT_BCON;
- }
- break;
- case OTG_STATE_A_SUSPEND:
- if (iotg->hsm.id) {
- langwell_otg_del_timer(a_aidl_bdis_tmr);
- langwell_otg_HABA(0);
- free_irq(pdev->irq, iotg->base);
- iotg->otg.default_a = 0;
- iotg->hsm.b_bus_req = 0;
-
- if (lnw->iotg.stop_host)
- lnw->iotg.stop_host(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "host driver has been removed.\n");
-
- /* Turn off VBus */
- iotg->otg.set_vbus(&iotg->otg, false);
- set_client_mode();
- langwell_otg_phy_low_power(1);
- iotg->otg.state = OTG_STATE_B_IDLE;
- langwell_update_transceiver();
- } else if (iotg->hsm.a_bus_req ||
- iotg->hsm.b_bus_resume) {
- langwell_otg_del_timer(a_aidl_bdis_tmr);
- langwell_otg_HABA(0);
- free_irq(pdev->irq, iotg->base);
- iotg->hsm.a_suspend_req = 0;
- langwell_otg_loc_sof(1);
- iotg->otg.state = OTG_STATE_A_HOST;
- } else if (iotg->hsm.a_aidl_bdis_tmout ||
- iotg->hsm.a_bus_drop) {
- langwell_otg_del_timer(a_aidl_bdis_tmr);
- langwell_otg_HABA(0);
- free_irq(pdev->irq, iotg->base);
- if (lnw->iotg.stop_host)
- lnw->iotg.stop_host(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "host driver has been removed.\n");
-
- /* Turn off VBus */
- iotg->otg.set_vbus(&iotg->otg, false);
- iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
- } else if (!iotg->hsm.b_conn && iotg->otg.host &&
- iotg->otg.host->b_hnp_enable) {
- langwell_otg_del_timer(a_aidl_bdis_tmr);
- langwell_otg_HABA(0);
- free_irq(pdev->irq, iotg->base);
-
- if (lnw->iotg.stop_host)
- lnw->iotg.stop_host(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "host driver has been removed.\n");
-
- iotg->hsm.b_bus_suspend = 0;
- iotg->hsm.b_bus_suspend_vld = 0;
-
- /* msleep(200); */
- if (lnw->iotg.start_peripheral)
- lnw->iotg.start_peripheral(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "client driver not loaded.\n");
-
- langwell_otg_add_ktimer(TB_BUS_SUSPEND_TMR);
- iotg->otg.state = OTG_STATE_A_PERIPHERAL;
- break;
- } else if (!iotg->hsm.a_vbus_vld) {
- langwell_otg_del_timer(a_aidl_bdis_tmr);
- langwell_otg_HABA(0);
- free_irq(pdev->irq, iotg->base);
- if (lnw->iotg.stop_host)
- lnw->iotg.stop_host(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "host driver has been removed.\n");
-
- /* Turn off VBus */
- iotg->otg.set_vbus(&iotg->otg, false);
- langwell_otg_phy_low_power_wait(1);
- iotg->otg.state = OTG_STATE_A_VBUS_ERR;
- }
- break;
- case OTG_STATE_A_PERIPHERAL:
- if (iotg->hsm.id) {
- /* delete hsm timer for b_bus_suspend_tmr */
- del_timer_sync(&lnw->hsm_timer);
- iotg->otg.default_a = 0;
- iotg->hsm.b_bus_req = 0;
- if (lnw->iotg.stop_peripheral)
- lnw->iotg.stop_peripheral(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "client driver has been removed.\n");
-
- /* Turn off VBus */
- iotg->otg.set_vbus(&iotg->otg, false);
- set_client_mode();
- langwell_otg_phy_low_power_wait(1);
- iotg->otg.state = OTG_STATE_B_IDLE;
- langwell_update_transceiver();
- } else if (!iotg->hsm.a_vbus_vld) {
- /* delete hsm timer for b_bus_suspend_tmr */
- del_timer_sync(&lnw->hsm_timer);
-
- if (lnw->iotg.stop_peripheral)
- lnw->iotg.stop_peripheral(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "client driver has been removed.\n");
-
- /* Turn off VBus */
- iotg->otg.set_vbus(&iotg->otg, false);
- langwell_otg_phy_low_power_wait(1);
- iotg->otg.state = OTG_STATE_A_VBUS_ERR;
- } else if (iotg->hsm.a_bus_drop) {
- /* delete hsm timer for b_bus_suspend_tmr */
- del_timer_sync(&lnw->hsm_timer);
-
- if (lnw->iotg.stop_peripheral)
- lnw->iotg.stop_peripheral(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "client driver has been removed.\n");
-
- /* Turn off VBus */
- iotg->otg.set_vbus(&iotg->otg, false);
- iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
- } else if (iotg->hsm.b_bus_suspend) {
- /* delete hsm timer for b_bus_suspend_tmr */
- del_timer_sync(&lnw->hsm_timer);
-
- if (lnw->iotg.stop_peripheral)
- lnw->iotg.stop_peripheral(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "client driver has been removed.\n");
-
- if (lnw->iotg.start_host)
- lnw->iotg.start_host(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "host driver not loaded.\n");
- langwell_otg_add_ktimer(TA_WAIT_BCON_TMR);
- iotg->otg.state = OTG_STATE_A_WAIT_BCON;
- } else if (iotg->hsm.b_bus_suspend_tmout) {
- u32 val;
- val = readl(lnw->iotg.base + CI_PORTSC1);
- if (!(val & PORTSC_SUSP))
- break;
-
- if (lnw->iotg.stop_peripheral)
- lnw->iotg.stop_peripheral(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "client driver has been removed.\n");
-
- if (lnw->iotg.start_host)
- lnw->iotg.start_host(&lnw->iotg);
- else
- dev_dbg(lnw->dev,
- "host driver not loaded.\n");
- langwell_otg_add_ktimer(TA_WAIT_BCON_TMR);
- iotg->otg.state = OTG_STATE_A_WAIT_BCON;
- }
- break;
- case OTG_STATE_A_VBUS_ERR:
- if (iotg->hsm.id) {
- iotg->otg.default_a = 0;
- iotg->hsm.a_clr_err = 0;
- iotg->hsm.a_srp_det = 0;
- set_client_mode();
- langwell_otg_phy_low_power(1);
- iotg->otg.state = OTG_STATE_B_IDLE;
- langwell_update_transceiver();
- } else if (iotg->hsm.a_clr_err) {
- iotg->hsm.a_clr_err = 0;
- iotg->hsm.a_srp_det = 0;
- reset_otg();
- init_hsm();
- if (iotg->otg.state == OTG_STATE_A_IDLE)
- langwell_update_transceiver();
- } else {
-			/* FW clears the PHCD bit when any VBus event
-			 * is detected. Set PHCD to 1 again. */
- langwell_otg_phy_low_power(1);
- }
- break;
- case OTG_STATE_A_WAIT_VFALL:
- if (iotg->hsm.id) {
- iotg->otg.default_a = 0;
- set_client_mode();
- langwell_otg_phy_low_power(1);
- iotg->otg.state = OTG_STATE_B_IDLE;
- langwell_update_transceiver();
- } else if (iotg->hsm.a_bus_req) {
-
- /* Turn on VBus */
- iotg->otg.set_vbus(&iotg->otg, true);
- iotg->hsm.a_wait_vrise_tmout = 0;
- langwell_otg_add_timer(a_wait_vrise_tmr);
- iotg->otg.state = OTG_STATE_A_WAIT_VRISE;
- } else if (!iotg->hsm.a_sess_vld) {
- iotg->hsm.a_srp_det = 0;
- set_host_mode();
- langwell_otg_phy_low_power(1);
- iotg->otg.state = OTG_STATE_A_IDLE;
- }
- break;
- default:
- ;
- }
-
- dev_dbg(lnw->dev, "%s: new state = %s\n", __func__,
- otg_state_string(iotg->otg.state));
-}
-
-static ssize_t
-show_registers(struct device *_dev, struct device_attribute *attr, char *buf)
-{
- struct langwell_otg *lnw = the_transceiver;
- char *next;
- unsigned size, t;
-
- next = buf;
- size = PAGE_SIZE;
-
- t = scnprintf(next, size,
- "\n"
- "USBCMD = 0x%08x\n"
- "USBSTS = 0x%08x\n"
- "USBINTR = 0x%08x\n"
- "ASYNCLISTADDR = 0x%08x\n"
- "PORTSC1 = 0x%08x\n"
- "HOSTPC1 = 0x%08x\n"
- "OTGSC = 0x%08x\n"
- "USBMODE = 0x%08x\n",
- readl(lnw->iotg.base + 0x30),
- readl(lnw->iotg.base + 0x34),
- readl(lnw->iotg.base + 0x38),
- readl(lnw->iotg.base + 0x48),
- readl(lnw->iotg.base + 0x74),
- readl(lnw->iotg.base + 0xb4),
- readl(lnw->iotg.base + 0xf4),
- readl(lnw->iotg.base + 0xf8)
- );
- size -= t;
- next += t;
-
- return PAGE_SIZE - size;
-}
-static DEVICE_ATTR(registers, S_IRUGO, show_registers, NULL);
-
-static ssize_t
-show_hsm(struct device *_dev, struct device_attribute *attr, char *buf)
-{
- struct langwell_otg *lnw = the_transceiver;
- struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
- char *next;
- unsigned size, t;
-
- next = buf;
- size = PAGE_SIZE;
-
- if (iotg->otg.host)
- iotg->hsm.a_set_b_hnp_en = iotg->otg.host->b_hnp_enable;
-
- if (iotg->otg.gadget)
- iotg->hsm.b_hnp_enable = iotg->otg.gadget->b_hnp_enable;
-
- t = scnprintf(next, size,
- "\n"
- "current state = %s\n"
- "a_bus_resume = \t%d\n"
- "a_bus_suspend = \t%d\n"
- "a_conn = \t%d\n"
- "a_sess_vld = \t%d\n"
- "a_srp_det = \t%d\n"
- "a_vbus_vld = \t%d\n"
- "b_bus_resume = \t%d\n"
- "b_bus_suspend = \t%d\n"
- "b_conn = \t%d\n"
- "b_se0_srp = \t%d\n"
- "b_sess_end = \t%d\n"
- "b_sess_vld = \t%d\n"
- "id = \t%d\n"
- "a_set_b_hnp_en = \t%d\n"
- "b_srp_done = \t%d\n"
- "b_hnp_enable = \t%d\n"
- "a_wait_vrise_tmout = \t%d\n"
- "a_wait_bcon_tmout = \t%d\n"
- "a_aidl_bdis_tmout = \t%d\n"
- "b_ase0_brst_tmout = \t%d\n"
- "a_bus_drop = \t%d\n"
- "a_bus_req = \t%d\n"
- "a_clr_err = \t%d\n"
- "a_suspend_req = \t%d\n"
- "b_bus_req = \t%d\n"
- "b_bus_suspend_tmout = \t%d\n"
- "b_bus_suspend_vld = \t%d\n",
- otg_state_string(iotg->otg.state),
- iotg->hsm.a_bus_resume,
- iotg->hsm.a_bus_suspend,
- iotg->hsm.a_conn,
- iotg->hsm.a_sess_vld,
- iotg->hsm.a_srp_det,
- iotg->hsm.a_vbus_vld,
- iotg->hsm.b_bus_resume,
- iotg->hsm.b_bus_suspend,
- iotg->hsm.b_conn,
- iotg->hsm.b_se0_srp,
- iotg->hsm.b_sess_end,
- iotg->hsm.b_sess_vld,
- iotg->hsm.id,
- iotg->hsm.a_set_b_hnp_en,
- iotg->hsm.b_srp_done,
- iotg->hsm.b_hnp_enable,
- iotg->hsm.a_wait_vrise_tmout,
- iotg->hsm.a_wait_bcon_tmout,
- iotg->hsm.a_aidl_bdis_tmout,
- iotg->hsm.b_ase0_brst_tmout,
- iotg->hsm.a_bus_drop,
- iotg->hsm.a_bus_req,
- iotg->hsm.a_clr_err,
- iotg->hsm.a_suspend_req,
- iotg->hsm.b_bus_req,
- iotg->hsm.b_bus_suspend_tmout,
- iotg->hsm.b_bus_suspend_vld
- );
- size -= t;
- next += t;
-
- return PAGE_SIZE - size;
-}
-static DEVICE_ATTR(hsm, S_IRUGO, show_hsm, NULL);
-
-static ssize_t
-get_a_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct langwell_otg *lnw = the_transceiver;
- char *next;
- unsigned size, t;
-
- next = buf;
- size = PAGE_SIZE;
-
- t = scnprintf(next, size, "%d", lnw->iotg.hsm.a_bus_req);
- size -= t;
- next += t;
-
- return PAGE_SIZE - size;
-}
-
-static ssize_t
-set_a_bus_req(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct langwell_otg *lnw = the_transceiver;
- struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
-
- if (!iotg->otg.default_a)
- return -1;
- if (count > 2)
- return -1;
-
- if (buf[0] == '0') {
- iotg->hsm.a_bus_req = 0;
- dev_dbg(lnw->dev, "User request: a_bus_req = 0\n");
- } else if (buf[0] == '1') {
- /* If a_bus_drop is TRUE, a_bus_req can't be set */
- if (iotg->hsm.a_bus_drop)
- return -1;
- iotg->hsm.a_bus_req = 1;
- dev_dbg(lnw->dev, "User request: a_bus_req = 1\n");
- }
- if (spin_trylock(&lnw->wq_lock)) {
- langwell_update_transceiver();
- spin_unlock(&lnw->wq_lock);
- }
- return count;
-}
-static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUSR, get_a_bus_req, set_a_bus_req);
-
-static ssize_t
-get_a_bus_drop(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct langwell_otg *lnw = the_transceiver;
- char *next;
- unsigned size, t;
-
- next = buf;
- size = PAGE_SIZE;
-
- t = scnprintf(next, size, "%d", lnw->iotg.hsm.a_bus_drop);
- size -= t;
- next += t;
-
- return PAGE_SIZE - size;
-}
-
-static ssize_t
-set_a_bus_drop(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct langwell_otg *lnw = the_transceiver;
- struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
-
- if (!iotg->otg.default_a)
- return -1;
- if (count > 2)
- return -1;
-
- if (buf[0] == '0') {
- iotg->hsm.a_bus_drop = 0;
- dev_dbg(lnw->dev, "User request: a_bus_drop = 0\n");
- } else if (buf[0] == '1') {
- iotg->hsm.a_bus_drop = 1;
- iotg->hsm.a_bus_req = 0;
- dev_dbg(lnw->dev, "User request: a_bus_drop = 1\n");
- dev_dbg(lnw->dev, "User request: and a_bus_req = 0\n");
- }
- if (spin_trylock(&lnw->wq_lock)) {
- langwell_update_transceiver();
- spin_unlock(&lnw->wq_lock);
- }
- return count;
-}
-static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUSR, get_a_bus_drop, set_a_bus_drop);
-
-static ssize_t
-get_b_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
-{
- struct langwell_otg *lnw = the_transceiver;
- char *next;
- unsigned size, t;
-
- next = buf;
- size = PAGE_SIZE;
-
- t = scnprintf(next, size, "%d", lnw->iotg.hsm.b_bus_req);
- size -= t;
- next += t;
-
- return PAGE_SIZE - size;
-}
-
-static ssize_t
-set_b_bus_req(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct langwell_otg *lnw = the_transceiver;
- struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
-
- if (iotg->otg.default_a)
- return -1;
-
- if (count > 2)
- return -1;
-
- if (buf[0] == '0') {
- iotg->hsm.b_bus_req = 0;
- dev_dbg(lnw->dev, "User request: b_bus_req = 0\n");
- } else if (buf[0] == '1') {
- iotg->hsm.b_bus_req = 1;
- dev_dbg(lnw->dev, "User request: b_bus_req = 1\n");
- }
- if (spin_trylock(&lnw->wq_lock)) {
- langwell_update_transceiver();
- spin_unlock(&lnw->wq_lock);
- }
- return count;
-}
-static DEVICE_ATTR(b_bus_req, S_IRUGO | S_IWUSR, get_b_bus_req, set_b_bus_req);
-
-static ssize_t
-set_a_clr_err(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct langwell_otg *lnw = the_transceiver;
- struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
-
- if (!iotg->otg.default_a)
- return -1;
- if (count > 2)
- return -1;
-
- if (buf[0] == '1') {
- iotg->hsm.a_clr_err = 1;
- dev_dbg(lnw->dev, "User request: a_clr_err = 1\n");
- }
- if (spin_trylock(&lnw->wq_lock)) {
- langwell_update_transceiver();
- spin_unlock(&lnw->wq_lock);
- }
- return count;
-}
-static DEVICE_ATTR(a_clr_err, S_IWUSR, NULL, set_a_clr_err);
-
-static struct attribute *inputs_attrs[] = {
- &dev_attr_a_bus_req.attr,
- &dev_attr_a_bus_drop.attr,
- &dev_attr_b_bus_req.attr,
- &dev_attr_a_clr_err.attr,
- NULL,
-};
-
-static struct attribute_group debug_dev_attr_group = {
- .name = "inputs",
- .attrs = inputs_attrs,
-};
-
-static int langwell_otg_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
-{
- unsigned long resource, len;
- void __iomem *base = NULL;
- int retval;
- u32 val32;
- struct langwell_otg *lnw;
- char qname[] = "langwell_otg_queue";
-
- retval = 0;
- dev_dbg(&pdev->dev, "\notg controller is detected.\n");
- if (pci_enable_device(pdev) < 0) {
- retval = -ENODEV;
- goto done;
- }
-
- lnw = kzalloc(sizeof *lnw, GFP_KERNEL);
- if (lnw == NULL) {
- retval = -ENOMEM;
- goto done;
- }
- the_transceiver = lnw;
-
- /* control register: BAR 0 */
- resource = pci_resource_start(pdev, 0);
- len = pci_resource_len(pdev, 0);
- if (!request_mem_region(resource, len, driver_name)) {
- retval = -EBUSY;
- goto err;
- }
- lnw->region = 1;
-
- base = ioremap_nocache(resource, len);
- if (base == NULL) {
- retval = -EFAULT;
- goto err;
- }
- lnw->iotg.base = base;
-
- if (!request_mem_region(USBCFG_ADDR, USBCFG_LEN, driver_name)) {
- retval = -EBUSY;
- goto err;
- }
- lnw->cfg_region = 1;
-
- /* For the SCCB.USBCFG register */
- base = ioremap_nocache(USBCFG_ADDR, USBCFG_LEN);
- if (base == NULL) {
- retval = -EFAULT;
- goto err;
- }
- lnw->usbcfg = base;
-
- if (!pdev->irq) {
- dev_dbg(&pdev->dev, "No IRQ.\n");
- retval = -ENODEV;
- goto err;
- }
-
- lnw->qwork = create_singlethread_workqueue(qname);
- if (!lnw->qwork) {
- dev_dbg(&pdev->dev, "cannot create workqueue %s\n", qname);
- retval = -ENOMEM;
- goto err;
- }
- INIT_WORK(&lnw->work, langwell_otg_work);
-
- /* OTG common part */
- lnw->dev = &pdev->dev;
- lnw->iotg.otg.dev = lnw->dev;
- lnw->iotg.otg.label = driver_name;
- lnw->iotg.otg.set_host = langwell_otg_set_host;
- lnw->iotg.otg.set_peripheral = langwell_otg_set_peripheral;
- lnw->iotg.otg.set_power = langwell_otg_set_power;
- lnw->iotg.otg.set_vbus = langwell_otg_set_vbus;
- lnw->iotg.otg.start_srp = langwell_otg_start_srp;
- lnw->iotg.otg.state = OTG_STATE_UNDEFINED;
-
- if (otg_set_transceiver(&lnw->iotg.otg)) {
- dev_dbg(lnw->dev, "can't set transceiver\n");
- retval = -EBUSY;
- goto err;
- }
-
- reset_otg();
- init_hsm();
-
- spin_lock_init(&lnw->lock);
- spin_lock_init(&lnw->wq_lock);
- INIT_LIST_HEAD(&active_timers);
- retval = langwell_otg_init_timers(&lnw->iotg.hsm);
- if (retval) {
- dev_dbg(&pdev->dev, "Failed to init timers\n");
- goto err;
- }
-
- init_timer(&lnw->hsm_timer);
- ATOMIC_INIT_NOTIFIER_HEAD(&lnw->iotg.iotg_notifier);
-
- lnw->iotg_notifier.notifier_call = langwell_otg_iotg_notify;
-
- retval = intel_mid_otg_register_notifier(&lnw->iotg,
- &lnw->iotg_notifier);
- if (retval) {
- dev_dbg(lnw->dev, "Failed to register notifier\n");
- goto err;
- }
-
- if (request_irq(pdev->irq, otg_irq, IRQF_SHARED,
- driver_name, lnw) != 0) {
- dev_dbg(lnw->dev, "request interrupt %d failed\n", pdev->irq);
- retval = -EBUSY;
- goto err;
- }
-
- /* enable OTGSC int */
- val32 = OTGSC_DPIE | OTGSC_BSEIE | OTGSC_BSVIE |
- OTGSC_ASVIE | OTGSC_AVVIE | OTGSC_IDIE | OTGSC_IDPU;
- writel(val32, lnw->iotg.base + CI_OTGSC);
-
- retval = device_create_file(&pdev->dev, &dev_attr_registers);
- if (retval < 0) {
- dev_dbg(lnw->dev,
- "Can't register sysfs attribute: %d\n", retval);
- goto err;
- }
-
- retval = device_create_file(&pdev->dev, &dev_attr_hsm);
- if (retval < 0) {
- dev_dbg(lnw->dev, "Can't hsm sysfs attribute: %d\n", retval);
- goto err;
- }
-
- retval = sysfs_create_group(&pdev->dev.kobj, &debug_dev_attr_group);
- if (retval < 0) {
- dev_dbg(lnw->dev,
- "Can't register sysfs attr group: %d\n", retval);
- goto err;
- }
-
- if (lnw->iotg.otg.state == OTG_STATE_A_IDLE)
- langwell_update_transceiver();
-
- return 0;
-
-err:
- if (the_transceiver)
- langwell_otg_remove(pdev);
-done:
- return retval;
-}
-
-static void langwell_otg_remove(struct pci_dev *pdev)
-{
- struct langwell_otg *lnw = the_transceiver;
-
- if (lnw->qwork) {
- flush_workqueue(lnw->qwork);
- destroy_workqueue(lnw->qwork);
- }
- intel_mid_otg_unregister_notifier(&lnw->iotg, &lnw->iotg_notifier);
- langwell_otg_free_timers();
-
- /* disable OTGSC interrupt as OTGSC doesn't change in reset */
- writel(0, lnw->iotg.base + CI_OTGSC);
-
- if (pdev->irq)
- free_irq(pdev->irq, lnw);
- if (lnw->usbcfg)
- iounmap(lnw->usbcfg);
- if (lnw->cfg_region)
- release_mem_region(USBCFG_ADDR, USBCFG_LEN);
- if (lnw->iotg.base)
- iounmap(lnw->iotg.base);
- if (lnw->region)
- release_mem_region(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
-
- otg_set_transceiver(NULL);
- pci_disable_device(pdev);
- sysfs_remove_group(&pdev->dev.kobj, &debug_dev_attr_group);
- device_remove_file(&pdev->dev, &dev_attr_hsm);
- device_remove_file(&pdev->dev, &dev_attr_registers);
- kfree(lnw);
- lnw = NULL;
-}
-
-static void transceiver_suspend(struct pci_dev *pdev)
-{
- pci_save_state(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
- langwell_otg_phy_low_power(1);
-}
-
-static int langwell_otg_suspend(struct pci_dev *pdev, pm_message_t message)
-{
- struct langwell_otg *lnw = the_transceiver;
- struct intel_mid_otg_xceiv *iotg = &lnw->iotg;
- int ret = 0;
-
-	/* Disable OTG interrupts */
- langwell_otg_intr(0);
-
- if (pdev->irq)
- free_irq(pdev->irq, lnw);
-
- /* Prevent more otg_work */
- flush_workqueue(lnw->qwork);
- destroy_workqueue(lnw->qwork);
- lnw->qwork = NULL;
-
- /* start actions */
- switch (iotg->otg.state) {
- case OTG_STATE_A_WAIT_VFALL:
- iotg->otg.state = OTG_STATE_A_IDLE;
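-		/* fall through */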
- case OTG_STATE_A_IDLE:
- case OTG_STATE_B_IDLE:
- case OTG_STATE_A_VBUS_ERR:
- transceiver_suspend(pdev);
- break;
- case OTG_STATE_A_WAIT_VRISE:
- langwell_otg_del_timer(a_wait_vrise_tmr);
- iotg->hsm.a_srp_det = 0;
-
- /* Turn off VBus */
- iotg->otg.set_vbus(&iotg->otg, false);
- iotg->otg.state = OTG_STATE_A_IDLE;
- transceiver_suspend(pdev);
- break;
- case OTG_STATE_A_WAIT_BCON:
- del_timer_sync(&lnw->hsm_timer);
- if (lnw->iotg.stop_host)
- lnw->iotg.stop_host(&lnw->iotg);
- else
- dev_dbg(&pdev->dev, "host driver has been removed.\n");
-
- iotg->hsm.a_srp_det = 0;
-
- /* Turn off VBus */
- iotg->otg.set_vbus(&iotg->otg, false);
- iotg->otg.state = OTG_STATE_A_IDLE;
- transceiver_suspend(pdev);
- break;
- case OTG_STATE_A_HOST:
- if (lnw->iotg.stop_host)
- lnw->iotg.stop_host(&lnw->iotg);
- else
- dev_dbg(&pdev->dev, "host driver has been removed.\n");
-
- iotg->hsm.a_srp_det = 0;
-
- /* Turn off VBus */
- iotg->otg.set_vbus(&iotg->otg, false);
-
- iotg->otg.state = OTG_STATE_A_IDLE;
- transceiver_suspend(pdev);
- break;
- case OTG_STATE_A_SUSPEND:
- langwell_otg_del_timer(a_aidl_bdis_tmr);
- langwell_otg_HABA(0);
- if (lnw->iotg.stop_host)
- lnw->iotg.stop_host(&lnw->iotg);
- else
- dev_dbg(lnw->dev, "host driver has been removed.\n");
- iotg->hsm.a_srp_det = 0;
-
- /* Turn off VBus */
- iotg->otg.set_vbus(&iotg->otg, false);
- iotg->otg.state = OTG_STATE_A_IDLE;
- transceiver_suspend(pdev);
- break;
- case OTG_STATE_A_PERIPHERAL:
- del_timer_sync(&lnw->hsm_timer);
-
- if (lnw->iotg.stop_peripheral)
- lnw->iotg.stop_peripheral(&lnw->iotg);
- else
- dev_dbg(&pdev->dev,
- "client driver has been removed.\n");
- iotg->hsm.a_srp_det = 0;
-
- /* Turn off VBus */
- iotg->otg.set_vbus(&iotg->otg, false);
- iotg->otg.state = OTG_STATE_A_IDLE;
- transceiver_suspend(pdev);
- break;
- case OTG_STATE_B_HOST:
- if (lnw->iotg.stop_host)
- lnw->iotg.stop_host(&lnw->iotg);
- else
- dev_dbg(&pdev->dev, "host driver has been removed.\n");
- iotg->hsm.b_bus_req = 0;
- iotg->otg.state = OTG_STATE_B_IDLE;
- transceiver_suspend(pdev);
- break;
- case OTG_STATE_B_PERIPHERAL:
- if (lnw->iotg.stop_peripheral)
- lnw->iotg.stop_peripheral(&lnw->iotg);
- else
- dev_dbg(&pdev->dev,
- "client driver has been removed.\n");
- iotg->otg.state = OTG_STATE_B_IDLE;
- transceiver_suspend(pdev);
- break;
- case OTG_STATE_B_WAIT_ACON:
- /* delete hsm timer for b_ase0_brst_tmr */
- del_timer_sync(&lnw->hsm_timer);
-
- langwell_otg_HAAR(0);
-
- if (lnw->iotg.stop_host)
- lnw->iotg.stop_host(&lnw->iotg);
- else
- dev_dbg(&pdev->dev, "host driver has been removed.\n");
- iotg->hsm.b_bus_req = 0;
- iotg->otg.state = OTG_STATE_B_IDLE;
- transceiver_suspend(pdev);
- break;
- default:
- dev_dbg(lnw->dev, "error state before suspend\n");
- break;
- }
-
- return ret;
-}
-
-static void transceiver_resume(struct pci_dev *pdev)
-{
- pci_restore_state(pdev);
- pci_set_power_state(pdev, PCI_D0);
-}
-
-static int langwell_otg_resume(struct pci_dev *pdev)
-{
- struct langwell_otg *lnw = the_transceiver;
- int ret = 0;
-
- transceiver_resume(pdev);
-
- lnw->qwork = create_singlethread_workqueue("langwell_otg_queue");
- if (!lnw->qwork) {
- dev_dbg(&pdev->dev, "cannot create langwell otg workqueuen");
- ret = -ENOMEM;
- goto error;
- }
-
- if (request_irq(pdev->irq, otg_irq, IRQF_SHARED,
- driver_name, lnw) != 0) {
- dev_dbg(&pdev->dev, "request interrupt %d failed\n", pdev->irq);
- ret = -EBUSY;
- goto error;
- }
-
- /* enable OTG interrupts */
- langwell_otg_intr(1);
-
- update_hsm();
-
- langwell_update_transceiver();
-
- return ret;
-error:
- langwell_otg_intr(0);
- transceiver_suspend(pdev);
- return ret;
-}
-
-static int __init langwell_otg_init(void)
-{
- return pci_register_driver(&otg_pci_driver);
-}
-module_init(langwell_otg_init);
-
-static void __exit langwell_otg_cleanup(void)
-{
- pci_unregister_driver(&otg_pci_driver);
-}
-module_exit(langwell_otg_cleanup);
diff --git a/drivers/usb/otg/mv_otg.c b/drivers/usb/otg/mv_otg.c
new file mode 100644
index 0000000..b5fbe14
--- /dev/null
+++ b/drivers/usb/otg/mv_otg.c
@@ -0,0 +1,959 @@
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ * Author: Chao Xie <chao.xie@marvell.com>
+ * Neil Zhang <zhangwm@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/device.h>
+#include <linux/proc_fs.h>
+#include <linux/clk.h>
+#include <linux/workqueue.h>
+#include <linux/platform_device.h>
+
+#include <linux/usb.h>
+#include <linux/usb/ch9.h>
+#include <linux/usb/otg.h>
+#include <linux/usb/gadget.h>
+#include <linux/usb/hcd.h>
+#include <linux/platform_data/mv_usb.h>
+
+#include "mv_otg.h"
+
+#define DRIVER_DESC "Marvell USB OTG transceiver driver"
+#define DRIVER_VERSION "Jan 20, 2010"
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+
+static const char driver_name[] = "mv-otg";
+
+static char *state_string[] = {
+ "undefined",
+ "b_idle",
+ "b_srp_init",
+ "b_peripheral",
+ "b_wait_acon",
+ "b_host",
+ "a_idle",
+ "a_wait_vrise",
+ "a_wait_bcon",
+ "a_host",
+ "a_suspend",
+ "a_peripheral",
+ "a_wait_vfall",
+ "a_vbus_err"
+};
+
+static int mv_otg_set_vbus(struct otg_transceiver *otg, bool on)
+{
+ struct mv_otg *mvotg = container_of(otg, struct mv_otg, otg);
+ if (mvotg->pdata->set_vbus == NULL)
+ return -ENODEV;
+
+ return mvotg->pdata->set_vbus(on);
+}
+
+static int mv_otg_set_host(struct otg_transceiver *otg,
+ struct usb_bus *host)
+{
+ otg->host = host;
+
+ return 0;
+}
+
+static int mv_otg_set_peripheral(struct otg_transceiver *otg,
+ struct usb_gadget *gadget)
+{
+ otg->gadget = gadget;
+
+ return 0;
+}
+
+static void mv_otg_run_state_machine(struct mv_otg *mvotg,
+ unsigned long delay)
+{
+ dev_dbg(&mvotg->pdev->dev, "transceiver is updated\n");
+ if (!mvotg->qwork)
+ return;
+
+ queue_delayed_work(mvotg->qwork, &mvotg->work, delay);
+}
+
+static void mv_otg_timer_await_bcon(unsigned long data)
+{
+ struct mv_otg *mvotg = (struct mv_otg *) data;
+
+ mvotg->otg_ctrl.a_wait_bcon_timeout = 1;
+
+ dev_info(&mvotg->pdev->dev, "B Device No Response!\n");
+
+ if (spin_trylock(&mvotg->wq_lock)) {
+ mv_otg_run_state_machine(mvotg, 0);
+ spin_unlock(&mvotg->wq_lock);
+ }
+}
+
+static int mv_otg_cancel_timer(struct mv_otg *mvotg, unsigned int id)
+{
+ struct timer_list *timer;
+
+ if (id >= OTG_TIMER_NUM)
+ return -EINVAL;
+
+ timer = &mvotg->otg_ctrl.timer[id];
+
+ if (timer_pending(timer))
+ del_timer(timer);
+
+ return 0;
+}
+
+static int mv_otg_set_timer(struct mv_otg *mvotg, unsigned int id,
+ unsigned long interval,
+ void (*callback) (unsigned long))
+{
+ struct timer_list *timer;
+
+ if (id >= OTG_TIMER_NUM)
+ return -EINVAL;
+
+ timer = &mvotg->otg_ctrl.timer[id];
+ if (timer_pending(timer)) {
+ dev_err(&mvotg->pdev->dev, "Timer%d is already running\n", id);
+ return -EBUSY;
+ }
+
+ init_timer(timer);
+ timer->data = (unsigned long) mvotg;
+ timer->function = callback;
+ timer->expires = jiffies + interval;
+ add_timer(timer);
+
+ return 0;
+}
+
+static int mv_otg_reset(struct mv_otg *mvotg)
+{
+ unsigned int loops;
+ u32 tmp;
+
+ /* Stop the controller */
+ tmp = readl(&mvotg->op_regs->usbcmd);
+ tmp &= ~USBCMD_RUN_STOP;
+ writel(tmp, &mvotg->op_regs->usbcmd);
+
+ /* Reset the controller to get default values */
+ writel(USBCMD_CTRL_RESET, &mvotg->op_regs->usbcmd);
+
+ loops = 500;
+ while (readl(&mvotg->op_regs->usbcmd) & USBCMD_CTRL_RESET) {
+ if (loops == 0) {
+ dev_err(&mvotg->pdev->dev,
+ "Wait for RESET completed TIMEOUT\n");
+ return -ETIMEDOUT;
+ }
+ loops--;
+ udelay(20);
+ }
+
+ writel(0x0, &mvotg->op_regs->usbintr);
+ tmp = readl(&mvotg->op_regs->usbsts);
+ writel(tmp, &mvotg->op_regs->usbsts);
+
+ return 0;
+}
+
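+/*
+ * Enable the OTGSC interrupt sources used by the state machine. The
+ * controller's own B-session and ID interrupts are only enabled when the
+ * platform does not provide external vbus/id detection.
+ */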
+static void mv_otg_init_irq(struct mv_otg *mvotg)
+{
+ u32 otgsc;
+
+ mvotg->irq_en = OTGSC_INTR_A_SESSION_VALID
+ | OTGSC_INTR_A_VBUS_VALID;
+ mvotg->irq_status = OTGSC_INTSTS_A_SESSION_VALID
+ | OTGSC_INTSTS_A_VBUS_VALID;
+
+ if (mvotg->pdata->vbus == NULL) {
+ mvotg->irq_en |= OTGSC_INTR_B_SESSION_VALID
+ | OTGSC_INTR_B_SESSION_END;
+ mvotg->irq_status |= OTGSC_INTSTS_B_SESSION_VALID
+ | OTGSC_INTSTS_B_SESSION_END;
+ }
+
+ if (mvotg->pdata->id == NULL) {
+ mvotg->irq_en |= OTGSC_INTR_USB_ID;
+ mvotg->irq_status |= OTGSC_INTSTS_USB_ID;
+ }
+
+ otgsc = readl(&mvotg->op_regs->otgsc);
+ otgsc |= mvotg->irq_en;
+ writel(otgsc, &mvotg->op_regs->otgsc);
+}
+
+static void mv_otg_start_host(struct mv_otg *mvotg, int on)
+{
+#ifdef CONFIG_USB
+ struct otg_transceiver *otg = &mvotg->otg;
+ struct usb_hcd *hcd;
+
+ if (!otg->host)
+ return;
+
+ dev_info(&mvotg->pdev->dev, "%s host\n", on ? "start" : "stop");
+
+ hcd = bus_to_hcd(otg->host);
+
+ if (on)
+ usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
+ else
+ usb_remove_hcd(hcd);
+#endif /* CONFIG_USB */
+}
+
+static void mv_otg_start_peripherals(struct mv_otg *mvotg, int on)
+{
+ struct otg_transceiver *otg = &mvotg->otg;
+
+ if (!otg->gadget)
+ return;
+
+ dev_info(otg->dev, "gadget %s\n", on ? "on" : "off");
+
+ if (on)
+ usb_gadget_vbus_connect(otg->gadget);
+ else
+ usb_gadget_vbus_disconnect(otg->gadget);
+}
+
+static void otg_clock_enable(struct mv_otg *mvotg)
+{
+ unsigned int i;
+
+ for (i = 0; i < mvotg->clknum; i++)
+ clk_enable(mvotg->clk[i]);
+}
+
+static void otg_clock_disable(struct mv_otg *mvotg)
+{
+ unsigned int i;
+
+ for (i = 0; i < mvotg->clknum; i++)
+ clk_disable(mvotg->clk[i]);
+}
+
+static int mv_otg_enable_internal(struct mv_otg *mvotg)
+{
+ int retval = 0;
+
+ if (mvotg->active)
+ return 0;
+
+ dev_dbg(&mvotg->pdev->dev, "otg enabled\n");
+
+ otg_clock_enable(mvotg);
+ if (mvotg->pdata->phy_init) {
+ retval = mvotg->pdata->phy_init(mvotg->phy_regs);
+ if (retval) {
+ dev_err(&mvotg->pdev->dev,
+ "init phy error %d\n", retval);
+ otg_clock_disable(mvotg);
+ return retval;
+ }
+ }
+ mvotg->active = 1;
+
+ return 0;
+
+}
+
+static int mv_otg_enable(struct mv_otg *mvotg)
+{
+ if (mvotg->clock_gating)
+ return mv_otg_enable_internal(mvotg);
+
+ return 0;
+}
+
+static void mv_otg_disable_internal(struct mv_otg *mvotg)
+{
+ if (mvotg->active) {
+ dev_dbg(&mvotg->pdev->dev, "otg disabled\n");
+ if (mvotg->pdata->phy_deinit)
+ mvotg->pdata->phy_deinit(mvotg->phy_regs);
+ otg_clock_disable(mvotg);
+ mvotg->active = 0;
+ }
+}
+
+static void mv_otg_disable(struct mv_otg *mvotg)
+{
+ if (mvotg->clock_gating)
+ mv_otg_disable_internal(mvotg);
+}
+
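+/*
+ * Latch the OTG inputs into otg_ctrl: the B-session and ID inputs come
+ * from the external vbus/id notifiers when the platform provides them,
+ * otherwise (like the A-session inputs) from the OTGSC status bits.
+ */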
+static void mv_otg_update_inputs(struct mv_otg *mvotg)
+{
+ struct mv_otg_ctrl *otg_ctrl = &mvotg->otg_ctrl;
+ u32 otgsc;
+
+ otgsc = readl(&mvotg->op_regs->otgsc);
+
+ if (mvotg->pdata->vbus) {
+ if (mvotg->pdata->vbus->poll() == VBUS_HIGH) {
+ otg_ctrl->b_sess_vld = 1;
+ otg_ctrl->b_sess_end = 0;
+ } else {
+ otg_ctrl->b_sess_vld = 0;
+ otg_ctrl->b_sess_end = 1;
+ }
+ } else {
+ otg_ctrl->b_sess_vld = !!(otgsc & OTGSC_STS_B_SESSION_VALID);
+ otg_ctrl->b_sess_end = !!(otgsc & OTGSC_STS_B_SESSION_END);
+ }
+
+ if (mvotg->pdata->id)
+ otg_ctrl->id = !!mvotg->pdata->id->poll();
+ else
+ otg_ctrl->id = !!(otgsc & OTGSC_STS_USB_ID);
+
+ if (mvotg->pdata->otg_force_a_bus_req && !otg_ctrl->id)
+ otg_ctrl->a_bus_req = 1;
+
+ otg_ctrl->a_sess_vld = !!(otgsc & OTGSC_STS_A_SESSION_VALID);
+ otg_ctrl->a_vbus_vld = !!(otgsc & OTGSC_STS_A_VBUS_VALID);
+
+ dev_dbg(&mvotg->pdev->dev, "%s: ", __func__);
+ dev_dbg(&mvotg->pdev->dev, "id %d\n", otg_ctrl->id);
+ dev_dbg(&mvotg->pdev->dev, "b_sess_vld %d\n", otg_ctrl->b_sess_vld);
+ dev_dbg(&mvotg->pdev->dev, "b_sess_end %d\n", otg_ctrl->b_sess_end);
+ dev_dbg(&mvotg->pdev->dev, "a_vbus_vld %d\n", otg_ctrl->a_vbus_vld);
+ dev_dbg(&mvotg->pdev->dev, "a_sess_vld %d\n", otg_ctrl->a_sess_vld);
+}
+
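+/*
+ * Compute the next OTG state from the current state and the inputs
+ * latched by mv_otg_update_inputs(); the side effects of a transition
+ * (host/gadget start and stop, VBUS) are applied in mv_otg_work().
+ */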
+static void mv_otg_update_state(struct mv_otg *mvotg)
+{
+ struct mv_otg_ctrl *otg_ctrl = &mvotg->otg_ctrl;
+ struct otg_transceiver *otg = &mvotg->otg;
+ int old_state = otg->state;
+
+ switch (old_state) {
+ case OTG_STATE_UNDEFINED:
+ otg->state = OTG_STATE_B_IDLE;
+ /* FALL THROUGH */
+ case OTG_STATE_B_IDLE:
+ if (otg_ctrl->id == 0)
+ otg->state = OTG_STATE_A_IDLE;
+ else if (otg_ctrl->b_sess_vld)
+ otg->state = OTG_STATE_B_PERIPHERAL;
+ break;
+ case OTG_STATE_B_PERIPHERAL:
+ if (!otg_ctrl->b_sess_vld || otg_ctrl->id == 0)
+ otg->state = OTG_STATE_B_IDLE;
+ break;
+ case OTG_STATE_A_IDLE:
+ if (otg_ctrl->id)
+ otg->state = OTG_STATE_B_IDLE;
+ else if (!(otg_ctrl->a_bus_drop) &&
+ (otg_ctrl->a_bus_req || otg_ctrl->a_srp_det))
+ otg->state = OTG_STATE_A_WAIT_VRISE;
+ break;
+ case OTG_STATE_A_WAIT_VRISE:
+ if (otg_ctrl->a_vbus_vld)
+ otg->state = OTG_STATE_A_WAIT_BCON;
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ if (otg_ctrl->id || otg_ctrl->a_bus_drop
+ || otg_ctrl->a_wait_bcon_timeout) {
+ mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER);
+ mvotg->otg_ctrl.a_wait_bcon_timeout = 0;
+ otg->state = OTG_STATE_A_WAIT_VFALL;
+ otg_ctrl->a_bus_req = 0;
+ } else if (!otg_ctrl->a_vbus_vld) {
+ mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER);
+ mvotg->otg_ctrl.a_wait_bcon_timeout = 0;
+ otg->state = OTG_STATE_A_VBUS_ERR;
+ } else if (otg_ctrl->b_conn) {
+ mv_otg_cancel_timer(mvotg, A_WAIT_BCON_TIMER);
+ mvotg->otg_ctrl.a_wait_bcon_timeout = 0;
+ otg->state = OTG_STATE_A_HOST;
+ }
+ break;
+ case OTG_STATE_A_HOST:
+ if (otg_ctrl->id || !otg_ctrl->b_conn
+ || otg_ctrl->a_bus_drop)
+ otg->state = OTG_STATE_A_WAIT_BCON;
+ else if (!otg_ctrl->a_vbus_vld)
+ otg->state = OTG_STATE_A_VBUS_ERR;
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ if (otg_ctrl->id
+ || (!otg_ctrl->b_conn && otg_ctrl->a_sess_vld)
+ || otg_ctrl->a_bus_req)
+ otg->state = OTG_STATE_A_IDLE;
+ break;
+ case OTG_STATE_A_VBUS_ERR:
+ if (otg_ctrl->id || otg_ctrl->a_clr_err
+ || otg_ctrl->a_bus_drop) {
+ otg_ctrl->a_clr_err = 0;
+ otg->state = OTG_STATE_A_WAIT_VFALL;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
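+/*
+ * State-machine work item: refresh the inputs, advance the state and
+ * apply the side effects of each transition (start or stop the host or
+ * peripheral, drive VBUS, arm the a_wait_bcon timer), looping until the
+ * state no longer changes.
+ */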
+static void mv_otg_work(struct work_struct *work)
+{
+ struct mv_otg *mvotg;
+ struct otg_transceiver *otg;
+ int old_state;
+
+ mvotg = container_of((struct delayed_work *)work, struct mv_otg, work);
+
+run:
+ /* the workqueue is single-threaded; otherwise a spin_lock would be needed here */
+ otg = &mvotg->otg;
+ old_state = otg->state;
+
+ if (!mvotg->active)
+ return;
+
+ mv_otg_update_inputs(mvotg);
+ mv_otg_update_state(mvotg);
+
+ if (old_state != otg->state) {
+ dev_info(&mvotg->pdev->dev, "change from state %s to %s\n",
+ state_string[old_state],
+ state_string[otg->state]);
+
+ switch (otg->state) {
+ case OTG_STATE_B_IDLE:
+ mvotg->otg.default_a = 0;
+ if (old_state == OTG_STATE_B_PERIPHERAL)
+ mv_otg_start_peripherals(mvotg, 0);
+ mv_otg_reset(mvotg);
+ mv_otg_disable(mvotg);
+ break;
+ case OTG_STATE_B_PERIPHERAL:
+ mv_otg_enable(mvotg);
+ mv_otg_start_peripherals(mvotg, 1);
+ break;
+ case OTG_STATE_A_IDLE:
+ mvotg->otg.default_a = 1;
+ mv_otg_enable(mvotg);
+ if (old_state == OTG_STATE_A_WAIT_VFALL)
+ mv_otg_start_host(mvotg, 0);
+ mv_otg_reset(mvotg);
+ break;
+ case OTG_STATE_A_WAIT_VRISE:
+ mv_otg_set_vbus(&mvotg->otg, 1);
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ if (old_state != OTG_STATE_A_HOST)
+ mv_otg_start_host(mvotg, 1);
+ mv_otg_set_timer(mvotg, A_WAIT_BCON_TIMER,
+ T_A_WAIT_BCON,
+ mv_otg_timer_await_bcon);
+ /*
+ * Now we enter A_HOST directly, so set b_conn = 1 here.
+ * Strictly speaking, the host driver should notify us.
+ */
+ mvotg->otg_ctrl.b_conn = 1;
+ break;
+ case OTG_STATE_A_HOST:
+ break;
+ case OTG_STATE_A_WAIT_VFALL:
+ /*
+ * Now we have exited A_HOST, so set b_conn = 0 here.
+ * Strictly speaking, the host driver should notify us.
+ */
+ mvotg->otg_ctrl.b_conn = 0;
+ mv_otg_set_vbus(&mvotg->otg, 0);
+ break;
+ case OTG_STATE_A_VBUS_ERR:
+ break;
+ default:
+ break;
+ }
+ goto run;
+ }
+}
+
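+/*
+ * OTG interrupt handler: acknowledge the pending OTGSC status bits and
+ * kick the state machine when one of the enabled sources has fired.
+ */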
+static irqreturn_t mv_otg_irq(int irq, void *dev)
+{
+ struct mv_otg *mvotg = dev;
+ u32 otgsc;
+
+ otgsc = readl(&mvotg->op_regs->otgsc);
+ writel(otgsc, &mvotg->op_regs->otgsc);
+
+ /*
+ * if external vbus detection is provided, vbus detection
+ * for the B-device will be done by mv_otg_inputs_irq().
+ */
+ if (mvotg->pdata->vbus)
+ if ((otgsc & OTGSC_STS_USB_ID) &&
+ !(otgsc & OTGSC_INTSTS_USB_ID))
+ return IRQ_NONE;
+
+ if ((otgsc & mvotg->irq_status) == 0)
+ return IRQ_NONE;
+
+ mv_otg_run_state_machine(mvotg, 0);
+
+ return IRQ_HANDLED;
+}
+
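+/*
+ * Threaded handler for the external vbus/id detection interrupts
+ * provided through platform data.
+ */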
+static irqreturn_t mv_otg_inputs_irq(int irq, void *dev)
+{
+ struct mv_otg *mvotg = dev;
+
+ /* The clock may be disabled at this time */
+ if (!mvotg->active) {
+ mv_otg_enable(mvotg);
+ mv_otg_init_irq(mvotg);
+ }
+
+ mv_otg_run_state_machine(mvotg, 0);
+
+ return IRQ_HANDLED;
+}
+
+static ssize_t
+get_a_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct mv_otg *mvotg = dev_get_drvdata(dev);
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ mvotg->otg_ctrl.a_bus_req);
+}
+
+static ssize_t
+set_a_bus_req(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mv_otg *mvotg = dev_get_drvdata(dev);
+
+ if (count > 2)
+ return -1;
+
+ /* We will use this interface to change to A device */
+ if (mvotg->otg.state != OTG_STATE_B_IDLE
+ && mvotg->otg.state != OTG_STATE_A_IDLE)
+ return -1;
+
+ /* The clock may be disabled; enable it and set up the irq for ID detection */
+ mv_otg_enable(mvotg);
+ mv_otg_init_irq(mvotg);
+
+ if (buf[0] == '1') {
+ mvotg->otg_ctrl.a_bus_req = 1;
+ mvotg->otg_ctrl.a_bus_drop = 0;
+ dev_dbg(&mvotg->pdev->dev,
+ "User request: a_bus_req = 1\n");
+
+ if (spin_trylock(&mvotg->wq_lock)) {
+ mv_otg_run_state_machine(mvotg, 0);
+ spin_unlock(&mvotg->wq_lock);
+ }
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUSR, get_a_bus_req,
+ set_a_bus_req);
+
+static ssize_t
+set_a_clr_err(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mv_otg *mvotg = dev_get_drvdata(dev);
+ if (!mvotg->otg.default_a)
+ return -1;
+
+ if (count > 2)
+ return -1;
+
+ if (buf[0] == '1') {
+ mvotg->otg_ctrl.a_clr_err = 1;
+ dev_dbg(&mvotg->pdev->dev,
+ "User request: a_clr_err = 1\n");
+ }
+
+ if (spin_trylock(&mvotg->wq_lock)) {
+ mv_otg_run_state_machine(mvotg, 0);
+ spin_unlock(&mvotg->wq_lock);
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR(a_clr_err, S_IWUSR, NULL, set_a_clr_err);
+
+static ssize_t
+get_a_bus_drop(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct mv_otg *mvotg = dev_get_drvdata(dev);
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ mvotg->otg_ctrl.a_bus_drop);
+}
+
+static ssize_t
+set_a_bus_drop(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mv_otg *mvotg = dev_get_drvdata(dev);
+ if (!mvotg->otg.default_a)
+ return -1;
+
+ if (count > 2)
+ return -1;
+
+ if (buf[0] == '0') {
+ mvotg->otg_ctrl.a_bus_drop = 0;
+ dev_dbg(&mvotg->pdev->dev,
+ "User request: a_bus_drop = 0\n");
+ } else if (buf[0] == '1') {
+ mvotg->otg_ctrl.a_bus_drop = 1;
+ mvotg->otg_ctrl.a_bus_req = 0;
+ dev_dbg(&mvotg->pdev->dev,
+ "User request: a_bus_drop = 1\n");
+ dev_dbg(&mvotg->pdev->dev,
+ "User request: and a_bus_req = 0\n");
+ }
+
+ if (spin_trylock(&mvotg->wq_lock)) {
+ mv_otg_run_state_machine(mvotg, 0);
+ spin_unlock(&mvotg->wq_lock);
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUSR,
+ get_a_bus_drop, set_a_bus_drop);
+
+static struct attribute *inputs_attrs[] = {
+ &dev_attr_a_bus_req.attr,
+ &dev_attr_a_clr_err.attr,
+ &dev_attr_a_bus_drop.attr,
+ NULL,
+};
+
+static struct attribute_group inputs_attr_group = {
+ .name = "inputs",
+ .attrs = inputs_attrs,
+};
+
+int mv_otg_remove(struct platform_device *pdev)
+{
+ struct mv_otg *mvotg = platform_get_drvdata(pdev);
+ int clk_i;
+
+ sysfs_remove_group(&mvotg->pdev->dev.kobj, &inputs_attr_group);
+
+ if (mvotg->irq)
+ free_irq(mvotg->irq, mvotg);
+
+ if (mvotg->pdata->vbus)
+ free_irq(mvotg->pdata->vbus->irq, mvotg);
+ if (mvotg->pdata->id)
+ free_irq(mvotg->pdata->id->irq, mvotg);
+
+ if (mvotg->qwork) {
+ flush_workqueue(mvotg->qwork);
+ destroy_workqueue(mvotg->qwork);
+ }
+
+ mv_otg_disable(mvotg);
+
+ if (mvotg->cap_regs)
+ iounmap(mvotg->cap_regs);
+
+ if (mvotg->phy_regs)
+ iounmap(mvotg->phy_regs);
+
+ for (clk_i = 0; clk_i < mvotg->clknum; clk_i++)
+ clk_put(mvotg->clk[clk_i]);
+
+ otg_set_transceiver(NULL);
+ platform_set_drvdata(pdev, NULL);
+
+ kfree(mvotg);
+
+ return 0;
+}
+
+static int mv_otg_probe(struct platform_device *pdev)
+{
+ struct mv_usb_platform_data *pdata = pdev->dev.platform_data;
+ struct mv_otg *mvotg;
+ struct resource *r;
+ int retval = 0, clk_i, i;
+ size_t size;
+
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "failed to get platform data\n");
+ return -ENODEV;
+ }
+
+ size = sizeof(*mvotg) + sizeof(struct clk *) * pdata->clknum;
+ mvotg = kzalloc(size, GFP_KERNEL);
+ if (!mvotg) {
+ dev_err(&pdev->dev, "failed to allocate memory!\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, mvotg);
+
+ mvotg->pdev = pdev;
+ mvotg->pdata = pdata;
+
+ mvotg->clknum = pdata->clknum;
+ for (clk_i = 0; clk_i < mvotg->clknum; clk_i++) {
+ mvotg->clk[clk_i] = clk_get(&pdev->dev, pdata->clkname[clk_i]);
+ if (IS_ERR(mvotg->clk[clk_i])) {
+ retval = PTR_ERR(mvotg->clk[clk_i]);
+ goto err_put_clk;
+ }
+ }
+
+ mvotg->qwork = create_singlethread_workqueue("mv_otg_queue");
+ if (!mvotg->qwork) {
+ dev_dbg(&pdev->dev, "cannot create workqueue for OTG\n");
+ retval = -ENOMEM;
+ goto err_put_clk;
+ }
+
+ INIT_DELAYED_WORK(&mvotg->work, mv_otg_work);
+
+ /* OTG common part */
+ mvotg->pdev = pdev;
+ mvotg->otg.dev = &pdev->dev;
+ mvotg->otg.label = driver_name;
+ mvotg->otg.set_host = mv_otg_set_host;
+ mvotg->otg.set_peripheral = mv_otg_set_peripheral;
+ mvotg->otg.set_vbus = mv_otg_set_vbus;
+ mvotg->otg.state = OTG_STATE_UNDEFINED;
+
+ for (i = 0; i < OTG_TIMER_NUM; i++)
+ init_timer(&mvotg->otg_ctrl.timer[i]);
+
+ r = platform_get_resource_byname(mvotg->pdev,
+ IORESOURCE_MEM, "phyregs");
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no phy I/O memory resource defined\n");
+ retval = -ENODEV;
+ goto err_destroy_workqueue;
+ }
+
+ mvotg->phy_regs = ioremap(r->start, resource_size(r));
+ if (mvotg->phy_regs == NULL) {
+ dev_err(&pdev->dev, "failed to map phy I/O memory\n");
+ retval = -EFAULT;
+ goto err_destroy_workqueue;
+ }
+
+ r = platform_get_resource_byname(mvotg->pdev,
+ IORESOURCE_MEM, "capregs");
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no I/O memory resource defined\n");
+ retval = -ENODEV;
+ goto err_unmap_phyreg;
+ }
+
+ mvotg->cap_regs = ioremap(r->start, resource_size(r));
+ if (mvotg->cap_regs == NULL) {
+ dev_err(&pdev->dev, "failed to map I/O memory\n");
+ retval = -EFAULT;
+ goto err_unmap_phyreg;
+ }
+
+ /* we will access the controller registers, so enable the udc controller */
+ retval = mv_otg_enable_internal(mvotg);
+ if (retval) {
+ dev_err(&pdev->dev, "mv otg enable error %d\n", retval);
+ goto err_unmap_capreg;
+ }
+
+ mvotg->op_regs =
+ (struct mv_otg_regs __iomem *) ((unsigned long) mvotg->cap_regs
+ + (readl(mvotg->cap_regs) & CAPLENGTH_MASK));
+
+ if (pdata->id) {
+ retval = request_threaded_irq(pdata->id->irq, NULL,
+ mv_otg_inputs_irq,
+ IRQF_ONESHOT, "id", mvotg);
+ if (retval) {
+ dev_info(&pdev->dev,
+ "Failed to request irq for ID\n");
+ pdata->id = NULL;
+ }
+ }
+
+ if (pdata->vbus) {
+ mvotg->clock_gating = 1;
+ retval = request_threaded_irq(pdata->vbus->irq, NULL,
+ mv_otg_inputs_irq,
+ IRQF_ONESHOT, "vbus", mvotg);
+ if (retval) {
+ dev_info(&pdev->dev,
+ "Failed to request irq for VBUS, "
+ "disable clock gating\n");
+ mvotg->clock_gating = 0;
+ pdata->vbus = NULL;
+ }
+ }
+
+ if (pdata->disable_otg_clock_gating)
+ mvotg->clock_gating = 0;
+
+ mv_otg_reset(mvotg);
+ mv_otg_init_irq(mvotg);
+
+ r = platform_get_resource(mvotg->pdev, IORESOURCE_IRQ, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "no IRQ resource defined\n");
+ retval = -ENODEV;
+ goto err_disable_clk;
+ }
+
+ mvotg->irq = r->start;
+ if (request_irq(mvotg->irq, mv_otg_irq, IRQF_SHARED,
+ driver_name, mvotg)) {
+ dev_err(&pdev->dev, "Request irq %d for OTG failed\n",
+ mvotg->irq);
+ mvotg->irq = 0;
+ retval = -ENODEV;
+ goto err_disable_clk;
+ }
+
+ retval = otg_set_transceiver(&mvotg->otg);
+ if (retval < 0) {
+ dev_err(&pdev->dev, "can't register transceiver, %d\n",
+ retval);
+ goto err_free_irq;
+ }
+
+ retval = sysfs_create_group(&pdev->dev.kobj, &inputs_attr_group);
+ if (retval < 0) {
+ dev_dbg(&pdev->dev,
+ "Can't register sysfs attr group: %d\n", retval);
+ goto err_set_transceiver;
+ }
+
+ spin_lock_init(&mvotg->wq_lock);
+ if (spin_trylock(&mvotg->wq_lock)) {
+ mv_otg_run_state_machine(mvotg, 2 * HZ);
+ spin_unlock(&mvotg->wq_lock);
+ }
+
+ dev_info(&pdev->dev,
+ "successful probe OTG device %s clock gating.\n",
+ mvotg->clock_gating ? "with" : "without");
+
+ return 0;
+
+err_set_transceiver:
+ otg_set_transceiver(NULL);
+err_free_irq:
+ free_irq(mvotg->irq, mvotg);
+err_disable_clk:
+ if (pdata->vbus)
+ free_irq(pdata->vbus->irq, mvotg);
+ if (pdata->id)
+ free_irq(pdata->id->irq, mvotg);
+ mv_otg_disable_internal(mvotg);
+err_unmap_capreg:
+ iounmap(mvotg->cap_regs);
+err_unmap_phyreg:
+ iounmap(mvotg->phy_regs);
+err_destroy_workqueue:
+ flush_workqueue(mvotg->qwork);
+ destroy_workqueue(mvotg->qwork);
+err_put_clk:
+ for (clk_i--; clk_i >= 0; clk_i--)
+ clk_put(mvotg->clk[clk_i]);
+
+ platform_set_drvdata(pdev, NULL);
+ kfree(mvotg);
+
+ return retval;
+}
+
+#ifdef CONFIG_PM
+static int mv_otg_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct mv_otg *mvotg = platform_get_drvdata(pdev);
+
+ if (mvotg->otg.state != OTG_STATE_B_IDLE) {
+ dev_info(&pdev->dev,
+ "OTG state is not B_IDLE, it is %d!\n",
+ mvotg->otg.state);
+ return -EAGAIN;
+ }
+
+ if (!mvotg->clock_gating)
+ mv_otg_disable_internal(mvotg);
+
+ return 0;
+}
+
+static int mv_otg_resume(struct platform_device *pdev)
+{
+ struct mv_otg *mvotg = platform_get_drvdata(pdev);
+ u32 otgsc;
+
+ if (!mvotg->clock_gating) {
+ mv_otg_enable_internal(mvotg);
+
+ otgsc = readl(&mvotg->op_regs->otgsc);
+ otgsc |= mvotg->irq_en;
+ writel(otgsc, &mvotg->op_regs->otgsc);
+
+ if (spin_trylock(&mvotg->wq_lock)) {
+ mv_otg_run_state_machine(mvotg, 0);
+ spin_unlock(&mvotg->wq_lock);
+ }
+ }
+ return 0;
+}
+#endif
+
+static struct platform_driver mv_otg_driver = {
+ .probe = mv_otg_probe,
+ .remove = __exit_p(mv_otg_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = driver_name,
+ },
+#ifdef CONFIG_PM
+ .suspend = mv_otg_suspend,
+ .resume = mv_otg_resume,
+#endif
+};
+
+static int __init mv_otg_init(void)
+{
+ return platform_driver_register(&mv_otg_driver);
+}
+
+static void __exit mv_otg_exit(void)
+{
+ platform_driver_unregister(&mv_otg_driver);
+}
+
+module_init(mv_otg_init);
+module_exit(mv_otg_exit);
diff --git a/drivers/usb/otg/mv_otg.h b/drivers/usb/otg/mv_otg.h
new file mode 100644
index 0000000..be6ca14
--- /dev/null
+++ b/drivers/usb/otg/mv_otg.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2011 Marvell International Ltd. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MV_USB_OTG_CONTROLLER__
+#define __MV_USB_OTG_CONTROLLER__
+
+#include <linux/types.h>
+
+/* Command Register Bit Masks */
+#define USBCMD_RUN_STOP (0x00000001)
+#define USBCMD_CTRL_RESET (0x00000002)
+
+/* otgsc Register Bit Masks */
+#define OTGSC_CTRL_VUSB_DISCHARGE 0x00000001
+#define OTGSC_CTRL_VUSB_CHARGE 0x00000002
+#define OTGSC_CTRL_OTG_TERM 0x00000008
+#define OTGSC_CTRL_DATA_PULSING 0x00000010
+#define OTGSC_STS_USB_ID 0x00000100
+#define OTGSC_STS_A_VBUS_VALID 0x00000200
+#define OTGSC_STS_A_SESSION_VALID 0x00000400
+#define OTGSC_STS_B_SESSION_VALID 0x00000800
+#define OTGSC_STS_B_SESSION_END 0x00001000
+#define OTGSC_STS_1MS_TOGGLE 0x00002000
+#define OTGSC_STS_DATA_PULSING 0x00004000
+#define OTGSC_INTSTS_USB_ID 0x00010000
+#define OTGSC_INTSTS_A_VBUS_VALID 0x00020000
+#define OTGSC_INTSTS_A_SESSION_VALID 0x00040000
+#define OTGSC_INTSTS_B_SESSION_VALID 0x00080000
+#define OTGSC_INTSTS_B_SESSION_END 0x00100000
+#define OTGSC_INTSTS_1MS 0x00200000
+#define OTGSC_INTSTS_DATA_PULSING 0x00400000
+#define OTGSC_INTR_USB_ID 0x01000000
+#define OTGSC_INTR_A_VBUS_VALID 0x02000000
+#define OTGSC_INTR_A_SESSION_VALID 0x04000000
+#define OTGSC_INTR_B_SESSION_VALID 0x08000000
+#define OTGSC_INTR_B_SESSION_END 0x10000000
+#define OTGSC_INTR_1MS_TIMER 0x20000000
+#define OTGSC_INTR_DATA_PULSING 0x40000000
+
+#define CAPLENGTH_MASK (0xff)
+
+/* Timer's interval, unit 10ms */
+#define T_A_WAIT_VRISE 100
+#define T_A_WAIT_BCON 2000
+#define T_A_AIDL_BDIS 100
+#define T_A_BIDL_ADIS 20
+#define T_B_ASE0_BRST 400
+#define T_B_SE0_SRP 300
+#define T_B_SRP_FAIL 2000
+#define T_B_DATA_PLS 10
+#define T_B_SRP_INIT 100
+#define T_A_SRP_RSPNS 10
+#define T_A_DRV_RSM 5
+
+enum otg_function {
+ OTG_B_DEVICE = 0,
+ OTG_A_DEVICE
+};
+
+enum mv_otg_timer {
+ A_WAIT_BCON_TIMER = 0,
+ OTG_TIMER_NUM
+};
+
+/* PXA OTG state machine */
+struct mv_otg_ctrl {
+ /* internal variables */
+ u8 a_set_b_hnp_en; /* A-Device set b_hnp_en */
+ u8 b_srp_done;
+ u8 b_hnp_en;
+
+ /* OTG inputs */
+ u8 a_bus_drop;
+ u8 a_bus_req;
+ u8 a_clr_err;
+ u8 a_bus_resume;
+ u8 a_bus_suspend;
+ u8 a_conn;
+ u8 a_sess_vld;
+ u8 a_srp_det;
+ u8 a_vbus_vld;
+ u8 b_bus_req; /* B-Device Require Bus */
+ u8 b_bus_resume;
+ u8 b_bus_suspend;
+ u8 b_conn;
+ u8 b_se0_srp;
+ u8 b_sess_end;
+ u8 b_sess_vld;
+ u8 id;
+ u8 a_suspend_req;
+
+ /* Timer event */
+ u8 a_aidl_bdis_timeout;
+ u8 b_ase0_brst_timeout;
+ u8 a_bidl_adis_timeout;
+ u8 a_wait_bcon_timeout;
+
+ struct timer_list timer[OTG_TIMER_NUM];
+};
+
+#define VUSBHS_MAX_PORTS 8
+
+struct mv_otg_regs {
+ u32 usbcmd; /* Command register */
+ u32 usbsts; /* Status register */
+ u32 usbintr; /* Interrupt enable */
+ u32 frindex; /* Frame index */
+ u32 reserved1[1];
+ u32 deviceaddr; /* Device Address */
+ u32 eplistaddr; /* Endpoint List Address */
+ u32 ttctrl; /* HOST TT status and control */
+ u32 burstsize; /* Programmable Burst Size */
+ u32 txfilltuning; /* Host Transmit Pre-Buffer Packet Tuning */
+ u32 reserved[4];
+ u32 epnak; /* Endpoint NAK */
+ u32 epnaken; /* Endpoint NAK Enable */
+ u32 configflag; /* Configured Flag register */
+ u32 portsc[VUSBHS_MAX_PORTS]; /* Port Status/Control x, x = 1..8 */
+ u32 otgsc;
+ u32 usbmode; /* USB Host/Device mode */
+ u32 epsetupstat; /* Endpoint Setup Status */
+ u32 epprime; /* Endpoint Initialize */
+ u32 epflush; /* Endpoint De-initialize */
+ u32 epstatus; /* Endpoint Status */
+ u32 epcomplete; /* Endpoint Interrupt On Complete */
+ u32 epctrlx[16]; /* Endpoint Control, where x = 0.. 15 */
+ u32 mcr; /* Mux Control */
+ u32 isr; /* Interrupt Status */
+ u32 ier; /* Interrupt Enable */
+};
+
+struct mv_otg {
+ struct otg_transceiver otg;
+ struct mv_otg_ctrl otg_ctrl;
+
+ /* base address */
+ void __iomem *phy_regs;
+ void __iomem *cap_regs;
+ struct mv_otg_regs __iomem *op_regs;
+
+ struct platform_device *pdev;
+ int irq;
+ u32 irq_status;
+ u32 irq_en;
+
+ struct delayed_work work;
+ struct workqueue_struct *qwork;
+
+ spinlock_t wq_lock;
+
+ struct mv_usb_platform_data *pdata;
+
+ unsigned int active;
+ unsigned int clock_gating;
+ unsigned int clknum;
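+ /* clock array, allocated together with struct mv_otg in mv_otg_probe() */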
+ struct clk *clk[0];
+};
+
+#endif
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index 08c679c..e9a5b1d 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -95,25 +95,15 @@ struct usbhs_priv *usbhs_pdev_to_priv(struct platform_device *pdev)
/*
* syscfg functions
*/
-void usbhs_sys_clock_ctrl(struct usbhs_priv *priv, int enable)
+static void usbhs_sys_clock_ctrl(struct usbhs_priv *priv, int enable)
{
usbhs_bset(priv, SYSCFG, SCKE, enable ? SCKE : 0);
}
-void usbhs_sys_hispeed_ctrl(struct usbhs_priv *priv, int enable)
-{
- usbhs_bset(priv, SYSCFG, HSE, enable ? HSE : 0);
-}
-
-void usbhs_sys_usb_ctrl(struct usbhs_priv *priv, int enable)
-{
- usbhs_bset(priv, SYSCFG, USBE, enable ? USBE : 0);
-}
-
void usbhs_sys_host_ctrl(struct usbhs_priv *priv, int enable)
{
- u16 mask = DCFM | DRPD | DPRPU;
- u16 val = DCFM | DRPD;
+ u16 mask = DCFM | DRPD | DPRPU | HSE | USBE;
+ u16 val = DCFM | DRPD | HSE | USBE;
int has_otg = usbhs_get_dparam(priv, has_otg);
if (has_otg)
@@ -130,8 +120,8 @@ void usbhs_sys_host_ctrl(struct usbhs_priv *priv, int enable)
void usbhs_sys_function_ctrl(struct usbhs_priv *priv, int enable)
{
- u16 mask = DCFM | DRPD | DPRPU;
- u16 val = DPRPU;
+ u16 mask = DCFM | DRPD | DPRPU | HSE | USBE;
+ u16 val = DPRPU | HSE | USBE;
/*
* if enable
@@ -142,6 +132,11 @@ void usbhs_sys_function_ctrl(struct usbhs_priv *priv, int enable)
usbhs_bset(priv, SYSCFG, mask, enable ? val : 0);
}
+void usbhs_sys_set_test_mode(struct usbhs_priv *priv, u16 mode)
+{
+ usbhs_write(priv, TESTMODE, mode);
+}
+
/*
* frame functions
*/
@@ -229,7 +224,7 @@ static void usbhsc_bus_init(struct usbhs_priv *priv)
/*
* device configuration
*/
-int usbhs_set_device_speed(struct usbhs_priv *priv, int devnum,
+int usbhs_set_device_config(struct usbhs_priv *priv, int devnum,
u16 upphub, u16 hubport, u16 speed)
{
struct device *dev = usbhs_priv_to_dev(priv);
@@ -301,18 +296,25 @@ static u32 usbhsc_default_pipe_type[] = {
*/
static void usbhsc_power_ctrl(struct usbhs_priv *priv, int enable)
{
+ struct platform_device *pdev = usbhs_priv_to_pdev(priv);
struct device *dev = usbhs_priv_to_dev(priv);
if (enable) {
/* enable PM */
pm_runtime_get_sync(dev);
+ /* enable platform power */
+ usbhs_platform_call(priv, power_ctrl, pdev, priv->base, enable);
+
/* USB on */
usbhs_sys_clock_ctrl(priv, enable);
} else {
/* USB off */
usbhs_sys_clock_ctrl(priv, enable);
+ /* disable platform power */
+ usbhs_platform_call(priv, power_ctrl, pdev, priv->base, enable);
+
/* disable PM */
pm_runtime_put_sync(dev);
}
@@ -388,7 +390,7 @@ static void usbhsc_notify_hotplug(struct work_struct *work)
usbhsc_hotplug(priv);
}
-int usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev)
+static int usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev)
{
struct usbhs_priv *priv = usbhs_pdev_to_priv(pdev);
int delay = usbhs_get_dparam(priv, detection_delay);
@@ -398,7 +400,8 @@ int usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev)
* To make sure safety context,
* use workqueue for usbhs_notify_hotplug
*/
- schedule_delayed_work(&priv->notify_hotplug_work, delay);
+ schedule_delayed_work(&priv->notify_hotplug_work,
+ msecs_to_jiffies(delay));
return 0;
}
@@ -637,18 +640,7 @@ static struct platform_driver renesas_usbhs_driver = {
.remove = __devexit_p(usbhs_remove),
};
-static int __init usbhs_init(void)
-{
- return platform_driver_register(&renesas_usbhs_driver);
-}
-
-static void __exit usbhs_exit(void)
-{
- platform_driver_unregister(&renesas_usbhs_driver);
-}
-
-module_init(usbhs_init);
-module_exit(usbhs_exit);
+module_platform_driver(renesas_usbhs_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Renesas USB driver");
diff --git a/drivers/usb/renesas_usbhs/common.h b/drivers/usb/renesas_usbhs/common.h
index 8729da5..d79b3e2 100644
--- a/drivers/usb/renesas_usbhs/common.h
+++ b/drivers/usb/renesas_usbhs/common.h
@@ -33,6 +33,7 @@ struct usbhs_priv;
#define SYSCFG 0x0000
#define BUSWAIT 0x0002
#define DVSTCTR 0x0008
+#define TESTMODE 0x000C
#define CFIFO 0x0014
#define CFIFOSEL 0x0020
#define CFIFOCTR 0x0022
@@ -275,19 +276,15 @@ u16 usbhs_read(struct usbhs_priv *priv, u32 reg);
void usbhs_write(struct usbhs_priv *priv, u32 reg, u16 data);
void usbhs_bset(struct usbhs_priv *priv, u32 reg, u16 mask, u16 data);
-int usbhsc_drvcllbck_notify_hotplug(struct platform_device *pdev);
-
#define usbhs_lock(p, f) spin_lock_irqsave(usbhs_priv_to_lock(p), f)
#define usbhs_unlock(p, f) spin_unlock_irqrestore(usbhs_priv_to_lock(p), f)
/*
* sysconfig
*/
-void usbhs_sys_clock_ctrl(struct usbhs_priv *priv, int enable);
-void usbhs_sys_hispeed_ctrl(struct usbhs_priv *priv, int enable);
-void usbhs_sys_usb_ctrl(struct usbhs_priv *priv, int enable);
void usbhs_sys_host_ctrl(struct usbhs_priv *priv, int enable);
void usbhs_sys_function_ctrl(struct usbhs_priv *priv, int enable);
+void usbhs_sys_set_test_mode(struct usbhs_priv *priv, u16 mode);
/*
* usb request
@@ -311,7 +308,7 @@ int usbhs_frame_get_num(struct usbhs_priv *priv);
/*
* device config
*/
-int usbhs_set_device_speed(struct usbhs_priv *priv, int devnum, u16 upphub,
+int usbhs_set_device_config(struct usbhs_priv *priv, int devnum, u16 upphub,
u16 hubport, u16 speed);
/*
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index ffdf5d1..72339bd 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -56,7 +56,7 @@ static struct usbhs_pkt_handle usbhsf_null_handler = {
void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
void (*done)(struct usbhs_priv *priv,
struct usbhs_pkt *pkt),
- void *buf, int len, int zero)
+ void *buf, int len, int zero, int sequence)
{
struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
struct device *dev = usbhs_priv_to_dev(priv);
@@ -90,6 +90,7 @@ void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
pkt->zero = zero;
pkt->actual = 0;
pkt->done = done;
+ pkt->sequence = sequence;
usbhs_unlock(priv, flags);
/******************** spin unlock ******************/
@@ -481,6 +482,9 @@ static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
int i, ret, len;
int is_short;
+ usbhs_pipe_data_sequence(pipe, pkt->sequence);
+ pkt->sequence = -1; /* -1 sequence will be ignored */
+
ret = usbhsf_fifo_select(pipe, fifo, 1);
if (ret < 0)
return 0;
@@ -584,6 +588,8 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
/*
* pipe enable to prepare packet receive
*/
+ usbhs_pipe_data_sequence(pipe, pkt->sequence);
+ pkt->sequence = -1; /* -1 sequence will be ignored */
usbhs_pipe_enable(pipe);
usbhsf_rx_irq_ctrl(pipe, 1);
@@ -641,6 +647,7 @@ static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
* "Operation" - "FIFO Buffer Memory" - "FIFO Port Function"
*/
if (0 == rcv_len) {
+ pkt->zero = 1;
usbhsf_fifo_clear(pipe, fifo);
goto usbhs_fifo_read_end;
}
@@ -765,10 +772,10 @@ static void usbhsf_dma_prepare_tasklet(unsigned long data)
struct dma_async_tx_descriptor *desc;
struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
struct device *dev = usbhs_priv_to_dev(priv);
- enum dma_data_direction dir;
+ enum dma_transfer_direction dir;
dma_cookie_t cookie;
- dir = usbhs_pipe_is_dir_in(pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
sg_init_table(&sg, 1);
sg_set_page(&sg, virt_to_page(pkt->dma),
diff --git a/drivers/usb/renesas_usbhs/fifo.h b/drivers/usb/renesas_usbhs/fifo.h
index 32a7b24..f68609c 100644
--- a/drivers/usb/renesas_usbhs/fifo.h
+++ b/drivers/usb/renesas_usbhs/fifo.h
@@ -59,6 +59,7 @@ struct usbhs_pkt {
int trans;
int actual;
int zero;
+ int sequence;
};
struct usbhs_pkt_handle {
@@ -95,7 +96,7 @@ void usbhs_pkt_init(struct usbhs_pkt *pkt);
void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
void (*done)(struct usbhs_priv *priv,
struct usbhs_pkt *pkt),
- void *buf, int len, int zero);
+ void *buf, int len, int zero, int sequence);
struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt);
void usbhs_pkt_start(struct usbhs_pipe *pipe);
diff --git a/drivers/usb/renesas_usbhs/mod.c b/drivers/usb/renesas_usbhs/mod.c
index ad96a38..1b97fb1 100644
--- a/drivers/usb/renesas_usbhs/mod.c
+++ b/drivers/usb/renesas_usbhs/mod.c
@@ -50,7 +50,9 @@ static int usbhsm_autonomy_irq_vbus(struct usbhs_priv *priv,
{
struct platform_device *pdev = usbhs_priv_to_pdev(priv);
- return usbhsc_drvcllbck_notify_hotplug(pdev);
+ renesas_usbhs_call_notify_hotplug(pdev);
+
+ return 0;
}
void usbhs_mod_autonomy_mode(struct usbhs_priv *priv)
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 7f4e803..7542aa9 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -14,6 +14,7 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
+#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -44,7 +45,6 @@ struct usbhsg_uep {
struct usbhsg_gpriv {
struct usb_gadget gadget;
struct usbhs_mod mod;
- struct list_head link;
struct usbhsg_uep *uep;
int uep_size;
@@ -114,16 +114,6 @@ struct usbhsg_recip_handle {
#define usbhsg_status_clr(gp, b) (gp->status &= ~b)
#define usbhsg_status_has(gp, b) (gp->status & b)
-/* controller */
-LIST_HEAD(the_controller_link);
-
-#define usbhsg_for_each_controller(gpriv)\
- list_for_each_entry(gpriv, &the_controller_link, link)
-#define usbhsg_controller_register(gpriv)\
- list_add_tail(&(gpriv)->link, &the_controller_link)
-#define usbhsg_controller_unregister(gpriv)\
- list_del_init(&(gpriv)->link)
-
/*
* queue push/pop
*/
@@ -164,7 +154,7 @@ static void usbhsg_queue_push(struct usbhsg_uep *uep,
req->actual = 0;
req->status = -EINPROGRESS;
usbhs_pkt_push(pipe, pkt, usbhsg_queue_done,
- req->buf, req->length, req->zero);
+ req->buf, req->length, req->zero, -1);
usbhs_pkt_start(pipe);
dev_dbg(dev, "pipe %d : queue push (%d)\n",
@@ -195,7 +185,7 @@ static int usbhsg_dma_map(struct device *dev,
}
if (dma_mapping_error(dev, pkt->dma)) {
- dev_err(dev, "dma mapping error %x\n", pkt->dma);
+ dev_err(dev, "dma mapping error %llx\n", (u64)pkt->dma);
return -EIO;
}
@@ -271,6 +261,8 @@ static int usbhsg_recip_handler_std_clear_endpoint(struct usbhs_priv *priv,
usbhsg_recip_handler_std_control_done(priv, uep, ctrl);
+ usbhs_pkt_start(pipe);
+
return 0;
}
@@ -282,6 +274,145 @@ struct usbhsg_recip_handle req_clear_feature = {
};
/*
+ * USB_TYPE_STANDARD / set feature functions
+ */
+static int usbhsg_recip_handler_std_set_device(struct usbhs_priv *priv,
+ struct usbhsg_uep *uep,
+ struct usb_ctrlrequest *ctrl)
+{
+ switch (le16_to_cpu(ctrl->wValue)) {
+ case USB_DEVICE_TEST_MODE:
+ usbhsg_recip_handler_std_control_done(priv, uep, ctrl);
+ udelay(100);
+ usbhs_sys_set_test_mode(priv, le16_to_cpu(ctrl->wIndex) >> 8);
+ break;
+ default:
+ usbhsg_recip_handler_std_control_done(priv, uep, ctrl);
+ break;
+ }
+
+ return 0;
+}
+
+static int usbhsg_recip_handler_std_set_endpoint(struct usbhs_priv *priv,
+ struct usbhsg_uep *uep,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+
+ usbhs_pipe_stall(pipe);
+
+ usbhsg_recip_handler_std_control_done(priv, uep, ctrl);
+
+ return 0;
+}
+
+struct usbhsg_recip_handle req_set_feature = {
+ .name = "set feature",
+ .device = usbhsg_recip_handler_std_set_device,
+ .interface = usbhsg_recip_handler_std_control_done,
+ .endpoint = usbhsg_recip_handler_std_set_endpoint,
+};
+
+/*
+ * USB_TYPE_STANDARD / get status functions
+ */
+static void __usbhsg_recip_send_complete(struct usb_ep *ep,
+ struct usb_request *req)
+{
+ struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
+
+ /* free allocated recip-buffer/usb_request */
+ kfree(ureq->pkt.buf);
+ usb_ep_free_request(ep, req);
+}
+
+static void __usbhsg_recip_send_status(struct usbhsg_gpriv *gpriv,
+ unsigned short status)
+{
+ struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv);
+ struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(dcp);
+ struct device *dev = usbhsg_gpriv_to_dev(gpriv);
+ struct usb_request *req;
+ unsigned short *buf;
+
+ /* alloc new usb_request for recip */
+ req = usb_ep_alloc_request(&dcp->ep, GFP_ATOMIC);
+ if (!req) {
+ dev_err(dev, "recip request allocation fail\n");
+ return;
+ }
+
+ /* alloc recip data buffer */
+ buf = kmalloc(sizeof(*buf), GFP_ATOMIC);
+ if (!buf) {
+ usb_ep_free_request(&dcp->ep, req);
+ dev_err(dev, "recip data allocation fail\n");
+ return;
+ }
+
+ /* recip data is status */
+ *buf = cpu_to_le16(status);
+
+ /* allocated usb_request/buffer will be freed */
+ req->complete = __usbhsg_recip_send_complete;
+ req->buf = buf;
+ req->length = sizeof(*buf);
+ req->zero = 0;
+
+ /* push packet */
+ pipe->handler = &usbhs_fifo_pio_push_handler;
+ usbhsg_queue_push(dcp, usbhsg_req_to_ureq(req));
+}
+
+static int usbhsg_recip_handler_std_get_device(struct usbhs_priv *priv,
+ struct usbhsg_uep *uep,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
+ unsigned short status = 1 << USB_DEVICE_SELF_POWERED;
+
+ __usbhsg_recip_send_status(gpriv, status);
+
+ return 0;
+}
+
+static int usbhsg_recip_handler_std_get_interface(struct usbhs_priv *priv,
+ struct usbhsg_uep *uep,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
+ unsigned short status = 0;
+
+ __usbhsg_recip_send_status(gpriv, status);
+
+ return 0;
+}
+
+static int usbhsg_recip_handler_std_get_endpoint(struct usbhs_priv *priv,
+ struct usbhsg_uep *uep,
+ struct usb_ctrlrequest *ctrl)
+{
+ struct usbhsg_gpriv *gpriv = usbhsg_uep_to_gpriv(uep);
+ struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+ unsigned short status = 0;
+
+ if (usbhs_pipe_is_stall(pipe))
+ status = 1 << USB_ENDPOINT_HALT;
+
+ __usbhsg_recip_send_status(gpriv, status);
+
+ return 0;
+}
+
+struct usbhsg_recip_handle req_get_status = {
+ .name = "get status",
+ .device = usbhsg_recip_handler_std_get_device,
+ .interface = usbhsg_recip_handler_std_get_interface,
+ .endpoint = usbhsg_recip_handler_std_get_endpoint,
+};
+
+/*
* USB_TYPE handler
*/
static int usbhsg_recip_run_handle(struct usbhs_priv *priv,
@@ -294,7 +425,7 @@ static int usbhsg_recip_run_handle(struct usbhs_priv *priv,
struct usbhs_pipe *pipe;
int recip = ctrl->bRequestType & USB_RECIP_MASK;
int nth = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK;
- int ret;
+ int ret = 0;
int (*func)(struct usbhs_priv *priv, struct usbhsg_uep *uep,
struct usb_ctrlrequest *ctrl);
char *msg;
@@ -303,8 +434,7 @@ static int usbhsg_recip_run_handle(struct usbhs_priv *priv,
pipe = usbhsg_uep_to_pipe(uep);
if (!pipe) {
dev_err(dev, "wrong recip request\n");
- ret = -EINVAL;
- goto usbhsg_recip_run_handle_end;
+ return -EINVAL;
}
switch (recip) {
@@ -327,20 +457,10 @@ static int usbhsg_recip_run_handle(struct usbhs_priv *priv,
}
if (func) {
- unsigned long flags;
-
dev_dbg(dev, "%s (pipe %d :%s)\n", handler->name, nth, msg);
-
- /******************** spin lock ********************/
- usbhs_lock(priv, flags);
ret = func(priv, uep, ctrl);
- usbhs_unlock(priv, flags);
- /******************** spin unlock ******************/
}
-usbhsg_recip_run_handle_end:
- usbhs_pkt_start(pipe);
-
return ret;
}
@@ -412,6 +532,12 @@ static int usbhsg_irq_ctrl_stage(struct usbhs_priv *priv,
case USB_REQ_CLEAR_FEATURE:
recip_handler = &req_clear_feature;
break;
+ case USB_REQ_SET_FEATURE:
+ recip_handler = &req_set_feature;
+ break;
+ case USB_REQ_GET_STATUS:
+ recip_handler = &req_get_status;
+ break;
}
}
@@ -439,14 +565,16 @@ static int usbhsg_pipe_disable(struct usbhsg_uep *uep)
struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
struct usbhs_pkt *pkt;
- usbhs_pipe_disable(pipe);
-
while (1) {
pkt = usbhs_pkt_pop(pipe, NULL);
if (!pkt)
break;
+
+ usbhsg_queue_pop(uep, usbhsg_pkt_to_ureq(pkt), -ECONNRESET);
}
+ usbhs_pipe_disable(pipe);
+
return 0;
}
@@ -681,9 +809,7 @@ static int usbhsg_try_start(struct usbhs_priv *priv, u32 status)
* - function
* - usb module
*/
- usbhs_sys_hispeed_ctrl(priv, 1);
usbhs_sys_function_ctrl(priv, 1);
- usbhs_sys_usb_ctrl(priv, 1);
/*
* enable irq callback
@@ -731,9 +857,8 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status)
gpriv->gadget.speed = USB_SPEED_UNKNOWN;
/* disable sys */
- usbhs_sys_hispeed_ctrl(priv, 0);
+ usbhs_sys_set_test_mode(priv, 0);
usbhs_sys_function_ctrl(priv, 0);
- usbhs_sys_usb_ctrl(priv, 0);
usbhsg_pipe_disable(dcp);
@@ -755,7 +880,7 @@ static int usbhsg_gadget_start(struct usb_gadget *gadget,
if (!driver ||
!driver->setup ||
- driver->speed < USB_SPEED_FULL)
+ driver->max_speed < USB_SPEED_FULL)
return -EINVAL;
/* first hook up the driver ... */
@@ -866,7 +991,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
gpriv->gadget.dev.parent = dev;
gpriv->gadget.name = "renesas_usbhs_udc";
gpriv->gadget.ops = &usbhsg_gadget_ops;
- gpriv->gadget.is_dualspeed = 1;
+ gpriv->gadget.max_speed = USB_SPEED_HIGH;
ret = device_register(&gpriv->gadget.dev);
if (ret < 0)
goto err_add_udc;
@@ -896,8 +1021,6 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv)
}
}
- usbhsg_controller_register(gpriv);
-
ret = usb_add_gadget_udc(dev, &gpriv->gadget);
if (ret)
goto err_register;
@@ -926,8 +1049,6 @@ void usbhs_mod_gadget_remove(struct usbhs_priv *priv)
device_unregister(&gpriv->gadget.dev);
- usbhsg_controller_unregister(gpriv);
-
kfree(gpriv->uep);
kfree(gpriv);
}
diff --git a/drivers/usb/renesas_usbhs/mod_host.c b/drivers/usb/renesas_usbhs/mod_host.c
index 7955de5..1834cf5 100644
--- a/drivers/usb/renesas_usbhs/mod_host.c
+++ b/drivers/usb/renesas_usbhs/mod_host.c
@@ -45,36 +45,34 @@
*
* +--------+ pipes are reused for each uep.
* | udev 1 |-+- [uep 0 (dcp) ] --+ pipe will be switched when
- * +--------+ | | target device was changed
+ * +--------+ | | other device requested
* +- [uep 1 (bulk)] --|---+ +--------------+
* | +--------------> | pipe0 (dcp) |
- * +- [uep 2 (bulk)] --|---|---+ +--------------+
- * | | | | pipe1 (isoc) |
- * +--------+ | | | +--------------+
- * | udev 2 |-+- [uep 0 (dcp) ] --+ +-- |------> | pipe2 (bulk) |
- * +--------+ | | | | +--------------+
- * +- [uep 1 (int) ] --|-+ | +------> | pipe3 (bulk) |
- * | | | | +--------------+
- * +--------+ | +-|---|------> | pipe4 (int) |
- * | udev 3 |-+- [uep 0 (dcp) ] --+ | | +--------------+
- * +--------+ | | | | .... |
- * +- [uep 1 (bulk)] ------+ | | .... |
+ * +- [uep 2 (bulk)] -@ | +--------------+
+ * | | pipe1 (isoc) |
+ * +--------+ | +--------------+
+ * | udev 2 |-+- [uep 0 (dcp) ] -@ +----------> | pipe2 (bulk) |
+ * +--------+ | +--------------+
+ * +- [uep 1 (int) ] ----+ +------> | pipe3 (bulk) |
+ * | | +--------------+
+ * +--------+ +-----|------> | pipe4 (int) |
+ * | udev 3 |-+- [uep 0 (dcp) ] -@ | +--------------+
+ * +--------+ | | | .... |
+ * +- [uep 1 (bulk)] -@ | | .... |
* | |
* +- [uep 2 (bulk)]-----------+
+ *
+ * @ : uep requested free pipe, but all have been used.
+ * now it is waiting for free pipe
*/
/*
* struct
*/
-struct usbhsh_pipe_info {
- unsigned int usr_cnt; /* see usbhsh_endpoint_alloc() */
-};
-
struct usbhsh_request {
struct urb *urb;
struct usbhs_pkt pkt;
- struct list_head ureq_link; /* see hpriv :: ureq_link_xxx */
};
struct usbhsh_device {
@@ -83,11 +81,10 @@ struct usbhsh_device {
};
struct usbhsh_ep {
- struct usbhs_pipe *pipe;
+ struct usbhs_pipe *pipe; /* attached pipe */
struct usbhsh_device *udev; /* attached udev */
+ struct usb_host_endpoint *ep;
struct list_head ep_list; /* list to usbhsh_device */
-
- int maxp;
};
#define USBHSH_DEVICE_MAX 10 /* see DEVADDn / DCPMAXP / PIPEMAXP */
@@ -98,16 +95,9 @@ struct usbhsh_hpriv {
struct usbhsh_device udev[USBHSH_DEVICE_MAX];
- struct usbhsh_pipe_info *pipe_info;
- int pipe_size;
-
u32 port_stat; /* USB_PORT_STAT_xxx */
struct completion setup_ack_done;
-
- /* see usbhsh_req_alloc/free */
- struct list_head ureq_link_active;
- struct list_head ureq_link_free;
};
@@ -119,17 +109,6 @@ static const char usbhsh_hcd_name[] = "renesas_usbhs host";
#define usbhsh_priv_to_hpriv(priv) \
container_of(usbhs_mod_get(priv, USBHS_HOST), struct usbhsh_hpriv, mod)
-#define __usbhsh_for_each_hpipe(start, pos, h, i) \
- for (i = start, pos = (h)->hpipe + i; \
- i < (h)->hpipe_size; \
- i++, pos = (h)->hpipe + i)
-
-#define usbhsh_for_each_hpipe(pos, hpriv, i) \
- __usbhsh_for_each_hpipe(1, pos, hpriv, i)
-
-#define usbhsh_for_each_hpipe_with_dcp(pos, hpriv, i) \
- __usbhsh_for_each_hpipe(0, pos, hpriv, i)
-
#define __usbhsh_for_each_udev(start, pos, h, i) \
for (i = start, pos = (h)->udev + i; \
i < USBHSH_DEVICE_MAX; \
@@ -152,15 +131,20 @@ static const char usbhsh_hcd_name[] = "renesas_usbhs host";
#define usbhsh_ep_to_uep(u) ((u)->hcpriv)
#define usbhsh_uep_to_pipe(u) ((u)->pipe)
#define usbhsh_uep_to_udev(u) ((u)->udev)
+#define usbhsh_uep_to_ep(u) ((u)->ep)
+
#define usbhsh_urb_to_ureq(u) ((u)->hcpriv)
#define usbhsh_urb_to_usbv(u) ((u)->dev)
#define usbhsh_usbv_to_udev(d) dev_get_drvdata(&(d)->dev)
#define usbhsh_udev_to_usbv(h) ((h)->usbv)
+#define usbhsh_udev_is_used(h) usbhsh_udev_to_usbv(h)
-#define usbhsh_pipe_info(p) ((p)->mod_private)
+#define usbhsh_pipe_to_uep(p) ((p)->mod_private)
+#define usbhsh_device_parent(d) (usbhsh_usbv_to_udev((d)->usbv->parent))
+#define usbhsh_device_hubport(d) ((d)->usbv->portnum)
#define usbhsh_device_number(h, d) ((int)((d) - (h)->udev))
#define usbhsh_device_nth(h, d) ((h)->udev + d)
#define usbhsh_device0(h) usbhsh_device_nth(h, 0)
@@ -170,38 +154,13 @@ static const char usbhsh_hcd_name[] = "renesas_usbhs host";
#define usbhsh_port_stat_clear(h, s) ((h)->port_stat &= ~(s))
#define usbhsh_port_stat_get(h) ((h)->port_stat)
-#define usbhsh_pkt_to_req(p) \
+#define usbhsh_pkt_to_ureq(p) \
container_of((void *)p, struct usbhsh_request, pkt)
/*
* req alloc/free
*/
-static void usbhsh_req_list_init(struct usbhsh_hpriv *hpriv)
-{
- INIT_LIST_HEAD(&hpriv->ureq_link_active);
- INIT_LIST_HEAD(&hpriv->ureq_link_free);
-}
-
-static void usbhsh_req_list_quit(struct usbhsh_hpriv *hpriv)
-{
- struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
- struct device *dev = usbhsh_hcd_to_dev(hcd);
- struct usbhsh_request *ureq, *next;
-
- /* kfree all active ureq */
- list_for_each_entry_safe(ureq, next,
- &hpriv->ureq_link_active,
- ureq_link) {
- dev_err(dev, "active ureq (%p) is force freed\n", ureq);
- kfree(ureq);
- }
-
- /* kfree all free ureq */
- list_for_each_entry_safe(ureq, next, &hpriv->ureq_link_free, ureq_link)
- kfree(ureq);
-}
-
-static struct usbhsh_request *usbhsh_req_alloc(struct usbhsh_hpriv *hpriv,
+static struct usbhsh_request *usbhsh_ureq_alloc(struct usbhsh_hpriv *hpriv,
struct urb *urb,
gfp_t mem_flags)
{
@@ -209,270 +168,460 @@ static struct usbhsh_request *usbhsh_req_alloc(struct usbhsh_hpriv *hpriv,
struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
struct device *dev = usbhs_priv_to_dev(priv);
- if (list_empty(&hpriv->ureq_link_free)) {
- /*
- * create new one if there is no free ureq
- */
- ureq = kzalloc(sizeof(struct usbhsh_request), mem_flags);
- if (ureq)
- INIT_LIST_HEAD(&ureq->ureq_link);
- } else {
- /*
- * reuse "free" ureq if exist
- */
- ureq = list_entry(hpriv->ureq_link_free.next,
- struct usbhsh_request,
- ureq_link);
- if (ureq)
- list_del_init(&ureq->ureq_link);
- }
-
+ ureq = kzalloc(sizeof(struct usbhsh_request), mem_flags);
if (!ureq) {
dev_err(dev, "ureq alloc fail\n");
return NULL;
}
usbhs_pkt_init(&ureq->pkt);
-
- /*
- * push it to "active" list
- */
- list_add_tail(&ureq->ureq_link, &hpriv->ureq_link_active);
ureq->urb = urb;
+ usbhsh_urb_to_ureq(urb) = ureq;
return ureq;
}
-static void usbhsh_req_free(struct usbhsh_hpriv *hpriv,
+static void usbhsh_ureq_free(struct usbhsh_hpriv *hpriv,
struct usbhsh_request *ureq)
{
- struct usbhs_pkt *pkt = &ureq->pkt;
+ usbhsh_urb_to_ureq(ureq->urb) = NULL;
+ ureq->urb = NULL;
- usbhs_pkt_init(pkt);
+ kfree(ureq);
+}
+/*
+ * status
+ */
+static int usbhsh_is_running(struct usbhsh_hpriv *hpriv)
+{
/*
- * removed from "active" list,
- * and push it to "free" list
+ * we can decide whether some device is attached or not
+ * by checking mod.irq_attch
+ * see
+ * usbhsh_irq_attch()
+ * usbhsh_irq_dtch()
*/
- ureq->urb = NULL;
- list_del_init(&ureq->ureq_link);
- list_add_tail(&ureq->ureq_link, &hpriv->ureq_link_free);
+ return (hpriv->mod.irq_attch == NULL);
}
/*
- * device control
+ * pipe control
*/
-
-static int usbhsh_device_has_endpoint(struct usbhsh_device *udev)
+static void usbhsh_endpoint_sequence_save(struct usbhsh_hpriv *hpriv,
+ struct urb *urb,
+ struct usbhs_pkt *pkt)
{
- return !list_empty(&udev->ep_list_head);
-}
+ int len = urb->actual_length;
+ int maxp = usb_endpoint_maxp(&urb->ep->desc);
+ int t = 0;
-static struct usbhsh_device *usbhsh_device_alloc(struct usbhsh_hpriv *hpriv,
- struct urb *urb)
-{
- struct usbhsh_device *udev = NULL;
- struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
- struct device *dev = usbhsh_hcd_to_dev(hcd);
- struct usb_device *usbv = usbhsh_urb_to_usbv(urb);
- struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
- int i;
+ /* DCP is out of sequence control */
+ if (usb_pipecontrol(urb->pipe))
+ return;
/*
- * device 0
+ * renesas_usbhs has a limited number of pipes.
+ * So the driver has to re-use these limited pipes for each device/endpoint,
+ * and the DATA0/1 sequence has to be saved for that.
+ * see [image of mod_host]
+ * [HARDWARE LIMITATION]
*/
- if (0 == usb_pipedevice(urb->pipe)) {
- udev = usbhsh_device0(hpriv);
- goto usbhsh_device_find;
- }
/*
- * find unused device
+ * next sequence depends on actual_length
+ *
+ * ex) actual_length = 1147, maxp = 512
+ * data0 : 512
+ * data1 : 512
+ * data0 : 123
+ * data1 is the next sequence
*/
- usbhsh_for_each_udev(udev, hpriv, i) {
- if (usbhsh_udev_to_usbv(udev))
- continue;
- goto usbhsh_device_find;
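+ /*
+ * count the packets of this transfer (a trailing short or
+ * zero-length packet counts as one); an odd count means the
+ * DATA0/1 toggle has advanced.
+ */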
+ t = len / maxp;
+ if (len % maxp)
+ t++;
+ if (pkt->zero)
+ t++;
+ t %= 2;
+
+ if (t)
+ usb_dotoggle(urb->dev,
+ usb_pipeendpoint(urb->pipe),
+ usb_pipeout(urb->pipe));
+}
+
+static struct usbhsh_device *usbhsh_device_get(struct usbhsh_hpriv *hpriv,
+ struct urb *urb);
+
+static int usbhsh_pipe_attach(struct usbhsh_hpriv *hpriv,
+ struct urb *urb)
+{
+ struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
+ struct usbhsh_ep *uep = usbhsh_ep_to_uep(urb->ep);
+ struct usbhsh_device *udev = usbhsh_device_get(hpriv, urb);
+ struct usbhs_pipe *pipe;
+ struct usb_endpoint_descriptor *desc = &urb->ep->desc;
+ struct device *dev = usbhs_priv_to_dev(priv);
+ unsigned long flags;
+ int dir_in_req = !!usb_pipein(urb->pipe);
+ int is_dcp = usb_endpoint_xfer_control(desc);
+ int i, dir_in;
+ int ret = -EBUSY;
+
+ /******************** spin lock ********************/
+ usbhs_lock(priv, flags);
+
+ if (unlikely(usbhsh_uep_to_pipe(uep))) {
+ dev_err(dev, "uep already has pipe\n");
+ goto usbhsh_pipe_attach_done;
}
- dev_err(dev, "no free usbhsh_device\n");
+ usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
- return NULL;
+ /* check pipe type */
+ if (!usbhs_pipe_type_is(pipe, usb_endpoint_type(desc)))
+ continue;
-usbhsh_device_find:
- if (usbhsh_device_has_endpoint(udev))
- dev_warn(dev, "udev have old endpoint\n");
+ /* check pipe direction if normal pipe */
+ if (!is_dcp) {
+ dir_in = !!usbhs_pipe_is_dir_in(pipe);
+ if (0 != (dir_in - dir_in_req))
+ continue;
+ }
- /* uep will be attached */
- INIT_LIST_HEAD(&udev->ep_list_head);
+ /* check pipe is free */
+ if (usbhsh_pipe_to_uep(pipe))
+ continue;
- /*
- * usbhsh_usbv_to_udev()
- * usbhsh_udev_to_usbv()
- * will be enable
- */
- dev_set_drvdata(&usbv->dev, udev);
- udev->usbv = usbv;
+ /*
+ * attach pipe to uep
+ *
+ * usbhs_pipe_config_update() should be called after
+ * usbhs_set_device_config()
+ * see
+ * DCPMAXP/PIPEMAXP
+ */
+ usbhsh_uep_to_pipe(uep) = pipe;
+ usbhsh_pipe_to_uep(pipe) = uep;
- /* set device config */
- usbhs_set_device_speed(priv,
- usbhsh_device_number(hpriv, udev),
- usbhsh_device_number(hpriv, udev),
- 0, /* FIXME no parent */
- usbv->speed);
+ usbhs_pipe_config_update(pipe,
+ usbhsh_device_number(hpriv, udev),
+ usb_endpoint_num(desc),
+ usb_endpoint_maxp(desc));
- dev_dbg(dev, "%s [%d](%p)\n", __func__,
- usbhsh_device_number(hpriv, udev), udev);
+ dev_dbg(dev, "%s [%d-%d(%s:%s)]\n", __func__,
+ usbhsh_device_number(hpriv, udev),
+ usb_endpoint_num(desc),
+ usbhs_pipe_name(pipe),
+ dir_in_req ? "in" : "out");
- return udev;
+ ret = 0;
+ break;
+ }
+
+usbhsh_pipe_attach_done:
+ usbhs_unlock(priv, flags);
+ /******************** spin unlock ******************/
+
+ return ret;
}
-static void usbhsh_device_free(struct usbhsh_hpriv *hpriv,
- struct usbhsh_device *udev)
+static void usbhsh_pipe_detach(struct usbhsh_hpriv *hpriv,
+ struct usbhsh_ep *uep)
{
- struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
- struct device *dev = usbhsh_hcd_to_dev(hcd);
- struct usb_device *usbv = usbhsh_udev_to_usbv(udev);
+ struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
+ struct usbhs_pipe *pipe;
+ struct device *dev = usbhs_priv_to_dev(priv);
+ unsigned long flags;
- dev_dbg(dev, "%s [%d](%p)\n", __func__,
- usbhsh_device_number(hpriv, udev), udev);
+ /******************** spin lock ********************/
+ usbhs_lock(priv, flags);
- if (usbhsh_device_has_endpoint(udev))
- dev_warn(dev, "udev still have endpoint\n");
+ pipe = usbhsh_uep_to_pipe(uep);
- /*
- * usbhsh_usbv_to_udev()
- * usbhsh_udev_to_usbv()
- * will be disable
- */
- dev_set_drvdata(&usbv->dev, NULL);
- udev->usbv = NULL;
+ if (unlikely(!pipe)) {
+ dev_err(dev, "uep doens't have pipe\n");
+ } else {
+ struct usb_host_endpoint *ep = usbhsh_uep_to_ep(uep);
+ struct usbhsh_device *udev = usbhsh_uep_to_udev(uep);
+
+ /* detach pipe from uep */
+ usbhsh_uep_to_pipe(uep) = NULL;
+ usbhsh_pipe_to_uep(pipe) = NULL;
+
+ dev_dbg(dev, "%s [%d-%d(%s)]\n", __func__,
+ usbhsh_device_number(hpriv, udev),
+ usb_endpoint_num(&ep->desc),
+ usbhs_pipe_name(pipe));
+ }
+
+ usbhs_unlock(priv, flags);
+ /******************** spin unlock ******************/
}
/*
- * end-point control
+ * endpoint control
*/
-struct usbhsh_ep *usbhsh_endpoint_alloc(struct usbhsh_hpriv *hpriv,
- struct usbhsh_device *udev,
- struct usb_host_endpoint *ep,
- int dir_in_req,
- gfp_t mem_flags)
+static int usbhsh_endpoint_attach(struct usbhsh_hpriv *hpriv,
+ struct urb *urb,
+ gfp_t mem_flags)
{
struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
- struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
+ struct usbhsh_device *udev = usbhsh_device_get(hpriv, urb);
+ struct usb_host_endpoint *ep = urb->ep;
struct usbhsh_ep *uep;
- struct usbhsh_pipe_info *info;
- struct usbhs_pipe *pipe, *best_pipe;
- struct device *dev = usbhsh_hcd_to_dev(hcd);
+ struct device *dev = usbhs_priv_to_dev(priv);
struct usb_endpoint_descriptor *desc = &ep->desc;
- int type, i, dir_in;
- unsigned int min_usr;
-
- dir_in_req = !!dir_in_req;
+ unsigned long flags;
uep = kzalloc(sizeof(struct usbhsh_ep), mem_flags);
if (!uep) {
dev_err(dev, "usbhsh_ep alloc fail\n");
- return NULL;
+ return -ENOMEM;
}
- if (usb_endpoint_xfer_control(desc)) {
- best_pipe = usbhsh_hpriv_to_dcp(hpriv);
- goto usbhsh_endpoint_alloc_find_pipe;
+ /******************** spin lock ********************/
+ usbhs_lock(priv, flags);
+
+ /*
+ * init endpoint
+ */
+ INIT_LIST_HEAD(&uep->ep_list);
+ list_add_tail(&uep->ep_list, &udev->ep_list_head);
+
+ usbhsh_uep_to_udev(uep) = udev;
+ usbhsh_uep_to_ep(uep) = ep;
+ usbhsh_ep_to_uep(ep) = uep;
+
+ usbhs_unlock(priv, flags);
+ /******************** spin unlock ******************/
+
+ dev_dbg(dev, "%s [%d-%d]\n", __func__,
+ usbhsh_device_number(hpriv, udev),
+ usb_endpoint_num(desc));
+
+ return 0;
+}
+
+static void usbhsh_endpoint_detach(struct usbhsh_hpriv *hpriv,
+ struct usb_host_endpoint *ep)
+{
+ struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
+ struct device *dev = usbhs_priv_to_dev(priv);
+ struct usbhsh_ep *uep = usbhsh_ep_to_uep(ep);
+ unsigned long flags;
+
+ if (!uep)
+ return;
+
+ dev_dbg(dev, "%s [%d-%d]\n", __func__,
+ usbhsh_device_number(hpriv, usbhsh_uep_to_udev(uep)),
+ usb_endpoint_num(&ep->desc));
+
+ if (usbhsh_uep_to_pipe(uep))
+ usbhsh_pipe_detach(hpriv, uep);
+
+ /******************** spin lock ********************/
+ usbhs_lock(priv, flags);
+
+ /* remove this endpoint from udev */
+ list_del_init(&uep->ep_list);
+
+ usbhsh_uep_to_udev(uep) = NULL;
+ usbhsh_uep_to_ep(uep) = NULL;
+ usbhsh_ep_to_uep(ep) = NULL;
+
+ usbhs_unlock(priv, flags);
+ /******************** spin unlock ******************/
+
+ kfree(uep);
+}
+
+static void usbhsh_endpoint_detach_all(struct usbhsh_hpriv *hpriv,
+ struct usbhsh_device *udev)
+{
+ struct usbhsh_ep *uep, *next;
+
+ list_for_each_entry_safe(uep, next, &udev->ep_list_head, ep_list)
+ usbhsh_endpoint_detach(hpriv, usbhsh_uep_to_ep(uep));
+}
+
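For orientation (illustrative, not part of the patch): an endpoint is attached lazily by the first URB that uses it and torn down from usbhsh_endpoint_disable(); the device is detached once its last endpoint disappears. A condensed sketch of that nesting, using only the helpers introduced above and omitting the NULL checks the real code keeps:

/* illustrative teardown order, mirroring usbhsh_endpoint_disable() */
static void example_disable_ep(struct usbhsh_hpriv *hpriv,
			       struct usb_host_endpoint *ep)
{
	struct usbhsh_device *udev = usbhsh_uep_to_udev(usbhsh_ep_to_uep(ep));

	/* endpoint_detach() also detaches any pipe still tied to the uep */
	usbhsh_endpoint_detach(hpriv, ep);

	/* once the last endpoint is gone, release the device slot as well */
	if (!usbhsh_device_has_endpoint(udev))
		usbhsh_device_detach(hpriv, udev);
}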
+/*
+ * device control
+ */
+static int usbhsh_connected_to_rhdev(struct usb_hcd *hcd,
+ struct usbhsh_device *udev)
+{
+ struct usb_device *usbv = usbhsh_udev_to_usbv(udev);
+
+ return hcd->self.root_hub == usbv->parent;
+}
+
+static int usbhsh_device_has_endpoint(struct usbhsh_device *udev)
+{
+ return !list_empty(&udev->ep_list_head);
+}
+
+static struct usbhsh_device *usbhsh_device_get(struct usbhsh_hpriv *hpriv,
+ struct urb *urb)
+{
+ struct usb_device *usbv = usbhsh_urb_to_usbv(urb);
+ struct usbhsh_device *udev = usbhsh_usbv_to_udev(usbv);
+
+	/* usbhsh_device_attach() has not been called yet */
+ if (!udev)
+ return NULL;
+
+ /* if it is device0, return it */
+ if (0 == usb_pipedevice(urb->pipe))
+ return usbhsh_device0(hpriv);
+
+ /* return attached device */
+ return udev;
+}
+
+static struct usbhsh_device *usbhsh_device_attach(struct usbhsh_hpriv *hpriv,
+ struct urb *urb)
+{
+ struct usbhsh_device *udev = NULL;
+ struct usbhsh_device *udev0 = usbhsh_device0(hpriv);
+ struct usbhsh_device *pos;
+ struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
+ struct device *dev = usbhsh_hcd_to_dev(hcd);
+ struct usb_device *usbv = usbhsh_urb_to_usbv(urb);
+ struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
+ unsigned long flags;
+ u16 upphub, hubport;
+ int i;
+
+ /*
+ * This function should be called only while urb is pointing to device0.
+	 * It will attach an unused usbhsh_device to the urb (usbv),
+ * and initialize device0.
+ * You can use usbhsh_device_get() to get "current" udev,
+ * and usbhsh_usbv_to_udev() is for "attached" udev.
+ */
+ if (0 != usb_pipedevice(urb->pipe)) {
+		dev_err(dev, "%s fail: urb isn't pointing to device0\n", __func__);
+ return NULL;
}
+ /******************** spin lock ********************/
+ usbhs_lock(priv, flags);
+
/*
- * find best pipe for endpoint
- * see
- * HARDWARE LIMITATION
+ * find unused device
*/
- type = usb_endpoint_type(desc);
- min_usr = ~0;
- best_pipe = NULL;
- usbhs_for_each_pipe(pipe, priv, i) {
- if (!usbhs_pipe_type_is(pipe, type))
+ usbhsh_for_each_udev(pos, hpriv, i) {
+ if (usbhsh_udev_is_used(pos))
continue;
+ udev = pos;
+ break;
+ }
- dir_in = !!usbhs_pipe_is_dir_in(pipe);
- if (0 != (dir_in - dir_in_req))
- continue;
+ if (udev) {
+ /*
+ * usbhsh_usbv_to_udev()
+ * usbhsh_udev_to_usbv()
+		 * will be enabled
+ */
+ dev_set_drvdata(&usbv->dev, udev);
+ udev->usbv = usbv;
+ }
- info = usbhsh_pipe_info(pipe);
+ usbhs_unlock(priv, flags);
+ /******************** spin unlock ******************/
- if (min_usr > info->usr_cnt) {
- min_usr = info->usr_cnt;
- best_pipe = pipe;
- }
+ if (!udev) {
+ dev_err(dev, "no free usbhsh_device\n");
+ return NULL;
}
- if (unlikely(!best_pipe)) {
- dev_err(dev, "couldn't find best pipe\n");
- kfree(uep);
- return NULL;
+ if (usbhsh_device_has_endpoint(udev)) {
+		dev_warn(dev, "udev has old endpoints\n");
+ usbhsh_endpoint_detach_all(hpriv, udev);
+ }
+
+ if (usbhsh_device_has_endpoint(udev0)) {
+		dev_warn(dev, "udev0 has old endpoints\n");
+ usbhsh_endpoint_detach_all(hpriv, udev0);
}
-usbhsh_endpoint_alloc_find_pipe:
+
+ /* uep will be attached */
+ INIT_LIST_HEAD(&udev0->ep_list_head);
+ INIT_LIST_HEAD(&udev->ep_list_head);
+
/*
- * init uep
+ * set device0 config
*/
- uep->pipe = best_pipe;
- uep->maxp = usb_endpoint_maxp(desc);
- usbhsh_uep_to_udev(uep) = udev;
- usbhsh_ep_to_uep(ep) = uep;
+ usbhs_set_device_config(priv,
+ 0, 0, 0, usbv->speed);
/*
- * update pipe user count
+ * set new device config
*/
- info = usbhsh_pipe_info(best_pipe);
- info->usr_cnt++;
+ upphub = 0;
+ hubport = 0;
+ if (!usbhsh_connected_to_rhdev(hcd, udev)) {
+		/* if udev is not connected to rhdev, its parent is a hub */
+ struct usbhsh_device *parent = usbhsh_device_parent(udev);
- /* init this endpoint, and attach it to udev */
- INIT_LIST_HEAD(&uep->ep_list);
- list_add_tail(&uep->ep_list, &udev->ep_list_head);
+ upphub = usbhsh_device_number(hpriv, parent);
+ hubport = usbhsh_device_hubport(udev);
- /*
- * usbhs_pipe_config_update() should be called after
- * usbhs_device_config()
- * see
- * DCPMAXP/PIPEMAXP
- */
- usbhs_pipe_sequence_data0(uep->pipe);
- usbhs_pipe_config_update(uep->pipe,
- usbhsh_device_number(hpriv, udev),
- usb_endpoint_num(desc),
- uep->maxp);
+		dev_dbg(dev, "%s connected to Hub [%d:%d](%p)\n", __func__,
+ upphub, hubport, parent);
+ }
- dev_dbg(dev, "%s [%d-%s](%p)\n", __func__,
- usbhsh_device_number(hpriv, udev),
- usbhs_pipe_name(uep->pipe), uep);
+ usbhs_set_device_config(priv,
+ usbhsh_device_number(hpriv, udev),
+ upphub, hubport, usbv->speed);
- return uep;
+ dev_dbg(dev, "%s [%d](%p)\n", __func__,
+ usbhsh_device_number(hpriv, udev), udev);
+
+ return udev;
}
-void usbhsh_endpoint_free(struct usbhsh_hpriv *hpriv,
- struct usb_host_endpoint *ep)
+static void usbhsh_device_detach(struct usbhsh_hpriv *hpriv,
+ struct usbhsh_device *udev)
{
+ struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
- struct device *dev = usbhs_priv_to_dev(priv);
- struct usbhsh_ep *uep = usbhsh_ep_to_uep(ep);
- struct usbhsh_pipe_info *info;
+ struct device *dev = usbhsh_hcd_to_dev(hcd);
+ struct usb_device *usbv = usbhsh_udev_to_usbv(udev);
+ unsigned long flags;
- if (!uep)
- return;
+ dev_dbg(dev, "%s [%d](%p)\n", __func__,
+ usbhsh_device_number(hpriv, udev), udev);
- dev_dbg(dev, "%s [%d-%s](%p)\n", __func__,
- usbhsh_device_number(hpriv, usbhsh_uep_to_udev(uep)),
- usbhs_pipe_name(uep->pipe), uep);
+ if (usbhsh_device_has_endpoint(udev)) {
+		dev_warn(dev, "udev still has endpoints\n");
+ usbhsh_endpoint_detach_all(hpriv, udev);
+ }
- info = usbhsh_pipe_info(uep->pipe);
- info->usr_cnt--;
+ /*
+ * There is nothing to do if it is device0.
+ * see
+ * usbhsh_device_attach()
+ * usbhsh_device_get()
+ */
+ if (0 == usbhsh_device_number(hpriv, udev))
+ return;
- /* remove this endpoint from udev */
- list_del_init(&uep->ep_list);
+ /******************** spin lock ********************/
+ usbhs_lock(priv, flags);
- usbhsh_uep_to_udev(uep) = NULL;
- usbhsh_ep_to_uep(ep) = NULL;
+ /*
+ * usbhsh_usbv_to_udev()
+ * usbhsh_udev_to_usbv()
+	 * will be disabled
+ */
+ dev_set_drvdata(&usbv->dev, NULL);
+ udev->usbv = NULL;
- kfree(uep);
+ usbhs_unlock(priv, flags);
+ /******************** spin unlock ******************/
}
/*
@@ -480,11 +629,12 @@ void usbhsh_endpoint_free(struct usbhsh_hpriv *hpriv,
*/
static void usbhsh_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
{
- struct usbhsh_request *ureq = usbhsh_pkt_to_req(pkt);
+ struct usbhsh_request *ureq = usbhsh_pkt_to_ureq(pkt);
struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
struct urb *urb = ureq->urb;
struct device *dev = usbhs_priv_to_dev(priv);
+ int status = 0;
dev_dbg(dev, "%s\n", __func__);
@@ -493,29 +643,43 @@ static void usbhsh_queue_done(struct usbhs_priv *priv, struct usbhs_pkt *pkt)
return;
}
+ if (!usbhsh_is_running(hpriv))
+ status = -ESHUTDOWN;
+
urb->actual_length = pkt->actual;
- usbhsh_req_free(hpriv, ureq);
- usbhsh_urb_to_ureq(urb) = NULL;
+ usbhsh_ureq_free(hpriv, ureq);
+
+ usbhsh_endpoint_sequence_save(hpriv, urb, pkt);
+ usbhsh_pipe_detach(hpriv, usbhsh_ep_to_uep(urb->ep));
usb_hcd_unlink_urb_from_ep(hcd, urb);
- usb_hcd_giveback_urb(hcd, urb, 0);
+ usb_hcd_giveback_urb(hcd, urb, status);
}
static int usbhsh_queue_push(struct usb_hcd *hcd,
- struct usbhs_pipe *pipe,
- struct urb *urb)
+ struct urb *urb,
+ gfp_t mem_flags)
{
- struct usbhsh_request *ureq = usbhsh_urb_to_ureq(urb);
- struct usbhs_pkt *pkt = &ureq->pkt;
+ struct usbhsh_hpriv *hpriv = usbhsh_hcd_to_hpriv(hcd);
+ struct usbhsh_ep *uep = usbhsh_ep_to_uep(urb->ep);
+ struct usbhs_pipe *pipe = usbhsh_uep_to_pipe(uep);
struct device *dev = usbhsh_hcd_to_dev(hcd);
+ struct usbhsh_request *ureq;
void *buf;
- int len;
+ int len, sequence;
if (usb_pipeisoc(urb->pipe)) {
dev_err(dev, "pipe iso is not supported now\n");
return -EIO;
}
+ /* this ureq will be freed on usbhsh_queue_done() */
+ ureq = usbhsh_ureq_alloc(hpriv, urb, mem_flags);
+ if (unlikely(!ureq)) {
+ dev_err(dev, "ureq alloc fail\n");
+ return -ENOMEM;
+ }
+
if (usb_pipein(urb->pipe))
pipe->handler = &usbhs_fifo_pio_pop_handler;
else
@@ -524,25 +688,59 @@ static int usbhsh_queue_push(struct usb_hcd *hcd,
buf = (void *)(urb->transfer_buffer + urb->actual_length);
len = urb->transfer_buffer_length - urb->actual_length;
+ sequence = usb_gettoggle(urb->dev,
+ usb_pipeendpoint(urb->pipe),
+ usb_pipeout(urb->pipe));
+
dev_dbg(dev, "%s\n", __func__);
- usbhs_pkt_push(pipe, pkt, usbhsh_queue_done,
- buf, len, (urb->transfer_flags & URB_ZERO_PACKET));
+ usbhs_pkt_push(pipe, &ureq->pkt, usbhsh_queue_done,
+ buf, len, (urb->transfer_flags & URB_ZERO_PACKET),
+ sequence);
+
usbhs_pkt_start(pipe);
return 0;
}
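Note on the new sequence argument (a hedged sketch, not part of the patch): the push path restores the data toggle from the USB core with usb_gettoggle() above, and the completion path is expected to write it back through usbhsh_endpoint_sequence_save() in usbhsh_queue_done(); that helper's body sits outside this hunk. A hypothetical save helper, shown only to illustrate the round trip, which may differ from the driver's real implementation:

/* hypothetical sketch; usb_settoggle() is the core counterpart of usb_gettoggle() */
static void example_sequence_save(struct urb *urb, int next_toggle)
{
	/* record the toggle so the next usbhsh_queue_push() starts correctly */
	usb_settoggle(urb->dev,
		      usb_pipeendpoint(urb->pipe),
		      usb_pipeout(urb->pipe),
		      next_toggle);
}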
+static void usbhsh_queue_force_pop(struct usbhs_priv *priv,
+ struct usbhs_pipe *pipe)
+{
+ struct usbhs_pkt *pkt;
+
+ while (1) {
+ pkt = usbhs_pkt_pop(pipe, NULL);
+ if (!pkt)
+ break;
+
+ /*
+		 * if all packets are gone, usbhsh_endpoint_disable()
+		 * will be called.
+		 * then, the attached device/endpoint/pipe will be detached
+ */
+ usbhsh_queue_done(priv, pkt);
+ }
+}
+
+static void usbhsh_queue_force_pop_all(struct usbhs_priv *priv)
+{
+ struct usbhs_pipe *pos;
+ int i;
+
+ usbhs_for_each_pipe_with_dcp(pos, priv, i)
+ usbhsh_queue_force_pop(priv, pos);
+}
+
/*
* DCP setup stage
*/
static int usbhsh_is_request_address(struct urb *urb)
{
- struct usb_ctrlrequest *cmd;
+ struct usb_ctrlrequest *req;
- cmd = (struct usb_ctrlrequest *)urb->setup_packet;
+ req = (struct usb_ctrlrequest *)urb->setup_packet;
- if ((DeviceOutRequest == cmd->bRequestType << 8) &&
- (USB_REQ_SET_ADDRESS == cmd->bRequest))
+ if ((DeviceOutRequest == req->bRequestType << 8) &&
+ (USB_REQ_SET_ADDRESS == req->bRequest))
return 1;
else
return 0;
@@ -570,11 +768,15 @@ static void usbhsh_setup_stage_packet_push(struct usbhsh_hpriv *hpriv,
/*
* renesas_usbhs can not use original usb address.
* see HARDWARE LIMITATION.
- * modify usb address here.
+	 * modify usb address here to use the attached device.
+ * see usbhsh_device_attach()
*/
if (usbhsh_is_request_address(urb)) {
- /* FIXME */
- req.wValue = 1;
+ struct usb_device *usbv = usbhsh_urb_to_usbv(urb);
+ struct usbhsh_device *udev = usbhsh_usbv_to_udev(usbv);
+
+		/* udev is an attached device */
+ req.wValue = usbhsh_device_number(hpriv, udev);
dev_dbg(dev, "create new address - %d\n", req.wValue);
}
@@ -595,82 +797,80 @@ static void usbhsh_setup_stage_packet_push(struct usbhsh_hpriv *hpriv,
static void usbhsh_data_stage_packet_done(struct usbhs_priv *priv,
struct usbhs_pkt *pkt)
{
- struct usbhsh_request *ureq = usbhsh_pkt_to_req(pkt);
+ struct usbhsh_request *ureq = usbhsh_pkt_to_ureq(pkt);
struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
- struct urb *urb = ureq->urb;
/* this ureq was connected to urb when usbhsh_urb_enqueue() */
- usbhsh_req_free(hpriv, ureq);
- usbhsh_urb_to_ureq(urb) = NULL;
+ usbhsh_ureq_free(hpriv, ureq);
}
-static void usbhsh_data_stage_packet_push(struct usbhsh_hpriv *hpriv,
- struct urb *urb,
- struct usbhs_pipe *pipe)
+static int usbhsh_data_stage_packet_push(struct usbhsh_hpriv *hpriv,
+ struct urb *urb,
+ struct usbhs_pipe *pipe,
+ gfp_t mem_flags)
+
{
struct usbhsh_request *ureq;
- struct usbhs_pkt *pkt;
- /*
- * FIXME
- *
- * data stage uses ureq which is connected to urb
- * see usbhsh_urb_enqueue() :: alloc new request.
- * it will be freed in usbhsh_data_stage_packet_done()
- */
- ureq = usbhsh_urb_to_ureq(urb);
- pkt = &ureq->pkt;
+ /* this ureq will be freed on usbhsh_data_stage_packet_done() */
+ ureq = usbhsh_ureq_alloc(hpriv, urb, mem_flags);
+ if (unlikely(!ureq))
+ return -ENOMEM;
if (usb_pipein(urb->pipe))
pipe->handler = &usbhs_dcp_data_stage_in_handler;
else
pipe->handler = &usbhs_dcp_data_stage_out_handler;
- usbhs_pkt_push(pipe, pkt,
+ usbhs_pkt_push(pipe, &ureq->pkt,
usbhsh_data_stage_packet_done,
urb->transfer_buffer,
urb->transfer_buffer_length,
- (urb->transfer_flags & URB_ZERO_PACKET));
+ (urb->transfer_flags & URB_ZERO_PACKET),
+ -1);
+
+ return 0;
}
/*
* DCP status stage
*/
-static void usbhsh_status_stage_packet_push(struct usbhsh_hpriv *hpriv,
+static int usbhsh_status_stage_packet_push(struct usbhsh_hpriv *hpriv,
struct urb *urb,
- struct usbhs_pipe *pipe)
+ struct usbhs_pipe *pipe,
+ gfp_t mem_flags)
{
struct usbhsh_request *ureq;
- struct usbhs_pkt *pkt;
- /*
- * FIXME
- *
- * status stage uses allocated ureq.
- * it will be freed on usbhsh_queue_done()
- */
- ureq = usbhsh_req_alloc(hpriv, urb, GFP_KERNEL);
- pkt = &ureq->pkt;
+ /* This ureq will be freed on usbhsh_queue_done() */
+ ureq = usbhsh_ureq_alloc(hpriv, urb, mem_flags);
+ if (unlikely(!ureq))
+ return -ENOMEM;
if (usb_pipein(urb->pipe))
pipe->handler = &usbhs_dcp_status_stage_in_handler;
else
pipe->handler = &usbhs_dcp_status_stage_out_handler;
- usbhs_pkt_push(pipe, pkt,
+ usbhs_pkt_push(pipe, &ureq->pkt,
usbhsh_queue_done,
NULL,
urb->transfer_buffer_length,
- 0);
+ 0, -1);
+
+ return 0;
}
static int usbhsh_dcp_queue_push(struct usb_hcd *hcd,
- struct usbhsh_hpriv *hpriv,
- struct usbhs_pipe *pipe,
- struct urb *urb)
+ struct urb *urb,
+ gfp_t mflags)
{
+ struct usbhsh_hpriv *hpriv = usbhsh_hcd_to_hpriv(hcd);
+ struct usbhsh_ep *uep = usbhsh_ep_to_uep(urb->ep);
+ struct usbhs_pipe *pipe = usbhsh_uep_to_pipe(uep);
struct device *dev = usbhsh_hcd_to_dev(hcd);
+ int ret;
dev_dbg(dev, "%s\n", __func__);
@@ -686,13 +886,22 @@ static int usbhsh_dcp_queue_push(struct usb_hcd *hcd,
*
* It is pushed only when urb has buffer.
*/
- if (urb->transfer_buffer_length)
- usbhsh_data_stage_packet_push(hpriv, urb, pipe);
+ if (urb->transfer_buffer_length) {
+ ret = usbhsh_data_stage_packet_push(hpriv, urb, pipe, mflags);
+ if (ret < 0) {
+ dev_err(dev, "data stage failed\n");
+ return ret;
+ }
+ }
/*
* status stage
*/
- usbhsh_status_stage_packet_push(hpriv, urb, pipe);
+ ret = usbhsh_status_stage_packet_push(hpriv, urb, pipe, mflags);
+ if (ret < 0) {
+ dev_err(dev, "status stage failed\n");
+ return ret;
+ }
/*
* start pushed packets
@@ -729,71 +938,82 @@ static int usbhsh_urb_enqueue(struct usb_hcd *hcd,
struct usbhsh_hpriv *hpriv = usbhsh_hcd_to_hpriv(hcd);
struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
struct device *dev = usbhs_priv_to_dev(priv);
- struct usb_device *usbv = usbhsh_urb_to_usbv(urb);
struct usb_host_endpoint *ep = urb->ep;
- struct usbhsh_request *ureq;
- struct usbhsh_device *udev, *new_udev = NULL;
- struct usbhs_pipe *pipe;
- struct usbhsh_ep *uep;
+ struct usbhsh_device *new_udev = NULL;
int is_dir_in = usb_pipein(urb->pipe);
-
+ int i;
int ret;
dev_dbg(dev, "%s (%s)\n", __func__, is_dir_in ? "in" : "out");
+ if (!usbhsh_is_running(hpriv)) {
+ ret = -EIO;
+ dev_err(dev, "host is not running\n");
+ goto usbhsh_urb_enqueue_error_not_linked;
+ }
+
ret = usb_hcd_link_urb_to_ep(hcd, urb);
- if (ret)
+ if (ret) {
+ dev_err(dev, "urb link failed\n");
goto usbhsh_urb_enqueue_error_not_linked;
+ }
/*
- * get udev
+ * attach udev if needed
+ * see [image of mod_host]
*/
- udev = usbhsh_usbv_to_udev(usbv);
- if (!udev) {
- new_udev = usbhsh_device_alloc(hpriv, urb);
- if (!new_udev)
+ if (!usbhsh_device_get(hpriv, urb)) {
+ new_udev = usbhsh_device_attach(hpriv, urb);
+ if (!new_udev) {
+ ret = -EIO;
+ dev_err(dev, "device attach failed\n");
goto usbhsh_urb_enqueue_error_not_linked;
-
- udev = new_udev;
+ }
}
/*
- * get uep
+ * attach endpoint if needed
+ * see [image of mod_host]
*/
- uep = usbhsh_ep_to_uep(ep);
- if (!uep) {
- uep = usbhsh_endpoint_alloc(hpriv, udev, ep,
- is_dir_in, mem_flags);
- if (!uep)
+ if (!usbhsh_ep_to_uep(ep)) {
+ ret = usbhsh_endpoint_attach(hpriv, urb, mem_flags);
+ if (ret < 0) {
+ dev_err(dev, "endpoint attach failed\n");
goto usbhsh_urb_enqueue_error_free_device;
+ }
}
- pipe = usbhsh_uep_to_pipe(uep);
/*
- * alloc new request
+ * attach pipe to endpoint
+ * see [image of mod_host]
*/
- ureq = usbhsh_req_alloc(hpriv, urb, mem_flags);
- if (unlikely(!ureq)) {
- ret = -ENOMEM;
+ for (i = 0; i < 1024; i++) {
+ ret = usbhsh_pipe_attach(hpriv, urb);
+ if (ret < 0)
+ msleep(100);
+ else
+ break;
+ }
+ if (ret < 0) {
+ dev_err(dev, "pipe attach failed\n");
goto usbhsh_urb_enqueue_error_free_endpoint;
}
- usbhsh_urb_to_ureq(urb) = ureq;
/*
* push packet
*/
if (usb_pipecontrol(urb->pipe))
- usbhsh_dcp_queue_push(hcd, hpriv, pipe, urb);
+ ret = usbhsh_dcp_queue_push(hcd, urb, mem_flags);
else
- usbhsh_queue_push(hcd, pipe, urb);
+ ret = usbhsh_queue_push(hcd, urb, mem_flags);
- return 0;
+ return ret;
usbhsh_urb_enqueue_error_free_endpoint:
- usbhsh_endpoint_free(hpriv, ep);
+ usbhsh_endpoint_detach(hpriv, ep);
usbhsh_urb_enqueue_error_free_device:
if (new_udev)
- usbhsh_device_free(hpriv, new_udev);
+ usbhsh_device_detach(hpriv, new_udev);
usbhsh_urb_enqueue_error_not_linked:
dev_dbg(dev, "%s error\n", __func__);
@@ -807,8 +1027,11 @@ static int usbhsh_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
struct usbhsh_request *ureq = usbhsh_urb_to_ureq(urb);
if (ureq) {
- usbhsh_req_free(hpriv, ureq);
- usbhsh_urb_to_ureq(urb) = NULL;
+ struct usbhs_priv *priv = usbhsh_hpriv_to_priv(hpriv);
+ struct usbhs_pkt *pkt = &ureq->pkt;
+
+ usbhs_pkt_pop(pkt->pipe, pkt);
+ usbhsh_queue_done(priv, pkt);
}
return 0;
@@ -823,7 +1046,7 @@ static void usbhsh_endpoint_disable(struct usb_hcd *hcd,
/*
* this function might be called manytimes by same hcd/ep
- * in-endpoitn == out-endpoint if ep == dcp.
+ * in-endpoint == out-endpoint if ep == dcp.
*/
if (!uep)
return;
@@ -831,15 +1054,14 @@ static void usbhsh_endpoint_disable(struct usb_hcd *hcd,
udev = usbhsh_uep_to_udev(uep);
hpriv = usbhsh_hcd_to_hpriv(hcd);
- usbhsh_endpoint_free(hpriv, ep);
- ep->hcpriv = NULL;
+ usbhsh_endpoint_detach(hpriv, ep);
/*
* if there is no endpoint,
* free device
*/
if (!usbhsh_device_has_endpoint(udev))
- usbhsh_device_free(hpriv, udev);
+ usbhsh_device_detach(hpriv, udev);
}
static int usbhsh_hub_status_data(struct usb_hcd *hcd, char *buf)
@@ -919,6 +1141,8 @@ static int __usbhsh_hub_port_feature(struct usbhsh_hpriv *hpriv,
USB_PORT_STAT_HIGH_SPEED |
USB_PORT_STAT_LOW_SPEED);
+ usbhsh_queue_force_pop_all(priv);
+
usbhs_bus_send_reset(priv);
msleep(20);
usbhs_bus_send_sof_enable(priv);
@@ -1082,6 +1306,20 @@ static int usbhsh_irq_attch(struct usbhs_priv *priv,
usbhsh_port_stat_set(hpriv, USB_PORT_STAT_CONNECTION);
usbhsh_port_stat_set(hpriv, USB_PORT_STAT_C_CONNECTION << 16);
+ /*
+	 * attch interrupt might happen infinitely on some devices
+	 * (on a self-powered USB hub?)
+ * disable it here.
+ *
+ * usbhsh_is_running() becomes effective
+ * according to this process.
+ * see
+ * usbhsh_is_running()
+ * usbhsh_urb_enqueue()
+ */
+ hpriv->mod.irq_attch = NULL;
+ usbhs_irq_callback_update(priv, &hpriv->mod);
+
return 0;
}
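Note (an assumption, not code from the patch): the comment above ties usbhsh_is_running() to whether the ATTCH callback is currently registered; attach clears it and detach (next hunk) restores it. The helper itself is outside this diff, but under that reading a minimal implementation would look roughly like this:

/* sketch, assuming "running" means the ATTCH interrupt has been consumed */
static int example_is_running(struct usbhsh_hpriv *hpriv)
{
	/*
	 * irq_attch is set to NULL in usbhsh_irq_attch() and restored in
	 * usbhsh_irq_dtch(), so a NULL callback means a device is attached
	 * and usbhsh_urb_enqueue() is allowed to queue URBs.
	 */
	return hpriv->mod.irq_attch == NULL;
}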
@@ -1096,6 +1334,24 @@ static int usbhsh_irq_dtch(struct usbhs_priv *priv,
usbhsh_port_stat_clear(hpriv, USB_PORT_STAT_CONNECTION);
usbhsh_port_stat_set(hpriv, USB_PORT_STAT_C_CONNECTION << 16);
+ /*
+ * enable attch interrupt again
+ *
+ * usbhsh_is_running() becomes invalid
+ * according to this process.
+ * see
+ * usbhsh_is_running()
+ * usbhsh_urb_enqueue()
+ */
+ hpriv->mod.irq_attch = usbhsh_irq_attch;
+ usbhs_irq_callback_update(priv, &hpriv->mod);
+
+ /*
+ * usbhsh_queue_force_pop_all() should be called
+ * after usbhsh_is_running() becomes invalid.
+ */
+ usbhsh_queue_force_pop_all(priv);
+
return 0;
}
@@ -1131,7 +1387,6 @@ static int usbhsh_irq_setup_err(struct usbhs_priv *priv,
static void usbhsh_pipe_init_for_host(struct usbhs_priv *priv)
{
struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
- struct usbhsh_pipe_info *pipe_info = hpriv->pipe_info;
struct usbhs_pipe *pipe;
u32 *pipe_type = usbhs_get_dparam(priv, pipe_type);
int pipe_size = usbhs_get_dparam(priv, pipe_size);
@@ -1140,7 +1395,6 @@ static void usbhsh_pipe_init_for_host(struct usbhs_priv *priv)
/* init all pipe */
old_type = USB_ENDPOINT_XFER_CONTROL;
for (i = 0; i < pipe_size; i++) {
- pipe_info[i].usr_cnt = 0;
/*
* data "output" will be finished as soon as possible,
@@ -1174,7 +1428,7 @@ static void usbhsh_pipe_init_for_host(struct usbhs_priv *priv)
dir_in);
}
- pipe->mod_private = pipe_info + i;
+ pipe->mod_private = NULL;
}
}
@@ -1205,9 +1459,7 @@ static int usbhsh_start(struct usbhs_priv *priv)
* - host
* - usb module
*/
- usbhs_sys_hispeed_ctrl(priv, 1);
usbhs_sys_host_ctrl(priv, 1);
- usbhs_sys_usb_ctrl(priv, 1);
/*
* enable irq callback
@@ -1242,9 +1494,7 @@ static int usbhsh_stop(struct usbhs_priv *priv)
usb_remove_hcd(hcd);
/* disable sys */
- usbhs_sys_hispeed_ctrl(priv, 0);
usbhs_sys_host_ctrl(priv, 0);
- usbhs_sys_usb_ctrl(priv, 0);
dev_dbg(dev, "quit host\n");
@@ -1255,10 +1505,8 @@ int usbhs_mod_host_probe(struct usbhs_priv *priv)
{
struct usbhsh_hpriv *hpriv;
struct usb_hcd *hcd;
- struct usbhsh_pipe_info *pipe_info;
struct usbhsh_device *udev;
struct device *dev = usbhs_priv_to_dev(priv);
- int pipe_size = usbhs_get_dparam(priv, pipe_size);
int i;
/* initialize hcd */
@@ -1269,12 +1517,6 @@ int usbhs_mod_host_probe(struct usbhs_priv *priv)
}
hcd->has_tt = 1; /* for low/full speed */
- pipe_info = kzalloc(sizeof(*pipe_info) * pipe_size, GFP_KERNEL);
- if (!pipe_info) {
- dev_err(dev, "Could not allocate pipe_info\n");
- goto usbhs_mod_host_probe_err;
- }
-
/*
* CAUTION
*
@@ -1294,9 +1536,6 @@ int usbhs_mod_host_probe(struct usbhs_priv *priv)
hpriv->mod.name = "host";
hpriv->mod.start = usbhsh_start;
hpriv->mod.stop = usbhsh_stop;
- hpriv->pipe_info = pipe_info;
- hpriv->pipe_size = pipe_size;
- usbhsh_req_list_init(hpriv);
usbhsh_port_stat_init(hpriv);
/* init all device */
@@ -1308,11 +1547,6 @@ int usbhs_mod_host_probe(struct usbhs_priv *priv)
dev_info(dev, "host probed\n");
return 0;
-
-usbhs_mod_host_probe_err:
- usb_put_hcd(hcd);
-
- return -ENOMEM;
}
int usbhs_mod_host_remove(struct usbhs_priv *priv)
@@ -1320,8 +1554,6 @@ int usbhs_mod_host_remove(struct usbhs_priv *priv)
struct usbhsh_hpriv *hpriv = usbhsh_priv_to_hpriv(priv);
struct usb_hcd *hcd = usbhsh_hpriv_to_hcd(hpriv);
- usbhsh_req_list_quit(hpriv);
-
usb_put_hcd(hcd);
return 0;
diff --git a/drivers/usb/renesas_usbhs/pipe.c b/drivers/usb/renesas_usbhs/pipe.c
index c74389c..feb06d6 100644
--- a/drivers/usb/renesas_usbhs/pipe.c
+++ b/drivers/usb/renesas_usbhs/pipe.c
@@ -257,6 +257,13 @@ void usbhs_pipe_stall(struct usbhs_pipe *pipe)
}
}
+int usbhs_pipe_is_stall(struct usbhs_pipe *pipe)
+{
+ u16 pid = usbhsp_pipectrl_get(pipe) & PID_MASK;
+
+ return (int)(pid == PID_STALL10 || pid == PID_STALL11);
+}
+
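usbhs_pipe_is_stall() lets callers poll the PID bits for a halted pipe; its user is outside this diff, so the snippet below is only a hypothetical example of the intended check:

/* hypothetical helper: map a STALLed pipe to the usual URB status */
static int example_pipe_status(struct usbhs_pipe *pipe)
{
	return usbhs_pipe_is_stall(pipe) ? -EPIPE : 0;
}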
/*
* pipe setup
*/
@@ -323,8 +330,7 @@ static u16 usbhsp_setup_pipecfg(struct usbhs_pipe *pipe,
if (dir_in)
usbhsp_flags_set(pipe, IS_DIR_HOST);
- if ((is_host && !dir_in) ||
- (!is_host && dir_in))
+ if (!!is_host ^ !!dir_in)
dir |= DIR_OUT;
if (!dir)
@@ -471,10 +477,27 @@ int usbhs_pipe_is_dir_host(struct usbhs_pipe *pipe)
return usbhsp_flags_has(pipe, IS_DIR_HOST);
}
-void usbhs_pipe_data_sequence(struct usbhs_pipe *pipe, int data)
+void usbhs_pipe_data_sequence(struct usbhs_pipe *pipe, int sequence)
{
u16 mask = (SQCLR | SQSET);
- u16 val = (data) ? SQSET : SQCLR;
+ u16 val;
+
+ /*
+ * sequence
+ * 0 : data0
+ * 1 : data1
+ * -1 : no change
+ */
+ switch (sequence) {
+ case 0:
+ val = SQCLR;
+ break;
+ case 1:
+ val = SQSET;
+ break;
+ default:
+ return;
+ }
usbhsp_pipectrl_set(pipe, mask, val);
}
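Usage sketch for the tri-state value (illustrative, not part of the patch): mod_host passes the toggle it read with usb_gettoggle(), 0 or 1, for bulk and interrupt transfers, and -1 from the DCP stages to leave the hardware sequence bit untouched:

/* illustrative callers of the reworked usbhs_pipe_data_sequence() */
usbhs_pipe_data_sequence(pipe, 0);	/* force DATA0 (writes SQCLR) */
usbhs_pipe_data_sequence(pipe, 1);	/* force DATA1 (writes SQSET) */
usbhs_pipe_data_sequence(pipe, -1);	/* keep the current toggle    */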
diff --git a/drivers/usb/renesas_usbhs/pipe.h b/drivers/usb/renesas_usbhs/pipe.h
index 6334fc6..fa18b7d 100644
--- a/drivers/usb/renesas_usbhs/pipe.h
+++ b/drivers/usb/renesas_usbhs/pipe.h
@@ -87,6 +87,7 @@ int usbhs_pipe_is_accessible(struct usbhs_pipe *pipe);
void usbhs_pipe_enable(struct usbhs_pipe *pipe);
void usbhs_pipe_disable(struct usbhs_pipe *pipe);
void usbhs_pipe_stall(struct usbhs_pipe *pipe);
+int usbhs_pipe_is_stall(struct usbhs_pipe *pipe);
void usbhs_pipe_select_fifo(struct usbhs_pipe *pipe, struct usbhs_fifo *fifo);
void usbhs_pipe_config_update(struct usbhs_pipe *pipe, u16 devsel,
u16 epnum, u16 maxp);
diff --git a/drivers/usb/serial/ChangeLog.history b/drivers/usb/serial/ChangeLog.history
deleted file mode 100644
index f13fd48..0000000
--- a/drivers/usb/serial/ChangeLog.history
+++ /dev/null
@@ -1,730 +0,0 @@
-This is the contents of some of the drivers/usb/serial/ files that had old
-changelog comments. They were quite old, and out of date, and we don't keep
-them anymore, so I've put them here, away from the source files, in case
-people still care to see them.
-
-- Greg Kroah-Hartman <greg@kroah.com> October 20, 2005
-
------------------------------------------------------------------------
-usb-serial.h Change Log comments:
-
- (03/26/2002) gkh
- removed the port->tty check from port_paranoia_check() due to serial
- consoles not having a tty device assigned to them.
-
- (12/03/2001) gkh
- removed active from the port structure.
- added documentation to the usb_serial_device_type structure
-
- (10/10/2001) gkh
- added vendor and product to serial structure. Needed to determine device
- owner when the device is disconnected.
-
- (05/30/2001) gkh
- added sem to port structure and removed port_lock
-
- (10/05/2000) gkh
- Added interrupt_in_endpointAddress and bulk_in_endpointAddress to help
- fix bug with urb->dev not being set properly, now that the usb core
- needs it.
-
- (09/11/2000) gkh
- Added usb_serial_debug_data function to help get rid of #DEBUG in the
- drivers.
-
- (08/28/2000) gkh
- Added port_lock to port structure.
-
- (08/08/2000) gkh
- Added open_count to port structure.
-
- (07/23/2000) gkh
- Added bulk_out_endpointAddress to port structure.
-
- (07/19/2000) gkh, pberger, and borchers
- Modifications to allow usb-serial drivers to be modules.
-
------------------------------------------------------------------------
-usb-serial.c Change Log comments:
-
- (12/10/2002) gkh
- Split the ports off into their own struct device, and added a
- usb-serial bus driver.
-
- (11/19/2002) gkh
- removed a few #ifdefs for the generic code and cleaned up the failure
- logic in initialization.
-
- (10/02/2002) gkh
- moved the console code to console.c and out of this file.
-
- (06/05/2002) gkh
- moved location of startup() call in serial_probe() until after all
- of the port information and endpoints are initialized. This makes
- things easier for some drivers.
-
- (04/10/2002) gkh
- added serial_read_proc function which creates a
- /proc/tty/driver/usb-serial file.
-
- (03/27/2002) gkh
- Got USB serial console code working properly and merged into the main
- version of the tree. Thanks to Randy Dunlap for the initial version
- of this code, and for pushing me to finish it up.
- The USB serial console works with any usb serial driver device.
-
- (03/21/2002) gkh
- Moved all manipulation of port->open_count into the core. Now the
- individual driver's open and close functions are called only when the
- first open() and last close() is called. Making the drivers a bit
- smaller and simpler.
- Fixed a bug if a driver didn't have the owner field set.
-
- (02/26/2002) gkh
- Moved all locking into the main serial_* functions, instead of having
- the individual drivers have to grab the port semaphore. This should
- reduce races.
- Reworked the MOD_INC logic a bit to always increment and decrement, even
- if the generic driver is being used.
-
- (10/10/2001) gkh
- usb_serial_disconnect() now sets the serial->dev pointer is to NULL to
- help prevent child drivers from accessing the device since it is now
- gone.
-
- (09/13/2001) gkh
- Moved generic driver initialize after we have registered with the USB
- core. Thanks to Randy Dunlap for pointing this problem out.
-
- (07/03/2001) gkh
- Fixed module paramater size. Thanks to John Brockmeyer for the pointer.
- Fixed vendor and product getting defined through the MODULE_PARM macro
- if the Generic driver wasn't compiled in.
- Fixed problem with generic_shutdown() not being called for drivers that
- don't have a shutdown() function.
-
- (06/06/2001) gkh
- added evil hack that is needed for the prolific pl2303 device due to the
- crazy way its endpoints are set up.
-
- (05/30/2001) gkh
- switched from using spinlock to a semaphore, which fixes lots of problems.
-
- (04/08/2001) gb
- Identify version on module load.
-
- 2001_02_05 gkh
- Fixed buffer overflows bug with the generic serial driver. Thanks to
- Todd Squires <squirest@ct0.com> for fixing this.
-
- (01/10/2001) gkh
- Fixed bug where the generic serial adaptor grabbed _any_ device that was
- offered to it.
-
- (12/12/2000) gkh
- Removed MOD_INC and MOD_DEC from poll and disconnect functions, and
- moved them to the serial_open and serial_close functions.
- Also fixed bug with there not being a MOD_DEC for the generic driver
- (thanks to Gary Brubaker for finding this.)
-
- (11/29/2000) gkh
- Small NULL pointer initialization cleanup which saves a bit of disk image
-
- (11/01/2000) Adam J. Richter
- instead of using idVendor/idProduct pairs, usb serial drivers
- now identify their hardware interest with usb_device_id tables,
- which they usually have anyhow for use with MODULE_DEVICE_TABLE.
-
- (10/05/2000) gkh
- Fixed bug with urb->dev not being set properly, now that the usb
- core needs it.
-
- (09/11/2000) gkh
- Removed DEBUG #ifdefs with call to usb_serial_debug_data
-
- (08/28/2000) gkh
- Added port_lock to port structure.
- Added locks for SMP safeness to generic driver
- Fixed the ability to open a generic device's port more than once.
-
- (07/23/2000) gkh
- Added bulk_out_endpointAddress to port structure.
-
- (07/19/2000) gkh, pberger, and borchers
- Modifications to allow usb-serial drivers to be modules.
-
- (07/03/2000) gkh
- Added more debugging to serial_ioctl call
-
- (06/25/2000) gkh
- Changed generic_write_bulk_callback to not call wake_up_interruptible
- directly, but to have port_softint do it at a safer time.
-
- (06/23/2000) gkh
- Cleaned up debugging statements in a quest to find UHCI timeout bug.
-
- (05/22/2000) gkh
- Changed the makefile, enabling the big CONFIG_USB_SERIAL_SOMTHING to be
- removed from the individual device source files.
-
- (05/03/2000) gkh
- Added the Digi Acceleport driver from Al Borchers and Peter Berger.
-
- (05/02/2000) gkh
- Changed devfs and tty register code to work properly now. This was based on
- the ACM driver changes by Vojtech Pavlik.
-
- (04/27/2000) Ryan VanderBijl
- Put calls to *_paranoia_checks into one function.
-
- (04/23/2000) gkh
- Fixed bug that Randy Dunlap found for Generic devices with no bulk out ports.
- Moved when the startup code printed out the devices that are supported.
-
- (04/19/2000) gkh
- Added driver for ZyXEL omni.net lcd plus ISDN TA
- Made startup info message specify which drivers were compiled in.
-
- (04/03/2000) gkh
- Changed the probe process to remove the module unload races.
- Changed where the tty layer gets initialized to have devfs work nicer.
- Added initial devfs support.
-
- (03/26/2000) gkh
- Split driver up into device specific pieces.
-
- (03/19/2000) gkh
- Fixed oops that could happen when device was removed while a program
- was talking to the device.
- Removed the static urbs and now all urbs are created and destroyed
- dynamically.
- Reworked the internal interface. Now everything is based on the
- usb_serial_port structure instead of the larger usb_serial structure.
- This fixes the bug that a multiport device could not have more than
- one port open at one time.
-
- (03/17/2000) gkh
- Added config option for debugging messages.
- Added patch for keyspan pda from Brian Warner.
-
- (03/06/2000) gkh
- Added the keyspan pda code from Brian Warner <warner@lothar.com>
- Moved a bunch of the port specific stuff into its own structure. This
- is in anticipation of the true multiport devices (there's a bug if you
- try to access more than one port of any multiport device right now)
-
- (02/21/2000) gkh
- Made it so that any serial devices only have to specify which functions
- they want to overload from the generic function calls (great,
- inheritance in C, in a driver, just what I wanted...)
- Added support for set_termios and ioctl function calls. No drivers take
- advantage of this yet.
- Removed the #ifdef MODULE, now there is no module specific code.
- Cleaned up a few comments in usb-serial.h that were wrong (thanks again
- to Miles Lott).
- Small fix to get_free_serial.
-
- (02/14/2000) gkh
- Removed the Belkin and Peracom functionality from the driver due to
- the lack of support from the vendor, and me not wanting people to
- accidenatly buy the device, expecting it to work with Linux.
- Added read_bulk_callback and write_bulk_callback to the type structure
- for the needs of the FTDI and WhiteHEAT driver.
- Changed all reverences to FTDI to FTDI_SIO at the request of Bill
- Ryder.
- Changed the output urb size back to the max endpoint size to make
- the ftdi_sio driver have it easier, and due to the fact that it didn't
- really increase the speed any.
-
- (02/11/2000) gkh
- Added VISOR_FUNCTION_CONSOLE to the visor startup function. This was a
- patch from Miles Lott (milos@insync.net).
- Fixed bug with not restoring the minor range that a device grabs, if
- the startup function fails (thanks Miles for finding this).
-
- (02/05/2000) gkh
- Added initial framework for the Keyspan PDA serial converter so that
- Brian Warner has a place to put his code.
- Made the ezusb specific functions generic enough that different
- devices can use them (whiteheat and keyspan_pda both need them).
- Split out a whole bunch of structure and other stuff to a separate
- usb-serial.h file.
- Made the Visor connection messages a little more understandable, now
- that Miles Lott (milos@insync.net) has gotten the Generic channel to
- work. Also made them always show up in the log file.
-
- (01/25/2000) gkh
- Added initial framework for FTDI serial converter so that Bill Ryder
- has a place to put his code.
- Added the vendor specific info from Handspring. Now we can print out
- informational debug messages as well as understand what is happening.
-
- (01/23/2000) gkh
- Fixed problem of crash when trying to open a port that didn't have a
- device assigned to it. Made the minor node finding a little smarter,
- now it looks to find a continuous space for the new device.
-
- (01/21/2000) gkh
- Fixed bug in visor_startup with patch from Miles Lott (milos@insync.net)
- Fixed get_serial_by_minor which was all messed up for multi port
- devices. Fixed multi port problem for generic devices. Now the number
- of ports is determined by the number of bulk out endpoints for the
- generic device.
-
- (01/19/2000) gkh
- Removed lots of cruft that was around from the old (pre urb) driver
- interface.
- Made the serial_table dynamic. This should save lots of memory when
- the number of minor nodes goes up to 256.
- Added initial support for devices that have more than one port.
- Added more debugging comments for the Visor, and added a needed
- set_configuration call.
-
- (01/17/2000) gkh
- Fixed the WhiteHEAT firmware (my processing tool had a bug)
- and added new debug loader firmware for it.
- Removed the put_char function as it isn't really needed.
- Added visor startup commands as found by the Win98 dump.
-
- (01/13/2000) gkh
- Fixed the vendor id for the generic driver to the one I meant it to be.
-
- (01/12/2000) gkh
- Forget the version numbering...that's pretty useless...
- Made the driver able to be compiled so that the user can select which
- converter they want to use. This allows people who only want the Visor
- support to not pay the memory size price of the WhiteHEAT.
- Fixed bug where the generic driver (idVendor=0000 and idProduct=0000)
- grabbed the root hub. Not good.
-
- version 0.4.0 (01/10/2000) gkh
- Added whiteheat.h containing the firmware for the ConnectTech WhiteHEAT
- device. Added startup function to allow firmware to be downloaded to
- a device if it needs to be.
- Added firmware download logic to the WhiteHEAT device.
- Started to add #defines to split up the different drivers for potential
- configuration option.
-
- version 0.3.1 (12/30/99) gkh
- Fixed problems with urb for bulk out.
- Added initial support for multiple sets of endpoints. This enables
- the Handspring Visor to be attached successfully. Only the first
- bulk in / bulk out endpoint pair is being used right now.
-
- version 0.3.0 (12/27/99) gkh
- Added initial support for the Handspring Visor based on a patch from
- Miles Lott (milos@sneety.insync.net)
- Cleaned up the code a bunch and converted over to using urbs only.
-
- version 0.2.3 (12/21/99) gkh
- Added initial support for the Connect Tech WhiteHEAT converter.
- Incremented the number of ports in expectation of getting the
- WhiteHEAT to work properly (4 ports per connection).
- Added notification on insertion and removal of what port the
- device is/was connected to (and what kind of device it was).
-
- version 0.2.2 (12/16/99) gkh
- Changed major number to the new allocated number. We're legal now!
-
- version 0.2.1 (12/14/99) gkh
- Fixed bug that happens when device node is opened when there isn't a
- device attached to it. Thanks to marek@webdesign.no for noticing this.
-
- version 0.2.0 (11/10/99) gkh
- Split up internals to make it easier to add different types of serial
- converters to the code.
- Added a "generic" driver that gets it's vendor and product id
- from when the module is loaded. Thanks to David E. Nelson (dnelson@jump.net)
- for the idea and sample code (from the usb scanner driver.)
- Cleared up any licensing questions by releasing it under the GNU GPL.
-
- version 0.1.2 (10/25/99) gkh
- Fixed bug in detecting device.
-
- version 0.1.1 (10/05/99) gkh
- Changed the major number to not conflict with anything else.
-
- version 0.1 (09/28/99) gkh
- Can recognize the two different devices and start up a read from
- device when asked to. Writes also work. No control signals yet, this
- all is vendor specific data (i.e. no spec), also no control for
- different baud rates or other bit settings.
- Currently we are using the same devid as the acm driver. This needs
- to change.
-
------------------------------------------------------------------------
-visor.c Change Log comments:
-
- (06/03/2003) Judd Montgomery <judd at jpilot.org>
- Added support for module parameter options for untested/unknown
- devices.
-
- (03/09/2003) gkh
- Added support for the Sony Clie NZ90V device. Thanks to Martin Brachtl
- <brachtl@redgrep.cz> for the information.
-
- (03/05/2003) gkh
- Think Treo support is now working.
-
- (04/03/2002) gkh
- Added support for the Sony OS 4.1 devices. Thanks to Hiroyuki ARAKI
- <hiro@zob.ne.jp> for the information.
-
- (03/27/2002) gkh
- Removed assumptions that port->tty was always valid (is not true
- for usb serial console devices.)
-
- (03/23/2002) gkh
- Added support for the Palm i705 device, thanks to Thomas Riemer
- <tom@netmech.com> for the information.
-
- (03/21/2002) gkh
- Added support for the Palm m130 device, thanks to Udo Eisenbarth
- <udo.eisenbarth@web.de> for the information.
-
- (02/27/2002) gkh
- Reworked the urb handling logic. We have no more pool, but dynamically
- allocate the urb and the transfer buffer on the fly. In testing this
- does not incure any measurable overhead. This also relies on the fact
- that we have proper reference counting logic for urbs.
-
- (02/21/2002) SilaS
- Added initial support for the Palm m515 devices.
-
- (02/14/2002) gkh
- Added support for the Clie S-360 device.
-
- (12/18/2001) gkh
- Added better Clie support for 3.5 devices. Thanks to Geoffrey Levand
- for the patch.
-
- (11/11/2001) gkh
- Added support for the m125 devices, and added check to prevent oopses
- for Clié devices that lie about the number of ports they have.
-
- (08/30/2001) gkh
- Added support for the Clie devices, both the 3.5 and 4.0 os versions.
- Many thanks to Daniel Burke, and Bryan Payne for helping with this.
-
- (08/23/2001) gkh
- fixed a few potential bugs pointed out by Oliver Neukum.
-
- (05/30/2001) gkh
- switched from using spinlock to a semaphore, which fixes lots of problems.
-
- (05/28/2000) gkh
- Added initial support for the Palm m500 and Palm m505 devices.
-
- (04/08/2001) gb
- Identify version on module load.
-
- (01/21/2000) gkh
- Added write_room and chars_in_buffer, as they were previously using the
- generic driver versions which is all wrong now that we are using an urb
- pool. Thanks to Wolfgang Grandegger for pointing this out to me.
- Removed count assignment in the write function, which was not needed anymore
- either. Thanks to Al Borchers for pointing this out.
-
- (12/12/2000) gkh
- Moved MOD_DEC to end of visor_close to be nicer, as the final write
- message can sleep.
-
- (11/12/2000) gkh
- Fixed bug with data being dropped on the floor by forcing tty->low_latency
- to be on. Hopefully this fixes the OHCI issue!
-
- (11/01/2000) Adam J. Richter
- usb_device_id table support
-
- (10/05/2000) gkh
- Fixed bug with urb->dev not being set properly, now that the usb
- core needs it.
-
- (09/11/2000) gkh
- Got rid of always calling kmalloc for every urb we wrote out to the
- device.
- Added visor_read_callback so we can keep track of bytes in and out for
- those people who like to know the speed of their device.
- Removed DEBUG #ifdefs with call to usb_serial_debug_data
-
- (09/06/2000) gkh
- Fixed oops in visor_exit. Need to uncomment usb_unlink_urb call _after_
- the host controller drivers set urb->dev = NULL when the urb is finished.
-
- (08/28/2000) gkh
- Added locks for SMP safeness.
-
- (08/08/2000) gkh
- Fixed endian problem in visor_startup.
- Fixed MOD_INC and MOD_DEC logic and the ability to open a port more
- than once.
-
- (07/23/2000) gkh
- Added pool of write urbs to speed up transfers to the visor.
-
- (07/19/2000) gkh
- Added module_init and module_exit functions to handle the fact that this
- driver is a loadable module now.
-
- (07/03/2000) gkh
- Added visor_set_ioctl and visor_set_termios functions (they don't do much
- of anything, but are good for debugging.)
-
- (06/25/2000) gkh
- Fixed bug in visor_unthrottle that should help with the disconnect in PPP
- bug that people have been reporting.
-
- (06/23/2000) gkh
- Cleaned up debugging statements in a quest to find UHCI timeout bug.
-
- (04/27/2000) Ryan VanderBijl
- Fixed memory leak in visor_close
-
- (03/26/2000) gkh
- Split driver up into device specific pieces.
-
------------------------------------------------------------------------
-pl2303.c Change Log comments:
-
- 2002_Mar_26 gkh
- allowed driver to work properly if there is no tty assigned to a port
- (this happens for serial console devices.)
-
- 2001_Oct_06 gkh
- Added RTS and DTR line control. Thanks to joe@bndlg.de for parts of it.
-
- 2001_Sep_19 gkh
- Added break support.
-
- 2001_Aug_30 gkh
- fixed oops in write_bulk_callback.
-
- 2001_Aug_28 gkh
- reworked buffer logic to be like other usb-serial drivers. Hopefully
- removing some reported problems.
-
- 2001_Jun_06 gkh
- finished porting to 2.4 format.
-
-
------------------------------------------------------------------------
-io_edgeport.c Change Log comments:
-
- 2003_04_03 al borchers
- - fixed a bug (that shows up with dosemu) where the tty struct is
- used in a callback after it has been freed
-
- 2.3 2002_03_08 greg kroah-hartman
- - fixed bug when multiple devices were attached at the same time.
-
- 2.2 2001_11_14 greg kroah-hartman
- - fixed bug in edge_close that kept the port from being used more
- than once.
- - fixed memory leak on device removal.
- - fixed potential double free of memory when command urb submitting
- failed.
- - other small cleanups when the device is removed
-
- 2.1 2001_07_09 greg kroah-hartman
- - added support for TIOCMBIS and TIOCMBIC.
-
- (04/08/2001) gb
- - Identify version on module load.
-
- 2.0 2001_03_05 greg kroah-hartman
- - reworked entire driver to fit properly in with the other usb-serial
- drivers. Occasional oopses still happen, but it's a good start.
-
- 1.2.3 (02/23/2001) greg kroah-hartman
- - changed device table to work properly for 2.4.x final format.
- - fixed problem with dropping data at high data rates.
-
- 1.2.2 (11/27/2000) greg kroah-hartman
- - cleaned up more NTisms.
- - Added device table for 2.4.0-test11
-
- 1.2.1 (11/08/2000) greg kroah-hartman
- - Started to clean up NTisms.
- - Fixed problem with dev field of urb for kernels >= 2.4.0-test9
-
- 1.2 (10/17/2000) David Iacovelli
- Remove all EPIC code and GPL source
- Fix RELEVANT_IFLAG macro to include flow control
- changes port configuration changes.
- Fix redefinition of SERIAL_MAGIC
- Change all timeout values to 5 seconds
- Tried to fix the UHCI multiple urb submission, but failed miserably.
- it seems to work fine with OHCI.
- ( Greg take a look at the #if 0 at end of WriteCmdUsb() we must
- find a way to work arount this UHCI bug )
-
- 1.1 (10/11/2000) David Iacovelli
- Fix XON/XOFF flow control to support both IXON and IXOFF
-
- 0.9.27 (06/30/2000) David Iacovelli
- Added transmit queue and now allocate urb for command writes.
-
- 0.9.26 (06/29/2000) David Iacovelli
- Add support for 80251 based edgeport
-
- 0.9.25 (06/27/2000) David Iacovelli
- Do not close the port if it has multiple opens.
-
- 0.9.24 (05/26/2000) David Iacovelli
- Add IOCTLs to support RXTX and JAVA POS
- and first cut at running BlackBox Demo
-
- 0.9.23 (05/24/2000) David Iacovelli
- Add IOCTLs to support RXTX and JAVA POS
-
- 0.9.22 (05/23/2000) David Iacovelli
- fixed bug in enumeration. If epconfig turns on mapping by
- path after a device is already plugged in, we now update
- the mapping correctly
-
- 0.9.21 (05/16/2000) David Iacovelli
- Added BlockUntilChaseResp() to also wait for txcredits
- Updated the way we allocate and handle write URBs
- Add debug code to dump buffers
-
- 0.9.20 (05/01/2000) David Iacovelli
- change driver to use usb/tts/
-
- 0.9.19 (05/01/2000) David Iacovelli
- Update code to compile if DEBUG is off
-
- 0.9.18 (04/28/2000) David Iacovelli
- cleanup and test tty_register with devfs
-
- 0.9.17 (04/27/2000) greg kroah-hartman
- changed tty_register around to be like the way it
- was before, but now it works properly with devfs.
-
- 0.9.16 (04/26/2000) david iacovelli
- Fixed bug in GetProductInfo()
-
- 0.9.15 (04/25/2000) david iacovelli
- Updated enumeration
-
- 0.9.14 (04/24/2000) david iacovelli
- Removed all config/status IOCTLS and
- converted to using /proc/edgeport
- still playing with devfs
-
- 0.9.13 (04/24/2000) david iacovelli
- Removed configuration based on ttyUSB0
- Added support for configuration using /prod/edgeport
- first attempt at using devfs (not working yet!)
- Added IOCTL to GetProductInfo()
- Added support for custom baud rates
- Add support for random port numbers
-
- 0.9.12 (04/18/2000) david iacovelli
- added additional configuration IOCTLs
- use ttyUSB0 for configuration
-
- 0.9.11 (04/17/2000) greg kroah-hartman
- fixed module initialization race conditions.
- made all urbs dynamically allocated.
- made driver devfs compatible. now it only registers the tty device
- when the device is actually plugged in.
-
- 0.9.10 (04/13/2000) greg kroah-hartman
- added proc interface framework.
-
- 0.9.9 (04/13/2000) david iacovelli
- added enumeration code and ioctls to configure the device
-
- 0.9.8 (04/12/2000) david iacovelli
- Change interrupt read start when device is plugged in
- and stop when device is removed
- process interrupt reads when all ports are closed
- (keep value of rxBytesAvail consistent with the edgeport)
- set the USB_BULK_QUEUE flag so that we can shove a bunch
- of urbs at once down the pipe
-
- 0.9.7 (04/10/2000) david iacovelli
- start to add enumeration code.
- generate serial number for epic devices
- add support for kdb
-
- 0.9.6 (03/30/2000) david iacovelli
- add IOCTL to get string, manufacture, and boot descriptors
-
- 0.9.5 (03/14/2000) greg kroah-hartman
- more error checking added to SerialOpen to try to fix UHCI open problem
-
- 0.9.4 (03/09/2000) greg kroah-hartman
- added more error checking to handle oops when data is hanging
- around and tty is abruptly closed.
-
- 0.9.3 (03/09/2000) david iacovelli
- Add epic support for xon/xoff chars
- play with performance
-
- 0.9.2 (03/08/2000) greg kroah-hartman
- changed most "info" calls to "dbg"
- implemented flow control properly in the termios call
-
- 0.9.1 (03/08/2000) david iacovelli
- added EPIC support
- enabled bootloader update
-
- 0.9 (03/08/2000) greg kroah-hartman
- Release to IO networks.
- Integrated changes that David made
- made getting urbs for writing SMP safe
-
- 0.8 (03/07/2000) greg kroah-hartman
- Release to IO networks.
- Fixed problems that were seen in code by David.
- Now both Edgeport/4 and Edgeport/2 works properly.
- Changed most of the functions to use port instead of serial.
-
- 0.7 (02/27/2000) greg kroah-hartman
- Milestone 3 release.
- Release to IO Networks
- ioctl for waiting on line change implemented.
- ioctl for getting statistics implemented.
- multiport support working.
- lsr and msr registers are now handled properly.
- change break now hooked up and working.
- support for all known Edgeport devices.
-
- 0.6 (02/22/2000) greg kroah-hartman
- Release to IO networks.
- CHASE is implemented correctly when port is closed.
- SerialOpen now blocks correctly until port is fully opened.
-
- 0.5 (02/20/2000) greg kroah-hartman
- Release to IO networks.
- Known problems:
- modem status register changes are not sent on to the user
- CHASE is not implemented when the port is closed.
-
- 0.4 (02/16/2000) greg kroah-hartman
- Second cut at the CeBit demo.
- Doesn't leak memory on every write to the port
- Still small leaks on startup.
- Added support for Edgeport/2 and Edgeport/8
-
- 0.3 (02/15/2000) greg kroah-hartman
- CeBit demo release.
- Force the line settings to 4800, 8, 1, e for the demo.
- Warning! This version leaks memory like crazy!
-
- 0.2 (01/30/2000) greg kroah-hartman
- Milestone 1 release.
- Device is found by USB subsystem, enumerated, firmware is downloaded
- and the descriptors are printed to the debug log, config is set, and
- green light starts to blink. Open port works, and data can be sent
- and received at the default settings of the UART. Loopback connector
- and debug log confirms this.
-
- 0.1 (01/23/2000) greg kroah-hartman
- Initial release to help IO Networks try to set up their test system.
- Edgeport4 is recognized, firmware is downloaded, config is set so
- device blinks green light every 3 sec. Port is bound, but opening,
- closing, and sending data do not work properly.
-
-
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index b43d07d..123bf91 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -52,7 +52,7 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
-static int debug;
+static bool debug;
/* Vendor and Product ID */
#define AIRCABLE_VID 0x16CA
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 18e875b..69328dc 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -37,7 +37,7 @@
#include <linux/mutex.h>
#include <linux/spinlock.h>
-static int debug;
+static bool debug;
/*
* Version information
*/
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index d6921fa..29ffeb6 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -20,50 +20,7 @@
* TODO:
* -- Add true modem contol line query capability. Currently we track the
* states reported by the interrupt and the states we request.
- * -- Add error reporting back to application for UART error conditions.
- * Just point me at how to implement this and I'll do it. I've put the
- * framework in, but haven't analyzed the "tty_flip" interface yet.
* -- Add support for flush commands
- * -- Add everything that is missing :)
- *
- * 27-Nov-2001 gkh
- * compressed all the differnent device entries into 1.
- *
- * 30-May-2001 gkh
- * switched from using spinlock to a semaphore, which fixes lots of
- * problems.
- *
- * 08-Apr-2001 gb
- * - Identify version on module load.
- *
- * 12-Mar-2001 gkh
- * - Added support for the GoHubs GO-COM232 device which is the same as the
- * Peracom device.
- *
- * 06-Nov-2000 gkh
- * - Added support for the old Belkin and Peracom devices.
- * - Made the port able to be opened multiple times.
- * - Added some defaults incase the line settings are things these devices
- * can't support.
- *
- * 18-Oct-2000 William Greathouse
- * Released into the wild (linux-usb-devel)
- *
- * 17-Oct-2000 William Greathouse
- * Add code to recognize firmware version and set hardware flow control
- * appropriately. Belkin states that firmware prior to 3.05 does not
- * operate correctly in hardware handshake mode. I have verified this
- * on firmware 2.05 -- for both RTS and DTR input flow control, the control
- * line is not reset. The test performed by the Belkin Win* driver is
- * to enable hardware flow control for firmware 2.06 or greater and
- * for 1.00 or prior. I am only enabling for 2.06 or greater.
- *
- * 12-Oct-2000 William Greathouse
- * First cut at supporting Belkin USB Serial Adapter F5U103
- * I did not have a copy of the original work to support this
- * adapter, so pardon any stupid mistakes. All of the information
- * I am using to write this driver was acquired by using a modified
- * UsbSnoop on Windows2000 and from examining the other USB drivers.
*/
#include <linux/kernel.h>
@@ -80,7 +37,7 @@
#include <linux/usb/serial.h>
#include "belkin_sa.h"
-static int debug;
+static bool debug;
/*
* Version Information
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 6ae1c06..5e53cc5 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -70,7 +70,7 @@
#define CH341_NBREAK_BITS_REG2 0x40
-static int debug;
+static bool debug;
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x4348, 0x5523) },
@@ -335,13 +335,12 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
goto out;
dbg("%s - submitting interrupt urb", __func__);
- port->interrupt_in_urb->dev = serial->dev;
r = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (r) {
dev_err(&port->dev, "%s - failed submitting interrupt urb,"
" error %d\n", __func__, r);
ch341_close(port);
- return -EPROTO;
+ goto out;
}
r = usb_serial_generic_open(tty, port);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index fd67cc5..08a5575 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -39,6 +39,8 @@ static void cp210x_get_termios(struct tty_struct *,
struct usb_serial_port *port);
static void cp210x_get_termios_port(struct usb_serial_port *port,
unsigned int *cflagp, unsigned int *baudp);
+static void cp210x_change_speed(struct tty_struct *, struct usb_serial_port *,
+ struct ktermios *);
static void cp210x_set_termios(struct tty_struct *, struct usb_serial_port *,
struct ktermios*);
static int cp210x_tiocmget(struct tty_struct *);
@@ -49,7 +51,7 @@ static void cp210x_break_ctl(struct tty_struct *, int);
static int cp210x_startup(struct usb_serial *);
static void cp210x_dtr_rts(struct usb_serial_port *p, int on);
-static int debug;
+static bool debug;
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x045B, 0x0053) }, /* Renesas RX610 RX-Stick */
@@ -92,6 +94,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
{ USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
{ USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
+ { USB_DEVICE(0x10C4, 0x81A9) }, /* Multiplex RC Interface */
{ USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */
{ USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */
{ USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
@@ -133,10 +136,13 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */
{ USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */
{ USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */
+ { USB_DEVICE(0x17A8, 0x0001) }, /* Kamstrup Optical Eye/3-wire */
+ { USB_DEVICE(0x17A8, 0x0005) }, /* Kamstrup M-Bus Master MultiPort 250D */
{ USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
{ USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
+ { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
{ USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
{ } /* Terminating Entry */
};
@@ -200,6 +206,8 @@ static struct usb_serial_driver cp210x_device = {
#define CP210X_EMBED_EVENTS 0x15
#define CP210X_GET_EVENTSTATE 0x16
#define CP210X_SET_CHARS 0x19
+#define CP210X_GET_BAUDRATE 0x1D
+#define CP210X_SET_BAUDRATE 0x1E
/* CP210X_IFC_ENABLE */
#define UART_ENABLE 0x0001
@@ -280,7 +288,10 @@ static int cp210x_get_config(struct usb_serial_port *port, u8 request,
dbg("%s - Unable to send config request, "
"request=0x%x size=%d result=%d\n",
__func__, request, size, result);
- return -EPROTO;
+ if (result > 0)
+ result = -EPROTO;
+
+ return result;
}
return 0;
@@ -331,7 +342,10 @@ static int cp210x_set_config(struct usb_serial_port *port, u8 request,
dbg("%s - Unable to send request, "
"request=0x%x size=%d result=%d\n",
__func__, request, size, result);
- return -EPROTO;
+ if (result > 0)
+ result = -EPROTO;
+
+ return result;
}
return 0;
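Both cp210x_get_config() and cp210x_set_config() now distinguish a short control transfer from a transport error: a positive byte count below the requested size still becomes -EPROTO, while a negative errno from usb_control_msg() is passed through to the caller. The convention, condensed into an illustrative helper (not part of cp210x.c):

/* Translate a usb_control_msg() result: 0, -EPROTO, or the original errno. */
static int cp210x_xlat_result(int result, int requested)
{
	if (result == requested)
		return 0;		/* full transfer */
	if (result > 0)
		return -EPROTO;		/* short transfer: treat as protocol error */
	return result;			/* e.g. -ETIMEDOUT, -EPIPE, -ENODEV */
}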
@@ -353,8 +367,8 @@ static inline int cp210x_set_config_single(struct usb_serial_port *port,
* Quantises the baud rate as per AN205 Table 1
*/
static unsigned int cp210x_quantise_baudrate(unsigned int baud) {
- if (baud <= 56) baud = 0;
- else if (baud <= 300) baud = 300;
+ if (baud <= 300)
+ baud = 300;
else if (baud <= 600) baud = 600;
else if (baud <= 1200) baud = 1200;
else if (baud <= 1800) baud = 1800;
@@ -382,10 +396,10 @@ static unsigned int cp210x_quantise_baudrate(unsigned int baud) {
else if (baud <= 491520) baud = 460800;
else if (baud <= 567138) baud = 500000;
else if (baud <= 670254) baud = 576000;
- else if (baud <= 1053257) baud = 921600;
- else if (baud <= 1474560) baud = 1228800;
- else if (baud <= 2457600) baud = 1843200;
- else baud = 3686400;
+ else if (baud < 1000000)
+ baud = 921600;
+ else if (baud > 2000000)
+ baud = 2000000;
return baud;
}
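The reshaped table changes behaviour at both ends: requests of 300 baud or less are no longer mapped to 0, rates between 1M and 2M are passed through for the device to round itself, and anything above 2M is clamped to 2M. A few illustrative mappings, assuming the revised cp210x_quantise_baudrate() above is linked in (this check is not part of the driver):

#include <assert.h>

static void cp210x_quantise_examples(void)
{
	assert(cp210x_quantise_baudrate(56)      == 300);     /* previously 0 */
	assert(cp210x_quantise_baudrate(950000)  == 921600);  /* below 1M: table rate */
	assert(cp210x_quantise_baudrate(1500000) == 1500000); /* 1M..2M: passed through */
	assert(cp210x_quantise_baudrate(2500000) == 2000000); /* above 2M: clamped */
}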
@@ -395,19 +409,21 @@ static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port)
dbg("%s - port %d", __func__, port->number);
- if (cp210x_set_config_single(port, CP210X_IFC_ENABLE, UART_ENABLE)) {
- dev_err(&port->dev, "%s - Unable to enable UART\n",
- __func__);
- return -EPROTO;
- }
-
- result = usb_serial_generic_open(tty, port);
- if (result)
+ result = cp210x_set_config_single(port, CP210X_IFC_ENABLE,
+ UART_ENABLE);
+ if (result) {
+ dev_err(&port->dev, "%s - Unable to enable UART\n", __func__);
return result;
+ }
/* Configure the termios structure */
cp210x_get_termios(tty, port);
- return 0;
+
+ /* The baud rate must be initialised on cp2104 */
+ if (tty)
+ cp210x_change_speed(tty, port, NULL);
+
+ return usb_serial_generic_open(tty, port);
}
static void cp210x_close(struct usb_serial_port *port)
@@ -459,10 +475,7 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
dbg("%s - port %d", __func__, port->number);
- cp210x_get_config(port, CP210X_GET_BAUDDIV, &baud, 2);
- /* Convert to baudrate */
- if (baud)
- baud = cp210x_quantise_baudrate((BAUD_RATE_GEN_FREQ + baud/2)/ baud);
+ cp210x_get_config(port, CP210X_GET_BAUDRATE, &baud, 4);
dbg("%s - baud rate = %d", __func__, baud);
*baudp = baud;
@@ -520,18 +533,13 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
cflag |= PARENB;
break;
case BITS_PARITY_MARK:
- dbg("%s - parity = MARK (not supported, disabling parity)",
- __func__);
- cflag &= ~PARENB;
- bits &= ~BITS_PARITY_MASK;
- cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
+ dbg("%s - parity = MARK", __func__);
+ cflag |= (PARENB|PARODD|CMSPAR);
break;
case BITS_PARITY_SPACE:
- dbg("%s - parity = SPACE (not supported, disabling parity)",
- __func__);
- cflag &= ~PARENB;
- bits &= ~BITS_PARITY_MASK;
- cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2);
+ dbg("%s - parity = SPACE", __func__);
+ cflag &= ~PARODD;
+ cflag |= (PARENB|CMSPAR);
break;
default:
dbg("%s - Unknown parity mode, disabling parity", __func__);
@@ -576,11 +584,64 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
*cflagp = cflag;
}
+/*
+ * CP2101 supports the following baud rates:
+ *
+ * 300, 600, 1200, 1800, 2400, 4800, 7200, 9600, 14400, 19200, 28800,
+ * 38400, 56000, 57600, 115200, 128000, 230400, 460800, 921600
+ *
+ * CP2102 and CP2103 support the following additional rates:
+ *
+ * 4000, 16000, 51200, 64000, 76800, 153600, 250000, 256000, 500000,
+ * 576000
+ *
+ * The device will map a requested rate to a supported one, but the result
+ * of requests for rates greater than 1053257 is undefined (see AN205).
+ *
+ * CP2104, CP2105 and CP2110 support most rates up to 2M, 921k and 1M baud,
+ * respectively, with an error less than 1%. The actual rates are determined
+ * by
+ *
+ * div = round(freq / (2 x prescale x request))
+ * actual = freq / (2 x prescale x div)
+ *
+ * For CP2104 and CP2105 freq is 48 MHz and prescale is 4 for request <= 365 bps
+ * or 1 otherwise.
+ * For CP2110 freq is 24 MHz and prescale is 4 for request <= 300 bps or 1
+ * otherwise.
+ */
+static void cp210x_change_speed(struct tty_struct *tty,
+ struct usb_serial_port *port, struct ktermios *old_termios)
+{
+ u32 baud;
+
+ baud = tty->termios->c_ospeed;
+
+ /* This maps the requested rate to a rate valid on cp2102 or cp2103,
+ * or to an arbitrary rate in [1M,2M].
+ *
+ * NOTE: B0 is not implemented.
+ */
+ baud = cp210x_quantise_baudrate(baud);
+
+ dbg("%s - setting baud rate to %u", __func__, baud);
+ if (cp210x_set_config(port, CP210X_SET_BAUDRATE, &baud,
+ sizeof(baud))) {
+ dev_warn(&port->dev, "failed to set baud rate to %u\n", baud);
+ if (old_termios)
+ baud = old_termios->c_ospeed;
+ else
+ baud = 9600;
+ }
+
+ tty_encode_baud_rate(tty, baud, baud);
+}
+
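Working the formula above through one case makes the numbers concrete: on a CP2104, a request of 115200 baud uses prescale 1 (request > 365 baud), so div = round(48000000 / (2 x 1 x 115200)) = 208 and actual = 48000000 / (2 x 208) = 115384 baud, roughly 0.16% above the request. A host-side sketch of the same arithmetic (illustration only; the device performs this rounding itself):

#include <math.h>

/* Rate a CP2104/CP2105-class device actually produces for a request. */
static unsigned int cp210x_actual_rate(unsigned int freq, unsigned int prescale,
				       unsigned int request)
{
	unsigned int div;

	div = (unsigned int)lround((double)freq / (2.0 * prescale * request));
	if (div == 0)
		div = 1;	/* guard against requests far above freq/2 */

	return freq / (2 * prescale * div);
}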
static void cp210x_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
unsigned int cflag, old_cflag;
- unsigned int baud = 0, bits;
+ unsigned int bits;
unsigned int modem_ctl[4];
dbg("%s - port %d", __func__, port->number);
@@ -588,23 +649,11 @@ static void cp210x_set_termios(struct tty_struct *tty,
if (!tty)
return;
- tty->termios->c_cflag &= ~CMSPAR;
cflag = tty->termios->c_cflag;
old_cflag = old_termios->c_cflag;
- baud = cp210x_quantise_baudrate(tty_get_baud_rate(tty));
-
- /* If the baud rate is to be updated*/
- if (baud != tty_termios_baud_rate(old_termios) && baud != 0) {
- dbg("%s - Setting baud rate to %d baud", __func__,
- baud);
- if (cp210x_set_config_single(port, CP210X_SET_BAUDDIV,
- ((BAUD_RATE_GEN_FREQ + baud/2) / baud))) {
- dbg("Baud rate requested not supported by device");
- baud = tty_termios_baud_rate(old_termios);
- }
- }
- /* Report back the resulting baud rate */
- tty_encode_baud_rate(tty, baud, baud);
+
+ if (tty->termios->c_ospeed != old_termios->c_ospeed)
+ cp210x_change_speed(tty, port, old_termios);
/* If the number of data bits is to be updated */
if ((cflag & CSIZE) != (old_cflag & CSIZE)) {
@@ -643,16 +692,27 @@ static void cp210x_set_termios(struct tty_struct *tty,
"not supported by device\n");
}
- if ((cflag & (PARENB|PARODD)) != (old_cflag & (PARENB|PARODD))) {
+ if ((cflag & (PARENB|PARODD|CMSPAR)) !=
+ (old_cflag & (PARENB|PARODD|CMSPAR))) {
cp210x_get_config(port, CP210X_GET_LINE_CTL, &bits, 2);
bits &= ~BITS_PARITY_MASK;
if (cflag & PARENB) {
- if (cflag & PARODD) {
- bits |= BITS_PARITY_ODD;
- dbg("%s - parity = ODD", __func__);
+ if (cflag & CMSPAR) {
+ if (cflag & PARODD) {
+ bits |= BITS_PARITY_MARK;
+ dbg("%s - parity = MARK", __func__);
+ } else {
+ bits |= BITS_PARITY_SPACE;
+ dbg("%s - parity = SPACE", __func__);
+ }
} else {
- bits |= BITS_PARITY_EVEN;
- dbg("%s - parity = EVEN", __func__);
+ if (cflag & PARODD) {
+ bits |= BITS_PARITY_ODD;
+ dbg("%s - parity = ODD", __func__);
+ } else {
+ bits |= BITS_PARITY_EVEN;
+ dbg("%s - parity = EVEN", __func__);
+ }
}
}
if (cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2))
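Together, the two parity hunks make MARK and SPACE parity first-class instead of being silently disabled: cp210x_get_termios_port() now reports them via CMSPAR, and cp210x_set_termios() maps the cflag combinations back onto the device's line-control bits. The mapping, restated as an illustrative helper (tcflag_t, PARENB, PARODD, CMSPAR and the BITS_PARITY_* constants are those used in the code above):

static unsigned int cp210x_parity_bits(tcflag_t cflag)
{
	if (!(cflag & PARENB))
		return 0;			/* parity disabled */
	if (cflag & CMSPAR)			/* mark/space ("stick") parity */
		return (cflag & PARODD) ? BITS_PARITY_MARK : BITS_PARITY_SPACE;
	return (cflag & PARODD) ? BITS_PARITY_ODD : BITS_PARITY_EVEN;
}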
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index f744ab7..6bc3802 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -43,7 +43,7 @@
#define CYBERJACK_LOCAL_BUF_SIZE 32
-static int debug;
+static bool debug;
/*
* Version Information
@@ -138,7 +138,6 @@ static int cyberjack_startup(struct usb_serial *serial)
for (i = 0; i < serial->num_ports; ++i) {
int result;
- serial->port[i]->interrupt_in_urb->dev = serial->dev;
result = usb_submit_urb(serial->port[i]->interrupt_in_urb,
GFP_KERNEL);
if (result)
@@ -208,7 +207,6 @@ static void cyberjack_close(struct usb_serial_port *port)
static int cyberjack_write(struct tty_struct *tty,
struct usb_serial_port *port, const unsigned char *buf, int count)
{
- struct usb_serial *serial = port->serial;
struct cyberjack_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
int result;
@@ -221,22 +219,18 @@ static int cyberjack_write(struct tty_struct *tty,
return 0;
}
- spin_lock_bh(&port->lock);
- if (port->write_urb_busy) {
- spin_unlock_bh(&port->lock);
+ if (!test_and_clear_bit(0, &port->write_urbs_free)) {
dbg("%s - already writing", __func__);
return 0;
}
- port->write_urb_busy = 1;
- spin_unlock_bh(&port->lock);
spin_lock_irqsave(&priv->lock, flags);
if (count+priv->wrfilled > sizeof(priv->wrbuf)) {
/* To much data for buffer. Reset buffer. */
priv->wrfilled = 0;
- port->write_urb_busy = 0;
spin_unlock_irqrestore(&priv->lock, flags);
+ set_bit(0, &port->write_urbs_free);
return 0;
}
@@ -265,13 +259,7 @@ static int cyberjack_write(struct tty_struct *tty,
priv->wrsent = length;
/* set up our urb */
- usb_fill_bulk_urb(port->write_urb, serial->dev,
- usb_sndbulkpipe(serial->dev, port->bulk_out_endpointAddress),
- port->write_urb->transfer_buffer, length,
- ((serial->type->write_bulk_callback) ?
- serial->type->write_bulk_callback :
- cyberjack_write_bulk_callback),
- port);
+ port->write_urb->transfer_buffer_length = length;
/* send the data out the bulk port */
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
@@ -283,7 +271,7 @@ static int cyberjack_write(struct tty_struct *tty,
priv->wrfilled = 0;
priv->wrsent = 0;
spin_unlock_irqrestore(&priv->lock, flags);
- port->write_urb_busy = 0;
+ set_bit(0, &port->write_urbs_free);
return 0;
}
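The cyberjack hunks drop the write_urb_busy flag and its spinlock in favour of bit 0 of port->write_urbs_free: a writer claims the urb atomically with test_and_clear_bit(), and every failure path as well as the completion handler hands it back with set_bit(). The claim/release shape, condensed into a sketch (names reused from the driver):

static int cyberjack_submit_write_sketch(struct usb_serial_port *port,
					 int length)
{
	int result;

	if (!test_and_clear_bit(0, &port->write_urbs_free))
		return 0;			/* a write urb is already in flight */

	port->write_urb->transfer_buffer_length = length;

	result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
	if (result)
		set_bit(0, &port->write_urbs_free);	/* release on failure */

	return result;
}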
@@ -351,7 +339,6 @@ static void cyberjack_read_int_callback(struct urb *urb)
spin_unlock(&priv->lock);
if (!old_rdtodo) {
- port->read_urb->dev = port->serial->dev;
result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (result)
dev_err(&port->dev, "%s - failed resubmitting "
@@ -362,7 +349,6 @@ static void cyberjack_read_int_callback(struct urb *urb)
}
resubmit:
- port->interrupt_in_urb->dev = port->serial->dev;
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
if (result)
dev_err(&port->dev, "usb_submit_urb(read int) failed\n");
@@ -415,7 +401,6 @@ static void cyberjack_read_bulk_callback(struct urb *urb)
/* Continue to read if we have still urbs to do. */
if (todo /* || (urb->actual_length==port->bulk_in_endpointAddress)*/) {
- port->read_urb->dev = port->serial->dev;
result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (result)
dev_err(&port->dev, "%s - failed resubmitting read "
@@ -432,7 +417,7 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
dbg("%s - port %d", __func__, port->number);
- port->write_urb_busy = 0;
+ set_bit(0, &port->write_urbs_free);
if (status) {
dbg("%s - nonzero write bulk status received: %d",
__func__, status);
@@ -455,13 +440,7 @@ static void cyberjack_write_bulk_callback(struct urb *urb)
priv->wrsent += length;
/* set up our urb */
- usb_fill_bulk_urb(port->write_urb, port->serial->dev,
- usb_sndbulkpipe(port->serial->dev, port->bulk_out_endpointAddress),
- port->write_urb->transfer_buffer, length,
- ((port->serial->type->write_bulk_callback) ?
- port->serial->type->write_bulk_callback :
- cyberjack_write_bulk_callback),
- port);
+ port->write_urb->transfer_buffer_length = length;
/* send the data out the bulk port */
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index d9906eb..3bdeafa 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -16,32 +16,6 @@
*
* See http://geocities.com/i0xox0i for information on this driver and the
* earthmate usb device.
- *
- * Lonnie Mendez <dignome@gmail.com>
- * 4-29-2005
- * Fixed problem where setting or retreiving the serial config would fail
- * with EPIPE. Removed CRTS toggling so the driver behaves more like
- * other usbserial adapters. Issued new interval of 1ms instead of the
- * default 10ms. As a result, transfer speed has been substantially
- * increased from avg. 850bps to avg. 3300bps. initial termios has also
- * been modified. Cleaned up code and formatting issues so it is more
- * readable. Replaced the C++ style comments.
- *
- * Lonnie Mendez <dignome@gmail.com>
- * 12-15-2004
- * Incorporated write buffering from pl2303 driver. Fixed bug with line
- * handling so both lines are raised in cypress_open. (was dropping rts)
- * Various code cleanups made as well along with other misc bug fixes.
- *
- * Lonnie Mendez <dignome@gmail.com>
- * 04-10-2004
- * Driver modified to support dynamic line settings. Various improvements
- * and features.
- *
- * Neil Whelchel
- * 10-2003
- * Driver first released.
- *
*/
/* Thanks to Neil Whelchel for writing the first cypress m8 implementation
@@ -72,10 +46,10 @@
#include "cypress_m8.h"
-static int debug;
-static int stats;
+static bool debug;
+static bool stats;
static int interval;
-static int unstable_bauds;
+static bool unstable_bauds;
/*
* Version Information
@@ -1162,8 +1136,6 @@ static void cypress_unthrottle(struct tty_struct *tty)
return;
if (actually_throttled) {
- port->interrupt_in_urb->dev = port->serial->dev;
-
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result) {
dev_err(&port->dev, "%s - failed submitting read urb, "
@@ -1352,7 +1324,6 @@ static void cypress_write_int_callback(struct urb *urb)
dbg("%s - nonzero write bulk status received: %d",
__func__, status);
port->interrupt_out_urb->transfer_buffer_length = 1;
- port->interrupt_out_urb->dev = port->serial->dev;
result = usb_submit_urb(port->interrupt_out_urb, GFP_ATOMIC);
if (!result)
return;
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index e92cbefc..b23bebd 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -13,222 +13,6 @@
*
* Peter Berger (pberger@brimson.com)
* Al Borchers (borchers@steinerpoint.com)
-*
-* (12/03/2001) gkh
-* switched to using port->port.count instead of private version.
-* Removed port->active
-*
-* (04/08/2001) gb
-* Identify version on module load.
-*
-* (11/01/2000) Adam J. Richter
-* usb_device_id table support
-*
-* (11/01/2000) pberger and borchers
-* -- Turned off the USB_DISABLE_SPD flag for write bulk urbs--it caused
-* USB 4 ports to hang on startup.
-* -- Serialized access to write urbs by adding the dp_write_urb_in_use
-* flag; otherwise, the driver caused SMP system hangs. Watching the
-* urb status is not sufficient.
-*
-* (10/05/2000) gkh
-* -- Fixed bug with urb->dev not being set properly, now that the usb
-* core needs it.
-*
-* (8/8/2000) pberger and borchers
-* -- Fixed close so that
-* - it can timeout while waiting for transmit idle, if needed;
-* - it ignores interrupts when flushing the port, turning
-* of modem signalling, and so on;
-* - it waits for the flush to really complete before returning.
-* -- Read_bulk_callback and write_bulk_callback check for a closed
-* port before using the tty struct or writing to the port.
-* -- The two changes above fix the oops caused by interrupted closes.
-* -- Added interruptible args to write_oob_command and set_modem_signals
-* and added a timeout arg to transmit_idle; needed for fixes to
-* close.
-* -- Added code for rx_throttle and rx_unthrottle so that input flow
-* control works.
-* -- Added code to set overrun, parity, framing, and break errors
-* (untested).
-* -- Set USB_DISABLE_SPD flag for write bulk urbs, so no 0 length
-* bulk writes are done. These hung the Digi USB device. The
-* 0 length bulk writes were a new feature of usb-uhci added in
-* the 2.4.0-test6 kernels.
-* -- Fixed mod inc race in open; do mod inc before sleeping to wait
-* for a close to finish.
-*
-* (7/31/2000) pberger
-* -- Fixed bugs with hardware handshaking:
-* - Added code to set/clear tty->hw_stopped in digi_read_oob_callback()
-* and digi_set_termios()
-* -- Added code in digi_set_termios() to
-* - add conditional in code handling transition from B0 to only
-* set RTS if RTS/CTS flow control is either not in use or if
-* the port is not currently throttled.
-* - handle turning off CRTSCTS.
-*
-* (7/30/2000) borchers
-* -- Added support for more than one Digi USB device by moving
-* globals to a private structure in the pointed to from the
-* usb_serial structure.
-* -- Moved the modem change and transmit idle wait queues into
-* the port private structure, so each port has its own queue
-* rather than sharing global queues.
-* -- Added support for break signals.
-*
-* (7/25/2000) pberger
-* -- Added USB-2 support. Note: the USB-2 supports 3 devices: two
-* serial and a parallel port. The parallel port is implemented
-* as a serial-to-parallel converter. That is, the driver actually
-* presents all three USB-2 interfaces as serial ports, but the third
-* one physically connects to a parallel device. Thus, for example,
-* one could plug a parallel printer into the USB-2's third port,
-* but from the kernel's (and userland's) point of view what's
-* actually out there is a serial device.
-*
-* (7/15/2000) borchers
-* -- Fixed race in open when a close is in progress.
-* -- Keep count of opens and dec the module use count for each
-* outstanding open when shutdown is called (on disconnect).
-* -- Fixed sanity checks in read_bulk_callback and write_bulk_callback
-* so pointers are checked before use.
-* -- Split read bulk callback into in band and out of band
-* callbacks, and no longer restart read chains if there is
-* a status error or a sanity error. This fixed the seg
-* faults and other errors we used to get on disconnect.
-* -- Port->active is once again a flag as usb-serial intended it
-* to be, not a count. Since it was only a char it would
-* have been limited to 256 simultaneous opens. Now the open
-* count is kept in the port private structure in dp_open_count.
-* -- Added code for modularization of the digi_acceleport driver.
-*
-* (6/27/2000) pberger and borchers
-* -- Zeroed out sync field in the wakeup_task before first use;
-* otherwise the uninitialized value might prevent the task from
-* being scheduled.
-* -- Initialized ret value to 0 in write_bulk_callback, otherwise
-* the uninitialized value could cause a spurious debugging message.
-*
-* (6/22/2000) pberger and borchers
-* -- Made cond_wait_... inline--apparently on SPARC the flags arg
-* to spin_lock_irqsave cannot be passed to another function
-* to call spin_unlock_irqrestore. Thanks to Pauline Middelink.
-* -- In digi_set_modem_signals the inner nested spin locks use just
-* spin_lock() rather than spin_lock_irqsave(). The old code
-* mistakenly left interrupts off. Thanks to Pauline Middelink.
-* -- copy_from_user (which can sleep) is no longer called while a
-* spinlock is held. We copy to a local buffer before getting
-* the spinlock--don't like the extra copy but the code is simpler.
-* -- Printk and dbg are no longer called while a spin lock is held.
-*
-* (6/4/2000) pberger and borchers
-* -- Replaced separate calls to spin_unlock_irqrestore and
-* interruptible_sleep_on_timeout with a new function
-* cond_wait_interruptible_timeout_irqrestore. This eliminates
-* the race condition where the wake up could happen after
-* the unlock and before the sleep.
-* -- Close now waits for output to drain.
-* -- Open waits until any close in progress is finished.
-* -- All out of band responses are now processed, not just the
-* first in a USB packet.
-* -- Fixed a bug that prevented the driver from working when the
-* first Digi port was not the first USB serial port--the driver
-* was mistakenly using the external USB serial port number to
-* try to index into its internal ports.
-* -- Fixed an SMP bug -- write_bulk_callback is called directly from
-* an interrupt, so spin_lock_irqsave/spin_unlock_irqrestore are
-* needed for locks outside write_bulk_callback that are also
-* acquired by write_bulk_callback to prevent deadlocks.
-* -- Fixed support for select() by making digi_chars_in_buffer()
-* return 256 when -EINPROGRESS is set, as the line discipline
-* code in n_tty.c expects.
-* -- Fixed an include file ordering problem that prevented debugging
-* messages from working.
-* -- Fixed an intermittent timeout problem that caused writes to
-* sometimes get stuck on some machines on some kernels. It turns
-* out in these circumstances write_chan() (in n_tty.c) was
-* asleep waiting for our wakeup call. Even though we call
-* wake_up_interruptible() in digi_write_bulk_callback(), there is
-* a race condition that could cause the wakeup to fail: if our
-* wake_up_interruptible() call occurs between the time that our
-* driver write routine finishes and write_chan() sets current->state
-* to TASK_INTERRUPTIBLE, the effect of our wakeup setting the state
-* to TASK_RUNNING will be lost and write_chan's subsequent call to
-* schedule() will never return (unless it catches a signal).
-* This race condition occurs because write_bulk_callback() (and thus
-* the wakeup) are called asynchronously from an interrupt, rather than
-* from the scheduler. We can avoid the race by calling the wakeup
-* from the scheduler queue and that's our fix: Now, at the end of
-* write_bulk_callback() we queue up a wakeup call on the scheduler
-* task queue. We still also invoke the wakeup directly since that
-* squeezes a bit more performance out of the driver, and any lost
-* race conditions will get cleaned up at the next scheduler run.
-*
-* NOTE: The problem also goes away if you comment out
-* the two code lines in write_chan() where current->state
-* is set to TASK_RUNNING just before calling driver.write() and to
-* TASK_INTERRUPTIBLE immediately afterwards. This is why the
-* problem did not show up with the 2.2 kernels -- they do not
-* include that code.
-*
-* (5/16/2000) pberger and borchers
-* -- Added timeouts to sleeps, to defend against lost wake ups.
-* -- Handle transition to/from B0 baud rate in digi_set_termios.
-*
-* (5/13/2000) pberger and borchers
-* -- All commands now sent on out of band port, using
-* digi_write_oob_command.
-* -- Get modem control signals whenever they change, support TIOCMGET/
-* SET/BIS/BIC ioctls.
-* -- digi_set_termios now supports parity, word size, stop bits, and
-* receive enable.
-* -- Cleaned up open and close, use digi_set_termios and
-* digi_write_oob_command to set port parameters.
-* -- Added digi_startup_device to start read chains on all ports.
-* -- Write buffer is only used when count==1, to be sure put_char can
-* write a char (unless the buffer is full).
-*
-* (5/10/2000) pberger and borchers
-* -- Added MOD_INC_USE_COUNT/MOD_DEC_USE_COUNT calls on open/close.
-* -- Fixed problem where the first incoming character is lost on
-* port opens after the first close on that port. Now we keep
-* the read_urb chain open until shutdown.
-* -- Added more port conditioning calls in digi_open and digi_close.
-* -- Convert port->active to a use count so that we can deal with multiple
-* opens and closes properly.
-* -- Fixed some problems with the locking code.
-*
-* (5/3/2000) pberger and borchers
-* -- First alpha version of the driver--many known limitations and bugs.
-*
-*
-* Locking and SMP
-*
-* - Each port, including the out-of-band port, has a lock used to
-* serialize all access to the port's private structure.
-* - The port lock is also used to serialize all writes and access to
-* the port's URB.
-* - The port lock is also used for the port write_wait condition
-* variable. Holding the port lock will prevent a wake up on the
-* port's write_wait; this can be used with cond_wait_... to be sure
-* the wake up is not lost in a race when dropping the lock and
-* sleeping waiting for the wakeup.
-* - digi_write() does not sleep, since it is sometimes called on
-* interrupt time.
-* - digi_write_bulk_callback() and digi_read_bulk_callback() are
-* called directly from interrupts. Hence spin_lock_irqsave()
-* and spin_unlock_irqrestore() are used in the rest of the code
-* for any locks they acquire.
-* - digi_write_bulk_callback() gets the port lock before waking up
-* processes sleeping on the port write_wait. It also schedules
-* wake ups so they happen from the scheduler, because the tty
-* system can miss wake ups from interrupts.
-* - All sleeps use a timeout of DIGI_RETRY_TIMEOUT before looping to
-* recheck the condition they are sleeping on. This is defensive,
-* in case a wake up is lost.
-* - Following Documentation/DocBook/kernel-locking.tmpl no spin locks
-* are held when calling copy_to/from_user or printk.
*/
#include <linux/kernel.h>
@@ -467,7 +251,7 @@ static int digi_read_oob_callback(struct urb *urb);
/* Statics */
-static int debug;
+static bool debug;
static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_2_ID) },
@@ -654,7 +438,6 @@ static int digi_write_oob_command(struct usb_serial_port *port,
len &= ~3;
memcpy(oob_port->write_urb->transfer_buffer, buf, len);
oob_port->write_urb->transfer_buffer_length = len;
- oob_port->write_urb->dev = port->serial->dev;
ret = usb_submit_urb(oob_port->write_urb, GFP_ATOMIC);
if (ret == 0) {
oob_priv->dp_write_urb_in_use = 1;
@@ -732,7 +515,6 @@ static int digi_write_inb_command(struct usb_serial_port *port,
memcpy(data, buf, len);
port->write_urb->transfer_buffer_length = len;
}
- port->write_urb->dev = port->serial->dev;
ret = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (ret == 0) {
@@ -803,7 +585,6 @@ static int digi_set_modem_signals(struct usb_serial_port *port,
data[7] = 0;
oob_port->write_urb->transfer_buffer_length = 8;
- oob_port->write_urb->dev = port->serial->dev;
ret = usb_submit_urb(oob_port->write_urb, GFP_ATOMIC);
if (ret == 0) {
@@ -899,10 +680,8 @@ static void digi_rx_unthrottle(struct tty_struct *tty)
spin_lock_irqsave(&priv->dp_port_lock, flags);
/* restart read chain */
- if (priv->dp_throttle_restart) {
- port->read_urb->dev = port->serial->dev;
+ if (priv->dp_throttle_restart)
ret = usb_submit_urb(port->read_urb, GFP_ATOMIC);
- }
/* turn throttle off */
priv->dp_throttled = 0;
@@ -1195,7 +974,6 @@ static int digi_write(struct tty_struct *tty, struct usb_serial_port *port,
}
port->write_urb->transfer_buffer_length = data_len+2;
- port->write_urb->dev = port->serial->dev;
*data++ = DIGI_CMD_SEND_DATA;
*data++ = data_len;
@@ -1271,7 +1049,6 @@ static void digi_write_bulk_callback(struct urb *urb)
= (unsigned char)priv->dp_out_buf_len;
port->write_urb->transfer_buffer_length =
priv->dp_out_buf_len + 2;
- port->write_urb->dev = serial->dev;
memcpy(port->write_urb->transfer_buffer + 2, priv->dp_out_buf,
priv->dp_out_buf_len);
ret = usb_submit_urb(port->write_urb, GFP_ATOMIC);
@@ -1473,7 +1250,6 @@ static int digi_startup_device(struct usb_serial *serial)
/* set USB_DISABLE_SPD flag for write bulk urbs */
for (i = 0; i < serial->type->num_ports + 1; i++) {
port = serial->port[i];
- port->write_urb->dev = port->serial->dev;
ret = usb_submit_urb(port->read_urb, GFP_KERNEL);
if (ret != 0) {
dev_err(&port->dev,
@@ -1616,7 +1392,6 @@ static void digi_read_bulk_callback(struct urb *urb)
}
/* continue read */
- urb->dev = port->serial->dev;
ret = usb_submit_urb(urb, GFP_ATOMIC);
if (ret != 0 && ret != -EPERM) {
dev_err(&port->dev,
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
index 504b558..aced681 100644
--- a/drivers/usb/serial/empeg.c
+++ b/drivers/usb/serial/empeg.c
@@ -28,7 +28,7 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
-static int debug;
+static bool debug;
/*
* Version Information
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index ff3db5d..f770415 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -55,7 +55,7 @@
#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Bill Ryder <bryder@sgi.com>, Kuba Ober <kuba@mareimbrium.org>, Andreas Mohr, Johan Hovold <jhovold@gmail.com>"
#define DRIVER_DESC "USB FTDI Serial Converters Driver"
-static int debug;
+static bool debug;
static __u16 vendor = FTDI_VID;
static __u16 product;
@@ -797,6 +797,7 @@ static struct usb_device_id id_table_combined [] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(ADI_VID, ADI_GNICEPLUS_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(HORNBY_VID, HORNBY_ELITE_PID) },
{ USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
{ USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
@@ -805,6 +806,8 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) },
{ USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(FTDI_VID, TI_XDS100V2_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) },
{ USB_DEVICE(FTDI_VID, HAMEG_HO720_PID) },
{ USB_DEVICE(FTDI_VID, HAMEG_HO730_PID) },
@@ -836,11 +839,13 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_CINTERION_MC55I_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) },
{ USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(ST_VID, ST_STMCLT1030_PID),
.driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk },
+ { USB_DEVICE(FTDI_VID, FTDI_RF_R106) },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};
@@ -1333,8 +1338,7 @@ static int set_serial_info(struct tty_struct *tty,
goto check_and_exit;
}
- if ((new_serial.baud_base != priv->baud_base) &&
- (new_serial.baud_base < 9600)) {
+ if (new_serial.baud_base != priv->baud_base) {
mutex_unlock(&priv->cfg_lock);
return -EINVAL;
}
@@ -1824,6 +1828,7 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port)
static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
{
+ struct ktermios dummy;
struct usb_device *dev = port->serial->dev;
struct ftdi_private *priv = usb_get_serial_port_data(port);
int result;
@@ -1842,8 +1847,10 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
This is same behaviour as serial.c/rs_open() - Kuba */
/* ftdi_set_termios will send usb control messages */
- if (tty)
- ftdi_set_termios(tty, port, tty->termios);
+ if (tty) {
+ memset(&dummy, 0, sizeof(dummy));
+ ftdi_set_termios(tty, port, &dummy);
+ }
/* Start reading from the device */
result = usb_serial_generic_open(tty, port);
@@ -2105,6 +2112,9 @@ static void ftdi_set_termios(struct tty_struct *tty,
cflag = termios->c_cflag;
+ if (old_termios == 0)
+ goto no_skip;
+
if (old_termios->c_cflag == termios->c_cflag
&& old_termios->c_ispeed == termios->c_ispeed
&& old_termios->c_ospeed == termios->c_ospeed)
@@ -2118,6 +2128,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
(termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB)))
goto no_data_parity_stop_changes;
+no_skip:
/* Set number of data bits, parity, stop bits */
urb_value = 0;
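The two ftdi_sio hunks work as a pair: ftdi_open() now hands ftdi_set_termios() a zeroed ktermios rather than tty->termios itself, so the "nothing changed" comparison cannot normally match a live configuration and the hardware is fully programmed on every open, while the new old_termios check and no_skip label cover a missing previous state. The comparison, restated as a hypothetical helper (termios_changed() is not part of ftdi_sio.c):

static bool termios_changed(const struct ktermios *old,
			    const struct ktermios *new)
{
	if (!old)
		return true;		/* no previous state: configure everything */

	return old->c_cflag  != new->c_cflag  ||
	       old->c_ispeed != new->c_ispeed ||
	       old->c_ospeed != new->c_ospeed;
}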
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 055b64e..6f6058f 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -2,7 +2,7 @@
* vendor/product IDs (VID/PID) of devices using FTDI USB serial converters.
* Please keep numerically sorted within individual areas, thanks!
*
- * Philipp Ghring - pg@futureware.at - added the Device ID of the USB relais
+ * Philipp Gühring - pg@futureware.at - added the Device ID of the USB relais
* from Rudolf Gugler
*
*/
@@ -39,6 +39,13 @@
/* www.candapter.com Ewert Energy Systems CANdapter device */
#define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */
+/*
+ * Texas Instruments XDS100v2 JTAG / BeagleBone A3
+ * http://processors.wiki.ti.com/index.php/XDS100
+ * http://beagleboard.org/bone
+ */
+#define TI_XDS100V2_PID 0xa6d0
+
#define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */
/* US Interface Navigator (http://www.usinterface.com/) */
@@ -78,7 +85,7 @@
*/
#define FTDI_ASK_RDR400_PID 0xC991 /* ASK RDR 400 series card reader */
-/* www.starting-point-systems.com Chameleon device */
+/* www.starting-point-systems.com µChameleon device */
#define FTDI_MICRO_CHAMELEON_PID 0xCAA0 /* Product Id */
/*
@@ -291,7 +298,7 @@
/*
* Teratronik product ids.
- * Submitted by O. Wlfelschneider.
+ * Submitted by O. Wölfelschneider.
*/
#define FTDI_TERATRONIK_VCP_PID 0xEC88 /* Teratronik device (preferring VCP driver on windows) */
#define FTDI_TERATRONIK_D2XX_PID 0xEC89 /* Teratronik device (preferring D2XX driver on windows) */
@@ -525,6 +532,12 @@
#define ADI_GNICEPLUS_PID 0xF001
/*
+ * Hornby Elite
+ */
+#define HORNBY_VID 0x04D8
+#define HORNBY_ELITE_PID 0x000A
+
+/*
* RATOC REX-USB60F
*/
#define RATOC_VENDOR_ID 0x0584
@@ -1168,3 +1181,16 @@
*/
/* TagTracer MIFARE*/
#define FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID 0xF7C0
+
+/*
+ * Rainforest Automation
+ */
+/* ZigBee controller */
+#define FTDI_RF_R106 0x8A28
+
+/*
+ * Product: HCP HIT GPRS modem
+ * Manufacturer: HCP d.o.o.
+ * ATI command output: Cinterion MC55i
+ */
+#define FTDI_CINTERION_MC55I_PID 0xA951
diff --git a/drivers/usb/serial/funsoft.c b/drivers/usb/serial/funsoft.c
index e21ce9d..5d4b099 100644
--- a/drivers/usb/serial/funsoft.c
+++ b/drivers/usb/serial/funsoft.c
@@ -16,7 +16,7 @@
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
-static int debug;
+static bool debug;
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1404, 0xcddc) },
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 1a49ca9..2134337 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -42,7 +42,7 @@
static int initial_mode = 1;
/* debug flag */
-static int debug;
+static bool debug;
#define GARMIN_VENDOR_ID 0x091E
@@ -901,7 +901,6 @@ static int garmin_init_session(struct usb_serial_port *port)
usb_kill_urb(port->interrupt_in_urb);
dbg("%s - adding interrupt input", __func__);
- port->interrupt_in_urb->dev = serial->dev;
status = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (status)
dev_err(&serial->dev->dev,
@@ -1277,7 +1276,6 @@ static void garmin_read_int_callback(struct urb *urb)
unsigned long flags;
int retval;
struct usb_serial_port *port = urb->context;
- struct usb_serial *serial = port->serial;
struct garmin_data *garmin_data_p = usb_get_serial_port_data(port);
unsigned char *data = urb->transfer_buffer;
int status = urb->status;
@@ -1311,12 +1309,6 @@ static void garmin_read_int_callback(struct urb *urb)
if (0 == (garmin_data_p->flags & FLAGS_BULK_IN_ACTIVE)) {
/* bulk data available */
- usb_fill_bulk_urb(port->read_urb, serial->dev,
- usb_rcvbulkpipe(serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- garmin_read_bulk_callback, port);
retval = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (retval) {
dev_err(&port->dev,
@@ -1353,7 +1345,6 @@ static void garmin_read_int_callback(struct urb *urb)
garmin_read_process(garmin_data_p, data, urb->actual_length, 0);
- port->interrupt_in_urb->dev = port->serial->dev;
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval)
dev_err(&urb->dev->dev,
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index e4db5ad..f740357 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -1,7 +1,7 @@
/*
* USB Serial Converter Generic functions
*
- * Copyright (C) 2010 Johan Hovold (jhovold@gmail.com)
+ * Copyright (C) 2010 - 2011 Johan Hovold (jhovold@gmail.com)
* Copyright (C) 1999 - 2002 Greg Kroah-Hartman (greg@kroah.com)
*
* This program is free software; you can redistribute it and/or
@@ -132,7 +132,7 @@ int usb_serial_generic_open(struct tty_struct *tty, struct usb_serial_port *port
/* if we have a bulk endpoint, start reading from it */
if (port->bulk_in_size)
- result = usb_serial_generic_submit_read_urb(port, GFP_KERNEL);
+ result = usb_serial_generic_submit_read_urbs(port, GFP_KERNEL);
return result;
}
@@ -157,8 +157,10 @@ static void generic_cleanup(struct usb_serial_port *port)
kfifo_reset_out(&port->write_fifo);
spin_unlock_irqrestore(&port->lock, flags);
}
- if (port->bulk_in_size)
- usb_kill_urb(port->read_urb);
+ if (port->bulk_in_size) {
+ for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i)
+ usb_kill_urb(port->read_urbs[i]);
+ }
}
}
@@ -308,19 +310,52 @@ int usb_serial_generic_chars_in_buffer(struct tty_struct *tty)
return chars;
}
-int usb_serial_generic_submit_read_urb(struct usb_serial_port *port,
+static int usb_serial_generic_submit_read_urb(struct usb_serial_port *port,
+ int index, gfp_t mem_flags)
+{
+ int res;
+
+ if (!test_and_clear_bit(index, &port->read_urbs_free))
+ return 0;
+
+ dbg("%s - port %d, urb %d\n", __func__, port->number, index);
+
+ res = usb_submit_urb(port->read_urbs[index], mem_flags);
+ if (res) {
+ if (res != -EPERM) {
+ dev_err(&port->dev,
+ "%s - usb_submit_urb failed: %d\n",
+ __func__, res);
+ }
+ set_bit(index, &port->read_urbs_free);
+ return res;
+ }
+
+ return 0;
+}
+
+int usb_serial_generic_submit_read_urbs(struct usb_serial_port *port,
gfp_t mem_flags)
{
- int result;
+ int res;
+ int i;
- result = usb_submit_urb(port->read_urb, mem_flags);
- if (result && result != -EPERM) {
- dev_err(&port->dev, "%s - error submitting urb: %d\n",
- __func__, result);
+ dbg("%s - port %d", __func__, port->number);
+
+ for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i) {
+ res = usb_serial_generic_submit_read_urb(port, i, mem_flags);
+ if (res)
+ goto err;
}
- return result;
+
+ return 0;
+err:
+ for (; i >= 0; --i)
+ usb_kill_urb(port->read_urbs[i]);
+
+ return res;
}
-EXPORT_SYMBOL_GPL(usb_serial_generic_submit_read_urb);
+EXPORT_SYMBOL_GPL(usb_serial_generic_submit_read_urbs);
void usb_serial_generic_process_read_urb(struct urb *urb)
{
@@ -356,14 +391,19 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
- int status = urb->status;
unsigned long flags;
+ int i;
- dbg("%s - port %d", __func__, port->number);
+ for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i) {
+ if (urb == port->read_urbs[i])
+ break;
+ }
+ set_bit(i, &port->read_urbs_free);
- if (unlikely(status != 0)) {
- dbg("%s - nonzero read bulk status received: %d",
- __func__, status);
+ dbg("%s - port %d, urb %d, len %d\n", __func__, port->number, i,
+ urb->actual_length);
+ if (urb->status) {
+ dbg("%s - non-zero urb status: %d\n", __func__, urb->status);
return;
}
@@ -376,7 +416,7 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
port->throttled = port->throttle_req;
if (!port->throttled) {
spin_unlock_irqrestore(&port->lock, flags);
- usb_serial_generic_submit_read_urb(port, GFP_ATOMIC);
+ usb_serial_generic_submit_read_urb(port, i, GFP_ATOMIC);
} else
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -443,7 +483,7 @@ void usb_serial_generic_unthrottle(struct tty_struct *tty)
spin_unlock_irq(&port->lock);
if (was_throttled)
- usb_serial_generic_submit_read_urb(port, GFP_KERNEL);
+ usb_serial_generic_submit_read_urbs(port, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(usb_serial_generic_unthrottle);
@@ -509,8 +549,9 @@ int usb_serial_generic_resume(struct usb_serial *serial)
if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags))
continue;
- if (port->read_urb) {
- r = usb_submit_urb(port->read_urb, GFP_NOIO);
+ if (port->bulk_in_size) {
+ r = usb_serial_generic_submit_read_urbs(port,
+ GFP_NOIO);
if (r < 0)
c++;
}
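The generic.c changes move from a single read urb to the port->read_urbs[] array tracked by the read_urbs_free bitmap: usb_serial_generic_submit_read_urbs() submits the whole batch (unwinding with usb_kill_urb() if one submission fails), and the completion handler marks its own urb free and resubmits only that one. For drivers built on the generic implementation the visible change is the renamed helper; a usage sketch (example_open() is hypothetical):

static int example_open(struct tty_struct *tty, struct usb_serial_port *port)
{
	/* device-specific setup would go here */

	/* start all read urbs; each completion resubmits only its own urb */
	return usb_serial_generic_submit_read_urbs(port, GFP_KERNEL);
}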
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index 2ee8075..0497575 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -191,7 +191,7 @@ static const struct divisor_table_entry divisor_table[] = {
};
/* local variables */
-static int debug;
+static bool debug;
static atomic_t CmdUrbs; /* Number of outstanding Command Write Urbs */
@@ -610,7 +610,6 @@ static void edge_interrupt_callback(struct urb *urb)
/* we have pending bytes on the
bulk in pipe, send a request */
- edge_serial->read_urb->dev = edge_serial->serial->dev;
result = usb_submit_urb(edge_serial->read_urb, GFP_ATOMIC);
if (result) {
dev_err(&edge_serial->serial->dev->dev, "%s - usb_submit_urb(read bulk) failed with result = %d\n", __func__, result);
@@ -711,7 +710,6 @@ static void edge_bulk_in_callback(struct urb *urb)
/* check to see if there's any more data for us to read */
if (edge_serial->rxBytesAvail > 0) {
dbg("%s - posting a read", __func__);
- edge_serial->read_urb->dev = edge_serial->serial->dev;
retval = usb_submit_urb(edge_serial->read_urb, GFP_ATOMIC);
if (retval) {
dev_err(&urb->dev->dev,
@@ -1330,7 +1328,6 @@ static void send_more_port_data(struct edgeport_serial *edge_serial,
edge_port->txCredits -= count;
edge_port->icount.tx += count;
- urb->dev = edge_serial->serial->dev;
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status) {
/* something went wrong */
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 0aac00a..5818bfc 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -15,13 +15,6 @@
* For questions or problems with this driver, contact Inside Out
* Networks technical support, or Peter Berger <pberger@brimson.com>,
* or Al Borchers <alborchers@steinerpoint.com>.
- *
- * Version history:
- *
- * July 11, 2002 Removed 4 port device structure since all TI UMP
- * chips have only 2 ports
- * David Iacovelli (davidi@ionetworks.com)
- *
*/
#include <linux/kernel.h>
@@ -217,10 +210,10 @@ static unsigned char OperationalMajorVersion;
static unsigned char OperationalMinorVersion;
static unsigned short OperationalBuildNumber;
-static int debug;
+static bool debug;
static int closing_wait = EDGE_CLOSING_WAIT;
-static int ignore_cpu_rev;
+static bool ignore_cpu_rev;
static int default_uart_mode; /* RS232 */
static void edge_tty_recv(struct device *dev, struct tty_struct *tty,
@@ -1777,12 +1770,11 @@ static void edge_bulk_in_callback(struct urb *urb)
exit:
/* continue read unless stopped */
spin_lock(&edge_port->ep_lock);
- if (edge_port->ep_read_urb_state == EDGE_READ_URB_RUNNING) {
- urb->dev = edge_port->port->serial->dev;
+ if (edge_port->ep_read_urb_state == EDGE_READ_URB_RUNNING)
retval = usb_submit_urb(urb, GFP_ATOMIC);
- } else if (edge_port->ep_read_urb_state == EDGE_READ_URB_STOPPING) {
+ else if (edge_port->ep_read_urb_state == EDGE_READ_URB_STOPPING)
edge_port->ep_read_urb_state = EDGE_READ_URB_STOPPED;
- }
+
spin_unlock(&edge_port->ep_lock);
if (retval)
dev_err(&urb->dev->dev,
@@ -1959,9 +1951,7 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
status = -EINVAL;
goto release_es_lock;
}
- urb->complete = edge_interrupt_callback;
urb->context = edge_serial;
- urb->dev = dev;
status = usb_submit_urb(urb, GFP_KERNEL);
if (status) {
dev_err(&port->dev,
@@ -1987,9 +1977,7 @@ static int edge_open(struct tty_struct *tty, struct usb_serial_port *port)
goto unlink_int_urb;
}
edge_port->ep_read_urb_state = EDGE_READ_URB_RUNNING;
- urb->complete = edge_bulk_in_callback;
urb->context = edge_port;
- urb->dev = dev;
status = usb_submit_urb(urb, GFP_KERNEL);
if (status) {
dev_err(&port->dev,
@@ -2118,12 +2106,7 @@ static void edge_send(struct tty_struct *tty)
port->write_urb->transfer_buffer);
/* set up our urb */
- usb_fill_bulk_urb(port->write_urb, port->serial->dev,
- usb_sndbulkpipe(port->serial->dev,
- port->bulk_out_endpointAddress),
- port->write_urb->transfer_buffer, count,
- edge_bulk_out_callback,
- port);
+ port->write_urb->transfer_buffer_length = count;
/* send the data out the bulk port */
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
@@ -2267,9 +2250,6 @@ static int restart_read(struct edgeport_port *edge_port)
if (edge_port->ep_read_urb_state == EDGE_READ_URB_STOPPED) {
urb = edge_port->port->read_urb;
- urb->complete = edge_bulk_in_callback;
- urb->context = edge_port;
- urb->dev = edge_port->port->serial->dev;
status = usb_submit_urb(urb, GFP_ATOMIC);
}
edge_port->ep_read_urb_state = EDGE_READ_URB_RUNNING;
@@ -2677,15 +2657,7 @@ cleanup:
static void edge_disconnect(struct usb_serial *serial)
{
- int i;
- struct edgeport_port *edge_port;
-
dbg("%s", __func__);
-
- for (i = 0; i < serial->num_ports; ++i) {
- edge_port = usb_get_serial_port_data(serial->port[i]);
- edge_remove_sysfs_attrs(edge_port->port);
- }
}
static void edge_release(struct usb_serial *serial)
@@ -2764,6 +2736,7 @@ static struct usb_serial_driver edgeport_1port_device = {
.disconnect = edge_disconnect,
.release = edge_release,
.port_probe = edge_create_sysfs_attrs,
+ .port_remove = edge_remove_sysfs_attrs,
.ioctl = edge_ioctl,
.set_termios = edge_set_termios,
.tiocmget = edge_tiocmget,
@@ -2795,6 +2768,7 @@ static struct usb_serial_driver edgeport_2port_device = {
.disconnect = edge_disconnect,
.release = edge_release,
.port_probe = edge_create_sysfs_attrs,
+ .port_remove = edge_remove_sysfs_attrs,
.ioctl = edge_ioctl,
.set_termios = edge_set_termios,
.tiocmget = edge_tiocmget,
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index 4735931..06053a9 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -8,40 +8,6 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
- * (12/12/2002) ganesh
- * Added support for practically all devices supported by ActiveSync
- * on Windows. Thanks to Wes Cilldhaire <billybobjoehenrybob@hotmail.com>.
- *
- * (26/11/2002) ganesh
- * Added insmod options to specify product and vendor id.
- * Use modprobe ipaq vendor=0xfoo product=0xbar
- *
- * (26/7/2002) ganesh
- * Fixed up broken error handling in ipaq_open. Retry the "kickstart"
- * packet much harder - this drastically reduces connection failures.
- *
- * (30/4/2002) ganesh
- * Added support for the Casio EM500. Completely untested. Thanks
- * to info from Nathan <wfilardo@fuse.net>
- *
- * (19/3/2002) ganesh
- * Don't submit urbs while holding spinlocks. Not strictly necessary
- * in 2.5.x.
- *
- * (8/3/2002) ganesh
- * The ipaq sometimes emits a '\0' before the CLIENT string. At this
- * point of time, the ppp ldisc is not yet attached to the tty, so
- * n_tty echoes "^ " to the ipaq, which messes up the chat. In 2.5.6-pre2
- * this causes a panic because echo_char() tries to sleep in interrupt
- * context.
- * The fix is to tell the upper layers that this is a raw device so that
- * echoing is suppressed. Thanks to Lyle Lindholm for a detailed bug
- * report.
- *
- * (25/2/2002) ganesh
- * Added support for the HP Jornada 548 and 568. Completely untested.
- * Thanks to info from Heath Robinson and Arieh Davidoff.
*/
#include <linux/kernel.h>
@@ -68,7 +34,7 @@
#define DRIVER_DESC "USB PocketPC PDA driver"
static __u16 product, vendor;
-static int debug;
+static bool debug;
static int connect_retries = KP_RETRIES;
static int initial_wait;
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index 5170799..6f9356f 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -147,7 +147,7 @@ static struct usb_driver usb_ipw_driver = {
.no_dynamic_id = 1,
};
-static int debug;
+static bool debug;
static int ipw_open(struct tty_struct *tty, struct usb_serial_port *port)
{
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index ccbce40..84a396e 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -22,38 +22,6 @@
*
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
- *
- * 2008_Jun_02 Felipe Balbi <me@felipebalbi.com>
- * Introduced common header to be used also in USB Gadget Framework.
- * Still needs some other style fixes.
- *
- * 2007_Jun_21 Alan Cox <alan@lxorguk.ukuu.org.uk>
- * Minimal cleanups for some of the driver problens and tty layer abuse.
- * Still needs fixing to allow multiple dongles.
- *
- * 2002_Mar_07 greg kh
- * moved some needed structures and #define values from the
- * net/irda/irda-usb.h file into our file, as we don't want to depend on
- * that codebase compiling correctly :)
- *
- * 2002_Jan_14 gb
- * Added module parameter to force specific number of XBOFs.
- * Added ir_xbof_change().
- * Reorganized read_bulk_callback error handling.
- * Switched from FILL_BULK_URB() to usb_fill_bulk_urb().
- *
- * 2001_Nov_08 greg kh
- * Changed the irda_usb_find_class_desc() function based on comments and
- * code from Martin Diehl.
- *
- * 2001_Nov_01 greg kh
- * Added support for more IrDA USB devices.
- * Added support for zero packet. Added buffer override paramater, so
- * users can transfer larger packets at once if they wish. Both patches
- * came from Dag Brattli <dag@obexcode.com>.
- *
- * 2001_Oct_07 greg kh
- * initial version released.
*/
#include <linux/kernel.h>
@@ -77,7 +45,7 @@
#define DRIVER_AUTHOR "Greg Kroah-Hartman <greg@kroah.com>, Johan Hovold <jhovold@gmail.com>"
#define DRIVER_DESC "USB IR Dongle driver"
-static int debug;
+static bool debug;
/* if overridden by the user, then use their value for the size of the read and
* write urbs */
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index 6aca631..3077a44 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -34,9 +34,9 @@
#ifdef CONFIG_USB_SERIAL_DEBUG
-static int debug = 1;
+static bool debug = 1;
#else
-static int debug;
+static bool debug;
#endif
/*
@@ -65,7 +65,7 @@ static int clockmode = 1;
static int cdmode = 1;
static int iuu_cardin;
static int iuu_cardout;
-static int xmas;
+static bool xmas;
static int vcc_default = 5;
static void read_rxcmd_callback(struct urb *urb);
@@ -1168,15 +1168,14 @@ static int iuu_open(struct tty_struct *tty, struct usb_serial_port *port)
port->write_urb->transfer_buffer, 1,
read_rxcmd_callback, port);
result = usb_submit_urb(port->write_urb, GFP_KERNEL);
-
if (result) {
dev_err(&port->dev, "%s - failed submitting read urb,"
" error %d\n", __func__, result);
iuu_close(port);
- return -EPROTO;
} else {
dbg("%s - rxcmd OK", __func__);
}
+
return result;
}
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index a442352..4cc36c7 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -25,73 +25,6 @@
Tip 'o the hat to IBM (and previously Linuxcare :) for supporting
staff in their work on open source projects.
-
- Change History
-
- 2003sep04 LPM (Keyspan) add support for new single port product USA19HS.
- Improve setup message handling for all devices.
-
- Wed Feb 19 22:00:00 PST 2003 (Jeffrey S. Laing <keyspan@jsl.com>)
- Merged the current (1/31/03) Keyspan code with the current (2.4.21-pre4)
- Linux source tree. The Linux tree lacked support for the 49WLC and
- others. The Keyspan patches didn't work with the current kernel.
-
- 2003jan30 LPM add support for the 49WLC and MPR
-
- Wed Apr 25 12:00:00 PST 2002 (Keyspan)
- Started with Hugh Blemings' code dated Jan 17, 2002. All adapters
- now supported (including QI and QW). Modified port open, port
- close, and send setup() logic to fix various data and endpoint
- synchronization bugs and device LED status bugs. Changed keyspan_
- write_room() to accurately return transmit buffer availability.
- Changed forwardingLength from 1 to 16 for all adapters.
-
- Fri Oct 12 16:45:00 EST 2001
- Preliminary USA-19QI and USA-28 support (both test OK for me, YMMV)
-
- Wed Apr 25 12:00:00 PST 2002 (Keyspan)
- Started with Hugh Blemings' code dated Jan 17, 2002. All adapters
- now supported (including QI and QW). Modified port open, port
- close, and send setup() logic to fix various data and endpoint
- synchronization bugs and device LED status bugs. Changed keyspan_
- write_room() to accurately return transmit buffer availability.
- Changed forwardingLength from 1 to 16 for all adapters.
-
- Fri Oct 12 16:45:00 EST 2001
- Preliminary USA-19QI and USA-28 support (both test OK for me, YMMV)
-
- Mon Oct 8 14:29:00 EST 2001 hugh
- Fixed bug that prevented mulitport devices operating correctly
- if they weren't the first unit attached.
-
- Sat Oct 6 12:31:21 EST 2001 hugh
- Added support for USA-28XA and -28XB, misc cleanups, break support
- for usa26 based models thanks to David Gibson.
-
- Thu May 31 11:56:42 PDT 2001 gkh
- switched from using spinlock to a semaphore
-
- (04/08/2001) gb
- Identify version on module load.
-
- (11/01/2000) Adam J. Richter
- usb_device_id table support.
-
- Tue Oct 10 23:15:33 EST 2000 Hugh
- Merged Paul's changes with my USA-49W mods. Work in progress
- still...
-
- Wed Jul 19 14:00:42 EST 2000 gkh
- Added module_init and module_exit functions to handle the fact that
- this driver is a loadable module now.
-
- Tue Jul 18 16:14:52 EST 2000 Hugh
- Basic character input/output for USA-19 now mostly works,
- fixed at 9600 baud for the moment.
-
- Sat Jul 8 11:11:48 EST 2000 Hugh
- First public release - nothing works except the firmware upload.
- Tested on PPC and x86 architectures, seems to behave...
*/
@@ -112,7 +45,7 @@
#include <linux/usb/serial.h>
#include "keyspan.h"
-static int debug;
+static bool debug;
/*
* Version Information
@@ -397,7 +330,6 @@ static int keyspan_write(struct tty_struct *tty,
/* send the data out the bulk port */
this_urb->transfer_buffer_length = todo + dataOffset;
- this_urb->dev = port->serial->dev;
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
dbg("usb_submit_urb(write bulk) failed (%d)", err);
@@ -463,7 +395,6 @@ static void usa26_indat_callback(struct urb *urb)
tty_kref_put(tty);
/* Resubmit urb so we continue receiving */
- urb->dev = port->serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
@@ -559,7 +490,6 @@ static void usa26_instat_callback(struct urb *urb)
}
/* Resubmit urb so we continue receiving */
- urb->dev = serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
@@ -609,7 +539,6 @@ static void usa28_indat_callback(struct urb *urb)
tty_kref_put(tty);
/* Resubmit urb so we continue receiving */
- urb->dev = port->serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)",
@@ -694,7 +623,6 @@ static void usa28_instat_callback(struct urb *urb)
}
/* Resubmit urb so we continue receiving */
- urb->dev = serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
@@ -789,8 +717,6 @@ static void usa49_instat_callback(struct urb *urb)
}
/* Resubmit urb so we continue receiving */
- urb->dev = serial->dev;
-
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
@@ -848,7 +774,6 @@ static void usa49_indat_callback(struct urb *urb)
tty_kref_put(tty);
/* Resubmit urb so we continue receiving */
- urb->dev = port->serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
@@ -919,8 +844,6 @@ static void usa49wg_indat_callback(struct urb *urb)
}
/* Resubmit urb so we continue receiving */
- urb->dev = serial->dev;
-
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
@@ -996,7 +919,6 @@ static void usa90_indat_callback(struct urb *urb)
}
/* Resubmit urb so we continue receiving */
- urb->dev = port->serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
@@ -1047,7 +969,6 @@ static void usa90_instat_callback(struct urb *urb)
}
/* Resubmit urb so we continue receiving */
- urb->dev = serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
@@ -1123,7 +1044,6 @@ static void usa67_instat_callback(struct urb *urb)
}
/* Resubmit urb so we continue receiving */
- urb->dev = serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - resubmit read urb failed. (%d)", __func__, err);
@@ -1223,7 +1143,6 @@ static int keyspan_open(struct tty_struct *tty, struct usb_serial_port *port)
urb = p_priv->in_urbs[i];
if (urb == NULL)
continue;
- urb->dev = serial->dev;
/* make sure endpoint data toggle is synchronized
with the device */
@@ -1239,7 +1158,6 @@ static int keyspan_open(struct tty_struct *tty, struct usb_serial_port *port)
urb = p_priv->out_urbs[i];
if (urb == NULL)
continue;
- urb->dev = serial->dev;
/* usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
usb_pipeout(urb->pipe), 0); */
}
@@ -1956,7 +1874,6 @@ static int keyspan_usa26_send_setup(struct usb_serial *serial,
/* send the data out the device on control endpoint */
this_urb->transfer_buffer_length = sizeof(msg);
- this_urb->dev = serial->dev;
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - usb_submit_urb(setup) failed (%d)", __func__, err);
@@ -2084,7 +2001,6 @@ static int keyspan_usa28_send_setup(struct usb_serial *serial,
/* send the data out the device on control endpoint */
this_urb->transfer_buffer_length = sizeof(msg);
- this_urb->dev = serial->dev;
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - usb_submit_urb(setup) failed", __func__);
@@ -2271,8 +2187,6 @@ static int keyspan_usa49_send_setup(struct usb_serial *serial,
/* send the data out the device on control endpoint */
this_urb->transfer_buffer_length = sizeof(msg);
-
- this_urb->dev = serial->dev;
}
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
@@ -2415,7 +2329,6 @@ static int keyspan_usa90_send_setup(struct usb_serial *serial,
/* send the data out the device on control endpoint */
this_urb->transfer_buffer_length = sizeof(msg);
- this_urb->dev = serial->dev;
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
dbg("%s - usb_submit_urb(setup) failed (%d)", __func__, err);
@@ -2561,7 +2474,6 @@ static int keyspan_usa67_send_setup(struct usb_serial *serial,
/* send the data out the device on control endpoint */
this_urb->transfer_buffer_length = sizeof(msg);
- this_urb->dev = serial->dev;
err = usb_submit_urb(this_urb, GFP_ATOMIC);
if (err != 0)
@@ -2650,14 +2562,12 @@ static int keyspan_startup(struct usb_serial *serial)
keyspan_setup_urbs(serial);
if (s_priv->instat_urb != NULL) {
- s_priv->instat_urb->dev = serial->dev;
err = usb_submit_urb(s_priv->instat_urb, GFP_KERNEL);
if (err != 0)
dbg("%s - submit instat urb failed %d", __func__,
err);
}
if (s_priv->indat_urb != NULL) {
- s_priv->indat_urb->dev = serial->dev;
err = usb_submit_urb(s_priv->indat_urb, GFP_KERNEL);
if (err != 0)
dbg("%s - submit indat urb failed %d", __func__,
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index d5c0c6a..7c62a70 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -12,59 +12,6 @@
*
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
- *
- * (09/07/2001) gkh
- * cleaned up the Xircom support. Added ids for Entregra device which is
- * the same as the Xircom device. Enabled the code to be compiled for
- * either Xircom or Keyspan devices.
- *
- * (08/11/2001) Cristian M. Craciunescu
- * support for Xircom PGSDB9
- *
- * (05/31/2001) gkh
- * switched from using spinlock to a semaphore, which fixes lots of
- * problems.
- *
- * (04/08/2001) gb
- * Identify version on module load.
- *
- * (11/01/2000) Adam J. Richter
- * usb_device_id table support
- *
- * (10/05/2000) gkh
- * Fixed bug with urb->dev not being set properly, now that the usb
- * core needs it.
- *
- * (08/28/2000) gkh
- * Added locks for SMP safeness.
- * Fixed MOD_INC and MOD_DEC logic and the ability to open a port more
- * than once.
- *
- * (07/20/2000) borchers
- * - keyspan_pda_write no longer sleeps if it is called on interrupt time;
- * PPP and the line discipline with stty echo on can call write on
- * interrupt time and this would cause an oops if write slept
- * - if keyspan_pda_write is in an interrupt, it will not call
- * usb_control_msg (which sleeps) to query the room in the device
- * buffer, it simply uses the current room value it has
- * - if the urb is busy or if it is throttled keyspan_pda_write just
- * returns 0, rather than sleeping to wait for this to change; the
- * write_chan code in n_tty.c will sleep if needed before calling
- * keyspan_pda_write again
- * - if the device needs to be unthrottled, write now queues up the
- * call to usb_control_msg (which sleeps) to unthrottle the device
- * - the wakeups from keyspan_pda_write_bulk_callback are queued rather
- * than done directly from the callback to avoid the race in write_chan
- * - keyspan_pda_chars_in_buffer also indicates its buffer is full if the
- * urb status is -EINPROGRESS, meaning it cannot write at the moment
- *
- * (07/19/2000) gkh
- * Added module_init and module_exit functions to handle the fact that this
- * driver is a loadable module now.
- *
- * (03/26/2000) gkh
- * Split driver up into device specific pieces.
- *
*/
@@ -84,7 +31,7 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
-static int debug;
+static bool debug;
/* make a simple define to handle if we are compiling keyspan_pda or xircom support */
#if defined(CONFIG_USB_SERIAL_KEYSPAN_PDA) || defined(CONFIG_USB_SERIAL_KEYSPAN_PDA_MODULE)
@@ -290,7 +237,6 @@ static void keyspan_pda_rx_unthrottle(struct tty_struct *tty)
struct usb_serial_port *port = tty->driver_data;
/* just restart the receive interrupt URB */
dbg("keyspan_pda_rx_unthrottle port %d", port->number);
- port->interrupt_in_urb->dev = port->serial->dev;
if (usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL))
dbg(" usb_submit_urb(read urb) failed");
}
@@ -532,11 +478,11 @@ static int keyspan_pda_write(struct tty_struct *tty,
the device is full (wait until it says there is room)
*/
spin_lock_bh(&port->lock);
- if (port->write_urb_busy || priv->tx_throttled) {
+ if (!test_bit(0, &port->write_urbs_free) || priv->tx_throttled) {
spin_unlock_bh(&port->lock);
return 0;
}
- port->write_urb_busy = 1;
+ clear_bit(0, &port->write_urbs_free);
spin_unlock_bh(&port->lock);
/* At this point the URB is in our control, nobody else can submit it
@@ -598,7 +544,6 @@ static int keyspan_pda_write(struct tty_struct *tty,
priv->tx_room -= count;
- port->write_urb->dev = port->serial->dev;
rc = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (rc) {
dbg(" usb_submit_urb(write bulk) failed");
@@ -618,7 +563,7 @@ static int keyspan_pda_write(struct tty_struct *tty,
rc = count;
exit:
if (rc < 0)
- port->write_urb_busy = 0;
+ set_bit(0, &port->write_urbs_free);
return rc;
}
@@ -628,7 +573,7 @@ static void keyspan_pda_write_bulk_callback(struct urb *urb)
struct usb_serial_port *port = urb->context;
struct keyspan_pda_private *priv;
- port->write_urb_busy = 0;
+ set_bit(0, &port->write_urbs_free);
priv = usb_get_serial_port_data(port);
/* queue up a wakeup at scheduler time */
@@ -661,7 +606,7 @@ static int keyspan_pda_chars_in_buffer(struct tty_struct *tty)
n_tty.c:normal_poll() ) that we're not writeable. */
spin_lock_irqsave(&port->lock, flags);
- if (port->write_urb_busy || priv->tx_throttled)
+ if (!test_bit(0, &port->write_urbs_free) || priv->tx_throttled)
ret = 256;
spin_unlock_irqrestore(&port->lock, flags);
return ret;
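The old write_urb_busy flag gives way to bit 0 of port->write_urbs_free, the bitmap the usb-serial core keeps for its write URBs, so atomic bit operations replace the lock-protected integer. A rough sketch of the claim/release idiom, assuming bit 0 tracks the single legacy write URB:

#include <linux/bitops.h>
#include <linux/usb/serial.h>

/* Try to claim the write URB; returns true if it was free. */
static bool example_claim_write_urb(struct usb_serial_port *port)
{
        return test_and_clear_bit(0, &port->write_urbs_free);
}

/* Hand it back, e.g. from the write completion handler or after a
 * failed usb_submit_urb(). */
static void example_release_write_urb(struct usb_serial_port *port)
{
        set_bit(0, &port->write_urbs_free);
}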
@@ -717,7 +662,6 @@ static int keyspan_pda_open(struct tty_struct *tty,
priv->tx_throttled = *room ? 0 : 1;
/*Start reading from the device*/
- port->interrupt_in_urb->dev = serial->dev;
rc = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (rc) {
dbg("%s - usb_submit_urb(read int) failed", __func__);
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 19373cb..fc064e1 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -49,7 +49,7 @@
#include <linux/usb/serial.h>
#include "kl5kusb105.h"
-static int debug;
+static bool debug;
/*
* Version Information
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index ddd1463..a92a3ef 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -20,18 +20,6 @@
*
* Supported readers: USB TWIN, KAAN Standard Plus and SecOVID Reader Plus
* (Adapter K), B1 Professional and KAAN Professional (Adapter B)
- *
- * (21/05/2004) tw
- * Fix bug with P'n'P readers
- *
- * (28/05/2003) tw
- * Add support for KAAN SIM
- *
- * (12/09/2002) tw
- * Adapted to 2.5.
- *
- * (11/08/2002) tw
- * Initial version.
*/
@@ -50,7 +38,7 @@
#include <linux/ioctl.h>
#include "kobil_sct.h"
-static int debug;
+static bool debug;
/* Version Information */
#define DRIVER_VERSION "21/05/2004"
@@ -231,9 +219,6 @@ static int kobil_open(struct tty_struct *tty, struct usb_serial_port *port)
dbg("%s - port %d", __func__, port->number);
priv = usb_get_serial_port_data(port);
- /* someone sets the dev to 0 if the close method has been called */
- port->interrupt_in_urb->dev = port->serial->dev;
-
/* allocate memory for transfer buffer */
transfer_buffer = kzalloc(transfer_buffer_length, GFP_KERNEL);
if (!transfer_buffer)
@@ -393,8 +378,6 @@ static void kobil_read_int_callback(struct urb *urb)
tty_flip_buffer_push(tty);
}
tty_kref_put(tty);
- /* someone sets the dev to 0 if the close method has been called */
- port->interrupt_in_urb->dev = port->serial->dev;
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
dbg("%s - port %d Send read URB returns: %i",
@@ -475,17 +458,9 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
priv->filled = 0;
priv->cur_pos = 0;
- /* someone sets the dev to 0 if the close method
- has been called */
- port->interrupt_in_urb->dev = port->serial->dev;
-
/* start reading (except TWIN and KAAN SIM) */
if (priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID ||
priv->device_type == KOBIL_ADAPTER_K_PRODUCT_ID) {
- /* someone sets the dev to 0 if the close method has
- been called */
- port->interrupt_in_urb->dev = port->serial->dev;
-
result = usb_submit_urb(port->interrupt_in_urb,
GFP_NOIO);
dbg("%s - port %d Send read URB returns: %i",
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index ba0d287..27fa9c8 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -19,50 +19,6 @@
* DTR/RTS signal handling may be incomplete or incorrect. I have mainly
* implemented what I have seen with SniffUSB or found in belkin_sa.c.
* For further TODOs check also belkin_sa.c.
- *
- * TEST STATUS:
- * Basic tests have been performed with minicom/zmodem transfers and
- * modem dialing under Linux 2.4.0-test10 (for me it works fine).
- *
- * 04-Nov-2003 Bill Marr <marr at flex dot com>
- * - Mimic Windows driver by sending 2 USB 'device request' messages
- * following normal 'baud rate change' message. This allows data to be
- * transmitted to RS-232 devices which don't assert the 'CTS' signal.
- *
- * 10-Nov-2001 Wolfgang Grandegger
- * - Fixed an endianness problem with the baud rate selection for PowerPC.
- *
- * 06-Dec-2001 Martin Hamilton <martinh@gnu.org>
- * - Added support for the Belkin F5U109 DB9 adaptor
- *
- * 30-May-2001 Greg Kroah-Hartman
- * - switched from using spinlock to a semaphore, which fixes lots of
- * problems.
- *
- * 04-May-2001 Stelian Pop
- * - Set the maximum bulk output size for Sitecom U232-P25 model to 16 bytes
- * instead of the device-reported 32 (using 32 bytes causes a lot of data
- * loss; the Windows driver uses 16 too).
- *
- * 02-May-2001 Stelian Pop
- * - Fixed the baud calculation for Sitecom U232-P25 model
- *
- * 08-Apr-2001 gb
- * - Identify version on module load.
- *
- * 06-Jan-2001 Cornel Ciocirlan
- * - Added support for Sitecom U232-P25 model (Product Id 0x0230)
- * - Added support for D-Link DU-H3SP USB BAY (Product Id 0x0200)
- *
- * 29-Nov-2000 Greg Kroah-Hartman
- * - Added device id table to fit with 2.4.0-test11 structure.
- * - took out DEAL_WITH_TWO_INT_IN_ENDPOINTS #define as it's not needed
- * (lots of things will change if/when the usb-serial core changes to
- * handle these issues.)
- *
- * 27-Nov-2000 Wolfgang Grandegge
- * A version for kernel 2.4.0-test10 released to the Linux community
- * (via linux-usb-devel).
*/
#include <linux/kernel.h>
@@ -89,7 +45,7 @@
#define DRIVER_AUTHOR "Wolfgang Grandegger <wolfgang@ces.ch>"
#define DRIVER_DESC "Magic Control Technology USB-RS232 converter driver"
-static int debug;
+static bool debug;
/*
* Function prototypes
@@ -526,7 +482,6 @@ static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port)
mct_u232_msr_to_state(&priv->control_state, priv->last_msr);
spin_unlock_irqrestore(&priv->lock, flags);
- port->read_urb->dev = port->serial->dev;
retval = usb_submit_urb(port->read_urb, GFP_KERNEL);
if (retval) {
dev_err(&port->dev,
@@ -535,7 +490,6 @@ static int mct_u232_open(struct tty_struct *tty, struct usb_serial_port *port)
goto error;
}
- port->interrupt_in_urb->dev = port->serial->dev;
retval = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (retval) {
usb_kill_urb(port->read_urb);
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 3524a10..4554ee4 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -71,7 +71,7 @@ struct moschip_port {
struct urb *write_urb_pool[NUM_URBS];
};
-static int debug;
+static bool debug;
static struct usb_serial_driver moschip7720_2port_driver;
@@ -939,14 +939,7 @@ static void mos7720_bulk_in_callback(struct urb *urb)
}
tty_kref_put(tty);
- if (!port->read_urb) {
- dbg("URB KILLED !!!");
- return;
- }
-
if (port->read_urb->status != -EINPROGRESS) {
- port->read_urb->dev = port->serial->dev;
-
retval = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (retval)
dbg("usb_submit_urb(read bulk) failed, retval = %d",
@@ -1014,7 +1007,6 @@ static int mos77xx_calc_num_ports(struct usb_serial *serial)
static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct usb_serial *serial;
- struct usb_serial_port *port0;
struct urb *urb;
struct moschip_port *mos7720_port;
int response;
@@ -1029,8 +1021,6 @@ static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
if (mos7720_port == NULL)
return -ENODEV;
- port0 = serial->port[0];
-
usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe);
@@ -1735,8 +1725,6 @@ static void change_port_settings(struct tty_struct *tty,
write_mos_reg(serial, port_number, IER, 0x0c);
if (port->read_urb->status != -EINPROGRESS) {
- port->read_urb->dev = serial->dev;
-
status = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (status)
dbg("usb_submit_urb(read bulk) failed, status = %d",
@@ -1786,13 +1774,7 @@ static void mos7720_set_termios(struct tty_struct *tty,
/* change the port settings to the new ones specified */
change_port_settings(tty, mos7720_port, old_termios);
- if (!port->read_urb) {
- dbg("%s", "URB KILLED !!!!!");
- return;
- }
-
if (port->read_urb->status != -EINPROGRESS) {
- port->read_urb->dev = serial->dev;
status = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (status)
dbg("usb_submit_urb(read bulk) failed, status = %d",
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index c72abd5..03b5e249 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -263,7 +263,7 @@ struct moschip_port {
};
-static int debug;
+static bool debug;
/*
* mos7840_set_reg_sync
@@ -792,8 +792,6 @@ static void mos7840_bulk_in_callback(struct urb *urb)
}
- mos7840_port->read_urb->dev = serial->dev;
-
mos7840_port->read_urb_busy = true;
retval = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);
@@ -2058,7 +2056,6 @@ static void mos7840_change_port_settings(struct tty_struct *tty,
mos7840_set_uart_reg(port, INTERRUPT_ENABLE_REGISTER, Data);
if (mos7840_port->read_urb_busy == false) {
- mos7840_port->read_urb->dev = serial->dev;
mos7840_port->read_urb_busy = true;
status = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);
if (status) {
@@ -2130,7 +2127,6 @@ static void mos7840_set_termios(struct tty_struct *tty,
}
if (mos7840_port->read_urb_busy == false) {
- mos7840_port->read_urb->dev = serial->dev;
mos7840_port->read_urb_busy = true;
status = usb_submit_urb(mos7840_port->read_urb, GFP_ATOMIC);
if (status) {
diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
index 1f00f24..b28f1db 100644
--- a/drivers/usb/serial/navman.c
+++ b/drivers/usb/serial/navman.c
@@ -21,7 +21,7 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
-static int debug;
+static bool debug;
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0a99, 0x0001) }, /* Talon Technology device */
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index 60f38d5..8b8d58a 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -9,31 +9,6 @@
* driver
*
* Please report both successes and troubles to the author at omninet@kroah.com
- *
- * (05/30/2001) gkh
- * switched from using spinlock to a semaphore, which fixes lots of
- * problems.
- *
- * (04/08/2001) gb
- * Identify version on module load.
- *
- * (11/01/2000) Adam J. Richter
- * usb_device_id table support
- *
- * (10/05/2000) gkh
- * Fixed bug with urb->dev not being set properly, now that the usb
- * core needs it.
- *
- * (08/28/2000) gkh
- * Added locks for SMP safeness.
- * Fixed MOD_INC and MOD_DEC logic and the ability to open a port more
- * than once.
- * Fixed potential race in omninet_write_bulk_callback
- *
- * (07/19/2000) gkh
- * Added module_init and module_exit functions to handle the fact that this
- * driver is a loadable module now.
- *
*/
#include <linux/kernel.h>
@@ -44,12 +19,11 @@
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
-#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
-static int debug;
+static bool debug;
/*
* Version Information
@@ -174,12 +148,6 @@ static int omninet_open(struct tty_struct *tty, struct usb_serial_port *port)
tty_port_tty_set(&wport->port, tty);
/* Start reading from the device */
- usb_fill_bulk_urb(port->read_urb, serial->dev,
- usb_rcvbulkpipe(serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- omninet_read_bulk_callback, port);
result = usb_submit_urb(port->read_urb, GFP_KERNEL);
if (result)
dev_err(&port->dev,
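The usb_fill_bulk_urb() calls removed here (and in the read callback below) were re-initializing a URB that the usb-serial core already fills once at probe time, so open and the completion handler only need to submit it. A short sketch under that assumption:

static int example_open(struct tty_struct *tty, struct usb_serial_port *port)
{
        int result;

        /* port->read_urb was filled by the core at probe time (pipe,
         * buffer, callback), so opening the port just submits it. */
        result = usb_submit_urb(port->read_urb, GFP_KERNEL);
        if (result)
                dev_err(&port->dev,
                        "failed submitting read urb, error %d\n", result);
        return result;
}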
@@ -236,11 +204,6 @@ static void omninet_read_bulk_callback(struct urb *urb)
}
/* Continue trying to always read */
- usb_fill_bulk_urb(urb, port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- urb->transfer_buffer, urb->transfer_buffer_length,
- omninet_read_bulk_callback, port);
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result)
dev_err(&port->dev,
@@ -267,14 +230,10 @@ static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
return 0;
}
- spin_lock_bh(&wport->lock);
- if (wport->write_urb_busy) {
- spin_unlock_bh(&wport->lock);
+ if (!test_and_clear_bit(0, &port->write_urbs_free)) {
dbg("%s - already writing", __func__);
return 0;
}
- wport->write_urb_busy = 1;
- spin_unlock_bh(&wport->lock);
count = (count > OMNINET_BULKOUTSIZE) ? OMNINET_BULKOUTSIZE : count;
@@ -292,10 +251,9 @@ static int omninet_write(struct tty_struct *tty, struct usb_serial_port *port,
/* send the data out the bulk port, always 64 bytes */
wport->write_urb->transfer_buffer_length = 64;
- wport->write_urb->dev = serial->dev;
result = usb_submit_urb(wport->write_urb, GFP_ATOMIC);
if (result) {
- wport->write_urb_busy = 0;
+ set_bit(0, &wport->write_urbs_free);
dev_err(&port->dev,
"%s - failed submitting write urb, error %d\n",
__func__, result);
@@ -314,8 +272,7 @@ static int omninet_write_room(struct tty_struct *tty)
int room = 0; /* Default: no room */
- /* FIXME: no consistent locking for write_urb_busy */
- if (wport->write_urb_busy)
+ if (test_bit(0, &wport->write_urbs_free))
room = wport->bulk_out_size - OMNINET_HEADERLEN;
dbg("%s - returns %d", __func__, room);
@@ -332,7 +289,7 @@ static void omninet_write_bulk_callback(struct urb *urb)
dbg("%s - port %0x", __func__, port->number);
- port->write_urb_busy = 0;
+ set_bit(0, &port->write_urbs_free);
if (status) {
dbg("%s - nonzero write bulk status received: %d",
__func__, status);
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index c248a91..262ded9 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -32,7 +32,7 @@
* examples of 1D barcode types are EAN, UPC, Code39, IATA, etc. */
#define DRIVER_DESC "Opticon USB barcode to serial driver (1D)"
-static int debug;
+static bool debug;
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x065a, 0x0009) },
@@ -384,7 +384,6 @@ static void opticon_unthrottle(struct tty_struct *tty)
priv->actually_throttled = false;
spin_unlock_irqrestore(&priv->lock, flags);
- priv->bulk_read_urb->dev = port->serial->dev;
if (was_throttled) {
result = usb_submit_urb(priv->bulk_read_urb, GFP_ATOMIC);
if (result)
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 6dd6453..b54afce 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -476,6 +476,14 @@ static void option_instat_callback(struct urb *urb);
#define VIETTEL_VENDOR_ID 0x2262
#define VIETTEL_PRODUCT_VT1000 0x0002
+/* ZD Incorporated */
+#define ZD_VENDOR_ID 0x0685
+#define ZD_PRODUCT_7000 0x7000
+
+/* LG products */
+#define LG_VENDOR_ID 0x1004
+#define LG_PRODUCT_L02C 0x618f
+
/* some device interfaces need special handling due to a number of reasons */
enum option_blacklist_reason {
OPTION_BLACKLIST_NONE = 0,
@@ -780,7 +788,6 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff),
@@ -795,7 +802,6 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
- /* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) }, */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) },
@@ -820,7 +826,6 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
- /* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0053, 0xff, 0xff, 0xff) }, */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf1_blacklist },
@@ -828,7 +833,6 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff),
@@ -838,7 +842,6 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0067, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0077, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) },
@@ -847,6 +850,16 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0083, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0086, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0087, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0088, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0089, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0090, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0091, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0092, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0093, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0095, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0096, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0097, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) },
@@ -867,23 +880,18 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0144, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0145, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0146, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0148, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0149, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0150, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0151, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0153, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0154, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0164, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
@@ -1058,17 +1066,27 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
+ 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
+
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
- 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff) },
+
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
@@ -1178,6 +1196,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) },
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
{ USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) },
+ { USB_DEVICE(LG_VENDOR_ID, LG_PRODUCT_L02C) }, /* docomo L-02C modem */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, option_ids);
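Most of the entries added above rely on USB_DEVICE_AND_INTERFACE_INFO(), which matches on vendor and product ID plus the interface class/subclass/protocol triple, so only the vendor-specific serial interfaces of a composite modem bind here while its storage or network interfaces stay with their own drivers. An isolated sketch of such a table; the 0x1234/0x5678 IDs are made up purely for illustration:

#include <linux/module.h>
#include <linux/usb.h>
#include <linux/mod_devicetable.h>

static const struct usb_device_id example_ids[] = {
        /* Hypothetical modem: bind only to interfaces whose class,
         * subclass and protocol are all vendor specific (0xff). */
        { USB_DEVICE_AND_INTERFACE_INFO(0x1234, 0x5678, 0xff, 0xff, 0xff) },
        { }     /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_ids);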
@@ -1229,7 +1249,7 @@ static struct usb_serial_driver option_1port_device = {
#endif
};
-static int debug;
+static bool debug;
/* per port private data */
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index 4c29e6c..e287fd3 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -74,7 +74,7 @@ static struct usb_driver oti6858_driver = {
.no_dynamic_id = 1,
};
-static int debug;
+static bool debug;
/* requests */
#define OTI6858_REQ_GET_STATUS (USB_DIR_IN | USB_TYPE_VENDOR | 0x00)
@@ -264,7 +264,6 @@ static void setup_line(struct work_struct *work)
spin_unlock_irqrestore(&priv->lock, flags);
dbg("%s(): submitting interrupt urb", __func__);
- port->interrupt_in_urb->dev = port->serial->dev;
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result != 0) {
dev_err(&port->dev, "%s(): usb_submit_urb() failed"
@@ -321,7 +320,6 @@ static void send_data(struct work_struct *work)
priv->flags.write_urb_in_use = 0;
dbg("%s(): submitting interrupt urb", __func__);
- port->interrupt_in_urb->dev = port->serial->dev;
result = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
if (result != 0) {
dev_err(&port->dev, "%s(): usb_submit_urb() failed"
@@ -334,7 +332,6 @@ static void send_data(struct work_struct *work)
port->write_urb->transfer_buffer,
count, &port->lock);
port->write_urb->transfer_buffer_length = count;
- port->write_urb->dev = port->serial->dev;
result = usb_submit_urb(port->write_urb, GFP_NOIO);
if (result != 0) {
dev_err(&port->dev, "%s(): usb_submit_urb() failed"
@@ -583,13 +580,12 @@ static int oti6858_open(struct tty_struct *tty, struct usb_serial_port *port)
kfree(buf);
dbg("%s(): submitting interrupt urb", __func__);
- port->interrupt_in_urb->dev = serial->dev;
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result != 0) {
dev_err(&port->dev, "%s(): usb_submit_urb() failed"
" with error %d\n", __func__, result);
oti6858_close(port);
- return -EPROTO;
+ return result;
}
/* setup termios */
@@ -837,7 +833,6 @@ static void oti6858_read_int_callback(struct urb *urb)
if (can_recv) {
int result;
- port->read_urb->dev = port->serial->dev;
result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
if (result != 0) {
priv->flags.read_urb_in_use = 0;
@@ -866,7 +861,6 @@ static void oti6858_read_int_callback(struct urb *urb)
int result;
/* dbg("%s(): submitting interrupt urb", __func__); */
- urb->dev = port->serial->dev;
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result != 0) {
dev_err(&urb->dev->dev,
@@ -894,18 +888,6 @@ static void oti6858_read_bulk_callback(struct urb *urb)
spin_unlock_irqrestore(&priv->lock, flags);
if (status != 0) {
- /*
- if (status == -EPROTO) {
- * PL2303 mysteriously fails with -EPROTO reschedule
- the read *
- dbg("%s - caught -EPROTO, resubmitting the urb",
- __func__);
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result)
- dev_err(&urb->dev->dev, "%s - failed resubmitting read urb, error %d\n", __func__, result);
- return;
- }
- */
dbg("%s(): unable to handle the error, exiting", __func__);
return;
}
@@ -918,7 +900,6 @@ static void oti6858_read_bulk_callback(struct urb *urb)
tty_kref_put(tty);
/* schedule the interrupt urb */
- port->interrupt_in_urb->dev = port->serial->dev;
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
if (result != 0 && result != -EPERM) {
dev_err(&port->dev, "%s(): usb_submit_urb() failed,"
@@ -955,7 +936,6 @@ static void oti6858_write_bulk_callback(struct urb *urb)
dbg("%s(): overflow in write", __func__);
port->write_urb->transfer_buffer_length = 1;
- port->write_urb->dev = port->serial->dev;
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (result) {
dev_err(&port->dev, "%s(): usb_submit_urb() failed,"
@@ -968,7 +948,6 @@ static void oti6858_write_bulk_callback(struct urb *urb)
priv->flags.write_urb_in_use = 0;
/* schedule the interrupt urb if we are still open */
- port->interrupt_in_urb->dev = port->serial->dev;
dbg("%s(): submitting interrupt urb", __func__);
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
if (result != 0) {
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index fc2d66f..3d8cda5 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -36,7 +36,7 @@
*/
#define DRIVER_DESC "Prolific PL2303 USB to serial adaptor driver"
-static int debug;
+static bool debug;
#define PL2303_CLOSING_WAIT (30*HZ)
@@ -502,21 +502,20 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
if (tty)
pl2303_set_termios(tty, port, &tmp_termios);
- dbg("%s - submitting read urb", __func__);
- result = usb_serial_generic_submit_read_urb(port, GFP_KERNEL);
- if (result) {
- pl2303_close(port);
- return -EPROTO;
- }
-
dbg("%s - submitting interrupt urb", __func__);
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result) {
dev_err(&port->dev, "%s - failed submitting interrupt urb,"
" error %d\n", __func__, result);
- pl2303_close(port);
- return -EPROTO;
+ return result;
}
+
+ result = usb_serial_generic_open(tty, port);
+ if (result) {
+ usb_kill_urb(port->interrupt_in_urb);
+ return result;
+ }
+
port->port.drain_delay = 256;
return 0;
}
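The reworked pl2303 open path submits the interrupt URB, lets usb_serial_generic_open() take care of the bulk-in side, and unwinds with usb_kill_urb() if the generic open fails, propagating the real error code instead of flattening everything to -EPROTO. A condensed sketch of that ordering, assuming the generic open handles read-URB submission:

static int example_port_open(struct tty_struct *tty, struct usb_serial_port *port)
{
        int result;

        result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
        if (result) {
                dev_err(&port->dev,
                        "failed submitting interrupt urb, error %d\n", result);
                return result;          /* propagate, don't mask as -EPROTO */
        }

        result = usb_serial_generic_open(tty, port);
        if (result) {
                /* Undo the step above before bailing out. */
                usb_kill_urb(port->interrupt_in_urb);
                return result;
        }

        return 0;
}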
diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
index 30b73e6..a348198 100644
--- a/drivers/usb/serial/qcaux.c
+++ b/drivers/usb/serial/qcaux.c
@@ -36,6 +36,7 @@
#define UTSTARCOM_PRODUCT_UM175_V1 0x3712
#define UTSTARCOM_PRODUCT_UM175_V2 0x3714
#define UTSTARCOM_PRODUCT_UM175_ALLTEL 0x3715
+#define PANTECH_PRODUCT_UML190_VZW 0x3716
#define PANTECH_PRODUCT_UML290_VZW 0x3718
/* CMOTECH devices */
@@ -67,7 +68,11 @@ static struct usb_device_id id_table[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) },
{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_U520, 0xff, 0x00, 0x00) },
- { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML190_VZW, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML190_VZW, 0xff, 0xfe, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xfd, 0xff) }, /* NMEA */
+ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xfe, 0xff) }, /* WMC */
+ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xff, 0xff) }, /* DIAG */
{ },
};
MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index aa9367f..f98800f 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -22,7 +22,7 @@
#define DRIVER_AUTHOR "Qualcomm Inc"
#define DRIVER_DESC "Qualcomm USB Serial driver"
-static int debug;
+static bool debug;
static const struct usb_device_id id_table[] = {
{USB_DEVICE(0x05c6, 0x9211)}, /* Acer Gobi QDL device */
@@ -36,6 +36,11 @@ static const struct usb_device_id id_table[] = {
{USB_DEVICE(0x413c, 0x8171)}, /* Dell Gobi QDL device */
{USB_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */
{USB_DEVICE(0x1410, 0xa008)}, /* Novatel Gobi QDL device */
+ {USB_DEVICE(0x1410, 0xa010)}, /* Novatel Gobi QDL device */
+ {USB_DEVICE(0x1410, 0xa011)}, /* Novatel Gobi QDL device */
+ {USB_DEVICE(0x1410, 0xa012)}, /* Novatel Gobi QDL device */
+ {USB_DEVICE(0x1410, 0xa013)}, /* Novatel Gobi QDL device */
+ {USB_DEVICE(0x1410, 0xa014)}, /* Novatel Gobi QDL device */
{USB_DEVICE(0x0b05, 0x1776)}, /* Asus Gobi Modem device */
{USB_DEVICE(0x0b05, 0x1774)}, /* Asus Gobi QDL device */
{USB_DEVICE(0x19d2, 0xfff3)}, /* ONDA Gobi Modem device */
@@ -86,7 +91,16 @@ static const struct usb_device_id id_table[] = {
{USB_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */
{USB_DEVICE(0x05c6, 0x9204)}, /* Gobi 2000 QDL device */
{USB_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */
+
+ {USB_DEVICE(0x05c6, 0x920c)}, /* Gobi 3000 QDL */
+ {USB_DEVICE(0x05c6, 0x920d)}, /* Gobi 3000 Composite */
+ {USB_DEVICE(0x1410, 0xa020)}, /* Novatel Gobi 3000 QDL */
+ {USB_DEVICE(0x1410, 0xa021)}, /* Novatel Gobi 3000 Composite */
+ {USB_DEVICE(0x413c, 0x8193)}, /* Dell Gobi 3000 QDL */
+ {USB_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */
{USB_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */
+ {USB_DEVICE(0x12D1, 0x14F0)}, /* Sony Gobi 3000 QDL */
+ {USB_DEVICE(0x12D1, 0x14F1)}, /* Sony Gobi 3000 Composite */
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table);
@@ -123,8 +137,6 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id)
spin_lock_init(&data->susp_lock);
- usb_enable_autosuspend(serial->dev);
-
switch (nintf) {
case 1:
/* QDL mode */
diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c
index a36e231..d074b37 100644
--- a/drivers/usb/serial/safe_serial.c
+++ b/drivers/usb/serial/safe_serial.c
@@ -81,9 +81,9 @@
#define CONFIG_USB_SERIAL_SAFE_PADDED 0
#endif
-static int debug;
-static int safe = 1;
-static int padded = CONFIG_USB_SERIAL_SAFE_PADDED;
+static bool debug;
+static bool safe = 1;
+static bool padded = CONFIG_USB_SERIAL_SAFE_PADDED;
#define DRIVER_VERSION "v0.1"
#define DRIVER_AUTHOR "sl@lineo.com, tbr@lineo.com, Johan Hovold <jhovold@gmail.com>"
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index b18179b..fdae0a4 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -46,8 +46,8 @@
allocations > PAGE_SIZE and the number of packets in a page
is an integer; 512 is the largest possible packet on EHCI */
-static int debug;
-static int nmea;
+static bool debug;
+static bool nmea;
/* Used in interface blacklisting */
struct sierra_iface_info {
@@ -681,7 +681,6 @@ static void sierra_instat_callback(struct urb *urb)
/* Resubmit urb so we continue receiving IRQ data */
if (status != -ESHUTDOWN && status != -ENOENT) {
usb_mark_last_busy(serial->dev);
- urb->dev = serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err && err != -EPERM)
dev_err(&port->dev, "%s: resubmit intr urb "
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 180ea6c..d7f5eee 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -33,7 +33,7 @@
#define DRIVER_VERSION "v0.10"
#define DRIVER_DESC "SPCP8x5 USB to serial adaptor driver"
-static int debug;
+static bool debug;
#define SPCP8x5_007_VID 0x04FC
#define SPCP8x5_007_PID 0x0201
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index 87362e4..7697858 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -46,7 +46,7 @@
#define FULLPWRBIT 0x00000080
#define NEXT_BOARD_POWER_BIT 0x00000004
-static int debug;
+static bool debug;
/* Version Information */
#define DRIVER_VERSION "v0.1"
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index 7096f79..50651cf 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -20,7 +20,7 @@
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
-static int debug;
+static bool debug;
static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x05e0, 0x0600) },
@@ -182,7 +182,6 @@ static void symbol_unthrottle(struct tty_struct *tty)
priv->actually_throttled = false;
spin_unlock_irq(&priv->lock);
- priv->int_urb->dev = port->serial->dev;
if (was_throttled) {
result = usb_submit_urb(priv->int_urb, GFP_KERNEL);
if (result)
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index ea84456..75b838e 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -150,7 +150,7 @@ static int ti_download_firmware(struct ti_device *tdev);
/* Data */
/* module parameters */
-static int debug;
+static bool debug;
static int closing_wait = TI_DEFAULT_CLOSING_WAIT;
static ushort vendor_3410[TI_EXTRA_VID_PID_COUNT];
static unsigned int vendor_3410_count;
@@ -165,7 +165,7 @@ static unsigned int product_5052_count;
/* the array dimension is the number of default entries plus */
/* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */
/* null entry */
-static struct usb_device_id ti_id_table_3410[13+TI_EXTRA_VID_PID_COUNT+1] = {
+static struct usb_device_id ti_id_table_3410[14+TI_EXTRA_VID_PID_COUNT+1] = {
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
@@ -179,6 +179,7 @@ static struct usb_device_id ti_id_table_3410[13+TI_EXTRA_VID_PID_COUNT+1] = {
{ USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
+ { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
};
static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = {
@@ -188,7 +189,7 @@ static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = {
{ USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) },
};
-static struct usb_device_id ti_id_table_combined[17+2*TI_EXTRA_VID_PID_COUNT+1] = {
+static struct usb_device_id ti_id_table_combined[18+2*TI_EXTRA_VID_PID_COUNT+1] = {
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) },
{ USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) },
{ USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) },
@@ -206,6 +207,7 @@ static struct usb_device_id ti_id_table_combined[17+2*TI_EXTRA_VID_PID_COUNT+1]
{ USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
{ USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
+ { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
{ }
};
@@ -535,9 +537,7 @@ static int ti_open(struct tty_struct *tty, struct usb_serial_port *port)
status = -EINVAL;
goto release_lock;
}
- urb->complete = ti_interrupt_callback;
urb->context = tdev;
- urb->dev = dev;
status = usb_submit_urb(urb, GFP_KERNEL);
if (status) {
dev_err(&port->dev,
@@ -619,9 +619,7 @@ static int ti_open(struct tty_struct *tty, struct usb_serial_port *port)
goto unlink_int_urb;
}
tport->tp_read_urb_state = TI_READ_URB_RUNNING;
- urb->complete = ti_bulk_in_callback;
urb->context = tport;
- urb->dev = dev;
status = usb_submit_urb(urb, GFP_KERNEL);
if (status) {
dev_err(&port->dev, "%s - submit read urb failed, %d\n",
@@ -1236,12 +1234,11 @@ static void ti_bulk_in_callback(struct urb *urb)
exit:
/* continue to read unless stopping */
spin_lock(&tport->tp_lock);
- if (tport->tp_read_urb_state == TI_READ_URB_RUNNING) {
- urb->dev = port->serial->dev;
+ if (tport->tp_read_urb_state == TI_READ_URB_RUNNING)
retval = usb_submit_urb(urb, GFP_ATOMIC);
- } else if (tport->tp_read_urb_state == TI_READ_URB_STOPPING) {
+ else if (tport->tp_read_urb_state == TI_READ_URB_STOPPING)
tport->tp_read_urb_state = TI_READ_URB_STOPPED;
- }
+
spin_unlock(&tport->tp_lock);
if (retval)
dev_err(dev, "%s - resubmit read urb failed, %d\n",
@@ -1574,9 +1571,7 @@ static int ti_restart_read(struct ti_port *tport, struct tty_struct *tty)
tport->tp_read_urb_state = TI_READ_URB_RUNNING;
urb = tport->tp_port->read_urb;
spin_unlock_irqrestore(&tport->tp_lock, flags);
- urb->complete = ti_bulk_in_callback;
urb->context = tport;
- urb->dev = tport->tp_port->serial->dev;
status = usb_submit_urb(urb, GFP_KERNEL);
} else {
tport->tp_read_urb_state = TI_READ_URB_RUNNING;
diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h
index 2aac195..f140f1b 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.h
+++ b/drivers/usb/serial/ti_usb_3410_5052.h
@@ -49,6 +49,10 @@
#define MTS_MT9234ZBA_PRODUCT_ID 0xF115
#define MTS_MT9234ZBAOLD_PRODUCT_ID 0x0319
+/* Abbott Diabetics vendor and product ids */
+#define ABBOTT_VENDOR_ID 0x1a61
+#define ABBOTT_PRODUCT_ID 0x3410
+
/* Commands */
#define TI_GET_VERSION 0x01
#define TI_GET_PORT_STATUS 0x02
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index cc274fd..611b206 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -50,7 +50,7 @@ static struct usb_driver usb_serial_driver = {
.disconnect = usb_serial_disconnect,
.suspend = usb_serial_suspend,
.resume = usb_serial_resume,
- .no_dynamic_id = 1,
+ .no_dynamic_id = 1,
.supports_autosuspend = 1,
};
@@ -61,7 +61,7 @@ static struct usb_driver usb_serial_driver = {
drivers depend on it.
*/
-static int debug;
+static bool debug;
/* initially all NULL */
static struct usb_serial *serial_table[SERIAL_TTY_MINORS];
static DEFINE_MUTEX(table_lock);
@@ -260,6 +260,10 @@ static int serial_activate(struct tty_port *tport, struct tty_struct *tty)
else
retval = port->serial->type->open(tty, port);
mutex_unlock(&serial->disc_mutex);
+
+ if (retval < 0)
+ retval = usb_translate_errors(retval);
+
return retval;
}
@@ -360,7 +364,8 @@ static int serial_write(struct tty_struct *tty, const unsigned char *buf,
/* pass on to the driver specific version of this function */
retval = port->serial->type->write(tty, port, buf, count);
-
+ if (retval < 0)
+ retval = usb_translate_errors(retval);
exit:
return retval;
}
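Both serial_activate() and serial_write() now run the subdriver's return value through usb_translate_errors(), which passes through the handful of codes a tty caller can act on and folds everything else to -EIO. An approximate rendering of that helper's behaviour (paraphrased, not copied from <linux/usb.h>):

#include <linux/errno.h>

/* Approximate behaviour of usb_translate_errors(): keep the error codes
 * user space is expected to see from a tty, map the rest to -EIO. */
static int example_translate_errors(int error)
{
        switch (error) {
        case 0:
        case -ENOMEM:
        case -ENODEV:
        case -EOPNOTSUPP:
                return error;
        default:
                return -EIO;
        }
}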
@@ -562,8 +567,8 @@ static void kill_traffic(struct usb_serial_port *port)
{
int i;
- usb_kill_urb(port->read_urb);
- usb_kill_urb(port->write_urb);
+ for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i)
+ usb_kill_urb(port->read_urbs[i]);
for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i)
usb_kill_urb(port->write_urbs[i]);
/*
@@ -595,17 +600,17 @@ static void port_release(struct device *dev)
kill_traffic(port);
cancel_work_sync(&port->work);
- usb_free_urb(port->read_urb);
- usb_free_urb(port->write_urb);
usb_free_urb(port->interrupt_in_urb);
usb_free_urb(port->interrupt_out_urb);
+ for (i = 0; i < ARRAY_SIZE(port->read_urbs); ++i) {
+ usb_free_urb(port->read_urbs[i]);
+ kfree(port->bulk_in_buffers[i]);
+ }
for (i = 0; i < ARRAY_SIZE(port->write_urbs); ++i) {
usb_free_urb(port->write_urbs[i]);
kfree(port->bulk_out_buffers[i]);
}
kfifo_free(&port->write_fifo);
- kfree(port->bulk_in_buffer);
- kfree(port->bulk_out_buffer);
kfree(port->interrupt_in_buffer);
kfree(port->interrupt_out_buffer);
kfree(port);
@@ -686,16 +691,18 @@ static int serial_carrier_raised(struct tty_port *port)
{
struct usb_serial_port *p = container_of(port, struct usb_serial_port, port);
struct usb_serial_driver *drv = p->serial->type;
+
if (drv->carrier_raised)
return drv->carrier_raised(p);
/* No carrier control - don't block */
- return 1;
+ return 1;
}
static void serial_dtr_rts(struct tty_port *port, int on)
{
struct usb_serial_port *p = container_of(port, struct usb_serial_port, port);
struct usb_serial_driver *drv = p->serial->type;
+
if (drv->dtr_rts)
drv->dtr_rts(p, on);
}
@@ -724,6 +731,7 @@ int usb_serial_probe(struct usb_interface *interface,
unsigned int minor;
int buffer_size;
int i;
+ int j;
int num_interrupt_in = 0;
int num_interrupt_out = 0;
int num_bulk_in = 0;
@@ -906,38 +914,41 @@ int usb_serial_probe(struct usb_interface *interface,
for (i = 0; i < num_bulk_in; ++i) {
endpoint = bulk_in_endpoint[i];
port = serial->port[i];
- port->read_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!port->read_urb) {
- dev_err(&interface->dev, "No free urbs available\n");
- goto probe_error;
- }
buffer_size = max_t(int, serial->type->bulk_in_size,
usb_endpoint_maxp(endpoint));
port->bulk_in_size = buffer_size;
port->bulk_in_endpointAddress = endpoint->bEndpointAddress;
- port->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL);
- if (!port->bulk_in_buffer) {
- dev_err(&interface->dev,
+
+ for (j = 0; j < ARRAY_SIZE(port->read_urbs); ++j) {
+ set_bit(j, &port->read_urbs_free);
+ port->read_urbs[j] = usb_alloc_urb(0, GFP_KERNEL);
+ if (!port->read_urbs[j]) {
+ dev_err(&interface->dev,
+ "No free urbs available\n");
+ goto probe_error;
+ }
+ port->bulk_in_buffers[j] = kmalloc(buffer_size,
+ GFP_KERNEL);
+ if (!port->bulk_in_buffers[j]) {
+ dev_err(&interface->dev,
"Couldn't allocate bulk_in_buffer\n");
- goto probe_error;
- }
- usb_fill_bulk_urb(port->read_urb, dev,
- usb_rcvbulkpipe(dev,
+ goto probe_error;
+ }
+ usb_fill_bulk_urb(port->read_urbs[j], dev,
+ usb_rcvbulkpipe(dev,
endpoint->bEndpointAddress),
- port->bulk_in_buffer, buffer_size,
- serial->type->read_bulk_callback, port);
+ port->bulk_in_buffers[j], buffer_size,
+ serial->type->read_bulk_callback,
+ port);
+ }
+
+ port->read_urb = port->read_urbs[0];
+ port->bulk_in_buffer = port->bulk_in_buffers[0];
}
for (i = 0; i < num_bulk_out; ++i) {
- int j;
-
endpoint = bulk_out_endpoint[i];
port = serial->port[i];
- port->write_urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!port->write_urb) {
- dev_err(&interface->dev, "No free urbs available\n");
- goto probe_error;
- }
if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL))
goto probe_error;
buffer_size = serial->type->bulk_out_size;
@@ -945,17 +956,7 @@ int usb_serial_probe(struct usb_interface *interface,
buffer_size = usb_endpoint_maxp(endpoint);
port->bulk_out_size = buffer_size;
port->bulk_out_endpointAddress = endpoint->bEndpointAddress;
- port->bulk_out_buffer = kmalloc(buffer_size, GFP_KERNEL);
- if (!port->bulk_out_buffer) {
- dev_err(&interface->dev,
- "Couldn't allocate bulk_out_buffer\n");
- goto probe_error;
- }
- usb_fill_bulk_urb(port->write_urb, dev,
- usb_sndbulkpipe(dev,
- endpoint->bEndpointAddress),
- port->bulk_out_buffer, buffer_size,
- serial->type->write_bulk_callback, port);
+
for (j = 0; j < ARRAY_SIZE(port->write_urbs); ++j) {
set_bit(j, &port->write_urbs_free);
port->write_urbs[j] = usb_alloc_urb(0, GFP_KERNEL);
@@ -978,6 +979,9 @@ int usb_serial_probe(struct usb_interface *interface,
serial->type->write_bulk_callback,
port);
}
+
+ port->write_urb = port->write_urbs[0];
+ port->bulk_out_buffer = port->bulk_out_buffers[0];
}
if (serial->type->read_int_callback) {
@@ -1196,7 +1200,7 @@ static const struct tty_operations serial_ops = {
.open = serial_open,
.close = serial_close,
.write = serial_write,
- .hangup = serial_hangup,
+ .hangup = serial_hangup,
.write_room = serial_write_room,
.ioctl = serial_ioctl,
.set_termios = serial_set_termios,
@@ -1206,9 +1210,9 @@ static const struct tty_operations serial_ops = {
.chars_in_buffer = serial_chars_in_buffer,
.tiocmget = serial_tiocmget,
.tiocmset = serial_tiocmset,
- .get_icount = serial_get_icount,
- .cleanup = serial_cleanup,
- .install = serial_install,
+ .get_icount = serial_get_icount,
+ .cleanup = serial_cleanup,
+ .install = serial_install,
.proc_fops = &serial_proc_fops,
};
@@ -1237,7 +1241,7 @@ static int __init usb_serial_init(void)
usb_serial_tty_driver->owner = THIS_MODULE;
usb_serial_tty_driver->driver_name = "usbserial";
- usb_serial_tty_driver->name = "ttyUSB";
+ usb_serial_tty_driver->name = "ttyUSB";
usb_serial_tty_driver->major = SERIAL_TTY_MAJOR;
usb_serial_tty_driver->minor_start = 0;
usb_serial_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
@@ -1336,7 +1340,6 @@ static void fixup_generic(struct usb_serial_driver *device)
int usb_serial_register(struct usb_serial_driver *driver)
{
- /* must be called with BKL held */
int retval;
if (usb_disabled())
@@ -1374,7 +1377,6 @@ EXPORT_SYMBOL_GPL(usb_serial_register);
void usb_serial_deregister(struct usb_serial_driver *device)
{
- /* must be called with BKL held */
printk(KERN_INFO "USB Serial deregistering driver %s\n",
device->description);
mutex_lock(&table_lock);
diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
index 95a8214..9b632e7 100644
--- a/drivers/usb/serial/usb_debug.c
+++ b/drivers/usb/serial/usb_debug.c
@@ -40,7 +40,7 @@ static struct usb_driver debug_driver = {
.probe = usb_serial_probe,
.disconnect = usb_serial_disconnect,
.id_table = id_table,
- .no_dynamic_id = 1,
+ .no_dynamic_id = 1,
};
/* This HW really does not support a serial break, so one will be
@@ -54,19 +54,18 @@ static void usb_debug_break_ctl(struct tty_struct *tty, int break_state)
usb_serial_generic_write(tty, port, USB_DEBUG_BRK, USB_DEBUG_BRK_SIZE);
}
-static void usb_debug_read_bulk_callback(struct urb *urb)
+static void usb_debug_process_read_urb(struct urb *urb)
{
struct usb_serial_port *port = urb->context;
if (urb->actual_length == USB_DEBUG_BRK_SIZE &&
- memcmp(urb->transfer_buffer, USB_DEBUG_BRK,
- USB_DEBUG_BRK_SIZE) == 0) {
+ memcmp(urb->transfer_buffer, USB_DEBUG_BRK,
+ USB_DEBUG_BRK_SIZE) == 0) {
usb_serial_handle_break(port);
- usb_serial_generic_submit_read_urb(port, GFP_ATOMIC);
return;
}
- usb_serial_generic_read_bulk_callback(urb);
+ usb_serial_generic_process_read_urb(urb);
}
static struct usb_serial_driver debug_device = {
@@ -79,7 +78,7 @@ static struct usb_serial_driver debug_device = {
.num_ports = 1,
.bulk_out_size = USB_DEBUG_MAX_PACKET_SIZE,
.break_ctl = usb_debug_break_ctl,
- .read_bulk_callback = usb_debug_read_bulk_callback,
+ .process_read_urb = usb_debug_process_read_urb,
};
static int __init debug_init(void)
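The switch from a read_bulk_callback to a process_read_urb hook means the generic bulk-in completion handler now owns error handling and resubmission (hence the dropped explicit usb_serial_generic_submit_read_urb() call above), and the driver hook only inspects the received data. A rough sketch of such a hook, assuming the generic machinery does everything else:

#include <linux/usb.h>
#include <linux/usb/serial.h>

static void example_process_read_urb(struct urb *urb)
{
        struct usb_serial_port *port = urb->context;

        /* Driver-specific inspection of the received data goes here;
         * usb_debug, for instance, checks for its break marker. */
        if (urb->actual_length == 0) {
                dev_dbg(&port->dev, "empty read urb\n");
                return;
        }

        /* Push the data to the tty via the stock helper; resubmission
         * happens in the generic read callback, not here. */
        usb_serial_generic_process_read_urb(urb);
}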
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index d555ca9..c88657d 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -37,7 +37,7 @@
#include <linux/serial.h>
#include "usb-wwan.h"
-static int debug;
+static bool debug;
void usb_wwan_dtr_rts(struct usb_serial_port *port, int on)
{
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 1c11959..210e4b1 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -52,7 +52,7 @@ static int palm_os_4_probe(struct usb_serial *serial,
const struct usb_device_id *id);
/* Parameters that may be passed into the module. */
-static int debug;
+static bool debug;
static __u16 vendor;
static __u16 product;
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 5b073bc..7e0acf5 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -14,57 +14,6 @@
*
* See Documentation/usb/usb-serial.txt for more information on using this
* driver
- *
- * (10/09/2002) Stuart MacDonald (stuartm@connecttech.com)
- * Upgrade to full working driver
- *
- * (05/30/2001) gkh
- * switched from using spinlock to a semaphore, which fixes lots of
- * problems.
- *
- * (04/08/2001) gb
- * Identify version on module load.
- *
- * 2001_Mar_19 gkh
- * Fixed MOD_INC and MOD_DEC logic, the ability to open a port more
- * than once, and the got the proper usb_device_id table entries so
- * the driver works again.
- *
- * (11/01/2000) Adam J. Richter
- * usb_device_id table support
- *
- * (10/05/2000) gkh
- * Fixed bug with urb->dev not being set properly, now that the usb
- * core needs it.
- *
- * (10/03/2000) smd
- * firmware is improved to guard against crap sent to device
- * firmware now replies CMD_FAILURE on bad things
- * read_callback fix you provided for private info struct
- * command_finished now indicates success or fail
- * setup_port struct now packed to avoid gcc padding
- * firmware uses 1 based port numbering, driver now handles that
- *
- * (09/11/2000) gkh
- * Removed DEBUG #ifdefs with call to usb_serial_debug_data
- *
- * (07/19/2000) gkh
- * Added module_init and module_exit functions to handle the fact that this
- * driver is a loadable module now.
- * Fixed bug with port->minor that was found by Al Borchers
- *
- * (07/04/2000) gkh
- * Added support for port settings. Baud rate can now be changed. Line
- * signals are not transferred to and from the tty layer yet, but things
- * seem to be working well now.
- *
- * (05/04/2000) gkh
- * First cut at open and close commands. Data can flow through the ports at
- * default speeds now.
- *
- * (03/26/2000) gkh
- * Split driver up into device specific pieces.
- *
*/
#include <linux/kernel.h>
@@ -87,7 +36,7 @@
#include <linux/ihex.h>
#include "whiteheat.h" /* WhiteHEAT specific commands */
-static int debug;
+static bool debug;
#ifndef CMSPAR
#define CMSPAR 0
@@ -753,7 +702,6 @@ static void whiteheat_close(struct usb_serial_port *port)
static int whiteheat_write(struct tty_struct *tty,
struct usb_serial_port *port, const unsigned char *buf, int count)
{
- struct usb_serial *serial = port->serial;
struct whiteheat_private *info = usb_get_serial_port_data(port);
struct whiteheat_urb_wrap *wrap;
struct urb *urb;
@@ -789,7 +737,6 @@ static int whiteheat_write(struct tty_struct *tty,
usb_serial_debug_data(debug, &port->dev,
__func__, bytes, urb->transfer_buffer);
- urb->dev = serial->dev;
urb->transfer_buffer_length = bytes;
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result) {
@@ -1035,7 +982,6 @@ static void command_port_read_callback(struct urb *urb)
dbg("%s - bad reply from firmware", __func__);
/* Continue trying to always read */
- command_port->read_urb->dev = command_port->serial->dev;
result = usb_submit_urb(command_port->read_urb, GFP_ATOMIC);
if (result)
dbg("%s - failed resubmitting read urb, error %d",
@@ -1141,7 +1087,6 @@ static int firm_send_command(struct usb_serial_port *port, __u8 command,
transfer_buffer[0] = command;
memcpy(&transfer_buffer[1], data, datasize);
command_port->write_urb->transfer_buffer_length = datasize + 1;
- command_port->write_urb->dev = port->serial->dev;
retval = usb_submit_urb(command_port->write_urb, GFP_NOIO);
if (retval) {
dbg("%s - submit urb failed", __func__);
@@ -1362,7 +1307,6 @@ static int start_command_port(struct usb_serial *serial)
/* Work around HCD bugs */
usb_clear_halt(serial->dev, command_port->read_urb->pipe);
- command_port->read_urb->dev = serial->dev;
retval = usb_submit_urb(command_port->read_urb, GFP_KERNEL);
if (retval) {
dev_err(&serial->dev->dev,
@@ -1410,7 +1354,6 @@ static int start_port_read(struct usb_serial_port *port)
list_del(tmp);
wrap = list_entry(tmp, struct whiteheat_urb_wrap, list);
urb = wrap->urb;
- urb->dev = port->serial->dev;
spin_unlock_irqrestore(&info->lock, flags);
retval = usb_submit_urb(urb, GFP_KERNEL);
if (retval) {
@@ -1490,7 +1433,6 @@ static void rx_data_softint(struct work_struct *work)
sent += tty_insert_flip_string(tty,
urb->transfer_buffer, urb->actual_length);
- urb->dev = port->serial->dev;
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result) {
dev_err(&port->dev,
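
The urb->dev assignments removed from whiteheat above appear to be leftovers from much older kernels: once an URB has been prepared with usb_fill_bulk_urb() at port setup time, its device pointer stays valid, so re-setting it before every usb_submit_urb() is redundant. A rough sketch of the setup step being relied on (helper and completion names are invented):

static void example_write_complete(struct urb *urb)
{
	/* completion handling elided */
}

static void example_prepare_write_urb(struct usb_serial_port *port,
				      struct urb *urb, void *buf, int len)
{
	struct usb_device *udev = port->serial->dev;

	/* urb->dev is filled in here, once, and stays valid afterwards */
	usb_fill_bulk_urb(urb, udev,
			  usb_sndbulkpipe(udev, port->bulk_out_endpointAddress),
			  buf, len, example_write_complete, port);
}
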
diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c
index 42d0eae..51af2fe 100644
--- a/drivers/usb/storage/alauda.c
+++ b/drivers/usb/storage/alauda.c
@@ -139,7 +139,7 @@ static int init_alauda(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id alauda_usb_ids[] = {
+static struct usb_device_id alauda_usb_ids[] = {
# include "unusual_alauda.h"
{ } /* Terminating entry */
};
@@ -1278,15 +1278,4 @@ static struct usb_driver alauda_driver = {
.soft_unbind = 1,
};
-static int __init alauda_init(void)
-{
- return usb_register(&alauda_driver);
-}
-
-static void __exit alauda_exit(void)
-{
- usb_deregister(&alauda_driver);
-}
-
-module_init(alauda_init);
-module_exit(alauda_exit);
+module_usb_driver(alauda_driver);
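
This and the following storage subdrivers drop their hand-rolled init/exit pairs in favour of module_usb_driver(), which generates the same registration boilerplate from the driver structure. A simplified sketch of what the helper expands to (see include/linux/usb.h and include/linux/device.h for the real macros):

#define module_usb_driver(__usb_driver) \
	module_driver(__usb_driver, usb_register, usb_deregister)

#define module_driver(__driver, __register, __unregister) \
static int __init __driver##_init(void) \
{ \
	return __register(&(__driver)); \
} \
module_init(__driver##_init); \
static void __exit __driver##_exit(void) \
{ \
	__unregister(&(__driver)); \
} \
module_exit(__driver##_exit);
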
diff --git a/drivers/usb/storage/cypress_atacb.c b/drivers/usb/storage/cypress_atacb.c
index c844718..387cbd47 100644
--- a/drivers/usb/storage/cypress_atacb.c
+++ b/drivers/usb/storage/cypress_atacb.c
@@ -43,7 +43,7 @@ MODULE_LICENSE("GPL");
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id cypress_usb_ids[] = {
+static struct usb_device_id cypress_usb_ids[] = {
# include "unusual_cypress.h"
{ } /* Terminating entry */
};
@@ -274,15 +274,4 @@ static struct usb_driver cypress_driver = {
.soft_unbind = 1,
};
-static int __init cypress_init(void)
-{
- return usb_register(&cypress_driver);
-}
-
-static void __exit cypress_exit(void)
-{
- usb_deregister(&cypress_driver);
-}
-
-module_init(cypress_init);
-module_exit(cypress_exit);
+module_usb_driver(cypress_driver);
diff --git a/drivers/usb/storage/datafab.c b/drivers/usb/storage/datafab.c
index ded836b..15d41f2 100644
--- a/drivers/usb/storage/datafab.c
+++ b/drivers/usb/storage/datafab.c
@@ -88,7 +88,7 @@ static int datafab_determine_lun(struct us_data *us,
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id datafab_usb_ids[] = {
+static struct usb_device_id datafab_usb_ids[] = {
# include "unusual_datafab.h"
{ } /* Terminating entry */
};
@@ -753,15 +753,4 @@ static struct usb_driver datafab_driver = {
.soft_unbind = 1,
};
-static int __init datafab_init(void)
-{
- return usb_register(&datafab_driver);
-}
-
-static void __exit datafab_exit(void)
-{
- usb_deregister(&datafab_driver);
-}
-
-module_init(datafab_init);
-module_exit(datafab_exit);
+module_usb_driver(datafab_driver);
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index 9fbe742..a6ade40 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -42,7 +42,7 @@ MODULE_LICENSE("GPL");
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id ene_ub6250_usb_ids[] = {
+static struct usb_device_id ene_ub6250_usb_ids[] = {
# include "unusual_ene_ub6250.h"
{ } /* Terminating entry */
};
@@ -607,8 +607,8 @@ static int sd_scsi_mode_sense(struct us_data *us, struct scsi_cmnd *srb)
static int sd_scsi_read_capacity(struct us_data *us, struct scsi_cmnd *srb)
{
- u32 bl_num;
- u16 bl_len;
+ u32 bl_num;
+ u32 bl_len;
unsigned int offset = 0;
unsigned char buf[8];
struct scatterlist *sg = NULL;
@@ -622,7 +622,7 @@ static int sd_scsi_read_capacity(struct us_data *us, struct scsi_cmnd *srb)
else
bl_num = (info->HC_C_SIZE + 1) * 1024 - 1;
} else {
- bl_len = 1<<(info->SD_READ_BL_LEN);
+ bl_len = 1 << (info->SD_READ_BL_LEN);
bl_num = info->SD_Block_Mult * (info->SD_C_SIZE + 1)
* (1 << (info->SD_C_SIZE_MULT + 2)) - 1;
}
@@ -777,7 +777,7 @@ static int ms_lib_free_logicalmap(struct us_data *us)
return 0;
}
-int ms_lib_alloc_logicalmap(struct us_data *us)
+static int ms_lib_alloc_logicalmap(struct us_data *us)
{
u32 i;
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
@@ -2248,7 +2248,7 @@ static int sd_scsi_irp(struct us_data *us, struct scsi_cmnd *srb)
/*
* ms_scsi_irp()
*/
-int ms_scsi_irp(struct us_data *us, struct scsi_cmnd *srb)
+static int ms_scsi_irp(struct us_data *us, struct scsi_cmnd *srb)
{
int result;
struct ene_ub6250_info *info = (struct ene_ub6250_info *)us->extra;
@@ -2409,15 +2409,4 @@ static struct usb_driver ene_ub6250_driver = {
.soft_unbind = 1,
};
-static int __init ene_ub6250_init(void)
-{
- return usb_register(&ene_ub6250_driver);
-}
-
-static void __exit ene_ub6250_exit(void)
-{
- usb_deregister(&ene_ub6250_driver);
-}
-
-module_init(ene_ub6250_init);
-module_exit(ene_ub6250_exit);
+module_usb_driver(ene_ub6250_driver);
diff --git a/drivers/usb/storage/freecom.c b/drivers/usb/storage/freecom.c
index 6542ca4..fa16157 100644
--- a/drivers/usb/storage/freecom.c
+++ b/drivers/usb/storage/freecom.c
@@ -119,7 +119,7 @@ static int init_freecom(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id freecom_usb_ids[] = {
+static struct usb_device_id freecom_usb_ids[] = {
# include "unusual_freecom.h"
{ } /* Terminating entry */
};
@@ -555,15 +555,4 @@ static struct usb_driver freecom_driver = {
.soft_unbind = 1,
};
-static int __init freecom_init(void)
-{
- return usb_register(&freecom_driver);
-}
-
-static void __exit freecom_exit(void)
-{
- usb_deregister(&freecom_driver);
-}
-
-module_init(freecom_init);
-module_exit(freecom_exit);
+module_usb_driver(freecom_driver);
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index ffc4193..bd55027 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -61,7 +61,7 @@
#include "scsiglue.h"
MODULE_DESCRIPTION("Driver for In-System Design, Inc. ISD200 ASIC");
-MODULE_AUTHOR("Bjrn Stenberg <bjorn@haxx.se>");
+MODULE_AUTHOR("Björn Stenberg <bjorn@haxx.se>");
MODULE_LICENSE("GPL");
static int isd200_Initialization(struct us_data *us);
@@ -76,7 +76,7 @@ static int isd200_Initialization(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id isd200_usb_ids[] = {
+static struct usb_device_id isd200_usb_ids[] = {
# include "unusual_isd200.h"
{ } /* Terminating entry */
};
@@ -1568,15 +1568,4 @@ static struct usb_driver isd200_driver = {
.soft_unbind = 1,
};
-static int __init isd200_init(void)
-{
- return usb_register(&isd200_driver);
-}
-
-static void __exit isd200_exit(void)
-{
- usb_deregister(&isd200_driver);
-}
-
-module_init(isd200_init);
-module_exit(isd200_exit);
+module_usb_driver(isd200_driver);
diff --git a/drivers/usb/storage/jumpshot.c b/drivers/usb/storage/jumpshot.c
index 6168596..a19211b 100644
--- a/drivers/usb/storage/jumpshot.c
+++ b/drivers/usb/storage/jumpshot.c
@@ -71,7 +71,7 @@ MODULE_LICENSE("GPL");
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id jumpshot_usb_ids[] = {
+static struct usb_device_id jumpshot_usb_ids[] = {
# include "unusual_jumpshot.h"
{ } /* Terminating entry */
};
@@ -679,15 +679,4 @@ static struct usb_driver jumpshot_driver = {
.soft_unbind = 1,
};
-static int __init jumpshot_init(void)
-{
- return usb_register(&jumpshot_driver);
-}
-
-static void __exit jumpshot_exit(void)
-{
- usb_deregister(&jumpshot_driver);
-}
-
-module_init(jumpshot_init);
-module_exit(jumpshot_exit);
+module_usb_driver(jumpshot_driver);
diff --git a/drivers/usb/storage/karma.c b/drivers/usb/storage/karma.c
index ba1b789..e720f8e 100644
--- a/drivers/usb/storage/karma.c
+++ b/drivers/usb/storage/karma.c
@@ -59,7 +59,7 @@ static int rio_karma_init(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id karma_usb_ids[] = {
+static struct usb_device_id karma_usb_ids[] = {
# include "unusual_karma.h"
{ } /* Terminating entry */
};
@@ -232,15 +232,4 @@ static struct usb_driver karma_driver = {
.soft_unbind = 1,
};
-static int __init karma_init(void)
-{
- return usb_register(&karma_driver);
-}
-
-static void __exit karma_exit(void)
-{
- usb_deregister(&karma_driver);
-}
-
-module_init(karma_init);
-module_exit(karma_exit);
+module_usb_driver(karma_driver);
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c
index 1943be5..d75155c 100644
--- a/drivers/usb/storage/onetouch.c
+++ b/drivers/usb/storage/onetouch.c
@@ -69,7 +69,7 @@ struct usb_onetouch {
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id onetouch_usb_ids[] = {
+static struct usb_device_id onetouch_usb_ids[] = {
# include "unusual_onetouch.h"
{ } /* Terminating entry */
};
@@ -314,15 +314,4 @@ static struct usb_driver onetouch_driver = {
.soft_unbind = 1,
};
-static int __init onetouch_init(void)
-{
- return usb_register(&onetouch_driver);
-}
-
-static void __exit onetouch_exit(void)
-{
- usb_deregister(&onetouch_driver);
-}
-
-module_init(onetouch_init);
-module_exit(onetouch_exit);
+module_usb_driver(onetouch_driver);
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index 0ce5f79..d32f720 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -398,10 +398,9 @@ static int rts51x_write_mem(struct us_data *us, u16 addr, u8 *data, u16 len)
u8 cmnd[12] = { 0 };
u8 *buf;
- buf = kmalloc(len, GFP_NOIO);
+ buf = kmemdup(data, len, GFP_NOIO);
if (buf == NULL)
return USB_STOR_TRANSPORT_ERROR;
- memcpy(buf, data, len);
US_DEBUGP("%s, addr = 0x%x, len = %d\n", __func__, addr, len);
@@ -507,15 +506,14 @@ static int enable_oscillator(struct us_data *us)
static int __do_config_autodelink(struct us_data *us, u8 *data, u16 len)
{
int retval;
- u16 addr = 0xFE47;
u8 cmnd[12] = {0};
- US_DEBUGP("%s, addr = 0x%x, len = %d\n", __FUNCTION__, addr, len);
+ US_DEBUGP("%s, addr = 0xfe47, len = %d\n", __FUNCTION__, len);
cmnd[0] = 0xF0;
cmnd[1] = 0x0E;
- cmnd[2] = (u8)(addr >> 8);
- cmnd[3] = (u8)addr;
+ cmnd[2] = 0xfe;
+ cmnd[3] = 0x47;
cmnd[4] = (u8)(len >> 8);
cmnd[5] = (u8)len;
@@ -791,7 +789,7 @@ static void rts51x_suspend_timer_fn(unsigned long data)
rts51x_set_stat(chip, RTS51X_STAT_SS);
/* ignore mass storage interface's children */
pm_suspend_ignore_children(&us->pusb_intf->dev, true);
- usb_autopm_put_interface(us->pusb_intf);
+ usb_autopm_put_interface_async(us->pusb_intf);
US_DEBUGP("%s: RTS51X_STAT_SS 01,"
"intf->pm_usage_cnt:%d, power.usage:%d\n",
__func__,
@@ -818,7 +816,7 @@ static inline int working_scsi(struct scsi_cmnd *srb)
return 1;
}
-void rts51x_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
+static void rts51x_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
{
struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra);
static int card_first_show = 1;
@@ -977,7 +975,7 @@ static void realtek_cr_destructor(void *extra)
}
#ifdef CONFIG_PM
-int realtek_cr_suspend(struct usb_interface *iface, pm_message_t message)
+static int realtek_cr_suspend(struct usb_interface *iface, pm_message_t message)
{
struct us_data *us = usb_get_intfdata(iface);
@@ -1104,15 +1102,4 @@ static struct usb_driver realtek_cr_driver = {
.supports_autosuspend = 1,
};
-static int __init realtek_cr_init(void)
-{
- return usb_register(&realtek_cr_driver);
-}
-
-static void __exit realtek_cr_exit(void)
-{
- usb_deregister(&realtek_cr_driver);
-}
-
-module_init(realtek_cr_init);
-module_exit(realtek_cr_exit);
+module_usb_driver(realtek_cr_driver);
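
Besides the module_usb_driver() conversion, realtek_cr picks up two small cleanups: kmemdup() folds the kmalloc() plus memcpy() pair into one call, and the suspend timer switches to usb_autopm_put_interface_async(), presumably because the timer callback runs in atomic context and must not block. The kmemdup() pattern in isolation (function name invented, usual usb-storage headers assumed):

static int example_dup_and_send(const u8 *data, u16 len)
{
	u8 *buf;

	/* kmemdup() = kmalloc() followed by memcpy(), with one error check */
	buf = kmemdup(data, len, GFP_NOIO);
	if (!buf)
		return USB_STOR_TRANSPORT_ERROR;

	/* ... hand "buf" to the bulk transport here ... */

	kfree(buf);
	return USB_STOR_TRANSPORT_GOOD;
}
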
diff --git a/drivers/usb/storage/sddr09.c b/drivers/usb/storage/sddr09.c
index bcb9a70..425df7d 100644
--- a/drivers/usb/storage/sddr09.c
+++ b/drivers/usb/storage/sddr09.c
@@ -71,7 +71,7 @@ static int usb_stor_sddr09_init(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id sddr09_usb_ids[] = {
+static struct usb_device_id sddr09_usb_ids[] = {
# include "unusual_sddr09.h"
{ } /* Terminating entry */
};
@@ -1789,15 +1789,4 @@ static struct usb_driver sddr09_driver = {
.soft_unbind = 1,
};
-static int __init sddr09_init(void)
-{
- return usb_register(&sddr09_driver);
-}
-
-static void __exit sddr09_exit(void)
-{
- usb_deregister(&sddr09_driver);
-}
-
-module_init(sddr09_init);
-module_exit(sddr09_exit);
+module_usb_driver(sddr09_driver);
diff --git a/drivers/usb/storage/sddr55.c b/drivers/usb/storage/sddr55.c
index 44dfed7..e4ca5fc 100644
--- a/drivers/usb/storage/sddr55.c
+++ b/drivers/usb/storage/sddr55.c
@@ -48,7 +48,7 @@ MODULE_LICENSE("GPL");
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id sddr55_usb_ids[] = {
+static struct usb_device_id sddr55_usb_ids[] = {
# include "unusual_sddr55.h"
{ } /* Terminating entry */
};
@@ -1008,15 +1008,4 @@ static struct usb_driver sddr55_driver = {
.soft_unbind = 1,
};
-static int __init sddr55_init(void)
-{
- return usb_register(&sddr55_driver);
-}
-
-static void __exit sddr55_exit(void)
-{
- usb_deregister(&sddr55_driver);
-}
-
-module_init(sddr55_init);
-module_exit(sddr55_exit);
+module_usb_driver(sddr55_driver);
diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
index 0b00091..1369d25 100644
--- a/drivers/usb/storage/shuttle_usbat.c
+++ b/drivers/usb/storage/shuttle_usbat.c
@@ -170,7 +170,7 @@ static int init_usbat_flash(struct us_data *us);
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
.driver_info = (flags)|(USB_US_TYPE_STOR<<24) }
-struct usb_device_id usbat_usb_ids[] = {
+static struct usb_device_id usbat_usb_ids[] = {
# include "unusual_usbat.h"
{ } /* Terminating entry */
};
@@ -1865,15 +1865,4 @@ static struct usb_driver usbat_driver = {
.soft_unbind = 1,
};
-static int __init usbat_init(void)
-{
- return usb_register(&usbat_driver);
-}
-
-static void __exit usbat_exit(void)
-{
- usb_deregister(&usbat_driver);
-}
-
-module_init(usbat_init);
-module_exit(usbat_exit);
+module_usb_driver(usbat_driver);
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 1d10d5b..a33ead5 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -760,18 +760,7 @@ static struct usb_driver uas_driver = {
.id_table = uas_usb_ids,
};
-static int uas_init(void)
-{
- return usb_register(&uas_driver);
-}
-
-static void uas_exit(void)
-{
- usb_deregister(&uas_driver);
-}
-
-module_init(uas_init);
-module_exit(uas_exit);
+module_usb_driver(uas_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Matthew Wilcox and Sarah Sharp");
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 24caba7..856ad92 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1914,7 +1914,7 @@ UNUSUAL_DEV( 0x1b1c, 0x1ab5, 0x0200, 0x0200,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_INITIAL_READ10 ),
-/* Patch by Richard Schtz <r.schtz@t-online.de>
+/* Patch by Richard Schütz <r.schtz@t-online.de>
* This external hard drive enclosure uses a JMicron chip which
* needs the US_FL_IGNORE_RESIDUE flag to work properly. */
UNUSUAL_DEV( 0x1e68, 0x001b, 0x0000, 0x0000,
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index c325e69..db51ba1 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -788,15 +788,19 @@ static void quiesce_and_remove_host(struct us_data *us)
struct Scsi_Host *host = us_to_host(us);
/* If the device is really gone, cut short reset delays */
- if (us->pusb_dev->state == USB_STATE_NOTATTACHED)
+ if (us->pusb_dev->state == USB_STATE_NOTATTACHED) {
set_bit(US_FLIDX_DISCONNECTING, &us->dflags);
+ wake_up(&us->delay_wait);
+ }
- /* Prevent SCSI-scanning (if it hasn't started yet)
- * and wait for the SCSI-scanning thread to stop.
+ /* Prevent SCSI scanning (if it hasn't started yet)
+ * or wait for the SCSI-scanning routine to stop.
*/
- set_bit(US_FLIDX_DONT_SCAN, &us->dflags);
- wake_up(&us->delay_wait);
- wait_for_completion(&us->scanning_done);
+ cancel_delayed_work_sync(&us->scan_dwork);
+
+ /* Balance autopm calls if scanning was cancelled */
+ if (test_bit(US_FLIDX_SCAN_PENDING, &us->dflags))
+ usb_autopm_put_interface_no_suspend(us->pusb_intf);
/* Removing the host will perform an orderly shutdown: caches
* synchronized, disks spun down, etc.
@@ -823,52 +827,28 @@ static void release_everything(struct us_data *us)
scsi_host_put(us_to_host(us));
}
-/* Thread to carry out delayed SCSI-device scanning */
-static int usb_stor_scan_thread(void * __us)
+/* Delayed-work routine to carry out SCSI-device scanning */
+static void usb_stor_scan_dwork(struct work_struct *work)
{
- struct us_data *us = (struct us_data *)__us;
+ struct us_data *us = container_of(work, struct us_data,
+ scan_dwork.work);
struct device *dev = &us->pusb_intf->dev;
- dev_dbg(dev, "device found\n");
+ dev_dbg(dev, "starting scan\n");
- set_freezable_with_signal();
- /*
- * Wait for the timeout to expire or for a disconnect
- *
- * We can't freeze in this thread or we risk causing khubd to
- * fail to freeze, but we can't be non-freezable either. Nor can
- * khubd freeze while waiting for scanning to complete as it may
- * hold the device lock, causing a hang when suspending devices.
- * So we request a fake signal when freezing and use
- * interruptible sleep to kick us out of our wait early when
- * freezing happens.
- */
- if (delay_use > 0) {
- dev_dbg(dev, "waiting for device to settle "
- "before scanning\n");
- wait_event_interruptible_timeout(us->delay_wait,
- test_bit(US_FLIDX_DONT_SCAN, &us->dflags),
- delay_use * HZ);
+ /* For bulk-only devices, determine the max LUN value */
+ if (us->protocol == USB_PR_BULK && !(us->fflags & US_FL_SINGLE_LUN)) {
+ mutex_lock(&us->dev_mutex);
+ us->max_lun = usb_stor_Bulk_max_lun(us);
+ mutex_unlock(&us->dev_mutex);
}
+ scsi_scan_host(us_to_host(us));
+ dev_dbg(dev, "scan complete\n");
- /* If the device is still connected, perform the scanning */
- if (!test_bit(US_FLIDX_DONT_SCAN, &us->dflags)) {
-
- /* For bulk-only devices, determine the max LUN value */
- if (us->protocol == USB_PR_BULK &&
- !(us->fflags & US_FL_SINGLE_LUN)) {
- mutex_lock(&us->dev_mutex);
- us->max_lun = usb_stor_Bulk_max_lun(us);
- mutex_unlock(&us->dev_mutex);
- }
- scsi_scan_host(us_to_host(us));
- dev_dbg(dev, "scan complete\n");
-
- /* Should we unbind if no devices were detected? */
- }
+ /* Should we unbind if no devices were detected? */
usb_autopm_put_interface(us->pusb_intf);
- complete_and_exit(&us->scanning_done, 0);
+ clear_bit(US_FLIDX_SCAN_PENDING, &us->dflags);
}
static unsigned int usb_stor_sg_tablesize(struct usb_interface *intf)
@@ -915,7 +895,7 @@ int usb_stor_probe1(struct us_data **pus,
init_completion(&us->cmnd_ready);
init_completion(&(us->notify));
init_waitqueue_head(&us->delay_wait);
- init_completion(&us->scanning_done);
+ INIT_DELAYED_WORK(&us->scan_dwork, usb_stor_scan_dwork);
/* Associate the us_data structure with the USB device */
result = associate_dev(us, intf);
@@ -946,7 +926,6 @@ EXPORT_SYMBOL_GPL(usb_stor_probe1);
/* Second part of general USB mass-storage probing */
int usb_stor_probe2(struct us_data *us)
{
- struct task_struct *th;
int result;
struct device *dev = &us->pusb_intf->dev;
@@ -987,20 +966,14 @@ int usb_stor_probe2(struct us_data *us)
goto BadDevice;
}
- /* Start up the thread for delayed SCSI-device scanning */
- th = kthread_create(usb_stor_scan_thread, us, "usb-stor-scan");
- if (IS_ERR(th)) {
- dev_warn(dev,
- "Unable to start the device-scanning thread\n");
- complete(&us->scanning_done);
- quiesce_and_remove_host(us);
- result = PTR_ERR(th);
- goto BadDevice;
- }
-
+ /* Submit the delayed_work for SCSI-device scanning */
usb_autopm_get_interface_no_resume(us->pusb_intf);
- wake_up_process(th);
+ set_bit(US_FLIDX_SCAN_PENDING, &us->dflags);
+ if (delay_use > 0)
+ dev_dbg(dev, "waiting for device to settle before scanning\n");
+ queue_delayed_work(system_freezable_wq, &us->scan_dwork,
+ delay_use * HZ);
return 0;
/* We come here if there are any problems */
@@ -1073,6 +1046,7 @@ static struct usb_driver usb_storage_driver = {
.id_table = usb_storage_usb_ids,
.supports_autosuspend = 1,
.soft_unbind = 1,
+ .no_dynamic_id = 1,
};
static int __init usb_stor_init(void)
diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
index 7b0f211..75f70f0 100644
--- a/drivers/usb/storage/usb.h
+++ b/drivers/usb/storage/usb.h
@@ -47,6 +47,7 @@
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/mutex.h>
+#include <linux/workqueue.h>
#include <scsi/scsi_host.h>
struct us_data;
@@ -72,7 +73,7 @@ struct us_unusual_dev {
#define US_FLIDX_DISCONNECTING 3 /* disconnect in progress */
#define US_FLIDX_RESETTING 4 /* device reset in progress */
#define US_FLIDX_TIMED_OUT 5 /* SCSI midlayer timed out */
-#define US_FLIDX_DONT_SCAN 6 /* don't scan (disconnect) */
+#define US_FLIDX_SCAN_PENDING 6 /* scanning not yet done */
#define US_FLIDX_REDO_READ10 7 /* redo READ(10) command */
#define US_FLIDX_READ10_WORKED 8 /* previous READ(10) succeeded */
@@ -147,8 +148,8 @@ struct us_data {
/* mutual exclusion and synchronization structures */
struct completion cmnd_ready; /* to sleep thread on */
struct completion notify; /* thread begin/end */
- wait_queue_head_t delay_wait; /* wait during scan, reset */
- struct completion scanning_done; /* wait for scan thread */
+ wait_queue_head_t delay_wait; /* wait during reset */
+ struct delayed_work scan_dwork; /* for async scanning */
/* subdriver information */
void *extra; /* Any extra data */
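
The usb.c and usb.h hunks above replace the usb-stor-scan kthread with a delayed_work queued on system_freezable_wq, which makes the fake-signal freezer workaround in the deleted comment unnecessary: work items on a freezable workqueue simply do not run while the system suspends. The new US_FLIDX_SCAN_PENDING bit lets the disconnect path balance the autopm reference when a pending scan is cancelled. A generic sketch of the pattern (structure and function names are invented):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_dev {
	struct delayed_work scan_dwork;
};

static void example_scan(struct work_struct *work)
{
	struct example_dev *dev =
		container_of(work, struct example_dev, scan_dwork.work);

	pr_debug("scanning device %p\n", dev);
	/* ... deferred device scanning goes here ... */
}

static void example_start_scan(struct example_dev *dev, unsigned long secs)
{
	INIT_DELAYED_WORK(&dev->scan_dwork, example_scan);
	/* freezable workqueue: items are held back across suspend/resume */
	queue_delayed_work(system_freezable_wq, &dev->scan_dwork, secs * HZ);
}

static void example_stop_scan(struct example_dev *dev)
{
	/* cancels a pending scan and waits for a running one to finish */
	cancel_delayed_work_sync(&dev->scan_dwork);
}
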
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index 32d6fc9..b4a7167 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -60,7 +60,6 @@ struct usb_skel {
__u8 bulk_in_endpointAddr; /* the address of the bulk in endpoint */
__u8 bulk_out_endpointAddr; /* the address of the bulk out endpoint */
int errors; /* the last request tanked */
- int open_count; /* count the number of openers */
bool ongoing_read; /* a read is going on */
bool processed_urb; /* indicates we haven't processed the urb */
spinlock_t err_lock; /* lock for errors */
@@ -113,22 +112,9 @@ static int skel_open(struct inode *inode, struct file *file)
* in resumption */
mutex_lock(&dev->io_mutex);
- if (!dev->open_count++) {
- retval = usb_autopm_get_interface(interface);
- if (retval) {
- dev->open_count--;
- mutex_unlock(&dev->io_mutex);
- kref_put(&dev->kref, skel_delete);
- goto exit;
- }
- } /* else { //uncomment this block if you want exclusive open
- retval = -EBUSY;
- dev->open_count--;
- mutex_unlock(&dev->io_mutex);
- kref_put(&dev->kref, skel_delete);
- goto exit;
- } */
- /* prevent the device from being autosuspended */
+ retval = usb_autopm_get_interface(interface);
+ if (retval)
+ goto out_err;
/* save our object in the file's private structure */
file->private_data = dev;
@@ -148,7 +134,7 @@ static int skel_release(struct inode *inode, struct file *file)
/* allow the device to be autosuspended */
mutex_lock(&dev->io_mutex);
- if (!--dev->open_count && dev->interface)
+ if (dev->interface)
usb_autopm_put_interface(dev->interface);
mutex_unlock(&dev->io_mutex);
@@ -688,25 +674,6 @@ static struct usb_driver skel_driver = {
.supports_autosuspend = 1,
};
-static int __init usb_skel_init(void)
-{
- int result;
-
- /* register this driver with the USB subsystem */
- result = usb_register(&skel_driver);
- if (result)
- err("usb_register failed. Error number %d", result);
-
- return result;
-}
-
-static void __exit usb_skel_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&skel_driver);
-}
-
-module_init(usb_skel_init);
-module_exit(usb_skel_exit);
+module_usb_driver(skel_driver);
MODULE_LICENSE("GPL");
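
Dropping open_count from the skeleton works because usb_autopm_get_interface() and usb_autopm_put_interface() already maintain a per-interface usage count in the PM core; taking one reference per open and releasing it per close keeps the device awake exactly while it has users. Reduced to its essentials (names hypothetical):

static int example_open(struct usb_interface *intf)
{
	/* bumps the interface's PM usage count, resuming it if necessary */
	return usb_autopm_get_interface(intf);
}

static void example_release(struct usb_interface *intf)
{
	/* drops the usage count; autosuspend may kick in when it hits zero */
	usb_autopm_put_interface(intf);
}
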
diff --git a/drivers/usb/wusbcore/Kconfig b/drivers/usb/wusbcore/Kconfig
index eb09a0a..f29fdd7 100644
--- a/drivers/usb/wusbcore/Kconfig
+++ b/drivers/usb/wusbcore/Kconfig
@@ -5,7 +5,8 @@ config USB_WUSB
tristate "Enable Wireless USB extensions (EXPERIMENTAL)"
depends on EXPERIMENTAL
depends on USB
- select UWB
+ depends on PCI
+ depends on UWB
select CRYPTO
select CRYPTO_BLKCIPHER
select CRYPTO_CBC
diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/usb/wusbcore/cbaf.c
index 200fd7c..7f78f30 100644
--- a/drivers/usb/wusbcore/cbaf.c
+++ b/drivers/usb/wusbcore/cbaf.c
@@ -655,17 +655,7 @@ static struct usb_driver cbaf_driver = {
.disconnect = cbaf_disconnect,
};
-static int __init cbaf_driver_init(void)
-{
- return usb_register(&cbaf_driver);
-}
-module_init(cbaf_driver_init);
-
-static void __exit cbaf_driver_exit(void)
-{
- usb_deregister(&cbaf_driver);
-}
-module_exit(cbaf_driver_exit);
+module_usb_driver(cbaf_driver);
MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
MODULE_DESCRIPTION("Wireless USB Cable Based Association");
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
index 371f617..fa810a8 100644
--- a/drivers/usb/wusbcore/security.c
+++ b/drivers/usb/wusbcore/security.c
@@ -354,7 +354,7 @@ int wusb_dev_4way_handshake(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev,
struct wusb_keydvt_in keydvt_in;
struct wusb_keydvt_out keydvt_out;
- hs = kzalloc(3*sizeof(hs[0]), GFP_KERNEL);
+ hs = kcalloc(3, sizeof(hs[0]), GFP_KERNEL);
if (hs == NULL) {
dev_err(dev, "can't allocate handshake data\n");
goto error_kzalloc;
diff --git a/drivers/uwb/est.c b/drivers/uwb/est.c
index de81ebf..86ed7e6 100644
--- a/drivers/uwb/est.c
+++ b/drivers/uwb/est.c
@@ -184,7 +184,7 @@ int uwb_est_create(void)
uwb_est_size = 2;
uwb_est_used = 0;
- uwb_est = kzalloc(uwb_est_size * sizeof(uwb_est[0]), GFP_KERNEL);
+ uwb_est = kcalloc(uwb_est_size, sizeof(uwb_est[0]), GFP_KERNEL);
if (uwb_est == NULL)
return -ENOMEM;
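
The kcalloc() conversions here and in security.c above allocate the same zeroed arrays as the kzalloc(n * size, ...) calls they replace, but kcalloc() additionally refuses requests whose element count times element size would overflow. In isolation (helper name is made up):

static void *example_alloc_table(size_t n, size_t elem_size)
{
	/*
	 * kzalloc(n * elem_size, GFP_KERNEL) would silently wrap on
	 * overflow; kcalloc() returns NULL instead.
	 */
	return kcalloc(n, elem_size, GFP_KERNEL);
}
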
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 2babcd4..66797e9 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -914,17 +914,7 @@ static struct usb_driver hwarc_driver = {
.post_reset = hwarc_post_reset,
};
-static int __init hwarc_driver_init(void)
-{
- return usb_register(&hwarc_driver);
-}
-module_init(hwarc_driver_init);
-
-static void __exit hwarc_driver_exit(void)
-{
- usb_deregister(&hwarc_driver);
-}
-module_exit(hwarc_driver_exit);
+module_usb_driver(hwarc_driver);
MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
MODULE_DESCRIPTION("Host Wireless Adapter Radio Control Driver");
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c
index ba86643..2bfc846 100644
--- a/drivers/uwb/i1480/dfu/usb.c
+++ b/drivers/uwb/i1480/dfu/usb.c
@@ -104,7 +104,7 @@ void i1480_usb_destroy(struct i1480_usb *i1480_usb)
*
* Data buffers to USB cannot be on the stack or in vmalloc'ed areas,
* so we copy it to the local i1480 buffer before proceeding. In any
- * case, we have a max size we can send, soooo.
+ * case, we have a max size we can send.
*/
static
int i1480_usb_write(struct i1480 *i1480, u32 memory_address,
@@ -451,25 +451,7 @@ static struct usb_driver i1480_dfu_driver = {
.disconnect = NULL,
};
-
-/*
- * Initialize the i1480 DFU driver.
- *
- * We also need to register our function for guessing event sizes.
- */
-static int __init i1480_dfu_driver_init(void)
-{
- return usb_register(&i1480_dfu_driver);
-}
-module_init(i1480_dfu_driver_init);
-
-
-static void __exit i1480_dfu_driver_exit(void)
-{
- usb_deregister(&i1480_dfu_driver);
-}
-module_exit(i1480_dfu_driver_exit);
-
+module_usb_driver(i1480_dfu_driver);
MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>");
MODULE_DESCRIPTION("Intel Wireless UWB Link 1480 firmware uploader for USB");
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 882a51f..9dab1f5 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -856,9 +856,9 @@ static const struct file_operations vhost_net_fops = {
};
static struct miscdevice vhost_net_misc = {
- MISC_DYNAMIC_MINOR,
- "vhost-net",
- &vhost_net_fops,
+ .minor = VHOST_NET_MINOR,
+ .name = "vhost-net",
+ .fops = &vhost_net_fops,
};
static int vhost_net_init(void)
@@ -879,3 +879,5 @@ MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
+MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
+MODULE_ALIAS("devname:vhost-net");
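
For vhost-net the positional miscdevice initializers become designated ones, and the device moves from a dynamic minor to the fixed VHOST_NET_MINOR. Together with the two new aliases this allows on-demand loading: the char-major-10-<minor> alias generated by MODULE_ALIAS_MISCDEV() resolves open-by-device-number, and the devname: alias ends up in modules.devname so the /dev/vhost-net node can be created before the module is loaded. A simplified sketch of the alias macro (see include/linux/miscdevice.h for the real one):

#define MODULE_ALIAS_MISCDEV(minor) \
	MODULE_ALIAS("char-major-" __stringify(MISC_MAJOR) \
		     "-" __stringify(minor))
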
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index d83e967..6ca0c40 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -1763,16 +1763,16 @@ config FB_AU1100
au1100fb:panel=<name>.
config FB_AU1200
- bool "Au1200 LCD Driver"
+ bool "Au1200/Au1300 LCD Driver"
depends on (FB = y) && MIPS_ALCHEMY
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
select FB_SYS_FOPS
help
- This is the framebuffer driver for the AMD Au1200 SOC. It can drive
- various panels and CRTs by passing in kernel cmd line option
- au1200fb:panel=<name>.
+ This is the framebuffer driver for the Au1200/Au1300 SOCs.
+ It can drive various panels and CRTs by passing in kernel cmd line
+ option au1200fb:panel=<name>.
config FB_VT8500
bool "VT8500 LCD Driver"
@@ -2413,7 +2413,6 @@ source "drivers/video/omap/Kconfig"
source "drivers/video/omap2/Kconfig"
source "drivers/video/backlight/Kconfig"
-source "drivers/video/display/Kconfig"
if VT
source "drivers/video/console/Kconfig"
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 9b9d8ff..1426068 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -13,7 +13,7 @@ fb-objs := $(fb-y)
obj-$(CONFIG_VT) += console/
obj-$(CONFIG_LOGO) += logo/
-obj-y += backlight/ display/
+obj-y += backlight/
obj-$(CONFIG_FB_CFB_FILLRECT) += cfbfillrect.o
obj-$(CONFIG_FB_CFB_COPYAREA) += cfbcopyarea.o
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index 2cda6ba..0a2cce7 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -621,6 +621,8 @@ static struct amba_id clcdfb_id_table[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, clcdfb_id_table);
+
static struct amba_driver clcd_driver = {
.drv = {
.name = "clcd-pl11x",
diff --git a/drivers/video/amifb.c b/drivers/video/amifb.c
index 5ea6596..f23cae0 100644
--- a/drivers/video/amifb.c
+++ b/drivers/video/amifb.c
@@ -152,10 +152,10 @@
- hsstrt: Start of horizontal synchronization pulse
- hsstop: End of horizontal synchronization pulse
- - htotal: Last value on the line (i.e. line length = htotal+1)
+ - htotal: Last value on the line (i.e. line length = htotal + 1)
- vsstrt: Start of vertical synchronization pulse
- vsstop: End of vertical synchronization pulse
- - vtotal: Last line value (i.e. number of lines = vtotal+1)
+ - vtotal: Last line value (i.e. number of lines = vtotal + 1)
- hcenter: Start of vertical retrace for interlace
You can specify the blanking timings independently. Currently I just set
@@ -184,7 +184,7 @@
clock):
- diwstrt_h: Horizontal start of the visible window
- - diwstop_h: Horizontal stop+1(*) of the visible window
+ - diwstop_h: Horizontal stop + 1(*) of the visible window
- diwstrt_v: Vertical start of the visible window
- diwstop_v: Vertical stop of the visible window
- ddfstrt: Horizontal start of display DMA
@@ -193,7 +193,7 @@
Sprite positioning:
- - sprstrt_h: Horizontal start-4 of sprite
+ - sprstrt_h: Horizontal start - 4 of sprite
- sprstrt_v: Vertical start of sprite
(*) Even Commodore did it wrong in the AGA monitor drivers by not adding 1.
@@ -212,21 +212,21 @@
display parameters. Here's what I found out:
- ddfstrt and ddfstop are best aligned to 64 pixels.
- - the chipset needs 64+4 horizontal pixels after the DMA start before the
- first pixel is output, so diwstrt_h = ddfstrt+64+4 if you want to
- display the first pixel on the line too. Increase diwstrt_h for virtual
- screen panning.
+ - the chipset needs 64 + 4 horizontal pixels after the DMA start before
+ the first pixel is output, so diwstrt_h = ddfstrt + 64 + 4 if you want
+ to display the first pixel on the line too. Increase diwstrt_h for
+ virtual screen panning.
- the display DMA always fetches 64 pixels at a time (fmode = 3).
- - ddfstop is ddfstrt+#pixels-64.
- - diwstop_h = diwstrt_h+xres+1. Because of the additional 1 this can be 1
- more than htotal.
+ - ddfstop is ddfstrt+#pixels - 64.
+ - diwstop_h = diwstrt_h + xres + 1. Because of the additional 1 this can
+ be 1 more than htotal.
- hscroll simply adds a delay to the display output. Smooth horizontal
- panning needs an extra 64 pixels on the left to prefetch the pixels that
- `fall off' on the left.
+ panning needs an extra 64 pixels on the left to prefetch the pixels that
+ `fall off' on the left.
- if ddfstrt < 192, the sprite DMA cycles are all stolen by the bitplane
- DMA, so it's best to make the DMA start as late as possible.
+ DMA, so it's best to make the DMA start as late as possible.
- you really don't want to make ddfstrt < 128, since this will steal DMA
- cycles from the other DMA channels (audio, floppy and Chip RAM refresh).
+ cycles from the other DMA channels (audio, floppy and Chip RAM refresh).
- I make diwstop_h and diwstop_v as large as possible.
General dependencies
@@ -234,8 +234,8 @@
- all values are SHRES pixel (35ns)
- table 1:fetchstart table 2:prefetch table 3:fetchsize
- ------------------ ---------------- -----------------
+ table 1:fetchstart table 2:prefetch table 3:fetchsize
+ ------------------ ---------------- -----------------
Pixclock # SHRES|HIRES|LORES # SHRES|HIRES|LORES # SHRES|HIRES|LORES
-------------#------+-----+------#------+-----+------#------+-----+------
Bus width 1x # 16 | 32 | 64 # 16 | 32 | 64 # 64 | 64 | 64
@@ -245,21 +245,21 @@
- chipset needs 4 pixels before the first pixel is output
- ddfstrt must be aligned to fetchstart (table 1)
- chipset needs also prefetch (table 2) to get first pixel data, so
- ddfstrt = ((diwstrt_h-4) & -fetchstart) - prefetch
+ ddfstrt = ((diwstrt_h - 4) & -fetchstart) - prefetch
- for horizontal panning decrease diwstrt_h
- the length of a fetchline must be aligned to fetchsize (table 3)
- if fetchstart is smaller than fetchsize, then ddfstrt can a little bit
- moved to optimize use of dma (useful for OCS/ECS overscan displays)
- - ddfstop is ddfstrt+ddfsize-fetchsize
+ moved to optimize use of dma (useful for OCS/ECS overscan displays)
+ - ddfstop is ddfstrt + ddfsize - fetchsize
- If C= didn't change anything for AGA, then at following positions the
- dma bus is already used:
- ddfstrt < 48 -> memory refresh
- < 96 -> disk dma
- < 160 -> audio dma
- < 192 -> sprite 0 dma
- < 416 -> sprite dma (32 per sprite)
+ dma bus is already used:
+ ddfstrt < 48 -> memory refresh
+ < 96 -> disk dma
+ < 160 -> audio dma
+ < 192 -> sprite 0 dma
+ < 416 -> sprite dma (32 per sprite)
- in accordance with the hardware reference manual a hardware stop is at
- 192, but AGA (ECS?) can go below this.
+ 192, but AGA (ECS?) can go below this.
DMA priorities
--------------
@@ -269,7 +269,7 @@
the hardware cursor:
- if you want to start display DMA too early, you lose the ability to
- do smooth horizontal panning (xpanstep 1 -> 64).
+ do smooth horizontal panning (xpanstep 1 -> 64).
- if you want to go even further, you lose the hardware cursor too.
IMHO a hardware cursor is more important for X than horizontal scrolling,
@@ -286,8 +286,8 @@
Standard VGA timings
--------------------
- xres yres left right upper lower hsync vsync
- ---- ---- ---- ----- ----- ----- ----- -----
+ xres yres left right upper lower hsync vsync
+ ---- ---- ---- ----- ----- ----- ----- -----
80x25 720 400 27 45 35 12 108 2
80x30 720 480 27 45 30 9 108 2
@@ -297,8 +297,8 @@
As a comparison, graphics/monitor.h suggests the following:
- xres yres left right upper lower hsync vsync
- ---- ---- ---- ----- ----- ----- ----- -----
+ xres yres left right upper lower hsync vsync
+ ---- ---- ---- ----- ----- ----- ----- -----
VGA 640 480 52 112 24 19 112 - 2 +
VGA70 640 400 52 112 27 21 112 - 2 -
@@ -309,10 +309,10 @@
VSYNC HSYNC Vertical size Vertical total
----- ----- ------------- --------------
- + + Reserved Reserved
- + - 400 414
- - + 350 362
- - - 480 496
+ + + Reserved Reserved
+ + - 400 414
+ - + 350 362
+ - - 480 496
Source: CL-GD542X Technical Reference Manual, Cirrus Logic, Oct 1992
@@ -326,33 +326,34 @@
-----------
- a scanline is 64 µs long, of which 52.48 µs are visible. This is about
- 736 visible 70 ns pixels per line.
+ 736 visible 70 ns pixels per line.
- we have 625 scanlines, of which 575 are visible (interlaced); after
- rounding this becomes 576.
+ rounding this becomes 576.
RETMA -> NTSC
-------------
- a scanline is 63.5 µs long, of which 53.5 µs are visible. This is about
- 736 visible 70 ns pixels per line.
+ 736 visible 70 ns pixels per line.
- we have 525 scanlines, of which 485 are visible (interlaced); after
- rounding this becomes 484.
+ rounding this becomes 484.
Thus if you want a PAL compatible display, you have to do the following:
- set the FB_SYNC_BROADCAST flag to indicate that standard broadcast
- timings are to be used.
- - make sure upper_margin+yres+lower_margin+vsync_len = 625 for an
- interlaced, 312 for a non-interlaced and 156 for a doublescanned
- display.
- - make sure left_margin+xres+right_margin+hsync_len = 1816 for a SHRES,
- 908 for a HIRES and 454 for a LORES display.
+ timings are to be used.
+ - make sure upper_margin + yres + lower_margin + vsync_len = 625 for an
+ interlaced, 312 for a non-interlaced and 156 for a doublescanned
+ display.
+ - make sure left_margin + xres + right_margin + hsync_len = 1816 for a
+ SHRES, 908 for a HIRES and 454 for a LORES display.
- the left visible part begins at 360 (SHRES; HIRES:180, LORES:90),
- left_margin+2*hsync_len must be greater or equal.
+ left_margin + 2 * hsync_len must be greater or equal.
- the upper visible part begins at 48 (interlaced; non-interlaced:24,
- doublescanned:12), upper_margin+2*vsync_len must be greater or equal.
+ doublescanned:12), upper_margin + 2 * vsync_len must be greater or
+ equal.
- ami_encode_var() calculates margins with a hsync of 5320 ns and a vsync
- of 4 scanlines
+ of 4 scanlines
The settings for a NTSC compatible display are straightforward.
@@ -361,7 +362,7 @@
anything about horizontal/vertical synchronization nor refresh rates.
- -- Geert --
+ -- Geert --
*******************************************************************************/
@@ -540,45 +541,45 @@ static u_short maxfmode, chipset;
* Various macros
*/
-#define up2(v) (((v)+1) & -2)
+#define up2(v) (((v) + 1) & -2)
#define down2(v) ((v) & -2)
#define div2(v) ((v)>>1)
#define mod2(v) ((v) & 1)
-#define up4(v) (((v)+3) & -4)
+#define up4(v) (((v) + 3) & -4)
#define down4(v) ((v) & -4)
-#define mul4(v) ((v)<<2)
+#define mul4(v) ((v) << 2)
#define div4(v) ((v)>>2)
#define mod4(v) ((v) & 3)
-#define up8(v) (((v)+7) & -8)
+#define up8(v) (((v) + 7) & -8)
#define down8(v) ((v) & -8)
#define div8(v) ((v)>>3)
#define mod8(v) ((v) & 7)
-#define up16(v) (((v)+15) & -16)
+#define up16(v) (((v) + 15) & -16)
#define down16(v) ((v) & -16)
#define div16(v) ((v)>>4)
#define mod16(v) ((v) & 15)
-#define up32(v) (((v)+31) & -32)
+#define up32(v) (((v) + 31) & -32)
#define down32(v) ((v) & -32)
#define div32(v) ((v)>>5)
#define mod32(v) ((v) & 31)
-#define up64(v) (((v)+63) & -64)
+#define up64(v) (((v) + 63) & -64)
#define down64(v) ((v) & -64)
#define div64(v) ((v)>>6)
#define mod64(v) ((v) & 63)
-#define upx(x,v) (((v)+(x)-1) & -(x))
-#define downx(x,v) ((v) & -(x))
-#define modx(x,v) ((v) & ((x)-1))
+#define upx(x, v) (((v) + (x) - 1) & -(x))
+#define downx(x, v) ((v) & -(x))
+#define modx(x, v) ((v) & ((x) - 1))
/* if x1 is not a constant, this macro won't make real sense :-) */
#ifdef __mc68000__
#define DIVUL(x1, x2) ({int res; asm("divul %1,%2,%3": "=d" (res): \
- "d" (x2), "d" ((long)((x1)/0x100000000ULL)), "0" ((long)(x1))); res;})
+ "d" (x2), "d" ((long)((x1) / 0x100000000ULL)), "0" ((long)(x1))); res;})
#else
/* We know a bit about the numbers, so we can do it this way */
#define DIVUL(x1, x2) ((((long)((unsigned long long)x1 >> 8) / x2) << 8) + \
@@ -607,7 +608,7 @@ static u_short maxfmode, chipset;
#define VIDEOMEMSIZE_ECS_1M (393216) /* ECS (1MB) : max 1024*768*16 */
#define VIDEOMEMSIZE_OCS (262144) /* OCS : max ca. 800*600*16 */
-#define SPRITEMEMSIZE (64*64/4) /* max 64*64*4 */
+#define SPRITEMEMSIZE (64 * 64 / 4) /* max 64*64*4 */
#define DUMMYSPRITEMEMSIZE (8)
static u_long spritememory;
@@ -634,9 +635,9 @@ static u_long min_fstrt = 192;
* Copper Instructions
*/
-#define CMOVE(val, reg) (CUSTOM_OFS(reg)<<16 | (val))
-#define CMOVE2(val, reg) ((CUSTOM_OFS(reg)+2)<<16 | (val))
-#define CWAIT(x, y) (((y) & 0x1fe)<<23 | ((x) & 0x7f0)<<13 | 0x0001fffe)
+#define CMOVE(val, reg) (CUSTOM_OFS(reg) << 16 | (val))
+#define CMOVE2(val, reg) ((CUSTOM_OFS(reg) + 2) << 16 | (val))
+#define CWAIT(x, y) (((y) & 0x1fe) << 23 | ((x) & 0x7f0) << 13 | 0x0001fffe)
#define CEND (0xfffffffe)
@@ -709,7 +710,7 @@ static u_short *lofsprite, *shfsprite, *dummysprite;
* Current Video Mode
*/
-static struct amifb_par {
+struct amifb_par {
/* General Values */
@@ -772,15 +773,6 @@ static struct amifb_par {
/* Additional AGA Hardware Registers */
u_short fmode; /* vmode */
-} currentpar;
-
-
-static struct fb_info fb_info = {
- .fix = {
- .id = "Amiga ",
- .visual = FB_VISUAL_PSEUDOCOLOR,
- .accel = FB_ACCEL_AMIGABLITT
- }
};
@@ -820,116 +812,123 @@ static u_short is_lace = 0; /* Screen is laced */
static struct fb_videomode ami_modedb[] __initdata = {
- /*
- * AmigaOS Video Modes
- *
- * If you change these, make sure to update DEFMODE_* as well!
- */
-
- {
- /* 640x200, 15 kHz, 60 Hz (NTSC) */
- "ntsc", 60, 640, 200, TAG_HIRES, 106, 86, 44, 16, 76, 2,
- FB_SYNC_BROADCAST, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x400, 15 kHz, 60 Hz interlaced (NTSC) */
- "ntsc-lace", 60, 640, 400, TAG_HIRES, 106, 86, 88, 33, 76, 4,
- FB_SYNC_BROADCAST, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x256, 15 kHz, 50 Hz (PAL) */
- "pal", 50, 640, 256, TAG_HIRES, 106, 86, 40, 14, 76, 2,
- FB_SYNC_BROADCAST, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x512, 15 kHz, 50 Hz interlaced (PAL) */
- "pal-lace", 50, 640, 512, TAG_HIRES, 106, 86, 80, 29, 76, 4,
- FB_SYNC_BROADCAST, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x480, 29 kHz, 57 Hz */
- "multiscan", 57, 640, 480, TAG_SHRES, 96, 112, 29, 8, 72, 8,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x960, 29 kHz, 57 Hz interlaced */
- "multiscan-lace", 57, 640, 960, TAG_SHRES, 96, 112, 58, 16, 72, 16,
- 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x200, 15 kHz, 72 Hz */
- "euro36", 72, 640, 200, TAG_HIRES, 92, 124, 6, 6, 52, 5,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x400, 15 kHz, 72 Hz interlaced */
- "euro36-lace", 72, 640, 400, TAG_HIRES, 92, 124, 12, 12, 52, 10,
- 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x400, 29 kHz, 68 Hz */
- "euro72", 68, 640, 400, TAG_SHRES, 164, 92, 9, 9, 80, 8,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x800, 29 kHz, 68 Hz interlaced */
- "euro72-lace", 68, 640, 800, TAG_SHRES, 164, 92, 18, 18, 80, 16,
- 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
- }, {
- /* 800x300, 23 kHz, 70 Hz */
- "super72", 70, 800, 300, TAG_SHRES, 212, 140, 10, 11, 80, 7,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 800x600, 23 kHz, 70 Hz interlaced */
- "super72-lace", 70, 800, 600, TAG_SHRES, 212, 140, 20, 22, 80, 14,
- 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x200, 27 kHz, 57 Hz doublescan */
- "dblntsc", 57, 640, 200, TAG_SHRES, 196, 124, 18, 17, 80, 4,
- 0, FB_VMODE_DOUBLE | FB_VMODE_YWRAP
- }, {
- /* 640x400, 27 kHz, 57 Hz */
- "dblntsc-ff", 57, 640, 400, TAG_SHRES, 196, 124, 36, 35, 80, 7,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x800, 27 kHz, 57 Hz interlaced */
- "dblntsc-lace", 57, 640, 800, TAG_SHRES, 196, 124, 72, 70, 80, 14,
- 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x256, 27 kHz, 47 Hz doublescan */
- "dblpal", 47, 640, 256, TAG_SHRES, 196, 124, 14, 13, 80, 4,
- 0, FB_VMODE_DOUBLE | FB_VMODE_YWRAP
- }, {
- /* 640x512, 27 kHz, 47 Hz */
- "dblpal-ff", 47, 640, 512, TAG_SHRES, 196, 124, 28, 27, 80, 7,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x1024, 27 kHz, 47 Hz interlaced */
- "dblpal-lace", 47, 640, 1024, TAG_SHRES, 196, 124, 56, 54, 80, 14,
- 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
- },
-
- /*
- * VGA Video Modes
- */
-
- {
- /* 640x480, 31 kHz, 60 Hz (VGA) */
- "vga", 60, 640, 480, TAG_SHRES, 64, 96, 30, 9, 112, 2,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x400, 31 kHz, 70 Hz (VGA) */
- "vga70", 70, 640, 400, TAG_SHRES, 64, 96, 35, 12, 112, 2,
- FB_SYNC_VERT_HIGH_ACT | FB_SYNC_COMP_HIGH_ACT, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- },
+ /*
+ * AmigaOS Video Modes
+ *
+ * If you change these, make sure to update DEFMODE_* as well!
+ */
+
+ {
+ /* 640x200, 15 kHz, 60 Hz (NTSC) */
+ "ntsc", 60, 640, 200, TAG_HIRES, 106, 86, 44, 16, 76, 2,
+ FB_SYNC_BROADCAST, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x400, 15 kHz, 60 Hz interlaced (NTSC) */
+ "ntsc-lace", 60, 640, 400, TAG_HIRES, 106, 86, 88, 33, 76, 4,
+ FB_SYNC_BROADCAST, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x256, 15 kHz, 50 Hz (PAL) */
+ "pal", 50, 640, 256, TAG_HIRES, 106, 86, 40, 14, 76, 2,
+ FB_SYNC_BROADCAST, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x512, 15 kHz, 50 Hz interlaced (PAL) */
+ "pal-lace", 50, 640, 512, TAG_HIRES, 106, 86, 80, 29, 76, 4,
+ FB_SYNC_BROADCAST, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x480, 29 kHz, 57 Hz */
+ "multiscan", 57, 640, 480, TAG_SHRES, 96, 112, 29, 8, 72, 8,
+ 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x960, 29 kHz, 57 Hz interlaced */
+ "multiscan-lace", 57, 640, 960, TAG_SHRES, 96, 112, 58, 16, 72,
+ 16,
+ 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x200, 15 kHz, 72 Hz */
+ "euro36", 72, 640, 200, TAG_HIRES, 92, 124, 6, 6, 52, 5,
+ 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x400, 15 kHz, 72 Hz interlaced */
+ "euro36-lace", 72, 640, 400, TAG_HIRES, 92, 124, 12, 12, 52,
+ 10,
+ 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x400, 29 kHz, 68 Hz */
+ "euro72", 68, 640, 400, TAG_SHRES, 164, 92, 9, 9, 80, 8,
+ 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x800, 29 kHz, 68 Hz interlaced */
+ "euro72-lace", 68, 640, 800, TAG_SHRES, 164, 92, 18, 18, 80,
+ 16,
+ 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 800x300, 23 kHz, 70 Hz */
+ "super72", 70, 800, 300, TAG_SHRES, 212, 140, 10, 11, 80, 7,
+ 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 800x600, 23 kHz, 70 Hz interlaced */
+ "super72-lace", 70, 800, 600, TAG_SHRES, 212, 140, 20, 22, 80,
+ 14,
+ 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x200, 27 kHz, 57 Hz doublescan */
+ "dblntsc", 57, 640, 200, TAG_SHRES, 196, 124, 18, 17, 80, 4,
+ 0, FB_VMODE_DOUBLE | FB_VMODE_YWRAP
+ }, {
+ /* 640x400, 27 kHz, 57 Hz */
+ "dblntsc-ff", 57, 640, 400, TAG_SHRES, 196, 124, 36, 35, 80, 7,
+ 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x800, 27 kHz, 57 Hz interlaced */
+ "dblntsc-lace", 57, 640, 800, TAG_SHRES, 196, 124, 72, 70, 80,
+ 14,
+ 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x256, 27 kHz, 47 Hz doublescan */
+ "dblpal", 47, 640, 256, TAG_SHRES, 196, 124, 14, 13, 80, 4,
+ 0, FB_VMODE_DOUBLE | FB_VMODE_YWRAP
+ }, {
+ /* 640x512, 27 kHz, 47 Hz */
+ "dblpal-ff", 47, 640, 512, TAG_SHRES, 196, 124, 28, 27, 80, 7,
+ 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x1024, 27 kHz, 47 Hz interlaced */
+ "dblpal-lace", 47, 640, 1024, TAG_SHRES, 196, 124, 56, 54, 80,
+ 14,
+ 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
+ },
+
+ /*
+ * VGA Video Modes
+ */
+
+ {
+ /* 640x480, 31 kHz, 60 Hz (VGA) */
+ "vga", 60, 640, 480, TAG_SHRES, 64, 96, 30, 9, 112, 2,
+ 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x400, 31 kHz, 70 Hz (VGA) */
+ "vga70", 70, 640, 400, TAG_SHRES, 64, 96, 35, 12, 112, 2,
+ FB_SYNC_VERT_HIGH_ACT | FB_SYNC_COMP_HIGH_ACT,
+ FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ },
#if 0
- /*
- * A2024 video modes
- * These modes don't work yet because there's no A2024 driver.
- */
-
- {
- /* 1024x800, 10 Hz */
- "a2024-10", 10, 1024, 800, TAG_HIRES, 0, 0, 0, 0, 0, 0,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 1024x800, 15 Hz */
- "a2024-15", 15, 1024, 800, TAG_HIRES, 0, 0, 0, 0, 0, 0,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }
+ /*
+ * A2024 video modes
+ * These modes don't work yet because there's no A2024 driver.
+ */
+
+ {
+ /* 1024x800, 10 Hz */
+ "a2024-10", 10, 1024, 800, TAG_HIRES, 0, 0, 0, 0, 0, 0,
+ 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 1024x800, 15 Hz */
+ "a2024-15", 15, 1024, 800, TAG_HIRES, 0, 0, 0, 0, 0, 0,
+ 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }
#endif
};
@@ -953,6 +952,11 @@ static int round_down_bpp = 1; /* for mode probing */
static int amifb_ilbm = 0; /* interleaved or normal bitplanes */
static int amifb_inverse = 0;
+static u32 amifb_hfmin __initdata; /* monitor hfreq lower limit (Hz) */
+static u32 amifb_hfmax __initdata; /* monitor hfreq upper limit (Hz) */
+static u16 amifb_vfmin __initdata; /* monitor vfreq lower limit (Hz) */
+static u16 amifb_vfmax __initdata; /* monitor vfreq upper limit (Hz) */
+
/*
* Macros for the conversion from real world values to hardware register
@@ -992,19 +996,20 @@ static int amifb_inverse = 0;
/* bplcon1 (smooth scrolling) */
#define hscroll2hw(hscroll) \
- (((hscroll)<<12 & 0x3000) | ((hscroll)<<8 & 0xc300) | \
- ((hscroll)<<4 & 0x0c00) | ((hscroll)<<2 & 0x00f0) | ((hscroll)>>2 & 0x000f))
+ (((hscroll) << 12 & 0x3000) | ((hscroll) << 8 & 0xc300) | \
+ ((hscroll) << 4 & 0x0c00) | ((hscroll) << 2 & 0x00f0) | \
+ ((hscroll)>>2 & 0x000f))
/* diwstrt/diwstop/diwhigh (visible display window) */
#define diwstrt2hw(diwstrt_h, diwstrt_v) \
- (((diwstrt_v)<<7 & 0xff00) | ((diwstrt_h)>>2 & 0x00ff))
+ (((diwstrt_v) << 7 & 0xff00) | ((diwstrt_h)>>2 & 0x00ff))
#define diwstop2hw(diwstop_h, diwstop_v) \
- (((diwstop_v)<<7 & 0xff00) | ((diwstop_h)>>2 & 0x00ff))
+ (((diwstop_v) << 7 & 0xff00) | ((diwstop_h)>>2 & 0x00ff))
#define diwhigh2hw(diwstrt_h, diwstrt_v, diwstop_h, diwstop_v) \
- (((diwstop_h)<<3 & 0x2000) | ((diwstop_h)<<11 & 0x1800) | \
+ (((diwstop_h) << 3 & 0x2000) | ((diwstop_h) << 11 & 0x1800) | \
((diwstop_v)>>1 & 0x0700) | ((diwstrt_h)>>5 & 0x0020) | \
- ((diwstrt_h)<<3 & 0x0018) | ((diwstrt_v)>>9 & 0x0007))
+ ((diwstrt_h) << 3 & 0x0018) | ((diwstrt_v)>>9 & 0x0007))
/* ddfstrt/ddfstop (display DMA) */
@@ -1015,38 +1020,39 @@ static int amifb_inverse = 0;
#define hsstrt2hw(hsstrt) (div8(hsstrt))
#define hsstop2hw(hsstop) (div8(hsstop))
-#define htotal2hw(htotal) (div8(htotal)-1)
+#define htotal2hw(htotal) (div8(htotal) - 1)
#define vsstrt2hw(vsstrt) (div2(vsstrt))
#define vsstop2hw(vsstop) (div2(vsstop))
-#define vtotal2hw(vtotal) (div2(vtotal)-1)
+#define vtotal2hw(vtotal) (div2(vtotal) - 1)
#define hcenter2hw(htotal) (div8(htotal))
/* hbstrt/hbstop/vbstrt/vbstop (blanking timings) */
-#define hbstrt2hw(hbstrt) (((hbstrt)<<8 & 0x0700) | ((hbstrt)>>3 & 0x00ff))
-#define hbstop2hw(hbstop) (((hbstop)<<8 & 0x0700) | ((hbstop)>>3 & 0x00ff))
+#define hbstrt2hw(hbstrt) (((hbstrt) << 8 & 0x0700) | ((hbstrt)>>3 & 0x00ff))
+#define hbstop2hw(hbstop) (((hbstop) << 8 & 0x0700) | ((hbstop)>>3 & 0x00ff))
#define vbstrt2hw(vbstrt) (div2(vbstrt))
#define vbstop2hw(vbstop) (div2(vbstop))
/* colour */
#define rgb2hw8_high(red, green, blue) \
- (((red & 0xf0)<<4) | (green & 0xf0) | ((blue & 0xf0)>>4))
+ (((red & 0xf0) << 4) | (green & 0xf0) | ((blue & 0xf0)>>4))
#define rgb2hw8_low(red, green, blue) \
- (((red & 0x0f)<<8) | ((green & 0x0f)<<4) | (blue & 0x0f))
+ (((red & 0x0f) << 8) | ((green & 0x0f) << 4) | (blue & 0x0f))
#define rgb2hw4(red, green, blue) \
- (((red & 0xf0)<<4) | (green & 0xf0) | ((blue & 0xf0)>>4))
+ (((red & 0xf0) << 4) | (green & 0xf0) | ((blue & 0xf0)>>4))
#define rgb2hw2(red, green, blue) \
- (((red & 0xc0)<<4) | (green & 0xc0) | ((blue & 0xc0)>>4))
+ (((red & 0xc0) << 4) | (green & 0xc0) | ((blue & 0xc0)>>4))
/* sprpos/sprctl (sprite positioning) */
#define spr2hw_pos(start_v, start_h) \
- (((start_v)<<7&0xff00) | ((start_h)>>3&0x00ff))
+ (((start_v) << 7 & 0xff00) | ((start_h)>>3 & 0x00ff))
#define spr2hw_ctl(start_v, start_h, stop_v) \
- (((stop_v)<<7&0xff00) | ((start_v)>>4&0x0040) | ((stop_v)>>5&0x0020) | \
- ((start_h)<<3&0x0018) | ((start_v)>>7&0x0004) | ((stop_v)>>8&0x0002) | \
- ((start_h)>>2&0x0001))
+ (((stop_v) << 7 & 0xff00) | ((start_v)>>4 & 0x0040) | \
+ ((stop_v)>>5 & 0x0020) | ((start_h) << 3 & 0x0018) | \
+ ((start_v)>>7 & 0x0004) | ((stop_v)>>8 & 0x0002) | \
+ ((start_h)>>2 & 0x0001))
/* get current vertical position of beam */
#define get_vbpos() ((u_short)((*(u_long volatile *)&custom.vposr >> 7) & 0xffe))
@@ -1055,7 +1061,7 @@ static int amifb_inverse = 0;
* Copper Initialisation List
*/
-#define COPINITSIZE (sizeof(copins)*40)
+#define COPINITSIZE (sizeof(copins) * 40)
enum {
cip_bplcon0
@@ -1066,7 +1072,7 @@ enum {
* Don't change the order, build_copper()/rebuild_copper() rely on this
*/
-#define COPLISTSIZE (sizeof(copins)*64)
+#define COPLISTSIZE (sizeof(copins) * 64)
enum {
cop_wait, cop_bplcon0,
@@ -1108,82 +1114,1199 @@ static u_short sprfetchmode[3] = {
};
+/* --------------------------- Hardware routines --------------------------- */
+
/*
- * Interface used by the world
+ * Get the video params out of `var'. If a value doesn't fit, round
+ * it up; if it's too big, return -EINVAL.
*/
-int amifb_setup(char*);
+static int ami_decode_var(struct fb_var_screeninfo *var, struct amifb_par *par,
+ const struct fb_info *info)
+{
+ u_short clk_shift, line_shift;
+ u_long maxfetchstop, fstrt, fsize, fconst, xres_n, yres_n;
+ u_int htotal, vtotal;
-static int amifb_check_var(struct fb_var_screeninfo *var,
- struct fb_info *info);
-static int amifb_set_par(struct fb_info *info);
-static int amifb_setcolreg(unsigned regno, unsigned red, unsigned green,
- unsigned blue, unsigned transp,
- struct fb_info *info);
-static int amifb_blank(int blank, struct fb_info *info);
-static int amifb_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *info);
-static void amifb_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect);
-static void amifb_copyarea(struct fb_info *info,
- const struct fb_copyarea *region);
-static void amifb_imageblit(struct fb_info *info,
- const struct fb_image *image);
-static int amifb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg);
+ /*
+ * Find a matching Pixel Clock
+ */
+ for (clk_shift = TAG_SHRES; clk_shift <= TAG_LORES; clk_shift++)
+ if (var->pixclock <= pixclock[clk_shift])
+ break;
+ if (clk_shift > TAG_LORES) {
+ DPRINTK("pixclock too high\n");
+ return -EINVAL;
+ }
+ par->clk_shift = clk_shift;
/*
- * Interface to the low level console driver
+ * Check the Geometry Values
*/
-static void amifb_deinit(struct platform_device *pdev);
+ if ((par->xres = var->xres) < 64)
+ par->xres = 64;
+ if ((par->yres = var->yres) < 64)
+ par->yres = 64;
+ if ((par->vxres = var->xres_virtual) < par->xres)
+ par->vxres = par->xres;
+ if ((par->vyres = var->yres_virtual) < par->yres)
+ par->vyres = par->yres;
+
+ par->bpp = var->bits_per_pixel;
+ if (!var->nonstd) {
+ if (par->bpp < 1)
+ par->bpp = 1;
+ if (par->bpp > maxdepth[clk_shift]) {
+ if (round_down_bpp && maxdepth[clk_shift])
+ par->bpp = maxdepth[clk_shift];
+ else {
+ DPRINTK("invalid bpp\n");
+ return -EINVAL;
+ }
+ }
+ } else if (var->nonstd == FB_NONSTD_HAM) {
+ if (par->bpp < 6)
+ par->bpp = 6;
+ if (par->bpp != 6) {
+ if (par->bpp < 8)
+ par->bpp = 8;
+ if (par->bpp != 8 || !IS_AGA) {
+ DPRINTK("invalid bpp for ham mode\n");
+ return -EINVAL;
+ }
+ }
+ } else {
+ DPRINTK("unknown nonstd mode\n");
+ return -EINVAL;
+ }
/*
- * Internal routines
+ * FB_VMODE_SMOOTH_XPAN will be cleared if one of the following
+ * checks fails and smooth scrolling is not possible
*/
-static int flash_cursor(void);
-static irqreturn_t amifb_interrupt(int irq, void *dev_id);
-static u_long chipalloc(u_long size);
-static void chipfree(void);
+ par->vmode = var->vmode | FB_VMODE_SMOOTH_XPAN;
+ switch (par->vmode & FB_VMODE_MASK) {
+ case FB_VMODE_INTERLACED:
+ line_shift = 0;
+ break;
+ case FB_VMODE_NONINTERLACED:
+ line_shift = 1;
+ break;
+ case FB_VMODE_DOUBLE:
+ if (!IS_AGA) {
+ DPRINTK("double mode only possible with aga\n");
+ return -EINVAL;
+ }
+ line_shift = 2;
+ break;
+ default:
+ DPRINTK("unknown video mode\n");
+ return -EINVAL;
+ break;
+ }
+ par->line_shift = line_shift;
/*
- * Hardware routines
+ * Vertical and Horizontal Timings
*/
-static int ami_decode_var(struct fb_var_screeninfo *var,
- struct amifb_par *par);
-static int ami_encode_var(struct fb_var_screeninfo *var,
- struct amifb_par *par);
-static void ami_pan_var(struct fb_var_screeninfo *var);
-static int ami_update_par(void);
-static void ami_update_display(void);
-static void ami_init_display(void);
-static void ami_do_blank(void);
-static int ami_get_fix_cursorinfo(struct fb_fix_cursorinfo *fix);
-static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var, u_char __user *data);
-static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var, u_char __user *data);
-static int ami_get_cursorstate(struct fb_cursorstate *state);
-static int ami_set_cursorstate(struct fb_cursorstate *state);
-static void ami_set_sprite(void);
-static void ami_init_copper(void);
-static void ami_reinit_copper(void);
-static void ami_build_copper(void);
-static void ami_rebuild_copper(void);
+ xres_n = par->xres << clk_shift;
+ yres_n = par->yres << line_shift;
+ par->htotal = down8((var->left_margin + par->xres + var->right_margin +
+ var->hsync_len) << clk_shift);
+ par->vtotal =
+ down2(((var->upper_margin + par->yres + var->lower_margin +
+ var->vsync_len) << line_shift) + 1);
+ if (IS_AGA)
+ par->bplcon3 = sprpixmode[clk_shift];
+ else
+ par->bplcon3 = 0;
+ if (var->sync & FB_SYNC_BROADCAST) {
+ par->diwstop_h = par->htotal -
+ ((var->right_margin - var->hsync_len) << clk_shift);
+ if (IS_AGA)
+ par->diwstop_h += mod4(var->hsync_len);
+ else
+ par->diwstop_h = down4(par->diwstop_h);
+
+ par->diwstrt_h = par->diwstop_h - xres_n;
+ par->diwstop_v = par->vtotal -
+ ((var->lower_margin - var->vsync_len) << line_shift);
+ par->diwstrt_v = par->diwstop_v - yres_n;
+ if (par->diwstop_h >= par->htotal + 8) {
+ DPRINTK("invalid diwstop_h\n");
+ return -EINVAL;
+ }
+ if (par->diwstop_v > par->vtotal) {
+ DPRINTK("invalid diwstop_v\n");
+ return -EINVAL;
+ }
+
+ if (!IS_OCS) {
+ /* Initialize sync with some reasonable values for pwrsave */
+ par->hsstrt = 160;
+ par->hsstop = 320;
+ par->vsstrt = 30;
+ par->vsstop = 34;
+ } else {
+ par->hsstrt = 0;
+ par->hsstop = 0;
+ par->vsstrt = 0;
+ par->vsstop = 0;
+ }
+ if (par->vtotal > (PAL_VTOTAL + NTSC_VTOTAL) / 2) {
+ /* PAL video mode */
+ if (par->htotal != PAL_HTOTAL) {
+ DPRINTK("htotal invalid for pal\n");
+ return -EINVAL;
+ }
+ if (par->diwstrt_h < PAL_DIWSTRT_H) {
+ DPRINTK("diwstrt_h too low for pal\n");
+ return -EINVAL;
+ }
+ if (par->diwstrt_v < PAL_DIWSTRT_V) {
+ DPRINTK("diwstrt_v too low for pal\n");
+ return -EINVAL;
+ }
+ htotal = PAL_HTOTAL>>clk_shift;
+ vtotal = PAL_VTOTAL>>1;
+ if (!IS_OCS) {
+ par->beamcon0 = BMC0_PAL;
+ par->bplcon3 |= BPC3_BRDRBLNK;
+ } else if (AMIGAHW_PRESENT(AGNUS_HR_PAL) ||
+ AMIGAHW_PRESENT(AGNUS_HR_NTSC)) {
+ par->beamcon0 = BMC0_PAL;
+ par->hsstop = 1;
+ } else if (amiga_vblank != 50) {
+ DPRINTK("pal not supported by this chipset\n");
+ return -EINVAL;
+ }
+ } else {
+ /* NTSC video mode
+ * The AGA chipset seems to have a hardware bug with BPC3_BRDRBLNK
+ * and NTSC enabled, so it is better to keep diwstop_h <= 1812
+ */
+ if (par->htotal != NTSC_HTOTAL) {
+ DPRINTK("htotal invalid for ntsc\n");
+ return -EINVAL;
+ }
+ if (par->diwstrt_h < NTSC_DIWSTRT_H) {
+ DPRINTK("diwstrt_h too low for ntsc\n");
+ return -EINVAL;
+ }
+ if (par->diwstrt_v < NTSC_DIWSTRT_V) {
+ DPRINTK("diwstrt_v too low for ntsc\n");
+ return -EINVAL;
+ }
+ htotal = NTSC_HTOTAL>>clk_shift;
+ vtotal = NTSC_VTOTAL>>1;
+ if (!IS_OCS) {
+ par->beamcon0 = 0;
+ par->bplcon3 |= BPC3_BRDRBLNK;
+ } else if (AMIGAHW_PRESENT(AGNUS_HR_PAL) ||
+ AMIGAHW_PRESENT(AGNUS_HR_NTSC)) {
+ par->beamcon0 = 0;
+ par->hsstop = 1;
+ } else if (amiga_vblank != 60) {
+ DPRINTK("ntsc not supported by this chipset\n");
+ return -EINVAL;
+ }
+ }
+ if (IS_OCS) {
+ if (par->diwstrt_h >= 1024 || par->diwstop_h < 1024 ||
+ par->diwstrt_v >= 512 || par->diwstop_v < 256) {
+ DPRINTK("invalid position for display on ocs\n");
+ return -EINVAL;
+ }
+ }
+ } else if (!IS_OCS) {
+ /* Programmable video mode */
+ par->hsstrt = var->right_margin << clk_shift;
+ par->hsstop = (var->right_margin + var->hsync_len) << clk_shift;
+ par->diwstop_h = par->htotal - mod8(par->hsstrt) + 8 - (1 << clk_shift);
+ if (!IS_AGA)
+ par->diwstop_h = down4(par->diwstop_h) - 16;
+ par->diwstrt_h = par->diwstop_h - xres_n;
+ par->hbstop = par->diwstrt_h + 4;
+ par->hbstrt = par->diwstop_h + 4;
+ if (par->hbstrt >= par->htotal + 8)
+ par->hbstrt -= par->htotal;
+ par->hcenter = par->hsstrt + (par->htotal >> 1);
+ par->vsstrt = var->lower_margin << line_shift;
+ par->vsstop = (var->lower_margin + var->vsync_len) << line_shift;
+ par->diwstop_v = par->vtotal;
+ if ((par->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED)
+ par->diwstop_v -= 2;
+ par->diwstrt_v = par->diwstop_v - yres_n;
+ par->vbstop = par->diwstrt_v - 2;
+ par->vbstrt = par->diwstop_v - 2;
+ if (par->vtotal > 2048) {
+ DPRINTK("vtotal too high\n");
+ return -EINVAL;
+ }
+ if (par->htotal > 2048) {
+ DPRINTK("htotal too high\n");
+ return -EINVAL;
+ }
+ par->bplcon3 |= BPC3_EXTBLKEN;
+ par->beamcon0 = BMC0_HARDDIS | BMC0_VARVBEN | BMC0_LOLDIS |
+ BMC0_VARVSYEN | BMC0_VARHSYEN | BMC0_VARBEAMEN |
+ BMC0_PAL | BMC0_VARCSYEN;
+ if (var->sync & FB_SYNC_HOR_HIGH_ACT)
+ par->beamcon0 |= BMC0_HSYTRUE;
+ if (var->sync & FB_SYNC_VERT_HIGH_ACT)
+ par->beamcon0 |= BMC0_VSYTRUE;
+ if (var->sync & FB_SYNC_COMP_HIGH_ACT)
+ par->beamcon0 |= BMC0_CSYTRUE;
+ htotal = par->htotal>>clk_shift;
+ vtotal = par->vtotal>>1;
+ } else {
+ DPRINTK("only broadcast modes possible for ocs\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Checking the DMA timing
+ */
+
+ fconst = 16 << maxfmode << clk_shift;
+
+ /*
+ * smallest window start value that does not turn off DMA cycles
+ * other than those of sprite1-7, unless you change min_fstrt
+ */
+
+ fsize = ((maxfmode + clk_shift <= 1) ? fconst : 64);
+ fstrt = downx(fconst, par->diwstrt_h - 4) - fsize;
+ if (fstrt < min_fstrt) {
+ DPRINTK("fetch start too low\n");
+ return -EINVAL;
+ }
+
+ /*
+ * smallest window start value where smooth scrolling is possible
+ */
+
+ fstrt = downx(fconst, par->diwstrt_h - fconst + (1 << clk_shift) - 4) -
+ fsize;
+ if (fstrt < min_fstrt)
+ par->vmode &= ~FB_VMODE_SMOOTH_XPAN;
+
+ maxfetchstop = down16(par->htotal - 80);
+
+ fstrt = downx(fconst, par->diwstrt_h - 4) - 64 - fconst;
+ fsize = upx(fconst, xres_n +
+ modx(fconst, downx(1 << clk_shift, par->diwstrt_h - 4)));
+ if (fstrt + fsize > maxfetchstop)
+ par->vmode &= ~FB_VMODE_SMOOTH_XPAN;
+
+ fsize = upx(fconst, xres_n);
+ if (fstrt + fsize > maxfetchstop) {
+ DPRINTK("fetch stop too high\n");
+ return -EINVAL;
+ }
+
+ if (maxfmode + clk_shift <= 1) {
+ fsize = up64(xres_n + fconst - 1);
+ if (min_fstrt + fsize - 64 > maxfetchstop)
+ par->vmode &= ~FB_VMODE_SMOOTH_XPAN;
+
+ fsize = up64(xres_n);
+ if (min_fstrt + fsize - 64 > maxfetchstop) {
+ DPRINTK("fetch size too high\n");
+ return -EINVAL;
+ }
+
+ fsize -= 64;
+ } else
+ fsize -= fconst;
+
+ /*
+ * Check if there is enough time to update the bitplane pointers for ywrap
+ */
+
+ if (par->htotal - fsize - 64 < par->bpp * 64)
+ par->vmode &= ~FB_VMODE_YWRAP;
+
+ /*
+ * Bitplane calculations and check the Memory Requirements
+ */
+
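/*
 * Illustrative note (not part of the patch): with amifb_ilbm the bitplanes
 * are interleaved line by line, so next_plane is the byte width of one line
 * and next_line is bpp * next_plane; without it the planes are stored as
 * contiguous blocks, so next_line is the line width and next_plane is
 * vyres * next_line. Either way the product checked against fix.smem_len
 * is the same total amount of chip RAM.
 */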
+ if (amifb_ilbm) {
+ par->next_plane = div8(upx(16 << maxfmode, par->vxres));
+ par->next_line = par->bpp * par->next_plane;
+ if (par->next_line * par->vyres > info->fix.smem_len) {
+ DPRINTK("too few video mem\n");
+ return -EINVAL;
+ }
+ } else {
+ par->next_line = div8(upx(16 << maxfmode, par->vxres));
+ par->next_plane = par->vyres * par->next_line;
+ if (par->next_plane * par->bpp > info->fix.smem_len) {
+ DPRINTK("too few video mem\n");
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * Hardware Register Values
+ */
+
+ par->bplcon0 = BPC0_COLOR | bplpixmode[clk_shift];
+ if (!IS_OCS)
+ par->bplcon0 |= BPC0_ECSENA;
+ if (par->bpp == 8)
+ par->bplcon0 |= BPC0_BPU3;
+ else
+ par->bplcon0 |= par->bpp << 12;
+ if (var->nonstd == FB_NONSTD_HAM)
+ par->bplcon0 |= BPC0_HAM;
+ if (var->sync & FB_SYNC_EXT)
+ par->bplcon0 |= BPC0_ERSY;
+
+ if (IS_AGA)
+ par->fmode = bplfetchmode[maxfmode];
+
+ switch (par->vmode & FB_VMODE_MASK) {
+ case FB_VMODE_INTERLACED:
+ par->bplcon0 |= BPC0_LACE;
+ break;
+ case FB_VMODE_DOUBLE:
+ if (IS_AGA)
+ par->fmode |= FMODE_SSCAN2 | FMODE_BSCAN2;
+ break;
+ }
+
+ if (!((par->vmode ^ var->vmode) & FB_VMODE_YWRAP)) {
+ par->xoffset = var->xoffset;
+ par->yoffset = var->yoffset;
+ if (par->vmode & FB_VMODE_YWRAP) {
+ if (par->xoffset || par->yoffset < 0 ||
+ par->yoffset >= par->vyres)
+ par->xoffset = par->yoffset = 0;
+ } else {
+ if (par->xoffset < 0 ||
+ par->xoffset > upx(16 << maxfmode, par->vxres - par->xres) ||
+ par->yoffset < 0 || par->yoffset > par->vyres - par->yres)
+ par->xoffset = par->yoffset = 0;
+ }
+ } else
+ par->xoffset = par->yoffset = 0;
+
+ par->crsr.crsr_x = par->crsr.crsr_y = 0;
+ par->crsr.spot_x = par->crsr.spot_y = 0;
+ par->crsr.height = par->crsr.width = 0;
+
+ return 0;
+}
+
+ /*
+ * Fill the `var' structure based on the values in `par' and maybe
+ * other values read out of the hardware.
+ */
+
+static void ami_encode_var(struct fb_var_screeninfo *var,
+ struct amifb_par *par)
+{
+ u_short clk_shift, line_shift;
+
+ memset(var, 0, sizeof(struct fb_var_screeninfo));
+
+ clk_shift = par->clk_shift;
+ line_shift = par->line_shift;
+
+ var->xres = par->xres;
+ var->yres = par->yres;
+ var->xres_virtual = par->vxres;
+ var->yres_virtual = par->vyres;
+ var->xoffset = par->xoffset;
+ var->yoffset = par->yoffset;
+
+ var->bits_per_pixel = par->bpp;
+ var->grayscale = 0;
+
+ var->red.offset = 0;
+ var->red.msb_right = 0;
+ var->red.length = par->bpp;
+ if (par->bplcon0 & BPC0_HAM)
+ var->red.length -= 2;
+ var->blue = var->green = var->red;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ var->transp.msb_right = 0;
+
+ if (par->bplcon0 & BPC0_HAM)
+ var->nonstd = FB_NONSTD_HAM;
+ else
+ var->nonstd = 0;
+ var->activate = 0;
+
+ var->height = -1;
+ var->width = -1;
+
+ var->pixclock = pixclock[clk_shift];
+
+ if (IS_AGA && par->fmode & FMODE_BSCAN2)
+ var->vmode = FB_VMODE_DOUBLE;
+ else if (par->bplcon0 & BPC0_LACE)
+ var->vmode = FB_VMODE_INTERLACED;
+ else
+ var->vmode = FB_VMODE_NONINTERLACED;
+
+ if (!IS_OCS && par->beamcon0 & BMC0_VARBEAMEN) {
+ var->hsync_len = (par->hsstop - par->hsstrt)>>clk_shift;
+ var->right_margin = par->hsstrt>>clk_shift;
+ var->left_margin = (par->htotal>>clk_shift) - var->xres - var->right_margin - var->hsync_len;
+ var->vsync_len = (par->vsstop - par->vsstrt)>>line_shift;
+ var->lower_margin = par->vsstrt>>line_shift;
+ var->upper_margin = (par->vtotal>>line_shift) - var->yres - var->lower_margin - var->vsync_len;
+ var->sync = 0;
+ if (par->beamcon0 & BMC0_HSYTRUE)
+ var->sync |= FB_SYNC_HOR_HIGH_ACT;
+ if (par->beamcon0 & BMC0_VSYTRUE)
+ var->sync |= FB_SYNC_VERT_HIGH_ACT;
+ if (par->beamcon0 & BMC0_CSYTRUE)
+ var->sync |= FB_SYNC_COMP_HIGH_ACT;
+ } else {
+ var->sync = FB_SYNC_BROADCAST;
+ var->hsync_len = (152>>clk_shift) + mod4(par->diwstop_h);
+ var->right_margin = ((par->htotal - down4(par->diwstop_h))>>clk_shift) + var->hsync_len;
+ var->left_margin = (par->htotal>>clk_shift) - var->xres - var->right_margin - var->hsync_len;
+ var->vsync_len = 4>>line_shift;
+ var->lower_margin = ((par->vtotal - par->diwstop_v)>>line_shift) + var->vsync_len;
+ var->upper_margin = (((par->vtotal - 2)>>line_shift) + 1) - var->yres -
+ var->lower_margin - var->vsync_len;
+ }
+
+ if (par->bplcon0 & BPC0_ERSY)
+ var->sync |= FB_SYNC_EXT;
+ if (par->vmode & FB_VMODE_YWRAP)
+ var->vmode |= FB_VMODE_YWRAP;
+}
+
+
+ /*
+ * Update hardware
+ */
+
+static void ami_update_par(struct fb_info *info)
+{
+ struct amifb_par *par = info->par;
+ short clk_shift, vshift, fstrt, fsize, fstop, fconst, shift, move, mod;
+
+ clk_shift = par->clk_shift;
+
+ if (!(par->vmode & FB_VMODE_SMOOTH_XPAN))
+ par->xoffset = upx(16 << maxfmode, par->xoffset);
+
+ fconst = 16 << maxfmode << clk_shift;
+ vshift = modx(16 << maxfmode, par->xoffset);
+ fstrt = par->diwstrt_h - (vshift << clk_shift) - 4;
+ fsize = (par->xres + vshift) << clk_shift;
+ shift = modx(fconst, fstrt);
+ move = downx(2 << maxfmode, div8(par->xoffset));
+ if (maxfmode + clk_shift > 1) {
+ fstrt = downx(fconst, fstrt) - 64;
+ fsize = upx(fconst, fsize);
+ fstop = fstrt + fsize - fconst;
+ } else {
+ mod = fstrt = downx(fconst, fstrt) - fconst;
+ fstop = fstrt + upx(fconst, fsize) - 64;
+ fsize = up64(fsize);
+ fstrt = fstop - fsize + 64;
+ if (fstrt < min_fstrt) {
+ fstop += min_fstrt - fstrt;
+ fstrt = min_fstrt;
+ }
+ move = move - div8((mod - fstrt)>>clk_shift);
+ }
+ mod = par->next_line - div8(fsize>>clk_shift);
+ par->ddfstrt = fstrt;
+ par->ddfstop = fstop;
+ par->bplcon1 = hscroll2hw(shift);
+ par->bpl2mod = mod;
+ if (par->bplcon0 & BPC0_LACE)
+ par->bpl2mod += par->next_line;
+ if (IS_AGA && (par->fmode & FMODE_BSCAN2))
+ par->bpl1mod = -div8(fsize>>clk_shift);
+ else
+ par->bpl1mod = par->bpl2mod;
+
+ if (par->yoffset) {
+ par->bplpt0 = info->fix.smem_start +
+ par->next_line * par->yoffset + move;
+ if (par->vmode & FB_VMODE_YWRAP) {
+ if (par->yoffset > par->vyres - par->yres) {
+ par->bplpt0wrap = info->fix.smem_start + move;
+ if (par->bplcon0 & BPC0_LACE &&
+ mod2(par->diwstrt_v + par->vyres -
+ par->yoffset))
+ par->bplpt0wrap += par->next_line;
+ }
+ }
+ } else
+ par->bplpt0 = info->fix.smem_start + move;
+
+ if (par->bplcon0 & BPC0_LACE && mod2(par->diwstrt_v))
+ par->bplpt0 += par->next_line;
+}
+
+
+ /*
+ * Pan or Wrap the Display
+ *
+ * This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
+ * in `var'.
+ */
+
+static void ami_pan_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ struct amifb_par *par = info->par;
+
+ par->xoffset = var->xoffset;
+ par->yoffset = var->yoffset;
+ if (var->vmode & FB_VMODE_YWRAP)
+ par->vmode |= FB_VMODE_YWRAP;
+ else
+ par->vmode &= ~FB_VMODE_YWRAP;
+
+ do_vmode_pan = 0;
+ ami_update_par(info);
+ do_vmode_pan = 1;
+}
+
+
+static void ami_update_display(const struct amifb_par *par)
+{
+ custom.bplcon1 = par->bplcon1;
+ custom.bpl1mod = par->bpl1mod;
+ custom.bpl2mod = par->bpl2mod;
+ custom.ddfstrt = ddfstrt2hw(par->ddfstrt);
+ custom.ddfstop = ddfstop2hw(par->ddfstop);
+}
+
+ /*
+ * Change the video mode (called by VBlank interrupt)
+ */
+
+static void ami_init_display(const struct amifb_par *par)
+{
+ int i;
+
+ custom.bplcon0 = par->bplcon0 & ~BPC0_LACE;
+ custom.bplcon2 = (IS_OCS ? 0 : BPC2_KILLEHB) | BPC2_PF2P2 | BPC2_PF1P2;
+ if (!IS_OCS) {
+ custom.bplcon3 = par->bplcon3;
+ if (IS_AGA)
+ custom.bplcon4 = BPC4_ESPRM4 | BPC4_OSPRM4;
+ if (par->beamcon0 & BMC0_VARBEAMEN) {
+ custom.htotal = htotal2hw(par->htotal);
+ custom.hbstrt = hbstrt2hw(par->hbstrt);
+ custom.hbstop = hbstop2hw(par->hbstop);
+ custom.hsstrt = hsstrt2hw(par->hsstrt);
+ custom.hsstop = hsstop2hw(par->hsstop);
+ custom.hcenter = hcenter2hw(par->hcenter);
+ custom.vtotal = vtotal2hw(par->vtotal);
+ custom.vbstrt = vbstrt2hw(par->vbstrt);
+ custom.vbstop = vbstop2hw(par->vbstop);
+ custom.vsstrt = vsstrt2hw(par->vsstrt);
+ custom.vsstop = vsstop2hw(par->vsstop);
+ }
+ }
+ if (!IS_OCS || par->hsstop)
+ custom.beamcon0 = par->beamcon0;
+ if (IS_AGA)
+ custom.fmode = par->fmode;
+
+ /*
+ * The minimum period for audio depends on htotal
+ */
+
+ amiga_audio_min_period = div16(par->htotal);
+
+ is_lace = par->bplcon0 & BPC0_LACE ? 1 : 0;
+#if 1
+ if (is_lace) {
+ i = custom.vposr >> 15;
+ } else {
+ custom.vposw = custom.vposr | 0x8000;
+ i = 1;
+ }
+#else
+ i = 1;
+ custom.vposw = custom.vposr | 0x8000;
+#endif
+ custom.cop2lc = (u_short *)ZTWO_PADDR(copdisplay.list[currentcop][i]);
+}
+
+ /*
+ * (Un)Blank the screen (called by VBlank interrupt)
+ */
+
+static void ami_do_blank(const struct amifb_par *par)
+{
+#if defined(CONFIG_FB_AMIGA_AGA)
+ u_short bplcon3 = par->bplcon3;
+#endif
+ u_char red, green, blue;
+
+ if (do_blank > 0) {
+ custom.dmacon = DMAF_RASTER | DMAF_SPRITE;
+ red = green = blue = 0;
+ if (!IS_OCS && do_blank > 1) {
+ switch (do_blank) {
+ case FB_BLANK_VSYNC_SUSPEND:
+ custom.hsstrt = hsstrt2hw(par->hsstrt);
+ custom.hsstop = hsstop2hw(par->hsstop);
+ custom.vsstrt = vsstrt2hw(par->vtotal + 4);
+ custom.vsstop = vsstop2hw(par->vtotal + 4);
+ break;
+ case FB_BLANK_HSYNC_SUSPEND:
+ custom.hsstrt = hsstrt2hw(par->htotal + 16);
+ custom.hsstop = hsstop2hw(par->htotal + 16);
+ custom.vsstrt = vsstrt2hw(par->vsstrt);
+ custom.vsstop = vsstrt2hw(par->vsstop);
+ break;
+ case FB_BLANK_POWERDOWN:
+ custom.hsstrt = hsstrt2hw(par->htotal + 16);
+ custom.hsstop = hsstop2hw(par->htotal + 16);
+ custom.vsstrt = vsstrt2hw(par->vtotal + 4);
+ custom.vsstop = vsstop2hw(par->vtotal + 4);
+ break;
+ }
+ if (!(par->beamcon0 & BMC0_VARBEAMEN)) {
+ custom.htotal = htotal2hw(par->htotal);
+ custom.vtotal = vtotal2hw(par->vtotal);
+ custom.beamcon0 = BMC0_HARDDIS | BMC0_VARBEAMEN |
+ BMC0_VARVSYEN | BMC0_VARHSYEN | BMC0_VARCSYEN;
+ }
+ }
+ } else {
+ custom.dmacon = DMAF_SETCLR | DMAF_RASTER | DMAF_SPRITE;
+ red = red0;
+ green = green0;
+ blue = blue0;
+ if (!IS_OCS) {
+ custom.hsstrt = hsstrt2hw(par->hsstrt);
+ custom.hsstop = hsstop2hw(par->hsstop);
+ custom.vsstrt = vsstrt2hw(par->vsstrt);
+ custom.vsstop = vsstop2hw(par->vsstop);
+ custom.beamcon0 = par->beamcon0;
+ }
+ }
+#if defined(CONFIG_FB_AMIGA_AGA)
+ if (IS_AGA) {
+ custom.bplcon3 = bplcon3;
+ custom.color[0] = rgb2hw8_high(red, green, blue);
+ custom.bplcon3 = bplcon3 | BPC3_LOCT;
+ custom.color[0] = rgb2hw8_low(red, green, blue);
+ custom.bplcon3 = bplcon3;
+ } else
+#endif
+#if defined(CONFIG_FB_AMIGA_ECS)
+ if (par->bplcon0 & BPC0_SHRES) {
+ u_short color, mask;
+ int i;
+
+ mask = 0x3333;
+ color = rgb2hw2(red, green, blue);
+ for (i = 12; i >= 0; i -= 4)
+ custom.color[i] = ecs_palette[i] = (ecs_palette[i] & mask) | color;
+ mask <<= 2; color >>= 2;
+ for (i = 3; i >= 0; i--)
+ custom.color[i] = ecs_palette[i] = (ecs_palette[i] & mask) | color;
+ } else
+#endif
+ custom.color[0] = rgb2hw4(red, green, blue);
+ is_blanked = do_blank > 0 ? do_blank : 0;
+}
+
+static int ami_get_fix_cursorinfo(struct fb_fix_cursorinfo *fix,
+ const struct amifb_par *par)
+{
+ fix->crsr_width = fix->crsr_xsize = par->crsr.width;
+ fix->crsr_height = fix->crsr_ysize = par->crsr.height;
+ fix->crsr_color1 = 17;
+ fix->crsr_color2 = 18;
+ return 0;
+}
+
+static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var,
+ u_char __user *data,
+ const struct amifb_par *par)
+{
+ register u_short *lspr, *sspr;
+#ifdef __mc68000__
+ register u_long datawords asm ("d2");
+#else
+ register u_long datawords;
+#endif
+ register short delta;
+ register u_char color;
+ short height, width, bits, words;
+ int size, alloc;
+
+ size = par->crsr.height * par->crsr.width;
+ alloc = var->height * var->width;
+ var->height = par->crsr.height;
+ var->width = par->crsr.width;
+ var->xspot = par->crsr.spot_x;
+ var->yspot = par->crsr.spot_y;
+ if (size > var->height * var->width)
+ return -ENAMETOOLONG;
+ if (!access_ok(VERIFY_WRITE, data, size))
+ return -EFAULT;
+ delta = 1 << par->crsr.fmode;
+ lspr = lofsprite + (delta << 1);
+ if (par->bplcon0 & BPC0_LACE)
+ sspr = shfsprite + (delta << 1);
+ else
+ sspr = NULL;
+ for (height = (short)var->height - 1; height >= 0; height--) {
+ bits = 0; words = delta; datawords = 0;
+ for (width = (short)var->width - 1; width >= 0; width--) {
+ if (bits == 0) {
+ bits = 16; --words;
+#ifdef __mc68000__
+ asm volatile ("movew %1@(%3:w:2),%0 ; swap %0 ; movew %1@+,%0"
+ : "=d" (datawords), "=a" (lspr) : "1" (lspr), "d" (delta));
+#else
+ datawords = (*(lspr + delta) << 16) | (*lspr++);
+#endif
+ }
+ --bits;
+#ifdef __mc68000__
+ asm volatile (
+ "clrb %0 ; swap %1 ; lslw #1,%1 ; roxlb #1,%0 ; "
+ "swap %1 ; lslw #1,%1 ; roxlb #1,%0"
+ : "=d" (color), "=d" (datawords) : "1" (datawords));
+#else
+ color = (((datawords >> 30) & 2)
+ | ((datawords >> 15) & 1));
+ datawords <<= 1;
+#endif
+ put_user(color, data++);
+ }
+ if (bits > 0) {
+ --words; ++lspr;
+ }
+ while (--words >= 0)
+ ++lspr;
+#ifdef __mc68000__
+ asm volatile ("lea %0@(%4:w:2),%0 ; tstl %1 ; jeq 1f ; exg %0,%1\n1:"
+ : "=a" (lspr), "=a" (sspr) : "0" (lspr), "1" (sspr), "d" (delta));
+#else
+ lspr += delta;
+ if (sspr) {
+ u_short *tmp = lspr;
+ lspr = sspr;
+ sspr = tmp;
+ }
+#endif
+ }
+ return 0;
+}
+
+static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var,
+ u_char __user *data, struct amifb_par *par)
+{
+ register u_short *lspr, *sspr;
+#ifdef __mc68000__
+ register u_long datawords asm ("d2");
+#else
+ register u_long datawords;
+#endif
+ register short delta;
+ u_short fmode;
+ short height, width, bits, words;
+
+ if (!var->width)
+ return -EINVAL;
+ else if (var->width <= 16)
+ fmode = TAG_FMODE_1;
+ else if (var->width <= 32)
+ fmode = TAG_FMODE_2;
+ else if (var->width <= 64)
+ fmode = TAG_FMODE_4;
+ else
+ return -EINVAL;
+ if (fmode > maxfmode)
+ return -EINVAL;
+ if (!var->height)
+ return -EINVAL;
+ if (!access_ok(VERIFY_READ, data, var->width * var->height))
+ return -EFAULT;
+ delta = 1 << fmode;
+ lofsprite = shfsprite = (u_short *)spritememory;
+ lspr = lofsprite + (delta << 1);
+ if (par->bplcon0 & BPC0_LACE) {
+ if (((var->height + 4) << fmode << 2) > SPRITEMEMSIZE)
+ return -EINVAL;
+ memset(lspr, 0, (var->height + 4) << fmode << 2);
+ shfsprite += ((var->height + 5)&-2) << fmode;
+ sspr = shfsprite + (delta << 1);
+ } else {
+ if (((var->height + 2) << fmode << 2) > SPRITEMEMSIZE)
+ return -EINVAL;
+ memset(lspr, 0, (var->height + 2) << fmode << 2);
+ sspr = NULL;
+ }
+ for (height = (short)var->height - 1; height >= 0; height--) {
+ bits = 16; words = delta; datawords = 0;
+ for (width = (short)var->width - 1; width >= 0; width--) {
+ unsigned long tdata = 0;
+ get_user(tdata, data);
+ data++;
+#ifdef __mc68000__
+ asm volatile (
+ "lsrb #1,%2 ; roxlw #1,%0 ; swap %0 ; "
+ "lsrb #1,%2 ; roxlw #1,%0 ; swap %0"
+ : "=d" (datawords)
+ : "0" (datawords), "d" (tdata));
+#else
+ datawords = ((datawords << 1) & 0xfffefffe);
+ datawords |= tdata & 1;
+ datawords |= (tdata & 2) << (16 - 1);
+#endif
+ if (--bits == 0) {
+ bits = 16; --words;
+#ifdef __mc68000__
+ asm volatile ("swap %2 ; movew %2,%0@(%3:w:2) ; swap %2 ; movew %2,%0@+"
+ : "=a" (lspr) : "0" (lspr), "d" (datawords), "d" (delta));
+#else
+ *(lspr + delta) = (u_short) (datawords >> 16);
+ *lspr++ = (u_short) (datawords & 0xffff);
+#endif
+ }
+ }
+ if (bits < 16) {
+ --words;
+#ifdef __mc68000__
+ asm volatile (
+ "swap %2 ; lslw %4,%2 ; movew %2,%0@(%3:w:2) ; "
+ "swap %2 ; lslw %4,%2 ; movew %2,%0@+"
+ : "=a" (lspr) : "0" (lspr), "d" (datawords), "d" (delta), "d" (bits));
+#else
+ *(lspr + delta) = (u_short) (datawords >> (16 + bits));
+ *lspr++ = (u_short) ((datawords & 0x0000ffff) >> bits);
+#endif
+ }
+ while (--words >= 0) {
+#ifdef __mc68000__
+ asm volatile ("moveql #0,%%d0 ; movew %%d0,%0@(%2:w:2) ; movew %%d0,%0@+"
+ : "=a" (lspr) : "0" (lspr), "d" (delta) : "d0");
+#else
+ *(lspr + delta) = 0;
+ *lspr++ = 0;
+#endif
+ }
+#ifdef __mc68000__
+ asm volatile ("lea %0@(%4:w:2),%0 ; tstl %1 ; jeq 1f ; exg %0,%1\n1:"
+ : "=a" (lspr), "=a" (sspr) : "0" (lspr), "1" (sspr), "d" (delta));
+#else
+ lspr += delta;
+ if (sspr) {
+ u_short *tmp = lspr;
+ lspr = sspr;
+ sspr = tmp;
+ }
+#endif
+ }
+ par->crsr.height = var->height;
+ par->crsr.width = var->width;
+ par->crsr.spot_x = var->xspot;
+ par->crsr.spot_y = var->yspot;
+ par->crsr.fmode = fmode;
+ if (IS_AGA) {
+ par->fmode &= ~(FMODE_SPAGEM | FMODE_SPR32);
+ par->fmode |= sprfetchmode[fmode];
+ custom.fmode = par->fmode;
+ }
+ return 0;
+}
+
+static int ami_get_cursorstate(struct fb_cursorstate *state,
+ const struct amifb_par *par)
+{
+ state->xoffset = par->crsr.crsr_x;
+ state->yoffset = par->crsr.crsr_y;
+ state->mode = cursormode;
+ return 0;
+}
+
+static int ami_set_cursorstate(struct fb_cursorstate *state,
+ struct amifb_par *par)
+{
+ par->crsr.crsr_x = state->xoffset;
+ par->crsr.crsr_y = state->yoffset;
+ if ((cursormode = state->mode) == FB_CURSOR_OFF)
+ cursorstate = -1;
+ do_cursor = 1;
+ return 0;
+}
+
+static void ami_set_sprite(const struct amifb_par *par)
+{
+ copins *copl, *cops;
+ u_short hs, vs, ve;
+ u_long pl, ps, pt;
+ short mx, my;
+
+ cops = copdisplay.list[currentcop][0];
+ copl = copdisplay.list[currentcop][1];
+ ps = pl = ZTWO_PADDR(dummysprite);
+ mx = par->crsr.crsr_x - par->crsr.spot_x;
+ my = par->crsr.crsr_y - par->crsr.spot_y;
+ if (!(par->vmode & FB_VMODE_YWRAP)) {
+ mx -= par->xoffset;
+ my -= par->yoffset;
+ }
+ if (!is_blanked && cursorstate > 0 && par->crsr.height > 0 &&
+ mx > -(short)par->crsr.width && mx < par->xres &&
+ my > -(short)par->crsr.height && my < par->yres) {
+ pl = ZTWO_PADDR(lofsprite);
+ hs = par->diwstrt_h + (mx << par->clk_shift) - 4;
+ vs = par->diwstrt_v + (my << par->line_shift);
+ ve = vs + (par->crsr.height << par->line_shift);
+ if (par->bplcon0 & BPC0_LACE) {
+ ps = ZTWO_PADDR(shfsprite);
+ lofsprite[0] = spr2hw_pos(vs, hs);
+ shfsprite[0] = spr2hw_pos(vs + 1, hs);
+ if (mod2(vs)) {
+ lofsprite[1 << par->crsr.fmode] = spr2hw_ctl(vs, hs, ve);
+ shfsprite[1 << par->crsr.fmode] = spr2hw_ctl(vs + 1, hs, ve + 1);
+ pt = pl; pl = ps; ps = pt;
+ } else {
+ lofsprite[1 << par->crsr.fmode] = spr2hw_ctl(vs, hs, ve + 1);
+ shfsprite[1 << par->crsr.fmode] = spr2hw_ctl(vs + 1, hs, ve);
+ }
+ } else {
+ lofsprite[0] = spr2hw_pos(vs, hs) | (IS_AGA && (par->fmode & FMODE_BSCAN2) ? 0x80 : 0);
+ lofsprite[1 << par->crsr.fmode] = spr2hw_ctl(vs, hs, ve);
+ }
+ }
+ copl[cop_spr0ptrh].w[1] = highw(pl);
+ copl[cop_spr0ptrl].w[1] = loww(pl);
+ if (par->bplcon0 & BPC0_LACE) {
+ cops[cop_spr0ptrh].w[1] = highw(ps);
+ cops[cop_spr0ptrl].w[1] = loww(ps);
+ }
+}
+
+
+ /*
+ * Initialise the Copper Initialisation List
+ */
+
+static void __init ami_init_copper(void)
+{
+ copins *cop = copdisplay.init;
+ u_long p;
+ int i;
+
+ if (!IS_OCS) {
+ (cop++)->l = CMOVE(BPC0_COLOR | BPC0_SHRES | BPC0_ECSENA, bplcon0);
+ (cop++)->l = CMOVE(0x0181, diwstrt);
+ (cop++)->l = CMOVE(0x0281, diwstop);
+ (cop++)->l = CMOVE(0x0000, diwhigh);
+ } else
+ (cop++)->l = CMOVE(BPC0_COLOR, bplcon0);
+ p = ZTWO_PADDR(dummysprite);
+ for (i = 0; i < 8; i++) {
+ (cop++)->l = CMOVE(0, spr[i].pos);
+ (cop++)->l = CMOVE(highw(p), sprpt[i]);
+ (cop++)->l = CMOVE2(loww(p), sprpt[i]);
+ }
+
+ (cop++)->l = CMOVE(IF_SETCLR | IF_COPER, intreq);
+ copdisplay.wait = cop;
+ (cop++)->l = CEND;
+ (cop++)->l = CMOVE(0, copjmp2);
+ cop->l = CEND;
+
+ custom.cop1lc = (u_short *)ZTWO_PADDR(copdisplay.init);
+ custom.copjmp1 = 0;
+}
+
+static void ami_reinit_copper(const struct amifb_par *par)
+{
+ copdisplay.init[cip_bplcon0].w[1] = ~(BPC0_BPU3 | BPC0_BPU2 | BPC0_BPU1 | BPC0_BPU0) & par->bplcon0;
+ copdisplay.wait->l = CWAIT(32, par->diwstrt_v - 4);
+}
+
+
+ /*
+ * Rebuild the Copper List
+ *
+ * We only change the things that are not static
+ */
+
+static void ami_rebuild_copper(const struct amifb_par *par)
+{
+ copins *copl, *cops;
+ u_short line, h_end1, h_end2;
+ short i;
+ u_long p;
+
+ if (IS_AGA && maxfmode + par->clk_shift == 0)
+ h_end1 = par->diwstrt_h - 64;
+ else
+ h_end1 = par->htotal - 32;
+ h_end2 = par->ddfstop + 64;
+
+ ami_set_sprite(par);
+
+ copl = copdisplay.rebuild[1];
+ p = par->bplpt0;
+ if (par->vmode & FB_VMODE_YWRAP) {
+ if ((par->vyres - par->yoffset) != 1 || !mod2(par->diwstrt_v)) {
+ if (par->yoffset > par->vyres - par->yres) {
+ for (i = 0; i < (short)par->bpp; i++, p += par->next_plane) {
+ (copl++)->l = CMOVE(highw(p), bplpt[i]);
+ (copl++)->l = CMOVE2(loww(p), bplpt[i]);
+ }
+ line = par->diwstrt_v + ((par->vyres - par->yoffset) << par->line_shift) - 1;
+ while (line >= 512) {
+ (copl++)->l = CWAIT(h_end1, 510);
+ line -= 512;
+ }
+ if (line >= 510 && IS_AGA && maxfmode + par->clk_shift == 0)
+ (copl++)->l = CWAIT(h_end1, line);
+ else
+ (copl++)->l = CWAIT(h_end2, line);
+ p = par->bplpt0wrap;
+ }
+ } else
+ p = par->bplpt0wrap;
+ }
+ for (i = 0; i < (short)par->bpp; i++, p += par->next_plane) {
+ (copl++)->l = CMOVE(highw(p), bplpt[i]);
+ (copl++)->l = CMOVE2(loww(p), bplpt[i]);
+ }
+ copl->l = CEND;
+
+ if (par->bplcon0 & BPC0_LACE) {
+ cops = copdisplay.rebuild[0];
+ p = par->bplpt0;
+ if (mod2(par->diwstrt_v))
+ p -= par->next_line;
+ else
+ p += par->next_line;
+ if (par->vmode & FB_VMODE_YWRAP) {
+ if ((par->vyres - par->yoffset) != 1 || mod2(par->diwstrt_v)) {
+ if (par->yoffset > par->vyres - par->yres + 1) {
+ for (i = 0; i < (short)par->bpp; i++, p += par->next_plane) {
+ (cops++)->l = CMOVE(highw(p), bplpt[i]);
+ (cops++)->l = CMOVE2(loww(p), bplpt[i]);
+ }
+ line = par->diwstrt_v + ((par->vyres - par->yoffset) << par->line_shift) - 2;
+ while (line >= 512) {
+ (cops++)->l = CWAIT(h_end1, 510);
+ line -= 512;
+ }
+ if (line > 510 && IS_AGA && maxfmode + par->clk_shift == 0)
+ (cops++)->l = CWAIT(h_end1, line);
+ else
+ (cops++)->l = CWAIT(h_end2, line);
+ p = par->bplpt0wrap;
+ if (mod2(par->diwstrt_v + par->vyres -
+ par->yoffset))
+ p -= par->next_line;
+ else
+ p += par->next_line;
+ }
+ } else
+ p = par->bplpt0wrap - par->next_line;
+ }
+ for (i = 0; i < (short)par->bpp; i++, p += par->next_plane) {
+ (cops++)->l = CMOVE(highw(p), bplpt[i]);
+ (cops++)->l = CMOVE2(loww(p), bplpt[i]);
+ }
+ cops->l = CEND;
+ }
+}
+
+
+ /*
+ * Build the Copper List
+ */
+
+static void ami_build_copper(struct fb_info *info)
+{
+ struct amifb_par *par = info->par;
+ copins *copl, *cops;
+ u_long p;
+
+ currentcop = 1 - currentcop;
+
+ copl = copdisplay.list[currentcop][1];
+
+ (copl++)->l = CWAIT(0, 10);
+ (copl++)->l = CMOVE(par->bplcon0, bplcon0);
+ (copl++)->l = CMOVE(0, sprpt[0]);
+ (copl++)->l = CMOVE2(0, sprpt[0]);
+
+ if (par->bplcon0 & BPC0_LACE) {
+ cops = copdisplay.list[currentcop][0];
+
+ (cops++)->l = CWAIT(0, 10);
+ (cops++)->l = CMOVE(par->bplcon0, bplcon0);
+ (cops++)->l = CMOVE(0, sprpt[0]);
+ (cops++)->l = CMOVE2(0, sprpt[0]);
+
+ (copl++)->l = CMOVE(diwstrt2hw(par->diwstrt_h, par->diwstrt_v + 1), diwstrt);
+ (copl++)->l = CMOVE(diwstop2hw(par->diwstop_h, par->diwstop_v + 1), diwstop);
+ (cops++)->l = CMOVE(diwstrt2hw(par->diwstrt_h, par->diwstrt_v), diwstrt);
+ (cops++)->l = CMOVE(diwstop2hw(par->diwstop_h, par->diwstop_v), diwstop);
+ if (!IS_OCS) {
+ (copl++)->l = CMOVE(diwhigh2hw(par->diwstrt_h, par->diwstrt_v + 1,
+ par->diwstop_h, par->diwstop_v + 1), diwhigh);
+ (cops++)->l = CMOVE(diwhigh2hw(par->diwstrt_h, par->diwstrt_v,
+ par->diwstop_h, par->diwstop_v), diwhigh);
+#if 0
+ if (par->beamcon0 & BMC0_VARBEAMEN) {
+ (copl++)->l = CMOVE(vtotal2hw(par->vtotal), vtotal);
+ (copl++)->l = CMOVE(vbstrt2hw(par->vbstrt + 1), vbstrt);
+ (copl++)->l = CMOVE(vbstop2hw(par->vbstop + 1), vbstop);
+ (cops++)->l = CMOVE(vtotal2hw(par->vtotal), vtotal);
+ (cops++)->l = CMOVE(vbstrt2hw(par->vbstrt), vbstrt);
+ (cops++)->l = CMOVE(vbstop2hw(par->vbstop), vbstop);
+ }
+#endif
+ }
+ p = ZTWO_PADDR(copdisplay.list[currentcop][0]);
+ (copl++)->l = CMOVE(highw(p), cop2lc);
+ (copl++)->l = CMOVE2(loww(p), cop2lc);
+ p = ZTWO_PADDR(copdisplay.list[currentcop][1]);
+ (cops++)->l = CMOVE(highw(p), cop2lc);
+ (cops++)->l = CMOVE2(loww(p), cop2lc);
+ copdisplay.rebuild[0] = cops;
+ } else {
+ (copl++)->l = CMOVE(diwstrt2hw(par->diwstrt_h, par->diwstrt_v), diwstrt);
+ (copl++)->l = CMOVE(diwstop2hw(par->diwstop_h, par->diwstop_v), diwstop);
+ if (!IS_OCS) {
+ (copl++)->l = CMOVE(diwhigh2hw(par->diwstrt_h, par->diwstrt_v,
+ par->diwstop_h, par->diwstop_v), diwhigh);
+#if 0
+ if (par->beamcon0 & BMC0_VARBEAMEN) {
+ (copl++)->l = CMOVE(vtotal2hw(par->vtotal), vtotal);
+ (copl++)->l = CMOVE(vbstrt2hw(par->vbstrt), vbstrt);
+ (copl++)->l = CMOVE(vbstop2hw(par->vbstop), vbstop);
+ }
+#endif
+ }
+ }
+ copdisplay.rebuild[1] = copl;
+
+ ami_update_par(info);
+ ami_rebuild_copper(info->par);
+}
-static struct fb_ops amifb_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = amifb_check_var,
- .fb_set_par = amifb_set_par,
- .fb_setcolreg = amifb_setcolreg,
- .fb_blank = amifb_blank,
- .fb_pan_display = amifb_pan_display,
- .fb_fillrect = amifb_fillrect,
- .fb_copyarea = amifb_copyarea,
- .fb_imageblit = amifb_imageblit,
- .fb_ioctl = amifb_ioctl,
-};
static void __init amifb_setup_mcap(char *spec)
{
@@ -1216,13 +2339,13 @@ static void __init amifb_setup_mcap(char *spec)
if (hmax <= 0 || hmax <= hmin)
return;
- fb_info.monspecs.vfmin = vmin;
- fb_info.monspecs.vfmax = vmax;
- fb_info.monspecs.hfmin = hmin;
- fb_info.monspecs.hfmax = hmax;
+ amifb_hfmin = hmin;
+ amifb_hfmax = hmax;
+ amifb_vfmin = vmin;
+ amifb_vfmax = vmax;
}
-int __init amifb_setup(char *options)
+static int __init amifb_setup(char *options)
{
char *this_opt;
@@ -1238,9 +2361,9 @@ int __init amifb_setup(char *options)
} else if (!strcmp(this_opt, "ilbm"))
amifb_ilbm = 1;
else if (!strncmp(this_opt, "monitorcap:", 11))
- amifb_setup_mcap(this_opt+11);
+ amifb_setup_mcap(this_opt + 11);
else if (!strncmp(this_opt, "fstart:", 7))
- min_fstrt = simple_strtoul(this_opt+7, NULL, 0);
+ min_fstrt = simple_strtoul(this_opt + 7, NULL, 0);
else
mode_option = this_opt;
}
@@ -1259,7 +2382,8 @@ static int amifb_check_var(struct fb_var_screeninfo *var,
struct amifb_par par;
/* Validate wanted screen parameters */
- if ((err = ami_decode_var(var, &par)))
+ err = ami_decode_var(var, &par, info);
+ if (err)
return err;
/* Encode (possibly rounded) screen parameters */
@@ -1270,16 +2394,19 @@ static int amifb_check_var(struct fb_var_screeninfo *var,
static int amifb_set_par(struct fb_info *info)
{
- struct amifb_par *par = (struct amifb_par *)info->par;
+ struct amifb_par *par = info->par;
+ int error;
do_vmode_pan = 0;
do_vmode_full = 0;
/* Decode wanted screen parameters */
- ami_decode_var(&info->var, par);
+ error = ami_decode_var(&info->var, par, info);
+ if (error)
+ return error;
/* Set new videomode */
- ami_build_copper();
+ ami_build_copper(info);
/* Set VBlank trigger */
do_vmode_full = 1;
@@ -1295,20 +2422,20 @@ static int amifb_set_par(struct fb_info *info)
info->fix.type = FB_TYPE_PLANES;
info->fix.type_aux = 0;
}
- info->fix.line_length = div8(upx(16<<maxfmode, par->vxres));
+ info->fix.line_length = div8(upx(16 << maxfmode, par->vxres));
if (par->vmode & FB_VMODE_YWRAP) {
info->fix.ywrapstep = 1;
info->fix.xpanstep = 0;
info->fix.ypanstep = 0;
info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YWRAP |
- FBINFO_READS_FAST; /* override SCROLL_REDRAW */
+ FBINFO_READS_FAST; /* override SCROLL_REDRAW */
} else {
info->fix.ywrapstep = 0;
if (par->vmode & FB_VMODE_SMOOTH_XPAN)
info->fix.xpanstep = 1;
else
- info->fix.xpanstep = 16<<maxfmode;
+ info->fix.xpanstep = 16 << maxfmode;
info->fix.ypanstep = 1;
info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
}
@@ -1317,6 +2444,95 @@ static int amifb_set_par(struct fb_info *info)
/*
+ * Set a single color register. The values supplied are already
+ * rounded down to the hardware's capabilities (according to the
+ * entries in the var structure). Return != 0 for invalid regno.
+ */
+
+static int amifb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+ u_int transp, struct fb_info *info)
+{
+ const struct amifb_par *par = info->par;
+
+ if (IS_AGA) {
+ if (regno > 255)
+ return 1;
+ } else if (par->bplcon0 & BPC0_SHRES) {
+ if (regno > 3)
+ return 1;
+ } else {
+ if (regno > 31)
+ return 1;
+ }
+ red >>= 8;
+ green >>= 8;
+ blue >>= 8;
+ if (!regno) {
+ red0 = red;
+ green0 = green;
+ blue0 = blue;
+ }
+
+ /*
+ * Update the corresponding Hardware Color Register, unless it's Color
+ * Register 0 and the screen is blanked.
+ *
+ * VBlank is switched off to protect bplcon3 or ecs_palette[] from
+ * being changed by ami_do_blank() during the VBlank.
+ */
+
+ if (regno || !is_blanked) {
+#if defined(CONFIG_FB_AMIGA_AGA)
+ if (IS_AGA) {
+ u_short bplcon3 = par->bplcon3;
+ VBlankOff();
+ custom.bplcon3 = bplcon3 | (regno << 8 & 0xe000);
+ custom.color[regno & 31] = rgb2hw8_high(red, green,
+ blue);
+ custom.bplcon3 = bplcon3 | (regno << 8 & 0xe000) |
+ BPC3_LOCT;
+ custom.color[regno & 31] = rgb2hw8_low(red, green,
+ blue);
+ custom.bplcon3 = bplcon3;
+ VBlankOn();
+ } else
+#endif
+#if defined(CONFIG_FB_AMIGA_ECS)
+ if (par->bplcon0 & BPC0_SHRES) {
+ u_short color, mask;
+ int i;
+
+ mask = 0x3333;
+ color = rgb2hw2(red, green, blue);
+ VBlankOff();
+ for (i = regno + 12; i >= (int)regno; i -= 4)
+ custom.color[i] = ecs_palette[i] = (ecs_palette[i] & mask) | color;
+ mask <<= 2; color >>= 2;
+ regno = down16(regno) + mul4(mod4(regno));
+ for (i = regno + 3; i >= (int)regno; i--)
+ custom.color[i] = ecs_palette[i] = (ecs_palette[i] & mask) | color;
+ VBlankOn();
+ } else
+#endif
+ custom.color[regno] = rgb2hw4(red, green, blue);
+ }
+ return 0;
+}
+
+
+ /*
+ * Blank the display.
+ */
+
+static int amifb_blank(int blank, struct fb_info *info)
+{
+ do_blank = blank ? blank : -1;
+
+ return 0;
+}
+
+
+ /*
* Pan or Wrap the Display
*
* This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
@@ -1327,18 +2543,19 @@ static int amifb_pan_display(struct fb_var_screeninfo *var,
{
if (var->vmode & FB_VMODE_YWRAP) {
if (var->yoffset < 0 ||
- var->yoffset >= info->var.yres_virtual || var->xoffset)
- return -EINVAL;
+ var->yoffset >= info->var.yres_virtual || var->xoffset)
+ return -EINVAL;
} else {
/*
* TODO: There will be problems when xpan!=1, so some columns
* on the right side will never be seen
*/
- if (var->xoffset+info->var.xres > upx(16<<maxfmode, info->var.xres_virtual) ||
- var->yoffset+info->var.yres > info->var.yres_virtual)
+ if (var->xoffset + info->var.xres >
+ upx(16 << maxfmode, info->var.xres_virtual) ||
+ var->yoffset + info->var.yres > info->var.yres_virtual)
return -EINVAL;
}
- ami_pan_var(var);
+ ami_pan_var(var, info);
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
if (var->vmode & FB_VMODE_YWRAP)
@@ -1360,10 +2577,10 @@ static int amifb_pan_display(struct fb_var_screeninfo *var,
#endif
- /*
- * Compose two values, using a bitmask as decision value
- * This is equivalent to (a & mask) | (b & ~mask)
- */
+ /*
+ * Compose two values, using a bitmask as decision value
+ * This is equivalent to (a & mask) | (b & ~mask)
+ */
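/*
 * Worked example (illustrative values, not part of the patch): with
 * a = 0x1234, b = 0xabcd and mask = 0xff00,
 * comp(a, b, mask) = (0x1234 & 0xff00) | (0xabcd & 0x00ff)
 *                  = 0x1200 | 0x00cd = 0x12cd,
 * i.e. the high byte is taken from a and the low byte from b.
 */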
static inline unsigned long comp(unsigned long a, unsigned long b,
unsigned long mask)
@@ -1379,29 +2596,29 @@ static inline unsigned long xor(unsigned long a, unsigned long b,
}
- /*
- * Unaligned forward bit copy using 32-bit or 64-bit memory accesses
- */
+ /*
+ * Unaligned forward bit copy using 32-bit or 64-bit memory accesses
+ */
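/*
 * Illustrative note on the mask convention used by the copy and fill
 * helpers below (not part of the patch): bits are counted from the most
 * significant end of each long, so `first' (~0UL >> dst_idx) keeps
 * everything from bit position dst_idx onwards in the first word, and
 * `last' keeps everything up to (dst_idx + n) in the final word; comp()
 * then merges the copied bits with the untouched destination bits.
 */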
static void bitcpy(unsigned long *dst, int dst_idx, const unsigned long *src,
int src_idx, u32 n)
{
unsigned long first, last;
- int shift = dst_idx-src_idx, left, right;
+ int shift = dst_idx - src_idx, left, right;
unsigned long d0, d1;
int m;
if (!n)
return;
- shift = dst_idx-src_idx;
+ shift = dst_idx - src_idx;
first = ~0UL >> dst_idx;
- last = ~(~0UL >> ((dst_idx+n) % BITS_PER_LONG));
+ last = ~(~0UL >> ((dst_idx + n) % BITS_PER_LONG));
if (!shift) {
// Same alignment for source and dest
- if (dst_idx+n <= BITS_PER_LONG) {
+ if (dst_idx + n <= BITS_PER_LONG) {
// Single word
if (last)
first &= last;
@@ -1413,7 +2630,7 @@ static void bitcpy(unsigned long *dst, int dst_idx, const unsigned long *src,
*dst = comp(*src, *dst, first);
dst++;
src++;
- n -= BITS_PER_LONG-dst_idx;
+ n -= BITS_PER_LONG - dst_idx;
}
// Main chunk
@@ -1439,17 +2656,17 @@ static void bitcpy(unsigned long *dst, int dst_idx, const unsigned long *src,
} else {
// Different alignment for source and dest
- right = shift & (BITS_PER_LONG-1);
- left = -shift & (BITS_PER_LONG-1);
+ right = shift & (BITS_PER_LONG - 1);
+ left = -shift & (BITS_PER_LONG - 1);
- if (dst_idx+n <= BITS_PER_LONG) {
+ if (dst_idx + n <= BITS_PER_LONG) {
// Single destination word
if (last)
first &= last;
if (shift > 0) {
// Single source word
*dst = comp(*src >> right, *dst, first);
- } else if (src_idx+n <= BITS_PER_LONG) {
+ } else if (src_idx + n <= BITS_PER_LONG) {
// Single source word
*dst = comp(*src << left, *dst, first);
} else {
@@ -1467,7 +2684,7 @@ static void bitcpy(unsigned long *dst, int dst_idx, const unsigned long *src,
// Single source word
*dst = comp(d0 >> right, *dst, first);
dst++;
- n -= BITS_PER_LONG-dst_idx;
+ n -= BITS_PER_LONG - dst_idx;
} else {
// 2 source words
d1 = *src++;
@@ -1475,7 +2692,7 @@ static void bitcpy(unsigned long *dst, int dst_idx, const unsigned long *src,
first);
d0 = d1;
dst++;
- n -= BITS_PER_LONG-dst_idx;
+ n -= BITS_PER_LONG - dst_idx;
}
// Main chunk
@@ -1519,40 +2736,40 @@ static void bitcpy(unsigned long *dst, int dst_idx, const unsigned long *src,
}
- /*
- * Unaligned reverse bit copy using 32-bit or 64-bit memory accesses
- */
+ /*
+ * Unaligned reverse bit copy using 32-bit or 64-bit memory accesses
+ */
static void bitcpy_rev(unsigned long *dst, int dst_idx,
const unsigned long *src, int src_idx, u32 n)
{
unsigned long first, last;
- int shift = dst_idx-src_idx, left, right;
+ int shift = dst_idx - src_idx, left, right;
unsigned long d0, d1;
int m;
if (!n)
return;
- dst += (n-1)/BITS_PER_LONG;
- src += (n-1)/BITS_PER_LONG;
- if ((n-1) % BITS_PER_LONG) {
- dst_idx += (n-1) % BITS_PER_LONG;
+ dst += (n - 1) / BITS_PER_LONG;
+ src += (n - 1) / BITS_PER_LONG;
+ if ((n - 1) % BITS_PER_LONG) {
+ dst_idx += (n - 1) % BITS_PER_LONG;
dst += dst_idx >> SHIFT_PER_LONG;
- dst_idx &= BITS_PER_LONG-1;
- src_idx += (n-1) % BITS_PER_LONG;
+ dst_idx &= BITS_PER_LONG - 1;
+ src_idx += (n - 1) % BITS_PER_LONG;
src += src_idx >> SHIFT_PER_LONG;
- src_idx &= BITS_PER_LONG-1;
+ src_idx &= BITS_PER_LONG - 1;
}
- shift = dst_idx-src_idx;
- first = ~0UL << (BITS_PER_LONG-1-dst_idx);
- last = ~(~0UL << (BITS_PER_LONG-1-((dst_idx-n) % BITS_PER_LONG)));
+ shift = dst_idx - src_idx;
+ first = ~0UL << (BITS_PER_LONG - 1 - dst_idx);
+ last = ~(~0UL << (BITS_PER_LONG - 1 - ((dst_idx - n) % BITS_PER_LONG)));
if (!shift) {
// Same alignment for source and dest
- if ((unsigned long)dst_idx+1 >= n) {
+ if ((unsigned long)dst_idx + 1 >= n) {
// Single word
if (last)
first &= last;
@@ -1564,7 +2781,7 @@ static void bitcpy_rev(unsigned long *dst, int dst_idx,
*dst = comp(*src, *dst, first);
dst--;
src--;
- n -= dst_idx+1;
+ n -= dst_idx + 1;
}
// Main chunk
@@ -1590,17 +2807,17 @@ static void bitcpy_rev(unsigned long *dst, int dst_idx,
} else {
// Different alignment for source and dest
- right = shift & (BITS_PER_LONG-1);
- left = -shift & (BITS_PER_LONG-1);
+ right = shift & (BITS_PER_LONG - 1);
+ left = -shift & (BITS_PER_LONG - 1);
- if ((unsigned long)dst_idx+1 >= n) {
+ if ((unsigned long)dst_idx + 1 >= n) {
// Single destination word
if (last)
first &= last;
if (shift < 0) {
// Single source word
*dst = comp(*src << left, *dst, first);
- } else if (1+(unsigned long)src_idx >= n) {
+ } else if (1 + (unsigned long)src_idx >= n) {
// Single source word
*dst = comp(*src >> right, *dst, first);
} else {
@@ -1618,7 +2835,7 @@ static void bitcpy_rev(unsigned long *dst, int dst_idx,
// Single source word
*dst = comp(d0 << left, *dst, first);
dst--;
- n -= dst_idx+1;
+ n -= dst_idx + 1;
} else {
// 2 source words
d1 = *src--;
@@ -1626,7 +2843,7 @@ static void bitcpy_rev(unsigned long *dst, int dst_idx,
first);
d0 = d1;
dst--;
- n -= dst_idx+1;
+ n -= dst_idx + 1;
}
// Main chunk
@@ -1670,30 +2887,30 @@ static void bitcpy_rev(unsigned long *dst, int dst_idx,
}
- /*
- * Unaligned forward inverting bit copy using 32-bit or 64-bit memory
- * accesses
- */
+ /*
+ * Unaligned forward inverting bit copy using 32-bit or 64-bit memory
+ * accesses
+ */
static void bitcpy_not(unsigned long *dst, int dst_idx,
const unsigned long *src, int src_idx, u32 n)
{
unsigned long first, last;
- int shift = dst_idx-src_idx, left, right;
+ int shift = dst_idx - src_idx, left, right;
unsigned long d0, d1;
int m;
if (!n)
return;
- shift = dst_idx-src_idx;
+ shift = dst_idx - src_idx;
first = ~0UL >> dst_idx;
- last = ~(~0UL >> ((dst_idx+n) % BITS_PER_LONG));
+ last = ~(~0UL >> ((dst_idx + n) % BITS_PER_LONG));
if (!shift) {
// Same alignment for source and dest
- if (dst_idx+n <= BITS_PER_LONG) {
+ if (dst_idx + n <= BITS_PER_LONG) {
// Single word
if (last)
first &= last;
@@ -1705,7 +2922,7 @@ static void bitcpy_not(unsigned long *dst, int dst_idx,
*dst = comp(~*src, *dst, first);
dst++;
src++;
- n -= BITS_PER_LONG-dst_idx;
+ n -= BITS_PER_LONG - dst_idx;
}
// Main chunk
@@ -1731,17 +2948,17 @@ static void bitcpy_not(unsigned long *dst, int dst_idx,
} else {
// Different alignment for source and dest
- right = shift & (BITS_PER_LONG-1);
- left = -shift & (BITS_PER_LONG-1);
+ right = shift & (BITS_PER_LONG - 1);
+ left = -shift & (BITS_PER_LONG - 1);
- if (dst_idx+n <= BITS_PER_LONG) {
+ if (dst_idx + n <= BITS_PER_LONG) {
// Single destination word
if (last)
first &= last;
if (shift > 0) {
// Single source word
*dst = comp(~*src >> right, *dst, first);
- } else if (src_idx+n <= BITS_PER_LONG) {
+ } else if (src_idx + n <= BITS_PER_LONG) {
// Single source word
*dst = comp(~*src << left, *dst, first);
} else {
@@ -1759,7 +2976,7 @@ static void bitcpy_not(unsigned long *dst, int dst_idx,
// Single source word
*dst = comp(d0 >> right, *dst, first);
dst++;
- n -= BITS_PER_LONG-dst_idx;
+ n -= BITS_PER_LONG - dst_idx;
} else {
// 2 source words
d1 = ~*src++;
@@ -1767,7 +2984,7 @@ static void bitcpy_not(unsigned long *dst, int dst_idx,
first);
d0 = d1;
dst++;
- n -= BITS_PER_LONG-dst_idx;
+ n -= BITS_PER_LONG - dst_idx;
}
// Main chunk
@@ -1811,9 +3028,9 @@ static void bitcpy_not(unsigned long *dst, int dst_idx,
}
- /*
- * Unaligned 32-bit pattern fill using 32/64-bit memory accesses
- */
+ /*
+ * Unaligned 32-bit pattern fill using 32/64-bit memory accesses
+ */
static void bitfill32(unsigned long *dst, int dst_idx, u32 pat, u32 n)
{
@@ -1828,9 +3045,9 @@ static void bitfill32(unsigned long *dst, int dst_idx, u32 pat, u32 n)
#endif
first = ~0UL >> dst_idx;
- last = ~(~0UL >> ((dst_idx+n) % BITS_PER_LONG));
+ last = ~(~0UL >> ((dst_idx + n) % BITS_PER_LONG));
- if (dst_idx+n <= BITS_PER_LONG) {
+ if (dst_idx + n <= BITS_PER_LONG) {
// Single word
if (last)
first &= last;
@@ -1841,7 +3058,7 @@ static void bitfill32(unsigned long *dst, int dst_idx, u32 pat, u32 n)
if (first) {
*dst = comp(val, *dst, first);
dst++;
- n -= BITS_PER_LONG-dst_idx;
+ n -= BITS_PER_LONG - dst_idx;
}
// Main chunk
@@ -1867,9 +3084,9 @@ static void bitfill32(unsigned long *dst, int dst_idx, u32 pat, u32 n)
}
- /*
- * Unaligned 32-bit pattern xor using 32/64-bit memory accesses
- */
+ /*
+ * Unaligned 32-bit pattern xor using 32/64-bit memory accesses
+ */
static void bitxor32(unsigned long *dst, int dst_idx, u32 pat, u32 n)
{
@@ -1884,9 +3101,9 @@ static void bitxor32(unsigned long *dst, int dst_idx, u32 pat, u32 n)
#endif
first = ~0UL >> dst_idx;
- last = ~(~0UL >> ((dst_idx+n) % BITS_PER_LONG));
+ last = ~(~0UL >> ((dst_idx + n) % BITS_PER_LONG));
- if (dst_idx+n <= BITS_PER_LONG) {
+ if (dst_idx + n <= BITS_PER_LONG) {
// Single word
if (last)
first &= last;
@@ -1897,7 +3114,7 @@ static void bitxor32(unsigned long *dst, int dst_idx, u32 pat, u32 n)
if (first) {
*dst = xor(val, *dst, first);
dst++;
- n -= BITS_PER_LONG-dst_idx;
+ n -= BITS_PER_LONG - dst_idx;
}
// Main chunk
@@ -1924,12 +3141,12 @@ static inline void fill_one_line(int bpp, unsigned long next_plane,
{
while (1) {
dst += dst_idx >> SHIFT_PER_LONG;
- dst_idx &= (BITS_PER_LONG-1);
+ dst_idx &= (BITS_PER_LONG - 1);
bitfill32(dst, dst_idx, color & 1 ? ~0 : 0, n);
if (!--bpp)
break;
color >>= 1;
- dst_idx += next_plane*8;
+ dst_idx += next_plane * 8;
}
}
@@ -1939,12 +3156,12 @@ static inline void xor_one_line(int bpp, unsigned long next_plane,
{
while (color) {
dst += dst_idx >> SHIFT_PER_LONG;
- dst_idx &= (BITS_PER_LONG-1);
+ dst_idx &= (BITS_PER_LONG - 1);
bitxor32(dst, dst_idx, color & 1 ? ~0 : 0, n);
if (!--bpp)
break;
color >>= 1;
- dst_idx += next_plane*8;
+ dst_idx += next_plane * 8;
}
}
@@ -1952,7 +3169,7 @@ static inline void xor_one_line(int bpp, unsigned long next_plane,
static void amifb_fillrect(struct fb_info *info,
const struct fb_fillrect *rect)
{
- struct amifb_par *par = (struct amifb_par *)info->par;
+ struct amifb_par *par = info->par;
int dst_idx, x2, y2;
unsigned long *dst;
u32 width, height;
@@ -1972,23 +3189,23 @@ static void amifb_fillrect(struct fb_info *info,
height = y2 - rect->dy;
dst = (unsigned long *)
- ((unsigned long)info->screen_base & ~(BYTES_PER_LONG-1));
- dst_idx = ((unsigned long)info->screen_base & (BYTES_PER_LONG-1))*8;
- dst_idx += rect->dy*par->next_line*8+rect->dx;
+ ((unsigned long)info->screen_base & ~(BYTES_PER_LONG - 1));
+ dst_idx = ((unsigned long)info->screen_base & (BYTES_PER_LONG - 1)) * 8;
+ dst_idx += rect->dy * par->next_line * 8 + rect->dx;
while (height--) {
switch (rect->rop) {
- case ROP_COPY:
+ case ROP_COPY:
fill_one_line(info->var.bits_per_pixel,
par->next_plane, dst, dst_idx, width,
rect->color);
break;
- case ROP_XOR:
+ case ROP_XOR:
xor_one_line(info->var.bits_per_pixel, par->next_plane,
dst, dst_idx, width, rect->color);
break;
}
- dst_idx += par->next_line*8;
+ dst_idx += par->next_line * 8;
}
}
@@ -1998,14 +3215,14 @@ static inline void copy_one_line(int bpp, unsigned long next_plane,
{
while (1) {
dst += dst_idx >> SHIFT_PER_LONG;
- dst_idx &= (BITS_PER_LONG-1);
+ dst_idx &= (BITS_PER_LONG - 1);
src += src_idx >> SHIFT_PER_LONG;
- src_idx &= (BITS_PER_LONG-1);
+ src_idx &= (BITS_PER_LONG - 1);
bitcpy(dst, dst_idx, src, src_idx, n);
if (!--bpp)
break;
- dst_idx += next_plane*8;
- src_idx += next_plane*8;
+ dst_idx += next_plane * 8;
+ src_idx += next_plane * 8;
}
}
@@ -2015,14 +3232,14 @@ static inline void copy_one_line_rev(int bpp, unsigned long next_plane,
{
while (1) {
dst += dst_idx >> SHIFT_PER_LONG;
- dst_idx &= (BITS_PER_LONG-1);
+ dst_idx &= (BITS_PER_LONG - 1);
src += src_idx >> SHIFT_PER_LONG;
- src_idx &= (BITS_PER_LONG-1);
+ src_idx &= (BITS_PER_LONG - 1);
bitcpy_rev(dst, dst_idx, src, src_idx, n);
if (!--bpp)
break;
- dst_idx += next_plane*8;
- src_idx += next_plane*8;
+ dst_idx += next_plane * 8;
+ src_idx += next_plane * 8;
}
}
@@ -2030,7 +3247,7 @@ static inline void copy_one_line_rev(int bpp, unsigned long next_plane,
static void amifb_copyarea(struct fb_info *info,
const struct fb_copyarea *area)
{
- struct amifb_par *par = (struct amifb_par *)info->par;
+ struct amifb_par *par = info->par;
int x2, y2;
u32 dx, dy, sx, sy, width, height;
unsigned long *dst, *src;
@@ -2065,16 +3282,16 @@ static void amifb_copyarea(struct fb_info *info,
rev_copy = 1;
}
dst = (unsigned long *)
- ((unsigned long)info->screen_base & ~(BYTES_PER_LONG-1));
+ ((unsigned long)info->screen_base & ~(BYTES_PER_LONG - 1));
src = dst;
- dst_idx = ((unsigned long)info->screen_base & (BYTES_PER_LONG-1))*8;
+ dst_idx = ((unsigned long)info->screen_base & (BYTES_PER_LONG - 1)) * 8;
src_idx = dst_idx;
- dst_idx += dy*par->next_line*8+dx;
- src_idx += sy*par->next_line*8+sx;
+ dst_idx += dy * par->next_line * 8 + dx;
+ src_idx += sy * par->next_line * 8 + sx;
if (rev_copy) {
while (height--) {
- dst_idx -= par->next_line*8;
- src_idx -= par->next_line*8;
+ dst_idx -= par->next_line * 8;
+ src_idx -= par->next_line * 8;
copy_one_line_rev(info->var.bits_per_pixel,
par->next_plane, dst, dst_idx, src,
src_idx, width);
@@ -2084,8 +3301,8 @@ static void amifb_copyarea(struct fb_info *info,
copy_one_line(info->var.bits_per_pixel,
par->next_plane, dst, dst_idx, src,
src_idx, width);
- dst_idx += par->next_line*8;
- src_idx += par->next_line*8;
+ dst_idx += par->next_line * 8;
+ src_idx += par->next_line * 8;
}
}
}
@@ -2095,34 +3312,35 @@ static inline void expand_one_line(int bpp, unsigned long next_plane,
unsigned long *dst, int dst_idx, u32 n,
const u8 *data, u32 bgcolor, u32 fgcolor)
{
- const unsigned long *src;
- int src_idx;
-
- while (1) {
- dst += dst_idx >> SHIFT_PER_LONG;
- dst_idx &= (BITS_PER_LONG-1);
- if ((bgcolor ^ fgcolor) & 1) {
- src = (unsigned long *)((unsigned long)data & ~(BYTES_PER_LONG-1));
- src_idx = ((unsigned long)data & (BYTES_PER_LONG-1))*8;
- if (fgcolor & 1)
- bitcpy(dst, dst_idx, src, src_idx, n);
- else
- bitcpy_not(dst, dst_idx, src, src_idx, n);
- /* set or clear */
- } else
- bitfill32(dst, dst_idx, fgcolor & 1 ? ~0 : 0, n);
- if (!--bpp)
- break;
- bgcolor >>= 1;
- fgcolor >>= 1;
- dst_idx += next_plane*8;
- }
+ const unsigned long *src;
+ int src_idx;
+
+ while (1) {
+ dst += dst_idx >> SHIFT_PER_LONG;
+ dst_idx &= (BITS_PER_LONG - 1);
+ if ((bgcolor ^ fgcolor) & 1) {
+ src = (unsigned long *)
+ ((unsigned long)data & ~(BYTES_PER_LONG - 1));
+ src_idx = ((unsigned long)data & (BYTES_PER_LONG - 1)) * 8;
+ if (fgcolor & 1)
+ bitcpy(dst, dst_idx, src, src_idx, n);
+ else
+ bitcpy_not(dst, dst_idx, src, src_idx, n);
+ /* set or clear */
+ } else
+ bitfill32(dst, dst_idx, fgcolor & 1 ? ~0 : 0, n);
+ if (!--bpp)
+ break;
+ bgcolor >>= 1;
+ fgcolor >>= 1;
+ dst_idx += next_plane * 8;
+ }
}
static void amifb_imageblit(struct fb_info *info, const struct fb_image *image)
{
- struct amifb_par *par = (struct amifb_par *)info->par;
+ struct amifb_par *par = info->par;
int x2, y2;
unsigned long *dst;
int dst_idx;
@@ -2145,17 +3363,17 @@ static void amifb_imageblit(struct fb_info *info, const struct fb_image *image)
if (image->depth == 1) {
dst = (unsigned long *)
- ((unsigned long)info->screen_base & ~(BYTES_PER_LONG-1));
- dst_idx = ((unsigned long)info->screen_base & (BYTES_PER_LONG-1))*8;
- dst_idx += dy*par->next_line*8+dx;
+ ((unsigned long)info->screen_base & ~(BYTES_PER_LONG - 1));
+ dst_idx = ((unsigned long)info->screen_base & (BYTES_PER_LONG - 1)) * 8;
+ dst_idx += dy * par->next_line * 8 + dx;
src = image->data;
- pitch = (image->width+7)/8;
+ pitch = (image->width + 7) / 8;
while (height--) {
expand_one_line(info->var.bits_per_pixel,
par->next_plane, dst, dst_idx, width,
src, image->bg_color,
image->fg_color);
- dst_idx += par->next_line*8;
+ dst_idx += par->next_line * 8;
src += pitch;
}
} else {
@@ -2182,45 +3400,119 @@ static int amifb_ioctl(struct fb_info *info,
int i;
switch (cmd) {
- case FBIOGET_FCURSORINFO:
- i = ami_get_fix_cursorinfo(&crsr.fix);
- if (i)
- return i;
- return copy_to_user(argp, &crsr.fix,
- sizeof(crsr.fix)) ? -EFAULT : 0;
-
- case FBIOGET_VCURSORINFO:
- i = ami_get_var_cursorinfo(&crsr.var,
- ((struct fb_var_cursorinfo __user *)arg)->data);
- if (i)
- return i;
- return copy_to_user(argp, &crsr.var,
- sizeof(crsr.var)) ? -EFAULT : 0;
-
- case FBIOPUT_VCURSORINFO:
- if (copy_from_user(&crsr.var, argp, sizeof(crsr.var)))
- return -EFAULT;
- return ami_set_var_cursorinfo(&crsr.var,
- ((struct fb_var_cursorinfo __user *)arg)->data);
-
- case FBIOGET_CURSORSTATE:
- i = ami_get_cursorstate(&crsr.state);
- if (i)
- return i;
- return copy_to_user(argp, &crsr.state,
- sizeof(crsr.state)) ? -EFAULT : 0;
-
- case FBIOPUT_CURSORSTATE:
- if (copy_from_user(&crsr.state, argp,
- sizeof(crsr.state)))
- return -EFAULT;
- return ami_set_cursorstate(&crsr.state);
+ case FBIOGET_FCURSORINFO:
+ i = ami_get_fix_cursorinfo(&crsr.fix, info->par);
+ if (i)
+ return i;
+ return copy_to_user(argp, &crsr.fix,
+ sizeof(crsr.fix)) ? -EFAULT : 0;
+
+ case FBIOGET_VCURSORINFO:
+ i = ami_get_var_cursorinfo(&crsr.var,
+ ((struct fb_var_cursorinfo __user *)arg)->data,
+ info->par);
+ if (i)
+ return i;
+ return copy_to_user(argp, &crsr.var,
+ sizeof(crsr.var)) ? -EFAULT : 0;
+
+ case FBIOPUT_VCURSORINFO:
+ if (copy_from_user(&crsr.var, argp, sizeof(crsr.var)))
+ return -EFAULT;
+ return ami_set_var_cursorinfo(&crsr.var,
+ ((struct fb_var_cursorinfo __user *)arg)->data,
+ info->par);
+
+ case FBIOGET_CURSORSTATE:
+ i = ami_get_cursorstate(&crsr.state, info->par);
+ if (i)
+ return i;
+ return copy_to_user(argp, &crsr.state,
+ sizeof(crsr.state)) ? -EFAULT : 0;
+
+ case FBIOPUT_CURSORSTATE:
+ if (copy_from_user(&crsr.state, argp, sizeof(crsr.state)))
+ return -EFAULT;
+ return ami_set_cursorstate(&crsr.state, info->par);
}
return -EINVAL;
}
/*
+ * Flash the cursor (called by VBlank interrupt)
+ */
+
+static int flash_cursor(void)
+{
+ static int cursorcount = 1;
+
+ if (cursormode == FB_CURSOR_FLASH) {
+ if (!--cursorcount) {
+ cursorstate = -cursorstate;
+ cursorcount = cursorrate;
+ if (!is_blanked)
+ return 1;
+ }
+ }
+ return 0;
+}
+
+ /*
+ * VBlank Display Interrupt
+ */
+
+static irqreturn_t amifb_interrupt(int irq, void *dev_id)
+{
+ struct amifb_par *par = dev_id;
+
+ if (do_vmode_pan || do_vmode_full)
+ ami_update_display(par);
+
+ if (do_vmode_full)
+ ami_init_display(par);
+
+ if (do_vmode_pan) {
+ flash_cursor();
+ ami_rebuild_copper(par);
+ do_cursor = do_vmode_pan = 0;
+ } else if (do_cursor) {
+ flash_cursor();
+ ami_set_sprite(par);
+ do_cursor = 0;
+ } else {
+ if (flash_cursor())
+ ami_set_sprite(par);
+ }
+
+ if (do_blank) {
+ ami_do_blank(par);
+ do_blank = 0;
+ }
+
+ if (do_vmode_full) {
+ ami_reinit_copper(par);
+ do_vmode_full = 0;
+ }
+ return IRQ_HANDLED;
+}
+
+
+static struct fb_ops amifb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = amifb_check_var,
+ .fb_set_par = amifb_set_par,
+ .fb_setcolreg = amifb_setcolreg,
+ .fb_blank = amifb_blank,
+ .fb_pan_display = amifb_pan_display,
+ .fb_fillrect = amifb_fillrect,
+ .fb_copyarea = amifb_copyarea,
+ .fb_imageblit = amifb_imageblit,
+ .fb_ioctl = amifb_ioctl,
+};
+
+
+ /*
* Allocate, Clear and Align a Block of Chip Memory
*/
@@ -2250,6 +3542,7 @@ static inline void chipfree(void)
static int __init amifb_probe(struct platform_device *pdev)
{
+ struct fb_info *info;
int tag, i, err = 0;
u_long chipptr;
u_int defmode;
@@ -2265,71 +3558,80 @@ static int __init amifb_probe(struct platform_device *pdev)
#endif
custom.dmacon = DMAF_ALL | DMAF_MASTER;
+ info = framebuffer_alloc(sizeof(struct amifb_par), &pdev->dev);
+ if (!info) {
+ dev_err(&pdev->dev, "framebuffer_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ strcpy(info->fix.id, "Amiga ");
+ info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
+ info->fix.accel = FB_ACCEL_AMIGABLITT;
+
switch (amiga_chipset) {
#ifdef CONFIG_FB_AMIGA_OCS
- case CS_OCS:
- strcat(fb_info.fix.id, "OCS");
+ case CS_OCS:
+ strcat(info->fix.id, "OCS");
default_chipset:
- chipset = TAG_OCS;
- maxdepth[TAG_SHRES] = 0; /* OCS means no SHRES */
- maxdepth[TAG_HIRES] = 4;
- maxdepth[TAG_LORES] = 6;
- maxfmode = TAG_FMODE_1;
- defmode = amiga_vblank == 50 ? DEFMODE_PAL
- : DEFMODE_NTSC;
- fb_info.fix.smem_len = VIDEOMEMSIZE_OCS;
- break;
+ chipset = TAG_OCS;
+ maxdepth[TAG_SHRES] = 0; /* OCS means no SHRES */
+ maxdepth[TAG_HIRES] = 4;
+ maxdepth[TAG_LORES] = 6;
+ maxfmode = TAG_FMODE_1;
+ defmode = amiga_vblank == 50 ? DEFMODE_PAL : DEFMODE_NTSC;
+ info->fix.smem_len = VIDEOMEMSIZE_OCS;
+ break;
#endif /* CONFIG_FB_AMIGA_OCS */
#ifdef CONFIG_FB_AMIGA_ECS
- case CS_ECS:
- strcat(fb_info.fix.id, "ECS");
- chipset = TAG_ECS;
- maxdepth[TAG_SHRES] = 2;
- maxdepth[TAG_HIRES] = 4;
- maxdepth[TAG_LORES] = 6;
- maxfmode = TAG_FMODE_1;
- if (AMIGAHW_PRESENT(AMBER_FF))
- defmode = amiga_vblank == 50 ? DEFMODE_AMBER_PAL
- : DEFMODE_AMBER_NTSC;
- else
- defmode = amiga_vblank == 50 ? DEFMODE_PAL
- : DEFMODE_NTSC;
- if (amiga_chip_avail()-CHIPRAM_SAFETY_LIMIT >
- VIDEOMEMSIZE_ECS_2M)
- fb_info.fix.smem_len = VIDEOMEMSIZE_ECS_2M;
- else
- fb_info.fix.smem_len = VIDEOMEMSIZE_ECS_1M;
- break;
+ case CS_ECS:
+ strcat(info->fix.id, "ECS");
+ chipset = TAG_ECS;
+ maxdepth[TAG_SHRES] = 2;
+ maxdepth[TAG_HIRES] = 4;
+ maxdepth[TAG_LORES] = 6;
+ maxfmode = TAG_FMODE_1;
+ if (AMIGAHW_PRESENT(AMBER_FF))
+ defmode = amiga_vblank == 50 ? DEFMODE_AMBER_PAL
+ : DEFMODE_AMBER_NTSC;
+ else
+ defmode = amiga_vblank == 50 ? DEFMODE_PAL
+ : DEFMODE_NTSC;
+ if (amiga_chip_avail() - CHIPRAM_SAFETY_LIMIT >
+ VIDEOMEMSIZE_ECS_2M)
+ info->fix.smem_len = VIDEOMEMSIZE_ECS_2M;
+ else
+ info->fix.smem_len = VIDEOMEMSIZE_ECS_1M;
+ break;
#endif /* CONFIG_FB_AMIGA_ECS */
#ifdef CONFIG_FB_AMIGA_AGA
- case CS_AGA:
- strcat(fb_info.fix.id, "AGA");
- chipset = TAG_AGA;
- maxdepth[TAG_SHRES] = 8;
- maxdepth[TAG_HIRES] = 8;
- maxdepth[TAG_LORES] = 8;
- maxfmode = TAG_FMODE_4;
- defmode = DEFMODE_AGA;
- if (amiga_chip_avail()-CHIPRAM_SAFETY_LIMIT >
- VIDEOMEMSIZE_AGA_2M)
- fb_info.fix.smem_len = VIDEOMEMSIZE_AGA_2M;
- else
- fb_info.fix.smem_len = VIDEOMEMSIZE_AGA_1M;
- break;
+ case CS_AGA:
+ strcat(info->fix.id, "AGA");
+ chipset = TAG_AGA;
+ maxdepth[TAG_SHRES] = 8;
+ maxdepth[TAG_HIRES] = 8;
+ maxdepth[TAG_LORES] = 8;
+ maxfmode = TAG_FMODE_4;
+ defmode = DEFMODE_AGA;
+ if (amiga_chip_avail() - CHIPRAM_SAFETY_LIMIT >
+ VIDEOMEMSIZE_AGA_2M)
+ info->fix.smem_len = VIDEOMEMSIZE_AGA_2M;
+ else
+ info->fix.smem_len = VIDEOMEMSIZE_AGA_1M;
+ break;
#endif /* CONFIG_FB_AMIGA_AGA */
- default:
+ default:
#ifdef CONFIG_FB_AMIGA_OCS
- printk("Unknown graphics chipset, defaulting to OCS\n");
- strcat(fb_info.fix.id, "Unknown");
- goto default_chipset;
+ printk("Unknown graphics chipset, defaulting to OCS\n");
+ strcat(info->fix.id, "Unknown");
+ goto default_chipset;
#else /* CONFIG_FB_AMIGA_OCS */
- err = -ENODEV;
- goto amifb_error;
+ err = -ENODEV;
+ goto release;
#endif /* CONFIG_FB_AMIGA_OCS */
- break;
+ break;
}
/*
@@ -2356,42 +3658,44 @@ default_chipset:
}
}
- /*
- * These monitor specs are for a typical Amiga monitor (e.g. A1960)
- */
- if (fb_info.monspecs.hfmin == 0) {
- fb_info.monspecs.hfmin = 15000;
- fb_info.monspecs.hfmax = 38000;
- fb_info.monspecs.vfmin = 49;
- fb_info.monspecs.vfmax = 90;
+ if (amifb_hfmin) {
+ info->monspecs.hfmin = amifb_hfmin;
+ info->monspecs.hfmax = amifb_hfmax;
+ info->monspecs.vfmin = amifb_vfmin;
+ info->monspecs.vfmax = amifb_vfmax;
+ } else {
+ /*
+ * These are for a typical Amiga monitor (e.g. A1960)
+ */
+ info->monspecs.hfmin = 15000;
+ info->monspecs.hfmax = 38000;
+ info->monspecs.vfmin = 49;
+ info->monspecs.vfmax = 90;
}
- fb_info.fbops = &amifb_ops;
- fb_info.par = &currentpar;
- fb_info.flags = FBINFO_DEFAULT;
- fb_info.device = &pdev->dev;
+ info->fbops = &amifb_ops;
+ info->flags = FBINFO_DEFAULT;
+ info->device = &pdev->dev;
- if (!fb_find_mode(&fb_info.var, &fb_info, mode_option, ami_modedb,
+ if (!fb_find_mode(&info->var, info, mode_option, ami_modedb,
NUM_TOTAL_MODES, &ami_modedb[defmode], 4)) {
err = -EINVAL;
- goto amifb_error;
+ goto release;
}
fb_videomode_to_modelist(ami_modedb, NUM_TOTAL_MODES,
- &fb_info.modelist);
+ &info->modelist);
round_down_bpp = 0;
- chipptr = chipalloc(fb_info.fix.smem_len+
- SPRITEMEMSIZE+
- DUMMYSPRITEMEMSIZE+
- COPINITSIZE+
- 4*COPLISTSIZE);
+ chipptr = chipalloc(info->fix.smem_len + SPRITEMEMSIZE +
+ DUMMYSPRITEMEMSIZE + COPINITSIZE +
+ 4 * COPLISTSIZE);
if (!chipptr) {
err = -ENOMEM;
- goto amifb_error;
+ goto release;
}
- assignchunk(videomemory, u_long, chipptr, fb_info.fix.smem_len);
+ assignchunk(videomemory, u_long, chipptr, info->fix.smem_len);
assignchunk(spritememory, u_long, chipptr, SPRITEMEMSIZE);
assignchunk(dummysprite, u_short *, chipptr, DUMMYSPRITEMEMSIZE);
assignchunk(copdisplay.init, copins *, chipptr, COPINITSIZE);
@@ -2403,1398 +3707,78 @@ default_chipset:
/*
* access the videomem with writethrough cache
*/
- fb_info.fix.smem_start = (u_long)ZTWO_PADDR(videomemory);
- videomemory = (u_long)ioremap_writethrough(fb_info.fix.smem_start,
- fb_info.fix.smem_len);
+ info->fix.smem_start = (u_long)ZTWO_PADDR(videomemory);
+ videomemory = (u_long)ioremap_writethrough(info->fix.smem_start,
+ info->fix.smem_len);
if (!videomemory) {
- printk("amifb: WARNING! unable to map videomem cached writethrough\n");
- fb_info.screen_base = (char *)ZTWO_VADDR(fb_info.fix.smem_start);
+ dev_warn(&pdev->dev,
+ "Unable to map videomem cached writethrough\n");
+ info->screen_base = (char *)ZTWO_VADDR(info->fix.smem_start);
} else
- fb_info.screen_base = (char *)videomemory;
+ info->screen_base = (char *)videomemory;
memset(dummysprite, 0, DUMMYSPRITEMEMSIZE);
/*
- * Enable Display DMA
- */
-
- custom.dmacon = DMAF_SETCLR | DMAF_MASTER | DMAF_RASTER | DMAF_COPPER |
- DMAF_BLITTER | DMAF_SPRITE;
-
- /*
* Make sure the Copper has something to do
*/
-
ami_init_copper();
- if (request_irq(IRQ_AMIGA_COPPER, amifb_interrupt, 0,
- "fb vertb handler", &currentpar)) {
- err = -EBUSY;
- goto amifb_error;
- }
-
- err = fb_alloc_cmap(&fb_info.cmap, 1<<fb_info.var.bits_per_pixel, 0);
- if (err)
- goto amifb_error;
-
- if (register_framebuffer(&fb_info) < 0) {
- err = -EINVAL;
- goto amifb_error;
- }
-
- printk("fb%d: %s frame buffer device, using %dK of video memory\n",
- fb_info.node, fb_info.fix.id, fb_info.fix.smem_len>>10);
-
- return 0;
-
-amifb_error:
- amifb_deinit(pdev);
- return err;
-}
-
-static void amifb_deinit(struct platform_device *pdev)
-{
- if (fb_info.cmap.len)
- fb_dealloc_cmap(&fb_info.cmap);
- fb_dealloc_cmap(&fb_info.cmap);
- chipfree();
- if (videomemory)
- iounmap((void*)videomemory);
- custom.dmacon = DMAF_ALL | DMAF_MASTER;
-}
-
-
- /*
- * Blank the display.
- */
-
-static int amifb_blank(int blank, struct fb_info *info)
-{
- do_blank = blank ? blank : -1;
-
- return 0;
-}
-
- /*
- * Flash the cursor (called by VBlank interrupt)
- */
-
-static int flash_cursor(void)
-{
- static int cursorcount = 1;
-
- if (cursormode == FB_CURSOR_FLASH) {
- if (!--cursorcount) {
- cursorstate = -cursorstate;
- cursorcount = cursorrate;
- if (!is_blanked)
- return 1;
- }
- }
- return 0;
-}
-
- /*
- * VBlank Display Interrupt
- */
-
-static irqreturn_t amifb_interrupt(int irq, void *dev_id)
-{
- if (do_vmode_pan || do_vmode_full)
- ami_update_display();
-
- if (do_vmode_full)
- ami_init_display();
-
- if (do_vmode_pan) {
- flash_cursor();
- ami_rebuild_copper();
- do_cursor = do_vmode_pan = 0;
- } else if (do_cursor) {
- flash_cursor();
- ami_set_sprite();
- do_cursor = 0;
- } else {
- if (flash_cursor())
- ami_set_sprite();
- }
-
- if (do_blank) {
- ami_do_blank();
- do_blank = 0;
- }
-
- if (do_vmode_full) {
- ami_reinit_copper();
- do_vmode_full = 0;
- }
- return IRQ_HANDLED;
-}
-
-/* --------------------------- Hardware routines --------------------------- */
-
- /*
- * Get the video params out of `var'. If a value doesn't fit, round
- * it up, if it's too big, return -EINVAL.
- */
-
-static int ami_decode_var(struct fb_var_screeninfo *var,
- struct amifb_par *par)
-{
- u_short clk_shift, line_shift;
- u_long maxfetchstop, fstrt, fsize, fconst, xres_n, yres_n;
- u_int htotal, vtotal;
-
- /*
- * Find a matching Pixel Clock
- */
-
- for (clk_shift = TAG_SHRES; clk_shift <= TAG_LORES; clk_shift++)
- if (var->pixclock <= pixclock[clk_shift])
- break;
- if (clk_shift > TAG_LORES) {
- DPRINTK("pixclock too high\n");
- return -EINVAL;
- }
- par->clk_shift = clk_shift;
-
- /*
- * Check the Geometry Values
- */
-
- if ((par->xres = var->xres) < 64)
- par->xres = 64;
- if ((par->yres = var->yres) < 64)
- par->yres = 64;
- if ((par->vxres = var->xres_virtual) < par->xres)
- par->vxres = par->xres;
- if ((par->vyres = var->yres_virtual) < par->yres)
- par->vyres = par->yres;
-
- par->bpp = var->bits_per_pixel;
- if (!var->nonstd) {
- if (par->bpp < 1)
- par->bpp = 1;
- if (par->bpp > maxdepth[clk_shift]) {
- if (round_down_bpp && maxdepth[clk_shift])
- par->bpp = maxdepth[clk_shift];
- else {
- DPRINTK("invalid bpp\n");
- return -EINVAL;
- }
- }
- } else if (var->nonstd == FB_NONSTD_HAM) {
- if (par->bpp < 6)
- par->bpp = 6;
- if (par->bpp != 6) {
- if (par->bpp < 8)
- par->bpp = 8;
- if (par->bpp != 8 || !IS_AGA) {
- DPRINTK("invalid bpp for ham mode\n");
- return -EINVAL;
- }
- }
- } else {
- DPRINTK("unknown nonstd mode\n");
- return -EINVAL;
- }
-
- /*
- * FB_VMODE_SMOOTH_XPAN will be cleared, if one of the folloing
- * checks failed and smooth scrolling is not possible
- */
-
- par->vmode = var->vmode | FB_VMODE_SMOOTH_XPAN;
- switch (par->vmode & FB_VMODE_MASK) {
- case FB_VMODE_INTERLACED:
- line_shift = 0;
- break;
- case FB_VMODE_NONINTERLACED:
- line_shift = 1;
- break;
- case FB_VMODE_DOUBLE:
- if (!IS_AGA) {
- DPRINTK("double mode only possible with aga\n");
- return -EINVAL;
- }
- line_shift = 2;
- break;
- default:
- DPRINTK("unknown video mode\n");
- return -EINVAL;
- break;
- }
- par->line_shift = line_shift;
-
- /*
- * Vertical and Horizontal Timings
- */
-
- xres_n = par->xres<<clk_shift;
- yres_n = par->yres<<line_shift;
- par->htotal = down8((var->left_margin+par->xres+var->right_margin+var->hsync_len)<<clk_shift);
- par->vtotal = down2(((var->upper_margin+par->yres+var->lower_margin+var->vsync_len)<<line_shift)+1);
-
- if (IS_AGA)
- par->bplcon3 = sprpixmode[clk_shift];
- else
- par->bplcon3 = 0;
- if (var->sync & FB_SYNC_BROADCAST) {
- par->diwstop_h = par->htotal-((var->right_margin-var->hsync_len)<<clk_shift);
- if (IS_AGA)
- par->diwstop_h += mod4(var->hsync_len);
- else
- par->diwstop_h = down4(par->diwstop_h);
-
- par->diwstrt_h = par->diwstop_h - xres_n;
- par->diwstop_v = par->vtotal-((var->lower_margin-var->vsync_len)<<line_shift);
- par->diwstrt_v = par->diwstop_v - yres_n;
- if (par->diwstop_h >= par->htotal+8) {
- DPRINTK("invalid diwstop_h\n");
- return -EINVAL;
- }
- if (par->diwstop_v > par->vtotal) {
- DPRINTK("invalid diwstop_v\n");
- return -EINVAL;
- }
-
- if (!IS_OCS) {
- /* Initialize sync with some reasonable values for pwrsave */
- par->hsstrt = 160;
- par->hsstop = 320;
- par->vsstrt = 30;
- par->vsstop = 34;
- } else {
- par->hsstrt = 0;
- par->hsstop = 0;
- par->vsstrt = 0;
- par->vsstop = 0;
- }
- if (par->vtotal > (PAL_VTOTAL+NTSC_VTOTAL)/2) {
- /* PAL video mode */
- if (par->htotal != PAL_HTOTAL) {
- DPRINTK("htotal invalid for pal\n");
- return -EINVAL;
- }
- if (par->diwstrt_h < PAL_DIWSTRT_H) {
- DPRINTK("diwstrt_h too low for pal\n");
- return -EINVAL;
- }
- if (par->diwstrt_v < PAL_DIWSTRT_V) {
- DPRINTK("diwstrt_v too low for pal\n");
- return -EINVAL;
- }
- htotal = PAL_HTOTAL>>clk_shift;
- vtotal = PAL_VTOTAL>>1;
- if (!IS_OCS) {
- par->beamcon0 = BMC0_PAL;
- par->bplcon3 |= BPC3_BRDRBLNK;
- } else if (AMIGAHW_PRESENT(AGNUS_HR_PAL) ||
- AMIGAHW_PRESENT(AGNUS_HR_NTSC)) {
- par->beamcon0 = BMC0_PAL;
- par->hsstop = 1;
- } else if (amiga_vblank != 50) {
- DPRINTK("pal not supported by this chipset\n");
- return -EINVAL;
- }
- } else {
- /* NTSC video mode
- * In the AGA chipset seems to be hardware bug with BPC3_BRDRBLNK
- * and NTSC activated, so than better let diwstop_h <= 1812
- */
- if (par->htotal != NTSC_HTOTAL) {
- DPRINTK("htotal invalid for ntsc\n");
- return -EINVAL;
- }
- if (par->diwstrt_h < NTSC_DIWSTRT_H) {
- DPRINTK("diwstrt_h too low for ntsc\n");
- return -EINVAL;
- }
- if (par->diwstrt_v < NTSC_DIWSTRT_V) {
- DPRINTK("diwstrt_v too low for ntsc\n");
- return -EINVAL;
- }
- htotal = NTSC_HTOTAL>>clk_shift;
- vtotal = NTSC_VTOTAL>>1;
- if (!IS_OCS) {
- par->beamcon0 = 0;
- par->bplcon3 |= BPC3_BRDRBLNK;
- } else if (AMIGAHW_PRESENT(AGNUS_HR_PAL) ||
- AMIGAHW_PRESENT(AGNUS_HR_NTSC)) {
- par->beamcon0 = 0;
- par->hsstop = 1;
- } else if (amiga_vblank != 60) {
- DPRINTK("ntsc not supported by this chipset\n");
- return -EINVAL;
- }
- }
- if (IS_OCS) {
- if (par->diwstrt_h >= 1024 || par->diwstop_h < 1024 ||
- par->diwstrt_v >= 512 || par->diwstop_v < 256) {
- DPRINTK("invalid position for display on ocs\n");
- return -EINVAL;
- }
- }
- } else if (!IS_OCS) {
- /* Programmable video mode */
- par->hsstrt = var->right_margin<<clk_shift;
- par->hsstop = (var->right_margin+var->hsync_len)<<clk_shift;
- par->diwstop_h = par->htotal - mod8(par->hsstrt) + 8 - (1 << clk_shift);
- if (!IS_AGA)
- par->diwstop_h = down4(par->diwstop_h) - 16;
- par->diwstrt_h = par->diwstop_h - xres_n;
- par->hbstop = par->diwstrt_h + 4;
- par->hbstrt = par->diwstop_h + 4;
- if (par->hbstrt >= par->htotal + 8)
- par->hbstrt -= par->htotal;
- par->hcenter = par->hsstrt + (par->htotal >> 1);
- par->vsstrt = var->lower_margin<<line_shift;
- par->vsstop = (var->lower_margin+var->vsync_len)<<line_shift;
- par->diwstop_v = par->vtotal;
- if ((par->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED)
- par->diwstop_v -= 2;
- par->diwstrt_v = par->diwstop_v - yres_n;
- par->vbstop = par->diwstrt_v - 2;
- par->vbstrt = par->diwstop_v - 2;
- if (par->vtotal > 2048) {
- DPRINTK("vtotal too high\n");
- return -EINVAL;
- }
- if (par->htotal > 2048) {
- DPRINTK("htotal too high\n");
- return -EINVAL;
- }
- par->bplcon3 |= BPC3_EXTBLKEN;
- par->beamcon0 = BMC0_HARDDIS | BMC0_VARVBEN | BMC0_LOLDIS |
- BMC0_VARVSYEN | BMC0_VARHSYEN | BMC0_VARBEAMEN |
- BMC0_PAL | BMC0_VARCSYEN;
- if (var->sync & FB_SYNC_HOR_HIGH_ACT)
- par->beamcon0 |= BMC0_HSYTRUE;
- if (var->sync & FB_SYNC_VERT_HIGH_ACT)
- par->beamcon0 |= BMC0_VSYTRUE;
- if (var->sync & FB_SYNC_COMP_HIGH_ACT)
- par->beamcon0 |= BMC0_CSYTRUE;
- htotal = par->htotal>>clk_shift;
- vtotal = par->vtotal>>1;
- } else {
- DPRINTK("only broadcast modes possible for ocs\n");
- return -EINVAL;
- }
-
- /*
- * Checking the DMA timing
- */
-
- fconst = 16<<maxfmode<<clk_shift;
-
- /*
- * smallest window start value without turn off other dma cycles
- * than sprite1-7, unless you change min_fstrt
- */
-
-
- fsize = ((maxfmode+clk_shift <= 1) ? fconst : 64);
- fstrt = downx(fconst, par->diwstrt_h-4) - fsize;
- if (fstrt < min_fstrt) {
- DPRINTK("fetch start too low\n");
- return -EINVAL;
- }
-
- /*
- * smallest window start value where smooth scrolling is possible
- */
-
- fstrt = downx(fconst, par->diwstrt_h-fconst+(1<<clk_shift)-4) - fsize;
- if (fstrt < min_fstrt)
- par->vmode &= ~FB_VMODE_SMOOTH_XPAN;
-
- maxfetchstop = down16(par->htotal - 80);
-
- fstrt = downx(fconst, par->diwstrt_h-4) - 64 - fconst;
- fsize = upx(fconst, xres_n + modx(fconst, downx(1<<clk_shift, par->diwstrt_h-4)));
- if (fstrt + fsize > maxfetchstop)
- par->vmode &= ~FB_VMODE_SMOOTH_XPAN;
-
- fsize = upx(fconst, xres_n);
- if (fstrt + fsize > maxfetchstop) {
- DPRINTK("fetch stop too high\n");
- return -EINVAL;
- }
-
- if (maxfmode + clk_shift <= 1) {
- fsize = up64(xres_n + fconst - 1);
- if (min_fstrt + fsize - 64 > maxfetchstop)
- par->vmode &= ~FB_VMODE_SMOOTH_XPAN;
-
- fsize = up64(xres_n);
- if (min_fstrt + fsize - 64 > maxfetchstop) {
- DPRINTK("fetch size too high\n");
- return -EINVAL;
- }
-
- fsize -= 64;
- } else
- fsize -= fconst;
-
- /*
- * Check if there is enough time to update the bitplane pointers for ywrap
- */
-
- if (par->htotal-fsize-64 < par->bpp*64)
- par->vmode &= ~FB_VMODE_YWRAP;
-
- /*
- * Bitplane calculations and check the Memory Requirements
- */
-
- if (amifb_ilbm) {
- par->next_plane = div8(upx(16<<maxfmode, par->vxres));
- par->next_line = par->bpp*par->next_plane;
- if (par->next_line * par->vyres > fb_info.fix.smem_len) {
- DPRINTK("too few video mem\n");
- return -EINVAL;
- }
- } else {
- par->next_line = div8(upx(16<<maxfmode, par->vxres));
- par->next_plane = par->vyres*par->next_line;
- if (par->next_plane * par->bpp > fb_info.fix.smem_len) {
- DPRINTK("too few video mem\n");
- return -EINVAL;
- }
- }
-
- /*
- * Hardware Register Values
- */
-
- par->bplcon0 = BPC0_COLOR | bplpixmode[clk_shift];
- if (!IS_OCS)
- par->bplcon0 |= BPC0_ECSENA;
- if (par->bpp == 8)
- par->bplcon0 |= BPC0_BPU3;
- else
- par->bplcon0 |= par->bpp<<12;
- if (var->nonstd == FB_NONSTD_HAM)
- par->bplcon0 |= BPC0_HAM;
- if (var->sync & FB_SYNC_EXT)
- par->bplcon0 |= BPC0_ERSY;
-
- if (IS_AGA)
- par->fmode = bplfetchmode[maxfmode];
-
- switch (par->vmode & FB_VMODE_MASK) {
- case FB_VMODE_INTERLACED:
- par->bplcon0 |= BPC0_LACE;
- break;
- case FB_VMODE_DOUBLE:
- if (IS_AGA)
- par->fmode |= FMODE_SSCAN2 | FMODE_BSCAN2;
- break;
- }
-
- if (!((par->vmode ^ var->vmode) & FB_VMODE_YWRAP)) {
- par->xoffset = var->xoffset;
- par->yoffset = var->yoffset;
- if (par->vmode & FB_VMODE_YWRAP) {
- if (par->xoffset || par->yoffset < 0 || par->yoffset >= par->vyres)
- par->xoffset = par->yoffset = 0;
- } else {
- if (par->xoffset < 0 || par->xoffset > upx(16<<maxfmode, par->vxres-par->xres) ||
- par->yoffset < 0 || par->yoffset > par->vyres-par->yres)
- par->xoffset = par->yoffset = 0;
- }
- } else
- par->xoffset = par->yoffset = 0;
-
- par->crsr.crsr_x = par->crsr.crsr_y = 0;
- par->crsr.spot_x = par->crsr.spot_y = 0;
- par->crsr.height = par->crsr.width = 0;
-
- return 0;
-}
-
- /*
- * Fill the `var' structure based on the values in `par' and maybe
- * other values read out of the hardware.
- */
-
-static int ami_encode_var(struct fb_var_screeninfo *var,
- struct amifb_par *par)
-{
- u_short clk_shift, line_shift;
-
- memset(var, 0, sizeof(struct fb_var_screeninfo));
-
- clk_shift = par->clk_shift;
- line_shift = par->line_shift;
-
- var->xres = par->xres;
- var->yres = par->yres;
- var->xres_virtual = par->vxres;
- var->yres_virtual = par->vyres;
- var->xoffset = par->xoffset;
- var->yoffset = par->yoffset;
-
- var->bits_per_pixel = par->bpp;
- var->grayscale = 0;
-
- var->red.offset = 0;
- var->red.msb_right = 0;
- var->red.length = par->bpp;
- if (par->bplcon0 & BPC0_HAM)
- var->red.length -= 2;
- var->blue = var->green = var->red;
- var->transp.offset = 0;
- var->transp.length = 0;
- var->transp.msb_right = 0;
-
- if (par->bplcon0 & BPC0_HAM)
- var->nonstd = FB_NONSTD_HAM;
- else
- var->nonstd = 0;
- var->activate = 0;
-
- var->height = -1;
- var->width = -1;
-
- var->pixclock = pixclock[clk_shift];
-
- if (IS_AGA && par->fmode & FMODE_BSCAN2)
- var->vmode = FB_VMODE_DOUBLE;
- else if (par->bplcon0 & BPC0_LACE)
- var->vmode = FB_VMODE_INTERLACED;
- else
- var->vmode = FB_VMODE_NONINTERLACED;
-
- if (!IS_OCS && par->beamcon0 & BMC0_VARBEAMEN) {
- var->hsync_len = (par->hsstop-par->hsstrt)>>clk_shift;
- var->right_margin = par->hsstrt>>clk_shift;
- var->left_margin = (par->htotal>>clk_shift) - var->xres - var->right_margin - var->hsync_len;
- var->vsync_len = (par->vsstop-par->vsstrt)>>line_shift;
- var->lower_margin = par->vsstrt>>line_shift;
- var->upper_margin = (par->vtotal>>line_shift) - var->yres - var->lower_margin - var->vsync_len;
- var->sync = 0;
- if (par->beamcon0 & BMC0_HSYTRUE)
- var->sync |= FB_SYNC_HOR_HIGH_ACT;
- if (par->beamcon0 & BMC0_VSYTRUE)
- var->sync |= FB_SYNC_VERT_HIGH_ACT;
- if (par->beamcon0 & BMC0_CSYTRUE)
- var->sync |= FB_SYNC_COMP_HIGH_ACT;
- } else {
- var->sync = FB_SYNC_BROADCAST;
- var->hsync_len = (152>>clk_shift) + mod4(par->diwstop_h);
- var->right_margin = ((par->htotal - down4(par->diwstop_h))>>clk_shift) + var->hsync_len;
- var->left_margin = (par->htotal>>clk_shift) - var->xres - var->right_margin - var->hsync_len;
- var->vsync_len = 4>>line_shift;
- var->lower_margin = ((par->vtotal - par->diwstop_v)>>line_shift) + var->vsync_len;
- var->upper_margin = (((par->vtotal - 2)>>line_shift) + 1) - var->yres -
- var->lower_margin - var->vsync_len;
- }
-
- if (par->bplcon0 & BPC0_ERSY)
- var->sync |= FB_SYNC_EXT;
- if (par->vmode & FB_VMODE_YWRAP)
- var->vmode |= FB_VMODE_YWRAP;
-
- return 0;
-}
-
-
/*
- * Pan or Wrap the Display
- *
- * This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
- * in `var'.
- */
-
-static void ami_pan_var(struct fb_var_screeninfo *var)
-{
- struct amifb_par *par = &currentpar;
-
- par->xoffset = var->xoffset;
- par->yoffset = var->yoffset;
- if (var->vmode & FB_VMODE_YWRAP)
- par->vmode |= FB_VMODE_YWRAP;
- else
- par->vmode &= ~FB_VMODE_YWRAP;
-
- do_vmode_pan = 0;
- ami_update_par();
- do_vmode_pan = 1;
-}
-
- /*
- * Update hardware
- */
-
-static int ami_update_par(void)
-{
- struct amifb_par *par = &currentpar;
- short clk_shift, vshift, fstrt, fsize, fstop, fconst, shift, move, mod;
-
- clk_shift = par->clk_shift;
-
- if (!(par->vmode & FB_VMODE_SMOOTH_XPAN))
- par->xoffset = upx(16<<maxfmode, par->xoffset);
-
- fconst = 16<<maxfmode<<clk_shift;
- vshift = modx(16<<maxfmode, par->xoffset);
- fstrt = par->diwstrt_h - (vshift<<clk_shift) - 4;
- fsize = (par->xres+vshift)<<clk_shift;
- shift = modx(fconst, fstrt);
- move = downx(2<<maxfmode, div8(par->xoffset));
- if (maxfmode + clk_shift > 1) {
- fstrt = downx(fconst, fstrt) - 64;
- fsize = upx(fconst, fsize);
- fstop = fstrt + fsize - fconst;
- } else {
- mod = fstrt = downx(fconst, fstrt) - fconst;
- fstop = fstrt + upx(fconst, fsize) - 64;
- fsize = up64(fsize);
- fstrt = fstop - fsize + 64;
- if (fstrt < min_fstrt) {
- fstop += min_fstrt - fstrt;
- fstrt = min_fstrt;
- }
- move = move - div8((mod-fstrt)>>clk_shift);
- }
- mod = par->next_line - div8(fsize>>clk_shift);
- par->ddfstrt = fstrt;
- par->ddfstop = fstop;
- par->bplcon1 = hscroll2hw(shift);
- par->bpl2mod = mod;
- if (par->bplcon0 & BPC0_LACE)
- par->bpl2mod += par->next_line;
- if (IS_AGA && (par->fmode & FMODE_BSCAN2))
- par->bpl1mod = -div8(fsize>>clk_shift);
- else
- par->bpl1mod = par->bpl2mod;
-
- if (par->yoffset) {
- par->bplpt0 = fb_info.fix.smem_start + par->next_line*par->yoffset + move;
- if (par->vmode & FB_VMODE_YWRAP) {
- if (par->yoffset > par->vyres-par->yres) {
- par->bplpt0wrap = fb_info.fix.smem_start + move;
- if (par->bplcon0 & BPC0_LACE && mod2(par->diwstrt_v+par->vyres-par->yoffset))
- par->bplpt0wrap += par->next_line;
- }
- }
- } else
- par->bplpt0 = fb_info.fix.smem_start + move;
-
- if (par->bplcon0 & BPC0_LACE && mod2(par->diwstrt_v))
- par->bplpt0 += par->next_line;
-
- return 0;
-}
-
-
- /*
- * Set a single color register. The values supplied are already
- * rounded down to the hardware's capabilities (according to the
- * entries in the var structure). Return != 0 for invalid regno.
- */
-
-static int amifb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
- u_int transp, struct fb_info *info)
-{
- if (IS_AGA) {
- if (regno > 255)
- return 1;
- } else if (currentpar.bplcon0 & BPC0_SHRES) {
- if (regno > 3)
- return 1;
- } else {
- if (regno > 31)
- return 1;
- }
- red >>= 8;
- green >>= 8;
- blue >>= 8;
- if (!regno) {
- red0 = red;
- green0 = green;
- blue0 = blue;
- }
-
- /*
- * Update the corresponding Hardware Color Register, unless it's Color
- * Register 0 and the screen is blanked.
- *
- * VBlank is switched off to protect bplcon3 or ecs_palette[] from
- * being changed by ami_do_blank() during the VBlank.
- */
-
- if (regno || !is_blanked) {
-#if defined(CONFIG_FB_AMIGA_AGA)
- if (IS_AGA) {
- u_short bplcon3 = currentpar.bplcon3;
- VBlankOff();
- custom.bplcon3 = bplcon3 | (regno<<8 & 0xe000);
- custom.color[regno&31] = rgb2hw8_high(red, green, blue);
- custom.bplcon3 = bplcon3 | (regno<<8 & 0xe000) | BPC3_LOCT;
- custom.color[regno&31] = rgb2hw8_low(red, green, blue);
- custom.bplcon3 = bplcon3;
- VBlankOn();
- } else
-#endif
-#if defined(CONFIG_FB_AMIGA_ECS)
- if (currentpar.bplcon0 & BPC0_SHRES) {
- u_short color, mask;
- int i;
-
- mask = 0x3333;
- color = rgb2hw2(red, green, blue);
- VBlankOff();
- for (i = regno+12; i >= (int)regno; i -= 4)
- custom.color[i] = ecs_palette[i] = (ecs_palette[i] & mask) | color;
- mask <<=2; color >>= 2;
- regno = down16(regno)+mul4(mod4(regno));
- for (i = regno+3; i >= (int)regno; i--)
- custom.color[i] = ecs_palette[i] = (ecs_palette[i] & mask) | color;
- VBlankOn();
- } else
-#endif
- custom.color[regno] = rgb2hw4(red, green, blue);
- }
- return 0;
-}
-
-static void ami_update_display(void)
-{
- struct amifb_par *par = &currentpar;
-
- custom.bplcon1 = par->bplcon1;
- custom.bpl1mod = par->bpl1mod;
- custom.bpl2mod = par->bpl2mod;
- custom.ddfstrt = ddfstrt2hw(par->ddfstrt);
- custom.ddfstop = ddfstop2hw(par->ddfstop);
-}
-
- /*
- * Change the video mode (called by VBlank interrupt)
- */
-
-static void ami_init_display(void)
-{
- struct amifb_par *par = &currentpar;
- int i;
-
- custom.bplcon0 = par->bplcon0 & ~BPC0_LACE;
- custom.bplcon2 = (IS_OCS ? 0 : BPC2_KILLEHB) | BPC2_PF2P2 | BPC2_PF1P2;
- if (!IS_OCS) {
- custom.bplcon3 = par->bplcon3;
- if (IS_AGA)
- custom.bplcon4 = BPC4_ESPRM4 | BPC4_OSPRM4;
- if (par->beamcon0 & BMC0_VARBEAMEN) {
- custom.htotal = htotal2hw(par->htotal);
- custom.hbstrt = hbstrt2hw(par->hbstrt);
- custom.hbstop = hbstop2hw(par->hbstop);
- custom.hsstrt = hsstrt2hw(par->hsstrt);
- custom.hsstop = hsstop2hw(par->hsstop);
- custom.hcenter = hcenter2hw(par->hcenter);
- custom.vtotal = vtotal2hw(par->vtotal);
- custom.vbstrt = vbstrt2hw(par->vbstrt);
- custom.vbstop = vbstop2hw(par->vbstop);
- custom.vsstrt = vsstrt2hw(par->vsstrt);
- custom.vsstop = vsstop2hw(par->vsstop);
- }
- }
- if (!IS_OCS || par->hsstop)
- custom.beamcon0 = par->beamcon0;
- if (IS_AGA)
- custom.fmode = par->fmode;
-
- /*
- * The minimum period for audio depends on htotal
- */
-
- amiga_audio_min_period = div16(par->htotal);
-
- is_lace = par->bplcon0 & BPC0_LACE ? 1 : 0;
-#if 1
- if (is_lace) {
- i = custom.vposr >> 15;
- } else {
- custom.vposw = custom.vposr | 0x8000;
- i = 1;
- }
-#else
- i = 1;
- custom.vposw = custom.vposr | 0x8000;
-#endif
- custom.cop2lc = (u_short *)ZTWO_PADDR(copdisplay.list[currentcop][i]);
-}
-
- /*
- * (Un)Blank the screen (called by VBlank interrupt)
+ * Enable Display DMA
*/
+ custom.dmacon = DMAF_SETCLR | DMAF_MASTER | DMAF_RASTER | DMAF_COPPER |
+ DMAF_BLITTER | DMAF_SPRITE;
-static void ami_do_blank(void)
-{
- struct amifb_par *par = &currentpar;
-#if defined(CONFIG_FB_AMIGA_AGA)
- u_short bplcon3 = par->bplcon3;
-#endif
- u_char red, green, blue;
-
- if (do_blank > 0) {
- custom.dmacon = DMAF_RASTER | DMAF_SPRITE;
- red = green = blue = 0;
- if (!IS_OCS && do_blank > 1) {
- switch (do_blank) {
- case FB_BLANK_VSYNC_SUSPEND:
- custom.hsstrt = hsstrt2hw(par->hsstrt);
- custom.hsstop = hsstop2hw(par->hsstop);
- custom.vsstrt = vsstrt2hw(par->vtotal+4);
- custom.vsstop = vsstop2hw(par->vtotal+4);
- break;
- case FB_BLANK_HSYNC_SUSPEND:
- custom.hsstrt = hsstrt2hw(par->htotal+16);
- custom.hsstop = hsstop2hw(par->htotal+16);
- custom.vsstrt = vsstrt2hw(par->vsstrt);
- custom.vsstop = vsstrt2hw(par->vsstop);
- break;
- case FB_BLANK_POWERDOWN:
- custom.hsstrt = hsstrt2hw(par->htotal+16);
- custom.hsstop = hsstop2hw(par->htotal+16);
- custom.vsstrt = vsstrt2hw(par->vtotal+4);
- custom.vsstop = vsstop2hw(par->vtotal+4);
- break;
- }
- if (!(par->beamcon0 & BMC0_VARBEAMEN)) {
- custom.htotal = htotal2hw(par->htotal);
- custom.vtotal = vtotal2hw(par->vtotal);
- custom.beamcon0 = BMC0_HARDDIS | BMC0_VARBEAMEN |
- BMC0_VARVSYEN | BMC0_VARHSYEN | BMC0_VARCSYEN;
- }
- }
- } else {
- custom.dmacon = DMAF_SETCLR | DMAF_RASTER | DMAF_SPRITE;
- red = red0;
- green = green0;
- blue = blue0;
- if (!IS_OCS) {
- custom.hsstrt = hsstrt2hw(par->hsstrt);
- custom.hsstop = hsstop2hw(par->hsstop);
- custom.vsstrt = vsstrt2hw(par->vsstrt);
- custom.vsstop = vsstop2hw(par->vsstop);
- custom.beamcon0 = par->beamcon0;
- }
- }
-#if defined(CONFIG_FB_AMIGA_AGA)
- if (IS_AGA) {
- custom.bplcon3 = bplcon3;
- custom.color[0] = rgb2hw8_high(red, green, blue);
- custom.bplcon3 = bplcon3 | BPC3_LOCT;
- custom.color[0] = rgb2hw8_low(red, green, blue);
- custom.bplcon3 = bplcon3;
- } else
-#endif
-#if defined(CONFIG_FB_AMIGA_ECS)
- if (par->bplcon0 & BPC0_SHRES) {
- u_short color, mask;
- int i;
-
- mask = 0x3333;
- color = rgb2hw2(red, green, blue);
- for (i = 12; i >= 0; i -= 4)
- custom.color[i] = ecs_palette[i] = (ecs_palette[i] & mask) | color;
- mask <<=2; color >>= 2;
- for (i = 3; i >= 0; i--)
- custom.color[i] = ecs_palette[i] = (ecs_palette[i] & mask) | color;
- } else
-#endif
- custom.color[0] = rgb2hw4(red, green, blue);
- is_blanked = do_blank > 0 ? do_blank : 0;
-}
-
-static int ami_get_fix_cursorinfo(struct fb_fix_cursorinfo *fix)
-{
- struct amifb_par *par = &currentpar;
-
- fix->crsr_width = fix->crsr_xsize = par->crsr.width;
- fix->crsr_height = fix->crsr_ysize = par->crsr.height;
- fix->crsr_color1 = 17;
- fix->crsr_color2 = 18;
- return 0;
-}
-
-static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var, u_char __user *data)
-{
- struct amifb_par *par = &currentpar;
- register u_short *lspr, *sspr;
-#ifdef __mc68000__
- register u_long datawords asm ("d2");
-#else
- register u_long datawords;
-#endif
- register short delta;
- register u_char color;
- short height, width, bits, words;
- int size, alloc;
-
- size = par->crsr.height*par->crsr.width;
- alloc = var->height*var->width;
- var->height = par->crsr.height;
- var->width = par->crsr.width;
- var->xspot = par->crsr.spot_x;
- var->yspot = par->crsr.spot_y;
- if (size > var->height*var->width)
- return -ENAMETOOLONG;
- if (!access_ok(VERIFY_WRITE, data, size))
- return -EFAULT;
- delta = 1<<par->crsr.fmode;
- lspr = lofsprite + (delta<<1);
- if (par->bplcon0 & BPC0_LACE)
- sspr = shfsprite + (delta<<1);
- else
- sspr = NULL;
- for (height = (short)var->height-1; height >= 0; height--) {
- bits = 0; words = delta; datawords = 0;
- for (width = (short)var->width-1; width >= 0; width--) {
- if (bits == 0) {
- bits = 16; --words;
-#ifdef __mc68000__
- asm volatile ("movew %1@(%3:w:2),%0 ; swap %0 ; movew %1@+,%0"
- : "=d" (datawords), "=a" (lspr) : "1" (lspr), "d" (delta));
-#else
- datawords = (*(lspr+delta) << 16) | (*lspr++);
-#endif
- }
- --bits;
-#ifdef __mc68000__
- asm volatile (
- "clrb %0 ; swap %1 ; lslw #1,%1 ; roxlb #1,%0 ; "
- "swap %1 ; lslw #1,%1 ; roxlb #1,%0"
- : "=d" (color), "=d" (datawords) : "1" (datawords));
-#else
- color = (((datawords >> 30) & 2)
- | ((datawords >> 15) & 1));
- datawords <<= 1;
-#endif
- put_user(color, data++);
- }
- if (bits > 0) {
- --words; ++lspr;
- }
- while (--words >= 0)
- ++lspr;
-#ifdef __mc68000__
- asm volatile ("lea %0@(%4:w:2),%0 ; tstl %1 ; jeq 1f ; exg %0,%1\n1:"
- : "=a" (lspr), "=a" (sspr) : "0" (lspr), "1" (sspr), "d" (delta));
-#else
- lspr += delta;
- if (sspr) {
- u_short *tmp = lspr;
- lspr = sspr;
- sspr = tmp;
- }
-#endif
- }
- return 0;
-}
-
-static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var, u_char __user *data)
-{
- struct amifb_par *par = &currentpar;
- register u_short *lspr, *sspr;
-#ifdef __mc68000__
- register u_long datawords asm ("d2");
-#else
- register u_long datawords;
-#endif
- register short delta;
- u_short fmode;
- short height, width, bits, words;
+ err = request_irq(IRQ_AMIGA_COPPER, amifb_interrupt, 0,
+ "fb vertb handler", info->par);
+ if (err)
+ goto disable_dma;
- if (!var->width)
- return -EINVAL;
- else if (var->width <= 16)
- fmode = TAG_FMODE_1;
- else if (var->width <= 32)
- fmode = TAG_FMODE_2;
- else if (var->width <= 64)
- fmode = TAG_FMODE_4;
- else
- return -EINVAL;
- if (fmode > maxfmode)
- return -EINVAL;
- if (!var->height)
- return -EINVAL;
- if (!access_ok(VERIFY_READ, data, var->width*var->height))
- return -EFAULT;
- delta = 1<<fmode;
- lofsprite = shfsprite = (u_short *)spritememory;
- lspr = lofsprite + (delta<<1);
- if (par->bplcon0 & BPC0_LACE) {
- if (((var->height+4)<<fmode<<2) > SPRITEMEMSIZE)
- return -EINVAL;
- memset(lspr, 0, (var->height+4)<<fmode<<2);
- shfsprite += ((var->height+5)&-2)<<fmode;
- sspr = shfsprite + (delta<<1);
- } else {
- if (((var->height+2)<<fmode<<2) > SPRITEMEMSIZE)
- return -EINVAL;
- memset(lspr, 0, (var->height+2)<<fmode<<2);
- sspr = NULL;
- }
- for (height = (short)var->height-1; height >= 0; height--) {
- bits = 16; words = delta; datawords = 0;
- for (width = (short)var->width-1; width >= 0; width--) {
- unsigned long tdata = 0;
- get_user(tdata, data);
- data++;
-#ifdef __mc68000__
- asm volatile (
- "lsrb #1,%2 ; roxlw #1,%0 ; swap %0 ; "
- "lsrb #1,%2 ; roxlw #1,%0 ; swap %0"
- : "=d" (datawords)
- : "0" (datawords), "d" (tdata));
-#else
- datawords = ((datawords << 1) & 0xfffefffe);
- datawords |= tdata & 1;
- datawords |= (tdata & 2) << (16-1);
-#endif
- if (--bits == 0) {
- bits = 16; --words;
-#ifdef __mc68000__
- asm volatile ("swap %2 ; movew %2,%0@(%3:w:2) ; swap %2 ; movew %2,%0@+"
- : "=a" (lspr) : "0" (lspr), "d" (datawords), "d" (delta));
-#else
- *(lspr+delta) = (u_short) (datawords >> 16);
- *lspr++ = (u_short) (datawords & 0xffff);
-#endif
- }
- }
- if (bits < 16) {
- --words;
-#ifdef __mc68000__
- asm volatile (
- "swap %2 ; lslw %4,%2 ; movew %2,%0@(%3:w:2) ; "
- "swap %2 ; lslw %4,%2 ; movew %2,%0@+"
- : "=a" (lspr) : "0" (lspr), "d" (datawords), "d" (delta), "d" (bits));
-#else
- *(lspr+delta) = (u_short) (datawords >> (16+bits));
- *lspr++ = (u_short) ((datawords & 0x0000ffff) >> bits);
-#endif
- }
- while (--words >= 0) {
-#ifdef __mc68000__
- asm volatile ("moveql #0,%%d0 ; movew %%d0,%0@(%2:w:2) ; movew %%d0,%0@+"
- : "=a" (lspr) : "0" (lspr), "d" (delta) : "d0");
-#else
- *(lspr+delta) = 0;
- *lspr++ = 0;
-#endif
- }
-#ifdef __mc68000__
- asm volatile ("lea %0@(%4:w:2),%0 ; tstl %1 ; jeq 1f ; exg %0,%1\n1:"
- : "=a" (lspr), "=a" (sspr) : "0" (lspr), "1" (sspr), "d" (delta));
-#else
- lspr += delta;
- if (sspr) {
- u_short *tmp = lspr;
- lspr = sspr;
- sspr = tmp;
- }
-#endif
- }
- par->crsr.height = var->height;
- par->crsr.width = var->width;
- par->crsr.spot_x = var->xspot;
- par->crsr.spot_y = var->yspot;
- par->crsr.fmode = fmode;
- if (IS_AGA) {
- par->fmode &= ~(FMODE_SPAGEM | FMODE_SPR32);
- par->fmode |= sprfetchmode[fmode];
- custom.fmode = par->fmode;
- }
- return 0;
-}
+ err = fb_alloc_cmap(&info->cmap, 1 << info->var.bits_per_pixel, 0);
+ if (err)
+ goto free_irq;
-static int ami_get_cursorstate(struct fb_cursorstate *state)
-{
- struct amifb_par *par = &currentpar;
+ dev_set_drvdata(&pdev->dev, info);
- state->xoffset = par->crsr.crsr_x;
- state->yoffset = par->crsr.crsr_y;
- state->mode = cursormode;
- return 0;
-}
+ err = register_framebuffer(info);
+ if (err)
+ goto unset_drvdata;
-static int ami_set_cursorstate(struct fb_cursorstate *state)
-{
- struct amifb_par *par = &currentpar;
+ printk("fb%d: %s frame buffer device, using %dK of video memory\n",
+ info->node, info->fix.id, info->fix.smem_len>>10);
- par->crsr.crsr_x = state->xoffset;
- par->crsr.crsr_y = state->yoffset;
- if ((cursormode = state->mode) == FB_CURSOR_OFF)
- cursorstate = -1;
- do_cursor = 1;
return 0;
-}
-
-static void ami_set_sprite(void)
-{
- struct amifb_par *par = &currentpar;
- copins *copl, *cops;
- u_short hs, vs, ve;
- u_long pl, ps, pt;
- short mx, my;
-
- cops = copdisplay.list[currentcop][0];
- copl = copdisplay.list[currentcop][1];
- ps = pl = ZTWO_PADDR(dummysprite);
- mx = par->crsr.crsr_x-par->crsr.spot_x;
- my = par->crsr.crsr_y-par->crsr.spot_y;
- if (!(par->vmode & FB_VMODE_YWRAP)) {
- mx -= par->xoffset;
- my -= par->yoffset;
- }
- if (!is_blanked && cursorstate > 0 && par->crsr.height > 0 &&
- mx > -(short)par->crsr.width && mx < par->xres &&
- my > -(short)par->crsr.height && my < par->yres) {
- pl = ZTWO_PADDR(lofsprite);
- hs = par->diwstrt_h + (mx<<par->clk_shift) - 4;
- vs = par->diwstrt_v + (my<<par->line_shift);
- ve = vs + (par->crsr.height<<par->line_shift);
- if (par->bplcon0 & BPC0_LACE) {
- ps = ZTWO_PADDR(shfsprite);
- lofsprite[0] = spr2hw_pos(vs, hs);
- shfsprite[0] = spr2hw_pos(vs+1, hs);
- if (mod2(vs)) {
- lofsprite[1<<par->crsr.fmode] = spr2hw_ctl(vs, hs, ve);
- shfsprite[1<<par->crsr.fmode] = spr2hw_ctl(vs+1, hs, ve+1);
- pt = pl; pl = ps; ps = pt;
- } else {
- lofsprite[1<<par->crsr.fmode] = spr2hw_ctl(vs, hs, ve+1);
- shfsprite[1<<par->crsr.fmode] = spr2hw_ctl(vs+1, hs, ve);
- }
- } else {
- lofsprite[0] = spr2hw_pos(vs, hs) | (IS_AGA && (par->fmode & FMODE_BSCAN2) ? 0x80 : 0);
- lofsprite[1<<par->crsr.fmode] = spr2hw_ctl(vs, hs, ve);
- }
- }
- copl[cop_spr0ptrh].w[1] = highw(pl);
- copl[cop_spr0ptrl].w[1] = loww(pl);
- if (par->bplcon0 & BPC0_LACE) {
- cops[cop_spr0ptrh].w[1] = highw(ps);
- cops[cop_spr0ptrl].w[1] = loww(ps);
- }
-}
-
-
- /*
- * Initialise the Copper Initialisation List
- */
-
-static void __init ami_init_copper(void)
-{
- copins *cop = copdisplay.init;
- u_long p;
- int i;
-
- if (!IS_OCS) {
- (cop++)->l = CMOVE(BPC0_COLOR | BPC0_SHRES | BPC0_ECSENA, bplcon0);
- (cop++)->l = CMOVE(0x0181, diwstrt);
- (cop++)->l = CMOVE(0x0281, diwstop);
- (cop++)->l = CMOVE(0x0000, diwhigh);
- } else
- (cop++)->l = CMOVE(BPC0_COLOR, bplcon0);
- p = ZTWO_PADDR(dummysprite);
- for (i = 0; i < 8; i++) {
- (cop++)->l = CMOVE(0, spr[i].pos);
- (cop++)->l = CMOVE(highw(p), sprpt[i]);
- (cop++)->l = CMOVE2(loww(p), sprpt[i]);
- }
-
- (cop++)->l = CMOVE(IF_SETCLR | IF_COPER, intreq);
- copdisplay.wait = cop;
- (cop++)->l = CEND;
- (cop++)->l = CMOVE(0, copjmp2);
- cop->l = CEND;
-
- custom.cop1lc = (u_short *)ZTWO_PADDR(copdisplay.init);
- custom.copjmp1 = 0;
-}
-static void ami_reinit_copper(void)
-{
- struct amifb_par *par = &currentpar;
-
- copdisplay.init[cip_bplcon0].w[1] = ~(BPC0_BPU3 | BPC0_BPU2 | BPC0_BPU1 | BPC0_BPU0) & par->bplcon0;
- copdisplay.wait->l = CWAIT(32, par->diwstrt_v-4);
+unset_drvdata:
+ dev_set_drvdata(&pdev->dev, NULL);
+ fb_dealloc_cmap(&info->cmap);
+free_irq:
+ free_irq(IRQ_AMIGA_COPPER, info->par);
+disable_dma:
+ custom.dmacon = DMAF_ALL | DMAF_MASTER;
+ if (videomemory)
+ iounmap((void *)videomemory);
+ chipfree();
+release:
+ framebuffer_release(info);
+ return err;
}
- /*
- * Build the Copper List
- */
-
-static void ami_build_copper(void)
-{
- struct amifb_par *par = &currentpar;
- copins *copl, *cops;
- u_long p;
-
- currentcop = 1 - currentcop;
-
- copl = copdisplay.list[currentcop][1];
-
- (copl++)->l = CWAIT(0, 10);
- (copl++)->l = CMOVE(par->bplcon0, bplcon0);
- (copl++)->l = CMOVE(0, sprpt[0]);
- (copl++)->l = CMOVE2(0, sprpt[0]);
-
- if (par->bplcon0 & BPC0_LACE) {
- cops = copdisplay.list[currentcop][0];
-
- (cops++)->l = CWAIT(0, 10);
- (cops++)->l = CMOVE(par->bplcon0, bplcon0);
- (cops++)->l = CMOVE(0, sprpt[0]);
- (cops++)->l = CMOVE2(0, sprpt[0]);
-
- (copl++)->l = CMOVE(diwstrt2hw(par->diwstrt_h, par->diwstrt_v+1), diwstrt);
- (copl++)->l = CMOVE(diwstop2hw(par->diwstop_h, par->diwstop_v+1), diwstop);
- (cops++)->l = CMOVE(diwstrt2hw(par->diwstrt_h, par->diwstrt_v), diwstrt);
- (cops++)->l = CMOVE(diwstop2hw(par->diwstop_h, par->diwstop_v), diwstop);
- if (!IS_OCS) {
- (copl++)->l = CMOVE(diwhigh2hw(par->diwstrt_h, par->diwstrt_v+1,
- par->diwstop_h, par->diwstop_v+1), diwhigh);
- (cops++)->l = CMOVE(diwhigh2hw(par->diwstrt_h, par->diwstrt_v,
- par->diwstop_h, par->diwstop_v), diwhigh);
-#if 0
- if (par->beamcon0 & BMC0_VARBEAMEN) {
- (copl++)->l = CMOVE(vtotal2hw(par->vtotal), vtotal);
- (copl++)->l = CMOVE(vbstrt2hw(par->vbstrt+1), vbstrt);
- (copl++)->l = CMOVE(vbstop2hw(par->vbstop+1), vbstop);
- (cops++)->l = CMOVE(vtotal2hw(par->vtotal), vtotal);
- (cops++)->l = CMOVE(vbstrt2hw(par->vbstrt), vbstrt);
- (cops++)->l = CMOVE(vbstop2hw(par->vbstop), vbstop);
- }
-#endif
- }
- p = ZTWO_PADDR(copdisplay.list[currentcop][0]);
- (copl++)->l = CMOVE(highw(p), cop2lc);
- (copl++)->l = CMOVE2(loww(p), cop2lc);
- p = ZTWO_PADDR(copdisplay.list[currentcop][1]);
- (cops++)->l = CMOVE(highw(p), cop2lc);
- (cops++)->l = CMOVE2(loww(p), cop2lc);
- copdisplay.rebuild[0] = cops;
- } else {
- (copl++)->l = CMOVE(diwstrt2hw(par->diwstrt_h, par->diwstrt_v), diwstrt);
- (copl++)->l = CMOVE(diwstop2hw(par->diwstop_h, par->diwstop_v), diwstop);
- if (!IS_OCS) {
- (copl++)->l = CMOVE(diwhigh2hw(par->diwstrt_h, par->diwstrt_v,
- par->diwstop_h, par->diwstop_v), diwhigh);
-#if 0
- if (par->beamcon0 & BMC0_VARBEAMEN) {
- (copl++)->l = CMOVE(vtotal2hw(par->vtotal), vtotal);
- (copl++)->l = CMOVE(vbstrt2hw(par->vbstrt), vbstrt);
- (copl++)->l = CMOVE(vbstop2hw(par->vbstop), vbstop);
- }
-#endif
- }
- }
- copdisplay.rebuild[1] = copl;
-
- ami_update_par();
- ami_rebuild_copper();
-}
-
- /*
- * Rebuild the Copper List
- *
- * We only change the things that are not static
- */
-
-static void ami_rebuild_copper(void)
-{
- struct amifb_par *par = &currentpar;
- copins *copl, *cops;
- u_short line, h_end1, h_end2;
- short i;
- u_long p;
-
- if (IS_AGA && maxfmode + par->clk_shift == 0)
- h_end1 = par->diwstrt_h-64;
- else
- h_end1 = par->htotal-32;
- h_end2 = par->ddfstop+64;
-
- ami_set_sprite();
-
- copl = copdisplay.rebuild[1];
- p = par->bplpt0;
- if (par->vmode & FB_VMODE_YWRAP) {
- if ((par->vyres-par->yoffset) != 1 || !mod2(par->diwstrt_v)) {
- if (par->yoffset > par->vyres-par->yres) {
- for (i = 0; i < (short)par->bpp; i++, p += par->next_plane) {
- (copl++)->l = CMOVE(highw(p), bplpt[i]);
- (copl++)->l = CMOVE2(loww(p), bplpt[i]);
- }
- line = par->diwstrt_v + ((par->vyres-par->yoffset)<<par->line_shift) - 1;
- while (line >= 512) {
- (copl++)->l = CWAIT(h_end1, 510);
- line -= 512;
- }
- if (line >= 510 && IS_AGA && maxfmode + par->clk_shift == 0)
- (copl++)->l = CWAIT(h_end1, line);
- else
- (copl++)->l = CWAIT(h_end2, line);
- p = par->bplpt0wrap;
- }
- } else p = par->bplpt0wrap;
- }
- for (i = 0; i < (short)par->bpp; i++, p += par->next_plane) {
- (copl++)->l = CMOVE(highw(p), bplpt[i]);
- (copl++)->l = CMOVE2(loww(p), bplpt[i]);
- }
- copl->l = CEND;
-
- if (par->bplcon0 & BPC0_LACE) {
- cops = copdisplay.rebuild[0];
- p = par->bplpt0;
- if (mod2(par->diwstrt_v))
- p -= par->next_line;
- else
- p += par->next_line;
- if (par->vmode & FB_VMODE_YWRAP) {
- if ((par->vyres-par->yoffset) != 1 || mod2(par->diwstrt_v)) {
- if (par->yoffset > par->vyres-par->yres+1) {
- for (i = 0; i < (short)par->bpp; i++, p += par->next_plane) {
- (cops++)->l = CMOVE(highw(p), bplpt[i]);
- (cops++)->l = CMOVE2(loww(p), bplpt[i]);
- }
- line = par->diwstrt_v + ((par->vyres-par->yoffset)<<par->line_shift) - 2;
- while (line >= 512) {
- (cops++)->l = CWAIT(h_end1, 510);
- line -= 512;
- }
- if (line > 510 && IS_AGA && maxfmode + par->clk_shift == 0)
- (cops++)->l = CWAIT(h_end1, line);
- else
- (cops++)->l = CWAIT(h_end2, line);
- p = par->bplpt0wrap;
- if (mod2(par->diwstrt_v+par->vyres-par->yoffset))
- p -= par->next_line;
- else
- p += par->next_line;
- }
- } else p = par->bplpt0wrap - par->next_line;
- }
- for (i = 0; i < (short)par->bpp; i++, p += par->next_plane) {
- (cops++)->l = CMOVE(highw(p), bplpt[i]);
- (cops++)->l = CMOVE2(loww(p), bplpt[i]);
- }
- cops->l = CEND;
- }
-}
static int __exit amifb_remove(struct platform_device *pdev)
{
- unregister_framebuffer(&fb_info);
- amifb_deinit(pdev);
+ struct fb_info *info = dev_get_drvdata(&pdev->dev);
+
+ unregister_framebuffer(info);
+ dev_set_drvdata(&pdev->dev, NULL);
+ fb_dealloc_cmap(&info->cmap);
+ free_irq(IRQ_AMIGA_COPPER, info->par);
+ custom.dmacon = DMAF_ALL | DMAF_MASTER;
+ if (videomemory)
+ iounmap((void *)videomemory);
+ chipfree();
+ framebuffer_release(info);
amifb_video_off();
return 0;
}
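
The amifb conversion above follows a common fbdev pattern: per-device state is allocated with framebuffer_alloc() and reached through info->par and the platform device's drvdata, and the probe error path unwinds in reverse order of setup. A minimal sketch of that shape, not taken from this patch (struct myfb_par and the myfb_* names are placeholders):

#include <linux/fb.h>
#include <linux/platform_device.h>

struct myfb_par {
	u32 pseudo_palette[16];		/* placeholder private state */
};

static int myfb_probe(struct platform_device *pdev)
{
	struct fb_info *info;
	int err;

	info = framebuffer_alloc(sizeof(struct myfb_par), &pdev->dev);
	if (!info)
		return -ENOMEM;

	/* ... fill in info->var, info->fix, info->fbops, info->par ... */

	err = fb_alloc_cmap(&info->cmap, 256, 0);
	if (err)
		goto release;

	platform_set_drvdata(pdev, info);
	err = register_framebuffer(info);
	if (err)
		goto dealloc_cmap;
	return 0;

dealloc_cmap:
	fb_dealloc_cmap(&info->cmap);
release:
	framebuffer_release(info);
	return err;
}

static int myfb_remove(struct platform_device *pdev)
{
	struct fb_info *info = platform_get_drvdata(pdev);

	unregister_framebuffer(info);
	fb_dealloc_cmap(&info->cmap);
	framebuffer_release(info);
	return 0;
}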
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 63409c1..e40c00f 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -100,8 +100,11 @@ static int atmel_bl_update_status(struct backlight_device *bl)
brightness = 0;
lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_VAL, brightness);
- lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR,
+ if (contrast_ctr & ATMEL_LCDC_POL_POSITIVE)
+ lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR,
brightness ? contrast_ctr : 0);
+ else
+ lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, contrast_ctr);
bl->props.fb_blank = bl->props.power = sinfo->bl_power = power;
@@ -682,14 +685,30 @@ static int atmel_lcdfb_setcolreg(unsigned int regno, unsigned int red,
case FB_VISUAL_PSEUDOCOLOR:
if (regno < 256) {
- val = ((red >> 11) & 0x001f);
- val |= ((green >> 6) & 0x03e0);
- val |= ((blue >> 1) & 0x7c00);
-
- /*
- * TODO: intensity bit. Maybe something like
- * ~(red[10] ^ green[10] ^ blue[10]) & 1
- */
+ if (cpu_is_at91sam9261() || cpu_is_at91sam9263()
+ || cpu_is_at91sam9rl()) {
+ /* old style I+BGR:555 */
+ val = ((red >> 11) & 0x001f);
+ val |= ((green >> 6) & 0x03e0);
+ val |= ((blue >> 1) & 0x7c00);
+
+ /*
+ * TODO: intensity bit. Maybe something like
+ * ~(red[10] ^ green[10] ^ blue[10]) & 1
+ */
+ } else {
+ /* new style BGR:565 / RGB:565 */
+ if (sinfo->lcd_wiring_mode ==
+ ATMEL_LCDC_WIRING_RGB) {
+ val = ((blue >> 11) & 0x001f);
+ val |= ((red >> 0) & 0xf800);
+ } else {
+ val = ((red >> 11) & 0x001f);
+ val |= ((blue >> 0) & 0xf800);
+ }
+
+ val |= ((green >> 5) & 0x07e0);
+ }
lcdc_writel(sinfo, ATMEL_LCDC_LUT(regno), val);
ret = 0;
@@ -1089,7 +1108,7 @@ static int atmel_lcdfb_suspend(struct platform_device *pdev, pm_message_t mesg)
*/
lcdc_writel(sinfo, ATMEL_LCDC_IDR, ~0UL);
- sinfo->saved_lcdcon = lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
+ sinfo->saved_lcdcon = lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_CTR);
lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, 0);
if (sinfo->atmel_lcdfb_power_control)
sinfo->atmel_lcdfb_power_control(0);
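
The setcolreg hunk above packs the fb layer's 16-bit-per-channel colour values into a 16-bit LUT word, keeping the top 5/6/5 bits of each component; the wiring mode only decides whether red or blue lands in the high field. A standalone sketch of the same arithmetic, not taken from the driver (function names are placeholders):

#include <stdint.h>

/* red/green/blue are 16-bit components, as handed to a setcolreg hook */
static uint16_t pack_565_red_high(uint16_t red, uint16_t green, uint16_t blue)
{
	return (uint16_t)(((red   >> 0)  & 0xf800) |	/* red[15:11]   -> bits 15:11 */
			  ((green >> 5)  & 0x07e0) |	/* green[15:10] -> bits 10:5  */
			  ((blue  >> 11) & 0x001f));	/* blue[15:11]  -> bits 4:0   */
}

static uint16_t pack_565_blue_high(uint16_t red, uint16_t green, uint16_t blue)
{
	/* same layout with red and blue swapped, as in the non-RGB wiring case */
	return (uint16_t)(((blue  >> 0)  & 0xf800) |
			  ((green >> 5)  & 0x07e0) |
			  ((red   >> 11) & 0x001f));
}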
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index 44bdce4..622f12b 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -301,9 +301,9 @@ static struct fb_ops atyfb_ops = {
.fb_sync = atyfb_sync,
};
-static int noaccel;
+static bool noaccel;
#ifdef CONFIG_MTRR
-static int nomtrr;
+static bool nomtrr;
#endif
static int vram;
static int pll;
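
The atyfb and radeon hunks here change flag-style module parameters from int to bool; module_param(..., bool, ...) expects the backing variable to actually be a bool, while numeric options such as vram stay int. A minimal sketch of the declaration pattern, mirroring two of the variables shown above (the description strings are placeholders):

#include <linux/module.h>
#include <linux/moduleparam.h>

static bool noaccel;		/* on/off switch: bool-backed */
static int vram;		/* numeric option: stays int */

module_param(noaccel, bool, 0444);
MODULE_PARM_DESC(noaccel, "Disable hardware acceleration (0 or 1)");

module_param(vram, int, 0444);
MODULE_PARM_DESC(vram, "Video RAM override in KiB (0 = autodetect)");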
diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c
index 1506848..ce1506b 100644
--- a/drivers/video/aty/radeon_base.c
+++ b/drivers/video/aty/radeon_base.c
@@ -263,19 +263,19 @@ static reg_val common_regs[] = {
static char *mode_option;
static char *monitor_layout;
-static int noaccel = 0;
+static bool noaccel = 0;
static int default_dynclk = -2;
-static int nomodeset = 0;
-static int ignore_edid = 0;
-static int mirror = 0;
+static bool nomodeset = 0;
+static bool ignore_edid = 0;
+static bool mirror = 0;
static int panel_yres = 0;
-static int force_dfp = 0;
-static int force_measure_pll = 0;
+static bool force_dfp = 0;
+static bool force_measure_pll = 0;
#ifdef CONFIG_MTRR
-static int nomtrr = 0;
+static bool nomtrr = 0;
#endif
-static int force_sleep;
-static int ignore_devlist;
+static bool force_sleep;
+static bool ignore_devlist;
#ifdef CONFIG_PMAC_BACKLIGHT
static int backlight = 1;
#else
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index 649cb35..de9da67 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -60,18 +60,6 @@
#include "au1100fb.h"
-/*
- * Sanity check. If this is a new Au1100 based board, search for
- * the PB1100 ifdefs to make sure you modify the code accordingly.
- */
-#if defined(CONFIG_MIPS_PB1100)
- #include <asm/mach-pb1x00/pb1100.h>
-#elif defined(CONFIG_MIPS_DB1100)
- #include <asm/mach-db1x00/db1x00.h>
-#else
- #error "Unknown Au1100 board, Au1100 FB driver not supported"
-#endif
-
#define DRIVER_NAME "au1100fb"
#define DRIVER_DESC "LCD controller driver for AU1100 processors"
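
The au1100fb and au1200fb changes that follow drop compile-time board #ifdefs and extern board hooks in favour of callbacks carried in platform data (pd->panel_init, pd->panel_shutdown, pd->panel_index below). A sketch of that shape, assuming a platform-data layout matching how the driver dereferences its pd pointer; the real definition lives in the new asm/mach-au1x00/au1200fb.h header:

/* Assumed layout, mirroring the fields the driver below uses. */
struct au1200fb_platdata {
	int (*panel_index)(void);	/* board switches -> panel table index */
	int (*panel_init)(void);	/* optional: power the panel up */
	int (*panel_shutdown)(void);	/* optional: power the panel down */
};

/* Driver side: every hook is optional, so check before calling. */
static void panel_power(struct au1200fb_platdata *pd, int on)
{
	if (on) {
		if (pd->panel_init)
			pd->panel_init();
	} else {
		if (pd->panel_shutdown)
			pd->panel_shutdown();
	}
}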
diff --git a/drivers/video/au1200fb.c b/drivers/video/au1200fb.c
index 7200559..04e4479 100644
--- a/drivers/video/au1200fb.c
+++ b/drivers/video/au1200fb.c
@@ -44,6 +44,7 @@
#include <linux/slab.h>
#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/au1200fb.h> /* platform_data */
#include "au1200fb.h"
#define DRIVER_NAME "au1200fb"
@@ -143,6 +144,7 @@ struct au1200_lcd_iodata_t {
/* Private, per-framebuffer management information (independent of the panel itself) */
struct au1200fb_device {
struct fb_info *fb_info; /* FB driver info record */
+ struct au1200fb_platdata *pd;
int plane;
unsigned char* fb_mem; /* FrameBuffer memory map */
@@ -201,9 +203,6 @@ struct window_settings {
#define LCD_WINCTRL1_PO_16BPP LCD_WINCTRL1_PO_01
#endif
-extern int board_au1200fb_panel_init (void);
-extern int board_au1200fb_panel_shutdown (void);
-
/*
* Default window configurations
*/
@@ -334,8 +333,6 @@ struct panel_settings
uint32 mode_toyclksrc;
uint32 mode_backlight;
uint32 mode_auxpll;
- int (*device_init)(void);
- int (*device_shutdown)(void);
#define Xres min_xres
#define Yres min_yres
u32 min_xres; /* Minimum horizontal resolution */
@@ -385,8 +382,6 @@ static struct panel_settings known_lcd_panels[] =
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 8, /* 96MHz AUXPLL */
- .device_init = NULL,
- .device_shutdown = NULL,
320, 320,
240, 240,
},
@@ -415,8 +410,6 @@ static struct panel_settings known_lcd_panels[] =
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 8, /* 96MHz AUXPLL */
- .device_init = NULL,
- .device_shutdown = NULL,
640, 480,
640, 480,
},
@@ -445,8 +438,6 @@ static struct panel_settings known_lcd_panels[] =
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 8, /* 96MHz AUXPLL */
- .device_init = NULL,
- .device_shutdown = NULL,
800, 800,
600, 600,
},
@@ -475,8 +466,6 @@ static struct panel_settings known_lcd_panels[] =
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 6, /* 72MHz AUXPLL */
- .device_init = NULL,
- .device_shutdown = NULL,
1024, 1024,
768, 768,
},
@@ -505,8 +494,6 @@ static struct panel_settings known_lcd_panels[] =
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 10, /* 120MHz AUXPLL */
- .device_init = NULL,
- .device_shutdown = NULL,
1280, 1280,
1024, 1024,
},
@@ -535,8 +522,6 @@ static struct panel_settings known_lcd_panels[] =
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 8, /* 96MHz AUXPLL */
- .device_init = board_au1200fb_panel_init,
- .device_shutdown = board_au1200fb_panel_shutdown,
1024, 1024,
768, 768,
},
@@ -568,8 +553,6 @@ static struct panel_settings known_lcd_panels[] =
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 8, /* 96MHz AUXPLL */
- .device_init = board_au1200fb_panel_init,
- .device_shutdown = board_au1200fb_panel_shutdown,
640, 480,
640, 480,
},
@@ -601,8 +584,6 @@ static struct panel_settings known_lcd_panels[] =
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 8, /* 96MHz AUXPLL */
- .device_init = board_au1200fb_panel_init,
- .device_shutdown = board_au1200fb_panel_shutdown,
320, 320,
240, 240,
},
@@ -634,11 +615,43 @@ static struct panel_settings known_lcd_panels[] =
.mode_toyclksrc = 0x00000004, /* AUXPLL directly */
.mode_backlight = 0x00000000,
.mode_auxpll = 8, /* 96MHz AUXPLL */
- .device_init = board_au1200fb_panel_init,
- .device_shutdown = board_au1200fb_panel_shutdown,
856, 856,
480, 480,
},
+ [9] = {
+ .name = "DB1300_800x480",
+ .monspecs = {
+ .modedb = NULL,
+ .modedb_len = 0,
+ .hfmin = 30000,
+ .hfmax = 70000,
+ .vfmin = 60,
+ .vfmax = 60,
+ .dclkmin = 6000000,
+ .dclkmax = 28000000,
+ .input = FB_DISP_RGB,
+ },
+ .mode_screen = LCD_SCREEN_SX_N(800) |
+ LCD_SCREEN_SY_N(480),
+ .mode_horztiming = LCD_HORZTIMING_HPW_N(5) |
+ LCD_HORZTIMING_HND1_N(16) |
+ LCD_HORZTIMING_HND2_N(8),
+ .mode_verttiming = LCD_VERTTIMING_VPW_N(4) |
+ LCD_VERTTIMING_VND1_N(8) |
+ LCD_VERTTIMING_VND2_N(5),
+ .mode_clkcontrol = LCD_CLKCONTROL_PCD_N(1) |
+ LCD_CLKCONTROL_IV |
+ LCD_CLKCONTROL_IH,
+ .mode_pwmdiv = 0x00000000,
+ .mode_pwmhi = 0x00000000,
+ .mode_outmask = 0x00FFFFFF,
+ .mode_fifoctrl = 0x2f2f2f2f,
+ .mode_toyclksrc = 0x00000004, /* AUXPLL directly */
+ .mode_backlight = 0x00000000,
+ .mode_auxpll = (48/12) * 2,
+ 800, 800,
+ 480, 480,
+ },
};
#define NUM_PANELS (ARRAY_SIZE(known_lcd_panels))
@@ -764,7 +777,8 @@ static int au1200_setlocation (struct au1200fb_device *fbdev, int plane,
return 0;
}
-static void au1200_setpanel (struct panel_settings *newpanel)
+static void au1200_setpanel(struct panel_settings *newpanel,
+ struct au1200fb_platdata *pd)
{
/*
* Perform global setup/init of LCD controller
@@ -798,8 +812,8 @@ static void au1200_setpanel (struct panel_settings *newpanel)
the controller, the clock cannot be turned off before first
shutting down the controller.
*/
- if (panel->device_shutdown != NULL)
- panel->device_shutdown();
+ if (pd->panel_shutdown)
+ pd->panel_shutdown();
}
/* Newpanel == NULL indicates a shutdown operation only */
@@ -852,7 +866,8 @@ static void au1200_setpanel (struct panel_settings *newpanel)
au_sync();
/* Call init of panel */
- if (panel->device_init != NULL) panel->device_init();
+ if (pd->panel_init)
+ pd->panel_init();
/* FIX!!!! not appropriate on panel change!!! Global setup/init */
lcd->intenable = 0;
@@ -1185,6 +1200,8 @@ static int au1200fb_fb_setcolreg(unsigned regno, unsigned red, unsigned green,
*/
static int au1200fb_fb_blank(int blank_mode, struct fb_info *fbi)
{
+ struct au1200fb_device *fbdev = fbi->par;
+
/* Short-circuit screen blanking */
if (noblanking)
return 0;
@@ -1194,13 +1211,13 @@ static int au1200fb_fb_blank(int blank_mode, struct fb_info *fbi)
case FB_BLANK_UNBLANK:
case FB_BLANK_NORMAL:
/* printk("turn on panel\n"); */
- au1200_setpanel(panel);
+ au1200_setpanel(panel, fbdev->pd);
break;
case FB_BLANK_VSYNC_SUSPEND:
case FB_BLANK_HSYNC_SUSPEND:
case FB_BLANK_POWERDOWN:
/* printk("turn off panel\n"); */
- au1200_setpanel(NULL);
+ au1200_setpanel(NULL, fbdev->pd);
break;
default:
break;
@@ -1428,6 +1445,7 @@ static void get_window(unsigned int plane,
static int au1200fb_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
+ struct au1200fb_device *fbdev = info->par;
int plane;
int val;
@@ -1472,7 +1490,7 @@ static int au1200fb_ioctl(struct fb_info *info, unsigned int cmd,
struct panel_settings *newpanel;
panel_index = iodata.global.panel_choice;
newpanel = &known_lcd_panels[panel_index];
- au1200_setpanel(newpanel);
+ au1200_setpanel(newpanel, fbdev->pd);
}
break;
@@ -1588,22 +1606,102 @@ static int au1200fb_init_fbinfo(struct au1200fb_device *fbdev)
/*-------------------------------------------------------------------------*/
-/* AU1200 LCD controller device driver */
+static int au1200fb_setup(struct au1200fb_platdata *pd)
+{
+ char *options = NULL;
+ char *this_opt, *endptr;
+ int num_panels = ARRAY_SIZE(known_lcd_panels);
+ int panel_idx = -1;
+
+ fb_get_options(DRIVER_NAME, &options);
+
+ if (!options)
+ goto out;
+
+ while ((this_opt = strsep(&options, ",")) != NULL) {
+ /* Panel option - can be panel name,
+ * "bs" for board-switch, or number/index */
+ if (!strncmp(this_opt, "panel:", 6)) {
+ int i;
+ long int li;
+ char *endptr;
+ this_opt += 6;
+ /* First check for an index, which lets us
+ * short-circuit this mess */
+ li = simple_strtol(this_opt, &endptr, 0);
+ if (*endptr == '\0')
+ panel_idx = (int)li;
+ else if (strcmp(this_opt, "bs") == 0)
+ panel_idx = pd->panel_index();
+ else {
+ for (i = 0; i < num_panels; i++) {
+ if (!strcmp(this_opt,
+ known_lcd_panels[i].name)) {
+ panel_idx = i;
+ break;
+ }
+ }
+ }
+ if ((panel_idx < 0) || (panel_idx >= num_panels))
+ print_warn("Panel %s not supported!", this_opt);
+ else
+ panel_index = panel_idx;
+
+ } else if (strncmp(this_opt, "nohwcursor", 10) == 0)
+ nohwcursor = 1;
+ else if (strncmp(this_opt, "devices:", 8) == 0) {
+ this_opt += 8;
+ device_count = simple_strtol(this_opt, &endptr, 0);
+ if ((device_count < 0) ||
+ (device_count > MAX_DEVICE_COUNT))
+ device_count = MAX_DEVICE_COUNT;
+ } else if (strncmp(this_opt, "wincfg:", 7) == 0) {
+ this_opt += 7;
+ window_index = simple_strtol(this_opt, &endptr, 0);
+ if ((window_index < 0) ||
+ (window_index >= ARRAY_SIZE(windows)))
+ window_index = DEFAULT_WINDOW_INDEX;
+ } else if (strncmp(this_opt, "off", 3) == 0)
+ return 1;
+ else
+ print_warn("Unsupported option \"%s\"", this_opt);
+ }
+
+out:
+ return 0;
+}
+
+/* AU1200 LCD controller device driver */
static int __devinit au1200fb_drv_probe(struct platform_device *dev)
{
struct au1200fb_device *fbdev;
+ struct au1200fb_platdata *pd;
struct fb_info *fbi = NULL;
unsigned long page;
int bpp, plane, ret, irq;
+ print_info("" DRIVER_DESC "");
+
+ pd = dev->dev.platform_data;
+ if (!pd)
+ return -ENODEV;
+
+ /* Setup driver with options */
+ if (au1200fb_setup(pd))
+ return -ENODEV;
+
+ /* Point to the panel selected */
+ panel = &known_lcd_panels[panel_index];
+ win = &windows[window_index];
+
+ printk(DRIVER_NAME ": Panel %d %s\n", panel_index, panel->name);
+ printk(DRIVER_NAME ": Win %d %s\n", window_index, win->name);
+
/* shut gcc up */
ret = 0;
fbdev = NULL;
- /* Kickstart the panel */
- au1200_setpanel(panel);
-
for (plane = 0; plane < device_count; ++plane) {
bpp = winbpp(win->w[plane].mode_winctrl1);
if (win->w[plane].xres == 0)
@@ -1619,6 +1717,7 @@ static int __devinit au1200fb_drv_probe(struct platform_device *dev)
_au1200fb_infos[plane] = fbi;
fbdev = fbi->par;
fbdev->fb_info = fbi;
+ fbdev->pd = pd;
fbdev->plane = plane;
@@ -1680,6 +1779,11 @@ static int __devinit au1200fb_drv_probe(struct platform_device *dev)
goto failed;
}
+ platform_set_drvdata(dev, pd);
+
+ /* Kickstart the panel */
+ au1200_setpanel(panel, pd);
+
return 0;
failed:
@@ -1699,12 +1803,13 @@ failed:
static int __devexit au1200fb_drv_remove(struct platform_device *dev)
{
+ struct au1200fb_platdata *pd = platform_get_drvdata(dev);
struct au1200fb_device *fbdev;
struct fb_info *fbi;
int plane;
/* Turn off the panel */
- au1200_setpanel(NULL);
+ au1200_setpanel(NULL, pd);
for (plane = 0; plane < device_count; ++plane) {
fbi = _au1200fb_infos[plane];
@@ -1732,7 +1837,8 @@ static int __devexit au1200fb_drv_remove(struct platform_device *dev)
#ifdef CONFIG_PM
static int au1200fb_drv_suspend(struct device *dev)
{
- au1200_setpanel(NULL);
+ struct au1200fb_platdata *pd = dev_get_drvdata(dev);
+ au1200_setpanel(NULL, pd);
lcd->outmask = 0;
au_sync();
@@ -1742,11 +1848,12 @@ static int au1200fb_drv_suspend(struct device *dev)
static int au1200fb_drv_resume(struct device *dev)
{
+ struct au1200fb_platdata *pd = dev_get_drvdata(dev);
struct fb_info *fbi;
int i;
/* Kickstart the panel */
- au1200_setpanel(panel);
+ au1200_setpanel(panel, pd);
for (i = 0; i < device_count; i++) {
fbi = _au1200fb_infos[i];
@@ -1781,100 +1888,8 @@ static struct platform_driver au1200fb_driver = {
/*-------------------------------------------------------------------------*/
-/* Kernel driver */
-
-static int au1200fb_setup(void)
-{
- char *options = NULL;
- char *this_opt, *endptr;
- int num_panels = ARRAY_SIZE(known_lcd_panels);
- int panel_idx = -1;
-
- fb_get_options(DRIVER_NAME, &options);
-
- if (options) {
- while ((this_opt = strsep(&options,",")) != NULL) {
- /* Panel option - can be panel name,
- * "bs" for board-switch, or number/index */
- if (!strncmp(this_opt, "panel:", 6)) {
- int i;
- long int li;
- char *endptr;
- this_opt += 6;
- /* First check for index, which allows
- * to short circuit this mess */
- li = simple_strtol(this_opt, &endptr, 0);
- if (*endptr == '\0') {
- panel_idx = (int)li;
- }
- else if (strcmp(this_opt, "bs") == 0) {
- extern int board_au1200fb_panel(void);
- panel_idx = board_au1200fb_panel();
- }
-
- else
- for (i = 0; i < num_panels; i++) {
- if (!strcmp(this_opt, known_lcd_panels[i].name)) {
- panel_idx = i;
- break;
- }
- }
-
- if ((panel_idx < 0) || (panel_idx >= num_panels)) {
- print_warn("Panel %s not supported!", this_opt);
- }
- else
- panel_index = panel_idx;
- }
-
- else if (strncmp(this_opt, "nohwcursor", 10) == 0) {
- nohwcursor = 1;
- }
-
- else if (strncmp(this_opt, "devices:", 8) == 0) {
- this_opt += 8;
- device_count = simple_strtol(this_opt,
- &endptr, 0);
- if ((device_count < 0) ||
- (device_count > MAX_DEVICE_COUNT))
- device_count = MAX_DEVICE_COUNT;
- }
-
- else if (strncmp(this_opt, "wincfg:", 7) == 0) {
- this_opt += 7;
- window_index = simple_strtol(this_opt,
- &endptr, 0);
- if ((window_index < 0) ||
- (window_index >= ARRAY_SIZE(windows)))
- window_index = DEFAULT_WINDOW_INDEX;
- }
-
- else if (strncmp(this_opt, "off", 3) == 0)
- return 1;
- /* Unsupported option */
- else {
- print_warn("Unsupported option \"%s\"", this_opt);
- }
- }
- }
- return 0;
-}
-
static int __init au1200fb_init(void)
{
- print_info("" DRIVER_DESC "");
-
- /* Setup driver with options */
- if (au1200fb_setup())
- return -ENODEV;
-
- /* Point to the panel selected */
- panel = &known_lcd_panels[panel_index];
- win = &windows[window_index];
-
- printk(DRIVER_NAME ": Panel %d %s\n", panel_index, panel->name);
- printk(DRIVER_NAME ": Win %d %s\n", window_index, win->name);
-
return platform_driver_register(&au1200fb_driver);
}
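
Editor's note: the option string that the relocated au1200fb_setup() now parses at probe time still comes from fb_get_options(), i.e. the video= kernel parameter. A hypothetical command line exercising the options handled above (assuming the name registered with fb_get_options() is "au1200fb"; DRIVER_NAME itself is defined outside this hunk):

    video=au1200fb:panel:9,nohwcursor,devices:2,wincfg:0

panel: also accepts a panel name from known_lcd_panels[] or "bs", which asks the board-switch callback (pd->panel_index()) for the index.
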
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index 1105fa1..a1376dc 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -270,17 +270,7 @@ static struct platform_driver pm860x_backlight_driver = {
.remove = pm860x_backlight_remove,
};
-static int __init pm860x_backlight_init(void)
-{
- return platform_driver_register(&pm860x_backlight_driver);
-}
-module_init(pm860x_backlight_init);
-
-static void __exit pm860x_backlight_exit(void)
-{
- platform_driver_unregister(&pm860x_backlight_driver);
-}
-module_exit(pm860x_backlight_exit);
+module_platform_driver(pm860x_backlight_driver);
MODULE_DESCRIPTION("Backlight Driver for Marvell Semiconductor 88PM8606");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
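
Editor's note: the module_platform_driver() helper used here (and in the other backlight drivers converted below) generates exactly the registration boilerplate that the removed init/exit pair spelled out by hand. An approximate expansion, for illustration only (the real macro lives in <linux/platform_device.h> and derives the function names from the driver symbol):

static int __init pm860x_backlight_driver_init(void)
{
        return platform_driver_register(&pm860x_backlight_driver);
}
module_init(pm860x_backlight_driver_init);

static void __exit pm860x_backlight_driver_exit(void)
{
        platform_driver_unregister(&pm860x_backlight_driver);
}
module_exit(pm860x_backlight_driver_exit);
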
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 278aeaa..681b369 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -280,14 +280,6 @@ config BACKLIGHT_WM831X
If you have a backlight driven by the ISINK and DCDC of a
WM831x PMIC say y to enable the backlight driver for it.
-config BACKLIGHT_ADX
- tristate "Avionic Design Xanthos Backlight Driver"
- depends on ARCH_PXA_ADX
- default y
- help
- Say Y to enable the backlight driver on Avionic Design Xanthos-based
- boards.
-
config BACKLIGHT_ADP5520
tristate "Backlight Driver for ADP5520/ADP5501 using WLED"
depends on PMIC_ADP5520
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index fdd1fc4..af5cf65 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -32,7 +32,6 @@ obj-$(CONFIG_BACKLIGHT_APPLE) += apple_bl.o
obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o
obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o
obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o
-obj-$(CONFIG_BACKLIGHT_ADX) += adx_bl.o
obj-$(CONFIG_BACKLIGHT_ADP5520) += adp5520_bl.o
obj-$(CONFIG_BACKLIGHT_ADP8860) += adp8860_bl.o
obj-$(CONFIG_BACKLIGHT_ADP8870) += adp8870_bl.o
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index dfb763e..2e630bf 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -384,17 +384,7 @@ static struct platform_driver adp5520_bl_driver = {
.resume = adp5520_bl_resume,
};
-static int __init adp5520_bl_init(void)
-{
- return platform_driver_register(&adp5520_bl_driver);
-}
-module_init(adp5520_bl_init);
-
-static void __exit adp5520_bl_exit(void)
-{
- platform_driver_unregister(&adp5520_bl_driver);
-}
-module_exit(adp5520_bl_exit);
+module_platform_driver(adp5520_bl_driver);
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("ADP5520(01) Backlight Driver");
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 66bc74d..378276c 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -146,7 +146,7 @@ static int adp8860_set_bits(struct i2c_client *client, int reg, uint8_t bit_mask
ret = adp8860_read(client, reg, &reg_val);
- if (!ret && ((reg_val & bit_mask) == 0)) {
+ if (!ret && ((reg_val & bit_mask) != bit_mask)) {
reg_val |= bit_mask;
ret = adp8860_write(client, reg, reg_val);
}
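
Editor's note: this small change matters whenever bit_mask has more than one bit set; the old test skipped the register write if any requested bit was already set, while the intent is to skip it only when all of them are. A tiny worked example with hypothetical values:

uint8_t reg_val = 0x01, bit_mask = 0x03;   /* one of the two requested bits already set */
/* old check: (0x01 & 0x03) == 0 is false   -> write skipped, bit 0x02 never gets set     */
/* new check: (0x01 & 0x03) != 0x03 is true -> reg_val |= bit_mask; both bits end up set  */
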
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 6c68a68..6735059 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -160,7 +160,7 @@ static int adp8870_set_bits(struct i2c_client *client, int reg, uint8_t bit_mask
ret = adp8870_read(client, reg, &reg_val);
- if (!ret && ((reg_val & bit_mask) == 0)) {
+ if (!ret && ((reg_val & bit_mask) != bit_mask)) {
reg_val |= bit_mask;
ret = adp8870_write(client, reg, reg_val);
}
diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
deleted file mode 100644
index c861c41..0000000
--- a/drivers/video/backlight/adx_bl.c
+++ /dev/null
@@ -1,182 +0,0 @@
-/*
- * linux/drivers/video/backlight/adx.c
- *
- * Copyright (C) 2009 Avionic Design GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Written by Thierry Reding <thierry.reding@avionic-design.de>
- */
-
-#include <linux/backlight.h>
-#include <linux/fb.h>
-#include <linux/gfp.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-/* register definitions */
-#define ADX_BACKLIGHT_CONTROL 0x00
-#define ADX_BACKLIGHT_CONTROL_ENABLE (1 << 0)
-#define ADX_BACKLIGHT_BRIGHTNESS 0x08
-#define ADX_BACKLIGHT_STATUS 0x10
-#define ADX_BACKLIGHT_ERROR 0x18
-
-struct adxbl {
- void __iomem *base;
-};
-
-static int adx_backlight_update_status(struct backlight_device *bldev)
-{
- struct adxbl *bl = bl_get_data(bldev);
- u32 value;
-
- value = bldev->props.brightness;
- writel(value, bl->base + ADX_BACKLIGHT_BRIGHTNESS);
-
- value = readl(bl->base + ADX_BACKLIGHT_CONTROL);
-
- if (bldev->props.state & BL_CORE_FBBLANK)
- value &= ~ADX_BACKLIGHT_CONTROL_ENABLE;
- else
- value |= ADX_BACKLIGHT_CONTROL_ENABLE;
-
- writel(value, bl->base + ADX_BACKLIGHT_CONTROL);
-
- return 0;
-}
-
-static int adx_backlight_get_brightness(struct backlight_device *bldev)
-{
- struct adxbl *bl = bl_get_data(bldev);
- u32 brightness;
-
- brightness = readl(bl->base + ADX_BACKLIGHT_BRIGHTNESS);
- return brightness & 0xff;
-}
-
-static int adx_backlight_check_fb(struct backlight_device *bldev, struct fb_info *fb)
-{
- return 1;
-}
-
-static const struct backlight_ops adx_backlight_ops = {
- .options = 0,
- .update_status = adx_backlight_update_status,
- .get_brightness = adx_backlight_get_brightness,
- .check_fb = adx_backlight_check_fb,
-};
-
-static int __devinit adx_backlight_probe(struct platform_device *pdev)
-{
- struct backlight_properties props;
- struct backlight_device *bldev;
- struct resource *res;
- struct adxbl *bl;
- int ret = 0;
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ret = -ENXIO;
- goto out;
- }
-
- res = devm_request_mem_region(&pdev->dev, res->start,
- resource_size(res), res->name);
- if (!res) {
- ret = -ENXIO;
- goto out;
- }
-
- bl = devm_kzalloc(&pdev->dev, sizeof(*bl), GFP_KERNEL);
- if (!bl) {
- ret = -ENOMEM;
- goto out;
- }
-
- bl->base = devm_ioremap_nocache(&pdev->dev, res->start,
- resource_size(res));
- if (!bl->base) {
- ret = -ENXIO;
- goto out;
- }
-
- memset(&props, 0, sizeof(struct backlight_properties));
- props.type = BACKLIGHT_RAW;
- props.max_brightness = 0xff;
- bldev = backlight_device_register(dev_name(&pdev->dev), &pdev->dev,
- bl, &adx_backlight_ops, &props);
- if (IS_ERR(bldev)) {
- ret = PTR_ERR(bldev);
- goto out;
- }
-
- bldev->props.brightness = 0xff;
- bldev->props.power = FB_BLANK_UNBLANK;
-
- platform_set_drvdata(pdev, bldev);
-
-out:
- return ret;
-}
-
-static int __devexit adx_backlight_remove(struct platform_device *pdev)
-{
- struct backlight_device *bldev;
- int ret = 0;
-
- bldev = platform_get_drvdata(pdev);
- bldev->props.power = FB_BLANK_UNBLANK;
- bldev->props.brightness = 0xff;
- backlight_update_status(bldev);
- backlight_device_unregister(bldev);
- platform_set_drvdata(pdev, NULL);
-
- return ret;
-}
-
-#ifdef CONFIG_PM
-static int adx_backlight_suspend(struct platform_device *pdev,
- pm_message_t state)
-{
- return 0;
-}
-
-static int adx_backlight_resume(struct platform_device *pdev)
-{
- return 0;
-}
-#else
-#define adx_backlight_suspend NULL
-#define adx_backlight_resume NULL
-#endif
-
-static struct platform_driver adx_backlight_driver = {
- .probe = adx_backlight_probe,
- .remove = __devexit_p(adx_backlight_remove),
- .suspend = adx_backlight_suspend,
- .resume = adx_backlight_resume,
- .driver = {
- .name = "adx-backlight",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init adx_backlight_init(void)
-{
- return platform_driver_register(&adx_backlight_driver);
-}
-
-static void __exit adx_backlight_exit(void)
-{
- platform_driver_unregister(&adx_backlight_driver);
-}
-
-module_init(adx_backlight_init);
-module_exit(adx_backlight_exit);
-
-MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
-MODULE_DESCRIPTION("Avionic Design Xanthos Backlight Driver");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 7363c1b..bf5b1ec 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -102,7 +102,7 @@ static void backlight_generate_event(struct backlight_device *bd,
}
static ssize_t backlight_show_power(struct device *dev,
- struct device_attribute *attr,char *buf)
+ struct device_attribute *attr, char *buf)
{
struct backlight_device *bd = to_backlight_device(dev);
@@ -116,7 +116,7 @@ static ssize_t backlight_store_power(struct device *dev,
struct backlight_device *bd = to_backlight_device(dev);
unsigned long power;
- rc = strict_strtoul(buf, 0, &power);
+ rc = kstrtoul(buf, 0, &power);
if (rc)
return rc;
@@ -150,7 +150,7 @@ static ssize_t backlight_store_brightness(struct device *dev,
struct backlight_device *bd = to_backlight_device(dev);
unsigned long brightness;
- rc = strict_strtoul(buf, 0, &brightness);
+ rc = kstrtoul(buf, 0, &brightness);
if (rc)
return rc;
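
Editor's note: kstrtoul() replaces the deprecated strict_strtoul() wrapper here; unlike simple_strtoul() it rejects trailing garbage (a trailing newline from sysfs is tolerated) and reports overflow, so callers no longer need to walk an end pointer themselves. The calling convention relied on above, as a minimal sketch:

unsigned long val;
int rc = kstrtoul(buf, 0, &val);   /* base 0: accepts decimal, 0x... hex, 0... octal */
if (rc)                            /* -EINVAL on junk, -ERANGE on overflow */
        return rc;
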
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index d68f14b..abb4a06 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -199,17 +199,7 @@ static struct platform_driver da903x_backlight_driver = {
.remove = da903x_backlight_remove,
};
-static int __init da903x_backlight_init(void)
-{
- return platform_driver_register(&da903x_backlight_driver);
-}
-module_init(da903x_backlight_init);
-
-static void __exit da903x_backlight_exit(void)
-{
- platform_driver_unregister(&da903x_backlight_driver);
-}
-module_exit(da903x_backlight_exit);
+module_platform_driver(da903x_backlight_driver);
MODULE_DESCRIPTION("Backlight Driver for Dialog Semiconductor DA9030/DA9034");
MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>"
diff --git a/drivers/video/backlight/ep93xx_bl.c b/drivers/video/backlight/ep93xx_bl.c
index c74a6f4..b62b8b9 100644
--- a/drivers/video/backlight/ep93xx_bl.c
+++ b/drivers/video/backlight/ep93xx_bl.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/module.h>
#include <linux/io.h>
#include <linux/fb.h>
#include <linux/backlight.h>
@@ -144,17 +143,7 @@ static struct platform_driver ep93xxbl_driver = {
.resume = ep93xxbl_resume,
};
-static int __init ep93xxbl_init(void)
-{
- return platform_driver_register(&ep93xxbl_driver);
-}
-module_init(ep93xxbl_init);
-
-static void __exit ep93xxbl_exit(void)
-{
- platform_driver_unregister(&ep93xxbl_driver);
-}
-module_exit(ep93xxbl_exit);
+module_platform_driver(ep93xxbl_driver);
MODULE_DESCRIPTION("EP93xx Backlight Driver");
MODULE_AUTHOR("H Hartley Sweeten <hsweeten@visionengravers.com>");
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
index adb1914..9ce6170 100644
--- a/drivers/video/backlight/generic_bl.c
+++ b/drivers/video/backlight/generic_bl.c
@@ -132,18 +132,7 @@ static struct platform_driver genericbl_driver = {
},
};
-static int __init genericbl_init(void)
-{
- return platform_driver_register(&genericbl_driver);
-}
-
-static void __exit genericbl_exit(void)
-{
- platform_driver_unregister(&genericbl_driver);
-}
-
-module_init(genericbl_init);
-module_exit(genericbl_exit);
+module_platform_driver(genericbl_driver);
MODULE_AUTHOR("Richard Purdie <rpurdie@rpsys.net>");
MODULE_DESCRIPTION("Generic Backlight Driver");
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
index de65d80..2f8af5d 100644
--- a/drivers/video/backlight/jornada720_bl.c
+++ b/drivers/video/backlight/jornada720_bl.c
@@ -147,19 +147,8 @@ static struct platform_driver jornada_bl_driver = {
},
};
-static int __init jornada_bl_init(void)
-{
- return platform_driver_register(&jornada_bl_driver);
-}
-
-static void __exit jornada_bl_exit(void)
-{
- platform_driver_unregister(&jornada_bl_driver);
-}
+module_platform_driver(jornada_bl_driver);
MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson>");
MODULE_DESCRIPTION("HP Jornada 710/720/728 Backlight driver");
MODULE_LICENSE("GPL");
-
-module_init(jornada_bl_init);
-module_exit(jornada_bl_exit);
diff --git a/drivers/video/backlight/jornada720_lcd.c b/drivers/video/backlight/jornada720_lcd.c
index d2ff658..22d231a 100644
--- a/drivers/video/backlight/jornada720_lcd.c
+++ b/drivers/video/backlight/jornada720_lcd.c
@@ -135,19 +135,8 @@ static struct platform_driver jornada_lcd_driver = {
},
};
-static int __init jornada_lcd_init(void)
-{
- return platform_driver_register(&jornada_lcd_driver);
-}
-
-static void __exit jornada_lcd_exit(void)
-{
- platform_driver_unregister(&jornada_lcd_driver);
-}
+module_platform_driver(jornada_lcd_driver);
MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>");
MODULE_DESCRIPTION("HP Jornada 710/720/728 LCD driver");
MODULE_LICENSE("GPL");
-
-module_init(jornada_lcd_init);
-module_exit(jornada_lcd_exit);
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index 4f5d1c4..27d1d7a 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -190,6 +190,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
priv->io_reg = regulator_get(&spi->dev, "vdd");
if (IS_ERR(priv->io_reg)) {
+ ret = PTR_ERR(priv->io_reg);
dev_err(&spi->dev, "%s: Unable to get the IO regulator\n",
__func__);
goto err3;
@@ -197,6 +198,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
priv->core_reg = regulator_get(&spi->dev, "vcore");
if (IS_ERR(priv->core_reg)) {
+ ret = PTR_ERR(priv->core_reg);
dev_err(&spi->dev, "%s: Unable to get the core regulator\n",
__func__);
goto err4;
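
Editor's note: the two added assignments are needed because a failed regulator_get() previously jumped to the error path with ret still holding the value left over from the earlier, successful step, so probe could unwind yet report the wrong (possibly zero) status. The underlying ERR_PTR idiom, with the error values shown only as examples:

struct regulator *reg = regulator_get(dev, "vdd");
if (IS_ERR(reg))
        return PTR_ERR(reg);   /* e.g. -ENODEV, or -EPROBE_DEFER where deferral applies */
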
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 71a11ca..79c1b0d 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -97,19 +97,16 @@ static ssize_t lcd_store_power(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
int rc = -ENXIO;
- char *endp;
struct lcd_device *ld = to_lcd_device(dev);
- int power = simple_strtoul(buf, &endp, 0);
- size_t size = endp - buf;
+ unsigned long power;
- if (isspace(*endp))
- size++;
- if (size != count)
- return -EINVAL;
+ rc = kstrtoul(buf, 0, &power);
+ if (rc)
+ return rc;
mutex_lock(&ld->ops_lock);
if (ld->ops && ld->ops->set_power) {
- pr_debug("lcd: set power to %d\n", power);
+ pr_debug("lcd: set power to %lu\n", power);
ld->ops->set_power(ld, power);
rc = count;
}
@@ -136,19 +133,16 @@ static ssize_t lcd_store_contrast(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
int rc = -ENXIO;
- char *endp;
struct lcd_device *ld = to_lcd_device(dev);
- int contrast = simple_strtoul(buf, &endp, 0);
- size_t size = endp - buf;
+ unsigned long contrast;
- if (isspace(*endp))
- size++;
- if (size != count)
- return -EINVAL;
+ rc = kstrtoul(buf, 0, &contrast);
+ if (rc)
+ return rc;
mutex_lock(&ld->ops_lock);
if (ld->ops && ld->ops->set_contrast) {
- pr_debug("lcd: set contrast to %d\n", contrast);
+ pr_debug("lcd: set contrast to %lu\n", contrast);
ld->ops->set_contrast(ld, contrast);
rc = count;
}
diff --git a/drivers/video/backlight/ld9040.c b/drivers/video/backlight/ld9040.c
index da9a5ce..78dafc0 100644
--- a/drivers/video/backlight/ld9040.c
+++ b/drivers/video/backlight/ld9040.c
@@ -31,6 +31,7 @@
#include <linux/lcd.h>
#include <linux/backlight.h>
#include <linux/module.h>
+#include <linux/regulator/consumer.h>
#include "ld9040_gamma.h"
@@ -53,8 +54,51 @@ struct ld9040 {
struct lcd_device *ld;
struct backlight_device *bd;
struct lcd_platform_data *lcd_pd;
+
+ struct mutex lock;
+ bool enabled;
+};
+
+static struct regulator_bulk_data supplies[] = {
+ { .supply = "vdd3", },
+ { .supply = "vci", },
};
+static void ld9040_regulator_enable(struct ld9040 *lcd)
+{
+ int ret = 0;
+ struct lcd_platform_data *pd = NULL;
+
+ pd = lcd->lcd_pd;
+ mutex_lock(&lcd->lock);
+ if (!lcd->enabled) {
+ ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
+ if (ret)
+ goto out;
+
+ lcd->enabled = true;
+ }
+ mdelay(pd->power_on_delay);
+out:
+ mutex_unlock(&lcd->lock);
+}
+
+static void ld9040_regulator_disable(struct ld9040 *lcd)
+{
+ int ret = 0;
+
+ mutex_lock(&lcd->lock);
+ if (lcd->enabled) {
+ ret = regulator_bulk_disable(ARRAY_SIZE(supplies), supplies);
+ if (ret)
+ goto out;
+
+ lcd->enabled = false;
+ }
+out:
+ mutex_unlock(&lcd->lock);
+}
+
static const unsigned short seq_swreset[] = {
0x01, COMMAND_ONLY,
ENDDEF, 0x00
@@ -532,13 +576,8 @@ static int ld9040_power_on(struct ld9040 *lcd)
return -EFAULT;
}
- if (!pd->power_on) {
- dev_err(lcd->dev, "power_on is NULL.\n");
- return -EFAULT;
- } else {
- pd->power_on(lcd->ld, 1);
- mdelay(pd->power_on_delay);
- }
+ /* lcd power on */
+ ld9040_regulator_enable(lcd);
if (!pd->reset) {
dev_err(lcd->dev, "reset is NULL.\n");
@@ -582,11 +621,8 @@ static int ld9040_power_off(struct ld9040 *lcd)
mdelay(pd->power_off_delay);
- if (!pd->power_on) {
- dev_err(lcd->dev, "power_on is NULL.\n");
- return -EFAULT;
- } else
- pd->power_on(lcd->ld, 0);
+ /* lcd power off */
+ ld9040_regulator_disable(lcd);
return 0;
}
@@ -693,6 +729,14 @@ static int ld9040_probe(struct spi_device *spi)
goto out_free_lcd;
}
+ mutex_init(&lcd->lock);
+
+ ret = regulator_bulk_get(lcd->dev, ARRAY_SIZE(supplies), supplies);
+ if (ret) {
+ dev_err(lcd->dev, "Failed to get regulators: %d\n", ret);
+ goto out_free_lcd;
+ }
+
ld = lcd_device_register("ld9040", &spi->dev, lcd, &ld9040_lcd_ops);
if (IS_ERR(ld)) {
ret = PTR_ERR(ld);
@@ -739,6 +783,8 @@ static int ld9040_probe(struct spi_device *spi)
out_unregister_lcd:
lcd_device_unregister(lcd->ld);
out_free_lcd:
+ regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
+
kfree(lcd);
return ret;
}
@@ -750,6 +796,7 @@ static int __devexit ld9040_remove(struct spi_device *spi)
ld9040_power(lcd, FB_BLANK_POWERDOWN);
backlight_device_unregister(lcd->bd);
lcd_device_unregister(lcd->ld);
+ regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
kfree(lcd);
return 0;
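
Editor's note: the regulator_bulk_* helpers introduced above work on an array of struct regulator_bulk_data; _get resolves each .supply name against the device, and the enable/disable/free calls then act on the whole set. A condensed sketch of the lifecycle as this patch wires it up (error handling and locking trimmed):

static struct regulator_bulk_data supplies[] = {
        { .supply = "vdd3" },
        { .supply = "vci"  },
};

/* probe */      regulator_bulk_get(lcd->dev, ARRAY_SIZE(supplies), supplies);
/* power on */   regulator_bulk_enable(ARRAY_SIZE(supplies), supplies);
/* power off */  regulator_bulk_disable(ARRAY_SIZE(supplies), supplies);
/* remove */     regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
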
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index 7bbc802..c915e3b 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -188,17 +188,7 @@ static struct platform_driver max8925_backlight_driver = {
.remove = __devexit_p(max8925_backlight_remove),
};
-static int __init max8925_backlight_init(void)
-{
- return platform_driver_register(&max8925_backlight_driver);
-}
-module_init(max8925_backlight_init);
-
-static void __exit max8925_backlight_exit(void)
-{
- platform_driver_unregister(&max8925_backlight_driver);
-};
-module_exit(max8925_backlight_exit);
+module_platform_driver(max8925_backlight_driver);
MODULE_DESCRIPTION("Backlight Driver for Maxim MAX8925");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index 08d26a7..d8cde27 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -195,18 +195,7 @@ static struct platform_driver omapbl_driver = {
},
};
-static int __init omapbl_init(void)
-{
- return platform_driver_register(&omapbl_driver);
-}
-
-static void __exit omapbl_exit(void)
-{
- platform_driver_unregister(&omapbl_driver);
-}
-
-module_init(omapbl_init);
-module_exit(omapbl_exit);
+module_platform_driver(omapbl_driver);
MODULE_AUTHOR("Andrzej Zaborowski <balrog@zabor.org>");
MODULE_DESCRIPTION("OMAP LCD Backlight driver");
diff --git a/drivers/video/backlight/pcf50633-backlight.c b/drivers/video/backlight/pcf50633-backlight.c
index ef5628d..13e88b7 100644
--- a/drivers/video/backlight/pcf50633-backlight.c
+++ b/drivers/video/backlight/pcf50633-backlight.c
@@ -173,17 +173,7 @@ static struct platform_driver pcf50633_bl_driver = {
},
};
-static int __init pcf50633_bl_init(void)
-{
- return platform_driver_register(&pcf50633_bl_driver);
-}
-module_init(pcf50633_bl_init);
-
-static void __exit pcf50633_bl_exit(void)
-{
- platform_driver_unregister(&pcf50633_bl_driver);
-}
-module_exit(pcf50633_bl_exit);
+module_platform_driver(pcf50633_bl_driver);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("PCF50633 backlight driver");
diff --git a/drivers/video/backlight/platform_lcd.c b/drivers/video/backlight/platform_lcd.c
index 302330a..f0bf491 100644
--- a/drivers/video/backlight/platform_lcd.c
+++ b/drivers/video/backlight/platform_lcd.c
@@ -85,7 +85,8 @@ static int __devinit platform_lcd_probe(struct platform_device *pdev)
return -EINVAL;
}
- plcd = kzalloc(sizeof(struct platform_lcd), GFP_KERNEL);
+ plcd = devm_kzalloc(&pdev->dev, sizeof(struct platform_lcd),
+ GFP_KERNEL);
if (!plcd) {
dev_err(dev, "no memory for state\n");
return -ENOMEM;
@@ -98,7 +99,7 @@ static int __devinit platform_lcd_probe(struct platform_device *pdev)
if (IS_ERR(plcd->lcd)) {
dev_err(dev, "cannot register lcd device\n");
err = PTR_ERR(plcd->lcd);
- goto err_mem;
+ goto err;
}
platform_set_drvdata(pdev, plcd);
@@ -106,8 +107,7 @@ static int __devinit platform_lcd_probe(struct platform_device *pdev)
return 0;
- err_mem:
- kfree(plcd);
+ err:
return err;
}
@@ -116,7 +116,6 @@ static int __devexit platform_lcd_remove(struct platform_device *pdev)
struct platform_lcd *plcd = platform_get_drvdata(pdev);
lcd_device_unregister(plcd->lcd);
- kfree(plcd);
return 0;
}
@@ -157,18 +156,7 @@ static struct platform_driver platform_lcd_driver = {
.resume = platform_lcd_resume,
};
-static int __init platform_lcd_init(void)
-{
- return platform_driver_register(&platform_lcd_driver);
-}
-
-static void __exit platform_lcd_cleanup(void)
-{
- platform_driver_unregister(&platform_lcd_driver);
-}
-
-module_init(platform_lcd_init);
-module_exit(platform_lcd_cleanup);
+module_platform_driver(platform_lcd_driver);
MODULE_AUTHOR("Ben Dooks <ben-linux@fluff.org>");
MODULE_LICENSE("GPL v2");
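
Editor's note: devm_kzalloc() ties the allocation's lifetime to &pdev->dev, so the driver core frees it automatically on probe failure and on unbind; that is what lets the kfree() calls and the err_mem label disappear above. The pattern, in brief:

plcd = devm_kzalloc(&pdev->dev, sizeof(*plcd), GFP_KERNEL);
if (!plcd)
        return -ENOMEM;   /* no matching kfree() anywhere, not even in remove() */
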
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 8b5b2a4..7496d04 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -169,10 +169,9 @@ static int pwm_backlight_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
-static int pwm_backlight_suspend(struct platform_device *pdev,
- pm_message_t state)
+static int pwm_backlight_suspend(struct device *dev)
{
- struct backlight_device *bl = platform_get_drvdata(pdev);
+ struct backlight_device *bl = dev_get_drvdata(dev);
struct pwm_bl_data *pb = dev_get_drvdata(&bl->dev);
if (pb->notify)
@@ -184,40 +183,32 @@ static int pwm_backlight_suspend(struct platform_device *pdev,
return 0;
}
-static int pwm_backlight_resume(struct platform_device *pdev)
+static int pwm_backlight_resume(struct device *dev)
{
- struct backlight_device *bl = platform_get_drvdata(pdev);
+ struct backlight_device *bl = dev_get_drvdata(dev);
backlight_update_status(bl);
return 0;
}
-#else
-#define pwm_backlight_suspend NULL
-#define pwm_backlight_resume NULL
+
+static SIMPLE_DEV_PM_OPS(pwm_backlight_pm_ops, pwm_backlight_suspend,
+ pwm_backlight_resume);
+
#endif
static struct platform_driver pwm_backlight_driver = {
.driver = {
.name = "pwm-backlight",
.owner = THIS_MODULE,
+#ifdef CONFIG_PM
+ .pm = &pwm_backlight_pm_ops,
+#endif
},
.probe = pwm_backlight_probe,
.remove = pwm_backlight_remove,
- .suspend = pwm_backlight_suspend,
- .resume = pwm_backlight_resume,
};
-static int __init pwm_backlight_init(void)
-{
- return platform_driver_register(&pwm_backlight_driver);
-}
-module_init(pwm_backlight_init);
-
-static void __exit pwm_backlight_exit(void)
-{
- platform_driver_unregister(&pwm_backlight_driver);
-}
-module_exit(pwm_backlight_exit);
+module_platform_driver(pwm_backlight_driver);
MODULE_DESCRIPTION("PWM based Backlight Driver");
MODULE_LICENSE("GPL");
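
Editor's note: SIMPLE_DEV_PM_OPS() bundles the two callbacks into a struct dev_pm_ops covering the system sleep entry points (suspend/resume plus the hibernation hooks), which is why the legacy .suspend/.resume members of the platform_driver are dropped and the callbacks now take a struct device. A roughly equivalent hand-written form, assuming the macro's usual definition in <linux/pm.h>:

static const struct dev_pm_ops pwm_backlight_pm_ops = {
        .suspend  = pwm_backlight_suspend,
        .resume   = pwm_backlight_resume,
        .freeze   = pwm_backlight_suspend,
        .thaw     = pwm_backlight_resume,
        .poweroff = pwm_backlight_suspend,
        .restore  = pwm_backlight_resume,
};
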
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
index fbe9e93..4e915f5 100644
--- a/drivers/video/backlight/wm831x_bl.c
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -236,17 +236,7 @@ static struct platform_driver wm831x_backlight_driver = {
.remove = wm831x_backlight_remove,
};
-static int __init wm831x_backlight_init(void)
-{
- return platform_driver_register(&wm831x_backlight_driver);
-}
-module_init(wm831x_backlight_init);
-
-static void __exit wm831x_backlight_exit(void)
-{
- platform_driver_unregister(&wm831x_backlight_driver);
-}
-module_exit(wm831x_backlight_exit);
+module_platform_driver(wm831x_backlight_driver);
MODULE_DESCRIPTION("Backlight Driver for WM831x PMICs");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com");
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index 56720fb..46b03f5 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -4,7 +4,7 @@
* Author: Michael Hennerich <hennerich@blackfin.uclinux.org>
*
* Created:
- * Description: ADSP-BF54x Framebufer driver
+ * Description: ADSP-BF54x Framebuffer driver
*
*
* Modified:
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index d5e1267..7a0c05f 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -4,7 +4,7 @@
* Author: Michael Hennerich <hennerich@blackfin.uclinux.org>
*
* Created:
- * Description: Blackfin LCD Framebufer driver
+ * Description: Blackfin LCD Framebuffer driver
*
*
* Modified:
diff --git a/drivers/video/cirrusfb.c b/drivers/video/cirrusfb.c
index 6df7c54..738c8ce 100644
--- a/drivers/video/cirrusfb.c
+++ b/drivers/video/cirrusfb.c
@@ -280,52 +280,74 @@ MODULE_DEVICE_TABLE(pci, cirrusfb_pci_table);
#endif /* CONFIG_PCI */
#ifdef CONFIG_ZORRO
-static const struct zorro_device_id cirrusfb_zorro_table[] = {
+struct zorrocl {
+ enum cirrus_board type; /* Board type */
+ u32 regoffset; /* Offset of registers in first Zorro device */
+ u32 ramsize; /* Size of video RAM in first Zorro device */
+ /* If zero, use autoprobe on RAM device */
+ u32 ramoffset; /* Offset of video RAM in first Zorro device */
+ zorro_id ramid; /* Zorro ID of RAM device */
+ zorro_id ramid2; /* Zorro ID of optional second RAM device */
+};
+
+static const struct zorrocl zcl_sd64 __devinitconst = {
+ .type = BT_SD64,
+ .ramid = ZORRO_PROD_HELFRICH_SD64_RAM,
+};
+
+static const struct zorrocl zcl_piccolo __devinitconst = {
+ .type = BT_PICCOLO,
+ .ramid = ZORRO_PROD_HELFRICH_PICCOLO_RAM,
+};
+
+static const struct zorrocl zcl_picasso __devinitconst = {
+ .type = BT_PICASSO,
+ .ramid = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_RAM,
+};
+
+static const struct zorrocl zcl_spectrum __devinitconst = {
+ .type = BT_SPECTRUM,
+ .ramid = ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_RAM,
+};
+
+static const struct zorrocl zcl_picasso4_z3 __devinitconst = {
+ .type = BT_PICASSO4,
+ .regoffset = 0x00600000,
+ .ramsize = 4 * MB_,
+ .ramoffset = 0x01000000, /* 0x02000000 for 64 MiB boards */
+};
+
+static const struct zorrocl zcl_picasso4_z2 __devinitconst = {
+ .type = BT_PICASSO4,
+ .regoffset = 0x10000,
+ .ramid = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z2_RAM1,
+ .ramid2 = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z2_RAM2,
+};
+
+
+static const struct zorro_device_id cirrusfb_zorro_table[] __devinitconst = {
{
- .id = ZORRO_PROD_HELFRICH_SD64_RAM,
- .driver_data = BT_SD64,
+ .id = ZORRO_PROD_HELFRICH_SD64_REG,
+ .driver_data = (unsigned long)&zcl_sd64,
}, {
- .id = ZORRO_PROD_HELFRICH_PICCOLO_RAM,
- .driver_data = BT_PICCOLO,
+ .id = ZORRO_PROD_HELFRICH_PICCOLO_REG,
+ .driver_data = (unsigned long)&zcl_piccolo,
}, {
- .id = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_RAM,
- .driver_data = BT_PICASSO,
+ .id = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_REG,
+ .driver_data = (unsigned long)&zcl_picasso,
}, {
- .id = ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_RAM,
- .driver_data = BT_SPECTRUM,
+ .id = ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_REG,
+ .driver_data = (unsigned long)&zcl_spectrum,
}, {
.id = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z3,
- .driver_data = BT_PICASSO4,
+ .driver_data = (unsigned long)&zcl_picasso4_z3,
+ }, {
+ .id = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z2_REG,
+ .driver_data = (unsigned long)&zcl_picasso4_z2,
},
{ 0 }
};
MODULE_DEVICE_TABLE(zorro, cirrusfb_zorro_table);
-
-static const struct {
- zorro_id id2;
- unsigned long size;
-} cirrusfb_zorro_table2[] = {
- [BT_SD64] = {
- .id2 = ZORRO_PROD_HELFRICH_SD64_REG,
- .size = 0x400000
- },
- [BT_PICCOLO] = {
- .id2 = ZORRO_PROD_HELFRICH_PICCOLO_REG,
- .size = 0x200000
- },
- [BT_PICASSO] = {
- .id2 = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_REG,
- .size = 0x200000
- },
- [BT_SPECTRUM] = {
- .id2 = ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_REG,
- .size = 0x200000
- },
- [BT_PICASSO4] = {
- .id2 = 0,
- .size = 0x400000
- }
-};
#endif /* CONFIG_ZORRO */
#ifdef CIRRUSFB_DEBUG
@@ -350,7 +372,7 @@ struct cirrusfb_info {
void (*unmap)(struct fb_info *info);
};
-static int noaccel __devinitdata;
+static bool noaccel __devinitdata;
static char *mode_option __devinitdata = "640x480@60";
/****************************************************************************/
@@ -1956,16 +1978,12 @@ static void cirrusfb_zorro_unmap(struct fb_info *info)
struct cirrusfb_info *cinfo = info->par;
struct zorro_dev *zdev = to_zorro_dev(info->device);
- zorro_release_device(zdev);
-
- if (cinfo->btype == BT_PICASSO4) {
- cinfo->regbase -= 0x600000;
- iounmap((void *)cinfo->regbase);
+ if (info->fix.smem_start > 16 * MB_)
iounmap(info->screen_base);
- } else {
- if (zorro_resource_start(zdev) > 0x01000000)
- iounmap(info->screen_base);
- }
+ if (info->fix.mmio_start > 16 * MB_)
+ iounmap(cinfo->regbase);
+
+ zorro_release_device(zdev);
}
#endif /* CONFIG_ZORRO */
@@ -2222,115 +2240,116 @@ static struct pci_driver cirrusfb_pci_driver = {
static int __devinit cirrusfb_zorro_register(struct zorro_dev *z,
const struct zorro_device_id *ent)
{
- struct cirrusfb_info *cinfo;
struct fb_info *info;
+ int error;
+ const struct zorrocl *zcl;
enum cirrus_board btype;
- struct zorro_dev *z2 = NULL;
- unsigned long board_addr, board_size, size;
- int ret;
-
- btype = ent->driver_data;
- if (cirrusfb_zorro_table2[btype].id2)
- z2 = zorro_find_device(cirrusfb_zorro_table2[btype].id2, NULL);
- size = cirrusfb_zorro_table2[btype].size;
+ unsigned long regbase, ramsize, rambase;
+ struct cirrusfb_info *cinfo;
info = framebuffer_alloc(sizeof(struct cirrusfb_info), &z->dev);
if (!info) {
printk(KERN_ERR "cirrusfb: could not allocate memory\n");
- ret = -ENOMEM;
- goto err_out;
+ return -ENOMEM;
+ }
+
+ zcl = (const struct zorrocl *)ent->driver_data;
+ btype = zcl->type;
+ regbase = zorro_resource_start(z) + zcl->regoffset;
+ ramsize = zcl->ramsize;
+ if (ramsize) {
+ rambase = zorro_resource_start(z) + zcl->ramoffset;
+ if (zorro_resource_len(z) == 64 * MB_) {
+ /* Quirk for 64 MiB Picasso IV */
+ rambase += zcl->ramoffset;
+ }
+ } else {
+ struct zorro_dev *ram = zorro_find_device(zcl->ramid, NULL);
+ if (!ram || !zorro_resource_len(ram)) {
+ dev_err(info->device, "No video RAM found\n");
+ error = -ENODEV;
+ goto err_release_fb;
+ }
+ rambase = zorro_resource_start(ram);
+ ramsize = zorro_resource_len(ram);
+ if (zcl->ramid2 &&
+ (ram = zorro_find_device(zcl->ramid2, NULL))) {
+ if (zorro_resource_start(ram) != rambase + ramsize) {
+ dev_warn(info->device,
+ "Skipping non-contiguous RAM at %pR\n",
+ &ram->resource);
+ } else {
+ ramsize += zorro_resource_len(ram);
+ }
+ }
}
- dev_info(info->device, "%s board detected\n",
- cirrusfb_board_info[btype].name);
-
- cinfo = info->par;
- cinfo->btype = btype;
-
- assert(z);
- assert(btype != BT_NONE);
-
- board_addr = zorro_resource_start(z);
- board_size = zorro_resource_len(z);
- info->screen_size = size;
+ dev_info(info->device,
+ "%s board detected, REG at 0x%lx, %lu MiB RAM at 0x%lx\n",
+ cirrusfb_board_info[btype].name, regbase, ramsize / MB_,
+ rambase);
if (!zorro_request_device(z, "cirrusfb")) {
- dev_err(info->device, "cannot reserve region 0x%lx, abort\n",
- board_addr);
- ret = -EBUSY;
+ dev_err(info->device, "Cannot reserve %pR\n", &z->resource);
+ error = -EBUSY;
goto err_release_fb;
}
- ret = -EIO;
-
- if (btype == BT_PICASSO4) {
- dev_info(info->device, " REG at $%lx\n", board_addr + 0x600000);
-
- /* To be precise, for the P4 this is not the */
- /* begin of the board, but the begin of RAM. */
- /* for P4, map in its address space in 2 chunks (### TEST! ) */
- /* (note the ugly hardcoded 16M number) */
- cinfo->regbase = ioremap(board_addr, 16777216);
- if (!cinfo->regbase)
- goto err_release_region;
-
- dev_dbg(info->device, "Virtual address for board set to: $%p\n",
- cinfo->regbase);
- cinfo->regbase += 0x600000;
- info->fix.mmio_start = board_addr + 0x600000;
-
- info->fix.smem_start = board_addr + 16777216;
- info->screen_base = ioremap(info->fix.smem_start, 16777216);
- if (!info->screen_base)
- goto err_unmap_regbase;
- } else {
- dev_info(info->device, " REG at $%lx\n",
- (unsigned long) z2->resource.start);
-
- info->fix.smem_start = board_addr;
- if (board_addr > 0x01000000)
- info->screen_base = ioremap(board_addr, board_size);
- else
- info->screen_base = (caddr_t) ZTWO_VADDR(board_addr);
- if (!info->screen_base)
- goto err_release_region;
+ cinfo = info->par;
+ cinfo->btype = btype;
- /* set address for REG area of board */
- cinfo->regbase = (caddr_t) ZTWO_VADDR(z2->resource.start);
- info->fix.mmio_start = z2->resource.start;
+ info->fix.mmio_start = regbase;
+ cinfo->regbase = regbase > 16 * MB_ ? ioremap(regbase, 64 * 1024)
+ : (caddr_t)ZTWO_VADDR(regbase);
+ if (!cinfo->regbase) {
+ dev_err(info->device, "Cannot map registers\n");
+ error = -EIO;
+ goto err_release_dev;
+ }
- dev_dbg(info->device, "Virtual address for board set to: $%p\n",
- cinfo->regbase);
+ info->fix.smem_start = rambase;
+ info->screen_size = ramsize;
+ info->screen_base = rambase > 16 * MB_ ? ioremap(rambase, ramsize)
+ : (caddr_t)ZTWO_VADDR(rambase);
+ if (!info->screen_base) {
+ dev_err(info->device, "Cannot map video RAM\n");
+ error = -EIO;
+ goto err_unmap_reg;
}
+
cinfo->unmap = cirrusfb_zorro_unmap;
dev_info(info->device,
- "Cirrus Logic chipset on Zorro bus, RAM (%lu MB) at $%lx\n",
- board_size / MB_, board_addr);
-
- zorro_set_drvdata(z, info);
+ "Cirrus Logic chipset on Zorro bus, RAM (%lu MiB) at 0x%lx\n",
+ ramsize / MB_, rambase);
/* MCLK select etc. */
if (cirrusfb_board_info[btype].init_sr1f)
vga_wseq(cinfo->regbase, CL_SEQR1F,
cirrusfb_board_info[btype].sr1f);
- ret = cirrusfb_register(info);
- if (!ret)
- return 0;
+ error = cirrusfb_register(info);
+ if (error) {
+ dev_err(info->device, "Failed to register device, error %d\n",
+ error);
+ goto err_unmap_ram;
+ }
- if (btype == BT_PICASSO4 || board_addr > 0x01000000)
+ zorro_set_drvdata(z, info);
+ return 0;
+
+err_unmap_ram:
+ if (rambase > 16 * MB_)
iounmap(info->screen_base);
-err_unmap_regbase:
- if (btype == BT_PICASSO4)
- iounmap(cinfo->regbase - 0x600000);
-err_release_region:
- release_region(board_addr, board_size);
+err_unmap_reg:
+ if (regbase > 16 * MB_)
+ iounmap(cinfo->regbase);
+err_release_dev:
+ zorro_release_device(z);
err_release_fb:
framebuffer_release(info);
-err_out:
- return ret;
+ return error;
}
void __devexit cirrusfb_zorro_unregister(struct zorro_dev *z)
@@ -2338,6 +2357,7 @@ void __devexit cirrusfb_zorro_unregister(struct zorro_dev *z)
struct fb_info *info = zorro_get_drvdata(z);
cirrusfb_cleanup(info);
+ zorro_set_drvdata(z, NULL);
}
static struct zorro_driver cirrusfb_zorro_driver = {
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 2209e35..c2d11fe 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -66,8 +66,6 @@ config SGI_NEWPORT_CONSOLE
Say Y here if you want the console on the Newport aka XL graphics
card of your Indy. Most people say Y here.
-# bool 'IODC console' CONFIG_IODC_CONSOLE
-
config DUMMY_CONSOLE
bool
depends on VGA_CONSOLE!=y || SGI_NEWPORT_CONSOLE!=y
diff --git a/drivers/video/console/newport_con.c b/drivers/video/console/newport_con.c
index 93317b5..a122d92 100644
--- a/drivers/video/console/newport_con.c
+++ b/drivers/video/console/newport_con.c
@@ -25,14 +25,13 @@
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgtable.h>
+#include <asm/gio_device.h>
+
#include <video/newport.h>
#include <linux/linux_logo.h>
#include <linux/font.h>
-
-extern unsigned long sgi_gfxaddr;
-
#define FONT_DATA ((unsigned char *)font_vga_8x16.data)
/* borrowed from fbcon.c */
@@ -304,12 +303,6 @@ static const char *newport_startup(void)
{
int i;
- if (!sgi_gfxaddr)
- return NULL;
-
- if (!npregs)
- npregs = (struct newport_regs *)/* ioremap cannot fail */
- ioremap(sgi_gfxaddr, sizeof(struct newport_regs));
npregs->cset.config = NPORT_CFG_GD0;
if (newport_wait(npregs))
@@ -743,26 +736,58 @@ const struct consw newport_con = {
.con_save_screen = DUMMY
};
-#ifdef MODULE
-static int __init newport_console_init(void)
+static int newport_probe(struct gio_device *dev,
+ const struct gio_device_id *id)
{
- if (!sgi_gfxaddr)
- return 0;
+ unsigned long newport_addr;
- if (!npregs)
- npregs = (struct newport_regs *)/* ioremap cannot fail */
- ioremap(sgi_gfxaddr, sizeof(struct newport_regs));
+ if (!dev->resource.start)
+ return -EINVAL;
+
+ if (npregs)
+ return -EBUSY; /* we only support one Newport as console */
+
+ newport_addr = dev->resource.start + 0xF0000;
+ if (!request_mem_region(newport_addr, 0x10000, "Newport"))
+ return -ENODEV;
+
+ npregs = (struct newport_regs *)/* ioremap cannot fail */
+ ioremap(newport_addr, sizeof(struct newport_regs));
return take_over_console(&newport_con, 0, MAX_NR_CONSOLES - 1, 1);
}
-module_init(newport_console_init);
-static void __exit newport_console_exit(void)
+static void newport_remove(struct gio_device *dev)
{
give_up_console(&newport_con);
iounmap((void *)npregs);
}
+
+static struct gio_device_id newport_ids[] = {
+ { .id = 0x7e },
+ { .id = 0xff }
+};
+
+MODULE_ALIAS("gio:7e");
+
+static struct gio_driver newport_driver = {
+ .name = "newport",
+ .id_table = newport_ids,
+ .probe = newport_probe,
+ .remove = newport_remove,
+};
+
+int __init newport_console_init(void)
+{
+ return gio_register_driver(&newport_driver);
+}
+
+void __exit newport_console_exit(void)
+{
+ gio_unregister_driver(&newport_driver);
+}
+
+module_init(newport_console_init);
module_exit(newport_console_exit);
-#endif
MODULE_LICENSE("GPL");
diff --git a/drivers/video/controlfb.c b/drivers/video/controlfb.c
index 7b2c40a..0c189b3 100644
--- a/drivers/video/controlfb.c
+++ b/drivers/video/controlfb.c
@@ -420,7 +420,7 @@ static int __init init_control(struct fb_info_control *p)
/* Try to pick a video mode out of NVRAM if we have one. */
#ifdef CONFIG_NVRAM
- if (default_cmode == CMODE_NVRAM){
+ if (default_cmode == CMODE_NVRAM) {
cmode = nvram_read_byte(NV_CMODE);
if(cmode < CMODE_8 || cmode > CMODE_32)
cmode = CMODE_8;
diff --git a/drivers/video/display/Kconfig b/drivers/video/display/Kconfig
deleted file mode 100644
index f99af93..0000000
--- a/drivers/video/display/Kconfig
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# Display drivers configuration
-#
-
-menu "Display device support"
-
-config DISPLAY_SUPPORT
- tristate "Display panel/monitor support"
- ---help---
- This framework adds support for low-level control of a display.
- This includes support for power.
-
- Enable this to be able to choose the drivers for controlling the
- physical display panel/monitor on some platforms. This not only
- covers LCD displays for PDAs but also other types of displays
- such as CRT, TVout etc.
-
- To have support for your specific display panel you will have to
- select the proper drivers which depend on this option.
-
-comment "Display hardware drivers"
- depends on DISPLAY_SUPPORT
-
-endmenu
diff --git a/drivers/video/display/Makefile b/drivers/video/display/Makefile
deleted file mode 100644
index c0ea832..0000000
--- a/drivers/video/display/Makefile
+++ /dev/null
@@ -1,6 +0,0 @@
-# Display drivers
-
-display-objs := display-sysfs.o
-
-obj-$(CONFIG_DISPLAY_SUPPORT) += display.o
-
diff --git a/drivers/video/display/display-sysfs.c b/drivers/video/display/display-sysfs.c
deleted file mode 100644
index 0c647d7..0000000
--- a/drivers/video/display/display-sysfs.c
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * display-sysfs.c - Display output driver sysfs interface
- *
- * Copyright (C) 2007 James Simmons <jsimmons@infradead.org>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-#include <linux/module.h>
-#include <linux/display.h>
-#include <linux/ctype.h>
-#include <linux/idr.h>
-#include <linux/err.h>
-#include <linux/kdev_t.h>
-#include <linux/slab.h>
-
-static ssize_t display_show_name(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct display_device *dsp = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", dsp->name);
-}
-
-static ssize_t display_show_type(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct display_device *dsp = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", dsp->type);
-}
-
-static ssize_t display_show_contrast(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct display_device *dsp = dev_get_drvdata(dev);
- ssize_t rc = -ENXIO;
-
- mutex_lock(&dsp->lock);
- if (likely(dsp->driver) && dsp->driver->get_contrast)
- rc = sprintf(buf, "%d\n", dsp->driver->get_contrast(dsp));
- mutex_unlock(&dsp->lock);
- return rc;
-}
-
-static ssize_t display_store_contrast(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct display_device *dsp = dev_get_drvdata(dev);
- ssize_t ret = -EINVAL, size;
- int contrast;
- char *endp;
-
- contrast = simple_strtoul(buf, &endp, 0);
- size = endp - buf;
-
- if (isspace(*endp))
- size++;
-
- if (size != count)
- return ret;
-
- mutex_lock(&dsp->lock);
- if (likely(dsp->driver && dsp->driver->set_contrast)) {
- pr_debug("display: set contrast to %d\n", contrast);
- dsp->driver->set_contrast(dsp, contrast);
- ret = count;
- }
- mutex_unlock(&dsp->lock);
- return ret;
-}
-
-static ssize_t display_show_max_contrast(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct display_device *dsp = dev_get_drvdata(dev);
- ssize_t rc = -ENXIO;
-
- mutex_lock(&dsp->lock);
- if (likely(dsp->driver))
- rc = sprintf(buf, "%d\n", dsp->driver->max_contrast);
- mutex_unlock(&dsp->lock);
- return rc;
-}
-
-static struct device_attribute display_attrs[] = {
- __ATTR(name, S_IRUGO, display_show_name, NULL),
- __ATTR(type, S_IRUGO, display_show_type, NULL),
- __ATTR(contrast, S_IRUGO | S_IWUSR, display_show_contrast, display_store_contrast),
- __ATTR(max_contrast, S_IRUGO, display_show_max_contrast, NULL),
-};
-
-static int display_suspend(struct device *dev, pm_message_t state)
-{
- struct display_device *dsp = dev_get_drvdata(dev);
-
- mutex_lock(&dsp->lock);
- if (likely(dsp->driver->suspend))
- dsp->driver->suspend(dsp, state);
- mutex_unlock(&dsp->lock);
- return 0;
-};
-
-static int display_resume(struct device *dev)
-{
- struct display_device *dsp = dev_get_drvdata(dev);
-
- mutex_lock(&dsp->lock);
- if (likely(dsp->driver->resume))
- dsp->driver->resume(dsp);
- mutex_unlock(&dsp->lock);
- return 0;
-};
-
-static struct mutex allocated_dsp_lock;
-static DEFINE_IDR(allocated_dsp);
-static struct class *display_class;
-
-struct display_device *display_device_register(struct display_driver *driver,
- struct device *parent, void *devdata)
-{
- struct display_device *new_dev = NULL;
- int ret = -EINVAL;
-
- if (unlikely(!driver))
- return ERR_PTR(ret);
-
- mutex_lock(&allocated_dsp_lock);
- ret = idr_pre_get(&allocated_dsp, GFP_KERNEL);
- mutex_unlock(&allocated_dsp_lock);
- if (!ret)
- return ERR_PTR(ret);
-
- new_dev = kzalloc(sizeof(struct display_device), GFP_KERNEL);
- if (likely(new_dev) && unlikely(driver->probe(new_dev, devdata))) {
- // Reserve the index for this display
- mutex_lock(&allocated_dsp_lock);
- ret = idr_get_new(&allocated_dsp, new_dev, &new_dev->idx);
- mutex_unlock(&allocated_dsp_lock);
-
- if (!ret) {
- new_dev->dev = device_create(display_class, parent,
- MKDEV(0, 0), new_dev,
- "display%d", new_dev->idx);
- if (!IS_ERR(new_dev->dev)) {
- new_dev->parent = parent;
- new_dev->driver = driver;
- mutex_init(&new_dev->lock);
- return new_dev;
- }
- mutex_lock(&allocated_dsp_lock);
- idr_remove(&allocated_dsp, new_dev->idx);
- mutex_unlock(&allocated_dsp_lock);
- ret = -EINVAL;
- }
- }
- kfree(new_dev);
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL(display_device_register);
-
-void display_device_unregister(struct display_device *ddev)
-{
- if (!ddev)
- return;
- // Free device
- mutex_lock(&ddev->lock);
- device_unregister(ddev->dev);
- mutex_unlock(&ddev->lock);
- // Mark device index as available
- mutex_lock(&allocated_dsp_lock);
- idr_remove(&allocated_dsp, ddev->idx);
- mutex_unlock(&allocated_dsp_lock);
- kfree(ddev);
-}
-EXPORT_SYMBOL(display_device_unregister);
-
-static int __init display_class_init(void)
-{
- display_class = class_create(THIS_MODULE, "display");
- if (IS_ERR(display_class)) {
- printk(KERN_ERR "Failed to create display class\n");
- display_class = NULL;
- return -EINVAL;
- }
- display_class->dev_attrs = display_attrs;
- display_class->suspend = display_suspend;
- display_class->resume = display_resume;
- mutex_init(&allocated_dsp_lock);
- return 0;
-}
-
-static void __exit display_class_exit(void)
-{
- class_destroy(display_class);
-}
-
-module_init(display_class_init);
-module_exit(display_class_exit);
-
-MODULE_DESCRIPTION("Display Hardware handling");
-MODULE_AUTHOR("James Simmons <jsimmons@infradead.org>");
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index ad93629..ac9141b 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -967,6 +967,20 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
memcmp(&info->var, var, sizeof(struct fb_var_screeninfo))) {
u32 activate = var->activate;
+ /* When using FOURCC mode, make sure the red, green, blue and
+ * transp fields are set to 0.
+ */
+ if ((info->fix.capabilities & FB_CAP_FOURCC) &&
+ var->grayscale > 1) {
+ if (var->red.offset || var->green.offset ||
+ var->blue.offset || var->transp.offset ||
+ var->red.length || var->green.length ||
+ var->blue.length || var->transp.length ||
+ var->red.msb_right || var->green.msb_right ||
+ var->blue.msb_right || var->transp.msb_right)
+ return -EINVAL;
+ }
+
if (!info->fbops->fb_check_var) {
*var = info->var;
goto done;
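
Editor's note: the added check enforces the framebuffer FOURCC convention: when a driver sets FB_CAP_FOURCC and user space picks a FOURCC mode, the format code travels in var->grayscale (any value above 1 is interpreted as a FOURCC) and the red/green/blue/transp bitfield descriptions must stay zeroed. A hypothetical user-space request that satisfies the check (the FOURCC helper is assumed from the V4L2 headers):

struct fb_var_screeninfo var;
ioctl(fd, FBIOGET_VSCREENINFO, &var);
memset(&var.red,    0, sizeof(var.red));
memset(&var.green,  0, sizeof(var.green));
memset(&var.blue,   0, sizeof(var.blue));
memset(&var.transp, 0, sizeof(var.transp));
var.grayscale = v4l2_fourcc('U', 'Y', 'V', 'Y');   /* e.g. a packed YUV format */
ioctl(fd, FBIOPUT_VSCREENINFO, &var);
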
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index a16beeb..6af3f16 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -36,8 +36,7 @@
#include <linux/fsl-diu-fb.h>
#include "edid.h"
-#define FSL_AOI_NUM 6 /* 5 AOIs and one dummy AOI */
- /* 1 for plane 0, 2 for plane 1&2 each */
+#define NUM_AOIS 5 /* 1 for plane 0, 2 for planes 1 & 2 each */
/* HW cursor parameters */
#define MAX_CURS 32
@@ -49,12 +48,6 @@
#define INT_PARERR 0x08 /* Display parameters error interrupt */
#define INT_LS_BF_VS 0x10 /* Lines before vsync. interrupt */
-struct diu_addr {
- void *vaddr; /* Virtual address */
- dma_addr_t paddr; /* Physical address */
- __u32 offset;
-};
-
/*
* List of supported video modes
*
@@ -330,23 +323,6 @@ static unsigned int d_cache_line_size;
static DEFINE_SPINLOCK(diu_lock);
-struct fsl_diu_data {
- struct fb_info *fsl_diu_info[FSL_AOI_NUM - 1];
- /*FSL_AOI_NUM has one dummy AOI */
- struct device_attribute dev_attr;
- struct diu_ad *dummy_ad;
- void *dummy_aoi_virt;
- unsigned int irq;
- int fb_enabled;
- enum fsl_diu_monitor_port monitor_port;
- struct diu __iomem *diu_reg;
- spinlock_t reg_lock;
- struct diu_addr ad;
- struct diu_addr gamma;
- struct diu_addr pallete;
- struct diu_addr cursor;
-};
-
enum mfb_index {
PLANE0 = 0, /* Plane 0, only one AOI that fills the screen */
PLANE1_AOI0, /* Plane 1, first AOI */
@@ -370,6 +346,42 @@ struct mfb_info {
u8 *edid_data;
};
+/**
+ * struct fsl_diu_data - per-DIU data structure
+ * @dma_addr: DMA address of this structure
+ * @fsl_diu_info: fb_info objects, one per AOI
+ * @dev_attr: sysfs structure
+ * @irq: IRQ
+ * @monitor_port: the monitor port this DIU is connected to
+ * @diu_reg: pointer to the DIU hardware registers
+ * @reg_lock: spinlock for register access
+ * @dummy_aoi: video buffer for the 4x4 32-bit dummy AOI
+ * @dummy_ad: DIU Area Descriptor for the dummy AOI
+ * @ad[]: Area Descriptors for each real AOI
+ * @gamma: gamma color table
+ * @cursor: hardware cursor data
+ *
+ * This data structure must be allocated with 32-byte alignment, so that the
+ * internal fields can be aligned properly.
+ */
+struct fsl_diu_data {
+ dma_addr_t dma_addr;
+ struct fb_info fsl_diu_info[NUM_AOIS];
+ struct mfb_info mfb[NUM_AOIS];
+ struct device_attribute dev_attr;
+ unsigned int irq;
+ enum fsl_diu_monitor_port monitor_port;
+ struct diu __iomem *diu_reg;
+ spinlock_t reg_lock;
+ u8 dummy_aoi[4 * 4 * 4];
+ struct diu_ad dummy_ad __aligned(8);
+ struct diu_ad ad[NUM_AOIS] __aligned(8);
+ u8 gamma[256 * 3] __aligned(32);
+ u8 cursor[MAX_CURS * MAX_CURS * 2] __aligned(32);
+} __aligned(32);
+
+/* Determine the DMA address of a member of the fsl_diu_data structure */
+#define DMA_ADDR(p, f) ((p)->dma_addr + offsetof(struct fsl_diu_data, f))
static struct mfb_info mfb_template[] = {
{
@@ -449,37 +461,6 @@ static enum fsl_diu_monitor_port fsl_diu_name_to_port(const char *s)
return diu_ops.valid_monitor_port(port);
}
-/**
- * fsl_diu_alloc - allocate memory for the DIU
- * @size: number of bytes to allocate
- * @param: returned physical address of memory
- *
- * This function allocates a physically-contiguous block of memory.
- */
-static void *fsl_diu_alloc(size_t size, phys_addr_t *phys)
-{
- void *virt;
-
- virt = alloc_pages_exact(size, GFP_DMA | __GFP_ZERO);
- if (virt)
- *phys = virt_to_phys(virt);
-
- return virt;
-}
-
-/**
- * fsl_diu_free - release DIU memory
- * @virt: pointer returned by fsl_diu_alloc()
- * @size: number of bytes allocated by fsl_diu_alloc()
- *
- * This function releases memory allocated by fsl_diu_alloc().
- */
-static void fsl_diu_free(void *virt, size_t size)
-{
- if (virt && size)
- free_pages_exact(virt, size);
-}
-
/*
* Workaround for failed writing desc register of planes.
* Needed with MPC5121 DIU rev 2.0 silicon.
@@ -495,8 +476,8 @@ static void fsl_diu_enable_panel(struct fb_info *info)
{
struct mfb_info *pmfbi, *cmfbi, *mfbi = info->par;
struct diu_ad *ad = mfbi->ad;
- struct fsl_diu_data *machine_data = mfbi->parent;
- struct diu __iomem *hw = machine_data->diu_reg;
+ struct fsl_diu_data *data = mfbi->parent;
+ struct diu __iomem *hw = data->diu_reg;
switch (mfbi->index) {
case PLANE0:
@@ -504,7 +485,7 @@ static void fsl_diu_enable_panel(struct fb_info *info)
wr_reg_wa(&hw->desc[0], ad->paddr);
break;
case PLANE1_AOI0:
- cmfbi = machine_data->fsl_diu_info[2]->par;
+ cmfbi = &data->mfb[2];
if (hw->desc[1] != ad->paddr) { /* AOI0 closed */
if (cmfbi->count > 0) /* AOI1 open */
ad->next_ad =
@@ -515,7 +496,7 @@ static void fsl_diu_enable_panel(struct fb_info *info)
}
break;
case PLANE2_AOI0:
- cmfbi = machine_data->fsl_diu_info[4]->par;
+ cmfbi = &data->mfb[4];
if (hw->desc[2] != ad->paddr) { /* AOI0 closed */
if (cmfbi->count > 0) /* AOI1 open */
ad->next_ad =
@@ -526,17 +507,17 @@ static void fsl_diu_enable_panel(struct fb_info *info)
}
break;
case PLANE1_AOI1:
- pmfbi = machine_data->fsl_diu_info[1]->par;
+ pmfbi = &data->mfb[1];
ad->next_ad = 0;
- if (hw->desc[1] == machine_data->dummy_ad->paddr)
+ if (hw->desc[1] == data->dummy_ad.paddr)
wr_reg_wa(&hw->desc[1], ad->paddr);
else /* AOI0 open */
pmfbi->ad->next_ad = cpu_to_le32(ad->paddr);
break;
case PLANE2_AOI1:
- pmfbi = machine_data->fsl_diu_info[3]->par;
+ pmfbi = &data->mfb[3];
ad->next_ad = 0;
- if (hw->desc[2] == machine_data->dummy_ad->paddr)
+ if (hw->desc[2] == data->dummy_ad.paddr)
wr_reg_wa(&hw->desc[2], ad->paddr);
else /* AOI0 was open */
pmfbi->ad->next_ad = cpu_to_le32(ad->paddr);
@@ -548,52 +529,52 @@ static void fsl_diu_disable_panel(struct fb_info *info)
{
struct mfb_info *pmfbi, *cmfbi, *mfbi = info->par;
struct diu_ad *ad = mfbi->ad;
- struct fsl_diu_data *machine_data = mfbi->parent;
- struct diu __iomem *hw = machine_data->diu_reg;
+ struct fsl_diu_data *data = mfbi->parent;
+ struct diu __iomem *hw = data->diu_reg;
switch (mfbi->index) {
case PLANE0:
- if (hw->desc[0] != machine_data->dummy_ad->paddr)
- wr_reg_wa(&hw->desc[0], machine_data->dummy_ad->paddr);
+ if (hw->desc[0] != data->dummy_ad.paddr)
+ wr_reg_wa(&hw->desc[0], data->dummy_ad.paddr);
break;
case PLANE1_AOI0:
- cmfbi = machine_data->fsl_diu_info[2]->par;
+ cmfbi = &data->mfb[2];
if (cmfbi->count > 0) /* AOI1 is open */
wr_reg_wa(&hw->desc[1], cmfbi->ad->paddr);
/* move AOI1 to the first */
else /* AOI1 was closed */
- wr_reg_wa(&hw->desc[1], machine_data->dummy_ad->paddr);
+ wr_reg_wa(&hw->desc[1], data->dummy_ad.paddr);
/* close AOI 0 */
break;
case PLANE2_AOI0:
- cmfbi = machine_data->fsl_diu_info[4]->par;
+ cmfbi = &data->mfb[4];
if (cmfbi->count > 0) /* AOI1 is open */
wr_reg_wa(&hw->desc[2], cmfbi->ad->paddr);
/* move AOI1 to the first */
else /* AOI1 was closed */
- wr_reg_wa(&hw->desc[2], machine_data->dummy_ad->paddr);
+ wr_reg_wa(&hw->desc[2], data->dummy_ad.paddr);
/* close AOI 0 */
break;
case PLANE1_AOI1:
- pmfbi = machine_data->fsl_diu_info[1]->par;
+ pmfbi = &data->mfb[1];
if (hw->desc[1] != ad->paddr) {
/* AOI1 is not the first in the chain */
if (pmfbi->count > 0)
/* AOI0 is open, must be the first */
pmfbi->ad->next_ad = 0;
} else /* AOI1 is the first in the chain */
- wr_reg_wa(&hw->desc[1], machine_data->dummy_ad->paddr);
+ wr_reg_wa(&hw->desc[1], data->dummy_ad.paddr);
/* close AOI 1 */
break;
case PLANE2_AOI1:
- pmfbi = machine_data->fsl_diu_info[3]->par;
+ pmfbi = &data->mfb[3];
if (hw->desc[2] != ad->paddr) {
/* AOI1 is not the first in the chain */
if (pmfbi->count > 0)
/* AOI0 is open, must be the first */
pmfbi->ad->next_ad = 0;
} else /* AOI1 is the first in the chain */
- wr_reg_wa(&hw->desc[2], machine_data->dummy_ad->paddr);
+ wr_reg_wa(&hw->desc[2], data->dummy_ad.paddr);
/* close AOI 1 */
break;
}
@@ -602,39 +583,33 @@ static void fsl_diu_disable_panel(struct fb_info *info)
static void enable_lcdc(struct fb_info *info)
{
struct mfb_info *mfbi = info->par;
- struct fsl_diu_data *machine_data = mfbi->parent;
- struct diu __iomem *hw = machine_data->diu_reg;
+ struct fsl_diu_data *data = mfbi->parent;
+ struct diu __iomem *hw = data->diu_reg;
- if (!machine_data->fb_enabled) {
- out_be32(&hw->diu_mode, MFB_MODE1);
- machine_data->fb_enabled++;
- }
+ out_be32(&hw->diu_mode, MFB_MODE1);
}
static void disable_lcdc(struct fb_info *info)
{
struct mfb_info *mfbi = info->par;
- struct fsl_diu_data *machine_data = mfbi->parent;
- struct diu __iomem *hw = machine_data->diu_reg;
+ struct fsl_diu_data *data = mfbi->parent;
+ struct diu __iomem *hw = data->diu_reg;
- if (machine_data->fb_enabled) {
- out_be32(&hw->diu_mode, 0);
- machine_data->fb_enabled = 0;
- }
+ out_be32(&hw->diu_mode, 0);
}
static void adjust_aoi_size_position(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct mfb_info *lower_aoi_mfbi, *upper_aoi_mfbi, *mfbi = info->par;
- struct fsl_diu_data *machine_data = mfbi->parent;
+ struct fsl_diu_data *data = mfbi->parent;
int available_height, upper_aoi_bottom;
enum mfb_index index = mfbi->index;
int lower_aoi_is_open, upper_aoi_is_open;
__u32 base_plane_width, base_plane_height, upper_aoi_height;
- base_plane_width = machine_data->fsl_diu_info[0]->var.xres;
- base_plane_height = machine_data->fsl_diu_info[0]->var.yres;
+ base_plane_width = data->fsl_diu_info[0].var.xres;
+ base_plane_height = data->fsl_diu_info[0].var.yres;
if (mfbi->x_aoi_d < 0)
mfbi->x_aoi_d = 0;
@@ -649,7 +624,7 @@ static void adjust_aoi_size_position(struct fb_var_screeninfo *var,
break;
case PLANE1_AOI0:
case PLANE2_AOI0:
- lower_aoi_mfbi = machine_data->fsl_diu_info[index+1]->par;
+ lower_aoi_mfbi = data->fsl_diu_info[index+1].par;
lower_aoi_is_open = lower_aoi_mfbi->count > 0 ? 1 : 0;
if (var->xres > base_plane_width)
var->xres = base_plane_width;
@@ -667,9 +642,8 @@ static void adjust_aoi_size_position(struct fb_var_screeninfo *var,
break;
case PLANE1_AOI1:
case PLANE2_AOI1:
- upper_aoi_mfbi = machine_data->fsl_diu_info[index-1]->par;
- upper_aoi_height =
- machine_data->fsl_diu_info[index-1]->var.yres;
+ upper_aoi_mfbi = data->fsl_diu_info[index-1].par;
+ upper_aoi_height = data->fsl_diu_info[index-1].var.yres;
upper_aoi_bottom = upper_aoi_mfbi->y_aoi_d + upper_aoi_height;
upper_aoi_is_open = upper_aoi_mfbi->count > 0 ? 1 : 0;
if (var->xres > base_plane_width)
@@ -809,33 +783,33 @@ static void update_lcdc(struct fb_info *info)
{
struct fb_var_screeninfo *var = &info->var;
struct mfb_info *mfbi = info->par;
- struct fsl_diu_data *machine_data = mfbi->parent;
+ struct fsl_diu_data *data = mfbi->parent;
struct diu __iomem *hw;
int i, j;
- char __iomem *cursor_base, *gamma_table_base;
+ u8 *gamma_table_base;
u32 temp;
- hw = machine_data->diu_reg;
+ hw = data->diu_reg;
+
+ diu_ops.set_monitor_port(data->monitor_port);
+ gamma_table_base = data->gamma;
- diu_ops.set_monitor_port(machine_data->monitor_port);
- gamma_table_base = machine_data->gamma.vaddr;
- cursor_base = machine_data->cursor.vaddr;
/* Prep for DIU init - gamma table, cursor table */
for (i = 0; i <= 2; i++)
for (j = 0; j <= 255; j++)
*gamma_table_base++ = j;
- diu_ops.set_gamma_table(machine_data->monitor_port,
- machine_data->gamma.vaddr);
+ if (diu_ops.set_gamma_table)
+ diu_ops.set_gamma_table(data->monitor_port, data->gamma);
disable_lcdc(info);
/* Program DIU registers */
- out_be32(&hw->gamma, machine_data->gamma.paddr);
- out_be32(&hw->cursor, machine_data->cursor.paddr);
+ out_be32(&hw->gamma, DMA_ADDR(data, gamma));
+ out_be32(&hw->cursor, DMA_ADDR(data, cursor));
out_be32(&hw->bgnd, 0x007F7F7F); /* BGND */
out_be32(&hw->bgnd_wb, 0); /* BGND_WB */
@@ -870,16 +844,17 @@ static void update_lcdc(struct fb_info *info)
static int map_video_memory(struct fb_info *info)
{
- phys_addr_t phys;
u32 smem_len = info->fix.line_length * info->var.yres_virtual;
+ void *p;
- info->screen_base = fsl_diu_alloc(smem_len, &phys);
- if (info->screen_base == NULL) {
+ p = alloc_pages_exact(smem_len, GFP_DMA | __GFP_ZERO);
+ if (!p) {
dev_err(info->dev, "unable to allocate fb memory\n");
return -ENOMEM;
}
mutex_lock(&info->mm_lock);
- info->fix.smem_start = (unsigned long) phys;
+ info->screen_base = p;
+ info->fix.smem_start = virt_to_phys(info->screen_base);
info->fix.smem_len = smem_len;
mutex_unlock(&info->mm_lock);
info->screen_size = info->fix.smem_len;
@@ -889,12 +864,17 @@ static int map_video_memory(struct fb_info *info)
static void unmap_video_memory(struct fb_info *info)
{
- fsl_diu_free(info->screen_base, info->fix.smem_len);
+ void *p = info->screen_base;
+ size_t l = info->fix.smem_len;
+
mutex_lock(&info->mm_lock);
info->screen_base = NULL;
info->fix.smem_start = 0;
info->fix.smem_len = 0;
mutex_unlock(&info->mm_lock);
+
+ if (p)
+ free_pages_exact(p, l);
}
/*
@@ -913,6 +893,59 @@ static int fsl_diu_set_aoi(struct fb_info *info)
return 0;
}
+/**
+ * fsl_diu_get_pixel_format: return the pixel format for a given color depth
+ *
+ * The pixel format is a 32-bit value that determines which bits in each
+ * pixel are to be used for each color. This is the default function used
+ * if the platform does not define its own version.
+ */
+static u32 fsl_diu_get_pixel_format(unsigned int bits_per_pixel)
+{
+#define PF_BYTE_F 0x10000000
+#define PF_ALPHA_C_MASK 0x0E000000
+#define PF_ALPHA_C_SHIFT 25
+#define PF_BLUE_C_MASK 0x01800000
+#define PF_BLUE_C_SHIFT 23
+#define PF_GREEN_C_MASK 0x00600000
+#define PF_GREEN_C_SHIFT 21
+#define PF_RED_C_MASK 0x00180000
+#define PF_RED_C_SHIFT 19
+#define PF_PALETTE 0x00040000
+#define PF_PIXEL_S_MASK 0x00030000
+#define PF_PIXEL_S_SHIFT 16
+#define PF_COMP_3_MASK 0x0000F000
+#define PF_COMP_3_SHIFT 12
+#define PF_COMP_2_MASK 0x00000F00
+#define PF_COMP_2_SHIFT 8
+#define PF_COMP_1_MASK 0x000000F0
+#define PF_COMP_1_SHIFT 4
+#define PF_COMP_0_MASK 0x0000000F
+#define PF_COMP_0_SHIFT 0
+
+#define MAKE_PF(alpha, red, blue, green, size, c0, c1, c2, c3) \
+ cpu_to_le32(PF_BYTE_F | (alpha << PF_ALPHA_C_SHIFT) | \
+ (blue << PF_BLUE_C_SHIFT) | (green << PF_GREEN_C_SHIFT) | \
+ (red << PF_RED_C_SHIFT) | (c3 << PF_COMP_3_SHIFT) | \
+ (c2 << PF_COMP_2_SHIFT) | (c1 << PF_COMP_1_SHIFT) | \
+ (c0 << PF_COMP_0_SHIFT) | (size << PF_PIXEL_S_SHIFT))
+
+ switch (bits_per_pixel) {
+ case 32:
+ /* 0x88883316 */
+ return MAKE_PF(3, 2, 0, 1, 3, 8, 8, 8, 8);
+ case 24:
+ /* 0x88082219 */
+ return MAKE_PF(4, 0, 1, 2, 2, 0, 8, 8, 8);
+ case 16:
+ /* 0x65053118 */
+ return MAKE_PF(4, 2, 1, 0, 1, 5, 6, 5, 0);
+ default:
+ pr_err("fsl-diu: unsupported color depth %u\n", bits_per_pixel);
+ return 0;
+ }
+}
+
/*
* Using the fb_var_screeninfo in fb_info we set the resolution of this
* particular framebuffer. This function alters the fb_fix_screeninfo stored
@@ -926,11 +959,11 @@ static int fsl_diu_set_par(struct fb_info *info)
unsigned long len;
struct fb_var_screeninfo *var = &info->var;
struct mfb_info *mfbi = info->par;
- struct fsl_diu_data *machine_data = mfbi->parent;
+ struct fsl_diu_data *data = mfbi->parent;
struct diu_ad *ad = mfbi->ad;
struct diu __iomem *hw;
- hw = machine_data->diu_reg;
+ hw = data->diu_reg;
set_fix(info);
mfbi->cursor_reset = 1;
@@ -948,8 +981,12 @@ static int fsl_diu_set_par(struct fb_info *info)
}
}
- ad->pix_fmt = diu_ops.get_pixel_format(machine_data->monitor_port,
- var->bits_per_pixel);
+ if (diu_ops.get_pixel_format)
+ ad->pix_fmt = diu_ops.get_pixel_format(data->monitor_port,
+ var->bits_per_pixel);
+ else
+ ad->pix_fmt = fsl_diu_get_pixel_format(var->bits_per_pixel);
+
ad->addr = cpu_to_le32(info->fix.smem_start);
ad->src_size_g_alpha = cpu_to_le32((var->yres_virtual << 12) |
var->xres_virtual) | mfbi->g_alpha;
@@ -1208,21 +1245,6 @@ static struct fb_ops fsl_diu_ops = {
.fb_release = fsl_diu_release,
};
-static int init_fbinfo(struct fb_info *info)
-{
- struct mfb_info *mfbi = info->par;
-
- info->device = NULL;
- info->var.activate = FB_ACTIVATE_NOW;
- info->fbops = &fsl_diu_ops;
- info->flags = FBINFO_FLAG_DEFAULT;
- info->pseudo_palette = &mfbi->pseudo_palette;
-
- /* Allocate colormap */
- fb_alloc_cmap(&info->cmap, 16, 0);
- return 0;
-}
-
static int __devinit install_fb(struct fb_info *info)
{
int rc;
@@ -1232,8 +1254,15 @@ static int __devinit install_fb(struct fb_info *info)
unsigned int dbsize = ARRAY_SIZE(fsl_diu_mode_db);
int has_default_mode = 1;
- if (init_fbinfo(info))
- return -EINVAL;
+ info->var.activate = FB_ACTIVATE_NOW;
+ info->fbops = &fsl_diu_ops;
+ info->flags = FBINFO_DEFAULT | FBINFO_VIRTFB | FBINFO_PARTIAL_PAN_OK |
+ FBINFO_READS_FAST;
+ info->pseudo_palette = mfbi->pseudo_palette;
+
+ rc = fb_alloc_cmap(&info->cmap, 16, 0);
+ if (rc)
+ return rc;
if (mfbi->index == PLANE0) {
if (mfbi->edid_data) {
@@ -1359,16 +1388,16 @@ static irqreturn_t fsl_diu_isr(int irq, void *dev_id)
return IRQ_NONE;
}
-static int request_irq_local(struct fsl_diu_data *machine_data)
+static int request_irq_local(struct fsl_diu_data *data)
{
- struct diu __iomem *hw = machine_data->diu_reg;
+ struct diu __iomem *hw = data->diu_reg;
u32 ints;
int ret;
/* Read to clear the status */
in_be32(&hw->int_status);
- ret = request_irq(machine_data->irq, fsl_diu_isr, 0, "fsl-diu-fb", hw);
+ ret = request_irq(data->irq, fsl_diu_isr, 0, "fsl-diu-fb", hw);
if (!ret) {
ints = INT_PARERR | INT_LS_BF_VS;
#if !defined(CONFIG_NOT_COHERENT_CACHE)
@@ -1383,14 +1412,14 @@ static int request_irq_local(struct fsl_diu_data *machine_data)
return ret;
}
-static void free_irq_local(struct fsl_diu_data *machine_data)
+static void free_irq_local(struct fsl_diu_data *data)
{
- struct diu __iomem *hw = machine_data->diu_reg;
+ struct diu __iomem *hw = data->diu_reg;
/* Disable all LCDC interrupt */
out_be32(&hw->int_mask, 0x1f);
- free_irq(machine_data->irq, NULL);
+ free_irq(data->irq, NULL);
}
#ifdef CONFIG_PM
@@ -1400,20 +1429,20 @@ static void free_irq_local(struct fsl_diu_data *machine_data)
*/
static int fsl_diu_suspend(struct platform_device *ofdev, pm_message_t state)
{
- struct fsl_diu_data *machine_data;
+ struct fsl_diu_data *data;
- machine_data = dev_get_drvdata(&ofdev->dev);
- disable_lcdc(machine_data->fsl_diu_info[0]);
+ data = dev_get_drvdata(&ofdev->dev);
+ disable_lcdc(data->fsl_diu_info);
return 0;
}
static int fsl_diu_resume(struct platform_device *ofdev)
{
- struct fsl_diu_data *machine_data;
+ struct fsl_diu_data *data;
- machine_data = dev_get_drvdata(&ofdev->dev);
- enable_lcdc(machine_data->fsl_diu_info[0]);
+ data = dev_get_drvdata(&ofdev->dev);
+ enable_lcdc(data->fsl_diu_info);
return 0;
}
@@ -1423,56 +1452,24 @@ static int fsl_diu_resume(struct platform_device *ofdev)
#define fsl_diu_resume NULL
#endif /* CONFIG_PM */
-/* Align to 64-bit(8-byte), 32-byte, etc. */
-static int allocate_buf(struct device *dev, struct diu_addr *buf, u32 size,
- u32 bytes_align)
-{
- u32 offset;
- dma_addr_t mask;
-
- buf->vaddr =
- dma_alloc_coherent(dev, size + bytes_align, &buf->paddr,
- GFP_DMA | __GFP_ZERO);
- if (!buf->vaddr)
- return -ENOMEM;
-
- mask = bytes_align - 1;
- offset = buf->paddr & mask;
- if (offset) {
- buf->offset = bytes_align - offset;
- buf->paddr = buf->paddr + offset;
- } else
- buf->offset = 0;
-
- return 0;
-}
-
-static void free_buf(struct device *dev, struct diu_addr *buf, u32 size,
- u32 bytes_align)
-{
- dma_free_coherent(dev, size + bytes_align, buf->vaddr,
- buf->paddr - buf->offset);
-}
-
static ssize_t store_monitor(struct device *device,
struct device_attribute *attr, const char *buf, size_t count)
{
enum fsl_diu_monitor_port old_monitor_port;
- struct fsl_diu_data *machine_data =
+ struct fsl_diu_data *data =
container_of(attr, struct fsl_diu_data, dev_attr);
- old_monitor_port = machine_data->monitor_port;
- machine_data->monitor_port = fsl_diu_name_to_port(buf);
+ old_monitor_port = data->monitor_port;
+ data->monitor_port = fsl_diu_name_to_port(buf);
- if (old_monitor_port != machine_data->monitor_port) {
+ if (old_monitor_port != data->monitor_port) {
/* All AOIs need adjust pixel format
* fsl_diu_set_par only change the pixsel format here
* unlikely to fail. */
- fsl_diu_set_par(machine_data->fsl_diu_info[0]);
- fsl_diu_set_par(machine_data->fsl_diu_info[1]);
- fsl_diu_set_par(machine_data->fsl_diu_info[2]);
- fsl_diu_set_par(machine_data->fsl_diu_info[3]);
- fsl_diu_set_par(machine_data->fsl_diu_info[4]);
+ unsigned int i;
+
+ for (i = 0; i < NUM_AOIS; i++)
+ fsl_diu_set_par(&data->fsl_diu_info[i]);
}
return count;
}
@@ -1480,10 +1477,10 @@ static ssize_t store_monitor(struct device *device,
static ssize_t show_monitor(struct device *device,
struct device_attribute *attr, char *buf)
{
- struct fsl_diu_data *machine_data =
+ struct fsl_diu_data *data =
container_of(attr, struct fsl_diu_data, dev_attr);
- switch (machine_data->monitor_port) {
+ switch (data->monitor_port) {
case FSL_DIU_PORT_DVI:
return sprintf(buf, "DVI\n");
case FSL_DIU_PORT_LVDS:
@@ -1499,28 +1496,52 @@ static int __devinit fsl_diu_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct mfb_info *mfbi;
- phys_addr_t dummy_ad_addr = 0;
- int ret, i, error = 0;
- struct fsl_diu_data *machine_data;
+ struct fsl_diu_data *data;
int diu_mode;
+ dma_addr_t dma_addr; /* DMA addr of fsl_diu_data struct */
+ unsigned int i;
+ int ret;
- machine_data = kzalloc(sizeof(struct fsl_diu_data), GFP_KERNEL);
- if (!machine_data)
+ data = dma_alloc_coherent(&pdev->dev, sizeof(struct fsl_diu_data),
+ &dma_addr, GFP_DMA | __GFP_ZERO);
+ if (!data)
return -ENOMEM;
+ data->dma_addr = dma_addr;
+
+ /*
+ * dma_alloc_coherent() uses a page allocator, so the address is
+ * always page-aligned. We need the memory to be 32-byte aligned,
+ * so that's good. However, if one day the allocator changes, we
+ * need to catch that. It's not worth the effort to handle unaligned
+ * allocations now because it's highly unlikely to ever be a problem.
+ */
+ if ((unsigned long)data & 31) {
+ dev_err(&pdev->dev, "misaligned allocation");
+ ret = -ENOMEM;
+ goto error;
+ }
- spin_lock_init(&machine_data->reg_lock);
+ spin_lock_init(&data->reg_lock);
- for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++) {
- machine_data->fsl_diu_info[i] =
- framebuffer_alloc(sizeof(struct mfb_info), &pdev->dev);
- if (!machine_data->fsl_diu_info[i]) {
- dev_err(&pdev->dev, "cannot allocate memory\n");
- ret = -ENOMEM;
- goto error2;
- }
- mfbi = machine_data->fsl_diu_info[i]->par;
+ for (i = 0; i < NUM_AOIS; i++) {
+ struct fb_info *info = &data->fsl_diu_info[i];
+
+ info->device = &pdev->dev;
+ info->par = &data->mfb[i];
+
+ /*
+ * We store the physical address of the AD in the reserved
+ * 'paddr' field of the AD itself.
+ */
+ data->ad[i].paddr = DMA_ADDR(data, ad[i]);
+
+ info->fix.smem_start = 0;
+
+ /* Initialize the AOI data structure */
+ mfbi = info->par;
memcpy(mfbi, &mfb_template[i], sizeof(struct mfb_info));
- mfbi->parent = machine_data;
+ mfbi->parent = data;
+ mfbi->ad = &data->ad[i];
if (mfbi->index == PLANE0) {
const u8 *prop;
@@ -1534,158 +1555,102 @@ static int __devinit fsl_diu_probe(struct platform_device *pdev)
}
}
- machine_data->diu_reg = of_iomap(np, 0);
- if (!machine_data->diu_reg) {
+ data->diu_reg = of_iomap(np, 0);
+ if (!data->diu_reg) {
dev_err(&pdev->dev, "cannot map DIU registers\n");
ret = -EFAULT;
- goto error2;
+ goto error;
}
- diu_mode = in_be32(&machine_data->diu_reg->diu_mode);
+ diu_mode = in_be32(&data->diu_reg->diu_mode);
if (diu_mode == MFB_MODE0)
- out_be32(&machine_data->diu_reg->diu_mode, 0); /* disable DIU */
+ out_be32(&data->diu_reg->diu_mode, 0); /* disable DIU */
/* Get the IRQ of the DIU */
- machine_data->irq = irq_of_parse_and_map(np, 0);
+ data->irq = irq_of_parse_and_map(np, 0);
- if (!machine_data->irq) {
+ if (!data->irq) {
dev_err(&pdev->dev, "could not get DIU IRQ\n");
ret = -EINVAL;
goto error;
}
- machine_data->monitor_port = monitor_port;
-
- /* Area descriptor memory pool aligns to 64-bit boundary */
- if (allocate_buf(&pdev->dev, &machine_data->ad,
- sizeof(struct diu_ad) * FSL_AOI_NUM, 8))
- return -ENOMEM;
-
- /* Get memory for Gamma Table - 32-byte aligned memory */
- if (allocate_buf(&pdev->dev, &machine_data->gamma, 768, 32)) {
- ret = -ENOMEM;
- goto error;
- }
-
- /* For performance, cursor bitmap buffer aligns to 32-byte boundary */
- if (allocate_buf(&pdev->dev, &machine_data->cursor,
- MAX_CURS * MAX_CURS * 2, 32)) {
- ret = -ENOMEM;
- goto error;
- }
-
- i = ARRAY_SIZE(machine_data->fsl_diu_info);
- machine_data->dummy_ad = (struct diu_ad *)((u32)machine_data->ad.vaddr +
- machine_data->ad.offset) + i;
- machine_data->dummy_ad->paddr = machine_data->ad.paddr +
- i * sizeof(struct diu_ad);
- machine_data->dummy_aoi_virt = fsl_diu_alloc(64, &dummy_ad_addr);
- if (!machine_data->dummy_aoi_virt) {
- ret = -ENOMEM;
- goto error;
- }
- machine_data->dummy_ad->addr = cpu_to_le32(dummy_ad_addr);
- machine_data->dummy_ad->pix_fmt = 0x88882317;
- machine_data->dummy_ad->src_size_g_alpha = cpu_to_le32((4 << 12) | 4);
- machine_data->dummy_ad->aoi_size = cpu_to_le32((4 << 16) | 2);
- machine_data->dummy_ad->offset_xyi = 0;
- machine_data->dummy_ad->offset_xyd = 0;
- machine_data->dummy_ad->next_ad = 0;
+ data->monitor_port = monitor_port;
+
+ /* Initialize the dummy Area Descriptor */
+ data->dummy_ad.addr = cpu_to_le32(DMA_ADDR(data, dummy_aoi));
+ data->dummy_ad.pix_fmt = 0x88882317;
+ data->dummy_ad.src_size_g_alpha = cpu_to_le32((4 << 12) | 4);
+ data->dummy_ad.aoi_size = cpu_to_le32((4 << 16) | 2);
+ data->dummy_ad.offset_xyi = 0;
+ data->dummy_ad.offset_xyd = 0;
+ data->dummy_ad.next_ad = 0;
+ data->dummy_ad.paddr = DMA_ADDR(data, dummy_ad);
/*
* Let DIU display splash screen if it was pre-initialized
* by the bootloader, set dummy area descriptor otherwise.
*/
if (diu_mode == MFB_MODE0)
- out_be32(&machine_data->diu_reg->desc[0],
- machine_data->dummy_ad->paddr);
-
- out_be32(&machine_data->diu_reg->desc[1], machine_data->dummy_ad->paddr);
- out_be32(&machine_data->diu_reg->desc[2], machine_data->dummy_ad->paddr);
-
- for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++) {
- machine_data->fsl_diu_info[i]->fix.smem_start = 0;
- mfbi = machine_data->fsl_diu_info[i]->par;
- mfbi->ad = (struct diu_ad *)((u32)machine_data->ad.vaddr
- + machine_data->ad.offset) + i;
- mfbi->ad->paddr =
- machine_data->ad.paddr + i * sizeof(struct diu_ad);
- ret = install_fb(machine_data->fsl_diu_info[i]);
+ out_be32(&data->diu_reg->desc[0], data->dummy_ad.paddr);
+
+ out_be32(&data->diu_reg->desc[1], data->dummy_ad.paddr);
+ out_be32(&data->diu_reg->desc[2], data->dummy_ad.paddr);
+
+ for (i = 0; i < NUM_AOIS; i++) {
+ ret = install_fb(&data->fsl_diu_info[i]);
if (ret) {
dev_err(&pdev->dev, "could not register fb %d\n", i);
goto error;
}
}
- if (request_irq_local(machine_data)) {
+ if (request_irq_local(data)) {
dev_err(&pdev->dev, "could not claim irq\n");
goto error;
}
- sysfs_attr_init(&machine_data->dev_attr.attr);
- machine_data->dev_attr.attr.name = "monitor";
- machine_data->dev_attr.attr.mode = S_IRUGO|S_IWUSR;
- machine_data->dev_attr.show = show_monitor;
- machine_data->dev_attr.store = store_monitor;
- error = device_create_file(machine_data->fsl_diu_info[0]->dev,
- &machine_data->dev_attr);
- if (error) {
+ sysfs_attr_init(&data->dev_attr.attr);
+ data->dev_attr.attr.name = "monitor";
+ data->dev_attr.attr.mode = S_IRUGO|S_IWUSR;
+ data->dev_attr.show = show_monitor;
+ data->dev_attr.store = store_monitor;
+ ret = device_create_file(&pdev->dev, &data->dev_attr);
+ if (ret) {
dev_err(&pdev->dev, "could not create sysfs file %s\n",
- machine_data->dev_attr.attr.name);
+ data->dev_attr.attr.name);
}
- dev_set_drvdata(&pdev->dev, machine_data);
+ dev_set_drvdata(&pdev->dev, data);
return 0;
error:
- for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++)
- uninstall_fb(machine_data->fsl_diu_info[i]);
-
- if (machine_data->ad.vaddr)
- free_buf(&pdev->dev, &machine_data->ad,
- sizeof(struct diu_ad) * FSL_AOI_NUM, 8);
- if (machine_data->gamma.vaddr)
- free_buf(&pdev->dev, &machine_data->gamma, 768, 32);
- if (machine_data->cursor.vaddr)
- free_buf(&pdev->dev, &machine_data->cursor,
- MAX_CURS * MAX_CURS * 2, 32);
- if (machine_data->dummy_aoi_virt)
- fsl_diu_free(machine_data->dummy_aoi_virt, 64);
- iounmap(machine_data->diu_reg);
-
-error2:
- for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++)
- if (machine_data->fsl_diu_info[i])
- framebuffer_release(machine_data->fsl_diu_info[i]);
- kfree(machine_data);
+ for (i = 0; i < NUM_AOIS; i++)
+ uninstall_fb(&data->fsl_diu_info[i]);
+
+ iounmap(data->diu_reg);
+
+ dma_free_coherent(&pdev->dev, sizeof(struct fsl_diu_data), data,
+ data->dma_addr);
return ret;
}
static int fsl_diu_remove(struct platform_device *pdev)
{
- struct fsl_diu_data *machine_data;
+ struct fsl_diu_data *data;
int i;
- machine_data = dev_get_drvdata(&pdev->dev);
- disable_lcdc(machine_data->fsl_diu_info[0]);
- free_irq_local(machine_data);
- for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++)
- uninstall_fb(machine_data->fsl_diu_info[i]);
- if (machine_data->ad.vaddr)
- free_buf(&pdev->dev, &machine_data->ad,
- sizeof(struct diu_ad) * FSL_AOI_NUM, 8);
- if (machine_data->gamma.vaddr)
- free_buf(&pdev->dev, &machine_data->gamma, 768, 32);
- if (machine_data->cursor.vaddr)
- free_buf(&pdev->dev, &machine_data->cursor,
- MAX_CURS * MAX_CURS * 2, 32);
- if (machine_data->dummy_aoi_virt)
- fsl_diu_free(machine_data->dummy_aoi_virt, 64);
- iounmap(machine_data->diu_reg);
- for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++)
- if (machine_data->fsl_diu_info[i])
- framebuffer_release(machine_data->fsl_diu_info[i]);
- kfree(machine_data);
+ data = dev_get_drvdata(&pdev->dev);
+ disable_lcdc(&data->fsl_diu_info[0]);
+ free_irq_local(data);
+
+ for (i = 0; i < NUM_AOIS; i++)
+ uninstall_fb(&data->fsl_diu_info[i]);
+
+ iounmap(data->diu_reg);
+
+ dma_free_coherent(&pdev->dev, sizeof(struct fsl_diu_data), data,
+ data->dma_addr);
return 0;
}
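The core of the fsl-diu-fb rework above is that the gamma table, cursor buffer, area descriptors and dummy AOI now live inside a single dma_alloc_coherent()'d fsl_diu_data block, and DMA_ADDR() derives each member's bus address from the base address plus offsetof(). A stand-alone user-space sketch of that addressing pattern; all names and the sample address are illustrative, not the driver's:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;		/* stand-in for the kernel type */

struct demo_data {
	dma_addr_t dma_addr;		/* bus address of this whole block */
	uint8_t gamma[256 * 3];
	uint8_t cursor[32 * 32 * 2];
};

/* Same idea as the driver's DMA_ADDR(): base bus address + member offset. */
#define DEMO_DMA_ADDR(p, f) ((p)->dma_addr + offsetof(struct demo_data, f))

int main(void)
{
	struct demo_data d = { .dma_addr = 0x8f000000ULL };

	printf("gamma  @ 0x%llx\n", (unsigned long long)DEMO_DMA_ADDR(&d, gamma));
	printf("cursor @ 0x%llx\n", (unsigned long long)DEMO_DMA_ADDR(&d, cursor));
	return 0;
}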
diff --git a/drivers/video/grvga.c b/drivers/video/grvga.c
index f37e025..da066c2 100644
--- a/drivers/video/grvga.c
+++ b/drivers/video/grvga.c
@@ -70,7 +70,7 @@ static const struct fb_videomode grvga_modedb[] = {
}
};
-static struct fb_fix_screeninfo grvga_fix __initdata = {
+static struct fb_fix_screeninfo grvga_fix __devinitdata = {
.id = "AG SVGACTRL",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
@@ -267,7 +267,7 @@ static struct fb_ops grvga_ops = {
.fb_imageblit = cfb_imageblit
};
-static int __init grvga_parse_custom(char *options,
+static int __devinit grvga_parse_custom(char *options,
struct fb_var_screeninfo *screendata)
{
char *this_opt;
diff --git a/drivers/video/hgafb.c b/drivers/video/hgafb.c
index 4394389..c645f92 100644
--- a/drivers/video/hgafb.c
+++ b/drivers/video/hgafb.c
@@ -133,7 +133,7 @@ static struct fb_fix_screeninfo hga_fix __devinitdata = {
/* Don't assume that tty1 will be the initial current console. */
static int release_io_port = 0;
static int release_io_ports = 0;
-static int nologo = 0;
+static bool nologo = 0;
/* -------------------------------------------------------------------------
*
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
index 318f6fb..b83f361 100644
--- a/drivers/video/i810/i810_main.c
+++ b/drivers/video/i810/i810_main.c
@@ -135,8 +135,8 @@ static struct pci_driver i810fb_driver = {
static char *mode_option __devinitdata = NULL;
static int vram __devinitdata = 4;
static int bpp __devinitdata = 8;
-static int mtrr __devinitdata;
-static int accel __devinitdata;
+static bool mtrr __devinitdata;
+static bool accel __devinitdata;
static int hsync1 __devinitdata;
static int hsync2 __devinitdata;
static int vsync1 __devinitdata;
@@ -144,10 +144,10 @@ static int vsync2 __devinitdata;
static int xres __devinitdata;
static int yres;
static int vyres __devinitdata;
-static int sync __devinitdata;
-static int extvga __devinitdata;
-static int dcolor __devinitdata;
-static int ddc3 __devinitdata = 2;
+static bool sync __devinitdata;
+static bool extvga __devinitdata;
+static bool dcolor __devinitdata;
+static bool ddc3 __devinitdata;
/*------------------------------------------------------------*/
@@ -1776,7 +1776,7 @@ static void __devinit i810_init_defaults(struct i810fb_par *par,
if (sync)
par->dev_flags |= ALWAYS_SYNC;
- par->ddc_num = ddc3;
+ par->ddc_num = (ddc3 ? 3 : 2);
if (bpp < 8)
bpp = 8;
@@ -1999,7 +1999,7 @@ static int __devinit i810fb_setup(char *options)
else if (!strncmp(this_opt, "dcolor", 6))
dcolor = 1;
else if (!strncmp(this_opt, "ddc3", 4))
- ddc3 = 3;
+ ddc3 = true;
else
mode_option = this_opt;
}
diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c
index 5ba3999..02fd226 100644
--- a/drivers/video/intelfb/intelfbdrv.c
+++ b/drivers/video/intelfb/intelfbdrv.c
@@ -230,15 +230,15 @@ MODULE_DESCRIPTION("Framebuffer driver for Intel(R) " SUPPORTED_CHIPSETS
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DEVICE_TABLE(pci, intelfb_pci_table);
-static int accel = 1;
+static bool accel = 1;
static int vram = 4;
-static int hwcursor = 0;
-static int mtrr = 1;
-static int fixed = 0;
-static int noinit = 0;
-static int noregister = 0;
-static int probeonly = 0;
-static int idonly = 0;
+static bool hwcursor = 0;
+static bool mtrr = 1;
+static bool fixed = 0;
+static bool noinit = 0;
+static bool noregister = 0;
+static bool probeonly = 0;
+static bool idonly = 0;
static int bailearly = 0;
static int voffset = 48;
static char *mode = NULL;
@@ -263,7 +263,7 @@ module_param(probeonly, bool, 0);
MODULE_PARM_DESC(probeonly, "Do a minimal probe (debug)");
module_param(idonly, bool, 0);
MODULE_PARM_DESC(idonly, "Just identify without doing anything else (debug)");
-module_param(bailearly, bool, 0);
+module_param(bailearly, int, 0);
MODULE_PARM_DESC(bailearly, "Bail out early, depending on value (debug)");
module_param(mode, charp, S_IRUGO);
MODULE_PARM_DESC(mode,
@@ -529,7 +529,6 @@ static int __devinit intelfb_pci_register(struct pci_dev *pdev,
if (fb_alloc_cmap(&info->cmap, 256, 1) < 0) {
ERR_MSG("Could not allocate cmap for intelfb_info.\n");
goto err_out_cmap;
- return -ENODEV;
}
dinfo = info->par;
diff --git a/drivers/video/logo/logo.c b/drivers/video/logo/logo.c
index ea7a8cc..080c35b 100644
--- a/drivers/video/logo/logo.c
+++ b/drivers/video/logo/logo.c
@@ -21,7 +21,7 @@
#include <asm/bootinfo.h>
#endif
-static int nologo;
+static bool nologo;
module_param(nologo, bool, 0);
MODULE_PARM_DESC(nologo, "Disables startup logo");
diff --git a/drivers/video/macfb.c b/drivers/video/macfb.c
index 43207cc..fe01add 100644
--- a/drivers/video/macfb.c
+++ b/drivers/video/macfb.c
@@ -592,12 +592,12 @@ static int __init macfb_init(void)
if (!fb_info.screen_base)
return -ENODEV;
- printk("macfb: framebuffer at 0x%08lx, mapped to 0x%p, size %dk\n",
- macfb_fix.smem_start, fb_info.screen_base,
- macfb_fix.smem_len / 1024);
- printk("macfb: mode is %dx%dx%d, linelength=%d\n",
- macfb_defined.xres, macfb_defined.yres,
- macfb_defined.bits_per_pixel, macfb_fix.line_length);
+ pr_info("macfb: framebuffer at 0x%08lx, mapped to 0x%p, size %dk\n",
+ macfb_fix.smem_start, fb_info.screen_base,
+ macfb_fix.smem_len / 1024);
+ pr_info("macfb: mode is %dx%dx%d, linelength=%d\n",
+ macfb_defined.xres, macfb_defined.yres,
+ macfb_defined.bits_per_pixel, macfb_fix.line_length);
/* Fill in the available video resolution */
macfb_defined.xres_virtual = macfb_defined.xres;
@@ -613,14 +613,10 @@ static int __init macfb_init(void)
switch (macfb_defined.bits_per_pixel) {
case 1:
- /*
- * XXX: I think this will catch any program that tries
- * to do FBIO_PUTCMAP when the visual is monochrome.
- */
macfb_defined.red.length = macfb_defined.bits_per_pixel;
macfb_defined.green.length = macfb_defined.bits_per_pixel;
macfb_defined.blue.length = macfb_defined.bits_per_pixel;
- video_cmap_len = 0;
+ video_cmap_len = 2;
macfb_fix.visual = FB_VISUAL_MONO01;
break;
case 2:
@@ -660,11 +656,10 @@ static int __init macfb_init(void)
macfb_fix.visual = FB_VISUAL_TRUECOLOR;
break;
default:
- video_cmap_len = 0;
- macfb_fix.visual = FB_VISUAL_MONO01;
- printk("macfb: unknown or unsupported bit depth: %d\n",
+ pr_err("macfb: unknown or unsupported bit depth: %d\n",
macfb_defined.bits_per_pixel);
- break;
+ err = -EINVAL;
+ goto fail_unmap;
}
/*
@@ -734,8 +729,8 @@ static int __init macfb_init(void)
case MAC_MODEL_Q950:
strcpy(macfb_fix.id, "DAFB");
macfb_setpalette = dafb_setpalette;
- macfb_defined.activate = FB_ACTIVATE_NOW;
dafb_cmap_regs = ioremap(DAFB_BASE, 0x1000);
+ macfb_defined.activate = FB_ACTIVATE_NOW;
break;
/*
@@ -744,8 +739,8 @@ static int __init macfb_init(void)
case MAC_MODEL_LCII:
strcpy(macfb_fix.id, "V8");
macfb_setpalette = v8_brazil_setpalette;
- macfb_defined.activate = FB_ACTIVATE_NOW;
v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000);
+ macfb_defined.activate = FB_ACTIVATE_NOW;
break;
/*
@@ -758,8 +753,8 @@ static int __init macfb_init(void)
case MAC_MODEL_P600:
strcpy(macfb_fix.id, "Brazil");
macfb_setpalette = v8_brazil_setpalette;
- macfb_defined.activate = FB_ACTIVATE_NOW;
v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000);
+ macfb_defined.activate = FB_ACTIVATE_NOW;
break;
/*
@@ -773,10 +768,10 @@ static int __init macfb_init(void)
case MAC_MODEL_P520:
case MAC_MODEL_P550:
case MAC_MODEL_P460:
- macfb_setpalette = v8_brazil_setpalette;
- macfb_defined.activate = FB_ACTIVATE_NOW;
strcpy(macfb_fix.id, "Sonora");
+ macfb_setpalette = v8_brazil_setpalette;
v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000);
+ macfb_defined.activate = FB_ACTIVATE_NOW;
break;
/*
@@ -786,10 +781,10 @@ static int __init macfb_init(void)
*/
case MAC_MODEL_IICI:
case MAC_MODEL_IISI:
- macfb_setpalette = rbv_setpalette;
- macfb_defined.activate = FB_ACTIVATE_NOW;
strcpy(macfb_fix.id, "RBV");
+ macfb_setpalette = rbv_setpalette;
rbv_cmap_regs = ioremap(DAC_BASE, 0x1000);
+ macfb_defined.activate = FB_ACTIVATE_NOW;
break;
/*
@@ -797,10 +792,10 @@ static int __init macfb_init(void)
*/
case MAC_MODEL_Q840:
case MAC_MODEL_C660:
- macfb_setpalette = civic_setpalette;
- macfb_defined.activate = FB_ACTIVATE_NOW;
strcpy(macfb_fix.id, "Civic");
+ macfb_setpalette = civic_setpalette;
civic_cmap_regs = ioremap(CIVIC_BASE, 0x1000);
+ macfb_defined.activate = FB_ACTIVATE_NOW;
break;
@@ -809,26 +804,26 @@ static int __init macfb_init(void)
* We think this may be like the LC II
*/
case MAC_MODEL_LC:
+ strcpy(macfb_fix.id, "LC");
if (vidtest) {
macfb_setpalette = v8_brazil_setpalette;
- macfb_defined.activate = FB_ACTIVATE_NOW;
v8_brazil_cmap_regs =
ioremap(DAC_BASE, 0x1000);
+ macfb_defined.activate = FB_ACTIVATE_NOW;
}
- strcpy(macfb_fix.id, "LC");
break;
/*
* We think this may be like the LC II
*/
case MAC_MODEL_CCL:
+ strcpy(macfb_fix.id, "Color Classic");
if (vidtest) {
macfb_setpalette = v8_brazil_setpalette;
- macfb_defined.activate = FB_ACTIVATE_NOW;
v8_brazil_cmap_regs =
ioremap(DAC_BASE, 0x1000);
+ macfb_defined.activate = FB_ACTIVATE_NOW;
}
- strcpy(macfb_fix.id, "Color Classic");
break;
/*
@@ -893,10 +888,10 @@ static int __init macfb_init(void)
case MAC_MODEL_PB270C:
case MAC_MODEL_PB280:
case MAC_MODEL_PB280C:
- macfb_setpalette = csc_setpalette;
- macfb_defined.activate = FB_ACTIVATE_NOW;
strcpy(macfb_fix.id, "CSC");
+ macfb_setpalette = csc_setpalette;
csc_cmap_regs = ioremap(CSC_BASE, 0x1000);
+ macfb_defined.activate = FB_ACTIVATE_NOW;
break;
default:
@@ -918,8 +913,9 @@ static int __init macfb_init(void)
if (err)
goto fail_dealloc;
- printk("fb%d: %s frame buffer device\n",
- fb_info.node, fb_info.fix.id);
+ pr_info("fb%d: %s frame buffer device\n",
+ fb_info.node, fb_info.fix.id);
+
return 0;
fail_dealloc:
diff --git a/drivers/video/matrox/matroxfb_base.c b/drivers/video/matrox/matroxfb_base.c
index 44bf8d4..401a56e 100644
--- a/drivers/video/matrox/matroxfb_base.c
+++ b/drivers/video/matrox/matroxfb_base.c
@@ -147,7 +147,6 @@ static struct fb_var_screeninfo vesafb_defined = {
39721L,48L,16L,33L,10L,
96L,2L,~0, /* No sync info */
FB_VMODE_NONINTERLACED,
- 0, {0,0,0,0,0}
};
diff --git a/drivers/video/matrox/matroxfb_crtc2.c b/drivers/video/matrox/matroxfb_crtc2.c
index d7112c3..02796a4 100644
--- a/drivers/video/matrox/matroxfb_crtc2.c
+++ b/drivers/video/matrox/matroxfb_crtc2.c
@@ -593,7 +593,6 @@ static struct fb_var_screeninfo matroxfb_dh_defined = {
39721L,48L,16L,33L,10L,
96L,2,0, /* no sync info */
FB_VMODE_NONINTERLACED,
- 0, {0,0,0,0,0}
};
static int matroxfb_dh_regit(const struct matrox_fb_info *minfo,
diff --git a/drivers/video/mbx/mbxfb.c b/drivers/video/mbx/mbxfb.c
index 6ce3416..55bf619 100644
--- a/drivers/video/mbx/mbxfb.c
+++ b/drivers/video/mbx/mbxfb.c
@@ -1053,18 +1053,7 @@ static struct platform_driver mbxfb_driver = {
},
};
-int __devinit mbxfb_init(void)
-{
- return platform_driver_register(&mbxfb_driver);
-}
-
-static void __devexit mbxfb_exit(void)
-{
- platform_driver_unregister(&mbxfb_driver);
-}
-
-module_init(mbxfb_init);
-module_exit(mbxfb_exit);
+module_platform_driver(mbxfb_driver);
MODULE_DESCRIPTION("loadable framebuffer driver for Marathon device");
MODULE_AUTHOR("Mike Rapoport, Compulab");
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index e3406ab..727a514 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -245,6 +245,7 @@ struct mx3fb_data {
uint32_t h_start_width;
uint32_t v_start_width;
+ enum disp_data_mapping disp_data_fmt;
};
struct dma_chan_request {
@@ -287,11 +288,14 @@ static void mx3fb_write_reg(struct mx3fb_data *mx3fb, u32 value, unsigned long r
__raw_writel(value, mx3fb->reg_base + reg);
}
-static const uint32_t di_mappings[] = {
- 0x1600AAAA, 0x00E05555, 0x00070000, 3, /* RGB888 */
- 0x0005000F, 0x000B000F, 0x0011000F, 1, /* RGB666 */
- 0x0011000F, 0x000B000F, 0x0005000F, 1, /* BGR666 */
- 0x0004003F, 0x000A000F, 0x000F003F, 1 /* RGB565 */
+struct di_mapping {
+ uint32_t b0, b1, b2;
+};
+
+static const struct di_mapping di_mappings[] = {
+ [IPU_DISP_DATA_MAPPING_RGB666] = { 0x0005000f, 0x000b000f, 0x0011000f },
+ [IPU_DISP_DATA_MAPPING_RGB565] = { 0x0004003f, 0x000a000f, 0x000f003f },
+ [IPU_DISP_DATA_MAPPING_RGB888] = { 0x00070000, 0x000f0000, 0x00170000 },
};
static void sdc_fb_init(struct mx3fb_info *fbi)
@@ -334,7 +338,7 @@ static void sdc_enable_channel(struct mx3fb_info *mx3_fbi)
/* This enables the channel */
if (mx3_fbi->cookie < 0) {
mx3_fbi->txd = dma_chan->device->device_prep_slave_sg(dma_chan,
- &mx3_fbi->sg[0], 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
+ &mx3_fbi->sg[0], 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (!mx3_fbi->txd) {
dev_err(mx3fb->dev, "Cannot allocate descriptor on %d\n",
dma_chan->chan_id);
@@ -425,7 +429,6 @@ static int sdc_set_window_pos(struct mx3fb_data *mx3fb, enum ipu_channel channel
* @pixel_clk: desired pixel clock frequency in Hz.
* @width: width of panel in pixels.
* @height: height of panel in pixels.
- * @pixel_fmt: pixel format of buffer as FOURCC ASCII code.
* @h_start_width: number of pixel clocks between the HSYNC signal pulse
* and the start of valid data.
* @h_sync_width: width of the HSYNC signal in units of pixel clocks.
@@ -442,7 +445,6 @@ static int sdc_set_window_pos(struct mx3fb_data *mx3fb, enum ipu_channel channel
static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
uint32_t pixel_clk,
uint16_t width, uint16_t height,
- enum pixel_fmt pixel_fmt,
uint16_t h_start_width, uint16_t h_sync_width,
uint16_t h_end_width, uint16_t v_start_width,
uint16_t v_sync_width, uint16_t v_end_width,
@@ -453,6 +455,7 @@ static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
uint32_t old_conf;
uint32_t div;
struct clk *ipu_clk;
+ const struct di_mapping *map;
dev_dbg(mx3fb->dev, "panel size = %d x %d", width, height);
@@ -540,36 +543,10 @@ static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
sig.Vsync_pol << DI_D3_VSYNC_POL_SHIFT;
mx3fb_write_reg(mx3fb, old_conf, DI_DISP_SIG_POL);
- switch (pixel_fmt) {
- case IPU_PIX_FMT_RGB24:
- mx3fb_write_reg(mx3fb, di_mappings[0], DI_DISP3_B0_MAP);
- mx3fb_write_reg(mx3fb, di_mappings[1], DI_DISP3_B1_MAP);
- mx3fb_write_reg(mx3fb, di_mappings[2], DI_DISP3_B2_MAP);
- mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
- ((di_mappings[3] - 1) << 12), DI_DISP_ACC_CC);
- break;
- case IPU_PIX_FMT_RGB666:
- mx3fb_write_reg(mx3fb, di_mappings[4], DI_DISP3_B0_MAP);
- mx3fb_write_reg(mx3fb, di_mappings[5], DI_DISP3_B1_MAP);
- mx3fb_write_reg(mx3fb, di_mappings[6], DI_DISP3_B2_MAP);
- mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
- ((di_mappings[7] - 1) << 12), DI_DISP_ACC_CC);
- break;
- case IPU_PIX_FMT_BGR666:
- mx3fb_write_reg(mx3fb, di_mappings[8], DI_DISP3_B0_MAP);
- mx3fb_write_reg(mx3fb, di_mappings[9], DI_DISP3_B1_MAP);
- mx3fb_write_reg(mx3fb, di_mappings[10], DI_DISP3_B2_MAP);
- mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
- ((di_mappings[11] - 1) << 12), DI_DISP_ACC_CC);
- break;
- default:
- mx3fb_write_reg(mx3fb, di_mappings[12], DI_DISP3_B0_MAP);
- mx3fb_write_reg(mx3fb, di_mappings[13], DI_DISP3_B1_MAP);
- mx3fb_write_reg(mx3fb, di_mappings[14], DI_DISP3_B2_MAP);
- mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
- ((di_mappings[15] - 1) << 12), DI_DISP_ACC_CC);
- break;
- }
+ map = &di_mappings[mx3fb->disp_data_fmt];
+ mx3fb_write_reg(mx3fb, map->b0, DI_DISP3_B0_MAP);
+ mx3fb_write_reg(mx3fb, map->b1, DI_DISP3_B1_MAP);
+ mx3fb_write_reg(mx3fb, map->b2, DI_DISP3_B2_MAP);
spin_unlock_irqrestore(&mx3fb->lock, lock_flags);
@@ -780,8 +757,6 @@ static int __set_par(struct fb_info *fbi, bool lock)
if (sdc_init_panel(mx3fb, mode,
(PICOS2KHZ(fbi->var.pixclock)) * 1000UL,
fbi->var.xres, fbi->var.yres,
- (fbi->var.sync & FB_SYNC_SWAP_RGB) ?
- IPU_PIX_FMT_BGR666 : IPU_PIX_FMT_RGB666,
fbi->var.left_margin,
fbi->var.hsync_len,
fbi->var.right_margin +
@@ -1117,7 +1092,7 @@ static int mx3fb_pan_display(struct fb_var_screeninfo *var,
async_tx_ack(mx3_fbi->txd);
txd = dma_chan->device->device_prep_slave_sg(dma_chan, sg +
- mx3_fbi->cur_ipu_buf, 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
+ mx3_fbi->cur_ipu_buf, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (!txd) {
dev_err(fbi->device,
"Error preparing a DMA transaction descriptor.\n");
@@ -1349,6 +1324,12 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
const struct fb_videomode *mode;
int ret, num_modes;
+ if (mx3fb_pdata->disp_data_fmt >= ARRAY_SIZE(di_mappings)) {
+ dev_err(dev, "Illegal display data format %d\n",
+ mx3fb_pdata->disp_data_fmt);
+ return -EINVAL;
+ }
+
ichan->client = mx3fb;
irq = ichan->eof_irq;
@@ -1402,6 +1383,8 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
mx3fbi->mx3fb = mx3fb;
mx3fbi->blank = FB_BLANK_NORMAL;
+ mx3fb->disp_data_fmt = mx3fb_pdata->disp_data_fmt;
+
init_completion(&mx3fbi->flip_cmpl);
disable_irq(ichan->eof_irq);
dev_dbg(mx3fb->dev, "disabling irq %d\n", ichan->eof_irq);
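With the mx3fb change above, sdc_init_panel() no longer guesses the display data mapping from the pixel format; the board selects it via the new disp_data_fmt field and the driver simply indexes the enum-keyed di_mappings[] table, with a bounds check at probe time. A self-contained sketch of that table-lookup-plus-validation pattern; the enum names and register values are stand-ins, not the ipu headers' definitions:

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum demo_mapping {
	DEMO_MAPPING_RGB666,
	DEMO_MAPPING_RGB565,
	DEMO_MAPPING_RGB888,
};

struct demo_di_mapping {
	uint32_t b0, b1, b2;
};

static const struct demo_di_mapping demo_mappings[] = {
	[DEMO_MAPPING_RGB666] = { 0x0005000f, 0x000b000f, 0x0011000f },
	[DEMO_MAPPING_RGB565] = { 0x0004003f, 0x000a000f, 0x000f003f },
	[DEMO_MAPPING_RGB888] = { 0x00070000, 0x000f0000, 0x00170000 },
};

static int demo_program_maps(unsigned int fmt)
{
	const struct demo_di_mapping *map;

	if (fmt >= ARRAY_SIZE(demo_mappings))	/* same bounds check as the probe path */
		return -1;

	map = &demo_mappings[fmt];
	printf("B0=%#x B1=%#x B2=%#x\n", map->b0, map->b1, map->b2);
	return 0;
}

int main(void)
{
	return demo_program_maps(DEMO_MAPPING_RGB565);
}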
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index d837d63..4a89f88 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -328,7 +328,7 @@ static void mxsfb_enable_controller(struct fb_info *fb_info)
dev_dbg(&host->pdev->dev, "%s\n", __func__);
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk);
clk_set_rate(host->clk, PICOS2KHZ(fb_info->var.pixclock) * 1000U);
/* if it was disabled, re-enable the mode again */
@@ -368,7 +368,7 @@ static void mxsfb_disable_controller(struct fb_info *fb_info)
writel(VDCTRL4_SYNC_SIGNALS_ON, host->base + LCDC_VDCTRL4 + REG_CLR);
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
host->enabled = 0;
}
@@ -668,7 +668,7 @@ static int __devinit mxsfb_restore_mode(struct mxsfb_info *host)
line_count = fb_info->fix.smem_len / fb_info->fix.line_length;
fb_info->fix.ypanstep = 1;
- clk_enable(host->clk);
+ clk_prepare_enable(host->clk);
host->enabled = 1;
return 0;
@@ -841,7 +841,7 @@ static int __devinit mxsfb_probe(struct platform_device *pdev)
error_register:
if (host->enabled)
- clk_disable(host->clk);
+ clk_disable_unprepare(host->clk);
fb_destroy_modelist(&fb_info->modelist);
error_init_fb:
kfree(fb_info->pseudo_palette);
@@ -902,18 +902,7 @@ static struct platform_driver mxsfb_driver = {
},
};
-static int __init mxsfb_init(void)
-{
- return platform_driver_register(&mxsfb_driver);
-}
-
-static void __exit mxsfb_exit(void)
-{
- platform_driver_unregister(&mxsfb_driver);
-}
-
-module_init(mxsfb_init);
-module_exit(mxsfb_exit);
+module_platform_driver(mxsfb_driver);
MODULE_DESCRIPTION("Freescale mxs framebuffer driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
diff --git a/drivers/video/neofb.c b/drivers/video/neofb.c
index feea7b1..fb3f673 100644
--- a/drivers/video/neofb.c
+++ b/drivers/video/neofb.c
@@ -84,11 +84,11 @@
/* --------------------------------------------------------------------- */
-static int internal;
-static int external;
-static int libretto;
-static int nostretch;
-static int nopciburst;
+static bool internal;
+static bool external;
+static bool libretto;
+static bool nostretch;
+static bool nopciburst;
static char *mode_option __devinitdata = NULL;
#ifdef MODULE
diff --git a/drivers/video/nuc900fb.c b/drivers/video/nuc900fb.c
index d1fbbd8..e10f551 100644
--- a/drivers/video/nuc900fb.c
+++ b/drivers/video/nuc900fb.c
@@ -762,18 +762,7 @@ static struct platform_driver nuc900fb_driver = {
},
};
-int __devinit nuc900fb_init(void)
-{
- return platform_driver_register(&nuc900fb_driver);
-}
-
-static void __exit nuc900fb_cleanup(void)
-{
- platform_driver_unregister(&nuc900fb_driver);
-}
-
-module_init(nuc900fb_init);
-module_exit(nuc900fb_cleanup);
+module_platform_driver(nuc900fb_driver);
MODULE_DESCRIPTION("Framebuffer driver for the NUC900");
MODULE_LICENSE("GPL");
diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
index 081dc47..fe13ac5 100644
--- a/drivers/video/nvidia/nvidia.c
+++ b/drivers/video/nvidia/nvidia.c
@@ -81,7 +81,7 @@ static int vram __devinitdata = 0;
static int bpp __devinitdata = 8;
static int reverse_i2c __devinitdata;
#ifdef CONFIG_MTRR
-static int nomtrr __devinitdata = 0;
+static bool nomtrr __devinitdata = false;
#endif
#ifdef CONFIG_PMAC_BACKLIGHT
static int backlight __devinitdata = 1;
@@ -1509,7 +1509,7 @@ static int __devinit nvidiafb_setup(char *options)
backlight = simple_strtoul(this_opt+10, NULL, 0);
#ifdef CONFIG_MTRR
} else if (!strncmp(this_opt, "nomtrr", 6)) {
- nomtrr = 1;
+ nomtrr = true;
#endif
} else if (!strncmp(this_opt, "fpdither:", 9)) {
fpdither = simple_strtol(this_opt+9, NULL, 0);
@@ -1599,7 +1599,7 @@ MODULE_PARM_DESC(bpp, "pixel width in bits"
module_param(reverse_i2c, int, 0);
MODULE_PARM_DESC(reverse_i2c, "reverse port assignment of the i2c bus");
#ifdef CONFIG_MTRR
-module_param(nomtrr, bool, 0);
+module_param(nomtrr, bool, false);
MODULE_PARM_DESC(nomtrr, "Disables MTRR support (0 or 1=disabled) "
"(default=0)");
#endif
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index cb163a5..0c4f343 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -41,13 +41,14 @@
/* Supported palette hacks */
enum {
cmap_unknown,
- cmap_m64, /* ATI Mach64 */
+ cmap_simple, /* ATI Mach64 */
cmap_r128, /* ATI Rage128 */
cmap_M3A, /* ATI Rage Mobility M3 Head A */
cmap_M3B, /* ATI Rage Mobility M3 Head B */
cmap_radeon, /* ATI Radeon */
cmap_gxt2000, /* IBM GXT2000 */
cmap_avivo, /* ATI R5xx */
+ cmap_qemu, /* qemu vga */
};
struct offb_par {
@@ -100,36 +101,32 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
u_int transp, struct fb_info *info)
{
struct offb_par *par = (struct offb_par *) info->par;
- int i, depth;
- u32 *pal = info->pseudo_palette;
-
- depth = info->var.bits_per_pixel;
- if (depth == 16)
- depth = (info->var.green.length == 5) ? 15 : 16;
-
- if (regno > 255 ||
- (depth == 16 && regno > 63) ||
- (depth == 15 && regno > 31))
- return 1;
-
- if (regno < 16) {
- switch (depth) {
- case 15:
- pal[regno] = (regno << 10) | (regno << 5) | regno;
- break;
- case 16:
- pal[regno] = (regno << 11) | (regno << 5) | regno;
- break;
- case 24:
- pal[regno] = (regno << 16) | (regno << 8) | regno;
- break;
- case 32:
- i = (regno << 8) | regno;
- pal[regno] = (i << 16) | i;
- break;
+
+ if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
+ u32 *pal = info->pseudo_palette;
+ u32 cr = red >> (16 - info->var.red.length);
+ u32 cg = green >> (16 - info->var.green.length);
+ u32 cb = blue >> (16 - info->var.blue.length);
+ u32 value;
+
+ if (regno >= 16)
+ return -EINVAL;
+
+ value = (cr << info->var.red.offset) |
+ (cg << info->var.green.offset) |
+ (cb << info->var.blue.offset);
+ if (info->var.transp.length > 0) {
+ u32 mask = (1 << info->var.transp.length) - 1;
+ mask <<= info->var.transp.offset;
+ value |= mask;
}
+ pal[regno] = value;
+ return 0;
}
+ if (regno > 255)
+ return -EINVAL;
+
red >>= 8;
green >>= 8;
blue >>= 8;
@@ -138,7 +135,7 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
return 0;
switch (par->cmap_type) {
- case cmap_m64:
+ case cmap_simple:
writeb(regno, par->cmap_adr);
writeb(red, par->cmap_data);
writeb(green, par->cmap_data);
@@ -208,7 +205,7 @@ static int offb_blank(int blank, struct fb_info *info)
if (blank)
for (i = 0; i < 256; i++) {
switch (par->cmap_type) {
- case cmap_m64:
+ case cmap_simple:
writeb(i, par->cmap_adr);
for (j = 0; j < 3; j++)
writeb(0, par->cmap_data);
@@ -350,7 +347,7 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp
par->cmap_adr =
ioremap(base + 0x7ff000, 0x1000) + 0xcc0;
par->cmap_data = par->cmap_adr + 1;
- par->cmap_type = cmap_m64;
+ par->cmap_type = cmap_simple;
} else if (dp && (of_device_is_compatible(dp, "pci1014,b7") ||
of_device_is_compatible(dp, "pci1014,21c"))) {
par->cmap_adr = offb_map_reg(dp, 0, 0x6000, 0x1000);
@@ -371,6 +368,16 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp
par->cmap_type = cmap_avivo;
}
of_node_put(pciparent);
+ } else if (dp && of_device_is_compatible(dp, "qemu,std-vga")) {
+ const u32 io_of_addr[3] = { 0x01000000, 0x0, 0x0 };
+ u64 io_addr = of_translate_address(dp, io_of_addr);
+ if (io_addr != OF_BAD_ADDR) {
+ par->cmap_adr = ioremap(io_addr + 0x3c8, 2);
+ if (par->cmap_adr) {
+ par->cmap_type = cmap_simple;
+ par->cmap_data = par->cmap_adr + 1;
+ }
+ }
}
info->fix.visual = (par->cmap_type != cmap_unknown) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_STATIC_PSEUDOCOLOR;
@@ -381,7 +388,7 @@ static void __init offb_init_fb(const char *name, const char *full_name,
int pitch, unsigned long address,
int foreign_endian, struct device_node *dp)
{
- unsigned long res_size = pitch * height * (depth + 7) / 8;
+ unsigned long res_size = pitch * height;
struct offb_par *par = &default_par;
unsigned long res_start = address;
struct fb_fix_screeninfo *fix;
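The rewritten offb_setcolreg() above drops the per-depth switch and packs the pseudo-palette entry directly from the red/green/blue offset/length bitfields. A self-contained sketch of that packing arithmetic; the helper name and the RGB565 layout are illustrative:

#include <stdint.h>
#include <stdio.h>

struct demo_bitfield { uint32_t offset, length; };

/* Pack a 16-bit-per-channel color into a pixel described by offset/length
 * bitfields, the same arithmetic the new offb_setcolreg() uses. */
static uint32_t pack_pixel(uint16_t r, uint16_t g, uint16_t b,
			   struct demo_bitfield red, struct demo_bitfield green,
			   struct demo_bitfield blue)
{
	return ((uint32_t)(r >> (16 - red.length)) << red.offset) |
	       ((uint32_t)(g >> (16 - green.length)) << green.offset) |
	       ((uint32_t)(b >> (16 - blue.length)) << blue.offset);
}

int main(void)
{
	/* RGB565: red at offset 11/length 5, green 5/6, blue 0/5 */
	struct demo_bitfield r = { 11, 5 }, g = { 5, 6 }, b = { 0, 5 };

	printf("white = %#x\n", pack_pixel(0xffff, 0xffff, 0xffff, r, g, b));
	return 0;
}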
diff --git a/drivers/video/omap/lcd_ams_delta.c b/drivers/video/omap/lcd_ams_delta.c
index 6978ae4..0fdd6f6 100644
--- a/drivers/video/omap/lcd_ams_delta.c
+++ b/drivers/video/omap/lcd_ams_delta.c
@@ -198,7 +198,7 @@ static int ams_delta_panel_resume(struct platform_device *pdev)
return 0;
}
-struct platform_driver ams_delta_panel_driver = {
+static struct platform_driver ams_delta_panel_driver = {
.probe = ams_delta_panel_probe,
.remove = ams_delta_panel_remove,
.suspend = ams_delta_panel_suspend,
@@ -209,15 +209,4 @@ struct platform_driver ams_delta_panel_driver = {
},
};
-static int __init ams_delta_panel_drv_init(void)
-{
- return platform_driver_register(&ams_delta_panel_driver);
-}
-
-static void __exit ams_delta_panel_drv_cleanup(void)
-{
- platform_driver_unregister(&ams_delta_panel_driver);
-}
-
-module_init(ams_delta_panel_drv_init);
-module_exit(ams_delta_panel_drv_cleanup);
+module_platform_driver(ams_delta_panel_driver);
diff --git a/drivers/video/omap/lcd_h3.c b/drivers/video/omap/lcd_h3.c
index 622ad83..49bdeca 100644
--- a/drivers/video/omap/lcd_h3.c
+++ b/drivers/video/omap/lcd_h3.c
@@ -113,7 +113,7 @@ static int h3_panel_resume(struct platform_device *pdev)
return 0;
}
-struct platform_driver h3_panel_driver = {
+static struct platform_driver h3_panel_driver = {
.probe = h3_panel_probe,
.remove = h3_panel_remove,
.suspend = h3_panel_suspend,
@@ -124,16 +124,4 @@ struct platform_driver h3_panel_driver = {
},
};
-static int __init h3_panel_drv_init(void)
-{
- return platform_driver_register(&h3_panel_driver);
-}
-
-static void __exit h3_panel_drv_cleanup(void)
-{
- platform_driver_unregister(&h3_panel_driver);
-}
-
-module_init(h3_panel_drv_init);
-module_exit(h3_panel_drv_cleanup);
-
+module_platform_driver(h3_panel_driver);
diff --git a/drivers/video/omap/lcd_htcherald.c b/drivers/video/omap/lcd_htcherald.c
index 4802419..20f4778 100644
--- a/drivers/video/omap/lcd_htcherald.c
+++ b/drivers/video/omap/lcd_htcherald.c
@@ -104,7 +104,7 @@ static int htcherald_panel_resume(struct platform_device *pdev)
return 0;
}
-struct platform_driver htcherald_panel_driver = {
+static struct platform_driver htcherald_panel_driver = {
.probe = htcherald_panel_probe,
.remove = htcherald_panel_remove,
.suspend = htcherald_panel_suspend,
@@ -115,16 +115,4 @@ struct platform_driver htcherald_panel_driver = {
},
};
-static int __init htcherald_panel_drv_init(void)
-{
- return platform_driver_register(&htcherald_panel_driver);
-}
-
-static void __exit htcherald_panel_drv_cleanup(void)
-{
- platform_driver_unregister(&htcherald_panel_driver);
-}
-
-module_init(htcherald_panel_drv_init);
-module_exit(htcherald_panel_drv_cleanup);
-
+module_platform_driver(htcherald_panel_driver);
diff --git a/drivers/video/omap/lcd_inn1510.c b/drivers/video/omap/lcd_inn1510.c
index 3271f16..b38b1dd 100644
--- a/drivers/video/omap/lcd_inn1510.c
+++ b/drivers/video/omap/lcd_inn1510.c
@@ -98,7 +98,7 @@ static int innovator1510_panel_resume(struct platform_device *pdev)
return 0;
}
-struct platform_driver innovator1510_panel_driver = {
+static struct platform_driver innovator1510_panel_driver = {
.probe = innovator1510_panel_probe,
.remove = innovator1510_panel_remove,
.suspend = innovator1510_panel_suspend,
@@ -109,16 +109,4 @@ struct platform_driver innovator1510_panel_driver = {
},
};
-static int __init innovator1510_panel_drv_init(void)
-{
- return platform_driver_register(&innovator1510_panel_driver);
-}
-
-static void __exit innovator1510_panel_drv_cleanup(void)
-{
- platform_driver_unregister(&innovator1510_panel_driver);
-}
-
-module_init(innovator1510_panel_drv_init);
-module_exit(innovator1510_panel_drv_cleanup);
-
+module_platform_driver(innovator1510_panel_driver);
diff --git a/drivers/video/omap/lcd_inn1610.c b/drivers/video/omap/lcd_inn1610.c
index 12cc52a..7e8bd8e 100644
--- a/drivers/video/omap/lcd_inn1610.c
+++ b/drivers/video/omap/lcd_inn1610.c
@@ -122,7 +122,7 @@ static int innovator1610_panel_resume(struct platform_device *pdev)
return 0;
}
-struct platform_driver innovator1610_panel_driver = {
+static struct platform_driver innovator1610_panel_driver = {
.probe = innovator1610_panel_probe,
.remove = innovator1610_panel_remove,
.suspend = innovator1610_panel_suspend,
@@ -133,16 +133,4 @@ struct platform_driver innovator1610_panel_driver = {
},
};
-static int __init innovator1610_panel_drv_init(void)
-{
- return platform_driver_register(&innovator1610_panel_driver);
-}
-
-static void __exit innovator1610_panel_drv_cleanup(void)
-{
- platform_driver_unregister(&innovator1610_panel_driver);
-}
-
-module_init(innovator1610_panel_drv_init);
-module_exit(innovator1610_panel_drv_cleanup);
-
+module_platform_driver(innovator1610_panel_driver);
diff --git a/drivers/video/omap/lcd_mipid.c b/drivers/video/omap/lcd_mipid.c
index eb381db..8d546dd 100644
--- a/drivers/video/omap/lcd_mipid.c
+++ b/drivers/video/omap/lcd_mipid.c
@@ -603,7 +603,6 @@ static int mipid_spi_remove(struct spi_device *spi)
static struct spi_driver mipid_spi_driver = {
.driver = {
.name = MIPID_MODULE_NAME,
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = mipid_spi_probe,
diff --git a/drivers/video/omap/lcd_osk.c b/drivers/video/omap/lcd_osk.c
index 6f8d13c..5914220 100644
--- a/drivers/video/omap/lcd_osk.c
+++ b/drivers/video/omap/lcd_osk.c
@@ -116,7 +116,7 @@ static int osk_panel_resume(struct platform_device *pdev)
return 0;
}
-struct platform_driver osk_panel_driver = {
+static struct platform_driver osk_panel_driver = {
.probe = osk_panel_probe,
.remove = osk_panel_remove,
.suspend = osk_panel_suspend,
@@ -127,16 +127,4 @@ struct platform_driver osk_panel_driver = {
},
};
-static int __init osk_panel_drv_init(void)
-{
- return platform_driver_register(&osk_panel_driver);
-}
-
-static void __exit osk_panel_drv_cleanup(void)
-{
- platform_driver_unregister(&osk_panel_driver);
-}
-
-module_init(osk_panel_drv_init);
-module_exit(osk_panel_drv_cleanup);
-
+module_platform_driver(osk_panel_driver);
diff --git a/drivers/video/omap/lcd_palmte.c b/drivers/video/omap/lcd_palmte.c
index 4cb3017..88c31eb 100644
--- a/drivers/video/omap/lcd_palmte.c
+++ b/drivers/video/omap/lcd_palmte.c
@@ -97,7 +97,7 @@ static int palmte_panel_resume(struct platform_device *pdev)
return 0;
}
-struct platform_driver palmte_panel_driver = {
+static struct platform_driver palmte_panel_driver = {
.probe = palmte_panel_probe,
.remove = palmte_panel_remove,
.suspend = palmte_panel_suspend,
@@ -108,16 +108,4 @@ struct platform_driver palmte_panel_driver = {
},
};
-static int __init palmte_panel_drv_init(void)
-{
- return platform_driver_register(&palmte_panel_driver);
-}
-
-static void __exit palmte_panel_drv_cleanup(void)
-{
- platform_driver_unregister(&palmte_panel_driver);
-}
-
-module_init(palmte_panel_drv_init);
-module_exit(palmte_panel_drv_cleanup);
-
+module_platform_driver(palmte_panel_driver);
diff --git a/drivers/video/omap/lcd_palmtt.c b/drivers/video/omap/lcd_palmtt.c
index b51b332..aaf3c8b 100644
--- a/drivers/video/omap/lcd_palmtt.c
+++ b/drivers/video/omap/lcd_palmtt.c
@@ -102,7 +102,7 @@ static int palmtt_panel_resume(struct platform_device *pdev)
return 0;
}
-struct platform_driver palmtt_panel_driver = {
+static struct platform_driver palmtt_panel_driver = {
.probe = palmtt_panel_probe,
.remove = palmtt_panel_remove,
.suspend = palmtt_panel_suspend,
@@ -113,15 +113,4 @@ struct platform_driver palmtt_panel_driver = {
},
};
-static int __init palmtt_panel_drv_init(void)
-{
- return platform_driver_register(&palmtt_panel_driver);
-}
-
-static void __exit palmtt_panel_drv_cleanup(void)
-{
- platform_driver_unregister(&palmtt_panel_driver);
-}
-
-module_init(palmtt_panel_drv_init);
-module_exit(palmtt_panel_drv_cleanup);
+module_platform_driver(palmtt_panel_driver);
diff --git a/drivers/video/omap/lcd_palmz71.c b/drivers/video/omap/lcd_palmz71.c
index 2334e56..3b7d8aa 100644
--- a/drivers/video/omap/lcd_palmz71.c
+++ b/drivers/video/omap/lcd_palmz71.c
@@ -98,7 +98,7 @@ static int palmz71_panel_resume(struct platform_device *pdev)
return 0;
}
-struct platform_driver palmz71_panel_driver = {
+static struct platform_driver palmz71_panel_driver = {
.probe = palmz71_panel_probe,
.remove = palmz71_panel_remove,
.suspend = palmz71_panel_suspend,
@@ -109,15 +109,4 @@ struct platform_driver palmz71_panel_driver = {
},
};
-static int __init palmz71_panel_drv_init(void)
-{
- return platform_driver_register(&palmz71_panel_driver);
-}
-
-static void __exit palmz71_panel_drv_cleanup(void)
-{
- platform_driver_unregister(&palmz71_panel_driver);
-}
-
-module_init(palmz71_panel_drv_init);
-module_exit(palmz71_panel_drv_cleanup);
+module_platform_driver(palmz71_panel_driver);
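The module_platform_driver() helper used throughout these conversions is defined in <linux/platform_device.h>. As an illustrative sketch (not part of the patch), for ams_delta_panel_driver the macro generates, in effect, the same registration boilerplate that the hunks above delete:

/* Illustrative expansion of module_platform_driver(); simplified, not patch content. */
static int __init ams_delta_panel_driver_init(void)
{
	return platform_driver_register(&ams_delta_panel_driver);
}
module_init(ams_delta_panel_driver_init);

static void __exit ams_delta_panel_driver_exit(void)
{
	platform_driver_unregister(&ams_delta_panel_driver);
}
module_exit(ams_delta_panel_driver_exit);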
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index 25d8e51..b291bfa 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -47,9 +47,9 @@ static unsigned int def_rotate;
static unsigned int def_mirror;
#ifdef CONFIG_FB_OMAP_MANUAL_UPDATE
-static int manual_update = 1;
+static bool manual_update = 1;
#else
-static int manual_update;
+static bool manual_update;
#endif
static struct platform_device *fbdev_pdev;
diff --git a/drivers/video/omap/rfbi.c b/drivers/video/omap/rfbi.c
index 0c6981f..2c1a340 100644
--- a/drivers/video/omap/rfbi.c
+++ b/drivers/video/omap/rfbi.c
@@ -2,7 +2,7 @@
* OMAP2 Remote Frame Buffer Interface support
*
* Copyright (C) 2005 Nokia Corporation
- * Author: Juha Yrjl <juha.yrjola@nokia.com>
+ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
* Imre Deak <imre.deak@nokia.com>
*
* This program is free software; you can redistribute it and/or modify it
diff --git a/drivers/video/omap/sossi.c b/drivers/video/omap/sossi.c
index 8fb7c70..f79c137 100644
--- a/drivers/video/omap/sossi.c
+++ b/drivers/video/omap/sossi.c
@@ -2,7 +2,7 @@
* OMAP1 Special OptimiSed Screen Interface support
*
* Copyright (C) 2004-2005 Nokia Corporation
- * Author: Juha Yrjl <juha.yrjola@nokia.com>
+ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
diff --git a/drivers/video/omap2/displays/Kconfig b/drivers/video/omap2/displays/Kconfig
index 8d8e1fe..408a992 100644
--- a/drivers/video/omap2/displays/Kconfig
+++ b/drivers/video/omap2/displays/Kconfig
@@ -12,7 +12,7 @@ config PANEL_GENERIC_DPI
config PANEL_DVI
tristate "DVI output"
- depends on OMAP2_DSS_DPI
+ depends on OMAP2_DSS_DPI && I2C
help
Driver for external monitors, connected via DVI. The driver uses i2c
to read EDID information from the monitor.
@@ -41,7 +41,7 @@ config PANEL_NEC_NL8048HL11_01B
config PANEL_PICODLP
tristate "TI PICO DLP mini-projector"
- depends on OMAP2_DSS && I2C
+ depends on OMAP2_DSS_DPI && I2C
help
A mini-projector used in TI's SDP4430 and EVM boards
For more info please visit http://www.dlp.com/projector/
diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c
index dbd59b8..51a87e1 100644
--- a/drivers/video/omap2/displays/panel-acx565akm.c
+++ b/drivers/video/omap2/displays/panel-acx565akm.c
@@ -803,7 +803,6 @@ static int acx565akm_spi_remove(struct spi_device *spi)
static struct spi_driver acx565akm_spi_driver = {
.driver = {
.name = "acx565akm",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = acx565akm_spi_probe,
diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c
index 519c47d..28b9a6d 100644
--- a/drivers/video/omap2/displays/panel-generic-dpi.c
+++ b/drivers/video/omap2/displays/panel-generic-dpi.c
@@ -297,6 +297,72 @@ static struct panel_config generic_dpi_panels[] = {
.name = "apollon",
},
+ /* FocalTech ETM070003DH6 */
+ {
+ {
+ .x_res = 800,
+ .y_res = 480,
+
+ .pixel_clock = 28000,
+
+ .hsw = 48,
+ .hfp = 40,
+ .hbp = 40,
+
+ .vsw = 3,
+ .vfp = 13,
+ .vbp = 29,
+ },
+ .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+ OMAP_DSS_LCD_IHS,
+ .name = "focaltech_etm070003dh6",
+ },
+
+ /* Microtips Technologies - UMSH-8173MD */
+ {
+ {
+ .x_res = 800,
+ .y_res = 480,
+
+ .pixel_clock = 34560,
+
+ .hsw = 13,
+ .hfp = 101,
+ .hbp = 101,
+
+ .vsw = 23,
+ .vfp = 1,
+ .vbp = 1,
+ },
+ .acbi = 0x0,
+ .acb = 0x0,
+ .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+ OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC,
+ .power_on_delay = 0,
+ .power_off_delay = 0,
+ .name = "microtips_umsh_8173md",
+ },
+
+ /* OrtusTech COM43H4M10XTC */
+ {
+ {
+ .x_res = 480,
+ .y_res = 272,
+
+ .pixel_clock = 8000,
+
+ .hsw = 41,
+ .hfp = 8,
+ .hbp = 4,
+
+ .vsw = 10,
+ .vfp = 4,
+ .vbp = 2,
+ },
+ .config = OMAP_DSS_LCD_TFT,
+
+ .name = "ortustech_com43h4m10xtc",
+ },
};
struct panel_drv_data {
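As a worked example (illustrative only, not part of the patch), the refresh rate implied by one of the new timing entries follows from the pixel clock divided by the total line and frame lengths; for the focaltech_etm070003dh6 entry added above:

/* Illustrative only: refresh rate implied by the focaltech_etm070003dh6 timings. */
static unsigned long etm070003dh6_refresh_hz(void)
{
	unsigned long htotal = 800 + 48 + 40 + 40;	/* x_res + hsw + hfp + hbp = 928 */
	unsigned long vtotal = 480 + 3 + 13 + 29;	/* y_res + vsw + vfp + vbp = 525 */
	unsigned long pck_hz = 28000 * 1000UL;		/* .pixel_clock is given in kHz  */

	return pck_hz / (htotal * vtotal);		/* 28 MHz / 487200 ~= 57 Hz      */
}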
diff --git a/drivers/video/omap2/displays/panel-n8x0.c b/drivers/video/omap2/displays/panel-n8x0.c
index 150e8ba..dc9408d 100644
--- a/drivers/video/omap2/displays/panel-n8x0.c
+++ b/drivers/video/omap2/displays/panel-n8x0.c
@@ -708,7 +708,6 @@ static int mipid_spi_remove(struct spi_device *spi)
static struct spi_driver mipid_spi_driver = {
.driver = {
.name = "lcd_mipid",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = mipid_spi_probe,
diff --git a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
index 2ba9d0c..0eb31ca 100644
--- a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
+++ b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c
@@ -163,50 +163,93 @@ static void nec_8048_panel_remove(struct omap_dss_device *dssdev)
kfree(necd);
}
-static int nec_8048_panel_enable(struct omap_dss_device *dssdev)
+static int nec_8048_panel_power_on(struct omap_dss_device *dssdev)
{
- int r = 0;
+ int r;
struct nec_8048_data *necd = dev_get_drvdata(&dssdev->dev);
struct backlight_device *bl = necd->bl;
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ return 0;
+
+ r = omapdss_dpi_display_enable(dssdev);
+ if (r)
+ goto err0;
+
if (dssdev->platform_enable) {
r = dssdev->platform_enable(dssdev);
if (r)
- return r;
+ goto err1;
}
r = nec_8048_bl_update_status(bl);
if (r < 0)
dev_err(&dssdev->dev, "failed to set lcd brightness\n");
- r = omapdss_dpi_display_enable(dssdev);
-
+ return 0;
+err1:
+ omapdss_dpi_display_disable(dssdev);
+err0:
return r;
}
-static void nec_8048_panel_disable(struct omap_dss_device *dssdev)
+static void nec_8048_panel_power_off(struct omap_dss_device *dssdev)
{
struct nec_8048_data *necd = dev_get_drvdata(&dssdev->dev);
struct backlight_device *bl = necd->bl;
- omapdss_dpi_display_disable(dssdev);
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+ return;
bl->props.brightness = 0;
nec_8048_bl_update_status(bl);
if (dssdev->platform_disable)
dssdev->platform_disable(dssdev);
+
+ omapdss_dpi_display_disable(dssdev);
+}
+
+static int nec_8048_panel_enable(struct omap_dss_device *dssdev)
+{
+ int r;
+
+ r = nec_8048_panel_power_on(dssdev);
+ if (r)
+ return r;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return 0;
+}
+
+static void nec_8048_panel_disable(struct omap_dss_device *dssdev)
+{
+ nec_8048_panel_power_off(dssdev);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
static int nec_8048_panel_suspend(struct omap_dss_device *dssdev)
{
- nec_8048_panel_disable(dssdev);
+ nec_8048_panel_power_off(dssdev);
+
+ dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+
return 0;
}
static int nec_8048_panel_resume(struct omap_dss_device *dssdev)
{
- return nec_8048_panel_enable(dssdev);
+ int r;
+
+ r = nec_8048_panel_power_on(dssdev);
+ if (r)
+ return r;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return 0;
}
static int nec_8048_recommended_bpp(struct omap_dss_device *dssdev)
@@ -303,7 +346,6 @@ static struct spi_driver nec_8048_spi_driver = {
.resume = nec_8048_spi_resume,
.driver = {
.name = "nec_8048_spi",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
};
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index 80c3f6a..00c5c61 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -198,12 +198,6 @@ struct taal_data {
bool te_enabled;
atomic_t do_update;
- struct {
- u16 x;
- u16 y;
- u16 w;
- u16 h;
- } update_region;
int channel;
struct delayed_work te_timeout_work;
@@ -1188,6 +1182,10 @@ static int taal_power_on(struct omap_dss_device *dssdev)
if (r)
goto err;
+ r = dsi_enable_video_output(dssdev, td->channel);
+ if (r)
+ goto err;
+
td->enabled = 1;
if (!td->intro_printed) {
@@ -1217,6 +1215,8 @@ static void taal_power_off(struct omap_dss_device *dssdev)
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
int r;
+ dsi_disable_video_output(dssdev, td->channel);
+
r = taal_dcs_write_0(td, MIPI_DCS_SET_DISPLAY_OFF);
if (!r)
r = taal_sleep_in(td);
@@ -1394,12 +1394,8 @@ static irqreturn_t taal_te_isr(int irq, void *data)
if (old) {
cancel_delayed_work(&td->te_timeout_work);
- r = omap_dsi_update(dssdev, td->channel,
- td->update_region.x,
- td->update_region.y,
- td->update_region.w,
- td->update_region.h,
- taal_framedone_cb, dssdev);
+ r = omap_dsi_update(dssdev, td->channel, taal_framedone_cb,
+ dssdev);
if (r)
goto err;
}
@@ -1444,26 +1440,20 @@ static int taal_update(struct omap_dss_device *dssdev,
goto err;
}
- r = omap_dsi_prepare_update(dssdev, &x, &y, &w, &h, true);
- if (r)
- goto err;
-
- r = taal_set_update_window(td, x, y, w, h);
+	/* XXX no need to send this every frame, but dsi breaks if not done */
+ r = taal_set_update_window(td, 0, 0,
+ td->panel_config->timings.x_res,
+ td->panel_config->timings.y_res);
if (r)
goto err;
if (td->te_enabled && panel_data->use_ext_te) {
- td->update_region.x = x;
- td->update_region.y = y;
- td->update_region.w = w;
- td->update_region.h = h;
- barrier();
schedule_delayed_work(&td->te_timeout_work,
msecs_to_jiffies(250));
atomic_set(&td->do_update, 1);
} else {
- r = omap_dsi_update(dssdev, td->channel, x, y, w, h,
- taal_framedone_cb, dssdev);
+ r = omap_dsi_update(dssdev, td->channel, taal_framedone_cb,
+ dssdev);
if (r)
goto err;
}
diff --git a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
index 2462b9e..e6649aa 100644
--- a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
@@ -512,7 +512,6 @@ static int __devexit tpo_td043_spi_remove(struct spi_device *spi)
static struct spi_driver tpo_td043_spi_driver = {
.driver = {
.name = "tpo_td043mtea1_panel_spi",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = tpo_td043_spi_probe,
diff --git a/drivers/video/omap2/dss/Makefile b/drivers/video/omap2/dss/Makefile
index bd34ac5..5c450b0 100644
--- a/drivers/video/omap2/dss/Makefile
+++ b/drivers/video/omap2/dss/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_OMAP2_DSS) += omapdss.o
-omapdss-y := core.o dss.o dss_features.o dispc.o display.o manager.o overlay.o
+omapdss-y := core.o dss.o dss_features.o dispc.o dispc_coefs.o display.o \
+ manager.o overlay.o apply.o
omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o
omapdss-$(CONFIG_OMAP2_DSS_RFBI) += rfbi.o
omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o
diff --git a/drivers/video/omap2/dss/apply.c b/drivers/video/omap2/dss/apply.c
new file mode 100644
index 0000000..87b3e25
--- /dev/null
+++ b/drivers/video/omap2/dss/apply.c
@@ -0,0 +1,1330 @@
+/*
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DSS_SUBSYS_NAME "APPLY"
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/jiffies.h>
+
+#include <video/omapdss.h>
+
+#include "dss.h"
+#include "dss_features.h"
+
+/*
+ * We have 4 levels of cache for the dispc settings. First two are in SW and
+ * the latter two in HW.
+ *
+ * set_info()
+ * v
+ * +--------------------+
+ * | user_info |
+ * +--------------------+
+ * v
+ * apply()
+ * v
+ * +--------------------+
+ * | info |
+ * +--------------------+
+ * v
+ * write_regs()
+ * v
+ * +--------------------+
+ * | shadow registers |
+ * +--------------------+
+ * v
+ * VFP or lcd/digit_enable
+ * v
+ * +--------------------+
+ * | registers |
+ * +--------------------+
+ */
+
+struct ovl_priv_data {
+
+ bool user_info_dirty;
+ struct omap_overlay_info user_info;
+
+ bool info_dirty;
+ struct omap_overlay_info info;
+
+ bool shadow_info_dirty;
+
+ bool extra_info_dirty;
+ bool shadow_extra_info_dirty;
+
+ bool enabled;
+ enum omap_channel channel;
+ u32 fifo_low, fifo_high;
+
+ /*
+ * True if overlay is to be enabled. Used to check and calculate configs
+ * for the overlay before it is enabled in the HW.
+ */
+ bool enabling;
+};
+
+struct mgr_priv_data {
+
+ bool user_info_dirty;
+ struct omap_overlay_manager_info user_info;
+
+ bool info_dirty;
+ struct omap_overlay_manager_info info;
+
+ bool shadow_info_dirty;
+
+ /* If true, GO bit is up and shadow registers cannot be written.
+ * Never true for manual update displays */
+ bool busy;
+
+ /* If true, dispc output is enabled */
+ bool updating;
+
+ /* If true, a display is enabled using this manager */
+ bool enabled;
+};
+
+static struct {
+ struct ovl_priv_data ovl_priv_data_array[MAX_DSS_OVERLAYS];
+ struct mgr_priv_data mgr_priv_data_array[MAX_DSS_MANAGERS];
+
+ bool irq_enabled;
+} dss_data;
+
+/* protects dss_data */
+static spinlock_t data_lock;
+/* lock for blocking functions */
+static DEFINE_MUTEX(apply_lock);
+static DECLARE_COMPLETION(extra_updated_completion);
+
+static void dss_register_vsync_isr(void);
+
+static struct ovl_priv_data *get_ovl_priv(struct omap_overlay *ovl)
+{
+ return &dss_data.ovl_priv_data_array[ovl->id];
+}
+
+static struct mgr_priv_data *get_mgr_priv(struct omap_overlay_manager *mgr)
+{
+ return &dss_data.mgr_priv_data_array[mgr->id];
+}
+
+void dss_apply_init(void)
+{
+ const int num_ovls = dss_feat_get_num_ovls();
+ int i;
+
+ spin_lock_init(&data_lock);
+
+ for (i = 0; i < num_ovls; ++i) {
+ struct ovl_priv_data *op;
+
+ op = &dss_data.ovl_priv_data_array[i];
+
+ op->info.global_alpha = 255;
+
+ switch (i) {
+ case 0:
+ op->info.zorder = 0;
+ break;
+ case 1:
+ op->info.zorder =
+ dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 3 : 0;
+ break;
+ case 2:
+ op->info.zorder =
+ dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 2 : 0;
+ break;
+ case 3:
+ op->info.zorder =
+ dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 1 : 0;
+ break;
+ }
+
+ op->user_info = op->info;
+ }
+}
+
+static bool ovl_manual_update(struct omap_overlay *ovl)
+{
+ return ovl->manager->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
+}
+
+static bool mgr_manual_update(struct omap_overlay_manager *mgr)
+{
+ return mgr->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
+}
+
+static int dss_check_settings_low(struct omap_overlay_manager *mgr,
+ struct omap_dss_device *dssdev, bool applying)
+{
+ struct omap_overlay_info *oi;
+ struct omap_overlay_manager_info *mi;
+ struct omap_overlay *ovl;
+ struct omap_overlay_info *ois[MAX_DSS_OVERLAYS];
+ struct ovl_priv_data *op;
+ struct mgr_priv_data *mp;
+
+ mp = get_mgr_priv(mgr);
+
+ if (applying && mp->user_info_dirty)
+ mi = &mp->user_info;
+ else
+ mi = &mp->info;
+
+ /* collect the infos to be tested into the array */
+ list_for_each_entry(ovl, &mgr->overlays, list) {
+ op = get_ovl_priv(ovl);
+
+ if (!op->enabled && !op->enabling)
+ oi = NULL;
+ else if (applying && op->user_info_dirty)
+ oi = &op->user_info;
+ else
+ oi = &op->info;
+
+ ois[ovl->id] = oi;
+ }
+
+ return dss_mgr_check(mgr, dssdev, mi, ois);
+}
+
+/*
+ * check manager and overlay settings using overlay_info from data->info
+ */
+static int dss_check_settings(struct omap_overlay_manager *mgr,
+ struct omap_dss_device *dssdev)
+{
+ return dss_check_settings_low(mgr, dssdev, false);
+}
+
+/*
+ * check manager and overlay settings using overlay_info from ovl->info if
+ * dirty and from data->info otherwise
+ */
+static int dss_check_settings_apply(struct omap_overlay_manager *mgr,
+ struct omap_dss_device *dssdev)
+{
+ return dss_check_settings_low(mgr, dssdev, true);
+}
+
+static bool need_isr(void)
+{
+ const int num_mgrs = dss_feat_get_num_mgrs();
+ int i;
+
+ for (i = 0; i < num_mgrs; ++i) {
+ struct omap_overlay_manager *mgr;
+ struct mgr_priv_data *mp;
+ struct omap_overlay *ovl;
+
+ mgr = omap_dss_get_overlay_manager(i);
+ mp = get_mgr_priv(mgr);
+
+ if (!mp->enabled)
+ continue;
+
+ if (mgr_manual_update(mgr)) {
+ /* to catch FRAMEDONE */
+ if (mp->updating)
+ return true;
+ } else {
+ /* to catch GO bit going down */
+ if (mp->busy)
+ return true;
+
+ /* to write new values to registers */
+ if (mp->info_dirty)
+ return true;
+
+ /* to set GO bit */
+ if (mp->shadow_info_dirty)
+ return true;
+
+ list_for_each_entry(ovl, &mgr->overlays, list) {
+ struct ovl_priv_data *op;
+
+ op = get_ovl_priv(ovl);
+
+ /*
+ * NOTE: we check extra_info flags even for
+ * disabled overlays, as extra_infos need to be
+ * always written.
+ */
+
+ /* to write new values to registers */
+ if (op->extra_info_dirty)
+ return true;
+
+ /* to set GO bit */
+ if (op->shadow_extra_info_dirty)
+ return true;
+
+ if (!op->enabled)
+ continue;
+
+ /* to write new values to registers */
+ if (op->info_dirty)
+ return true;
+
+ /* to set GO bit */
+ if (op->shadow_info_dirty)
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+static bool need_go(struct omap_overlay_manager *mgr)
+{
+ struct omap_overlay *ovl;
+ struct mgr_priv_data *mp;
+ struct ovl_priv_data *op;
+
+ mp = get_mgr_priv(mgr);
+
+ if (mp->shadow_info_dirty)
+ return true;
+
+ list_for_each_entry(ovl, &mgr->overlays, list) {
+ op = get_ovl_priv(ovl);
+ if (op->shadow_info_dirty || op->shadow_extra_info_dirty)
+ return true;
+ }
+
+ return false;
+}
+
+/* returns true if an extra_info field is currently being updated */
+static bool extra_info_update_ongoing(void)
+{
+ const int num_ovls = omap_dss_get_num_overlays();
+ struct ovl_priv_data *op;
+ struct omap_overlay *ovl;
+ struct mgr_priv_data *mp;
+ int i;
+
+ for (i = 0; i < num_ovls; ++i) {
+ ovl = omap_dss_get_overlay(i);
+ op = get_ovl_priv(ovl);
+
+ if (!ovl->manager)
+ continue;
+
+ mp = get_mgr_priv(ovl->manager);
+
+ if (!mp->enabled)
+ continue;
+
+ if (!mp->updating)
+ continue;
+
+ if (op->extra_info_dirty || op->shadow_extra_info_dirty)
+ return true;
+ }
+
+ return false;
+}
+
+/* wait until no extra_info updates are pending */
+static void wait_pending_extra_info_updates(void)
+{
+ bool updating;
+ unsigned long flags;
+ unsigned long t;
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ updating = extra_info_update_ongoing();
+
+ if (!updating) {
+ spin_unlock_irqrestore(&data_lock, flags);
+ return;
+ }
+
+ init_completion(&extra_updated_completion);
+
+ spin_unlock_irqrestore(&data_lock, flags);
+
+ t = msecs_to_jiffies(500);
+ wait_for_completion_timeout(&extra_updated_completion, t);
+
+ updating = extra_info_update_ongoing();
+
+ WARN_ON(updating);
+}
+
+int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
+{
+ unsigned long timeout = msecs_to_jiffies(500);
+ struct mgr_priv_data *mp;
+ u32 irq;
+ int r;
+ int i;
+ struct omap_dss_device *dssdev = mgr->device;
+
+ if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+ return 0;
+
+ if (mgr_manual_update(mgr))
+ return 0;
+
+ irq = dispc_mgr_get_vsync_irq(mgr->id);
+
+ mp = get_mgr_priv(mgr);
+ i = 0;
+ while (1) {
+ unsigned long flags;
+ bool shadow_dirty, dirty;
+
+ spin_lock_irqsave(&data_lock, flags);
+ dirty = mp->info_dirty;
+ shadow_dirty = mp->shadow_info_dirty;
+ spin_unlock_irqrestore(&data_lock, flags);
+
+ if (!dirty && !shadow_dirty) {
+ r = 0;
+ break;
+ }
+
+ /* 4 iterations is the worst case:
+ * 1 - initial iteration, dirty = true (between VFP and VSYNC)
+ * 2 - first VSYNC, dirty = true
+ * 3 - dirty = false, shadow_dirty = true
+ * 4 - shadow_dirty = false */
+ if (i++ == 3) {
+ DSSERR("mgr(%d)->wait_for_go() not finishing\n",
+ mgr->id);
+ r = 0;
+ break;
+ }
+
+ r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
+ if (r == -ERESTARTSYS)
+ break;
+
+ if (r) {
+ DSSERR("mgr(%d)->wait_for_go() timeout\n", mgr->id);
+ break;
+ }
+ }
+
+ return r;
+}
+
+int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
+{
+ unsigned long timeout = msecs_to_jiffies(500);
+ struct ovl_priv_data *op;
+ struct omap_dss_device *dssdev;
+ u32 irq;
+ int r;
+ int i;
+
+ if (!ovl->manager)
+ return 0;
+
+ dssdev = ovl->manager->device;
+
+ if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+ return 0;
+
+ if (ovl_manual_update(ovl))
+ return 0;
+
+ irq = dispc_mgr_get_vsync_irq(ovl->manager->id);
+
+ op = get_ovl_priv(ovl);
+ i = 0;
+ while (1) {
+ unsigned long flags;
+ bool shadow_dirty, dirty;
+
+ spin_lock_irqsave(&data_lock, flags);
+ dirty = op->info_dirty;
+ shadow_dirty = op->shadow_info_dirty;
+ spin_unlock_irqrestore(&data_lock, flags);
+
+ if (!dirty && !shadow_dirty) {
+ r = 0;
+ break;
+ }
+
+ /* 4 iterations is the worst case:
+ * 1 - initial iteration, dirty = true (between VFP and VSYNC)
+ * 2 - first VSYNC, dirty = true
+ * 3 - dirty = false, shadow_dirty = true
+ * 4 - shadow_dirty = false */
+ if (i++ == 3) {
+ DSSERR("ovl(%d)->wait_for_go() not finishing\n",
+ ovl->id);
+ r = 0;
+ break;
+ }
+
+ r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
+ if (r == -ERESTARTSYS)
+ break;
+
+ if (r) {
+ DSSERR("ovl(%d)->wait_for_go() timeout\n", ovl->id);
+ break;
+ }
+ }
+
+ return r;
+}
+
+static void dss_ovl_write_regs(struct omap_overlay *ovl)
+{
+ struct ovl_priv_data *op = get_ovl_priv(ovl);
+ struct omap_overlay_info *oi;
+ bool ilace, replication;
+ struct mgr_priv_data *mp;
+ int r;
+
+ DSSDBGF("%d", ovl->id);
+
+ if (!op->enabled || !op->info_dirty)
+ return;
+
+ oi = &op->info;
+
+ replication = dss_use_replication(ovl->manager->device, oi->color_mode);
+
+ ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC;
+
+ r = dispc_ovl_setup(ovl->id, oi, ilace, replication);
+ if (r) {
+ /*
+ * We can't do much here, as this function can be called from
+ * vsync interrupt.
+ */
+ DSSERR("dispc_ovl_setup failed for ovl %d\n", ovl->id);
+
+ /* This will leave fifo configurations in a nonoptimal state */
+ op->enabled = false;
+ dispc_ovl_enable(ovl->id, false);
+ return;
+ }
+
+ mp = get_mgr_priv(ovl->manager);
+
+ op->info_dirty = false;
+ if (mp->updating)
+ op->shadow_info_dirty = true;
+}
+
+static void dss_ovl_write_regs_extra(struct omap_overlay *ovl)
+{
+ struct ovl_priv_data *op = get_ovl_priv(ovl);
+ struct mgr_priv_data *mp;
+
+ DSSDBGF("%d", ovl->id);
+
+ if (!op->extra_info_dirty)
+ return;
+
+ /* note: write also when op->enabled == false, so that the ovl gets
+ * disabled */
+
+ dispc_ovl_enable(ovl->id, op->enabled);
+ dispc_ovl_set_channel_out(ovl->id, op->channel);
+ dispc_ovl_set_fifo_threshold(ovl->id, op->fifo_low, op->fifo_high);
+
+ mp = get_mgr_priv(ovl->manager);
+
+ op->extra_info_dirty = false;
+ if (mp->updating)
+ op->shadow_extra_info_dirty = true;
+}
+
+static void dss_mgr_write_regs(struct omap_overlay_manager *mgr)
+{
+ struct mgr_priv_data *mp = get_mgr_priv(mgr);
+ struct omap_overlay *ovl;
+
+ DSSDBGF("%d", mgr->id);
+
+ if (!mp->enabled)
+ return;
+
+ WARN_ON(mp->busy);
+
+ /* Commit overlay settings */
+ list_for_each_entry(ovl, &mgr->overlays, list) {
+ dss_ovl_write_regs(ovl);
+ dss_ovl_write_regs_extra(ovl);
+ }
+
+ if (mp->info_dirty) {
+ dispc_mgr_setup(mgr->id, &mp->info);
+
+ mp->info_dirty = false;
+ if (mp->updating)
+ mp->shadow_info_dirty = true;
+ }
+}
+
+static void dss_write_regs(void)
+{
+ const int num_mgrs = omap_dss_get_num_overlay_managers();
+ int i;
+
+ for (i = 0; i < num_mgrs; ++i) {
+ struct omap_overlay_manager *mgr;
+ struct mgr_priv_data *mp;
+ int r;
+
+ mgr = omap_dss_get_overlay_manager(i);
+ mp = get_mgr_priv(mgr);
+
+ if (!mp->enabled || mgr_manual_update(mgr) || mp->busy)
+ continue;
+
+ r = dss_check_settings(mgr, mgr->device);
+ if (r) {
+ DSSERR("cannot write registers for manager %s: "
+ "illegal configuration\n", mgr->name);
+ continue;
+ }
+
+ dss_mgr_write_regs(mgr);
+ }
+}
+
+static void dss_set_go_bits(void)
+{
+ const int num_mgrs = omap_dss_get_num_overlay_managers();
+ int i;
+
+ for (i = 0; i < num_mgrs; ++i) {
+ struct omap_overlay_manager *mgr;
+ struct mgr_priv_data *mp;
+
+ mgr = omap_dss_get_overlay_manager(i);
+ mp = get_mgr_priv(mgr);
+
+ if (!mp->enabled || mgr_manual_update(mgr) || mp->busy)
+ continue;
+
+ if (!need_go(mgr))
+ continue;
+
+ mp->busy = true;
+
+ if (!dss_data.irq_enabled && need_isr())
+ dss_register_vsync_isr();
+
+ dispc_mgr_go(mgr->id);
+ }
+
+}
+
+void dss_mgr_start_update(struct omap_overlay_manager *mgr)
+{
+ struct mgr_priv_data *mp = get_mgr_priv(mgr);
+ unsigned long flags;
+ int r;
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ WARN_ON(mp->updating);
+
+ r = dss_check_settings(mgr, mgr->device);
+ if (r) {
+ DSSERR("cannot start manual update: illegal configuration\n");
+ spin_unlock_irqrestore(&data_lock, flags);
+ return;
+ }
+
+ dss_mgr_write_regs(mgr);
+
+ mp->updating = true;
+
+ if (!dss_data.irq_enabled && need_isr())
+ dss_register_vsync_isr();
+
+ dispc_mgr_enable(mgr->id, true);
+
+ spin_unlock_irqrestore(&data_lock, flags);
+}
+
+static void dss_apply_irq_handler(void *data, u32 mask);
+
+static void dss_register_vsync_isr(void)
+{
+ const int num_mgrs = dss_feat_get_num_mgrs();
+ u32 mask;
+ int r, i;
+
+ mask = 0;
+ for (i = 0; i < num_mgrs; ++i)
+ mask |= dispc_mgr_get_vsync_irq(i);
+
+ for (i = 0; i < num_mgrs; ++i)
+ mask |= dispc_mgr_get_framedone_irq(i);
+
+ r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, mask);
+ WARN_ON(r);
+
+ dss_data.irq_enabled = true;
+}
+
+static void dss_unregister_vsync_isr(void)
+{
+ const int num_mgrs = dss_feat_get_num_mgrs();
+ u32 mask;
+ int r, i;
+
+ mask = 0;
+ for (i = 0; i < num_mgrs; ++i)
+ mask |= dispc_mgr_get_vsync_irq(i);
+
+ for (i = 0; i < num_mgrs; ++i)
+ mask |= dispc_mgr_get_framedone_irq(i);
+
+ r = omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, mask);
+ WARN_ON(r);
+
+ dss_data.irq_enabled = false;
+}
+
+static void mgr_clear_shadow_dirty(struct omap_overlay_manager *mgr)
+{
+ struct omap_overlay *ovl;
+ struct mgr_priv_data *mp;
+ struct ovl_priv_data *op;
+
+ mp = get_mgr_priv(mgr);
+ mp->shadow_info_dirty = false;
+
+ list_for_each_entry(ovl, &mgr->overlays, list) {
+ op = get_ovl_priv(ovl);
+ op->shadow_info_dirty = false;
+ op->shadow_extra_info_dirty = false;
+ }
+}
+
+static void dss_apply_irq_handler(void *data, u32 mask)
+{
+ const int num_mgrs = dss_feat_get_num_mgrs();
+ int i;
+ bool extra_updating;
+
+ spin_lock(&data_lock);
+
+ /* clear busy, updating flags, shadow_dirty flags */
+ for (i = 0; i < num_mgrs; i++) {
+ struct omap_overlay_manager *mgr;
+ struct mgr_priv_data *mp;
+ bool was_updating;
+
+ mgr = omap_dss_get_overlay_manager(i);
+ mp = get_mgr_priv(mgr);
+
+ if (!mp->enabled)
+ continue;
+
+ was_updating = mp->updating;
+ mp->updating = dispc_mgr_is_enabled(i);
+
+ if (!mgr_manual_update(mgr)) {
+ bool was_busy = mp->busy;
+ mp->busy = dispc_mgr_go_busy(i);
+
+ if (was_busy && !mp->busy)
+ mgr_clear_shadow_dirty(mgr);
+ } else {
+ if (was_updating && !mp->updating)
+ mgr_clear_shadow_dirty(mgr);
+ }
+ }
+
+ dss_write_regs();
+ dss_set_go_bits();
+
+ extra_updating = extra_info_update_ongoing();
+ if (!extra_updating)
+ complete_all(&extra_updated_completion);
+
+ if (!need_isr())
+ dss_unregister_vsync_isr();
+
+ spin_unlock(&data_lock);
+}
+
+static void omap_dss_mgr_apply_ovl(struct omap_overlay *ovl)
+{
+ struct ovl_priv_data *op;
+
+ op = get_ovl_priv(ovl);
+
+ if (!op->user_info_dirty)
+ return;
+
+ op->user_info_dirty = false;
+ op->info_dirty = true;
+ op->info = op->user_info;
+}
+
+static void omap_dss_mgr_apply_mgr(struct omap_overlay_manager *mgr)
+{
+ struct mgr_priv_data *mp;
+
+ mp = get_mgr_priv(mgr);
+
+ if (!mp->user_info_dirty)
+ return;
+
+ mp->user_info_dirty = false;
+ mp->info_dirty = true;
+ mp->info = mp->user_info;
+}
+
+int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
+{
+ unsigned long flags;
+ struct omap_overlay *ovl;
+ int r;
+
+ DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ r = dss_check_settings_apply(mgr, mgr->device);
+ if (r) {
+ spin_unlock_irqrestore(&data_lock, flags);
+ DSSERR("failed to apply settings: illegal configuration.\n");
+ return r;
+ }
+
+ /* Configure overlays */
+ list_for_each_entry(ovl, &mgr->overlays, list)
+ omap_dss_mgr_apply_ovl(ovl);
+
+ /* Configure manager */
+ omap_dss_mgr_apply_mgr(mgr);
+
+ dss_write_regs();
+ dss_set_go_bits();
+
+ spin_unlock_irqrestore(&data_lock, flags);
+
+ return 0;
+}
+
+static void dss_apply_ovl_enable(struct omap_overlay *ovl, bool enable)
+{
+ struct ovl_priv_data *op;
+
+ op = get_ovl_priv(ovl);
+
+ if (op->enabled == enable)
+ return;
+
+ op->enabled = enable;
+ op->extra_info_dirty = true;
+}
+
+static void dss_apply_ovl_fifo_thresholds(struct omap_overlay *ovl,
+ u32 fifo_low, u32 fifo_high)
+{
+ struct ovl_priv_data *op = get_ovl_priv(ovl);
+
+ if (op->fifo_low == fifo_low && op->fifo_high == fifo_high)
+ return;
+
+ op->fifo_low = fifo_low;
+ op->fifo_high = fifo_high;
+ op->extra_info_dirty = true;
+}
+
+static void dss_ovl_setup_fifo(struct omap_overlay *ovl)
+{
+ struct ovl_priv_data *op = get_ovl_priv(ovl);
+ struct omap_dss_device *dssdev;
+ u32 size, burst_size;
+ u32 fifo_low, fifo_high;
+
+ if (!op->enabled && !op->enabling)
+ return;
+
+ dssdev = ovl->manager->device;
+
+ size = dispc_ovl_get_fifo_size(ovl->id);
+
+ burst_size = dispc_ovl_get_burst_size(ovl->id);
+
+ switch (dssdev->type) {
+ case OMAP_DISPLAY_TYPE_DPI:
+ case OMAP_DISPLAY_TYPE_DBI:
+ case OMAP_DISPLAY_TYPE_SDI:
+ case OMAP_DISPLAY_TYPE_VENC:
+ case OMAP_DISPLAY_TYPE_HDMI:
+ default_get_overlay_fifo_thresholds(ovl->id, size,
+ burst_size, &fifo_low, &fifo_high);
+ break;
+#ifdef CONFIG_OMAP2_DSS_DSI
+ case OMAP_DISPLAY_TYPE_DSI:
+ dsi_get_overlay_fifo_thresholds(ovl->id, size,
+ burst_size, &fifo_low, &fifo_high);
+ break;
+#endif
+ default:
+ BUG();
+ }
+
+ dss_apply_ovl_fifo_thresholds(ovl, fifo_low, fifo_high);
+}
+
+static void dss_mgr_setup_fifos(struct omap_overlay_manager *mgr)
+{
+ struct omap_overlay *ovl;
+ struct mgr_priv_data *mp;
+
+ mp = get_mgr_priv(mgr);
+
+ if (!mp->enabled)
+ return;
+
+ list_for_each_entry(ovl, &mgr->overlays, list)
+ dss_ovl_setup_fifo(ovl);
+}
+
+static void dss_setup_fifos(void)
+{
+ const int num_mgrs = omap_dss_get_num_overlay_managers();
+ struct omap_overlay_manager *mgr;
+ int i;
+
+ for (i = 0; i < num_mgrs; ++i) {
+ mgr = omap_dss_get_overlay_manager(i);
+ dss_mgr_setup_fifos(mgr);
+ }
+}
+
+int dss_mgr_enable(struct omap_overlay_manager *mgr)
+{
+ struct mgr_priv_data *mp = get_mgr_priv(mgr);
+ unsigned long flags;
+ int r;
+
+ mutex_lock(&apply_lock);
+
+ if (mp->enabled)
+ goto out;
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ mp->enabled = true;
+
+ r = dss_check_settings(mgr, mgr->device);
+ if (r) {
+ DSSERR("failed to enable manager %d: check_settings failed\n",
+ mgr->id);
+ goto err;
+ }
+
+ dss_setup_fifos();
+
+ dss_write_regs();
+ dss_set_go_bits();
+
+ if (!mgr_manual_update(mgr))
+ mp->updating = true;
+
+ spin_unlock_irqrestore(&data_lock, flags);
+
+ if (!mgr_manual_update(mgr))
+ dispc_mgr_enable(mgr->id, true);
+
+out:
+ mutex_unlock(&apply_lock);
+
+ return 0;
+
+err:
+ mp->enabled = false;
+ spin_unlock_irqrestore(&data_lock, flags);
+ mutex_unlock(&apply_lock);
+ return r;
+}
+
+void dss_mgr_disable(struct omap_overlay_manager *mgr)
+{
+ struct mgr_priv_data *mp = get_mgr_priv(mgr);
+ unsigned long flags;
+
+ mutex_lock(&apply_lock);
+
+ if (!mp->enabled)
+ goto out;
+
+ if (!mgr_manual_update(mgr))
+ dispc_mgr_enable(mgr->id, false);
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ mp->updating = false;
+ mp->enabled = false;
+
+ spin_unlock_irqrestore(&data_lock, flags);
+
+out:
+ mutex_unlock(&apply_lock);
+}
+
+int dss_mgr_set_info(struct omap_overlay_manager *mgr,
+ struct omap_overlay_manager_info *info)
+{
+ struct mgr_priv_data *mp = get_mgr_priv(mgr);
+ unsigned long flags;
+ int r;
+
+ r = dss_mgr_simple_check(mgr, info);
+ if (r)
+ return r;
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ mp->user_info = *info;
+ mp->user_info_dirty = true;
+
+ spin_unlock_irqrestore(&data_lock, flags);
+
+ return 0;
+}
+
+void dss_mgr_get_info(struct omap_overlay_manager *mgr,
+ struct omap_overlay_manager_info *info)
+{
+ struct mgr_priv_data *mp = get_mgr_priv(mgr);
+ unsigned long flags;
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ *info = mp->user_info;
+
+ spin_unlock_irqrestore(&data_lock, flags);
+}
+
+int dss_mgr_set_device(struct omap_overlay_manager *mgr,
+ struct omap_dss_device *dssdev)
+{
+ int r;
+
+ mutex_lock(&apply_lock);
+
+ if (dssdev->manager) {
+ DSSERR("display '%s' already has a manager '%s'\n",
+ dssdev->name, dssdev->manager->name);
+ r = -EINVAL;
+ goto err;
+ }
+
+ if ((mgr->supported_displays & dssdev->type) == 0) {
+ DSSERR("display '%s' does not support manager '%s'\n",
+ dssdev->name, mgr->name);
+ r = -EINVAL;
+ goto err;
+ }
+
+ dssdev->manager = mgr;
+ mgr->device = dssdev;
+
+ mutex_unlock(&apply_lock);
+
+ return 0;
+err:
+ mutex_unlock(&apply_lock);
+ return r;
+}
+
+int dss_mgr_unset_device(struct omap_overlay_manager *mgr)
+{
+ int r;
+
+ mutex_lock(&apply_lock);
+
+ if (!mgr->device) {
+ DSSERR("failed to unset display, display not set.\n");
+ r = -EINVAL;
+ goto err;
+ }
+
+ /*
+ * Don't allow currently enabled displays to have the overlay manager
+ * pulled out from underneath them
+ */
+ if (mgr->device->state != OMAP_DSS_DISPLAY_DISABLED) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ mgr->device->manager = NULL;
+ mgr->device = NULL;
+
+ mutex_unlock(&apply_lock);
+
+ return 0;
+err:
+ mutex_unlock(&apply_lock);
+ return r;
+}
+
+
+int dss_ovl_set_info(struct omap_overlay *ovl,
+ struct omap_overlay_info *info)
+{
+ struct ovl_priv_data *op = get_ovl_priv(ovl);
+ unsigned long flags;
+ int r;
+
+ r = dss_ovl_simple_check(ovl, info);
+ if (r)
+ return r;
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ op->user_info = *info;
+ op->user_info_dirty = true;
+
+ spin_unlock_irqrestore(&data_lock, flags);
+
+ return 0;
+}
+
+void dss_ovl_get_info(struct omap_overlay *ovl,
+ struct omap_overlay_info *info)
+{
+ struct ovl_priv_data *op = get_ovl_priv(ovl);
+ unsigned long flags;
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ *info = op->user_info;
+
+ spin_unlock_irqrestore(&data_lock, flags);
+}
+
+int dss_ovl_set_manager(struct omap_overlay *ovl,
+ struct omap_overlay_manager *mgr)
+{
+ struct ovl_priv_data *op = get_ovl_priv(ovl);
+ unsigned long flags;
+ int r;
+
+ if (!mgr)
+ return -EINVAL;
+
+ mutex_lock(&apply_lock);
+
+ if (ovl->manager) {
+ DSSERR("overlay '%s' already has a manager '%s'\n",
+ ovl->name, ovl->manager->name);
+ r = -EINVAL;
+ goto err;
+ }
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ if (op->enabled) {
+ spin_unlock_irqrestore(&data_lock, flags);
+ DSSERR("overlay has to be disabled to change the manager\n");
+ r = -EINVAL;
+ goto err;
+ }
+
+ op->channel = mgr->id;
+ op->extra_info_dirty = true;
+
+ ovl->manager = mgr;
+ list_add_tail(&ovl->list, &mgr->overlays);
+
+ spin_unlock_irqrestore(&data_lock, flags);
+
+ /* XXX: When there is an overlay on a DSI manual update display, and
+ * the overlay is first disabled, then moved to tv, and enabled, we
+ * seem to get SYNC_LOST_DIGIT error.
+ *
+ * Waiting doesn't seem to help, but updating the manual update display
+ * after disabling the overlay seems to fix this. This hints that the
+ * overlay is perhaps somehow tied to the LCD output until the output
+ * is updated.
+ *
+ * Userspace workaround for this is to update the LCD after disabling
+ * the overlay, but before moving the overlay to TV.
+ */
+
+ mutex_unlock(&apply_lock);
+
+ return 0;
+err:
+ mutex_unlock(&apply_lock);
+ return r;
+}
+
+int dss_ovl_unset_manager(struct omap_overlay *ovl)
+{
+ struct ovl_priv_data *op = get_ovl_priv(ovl);
+ unsigned long flags;
+ int r;
+
+ mutex_lock(&apply_lock);
+
+ if (!ovl->manager) {
+ DSSERR("failed to detach overlay: manager not set\n");
+ r = -EINVAL;
+ goto err;
+ }
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ if (op->enabled) {
+ spin_unlock_irqrestore(&data_lock, flags);
+ DSSERR("overlay has to be disabled to unset the manager\n");
+ r = -EINVAL;
+ goto err;
+ }
+
+ op->channel = -1;
+
+ ovl->manager = NULL;
+ list_del(&ovl->list);
+
+ spin_unlock_irqrestore(&data_lock, flags);
+
+ mutex_unlock(&apply_lock);
+
+ return 0;
+err:
+ mutex_unlock(&apply_lock);
+ return r;
+}
+
+bool dss_ovl_is_enabled(struct omap_overlay *ovl)
+{
+ struct ovl_priv_data *op = get_ovl_priv(ovl);
+ unsigned long flags;
+ bool e;
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ e = op->enabled;
+
+ spin_unlock_irqrestore(&data_lock, flags);
+
+ return e;
+}
+
+int dss_ovl_enable(struct omap_overlay *ovl)
+{
+ struct ovl_priv_data *op = get_ovl_priv(ovl);
+ unsigned long flags;
+ int r;
+
+ mutex_lock(&apply_lock);
+
+ if (op->enabled) {
+ r = 0;
+ goto err1;
+ }
+
+ if (ovl->manager == NULL || ovl->manager->device == NULL) {
+ r = -EINVAL;
+ goto err1;
+ }
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ op->enabling = true;
+
+ r = dss_check_settings(ovl->manager, ovl->manager->device);
+ if (r) {
+ DSSERR("failed to enable overlay %d: check_settings failed\n",
+ ovl->id);
+ goto err2;
+ }
+
+ dss_setup_fifos();
+
+ op->enabling = false;
+ dss_apply_ovl_enable(ovl, true);
+
+ dss_write_regs();
+ dss_set_go_bits();
+
+ spin_unlock_irqrestore(&data_lock, flags);
+
+ /* wait for overlay to be enabled */
+ wait_pending_extra_info_updates();
+
+ mutex_unlock(&apply_lock);
+
+ return 0;
+err2:
+ op->enabling = false;
+ spin_unlock_irqrestore(&data_lock, flags);
+err1:
+ mutex_unlock(&apply_lock);
+ return r;
+}
+
+int dss_ovl_disable(struct omap_overlay *ovl)
+{
+ struct ovl_priv_data *op = get_ovl_priv(ovl);
+ unsigned long flags;
+ int r;
+
+ mutex_lock(&apply_lock);
+
+ if (!op->enabled) {
+ r = 0;
+ goto err;
+ }
+
+ if (ovl->manager == NULL || ovl->manager->device == NULL) {
+ r = -EINVAL;
+ goto err;
+ }
+
+ spin_lock_irqsave(&data_lock, flags);
+
+ dss_apply_ovl_enable(ovl, false);
+ dss_write_regs();
+ dss_set_go_bits();
+
+ spin_unlock_irqrestore(&data_lock, flags);
+
+ /* wait for the overlay to be disabled */
+ wait_pending_extra_info_updates();
+
+ mutex_unlock(&apply_lock);
+
+ return 0;
+
+err:
+ mutex_unlock(&apply_lock);
+ return r;
+}
+
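To see how the new apply.c pipeline is exercised from the outside, here is a hedged usage sketch (not part of the patch); it assumes the overlay/manager function pointers are wired to dss_ovl_set_info(), omap_dss_mgr_apply() and dss_mgr_wait_for_go() in overlay.c and manager.c:

/*
 * Usage sketch only -- not actual driver code. A consumer such as omapfb
 * moves an overlay and pushes the change through the user_info -> info ->
 * shadow register pipeline described at the top of apply.c.
 */
static int example_move_overlay(struct omap_overlay *ovl, u16 x, u16 y)
{
	struct omap_overlay_info info;
	int r;

	ovl->get_overlay_info(ovl, &info);	/* copy of user_info, taken under data_lock */
	info.pos_x = x;
	info.pos_y = y;

	r = ovl->set_overlay_info(ovl, &info);	/* dss_ovl_set_info(): sets user_info_dirty */
	if (r)
		return r;

	r = ovl->manager->apply(ovl->manager);	/* omap_dss_mgr_apply(): write regs, set GO */
	if (r)
		return r;

	/* optionally wait until the shadow registers have been taken into use */
	return ovl->manager->wait_for_go(ovl->manager);
}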
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 86ec12e..8613f86 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -50,7 +50,7 @@ module_param_named(def_disp, def_disp_name, charp, 0);
MODULE_PARM_DESC(def_disp, "default display name");
#ifdef DEBUG
-unsigned int dss_debug;
+bool dss_debug;
module_param_named(debug, dss_debug, bool, 0644);
#endif
@@ -178,6 +178,8 @@ static int omap_dss_probe(struct platform_device *pdev)
dss_features_init();
+ dss_apply_init();
+
dss_init_overlay_managers(pdev);
dss_init_overlays(pdev);
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index 5c81533..e1626a1 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -64,22 +64,6 @@ struct omap_dispc_isr_data {
u32 mask;
};
-struct dispc_h_coef {
- s8 hc4;
- s8 hc3;
- u8 hc2;
- s8 hc1;
- s8 hc0;
-};
-
-struct dispc_v_coef {
- s8 vc22;
- s8 vc2;
- u8 vc1;
- s8 vc0;
- s8 vc00;
-};
-
enum omap_burst_size {
BURST_SIZE_X2 = 0,
BURST_SIZE_X4 = 1,
@@ -417,7 +401,7 @@ void dispc_runtime_put(void)
DSSDBG("dispc_runtime_put\n");
- r = pm_runtime_put(&dispc.pdev->dev);
+ r = pm_runtime_put_sync(&dispc.pdev->dev);
WARN_ON(r < 0);
}
@@ -438,6 +422,34 @@ static struct omap_dss_device *dispc_mgr_get_device(enum omap_channel channel)
return mgr ? mgr->device : NULL;
}
+u32 dispc_mgr_get_vsync_irq(enum omap_channel channel)
+{
+ switch (channel) {
+ case OMAP_DSS_CHANNEL_LCD:
+ return DISPC_IRQ_VSYNC;
+ case OMAP_DSS_CHANNEL_LCD2:
+ return DISPC_IRQ_VSYNC2;
+ case OMAP_DSS_CHANNEL_DIGIT:
+ return DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
+ default:
+ BUG();
+ }
+}
+
+u32 dispc_mgr_get_framedone_irq(enum omap_channel channel)
+{
+ switch (channel) {
+ case OMAP_DSS_CHANNEL_LCD:
+ return DISPC_IRQ_FRAMEDONE;
+ case OMAP_DSS_CHANNEL_LCD2:
+ return DISPC_IRQ_FRAMEDONE2;
+ case OMAP_DSS_CHANNEL_DIGIT:
+ return 0;
+ default:
+ BUG();
+ }
+}
+
bool dispc_mgr_go_busy(enum omap_channel channel)
{
int bit;
@@ -533,105 +545,27 @@ static void dispc_ovl_write_firv2_reg(enum omap_plane plane, int reg, u32 value)
dispc_write_reg(DISPC_OVL_FIR_COEF_V2(plane, reg), value);
}
-static void dispc_ovl_set_scale_coef(enum omap_plane plane, int hscaleup,
- int vscaleup, int five_taps,
- enum omap_color_component color_comp)
-{
- /* Coefficients for horizontal up-sampling */
- static const struct dispc_h_coef coef_hup[8] = {
- { 0, 0, 128, 0, 0 },
- { -1, 13, 124, -8, 0 },
- { -2, 30, 112, -11, -1 },
- { -5, 51, 95, -11, -2 },
- { 0, -9, 73, 73, -9 },
- { -2, -11, 95, 51, -5 },
- { -1, -11, 112, 30, -2 },
- { 0, -8, 124, 13, -1 },
- };
-
- /* Coefficients for vertical up-sampling */
- static const struct dispc_v_coef coef_vup_3tap[8] = {
- { 0, 0, 128, 0, 0 },
- { 0, 3, 123, 2, 0 },
- { 0, 12, 111, 5, 0 },
- { 0, 32, 89, 7, 0 },
- { 0, 0, 64, 64, 0 },
- { 0, 7, 89, 32, 0 },
- { 0, 5, 111, 12, 0 },
- { 0, 2, 123, 3, 0 },
- };
-
- static const struct dispc_v_coef coef_vup_5tap[8] = {
- { 0, 0, 128, 0, 0 },
- { -1, 13, 124, -8, 0 },
- { -2, 30, 112, -11, -1 },
- { -5, 51, 95, -11, -2 },
- { 0, -9, 73, 73, -9 },
- { -2, -11, 95, 51, -5 },
- { -1, -11, 112, 30, -2 },
- { 0, -8, 124, 13, -1 },
- };
-
- /* Coefficients for horizontal down-sampling */
- static const struct dispc_h_coef coef_hdown[8] = {
- { 0, 36, 56, 36, 0 },
- { 4, 40, 55, 31, -2 },
- { 8, 44, 54, 27, -5 },
- { 12, 48, 53, 22, -7 },
- { -9, 17, 52, 51, 17 },
- { -7, 22, 53, 48, 12 },
- { -5, 27, 54, 44, 8 },
- { -2, 31, 55, 40, 4 },
- };
-
- /* Coefficients for vertical down-sampling */
- static const struct dispc_v_coef coef_vdown_3tap[8] = {
- { 0, 36, 56, 36, 0 },
- { 0, 40, 57, 31, 0 },
- { 0, 45, 56, 27, 0 },
- { 0, 50, 55, 23, 0 },
- { 0, 18, 55, 55, 0 },
- { 0, 23, 55, 50, 0 },
- { 0, 27, 56, 45, 0 },
- { 0, 31, 57, 40, 0 },
- };
-
- static const struct dispc_v_coef coef_vdown_5tap[8] = {
- { 0, 36, 56, 36, 0 },
- { 4, 40, 55, 31, -2 },
- { 8, 44, 54, 27, -5 },
- { 12, 48, 53, 22, -7 },
- { -9, 17, 52, 51, 17 },
- { -7, 22, 53, 48, 12 },
- { -5, 27, 54, 44, 8 },
- { -2, 31, 55, 40, 4 },
- };
-
- const struct dispc_h_coef *h_coef;
- const struct dispc_v_coef *v_coef;
+static void dispc_ovl_set_scale_coef(enum omap_plane plane, int fir_hinc,
+ int fir_vinc, int five_taps,
+ enum omap_color_component color_comp)
+{
+ const struct dispc_coef *h_coef, *v_coef;
int i;
- if (hscaleup)
- h_coef = coef_hup;
- else
- h_coef = coef_hdown;
-
- if (vscaleup)
- v_coef = five_taps ? coef_vup_5tap : coef_vup_3tap;
- else
- v_coef = five_taps ? coef_vdown_5tap : coef_vdown_3tap;
+ h_coef = dispc_ovl_get_scale_coef(fir_hinc, true);
+ v_coef = dispc_ovl_get_scale_coef(fir_vinc, five_taps);
for (i = 0; i < 8; i++) {
u32 h, hv;
- h = FLD_VAL(h_coef[i].hc0, 7, 0)
- | FLD_VAL(h_coef[i].hc1, 15, 8)
- | FLD_VAL(h_coef[i].hc2, 23, 16)
- | FLD_VAL(h_coef[i].hc3, 31, 24);
- hv = FLD_VAL(h_coef[i].hc4, 7, 0)
- | FLD_VAL(v_coef[i].vc0, 15, 8)
- | FLD_VAL(v_coef[i].vc1, 23, 16)
- | FLD_VAL(v_coef[i].vc2, 31, 24);
+ h = FLD_VAL(h_coef[i].hc0_vc00, 7, 0)
+ | FLD_VAL(h_coef[i].hc1_vc0, 15, 8)
+ | FLD_VAL(h_coef[i].hc2_vc1, 23, 16)
+ | FLD_VAL(h_coef[i].hc3_vc2, 31, 24);
+ hv = FLD_VAL(h_coef[i].hc4_vc22, 7, 0)
+ | FLD_VAL(v_coef[i].hc1_vc0, 15, 8)
+ | FLD_VAL(v_coef[i].hc2_vc1, 23, 16)
+ | FLD_VAL(v_coef[i].hc3_vc2, 31, 24);
if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y) {
dispc_ovl_write_firh_reg(plane, i, h);
@@ -646,8 +580,8 @@ static void dispc_ovl_set_scale_coef(enum omap_plane plane, int hscaleup,
if (five_taps) {
for (i = 0; i < 8; i++) {
u32 v;
- v = FLD_VAL(v_coef[i].vc00, 7, 0)
- | FLD_VAL(v_coef[i].vc22, 15, 8);
+ v = FLD_VAL(v_coef[i].hc0_vc00, 7, 0)
+ | FLD_VAL(v_coef[i].hc4_vc22, 15, 8);
if (color_comp == DISPC_COLOR_COMPONENT_RGB_Y)
dispc_ovl_write_firv_reg(plane, i, v);
else
@@ -875,8 +809,7 @@ static void dispc_ovl_set_color_mode(enum omap_plane plane,
REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1);
}
-static void dispc_ovl_set_channel_out(enum omap_plane plane,
- enum omap_channel channel)
+void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel)
{
int shift;
u32 val;
@@ -923,6 +856,39 @@ static void dispc_ovl_set_channel_out(enum omap_plane plane,
dispc_write_reg(DISPC_OVL_ATTRIBUTES(plane), val);
}
+static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane plane)
+{
+ int shift;
+ u32 val;
+ enum omap_channel channel;
+
+ switch (plane) {
+ case OMAP_DSS_GFX:
+ shift = 8;
+ break;
+ case OMAP_DSS_VIDEO1:
+ case OMAP_DSS_VIDEO2:
+ case OMAP_DSS_VIDEO3:
+ shift = 16;
+ break;
+ default:
+ BUG();
+ }
+
+ val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
+
+ if (dss_has_feature(FEAT_MGR_LCD2)) {
+ if (FLD_GET(val, 31, 30) == 0)
+ channel = FLD_GET(val, shift, shift);
+ else
+ channel = OMAP_DSS_CHANNEL_LCD2;
+ } else {
+ channel = FLD_GET(val, shift, shift);
+ }
+
+ return channel;
+}
+
static void dispc_ovl_set_burst_size(enum omap_plane plane,
enum omap_burst_size burst_size)
{
@@ -964,7 +930,7 @@ void dispc_enable_gamma_table(bool enable)
REG_FLD_MOD(DISPC_CONFIG, enable, 9, 9);
}
-void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable)
+static void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable)
{
u16 reg;
@@ -978,7 +944,7 @@ void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable)
REG_FLD_MOD(reg, enable, 15, 15);
}
-void dispc_mgr_set_cpr_coef(enum omap_channel channel,
+static void dispc_mgr_set_cpr_coef(enum omap_channel channel,
struct omap_dss_cpr_coefs *coefs)
{
u32 coef_r, coef_g, coef_b;
@@ -1057,8 +1023,7 @@ u32 dispc_ovl_get_fifo_size(enum omap_plane plane)
return dispc.fifo_size[plane];
}
-static void dispc_ovl_set_fifo_threshold(enum omap_plane plane, u32 low,
- u32 high)
+void dispc_ovl_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high)
{
u8 hi_start, hi_end, lo_start, lo_end;
u32 unit;
@@ -1169,17 +1134,12 @@ static void dispc_ovl_set_scale_param(enum omap_plane plane,
enum omap_color_component color_comp)
{
int fir_hinc, fir_vinc;
- int hscaleup, vscaleup;
-
- hscaleup = orig_width <= out_width;
- vscaleup = orig_height <= out_height;
-
- dispc_ovl_set_scale_coef(plane, hscaleup, vscaleup, five_taps,
- color_comp);
fir_hinc = 1024 * orig_width / out_width;
fir_vinc = 1024 * orig_height / out_height;
+ dispc_ovl_set_scale_coef(plane, fir_hinc, fir_vinc, five_taps,
+ color_comp);
dispc_ovl_set_fir(plane, fir_hinc, fir_vinc, color_comp);
}
@@ -1654,6 +1614,9 @@ static unsigned long calc_fclk_five_taps(enum omap_channel channel, u16 width,
u32 fclk = 0;
u64 tmp, pclk = dispc_mgr_pclk_rate(channel);
+ if (height <= out_height && width <= out_width)
+ return (unsigned long) pclk;
+
if (height > out_height) {
struct omap_dss_device *dssdev = dispc_mgr_get_device(channel);
unsigned int ppl = dssdev->panel.timings.x_res;
@@ -1708,7 +1671,16 @@ static unsigned long calc_fclk(enum omap_channel channel, u16 width,
else
vf = 1;
- return dispc_mgr_pclk_rate(channel) * vf * hf;
+ if (cpu_is_omap24xx()) {
+ if (vf > 1 && hf > 1)
+ return dispc_mgr_pclk_rate(channel) * 4;
+ else
+ return dispc_mgr_pclk_rate(channel) * 2;
+ } else if (cpu_is_omap34xx()) {
+ return dispc_mgr_pclk_rate(channel) * vf * hf;
+ } else {
+ return dispc_mgr_pclk_rate(channel) * hf;
+ }
}
static int dispc_ovl_calc_scaling(enum omap_plane plane,
@@ -1718,6 +1690,8 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
{
struct omap_overlay *ovl = omap_dss_get_overlay(plane);
const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
+ const int maxsinglelinewidth =
+ dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
unsigned long fclk = 0;
if (width == out_width && height == out_height)
@@ -1734,28 +1708,40 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
out_height > height * 8)
return -EINVAL;
- /* Must use 5-tap filter? */
- *five_taps = height > out_height * 2;
-
- if (!*five_taps) {
+ if (cpu_is_omap24xx()) {
+ if (width > maxsinglelinewidth)
+ DSSERR("Cannot scale max input width exceeded");
+ *five_taps = false;
+ fclk = calc_fclk(channel, width, height, out_width,
+ out_height);
+ } else if (cpu_is_omap34xx()) {
+ if (width > (maxsinglelinewidth * 2)) {
+ DSSERR("Cannot setup scaling");
+ DSSERR("width exceeds maximum width possible");
+ return -EINVAL;
+ }
+ fclk = calc_fclk_five_taps(channel, width, height, out_width,
+ out_height, color_mode);
+ if (width > maxsinglelinewidth) {
+ if (height > out_height && height < out_height * 2)
+ *five_taps = false;
+ else {
+ DSSERR("cannot setup scaling with five taps");
+ return -EINVAL;
+ }
+ }
+ if (!*five_taps)
+ fclk = calc_fclk(channel, width, height, out_width,
+ out_height);
+ } else {
+ if (width > maxsinglelinewidth) {
+ DSSERR("Cannot scale width exceeds max line width");
+ return -EINVAL;
+ }
fclk = calc_fclk(channel, width, height, out_width,
out_height);
-
- /* Try 5-tap filter if 3-tap fclk is too high */
- if (cpu_is_omap34xx() && height > out_height &&
- fclk > dispc_fclk_rate())
- *five_taps = true;
- }
-
- if (width > (2048 >> *five_taps)) {
- DSSERR("failed to set up scaling, fclk too low\n");
- return -EINVAL;
}
- if (*five_taps)
- fclk = calc_fclk_five_taps(channel, width, height,
- out_width, out_height, color_mode);
-
DSSDBG("required fclk rate = %lu Hz\n", fclk);
DSSDBG("current fclk rate = %lu Hz\n", dispc_fclk_rate());
@@ -1771,11 +1757,10 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
}
int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
- bool ilace, enum omap_channel channel, bool replication,
- u32 fifo_low, u32 fifo_high)
+ bool ilace, bool replication)
{
struct omap_overlay *ovl = omap_dss_get_overlay(plane);
- bool five_taps = false;
+ bool five_taps = true;
bool fieldmode = 0;
int r, cconv = 0;
unsigned offset0, offset1;
@@ -1783,36 +1768,43 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
s32 pix_inc;
u16 frame_height = oi->height;
unsigned int field_offset = 0;
+ u16 outw, outh;
+ enum omap_channel channel;
+
+ channel = dispc_ovl_get_channel_out(plane);
DSSDBG("dispc_ovl_setup %d, pa %x, pa_uv %x, sw %d, %d,%d, %dx%d -> "
- "%dx%d, cmode %x, rot %d, mir %d, ilace %d chan %d repl %d "
- "fifo_low %d fifo high %d\n", plane, oi->paddr, oi->p_uv_addr,
+ "%dx%d, cmode %x, rot %d, mir %d, ilace %d chan %d repl %d\n",
+ plane, oi->paddr, oi->p_uv_addr,
oi->screen_width, oi->pos_x, oi->pos_y, oi->width, oi->height,
oi->out_width, oi->out_height, oi->color_mode, oi->rotation,
- oi->mirror, ilace, channel, replication, fifo_low, fifo_high);
+ oi->mirror, ilace, channel, replication);
if (oi->paddr == 0)
return -EINVAL;
- if (ilace && oi->height == oi->out_height)
+ outw = oi->out_width == 0 ? oi->width : oi->out_width;
+ outh = oi->out_height == 0 ? oi->height : oi->out_height;
+
+ if (ilace && oi->height == outh)
fieldmode = 1;
if (ilace) {
if (fieldmode)
oi->height /= 2;
oi->pos_y /= 2;
- oi->out_height /= 2;
+ outh /= 2;
DSSDBG("adjusting for ilace: height %d, pos_y %d, "
"out_height %d\n",
- oi->height, oi->pos_y, oi->out_height);
+ oi->height, oi->pos_y, outh);
}
if (!dss_feat_color_mode_supported(plane, oi->color_mode))
return -EINVAL;
r = dispc_ovl_calc_scaling(plane, channel, oi->width, oi->height,
- oi->out_width, oi->out_height, oi->color_mode,
+ outw, outh, oi->color_mode,
&five_taps);
if (r)
return r;
@@ -1830,10 +1822,10 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
* so the integer part must be added to the base address of the
* bottom field.
*/
- if (!oi->height || oi->height == oi->out_height)
+ if (!oi->height || oi->height == outh)
field_offset = 0;
else
- field_offset = oi->height / oi->out_height / 2;
+ field_offset = oi->height / outh / 2;
}
/* Fields are independent but interleaved in memory. */
@@ -1869,7 +1861,7 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
dispc_ovl_set_pix_inc(plane, pix_inc);
DSSDBG("%d,%d %dx%d -> %dx%d\n", oi->pos_x, oi->pos_y, oi->width,
- oi->height, oi->out_width, oi->out_height);
+ oi->height, outw, outh);
dispc_ovl_set_pos(plane, oi->pos_x, oi->pos_y);
@@ -1877,10 +1869,10 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
if (ovl->caps & OMAP_DSS_OVL_CAP_SCALE) {
dispc_ovl_set_scaling(plane, oi->width, oi->height,
- oi->out_width, oi->out_height,
+ outw, outh,
ilace, five_taps, fieldmode,
oi->color_mode, oi->rotation);
- dispc_ovl_set_vid_size(plane, oi->out_width, oi->out_height);
+ dispc_ovl_set_vid_size(plane, outw, outh);
dispc_ovl_set_vid_color_conv(plane, cconv);
}
@@ -1891,10 +1883,7 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
dispc_ovl_set_pre_mult_alpha(plane, oi->pre_mult_alpha);
dispc_ovl_setup_global_alpha(plane, oi->global_alpha);
- dispc_ovl_set_channel_out(plane, channel);
-
dispc_ovl_enable_replication(plane, replication);
- dispc_ovl_set_fifo_threshold(plane, fifo_low, fifo_high);
return 0;
}
@@ -1916,10 +1905,14 @@ static void dispc_disable_isr(void *data, u32 mask)
static void _enable_lcd_out(enum omap_channel channel, bool enable)
{
- if (channel == OMAP_DSS_CHANNEL_LCD2)
+ if (channel == OMAP_DSS_CHANNEL_LCD2) {
REG_FLD_MOD(DISPC_CONTROL2, enable ? 1 : 0, 0, 0);
- else
+ /* flush posted write */
+ dispc_read_reg(DISPC_CONTROL2);
+ } else {
REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 0, 0);
+ dispc_read_reg(DISPC_CONTROL);
+ }
}
static void dispc_mgr_enable_lcd_out(enum omap_channel channel, bool enable)
@@ -1967,6 +1960,8 @@ static void dispc_mgr_enable_lcd_out(enum omap_channel channel, bool enable)
static void _enable_digit_out(bool enable)
{
REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 1, 1);
+ /* flush posted write */
+ dispc_read_reg(DISPC_CONTROL);
}
static void dispc_mgr_enable_digit_out(bool enable)
@@ -2124,25 +2119,12 @@ void dispc_set_loadmode(enum omap_dss_load_mode mode)
}
-void dispc_mgr_set_default_color(enum omap_channel channel, u32 color)
+static void dispc_mgr_set_default_color(enum omap_channel channel, u32 color)
{
dispc_write_reg(DISPC_DEFAULT_COLOR(channel), color);
}
-u32 dispc_mgr_get_default_color(enum omap_channel channel)
-{
- u32 l;
-
- BUG_ON(channel != OMAP_DSS_CHANNEL_DIGIT &&
- channel != OMAP_DSS_CHANNEL_LCD &&
- channel != OMAP_DSS_CHANNEL_LCD2);
-
- l = dispc_read_reg(DISPC_DEFAULT_COLOR(channel));
-
- return l;
-}
-
-void dispc_mgr_set_trans_key(enum omap_channel ch,
+static void dispc_mgr_set_trans_key(enum omap_channel ch,
enum omap_dss_trans_key_type type,
u32 trans_key)
{
@@ -2156,26 +2138,7 @@ void dispc_mgr_set_trans_key(enum omap_channel ch,
dispc_write_reg(DISPC_TRANS_COLOR(ch), trans_key);
}
-void dispc_mgr_get_trans_key(enum omap_channel ch,
- enum omap_dss_trans_key_type *type,
- u32 *trans_key)
-{
- if (type) {
- if (ch == OMAP_DSS_CHANNEL_LCD)
- *type = REG_GET(DISPC_CONFIG, 11, 11);
- else if (ch == OMAP_DSS_CHANNEL_DIGIT)
- *type = REG_GET(DISPC_CONFIG, 13, 13);
- else if (ch == OMAP_DSS_CHANNEL_LCD2)
- *type = REG_GET(DISPC_CONFIG2, 11, 11);
- else
- BUG();
- }
-
- if (trans_key)
- *trans_key = dispc_read_reg(DISPC_TRANS_COLOR(ch));
-}
-
-void dispc_mgr_enable_trans_key(enum omap_channel ch, bool enable)
+static void dispc_mgr_enable_trans_key(enum omap_channel ch, bool enable)
{
if (ch == OMAP_DSS_CHANNEL_LCD)
REG_FLD_MOD(DISPC_CONFIG, enable, 10, 10);
@@ -2185,7 +2148,8 @@ void dispc_mgr_enable_trans_key(enum omap_channel ch, bool enable)
REG_FLD_MOD(DISPC_CONFIG2, enable, 10, 10);
}
-void dispc_mgr_enable_alpha_fixed_zorder(enum omap_channel ch, bool enable)
+static void dispc_mgr_enable_alpha_fixed_zorder(enum omap_channel ch,
+ bool enable)
{
if (!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER))
return;
@@ -2196,40 +2160,20 @@ void dispc_mgr_enable_alpha_fixed_zorder(enum omap_channel ch, bool enable)
REG_FLD_MOD(DISPC_CONFIG, enable, 19, 19);
}
-bool dispc_mgr_alpha_fixed_zorder_enabled(enum omap_channel ch)
-{
- bool enabled;
-
- if (!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER))
- return false;
-
- if (ch == OMAP_DSS_CHANNEL_LCD)
- enabled = REG_GET(DISPC_CONFIG, 18, 18);
- else if (ch == OMAP_DSS_CHANNEL_DIGIT)
- enabled = REG_GET(DISPC_CONFIG, 19, 19);
- else
- BUG();
-
- return enabled;
-}
-
-bool dispc_mgr_trans_key_enabled(enum omap_channel ch)
+void dispc_mgr_setup(enum omap_channel channel,
+ struct omap_overlay_manager_info *info)
{
- bool enabled;
-
- if (ch == OMAP_DSS_CHANNEL_LCD)
- enabled = REG_GET(DISPC_CONFIG, 10, 10);
- else if (ch == OMAP_DSS_CHANNEL_DIGIT)
- enabled = REG_GET(DISPC_CONFIG, 12, 12);
- else if (ch == OMAP_DSS_CHANNEL_LCD2)
- enabled = REG_GET(DISPC_CONFIG2, 10, 10);
- else
- BUG();
-
- return enabled;
+ dispc_mgr_set_default_color(channel, info->default_color);
+ dispc_mgr_set_trans_key(channel, info->trans_key_type, info->trans_key);
+ dispc_mgr_enable_trans_key(channel, info->trans_enabled);
+ dispc_mgr_enable_alpha_fixed_zorder(channel,
+ info->partial_alpha_enabled);
+ if (dss_has_feature(FEAT_CPR)) {
+ dispc_mgr_enable_cpr(channel, info->cpr_enable);
+ dispc_mgr_set_cpr_coef(channel, &info->cpr_coefs);
+ }
}
-
void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines)
{
int code;
@@ -3184,7 +3128,8 @@ static void dispc_error_worker(struct work_struct *work)
for (i = 0; i < omap_dss_get_num_overlay_managers(); ++i) {
struct omap_overlay_manager *mgr;
mgr = omap_dss_get_overlay_manager(i);
- mgr->device->driver->disable(mgr->device);
+ if (mgr->device && mgr->device->driver)
+ mgr->device->driver->disable(mgr->device);
}
}
diff --git a/drivers/video/omap2/dss/dispc.h b/drivers/video/omap2/dss/dispc.h
index c06efc3..5836bd1 100644
--- a/drivers/video/omap2/dss/dispc.h
+++ b/drivers/video/omap2/dss/dispc.h
@@ -97,6 +97,17 @@
#define DISPC_OVL_PRELOAD(n) (DISPC_OVL_BASE(n) + \
DISPC_PRELOAD_OFFSET(n))
+/* DISPC up/downsampling FIR filter coefficient structure */
+struct dispc_coef {
+ s8 hc4_vc22;
+ s8 hc3_vc2;
+ u8 hc2_vc1;
+ s8 hc1_vc0;
+ s8 hc0_vc00;
+};
+
+const struct dispc_coef *dispc_ovl_get_scale_coef(int inc, int five_taps);
+
/* DISPC manager/channel specific registers */
static inline u16 DISPC_DEFAULT_COLOR(enum omap_channel channel)
{
diff --git a/drivers/video/omap2/dss/dispc_coefs.c b/drivers/video/omap2/dss/dispc_coefs.c
new file mode 100644
index 0000000..069bccb
--- /dev/null
+++ b/drivers/video/omap2/dss/dispc_coefs.c
@@ -0,0 +1,326 @@
+/*
+ * linux/drivers/video/omap2/dss/dispc_coefs.c
+ *
+ * Copyright (C) 2011 Texas Instruments
+ * Author: Chandrabhanu Mahapatra <cmahapatra@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <video/omapdss.h>
+#include "dispc.h"
+
+#define ARRAY_LEN(array) (sizeof(array) / sizeof(array[0]))
+
+static const struct dispc_coef coef3_M8[8] = {
+ { 0, 0, 128, 0, 0 },
+ { 0, -4, 123, 9, 0 },
+ { 0, -4, 108, 24, 0 },
+ { 0, -2, 87, 43, 0 },
+ { 0, 64, 64, 0, 0 },
+ { 0, 43, 87, -2, 0 },
+ { 0, 24, 108, -4, 0 },
+ { 0, 9, 123, -4, 0 },
+};
+
+static const struct dispc_coef coef3_M9[8] = {
+ { 0, 6, 116, 6, 0 },
+ { 0, 0, 112, 16, 0 },
+ { 0, -2, 100, 30, 0 },
+ { 0, -2, 83, 47, 0 },
+ { 0, 64, 64, 0, 0 },
+ { 0, 47, 83, -2, 0 },
+ { 0, 30, 100, -2, 0 },
+ { 0, 16, 112, 0, 0 },
+};
+
+static const struct dispc_coef coef3_M10[8] = {
+ { 0, 10, 108, 10, 0 },
+ { 0, 3, 104, 21, 0 },
+ { 0, 0, 94, 34, 0 },
+ { 0, -1, 80, 49, 0 },
+ { 0, 64, 64, 0, 0 },
+ { 0, 49, 80, -1, 0 },
+ { 0, 34, 94, 0, 0 },
+ { 0, 21, 104, 3, 0 },
+};
+
+static const struct dispc_coef coef3_M11[8] = {
+ { 0, 14, 100, 14, 0 },
+ { 0, 6, 98, 24, 0 },
+ { 0, 2, 90, 36, 0 },
+ { 0, 0, 78, 50, 0 },
+ { 0, 64, 64, 0, 0 },
+ { 0, 50, 78, 0, 0 },
+ { 0, 36, 90, 2, 0 },
+ { 0, 24, 98, 6, 0 },
+};
+
+static const struct dispc_coef coef3_M12[8] = {
+ { 0, 16, 96, 16, 0 },
+ { 0, 9, 93, 26, 0 },
+ { 0, 4, 86, 38, 0 },
+ { 0, 1, 76, 51, 0 },
+ { 0, 64, 64, 0, 0 },
+ { 0, 51, 76, 1, 0 },
+ { 0, 38, 86, 4, 0 },
+ { 0, 26, 93, 9, 0 },
+};
+
+static const struct dispc_coef coef3_M13[8] = {
+ { 0, 18, 92, 18, 0 },
+ { 0, 10, 90, 28, 0 },
+ { 0, 5, 83, 40, 0 },
+ { 0, 1, 75, 52, 0 },
+ { 0, 64, 64, 0, 0 },
+ { 0, 52, 75, 1, 0 },
+ { 0, 40, 83, 5, 0 },
+ { 0, 28, 90, 10, 0 },
+};
+
+static const struct dispc_coef coef3_M14[8] = {
+ { 0, 20, 88, 20, 0 },
+ { 0, 12, 86, 30, 0 },
+ { 0, 6, 81, 41, 0 },
+ { 0, 2, 74, 52, 0 },
+ { 0, 64, 64, 0, 0 },
+ { 0, 52, 74, 2, 0 },
+ { 0, 41, 81, 6, 0 },
+ { 0, 30, 86, 12, 0 },
+};
+
+static const struct dispc_coef coef3_M16[8] = {
+ { 0, 22, 84, 22, 0 },
+ { 0, 14, 82, 32, 0 },
+ { 0, 8, 78, 42, 0 },
+ { 0, 3, 72, 53, 0 },
+ { 0, 64, 64, 0, 0 },
+ { 0, 53, 72, 3, 0 },
+ { 0, 42, 78, 8, 0 },
+ { 0, 32, 82, 14, 0 },
+};
+
+static const struct dispc_coef coef3_M19[8] = {
+ { 0, 24, 80, 24, 0 },
+ { 0, 16, 79, 33, 0 },
+ { 0, 9, 76, 43, 0 },
+ { 0, 4, 70, 54, 0 },
+ { 0, 64, 64, 0, 0 },
+ { 0, 54, 70, 4, 0 },
+ { 0, 43, 76, 9, 0 },
+ { 0, 33, 79, 16, 0 },
+};
+
+static const struct dispc_coef coef3_M22[8] = {
+ { 0, 25, 78, 25, 0 },
+ { 0, 17, 77, 34, 0 },
+ { 0, 10, 74, 44, 0 },
+ { 0, 5, 69, 54, 0 },
+ { 0, 64, 64, 0, 0 },
+ { 0, 54, 69, 5, 0 },
+ { 0, 44, 74, 10, 0 },
+ { 0, 34, 77, 17, 0 },
+};
+
+static const struct dispc_coef coef3_M26[8] = {
+ { 0, 26, 76, 26, 0 },
+ { 0, 19, 74, 35, 0 },
+ { 0, 11, 72, 45, 0 },
+ { 0, 5, 69, 54, 0 },
+ { 0, 64, 64, 0, 0 },
+ { 0, 54, 69, 5, 0 },
+ { 0, 45, 72, 11, 0 },
+ { 0, 35, 74, 19, 0 },
+};
+
+static const struct dispc_coef coef3_M32[8] = {
+ { 0, 27, 74, 27, 0 },
+ { 0, 19, 73, 36, 0 },
+ { 0, 12, 71, 45, 0 },
+ { 0, 6, 68, 54, 0 },
+ { 0, 64, 64, 0, 0 },
+ { 0, 54, 68, 6, 0 },
+ { 0, 45, 71, 12, 0 },
+ { 0, 36, 73, 19, 0 },
+};
+
+static const struct dispc_coef coef5_M8[8] = {
+ { 0, 0, 128, 0, 0 },
+ { -2, 14, 125, -10, 1 },
+ { -6, 33, 114, -15, 2 },
+ { -10, 55, 98, -16, 1 },
+ { 0, -14, 78, 78, -14 },
+ { 1, -16, 98, 55, -10 },
+ { 2, -15, 114, 33, -6 },
+ { 1, -10, 125, 14, -2 },
+};
+
+static const struct dispc_coef coef5_M9[8] = {
+ { -3, 10, 114, 10, -3 },
+ { -6, 24, 111, 0, -1 },
+ { -8, 40, 103, -7, 0 },
+ { -11, 58, 91, -11, 1 },
+ { 0, -12, 76, 76, -12 },
+ { 1, -11, 91, 58, -11 },
+ { 0, -7, 103, 40, -8 },
+ { -1, 0, 111, 24, -6 },
+};
+
+static const struct dispc_coef coef5_M10[8] = {
+ { -4, 18, 100, 18, -4 },
+ { -6, 30, 99, 8, -3 },
+ { -8, 44, 93, 0, -1 },
+ { -9, 58, 84, -5, 0 },
+ { 0, -8, 72, 72, -8 },
+ { 0, -5, 84, 58, -9 },
+ { -1, 0, 93, 44, -8 },
+ { -3, 8, 99, 30, -6 },
+};
+
+static const struct dispc_coef coef5_M11[8] = {
+ { -5, 23, 92, 23, -5 },
+ { -6, 34, 90, 13, -3 },
+ { -6, 45, 85, 6, -2 },
+ { -6, 57, 78, 0, -1 },
+ { 0, -4, 68, 68, -4 },
+ { -1, 0, 78, 57, -6 },
+ { -2, 6, 85, 45, -6 },
+ { -3, 13, 90, 34, -6 },
+};
+
+static const struct dispc_coef coef5_M12[8] = {
+ { -4, 26, 84, 26, -4 },
+ { -5, 36, 82, 18, -3 },
+ { -4, 46, 78, 10, -2 },
+ { -3, 55, 72, 5, -1 },
+ { 0, 0, 64, 64, 0 },
+ { -1, 5, 72, 55, -3 },
+ { -2, 10, 78, 46, -4 },
+ { -3, 18, 82, 36, -5 },
+};
+
+static const struct dispc_coef coef5_M13[8] = {
+ { -3, 28, 78, 28, -3 },
+ { -3, 37, 76, 21, -3 },
+ { -2, 45, 73, 14, -2 },
+ { 0, 53, 68, 8, -1 },
+ { 0, 3, 61, 61, 3 },
+ { -1, 8, 68, 53, 0 },
+ { -2, 14, 73, 45, -2 },
+ { -3, 21, 76, 37, -3 },
+};
+
+static const struct dispc_coef coef5_M14[8] = {
+ { -2, 30, 72, 30, -2 },
+ { -1, 37, 71, 23, -2 },
+ { 0, 45, 69, 16, -2 },
+ { 3, 52, 64, 10, -1 },
+ { 0, 6, 58, 58, 6 },
+ { -1, 10, 64, 52, 3 },
+ { -2, 16, 69, 45, 0 },
+ { -2, 23, 71, 37, -1 },
+};
+
+static const struct dispc_coef coef5_M16[8] = {
+ { 0, 31, 66, 31, 0 },
+ { 1, 38, 65, 25, -1 },
+ { 3, 44, 62, 20, -1 },
+ { 6, 49, 59, 14, 0 },
+ { 0, 10, 54, 54, 10 },
+ { 0, 14, 59, 49, 6 },
+ { -1, 20, 62, 44, 3 },
+ { -1, 25, 65, 38, 1 },
+};
+
+static const struct dispc_coef coef5_M19[8] = {
+ { 3, 32, 58, 32, 3 },
+ { 4, 38, 58, 27, 1 },
+ { 7, 42, 55, 23, 1 },
+ { 10, 46, 54, 18, 0 },
+ { 0, 14, 50, 50, 14 },
+ { 0, 18, 54, 46, 10 },
+ { 1, 23, 55, 42, 7 },
+ { 1, 27, 58, 38, 4 },
+};
+
+static const struct dispc_coef coef5_M22[8] = {
+ { 4, 33, 54, 33, 4 },
+ { 6, 37, 54, 28, 3 },
+ { 9, 41, 53, 24, 1 },
+ { 12, 45, 51, 20, 0 },
+ { 0, 16, 48, 48, 16 },
+ { 0, 20, 51, 45, 12 },
+ { 1, 24, 53, 41, 9 },
+ { 3, 28, 54, 37, 6 },
+};
+
+static const struct dispc_coef coef5_M26[8] = {
+ { 6, 33, 50, 33, 6 },
+ { 8, 36, 51, 29, 4 },
+ { 11, 40, 50, 25, 2 },
+ { 14, 43, 48, 22, 1 },
+ { 0, 18, 46, 46, 18 },
+ { 1, 22, 48, 43, 14 },
+ { 2, 25, 50, 40, 11 },
+ { 4, 29, 51, 36, 8 },
+};
+
+static const struct dispc_coef coef5_M32[8] = {
+ { 7, 33, 48, 33, 7 },
+ { 10, 36, 48, 29, 5 },
+ { 13, 39, 47, 26, 3 },
+ { 16, 42, 46, 23, 1 },
+ { 0, 19, 45, 45, 19 },
+ { 1, 23, 46, 42, 16 },
+ { 3, 26, 47, 39, 13 },
+ { 5, 29, 48, 36, 10 },
+};
+
+const struct dispc_coef *dispc_ovl_get_scale_coef(int inc, int five_taps)
+{
+ int i;
+ static const struct {
+ int Mmin;
+ int Mmax;
+ const struct dispc_coef *coef_3;
+ const struct dispc_coef *coef_5;
+ } coefs[] = {
+ { 27, 32, coef3_M32, coef5_M32 },
+ { 23, 26, coef3_M26, coef5_M26 },
+ { 20, 22, coef3_M22, coef5_M22 },
+ { 17, 19, coef3_M19, coef5_M19 },
+ { 15, 16, coef3_M16, coef5_M16 },
+ { 14, 14, coef3_M14, coef5_M14 },
+ { 13, 13, coef3_M13, coef5_M13 },
+ { 12, 12, coef3_M12, coef5_M12 },
+ { 11, 11, coef3_M11, coef5_M11 },
+ { 10, 10, coef3_M10, coef5_M10 },
+ { 9, 9, coef3_M9, coef5_M9 },
+ { 4, 8, coef3_M8, coef5_M8 },
+ /*
+ * When upscaling more than two times, blockiness and outlines
+ * around the image are observed when M8 tables are used. M11,
+ * M16 and M19 tables are used to prevent this.
+ */
+ { 3, 3, coef3_M11, coef5_M11 },
+ { 2, 2, coef3_M16, coef5_M16 },
+ { 0, 1, coef3_M19, coef5_M19 },
+ };
+
+ inc /= 128;
+ for (i = 0; i < ARRAY_LEN(coefs); ++i)
+ if (inc >= coefs[i].Mmin && inc <= coefs[i].Mmax)
+ return five_taps ? coefs[i].coef_5 : coefs[i].coef_3;
+ return NULL;
+}
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index 976ac23..faaf305 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -180,6 +180,11 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
{
int r;
+ if (cpu_is_omap34xx() && !dpi.vdds_dsi_reg) {
+ DSSERR("no VDSS_DSI regulator\n");
+ return -ENODEV;
+ }
+
if (dssdev->manager == NULL) {
DSSERR("failed to enable display: no manager\n");
return -ENODEV;
@@ -223,10 +228,13 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
mdelay(2);
- dssdev->manager->enable(dssdev->manager);
+ r = dss_mgr_enable(dssdev->manager);
+ if (r)
+ goto err_mgr_enable;
return 0;
+err_mgr_enable:
err_set_mode:
if (dpi_use_dsi_pll(dssdev))
dsi_pll_uninit(dpi.dsidev, true);
@@ -249,7 +257,7 @@ EXPORT_SYMBOL(omapdss_dpi_display_enable);
void omapdss_dpi_display_disable(struct omap_dss_device *dssdev)
{
- dssdev->manager->disable(dssdev->manager);
+ dss_mgr_disable(dssdev->manager);
if (dpi_use_dsi_pll(dssdev)) {
dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 5abf8e7..52f36ec 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -203,6 +203,21 @@ struct dsi_reg { u16 idx; };
typedef void (*omap_dsi_isr_t) (void *arg, u32 mask);
#define DSI_MAX_NR_ISRS 2
+#define DSI_MAX_NR_LANES 5
+
+enum dsi_lane_function {
+ DSI_LANE_UNUSED = 0,
+ DSI_LANE_CLK,
+ DSI_LANE_DATA1,
+ DSI_LANE_DATA2,
+ DSI_LANE_DATA3,
+ DSI_LANE_DATA4,
+};
+
+struct dsi_lane_config {
+ enum dsi_lane_function function;
+ u8 polarity;
+};
struct dsi_isr_data {
omap_dsi_isr_t isr;
@@ -223,24 +238,6 @@ enum dsi_vc_source {
DSI_VC_SOURCE_VP,
};
-enum dsi_lane {
- DSI_CLK_P = 1 << 0,
- DSI_CLK_N = 1 << 1,
- DSI_DATA1_P = 1 << 2,
- DSI_DATA1_N = 1 << 3,
- DSI_DATA2_P = 1 << 4,
- DSI_DATA2_N = 1 << 5,
- DSI_DATA3_P = 1 << 6,
- DSI_DATA3_N = 1 << 7,
- DSI_DATA4_P = 1 << 8,
- DSI_DATA4_N = 1 << 9,
-};
-
-struct dsi_update_region {
- u16 x, y, w, h;
- struct omap_dss_device *device;
-};
-
struct dsi_irq_stats {
unsigned long last_reset;
unsigned irq_count;
@@ -290,7 +287,9 @@ struct dsi_data {
struct dsi_isr_tables isr_tables_copy;
int update_channel;
- struct dsi_update_region update_region;
+#ifdef DEBUG
+ unsigned update_bytes;
+#endif
bool te_enabled;
bool ulps_enabled;
@@ -327,7 +326,10 @@ struct dsi_data {
unsigned long fint_min, fint_max;
unsigned long lpdiv_max;
- int num_data_lanes;
+ unsigned num_lanes_supported;
+
+ struct dsi_lane_config lanes[DSI_MAX_NR_LANES];
+ unsigned num_lanes_used;
unsigned scp_clk_refcount;
};
@@ -340,8 +342,8 @@ struct dsi_packet_sent_handler_data {
static struct platform_device *dsi_pdev_map[MAX_NUM_DSI];
#ifdef DEBUG
-static unsigned int dsi_perf;
-module_param_named(dsi_perf, dsi_perf, bool, 0644);
+static bool dsi_perf;
+module_param(dsi_perf, bool, 0644);
#endif
static inline struct dsi_data *dsi_get_dsidrv_data(struct platform_device *dsidev)
@@ -413,14 +415,29 @@ static void dsi_completion_handler(void *data, u32 mask)
static inline int wait_for_bit_change(struct platform_device *dsidev,
const struct dsi_reg idx, int bitnum, int value)
{
- int t = 100000;
+ unsigned long timeout;
+ ktime_t wait;
+ int t;
- while (REG_GET(dsidev, idx, bitnum, bitnum) != value) {
- if (--t == 0)
- return !value;
+ /* first busyloop to see if the bit changes right away */
+ t = 100;
+ while (t-- > 0) {
+ if (REG_GET(dsidev, idx, bitnum, bitnum) == value)
+ return value;
}
- return value;
+ /* then loop for 500ms, sleeping for 1ms in between */
+ timeout = jiffies + msecs_to_jiffies(500);
+ while (time_before(jiffies, timeout)) {
+ if (REG_GET(dsidev, idx, bitnum, bitnum) == value)
+ return value;
+
+ wait = ns_to_ktime(1000 * 1000);
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_hrtimeout(&wait, HRTIMER_MODE_REL);
+ }
+
+ return !value;
}
u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt)
@@ -454,7 +471,6 @@ static void dsi_perf_mark_start(struct platform_device *dsidev)
static void dsi_perf_show(struct platform_device *dsidev, const char *name)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- struct omap_dss_device *dssdev = dsi->update_region.device;
ktime_t t, setup_time, trans_time;
u32 total_bytes;
u32 setup_us, trans_us, total_us;
@@ -476,9 +492,7 @@ static void dsi_perf_show(struct platform_device *dsidev, const char *name)
total_us = setup_us + trans_us;
- total_bytes = dsi->update_region.w *
- dsi->update_region.h *
- dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt) / 8;
+ total_bytes = dsi->update_bytes;
printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), "
"%u bytes, %u kbytes/sec\n",
@@ -1065,7 +1079,7 @@ void dsi_runtime_put(struct platform_device *dsidev)
DSSDBG("dsi_runtime_put\n");
- r = pm_runtime_put(&dsi->pdev->dev);
+ r = pm_runtime_put_sync(&dsi->pdev->dev);
WARN_ON(r < 0);
}
@@ -1720,17 +1734,19 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
seq_printf(s, "CLKIN4DDR\t%-16luregm %u\n",
cinfo->clkin4ddr, cinfo->regm);
- seq_printf(s, "%s (%s)\t%-16luregm_dispc %u\t(%s)\n",
- dss_get_generic_clk_source_name(dispc_clk_src),
- dss_feat_get_clk_source_name(dispc_clk_src),
+ seq_printf(s, "DSI_PLL_HSDIV_DISPC (%s)\t%-16luregm_dispc %u\t(%s)\n",
+ dss_feat_get_clk_source_name(dsi_module == 0 ?
+ OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DISPC :
+ OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC),
cinfo->dsi_pll_hsdiv_dispc_clk,
cinfo->regm_dispc,
dispc_clk_src == OMAP_DSS_CLK_SRC_FCK ?
"off" : "on");
- seq_printf(s, "%s (%s)\t%-16luregm_dsi %u\t(%s)\n",
- dss_get_generic_clk_source_name(dsi_clk_src),
- dss_feat_get_clk_source_name(dsi_clk_src),
+ seq_printf(s, "DSI_PLL_HSDIV_DSI (%s)\t%-16luregm_dsi %u\t(%s)\n",
+ dss_feat_get_clk_source_name(dsi_module == 0 ?
+ OMAP_DSS_CLK_SRC_DSI_PLL_HSDIV_DSI :
+ OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DSI),
cinfo->dsi_pll_hsdiv_dsi_clk,
cinfo->regm_dsi,
dsi_clk_src == OMAP_DSS_CLK_SRC_FCK ?
@@ -2029,34 +2045,6 @@ static int dsi_cio_power(struct platform_device *dsidev,
return 0;
}
-/* Number of data lanes present on DSI interface */
-static inline int dsi_get_num_data_lanes(struct platform_device *dsidev)
-{
- /* DSI on OMAP3 doesn't have register DSI_GNQ, set number
- * of data lanes as 2 by default */
- if (dss_has_feature(FEAT_DSI_GNQ))
- return REG_GET(dsidev, DSI_GNQ, 11, 9); /* NB_DATA_LANES */
- else
- return 2;
-}
-
-/* Number of data lanes used by the dss device */
-static inline int dsi_get_num_data_lanes_dssdev(struct omap_dss_device *dssdev)
-{
- int num_data_lanes = 0;
-
- if (dssdev->phy.dsi.data1_lane != 0)
- num_data_lanes++;
- if (dssdev->phy.dsi.data2_lane != 0)
- num_data_lanes++;
- if (dssdev->phy.dsi.data3_lane != 0)
- num_data_lanes++;
- if (dssdev->phy.dsi.data4_lane != 0)
- num_data_lanes++;
-
- return num_data_lanes;
-}
-
static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
{
int val;
@@ -2088,59 +2076,112 @@ static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
}
}
-static void dsi_set_lane_config(struct omap_dss_device *dssdev)
+static int dsi_parse_lane_config(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- u32 r;
- int num_data_lanes_dssdev = dsi_get_num_data_lanes_dssdev(dssdev);
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ u8 lanes[DSI_MAX_NR_LANES];
+ u8 polarities[DSI_MAX_NR_LANES];
+ int num_lanes, i;
+
+ static const enum dsi_lane_function functions[] = {
+ DSI_LANE_CLK,
+ DSI_LANE_DATA1,
+ DSI_LANE_DATA2,
+ DSI_LANE_DATA3,
+ DSI_LANE_DATA4,
+ };
+
+ lanes[0] = dssdev->phy.dsi.clk_lane;
+ lanes[1] = dssdev->phy.dsi.data1_lane;
+ lanes[2] = dssdev->phy.dsi.data2_lane;
+ lanes[3] = dssdev->phy.dsi.data3_lane;
+ lanes[4] = dssdev->phy.dsi.data4_lane;
+ polarities[0] = dssdev->phy.dsi.clk_pol;
+ polarities[1] = dssdev->phy.dsi.data1_pol;
+ polarities[2] = dssdev->phy.dsi.data2_pol;
+ polarities[3] = dssdev->phy.dsi.data3_pol;
+ polarities[4] = dssdev->phy.dsi.data4_pol;
- int clk_lane = dssdev->phy.dsi.clk_lane;
- int data1_lane = dssdev->phy.dsi.data1_lane;
- int data2_lane = dssdev->phy.dsi.data2_lane;
- int clk_pol = dssdev->phy.dsi.clk_pol;
- int data1_pol = dssdev->phy.dsi.data1_pol;
- int data2_pol = dssdev->phy.dsi.data2_pol;
+ num_lanes = 0;
+
+ for (i = 0; i < dsi->num_lanes_supported; ++i)
+ dsi->lanes[i].function = DSI_LANE_UNUSED;
+
+ for (i = 0; i < dsi->num_lanes_supported; ++i) {
+ int num;
+
+ if (lanes[i] == DSI_LANE_UNUSED)
+ break;
+
+ num = lanes[i] - 1;
+
+ if (num >= dsi->num_lanes_supported)
+ return -EINVAL;
+
+ if (dsi->lanes[num].function != DSI_LANE_UNUSED)
+ return -EINVAL;
+
+ dsi->lanes[num].function = functions[i];
+ dsi->lanes[num].polarity = polarities[i];
+ num_lanes++;
+ }
+
+ if (num_lanes < 2 || num_lanes > dsi->num_lanes_supported)
+ return -EINVAL;
+
+ dsi->num_lanes_used = num_lanes;
+
+ return 0;
+}
+
+static int dsi_set_lane_config(struct omap_dss_device *dssdev)
+{
+ struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ static const u8 offsets[] = { 0, 4, 8, 12, 16 };
+ static const enum dsi_lane_function functions[] = {
+ DSI_LANE_CLK,
+ DSI_LANE_DATA1,
+ DSI_LANE_DATA2,
+ DSI_LANE_DATA3,
+ DSI_LANE_DATA4,
+ };
+ u32 r;
+ int i;
r = dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG1);
- r = FLD_MOD(r, clk_lane, 2, 0);
- r = FLD_MOD(r, clk_pol, 3, 3);
- r = FLD_MOD(r, data1_lane, 6, 4);
- r = FLD_MOD(r, data1_pol, 7, 7);
- r = FLD_MOD(r, data2_lane, 10, 8);
- r = FLD_MOD(r, data2_pol, 11, 11);
- if (num_data_lanes_dssdev > 2) {
- int data3_lane = dssdev->phy.dsi.data3_lane;
- int data3_pol = dssdev->phy.dsi.data3_pol;
-
- r = FLD_MOD(r, data3_lane, 14, 12);
- r = FLD_MOD(r, data3_pol, 15, 15);
+
+ for (i = 0; i < dsi->num_lanes_used; ++i) {
+ unsigned offset = offsets[i];
+ unsigned polarity, lane_number;
+ unsigned t;
+
+ for (t = 0; t < dsi->num_lanes_supported; ++t)
+ if (dsi->lanes[t].function == functions[i])
+ break;
+
+ if (t == dsi->num_lanes_supported)
+ return -EINVAL;
+
+ lane_number = t;
+ polarity = dsi->lanes[t].polarity;
+
+ r = FLD_MOD(r, lane_number + 1, offset + 2, offset);
+ r = FLD_MOD(r, polarity, offset + 3, offset + 3);
}
- if (num_data_lanes_dssdev > 3) {
- int data4_lane = dssdev->phy.dsi.data4_lane;
- int data4_pol = dssdev->phy.dsi.data4_pol;
- r = FLD_MOD(r, data4_lane, 18, 16);
- r = FLD_MOD(r, data4_pol, 19, 19);
+ /* clear the unused lanes */
+ for (; i < dsi->num_lanes_supported; ++i) {
+ unsigned offset = offsets[i];
+
+ r = FLD_MOD(r, 0, offset + 2, offset);
+ r = FLD_MOD(r, 0, offset + 3, offset + 3);
}
- dsi_write_reg(dsidev, DSI_COMPLEXIO_CFG1, r);
- /* The configuration of the DSI complex I/O (number of data lanes,
- position, differential order) should not be changed while
- DSS.DSI_CLK_CRTRL[20] LP_CLK_ENABLE bit is set to 1. In order for
- the hardware to take into account a new configuration of the complex
- I/O (done in DSS.DSI_COMPLEXIO_CFG1 register), it is recommended to
- follow this sequence: First set the DSS.DSI_CTRL[0] IF_EN bit to 1,
- then reset the DSS.DSI_CTRL[0] IF_EN to 0, then set
- DSS.DSI_CLK_CTRL[20] LP_CLK_ENABLE to 1 and finally set again the
- DSS.DSI_CTRL[0] IF_EN bit to 1. If the sequence is not followed, the
- DSI complex I/O configuration is unknown. */
+ dsi_write_reg(dsidev, DSI_COMPLEXIO_CFG1, r);
- /*
- REG_FLD_MOD(dsidev, DSI_CTRL, 1, 0, 0);
- REG_FLD_MOD(dsidev, DSI_CTRL, 0, 0, 0);
- REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 1, 20, 20);
- REG_FLD_MOD(dsidev, DSI_CTRL, 1, 0, 0);
- */
+ return 0;
}
static inline unsigned ns2ddr(struct platform_device *dsidev, unsigned ns)
@@ -2230,49 +2271,28 @@ static void dsi_cio_timings(struct platform_device *dsidev)
dsi_write_reg(dsidev, DSI_DSIPHY_CFG2, r);
}
+/* lane masks have lane 0 at lsb. mask_p for positive lines, n for negative */
static void dsi_cio_enable_lane_override(struct omap_dss_device *dssdev,
- enum dsi_lane lanes)
+ unsigned mask_p, unsigned mask_n)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- int clk_lane = dssdev->phy.dsi.clk_lane;
- int data1_lane = dssdev->phy.dsi.data1_lane;
- int data2_lane = dssdev->phy.dsi.data2_lane;
- int data3_lane = dssdev->phy.dsi.data3_lane;
- int data4_lane = dssdev->phy.dsi.data4_lane;
- int clk_pol = dssdev->phy.dsi.clk_pol;
- int data1_pol = dssdev->phy.dsi.data1_pol;
- int data2_pol = dssdev->phy.dsi.data2_pol;
- int data3_pol = dssdev->phy.dsi.data3_pol;
- int data4_pol = dssdev->phy.dsi.data4_pol;
-
- u32 l = 0;
- u8 lptxscp_start = dsi->num_data_lanes == 2 ? 22 : 26;
-
- if (lanes & DSI_CLK_P)
- l |= 1 << ((clk_lane - 1) * 2 + (clk_pol ? 0 : 1));
- if (lanes & DSI_CLK_N)
- l |= 1 << ((clk_lane - 1) * 2 + (clk_pol ? 1 : 0));
-
- if (lanes & DSI_DATA1_P)
- l |= 1 << ((data1_lane - 1) * 2 + (data1_pol ? 0 : 1));
- if (lanes & DSI_DATA1_N)
- l |= 1 << ((data1_lane - 1) * 2 + (data1_pol ? 1 : 0));
-
- if (lanes & DSI_DATA2_P)
- l |= 1 << ((data2_lane - 1) * 2 + (data2_pol ? 0 : 1));
- if (lanes & DSI_DATA2_N)
- l |= 1 << ((data2_lane - 1) * 2 + (data2_pol ? 1 : 0));
-
- if (lanes & DSI_DATA3_P)
- l |= 1 << ((data3_lane - 1) * 2 + (data3_pol ? 0 : 1));
- if (lanes & DSI_DATA3_N)
- l |= 1 << ((data3_lane - 1) * 2 + (data3_pol ? 1 : 0));
-
- if (lanes & DSI_DATA4_P)
- l |= 1 << ((data4_lane - 1) * 2 + (data4_pol ? 0 : 1));
- if (lanes & DSI_DATA4_N)
- l |= 1 << ((data4_lane - 1) * 2 + (data4_pol ? 1 : 0));
+ int i;
+ u32 l;
+ u8 lptxscp_start = dsi->num_lanes_supported == 3 ? 22 : 26;
+
+ l = 0;
+
+ for (i = 0; i < dsi->num_lanes_supported; ++i) {
+ unsigned p = dsi->lanes[i].polarity;
+
+ if (mask_p & (1 << i))
+ l |= 1 << (i * 2 + (p ? 0 : 1));
+
+ if (mask_n & (1 << i))
+ l |= 1 << (i * 2 + (p ? 1 : 0));
+ }
+
/*
* Bits in REGLPTXSCPDAT4TO0DXDY:
* 17: DY0 18: DX0
@@ -2305,51 +2325,40 @@ static void dsi_cio_disable_lane_override(struct platform_device *dsidev)
static int dsi_cio_wait_tx_clk_esc_reset(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- int t;
- int bits[3];
- bool in_use[3];
-
- if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC)) {
- bits[0] = 28;
- bits[1] = 27;
- bits[2] = 26;
- } else {
- bits[0] = 24;
- bits[1] = 25;
- bits[2] = 26;
- }
-
- in_use[0] = false;
- in_use[1] = false;
- in_use[2] = false;
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ int t, i;
+ bool in_use[DSI_MAX_NR_LANES];
+ static const u8 offsets_old[] = { 28, 27, 26 };
+ static const u8 offsets_new[] = { 24, 25, 26, 27, 28 };
+ const u8 *offsets;
+
+ if (dss_has_feature(FEAT_DSI_REVERSE_TXCLKESC))
+ offsets = offsets_old;
+ else
+ offsets = offsets_new;
- if (dssdev->phy.dsi.clk_lane != 0)
- in_use[dssdev->phy.dsi.clk_lane - 1] = true;
- if (dssdev->phy.dsi.data1_lane != 0)
- in_use[dssdev->phy.dsi.data1_lane - 1] = true;
- if (dssdev->phy.dsi.data2_lane != 0)
- in_use[dssdev->phy.dsi.data2_lane - 1] = true;
+ for (i = 0; i < dsi->num_lanes_supported; ++i)
+ in_use[i] = dsi->lanes[i].function != DSI_LANE_UNUSED;
t = 100000;
while (true) {
u32 l;
- int i;
int ok;
l = dsi_read_reg(dsidev, DSI_DSIPHY_CFG5);
ok = 0;
- for (i = 0; i < 3; ++i) {
- if (!in_use[i] || (l & (1 << bits[i])))
+ for (i = 0; i < dsi->num_lanes_supported; ++i) {
+ if (!in_use[i] || (l & (1 << offsets[i])))
ok++;
}
- if (ok == 3)
+ if (ok == dsi->num_lanes_supported)
break;
if (--t == 0) {
- for (i = 0; i < 3; ++i) {
- if (!in_use[i] || (l & (1 << bits[i])))
+ for (i = 0; i < dsi->num_lanes_supported; ++i) {
+ if (!in_use[i] || (l & (1 << offsets[i])))
continue;
DSSERR("CIO TXCLKESC%d domain not coming " \
@@ -2362,22 +2371,20 @@ static int dsi_cio_wait_tx_clk_esc_reset(struct omap_dss_device *dssdev)
return 0;
}
+/* return bitmask of enabled lanes, lane0 being the lsb */
static unsigned dsi_get_lane_mask(struct omap_dss_device *dssdev)
{
- unsigned lanes = 0;
+ struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
+ unsigned mask = 0;
+ int i;
- if (dssdev->phy.dsi.clk_lane != 0)
- lanes |= 1 << (dssdev->phy.dsi.clk_lane - 1);
- if (dssdev->phy.dsi.data1_lane != 0)
- lanes |= 1 << (dssdev->phy.dsi.data1_lane - 1);
- if (dssdev->phy.dsi.data2_lane != 0)
- lanes |= 1 << (dssdev->phy.dsi.data2_lane - 1);
- if (dssdev->phy.dsi.data3_lane != 0)
- lanes |= 1 << (dssdev->phy.dsi.data3_lane - 1);
- if (dssdev->phy.dsi.data4_lane != 0)
- lanes |= 1 << (dssdev->phy.dsi.data4_lane - 1);
+ for (i = 0; i < dsi->num_lanes_supported; ++i) {
+ if (dsi->lanes[i].function != DSI_LANE_UNUSED)
+ mask |= 1 << i;
+ }
- return lanes;
+ return mask;
}
static int dsi_cio_init(struct omap_dss_device *dssdev)
@@ -2385,7 +2392,6 @@ static int dsi_cio_init(struct omap_dss_device *dssdev)
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
int r;
- int num_data_lanes_dssdev = dsi_get_num_data_lanes_dssdev(dssdev);
u32 l;
DSSDBGF();
@@ -2407,7 +2413,9 @@ static int dsi_cio_init(struct omap_dss_device *dssdev)
goto err_scp_clk_dom;
}
- dsi_set_lane_config(dssdev);
+ r = dsi_set_lane_config(dssdev);
+ if (r)
+ goto err_scp_clk_dom;
/* set TX STOP MODE timer to maximum for this operation */
l = dsi_read_reg(dsidev, DSI_TIMING1);
@@ -2418,7 +2426,8 @@ static int dsi_cio_init(struct omap_dss_device *dssdev)
dsi_write_reg(dsidev, DSI_TIMING1, l);
if (dsi->ulps_enabled) {
- u32 lane_mask = DSI_CLK_P | DSI_DATA1_P | DSI_DATA2_P;
+ unsigned mask_p;
+ int i;
DSSDBG("manual ulps exit\n");
@@ -2427,16 +2436,19 @@ static int dsi_cio_init(struct omap_dss_device *dssdev)
* ULPS exit sequence, as after reset the DSS HW thinks
* that we are not in ULPS mode, and refuses to send the
* sequence. So we need to send the ULPS exit sequence
- * manually.
+ * manually by setting positive lines high and negative lines
+ * low for 1ms.
*/
- if (num_data_lanes_dssdev > 2)
- lane_mask |= DSI_DATA3_P;
+ mask_p = 0;
- if (num_data_lanes_dssdev > 3)
- lane_mask |= DSI_DATA4_P;
+ for (i = 0; i < dsi->num_lanes_supported; ++i) {
+ if (dsi->lanes[i].function == DSI_LANE_UNUSED)
+ continue;
+ mask_p |= 1 << i;
+ }
- dsi_cio_enable_lane_override(dssdev, lane_mask);
+ dsi_cio_enable_lane_override(dssdev, mask_p, 0);
}
r = dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ON);
@@ -2913,6 +2925,9 @@ static int dsi_vc_send_bta(struct platform_device *dsidev, int channel)
REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 6, 6); /* BTA_EN */
+ /* flush posted write */
+ dsi_read_reg(dsidev, DSI_VC_CTRL(channel));
+
return 0;
}
@@ -3513,7 +3528,8 @@ static int dsi_enter_ulps(struct platform_device *dsidev)
{
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
DECLARE_COMPLETION_ONSTACK(completion);
- int r;
+ int r, i;
+ unsigned mask;
DSSDBGF();
@@ -3524,9 +3540,11 @@ static int dsi_enter_ulps(struct platform_device *dsidev)
if (dsi->ulps_enabled)
return 0;
+ /* DDR_CLK_ALWAYS_ON */
if (REG_GET(dsidev, DSI_CLK_CTRL, 13, 13)) {
- DSSERR("DDR_CLK_ALWAYS_ON enabled when entering ULPS\n");
- return -EIO;
+ dsi_if_enable(dsidev, 0);
+ REG_FLD_MOD(dsidev, DSI_CLK_CTRL, 0, 13, 13);
+ dsi_if_enable(dsidev, 1);
}
dsi_sync_vc(dsidev, 0);
@@ -3556,10 +3574,19 @@ static int dsi_enter_ulps(struct platform_device *dsidev)
if (r)
return r;
+ mask = 0;
+
+ for (i = 0; i < dsi->num_lanes_supported; ++i) {
+ if (dsi->lanes[i].function == DSI_LANE_UNUSED)
+ continue;
+ mask |= 1 << i;
+ }
/* Assert TxRequestEsc for data lanes and TxUlpsClk for clk lane */
/* LANEx_ULPS_SIG2 */
- REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, (1 << 0) | (1 << 1) | (1 << 2),
- 7, 5);
+ REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, mask, 9, 5);
+
+ /* flush posted write and wait for SCP interface to finish the write */
+ dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG2);
if (wait_for_completion_timeout(&completion,
msecs_to_jiffies(1000)) == 0) {
@@ -3572,8 +3599,10 @@ static int dsi_enter_ulps(struct platform_device *dsidev)
DSI_CIO_IRQ_ULPSACTIVENOT_ALL0);
/* Reset LANEx_ULPS_SIG2 */
- REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, (0 << 0) | (0 << 1) | (0 << 2),
- 7, 5);
+ REG_FLD_MOD(dsidev, DSI_COMPLEXIO_CFG2, 0, 9, 5);
+
+ /* flush posted write and wait for SCP interface to finish the write */
+ dsi_read_reg(dsidev, DSI_COMPLEXIO_CFG2);
dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_ULPS);
@@ -3836,6 +3865,7 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
static void dsi_proto_timings(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
unsigned tlpx, tclk_zero, tclk_prepare, tclk_trail;
unsigned tclk_pre, tclk_post;
unsigned ths_prepare, ths_prepare_ths_zero, ths_zero;
@@ -3843,7 +3873,7 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev)
unsigned ddr_clk_pre, ddr_clk_post;
unsigned enter_hs_mode_lat, exit_hs_mode_lat;
unsigned ths_eot;
- int ndl = dsi_get_num_data_lanes_dssdev(dssdev);
+ int ndl = dsi->num_lanes_used - 1;
u32 r;
r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
@@ -3945,68 +3975,82 @@ static void dsi_proto_timings(struct omap_dss_device *dssdev)
}
}
-int dsi_video_mode_enable(struct omap_dss_device *dssdev, int channel)
+int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
int bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt);
u8 data_type;
u16 word_count;
+ int r;
- switch (dssdev->panel.dsi_pix_fmt) {
- case OMAP_DSS_DSI_FMT_RGB888:
- data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
- break;
- case OMAP_DSS_DSI_FMT_RGB666:
- data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
- break;
- case OMAP_DSS_DSI_FMT_RGB666_PACKED:
- data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
- break;
- case OMAP_DSS_DSI_FMT_RGB565:
- data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
- break;
- default:
- BUG();
- };
+ if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_VIDEO_MODE) {
+ switch (dssdev->panel.dsi_pix_fmt) {
+ case OMAP_DSS_DSI_FMT_RGB888:
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
+ break;
+ case OMAP_DSS_DSI_FMT_RGB666:
+ data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
+ break;
+ case OMAP_DSS_DSI_FMT_RGB666_PACKED:
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
+ break;
+ case OMAP_DSS_DSI_FMT_RGB565:
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
+ break;
+ default:
+ BUG();
+ };
- dsi_if_enable(dsidev, false);
- dsi_vc_enable(dsidev, channel, false);
+ dsi_if_enable(dsidev, false);
+ dsi_vc_enable(dsidev, channel, false);
- /* MODE, 1 = video mode */
- REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 4, 4);
+ /* MODE, 1 = video mode */
+ REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 1, 4, 4);
- word_count = DIV_ROUND_UP(dssdev->panel.timings.x_res * bpp, 8);
+ word_count = DIV_ROUND_UP(dssdev->panel.timings.x_res * bpp, 8);
- dsi_vc_write_long_header(dsidev, channel, data_type, word_count, 0);
+ dsi_vc_write_long_header(dsidev, channel, data_type,
+ word_count, 0);
- dsi_vc_enable(dsidev, channel, true);
- dsi_if_enable(dsidev, true);
+ dsi_vc_enable(dsidev, channel, true);
+ dsi_if_enable(dsidev, true);
+ }
- dssdev->manager->enable(dssdev->manager);
+ r = dss_mgr_enable(dssdev->manager);
+ if (r) {
+ if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_VIDEO_MODE) {
+ dsi_if_enable(dsidev, false);
+ dsi_vc_enable(dsidev, channel, false);
+ }
+
+ return r;
+ }
return 0;
}
-EXPORT_SYMBOL(dsi_video_mode_enable);
+EXPORT_SYMBOL(dsi_enable_video_output);
-void dsi_video_mode_disable(struct omap_dss_device *dssdev, int channel)
+void dsi_disable_video_output(struct omap_dss_device *dssdev, int channel)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- dsi_if_enable(dsidev, false);
- dsi_vc_enable(dsidev, channel, false);
+ if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_VIDEO_MODE) {
+ dsi_if_enable(dsidev, false);
+ dsi_vc_enable(dsidev, channel, false);
- /* MODE, 0 = command mode */
- REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 4, 4);
+ /* MODE, 0 = command mode */
+ REG_FLD_MOD(dsidev, DSI_VC_CTRL(channel), 0, 4, 4);
- dsi_vc_enable(dsidev, channel, true);
- dsi_if_enable(dsidev, true);
+ dsi_vc_enable(dsidev, channel, true);
+ dsi_if_enable(dsidev, true);
+ }
- dssdev->manager->disable(dssdev->manager);
+ dss_mgr_disable(dssdev->manager);
}
-EXPORT_SYMBOL(dsi_video_mode_disable);
+EXPORT_SYMBOL(dsi_disable_video_output);
static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
- u16 x, u16 y, u16 w, u16 h)
+ u16 w, u16 h)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
@@ -4021,8 +4065,7 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
const unsigned channel = dsi->update_channel;
const unsigned line_buf_size = dsi_get_line_buf_size(dsidev);
- DSSDBG("dsi_update_screen_dispc(%d,%d %dx%d)\n",
- x, y, w, h);
+ DSSDBG("dsi_update_screen_dispc(%dx%d)\n", w, h);
dsi_vc_config_source(dsidev, channel, DSI_VC_SOURCE_VP);
@@ -4070,7 +4113,7 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
msecs_to_jiffies(250));
BUG_ON(r == 0);
- dss_start_update(dssdev);
+ dss_mgr_start_update(dssdev->manager);
if (dsi->te_enabled) {
/* disable LP_RX_TO, so that we can receive TE. Time to wait
@@ -4146,66 +4189,27 @@ static void dsi_framedone_irq_callback(void *data, u32 mask)
#endif
}
-int omap_dsi_prepare_update(struct omap_dss_device *dssdev,
- u16 *x, u16 *y, u16 *w, u16 *h,
- bool enlarge_update_area)
+int omap_dsi_update(struct omap_dss_device *dssdev, int channel,
+ void (*callback)(int, void *), void *data)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
+ struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
u16 dw, dh;
- dssdev->driver->get_resolution(dssdev, &dw, &dh);
-
- if (*x > dw || *y > dh)
- return -EINVAL;
-
- if (*x + *w > dw)
- return -EINVAL;
-
- if (*y + *h > dh)
- return -EINVAL;
-
- if (*w == 1)
- return -EINVAL;
-
- if (*w == 0 || *h == 0)
- return -EINVAL;
-
dsi_perf_mark_setup(dsidev);
- dss_setup_partial_planes(dssdev, x, y, w, h,
- enlarge_update_area);
- dispc_mgr_set_lcd_size(dssdev->manager->id, *w, *h);
-
- return 0;
-}
-EXPORT_SYMBOL(omap_dsi_prepare_update);
-
-int omap_dsi_update(struct omap_dss_device *dssdev,
- int channel,
- u16 x, u16 y, u16 w, u16 h,
- void (*callback)(int, void *), void *data)
-{
- struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
- struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
-
dsi->update_channel = channel;
- /* OMAP DSS cannot send updates of odd widths.
- * omap_dsi_prepare_update() makes the widths even, but add a BUG_ON
- * here to make sure we catch erroneous updates. Otherwise we'll only
- * see rather obscure HW error happening, as DSS halts. */
- BUG_ON(x % 2 == 1);
-
dsi->framedone_callback = callback;
dsi->framedone_data = data;
- dsi->update_region.x = x;
- dsi->update_region.y = y;
- dsi->update_region.w = w;
- dsi->update_region.h = h;
- dsi->update_region.device = dssdev;
+ dssdev->driver->get_resolution(dssdev, &dw, &dh);
- dsi_update_screen_dispc(dssdev, x, y, w, h);
+#ifdef DEBUG
+ dsi->update_bytes = dw * dh *
+ dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt) / 8;
+#endif
+ dsi_update_screen_dispc(dssdev, dw, dh);
return 0;
}
@@ -4218,6 +4222,7 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
int r;
if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_CMD_MODE) {
+ u16 dw, dh;
u32 irq;
struct omap_video_timings timings = {
.hsw = 1,
@@ -4228,6 +4233,10 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
.vbp = 0,
};
+ dssdev->driver->get_resolution(dssdev, &dw, &dh);
+ timings.x_res = dw;
+ timings.y_res = dh;
+
irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ?
DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2;
@@ -4330,6 +4339,12 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
int dsi_module = dsi_get_dsidev_id(dsidev);
int r;
+ r = dsi_parse_lane_config(dssdev);
+ if (r) {
+ DSSERR("illegal lane config");
+ goto err0;
+ }
+
r = dsi_pll_init(dsidev, true, true);
if (r)
goto err0;
@@ -4521,7 +4536,6 @@ int dsi_init_display(struct omap_dss_device *dssdev)
{
struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
- int dsi_module = dsi_get_dsidev_id(dsidev);
DSSDBG("DSI init\n");
@@ -4543,12 +4557,6 @@ int dsi_init_display(struct omap_dss_device *dssdev)
dsi->vdds_dsi_reg = vdds_dsi;
}
- if (dsi_get_num_data_lanes_dssdev(dssdev) > dsi->num_data_lanes) {
- DSSERR("DSI%d can't support more than %d data lanes\n",
- dsi_module + 1, dsi->num_data_lanes);
- return -EINVAL;
- }
-
return 0;
}
@@ -4771,7 +4779,13 @@ static int omap_dsihw_probe(struct platform_device *dsidev)
dev_dbg(&dsidev->dev, "OMAP DSI rev %d.%d\n",
FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
- dsi->num_data_lanes = dsi_get_num_data_lanes(dsidev);
+ /* DSI on OMAP3 doesn't have register DSI_GNQ, set number
+ * of lanes to 3 by default */
+ if (dss_has_feature(FEAT_DSI_GNQ))
+ /* NB_DATA_LANES */
+ dsi->num_lanes_supported = 1 + REG_GET(dsidev, DSI_GNQ, 11, 9);
+ else
+ dsi->num_lanes_supported = 3;
dsi_runtime_put(dsidev);
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index 1703345..77c2b5a 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -720,7 +720,7 @@ void dss_runtime_put(void)
DSSDBG("dss_runtime_put\n");
- r = pm_runtime_put(&dss.pdev->dev);
+ r = pm_runtime_put_sync(&dss.pdev->dev);
WARN_ON(r < 0);
}
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index 6308fc5..32ff69f 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -28,7 +28,7 @@
#endif
#ifdef DEBUG
-extern unsigned int dss_debug;
+extern bool dss_debug;
#ifdef DSS_SUBSYS_NAME
#define DSSDBG(format, ...) \
if (dss_debug) \
@@ -163,6 +163,34 @@ struct bus_type *dss_get_bus(void);
struct regulator *dss_get_vdds_dsi(void);
struct regulator *dss_get_vdds_sdi(void);
+/* apply */
+void dss_apply_init(void);
+int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr);
+int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl);
+void dss_mgr_start_update(struct omap_overlay_manager *mgr);
+int omap_dss_mgr_apply(struct omap_overlay_manager *mgr);
+
+int dss_mgr_enable(struct omap_overlay_manager *mgr);
+void dss_mgr_disable(struct omap_overlay_manager *mgr);
+int dss_mgr_set_info(struct omap_overlay_manager *mgr,
+ struct omap_overlay_manager_info *info);
+void dss_mgr_get_info(struct omap_overlay_manager *mgr,
+ struct omap_overlay_manager_info *info);
+int dss_mgr_set_device(struct omap_overlay_manager *mgr,
+ struct omap_dss_device *dssdev);
+int dss_mgr_unset_device(struct omap_overlay_manager *mgr);
+
+bool dss_ovl_is_enabled(struct omap_overlay *ovl);
+int dss_ovl_enable(struct omap_overlay *ovl);
+int dss_ovl_disable(struct omap_overlay *ovl);
+int dss_ovl_set_info(struct omap_overlay *ovl,
+ struct omap_overlay_info *info);
+void dss_ovl_get_info(struct omap_overlay *ovl,
+ struct omap_overlay_info *info);
+int dss_ovl_set_manager(struct omap_overlay *ovl,
+ struct omap_overlay_manager *mgr);
+int dss_ovl_unset_manager(struct omap_overlay *ovl);
+
/* display */
int dss_suspend_all_devices(void);
int dss_resume_all_devices(void);
@@ -181,21 +209,22 @@ void default_get_overlay_fifo_thresholds(enum omap_plane plane,
/* manager */
int dss_init_overlay_managers(struct platform_device *pdev);
void dss_uninit_overlay_managers(struct platform_device *pdev);
-int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl);
-void dss_setup_partial_planes(struct omap_dss_device *dssdev,
- u16 *x, u16 *y, u16 *w, u16 *h,
- bool enlarge_update_area);
-void dss_start_update(struct omap_dss_device *dssdev);
+int dss_mgr_simple_check(struct omap_overlay_manager *mgr,
+ const struct omap_overlay_manager_info *info);
+int dss_mgr_check(struct omap_overlay_manager *mgr,
+ struct omap_dss_device *dssdev,
+ struct omap_overlay_manager_info *info,
+ struct omap_overlay_info **overlay_infos);
/* overlay */
void dss_init_overlays(struct platform_device *pdev);
void dss_uninit_overlays(struct platform_device *pdev);
-int dss_check_overlay(struct omap_overlay *ovl, struct omap_dss_device *dssdev);
void dss_overlay_setup_dispc_manager(struct omap_overlay_manager *mgr);
-#ifdef L4_EXAMPLE
-void dss_overlay_setup_l4_manager(struct omap_overlay_manager *mgr);
-#endif
void dss_recheck_connections(struct omap_dss_device *dssdev, bool force);
+int dss_ovl_simple_check(struct omap_overlay *ovl,
+ const struct omap_overlay_info *info);
+int dss_ovl_check(struct omap_overlay *ovl,
+ struct omap_overlay_info *info, struct omap_dss_device *dssdev);
/* DSS */
int dss_init_platform_driver(void);
@@ -399,21 +428,22 @@ int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
struct dispc_clock_info *cinfo);
+void dispc_ovl_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high);
u32 dispc_ovl_get_fifo_size(enum omap_plane plane);
u32 dispc_ovl_get_burst_size(enum omap_plane plane);
int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
- bool ilace, enum omap_channel channel, bool replication,
- u32 fifo_low, u32 fifo_high);
+ bool ilace, bool replication);
int dispc_ovl_enable(enum omap_plane plane, bool enable);
-
+void dispc_ovl_set_channel_out(enum omap_plane plane,
+ enum omap_channel channel);
void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable);
void dispc_mgr_set_lcd_size(enum omap_channel channel, u16 width, u16 height);
-void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable);
-void dispc_mgr_set_cpr_coef(enum omap_channel channel,
- struct omap_dss_cpr_coefs *coefs);
+u32 dispc_mgr_get_vsync_irq(enum omap_channel channel);
+u32 dispc_mgr_get_framedone_irq(enum omap_channel channel);
bool dispc_mgr_go_busy(enum omap_channel channel);
void dispc_mgr_go(enum omap_channel channel);
+bool dispc_mgr_is_enabled(enum omap_channel channel);
void dispc_mgr_enable(enum omap_channel channel, bool enable);
bool dispc_mgr_is_channel_enabled(enum omap_channel channel);
void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode);
@@ -421,18 +451,6 @@ void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable);
void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines);
void dispc_mgr_set_lcd_display_type(enum omap_channel channel,
enum omap_lcd_display_type type);
-void dispc_mgr_set_default_color(enum omap_channel channel, u32 color);
-u32 dispc_mgr_get_default_color(enum omap_channel channel);
-void dispc_mgr_set_trans_key(enum omap_channel ch,
- enum omap_dss_trans_key_type type,
- u32 trans_key);
-void dispc_mgr_get_trans_key(enum omap_channel ch,
- enum omap_dss_trans_key_type *type,
- u32 *trans_key);
-void dispc_mgr_enable_trans_key(enum omap_channel ch, bool enable);
-void dispc_mgr_enable_alpha_fixed_zorder(enum omap_channel ch, bool enable);
-bool dispc_mgr_trans_key_enabled(enum omap_channel ch);
-bool dispc_mgr_alpha_fixed_zorder_enabled(enum omap_channel ch);
void dispc_mgr_set_lcd_timings(enum omap_channel channel,
struct omap_video_timings *timings);
void dispc_mgr_set_pol_freq(enum omap_channel channel,
@@ -443,6 +461,8 @@ int dispc_mgr_set_clock_div(enum omap_channel channel,
struct dispc_clock_info *cinfo);
int dispc_mgr_get_clock_div(enum omap_channel channel,
struct dispc_clock_info *cinfo);
+void dispc_mgr_setup(enum omap_channel channel,
+ struct omap_overlay_manager_info *info);
/* VENC */
#ifdef CONFIG_OMAP2_DSS_VENC
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index b402699..afcb593 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -304,6 +304,11 @@ static const struct dss_param_range omap2_dss_param_range[] = {
[FEAT_PARAM_DSIPLL_FINT] = { 0, 0 },
[FEAT_PARAM_DSIPLL_LPDIV] = { 0, 0 },
[FEAT_PARAM_DOWNSCALE] = { 1, 2 },
+ /*
+ * Assuming the line width buffer to be 768 pixels as OMAP2 DISPC
+ * scaler cannot scale a image with width more than 768.
+ */
+ [FEAT_PARAM_LINEWIDTH] = { 1, 768 },
};
static const struct dss_param_range omap3_dss_param_range[] = {
@@ -316,6 +321,7 @@ static const struct dss_param_range omap3_dss_param_range[] = {
[FEAT_PARAM_DSIPLL_FINT] = { 750000, 2100000 },
[FEAT_PARAM_DSIPLL_LPDIV] = { 1, (1 << 13) - 1},
[FEAT_PARAM_DOWNSCALE] = { 1, 4 },
+ [FEAT_PARAM_LINEWIDTH] = { 1, 1024 },
};
static const struct dss_param_range omap4_dss_param_range[] = {
@@ -328,6 +334,7 @@ static const struct dss_param_range omap4_dss_param_range[] = {
[FEAT_PARAM_DSIPLL_FINT] = { 500000, 2500000 },
[FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 },
[FEAT_PARAM_DOWNSCALE] = { 1, 4 },
+ [FEAT_PARAM_LINEWIDTH] = { 1, 2048 },
};
/* OMAP2 DSS Features */
@@ -465,6 +472,10 @@ static const struct ti_hdmi_ip_ops omap4_hdmi_functions = {
.dump_core = ti_hdmi_4xxx_core_dump,
.dump_pll = ti_hdmi_4xxx_pll_dump,
.dump_phy = ti_hdmi_4xxx_phy_dump,
+#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
+ defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
+ .audio_enable = ti_hdmi_4xxx_wp_audio_enable,
+#endif
};
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index 6a6c05d..cd833bb 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -86,6 +86,7 @@ enum dss_range_param {
FEAT_PARAM_DSIPLL_FINT,
FEAT_PARAM_DSIPLL_LPDIV,
FEAT_PARAM_DOWNSCALE,
+ FEAT_PARAM_LINEWIDTH,
};
/* DSS Feature Functions */
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index c56378c..a36b934 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -165,9 +165,25 @@ static int hdmi_runtime_get(void)
DSSDBG("hdmi_runtime_get\n");
+ /*
+ * HACK: Add dss_runtime_get() to ensure DSS clock domain is enabled.
+ * This should be removed later.
+ */
+ r = dss_runtime_get();
+ if (r < 0)
+ goto err_get_dss;
+
r = pm_runtime_get_sync(&hdmi.pdev->dev);
WARN_ON(r < 0);
- return r < 0 ? r : 0;
+ if (r < 0)
+ goto err_get_hdmi;
+
+ return 0;
+
+err_get_hdmi:
+ dss_runtime_put();
+err_get_dss:
+ return r;
}
static void hdmi_runtime_put(void)
@@ -176,8 +192,14 @@ static void hdmi_runtime_put(void)
DSSDBG("hdmi_runtime_put\n");
- r = pm_runtime_put(&hdmi.pdev->dev);
+ r = pm_runtime_put_sync(&hdmi.pdev->dev);
WARN_ON(r < 0);
+
+ /*
+ * HACK: This is added to complement the dss_runtime_get() call in
+ * hdmi_runtime_get(). This should be removed later.
+ */
+ dss_runtime_put();
}
int hdmi_init_display(struct omap_dss_device *dssdev)
@@ -333,7 +355,7 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
if (r)
return r;
- dispc_mgr_enable(OMAP_DSS_CHANNEL_DIGIT, 0);
+ dss_mgr_disable(dssdev->manager);
p = &dssdev->panel.timings;
@@ -387,9 +409,16 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 1);
- dispc_mgr_enable(OMAP_DSS_CHANNEL_DIGIT, 1);
+ r = dss_mgr_enable(dssdev->manager);
+ if (r)
+ goto err_mgr_enable;
return 0;
+
+err_mgr_enable:
+ hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0);
+ hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
+ hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
err:
hdmi_runtime_put();
return -EIO;
@@ -397,7 +426,7 @@ err:
static void hdmi_power_off(struct omap_dss_device *dssdev)
{
- dispc_mgr_enable(OMAP_DSS_CHANNEL_DIGIT, 0);
+ dss_mgr_disable(dssdev->manager);
hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0);
hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
@@ -490,6 +519,7 @@ bool omapdss_hdmi_detect(void)
int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev)
{
+ struct omap_dss_hdmi_data *priv = dssdev->data;
int r = 0;
DSSDBG("ENTER hdmi_display_enable\n");
@@ -502,6 +532,8 @@ int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev)
goto err0;
}
+ hdmi.ip_data.hpd_gpio = priv->hpd_gpio;
+
r = omap_dss_start_device(dssdev);
if (r) {
DSSERR("failed to start device\n");
@@ -554,11 +586,44 @@ void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev)
#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-static int hdmi_audio_hw_params(struct hdmi_ip_data *ip_data,
- struct snd_pcm_substream *substream,
+static int hdmi_audio_trigger(struct snd_pcm_substream *substream, int cmd,
+ struct snd_soc_dai *dai)
+{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct platform_device *pdev = to_platform_device(codec->dev);
+ struct hdmi_ip_data *ip_data = snd_soc_codec_get_drvdata(codec);
+ int err = 0;
+
+ if (!ip_data->ops || !ip_data->ops->audio_enable) {
+ dev_err(&pdev->dev, "Cannot enable/disable audio\n");
+ return -ENODEV;
+ }
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ ip_data->ops->audio_enable(ip_data, true);
+ break;
+ case SNDRV_PCM_TRIGGER_STOP:
+ case SNDRV_PCM_TRIGGER_SUSPEND:
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ ip_data->ops->audio_enable(ip_data, false);
+ break;
+ default:
+ err = -EINVAL;
+ }
+ return err;
+}
+
+static int hdmi_audio_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
+ struct snd_soc_pcm_runtime *rtd = substream->private_data;
+ struct snd_soc_codec *codec = rtd->codec;
+ struct hdmi_ip_data *ip_data = snd_soc_codec_get_drvdata(codec);
struct hdmi_audio_format audio_format;
struct hdmi_audio_dma audio_dma;
struct hdmi_core_audio_config core_cfg;
@@ -698,7 +763,16 @@ static int hdmi_audio_startup(struct snd_pcm_substream *substream,
return 0;
}
+static int hdmi_audio_codec_probe(struct snd_soc_codec *codec)
+{
+ struct hdmi_ip_data *priv = &hdmi.ip_data;
+
+ snd_soc_codec_set_drvdata(codec, priv);
+ return 0;
+}
+
static struct snd_soc_codec_driver hdmi_audio_codec_drv = {
+ .probe = hdmi_audio_codec_probe,
};
static struct snd_soc_dai_ops hdmi_audio_codec_ops = {
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index 6e63845..d1858e7 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -26,17 +26,15 @@
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <video/omapdss.h>
-#include <plat/cpu.h>
#include "dss.h"
#include "dss_features.h"
static int num_managers;
-static struct list_head manager_list;
+static struct omap_overlay_manager *managers;
static ssize_t manager_name_show(struct omap_overlay_manager *mgr, char *buf)
{
@@ -106,7 +104,11 @@ put_device:
static ssize_t manager_default_color_show(struct omap_overlay_manager *mgr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%#x\n", mgr->info.default_color);
+ struct omap_overlay_manager_info info;
+
+ mgr->get_manager_info(mgr, &info);
+
+ return snprintf(buf, PAGE_SIZE, "%#x\n", info.default_color);
}
static ssize_t manager_default_color_store(struct omap_overlay_manager *mgr,
@@ -144,8 +146,11 @@ static ssize_t manager_trans_key_type_show(struct omap_overlay_manager *mgr,
char *buf)
{
enum omap_dss_trans_key_type key_type;
+ struct omap_overlay_manager_info info;
+
+ mgr->get_manager_info(mgr, &info);
- key_type = mgr->info.trans_key_type;
+ key_type = info.trans_key_type;
BUG_ON(key_type >= ARRAY_SIZE(trans_key_type_str));
return snprintf(buf, PAGE_SIZE, "%s\n", trans_key_type_str[key_type]);
@@ -185,7 +190,11 @@ static ssize_t manager_trans_key_type_store(struct omap_overlay_manager *mgr,
static ssize_t manager_trans_key_value_show(struct omap_overlay_manager *mgr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%#x\n", mgr->info.trans_key);
+ struct omap_overlay_manager_info info;
+
+ mgr->get_manager_info(mgr, &info);
+
+ return snprintf(buf, PAGE_SIZE, "%#x\n", info.trans_key);
}
static ssize_t manager_trans_key_value_store(struct omap_overlay_manager *mgr,
@@ -217,7 +226,11 @@ static ssize_t manager_trans_key_value_store(struct omap_overlay_manager *mgr,
static ssize_t manager_trans_key_enabled_show(struct omap_overlay_manager *mgr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", mgr->info.trans_enabled);
+ struct omap_overlay_manager_info info;
+
+ mgr->get_manager_info(mgr, &info);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", info.trans_enabled);
}
static ssize_t manager_trans_key_enabled_store(struct omap_overlay_manager *mgr,
@@ -249,10 +262,14 @@ static ssize_t manager_trans_key_enabled_store(struct omap_overlay_manager *mgr,
static ssize_t manager_alpha_blending_enabled_show(
struct omap_overlay_manager *mgr, char *buf)
{
+ struct omap_overlay_manager_info info;
+
+ mgr->get_manager_info(mgr, &info);
+
WARN_ON(!dss_has_feature(FEAT_ALPHA_FIXED_ZORDER));
return snprintf(buf, PAGE_SIZE, "%d\n",
- mgr->info.partial_alpha_enabled);
+ info.partial_alpha_enabled);
}
static ssize_t manager_alpha_blending_enabled_store(
@@ -287,7 +304,11 @@ static ssize_t manager_alpha_blending_enabled_store(
static ssize_t manager_cpr_enable_show(struct omap_overlay_manager *mgr,
char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", mgr->info.cpr_enable);
+ struct omap_overlay_manager_info info;
+
+ mgr->get_manager_info(mgr, &info);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", info.cpr_enable);
}
static ssize_t manager_cpr_enable_store(struct omap_overlay_manager *mgr,
@@ -469,143 +490,6 @@ static struct kobj_type manager_ktype = {
.default_attrs = manager_sysfs_attrs,
};
-/*
- * We have 4 levels of cache for the dispc settings. First two are in SW and
- * the latter two in HW.
- *
- * +--------------------+
- * |overlay/manager_info|
- * +--------------------+
- * v
- * apply()
- * v
- * +--------------------+
- * | dss_cache |
- * +--------------------+
- * v
- * configure()
- * v
- * +--------------------+
- * | shadow registers |
- * +--------------------+
- * v
- * VFP or lcd/digit_enable
- * v
- * +--------------------+
- * | registers |
- * +--------------------+
- */
-
-struct overlay_cache_data {
- /* If true, cache changed, but not written to shadow registers. Set
- * in apply(), cleared when registers written. */
- bool dirty;
- /* If true, shadow registers contain changed values not yet in real
- * registers. Set when writing to shadow registers, cleared at
- * VSYNC/EVSYNC */
- bool shadow_dirty;
-
- bool enabled;
-
- struct omap_overlay_info info;
-
- enum omap_channel channel;
- bool replication;
- bool ilace;
-
- u32 fifo_low;
- u32 fifo_high;
-};
-
-struct manager_cache_data {
- /* If true, cache changed, but not written to shadow registers. Set
- * in apply(), cleared when registers written. */
- bool dirty;
- /* If true, shadow registers contain changed values not yet in real
- * registers. Set when writing to shadow registers, cleared at
- * VSYNC/EVSYNC */
- bool shadow_dirty;
-
- struct omap_overlay_manager_info info;
-
- bool manual_update;
- bool do_manual_update;
-
- /* manual update region */
- u16 x, y, w, h;
-
- /* enlarge the update area if the update area contains scaled
- * overlays */
- bool enlarge_update_area;
-};
-
-static struct {
- spinlock_t lock;
- struct overlay_cache_data overlay_cache[MAX_DSS_OVERLAYS];
- struct manager_cache_data manager_cache[MAX_DSS_MANAGERS];
-
- bool irq_enabled;
-} dss_cache;
-
-
-
-static int omap_dss_set_device(struct omap_overlay_manager *mgr,
- struct omap_dss_device *dssdev)
-{
- int i;
- int r;
-
- if (dssdev->manager) {
- DSSERR("display '%s' already has a manager '%s'\n",
- dssdev->name, dssdev->manager->name);
- return -EINVAL;
- }
-
- if ((mgr->supported_displays & dssdev->type) == 0) {
- DSSERR("display '%s' does not support manager '%s'\n",
- dssdev->name, mgr->name);
- return -EINVAL;
- }
-
- for (i = 0; i < mgr->num_overlays; i++) {
- struct omap_overlay *ovl = mgr->overlays[i];
-
- if (ovl->manager != mgr || !ovl->info.enabled)
- continue;
-
- r = dss_check_overlay(ovl, dssdev);
- if (r)
- return r;
- }
-
- dssdev->manager = mgr;
- mgr->device = dssdev;
- mgr->device_changed = true;
-
- return 0;
-}
-
-static int omap_dss_unset_device(struct omap_overlay_manager *mgr)
-{
- if (!mgr->device) {
- DSSERR("failed to unset display, display not set.\n");
- return -EINVAL;
- }
-
- /*
- * Don't allow currently enabled displays to have the overlay manager
- * pulled out from underneath them
- */
- if (mgr->device->state != OMAP_DSS_DISPLAY_DISABLED)
- return -EINVAL;
-
- mgr->device->manager = NULL;
- mgr->device = NULL;
- mgr->device_changed = true;
-
- return 0;
-}
-
static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr)
{
unsigned long timeout = msecs_to_jiffies(500);
@@ -624,1022 +508,169 @@ static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr)
return omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
}
-static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
-{
- unsigned long timeout = msecs_to_jiffies(500);
- struct manager_cache_data *mc;
- u32 irq;
- int r;
- int i;
- struct omap_dss_device *dssdev = mgr->device;
-
- if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
- return 0;
-
- if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE)
- return 0;
-
- if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
- || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
- irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
- } else {
- irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
- DISPC_IRQ_VSYNC : DISPC_IRQ_VSYNC2;
- }
-
- mc = &dss_cache.manager_cache[mgr->id];
- i = 0;
- while (1) {
- unsigned long flags;
- bool shadow_dirty, dirty;
-
- spin_lock_irqsave(&dss_cache.lock, flags);
- dirty = mc->dirty;
- shadow_dirty = mc->shadow_dirty;
- spin_unlock_irqrestore(&dss_cache.lock, flags);
-
- if (!dirty && !shadow_dirty) {
- r = 0;
- break;
- }
-
- /* 4 iterations is the worst case:
- * 1 - initial iteration, dirty = true (between VFP and VSYNC)
- * 2 - first VSYNC, dirty = true
- * 3 - dirty = false, shadow_dirty = true
- * 4 - shadow_dirty = false */
- if (i++ == 3) {
- DSSERR("mgr(%d)->wait_for_go() not finishing\n",
- mgr->id);
- r = 0;
- break;
- }
-
- r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
- if (r == -ERESTARTSYS)
- break;
-
- if (r) {
- DSSERR("mgr(%d)->wait_for_go() timeout\n", mgr->id);
- break;
- }
- }
-
- return r;
-}
-
-int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
-{
- unsigned long timeout = msecs_to_jiffies(500);
- struct overlay_cache_data *oc;
- struct omap_dss_device *dssdev;
- u32 irq;
- int r;
- int i;
-
- if (!ovl->manager)
- return 0;
-
- dssdev = ovl->manager->device;
-
- if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
- return 0;
-
- if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE)
- return 0;
-
- if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
- || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
- irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
- } else {
- irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
- DISPC_IRQ_VSYNC : DISPC_IRQ_VSYNC2;
- }
-
- oc = &dss_cache.overlay_cache[ovl->id];
- i = 0;
- while (1) {
- unsigned long flags;
- bool shadow_dirty, dirty;
-
- spin_lock_irqsave(&dss_cache.lock, flags);
- dirty = oc->dirty;
- shadow_dirty = oc->shadow_dirty;
- spin_unlock_irqrestore(&dss_cache.lock, flags);
-
- if (!dirty && !shadow_dirty) {
- r = 0;
- break;
- }
-
- /* 4 iterations is the worst case:
- * 1 - initial iteration, dirty = true (between VFP and VSYNC)
- * 2 - first VSYNC, dirty = true
- * 3 - dirty = false, shadow_dirty = true
- * 4 - shadow_dirty = false */
- if (i++ == 3) {
- DSSERR("ovl(%d)->wait_for_go() not finishing\n",
- ovl->id);
- r = 0;
- break;
- }
-
- r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
- if (r == -ERESTARTSYS)
- break;
-
- if (r) {
- DSSERR("ovl(%d)->wait_for_go() timeout\n", ovl->id);
- break;
- }
- }
-
- return r;
-}
-
-static int overlay_enabled(struct omap_overlay *ovl)
-{
- return ovl->info.enabled && ovl->manager && ovl->manager->device;
-}
-
-/* Is rect1 a subset of rect2? */
-static bool rectangle_subset(int x1, int y1, int w1, int h1,
- int x2, int y2, int w2, int h2)
-{
- if (x1 < x2 || y1 < y2)
- return false;
-
- if (x1 + w1 > x2 + w2)
- return false;
-
- if (y1 + h1 > y2 + h2)
- return false;
-
- return true;
-}
-
-/* Do rect1 and rect2 overlap? */
-static bool rectangle_intersects(int x1, int y1, int w1, int h1,
- int x2, int y2, int w2, int h2)
-{
- if (x1 >= x2 + w2)
- return false;
-
- if (x2 >= x1 + w1)
- return false;
-
- if (y1 >= y2 + h2)
- return false;
-
- if (y2 >= y1 + h1)
- return false;
-
- return true;
-}
-
-static bool dispc_is_overlay_scaled(struct overlay_cache_data *oc)
-{
- struct omap_overlay_info *oi = &oc->info;
-
- if (oi->out_width != 0 && oi->width != oi->out_width)
- return true;
-
- if (oi->out_height != 0 && oi->height != oi->out_height)
- return true;
-
- return false;
-}
-
-static int configure_overlay(enum omap_plane plane)
+int dss_init_overlay_managers(struct platform_device *pdev)
{
- struct overlay_cache_data *c;
- struct manager_cache_data *mc;
- struct omap_overlay_info *oi, new_oi;
- struct omap_overlay_manager_info *mi;
- u16 outw, outh;
- u16 x, y, w, h;
- u32 paddr;
- int r;
- u16 orig_w, orig_h, orig_outw, orig_outh;
+ int i, r;
- DSSDBGF("%d", plane);
+ num_managers = dss_feat_get_num_mgrs();
- c = &dss_cache.overlay_cache[plane];
- oi = &c->info;
+ managers = kzalloc(sizeof(struct omap_overlay_manager) * num_managers,
+ GFP_KERNEL);
- if (!c->enabled) {
- dispc_ovl_enable(plane, 0);
- return 0;
- }
+ BUG_ON(managers == NULL);
- mc = &dss_cache.manager_cache[c->channel];
- mi = &mc->info;
-
- x = oi->pos_x;
- y = oi->pos_y;
- w = oi->width;
- h = oi->height;
- outw = oi->out_width == 0 ? oi->width : oi->out_width;
- outh = oi->out_height == 0 ? oi->height : oi->out_height;
- paddr = oi->paddr;
-
- orig_w = w;
- orig_h = h;
- orig_outw = outw;
- orig_outh = outh;
-
- if (mc->manual_update && mc->do_manual_update) {
- unsigned bpp;
- unsigned scale_x_m = w, scale_x_d = outw;
- unsigned scale_y_m = h, scale_y_d = outh;
-
- /* If the overlay is outside the update region, disable it */
- if (!rectangle_intersects(mc->x, mc->y, mc->w, mc->h,
- x, y, outw, outh)) {
- dispc_ovl_enable(plane, 0);
- return 0;
- }
+ for (i = 0; i < num_managers; ++i) {
+ struct omap_overlay_manager *mgr = &managers[i];
- switch (oi->color_mode) {
- case OMAP_DSS_COLOR_NV12:
- bpp = 8;
- break;
- case OMAP_DSS_COLOR_RGB16:
- case OMAP_DSS_COLOR_ARGB16:
- case OMAP_DSS_COLOR_YUV2:
- case OMAP_DSS_COLOR_UYVY:
- case OMAP_DSS_COLOR_RGBA16:
- case OMAP_DSS_COLOR_RGBX16:
- case OMAP_DSS_COLOR_ARGB16_1555:
- case OMAP_DSS_COLOR_XRGB16_1555:
- bpp = 16;
+ switch (i) {
+ case 0:
+ mgr->name = "lcd";
+ mgr->id = OMAP_DSS_CHANNEL_LCD;
break;
-
- case OMAP_DSS_COLOR_RGB24P:
- bpp = 24;
+ case 1:
+ mgr->name = "tv";
+ mgr->id = OMAP_DSS_CHANNEL_DIGIT;
break;
-
- case OMAP_DSS_COLOR_RGB24U:
- case OMAP_DSS_COLOR_ARGB32:
- case OMAP_DSS_COLOR_RGBA32:
- case OMAP_DSS_COLOR_RGBX32:
- bpp = 32;
+ case 2:
+ mgr->name = "lcd2";
+ mgr->id = OMAP_DSS_CHANNEL_LCD2;
break;
-
- default:
- BUG();
}
- if (mc->x > oi->pos_x) {
- x = 0;
- outw -= (mc->x - oi->pos_x);
- paddr += (mc->x - oi->pos_x) *
- scale_x_m / scale_x_d * bpp / 8;
- } else {
- x = oi->pos_x - mc->x;
- }
-
- if (mc->y > oi->pos_y) {
- y = 0;
- outh -= (mc->y - oi->pos_y);
- paddr += (mc->y - oi->pos_y) *
- scale_y_m / scale_y_d *
- oi->screen_width * bpp / 8;
- } else {
- y = oi->pos_y - mc->y;
- }
-
- if (mc->w < (x + outw))
- outw -= (x + outw) - (mc->w);
-
- if (mc->h < (y + outh))
- outh -= (y + outh) - (mc->h);
-
- w = w * outw / orig_outw;
- h = h * outh / orig_outh;
-
- /* YUV mode overlay's input width has to be even and the
- * algorithm above may adjust the width to be odd.
- *
- * Here we adjust the width if needed, preferring to increase
- * the width if the original width was bigger.
- */
- if ((w & 1) &&
- (oi->color_mode == OMAP_DSS_COLOR_YUV2 ||
- oi->color_mode == OMAP_DSS_COLOR_UYVY)) {
- if (orig_w > w)
- w += 1;
- else
- w -= 1;
- }
- }
-
- new_oi = *oi;
-
- /* update new_oi members which could have been possibly updated */
- new_oi.pos_x = x;
- new_oi.pos_y = y;
- new_oi.width = w;
- new_oi.height = h;
- new_oi.out_width = outw;
- new_oi.out_height = outh;
- new_oi.paddr = paddr;
-
- r = dispc_ovl_setup(plane, &new_oi, c->ilace, c->channel,
- c->replication, c->fifo_low, c->fifo_high);
- if (r) {
- /* this shouldn't happen */
- DSSERR("dispc_ovl_setup failed for ovl %d\n", plane);
- dispc_ovl_enable(plane, 0);
- return r;
- }
-
- dispc_ovl_enable(plane, 1);
-
- return 0;
-}
-
-static void configure_manager(enum omap_channel channel)
-{
- struct omap_overlay_manager_info *mi;
-
- DSSDBGF("%d", channel);
-
- /* picking info from the cache */
- mi = &dss_cache.manager_cache[channel].info;
-
- dispc_mgr_set_default_color(channel, mi->default_color);
- dispc_mgr_set_trans_key(channel, mi->trans_key_type, mi->trans_key);
- dispc_mgr_enable_trans_key(channel, mi->trans_enabled);
- dispc_mgr_enable_alpha_fixed_zorder(channel, mi->partial_alpha_enabled);
- if (dss_has_feature(FEAT_CPR)) {
- dispc_mgr_enable_cpr(channel, mi->cpr_enable);
- dispc_mgr_set_cpr_coef(channel, &mi->cpr_coefs);
- }
-}
-
-/* configure_dispc() tries to write values from cache to shadow registers.
- * It writes only to those managers/overlays that are not busy.
- * returns 0 if everything could be written to shadow registers.
- * returns 1 if not everything could be written to shadow registers. */
-static int configure_dispc(void)
-{
- struct overlay_cache_data *oc;
- struct manager_cache_data *mc;
- const int num_ovls = dss_feat_get_num_ovls();
- const int num_mgrs = dss_feat_get_num_mgrs();
- int i;
- int r;
- bool mgr_busy[MAX_DSS_MANAGERS];
- bool mgr_go[MAX_DSS_MANAGERS];
- bool busy;
-
- r = 0;
- busy = false;
-
- for (i = 0; i < num_mgrs; i++) {
- mgr_busy[i] = dispc_mgr_go_busy(i);
- mgr_go[i] = false;
- }
-
- /* Commit overlay settings */
- for (i = 0; i < num_ovls; ++i) {
- oc = &dss_cache.overlay_cache[i];
- mc = &dss_cache.manager_cache[oc->channel];
+ mgr->set_device = &dss_mgr_set_device;
+ mgr->unset_device = &dss_mgr_unset_device;
+ mgr->apply = &omap_dss_mgr_apply;
+ mgr->set_manager_info = &dss_mgr_set_info;
+ mgr->get_manager_info = &dss_mgr_get_info;
+ mgr->wait_for_go = &dss_mgr_wait_for_go;
+ mgr->wait_for_vsync = &dss_mgr_wait_for_vsync;
- if (!oc->dirty)
- continue;
+ mgr->caps = 0;
+ mgr->supported_displays =
+ dss_feat_get_supported_displays(mgr->id);
- if (mc->manual_update && !mc->do_manual_update)
- continue;
+ INIT_LIST_HEAD(&mgr->overlays);
- if (mgr_busy[oc->channel]) {
- busy = true;
- continue;
- }
+ r = kobject_init_and_add(&mgr->kobj, &manager_ktype,
+ &pdev->dev.kobj, "manager%d", i);
- r = configure_overlay(i);
if (r)
- DSSERR("configure_overlay %d failed\n", i);
-
- oc->dirty = false;
- oc->shadow_dirty = true;
- mgr_go[oc->channel] = true;
- }
-
- /* Commit manager settings */
- for (i = 0; i < num_mgrs; ++i) {
- mc = &dss_cache.manager_cache[i];
-
- if (!mc->dirty)
- continue;
-
- if (mc->manual_update && !mc->do_manual_update)
- continue;
-
- if (mgr_busy[i]) {
- busy = true;
- continue;
- }
-
- configure_manager(i);
- mc->dirty = false;
- mc->shadow_dirty = true;
- mgr_go[i] = true;
- }
-
- /* set GO */
- for (i = 0; i < num_mgrs; ++i) {
- mc = &dss_cache.manager_cache[i];
-
- if (!mgr_go[i])
- continue;
-
- /* We don't need GO with manual update display. LCD iface will
- * always be turned off after frame, and new settings will be
- * taken in to use at next update */
- if (!mc->manual_update)
- dispc_mgr_go(i);
- }
-
- if (busy)
- r = 1;
- else
- r = 0;
-
- return r;
-}
-
-/* Make the coordinates even. There are some strange problems with OMAP and
- * partial DSI update when the update widths are odd. */
-static void make_even(u16 *x, u16 *w)
-{
- u16 x1, x2;
-
- x1 = *x;
- x2 = *x + *w;
-
- x1 &= ~1;
- x2 = ALIGN(x2, 2);
-
- *x = x1;
- *w = x2 - x1;
-}
-
-/* Configure dispc for partial update. Return possibly modified update
- * area */
-void dss_setup_partial_planes(struct omap_dss_device *dssdev,
- u16 *xi, u16 *yi, u16 *wi, u16 *hi, bool enlarge_update_area)
-{
- struct overlay_cache_data *oc;
- struct manager_cache_data *mc;
- struct omap_overlay_info *oi;
- const int num_ovls = dss_feat_get_num_ovls();
- struct omap_overlay_manager *mgr;
- int i;
- u16 x, y, w, h;
- unsigned long flags;
- bool area_changed;
-
- x = *xi;
- y = *yi;
- w = *wi;
- h = *hi;
-
- DSSDBG("dispc_setup_partial_planes %d,%d %dx%d\n",
- *xi, *yi, *wi, *hi);
-
- mgr = dssdev->manager;
-
- if (!mgr) {
- DSSDBG("no manager\n");
- return;
+ DSSERR("failed to create sysfs file\n");
}
- make_even(&x, &w);
-
- spin_lock_irqsave(&dss_cache.lock, flags);
-
- /*
- * Execute the outer loop until the inner loop has completed
- * once without increasing the update area. This will ensure that
- * all scaled overlays end up completely within the update area.
- */
- do {
- area_changed = false;
-
- /* We need to show the whole overlay if it is scaled. So look
- * for those, and make the update area larger if found.
- * Also mark the overlay cache dirty */
- for (i = 0; i < num_ovls; ++i) {
- unsigned x1, y1, x2, y2;
- unsigned outw, outh;
-
- oc = &dss_cache.overlay_cache[i];
- oi = &oc->info;
-
- if (oc->channel != mgr->id)
- continue;
-
- oc->dirty = true;
-
- if (!enlarge_update_area)
- continue;
-
- if (!oc->enabled)
- continue;
-
- if (!dispc_is_overlay_scaled(oc))
- continue;
-
- outw = oi->out_width == 0 ?
- oi->width : oi->out_width;
- outh = oi->out_height == 0 ?
- oi->height : oi->out_height;
-
- /* is the overlay outside the update region? */
- if (!rectangle_intersects(x, y, w, h,
- oi->pos_x, oi->pos_y,
- outw, outh))
- continue;
-
- /* if the overlay totally inside the update region? */
- if (rectangle_subset(oi->pos_x, oi->pos_y, outw, outh,
- x, y, w, h))
- continue;
-
- if (x > oi->pos_x)
- x1 = oi->pos_x;
- else
- x1 = x;
-
- if (y > oi->pos_y)
- y1 = oi->pos_y;
- else
- y1 = y;
-
- if ((x + w) < (oi->pos_x + outw))
- x2 = oi->pos_x + outw;
- else
- x2 = x + w;
-
- if ((y + h) < (oi->pos_y + outh))
- y2 = oi->pos_y + outh;
- else
- y2 = y + h;
-
- x = x1;
- y = y1;
- w = x2 - x1;
- h = y2 - y1;
-
- make_even(&x, &w);
-
- DSSDBG("changing upd area due to ovl(%d) "
- "scaling %d,%d %dx%d\n",
- i, x, y, w, h);
-
- area_changed = true;
- }
- } while (area_changed);
-
- mc = &dss_cache.manager_cache[mgr->id];
- mc->do_manual_update = true;
- mc->enlarge_update_area = enlarge_update_area;
- mc->x = x;
- mc->y = y;
- mc->w = w;
- mc->h = h;
-
- configure_dispc();
-
- mc->do_manual_update = false;
-
- spin_unlock_irqrestore(&dss_cache.lock, flags);
-
- *xi = x;
- *yi = y;
- *wi = w;
- *hi = h;
+ return 0;
}
-void dss_start_update(struct omap_dss_device *dssdev)
+void dss_uninit_overlay_managers(struct platform_device *pdev)
{
- struct manager_cache_data *mc;
- struct overlay_cache_data *oc;
- const int num_ovls = dss_feat_get_num_ovls();
- const int num_mgrs = dss_feat_get_num_mgrs();
- struct omap_overlay_manager *mgr;
int i;
- mgr = dssdev->manager;
+ for (i = 0; i < num_managers; ++i) {
+ struct omap_overlay_manager *mgr = &managers[i];
- for (i = 0; i < num_ovls; ++i) {
- oc = &dss_cache.overlay_cache[i];
- if (oc->channel != mgr->id)
- continue;
-
- oc->shadow_dirty = false;
- }
-
- for (i = 0; i < num_mgrs; ++i) {
- mc = &dss_cache.manager_cache[i];
- if (mgr->id != i)
- continue;
-
- mc->shadow_dirty = false;
+ kobject_del(&mgr->kobj);
+ kobject_put(&mgr->kobj);
}
- dssdev->manager->enable(dssdev->manager);
+ kfree(managers);
+ managers = NULL;
+ num_managers = 0;
}
-static void dss_apply_irq_handler(void *data, u32 mask)
+int omap_dss_get_num_overlay_managers(void)
{
- struct manager_cache_data *mc;
- struct overlay_cache_data *oc;
- const int num_ovls = dss_feat_get_num_ovls();
- const int num_mgrs = dss_feat_get_num_mgrs();
- int i, r;
- bool mgr_busy[MAX_DSS_MANAGERS];
- u32 irq_mask;
-
- for (i = 0; i < num_mgrs; i++)
- mgr_busy[i] = dispc_mgr_go_busy(i);
-
- spin_lock(&dss_cache.lock);
-
- for (i = 0; i < num_ovls; ++i) {
- oc = &dss_cache.overlay_cache[i];
- if (!mgr_busy[oc->channel])
- oc->shadow_dirty = false;
- }
-
- for (i = 0; i < num_mgrs; ++i) {
- mc = &dss_cache.manager_cache[i];
- if (!mgr_busy[i])
- mc->shadow_dirty = false;
- }
-
- r = configure_dispc();
- if (r == 1)
- goto end;
-
- /* re-read busy flags */
- for (i = 0; i < num_mgrs; i++)
- mgr_busy[i] = dispc_mgr_go_busy(i);
-
- /* keep running as long as there are busy managers, so that
- * we can collect overlay-applied information */
- for (i = 0; i < num_mgrs; ++i) {
- if (mgr_busy[i])
- goto end;
- }
-
- irq_mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD |
- DISPC_IRQ_EVSYNC_EVEN;
- if (dss_has_feature(FEAT_MGR_LCD2))
- irq_mask |= DISPC_IRQ_VSYNC2;
-
- omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, irq_mask);
- dss_cache.irq_enabled = false;
-
-end:
- spin_unlock(&dss_cache.lock);
+ return num_managers;
}
+EXPORT_SYMBOL(omap_dss_get_num_overlay_managers);
-static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
+struct omap_overlay_manager *omap_dss_get_overlay_manager(int num)
{
- struct overlay_cache_data *oc;
- struct manager_cache_data *mc;
- int i;
- struct omap_overlay *ovl;
- int num_planes_enabled = 0;
- bool use_fifomerge;
- unsigned long flags;
- int r;
-
- DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);
-
- r = dispc_runtime_get();
- if (r)
- return r;
-
- spin_lock_irqsave(&dss_cache.lock, flags);
-
- /* Configure overlays */
- for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
- struct omap_dss_device *dssdev;
-
- ovl = omap_dss_get_overlay(i);
-
- oc = &dss_cache.overlay_cache[ovl->id];
-
- if (ovl->manager_changed) {
- ovl->manager_changed = false;
- ovl->info_dirty = true;
- }
-
- if (!overlay_enabled(ovl)) {
- if (oc->enabled) {
- oc->enabled = false;
- oc->dirty = true;
- }
- continue;
- }
-
- if (!ovl->info_dirty) {
- if (oc->enabled)
- ++num_planes_enabled;
- continue;
- }
-
- dssdev = ovl->manager->device;
-
- if (dss_check_overlay(ovl, dssdev)) {
- if (oc->enabled) {
- oc->enabled = false;
- oc->dirty = true;
- }
- continue;
- }
-
- ovl->info_dirty = false;
- oc->dirty = true;
- oc->info = ovl->info;
-
- oc->replication =
- dss_use_replication(dssdev, ovl->info.color_mode);
-
- oc->ilace = dssdev->type == OMAP_DISPLAY_TYPE_VENC;
-
- oc->channel = ovl->manager->id;
-
- oc->enabled = true;
-
- ++num_planes_enabled;
- }
-
- /* Configure managers */
- list_for_each_entry(mgr, &manager_list, list) {
- struct omap_dss_device *dssdev;
+ if (num >= num_managers)
+ return NULL;
- mc = &dss_cache.manager_cache[mgr->id];
-
- if (mgr->device_changed) {
- mgr->device_changed = false;
- mgr->info_dirty = true;
- }
-
- if (!mgr->info_dirty)
- continue;
-
- if (!mgr->device)
- continue;
-
- dssdev = mgr->device;
-
- mgr->info_dirty = false;
- mc->dirty = true;
- mc->info = mgr->info;
-
- mc->manual_update =
- dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
- }
-
- /* XXX TODO: Try to get fifomerge working. The problem is that it
- * affects both managers, not individually but at the same time. This
- * means the change has to be well synchronized. I guess the proper way
- * is to have a two step process for fifo merge:
- * fifomerge enable:
- * 1. disable other planes, leaving one plane enabled
- * 2. wait until the planes are disabled on HW
- * 3. config merged fifo thresholds, enable fifomerge
- * fifomerge disable:
- * 1. config unmerged fifo thresholds, disable fifomerge
- * 2. wait until fifo changes are in HW
- * 3. enable planes
- */
- use_fifomerge = false;
-
- /* Configure overlay fifos */
- for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
- struct omap_dss_device *dssdev;
- u32 size, burst_size;
-
- ovl = omap_dss_get_overlay(i);
-
- oc = &dss_cache.overlay_cache[ovl->id];
-
- if (!oc->enabled)
- continue;
-
- dssdev = ovl->manager->device;
-
- size = dispc_ovl_get_fifo_size(ovl->id);
- if (use_fifomerge)
- size *= 3;
-
- burst_size = dispc_ovl_get_burst_size(ovl->id);
-
- switch (dssdev->type) {
- case OMAP_DISPLAY_TYPE_DPI:
- case OMAP_DISPLAY_TYPE_DBI:
- case OMAP_DISPLAY_TYPE_SDI:
- case OMAP_DISPLAY_TYPE_VENC:
- case OMAP_DISPLAY_TYPE_HDMI:
- default_get_overlay_fifo_thresholds(ovl->id, size,
- burst_size, &oc->fifo_low,
- &oc->fifo_high);
- break;
-#ifdef CONFIG_OMAP2_DSS_DSI
- case OMAP_DISPLAY_TYPE_DSI:
- dsi_get_overlay_fifo_thresholds(ovl->id, size,
- burst_size, &oc->fifo_low,
- &oc->fifo_high);
- break;
-#endif
- default:
- BUG();
- }
- }
-
- r = 0;
- if (!dss_cache.irq_enabled) {
- u32 mask;
-
- mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD |
- DISPC_IRQ_EVSYNC_EVEN;
- if (dss_has_feature(FEAT_MGR_LCD2))
- mask |= DISPC_IRQ_VSYNC2;
-
- r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, mask);
- dss_cache.irq_enabled = true;
- }
- configure_dispc();
-
- spin_unlock_irqrestore(&dss_cache.lock, flags);
-
- dispc_runtime_put();
-
- return r;
+ return &managers[num];
}
+EXPORT_SYMBOL(omap_dss_get_overlay_manager);
-static int dss_check_manager(struct omap_overlay_manager *mgr)
+int dss_mgr_simple_check(struct omap_overlay_manager *mgr,
+ const struct omap_overlay_manager_info *info)
{
if (dss_has_feature(FEAT_ALPHA_FIXED_ZORDER)) {
/*
* OMAP3 supports only graphics source transparency color key
* and alpha blending simultaneously. See TRM 15.4.2.4.2.2
- * Alpha Mode
+ * Alpha Mode.
*/
- if (mgr->info.partial_alpha_enabled && mgr->info.trans_enabled
- && mgr->info.trans_key_type !=
- OMAP_DSS_COLOR_KEY_GFX_DST)
+ if (info->partial_alpha_enabled && info->trans_enabled
+ && info->trans_key_type != OMAP_DSS_COLOR_KEY_GFX_DST) {
+ DSSERR("check_manager: illegal transparency key\n");
return -EINVAL;
+ }
}
return 0;
}
-static int omap_dss_mgr_set_info(struct omap_overlay_manager *mgr,
- struct omap_overlay_manager_info *info)
-{
- int r;
- struct omap_overlay_manager_info old_info;
-
- old_info = mgr->info;
- mgr->info = *info;
-
- r = dss_check_manager(mgr);
- if (r) {
- mgr->info = old_info;
- return r;
- }
-
- mgr->info_dirty = true;
-
- return 0;
-}
-
-static void omap_dss_mgr_get_info(struct omap_overlay_manager *mgr,
- struct omap_overlay_manager_info *info)
+static int dss_mgr_check_zorder(struct omap_overlay_manager *mgr,
+ struct omap_overlay_info **overlay_infos)
{
- *info = mgr->info;
-}
+ struct omap_overlay *ovl1, *ovl2;
+ struct omap_overlay_info *info1, *info2;
-static int dss_mgr_enable(struct omap_overlay_manager *mgr)
-{
- dispc_mgr_enable(mgr->id, 1);
- return 0;
-}
+ list_for_each_entry(ovl1, &mgr->overlays, list) {
+ info1 = overlay_infos[ovl1->id];
-static int dss_mgr_disable(struct omap_overlay_manager *mgr)
-{
- dispc_mgr_enable(mgr->id, 0);
- return 0;
-}
-
-static void omap_dss_add_overlay_manager(struct omap_overlay_manager *manager)
-{
- ++num_managers;
- list_add_tail(&manager->list, &manager_list);
-}
-
-int dss_init_overlay_managers(struct platform_device *pdev)
-{
- int i, r;
-
- spin_lock_init(&dss_cache.lock);
-
- INIT_LIST_HEAD(&manager_list);
-
- num_managers = 0;
-
- for (i = 0; i < dss_feat_get_num_mgrs(); ++i) {
- struct omap_overlay_manager *mgr;
- mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
-
- BUG_ON(mgr == NULL);
-
- switch (i) {
- case 0:
- mgr->name = "lcd";
- mgr->id = OMAP_DSS_CHANNEL_LCD;
- break;
- case 1:
- mgr->name = "tv";
- mgr->id = OMAP_DSS_CHANNEL_DIGIT;
- break;
- case 2:
- mgr->name = "lcd2";
- mgr->id = OMAP_DSS_CHANNEL_LCD2;
- break;
- }
-
- mgr->set_device = &omap_dss_set_device;
- mgr->unset_device = &omap_dss_unset_device;
- mgr->apply = &omap_dss_mgr_apply;
- mgr->set_manager_info = &omap_dss_mgr_set_info;
- mgr->get_manager_info = &omap_dss_mgr_get_info;
- mgr->wait_for_go = &dss_mgr_wait_for_go;
- mgr->wait_for_vsync = &dss_mgr_wait_for_vsync;
-
- mgr->enable = &dss_mgr_enable;
- mgr->disable = &dss_mgr_disable;
-
- mgr->caps = 0;
- mgr->supported_displays =
- dss_feat_get_supported_displays(mgr->id);
+ if (info1 == NULL)
+ continue;
- dss_overlay_setup_dispc_manager(mgr);
+ list_for_each_entry(ovl2, &mgr->overlays, list) {
+ if (ovl1 == ovl2)
+ continue;
- omap_dss_add_overlay_manager(mgr);
+ info2 = overlay_infos[ovl2->id];
- r = kobject_init_and_add(&mgr->kobj, &manager_ktype,
- &pdev->dev.kobj, "manager%d", i);
+ if (info2 == NULL)
+ continue;
- if (r) {
- DSSERR("failed to create sysfs file\n");
- continue;
+ if (info1->zorder == info2->zorder) {
+ DSSERR("overlays %d and %d have the same "
+ "zorder %d\n",
+ ovl1->id, ovl2->id, info1->zorder);
+ return -EINVAL;
+ }
}
}
return 0;
}
-void dss_uninit_overlay_managers(struct platform_device *pdev)
+int dss_mgr_check(struct omap_overlay_manager *mgr,
+ struct omap_dss_device *dssdev,
+ struct omap_overlay_manager_info *info,
+ struct omap_overlay_info **overlay_infos)
{
- struct omap_overlay_manager *mgr;
+ struct omap_overlay *ovl;
+ int r;
- while (!list_empty(&manager_list)) {
- mgr = list_first_entry(&manager_list,
- struct omap_overlay_manager, list);
- list_del(&mgr->list);
- kobject_del(&mgr->kobj);
- kobject_put(&mgr->kobj);
- kfree(mgr);
+ if (dss_has_feature(FEAT_ALPHA_FREE_ZORDER)) {
+ r = dss_mgr_check_zorder(mgr, overlay_infos);
+ if (r)
+ return r;
}
- num_managers = 0;
-}
+ list_for_each_entry(ovl, &mgr->overlays, list) {
+ struct omap_overlay_info *oi;
+ int r;
-int omap_dss_get_num_overlay_managers(void)
-{
- return num_managers;
-}
-EXPORT_SYMBOL(omap_dss_get_num_overlay_managers);
+ oi = overlay_infos[ovl->id];
-struct omap_overlay_manager *omap_dss_get_overlay_manager(int num)
-{
- int i = 0;
- struct omap_overlay_manager *mgr;
+ if (oi == NULL)
+ continue;
- list_for_each_entry(mgr, &manager_list, list) {
- if (i++ == num)
- return mgr;
+ r = dss_ovl_check(ovl, oi, dssdev);
+ if (r)
+ return r;
}
- return NULL;
+ return 0;
}
-EXPORT_SYMBOL(omap_dss_get_overlay_manager);
-
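The manager.c sysfs hunks above all follow one pattern: instead of reading mgr->info fields in place, a caller takes a private snapshot through get_manager_info() and reads from that copy. A hedged sketch of the same pattern for any other reader (read_default_color is a made-up consumer, and the u32 return type is an assumption based on the %#x format in the show function):

	/* Sketch only: snapshot the manager settings before using them. */
	static u32 read_default_color(struct omap_overlay_manager *mgr)
	{
		struct omap_overlay_manager_info info;

		mgr->get_manager_info(mgr, &info);	/* copies current settings */

		return info.default_color;
	}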
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index ab8e40e..6e82181 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -38,7 +38,7 @@
#include "dss_features.h"
static int num_overlays;
-static struct list_head overlay_list;
+static struct omap_overlay *overlays;
static ssize_t overlay_name_show(struct omap_overlay *ovl, char *buf)
{
@@ -124,19 +124,31 @@ err:
static ssize_t overlay_input_size_show(struct omap_overlay *ovl, char *buf)
{
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
+
return snprintf(buf, PAGE_SIZE, "%d,%d\n",
- ovl->info.width, ovl->info.height);
+ info.width, info.height);
}
static ssize_t overlay_screen_width_show(struct omap_overlay *ovl, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", ovl->info.screen_width);
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", info.screen_width);
}
static ssize_t overlay_position_show(struct omap_overlay *ovl, char *buf)
{
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
+
return snprintf(buf, PAGE_SIZE, "%d,%d\n",
- ovl->info.pos_x, ovl->info.pos_y);
+ info.pos_x, info.pos_y);
}
static ssize_t overlay_position_store(struct omap_overlay *ovl,
@@ -170,8 +182,12 @@ static ssize_t overlay_position_store(struct omap_overlay *ovl,
static ssize_t overlay_output_size_show(struct omap_overlay *ovl, char *buf)
{
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
+
return snprintf(buf, PAGE_SIZE, "%d,%d\n",
- ovl->info.out_width, ovl->info.out_height);
+ info.out_width, info.out_height);
}
static ssize_t overlay_output_size_store(struct omap_overlay *ovl,
@@ -205,7 +221,7 @@ static ssize_t overlay_output_size_store(struct omap_overlay *ovl,
static ssize_t overlay_enabled_show(struct omap_overlay *ovl, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", ovl->info.enabled);
+ return snprintf(buf, PAGE_SIZE, "%d\n", ovl->is_enabled(ovl));
}
static ssize_t overlay_enabled_store(struct omap_overlay *ovl, const char *buf,
@@ -213,33 +229,30 @@ static ssize_t overlay_enabled_store(struct omap_overlay *ovl, const char *buf,
{
int r;
bool enable;
- struct omap_overlay_info info;
-
- ovl->get_overlay_info(ovl, &info);
r = strtobool(buf, &enable);
if (r)
return r;
- info.enabled = enable;
+ if (enable)
+ r = ovl->enable(ovl);
+ else
+ r = ovl->disable(ovl);
- r = ovl->set_overlay_info(ovl, &info);
if (r)
return r;
- if (ovl->manager) {
- r = ovl->manager->apply(ovl->manager);
- if (r)
- return r;
- }
-
return size;
}
static ssize_t overlay_global_alpha_show(struct omap_overlay *ovl, char *buf)
{
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
+
return snprintf(buf, PAGE_SIZE, "%d\n",
- ovl->info.global_alpha);
+ info.global_alpha);
}
static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl,
@@ -276,8 +289,12 @@ static ssize_t overlay_global_alpha_store(struct omap_overlay *ovl,
static ssize_t overlay_pre_mult_alpha_show(struct omap_overlay *ovl,
char *buf)
{
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
+
return snprintf(buf, PAGE_SIZE, "%d\n",
- ovl->info.pre_mult_alpha);
+ info.pre_mult_alpha);
}
static ssize_t overlay_pre_mult_alpha_store(struct omap_overlay *ovl,
@@ -313,7 +330,11 @@ static ssize_t overlay_pre_mult_alpha_store(struct omap_overlay *ovl,
static ssize_t overlay_zorder_show(struct omap_overlay *ovl, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", ovl->info.zorder);
+ struct omap_overlay_info info;
+
+ ovl->get_overlay_info(ovl, &info);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", info.zorder);
}
static ssize_t overlay_zorder_store(struct omap_overlay *ovl,
@@ -430,183 +451,6 @@ static struct kobj_type overlay_ktype = {
.default_attrs = overlay_sysfs_attrs,
};
-/* Check if overlay parameters are compatible with display */
-int dss_check_overlay(struct omap_overlay *ovl, struct omap_dss_device *dssdev)
-{
- struct omap_overlay_info *info;
- u16 outw, outh;
- u16 dw, dh;
- int i;
-
- if (!dssdev)
- return 0;
-
- if (!ovl->info.enabled)
- return 0;
-
- info = &ovl->info;
-
- if (info->paddr == 0) {
- DSSDBG("check_overlay failed: paddr 0\n");
- return -EINVAL;
- }
-
- dssdev->driver->get_resolution(dssdev, &dw, &dh);
-
- DSSDBG("check_overlay %d: (%d,%d %dx%d -> %dx%d) disp (%dx%d)\n",
- ovl->id,
- info->pos_x, info->pos_y,
- info->width, info->height,
- info->out_width, info->out_height,
- dw, dh);
-
- if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
- outw = info->width;
- outh = info->height;
- } else {
- if (info->out_width == 0)
- outw = info->width;
- else
- outw = info->out_width;
-
- if (info->out_height == 0)
- outh = info->height;
- else
- outh = info->out_height;
- }
-
- if (dw < info->pos_x + outw) {
- DSSDBG("check_overlay failed 1: %d < %d + %d\n",
- dw, info->pos_x, outw);
- return -EINVAL;
- }
-
- if (dh < info->pos_y + outh) {
- DSSDBG("check_overlay failed 2: %d < %d + %d\n",
- dh, info->pos_y, outh);
- return -EINVAL;
- }
-
- if ((ovl->supported_modes & info->color_mode) == 0) {
- DSSERR("overlay doesn't support mode %d\n", info->color_mode);
- return -EINVAL;
- }
-
- if (ovl->caps & OMAP_DSS_OVL_CAP_ZORDER) {
- if (info->zorder < 0 || info->zorder > 3) {
- DSSERR("zorder out of range: %d\n",
- info->zorder);
- return -EINVAL;
- }
- /*
- * Check that zorder doesn't match with zorder of any other
- * overlay which is enabled and is also connected to the same
- * manager
- */
- for (i = 0; i < omap_dss_get_num_overlays(); i++) {
- struct omap_overlay *tmp_ovl = omap_dss_get_overlay(i);
-
- if (tmp_ovl->id != ovl->id &&
- tmp_ovl->manager == ovl->manager &&
- tmp_ovl->info.enabled == true &&
- tmp_ovl->info.zorder == info->zorder) {
- DSSERR("%s and %s have same zorder: %d\n",
- ovl->name, tmp_ovl->name, info->zorder);
- return -EINVAL;
- }
- }
- }
-
- return 0;
-}
-
-static int dss_ovl_set_overlay_info(struct omap_overlay *ovl,
- struct omap_overlay_info *info)
-{
- int r;
- struct omap_overlay_info old_info;
-
- old_info = ovl->info;
- ovl->info = *info;
-
- if (ovl->manager) {
- r = dss_check_overlay(ovl, ovl->manager->device);
- if (r) {
- ovl->info = old_info;
- return r;
- }
- }
-
- ovl->info_dirty = true;
-
- return 0;
-}
-
-static void dss_ovl_get_overlay_info(struct omap_overlay *ovl,
- struct omap_overlay_info *info)
-{
- *info = ovl->info;
-}
-
-static int dss_ovl_wait_for_go(struct omap_overlay *ovl)
-{
- return dss_mgr_wait_for_go_ovl(ovl);
-}
-
-static int omap_dss_set_manager(struct omap_overlay *ovl,
- struct omap_overlay_manager *mgr)
-{
- if (!mgr)
- return -EINVAL;
-
- if (ovl->manager) {
- DSSERR("overlay '%s' already has a manager '%s'\n",
- ovl->name, ovl->manager->name);
- return -EINVAL;
- }
-
- if (ovl->info.enabled) {
- DSSERR("overlay has to be disabled to change the manager\n");
- return -EINVAL;
- }
-
- ovl->manager = mgr;
- ovl->manager_changed = true;
-
- /* XXX: When there is an overlay on a DSI manual update display, and
- * the overlay is first disabled, then moved to tv, and enabled, we
- * seem to get SYNC_LOST_DIGIT error.
- *
- * Waiting doesn't seem to help, but updating the manual update display
- * after disabling the overlay seems to fix this. This hints that the
- * overlay is perhaps somehow tied to the LCD output until the output
- * is updated.
- *
- * Userspace workaround for this is to update the LCD after disabling
- * the overlay, but before moving the overlay to TV.
- */
-
- return 0;
-}
-
-static int omap_dss_unset_manager(struct omap_overlay *ovl)
-{
- if (!ovl->manager) {
- DSSERR("failed to detach overlay: manager not set\n");
- return -EINVAL;
- }
-
- if (ovl->info.enabled) {
- DSSERR("overlay has to be disabled to unset the manager\n");
- return -EINVAL;
- }
-
- ovl->manager = NULL;
- ovl->manager_changed = true;
-
- return 0;
-}
-
int omap_dss_get_num_overlays(void)
{
return num_overlays;
@@ -615,134 +459,65 @@ EXPORT_SYMBOL(omap_dss_get_num_overlays);
struct omap_overlay *omap_dss_get_overlay(int num)
{
- int i = 0;
- struct omap_overlay *ovl;
+ if (num >= num_overlays)
+ return NULL;
- list_for_each_entry(ovl, &overlay_list, list) {
- if (i++ == num)
- return ovl;
- }
-
- return NULL;
+ return &overlays[num];
}
EXPORT_SYMBOL(omap_dss_get_overlay);
-static void omap_dss_add_overlay(struct omap_overlay *overlay)
-{
- ++num_overlays;
- list_add_tail(&overlay->list, &overlay_list);
-}
-
-static struct omap_overlay *dispc_overlays[MAX_DSS_OVERLAYS];
-
-void dss_overlay_setup_dispc_manager(struct omap_overlay_manager *mgr)
-{
- mgr->num_overlays = dss_feat_get_num_ovls();
- mgr->overlays = dispc_overlays;
-}
-
-#ifdef L4_EXAMPLE
-static struct omap_overlay *l4_overlays[1];
-void dss_overlay_setup_l4_manager(struct omap_overlay_manager *mgr)
-{
- mgr->num_overlays = 1;
- mgr->overlays = l4_overlays;
-}
-#endif
-
void dss_init_overlays(struct platform_device *pdev)
{
int i, r;
- INIT_LIST_HEAD(&overlay_list);
+ num_overlays = dss_feat_get_num_ovls();
- num_overlays = 0;
+ overlays = kzalloc(sizeof(struct omap_overlay) * num_overlays,
+ GFP_KERNEL);
- for (i = 0; i < dss_feat_get_num_ovls(); ++i) {
- struct omap_overlay *ovl;
- ovl = kzalloc(sizeof(*ovl), GFP_KERNEL);
+ BUG_ON(overlays == NULL);
- BUG_ON(ovl == NULL);
+ for (i = 0; i < num_overlays; ++i) {
+ struct omap_overlay *ovl = &overlays[i];
switch (i) {
case 0:
ovl->name = "gfx";
ovl->id = OMAP_DSS_GFX;
- ovl->info.global_alpha = 255;
- ovl->info.zorder = 0;
break;
case 1:
ovl->name = "vid1";
ovl->id = OMAP_DSS_VIDEO1;
- ovl->info.global_alpha = 255;
- ovl->info.zorder =
- dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 3 : 0;
break;
case 2:
ovl->name = "vid2";
ovl->id = OMAP_DSS_VIDEO2;
- ovl->info.global_alpha = 255;
- ovl->info.zorder =
- dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 2 : 0;
break;
case 3:
ovl->name = "vid3";
ovl->id = OMAP_DSS_VIDEO3;
- ovl->info.global_alpha = 255;
- ovl->info.zorder =
- dss_has_feature(FEAT_ALPHA_FREE_ZORDER) ? 1 : 0;
break;
}
- ovl->set_manager = &omap_dss_set_manager;
- ovl->unset_manager = &omap_dss_unset_manager;
- ovl->set_overlay_info = &dss_ovl_set_overlay_info;
- ovl->get_overlay_info = &dss_ovl_get_overlay_info;
- ovl->wait_for_go = &dss_ovl_wait_for_go;
+ ovl->is_enabled = &dss_ovl_is_enabled;
+ ovl->enable = &dss_ovl_enable;
+ ovl->disable = &dss_ovl_disable;
+ ovl->set_manager = &dss_ovl_set_manager;
+ ovl->unset_manager = &dss_ovl_unset_manager;
+ ovl->set_overlay_info = &dss_ovl_set_info;
+ ovl->get_overlay_info = &dss_ovl_get_info;
+ ovl->wait_for_go = &dss_mgr_wait_for_go_ovl;
ovl->caps = dss_feat_get_overlay_caps(ovl->id);
ovl->supported_modes =
dss_feat_get_supported_color_modes(ovl->id);
- omap_dss_add_overlay(ovl);
-
r = kobject_init_and_add(&ovl->kobj, &overlay_ktype,
&pdev->dev.kobj, "overlay%d", i);
- if (r) {
- DSSERR("failed to create sysfs file\n");
- continue;
- }
-
- dispc_overlays[i] = ovl;
- }
-
-#ifdef L4_EXAMPLE
- {
- struct omap_overlay *ovl;
- ovl = kzalloc(sizeof(*ovl), GFP_KERNEL);
-
- BUG_ON(ovl == NULL);
-
- ovl->name = "l4";
- ovl->supported_modes = OMAP_DSS_COLOR_RGB24U;
-
- ovl->set_manager = &omap_dss_set_manager;
- ovl->unset_manager = &omap_dss_unset_manager;
- ovl->set_overlay_info = &dss_ovl_set_overlay_info;
- ovl->get_overlay_info = &dss_ovl_get_overlay_info;
-
- omap_dss_add_overlay(ovl);
-
- r = kobject_init_and_add(&ovl->kobj, &overlay_ktype,
- &pdev->dev.kobj, "overlayl4");
-
if (r)
DSSERR("failed to create sysfs file\n");
-
- l4_overlays[0] = ovl;
}
-#endif
}
/* connect overlays to the new device, if not already connected. if force
@@ -795,8 +570,8 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force)
ovl = omap_dss_get_overlay(i);
if (!ovl->manager || force) {
if (ovl->manager)
- omap_dss_unset_manager(ovl);
- omap_dss_set_manager(ovl, mgr);
+ ovl->unset_manager(ovl);
+ ovl->set_manager(ovl, mgr);
}
}
@@ -806,17 +581,95 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force)
void dss_uninit_overlays(struct platform_device *pdev)
{
- struct omap_overlay *ovl;
+ int i;
+
+ for (i = 0; i < num_overlays; ++i) {
+ struct omap_overlay *ovl = &overlays[i];
- while (!list_empty(&overlay_list)) {
- ovl = list_first_entry(&overlay_list,
- struct omap_overlay, list);
- list_del(&ovl->list);
kobject_del(&ovl->kobj);
kobject_put(&ovl->kobj);
- kfree(ovl);
}
+ kfree(overlays);
+ overlays = NULL;
num_overlays = 0;
}
+int dss_ovl_simple_check(struct omap_overlay *ovl,
+ const struct omap_overlay_info *info)
+{
+ if (info->paddr == 0) {
+ DSSERR("check_overlay: paddr cannot be 0\n");
+ return -EINVAL;
+ }
+
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
+ if (info->out_width != 0 && info->width != info->out_width) {
+ DSSERR("check_overlay: overlay %d doesn't support "
+ "scaling\n", ovl->id);
+ return -EINVAL;
+ }
+
+ if (info->out_height != 0 && info->height != info->out_height) {
+ DSSERR("check_overlay: overlay %d doesn't support "
+ "scaling\n", ovl->id);
+ return -EINVAL;
+ }
+ }
+
+ if ((ovl->supported_modes & info->color_mode) == 0) {
+ DSSERR("check_overlay: overlay %d doesn't support mode %d\n",
+ ovl->id, info->color_mode);
+ return -EINVAL;
+ }
+
+ if (info->zorder >= omap_dss_get_num_overlays()) {
+ DSSERR("check_overlay: zorder %d too high\n", info->zorder);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int dss_ovl_check(struct omap_overlay *ovl,
+ struct omap_overlay_info *info, struct omap_dss_device *dssdev)
+{
+ u16 outw, outh;
+ u16 dw, dh;
+
+ if (dssdev == NULL)
+ return 0;
+
+ dssdev->driver->get_resolution(dssdev, &dw, &dh);
+
+ if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
+ outw = info->width;
+ outh = info->height;
+ } else {
+ if (info->out_width == 0)
+ outw = info->width;
+ else
+ outw = info->out_width;
+
+ if (info->out_height == 0)
+ outh = info->height;
+ else
+ outh = info->out_height;
+ }
+
+ if (dw < info->pos_x + outw) {
+ DSSERR("overlay %d horizontally not inside the display area "
+ "(%d + %d >= %d)\n",
+ ovl->id, info->pos_x, outw, dw);
+ return -EINVAL;
+ }
+
+ if (dh < info->pos_y + outh) {
+ DSSERR("overlay %d vertically not inside the display area "
+ "(%d + %d >= %d)\n",
+ ovl->id, info->pos_y, outh, dh);
+ return -EINVAL;
+ }
+
+ return 0;
+}
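The overlay.c changes above retire direct ovl->info.enabled access in favour of the new is_enabled()/enable()/disable() operations, which the omapfb hunks below adopt as well. A small illustrative helper built only from those three ops (toggle_overlay itself is not part of the patch):

	/* Sketch only: flip an overlay's state through the new ops. */
	static int toggle_overlay(struct omap_overlay *ovl)
	{
		if (ovl->is_enabled(ovl))
			return ovl->disable(ovl);

		return ovl->enable(ovl);
	}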
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index 1130c60..55f3980 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -140,7 +140,7 @@ static void rfbi_runtime_put(void)
DSSDBG("rfbi_runtime_put\n");
- r = pm_runtime_put(&rfbi.pdev->dev);
+ r = pm_runtime_put_sync(&rfbi.pdev->dev);
WARN_ON(r < 0);
}
@@ -784,7 +784,6 @@ int omap_rfbi_prepare_update(struct omap_dss_device *dssdev,
if (*w == 0 || *h == 0)
return -EINVAL;
- dss_setup_partial_planes(dssdev, x, y, w, h, true);
dispc_mgr_set_lcd_size(dssdev->manager->id, *w, *h);
return 0;
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index 40305ad..8266ca0 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -123,10 +123,14 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
goto err_sdi_enable;
mdelay(2);
- dssdev->manager->enable(dssdev->manager);
+ r = dss_mgr_enable(dssdev->manager);
+ if (r)
+ goto err_mgr_enable;
return 0;
+err_mgr_enable:
+ dss_sdi_disable();
err_sdi_enable:
err_set_dispc_clock_div:
err_set_dss_clock_div:
@@ -145,7 +149,7 @@ EXPORT_SYMBOL(omapdss_sdi_display_enable);
void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
{
- dssdev->manager->disable(dssdev->manager);
+ dss_mgr_disable(dssdev->manager);
dss_sdi_disable();
diff --git a/drivers/video/omap2/dss/ti_hdmi.h b/drivers/video/omap2/dss/ti_hdmi.h
index 2c3443d..50dadba 100644
--- a/drivers/video/omap2/dss/ti_hdmi.h
+++ b/drivers/video/omap2/dss/ti_hdmi.h
@@ -110,6 +110,11 @@ struct ti_hdmi_ip_ops {
void (*dump_phy)(struct hdmi_ip_data *ip_data, struct seq_file *s);
+#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
+ defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
+ void (*audio_enable)(struct hdmi_ip_data *ip_data, bool start);
+#endif
+
};
struct hdmi_ip_data {
@@ -121,6 +126,10 @@ struct hdmi_ip_data {
const struct ti_hdmi_ip_ops *ops;
struct hdmi_config cfg;
struct hdmi_pll_info pll_data;
+
+ /* ti_hdmi_4xxx_ip private data. These should be in a separate struct */
+ int hpd_gpio;
+ bool phy_tx_enabled;
};
int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data);
void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data);
@@ -134,5 +143,8 @@ void ti_hdmi_4xxx_wp_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
void ti_hdmi_4xxx_pll_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
-
+#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
+ defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
+void ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data, bool enable);
+#endif
#endif
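ti_hdmi.h above adds an optional audio_enable hook to struct ti_hdmi_ip_ops, compiled in only when one of the OMAP4 HDMI audio options is enabled. Since the hook is optional, a caller has to verify both the ops table and the hook before dereferencing either; a hedged sketch of such a guard (audio_start is an illustrative wrapper, not a function from the patch):

	/* Sketch only: a missing ops table or a missing audio_enable callback
	 * both mean audio cannot be driven. */
	static int audio_start(struct hdmi_ip_data *ip_data, bool start)
	{
		if (!ip_data->ops || !ip_data->ops->audio_enable)
			return -ENODEV;

		ip_data->ops->audio_enable(ip_data, start);
		return 0;
	}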
diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
index e1a6ce5..6847a47 100644
--- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
+++ b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
@@ -28,6 +28,7 @@
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/seq_file.h>
+#include <linux/gpio.h>
#include "ti_hdmi_4xxx_ip.h"
#include "dss.h"
@@ -223,6 +224,49 @@ void ti_hdmi_4xxx_pll_disable(struct hdmi_ip_data *ip_data)
hdmi_set_pll_pwr(ip_data, HDMI_PLLPWRCMD_ALLOFF);
}
+static int hdmi_check_hpd_state(struct hdmi_ip_data *ip_data)
+{
+ unsigned long flags;
+ bool hpd;
+ int r;
+ /* this should be in ti_hdmi_4xxx_ip private data */
+ static DEFINE_SPINLOCK(phy_tx_lock);
+
+ spin_lock_irqsave(&phy_tx_lock, flags);
+
+ hpd = gpio_get_value(ip_data->hpd_gpio);
+
+ if (hpd == ip_data->phy_tx_enabled) {
+ spin_unlock_irqrestore(&phy_tx_lock, flags);
+ return 0;
+ }
+
+ if (hpd)
+ r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_TXON);
+ else
+ r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_LDOON);
+
+ if (r) {
+ DSSERR("Failed to %s PHY TX power\n",
+ hpd ? "enable" : "disable");
+ goto err;
+ }
+
+ ip_data->phy_tx_enabled = hpd;
+err:
+ spin_unlock_irqrestore(&phy_tx_lock, flags);
+ return r;
+}
+
+static irqreturn_t hpd_irq_handler(int irq, void *data)
+{
+ struct hdmi_ip_data *ip_data = data;
+
+ hdmi_check_hpd_state(ip_data);
+
+ return IRQ_HANDLED;
+}
+
int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data)
{
u16 r = 0;
@@ -232,10 +276,6 @@ int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data)
if (r)
return r;
- r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_TXON);
- if (r)
- return r;
-
/*
* Read address 0 in order to get the SCP reset done completed
* Dummy access performed to make sure reset is done
@@ -257,12 +297,32 @@ int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data)
/* Write to phy address 3 to change the polarity control */
REG_FLD_MOD(phy_base, HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27);
+ r = request_threaded_irq(gpio_to_irq(ip_data->hpd_gpio),
+ NULL, hpd_irq_handler,
+ IRQF_DISABLED | IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING, "hpd", ip_data);
+ if (r) {
+ DSSERR("HPD IRQ request failed\n");
+ hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
+ return r;
+ }
+
+ r = hdmi_check_hpd_state(ip_data);
+ if (r) {
+ free_irq(gpio_to_irq(ip_data->hpd_gpio), ip_data);
+ hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
+ return r;
+ }
+
return 0;
}
void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data)
{
+ free_irq(gpio_to_irq(ip_data->hpd_gpio), ip_data);
+
hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
+ ip_data->phy_tx_enabled = false;
}
static int hdmi_core_ddc_init(struct hdmi_ip_data *ip_data)
@@ -419,14 +479,7 @@ int ti_hdmi_4xxx_read_edid(struct hdmi_ip_data *ip_data,
bool ti_hdmi_4xxx_detect(struct hdmi_ip_data *ip_data)
{
- int r;
-
- void __iomem *base = hdmi_core_sys_base(ip_data);
-
- /* HPD */
- r = REG_GET(base, HDMI_CORE_SYS_SYS_STAT, 1, 1);
-
- return r == 1;
+ return gpio_get_value(ip_data->hpd_gpio);
}
static void hdmi_core_init(struct hdmi_core_video_config *video_cfg,
@@ -1204,36 +1257,13 @@ int hdmi_config_audio_acr(struct hdmi_ip_data *ip_data,
return 0;
}
-int hdmi_audio_trigger(struct hdmi_ip_data *ip_data,
- struct snd_pcm_substream *substream, int cmd,
- struct snd_soc_dai *dai)
+void ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data, bool enable)
{
- int err = 0;
- switch (cmd) {
- case SNDRV_PCM_TRIGGER_START:
- case SNDRV_PCM_TRIGGER_RESUME:
- case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- REG_FLD_MOD(hdmi_av_base(ip_data),
- HDMI_CORE_AV_AUD_MODE, 1, 0, 0);
- REG_FLD_MOD(hdmi_wp_base(ip_data),
- HDMI_WP_AUDIO_CTRL, 1, 31, 31);
- REG_FLD_MOD(hdmi_wp_base(ip_data),
- HDMI_WP_AUDIO_CTRL, 1, 30, 30);
- break;
-
- case SNDRV_PCM_TRIGGER_STOP:
- case SNDRV_PCM_TRIGGER_SUSPEND:
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
- REG_FLD_MOD(hdmi_av_base(ip_data),
- HDMI_CORE_AV_AUD_MODE, 0, 0, 0);
- REG_FLD_MOD(hdmi_wp_base(ip_data),
- HDMI_WP_AUDIO_CTRL, 0, 30, 30);
- REG_FLD_MOD(hdmi_wp_base(ip_data),
- HDMI_WP_AUDIO_CTRL, 0, 31, 31);
- break;
- default:
- err = -EINVAL;
- }
- return err;
+ REG_FLD_MOD(hdmi_av_base(ip_data),
+ HDMI_CORE_AV_AUD_MODE, enable, 0, 0);
+ REG_FLD_MOD(hdmi_wp_base(ip_data),
+ HDMI_WP_AUDIO_CTRL, enable, 31, 31);
+ REG_FLD_MOD(hdmi_wp_base(ip_data),
+ HDMI_WP_AUDIO_CTRL, enable, 30, 30);
}
#endif
diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
index 2040956..a442998 100644
--- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
+++ b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
@@ -576,9 +576,6 @@ struct hdmi_core_audio_config {
#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
-int hdmi_audio_trigger(struct hdmi_ip_data *ip_data,
- struct snd_pcm_substream *substream, int cmd,
- struct snd_soc_dai *dai);
int hdmi_config_audio_acr(struct hdmi_ip_data *ip_data,
u32 sample_freq, u32 *n, u32 *cts);
void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index 7533458..5c3d0f9 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -401,7 +401,7 @@ static void venc_runtime_put(void)
DSSDBG("venc_runtime_put\n");
- r = pm_runtime_put(&venc.pdev->dev);
+ r = pm_runtime_put_sync(&venc.pdev->dev);
WARN_ON(r < 0);
}
@@ -417,9 +417,10 @@ static const struct venc_config *venc_timings_to_config(
BUG();
}
-static void venc_power_on(struct omap_dss_device *dssdev)
+static int venc_power_on(struct omap_dss_device *dssdev)
{
u32 l;
+ int r;
venc_reset();
venc_write_config(venc_timings_to_config(&dssdev->panel.timings));
@@ -447,7 +448,22 @@ static void venc_power_on(struct omap_dss_device *dssdev)
if (dssdev->platform_enable)
dssdev->platform_enable(dssdev);
- dssdev->manager->enable(dssdev->manager);
+ r = dss_mgr_enable(dssdev->manager);
+ if (r)
+ goto err;
+
+ return 0;
+
+err:
+ venc_write_reg(VENC_OUTPUT_CONTROL, 0);
+ dss_set_dac_pwrdn_bgz(0);
+
+ if (dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+
+ regulator_disable(venc.vdda_dac_reg);
+
+ return r;
}
static void venc_power_off(struct omap_dss_device *dssdev)
@@ -455,7 +471,7 @@ static void venc_power_off(struct omap_dss_device *dssdev)
venc_write_reg(VENC_OUTPUT_CONTROL, 0);
dss_set_dac_pwrdn_bgz(0);
- dssdev->manager->disable(dssdev->manager);
+ dss_mgr_disable(dssdev->manager);
if (dssdev->platform_disable)
dssdev->platform_disable(dssdev);
@@ -504,7 +520,9 @@ static int venc_panel_enable(struct omap_dss_device *dssdev)
if (r)
goto err1;
- venc_power_on(dssdev);
+ r = venc_power_on(dssdev);
+ if (r)
+ goto err2;
venc.wss_data = 0;
@@ -512,6 +530,8 @@ static int venc_panel_enable(struct omap_dss_device *dssdev)
mutex_unlock(&venc.venc_lock);
return 0;
+err2:
+ venc_runtime_put();
err1:
omap_dss_stop_device(dssdev);
err0:
diff --git a/drivers/video/omap2/omapfb/Kconfig b/drivers/video/omap2/omapfb/Kconfig
index 83d3fe7..4ea17dc 100644
--- a/drivers/video/omap2/omapfb/Kconfig
+++ b/drivers/video/omap2/omapfb/Kconfig
@@ -1,6 +1,6 @@
menuconfig FB_OMAP2
tristate "OMAP2+ frame buffer support"
- depends on FB && OMAP2_DSS
+ depends on FB && OMAP2_DSS && !DRM_OMAP
select OMAP2_VRAM
select OMAP2_VRFB if ARCH_OMAP2 || ARCH_OMAP3
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c
index df7bcce..16ba619 100644
--- a/drivers/video/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c
@@ -111,28 +111,22 @@ static int omapfb_setup_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
set_fb_fix(fbi);
}
- if (pi->enabled) {
- struct omap_overlay_info info;
+ if (!pi->enabled) {
+ r = ovl->disable(ovl);
+ if (r)
+ goto undo;
+ }
+ if (pi->enabled) {
r = omapfb_setup_overlay(fbi, ovl, pi->pos_x, pi->pos_y,
pi->out_width, pi->out_height);
if (r)
goto undo;
-
- ovl->get_overlay_info(ovl, &info);
-
- if (!info.enabled) {
- info.enabled = pi->enabled;
- r = ovl->set_overlay_info(ovl, &info);
- if (r)
- goto undo;
- }
} else {
struct omap_overlay_info info;
ovl->get_overlay_info(ovl, &info);
- info.enabled = pi->enabled;
info.pos_x = pi->pos_x;
info.pos_y = pi->pos_y;
info.out_width = pi->out_width;
@@ -146,6 +140,12 @@ static int omapfb_setup_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
if (ovl->manager)
ovl->manager->apply(ovl->manager);
+ if (pi->enabled) {
+ r = ovl->enable(ovl);
+ if (r)
+ goto undo;
+ }
+
/* Release the locks in a specific order to keep lockdep happy */
if (old_rg->id > new_rg->id) {
omapfb_put_mem_region(old_rg);
@@ -189,19 +189,19 @@ static int omapfb_query_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
memset(pi, 0, sizeof(*pi));
} else {
struct omap_overlay *ovl;
- struct omap_overlay_info *ovli;
+ struct omap_overlay_info ovli;
ovl = ofbi->overlays[0];
- ovli = &ovl->info;
+ ovl->get_overlay_info(ovl, &ovli);
- pi->pos_x = ovli->pos_x;
- pi->pos_y = ovli->pos_y;
- pi->enabled = ovli->enabled;
+ pi->pos_x = ovli.pos_x;
+ pi->pos_y = ovli.pos_y;
+ pi->enabled = ovl->is_enabled(ovl);
pi->channel_out = 0; /* xxx */
pi->mirror = 0;
pi->mem_idx = get_mem_idx(ofbi);
- pi->out_width = ovli->out_width;
- pi->out_height = ovli->out_height;
+ pi->out_width = ovli.out_width;
+ pi->out_height = ovli.out_height;
}
return 0;
@@ -238,7 +238,9 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
continue;
for (j = 0; j < ofbi2->num_overlays; j++) {
- if (ofbi2->overlays[j]->info.enabled) {
+ struct omap_overlay *ovl;
+ ovl = ofbi2->overlays[j];
+ if (ovl->is_enabled(ovl)) {
r = -EBUSY;
goto out;
}
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index 70aa47d..ce15831 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -43,18 +43,18 @@
static char *def_mode;
static char *def_vram;
-static int def_vrfb;
+static bool def_vrfb;
static int def_rotate;
-static int def_mirror;
+static bool def_mirror;
static bool auto_update;
static unsigned int auto_update_freq;
module_param(auto_update, bool, 0);
module_param(auto_update_freq, uint, 0644);
#ifdef DEBUG
-unsigned int omapfb_debug;
+bool omapfb_debug;
module_param_named(debug, omapfb_debug, bool, 0644);
-static unsigned int omapfb_test_pattern;
+static bool omapfb_test_pattern;
module_param_named(test, omapfb_test_pattern, bool, 0644);
#endif
@@ -970,16 +970,20 @@ int omapfb_apply_changes(struct fb_info *fbi, int init)
outh = var->yres;
}
} else {
- outw = ovl->info.out_width;
- outh = ovl->info.out_height;
+ struct omap_overlay_info info;
+ ovl->get_overlay_info(ovl, &info);
+ outw = info.out_width;
+ outh = info.out_height;
}
if (init) {
posx = 0;
posy = 0;
} else {
- posx = ovl->info.pos_x;
- posy = ovl->info.pos_y;
+ struct omap_overlay_info info;
+ ovl->get_overlay_info(ovl, &info);
+ posx = info.pos_x;
+ posy = info.pos_y;
}
r = omapfb_setup_overlay(fbi, ovl, posx, posy, outw, outh);
@@ -2067,6 +2071,8 @@ static int omapfb_create_framebuffers(struct omapfb2_device *fbdev)
if (ofbi->num_overlays > 0) {
struct omap_overlay *ovl = ofbi->overlays[0];
+ ovl->manager->apply(ovl->manager);
+
r = omapfb_overlay_enable(ovl, 1);
if (r) {
diff --git a/drivers/video/omap2/omapfb/omapfb-sysfs.c b/drivers/video/omap2/omapfb/omapfb-sysfs.c
index 1694d51..e8d8cc7 100644
--- a/drivers/video/omap2/omapfb/omapfb-sysfs.c
+++ b/drivers/video/omap2/omapfb/omapfb-sysfs.c
@@ -473,7 +473,9 @@ static ssize_t store_size(struct device *dev, struct device_attribute *attr,
continue;
for (j = 0; j < ofbi2->num_overlays; j++) {
- if (ofbi2->overlays[j]->info.enabled) {
+ struct omap_overlay *ovl;
+ ovl = ofbi2->overlays[j];
+ if (ovl->is_enabled(ovl)) {
r = -EBUSY;
goto out;
}
diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/omap2/omapfb/omapfb.h
index fdf0ede..c0bdc9b 100644
--- a/drivers/video/omap2/omapfb/omapfb.h
+++ b/drivers/video/omap2/omapfb/omapfb.h
@@ -32,7 +32,7 @@
#include <video/omapdss.h>
#ifdef DEBUG
-extern unsigned int omapfb_debug;
+extern bool omapfb_debug;
#define DBG(format, ...) \
do { \
if (omapfb_debug) \
@@ -181,13 +181,10 @@ static inline void omapfb_unlock(struct omapfb2_device *fbdev)
static inline int omapfb_overlay_enable(struct omap_overlay *ovl,
int enable)
{
- struct omap_overlay_info info;
-
- ovl->get_overlay_info(ovl, &info);
- if (info.enabled == enable)
- return 0;
- info.enabled = enable;
- return ovl->set_overlay_info(ovl, &info);
+ if (enable)
+ return ovl->enable(ovl);
+ else
+ return ovl->disable(ovl);
}
static inline struct omapfb2_mem_region *
diff --git a/drivers/video/pm2fb.c b/drivers/video/pm2fb.c
index dc7bfa9..df31a24 100644
--- a/drivers/video/pm2fb.c
+++ b/drivers/video/pm2fb.c
@@ -78,12 +78,12 @@ static char *mode_option __devinitdata;
* these flags allow the user to specify that requests for +ve sync
 * should be silently turned into -ve sync.
*/
-static int lowhsync;
-static int lowvsync;
-static int noaccel __devinitdata;
+static bool lowhsync;
+static bool lowvsync;
+static bool noaccel __devinitdata;
/* mtrr option */
#ifdef CONFIG_MTRR
-static int nomtrr __devinitdata;
+static bool nomtrr __devinitdata;
#endif
/*
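
The pm2fb.c hunk above, like the pm3fb.c, riva/fbdev.c, sstfb.c, tdfxfb.c, smscufx.c, udlfb.c, uvesafb.c and vfb.c hunks below, changes module-parameter variables from int to bool so that the variable's C type matches the bool type handed to module_param(); newer kernels warn when the two disagree. A minimal sketch of the matching declaration in a hypothetical module (not one of the drivers touched here):

/*
 * Minimal sketch of a boolean module parameter whose C type matches the
 * type given to module_param().  "demo" and "noaccel" are hypothetical.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>

static bool noaccel;			/* bool variable for a bool parameter */
module_param(noaccel, bool, 0444);
MODULE_PARM_DESC(noaccel, "Disable hardware acceleration (default: off)");

static int __init demo_init(void)
{
	pr_info("demo: noaccel=%d\n", noaccel);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");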
diff --git a/drivers/video/pm3fb.c b/drivers/video/pm3fb.c
index 6632ee5..055e527 100644
--- a/drivers/video/pm3fb.c
+++ b/drivers/video/pm3fb.c
@@ -57,11 +57,11 @@
*/
static int hwcursor = 1;
static char *mode_option __devinitdata;
-static int noaccel __devinitdata;
+static bool noaccel __devinitdata;
/* mtrr option */
#ifdef CONFIG_MTRR
-static int nomtrr __devinitdata;
+static bool nomtrr __devinitdata;
#endif
/*
diff --git a/drivers/video/pnx4008/pnxrgbfb.c b/drivers/video/pnx4008/pnxrgbfb.c
index b2252fe..6d30428 100644
--- a/drivers/video/pnx4008/pnxrgbfb.c
+++ b/drivers/video/pnx4008/pnxrgbfb.c
@@ -193,17 +193,6 @@ static struct platform_driver rgbfb_driver = {
.remove = rgbfb_remove,
};
-static int __init rgbfb_init(void)
-{
- return platform_driver_register(&rgbfb_driver);
-}
-
-static void __exit rgbfb_exit(void)
-{
- platform_driver_unregister(&rgbfb_driver);
-}
-
-module_init(rgbfb_init);
-module_exit(rgbfb_exit);
+module_platform_driver(rgbfb_driver);
MODULE_LICENSE("GPL");
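
This hunk is the first of many below (sdum.c, pxa168fb.c, pxa3xx-gcu.c, sh7760fb.c, s3c-fb.c, sh_mobile_lcdcfb.c, sh_mobile_meram.c, sm501fb.c, vt8500lcdfb.c, w100fb.c) that drop hand-written init/exit functions whose only job is to register and unregister a platform_driver, replacing them with the module_platform_driver() helper; module_usb_driver() plays the same role for the USB drivers smscufx.c and udlfb.c. A minimal sketch with a hypothetical driver:

/*
 * Minimal sketch of module_platform_driver(): the macro expands to the
 * module_init()/module_exit() boilerplate that registers and unregisters
 * the driver.  "demo" is a hypothetical device name.
 */
#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "probed\n");
	return 0;
}

static int demo_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.remove	= demo_remove,
	.driver	= {
		.name	= "demo",
	},
};

/* replaces the open-coded platform_driver_register()/unregister() pair */
module_platform_driver(demo_driver);

MODULE_DESCRIPTION("module_platform_driver() usage sketch");
MODULE_LICENSE("GPL");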
diff --git a/drivers/video/pnx4008/sdum.c b/drivers/video/pnx4008/sdum.c
index 50e0039..c5c7414 100644
--- a/drivers/video/pnx4008/sdum.c
+++ b/drivers/video/pnx4008/sdum.c
@@ -856,17 +856,6 @@ static struct platform_driver sdum_driver = {
.resume = sdum_resume,
};
-int __init sdum_init(void)
-{
- return platform_driver_register(&sdum_driver);
-}
-
-static void __exit sdum_exit(void)
-{
- platform_driver_unregister(&sdum_driver);
-};
-
-module_init(sdum_init);
-module_exit(sdum_exit);
+module_platform_driver(sdum_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/video/pvr2fb.c b/drivers/video/pvr2fb.c
index f997510..3a3fdc6 100644
--- a/drivers/video/pvr2fb.c
+++ b/drivers/video/pvr2fb.c
@@ -1061,7 +1061,7 @@ static struct pvr2_board {
int (*init)(void);
void (*exit)(void);
char name[16];
-} board_driver[] = {
+} board_driver[] __refdata = {
#ifdef CONFIG_SH_DREAMCAST
{ pvr2fb_dc_init, pvr2fb_dc_exit, "Sega DC PVR2" },
#endif
diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c
index 18ead6f..8384b94 100644
--- a/drivers/video/pxa168fb.c
+++ b/drivers/video/pxa168fb.c
@@ -832,17 +832,7 @@ static struct platform_driver pxa168fb_driver = {
.remove = __devexit_p(pxa168fb_remove),
};
-static int __init pxa168fb_init(void)
-{
- return platform_driver_register(&pxa168fb_driver);
-}
-module_init(pxa168fb_init);
-
-static void __exit pxa168fb_exit(void)
-{
- platform_driver_unregister(&pxa168fb_driver);
-}
-module_exit(pxa168fb_exit);
+module_platform_driver(pxa168fb_driver);
MODULE_AUTHOR("Lennert Buytenhek <buytenh@marvell.com> "
"Green Wan <gwan@marvell.com>");
diff --git a/drivers/video/pxa3xx-gcu.c b/drivers/video/pxa3xx-gcu.c
index 1ed8b36..1d71c08 100644
--- a/drivers/video/pxa3xx-gcu.c
+++ b/drivers/video/pxa3xx-gcu.c
@@ -747,20 +747,7 @@ static struct platform_driver pxa3xx_gcu_driver = {
},
};
-static int __init
-pxa3xx_gcu_init(void)
-{
- return platform_driver_register(&pxa3xx_gcu_driver);
-}
-
-static void __exit
-pxa3xx_gcu_exit(void)
-{
- platform_driver_unregister(&pxa3xx_gcu_driver);
-}
-
-module_init(pxa3xx_gcu_init);
-module_exit(pxa3xx_gcu_exit);
+module_platform_driver(pxa3xx_gcu_driver);
MODULE_DESCRIPTION("PXA3xx graphics controller unit driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index d8ab7be..2f58cf9 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -207,9 +207,9 @@ MODULE_DEVICE_TABLE(pci, rivafb_pci_tbl);
/* command line data, set in rivafb_setup() */
static int flatpanel __devinitdata = -1; /* Autodetect later */
static int forceCRTC __devinitdata = -1;
-static int noaccel __devinitdata = 0;
+static bool noaccel __devinitdata = 0;
#ifdef CONFIG_MTRR
-static int nomtrr __devinitdata = 0;
+static bool nomtrr __devinitdata = 0;
#endif
#ifdef CONFIG_PMAC_BACKLIGHT
static int backlight __devinitdata = 1;
@@ -218,7 +218,7 @@ static int backlight __devinitdata = 0;
#endif
static char *mode_option __devinitdata = NULL;
-static int strictmode = 0;
+static bool strictmode = 0;
static struct fb_fix_screeninfo __devinitdata rivafb_fix = {
.type = FB_TYPE_PACKED_PIXELS,
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index 0753b1c..0c63b69 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -192,6 +192,7 @@ struct s3c_fb_vsync {
* @regs: The mapped hardware registers.
* @variant: Variant information for this hardware.
* @enabled: A bitmask of enabled hardware windows.
+ * @output_on: Flag if the physical output is enabled.
* @pdata: The platform configuration data passed with the device.
* @windows: The hardware windows that have been claimed.
* @irq_no: IRQ line number
@@ -208,6 +209,7 @@ struct s3c_fb {
struct s3c_fb_variant variant;
unsigned char enabled;
+ bool output_on;
struct s3c_fb_platdata *pdata;
struct s3c_fb_win *windows[S3C_FB_MAX_WIN];
@@ -441,6 +443,39 @@ static void shadow_protect_win(struct s3c_fb_win *win, bool protect)
}
/**
+ * s3c_fb_enable() - Set the state of the main LCD output
+ * @sfb: The main framebuffer state.
+ * @enable: The state to set.
+ */
+static void s3c_fb_enable(struct s3c_fb *sfb, int enable)
+{
+ u32 vidcon0 = readl(sfb->regs + VIDCON0);
+
+ if (enable && !sfb->output_on)
+ pm_runtime_get_sync(sfb->dev);
+
+ if (enable) {
+ vidcon0 |= VIDCON0_ENVID | VIDCON0_ENVID_F;
+ } else {
+ /* see the note in the framebuffer datasheet about
+ * why you cannot take both of these bits down at the
+ * same time. */
+
+ if (vidcon0 & VIDCON0_ENVID) {
+ vidcon0 |= VIDCON0_ENVID;
+ vidcon0 &= ~VIDCON0_ENVID_F;
+ }
+ }
+
+ writel(vidcon0, sfb->regs + VIDCON0);
+
+ if (!enable && sfb->output_on)
+ pm_runtime_put_sync(sfb->dev);
+
+ sfb->output_on = enable;
+}
+
+/**
* s3c_fb_set_par() - framebuffer request to set new framebuffer state.
* @info: The framebuffer to change.
*
@@ -461,6 +496,8 @@ static int s3c_fb_set_par(struct fb_info *info)
dev_dbg(sfb->dev, "setting framebuffer parameters\n");
+ pm_runtime_get_sync(sfb->dev);
+
shadow_protect_win(win, 1);
switch (var->bits_per_pixel) {
@@ -510,9 +547,10 @@ static int s3c_fb_set_par(struct fb_info *info)
if (sfb->variant.is_2443)
data |= (1 << 5);
- data |= VIDCON0_ENVID | VIDCON0_ENVID_F;
writel(data, regs + VIDCON0);
+ s3c_fb_enable(sfb, 1);
+
data = VIDTCON0_VBPD(var->upper_margin - 1) |
VIDTCON0_VFPD(var->lower_margin - 1) |
VIDTCON0_VSPW(var->vsync_len - 1);
@@ -574,6 +612,7 @@ static int s3c_fb_set_par(struct fb_info *info)
}
data = WINCONx_ENWIN;
+ sfb->enabled |= (1 << win->index);
/* note, since we have to round up the bits-per-pixel, we end up
* relying on the bitfield information for r/g/b/a to work out
@@ -621,7 +660,8 @@ static int s3c_fb_set_par(struct fb_info *info)
} else if (var->transp.length == 1)
data |= WINCON1_BPPMODE_25BPP_A1888
| WINCON1_BLD_PIX;
- else if (var->transp.length == 4)
+ else if ((var->transp.length == 4) ||
+ (var->transp.length == 8))
data |= WINCON1_BPPMODE_28BPP_A4888
| WINCON1_BLD_PIX | WINCON1_ALPHA_SEL;
else
@@ -654,6 +694,8 @@ static int s3c_fb_set_par(struct fb_info *info)
shadow_protect_win(win, 0);
+ pm_runtime_put_sync(sfb->dev);
+
return 0;
}
@@ -725,6 +767,8 @@ static int s3c_fb_setcolreg(unsigned regno,
dev_dbg(sfb->dev, "%s: win %d: %d => rgb=%d/%d/%d\n",
__func__, win->index, regno, red, green, blue);
+ pm_runtime_get_sync(sfb->dev);
+
switch (info->fix.visual) {
case FB_VISUAL_TRUECOLOR:
/* true-colour, use pseudo-palette */
@@ -752,39 +796,15 @@ static int s3c_fb_setcolreg(unsigned regno,
break;
default:
+ pm_runtime_put_sync(sfb->dev);
return 1; /* unknown type */
}
+ pm_runtime_put_sync(sfb->dev);
return 0;
}
/**
- * s3c_fb_enable() - Set the state of the main LCD output
- * @sfb: The main framebuffer state.
- * @enable: The state to set.
- */
-static void s3c_fb_enable(struct s3c_fb *sfb, int enable)
-{
- u32 vidcon0 = readl(sfb->regs + VIDCON0);
-
- if (enable)
- vidcon0 |= VIDCON0_ENVID | VIDCON0_ENVID_F;
- else {
- /* see the note in the framebuffer datasheet about
- * why you cannot take both of these bits down at the
- * same time. */
-
- if (!(vidcon0 & VIDCON0_ENVID))
- return;
-
- vidcon0 |= VIDCON0_ENVID;
- vidcon0 &= ~VIDCON0_ENVID_F;
- }
-
- writel(vidcon0, sfb->regs + VIDCON0);
-}
-
-/**
* s3c_fb_blank() - blank or unblank the given window
* @blank_mode: The blank state from FB_BLANK_*
* @info: The framebuffer to blank.
@@ -800,6 +820,8 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info)
dev_dbg(sfb->dev, "blank mode %d\n", blank_mode);
+ pm_runtime_get_sync(sfb->dev);
+
wincon = readl(sfb->regs + sfb->variant.wincon + (index * 4));
switch (blank_mode) {
@@ -810,12 +832,16 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info)
case FB_BLANK_NORMAL:
/* disable the DMA and display 0x0 (black) */
+ shadow_protect_win(win, 1);
writel(WINxMAP_MAP | WINxMAP_MAP_COLOUR(0x0),
sfb->regs + sfb->variant.winmap + (index * 4));
+ shadow_protect_win(win, 0);
break;
case FB_BLANK_UNBLANK:
+ shadow_protect_win(win, 1);
writel(0x0, sfb->regs + sfb->variant.winmap + (index * 4));
+ shadow_protect_win(win, 0);
wincon |= WINCONx_ENWIN;
sfb->enabled |= (1 << index);
break;
@@ -823,10 +849,13 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info)
case FB_BLANK_VSYNC_SUSPEND:
case FB_BLANK_HSYNC_SUSPEND:
default:
+ pm_runtime_put_sync(sfb->dev);
return 1;
}
+ shadow_protect_win(win, 1);
writel(wincon, sfb->regs + sfb->variant.wincon + (index * 4));
+ shadow_protect_win(win, 0);
/* Check the enabled state to see if we need to be running the
* main LCD interface, as if there are no active windows then
@@ -845,8 +874,13 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info)
/* we're stuck with this until we can do something about overriding
* the power control using the blanking event for a single fb.
*/
- if (index == sfb->pdata->default_win)
+ if (index == sfb->pdata->default_win) {
+ shadow_protect_win(win, 1);
s3c_fb_enable(sfb, blank_mode != FB_BLANK_POWERDOWN ? 1 : 0);
+ shadow_protect_win(win, 0);
+ }
+
+ pm_runtime_put_sync(sfb->dev);
return 0;
}
@@ -870,6 +904,8 @@ static int s3c_fb_pan_display(struct fb_var_screeninfo *var,
void __iomem *buf = sfb->regs + win->index * 8;
unsigned int start_boff, end_boff;
+ pm_runtime_get_sync(sfb->dev);
+
/* Offset in bytes to the start of the displayed area */
start_boff = var->yoffset * info->fix.line_length;
/* X offset depends on the current bpp */
@@ -888,6 +924,7 @@ static int s3c_fb_pan_display(struct fb_var_screeninfo *var,
break;
default:
dev_err(sfb->dev, "invalid bpp\n");
+ pm_runtime_put_sync(sfb->dev);
return -EINVAL;
}
}
@@ -903,6 +940,7 @@ static int s3c_fb_pan_display(struct fb_var_screeninfo *var,
shadow_protect_win(win, 0);
+ pm_runtime_put_sync(sfb->dev);
return 0;
}
@@ -992,11 +1030,16 @@ static int s3c_fb_wait_for_vsync(struct s3c_fb *sfb, u32 crtc)
if (crtc != 0)
return -ENODEV;
+ pm_runtime_get_sync(sfb->dev);
+
count = sfb->vsync_info.count;
s3c_fb_enable_irq(sfb);
ret = wait_event_interruptible_timeout(sfb->vsync_info.wait,
count != sfb->vsync_info.count,
msecs_to_jiffies(VSYNC_TIMEOUT_MSEC));
+
+ pm_runtime_put_sync(sfb->dev);
+
if (ret == 0)
return -ETIMEDOUT;
@@ -1027,30 +1070,8 @@ static int s3c_fb_ioctl(struct fb_info *info, unsigned int cmd,
return ret;
}
-static int s3c_fb_open(struct fb_info *info, int user)
-{
- struct s3c_fb_win *win = info->par;
- struct s3c_fb *sfb = win->parent;
-
- pm_runtime_get_sync(sfb->dev);
-
- return 0;
-}
-
-static int s3c_fb_release(struct fb_info *info, int user)
-{
- struct s3c_fb_win *win = info->par;
- struct s3c_fb *sfb = win->parent;
-
- pm_runtime_put_sync(sfb->dev);
-
- return 0;
-}
-
static struct fb_ops s3c_fb_ops = {
.owner = THIS_MODULE,
- .fb_open = s3c_fb_open,
- .fb_release = s3c_fb_release,
.fb_check_var = s3c_fb_check_var,
.fb_set_par = s3c_fb_set_par,
.fb_blank = s3c_fb_blank,
@@ -1452,7 +1473,7 @@ static int __devinit s3c_fb_probe(struct platform_device *pdev)
dev_err(dev, "failed to create window %d\n", win);
for (; win >= 0; win--)
s3c_fb_release_win(sfb, sfb->windows[win]);
- goto err_irq;
+ goto err_pm_runtime;
}
}
@@ -1461,7 +1482,8 @@ static int __devinit s3c_fb_probe(struct platform_device *pdev)
return 0;
-err_irq:
+err_pm_runtime:
+ pm_runtime_put_sync(sfb->dev);
free_irq(sfb->irq_no, sfb);
err_ioremap:
@@ -1471,6 +1493,8 @@ err_req_region:
release_mem_region(sfb->regs_res->start, resource_size(sfb->regs_res));
err_lcd_clk:
+ pm_runtime_disable(sfb->dev);
+
if (!sfb->variant.has_clksel) {
clk_disable(sfb->lcd_clk);
clk_put(sfb->lcd_clk);
@@ -1524,7 +1548,7 @@ static int __devexit s3c_fb_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int s3c_fb_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -1571,10 +1595,15 @@ static int s3c_fb_resume(struct device *dev)
for (win_no = 0; win_no < sfb->variant.nr_windows - 1; win_no++) {
void __iomem *regs = sfb->regs + sfb->variant.keycon;
+ win = sfb->windows[win_no];
+ if (!win)
+ continue;
+ shadow_protect_win(win, 1);
regs += (win_no * 8);
writel(0xffffff, regs + WKEYCON0);
writel(0xffffff, regs + WKEYCON1);
+ shadow_protect_win(win, 0);
}
/* restore framebuffers */
@@ -1589,27 +1618,19 @@ static int s3c_fb_resume(struct device *dev)
return 0;
}
+#endif
+#ifdef CONFIG_PM_RUNTIME
static int s3c_fb_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct s3c_fb *sfb = platform_get_drvdata(pdev);
- struct s3c_fb_win *win;
- int win_no;
-
- for (win_no = S3C_FB_MAX_WIN - 1; win_no >= 0; win_no--) {
- win = sfb->windows[win_no];
- if (!win)
- continue;
-
- /* use the blank function to push into power-down */
- s3c_fb_blank(FB_BLANK_POWERDOWN, win->fbinfo);
- }
if (!sfb->variant.has_clksel)
clk_disable(sfb->lcd_clk);
clk_disable(sfb->bus_clk);
+
return 0;
}
@@ -1618,8 +1639,6 @@ static int s3c_fb_runtime_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
struct s3c_fb *sfb = platform_get_drvdata(pdev);
struct s3c_fb_platdata *pd = sfb->pdata;
- struct s3c_fb_win *win;
- int win_no;
clk_enable(sfb->bus_clk);
@@ -1630,39 +1649,10 @@ static int s3c_fb_runtime_resume(struct device *dev)
pd->setup_gpio();
writel(pd->vidcon1, sfb->regs + VIDCON1);
- /* zero all windows before we do anything */
- for (win_no = 0; win_no < sfb->variant.nr_windows; win_no++)
- s3c_fb_clear_win(sfb, win_no);
-
- for (win_no = 0; win_no < sfb->variant.nr_windows - 1; win_no++) {
- void __iomem *regs = sfb->regs + sfb->variant.keycon;
-
- regs += (win_no * 8);
- writel(0xffffff, regs + WKEYCON0);
- writel(0xffffff, regs + WKEYCON1);
- }
-
- /* restore framebuffers */
- for (win_no = 0; win_no < S3C_FB_MAX_WIN; win_no++) {
- win = sfb->windows[win_no];
- if (!win)
- continue;
-
- dev_dbg(&pdev->dev, "resuming window %d\n", win_no);
- s3c_fb_set_par(win->fbinfo);
- }
-
return 0;
}
-
-#else
-#define s3c_fb_suspend NULL
-#define s3c_fb_resume NULL
-#define s3c_fb_runtime_suspend NULL
-#define s3c_fb_runtime_resume NULL
#endif
-
#define VALID_BPP124 (VALID_BPP(1) | VALID_BPP(2) | VALID_BPP(4))
#define VALID_BPP1248 (VALID_BPP124 | VALID_BPP(8))
@@ -1985,10 +1975,9 @@ static struct platform_device_id s3c_fb_driver_ids[] = {
MODULE_DEVICE_TABLE(platform, s3c_fb_driver_ids);
static const struct dev_pm_ops s3cfb_pm_ops = {
- .suspend = s3c_fb_suspend,
- .resume = s3c_fb_resume,
- .runtime_suspend = s3c_fb_runtime_suspend,
- .runtime_resume = s3c_fb_runtime_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(s3c_fb_suspend, s3c_fb_resume)
+ SET_RUNTIME_PM_OPS(s3c_fb_runtime_suspend, s3c_fb_runtime_resume,
+ NULL)
};
static struct platform_driver s3c_fb_driver = {
@@ -2002,18 +1991,7 @@ static struct platform_driver s3c_fb_driver = {
},
};
-static int __init s3c_fb_init(void)
-{
- return platform_driver_register(&s3c_fb_driver);
-}
-
-static void __exit s3c_fb_cleanup(void)
-{
- platform_driver_unregister(&s3c_fb_driver);
-}
-
-module_init(s3c_fb_init);
-module_exit(s3c_fb_cleanup);
+module_platform_driver(s3c_fb_driver);
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("Samsung S3C SoC Framebuffer driver");
diff --git a/drivers/video/s3c2410fb.c b/drivers/video/s3c2410fb.c
index ee4c0df..77f34c6 100644
--- a/drivers/video/s3c2410fb.c
+++ b/drivers/video/s3c2410fb.c
@@ -26,8 +26,8 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/cpufreq.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include <asm/div64.h>
#include <asm/mach/map.h>
@@ -45,10 +45,10 @@
#ifdef CONFIG_FB_S3C2410_DEBUG
static int debug = 1;
#else
-static int debug = 0;
+static int debug;
#endif
-#define dprintk(msg...) if (debug) { printk(KERN_DEBUG "s3c2410fb: " msg); }
+#define dprintk(msg...) if (debug) printk(KERN_DEBUG "s3c2410fb: " msg);
/* useful functions */
@@ -567,11 +567,10 @@ static int s3c2410fb_blank(int blank_mode, struct fb_info *info)
tpal_reg += is_s3c2412(fbi) ? S3C2412_TPAL : S3C2410_TPAL;
- if (blank_mode == FB_BLANK_POWERDOWN) {
+ if (blank_mode == FB_BLANK_POWERDOWN)
s3c2410fb_lcd_enable(fbi, 0);
- } else {
+ else
s3c2410fb_lcd_enable(fbi, 1);
- }
if (blank_mode == FB_BLANK_UNBLANK)
writel(0x0, tpal_reg);
@@ -812,7 +811,7 @@ static inline void s3c2410fb_cpufreq_deregister(struct s3c2410fb_info *info)
#endif
-static char driver_name[] = "s3c2410fb";
+static const char driver_name[] = "s3c2410fb";
static int __devinit s3c24xxfb_probe(struct platform_device *pdev,
enum s3c_drv_type drv_type)
@@ -881,7 +880,10 @@ static int __devinit s3c24xxfb_probe(struct platform_device *pdev,
goto release_mem;
}
- info->irq_base = info->io + ((drv_type == DRV_S3C2412) ? S3C2412_LCDINTBASE : S3C2410_LCDINTBASE);
+ if (drv_type == DRV_S3C2412)
+ info->irq_base = info->io + S3C2412_LCDINTBASE;
+ else
+ info->irq_base = info->io + S3C2410_LCDINTBASE;
dprintk("devinit\n");
@@ -927,7 +929,7 @@ static int __devinit s3c24xxfb_probe(struct platform_device *pdev,
clk_enable(info->clk);
dprintk("got and enabled clock\n");
- msleep(1);
+ usleep_range(1000, 1000);
info->clk_rate = clk_get_rate(info->clk);
@@ -975,9 +977,8 @@ static int __devinit s3c24xxfb_probe(struct platform_device *pdev,
/* create device files */
ret = device_create_file(&pdev->dev, &dev_attr_debug);
- if (ret) {
+ if (ret)
printk(KERN_ERR "failed to add debug attribute\n");
- }
printk(KERN_INFO "fb%d: %s frame buffer device\n",
fbinfo->node, fbinfo->fix.id);
@@ -1027,7 +1028,7 @@ static int __devexit s3c2410fb_remove(struct platform_device *pdev)
s3c2410fb_cpufreq_deregister(info);
s3c2410fb_lcd_enable(info, 0);
- msleep(1);
+ usleep_range(1000, 1000);
s3c2410fb_unmap_video_memory(fbinfo);
@@ -1064,7 +1065,7 @@ static int s3c2410fb_suspend(struct platform_device *dev, pm_message_t state)
* the LCD DMA engine is not going to get back on the bus
* before the clock goes off again (bjd) */
- msleep(1);
+ usleep_range(1000, 1000);
clk_disable(info->clk);
return 0;
@@ -1076,7 +1077,7 @@ static int s3c2410fb_resume(struct platform_device *dev)
struct s3c2410fb_info *info = fbinfo->par;
clk_enable(info->clk);
- msleep(1);
+ usleep_range(1000, 1000);
s3c2410fb_init_registers(fbinfo);
diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c
index 946a949..2c80246 100644
--- a/drivers/video/s3fb.c
+++ b/drivers/video/s3fb.c
@@ -727,7 +727,7 @@ static int s3fb_set_par(struct fb_info *info)
if (par->chip == CHIP_988_VIRGE_VX) {
vga_wcrt(par->state.vgabase, 0x50, 0x00);
vga_wcrt(par->state.vgabase, 0x67, 0x50);
-
+ msleep(10); /* screen remains blank sometimes without this */
vga_wcrt(par->state.vgabase, 0x63, (mode <= 2) ? 0x90 : 0x09);
vga_wcrt(par->state.vgabase, 0x66, 0x90);
}
@@ -901,7 +901,8 @@ static int s3fb_set_par(struct fb_info *info)
/* Set Data Transfer Position */
hsstart = ((info->var.xres + info->var.right_margin) * hmul) / 8;
- value = clamp((htotal + hsstart + 1) / 2, hsstart + 4, htotal + 1);
+ /* + 2 is needed for Virge/VX, does no harm on other cards */
+ value = clamp((htotal + hsstart + 1) / 2 + 2, hsstart + 4, htotal + 1);
svga_wcrt_multi(par->state.vgabase, s3_dtpc_regs, value);
memset_io(info->screen_base, 0x00, screen_size);
@@ -1216,6 +1217,31 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i
info->screen_size = 2 << 20;
break;
}
+ } else if (par->chip == CHIP_988_VIRGE_VX) {
+ switch ((regval & 0x60) >> 5) {
+ case 0: /* 2MB */
+ info->screen_size = 2 << 20;
+ break;
+ case 1: /* 4MB */
+ info->screen_size = 4 << 20;
+ break;
+ case 2: /* 6MB */
+ info->screen_size = 6 << 20;
+ break;
+ case 3: /* 8MB */
+ info->screen_size = 8 << 20;
+ break;
+ }
+ /* off-screen memory */
+ regval = vga_rcrt(par->state.vgabase, 0x37);
+ switch ((regval & 0x60) >> 5) {
+ case 1: /* 4MB */
+ info->screen_size -= 4 << 20;
+ break;
+ case 2: /* 2MB */
+ info->screen_size -= 2 << 20;
+ break;
+ }
} else
info->screen_size = s3_memsizes[regval >> 5] << 10;
info->fix.smem_len = info->screen_size;
diff --git a/drivers/video/sbuslib.c b/drivers/video/sbuslib.c
index 37d764a..3c1de98 100644
--- a/drivers/video/sbuslib.c
+++ b/drivers/video/sbuslib.c
@@ -76,7 +76,7 @@ int sbusfb_mmap_helper(struct sbus_mmap_map *map,
map_offset = (physbase + map[i].poff) & POFF_MASK;
break;
}
- if (!map_size){
+ if (!map_size) {
page += PAGE_SIZE;
continue;
}
diff --git a/drivers/video/sh7760fb.c b/drivers/video/sh7760fb.c
index 45e47d8..83b16e2 100644
--- a/drivers/video/sh7760fb.c
+++ b/drivers/video/sh7760fb.c
@@ -585,18 +585,7 @@ static struct platform_driver sh7760_lcdc_driver = {
.remove = __devexit_p(sh7760fb_remove),
};
-static int __init sh7760fb_init(void)
-{
- return platform_driver_register(&sh7760_lcdc_driver);
-}
-
-static void __exit sh7760fb_exit(void)
-{
- platform_driver_unregister(&sh7760_lcdc_driver);
-}
-
-module_init(sh7760fb_init);
-module_exit(sh7760fb_exit);
+module_platform_driver(sh7760_lcdc_driver);
MODULE_AUTHOR("Nobuhiro Iwamatsu, Manuel Lauss");
MODULE_DESCRIPTION("FBdev for SH7760/63 integrated LCD Controller");
diff --git a/drivers/video/sh_mipi_dsi.c b/drivers/video/sh_mipi_dsi.c
index 72ee96b..05151b8 100644
--- a/drivers/video/sh_mipi_dsi.c
+++ b/drivers/video/sh_mipi_dsi.c
@@ -8,6 +8,7 @@
* published by the Free Software Foundation.
*/
+#include <linux/bitmap.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/init.h>
@@ -41,6 +42,7 @@
#define VMCTR1 0x0020
#define VMCTR2 0x0024
#define VMLEN1 0x0028
+#define VMLEN2 0x002c
#define CMTSRTREQ 0x0070
#define CMTSRTCTR 0x00d0
@@ -51,8 +53,7 @@ struct sh_mipi {
void __iomem *base;
void __iomem *linkbase;
struct clk *dsit_clk;
- struct clk *dsip_clk;
- struct device *dev;
+ struct platform_device *pdev;
void *next_board_data;
void (*next_display_on)(void *board_data, struct fb_info *info);
@@ -124,35 +125,15 @@ static void sh_mipi_shutdown(struct platform_device *pdev)
sh_mipi_dsi_enable(mipi, false);
}
-static void mipi_display_on(void *arg, struct fb_info *info)
-{
- struct sh_mipi *mipi = arg;
-
- pm_runtime_get_sync(mipi->dev);
- sh_mipi_dsi_enable(mipi, true);
-
- if (mipi->next_display_on)
- mipi->next_display_on(mipi->next_board_data, info);
-}
-
-static void mipi_display_off(void *arg)
-{
- struct sh_mipi *mipi = arg;
-
- if (mipi->next_display_off)
- mipi->next_display_off(mipi->next_board_data);
-
- sh_mipi_dsi_enable(mipi, false);
- pm_runtime_put(mipi->dev);
-}
-
static int __init sh_mipi_setup(struct sh_mipi *mipi,
struct sh_mipi_dsi_info *pdata)
{
void __iomem *base = mipi->base;
struct sh_mobile_lcdc_chan_cfg *ch = pdata->lcd_chan;
- u32 pctype, datatype, pixfmt, linelength, vmctr2 = 0x00e00000;
+ u32 pctype, datatype, pixfmt, linelength, vmctr2;
+ u32 tmp, top, bottom, delay, div;
bool yuv;
+ int bpp;
/*
* Select data format. MIPI DSI is not hot-pluggable, so, we just use
@@ -253,6 +234,9 @@ static int __init sh_mipi_setup(struct sh_mipi *mipi,
(!yuv && ch->interface_type != RGB24))
return -EINVAL;
+ if (!pdata->lane)
+ return -EINVAL;
+
/* reset DSI link */
iowrite32(0x00000001, base + SYSCTRL);
/* Hold reset for 100 cycles of the slowest of bus, HS byte and LP clock */
@@ -262,15 +246,6 @@ static int __init sh_mipi_setup(struct sh_mipi *mipi,
/* setup DSI link */
/*
- * Default = ULPS enable |
- * Contention detection enabled |
- * EoT packet transmission enable |
- * CRC check enable |
- * ECC check enable
- * additionally enable first two lanes
- */
- iowrite32(0x00003703, base + SYSCONF);
- /*
* T_wakeup = 0x7000
* T_hs-trail = 3
* T_hs-prepare = 3
@@ -290,15 +265,24 @@ static int __init sh_mipi_setup(struct sh_mipi *mipi,
iowrite32(0x0fffffff, base + TATOVSET);
/* Peripheral reset timeout, default 0xffffffff */
iowrite32(0x0fffffff, base + PRTOVSET);
- /* Enable timeout counters */
- iowrite32(0x00000f00, base + DSICTRL);
/* Interrupts not used, disable all */
iowrite32(0, base + DSIINTE);
/* DSI-Tx bias on */
iowrite32(0x00000001, base + PHYCTRL);
udelay(200);
- /* Deassert resets, power on, set multiplier */
- iowrite32(0x03070b01, base + PHYCTRL);
+ /* Deassert resets, power on */
+ iowrite32(0x03070001, base + PHYCTRL);
+
+ /*
+ * Default = ULPS enable |
+ * Contention detection enabled |
+ * EoT packet transmission enable |
+ * CRC check enable |
+ * ECC check enable
+ */
+ bitmap_fill((unsigned long *)&tmp, pdata->lane);
+ tmp |= 0x00003700;
+ iowrite32(tmp, base + SYSCONF);
/* setup l-bridge */
@@ -316,18 +300,68 @@ static int __init sh_mipi_setup(struct sh_mipi *mipi,
* Non-burst mode with sync pulses: VSE and HSE are output,
* HSA period allowed, no commands in LP
*/
+ vmctr2 = 0;
+ if (pdata->flags & SH_MIPI_DSI_VSEE)
+ vmctr2 |= 1 << 23;
+ if (pdata->flags & SH_MIPI_DSI_HSEE)
+ vmctr2 |= 1 << 22;
+ if (pdata->flags & SH_MIPI_DSI_HSAE)
+ vmctr2 |= 1 << 21;
+ if (pdata->flags & SH_MIPI_DSI_BL2E)
+ vmctr2 |= 1 << 17;
if (pdata->flags & SH_MIPI_DSI_HSABM)
- vmctr2 |= 0x20;
- if (pdata->flags & SH_MIPI_DSI_HSPBM)
- vmctr2 |= 0x10;
+ vmctr2 |= 1 << 5;
+ if (pdata->flags & SH_MIPI_DSI_HBPBM)
+ vmctr2 |= 1 << 4;
+ if (pdata->flags & SH_MIPI_DSI_HFPBM)
+ vmctr2 |= 1 << 3;
iowrite32(vmctr2, mipi->linkbase + VMCTR2);
/*
- * 0x660 = 1632 bytes per line (RGB24, 544 pixels: see
- * sh_mobile_lcdc_info.ch[0].lcd_cfg[0].xres), HSALEN = 1 - default
- * (unused if VMCTR2[HSABM] = 0)
+ * VMLEN1 = RGBLEN | HSALEN
+ *
+ * see
+ * Video mode - Blanking Packet setting
+ */
+ top = linelength << 16; /* RGBLEN */
+ bottom = 0x00000001;
+ if (pdata->flags & SH_MIPI_DSI_HSABM) /* HSALEN */
+ bottom = (pdata->lane * ch->lcd_cfg[0].hsync_len) - 10;
+ iowrite32(top | bottom , mipi->linkbase + VMLEN1);
+
+ /*
+ * VMLEN2 = HBPLEN | HFPLEN
+ *
+ * see
+ * Video mode - Blanking Packet setting
*/
- iowrite32(1 | (linelength << 16), mipi->linkbase + VMLEN1);
+ top = 0x00010000;
+ bottom = 0x00000001;
+ delay = 0;
+
+ div = 1; /* HSbyteCLK is the calculation base;
+ * HS4divCLK = HSbyteCLK/2;
+ * HS6divCLK is not supported for now */
+ if (pdata->flags & SH_MIPI_DSI_HS4divCLK)
+ div = 2;
+
+ if (pdata->flags & SH_MIPI_DSI_HFPBM) { /* HBPLEN */
+ top = ch->lcd_cfg[0].hsync_len + ch->lcd_cfg[0].left_margin;
+ top = ((pdata->lane * top / div) - 10) << 16;
+ }
+ if (pdata->flags & SH_MIPI_DSI_HBPBM) { /* HFPLEN */
+ bottom = ch->lcd_cfg[0].right_margin;
+ bottom = (pdata->lane * bottom / div) - 12;
+ }
+
+ bpp = linelength / ch->lcd_cfg[0].xres; /* byte / pixel */
+ if ((pdata->lane / div) > bpp) {
+ tmp = ch->lcd_cfg[0].xres / bpp; /* output cycle */
+ tmp = ch->lcd_cfg[0].xres - tmp; /* (input - output) cycle */
+ delay = (pdata->lane * tmp);
+ }
+
+ iowrite32(top | (bottom + delay) , mipi->linkbase + VMLEN2);
msleep(5);
@@ -352,9 +386,56 @@ static int __init sh_mipi_setup(struct sh_mipi *mipi,
pixfmt << 4);
sh_mipi_dcs(ch->chan, MIPI_DCS_SET_DISPLAY_ON);
+ /* Enable timeout counters */
+ iowrite32(0x00000f00, base + DSICTRL);
+
return 0;
}
+static void mipi_display_on(void *arg, struct fb_info *info)
+{
+ struct sh_mipi *mipi = arg;
+ struct sh_mipi_dsi_info *pdata = mipi->pdev->dev.platform_data;
+ int ret;
+
+ pm_runtime_get_sync(&mipi->pdev->dev);
+
+ ret = pdata->set_dot_clock(mipi->pdev, mipi->base, 1);
+ if (ret < 0)
+ goto mipi_display_on_fail1;
+
+ ret = sh_mipi_setup(mipi, pdata);
+ if (ret < 0)
+ goto mipi_display_on_fail2;
+
+ sh_mipi_dsi_enable(mipi, true);
+
+ if (mipi->next_display_on)
+ mipi->next_display_on(mipi->next_board_data, info);
+
+ return;
+
+mipi_display_on_fail2:
+ pdata->set_dot_clock(mipi->pdev, mipi->base, 0);
+mipi_display_on_fail1:
+ pm_runtime_put_sync(&mipi->pdev->dev);
+}
+
+static void mipi_display_off(void *arg)
+{
+ struct sh_mipi *mipi = arg;
+ struct sh_mipi_dsi_info *pdata = mipi->pdev->dev.platform_data;
+
+ if (mipi->next_display_off)
+ mipi->next_display_off(mipi->next_board_data);
+
+ sh_mipi_dsi_enable(mipi, false);
+
+ pdata->set_dot_clock(mipi->pdev, mipi->base, 0);
+
+ pm_runtime_put_sync(&mipi->pdev->dev);
+}
+
static int __init sh_mipi_probe(struct platform_device *pdev)
{
struct sh_mipi *mipi;
@@ -363,11 +444,13 @@ static int __init sh_mipi_probe(struct platform_device *pdev)
struct resource *res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
unsigned long rate, f_current;
int idx = pdev->id, ret;
- char dsip_clk[] = "dsi.p_clk";
if (!res || !res2 || idx >= ARRAY_SIZE(mipi_dsi) || !pdata)
return -ENODEV;
+ if (!pdata->set_dot_clock)
+ return -EINVAL;
+
mutex_lock(&array_lock);
if (idx < 0)
for (idx = 0; idx < ARRAY_SIZE(mipi_dsi) && mipi_dsi[idx]; idx++)
@@ -408,7 +491,7 @@ static int __init sh_mipi_probe(struct platform_device *pdev)
goto emap2;
}
- mipi->dev = &pdev->dev;
+ mipi->pdev = pdev;
mipi->dsit_clk = clk_get(&pdev->dev, "dsit_clk");
if (IS_ERR(mipi->dsit_clk)) {
@@ -428,44 +511,15 @@ static int __init sh_mipi_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "DSI-T clk %lu -> %lu\n", f_current, rate);
- sprintf(dsip_clk, "dsi%1.1dp_clk", idx);
- mipi->dsip_clk = clk_get(&pdev->dev, dsip_clk);
- if (IS_ERR(mipi->dsip_clk)) {
- ret = PTR_ERR(mipi->dsip_clk);
- goto eclkpget;
- }
-
- f_current = clk_get_rate(mipi->dsip_clk);
- /* Between 10 and 50MHz */
- rate = clk_round_rate(mipi->dsip_clk, 24000000);
- if (rate > 0 && rate != f_current)
- ret = clk_set_rate(mipi->dsip_clk, rate);
- else
- ret = rate;
- if (ret < 0)
- goto esetprate;
-
- dev_dbg(&pdev->dev, "DSI-P clk %lu -> %lu\n", f_current, rate);
-
- msleep(10);
-
ret = clk_enable(mipi->dsit_clk);
if (ret < 0)
goto eclkton;
- ret = clk_enable(mipi->dsip_clk);
- if (ret < 0)
- goto eclkpon;
-
mipi_dsi[idx] = mipi;
pm_runtime_enable(&pdev->dev);
pm_runtime_resume(&pdev->dev);
- ret = sh_mipi_setup(mipi, pdata);
- if (ret < 0)
- goto emipisetup;
-
mutex_unlock(&array_lock);
platform_set_drvdata(pdev, mipi);
@@ -482,16 +536,7 @@ static int __init sh_mipi_probe(struct platform_device *pdev)
return 0;
-emipisetup:
- mipi_dsi[idx] = NULL;
- pm_runtime_disable(&pdev->dev);
- clk_disable(mipi->dsip_clk);
-eclkpon:
- clk_disable(mipi->dsit_clk);
eclkton:
-esetprate:
- clk_put(mipi->dsip_clk);
-eclkpget:
esettrate:
clk_put(mipi->dsit_clk);
eclktget:
@@ -542,10 +587,9 @@ static int __exit sh_mipi_remove(struct platform_device *pdev)
pdata->lcd_chan->board_cfg.board_data = NULL;
pm_runtime_disable(&pdev->dev);
- clk_disable(mipi->dsip_clk);
clk_disable(mipi->dsit_clk);
clk_put(mipi->dsit_clk);
- clk_put(mipi->dsip_clk);
+
iounmap(mipi->linkbase);
if (res2)
release_mem_region(res2->start, resource_size(res2));
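
In the sh_mipi_dsi.c hunks above, SYSCONF is now built from a lane mask (bitmap_fill() over the low pdata->lane bits) OR'd with the fixed 0x3700 control bits, instead of the hard-coded two-lane value 0x3703. A plain C sketch of that computation, using an explicit shift in place of bitmap_fill():

/*
 * Sketch of the SYSCONF value computed in sh_mipi_setup(): the low
 * 'lanes' bits enable the data lanes, the fixed 0x3700 bits keep ULPS,
 * contention detection, EoT, CRC and ECC enabled (per the comment in
 * the hunk above).  Stand-alone C, not driver code.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t mipi_sysconf(unsigned int lanes)
{
	uint32_t lane_mask = (lanes >= 32) ? 0xffffffffu : ((1u << lanes) - 1);

	return lane_mask | 0x00003700u;
}

int main(void)
{
	unsigned int lanes;

	for (lanes = 1; lanes <= 4; lanes++)
		printf("%u lanes -> SYSCONF = 0x%08x\n", lanes, mipi_sysconf(lanes));
	return 0;
}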
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index facffc2..aac5b36 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -17,6 +17,7 @@
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
@@ -102,7 +103,7 @@ struct sh_mobile_lcdc_priv {
struct sh_mobile_lcdc_chan ch[2];
struct notifier_block notifier;
int started;
- int forced_bpp; /* 2 channel LCDC must share bpp setting */
+ int forced_fourcc; /* 2 channel LCDC must share fourcc setting */
struct sh_mobile_meram_info *meram_dev;
};
@@ -215,6 +216,47 @@ struct sh_mobile_lcdc_sys_bus_ops sh_mobile_lcdc_sys_bus_ops = {
lcdc_sys_read_data,
};
+static int sh_mobile_format_fourcc(const struct fb_var_screeninfo *var)
+{
+ if (var->grayscale > 1)
+ return var->grayscale;
+
+ switch (var->bits_per_pixel) {
+ case 16:
+ return V4L2_PIX_FMT_RGB565;
+ case 24:
+ return V4L2_PIX_FMT_BGR24;
+ case 32:
+ return V4L2_PIX_FMT_BGR32;
+ default:
+ return 0;
+ }
+}
+
+static int sh_mobile_format_is_fourcc(const struct fb_var_screeninfo *var)
+{
+ return var->grayscale > 1;
+}
+
+static bool sh_mobile_format_is_yuv(const struct fb_var_screeninfo *var)
+{
+ if (var->grayscale <= 1)
+ return false;
+
+ switch (var->grayscale) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ case V4L2_PIX_FMT_NV24:
+ case V4L2_PIX_FMT_NV42:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
static void sh_mobile_lcdc_clk_on(struct sh_mobile_lcdc_priv *priv)
{
if (atomic_inc_and_test(&priv->hw_usecnt)) {
@@ -420,7 +462,7 @@ static void sh_mobile_lcdc_geometry(struct sh_mobile_lcdc_chan *ch)
tmp = ((display_var->xres & 7) << 24) |
((display_h_total & 7) << 16) |
((display_var->hsync_len & 7) << 8) |
- hsync_pos;
+ (hsync_pos & 7);
lcdc_write_chan(ch, LDHAJR, tmp);
}
@@ -435,7 +477,6 @@ static void __sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
{
struct sh_mobile_lcdc_chan *ch;
unsigned long tmp;
- int bpp = 0;
int k, m;
/* Enable LCDC channels. Read data from external memory, avoid using the
@@ -454,9 +495,6 @@ static void __sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
if (!ch->enabled)
continue;
- if (!bpp)
- bpp = ch->info->var.bits_per_pixel;
-
/* Power supply */
lcdc_write_chan(ch, LDPMR, 0);
@@ -487,31 +525,37 @@ static void __sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
sh_mobile_lcdc_geometry(ch);
- if (ch->info->var.nonstd) {
- tmp = (ch->info->var.nonstd << 16);
- switch (ch->info->var.bits_per_pixel) {
- case 12:
- tmp |= LDDFR_YF_420;
- break;
- case 16:
- tmp |= LDDFR_YF_422;
- break;
- case 24:
- default:
- tmp |= LDDFR_YF_444;
- break;
- }
- } else {
- switch (ch->info->var.bits_per_pixel) {
- case 16:
- tmp = LDDFR_PKF_RGB16;
- break;
- case 24:
- tmp = LDDFR_PKF_RGB24;
+ switch (sh_mobile_format_fourcc(&ch->info->var)) {
+ case V4L2_PIX_FMT_RGB565:
+ tmp = LDDFR_PKF_RGB16;
+ break;
+ case V4L2_PIX_FMT_BGR24:
+ tmp = LDDFR_PKF_RGB24;
+ break;
+ case V4L2_PIX_FMT_BGR32:
+ tmp = LDDFR_PKF_ARGB32;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ tmp = LDDFR_CC | LDDFR_YF_420;
+ break;
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ tmp = LDDFR_CC | LDDFR_YF_422;
+ break;
+ case V4L2_PIX_FMT_NV24:
+ case V4L2_PIX_FMT_NV42:
+ tmp = LDDFR_CC | LDDFR_YF_444;
+ break;
+ }
+
+ if (sh_mobile_format_is_yuv(&ch->info->var)) {
+ switch (ch->info->var.colorspace) {
+ case V4L2_COLORSPACE_REC709:
+ tmp |= LDDFR_CF1;
break;
- case 32:
- default:
- tmp = LDDFR_PKF_ARGB32;
+ case V4L2_COLORSPACE_JPEG:
+ tmp |= LDDFR_CF0;
break;
}
}
@@ -519,7 +563,7 @@ static void __sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
lcdc_write_chan(ch, LDDFR, tmp);
lcdc_write_chan(ch, LDMLSR, ch->pitch);
lcdc_write_chan(ch, LDSA1R, ch->base_addr_y);
- if (ch->info->var.nonstd)
+ if (sh_mobile_format_is_yuv(&ch->info->var))
lcdc_write_chan(ch, LDSA2R, ch->base_addr_c);
/* When using deferred I/O mode, configure the LCDC for one-shot
@@ -536,21 +580,23 @@ static void __sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
}
/* Word and long word swap. */
- if (priv->ch[0].info->var.nonstd)
+ switch (sh_mobile_format_fourcc(&priv->ch[0].info->var)) {
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV61:
+ case V4L2_PIX_FMT_NV42:
+ tmp = LDDDSR_LS | LDDDSR_WS;
+ break;
+ case V4L2_PIX_FMT_BGR24:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV24:
tmp = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS;
- else {
- switch (bpp) {
- case 16:
- tmp = LDDDSR_LS | LDDDSR_WS;
- break;
- case 24:
- tmp = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS;
- break;
- case 32:
- default:
- tmp = LDDDSR_LS;
- break;
- }
+ break;
+ case V4L2_PIX_FMT_BGR32:
+ default:
+ tmp = LDDDSR_LS;
+ break;
}
lcdc_write(priv, _LDDDSR, tmp);
@@ -622,12 +668,24 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
ch->meram_enabled = 0;
}
- if (!ch->info->var.nonstd)
- pixelformat = SH_MOBILE_MERAM_PF_RGB;
- else if (ch->info->var.bits_per_pixel == 24)
- pixelformat = SH_MOBILE_MERAM_PF_NV24;
- else
+ switch (sh_mobile_format_fourcc(&ch->info->var)) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
pixelformat = SH_MOBILE_MERAM_PF_NV;
+ break;
+ case V4L2_PIX_FMT_NV24:
+ case V4L2_PIX_FMT_NV42:
+ pixelformat = SH_MOBILE_MERAM_PF_NV24;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_BGR24:
+ case V4L2_PIX_FMT_BGR32:
+ default:
+ pixelformat = SH_MOBILE_MERAM_PF_RGB;
+ break;
+ }
ret = mdev->ops->meram_register(mdev, cfg, ch->pitch,
ch->info->var.yres, pixelformat,
@@ -845,6 +903,7 @@ static struct fb_fix_screeninfo sh_mobile_lcdc_fix = {
.xpanstep = 0,
.ypanstep = 1,
.ywrapstep = 0,
+ .capabilities = FB_CAP_FOURCC,
};
static void sh_mobile_lcdc_fillrect(struct fb_info *info,
@@ -877,8 +936,9 @@ static int sh_mobile_fb_pan_display(struct fb_var_screeninfo *var,
unsigned long new_pan_offset;
unsigned long base_addr_y, base_addr_c;
unsigned long c_offset;
+ bool yuv = sh_mobile_format_is_yuv(&info->var);
- if (!info->var.nonstd)
+ if (!yuv)
new_pan_offset = var->yoffset * info->fix.line_length
+ var->xoffset * (info->var.bits_per_pixel / 8);
else
@@ -892,7 +952,7 @@ static int sh_mobile_fb_pan_display(struct fb_var_screeninfo *var,
/* Set the source address for the next refresh */
base_addr_y = ch->dma_handle + new_pan_offset;
- if (info->var.nonstd) {
+ if (yuv) {
/* Set y offset */
c_offset = var->yoffset * info->fix.line_length
* (info->var.bits_per_pixel - 8) / 8;
@@ -900,7 +960,7 @@ static int sh_mobile_fb_pan_display(struct fb_var_screeninfo *var,
+ info->var.xres * info->var.yres_virtual
+ c_offset;
/* Set x offset */
- if (info->var.bits_per_pixel == 24)
+ if (sh_mobile_format_fourcc(&info->var) == V4L2_PIX_FMT_NV24)
base_addr_c += 2 * var->xoffset;
else
base_addr_c += var->xoffset;
@@ -924,7 +984,7 @@ static int sh_mobile_fb_pan_display(struct fb_var_screeninfo *var,
ch->base_addr_c = base_addr_c;
lcdc_write_chan_mirror(ch, LDSA1R, base_addr_y);
- if (info->var.nonstd)
+ if (yuv)
lcdc_write_chan_mirror(ch, LDSA2R, base_addr_c);
if (lcdc_chan_is_sublcd(ch))
@@ -1100,51 +1160,84 @@ static int sh_mobile_check_var(struct fb_var_screeninfo *var, struct fb_info *in
if (var->yres_virtual < var->yres)
var->yres_virtual = var->yres;
- if (var->bits_per_pixel <= 16) { /* RGB 565 */
- var->bits_per_pixel = 16;
- var->red.offset = 11;
- var->red.length = 5;
- var->green.offset = 5;
- var->green.length = 6;
- var->blue.offset = 0;
- var->blue.length = 5;
- var->transp.offset = 0;
- var->transp.length = 0;
- } else if (var->bits_per_pixel <= 24) { /* RGB 888 */
- var->bits_per_pixel = 24;
- var->red.offset = 16;
- var->red.length = 8;
- var->green.offset = 8;
- var->green.length = 8;
- var->blue.offset = 0;
- var->blue.length = 8;
- var->transp.offset = 0;
- var->transp.length = 0;
- } else if (var->bits_per_pixel <= 32) { /* RGBA 888 */
- var->bits_per_pixel = 32;
- var->red.offset = 16;
- var->red.length = 8;
- var->green.offset = 8;
- var->green.length = 8;
- var->blue.offset = 0;
- var->blue.length = 8;
- var->transp.offset = 24;
- var->transp.length = 8;
- } else
- return -EINVAL;
+ if (sh_mobile_format_is_fourcc(var)) {
+ switch (var->grayscale) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ var->bits_per_pixel = 12;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ var->bits_per_pixel = 16;
+ break;
+ case V4L2_PIX_FMT_BGR24:
+ case V4L2_PIX_FMT_NV24:
+ case V4L2_PIX_FMT_NV42:
+ var->bits_per_pixel = 24;
+ break;
+ case V4L2_PIX_FMT_BGR32:
+ var->bits_per_pixel = 32;
+ break;
+ default:
+ return -EINVAL;
+ }
- var->red.msb_right = 0;
- var->green.msb_right = 0;
- var->blue.msb_right = 0;
- var->transp.msb_right = 0;
+ /* Default to RGB and JPEG color-spaces for RGB and YUV formats
+ * respectively.
+ */
+ if (!sh_mobile_format_is_yuv(var))
+ var->colorspace = V4L2_COLORSPACE_SRGB;
+ else if (var->colorspace != V4L2_COLORSPACE_REC709)
+ var->colorspace = V4L2_COLORSPACE_JPEG;
+ } else {
+ if (var->bits_per_pixel <= 16) { /* RGB 565 */
+ var->bits_per_pixel = 16;
+ var->red.offset = 11;
+ var->red.length = 5;
+ var->green.offset = 5;
+ var->green.length = 6;
+ var->blue.offset = 0;
+ var->blue.length = 5;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ } else if (var->bits_per_pixel <= 24) { /* RGB 888 */
+ var->bits_per_pixel = 24;
+ var->red.offset = 16;
+ var->red.length = 8;
+ var->green.offset = 8;
+ var->green.length = 8;
+ var->blue.offset = 0;
+ var->blue.length = 8;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ } else if (var->bits_per_pixel <= 32) { /* RGBA 888 */
+ var->bits_per_pixel = 32;
+ var->red.offset = 16;
+ var->red.length = 8;
+ var->green.offset = 8;
+ var->green.length = 8;
+ var->blue.offset = 0;
+ var->blue.length = 8;
+ var->transp.offset = 24;
+ var->transp.length = 8;
+ } else
+ return -EINVAL;
+
+ var->red.msb_right = 0;
+ var->green.msb_right = 0;
+ var->blue.msb_right = 0;
+ var->transp.msb_right = 0;
+ }
/* Make sure we don't exceed our allocated memory. */
if (var->xres_virtual * var->yres_virtual * var->bits_per_pixel / 8 >
info->fix.smem_len)
return -EINVAL;
- /* only accept the forced_bpp for dual channel configurations */
- if (p->forced_bpp && p->forced_bpp != var->bits_per_pixel)
+ /* only accept the forced_fourcc for dual channel configurations */
+ if (p->forced_fourcc &&
+ p->forced_fourcc != sh_mobile_format_fourcc(var))
return -EINVAL;
return 0;
@@ -1158,7 +1251,7 @@ static int sh_mobile_set_par(struct fb_info *info)
sh_mobile_lcdc_stop(ch->lcdc);
- if (info->var.nonstd)
+ if (sh_mobile_format_is_yuv(&info->var))
info->fix.line_length = info->var.xres;
else
info->fix.line_length = info->var.xres
@@ -1170,6 +1263,14 @@ static int sh_mobile_set_par(struct fb_info *info)
info->fix.line_length = line_length;
}
+ if (sh_mobile_format_is_fourcc(&info->var)) {
+ info->fix.type = FB_TYPE_FOURCC;
+ info->fix.visual = FB_VISUAL_FOURCC;
+ } else {
+ info->fix.type = FB_TYPE_PACKED_PIXELS;
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ }
+
return ret;
}
@@ -1464,9 +1565,9 @@ static int __devinit sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_chan *ch,
for (i = 0, mode = cfg->lcd_cfg; i < cfg->num_cfg; i++, mode++) {
unsigned int size = mode->yres * mode->xres;
- /* NV12 buffers must have even number of lines */
- if ((cfg->nonstd) && cfg->bpp == 12 &&
- (mode->yres & 0x1)) {
+ /* NV12/NV21 buffers must have even number of lines */
+ if ((cfg->fourcc == V4L2_PIX_FMT_NV12 ||
+ cfg->fourcc == V4L2_PIX_FMT_NV21) && (mode->yres & 0x1)) {
dev_err(dev, "yres must be multiple of 2 for YCbCr420 "
"mode.\n");
return -EINVAL;
@@ -1484,14 +1585,6 @@ static int __devinit sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_chan *ch,
dev_dbg(dev, "Found largest videomode %ux%u\n",
max_mode->xres, max_mode->yres);
- /* Initialize fixed screen information. Restrict pan to 2 lines steps
- * for NV12.
- */
- info->fix = sh_mobile_lcdc_fix;
- info->fix.smem_len = max_size * 2 * cfg->bpp / 8;
- if (cfg->nonstd && cfg->bpp == 12)
- info->fix.ypanstep = 2;
-
/* Create the mode list. */
if (cfg->lcd_cfg == NULL) {
mode = &default_720p;
@@ -1509,19 +1602,38 @@ static int __devinit sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_chan *ch,
*/
var = &info->var;
fb_videomode_to_var(var, mode);
- var->bits_per_pixel = cfg->bpp;
var->width = cfg->lcd_size_cfg.width;
var->height = cfg->lcd_size_cfg.height;
var->yres_virtual = var->yres * 2;
var->activate = FB_ACTIVATE_NOW;
+ switch (cfg->fourcc) {
+ case V4L2_PIX_FMT_RGB565:
+ var->bits_per_pixel = 16;
+ break;
+ case V4L2_PIX_FMT_BGR24:
+ var->bits_per_pixel = 24;
+ break;
+ case V4L2_PIX_FMT_BGR32:
+ var->bits_per_pixel = 32;
+ break;
+ default:
+ var->grayscale = cfg->fourcc;
+ break;
+ }
+
+ /* Make sure the memory size check won't fail. smem_len is initialized
+ * later based on var.
+ */
+ info->fix.smem_len = UINT_MAX;
ret = sh_mobile_check_var(var, info);
if (ret)
return ret;
+ max_size = max_size * var->bits_per_pixel / 8 * 2;
+
/* Allocate frame buffer memory and color map. */
- buf = dma_alloc_coherent(dev, info->fix.smem_len, &ch->dma_handle,
- GFP_KERNEL);
+ buf = dma_alloc_coherent(dev, max_size, &ch->dma_handle, GFP_KERNEL);
if (!buf) {
dev_err(dev, "unable to allocate buffer\n");
return -ENOMEM;
@@ -1530,16 +1642,27 @@ static int __devinit sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_chan *ch,
ret = fb_alloc_cmap(&info->cmap, PALETTE_NR, 0);
if (ret < 0) {
dev_err(dev, "unable to allocate cmap\n");
- dma_free_coherent(dev, info->fix.smem_len,
- buf, ch->dma_handle);
+ dma_free_coherent(dev, max_size, buf, ch->dma_handle);
return ret;
}
+ /* Initialize fixed screen information. Restrict pan to 2-line steps
+ * for NV12 and NV21.
+ */
+ info->fix = sh_mobile_lcdc_fix;
info->fix.smem_start = ch->dma_handle;
- if (var->nonstd)
+ info->fix.smem_len = max_size;
+ if (cfg->fourcc == V4L2_PIX_FMT_NV12 ||
+ cfg->fourcc == V4L2_PIX_FMT_NV21)
+ info->fix.ypanstep = 2;
+
+ if (sh_mobile_format_is_yuv(var)) {
info->fix.line_length = var->xres;
- else
- info->fix.line_length = var->xres * (cfg->bpp / 8);
+ info->fix.visual = FB_VISUAL_FOURCC;
+ } else {
+ info->fix.line_length = var->xres * var->bits_per_pixel / 8;
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ }
info->screen_base = buf;
info->device = dev;
@@ -1626,9 +1749,9 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
goto err1;
}
- /* for dual channel LCDC (MAIN + SUB) force shared bpp setting */
+ /* for dual channel LCDC (MAIN + SUB) force shared format setting */
if (num_channels == 2)
- priv->forced_bpp = pdata->ch[0].bpp;
+ priv->forced_fourcc = pdata->ch[0].fourcc;
priv->base = ioremap_nocache(res->start, resource_size(res));
if (!priv->base)
@@ -1675,13 +1798,10 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev)
if (error < 0)
goto err1;
- dev_info(info->dev,
- "registered %s/%s as %dx%d %dbpp.\n",
- pdev->name,
- (ch->cfg.chan == LCDC_CHAN_MAINLCD) ?
- "mainlcd" : "sublcd",
- info->var.xres, info->var.yres,
- ch->cfg.bpp);
+ dev_info(info->dev, "registered %s/%s as %dx%d %dbpp.\n",
+ pdev->name, (ch->cfg.chan == LCDC_CHAN_MAINLCD) ?
+ "mainlcd" : "sublcd", info->var.xres, info->var.yres,
+ info->var.bits_per_pixel);
/* deferred io mode: disable clock to save power */
if (info->fbdefio || info->state == FBINFO_STATE_SUSPENDED)
@@ -1709,18 +1829,7 @@ static struct platform_driver sh_mobile_lcdc_driver = {
.remove = sh_mobile_lcdc_remove,
};
-static int __init sh_mobile_lcdc_init(void)
-{
- return platform_driver_register(&sh_mobile_lcdc_driver);
-}
-
-static void __exit sh_mobile_lcdc_exit(void)
-{
- platform_driver_unregister(&sh_mobile_lcdc_driver);
-}
-
-module_init(sh_mobile_lcdc_init);
-module_exit(sh_mobile_lcdc_exit);
+module_platform_driver(sh_mobile_lcdc_driver);
MODULE_DESCRIPTION("SuperH Mobile LCDC Framebuffer driver");
MODULE_AUTHOR("Magnus Damm <damm@opensource.se>");
diff --git a/drivers/video/sh_mobile_meram.c b/drivers/video/sh_mobile_meram.c
index 4d63490..f45d83e 100644
--- a/drivers/video/sh_mobile_meram.c
+++ b/drivers/video/sh_mobile_meram.c
@@ -679,18 +679,7 @@ static struct platform_driver sh_mobile_meram_driver = {
.remove = sh_mobile_meram_remove,
};
-static int __init sh_mobile_meram_init(void)
-{
- return platform_driver_register(&sh_mobile_meram_driver);
-}
-
-static void __exit sh_mobile_meram_exit(void)
-{
- platform_driver_unregister(&sh_mobile_meram_driver);
-}
-
-module_init(sh_mobile_meram_init);
-module_exit(sh_mobile_meram_exit);
+module_platform_driver(sh_mobile_meram_driver);
MODULE_DESCRIPTION("SuperH Mobile MERAM driver");
MODULE_AUTHOR("Damian Hobson-Garcia / Takanari Hayama");
diff --git a/drivers/video/sm501fb.c b/drivers/video/sm501fb.c
index a78254c..3690eff 100644
--- a/drivers/video/sm501fb.c
+++ b/drivers/video/sm501fb.c
@@ -2230,18 +2230,7 @@ static struct platform_driver sm501fb_driver = {
},
};
-static int __devinit sm501fb_init(void)
-{
- return platform_driver_register(&sm501fb_driver);
-}
-
-static void __exit sm501fb_cleanup(void)
-{
- platform_driver_unregister(&sm501fb_driver);
-}
-
-module_init(sm501fb_init);
-module_exit(sm501fb_cleanup);
+module_platform_driver(sm501fb_driver);
module_param_named(mode, fb_mode, charp, 0);
MODULE_PARM_DESC(mode,
diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
index aaccffa..ccbfef5 100644
--- a/drivers/video/smscufx.c
+++ b/drivers/video/smscufx.c
@@ -130,8 +130,8 @@ static struct usb_device_id id_table[] = {
MODULE_DEVICE_TABLE(usb, id_table);
/* module options */
-static int console; /* Optionally allow fbcon to consume first framebuffer */
-static int fb_defio = true; /* Optionally enable fb_defio mmap support */
+static bool console; /* Optionally allow fbcon to consume first framebuffer */
+static bool fb_defio = true; /* Optionally enable fb_defio mmap support */
/* ufx keeps a list of urbs for efficient bulk transfers */
static void ufx_urb_completion(struct urb *urb);
@@ -1792,24 +1792,7 @@ static struct usb_driver ufx_driver = {
.id_table = id_table,
};
-static int __init ufx_module_init(void)
-{
- int res;
-
- res = usb_register(&ufx_driver);
- if (res)
- err("usb_register failed. Error number %d", res);
-
- return res;
-}
-
-static void __exit ufx_module_exit(void)
-{
- usb_deregister(&ufx_driver);
-}
-
-module_init(ufx_module_init);
-module_exit(ufx_module_exit);
+module_usb_driver(ufx_driver);
static void ufx_urb_completion(struct urb *urb)
{
diff --git a/drivers/video/sstfb.c b/drivers/video/sstfb.c
index 2301c27..111fb32 100644
--- a/drivers/video/sstfb.c
+++ b/drivers/video/sstfb.c
@@ -93,11 +93,11 @@
/* initialized by setup */
-static int vgapass; /* enable VGA passthrough cable */
+static bool vgapass; /* enable VGA passthrough cable */
static int mem; /* mem size in MB, 0 = autodetect */
-static int clipping = 1; /* use clipping (slower, safer) */
+static bool clipping = 1; /* use clipping (slower, safer) */
static int gfxclk; /* force FBI freq in Mhz . Dangerous */
-static int slowpci; /* slow PCI settings */
+static bool slowpci; /* slow PCI settings */
/*
Possible default video modes: 800x600@60, 640x480@75, 1024x768@76, 640x480@60
diff --git a/drivers/video/tdfxfb.c b/drivers/video/tdfxfb.c
index a99b994..e026724 100644
--- a/drivers/video/tdfxfb.c
+++ b/drivers/video/tdfxfb.c
@@ -169,7 +169,7 @@ static int nowrap = 1; /* not implemented (yet) */
static int hwcursor = 1;
static char *mode_option __devinitdata;
/* mtrr option */
-static int nomtrr __devinitdata;
+static bool nomtrr __devinitdata;
/* -------------------------------------------------------------------------
* Hardware-specific funcions
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
index 3473e75..a197731 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -69,9 +69,9 @@ static struct usb_device_id id_table[] = {
MODULE_DEVICE_TABLE(usb, id_table);
/* module options */
-static int console = 1; /* Allow fbcon to open framebuffer */
-static int fb_defio = 1; /* Detect mmap writes using page faults */
-static int shadow = 1; /* Optionally disable shadow framebuffer */
+static bool console = 1; /* Allow fbcon to open framebuffer */
+static bool fb_defio = 1; /* Detect mmap writes using page faults */
+static bool shadow = 1; /* Optionally disable shadow framebuffer */
/* dlfb keeps a list of urbs for efficient bulk transfers */
static void dlfb_urb_completion(struct urb *urb);
@@ -1761,24 +1761,7 @@ static struct usb_driver dlfb_driver = {
.id_table = id_table,
};
-static int __init dlfb_module_init(void)
-{
- int res;
-
- res = usb_register(&dlfb_driver);
- if (res)
- err("usb_register failed. Error number %d", res);
-
- return res;
-}
-
-static void __exit dlfb_module_exit(void)
-{
- usb_deregister(&dlfb_driver);
-}
-
-module_init(dlfb_module_init);
-module_exit(dlfb_module_exit);
+module_usb_driver(dlfb_driver);
static void dlfb_urb_completion(struct urb *urb)
{
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index 7f8472c..e7f69ef 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -44,11 +44,11 @@ static struct fb_fix_screeninfo uvesafb_fix __devinitdata = {
};
static int mtrr __devinitdata = 3; /* enable mtrr by default */
-static int blank = 1; /* enable blanking by default */
+static bool blank = 1; /* enable blanking by default */
static int ypan = 1; /* 0: scroll, 1: ypan, 2: ywrap */
static bool pmi_setpal __devinitdata = true; /* use PMI for palette changes */
-static int nocrtc __devinitdata; /* ignore CRTC settings */
-static int noedid __devinitdata; /* don't try DDC transfers */
+static bool nocrtc __devinitdata; /* ignore CRTC settings */
+static bool noedid __devinitdata; /* don't try DDC transfers */
static int vram_remap __devinitdata; /* set amt. of memory to be used */
static int vram_total __devinitdata; /* set total amount of memory */
static u16 maxclk __devinitdata; /* maximum pixel clock */
diff --git a/drivers/video/vfb.c b/drivers/video/vfb.c
index bf2f780..501a922 100644
--- a/drivers/video/vfb.c
+++ b/drivers/video/vfb.c
@@ -110,7 +110,7 @@ static struct fb_fix_screeninfo vfb_fix __devinitdata = {
.accel = FB_ACCEL_NONE,
};
-static int vfb_enable __initdata = 0; /* disabled by default */
+static bool vfb_enable __initdata = 0; /* disabled by default */
module_param(vfb_enable, bool, 0);
static int vfb_check_var(struct fb_var_screeninfo *var,
diff --git a/drivers/video/via/hw.c b/drivers/video/via/hw.c
index d5aaca9..8497727 100644
--- a/drivers/video/via/hw.c
+++ b/drivers/video/via/hw.c
@@ -1810,7 +1810,11 @@ static void hw_init(void)
break;
}
+ /* magic required on VX900 for correct modesetting on IGA1 */
+ via_write_reg_mask(VIACR, 0x45, 0x00, 0x01);
+
/* probably this should go to the scaling code one day */
+ via_write_reg_mask(VIACR, 0xFD, 0, 0x80); /* VX900 hw scale on IGA2 */
viafb_write_regx(scaling_parameters, ARRAY_SIZE(scaling_parameters));
/* Fill VPIT Parameters */
diff --git a/drivers/video/vt8500lcdfb.c b/drivers/video/vt8500lcdfb.c
index 777c21d..2a5fe6e 100644
--- a/drivers/video/vt8500lcdfb.c
+++ b/drivers/video/vt8500lcdfb.c
@@ -457,18 +457,7 @@ static struct platform_driver vt8500lcd_driver = {
},
};
-static int __init vt8500lcd_init(void)
-{
- return platform_driver_register(&vt8500lcd_driver);
-}
-
-static void __exit vt8500lcd_exit(void)
-{
- platform_driver_unregister(&vt8500lcd_driver);
-}
-
-module_init(vt8500lcd_init);
-module_exit(vt8500lcd_exit);
+module_platform_driver(vt8500lcd_driver);
MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>");
MODULE_DESCRIPTION("LCD controller driver for VIA VT8500");
diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
index 2375e5b..90a2e30 100644
--- a/drivers/video/w100fb.c
+++ b/drivers/video/w100fb.c
@@ -1620,18 +1620,7 @@ static struct platform_driver w100fb_driver = {
},
};
-int __init w100fb_init(void)
-{
- return platform_driver_register(&w100fb_driver);
-}
-
-void __exit w100fb_cleanup(void)
-{
- platform_driver_unregister(&w100fb_driver);
-}
-
-module_init(w100fb_init);
-module_exit(w100fb_cleanup);
+module_platform_driver(w100fb_driver);
MODULE_DESCRIPTION("ATI Imageon w100 framebuffer driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/video/wm8505fb.c b/drivers/video/wm8505fb.c
index 96e34a5..c8703bd 100644
--- a/drivers/video/wm8505fb.c
+++ b/drivers/video/wm8505fb.c
@@ -404,18 +404,7 @@ static struct platform_driver wm8505fb_driver = {
},
};
-static int __init wm8505fb_init(void)
-{
- return platform_driver_register(&wm8505fb_driver);
-}
-
-static void __exit wm8505fb_exit(void)
-{
- platform_driver_unregister(&wm8505fb_driver);
-}
-
-module_init(wm8505fb_init);
-module_exit(wm8505fb_exit);
+module_platform_driver(wm8505fb_driver);
MODULE_AUTHOR("Ed Spiridonov <edo.rus@gmail.com>");
MODULE_DESCRIPTION("Framebuffer driver for WMT WM8505");
diff --git a/drivers/video/wmt_ge_rops.c b/drivers/video/wmt_ge_rops.c
index 45832b7..55be386 100644
--- a/drivers/video/wmt_ge_rops.c
+++ b/drivers/video/wmt_ge_rops.c
@@ -167,18 +167,7 @@ static struct platform_driver wmt_ge_rops_driver = {
},
};
-static int __init wmt_ge_rops_init(void)
-{
- return platform_driver_register(&wmt_ge_rops_driver);
-}
-
-static void __exit wmt_ge_rops_exit(void)
-{
- platform_driver_unregister(&wmt_ge_rops_driver);
-}
-
-module_init(wmt_ge_rops_init);
-module_exit(wmt_ge_rops_exit);
+module_platform_driver(wmt_ge_rops_driver);
MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com");
MODULE_DESCRIPTION("Accelerators for raster operations using "
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index beac52fc..cb4529c 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -671,20 +671,17 @@ InitWait:
}
}
-static struct xenbus_device_id xenfb_ids[] = {
+static const struct xenbus_device_id xenfb_ids[] = {
{ "vfb" },
{ "" }
};
-static struct xenbus_driver xenfb_driver = {
- .name = "vfb",
- .owner = THIS_MODULE,
- .ids = xenfb_ids,
+static DEFINE_XENBUS_DRIVER(xenfb, ,
.probe = xenfb_probe,
.remove = xenfb_remove,
.resume = xenfb_resume,
.otherend_changed = xenfb_backend_changed,
-};
+);
static int __init xenfb_init(void)
{
diff --git a/drivers/video/xilinxfb.c b/drivers/video/xilinxfb.c
index fcb6cd9..1808452 100644
--- a/drivers/video/xilinxfb.c
+++ b/drivers/video/xilinxfb.c
@@ -511,25 +511,7 @@ static struct platform_driver xilinxfb_of_driver = {
},
};
-
-/* ---------------------------------------------------------------------
- * Module setup and teardown
- */
-
-static int __init
-xilinxfb_init(void)
-{
- return platform_driver_register(&xilinxfb_of_driver);
-}
-
-static void __exit
-xilinxfb_cleanup(void)
-{
- platform_driver_unregister(&xilinxfb_of_driver);
-}
-
-module_init(xilinxfb_init);
-module_exit(xilinxfb_cleanup);
+module_platform_driver(xilinxfb_of_driver);
MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
MODULE_DESCRIPTION("Xilinx TFT frame buffer driver");
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 94fd738..958e512 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -1,4 +1,5 @@
-/* Virtio balloon implementation, inspired by Dor Loar and Marcelo
+/*
+ * Virtio balloon implementation, inspired by Dor Laor and Marcelo
* Tosatti's implementations.
*
* Copyright 2008 Rusty Russell IBM Corporation
@@ -17,7 +18,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
-//#define DEBUG
+
#include <linux/virtio.h>
#include <linux/virtio_balloon.h>
#include <linux/swap.h>
@@ -87,7 +88,7 @@ static void tell_host(struct virtio_balloon *vb, struct virtqueue *vq)
init_completion(&vb->acked);
/* We should always be able to add one buffer to an empty queue. */
- if (virtqueue_add_buf(vq, &sg, 1, 0, vb) < 0)
+ if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
BUG();
virtqueue_kick(vq);
@@ -149,7 +150,6 @@ static void leak_balloon(struct virtio_balloon *vb, size_t num)
vb->num_pages--;
}
-
/*
* Note that if
* virtio_has_feature(vdev, VIRTIO_BALLOON_F_MUST_TELL_HOST);
@@ -220,7 +220,7 @@ static void stats_handle_request(struct virtio_balloon *vb)
vq = vb->stats_vq;
sg_init_one(&sg, vb->stats, sizeof(vb->stats));
- if (virtqueue_add_buf(vq, &sg, 1, 0, vb) < 0)
+ if (virtqueue_add_buf(vq, &sg, 1, 0, vb, GFP_KERNEL) < 0)
BUG();
virtqueue_kick(vq);
}
@@ -275,32 +275,21 @@ static int balloon(void *_vballoon)
return 0;
}
-static int virtballoon_probe(struct virtio_device *vdev)
+static int init_vqs(struct virtio_balloon *vb)
{
- struct virtio_balloon *vb;
struct virtqueue *vqs[3];
vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request };
const char *names[] = { "inflate", "deflate", "stats" };
int err, nvqs;
- vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
- if (!vb) {
- err = -ENOMEM;
- goto out;
- }
-
- INIT_LIST_HEAD(&vb->pages);
- vb->num_pages = 0;
- init_waitqueue_head(&vb->config_change);
- vb->vdev = vdev;
- vb->need_stats_update = 0;
-
- /* We expect two virtqueues: inflate and deflate,
- * and optionally stat. */
+ /*
+ * We expect two virtqueues: inflate and deflate, and
+ * optionally stat.
+ */
nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2;
- err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
+ err = vb->vdev->config->find_vqs(vb->vdev, nvqs, vqs, callbacks, names);
if (err)
- goto out_free_vb;
+ return err;
vb->inflate_vq = vqs[0];
vb->deflate_vq = vqs[1];
@@ -313,10 +302,34 @@ static int virtballoon_probe(struct virtio_device *vdev)
* use it to signal us later.
*/
sg_init_one(&sg, vb->stats, sizeof vb->stats);
- if (virtqueue_add_buf(vb->stats_vq, &sg, 1, 0, vb) < 0)
+ if (virtqueue_add_buf(vb->stats_vq, &sg, 1, 0, vb, GFP_KERNEL)
+ < 0)
BUG();
virtqueue_kick(vb->stats_vq);
}
+ return 0;
+}
+
+static int virtballoon_probe(struct virtio_device *vdev)
+{
+ struct virtio_balloon *vb;
+ int err;
+
+ vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL);
+ if (!vb) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&vb->pages);
+ vb->num_pages = 0;
+ init_waitqueue_head(&vb->config_change);
+ vb->vdev = vdev;
+ vb->need_stats_update = 0;
+
+ err = init_vqs(vb);
+ if (err)
+ goto out_free_vb;
vb->thread = kthread_run(balloon, vb, "vballoon");
if (IS_ERR(vb->thread)) {
@@ -351,6 +364,59 @@ static void __devexit virtballoon_remove(struct virtio_device *vdev)
kfree(vb);
}
+#ifdef CONFIG_PM
+static int virtballoon_freeze(struct virtio_device *vdev)
+{
+ struct virtio_balloon *vb = vdev->priv;
+
+ /*
+ * The kthread is already frozen by the PM core before this
+ * function is called.
+ */
+
+ while (vb->num_pages)
+ leak_balloon(vb, vb->num_pages);
+ update_balloon_size(vb);
+
+ /* Ensure we don't get any more requests from the host */
+ vdev->config->reset(vdev);
+ vdev->config->del_vqs(vdev);
+ return 0;
+}
+
+static int restore_common(struct virtio_device *vdev)
+{
+ struct virtio_balloon *vb = vdev->priv;
+ int ret;
+
+ ret = init_vqs(vdev->priv);
+ if (ret)
+ return ret;
+
+ fill_balloon(vb, towards_target(vb));
+ update_balloon_size(vb);
+ return 0;
+}
+
+static int virtballoon_thaw(struct virtio_device *vdev)
+{
+ return restore_common(vdev);
+}
+
+static int virtballoon_restore(struct virtio_device *vdev)
+{
+ struct virtio_balloon *vb = vdev->priv;
+
+ /*
+ * If a request wasn't complete at the time of freezing, this
+ * could have been set.
+ */
+ vb->need_stats_update = 0;
+
+ return restore_common(vdev);
+}
+#endif
+
static unsigned int features[] = {
VIRTIO_BALLOON_F_MUST_TELL_HOST,
VIRTIO_BALLOON_F_STATS_VQ,
@@ -365,6 +431,11 @@ static struct virtio_driver virtio_balloon_driver = {
.probe = virtballoon_probe,
.remove = __devexit_p(virtballoon_remove),
.config_changed = virtballoon_changed,
+#ifdef CONFIG_PM
+ .freeze = virtballoon_freeze,
+ .restore = virtballoon_restore,
+ .thaw = virtballoon_thaw,
+#endif
};
static int __init init(void)
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index 7317dc2..01d6dc2 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -310,8 +310,8 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index,
vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
/* Create the vring */
- vq = vring_new_virtqueue(info->num, VIRTIO_MMIO_VRING_ALIGN,
- vdev, info->queue, vm_notify, callback, name);
+ vq = vring_new_virtqueue(info->num, VIRTIO_MMIO_VRING_ALIGN, vdev,
+ true, info->queue, vm_notify, callback, name);
if (!vq) {
err = -ENOMEM;
goto error_new_virtqueue;
@@ -361,7 +361,12 @@ static int vm_find_vqs(struct virtio_device *vdev, unsigned nvqs,
return 0;
}
+static const char *vm_bus_name(struct virtio_device *vdev)
+{
+ struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
+ return vm_dev->pdev->name;
+}
static struct virtio_config_ops virtio_mmio_config_ops = {
.get = vm_get,
@@ -373,6 +378,7 @@ static struct virtio_config_ops virtio_mmio_config_ops = {
.del_vqs = vm_del_vqs,
.get_features = vm_get_features,
.finalize_features = vm_finalize_features,
+ .bus_name = vm_bus_name,
};
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 03d1984..635e1ef 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -55,6 +55,10 @@ struct virtio_pci_device
unsigned msix_vectors;
/* Vectors allocated, excluding per-vq vectors if any */
unsigned msix_used_vectors;
+
+ /* Status saved during hibernate/restore */
+ u8 saved_status;
+
/* Whether we have vector per vq */
bool per_vq_vectors;
};
@@ -414,8 +418,8 @@ static struct virtqueue *setup_vq(struct virtio_device *vdev, unsigned index,
vp_dev->ioaddr + VIRTIO_PCI_QUEUE_PFN);
/* create the vring */
- vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN,
- vdev, info->queue, vp_notify, callback, name);
+ vq = vring_new_virtqueue(info->num, VIRTIO_PCI_VRING_ALIGN, vdev,
+ true, info->queue, vp_notify, callback, name);
if (!vq) {
err = -ENOMEM;
goto out_activate_queue;
@@ -598,6 +602,13 @@ static int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
false, false);
}
+static const char *vp_bus_name(struct virtio_device *vdev)
+{
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+ return pci_name(vp_dev->pci_dev);
+}
+
static struct virtio_config_ops virtio_pci_config_ops = {
.get = vp_get,
.set = vp_set,
@@ -608,6 +619,7 @@ static struct virtio_config_ops virtio_pci_config_ops = {
.del_vqs = vp_del_vqs,
.get_features = vp_get_features,
.finalize_features = vp_finalize_features,
+ .bus_name = vp_bus_name,
};
static void virtio_pci_release_dev(struct device *_d)
@@ -708,19 +720,114 @@ static void __devexit virtio_pci_remove(struct pci_dev *pci_dev)
}
#ifdef CONFIG_PM
-static int virtio_pci_suspend(struct pci_dev *pci_dev, pm_message_t state)
+static int virtio_pci_suspend(struct device *dev)
{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+
pci_save_state(pci_dev);
pci_set_power_state(pci_dev, PCI_D3hot);
return 0;
}
-static int virtio_pci_resume(struct pci_dev *pci_dev)
+static int virtio_pci_resume(struct device *dev)
{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+
pci_restore_state(pci_dev);
pci_set_power_state(pci_dev, PCI_D0);
return 0;
}
+
+static int virtio_pci_freeze(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+ struct virtio_driver *drv;
+ int ret;
+
+ drv = container_of(vp_dev->vdev.dev.driver,
+ struct virtio_driver, driver);
+
+ ret = 0;
+ vp_dev->saved_status = vp_get_status(&vp_dev->vdev);
+ if (drv && drv->freeze)
+ ret = drv->freeze(&vp_dev->vdev);
+
+ if (!ret)
+ pci_disable_device(pci_dev);
+ return ret;
+}
+
+static int restore_common(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+ int ret;
+
+ ret = pci_enable_device(pci_dev);
+ if (ret)
+ return ret;
+ pci_set_master(pci_dev);
+ vp_finalize_features(&vp_dev->vdev);
+
+ return ret;
+}
+
+static int virtio_pci_thaw(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+ struct virtio_driver *drv;
+ int ret;
+
+ ret = restore_common(dev);
+ if (ret)
+ return ret;
+
+ drv = container_of(vp_dev->vdev.dev.driver,
+ struct virtio_driver, driver);
+
+ if (drv && drv->thaw)
+ ret = drv->thaw(&vp_dev->vdev);
+ else if (drv && drv->restore)
+ ret = drv->restore(&vp_dev->vdev);
+
+ /* Finally, tell the device we're all set */
+ if (!ret)
+ vp_set_status(&vp_dev->vdev, vp_dev->saved_status);
+
+ return ret;
+}
+
+static int virtio_pci_restore(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
+ struct virtio_driver *drv;
+ int ret;
+
+ drv = container_of(vp_dev->vdev.dev.driver,
+ struct virtio_driver, driver);
+
+ ret = restore_common(dev);
+ if (!ret && drv && drv->restore)
+ ret = drv->restore(&vp_dev->vdev);
+
+ /* Finally, tell the device we're all set */
+ if (!ret)
+ vp_set_status(&vp_dev->vdev, vp_dev->saved_status);
+
+ return ret;
+}
+
+static const struct dev_pm_ops virtio_pci_pm_ops = {
+ .suspend = virtio_pci_suspend,
+ .resume = virtio_pci_resume,
+ .freeze = virtio_pci_freeze,
+ .thaw = virtio_pci_thaw,
+ .restore = virtio_pci_restore,
+ .poweroff = virtio_pci_suspend,
+};
#endif
static struct pci_driver virtio_pci_driver = {
@@ -729,8 +836,7 @@ static struct pci_driver virtio_pci_driver = {
.probe = virtio_pci_probe,
.remove = __devexit_p(virtio_pci_remove),
#ifdef CONFIG_PM
- .suspend = virtio_pci_suspend,
- .resume = virtio_pci_resume,
+ .driver.pm = &virtio_pci_pm_ops,
#endif
};
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index c7a2c20..5aa43c3 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -22,23 +22,27 @@
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/hrtimer.h>
/* virtio guest is communicating with a virtual "device" that actually runs on
* a host processor. Memory barriers are used to control SMP effects. */
#ifdef CONFIG_SMP
/* Where possible, use SMP barriers which are more lightweight than mandatory
* barriers, because mandatory barriers control MMIO effects on accesses
- * through relaxed memory I/O windows (which virtio does not use). */
-#define virtio_mb() smp_mb()
-#define virtio_rmb() smp_rmb()
-#define virtio_wmb() smp_wmb()
+ * through relaxed memory I/O windows (which virtio-pci does not use). */
+#define virtio_mb(vq) \
+ do { if ((vq)->weak_barriers) smp_mb(); else mb(); } while(0)
+#define virtio_rmb(vq) \
+ do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while(0)
+#define virtio_wmb(vq) \
+ do { if ((vq)->weak_barriers) smp_wmb(); else wmb(); } while(0)
#else
/* We must force memory ordering even if guest is UP since host could be
* running on another CPU, but SMP barriers are defined to barrier() in that
* configuration. So fall back to mandatory barriers instead. */
-#define virtio_mb() mb()
-#define virtio_rmb() rmb()
-#define virtio_wmb() wmb()
+#define virtio_mb(vq) mb()
+#define virtio_rmb(vq) rmb()
+#define virtio_wmb(vq) wmb()
#endif
#ifdef DEBUG
@@ -77,6 +81,9 @@ struct vring_virtqueue
/* Actual memory layout for this queue */
struct vring vring;
+ /* Can we use weak barriers? */
+ bool weak_barriers;
+
/* Other side has made a mess, don't try any more. */
bool broken;
@@ -102,6 +109,10 @@ struct vring_virtqueue
#ifdef DEBUG
/* They're supposed to lock for us. */
unsigned int in_use;
+
+ /* Figure out if their kicks are too delayed. */
+ bool last_add_time_valid;
+ ktime_t last_add_time;
#endif
/* Tokens for callbacks. */
@@ -160,12 +171,29 @@ static int vring_add_indirect(struct vring_virtqueue *vq,
return head;
}
-int virtqueue_add_buf_gfp(struct virtqueue *_vq,
- struct scatterlist sg[],
- unsigned int out,
- unsigned int in,
- void *data,
- gfp_t gfp)
+/**
+ * virtqueue_add_buf - expose buffer to other end
+ * @vq: the struct virtqueue we're talking about.
+ * @sg: the description of the buffer(s).
+ * @out_num: the number of sg readable by other side
+ * @in_num: the number of sg which are writable (after readable ones)
+ * @data: the token identifying the buffer.
+ * @gfp: how to do memory allocations (if necessary).
+ *
+ * Caller must ensure we don't call this with other virtqueue operations
+ * at the same time (except where noted).
+ *
+ * Returns remaining capacity of queue or a negative error
+ * (ie. ENOSPC). Note that it only really makes sense to treat all
+ * positive return values as "available": indirect buffers mean that
+ * we can put an entire sg[] array inside a single queue entry.
+ */
+int virtqueue_add_buf(struct virtqueue *_vq,
+ struct scatterlist sg[],
+ unsigned int out,
+ unsigned int in,
+ void *data,
+ gfp_t gfp)
{
struct vring_virtqueue *vq = to_vvq(_vq);
unsigned int i, avail, uninitialized_var(prev);
@@ -175,6 +203,19 @@ int virtqueue_add_buf_gfp(struct virtqueue *_vq,
BUG_ON(data == NULL);
+#ifdef DEBUG
+ {
+ ktime_t now = ktime_get();
+
+ /* No kick or get, with .1 second between? Warn. */
+ if (vq->last_add_time_valid)
+ WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
+ > 100);
+ vq->last_add_time = now;
+ vq->last_add_time_valid = true;
+ }
+#endif
+
/* If the host supports indirect descriptor tables, and we have multiple
* buffers, then go indirect. FIXME: tune this threshold */
if (vq->indirect && (out + in) > 1 && vq->num_free) {
@@ -227,40 +268,102 @@ add_head:
vq->data[head] = data;
/* Put entry in available array (but don't update avail->idx until they
- * do sync). FIXME: avoid modulus here? */
- avail = (vq->vring.avail->idx + vq->num_added++) % vq->vring.num;
+ * do sync). */
+ avail = (vq->vring.avail->idx & (vq->vring.num-1));
vq->vring.avail->ring[avail] = head;
+ /* Descriptors and available array need to be set before we expose the
+ * new available array entries. */
+ virtio_wmb(vq);
+ vq->vring.avail->idx++;
+ vq->num_added++;
+
+ /* This is very unlikely, but theoretically possible. Kick
+ * just in case. */
+ if (unlikely(vq->num_added == (1 << 16) - 1))
+ virtqueue_kick(_vq);
+
pr_debug("Added buffer head %i to %p\n", head, vq);
END_USE(vq);
return vq->num_free;
}
-EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp);
+EXPORT_SYMBOL_GPL(virtqueue_add_buf);
-void virtqueue_kick(struct virtqueue *_vq)
+/**
+ * virtqueue_kick_prepare - first half of split virtqueue_kick call.
+ * @vq: the struct virtqueue
+ *
+ * Instead of virtqueue_kick(), you can do:
+ * if (virtqueue_kick_prepare(vq))
+ * virtqueue_notify(vq);
+ *
+ * This is sometimes useful because the virtqueue_kick_prepare() needs
+ * to be serialized, but the actual virtqueue_notify() call does not.
+ */
+bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
u16 new, old;
+ bool needs_kick;
+
START_USE(vq);
- /* Descriptors and available array need to be set before we expose the
- * new available array entries. */
- virtio_wmb();
+ /* We need to expose available array entries before checking avail
+ * event. */
+ virtio_mb(vq);
- old = vq->vring.avail->idx;
- new = vq->vring.avail->idx = old + vq->num_added;
+ old = vq->vring.avail->idx - vq->num_added;
+ new = vq->vring.avail->idx;
vq->num_added = 0;
- /* Need to update avail index before checking if we should notify */
- virtio_mb();
-
- if (vq->event ?
- vring_need_event(vring_avail_event(&vq->vring), new, old) :
- !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY))
- /* Prod other side to tell it about changes. */
- vq->notify(&vq->vq);
+#ifdef DEBUG
+ if (vq->last_add_time_valid) {
+ WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
+ vq->last_add_time)) > 100);
+ }
+ vq->last_add_time_valid = false;
+#endif
+ if (vq->event) {
+ needs_kick = vring_need_event(vring_avail_event(&vq->vring),
+ new, old);
+ } else {
+ needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
+ }
END_USE(vq);
+ return needs_kick;
+}
+EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
+
+/**
+ * virtqueue_notify - second half of split virtqueue_kick call.
+ * @vq: the struct virtqueue
+ *
+ * This does not need to be serialized.
+ */
+void virtqueue_notify(struct virtqueue *_vq)
+{
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
+ /* Prod other side to tell it about changes. */
+ vq->notify(_vq);
+}
+EXPORT_SYMBOL_GPL(virtqueue_notify);
+
+/**
+ * virtqueue_kick - update after add_buf
+ * @vq: the struct virtqueue
+ *
+ * After one or more virtqueue_add_buf calls, invoke this to kick
+ * the other side.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
+void virtqueue_kick(struct virtqueue *vq)
+{
+ if (virtqueue_kick_prepare(vq))
+ virtqueue_notify(vq);
}
EXPORT_SYMBOL_GPL(virtqueue_kick);
@@ -294,11 +397,28 @@ static inline bool more_used(const struct vring_virtqueue *vq)
return vq->last_used_idx != vq->vring.used->idx;
}
+/**
+ * virtqueue_get_buf - get the next used buffer
+ * @vq: the struct virtqueue we're talking about.
+ * @len: the length written into the buffer
+ *
+ * If the driver wrote data into the buffer, @len will be set to the
+ * amount written. This means you don't need to clear the buffer
+ * beforehand to ensure there's no data leakage in the case of short
+ * writes.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ *
+ * Returns NULL if there are no used buffers, or the "data" token
+ * handed to virtqueue_add_buf().
+ */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
struct vring_virtqueue *vq = to_vvq(_vq);
void *ret;
unsigned int i;
+ u16 last_used;
START_USE(vq);
@@ -314,10 +434,11 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
}
/* Only get used array entries after they have been exposed by host. */
- virtio_rmb();
+ virtio_rmb(vq);
- i = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].id;
- *len = vq->vring.used->ring[vq->last_used_idx%vq->vring.num].len;
+ last_used = (vq->last_used_idx & (vq->vring.num - 1));
+ i = vq->vring.used->ring[last_used].id;
+ *len = vq->vring.used->ring[last_used].len;
if (unlikely(i >= vq->vring.num)) {
BAD_RING(vq, "id %u out of range\n", i);
@@ -337,14 +458,27 @@ void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
* the read in the next get_buf call. */
if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
vring_used_event(&vq->vring) = vq->last_used_idx;
- virtio_mb();
+ virtio_mb(vq);
}
+#ifdef DEBUG
+ vq->last_add_time_valid = false;
+#endif
+
END_USE(vq);
return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
+/**
+ * virtqueue_disable_cb - disable callbacks
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * Note that this is not necessarily synchronous, hence unreliable and only
+ * useful as an optimization.
+ *
+ * Unlike other operations, this need not be serialized.
+ */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -353,6 +487,17 @@ void virtqueue_disable_cb(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);
+/**
+ * virtqueue_enable_cb - restart callbacks after disable_cb.
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * This re-enables callbacks; it returns "false" if there are pending
+ * buffers in the queue, to detect a possible race between the driver
+ * checking for more work, and enabling callbacks.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -366,7 +511,7 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
* entry. Always do both to keep code simple. */
vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
vring_used_event(&vq->vring) = vq->last_used_idx;
- virtio_mb();
+ virtio_mb(vq);
if (unlikely(more_used(vq))) {
END_USE(vq);
return false;
@@ -377,6 +522,19 @@ bool virtqueue_enable_cb(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
+/**
+ * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * This re-enables callbacks but hints to the other side to delay
+ * interrupts until most of the available buffers have been processed;
+ * it returns "false" if there are many pending buffers in the queue,
+ * to detect a possible race between the driver checking for more work,
+ * and enabling callbacks.
+ *
+ * Caller must ensure we don't call this with other virtqueue
+ * operations at the same time (except where noted).
+ */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -393,7 +551,7 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
/* TODO: tune this threshold */
bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
- virtio_mb();
+ virtio_mb(vq);
if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
END_USE(vq);
return false;
@@ -404,6 +562,14 @@ bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
+/**
+ * virtqueue_detach_unused_buf - detach first unused buffer
+ * @vq: the struct virtqueue we're talking about.
+ *
+ * Returns NULL or the "data" token handed to virtqueue_add_buf().
+ * This is not valid on an active queue; it is useful only for device
+ * shutdown.
+ */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
struct vring_virtqueue *vq = to_vvq(_vq);
@@ -453,6 +619,7 @@ EXPORT_SYMBOL_GPL(vring_interrupt);
struct virtqueue *vring_new_virtqueue(unsigned int num,
unsigned int vring_align,
struct virtio_device *vdev,
+ bool weak_barriers,
void *pages,
void (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
@@ -476,12 +643,14 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
vq->vq.vdev = vdev;
vq->vq.name = name;
vq->notify = notify;
+ vq->weak_barriers = weak_barriers;
vq->broken = false;
vq->last_used_idx = 0;
vq->num_added = 0;
list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
vq->in_use = false;
+ vq->last_add_time_valid = false;
#endif
vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
@@ -530,7 +699,13 @@ void vring_transport_features(struct virtio_device *vdev)
}
EXPORT_SYMBOL_GPL(vring_transport_features);
-/* return the size of the vring within the virtqueue */
+/**
+ * virtqueue_get_vring_size - return the size of the virtqueue's vring
+ * @vq: the struct virtqueue containing the vring of interest.
+ *
+ * Returns the size of the vring. This is mainly used for boasting to
+ * userspace. Unlike other operations, this need not be serialized.
+ */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index b5abaae..4f7e1d7 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -1002,26 +1002,7 @@ static void ds_disconnect(struct usb_interface *intf)
kfree(dev);
}
-static int ds_init(void)
-{
- int err;
-
- err = usb_register(&ds_driver);
- if (err) {
- printk(KERN_INFO "Failed to register DS9490R USB device: err=%d.\n", err);
- return err;
- }
-
- return 0;
-}
-
-static void ds_fini(void)
-{
- usb_deregister(&ds_driver);
-}
-
-module_init(ds_init);
-module_exit(ds_fini);
+module_usb_driver(ds_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <zbr@ioremap.net>");
diff --git a/drivers/w1/slaves/w1_therm.c b/drivers/w1/slaves/w1_therm.c
index a1ef9b5..ff29ae7 100644
--- a/drivers/w1/slaves/w1_therm.c
+++ b/drivers/w1/slaves/w1_therm.c
@@ -175,11 +175,13 @@ static ssize_t w1_therm_read(struct device *device,
{
struct w1_slave *sl = dev_to_w1_slave(device);
struct w1_master *dev = sl->master;
- u8 rom[9], crc, verdict;
+ u8 rom[9], crc, verdict, external_power;
int i, max_trying = 10;
ssize_t c = PAGE_SIZE;
- mutex_lock(&dev->mutex);
+ i = mutex_lock_interruptible(&dev->mutex);
+ if (i != 0)
+ return i;
memset(rom, 0, sizeof(rom));
@@ -190,13 +192,37 @@ static ssize_t w1_therm_read(struct device *device,
if (!w1_reset_select_slave(sl)) {
int count = 0;
unsigned int tm = 750;
+ unsigned long sleep_rem;
+
+ w1_write_8(dev, W1_READ_PSUPPLY);
+ external_power = w1_read_8(dev);
+
+ if (w1_reset_select_slave(sl))
+ continue;
/* 750ms strong pullup (or delay) after the convert */
- if (w1_strong_pullup)
+ if (!external_power && w1_strong_pullup)
w1_next_pullup(dev, tm);
+
w1_write_8(dev, W1_CONVERT_TEMP);
- if (!w1_strong_pullup)
- msleep(tm);
+
+ if (external_power) {
+ mutex_unlock(&dev->mutex);
+
+ sleep_rem = msleep_interruptible(tm);
+ if (sleep_rem != 0)
+ return -EINTR;
+
+ i = mutex_lock_interruptible(&dev->mutex);
+ if (i != 0)
+ return i;
+ } else if (!w1_strong_pullup) {
+ sleep_rem = msleep_interruptible(tm);
+ if (sleep_rem != 0) {
+ mutex_unlock(&dev->mutex);
+ return -EINTR;
+ }
+ }
if (!w1_reset_select_slave(sl)) {
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index c374978..9761950 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -892,6 +892,16 @@ void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb
break;
}
+ /* Do fast search on single slave bus */
+ if (dev->max_slave_count == 1) {
+ w1_write_8(dev, W1_READ_ROM);
+
+ if (w1_read_block(dev, (u8 *)&rn, 8) == 8 && rn)
+ cb(dev, rn);
+
+ break;
+ }
+
/* Start the search */
w1_write_8(dev, search_type);
for (i = 0; i < 64; ++i) {
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 79fd606..df9e8f0 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -772,6 +772,19 @@ config SMSC37B787_WDT
Most people will say N.
+config VIA_WDT
+ tristate "VIA Watchdog Timer"
+ depends on X86
+ select WATCHDOG_CORE
+ ---help---
+ This is the driver for the hardware watchdog timer on VIA
+ southbridge chipset CX700, VX800/VX820 or VX855/VX875.
+
+ To compile this driver as a module, choose M here; the module
+ will be called via_wdt.
+
+ Most people will say N.
+
config W83627HF_WDT
tristate "W83627HF/W83627DHG Watchdog Timer"
depends on X86
@@ -1085,7 +1098,7 @@ config BOOKE_WDT_DEFAULT_TIMEOUT
For Freescale Book-E processors, this is a number between 0 and 63.
For other Book-E processors, this is a number between 0 and 3.
- The value can be overidden by the wdt_period command-line parameter.
+ The value can be overridden by the wdt_period command-line parameter.
# PPC64 Architecture
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index fe893e9..e8f479a 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -99,6 +99,7 @@ obj-$(CONFIG_SBC7240_WDT) += sbc7240_wdt.o
obj-$(CONFIG_CPU5_WDT) += cpu5wdt.o
obj-$(CONFIG_SMSC_SCH311X_WDT) += sch311x_wdt.o
obj-$(CONFIG_SMSC37B787_WDT) += smsc37b787_wdt.o
+obj-$(CONFIG_VIA_WDT) += via_wdt.o
obj-$(CONFIG_W83627HF_WDT) += w83627hf_wdt.o
obj-$(CONFIG_W83697HF_WDT) += w83697hf_wdt.o
obj-$(CONFIG_W83697UG_WDT) += w83697ug_wdt.o
diff --git a/drivers/watchdog/ar7_wdt.c b/drivers/watchdog/ar7_wdt.c
index b292217..502773a 100644
--- a/drivers/watchdog/ar7_wdt.c
+++ b/drivers/watchdog/ar7_wdt.c
@@ -70,8 +70,8 @@ struct ar7_wdt {
};
static unsigned long wdt_is_open;
-static spinlock_t wdt_lock;
static unsigned expect_close;
+static DEFINE_SPINLOCK(wdt_lock);
/* XXX currently fixed, allows max margin ~68.72 secs */
#define prescale_value 0xffff
@@ -280,8 +280,6 @@ static int __devinit ar7_wdt_probe(struct platform_device *pdev)
{
int rc;
- spin_lock_init(&wdt_lock);
-
ar7_regs_wdt =
platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
if (!ar7_regs_wdt) {
@@ -355,15 +353,4 @@ static struct platform_driver ar7_wdt_driver = {
},
};
-static int __init ar7_wdt_init(void)
-{
- return platform_driver_register(&ar7_wdt_driver);
-}
-
-static void __exit ar7_wdt_cleanup(void)
-{
- platform_driver_unregister(&ar7_wdt_driver);
-}
-
-module_init(ar7_wdt_init);
-module_exit(ar7_wdt_cleanup);
+module_platform_driver(ar7_wdt_driver);
diff --git a/drivers/watchdog/at91sam9_wdt.c b/drivers/watchdog/at91sam9_wdt.c
index 87445b2..0056256 100644
--- a/drivers/watchdog/at91sam9_wdt.c
+++ b/drivers/watchdog/at91sam9_wdt.c
@@ -35,6 +35,11 @@
#define DRV_NAME "AT91SAM9 Watchdog"
+#define wdt_read(field) \
+ __raw_readl(at91wdt_private.base + field)
+#define wdt_write(field, val) \
+ __raw_writel((val), at91wdt_private.base + field)
+
/* AT91SAM9 watchdog runs a 12bit counter @ 256Hz,
* use this to convert a watchdog
* value from/to milliseconds.
@@ -63,6 +68,7 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
static void at91_ping(unsigned long data);
static struct {
+ void __iomem *base;
unsigned long next_heartbeat; /* the next_heartbeat for the timer */
unsigned long open;
char expect_close;
@@ -77,7 +83,7 @@ static struct {
*/
static inline void at91_wdt_reset(void)
{
- at91_sys_write(AT91_WDT_CR, AT91_WDT_KEY | AT91_WDT_WDRSTT);
+ wdt_write(AT91_WDT_CR, AT91_WDT_KEY | AT91_WDT_WDRSTT);
}
/*
@@ -132,7 +138,7 @@ static int at91_wdt_settimeout(unsigned int timeout)
unsigned int mr;
/* Check if disabled */
- mr = at91_sys_read(AT91_WDT_MR);
+ mr = wdt_read(AT91_WDT_MR);
if (mr & AT91_WDT_WDDIS) {
printk(KERN_ERR DRV_NAME": sorry, watchdog is disabled\n");
return -EIO;
@@ -149,7 +155,7 @@ static int at91_wdt_settimeout(unsigned int timeout)
| AT91_WDT_WDDBGHLT /* disabled in debug mode */
| AT91_WDT_WDD /* restart at any time */
| (timeout & AT91_WDT_WDV); /* timer value */
- at91_sys_write(AT91_WDT_MR, reg);
+ wdt_write(AT91_WDT_MR, reg);
return 0;
}
@@ -248,12 +254,22 @@ static struct miscdevice at91wdt_miscdev = {
static int __init at91wdt_probe(struct platform_device *pdev)
{
+ struct resource *r;
int res;
if (at91wdt_miscdev.parent)
return -EBUSY;
at91wdt_miscdev.parent = &pdev->dev;
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r)
+ return -ENODEV;
+ at91wdt_private.base = ioremap(r->start, resource_size(r));
+ if (!at91wdt_private.base) {
+ dev_err(&pdev->dev, "failed to map registers, aborting.\n");
+ return -ENOMEM;
+ }
+
/* Set watchdog */
res = at91_wdt_settimeout(ms_to_ticks(WDT_HW_TIMEOUT * 1000));
if (res)
diff --git a/drivers/watchdog/at91sam9_wdt.h b/drivers/watchdog/at91sam9_wdt.h
index 757f9ca..c6fbb2e6 100644
--- a/drivers/watchdog/at91sam9_wdt.h
+++ b/drivers/watchdog/at91sam9_wdt.h
@@ -16,11 +16,11 @@
#ifndef AT91_WDT_H
#define AT91_WDT_H
-#define AT91_WDT_CR (AT91_WDT + 0x00) /* Watchdog Control Register */
+#define AT91_WDT_CR 0x00 /* Watchdog Control Register */
#define AT91_WDT_WDRSTT (1 << 0) /* Restart */
#define AT91_WDT_KEY (0xa5 << 24) /* KEY Password */
-#define AT91_WDT_MR (AT91_WDT + 0x04) /* Watchdog Mode Register */
+#define AT91_WDT_MR 0x04 /* Watchdog Mode Register */
#define AT91_WDT_WDV (0xfff << 0) /* Counter Value */
#define AT91_WDT_WDFIEN (1 << 12) /* Fault Interrupt Enable */
#define AT91_WDT_WDRSTEN (1 << 13) /* Reset Processor */
@@ -30,7 +30,7 @@
#define AT91_WDT_WDDBGHLT (1 << 28) /* Debug Halt */
#define AT91_WDT_WDIDLEHLT (1 << 29) /* Idle Halt */
-#define AT91_WDT_SR (AT91_WDT + 0x08) /* Watchdog Status Register */
+#define AT91_WDT_SR 0x08 /* Watchdog Status Register */
#define AT91_WDT_WDUNF (1 << 0) /* Watchdog Underflow */
#define AT91_WDT_WDERR (1 << 1) /* Watchdog Error */
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
index 725c84b..9db8083 100644
--- a/drivers/watchdog/ath79_wdt.c
+++ b/drivers/watchdog/ath79_wdt.c
@@ -68,17 +68,23 @@ static int max_timeout;
static inline void ath79_wdt_keepalive(void)
{
ath79_reset_wr(AR71XX_RESET_REG_WDOG, wdt_freq * timeout);
+ /* flush write */
+ ath79_reset_rr(AR71XX_RESET_REG_WDOG);
}
static inline void ath79_wdt_enable(void)
{
ath79_wdt_keepalive();
ath79_reset_wr(AR71XX_RESET_REG_WDOG_CTRL, WDOG_CTRL_ACTION_FCR);
+ /* flush write */
+ ath79_reset_rr(AR71XX_RESET_REG_WDOG_CTRL);
}
static inline void ath79_wdt_disable(void)
{
ath79_reset_wr(AR71XX_RESET_REG_WDOG_CTRL, WDOG_CTRL_ACTION_NONE);
+ /* flush write */
+ ath79_reset_rr(AR71XX_RESET_REG_WDOG_CTRL);
}
static int ath79_wdt_set_timeout(int val)
diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c
index 5064e83..8dc7de6 100644
--- a/drivers/watchdog/bcm63xx_wdt.c
+++ b/drivers/watchdog/bcm63xx_wdt.c
@@ -311,18 +311,7 @@ static struct platform_driver bcm63xx_wdt = {
}
};
-static int __init bcm63xx_wdt_init(void)
-{
- return platform_driver_register(&bcm63xx_wdt);
-}
-
-static void __exit bcm63xx_wdt_exit(void)
-{
- platform_driver_unregister(&bcm63xx_wdt);
-}
-
-module_init(bcm63xx_wdt_init);
-module_exit(bcm63xx_wdt_exit);
+module_platform_driver(bcm63xx_wdt);
MODULE_AUTHOR("Miguel Gaio <miguel.gaio@efixo.com>");
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 337265b..7c0fdfc 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -198,9 +198,13 @@ static long booke_wdt_ioctl(struct file *file,
booke_wdt_period = tmp;
#endif
booke_wdt_set();
- return 0;
+ /* Fall */
case WDIOC_GETTIMEOUT:
+#ifdef CONFIG_FSL_BOOKE
+ return put_user(period_to_sec(booke_wdt_period), p);
+#else
return put_user(booke_wdt_period, p);
+#endif
default:
return -ENOTTY;
}
diff --git a/drivers/watchdog/cpu5wdt.c b/drivers/watchdog/cpu5wdt.c
index edd3475..251c863 100644
--- a/drivers/watchdog/cpu5wdt.c
+++ b/drivers/watchdog/cpu5wdt.c
@@ -39,7 +39,7 @@
static int verbose;
static int port = 0x91;
static int ticks = 10000;
-static spinlock_t cpu5wdt_lock;
+static DEFINE_SPINLOCK(cpu5wdt_lock);
#define PFX "cpu5wdt: "
@@ -223,7 +223,6 @@ static int __devinit cpu5wdt_init(void)
"port=0x%x, verbose=%i\n", port, verbose);
init_completion(&cpu5wdt_device.stop);
- spin_lock_init(&cpu5wdt_lock);
cpu5wdt_device.queue = 0;
setup_timer(&cpu5wdt_device.timer, cpu5wdt_trigger, 0);
cpu5wdt_device.default_ticks = ticks;
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index 1e013e8..1b793df 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -687,15 +687,4 @@ static struct platform_driver cpwd_driver = {
.remove = __devexit_p(cpwd_remove),
};
-static int __init cpwd_init(void)
-{
- return platform_driver_register(&cpwd_driver);
-}
-
-static void __exit cpwd_exit(void)
-{
- platform_driver_unregister(&cpwd_driver);
-}
-
-module_init(cpwd_init);
-module_exit(cpwd_exit);
+module_platform_driver(cpwd_driver);
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index 51b5551..c8c5c80 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -271,18 +271,7 @@ static struct platform_driver platform_wdt_driver = {
.remove = __devexit_p(davinci_wdt_remove),
};
-static int __init davinci_wdt_init(void)
-{
- return platform_driver_register(&platform_wdt_driver);
-}
-
-static void __exit davinci_wdt_exit(void)
-{
- platform_driver_unregister(&platform_wdt_driver);
-}
-
-module_init(davinci_wdt_init);
-module_exit(davinci_wdt_exit);
+module_platform_driver(platform_wdt_driver);
MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("DaVinci Watchdog Driver");
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index f10f8c0..63d7b58 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -300,11 +300,7 @@ static int __devinit dw_wdt_drv_probe(struct platform_device *pdev)
if (!mem)
return -EINVAL;
- if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
- "dw_wdt"))
- return -ENOMEM;
-
- dw_wdt.regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
+ dw_wdt.regs = devm_request_and_ioremap(&pdev->dev, mem);
if (!dw_wdt.regs)
return -ENOMEM;
@@ -358,17 +354,7 @@ static struct platform_driver dw_wdt_driver = {
},
};
-static int __init dw_wdt_watchdog_init(void)
-{
- return platform_driver_register(&dw_wdt_driver);
-}
-module_init(dw_wdt_watchdog_init);
-
-static void __exit dw_wdt_watchdog_exit(void)
-{
- platform_driver_unregister(&dw_wdt_driver);
-}
-module_exit(dw_wdt_watchdog_exit);
+module_platform_driver(dw_wdt_driver);
MODULE_AUTHOR("Jamie Iles");
MODULE_DESCRIPTION("Synopsys DesignWare Watchdog Driver");
diff --git a/drivers/watchdog/eurotechwdt.c b/drivers/watchdog/eurotechwdt.c
index 41018d4..3946c51 100644
--- a/drivers/watchdog/eurotechwdt.c
+++ b/drivers/watchdog/eurotechwdt.c
@@ -64,7 +64,7 @@
static unsigned long eurwdt_is_open;
static int eurwdt_timeout;
static char eur_expect_close;
-static spinlock_t eurwdt_lock;
+static DEFINE_SPINLOCK(eurwdt_lock);
/*
* You must set these - there is no sane way to probe for this board.
@@ -446,8 +446,6 @@ static int __init eurwdt_init(void)
goto outreg;
}
- spin_lock_init(&eurwdt_lock);
-
ret = misc_register(&eurwdt_miscdev);
if (ret) {
printk(KERN_ERR "eurwdt: can't misc_register on minor=%d\n",
diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
index d4d8d1f..e45ca2b 100644
--- a/drivers/watchdog/f71808e_wdt.c
+++ b/drivers/watchdog/f71808e_wdt.c
@@ -100,7 +100,7 @@ MODULE_PARM_DESC(f71862fg_pin,
"Watchdog f71862fg reset output pin configuration. Choose pin 56 or 63"
" (default=" __MODULE_STRING(WATCHDOG_F71862FG_PIN)")");
-static int nowayout = WATCHDOG_NOWAYOUT;
+static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0444);
MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close");
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 8464ea1..3c166d3 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -231,7 +231,7 @@ static int __devinit cru_detect(unsigned long map_entry,
cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE;
- set_memory_x((unsigned long)bios32_entrypoint, (2 * PAGE_SIZE));
+ set_memory_x((unsigned long)bios32_map, 2);
asminline_call(&cmn_regs, bios32_entrypoint);
if (cmn_regs.u1.ral != 0) {
@@ -250,7 +250,8 @@ static int __devinit cru_detect(unsigned long map_entry,
cru_rom_addr =
ioremap(cru_physical_address, cru_length);
if (cru_rom_addr) {
- set_memory_x((unsigned long)cru_rom_addr, cru_length);
+ set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK,
+ (cru_length + PAGE_SIZE - 1) >> PAGE_SHIFT);
retval = 0;
}
}
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 99796c5..bdf401b 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -36,6 +36,7 @@
* document number TBD : Patsburg (PBG)
* document number TBD : DH89xxCC
* document number TBD : Panther Point
+ * document number TBD : Lynx Point
*/
/*
@@ -126,6 +127,7 @@ enum iTCO_chipsets {
TCO_PBG, /* Patsburg */
TCO_DH89XXCC, /* DH89xxCC */
TCO_PPT, /* Panther Point */
+ TCO_LPT, /* Lynx Point */
};
static struct {
@@ -189,6 +191,7 @@ static struct {
{"Patsburg", 2},
{"DH89xxCC", 2},
{"Panther Point", 2},
+ {"Lynx Point", 2},
{NULL, 0}
};
@@ -331,6 +334,38 @@ static DEFINE_PCI_DEVICE_TABLE(iTCO_wdt_pci_tbl) = {
{ PCI_VDEVICE(INTEL, 0x1e5d), TCO_PPT},
{ PCI_VDEVICE(INTEL, 0x1e5e), TCO_PPT},
{ PCI_VDEVICE(INTEL, 0x1e5f), TCO_PPT},
+ { PCI_VDEVICE(INTEL, 0x8c40), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c41), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c42), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c43), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c44), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c45), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c46), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c47), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c48), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c49), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c4a), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c4b), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c4c), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c4d), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c4e), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c4f), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c50), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c51), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c52), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c53), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c54), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c55), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c56), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c57), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c58), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c59), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c5a), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c5b), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c5c), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c5d), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c5e), TCO_LPT},
+ { PCI_VDEVICE(INTEL, 0x8c5f), TCO_LPT},
{ 0, }, /* End of list */
};
MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
diff --git a/drivers/watchdog/ibmasr.c b/drivers/watchdog/ibmasr.c
index 195e0f7..c7481ad 100644
--- a/drivers/watchdog/ibmasr.c
+++ b/drivers/watchdog/ibmasr.c
@@ -68,7 +68,7 @@ static char asr_expect_close;
static unsigned int asr_type, asr_base, asr_length;
static unsigned int asr_read_addr, asr_write_addr;
static unsigned char asr_toggle_mask, asr_disable_mask;
-static spinlock_t asr_lock;
+static DEFINE_SPINLOCK(asr_lock);
static void __asr_toggle(void)
{
@@ -386,8 +386,6 @@ static int __init ibmasr_init(void)
if (!asr_type)
return -ENODEV;
- spin_lock_init(&asr_lock);
-
rc = asr_get_base_address();
if (rc)
return rc;
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index b8ef2c6..c44c333 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -247,7 +247,6 @@ static struct miscdevice imx2_wdt_miscdev = {
static int __init imx2_wdt_probe(struct platform_device *pdev)
{
int ret;
- int res_size;
struct resource *res;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -256,15 +255,7 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
return -ENODEV;
}
- res_size = resource_size(res);
- if (!devm_request_mem_region(&pdev->dev, res->start, res_size,
- res->name)) {
- dev_err(&pdev->dev, "can't allocate %d bytes at %d address\n",
- res_size, res->start);
- return -ENOMEM;
- }
-
- imx2_wdt.base = devm_ioremap_nocache(&pdev->dev, res->start, res_size);
+ imx2_wdt.base = devm_request_and_ioremap(&pdev->dev, res);
if (!imx2_wdt.base) {
dev_err(&pdev->dev, "ioremap failed\n");
return -ENOMEM;
diff --git a/drivers/watchdog/indydog.c b/drivers/watchdog/indydog.c
index 1cc5609..1475e09 100644
--- a/drivers/watchdog/indydog.c
+++ b/drivers/watchdog/indydog.c
@@ -28,7 +28,7 @@
#define PFX "indydog: "
static unsigned long indydog_alive;
-static spinlock_t indydog_lock;
+static DEFINE_SPINLOCK(indydog_lock);
#define WATCHDOG_TIMEOUT 30 /* 30 sec default timeout */
@@ -185,8 +185,6 @@ static int __init watchdog_init(void)
{
int ret;
- spin_lock_init(&indydog_lock);
-
ret = register_reboot_notifier(&indydog_notifier);
if (ret) {
printk(KERN_ERR PFX
diff --git a/drivers/watchdog/iop_wdt.c b/drivers/watchdog/iop_wdt.c
index aef9478..82fa7a9 100644
--- a/drivers/watchdog/iop_wdt.c
+++ b/drivers/watchdog/iop_wdt.c
@@ -37,7 +37,7 @@
static int nowayout = WATCHDOG_NOWAYOUT;
static unsigned long wdt_status;
static unsigned long boot_status;
-static spinlock_t wdt_lock;
+static DEFINE_SPINLOCK(wdt_lock);
#define WDT_IN_USE 0
#define WDT_OK_TO_CLOSE 1
@@ -226,9 +226,6 @@ static int __init iop_wdt_init(void)
{
int ret;
- spin_lock_init(&wdt_lock);
-
-
/* check if the reset was caused by the watchdog timer */
boot_status = (read_rcsr() & IOP_RCSR_WDT) ? WDIOF_CARDRESET : 0;
diff --git a/drivers/watchdog/ixp2000_wdt.c b/drivers/watchdog/ixp2000_wdt.c
index e86952a..084f71aa 100644
--- a/drivers/watchdog/ixp2000_wdt.c
+++ b/drivers/watchdog/ixp2000_wdt.c
@@ -32,7 +32,7 @@
static int nowayout = WATCHDOG_NOWAYOUT;
static unsigned int heartbeat = 60; /* (secs) Default is 1 minute */
static unsigned long wdt_status;
-static spinlock_t wdt_lock;
+static DEFINE_SPINLOCK(wdt_lock);
#define WDT_IN_USE 0
#define WDT_OK_TO_CLOSE 1
@@ -189,7 +189,6 @@ static int __init ixp2000_wdt_init(void)
return -EIO;
}
wdt_tick_rate = (*IXP2000_T1_CLD * HZ) / 256;
- spin_lock_init(&wdt_lock);
return misc_register(&ixp2000_wdt_miscdev);
}
diff --git a/drivers/watchdog/ixp4xx_wdt.c b/drivers/watchdog/ixp4xx_wdt.c
index e02c0ec..4fc2e9a 100644
--- a/drivers/watchdog/ixp4xx_wdt.c
+++ b/drivers/watchdog/ixp4xx_wdt.c
@@ -181,7 +181,6 @@ static int __init ixp4xx_wdt_init(void)
return -ENODEV;
}
- spin_lock_init(&wdt_lock);
boot_status = (*IXP4XX_OSST & IXP4XX_OSST_TIMER_WARM_RESET) ?
WDIOF_CARDRESET : 0;
ret = misc_register(&ixp4xx_wdt_miscdev);
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
index 684ba01..17ef300 100644
--- a/drivers/watchdog/jz4740_wdt.c
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -295,18 +295,7 @@ static struct platform_driver jz4740_wdt_driver = {
},
};
-
-static int __init jz4740_wdt_init(void)
-{
- return platform_driver_register(&jz4740_wdt_driver);
-}
-module_init(jz4740_wdt_init);
-
-static void __exit jz4740_wdt_exit(void)
-{
- platform_driver_unregister(&jz4740_wdt_driver);
-}
-module_exit(jz4740_wdt_exit);
+module_platform_driver(jz4740_wdt_driver);
MODULE_AUTHOR("Paul Cercueil <paul@crapouillou.net>");
MODULE_DESCRIPTION("jz4740 Watchdog Driver");
diff --git a/drivers/watchdog/ks8695_wdt.c b/drivers/watchdog/ks8695_wdt.c
index 8114719..51757a5 100644
--- a/drivers/watchdog/ks8695_wdt.c
+++ b/drivers/watchdog/ks8695_wdt.c
@@ -42,7 +42,7 @@ MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
static unsigned long ks8695wdt_busy;
-static spinlock_t ks8695_lock;
+static DEFINE_SPINLOCK(ks8695_lock);
/* ......................................................................... */
@@ -288,7 +288,6 @@ static struct platform_driver ks8695wdt_driver = {
static int __init ks8695_wdt_init(void)
{
- spin_lock_init(&ks8695_lock);
/* Check that the heartbeat value is within range;
if not reset to the default */
if (ks8695_wdt_settimeout(wdt_time)) {
diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c
index 102aed0..d3a63be 100644
--- a/drivers/watchdog/lantiq_wdt.c
+++ b/drivers/watchdog/lantiq_wdt.c
@@ -222,9 +222,6 @@ ltq_wdt_remove(struct platform_device *pdev)
{
misc_deregister(&ltq_wdt_miscdev);
- if (ltq_wdt_membase)
- iounmap(ltq_wdt_membase);
-
return 0;
}
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index 73ba2fd..af63ecf 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -364,18 +364,7 @@ static struct platform_driver max63xx_wdt_driver = {
},
};
-static int __init max63xx_wdt_init(void)
-{
- return platform_driver_register(&max63xx_wdt_driver);
-}
-
-static void __exit max63xx_wdt_exit(void)
-{
- platform_driver_unregister(&max63xx_wdt_driver);
-}
-
-module_init(max63xx_wdt_init);
-module_exit(max63xx_wdt_exit);
+module_platform_driver(max63xx_wdt_driver);
MODULE_AUTHOR("Marc Zyngier <maz@misterjones.org>");
MODULE_DESCRIPTION("max63xx Watchdog Driver");
diff --git a/drivers/watchdog/mpc8xxx_wdt.c b/drivers/watchdog/mpc8xxx_wdt.c
index eed5436f..20feb4d 100644
--- a/drivers/watchdog/mpc8xxx_wdt.c
+++ b/drivers/watchdog/mpc8xxx_wdt.c
@@ -55,7 +55,7 @@ module_param(timeout, ushort, 0);
MODULE_PARM_DESC(timeout,
"Watchdog timeout in ticks. (0<timeout<65536, default=65535)");
-static int reset = 1;
+static bool reset = 1;
module_param(reset, bool, 0);
MODULE_PARM_DESC(reset,
"Watchdog Interrupt/Reset Mode. 0 = interrupt, 1 = reset");
diff --git a/drivers/watchdog/mtx-1_wdt.c b/drivers/watchdog/mtx-1_wdt.c
index ac37bb8..c29e31d 100644
--- a/drivers/watchdog/mtx-1_wdt.c
+++ b/drivers/watchdog/mtx-1_wdt.c
@@ -253,18 +253,7 @@ static struct platform_driver mtx1_wdt_driver = {
.driver.owner = THIS_MODULE,
};
-static int __init mtx1_wdt_init(void)
-{
- return platform_driver_register(&mtx1_wdt_driver);
-}
-
-static void __exit mtx1_wdt_exit(void)
-{
- platform_driver_unregister(&mtx1_wdt_driver);
-}
-
-module_init(mtx1_wdt_init);
-module_exit(mtx1_wdt_exit);
+module_platform_driver(mtx1_wdt_driver);
MODULE_AUTHOR("Michael Stickel, Florian Fainelli");
MODULE_DESCRIPTION("Driver for the MTX-1 watchdog");
diff --git a/drivers/watchdog/nuc900_wdt.c b/drivers/watchdog/nuc900_wdt.c
index 6cee33d..529085b 100644
--- a/drivers/watchdog/nuc900_wdt.c
+++ b/drivers/watchdog/nuc900_wdt.c
@@ -72,7 +72,7 @@ struct nuc900_wdt {
};
static unsigned long nuc900wdt_busy;
-struct nuc900_wdt *nuc900_wdt;
+static struct nuc900_wdt *nuc900_wdt;
static inline void nuc900_wdt_keepalive(void)
{
@@ -287,7 +287,8 @@ static int __devinit nuc900wdt_probe(struct platform_device *pdev)
setup_timer(&nuc900_wdt->timer, nuc900_wdt_timer_ping, 0);
- if (misc_register(&nuc900wdt_miscdev)) {
+ ret = misc_register(&nuc900wdt_miscdev);
+ if (ret) {
dev_err(&pdev->dev, "err register miscdev on minor=%d (%d)\n",
WATCHDOG_MINOR, ret);
goto err_clk;
@@ -334,18 +335,7 @@ static struct platform_driver nuc900wdt_driver = {
},
};
-static int __init nuc900_wdt_init(void)
-{
- return platform_driver_register(&nuc900wdt_driver);
-}
-
-static void __exit nuc900_wdt_exit(void)
-{
- platform_driver_unregister(&nuc900wdt_driver);
-}
-
-module_init(nuc900_wdt_init);
-module_exit(nuc900_wdt_exit);
+module_platform_driver(nuc900wdt_driver);
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("Watchdog driver for NUC900");
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
index 4ec741a..f359ab8 100644
--- a/drivers/watchdog/of_xilinx_wdt.c
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -414,18 +414,7 @@ static struct platform_driver xwdt_driver = {
},
};
-static int __init xwdt_init(void)
-{
- return platform_driver_register(&xwdt_driver);
-}
-
-static void __exit xwdt_exit(void)
-{
- platform_driver_unregister(&xwdt_driver);
-}
-
-module_init(xwdt_init);
-module_exit(xwdt_exit);
+module_platform_driver(xwdt_driver);
MODULE_AUTHOR("Alejandro Cabrera <aldaya@gmail.com>");
MODULE_DESCRIPTION("Xilinx Watchdog driver");
diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c
index 2b4acb8..d19ff51 100644
--- a/drivers/watchdog/omap_wdt.c
+++ b/drivers/watchdog/omap_wdt.c
@@ -55,7 +55,7 @@ module_param(timer_margin, uint, 0);
MODULE_PARM_DESC(timer_margin, "initial watchdog timeout (in seconds)");
static unsigned int wdt_trgr_pattern = 0x1234;
-static spinlock_t wdt_lock;
+static DEFINE_SPINLOCK(wdt_lock);
struct omap_wdt_dev {
void __iomem *base; /* physical */
@@ -232,6 +232,7 @@ static long omap_wdt_ioctl(struct file *file, unsigned int cmd,
if (cpu_is_omap24xx())
return put_user(omap_prcm_get_reset_sources(),
(int __user *)arg);
+ return put_user(0, (int __user *)arg);
case WDIOC_KEEPALIVE:
pm_runtime_get_sync(wdev->dev);
spin_lock(&wdt_lock);
@@ -338,6 +339,7 @@ static int __devinit omap_wdt_probe(struct platform_device *pdev)
return 0;
err_misc:
+ pm_runtime_disable(wdev->dev);
platform_set_drvdata(pdev, NULL);
iounmap(wdev->base);
@@ -370,6 +372,7 @@ static int __devexit omap_wdt_remove(struct platform_device *pdev)
struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pm_runtime_disable(wdev->dev);
if (!res)
return -ENOENT;
@@ -437,19 +440,7 @@ static struct platform_driver omap_wdt_driver = {
},
};
-static int __init omap_wdt_init(void)
-{
- spin_lock_init(&wdt_lock);
- return platform_driver_register(&omap_wdt_driver);
-}
-
-static void __exit omap_wdt_exit(void)
-{
- platform_driver_unregister(&omap_wdt_driver);
-}
-
-module_init(omap_wdt_init);
-module_exit(omap_wdt_exit);
+module_platform_driver(omap_wdt_driver);
MODULE_AUTHOR("George G. Davis");
MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 2d9fb96..4ad78f8 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -41,7 +41,7 @@ static int heartbeat = -1; /* module parameter (seconds) */
static unsigned int wdt_max_duration; /* (seconds) */
static unsigned int wdt_tclk;
static unsigned long wdt_status;
-static spinlock_t wdt_lock;
+static DEFINE_SPINLOCK(wdt_lock);
static void orion_wdt_ping(void)
{
@@ -294,19 +294,7 @@ static struct platform_driver orion_wdt_driver = {
},
};
-static int __init orion_wdt_init(void)
-{
- spin_lock_init(&wdt_lock);
- return platform_driver_register(&orion_wdt_driver);
-}
-
-static void __exit orion_wdt_exit(void)
-{
- platform_driver_unregister(&orion_wdt_driver);
-}
-
-module_init(orion_wdt_init);
-module_exit(orion_wdt_exit);
+module_platform_driver(orion_wdt_driver);
MODULE_AUTHOR("Sylver Bruneau <sylver.bruneau@googlemail.com>");
MODULE_DESCRIPTION("Orion Processor Watchdog");
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 748a74b..d8de1dd 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -827,37 +827,4 @@ static void usb_pcwd_disconnect(struct usb_interface *interface)
printk(KERN_INFO PFX "USB PC Watchdog disconnected\n");
}
-
-
-/**
- * usb_pcwd_init
- */
-static int __init usb_pcwd_init(void)
-{
- int result;
-
- /* register this driver with the USB subsystem */
- result = usb_register(&usb_pcwd_driver);
- if (result) {
- printk(KERN_ERR PFX "usb_register failed. Error number %d\n",
- result);
- return result;
- }
-
- printk(KERN_INFO PFX DRIVER_DESC " v" DRIVER_VERSION "\n");
- return 0;
-}
-
-
-/**
- * usb_pcwd_exit
- */
-static void __exit usb_pcwd_exit(void)
-{
- /* deregister this driver with the USB subsystem */
- usb_deregister(&usb_pcwd_driver);
-}
-
-
-module_init(usb_pcwd_init);
-module_exit(usb_pcwd_exit);
+module_usb_driver(usb_pcwd_driver);
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 6149332..dfae030 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -226,7 +226,7 @@ static long pnx4008_wdt_ioctl(struct file *file, unsigned int cmd,
static int pnx4008_wdt_release(struct inode *inode, struct file *file)
{
if (!test_bit(WDT_OK_TO_CLOSE, &wdt_status))
- printk(KERN_WARNING "WATCHDOG: Device closed unexpectdly\n");
+ printk(KERN_WARNING "WATCHDOG: Device closed unexpectedly\n");
wdt_disable();
clk_disable(wdt_clk);
@@ -264,7 +264,7 @@ static int __devinit pnx4008_wdt_probe(struct platform_device *pdev)
wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (wdt_mem == NULL) {
printk(KERN_INFO MODULE_NAME
- "failed to get memory region resouce\n");
+ "failed to get memory region resource\n");
return -ENOENT;
}
@@ -334,18 +334,7 @@ static struct platform_driver platform_wdt_driver = {
.remove = __devexit_p(pnx4008_wdt_remove),
};
-static int __init pnx4008_wdt_init(void)
-{
- return platform_driver_register(&platform_wdt_driver);
-}
-
-static void __exit pnx4008_wdt_exit(void)
-{
- platform_driver_unregister(&platform_wdt_driver);
-}
-
-module_init(pnx4008_wdt_init);
-module_exit(pnx4008_wdt_exit);
+module_platform_driver(platform_wdt_driver);
MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
MODULE_DESCRIPTION("PNX4008 Watchdog Driver");
diff --git a/drivers/watchdog/rc32434_wdt.c b/drivers/watchdog/rc32434_wdt.c
index d4c29b5..bf7bc8a 100644
--- a/drivers/watchdog/rc32434_wdt.c
+++ b/drivers/watchdog/rc32434_wdt.c
@@ -332,18 +332,7 @@ static struct platform_driver rc32434_wdt_driver = {
}
};
-static int __init rc32434_wdt_init(void)
-{
- return platform_driver_register(&rc32434_wdt_driver);
-}
-
-static void __exit rc32434_wdt_exit(void)
-{
- platform_driver_unregister(&rc32434_wdt_driver);
-}
-
-module_init(rc32434_wdt_init);
-module_exit(rc32434_wdt_exit);
+module_platform_driver(rc32434_wdt_driver);
MODULE_AUTHOR("Ondrej Zajicek <santiago@crfreenet.org>,"
"Florian Fainelli <florian@openwrt.org>");
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c
index 428f8a1..042ccc5 100644
--- a/drivers/watchdog/rdc321x_wdt.c
+++ b/drivers/watchdog/rdc321x_wdt.c
@@ -293,18 +293,7 @@ static struct platform_driver rdc321x_wdt_driver = {
},
};
-static int __init rdc321x_wdt_init(void)
-{
- return platform_driver_register(&rdc321x_wdt_driver);
-}
-
-static void __exit rdc321x_wdt_exit(void)
-{
- platform_driver_unregister(&rdc321x_wdt_driver);
-}
-
-module_init(rdc321x_wdt_init);
-module_exit(rdc321x_wdt_exit);
+module_platform_driver(rdc321x_wdt_driver);
MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
MODULE_DESCRIPTION("RDC321x watchdog driver");
diff --git a/drivers/watchdog/riowd.c b/drivers/watchdog/riowd.c
index 109b533..c7e17ce 100644
--- a/drivers/watchdog/riowd.c
+++ b/drivers/watchdog/riowd.c
@@ -247,15 +247,4 @@ static struct platform_driver riowd_driver = {
.remove = __devexit_p(riowd_remove),
};
-static int __init riowd_init(void)
-{
- return platform_driver_register(&riowd_driver);
-}
-
-static void __exit riowd_exit(void)
-{
- platform_driver_unregister(&riowd_driver);
-}
-
-module_init(riowd_init);
-module_exit(riowd_exit);
+module_platform_driver(riowd_driver);
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index a79e384..404172f 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -312,18 +312,26 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
dev = &pdev->dev;
wdt_dev = &pdev->dev;
- /* get the memory region for the watchdog timer */
-
wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (wdt_mem == NULL) {
dev_err(dev, "no memory resource specified\n");
return -ENOENT;
}
+ wdt_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (wdt_irq == NULL) {
+ dev_err(dev, "no irq resource specified\n");
+ ret = -ENOENT;
+ goto err;
+ }
+
+ /* get the memory region for the watchdog timer */
+
size = resource_size(wdt_mem);
if (!request_mem_region(wdt_mem->start, size, pdev->name)) {
dev_err(dev, "failed to get memory region\n");
- return -EBUSY;
+ ret = -EBUSY;
+ goto err;
}
wdt_base = ioremap(wdt_mem->start, size);
@@ -335,29 +343,17 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
DBG("probe: mapped wdt_base=%p\n", wdt_base);
- wdt_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (wdt_irq == NULL) {
- dev_err(dev, "no irq resource specified\n");
- ret = -ENOENT;
- goto err_map;
- }
-
- ret = request_irq(wdt_irq->start, s3c2410wdt_irq, 0, pdev->name, pdev);
- if (ret != 0) {
- dev_err(dev, "failed to install irq (%d)\n", ret);
- goto err_map;
- }
-
wdt_clock = clk_get(&pdev->dev, "watchdog");
if (IS_ERR(wdt_clock)) {
dev_err(dev, "failed to find watchdog clock source\n");
ret = PTR_ERR(wdt_clock);
- goto err_irq;
+ goto err_map;
}
clk_enable(wdt_clock);
- if (s3c2410wdt_cpufreq_register() < 0) {
+ ret = s3c2410wdt_cpufreq_register();
+ if (ret < 0) {
printk(KERN_ERR PFX "failed to register cpufreq\n");
goto err_clk;
}
@@ -378,10 +374,18 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
"cannot start\n");
}
+ ret = request_irq(wdt_irq->start, s3c2410wdt_irq, 0, pdev->name, pdev);
+ if (ret != 0) {
+ dev_err(dev, "failed to install irq (%d)\n", ret);
+ goto err_cpufreq;
+ }
+
+ watchdog_set_nowayout(&s3c2410_wdd, nowayout);
+
ret = watchdog_register_device(&s3c2410_wdd);
if (ret) {
dev_err(dev, "cannot register watchdog (%d)\n", ret);
- goto err_cpufreq;
+ goto err_irq;
}
if (tmr_atboot && started == 0) {
@@ -406,23 +410,26 @@ static int __devinit s3c2410wdt_probe(struct platform_device *pdev)
return 0;
+ err_irq:
+ free_irq(wdt_irq->start, pdev);
+
err_cpufreq:
s3c2410wdt_cpufreq_deregister();
err_clk:
clk_disable(wdt_clock);
clk_put(wdt_clock);
-
- err_irq:
- free_irq(wdt_irq->start, pdev);
+ wdt_clock = NULL;
err_map:
iounmap(wdt_base);
err_req:
release_mem_region(wdt_mem->start, size);
- wdt_mem = NULL;
+ err:
+ wdt_irq = NULL;
+ wdt_mem = NULL;
return ret;
}
@@ -430,18 +437,18 @@ static int __devexit s3c2410wdt_remove(struct platform_device *dev)
{
watchdog_unregister_device(&s3c2410_wdd);
+ free_irq(wdt_irq->start, dev);
+
s3c2410wdt_cpufreq_deregister();
clk_disable(wdt_clock);
clk_put(wdt_clock);
wdt_clock = NULL;
- free_irq(wdt_irq->start, dev);
- wdt_irq = NULL;
-
iounmap(wdt_base);
release_mem_region(wdt_mem->start, resource_size(wdt_mem));
+ wdt_irq = NULL;
wdt_mem = NULL;
return 0;
}
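The reworked s3c2410 probe keeps its error labels in reverse order of acquisition, so each failure point unwinds exactly what has already been set up (the irq is acquired last, so it is freed first). A condensed sketch of that unwind idiom; the acquire/release helpers are purely illustrative:

/* Illustrative stand-ins for request_mem_region(), clk_get(), request_irq(), ... */
static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return 0; }
static void release_a(void) { }
static void release_b(void) { }

static int foo_probe(void)
{
        int ret;

        ret = acquire_a();
        if (ret)
                return ret;             /* nothing to unwind yet */

        ret = acquire_b();
        if (ret)
                goto err_a;

        ret = acquire_c();
        if (ret)
                goto err_b;

        return 0;

err_b:
        release_b();
err_a:
        release_a();
        return ret;
}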
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index bfaf9bb..eef1524 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -359,6 +359,8 @@ static struct amba_id sp805_wdt_ids[] = {
{ 0, 0 },
};
+MODULE_DEVICE_TABLE(amba, sp805_wdt_ids);
+
static struct amba_driver sp805_wdt_driver = {
.drv = {
.name = MODULE_NAME,
diff --git a/drivers/watchdog/stmp3xxx_wdt.c b/drivers/watchdog/stmp3xxx_wdt.c
index ac2346a..e37d811 100644
--- a/drivers/watchdog/stmp3xxx_wdt.c
+++ b/drivers/watchdog/stmp3xxx_wdt.c
@@ -174,7 +174,7 @@ static int stmp3xxx_wdt_release(struct inode *inode, struct file *file)
if (!nowayout) {
if (!test_bit(WDT_OK_TO_CLOSE, &wdt_status)) {
wdt_ping();
- pr_debug("%s: Device closed unexpectdly\n", __func__);
+ pr_debug("%s: Device closed unexpectedly\n", __func__);
ret = -EINVAL;
} else {
wdt_disable();
@@ -272,18 +272,7 @@ static struct platform_driver platform_wdt_driver = {
.resume = stmp3xxx_wdt_resume,
};
-static int __init stmp3xxx_wdt_init(void)
-{
- return platform_driver_register(&platform_wdt_driver);
-}
-
-static void __exit stmp3xxx_wdt_exit(void)
-{
- return platform_driver_unregister(&platform_wdt_driver);
-}
-
-module_init(stmp3xxx_wdt_init);
-module_exit(stmp3xxx_wdt_exit);
+module_platform_driver(platform_wdt_driver);
MODULE_DESCRIPTION("STMP3XXX Watchdog Driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index 5a90a4a..1490293 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -506,17 +506,7 @@ static struct platform_driver ts72xx_wdt_driver = {
},
};
-static __init int ts72xx_wdt_init(void)
-{
- return platform_driver_register(&ts72xx_wdt_driver);
-}
-module_init(ts72xx_wdt_init);
-
-static __exit void ts72xx_wdt_exit(void)
-{
- platform_driver_unregister(&ts72xx_wdt_driver);
-}
-module_exit(ts72xx_wdt_exit);
+module_platform_driver(ts72xx_wdt_driver);
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("TS-72xx SBC Watchdog");
diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c
index b5045ca..0764c62 100644
--- a/drivers/watchdog/twl4030_wdt.c
+++ b/drivers/watchdog/twl4030_wdt.c
@@ -256,17 +256,7 @@ static struct platform_driver twl4030_wdt_driver = {
},
};
-static int __devinit twl4030_wdt_init(void)
-{
- return platform_driver_register(&twl4030_wdt_driver);
-}
-module_init(twl4030_wdt_init);
-
-static void __devexit twl4030_wdt_exit(void)
-{
- platform_driver_unregister(&twl4030_wdt_driver);
-}
-module_exit(twl4030_wdt_exit);
+module_platform_driver(twl4030_wdt_driver);
MODULE_AUTHOR("Nokia Corporation");
MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/via_wdt.c b/drivers/watchdog/via_wdt.c
new file mode 100644
index 0000000..8f07dd4
--- /dev/null
+++ b/drivers/watchdog/via_wdt.c
@@ -0,0 +1,267 @@
+/*
+ * VIA Chipset Watchdog Driver
+ *
+ * Copyright (C) 2011 Sigfox
+ * License terms: GNU General Public License (GPL) version 2
+ * Author: Marc Vertes <marc.vertes@sigfox.com>
+ * Based on a preliminary version from Harald Welte <HaraldWelte@viatech.com>
+ * Timer code by Wim Van Sebroeck <wim@iguana.be>
+ *
+ * Caveat: PnP must be enabled in BIOS to allow full access to watchdog
+ * control registers. If not, the watchdog must be configured in BIOS manually.
+ */
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/jiffies.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/timer.h>
+#include <linux/watchdog.h>
+
+/* Configuration registers relative to the pci device */
+#define VIA_WDT_MMIO_BASE 0xe8 /* MMIO region base address */
+#define VIA_WDT_CONF 0xec /* watchdog enable state */
+
+/* Relevant bits for the VIA_WDT_CONF register */
+#define VIA_WDT_CONF_ENABLE 0x01 /* 1: enable watchdog */
+#define VIA_WDT_CONF_MMIO 0x02 /* 1: enable watchdog MMIO */
+
+/*
+ * The MMIO region contains the watchdog control register and the
+ * hardware timer counter.
+ */
+#define VIA_WDT_MMIO_LEN 8 /* MMIO region length in bytes */
+#define VIA_WDT_CTL 0 /* MMIO addr+0: state/control reg. */
+#define VIA_WDT_COUNT 4 /* MMIO addr+4: timer counter reg. */
+
+/* Bits for the VIA_WDT_CTL register */
+#define VIA_WDT_RUNNING 0x01 /* 0: stop, 1: running */
+#define VIA_WDT_FIRED 0x02 /* 1: restarted by expired watchdog */
+#define VIA_WDT_PWROFF 0x04 /* 0: reset, 1: poweroff */
+#define VIA_WDT_DISABLED 0x08 /* 1: timer is disabled */
+#define VIA_WDT_TRIGGER 0x80 /* 1: start a new countdown */
+
+/* Hardware heartbeat in seconds */
+#define WDT_HW_HEARTBEAT 1
+
+/* Timer heartbeat (500ms) */
+#define WDT_HEARTBEAT (HZ/2) /* should be <= ((WDT_HW_HEARTBEAT*HZ)/2) */
+
+/* User space timeout in seconds */
+#define WDT_TIMEOUT_MAX 1023 /* approx. 17 min. */
+#define WDT_TIMEOUT 60
+static int timeout = WDT_TIMEOUT;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds, between 1 and 1023 "
+ "(default = " __MODULE_STRING(WDT_TIMEOUT) ")");
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
+ "(default = " __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+static struct watchdog_device wdt_dev;
+static struct resource wdt_res;
+static void __iomem *wdt_mem;
+static unsigned int mmio;
+static void wdt_timer_tick(unsigned long data);
+static DEFINE_TIMER(timer, wdt_timer_tick, 0, 0);
+ /* The timer that pings the watchdog */
+static unsigned long next_heartbeat; /* the next_heartbeat for the timer */
+
+static inline void wdt_reset(void)
+{
+ unsigned int ctl = readl(wdt_mem);
+
+ writel(ctl | VIA_WDT_TRIGGER, wdt_mem);
+}
+
+/*
+ * Timer tick: the timer will make sure that the watchdog timer hardware
+ * is being reset in time. The conditions to do this are:
+ * 1) the watchdog timer has been started and /dev/watchdog is open
+ * and there is still time left before userspace should send the
+ * next heartbeat/ping. (note: the internal heartbeat is much smaller
+ * than the external/userspace heartbeat).
+ * 2) the watchdog timer has been stopped by userspace.
+ */
+static void wdt_timer_tick(unsigned long data)
+{
+ if (time_before(jiffies, next_heartbeat) ||
+ (!test_bit(WDOG_ACTIVE, &wdt_dev.status))) {
+ wdt_reset();
+ mod_timer(&timer, jiffies + WDT_HEARTBEAT);
+ } else
+ pr_crit("I will reboot your machine !\n");
+}
+
+static int wdt_ping(struct watchdog_device *wdd)
+{
+ /* calculate when the next userspace timeout will be */
+ next_heartbeat = jiffies + timeout * HZ;
+ return 0;
+}
+
+static int wdt_start(struct watchdog_device *wdd)
+{
+ unsigned int ctl = readl(wdt_mem);
+
+ writel(timeout, wdt_mem + VIA_WDT_COUNT);
+ writel(ctl | VIA_WDT_RUNNING | VIA_WDT_TRIGGER, wdt_mem);
+ wdt_ping(wdd);
+ mod_timer(&timer, jiffies + WDT_HEARTBEAT);
+ return 0;
+}
+
+static int wdt_stop(struct watchdog_device *wdd)
+{
+ unsigned int ctl = readl(wdt_mem);
+
+ writel(ctl & ~VIA_WDT_RUNNING, wdt_mem);
+ return 0;
+}
+
+static int wdt_set_timeout(struct watchdog_device *wdd,
+ unsigned int new_timeout)
+{
+ writel(new_timeout, wdt_mem + VIA_WDT_COUNT);
+ timeout = new_timeout;
+ return 0;
+}
+
+static const struct watchdog_info wdt_info = {
+ .identity = "VIA watchdog",
+ .options = WDIOF_CARDRESET |
+ WDIOF_SETTIMEOUT |
+ WDIOF_MAGICCLOSE |
+ WDIOF_KEEPALIVEPING,
+};
+
+static const struct watchdog_ops wdt_ops = {
+ .owner = THIS_MODULE,
+ .start = wdt_start,
+ .stop = wdt_stop,
+ .ping = wdt_ping,
+ .set_timeout = wdt_set_timeout,
+};
+
+static struct watchdog_device wdt_dev = {
+ .info = &wdt_info,
+ .ops = &wdt_ops,
+ .min_timeout = 1,
+ .max_timeout = WDT_TIMEOUT_MAX,
+};
+
+static int __devinit wdt_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ unsigned char conf;
+ int ret = -ENODEV;
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "cannot enable PCI device\n");
+ return -ENODEV;
+ }
+
+ /*
+ * Allocate a MMIO region which contains watchdog control register
+ * and counter, then configure the watchdog to use this region.
+ * This is possible only if PnP is properly enabled in BIOS.
+ * If not, the watchdog must be configured in BIOS manually.
+ */
+ if (allocate_resource(&iomem_resource, &wdt_res, VIA_WDT_MMIO_LEN,
+ 0xf0000000, 0xffffff00, 0xff, NULL, NULL)) {
+ dev_err(&pdev->dev, "MMIO allocation failed\n");
+ goto err_out_disable_device;
+ }
+
+ pci_write_config_dword(pdev, VIA_WDT_MMIO_BASE, wdt_res.start);
+ pci_read_config_byte(pdev, VIA_WDT_CONF, &conf);
+ conf |= VIA_WDT_CONF_ENABLE | VIA_WDT_CONF_MMIO;
+ pci_write_config_byte(pdev, VIA_WDT_CONF, conf);
+
+ pci_read_config_dword(pdev, VIA_WDT_MMIO_BASE, &mmio);
+ if (mmio) {
+ dev_info(&pdev->dev, "VIA Chipset watchdog MMIO: %x\n", mmio);
+ } else {
+ dev_err(&pdev->dev, "MMIO setting failed. Check BIOS.\n");
+ goto err_out_resource;
+ }
+
+ if (!request_mem_region(mmio, VIA_WDT_MMIO_LEN, "via_wdt")) {
+ dev_err(&pdev->dev, "MMIO region busy\n");
+ goto err_out_resource;
+ }
+
+ wdt_mem = ioremap(mmio, VIA_WDT_MMIO_LEN);
+ if (wdt_mem == NULL) {
+ dev_err(&pdev->dev, "cannot remap VIA wdt MMIO registers\n");
+ goto err_out_release;
+ }
+
+ wdt_dev.timeout = timeout;
+ watchdog_set_nowayout(&wdt_dev, nowayout);
+ if (readl(wdt_mem) & VIA_WDT_FIRED)
+ wdt_dev.bootstatus |= WDIOF_CARDRESET;
+
+ ret = watchdog_register_device(&wdt_dev);
+ if (ret)
+ goto err_out_iounmap;
+
+ /* start triggering, in case of watchdog already enabled by BIOS */
+ mod_timer(&timer, jiffies + WDT_HEARTBEAT);
+ return 0;
+
+err_out_iounmap:
+ iounmap(wdt_mem);
+err_out_release:
+ release_mem_region(mmio, VIA_WDT_MMIO_LEN);
+err_out_resource:
+ release_resource(&wdt_res);
+err_out_disable_device:
+ pci_disable_device(pdev);
+ return ret;
+}
+
+static void __devexit wdt_remove(struct pci_dev *pdev)
+{
+ watchdog_unregister_device(&wdt_dev);
+ del_timer(&timer);
+ iounmap(wdt_mem);
+ release_mem_region(mmio, VIA_WDT_MMIO_LEN);
+ release_resource(&wdt_res);
+ pci_disable_device(pdev);
+}
+
+static DEFINE_PCI_DEVICE_TABLE(wdt_pci_table) = {
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700) },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX800) },
+ { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX855) },
+ { 0 }
+};
+
+static struct pci_driver wdt_driver = {
+ .name = "via_wdt",
+ .id_table = wdt_pci_table,
+ .probe = wdt_probe,
+ .remove = __devexit_p(wdt_remove),
+};
+
+static int __init wdt_init(void)
+{
+ if (timeout < 1 || timeout > WDT_TIMEOUT_MAX)
+ timeout = WDT_TIMEOUT;
+ return pci_register_driver(&wdt_driver);
+}
+
+static void __exit wdt_exit(void)
+{
+ pci_unregister_driver(&wdt_driver);
+}
+
+module_init(wdt_init);
+module_exit(wdt_exit);
+
+MODULE_AUTHOR("Marc Vertes");
+MODULE_DESCRIPTION("Driver for watchdog timer on VIA chipset");
+MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index dd5d675..576a388 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -4,7 +4,7 @@
* (c) Copyright 2007 Vlad Drukker <vlad@storewiz.com>
* added support for W83627THF.
*
- * (c) Copyright 2003,2007 Pdraig Brady <P@draigBrady.com>
+ * (c) Copyright 2003,2007 Pádraig Brady <P@draigBrady.com>
*
* Based on advantechwdt.c which is based on wdt.c.
* Original copyright messages:
@@ -401,6 +401,6 @@ module_init(wdt_init);
module_exit(wdt_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Pdraig Brady <P@draigBrady.com>");
+MODULE_AUTHOR("Pádraig Brady <P@draigBrady.com>");
MODULE_DESCRIPTION("w83627hf/thf WDT driver");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/watchdog/wafer5823wdt.c b/drivers/watchdog/wafer5823wdt.c
index 42e940c..c3c3188 100644
--- a/drivers/watchdog/wafer5823wdt.c
+++ b/drivers/watchdog/wafer5823wdt.c
@@ -152,12 +152,12 @@ static long wafwdt_ioctl(struct file *file, unsigned int cmd,
return -EFAULT;
if (options & WDIOS_DISABLECARD) {
- wafwdt_start();
+ wafwdt_stop();
retval = 0;
}
if (options & WDIOS_ENABLECARD) {
- wafwdt_stop();
+ wafwdt_start();
retval = 0;
}
diff --git a/drivers/watchdog/wm831x_wdt.c b/drivers/watchdog/wm831x_wdt.c
index e789a47..263c883 100644
--- a/drivers/watchdog/wm831x_wdt.c
+++ b/drivers/watchdog/wm831x_wdt.c
@@ -199,7 +199,8 @@ static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
if (reg & WM831X_WDOG_DEBUG)
dev_warn(wm831x->dev, "Watchdog is paused\n");
- driver_data = kzalloc(sizeof(*driver_data), GFP_KERNEL);
+ driver_data = devm_kzalloc(&pdev->dev, sizeof(*driver_data),
+ GFP_KERNEL);
if (!driver_data) {
dev_err(wm831x->dev, "Unable to alloacate watchdog device\n");
ret = -ENOMEM;
@@ -213,11 +214,9 @@ static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
wm831x_wdt->info = &wm831x_wdt_info;
wm831x_wdt->ops = &wm831x_wdt_ops;
+ watchdog_set_nowayout(wm831x_wdt, nowayout);
watchdog_set_drvdata(wm831x_wdt, driver_data);
- if (nowayout)
- wm831x_wdt->status |= WDOG_NO_WAY_OUT;
-
reg = wm831x_reg_read(wm831x, WM831X_WATCHDOG);
reg &= WM831X_WDOG_TO_MASK;
for (i = 0; i < ARRAY_SIZE(wm831x_wdt_cfgs); i++)
@@ -252,7 +251,7 @@ static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
dev_err(wm831x->dev,
"Failed to request update GPIO: %d\n",
ret);
- goto err_alloc;
+ goto err;
}
ret = gpio_direction_output(pdata->update_gpio, 0);
@@ -294,8 +293,6 @@ static int __devinit wm831x_wdt_probe(struct platform_device *pdev)
err_gpio:
if (driver_data->update_gpio)
gpio_free(driver_data->update_gpio);
-err_alloc:
- kfree(driver_data);
err:
return ret;
}
@@ -320,17 +317,7 @@ static struct platform_driver wm831x_wdt_driver = {
},
};
-static int __init wm831x_wdt_init(void)
-{
- return platform_driver_register(&wm831x_wdt_driver);
-}
-module_init(wm831x_wdt_init);
-
-static void __exit wm831x_wdt_exit(void)
-{
- platform_driver_unregister(&wm831x_wdt_driver);
-}
-module_exit(wm831x_wdt_exit);
+module_platform_driver(wm831x_wdt_driver);
MODULE_AUTHOR("Mark Brown");
MODULE_DESCRIPTION("WM831x Watchdog");
diff --git a/drivers/watchdog/wm8350_wdt.c b/drivers/watchdog/wm8350_wdt.c
index b68d928..5d7113c 100644
--- a/drivers/watchdog/wm8350_wdt.c
+++ b/drivers/watchdog/wm8350_wdt.c
@@ -212,10 +212,10 @@ static long wm8350_wdt_ioctl(struct file *file, unsigned int cmd,
/* Setting both simultaneously means at least one must fail */
if (options == WDIOS_DISABLECARD)
- ret = wm8350_wdt_start(wm8350);
+ ret = wm8350_wdt_stop(wm8350);
if (options == WDIOS_ENABLECARD)
- ret = wm8350_wdt_stop(wm8350);
+ ret = wm8350_wdt_start(wm8350);
break;
}
@@ -311,17 +311,7 @@ static struct platform_driver wm8350_wdt_driver = {
},
};
-static int __init wm8350_wdt_init(void)
-{
- return platform_driver_register(&wm8350_wdt_driver);
-}
-module_init(wm8350_wdt_init);
-
-static void __exit wm8350_wdt_exit(void)
-{
- platform_driver_unregister(&wm8350_wdt_driver);
-}
-module_exit(wm8350_wdt_exit);
+module_platform_driver(wm8350_wdt_driver);
MODULE_AUTHOR("Mark Brown");
MODULE_DESCRIPTION("WM8350 Watchdog");
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index 8795480..a1ced52 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -86,6 +86,7 @@ config XEN_BACKEND
config XENFS
tristate "Xen filesystem"
+ select XEN_PRIVCMD
default y
help
The xen filesystem provides a way for domains to share
@@ -171,4 +172,10 @@ config XEN_PCIDEV_BACKEND
xen-pciback.hide=(03:00.0)(04:00.0)
If in doubt, say m.
+
+config XEN_PRIVCMD
+ tristate
+ depends on XEN
+ default m
+
endmenu
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 974fffd..aa31337 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -19,7 +19,9 @@ obj-$(CONFIG_XEN_TMEM) += tmem.o
obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
obj-$(CONFIG_XEN_DOM0) += pci.o
obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
+obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o
xen-evtchn-y := evtchn.o
xen-gntdev-y := gntdev.o
xen-gntalloc-y := gntalloc.o
+xen-privcmd-y := privcmd.o
diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
index ba6eda4..0edb91c 100644
--- a/drivers/xen/biomerge.c
+++ b/drivers/xen/biomerge.c
@@ -1,5 +1,6 @@
#include <linux/bio.h>
#include <linux/io.h>
+#include <linux/export.h>
#include <xen/page.h>
bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
@@ -11,3 +12,4 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
((mfn1 == mfn2) || ((mfn1+1) == mfn2));
}
+EXPORT_SYMBOL(xen_biovec_phys_mergeable);
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index 14e2d99..4dcfced 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -30,7 +30,8 @@ static int vcpu_online(unsigned int cpu)
sprintf(dir, "cpu/%u", cpu);
err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state);
if (err != 1) {
- printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
+ if (!xen_initial_domain())
+ printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
return err;
}
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 6e075cd..e5e5812 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -87,6 +87,7 @@ enum xen_irq_type {
*/
struct irq_info {
struct list_head list;
+ int refcnt;
enum xen_irq_type type; /* type */
unsigned irq;
unsigned short evtchn; /* event channel */
@@ -406,6 +407,7 @@ static void xen_irq_init(unsigned irq)
panic("Unable to allocate metadata for IRQ%d\n", irq);
info->type = IRQT_UNBOUND;
+ info->refcnt = -1;
irq_set_handler_data(irq, info);
@@ -469,6 +471,8 @@ static void xen_free_irq(unsigned irq)
irq_set_handler_data(irq, NULL);
+ WARN_ON(info->refcnt > 0);
+
kfree(info);
/* Legacy IRQ descriptors are managed by the arch. */
@@ -637,7 +641,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
if (irq != -1) {
printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
irq, gsi);
- goto out; /* XXX need refcount? */
+ goto out;
}
irq = xen_allocate_irq_gsi(gsi);
@@ -939,9 +943,16 @@ static void unbind_from_irq(unsigned int irq)
{
struct evtchn_close close;
int evtchn = evtchn_from_irq(irq);
+ struct irq_info *info = irq_get_handler_data(irq);
mutex_lock(&irq_mapping_update_lock);
+ if (info->refcnt > 0) {
+ info->refcnt--;
+ if (info->refcnt != 0)
+ goto done;
+ }
+
if (VALID_EVTCHN(evtchn)) {
close.port = evtchn;
if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
@@ -970,6 +981,7 @@ static void unbind_from_irq(unsigned int irq)
xen_free_irq(irq);
+ done:
mutex_unlock(&irq_mapping_update_lock);
}
@@ -1065,6 +1077,69 @@ void unbind_from_irqhandler(unsigned int irq, void *dev_id)
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
+int evtchn_make_refcounted(unsigned int evtchn)
+{
+ int irq = evtchn_to_irq[evtchn];
+ struct irq_info *info;
+
+ if (irq == -1)
+ return -ENOENT;
+
+ info = irq_get_handler_data(irq);
+
+ if (!info)
+ return -ENOENT;
+
+ WARN_ON(info->refcnt != -1);
+
+ info->refcnt = 1;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(evtchn_make_refcounted);
+
+int evtchn_get(unsigned int evtchn)
+{
+ int irq;
+ struct irq_info *info;
+ int err = -ENOENT;
+
+ if (evtchn >= NR_EVENT_CHANNELS)
+ return -EINVAL;
+
+ mutex_lock(&irq_mapping_update_lock);
+
+ irq = evtchn_to_irq[evtchn];
+ if (irq == -1)
+ goto done;
+
+ info = irq_get_handler_data(irq);
+
+ if (!info)
+ goto done;
+
+ err = -EINVAL;
+ if (info->refcnt <= 0)
+ goto done;
+
+ info->refcnt++;
+ err = 0;
+ done:
+ mutex_unlock(&irq_mapping_update_lock);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(evtchn_get);
+
+void evtchn_put(unsigned int evtchn)
+{
+ int irq = evtchn_to_irq[evtchn];
+ if (WARN_ON(irq == -1))
+ return;
+ unbind_from_irq(irq);
+}
+EXPORT_SYMBOL_GPL(evtchn_put);
+
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
int irq = per_cpu(ipi_to_irq, cpu)[vector];
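The evtchn_make_refcounted()/evtchn_get()/evtchn_put() helpers added above are what the later gntdev and gntalloc hunks build on: a port is opted into refcounting right after it is bound, and any code that stashes the port for a deferred notify pins it first and drops the pin when done (the last put tears the binding down). A sketch of that usage, with foo_* names and the trivial handler being placeholders:

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <xen/events.h>

static irqreturn_t foo_handler(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int foo_bind(unsigned int port, void *dev_id)
{
        int rc;

        /* The returned irq number is ignored in this sketch. */
        rc = bind_evtchn_to_irqhandler(port, foo_handler, 0, "foo", dev_id);
        if (rc < 0)
                return rc;

        /* Opt this binding into per-port reference counting. */
        return evtchn_make_refcounted(port);
}

static int foo_notify(unsigned int port)
{
        /* Pin the port so the binding cannot vanish underneath us... */
        if (evtchn_get(port))
                return -EINVAL;

        notify_remote_via_evtchn(port);

        /* ...then drop the pin; the last put unbinds the channel. */
        evtchn_put(port);
        return 0;
}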
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index dbc13e9..b1f60a0 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -268,7 +268,7 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED,
u->name, (void *)(unsigned long)port);
if (rc >= 0)
- rc = 0;
+ rc = evtchn_make_refcounted(port);
return rc;
}
diff --git a/drivers/xen/gntalloc.c b/drivers/xen/gntalloc.c
index e1c4c6e..934985d 100644
--- a/drivers/xen/gntalloc.c
+++ b/drivers/xen/gntalloc.c
@@ -74,7 +74,7 @@ MODULE_PARM_DESC(limit, "Maximum number of grants that may be allocated by "
"the gntalloc device");
static LIST_HEAD(gref_list);
-static DEFINE_SPINLOCK(gref_lock);
+static DEFINE_MUTEX(gref_mutex);
static int gref_size;
struct notify_info {
@@ -99,6 +99,12 @@ struct gntalloc_file_private_data {
uint64_t index;
};
+struct gntalloc_vma_private_data {
+ struct gntalloc_gref *gref;
+ int users;
+ int count;
+};
+
static void __del_gref(struct gntalloc_gref *gref);
static void do_cleanup(void)
@@ -143,15 +149,15 @@ static int add_grefs(struct ioctl_gntalloc_alloc_gref *op,
}
/* Add to gref lists. */
- spin_lock(&gref_lock);
+ mutex_lock(&gref_mutex);
list_splice_tail(&queue_gref, &gref_list);
list_splice_tail(&queue_file, &priv->list);
- spin_unlock(&gref_lock);
+ mutex_unlock(&gref_mutex);
return 0;
undo:
- spin_lock(&gref_lock);
+ mutex_lock(&gref_mutex);
gref_size -= (op->count - i);
list_for_each_entry(gref, &queue_file, next_file) {
@@ -167,7 +173,7 @@ undo:
*/
if (unlikely(!list_empty(&queue_gref)))
list_splice_tail(&queue_gref, &gref_list);
- spin_unlock(&gref_lock);
+ mutex_unlock(&gref_mutex);
return rc;
}
@@ -178,8 +184,10 @@ static void __del_gref(struct gntalloc_gref *gref)
tmp[gref->notify.pgoff] = 0;
kunmap(gref->page);
}
- if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT)
+ if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
notify_remote_via_evtchn(gref->notify.event);
+ evtchn_put(gref->notify.event);
+ }
gref->notify.flags = 0;
@@ -189,6 +197,8 @@ static void __del_gref(struct gntalloc_gref *gref)
if (!gnttab_end_foreign_access_ref(gref->gref_id, 0))
return;
+
+ gnttab_free_grant_reference(gref->gref_id);
}
gref_size--;
@@ -251,7 +261,7 @@ static int gntalloc_release(struct inode *inode, struct file *filp)
pr_debug("%s: priv %p\n", __func__, priv);
- spin_lock(&gref_lock);
+ mutex_lock(&gref_mutex);
while (!list_empty(&priv->list)) {
gref = list_entry(priv->list.next,
struct gntalloc_gref, next_file);
@@ -261,7 +271,7 @@ static int gntalloc_release(struct inode *inode, struct file *filp)
__del_gref(gref);
}
kfree(priv);
- spin_unlock(&gref_lock);
+ mutex_unlock(&gref_mutex);
return 0;
}
@@ -286,21 +296,21 @@ static long gntalloc_ioctl_alloc(struct gntalloc_file_private_data *priv,
goto out;
}
- spin_lock(&gref_lock);
+ mutex_lock(&gref_mutex);
/* Clean up pages that were at zero (local) users but were still mapped
* by remote domains. Since those pages count towards the limit that we
* are about to enforce, removing them here is a good idea.
*/
do_cleanup();
if (gref_size + op.count > limit) {
- spin_unlock(&gref_lock);
+ mutex_unlock(&gref_mutex);
rc = -ENOSPC;
goto out_free;
}
gref_size += op.count;
op.index = priv->index;
priv->index += op.count * PAGE_SIZE;
- spin_unlock(&gref_lock);
+ mutex_unlock(&gref_mutex);
rc = add_grefs(&op, gref_ids, priv);
if (rc < 0)
@@ -343,7 +353,7 @@ static long gntalloc_ioctl_dealloc(struct gntalloc_file_private_data *priv,
goto dealloc_grant_out;
}
- spin_lock(&gref_lock);
+ mutex_lock(&gref_mutex);
gref = find_grefs(priv, op.index, op.count);
if (gref) {
/* Remove from the file list only, and decrease reference count.
@@ -363,7 +373,7 @@ static long gntalloc_ioctl_dealloc(struct gntalloc_file_private_data *priv,
do_cleanup();
- spin_unlock(&gref_lock);
+ mutex_unlock(&gref_mutex);
dealloc_grant_out:
return rc;
}
@@ -383,7 +393,7 @@ static long gntalloc_ioctl_unmap_notify(struct gntalloc_file_private_data *priv,
index = op.index & ~(PAGE_SIZE - 1);
pgoff = op.index & (PAGE_SIZE - 1);
- spin_lock(&gref_lock);
+ mutex_lock(&gref_mutex);
gref = find_grefs(priv, index, 1);
if (!gref) {
@@ -396,12 +406,30 @@ static long gntalloc_ioctl_unmap_notify(struct gntalloc_file_private_data *priv,
goto unlock_out;
}
+ /* We need to grab a reference to the event channel we are going to use
+ * to send the notify before releasing the reference we may already have
+ * (if someone has called this ioctl twice). This is required so that
+ * it is possible to change the clear_byte part of the notification
+ * without disturbing the event channel part, which may now be the last
+ * reference to that event channel.
+ */
+ if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
+ if (evtchn_get(op.event_channel_port)) {
+ rc = -EINVAL;
+ goto unlock_out;
+ }
+ }
+
+ if (gref->notify.flags & UNMAP_NOTIFY_SEND_EVENT)
+ evtchn_put(gref->notify.event);
+
gref->notify.flags = op.action;
gref->notify.pgoff = pgoff;
gref->notify.event = op.event_channel_port;
rc = 0;
+
unlock_out:
- spin_unlock(&gref_lock);
+ mutex_unlock(&gref_mutex);
return rc;
}
@@ -429,26 +457,40 @@ static long gntalloc_ioctl(struct file *filp, unsigned int cmd,
static void gntalloc_vma_open(struct vm_area_struct *vma)
{
- struct gntalloc_gref *gref = vma->vm_private_data;
- if (!gref)
+ struct gntalloc_vma_private_data *priv = vma->vm_private_data;
+
+ if (!priv)
return;
- spin_lock(&gref_lock);
- gref->users++;
- spin_unlock(&gref_lock);
+ mutex_lock(&gref_mutex);
+ priv->users++;
+ mutex_unlock(&gref_mutex);
}
static void gntalloc_vma_close(struct vm_area_struct *vma)
{
- struct gntalloc_gref *gref = vma->vm_private_data;
- if (!gref)
+ struct gntalloc_vma_private_data *priv = vma->vm_private_data;
+ struct gntalloc_gref *gref, *next;
+ int i;
+
+ if (!priv)
return;
- spin_lock(&gref_lock);
- gref->users--;
- if (gref->users == 0)
- __del_gref(gref);
- spin_unlock(&gref_lock);
+ mutex_lock(&gref_mutex);
+ priv->users--;
+ if (priv->users == 0) {
+ gref = priv->gref;
+ for (i = 0; i < priv->count; i++) {
+ gref->users--;
+ next = list_entry(gref->next_gref.next,
+ struct gntalloc_gref, next_gref);
+ if (gref->users == 0)
+ __del_gref(gref);
+ gref = next;
+ }
+ kfree(priv);
+ }
+ mutex_unlock(&gref_mutex);
}
static struct vm_operations_struct gntalloc_vmops = {
@@ -459,30 +501,41 @@ static struct vm_operations_struct gntalloc_vmops = {
static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct gntalloc_file_private_data *priv = filp->private_data;
+ struct gntalloc_vma_private_data *vm_priv;
struct gntalloc_gref *gref;
int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
int rv, i;
- pr_debug("%s: priv %p, page %lu+%d\n", __func__,
- priv, vma->vm_pgoff, count);
-
if (!(vma->vm_flags & VM_SHARED)) {
printk(KERN_ERR "%s: Mapping must be shared.\n", __func__);
return -EINVAL;
}
- spin_lock(&gref_lock);
+ vm_priv = kmalloc(sizeof(*vm_priv), GFP_KERNEL);
+ if (!vm_priv)
+ return -ENOMEM;
+
+ mutex_lock(&gref_mutex);
+
+ pr_debug("%s: priv %p,%p, page %lu+%d\n", __func__,
+ priv, vm_priv, vma->vm_pgoff, count);
+
gref = find_grefs(priv, vma->vm_pgoff << PAGE_SHIFT, count);
if (gref == NULL) {
rv = -ENOENT;
pr_debug("%s: Could not find grant reference",
__func__);
+ kfree(vm_priv);
goto out_unlock;
}
- vma->vm_private_data = gref;
+ vm_priv->gref = gref;
+ vm_priv->users = 1;
+ vm_priv->count = count;
+
+ vma->vm_private_data = vm_priv;
- vma->vm_flags |= VM_RESERVED;
+ vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
vma->vm_ops = &gntalloc_vmops;
@@ -499,7 +552,7 @@ static int gntalloc_mmap(struct file *filp, struct vm_area_struct *vma)
rv = 0;
out_unlock:
- spin_unlock(&gref_lock);
+ mutex_unlock(&gref_mutex);
return rv;
}
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index afca14d..99d8151 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -193,8 +193,10 @@ static void gntdev_put_map(struct grant_map *map)
atomic_sub(map->count, &pages_mapped);
- if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT)
+ if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
notify_remote_via_evtchn(map->notify.event);
+ evtchn_put(map->notify.event);
+ }
if (map->pages) {
if (!use_ptemod)
@@ -312,7 +314,8 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
}
}
- err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages + offset, pages);
+ err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages + offset,
+ pages, true);
if (err)
return err;
@@ -599,6 +602,8 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
struct ioctl_gntdev_unmap_notify op;
struct grant_map *map;
int rc;
+ int out_flags;
+ unsigned int out_event;
if (copy_from_user(&op, u, sizeof(op)))
return -EFAULT;
@@ -606,6 +611,21 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
return -EINVAL;
+ /* We need to grab a reference to the event channel we are going to use
+ * to send the notify before releasing the reference we may already have
+ * (if someone has called this ioctl twice). This is required so that
+ * it is possible to change the clear_byte part of the notification
+ * without disturbing the event channel part, which may now be the last
+ * reference to that event channel.
+ */
+ if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
+ if (evtchn_get(op.event_channel_port))
+ return -EINVAL;
+ }
+
+ out_flags = op.action;
+ out_event = op.event_channel_port;
+
spin_lock(&priv->lock);
list_for_each_entry(map, &priv->maps, next) {
@@ -624,12 +644,22 @@ static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
goto unlock_out;
}
+ out_flags = map->notify.flags;
+ out_event = map->notify.event;
+
map->notify.flags = op.action;
map->notify.addr = op.index - (map->index << PAGE_SHIFT);
map->notify.event = op.event_channel_port;
+
rc = 0;
+
unlock_out:
spin_unlock(&priv->lock);
+
+ /* Drop the reference to the event channel we did not save in the map */
+ if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
+ evtchn_put(out_event);
+
return rc;
}
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index bf1c094..b4d4eac 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -44,16 +44,19 @@
#include <xen/page.h>
#include <xen/grant_table.h>
#include <xen/interface/memory.h>
+#include <xen/hvc-console.h>
#include <asm/xen/hypercall.h>
#include <asm/pgtable.h>
#include <asm/sync_bitops.h>
-
/* External tools reserve first few grant table entries. */
#define NR_RESERVED_ENTRIES 8
#define GNTTAB_LIST_END 0xffffffff
-#define GREFS_PER_GRANT_FRAME (PAGE_SIZE / sizeof(struct grant_entry))
+#define GREFS_PER_GRANT_FRAME \
+(grant_table_version == 1 ? \
+(PAGE_SIZE / sizeof(struct grant_entry_v1)) : \
+(PAGE_SIZE / sizeof(union grant_entry_v2)))
static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;
@@ -64,13 +67,97 @@ static DEFINE_SPINLOCK(gnttab_list_lock);
unsigned long xen_hvm_resume_frames;
EXPORT_SYMBOL_GPL(xen_hvm_resume_frames);
-static struct grant_entry *shared;
+static union {
+ struct grant_entry_v1 *v1;
+ union grant_entry_v2 *v2;
+ void *addr;
+} gnttab_shared;
+
+/* This is a structure of function pointers for the grant table. */
+struct gnttab_ops {
+ /*
+ * Mapping a list of frames for storing grant entries. The frames parameter
+ * stores the grant table address while the grant table is being set up;
+ * nr_gframes is the number of frames to map for the grant table. Returning
+ * GNTST_okay means success and negative value means failure.
+ */
+ int (*map_frames)(unsigned long *frames, unsigned int nr_gframes);
+ /*
+ * Release a list of frames which are mapped in map_frames for grant
+ * entry status.
+ */
+ void (*unmap_frames)(void);
+ /*
+ * Introducing a valid entry into the grant table, granting the frame of
+ * this grant entry to a domain for accessing or transferring. Ref
+ * parameter is reference of this introduced grant entry, domid is id of
+ * granted domain, frame is the page frame to be granted, and flags is
+ * status of the grant entry to be updated.
+ */
+ void (*update_entry)(grant_ref_t ref, domid_t domid,
+ unsigned long frame, unsigned flags);
+ /*
+ * Stop granting a grant entry to domain for accessing. Ref parameter is
+ * reference of a grant entry whose grant access will be stopped,
+ * readonly is not in use in this function. If the grant entry is
+ * currently mapped for reading or writing, just return failure(==0)
+ * directly and don't tear down the grant access. Otherwise, stop grant
+ * access for this entry and return success(==1).
+ */
+ int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
+ /*
+ * Stop granting a grant entry to domain for transfer. Ref parameter is
+ * reference of a grant entry whose grant transfer will be stopped. If
+ * transfer has not started, just reclaim the grant entry and return
+ * failure(==0). Otherwise, wait for the transfer to complete and then
+ * return the frame.
+ */
+ unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
+ /*
+ * Query the status of a grant entry. Ref parameter is reference of
+ * queried grant entry, return value is the status of queried entry.
+ * Detailed status (writing/reading) can be obtained from the return value
+ * by bit operations.
+ */
+ int (*query_foreign_access)(grant_ref_t ref);
+ /*
+ * Grant a domain access to a range of bytes within the page referred to by
+ * an available grant entry. Ref parameter is reference of a grant entry
+ * which will be sub-page accessed, domid is id of grantee domain, frame
+ * is frame address of subpage grant, flags is grant type and flag
+ * information, page_off is offset of the range of bytes, and length is
+ * length of bytes to be accessed.
+ */
+ void (*update_subpage_entry)(grant_ref_t ref, domid_t domid,
+ unsigned long frame, int flags,
+ unsigned page_off, unsigned length);
+ /*
+ * Redirect an available grant entry on domain A to another grant
+ * reference of domain B, then allow domain C to use grant reference
+ * of domain B transitively. Ref parameter is an available grant entry
+ * reference on domain A, domid is id of domain C which accesses grant
+ * entry transitively, flags is grant type and flag information,
+ * trans_domid is id of domain B whose grant entry is finally accessed
+ * transitively, trans_gref is grant entry transitive reference of
+ * domain B.
+ */
+ void (*update_trans_entry)(grant_ref_t ref, domid_t domid, int flags,
+ domid_t trans_domid, grant_ref_t trans_gref);
+};
+
+static struct gnttab_ops *gnttab_interface;
+
+/* This reflects the status of grant entries, so it acts as a global value. */
+static grant_status_t *grstatus;
+
+static int grant_table_version;
static struct gnttab_free_callback *gnttab_free_callback_list;
static int gnttab_expand(unsigned int req_entries);
#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
+#define SPP (PAGE_SIZE / sizeof(grant_status_t))
static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
@@ -142,23 +229,33 @@ static void put_free_entry(grant_ref_t ref)
spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
-static void update_grant_entry(grant_ref_t ref, domid_t domid,
- unsigned long frame, unsigned flags)
+/*
+ * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
+ * Introducing a valid entry into the grant table:
+ * 1. Write ent->domid.
+ * 2. Write ent->frame:
+ * GTF_permit_access: Frame to which access is permitted.
+ * GTF_accept_transfer: Pseudo-phys frame slot being filled by new
+ * frame, or zero if none.
+ * 3. Write memory barrier (WMB).
+ * 4. Write ent->flags, inc. valid type.
+ */
+static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
+ unsigned long frame, unsigned flags)
+{
+ gnttab_shared.v1[ref].domid = domid;
+ gnttab_shared.v1[ref].frame = frame;
+ wmb();
+ gnttab_shared.v1[ref].flags = flags;
+}
+
+static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
+ unsigned long frame, unsigned flags)
{
- /*
- * Introducing a valid entry into the grant table:
- * 1. Write ent->domid.
- * 2. Write ent->frame:
- * GTF_permit_access: Frame to which access is permitted.
- * GTF_accept_transfer: Pseudo-phys frame slot being filled by new
- * frame, or zero if none.
- * 3. Write memory barrier (WMB).
- * 4. Write ent->flags, inc. valid type.
- */
- shared[ref].frame = frame;
- shared[ref].domid = domid;
+ gnttab_shared.v2[ref].hdr.domid = domid;
+ gnttab_shared.v2[ref].full_page.frame = frame;
wmb();
- shared[ref].flags = flags;
+ gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
}
/*
@@ -167,7 +264,7 @@ static void update_grant_entry(grant_ref_t ref, domid_t domid,
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
unsigned long frame, int readonly)
{
- update_grant_entry(ref, domid, frame,
+ gnttab_interface->update_entry(ref, domid, frame,
GTF_permit_access | (readonly ? GTF_readonly : 0));
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
@@ -187,31 +284,184 @@ int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
-int gnttab_query_foreign_access(grant_ref_t ref)
+void gnttab_update_subpage_entry_v2(grant_ref_t ref, domid_t domid,
+ unsigned long frame, int flags,
+ unsigned page_off,
+ unsigned length)
+{
+ gnttab_shared.v2[ref].sub_page.frame = frame;
+ gnttab_shared.v2[ref].sub_page.page_off = page_off;
+ gnttab_shared.v2[ref].sub_page.length = length;
+ gnttab_shared.v2[ref].hdr.domid = domid;
+ wmb();
+ gnttab_shared.v2[ref].hdr.flags =
+ GTF_permit_access | GTF_sub_page | flags;
+}
+
+int gnttab_grant_foreign_access_subpage_ref(grant_ref_t ref, domid_t domid,
+ unsigned long frame, int flags,
+ unsigned page_off,
+ unsigned length)
+{
+ if (flags & (GTF_accept_transfer | GTF_reading |
+ GTF_writing | GTF_transitive))
+ return -EPERM;
+
+ if (gnttab_interface->update_subpage_entry == NULL)
+ return -ENOSYS;
+
+ gnttab_interface->update_subpage_entry(ref, domid, frame, flags,
+ page_off, length);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage_ref);
+
+int gnttab_grant_foreign_access_subpage(domid_t domid, unsigned long frame,
+ int flags, unsigned page_off,
+ unsigned length)
+{
+ int ref, rc;
+
+ ref = get_free_entries(1);
+ if (unlikely(ref < 0))
+ return -ENOSPC;
+
+ rc = gnttab_grant_foreign_access_subpage_ref(ref, domid, frame, flags,
+ page_off, length);
+ if (rc < 0) {
+ put_free_entry(ref);
+ return rc;
+ }
+
+ return ref;
+}
+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage);
+
+bool gnttab_subpage_grants_available(void)
+{
+ return gnttab_interface->update_subpage_entry != NULL;
+}
+EXPORT_SYMBOL_GPL(gnttab_subpage_grants_available);
+
+void gnttab_update_trans_entry_v2(grant_ref_t ref, domid_t domid,
+ int flags, domid_t trans_domid,
+ grant_ref_t trans_gref)
+{
+ gnttab_shared.v2[ref].transitive.trans_domid = trans_domid;
+ gnttab_shared.v2[ref].transitive.gref = trans_gref;
+ gnttab_shared.v2[ref].hdr.domid = domid;
+ wmb();
+ gnttab_shared.v2[ref].hdr.flags =
+ GTF_permit_access | GTF_transitive | flags;
+}
+
+int gnttab_grant_foreign_access_trans_ref(grant_ref_t ref, domid_t domid,
+ int flags, domid_t trans_domid,
+ grant_ref_t trans_gref)
+{
+ if (flags & (GTF_accept_transfer | GTF_reading |
+ GTF_writing | GTF_sub_page))
+ return -EPERM;
+
+ if (gnttab_interface->update_trans_entry == NULL)
+ return -ENOSYS;
+
+ gnttab_interface->update_trans_entry(ref, domid, flags, trans_domid,
+ trans_gref);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans_ref);
+
+int gnttab_grant_foreign_access_trans(domid_t domid, int flags,
+ domid_t trans_domid,
+ grant_ref_t trans_gref)
{
- u16 nflags;
+ int ref, rc;
+
+ ref = get_free_entries(1);
+ if (unlikely(ref < 0))
+ return -ENOSPC;
+
+ rc = gnttab_grant_foreign_access_trans_ref(ref, domid, flags,
+ trans_domid, trans_gref);
+ if (rc < 0) {
+ put_free_entry(ref);
+ return rc;
+ }
- nflags = shared[ref].flags;
+ return ref;
+}
+EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans);
- return nflags & (GTF_reading|GTF_writing);
+bool gnttab_trans_grants_available(void)
+{
+ return gnttab_interface->update_trans_entry != NULL;
+}
+EXPORT_SYMBOL_GPL(gnttab_trans_grants_available);
+
+static int gnttab_query_foreign_access_v1(grant_ref_t ref)
+{
+ return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
+}
+
+static int gnttab_query_foreign_access_v2(grant_ref_t ref)
+{
+ return grstatus[ref] & (GTF_reading|GTF_writing);
+}
+
+int gnttab_query_foreign_access(grant_ref_t ref)
+{
+ return gnttab_interface->query_foreign_access(ref);
}
EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
-int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
+static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
{
u16 flags, nflags;
+ u16 *pflags;
- nflags = shared[ref].flags;
+ pflags = &gnttab_shared.v1[ref].flags;
+ nflags = *pflags;
do {
flags = nflags;
if (flags & (GTF_reading|GTF_writing)) {
printk(KERN_ALERT "WARNING: g.e. still in use!\n");
return 0;
}
- } while ((nflags = sync_cmpxchg(&shared[ref].flags, flags, 0)) != flags);
+ } while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);
return 1;
}
+
+static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
+{
+ gnttab_shared.v2[ref].hdr.flags = 0;
+ mb();
+ if (grstatus[ref] & (GTF_reading|GTF_writing)) {
+ return 0;
+ } else {
+ /* The read of grstatus needs to have acquire
+ semantics. On x86, reads already have
+ that, and we just need to protect against
+ compiler reorderings. On other
+ architectures we may need a full
+ barrier. */
+#ifdef CONFIG_X86
+ barrier();
+#else
+ mb();
+#endif
+ }
+
+ return 1;
+}
+
+int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
+{
+ return gnttab_interface->end_foreign_access_ref(ref, readonly);
+}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
@@ -246,37 +496,76 @@ EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
unsigned long pfn)
{
- update_grant_entry(ref, domid, pfn, GTF_accept_transfer);
+ gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
-unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
+static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
{
unsigned long frame;
u16 flags;
+ u16 *pflags;
+
+ pflags = &gnttab_shared.v1[ref].flags;
/*
* If a transfer is not even yet started, try to reclaim the grant
* reference and return failure (== 0).
*/
- while (!((flags = shared[ref].flags) & GTF_transfer_committed)) {
- if (sync_cmpxchg(&shared[ref].flags, flags, 0) == flags)
+ while (!((flags = *pflags) & GTF_transfer_committed)) {
+ if (sync_cmpxchg(pflags, flags, 0) == flags)
return 0;
cpu_relax();
}
/* If a transfer is in progress then wait until it is completed. */
while (!(flags & GTF_transfer_completed)) {
- flags = shared[ref].flags;
+ flags = *pflags;
cpu_relax();
}
rmb(); /* Read the frame number /after/ reading completion status. */
- frame = shared[ref].frame;
+ frame = gnttab_shared.v1[ref].frame;
+ BUG_ON(frame == 0);
+
+ return frame;
+}
+
+static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
+{
+ unsigned long frame;
+ u16 flags;
+ u16 *pflags;
+
+ pflags = &gnttab_shared.v2[ref].hdr.flags;
+
+ /*
+ * If a transfer is not even yet started, try to reclaim the grant
+ * reference and return failure (== 0).
+ */
+ while (!((flags = *pflags) & GTF_transfer_committed)) {
+ if (sync_cmpxchg(pflags, flags, 0) == flags)
+ return 0;
+ cpu_relax();
+ }
+
+ /* If a transfer is in progress then wait until it is completed. */
+ while (!(flags & GTF_transfer_completed)) {
+ flags = *pflags;
+ cpu_relax();
+ }
+
+ rmb(); /* Read the frame number /after/ reading completion status. */
+ frame = gnttab_shared.v2[ref].full_page.frame;
BUG_ON(frame == 0);
return frame;
}
+
+unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
+{
+ return gnttab_interface->end_foreign_transfer_ref(ref);
+}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
@@ -448,8 +737,8 @@ unsigned int gnttab_max_grant_frames(void)
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
- struct gnttab_map_grant_ref *kmap_ops,
- struct page **pages, unsigned int count)
+ struct gnttab_map_grant_ref *kmap_ops,
+ struct page **pages, unsigned int count)
{
int i, ret;
pte_t *pte;
@@ -472,24 +761,10 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
(map_ops[i].host_addr & ~PAGE_MASK));
mfn = pte_mfn(*pte);
} else {
- /* If you really wanted to do this:
- * mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
- *
- * The reason we do not implement it is b/c on the
- * unmap path (gnttab_unmap_refs) we have no means of
- * checking whether the page is !GNTMAP_contains_pte.
- *
- * That is without some extra data-structure to carry
- * the struct page, bool clear_pte, and list_head next
- * tuples and deal with allocation/delallocation, etc.
- *
- * The users of this API set the GNTMAP_contains_pte
- * flag so lets just return not supported until it
- * becomes neccessary to implement.
- */
- return -EOPNOTSUPP;
+ mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
}
- ret = m2p_add_override(mfn, pages[i], &kmap_ops[i]);
+ ret = m2p_add_override(mfn, pages[i], kmap_ops ?
+ &kmap_ops[i] : NULL);
if (ret)
return ret;
}
@@ -499,7 +774,7 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
EXPORT_SYMBOL_GPL(gnttab_map_refs);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
- struct page **pages, unsigned int count)
+ struct page **pages, unsigned int count, bool clear_pte)
{
int i, ret;
@@ -511,7 +786,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
return ret;
for (i = 0; i < count; i++) {
- ret = m2p_remove_override(pages[i], true /* clear the PTE */);
+ ret = m2p_remove_override(pages[i], clear_pte);
if (ret)
return ret;
}
@@ -520,6 +795,77 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
+static unsigned nr_status_frames(unsigned nr_grant_frames)
+{
+ return (nr_grant_frames * GREFS_PER_GRANT_FRAME + SPP - 1) / SPP;
+}
+
+static int gnttab_map_frames_v1(unsigned long *frames, unsigned int nr_gframes)
+{
+ int rc;
+
+ rc = arch_gnttab_map_shared(frames, nr_gframes,
+ gnttab_max_grant_frames(),
+ &gnttab_shared.addr);
+ BUG_ON(rc);
+
+ return 0;
+}
+
+static void gnttab_unmap_frames_v1(void)
+{
+ arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
+}
+
+static int gnttab_map_frames_v2(unsigned long *frames, unsigned int nr_gframes)
+{
+ uint64_t *sframes;
+ unsigned int nr_sframes;
+ struct gnttab_get_status_frames getframes;
+ int rc;
+
+ nr_sframes = nr_status_frames(nr_gframes);
+
+ /* No need for kzalloc: the buffer is fully initialized by the following
+ * GNTTABOP_get_status_frames hypercall.
+ */
+ sframes = kmalloc(nr_sframes * sizeof(uint64_t), GFP_ATOMIC);
+ if (!sframes)
+ return -ENOMEM;
+
+ getframes.dom = DOMID_SELF;
+ getframes.nr_frames = nr_sframes;
+ set_xen_guest_handle(getframes.frame_list, sframes);
+
+ rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
+ &getframes, 1);
+ if (rc == -ENOSYS) {
+ kfree(sframes);
+ return -ENOSYS;
+ }
+
+ BUG_ON(rc || getframes.status);
+
+ rc = arch_gnttab_map_status(sframes, nr_sframes,
+ nr_status_frames(gnttab_max_grant_frames()),
+ &grstatus);
+ BUG_ON(rc);
+ kfree(sframes);
+
+ rc = arch_gnttab_map_shared(frames, nr_gframes,
+ gnttab_max_grant_frames(),
+ &gnttab_shared.addr);
+ BUG_ON(rc);
+
+ return 0;
+}
+
+static void gnttab_unmap_frames_v2(void)
+{
+ arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
+ arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
+}
+
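nr_status_frames() above is a plain ceiling division: each v2 grant frame holds GREFS_PER_GRANT_FRAME entries and each status frame holds SPP two-byte status words, so the status-frame count is ceil(nr_grant_frames * GREFS_PER_GRANT_FRAME / SPP). A stand-alone sketch of the same arithmetic, assuming a 4096-byte page, 16-byte v2 grant entries and 2-byte status entries (256 grants per frame, 2048 status words per page; these constants are illustrative, not quoted from the headers):

#include <stdio.h>

#define DEMO_PAGE_SIZE        4096
#define DEMO_GREFS_PER_FRAME  (DEMO_PAGE_SIZE / 16) /* grant_entry_v2 assumed 16 bytes */
#define DEMO_STATUS_PER_PAGE  (DEMO_PAGE_SIZE / 2)  /* grant_status_t assumed 2 bytes  */

static unsigned nr_status_frames(unsigned nr_grant_frames)
{
        /* Same ceiling division as the kernel helper above. */
        return (nr_grant_frames * DEMO_GREFS_PER_FRAME +
                DEMO_STATUS_PER_PAGE - 1) / DEMO_STATUS_PER_PAGE;
}

int main(void)
{
        unsigned n;

        /* With these values, eight grant frames share one status frame. */
        for (n = 1; n <= 16; n++)
                printf("%2u grant frame(s) -> %u status frame(s)\n",
                       n, nr_status_frames(n));
        return 0;
}

With these assumptions the loop prints one status frame for 1-8 grant frames and two for 9-16.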
static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
struct gnttab_setup_table setup;
@@ -551,6 +897,9 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
return rc;
}
+ /* No need for kzalloc: the buffer is fully initialized by the following
+ * GNTTABOP_setup_table hypercall.
+ */
frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
if (!frames)
return -ENOMEM;
@@ -567,19 +916,68 @@ static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
BUG_ON(rc || setup.status);
- rc = arch_gnttab_map_shared(frames, nr_gframes, gnttab_max_grant_frames(),
- &shared);
- BUG_ON(rc);
+ rc = gnttab_interface->map_frames(frames, nr_gframes);
kfree(frames);
- return 0;
+ return rc;
+}
+
+static struct gnttab_ops gnttab_v1_ops = {
+ .map_frames = gnttab_map_frames_v1,
+ .unmap_frames = gnttab_unmap_frames_v1,
+ .update_entry = gnttab_update_entry_v1,
+ .end_foreign_access_ref = gnttab_end_foreign_access_ref_v1,
+ .end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v1,
+ .query_foreign_access = gnttab_query_foreign_access_v1,
+};
+
+static struct gnttab_ops gnttab_v2_ops = {
+ .map_frames = gnttab_map_frames_v2,
+ .unmap_frames = gnttab_unmap_frames_v2,
+ .update_entry = gnttab_update_entry_v2,
+ .end_foreign_access_ref = gnttab_end_foreign_access_ref_v2,
+ .end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v2,
+ .query_foreign_access = gnttab_query_foreign_access_v2,
+ .update_subpage_entry = gnttab_update_subpage_entry_v2,
+ .update_trans_entry = gnttab_update_trans_entry_v2,
+};
+
+static void gnttab_request_version(void)
+{
+ int rc;
+ struct gnttab_set_version gsv;
+
+ if (xen_hvm_domain())
+ gsv.version = 1;
+ else
+ gsv.version = 2;
+ rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
+ if (rc == 0 && gsv.version == 2) {
+ grant_table_version = 2;
+ gnttab_interface = &gnttab_v2_ops;
+ } else if (grant_table_version == 2) {
+ /*
+ * If we've already used version 2 features,
+ * but then suddenly discover that they're not
+ * available (e.g. migrating to an older
+ * version of Xen), almost unbounded badness
+ * can happen.
+ */
+ panic("we need grant tables version 2, but only version 1 is available");
+ } else {
+ grant_table_version = 1;
+ gnttab_interface = &gnttab_v1_ops;
+ }
+ printk(KERN_INFO "Grant tables using version %d layout.\n",
+ grant_table_version);
}
int gnttab_resume(void)
{
unsigned int max_nr_gframes;
+ gnttab_request_version();
max_nr_gframes = gnttab_max_grant_frames();
if (max_nr_gframes < nr_grant_frames)
return -ENOSYS;
@@ -587,9 +985,10 @@ int gnttab_resume(void)
if (xen_pv_domain())
return gnttab_map(0, nr_grant_frames - 1);
- if (!shared) {
- shared = ioremap(xen_hvm_resume_frames, PAGE_SIZE * max_nr_gframes);
- if (shared == NULL) {
+ if (gnttab_shared.addr == NULL) {
+ gnttab_shared.addr = ioremap(xen_hvm_resume_frames,
+ PAGE_SIZE * max_nr_gframes);
+ if (gnttab_shared.addr == NULL) {
printk(KERN_WARNING
"Failed to ioremap gnttab share frames!");
return -ENOMEM;
@@ -603,7 +1002,7 @@ int gnttab_resume(void)
int gnttab_suspend(void)
{
- arch_gnttab_unmap_shared(shared, nr_grant_frames);
+ gnttab_interface->unmap_frames();
return 0;
}
diff --git a/drivers/xen/xenfs/privcmd.c b/drivers/xen/privcmd.c
index dbd3b16..ccee0f1 100644
--- a/drivers/xen/xenfs/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -7,6 +7,7 @@
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -18,6 +19,7 @@
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/seq_file.h>
+#include <linux/miscdevice.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
@@ -32,6 +34,10 @@
#include <xen/page.h>
#include <xen/xen-ops.h>
+#include "privcmd.h"
+
+MODULE_LICENSE("GPL");
+
#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
#endif
@@ -359,7 +365,6 @@ static long privcmd_ioctl(struct file *file,
return ret;
}
-#ifndef HAVE_ARCH_PRIVCMD_MMAP
static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
printk(KERN_DEBUG "privcmd_fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
@@ -392,9 +397,39 @@ static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
{
return (xchg(&vma->vm_private_data, (void *)1) == NULL);
}
-#endif
-const struct file_operations privcmd_file_ops = {
+const struct file_operations xen_privcmd_fops = {
+ .owner = THIS_MODULE,
.unlocked_ioctl = privcmd_ioctl,
.mmap = privcmd_mmap,
};
+EXPORT_SYMBOL_GPL(xen_privcmd_fops);
+
+static struct miscdevice privcmd_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "xen/privcmd",
+ .fops = &xen_privcmd_fops,
+};
+
+static int __init privcmd_init(void)
+{
+ int err;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ err = misc_register(&privcmd_dev);
+ if (err != 0) {
+ printk(KERN_ERR "Could not register Xen privcmd device\n");
+ return err;
+ }
+ return 0;
+}
+
+static void __exit privcmd_exit(void)
+{
+ misc_deregister(&privcmd_dev);
+}
+
+module_init(privcmd_init);
+module_exit(privcmd_exit);
diff --git a/drivers/xen/privcmd.h b/drivers/xen/privcmd.h
new file mode 100644
index 0000000..14facae
--- /dev/null
+++ b/drivers/xen/privcmd.h
@@ -0,0 +1,3 @@
+#include <linux/fs.h>
+
+extern const struct file_operations xen_privcmd_fops;
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 284798a..19e6a20 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -153,7 +153,7 @@ void __init xen_swiotlb_init(int verbose)
char *m = NULL;
unsigned int repeat = 3;
- nr_tbl = swioltb_nr_tbl();
+ nr_tbl = swiotlb_nr_tbl();
if (nr_tbl)
xen_io_tlb_nslabs = nr_tbl;
else {
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index 9cc2259..596e6a7 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -32,7 +32,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/sysdev.h>
#include <linux/capability.h>
#include <xen/xen.h>
@@ -46,9 +45,9 @@
#define BALLOON_CLASS_NAME "xen_memory"
-static struct sys_device balloon_sysdev;
+static struct device balloon_dev;
-static int register_balloon(struct sys_device *sysdev);
+static int register_balloon(struct device *dev);
/* React to a change in the target key */
static void watch_target(struct xenbus_watch *watch,
@@ -98,9 +97,9 @@ static int __init balloon_init(void)
pr_info("xen-balloon: Initialising balloon driver.\n");
- register_balloon(&balloon_sysdev);
+ register_balloon(&balloon_dev);
- register_xen_selfballooning(&balloon_sysdev);
+ register_xen_selfballooning(&balloon_dev);
register_xenstore_notifier(&xenstore_notifier);
@@ -117,31 +116,31 @@ static void balloon_exit(void)
module_exit(balloon_exit);
#define BALLOON_SHOW(name, format, args...) \
- static ssize_t show_##name(struct sys_device *dev, \
- struct sysdev_attribute *attr, \
+ static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, \
char *buf) \
{ \
return sprintf(buf, format, ##args); \
} \
- static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL)
+ static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(balloon_stats.current_pages));
BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_low));
BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(balloon_stats.balloon_high));
-static SYSDEV_ULONG_ATTR(schedule_delay, 0444, balloon_stats.schedule_delay);
-static SYSDEV_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay);
-static SYSDEV_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count);
-static SYSDEV_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count);
+static DEVICE_ULONG_ATTR(schedule_delay, 0444, balloon_stats.schedule_delay);
+static DEVICE_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay);
+static DEVICE_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count);
+static DEVICE_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count);
-static ssize_t show_target_kb(struct sys_device *dev, struct sysdev_attribute *attr,
+static ssize_t show_target_kb(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%lu\n", PAGES2KB(balloon_stats.target_pages));
}
-static ssize_t store_target_kb(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t store_target_kb(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
@@ -158,11 +157,11 @@ static ssize_t store_target_kb(struct sys_device *dev,
return count;
}
-static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(target_kb, S_IRUGO | S_IWUSR,
show_target_kb, store_target_kb);
-static ssize_t show_target(struct sys_device *dev, struct sysdev_attribute *attr,
+static ssize_t show_target(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%llu\n",
@@ -170,8 +169,8 @@ static ssize_t show_target(struct sys_device *dev, struct sysdev_attribute *attr
<< PAGE_SHIFT);
}
-static ssize_t store_target(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t store_target(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
@@ -188,23 +187,23 @@ static ssize_t store_target(struct sys_device *dev,
return count;
}
-static SYSDEV_ATTR(target, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(target, S_IRUGO | S_IWUSR,
show_target, store_target);
-static struct sysdev_attribute *balloon_attrs[] = {
- &attr_target_kb,
- &attr_target,
- &attr_schedule_delay.attr,
- &attr_max_schedule_delay.attr,
- &attr_retry_count.attr,
- &attr_max_retry_count.attr
+static struct device_attribute *balloon_attrs[] = {
+ &dev_attr_target_kb,
+ &dev_attr_target,
+ &dev_attr_schedule_delay.attr,
+ &dev_attr_max_schedule_delay.attr,
+ &dev_attr_retry_count.attr,
+ &dev_attr_max_retry_count.attr
};
static struct attribute *balloon_info_attrs[] = {
- &attr_current_kb.attr,
- &attr_low_kb.attr,
- &attr_high_kb.attr,
+ &dev_attr_current_kb.attr,
+ &dev_attr_low_kb.attr,
+ &dev_attr_high_kb.attr,
NULL
};
@@ -213,34 +212,35 @@ static struct attribute_group balloon_info_group = {
.attrs = balloon_info_attrs
};
-static struct sysdev_class balloon_sysdev_class = {
- .name = BALLOON_CLASS_NAME
+static struct bus_type balloon_subsys = {
+ .name = BALLOON_CLASS_NAME,
+ .dev_name = BALLOON_CLASS_NAME,
};
-static int register_balloon(struct sys_device *sysdev)
+static int register_balloon(struct device *dev)
{
int i, error;
- error = sysdev_class_register(&balloon_sysdev_class);
+ error = subsys_system_register(&balloon_subsys, NULL);
if (error)
return error;
- sysdev->id = 0;
- sysdev->cls = &balloon_sysdev_class;
+ dev->id = 0;
+ dev->bus = &balloon_subsys;
- error = sysdev_register(sysdev);
+ error = device_register(dev);
if (error) {
- sysdev_class_unregister(&balloon_sysdev_class);
+ bus_unregister(&balloon_subsys);
return error;
}
for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) {
- error = sysdev_create_file(sysdev, balloon_attrs[i]);
+ error = device_create_file(dev, balloon_attrs[i]);
if (error)
goto fail;
}
- error = sysfs_create_group(&sysdev->kobj, &balloon_info_group);
+ error = sysfs_create_group(&dev->kobj, &balloon_info_group);
if (error)
goto fail;
@@ -248,9 +248,9 @@ static int register_balloon(struct sys_device *sysdev)
fail:
while (--i >= 0)
- sysdev_remove_file(sysdev, balloon_attrs[i]);
- sysdev_unregister(sysdev);
- sysdev_class_unregister(&balloon_sysdev_class);
+ device_remove_file(dev, balloon_attrs[i]);
+ device_unregister(dev);
+ bus_unregister(&balloon_subsys);
return error;
}
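The balloon driver's sysdev class is replaced by a struct bus_type registered with subsys_system_register() plus an ordinary struct device, and SYSDEV_ATTR/SYSDEV_ULONG_ATTR become DEVICE_ATTR/DEVICE_ULONG_ATTR, keeping the files under /sys/devices/system/. A minimal sketch of that pattern in isolation; every "demo" identifier is illustrative and the sysfs path in the comment is the expected, not guaranteed, location:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>

static struct bus_type demo_subsys = {
        .name = "demo",
        .dev_name = "demo",
};

static struct device demo_dev;

static ssize_t show_answer(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        return sprintf(buf, "%d\n", 42);
}
static DEVICE_ATTR(answer, S_IRUGO, show_answer, NULL);

static int __init demo_init(void)
{
        int err = subsys_system_register(&demo_subsys, NULL);

        if (err)
                return err;

        demo_dev.id = 0;
        demo_dev.bus = &demo_subsys;
        err = device_register(&demo_dev);
        if (err) {
                bus_unregister(&demo_subsys);
                return err;
        }
        /* Should appear as /sys/devices/system/demo/demo0/answer */
        return device_create_file(&demo_dev, &dev_attr_answer);
}
module_init(demo_init);
MODULE_LICENSE("GPL");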
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index 52fed16..30d7be0 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -16,7 +16,7 @@
#include "conf_space.h"
#include "conf_space_quirks.h"
-static int permissive;
+static bool permissive;
module_param(permissive, bool, 0644);
/* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word,
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 8f06e1e..19834d1 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -99,6 +99,7 @@ static void pcistub_device_release(struct kref *kref)
kfree(pci_get_drvdata(psdev->dev));
pci_set_drvdata(psdev->dev, NULL);
+ psdev->dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
pci_dev_put(psdev->dev);
kfree(psdev);
@@ -234,6 +235,8 @@ void pcistub_put_pci_dev(struct pci_dev *dev)
xen_pcibk_config_free_dyn_fields(found_psdev->dev);
xen_pcibk_config_reset_dev(found_psdev->dev);
+ xen_unregister_device_domain_owner(found_psdev->dev);
+
spin_lock_irqsave(&found_psdev->lock, flags);
found_psdev->pdev = NULL;
spin_unlock_irqrestore(&found_psdev->lock, flags);
@@ -331,6 +334,7 @@ static int __devinit pcistub_init_device(struct pci_dev *dev)
dev_dbg(&dev->dev, "reset device\n");
xen_pcibk_reset_device(dev);
+ dev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
return 0;
config_release:
@@ -880,7 +884,7 @@ static inline int str_to_quirk(const char *buf, int *domain, int *bus, int
int err;
err =
- sscanf(buf, " %04x:%02x:%02x.%1x-%08x:%1x:%08x", domain, bus, slot,
+ sscanf(buf, " %04x:%02x:%02x.%d-%08x:%1x:%08x", domain, bus, slot,
func, reg, size, mask);
if (err == 7)
return 0;
@@ -900,7 +904,7 @@ static int pcistub_device_id_add(int domain, int bus, int slot, int func)
pci_dev_id->bus = bus;
pci_dev_id->devfn = PCI_DEVFN(slot, func);
- pr_debug(DRV_NAME ": wants to seize %04x:%02x:%02x.%01x\n",
+ pr_debug(DRV_NAME ": wants to seize %04x:%02x:%02x.%d\n",
domain, bus, slot, func);
spin_lock_irqsave(&device_ids_lock, flags);
@@ -930,7 +934,7 @@ static int pcistub_device_id_remove(int domain, int bus, int slot, int func)
err = 0;
- pr_debug(DRV_NAME ": removed %04x:%02x:%02x.%01x from "
+ pr_debug(DRV_NAME ": removed %04x:%02x:%02x.%d from "
"seize list\n", domain, bus, slot, func);
}
}
@@ -1025,7 +1029,7 @@ static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf)
break;
count += scnprintf(buf + count, PAGE_SIZE - count,
- "%04x:%02x:%02x.%01x\n",
+ "%04x:%02x:%02x.%d\n",
pci_dev_id->domain, pci_dev_id->bus,
PCI_SLOT(pci_dev_id->devfn),
PCI_FUNC(pci_dev_id->devfn));
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index 0755259..64b11f9 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -16,7 +16,7 @@
#define INVALID_EVTCHN_IRQ (-1)
struct workqueue_struct *xen_pcibk_wq;
-static int __read_mostly passthrough;
+static bool __read_mostly passthrough;
module_param(passthrough, bool, S_IRUGO);
MODULE_PARM_DESC(passthrough,
"Option to specify how to export PCI topology to guest:\n"\
@@ -206,6 +206,7 @@ static int xen_pcibk_publish_pci_dev(struct xen_pcibk_device *pdev,
goto out;
}
+ /* Note: The PV protocol uses %02x, don't change it */
err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
"%04x:%02x:%02x.%02x", domain, bus,
PCI_SLOT(devfn), PCI_FUNC(devfn));
@@ -229,7 +230,7 @@ static int xen_pcibk_export_device(struct xen_pcibk_device *pdev,
err = -EINVAL;
xenbus_dev_fatal(pdev->xdev, err,
"Couldn't locate PCI device "
- "(%04x:%02x:%02x.%01x)! "
+ "(%04x:%02x:%02x.%d)! "
"perhaps already in-use?",
domain, bus, slot, func);
goto out;
@@ -241,11 +242,10 @@ static int xen_pcibk_export_device(struct xen_pcibk_device *pdev,
goto out;
dev_dbg(&dev->dev, "registering for %d\n", pdev->xdev->otherend_id);
- dev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
if (xen_register_device_domain_owner(dev,
pdev->xdev->otherend_id) != 0) {
- dev_err(&dev->dev, "device has been assigned to another " \
- "domain! Over-writting the ownership, but beware.\n");
+ dev_err(&dev->dev, "Stealing ownership from dom%d.\n",
+ xen_find_device_domain_owner(dev));
xen_unregister_device_domain_owner(dev);
xen_register_device_domain_owner(dev, pdev->xdev->otherend_id);
}
@@ -275,13 +275,12 @@ static int xen_pcibk_remove_device(struct xen_pcibk_device *pdev,
if (!dev) {
err = -EINVAL;
dev_dbg(&pdev->xdev->dev, "Couldn't locate PCI device "
- "(%04x:%02x:%02x.%01x)! not owned by this domain\n",
+ "(%04x:%02x:%02x.%d)! not owned by this domain\n",
domain, bus, slot, func);
goto out;
}
dev_dbg(&dev->dev, "unregistering for %d\n", pdev->xdev->otherend_id);
- dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
xen_unregister_device_domain_owner(dev);
xen_pcibk_release_pci_dev(pdev, dev);
@@ -707,19 +706,16 @@ static int xen_pcibk_xenbus_remove(struct xenbus_device *dev)
return 0;
}
-static const struct xenbus_device_id xenpci_ids[] = {
+static const struct xenbus_device_id xen_pcibk_ids[] = {
{"pci"},
{""},
};
-static struct xenbus_driver xenbus_xen_pcibk_driver = {
- .name = DRV_NAME,
- .owner = THIS_MODULE,
- .ids = xenpci_ids,
+static DEFINE_XENBUS_DRIVER(xen_pcibk, DRV_NAME,
.probe = xen_pcibk_xenbus_probe,
.remove = xen_pcibk_xenbus_remove,
.otherend_changed = xen_pcibk_frontend_changed,
-};
+);
const struct xen_pcibk_backend *__read_mostly xen_pcibk_backend;
@@ -735,11 +731,11 @@ int __init xen_pcibk_xenbus_register(void)
if (passthrough)
xen_pcibk_backend = &xen_pcibk_passthrough_backend;
pr_info(DRV_NAME ": backend is %s\n", xen_pcibk_backend->name);
- return xenbus_register_backend(&xenbus_xen_pcibk_driver);
+ return xenbus_register_backend(&xen_pcibk_driver);
}
void __exit xen_pcibk_xenbus_unregister(void)
{
destroy_workqueue(xen_pcibk_wq);
- xenbus_unregister_driver(&xenbus_xen_pcibk_driver);
+ xenbus_unregister_driver(&xen_pcibk_driver);
}
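The driver declaration above uses the DEFINE_XENBUS_DRIVER() form introduced by this series: the macro fills in .name, .owner and .ids from the <name>_ids table, which is why xenbus_register_driver_common() no longer takes owner/mod_name (see the xenbus_probe.c hunks further down). A hypothetical frontend converted the same way, as a sketch of the pattern only; every demofront identifier is illustrative:

#include <linux/module.h>
#include <xen/xenbus.h>

static int demofront_probe(struct xenbus_device *dev,
                           const struct xenbus_device_id *id)
{
        return 0;
}

static int demofront_remove(struct xenbus_device *dev)
{
        return 0;
}

static const struct xenbus_device_id demofront_ids[] = {
        { "demo" },
        { "" }
};

static DEFINE_XENBUS_DRIVER(demofront, "demofront",
        .probe = demofront_probe,
        .remove = demofront_remove,
);

static int __init demofront_init(void)
{
        /* The macro defines demofront_driver for us. */
        return xenbus_register_frontend(&demofront_driver);
}
module_init(demofront_init);
MODULE_LICENSE("GPL");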
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index d93c708..767ff65 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -74,6 +74,7 @@
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/workqueue.h>
+#include <linux/device.h>
#include <xen/balloon.h>
#include <xen/tmem.h>
#include <xen/xen.h>
@@ -266,21 +267,20 @@ static void selfballoon_process(struct work_struct *work)
#ifdef CONFIG_SYSFS
-#include <linux/sysdev.h>
#include <linux/capability.h>
#define SELFBALLOON_SHOW(name, format, args...) \
- static ssize_t show_##name(struct sys_device *dev, \
- struct sysdev_attribute *attr, \
- char *buf) \
+ static ssize_t show_##name(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
{ \
return sprintf(buf, format, ##args); \
}
SELFBALLOON_SHOW(selfballooning, "%d\n", xen_selfballooning_enabled);
-static ssize_t store_selfballooning(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t store_selfballooning(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
@@ -303,13 +303,13 @@ static ssize_t store_selfballooning(struct sys_device *dev,
return count;
}
-static SYSDEV_ATTR(selfballooning, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(selfballooning, S_IRUGO | S_IWUSR,
show_selfballooning, store_selfballooning);
SELFBALLOON_SHOW(selfballoon_interval, "%d\n", selfballoon_interval);
-static ssize_t store_selfballoon_interval(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t store_selfballoon_interval(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
@@ -325,13 +325,13 @@ static ssize_t store_selfballoon_interval(struct sys_device *dev,
return count;
}
-static SYSDEV_ATTR(selfballoon_interval, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(selfballoon_interval, S_IRUGO | S_IWUSR,
show_selfballoon_interval, store_selfballoon_interval);
SELFBALLOON_SHOW(selfballoon_downhys, "%d\n", selfballoon_downhysteresis);
-static ssize_t store_selfballoon_downhys(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t store_selfballoon_downhys(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
@@ -347,14 +347,14 @@ static ssize_t store_selfballoon_downhys(struct sys_device *dev,
return count;
}
-static SYSDEV_ATTR(selfballoon_downhysteresis, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(selfballoon_downhysteresis, S_IRUGO | S_IWUSR,
show_selfballoon_downhys, store_selfballoon_downhys);
SELFBALLOON_SHOW(selfballoon_uphys, "%d\n", selfballoon_uphysteresis);
-static ssize_t store_selfballoon_uphys(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t store_selfballoon_uphys(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
@@ -370,14 +370,14 @@ static ssize_t store_selfballoon_uphys(struct sys_device *dev,
return count;
}
-static SYSDEV_ATTR(selfballoon_uphysteresis, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(selfballoon_uphysteresis, S_IRUGO | S_IWUSR,
show_selfballoon_uphys, store_selfballoon_uphys);
SELFBALLOON_SHOW(selfballoon_min_usable_mb, "%d\n",
selfballoon_min_usable_mb);
-static ssize_t store_selfballoon_min_usable_mb(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t store_selfballoon_min_usable_mb(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
@@ -393,7 +393,7 @@ static ssize_t store_selfballoon_min_usable_mb(struct sys_device *dev,
return count;
}
-static SYSDEV_ATTR(selfballoon_min_usable_mb, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(selfballoon_min_usable_mb, S_IRUGO | S_IWUSR,
show_selfballoon_min_usable_mb,
store_selfballoon_min_usable_mb);
@@ -401,8 +401,8 @@ static SYSDEV_ATTR(selfballoon_min_usable_mb, S_IRUGO | S_IWUSR,
#ifdef CONFIG_FRONTSWAP
SELFBALLOON_SHOW(frontswap_selfshrinking, "%d\n", frontswap_selfshrinking);
-static ssize_t store_frontswap_selfshrinking(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t store_frontswap_selfshrinking(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
@@ -424,13 +424,13 @@ static ssize_t store_frontswap_selfshrinking(struct sys_device *dev,
return count;
}
-static SYSDEV_ATTR(frontswap_selfshrinking, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(frontswap_selfshrinking, S_IRUGO | S_IWUSR,
show_frontswap_selfshrinking, store_frontswap_selfshrinking);
SELFBALLOON_SHOW(frontswap_inertia, "%d\n", frontswap_inertia);
-static ssize_t store_frontswap_inertia(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t store_frontswap_inertia(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
@@ -447,13 +447,13 @@ static ssize_t store_frontswap_inertia(struct sys_device *dev,
return count;
}
-static SYSDEV_ATTR(frontswap_inertia, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(frontswap_inertia, S_IRUGO | S_IWUSR,
show_frontswap_inertia, store_frontswap_inertia);
SELFBALLOON_SHOW(frontswap_hysteresis, "%d\n", frontswap_hysteresis);
-static ssize_t store_frontswap_hysteresis(struct sys_device *dev,
- struct sysdev_attribute *attr,
+static ssize_t store_frontswap_hysteresis(struct device *dev,
+ struct device_attribute *attr,
const char *buf,
size_t count)
{
@@ -469,21 +469,21 @@ static ssize_t store_frontswap_hysteresis(struct sys_device *dev,
return count;
}
-static SYSDEV_ATTR(frontswap_hysteresis, S_IRUGO | S_IWUSR,
+static DEVICE_ATTR(frontswap_hysteresis, S_IRUGO | S_IWUSR,
show_frontswap_hysteresis, store_frontswap_hysteresis);
#endif /* CONFIG_FRONTSWAP */
static struct attribute *selfballoon_attrs[] = {
- &attr_selfballooning.attr,
- &attr_selfballoon_interval.attr,
- &attr_selfballoon_downhysteresis.attr,
- &attr_selfballoon_uphysteresis.attr,
- &attr_selfballoon_min_usable_mb.attr,
+ &dev_attr_selfballooning.attr,
+ &dev_attr_selfballoon_interval.attr,
+ &dev_attr_selfballoon_downhysteresis.attr,
+ &dev_attr_selfballoon_uphysteresis.attr,
+ &dev_attr_selfballoon_min_usable_mb.attr,
#ifdef CONFIG_FRONTSWAP
- &attr_frontswap_selfshrinking.attr,
- &attr_frontswap_hysteresis.attr,
- &attr_frontswap_inertia.attr,
+ &dev_attr_frontswap_selfshrinking.attr,
+ &dev_attr_frontswap_hysteresis.attr,
+ &dev_attr_frontswap_inertia.attr,
#endif
NULL
};
@@ -494,12 +494,12 @@ static struct attribute_group selfballoon_group = {
};
#endif
-int register_xen_selfballooning(struct sys_device *sysdev)
+int register_xen_selfballooning(struct device *dev)
{
int error = -1;
#ifdef CONFIG_SYSFS
- error = sysfs_create_group(&sysdev->kobj, &selfballoon_group);
+ error = sysfs_create_group(&dev->kobj, &selfballoon_group);
#endif
return error;
}
diff --git a/drivers/xen/xenbus/Makefile b/drivers/xen/xenbus/Makefile
index 8dca685..31e2e90 100644
--- a/drivers/xen/xenbus/Makefile
+++ b/drivers/xen/xenbus/Makefile
@@ -1,4 +1,5 @@
obj-y += xenbus.o
+obj-y += xenbus_dev_frontend.o
xenbus-objs =
xenbus-objs += xenbus_client.o
@@ -9,4 +10,5 @@ xenbus-objs += xenbus_probe.o
xenbus-be-objs-$(CONFIG_XEN_BACKEND) += xenbus_probe_backend.o
xenbus-objs += $(xenbus-be-objs-y)
+obj-$(CONFIG_XEN_BACKEND) += xenbus_dev_backend.o
obj-$(CONFIG_XEN_XENBUS_FRONTEND) += xenbus_probe_frontend.o
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 1906125..566d2ad 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -32,15 +32,39 @@
#include <linux/slab.h>
#include <linux/types.h>
+#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
+#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
+#include <xen/xen.h>
+
+#include "xenbus_probe.h"
+
+struct xenbus_map_node {
+ struct list_head next;
+ union {
+ struct vm_struct *area; /* PV */
+ struct page *page; /* HVM */
+ };
+ grant_handle_t handle;
+};
+
+static DEFINE_SPINLOCK(xenbus_valloc_lock);
+static LIST_HEAD(xenbus_valloc_pages);
+
+struct xenbus_ring_ops {
+ int (*map)(struct xenbus_device *dev, int gnt, void **vaddr);
+ int (*unmap)(struct xenbus_device *dev, void *vaddr);
+};
+
+static const struct xenbus_ring_ops *ring_ops __read_mostly;
const char *xenbus_strstate(enum xenbus_state state)
{
@@ -436,19 +460,33 @@ EXPORT_SYMBOL_GPL(xenbus_free_evtchn);
*/
int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
{
+ return ring_ops->map(dev, gnt_ref, vaddr);
+}
+EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
+
+static int xenbus_map_ring_valloc_pv(struct xenbus_device *dev,
+ int gnt_ref, void **vaddr)
+{
struct gnttab_map_grant_ref op = {
.flags = GNTMAP_host_map | GNTMAP_contains_pte,
.ref = gnt_ref,
.dom = dev->otherend_id,
};
+ struct xenbus_map_node *node;
struct vm_struct *area;
pte_t *pte;
*vaddr = NULL;
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
area = alloc_vm_area(PAGE_SIZE, &pte);
- if (!area)
+ if (!area) {
+ kfree(node);
return -ENOMEM;
+ }
op.host_addr = arbitrary_virt_to_machine(pte).maddr;
@@ -457,19 +495,59 @@ int xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref, void **vaddr)
if (op.status != GNTST_okay) {
free_vm_area(area);
+ kfree(node);
xenbus_dev_fatal(dev, op.status,
"mapping in shared page %d from domain %d",
gnt_ref, dev->otherend_id);
return op.status;
}
- /* Stuff the handle in an unused field */
- area->phys_addr = (unsigned long)op.handle;
+ node->handle = op.handle;
+ node->area = area;
+
+ spin_lock(&xenbus_valloc_lock);
+ list_add(&node->next, &xenbus_valloc_pages);
+ spin_unlock(&xenbus_valloc_lock);
*vaddr = area->addr;
return 0;
}
-EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
+
+static int xenbus_map_ring_valloc_hvm(struct xenbus_device *dev,
+ int gnt_ref, void **vaddr)
+{
+ struct xenbus_map_node *node;
+ int err;
+ void *addr;
+
+ *vaddr = NULL;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+
+ err = alloc_xenballooned_pages(1, &node->page, false /* lowmem */);
+ if (err)
+ goto out_err;
+
+ addr = pfn_to_kaddr(page_to_pfn(node->page));
+
+ err = xenbus_map_ring(dev, gnt_ref, &node->handle, addr);
+ if (err)
+ goto out_err;
+
+ spin_lock(&xenbus_valloc_lock);
+ list_add(&node->next, &xenbus_valloc_pages);
+ spin_unlock(&xenbus_valloc_lock);
+
+ *vaddr = addr;
+ return 0;
+
+ out_err:
+ free_xenballooned_pages(1, &node->page);
+ kfree(node);
+ return err;
+}
/**
@@ -489,12 +567,10 @@ EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
grant_handle_t *handle, void *vaddr)
{
- struct gnttab_map_grant_ref op = {
- .host_addr = (unsigned long)vaddr,
- .flags = GNTMAP_host_map,
- .ref = gnt_ref,
- .dom = dev->otherend_id,
- };
+ struct gnttab_map_grant_ref op;
+
+ gnttab_set_map_op(&op, (phys_addr_t)vaddr, GNTMAP_host_map, gnt_ref,
+ dev->otherend_id);
if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
BUG();
@@ -525,32 +601,36 @@ EXPORT_SYMBOL_GPL(xenbus_map_ring);
*/
int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
{
- struct vm_struct *area;
+ return ring_ops->unmap(dev, vaddr);
+}
+EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
+
+static int xenbus_unmap_ring_vfree_pv(struct xenbus_device *dev, void *vaddr)
+{
+ struct xenbus_map_node *node;
struct gnttab_unmap_grant_ref op = {
.host_addr = (unsigned long)vaddr,
};
unsigned int level;
- /* It'd be nice if linux/vmalloc.h provided a find_vm_area(void *addr)
- * method so that we don't have to muck with vmalloc internals here.
- * We could force the user to hang on to their struct vm_struct from
- * xenbus_map_ring_valloc, but these 6 lines considerably simplify
- * this API.
- */
- read_lock(&vmlist_lock);
- for (area = vmlist; area != NULL; area = area->next) {
- if (area->addr == vaddr)
- break;
+ spin_lock(&xenbus_valloc_lock);
+ list_for_each_entry(node, &xenbus_valloc_pages, next) {
+ if (node->area->addr == vaddr) {
+ list_del(&node->next);
+ goto found;
+ }
}
- read_unlock(&vmlist_lock);
+ node = NULL;
+ found:
+ spin_unlock(&xenbus_valloc_lock);
- if (!area) {
+ if (!node) {
xenbus_dev_error(dev, -ENOENT,
"can't find mapped virtual address %p", vaddr);
return GNTST_bad_virt_addr;
}
- op.handle = (grant_handle_t)area->phys_addr;
+ op.handle = node->handle;
op.host_addr = arbitrary_virt_to_machine(
lookup_address((unsigned long)vaddr, &level)).maddr;
@@ -558,16 +638,50 @@ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr)
BUG();
if (op.status == GNTST_okay)
- free_vm_area(area);
+ free_vm_area(node->area);
else
xenbus_dev_error(dev, op.status,
"unmapping page at handle %d error %d",
- (int16_t)area->phys_addr, op.status);
+ node->handle, op.status);
+ kfree(node);
return op.status;
}
-EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
+static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
+{
+ int rv;
+ struct xenbus_map_node *node;
+ void *addr;
+
+ spin_lock(&xenbus_valloc_lock);
+ list_for_each_entry(node, &xenbus_valloc_pages, next) {
+ addr = pfn_to_kaddr(page_to_pfn(node->page));
+ if (addr == vaddr) {
+ list_del(&node->next);
+ goto found;
+ }
+ }
+ node = NULL;
+ found:
+ spin_unlock(&xenbus_valloc_lock);
+
+ if (!node) {
+ xenbus_dev_error(dev, -ENOENT,
+ "can't find mapped virtual address %p", vaddr);
+ return GNTST_bad_virt_addr;
+ }
+
+ rv = xenbus_unmap_ring(dev, node->handle, addr);
+
+ if (!rv)
+ free_xenballooned_pages(1, &node->page);
+ else
+ WARN(1, "Leaking %p\n", vaddr);
+
+ kfree(node);
+ return rv;
+}
/**
* xenbus_unmap_ring
@@ -582,10 +696,9 @@ EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
int xenbus_unmap_ring(struct xenbus_device *dev,
grant_handle_t handle, void *vaddr)
{
- struct gnttab_unmap_grant_ref op = {
- .host_addr = (unsigned long)vaddr,
- .handle = handle,
- };
+ struct gnttab_unmap_grant_ref op;
+
+ gnttab_set_unmap_op(&op, (phys_addr_t)vaddr, GNTMAP_host_map, handle);
if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
BUG();
@@ -617,3 +730,21 @@ enum xenbus_state xenbus_read_driver_state(const char *path)
return result;
}
EXPORT_SYMBOL_GPL(xenbus_read_driver_state);
+
+static const struct xenbus_ring_ops ring_ops_pv = {
+ .map = xenbus_map_ring_valloc_pv,
+ .unmap = xenbus_unmap_ring_vfree_pv,
+};
+
+static const struct xenbus_ring_ops ring_ops_hvm = {
+ .map = xenbus_map_ring_valloc_hvm,
+ .unmap = xenbus_unmap_ring_vfree_hvm,
+};
+
+void __init xenbus_ring_ops_init(void)
+{
+ if (xen_pv_domain())
+ ring_ops = &ring_ops_pv;
+ else
+ ring_ops = &ring_ops_hvm;
+}
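xenbus_map_ring_valloc() and xenbus_unmap_ring_vfree() now dispatch through ring_ops, selected at init for PV or HVM, and track each mapping in a xenbus_map_node instead of stashing the grant handle in vm_struct::phys_addr. A backend-style caller sketch; "ring-ref" is the usual xenstore key convention and the demo_* names are illustrative:

#include <xen/xenbus.h>

/* Map the ring page the frontend granted and advertised in xenstore. */
static void *demo_map_ring(struct xenbus_device *dev)
{
        int ring_ref, err;
        void *ring;

        if (xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref",
                         "%d", &ring_ref) != 1)
                return NULL;

        /* Dispatches to the PV or HVM implementation via ring_ops. */
        err = xenbus_map_ring_valloc(dev, ring_ref, &ring);
        if (err)
                return NULL;
        return ring;
}

static void demo_unmap_ring(struct xenbus_device *dev, void *ring)
{
        /* Looks up the xenbus_map_node recorded at map time and frees it. */
        xenbus_unmap_ring_vfree(dev, ring);
}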
diff --git a/drivers/xen/xenbus/xenbus_comms.h b/drivers/xen/xenbus/xenbus_comms.h
index c21db75..6e42800 100644
--- a/drivers/xen/xenbus/xenbus_comms.h
+++ b/drivers/xen/xenbus/xenbus_comms.h
@@ -31,6 +31,8 @@
#ifndef _XENBUS_COMMS_H
#define _XENBUS_COMMS_H
+#include <linux/fs.h>
+
int xs_init(void);
int xb_init_comms(void);
@@ -43,4 +45,6 @@ int xs_input_avail(void);
extern struct xenstore_domain_interface *xen_store_interface;
extern int xen_store_evtchn;
+extern const struct file_operations xen_xenbus_fops;
+
#endif /* _XENBUS_COMMS_H */
diff --git a/drivers/xen/xenbus/xenbus_dev_backend.c b/drivers/xen/xenbus/xenbus_dev_backend.c
new file mode 100644
index 0000000..3d3be78
--- /dev/null
+++ b/drivers/xen/xenbus/xenbus_dev_backend.c
@@ -0,0 +1,90 @@
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/capability.h>
+
+#include <xen/xen.h>
+#include <xen/page.h>
+#include <xen/xenbus_dev.h>
+
+#include "xenbus_comms.h"
+
+MODULE_LICENSE("GPL");
+
+static int xenbus_backend_open(struct inode *inode, struct file *filp)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ return nonseekable_open(inode, filp);
+}
+
+static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned long data)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ switch (cmd) {
+ case IOCTL_XENBUS_BACKEND_EVTCHN:
+ if (xen_store_evtchn > 0)
+ return xen_store_evtchn;
+ return -ENODEV;
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+static int xenbus_backend_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ size_t size = vma->vm_end - vma->vm_start;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
+ return -EINVAL;
+
+ if (remap_pfn_range(vma, vma->vm_start,
+ virt_to_pfn(xen_store_interface),
+ size, vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
+}
+
+const struct file_operations xenbus_backend_fops = {
+ .open = xenbus_backend_open,
+ .mmap = xenbus_backend_mmap,
+ .unlocked_ioctl = xenbus_backend_ioctl,
+};
+
+static struct miscdevice xenbus_backend_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "xen/xenbus_backend",
+ .fops = &xenbus_backend_fops,
+};
+
+static int __init xenbus_backend_init(void)
+{
+ int err;
+
+ if (!xen_initial_domain())
+ return -ENODEV;
+
+ err = misc_register(&xenbus_backend_dev);
+ if (err)
+ printk(KERN_ERR "Could not register xenbus backend device\n");
+ return err;
+}
+
+static void __exit xenbus_backend_exit(void)
+{
+ misc_deregister(&xenbus_backend_dev);
+}
+
+module_init(xenbus_backend_init);
+module_exit(xenbus_backend_exit);
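The new backend device exposes just two things: an ioctl that returns the xenstore event channel and a root-only mmap of the single xenstore_domain_interface page. A user-space sketch, assuming the IOCTL_XENBUS_BACKEND_EVTCHN definition from the new xen/xenbus_dev.h header is visible to user space and that udev has created /dev/xen/xenbus_backend from the miscdevice name:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xen/xenbus_dev.h>        /* IOCTL_XENBUS_BACKEND_EVTCHN */

int main(void)
{
        int fd, evtchn;
        void *intf;

        fd = open("/dev/xen/xenbus_backend", O_RDWR);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        evtchn = ioctl(fd, IOCTL_XENBUS_BACKEND_EVTCHN);
        printf("xenstore event channel: %d\n", evtchn);

        /* One page only; larger sizes or a non-zero offset get -EINVAL. */
        intf = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (intf == MAP_FAILED)
                perror("mmap");
        else
                munmap(intf, 4096);

        close(fd);
        return 0;
}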
diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index bbd000f..89f7625 100644
--- a/drivers/xen/xenfs/xenbus.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -52,13 +52,17 @@
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
-#include "xenfs.h"
-#include "../xenbus/xenbus_comms.h"
+#include "xenbus_comms.h"
#include <xen/xenbus.h>
+#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
+MODULE_LICENSE("GPL");
+
/*
* An element of a list of outstanding transactions, for which we're
* still waiting a reply.
@@ -101,7 +105,7 @@ struct xenbus_file_priv {
unsigned int len;
union {
struct xsd_sockmsg msg;
- char buffer[PAGE_SIZE];
+ char buffer[XENSTORE_PAYLOAD_MAX];
} u;
/* Response queue. */
@@ -365,6 +369,10 @@ static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
goto out;
}
token++;
+ if (memchr(token, 0, u->u.msg.len - (token - path)) == NULL) {
+ rc = -EILSEQ;
+ goto out;
+ }
if (msg_type == XS_WATCH) {
watch = alloc_watch_adapter(path, token);
@@ -583,7 +591,7 @@ static unsigned int xenbus_file_poll(struct file *file, poll_table *wait)
return 0;
}
-const struct file_operations xenbus_file_ops = {
+const struct file_operations xen_xenbus_fops = {
.read = xenbus_file_read,
.write = xenbus_file_write,
.open = xenbus_file_open,
@@ -591,3 +599,31 @@ const struct file_operations xenbus_file_ops = {
.poll = xenbus_file_poll,
.llseek = no_llseek,
};
+EXPORT_SYMBOL_GPL(xen_xenbus_fops);
+
+static struct miscdevice xenbus_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "xen/xenbus",
+ .fops = &xen_xenbus_fops,
+};
+
+static int __init xenbus_init(void)
+{
+ int err;
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ err = misc_register(&xenbus_dev);
+ if (err)
+ printk(KERN_ERR "Could not register xenbus frontend device\n");
+ return err;
+}
+
+static void __exit xenbus_exit(void)
+{
+ misc_deregister(&xenbus_dev);
+}
+
+module_init(xenbus_init);
+module_exit(xenbus_exit);
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 1b178c6..3864967 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -291,14 +291,9 @@ void xenbus_dev_shutdown(struct device *_dev)
EXPORT_SYMBOL_GPL(xenbus_dev_shutdown);
int xenbus_register_driver_common(struct xenbus_driver *drv,
- struct xen_bus_type *bus,
- struct module *owner,
- const char *mod_name)
+ struct xen_bus_type *bus)
{
- drv->driver.name = drv->name;
drv->driver.bus = &bus->bus;
- drv->driver.owner = owner;
- drv->driver.mod_name = mod_name;
return driver_register(&drv->driver);
}
@@ -730,6 +725,8 @@ static int __init xenbus_init(void)
if (!xen_domain())
return -ENODEV;
+ xenbus_ring_ops_init();
+
if (xen_hvm_domain()) {
uint64_t v = 0;
err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
diff --git a/drivers/xen/xenbus/xenbus_probe.h b/drivers/xen/xenbus/xenbus_probe.h
index 9b1de4e..bb4f92e 100644
--- a/drivers/xen/xenbus/xenbus_probe.h
+++ b/drivers/xen/xenbus/xenbus_probe.h
@@ -53,9 +53,7 @@ extern int xenbus_match(struct device *_dev, struct device_driver *_drv);
extern int xenbus_dev_probe(struct device *_dev);
extern int xenbus_dev_remove(struct device *_dev);
extern int xenbus_register_driver_common(struct xenbus_driver *drv,
- struct xen_bus_type *bus,
- struct module *owner,
- const char *mod_name);
+ struct xen_bus_type *bus);
extern int xenbus_probe_node(struct xen_bus_type *bus,
const char *type,
const char *nodename);
@@ -76,4 +74,6 @@ extern void xenbus_otherend_changed(struct xenbus_watch *watch,
extern int xenbus_read_otherend_details(struct xenbus_device *xendev,
char *id_node, char *path_node);
+void xenbus_ring_ops_init(void);
+
#endif
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index c3c7cd1..257be37 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -232,15 +232,13 @@ int xenbus_dev_is_online(struct xenbus_device *dev)
}
EXPORT_SYMBOL_GPL(xenbus_dev_is_online);
-int __xenbus_register_backend(struct xenbus_driver *drv,
- struct module *owner, const char *mod_name)
+int xenbus_register_backend(struct xenbus_driver *drv)
{
drv->read_otherend_details = read_frontend_details;
- return xenbus_register_driver_common(drv, &xenbus_backend,
- owner, mod_name);
+ return xenbus_register_driver_common(drv, &xenbus_backend);
}
-EXPORT_SYMBOL_GPL(__xenbus_register_backend);
+EXPORT_SYMBOL_GPL(xenbus_register_backend);
static int backend_probe_and_watch(struct notifier_block *notifier,
unsigned long event,
diff --git a/drivers/xen/xenbus/xenbus_probe_frontend.c b/drivers/xen/xenbus/xenbus_probe_frontend.c
index 2f73195..9c57819 100644
--- a/drivers/xen/xenbus/xenbus_probe_frontend.c
+++ b/drivers/xen/xenbus/xenbus_probe_frontend.c
@@ -230,15 +230,13 @@ static void wait_for_devices(struct xenbus_driver *xendrv)
print_device_status);
}
-int __xenbus_register_frontend(struct xenbus_driver *drv,
- struct module *owner, const char *mod_name)
+int xenbus_register_frontend(struct xenbus_driver *drv)
{
int ret;
drv->read_otherend_details = read_backend_details;
- ret = xenbus_register_driver_common(drv, &xenbus_frontend,
- owner, mod_name);
+ ret = xenbus_register_driver_common(drv, &xenbus_frontend);
if (ret)
return ret;
@@ -247,7 +245,7 @@ int __xenbus_register_frontend(struct xenbus_driver *drv,
return 0;
}
-EXPORT_SYMBOL_GPL(__xenbus_register_frontend);
+EXPORT_SYMBOL_GPL(xenbus_register_frontend);
static DECLARE_WAIT_QUEUE_HEAD(backend_state_wq);
static int backend_state;
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index ede860f..d1c217b 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -532,21 +532,18 @@ int xenbus_printf(struct xenbus_transaction t,
{
va_list ap;
int ret;
-#define PRINTF_BUFFER_SIZE 4096
- char *printf_buffer;
-
- printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_NOIO | __GFP_HIGH);
- if (printf_buffer == NULL)
- return -ENOMEM;
+ char *buf;
va_start(ap, fmt);
- ret = vsnprintf(printf_buffer, PRINTF_BUFFER_SIZE, fmt, ap);
+ buf = kvasprintf(GFP_NOIO | __GFP_HIGH, fmt, ap);
va_end(ap);
- BUG_ON(ret > PRINTF_BUFFER_SIZE-1);
- ret = xenbus_write(t, dir, node, printf_buffer);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = xenbus_write(t, dir, node, buf);
- kfree(printf_buffer);
+ kfree(buf);
return ret;
}
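The xenbus_printf() rewrite swaps a fixed PRINTF_BUFFER_SIZE buffer (and its BUG_ON on overflow) for kvasprintf(), which sizes the allocation to the formatted string. The same pattern in isolation, as a hedged sketch with an illustrative helper name:

#include <linux/kernel.h>
#include <linux/slab.h>

/* Format into a heap buffer sized by the formatter; the caller kfree()s it. */
static char *demo_fmt(gfp_t gfp, const char *fmt, ...)
{
        va_list ap;
        char *buf;

        va_start(ap, fmt);
        buf = kvasprintf(gfp, fmt, ap);   /* NULL on allocation failure */
        va_end(ap);

        return buf;
}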
@@ -801,6 +798,12 @@ static int process_msg(void)
goto out;
}
+ if (msg->hdr.len > XENSTORE_PAYLOAD_MAX) {
+ kfree(msg);
+ err = -EINVAL;
+ goto out;
+ }
+
body = kmalloc(msg->hdr.len + 1, GFP_NOIO | __GFP_HIGH);
if (body == NULL) {
kfree(msg);
diff --git a/drivers/xen/xenfs/Makefile b/drivers/xen/xenfs/Makefile
index 4fde944..b019865 100644
--- a/drivers/xen/xenfs/Makefile
+++ b/drivers/xen/xenfs/Makefile
@@ -1,4 +1,4 @@
obj-$(CONFIG_XENFS) += xenfs.o
-xenfs-y = super.o xenbus.o privcmd.o
+xenfs-y = super.o
xenfs-$(CONFIG_XEN_DOM0) += xenstored.o
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index 1aa3897..a84b53c 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -16,6 +16,8 @@
#include <xen/xen.h>
#include "xenfs.h"
+#include "../privcmd.h"
+#include "../xenbus/xenbus_comms.h"
#include <asm/xen/hypervisor.h>
@@ -82,9 +84,9 @@ static int xenfs_fill_super(struct super_block *sb, void *data, int silent)
{
static struct tree_descr xenfs_files[] = {
[1] = {},
- { "xenbus", &xenbus_file_ops, S_IRUSR|S_IWUSR },
+ { "xenbus", &xen_xenbus_fops, S_IRUSR|S_IWUSR },
{ "capabilities", &capabilities_file_ops, S_IRUGO },
- { "privcmd", &privcmd_file_ops, S_IRUSR|S_IWUSR },
+ { "privcmd", &xen_privcmd_fops, S_IRUSR|S_IWUSR },
{""},
};
int rc;
diff --git a/drivers/xen/xenfs/xenfs.h b/drivers/xen/xenfs/xenfs.h
index b68aa62..6b80c77 100644
--- a/drivers/xen/xenfs/xenfs.h
+++ b/drivers/xen/xenfs/xenfs.h
@@ -1,8 +1,6 @@
#ifndef _XENFS_XENBUS_H
#define _XENFS_XENBUS_H
-extern const struct file_operations xenbus_file_ops;
-extern const struct file_operations privcmd_file_ops;
extern const struct file_operations xsd_kva_file_ops;
extern const struct file_operations xsd_port_file_ops;
diff --git a/drivers/zorro/zorro.ids b/drivers/zorro/zorro.ids
index de24e3d..119abea 100644
--- a/drivers/zorro/zorro.ids
+++ b/drivers/zorro/zorro.ids
@@ -351,7 +351,7 @@
0200 EGS 28/24 Spectrum [Graphics Card]
0892 Apollo
0100 A1200 [FPU and RAM Expansion]
-0893 Ingenieurbro Helfrich
+0893 Ingenieurbüro Helfrich
0500 Piccolo RAM [Graphics Card]
0600 Piccolo [Graphics Card]
0700 PeggyPlus MPEG [Video Card]